Diffstat (limited to 'drivers')
-rw-r--r--  drivers/Makefile | 2
-rw-r--r--  drivers/acpi/Kconfig | 4
-rw-r--r--  drivers/acpi/acpi_apd.c | 7
-rw-r--r--  drivers/acpi/acpi_lpss.c | 8
-rw-r--r--  drivers/acpi/acpi_video.c | 11
-rw-r--r--  drivers/acpi/acpica/acevents.h | 4
-rw-r--r--  drivers/acpi/acpica/acglobal.h | 5
-rw-r--r--  drivers/acpi/acpica/aclocal.h | 19
-rw-r--r--  drivers/acpi/acpica/acnamesp.h | 3
-rw-r--r--  drivers/acpi/acpica/actables.h | 11
-rw-r--r--  drivers/acpi/acpica/acutils.h | 2
-rw-r--r--  drivers/acpi/acpica/amlcode.h | 1
-rw-r--r--  drivers/acpi/acpica/dsinit.c | 4
-rw-r--r--  drivers/acpi/acpica/dsopcode.c | 2
-rw-r--r--  drivers/acpi/acpica/dswload2.c | 13
-rw-r--r--  drivers/acpi/acpica/evrgnini.c | 59
-rw-r--r--  drivers/acpi/acpica/exconfig.c | 42
-rw-r--r--  drivers/acpi/acpica/nsnames.c | 45
-rw-r--r--  drivers/acpi/acpica/nsxfname.c | 43
-rw-r--r--  drivers/acpi/acpica/tbdata.c | 81
-rw-r--r--  drivers/acpi/acpica/tbfadt.c | 14
-rw-r--r--  drivers/acpi/acpica/tbutils.c | 85
-rw-r--r--  drivers/acpi/acpica/tbxface.c | 146
-rw-r--r--  drivers/acpi/acpica/tbxfload.c | 44
-rw-r--r--  drivers/acpi/acpica/utdecode.c | 49
-rw-r--r--  drivers/acpi/apei/ghes.c | 7
-rw-r--r--  drivers/acpi/apei/hest.c | 13
-rw-r--r--  drivers/acpi/arm64/iort.c | 607
-rw-r--r--  drivers/acpi/battery.c | 72
-rw-r--r--  drivers/acpi/blacklist.c | 28
-rw-r--r--  drivers/acpi/bus.c | 12
-rw-r--r--  drivers/acpi/cppc_acpi.c | 16
-rw-r--r--  drivers/acpi/device_sysfs.c | 8
-rw-r--r--  drivers/acpi/event.c | 6
-rw-r--r--  drivers/acpi/glue.c | 4
-rw-r--r--  drivers/acpi/nfit/core.c | 3
-rw-r--r--  drivers/acpi/numa.c | 2
-rw-r--r--  drivers/acpi/osl.c | 34
-rw-r--r--  drivers/acpi/pci_mcfg.c | 190
-rw-r--r--  drivers/acpi/processor_core.c | 8
-rw-r--r--  drivers/acpi/processor_idle.c | 2
-rw-r--r--  drivers/acpi/processor_perflib.c | 55
-rw-r--r--  drivers/acpi/property.c | 125
-rw-r--r--  drivers/acpi/resource.c | 57
-rw-r--r--  drivers/acpi/scan.c | 36
-rw-r--r--  drivers/acpi/sleep.c | 8
-rw-r--r--  drivers/acpi/spcr.c | 8
-rw-r--r--  drivers/acpi/tables.c | 17
-rw-r--r--  drivers/acpi/video_detect.c | 20
-rw-r--r--  drivers/ata/ahci.c | 39
-rw-r--r--  drivers/ata/ahci_qoriq.c | 16
-rw-r--r--  drivers/ata/libahci.c | 1
-rw-r--r--  drivers/ata/libata-core.c | 44
-rw-r--r--  drivers/ata/libata-scsi.c | 84
-rw-r--r--  drivers/ata/libata.h | 2
-rw-r--r--  drivers/ata/pata_imx.c | 82
-rw-r--r--  drivers/atm/solos-pci.c | 2
-rw-r--r--  drivers/auxdisplay/Kconfig | 13
-rw-r--r--  drivers/auxdisplay/Makefile | 1
-rw-r--r--  drivers/auxdisplay/ht16k33.c | 563
-rw-r--r--  drivers/base/Kconfig | 9
-rw-r--r--  drivers/base/Makefile | 2
-rw-r--r--  drivers/base/base.h | 15
-rw-r--r--  drivers/base/cacheinfo.c | 200
-rw-r--r--  drivers/base/class.c | 15
-rw-r--r--  drivers/base/core.c | 578
-rw-r--r--  drivers/base/dd.c | 79
-rw-r--r--  drivers/base/devcoredump.c | 10
-rw-r--r--  drivers/base/devres.c | 66
-rw-r--r--  drivers/base/dma-mapping.c | 4
-rw-r--r--  drivers/base/firmware_class.c | 178
-rw-r--r--  drivers/base/memory.c | 2
-rw-r--r--  drivers/base/power/domain.c | 363
-rw-r--r--  drivers/base/power/main.c | 89
-rw-r--r--  drivers/base/power/opp/core.c | 521
-rw-r--r--  drivers/base/power/opp/debugfs.c | 52
-rw-r--r--  drivers/base/power/opp/of.c | 111
-rw-r--r--  drivers/base/power/opp/opp.h | 23
-rw-r--r--  drivers/base/power/power.h | 29
-rw-r--r--  drivers/base/power/qos.c | 6
-rw-r--r--  drivers/base/power/runtime.c | 236
-rw-r--r--  drivers/base/power/sysfs.c | 6
-rw-r--r--  drivers/base/power/trace.c | 27
-rw-r--r--  drivers/base/power/wakeirq.c | 76
-rw-r--r--  drivers/base/power/wakeup.c | 6
-rw-r--r--  drivers/base/regmap/regcache-lzo.c | 8
-rw-r--r--  drivers/base/soc.c | 79
-rw-r--r--  drivers/base/test/Kconfig | 9
-rw-r--r--  drivers/base/test/Makefile | 1
-rw-r--r--  drivers/base/test/test_async_driver_probe.c | 169
-rw-r--r--  drivers/base/topology.c | 42
-rw-r--r--  drivers/bcma/host_pci.c | 1
-rw-r--r--  drivers/block/Kconfig | 5
-rw-r--r--  drivers/block/brd.c | 39
-rw-r--r--  drivers/block/cciss_scsi.c | 72
-rw-r--r--  drivers/block/drbd/drbd_actlog.c | 2
-rw-r--r--  drivers/block/drbd/drbd_receiver.c | 16
-rw-r--r--  drivers/block/floppy.c | 10
-rw-r--r--  drivers/block/loop.c | 2
-rw-r--r--  drivers/block/mtip32xx/mtip32xx.c | 18
-rw-r--r--  drivers/block/nbd.c | 443
-rw-r--r--  drivers/block/null_blk.c | 1
-rw-r--r--  drivers/block/pktcdvd.c | 49
-rw-r--r--  drivers/block/rbd.c | 2
-rw-r--r--  drivers/block/skd_main.c | 238
-rw-r--r--  drivers/block/umem.c | 2
-rw-r--r--  drivers/block/xen-blkback/blkback.c | 10
-rw-r--r--  drivers/block/xen-blkback/xenbus.c | 36
-rw-r--r--  drivers/block/xen-blkfront.c | 84
-rw-r--r--  drivers/block/zram/zcomp.c | 76
-rw-r--r--  drivers/block/zram/zcomp.h | 5
-rw-r--r--  drivers/block/zram/zram_drv.c | 9
-rw-r--r--  drivers/bluetooth/Makefile | 2
-rw-r--r--  drivers/bluetooth/btmrvl_drv.h | 1
-rw-r--r--  drivers/bluetooth/hci_bcsp.c | 4
-rw-r--r--  drivers/bluetooth/hci_h5.c | 4
-rw-r--r--  drivers/bluetooth/hci_qca.c | 9
-rw-r--r--  drivers/bluetooth/hci_vhci.c | 2
-rw-r--r--  drivers/bus/Kconfig | 16
-rw-r--r--  drivers/bus/Makefile | 3
-rw-r--r--  drivers/bus/arm-cci.c | 10
-rw-r--r--  drivers/bus/da8xx-mstpri.c | 267
-rw-r--r--  drivers/bus/tegra-gmi.c | 284
-rw-r--r--  drivers/bus/vexpress-config.c | 7
-rw-r--r--  drivers/char/Kconfig | 4
-rw-r--r--  drivers/char/agp/alpha-agp.c | 3
-rw-r--r--  drivers/char/hw_random/Kconfig | 2
-rw-r--r--  drivers/char/hw_random/atmel-rng.c | 26
-rw-r--r--  drivers/char/hw_random/core.c | 3
-rw-r--r--  drivers/char/hw_random/meson-rng.c | 2
-rw-r--r--  drivers/char/hw_random/msm-rng.c | 4
-rw-r--r--  drivers/char/hw_random/omap-rng.c | 162
-rw-r--r--  drivers/char/hw_random/pic32-rng.c | 3
-rw-r--r--  drivers/char/hw_random/pseries-rng.c | 5
-rw-r--r--  drivers/char/hw_random/via-rng.c | 8
-rw-r--r--  drivers/char/ipmi/ipmi_devintf.c | 1
-rw-r--r--  drivers/char/ipmi/ipmi_msghandler.c | 10
-rw-r--r--  drivers/char/ipmi/ipmi_si_intf.c | 190
-rw-r--r--  drivers/char/ipmi/ipmi_ssif.c | 37
-rw-r--r--  drivers/char/mspec.c | 2
-rw-r--r--  drivers/char/pcmcia/Kconfig | 11
-rw-r--r--  drivers/char/pcmcia/Makefile | 1
-rw-r--r--  drivers/char/pcmcia/cm4000_cs.c | 4
-rw-r--r--  drivers/char/pcmcia/scr24x_cs.c | 373
-rw-r--r--  drivers/char/pcmcia/synclink_cs.c | 1
-rw-r--r--  drivers/char/ppdev.c | 36
-rw-r--r--  drivers/char/snsc.c | 2
-rw-r--r--  drivers/char/tile-srom.c | 3
-rw-r--r--  drivers/char/tpm/Kconfig | 2
-rw-r--r--  drivers/char/tpm/Makefile | 14
-rw-r--r--  drivers/char/tpm/tpm-chip.c | 42
-rw-r--r--  drivers/char/tpm/tpm-interface.c | 110
-rw-r--r--  drivers/char/tpm/tpm-sysfs.c | 7
-rw-r--r--  drivers/char/tpm/tpm.h | 41
-rw-r--r--  drivers/char/tpm/tpm2-cmd.c | 2
-rw-r--r--  drivers/char/tpm/tpm_acpi.c | 46
-rw-r--r--  drivers/char/tpm/tpm_crb.c | 173
-rw-r--r--  drivers/char/tpm/tpm_eventlog.c | 230
-rw-r--r--  drivers/char/tpm/tpm_eventlog.h | 22
-rw-r--r--  drivers/char/tpm/tpm_of.c | 48
-rw-r--r--  drivers/char/tpm/tpm_tis.c | 11
-rw-r--r--  drivers/char/tpm/tpm_tis_core.c | 64
-rw-r--r--  drivers/char/tpm/tpm_vtpm_proxy.c | 85
-rw-r--r--  drivers/char/tpm/xen-tpmfront.c | 9
-rw-r--r--  drivers/char/virtio_console.c | 14
-rw-r--r--  drivers/clk/Kconfig | 4
-rw-r--r--  drivers/clk/bcm/Kconfig | 16
-rw-r--r--  drivers/clk/bcm/clk-bcm2835.c | 80
-rw-r--r--  drivers/clk/clk-cdce925.c | 2
-rw-r--r--  drivers/clk/clk-devres.c | 21
-rw-r--r--  drivers/clk/clk-gate.c | 4
-rw-r--r--  drivers/clk/clk-oxnas.c | 232
-rw-r--r--  drivers/clk/clk-qoriq.c | 60
-rw-r--r--  drivers/clk/clk-stm32f4.c | 435
-rw-r--r--  drivers/clk/clk-wm831x.c | 2
-rw-r--r--  drivers/clk/clkdev.c | 8
-rw-r--r--  drivers/clk/hisilicon/Kconfig | 17
-rw-r--r--  drivers/clk/hisilicon/Makefile | 2
-rw-r--r--  drivers/clk/hisilicon/crg-hi3516cv300.c | 330
-rw-r--r--  drivers/clk/hisilicon/crg-hi3798cv200.c | 337
-rw-r--r--  drivers/clk/hisilicon/crg.h | 34
-rw-r--r--  drivers/clk/imx/clk-imx31.c | 52
-rw-r--r--  drivers/clk/imx/clk-imx6q.c | 283
-rw-r--r--  drivers/clk/imx/clk-imx6ul.c | 72
-rw-r--r--  drivers/clk/imx/clk-pllv3.c | 8
-rw-r--r--  drivers/clk/imx/clk.h | 8
-rw-r--r--  drivers/clk/keystone/pll.c | 13
-rw-r--r--  drivers/clk/mediatek/Kconfig | 43
-rw-r--r--  drivers/clk/mediatek/Makefile | 7
-rw-r--r--  drivers/clk/mediatek/clk-gate.c | 52
-rw-r--r--  drivers/clk/mediatek/clk-gate.h | 2
-rw-r--r--  drivers/clk/mediatek/clk-mt2701-bdp.c | 138
-rw-r--r--  drivers/clk/mediatek/clk-mt2701-eth.c | 80
-rw-r--r--  drivers/clk/mediatek/clk-mt2701-hif.c | 81
-rw-r--r--  drivers/clk/mediatek/clk-mt2701-img.c | 80
-rw-r--r--  drivers/clk/mediatek/clk-mt2701-mm.c | 123
-rw-r--r--  drivers/clk/mediatek/clk-mt2701-vdec.c | 91
-rw-r--r--  drivers/clk/mediatek/clk-mt2701.c | 1035
-rw-r--r--  drivers/clk/mediatek/clk-mtk.c | 40
-rw-r--r--  drivers/clk/mediatek/clk-mtk.h | 41
-rw-r--r--  drivers/clk/mediatek/clk-pll.c | 1
-rw-r--r--  drivers/clk/mmp/clk-of-mmp2.c | 15
-rw-r--r--  drivers/clk/mmp/clk-of-pxa1928.c | 3
-rw-r--r--  drivers/clk/mmp/clk-of-pxa910.c | 19
-rw-r--r--  drivers/clk/mvebu/ap806-system-controller.c | 23
-rw-r--r--  drivers/clk/mvebu/cp110-system-controller.c | 167
-rw-r--r--  drivers/clk/nxp/clk-lpc18xx-ccu.c | 5
-rw-r--r--  drivers/clk/nxp/clk-lpc32xx.c | 32
-rw-r--r--  drivers/clk/pxa/clk-pxa.c | 145
-rw-r--r--  drivers/clk/pxa/clk-pxa.h | 59
-rw-r--r--  drivers/clk/pxa/clk-pxa25x.c | 116
-rw-r--r--  drivers/clk/pxa/clk-pxa27x.c | 168
-rw-r--r--  drivers/clk/qcom/Kconfig | 37
-rw-r--r--  drivers/clk/qcom/Makefile | 3
-rw-r--r--  drivers/clk/qcom/clk-alpha-pll.c | 187
-rw-r--r--  drivers/clk/qcom/clk-alpha-pll.h | 25
-rw-r--r--  drivers/clk/qcom/clk-pll.c | 31
-rw-r--r--  drivers/clk/qcom/clk-rcg.h | 1
-rw-r--r--  drivers/clk/qcom/clk-rcg2.c | 76
-rw-r--r--  drivers/clk/qcom/clk-rpm.c | 497
-rw-r--r--  drivers/clk/qcom/clk-smd-rpm.c | 578
-rw-r--r--  drivers/clk/qcom/common.c | 52
-rw-r--r--  drivers/clk/qcom/common.h | 11
-rw-r--r--  drivers/clk/qcom/gcc-apq8084.c | 8
-rw-r--r--  drivers/clk/qcom/gcc-ipq4019.c | 3
-rw-r--r--  drivers/clk/qcom/gcc-ipq806x.c | 4
-rw-r--r--  drivers/clk/qcom/gcc-msm8916.c | 4
-rw-r--r--  drivers/clk/qcom/gcc-msm8974.c | 8
-rw-r--r--  drivers/clk/qcom/gcc-msm8994.c | 2300
-rw-r--r--  drivers/clk/qcom/gcc-msm8996.c | 33
-rw-r--r--  drivers/clk/qcom/gdsc.c | 44
-rw-r--r--  drivers/clk/qcom/gdsc.h | 3
-rw-r--r--  drivers/clk/qcom/lcc-ipq806x.c | 2
-rw-r--r--  drivers/clk/qcom/mmcc-msm8996.c | 26
-rw-r--r--  drivers/clk/renesas/Kconfig | 2
-rw-r--r--  drivers/clk/renesas/Makefile | 2
-rw-r--r--  drivers/clk/renesas/clk-r8a7778.c | 26
-rw-r--r--  drivers/clk/renesas/clk-r8a7779.c | 18
-rw-r--r--  drivers/clk/renesas/clk-rcar-gen2.c | 32
-rw-r--r--  drivers/clk/renesas/r8a7743-cpg-mssr.c | 270
-rw-r--r--  drivers/clk/renesas/r8a7745-cpg-mssr.c | 259
-rw-r--r--  drivers/clk/renesas/r8a7795-cpg-mssr.c | 10
-rw-r--r--  drivers/clk/renesas/r8a7796-cpg-mssr.c | 65
-rw-r--r--  drivers/clk/renesas/rcar-gen2-cpg.c | 371
-rw-r--r--  drivers/clk/renesas/rcar-gen2-cpg.h | 43
-rw-r--r--  drivers/clk/renesas/rcar-gen3-cpg.c | 31
-rw-r--r--  drivers/clk/renesas/rcar-gen3-cpg.h | 1
-rw-r--r--  drivers/clk/renesas/renesas-cpg-mssr.c | 29
-rw-r--r--  drivers/clk/renesas/renesas-cpg-mssr.h | 2
-rw-r--r--  drivers/clk/rockchip/Makefile | 1
-rw-r--r--  drivers/clk/rockchip/clk-cpu.c | 9
-rw-r--r--  drivers/clk/rockchip/clk-pll.c | 6
-rw-r--r--  drivers/clk/rockchip/clk-rk1108.c | 531
-rw-r--r--  drivers/clk/rockchip/clk-rk3188.c | 13
-rw-r--r--  drivers/clk/rockchip/clk-rk3399.c | 31
-rw-r--r--  drivers/clk/rockchip/clk.h | 15
-rw-r--r--  drivers/clk/samsung/clk-exynos5433.c | 30
-rw-r--r--  drivers/clk/st/clk-flexgen.c | 5
-rw-r--r--  drivers/clk/sunxi-ng/Kconfig | 14
-rw-r--r--  drivers/clk/sunxi-ng/Makefile | 1
-rw-r--r--  drivers/clk/sunxi-ng/ccu-sun50i-a64.c | 915
-rw-r--r--  drivers/clk/sunxi-ng/ccu-sun50i-a64.h | 72
-rw-r--r--  drivers/clk/sunxi-ng/ccu-sun8i-a23.c | 6
-rw-r--r--  drivers/clk/sunxi-ng/ccu-sun8i-h3.c | 10
-rw-r--r--  drivers/clk/sunxi-ng/ccu_div.h | 6
-rw-r--r--  drivers/clk/sunxi-ng/ccu_frac.c | 12
-rw-r--r--  drivers/clk/sunxi-ng/ccu_frac.h | 14
-rw-r--r--  drivers/clk/sunxi-ng/ccu_mp.h | 4
-rw-r--r--  drivers/clk/sunxi-ng/ccu_mult.c | 33
-rw-r--r--  drivers/clk/sunxi-ng/ccu_mult.h | 17
-rw-r--r--  drivers/clk/sunxi-ng/ccu_nk.c | 43
-rw-r--r--  drivers/clk/sunxi-ng/ccu_nk.h | 4
-rw-r--r--  drivers/clk/sunxi-ng/ccu_nkm.c | 45
-rw-r--r--  drivers/clk/sunxi-ng/ccu_nkm.h | 6
-rw-r--r--  drivers/clk/sunxi-ng/ccu_nkmp.c | 55
-rw-r--r--  drivers/clk/sunxi-ng/ccu_nkmp.h | 8
-rw-r--r--  drivers/clk/sunxi-ng/ccu_nm.c | 58
-rw-r--r--  drivers/clk/sunxi-ng/ccu_nm.h | 6
-rw-r--r--  drivers/clk/sunxi/clk-mod0.c | 2
-rw-r--r--  drivers/clk/tegra/clk-tegra124-dfll-fcpu.c | 21
-rw-r--r--  drivers/clk/tegra/cvb.c | 10
-rw-r--r--  drivers/clk/ti/clk-3xxx.c | 20
-rw-r--r--  drivers/clk/ti/clk-7xx.c | 1
-rw-r--r--  drivers/clk/ti/clk-dra7-atl.c | 20
-rw-r--r--  drivers/clk/ti/clock.h | 9
-rw-r--r--  drivers/clk/ti/dpll.c | 19
-rw-r--r--  drivers/clk/ti/dpll3xxx.c | 67
-rw-r--r--  drivers/clk/uniphier/Makefile | 3
-rw-r--r--  drivers/clk/uniphier/clk-uniphier-core.c | 3
-rw-r--r--  drivers/clk/uniphier/clk-uniphier-cpugear.c | 115
-rw-r--r--  drivers/clk/uniphier/clk-uniphier-sys.c | 32
-rw-r--r--  drivers/clk/uniphier/clk-uniphier.h | 45
-rw-r--r--  drivers/clocksource/Kconfig | 20
-rw-r--r--  drivers/clocksource/Makefile | 1
-rw-r--r--  drivers/clocksource/arc_timer.c | 336
-rw-r--r--  drivers/clocksource/arm_arch_timer.c | 14
-rw-r--r--  drivers/clocksource/bcm2835_timer.c | 14
-rw-r--r--  drivers/clocksource/moxart_timer.c | 22
-rw-r--r--  drivers/clocksource/pxa_timer.c | 11
-rw-r--r--  drivers/clocksource/timer-nps.c | 224
-rw-r--r--  drivers/cpufreq/Kconfig.arm | 29
-rw-r--r--  drivers/cpufreq/Kconfig.x86 | 1
-rw-r--r--  drivers/cpufreq/Makefile | 2
-rw-r--r--  drivers/cpufreq/acpi-cpufreq.c | 117
-rw-r--r--  drivers/cpufreq/brcmstb-avs-cpufreq.c | 1057
-rw-r--r--  drivers/cpufreq/cppc_cpufreq.c | 7
-rw-r--r--  drivers/cpufreq/cpufreq-dt-platdev.c | 15
-rw-r--r--  drivers/cpufreq/cpufreq-dt.c | 12
-rw-r--r--  drivers/cpufreq/cpufreq.c | 25
-rw-r--r--  drivers/cpufreq/cpufreq_conservative.c | 46
-rw-r--r--  drivers/cpufreq/cpufreq_governor.c | 30
-rw-r--r--  drivers/cpufreq/cpufreq_governor.h | 5
-rw-r--r--  drivers/cpufreq/cpufreq_ondemand.c | 17
-rw-r--r--  drivers/cpufreq/cpufreq_stats.c | 22
-rw-r--r--  drivers/cpufreq/integrator-cpufreq.c | 239
-rw-r--r--  drivers/cpufreq/intel_pstate.c | 882
-rw-r--r--  drivers/cpufreq/powernv-cpufreq.c | 65
-rw-r--r--  drivers/cpufreq/s3c64xx-cpufreq.c | 2
-rw-r--r--  drivers/cpuidle/cpuidle-powernv.c | 2
-rw-r--r--  drivers/cpuidle/cpuidle.c | 19
-rw-r--r--  drivers/cpuidle/dt_idle_states.c | 6
-rw-r--r--  drivers/cpuidle/governor.c | 4
-rw-r--r--  drivers/cpuidle/governors/ladder.c | 2
-rw-r--r--  drivers/cpuidle/governors/menu.c | 2
-rw-r--r--  drivers/cpuidle/sysfs.c | 4
-rw-r--r--  drivers/crypto/Kconfig | 2
-rw-r--r--  drivers/crypto/Makefile | 1
-rw-r--r--  drivers/crypto/amcc/crypto4xx_core.c | 3
-rw-r--r--  drivers/crypto/atmel-aes-regs.h | 4
-rw-r--r--  drivers/crypto/atmel-aes.c | 189
-rw-r--r--  drivers/crypto/caam/Kconfig | 11
-rw-r--r--  drivers/crypto/caam/Makefile | 1
-rw-r--r--  drivers/crypto/caam/caamalg.c | 1505
-rw-r--r--  drivers/crypto/caam/caamalg_desc.c | 1306
-rw-r--r--  drivers/crypto/caam/caamalg_desc.h | 97
-rw-r--r--  drivers/crypto/caam/caamhash.c | 227
-rw-r--r--  drivers/crypto/caam/caampkc.c | 4
-rw-r--r--  drivers/crypto/caam/caamrng.c | 10
-rw-r--r--  drivers/crypto/caam/ctrl.c | 75
-rw-r--r--  drivers/crypto/caam/desc.h | 22
-rw-r--r--  drivers/crypto/caam/desc_constr.h | 133
-rw-r--r--  drivers/crypto/caam/error.c | 5
-rw-r--r--  drivers/crypto/caam/intern.h | 1
-rw-r--r--  drivers/crypto/caam/jr.c | 27
-rw-r--r--  drivers/crypto/caam/key_gen.c | 62
-rw-r--r--  drivers/crypto/caam/key_gen.h | 6
-rw-r--r--  drivers/crypto/caam/sg_sw_sec4.h | 6
-rw-r--r--  drivers/crypto/ccp/ccp-dev-v3.c | 4
-rw-r--r--  drivers/crypto/ccp/ccp-dev-v5.c | 30
-rw-r--r--  drivers/crypto/ccp/ccp-dev.c | 6
-rw-r--r--  drivers/crypto/ccp/ccp-dev.h | 45
-rw-r--r--  drivers/crypto/chelsio/Kconfig | 1
-rw-r--r--  drivers/crypto/chelsio/chcr_algo.c | 2017
-rw-r--r--  drivers/crypto/chelsio/chcr_algo.h | 103
-rw-r--r--  drivers/crypto/chelsio/chcr_core.c | 11
-rw-r--r--  drivers/crypto/chelsio/chcr_core.h | 18
-rw-r--r--  drivers/crypto/chelsio/chcr_crypto.h | 115
-rw-r--r--  drivers/crypto/marvell/cesa.c | 4
-rw-r--r--  drivers/crypto/marvell/cesa.h | 5
-rw-r--r--  drivers/crypto/marvell/cipher.c | 8
-rw-r--r--  drivers/crypto/marvell/hash.c | 65
-rw-r--r--  drivers/crypto/marvell/tdma.c | 33
-rw-r--r--  drivers/crypto/mv_cesa.c | 4
-rw-r--r--  drivers/crypto/nx/nx.c | 1
-rw-r--r--  drivers/crypto/padlock-aes.c | 23
-rw-r--r--  drivers/crypto/padlock-sha.c | 18
-rw-r--r--  drivers/crypto/sahara.c | 2
-rw-r--r--  drivers/crypto/talitos.c | 2
-rw-r--r--  drivers/crypto/virtio/Kconfig | 10
-rw-r--r--  drivers/crypto/virtio/Makefile | 5
-rw-r--r--  drivers/crypto/virtio/virtio_crypto_algs.c | 540
-rw-r--r--  drivers/crypto/virtio/virtio_crypto_common.h | 128
-rw-r--r--  drivers/crypto/virtio/virtio_crypto_core.c | 476
-rw-r--r--  drivers/crypto/virtio/virtio_crypto_mgr.c | 264
-rw-r--r--  drivers/crypto/vmx/Makefile | 12
-rw-r--r--  drivers/dax/dax.c | 97
-rw-r--r--  drivers/dax/pmem.c | 3
-rw-r--r--  drivers/devfreq/devfreq.c | 2
-rw-r--r--  drivers/devfreq/event/exynos-nocp.c | 1
-rw-r--r--  drivers/devfreq/event/exynos-ppmu.c | 6
-rw-r--r--  drivers/devfreq/event/rockchip-dfi.c | 1
-rw-r--r--  drivers/devfreq/exynos-bus.c | 29
-rw-r--r--  drivers/devfreq/rk3399_dmc.c | 15
-rw-r--r--  drivers/dma-buf/Kconfig | 2
-rw-r--r--  drivers/dma-buf/Makefile | 2
-rw-r--r--  drivers/dma-buf/dma-buf.c | 28
-rw-r--r--  drivers/dma-buf/dma-fence-array.c (renamed from drivers/dma-buf/fence-array.c) | 91
-rw-r--r--  drivers/dma-buf/dma-fence.c (renamed from drivers/dma-buf/fence.c) | 221
-rw-r--r--  drivers/dma-buf/reservation.c | 197
-rw-r--r--  drivers/dma-buf/seqno-fence.c | 18
-rw-r--r--  drivers/dma-buf/sw_sync.c | 50
-rw-r--r--  drivers/dma-buf/sync_debug.c | 13
-rw-r--r--  drivers/dma-buf/sync_debug.h | 9
-rw-r--r--  drivers/dma-buf/sync_file.c | 66
-rw-r--r--  drivers/dma/Kconfig | 16
-rw-r--r--  drivers/dma/Makefile | 1
-rw-r--r--  drivers/dma/amba-pl08x.c | 11
-rw-r--r--  drivers/dma/at_hdmac.c | 3
-rw-r--r--  drivers/dma/at_xdmac.c | 5
-rw-r--r--  drivers/dma/dmatest.c | 74
-rw-r--r--  drivers/dma/dw/core.c | 2
-rw-r--r--  drivers/dma/dw/platform.c | 18
-rw-r--r--  drivers/dma/dw/regs.h | 3
-rw-r--r--  drivers/dma/edma.c | 3
-rw-r--r--  drivers/dma/fsl_raid.c | 1
-rw-r--r--  drivers/dma/hsu/pci.c | 8
-rw-r--r--  drivers/dma/img-mdc-dma.c | 9
-rw-r--r--  drivers/dma/imx-sdma.c | 13
-rw-r--r--  drivers/dma/ioat/dma.c | 17
-rw-r--r--  drivers/dma/ioat/init.c | 21
-rw-r--r--  drivers/dma/ioat/registers.h | 2
-rw-r--r--  drivers/dma/k3dma.c | 3
-rw-r--r--  drivers/dma/mic_x100_dma.c | 2
-rw-r--r--  drivers/dma/mv_xor.c | 190
-rw-r--r--  drivers/dma/mv_xor.h | 1
-rw-r--r--  drivers/dma/nbpfaxi.c | 38
-rw-r--r--  drivers/dma/omap-dma.c | 187
-rw-r--r--  drivers/dma/pch_dma.c | 5
-rw-r--r--  drivers/dma/pl330.c | 23
-rw-r--r--  drivers/dma/pxa_dma.c | 28
-rw-r--r--  drivers/dma/qcom/hidma.c | 173
-rw-r--r--  drivers/dma/qcom/hidma.h | 9
-rw-r--r--  drivers/dma/qcom/hidma_dbg.c | 4
-rw-r--r--  drivers/dma/qcom/hidma_ll.c | 176
-rw-r--r--  drivers/dma/qcom/hidma_mgmt.c | 11
-rw-r--r--  drivers/dma/s3c24xx-dma.c | 5
-rw-r--r--  drivers/dma/sh/usb-dmac.c | 3
-rw-r--r--  drivers/dma/sirf-dma.c | 4
-rw-r--r--  drivers/dma/st_fdma.c | 889
-rw-r--r--  drivers/dma/st_fdma.h | 249
-rw-r--r--  drivers/dma/stm32-dma.c | 6
-rw-r--r--  drivers/dma/zx296702_dma.c | 3
-rw-r--r--  drivers/edac/altera_edac.c | 5
-rw-r--r--  drivers/edac/amd64_edac.c | 692
-rw-r--r--  drivers/edac/amd64_edac.h | 58
-rw-r--r--  drivers/edac/amd76x_edac.c | 2
-rw-r--r--  drivers/edac/amd8111_edac.c | 1
-rw-r--r--  drivers/edac/amd8131_edac.c | 1
-rw-r--r--  drivers/edac/cell_edac.c | 2
-rw-r--r--  drivers/edac/cpc925_edac.c | 1
-rw-r--r--  drivers/edac/e752x_edac.c | 2
-rw-r--r--  drivers/edac/e7xxx_edac.c | 2
-rw-r--r--  drivers/edac/edac_device.c | 79
-rw-r--r--  drivers/edac/edac_device.h (renamed from drivers/edac/edac_core.h) | 309
-rw-r--r--  drivers/edac/edac_device_sysfs.c | 4
-rw-r--r--  drivers/edac/edac_mc.c | 133
-rw-r--r--  drivers/edac/edac_mc.h | 245
-rw-r--r--  drivers/edac/edac_mc_sysfs.c | 2
-rw-r--r--  drivers/edac/edac_module.c | 2
-rw-r--r--  drivers/edac/edac_module.h | 4
-rw-r--r--  drivers/edac/edac_pci.c | 84
-rw-r--r--  drivers/edac/edac_pci.h | 271
-rw-r--r--  drivers/edac/edac_pci_sysfs.c | 13
-rw-r--r--  drivers/edac/fsl_ddr_edac.c | 1
-rw-r--r--  drivers/edac/ghes_edac.c | 2
-rw-r--r--  drivers/edac/highbank_l2_edac.c | 1
-rw-r--r--  drivers/edac/highbank_mc_edac.c | 1
-rw-r--r--  drivers/edac/i3000_edac.c | 2
-rw-r--r--  drivers/edac/i3200_edac.c | 2
-rw-r--r--  drivers/edac/i5000_edac.c | 2
-rw-r--r--  drivers/edac/i5100_edac.c | 1
-rw-r--r--  drivers/edac/i5400_edac.c | 2
-rw-r--r--  drivers/edac/i7300_edac.c | 2
-rw-r--r--  drivers/edac/i7core_edac.c | 2
-rw-r--r--  drivers/edac/i82443bxgx_edac.c | 2
-rw-r--r--  drivers/edac/i82860_edac.c | 2
-rw-r--r--  drivers/edac/i82875p_edac.c | 2
-rw-r--r--  drivers/edac/i82975x_edac.c | 2
-rw-r--r--  drivers/edac/ie31200_edac.c | 2
-rw-r--r--  drivers/edac/layerscape_edac.c | 2
-rw-r--r--  drivers/edac/mce_amd.c | 44
-rw-r--r--  drivers/edac/mpc85xx_edac.c | 18
-rw-r--r--  drivers/edac/mv64x60_edac.c | 1
-rw-r--r--  drivers/edac/octeon_edac-l2c.c | 1
-rw-r--r--  drivers/edac/octeon_edac-lmc.c | 1
-rw-r--r--  drivers/edac/octeon_edac-pc.c | 1
-rw-r--r--  drivers/edac/octeon_edac-pci.c | 1
-rw-r--r--  drivers/edac/pasemi_edac.c | 2
-rw-r--r--  drivers/edac/ppc4xx_edac.c | 2
-rw-r--r--  drivers/edac/r82600_edac.c | 2
-rw-r--r--  drivers/edac/sb_edac.c | 16
-rw-r--r--  drivers/edac/skx_edac.c | 9
-rw-r--r--  drivers/edac/synopsys_edac.c | 2
-rw-r--r--  drivers/edac/tile_edac.c | 2
-rw-r--r--  drivers/edac/x38_edac.c | 2
-rw-r--r--  drivers/edac/xgene_edac.c | 7
-rw-r--r--  drivers/extcon/extcon-arizona.c | 8
-rw-r--r--  drivers/extcon/extcon-usb-gpio.c | 169
-rw-r--r--  drivers/firewire/net.c | 21
-rw-r--r--  drivers/firmware/Kconfig | 27
-rw-r--r--  drivers/firmware/Makefile | 3
-rw-r--r--  drivers/firmware/arm_scpi.c | 276
-rw-r--r--  drivers/firmware/dmi_scan.c | 4
-rw-r--r--  drivers/firmware/efi/Kconfig | 18
-rw-r--r--  drivers/firmware/efi/Makefile | 2
-rw-r--r--  drivers/firmware/efi/apple-properties.c | 248
-rw-r--r--  drivers/firmware/efi/arm-init.c | 4
-rw-r--r--  drivers/firmware/efi/arm-runtime.c | 4
-rw-r--r--  drivers/firmware/efi/dev-path-parser.c | 203
-rw-r--r--  drivers/firmware/efi/efi.c | 76
-rw-r--r--  drivers/firmware/efi/libstub/Makefile | 6
-rw-r--r--  drivers/firmware/efi/libstub/arm-stub.c | 2
-rw-r--r--  drivers/firmware/efi/libstub/efi-stub-helper.c | 33
-rw-r--r--  drivers/firmware/efi/libstub/efistub.h | 11
-rw-r--r--  drivers/firmware/efi/libstub/random.c | 67
-rw-r--r--  drivers/firmware/efi/test/efi_test.c | 15
-rw-r--r--  drivers/firmware/psci.c | 2
-rw-r--r--  drivers/firmware/psci_checker.c | 490
-rw-r--r--  drivers/firmware/qcom_scm.c | 53
-rw-r--r--  drivers/firmware/tegra/Kconfig | 25
-rw-r--r--  drivers/firmware/tegra/Makefile | 2
-rw-r--r--  drivers/firmware/tegra/bpmp.c | 868
-rw-r--r--  drivers/firmware/tegra/ivc.c | 695
-rw-r--r--  drivers/firmware/ti_sci.c | 1991
-rw-r--r--  drivers/firmware/ti_sci.h | 492
-rw-r--r--  drivers/fpga/Kconfig | 39
-rw-r--r--  drivers/fpga/Makefile | 9
-rw-r--r--  drivers/fpga/altera-fpga2sdram.c | 180
-rw-r--r--  drivers/fpga/altera-freeze-bridge.c | 273
-rw-r--r--  drivers/fpga/altera-hps2fpga.c | 222
-rw-r--r--  drivers/fpga/fpga-bridge.c | 395
-rw-r--r--  drivers/fpga/fpga-mgr.c | 97
-rw-r--r--  drivers/fpga/fpga-region.c | 603
-rw-r--r--  drivers/fpga/socfpga-a10.c | 557
-rw-r--r--  drivers/fpga/socfpga.c | 7
-rw-r--r--  drivers/fpga/zynq-fpga.c | 56
-rw-r--r--  drivers/gpio/Kconfig | 38
-rw-r--r--  drivers/gpio/Makefile | 3
-rw-r--r--  drivers/gpio/gpio-adnp.c | 12
-rw-r--r--  drivers/gpio/gpio-altera-a10sr.c | 130
-rw-r--r--  drivers/gpio/gpio-arizona.c | 9
-rw-r--r--  drivers/gpio/gpio-axp209.c | 8
-rw-r--r--  drivers/gpio/gpio-crystalcove.c | 6
-rw-r--r--  drivers/gpio/gpio-davinci.c | 7
-rw-r--r--  drivers/gpio/gpio-dln2.c | 1
-rw-r--r--  drivers/gpio/gpio-etraxfs.c | 7
-rw-r--r--  drivers/gpio/gpio-htc-egpio.c | 54
-rw-r--r--  drivers/gpio/gpio-intel-mid.c | 7
-rw-r--r--  drivers/gpio/gpio-max732x.c | 17
-rw-r--r--  drivers/gpio/gpio-max77620.c | 11
-rw-r--r--  drivers/gpio/gpio-mb86s7x.c | 6
-rw-r--r--  drivers/gpio/gpio-mcp23s08.c | 17
-rw-r--r--  drivers/gpio/gpio-merrifield.c | 33
-rw-r--r--  drivers/gpio/gpio-mxs.c | 45
-rw-r--r--  drivers/gpio/gpio-pca953x.c | 18
-rw-r--r--  drivers/gpio/gpio-pcf857x.c | 11
-rw-r--r--  drivers/gpio/gpio-pl061.c | 208
-rw-r--r--  drivers/gpio/gpio-stmpe.c | 19
-rw-r--r--  drivers/gpio/gpio-sx150x.c | 792
-rw-r--r--  drivers/gpio/gpio-tc3589x.c | 17
-rw-r--r--  drivers/gpio/gpio-tps65218.c | 3
-rw-r--r--  drivers/gpio/gpio-vf610.c | 6
-rw-r--r--  drivers/gpio/gpio-wcove.c | 6
-rw-r--r--  drivers/gpio/gpiolib-acpi.c | 107
-rw-r--r--  drivers/gpio/gpiolib-devprop.c | 67
-rw-r--r--  drivers/gpio/gpiolib-of.c | 51
-rw-r--r--  drivers/gpio/gpiolib.c | 107
-rw-r--r--  drivers/gpio/gpiolib.h | 2
-rw-r--r--  drivers/gpu/drm/Kconfig | 21
-rw-r--r--  drivers/gpu/drm/Makefile | 9
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/Makefile | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/ObjectID.h | 7
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu.h | 896
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c | 17
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_acp.h | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c | 95
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h | 12
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c | 8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c | 11
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c | 57
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c | 93
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 307
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c | 40
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 575
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_display.c | 30
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c | 16
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h | 450
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 25
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c | 27
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c | 97
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c | 50
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c | 15
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c | 12
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c | 9
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_job.c | 22
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c | 89
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h | 7
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 128
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_object.h | 11
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c | 103
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c | 17
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.h | 8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c | 23
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h | 186
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c | 28
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c | 48
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_sync.h | 56
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_test.c | 12
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 280
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h | 5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c | 41
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c | 80
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c | 134
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h | 5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 450
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h | 205
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c | 222
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/atombios_crtc.c | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/ci_dpm.c | 120
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/cik.c | 834
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/cik.h | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/cik_ih.c | 11
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/cik_ih.h | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/cik_sdma.c | 55
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/cik_sdma.h | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/cikd.h | 8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/cz_dpm.c | 36
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/cz_ih.c | 10
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/cz_ih.h | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v10_0.c | 168
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v10_0.h | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v11_0.c | 168
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v11_0.h | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v6_0.c | 798
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v6_0.h | 5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v8_0.c | 326
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v8_0.h | 6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_virtual.c | 435
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_virtual.h | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c | 1771
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v6_0.h | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c | 182
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v7_0.h | 5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 852
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v8_0.h | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c | 356
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v6_0.h | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c | 45
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v7_0.h | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c | 54
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v8_0.h | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/iceland_ih.c | 10
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/iceland_ih.h | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/kv_dpm.c | 27
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c | 59
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/sdma_v2_4.h | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c | 68
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/sdma_v3_0.h | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/si.c | 604
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/si.h | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/si_dma.c | 51
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/si_dma.h | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/si_dpm.c | 67
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/si_enums.h | 272
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/si_ih.c | 10
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/si_ih.h | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/tonga_ih.c | 10
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/tonga_ih.h | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c | 61
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/uvd_v4_2.h | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c | 149
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/uvd_v5_0.h | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c | 205
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/uvd_v6_0.h | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vce_v2_0.c | 22
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vce_v2_0.h | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vce_v3_0.c | 76
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vce_v3_0.h | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vi.c | 1200
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vi.h | 2
-rw-r--r--  drivers/gpu/drm/amd/include/amd_shared.h | 27
-rw-r--r--  drivers/gpu/drm/amd/include/asic_reg/bif/bif_3_0_d.h | 661
-rw-r--r--  drivers/gpu/drm/amd/include/asic_reg/bif/bif_3_0_sh_mask.h | 8127
-rw-r--r-- [-rwxr-xr-x]  drivers/gpu/drm/amd/include/asic_reg/dce/dce_11_2_d.h | 0
-rw-r--r-- [-rwxr-xr-x]  drivers/gpu/drm/amd/include/asic_reg/dce/dce_11_2_sh_mask.h | 0
-rw-r--r--  drivers/gpu/drm/amd/include/asic_reg/dce/dce_6_0_d.h | 4457
-rw-r--r--  drivers/gpu/drm/amd/include/asic_reg/dce/dce_6_0_sh_mask.h | 9836
-rw-r--r--  drivers/gpu/drm/amd/include/asic_reg/gca/gfx_6_0_d.h | 1784
-rw-r--r--  drivers/gpu/drm/amd/include/asic_reg/gca/gfx_6_0_sh_mask.h | 12821
-rw-r--r--  drivers/gpu/drm/amd/include/asic_reg/gmc/gmc_6_0_d.h | 1274
-rw-r--r--  drivers/gpu/drm/amd/include/asic_reg/gmc/gmc_6_0_sh_mask.h | 11895
-rw-r--r--  drivers/gpu/drm/amd/include/asic_reg/oss/oss_1_0_d.h | 275
-rw-r--r--  drivers/gpu/drm/amd/include/asic_reg/oss/oss_1_0_sh_mask.h | 1079
-rw-r--r--  drivers/gpu/drm/amd/include/asic_reg/smu/smu_6_0_d.h | 148
-rw-r--r--  drivers/gpu/drm/amd/include/asic_reg/smu/smu_6_0_sh_mask.h | 715
-rw-r--r--  drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_1_d.h | 2
-rw-r--r--  drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_2_d.h | 2
-rw-r--r--  drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_3_d.h | 2
-rw-r--r--  drivers/gpu/drm/amd/include/asic_reg/uvd/uvd_4_0_d.h | 96
-rw-r--r--  drivers/gpu/drm/amd/include/asic_reg/uvd/uvd_4_0_sh_mask.h | 795
-rw-r--r--  drivers/gpu/drm/amd/include/asic_reg/vce/vce_1_0_d.h | 64
-rw-r--r--  drivers/gpu/drm/amd/include/asic_reg/vce/vce_1_0_sh_mask.h | 99
-rw-r--r-- [-rwxr-xr-x]  drivers/gpu/drm/amd/include/cgs_common.h | 6
-rw-r--r--  drivers/gpu/drm/amd/powerplay/amd_powerplay.c | 93
-rw-r--r--  drivers/gpu/drm/amd/powerplay/eventmgr/eventtasks.c | 2
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c | 4
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c | 12
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/functiontables.c | 2
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c | 13
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c | 31
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/pp_acpi.c | 1
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c | 10
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c | 6
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c | 18
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c | 73
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c | 5
-rw-r--r--  drivers/gpu/drm/amd/powerplay/inc/amd_powerplay.h | 4
-rw-r--r--  drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h | 1
-rw-r--r--  drivers/gpu/drm/amd/powerplay/inc/hwmgr.h | 22
-rw-r--r--  drivers/gpu/drm/amd/powerplay/inc/power_state.h | 9
-rw-r--r--  drivers/gpu/drm/amd/powerplay/inc/pp_acpi.h | 9
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/fiji_smc.c | 12
-rw-r--r-- [-rwxr-xr-x]  drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c | 18
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/iceland_smc.c | 10
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smc.c | 10
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c | 5
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c | 28
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.h | 2
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/tonga_smc.c | 10
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c | 3
-rw-r--r--  drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h | 4
-rw-r--r--  drivers/gpu/drm/amd/scheduler/gpu_scheduler.c | 67
-rw-r--r--  drivers/gpu/drm/amd/scheduler/gpu_scheduler.h | 26
-rw-r--r--  drivers/gpu/drm/amd/scheduler/sched_fence.c | 48
-rw-r--r--  drivers/gpu/drm/arc/arcpgu_drv.c | 2
-rw-r--r--  drivers/gpu/drm/arc/arcpgu_sim.c | 7
-rw-r--r--  drivers/gpu/drm/arm/hdlcd_crtc.c | 5
-rw-r--r--  drivers/gpu/drm/arm/hdlcd_drv.c | 24
-rw-r--r--  drivers/gpu/drm/arm/malidp_drv.c | 43
-rw-r--r--  drivers/gpu/drm/arm/malidp_drv.h | 3
-rw-r--r--  drivers/gpu/drm/arm/malidp_hw.c | 12
-rw-r--r--  drivers/gpu/drm/arm/malidp_hw.h | 9
-rw-r--r--  drivers/gpu/drm/arm/malidp_planes.c | 99
-rw-r--r--  drivers/gpu/drm/armada/Makefile | 2
-rw-r--r--  drivers/gpu/drm/armada/armada_crtc.c | 121
-rw-r--r--  drivers/gpu/drm/armada/armada_crtc.h | 10
-rw-r--r--  drivers/gpu/drm/armada/armada_debugfs.c | 2
-rw-r--r--  drivers/gpu/drm/armada/armada_drm.h | 1
-rw-r--r--  drivers/gpu/drm/armada/armada_drv.c | 238
-rw-r--r--  drivers/gpu/drm/armada/armada_fbdev.c | 8
-rw-r--r--  drivers/gpu/drm/armada/armada_gem.c | 15
-rw-r--r--  drivers/gpu/drm/armada/armada_overlay.c | 65
-rw-r--r--  drivers/gpu/drm/armada/armada_trace.c | 4
-rw-r--r--  drivers/gpu/drm/armada/armada_trace.h | 66
-rw-r--r--  drivers/gpu/drm/ast/ast_drv.c | 2
-rw-r--r--  drivers/gpu/drm/ast/ast_fb.c | 2
-rw-r--r--  drivers/gpu/drm/ast/ast_main.c | 7
-rw-r--r--  drivers/gpu/drm/ast/ast_mode.c | 7
-rw-r--r--  drivers/gpu/drm/ast/ast_ttm.c | 1
-rw-r--r--  drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c | 5
-rw-r--r--  drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c | 41
-rw-r--r--  drivers/gpu/drm/bochs/bochs_drv.c | 2
-rw-r--r--  drivers/gpu/drm/bochs/bochs_fbdev.c | 6
-rw-r--r--  drivers/gpu/drm/bochs/bochs_kms.c | 7
-rw-r--r--  drivers/gpu/drm/bochs/bochs_mm.c | 1
-rw-r--r--  drivers/gpu/drm/bridge/Kconfig | 23
-rw-r--r--  drivers/gpu/drm/bridge/Makefile | 3
-rw-r--r--  drivers/gpu/drm/bridge/adv7511/Kconfig | 8
-rw-r--r--  drivers/gpu/drm/bridge/adv7511/Makefile | 1
-rw-r--r--  drivers/gpu/drm/bridge/adv7511/adv7511.h | 16
-rw-r--r--  drivers/gpu/drm/bridge/adv7511/adv7511_audio.c | 213
-rw-r--r--  drivers/gpu/drm/bridge/adv7511/adv7511_drv.c | 4
-rw-r--r--  drivers/gpu/drm/bridge/adv7511/adv7533.c | 1
-rw-r--r--  drivers/gpu/drm/bridge/analogix/analogix_dp_core.c | 4
-rw-r--r--  drivers/gpu/drm/bridge/analogix/analogix_dp_reg.c | 2
-rw-r--r--  drivers/gpu/drm/bridge/dumb-vga-dac.c | 33
-rw-r--r--  drivers/gpu/drm/bridge/dw-hdmi-audio.h | 7
-rw-r--r--  drivers/gpu/drm/bridge/dw-hdmi-i2s-audio.c | 141
-rw-r--r--  drivers/gpu/drm/bridge/dw-hdmi.c | 301
-rw-r--r--  drivers/gpu/drm/bridge/dw-hdmi.h | 39
-rw-r--r--  drivers/gpu/drm/bridge/nxp-ptn3460.c | 7
-rw-r--r--  drivers/gpu/drm/bridge/parade-ps8622.c | 7
-rw-r--r--  drivers/gpu/drm/bridge/sil-sii8620.c | 1564
-rw-r--r--  drivers/gpu/drm/bridge/sil-sii8620.h | 1517
-rw-r--r--  drivers/gpu/drm/bridge/tc358767.c | 9
-rw-r--r--  drivers/gpu/drm/bridge/ti-tfp410.c | 317
-rw-r--r--  drivers/gpu/drm/cirrus/cirrus_drv.c | 2
-rw-r--r--  drivers/gpu/drm/cirrus/cirrus_fbdev.c | 6
-rw-r--r--  drivers/gpu/drm/cirrus/cirrus_main.c | 4
-rw-r--r--  drivers/gpu/drm/cirrus/cirrus_mode.c | 7
-rw-r--r--  drivers/gpu/drm/cirrus/cirrus_ttm.c | 1
-rw-r--r--  drivers/gpu/drm/drm_atomic.c | 583
-rw-r--r--  drivers/gpu/drm/drm_atomic_helper.c | 205
-rw-r--r--  drivers/gpu/drm/drm_blend.c | 39
-rw-r--r--  drivers/gpu/drm/drm_color_mgmt.c | 12
-rw-r--r--  drivers/gpu/drm/drm_connector.c | 148
-rw-r--r--  drivers/gpu/drm/drm_crtc.c | 776
-rw-r--r--  drivers/gpu/drm/drm_crtc_internal.h | 36
-rw-r--r--  drivers/gpu/drm/drm_debugfs.c | 52
-rw-r--r--  drivers/gpu/drm/drm_debugfs_crc.c | 352
-rw-r--r--  drivers/gpu/drm/drm_dp_dual_mode_helper.c | 121
-rw-r--r--  drivers/gpu/drm/drm_drv.c | 130
-rw-r--r--  drivers/gpu/drm/drm_dumb_buffers.c | 128
-rw-r--r--  drivers/gpu/drm/drm_edid.c | 207
-rw-r--r--  drivers/gpu/drm/drm_encoder.c | 9
-rw-r--r--  drivers/gpu/drm/drm_fb_cma_helper.c | 67
-rw-r--r--  drivers/gpu/drm/drm_fb_helper.c | 155
-rw-r--r--  drivers/gpu/drm/drm_fops.c | 21
-rw-r--r--  drivers/gpu/drm/drm_fourcc.c | 293
-rw-r--r--  drivers/gpu/drm/drm_framebuffer.c | 123
-rw-r--r--  drivers/gpu/drm/drm_internal.h | 28
-rw-r--r--  drivers/gpu/drm/drm_ioctl.c | 33
-rw-r--r--  drivers/gpu/drm/drm_irq.c | 164
-rw-r--r--  drivers/gpu/drm/drm_lock.c | 3
-rw-r--r--  drivers/gpu/drm/drm_mm.c | 99
-rw-r--r--  drivers/gpu/drm/drm_mode_config.c | 494
-rw-r--r--  drivers/gpu/drm/drm_modes.c | 16
-rw-r--r--  drivers/gpu/drm/drm_modeset_helper.c | 25
-rw-r--r--  drivers/gpu/drm/drm_modeset_lock.c | 23
-rw-r--r--  drivers/gpu/drm/drm_of.c | 28
-rw-r--r--  drivers/gpu/drm/drm_plane.c | 16
-rw-r--r--  drivers/gpu/drm/drm_plane_helper.c | 11
-rw-r--r--  drivers/gpu/drm/drm_prime.c | 12
-rw-r--r--  drivers/gpu/drm/drm_print.c | 59
-rw-r--r--  drivers/gpu/drm/drm_probe_helper.c | 20
-rw-r--r--  drivers/gpu/drm/drm_property.c | 54
-rw-r--r--  drivers/gpu/drm/drm_rect.c | 11
-rw-r--r--  drivers/gpu/drm/drm_vm.c | 10
-rw-r--r--  drivers/gpu/drm/etnaviv/cmdstream.xml.h | 60
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_cmd_parser.c | 1
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_drv.c | 8
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_drv.h | 2
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_dump.c | 3
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_gem.c | 37
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c | 19
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_gpu.c | 50
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_gpu.h | 4
-rw-r--r--  drivers/gpu/drm/exynos/Kconfig | 2
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_drv.c | 5
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_fbdev.c | 6
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_fimc.c | 2
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_gem.c | 10
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_gsc.c | 2
-rw-r--r--  drivers/gpu/drm/exynos/exynos_hdmi.c | 141
-rw-r--r--  drivers/gpu/drm/exynos/regs-hdmi.h | 2
-rw-r--r--  drivers/gpu/drm/fsl-dcu/Makefile | 1
-rw-r--r--  drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c | 28
-rw-r--r--  drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.h | 1
-rw-r--r--  drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_fbdev.c | 23
-rw-r--r--  drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c | 7
-rw-r--r--  drivers/gpu/drm/gma500/cdv_intel_lvds.c | 14
-rw-r--r--  drivers/gpu/drm/gma500/framebuffer.c | 37
-rw-r--r--  drivers/gpu/drm/gma500/gem.c | 5
-rw-r--r--  drivers/gpu/drm/gma500/gtt.c | 4
-rw-r--r--  drivers/gpu/drm/gma500/psb_drv.c | 1
-rw-r--r--  drivers/gpu/drm/gma500/psb_drv.h | 4
-rw-r--r--  drivers/gpu/drm/gma500/psb_intel_lvds.c | 14
-rw-r--r--  drivers/gpu/drm/hisilicon/Kconfig | 1
-rw-r--r--  drivers/gpu/drm/hisilicon/Makefile | 1
-rw-r--r--  drivers/gpu/drm/hisilicon/hibmc/Kconfig | 9
-rw-r--r--  drivers/gpu/drm/hisilicon/hibmc/Makefile | 4
-rw-r--r--  drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_de.c | 477
-rw-r--r--  drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c | 456
-rw-r--r--  drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h | 114
-rw-r--r--  drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_fbdev.c | 267
-rw-r--r--  drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_regs.h | 196
-rw-r--r--  drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c | 140
-rw-r--r--  drivers/gpu/drm/hisilicon/hibmc/hibmc_ttm.c | 558
-rw-r--r--  drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c | 7
-rw-r--r--  drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c | 9
-rw-r--r--  drivers/gpu/drm/i2c/tda998x_drv.c | 863
-rw-r--r--  drivers/gpu/drm/i810/i810_dma.c | 2
-rw-r--r--  drivers/gpu/drm/i810/i810_drv.c | 2
-rw-r--r--  drivers/gpu/drm/i915/Kconfig | 65
-rw-r--r--  drivers/gpu/drm/i915/Kconfig.debug | 1
-rw-r--r--  drivers/gpu/drm/i915/Makefile | 11
-rw-r--r--  drivers/gpu/drm/i915/gvt/Makefile | 9
-rw-r--r--  drivers/gpu/drm/i915/gvt/aperture_gm.c | 352
-rw-r--r--  drivers/gpu/drm/i915/gvt/cfg_space.c | 284
-rw-r--r--  drivers/gpu/drm/i915/gvt/cmd_parser.c | 2831
-rw-r--r--  drivers/gpu/drm/i915/gvt/cmd_parser.h | 49
-rw-r--r--  drivers/gpu/drm/i915/gvt/debug.h | 29
-rw-r--r--  drivers/gpu/drm/i915/gvt/display.c | 330
-rw-r--r--  drivers/gpu/drm/i915/gvt/display.h | 163
-rw-r--r--  drivers/gpu/drm/i915/gvt/edid.c | 531
-rw-r--r--  drivers/gpu/drm/i915/gvt/edid.h | 150
-rw-r--r--  drivers/gpu/drm/i915/gvt/execlist.c | 858
-rw-r--r--  drivers/gpu/drm/i915/gvt/execlist.h | 188
-rw-r--r--  drivers/gpu/drm/i915/gvt/firmware.c | 312
-rw-r--r--  drivers/gpu/drm/i915/gvt/gtt.c | 2244
-rw-r--r--  drivers/gpu/drm/i915/gvt/gtt.h | 306
-rw-r--r--  drivers/gpu/drm/i915/gvt/gvt.c | 205
-rw-r--r--  drivers/gpu/drm/i915/gvt/gvt.h | 384
-rw-r--r--  drivers/gpu/drm/i915/gvt/handlers.c | 2848
-rw-r--r--  drivers/gpu/drm/i915/gvt/hypercall.h | 26
-rw-r--r--  drivers/gpu/drm/i915/gvt/interrupt.c | 741
-rw-r--r--  drivers/gpu/drm/i915/gvt/interrupt.h | 233
-rw-r--r--  drivers/gpu/drm/i915/gvt/kvmgt.c | 1458
-rw-r--r--  drivers/gpu/drm/i915/gvt/mmio.c | 304
-rw-r--r--  drivers/gpu/drm/i915/gvt/mmio.h | 106
-rw-r--r--  drivers/gpu/drm/i915/gvt/mpt.h | 259
-rw-r--r--  drivers/gpu/drm/i915/gvt/opregion.c | 320
-rw-r--r--  drivers/gpu/drm/i915/gvt/reg.h | 80
-rw-r--r--  drivers/gpu/drm/i915/gvt/render.c | 310
-rw-r--r--  drivers/gpu/drm/i915/gvt/render.h | 43
-rw-r--r--  drivers/gpu/drm/i915/gvt/sched_policy.c | 292
-rw-r--r--  drivers/gpu/drm/i915/gvt/sched_policy.h | 58
-rw-r--r--  drivers/gpu/drm/i915/gvt/scheduler.c | 583
-rw-r--r--  drivers/gpu/drm/i915/gvt/scheduler.h | 139
-rw-r--r--  drivers/gpu/drm/i915/gvt/trace.h | 286
-rw-r--r--  drivers/gpu/drm/i915/gvt/trace_points.c (renamed from drivers/gpu/drm/i915/i915_gem_dmabuf.h) | 37
-rw-r--r--  drivers/gpu/drm/i915/gvt/vgpu.c | 409
-rw-r--r--  drivers/gpu/drm/i915/i915_cmd_parser.c | 5
-rw-r--r--  drivers/gpu/drm/i915/i915_debugfs.c | 676
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.c | 248
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h | 1149
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c | 3100
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.h | 4
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_batch_pool.c | 39
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_context.c | 109
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_dmabuf.c | 150
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_evict.c | 21
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_execbuffer.c | 168
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_fence_reg.c (renamed from drivers/gpu/drm/i915/i915_gem_fence.c) | 54
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_fence_reg.h | 51
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_gtt.c | 1089
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_gtt.h | 250
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_internal.c | 170
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_object.h | 338
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_render_state.c | 186
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_render_state.h | 4
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_request.c | 766
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_request.h | 212
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_shrinker.c | 111
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_stolen.c | 112
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_tiling.c | 64
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_timeline.c | 90
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_timeline.h | 73
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_userptr.c | 126
-rw-r--r--  drivers/gpu/drm/i915/i915_gpu_error.c | 727
-rw-r--r--  drivers/gpu/drm/i915/i915_guc_submission.c | 675
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.c | 838
-rw-r--r--  drivers/gpu/drm/i915/i915_params.c | 18
-rw-r--r--  drivers/gpu/drm/i915/i915_params.h | 3
-rw-r--r--  drivers/gpu/drm/i915/i915_pci.c | 13
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h | 285
-rw-r--r--  drivers/gpu/drm/i915/i915_suspend.c | 32
-rw-r--r--  drivers/gpu/drm/i915/i915_sw_fence.c | 88
-rw-r--r--  drivers/gpu/drm/i915/i915_sw_fence.h | 38
-rw-r--r--  drivers/gpu/drm/i915/i915_sysfs.c | 27
-rw-r--r--  drivers/gpu/drm/i915/i915_trace.h | 10
-rw-r--r--  drivers/gpu/drm/i915/i915_vma.c | 638
-rw-r--r--  drivers/gpu/drm/i915/i915_vma.h | 341
-rw-r--r--  drivers/gpu/drm/i915/intel_atomic_plane.c | 26
-rw-r--r--  drivers/gpu/drm/i915/intel_audio.c | 412
-rw-r--r--  drivers/gpu/drm/i915/intel_bios.c | 53
-rw-r--r--  drivers/gpu/drm/i915/intel_bios.h | 12
-rw-r--r--  drivers/gpu/drm/i915/intel_breadcrumbs.c | 76
-rw-r--r--  drivers/gpu/drm/i915/intel_color.c | 52
-rw-r--r--  drivers/gpu/drm/i915/intel_crt.c | 141
-rw-r--r--  drivers/gpu/drm/i915/intel_csr.c | 11
-rw-r--r--  drivers/gpu/drm/i915/intel_ddi.c | 534
-rw-r--r--  drivers/gpu/drm/i915/intel_device_info.c | 21
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c | 2057
-rw-r--r--  drivers/gpu/drm/i915/intel_dp.c | 526
-rw-r--r--  drivers/gpu/drm/i915/intel_dp_link_training.c | 3
-rw-r--r--  drivers/gpu/drm/i915/intel_dp_mst.c | 2
-rw-r--r--  drivers/gpu/drm/i915/intel_dpio_phy.c | 559
-rw-r--r--  drivers/gpu/drm/i915/intel_dpll_mgr.c | 99
-rw-r--r--  drivers/gpu/drm/i915/intel_drv.h | 199
-rw-r--r--  drivers/gpu/drm/i915/intel_dsi.c | 45
-rw-r--r--  drivers/gpu/drm/i915/intel_dsi_panel_vbt.c | 63
-rw-r--r--  drivers/gpu/drm/i915/intel_dsi_pll.c | 26
-rw-r--r--  drivers/gpu/drm/i915/intel_dvo.c | 22
-rw-r--r--  drivers/gpu/drm/i915/intel_engine_cs.c | 203
-rw-r--r--  drivers/gpu/drm/i915/intel_fbc.c | 148
-rw-r--r--  drivers/gpu/drm/i915/intel_fbdev.c | 17
-rw-r--r--  drivers/gpu/drm/i915/intel_fifo_underrun.c | 33
-rw-r--r--  drivers/gpu/drm/i915/intel_frontbuffer.h | 5
-rw-r--r--  drivers/gpu/drm/i915/intel_guc.h | 32
-rw-r--r--  drivers/gpu/drm/i915/intel_guc_fwif.h | 82
-rw-r--r--  drivers/gpu/drm/i915/intel_guc_loader.c | 56
-rw-r--r--  drivers/gpu/drm/i915/intel_gvt.c | 10
-rw-r--r--  drivers/gpu/drm/i915/intel_gvt.h | 2
-rw-r--r--  drivers/gpu/drm/i915/intel_hangcheck.c | 450
-rw-r--r--  drivers/gpu/drm/i915/intel_hdmi.c | 111
-rw-r--r--  drivers/gpu/drm/i915/intel_hotplug.c | 2
-rw-r--r--  drivers/gpu/drm/i915/intel_i2c.c | 14
-rw-r--r--  drivers/gpu/drm/i915/intel_lrc.c | 427
-rw-r--r--  drivers/gpu/drm/i915/intel_lrc.h | 1
-rw-r--r--  drivers/gpu/drm/i915/intel_lspcon.c | 185
-rw-r--r--  drivers/gpu/drm/i915/intel_lvds.c | 45
-rw-r--r--  drivers/gpu/drm/i915/intel_opregion.c | 139
-rw-r--r--  drivers/gpu/drm/i915/intel_overlay.c | 10
-rw-r--r--  drivers/gpu/drm/i915/intel_panel.c | 12
-rw-r--r--  drivers/gpu/drm/i915/intel_pm.c | 1517
-rw-r--r--  drivers/gpu/drm/i915/intel_psr.c | 36
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.c | 371
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.h | 147
-rw-r--r--  drivers/gpu/drm/i915/intel_runtime_pm.c | 182
-rw-r--r--  drivers/gpu/drm/i915/intel_sdvo.c | 45
-rw-r--r--  drivers/gpu/drm/i915/intel_sprite.c | 207
-rw-r--r--  drivers/gpu/drm/i915/intel_tv.c | 63
-rw-r--r--  drivers/gpu/drm/i915/intel_uncore.c | 695
-rw-r--r--  drivers/gpu/drm/imx/imx-drm-core.c | 30
-rw-r--r--  drivers/gpu/drm/imx/imx-ldb.c | 19
-rw-r--r--  drivers/gpu/drm/imx/imx-tve.c | 7
-rw-r--r--  drivers/gpu/drm/imx/ipuv3-plane.c | 177
-rw-r--r--  drivers/gpu/drm/imx/parallel-display.c | 7
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_drm_drv.c | 11
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_dsi.c | 7
-rw-r--r--  drivers/gpu/drm/meson/Kconfig | 9
-rw-r--r--  drivers/gpu/drm/meson/Makefile | 4
-rw-r--r--  drivers/gpu/drm/meson/meson_canvas.c | 68
-rw-r--r--  drivers/gpu/drm/meson/meson_canvas.h | 42
-rw-r--r--  drivers/gpu/drm/meson/meson_crtc.c | 208
-rw-r--r--  drivers/gpu/drm/meson/meson_crtc.h | 32
-rw-r--r--  drivers/gpu/drm/meson/meson_drv.c | 343
-rw-r--r--  drivers/gpu/drm/meson/meson_drv.h | 59
-rw-r--r--  drivers/gpu/drm/meson/meson_plane.c | 230
-rw-r--r--  drivers/gpu/drm/meson/meson_plane.h | 30
-rw-r--r--  drivers/gpu/drm/meson/meson_registers.h | 1395
-rw-r--r--  drivers/gpu/drm/meson/meson_vclk.c | 167
-rw-r--r--  drivers/gpu/drm/meson/meson_vclk.h | 34
-rw-r--r--  drivers/gpu/drm/meson/meson_venc.c | 254
-rw-r--r--  drivers/gpu/drm/meson/meson_venc.h | 72
-rw-r--r--  drivers/gpu/drm/meson/meson_venc_cvbs.c | 293
-rw-r--r--  drivers/gpu/drm/meson/meson_venc_cvbs.h | 41
-rw-r--r--  drivers/gpu/drm/meson/meson_viu.c | 331
-rw-r--r--  drivers/gpu/drm/meson/meson_viu.h | 64
-rw-r--r--  drivers/gpu/drm/meson/meson_vpp.c | 162
-rw-r--r--  drivers/gpu/drm/meson/meson_vpp.h | 35
-rw-r--r--  drivers/gpu/drm/mgag200/mgag200_drv.c | 2
-rw-r--r--  drivers/gpu/drm/mgag200/mgag200_mode.c | 7
-rw-r--r--  drivers/gpu/drm/mgag200/mgag200_ttm.c | 1
-rw-r--r--  drivers/gpu/drm/msm/Makefile | 4
-rw-r--r--  drivers/gpu/drm/msm/adreno/a2xx.xml.h | 27
-rw-r--r--  drivers/gpu/drm/msm/adreno/a3xx.xml.h | 38
-rw-r--r--  drivers/gpu/drm/msm/adreno/a3xx_gpu.c | 112
-rw-r--r--  drivers/gpu/drm/msm/adreno/a4xx.xml.h | 111
-rw-r--r--  drivers/gpu/drm/msm/adreno/a4xx_gpu.c | 119
-rw-r--r--  drivers/gpu/drm/msm/adreno/a5xx.xml.h | 3757
-rw-r--r--  drivers/gpu/drm/msm/adreno/a5xx_gpu.c | 888
-rw-r--r--  drivers/gpu/drm/msm/adreno/a5xx_gpu.h | 60
-rw-r--r--  drivers/gpu/drm/msm/adreno/a5xx_power.c | 344
-rw-r--r--  drivers/gpu/drm/msm/adreno/adreno_common.xml.h | 21
-rw-r--r--  drivers/gpu/drm/msm/adreno/adreno_device.c | 32
-rw-r--r--  drivers/gpu/drm/msm/adreno/adreno_gpu.c | 39
-rw-r--r--  drivers/gpu/drm/msm/adreno/adreno_gpu.h | 162
-rw-r--r--  drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h | 300
-rw-r--r--  drivers/gpu/drm/msm/dsi/dsi.xml.h | 2
-rw-r--r--  drivers/gpu/drm/msm/dsi/dsi_host.c | 4
-rw-r--r--  drivers/gpu/drm/msm/dsi/mmss_cc.xml.h | 2
-rw-r--r--  drivers/gpu/drm/msm/dsi/sfpb.xml.h | 2
-rw-r--r--  drivers/gpu/drm/msm/edp/edp.xml.h | 2
-rw-r--r--  drivers/gpu/drm/msm/hdmi/hdmi.xml.h | 2
-rw-r--r--  drivers/gpu/drm/msm/hdmi/qfprom.xml.h | 2
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp4/mdp4.xml.h | 2
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c | 4
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp4/mdp4_irq.c | 10
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c | 38
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h | 4
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c | 2
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h | 14
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c | 4
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c | 40
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c | 13
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c | 267
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h | 53
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_pipe.c | 133
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_pipe.h | 56
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c | 297
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c | 306
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.h | 70
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp_common.xml.h | 2
-rw-r--r--  drivers/gpu/drm/msm/msm_atomic.c | 37
-rw-r--r--  drivers/gpu/drm/msm/msm_debugfs.c | 17
-rw-r--r--  drivers/gpu/drm/msm/msm_drv.c | 47
-rw-r--r--  drivers/gpu/drm/msm/msm_drv.h | 42
-rw-r--r--  drivers/gpu/drm/msm/msm_fb.c | 4
-rw-r--r--  drivers/gpu/drm/msm/msm_fbdev.c | 9
-rw-r--r--  drivers/gpu/drm/msm/msm_fence.c | 28
-rw-r--r--  drivers/gpu/drm/msm/msm_fence.h | 2
-rw-r--r--  drivers/gpu/drm/msm/msm_gem.c | 68
-rw-r--r--  drivers/gpu/drm/msm/msm_gem.h | 25
-rw-r--r--  drivers/gpu/drm/msm/msm_gem_shrinker.c | 27
-rw-r--r--  drivers/gpu/drm/msm/msm_gem_submit.c | 17
-rw-r--r--  drivers/gpu/drm/msm/msm_gem_vma.c | 90
-rw-r--r--  drivers/gpu/drm/msm/msm_gpu.c | 68
-rw-r--r--  drivers/gpu/drm/msm/msm_gpu.h | 45
-rw-r--r--  drivers/gpu/drm/msm/msm_iommu.c | 12
-rw-r--r--  drivers/gpu/drm/msm/msm_kms.h | 19
-rw-r--r--  drivers/gpu/drm/msm/msm_mmu.h | 4
-rw-r--r--  drivers/gpu/drm/msm/msm_rd.c | 4
-rw-r--r--  drivers/gpu/drm/mxsfb/Kconfig | 19
-rw-r--r--  drivers/gpu/drm/mxsfb/Makefile | 2
-rw-r--r--  drivers/gpu/drm/mxsfb/mxsfb_crtc.c | 241
-rw-r--r--  drivers/gpu/drm/mxsfb/mxsfb_drv.c | 444
-rw-r--r--  drivers/gpu/drm/mxsfb/mxsfb_drv.h | 54
-rw-r--r--  drivers/gpu/drm/mxsfb/mxsfb_out.c | 131
-rw-r--r--  drivers/gpu/drm/mxsfb/mxsfb_regs.h | 114
-rw-r--r--  drivers/gpu/drm/nouveau/Kbuild | 1
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv04/crtc.c | 4
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv04/overlay.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvif/cl5070.h | 17
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvif/class.h | 4
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvif/object.h | 29
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/engine/ce.h | 2
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/engine/disp.h | 2
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/boost.h | 12
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/cstep.h | 12
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/fan.h | 2
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/gpio.h | 1
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/iccsense.h | 10
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/perf.h | 6
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/timing.h | 6
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/vmap.h | 13
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/volt.h | 13
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/vpstate.h | 24
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/subdev/clk.h | 18
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h | 5
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/subdev/pmu.h | 2
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/subdev/volt.h | 18
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_backlight.c | 81
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_bios.c | 3
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_bios.h | 6
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_bo.c | 8
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_connector.c | 650
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_connector.h | 98
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_crtc.h | 7
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_display.c | 349
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_display.h | 25
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_dp.c | 19
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_drm.c | 23
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_drv.h | 14
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_encoder.h | 14
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_fbcon.c | 97
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_fbcon.h | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_fence.c | 82
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_fence.h | 8
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_gem.c | 25
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_led.c | 139
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_led.h | 57
-rw-r--r--  drivers/gpu/drm/nouveau/nv04_fbcon.c | 8
-rw-r--r--  drivers/gpu/drm/nouveau/nv04_fence.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nv10_fence.c | 7
-rw-r--r--  drivers/gpu/drm/nouveau/nv10_fence.h | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nv17_fence.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_display.c | 4190
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_display.h | 7
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_fbcon.c | 10
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_fence.c | 22
-rw-r--r--  drivers/gpu/drm/nouveau/nv84_fence.c | 24
-rw-r--r--  drivers/gpu/drm/nouveau/nvc0_fbcon.c | 10
-rw-r--r--  drivers/gpu/drm/nouveau/nvif/client.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nvif/notify.c | 6
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/core/firmware.c | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/ce/Kbuild | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/ce/fuc/gf100.fuc3.h | 4
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/ce/fuc/gt215.fuc3.h | 4
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/ce/gp102.c (renamed from drivers/gpu/drm/nouveau/nvkm/engine/ce/gp104.c) | 6
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/device/base.c | 99
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c | 37
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/device/user.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild | 14
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/basegp102.c (renamed from drivers/gpu/drm/nouveau/nvkm/engine/disp/basegp104.c) | 4
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.c | 38
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.h | 23
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/coreg94.c | 6
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/coregp102.c (renamed from drivers/gpu/drm/nouveau/nvkm/engine/disp/coregp104.c) | 14
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/cursg84.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/cursgf119.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/cursgk104.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/cursgp102.c | 37
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/cursgt215.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/cursnv50.c | 6
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacgf119.c | 44
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacgp102.c (renamed from drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacgp104.c) | 29
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacnv50.c | 46
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacnv50.h | 8
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/dport.c | 9
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/dport.h | 4
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/gf119.c | 25
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/gp102.c (renamed from drivers/gpu/drm/nouveau/nvkm/engine/disp/gp104.c) | 12
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c | 3
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmg84.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmgf119.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmgk104.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmgp102.c | 37
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmgt215.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmnv50.c | 6
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/outpdp.c | 33
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/outpdp.h | 9
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlygp102.c (renamed from drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlygp104.c) | 4
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/piocgf119.c | 28
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/piocnv50.c | 30
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgp102.c (renamed from drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgp104.c) | 22
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.c | 51
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.h | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/sorg94.c | 8
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgf119.c | 16
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgm107.c | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgm200.c | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/fifo/chang84.c | 4
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.c | 3
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c | 16
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm107.c | 16
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/fifo/gp100.c | 22
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogf100.c | 9
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogk104.c | 8
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf117.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm107.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxnv50.c | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgf100.fuc3.h | 4
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgf117.fuc3.h | 4
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk104.fuc3.h | 4
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk110.fuc3.h | 4
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk208.fuc5.h | 4
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgm107.fuc5.h | 4
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgf100.fuc3.h | 4
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgf117.fuc3.h | 4
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgk104.fuc3.h | 4
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgk110.fuc3.h | 4
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgk208.fuc5.h | 4
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgm107.fuc5.h | 4
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c | 70
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/gr/gf117.c | 4
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/gr/gm107.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/gr/nv30.c | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/gr/nv34.c | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/gr/nv35.c | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/pm/base.c | 4
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/pm/gf100.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/sec/fuc/g98.fuc0s.h | 4
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/bar/nv50.c | 4
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/bios/Kbuild | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/bios/boost.c | 30
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/bios/cstep.c | 30
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/bios/dp.c | 5
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/bios/fan.c | 18
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/bios/iccsense.c | 41
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/bios/mxm.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/bios/perf.c | 22
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/bios/priv.h | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadow.c | 7
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowacpi.c | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/bios/therm.c | 20
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/bios/timing.c | 20
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/bios/vmap.c | 34
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/bios/volt.c | 39
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/bios/vpstate.c | 82
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/clk/base.c | 147
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/clk/gf100.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk104.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/clk/gt215.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm200.c | 19
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/fb/Kbuild | 3
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.c | 25
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.h | 4
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk20a.c | 20
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/fb/gm200.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/fb/gm20b.c | 40
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp102.c (renamed from drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp104.c) | 6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv50.c28
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgf100.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgk104.c24
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgm107.c12
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgp100.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgt215.c8
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/sddr2.c1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/sddr3.c1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gpio/gk104.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxg94.c10
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgm200.c10
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gk20a.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/iccsense/base.c132
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/iccsense/priv.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/instmem/base.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/ltc/base.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mc/g84.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mxm/base.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mxm/mxms.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mxm/nv50.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/Kbuild2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/base.c227
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gf100.fuc3.h4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gf119.fuc4.h4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gk208.fuc5.h4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gt215.fuc3.h4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gf100.c6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gf119.c6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk104.c6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk110.c6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk208.c6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm107.c6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp100.c35
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp102.c43
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gt215.c216
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/priv.h16
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm200.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/volt/Kbuild1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/volt/base.c141
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/volt/gf100.c70
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/volt/gk104.c25
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/volt/gm20b.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/volt/gpio.c1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/volt/priv.h4
-rw-r--r--drivers/gpu/drm/omapdrm/displays/connector-analog-tv.c47
-rw-r--r--drivers/gpu/drm/omapdrm/displays/connector-dvi.c50
-rw-r--r--drivers/gpu/drm/omapdrm/displays/connector-hdmi.c49
-rw-r--r--drivers/gpu/drm/omapdrm/displays/encoder-opa362.c20
-rw-r--r--drivers/gpu/drm/omapdrm/displays/encoder-tfp410.c31
-rw-r--r--drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c33
-rw-r--r--drivers/gpu/drm/omapdrm/displays/panel-dpi.c30
-rw-r--r--drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c25
-rw-r--r--drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c59
-rw-r--r--drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c52
-rw-r--r--drivers/gpu/drm/omapdrm/displays/panel-sharp-ls037v7dw01.c58
-rw-r--r--drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c53
-rw-r--r--drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c57
-rw-r--r--drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c54
-rw-r--r--drivers/gpu/drm/omapdrm/dss/dispc.c228
-rw-r--r--drivers/gpu/drm/omapdrm/dss/display.c78
-rw-r--r--drivers/gpu/drm/omapdrm/dss/dpi.c40
-rw-r--r--drivers/gpu/drm/omapdrm/dss/dsi.c160
-rw-r--r--drivers/gpu/drm/omapdrm/dss/dss.h5
-rw-r--r--drivers/gpu/drm/omapdrm/dss/hdmi.h8
-rw-r--r--drivers/gpu/drm/omapdrm/dss/hdmi4.c31
-rw-r--r--drivers/gpu/drm/omapdrm/dss/hdmi4_core.c8
-rw-r--r--drivers/gpu/drm/omapdrm/dss/hdmi5.c31
-rw-r--r--drivers/gpu/drm/omapdrm/dss/hdmi5_core.c85
-rw-r--r--drivers/gpu/drm/omapdrm/dss/hdmi_wp.c73
-rw-r--r--drivers/gpu/drm/omapdrm/dss/omapdss.h98
-rw-r--r--drivers/gpu/drm/omapdrm/dss/output.c5
-rw-r--r--drivers/gpu/drm/omapdrm/dss/rfbi.c49
-rw-r--r--drivers/gpu/drm/omapdrm/dss/sdi.c33
-rw-r--r--drivers/gpu/drm/omapdrm/dss/venc.c97
-rw-r--r--drivers/gpu/drm/omapdrm/omap_connector.c87
-rw-r--r--drivers/gpu/drm/omapdrm/omap_crtc.c43
-rw-r--r--drivers/gpu/drm/omapdrm/omap_drv.c70
-rw-r--r--drivers/gpu/drm/omapdrm/omap_drv.h10
-rw-r--r--drivers/gpu/drm/omapdrm/omap_encoder.c10
-rw-r--r--drivers/gpu/drm/omapdrm/omap_fbdev.c5
-rw-r--r--drivers/gpu/drm/omapdrm/omap_gem.c26
-rw-r--r--drivers/gpu/drm/omapdrm/omap_plane.c56
-rw-r--r--drivers/gpu/drm/panel/panel-simple.c207
-rw-r--r--drivers/gpu/drm/qxl/qxl_cmd.c2
-rw-r--r--drivers/gpu/drm/qxl/qxl_debugfs.c6
-rw-r--r--drivers/gpu/drm/qxl/qxl_display.c69
-rw-r--r--drivers/gpu/drm/qxl/qxl_drv.h12
-rw-r--r--drivers/gpu/drm/qxl/qxl_fb.c10
-rw-r--r--drivers/gpu/drm/qxl/qxl_gem.c3
-rw-r--r--drivers/gpu/drm/qxl/qxl_kms.c3
-rw-r--r--drivers/gpu/drm/qxl/qxl_release.c37
-rw-r--r--drivers/gpu/drm/qxl/qxl_ttm.c1
-rw-r--r--drivers/gpu/drm/radeon/atombios_crtc.c14
-rw-r--r--drivers/gpu/drm/radeon/atombios_encoders.c1
-rw-r--r--drivers/gpu/drm/radeon/evergreen_cs.c1
-rw-r--r--drivers/gpu/drm/radeon/r600_cs.c1
-rw-r--r--drivers/gpu/drm/radeon/radeon.h10
-rw-r--r--drivers/gpu/drm/radeon/radeon_atombios.c1
-rw-r--r--drivers/gpu/drm/radeon/radeon_bios.c3
-rw-r--r--drivers/gpu/drm/radeon/radeon_clocks.c1
-rw-r--r--drivers/gpu/drm/radeon/radeon_cursor.c73
-rw-r--r--drivers/gpu/drm/radeon/radeon_device.c13
-rw-r--r--drivers/gpu/drm/radeon/radeon_display.c8
-rw-r--r--drivers/gpu/drm/radeon/radeon_dp_mst.c8
-rw-r--r--drivers/gpu/drm/radeon/radeon_fb.c28
-rw-r--r--drivers/gpu/drm/radeon/radeon_fence.c56
-rw-r--r--drivers/gpu/drm/radeon/radeon_gem.c3
-rw-r--r--drivers/gpu/drm/radeon/radeon_legacy_encoders.c1
-rw-r--r--drivers/gpu/drm/radeon/radeon_mode.h1
-rw-r--r--drivers/gpu/drm/radeon/radeon_pm.c6
-rw-r--r--drivers/gpu/drm/radeon/radeon_sync.c6
-rw-r--r--drivers/gpu/drm/radeon/radeon_ttm.c1
-rw-r--r--drivers/gpu/drm/radeon/radeon_uvd.c2
-rw-r--r--drivers/gpu/drm/radeon/si.c3
-rw-r--r--drivers/gpu/drm/radeon/si_dpm.c1
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_crtc.c6
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_drv.c85
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_group.c22
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_hdmienc.c4
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_kms.c20
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_lvdscon.c9
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_lvdscon.h2
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.c19
-rw-r--r--drivers/gpu/drm/rockchip/Kconfig1
-rw-r--r--drivers/gpu/drm/rockchip/dw-mipi-dsi.c7
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_drv.c8
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c6
-rw-r--r--drivers/gpu/drm/savage/savage_drv.c2
-rw-r--r--drivers/gpu/drm/savage/savage_state.c1
-rw-r--r--drivers/gpu/drm/shmobile/shmob_drm_crtc.c7
-rw-r--r--drivers/gpu/drm/shmobile/shmob_drm_drv.c2
-rw-r--r--drivers/gpu/drm/sis/sis_drv.c2
-rw-r--r--drivers/gpu/drm/sti/sti_drv.c10
-rw-r--r--drivers/gpu/drm/sti/sti_hda.c7
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_backend.c23
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_drv.c10
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_layer.c6
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_rgb.c7
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_tcon.c45
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_tcon.h11
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_tv.c9
-rw-r--r--drivers/gpu/drm/sun4i/sun6i_drc.c4
-rw-r--r--drivers/gpu/drm/tdfx/tdfx_drv.c2
-rw-r--r--drivers/gpu/drm/tegra/Kconfig1
-rw-r--r--drivers/gpu/drm/tegra/dpaux.c4
-rw-r--r--drivers/gpu/drm/tegra/drm.c5
-rw-r--r--drivers/gpu/drm/tegra/fb.c6
-rw-r--r--drivers/gpu/drm/tegra/gem.c45
-rw-r--r--drivers/gpu/drm/tegra/gr3d.c4
-rw-r--r--drivers/gpu/drm/tegra/sor.c2
-rw-r--r--drivers/gpu/drm/tilcdc/tilcdc_crtc.c598
-rw-r--r--drivers/gpu/drm/tilcdc/tilcdc_drv.c214
-rw-r--r--drivers/gpu/drm/tilcdc/tilcdc_drv.h11
-rw-r--r--drivers/gpu/drm/tilcdc/tilcdc_external.c260
-rw-r--r--drivers/gpu/drm/tilcdc/tilcdc_external.h5
-rw-r--r--drivers/gpu/drm/tilcdc/tilcdc_panel.c10
-rw-r--r--drivers/gpu/drm/tilcdc/tilcdc_plane.c7
-rw-r--r--drivers/gpu/drm/tilcdc/tilcdc_regs.h15
-rw-r--r--drivers/gpu/drm/tilcdc/tilcdc_tfp410.c2
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo.c68
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo_util.c22
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo_vm.c10
-rw-r--r--drivers/gpu/drm/ttm/ttm_execbuf_util.c3
-rw-r--r--drivers/gpu/drm/udl/udl_drv.c2
-rw-r--r--drivers/gpu/drm/udl/udl_fb.c8
-rw-r--r--drivers/gpu/drm/udl/udl_gem.c5
-rw-r--r--drivers/gpu/drm/vc4/Makefile1
-rw-r--r--drivers/gpu/drm/vc4/vc4_crtc.c46
-rw-r--r--drivers/gpu/drm/vc4/vc4_debugfs.c1
-rw-r--r--drivers/gpu/drm/vc4/vc4_drv.c18
-rw-r--r--drivers/gpu/drm/vc4/vc4_drv.h9
-rw-r--r--drivers/gpu/drm/vc4/vc4_gem.c15
-rw-r--r--drivers/gpu/drm/vc4/vc4_kms.c36
-rw-r--r--drivers/gpu/drm/vc4/vc4_regs.h3
-rw-r--r--drivers/gpu/drm/vc4/vc4_v3d.c2
-rw-r--r--drivers/gpu/drm/vc4/vc4_validate.c24
-rw-r--r--drivers/gpu/drm/vc4/vc4_validate_shaders.c82
-rw-r--r--drivers/gpu/drm/vc4/vc4_vec.c657
-rw-r--r--drivers/gpu/drm/vgem/vgem_drv.c2
-rw-r--r--drivers/gpu/drm/vgem/vgem_fence.c58
-rw-r--r--drivers/gpu/drm/via/via_drv.c2
-rw-r--r--drivers/gpu/drm/virtio/Kconfig6
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_drm_bus.c23
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_drv.c3
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_drv.h3
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_fb.c8
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_fence.c26
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_ioctl.c12
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_kms.c2
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_plane.c6
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_ttm.c1
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_vq.c8
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c1
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_fb.c19
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_fence.c44
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_fence.h8
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_kms.c107
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_kms.h2
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_resource.c4
-rw-r--r--drivers/gpu/drm/zte/Kconfig8
-rw-r--r--drivers/gpu/drm/zte/Makefile7
-rw-r--r--drivers/gpu/drm/zte/zx_drm_drv.c267
-rw-r--r--drivers/gpu/drm/zte/zx_drm_drv.h36
-rw-r--r--drivers/gpu/drm/zte/zx_hdmi.c624
-rw-r--r--drivers/gpu/drm/zte/zx_hdmi_regs.h56
-rw-r--r--drivers/gpu/drm/zte/zx_plane.c299
-rw-r--r--drivers/gpu/drm/zte/zx_plane.h26
-rw-r--r--drivers/gpu/drm/zte/zx_plane_regs.h91
-rw-r--r--drivers/gpu/drm/zte/zx_vou.c661
-rw-r--r--drivers/gpu/drm/zte/zx_vou.h46
-rw-r--r--drivers/gpu/drm/zte/zx_vou_regs.h157
-rw-r--r--drivers/gpu/host1x/dev.h3
-rw-r--r--drivers/gpu/host1x/job.c9
-rw-r--r--drivers/gpu/host1x/syncpt.c23
-rw-r--r--drivers/gpu/ipu-v3/Kconfig1
-rw-r--r--drivers/gpu/ipu-v3/ipu-common.c7
-rw-r--r--drivers/gpu/ipu-v3/ipu-cpmem.c43
-rw-r--r--drivers/gpu/ipu-v3/ipu-csi.c16
-rw-r--r--drivers/gpu/ipu-v3/ipu-di.c2
-rw-r--r--drivers/gpu/vga/vgaarb.c80
-rw-r--r--drivers/hid/Kconfig19
-rw-r--r--drivers/hid/Makefile2
-rw-r--r--drivers/hid/hid-asus.c299
-rw-r--r--drivers/hid/hid-core.c14
-rw-r--r--drivers/hid/hid-cp2112.c205
-rw-r--r--drivers/hid/hid-ids.h19
-rw-r--r--drivers/hid/hid-input.c96
-rw-r--r--drivers/hid/hid-mf.c166
-rw-r--r--drivers/hid/hid-microsoft.c6
-rw-r--r--drivers/hid/hid-multitouch.c88
-rw-r--r--drivers/hid/hid-sensor-hub.c6
-rw-r--r--drivers/hid/hid-sony.c445
-rw-r--r--drivers/hid/hid-udraw-ps3.c474
-rw-r--r--drivers/hid/i2c-hid/i2c-hid.c146
-rw-r--r--drivers/hid/intel-ish-hid/ipc/ipc.c67
-rw-r--r--drivers/hid/intel-ish-hid/ipc/utils.h64
-rw-r--r--drivers/hid/intel-ish-hid/ishtp/bus.c9
-rw-r--r--drivers/hid/intel-ish-hid/ishtp/hbm.c6
-rw-r--r--drivers/hid/usbhid/hid-core.c6
-rw-r--r--drivers/hid/usbhid/hid-quirks.c5
-rw-r--r--drivers/hid/wacom.h2
-rw-r--r--drivers/hid/wacom_sys.c92
-rw-r--r--drivers/hid/wacom_wac.c601
-rw-r--r--drivers/hid/wacom_wac.h76
-rw-r--r--drivers/hsi/clients/ssi_protocol.c14
-rw-r--r--drivers/hv/channel.c93
-rw-r--r--drivers/hv/channel_mgmt.c10
-rw-r--r--drivers/hv/connection.c1
-rw-r--r--drivers/hv/hv.c6
-rw-r--r--drivers/hv/hv_balloon.c44
-rw-r--r--drivers/hv/hv_snapshot.c33
-rw-r--r--drivers/hv/hv_util.c9
-rw-r--r--drivers/hv/hyperv_vmbus.h12
-rw-r--r--drivers/hv/ring_buffer.c44
-rw-r--r--drivers/hv/vmbus_drv.c174
-rw-r--r--drivers/hwmon/Kconfig26
-rw-r--r--drivers/hwmon/Makefile2
-rw-r--r--drivers/hwmon/adm1025.c2
-rw-r--r--drivers/hwmon/adm1026.c26
-rw-r--r--drivers/hwmon/adm9240.c9
-rw-r--r--drivers/hwmon/adt7411.c301
-rw-r--r--drivers/hwmon/adt7462.c12
-rw-r--r--drivers/hwmon/adt7470.c6
-rw-r--r--drivers/hwmon/amc6821.c4
-rw-r--r--drivers/hwmon/coretemp.c321
-rw-r--r--drivers/hwmon/ds620.c2
-rw-r--r--drivers/hwmon/emc2103.c4
-rw-r--r--drivers/hwmon/emc6w201.c8
-rw-r--r--drivers/hwmon/g762.c11
-rw-r--r--drivers/hwmon/hwmon.c79
-rw-r--r--drivers/hwmon/lm85.c3
-rw-r--r--drivers/hwmon/lm87.c136
-rw-r--r--drivers/hwmon/mcp3021.c50
-rw-r--r--drivers/hwmon/nct7802.c8
-rw-r--r--drivers/hwmon/pmbus/adm1275.c20
-rw-r--r--drivers/hwmon/scpi-hwmon.c1
-rw-r--r--drivers/hwmon/smsc47m192.c5
-rw-r--r--drivers/hwmon/tc654.c514
-rw-r--r--drivers/hwmon/tmp108.c469
-rw-r--r--drivers/hwmon/via-cputemp.c77
-rw-r--r--drivers/hwtracing/coresight/coresight-etm-perf.c31
-rw-r--r--drivers/hwtracing/coresight/coresight-etm.h5
-rw-r--r--drivers/hwtracing/coresight/coresight-etm3x-sysfs.c12
-rw-r--r--drivers/hwtracing/coresight/coresight-priv.h4
-rw-r--r--drivers/hwtracing/coresight/coresight-stm.c11
-rw-r--r--drivers/hwtracing/coresight/coresight-tmc-etf.c48
-rw-r--r--drivers/hwtracing/coresight/coresight-tmc-etr.c43
-rw-r--r--drivers/hwtracing/coresight/coresight-tmc.h2
-rw-r--r--drivers/hwtracing/coresight/coresight.c74
-rw-r--r--drivers/hwtracing/intel_th/core.c28
-rw-r--r--drivers/hwtracing/intel_th/gth.c26
-rw-r--r--drivers/hwtracing/intel_th/intel_th.h4
-rw-r--r--drivers/hwtracing/intel_th/sth.c11
-rw-r--r--drivers/hwtracing/stm/Kconfig11
-rw-r--r--drivers/hwtracing/stm/Makefile2
-rw-r--r--drivers/hwtracing/stm/core.c15
-rw-r--r--drivers/hwtracing/stm/dummy_stm.c2
-rw-r--r--drivers/hwtracing/stm/ftrace.c87
-rw-r--r--drivers/i2c/Kconfig1
-rw-r--r--drivers/i2c/busses/Kconfig25
-rw-r--r--drivers/i2c/busses/Makefile2
-rw-r--r--drivers/i2c/busses/i2c-axxia.c2
-rw-r--r--drivers/i2c/busses/i2c-bcm-iproc.c2
-rw-r--r--drivers/i2c/busses/i2c-bcm2835.c218
-rw-r--r--drivers/i2c/busses/i2c-designware-core.c46
-rw-r--r--drivers/i2c/busses/i2c-designware-core.h8
-rw-r--r--drivers/i2c/busses/i2c-designware-pcidrv.c10
-rw-r--r--drivers/i2c/busses/i2c-designware-platdrv.c54
-rw-r--r--drivers/i2c/busses/i2c-dln2.c2
-rw-r--r--drivers/i2c/busses/i2c-i801.c123
-rw-r--r--drivers/i2c/busses/i2c-imx-lpi2c.c652
-rw-r--r--drivers/i2c/busses/i2c-mlxcpld.c504
-rw-r--r--drivers/i2c/busses/i2c-octeon-core.c50
-rw-r--r--drivers/i2c/busses/i2c-octeon-core.h21
-rw-r--r--drivers/i2c/busses/i2c-pxa-pci.c32
-rw-r--r--drivers/i2c/busses/i2c-pxa.c26
-rw-r--r--drivers/i2c/busses/i2c-qup.c122
-rw-r--r--drivers/i2c/busses/i2c-rcar.c5
-rw-r--r--drivers/i2c/busses/i2c-sh_mobile.c4
-rw-r--r--drivers/i2c/busses/i2c-uniphier-f.c6
-rw-r--r--drivers/i2c/busses/i2c-uniphier.c6
-rw-r--r--drivers/i2c/busses/i2c-viperboard.c2
-rw-r--r--drivers/i2c/busses/i2c-xgene-slimpro.c1
-rw-r--r--drivers/i2c/busses/i2c-xlp9xx.c1
-rw-r--r--drivers/i2c/i2c-core.c200
-rw-r--r--drivers/i2c/i2c-smbus.c102
-rw-r--r--drivers/i2c/muxes/Kconfig11
-rw-r--r--drivers/i2c/muxes/Makefile1
-rw-r--r--drivers/i2c/muxes/i2c-mux-gpio.c18
-rw-r--r--drivers/i2c/muxes/i2c-mux-mlxcpld.c222
-rw-r--r--drivers/i2c/muxes/i2c-mux-pca954x.c33
-rw-r--r--drivers/ide/ide-atapi.c6
-rw-r--r--drivers/ide/ide-cd.c46
-rw-r--r--drivers/ide/ide-cd.h2
-rw-r--r--drivers/ide/ide-cd_ioctl.c6
-rw-r--r--drivers/ide/ide-io.c6
-rw-r--r--drivers/ide/ide-pm.c4
-rw-r--r--drivers/idle/Kconfig17
-rw-r--r--drivers/idle/Makefile1
-rw-r--r--drivers/idle/i7300_idle.c612
-rw-r--r--drivers/idle/intel_idle.c154
-rw-r--r--drivers/iio/Kconfig2
-rw-r--r--drivers/iio/Makefile2
-rw-r--r--drivers/iio/accel/Kconfig45
-rw-r--r--drivers/iio/accel/Makefile5
-rw-r--r--drivers/iio/accel/da280.c183
-rw-r--r--drivers/iio/accel/da311.c305
-rw-r--r--drivers/iio/accel/dmard10.c266
-rw-r--r--drivers/iio/accel/mma7660.c2
-rw-r--r--drivers/iio/accel/mma8452.c79
-rw-r--r--drivers/iio/accel/sca3000.c1576
-rw-r--r--drivers/iio/accel/st_accel.h1
-rw-r--r--drivers/iio/accel/st_accel_core.c605
-rw-r--r--drivers/iio/accel/st_accel_i2c.c5
-rw-r--r--drivers/iio/accel/st_accel_spi.c1
-rw-r--r--drivers/iio/adc/Kconfig46
-rw-r--r--drivers/iio/adc/Makefile4
-rw-r--r--drivers/iio/adc/ad7766.c330
-rw-r--r--drivers/iio/adc/at91_adc.c28
-rw-r--r--drivers/iio/adc/envelope-detector.c422
-rw-r--r--drivers/iio/adc/max1027.c17
-rw-r--r--drivers/iio/adc/stm32-adc-core.c303
-rw-r--r--drivers/iio/adc/stm32-adc-core.h52
-rw-r--r--drivers/iio/adc/stm32-adc.c518
-rw-r--r--drivers/iio/adc/ti-adc0832.c106
-rw-r--r--drivers/iio/adc/ti-adc161s626.c55
-rw-r--r--drivers/iio/adc/ti_am335x_adc.c148
-rw-r--r--drivers/iio/common/Kconfig1
-rw-r--r--drivers/iio/common/Makefile1
-rw-r--r--drivers/iio/common/cros_ec_sensors/Kconfig22
-rw-r--r--drivers/iio/common/cros_ec_sensors/Makefile6
-rw-r--r--drivers/iio/common/cros_ec_sensors/cros_ec_sensors.c322
-rw-r--r--drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c450
-rw-r--r--drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.h175
-rw-r--r--drivers/iio/common/hid-sensors/hid-sensor-attributes.c5
-rw-r--r--drivers/iio/counter/104-quad-8.c593
-rw-r--r--drivers/iio/counter/Kconfig24
-rw-r--r--drivers/iio/counter/Makefile7
-rw-r--r--drivers/iio/dac/Kconfig10
-rw-r--r--drivers/iio/dac/Makefile1
-rw-r--r--drivers/iio/dac/ad5592r.c2
-rw-r--r--drivers/iio/dac/dpot-dac.c266
-rw-r--r--drivers/iio/dac/mcp4725.c176
-rw-r--r--drivers/iio/gyro/Kconfig18
-rw-r--r--drivers/iio/gyro/Makefile5
-rw-r--r--drivers/iio/gyro/mpu3050-core.c1306
-rw-r--r--drivers/iio/gyro/mpu3050-i2c.c124
-rw-r--r--drivers/iio/gyro/mpu3050.h96
-rw-r--r--drivers/iio/gyro/st_gyro_core.c205
-rw-r--r--drivers/iio/humidity/Kconfig24
-rw-r--r--drivers/iio/humidity/Makefile7
-rw-r--r--drivers/iio/humidity/hdc100x.c130
-rw-r--r--drivers/iio/humidity/hts221.h73
-rw-r--r--drivers/iio/humidity/hts221_buffer.c168
-rw-r--r--drivers/iio/humidity/hts221_core.c687
-rw-r--r--drivers/iio/humidity/hts221_i2c.c110
-rw-r--r--drivers/iio/humidity/hts221_spi.c125
-rw-r--r--drivers/iio/humidity/si7020.c11
-rw-r--r--drivers/iio/imu/bmi160/bmi160_core.c3
-rw-r--r--drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c2
-rw-r--r--drivers/iio/industrialio-buffer.c7
-rw-r--r--drivers/iio/industrialio-core.c261
-rw-r--r--drivers/iio/industrialio-trigger.c21
-rw-r--r--drivers/iio/inkern.c123
-rw-r--r--drivers/iio/light/Kconfig19
-rw-r--r--drivers/iio/light/Makefile2
-rw-r--r--drivers/iio/light/isl29018.c (renamed from drivers/staging/iio/light/isl29018.c)159
-rw-r--r--drivers/iio/light/ltr501.c111
-rw-r--r--drivers/iio/light/max44000.c5
-rw-r--r--drivers/iio/light/tsl2583.c913
-rw-r--r--drivers/iio/magnetometer/ak8974.c8
-rw-r--r--drivers/iio/magnetometer/ak8975.c16
-rw-r--r--drivers/iio/magnetometer/hid-sensor-magn-3d.c147
-rw-r--r--drivers/iio/magnetometer/st_magn_core.c376
-rw-r--r--drivers/iio/potentiometer/mcp4531.c104
-rw-r--r--drivers/iio/potentiostat/Kconfig22
-rw-r--r--drivers/iio/potentiostat/Makefile6
-rw-r--r--drivers/iio/potentiostat/lmp91000.c446
-rw-r--r--drivers/iio/pressure/Kconfig10
-rw-r--r--drivers/iio/pressure/Makefile1
-rw-r--r--drivers/iio/pressure/abp060mg.c276
-rw-r--r--drivers/iio/pressure/mpl3115.c26
-rw-r--r--drivers/iio/pressure/ms5611_core.c19
-rw-r--r--drivers/iio/pressure/st_pressure_core.c257
-rw-r--r--drivers/iio/pressure/zpa2326.c4
-rw-r--r--drivers/iio/proximity/pulsedlight-lidar-lite-v2.c2
-rw-r--r--drivers/infiniband/Kconfig1
-rw-r--r--drivers/infiniband/core/agent.c1
-rw-r--r--drivers/infiniband/core/cache.c16
-rw-r--r--drivers/infiniband/core/cm.c72
-rw-r--r--drivers/infiniband/core/cma.c45
-rw-r--r--drivers/infiniband/core/core_priv.h12
-rw-r--r--drivers/infiniband/core/device.c5
-rw-r--r--drivers/infiniband/core/fmr_pool.c1
-rw-r--r--drivers/infiniband/core/iwcm.c21
-rw-r--r--drivers/infiniband/core/iwpm_msg.c1
-rw-r--r--drivers/infiniband/core/iwpm_util.c12
-rw-r--r--drivers/infiniband/core/mad.c46
-rw-r--r--drivers/infiniband/core/multicast.c7
-rw-r--r--drivers/infiniband/core/roce_gid_mgmt.c57
-rw-r--r--drivers/infiniband/core/ucm.c5
-rw-r--r--drivers/infiniband/core/ucma.c5
-rw-r--r--drivers/infiniband/core/umem.c2
-rw-r--r--drivers/infiniband/core/umem_odp.c2
-rw-r--r--drivers/infiniband/core/uverbs.h1
-rw-r--r--drivers/infiniband/core/uverbs_cmd.c229
-rw-r--r--drivers/infiniband/core/uverbs_main.c6
-rw-r--r--drivers/infiniband/core/verbs.c108
-rw-r--r--drivers/infiniband/hw/Makefile1
-rw-r--r--drivers/infiniband/hw/cxgb3/cxio_dbg.c20
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch_provider.c3
-rw-r--r--drivers/infiniband/hw/cxgb4/device.c9
-rw-r--r--drivers/infiniband/hw/cxgb4/provider.c4
-rw-r--r--drivers/infiniband/hw/cxgb4/qp.c5
-rw-r--r--drivers/infiniband/hw/hfi1/affinity.c3
-rw-r--r--drivers/infiniband/hw/hfi1/affinity.h9
-rw-r--r--drivers/infiniband/hw/hfi1/chip.c9
-rw-r--r--drivers/infiniband/hw/hfi1/chip_registers.h3
-rw-r--r--drivers/infiniband/hw/hfi1/debugfs.c110
-rw-r--r--drivers/infiniband/hw/hfi1/driver.c3
-rw-r--r--drivers/infiniband/hw/hfi1/eprom.c211
-rw-r--r--drivers/infiniband/hw/hfi1/firmware.c156
-rw-r--r--drivers/infiniband/hw/hfi1/hfi.h144
-rw-r--r--drivers/infiniband/hw/hfi1/iowait.h8
-rw-r--r--drivers/infiniband/hw/hfi1/mad.c31
-rw-r--r--drivers/infiniband/hw/hfi1/mmu_rb.c2
-rw-r--r--drivers/infiniband/hw/hfi1/pio.c40
-rw-r--r--drivers/infiniband/hw/hfi1/pio.h38
-rw-r--r--drivers/infiniband/hw/hfi1/pio_copy.c22
-rw-r--r--drivers/infiniband/hw/hfi1/platform.c193
-rw-r--r--drivers/infiniband/hw/hfi1/platform.h127
-rw-r--r--drivers/infiniband/hw/hfi1/qp.c11
-rw-r--r--drivers/infiniband/hw/hfi1/rc.c60
-rw-r--r--drivers/infiniband/hw/hfi1/ruc.c44
-rw-r--r--drivers/infiniband/hw/hfi1/sdma.c18
-rw-r--r--drivers/infiniband/hw/hfi1/sdma.h12
-rw-r--r--drivers/infiniband/hw/hfi1/uc.c4
-rw-r--r--drivers/infiniband/hw/hfi1/ud.c4
-rw-r--r--drivers/infiniband/hw/hfi1/user_sdma.c60
-rw-r--r--drivers/infiniband/hw/hfi1/verbs.c209
-rw-r--r--drivers/infiniband/hw/hfi1/verbs.h16
-rw-r--r--drivers/infiniband/hw/hfi1/verbs_txreq.c13
-rw-r--r--drivers/infiniband/hw/hfi1/verbs_txreq.h1
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_ah.c3
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_alloc.c11
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_cmd.c8
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_cmd.h12
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_common.h44
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_cq.c46
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_device.h66
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_eq.c6
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hem.c6
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hw_v1.c1222
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hw_v1.h74
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_main.c329
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_mr.c43
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_pd.c5
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_qp.c4
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_user.h53
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw.h37
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_cm.c377
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_cm.h11
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_ctrl.c671
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_d.h25
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_hw.c61
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_main.c166
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_osdep.h8
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_p.h21
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_pble.c4
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_puda.c273
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_puda.h20
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_type.h102
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_ucontext.h4
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_uk.c57
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_user.h18
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_utils.c289
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_verbs.c259
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_verbs.h3
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_virtchnl.c33
-rw-r--r--drivers/infiniband/hw/mlx4/ah.c10
-rw-r--r--drivers/infiniband/hw/mlx4/alias_GUID.c4
-rw-r--r--drivers/infiniband/hw/mlx4/cm.c4
-rw-r--r--drivers/infiniband/hw/mlx4/mad.c58
-rw-r--r--drivers/infiniband/hw/mlx4/main.c49
-rw-r--r--drivers/infiniband/hw/mlx4/mcg.c5
-rw-r--r--drivers/infiniband/hw/mlx4/mlx4_ib.h3
-rw-r--r--drivers/infiniband/hw/mlx4/qp.c13
-rw-r--r--drivers/infiniband/hw/mlx5/ah.c25
-rw-r--r--drivers/infiniband/hw/mlx5/cq.c34
-rw-r--r--drivers/infiniband/hw/mlx5/main.c294
-rw-r--r--drivers/infiniband/hw/mlx5/mem.c7
-rw-r--r--drivers/infiniband/hw/mlx5/mlx5_ib.h14
-rw-r--r--drivers/infiniband/hw/mlx5/mr.c71
-rw-r--r--drivers/infiniband/hw/mlx5/qp.c131
-rw-r--r--drivers/infiniband/hw/mlx5/srq.c6
-rw-r--r--drivers/infiniband/hw/mthca/mthca_av.c6
-rw-r--r--drivers/infiniband/hw/mthca/mthca_provider.c4
-rw-r--r--drivers/infiniband/hw/mthca/mthca_reset.c4
-rw-r--r--drivers/infiniband/hw/nes/nes.c2
-rw-r--r--drivers/infiniband/hw/nes/nes.h4
-rw-r--r--drivers/infiniband/hw/nes/nes_cm.c4
-rw-r--r--drivers/infiniband/hw/nes/nes_hw.c6
-rw-r--r--drivers/infiniband/hw/nes/nes_mgt.c10
-rw-r--r--drivers/infiniband/hw/nes/nes_nic.c94
-rw-r--r--drivers/infiniband/hw/nes/nes_verbs.c7
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_ah.c3
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_ah.h4
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_hw.c7
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_stats.c4
-rw-r--r--drivers/infiniband/hw/qedr/verbs.c62
-rw-r--r--drivers/infiniband/hw/qedr/verbs.h3
-rw-r--r--drivers/infiniband/hw/qib/qib_diag.c6
-rw-r--r--drivers/infiniband/hw/qib/qib_driver.c3
-rw-r--r--drivers/infiniband/hw/qib/qib_eeprom.c6
-rw-r--r--drivers/infiniband/hw/qib/qib_file_ops.c5
-rw-r--r--drivers/infiniband/hw/qib/qib_iba6120.c8
-rw-r--r--drivers/infiniband/hw/qib/qib_iba7220.c8
-rw-r--r--drivers/infiniband/hw/qib/qib_iba7322.c22
-rw-r--r--drivers/infiniband/hw/qib/qib_init.c47
-rw-r--r--drivers/infiniband/hw/qib/qib_rc.c44
-rw-r--r--drivers/infiniband/hw/qib/qib_ruc.c24
-rw-r--r--drivers/infiniband/hw/qib/qib_verbs.c33
-rw-r--r--drivers/infiniband/hw/usnic/usnic_ib_qp_grp.c22
-rw-r--r--drivers/infiniband/hw/usnic/usnic_ib_verbs.c16
-rw-r--r--drivers/infiniband/hw/usnic/usnic_ib_verbs.h4
-rw-r--r--drivers/infiniband/hw/usnic/usnic_vnic.c22
-rw-r--r--drivers/infiniband/hw/vmw_pvrdma/Kconfig7
-rw-r--r--drivers/infiniband/hw/vmw_pvrdma/Makefile3
-rw-r--r--drivers/infiniband/hw/vmw_pvrdma/pvrdma.h474
-rw-r--r--drivers/infiniband/hw/vmw_pvrdma/pvrdma_cmd.c119
-rw-r--r--drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c425
-rw-r--r--drivers/infiniband/hw/vmw_pvrdma/pvrdma_dev_api.h586
-rw-r--r--drivers/infiniband/hw/vmw_pvrdma/pvrdma_doorbell.c127
-rw-r--r--drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c1211
-rw-r--r--drivers/infiniband/hw/vmw_pvrdma/pvrdma_misc.c304
-rw-r--r--drivers/infiniband/hw/vmw_pvrdma/pvrdma_mr.c334
-rw-r--r--drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c972
-rw-r--r--drivers/infiniband/hw/vmw_pvrdma/pvrdma_ring.h131
-rw-r--r--drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c579
-rw-r--r--drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.h436
-rw-r--r--drivers/infiniband/sw/rdmavt/cq.c63
-rw-r--r--drivers/infiniband/sw/rdmavt/mcast.c5
-rw-r--r--drivers/infiniband/sw/rdmavt/mr.c22
-rw-r--r--drivers/infiniband/sw/rdmavt/qp.c20
-rw-r--r--drivers/infiniband/sw/rdmavt/trace.h141
-rw-r--r--drivers/infiniband/sw/rdmavt/trace_mr.h112
-rw-r--r--drivers/infiniband/sw/rdmavt/trace_qp.h96
-rw-r--r--drivers/infiniband/sw/rdmavt/trace_rvt.h81
-rw-r--r--drivers/infiniband/sw/rdmavt/trace_tx.h132
-rw-r--r--drivers/infiniband/sw/rxe/rxe_comp.c11
-rw-r--r--drivers/infiniband/sw/rxe/rxe_loc.h2
-rw-r--r--drivers/infiniband/sw/rxe/rxe_mr.c3
-rw-r--r--drivers/infiniband/sw/rxe/rxe_net.c7
-rw-r--r--drivers/infiniband/sw/rxe/rxe_param.h2
-rw-r--r--drivers/infiniband/sw/rxe/rxe_pool.c1
-rw-r--r--drivers/infiniband/sw/rxe/rxe_qp.c1
-rw-r--r--drivers/infiniband/sw/rxe/rxe_recv.c11
-rw-r--r--drivers/infiniband/sw/rxe/rxe_req.c20
-rw-r--r--drivers/infiniband/sw/rxe/rxe_resp.c28
-rw-r--r--drivers/infiniband/sw/rxe/rxe_srq.c2
-rw-r--r--drivers/infiniband/sw/rxe/rxe_task.c19
-rw-r--r--drivers/infiniband/sw/rxe/rxe_task.h1
-rw-r--r--drivers/infiniband/sw/rxe/rxe_verbs.c21
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_cm.c10
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_ib.c5
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_main.c43
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_multicast.c7
-rw-r--r--drivers/infiniband/ulp/iser/iser_verbs.c5
-rw-r--r--drivers/infiniband/ulp/isert/ib_isert.c31
-rw-r--r--drivers/infiniband/ulp/srp/ib_srp.c48
-rw-r--r--drivers/infiniband/ulp/srpt/ib_srpt.c22
-rw-r--r--drivers/input/joystick/xpad.c7
-rw-r--r--drivers/input/keyboard/gpio_keys.c190
-rw-r--r--drivers/input/keyboard/gpio_keys_polled.c128
-rw-r--r--drivers/input/keyboard/lpc32xx-keys.c2
-rw-r--r--drivers/input/keyboard/pxa27x_keypad.c2
-rw-r--r--drivers/input/keyboard/tca8418_keypad.c21
-rw-r--r--drivers/input/misc/Kconfig7
-rw-r--r--drivers/input/misc/arizona-haptics.c13
-rw-r--r--drivers/input/misc/bma150.c9
-rw-r--r--drivers/input/misc/da9063_onkey.c9
-rw-r--r--drivers/input/misc/drv260x.c119
-rw-r--r--drivers/input/misc/drv2665.c6
-rw-r--r--drivers/input/misc/drv2667.c4
-rw-r--r--drivers/input/misc/soc_button_array.c8
-rw-r--r--drivers/input/misc/tps65218-pwrbutton.c8
-rw-r--r--drivers/input/misc/xen-kbdfront.c13
-rw-r--r--drivers/input/mouse/alps.c56
-rw-r--r--drivers/input/mouse/alps.h24
-rw-r--r--drivers/input/mouse/elan_i2c_core.c17
-rw-r--r--drivers/input/rmi4/Kconfig42
-rw-r--r--drivers/input/rmi4/Makefile4
-rw-r--r--drivers/input/rmi4/rmi_2d_sensor.c10
-rw-r--r--drivers/input/rmi4/rmi_2d_sensor.h2
-rw-r--r--drivers/input/rmi4/rmi_bus.c12
-rw-r--r--drivers/input/rmi4/rmi_bus.h12
-rw-r--r--drivers/input/rmi4/rmi_driver.c420
-rw-r--r--drivers/input/rmi4/rmi_driver.h31
-rw-r--r--drivers/input/rmi4/rmi_f01.c12
-rw-r--r--drivers/input/rmi4/rmi_f03.c250
-rw-r--r--drivers/input/rmi4/rmi_f11.c98
-rw-r--r--drivers/input/rmi4/rmi_f12.c146
-rw-r--r--drivers/input/rmi4/rmi_f30.c22
-rw-r--r--drivers/input/rmi4/rmi_f34.c516
-rw-r--r--drivers/input/rmi4/rmi_f34.h314
-rw-r--r--drivers/input/rmi4/rmi_f34v7.c1372
-rw-r--r--drivers/input/rmi4/rmi_f54.c19
-rw-r--r--drivers/input/rmi4/rmi_f55.c131
-rw-r--r--drivers/input/rmi4/rmi_i2c.c74
-rw-r--r--drivers/input/rmi4/rmi_smbus.c447
-rw-r--r--drivers/input/rmi4/rmi_spi.c72
-rw-r--r--drivers/input/serio/i8042-x86ia64io.h88
-rw-r--r--drivers/input/serio/i8042.c2
-rw-r--r--drivers/input/touchscreen/fsl-imx25-tcq.c1
-rw-r--r--drivers/input/touchscreen/imx6ul_tsc.c83
-rw-r--r--drivers/input/touchscreen/melfas_mip4.c51
-rw-r--r--drivers/input/touchscreen/raydium_i2c_ts.c2
-rw-r--r--drivers/input/touchscreen/silead.c29
-rw-r--r--drivers/input/touchscreen/wm97xx-core.c2
-rw-r--r--drivers/iommu/amd_iommu.c2
-rw-r--r--drivers/iommu/amd_iommu_init.c14
-rw-r--r--drivers/iommu/amd_iommu_v2.c4
-rw-r--r--drivers/iommu/arm-smmu-v3.c104
-rw-r--r--drivers/iommu/arm-smmu.c177
-rw-r--r--drivers/iommu/dma-iommu.c24
-rw-r--r--drivers/iommu/dmar.c7
-rw-r--r--drivers/iommu/exynos-iommu.c293
-rw-r--r--drivers/iommu/intel-iommu.c24
-rw-r--r--drivers/iommu/io-pgtable-arm-v7s.c5
-rw-r--r--drivers/iommu/io-pgtable-arm.c7
-rw-r--r--drivers/iommu/iommu.c53
-rw-r--r--drivers/iommu/iova.c2
-rw-r--r--drivers/iommu/mtk_iommu.c85
-rw-r--r--drivers/iommu/mtk_iommu.h11
-rw-r--r--drivers/iommu/mtk_iommu_v1.c105
-rw-r--r--drivers/iommu/of_iommu.c39
-rw-r--r--drivers/iommu/s390-iommu.c1
-rw-r--r--drivers/irqchip/Kconfig4
-rw-r--r--drivers/irqchip/Makefile1
-rw-r--r--drivers/irqchip/irq-bcm7038-l1.c26
-rw-r--r--drivers/irqchip/irq-gic-v3-its.c75
-rw-r--r--drivers/irqchip/irq-gic-v3.c13
-rw-r--r--drivers/irqchip/irq-st.c2
-rw-r--r--drivers/irqchip/irq-xilinx-intc.c241
-rw-r--r--drivers/isdn/gigaset/bas-gigaset.c32
-rw-r--r--drivers/isdn/gigaset/ser-gigaset.c32
-rw-r--r--drivers/isdn/gigaset/usb-gigaset.c32
-rw-r--r--drivers/isdn/hisax/config.c16
-rw-r--r--drivers/isdn/hisax/hisax.h4
-rw-r--r--drivers/isdn/hisax/q931.c2
-rw-r--r--drivers/isdn/hysdn/hysdn_net.c1
-rw-r--r--drivers/isdn/i4l/isdn_concap.c6
-rw-r--r--drivers/isdn/i4l/isdn_x25iface.c16
-rw-r--r--drivers/leds/Kconfig21
-rw-r--r--drivers/leds/Makefile4
-rw-r--r--drivers/leds/led-class.c4
-rw-r--r--drivers/leds/led-core.c62
-rw-r--r--drivers/leds/leds-cobalt-raq.c6
-rw-r--r--drivers/leds/leds-lp3952.c1
-rw-r--r--drivers/leds/leds-mc13783.c5
-rw-r--r--drivers/leds/leds-mlxcpld.c5
-rw-r--r--drivers/leds/leds-netxbig.c1
-rw-r--r--drivers/leds/leds-nic78bx.c209
-rw-r--r--drivers/leds/leds-pca9532.c2
-rw-r--r--drivers/leds/leds-pca955x.c24
-rw-r--r--drivers/leds/leds-pca963x.c80
-rw-r--r--drivers/leds/trigger/ledtrig-cpu.c2
-rw-r--r--drivers/leds/uleds.c235
-rw-r--r--drivers/lguest/hypercalls.c4
-rw-r--r--drivers/lguest/lg.h1
-rw-r--r--drivers/lguest/x86/core.c19
-rw-r--r--drivers/lightnvm/Makefile2
-rw-r--r--drivers/lightnvm/core.c387
-rw-r--r--drivers/lightnvm/gennvm.c645
-rw-r--r--drivers/lightnvm/gennvm.h34
-rw-r--r--drivers/lightnvm/lightnvm.h35
-rw-r--r--drivers/lightnvm/rrpc.c514
-rw-r--r--drivers/lightnvm/rrpc.h65
-rw-r--r--drivers/lightnvm/sysblk.c98
-rw-r--r--drivers/lightnvm/sysfs.c198
-rw-r--r--drivers/macintosh/Kconfig1
-rw-r--r--drivers/mailbox/Kconfig9
-rw-r--r--drivers/mailbox/Makefile2
-rw-r--r--drivers/mailbox/bcm-pdc-mailbox.c548
-rw-r--r--drivers/mailbox/mailbox-sti.c1
-rw-r--r--drivers/mailbox/mailbox-test.c92
-rw-r--r--drivers/mailbox/pcc.c5
-rw-r--r--drivers/mailbox/tegra-hsp.c479
-rw-r--r--drivers/mcb/mcb-parse.c2
-rw-r--r--drivers/md/Kconfig10
-rw-r--r--drivers/md/bcache/bcache.h4
-rw-r--r--drivers/md/bcache/btree.c43
-rw-r--r--drivers/md/bcache/btree.h3
-rw-r--r--drivers/md/bcache/debug.c15
-rw-r--r--drivers/md/bcache/io.c4
-rw-r--r--drivers/md/bcache/journal.c4
-rw-r--r--drivers/md/bcache/movinggc.c6
-rw-r--r--drivers/md/bcache/request.c12
-rw-r--r--drivers/md/bcache/super.c23
-rw-r--r--drivers/md/bcache/writeback.c5
-rw-r--r--drivers/md/bcache/writeback.h3
-rw-r--r--drivers/md/bitmap.c166
-rw-r--r--drivers/md/dm-bufio.c34
-rw-r--r--drivers/md/dm-cache-block-types.h6
-rw-r--r--drivers/md/dm-cache-metadata.c3
-rw-r--r--drivers/md/dm-cache-policy-smq.c2
-rw-r--r--drivers/md/dm-cache-target.c3
-rw-r--r--drivers/md/dm-crypt.c216
-rw-r--r--drivers/md/dm-flakey.c53
-rw-r--r--drivers/md/dm-io.c34
-rw-r--r--drivers/md/dm-ioctl.c2
-rw-r--r--drivers/md/dm-log.c2
-rw-r--r--drivers/md/dm-mpath.c42
-rw-r--r--drivers/md/dm-raid.c86
-rw-r--r--drivers/md/dm-raid1.c4
-rw-r--r--drivers/md/dm-rq.c70
-rw-r--r--drivers/md/dm-snap-persistent.c4
-rw-r--r--drivers/md/dm-table.c43
-rw-r--r--drivers/md/dm-verity-target.c2
-rw-r--r--drivers/md/dm.c8
-rw-r--r--drivers/md/linear.c31
-rw-r--r--drivers/md/md.c703
-rw-r--r--drivers/md/md.h108
-rw-r--r--drivers/md/multipath.c94
-rw-r--r--drivers/md/persistent-data/dm-array.c2
-rw-r--r--drivers/md/persistent-data/dm-block-manager.c19
-rw-r--r--drivers/md/persistent-data/dm-space-map-common.c4
-rw-r--r--drivers/md/persistent-data/dm-space-map-metadata.c14
-rw-r--r--drivers/md/raid0.c107
-rw-r--r--drivers/md/raid1.c247
-rw-r--r--drivers/md/raid1.h19
-rw-r--r--drivers/md/raid10.c295
-rw-r--r--drivers/md/raid10.h2
-rw-r--r--drivers/md/raid5-cache.c1837
-rw-r--r--drivers/md/raid5.c634
-rw-r--r--drivers/md/raid5.h172
-rw-r--r--drivers/media/Kconfig18
-rw-r--r--drivers/media/Makefile4
-rw-r--r--drivers/media/cec/Makefile (renamed from drivers/staging/media/cec/Makefile)2
-rw-r--r--drivers/media/cec/cec-adap.c (renamed from drivers/staging/media/cec/cec-adap.c)244
-rw-r--r--drivers/media/cec/cec-api.c (renamed from drivers/staging/media/cec/cec-api.c)13
-rw-r--r--drivers/media/cec/cec-core.c (renamed from drivers/staging/media/cec/cec-core.c)18
-rw-r--r--drivers/media/cec/cec-priv.h (renamed from drivers/staging/media/cec/cec-priv.h)0
-rw-r--r--drivers/media/common/b2c2/flexcop-common.h1
-rw-r--r--drivers/media/common/b2c2/flexcop-eeprom.c3
-rw-r--r--drivers/media/common/b2c2/flexcop-i2c.c14
-rw-r--r--drivers/media/common/b2c2/flexcop-misc.c9
-rw-r--r--drivers/media/common/b2c2/flexcop.c3
-rw-r--r--drivers/media/common/cx2341x.c12
-rw-r--r--drivers/media/common/saa7146/saa7146_video.c4
-rw-r--r--drivers/media/common/siano/smsdvb-main.c2
-rw-r--r--drivers/media/common/tveeprom.c77
-rw-r--r--drivers/media/common/v4l2-tpg/v4l2-tpg-core.c412
-rw-r--r--drivers/media/dvb-core/Kconfig17
-rw-r--r--drivers/media/dvb-core/Makefile2
-rw-r--r--drivers/media/dvb-core/demux.h5
-rw-r--r--drivers/media/dvb-core/dmxdev.c28
-rw-r--r--drivers/media/dvb-core/dvb-usb-ids.h2
-rw-r--r--drivers/media/dvb-core/dvb_ca_en50221.c60
-rw-r--r--drivers/media/dvb-core/dvb_demux.c115
-rw-r--r--drivers/media/dvb-core/dvb_demux.h2
-rw-r--r--drivers/media/dvb-core/dvb_filter.c603
-rw-r--r--drivers/media/dvb-core/dvb_frontend.c107
-rw-r--r--drivers/media/dvb-core/dvb_frontend.h10
-rw-r--r--drivers/media/dvb-core/dvb_net.c954
-rw-r--r--drivers/media/dvb-core/dvbdev.c44
-rw-r--r--drivers/media/dvb-core/dvbdev.h25
-rw-r--r--drivers/media/dvb-frontends/Kconfig9
-rw-r--r--drivers/media/dvb-frontends/af9013.c4
-rw-r--r--drivers/media/dvb-frontends/af9033.c2
-rw-r--r--drivers/media/dvb-frontends/as102_fe.c2
-rw-r--r--drivers/media/dvb-frontends/ascot2e.c3
-rw-r--r--drivers/media/dvb-frontends/atbm8830.c2
-rw-r--r--drivers/media/dvb-frontends/au8522_common.c4
-rw-r--r--drivers/media/dvb-frontends/au8522_dig.c4
-rw-r--r--drivers/media/dvb-frontends/bcm3510.c4
-rw-r--r--drivers/media/dvb-frontends/cx22700.c4
-rw-r--r--drivers/media/dvb-frontends/cx24110.c8
-rw-r--r--drivers/media/dvb-frontends/cx24113.c7
-rw-r--r--drivers/media/dvb-frontends/cx24116.c12
-rw-r--r--drivers/media/dvb-frontends/cx24117.c8
-rw-r--r--drivers/media/dvb-frontends/cx24120.c7
-rw-r--r--drivers/media/dvb-frontends/cx24123.c8
-rw-r--r--drivers/media/dvb-frontends/cxd2841er.c6
-rw-r--r--drivers/media/dvb-frontends/dib0070.c53
-rw-r--r--drivers/media/dvb-frontends/dib0090.c165
-rw-r--r--drivers/media/dvb-frontends/dib3000mb.c141
-rw-r--r--drivers/media/dvb-frontends/dib3000mb_priv.h16
-rw-r--r--drivers/media/dvb-frontends/dib3000mc.c12
-rw-r--r--drivers/media/dvb-frontends/dib7000m.c77
-rw-r--r--drivers/media/dvb-frontends/dib7000p.c131
-rw-r--r--drivers/media/dvb-frontends/dib8000.c261
-rw-r--r--drivers/media/dvb-frontends/dib9000.c175
-rw-r--r--drivers/media/dvb-frontends/dibx000_common.c46
-rw-r--r--drivers/media/dvb-frontends/dibx000_common.h2
-rw-r--r--drivers/media/dvb-frontends/drx39xyj/drxj.c4
-rw-r--r--drivers/media/dvb-frontends/drxd_hard.c2
-rw-r--r--drivers/media/dvb-frontends/drxk_hard.c2
-rw-r--r--drivers/media/dvb-frontends/ds3000.c19
-rw-r--r--drivers/media/dvb-frontends/dvb-pll.c22
-rw-r--r--drivers/media/dvb-frontends/dvb_dummy_fe.c12
-rw-r--r--drivers/media/dvb-frontends/ec100.c4
-rw-r--r--drivers/media/dvb-frontends/gp8psk-fe.c4
-rw-r--r--drivers/media/dvb-frontends/hd29l2.c4
-rw-r--r--drivers/media/dvb-frontends/helene.c3
-rw-r--r--drivers/media/dvb-frontends/horus3a.c3
-rw-r--r--drivers/media/dvb-frontends/itd1000.c3
-rw-r--r--drivers/media/dvb-frontends/ix2505v.c3
-rw-r--r--drivers/media/dvb-frontends/l64781.c4
-rw-r--r--drivers/media/dvb-frontends/lg2160.c4
-rw-r--r--drivers/media/dvb-frontends/lgdt3305.c8
-rw-r--r--drivers/media/dvb-frontends/lgdt3306a.c4
-rw-r--r--drivers/media/dvb-frontends/lgdt330x.c11
-rw-r--r--drivers/media/dvb-frontends/lgs8gl5.c4
-rw-r--r--drivers/media/dvb-frontends/lgs8gxx.c2
-rw-r--r--drivers/media/dvb-frontends/m88ds3103.c4
-rw-r--r--drivers/media/dvb-frontends/m88rs2000.c13
-rw-r--r--drivers/media/dvb-frontends/mb86a16.c2
-rw-r--r--drivers/media/dvb-frontends/mb86a20s.c5
-rw-r--r--drivers/media/dvb-frontends/mn88472.c26
-rw-r--r--drivers/media/dvb-frontends/mn88473.c225
-rw-r--r--drivers/media/dvb-frontends/mn88473_priv.h2
-rw-r--r--drivers/media/dvb-frontends/mt312.c9
-rw-r--r--drivers/media/dvb-frontends/mt352.c4
-rw-r--r--drivers/media/dvb-frontends/nxt200x.c15
-rw-r--r--drivers/media/dvb-frontends/nxt6000.c140
-rw-r--r--drivers/media/dvb-frontends/or51132.c10
-rw-r--r--drivers/media/dvb-frontends/or51211.c7
-rw-r--r--drivers/media/dvb-frontends/rtl2830.c2
-rw-r--r--drivers/media/dvb-frontends/rtl2832.c2
-rw-r--r--drivers/media/dvb-frontends/s5h1409.c8
-rw-r--r--drivers/media/dvb-frontends/s5h1411.c8
-rw-r--r--drivers/media/dvb-frontends/s5h1420.c4
-rw-r--r--drivers/media/dvb-frontends/s5h1432.c8
-rw-r--r--drivers/media/dvb-frontends/s921.c8
-rw-r--r--drivers/media/dvb-frontends/si2165.c2
-rw-r--r--drivers/media/dvb-frontends/si21xx.c10
-rw-r--r--drivers/media/dvb-frontends/sp8870.c4
-rw-r--r--drivers/media/dvb-frontends/sp887x.c7
-rw-r--r--drivers/media/dvb-frontends/stb0899_drv.c34
-rw-r--r--drivers/media/dvb-frontends/stb6000.c3
-rw-r--r--drivers/media/dvb-frontends/stb6100.c6
-rw-r--r--drivers/media/dvb-frontends/stv0288.c13
-rw-r--r--drivers/media/dvb-frontends/stv0297.c8
-rw-r--r--drivers/media/dvb-frontends/stv0299.c11
-rw-r--r--drivers/media/dvb-frontends/stv0367.c4
-rw-r--r--drivers/media/dvb-frontends/stv0900_core.c2
-rw-r--r--drivers/media/dvb-frontends/stv0900_sw.c3
-rw-r--r--drivers/media/dvb-frontends/stv090x.c28
-rw-r--r--drivers/media/dvb-frontends/stv6110.c3
-rw-r--r--drivers/media/dvb-frontends/stv6110x.c4
-rw-r--r--drivers/media/dvb-frontends/tc90522.c7
-rw-r--r--drivers/media/dvb-frontends/tda10021.c7
-rw-r--r--drivers/media/dvb-frontends/tda10023.c10
-rw-r--r--drivers/media/dvb-frontends/tda10048.c16
-rw-r--r--drivers/media/dvb-frontends/tda1004x.c4
-rw-r--r--drivers/media/dvb-frontends/tda10071.c4
-rw-r--r--drivers/media/dvb-frontends/tda10086.c2
-rw-r--r--drivers/media/dvb-frontends/tda18271c2dd.c3
-rw-r--r--drivers/media/dvb-frontends/tda665x.c3
-rw-r--r--drivers/media/dvb-frontends/tda8083.c4
-rw-r--r--drivers/media/dvb-frontends/tda8261.c3
-rw-r--r--drivers/media/dvb-frontends/tda826x.c3
-rw-r--r--drivers/media/dvb-frontends/ts2020.c3
-rw-r--r--drivers/media/dvb-frontends/tua6100.c3
-rw-r--r--drivers/media/dvb-frontends/ves1820.c12
-rw-r--r--drivers/media/dvb-frontends/ves1x93.c4
-rw-r--r--drivers/media/dvb-frontends/zl10036.c8
-rw-r--r--drivers/media/dvb-frontends/zl10039.c6
-rw-r--r--drivers/media/dvb-frontends/zl10353.c4
-rw-r--r--drivers/media/firewire/firedtv-avc.c4
-rw-r--r--drivers/media/firewire/firedtv-rc.c5
-rw-r--r--drivers/media/i2c/Kconfig6
-rw-r--r--drivers/media/i2c/ad5820.c5
-rw-r--r--drivers/media/i2c/adv7511.c5
-rw-r--r--drivers/media/i2c/adv7604.c30
-rw-r--r--drivers/media/i2c/adv7842.c6
-rw-r--r--drivers/media/i2c/cx25840/cx25840-core.c11
-rw-r--r--drivers/media/i2c/cx25840/cx25840-ir.c7
-rw-r--r--drivers/media/i2c/msp3400-driver.c90
-rw-r--r--drivers/media/i2c/msp3400-kthreads.c115
-rw-r--r--drivers/media/i2c/smiapp-pll.c3
-rw-r--r--drivers/media/i2c/smiapp/smiapp-core.c916
-rw-r--r--drivers/media/i2c/smiapp/smiapp-regs.c4
-rw-r--r--drivers/media/i2c/smiapp/smiapp.h28
-rw-r--r--drivers/media/i2c/soc_camera/ov772x.c3
-rw-r--r--drivers/media/i2c/soc_camera/ov9740.c3
-rw-r--r--drivers/media/i2c/soc_camera/tw9910.c3
-rw-r--r--drivers/media/i2c/tvaudio.c5
-rw-r--r--drivers/media/i2c/tvp514x.c6
-rw-r--r--drivers/media/i2c/tvp5150.c298
-rw-r--r--drivers/media/media-device.c32
-rw-r--r--drivers/media/media-entity.c6
-rw-r--r--drivers/media/pci/b2c2/flexcop-dma.c6
-rw-r--r--drivers/media/pci/b2c2/flexcop-pci.c7
-rw-r--r--drivers/media/pci/bt8xx/btcx-risc.c46
-rw-r--r--drivers/media/pci/bt8xx/bttv-cards.c9
-rw-r--r--drivers/media/pci/bt8xx/bttv-driver.c6
-rw-r--r--drivers/media/pci/bt8xx/bttv-i2c.c6
-rw-r--r--drivers/media/pci/bt8xx/bttv-input.c4
-rw-r--r--drivers/media/pci/bt8xx/dst.c278
-rw-r--r--drivers/media/pci/bt8xx/dvb-bt8xx.c25
-rw-r--r--drivers/media/pci/cobalt/cobalt-v4l2.c23
-rw-r--r--drivers/media/pci/cx18/cx18-alsa-main.c8
-rw-r--r--drivers/media/pci/cx18/cx18-av-core.c17
-rw-r--r--drivers/media/pci/cx18/cx18-av-firmware.c3
-rw-r--r--drivers/media/pci/cx18/cx18-controls.c9
-rw-r--r--drivers/media/pci/cx18/cx18-driver.c35
-rw-r--r--drivers/media/pci/cx18/cx18-dvb.c6
-rw-r--r--drivers/media/pci/cx18/cx18-fileops.c6
-rw-r--r--drivers/media/pci/cx18/cx18-ioctl.c6
-rw-r--r--drivers/media/pci/cx18/cx18-irq.c4
-rw-r--r--drivers/media/pci/cx18/cx18-mailbox.c39
-rw-r--r--drivers/media/pci/cx18/cx18-queue.c8
-rw-r--r--drivers/media/pci/cx18/cx18-streams.c7
-rw-r--r--drivers/media/pci/cx23885/altera-ci.c15
-rw-r--r--drivers/media/pci/cx23885/altera-ci.h14
-rw-r--r--drivers/media/pci/cx23885/cimax2.c15
-rw-r--r--drivers/media/pci/cx23885/cx23885-417.c65
-rw-r--r--drivers/media/pci/cx23885/cx23885-alsa.c30
-rw-r--r--drivers/media/pci/cx23885/cx23885-cards.c53
-rw-r--r--drivers/media/pci/cx23885/cx23885-core.c146
-rw-r--r--drivers/media/pci/cx23885/cx23885-dvb.c41
-rw-r--r--drivers/media/pci/cx23885/cx23885-f300.c2
-rw-r--r--drivers/media/pci/cx23885/cx23885-i2c.c27
-rw-r--r--drivers/media/pci/cx23885/cx23885-input.c6
-rw-r--r--drivers/media/pci/cx23885/cx23885-ir.c4
-rw-r--r--drivers/media/pci/cx23885/cx23885-vbi.c7
-rw-r--r--drivers/media/pci/cx23885/cx23885-video.c26
-rw-r--r--drivers/media/pci/cx23885/cx23885.h2
-rw-r--r--drivers/media/pci/cx23885/cx23888-ir.c13
-rw-r--r--drivers/media/pci/cx23885/netup-eeprom.c4
-rw-r--r--drivers/media/pci/cx23885/netup-init.c8
-rw-r--r--drivers/media/pci/cx88/cx88-alsa.c304
-rw-r--r--drivers/media/pci/cx88/cx88-blackbird.c292
-rw-r--r--drivers/media/pci/cx88/cx88-cards.c485
-rw-r--r--drivers/media/pci/cx88/cx88-core.c420
-rw-r--r--drivers/media/pci/cx88/cx88-dsp.c136
-rw-r--r--drivers/media/pci/cx88/cx88-dvb.c331
-rw-r--r--drivers/media/pci/cx88/cx88-i2c.c136
-rw-r--r--drivers/media/pci/cx88/cx88-input.c60
-rw-r--r--drivers/media/pci/cx88/cx88-mpeg.c315
-rw-r--r--drivers/media/pci/cx88/cx88-reg.h123
-rw-r--r--drivers/media/pci/cx88/cx88-tvaudio.c169
-rw-r--r--drivers/media/pci/cx88/cx88-vbi.c47
-rw-r--r--drivers/media/pci/cx88/cx88-video.c403
-rw-r--r--drivers/media/pci/cx88/cx88-vp3054-i2c.c60
-rw-r--r--drivers/media/pci/cx88/cx88-vp3054-i2c.h38
-rw-r--r--drivers/media/pci/cx88/cx88.h203
-rw-r--r--drivers/media/pci/ddbridge/ddbridge-core.c6
-rw-r--r--drivers/media/pci/dm1105/dm1105.c3
-rw-r--r--drivers/media/pci/ivtv/ivtv-alsa-main.c12
-rw-r--r--drivers/media/pci/ivtv/ivtv-driver.c37
-rw-r--r--drivers/media/pci/ivtv/ivtv-firmware.c4
-rw-r--r--drivers/media/pci/ivtv/ivtv-yuv.c8
-rw-r--r--drivers/media/pci/ivtv/ivtvfb.c3
-rw-r--r--drivers/media/pci/meye/meye.c17
-rw-r--r--drivers/media/pci/netup_unidvb/netup_unidvb_core.c13
-rw-r--r--drivers/media/pci/pluto2/pluto2.c4
-rw-r--r--drivers/media/pci/pt1/pt1.c7
-rw-r--r--drivers/media/pci/pt1/va1j5jf8007s.c2
-rw-r--r--drivers/media/pci/pt1/va1j5jf8007t.c2
-rw-r--r--drivers/media/pci/saa7134/saa7134-alsa.c3
-rw-r--r--drivers/media/pci/saa7134/saa7134-cards.c8
-rw-r--r--drivers/media/pci/saa7134/saa7134-core.c39
-rw-r--r--drivers/media/pci/saa7134/saa7134-dvb.c32
-rw-r--r--drivers/media/pci/saa7134/saa7134-i2c.c31
-rw-r--r--drivers/media/pci/saa7134/saa7134-input.c13
-rw-r--r--drivers/media/pci/saa7164/saa7164-buffer.c3
-rw-r--r--drivers/media/pci/saa7164/saa7164-bus.c4
-rw-r--r--drivers/media/pci/saa7164/saa7164-cards.c4
-rw-r--r--drivers/media/pci/saa7164/saa7164-cmd.c12
-rw-r--r--drivers/media/pci/saa7164/saa7164-core.c66
-rw-r--r--drivers/media/pci/saa7164/saa7164-dvb.c34
-rw-r--r--drivers/media/pci/saa7164/saa7164-encoder.c18
-rw-r--r--drivers/media/pci/saa7164/saa7164-fw.c10
-rw-r--r--drivers/media/pci/saa7164/saa7164-vbi.c14
-rw-r--r--drivers/media/pci/solo6x10/solo6x10-v4l2.c4
-rw-r--r--drivers/media/pci/solo6x10/solo6x10.h3
-rw-r--r--drivers/media/pci/ttpci/Makefile2
-rw-r--r--drivers/media/pci/ttpci/av7110.c49
-rw-r--r--drivers/media/pci/ttpci/av7110.h7
-rw-r--r--drivers/media/pci/ttpci/av7110_hw.c12
-rw-r--r--drivers/media/pci/ttpci/budget-av.c3
-rw-r--r--drivers/media/pci/ttpci/budget-ci.c4
-rw-r--r--drivers/media/pci/ttpci/budget-patch.c3
-rw-r--r--drivers/media/pci/ttpci/budget.c3
-rw-r--r--drivers/media/pci/ttpci/budget.h8
-rw-r--r--drivers/media/pci/ttpci/dvb_filter.c114
-rw-r--r--drivers/media/pci/ttpci/dvb_filter.h (renamed from drivers/media/dvb-core/dvb_filter.h)0
-rw-r--r--drivers/media/pci/ttpci/ttpci-eeprom.c3
-rw-r--r--drivers/media/pci/tw5864/tw5864-reg.h8
-rw-r--r--drivers/media/pci/tw5864/tw5864-video.c13
-rw-r--r--drivers/media/pci/tw68/tw68-video.c16
-rw-r--r--drivers/media/pci/zoran/zoran_device.c35
-rw-r--r--drivers/media/pci/zoran/zoran_driver.c2
-rw-r--r--drivers/media/platform/Kconfig49
-rw-r--r--drivers/media/platform/Makefile3
-rw-r--r--drivers/media/platform/atmel/atmel-isc.c9
-rw-r--r--drivers/media/platform/blackfin/bfin_capture.c6
-rw-r--r--drivers/media/platform/blackfin/ppi.c2
-rw-r--r--drivers/media/platform/coda/coda-common.c7
-rw-r--r--drivers/media/platform/coda/coda-h264.c1
-rw-r--r--drivers/media/platform/davinci/dm355_ccdc.c4
-rw-r--r--drivers/media/platform/davinci/dm644x_ccdc.c4
-rw-r--r--drivers/media/platform/davinci/vpbe.c82
-rw-r--r--drivers/media/platform/davinci/vpfe_capture.c91
-rw-r--r--drivers/media/platform/davinci/vpif_capture.c37
-rw-r--r--drivers/media/platform/davinci/vpif_display.c39
-rw-r--r--drivers/media/platform/davinci/vpss.c7
-rw-r--r--drivers/media/platform/exynos-gsc/gsc-core.c279
-rw-r--r--drivers/media/platform/exynos-gsc/gsc-core.h11
-rw-r--r--drivers/media/platform/exynos-gsc/gsc-m2m.c38
-rw-r--r--drivers/media/platform/exynos4-is/fimc-core.c14
-rw-r--r--drivers/media/platform/exynos4-is/media-dev.c3
-rw-r--r--drivers/media/platform/marvell-ccic/mcam-core.c26
-rw-r--r--drivers/media/platform/mtk-mdp/Makefile9
-rw-r--r--drivers/media/platform/mtk-mdp/mtk_mdp_comp.c159
-rw-r--r--drivers/media/platform/mtk-mdp/mtk_mdp_comp.h72
-rw-r--r--drivers/media/platform/mtk-mdp/mtk_mdp_core.c290
-rw-r--r--drivers/media/platform/mtk-mdp/mtk_mdp_core.h260
-rw-r--r--drivers/media/platform/mtk-mdp/mtk_mdp_ipi.h126
-rw-r--r--drivers/media/platform/mtk-mdp/mtk_mdp_m2m.c1286
-rw-r--r--drivers/media/platform/mtk-mdp/mtk_mdp_m2m.h22
-rw-r--r--drivers/media/platform/mtk-mdp/mtk_mdp_regs.c156
-rw-r--r--drivers/media/platform/mtk-mdp/mtk_mdp_regs.h31
-rw-r--r--drivers/media/platform/mtk-mdp/mtk_mdp_vpu.c145
-rw-r--r--drivers/media/platform/mtk-mdp/mtk_mdp_vpu.h41
-rw-r--r--drivers/media/platform/mtk-vcodec/Makefile15
-rw-r--r--drivers/media/platform/mtk-vcodec/mtk_vcodec_dec.c1451
-rw-r--r--drivers/media/platform/mtk-vcodec/mtk_vcodec_dec.h88
-rw-r--r--drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_drv.c394
-rw-r--r--drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_pm.c202
-rw-r--r--drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_pm.h28
-rw-r--r--drivers/media/platform/mtk-vcodec/mtk_vcodec_drv.h62
-rw-r--r--drivers/media/platform/mtk-vcodec/mtk_vcodec_enc_drv.c8
-rw-r--r--drivers/media/platform/mtk-vcodec/mtk_vcodec_intr.c3
-rw-r--r--drivers/media/platform/mtk-vcodec/mtk_vcodec_util.c33
-rw-r--r--drivers/media/platform/mtk-vcodec/mtk_vcodec_util.h5
-rw-r--r--drivers/media/platform/mtk-vcodec/vdec/vdec_h264_if.c507
-rw-r--r--drivers/media/platform/mtk-vcodec/vdec/vdec_vp8_if.c634
-rw-r--r--drivers/media/platform/mtk-vcodec/vdec/vdec_vp9_if.c967
-rw-r--r--drivers/media/platform/mtk-vcodec/vdec_drv_base.h56
-rw-r--r--drivers/media/platform/mtk-vcodec/vdec_drv_if.c122
-rw-r--r--drivers/media/platform/mtk-vcodec/vdec_drv_if.h101
-rw-r--r--drivers/media/platform/mtk-vcodec/vdec_ipi_msg.h103
-rw-r--r--drivers/media/platform/mtk-vcodec/vdec_vpu_if.c170
-rw-r--r--drivers/media/platform/mtk-vcodec/vdec_vpu_if.h96
-rw-r--r--drivers/media/platform/mtk-vpu/mtk_vpu.c21
-rw-r--r--drivers/media/platform/mtk-vpu/mtk_vpu.h48
-rw-r--r--drivers/media/platform/mx2_emmaprp.c10
-rw-r--r--drivers/media/platform/omap/omap_vout.c24
-rw-r--r--drivers/media/platform/omap/omap_vout_vrfb.c5
-rw-r--r--drivers/media/platform/omap3isp/isp.c23
-rw-r--r--drivers/media/platform/omap3isp/ispccdc.c9
-rw-r--r--drivers/media/platform/omap3isp/ispcsi2.c13
-rw-r--r--drivers/media/platform/omap3isp/ispcsiphy.c4
-rw-r--r--drivers/media/platform/omap3isp/isph3a_aewb.c8
-rw-r--r--drivers/media/platform/omap3isp/isph3a_af.c8
-rw-r--r--drivers/media/platform/omap3isp/isphist.c28
-rw-r--r--drivers/media/platform/omap3isp/ispstat.c58
-rw-r--r--drivers/media/platform/pxa_camera.c18
-rw-r--r--drivers/media/platform/rcar-fcp.c1
-rw-r--r--drivers/media/platform/rcar_fdp1.c2445
-rw-r--r--drivers/media/platform/s5p-jpeg/jpeg-hw-exynos4.c17
-rw-r--r--drivers/media/platform/s5p-mfc/regs-mfc-v6.h3
-rw-r--r--drivers/media/platform/s5p-mfc/regs-mfc-v8.h2
-rw-r--r--drivers/media/platform/s5p-mfc/regs-mfc.h3
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc.c73
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc_common.h12
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc_debug.h6
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc_dec.c15
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc_enc.c2
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc_opr.c6
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc_opr_v5.c7
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc_pm.c132
-rw-r--r--drivers/media/platform/sti/bdisp/bdisp-v4l2.c1
-rw-r--r--drivers/media/platform/sti/c8sectpfe/c8sectpfe-core.c24
-rw-r--r--drivers/media/platform/sti/hva/hva-hw.c8
-rw-r--r--drivers/media/platform/ti-vpe/Makefile10
-rw-r--r--drivers/media/platform/ti-vpe/cal.c14
-rw-r--r--drivers/media/platform/ti-vpe/csc.c18
-rw-r--r--drivers/media/platform/ti-vpe/csc.h2
-rw-r--r--drivers/media/platform/ti-vpe/sc.c28
-rw-r--r--drivers/media/platform/ti-vpe/sc.h11
-rw-r--r--drivers/media/platform/ti-vpe/vpdma.c355
-rw-r--r--drivers/media/platform/ti-vpe/vpdma.h85
-rw-r--r--drivers/media/platform/ti-vpe/vpdma_priv.h130
-rw-r--r--drivers/media/platform/ti-vpe/vpe.c471
-rw-r--r--drivers/media/platform/via-camera.c7
-rw-r--r--drivers/media/platform/vivid/Kconfig2
-rw-r--r--drivers/media/platform/vivid/vivid-cec.c3
-rw-r--r--drivers/media/platform/vivid/vivid-cec.h1
-rw-r--r--drivers/media/platform/vivid/vivid-core.c13
-rw-r--r--drivers/media/platform/vivid/vivid-core.h3
-rw-r--r--drivers/media/platform/vivid/vivid-ctrls.c25
-rw-r--r--drivers/media/platform/vivid/vivid-vid-cap.c17
-rw-r--r--drivers/media/platform/vivid/vivid-vid-common.c70
-rw-r--r--drivers/media/platform/vivid/vivid-vid-out.c1
-rw-r--r--drivers/media/platform/vsp1/vsp1_drv.c1
-rw-r--r--drivers/media/platform/vsp1/vsp1_pipe.c8
-rw-r--r--drivers/media/platform/vsp1/vsp1_rwpf.c2
-rw-r--r--drivers/media/platform/vsp1/vsp1_video.c5
-rw-r--r--drivers/media/radio/radio-gemtek.c8
-rw-r--r--drivers/media/radio/radio-wl1273.c3
-rw-r--r--drivers/media/radio/si470x/radio-si470x-i2c.c7
-rw-r--r--drivers/media/radio/si470x/radio-si470x-usb.c15
-rw-r--r--drivers/media/radio/si4713/si4713.c13
-rw-r--r--drivers/media/radio/wl128x/fmdrv_common.c46
-rw-r--r--drivers/media/radio/wl128x/fmdrv_rx.c8
-rw-r--r--drivers/media/rc/Kconfig17
-rw-r--r--drivers/media/rc/Makefile1
-rw-r--r--drivers/media/rc/ati_remote.c3
-rw-r--r--drivers/media/rc/ene_ir.c3
-rw-r--r--drivers/media/rc/fintek-cir.c6
-rw-r--r--drivers/media/rc/imon.c61
-rw-r--r--drivers/media/rc/ir-hix5hd2.c25
-rw-r--r--drivers/media/rc/ir-sanyo-decoder.c3
-rw-r--r--drivers/media/rc/ite-cir.c11
-rw-r--r--drivers/media/rc/lirc_dev.c29
-rw-r--r--drivers/media/rc/mceusb.c102
-rw-r--r--drivers/media/rc/meson-ir.c1
-rw-r--r--drivers/media/rc/nuvoton-cir.c143
-rw-r--r--drivers/media/rc/nuvoton-cir.h4
-rw-r--r--drivers/media/rc/rc-ir-raw.c17
-rw-r--r--drivers/media/rc/rc-main.c70
-rw-r--r--drivers/media/rc/redrat3.c327
-rw-r--r--drivers/media/rc/serial_ir.c844
-rw-r--r--drivers/media/rc/streamzap.c11
-rw-r--r--drivers/media/rc/winbond-cir.c13
-rw-r--r--drivers/media/spi/gs1662.c4
-rw-r--r--drivers/media/tuners/fc0011.c11
-rw-r--r--drivers/media/tuners/fc0012.c3
-rw-r--r--drivers/media/tuners/fc0013.c3
-rw-r--r--drivers/media/tuners/max2165.c4
-rw-r--r--drivers/media/tuners/mc44s803.c8
-rw-r--r--drivers/media/tuners/mt2060.c3
-rw-r--r--drivers/media/tuners/mt2063.c4
-rw-r--r--drivers/media/tuners/mt20xx.c25
-rw-r--r--drivers/media/tuners/mt2131.c3
-rw-r--r--drivers/media/tuners/mt2266.c3
-rw-r--r--drivers/media/tuners/mxl5005s.c3
-rw-r--r--drivers/media/tuners/mxl5007t.c4
-rw-r--r--drivers/media/tuners/qt1010.c3
-rw-r--r--drivers/media/tuners/r820t.c4
-rw-r--r--drivers/media/tuners/tda18218.c3
-rw-r--r--drivers/media/tuners/tda18271-common.c4
-rw-r--r--drivers/media/tuners/tda18271-fe.c7
-rw-r--r--drivers/media/tuners/tda18271-maps.c6
-rw-r--r--drivers/media/tuners/tda827x.c3
-rw-r--r--drivers/media/tuners/tda8290.c8
-rw-r--r--drivers/media/tuners/tda9887.c2
-rw-r--r--drivers/media/tuners/tea5761.c10
-rw-r--r--drivers/media/tuners/tea5767.c4
-rw-r--r--drivers/media/tuners/tuner-simple.c49
-rw-r--r--drivers/media/tuners/tuner-xc2028.c120
-rw-r--r--drivers/media/tuners/xc4000.c29
-rw-r--r--drivers/media/tuners/xc5000.c4
-rw-r--r--drivers/media/usb/Kconfig5
-rw-r--r--drivers/media/usb/Makefile1
-rw-r--r--drivers/media/usb/au0828/au0828-video.c3
-rw-r--r--drivers/media/usb/b2c2/flexcop-usb.c11
-rw-r--r--drivers/media/usb/cpia2/cpia2_usb.c4
-rw-r--r--drivers/media/usb/cx231xx/cx231xx-core.c10
-rw-r--r--drivers/media/usb/cx231xx/cx231xx-dvb.c4
-rw-r--r--drivers/media/usb/dvb-usb-v2/af9015.c2
-rw-r--r--drivers/media/usb/dvb-usb-v2/af9035.c2
-rw-r--r--drivers/media/usb/dvb-usb-v2/dvbsky.c4
-rw-r--r--drivers/media/usb/dvb-usb-v2/lmedm04.c14
-rw-r--r--drivers/media/usb/dvb-usb-v2/mxl111sf-demod.c2
-rw-r--r--drivers/media/usb/dvb-usb-v2/mxl111sf-i2c.c12
-rw-r--r--drivers/media/usb/dvb-usb-v2/mxl111sf-tuner.c3
-rw-r--r--drivers/media/usb/dvb-usb-v2/mxl111sf.c10
-rw-r--r--drivers/media/usb/dvb-usb/af9005-fe.c4
-rw-r--r--drivers/media/usb/dvb-usb/af9005.c1
-rw-r--r--drivers/media/usb/dvb-usb/cinergyT2-core.c6
-rw-r--r--drivers/media/usb/dvb-usb/cinergyT2-fe.c4
-rw-r--r--drivers/media/usb/dvb-usb/cxusb.c26
-rw-r--r--drivers/media/usb/dvb-usb/cxusb.h5
-rw-r--r--drivers/media/usb/dvb-usb/dib0700_core.c5
-rw-r--r--drivers/media/usb/dvb-usb/dib0700_devices.c3
-rw-r--r--drivers/media/usb/dvb-usb/dibusb-common.c2
-rw-r--r--drivers/media/usb/dvb-usb/dibusb-mc-common.c1
-rw-r--r--drivers/media/usb/dvb-usb/dtt200u-fe.c4
-rw-r--r--drivers/media/usb/dvb-usb/dvb-usb-dvb.c3
-rw-r--r--drivers/media/usb/dvb-usb/dvb-usb-firmware.c6
-rw-r--r--drivers/media/usb/dvb-usb/dvb-usb.h6
-rw-r--r--drivers/media/usb/dvb-usb/dw2102.c12
-rw-r--r--drivers/media/usb/dvb-usb/friio-fe.c4
-rw-r--r--drivers/media/usb/dvb-usb/friio.c4
-rw-r--r--drivers/media/usb/dvb-usb/gp8psk.c3
-rw-r--r--drivers/media/usb/dvb-usb/m920x.c10
-rw-r--r--drivers/media/usb/dvb-usb/opera1.c3
-rw-r--r--drivers/media/usb/dvb-usb/technisat-usb2.c3
-rw-r--r--drivers/media/usb/dvb-usb/vp702x-fe.c4
-rw-r--r--drivers/media/usb/dvb-usb/vp7045-fe.c4
-rw-r--r--drivers/media/usb/em28xx/Kconfig2
-rw-r--r--drivers/media/usb/em28xx/em28xx-audio.c95
-rw-r--r--drivers/media/usb/em28xx/em28xx-camera.c69
-rw-r--r--drivers/media/usb/em28xx/em28xx-cards.c204
-rw-r--r--drivers/media/usb/em28xx/em28xx-core.c206
-rw-r--r--drivers/media/usb/em28xx/em28xx-dvb.c112
-rw-r--r--drivers/media/usb/em28xx/em28xx-i2c.c291
-rw-r--r--drivers/media/usb/em28xx/em28xx-input.c65
-rw-r--r--drivers/media/usb/em28xx/em28xx-vbi.c9
-rw-r--r--drivers/media/usb/em28xx/em28xx-video.c163
-rw-r--r--drivers/media/usb/em28xx/em28xx.h19
-rw-r--r--drivers/media/usb/go7007/Kconfig2
-rw-r--r--drivers/media/usb/gspca/gspca.c3
-rw-r--r--drivers/media/usb/gspca/jl2005bcd.c5
-rw-r--r--drivers/media/usb/gspca/m5602/m5602_core.c11
-rw-r--r--drivers/media/usb/gspca/mr97310a.c3
-rw-r--r--drivers/media/usb/gspca/ov519.c3
-rw-r--r--drivers/media/usb/gspca/pac207.c4
-rw-r--r--drivers/media/usb/gspca/pac7302.c3
-rw-r--r--drivers/media/usb/gspca/sn9c20x.c6
-rw-r--r--drivers/media/usb/gspca/spca506.c3
-rw-r--r--drivers/media/usb/gspca/sq905.c3
-rw-r--r--drivers/media/usb/gspca/sq905c.c9
-rw-r--r--drivers/media/usb/gspca/stv06xx/stv06xx.c27
-rw-r--r--drivers/media/usb/gspca/sunplus.c3
-rw-r--r--drivers/media/usb/gspca/topro.c3
-rw-r--r--drivers/media/usb/gspca/zc3xx.c3
-rw-r--r--drivers/media/usb/hdpvr/hdpvr-core.c9
-rw-r--r--drivers/media/usb/hdpvr/hdpvr-i2c.c7
-rw-r--r--drivers/media/usb/hdpvr/hdpvr-video.c26
-rw-r--r--drivers/media/usb/pulse8-cec/Kconfig (renamed from drivers/staging/media/pulse8-cec/Kconfig)2
-rw-r--r--drivers/media/usb/pulse8-cec/Makefile (renamed from drivers/staging/media/pulse8-cec/Makefile)0
-rw-r--r--drivers/media/usb/pulse8-cec/pulse8-cec.c (renamed from drivers/staging/media/pulse8-cec/pulse8-cec.c)12
-rw-r--r--drivers/media/usb/pvrusb2/pvrusb2-audio.c4
-rw-r--r--drivers/media/usb/pvrusb2/pvrusb2-cs53l32a.c4
-rw-r--r--drivers/media/usb/pvrusb2/pvrusb2-cx2584x-v4l.c4
-rw-r--r--drivers/media/usb/pvrusb2/pvrusb2-debugifc.c4
-rw-r--r--drivers/media/usb/pvrusb2/pvrusb2-eeprom.c7
-rw-r--r--drivers/media/usb/pvrusb2/pvrusb2-encoder.c29
-rw-r--r--drivers/media/usb/pvrusb2/pvrusb2-hdw.c181
-rw-r--r--drivers/media/usb/pvrusb2/pvrusb2-i2c-core.c47
-rw-r--r--drivers/media/usb/pvrusb2/pvrusb2-io.c35
-rw-r--r--drivers/media/usb/pvrusb2/pvrusb2-ioread.c36
-rw-r--r--drivers/media/usb/pvrusb2/pvrusb2-std.c3
-rw-r--r--drivers/media/usb/pvrusb2/pvrusb2-sysfs.c1
-rw-r--r--drivers/media/usb/pvrusb2/pvrusb2-v4l2.c10
-rw-r--r--drivers/media/usb/pvrusb2/pvrusb2-video-v4l.c4
-rw-r--r--drivers/media/usb/pvrusb2/pvrusb2-wm8775.c3
-rw-r--r--drivers/media/usb/pwc/pwc-if.c4
-rw-r--r--drivers/media/usb/pwc/pwc-v4l.c6
-rw-r--r--drivers/media/usb/siano/smsusb.c4
-rw-r--r--drivers/media/usb/stkwebcam/stk-sensor.c10
-rw-r--r--drivers/media/usb/stkwebcam/stk-webcam.c14
-rw-r--r--drivers/media/usb/stkwebcam/stk-webcam.h2
-rw-r--r--drivers/media/usb/tm6000/tm6000-alsa.c4
-rw-r--r--drivers/media/usb/tm6000/tm6000-core.c14
-rw-r--r--drivers/media/usb/tm6000/tm6000-dvb.c16
-rw-r--r--drivers/media/usb/tm6000/tm6000-i2c.c3
-rw-r--r--drivers/media/usb/tm6000/tm6000-stds.c3
-rw-r--r--drivers/media/usb/tm6000/tm6000-video.c18
-rw-r--r--drivers/media/usb/ttusb-budget/dvb-ttusb-budget.c3
-rw-r--r--drivers/media/usb/ttusb-dec/ttusb_dec.c92
-rw-r--r--drivers/media/usb/ttusb-dec/ttusbdecfe.c8
-rw-r--r--drivers/media/usb/usbtv/usbtv-core.c7
-rw-r--r--drivers/media/usb/usbtv/usbtv-video.c105
-rw-r--r--drivers/media/usb/usbtv/usbtv.h3
-rw-r--r--drivers/media/usb/usbvision/usbvision-core.c20
-rw-r--r--drivers/media/usb/usbvision/usbvision-video.c4
-rw-r--r--drivers/media/usb/uvc/uvc_driver.c177
-rw-r--r--drivers/media/usb/uvc/uvc_v4l2.c19
-rw-r--r--drivers/media/usb/uvc/uvc_video.c6
-rw-r--r--drivers/media/usb/uvc/uvcvideo.h12
-rw-r--r--drivers/media/usb/zr364xx/zr364xx.c6
-rw-r--r--drivers/media/v4l2-core/Kconfig1
-rw-r--r--drivers/media/v4l2-core/tuner-core.c121
-rw-r--r--drivers/media/v4l2-core/v4l2-compat-ioctl32.c30
-rw-r--r--drivers/media/v4l2-core/v4l2-ctrls.c2
-rw-r--r--drivers/media/v4l2-core/v4l2-dv-timings.c59
-rw-r--r--drivers/media/v4l2-core/v4l2-flash-led-class.c16
-rw-r--r--drivers/media/v4l2-core/v4l2-ioctl.c103
-rw-r--r--drivers/media/v4l2-core/videobuf-core.c3
-rw-r--r--drivers/media/v4l2-core/videobuf-dma-sg.c5
-rw-r--r--drivers/media/v4l2-core/videobuf2-core.c25
-rw-r--r--drivers/media/v4l2-core/videobuf2-v4l2.c10
-rw-r--r--drivers/media/v4l2-core/videobuf2-vmalloc.c3
-rw-r--r--drivers/memory/Kconfig8
-rw-r--r--drivers/memory/Makefile1
-rw-r--r--drivers/memory/atmel-ebi.c2
-rw-r--r--drivers/memory/atmel-sdramc.c6
-rw-r--r--drivers/memory/da8xx-ddrctl.c173
-rw-r--r--drivers/memstick/core/ms_block.c2
-rw-r--r--drivers/memstick/core/mspro_block.c2
-rw-r--r--drivers/message/fusion/mptbase.c28
-rw-r--r--drivers/message/fusion/mptlan.c15
-rw-r--r--drivers/message/fusion/mptscsih.c11
-rw-r--r--drivers/mfd/88pm860x-core.c5
-rw-r--r--drivers/mfd/Kconfig40
-rw-r--r--drivers/mfd/Makefile3
-rw-r--r--drivers/mfd/ab3100-core.c39
-rw-r--r--drivers/mfd/ab8500-core.c37
-rw-r--r--drivers/mfd/ab8500-debugfs.c21
-rw-r--r--drivers/mfd/ab8500-gpadc.c19
-rw-r--r--drivers/mfd/ab8500-sysctrl.c9
-rw-r--r--drivers/mfd/abx500-core.c7
-rw-r--r--drivers/mfd/arizona-core.c1
-rw-r--r--drivers/mfd/arizona-irq.c8
-rw-r--r--drivers/mfd/axp20x-i2c.c7
-rw-r--r--drivers/mfd/axp20x.c4
-rw-r--r--drivers/mfd/bcm590xx.c2
-rw-r--r--drivers/mfd/cs47l24-tables.c6
-rw-r--r--drivers/mfd/davinci_voicecodec.c1
-rw-r--r--drivers/mfd/fsl-imx25-tsadc.c1
-rw-r--r--drivers/mfd/hi655x-pmic.c1
-rw-r--r--drivers/mfd/intel-lpss-pci.c1
-rw-r--r--drivers/mfd/intel_soc_pmic_bxtwc.c40
-rw-r--r--drivers/mfd/lpc_ich.c2
-rw-r--r--drivers/mfd/max77620.c2
-rw-r--r--drivers/mfd/palmas.c3
-rw-r--r--drivers/mfd/qcom-pm8xxx.c (renamed from drivers/mfd/pm8921-core.c)271
-rw-r--r--drivers/mfd/rk808.c23
-rw-r--r--drivers/mfd/rn5t618.c1
-rw-r--r--drivers/mfd/si476x-i2c.c2
-rw-r--r--drivers/mfd/sun4i-gpadc.c181
-rw-r--r--drivers/mfd/tc3589x.c4
-rw-r--r--drivers/mfd/ti_am335x_tscadc.c1
-rw-r--r--drivers/mfd/tps65217.c71
-rw-r--r--drivers/mfd/tps65218.c34
-rw-r--r--drivers/mfd/tps65912-core.c17
-rw-r--r--drivers/mfd/wm5102-tables.c1412
-rw-r--r--drivers/mfd/wm8994-core.c6
-rw-r--r--drivers/misc/cxl/api.c147
-rw-r--r--drivers/misc/cxl/context.c22
-rw-r--r--drivers/misc/cxl/cxl.h6
-rw-r--r--drivers/misc/cxl/debugfs.c6
-rw-r--r--drivers/misc/cxl/file.c5
-rw-r--r--drivers/misc/cxl/guest.c2
-rw-r--r--drivers/misc/cxl/irq.c2
-rw-r--r--drivers/misc/cxl/native.c20
-rw-r--r--drivers/misc/cxl/pci.c2
-rw-r--r--drivers/misc/cxl/phb.c2
-rw-r--r--drivers/misc/genwqe/card_base.h1
-rw-r--r--drivers/misc/genwqe/card_utils.c12
-rw-r--r--drivers/misc/ibmasm/module.c2
-rw-r--r--drivers/misc/lkdtm.h2
-rw-r--r--drivers/misc/lkdtm_bugs.c71
-rw-r--r--drivers/misc/lkdtm_core.c2
-rw-r--r--drivers/misc/lkdtm_perms.c7
-rw-r--r--drivers/misc/mei/amthif.c2
-rw-r--r--drivers/misc/mei/bus-fixup.c100
-rw-r--r--drivers/misc/mei/bus.c217
-rw-r--r--drivers/misc/mei/client.c23
-rw-r--r--drivers/misc/mei/client.h2
-rw-r--r--drivers/misc/mei/hw-me-regs.h2
-rw-r--r--drivers/misc/mei/hw-me.c90
-rw-r--r--drivers/misc/mei/hw-me.h2
-rw-r--r--drivers/misc/mei/hw-txe.c18
-rw-r--r--drivers/misc/mei/init.c6
-rw-r--r--drivers/misc/mei/interrupt.c7
-rw-r--r--drivers/misc/mei/main.c45
-rw-r--r--drivers/misc/mei/mei_dev.h38
-rw-r--r--drivers/misc/mei/pci-me.c1
-rw-r--r--drivers/misc/sgi-gru/grumain.c2
-rw-r--r--drivers/misc/sgi-xp/xpnet.c21
-rw-r--r--drivers/misc/sram.c42
-rw-r--r--drivers/mmc/Kconfig2
-rw-r--r--drivers/mmc/Makefile1
-rw-r--r--drivers/mmc/card/Kconfig70
-rw-r--r--drivers/mmc/card/Makefile10
-rw-r--r--drivers/mmc/core/Kconfig66
-rw-r--r--drivers/mmc/core/Makefile4
-rw-r--r--drivers/mmc/core/block.c (renamed from drivers/mmc/card/block.c)501
-rw-r--r--drivers/mmc/core/block.h (renamed from drivers/mmc/card/block.h)0
-rw-r--r--drivers/mmc/core/core.c107
-rw-r--r--drivers/mmc/core/debugfs.c6
-rw-r--r--drivers/mmc/core/mmc.c113
-rw-r--r--drivers/mmc/core/mmc_ops.c220
-rw-r--r--drivers/mmc/core/mmc_ops.h7
-rw-r--r--drivers/mmc/core/mmc_test.c (renamed from drivers/mmc/card/mmc_test.c)60
-rw-r--r--drivers/mmc/core/queue.c (renamed from drivers/mmc/card/queue.c)318
-rw-r--r--drivers/mmc/core/queue.h (renamed from drivers/mmc/card/queue.h)29
-rw-r--r--drivers/mmc/core/sd.c26
-rw-r--r--drivers/mmc/core/sd_ops.c27
-rw-r--r--drivers/mmc/core/sdio.c17
-rw-r--r--drivers/mmc/core/sdio_cis.c3
-rw-r--r--drivers/mmc/core/sdio_irq.c12
-rw-r--r--drivers/mmc/core/sdio_uart.c (renamed from drivers/mmc/card/sdio_uart.c)4
-rw-r--r--drivers/mmc/core/slot-gpio.c8
-rw-r--r--drivers/mmc/host/Kconfig23
-rw-r--r--drivers/mmc/host/Makefile2
-rw-r--r--drivers/mmc/host/davinci_mmc.c130
-rw-r--r--drivers/mmc/host/dw_mmc-exynos.c52
-rw-r--r--drivers/mmc/host/dw_mmc-k3.c39
-rw-r--r--drivers/mmc/host/dw_mmc-pci.c29
-rw-r--r--drivers/mmc/host/dw_mmc-pltfm.c28
-rw-r--r--drivers/mmc/host/dw_mmc-rockchip.c42
-rw-r--r--drivers/mmc/host/dw_mmc.c181
-rw-r--r--drivers/mmc/host/dw_mmc.h7
-rw-r--r--drivers/mmc/host/jz4740_mmc.c3
-rw-r--r--drivers/mmc/host/meson-gx-mmc.c851
-rw-r--r--drivers/mmc/host/mmci.c128
-rw-r--r--drivers/mmc/host/mmci.h71
-rw-r--r--drivers/mmc/host/mtk-sd.c4
-rw-r--r--drivers/mmc/host/omap_hsmmc.c3
-rw-r--r--drivers/mmc/host/rtsx_pci_sdmmc.c3
-rw-r--r--drivers/mmc/host/rtsx_usb_sdmmc.c3
-rw-r--r--drivers/mmc/host/s3cmci.c15
-rw-r--r--drivers/mmc/host/sdhci-acpi.c1
-rw-r--r--drivers/mmc/host/sdhci-cadence.c284
-rw-r--r--drivers/mmc/host/sdhci-iproc.c35
-rw-r--r--drivers/mmc/host/sdhci-msm.c694
-rw-r--r--drivers/mmc/host/sdhci-of-at91.c1
-rw-r--r--drivers/mmc/host/sdhci-of-esdhc.c20
-rw-r--r--drivers/mmc/host/sdhci-pci-core.c98
-rw-r--r--drivers/mmc/host/sdhci-pci.h3
-rw-r--r--drivers/mmc/host/sdhci-pltfm.h2
-rw-r--r--drivers/mmc/host/sdhci-s3c.c2
-rw-r--r--drivers/mmc/host/sdhci.c395
-rw-r--r--drivers/mmc/host/sdhci.h4
-rw-r--r--drivers/mmc/host/sh_mobile_sdhi.c274
-rw-r--r--drivers/mmc/host/sunxi-mmc.c15
-rw-r--r--drivers/mmc/host/tmio_mmc.h32
-rw-r--r--drivers/mmc/host/tmio_mmc_pio.c119
-rw-r--r--drivers/mmc/host/wbsd.c11
-rw-r--r--drivers/mtd/bcm47xxpart.c10
-rw-r--r--drivers/mtd/devices/bcm47xxsflash.c24
-rw-r--r--drivers/mtd/maps/sc520cdp.c8
-rw-r--r--drivers/mtd/mtdcore.c44
-rw-r--r--drivers/mtd/mtdswap.c2
-rw-r--r--drivers/mtd/nand/Kconfig21
-rw-r--r--drivers/mtd/nand/Makefile2
-rw-r--r--drivers/mtd/nand/ams-delta.c5
-rw-r--r--drivers/mtd/nand/atmel_nand.c10
-rw-r--r--drivers/mtd/nand/brcmnand/brcmnand.c10
-rw-r--r--drivers/mtd/nand/cafe_nand.c5
-rw-r--r--drivers/mtd/nand/cmx270_nand.c4
-rw-r--r--drivers/mtd/nand/cs553x_nand.c5
-rw-r--r--drivers/mtd/nand/denali.c101
-rw-r--r--drivers/mtd/nand/denali.h12
-rw-r--r--drivers/mtd/nand/denali_dt.c3
-rw-r--r--drivers/mtd/nand/denali_pci.c1
-rw-r--r--drivers/mtd/nand/fsmc_nand.c9
-rw-r--r--drivers/mtd/nand/gpio.c5
-rw-r--r--drivers/mtd/nand/hisi504_nand.c4
-rw-r--r--drivers/mtd/nand/lpc32xx_mlc.c10
-rw-r--r--drivers/mtd/nand/lpc32xx_slc.c10
-rw-r--r--drivers/mtd/nand/mpc5121_nfc.c4
-rw-r--r--drivers/mtd/nand/mtk_nand.c4
-rw-r--r--drivers/mtd/nand/mxc_nand.c10
-rw-r--r--drivers/mtd/nand/nand_base.c84
-rw-r--r--drivers/mtd/nand/nand_ids.c3
-rw-r--r--drivers/mtd/nand/nand_timings.c26
-rw-r--r--drivers/mtd/nand/nandsim.c19
-rw-r--r--drivers/mtd/nand/omap2.c10
-rw-r--r--drivers/mtd/nand/orion_nand.c5
-rw-r--r--drivers/mtd/nand/oxnas_nand.c195
-rw-r--r--drivers/mtd/nand/pasemi_nand.c5
-rw-r--r--drivers/mtd/nand/plat_nand.c5
-rw-r--r--drivers/mtd/nand/pxa3xx_nand.c22
-rw-r--r--drivers/mtd/nand/s3c2410.c286
-rw-r--r--drivers/mtd/nand/socrates_nand.c12
-rw-r--r--drivers/mtd/nand/sunxi_nand.c4
-rw-r--r--drivers/mtd/nand/tango_nand.c676
-rw-r--r--drivers/mtd/nand/tmio_nand.c6
-rw-r--r--drivers/mtd/nand/vf610_nfc.c10
-rw-r--r--drivers/mtd/spi-nor/cadence-quadspi.c6
-rw-r--r--drivers/mtd/spi-nor/fsl-quadspi.c8
-rw-r--r--drivers/mtd/spi-nor/spi-nor.c14
-rw-r--r--drivers/net/appletalk/ipddp.c1
-rw-r--r--drivers/net/bonding/bond_alb.c82
-rw-r--r--drivers/net/bonding/bond_main.c35
-rw-r--r--drivers/net/can/Makefile1
-rw-r--r--drivers/net/can/grcan.c2
-rw-r--r--drivers/net/can/usb/kvaser_usb.c4
-rw-r--r--drivers/net/cris/eth_v10.c12
-rw-r--r--drivers/net/dsa/mv88e6xxx/Kconfig1
-rw-r--r--drivers/net/dsa/mv88e6xxx/Makefile1
-rw-r--r--drivers/net/dsa/mv88e6xxx/chip.c2023
-rw-r--r--drivers/net/dsa/mv88e6xxx/global1.c367
-rw-r--r--drivers/net/dsa/mv88e6xxx/global1.h18
-rw-r--r--drivers/net/dsa/mv88e6xxx/global2.c186
-rw-r--r--drivers/net/dsa/mv88e6xxx/global2.h17
-rw-r--r--drivers/net/dsa/mv88e6xxx/mv88e6xxx.h244
-rw-r--r--drivers/net/dsa/mv88e6xxx/port.c729
-rw-r--r--drivers/net/dsa/mv88e6xxx/port.h71
-rw-r--r--drivers/net/dummy.c3
-rw-r--r--drivers/net/ethernet/3com/3c509.c56
-rw-r--r--drivers/net/ethernet/3com/3c515.c16
-rw-r--r--drivers/net/ethernet/3com/3c574_cs.c1
-rw-r--r--drivers/net/ethernet/3com/3c589_cs.c1
-rw-r--r--drivers/net/ethernet/3com/3c59x.c16
-rw-r--r--drivers/net/ethernet/3com/typhoon.c66
-rw-r--r--drivers/net/ethernet/8390/8390.c1
-rw-r--r--drivers/net/ethernet/8390/8390p.c1
-rw-r--r--drivers/net/ethernet/8390/ax88796.c1
-rw-r--r--drivers/net/ethernet/8390/axnet_cs.c1
-rw-r--r--drivers/net/ethernet/8390/etherh.c1
-rw-r--r--drivers/net/ethernet/8390/hydra.c1
-rw-r--r--drivers/net/ethernet/8390/mac8390.c1
-rw-r--r--drivers/net/ethernet/8390/mcf8390.c1
-rw-r--r--drivers/net/ethernet/8390/ne2k-pci.c1
-rw-r--r--drivers/net/ethernet/8390/pcnet_cs.c1
-rw-r--r--drivers/net/ethernet/8390/smc-ultra.c1
-rw-r--r--drivers/net/ethernet/8390/wd.c1
-rw-r--r--drivers/net/ethernet/8390/zorro8390.c1
-rw-r--r--drivers/net/ethernet/Kconfig1
-rw-r--r--drivers/net/ethernet/Makefile2
-rw-r--r--drivers/net/ethernet/adaptec/starfire.c15
-rw-r--r--drivers/net/ethernet/adi/Kconfig2
-rw-r--r--drivers/net/ethernet/adi/bfin_mac.c1
-rw-r--r--drivers/net/ethernet/aeroflex/greth.c9
-rw-r--r--drivers/net/ethernet/agere/et131x.c7
-rw-r--r--drivers/net/ethernet/alacritech/Kconfig28
-rw-r--r--drivers/net/ethernet/alacritech/Makefile4
-rw-r--r--drivers/net/ethernet/alacritech/slic.h575
-rw-r--r--drivers/net/ethernet/alacritech/slicoss.c1870
-rw-r--r--drivers/net/ethernet/allwinner/sun4i-emac.c26
-rw-r--r--drivers/net/ethernet/alteon/acenic.c70
-rw-r--r--drivers/net/ethernet/altera/Makefile1
-rw-r--r--drivers/net/ethernet/altera/altera_tse.h12
-rw-r--r--drivers/net/ethernet/altera/altera_tse_main.c107
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_netdev.c18
-rw-r--r--drivers/net/ethernet/amd/Kconfig12
-rw-r--r--drivers/net/ethernet/amd/a2065.c1
-rw-r--r--drivers/net/ethernet/amd/am79c961a.c1
-rw-r--r--drivers/net/ethernet/amd/amd8111e.c19
-rw-r--r--drivers/net/ethernet/amd/ariadne.c1
-rw-r--r--drivers/net/ethernet/amd/atarilance.c9
-rw-r--r--drivers/net/ethernet/amd/au1000_eth.c1
-rw-r--r--drivers/net/ethernet/amd/declance.c1
-rw-r--r--drivers/net/ethernet/amd/hplance.c1
-rw-r--r--drivers/net/ethernet/amd/lance.c1
-rw-r--r--drivers/net/ethernet/amd/mvme147.c1
-rw-r--r--drivers/net/ethernet/amd/ni65.c1
-rw-r--r--drivers/net/ethernet/amd/nmclan_cs.c1
-rw-r--r--drivers/net/ethernet/amd/pcnet32.c15
-rw-r--r--drivers/net/ethernet/amd/sun3lance.c1
-rw-r--r--drivers/net/ethernet/amd/sunlance.c1
-rw-r--r--drivers/net/ethernet/amd/xgbe/Makefile5
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-common.h385
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c154
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-dev.c1054
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-drv.c349
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c87
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-i2c.c492
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-main.c705
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-mdio.c1130
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-pci.c529
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-phy-v1.c845
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c3084
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-platform.c642
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe.h386
-rw-r--r--drivers/net/ethernet/apm/xgene/xgene_enet_cle.c704
-rw-r--r--drivers/net/ethernet/apm/xgene/xgene_enet_cle.h5
-rw-r--r--drivers/net/ethernet/apm/xgene/xgene_enet_ethtool.c70
-rw-r--r--drivers/net/ethernet/apm/xgene/xgene_enet_hw.c140
-rw-r--r--drivers/net/ethernet/apm/xgene/xgene_enet_hw.h27
-rw-r--r--drivers/net/ethernet/apm/xgene/xgene_enet_main.c350
-rw-r--r--drivers/net/ethernet/apm/xgene/xgene_enet_main.h31
-rw-r--r--drivers/net/ethernet/apm/xgene/xgene_enet_ring2.c1
-rw-r--r--drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c146
-rw-r--r--drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c140
-rw-r--r--drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.h9
-rw-r--r--drivers/net/ethernet/apple/bmac.c1
-rw-r--r--drivers/net/ethernet/apple/mace.c1
-rw-r--r--drivers/net/ethernet/apple/macmace.c1
-rw-r--r--drivers/net/ethernet/arc/Kconfig5
-rw-r--r--drivers/net/ethernet/arc/emac_main.c2
-rw-r--r--drivers/net/ethernet/atheros/alx/Makefile1
-rw-r--r--drivers/net/ethernet/atheros/alx/alx.h36
-rw-r--r--drivers/net/ethernet/atheros/alx/ethtool.c59
-rw-r--r--drivers/net/ethernet/atheros/alx/hw.h1
-rw-r--r--drivers/net/ethernet/atheros/alx/main.c564
-rw-r--r--drivers/net/ethernet/atheros/atl1c/atl1c_ethtool.c54
-rw-r--r--drivers/net/ethernet/atheros/atl1c/atl1c_main.c41
-rw-r--r--drivers/net/ethernet/atheros/atl1e/atl1e_ethtool.c62
-rw-r--r--drivers/net/ethernet/atheros/atl1e/atl1e_main.c12
-rw-r--r--drivers/net/ethernet/atheros/atlx/atl1.c81
-rw-r--r--drivers/net/ethernet/atheros/atlx/atl2.c82
-rw-r--r--drivers/net/ethernet/atheros/atlx/atl2.h3
-rw-r--r--drivers/net/ethernet/aurora/nb8800.c17
-rw-r--r--drivers/net/ethernet/broadcom/Kconfig14
-rw-r--r--drivers/net/ethernet/broadcom/b44.c9
-rw-r--r--drivers/net/ethernet/broadcom/bcm63xx_enet.c42
-rw-r--r--drivers/net/ethernet/broadcom/bgmac-bcma.c22
-rw-r--r--drivers/net/ethernet/broadcom/bgmac-platform.c74
-rw-r--r--drivers/net/ethernet/broadcom/bgmac.c32
-rw-r--r--drivers/net/ethernet/broadcom/bgmac.h9
-rw-r--r--drivers/net/ethernet/broadcom/bnx2.c107
-rw-r--r--drivers/net/ethernet/broadcom/bnx2.h6
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x.h6
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c25
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c22
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c7
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c2
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/Makefile2
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.c474
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.h48
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c502
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.h41
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c190
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h1622
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c22
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c346
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h93
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmgenet.c9
-rw-r--r--drivers/net/ethernet/broadcom/sb1250-mac.c12
-rw-r--r--drivers/net/ethernet/broadcom/tg3.c9
-rw-r--r--drivers/net/ethernet/brocade/bna/bna_enet.c8
-rw-r--r--drivers/net/ethernet/brocade/bna/bnad.c7
-rw-r--r--drivers/net/ethernet/brocade/bna/bnad_ethtool.c54
-rw-r--r--drivers/net/ethernet/cadence/Kconfig9
-rw-r--r--drivers/net/ethernet/cadence/Makefile1
-rw-r--r--drivers/net/ethernet/cadence/macb.c360
-rw-r--r--drivers/net/ethernet/cadence/macb.h17
-rw-r--r--drivers/net/ethernet/cadence/macb_pci.c153
-rw-r--r--drivers/net/ethernet/calxeda/xgmac.c37
-rw-r--r--drivers/net/ethernet/cavium/Kconfig14
-rw-r--r--drivers/net/ethernet/cavium/liquidio/Makefile24
-rw-r--r--drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c322
-rw-r--r--drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.h44
-rw-r--r--drivers/net/ethernet/cavium/liquidio/cn23xx_pf_regs.h51
-rw-r--r--drivers/net/ethernet/cavium/liquidio/cn23xx_vf_device.c722
-rw-r--r--drivers/net/ethernet/cavium/liquidio/cn23xx_vf_device.h50
-rw-r--r--drivers/net/ethernet/cavium/liquidio/cn23xx_vf_regs.h274
-rw-r--r--drivers/net/ethernet/cavium/liquidio/cn66xx_device.c49
-rw-r--r--drivers/net/ethernet/cavium/liquidio/cn66xx_device.h41
-rw-r--r--drivers/net/ethernet/cavium/liquidio/cn66xx_regs.h49
-rw-r--r--drivers/net/ethernet/cavium/liquidio/cn68xx_device.c38
-rw-r--r--drivers/net/ethernet/cavium/liquidio/cn68xx_device.h37
-rw-r--r--drivers/net/ethernet/cavium/liquidio/cn68xx_regs.h37
-rw-r--r--drivers/net/ethernet/cavium/liquidio/lio_core.c74
-rw-r--r--drivers/net/ethernet/cavium/liquidio/lio_ethtool.c577
-rw-r--r--drivers/net/ethernet/cavium/liquidio/lio_main.c450
-rw-r--r--drivers/net/ethernet/cavium/liquidio/lio_vf_main.c3251
-rw-r--r--drivers/net/ethernet/cavium/liquidio/liquidio_common.h101
-rw-r--r--drivers/net/ethernet/cavium/liquidio/liquidio_image.h36
-rw-r--r--drivers/net/ethernet/cavium/liquidio/octeon_config.h46
-rw-r--r--drivers/net/ethernet/cavium/liquidio/octeon_console.c156
-rw-r--r--drivers/net/ethernet/cavium/liquidio/octeon_device.c150
-rw-r--r--drivers/net/ethernet/cavium/liquidio/octeon_device.h147
-rw-r--r--drivers/net/ethernet/cavium/liquidio/octeon_droq.c101
-rw-r--r--drivers/net/ethernet/cavium/liquidio/octeon_droq.h18
-rw-r--r--drivers/net/ethernet/cavium/liquidio/octeon_iq.h25
-rw-r--r--drivers/net/ethernet/cavium/liquidio/octeon_mailbox.c318
-rw-r--r--drivers/net/ethernet/cavium/liquidio/octeon_mailbox.h115
-rw-r--r--drivers/net/ethernet/cavium/liquidio/octeon_main.h47
-rw-r--r--drivers/net/ethernet/cavium/liquidio/octeon_mem_ops.c7
-rw-r--r--drivers/net/ethernet/cavium/liquidio/octeon_mem_ops.h5
-rw-r--r--drivers/net/ethernet/cavium/liquidio/octeon_network.h14
-rw-r--r--drivers/net/ethernet/cavium/liquidio/octeon_nic.c5
-rw-r--r--drivers/net/ethernet/cavium/liquidio/octeon_nic.h7
-rw-r--r--drivers/net/ethernet/cavium/liquidio/request_manager.c51
-rw-r--r--drivers/net/ethernet/cavium/liquidio/response_manager.c14
-rw-r--r--drivers/net/ethernet/cavium/liquidio/response_manager.h6
-rw-r--r--drivers/net/ethernet/cavium/octeon/octeon_mgmt.c19
-rw-r--r--drivers/net/ethernet/cavium/thunder/nic.h19
-rw-r--r--drivers/net/ethernet/cavium/thunder/nic_main.c37
-rw-r--r--drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c117
-rw-r--r--drivers/net/ethernet/cavium/thunder/nicvf_main.c74
-rw-r--r--drivers/net/ethernet/cavium/thunder/nicvf_queues.c33
-rw-r--r--drivers/net/ethernet/cavium/thunder/nicvf_queues.h27
-rw-r--r--drivers/net/ethernet/cavium/thunder/q_struct.h8
-rw-r--r--drivers/net/ethernet/cavium/thunder/thunder_bgx.c74
-rw-r--r--drivers/net/ethernet/cavium/thunder/thunder_bgx.h12
-rw-r--r--drivers/net/ethernet/chelsio/cxgb/common.h5
-rw-r--r--drivers/net/ethernet/chelsio/cxgb/cxgb2.c82
-rw-r--r--drivers/net/ethernet/chelsio/cxgb/pm3393.c8
-rw-r--r--drivers/net/ethernet/chelsio/cxgb/vsc7326.c5
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c69
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4.h19
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c12
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c111
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c114
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h17
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/sge.c121
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_msg.h13
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c15
-rw-r--r--drivers/net/ethernet/cirrus/cs89x0.c1
-rw-r--r--drivers/net/ethernet/cirrus/ep93xx_eth.c15
-rw-r--r--drivers/net/ethernet/cirrus/mac89x0.c1
-rw-r--r--drivers/net/ethernet/cisco/enic/enic.h2
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_main.c25
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_res.h2
-rw-r--r--drivers/net/ethernet/davicom/dm9000.c15
-rw-r--r--drivers/net/ethernet/dec/tulip/de2104x.c1
-rw-r--r--drivers/net/ethernet/dec/tulip/de4x5.c1
-rw-r--r--drivers/net/ethernet/dec/tulip/dmfe.c1
-rw-r--r--drivers/net/ethernet/dec/tulip/tulip_core.c1
-rw-r--r--drivers/net/ethernet/dec/tulip/uli526x.c1
-rw-r--r--drivers/net/ethernet/dec/tulip/winbond-840.c1
-rw-r--r--drivers/net/ethernet/dec/tulip/xircom_cb.c1
-rw-r--r--drivers/net/ethernet/dlink/dl2k.c22
-rw-r--r--drivers/net/ethernet/dlink/sundance.c6
-rw-r--r--drivers/net/ethernet/dnet.c1
-rw-r--r--drivers/net/ethernet/ec_bhf.c1
-rw-r--r--drivers/net/ethernet/emulex/benet/be.h2
-rw-r--r--drivers/net/ethernet/emulex/benet/be_main.c22
-rw-r--r--drivers/net/ethernet/ethoc.c45
-rw-r--r--drivers/net/ethernet/fealnx.c1
-rw-r--r--drivers/net/ethernet/freescale/Kconfig8
-rw-r--r--drivers/net/ethernet/freescale/Makefile3
-rw-r--r--drivers/net/ethernet/freescale/dpaa/Kconfig10
-rw-r--r--drivers/net/ethernet/freescale/dpaa/Makefile12
-rw-r--r--drivers/net/ethernet/freescale/dpaa/dpaa_eth.c2756
-rw-r--r--drivers/net/ethernet/freescale/dpaa/dpaa_eth.h185
-rw-r--r--drivers/net/ethernet/freescale/dpaa/dpaa_eth_sysfs.c165
-rw-r--r--drivers/net/ethernet/freescale/dpaa/dpaa_eth_trace.h141
-rw-r--r--drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c417
-rw-r--r--drivers/net/ethernet/freescale/fec_main.c38
-rw-r--r--drivers/net/ethernet/freescale/fec_mpc52xx.c1
-rw-r--r--drivers/net/ethernet/freescale/fman/Kconfig2
-rw-r--r--drivers/net/ethernet/freescale/fman/fman.c15
-rw-r--r--drivers/net/ethernet/freescale/fman/mac.c9
-rw-r--r--drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c51
-rw-r--r--drivers/net/ethernet/freescale/fs_enet/fs_enet.h1
-rw-r--r--drivers/net/ethernet/freescale/gianfar.c9
-rw-r--r--drivers/net/ethernet/freescale/gianfar_ptp.c25
-rw-r--r--drivers/net/ethernet/freescale/ucc_geth.c1
-rw-r--r--drivers/net/ethernet/freescale/ucc_geth_ethtool.c9
-rw-r--r--drivers/net/ethernet/fujitsu/fmvj18x_cs.c1
-rw-r--r--drivers/net/ethernet/hisilicon/hip04_eth.c4
-rw-r--r--drivers/net/ethernet/hisilicon/hisi_femac.c3
-rw-r--r--drivers/net/ethernet/hisilicon/hix5hd2_gmac.c353
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hnae.h19
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c38
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c4
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c73
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h17
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c411
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h31
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c31
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h13
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c41
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.h5
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_enet.c117
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_ethtool.c3
-rw-r--r--drivers/net/ethernet/hp/hp100.c2
-rw-r--r--drivers/net/ethernet/i825xx/82596.c1
-rw-r--r--drivers/net/ethernet/i825xx/ether1.c1
-rw-r--r--drivers/net/ethernet/i825xx/lib82596.c1
-rw-r--r--drivers/net/ethernet/i825xx/sun3_82586.c1
-rw-r--r--drivers/net/ethernet/ibm/ehea/ehea_main.c14
-rw-r--r--drivers/net/ethernet/ibm/emac/core.c10
-rw-r--r--drivers/net/ethernet/ibm/ibmveth.c18
-rw-r--r--drivers/net/ethernet/ibm/ibmvnic.c16
-rw-r--r--drivers/net/ethernet/intel/Kconfig10
-rw-r--r--drivers/net/ethernet/intel/e100.c9
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000_main.c16
-rw-r--r--drivers/net/ethernet/intel/e1000e/netdev.c22
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_netdev.c15
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e.h85
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_adminq.c4
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h51
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_client.c85
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_client.h2
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_common.c316
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_debugfs.c113
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_devids.h2
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_ethtool.c168
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_fcoe.c12
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_main.c1484
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_nvm.c16
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_prototype.h19
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_ptp.c158
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_txrx.c219
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_txrx.h48
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_type.h106
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_virtchnl.h4
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c141
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_adminq.c4
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h51
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_common.c2
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_devids.h2
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_prototype.h4
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_txrx.c156
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_txrx.h46
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_type.h87
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h4
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40evf.h3
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c8
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40evf_main.c260
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c3
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_defines.h3
-rw-r--r--drivers/net/ethernet/intel/igb/igb.h7
-rw-r--r--drivers/net/ethernet/intel/igb/igb_main.c92
-rw-r--r--drivers/net/ethernet/intel/igb/igb_ptp.c16
-rw-r--r--drivers/net/ethernet/intel/igbvf/defines.h3
-rw-r--r--drivers/net/ethernet/intel/igbvf/netdev.c14
-rw-r--r--drivers/net/ethernet/intel/ixgb/ixgb_main.c16
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe.h2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c4
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_common.c62
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_common.h1
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c4
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_main.c179
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c240
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h12
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_type.h70
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c751
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c42
-rw-r--r--drivers/net/ethernet/jme.c12
-rw-r--r--drivers/net/ethernet/korina.c1
-rw-r--r--drivers/net/ethernet/lantiq_etop.c26
-rw-r--r--drivers/net/ethernet/marvell/Kconfig20
-rw-r--r--drivers/net/ethernet/marvell/mv643xx_eth.c179
-rw-r--r--drivers/net/ethernet/marvell/mvneta.c388
-rw-r--r--drivers/net/ethernet/marvell/mvpp2.c96
-rw-r--r--drivers/net/ethernet/marvell/pxa168_eth.c8
-rw-r--r--drivers/net/ethernet/marvell/skge.c13
-rw-r--r--drivers/net/ethernet/marvell/skge.h4
-rw-r--r--drivers/net/ethernet/marvell/sky2.c18
-rw-r--r--drivers/net/ethernet/mediatek/mtk_eth_soc.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/Kconfig2
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/cmd.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_cq.c30
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_ethtool.c103
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_main.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_netdev.c431
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_port.c83
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_rx.c139
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_selftest.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_tx.c74
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/main.c84
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mlx4_en.h71
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mlx4_stats.h10
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/Kconfig2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/Makefile2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/alloc.c66
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/cmd.c163
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en.h108
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c47
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_clock.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_common.c7
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c457
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c105
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_fs.c62
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c27
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_main.c314
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rep.c173
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rx.c12
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c352
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_stats.h50
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tc.c583
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eq.c14
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch.c251
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch.h63
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c237
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c46
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h9
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_core.c405
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_core.h7
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/main.c40
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h14
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/port.c120
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/rl.c65
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/vport.c14
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/wq.c26
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/wq.h18
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/Kconfig45
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/Makefile7
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/cmd.h5
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core.c336
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core.h165
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core_thermal.c442
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/i2c.c582
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/i2c.h60
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/ib.h39
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/item.h50
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/minimal.c97
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/pci.c199
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/pci.h208
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/pci_hw.h229
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/port.h4
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/reg.h888
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/resources.h127
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum.c532
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum.h6
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c149
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c7
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/switchib.c605
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/switchx2.c650
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/trap.h1
-rw-r--r--drivers/net/ethernet/micrel/ks8851.c1
-rw-r--r--drivers/net/ethernet/micrel/ks8851_mll.c1
-rw-r--r--drivers/net/ethernet/micrel/ksz884x.c33
-rw-r--r--drivers/net/ethernet/microchip/enc28j60.c1
-rw-r--r--drivers/net/ethernet/microchip/encx24j600-regmap.c17
-rw-r--r--drivers/net/ethernet/microchip/encx24j600.c19
-rw-r--r--drivers/net/ethernet/moxa/moxart_ether.c1
-rw-r--r--drivers/net/ethernet/myricom/myri10ge/myri10ge.c22
-rw-r--r--drivers/net/ethernet/natsemi/jazzsonic.c1
-rw-r--r--drivers/net/ethernet/natsemi/macsonic.c1
-rw-r--r--drivers/net/ethernet/natsemi/natsemi.c7
-rw-r--r--drivers/net/ethernet/natsemi/ns83820.c13
-rw-r--r--drivers/net/ethernet/natsemi/xtsonic.c1
-rw-r--r--drivers/net/ethernet/neterion/s2io.c9
-rw-r--r--drivers/net/ethernet/neterion/vxge/vxge-config.h2
-rw-r--r--drivers/net/ethernet/neterion/vxge/vxge-main.c9
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_bpf.h1
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_bpf_jit.c92
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_bpf_verifier.c3
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net.h73
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_common.c1255
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h13
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c50
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c94
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_offload.c9
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c45
-rw-r--r--drivers/net/ethernet/netx-eth.c1
-rw-r--r--drivers/net/ethernet/nuvoton/w90p910_ether.c1
-rw-r--r--drivers/net/ethernet/nvidia/forcedeth.c9
-rw-r--r--drivers/net/ethernet/nxp/lpc_eth.c1
-rw-r--r--drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c13
-rw-r--r--drivers/net/ethernet/packetengines/hamachi.c1
-rw-r--r--drivers/net/ethernet/packetengines/yellowfin.c1
-rw-r--r--drivers/net/ethernet/pasemi/pasemi_mac.c12
-rw-r--r--drivers/net/ethernet/qlogic/Kconfig3
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c12
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c7
-rw-r--r--drivers/net/ethernet/qlogic/qed/Makefile1
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed.h37
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_dev.c437
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_hsi.h121
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_int.c36
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_iscsi.c1277
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_iscsi.h52
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_l2.c611
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_l2.h133
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_ll2.c555
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_ll2.h9
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_main.c105
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_mcp.c441
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_mcp.h158
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_ooo.c501
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_ooo.h176
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_reg_addr.h2
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_roce.c2
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_selftest.c101
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_selftest.h10
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_sp.h4
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_sp_commands.c6
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_spq.c121
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_sriov.c398
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_sriov.h22
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_vf.c101
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_vf.h54
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede.h170
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_ethtool.c268
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_main.c1501
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_roce.c4
-rw-r--r--drivers/net/ethernet/qlogic/qla3xxx.c1
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c6
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c4
-rw-r--r--drivers/net/ethernet/qlogic/qlge/qlge_main.c7
-rw-r--r--drivers/net/ethernet/qualcomm/emac/Makefile4
-rw-r--r--drivers/net/ethernet/qualcomm/emac/emac-sgmii-fsm9900.c245
-rw-r--r--drivers/net/ethernet/qualcomm/emac/emac-sgmii-qdf2400.c217
-rw-r--r--drivers/net/ethernet/qualcomm/emac/emac-sgmii-qdf2432.c210
-rw-r--r--drivers/net/ethernet/qualcomm/emac/emac-sgmii.c569
-rw-r--r--drivers/net/ethernet/qualcomm/emac/emac-sgmii.h6
-rw-r--r--drivers/net/ethernet/qualcomm/emac/emac.c22
-rw-r--r--drivers/net/ethernet/qualcomm/qca_framing.h6
-rw-r--r--drivers/net/ethernet/qualcomm/qca_spi.c16
-rw-r--r--drivers/net/ethernet/rdc/r6040.c11
-rw-r--r--drivers/net/ethernet/realtek/8139cp.c8
-rw-r--r--drivers/net/ethernet/realtek/8139too.c13
-rw-r--r--drivers/net/ethernet/realtek/atp.c1
-rw-r--r--drivers/net/ethernet/realtek/r8169.c16
-rw-r--r--drivers/net/ethernet/renesas/Kconfig2
-rw-r--r--drivers/net/ethernet/renesas/ravb_main.c1
-rw-r--r--drivers/net/ethernet/renesas/sh_eth.c2
-rw-r--r--drivers/net/ethernet/rocker/rocker.h1
-rw-r--r--drivers/net/ethernet/rocker/rocker_main.c130
-rw-r--r--drivers/net/ethernet/rocker/rocker_ofdpa.c1
-rw-r--r--drivers/net/ethernet/samsung/Kconfig2
-rw-r--r--drivers/net/ethernet/samsung/sxgbe/sxgbe_common.h1
-rw-r--r--drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c44
-rw-r--r--drivers/net/ethernet/seeq/ether3.c1
-rw-r--r--drivers/net/ethernet/seeq/sgiseeq.c1
-rw-r--r--drivers/net/ethernet/sfc/Kconfig30
-rw-r--r--drivers/net/ethernet/sfc/Makefile7
-rw-r--r--drivers/net/ethernet/sfc/ef10.c242
-rw-r--r--drivers/net/ethernet/sfc/ef10_regs.h103
-rw-r--r--drivers/net/ethernet/sfc/efx.c89
-rw-r--r--drivers/net/ethernet/sfc/enum.h5
-rw-r--r--drivers/net/ethernet/sfc/ethtool.c68
-rw-r--r--drivers/net/ethernet/sfc/falcon/Kconfig21
-rw-r--r--drivers/net/ethernet/sfc/falcon/Makefile6
-rw-r--r--drivers/net/ethernet/sfc/falcon/bitfield.h542
-rw-r--r--drivers/net/ethernet/sfc/falcon/efx.c3350
-rw-r--r--drivers/net/ethernet/sfc/falcon/efx.h277
-rw-r--r--drivers/net/ethernet/sfc/falcon/enum.h171
-rw-r--r--drivers/net/ethernet/sfc/falcon/ethtool.c1343
-rw-r--r--drivers/net/ethernet/sfc/falcon/falcon.c (renamed from drivers/net/ethernet/sfc/falcon.c)1040
-rw-r--r--drivers/net/ethernet/sfc/falcon/falcon_boards.c (renamed from drivers/net/ethernet/sfc/falcon_boards.c)94
-rw-r--r--drivers/net/ethernet/sfc/falcon/farch.c2892
-rw-r--r--drivers/net/ethernet/sfc/falcon/farch_regs.h2932
-rw-r--r--drivers/net/ethernet/sfc/falcon/filter.h272
-rw-r--r--drivers/net/ethernet/sfc/falcon/io.h290
-rw-r--r--drivers/net/ethernet/sfc/falcon/mdio_10g.c (renamed from drivers/net/ethernet/sfc/mdio_10g.c)86
-rw-r--r--drivers/net/ethernet/sfc/falcon/mdio_10g.h (renamed from drivers/net/ethernet/sfc/mdio_10g.h)54
-rw-r--r--drivers/net/ethernet/sfc/falcon/mtd.c133
-rw-r--r--drivers/net/ethernet/sfc/falcon/net_driver.h1464
-rw-r--r--drivers/net/ethernet/sfc/falcon/nic.c527
-rw-r--r--drivers/net/ethernet/sfc/falcon/nic.h513
-rw-r--r--drivers/net/ethernet/sfc/falcon/phy.h (renamed from drivers/net/ethernet/sfc/phy.h)18
-rw-r--r--drivers/net/ethernet/sfc/falcon/qt202x_phy.c (renamed from drivers/net/ethernet/sfc/qt202x_phy.c)140
-rw-r--r--drivers/net/ethernet/sfc/falcon/rx.c974
-rw-r--r--drivers/net/ethernet/sfc/falcon/selftest.c808
-rw-r--r--drivers/net/ethernet/sfc/falcon/selftest.h55
-rw-r--r--drivers/net/ethernet/sfc/falcon/tenxpress.c (renamed from drivers/net/ethernet/sfc/tenxpress.c)108
-rw-r--r--drivers/net/ethernet/sfc/falcon/tx.c649
-rw-r--r--drivers/net/ethernet/sfc/falcon/tx.h27
-rw-r--r--drivers/net/ethernet/sfc/falcon/txc43128_phy.c (renamed from drivers/net/ethernet/sfc/txc43128_phy.c)134
-rw-r--r--drivers/net/ethernet/sfc/falcon/workarounds.h44
-rw-r--r--drivers/net/ethernet/sfc/farch.c182
-rw-r--r--drivers/net/ethernet/sfc/mcdi.c1
-rw-r--r--drivers/net/ethernet/sfc/mcdi.h4
-rw-r--r--drivers/net/ethernet/sfc/mcdi_mon.c4
-rw-r--r--drivers/net/ethernet/sfc/mcdi_pcol.h483
-rw-r--r--drivers/net/ethernet/sfc/mcdi_port.c63
-rw-r--r--drivers/net/ethernet/sfc/net_driver.h90
-rw-r--r--drivers/net/ethernet/sfc/nic.h174
-rw-r--r--drivers/net/ethernet/sfc/ptp.c2
-rw-r--r--drivers/net/ethernet/sfc/rx.c27
-rw-r--r--drivers/net/ethernet/sfc/siena.c4
-rw-r--r--drivers/net/ethernet/sfc/tx.c1055
-rw-r--r--drivers/net/ethernet/sfc/tx.h27
-rw-r--r--drivers/net/ethernet/sfc/tx_tso.c451
-rw-r--r--drivers/net/ethernet/sfc/workarounds.h21
-rw-r--r--drivers/net/ethernet/sgi/ioc3-eth.c1
-rw-r--r--drivers/net/ethernet/sgi/meth.c1
-rw-r--r--drivers/net/ethernet/silan/sc92031.c1
-rw-r--r--drivers/net/ethernet/sis/sis190.c1
-rw-r--r--drivers/net/ethernet/sis/sis900.c1
-rw-r--r--drivers/net/ethernet/smsc/epic100.c1
-rw-r--r--drivers/net/ethernet/smsc/smc911x.c1
-rw-r--r--drivers/net/ethernet/smsc/smc9194.c1
-rw-r--r--drivers/net/ethernet/smsc/smc91c92_cs.c1
-rw-r--r--drivers/net/ethernet/smsc/smc91x.c6
-rw-r--r--drivers/net/ethernet/smsc/smc91x.h80
-rw-r--r--drivers/net/ethernet/smsc/smsc911x.c14
-rw-r--r--drivers/net/ethernet/smsc/smsc9420.c10
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/Kconfig13
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/Makefile1
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/chain_mode.c55
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/common.h11
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/descs.h20
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/descs_com.h48
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-generic.c3
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-oxnas.c217
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c103
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c4
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c83
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac1000.h2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c6
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c26
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c7
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c7
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4.h3
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c6
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c66
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c37
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/enh_desc.c57
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/norm_desc.c48
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/ring_mode.c39
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac.h2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c122
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c392
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c137
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c41
-rw-r--r--drivers/net/ethernet/sun/cassini.c7
-rw-r--r--drivers/net/ethernet/sun/ldmvsw.c5
-rw-r--r--drivers/net/ethernet/sun/niu.c7
-rw-r--r--drivers/net/ethernet/sun/sunbmac.c1
-rw-r--r--drivers/net/ethernet/sun/sungem.c11
-rw-r--r--drivers/net/ethernet/sun/sunhme.c1
-rw-r--r--drivers/net/ethernet/sun/sunhme.h2
-rw-r--r--drivers/net/ethernet/sun/sunqe.c1
-rw-r--r--drivers/net/ethernet/sun/sunvnet.c5
-rw-r--r--drivers/net/ethernet/sun/sunvnet_common.c26
-rw-r--r--drivers/net/ethernet/sun/sunvnet_common.h3
-rw-r--r--drivers/net/ethernet/synopsys/dwc_eth_qos.c2
-rw-r--r--drivers/net/ethernet/tehuti/tehuti.c14
-rw-r--r--drivers/net/ethernet/tehuti/tehuti.h3
-rw-r--r--drivers/net/ethernet/ti/Kconfig11
-rw-r--r--drivers/net/ethernet/ti/Makefile3
-rw-r--r--drivers/net/ethernet/ti/cpmac.c1
-rw-r--r--drivers/net/ethernet/ti/cpsw.c441
-rw-r--r--drivers/net/ethernet/ti/cpsw.h2
-rw-r--r--drivers/net/ethernet/ti/cpts.c233
-rw-r--r--drivers/net/ethernet/ti/cpts.h80
-rw-r--r--drivers/net/ethernet/ti/davinci_cpdma.c561
-rw-r--r--drivers/net/ethernet/ti/davinci_cpdma.h6
-rw-r--r--drivers/net/ethernet/ti/netcp.h2
-rw-r--r--drivers/net/ethernet/ti/netcp_core.c52
-rw-r--r--drivers/net/ethernet/ti/netcp_ethss.c503
-rw-r--r--drivers/net/ethernet/ti/tlan.c1
-rw-r--r--drivers/net/ethernet/tile/Kconfig2
-rw-r--r--drivers/net/ethernet/tile/tilegx.c21
-rw-r--r--drivers/net/ethernet/tile/tilepro.c27
-rw-r--r--drivers/net/ethernet/toshiba/ps3_gelic_net.c23
-rw-r--r--drivers/net/ethernet/toshiba/ps3_gelic_net.h1
-rw-r--r--drivers/net/ethernet/toshiba/ps3_gelic_wireless.c1
-rw-r--r--drivers/net/ethernet/toshiba/spider_net.c24
-rw-r--r--drivers/net/ethernet/toshiba/tc35815.c1
-rw-r--r--drivers/net/ethernet/tundra/tsi108_eth.c1
-rw-r--r--drivers/net/ethernet/via/via-rhine.c1
-rw-r--r--drivers/net/ethernet/via/via-velocity.c11
-rw-r--r--drivers/net/ethernet/wiznet/w5100.c1
-rw-r--r--drivers/net/ethernet/wiznet/w5300.c1
-rw-r--r--drivers/net/ethernet/xilinx/ll_temac_main.c18
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_axienet_main.c20
-rw-r--r--drivers/net/ethernet/xircom/xirc2ps_cs.c1
-rw-r--r--drivers/net/ethernet/xscale/ixp4xx_eth.c8
-rw-r--r--drivers/net/fddi/skfp/hwmtm.c12
-rw-r--r--drivers/net/fddi/skfp/pmf.c2
-rw-r--r--drivers/net/fddi/skfp/skfddi.c1
-rw-r--r--drivers/net/fddi/skfp/smt.c2
-rw-r--r--drivers/net/fjes/Makefile2
-rw-r--r--drivers/net/fjes/fjes.h16
-rw-r--r--drivers/net/fjes/fjes_debugfs.c117
-rw-r--r--drivers/net/fjes/fjes_ethtool.c181
-rw-r--r--drivers/net/fjes/fjes_hw.c171
-rw-r--r--drivers/net/fjes/fjes_hw.h34
-rw-r--r--drivers/net/fjes/fjes_main.c65
-rw-r--r--drivers/net/fjes/fjes_trace.c30
-rw-r--r--drivers/net/fjes/fjes_trace.h380
-rw-r--r--drivers/net/geneve.c713
-rw-r--r--drivers/net/gtp.c32
-rw-r--r--drivers/net/hippi/rrunner.c1
-rw-r--r--drivers/net/hyperv/hyperv_net.h4
-rw-r--r--drivers/net/hyperv/netvsc.c15
-rw-r--r--drivers/net/hyperv/netvsc_drv.c14
-rw-r--r--drivers/net/hyperv/rndis_filter.c6
-rw-r--r--drivers/net/ieee802154/adf7242.c2
-rw-r--r--drivers/net/ieee802154/at86rf230.c16
-rw-r--r--drivers/net/ieee802154/atusb.c81
-rw-r--r--drivers/net/ieee802154/atusb.h11
-rw-r--r--drivers/net/ieee802154/fakelb.c2
-rw-r--r--drivers/net/ipvlan/ipvlan.h2
-rw-r--r--drivers/net/ipvlan/ipvlan_main.c8
-rw-r--r--drivers/net/irda/w83977af_ir.c675
-rw-r--r--drivers/net/macsec.c49
-rw-r--r--drivers/net/macvlan.c24
-rw-r--r--drivers/net/macvtap.c11
-rw-r--r--drivers/net/mii.c197
-rw-r--r--drivers/net/nlmon.c20
-rw-r--r--drivers/net/ntb_netdev.c3
-rw-r--r--drivers/net/phy/Kconfig22
-rw-r--r--drivers/net/phy/Makefile2
-rw-r--r--drivers/net/phy/aquantia.c28
-rw-r--r--drivers/net/phy/at803x.c13
-rw-r--r--drivers/net/phy/bcm-cygnus.c5
-rw-r--r--drivers/net/phy/bcm-phy-lib.c187
-rw-r--r--drivers/net/phy/bcm-phy-lib.h15
-rw-r--r--drivers/net/phy/bcm7xxx.c92
-rw-r--r--drivers/net/phy/broadcom.c139
-rw-r--r--drivers/net/phy/dp83640.c14
-rw-r--r--drivers/net/phy/dp83867.c28
-rw-r--r--drivers/net/phy/icplus.c6
-rw-r--r--drivers/net/phy/intel-xway.c24
-rw-r--r--drivers/net/phy/marvell.c10
-rw-r--r--drivers/net/phy/mdio-mux-mmioreg.c60
-rw-r--r--drivers/net/phy/mdio_bus.c11
-rw-r--r--drivers/net/phy/mdio_device.c2
-rw-r--r--drivers/net/phy/meson-gxl.c81
-rw-r--r--drivers/net/phy/micrel.c30
-rw-r--r--drivers/net/phy/microchip.c39
-rw-r--r--drivers/net/phy/mscc.c358
-rw-r--r--drivers/net/phy/national.c2
-rw-r--r--drivers/net/phy/phy.c136
-rw-r--r--drivers/net/phy/phy_device.c171
-rw-r--r--drivers/net/phy/phy_led_triggers.c134
-rw-r--r--drivers/net/phy/smsc.c18
-rw-r--r--drivers/net/phy/vitesse.c14
-rw-r--r--drivers/net/plip/plip.c1
-rw-r--r--drivers/net/ppp/ppp_generic.c2
-rw-r--r--drivers/net/ppp/pppoe.c2
-rw-r--r--drivers/net/rionet.c15
-rw-r--r--drivers/net/sb1000.c1
-rw-r--r--drivers/net/slip/slip.c11
-rw-r--r--drivers/net/team/team.c25
-rw-r--r--drivers/net/tun.c59
-rw-r--r--drivers/net/usb/Kconfig5
-rw-r--r--drivers/net/usb/asix_devices.c4
-rw-r--r--drivers/net/usb/ax88172a.c10
-rw-r--r--drivers/net/usb/ax88179_178a.c4
-rw-r--r--drivers/net/usb/catc.c1
-rw-r--r--drivers/net/usb/cdc-phonet.c12
-rw-r--r--drivers/net/usb/cdc_ether.c28
-rw-r--r--drivers/net/usb/cdc_ncm.c5
-rw-r--r--drivers/net/usb/kaweth.c1
-rw-r--r--drivers/net/usb/lan78xx.c462
-rw-r--r--drivers/net/usb/lan78xx.h14
-rw-r--r--drivers/net/usb/pegasus.c1
-rw-r--r--drivers/net/usb/r8152.c24
-rw-r--r--drivers/net/usb/rtl8150.c1
-rw-r--r--drivers/net/usb/sierra_net.c13
-rw-r--r--drivers/net/usb/smsc75xx.c4
-rw-r--r--drivers/net/usb/usbnet.c4
-rw-r--r--drivers/net/veth.c17
-rw-r--r--drivers/net/virtio_net.c425
-rw-r--r--drivers/net/vmxnet3/vmxnet3_drv.c7
-rw-r--r--drivers/net/vmxnet3/vmxnet3_int.h3
-rw-r--r--drivers/net/vrf.c29
-rw-r--r--drivers/net/vxlan.c350
-rw-r--r--drivers/net/wan/c101.c1
-rw-r--r--drivers/net/wan/cosa.c1
-rw-r--r--drivers/net/wan/dscc4.c1
-rw-r--r--drivers/net/wan/farsync.c1
-rw-r--r--drivers/net/wan/fsl_ucc_hdlc.c1
-rw-r--r--drivers/net/wan/hdlc.c11
-rw-r--r--drivers/net/wan/hdlc_fr.c3
-rw-r--r--drivers/net/wan/hostess_sv11.c1
-rw-r--r--drivers/net/wan/ixp4xx_hss.c1
-rw-r--r--drivers/net/wan/lmc/lmc_main.c1
-rw-r--r--drivers/net/wan/lmc/lmc_media.c97
-rw-r--r--drivers/net/wan/n2.c1
-rw-r--r--drivers/net/wan/pc300too.c1
-rw-r--r--drivers/net/wan/pci200syn.c1
-rw-r--r--drivers/net/wan/sbni.c1
-rw-r--r--drivers/net/wan/sealevel.c1
-rw-r--r--drivers/net/wan/wanxl.c1
-rw-r--r--drivers/net/wan/x25_asy.c5
-rw-r--r--drivers/net/wimax/i2400m/netdev.c22
-rw-r--r--drivers/net/wireless/Kconfig13
-rw-r--r--drivers/net/wireless/ath/Makefile2
-rw-r--r--drivers/net/wireless/ath/ath.h6
-rw-r--r--drivers/net/wireless/ath/ath10k/core.c21
-rw-r--r--drivers/net/wireless/ath/ath10k/core.h44
-rw-r--r--drivers/net/wireless/ath/ath10k/debug.h22
-rw-r--r--drivers/net/wireless/ath/ath10k/debugfs_sta.c13
-rw-r--r--drivers/net/wireless/ath/ath10k/htt.c2
-rw-r--r--drivers/net/wireless/ath/ath10k/htt.h31
-rw-r--r--drivers/net/wireless/ath/ath10k/htt_rx.c137
-rw-r--r--drivers/net/wireless/ath/ath10k/htt_tx.c129
-rw-r--r--drivers/net/wireless/ath/ath10k/hw.c142
-rw-r--r--drivers/net/wireless/ath/ath10k/hw.h28
-rw-r--r--drivers/net/wireless/ath/ath10k/mac.c192
-rw-r--r--drivers/net/wireless/ath/ath10k/pci.c2
-rw-r--r--drivers/net/wireless/ath/ath10k/spectral.c2
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi-ops.h6
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi-tlv.c77
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi.c64
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi.h18
-rw-r--r--drivers/net/wireless/ath/ath5k/debug.c1
-rw-r--r--drivers/net/wireless/ath/ath6kl/sdio.c15
-rw-r--r--drivers/net/wireless/ath/ath6kl/wmi.c8
-rw-r--r--drivers/net/wireless/ath/ath9k/ahb.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/ath9k.h27
-rw-r--r--drivers/net/wireless/ath/ath9k/channel.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/common-spectral.c8
-rw-r--r--drivers/net/wireless/ath/ath9k/debug.c14
-rw-r--r--drivers/net/wireless/ath/ath9k/debug.h2
-rw-r--r--drivers/net/wireless/ath/ath9k/debug_sta.c4
-rw-r--r--drivers/net/wireless/ath/ath9k/hif_usb.c9
-rw-r--r--drivers/net/wireless/ath/ath9k/hif_usb.h2
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_hst.c6
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/init.c50
-rw-r--r--drivers/net/wireless/ath/ath9k/main.c9
-rw-r--r--drivers/net/wireless/ath/ath9k/pci.c7
-rw-r--r--drivers/net/wireless/ath/ath9k/recv.c17
-rw-r--r--drivers/net/wireless/ath/ath9k/rng.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/xmit.c340
-rw-r--r--drivers/net/wireless/ath/main.c7
-rw-r--r--drivers/net/wireless/ath/regd.c3
-rw-r--r--drivers/net/wireless/ath/regd.h1
-rw-r--r--drivers/net/wireless/ath/wil6210/Makefile2
-rw-r--r--drivers/net/wireless/ath/wil6210/cfg80211.c129
-rw-r--r--drivers/net/wireless/ath/wil6210/main.c100
-rw-r--r--drivers/net/wireless/ath/wil6210/netdev.c19
-rw-r--r--drivers/net/wireless/ath/wil6210/p2p.c160
-rw-r--r--drivers/net/wireless/ath/wil6210/pmc.c55
-rw-r--r--drivers/net/wireless/ath/wil6210/txrx.c112
-rw-r--r--drivers/net/wireless/ath/wil6210/wil6210.h25
-rw-r--r--drivers/net/wireless/ath/wil6210/wil_crash_dump.c6
-rw-r--r--drivers/net/wireless/ath/wil6210/wmi.c160
-rw-r--r--drivers/net/wireless/ath/wil6210/wmi.h586
-rw-r--r--drivers/net/wireless/atmel/atmel.c13
-rw-r--r--drivers/net/wireless/broadcom/b43/main.c2
-rw-r--r--drivers/net/wireless/broadcom/b43legacy/main.c2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/Makefile5
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcdc.c12
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/bus.h10
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c418
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h4
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c8
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c4
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.h2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h23
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c15
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.h1
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c44
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.h4
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c171
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/pno.c243
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/pno.h40
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/proto.c2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/proto.h9
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c1
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/Makefile1
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_qmath.c5
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/include/brcm_hw_ids.h1
-rw-r--r--drivers/net/wireless/cisco/airo.c14
-rw-r--r--drivers/net/wireless/intel/ipw2x00/ipw2100.c3
-rw-r--r--drivers/net/wireless/intel/ipw2x00/ipw2200.c8
-rw-r--r--drivers/net/wireless/intel/ipw2x00/libipw.h1
-rw-r--r--drivers/net/wireless/intel/ipw2x00/libipw_module.c9
-rw-r--r--drivers/net/wireless/intel/ipw2x00/libipw_rx.c2
-rw-r--r--drivers/net/wireless/intel/iwlegacy/Makefile2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/Makefile2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/Makefile2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/calib.c3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-fw-file.h6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/Makefile2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/fw-api-rx.h26
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/fw-api-sta.h9
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h1
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c100
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c24
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c86
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mvm.h6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/ops.c3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/sta.c37
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/sta.h1
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/trans.c29
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/tx.c2
-rw-r--r--drivers/net/wireless/intersil/hostap/hostap_80211_rx.c2
-rw-r--r--drivers/net/wireless/intersil/hostap/hostap_main.c15
-rw-r--r--drivers/net/wireless/intersil/orinoco/Makefile3
-rw-r--r--drivers/net/wireless/intersil/orinoco/main.c6
-rw-r--r--drivers/net/wireless/intersil/p54/fwio.c2
-rw-r--r--drivers/net/wireless/intersil/prism54/islpci_dev.c1
-rw-r--r--drivers/net/wireless/mac80211_hwsim.c111
-rw-r--r--drivers/net/wireless/marvell/libertas/main.c1
-rw-r--r--drivers/net/wireless/marvell/mwifiex/README23
-rw-r--r--drivers/net/wireless/marvell/mwifiex/cfg80211.c24
-rw-r--r--drivers/net/wireless/marvell/mwifiex/cmdevt.c5
-rw-r--r--drivers/net/wireless/marvell/mwifiex/fw.h11
-rw-r--r--drivers/net/wireless/marvell/mwifiex/init.c1
-rw-r--r--drivers/net/wireless/marvell/mwifiex/main.c116
-rw-r--r--drivers/net/wireless/marvell/mwifiex/main.h45
-rw-r--r--drivers/net/wireless/marvell/mwifiex/pcie.c183
-rw-r--r--drivers/net/wireless/marvell/mwifiex/pcie.h2
-rw-r--r--drivers/net/wireless/marvell/mwifiex/scan.c12
-rw-r--r--drivers/net/wireless/marvell/mwifiex/sdio.c157
-rw-r--r--drivers/net/wireless/marvell/mwifiex/sdio.h9
-rw-r--r--drivers/net/wireless/marvell/mwifiex/sta_cmd.c87
-rw-r--r--drivers/net/wireless/marvell/mwifiex/uap_cmd.c8
-rw-r--r--drivers/net/wireless/marvell/mwifiex/usb.c63
-rw-r--r--drivers/net/wireless/marvell/mwifiex/usb.h2
-rw-r--r--drivers/net/wireless/marvell/mwifiex/wmm.c31
-rw-r--r--drivers/net/wireless/mediatek/mt7601u/Makefile2
-rw-r--r--drivers/net/wireless/mediatek/mt7601u/init.c14
-rw-r--r--drivers/net/wireless/mediatek/mt7601u/regs.h3
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2400pci.c5
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2500pci.c5
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2500usb.c5
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2800lib.c67
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2800usb.c5
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2x00.h1
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2x00dev.c25
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt61pci.c5
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt73usb.c5
-rw-r--r--drivers/net/wireless/ray_cs.c1
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h27
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c2
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c111
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/Makefile2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/base.c8
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/btcoexist/Makefile2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/core.c12
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/pci.c18
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/pci.h4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/ps.c36
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8188ee/Makefile2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8188ee/dm.h4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8188ee/fw.h4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.h4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8188ee/led.c4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192c/Makefile2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192c/dm_common.c4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192c/dm_common.h4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192c/fw_common.h4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192c/main.c4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192c/phy_common.c4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192c/phy_common.h4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ce/Makefile2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ce/def.h4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ce/dm.c4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ce/dm.h4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ce/hw.c4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ce/hw.h4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ce/led.c4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ce/led.h4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ce/phy.c4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ce/phy.h4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ce/reg.h4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ce/rf.c4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ce/rf.h4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.h4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ce/table.c4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ce/table.h4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.c4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.h4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192cu/Makefile2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192cu/def.h4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192cu/dm.c4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192cu/dm.h4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.h4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192cu/led.c4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192cu/led.h4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192cu/mac.c4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192cu/mac.h4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192cu/phy.c4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192cu/phy.h4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192cu/reg.h4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192cu/rf.c4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192cu/rf.h4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.h4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192cu/table.c4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192cu/table.h4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.c4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.h4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192de/Makefile2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192de/def.h4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192de/dm.c4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192de/dm.h4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192de/fw.c4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192de/fw.h4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.c4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.h4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192de/led.c4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192de/led.h4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.h4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192de/reg.h4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192de/rf.c4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192de/rf.h4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.c4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.h4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192de/table.c4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192de/table.h4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.h4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ee/Makefile2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192se/Makefile2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192se/def.h4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192se/dm.c4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192se/dm.h4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192se/fw.c4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192se/fw.h4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192se/hw.c4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192se/hw.h4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192se/led.c4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192se/led.h4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192se/phy.c4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192se/phy.h4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192se/reg.h4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192se/rf.c4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192se/rf.h4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.h4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192se/table.c4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192se/table.h4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192se/trx.c4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192se/trx.h4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723ae/Makefile2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723ae/dm.h4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723ae/fw.c2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723ae/fw.h4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hal_bt_coexist.h4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723ae/led.c4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723be/Makefile2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723com/Makefile2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8821ae/Makefile2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/usb.c4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/usb.h4
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_mac80211.c175
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_mgmt.c134
-rw-r--r--drivers/net/wireless/rsi/rsi_main.h4
-rw-r--r--drivers/net/wireless/rsi/rsi_mgmt.h32
-rw-r--r--drivers/net/wireless/st/cw1200/wsm.c24
-rw-r--r--drivers/net/wireless/ti/wl1251/Makefile2
-rw-r--r--drivers/net/wireless/ti/wl18xx/event.c28
-rw-r--r--drivers/net/wireless/ti/wl18xx/event.h1
-rw-r--r--drivers/net/wireless/ti/wl18xx/main.c3
-rw-r--r--drivers/net/wireless/ti/wlcore/Makefile2
-rw-r--r--drivers/net/wireless/ti/wlcore/acx.c5
-rw-r--r--drivers/net/wireless/ti/wlcore/acx.h3
-rw-r--r--drivers/net/wireless/ti/wlcore/main.c9
-rw-r--r--drivers/net/wireless/wl3501_cs.c1
-rw-r--r--drivers/net/wireless/zydas/zd1201.c1
-rw-r--r--drivers/net/xen-netback/interface.c5
-rw-r--r--drivers/net/xen-netback/xenbus.c64
-rw-r--r--drivers/net/xen-netfront.c69
-rw-r--r--drivers/nfc/mei_phy.c42
-rw-r--r--drivers/nfc/microread/mei.c23
-rw-r--r--drivers/nfc/pn544/mei.c23
-rw-r--r--drivers/ntb/hw/amd/ntb_hw_amd.c25
-rw-r--r--drivers/ntb/hw/amd/ntb_hw_amd.h5
-rw-r--r--drivers/ntb/hw/intel/ntb_hw_intel.c662
-rw-r--r--drivers/ntb/hw/intel/ntb_hw_intel.h48
-rw-r--r--drivers/ntb/ntb_transport.c29
-rw-r--r--drivers/nvdimm/Kconfig2
-rw-r--r--drivers/nvdimm/claim.c46
-rw-r--r--drivers/nvdimm/core.c29
-rw-r--r--drivers/nvdimm/dimm.c2
-rw-r--r--drivers/nvdimm/dimm_devs.c7
-rw-r--r--drivers/nvdimm/e820.c12
-rw-r--r--drivers/nvdimm/label.c2
-rw-r--r--drivers/nvdimm/namespace_devs.c8
-rw-r--r--drivers/nvdimm/nd.h12
-rw-r--r--drivers/nvdimm/pfn_devs.c2
-rw-r--r--drivers/nvdimm/pmem.c21
-rw-r--r--drivers/nvdimm/region_devs.c2
-rw-r--r--drivers/nvme/host/Kconfig17
-rw-r--r--drivers/nvme/host/Makefile3
-rw-r--r--drivers/nvme/host/core.c131
-rw-r--r--drivers/nvme/host/fabrics.c33
-rw-r--r--drivers/nvme/host/fc.c2586
-rw-r--r--drivers/nvme/host/lightnvm.c223
-rw-r--r--drivers/nvme/host/nvme.h51
-rw-r--r--drivers/nvme/host/pci.c76
-rw-r--r--drivers/nvme/host/rdma.c85
-rw-r--r--drivers/nvme/host/scsi.c11
-rw-r--r--drivers/nvme/target/Kconfig24
-rw-r--r--drivers/nvme/target/Makefile4
-rw-r--r--drivers/nvme/target/admin-cmd.c3
-rw-r--r--drivers/nvme/target/configfs.c30
-rw-r--r--drivers/nvme/target/core.c22
-rw-r--r--drivers/nvme/target/fabrics-cmd.c14
-rw-r--r--drivers/nvme/target/fc.c2288
-rw-r--r--drivers/nvme/target/fcloop.c1148
-rw-r--r--drivers/nvme/target/io-cmd.c39
-rw-r--r--drivers/nvme/target/loop.c26
-rw-r--r--drivers/nvme/target/nvmet.h8
-rw-r--r--drivers/nvme/target/rdma.c11
-rw-r--r--drivers/nvmem/Kconfig22
-rw-r--r--drivers/nvmem/Makefile4
-rw-r--r--drivers/nvmem/bcm-ocotp.c335
-rw-r--r--drivers/nvmem/lpc18xx_otp.c124
-rw-r--r--drivers/of/base.c9
-rw-r--r--drivers/of/fdt.c19
-rw-r--r--drivers/of/irq.c1
-rw-r--r--drivers/of/of_numa.c7
-rw-r--r--drivers/of/of_pci.c21
-rw-r--r--drivers/of/overlay.c47
-rw-r--r--drivers/of/platform.c6
-rw-r--r--drivers/of/resolver.c358
-rw-r--r--drivers/oprofile/buffer_sync.c2
-rw-r--r--drivers/oprofile/nmi_timer_int.c58
-rw-r--r--drivers/pci/access.c16
-rw-r--r--drivers/pci/bus.c2
-rw-r--r--drivers/pci/ecam.c12
-rw-r--r--drivers/pci/host/Kconfig16
-rw-r--r--drivers/pci/host/Makefile19
-rw-r--r--drivers/pci/host/pci-hyperv.c100
-rw-r--r--drivers/pci/host/pci-layerscape.c20
-rw-r--r--drivers/pci/host/pci-rcar-gen2.c2
-rw-r--r--drivers/pci/host/pci-tegra.c160
-rw-r--r--drivers/pci/host/pci-thunder-ecam.c9
-rw-r--r--drivers/pci/host/pci-thunder-pem.c94
-rw-r--r--drivers/pci/host/pci-xgene-msi.c69
-rw-r--r--drivers/pci/host/pci-xgene.c126
-rw-r--r--drivers/pci/host/pcie-altera.c10
-rw-r--r--drivers/pci/host/pcie-hisi.c107
-rw-r--r--drivers/pci/host/pcie-iproc-bcma.c1
-rw-r--r--drivers/pci/host/pcie-iproc-msi.c1
-rw-r--r--drivers/pci/host/pcie-iproc-platform.c28
-rw-r--r--drivers/pci/host/pcie-iproc.c949
-rw-r--r--drivers/pci/host/pcie-iproc.h45
-rw-r--r--drivers/pci/host/pcie-qcom.c177
-rw-r--r--drivers/pci/host/pcie-rcar.c5
-rw-r--r--drivers/pci/host/pcie-rockchip.c284
-rw-r--r--drivers/pci/host/pcie-spear13xx.c6
-rw-r--r--drivers/pci/host/vmd.c30
-rw-r--r--drivers/pci/hotplug/acpiphp_glue.c31
-rw-r--r--drivers/pci/hotplug/cpqphp_core.c3
-rw-r--r--drivers/pci/hotplug/pci_hotplug_core.c10
-rw-r--r--drivers/pci/hotplug/pciehp_core.c9
-rw-r--r--drivers/pci/hotplug/pciehp_ctrl.c8
-rw-r--r--drivers/pci/hotplug/pciehp_hpc.c21
-rw-r--r--drivers/pci/hotplug/rpadlpar_core.c10
-rw-r--r--drivers/pci/hotplug/s390_pci_hpc.c7
-rw-r--r--drivers/pci/iov.c70
-rw-r--r--drivers/pci/msi.c81
-rw-r--r--drivers/pci/pci-acpi.c100
-rw-r--r--drivers/pci/pci-mid.c2
-rw-r--r--drivers/pci/pci-sysfs.c2
-rw-r--r--drivers/pci/pci.c138
-rw-r--r--drivers/pci/pci.h19
-rw-r--r--drivers/pci/pcie/aer/aerdrv.c18
-rw-r--r--drivers/pci/pcie/aspm.c22
-rw-r--r--drivers/pci/pcie/pme.c29
-rw-r--r--drivers/pci/pcie/portdrv_core.c3
-rw-r--r--drivers/pci/pcie/portdrv_pci.c13
-rw-r--r--drivers/pci/probe.c248
-rw-r--r--drivers/pci/quirks.c182
-rw-r--r--drivers/pci/remove.c2
-rw-r--r--drivers/pci/rom.c5
-rw-r--r--drivers/pci/setup-res.c48
-rw-r--r--drivers/pci/xen-pcifront.c6
-rw-r--r--drivers/pcmcia/m32r_pcc.c41
-rw-r--r--drivers/phy/Kconfig33
-rw-r--r--drivers/phy/Makefile3
-rw-r--r--drivers/phy/phy-berlin-sata.c3
-rw-r--r--drivers/phy/phy-brcm-sata.c6
-rw-r--r--drivers/phy/phy-da8xx-usb.c5
-rw-r--r--drivers/phy/phy-exynos-mipi-video.c15
-rw-r--r--drivers/phy/phy-exynos4210-usb2.c4
-rw-r--r--drivers/phy/phy-exynos4x12-usb2.c4
-rw-r--r--drivers/phy/phy-exynos5250-usb2.c2
-rw-r--r--drivers/phy/phy-meson8b-usb2.c286
-rw-r--r--drivers/phy/phy-miphy365x.c625
-rw-r--r--drivers/phy/phy-qcom-ufs-i.h7
-rw-r--r--drivers/phy/phy-qcom-ufs-qmp-14nm.c72
-rw-r--r--drivers/phy/phy-qcom-ufs-qmp-20nm.c65
-rw-r--r--drivers/phy/phy-qcom-ufs.c273
-rw-r--r--drivers/phy/phy-rcar-gen3-usb2.c118
-rw-r--r--drivers/phy/phy-rockchip-emmc.c2
-rw-r--r--drivers/phy/phy-rockchip-inno-usb2.c607
-rw-r--r--drivers/phy/phy-s5pv210-usb2.c4
-rw-r--r--drivers/phy/phy-stih41x-usb.c188
-rw-r--r--drivers/phy/phy-sun4i-usb.c14
-rw-r--r--drivers/phy/phy-ti-pipe3.c10
-rw-r--r--drivers/phy/phy-twl4030-usb.c3
-rw-r--r--drivers/phy/tegra/xusb-tegra124.c3
-rw-r--r--drivers/phy/tegra/xusb.c10
-rw-r--r--drivers/pinctrl/Kconfig26
-rw-r--r--drivers/pinctrl/Makefile2
-rw-r--r--drivers/pinctrl/bcm/Kconfig1
-rw-r--r--drivers/pinctrl/bcm/pinctrl-bcm2835.c171
-rw-r--r--drivers/pinctrl/devicetree.c144
-rw-r--r--drivers/pinctrl/devicetree.h23
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx.c8
-rw-r--r--drivers/pinctrl/intel/pinctrl-baytrail.c2
-rw-r--r--drivers/pinctrl/intel/pinctrl-cherryview.c2
-rw-r--r--drivers/pinctrl/intel/pinctrl-intel.c2
-rw-r--r--drivers/pinctrl/intel/pinctrl-merrifield.c41
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mt6397.c6
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mtk-mt8173.h2
-rw-r--r--drivers/pinctrl/meson/Makefile3
-rw-r--r--drivers/pinctrl/meson/pinctrl-meson-gxl.c589
-rw-r--r--drivers/pinctrl/meson/pinctrl-meson.c8
-rw-r--r--drivers/pinctrl/meson/pinctrl-meson.h2
-rw-r--r--drivers/pinctrl/nomadik/pinctrl-nomadik-db8500.c23
-rw-r--r--drivers/pinctrl/pinconf-generic.c2
-rw-r--r--drivers/pinctrl/pinctrl-at91.c21
-rw-r--r--drivers/pinctrl/pinctrl-da850-pupd.c210
-rw-r--r--drivers/pinctrl/pinctrl-oxnas.c605
-rw-r--r--drivers/pinctrl/pinctrl-rockchip.c86
-rw-r--r--drivers/pinctrl/pinctrl-single.c217
-rw-r--r--drivers/pinctrl/pinctrl-st.c4
-rw-r--r--drivers/pinctrl/pinctrl-sx150x.c1275
-rw-r--r--drivers/pinctrl/pinctrl-zynq.c6
-rw-r--r--drivers/pinctrl/qcom/Kconfig9
-rw-r--r--drivers/pinctrl/qcom/Makefile1
-rw-r--r--drivers/pinctrl/qcom/pinctrl-msm8994.c1379
-rw-r--r--drivers/pinctrl/samsung/pinctrl-exynos.c45
-rw-r--r--drivers/pinctrl/samsung/pinctrl-exynos.h11
-rw-r--r--drivers/pinctrl/samsung/pinctrl-s3c24xx.c37
-rw-r--r--drivers/pinctrl/samsung/pinctrl-s3c64xx.c40
-rw-r--r--drivers/pinctrl/samsung/pinctrl-samsung.c40
-rw-r--r--drivers/pinctrl/samsung/pinctrl-samsung.h10
-rw-r--r--drivers/pinctrl/sh-pfc/core.c15
-rw-r--r--drivers/pinctrl/sh-pfc/core.h4
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-r8a7778.c342
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-r8a7795.c616
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-r8a7796.c576
-rw-r--r--drivers/pinctrl/sh-pfc/pinctrl.c3
-rw-r--r--drivers/pinctrl/sh-pfc/sh_pfc.h14
-rw-r--r--drivers/pinctrl/stm32/pinctrl-stm32f429.c6
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-gr8.c9
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sun4i-a10.c9
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sun5i-a10s.c9
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sun5i-a13.c9
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sun6i-a31-r.c10
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sun6i-a31.c9
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sun6i-a31s.c9
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sun7i-a20.c9
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sun8i-a23-r.c11
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sun8i-a23.c10
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sun8i-a33.c9
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sun8i-a83t.c9
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sun9i-a80.c9
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sunxi.c506
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sunxi.h8
-rw-r--r--drivers/pinctrl/vt8500/pinctrl-vt8500.c17
-rw-r--r--drivers/pinctrl/vt8500/pinctrl-wm8505.c17
-rw-r--r--drivers/pinctrl/vt8500/pinctrl-wm8650.c17
-rw-r--r--drivers/pinctrl/vt8500/pinctrl-wm8750.c17
-rw-r--r--drivers/pinctrl/vt8500/pinctrl-wm8850.c17
-rw-r--r--drivers/pinctrl/vt8500/pinctrl-wmt.c10
-rw-r--r--drivers/pinctrl/vt8500/pinctrl-wmt.h1
-rw-r--r--drivers/platform/chrome/cros_ec_dev.c159
-rw-r--r--drivers/platform/goldfish/goldfish_pipe.c6
-rw-r--r--drivers/platform/x86/Kconfig49
-rw-r--r--drivers/platform/x86/Makefile5
-rw-r--r--drivers/platform/x86/acer-wmi.c56
-rw-r--r--drivers/platform/x86/asus-nb-wmi.c23
-rw-r--r--drivers/platform/x86/asus-wmi.c29
-rw-r--r--drivers/platform/x86/asus-wmi.h1
-rw-r--r--drivers/platform/x86/dell-laptop.c26
-rw-r--r--drivers/platform/x86/dell-wmi.c12
-rw-r--r--drivers/platform/x86/ideapad-laptop.c7
-rw-r--r--drivers/platform/x86/intel-hid.c6
-rw-r--r--drivers/platform/x86/intel-smartconnect.c2
-rw-r--r--drivers/platform/x86/intel-vbtn.c35
-rw-r--r--drivers/platform/x86/intel_bxtwc_tmu.c162
-rw-r--r--drivers/platform/x86/intel_mid_thermal.c2
-rw-r--r--drivers/platform/x86/intel_pmc_core.c386
-rw-r--r--drivers/platform/x86/intel_pmc_core.h110
-rw-r--r--drivers/platform/x86/mlx-platform.c361
-rw-r--r--drivers/platform/x86/mlxcpld-hotplug.c515
-rw-r--r--drivers/platform/x86/panasonic-laptop.c2
-rw-r--r--drivers/platform/x86/surface3-wmi.c297
-rw-r--r--drivers/platform/x86/surface3_button.c250
-rw-r--r--drivers/platform/x86/thinkpad_acpi.c95
-rw-r--r--drivers/power/avs/rockchip-io-domain.c2
-rw-r--r--drivers/power/reset/Kconfig10
-rw-r--r--drivers/power/reset/Makefile1
-rw-r--r--drivers/power/reset/at91-poweroff.c1
-rw-r--r--drivers/power/reset/at91-reset.c2
-rw-r--r--drivers/power/reset/piix4-poweroff.c113
-rw-r--r--drivers/power/reset/syscon-reboot-mode.c1
-rw-r--r--drivers/power/reset/zx-reboot.c1
-rw-r--r--drivers/power/supply/ab8500_fg.c8
-rw-r--r--drivers/power/supply/axp288_fuel_gauge.c1
-rw-r--r--drivers/power/supply/bq24190_charger.c2
-rw-r--r--drivers/power/supply/bq27xxx_battery.c44
-rw-r--r--drivers/power/supply/bq27xxx_battery_i2c.c4
-rw-r--r--drivers/power/supply/ipaq_micro_battery.c2
-rw-r--r--drivers/power/supply/lp8788-charger.c3
-rw-r--r--drivers/power/supply/max17040_battery.c52
-rw-r--r--drivers/power/supply/max8997_charger.c1
-rw-r--r--drivers/power/supply/power_supply_core.c4
-rw-r--r--drivers/power/supply/wm8350_power.c2
-rw-r--r--drivers/power/supply/wm97xx_battery.c25
-rw-r--r--drivers/powercap/intel_rapl.c389
-rw-r--r--drivers/ptp/Kconfig10
-rw-r--r--drivers/ptp/ptp_clock.c5
-rw-r--r--drivers/ptp/ptp_sysfs.c2
-rw-r--r--drivers/pwm/Kconfig9
-rw-r--r--drivers/pwm/Makefile1
-rw-r--r--drivers/pwm/pwm-hibvt.c271
-rw-r--r--drivers/pwm/pwm-meson.c1
-rw-r--r--drivers/regulator/Makefile2
-rw-r--r--drivers/regulator/arizona-ldo1.c2
-rw-r--r--drivers/regulator/axp20x-regulator.c12
-rw-r--r--drivers/regulator/core.c38
-rw-r--r--drivers/regulator/fixed.c46
-rw-r--r--drivers/regulator/gpio-regulator.c9
-rw-r--r--drivers/regulator/helpers.c6
-rw-r--r--drivers/regulator/lp873x-regulator.c1
-rw-r--r--drivers/regulator/max77620-regulator.c47
-rw-r--r--drivers/regulator/rk808-regulator.c9
-rw-r--r--drivers/regulator/stw481x-vmmc.c3
-rw-r--r--drivers/regulator/tps6507x-regulator.c2
-rw-r--r--drivers/regulator/tps65086-regulator.c51
-rw-r--r--drivers/regulator/tps65218-regulator.c153
-rw-r--r--drivers/regulator/twl-regulator.c673
-rw-r--r--drivers/regulator/twl6030-regulator.c793
-rw-r--r--drivers/remoteproc/Kconfig52
-rw-r--r--drivers/remoteproc/Makefile9
-rw-r--r--drivers/remoteproc/qcom_adsp_pil.c428
-rw-r--r--drivers/remoteproc/qcom_mdt_loader.c1
-rw-r--r--drivers/remoteproc/qcom_q6v5_pil.c1
-rw-r--r--drivers/remoteproc/qcom_wcnss.c53
-rw-r--r--drivers/remoteproc/qcom_wcnss.h2
-rw-r--r--drivers/remoteproc/qcom_wcnss_iris.c9
-rw-r--r--drivers/remoteproc/remoteproc_core.c247
-rw-r--r--drivers/remoteproc/remoteproc_debugfs.c71
-rw-r--r--drivers/remoteproc/remoteproc_internal.h6
-rw-r--r--drivers/remoteproc/remoteproc_sysfs.c151
-rw-r--r--drivers/remoteproc/remoteproc_virtio.c17
-rw-r--r--drivers/remoteproc/st_remoteproc.c4
-rw-r--r--drivers/remoteproc/st_slim_rproc.c364
-rw-r--r--drivers/remoteproc/ste_modem_rproc.c342
-rw-r--r--drivers/reset/Kconfig1
-rw-r--r--drivers/reset/Makefile1
-rw-r--r--drivers/reset/core.c43
-rw-r--r--drivers/reset/reset-berlin.c12
-rw-r--r--drivers/reset/reset-lpc18xx.c32
-rw-r--r--drivers/reset/reset-oxnas.c1
-rw-r--r--drivers/reset/reset-socfpga.c10
-rw-r--r--drivers/reset/reset-sunxi.c9
-rw-r--r--drivers/reset/reset-zynq.c10
-rw-r--r--drivers/reset/sti/Kconfig8
-rw-r--r--drivers/reset/sti/Makefile2
-rw-r--r--drivers/reset/sti/reset-stih415.c112
-rw-r--r--drivers/reset/sti/reset-stih416.c143
-rw-r--r--drivers/reset/tegra/Kconfig2
-rw-r--r--drivers/reset/tegra/Makefile1
-rw-r--r--drivers/reset/tegra/reset-bpmp.c71
-rw-r--r--drivers/rpmsg/qcom_smd.c18
-rw-r--r--drivers/rpmsg/rpmsg_core.c74
-rw-r--r--drivers/rtc/Kconfig38
-rw-r--r--drivers/rtc/Makefile1
-rw-r--r--drivers/rtc/rtc-cmos.c82
-rw-r--r--drivers/rtc/rtc-ds1307.c52
-rw-r--r--drivers/rtc/rtc-ds1374.c4
-rw-r--r--drivers/rtc/rtc-imxdi.c2
-rw-r--r--drivers/rtc/rtc-jz4740.c154
-rw-r--r--drivers/rtc/rtc-lib.c4
-rw-r--r--drivers/rtc/rtc-mcp795.c122
-rw-r--r--drivers/rtc/rtc-pcf85063.c7
-rw-r--r--drivers/rtc/rtc-r7301.c453
-rw-r--r--drivers/rtc/rtc-starfire.c10
-rw-r--r--drivers/rtc/rtc-sun4v.c10
-rw-r--r--drivers/rtc/rtc-twl.c202
-rw-r--r--drivers/s390/block/dasd.c301
-rw-r--r--drivers/s390/block/dasd_3990_erp.c58
-rw-r--r--drivers/s390/block/dasd_devmap.c326
-rw-r--r--drivers/s390/block/dasd_eckd.c323
-rw-r--r--drivers/s390/block/dasd_eckd.h5
-rw-r--r--drivers/s390/block/dasd_eer.c29
-rw-r--r--drivers/s390/block/dasd_erp.c2
-rw-r--r--drivers/s390/block/dasd_fba.c2
-rw-r--r--drivers/s390/block/dasd_int.h449
-rw-r--r--drivers/s390/char/con3215.c12
-rw-r--r--drivers/s390/char/sclp.h23
-rw-r--r--drivers/s390/char/sclp_cmd.c25
-rw-r--r--drivers/s390/char/sclp_ctl.c4
-rw-r--r--drivers/s390/char/sclp_early.c31
-rw-r--r--drivers/s390/char/sclp_quiesce.c4
-rw-r--r--drivers/s390/char/sclp_tty.c3
-rw-r--r--drivers/s390/char/vmlogrdr.c2
-rw-r--r--drivers/s390/char/zcore.c22
-rw-r--r--drivers/s390/cio/cmf.c10
-rw-r--r--drivers/s390/cio/css.c6
-rw-r--r--drivers/s390/cio/device.c6
-rw-r--r--drivers/s390/cio/device.h1
-rw-r--r--drivers/s390/cio/device_fsm.c6
-rw-r--r--drivers/s390/cio/device_ops.c5
-rw-r--r--drivers/s390/crypto/Makefile13
-rw-r--r--drivers/s390/crypto/ap_asm.h191
-rw-r--r--drivers/s390/crypto/ap_bus.c1331
-rw-r--r--drivers/s390/crypto/ap_bus.h98
-rw-r--r--drivers/s390/crypto/ap_card.c170
-rw-r--r--drivers/s390/crypto/ap_debug.h28
-rw-r--r--drivers/s390/crypto/ap_queue.c701
-rw-r--r--drivers/s390/crypto/zcrypt_api.c1137
-rw-r--r--drivers/s390/crypto/zcrypt_api.h99
-rw-r--r--drivers/s390/crypto/zcrypt_card.c187
-rw-r--r--drivers/s390/crypto/zcrypt_cex2a.c214
-rw-r--r--drivers/s390/crypto/zcrypt_cex4.c319
-rw-r--r--drivers/s390/crypto/zcrypt_debug.h50
-rw-r--r--drivers/s390/crypto/zcrypt_error.h105
-rw-r--r--drivers/s390/crypto/zcrypt_msgtype50.c135
-rw-r--r--drivers/s390/crypto/zcrypt_msgtype50.h5
-rw-r--r--drivers/s390/crypto/zcrypt_msgtype6.c660
-rw-r--r--drivers/s390/crypto/zcrypt_msgtype6.h23
-rw-r--r--drivers/s390/crypto/zcrypt_pcixcc.c376
-rw-r--r--drivers/s390/crypto/zcrypt_queue.c226
-rw-r--r--drivers/s390/net/ctcm_main.c5
-rw-r--r--drivers/s390/net/lcs.c2
-rw-r--r--drivers/s390/net/netiucv.c41
-rw-r--r--drivers/s390/net/qeth_core_main.c4
-rw-r--r--drivers/s390/net/qeth_l2_main.c2
-rw-r--r--drivers/s390/net/qeth_l3_main.c2
-rw-r--r--drivers/s390/scsi/zfcp_dbf.c17
-rw-r--r--drivers/s390/scsi/zfcp_dbf.h41
-rw-r--r--drivers/s390/scsi/zfcp_erp.c61
-rw-r--r--drivers/s390/scsi/zfcp_ext.h8
-rw-r--r--drivers/s390/scsi/zfcp_fc.c36
-rw-r--r--drivers/s390/scsi/zfcp_fsf.h3
-rw-r--r--drivers/s390/scsi/zfcp_reqlist.h30
-rw-r--r--drivers/s390/scsi/zfcp_scsi.c61
-rw-r--r--drivers/s390/virtio/virtio_ccw.c25
-rw-r--r--drivers/scsi/3w-9xxx.c9
-rw-r--r--drivers/scsi/3w-9xxx.h9
-rw-r--r--drivers/scsi/3w-sas.c7
-rw-r--r--drivers/scsi/3w-sas.h7
-rw-r--r--drivers/scsi/3w-xxxx.c7
-rw-r--r--drivers/scsi/3w-xxxx.h5
-rw-r--r--drivers/scsi/Kconfig36
-rw-r--r--drivers/scsi/Makefile3
-rw-r--r--drivers/scsi/NCR5380.c214
-rw-r--r--drivers/scsi/NCR5380.h98
-rw-r--r--drivers/scsi/aacraid/aacraid.h1
-rw-r--r--drivers/scsi/aacraid/comminit.c10
-rw-r--r--drivers/scsi/aacraid/commsup.c25
-rw-r--r--drivers/scsi/aacraid/linit.c22
-rw-r--r--drivers/scsi/advansys.c3
-rw-r--r--drivers/scsi/aic7xxx/aicasm/aicasm.c2
-rw-r--r--drivers/scsi/aic94xx/aic94xx_hwi.c5
-rw-r--r--drivers/scsi/arcmsr/arcmsr.h5
-rw-r--r--drivers/scsi/arcmsr/arcmsr_hba.c82
-rw-r--r--drivers/scsi/arm/cumana_1.c98
-rw-r--r--drivers/scsi/arm/oak.c34
-rw-r--r--drivers/scsi/atari_scsi.c77
-rw-r--r--drivers/scsi/be2iscsi/be_main.c8
-rw-r--r--drivers/scsi/bfa/bfa_ioc.h30
-rw-r--r--drivers/scsi/bfa/bfad_bsg.c62
-rw-r--r--drivers/scsi/bfa/bfad_im.h4
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_fcoe.c1
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_tgt.c3
-rw-r--r--drivers/scsi/cxgbi/cxgb4i/cxgb4i.c323
-rw-r--r--drivers/scsi/cxgbi/libcxgbi.c40
-rw-r--r--drivers/scsi/cxgbi/libcxgbi.h2
-rw-r--r--drivers/scsi/cxlflash/common.h39
-rw-r--r--drivers/scsi/cxlflash/lunmgt.c6
-rw-r--r--drivers/scsi/cxlflash/main.c410
-rw-r--r--drivers/scsi/cxlflash/sislite.h2
-rw-r--r--drivers/scsi/device_handler/scsi_dh_alua.c24
-rw-r--r--drivers/scsi/device_handler/scsi_dh_emc.c2
-rw-r--r--drivers/scsi/device_handler/scsi_dh_hp_sw.c2
-rw-r--r--drivers/scsi/device_handler/scsi_dh_rdac.c2
-rw-r--r--drivers/scsi/dmx3191d.c33
-rw-r--r--drivers/scsi/dpt_i2o.c7
-rw-r--r--drivers/scsi/fcoe/fcoe.c25
-rw-r--r--drivers/scsi/fcoe/fcoe_ctlr.c157
-rw-r--r--drivers/scsi/fcoe/fcoe_sysfs.c83
-rw-r--r--drivers/scsi/fnic/fnic_scsi.c30
-rw-r--r--drivers/scsi/fnic/fnic_trace.c4
-rw-r--r--drivers/scsi/fnic/fnic_trace.h2
-rw-r--r--drivers/scsi/fnic/vnic_dev.c10
-rw-r--r--drivers/scsi/g_NCR5380.c445
-rw-r--r--drivers/scsi/g_NCR5380.h34
-rw-r--r--drivers/scsi/g_NCR5380_mmio.c10
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas.h11
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_main.c67
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_v1_hw.c79
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_v2_hw.c556
-rw-r--r--drivers/scsi/hpsa.c289
-rw-r--r--drivers/scsi/hpsa.h6
-rw-r--r--drivers/scsi/ibmvscsi/ibmvfc.c40
-rw-r--r--drivers/scsi/ibmvscsi/ibmvscsi.c7
-rw-r--r--drivers/scsi/ibmvscsi/ibmvscsi.h1
-rw-r--r--drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c901
-rw-r--r--drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.h6
-rw-r--r--drivers/scsi/ipr.c174
-rw-r--r--drivers/scsi/ipr.h7
-rw-r--r--drivers/scsi/ips.c13
-rw-r--r--drivers/scsi/isci/host.h1
-rw-r--r--drivers/scsi/isci/init.c23
-rw-r--r--drivers/scsi/isci/probe_roms.c1
-rw-r--r--drivers/scsi/isci/remote_node_context.c7
-rw-r--r--drivers/scsi/isci/request.c2
-rw-r--r--drivers/scsi/libfc/fc_disc.c61
-rw-r--r--drivers/scsi/libfc/fc_elsct.c2
-rw-r--r--drivers/scsi/libfc/fc_exch.c256
-rw-r--r--drivers/scsi/libfc/fc_fcp.c235
-rw-r--r--drivers/scsi/libfc/fc_libfc.c2
-rw-r--r--drivers/scsi/libfc/fc_lport.c126
-rw-r--r--drivers/scsi/libfc/fc_rport.c561
-rw-r--r--drivers/scsi/lpfc/lpfc.h6
-rw-r--r--drivers/scsi/lpfc/lpfc_attr.c160
-rw-r--r--drivers/scsi/lpfc/lpfc_bsg.c422
-rw-r--r--drivers/scsi/lpfc/lpfc_bsg.h10
-rw-r--r--drivers/scsi/lpfc/lpfc_crtn.h6
-rw-r--r--drivers/scsi/lpfc/lpfc_els.c2
-rw-r--r--drivers/scsi/lpfc/lpfc_hw4.h18
-rw-r--r--drivers/scsi/lpfc/lpfc_init.c116
-rw-r--r--drivers/scsi/lpfc/lpfc_scsi.c56
-rw-r--r--drivers/scsi/lpfc/lpfc_sli.c41
-rw-r--r--drivers/scsi/lpfc/lpfc_version.h2
-rw-r--r--drivers/scsi/mac_scsi.c83
-rw-r--r--drivers/scsi/megaraid/megaraid_sas.h8
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_base.c136
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_fp.c6
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_fusion.c23
-rw-r--r--drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h7
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_base.c186
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_base.h45
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_config.c2
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_ctl.c112
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_ctl.h1
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_scsih.c163
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_transport.c8
-rw-r--r--drivers/scsi/mvsas/mv_94xx.c2
-rw-r--r--drivers/scsi/osd/osd_initiator.c2
-rw-r--r--drivers/scsi/osst.c2
-rw-r--r--drivers/scsi/pmcraid.c21
-rw-r--r--drivers/scsi/qedi/Kconfig10
-rw-r--r--drivers/scsi/qedi/Makefile5
-rw-r--r--drivers/scsi/qedi/qedi.h364
-rw-r--r--drivers/scsi/qedi/qedi_dbg.c143
-rw-r--r--drivers/scsi/qedi/qedi_dbg.h144
-rw-r--r--drivers/scsi/qedi/qedi_debugfs.c244
-rw-r--r--drivers/scsi/qedi/qedi_fw.c2378
-rw-r--r--drivers/scsi/qedi/qedi_gbl.h73
-rw-r--r--drivers/scsi/qedi/qedi_hsi.h52
-rw-r--r--drivers/scsi/qedi/qedi_iscsi.c1624
-rw-r--r--drivers/scsi/qedi/qedi_iscsi.h232
-rw-r--r--drivers/scsi/qedi/qedi_main.c2127
-rw-r--r--drivers/scsi/qedi/qedi_sysfs.c52
-rw-r--r--drivers/scsi/qedi/qedi_version.h14
-rw-r--r--drivers/scsi/qla2xxx/qla_attr.c36
-rw-r--r--drivers/scsi/qla2xxx/qla_bsg.c449
-rw-r--r--drivers/scsi/qla2xxx/qla_dbg.c4
-rw-r--r--drivers/scsi/qla2xxx/qla_def.h110
-rw-r--r--drivers/scsi/qla2xxx/qla_gbl.h32
-rw-r--r--drivers/scsi/qla2xxx/qla_init.c173
-rw-r--r--drivers/scsi/qla2xxx/qla_inline.h30
-rw-r--r--drivers/scsi/qla2xxx/qla_iocb.c420
-rw-r--r--drivers/scsi/qla2xxx/qla_isr.c276
-rw-r--r--drivers/scsi/qla2xxx/qla_mbx.c85
-rw-r--r--drivers/scsi/qla2xxx/qla_mid.c116
-rw-r--r--drivers/scsi/qla2xxx/qla_mr.c15
-rw-r--r--drivers/scsi/qla2xxx/qla_os.c475
-rw-r--r--drivers/scsi/qla4xxx/ql4_def.h18
-rw-r--r--drivers/scsi/qla4xxx/ql4_glbl.h1
-rw-r--r--drivers/scsi/qla4xxx/ql4_isr.c27
-rw-r--r--drivers/scsi/qla4xxx/ql4_mbx.c5
-rw-r--r--drivers/scsi/qla4xxx/ql4_nx.c97
-rw-r--r--drivers/scsi/qla4xxx/ql4_os.c15
-rw-r--r--drivers/scsi/scsi.c3
-rw-r--r--drivers/scsi/scsi_devinfo.c2
-rw-r--r--drivers/scsi/scsi_error.c2
-rw-r--r--drivers/scsi/scsi_lib.c143
-rw-r--r--drivers/scsi/scsi_sysfs.c4
-rw-r--r--drivers/scsi/scsi_transport_fc.c455
-rw-r--r--drivers/scsi/scsi_transport_srp.c52
-rw-r--r--drivers/scsi/sd.c185
-rw-r--r--drivers/scsi/sd.h70
-rw-r--r--drivers/scsi/sd_zbc.c648
-rw-r--r--drivers/scsi/sg.c3
-rw-r--r--drivers/scsi/smartpqi/smartpqi.h2
-rw-r--r--drivers/scsi/smartpqi/smartpqi_init.c102
-rw-r--r--drivers/scsi/st.c2
-rw-r--r--drivers/scsi/storvsc_drv.c4
-rw-r--r--drivers/scsi/sun3_scsi.c80
-rw-r--r--drivers/scsi/ufs/ufs-qcom.c83
-rw-r--r--drivers/scsi/ufs/ufs-qcom.h1
-rw-r--r--drivers/scsi/ufs/ufs.h5
-rw-r--r--drivers/scsi/ufs/ufs_quirks.h35
-rw-r--r--drivers/scsi/ufs/ufshcd-pci.c2
-rw-r--r--drivers/scsi/ufs/ufshcd-pltfrm.c5
-rw-r--r--drivers/scsi/ufs/ufshcd.c543
-rw-r--r--drivers/scsi/ufs/ufshcd.h54
-rw-r--r--drivers/scsi/ufs/ufshci.h10
-rw-r--r--drivers/scsi/ufs/unipro.h4
-rw-r--r--drivers/scsi/xen-scsifront.c193
-rw-r--r--drivers/sh/intc/virq.c2
-rw-r--r--drivers/soc/Kconfig3
-rw-r--r--drivers/soc/fsl/Kconfig18
-rw-r--r--drivers/soc/fsl/Makefile1
-rw-r--r--drivers/soc/fsl/guts.c239
-rw-r--r--drivers/soc/fsl/qbman/bman.c8
-rw-r--r--drivers/soc/fsl/qbman/bman_ccsr.c3
-rw-r--r--drivers/soc/fsl/qbman/bman_portal.c63
-rw-r--r--drivers/soc/fsl/qbman/dpaa_sys.h1
-rw-r--r--drivers/soc/fsl/qbman/qman.c233
-rw-r--r--drivers/soc/fsl/qbman/qman_ccsr.c3
-rw-r--r--drivers/soc/fsl/qbman/qman_portal.c80
-rw-r--r--drivers/soc/fsl/qbman/qman_priv.h17
-rw-r--r--drivers/soc/fsl/qbman/qman_test_api.c27
-rw-r--r--drivers/soc/fsl/qbman/qman_test_stash.c38
-rw-r--r--drivers/soc/fsl/qe/qe.c6
-rw-r--r--drivers/soc/mediatek/Kconfig2
-rw-r--r--drivers/soc/mediatek/mtk-scpsys.c465
-rw-r--r--drivers/soc/renesas/Makefile9
-rw-r--r--drivers/soc/renesas/r8a7743-sysc.c32
-rw-r--r--drivers/soc/renesas/r8a7745-sysc.c32
-rw-r--r--drivers/soc/renesas/rcar-rst.c92
-rw-r--r--drivers/soc/renesas/rcar-sysc.c6
-rw-r--r--drivers/soc/renesas/rcar-sysc.h2
-rw-r--r--drivers/soc/renesas/renesas-soc.c257
-rw-r--r--drivers/soc/rockchip/pm_domains.c81
-rw-r--r--drivers/soc/tegra/Kconfig14
-rw-r--r--drivers/soc/tegra/pmc.c398
-rw-r--r--drivers/soc/ti/knav_qmss_queue.c2
-rw-r--r--drivers/spi/Kconfig19
-rw-r--r--drivers/spi/Makefile2
-rw-r--r--drivers/spi/spi-armada-3700.c923
-rw-r--r--drivers/spi/spi-ath79.c1
-rw-r--r--drivers/spi/spi-atmel.c324
-rw-r--r--drivers/spi/spi-axi-spi-engine.c1
-rw-r--r--drivers/spi/spi-dw.c1
-rw-r--r--drivers/spi/spi-fsl-dspi.c306
-rw-r--r--drivers/spi/spi-fsl-espi.c728
-rw-r--r--drivers/spi/spi-fsl-lib.h4
-rw-r--r--drivers/spi/spi-fsl-lpspi.c525
-rw-r--r--drivers/spi/spi-imx.c35
-rw-r--r--drivers/spi/spi-jcore.c1
-rw-r--r--drivers/spi/spi-omap2-mcspi.c11
-rw-r--r--drivers/spi/spi-orion.c83
-rw-r--r--drivers/spi/spi-pxa2xx.h1
-rw-r--r--drivers/spi/spi-rspi.c52
-rw-r--r--drivers/spi/spi-s3c64xx.c21
-rw-r--r--drivers/spi/spi-sh-msiof.c1
-rw-r--r--drivers/spi/spi-sun4i.c75
-rw-r--r--drivers/spi/spi-sun6i.c18
-rw-r--r--drivers/spi/spi-ti-qspi.c1
-rw-r--r--drivers/spi/spi-topcliff-pch.c13
-rw-r--r--drivers/spi/spi-xlp.c1
-rw-r--r--drivers/spi/spi.c26
-rw-r--r--drivers/spi/spidev.c1
-rw-r--r--drivers/ssb/pci.c1
-rw-r--r--drivers/staging/Kconfig2
-rw-r--r--drivers/staging/Makefile3
-rw-r--r--drivers/staging/android/TODO8
-rw-r--r--drivers/staging/android/ashmem.c40
-rw-r--r--drivers/staging/android/ion/ion.c4
-rw-r--r--drivers/staging/android/ion/ion_dummy_driver.c2
-rw-r--r--drivers/staging/android/ion/ion_system_heap.c2
-rw-r--r--drivers/staging/android/uapi/ion_test.h1
-rw-r--r--drivers/staging/clocking-wizard/clk-xlnx-clock-wizard.c2
-rw-r--r--drivers/staging/comedi/comedi.h55
-rw-r--r--drivers/staging/comedi/comedidev.h12
-rw-r--r--drivers/staging/comedi/drivers/cb_pcidda.c4
-rw-r--r--drivers/staging/comedi/drivers/mite.c2
-rw-r--r--drivers/staging/comedi/drivers/ni_mio_common.c172
-rw-r--r--drivers/staging/comedi/drivers/ni_stc.h14
-rw-r--r--drivers/staging/comedi/drivers/ni_tio.c16
-rw-r--r--drivers/staging/comedi/drivers/pcl818.c6
-rw-r--r--drivers/staging/comedi/drivers/s626.c182
-rw-r--r--drivers/staging/comedi/kcomedilib/kcomedilib_main.c12
-rw-r--r--drivers/staging/dgnc/Makefile3
-rw-r--r--drivers/staging/dgnc/dgnc_cls.c44
-rw-r--r--drivers/staging/dgnc/dgnc_cls.h2
-rw-r--r--drivers/staging/dgnc/dgnc_driver.c558
-rw-r--r--drivers/staging/dgnc/dgnc_driver.h189
-rw-r--r--drivers/staging/dgnc/dgnc_mgmt.c6
-rw-r--r--drivers/staging/dgnc/dgnc_neo.c111
-rw-r--r--drivers/staging/dgnc/dgnc_neo.h58
-rw-r--r--drivers/staging/dgnc/dgnc_sysfs.c703
-rw-r--r--drivers/staging/dgnc/dgnc_sysfs.h40
-rw-r--r--drivers/staging/dgnc/dgnc_tty.c362
-rw-r--r--drivers/staging/dgnc/dgnc_tty.h6
-rw-r--r--drivers/staging/dgnc/digi.h107
-rw-r--r--drivers/staging/emxx_udc/emxx_udc.c69
-rw-r--r--drivers/staging/fbtft/fb_agm1264k-fl.c68
-rw-r--r--drivers/staging/fbtft/fb_ili9325.c19
-rw-r--r--drivers/staging/fbtft/fb_ili9481.c2
-rw-r--r--drivers/staging/fbtft/fb_ili9486.c2
-rw-r--r--drivers/staging/fbtft/fb_s6d02a1.c2
-rw-r--r--drivers/staging/fbtft/fb_st7735r.c2
-rw-r--r--drivers/staging/fbtft/fbtft-core.c33
-rw-r--r--drivers/staging/fbtft/fbtft.h4
-rw-r--r--drivers/staging/fbtft/fbtft_device.c12
-rw-r--r--drivers/staging/fbtft/flexfb.c373
-rw-r--r--drivers/staging/fsl-mc/bus/Kconfig24
-rw-r--r--drivers/staging/fsl-mc/bus/dpbp-cmd.h (renamed from drivers/staging/fsl-mc/include/dpbp-cmd.h)61
-rw-r--r--drivers/staging/fsl-mc/bus/dpbp.c74
-rw-r--r--drivers/staging/fsl-mc/bus/dpcon-cmd.h (renamed from drivers/staging/fsl-mc/include/dpcon-cmd.h)4
-rw-r--r--drivers/staging/fsl-mc/bus/dpmcp-cmd.h49
-rw-r--r--drivers/staging/fsl-mc/bus/dpmcp.c70
-rw-r--r--drivers/staging/fsl-mc/bus/dpmcp.h141
-rw-r--r--drivers/staging/fsl-mc/bus/dpmng-cmd.h14
-rw-r--r--drivers/staging/fsl-mc/bus/dpmng.c37
-rw-r--r--drivers/staging/fsl-mc/bus/dprc-cmd.h90
-rw-r--r--drivers/staging/fsl-mc/bus/dprc-driver.c23
-rw-r--r--drivers/staging/fsl-mc/bus/dprc.c69
-rw-r--r--drivers/staging/fsl-mc/bus/fsl-mc-allocator.c78
-rw-r--r--drivers/staging/fsl-mc/bus/fsl-mc-bus.c66
-rw-r--r--drivers/staging/fsl-mc/bus/fsl-mc-msi.c2
-rw-r--r--drivers/staging/fsl-mc/bus/fsl-mc-private.h3
-rw-r--r--drivers/staging/fsl-mc/bus/irq-gic-v3-its-fsl-mc-msi.c4
-rw-r--r--drivers/staging/fsl-mc/bus/mc-io.c4
-rw-r--r--drivers/staging/fsl-mc/bus/mc-sys.c12
-rw-r--r--drivers/staging/fsl-mc/include/dpbp.h169
-rw-r--r--drivers/staging/fsl-mc/include/dpmng.h18
-rw-r--r--drivers/staging/fsl-mc/include/dprc.h402
-rw-r--r--drivers/staging/fsl-mc/include/mc-bus.h6
-rw-r--r--drivers/staging/fsl-mc/include/mc-cmd.h44
-rw-r--r--drivers/staging/fsl-mc/include/mc-sys.h3
-rw-r--r--drivers/staging/fsl-mc/include/mc.h4
-rw-r--r--drivers/staging/fwserial/fwserial.c6
-rw-r--r--drivers/staging/gdm724x/gdm_lte.h14
-rw-r--r--drivers/staging/gdm724x/gdm_tty.h1
-rw-r--r--drivers/staging/gdm724x/netlink_k.h3
-rw-r--r--drivers/staging/greybus/arche-apb-ctrl.c8
-rw-r--r--drivers/staging/greybus/arche-platform.c3
-rw-r--r--drivers/staging/greybus/audio_codec.c5
-rw-r--r--drivers/staging/greybus/audio_codec.h1
-rw-r--r--drivers/staging/greybus/audio_manager.h3
-rw-r--r--drivers/staging/greybus/audio_manager_module.c35
-rw-r--r--drivers/staging/greybus/audio_manager_sysfs.c16
-rw-r--r--drivers/staging/greybus/audio_module.c7
-rw-r--r--drivers/staging/greybus/audio_topology.c8
-rw-r--r--drivers/staging/greybus/camera.c11
-rw-r--r--drivers/staging/greybus/es2.c11
-rw-r--r--drivers/staging/greybus/log.c6
-rw-r--r--drivers/staging/greybus/sdio.c3
-rw-r--r--drivers/staging/greybus/svc.c6
-rw-r--r--drivers/staging/greybus/timesync.c8
-rw-r--r--drivers/staging/greybus/uart.c36
-rw-r--r--drivers/staging/gs_fpgaboot/gs_fpgaboot.c2
-rw-r--r--drivers/staging/i4l/act2000/act2000_isa.c1
-rw-r--r--drivers/staging/i4l/act2000/capi.c7
-rw-r--r--drivers/staging/i4l/act2000/module.c24
-rw-r--r--drivers/staging/i4l/icn/icn.c3
-rw-r--r--drivers/staging/i4l/icn/icn.h5
-rw-r--r--drivers/staging/i4l/pcbit/callbacks.c2
-rw-r--r--drivers/staging/i4l/pcbit/capi.c5
-rw-r--r--drivers/staging/i4l/pcbit/drv.c5
-rw-r--r--drivers/staging/i4l/pcbit/edss1.c2
-rw-r--r--drivers/staging/i4l/pcbit/layer2.c2
-rw-r--r--drivers/staging/iio/Documentation/light/sysfs-bus-iio-light-tsl25836
-rw-r--r--drivers/staging/iio/Documentation/sysfs-bus-iio-light-tsl258320
-rw-r--r--drivers/staging/iio/TODO70
-rw-r--r--drivers/staging/iio/accel/Kconfig10
-rw-r--r--drivers/staging/iio/accel/Makefile3
-rw-r--r--drivers/staging/iio/accel/sca3000.h279
-rw-r--r--drivers/staging/iio/accel/sca3000_core.c1210
-rw-r--r--drivers/staging/iio/accel/sca3000_ring.c350
-rw-r--r--drivers/staging/iio/adc/Makefile1
-rw-r--r--drivers/staging/iio/adc/ad7192.c127
-rw-r--r--drivers/staging/iio/adc/ad7280a.c2
-rw-r--r--drivers/staging/iio/adc/ad7606.c (renamed from drivers/staging/iio/adc/ad7606_core.c)436
-rw-r--r--drivers/staging/iio/adc/ad7606.h58
-rw-r--r--drivers/staging/iio/adc/ad7606_par.c23
-rw-r--r--drivers/staging/iio/adc/ad7606_ring.c102
-rw-r--r--drivers/staging/iio/adc/ad7606_spi.c19
-rw-r--r--drivers/staging/iio/adc/ad7780.c22
-rw-r--r--drivers/staging/iio/adc/ad7816.c2
-rw-r--r--drivers/staging/iio/addac/adt7316.c4
-rw-r--r--drivers/staging/iio/cdc/ad7150.c2
-rw-r--r--drivers/staging/iio/cdc/ad7152.c140
-rw-r--r--drivers/staging/iio/cdc/ad7746.c151
-rw-r--r--drivers/staging/iio/frequency/ad9832.c66
-rw-r--r--drivers/staging/iio/frequency/ad9832.h6
-rw-r--r--drivers/staging/iio/frequency/ad9834.c19
-rw-r--r--drivers/staging/iio/impedance-analyzer/ad5933.c21
-rw-r--r--drivers/staging/iio/light/Kconfig19
-rw-r--r--drivers/staging/iio/light/Makefile2
-rw-r--r--drivers/staging/iio/light/tsl2583.c963
-rw-r--r--drivers/staging/iio/meter/ade7758_core.c86
-rw-r--r--drivers/staging/iio/ring_hw.h27
-rw-r--r--drivers/staging/ks7010/ks7010_sdio.c37
-rw-r--r--drivers/staging/ks7010/ks7010_sdio.h22
-rw-r--r--drivers/staging/ks7010/ks_hostif.c124
-rw-r--r--drivers/staging/ks7010/ks_wlan.h143
-rw-r--r--drivers/staging/ks7010/ks_wlan_net.c436
-rw-r--r--drivers/staging/ks7010/michael_mic.c29
-rw-r--r--drivers/staging/ks7010/michael_mic.h20
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/curproc.h20
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/libcfs.h5
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/libcfs_cpu.h4
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/libcfs_crypto.h12
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/libcfs_fail.h53
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/libcfs_hash.h185
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/libcfs_private.h13
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/libcfs_string.h24
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/libcfs_workitem.h4
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/linux/linux-cpu.h8
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/linux/linux-time.h8
-rw-r--r--drivers/staging/lustre/include/linux/lnet/lnetst.h8
-rw-r--r--drivers/staging/lustre/include/linux/lnet/types.h1
-rw-r--r--drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c6
-rw-r--r--drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c26
-rw-r--r--drivers/staging/lustre/lnet/klnds/socklnd/socklnd.c31
-rw-r--r--drivers/staging/lustre/lnet/klnds/socklnd/socklnd.h134
-rw-r--r--drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c44
-rw-r--r--drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib.c6
-rw-r--r--drivers/staging/lustre/lnet/klnds/socklnd/socklnd_proto.c26
-rw-r--r--drivers/staging/lustre/lnet/libcfs/debug.c36
-rw-r--r--drivers/staging/lustre/lnet/libcfs/fail.c5
-rw-r--r--drivers/staging/lustre/lnet/libcfs/hash.c143
-rw-r--r--drivers/staging/lustre/lnet/libcfs/libcfs_cpu.c2
-rw-r--r--drivers/staging/lustre/lnet/libcfs/libcfs_lock.c20
-rw-r--r--drivers/staging/lustre/lnet/libcfs/libcfs_mem.c22
-rw-r--r--drivers/staging/lustre/lnet/libcfs/libcfs_string.c58
-rw-r--r--drivers/staging/lustre/lnet/libcfs/linux/linux-cpu.c187
-rw-r--r--drivers/staging/lustre/lnet/libcfs/linux/linux-crypto.c32
-rw-r--r--drivers/staging/lustre/lnet/libcfs/linux/linux-crypto.h2
-rw-r--r--drivers/staging/lustre/lnet/libcfs/linux/linux-debug.c62
-rw-r--r--drivers/staging/lustre/lnet/libcfs/linux/linux-module.c2
-rw-r--r--drivers/staging/lustre/lnet/libcfs/linux/linux-prim.c10
-rw-r--r--drivers/staging/lustre/lnet/libcfs/linux/linux-tracefile.c28
-rw-r--r--drivers/staging/lustre/lnet/libcfs/module.c26
-rw-r--r--drivers/staging/lustre/lnet/libcfs/prng.c2
-rw-r--r--drivers/staging/lustre/lnet/libcfs/tracefile.c108
-rw-r--r--drivers/staging/lustre/lnet/libcfs/tracefile.h28
-rw-r--r--drivers/staging/lustre/lnet/libcfs/workitem.c50
-rw-r--r--drivers/staging/lustre/lnet/lnet/api-ni.c8
-rw-r--r--drivers/staging/lustre/lnet/lnet/lib-me.c18
-rw-r--r--drivers/staging/lustre/lnet/lnet/lib-move.c1
-rw-r--r--drivers/staging/lustre/lnet/lnet/nidstrings.c6
-rw-r--r--drivers/staging/lustre/lnet/lnet/router.c17
-rw-r--r--drivers/staging/lustre/lnet/selftest/brw_test.c73
-rw-r--r--drivers/staging/lustre/lnet/selftest/conctl.c6
-rw-r--r--drivers/staging/lustre/lnet/selftest/conrpc.c36
-rw-r--r--drivers/staging/lustre/lnet/selftest/conrpc.h23
-rw-r--r--drivers/staging/lustre/lnet/selftest/console.c29
-rw-r--r--drivers/staging/lustre/lnet/selftest/console.h25
-rw-r--r--drivers/staging/lustre/lnet/selftest/framework.c25
-rw-r--r--drivers/staging/lustre/lnet/selftest/ping_test.c4
-rw-r--r--drivers/staging/lustre/lnet/selftest/rpc.c22
-rw-r--r--drivers/staging/lustre/lnet/selftest/rpc.h8
-rw-r--r--drivers/staging/lustre/lnet/selftest/selftest.h30
-rw-r--r--drivers/staging/lustre/lnet/selftest/timer.c7
-rw-r--r--drivers/staging/lustre/lustre/fid/fid_request.c18
-rw-r--r--drivers/staging/lustre/lustre/fid/lproc_fid.c2
-rw-r--r--drivers/staging/lustre/lustre/fld/fld_cache.c6
-rw-r--r--drivers/staging/lustre/lustre/fld/fld_internal.h5
-rw-r--r--drivers/staging/lustre/lustre/fld/fld_request.c8
-rw-r--r--drivers/staging/lustre/lustre/include/cl_object.h379
-rw-r--r--drivers/staging/lustre/lustre/include/llog_swab.h65
-rw-r--r--drivers/staging/lustre/lustre/include/lprocfs_status.h9
-rw-r--r--drivers/staging/lustre/lustre/include/lustre/ll_fiemap.h75
-rw-r--r--drivers/staging/lustre/lustre/include/lustre/lustre_idl.h438
-rw-r--r--drivers/staging/lustre/lustre/include/lustre/lustre_ioctl.h4
-rw-r--r--drivers/staging/lustre/lustre/include/lustre/lustre_user.h44
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_compat.h2
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_dlm.h50
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_fid.h1
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_fld.h2
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_ha.h1
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_import.h9
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_lib.h2
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_lmv.h13
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_log.h6
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_mdc.h44
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_net.h898
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_nrs.h717
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_nrs_fifo.h70
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_req_layout.h7
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_sec.h3
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_swab.h102
-rw-r--r--drivers/staging/lustre/lustre/include/obd.h291
-rw-r--r--drivers/staging/lustre/lustre/include/obd_class.h264
-rw-r--r--drivers/staging/lustre/lustre/include/obd_support.h15
-rw-r--r--drivers/staging/lustre/lustre/include/seq_range.h199
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_extent.c28
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_flock.c22
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_inodebits.c8
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_internal.h88
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_lib.c45
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_lock.c316
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_lockd.c26
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_plain.c8
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_pool.c36
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_request.c132
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_resource.c42
-rw-r--r--drivers/staging/lustre/lustre/llite/Makefile10
-rw-r--r--drivers/staging/lustre/lustre/llite/dcache.c33
-rw-r--r--drivers/staging/lustre/lustre/llite/dir.c195
-rw-r--r--drivers/staging/lustre/lustre/llite/file.c968
-rw-r--r--drivers/staging/lustre/lustre/llite/glimpse.c139
-rw-r--r--drivers/staging/lustre/lustre/llite/lcommon_cl.c41
-rw-r--r--drivers/staging/lustre/lustre/llite/lcommon_misc.c47
-rw-r--r--drivers/staging/lustre/lustre/llite/llite_close.c395
-rw-r--r--drivers/staging/lustre/lustre/llite/llite_internal.h138
-rw-r--r--drivers/staging/lustre/lustre/llite/llite_lib.c342
-rw-r--r--drivers/staging/lustre/lustre/llite/llite_mmap.c69
-rw-r--r--drivers/staging/lustre/lustre/llite/llite_nfs.c24
-rw-r--r--drivers/staging/lustre/lustre/llite/lproc_llite.c4
-rw-r--r--drivers/staging/lustre/lustre/llite/namei.c81
-rw-r--r--drivers/staging/lustre/lustre/llite/rw.c303
-rw-r--r--drivers/staging/lustre/lustre/llite/rw26.c65
-rw-r--r--drivers/staging/lustre/lustre/llite/statahead.c24
-rw-r--r--drivers/staging/lustre/lustre/llite/symlink.c1
-rw-r--r--drivers/staging/lustre/lustre/llite/vvp_dev.c15
-rw-r--r--drivers/staging/lustre/lustre/llite/vvp_internal.h41
-rw-r--r--drivers/staging/lustre/lustre/llite/vvp_io.c191
-rw-r--r--drivers/staging/lustre/lustre/llite/vvp_object.c48
-rw-r--r--drivers/staging/lustre/lustre/llite/vvp_page.c41
-rw-r--r--drivers/staging/lustre/lustre/llite/vvp_req.c122
-rw-r--r--drivers/staging/lustre/lustre/llite/xattr.c360
-rw-r--r--drivers/staging/lustre/lustre/llite/xattr_cache.c17
-rw-r--r--drivers/staging/lustre/lustre/llite/xattr_security.c88
-rw-r--r--drivers/staging/lustre/lustre/lmv/lmv_intent.c11
-rw-r--r--drivers/staging/lustre/lustre/lmv/lmv_internal.h3
-rw-r--r--drivers/staging/lustre/lustre/lmv/lmv_obd.c407
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_cl_internal.h29
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_dev.c52
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_ea.c208
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_internal.h100
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_io.c116
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_merge.c50
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_obd.c720
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_object.c698
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_pack.c293
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_page.c46
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_pool.c3
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_request.c292
-rw-r--r--drivers/staging/lustre/lustre/lov/lovsub_dev.c61
-rw-r--r--drivers/staging/lustre/lustre/lov/lovsub_object.c22
-rw-r--r--drivers/staging/lustre/lustre/mdc/lproc_mdc.c98
-rw-r--r--drivers/staging/lustre/lustre/mdc/mdc_internal.h12
-rw-r--r--drivers/staging/lustre/lustre/mdc/mdc_lib.c80
-rw-r--r--drivers/staging/lustre/lustre/mdc/mdc_locks.c60
-rw-r--r--drivers/staging/lustre/lustre/mdc/mdc_reint.c107
-rw-r--r--drivers/staging/lustre/lustre/mdc/mdc_request.c317
-rw-r--r--drivers/staging/lustre/lustre/mgc/mgc_request.c167
-rw-r--r--drivers/staging/lustre/lustre/obdclass/Makefile2
-rw-r--r--drivers/staging/lustre/lustre/obdclass/cl_internal.h23
-rw-r--r--drivers/staging/lustre/lustre/obdclass/cl_io.c290
-rw-r--r--drivers/staging/lustre/lustre/obdclass/cl_object.c452
-rw-r--r--drivers/staging/lustre/lustre/obdclass/cl_page.c68
-rw-r--r--drivers/staging/lustre/lustre/obdclass/genops.c228
-rw-r--r--drivers/staging/lustre/lustre/obdclass/linux/linux-module.c8
-rw-r--r--drivers/staging/lustre/lustre/obdclass/linux/linux-obdo.c80
-rw-r--r--drivers/staging/lustre/lustre/obdclass/llog.c139
-rw-r--r--drivers/staging/lustre/lustre/obdclass/llog_obd.c1
-rw-r--r--drivers/staging/lustre/lustre/obdclass/llog_swab.c9
-rw-r--r--drivers/staging/lustre/lustre/obdclass/lprocfs_status.c27
-rw-r--r--drivers/staging/lustre/lustre/obdclass/lu_object.c84
-rw-r--r--drivers/staging/lustre/lustre/obdclass/obd_config.c70
-rw-r--r--drivers/staging/lustre/lustre/obdclass/obd_mount.c7
-rw-r--r--drivers/staging/lustre/lustre/obdclass/obdo.c65
-rw-r--r--drivers/staging/lustre/lustre/obdecho/echo_client.c140
-rw-r--r--drivers/staging/lustre/lustre/osc/lproc_osc.c11
-rw-r--r--drivers/staging/lustre/lustre/osc/osc_cache.c65
-rw-r--r--drivers/staging/lustre/lustre/osc/osc_cl_internal.h50
-rw-r--r--drivers/staging/lustre/lustre/osc/osc_dev.c15
-rw-r--r--drivers/staging/lustre/lustre/osc/osc_internal.h43
-rw-r--r--drivers/staging/lustre/lustre/osc/osc_io.c330
-rw-r--r--drivers/staging/lustre/lustre/osc/osc_lock.c143
-rw-r--r--drivers/staging/lustre/lustre/osc/osc_object.c171
-rw-r--r--drivers/staging/lustre/lustre/osc/osc_page.c186
-rw-r--r--drivers/staging/lustre/lustre/osc/osc_quota.c52
-rw-r--r--drivers/staging/lustre/lustre/osc/osc_request.c669
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/client.c338
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/connection.c4
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/events.c4
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/import.c95
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/layout.c66
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/llog_client.c20
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/lproc_ptlrpc.c4
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/niobuf.c98
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/nrs.c15
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/pack_generic.c106
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/pers.c31
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/ptlrpc_internal.h34
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/recover.c26
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/sec.c9
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/sec_bulk.c48
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/sec_plain.c20
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/service.c9
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/wiretest.c410
-rw-r--r--drivers/staging/lustre/sysfs-fs-lustre2
-rw-r--r--drivers/staging/media/Kconfig4
-rw-r--r--drivers/staging/media/Makefile2
-rw-r--r--drivers/staging/media/bcm2048/radio-bcm2048.c68
-rw-r--r--drivers/staging/media/bcm2048/radio-bcm2048.h5
-rw-r--r--drivers/staging/media/cec/Kconfig12
-rw-r--r--drivers/staging/media/cec/TODO32
-rw-r--r--drivers/staging/media/cxd2099/cxd2099.c3
-rw-r--r--drivers/staging/media/davinci_vpfe/Makefile4
-rw-r--r--drivers/staging/media/davinci_vpfe/dm365_resizer.c31
-rw-r--r--drivers/staging/media/davinci_vpfe/dm365_resizer.h2
-rw-r--r--drivers/staging/media/davinci_vpfe/vpfe_video.c8
-rw-r--r--drivers/staging/media/lirc/Kconfig13
-rw-r--r--drivers/staging/media/lirc/Makefile1
-rw-r--r--drivers/staging/media/lirc/lirc_imon.c11
-rw-r--r--drivers/staging/media/lirc/lirc_sasem.c14
-rw-r--r--drivers/staging/media/lirc/lirc_serial.c1130
-rw-r--r--drivers/staging/media/lirc/lirc_zilog.c4
-rw-r--r--drivers/staging/media/omap4iss/iss_csi2.c2
-rw-r--r--drivers/staging/media/pulse8-cec/TODO52
-rw-r--r--drivers/staging/media/s5p-cec/Kconfig2
-rw-r--r--drivers/staging/media/s5p-cec/TODO12
-rw-r--r--drivers/staging/media/s5p-cec/s5p_cec.c11
-rw-r--r--drivers/staging/media/st-cec/Kconfig2
-rw-r--r--drivers/staging/media/st-cec/TODO7
-rw-r--r--drivers/staging/media/st-cec/stih-cec.c11
-rw-r--r--drivers/staging/most/aim-network/networking.c53
-rw-r--r--drivers/staging/most/hdm-dim2/dim2_hdm.c5
-rw-r--r--drivers/staging/most/hdm-usb/hdm_usb.c230
-rw-r--r--drivers/staging/most/mostcore/core.c55
-rw-r--r--drivers/staging/netlogic/xlr_net.c20
-rw-r--r--drivers/staging/octeon/ethernet.c22
-rw-r--r--drivers/staging/rtl8188eu/Makefile2
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_cmd.c6
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_led.c78
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_mlme.c4
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_mlme_ext.c32
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_pwrctrl.c4
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_recv.c46
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_sta_mgt.c4
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_xmit.c9
-rw-r--r--drivers/staging/rtl8188eu/hal/odm.c1
-rw-r--r--drivers/staging/rtl8188eu/hal/phy.c69
-rw-r--r--drivers/staging/rtl8188eu/hal/rf.c22
-rw-r--r--drivers/staging/rtl8188eu/hal/rf_cfg.c70
-rw-r--r--drivers/staging/rtl8188eu/hal/rtl8188e_hal_init.c51
-rw-r--r--drivers/staging/rtl8188eu/hal/rtl8188eu_led.c20
-rw-r--r--drivers/staging/rtl8188eu/hal/rtl8188eu_recv.c25
-rw-r--r--drivers/staging/rtl8188eu/hal/usb_halinit.c21
-rw-r--r--drivers/staging/rtl8188eu/include/Hal8188EPhyCfg.h11
-rw-r--r--drivers/staging/rtl8188eu/include/drv_types.h2
-rw-r--r--drivers/staging/rtl8188eu/include/hal_intf.h1
-rw-r--r--drivers/staging/rtl8188eu/include/odm.h6
-rw-r--r--drivers/staging/rtl8188eu/include/osdep_intf.h3
-rw-r--r--drivers/staging/rtl8188eu/include/recv_osdep.h2
-rw-r--r--drivers/staging/rtl8188eu/include/rtl8188e_hal.h20
-rw-r--r--drivers/staging/rtl8188eu/include/rtl8188e_recv.h2
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_led.h12
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_mlme.h2
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_recv.h13
-rw-r--r--drivers/staging/rtl8188eu/include/usb_ops_linux.h2
-rw-r--r--drivers/staging/rtl8188eu/include/xmit_osdep.h4
-rw-r--r--drivers/staging/rtl8188eu/os_dep/mon.c1
-rw-r--r--drivers/staging/rtl8188eu/os_dep/os_intfs.c33
-rw-r--r--drivers/staging/rtl8188eu/os_dep/osdep_service.c27
-rw-r--r--drivers/staging/rtl8188eu/os_dep/recv_linux.c6
-rw-r--r--drivers/staging/rtl8188eu/os_dep/usb_intf.c19
-rw-r--r--drivers/staging/rtl8188eu/os_dep/usb_ops_linux.c58
-rw-r--r--drivers/staging/rtl8188eu/os_dep/xmit_linux.c8
-rw-r--r--drivers/staging/rtl8192e/Makefile2
-rw-r--r--drivers/staging/rtl8192e/dot11d.c2
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/Makefile2
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/r8192E_cmdpkt.c2
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/r8192E_firmware.c4
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_core.c3
-rw-r--r--drivers/staging/rtl8192e/rtl819x_BAProc.c2
-rw-r--r--drivers/staging/rtl8192e/rtl819x_HTProc.c2
-rw-r--r--drivers/staging/rtl8192e/rtl819x_TSProc.c2
-rw-r--r--drivers/staging/rtl8192e/rtllib_rx.c7
-rw-r--r--drivers/staging/rtl8192e/rtllib_softmac.c1
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_tkip.c2
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c4
-rw-r--r--drivers/staging/rtl8192u/r8192U_core.c1
-rw-r--r--drivers/staging/rtl8712/osdep_service.h9
-rw-r--r--drivers/staging/rtl8712/rtl8712_hal.h12
-rw-r--r--drivers/staging/rtl8712/rtl8712_led.c24
-rw-r--r--drivers/staging/rtl8712/rtl871x_cmd.c5
-rw-r--r--drivers/staging/rtl8712/rtl871x_ioctl_linux.c4
-rw-r--r--drivers/staging/rtl8712/rtl871x_ioctl_set.c14
-rw-r--r--drivers/staging/rtl8712/rtl871x_mlme.c34
-rw-r--r--drivers/staging/rtl8712/rtl871x_mlme.h18
-rw-r--r--drivers/staging/rtl8712/rtl871x_pwrctrl.c34
-rw-r--r--drivers/staging/rtl8712/rtl871x_recv.c13
-rw-r--r--drivers/staging/rtl8712/rtl871x_security.c80
-rw-r--r--drivers/staging/rtl8712/rtl871x_xmit.c50
-rw-r--r--drivers/staging/rtl8712/rtl871x_xmit.h6
-rw-r--r--drivers/staging/rts5208/ms.c393
-rw-r--r--drivers/staging/rts5208/ms.h4
-rw-r--r--drivers/staging/rts5208/rtsx.c55
-rw-r--r--drivers/staging/rts5208/rtsx.h2
-rw-r--r--drivers/staging/rts5208/rtsx_card.c94
-rw-r--r--drivers/staging/rts5208/rtsx_card.h16
-rw-r--r--drivers/staging/rts5208/rtsx_chip.c17
-rw-r--r--drivers/staging/rts5208/rtsx_chip.h137
-rw-r--r--drivers/staging/rts5208/rtsx_scsi.c319
-rw-r--r--drivers/staging/rts5208/rtsx_scsi.h4
-rw-r--r--drivers/staging/rts5208/rtsx_sys.h4
-rw-r--r--drivers/staging/rts5208/rtsx_transport.h30
-rw-r--r--drivers/staging/rts5208/sd.c813
-rw-r--r--drivers/staging/rts5208/sd.h5
-rw-r--r--drivers/staging/rts5208/spi.c144
-rw-r--r--drivers/staging/rts5208/xd.c461
-rw-r--r--drivers/staging/rts5208/xd.h2
-rw-r--r--drivers/staging/skein/skein_api.c26
-rw-r--r--drivers/staging/skein/threefish_block.c16
-rw-r--r--drivers/staging/slicoss/Kconfig14
-rw-r--r--drivers/staging/slicoss/Makefile1
-rw-r--r--drivers/staging/slicoss/README7
-rw-r--r--drivers/staging/slicoss/TODO36
-rw-r--r--drivers/staging/slicoss/slic.h573
-rw-r--r--drivers/staging/slicoss/slichw.h652
-rw-r--r--drivers/staging/slicoss/slicoss.c3132
-rw-r--r--drivers/staging/sm750fb/Makefile2
-rw-r--r--drivers/staging/sm750fb/ddk750.h23
-rw-r--r--drivers/staging/sm750fb/ddk750_chip.c100
-rw-r--r--drivers/staging/sm750fb/ddk750_chip.h89
-rw-r--r--drivers/staging/sm750fb/ddk750_display.c75
-rw-r--r--drivers/staging/sm750fb/ddk750_display.h30
-rw-r--r--drivers/staging/sm750fb/ddk750_dvi.c2
-rw-r--r--drivers/staging/sm750fb/ddk750_help.c17
-rw-r--r--drivers/staging/sm750fb/ddk750_help.h21
-rw-r--r--drivers/staging/sm750fb/ddk750_hwi2c.c15
-rw-r--r--drivers/staging/sm750fb/ddk750_mode.c37
-rw-r--r--drivers/staging/sm750fb/ddk750_power.c74
-rw-r--r--drivers/staging/sm750fb/ddk750_power.h22
-rw-r--r--drivers/staging/sm750fb/ddk750_sii164.c3
-rw-r--r--drivers/staging/sm750fb/ddk750_swi2c.c31
-rw-r--r--drivers/staging/sm750fb/ddk750_swi2c.h24
-rw-r--r--drivers/staging/sm750fb/sm750.c52
-rw-r--r--drivers/staging/sm750fb/sm750.h6
-rw-r--r--drivers/staging/sm750fb/sm750_accel.c52
-rw-r--r--drivers/staging/sm750fb/sm750_accel.h10
-rw-r--r--drivers/staging/sm750fb/sm750_cursor.c14
-rw-r--r--drivers/staging/sm750fb/sm750_cursor.h14
-rw-r--r--drivers/staging/sm750fb/sm750_hw.c40
-rw-r--r--drivers/staging/speakup/TODO2
-rw-r--r--drivers/staging/speakup/main.c42
-rw-r--r--drivers/staging/speakup/selection.c2
-rw-r--r--drivers/staging/speakup/serialio.c6
-rw-r--r--drivers/staging/speakup/speakup_soft.c46
-rw-r--r--drivers/staging/speakup/speakup_spkout.c31
-rw-r--r--drivers/staging/speakup/speakup_txprt.c29
-rw-r--r--drivers/staging/speakup/spk_priv_keyinfo.h148
-rw-r--r--drivers/staging/speakup/spk_types.h16
-rw-r--r--drivers/staging/speakup/synth.c22
-rw-r--r--drivers/staging/speakup/thread.c5
-rw-r--r--drivers/staging/speakup/varhandlers.c6
-rw-r--r--drivers/staging/unisys/include/iochannel.h341
-rw-r--r--drivers/staging/unisys/include/visorbus.h2
-rw-r--r--drivers/staging/unisys/visorbus/vbuschannel.h225
-rw-r--r--drivers/staging/unisys/visorbus/visorbus_main.c231
-rw-r--r--drivers/staging/unisys/visorbus/visorbus_private.h4
-rw-r--r--drivers/staging/unisys/visorbus/visorchannel.c44
-rw-r--r--drivers/staging/unisys/visorbus/visorchipset.c653
-rw-r--r--drivers/staging/unisys/visorbus/vmcallinterface.h185
-rw-r--r--drivers/staging/unisys/visorinput/visorinput.c6
-rw-r--r--drivers/staging/unisys/visornic/visornic_main.c6
-rw-r--r--drivers/staging/vc04_services/Kconfig7
-rw-r--r--drivers/staging/vc04_services/Makefile2
-rw-r--r--drivers/staging/vc04_services/interface/vchi/TODO50
-rw-r--r--drivers/staging/vc04_services/interface/vchi/vchi.h25
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq.h11
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c324
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c202
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_connected.c4
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c659
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.h12
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_debugfs.c4
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_if.h9
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_kern_lib.c17
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_killable.h14
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_memdrv.h12
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_pagelist.h12
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_shim.c138
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_util.c3
-rw-r--r--drivers/staging/vme/devices/vme_pio2.h13
-rw-r--r--drivers/staging/vme/devices/vme_pio2_core.c2
-rw-r--r--drivers/staging/vme/devices/vme_user.c8
-rw-r--r--drivers/staging/vt6655/baseband.c58
-rw-r--r--drivers/staging/vt6655/baseband.h11
-rw-r--r--drivers/staging/vt6655/card.c45
-rw-r--r--drivers/staging/vt6655/card.h6
-rw-r--r--drivers/staging/vt6655/channel.c4
-rw-r--r--drivers/staging/vt6655/channel.h4
-rw-r--r--drivers/staging/vt6655/desc.h4
-rw-r--r--drivers/staging/vt6655/device.h16
-rw-r--r--drivers/staging/vt6655/device_cfg.h4
-rw-r--r--drivers/staging/vt6655/device_main.c9
-rw-r--r--drivers/staging/vt6655/dpc.c4
-rw-r--r--drivers/staging/vt6655/dpc.h4
-rw-r--r--drivers/staging/vt6655/key.c5
-rw-r--r--drivers/staging/vt6655/key.h5
-rw-r--r--drivers/staging/vt6655/mac.c8
-rw-r--r--drivers/staging/vt6655/mac.h327
-rw-r--r--drivers/staging/vt6655/power.c6
-rw-r--r--drivers/staging/vt6655/power.h5
-rw-r--r--drivers/staging/vt6655/rf.c718
-rw-r--r--drivers/staging/vt6655/rf.h5
-rw-r--r--drivers/staging/vt6655/rxtx.c8
-rw-r--r--drivers/staging/vt6655/rxtx.h4
-rw-r--r--drivers/staging/vt6655/srom.c36
-rw-r--r--drivers/staging/vt6655/srom.h11
-rw-r--r--drivers/staging/vt6655/tmacro.h4
-rw-r--r--drivers/staging/vt6655/upc.h4
-rw-r--r--drivers/staging/vt6656/baseband.h20
-rw-r--r--drivers/staging/vt6656/card.c15
-rw-r--r--drivers/staging/vt6656/mac.c2
-rw-r--r--drivers/staging/vt6656/main_usb.c8
-rw-r--r--drivers/staging/vt6656/rf.c10
-rw-r--r--drivers/staging/wilc1000/coreconfigurator.c4
-rw-r--r--drivers/staging/wilc1000/host_interface.c12
-rw-r--r--drivers/staging/wilc1000/host_interface.h1
-rw-r--r--drivers/staging/wilc1000/linux_mon.c4
-rw-r--r--drivers/staging/wilc1000/linux_wlan.c51
-rw-r--r--drivers/staging/wilc1000/wilc_debugfs.c6
-rw-r--r--drivers/staging/wilc1000/wilc_sdio.c3
-rw-r--r--drivers/staging/wilc1000/wilc_spi.c6
-rw-r--r--drivers/staging/wilc1000/wilc_wfi_cfgoperations.c24
-rw-r--r--drivers/staging/wilc1000/wilc_wfi_netdevice.h1
-rw-r--r--drivers/staging/wilc1000/wilc_wlan.h6
-rw-r--r--drivers/staging/wlan-ng/cfg80211.c6
-rw-r--r--drivers/staging/wlan-ng/hfa384x.h128
-rw-r--r--drivers/staging/wlan-ng/hfa384x_usb.c2479
-rw-r--r--drivers/staging/wlan-ng/p80211conv.c322
-rw-r--r--drivers/staging/wlan-ng/p80211conv.h100
-rw-r--r--drivers/staging/wlan-ng/p80211hdr.h118
-rw-r--r--drivers/staging/wlan-ng/p80211ioctl.h120
-rw-r--r--drivers/staging/wlan-ng/p80211metadef.h88
-rw-r--r--drivers/staging/wlan-ng/p80211mgmt.h194
-rw-r--r--drivers/staging/wlan-ng/p80211msg.h90
-rw-r--r--drivers/staging/wlan-ng/p80211netdev.c655
-rw-r--r--drivers/staging/wlan-ng/p80211netdev.h102
-rw-r--r--drivers/staging/wlan-ng/p80211req.c189
-rw-r--r--drivers/staging/wlan-ng/p80211req.h90
-rw-r--r--drivers/staging/wlan-ng/p80211wep.c100
-rw-r--r--drivers/staging/wlan-ng/prism2fw.c559
-rw-r--r--drivers/staging/wlan-ng/prism2mgmt.c544
-rw-r--r--drivers/staging/wlan-ng/prism2mgmt.h125
-rw-r--r--drivers/staging/wlan-ng/prism2mib.c102
-rw-r--r--drivers/staging/wlan-ng/prism2sta.c210
-rw-r--r--drivers/staging/xgifb/XGI_main.h54
-rw-r--r--drivers/staging/xgifb/XGI_main_26.c196
-rw-r--r--drivers/staging/xgifb/vb_init.c56
-rw-r--r--drivers/staging/xgifb/vb_setmode.c667
-rw-r--r--drivers/staging/xgifb/vb_table.h9
-rw-r--r--drivers/staging/xgifb/vb_util.h4
-rw-r--r--drivers/target/iscsi/cxgbit/cxgbit_main.c1
-rw-r--r--drivers/target/iscsi/cxgbit/cxgbit_target.c1
-rw-r--r--drivers/target/iscsi/iscsi_target.c2
-rw-r--r--drivers/target/iscsi/iscsi_target.h12
-rw-r--r--drivers/target/iscsi/iscsi_target_auth.c2
-rw-r--r--drivers/target/iscsi/iscsi_target_auth.h5
-rw-r--r--drivers/target/iscsi/iscsi_target_configfs.c7
-rw-r--r--drivers/target/iscsi/iscsi_target_datain_values.c2
-rw-r--r--drivers/target/iscsi/iscsi_target_datain_values.h3
-rw-r--r--drivers/target/iscsi/iscsi_target_device.h3
-rw-r--r--drivers/target/iscsi/iscsi_target_erl0.h6
-rw-r--r--drivers/target/iscsi/iscsi_target_erl1.c1
-rw-r--r--drivers/target/iscsi/iscsi_target_erl1.h10
-rw-r--r--drivers/target/iscsi/iscsi_target_erl2.c1
-rw-r--r--drivers/target/iscsi/iscsi_target_erl2.h7
-rw-r--r--drivers/target/iscsi/iscsi_target_login.c2
-rw-r--r--drivers/target/iscsi/iscsi_target_login.h7
-rw-r--r--drivers/target/iscsi/iscsi_target_nego.c2
-rw-r--r--drivers/target/iscsi/iscsi_target_nego.h4
-rw-r--r--drivers/target/iscsi/iscsi_target_nodeattrib.h5
-rw-r--r--drivers/target/iscsi/iscsi_target_parameters.c2
-rw-r--r--drivers/target/iscsi/iscsi_target_parameters.h6
-rw-r--r--drivers/target/iscsi/iscsi_target_seq_pdu_list.h5
-rw-r--r--drivers/target/iscsi/iscsi_target_tmr.h6
-rw-r--r--drivers/target/iscsi/iscsi_target_tpg.c3
-rw-r--r--drivers/target/iscsi/iscsi_target_tpg.h9
-rw-r--r--drivers/target/iscsi/iscsi_target_transport.c1
-rw-r--r--drivers/target/iscsi/iscsi_target_util.c1
-rw-r--r--drivers/target/iscsi/iscsi_target_util.h8
-rw-r--r--drivers/target/loopback/tcm_loop.h4
-rw-r--r--drivers/target/sbp/sbp_target.c3
-rw-r--r--drivers/target/target_core_alua.c3
-rw-r--r--drivers/target/target_core_alua.h2
-rw-r--r--drivers/target/target_core_configfs.c6
-rw-r--r--drivers/target/target_core_device.c1
-rw-r--r--drivers/target/target_core_fabric_configfs.c7
-rw-r--r--drivers/target/target_core_file.c1
-rw-r--r--drivers/target/target_core_file.h2
-rw-r--r--drivers/target/target_core_iblock.c8
-rw-r--r--drivers/target/target_core_iblock.h3
-rw-r--r--drivers/target/target_core_internal.h5
-rw-r--r--drivers/target/target_core_pr.c5
-rw-r--r--drivers/target/target_core_pr.h4
-rw-r--r--drivers/target/target_core_pscsi.c8
-rw-r--r--drivers/target/target_core_pscsi.h7
-rw-r--r--drivers/target/target_core_rd.c2
-rw-r--r--drivers/target/target_core_rd.h4
-rw-r--r--drivers/target/target_core_sbc.c1
-rw-r--r--drivers/target/target_core_ua.h2
-rw-r--r--drivers/target/target_core_user.c9
-rw-r--r--drivers/target/target_core_xcopy.c1
-rw-r--r--drivers/target/target_core_xcopy.h2
-rw-r--r--drivers/target/tcm_fc/tcm_fc.h3
-rw-r--r--drivers/target/tcm_fc/tfc_cmd.c20
-rw-r--r--drivers/target/tcm_fc/tfc_io.c4
-rw-r--r--drivers/thermal/Kconfig4
-rw-r--r--drivers/thermal/Makefile3
-rw-r--r--drivers/thermal/db8500_thermal.c1
-rw-r--r--drivers/thermal/devfreq_cooling.c5
-rw-r--r--drivers/thermal/int340x_thermal/int3400_thermal.c2
-rw-r--r--drivers/thermal/intel_pch_thermal.c64
-rw-r--r--drivers/thermal/intel_powerclamp.c360
-rw-r--r--drivers/thermal/max77620_thermal.c1
-rw-r--r--drivers/thermal/qcom-spmi-temp-alarm.c6
-rw-r--r--drivers/thermal/rockchip_thermal.c7
-rw-r--r--drivers/thermal/tango_thermal.c1
-rw-r--r--drivers/thermal/thermal_core.c1450
-rw-r--r--drivers/thermal/thermal_core.h26
-rw-r--r--drivers/thermal/thermal_helpers.c226
-rw-r--r--drivers/thermal/thermal_hwmon.c4
-rw-r--r--drivers/thermal/thermal_sysfs.c771
-rw-r--r--drivers/thermal/ti-soc-thermal/ti-bandgap.c5
-rw-r--r--drivers/thermal/x86_pkg_temp_thermal.c587
-rw-r--r--drivers/thunderbolt/Kconfig2
-rw-r--r--drivers/thunderbolt/eeprom.c43
-rw-r--r--drivers/thunderbolt/nhi_regs.h6
-rw-r--r--drivers/thunderbolt/switch.c2
-rw-r--r--drivers/tty/amiserial.c2
-rw-r--r--drivers/tty/n_gsm.c12
-rw-r--r--drivers/tty/nozomi.c51
-rw-r--r--drivers/tty/rocket.c4
-rw-r--r--drivers/tty/serial/8250/8250.h6
-rw-r--r--drivers/tty/serial/8250/8250_core.c7
-rw-r--r--drivers/tty/serial/8250/8250_dma.c9
-rw-r--r--drivers/tty/serial/8250/8250_dw.c22
-rw-r--r--drivers/tty/serial/8250/8250_fintek.c231
-rw-r--r--drivers/tty/serial/8250/8250_lpss.c9
-rw-r--r--drivers/tty/serial/8250/8250_mid.c4
-rw-r--r--drivers/tty/serial/8250/8250_of.c2
-rw-r--r--drivers/tty/serial/8250/8250_pci.c57
-rw-r--r--drivers/tty/serial/8250/8250_port.c18
-rw-r--r--drivers/tty/serial/8250/8250_pxa.c190
-rw-r--r--drivers/tty/serial/8250/8250_uniphier.c46
-rw-r--r--drivers/tty/serial/8250/Kconfig10
-rw-r--r--drivers/tty/serial/8250/Makefile1
-rw-r--r--drivers/tty/serial/Kconfig19
-rw-r--r--drivers/tty/serial/Makefile7
-rw-r--r--drivers/tty/serial/amba-pl011.c56
-rw-r--r--drivers/tty/serial/crisv10.c8
-rw-r--r--drivers/tty/serial/fsl_lpuart.c64
-rw-r--r--drivers/tty/serial/ifx6x60.c1
-rw-r--r--drivers/tty/serial/ioc4_serial.c7
-rw-r--r--drivers/tty/serial/mxs-auart.c2
-rw-r--r--drivers/tty/serial/pxa.c2
-rw-r--r--drivers/tty/serial/sc16is7xx.c2
-rw-r--r--drivers/tty/serial/serial_core.c17
-rw-r--r--drivers/tty/serial/sh-sci.c20
-rw-r--r--drivers/tty/serial/sunhv.c3
-rw-r--r--drivers/tty/serial/sunsu.c1
-rw-r--r--drivers/tty/synclink.c1
-rw-r--r--drivers/tty/synclink_gt.c1
-rw-r--r--drivers/tty/synclinkmp.c1
-rw-r--r--drivers/tty/vt/consolemap.c115
-rw-r--r--drivers/tty/vt/keyboard.c4
-rw-r--r--drivers/tty/vt/vt.c89
-rw-r--r--drivers/uio/Kconfig9
-rw-r--r--drivers/uio/Makefile1
-rw-r--r--drivers/uio/uio_hv_generic.c218
-rw-r--r--drivers/uio/uio_pruss.c10
-rw-r--r--drivers/usb/Kconfig2
-rw-r--r--drivers/usb/Makefile1
-rw-r--r--drivers/usb/chipidea/ci_hdrc_imx.c4
-rw-r--r--drivers/usb/chipidea/ci_hdrc_imx.h1
-rw-r--r--drivers/usb/chipidea/udc.c12
-rw-r--r--drivers/usb/chipidea/udc.h12
-rw-r--r--drivers/usb/chipidea/usbmisc_imx.c86
-rw-r--r--drivers/usb/class/cdc-acm.c216
-rw-r--r--drivers/usb/class/cdc-acm.h5
-rw-r--r--drivers/usb/class/usbtmc.c5
-rw-r--r--drivers/usb/core/buffer.c3
-rw-r--r--drivers/usb/core/config.c5
-rw-r--r--drivers/usb/core/devices.c12
-rw-r--r--drivers/usb/core/driver.c3
-rw-r--r--drivers/usb/core/endpoint.c7
-rw-r--r--drivers/usb/core/file.c2
-rw-r--r--drivers/usb/core/generic.c2
-rw-r--r--drivers/usb/core/hub.c114
-rw-r--r--drivers/usb/core/ledtrig-usbport.c7
-rw-r--r--drivers/usb/core/message.c3
-rw-r--r--drivers/usb/core/notify.c2
-rw-r--r--drivers/usb/core/sysfs.c17
-rw-r--r--drivers/usb/core/urb.c12
-rw-r--r--drivers/usb/core/usb.c3
-rw-r--r--drivers/usb/core/usb.h5
-rw-r--r--drivers/usb/dwc2/Makefile1
-rw-r--r--drivers/usb/dwc2/core.c930
-rw-r--r--drivers/usb/dwc2/core.h324
-rw-r--r--drivers/usb/dwc2/core_intr.c6
-rw-r--r--drivers/usb/dwc2/debugfs.c2
-rw-r--r--drivers/usb/dwc2/gadget.c1126
-rw-r--r--drivers/usb/dwc2/hcd.c264
-rw-r--r--drivers/usb/dwc2/hcd.h7
-rw-r--r--drivers/usb/dwc2/hcd_ddma.c56
-rw-r--r--drivers/usb/dwc2/hcd_intr.c55
-rw-r--r--drivers/usb/dwc2/hcd_queue.c87
-rw-r--r--drivers/usb/dwc2/hw.h48
-rw-r--r--drivers/usb/dwc2/params.c1435
-rw-r--r--drivers/usb/dwc2/pci.c18
-rw-r--r--drivers/usb/dwc2/platform.c207
-rw-r--r--drivers/usb/dwc3/Kconfig6
-rw-r--r--drivers/usb/dwc3/Makefile6
-rw-r--r--drivers/usb/dwc3/core.c358
-rw-r--r--drivers/usb/dwc3/core.h53
-rw-r--r--drivers/usb/dwc3/debug.c32
-rw-r--r--drivers/usb/dwc3/debug.h41
-rw-r--r--drivers/usb/dwc3/dwc3-exynos.c10
-rw-r--r--drivers/usb/dwc3/dwc3-pci.c134
-rw-r--r--drivers/usb/dwc3/dwc3-st.c1
-rw-r--r--drivers/usb/dwc3/ep0.c347
-rw-r--r--drivers/usb/dwc3/gadget.c578
-rw-r--r--drivers/usb/dwc3/gadget.h5
-rw-r--r--drivers/usb/dwc3/host.c88
-rw-r--r--drivers/usb/dwc3/io.h6
-rw-r--r--drivers/usb/dwc3/trace.h123
-rw-r--r--drivers/usb/gadget/composite.c23
-rw-r--r--drivers/usb/gadget/configfs.c8
-rw-r--r--drivers/usb/gadget/function/f_fs.c4
-rw-r--r--drivers/usb/gadget/function/f_hid.c73
-rw-r--r--drivers/usb/gadget/function/f_ncm.c11
-rw-r--r--drivers/usb/gadget/function/f_phonet.c11
-rw-r--r--drivers/usb/gadget/function/f_printer.c6
-rw-r--r--drivers/usb/gadget/function/f_tcm.c2
-rw-r--r--drivers/usb/gadget/function/f_uac2.c14
-rw-r--r--drivers/usb/gadget/function/rndis.c12
-rw-r--r--drivers/usb/gadget/function/rndis.h51
-rw-r--r--drivers/usb/gadget/function/u_ether.c19
-rw-r--r--drivers/usb/gadget/function/u_serial.c7
-rw-r--r--drivers/usb/gadget/function/uvc.h18
-rw-r--r--drivers/usb/gadget/function/uvc_configfs.c25
-rw-r--r--drivers/usb/gadget/function/uvc_v4l2.c3
-rw-r--r--drivers/usb/gadget/function/uvc_video.c2
-rw-r--r--drivers/usb/gadget/legacy/inode.c2
-rw-r--r--drivers/usb/gadget/udc/at91_udc.h2
-rw-r--r--drivers/usb/gadget/udc/atmel_usba_udc.c8
-rw-r--r--drivers/usb/gadget/udc/bdc/bdc_cmd.c2
-rw-r--r--drivers/usb/gadget/udc/bdc/bdc_ep.c2
-rw-r--r--drivers/usb/gadget/udc/dummy_hcd.c5
-rw-r--r--drivers/usb/gadget/udc/fsl_udc_core.c3
-rw-r--r--drivers/usb/gadget/udc/fsl_usb2_udc.h2
-rw-r--r--drivers/usb/gadget/udc/fusb300_udc.c2
-rw-r--r--drivers/usb/gadget/udc/gr_udc.c2
-rw-r--r--drivers/usb/gadget/udc/m66592-udc.c4
-rw-r--r--drivers/usb/gadget/udc/mv_u3d_core.c34
-rw-r--r--drivers/usb/gadget/udc/mv_udc_core.c3
-rw-r--r--drivers/usb/gadget/udc/net2272.c4
-rw-r--r--drivers/usb/gadget/udc/net2280.c6
-rw-r--r--drivers/usb/gadget/udc/omap_udc.h2
-rw-r--r--drivers/usb/gadget/udc/pxa25x_udc.h2
-rw-r--r--drivers/usb/gadget/udc/s3c2410_udc.c4
-rw-r--r--drivers/usb/host/Kconfig5
-rw-r--r--drivers/usb/host/Makefile1
-rw-r--r--drivers/usb/host/ehci-fsl.c3
-rw-r--r--drivers/usb/host/ehci-hub.c14
-rw-r--r--drivers/usb/host/ehci-pci.c3
-rw-r--r--drivers/usb/host/ehci-q.c30
-rw-r--r--drivers/usb/host/ehci-sched.c3
-rw-r--r--drivers/usb/host/ehci-w90x900.c30
-rw-r--r--drivers/usb/host/ehci.h8
-rw-r--r--drivers/usb/host/fsl-mph-dr-of.c2
-rw-r--r--drivers/usb/host/isp1362-hcd.c27
-rw-r--r--drivers/usb/host/ohci-at91.c123
-rw-r--r--drivers/usb/host/ohci-da8xx.c522
-rw-r--r--drivers/usb/host/ohci-hcd.c18
-rw-r--r--drivers/usb/host/ohci-mem.c6
-rw-r--r--drivers/usb/host/ohci-nxp.c7
-rw-r--r--drivers/usb/host/ohci-omap.c39
-rw-r--r--drivers/usb/host/ohci-pxa27x.c36
-rw-r--r--drivers/usb/host/ohci-s3c2410.c47
-rw-r--r--drivers/usb/host/uhci-pci.c4
-rw-r--r--drivers/usb/host/xhci-mem.c16
-rw-r--r--drivers/usb/host/xhci-mtk-sch.c4
-rw-r--r--drivers/usb/host/xhci-mtk.c38
-rw-r--r--drivers/usb/host/xhci-mtk.h1
-rw-r--r--drivers/usb/host/xhci-plat.c9
-rw-r--r--drivers/usb/host/xhci-rcar.c4
-rw-r--r--drivers/usb/host/xhci-rcar.h1
-rw-r--r--drivers/usb/host/xhci-ring.c686
-rw-r--r--drivers/usb/host/xhci.c44
-rw-r--r--drivers/usb/host/xhci.h13
-rw-r--r--drivers/usb/isp1760/isp1760-if.c2
-rw-r--r--drivers/usb/misc/chaoskey.c14
-rw-r--r--drivers/usb/misc/rio500.c2
-rw-r--r--drivers/usb/misc/sisusbvga/sisusb_con.c55
-rw-r--r--drivers/usb/misc/usbtest.c6
-rw-r--r--drivers/usb/mtu3/Kconfig54
-rw-r--r--drivers/usb/mtu3/Makefile18
-rw-r--r--drivers/usb/mtu3/mtu3.h417
-rw-r--r--drivers/usb/mtu3/mtu3_core.c863
-rw-r--r--drivers/usb/mtu3/mtu3_dr.c379
-rw-r--r--drivers/usb/mtu3/mtu3_dr.h108
-rw-r--r--drivers/usb/mtu3/mtu3_gadget.c730
-rw-r--r--drivers/usb/mtu3/mtu3_gadget_ep0.c881
-rw-r--r--drivers/usb/mtu3/mtu3_host.c294
-rw-r--r--drivers/usb/mtu3/mtu3_hw_regs.h473
-rw-r--r--drivers/usb/mtu3/mtu3_plat.c484
-rw-r--r--drivers/usb/mtu3/mtu3_qmu.c573
-rw-r--r--drivers/usb/mtu3/mtu3_qmu.h43
-rw-r--r--drivers/usb/musb/da8xx.c70
-rw-r--r--drivers/usb/musb/musb_core.c29
-rw-r--r--drivers/usb/musb/musb_core.h6
-rw-r--r--drivers/usb/musb/musb_gadget.c8
-rw-r--r--drivers/usb/musb/musb_host.c2
-rw-r--r--drivers/usb/musb/musb_virthub.c1
-rw-r--r--drivers/usb/musb/omap2430.c6
-rw-r--r--drivers/usb/musb/sunxi.c25
-rw-r--r--drivers/usb/phy/Kconfig1
-rw-r--r--drivers/usb/phy/phy-am335x-control.c2
-rw-r--r--drivers/usb/phy/phy-generic.c9
-rw-r--r--drivers/usb/phy/phy-isp1301-omap.c2
-rw-r--r--drivers/usb/phy/phy-twl6030-usb.c23
-rw-r--r--drivers/usb/renesas_usbhs/fifo.c5
-rw-r--r--drivers/usb/serial/Kconfig10
-rw-r--r--drivers/usb/serial/Makefile1
-rw-r--r--drivers/usb/serial/ch341.c113
-rw-r--r--drivers/usb/serial/cp210x.c411
-rw-r--r--drivers/usb/serial/f81534.c1409
-rw-r--r--drivers/usb/serial/ftdi_sio.c5
-rw-r--r--drivers/usb/serial/io_edgeport.c3
-rw-r--r--drivers/usb/serial/io_ti.c3
-rw-r--r--drivers/usb/serial/kl5kusb105.c35
-rw-r--r--drivers/usb/serial/mos7720.c3
-rw-r--r--drivers/usb/serial/mos7840.c3
-rw-r--r--drivers/usb/serial/opticon.c3
-rw-r--r--drivers/usb/serial/option.c7
-rw-r--r--drivers/usb/serial/quatech2.c3
-rw-r--r--drivers/usb/serial/ssu100.c3
-rw-r--r--drivers/usb/serial/ti_usb_3410_5052.c3
-rw-r--r--drivers/usb/serial/usb_wwan.c3
-rw-r--r--drivers/usb/storage/usb.c1
-rw-r--r--drivers/usb/usbip/vhci_hcd.c3
-rw-r--r--drivers/usb/usbip/vhci_sysfs.c1
-rw-r--r--drivers/usb/usbip/vudc_dev.c45
-rw-r--r--drivers/usb/usbip/vudc_transfer.c8
-rw-r--r--drivers/usb/wusbcore/dev-sysfs.c6
-rw-r--r--drivers/usb/wusbcore/security.c1
-rw-r--r--drivers/usb/wusbcore/wa-nep.c1
-rw-r--r--drivers/usb/wusbcore/wa-xfer.c1
-rw-r--r--drivers/usb/wusbcore/wusbhc.c13
-rw-r--r--drivers/vfio/Kconfig1
-rw-r--r--drivers/vfio/Makefile1
-rw-r--r--drivers/vfio/mdev/Kconfig17
-rw-r--r--drivers/vfio/mdev/Makefile5
-rw-r--r--drivers/vfio/mdev/mdev_core.c385
-rw-r--r--drivers/vfio/mdev/mdev_driver.c119
-rw-r--r--drivers/vfio/mdev/mdev_private.h41
-rw-r--r--drivers/vfio/mdev/mdev_sysfs.c286
-rw-r--r--drivers/vfio/mdev/vfio_mdev.c148
-rw-r--r--drivers/vfio/pci/vfio_pci.c83
-rw-r--r--drivers/vfio/pci/vfio_pci_config.c12
-rw-r--r--drivers/vfio/platform/vfio_platform_common.c31
-rw-r--r--drivers/vfio/vfio.c461
-rw-r--r--drivers/vfio/vfio_iommu_spapr_tce.c328
-rw-r--r--drivers/vfio/vfio_iommu_type1.c885
-rw-r--r--drivers/vhost/net.c4
-rw-r--r--drivers/vhost/scsi.c5
-rw-r--r--drivers/vhost/vhost.c45
-rw-r--r--drivers/vhost/vhost.h3
-rw-r--r--drivers/vhost/vringh.c5
-rw-r--r--drivers/vhost/vsock.c28
-rw-r--r--drivers/video/console/Kconfig2
-rw-r--r--drivers/video/console/fbcon.c18
-rw-r--r--drivers/video/console/mdacon.c7
-rw-r--r--drivers/video/console/newport_con.c8
-rw-r--r--drivers/video/console/sticon.c7
-rw-r--r--drivers/video/console/vgacon.c133
-rw-r--r--drivers/video/fbdev/efifb.c59
-rw-r--r--drivers/video/fbdev/skeletonfb.c8
-rw-r--r--drivers/video/fbdev/xen-fbfront.c13
-rw-r--r--drivers/video/hdmi.c4
-rw-r--r--drivers/video/of_display_timing.c15
-rw-r--r--drivers/virtio/Kconfig2
-rw-r--r--drivers/virtio/virtio_mmio.c2
-rw-r--r--drivers/virtio/virtio_pci_common.c201
-rw-r--r--drivers/virtio/virtio_pci_common.h1
-rw-r--r--drivers/virtio/virtio_pci_modern.c8
-rw-r--r--drivers/virtio/virtio_ring.c6
-rw-r--r--drivers/watchdog/mei_wdt.c58
-rw-r--r--drivers/watchdog/octeon-wdt-main.c62
-rw-r--r--drivers/watchdog/sa1100_wdt.c24
-rw-r--r--drivers/xen/balloon.c6
-rw-r--r--drivers/xen/events/events_base.c4
-rw-r--r--drivers/xen/gntalloc.c9
-rw-r--r--drivers/xen/gntdev.c2
-rw-r--r--drivers/xen/platform-pci.c6
-rw-r--r--drivers/xen/privcmd.c2
-rw-r--r--drivers/xen/swiotlb-xen.c27
-rw-r--r--drivers/xen/xen-pciback/xenbus.c8
-rw-r--r--drivers/xen/xenbus/xenbus_dev_frontend.c2
-rw-r--r--drivers/xen/xenbus/xenbus_probe.c4
-rw-r--r--drivers/xen/xenbus/xenbus_probe_backend.c8
-rw-r--r--drivers/xen/xenbus/xenbus_xs.c22
5344 files changed, 407665 insertions, 130528 deletions
diff --git a/drivers/Makefile b/drivers/Makefile
index 194d20bee7dc..060026a02f59 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -107,7 +107,7 @@ obj-$(CONFIG_INPUT) += input/
obj-$(CONFIG_RTC_LIB) += rtc/
obj-y += i2c/ media/
obj-$(CONFIG_PPS) += pps/
-obj-$(CONFIG_PTP_1588_CLOCK) += ptp/
+obj-y += ptp/
obj-$(CONFIG_W1) += w1/
obj-y += power/
obj-$(CONFIG_HWMON) += hwmon/
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index 535e7828445a..83e5f7e1a20d 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -104,7 +104,7 @@ config ACPI_PROCFS_POWER
Say N to delete power /proc/acpi/ directories that have moved to /sys/
config ACPI_REV_OVERRIDE_POSSIBLE
- bool "Allow supported ACPI revision to be overriden"
+ bool "Allow supported ACPI revision to be overridden"
depends on X86
default y
help
@@ -342,7 +342,7 @@ config ACPI_DEBUG
Use the acpi.debug_layer and acpi.debug_level kernel command-line
parameters documented in Documentation/acpi/debug.txt and
- Documentation/kernel-parameters.txt to control the type and
+ Documentation/admin-guide/kernel-parameters.rst to control the type and
amount of debug output.
config ACPI_PCI_SLOT
diff --git a/drivers/acpi/acpi_apd.c b/drivers/acpi/acpi_apd.c
index 7dd70927991e..26696b693e63 100644
--- a/drivers/acpi/acpi_apd.c
+++ b/drivers/acpi/acpi_apd.c
@@ -77,6 +77,11 @@ static const struct apd_device_desc cz_i2c_desc = {
.fixed_clk_rate = 133000000,
};
+static const struct apd_device_desc wt_i2c_desc = {
+ .setup = acpi_apd_setup,
+ .fixed_clk_rate = 150000000,
+};
+
static struct property_entry uart_properties[] = {
PROPERTY_ENTRY_U32("reg-io-width", 4),
PROPERTY_ENTRY_U32("reg-shift", 2),
@@ -156,7 +161,7 @@ static const struct acpi_device_id acpi_apd_device_ids[] = {
/* Generic apd devices */
#ifdef CONFIG_X86_AMD_PLATFORM_DEVICE
{ "AMD0010", APD_ADDR(cz_i2c_desc) },
- { "AMDI0010", APD_ADDR(cz_i2c_desc) },
+ { "AMDI0010", APD_ADDR(wt_i2c_desc) },
{ "AMD0020", APD_ADDR(cz_uart_desc) },
{ "AMDI0020", APD_ADDR(cz_uart_desc) },
{ "AMD0030", },
diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c
index 373657f7e35a..8ea836c046f8 100644
--- a/drivers/acpi/acpi_lpss.c
+++ b/drivers/acpi/acpi_lpss.c
@@ -718,13 +718,14 @@ static int acpi_lpss_resume_early(struct device *dev)
#define LPSS_GPIODEF0_DMA1_D3 BIT(2)
#define LPSS_GPIODEF0_DMA2_D3 BIT(3)
#define LPSS_GPIODEF0_DMA_D3_MASK GENMASK(3, 2)
+#define LPSS_GPIODEF0_DMA_LLP BIT(13)
static DEFINE_MUTEX(lpss_iosf_mutex);
static void lpss_iosf_enter_d3_state(void)
{
u32 value1 = 0;
- u32 mask1 = LPSS_GPIODEF0_DMA_D3_MASK;
+ u32 mask1 = LPSS_GPIODEF0_DMA_D3_MASK | LPSS_GPIODEF0_DMA_LLP;
u32 value2 = LPSS_PMCSR_D3hot;
u32 mask2 = LPSS_PMCSR_Dx_MASK;
/*
@@ -768,8 +769,9 @@ exit:
static void lpss_iosf_exit_d3_state(void)
{
- u32 value1 = LPSS_GPIODEF0_DMA1_D3 | LPSS_GPIODEF0_DMA2_D3;
- u32 mask1 = LPSS_GPIODEF0_DMA_D3_MASK;
+ u32 value1 = LPSS_GPIODEF0_DMA1_D3 | LPSS_GPIODEF0_DMA2_D3 |
+ LPSS_GPIODEF0_DMA_LLP;
+ u32 mask1 = LPSS_GPIODEF0_DMA_D3_MASK | LPSS_GPIODEF0_DMA_LLP;
u32 value2 = LPSS_PMCSR_D0;
u32 mask2 = LPSS_PMCSR_Dx_MASK;
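
The value/mask pairs above follow read-modify-write semantics: bits selected by mask1 are set to the corresponding bits of value1 and every other bit is preserved, so adding LPSS_GPIODEF0_DMA_LLP to mask1 means the LLP bit is now cleared on D3 entry (value1 == 0) and set again on D3 exit. A minimal sketch of that semantics, assuming a hypothetical helper name (the real driver performs these updates through the IOSF mailbox interface):

    /* Hypothetical helper illustrating the masked update of GPIODEF0;
     * the actual driver issues the write via the IOSF mailbox. */
    static u32 lpss_masked_update(u32 reg, u32 value, u32 mask)
    {
            return (reg & ~mask) | (value & mask);
    }

    /* D3 entry: value1 == 0 clears DMA1_D3, DMA2_D3 and DMA_LLP.   */
    /* D3 exit:  value1 sets DMA1_D3 | DMA2_D3 | DMA_LLP back again. */
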
diff --git a/drivers/acpi/acpi_video.c b/drivers/acpi/acpi_video.c
index c5557d070954..201292e67ee8 100644
--- a/drivers/acpi/acpi_video.c
+++ b/drivers/acpi/acpi_video.c
@@ -43,17 +43,6 @@
#define ACPI_VIDEO_BUS_NAME "Video Bus"
#define ACPI_VIDEO_DEVICE_NAME "Video Device"
-#define ACPI_VIDEO_NOTIFY_SWITCH 0x80
-#define ACPI_VIDEO_NOTIFY_PROBE 0x81
-#define ACPI_VIDEO_NOTIFY_CYCLE 0x82
-#define ACPI_VIDEO_NOTIFY_NEXT_OUTPUT 0x83
-#define ACPI_VIDEO_NOTIFY_PREV_OUTPUT 0x84
-
-#define ACPI_VIDEO_NOTIFY_CYCLE_BRIGHTNESS 0x85
-#define ACPI_VIDEO_NOTIFY_INC_BRIGHTNESS 0x86
-#define ACPI_VIDEO_NOTIFY_DEC_BRIGHTNESS 0x87
-#define ACPI_VIDEO_NOTIFY_ZERO_BRIGHTNESS 0x88
-#define ACPI_VIDEO_NOTIFY_DISPLAY_OFF 0x89
#define MAX_NAME_LEN 20
diff --git a/drivers/acpi/acpica/acevents.h b/drivers/acpi/acpica/acevents.h
index 92fa47c6498c..8a0049d5cdf3 100644
--- a/drivers/acpi/acpica/acevents.h
+++ b/drivers/acpi/acpica/acevents.h
@@ -243,9 +243,7 @@ acpi_ev_default_region_setup(acpi_handle handle,
u32 function,
void *handler_context, void **region_context);
-acpi_status
-acpi_ev_initialize_region(union acpi_operand_object *region_obj,
- u8 acpi_ns_locked);
+acpi_status acpi_ev_initialize_region(union acpi_operand_object *region_obj);
/*
* evsci - SCI (System Control Interrupt) handling/dispatch
diff --git a/drivers/acpi/acpica/acglobal.h b/drivers/acpi/acpica/acglobal.h
index 750fa824d42c..edbb42e251a6 100644
--- a/drivers/acpi/acpica/acglobal.h
+++ b/drivers/acpi/acpica/acglobal.h
@@ -240,10 +240,6 @@ ACPI_INIT_GLOBAL(u32, acpi_gbl_nesting_level, 0);
ACPI_GLOBAL(struct acpi_thread_state *, acpi_gbl_current_walk_list);
-/* Maximum number of While() loop iterations before forced abort */
-
-ACPI_GLOBAL(u16, acpi_gbl_max_loop_iterations);
-
/* Control method single step flag */
ACPI_GLOBAL(u8, acpi_gbl_cm_single_step);
@@ -318,6 +314,7 @@ ACPI_INIT_GLOBAL(u8, acpi_gbl_cstyle_disassembly, TRUE);
ACPI_INIT_GLOBAL(u8, acpi_gbl_force_aml_disassembly, FALSE);
ACPI_INIT_GLOBAL(u8, acpi_gbl_dm_opt_verbose, TRUE);
ACPI_INIT_GLOBAL(u8, acpi_gbl_dm_emit_external_opcodes, FALSE);
+ACPI_INIT_GLOBAL(u8, acpi_gbl_do_disassembler_optimizations, TRUE);
ACPI_GLOBAL(u8, acpi_gbl_dm_opt_disasm);
ACPI_GLOBAL(u8, acpi_gbl_dm_opt_listing);
diff --git a/drivers/acpi/acpica/aclocal.h b/drivers/acpi/acpica/aclocal.h
index dff1207a6078..792660054992 100644
--- a/drivers/acpi/acpica/aclocal.h
+++ b/drivers/acpi/acpica/aclocal.h
@@ -765,7 +765,7 @@ union acpi_parse_value {
union acpi_parse_value value; /* Value or args associated with the opcode */\
u8 arg_list_length; /* Number of elements in the arg list */\
ACPI_DISASM_ONLY_MEMBERS (\
- u8 disasm_flags; /* Used during AML disassembly */\
+ u16 disasm_flags; /* Used during AML disassembly */\
u8 disasm_opcode; /* Subtype used for disassembly */\
char *operator_symbol;/* Used for C-style operator name strings */\
char aml_op_name[16]) /* Op name (debug only) */
@@ -868,14 +868,15 @@ struct acpi_parse_state {
/* Parse object disasm_flags */
-#define ACPI_PARSEOP_IGNORE 0x01
-#define ACPI_PARSEOP_PARAMETER_LIST 0x02
-#define ACPI_PARSEOP_EMPTY_TERMLIST 0x04
-#define ACPI_PARSEOP_PREDEFINED_CHECKED 0x08
-#define ACPI_PARSEOP_CLOSING_PAREN 0x10
-#define ACPI_PARSEOP_COMPOUND_ASSIGNMENT 0x20
-#define ACPI_PARSEOP_ASSIGNMENT 0x40
-#define ACPI_PARSEOP_ELSEIF 0x80
+#define ACPI_PARSEOP_IGNORE 0x0001
+#define ACPI_PARSEOP_PARAMETER_LIST 0x0002
+#define ACPI_PARSEOP_EMPTY_TERMLIST 0x0004
+#define ACPI_PARSEOP_PREDEFINED_CHECKED 0x0008
+#define ACPI_PARSEOP_CLOSING_PAREN 0x0010
+#define ACPI_PARSEOP_COMPOUND_ASSIGNMENT 0x0020
+#define ACPI_PARSEOP_ASSIGNMENT 0x0040
+#define ACPI_PARSEOP_ELSEIF 0x0080
+#define ACPI_PARSEOP_LEGACY_ASL_ONLY 0x0100
/*****************************************************************************
*
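
The u8 -> u16 widening above is forced by the flag list that follows it: the original eight ACPI_PARSEOP_* bits filled the old 8-bit field, and the new ACPI_PARSEOP_LEGACY_ASL_ONLY bit (0x0100) is the ninth. A sketch of the kind of test the new bit enables in the disassembler (the op pointer is hypothetical and assumed to come from a build with ACPI_DISASM_ONLY_MEMBERS enabled):

    union acpi_parse_object *op;    /* hypothetical parse op from the walk */

    if (op->common.disasm_flags & ACPI_PARSEOP_LEGACY_ASL_ONLY) {
            /* emit this construct only when producing legacy ASL output */
    }
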
diff --git a/drivers/acpi/acpica/acnamesp.h b/drivers/acpi/acpica/acnamesp.h
index bb7fca1c8ba3..7affdcdfcc81 100644
--- a/drivers/acpi/acpica/acnamesp.h
+++ b/drivers/acpi/acpica/acnamesp.h
@@ -292,6 +292,9 @@ char *acpi_ns_get_normalized_pathname(struct acpi_namespace_node *node,
char *acpi_ns_name_of_current_scope(struct acpi_walk_state *walk_state);
acpi_status
+acpi_ns_handle_to_name(acpi_handle target_handle, struct acpi_buffer *buffer);
+
+acpi_status
acpi_ns_handle_to_pathname(acpi_handle target_handle,
struct acpi_buffer *buffer, u8 no_trailing);
diff --git a/drivers/acpi/acpica/actables.h b/drivers/acpi/acpica/actables.h
index e85953b6fa0e..94be8a8e6c08 100644
--- a/drivers/acpi/acpica/actables.h
+++ b/drivers/acpi/acpica/actables.h
@@ -127,10 +127,11 @@ acpi_status
acpi_tb_load_table(u32 table_index, struct acpi_namespace_node *parent_node);
acpi_status
-acpi_tb_install_and_load_table(struct acpi_table_header *table,
- acpi_physical_address address,
+acpi_tb_install_and_load_table(acpi_physical_address address,
u8 flags, u8 override, u32 *table_index);
+acpi_status acpi_tb_unload_table(u32 table_index);
+
void acpi_tb_terminate(void);
acpi_status acpi_tb_delete_namespace_by_owner(u32 table_index);
@@ -165,6 +166,12 @@ acpi_tb_install_table_with_override(struct acpi_table_desc *new_table_desc,
acpi_status acpi_tb_parse_root_table(acpi_physical_address rsdp_address);
+acpi_status
+acpi_tb_get_table(struct acpi_table_desc *table_desc,
+ struct acpi_table_header **out_table);
+
+void acpi_tb_put_table(struct acpi_table_desc *table_desc);
+
/*
* tbxfload
*/
diff --git a/drivers/acpi/acpica/acutils.h b/drivers/acpi/acpica/acutils.h
index 0a1b53c9ee0e..845afb180a7e 100644
--- a/drivers/acpi/acpica/acutils.h
+++ b/drivers/acpi/acpica/acutils.h
@@ -232,6 +232,8 @@ const char *acpi_ut_get_region_name(u8 space_id);
const char *acpi_ut_get_event_name(u32 event_id);
+const char *acpi_ut_get_argument_type_name(u32 arg_type);
+
char acpi_ut_hex_to_ascii_char(u64 integer, u32 position);
acpi_status acpi_ut_ascii_to_hex_byte(char *two_ascii_chars, u8 *return_byte);
diff --git a/drivers/acpi/acpica/amlcode.h b/drivers/acpi/acpica/amlcode.h
index ceb4f7365f7f..6bd8d4bcff65 100644
--- a/drivers/acpi/acpica/amlcode.h
+++ b/drivers/acpi/acpica/amlcode.h
@@ -240,6 +240,7 @@
#define ARGP_QWORDDATA 0x11
#define ARGP_SIMPLENAME 0x12 /* name_string | local_term | arg_term */
#define ARGP_NAME_OR_REF 0x13 /* For object_type only */
+#define ARGP_MAX 0x13
/*
* Resolved argument types for the AML Interpreter
diff --git a/drivers/acpi/acpica/dsinit.c b/drivers/acpi/acpica/dsinit.c
index 54d48b90de2c..5de3f10cab03 100644
--- a/drivers/acpi/acpica/dsinit.c
+++ b/drivers/acpi/acpica/dsinit.c
@@ -221,8 +221,8 @@ acpi_ds_initialize_objects(u32 table_index,
*/
status =
acpi_ns_walk_namespace(ACPI_TYPE_ANY, start_node, ACPI_UINT32_MAX,
- 0, acpi_ds_init_one_object, NULL, &info,
- NULL);
+ ACPI_NS_WALK_NO_UNLOCK,
+ acpi_ds_init_one_object, NULL, &info, NULL);
if (ACPI_FAILURE(status)) {
ACPI_EXCEPTION((AE_INFO, status, "During WalkNamespace"));
}
diff --git a/drivers/acpi/acpica/dsopcode.c b/drivers/acpi/acpica/dsopcode.c
index 4cc9d989a114..77fd7c84ec39 100644
--- a/drivers/acpi/acpica/dsopcode.c
+++ b/drivers/acpi/acpica/dsopcode.c
@@ -84,7 +84,7 @@ acpi_status acpi_ds_initialize_region(acpi_handle obj_handle)
/* Namespace is NOT locked */
- status = acpi_ev_initialize_region(obj_desc, FALSE);
+ status = acpi_ev_initialize_region(obj_desc);
return (status);
}
diff --git a/drivers/acpi/acpica/dswload2.c b/drivers/acpi/acpica/dswload2.c
index e36218206bb0..651f35a66cc2 100644
--- a/drivers/acpi/acpica/dswload2.c
+++ b/drivers/acpi/acpica/dswload2.c
@@ -609,18 +609,7 @@ acpi_status acpi_ds_load2_end_op(struct acpi_walk_state *walk_state)
status =
acpi_ev_initialize_region
- (acpi_ns_get_attached_object(node), FALSE);
-
- if (ACPI_FAILURE(status)) {
- /*
- * If AE_NOT_EXIST is returned, it is not fatal
- * because many regions get created before a handler
- * is installed for said region.
- */
- if (AE_NOT_EXIST == status) {
- status = AE_OK;
- }
- }
+ (acpi_ns_get_attached_object(node));
break;
case AML_NAME_OP:
diff --git a/drivers/acpi/acpica/evrgnini.c b/drivers/acpi/acpica/evrgnini.c
index 75ddd160a716..a9092251ce80 100644
--- a/drivers/acpi/acpica/evrgnini.c
+++ b/drivers/acpi/acpica/evrgnini.c
@@ -479,7 +479,6 @@ acpi_ev_default_region_setup(acpi_handle handle,
* FUNCTION: acpi_ev_initialize_region
*
* PARAMETERS: region_obj - Region we are initializing
- * acpi_ns_locked - Is namespace locked?
*
* RETURN: Status
*
@@ -497,19 +496,28 @@ acpi_ev_default_region_setup(acpi_handle handle,
* MUTEX: Interpreter should be unlocked, because we may run the _REG
* method for this region.
*
+ * NOTE: Possible non-compliance:
+ * There is a behavior conflict in automatic _REG execution:
+ * 1. When the interpreter is evaluating a method, we can only
+ * automatically run _REG for the following case:
+ * operation_region (OPR1, 0x80, 0x1000010, 0x4)
+ * 2. When the interpreter is loading a table, we can also
+ * automatically run _REG for the following case:
+ * operation_region (OPR1, 0x80, 0x1000010, 0x4)
+ * Though this may not be compliant with the de-facto standard, the
+ * logic is kept in order not to trigger regressions, and preserving
+ * it is the responsibility of this function's callers.
+ *
******************************************************************************/
-acpi_status
-acpi_ev_initialize_region(union acpi_operand_object *region_obj,
- u8 acpi_ns_locked)
+acpi_status acpi_ev_initialize_region(union acpi_operand_object *region_obj)
{
union acpi_operand_object *handler_obj;
union acpi_operand_object *obj_desc;
acpi_adr_space_type space_id;
struct acpi_namespace_node *node;
- acpi_status status;
- ACPI_FUNCTION_TRACE_U32(ev_initialize_region, acpi_ns_locked);
+ ACPI_FUNCTION_TRACE(ev_initialize_region);
if (!region_obj) {
return_ACPI_STATUS(AE_BAD_PARAMETER);
@@ -580,39 +588,17 @@ acpi_ev_initialize_region(union acpi_operand_object *region_obj,
handler_obj, region_obj,
obj_desc));
- status =
- acpi_ev_attach_region(handler_obj,
- region_obj,
- acpi_ns_locked);
+ (void)acpi_ev_attach_region(handler_obj,
+ region_obj, FALSE);
/*
* Tell all users that this region is usable by
* running the _REG method
*/
- if (acpi_ns_locked) {
- status =
- acpi_ut_release_mutex
- (ACPI_MTX_NAMESPACE);
- if (ACPI_FAILURE(status)) {
- return_ACPI_STATUS(status);
- }
- }
-
acpi_ex_exit_interpreter();
- status =
- acpi_ev_execute_reg_method(region_obj,
- ACPI_REG_CONNECT);
+ (void)acpi_ev_execute_reg_method(region_obj,
+ ACPI_REG_CONNECT);
acpi_ex_enter_interpreter();
-
- if (acpi_ns_locked) {
- status =
- acpi_ut_acquire_mutex
- (ACPI_MTX_NAMESPACE);
- if (ACPI_FAILURE(status)) {
- return_ACPI_STATUS(status);
- }
- }
-
return_ACPI_STATUS(AE_OK);
}
}
@@ -622,12 +608,15 @@ acpi_ev_initialize_region(union acpi_operand_object *region_obj,
node = node->parent;
}
- /* If we get here, there is no handler for this region */
-
+ /*
+ * If we get here, there is no handler for this region. This is not
+ * fatal because many regions get created before a handler is installed
+ * for said region.
+ */
ACPI_DEBUG_PRINT((ACPI_DB_OPREGION,
"No handler for RegionType %s(%X) (RegionObj %p)\n",
acpi_ut_get_region_name(space_id), space_id,
region_obj));
- return_ACPI_STATUS(AE_NOT_EXIST);
+ return_ACPI_STATUS(AE_OK);
}
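
Read together, the dswload2.c and evrgnini.c hunks move the "missing handler is not fatal" policy into acpi_ev_initialize_region() itself: the function now returns AE_OK in that case, so callers no longer need to filter out AE_NOT_EXIST. A sketch of the resulting caller-side contract (illustrative only; status/node are assumed from the surrounding method):

    acpi_status status;

    /* Callers may treat any failure as real; "no handler yet" is AE_OK. */
    status = acpi_ev_initialize_region(acpi_ns_get_attached_object(node));
    if (ACPI_FAILURE(status)) {
            /* genuine error; regions without a handler are attached, and
             * their _REG methods run later, when a handler for the
             * region's address space is installed */
    }
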
diff --git a/drivers/acpi/acpica/exconfig.c b/drivers/acpi/acpica/exconfig.c
index 718428ba0b89..c32c7829878a 100644
--- a/drivers/acpi/acpica/exconfig.c
+++ b/drivers/acpi/acpica/exconfig.c
@@ -437,10 +437,9 @@ acpi_ex_load_op(union acpi_operand_object *obj_desc,
ACPI_INFO(("Dynamic OEM Table Load:"));
acpi_ex_exit_interpreter();
- status =
- acpi_tb_install_and_load_table(table, ACPI_PTR_TO_PHYSADDR(table),
- ACPI_TABLE_ORIGIN_INTERNAL_VIRTUAL,
- TRUE, &table_index);
+ status = acpi_tb_install_and_load_table(ACPI_PTR_TO_PHYSADDR(table),
+ ACPI_TABLE_ORIGIN_INTERNAL_VIRTUAL,
+ TRUE, &table_index);
acpi_ex_enter_interpreter();
if (ACPI_FAILURE(status)) {
@@ -500,7 +499,6 @@ acpi_status acpi_ex_unload_table(union acpi_operand_object *ddb_handle)
acpi_status status = AE_OK;
union acpi_operand_object *table_desc = ddb_handle;
u32 table_index;
- struct acpi_table_header *table;
ACPI_FUNCTION_TRACE(ex_unload_table);
@@ -537,39 +535,7 @@ acpi_status acpi_ex_unload_table(union acpi_operand_object *ddb_handle)
* strict order requirement against it.
*/
acpi_ex_exit_interpreter();
-
- /* Ensure the table is still loaded */
-
- if (!acpi_tb_is_table_loaded(table_index)) {
- status = AE_NOT_EXIST;
- goto lock_and_exit;
- }
-
- /* Invoke table handler if present */
-
- if (acpi_gbl_table_handler) {
- status = acpi_get_table_by_index(table_index, &table);
- if (ACPI_SUCCESS(status)) {
- (void)acpi_gbl_table_handler(ACPI_TABLE_EVENT_UNLOAD,
- table,
- acpi_gbl_table_handler_context);
- }
- }
-
- /* Delete the portion of the namespace owned by this table */
-
- status = acpi_tb_delete_namespace_by_owner(table_index);
- if (ACPI_FAILURE(status)) {
- goto lock_and_exit;
- }
-
- (void)acpi_tb_release_owner_id(table_index);
- acpi_tb_set_table_loaded_flag(table_index, FALSE);
-
-lock_and_exit:
-
- /* Re-acquire the interpreter lock */
-
+ status = acpi_tb_unload_table(table_index);
acpi_ex_enter_interpreter();
/*
diff --git a/drivers/acpi/acpica/nsnames.c b/drivers/acpi/acpica/nsnames.c
index f03dd41e86d0..94d5d3339845 100644
--- a/drivers/acpi/acpica/nsnames.c
+++ b/drivers/acpi/acpica/nsnames.c
@@ -97,6 +97,51 @@ acpi_size acpi_ns_get_pathname_length(struct acpi_namespace_node *node)
/*******************************************************************************
*
+ * FUNCTION: acpi_ns_handle_to_name
+ *
+ * PARAMETERS: target_handle - Handle of named object whose name is
+ * to be found
+ * buffer - Where the name is returned
+ *
+ * RETURN: Status. Buffer is filled with the name if status is AE_OK
+ *
+ * DESCRIPTION: Build and return the single-segment ACPI name of the node
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ns_handle_to_name(acpi_handle target_handle, struct acpi_buffer *buffer)
+{
+ acpi_status status;
+ struct acpi_namespace_node *node;
+ const char *node_name;
+
+ ACPI_FUNCTION_TRACE_PTR(ns_handle_to_name, target_handle);
+
+ node = acpi_ns_validate_handle(target_handle);
+ if (!node) {
+ return_ACPI_STATUS(AE_BAD_PARAMETER);
+ }
+
+ /* Validate/Allocate/Clear caller buffer */
+
+ status = acpi_ut_initialize_buffer(buffer, ACPI_PATH_SEGMENT_LENGTH);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ /* Just copy the ACPI name from the Node and zero terminate it */
+
+ node_name = acpi_ut_get_node_name(node);
+ ACPI_MOVE_NAME(buffer->pointer, node_name);
+ ((char *)buffer->pointer)[ACPI_NAME_SIZE] = 0;
+
+ ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "%4.4s\n", (char *)buffer->pointer));
+ return_ACPI_STATUS(AE_OK);
+}
+
+/*******************************************************************************
+ *
* FUNCTION: acpi_ns_handle_to_pathname
*
* PARAMETERS: target_handle - Handle of named object whose name is
diff --git a/drivers/acpi/acpica/nsxfname.c b/drivers/acpi/acpica/nsxfname.c
index 76a1bd4bb070..e525cbe7d83b 100644
--- a/drivers/acpi/acpica/nsxfname.c
+++ b/drivers/acpi/acpica/nsxfname.c
@@ -158,8 +158,6 @@ acpi_status
acpi_get_name(acpi_handle handle, u32 name_type, struct acpi_buffer *buffer)
{
acpi_status status;
- struct acpi_namespace_node *node;
- const char *node_name;
/* Parameter validation */
@@ -172,18 +170,6 @@ acpi_get_name(acpi_handle handle, u32 name_type, struct acpi_buffer *buffer)
return (status);
}
- if (name_type == ACPI_FULL_PATHNAME ||
- name_type == ACPI_FULL_PATHNAME_NO_TRAILING) {
-
- /* Get the full pathname (From the namespace root) */
-
- status = acpi_ns_handle_to_pathname(handle, buffer,
- name_type ==
- ACPI_FULL_PATHNAME ? FALSE :
- TRUE);
- return (status);
- }
-
/*
* Wants the single segment ACPI name.
* Validate handle and convert to a namespace Node
@@ -193,27 +179,20 @@ acpi_get_name(acpi_handle handle, u32 name_type, struct acpi_buffer *buffer)
return (status);
}
- node = acpi_ns_validate_handle(handle);
- if (!node) {
- status = AE_BAD_PARAMETER;
- goto unlock_and_exit;
- }
-
- /* Validate/Allocate/Clear caller buffer */
-
- status = acpi_ut_initialize_buffer(buffer, ACPI_PATH_SEGMENT_LENGTH);
- if (ACPI_FAILURE(status)) {
- goto unlock_and_exit;
- }
+ if (name_type == ACPI_FULL_PATHNAME ||
+ name_type == ACPI_FULL_PATHNAME_NO_TRAILING) {
- /* Just copy the ACPI name from the Node and zero terminate it */
+ /* Get the full pathname (From the namespace root) */
- node_name = acpi_ut_get_node_name(node);
- ACPI_MOVE_NAME(buffer->pointer, node_name);
- ((char *)buffer->pointer)[ACPI_NAME_SIZE] = 0;
- status = AE_OK;
+ status = acpi_ns_handle_to_pathname(handle, buffer,
+ name_type ==
+ ACPI_FULL_PATHNAME ? FALSE :
+ TRUE);
+ } else {
+ /* Get the single name */
-unlock_and_exit:
+ status = acpi_ns_handle_to_name(handle, buffer);
+ }
(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
return (status);
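
After this refactor, both name types are produced under a single namespace-mutex acquisition, with the single-segment case delegated to the new acpi_ns_handle_to_name(). A minimal usage sketch of the public entry point, assuming an existing acpi_handle named handle, the usual ACPI_ALLOCATE_BUFFER convention, and abbreviated error handling:

    struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL };
    acpi_status status;

    /* Single 4-character segment, served by acpi_ns_handle_to_name() */
    status = acpi_get_name(handle, ACPI_SINGLE_NAME, &buf);
    if (ACPI_SUCCESS(status)) {
            /* buf.pointer holds e.g. "_SB_" with a terminating NUL */
            ACPI_FREE(buf.pointer);
    }
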
diff --git a/drivers/acpi/acpica/tbdata.c b/drivers/acpi/acpica/tbdata.c
index d9ca8c2aa2d3..82b0b5710979 100644
--- a/drivers/acpi/acpica/tbdata.c
+++ b/drivers/acpi/acpica/tbdata.c
@@ -832,9 +832,9 @@ acpi_tb_load_table(u32 table_index, struct acpi_namespace_node *parent_node)
*
* FUNCTION: acpi_tb_install_and_load_table
*
- * PARAMETERS: table - Pointer to the table
- * address - Physical address of the table
+ * PARAMETERS: address - Physical address of the table
* flags - Allocation flags of the table
+ * override - Whether override should be performed
* table_index - Where table index is returned
*
* RETURN: Status
@@ -844,15 +844,13 @@ acpi_tb_load_table(u32 table_index, struct acpi_namespace_node *parent_node)
******************************************************************************/
acpi_status
-acpi_tb_install_and_load_table(struct acpi_table_header *table,
- acpi_physical_address address,
+acpi_tb_install_and_load_table(acpi_physical_address address,
u8 flags, u8 override, u32 *table_index)
{
acpi_status status;
u32 i;
- acpi_owner_id owner_id;
- ACPI_FUNCTION_TRACE(acpi_load_table);
+ ACPI_FUNCTION_TRACE(tb_install_and_load_table);
(void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
@@ -864,45 +862,60 @@ acpi_tb_install_and_load_table(struct acpi_table_header *table,
goto unlock_and_exit;
}
- /*
- * Note: Now table is "INSTALLED", it must be validated before
- * using.
- */
- status = acpi_tb_validate_table(&acpi_gbl_root_table_list.tables[i]);
- if (ACPI_FAILURE(status)) {
- goto unlock_and_exit;
- }
+ (void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
+ status = acpi_tb_load_table(i, acpi_gbl_root_node);
+ (void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
+unlock_and_exit:
+ *table_index = i;
(void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
- status = acpi_ns_load_table(i, acpi_gbl_root_node);
+ return_ACPI_STATUS(status);
+}
- /* Execute any module-level code that was found in the table */
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_tb_unload_table
+ *
+ * PARAMETERS: table_index - Table index
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Unload an ACPI table
+ *
+ ******************************************************************************/
- if (!acpi_gbl_parse_table_as_term_list
- && acpi_gbl_group_module_level_code) {
- acpi_ns_exec_module_code_list();
- }
+acpi_status acpi_tb_unload_table(u32 table_index)
+{
+ acpi_status status = AE_OK;
+ struct acpi_table_header *table;
- /*
- * Update GPEs for any new _Lxx/_Exx methods. Ignore errors. The host is
- * responsible for discovering any new wake GPEs by running _PRW methods
- * that may have been loaded by this table.
- */
- status = acpi_tb_get_owner_id(i, &owner_id);
- if (ACPI_SUCCESS(status)) {
- acpi_ev_update_gpes(owner_id);
+ ACPI_FUNCTION_TRACE(tb_unload_table);
+
+ /* Ensure the table is still loaded */
+
+ if (!acpi_tb_is_table_loaded(table_index)) {
+ return_ACPI_STATUS(AE_NOT_EXIST);
}
/* Invoke table handler if present */
if (acpi_gbl_table_handler) {
- (void)acpi_gbl_table_handler(ACPI_TABLE_EVENT_LOAD, table,
- acpi_gbl_table_handler_context);
+ status = acpi_get_table_by_index(table_index, &table);
+ if (ACPI_SUCCESS(status)) {
+ (void)acpi_gbl_table_handler(ACPI_TABLE_EVENT_UNLOAD,
+ table,
+ acpi_gbl_table_handler_context);
+ }
}
- (void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
-unlock_and_exit:
- *table_index = i;
- (void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
+ /* Delete the portion of the namespace owned by this table */
+
+ status = acpi_tb_delete_namespace_by_owner(table_index);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ (void)acpi_tb_release_owner_id(table_index);
+ acpi_tb_set_table_loaded_flag(table_index, FALSE);
return_ACPI_STATUS(status);
}
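
With acpi_tb_unload_table() factored out here, and the matching exconfig.c hunks earlier, dynamic table load and unload become symmetric single calls made with the interpreter lock dropped. A sketch of the pairing, using only names taken from the diff (the surrounding variables and method context are assumed):

    /* Load side (as in acpi_ex_load_op) */
    acpi_ex_exit_interpreter();
    status = acpi_tb_install_and_load_table(ACPI_PTR_TO_PHYSADDR(table),
                                            ACPI_TABLE_ORIGIN_INTERNAL_VIRTUAL,
                                            TRUE, &table_index);
    acpi_ex_enter_interpreter();

    /* Unload side (as in acpi_ex_unload_table): one call deletes the
     * table-owned namespace, releases the owner ID and clears the
     * loaded flag. */
    acpi_ex_exit_interpreter();
    status = acpi_tb_unload_table(table_index);
    acpi_ex_enter_interpreter();
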
diff --git a/drivers/acpi/acpica/tbfadt.c b/drivers/acpi/acpica/tbfadt.c
index 5fb838e592dc..81473a4880ce 100644
--- a/drivers/acpi/acpica/tbfadt.c
+++ b/drivers/acpi/acpica/tbfadt.c
@@ -311,6 +311,8 @@ void acpi_tb_parse_fadt(void)
{
u32 length;
struct acpi_table_header *table;
+ struct acpi_table_desc *fadt_desc;
+ acpi_status status;
/*
* The FADT has multiple versions with different lengths,
@@ -319,14 +321,12 @@ void acpi_tb_parse_fadt(void)
* Get a local copy of the FADT and convert it to a common format
* Map entire FADT, assumed to be smaller than one page.
*/
- length = acpi_gbl_root_table_list.tables[acpi_gbl_fadt_index].length;
-
- table =
- acpi_os_map_memory(acpi_gbl_root_table_list.
- tables[acpi_gbl_fadt_index].address, length);
- if (!table) {
+ fadt_desc = &acpi_gbl_root_table_list.tables[acpi_gbl_fadt_index];
+ status = acpi_tb_get_table(fadt_desc, &table);
+ if (ACPI_FAILURE(status)) {
return;
}
+ length = fadt_desc->length;
/*
* Validate the FADT checksum before we copy the table. Ignore
@@ -340,7 +340,7 @@ void acpi_tb_parse_fadt(void)
/* All done with the real FADT, unmap it */
- acpi_os_unmap_memory(table, length);
+ acpi_tb_put_table(fadt_desc);
/* Obtain the DSDT and FACS tables via their addresses within the FADT */
diff --git a/drivers/acpi/acpica/tbutils.c b/drivers/acpi/acpica/tbutils.c
index 51eb07cf9898..86854e846800 100644
--- a/drivers/acpi/acpica/tbutils.c
+++ b/drivers/acpi/acpica/tbutils.c
@@ -381,3 +381,88 @@ next_table:
acpi_os_unmap_memory(table, length);
return_ACPI_STATUS(AE_OK);
}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_tb_get_table
+ *
+ * PARAMETERS: table_desc - Table descriptor
+ * out_table - Where the pointer to the table is returned
+ *
+ * RETURN: Status and pointer to the requested table
+ *
+ * DESCRIPTION: Increase the reference count of a table descriptor and return
+ *              the validated table pointer.
+ * If the table descriptor is an entry of the root table list,
+ * this API must be invoked with ACPI_MTX_TABLES acquired.
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_tb_get_table(struct acpi_table_desc *table_desc,
+ struct acpi_table_header **out_table)
+{
+ acpi_status status;
+
+ ACPI_FUNCTION_TRACE(acpi_tb_get_table);
+
+ if (table_desc->validation_count == 0) {
+
+		/* Table needs to be "VALIDATED" */
+
+ status = acpi_tb_validate_table(table_desc);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+ }
+
+ table_desc->validation_count++;
+ if (table_desc->validation_count == 0) {
+ ACPI_ERROR((AE_INFO,
+ "Table %p, Validation count is zero after increment\n",
+ table_desc));
+ table_desc->validation_count--;
+ return_ACPI_STATUS(AE_LIMIT);
+ }
+
+ *out_table = table_desc->pointer;
+ return_ACPI_STATUS(AE_OK);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_tb_put_table
+ *
+ * PARAMETERS: table_desc - Table descriptor
+ *
+ * RETURN: None
+ *
+ * DESCRIPTION: Decrease the reference count of a table descriptor and release
+ *              the validated table pointer when no references remain.
+ * If the table descriptor is an entry of the root table list,
+ * this API must be invoked with ACPI_MTX_TABLES acquired.
+ *
+ ******************************************************************************/
+
+void acpi_tb_put_table(struct acpi_table_desc *table_desc)
+{
+
+ ACPI_FUNCTION_TRACE(acpi_tb_put_table);
+
+ if (table_desc->validation_count == 0) {
+ ACPI_WARNING((AE_INFO,
+ "Table %p, Validation count is zero before decrement\n",
+ table_desc));
+ return_VOID;
+ }
+ table_desc->validation_count--;
+
+ if (table_desc->validation_count == 0) {
+
+		/* Table needs to be "INVALIDATED" */
+
+ acpi_tb_invalidate_table(table_desc);
+ }
+
+ return_VOID;
+}
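
Taken together, acpi_tb_get_table() and acpi_tb_put_table() form a reference count around the map/unmap cycle: the first get validates (maps) the table, each put drops a reference, and the last put invalidates (unmaps) it. A minimal user-space sketch of the same pattern; map_table() and unmap_table() are hypothetical stand-ins for acpi_tb_validate_table() and acpi_tb_invalidate_table():

#include <stdio.h>

struct table_desc {
	unsigned short validation_count;	/* references to the mapping */
	void *pointer;				/* valid while count > 0 */
};

/* Hypothetical stand-ins for acpi_tb_validate_table()/acpi_tb_invalidate_table() */
static char table_image[] = "mapped table";
static void *map_table(void) { return table_image; }
static void unmap_table(void) { }

static int tb_get(struct table_desc *d, void **out)
{
	if (d->validation_count == 0)
		d->pointer = map_table();	/* first user maps the table */

	d->validation_count++;
	if (d->validation_count == 0) {		/* counter wrapped: overflow */
		d->validation_count--;
		return -1;
	}

	*out = d->pointer;
	return 0;
}

static void tb_put(struct table_desc *d)
{
	if (d->validation_count == 0)
		return;				/* unbalanced put, ignore */

	d->validation_count--;
	if (d->validation_count == 0) {		/* last user unmaps */
		unmap_table();
		d->pointer = NULL;
	}
}

int main(void)
{
	struct table_desc d = { 0, NULL };
	void *t;

	tb_get(&d, &t);		/* maps, count = 1 */
	tb_get(&d, &t);		/* count = 2, no remap */
	tb_put(&d);		/* count = 1, still mapped */
	tb_put(&d);		/* count = 0, unmapped */
	printf("pointer after last put: %p\n", d.pointer);
	return 0;
}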
diff --git a/drivers/acpi/acpica/tbxface.c b/drivers/acpi/acpica/tbxface.c
index 4ab6b9cd0aec..7684707b254b 100644
--- a/drivers/acpi/acpica/tbxface.c
+++ b/drivers/acpi/acpica/tbxface.c
@@ -167,6 +167,7 @@ ACPI_EXPORT_SYMBOL_INIT(acpi_initialize_tables)
acpi_status ACPI_INIT_FUNCTION acpi_reallocate_root_table(void)
{
acpi_status status;
+ u32 i;
ACPI_FUNCTION_TRACE(acpi_reallocate_root_table);
@@ -178,6 +179,21 @@ acpi_status ACPI_INIT_FUNCTION acpi_reallocate_root_table(void)
return_ACPI_STATUS(AE_SUPPORT);
}
+	/*
+	 * Ensure that no table is still mapped from the early boot stage.
+	 * Some hosts require this. If a table is reported below, the issue
+	 * should be fixed by invoking acpi_put_table() for the reported
+	 * table during the early stage.
+	 */
+ for (i = 0; i < acpi_gbl_root_table_list.current_table_count; ++i) {
+ if (acpi_gbl_root_table_list.tables[i].pointer) {
+ ACPI_ERROR((AE_INFO,
+ "Table [%4.4s] is not invalidated during early boot stage",
+ acpi_gbl_root_table_list.tables[i].
+ signature.ascii));
+ }
+ }
+
acpi_gbl_root_table_list.flags |= ACPI_ROOT_ALLOW_RESIZE;
status = acpi_tb_resize_root_table_list();
@@ -266,7 +282,7 @@ ACPI_EXPORT_SYMBOL(acpi_get_table_header)
/*******************************************************************************
*
- * FUNCTION: acpi_get_table_with_size
+ * FUNCTION: acpi_get_table
*
* PARAMETERS: signature - ACPI signature of needed table
* instance - Which instance (for SSDTs)
@@ -276,16 +292,21 @@ ACPI_EXPORT_SYMBOL(acpi_get_table_header)
*
* DESCRIPTION: Finds and verifies an ACPI table. Table must be in the
* RSDT/XSDT.
+ *              Note that an early stage acpi_get_table() call must be paired
+ *              with an early stage acpi_put_table() call; otherwise the table
+ *              pointer mapped by the early stage mapping implementation may be
+ *              erroneously unmapped by the late stage unmapping implementation
+ *              if acpi_put_table() is invoked during the late stage.
*
******************************************************************************/
acpi_status
-acpi_get_table_with_size(char *signature,
- u32 instance, struct acpi_table_header **out_table,
- acpi_size *tbl_size)
+acpi_get_table(char *signature,
+ u32 instance, struct acpi_table_header ** out_table)
{
u32 i;
u32 j;
- acpi_status status;
+ acpi_status status = AE_NOT_FOUND;
+ struct acpi_table_desc *table_desc;
/* Parameter validation */
@@ -293,13 +314,22 @@ acpi_get_table_with_size(char *signature,
return (AE_BAD_PARAMETER);
}
+	/*
+	 * Note that the following line is required by some OSPMs: they only
+	 * check whether the returned table is NULL, rather than the returned
+	 * status, to determine whether this function succeeded.
+	 */
+ *out_table = NULL;
+
+ (void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
+
/* Walk the root table list */
for (i = 0, j = 0; i < acpi_gbl_root_table_list.current_table_count;
i++) {
- if (!ACPI_COMPARE_NAME
- (&(acpi_gbl_root_table_list.tables[i].signature),
- signature)) {
+ table_desc = &acpi_gbl_root_table_list.tables[i];
+
+ if (!ACPI_COMPARE_NAME(&table_desc->signature, signature)) {
continue;
}
@@ -307,43 +337,65 @@ acpi_get_table_with_size(char *signature,
continue;
}
- status =
- acpi_tb_validate_table(&acpi_gbl_root_table_list.tables[i]);
- if (ACPI_SUCCESS(status)) {
- *out_table = acpi_gbl_root_table_list.tables[i].pointer;
- *tbl_size = acpi_gbl_root_table_list.tables[i].length;
- }
-
- if (!acpi_gbl_permanent_mmap) {
- acpi_gbl_root_table_list.tables[i].pointer = NULL;
- }
-
- return (status);
+ status = acpi_tb_get_table(table_desc, out_table);
+ break;
}
- return (AE_NOT_FOUND);
+ (void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
+ return (status);
}
-ACPI_EXPORT_SYMBOL(acpi_get_table_with_size)
+ACPI_EXPORT_SYMBOL(acpi_get_table)
-acpi_status
-acpi_get_table(char *signature,
- u32 instance, struct acpi_table_header **out_table)
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_put_table
+ *
+ * PARAMETERS: table - The pointer to the table
+ *
+ * RETURN: None
+ *
+ * DESCRIPTION: Release a table returned by acpi_get_table() or one of its
+ *              clones.
+ *              Note that it is not safe to invoke this function after the
+ *              original table descriptor has been uninstalled. Currently no
+ *              OSPM is required to handle such a situation.
+ *
+ ******************************************************************************/
+void acpi_put_table(struct acpi_table_header *table)
{
- acpi_size tbl_size;
+ u32 i;
+ struct acpi_table_desc *table_desc;
+
+ ACPI_FUNCTION_TRACE(acpi_put_table);
+
+ (void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
+
+ /* Walk the root table list */
+
+ for (i = 0; i < acpi_gbl_root_table_list.current_table_count; i++) {
+ table_desc = &acpi_gbl_root_table_list.tables[i];
+
+ if (table_desc->pointer != table) {
+ continue;
+ }
+
+ acpi_tb_put_table(table_desc);
+ break;
+ }
- return acpi_get_table_with_size(signature,
- instance, out_table, &tbl_size);
+ (void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
+ return_VOID;
}
-ACPI_EXPORT_SYMBOL(acpi_get_table)
+ACPI_EXPORT_SYMBOL(acpi_put_table)
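
With acpi_get_table_with_size() gone, every successful acpi_get_table() is expected to be balanced by an acpi_put_table() once the caller is done with the mapping. A minimal caller sketch, assuming the ACPICA headers and an initialized table manager; the MCFG signature is only an example:

#include <acpi/acpi.h>

static void inspect_mcfg(void)
{
	struct acpi_table_header *table;
	acpi_status status;

	status = acpi_get_table(ACPI_SIG_MCFG, 0, &table);
	if (ACPI_FAILURE(status))
		return;			/* table absent; table was set to NULL */

	/* ... use table->length, table->oem_id, ... */

	acpi_put_table(table);		/* drop the reference taken above */
}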
/*******************************************************************************
*
* FUNCTION: acpi_get_table_by_index
*
* PARAMETERS: table_index - Table index
- * table - Where the pointer to the table is returned
+ * out_table - Where the pointer to the table is returned
*
* RETURN: Status and pointer to the requested table
*
@@ -352,7 +404,7 @@ ACPI_EXPORT_SYMBOL(acpi_get_table)
*
******************************************************************************/
acpi_status
-acpi_get_table_by_index(u32 table_index, struct acpi_table_header **table)
+acpi_get_table_by_index(u32 table_index, struct acpi_table_header **out_table)
{
acpi_status status;
@@ -360,35 +412,33 @@ acpi_get_table_by_index(u32 table_index, struct acpi_table_header **table)
/* Parameter validation */
- if (!table) {
+ if (!out_table) {
return_ACPI_STATUS(AE_BAD_PARAMETER);
}
+	/*
+	 * Note that the following line is required by some OSPMs: they only
+	 * check whether the returned table is NULL, rather than the returned
+	 * status, to determine whether this function succeeded.
+	 */
+ *out_table = NULL;
+
(void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
/* Validate index */
if (table_index >= acpi_gbl_root_table_list.current_table_count) {
- (void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
- return_ACPI_STATUS(AE_BAD_PARAMETER);
+ status = AE_BAD_PARAMETER;
+ goto unlock_and_exit;
}
- if (!acpi_gbl_root_table_list.tables[table_index].pointer) {
+ status =
+ acpi_tb_get_table(&acpi_gbl_root_table_list.tables[table_index],
+ out_table);
- /* Table is not mapped, map it */
-
- status =
- acpi_tb_validate_table(&acpi_gbl_root_table_list.
- tables[table_index]);
- if (ACPI_FAILURE(status)) {
- (void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
- return_ACPI_STATUS(status);
- }
- }
-
- *table = acpi_gbl_root_table_list.tables[table_index].pointer;
+unlock_and_exit:
(void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
- return_ACPI_STATUS(AE_OK);
+ return_ACPI_STATUS(status);
}
ACPI_EXPORT_SYMBOL(acpi_get_table_by_index)
diff --git a/drivers/acpi/acpica/tbxfload.c b/drivers/acpi/acpica/tbxfload.c
index 5569f637f669..82019c01a0e5 100644
--- a/drivers/acpi/acpica/tbxfload.c
+++ b/drivers/acpi/acpica/tbxfload.c
@@ -239,7 +239,7 @@ acpi_status acpi_tb_load_namespace(void)
}
if (!tables_failed) {
- ACPI_INFO(("%u ACPI AML tables successfully acquired and loaded\n", tables_loaded));
+ ACPI_INFO(("%u ACPI AML tables successfully acquired and loaded", tables_loaded));
} else {
ACPI_ERROR((AE_INFO,
"%u table load failures, %u successful",
@@ -250,6 +250,10 @@ acpi_status acpi_tb_load_namespace(void)
status = AE_CTRL_TERMINATE;
}
+#ifdef ACPI_APPLICATION
+ ACPI_DEBUG_PRINT_RAW((ACPI_DB_INIT, "\n"));
+#endif
+
unlock_and_exit:
(void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
return_ACPI_STATUS(status);
@@ -326,10 +330,9 @@ acpi_status acpi_load_table(struct acpi_table_header *table)
/* Install the table and load it into the namespace */
ACPI_INFO(("Host-directed Dynamic ACPI Table Load:"));
- status =
- acpi_tb_install_and_load_table(table, ACPI_PTR_TO_PHYSADDR(table),
- ACPI_TABLE_ORIGIN_EXTERNAL_VIRTUAL,
- FALSE, &table_index);
+ status = acpi_tb_install_and_load_table(ACPI_PTR_TO_PHYSADDR(table),
+ ACPI_TABLE_ORIGIN_EXTERNAL_VIRTUAL,
+ FALSE, &table_index);
return_ACPI_STATUS(status);
}
@@ -405,37 +408,8 @@ acpi_status acpi_unload_parent_table(acpi_handle object)
break;
}
- /* Ensure the table is actually loaded */
-
(void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
- if (!acpi_tb_is_table_loaded(i)) {
- status = AE_NOT_EXIST;
- (void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
- break;
- }
-
- /* Invoke table handler if present */
-
- if (acpi_gbl_table_handler) {
- (void)acpi_gbl_table_handler(ACPI_TABLE_EVENT_UNLOAD,
- acpi_gbl_root_table_list.
- tables[i].pointer,
- acpi_gbl_table_handler_context);
- }
-
- /*
- * Delete all namespace objects owned by this table. Note that
- * these objects can appear anywhere in the namespace by virtue
- * of the AML "Scope" operator. Thus, we need to track ownership
- * by an ID, not simply a position within the hierarchy.
- */
- status = acpi_tb_delete_namespace_by_owner(i);
- if (ACPI_FAILURE(status)) {
- break;
- }
-
- status = acpi_tb_release_owner_id(i);
- acpi_tb_set_table_loaded_flag(i, FALSE);
+ status = acpi_tb_unload_table(i);
(void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
break;
}
diff --git a/drivers/acpi/acpica/utdecode.c b/drivers/acpi/acpica/utdecode.c
index 15728ad8356b..b3d8421cfb80 100644
--- a/drivers/acpi/acpica/utdecode.c
+++ b/drivers/acpi/acpica/utdecode.c
@@ -44,6 +44,7 @@
#include <acpi/acpi.h>
#include "accommon.h"
#include "acnamesp.h"
+#include "amlcode.h"
#define _COMPONENT ACPI_UTILITIES
ACPI_MODULE_NAME("utdecode")
@@ -532,6 +533,54 @@ const char *acpi_ut_get_notify_name(u32 notify_value, acpi_object_type type)
return ("Hardware-Specific");
}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_get_argument_type_name
+ *
+ * PARAMETERS: arg_type - an ARGP_* parser argument type
+ *
+ * RETURN: Decoded ARGP_* type
+ *
+ * DESCRIPTION: Decode an ARGP_* parser type, as defined in the amlcode.h file,
+ * and used in the acopcode.h file. For example, ARGP_TERMARG.
+ * Used for debug only.
+ *
+ ******************************************************************************/
+
+static const char *acpi_gbl_argument_type[20] = {
+ /* 00 */ "Unknown ARGP",
+ /* 01 */ "ByteData",
+ /* 02 */ "ByteList",
+ /* 03 */ "CharList",
+ /* 04 */ "DataObject",
+ /* 05 */ "DataObjectList",
+ /* 06 */ "DWordData",
+ /* 07 */ "FieldList",
+ /* 08 */ "Name",
+ /* 09 */ "NameString",
+ /* 0A */ "ObjectList",
+ /* 0B */ "PackageLength",
+ /* 0C */ "SuperName",
+ /* 0D */ "Target",
+ /* 0E */ "TermArg",
+ /* 0F */ "TermList",
+ /* 10 */ "WordData",
+ /* 11 */ "QWordData",
+ /* 12 */ "SimpleName",
+ /* 13 */ "NameOrRef"
+};
+
+const char *acpi_ut_get_argument_type_name(u32 arg_type)
+{
+
+ if (arg_type > ARGP_MAX) {
+ return ("Unknown ARGP");
+ }
+
+ return (acpi_gbl_argument_type[arg_type]);
+}
+
#endif
/*******************************************************************************
diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c
index 0d099a24f776..e53bef6cf53c 100644
--- a/drivers/acpi/apei/ghes.c
+++ b/drivers/acpi/apei/ghes.c
@@ -852,6 +852,8 @@ static int ghes_notify_nmi(unsigned int cmd, struct pt_regs *regs)
if (ghes_read_estatus(ghes, 1)) {
ghes_clear_estatus(ghes);
continue;
+ } else {
+ ret = NMI_HANDLED;
}
sev = ghes_severity(ghes->estatus->error_severity);
@@ -863,12 +865,11 @@ static int ghes_notify_nmi(unsigned int cmd, struct pt_regs *regs)
__process_error(ghes);
ghes_clear_estatus(ghes);
-
- ret = NMI_HANDLED;
}
#ifdef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
- irq_work_queue(&ghes_proc_irq_work);
+ if (ret == NMI_HANDLED)
+ irq_work_queue(&ghes_proc_irq_work);
#endif
atomic_dec(&ghes_in_nmi);
return ret;
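
The reworked handler claims the NMI as soon as one estatus read succeeds and queues the deferred irq_work only when something was actually consumed. A self-contained sketch of that control flow; the source and estatus helpers are hypothetical reductions of the GHES ones:

#include <stdbool.h>
#include <stdio.h>

struct source { bool has_error; };

static bool read_estatus(struct source *s) { return s->has_error; }
static void process_error(struct source *s) { s->has_error = false; }
static void queue_deferred_work(void) { puts("irq_work queued"); }

static int notify(struct source *srcs, int n)
{
	int handled = 0;

	for (int i = 0; i < n; i++) {
		if (!read_estatus(&srcs[i]))
			continue;	/* nothing pending on this source */
		handled = 1;		/* claim the NMI */
		process_error(&srcs[i]);
	}

	if (handled)			/* only defer work if we consumed something */
		queue_deferred_work();

	return handled;
}

int main(void)
{
	struct source s[2] = { { false }, { true } };

	printf("handled=%d\n", notify(s, 2));
	return 0;
}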
diff --git a/drivers/acpi/apei/hest.c b/drivers/acpi/apei/hest.c
index 20b3fcf4007c..8f2a98e23bba 100644
--- a/drivers/acpi/apei/hest.c
+++ b/drivers/acpi/apei/hest.c
@@ -123,7 +123,13 @@ EXPORT_SYMBOL_GPL(apei_hest_parse);
*/
static int __init hest_parse_cmc(struct acpi_hest_header *hest_hdr, void *data)
{
- return arch_apei_enable_cmcff(hest_hdr, data);
+ if (hest_hdr->type != ACPI_HEST_TYPE_IA32_CORRECTED_CHECK)
+ return 0;
+
+ if (!acpi_disable_cmcff)
+ return !arch_apei_enable_cmcff(hest_hdr, data);
+
+ return 0;
}
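
apei_hest_parse() stops walking the HEST as soon as a callback returns nonzero, so the callback now filters on the entry type itself and inverts the arch helper's 1-on-success return into 0-means-continue. A reduced sketch of that iterator contract; hest_parse() and enable_cmcff() are hypothetical stand-ins:

#include <stdio.h>

struct header { int type; };

typedef int (*parse_cb)(struct header *h, void *data);

/* Iterator contract: a nonzero callback return aborts the walk */
static int hest_parse(struct header *tbl, int n, parse_cb cb, void *data)
{
	for (int i = 0; i < n; i++) {
		int rc = cb(&tbl[i], data);

		if (rc)
			return rc;
	}
	return 0;
}

static int enable_cmcff(struct header *h) { (void)h; return 1; /* 1 = success */ }

static int parse_cmc(struct header *h, void *data)
{
	(void)data;
	if (h->type != 0)		/* only IA32 corrected-check entries */
		return 0;
	return !enable_cmcff(h);	/* invert: 0 continues, nonzero aborts */
}

int main(void)
{
	struct header tbl[] = { { 1 }, { 0 }, { 2 } };

	printf("rc=%d\n", hest_parse(tbl, 3, parse_cmc, NULL));
	return 0;
}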
struct ghes_arr {
@@ -232,8 +238,9 @@ void __init acpi_hest_init(void)
goto err;
}
- if (!acpi_disable_cmcff)
- apei_hest_parse(hest_parse_cmc, NULL);
+ rc = apei_hest_parse(hest_parse_cmc, NULL);
+ if (rc)
+ goto err;
if (!ghes_disable) {
rc = apei_hest_parse(hest_parse_ghes_count, &ghes_count);
diff --git a/drivers/acpi/arm64/iort.c b/drivers/acpi/arm64/iort.c
index 6b81746cd13c..e0d2e6e6e40c 100644
--- a/drivers/acpi/arm64/iort.c
+++ b/drivers/acpi/arm64/iort.c
@@ -19,8 +19,17 @@
#define pr_fmt(fmt) "ACPI: IORT: " fmt
#include <linux/acpi_iort.h>
+#include <linux/iommu.h>
#include <linux/kernel.h>
+#include <linux/list.h>
#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#define IORT_TYPE_MASK(type) (1 << (type))
+#define IORT_MSI_TYPE (1 << ACPI_IORT_NODE_ITS_GROUP)
+#define IORT_IOMMU_TYPE ((1 << ACPI_IORT_NODE_SMMU) | \
+ (1 << ACPI_IORT_NODE_SMMU_V3))
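
IORT_TYPE_MASK() turns a node-type enumerator into a single bit so one lookup can match several node types at once; IORT_MSI_TYPE and IORT_IOMMU_TYPE are pre-built masks. A quick demonstration, with enumerator values mirroring the ACPI_IORT_NODE_* definitions (ITS group = 0 through SMMUv3 = 4):

#include <stdio.h>

enum { ITS_GROUP, NAMED_COMPONENT, PCI_ROOT_COMPLEX, SMMU, SMMU_V3 };

#define TYPE_MASK(t)	(1u << (t))
#define IOMMU_TYPES	(TYPE_MASK(SMMU) | TYPE_MASK(SMMU_V3))

int main(void)
{
	for (int t = ITS_GROUP; t <= SMMU_V3; t++)
		printf("type %d matches IOMMU mask: %s\n", t,
		       (TYPE_MASK(t) & IOMMU_TYPES) ? "yes" : "no");
	return 0;	/* only SMMU (3) and SMMU_V3 (4) match */
}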
struct iort_its_msi_chip {
struct list_head list;
@@ -28,6 +37,90 @@ struct iort_its_msi_chip {
u32 translation_id;
};
+struct iort_fwnode {
+ struct list_head list;
+ struct acpi_iort_node *iort_node;
+ struct fwnode_handle *fwnode;
+};
+static LIST_HEAD(iort_fwnode_list);
+static DEFINE_SPINLOCK(iort_fwnode_lock);
+
+/**
+ * iort_set_fwnode() - Create iort_fwnode and use it to register
+ * iommu data in the iort_fwnode_list
+ *
+ * @iort_node: IORT table node associated with the IOMMU
+ * @fwnode: fwnode associated with the IORT node
+ *
+ * Returns: 0 on success
+ * <0 on failure
+ */
+static inline int iort_set_fwnode(struct acpi_iort_node *iort_node,
+ struct fwnode_handle *fwnode)
+{
+ struct iort_fwnode *np;
+
+ np = kzalloc(sizeof(struct iort_fwnode), GFP_ATOMIC);
+
+ if (WARN_ON(!np))
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&np->list);
+ np->iort_node = iort_node;
+ np->fwnode = fwnode;
+
+ spin_lock(&iort_fwnode_lock);
+ list_add_tail(&np->list, &iort_fwnode_list);
+ spin_unlock(&iort_fwnode_lock);
+
+ return 0;
+}
+
+/**
+ * iort_get_fwnode() - Retrieve fwnode associated with an IORT node
+ *
+ * @node: IORT table node to be looked-up
+ *
+ * Returns: fwnode_handle pointer on success, NULL on failure
+ */
+static inline
+struct fwnode_handle *iort_get_fwnode(struct acpi_iort_node *node)
+{
+ struct iort_fwnode *curr;
+ struct fwnode_handle *fwnode = NULL;
+
+ spin_lock(&iort_fwnode_lock);
+ list_for_each_entry(curr, &iort_fwnode_list, list) {
+ if (curr->iort_node == node) {
+ fwnode = curr->fwnode;
+ break;
+ }
+ }
+ spin_unlock(&iort_fwnode_lock);
+
+ return fwnode;
+}
+
+/**
+ * iort_delete_fwnode() - Delete fwnode associated with an IORT node
+ *
+ * @node: IORT table node associated with fwnode to delete
+ */
+static inline void iort_delete_fwnode(struct acpi_iort_node *node)
+{
+ struct iort_fwnode *curr, *tmp;
+
+ spin_lock(&iort_fwnode_lock);
+ list_for_each_entry_safe(curr, tmp, &iort_fwnode_list, list) {
+ if (curr->iort_node == node) {
+ list_del(&curr->list);
+ kfree(curr);
+ break;
+ }
+ }
+ spin_unlock(&iort_fwnode_lock);
+}
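
The three helpers above implement a small lock-protected registry mapping IORT nodes to fwnode handles. A self-contained sketch of the same structure, using a pthread mutex in place of the spinlock and opaque pointers for the node and fwnode types:

#include <pthread.h>
#include <stdlib.h>

struct entry {
	struct entry *next;
	void *node;		/* key:   IORT node */
	void *fwnode;		/* value: fwnode handle */
};

static struct entry *head;
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static int set_fwnode(void *node, void *fwnode)
{
	struct entry *e = calloc(1, sizeof(*e));

	if (!e)
		return -1;
	e->node = node;
	e->fwnode = fwnode;

	pthread_mutex_lock(&lock);
	e->next = head;			/* insert at list head */
	head = e;
	pthread_mutex_unlock(&lock);
	return 0;
}

static void *get_fwnode(void *node)
{
	void *fwnode = NULL;

	pthread_mutex_lock(&lock);
	for (struct entry *e = head; e; e = e->next)
		if (e->node == node) {
			fwnode = e->fwnode;
			break;
		}
	pthread_mutex_unlock(&lock);
	return fwnode;
}

static void delete_fwnode(void *node)
{
	pthread_mutex_lock(&lock);
	for (struct entry **p = &head; *p; p = &(*p)->next)
		if ((*p)->node == node) {
			struct entry *e = *p;

			*p = e->next;	/* unlink */
			free(e);
			break;
		}
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	int node;			/* any distinct address works as a key */
	void *fw;

	set_fwnode(&node, (void *)0x1234);
	fw = get_fwnode(&node);
	delete_fwnode(&node);
	return fw == (void *)0x1234 ? 0 : 1;
}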
+
typedef acpi_status (*iort_find_node_callback)
(struct acpi_iort_node *node, void *context);
@@ -141,6 +234,21 @@ static struct acpi_iort_node *iort_scan_node(enum acpi_iort_node_type type,
return NULL;
}
+static acpi_status
+iort_match_type_callback(struct acpi_iort_node *node, void *context)
+{
+ return AE_OK;
+}
+
+bool iort_node_match(u8 type)
+{
+ struct acpi_iort_node *node;
+
+ node = iort_scan_node(type, iort_match_type_callback, NULL);
+
+ return node != NULL;
+}
+
static acpi_status iort_match_node_callback(struct acpi_iort_node *node,
void *context)
{
@@ -212,9 +320,48 @@ static int iort_id_map(struct acpi_iort_id_mapping *map, u8 type, u32 rid_in,
return 0;
}
+static
+struct acpi_iort_node *iort_node_get_id(struct acpi_iort_node *node,
+ u32 *id_out, u8 type_mask,
+ int index)
+{
+ struct acpi_iort_node *parent;
+ struct acpi_iort_id_mapping *map;
+
+ if (!node->mapping_offset || !node->mapping_count ||
+ index >= node->mapping_count)
+ return NULL;
+
+ map = ACPI_ADD_PTR(struct acpi_iort_id_mapping, node,
+ node->mapping_offset);
+
+ /* Firmware bug! */
+ if (!map->output_reference) {
+ pr_err(FW_BUG "[node %p type %d] ID map has NULL parent reference\n",
+ node, node->type);
+ return NULL;
+ }
+
+ parent = ACPI_ADD_PTR(struct acpi_iort_node, iort_table,
+ map->output_reference);
+
+ if (!(IORT_TYPE_MASK(parent->type) & type_mask))
+ return NULL;
+
+ if (map[index].flags & ACPI_IORT_ID_SINGLE_MAPPING) {
+ if (node->type == ACPI_IORT_NODE_NAMED_COMPONENT ||
+ node->type == ACPI_IORT_NODE_PCI_ROOT_COMPLEX) {
+ *id_out = map[index].output_base;
+ return parent;
+ }
+ }
+
+ return NULL;
+}
+
static struct acpi_iort_node *iort_node_map_rid(struct acpi_iort_node *node,
u32 rid_in, u32 *rid_out,
- u8 type)
+ u8 type_mask)
{
u32 rid = rid_in;
@@ -223,7 +370,7 @@ static struct acpi_iort_node *iort_node_map_rid(struct acpi_iort_node *node,
struct acpi_iort_id_mapping *map;
int i;
- if (node->type == type) {
+ if (IORT_TYPE_MASK(node->type) & type_mask) {
if (rid_out)
*rid_out = rid;
return node;
@@ -296,7 +443,7 @@ u32 iort_msi_map_rid(struct device *dev, u32 req_id)
if (!node)
return req_id;
- iort_node_map_rid(node, req_id, &dev_id, ACPI_IORT_NODE_ITS_GROUP);
+ iort_node_map_rid(node, req_id, &dev_id, IORT_MSI_TYPE);
return dev_id;
}
@@ -318,7 +465,7 @@ static int iort_dev_find_its_id(struct device *dev, u32 req_id,
if (!node)
return -ENXIO;
- node = iort_node_map_rid(node, req_id, NULL, ACPI_IORT_NODE_ITS_GROUP);
+ node = iort_node_map_rid(node, req_id, NULL, IORT_MSI_TYPE);
if (!node)
return -ENXIO;
@@ -356,13 +503,459 @@ struct irq_domain *iort_get_device_domain(struct device *dev, u32 req_id)
return irq_find_matching_fwnode(handle, DOMAIN_BUS_PCI_MSI);
}
+static int __get_pci_rid(struct pci_dev *pdev, u16 alias, void *data)
+{
+ u32 *rid = data;
+
+ *rid = alias;
+ return 0;
+}
+
+static int arm_smmu_iort_xlate(struct device *dev, u32 streamid,
+ struct fwnode_handle *fwnode,
+ const struct iommu_ops *ops)
+{
+ int ret = iommu_fwspec_init(dev, fwnode, ops);
+
+ if (!ret)
+ ret = iommu_fwspec_add_ids(dev, &streamid, 1);
+
+ return ret;
+}
+
+static const struct iommu_ops *iort_iommu_xlate(struct device *dev,
+ struct acpi_iort_node *node,
+ u32 streamid)
+{
+ const struct iommu_ops *ops = NULL;
+ int ret = -ENODEV;
+ struct fwnode_handle *iort_fwnode;
+
+ if (node) {
+ iort_fwnode = iort_get_fwnode(node);
+ if (!iort_fwnode)
+ return NULL;
+
+ ops = iommu_get_instance(iort_fwnode);
+ if (!ops)
+ return NULL;
+
+ ret = arm_smmu_iort_xlate(dev, streamid, iort_fwnode, ops);
+ }
+
+ return ret ? NULL : ops;
+}
+
+/**
+ * iort_set_dma_mask - Set-up dma mask for a device.
+ *
+ * @dev: device to configure
+ */
+void iort_set_dma_mask(struct device *dev)
+{
+ /*
+ * Set default coherent_dma_mask to 32 bit. Drivers are expected to
+	 * set up the correct supported mask.
+ */
+ if (!dev->coherent_dma_mask)
+ dev->coherent_dma_mask = DMA_BIT_MASK(32);
+
+ /*
+ * Set it to coherent_dma_mask by default if the architecture
+ * code has not set it.
+ */
+ if (!dev->dma_mask)
+ dev->dma_mask = &dev->coherent_dma_mask;
+}
+
+/**
+ * iort_iommu_configure - Set-up IOMMU configuration for a device.
+ *
+ * @dev: device to configure
+ *
+ * Returns: iommu_ops pointer on configuration success
+ * NULL on configuration failure
+ */
+const struct iommu_ops *iort_iommu_configure(struct device *dev)
+{
+ struct acpi_iort_node *node, *parent;
+ const struct iommu_ops *ops = NULL;
+ u32 streamid = 0;
+
+ if (dev_is_pci(dev)) {
+ struct pci_bus *bus = to_pci_dev(dev)->bus;
+ u32 rid;
+
+ pci_for_each_dma_alias(to_pci_dev(dev), __get_pci_rid,
+ &rid);
+
+ node = iort_scan_node(ACPI_IORT_NODE_PCI_ROOT_COMPLEX,
+ iort_match_node_callback, &bus->dev);
+ if (!node)
+ return NULL;
+
+ parent = iort_node_map_rid(node, rid, &streamid,
+ IORT_IOMMU_TYPE);
+
+ ops = iort_iommu_xlate(dev, parent, streamid);
+
+ } else {
+ int i = 0;
+
+ node = iort_scan_node(ACPI_IORT_NODE_NAMED_COMPONENT,
+ iort_match_node_callback, dev);
+ if (!node)
+ return NULL;
+
+ parent = iort_node_get_id(node, &streamid,
+ IORT_IOMMU_TYPE, i++);
+
+ while (parent) {
+ ops = iort_iommu_xlate(dev, parent, streamid);
+
+ parent = iort_node_get_id(node, &streamid,
+ IORT_IOMMU_TYPE, i++);
+ }
+ }
+
+ return ops;
+}
+
+static void __init acpi_iort_register_irq(int hwirq, const char *name,
+ int trigger,
+ struct resource *res)
+{
+ int irq = acpi_register_gsi(NULL, hwirq, trigger,
+ ACPI_ACTIVE_HIGH);
+
+ if (irq <= 0) {
+ pr_err("could not register gsi hwirq %d name [%s]\n", hwirq,
+ name);
+ return;
+ }
+
+ res->start = irq;
+ res->end = irq;
+ res->flags = IORESOURCE_IRQ;
+ res->name = name;
+}
+
+static int __init arm_smmu_v3_count_resources(struct acpi_iort_node *node)
+{
+ struct acpi_iort_smmu_v3 *smmu;
+ /* Always present mem resource */
+ int num_res = 1;
+
+ /* Retrieve SMMUv3 specific data */
+ smmu = (struct acpi_iort_smmu_v3 *)node->node_data;
+
+ if (smmu->event_gsiv)
+ num_res++;
+
+ if (smmu->pri_gsiv)
+ num_res++;
+
+ if (smmu->gerr_gsiv)
+ num_res++;
+
+ if (smmu->sync_gsiv)
+ num_res++;
+
+ return num_res;
+}
+
+static void __init arm_smmu_v3_init_resources(struct resource *res,
+ struct acpi_iort_node *node)
+{
+ struct acpi_iort_smmu_v3 *smmu;
+ int num_res = 0;
+
+ /* Retrieve SMMUv3 specific data */
+ smmu = (struct acpi_iort_smmu_v3 *)node->node_data;
+
+ res[num_res].start = smmu->base_address;
+ res[num_res].end = smmu->base_address + SZ_128K - 1;
+ res[num_res].flags = IORESOURCE_MEM;
+
+ num_res++;
+
+ if (smmu->event_gsiv)
+ acpi_iort_register_irq(smmu->event_gsiv, "eventq",
+ ACPI_EDGE_SENSITIVE,
+ &res[num_res++]);
+
+ if (smmu->pri_gsiv)
+ acpi_iort_register_irq(smmu->pri_gsiv, "priq",
+ ACPI_EDGE_SENSITIVE,
+ &res[num_res++]);
+
+ if (smmu->gerr_gsiv)
+ acpi_iort_register_irq(smmu->gerr_gsiv, "gerror",
+ ACPI_EDGE_SENSITIVE,
+ &res[num_res++]);
+
+ if (smmu->sync_gsiv)
+ acpi_iort_register_irq(smmu->sync_gsiv, "cmdq-sync",
+ ACPI_EDGE_SENSITIVE,
+ &res[num_res++]);
+}
+
+static bool __init arm_smmu_v3_is_coherent(struct acpi_iort_node *node)
+{
+ struct acpi_iort_smmu_v3 *smmu;
+
+ /* Retrieve SMMUv3 specific data */
+ smmu = (struct acpi_iort_smmu_v3 *)node->node_data;
+
+ return smmu->flags & ACPI_IORT_SMMU_V3_COHACC_OVERRIDE;
+}
+
+static int __init arm_smmu_count_resources(struct acpi_iort_node *node)
+{
+ struct acpi_iort_smmu *smmu;
+
+ /* Retrieve SMMU specific data */
+ smmu = (struct acpi_iort_smmu *)node->node_data;
+
+ /*
+ * Only consider the global fault interrupt and ignore the
+ * configuration access interrupt.
+ *
+ * MMIO address and global fault interrupt resources are always
+ * present so add them to the context interrupt count as a static
+ * value.
+ */
+ return smmu->context_interrupt_count + 2;
+}
+
+static void __init arm_smmu_init_resources(struct resource *res,
+ struct acpi_iort_node *node)
+{
+ struct acpi_iort_smmu *smmu;
+ int i, hw_irq, trigger, num_res = 0;
+ u64 *ctx_irq, *glb_irq;
+
+ /* Retrieve SMMU specific data */
+ smmu = (struct acpi_iort_smmu *)node->node_data;
+
+ res[num_res].start = smmu->base_address;
+ res[num_res].end = smmu->base_address + smmu->span - 1;
+ res[num_res].flags = IORESOURCE_MEM;
+ num_res++;
+
+ glb_irq = ACPI_ADD_PTR(u64, node, smmu->global_interrupt_offset);
+ /* Global IRQs */
+ hw_irq = IORT_IRQ_MASK(glb_irq[0]);
+ trigger = IORT_IRQ_TRIGGER_MASK(glb_irq[0]);
+
+ acpi_iort_register_irq(hw_irq, "arm-smmu-global", trigger,
+ &res[num_res++]);
+
+ /* Context IRQs */
+ ctx_irq = ACPI_ADD_PTR(u64, node, smmu->context_interrupt_offset);
+ for (i = 0; i < smmu->context_interrupt_count; i++) {
+ hw_irq = IORT_IRQ_MASK(ctx_irq[i]);
+ trigger = IORT_IRQ_TRIGGER_MASK(ctx_irq[i]);
+
+ acpi_iort_register_irq(hw_irq, "arm-smmu-context", trigger,
+ &res[num_res++]);
+ }
+}
+
+static bool __init arm_smmu_is_coherent(struct acpi_iort_node *node)
+{
+ struct acpi_iort_smmu *smmu;
+
+ /* Retrieve SMMU specific data */
+ smmu = (struct acpi_iort_smmu *)node->node_data;
+
+ return smmu->flags & ACPI_IORT_SMMU_COHERENT_WALK;
+}
+
+struct iort_iommu_config {
+ const char *name;
+ int (*iommu_init)(struct acpi_iort_node *node);
+ bool (*iommu_is_coherent)(struct acpi_iort_node *node);
+ int (*iommu_count_resources)(struct acpi_iort_node *node);
+ void (*iommu_init_resources)(struct resource *res,
+ struct acpi_iort_node *node);
+};
+
+static const struct iort_iommu_config iort_arm_smmu_v3_cfg __initconst = {
+ .name = "arm-smmu-v3",
+ .iommu_is_coherent = arm_smmu_v3_is_coherent,
+ .iommu_count_resources = arm_smmu_v3_count_resources,
+ .iommu_init_resources = arm_smmu_v3_init_resources
+};
+
+static const struct iort_iommu_config iort_arm_smmu_cfg __initconst = {
+ .name = "arm-smmu",
+ .iommu_is_coherent = arm_smmu_is_coherent,
+ .iommu_count_resources = arm_smmu_count_resources,
+ .iommu_init_resources = arm_smmu_init_resources
+};
+
+static __init
+const struct iort_iommu_config *iort_get_iommu_cfg(struct acpi_iort_node *node)
+{
+ switch (node->type) {
+ case ACPI_IORT_NODE_SMMU_V3:
+ return &iort_arm_smmu_v3_cfg;
+ case ACPI_IORT_NODE_SMMU:
+ return &iort_arm_smmu_cfg;
+ default:
+ return NULL;
+ }
+}
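
iort_get_iommu_cfg() is a small per-type dispatch: each SMMU model contributes a const ops table with its own count/init/coherency callbacks, and the probe path never branches on the model again. A compact sketch of the pattern with hypothetical callbacks:

#include <stdio.h>

struct node { int type; };

struct cfg {
	const char *name;
	int (*count_resources)(const struct node *);
};

static int smmu_count(const struct node *n)    { (void)n; return 3; }
static int smmu_v3_count(const struct node *n) { (void)n; return 5; }

static const struct cfg smmu_cfg    = { "arm-smmu",    smmu_count };
static const struct cfg smmu_v3_cfg = { "arm-smmu-v3", smmu_v3_count };

static const struct cfg *get_cfg(const struct node *n)
{
	switch (n->type) {
	case 3: return &smmu_cfg;	/* ACPI_IORT_NODE_SMMU */
	case 4: return &smmu_v3_cfg;	/* ACPI_IORT_NODE_SMMU_V3 */
	default: return NULL;		/* no platform device for this type */
	}
}

int main(void)
{
	struct node n = { 4 };
	const struct cfg *c = get_cfg(&n);

	if (c)
		printf("%s: %d resources\n", c->name, c->count_resources(&n));
	return 0;
}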
+
+/**
+ * iort_add_smmu_platform_device() - Allocate a platform device for SMMU
+ * @node: Pointer to SMMU ACPI IORT node
+ *
+ * Returns: 0 on success, <0 on failure
+ */
+static int __init iort_add_smmu_platform_device(struct acpi_iort_node *node)
+{
+ struct fwnode_handle *fwnode;
+ struct platform_device *pdev;
+ struct resource *r;
+ enum dev_dma_attr attr;
+ int ret, count;
+ const struct iort_iommu_config *ops = iort_get_iommu_cfg(node);
+
+ if (!ops)
+ return -ENODEV;
+
+ pdev = platform_device_alloc(ops->name, PLATFORM_DEVID_AUTO);
+ if (!pdev)
+		return -ENOMEM;
+
+ count = ops->iommu_count_resources(node);
+
+ r = kcalloc(count, sizeof(*r), GFP_KERNEL);
+ if (!r) {
+ ret = -ENOMEM;
+ goto dev_put;
+ }
+
+ ops->iommu_init_resources(r, node);
+
+ ret = platform_device_add_resources(pdev, r, count);
+ /*
+ * Resources are duplicated in platform_device_add_resources,
+ * free their allocated memory
+ */
+ kfree(r);
+
+ if (ret)
+ goto dev_put;
+
+ /*
+ * Add a copy of IORT node pointer to platform_data to
+ * be used to retrieve IORT data information.
+ */
+ ret = platform_device_add_data(pdev, &node, sizeof(node));
+ if (ret)
+ goto dev_put;
+
+ /*
+ * We expect the dma masks to be equivalent for
+	 * We expect the dma masks to be equivalent for
+	 * all SMMU set-ups
+ pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
+
+ fwnode = iort_get_fwnode(node);
+
+ if (!fwnode) {
+ ret = -ENODEV;
+ goto dev_put;
+ }
+
+ pdev->dev.fwnode = fwnode;
+
+ attr = ops->iommu_is_coherent(node) ?
+ DEV_DMA_COHERENT : DEV_DMA_NON_COHERENT;
+
+ /* Configure DMA for the page table walker */
+ acpi_dma_configure(&pdev->dev, attr);
+
+ ret = platform_device_add(pdev);
+ if (ret)
+ goto dma_deconfigure;
+
+ return 0;
+
+dma_deconfigure:
+ acpi_dma_deconfigure(&pdev->dev);
+dev_put:
+ platform_device_put(pdev);
+
+ return ret;
+}
+
+static void __init iort_init_platform_devices(void)
+{
+ struct acpi_iort_node *iort_node, *iort_end;
+ struct acpi_table_iort *iort;
+ struct fwnode_handle *fwnode;
+ int i, ret;
+
+ /*
+ * iort_table and iort both point to the start of IORT table, but
+ * have different struct types
+ */
+ iort = (struct acpi_table_iort *)iort_table;
+
+ /* Get the first IORT node */
+ iort_node = ACPI_ADD_PTR(struct acpi_iort_node, iort,
+ iort->node_offset);
+ iort_end = ACPI_ADD_PTR(struct acpi_iort_node, iort,
+ iort_table->length);
+
+ for (i = 0; i < iort->node_count; i++) {
+ if (iort_node >= iort_end) {
+ pr_err("iort node pointer overflows, bad table\n");
+ return;
+ }
+
+ if ((iort_node->type == ACPI_IORT_NODE_SMMU) ||
+ (iort_node->type == ACPI_IORT_NODE_SMMU_V3)) {
+
+ fwnode = acpi_alloc_fwnode_static();
+ if (!fwnode)
+ return;
+
+ iort_set_fwnode(iort_node, fwnode);
+
+ ret = iort_add_smmu_platform_device(iort_node);
+ if (ret) {
+ iort_delete_fwnode(iort_node);
+ acpi_free_fwnode_static(fwnode);
+ return;
+ }
+ }
+
+ iort_node = ACPI_ADD_PTR(struct acpi_iort_node, iort_node,
+ iort_node->length);
+ }
+}
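
iort_init_platform_devices() walks node_count variable-length records, advancing by each node's own length field and bailing out if the pointer would run past the table end. The same walk in a self-contained form; the {type, length} record header is a reduced, hypothetical stand-in for struct acpi_iort_node:

#include <stdint.h>
#include <stdio.h>

struct rec { uint8_t type; uint8_t length; /* payload follows */ };

static void walk(const uint8_t *buf, size_t size, unsigned count)
{
	const uint8_t *p = buf, *end = buf + size;

	for (unsigned i = 0; i < count; i++) {
		const struct rec *r = (const struct rec *)p;

		if (p + sizeof(*r) > end || p + r->length > end) {
			fprintf(stderr, "record pointer overflows, bad table\n");
			return;
		}
		printf("record %u: type %u, length %u\n", i, r->type, r->length);
		p += r->length;		/* advance by the record's own length */
	}
}

int main(void)
{
	/* two records: {type 3, len 4, 2 bytes payload}, {type 4, len 2} */
	uint8_t buf[] = { 3, 4, 0xaa, 0xbb, 4, 2 };

	walk(buf, sizeof(buf), 2);
	return 0;
}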
+
void __init acpi_iort_init(void)
{
acpi_status status;
status = acpi_get_table(ACPI_SIG_IORT, 0, &iort_table);
- if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
- const char *msg = acpi_format_exception(status);
- pr_err("Failed to get table, %s\n", msg);
+ if (ACPI_FAILURE(status)) {
+ if (status != AE_NOT_FOUND) {
+ const char *msg = acpi_format_exception(status);
+
+ pr_err("Failed to get table, %s\n", msg);
+ }
+
+ return;
}
+
+ iort_init_platform_devices();
+
+ acpi_probe_device_table(iort);
}
diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
index 93ecae55fe6a..05fe9ebfb9b5 100644
--- a/drivers/acpi/battery.c
+++ b/drivers/acpi/battery.c
@@ -430,39 +430,24 @@ static int acpi_battery_get_status(struct acpi_battery *battery)
return 0;
}
-static int acpi_battery_get_info(struct acpi_battery *battery)
+
+static int extract_battery_info(const int use_bix,
+ struct acpi_battery *battery,
+ const struct acpi_buffer *buffer)
{
int result = -EFAULT;
- acpi_status status = 0;
- char *name = test_bit(ACPI_BATTERY_XINFO_PRESENT, &battery->flags) ?
- "_BIX" : "_BIF";
-
- struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
- if (!acpi_battery_present(battery))
- return 0;
- mutex_lock(&battery->lock);
- status = acpi_evaluate_object(battery->device->handle, name,
- NULL, &buffer);
- mutex_unlock(&battery->lock);
-
- if (ACPI_FAILURE(status)) {
- ACPI_EXCEPTION((AE_INFO, status, "Evaluating %s", name));
- return -ENODEV;
- }
-
- if (battery_bix_broken_package)
- result = extract_package(battery, buffer.pointer,
+ if (use_bix && battery_bix_broken_package)
+ result = extract_package(battery, buffer->pointer,
extended_info_offsets + 1,
ARRAY_SIZE(extended_info_offsets) - 1);
- else if (test_bit(ACPI_BATTERY_XINFO_PRESENT, &battery->flags))
- result = extract_package(battery, buffer.pointer,
+ else if (use_bix)
+ result = extract_package(battery, buffer->pointer,
extended_info_offsets,
ARRAY_SIZE(extended_info_offsets));
else
- result = extract_package(battery, buffer.pointer,
+ result = extract_package(battery, buffer->pointer,
info_offsets, ARRAY_SIZE(info_offsets));
- kfree(buffer.pointer);
if (test_bit(ACPI_BATTERY_QUIRK_PERCENTAGE_CAPACITY, &battery->flags))
battery->full_charge_capacity = battery->design_capacity;
if (test_bit(ACPI_BATTERY_QUIRK_THINKPAD_MAH, &battery->flags) &&
@@ -483,6 +468,45 @@ static int acpi_battery_get_info(struct acpi_battery *battery)
return result;
}
+static int acpi_battery_get_info(struct acpi_battery *battery)
+{
+ const int xinfo = test_bit(ACPI_BATTERY_XINFO_PRESENT, &battery->flags);
+ int use_bix;
+ int result = -ENODEV;
+
+ if (!acpi_battery_present(battery))
+ return 0;
+
+ for (use_bix = xinfo ? 1 : 0; use_bix >= 0; use_bix--) {
+ struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
+ acpi_status status = AE_ERROR;
+
+ mutex_lock(&battery->lock);
+ status = acpi_evaluate_object(battery->device->handle,
+ use_bix ? "_BIX":"_BIF",
+ NULL, &buffer);
+ mutex_unlock(&battery->lock);
+
+ if (ACPI_FAILURE(status)) {
+ ACPI_EXCEPTION((AE_INFO, status, "Evaluating %s",
+ use_bix ? "_BIX":"_BIF"));
+ } else {
+ result = extract_battery_info(use_bix,
+ battery,
+ &buffer);
+
+ kfree(buffer.pointer);
+ break;
+ }
+ }
+
+ if (!result && !use_bix && xinfo)
+ pr_warn(FW_BUG "The _BIX method is broken, using _BIF.\n");
+
+ return result;
+}
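
The rewritten acpi_battery_get_info() folds the two evaluation paths into one fallback loop: use_bix counts down from 1 (when _BIX is advertised) to 0, and the loop breaks on the first method that evaluates successfully. A compact sketch of that shape; evaluate() is a hypothetical stand-in for acpi_evaluate_object():

#include <stdio.h>

/* Hypothetical stand-in: pretend _BIX is broken and only _BIF works */
static int evaluate(const char *method)
{
	return method[3] == 'F' ? 0 : -1;	/* only "_BIF" succeeds */
}

static int get_info(int have_bix)
{
	int result = -1;
	int use_bix;

	for (use_bix = have_bix ? 1 : 0; use_bix >= 0; use_bix--) {
		result = evaluate(use_bix ? "_BIX" : "_BIF");
		if (!result)
			break;		/* first method that works wins */
	}

	if (!result && !use_bix && have_bix)
		puts("FW_BUG: _BIX is broken, using _BIF");
	return result;
}

int main(void)
{
	return get_info(1);
}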
+
static int acpi_battery_get_state(struct acpi_battery *battery)
{
int result = 0;
diff --git a/drivers/acpi/blacklist.c b/drivers/acpi/blacklist.c
index bdc67bad61a7..4421f7c9981c 100644
--- a/drivers/acpi/blacklist.c
+++ b/drivers/acpi/blacklist.c
@@ -160,6 +160,34 @@ static struct dmi_system_id acpi_rev_dmi_table[] __initdata = {
DMI_MATCH(DMI_PRODUCT_NAME, "XPS 13 9343"),
},
},
+ {
+ .callback = dmi_enable_rev_override,
+ .ident = "DELL Precision 5520",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Precision 5520"),
+ },
+ },
+ {
+ .callback = dmi_enable_rev_override,
+ .ident = "DELL Precision 3520",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3520"),
+ },
+ },
+ /*
+ * Resolves a quirk with the Dell Latitude 3350 that
+ * causes the ethernet adapter to not function.
+ */
+ {
+ .callback = dmi_enable_rev_override,
+ .ident = "DELL Latitude 3350",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Latitude 3350"),
+ },
+ },
#endif
{}
};
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
index 56190d00fd87..95855cb9d6fb 100644
--- a/drivers/acpi/bus.c
+++ b/drivers/acpi/bus.c
@@ -331,6 +331,16 @@ static void acpi_bus_osc_support(void)
capbuf[OSC_SUPPORT_DWORD] |= OSC_SB_HOTPLUG_OST_SUPPORT;
capbuf[OSC_SUPPORT_DWORD] |= OSC_SB_PCLPI_SUPPORT;
+#ifdef CONFIG_X86
+ if (boot_cpu_has(X86_FEATURE_HWP)) {
+ capbuf[OSC_SUPPORT_DWORD] |= OSC_SB_CPC_SUPPORT;
+ capbuf[OSC_SUPPORT_DWORD] |= OSC_SB_CPCV2_SUPPORT;
+ }
+#endif
+
+ if (IS_ENABLED(CONFIG_SCHED_MC_PRIO))
+ capbuf[OSC_SUPPORT_DWORD] |= OSC_SB_CPC_DIVERSE_HIGH_SUPPORT;
+
if (!ghes_disable)
capbuf[OSC_SUPPORT_DWORD] |= OSC_SB_APEI_SUPPORT;
if (ACPI_FAILURE(acpi_get_handle(NULL, "\\_SB", &handle)))
@@ -964,7 +974,7 @@ void __init acpi_early_init(void)
if (!acpi_strict)
acpi_gbl_enable_interpreter_slack = TRUE;
- acpi_gbl_permanent_mmap = 1;
+ acpi_permanent_mmap = true;
/*
* If the machine falls into the DMI check table,
diff --git a/drivers/acpi/cppc_acpi.c b/drivers/acpi/cppc_acpi.c
index d0d0504b7c89..3ca0729f7e0e 100644
--- a/drivers/acpi/cppc_acpi.c
+++ b/drivers/acpi/cppc_acpi.c
@@ -776,21 +776,25 @@ int acpi_cppc_processor_probe(struct acpi_processor *pr)
init_waitqueue_head(&pcc_data.pcc_write_wait_q);
}
- /* Plug PSD data into this CPUs CPC descriptor. */
- per_cpu(cpc_desc_ptr, pr->id) = cpc_ptr;
-
/* Everything looks okay */
pr_debug("Parsed CPC struct for CPU: %d\n", pr->id);
/* Add per logical CPU nodes for reading its feedback counters. */
cpu_dev = get_cpu_device(pr->id);
- if (!cpu_dev)
+ if (!cpu_dev) {
+ ret = -EINVAL;
goto out_free;
+ }
+
+ /* Plug PSD data into this CPUs CPC descriptor. */
+ per_cpu(cpc_desc_ptr, pr->id) = cpc_ptr;
ret = kobject_init_and_add(&cpc_ptr->kobj, &cppc_ktype, &cpu_dev->kobj,
"acpi_cppc");
- if (ret)
+ if (ret) {
+ per_cpu(cpc_desc_ptr, pr->id) = NULL;
goto out_free;
+ }
kfree(output.pointer);
return 0;
@@ -824,6 +828,8 @@ void acpi_cppc_processor_exit(struct acpi_processor *pr)
void __iomem *addr;
cpc_ptr = per_cpu(cpc_desc_ptr, pr->id);
+ if (!cpc_ptr)
+ return;
/* Free all the mapped sys mem areas for this CPU */
for (i = 2; i < cpc_ptr->num_entries; i++) {
diff --git a/drivers/acpi/device_sysfs.c b/drivers/acpi/device_sysfs.c
index 7b2c48fde4e2..24418932612e 100644
--- a/drivers/acpi/device_sysfs.c
+++ b/drivers/acpi/device_sysfs.c
@@ -52,7 +52,7 @@ struct acpi_data_node_attr {
static ssize_t data_node_show_path(struct acpi_data_node *dn, char *buf)
{
- return acpi_object_path(dn->handle, buf);
+ return dn->handle ? acpi_object_path(dn->handle, buf) : 0;
}
DATA_NODE_ATTR(path);
@@ -105,10 +105,10 @@ static void acpi_expose_nondev_subnodes(struct kobject *kobj,
init_completion(&dn->kobj_done);
ret = kobject_init_and_add(&dn->kobj, &acpi_data_node_ktype,
kobj, "%s", dn->name);
- if (ret)
- acpi_handle_err(dn->handle, "Failed to expose (%d)\n", ret);
- else
+ if (!ret)
acpi_expose_nondev_subnodes(&dn->kobj, &dn->data);
+ else if (dn->handle)
+ acpi_handle_err(dn->handle, "Failed to expose (%d)\n", ret);
}
}
diff --git a/drivers/acpi/event.c b/drivers/acpi/event.c
index e24ea4e796e4..7fceb3b4691b 100644
--- a/drivers/acpi/event.c
+++ b/drivers/acpi/event.c
@@ -82,8 +82,8 @@ static const struct genl_multicast_group acpi_event_mcgrps[] = {
{ .name = ACPI_GENL_MCAST_GROUP_NAME, },
};
-static struct genl_family acpi_event_genl_family = {
- .id = GENL_ID_GENERATE,
+static struct genl_family acpi_event_genl_family __ro_after_init = {
+ .module = THIS_MODULE,
.name = ACPI_GENL_FAMILY_NAME,
.version = ACPI_GENL_VERSION,
.maxattr = ACPI_GENL_ATTR_MAX,
@@ -144,7 +144,7 @@ int acpi_bus_generate_netlink_event(const char *device_class,
EXPORT_SYMBOL(acpi_bus_generate_netlink_event);
-static int acpi_event_genetlink_init(void)
+static int __init acpi_event_genetlink_init(void)
{
return genl_register_family(&acpi_event_genl_family);
}
diff --git a/drivers/acpi/glue.c b/drivers/acpi/glue.c
index 5ea5dc219f56..f8d65647ea79 100644
--- a/drivers/acpi/glue.c
+++ b/drivers/acpi/glue.c
@@ -227,8 +227,7 @@ int acpi_bind_one(struct device *dev, struct acpi_device *acpi_dev)
attr = acpi_get_dma_attr(acpi_dev);
if (attr != DEV_DMA_NOT_SUPPORTED)
- arch_setup_dma_ops(dev, 0, 0, NULL,
- attr == DEV_DMA_COHERENT);
+ acpi_dma_configure(dev, attr);
acpi_physnode_link_name(physical_node_name, node_id);
retval = sysfs_create_link(&acpi_dev->dev.kobj, &dev->kobj,
@@ -251,6 +250,7 @@ int acpi_bind_one(struct device *dev, struct acpi_device *acpi_dev)
return 0;
err:
+ acpi_dma_deconfigure(dev);
ACPI_COMPANION_SET(dev, NULL);
put_device(dev);
put_device(&acpi_dev->dev);
diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c
index 312c4b4dc363..2f82b8eba360 100644
--- a/drivers/acpi/nfit/core.c
+++ b/drivers/acpi/nfit/core.c
@@ -2806,12 +2806,13 @@ static int acpi_nfit_add(struct acpi_device *adev)
acpi_size sz;
int rc = 0;
- status = acpi_get_table_with_size(ACPI_SIG_NFIT, 0, &tbl, &sz);
+ status = acpi_get_table(ACPI_SIG_NFIT, 0, &tbl);
if (ACPI_FAILURE(status)) {
/* This is ok, we could have an nvdimm hotplugged later */
dev_dbg(dev, "failed to find NFIT at startup\n");
return 0;
}
+ sz = tbl->length;
acpi_desc = devm_kzalloc(dev, sizeof(*acpi_desc), GFP_KERNEL);
if (!acpi_desc)
diff --git a/drivers/acpi/numa.c b/drivers/acpi/numa.c
index ce3a7a16f03f..edb0c79f7c64 100644
--- a/drivers/acpi/numa.c
+++ b/drivers/acpi/numa.c
@@ -70,7 +70,7 @@ int acpi_map_pxm_to_node(int pxm)
{
int node;
- if (pxm < 0 || pxm >= MAX_PXM_DOMAINS)
+ if (pxm < 0 || pxm >= MAX_PXM_DOMAINS || numa_off)
return NUMA_NO_NODE;
node = pxm_to_node_map[pxm];
diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
index 416953a42510..a404ff4d7151 100644
--- a/drivers/acpi/osl.c
+++ b/drivers/acpi/osl.c
@@ -76,6 +76,7 @@ static struct workqueue_struct *kacpi_notify_wq;
static struct workqueue_struct *kacpi_hotplug_wq;
static bool acpi_os_initialized;
unsigned int acpi_sci_irq = INVALID_ACPI_IRQ;
+bool acpi_permanent_mmap = false;
/*
* This list of permanent mappings is for memory that may be accessed from
@@ -181,15 +182,15 @@ void acpi_os_vprintf(const char *fmt, va_list args)
static unsigned long acpi_rsdp;
static int __init setup_acpi_rsdp(char *arg)
{
- if (kstrtoul(arg, 16, &acpi_rsdp))
- return -EINVAL;
- return 0;
+ return kstrtoul(arg, 16, &acpi_rsdp);
}
early_param("acpi_rsdp", setup_acpi_rsdp);
#endif
acpi_physical_address __init acpi_os_get_root_pointer(void)
{
+ acpi_physical_address pa = 0;
+
#ifdef CONFIG_KEXEC
if (acpi_rsdp)
return acpi_rsdp;
@@ -198,21 +199,14 @@ acpi_physical_address __init acpi_os_get_root_pointer(void)
if (efi_enabled(EFI_CONFIG_TABLES)) {
if (efi.acpi20 != EFI_INVALID_TABLE_ADDR)
return efi.acpi20;
- else if (efi.acpi != EFI_INVALID_TABLE_ADDR)
+ if (efi.acpi != EFI_INVALID_TABLE_ADDR)
return efi.acpi;
- else {
- printk(KERN_ERR PREFIX
- "System description tables not found\n");
- return 0;
- }
+ pr_err(PREFIX "System description tables not found\n");
} else if (IS_ENABLED(CONFIG_ACPI_LEGACY_TABLES_LOOKUP)) {
- acpi_physical_address pa = 0;
-
acpi_find_root_pointer(&pa);
- return pa;
}
- return 0;
+ return pa;
}
/* Must be called with 'acpi_ioremap_lock' or RCU read lock held. */
@@ -313,7 +307,7 @@ static void acpi_unmap(acpi_physical_address pg_off, void __iomem *vaddr)
* virtual address). If not found, map it, add it to that list and return a
* pointer to it.
*
- * During early init (when acpi_gbl_permanent_mmap has not been set yet) this
+ * During early init (when acpi_permanent_mmap has not been set yet) this
* routine simply calls __acpi_map_table() to get the job done.
*/
void __iomem *__ref
@@ -329,7 +323,7 @@ acpi_os_map_iomem(acpi_physical_address phys, acpi_size size)
return NULL;
}
- if (!acpi_gbl_permanent_mmap)
+ if (!acpi_permanent_mmap)
return __acpi_map_table((unsigned long)phys, size);
mutex_lock(&acpi_ioremap_lock);
@@ -399,7 +393,7 @@ static void acpi_os_map_cleanup(struct acpi_ioremap *map)
* mappings, drop a reference to it and unmap it if there are no more active
* references to it.
*
- * During early init (when acpi_gbl_permanent_mmap has not been set yet) this
+ * During early init (when acpi_permanent_mmap has not been set yet) this
* routine simply calls __acpi_unmap_table() to get the job done. Since
* __acpi_unmap_table() is an __init function, the __ref annotation is needed
* here.
@@ -408,7 +402,7 @@ void __ref acpi_os_unmap_iomem(void __iomem *virt, acpi_size size)
{
struct acpi_ioremap *map;
- if (!acpi_gbl_permanent_mmap) {
+ if (!acpi_permanent_mmap) {
__acpi_unmap_table(virt, size);
return;
}
@@ -433,12 +427,6 @@ void __ref acpi_os_unmap_memory(void *virt, acpi_size size)
}
EXPORT_SYMBOL_GPL(acpi_os_unmap_memory);
-void __init early_acpi_os_unmap_memory(void __iomem *virt, acpi_size size)
-{
- if (!acpi_gbl_permanent_mmap)
- __acpi_unmap_table(virt, size);
-}
-
int acpi_os_map_generic_address(struct acpi_generic_address *gas)
{
u64 addr;
diff --git a/drivers/acpi/pci_mcfg.c b/drivers/acpi/pci_mcfg.c
index b5b376e081f5..a6a4ceaa6cc3 100644
--- a/drivers/acpi/pci_mcfg.c
+++ b/drivers/acpi/pci_mcfg.c
@@ -22,6 +22,7 @@
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/pci-acpi.h>
+#include <linux/pci-ecam.h>
/* Structure to hold entries from the MCFG table */
struct mcfg_entry {
@@ -32,12 +33,166 @@ struct mcfg_entry {
u8 bus_end;
};
+#ifdef CONFIG_PCI_QUIRKS
+struct mcfg_fixup {
+ char oem_id[ACPI_OEM_ID_SIZE + 1];
+ char oem_table_id[ACPI_OEM_TABLE_ID_SIZE + 1];
+ u32 oem_revision;
+ u16 segment;
+ struct resource bus_range;
+ struct pci_ecam_ops *ops;
+ struct resource cfgres;
+};
+
+#define MCFG_BUS_RANGE(start, end) DEFINE_RES_NAMED((start), \
+ ((end) - (start) + 1), \
+ NULL, IORESOURCE_BUS)
+#define MCFG_BUS_ANY MCFG_BUS_RANGE(0x0, 0xff)
+
+static struct mcfg_fixup mcfg_quirks[] = {
+/* { OEM_ID, OEM_TABLE_ID, REV, SEGMENT, BUS_RANGE, ops, cfgres }, */
+
+#define QCOM_ECAM32(seg) \
+ { "QCOM ", "QDF2432 ", 1, seg, MCFG_BUS_ANY, &pci_32b_ops }
+ QCOM_ECAM32(0),
+ QCOM_ECAM32(1),
+ QCOM_ECAM32(2),
+ QCOM_ECAM32(3),
+ QCOM_ECAM32(4),
+ QCOM_ECAM32(5),
+ QCOM_ECAM32(6),
+ QCOM_ECAM32(7),
+
+#define HISI_QUAD_DOM(table_id, seg, ops) \
+ { "HISI ", table_id, 0, (seg) + 0, MCFG_BUS_ANY, ops }, \
+ { "HISI ", table_id, 0, (seg) + 1, MCFG_BUS_ANY, ops }, \
+ { "HISI ", table_id, 0, (seg) + 2, MCFG_BUS_ANY, ops }, \
+ { "HISI ", table_id, 0, (seg) + 3, MCFG_BUS_ANY, ops }
+ HISI_QUAD_DOM("HIP05 ", 0, &hisi_pcie_ops),
+ HISI_QUAD_DOM("HIP06 ", 0, &hisi_pcie_ops),
+ HISI_QUAD_DOM("HIP07 ", 0, &hisi_pcie_ops),
+ HISI_QUAD_DOM("HIP07 ", 4, &hisi_pcie_ops),
+ HISI_QUAD_DOM("HIP07 ", 8, &hisi_pcie_ops),
+ HISI_QUAD_DOM("HIP07 ", 12, &hisi_pcie_ops),
+
+#define THUNDER_PEM_RES(addr, node) \
+ DEFINE_RES_MEM((addr) + ((u64) (node) << 44), 0x39 * SZ_16M)
+#define THUNDER_PEM_QUIRK(rev, node) \
+ { "CAVIUM", "THUNDERX", rev, 4 + (10 * (node)), MCFG_BUS_ANY, \
+ &thunder_pem_ecam_ops, THUNDER_PEM_RES(0x88001f000000UL, node) }, \
+ { "CAVIUM", "THUNDERX", rev, 5 + (10 * (node)), MCFG_BUS_ANY, \
+ &thunder_pem_ecam_ops, THUNDER_PEM_RES(0x884057000000UL, node) }, \
+ { "CAVIUM", "THUNDERX", rev, 6 + (10 * (node)), MCFG_BUS_ANY, \
+ &thunder_pem_ecam_ops, THUNDER_PEM_RES(0x88808f000000UL, node) }, \
+ { "CAVIUM", "THUNDERX", rev, 7 + (10 * (node)), MCFG_BUS_ANY, \
+ &thunder_pem_ecam_ops, THUNDER_PEM_RES(0x89001f000000UL, node) }, \
+ { "CAVIUM", "THUNDERX", rev, 8 + (10 * (node)), MCFG_BUS_ANY, \
+ &thunder_pem_ecam_ops, THUNDER_PEM_RES(0x894057000000UL, node) }, \
+ { "CAVIUM", "THUNDERX", rev, 9 + (10 * (node)), MCFG_BUS_ANY, \
+ &thunder_pem_ecam_ops, THUNDER_PEM_RES(0x89808f000000UL, node) }
+ /* SoC pass2.x */
+ THUNDER_PEM_QUIRK(1, 0),
+ THUNDER_PEM_QUIRK(1, 1),
+
+#define THUNDER_ECAM_QUIRK(rev, seg) \
+ { "CAVIUM", "THUNDERX", rev, seg, MCFG_BUS_ANY, \
+ &pci_thunder_ecam_ops }
+ /* SoC pass1.x */
+ THUNDER_PEM_QUIRK(2, 0), /* off-chip devices */
+ THUNDER_PEM_QUIRK(2, 1), /* off-chip devices */
+ THUNDER_ECAM_QUIRK(2, 0),
+ THUNDER_ECAM_QUIRK(2, 1),
+ THUNDER_ECAM_QUIRK(2, 2),
+ THUNDER_ECAM_QUIRK(2, 3),
+ THUNDER_ECAM_QUIRK(2, 10),
+ THUNDER_ECAM_QUIRK(2, 11),
+ THUNDER_ECAM_QUIRK(2, 12),
+ THUNDER_ECAM_QUIRK(2, 13),
+
+#define XGENE_V1_ECAM_MCFG(rev, seg) \
+ {"APM ", "XGENE ", rev, seg, MCFG_BUS_ANY, \
+ &xgene_v1_pcie_ecam_ops }
+#define XGENE_V2_ECAM_MCFG(rev, seg) \
+ {"APM ", "XGENE ", rev, seg, MCFG_BUS_ANY, \
+ &xgene_v2_pcie_ecam_ops }
+ /* X-Gene SoC with v1 PCIe controller */
+ XGENE_V1_ECAM_MCFG(1, 0),
+ XGENE_V1_ECAM_MCFG(1, 1),
+ XGENE_V1_ECAM_MCFG(1, 2),
+ XGENE_V1_ECAM_MCFG(1, 3),
+ XGENE_V1_ECAM_MCFG(1, 4),
+ XGENE_V1_ECAM_MCFG(2, 0),
+ XGENE_V1_ECAM_MCFG(2, 1),
+ XGENE_V1_ECAM_MCFG(2, 2),
+ XGENE_V1_ECAM_MCFG(2, 3),
+ XGENE_V1_ECAM_MCFG(2, 4),
+ /* X-Gene SoC with v2.1 PCIe controller */
+ XGENE_V2_ECAM_MCFG(3, 0),
+ XGENE_V2_ECAM_MCFG(3, 1),
+ /* X-Gene SoC with v2.2 PCIe controller */
+ XGENE_V2_ECAM_MCFG(4, 0),
+ XGENE_V2_ECAM_MCFG(4, 1),
+ XGENE_V2_ECAM_MCFG(4, 2),
+};
+
+static char mcfg_oem_id[ACPI_OEM_ID_SIZE];
+static char mcfg_oem_table_id[ACPI_OEM_TABLE_ID_SIZE];
+static u32 mcfg_oem_revision;
+
+static int pci_mcfg_quirk_matches(struct mcfg_fixup *f, u16 segment,
+ struct resource *bus_range)
+{
+ if (!memcmp(f->oem_id, mcfg_oem_id, ACPI_OEM_ID_SIZE) &&
+ !memcmp(f->oem_table_id, mcfg_oem_table_id,
+ ACPI_OEM_TABLE_ID_SIZE) &&
+ f->oem_revision == mcfg_oem_revision &&
+ f->segment == segment &&
+ resource_contains(&f->bus_range, bus_range))
+ return 1;
+
+ return 0;
+}
+#endif
+
+static void pci_mcfg_apply_quirks(struct acpi_pci_root *root,
+ struct resource *cfgres,
+ struct pci_ecam_ops **ecam_ops)
+{
+#ifdef CONFIG_PCI_QUIRKS
+ u16 segment = root->segment;
+ struct resource *bus_range = &root->secondary;
+ struct mcfg_fixup *f;
+ int i;
+
+ for (i = 0, f = mcfg_quirks; i < ARRAY_SIZE(mcfg_quirks); i++, f++) {
+ if (pci_mcfg_quirk_matches(f, segment, bus_range)) {
+ if (f->cfgres.start)
+ *cfgres = f->cfgres;
+ if (f->ops)
+ *ecam_ops = f->ops;
+ dev_info(&root->device->dev, "MCFG quirk: ECAM at %pR for %pR with %ps\n",
+ cfgres, bus_range, *ecam_ops);
+ return;
+ }
+ }
+#endif
+}
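
A quirk applies only when the MCFG header's OEM ID, OEM table ID, and OEM revision all match and the quirk entry covers the root's segment and bus range. A reduced sketch of the match, with bus-range containment simplified to a start/end comparison and a made-up quirk table:

#include <stdio.h>
#include <string.h>

struct fixup {
	char oem_id[7];		/* 6 chars + NUL */
	unsigned rev;
	unsigned segment;
	int bus_start, bus_end;
};

static const struct fixup quirks[] = {
	{ "QCOM  ", 1, 0, 0x00, 0xff },
	{ "CAVIUM", 1, 4, 0x00, 0xff },
};

static const struct fixup *match(const char *oem, unsigned rev,
				 unsigned seg, int bus_start, int bus_end)
{
	for (size_t i = 0; i < sizeof(quirks) / sizeof(quirks[0]); i++) {
		const struct fixup *f = &quirks[i];

		if (!memcmp(f->oem_id, oem, 6) && f->rev == rev &&
		    f->segment == seg &&
		    f->bus_start <= bus_start && bus_end <= f->bus_end)
			return f;	/* quirk covers this host bridge */
	}
	return NULL;
}

int main(void)
{
	const struct fixup *f = match("QCOM  ", 1, 0, 0, 0xff);

	printf("quirk %smatched\n", f ? "" : "not ");
	return 0;
}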
+
/* List to save MCFG entries */
static LIST_HEAD(pci_mcfg_list);
-phys_addr_t pci_mcfg_lookup(u16 seg, struct resource *bus_res)
+int pci_mcfg_lookup(struct acpi_pci_root *root, struct resource *cfgres,
+ struct pci_ecam_ops **ecam_ops)
{
+ struct pci_ecam_ops *ops = &pci_generic_ecam_ops;
+ struct resource *bus_res = &root->secondary;
+ u16 seg = root->segment;
struct mcfg_entry *e;
+ struct resource res;
+
+ /* Use address from _CBA if present, otherwise lookup MCFG */
+ if (root->mcfg_addr)
+ goto skip_lookup;
/*
* We expect exact match, unless MCFG entry end bus covers more than
@@ -45,10 +200,32 @@ phys_addr_t pci_mcfg_lookup(u16 seg, struct resource *bus_res)
*/
list_for_each_entry(e, &pci_mcfg_list, list) {
if (e->segment == seg && e->bus_start == bus_res->start &&
- e->bus_end >= bus_res->end)
- return e->addr;
+ e->bus_end >= bus_res->end) {
+ root->mcfg_addr = e->addr;
+ }
+ }
+
+skip_lookup:
+ memset(&res, 0, sizeof(res));
+ if (root->mcfg_addr) {
+ res.start = root->mcfg_addr + (bus_res->start << 20);
+ res.end = res.start + (resource_size(bus_res) << 20) - 1;
+ res.flags = IORESOURCE_MEM;
}
+ /*
+ * Allow quirks to override default ECAM ops and CFG resource
+ * range. This may even fabricate a CFG resource range in case
+ * MCFG does not have it. Invalid CFG start address means MCFG
+	 * MCFG does not have it. An invalid CFG start address means either
+	 * an MCFG firmware bug or that another quirk is needed in the array.
+ pci_mcfg_apply_quirks(root, &res, &ops);
+ if (!res.start)
+ return -ENXIO;
+
+ *cfgres = res;
+ *ecam_ops = ops;
return 0;
}
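
The 20-bit shifts encode the ECAM layout: each bus owns 1 MiB of config space (256 devices x 8 functions x 4 KiB), so the window for a bus range starts at mcfg_addr + (bus_start << 20) and an individual register sits at base + (bus << 20) + (dev << 15) + (fn << 12) + offset. A quick worked example with a made-up base address:

#include <stdint.h>
#include <stdio.h>

static uint64_t ecam_addr(uint64_t base, unsigned bus, unsigned dev,
			  unsigned fn, unsigned off)
{
	return base + ((uint64_t)bus << 20) + (dev << 15) + (fn << 12) + off;
}

int main(void)
{
	/* hypothetical ECAM base for segment 0 */
	uint64_t base = 0xe0000000;

	/* bus 1, device 2, function 3, register 0x10 */
	printf("0x%llx\n",
	       (unsigned long long)ecam_addr(base, 1, 2, 3, 0x10));
	/* -> 0xe0113010: (1 << 20) + (2 << 15) + (3 << 12) + 0x10 above base */
	return 0;
}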
@@ -79,6 +256,13 @@ static __init int pci_mcfg_parse(struct acpi_table_header *header)
list_add(&e->list, &pci_mcfg_list);
}
+#ifdef CONFIG_PCI_QUIRKS
+ /* Save MCFG IDs and revision for quirks matching */
+ memcpy(mcfg_oem_id, header->oem_id, ACPI_OEM_ID_SIZE);
+ memcpy(mcfg_oem_table_id, header->oem_table_id, ACPI_OEM_TABLE_ID_SIZE);
+ mcfg_oem_revision = header->oem_revision;
+#endif
+
pr_info("MCFG table detected, %d entries\n", n);
return 0;
}
diff --git a/drivers/acpi/processor_core.c b/drivers/acpi/processor_core.c
index 5c78ee1860b0..611a5585a902 100644
--- a/drivers/acpi/processor_core.c
+++ b/drivers/acpi/processor_core.c
@@ -154,18 +154,16 @@ static phys_cpuid_t map_madt_entry(struct acpi_table_madt *madt,
phys_cpuid_t __init acpi_map_madt_entry(u32 acpi_id)
{
struct acpi_table_madt *madt = NULL;
- acpi_size tbl_size;
phys_cpuid_t rv;
- acpi_get_table_with_size(ACPI_SIG_MADT, 0,
- (struct acpi_table_header **)&madt,
- &tbl_size);
+ acpi_get_table(ACPI_SIG_MADT, 0,
+ (struct acpi_table_header **)&madt);
if (!madt)
return PHYS_CPUID_INVALID;
rv = map_madt_entry(madt, 1, acpi_id, true);
- early_acpi_os_unmap_memory(madt, tbl_size);
+ acpi_put_table((struct acpi_table_header *)madt);
return rv;
}
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index 2237d3f24f0e..5c8aa9cf62d7 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -141,7 +141,7 @@ static void lapic_timer_check_state(int state, struct acpi_processor *pr,
if (cpu_has(&cpu_data(pr->id), X86_FEATURE_ARAT))
return;
- if (amd_e400_c1e_detected)
+ if (boot_cpu_has_bug(X86_BUG_AMD_APIC_C1E))
type = ACPI_STATE_C1;
/*
diff --git a/drivers/acpi/processor_perflib.c b/drivers/acpi/processor_perflib.c
index bb01dea39fdc..f0b4a981b8d3 100644
--- a/drivers/acpi/processor_perflib.c
+++ b/drivers/acpi/processor_perflib.c
@@ -157,7 +157,7 @@ static void acpi_processor_ppc_ost(acpi_handle handle, int status)
status, NULL);
}
-int acpi_processor_ppc_has_changed(struct acpi_processor *pr, int event_flag)
+void acpi_processor_ppc_has_changed(struct acpi_processor *pr, int event_flag)
{
int ret;
@@ -168,7 +168,7 @@ int acpi_processor_ppc_has_changed(struct acpi_processor *pr, int event_flag)
*/
if (event_flag)
acpi_processor_ppc_ost(pr->handle, 1);
- return 0;
+ return;
}
ret = acpi_processor_get_platform_limit(pr);
@@ -182,10 +182,8 @@ int acpi_processor_ppc_has_changed(struct acpi_processor *pr, int event_flag)
else
acpi_processor_ppc_ost(pr->handle, 0);
}
- if (ret < 0)
- return (ret);
- else
- return cpufreq_update_policy(pr->id);
+ if (ret >= 0)
+ cpufreq_update_policy(pr->id);
}
int acpi_processor_get_bios_limit(int cpu, unsigned int *limit)
@@ -465,11 +463,33 @@ int acpi_processor_get_performance_info(struct acpi_processor *pr)
return result;
}
EXPORT_SYMBOL_GPL(acpi_processor_get_performance_info);
-int acpi_processor_notify_smm(struct module *calling_module)
+
+int acpi_processor_pstate_control(void)
{
acpi_status status;
- static int is_done = 0;
+ if (!acpi_gbl_FADT.smi_command || !acpi_gbl_FADT.pstate_control)
+ return 0;
+
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+ "Writing pstate_control [0x%x] to smi_command [0x%x]\n",
+ acpi_gbl_FADT.pstate_control, acpi_gbl_FADT.smi_command));
+
+ status = acpi_os_write_port(acpi_gbl_FADT.smi_command,
+ (u32)acpi_gbl_FADT.pstate_control, 8);
+ if (ACPI_SUCCESS(status))
+ return 1;
+
+ ACPI_EXCEPTION((AE_INFO, status,
+ "Failed to write pstate_control [0x%x] to smi_command [0x%x]",
+ acpi_gbl_FADT.pstate_control, acpi_gbl_FADT.smi_command));
+ return -EIO;
+}
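
The factored-out acpi_processor_pstate_control() deliberately returns a tri-state: 0 when there is nothing to write (no SMI port or no pstate_control value), 1 after a successful write, and -EIO on failure, letting acpi_processor_notify_smm() tell "nothing to do" apart from a real error. A sketch of consuming that contract with hypothetical stand-ins:

#include <stdio.h>

/* Hypothetical stand-in returning the same tri-state contract */
static int pstate_control(int have_port, int write_ok)
{
	if (!have_port)
		return 0;		/* nothing to write */
	return write_ok ? 1 : -5;	/* 1 = written, -EIO (-5) on failure */
}

static int notify_smm(int have_port, int write_ok)
{
	int result = pstate_control(have_port, write_ok);

	if (!result) {
		puts("No SMI port or pstate_control");
		return 0;		/* benign: nothing to hand off */
	}
	if (result < 0)
		return result;		/* propagate the write failure */

	puts("SMM notified");
	return 0;
}

int main(void)
{
	notify_smm(0, 0);
	notify_smm(1, 1);
	return notify_smm(1, 0);
}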
+
+int acpi_processor_notify_smm(struct module *calling_module)
+{
+ static int is_done = 0;
+ int result;
if (!(acpi_processor_ppc_status & PPC_REGISTERED))
return -EBUSY;
@@ -492,26 +512,15 @@ int acpi_processor_notify_smm(struct module *calling_module)
is_done = -EIO;
- /* Can't write pstate_control to smi_command if either value is zero */
- if ((!acpi_gbl_FADT.smi_command) || (!acpi_gbl_FADT.pstate_control)) {
+ result = acpi_processor_pstate_control();
+ if (!result) {
ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No SMI port or pstate_control\n"));
module_put(calling_module);
return 0;
}
-
- ACPI_DEBUG_PRINT((ACPI_DB_INFO,
- "Writing pstate_control [0x%x] to smi_command [0x%x]\n",
- acpi_gbl_FADT.pstate_control, acpi_gbl_FADT.smi_command));
-
- status = acpi_os_write_port(acpi_gbl_FADT.smi_command,
- (u32) acpi_gbl_FADT.pstate_control, 8);
- if (ACPI_FAILURE(status)) {
- ACPI_EXCEPTION((AE_INFO, status,
- "Failed to write pstate_control [0x%x] to "
- "smi_command [0x%x]", acpi_gbl_FADT.pstate_control,
- acpi_gbl_FADT.smi_command));
+ if (result < 0) {
module_put(calling_module);
- return status;
+ return result;
}
/* Success. If there's no _PPC, we need to fear nothing, so
diff --git a/drivers/acpi/property.c b/drivers/acpi/property.c
index 03f5ec11ab31..3afddcd834ef 100644
--- a/drivers/acpi/property.c
+++ b/drivers/acpi/property.c
@@ -41,14 +41,13 @@ static bool acpi_enumerate_nondev_subnodes(acpi_handle scope,
static bool acpi_extract_properties(const union acpi_object *desc,
struct acpi_device_data *data);
-static bool acpi_nondev_subnode_ok(acpi_handle scope,
- const union acpi_object *link,
- struct list_head *list)
+static bool acpi_nondev_subnode_extract(const union acpi_object *desc,
+ acpi_handle handle,
+ const union acpi_object *link,
+ struct list_head *list)
{
- struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER };
struct acpi_data_node *dn;
- acpi_handle handle;
- acpi_status status;
+ bool result;
dn = kzalloc(sizeof(*dn), GFP_KERNEL);
if (!dn)
@@ -58,43 +57,75 @@ static bool acpi_nondev_subnode_ok(acpi_handle scope,
dn->fwnode.type = FWNODE_ACPI_DATA;
INIT_LIST_HEAD(&dn->data.subnodes);
- status = acpi_get_handle(scope, link->package.elements[1].string.pointer,
- &handle);
- if (ACPI_FAILURE(status))
- goto fail;
+ result = acpi_extract_properties(desc, &dn->data);
- status = acpi_evaluate_object_typed(handle, NULL, NULL, &buf,
- ACPI_TYPE_PACKAGE);
- if (ACPI_FAILURE(status))
- goto fail;
+ if (handle) {
+ acpi_handle scope;
+ acpi_status status;
- if (acpi_extract_properties(buf.pointer, &dn->data))
- dn->handle = handle;
+ /*
+ * The scope for the subnode object lookup is the one of the
+ * namespace node (device) containing the object that has
+ * returned the package. That is, it's the scope of that
+ * object's parent.
+ */
+ status = acpi_get_parent(handle, &scope);
+ if (ACPI_SUCCESS(status)
+ && acpi_enumerate_nondev_subnodes(scope, desc, &dn->data))
+ result = true;
+ } else if (acpi_enumerate_nondev_subnodes(NULL, desc, &dn->data)) {
+ result = true;
+ }
- /*
- * The scope for the subnode object lookup is the one of the namespace
- * node (device) containing the object that has returned the package.
- * That is, it's the scope of that object's parent.
- */
- status = acpi_get_parent(handle, &scope);
- if (ACPI_SUCCESS(status)
- && acpi_enumerate_nondev_subnodes(scope, buf.pointer, &dn->data))
+ if (result) {
dn->handle = handle;
-
- if (dn->handle) {
- dn->data.pointer = buf.pointer;
+ dn->data.pointer = desc;
list_add_tail(&dn->sibling, list);
return true;
}
+ kfree(dn);
acpi_handle_debug(handle, "Invalid properties/subnodes data, skipping\n");
+ return false;
+}
+
+static bool acpi_nondev_subnode_data_ok(acpi_handle handle,
+ const union acpi_object *link,
+ struct list_head *list)
+{
+ struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER };
+ acpi_status status;
+
+ status = acpi_evaluate_object_typed(handle, NULL, NULL, &buf,
+ ACPI_TYPE_PACKAGE);
+ if (ACPI_FAILURE(status))
+ return false;
+
+ if (acpi_nondev_subnode_extract(buf.pointer, handle, link, list))
+ return true;
- fail:
ACPI_FREE(buf.pointer);
- kfree(dn);
return false;
}
+static bool acpi_nondev_subnode_ok(acpi_handle scope,
+ const union acpi_object *link,
+ struct list_head *list)
+{
+ acpi_handle handle;
+ acpi_status status;
+
+ if (!scope)
+ return false;
+
+ status = acpi_get_handle(scope, link->package.elements[1].string.pointer,
+ &handle);
+ if (ACPI_FAILURE(status))
+ return false;
+
+ return acpi_nondev_subnode_data_ok(handle, link, list);
+}
+
static int acpi_add_nondev_subnodes(acpi_handle scope,
const union acpi_object *links,
struct list_head *list)
@@ -103,15 +134,37 @@ static int acpi_add_nondev_subnodes(acpi_handle scope,
int i;
for (i = 0; i < links->package.count; i++) {
- const union acpi_object *link;
+ const union acpi_object *link, *desc;
+ acpi_handle handle;
+ bool result;
link = &links->package.elements[i];
- /* Only two elements allowed, both must be strings. */
- if (link->package.count == 2
- && link->package.elements[0].type == ACPI_TYPE_STRING
- && link->package.elements[1].type == ACPI_TYPE_STRING
- && acpi_nondev_subnode_ok(scope, link, list))
- ret = true;
+ /* Only two elements allowed. */
+ if (link->package.count != 2)
+ continue;
+
+ /* The first one must be a string. */
+ if (link->package.elements[0].type != ACPI_TYPE_STRING)
+ continue;
+
+ /* The second one may be a string, a reference or a package. */
+ switch (link->package.elements[1].type) {
+ case ACPI_TYPE_STRING:
+ result = acpi_nondev_subnode_ok(scope, link, list);
+ break;
+ case ACPI_TYPE_LOCAL_REFERENCE:
+ handle = link->package.elements[1].reference.handle;
+ result = acpi_nondev_subnode_data_ok(handle, link, list);
+ break;
+ case ACPI_TYPE_PACKAGE:
+ desc = &link->package.elements[1];
+ result = acpi_nondev_subnode_extract(desc, NULL, link, list);
+ break;
+ default:
+ result = false;
+ break;
+ }
+ ret = ret || result;
}
return ret;
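With the changes above, the second element of a _DSD subnode link may be a string, a local reference or an inlined package. A minimal sketch of walking the resulting list of data nodes, assuming the caller owns the list built by acpi_add_nondev_subnodes(); the helper name is hypothetical and the name field is assigned elsewhere in this file:

	/* Hypothetical walk over the acpi_data_node list built above. */
	static void example_dump_subnodes(struct list_head *list)
	{
		struct acpi_data_node *dn;

		list_for_each_entry(dn, list, sibling)
			pr_info("subnode: %s\n", dn->name);
	}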
diff --git a/drivers/acpi/resource.c b/drivers/acpi/resource.c
index 56241eb341f4..cb57962ef7c4 100644
--- a/drivers/acpi/resource.c
+++ b/drivers/acpi/resource.c
@@ -664,3 +664,60 @@ int acpi_dev_filter_resource_type(struct acpi_resource *ares,
return (type & types) ? 0 : 1;
}
EXPORT_SYMBOL_GPL(acpi_dev_filter_resource_type);
+
+static int acpi_dev_consumes_res(struct acpi_device *adev, struct resource *res)
+{
+ struct list_head resource_list;
+ struct resource_entry *rentry;
+ int ret, found = 0;
+
+ INIT_LIST_HEAD(&resource_list);
+ ret = acpi_dev_get_resources(adev, &resource_list, NULL, NULL);
+ if (ret < 0)
+ return 0;
+
+ list_for_each_entry(rentry, &resource_list, node) {
+ if (resource_contains(rentry->res, res)) {
+ found = 1;
+ break;
+ }
+ }
+
+ acpi_dev_free_resource_list(&resource_list);
+ return found;
+}
+
+static acpi_status acpi_res_consumer_cb(acpi_handle handle, u32 depth,
+ void *context, void **ret)
+{
+ struct resource *res = context;
+ struct acpi_device **consumer = (struct acpi_device **) ret;
+ struct acpi_device *adev;
+
+ if (acpi_bus_get_device(handle, &adev))
+ return AE_OK;
+
+ if (acpi_dev_consumes_res(adev, res)) {
+ *consumer = adev;
+ return AE_CTRL_TERMINATE;
+ }
+
+ return AE_OK;
+}
+
+/**
+ * acpi_resource_consumer - Find the ACPI device that consumes @res.
+ * @res: Resource to search for.
+ *
+ * Search the current resource settings (_CRS) of every ACPI device node
+ * for @res. If we find an ACPI device whose _CRS includes @res, return
+ * it. Otherwise, return NULL.
+ */
+struct acpi_device *acpi_resource_consumer(struct resource *res)
+{
+ struct acpi_device *consumer = NULL;
+
+ acpi_get_devices(NULL, acpi_res_consumer_cb, res, (void **) &consumer);
+ return consumer;
+}
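A minimal usage sketch for the new helper, assuming the caller holds a valid struct resource; the function name is hypothetical:

	/* Hypothetical usage: report which ACPI device claims a resource. */
	static void example_report_consumer(struct resource *res)
	{
		struct acpi_device *adev = acpi_resource_consumer(res);

		if (adev)
			pr_info("%s consumes %pR\n", dev_name(&adev->dev), res);
	}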
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index 3d1856f1f4d0..45dec874ea55 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -7,6 +7,7 @@
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/acpi.h>
+#include <linux/acpi_iort.h>
#include <linux/signal.h>
#include <linux/kthread.h>
#include <linux/dmi.h>
@@ -1119,9 +1120,6 @@ acpi_backlight_cap_match(acpi_handle handle, u32 level, void *context,
ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found generic backlight "
"support\n"));
*cap |= ACPI_VIDEO_BACKLIGHT;
- if (!acpi_has_method(handle, "_BQC"))
- printk(KERN_WARNING FW_BUG PREFIX "No _BQC method, "
- "cannot determine initial brightness\n");
/* We have backlight support, no need to scan further */
return AE_CTRL_TERMINATE;
}
@@ -1370,6 +1368,38 @@ enum dev_dma_attr acpi_get_dma_attr(struct acpi_device *adev)
return DEV_DMA_NON_COHERENT;
}
+/**
+ * acpi_dma_configure - Set-up DMA configuration for the device.
+ * @dev: The pointer to the device
+ * @attr: device dma attributes
+ */
+void acpi_dma_configure(struct device *dev, enum dev_dma_attr attr)
+{
+ const struct iommu_ops *iommu;
+
+ iort_set_dma_mask(dev);
+
+ iommu = iort_iommu_configure(dev);
+
+ /*
+ * Assume dma valid range starts at 0 and covers the whole
+ * coherent_dma_mask.
+ */
+ arch_setup_dma_ops(dev, 0, dev->coherent_dma_mask + 1, iommu,
+ attr == DEV_DMA_COHERENT);
+}
+EXPORT_SYMBOL_GPL(acpi_dma_configure);
+
+/**
+ * acpi_dma_deconfigure - Tear-down DMA configuration for the device.
+ * @dev: The pointer to the device
+ */
+void acpi_dma_deconfigure(struct device *dev)
+{
+ arch_teardown_dma_ops(dev);
+}
+EXPORT_SYMBOL_GPL(acpi_dma_deconfigure);
+
static void acpi_init_coherency(struct acpi_device *adev)
{
unsigned long long cca = 0;
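A minimal sketch of how a bus might pair the new calls on bind/unbind, assuming it already has the device's struct acpi_device; the example_* names are hypothetical:

	/* Hypothetical bind/unbind pairing for an ACPI-enumerated device. */
	static int example_bind(struct device *dev, struct acpi_device *adev)
	{
		acpi_dma_configure(dev, acpi_get_dma_attr(adev));
		return 0;
	}

	static void example_unbind(struct device *dev)
	{
		acpi_dma_deconfigure(dev);
	}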
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
index 54abb26b7366..9b6cebe227a0 100644
--- a/drivers/acpi/sleep.c
+++ b/drivers/acpi/sleep.c
@@ -674,6 +674,14 @@ static void acpi_sleep_suspend_setup(void)
if (acpi_sleep_state_supported(i))
sleep_states[i] = 1;
+ /*
+ * Use suspend-to-idle by default if ACPI_FADT_LOW_POWER_S0 is set and
+ * the default suspend mode was not selected from the command line.
+ */
+ if (acpi_gbl_FADT.flags & ACPI_FADT_LOW_POWER_S0 &&
+ mem_sleep_default > PM_SUSPEND_MEM)
+ mem_sleep_default = PM_SUSPEND_FREEZE;
+
suspend_set_ops(old_suspend_ordering ?
&acpi_suspend_ops_old : &acpi_suspend_ops);
freeze_set_ops(&acpi_freeze_ops);
diff --git a/drivers/acpi/spcr.c b/drivers/acpi/spcr.c
index e8d7bc7d4da8..b8019c4c1d38 100644
--- a/drivers/acpi/spcr.c
+++ b/drivers/acpi/spcr.c
@@ -33,7 +33,6 @@ int __init parse_spcr(bool earlycon)
{
static char opts[64];
struct acpi_table_spcr *table;
- acpi_size table_size;
acpi_status status;
char *uart;
char *iotype;
@@ -43,9 +42,8 @@ int __init parse_spcr(bool earlycon)
if (acpi_disabled)
return -ENODEV;
- status = acpi_get_table_with_size(ACPI_SIG_SPCR, 0,
- (struct acpi_table_header **)&table,
- &table_size);
+ status = acpi_get_table(ACPI_SIG_SPCR, 0,
+ (struct acpi_table_header **)&table);
if (ACPI_FAILURE(status))
return -ENOENT;
@@ -106,6 +104,6 @@ int __init parse_spcr(bool earlycon)
err = add_preferred_console(uart, 0, opts + strlen(uart) + 1);
done:
- early_acpi_os_unmap_memory((void __iomem *)table, table_size);
+ acpi_put_table((struct acpi_table_header *)table);
return err;
}
diff --git a/drivers/acpi/tables.c b/drivers/acpi/tables.c
index cdd56c4657e0..2604189d6cd1 100644
--- a/drivers/acpi/tables.c
+++ b/drivers/acpi/tables.c
@@ -333,7 +333,6 @@ acpi_table_parse_entries_array(char *id,
unsigned int max_entries)
{
struct acpi_table_header *table_header = NULL;
- acpi_size tbl_size;
int count;
u32 instance = 0;
@@ -346,7 +345,7 @@ acpi_table_parse_entries_array(char *id,
if (!strncmp(id, ACPI_SIG_MADT, 4))
instance = acpi_apic_instance;
- acpi_get_table_with_size(id, instance, &table_header, &tbl_size);
+ acpi_get_table(id, instance, &table_header);
if (!table_header) {
pr_warn("%4.4s not present\n", id);
return -ENODEV;
@@ -355,7 +354,7 @@ acpi_table_parse_entries_array(char *id,
count = acpi_parse_entries_array(id, table_size, table_header,
proc, proc_num, max_entries);
- early_acpi_os_unmap_memory((char *)table_header, tbl_size);
+ acpi_put_table(table_header);
return count;
}
@@ -397,7 +396,6 @@ acpi_table_parse_madt(enum acpi_madt_type id,
int __init acpi_table_parse(char *id, acpi_tbl_table_handler handler)
{
struct acpi_table_header *table = NULL;
- acpi_size tbl_size;
if (acpi_disabled)
return -ENODEV;
@@ -406,13 +404,13 @@ int __init acpi_table_parse(char *id, acpi_tbl_table_handler handler)
return -EINVAL;
if (strncmp(id, ACPI_SIG_MADT, 4) == 0)
- acpi_get_table_with_size(id, acpi_apic_instance, &table, &tbl_size);
+ acpi_get_table(id, acpi_apic_instance, &table);
else
- acpi_get_table_with_size(id, 0, &table, &tbl_size);
+ acpi_get_table(id, 0, &table);
if (table) {
handler(table);
- early_acpi_os_unmap_memory(table, tbl_size);
+ acpi_put_table(table);
return 0;
} else
return -ENODEV;
@@ -426,16 +424,15 @@ int __init acpi_table_parse(char *id, acpi_tbl_table_handler handler)
static void __init check_multiple_madt(void)
{
struct acpi_table_header *table = NULL;
- acpi_size tbl_size;
- acpi_get_table_with_size(ACPI_SIG_MADT, 2, &table, &tbl_size);
+ acpi_get_table(ACPI_SIG_MADT, 2, &table);
if (table) {
pr_warn("BIOS bug: multiple APIC/MADT found, using %d\n",
acpi_apic_instance);
pr_warn("If \"acpi_apic_instance=%d\" works better, "
"notify linux-acpi@vger.kernel.org\n",
acpi_apic_instance ? 0 : 2);
- early_acpi_os_unmap_memory(table, tbl_size);
+ acpi_put_table(table);
} else
acpi_apic_instance = 0;
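The conversions above all follow the same reference-counted pattern; a minimal sketch, assuming a caller that only needs the table temporarily:

	/* Hypothetical caller using the new acquire/release pairing. */
	static void example_use_madt(void)
	{
		struct acpi_table_header *table;

		if (ACPI_FAILURE(acpi_get_table(ACPI_SIG_MADT, 0, &table)))
			return;
		/* ... parse the table ... */
		acpi_put_table(table);	/* drop the reference; may unmap it */
	}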
diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c
index a6b36fc53aec..02ded25c82e4 100644
--- a/drivers/acpi/video_detect.c
+++ b/drivers/acpi/video_detect.c
@@ -296,6 +296,26 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "Vostro V131"),
},
},
+ {
+ /* https://bugzilla.redhat.com/show_bug.cgi?id=1123661 */
+ .callback = video_detect_force_native,
+ .ident = "Dell XPS 17 L702X",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Dell System XPS L702X"),
+ },
+ },
+ {
+ /* https://bugzilla.redhat.com/show_bug.cgi?id=1204476 */
+ /* https://bugs.launchpad.net/ubuntu/+source/linux-lts-trusty/+bug/1416940 */
+ .callback = video_detect_force_native,
+ .ident = "HP Pavilion dv6",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion dv6 Notebook PC"),
+ },
+ },
+
{ },
};
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index 74f4c662f776..2fc52407306c 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -46,6 +46,8 @@
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <linux/libata.h>
+#include <linux/ahci-remap.h>
+#include <linux/io-64-nonatomic-lo-hi.h>
#include "ahci.h"
#define DRV_NAME "ahci"
@@ -1400,6 +1402,40 @@ static irqreturn_t ahci_thunderx_irq_handler(int irq, void *dev_instance)
}
#endif
+static void ahci_remap_check(struct pci_dev *pdev, int bar,
+ struct ahci_host_priv *hpriv)
+{
+ int i, count = 0;
+ u32 cap;
+
+ /*
+ * Check if this device might have remapped NVMe devices.
+ */
+ if (pdev->vendor != PCI_VENDOR_ID_INTEL ||
+ pci_resource_len(pdev, bar) < SZ_512K ||
+ bar != AHCI_PCI_BAR_STANDARD ||
+ !(readl(hpriv->mmio + AHCI_VSCAP) & 1))
+ return;
+
+ cap = readq(hpriv->mmio + AHCI_REMAP_CAP);
+ for (i = 0; i < AHCI_MAX_REMAP; i++) {
+ if ((cap & (1 << i)) == 0)
+ continue;
+ if (readl(hpriv->mmio + ahci_remap_dcc(i))
+ != PCI_CLASS_STORAGE_EXPRESS)
+ continue;
+
+ /* We've found a remapped device */
+ count++;
+ }
+
+ if (!count)
+ return;
+
+ dev_warn(&pdev->dev, "Found %d remapped NVMe devices.\n", count);
+ dev_warn(&pdev->dev, "Switch your BIOS from RAID to AHCI mode to use them.\n");
+}
+
static int ahci_get_irq_vector(struct ata_host *host, int port)
{
return pci_irq_vector(to_pci_dev(host->dev), port);
@@ -1541,6 +1577,9 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
hpriv->mmio = pcim_iomap_table(pdev)[ahci_pci_bar];
+ /* detect remapped NVMe devices */
+ ahci_remap_check(pdev, ahci_pci_bar, hpriv);
+
/* must set flag prior to save config in order to take effect */
if (ahci_broken_devslp(pdev))
hpriv->flags |= AHCI_HFLAG_NO_DEVSLP;
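A worked example of the capability decoding in ahci_remap_check(), under the assumption that AHCI_REMAP_CAP reads back 0x3:

	/*
	 * cap = 0x3: bits 0 and 1 are set, so ahci_remap_dcc(0) and
	 * ahci_remap_dcc(1) are read; each one that equals
	 * PCI_CLASS_STORAGE_EXPRESS counts as one hidden NVMe device
	 * and triggers the BIOS RAID-mode warning above.
	 */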
diff --git a/drivers/ata/ahci_qoriq.c b/drivers/ata/ahci_qoriq.c
index 1eba8dff875e..9884c8c6e934 100644
--- a/drivers/ata/ahci_qoriq.c
+++ b/drivers/ata/ahci_qoriq.c
@@ -46,11 +46,13 @@
#define LS1021A_AXICC_ADDR 0xC0
#define SATA_ECC_DISABLE 0x00020000
+#define LS1046A_SATA_ECC_DIS 0x80000000
enum ahci_qoriq_type {
AHCI_LS1021A,
AHCI_LS1043A,
AHCI_LS2080A,
+ AHCI_LS1046A,
};
struct ahci_qoriq_priv {
@@ -63,6 +65,7 @@ static const struct of_device_id ahci_qoriq_of_match[] = {
{ .compatible = "fsl,ls1021a-ahci", .data = (void *)AHCI_LS1021A},
{ .compatible = "fsl,ls1043a-ahci", .data = (void *)AHCI_LS1043A},
{ .compatible = "fsl,ls2080a-ahci", .data = (void *)AHCI_LS2080A},
+ { .compatible = "fsl,ls1046a-ahci", .data = (void *)AHCI_LS1046A},
{},
};
MODULE_DEVICE_TABLE(of, ahci_qoriq_of_match);
@@ -175,6 +178,13 @@ static int ahci_qoriq_phy_init(struct ahci_host_priv *hpriv)
writel(AHCI_PORT_TRANS_CFG, reg_base + PORT_TRANS);
writel(AHCI_PORT_AXICC_CFG, reg_base + PORT_AXICC);
break;
+
+ case AHCI_LS1046A:
+ writel(LS1046A_SATA_ECC_DIS, qpriv->ecc_addr);
+ writel(AHCI_PORT_PHY_1_CFG, reg_base + PORT_PHY1);
+ writel(AHCI_PORT_TRANS_CFG, reg_base + PORT_TRANS);
+ writel(AHCI_PORT_AXICC_CFG, reg_base + PORT_AXICC);
+ break;
}
return 0;
@@ -204,9 +214,9 @@ static int ahci_qoriq_probe(struct platform_device *pdev)
qoriq_priv->type = (enum ahci_qoriq_type)of_id->data;
- if (qoriq_priv->type == AHCI_LS1021A) {
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
- "sata-ecc");
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "sata-ecc");
+ if (res) {
qoriq_priv->ecc_addr = devm_ioremap_resource(dev, res);
if (IS_ERR(qoriq_priv->ecc_addr))
return PTR_ERR(qoriq_priv->ecc_addr);
diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
index 0d028ead99e8..ee7db3119b18 100644
--- a/drivers/ata/libahci.c
+++ b/drivers/ata/libahci.c
@@ -140,6 +140,7 @@ EXPORT_SYMBOL_GPL(ahci_shost_attrs);
struct device_attribute *ahci_sdev_attrs[] = {
&dev_attr_sw_activity,
&dev_attr_unload_heads,
+ &dev_attr_ncq_prio_enable,
NULL
};
EXPORT_SYMBOL_GPL(ahci_sdev_attrs);
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 223a770f78f3..9cd0a2d41816 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -129,7 +129,7 @@ static int ata_force_tbl_size;
static char ata_force_param_buf[PAGE_SIZE] __initdata;
/* param_buf is thrown away after initialization, disallow read */
module_param_string(force, ata_force_param_buf, sizeof(ata_force_param_buf), 0);
-MODULE_PARM_DESC(force, "Force ATA configurations including cable type, link speed and transfer mode (see Documentation/kernel-parameters.txt for details)");
+MODULE_PARM_DESC(force, "Force ATA configurations including cable type, link speed and transfer mode (see Documentation/admin-guide/kernel-parameters.rst for details)");
static int atapi_enabled = 1;
module_param(atapi_enabled, int, 0444);
@@ -739,6 +739,7 @@ u64 ata_tf_read_block(const struct ata_taskfile *tf, struct ata_device *dev)
* @n_block: Number of blocks
* @tf_flags: RW/FUA etc...
* @tag: tag
+ * @class: IO priority class
*
* LOCKING:
* None.
@@ -753,7 +754,7 @@ u64 ata_tf_read_block(const struct ata_taskfile *tf, struct ata_device *dev)
*/
int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev,
u64 block, u32 n_block, unsigned int tf_flags,
- unsigned int tag)
+ unsigned int tag, int class)
{
tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
tf->flags |= tf_flags;
@@ -785,6 +786,12 @@ int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev,
tf->device = ATA_LBA;
if (tf->flags & ATA_TFLAG_FUA)
tf->device |= 1 << 7;
+
+ if (dev->flags & ATA_DFLAG_NCQ_PRIO) {
+ if (class == IOPRIO_CLASS_RT)
+ tf->hob_nsect |= ATA_PRIO_HIGH <<
+ ATA_SHIFT_PRIO;
+ }
} else if (dev->flags & ATA_DFLAG_LBA) {
tf->flags |= ATA_TFLAG_LBA;
@@ -2156,6 +2163,37 @@ static void ata_dev_config_ncq_non_data(struct ata_device *dev)
}
}
+static void ata_dev_config_ncq_prio(struct ata_device *dev)
+{
+ struct ata_port *ap = dev->link->ap;
+ unsigned int err_mask;
+
+ if (!(dev->flags & ATA_DFLAG_NCQ_PRIO_ENABLE)) {
+ dev->flags &= ~ATA_DFLAG_NCQ_PRIO;
+ return;
+ }
+
+ err_mask = ata_read_log_page(dev,
+ ATA_LOG_SATA_ID_DEV_DATA,
+ ATA_LOG_SATA_SETTINGS,
+ ap->sector_buf,
+ 1);
+ if (err_mask) {
+ ata_dev_dbg(dev,
+ "failed to get Identify Device data, Emask 0x%x\n",
+ err_mask);
+ return;
+ }
+
+ if (ap->sector_buf[ATA_LOG_NCQ_PRIO_OFFSET] & BIT(3)) {
+ dev->flags |= ATA_DFLAG_NCQ_PRIO;
+ } else {
+ dev->flags &= ~ATA_DFLAG_NCQ_PRIO;
+ ata_dev_dbg(dev, "SATA page does not support priority\n");
+ }
+}
+
static int ata_dev_config_ncq(struct ata_device *dev,
char *desc, size_t desc_sz)
{
@@ -2205,6 +2243,8 @@ static int ata_dev_config_ncq(struct ata_device *dev,
ata_dev_config_ncq_send_recv(dev);
if (ata_id_has_ncq_non_data(dev->id))
ata_dev_config_ncq_non_data(dev);
+ if (ata_id_has_ncq_prio(dev->id))
+ ata_dev_config_ncq_prio(dev);
}
return 0;
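With NCQ priority detected and enabled, I/O issued under the realtime I/O priority class is built with ATA_PRIO_HIGH. A minimal userspace sketch (an assumption, not part of the patch) that puts the calling process into that class via the ioprio_set() syscall:

	#include <sys/syscall.h>
	#include <unistd.h>

	/* Values mirror include/linux/ioprio.h; the class shift is 13. */
	#define IOPRIO_CLASS_RT		1
	#define IOPRIO_WHO_PROCESS	1
	#define IOPRIO_PRIO_VALUE(c, d)	(((c) << 13) | (d))

	int main(void)
	{
		/* who == 0 means the calling process */
		return syscall(SYS_ioprio_set, IOPRIO_WHO_PROCESS, 0,
			       IOPRIO_PRIO_VALUE(IOPRIO_CLASS_RT, 0));
	}

The per-device ncq_prio_enable sysfs attribute added in the libata-scsi.c hunk below must also be set before the flag takes effect.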
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index 8e575fbdf31d..1f863e757ee4 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -50,6 +50,7 @@
#include <linux/uaccess.h>
#include <linux/suspend.h>
#include <asm/unaligned.h>
+#include <linux/ioprio.h>
#include "libata.h"
#include "libata-transport.h"
@@ -270,6 +271,83 @@ DEVICE_ATTR(unload_heads, S_IRUGO | S_IWUSR,
ata_scsi_park_show, ata_scsi_park_store);
EXPORT_SYMBOL_GPL(dev_attr_unload_heads);
+static ssize_t ata_ncq_prio_enable_show(struct device *device,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct scsi_device *sdev = to_scsi_device(device);
+ struct ata_port *ap;
+ struct ata_device *dev;
+ bool ncq_prio_enable;
+ int rc = 0;
+
+ ap = ata_shost_to_port(sdev->host);
+
+ spin_lock_irq(ap->lock);
+ dev = ata_scsi_find_dev(ap, sdev);
+ if (!dev) {
+ rc = -ENODEV;
+ goto unlock;
+ }
+
+ ncq_prio_enable = dev->flags & ATA_DFLAG_NCQ_PRIO_ENABLE;
+
+unlock:
+ spin_unlock_irq(ap->lock);
+
+ return rc ? rc : snprintf(buf, 20, "%u\n", ncq_prio_enable);
+}
+
+static ssize_t ata_ncq_prio_enable_store(struct device *device,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct scsi_device *sdev = to_scsi_device(device);
+ struct ata_port *ap;
+ struct ata_device *dev;
+ long int input;
+ int rc;
+
+ rc = kstrtol(buf, 10, &input);
+ if (rc)
+ return rc;
+ if ((input < 0) || (input > 1))
+ return -EINVAL;
+
+ ap = ata_shost_to_port(sdev->host);
+ dev = ata_scsi_find_dev(ap, sdev);
+ if (unlikely(!dev))
+ return -ENODEV;
+
+ spin_lock_irq(ap->lock);
+ if (input)
+ dev->flags |= ATA_DFLAG_NCQ_PRIO_ENABLE;
+ else
+ dev->flags &= ~ATA_DFLAG_NCQ_PRIO_ENABLE;
+
+ dev->link->eh_info.action |= ATA_EH_REVALIDATE;
+ dev->link->eh_info.flags |= ATA_EHI_QUIET;
+ ata_port_schedule_eh(ap);
+ spin_unlock_irq(ap->lock);
+
+ ata_port_wait_eh(ap);
+
+ if (input) {
+ spin_lock_irq(ap->lock);
+ if (!(dev->flags & ATA_DFLAG_NCQ_PRIO)) {
+ dev->flags &= ~ATA_DFLAG_NCQ_PRIO_ENABLE;
+ rc = -EIO;
+ }
+ spin_unlock_irq(ap->lock);
+ }
+
+ return rc ? rc : len;
+}
+
+DEVICE_ATTR(ncq_prio_enable, S_IRUGO | S_IWUSR,
+ ata_ncq_prio_enable_show, ata_ncq_prio_enable_store);
+EXPORT_SYMBOL_GPL(dev_attr_ncq_prio_enable);
+
void ata_scsi_set_sense(struct ata_device *dev, struct scsi_cmnd *cmd,
u8 sk, u8 asc, u8 ascq)
{
@@ -401,6 +479,7 @@ EXPORT_SYMBOL_GPL(dev_attr_sw_activity);
struct device_attribute *ata_common_sdev_attrs[] = {
&dev_attr_unload_heads,
+ &dev_attr_ncq_prio_enable,
NULL
};
EXPORT_SYMBOL_GPL(ata_common_sdev_attrs);
@@ -1756,6 +1835,8 @@ static unsigned int ata_scsi_rw_xlat(struct ata_queued_cmd *qc)
{
struct scsi_cmnd *scmd = qc->scsicmd;
const u8 *cdb = scmd->cmnd;
+ struct request *rq = scmd->request;
+ int class = IOPRIO_PRIO_CLASS(req_get_ioprio(rq));
unsigned int tf_flags = 0;
u64 block;
u32 n_block;
@@ -1822,7 +1903,8 @@ static unsigned int ata_scsi_rw_xlat(struct ata_queued_cmd *qc)
qc->nbytes = n_block * scmd->device->sector_size;
rc = ata_build_rw_tf(&qc->tf, qc->dev, block, n_block, tf_flags,
- qc->tag);
+ qc->tag, class);
+
if (likely(rc == 0))
return 0;
diff --git a/drivers/ata/libata.h b/drivers/ata/libata.h
index 3b301a48007c..8f3a5596dd67 100644
--- a/drivers/ata/libata.h
+++ b/drivers/ata/libata.h
@@ -66,7 +66,7 @@ extern u64 ata_tf_to_lba48(const struct ata_taskfile *tf);
extern struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev, int tag);
extern int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev,
u64 block, u32 n_block, unsigned int tf_flags,
- unsigned int tag);
+ unsigned int tag, int class);
extern u64 ata_tf_read_block(const struct ata_taskfile *tf,
struct ata_device *dev);
extern unsigned ata_exec_internal(struct ata_device *dev,
diff --git a/drivers/ata/pata_imx.c b/drivers/ata/pata_imx.c
index 139d20778b29..d4caa23f5a88 100644
--- a/drivers/ata/pata_imx.c
+++ b/drivers/ata/pata_imx.c
@@ -11,19 +11,26 @@
*
* TODO:
* - dmaengine support
- * - check if timing stuff needed
*/
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/blkdev.h>
-#include <scsi/scsi_host.h>
+
#include <linux/ata.h>
+#include <linux/clk.h>
#include <linux/libata.h>
+#include <linux/module.h>
#include <linux/platform_device.h>
-#include <linux/clk.h>
#define DRV_NAME "pata_imx"
+#define PATA_IMX_ATA_TIME_OFF 0x00
+#define PATA_IMX_ATA_TIME_ON 0x01
+#define PATA_IMX_ATA_TIME_1 0x02
+#define PATA_IMX_ATA_TIME_2W 0x03
+#define PATA_IMX_ATA_TIME_2R 0x04
+#define PATA_IMX_ATA_TIME_AX 0x05
+#define PATA_IMX_ATA_TIME_PIO_RDX 0x06
+#define PATA_IMX_ATA_TIME_4 0x07
+#define PATA_IMX_ATA_TIME_9 0x08
+
#define PATA_IMX_ATA_CONTROL 0x24
#define PATA_IMX_ATA_CTRL_FIFO_RST_B (1<<7)
#define PATA_IMX_ATA_CTRL_ATA_RST_B (1<<6)
@@ -33,6 +40,10 @@
#define PATA_IMX_DRIVE_DATA 0xA0
#define PATA_IMX_DRIVE_CONTROL 0xD8
+static u32 pio_t4[] = { 30, 20, 15, 10, 10 };
+static u32 pio_t9[] = { 20, 15, 10, 10, 10 };
+static u32 pio_tA[] = { 35, 35, 35, 35, 35 };
+
struct pata_imx_priv {
struct clk *clk;
/* timings/interrupt/control regs */
@@ -40,28 +51,49 @@ struct pata_imx_priv {
u32 ata_ctl;
};
-static int pata_imx_set_mode(struct ata_link *link, struct ata_device **unused)
+static void pata_imx_set_timing(struct ata_device *adev,
+ struct pata_imx_priv *priv)
+{
+ struct ata_timing timing;
+ unsigned long clkrate;
+ u32 T, mode;
+
+ clkrate = clk_get_rate(priv->clk);
+
+ if (adev->pio_mode < XFER_PIO_0 || adev->pio_mode > XFER_PIO_4 ||
+ !clkrate)
+ return;
+
+ T = 1000000000 / clkrate;
+ ata_timing_compute(adev, adev->pio_mode, &timing, T * 1000, 0);
+
+ mode = adev->pio_mode - XFER_PIO_0;
+
+ writeb(3, priv->host_regs + PATA_IMX_ATA_TIME_OFF);
+ writeb(3, priv->host_regs + PATA_IMX_ATA_TIME_ON);
+ writeb(timing.setup, priv->host_regs + PATA_IMX_ATA_TIME_1);
+ writeb(timing.act8b, priv->host_regs + PATA_IMX_ATA_TIME_2W);
+ writeb(timing.act8b, priv->host_regs + PATA_IMX_ATA_TIME_2R);
+ writeb(1, priv->host_regs + PATA_IMX_ATA_TIME_PIO_RDX);
+
+ writeb(pio_t4[mode] / T + 1, priv->host_regs + PATA_IMX_ATA_TIME_4);
+ writeb(pio_t9[mode] / T + 1, priv->host_regs + PATA_IMX_ATA_TIME_9);
+ writeb(pio_tA[mode] / T + 1, priv->host_regs + PATA_IMX_ATA_TIME_AX);
+}
+
+static void pata_imx_set_piomode(struct ata_port *ap, struct ata_device *adev)
{
- struct ata_device *dev;
- struct ata_port *ap = link->ap;
struct pata_imx_priv *priv = ap->host->private_data;
u32 val;
- ata_for_each_dev(dev, link, ENABLED) {
- dev->pio_mode = dev->xfer_mode = XFER_PIO_0;
- dev->xfer_shift = ATA_SHIFT_PIO;
- dev->flags |= ATA_DFLAG_PIO;
-
- val = __raw_readl(priv->host_regs + PATA_IMX_ATA_CONTROL);
- if (ata_pio_need_iordy(dev))
- val |= PATA_IMX_ATA_CTRL_IORDY_EN;
- else
- val &= ~PATA_IMX_ATA_CTRL_IORDY_EN;
- __raw_writel(val, priv->host_regs + PATA_IMX_ATA_CONTROL);
+ pata_imx_set_timing(adev, priv);
- ata_dev_info(dev, "configured for PIO\n");
- }
- return 0;
+ val = __raw_readl(priv->host_regs + PATA_IMX_ATA_CONTROL);
+ if (ata_pio_need_iordy(adev))
+ val |= PATA_IMX_ATA_CTRL_IORDY_EN;
+ else
+ val &= ~PATA_IMX_ATA_CTRL_IORDY_EN;
+ __raw_writel(val, priv->host_regs + PATA_IMX_ATA_CONTROL);
}
static struct scsi_host_template pata_imx_sht = {
@@ -72,7 +104,7 @@ static struct ata_port_operations pata_imx_port_ops = {
.inherits = &ata_sff_port_ops,
.sff_data_xfer = ata_sff_data_xfer_noirq,
.cable_detect = ata_cable_unknown,
- .set_mode = pata_imx_set_mode,
+ .set_piomode = pata_imx_set_piomode,
};
static void pata_imx_setup_port(struct ata_ioports *ioaddr)
@@ -128,7 +160,7 @@ static int pata_imx_probe(struct platform_device *pdev)
ap = host->ports[0];
ap->ops = &pata_imx_port_ops;
- ap->pio_mask = ATA_PIO0;
+ ap->pio_mask = ATA_PIO4;
ap->flags |= ATA_FLAG_SLAVE_POSS;
io_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
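A worked example of the register math in pata_imx_set_timing(), assuming a 66 MHz ATA clock:

	/*
	 * T = 1000000000 / 66000000 = 15 ns per clock cycle, so for
	 * PIO0 (pio_t4[0] = 30 ns) the TIME_4 register is written with
	 * 30 / 15 + 1 = 3 cycles.
	 */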
diff --git a/drivers/atm/solos-pci.c b/drivers/atm/solos-pci.c
index 6ac2b2b1e8de..5ad037c07ec7 100644
--- a/drivers/atm/solos-pci.c
+++ b/drivers/atm/solos-pci.c
@@ -584,7 +584,7 @@ static ssize_t hardware_show(struct device *dev, struct device_attribute *attr,
return sprintf(buf, "%d\n", data32);
}
-static DEVICE_ATTR(console, 0644, console_show, console_store);
+static DEVICE_ATTR_RW(console);
#define SOLOS_ATTR_RO(x) static DEVICE_ATTR(x, 0444, solos_param_show, NULL);
diff --git a/drivers/auxdisplay/Kconfig b/drivers/auxdisplay/Kconfig
index 10e1b9eee10e..4ef4c5caed4f 100644
--- a/drivers/auxdisplay/Kconfig
+++ b/drivers/auxdisplay/Kconfig
@@ -128,4 +128,17 @@ config IMG_ASCII_LCD
development boards such as the MIPS Boston, MIPS Malta & MIPS SEAD3
from Imagination Technologies.
+config HT16K33
+ tristate "Holtek Ht16K33 LED controller with keyscan"
+ depends on FB && OF && I2C && INPUT
+ select FB_SYS_FOPS
+ select FB_CFB_FILLRECT
+ select FB_CFB_COPYAREA
+ select FB_CFB_IMAGEBLIT
+ select INPUT_MATRIXKMAP
+ select FB_BACKLIGHT
+ help
+ Say yes here to add support for the Holtek HT16K33, a RAM-mapping
+ 16*8 LED controller with keyscan.
+
endif # AUXDISPLAY
diff --git a/drivers/auxdisplay/Makefile b/drivers/auxdisplay/Makefile
index 3127175c89df..cb3dd847713b 100644
--- a/drivers/auxdisplay/Makefile
+++ b/drivers/auxdisplay/Makefile
@@ -5,3 +5,4 @@
obj-$(CONFIG_KS0108) += ks0108.o
obj-$(CONFIG_CFAG12864B) += cfag12864b.o cfag12864bfb.o
obj-$(CONFIG_IMG_ASCII_LCD) += img-ascii-lcd.o
+obj-$(CONFIG_HT16K33) += ht16k33.o
diff --git a/drivers/auxdisplay/ht16k33.c b/drivers/auxdisplay/ht16k33.c
new file mode 100644
index 000000000000..eeb323f56c07
--- /dev/null
+++ b/drivers/auxdisplay/ht16k33.c
@@ -0,0 +1,563 @@
+/*
+ * HT16K33 driver
+ *
+ * Author: Robin van der Gracht <robin@protonic.nl>
+ *
+ * Copyright: (C) 2016 Protonic Holland.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/i2c.h>
+#include <linux/of.h>
+#include <linux/fb.h>
+#include <linux/slab.h>
+#include <linux/backlight.h>
+#include <linux/input.h>
+#include <linux/input/matrix_keypad.h>
+#include <linux/workqueue.h>
+#include <linux/mm.h>
+
+/* Registers */
+#define REG_SYSTEM_SETUP 0x20
+#define REG_SYSTEM_SETUP_OSC_ON BIT(0)
+
+#define REG_DISPLAY_SETUP 0x80
+#define REG_DISPLAY_SETUP_ON BIT(0)
+
+#define REG_ROWINT_SET 0xA0
+#define REG_ROWINT_SET_INT_EN BIT(0)
+#define REG_ROWINT_SET_INT_ACT_HIGH BIT(1)
+
+#define REG_BRIGHTNESS 0xE0
+
+/* Defines */
+#define DRIVER_NAME "ht16k33"
+
+#define MIN_BRIGHTNESS 0x1
+#define MAX_BRIGHTNESS 0x10
+
+#define HT16K33_MATRIX_LED_MAX_COLS 8
+#define HT16K33_MATRIX_LED_MAX_ROWS 16
+#define HT16K33_MATRIX_KEYPAD_MAX_COLS 3
+#define HT16K33_MATRIX_KEYPAD_MAX_ROWS 12
+
+#define BYTES_PER_ROW (HT16K33_MATRIX_LED_MAX_ROWS / 8)
+#define HT16K33_FB_SIZE (HT16K33_MATRIX_LED_MAX_COLS * BYTES_PER_ROW)
+
+struct ht16k33_keypad {
+ struct input_dev *dev;
+ spinlock_t lock;
+ struct delayed_work work;
+ uint32_t cols;
+ uint32_t rows;
+ uint32_t row_shift;
+ uint32_t debounce_ms;
+ uint16_t last_key_state[HT16K33_MATRIX_KEYPAD_MAX_COLS];
+};
+
+struct ht16k33_fbdev {
+ struct fb_info *info;
+ uint32_t refresh_rate;
+ uint8_t *buffer;
+ uint8_t *cache;
+ struct delayed_work work;
+};
+
+struct ht16k33_priv {
+ struct i2c_client *client;
+ struct ht16k33_keypad keypad;
+ struct ht16k33_fbdev fbdev;
+ struct workqueue_struct *workqueue;
+};
+
+static struct fb_fix_screeninfo ht16k33_fb_fix = {
+ .id = DRIVER_NAME,
+ .type = FB_TYPE_PACKED_PIXELS,
+ .visual = FB_VISUAL_MONO10,
+ .xpanstep = 0,
+ .ypanstep = 0,
+ .ywrapstep = 0,
+ .line_length = HT16K33_MATRIX_LED_MAX_ROWS,
+ .accel = FB_ACCEL_NONE,
+};
+
+static struct fb_var_screeninfo ht16k33_fb_var = {
+ .xres = HT16K33_MATRIX_LED_MAX_ROWS,
+ .yres = HT16K33_MATRIX_LED_MAX_COLS,
+ .xres_virtual = HT16K33_MATRIX_LED_MAX_ROWS,
+ .yres_virtual = HT16K33_MATRIX_LED_MAX_COLS,
+ .bits_per_pixel = 1,
+ .red = { 0, 1, 0 },
+ .green = { 0, 1, 0 },
+ .blue = { 0, 1, 0 },
+ .left_margin = 0,
+ .right_margin = 0,
+ .upper_margin = 0,
+ .lower_margin = 0,
+ .vmode = FB_VMODE_NONINTERLACED,
+};
+
+static int ht16k33_display_on(struct ht16k33_priv *priv)
+{
+ uint8_t data = REG_DISPLAY_SETUP | REG_DISPLAY_SETUP_ON;
+
+ return i2c_smbus_write_byte(priv->client, data);
+}
+
+static int ht16k33_display_off(struct ht16k33_priv *priv)
+{
+ return i2c_smbus_write_byte(priv->client, REG_DISPLAY_SETUP);
+}
+
+static void ht16k33_fb_queue(struct ht16k33_priv *priv)
+{
+ struct ht16k33_fbdev *fbdev = &priv->fbdev;
+
+ queue_delayed_work(priv->workqueue, &fbdev->work,
+ msecs_to_jiffies(1000 / fbdev->refresh_rate));
+}
+
+static void ht16k33_keypad_queue(struct ht16k33_priv *priv)
+{
+ struct ht16k33_keypad *keypad = &priv->keypad;
+
+ queue_delayed_work(priv->workqueue, &keypad->work,
+ msecs_to_jiffies(keypad->debounce_ms));
+}
+
+/*
+ * Diff the framebuffer against the cache and write changed bytes to the HT16K33 display RAM
+ */
+static void ht16k33_fb_update(struct work_struct *work)
+{
+ struct ht16k33_fbdev *fbdev =
+ container_of(work, struct ht16k33_fbdev, work.work);
+ struct ht16k33_priv *priv =
+ container_of(fbdev, struct ht16k33_priv, fbdev);
+
+ uint8_t *p1, *p2;
+ int len, pos = 0, first = -1;
+
+ p1 = fbdev->cache;
+ p2 = fbdev->buffer;
+
+ /* Search for the first byte with changes */
+ while (pos < HT16K33_FB_SIZE && first < 0) {
+ if (*(p1++) - *(p2++))
+ first = pos;
+ pos++;
+ }
+
+ /* No changes found */
+ if (first < 0)
+ goto requeue;
+
+ len = HT16K33_FB_SIZE - first;
+ p1 = fbdev->cache + HT16K33_FB_SIZE - 1;
+ p2 = fbdev->buffer + HT16K33_FB_SIZE - 1;
+
+ /* Determine i2c transfer length */
+ while (len > 1) {
+ if (*(p1--) - *(p2--))
+ break;
+ len--;
+ }
+
+ p1 = fbdev->cache + first;
+ p2 = fbdev->buffer + first;
+ if (!i2c_smbus_write_i2c_block_data(priv->client, first, len, p2))
+ memcpy(p1, p2, len);
+requeue:
+ ht16k33_fb_queue(priv);
+}
+
+static int ht16k33_keypad_start(struct input_dev *dev)
+{
+ struct ht16k33_priv *priv = input_get_drvdata(dev);
+ struct ht16k33_keypad *keypad = &priv->keypad;
+
+ /*
+ * Schedule an immediate key scan to capture current key state;
+ * columns will be activated and IRQs will be enabled after the scan.
+ */
+ queue_delayed_work(priv->workqueue, &keypad->work, 0);
+ return 0;
+}
+
+static void ht16k33_keypad_stop(struct input_dev *dev)
+{
+ struct ht16k33_priv *priv = input_get_drvdata(dev);
+ struct ht16k33_keypad *keypad = &priv->keypad;
+
+ cancel_delayed_work(&keypad->work);
+ /*
+ * ht16k33_keypad_scan() will leave IRQs enabled;
+ * we should disable them now.
+ */
+ disable_irq_nosync(priv->client->irq);
+}
+
+static int ht16k33_initialize(struct ht16k33_priv *priv)
+{
+ uint8_t byte;
+ int err;
+ uint8_t data[HT16K33_MATRIX_LED_MAX_COLS * 2];
+
+ /* Clear RAM (8 * 16 bits) */
+ memset(data, 0, sizeof(data));
+ err = i2c_smbus_write_block_data(priv->client, 0, sizeof(data), data);
+ if (err)
+ return err;
+
+ /* Turn on internal oscillator */
+ byte = REG_SYSTEM_SETUP_OSC_ON | REG_SYSTEM_SETUP;
+ err = i2c_smbus_write_byte(priv->client, byte);
+ if (err)
+ return err;
+
+ /* Configure INT pin */
+ byte = REG_ROWINT_SET | REG_ROWINT_SET_INT_ACT_HIGH;
+ if (priv->client->irq > 0)
+ byte |= REG_ROWINT_SET_INT_EN;
+ return i2c_smbus_write_byte(priv->client, byte);
+}
+
+/*
+ * This reads the keys from the keypad and reports them to the input subsystem
+ */
+static void ht16k33_keypad_scan(struct work_struct *work)
+{
+ struct ht16k33_keypad *keypad =
+ container_of(work, struct ht16k33_keypad, work.work);
+ struct ht16k33_priv *priv =
+ container_of(keypad, struct ht16k33_priv, keypad);
+ const unsigned short *keycodes = keypad->dev->keycode;
+ uint16_t bits_changed, new_state[HT16K33_MATRIX_KEYPAD_MAX_COLS];
+ uint8_t data[HT16K33_MATRIX_KEYPAD_MAX_COLS * 2];
+ int row, col, code;
+ bool reschedule = false;
+
+ if (i2c_smbus_read_i2c_block_data(priv->client, 0x40, 6, data) != 6) {
+ dev_err(&priv->client->dev, "Failed to read key data\n");
+ goto end;
+ }
+
+ for (col = 0; col < keypad->cols; col++) {
+ new_state[col] = (data[col * 2 + 1] << 8) | data[col * 2];
+ if (new_state[col])
+ reschedule = true;
+ bits_changed = keypad->last_key_state[col] ^ new_state[col];
+
+ while (bits_changed) {
+ row = ffs(bits_changed) - 1;
+ code = MATRIX_SCAN_CODE(row, col, keypad->row_shift);
+ input_event(keypad->dev, EV_MSC, MSC_SCAN, code);
+ input_report_key(keypad->dev, keycodes[code],
+ new_state[col] & BIT(row));
+ bits_changed &= ~BIT(row);
+ }
+ }
+ input_sync(keypad->dev);
+ memcpy(keypad->last_key_state, new_state, sizeof(new_state));
+
+end:
+ if (reschedule)
+ ht16k33_keypad_queue(priv);
+ else
+ enable_irq(priv->client->irq);
+}
+
+static irqreturn_t ht16k33_irq_thread(int irq, void *dev)
+{
+ struct ht16k33_priv *priv = dev;
+
+ disable_irq_nosync(priv->client->irq);
+ ht16k33_keypad_queue(priv);
+
+ return IRQ_HANDLED;
+}
+
+static int ht16k33_bl_update_status(struct backlight_device *bl)
+{
+ int brightness = bl->props.brightness;
+ struct ht16k33_priv *priv = bl_get_data(bl);
+
+ if (bl->props.power != FB_BLANK_UNBLANK ||
+ bl->props.fb_blank != FB_BLANK_UNBLANK ||
+ bl->props.state & BL_CORE_FBBLANK || brightness == 0) {
+ return ht16k33_display_off(priv);
+ }
+
+ ht16k33_display_on(priv);
+ return i2c_smbus_write_byte(priv->client,
+ REG_BRIGHTNESS | (brightness - 1));
+}
+
+static int ht16k33_bl_check_fb(struct backlight_device *bl, struct fb_info *fi)
+{
+ struct ht16k33_priv *priv = bl_get_data(bl);
+
+ return (fi == NULL) || (fi->par == priv);
+}
+
+static const struct backlight_ops ht16k33_bl_ops = {
+ .update_status = ht16k33_bl_update_status,
+ .check_fb = ht16k33_bl_check_fb,
+};
+
+static int ht16k33_mmap(struct fb_info *info, struct vm_area_struct *vma)
+{
+ struct ht16k33_priv *priv = info->par;
+
+ return vm_insert_page(vma, vma->vm_start,
+ virt_to_page(priv->fbdev.buffer));
+}
+
+static struct fb_ops ht16k33_fb_ops = {
+ .owner = THIS_MODULE,
+ .fb_read = fb_sys_read,
+ .fb_write = fb_sys_write,
+ .fb_fillrect = sys_fillrect,
+ .fb_copyarea = sys_copyarea,
+ .fb_imageblit = sys_imageblit,
+ .fb_mmap = ht16k33_mmap,
+};
+
+static int ht16k33_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ int err;
+ uint32_t rows, cols, dft_brightness;
+ struct backlight_device *bl;
+ struct backlight_properties bl_props;
+ struct ht16k33_priv *priv;
+ struct ht16k33_keypad *keypad;
+ struct ht16k33_fbdev *fbdev;
+ struct device_node *node = client->dev.of_node;
+
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
+ dev_err(&client->dev, "i2c_check_functionality error\n");
+ return -EIO;
+ }
+
+ if (client->irq <= 0) {
+ dev_err(&client->dev, "No IRQ specified\n");
+ return -EINVAL;
+ }
+
+ priv = devm_kzalloc(&client->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->client = client;
+ i2c_set_clientdata(client, priv);
+ fbdev = &priv->fbdev;
+ keypad = &priv->keypad;
+
+ priv->workqueue = create_singlethread_workqueue(DRIVER_NAME "-wq");
+ if (priv->workqueue == NULL)
+ return -ENOMEM;
+
+ err = ht16k33_initialize(priv);
+ if (err)
+ goto err_destroy_wq;
+
+ /* Framebuffer (2 bytes per column) */
+ BUILD_BUG_ON(PAGE_SIZE < HT16K33_FB_SIZE);
+ fbdev->buffer = (unsigned char *) get_zeroed_page(GFP_KERNEL);
+ if (!fbdev->buffer) {
+ err = -ENOMEM;
+ goto err_destroy_wq;
+ }
+
+ fbdev->cache = devm_kmalloc(&client->dev, HT16K33_FB_SIZE, GFP_KERNEL);
+ if (!fbdev->cache) {
+ err = -ENOMEM;
+ goto err_fbdev_buffer;
+ }
+
+ fbdev->info = framebuffer_alloc(0, &client->dev);
+ if (!fbdev->info) {
+ err = -ENOMEM;
+ goto err_fbdev_buffer;
+ }
+
+ err = of_property_read_u32(node, "refresh-rate-hz",
+ &fbdev->refresh_rate);
+ if (err) {
+ dev_err(&client->dev, "refresh rate not specified\n");
+ goto err_fbdev_info;
+ }
+ fb_bl_default_curve(fbdev->info, 0, MIN_BRIGHTNESS, MAX_BRIGHTNESS);
+
+ INIT_DELAYED_WORK(&fbdev->work, ht16k33_fb_update);
+ fbdev->info->fbops = &ht16k33_fb_ops;
+ fbdev->info->screen_base = (char __iomem *) fbdev->buffer;
+ fbdev->info->screen_size = HT16K33_FB_SIZE;
+ fbdev->info->fix = ht16k33_fb_fix;
+ fbdev->info->var = ht16k33_fb_var;
+ fbdev->info->pseudo_palette = NULL;
+ fbdev->info->flags = FBINFO_FLAG_DEFAULT;
+ fbdev->info->par = priv;
+
+ err = register_framebuffer(fbdev->info);
+ if (err)
+ goto err_fbdev_info;
+
+ /* Keypad */
+ keypad->dev = devm_input_allocate_device(&client->dev);
+ if (!keypad->dev) {
+ err = -ENOMEM;
+ goto err_fbdev_unregister;
+ }
+
+ keypad->dev->name = DRIVER_NAME"-keypad";
+ keypad->dev->id.bustype = BUS_I2C;
+ keypad->dev->open = ht16k33_keypad_start;
+ keypad->dev->close = ht16k33_keypad_stop;
+
+ if (!of_get_property(node, "linux,no-autorepeat", NULL))
+ __set_bit(EV_REP, keypad->dev->evbit);
+
+ err = of_property_read_u32(node, "debounce-delay-ms",
+ &keypad->debounce_ms);
+ if (err) {
+ dev_err(&client->dev, "key debounce delay not specified\n");
+ goto err_fbdev_unregister;
+ }
+
+ err = devm_request_threaded_irq(&client->dev, client->irq, NULL,
+ ht16k33_irq_thread,
+ IRQF_TRIGGER_RISING | IRQF_ONESHOT,
+ DRIVER_NAME, priv);
+ if (err) {
+ dev_err(&client->dev, "irq request failed %d, error %d\n",
+ client->irq, err);
+ goto err_fbdev_unregister;
+ }
+
+ disable_irq_nosync(client->irq);
+ rows = HT16K33_MATRIX_KEYPAD_MAX_ROWS;
+ cols = HT16K33_MATRIX_KEYPAD_MAX_COLS;
+ err = matrix_keypad_parse_of_params(&client->dev, &rows, &cols);
+ if (err)
+ goto err_fbdev_unregister;
+
+ err = matrix_keypad_build_keymap(NULL, NULL, rows, cols, NULL,
+ keypad->dev);
+ if (err) {
+ dev_err(&client->dev, "failed to build keymap\n");
+ goto err_fbdev_unregister;
+ }
+
+ input_set_drvdata(keypad->dev, priv);
+ keypad->rows = rows;
+ keypad->cols = cols;
+ keypad->row_shift = get_count_order(cols);
+ INIT_DELAYED_WORK(&keypad->work, ht16k33_keypad_scan);
+
+ err = input_register_device(keypad->dev);
+ if (err)
+ goto err_fbdev_unregister;
+
+ /* Backlight */
+ memset(&bl_props, 0, sizeof(struct backlight_properties));
+ bl_props.type = BACKLIGHT_RAW;
+ bl_props.max_brightness = MAX_BRIGHTNESS;
+
+ bl = devm_backlight_device_register(&client->dev, DRIVER_NAME"-bl",
+ &client->dev, priv,
+ &ht16k33_bl_ops, &bl_props);
+ if (IS_ERR(bl)) {
+ dev_err(&client->dev, "failed to register backlight\n");
+ err = PTR_ERR(bl);
+ goto err_keypad_unregister;
+ }
+
+ err = of_property_read_u32(node, "default-brightness-level",
+ &dft_brightness);
+ if (err) {
+ dft_brightness = MAX_BRIGHTNESS;
+ } else if (dft_brightness > MAX_BRIGHTNESS) {
+ dev_warn(&client->dev,
+ "invalid default brightness level: %u, using %u\n",
+ dft_brightness, MAX_BRIGHTNESS);
+ dft_brightness = MAX_BRIGHTNESS;
+ }
+
+ bl->props.brightness = dft_brightness;
+ ht16k33_bl_update_status(bl);
+
+ ht16k33_fb_queue(priv);
+ return 0;
+
+err_keypad_unregister:
+ input_unregister_device(keypad->dev);
+err_fbdev_unregister:
+ unregister_framebuffer(fbdev->info);
+err_fbdev_info:
+ framebuffer_release(fbdev->info);
+err_fbdev_buffer:
+ free_page((unsigned long) fbdev->buffer);
+err_destroy_wq:
+ destroy_workqueue(priv->workqueue);
+
+ return err;
+}
+
+static int ht16k33_remove(struct i2c_client *client)
+{
+ struct ht16k33_priv *priv = i2c_get_clientdata(client);
+ struct ht16k33_keypad *keypad = &priv->keypad;
+ struct ht16k33_fbdev *fbdev = &priv->fbdev;
+
+ ht16k33_keypad_stop(keypad->dev);
+
+ cancel_delayed_work(&fbdev->work);
+ unregister_framebuffer(fbdev->info);
+ framebuffer_release(fbdev->info);
+ free_page((unsigned long) fbdev->buffer);
+
+ destroy_workqueue(priv->workqueue);
+ return 0;
+}
+
+static const struct i2c_device_id ht16k33_i2c_match[] = {
+ { "ht16k33", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, ht16k33_i2c_match);
+
+static const struct of_device_id ht16k33_of_match[] = {
+ { .compatible = "holtek,ht16k33", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, ht16k33_of_match);
+
+static struct i2c_driver ht16k33_driver = {
+ .probe = ht16k33_probe,
+ .remove = ht16k33_remove,
+ .driver = {
+ .name = DRIVER_NAME,
+ .of_match_table = of_match_ptr(ht16k33_of_match),
+ },
+ .id_table = ht16k33_i2c_match,
+};
+module_i2c_driver(ht16k33_driver);
+
+MODULE_DESCRIPTION("Holtek HT16K33 driver");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Robin van der Gracht <robin@protonic.nl>");
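A hypothetical devicetree node exercising the properties parsed by ht16k33_probe(); the property names match the code above, but the exact binding details (unit address, interrupt specifier, keymap) are an assumption:

	ht16k33@70 {
		compatible = "holtek,ht16k33";
		reg = <0x70>;
		interrupt-parent = <&gpio1>;
		interrupts = <4 IRQ_TYPE_EDGE_RISING>;
		refresh-rate-hz = <20>;
		debounce-delay-ms = <50>;
		default-brightness-level = <16>;
		keypad,num-rows = <12>;
		keypad,num-columns = <3>;
		linux,keymap = <MATRIX_KEY(0, 0, KEY_ENTER)>;
	};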
diff --git a/drivers/base/Kconfig b/drivers/base/Kconfig
index d02e7c0f5bfd..d718ae4b907a 100644
--- a/drivers/base/Kconfig
+++ b/drivers/base/Kconfig
@@ -224,6 +224,8 @@ config DEBUG_TEST_DRIVER_REMOVE
unusable. You should say N here unless you are explicitly looking to
test this functionality.
+source "drivers/base/test/Kconfig"
+
config SYS_HYPERVISOR
bool
default n
@@ -237,6 +239,7 @@ config GENERIC_CPU_AUTOPROBE
config SOC_BUS
bool
+ select GLOB
source "drivers/base/regmap/Kconfig"
@@ -250,11 +253,11 @@ config DMA_SHARED_BUFFER
APIs extension; the file's descriptor can then be passed on to other
driver.
-config FENCE_TRACE
- bool "Enable verbose FENCE_TRACE messages"
+config DMA_FENCE_TRACE
+ bool "Enable verbose DMA_FENCE_TRACE messages"
depends on DMA_SHARED_BUFFER
help
- Enable the FENCE_TRACE printks. This will add extra
+ Enable the DMA_FENCE_TRACE printks. This will add extra
spam to the console log, but will make it easier to diagnose
lockup related problems for dma-buffers shared across multiple
devices.
diff --git a/drivers/base/Makefile b/drivers/base/Makefile
index 2609ba20b396..f2816f6ff76a 100644
--- a/drivers/base/Makefile
+++ b/drivers/base/Makefile
@@ -24,5 +24,7 @@ obj-$(CONFIG_PINCTRL) += pinctrl.o
obj-$(CONFIG_DEV_COREDUMP) += devcoredump.o
obj-$(CONFIG_GENERIC_MSI_IRQ_DOMAIN) += platform-msi.o
+obj-y += test/
+
ccflags-$(CONFIG_DEBUG_DRIVER) := -DDEBUG
diff --git a/drivers/base/base.h b/drivers/base/base.h
index e05db388bd1c..ada9dce34e6d 100644
--- a/drivers/base/base.h
+++ b/drivers/base/base.h
@@ -107,6 +107,9 @@ extern void bus_remove_device(struct device *dev);
extern int bus_add_driver(struct device_driver *drv);
extern void bus_remove_driver(struct device_driver *drv);
+extern void device_release_driver_internal(struct device *dev,
+ struct device_driver *drv,
+ struct device *parent);
extern void driver_detach(struct device_driver *drv);
extern int driver_probe_device(struct device_driver *drv, struct device *dev);
@@ -138,6 +141,8 @@ extern void device_unblock_probing(void);
extern struct kset *devices_kset;
extern void devices_kset_move_last(struct device *dev);
+extern struct device_attribute dev_attr_deferred_probe;
+
#if defined(CONFIG_MODULES) && defined(CONFIG_SYSFS)
extern void module_add_driver(struct module *mod, struct device_driver *drv);
extern void module_remove_driver(struct device_driver *drv);
@@ -152,3 +157,13 @@ extern int devtmpfs_init(void);
#else
static inline int devtmpfs_init(void) { return 0; }
#endif
+
+/* Device links support */
+extern int device_links_read_lock(void);
+extern void device_links_read_unlock(int idx);
+extern int device_links_check_suppliers(struct device *dev);
+extern void device_links_driver_bound(struct device *dev);
+extern void device_links_driver_cleanup(struct device *dev);
+extern void device_links_no_driver(struct device *dev);
+extern bool device_links_busy(struct device *dev);
+extern void device_links_unbind_consumers(struct device *dev);
diff --git a/drivers/base/cacheinfo.c b/drivers/base/cacheinfo.c
index e9fd32e91668..eb3af2739537 100644
--- a/drivers/base/cacheinfo.c
+++ b/drivers/base/cacheinfo.c
@@ -16,6 +16,9 @@
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/acpi.h>
#include <linux/bitops.h>
#include <linux/cacheinfo.h>
#include <linux/compiler.h>
@@ -85,7 +88,120 @@ static inline bool cache_leaves_are_shared(struct cacheinfo *this_leaf,
{
return sib_leaf->of_node == this_leaf->of_node;
}
+
+/* OF properties to query for a given cache type */
+struct cache_type_info {
+ const char *size_prop;
+ const char *line_size_props[2];
+ const char *nr_sets_prop;
+};
+
+static const struct cache_type_info cache_type_info[] = {
+ {
+ .size_prop = "cache-size",
+ .line_size_props = { "cache-line-size",
+ "cache-block-size", },
+ .nr_sets_prop = "cache-sets",
+ }, {
+ .size_prop = "i-cache-size",
+ .line_size_props = { "i-cache-line-size",
+ "i-cache-block-size", },
+ .nr_sets_prop = "i-cache-sets",
+ }, {
+ .size_prop = "d-cache-size",
+ .line_size_props = { "d-cache-line-size",
+ "d-cache-block-size", },
+ .nr_sets_prop = "d-cache-sets",
+ },
+};
+
+static inline int get_cacheinfo_idx(enum cache_type type)
+{
+ if (type == CACHE_TYPE_UNIFIED)
+ return 0;
+ return type;
+}
+
+static void cache_size(struct cacheinfo *this_leaf)
+{
+ const char *propname;
+ const __be32 *cache_size;
+ int ct_idx;
+
+ ct_idx = get_cacheinfo_idx(this_leaf->type);
+ propname = cache_type_info[ct_idx].size_prop;
+
+ cache_size = of_get_property(this_leaf->of_node, propname, NULL);
+ if (cache_size)
+ this_leaf->size = of_read_number(cache_size, 1);
+}
+
+/* not cache_line_size() because that's a macro in include/linux/cache.h */
+static void cache_get_line_size(struct cacheinfo *this_leaf)
+{
+ const __be32 *line_size;
+ int i, lim, ct_idx;
+
+ ct_idx = get_cacheinfo_idx(this_leaf->type);
+ lim = ARRAY_SIZE(cache_type_info[ct_idx].line_size_props);
+
+ for (i = 0; i < lim; i++) {
+ const char *propname;
+
+ propname = cache_type_info[ct_idx].line_size_props[i];
+ line_size = of_get_property(this_leaf->of_node, propname, NULL);
+ if (line_size)
+ break;
+ }
+
+ if (line_size)
+ this_leaf->coherency_line_size = of_read_number(line_size, 1);
+}
+
+static void cache_nr_sets(struct cacheinfo *this_leaf)
+{
+ const char *propname;
+ const __be32 *nr_sets;
+ int ct_idx;
+
+ ct_idx = get_cacheinfo_idx(this_leaf->type);
+ propname = cache_type_info[ct_idx].nr_sets_prop;
+
+ nr_sets = of_get_property(this_leaf->of_node, propname, NULL);
+ if (nr_sets)
+ this_leaf->number_of_sets = of_read_number(nr_sets, 1);
+}
+
+static void cache_associativity(struct cacheinfo *this_leaf)
+{
+ unsigned int line_size = this_leaf->coherency_line_size;
+ unsigned int nr_sets = this_leaf->number_of_sets;
+ unsigned int size = this_leaf->size;
+
+ /*
+ * If the cache is fully associative, there is no need to
+ * check the other properties.
+ */
+ if (nr_sets > 1 && size > 0 && line_size > 0)
+ this_leaf->ways_of_associativity = (size / nr_sets) / line_size;
+}
+
+static void cache_of_override_properties(unsigned int cpu)
+{
+ int index;
+ struct cacheinfo *this_leaf;
+ struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
+
+ for (index = 0; index < cache_leaves(cpu); index++) {
+ this_leaf = this_cpu_ci->info_list + index;
+ cache_size(this_leaf);
+ cache_get_line_size(this_leaf);
+ cache_nr_sets(this_leaf);
+ cache_associativity(this_leaf);
+ }
+}
#else
+static void cache_of_override_properties(unsigned int cpu) { }
static inline int cache_setup_of_node(unsigned int cpu) { return 0; }
static inline bool cache_leaves_are_shared(struct cacheinfo *this_leaf,
struct cacheinfo *sib_leaf)
@@ -104,9 +220,16 @@ static int cache_shared_cpu_map_setup(unsigned int cpu)
struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
struct cacheinfo *this_leaf, *sib_leaf;
unsigned int index;
- int ret;
+ int ret = 0;
+
+ if (this_cpu_ci->cpu_map_populated)
+ return 0;
- ret = cache_setup_of_node(cpu);
+ if (of_have_populated_dt())
+ ret = cache_setup_of_node(cpu);
+ else if (!acpi_disabled)
+ /* No cache property/hierarchy support yet in ACPI */
+ ret = -ENOTSUPP;
if (ret)
return ret;
@@ -161,6 +284,12 @@ static void cache_shared_cpu_map_remove(unsigned int cpu)
}
}
+static void cache_override_properties(unsigned int cpu)
+{
+ if (of_have_populated_dt())
+ return cache_of_override_properties(cpu);
+}
+
static void free_cache_attributes(unsigned int cpu)
{
if (!per_cpu_cacheinfo(cpu))
@@ -203,10 +332,11 @@ static int detect_cache_attributes(unsigned int cpu)
*/
ret = cache_shared_cpu_map_setup(cpu);
if (ret) {
- pr_warn("Unable to detect cache hierarchy from DT for CPU %d\n",
- cpu);
+ pr_warn("Unable to detect cache hierarchy for CPU %d\n", cpu);
goto free_ci;
}
+
+ cache_override_properties(cpu);
return 0;
free_ci:
@@ -233,6 +363,7 @@ static ssize_t file_name##_show(struct device *dev, \
return sprintf(buf, "%u\n", this_leaf->object); \
}
+show_one(id, id);
show_one(level, level);
show_one(coherency_line_size, coherency_line_size);
show_one(number_of_sets, number_of_sets);
@@ -314,6 +445,7 @@ static ssize_t write_policy_show(struct device *dev,
return n;
}
+static DEVICE_ATTR_RO(id);
static DEVICE_ATTR_RO(level);
static DEVICE_ATTR_RO(type);
static DEVICE_ATTR_RO(coherency_line_size);
@@ -327,6 +459,7 @@ static DEVICE_ATTR_RO(shared_cpu_list);
static DEVICE_ATTR_RO(physical_line_partition);
static struct attribute *cache_default_attrs[] = {
+ &dev_attr_id.attr,
&dev_attr_type.attr,
&dev_attr_level.attr,
&dev_attr_shared_cpu_map.attr,
@@ -350,6 +483,8 @@ cache_default_attrs_is_visible(struct kobject *kobj,
const struct cpumask *mask = &this_leaf->shared_cpu_map;
umode_t mode = attr->mode;
+ if ((attr == &dev_attr_id.attr) && (this_leaf->attributes & CACHE_ID))
+ return mode;
if ((attr == &dev_attr_type.attr) && this_leaf->type)
return mode;
if ((attr == &dev_attr_level.attr) && this_leaf->level)
@@ -498,57 +633,30 @@ err:
return rc;
}
-static void cache_remove_dev(unsigned int cpu)
+static int cacheinfo_cpu_online(unsigned int cpu)
{
- if (!cpumask_test_cpu(cpu, &cache_dev_map))
- return;
- cpumask_clear_cpu(cpu, &cache_dev_map);
+ int rc = detect_cache_attributes(cpu);
- cpu_cache_sysfs_exit(cpu);
+ if (rc)
+ return rc;
+ rc = cache_add_dev(cpu);
+ if (rc)
+ free_cache_attributes(cpu);
+ return rc;
}
-static int cacheinfo_cpu_callback(struct notifier_block *nfb,
- unsigned long action, void *hcpu)
+static int cacheinfo_cpu_pre_down(unsigned int cpu)
{
- unsigned int cpu = (unsigned long)hcpu;
- int rc = 0;
+ if (cpumask_test_and_clear_cpu(cpu, &cache_dev_map))
+ cpu_cache_sysfs_exit(cpu);
- switch (action & ~CPU_TASKS_FROZEN) {
- case CPU_ONLINE:
- rc = detect_cache_attributes(cpu);
- if (!rc)
- rc = cache_add_dev(cpu);
- break;
- case CPU_DEAD:
- cache_remove_dev(cpu);
- free_cache_attributes(cpu);
- break;
- }
- return notifier_from_errno(rc);
+ free_cache_attributes(cpu);
+ return 0;
}
static int __init cacheinfo_sysfs_init(void)
{
- int cpu, rc = 0;
-
- cpu_notifier_register_begin();
-
- for_each_online_cpu(cpu) {
- rc = detect_cache_attributes(cpu);
- if (rc)
- goto out;
- rc = cache_add_dev(cpu);
- if (rc) {
- free_cache_attributes(cpu);
- pr_err("error populating cacheinfo..cpu%d\n", cpu);
- goto out;
- }
- }
- __hotcpu_notifier(cacheinfo_cpu_callback, 0);
-
-out:
- cpu_notifier_register_done();
- return rc;
+ return cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "base/cacheinfo:online",
+ cacheinfo_cpu_online, cacheinfo_cpu_pre_down);
}
-
device_initcall(cacheinfo_sysfs_init);
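A worked example of cache_associativity() above, assuming the DT supplies a 32 KiB cache with 64-byte lines and 128 sets:

	/*
	 * ways_of_associativity = (size / nr_sets) / line_size
	 *                       = (32768 / 128) / 64 = 4,
	 * i.e. the cache is 4-way set-associative.
	 */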
diff --git a/drivers/base/class.c b/drivers/base/class.c
index 71059e32bebc..a2b2896693d6 100644
--- a/drivers/base/class.c
+++ b/drivers/base/class.c
@@ -163,6 +163,18 @@ static void klist_class_dev_put(struct klist_node *n)
put_device(dev);
}
+static int class_add_groups(struct class *cls,
+ const struct attribute_group **groups)
+{
+ return sysfs_create_groups(&cls->p->subsys.kobj, groups);
+}
+
+static void class_remove_groups(struct class *cls,
+ const struct attribute_group **groups)
+{
+ return sysfs_remove_groups(&cls->p->subsys.kobj, groups);
+}
+
int __class_register(struct class *cls, struct lock_class_key *key)
{
struct subsys_private *cp;
@@ -203,6 +215,8 @@ int __class_register(struct class *cls, struct lock_class_key *key)
kfree(cp);
return error;
}
+ error = class_add_groups(class_get(cls), cls->class_groups);
+ class_put(cls);
error = add_class_attrs(class_get(cls));
class_put(cls);
return error;
@@ -213,6 +227,7 @@ void class_unregister(struct class *cls)
{
pr_debug("device class '%s': unregistering\n", cls->name);
remove_class_attrs(cls);
+ class_remove_groups(cls, cls->class_groups);
kset_unregister(&cls->p->subsys);
}
diff --git a/drivers/base/core.c b/drivers/base/core.c
index ce057a568673..020ea7f05520 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -44,6 +44,572 @@ static int __init sysfs_deprecated_setup(char *arg)
early_param("sysfs.deprecated", sysfs_deprecated_setup);
#endif
+/* Device links support. */
+
+#ifdef CONFIG_SRCU
+static DEFINE_MUTEX(device_links_lock);
+DEFINE_STATIC_SRCU(device_links_srcu);
+
+static inline void device_links_write_lock(void)
+{
+ mutex_lock(&device_links_lock);
+}
+
+static inline void device_links_write_unlock(void)
+{
+ mutex_unlock(&device_links_lock);
+}
+
+int device_links_read_lock(void)
+{
+ return srcu_read_lock(&device_links_srcu);
+}
+
+void device_links_read_unlock(int idx)
+{
+ srcu_read_unlock(&device_links_srcu, idx);
+}
+#else /* !CONFIG_SRCU */
+static DECLARE_RWSEM(device_links_lock);
+
+static inline void device_links_write_lock(void)
+{
+ down_write(&device_links_lock);
+}
+
+static inline void device_links_write_unlock(void)
+{
+ up_write(&device_links_lock);
+}
+
+int device_links_read_lock(void)
+{
+ down_read(&device_links_lock);
+ return 0;
+}
+
+void device_links_read_unlock(int not_used)
+{
+ up_read(&device_links_lock);
+}
+#endif /* !CONFIG_SRCU */
+
+/**
+ * device_is_dependent - Check if one device depends on another one
+ * @dev: Device to check dependencies for.
+ * @target: Device to check against.
+ *
+ * Check if @target depends on @dev or any device dependent on it (its child or
+ * its consumer, etc.). Return 1 if that is the case or 0 otherwise.
+ */
+static int device_is_dependent(struct device *dev, void *target)
+{
+ struct device_link *link;
+ int ret;
+
+ if (WARN_ON(dev == target))
+ return 1;
+
+ ret = device_for_each_child(dev, target, device_is_dependent);
+ if (ret)
+ return ret;
+
+ list_for_each_entry(link, &dev->links.consumers, s_node) {
+ if (WARN_ON(link->consumer == target))
+ return 1;
+
+ ret = device_is_dependent(link->consumer, target);
+ if (ret)
+ break;
+ }
+ return ret;
+}
+
+static int device_reorder_to_tail(struct device *dev, void *not_used)
+{
+ struct device_link *link;
+
+ /*
+	 * Devices that have not been registered yet will be added to the ends
+	 * of the lists during registration, so skip them here.
+ */
+ if (device_is_registered(dev))
+ devices_kset_move_last(dev);
+
+ if (device_pm_initialized(dev))
+ device_pm_move_last(dev);
+
+ device_for_each_child(dev, NULL, device_reorder_to_tail);
+ list_for_each_entry(link, &dev->links.consumers, s_node)
+ device_reorder_to_tail(link->consumer, NULL);
+
+ return 0;
+}
+
+/**
+ * device_link_add - Create a link between two devices.
+ * @consumer: Consumer end of the link.
+ * @supplier: Supplier end of the link.
+ * @flags: Link flags.
+ *
+ * The caller is responsible for the proper synchronization of the link creation
+ * with runtime PM. First, setting the DL_FLAG_PM_RUNTIME flag will cause the
+ * runtime PM framework to take the link into account. Second, if the
+ * DL_FLAG_RPM_ACTIVE flag is set in addition to it, the supplier devices will
+ * be forced into the active metastate and reference-counted upon the creation
+ * of the link. If DL_FLAG_PM_RUNTIME is not set, DL_FLAG_RPM_ACTIVE will be
+ * ignored.
+ *
+ * If the DL_FLAG_AUTOREMOVE is set, the link will be removed automatically
+ * when the consumer device driver unbinds from it. The combination of both
+ * DL_FLAG_AUTOREMOVE and DL_FLAG_STATELESS set is invalid and will cause NULL
+ * to be returned.
+ *
+ * A side effect of the link creation is re-ordering of dpm_list and the
+ * devices_kset list by moving the consumer device and all devices depending
+ * on it to the ends of these lists (that does not happen to devices that have
+ * not been registered when this function is called).
+ *
+ * The supplier device is required to be registered when this function is called
+ * and NULL will be returned if that is not the case. The consumer device need
+ * not be registered, however.
+ */
+struct device_link *device_link_add(struct device *consumer,
+ struct device *supplier, u32 flags)
+{
+ struct device_link *link;
+
+ if (!consumer || !supplier ||
+ ((flags & DL_FLAG_STATELESS) && (flags & DL_FLAG_AUTOREMOVE)))
+ return NULL;
+
+ device_links_write_lock();
+ device_pm_lock();
+
+ /*
+ * If the supplier has not been fully registered yet or there is a
+ * reverse dependency between the consumer and the supplier already in
+ * the graph, return NULL.
+ */
+ if (!device_pm_initialized(supplier)
+ || device_is_dependent(consumer, supplier)) {
+ link = NULL;
+ goto out;
+ }
+
+ list_for_each_entry(link, &supplier->links.consumers, s_node)
+ if (link->consumer == consumer)
+ goto out;
+
+ link = kzalloc(sizeof(*link), GFP_KERNEL);
+ if (!link)
+ goto out;
+
+ if (flags & DL_FLAG_PM_RUNTIME) {
+ if (flags & DL_FLAG_RPM_ACTIVE) {
+ if (pm_runtime_get_sync(supplier) < 0) {
+ pm_runtime_put_noidle(supplier);
+ kfree(link);
+ link = NULL;
+ goto out;
+ }
+ link->rpm_active = true;
+ }
+ pm_runtime_new_link(consumer);
+ }
+ get_device(supplier);
+ link->supplier = supplier;
+ INIT_LIST_HEAD(&link->s_node);
+ get_device(consumer);
+ link->consumer = consumer;
+ INIT_LIST_HEAD(&link->c_node);
+ link->flags = flags;
+
+ /* Determine the initial link state. */
+ if (flags & DL_FLAG_STATELESS) {
+ link->status = DL_STATE_NONE;
+ } else {
+ switch (supplier->links.status) {
+ case DL_DEV_DRIVER_BOUND:
+ switch (consumer->links.status) {
+ case DL_DEV_PROBING:
+ /*
+				 * Balance the decrement of the supplier's
+ * runtime PM usage counter after consumer probe
+ * in driver_probe_device().
+ */
+ if (flags & DL_FLAG_PM_RUNTIME)
+ pm_runtime_get_sync(supplier);
+
+ link->status = DL_STATE_CONSUMER_PROBE;
+ break;
+ case DL_DEV_DRIVER_BOUND:
+ link->status = DL_STATE_ACTIVE;
+ break;
+ default:
+ link->status = DL_STATE_AVAILABLE;
+ break;
+ }
+ break;
+ case DL_DEV_UNBINDING:
+ link->status = DL_STATE_SUPPLIER_UNBIND;
+ break;
+ default:
+ link->status = DL_STATE_DORMANT;
+ break;
+ }
+ }
+
+ /*
+ * Move the consumer and all of the devices depending on it to the end
+ * of dpm_list and the devices_kset list.
+ *
+	 * It is necessary to hold dpm_list locked throughout all of this,
+	 * or else devices may end up being suspended in the wrong order.
+ */
+ device_reorder_to_tail(consumer, NULL);
+
+ list_add_tail_rcu(&link->s_node, &supplier->links.consumers);
+ list_add_tail_rcu(&link->c_node, &consumer->links.suppliers);
+
+ dev_info(consumer, "Linked as a consumer to %s\n", dev_name(supplier));
+
+ out:
+ device_pm_unlock();
+ device_links_write_unlock();
+ return link;
+}
+EXPORT_SYMBOL_GPL(device_link_add);
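A minimal usage sketch, assuming a hypothetical consumer driver that has already looked up its supplier's struct device:

static int consumer_bind_supplier(struct device *consumer,
				  struct device *supplier)
{
	struct device_link *link;

	link = device_link_add(consumer, supplier,
			       DL_FLAG_PM_RUNTIME | DL_FLAG_AUTOREMOVE);
	if (!link)
		return -EPROBE_DEFER;	/* e.g. supplier not registered yet */

	/* DL_FLAG_AUTOREMOVE: the link is dropped when the consumer unbinds */
	return 0;
}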
+
+static void device_link_free(struct device_link *link)
+{
+ put_device(link->consumer);
+ put_device(link->supplier);
+ kfree(link);
+}
+
+#ifdef CONFIG_SRCU
+static void __device_link_free_srcu(struct rcu_head *rhead)
+{
+ device_link_free(container_of(rhead, struct device_link, rcu_head));
+}
+
+static void __device_link_del(struct device_link *link)
+{
+ dev_info(link->consumer, "Dropping the link to %s\n",
+ dev_name(link->supplier));
+
+ if (link->flags & DL_FLAG_PM_RUNTIME)
+ pm_runtime_drop_link(link->consumer);
+
+ list_del_rcu(&link->s_node);
+ list_del_rcu(&link->c_node);
+ call_srcu(&device_links_srcu, &link->rcu_head, __device_link_free_srcu);
+}
+#else /* !CONFIG_SRCU */
+static void __device_link_del(struct device_link *link)
+{
+ dev_info(link->consumer, "Dropping the link to %s\n",
+ dev_name(link->supplier));
+
+ list_del(&link->s_node);
+ list_del(&link->c_node);
+ device_link_free(link);
+}
+#endif /* !CONFIG_SRCU */
+
+/**
+ * device_link_del - Delete a link between two devices.
+ * @link: Device link to delete.
+ *
+ * The caller must ensure proper synchronization of this function with runtime
+ * PM.
+ */
+void device_link_del(struct device_link *link)
+{
+ device_links_write_lock();
+ device_pm_lock();
+ __device_link_del(link);
+ device_pm_unlock();
+ device_links_write_unlock();
+}
+EXPORT_SYMBOL_GPL(device_link_del);
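A stateless link, by contrast, bypasses the bind/unbind state machine and must be dropped explicitly by its creator; a sketch with hypothetical names:

static struct device_link *mydrv_link;

static void mydrv_attach(struct device *consumer, struct device *supplier)
{
	mydrv_link = device_link_add(consumer, supplier, DL_FLAG_STATELESS);
}

static void mydrv_detach(void)
{
	if (mydrv_link)
		device_link_del(mydrv_link);
}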
+
+static void device_links_missing_supplier(struct device *dev)
+{
+ struct device_link *link;
+
+ list_for_each_entry(link, &dev->links.suppliers, c_node)
+ if (link->status == DL_STATE_CONSUMER_PROBE)
+ WRITE_ONCE(link->status, DL_STATE_AVAILABLE);
+}
+
+/**
+ * device_links_check_suppliers - Check presence of supplier drivers.
+ * @dev: Consumer device.
+ *
+ * Check links from this device to any suppliers. Walk the list of the device's
+ * links to suppliers and see if all of them are available. If not, simply
+ * return -EPROBE_DEFER.
+ *
+ * We need to guarantee that the supplier will not go away after the check has
+ * been positive here. It can only go away in __device_release_driver() and
+ * that function checks the device's links to consumers. This means we need to
+ * mark the link as "consumer probe in progress" to make the supplier removal
+ * wait for us to complete (or bad things may happen).
+ *
+ * Links with the DL_FLAG_STATELESS flag set are ignored.
+ */
+int device_links_check_suppliers(struct device *dev)
+{
+ struct device_link *link;
+ int ret = 0;
+
+ device_links_write_lock();
+
+ list_for_each_entry(link, &dev->links.suppliers, c_node) {
+ if (link->flags & DL_FLAG_STATELESS)
+ continue;
+
+ if (link->status != DL_STATE_AVAILABLE) {
+ device_links_missing_supplier(dev);
+ ret = -EPROBE_DEFER;
+ break;
+ }
+ WRITE_ONCE(link->status, DL_STATE_CONSUMER_PROBE);
+ }
+ dev->links.status = DL_DEV_PROBING;
+
+ device_links_write_unlock();
+ return ret;
+}
+
+/**
+ * device_links_driver_bound - Update device links after probing its driver.
+ * @dev: Device to update the links for.
+ *
+ * The probe has been successful, so update links from this device to any
+ * consumers by changing their status to "available".
+ *
+ * Also change the status of @dev's links to suppliers to "active".
+ *
+ * Links with the DL_FLAG_STATELESS flag set are ignored.
+ */
+void device_links_driver_bound(struct device *dev)
+{
+ struct device_link *link;
+
+ device_links_write_lock();
+
+ list_for_each_entry(link, &dev->links.consumers, s_node) {
+ if (link->flags & DL_FLAG_STATELESS)
+ continue;
+
+ WARN_ON(link->status != DL_STATE_DORMANT);
+ WRITE_ONCE(link->status, DL_STATE_AVAILABLE);
+ }
+
+ list_for_each_entry(link, &dev->links.suppliers, c_node) {
+ if (link->flags & DL_FLAG_STATELESS)
+ continue;
+
+ WARN_ON(link->status != DL_STATE_CONSUMER_PROBE);
+ WRITE_ONCE(link->status, DL_STATE_ACTIVE);
+ }
+
+ dev->links.status = DL_DEV_DRIVER_BOUND;
+
+ device_links_write_unlock();
+}
+
+/**
+ * __device_links_no_driver - Update links of a device without a driver.
+ * @dev: Device without a driver.
+ *
+ * Delete all non-persistent links from this device to any suppliers.
+ *
+ * Persistent links stay around, but their status is changed to "available",
+ * unless they already are in the "supplier unbind in progress" state in which
+ * case they need not be updated.
+ *
+ * Links with the DL_FLAG_STATELESS flag set are ignored.
+ */
+static void __device_links_no_driver(struct device *dev)
+{
+ struct device_link *link, *ln;
+
+ list_for_each_entry_safe_reverse(link, ln, &dev->links.suppliers, c_node) {
+ if (link->flags & DL_FLAG_STATELESS)
+ continue;
+
+ if (link->flags & DL_FLAG_AUTOREMOVE)
+ __device_link_del(link);
+ else if (link->status != DL_STATE_SUPPLIER_UNBIND)
+ WRITE_ONCE(link->status, DL_STATE_AVAILABLE);
+ }
+
+ dev->links.status = DL_DEV_NO_DRIVER;
+}
+
+void device_links_no_driver(struct device *dev)
+{
+ device_links_write_lock();
+ __device_links_no_driver(dev);
+ device_links_write_unlock();
+}
+
+/**
+ * device_links_driver_cleanup - Update links after driver removal.
+ * @dev: Device whose driver has just gone away.
+ *
+ * Update links to consumers for @dev by changing their status to "dormant" and
+ * invoke __device_links_no_driver() to update links to suppliers for it as
+ * appropriate.
+ *
+ * Links with the DL_FLAG_STATELESS flag set are ignored.
+ */
+void device_links_driver_cleanup(struct device *dev)
+{
+ struct device_link *link;
+
+ device_links_write_lock();
+
+ list_for_each_entry(link, &dev->links.consumers, s_node) {
+ if (link->flags & DL_FLAG_STATELESS)
+ continue;
+
+ WARN_ON(link->flags & DL_FLAG_AUTOREMOVE);
+ WARN_ON(link->status != DL_STATE_SUPPLIER_UNBIND);
+ WRITE_ONCE(link->status, DL_STATE_DORMANT);
+ }
+
+ __device_links_no_driver(dev);
+
+ device_links_write_unlock();
+}
+
+/**
+ * device_links_busy - Check if there are any busy links to consumers.
+ * @dev: Device to check.
+ *
+ * Check each consumer of the device and return 'true' if its link's status
+ * is one of "consumer probe" or "active" (meaning that the given consumer is
+ * probing right now or its driver is present). Otherwise, change the link
+ * state to "supplier unbind" to prevent the consumer from being probed
+ * successfully going forward.
+ *
+ * Return 'false' if there are no probing or active consumers.
+ *
+ * Links with the DL_FLAG_STATELESS flag set are ignored.
+ */
+bool device_links_busy(struct device *dev)
+{
+ struct device_link *link;
+ bool ret = false;
+
+ device_links_write_lock();
+
+ list_for_each_entry(link, &dev->links.consumers, s_node) {
+ if (link->flags & DL_FLAG_STATELESS)
+ continue;
+
+ if (link->status == DL_STATE_CONSUMER_PROBE
+ || link->status == DL_STATE_ACTIVE) {
+ ret = true;
+ break;
+ }
+ WRITE_ONCE(link->status, DL_STATE_SUPPLIER_UNBIND);
+ }
+
+ dev->links.status = DL_DEV_UNBINDING;
+
+ device_links_write_unlock();
+ return ret;
+}
+
+/**
+ * device_links_unbind_consumers - Force unbind consumers of the given device.
+ * @dev: Device to unbind the consumers of.
+ *
+ * Walk the list of links to consumers for @dev and if any of them is in the
+ * "consumer probe" state, wait for all device probes in progress to complete
+ * and start over.
+ *
+ * If that's not the case, change the status of the link to "supplier unbind"
+ * and check if the link was in the "active" state. If so, force the consumer
+ * driver to unbind and start over (the consumer will not re-probe as we have
+ * changed the state of the link already).
+ *
+ * Links with the DL_FLAG_STATELESS flag set are ignored.
+ */
+void device_links_unbind_consumers(struct device *dev)
+{
+ struct device_link *link;
+
+ start:
+ device_links_write_lock();
+
+ list_for_each_entry(link, &dev->links.consumers, s_node) {
+ enum device_link_state status;
+
+ if (link->flags & DL_FLAG_STATELESS)
+ continue;
+
+ status = link->status;
+ if (status == DL_STATE_CONSUMER_PROBE) {
+ device_links_write_unlock();
+
+ wait_for_device_probe();
+ goto start;
+ }
+ WRITE_ONCE(link->status, DL_STATE_SUPPLIER_UNBIND);
+ if (status == DL_STATE_ACTIVE) {
+ struct device *consumer = link->consumer;
+
+ get_device(consumer);
+
+ device_links_write_unlock();
+
+ device_release_driver_internal(consumer, NULL,
+ consumer->parent);
+ put_device(consumer);
+ goto start;
+ }
+ }
+
+ device_links_write_unlock();
+}
+
+/**
+ * device_links_purge - Delete existing links to other devices.
+ * @dev: Target device.
+ */
+static void device_links_purge(struct device *dev)
+{
+ struct device_link *link, *ln;
+
+ /*
+ * Delete all of the remaining links from this device to any other
+ * devices (either consumers or suppliers).
+ */
+ device_links_write_lock();
+
+ list_for_each_entry_safe_reverse(link, ln, &dev->links.suppliers, c_node) {
+ WARN_ON(link->status == DL_STATE_ACTIVE);
+ __device_link_del(link);
+ }
+
+ list_for_each_entry_safe_reverse(link, ln, &dev->links.consumers, s_node) {
+ WARN_ON(link->status != DL_STATE_DORMANT &&
+ link->status != DL_STATE_NONE);
+ __device_link_del(link);
+ }
+
+ device_links_write_unlock();
+}
+
+/* Device links support end. */
+
int (*platform_notify)(struct device *dev) = NULL;
int (*platform_notify_remove)(struct device *dev) = NULL;
static struct kobject *dev_kobj;
@@ -494,8 +1060,14 @@ static int device_add_attrs(struct device *dev)
goto err_remove_dev_groups;
}
+ error = device_create_file(dev, &dev_attr_deferred_probe);
+ if (error)
+ goto err_remove_online;
+
return 0;
+ err_remove_online:
+ device_remove_file(dev, &dev_attr_online);
err_remove_dev_groups:
device_remove_groups(dev, dev->groups);
err_remove_type_groups:
@@ -513,6 +1085,7 @@ static void device_remove_attrs(struct device *dev)
struct class *class = dev->class;
const struct device_type *type = dev->type;
+ device_remove_file(dev, &dev_attr_deferred_probe);
device_remove_file(dev, &dev_attr_online);
device_remove_groups(dev, dev->groups);
@@ -711,6 +1284,9 @@ void device_initialize(struct device *dev)
#ifdef CONFIG_GENERIC_MSI_IRQ
INIT_LIST_HEAD(&dev->msi_list);
#endif
+ INIT_LIST_HEAD(&dev->links.consumers);
+ INIT_LIST_HEAD(&dev->links.suppliers);
+ dev->links.status = DL_DEV_NO_DRIVER;
}
EXPORT_SYMBOL_GPL(device_initialize);
@@ -1258,6 +1834,8 @@ void device_del(struct device *dev)
if (dev->bus)
blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
BUS_NOTIFY_DEL_DEVICE, dev);
+
+ device_links_purge(dev);
dpm_sysfs_remove(dev);
if (parent)
klist_del(&dev->p->knode_parent);
diff --git a/drivers/base/dd.c b/drivers/base/dd.c
index d76cd97a98b6..a8b258e5407b 100644
--- a/drivers/base/dd.c
+++ b/drivers/base/dd.c
@@ -53,6 +53,19 @@ static LIST_HEAD(deferred_probe_pending_list);
static LIST_HEAD(deferred_probe_active_list);
static atomic_t deferred_trigger_count = ATOMIC_INIT(0);
+static ssize_t deferred_probe_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ bool value;
+
+ mutex_lock(&deferred_probe_mutex);
+ value = !list_empty(&dev->p->deferred_probe);
+ mutex_unlock(&deferred_probe_mutex);
+
+ return sprintf(buf, "%d\n", value);
+}
+DEVICE_ATTR_RO(deferred_probe);
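The attribute reads back 0 or 1 from userspace; a hypothetical check (the sysfs path below is illustrative only):

#include <stdio.h>

int main(void)
{
	int deferred = 0;
	FILE *f = fopen("/sys/bus/platform/devices/mydev/deferred_probe", "r");

	if (!f)
		return 1;
	if (fscanf(f, "%d", &deferred) == 1)
		printf("probe deferred: %d\n", deferred);
	fclose(f);
	return 0;
}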
+
/*
* In some cases, like suspend to RAM or hibernation, it might be reasonable
* to prohibit probing of devices as it could be unsafe.
@@ -244,6 +257,7 @@ static void driver_bound(struct device *dev)
__func__, dev_name(dev));
klist_add_tail(&dev->p->knode_driver, &dev->driver->p->klist_devices);
+ device_links_driver_bound(dev);
device_pm_check_callbacks(dev);
@@ -338,6 +352,10 @@ static int really_probe(struct device *dev, struct device_driver *drv)
return ret;
}
+ ret = device_links_check_suppliers(dev);
+ if (ret)
+ return ret;
+
atomic_inc(&probe_count);
pr_debug("bus: '%s': %s: probing driver %s with device %s\n",
drv->bus->name, __func__, drv->name, dev_name(dev));
@@ -416,6 +434,7 @@ probe_failed:
blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
BUS_NOTIFY_DRIVER_NOT_BOUND, dev);
pinctrl_bind_failed:
+ device_links_no_driver(dev);
devres_release_all(dev);
driver_sysfs_remove(dev);
dev->driver = NULL;
@@ -508,6 +527,7 @@ int driver_probe_device(struct device_driver *drv, struct device *dev)
pr_debug("bus: '%s': %s: matched device %s with driver %s\n",
drv->bus->name, __func__, dev_name(dev), drv->name);
+ pm_runtime_get_suppliers(dev);
if (dev->parent)
pm_runtime_get_sync(dev->parent);
@@ -518,6 +538,7 @@ int driver_probe_device(struct device_driver *drv, struct device *dev)
if (dev->parent)
pm_runtime_put(dev->parent);
+ pm_runtime_put_suppliers(dev);
return ret;
}
@@ -772,7 +793,7 @@ EXPORT_SYMBOL_GPL(driver_attach);
* __device_release_driver() must be called with @dev lock held.
* When called for a USB interface, @dev->parent lock must be held as well.
*/
-static void __device_release_driver(struct device *dev)
+static void __device_release_driver(struct device *dev, struct device *parent)
{
struct device_driver *drv;
@@ -781,7 +802,27 @@ static void __device_release_driver(struct device *dev)
if (driver_allows_async_probing(drv))
async_synchronize_full();
+ while (device_links_busy(dev)) {
+ device_unlock(dev);
+ if (parent)
+ device_unlock(parent);
+
+ device_links_unbind_consumers(dev);
+ if (parent)
+ device_lock(parent);
+
+ device_lock(dev);
+ /*
+ * A concurrent invocation of the same function might
+ * have released the driver successfully while this one
+ * was waiting, so check for that.
+ */
+ if (dev->driver != drv)
+ return;
+ }
+
pm_runtime_get_sync(dev);
+ pm_runtime_clean_up_links(dev);
driver_sysfs_remove(dev);
@@ -796,6 +837,8 @@ static void __device_release_driver(struct device *dev)
dev->bus->remove(dev);
else if (drv->remove)
drv->remove(dev);
+
+ device_links_driver_cleanup(dev);
devres_release_all(dev);
dev->driver = NULL;
dev_set_drvdata(dev, NULL);
@@ -812,12 +855,32 @@ static void __device_release_driver(struct device *dev)
}
}
+void device_release_driver_internal(struct device *dev,
+ struct device_driver *drv,
+ struct device *parent)
+{
+ if (parent)
+ device_lock(parent);
+
+ device_lock(dev);
+ if (!drv || drv == dev->driver)
+ __device_release_driver(dev, parent);
+
+ device_unlock(dev);
+ if (parent)
+ device_unlock(parent);
+}
+
/**
* device_release_driver - manually detach device from driver.
* @dev: device.
*
* Manually detach device from driver.
* When called for a USB interface, @dev->parent lock must be held.
+ *
+ * If this function is to be called with @dev->parent lock held, ensure that
+ * the device's consumers are unbound in advance or that their locks can be
+ * acquired under the @dev->parent lock.
*/
void device_release_driver(struct device *dev)
{
@@ -826,9 +889,7 @@ void device_release_driver(struct device *dev)
* within their ->remove callback for the same device, they
* will deadlock right here.
*/
- device_lock(dev);
- __device_release_driver(dev);
- device_unlock(dev);
+ device_release_driver_internal(dev, NULL, NULL);
}
EXPORT_SYMBOL_GPL(device_release_driver);
@@ -853,15 +914,7 @@ void driver_detach(struct device_driver *drv)
dev = dev_prv->device;
get_device(dev);
spin_unlock(&drv->p->klist_devices.k_lock);
-
- if (dev->parent) /* Needed for USB */
- device_lock(dev->parent);
- device_lock(dev);
- if (dev->driver == drv)
- __device_release_driver(dev);
- device_unlock(dev);
- if (dev->parent)
- device_unlock(dev->parent);
+ device_release_driver_internal(dev, drv, dev->parent);
put_device(dev);
}
}
diff --git a/drivers/base/devcoredump.c b/drivers/base/devcoredump.c
index 240374fd1838..7be310f7db73 100644
--- a/drivers/base/devcoredump.c
+++ b/drivers/base/devcoredump.c
@@ -160,18 +160,20 @@ static ssize_t disabled_store(struct class *class, struct class_attribute *attr,
return count;
}
+static CLASS_ATTR_RW(disabled);
-static struct class_attribute devcd_class_attrs[] = {
- __ATTR_RW(disabled),
- __ATTR_NULL
+static struct attribute *devcd_class_attrs[] = {
+ &class_attr_disabled.attr,
+ NULL,
};
+ATTRIBUTE_GROUPS(devcd_class);
static struct class devcd_class = {
.name = "devcoredump",
.owner = THIS_MODULE,
.dev_release = devcd_dev_release,
.dev_groups = devcd_dev_groups,
- .class_attrs = devcd_class_attrs,
+ .class_groups = devcd_class_groups,
};
static ssize_t devcd_readv(char *buffer, loff_t offset, size_t count,
diff --git a/drivers/base/devres.c b/drivers/base/devres.c
index 8fc654f0807b..71d577025285 100644
--- a/drivers/base/devres.c
+++ b/drivers/base/devres.c
@@ -10,6 +10,7 @@
#include <linux/device.h>
#include <linux/module.h>
#include <linux/slab.h>
+#include <linux/percpu.h>
#include "base.h"
@@ -985,3 +986,68 @@ void devm_free_pages(struct device *dev, unsigned long addr)
&devres));
}
EXPORT_SYMBOL_GPL(devm_free_pages);
+
+static void devm_percpu_release(struct device *dev, void *pdata)
+{
+ void __percpu *p;
+
+ p = *(void __percpu **)pdata;
+ free_percpu(p);
+}
+
+static int devm_percpu_match(struct device *dev, void *data, void *p)
+{
+ struct devres *devr = container_of(data, struct devres, data);
+
+ return *(void **)devr->data == p;
+}
+
+/**
+ * __devm_alloc_percpu - Resource-managed alloc_percpu
+ * @dev: Device to allocate per-cpu memory for
+ * @size: Size of per-cpu memory to allocate
+ * @align: Alignment of per-cpu memory to allocate
+ *
+ * Managed alloc_percpu. Per-cpu memory allocated with this function is
+ * automatically freed on driver detach.
+ *
+ * RETURNS:
+ * Pointer to allocated memory on success, NULL on failure.
+ */
+void __percpu *__devm_alloc_percpu(struct device *dev, size_t size,
+ size_t align)
+{
+ void *p;
+ void __percpu *pcpu;
+
+ pcpu = __alloc_percpu(size, align);
+ if (!pcpu)
+ return NULL;
+
+ p = devres_alloc(devm_percpu_release, sizeof(void *), GFP_KERNEL);
+ if (!p) {
+ free_percpu(pcpu);
+ return NULL;
+ }
+
+ *(void __percpu **)p = pcpu;
+
+ devres_add(dev, p);
+
+ return pcpu;
+}
+EXPORT_SYMBOL_GPL(__devm_alloc_percpu);
+
+/**
+ * devm_free_percpu - Resource-managed free_percpu
+ * @dev: Device this memory belongs to
+ * @pdata: Per-cpu memory to free
+ *
+ * Free memory allocated with devm_alloc_percpu().
+ */
+void devm_free_percpu(struct device *dev, void __percpu *pdata)
+{
+ WARN_ON(devres_destroy(dev, devm_percpu_release, devm_percpu_match,
+ (void *)pdata));
+}
+EXPORT_SYMBOL_GPL(devm_free_percpu);
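A sketch of the new helpers in a driver, assuming the devm_alloc_percpu() wrapper macro from <linux/device.h>; the "mydrv" names are hypothetical:

struct mydrv_stats {
	u64 rx_packets;
	u64 tx_packets;
};

static int mydrv_probe(struct device *dev)
{
	struct mydrv_stats __percpu *stats;

	stats = devm_alloc_percpu(dev, struct mydrv_stats);
	if (!stats)
		return -ENOMEM;

	/* per_cpu_ptr(stats, cpu) remains valid until the driver detaches */
	return 0;
}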
diff --git a/drivers/base/dma-mapping.c b/drivers/base/dma-mapping.c
index 8f8b68c80986..efd71cf4fdea 100644
--- a/drivers/base/dma-mapping.c
+++ b/drivers/base/dma-mapping.c
@@ -108,13 +108,13 @@ void dmam_free_coherent(struct device *dev, size_t size, void *vaddr,
EXPORT_SYMBOL(dmam_free_coherent);
/**
- * dmam_alloc_non_coherent - Managed dma_alloc_non_coherent()
+ * dmam_alloc_non_coherent - Managed dma_alloc_noncoherent()
* @dev: Device to allocate non-coherent memory for
* @size: Size of allocation
* @dma_handle: Out argument for allocated DMA handle
* @gfp: Allocation flags
*
- * Managed dma_alloc_non_coherent(). Memory allocated using this
+ * Managed dma_alloc_noncoherent(). Memory allocated using this
* function will be automatically released on driver detach.
*
* RETURNS:
diff --git a/drivers/base/firmware_class.c b/drivers/base/firmware_class.c
index 22d1760a4278..4497d263209f 100644
--- a/drivers/base/firmware_class.c
+++ b/drivers/base/firmware_class.c
@@ -30,6 +30,7 @@
#include <linux/syscore_ops.h>
#include <linux/reboot.h>
#include <linux/security.h>
+#include <linux/swait.h>
#include <generated/utsrelease.h>
@@ -91,10 +92,11 @@ static inline bool fw_is_builtin_firmware(const struct firmware *fw)
}
#endif
-enum {
+enum fw_status {
+ FW_STATUS_UNKNOWN,
FW_STATUS_LOADING,
FW_STATUS_DONE,
- FW_STATUS_ABORT,
+ FW_STATUS_ABORTED,
};
static int loading_timeout = 60; /* In seconds */
@@ -104,6 +106,82 @@ static inline long firmware_loading_timeout(void)
return loading_timeout > 0 ? loading_timeout * HZ : MAX_JIFFY_OFFSET;
}
+/*
+ * Concurrent request_firmware() for the same firmware need to be
+ * serialized. struct fw_state is a simple state machine which holds the
+ * state of the firmware loading.
+ */
+struct fw_state {
+ struct swait_queue_head wq;
+ enum fw_status status;
+};
+
+static void fw_state_init(struct fw_state *fw_st)
+{
+ init_swait_queue_head(&fw_st->wq);
+ fw_st->status = FW_STATUS_UNKNOWN;
+}
+
+static inline bool __fw_state_is_done(enum fw_status status)
+{
+ return status == FW_STATUS_DONE || status == FW_STATUS_ABORTED;
+}
+
+static int __fw_state_wait_common(struct fw_state *fw_st, long timeout)
+{
+ long ret;
+
+ ret = swait_event_interruptible_timeout(fw_st->wq,
+ __fw_state_is_done(READ_ONCE(fw_st->status)),
+ timeout);
+ if (ret != 0 && fw_st->status == FW_STATUS_ABORTED)
+ return -ENOENT;
+ if (!ret)
+ return -ETIMEDOUT;
+
+ return ret < 0 ? ret : 0;
+}
+
+static void __fw_state_set(struct fw_state *fw_st,
+ enum fw_status status)
+{
+ WRITE_ONCE(fw_st->status, status);
+
+ if (status == FW_STATUS_DONE || status == FW_STATUS_ABORTED)
+ swake_up(&fw_st->wq);
+}
+
+#define fw_state_start(fw_st) \
+ __fw_state_set(fw_st, FW_STATUS_LOADING)
+#define fw_state_done(fw_st) \
+ __fw_state_set(fw_st, FW_STATUS_DONE)
+#define fw_state_wait(fw_st) \
+ __fw_state_wait_common(fw_st, MAX_SCHEDULE_TIMEOUT)
+
+#ifndef CONFIG_FW_LOADER_USER_HELPER
+
+#define fw_state_is_aborted(fw_st) false
+
+#else /* CONFIG_FW_LOADER_USER_HELPER */
+
+static int __fw_state_check(struct fw_state *fw_st, enum fw_status status)
+{
+ return fw_st->status == status;
+}
+
+#define fw_state_aborted(fw_st) \
+ __fw_state_set(fw_st, FW_STATUS_ABORTED)
+#define fw_state_is_done(fw_st) \
+ __fw_state_check(fw_st, FW_STATUS_DONE)
+#define fw_state_is_loading(fw_st) \
+ __fw_state_check(fw_st, FW_STATUS_LOADING)
+#define fw_state_is_aborted(fw_st) \
+ __fw_state_check(fw_st, FW_STATUS_ABORTED)
+#define fw_state_wait_timeout(fw_st, timeout) \
+ __fw_state_wait_common(fw_st, timeout)
+
+#endif /* CONFIG_FW_LOADER_USER_HELPER */
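A condensed sketch of how the helpers above pair a loader with a waiter (the flow is simplified from the call sites in this file):

static void example_loader(struct fw_state *fw_st)
{
	fw_state_start(fw_st);	/* -> FW_STATUS_LOADING */
	/* ... fill the firmware buffer ... */
	fw_state_done(fw_st);	/* -> FW_STATUS_DONE, wakes all waiters */
}

static int example_waiter(struct fw_state *fw_st)
{
	/* returns 0 once DONE, -ENOENT if the load was aborted */
	return fw_state_wait(fw_st);
}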
+
/* firmware behavior options */
#define FW_OPT_UEVENT (1U << 0)
#define FW_OPT_NOWAIT (1U << 1)
@@ -145,9 +223,8 @@ struct firmware_cache {
struct firmware_buf {
struct kref ref;
struct list_head list;
- struct completion completion;
struct firmware_cache *fwc;
- unsigned long status;
+ struct fw_state fw_st;
void *data;
size_t size;
size_t allocated_size;
@@ -205,7 +282,7 @@ static struct firmware_buf *__allocate_fw_buf(const char *fw_name,
buf->fwc = fwc;
buf->data = dbuf;
buf->allocated_size = size;
- init_completion(&buf->completion);
+ fw_state_init(&buf->fw_st);
#ifdef CONFIG_FW_LOADER_USER_HELPER
INIT_LIST_HEAD(&buf->pending_list);
#endif
@@ -305,15 +382,6 @@ static const char * const fw_path[] = {
module_param_string(path, fw_path_para, sizeof(fw_path_para), 0644);
MODULE_PARM_DESC(path, "customized firmware image search path with a higher priority than default path");
-static void fw_finish_direct_load(struct device *device,
- struct firmware_buf *buf)
-{
- mutex_lock(&fw_lock);
- set_bit(FW_STATUS_DONE, &buf->status);
- complete_all(&buf->completion);
- mutex_unlock(&fw_lock);
-}
-
static int
fw_get_filesystem_firmware(struct device *device, struct firmware_buf *buf)
{
@@ -360,7 +428,7 @@ fw_get_filesystem_firmware(struct device *device, struct firmware_buf *buf)
}
dev_dbg(device, "direct-loading %s\n", buf->fw_id);
buf->size = size;
- fw_finish_direct_load(device, buf);
+ fw_state_done(&buf->fw_st);
break;
}
__putname(path);
@@ -478,12 +546,11 @@ static void __fw_load_abort(struct firmware_buf *buf)
* There is a small window in which the user can write to 'loading'
* between the load completing and the disappearance of 'loading'
*/
- if (test_bit(FW_STATUS_DONE, &buf->status))
+ if (fw_state_is_done(&buf->fw_st))
return;
list_del_init(&buf->pending_list);
- set_bit(FW_STATUS_ABORT, &buf->status);
- complete_all(&buf->completion);
+ fw_state_aborted(&buf->fw_st);
}
static void fw_load_abort(struct firmware_priv *fw_priv)
@@ -496,9 +563,6 @@ static void fw_load_abort(struct firmware_priv *fw_priv)
fw_priv->buf = NULL;
}
-#define is_fw_load_aborted(buf) \
- test_bit(FW_STATUS_ABORT, &(buf)->status)
-
static LIST_HEAD(pending_fw_head);
/* reboot notifier to avoid deadlock with usermode_lock */
@@ -546,11 +610,13 @@ static ssize_t timeout_store(struct class *class, struct class_attribute *attr,
return count;
}
+static CLASS_ATTR_RW(timeout);
-static struct class_attribute firmware_class_attrs[] = {
- __ATTR_RW(timeout),
- __ATTR_NULL
+static struct attribute *firmware_class_attrs[] = {
+ &class_attr_timeout.attr,
+ NULL,
};
+ATTRIBUTE_GROUPS(firmware_class);
static void fw_dev_release(struct device *dev)
{
@@ -585,7 +651,7 @@ static int firmware_uevent(struct device *dev, struct kobj_uevent_env *env)
static struct class firmware_class = {
.name = "firmware",
- .class_attrs = firmware_class_attrs,
+ .class_groups = firmware_class_groups,
.dev_uevent = firmware_uevent,
.dev_release = fw_dev_release,
};
@@ -598,7 +664,7 @@ static ssize_t firmware_loading_show(struct device *dev,
mutex_lock(&fw_lock);
if (fw_priv->buf)
- loading = test_bit(FW_STATUS_LOADING, &fw_priv->buf->status);
+ loading = fw_state_is_loading(&fw_priv->buf->fw_st);
mutex_unlock(&fw_lock);
return sprintf(buf, "%d\n", loading);
@@ -653,23 +719,20 @@ static ssize_t firmware_loading_store(struct device *dev,
switch (loading) {
case 1:
/* discarding any previous partial load */
- if (!test_bit(FW_STATUS_DONE, &fw_buf->status)) {
+ if (!fw_state_is_done(&fw_buf->fw_st)) {
for (i = 0; i < fw_buf->nr_pages; i++)
__free_page(fw_buf->pages[i]);
vfree(fw_buf->pages);
fw_buf->pages = NULL;
fw_buf->page_array_size = 0;
fw_buf->nr_pages = 0;
- set_bit(FW_STATUS_LOADING, &fw_buf->status);
+ fw_state_start(&fw_buf->fw_st);
}
break;
case 0:
- if (test_bit(FW_STATUS_LOADING, &fw_buf->status)) {
+ if (fw_state_is_loading(&fw_buf->fw_st)) {
int rc;
- set_bit(FW_STATUS_DONE, &fw_buf->status);
- clear_bit(FW_STATUS_LOADING, &fw_buf->status);
-
/*
* Several loading requests may be pending on
* the same firmware buf, so let all requests
@@ -691,10 +754,11 @@ static ssize_t firmware_loading_store(struct device *dev,
*/
list_del_init(&fw_buf->pending_list);
if (rc) {
- set_bit(FW_STATUS_ABORT, &fw_buf->status);
+ fw_state_aborted(&fw_buf->fw_st);
written = rc;
+ } else {
+ fw_state_done(&fw_buf->fw_st);
}
- complete_all(&fw_buf->completion);
break;
}
/* fallthrough */
@@ -755,7 +819,7 @@ static ssize_t firmware_data_read(struct file *filp, struct kobject *kobj,
mutex_lock(&fw_lock);
buf = fw_priv->buf;
- if (!buf || test_bit(FW_STATUS_DONE, &buf->status)) {
+ if (!buf || fw_state_is_done(&buf->fw_st)) {
ret_count = -ENODEV;
goto out;
}
@@ -842,7 +906,7 @@ static ssize_t firmware_data_write(struct file *filp, struct kobject *kobj,
mutex_lock(&fw_lock);
buf = fw_priv->buf;
- if (!buf || test_bit(FW_STATUS_DONE, &buf->status)) {
+ if (!buf || fw_state_is_done(&buf->fw_st)) {
retval = -ENODEV;
goto out;
}
@@ -955,17 +1019,14 @@ static int _request_firmware_load(struct firmware_priv *fw_priv,
timeout = MAX_JIFFY_OFFSET;
}
- retval = wait_for_completion_interruptible_timeout(&buf->completion,
- timeout);
- if (retval == -ERESTARTSYS || !retval) {
+ retval = fw_state_wait_timeout(&buf->fw_st, timeout);
+ if (retval < 0) {
mutex_lock(&fw_lock);
fw_load_abort(fw_priv);
mutex_unlock(&fw_lock);
- } else if (retval > 0) {
- retval = 0;
}
- if (is_fw_load_aborted(buf))
+ if (fw_state_is_aborted(&buf->fw_st))
retval = -EAGAIN;
else if (buf->is_paged_buf && !buf->data)
retval = -ENOMEM;
@@ -1015,35 +1076,12 @@ fw_load_from_user_helper(struct firmware *firmware, const char *name,
return -ENOENT;
}
-/* No abort during direct loading */
-#define is_fw_load_aborted(buf) false
-
#ifdef CONFIG_PM_SLEEP
static inline void kill_requests_without_uevent(void) { }
#endif
#endif /* CONFIG_FW_LOADER_USER_HELPER */
-
-/* wait until the shared firmware_buf becomes ready (or error) */
-static int sync_cached_firmware_buf(struct firmware_buf *buf)
-{
- int ret = 0;
-
- mutex_lock(&fw_lock);
- while (!test_bit(FW_STATUS_DONE, &buf->status)) {
- if (is_fw_load_aborted(buf)) {
- ret = -ENOENT;
- break;
- }
- mutex_unlock(&fw_lock);
- ret = wait_for_completion_interruptible(&buf->completion);
- mutex_lock(&fw_lock);
- }
- mutex_unlock(&fw_lock);
- return ret;
-}
-
/* prepare firmware and firmware_buf structs;
* return 0 if a firmware is already assigned, 1 if one needs to be loaded,
* or a negative error code
@@ -1077,7 +1115,7 @@ _request_firmware_prepare(struct firmware **firmware_p, const char *name,
firmware->priv = buf;
if (ret > 0) {
- ret = sync_cached_firmware_buf(buf);
+ ret = fw_state_wait(&buf->fw_st);
if (!ret) {
fw_set_page_data(buf, firmware);
return 0; /* assigned */
@@ -1095,7 +1133,7 @@ static int assign_firmware_buf(struct firmware *fw, struct device *device,
struct firmware_buf *buf = fw->priv;
mutex_lock(&fw_lock);
- if (!buf->size || is_fw_load_aborted(buf)) {
+ if (!buf->size || fw_state_is_aborted(&buf->fw_st)) {
mutex_unlock(&fw_lock);
return -ENOENT;
}
@@ -1345,9 +1383,9 @@ static void request_firmware_work_func(struct work_struct *work)
*
* Asynchronous variant of request_firmware() for user contexts:
* - sleep for as small periods as possible since it may
- * increase kernel boot time of built-in device drivers
- * requesting firmware in their ->probe() methods, if
- * @gfp is GFP_KERNEL.
+ * increase kernel boot time of built-in device drivers
+ * requesting firmware in their ->probe() methods, if
+ * @gfp is GFP_KERNEL.
*
* - can't sleep at all if @gfp is GFP_ATOMIC.
**/
diff --git a/drivers/base/memory.c b/drivers/base/memory.c
index 62c63c0c5c22..bb69e58c29f3 100644
--- a/drivers/base/memory.c
+++ b/drivers/base/memory.c
@@ -226,11 +226,9 @@ memory_block_action(unsigned long phys_index, unsigned long action, int online_t
{
unsigned long start_pfn;
unsigned long nr_pages = PAGES_PER_SECTION * sections_per_block;
- struct page *first_page;
int ret;
start_pfn = section_nr_to_pfn(phys_index);
- first_page = pfn_to_page(start_pfn);
switch (action) {
case MEM_ONLINE:
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
index e023066e4215..5711708532db 100644
--- a/drivers/base/power/domain.c
+++ b/drivers/base/power/domain.c
@@ -39,6 +39,105 @@
static LIST_HEAD(gpd_list);
static DEFINE_MUTEX(gpd_list_lock);
+struct genpd_lock_ops {
+ void (*lock)(struct generic_pm_domain *genpd);
+ void (*lock_nested)(struct generic_pm_domain *genpd, int depth);
+ int (*lock_interruptible)(struct generic_pm_domain *genpd);
+ void (*unlock)(struct generic_pm_domain *genpd);
+};
+
+static void genpd_lock_mtx(struct generic_pm_domain *genpd)
+{
+ mutex_lock(&genpd->mlock);
+}
+
+static void genpd_lock_nested_mtx(struct generic_pm_domain *genpd,
+ int depth)
+{
+ mutex_lock_nested(&genpd->mlock, depth);
+}
+
+static int genpd_lock_interruptible_mtx(struct generic_pm_domain *genpd)
+{
+ return mutex_lock_interruptible(&genpd->mlock);
+}
+
+static void genpd_unlock_mtx(struct generic_pm_domain *genpd)
+{
+	mutex_unlock(&genpd->mlock);
+}
+
+static const struct genpd_lock_ops genpd_mtx_ops = {
+ .lock = genpd_lock_mtx,
+ .lock_nested = genpd_lock_nested_mtx,
+ .lock_interruptible = genpd_lock_interruptible_mtx,
+ .unlock = genpd_unlock_mtx,
+};
+
+static void genpd_lock_spin(struct generic_pm_domain *genpd)
+ __acquires(&genpd->slock)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&genpd->slock, flags);
+ genpd->lock_flags = flags;
+}
+
+static void genpd_lock_nested_spin(struct generic_pm_domain *genpd,
+ int depth)
+ __acquires(&genpd->slock)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave_nested(&genpd->slock, flags, depth);
+ genpd->lock_flags = flags;
+}
+
+static int genpd_lock_interruptible_spin(struct generic_pm_domain *genpd)
+ __acquires(&genpd->slock)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&genpd->slock, flags);
+ genpd->lock_flags = flags;
+ return 0;
+}
+
+static void genpd_unlock_spin(struct generic_pm_domain *genpd)
+ __releases(&genpd->slock)
+{
+ spin_unlock_irqrestore(&genpd->slock, genpd->lock_flags);
+}
+
+static const struct genpd_lock_ops genpd_spin_ops = {
+ .lock = genpd_lock_spin,
+ .lock_nested = genpd_lock_nested_spin,
+ .lock_interruptible = genpd_lock_interruptible_spin,
+ .unlock = genpd_unlock_spin,
+};
+
+#define genpd_lock(p) p->lock_ops->lock(p)
+#define genpd_lock_nested(p, d) p->lock_ops->lock_nested(p, d)
+#define genpd_lock_interruptible(p) p->lock_ops->lock_interruptible(p)
+#define genpd_unlock(p) p->lock_ops->unlock(p)
+
+#define genpd_is_irq_safe(genpd) (genpd->flags & GENPD_FLAG_IRQ_SAFE)
+
+static inline bool irq_safe_dev_in_no_sleep_domain(struct device *dev,
+ struct generic_pm_domain *genpd)
+{
+ bool ret;
+
+ ret = pm_runtime_is_irq_safe(dev) && !genpd_is_irq_safe(genpd);
+
+ /* Warn once for each IRQ safe dev in no sleep domain */
+ if (ret)
+ dev_warn_once(dev, "PM domain %s will not be powered off\n",
+ genpd->name);
+
+ return ret;
+}
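A sketch of a provider opting in to the spinlock-based locking; the "mydomain" callbacks are hypothetical and must not sleep, since they may run with interrupts disabled:

static int mydomain_power_on(struct generic_pm_domain *genpd)
{
	/* program the power controller; no sleeping allowed here */
	return 0;
}

static int mydomain_power_off(struct generic_pm_domain *genpd)
{
	return 0;
}

static struct generic_pm_domain mydomain = {
	.name		= "mydomain",
	.flags		= GENPD_FLAG_IRQ_SAFE,	/* selects genpd_spin_ops */
	.power_on	= mydomain_power_on,
	.power_off	= mydomain_power_off,
};

static int __init mydomain_setup(void)
{
	return pm_genpd_init(&mydomain, NULL, true);	/* start powered off */
}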
+
/*
* Get the generic PM domain for a particular struct device.
* This validates the struct device pointer, the PM domain pointer,
@@ -200,9 +299,9 @@ static int genpd_poweron(struct generic_pm_domain *genpd, unsigned int depth)
genpd_sd_counter_inc(master);
- mutex_lock_nested(&master->lock, depth + 1);
+ genpd_lock_nested(master, depth + 1);
ret = genpd_poweron(master, depth + 1);
- mutex_unlock(&master->lock);
+ genpd_unlock(master);
if (ret) {
genpd_sd_counter_dec(master);
@@ -255,9 +354,9 @@ static int genpd_dev_pm_qos_notifier(struct notifier_block *nb,
spin_unlock_irq(&dev->power.lock);
if (!IS_ERR(genpd)) {
- mutex_lock(&genpd->lock);
+ genpd_lock(genpd);
genpd->max_off_time_changed = true;
- mutex_unlock(&genpd->lock);
+ genpd_unlock(genpd);
}
dev = dev->parent;
@@ -303,7 +402,12 @@ static int genpd_poweroff(struct generic_pm_domain *genpd, bool is_async)
if (stat > PM_QOS_FLAGS_NONE)
return -EBUSY;
- if (!pm_runtime_suspended(pdd->dev) || pdd->dev->power.irq_safe)
+ /*
+		 * Do not allow the PM domain to be powered off when an IRQ
+		 * safe device is part of a non-IRQ safe domain.
+ */
+ if (!pm_runtime_suspended(pdd->dev) ||
+ irq_safe_dev_in_no_sleep_domain(pdd->dev, genpd))
not_suspended++;
}
@@ -354,9 +458,9 @@ static void genpd_power_off_work_fn(struct work_struct *work)
genpd = container_of(work, struct generic_pm_domain, power_off_work);
- mutex_lock(&genpd->lock);
+ genpd_lock(genpd);
genpd_poweroff(genpd, true);
- mutex_unlock(&genpd->lock);
+ genpd_unlock(genpd);
}
/**
@@ -466,15 +570,15 @@ static int genpd_runtime_suspend(struct device *dev)
}
/*
- * If power.irq_safe is set, this routine will be run with interrupts
- * off, so it can't use mutexes.
+ * If power.irq_safe is set, this routine may be run with
+ * IRQs disabled, so suspend only if the PM domain also is irq_safe.
*/
- if (dev->power.irq_safe)
+ if (irq_safe_dev_in_no_sleep_domain(dev, genpd))
return 0;
- mutex_lock(&genpd->lock);
+ genpd_lock(genpd);
genpd_poweroff(genpd, false);
- mutex_unlock(&genpd->lock);
+ genpd_unlock(genpd);
return 0;
}
@@ -503,15 +607,18 @@ static int genpd_runtime_resume(struct device *dev)
if (IS_ERR(genpd))
return -EINVAL;
- /* If power.irq_safe, the PM domain is never powered off. */
- if (dev->power.irq_safe) {
+ /*
+	 * As we don't power off a non-IRQ safe domain that holds
+	 * an IRQ safe device, we don't need to restore power to it.
+ */
+ if (irq_safe_dev_in_no_sleep_domain(dev, genpd)) {
timed = false;
goto out;
}
- mutex_lock(&genpd->lock);
+ genpd_lock(genpd);
ret = genpd_poweron(genpd, 0);
- mutex_unlock(&genpd->lock);
+ genpd_unlock(genpd);
if (ret)
return ret;
@@ -546,10 +653,11 @@ static int genpd_runtime_resume(struct device *dev)
err_stop:
genpd_stop_dev(genpd, dev);
err_poweroff:
- if (!dev->power.irq_safe) {
- mutex_lock(&genpd->lock);
+ if (!pm_runtime_is_irq_safe(dev) ||
+ (pm_runtime_is_irq_safe(dev) && genpd_is_irq_safe(genpd))) {
+ genpd_lock(genpd);
genpd_poweroff(genpd, 0);
- mutex_unlock(&genpd->lock);
+ genpd_unlock(genpd);
}
return ret;
@@ -732,20 +840,20 @@ static int pm_genpd_prepare(struct device *dev)
if (resume_needed(dev, genpd))
pm_runtime_resume(dev);
- mutex_lock(&genpd->lock);
+ genpd_lock(genpd);
if (genpd->prepared_count++ == 0)
genpd->suspended_count = 0;
- mutex_unlock(&genpd->lock);
+ genpd_unlock(genpd);
ret = pm_generic_prepare(dev);
if (ret) {
- mutex_lock(&genpd->lock);
+ genpd_lock(genpd);
genpd->prepared_count--;
- mutex_unlock(&genpd->lock);
+ genpd_unlock(genpd);
}
return ret;
@@ -936,13 +1044,13 @@ static void pm_genpd_complete(struct device *dev)
pm_generic_complete(dev);
- mutex_lock(&genpd->lock);
+ genpd_lock(genpd);
genpd->prepared_count--;
if (!genpd->prepared_count)
genpd_queue_power_off_work(genpd);
- mutex_unlock(&genpd->lock);
+ genpd_unlock(genpd);
}
/**
@@ -1071,7 +1179,7 @@ static int genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
if (IS_ERR(gpd_data))
return PTR_ERR(gpd_data);
- mutex_lock(&genpd->lock);
+ genpd_lock(genpd);
if (genpd->prepared_count > 0) {
ret = -EAGAIN;
@@ -1088,7 +1196,7 @@ static int genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
list_add_tail(&gpd_data->base.list_node, &genpd->dev_list);
out:
- mutex_unlock(&genpd->lock);
+ genpd_unlock(genpd);
if (ret)
genpd_free_dev_data(dev, gpd_data);
@@ -1130,7 +1238,7 @@ static int genpd_remove_device(struct generic_pm_domain *genpd,
gpd_data = to_gpd_data(pdd);
dev_pm_qos_remove_notifier(dev, &gpd_data->nb);
- mutex_lock(&genpd->lock);
+ genpd_lock(genpd);
if (genpd->prepared_count > 0) {
ret = -EAGAIN;
@@ -1145,14 +1253,14 @@ static int genpd_remove_device(struct generic_pm_domain *genpd,
list_del_init(&pdd->list_node);
- mutex_unlock(&genpd->lock);
+ genpd_unlock(genpd);
genpd_free_dev_data(dev, gpd_data);
return 0;
out:
- mutex_unlock(&genpd->lock);
+ genpd_unlock(genpd);
dev_pm_qos_add_notifier(dev, &gpd_data->nb);
return ret;
@@ -1183,12 +1291,23 @@ static int genpd_add_subdomain(struct generic_pm_domain *genpd,
|| genpd == subdomain)
return -EINVAL;
+ /*
+ * If the domain can be powered on/off in an IRQ safe
+ * context, ensure that the subdomain can also be
+ * powered on/off in that context.
+ */
+ if (!genpd_is_irq_safe(genpd) && genpd_is_irq_safe(subdomain)) {
+ WARN(1, "Parent %s of subdomain %s must be IRQ safe\n",
+ genpd->name, subdomain->name);
+ return -EINVAL;
+ }
+
link = kzalloc(sizeof(*link), GFP_KERNEL);
if (!link)
return -ENOMEM;
- mutex_lock(&subdomain->lock);
- mutex_lock_nested(&genpd->lock, SINGLE_DEPTH_NESTING);
+ genpd_lock(subdomain);
+ genpd_lock_nested(genpd, SINGLE_DEPTH_NESTING);
if (genpd->status == GPD_STATE_POWER_OFF
&& subdomain->status != GPD_STATE_POWER_OFF) {
@@ -1211,8 +1330,8 @@ static int genpd_add_subdomain(struct generic_pm_domain *genpd,
genpd_sd_counter_inc(genpd);
out:
- mutex_unlock(&genpd->lock);
- mutex_unlock(&subdomain->lock);
+ genpd_unlock(genpd);
+ genpd_unlock(subdomain);
if (ret)
kfree(link);
return ret;
@@ -1250,8 +1369,8 @@ int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain))
return -EINVAL;
- mutex_lock(&subdomain->lock);
- mutex_lock_nested(&genpd->lock, SINGLE_DEPTH_NESTING);
+ genpd_lock(subdomain);
+ genpd_lock_nested(genpd, SINGLE_DEPTH_NESTING);
if (!list_empty(&subdomain->master_links) || subdomain->device_count) {
pr_warn("%s: unable to remove subdomain %s\n", genpd->name,
@@ -1275,13 +1394,39 @@ int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
}
out:
- mutex_unlock(&genpd->lock);
- mutex_unlock(&subdomain->lock);
+ genpd_unlock(genpd);
+ genpd_unlock(subdomain);
return ret;
}
EXPORT_SYMBOL_GPL(pm_genpd_remove_subdomain);
+static int genpd_set_default_power_state(struct generic_pm_domain *genpd)
+{
+ struct genpd_power_state *state;
+
+ state = kzalloc(sizeof(*state), GFP_KERNEL);
+ if (!state)
+ return -ENOMEM;
+
+ genpd->states = state;
+ genpd->state_count = 1;
+ genpd->free = state;
+
+ return 0;
+}
+
+static void genpd_lock_init(struct generic_pm_domain *genpd)
+{
+ if (genpd->flags & GENPD_FLAG_IRQ_SAFE) {
+ spin_lock_init(&genpd->slock);
+ genpd->lock_ops = &genpd_spin_ops;
+ } else {
+ mutex_init(&genpd->mlock);
+ genpd->lock_ops = &genpd_mtx_ops;
+ }
+}
+
/**
* pm_genpd_init - Initialize a generic I/O PM domain object.
* @genpd: PM domain object to initialize.
@@ -1293,13 +1438,15 @@ EXPORT_SYMBOL_GPL(pm_genpd_remove_subdomain);
int pm_genpd_init(struct generic_pm_domain *genpd,
struct dev_power_governor *gov, bool is_off)
{
+ int ret;
+
if (IS_ERR_OR_NULL(genpd))
return -EINVAL;
INIT_LIST_HEAD(&genpd->master_links);
INIT_LIST_HEAD(&genpd->slave_links);
INIT_LIST_HEAD(&genpd->dev_list);
- mutex_init(&genpd->lock);
+ genpd_lock_init(genpd);
genpd->gov = gov;
INIT_WORK(&genpd->power_off_work, genpd_power_off_work_fn);
atomic_set(&genpd->sd_count, 0);
@@ -1325,19 +1472,12 @@ int pm_genpd_init(struct generic_pm_domain *genpd,
genpd->dev_ops.start = pm_clk_resume;
}
- if (genpd->state_idx >= GENPD_MAX_NUM_STATES) {
- pr_warn("Initial state index out of bounds.\n");
- genpd->state_idx = GENPD_MAX_NUM_STATES - 1;
- }
-
- if (genpd->state_count > GENPD_MAX_NUM_STATES) {
- pr_warn("Limiting states to %d\n", GENPD_MAX_NUM_STATES);
- genpd->state_count = GENPD_MAX_NUM_STATES;
- }
-
/* Use only one "off" state if there were no states declared */
- if (genpd->state_count == 0)
- genpd->state_count = 1;
+ if (genpd->state_count == 0) {
+ ret = genpd_set_default_power_state(genpd);
+ if (ret)
+ return ret;
+ }
mutex_lock(&gpd_list_lock);
list_add(&genpd->gpd_list_node, &gpd_list);
@@ -1354,16 +1494,16 @@ static int genpd_remove(struct generic_pm_domain *genpd)
if (IS_ERR_OR_NULL(genpd))
return -EINVAL;
- mutex_lock(&genpd->lock);
+ genpd_lock(genpd);
if (genpd->has_provider) {
- mutex_unlock(&genpd->lock);
+ genpd_unlock(genpd);
pr_err("Provider present, unable to remove %s\n", genpd->name);
return -EBUSY;
}
if (!list_empty(&genpd->master_links) || genpd->device_count) {
- mutex_unlock(&genpd->lock);
+ genpd_unlock(genpd);
pr_err("%s: unable to remove %s\n", __func__, genpd->name);
return -EBUSY;
}
@@ -1375,8 +1515,9 @@ static int genpd_remove(struct generic_pm_domain *genpd)
}
list_del(&genpd->gpd_list_node);
- mutex_unlock(&genpd->lock);
+ genpd_unlock(genpd);
cancel_work_sync(&genpd->power_off_work);
+ kfree(genpd->free);
pr_debug("%s: removed %s\n", __func__, genpd->name);
return 0;
@@ -1890,21 +2031,117 @@ int genpd_dev_pm_attach(struct device *dev)
mutex_unlock(&gpd_list_lock);
if (ret < 0) {
- dev_err(dev, "failed to add to PM domain %s: %d",
- pd->name, ret);
+ if (ret != -EPROBE_DEFER)
+			dev_err(dev, "failed to add to PM domain %s: %d\n",
+ pd->name, ret);
goto out;
}
dev->pm_domain->detach = genpd_dev_pm_detach;
dev->pm_domain->sync = genpd_dev_pm_sync;
- mutex_lock(&pd->lock);
+ genpd_lock(pd);
ret = genpd_poweron(pd, 0);
- mutex_unlock(&pd->lock);
+ genpd_unlock(pd);
out:
return ret ? -EPROBE_DEFER : 0;
}
EXPORT_SYMBOL_GPL(genpd_dev_pm_attach);
+
+static const struct of_device_id idle_state_match[] = {
+ { .compatible = "domain-idle-state", },
+ { }
+};
+
+static int genpd_parse_state(struct genpd_power_state *genpd_state,
+ struct device_node *state_node)
+{
+ int err;
+ u32 residency;
+ u32 entry_latency, exit_latency;
+ const struct of_device_id *match_id;
+
+ match_id = of_match_node(idle_state_match, state_node);
+ if (!match_id)
+ return -EINVAL;
+
+ err = of_property_read_u32(state_node, "entry-latency-us",
+ &entry_latency);
+ if (err) {
+ pr_debug(" * %s missing entry-latency-us property\n",
+ state_node->full_name);
+ return -EINVAL;
+ }
+
+ err = of_property_read_u32(state_node, "exit-latency-us",
+ &exit_latency);
+ if (err) {
+ pr_debug(" * %s missing exit-latency-us property\n",
+ state_node->full_name);
+ return -EINVAL;
+ }
+
+ err = of_property_read_u32(state_node, "min-residency-us", &residency);
+ if (!err)
+ genpd_state->residency_ns = 1000 * residency;
+
+ genpd_state->power_on_latency_ns = 1000 * exit_latency;
+ genpd_state->power_off_latency_ns = 1000 * entry_latency;
+ genpd_state->fwnode = &state_node->fwnode;
+
+ return 0;
+}
+
+/**
+ * of_genpd_parse_idle_states: Return array of idle states for the genpd.
+ *
+ * @dn: The genpd device node
+ * @states: The pointer to which the state array will be saved.
+ * @n: The count of elements in the array returned from this function.
+ *
+ * Returns the device states parsed from the OF node. The memory for the states
+ * is allocated by this function, and it is the caller's responsibility to
+ * free it after use.
+ */
+int of_genpd_parse_idle_states(struct device_node *dn,
+ struct genpd_power_state **states, int *n)
+{
+ struct genpd_power_state *st;
+ struct device_node *np;
+ int i = 0;
+ int err, ret;
+ int count;
+ struct of_phandle_iterator it;
+
+ count = of_count_phandle_with_args(dn, "domain-idle-states", NULL);
+ if (count <= 0)
+ return -EINVAL;
+
+ st = kcalloc(count, sizeof(*st), GFP_KERNEL);
+ if (!st)
+ return -ENOMEM;
+
+	/* Loop over the phandles until all the requested entries are found */
+ of_for_each_phandle(&it, err, dn, "domain-idle-states", NULL, 0) {
+ np = it.node;
+ ret = genpd_parse_state(&st[i++], np);
+ if (ret) {
+			pr_err("Parsing idle state node %s failed with err %d\n",
+			       np->full_name, ret);
+ of_node_put(np);
+ kfree(st);
+ return ret;
+ }
+ }
+
+ *n = count;
+ *states = st;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(of_genpd_parse_idle_states);
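A hypothetical caller, showing the ownership contract from the kernel-doc above:

static int mydomain_init_states(struct generic_pm_domain *genpd,
				struct device_node *np)
{
	struct genpd_power_state *states;
	int ret, nr_states;

	ret = of_genpd_parse_idle_states(np, &states, &nr_states);
	if (ret)
		return ret;

	/* the caller owns the states array and must free it when done */
	genpd->states = states;
	genpd->state_count = nr_states;
	return 0;
}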
+
#endif /* CONFIG_PM_GENERIC_DOMAINS_OF */
@@ -1958,7 +2195,7 @@ static int pm_genpd_summary_one(struct seq_file *s,
char state[16];
int ret;
- ret = mutex_lock_interruptible(&genpd->lock);
+ ret = genpd_lock_interruptible(genpd);
if (ret)
return -ERESTARTSYS;
@@ -1984,7 +2221,9 @@ static int pm_genpd_summary_one(struct seq_file *s,
}
list_for_each_entry(pm_data, &genpd->dev_list, list_node) {
- kobj_path = kobject_get_path(&pm_data->dev->kobj, GFP_KERNEL);
+ kobj_path = kobject_get_path(&pm_data->dev->kobj,
+ genpd_is_irq_safe(genpd) ?
+ GFP_ATOMIC : GFP_KERNEL);
if (kobj_path == NULL)
continue;
@@ -1995,7 +2234,7 @@ static int pm_genpd_summary_one(struct seq_file *s,
seq_puts(s, "\n");
exit:
- mutex_unlock(&genpd->lock);
+ genpd_unlock(genpd);
return 0;
}
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index 2932a5bd892f..48c6294e9c34 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -131,6 +131,7 @@ void device_pm_add(struct device *dev)
dev_warn(dev, "parent %s should not be sleeping\n",
dev_name(dev->parent));
list_add_tail(&dev->power.entry, &dpm_list);
+ dev->power.in_dpm_list = true;
mutex_unlock(&dpm_list_mtx);
}
@@ -145,6 +146,7 @@ void device_pm_remove(struct device *dev)
complete_all(&dev->power.completion);
mutex_lock(&dpm_list_mtx);
list_del_init(&dev->power.entry);
+ dev->power.in_dpm_list = false;
mutex_unlock(&dpm_list_mtx);
device_wakeup_disable(dev);
pm_runtime_remove(dev);
@@ -244,6 +246,62 @@ static void dpm_wait_for_children(struct device *dev, bool async)
device_for_each_child(dev, &async, dpm_wait_fn);
}
+static void dpm_wait_for_suppliers(struct device *dev, bool async)
+{
+ struct device_link *link;
+ int idx;
+
+ idx = device_links_read_lock();
+
+ /*
+ * If the supplier goes away right after we've checked the link to it,
+ * we'll wait for its completion to change the state, but that's fine,
+ * because the only things that will block as a result are the SRCU
+ * callbacks freeing the link objects for the links in the list we're
+ * walking.
+ */
+ list_for_each_entry_rcu(link, &dev->links.suppliers, c_node)
+ if (READ_ONCE(link->status) != DL_STATE_DORMANT)
+ dpm_wait(link->supplier, async);
+
+ device_links_read_unlock(idx);
+}
+
+static void dpm_wait_for_superior(struct device *dev, bool async)
+{
+ dpm_wait(dev->parent, async);
+ dpm_wait_for_suppliers(dev, async);
+}
+
+static void dpm_wait_for_consumers(struct device *dev, bool async)
+{
+ struct device_link *link;
+ int idx;
+
+ idx = device_links_read_lock();
+
+ /*
+ * The status of a device link can only be changed from "dormant" by a
+ * probe, but that cannot happen during system suspend/resume. In
+ * theory it can change to "dormant" at that time, but then it is
+ * reasonable to wait for the target device anyway (e.g. if it goes
+ * away, it's better to wait for it to go away completely and then
+ * continue instead of trying to continue in parallel with its
+ * unregistration).
+ */
+ list_for_each_entry_rcu(link, &dev->links.consumers, s_node)
+ if (READ_ONCE(link->status) != DL_STATE_DORMANT)
+ dpm_wait(link->consumer, async);
+
+ device_links_read_unlock(idx);
+}
+
+static void dpm_wait_for_subordinate(struct device *dev, bool async)
+{
+ dpm_wait_for_children(dev, async);
+ dpm_wait_for_consumers(dev, async);
+}
+
/**
* pm_op - Return the PM operation appropriate for given PM event.
* @ops: PM operations to choose from.
@@ -488,7 +546,7 @@ static int device_resume_noirq(struct device *dev, pm_message_t state, bool asyn
if (!dev->power.is_noirq_suspended)
goto Out;
- dpm_wait(dev->parent, async);
+ dpm_wait_for_superior(dev, async);
if (dev->pm_domain) {
info = "noirq power domain ";
@@ -618,7 +676,7 @@ static int device_resume_early(struct device *dev, pm_message_t state, bool asyn
if (!dev->power.is_late_suspended)
goto Out;
- dpm_wait(dev->parent, async);
+ dpm_wait_for_superior(dev, async);
if (dev->pm_domain) {
info = "early power domain ";
@@ -750,7 +808,7 @@ static int device_resume(struct device *dev, pm_message_t state, bool async)
goto Complete;
}
- dpm_wait(dev->parent, async);
+ dpm_wait_for_superior(dev, async);
dpm_watchdog_set(&wd, dev);
device_lock(dev);
@@ -1027,7 +1085,7 @@ static int __device_suspend_noirq(struct device *dev, pm_message_t state, bool a
TRACE_DEVICE(dev);
TRACE_SUSPEND(0);
- dpm_wait_for_children(dev, async);
+ dpm_wait_for_subordinate(dev, async);
if (async_error)
goto Complete;
@@ -1174,7 +1232,7 @@ static int __device_suspend_late(struct device *dev, pm_message_t state, bool as
__pm_runtime_disable(dev, false);
- dpm_wait_for_children(dev, async);
+ dpm_wait_for_subordinate(dev, async);
if (async_error)
goto Complete;
@@ -1342,6 +1400,22 @@ static int legacy_suspend(struct device *dev, pm_message_t state,
return error;
}
+static void dpm_clear_suppliers_direct_complete(struct device *dev)
+{
+ struct device_link *link;
+ int idx;
+
+ idx = device_links_read_lock();
+
+ list_for_each_entry_rcu(link, &dev->links.suppliers, c_node) {
+ spin_lock_irq(&link->supplier->power.lock);
+ link->supplier->power.direct_complete = false;
+ spin_unlock_irq(&link->supplier->power.lock);
+ }
+
+ device_links_read_unlock(idx);
+}
+
/**
* device_suspend - Execute "suspend" callbacks for given device.
* @dev: Device to handle.
@@ -1358,7 +1432,7 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
TRACE_DEVICE(dev);
TRACE_SUSPEND(0);
- dpm_wait_for_children(dev, async);
+ dpm_wait_for_subordinate(dev, async);
if (async_error)
goto Complete;
@@ -1454,16 +1528,17 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
spin_unlock_irq(&parent->power.lock);
}
+ dpm_clear_suppliers_direct_complete(dev);
}
device_unlock(dev);
dpm_watchdog_clear(&wd);
Complete:
- complete_all(&dev->power.completion);
if (error)
async_error = error;
+ complete_all(&dev->power.completion);
TRACE_SUSPEND(error);
return error;
}
diff --git a/drivers/base/power/opp/core.c b/drivers/base/power/opp/core.c
index 4c7c6da7a989..35ff06283738 100644
--- a/drivers/base/power/opp/core.c
+++ b/drivers/base/power/opp/core.c
@@ -93,6 +93,8 @@ struct opp_table *_find_opp_table(struct device *dev)
* Return: voltage in micro volt corresponding to the opp, else
* return 0
*
+ * This is useful only for devices with a single power supply.
+ *
* Locking: This function must be called under rcu_read_lock(). opp is a rcu
* protected pointer. This means that opp which could have been fetched by
* opp_find_freq_{exact,ceil,floor} functions is valid as long as we are
@@ -112,7 +114,7 @@ unsigned long dev_pm_opp_get_voltage(struct dev_pm_opp *opp)
if (IS_ERR_OR_NULL(tmp_opp))
pr_err("%s: Invalid parameters\n", __func__);
else
- v = tmp_opp->u_volt;
+ v = tmp_opp->supplies[0].u_volt;
return v;
}
@@ -210,6 +212,24 @@ unsigned long dev_pm_opp_get_max_clock_latency(struct device *dev)
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_max_clock_latency);
+static int _get_regulator_count(struct device *dev)
+{
+ struct opp_table *opp_table;
+ int count;
+
+ rcu_read_lock();
+
+ opp_table = _find_opp_table(dev);
+ if (!IS_ERR(opp_table))
+ count = opp_table->regulator_count;
+ else
+ count = 0;
+
+ rcu_read_unlock();
+
+ return count;
+}
+
/**
* dev_pm_opp_get_max_volt_latency() - Get max voltage latency in nanoseconds
* @dev: device for which we do this operation
@@ -222,34 +242,51 @@ unsigned long dev_pm_opp_get_max_volt_latency(struct device *dev)
{
struct opp_table *opp_table;
struct dev_pm_opp *opp;
- struct regulator *reg;
+ struct regulator *reg, **regulators;
unsigned long latency_ns = 0;
- unsigned long min_uV = ~0, max_uV = 0;
- int ret;
+ int ret, i, count;
+ struct {
+ unsigned long min;
+ unsigned long max;
+ } *uV;
+
+ count = _get_regulator_count(dev);
+
+ /* Regulator may not be required for the device */
+ if (!count)
+ return 0;
+
+ regulators = kmalloc_array(count, sizeof(*regulators), GFP_KERNEL);
+ if (!regulators)
+ return 0;
+
+ uV = kmalloc_array(count, sizeof(*uV), GFP_KERNEL);
+ if (!uV)
+ goto free_regulators;
rcu_read_lock();
opp_table = _find_opp_table(dev);
if (IS_ERR(opp_table)) {
rcu_read_unlock();
- return 0;
+ goto free_uV;
}
- reg = opp_table->regulator;
- if (IS_ERR(reg)) {
- /* Regulator may not be required for device */
- rcu_read_unlock();
- return 0;
- }
+ memcpy(regulators, opp_table->regulators, count * sizeof(*regulators));
- list_for_each_entry_rcu(opp, &opp_table->opp_list, node) {
- if (!opp->available)
- continue;
+ for (i = 0; i < count; i++) {
+ uV[i].min = ~0;
+ uV[i].max = 0;
+
+ list_for_each_entry_rcu(opp, &opp_table->opp_list, node) {
+ if (!opp->available)
+ continue;
- if (opp->u_volt_min < min_uV)
- min_uV = opp->u_volt_min;
- if (opp->u_volt_max > max_uV)
- max_uV = opp->u_volt_max;
+ if (opp->supplies[i].u_volt_min < uV[i].min)
+ uV[i].min = opp->supplies[i].u_volt_min;
+ if (opp->supplies[i].u_volt_max > uV[i].max)
+ uV[i].max = opp->supplies[i].u_volt_max;
+ }
}
rcu_read_unlock();
@@ -258,9 +295,16 @@ unsigned long dev_pm_opp_get_max_volt_latency(struct device *dev)
* The caller needs to ensure that opp_table (and hence the regulator)
* isn't freed, while we are executing this routine.
*/
- ret = regulator_set_voltage_time(reg, min_uV, max_uV);
- if (ret > 0)
- latency_ns = ret * 1000;
+ for (i = 0; i < count; i++) {
+ reg = regulators[i];
+ ret = regulator_set_voltage_time(reg, uV[i].min, uV[i].max);
+ if (ret > 0)
+ latency_ns += ret * 1000;
+ }
+
+free_uV:
+ kfree(uV);
+free_regulators:
+ kfree(regulators);
return latency_ns;
}
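
A hedged caller-side sketch (function and variable names hypothetical): a cpufreq-style driver could combine the clock latency with the voltage latency computed above when sizing its transition latency.

#include <linux/cpufreq.h>
#include <linux/pm_opp.h>

static void example_init_transition_latency(struct device *cpu_dev,
                                            struct cpufreq_policy *policy)
{
        unsigned long latency_ns;

        /* Both helpers return nanoseconds; their sum bounds one switch. */
        latency_ns = dev_pm_opp_get_max_clock_latency(cpu_dev) +
                     dev_pm_opp_get_max_volt_latency(cpu_dev);

        policy->cpuinfo.transition_latency = latency_ns;
}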
@@ -542,8 +586,7 @@ unlock:
}
static int _set_opp_voltage(struct device *dev, struct regulator *reg,
- unsigned long u_volt, unsigned long u_volt_min,
- unsigned long u_volt_max)
+ struct dev_pm_opp_supply *supply)
{
int ret;
@@ -554,14 +597,78 @@ static int _set_opp_voltage(struct device *dev, struct regulator *reg,
return 0;
}
- dev_dbg(dev, "%s: voltages (mV): %lu %lu %lu\n", __func__, u_volt_min,
- u_volt, u_volt_max);
+ dev_dbg(dev, "%s: voltages (mV): %lu %lu %lu\n", __func__,
+ supply->u_volt_min, supply->u_volt, supply->u_volt_max);
- ret = regulator_set_voltage_triplet(reg, u_volt_min, u_volt,
- u_volt_max);
+ ret = regulator_set_voltage_triplet(reg, supply->u_volt_min,
+ supply->u_volt, supply->u_volt_max);
if (ret)
dev_err(dev, "%s: failed to set voltage (%lu %lu %lu mV): %d\n",
- __func__, u_volt_min, u_volt, u_volt_max, ret);
+ __func__, supply->u_volt_min, supply->u_volt,
+ supply->u_volt_max, ret);
+
+ return ret;
+}
+
+static inline int
+_generic_set_opp_clk_only(struct device *dev, struct clk *clk,
+ unsigned long old_freq, unsigned long freq)
+{
+ int ret;
+
+ ret = clk_set_rate(clk, freq);
+ if (ret) {
+ dev_err(dev, "%s: failed to set clock rate: %d\n", __func__,
+ ret);
+ }
+
+ return ret;
+}
+
+static int _generic_set_opp(struct dev_pm_set_opp_data *data)
+{
+ struct dev_pm_opp_supply *old_supply = data->old_opp.supplies;
+ struct dev_pm_opp_supply *new_supply = data->new_opp.supplies;
+ unsigned long old_freq = data->old_opp.rate, freq = data->new_opp.rate;
+ struct regulator *reg = data->regulators[0];
+ struct device *dev = data->dev;
+ int ret;
+
+ /* This function only supports a single regulator per device */
+ if (WARN_ON(data->regulator_count > 1)) {
+ dev_err(dev, "multiple regulators are not supported\n");
+ return -EINVAL;
+ }
+
+ /* Scaling up? Scale voltage before frequency */
+ if (freq > old_freq) {
+ ret = _set_opp_voltage(dev, reg, new_supply);
+ if (ret)
+ goto restore_voltage;
+ }
+
+ /* Change frequency */
+ ret = _generic_set_opp_clk_only(dev, data->clk, old_freq, freq);
+ if (ret)
+ goto restore_voltage;
+
+ /* Scaling down? Scale voltage after frequency */
+ if (freq < old_freq) {
+ ret = _set_opp_voltage(dev, reg, new_supply);
+ if (ret)
+ goto restore_freq;
+ }
+
+ return 0;
+
+restore_freq:
+ if (_generic_set_opp_clk_only(dev, data->clk, freq, old_freq))
+ dev_err(dev, "%s: failed to restore old-freq (%lu Hz)\n",
+ __func__, old_freq);
+restore_voltage:
+ /* This shouldn't harm even if the voltages weren't updated earlier */
+ if (old_supply->u_volt)
+ _set_opp_voltage(dev, reg, old_supply);
return ret;
}
@@ -579,12 +686,13 @@ static int _set_opp_voltage(struct device *dev, struct regulator *reg,
int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq)
{
struct opp_table *opp_table;
+ unsigned long freq, old_freq;
+ int (*set_opp)(struct dev_pm_set_opp_data *data);
struct dev_pm_opp *old_opp, *opp;
- struct regulator *reg;
+ struct regulator **regulators;
+ struct dev_pm_set_opp_data *data;
struct clk *clk;
- unsigned long freq, old_freq;
- unsigned long u_volt, u_volt_min, u_volt_max;
- int ret;
+ int ret, size;
if (unlikely(!target_freq)) {
dev_err(dev, "%s: Invalid target frequency %lu\n", __func__,
@@ -633,55 +741,41 @@ int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq)
return ret;
}
- u_volt = opp->u_volt;
- u_volt_min = opp->u_volt_min;
- u_volt_max = opp->u_volt_max;
+ dev_dbg(dev, "%s: switching OPP: %lu Hz --> %lu Hz\n", __func__,
+ old_freq, freq);
- reg = opp_table->regulator;
+ regulators = opp_table->regulators;
- rcu_read_unlock();
-
- /* Scaling up? Scale voltage before frequency */
- if (freq > old_freq) {
- ret = _set_opp_voltage(dev, reg, u_volt, u_volt_min,
- u_volt_max);
- if (ret)
- goto restore_voltage;
- }
-
- /* Change frequency */
-
- dev_dbg(dev, "%s: switching OPP: %lu Hz --> %lu Hz\n",
- __func__, old_freq, freq);
-
- ret = clk_set_rate(clk, freq);
- if (ret) {
- dev_err(dev, "%s: failed to set clock rate: %d\n", __func__,
- ret);
- goto restore_voltage;
+ /* Only frequency scaling */
+ if (!regulators) {
+ rcu_read_unlock();
+ return _generic_set_opp_clk_only(dev, clk, old_freq, freq);
}
- /* Scaling down? Scale voltage after frequency */
- if (freq < old_freq) {
- ret = _set_opp_voltage(dev, reg, u_volt, u_volt_min,
- u_volt_max);
- if (ret)
- goto restore_freq;
- }
+ if (opp_table->set_opp)
+ set_opp = opp_table->set_opp;
+ else
+ set_opp = _generic_set_opp;
+
+ data = opp_table->set_opp_data;
+ data->regulators = regulators;
+ data->regulator_count = opp_table->regulator_count;
+ data->clk = clk;
+ data->dev = dev;
+
+ data->old_opp.rate = old_freq;
+ size = sizeof(*opp->supplies) * opp_table->regulator_count;
+ if (IS_ERR(old_opp))
+ memset(data->old_opp.supplies, 0, size);
+ else
+ memcpy(data->old_opp.supplies, old_opp->supplies, size);
- return 0;
+ data->new_opp.rate = freq;
+ memcpy(data->new_opp.supplies, opp->supplies, size);
-restore_freq:
- if (clk_set_rate(clk, old_freq))
- dev_err(dev, "%s: failed to restore old-freq (%lu Hz)\n",
- __func__, old_freq);
-restore_voltage:
- /* This shouldn't harm even if the voltages weren't updated earlier */
- if (!IS_ERR(old_opp))
- _set_opp_voltage(dev, reg, old_opp->u_volt,
- old_opp->u_volt_min, old_opp->u_volt_max);
+ rcu_read_unlock();
- return ret;
+ return set_opp(data);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_set_rate);
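
For context, a hedged caller-side sketch (function name hypothetical): with the rework above, dev_pm_opp_set_rate() hides the supply/clock ordering entirely, so a driver's scaling path reduces to a single call.

#include <linux/pm_opp.h>

static int example_scale_device(struct device *dev, unsigned long target_freq)
{
        /*
         * The core picks the OPP at or above target_freq, orders the
         * voltage and clock changes itself (via the registered or the
         * generic set_opp helper) and unwinds on failure.
         */
        return dev_pm_opp_set_rate(dev, target_freq);
}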
@@ -764,9 +858,6 @@ static struct opp_table *_add_opp_table(struct device *dev)
_of_init_opp_table(opp_table, dev);
- /* Set regulator to a non-NULL error value */
- opp_table->regulator = ERR_PTR(-ENXIO);
-
/* Find clk for the device */
opp_table->clk = clk_get(dev, NULL);
if (IS_ERR(opp_table->clk)) {
@@ -815,7 +906,10 @@ static void _remove_opp_table(struct opp_table *opp_table)
if (opp_table->prop_name)
return;
- if (!IS_ERR(opp_table->regulator))
+ if (opp_table->regulators)
+ return;
+
+ if (opp_table->set_opp)
return;
/* Release clk */
@@ -924,34 +1018,50 @@ struct dev_pm_opp *_allocate_opp(struct device *dev,
struct opp_table **opp_table)
{
struct dev_pm_opp *opp;
+ int count, supply_size;
+ struct opp_table *table;
- /* allocate new OPP node */
- opp = kzalloc(sizeof(*opp), GFP_KERNEL);
- if (!opp)
+ table = _add_opp_table(dev);
+ if (!table)
return NULL;
- INIT_LIST_HEAD(&opp->node);
+ /* Allocate space for at least one supply */
+ count = table->regulator_count ? table->regulator_count : 1;
+ supply_size = sizeof(*opp->supplies) * count;
- *opp_table = _add_opp_table(dev);
- if (!*opp_table) {
- kfree(opp);
+ /* allocate new OPP node and supplies structures */
+ opp = kzalloc(sizeof(*opp) + supply_size, GFP_KERNEL);
+ if (!opp) {
+ _remove_opp_table(table);
return NULL;
}
+ /* Put the supplies at the end of the OPP structure as an empty array */
+ opp->supplies = (struct dev_pm_opp_supply *)(opp + 1);
+ INIT_LIST_HEAD(&opp->node);
+
+ *opp_table = table;
+
return opp;
}
static bool _opp_supported_by_regulators(struct dev_pm_opp *opp,
struct opp_table *opp_table)
{
- struct regulator *reg = opp_table->regulator;
-
- if (!IS_ERR(reg) &&
- !regulator_is_supported_voltage(reg, opp->u_volt_min,
- opp->u_volt_max)) {
- pr_warn("%s: OPP minuV: %lu maxuV: %lu, not supported by regulator\n",
- __func__, opp->u_volt_min, opp->u_volt_max);
- return false;
+ struct regulator *reg;
+ int i;
+
+ for (i = 0; i < opp_table->regulator_count; i++) {
+ reg = opp_table->regulators[i];
+
+ if (!regulator_is_supported_voltage(reg,
+ opp->supplies[i].u_volt_min,
+ opp->supplies[i].u_volt_max)) {
+ pr_warn("%s: OPP minuV: %lu maxuV: %lu, not supported by regulator\n",
+ __func__, opp->supplies[i].u_volt_min,
+ opp->supplies[i].u_volt_max);
+ return false;
+ }
}
return true;
@@ -983,11 +1093,13 @@ int _opp_add(struct device *dev, struct dev_pm_opp *new_opp,
/* Duplicate OPPs */
dev_warn(dev, "%s: duplicate OPPs detected. Existing: freq: %lu, volt: %lu, enabled: %d. New: freq: %lu, volt: %lu, enabled: %d\n",
- __func__, opp->rate, opp->u_volt, opp->available,
- new_opp->rate, new_opp->u_volt, new_opp->available);
+ __func__, opp->rate, opp->supplies[0].u_volt,
+ opp->available, new_opp->rate,
+ new_opp->supplies[0].u_volt, new_opp->available);
- return opp->available && new_opp->u_volt == opp->u_volt ?
- 0 : -EEXIST;
+ /* Should we compare voltages for all regulators here? */
+ return opp->available &&
+ new_opp->supplies[0].u_volt == opp->supplies[0].u_volt ? 0 : -EEXIST;
}
new_opp->opp_table = opp_table;
@@ -1054,9 +1166,9 @@ int _opp_add_v1(struct device *dev, unsigned long freq, long u_volt,
/* populate the opp table */
new_opp->rate = freq;
tol = u_volt * opp_table->voltage_tolerance_v1 / 100;
- new_opp->u_volt = u_volt;
- new_opp->u_volt_min = u_volt - tol;
- new_opp->u_volt_max = u_volt + tol;
+ new_opp->supplies[0].u_volt = u_volt;
+ new_opp->supplies[0].u_volt_min = u_volt - tol;
+ new_opp->supplies[0].u_volt_max = u_volt + tol;
new_opp->available = true;
new_opp->dynamic = dynamic;
@@ -1300,13 +1412,47 @@ unlock:
}
EXPORT_SYMBOL_GPL(dev_pm_opp_put_prop_name);
+static int _allocate_set_opp_data(struct opp_table *opp_table)
+{
+ struct dev_pm_set_opp_data *data;
+ int len, count = opp_table->regulator_count;
+
+ if (WARN_ON(!count))
+ return -EINVAL;
+
+ /* space for set_opp_data */
+ len = sizeof(*data);
+
+ /* space for old_opp.supplies and new_opp.supplies */
+ len += 2 * sizeof(struct dev_pm_opp_supply) * count;
+
+ data = kzalloc(len, GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->old_opp.supplies = (void *)(data + 1);
+ data->new_opp.supplies = data->old_opp.supplies + count;
+
+ opp_table->set_opp_data = data;
+
+ return 0;
+}
+
+static void _free_set_opp_data(struct opp_table *opp_table)
+{
+ kfree(opp_table->set_opp_data);
+ opp_table->set_opp_data = NULL;
+}
+
/**
- * dev_pm_opp_set_regulator() - Set regulator name for the device
+ * dev_pm_opp_set_regulators() - Set regulator names for the device
 * @dev: Device for which regulator names are being set.
- * @name: Name of the regulator.
+ * @names: Array of pointers to the names of the regulators.
+ * @count: Number of regulators.
*
* In order to support OPP switching, OPP layer needs to know the name of the
- * device's regulator, as the core would be required to switch voltages as well.
+ * device's regulators, as the core would be required to switch voltages as
+ * well.
*
* This must be called before any OPPs are initialized for the device.
*
@@ -1316,11 +1462,13 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_put_prop_name);
* that this function is *NOT* called under RCU protection or in contexts where
* mutex cannot be locked.
*/
-int dev_pm_opp_set_regulator(struct device *dev, const char *name)
+struct opp_table *dev_pm_opp_set_regulators(struct device *dev,
+ const char * const names[],
+ unsigned int count)
{
struct opp_table *opp_table;
struct regulator *reg;
- int ret;
+ int ret, i;
mutex_lock(&opp_table_lock);
@@ -1336,22 +1484,146 @@ int dev_pm_opp_set_regulator(struct device *dev, const char *name)
goto err;
}
- /* Already have a regulator set */
- if (WARN_ON(!IS_ERR(opp_table->regulator))) {
+ /* Already have regulators set */
+ if (opp_table->regulators) {
ret = -EBUSY;
goto err;
}
- /* Allocate the regulator */
- reg = regulator_get_optional(dev, name);
- if (IS_ERR(reg)) {
- ret = PTR_ERR(reg);
- if (ret != -EPROBE_DEFER)
- dev_err(dev, "%s: no regulator (%s) found: %d\n",
- __func__, name, ret);
+
+ opp_table->regulators = kmalloc_array(count,
+ sizeof(*opp_table->regulators),
+ GFP_KERNEL);
+ if (!opp_table->regulators) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ for (i = 0; i < count; i++) {
+ reg = regulator_get_optional(dev, names[i]);
+ if (IS_ERR(reg)) {
+ ret = PTR_ERR(reg);
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "%s: no regulator (%s) found: %d\n",
+ __func__, names[i], ret);
+ goto free_regulators;
+ }
+
+ opp_table->regulators[i] = reg;
+ }
+
+ opp_table->regulator_count = count;
+
+ /* Allocate block only once to pass to set_opp() routines */
+ ret = _allocate_set_opp_data(opp_table);
+ if (ret)
+ goto free_regulators;
+
+ mutex_unlock(&opp_table_lock);
+ return opp_table;
+
+free_regulators:
+ while (i != 0)
+ regulator_put(opp_table->regulators[--i]);
+
+ kfree(opp_table->regulators);
+ opp_table->regulators = NULL;
+ opp_table->regulator_count = 0;
+err:
+ _remove_opp_table(opp_table);
+unlock:
+ mutex_unlock(&opp_table_lock);
+
+ return ERR_PTR(ret);
+}
+EXPORT_SYMBOL_GPL(dev_pm_opp_set_regulators);
+
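A hedged probe-time sketch of the new API (supply names and the OF table call are illustrative); the returned opp_table is the handle that dev_pm_opp_put_regulators() below expects.

#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/pm_opp.h>

/* Hypothetical supply names; they would match the DT regulator bindings. */
static const char * const example_supplies[] = { "vdd-mpu", "vbb-mpu" };

static int example_opp_init(struct device *dev)
{
        struct opp_table *opp_table;
        int ret;

        opp_table = dev_pm_opp_set_regulators(dev, example_supplies,
                                              ARRAY_SIZE(example_supplies));
        if (IS_ERR(opp_table))
                return PTR_ERR(opp_table);

        ret = dev_pm_opp_of_add_table(dev);
        if (ret)
                dev_pm_opp_put_regulators(opp_table);

        return ret;
}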
+/**
+ * dev_pm_opp_put_regulators() - Releases resources blocked for regulators
+ * @opp_table: OPP table returned from dev_pm_opp_set_regulators().
+ *
+ * Locking: The internal opp_table and opp structures are RCU protected.
+ * Hence this function internally uses RCU updater strategy with mutex locks
+ * to keep the integrity of the internal data structures. Callers should ensure
+ * that this function is *NOT* called under RCU protection or in contexts where
+ * mutex cannot be locked.
+ */
+void dev_pm_opp_put_regulators(struct opp_table *opp_table)
+{
+ int i;
+
+ mutex_lock(&opp_table_lock);
+
+ if (!opp_table->regulators) {
+ pr_err("%s: Doesn't have regulators set\n", __func__);
+ goto unlock;
+ }
+
+ /* Make sure there are no concurrent readers while updating opp_table */
+ WARN_ON(!list_empty(&opp_table->opp_list));
+
+ for (i = opp_table->regulator_count - 1; i >= 0; i--)
+ regulator_put(opp_table->regulators[i]);
+
+ _free_set_opp_data(opp_table);
+
+ kfree(opp_table->regulators);
+ opp_table->regulators = NULL;
+ opp_table->regulator_count = 0;
+
+ /* Try freeing opp_table if this was the last blocking resource */
+ _remove_opp_table(opp_table);
+
+unlock:
+ mutex_unlock(&opp_table_lock);
+}
+EXPORT_SYMBOL_GPL(dev_pm_opp_put_regulators);
+
+/**
+ * dev_pm_opp_register_set_opp_helper() - Register custom set OPP helper
+ * @dev: Device for which the helper is getting registered.
+ * @set_opp: Custom set OPP helper.
+ *
+ * This is useful to support complex platforms (like platforms with multiple
+ * regulators per device) that cannot use the generic OPP set rate helper.
+ *
+ * This must be called before any OPPs are initialized for the device.
+ *
+ * Locking: The internal opp_table and opp structures are RCU protected.
+ * Hence this function internally uses RCU updater strategy with mutex locks
+ * to keep the integrity of the internal data structures. Callers should ensure
+ * that this function is *NOT* called under RCU protection or in contexts where
+ * mutex cannot be locked.
+ */
+int dev_pm_opp_register_set_opp_helper(struct device *dev,
+ int (*set_opp)(struct dev_pm_set_opp_data *data))
+{
+ struct opp_table *opp_table;
+ int ret;
+
+ if (!set_opp)
+ return -EINVAL;
+
+ mutex_lock(&opp_table_lock);
+
+ opp_table = _add_opp_table(dev);
+ if (!opp_table) {
+ ret = -ENOMEM;
+ goto unlock;
+ }
+
+ /* This should be called before OPPs are initialized */
+ if (WARN_ON(!list_empty(&opp_table->opp_list))) {
+ ret = -EBUSY;
goto err;
}
- opp_table->regulator = reg;
+ /* Already have custom set_opp helper */
+ if (WARN_ON(opp_table->set_opp)) {
+ ret = -EBUSY;
+ goto err;
+ }
+
+ opp_table->set_opp = set_opp;
mutex_unlock(&opp_table_lock);
return 0;
@@ -1363,11 +1635,12 @@ unlock:
return ret;
}
-EXPORT_SYMBOL_GPL(dev_pm_opp_set_regulator);
+EXPORT_SYMBOL_GPL(dev_pm_opp_register_set_opp_helper);
/**
- * dev_pm_opp_put_regulator() - Releases resources blocked for regulator
- * @dev: Device for which regulator was set.
+ * dev_pm_opp_register_put_opp_helper() - Releases resources blocked for
+ * set_opp helper
+ * @dev: Device for which custom set_opp helper has to be cleared.
*
* Locking: The internal opp_table and opp structures are RCU protected.
* Hence this function internally uses RCU updater strategy with mutex locks
@@ -1375,7 +1648,7 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_set_regulator);
* that this function is *NOT* called under RCU protection or in contexts where
* mutex cannot be locked.
*/
-void dev_pm_opp_put_regulator(struct device *dev)
+void dev_pm_opp_register_put_opp_helper(struct device *dev)
{
struct opp_table *opp_table;
@@ -1389,16 +1662,16 @@ void dev_pm_opp_put_regulator(struct device *dev)
goto unlock;
}
- if (IS_ERR(opp_table->regulator)) {
- dev_err(dev, "%s: Doesn't have regulator set\n", __func__);
+ if (!opp_table->set_opp) {
+ dev_err(dev, "%s: Doesn't have custom set_opp helper set\n",
+ __func__);
goto unlock;
}
/* Make sure there are no concurrent readers while updating opp_table */
WARN_ON(!list_empty(&opp_table->opp_list));
- regulator_put(opp_table->regulator);
- opp_table->regulator = ERR_PTR(-ENXIO);
+ opp_table->set_opp = NULL;
/* Try freeing opp_table if this was the last blocking resource */
_remove_opp_table(opp_table);
@@ -1406,7 +1679,7 @@ void dev_pm_opp_put_regulator(struct device *dev)
unlock:
mutex_unlock(&opp_table_lock);
}
-EXPORT_SYMBOL_GPL(dev_pm_opp_put_regulator);
+EXPORT_SYMBOL_GPL(dev_pm_opp_register_put_opp_helper);
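
A hedged sketch of a custom helper that could be registered through dev_pm_opp_register_set_opp_helper() (names hypothetical). It keeps the same up/down ordering as _generic_set_opp() but programs every supply; a production callback would also restore the previous voltages and frequency on failure, omitted here for brevity.

#include <linux/clk.h>
#include <linux/pm_opp.h>
#include <linux/regulator/consumer.h>

static int example_set_all_supplies(struct dev_pm_set_opp_data *data)
{
        struct dev_pm_opp_supply *new = data->new_opp.supplies;
        int ret, i;

        for (i = 0; i < data->regulator_count; i++) {
                ret = regulator_set_voltage_triplet(data->regulators[i],
                                                    new[i].u_volt_min,
                                                    new[i].u_volt,
                                                    new[i].u_volt_max);
                if (ret)
                        return ret;
        }

        return 0;
}

static int example_set_opp(struct dev_pm_set_opp_data *data)
{
        unsigned long old_freq = data->old_opp.rate;
        unsigned long freq = data->new_opp.rate;
        int ret;

        /* Scaling up? Raise every supply before touching the clock. */
        if (freq > old_freq) {
                ret = example_set_all_supplies(data);
                if (ret)
                        return ret;
        }

        ret = clk_set_rate(data->clk, freq);
        if (ret)
                return ret;

        /* Scaling down? Lower the supplies only after the clock. */
        if (freq < old_freq)
                ret = example_set_all_supplies(data);

        return ret;
}

A driver would register it once at probe time, before adding any OPPs, via dev_pm_opp_register_set_opp_helper(dev, example_set_opp).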
/**
* dev_pm_opp_add() - Add an OPP table from a table definitions
diff --git a/drivers/base/power/opp/debugfs.c b/drivers/base/power/opp/debugfs.c
index ef1ae6b52042..95f433db4ac7 100644
--- a/drivers/base/power/opp/debugfs.c
+++ b/drivers/base/power/opp/debugfs.c
@@ -15,6 +15,7 @@
#include <linux/err.h>
#include <linux/init.h>
#include <linux/limits.h>
+#include <linux/slab.h>
#include "opp.h"
@@ -34,6 +35,46 @@ void opp_debug_remove_one(struct dev_pm_opp *opp)
debugfs_remove_recursive(opp->dentry);
}
+static bool opp_debug_create_supplies(struct dev_pm_opp *opp,
+ struct opp_table *opp_table,
+ struct dentry *pdentry)
+{
+ struct dentry *d;
+ int i = 0;
+ char *name;
+
+ /* Always create at least supply-0 directory */
+ do {
+ name = kasprintf(GFP_KERNEL, "supply-%d", i);
+
+ /* Create per-opp directory */
+ d = debugfs_create_dir(name, pdentry);
+
+ kfree(name);
+
+ if (!d)
+ return false;
+
+ if (!debugfs_create_ulong("u_volt_target", S_IRUGO, d,
+ &opp->supplies[i].u_volt))
+ return false;
+
+ if (!debugfs_create_ulong("u_volt_min", S_IRUGO, d,
+ &opp->supplies[i].u_volt_min))
+ return false;
+
+ if (!debugfs_create_ulong("u_volt_max", S_IRUGO, d,
+ &opp->supplies[i].u_volt_max))
+ return false;
+
+ if (!debugfs_create_ulong("u_amp", S_IRUGO, d,
+ &opp->supplies[i].u_amp))
+ return false;
+ } while (++i < opp_table->regulator_count);
+
+ return true;
+}
+
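For orientation, a hedged sketch of the debugfs layout this produces (device name and rate purely illustrative):

/sys/kernel/debug/opp/
    cpu0/
        opp:1000000000/
            supply-0/u_volt_target, u_volt_min, u_volt_max, u_amp
            supply-1/...        (one directory per registered regulator)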
int opp_debug_create_one(struct dev_pm_opp *opp, struct opp_table *opp_table)
{
struct dentry *pdentry = opp_table->dentry;
@@ -63,16 +104,7 @@ int opp_debug_create_one(struct dev_pm_opp *opp, struct opp_table *opp_table)
if (!debugfs_create_ulong("rate_hz", S_IRUGO, d, &opp->rate))
return -ENOMEM;
- if (!debugfs_create_ulong("u_volt_target", S_IRUGO, d, &opp->u_volt))
- return -ENOMEM;
-
- if (!debugfs_create_ulong("u_volt_min", S_IRUGO, d, &opp->u_volt_min))
- return -ENOMEM;
-
- if (!debugfs_create_ulong("u_volt_max", S_IRUGO, d, &opp->u_volt_max))
- return -ENOMEM;
-
- if (!debugfs_create_ulong("u_amp", S_IRUGO, d, &opp->u_amp))
+ if (!opp_debug_create_supplies(opp, opp_table, d))
return -ENOMEM;
if (!debugfs_create_ulong("clock_latency_ns", S_IRUGO, d,
diff --git a/drivers/base/power/opp/of.c b/drivers/base/power/opp/of.c
index 5552211e6fcd..3f7d2591b173 100644
--- a/drivers/base/power/opp/of.c
+++ b/drivers/base/power/opp/of.c
@@ -17,6 +17,7 @@
#include <linux/errno.h>
#include <linux/device.h>
#include <linux/of.h>
+#include <linux/slab.h>
#include <linux/export.h>
#include "opp.h"
@@ -101,16 +102,16 @@ static bool _opp_is_supported(struct device *dev, struct opp_table *opp_table,
return true;
}
-/* TODO: Support multiple regulators */
static int opp_parse_supplies(struct dev_pm_opp *opp, struct device *dev,
struct opp_table *opp_table)
{
- u32 microvolt[3] = {0};
- u32 val;
- int count, ret;
+ u32 *microvolt, *microamp = NULL;
+ int supplies, vcount, icount, ret, i, j;
struct property *prop = NULL;
char name[NAME_MAX];
+ supplies = opp_table->regulator_count ? opp_table->regulator_count : 1;
+
/* Search for "opp-microvolt-<name>" */
if (opp_table->prop_name) {
snprintf(name, sizeof(name), "opp-microvolt-%s",
@@ -128,34 +129,29 @@ static int opp_parse_supplies(struct dev_pm_opp *opp, struct device *dev,
return 0;
}
- count = of_property_count_u32_elems(opp->np, name);
- if (count < 0) {
+ vcount = of_property_count_u32_elems(opp->np, name);
+ if (vcount < 0) {
dev_err(dev, "%s: Invalid %s property (%d)\n",
- __func__, name, count);
- return count;
+ __func__, name, vcount);
+ return vcount;
}
- /* There can be one or three elements here */
- if (count != 1 && count != 3) {
- dev_err(dev, "%s: Invalid number of elements in %s property (%d)\n",
- __func__, name, count);
+ /* There can be one or three elements per supply */
+ if (vcount != supplies && vcount != supplies * 3) {
+ dev_err(dev, "%s: Invalid number of elements in %s property (%d) with supplies (%d)\n",
+ __func__, name, vcount, supplies);
return -EINVAL;
}
- ret = of_property_read_u32_array(opp->np, name, microvolt, count);
+ microvolt = kmalloc_array(vcount, sizeof(*microvolt), GFP_KERNEL);
+ if (!microvolt)
+ return -ENOMEM;
+
+ ret = of_property_read_u32_array(opp->np, name, microvolt, vcount);
if (ret) {
dev_err(dev, "%s: error parsing %s: %d\n", __func__, name, ret);
- return -EINVAL;
- }
-
- opp->u_volt = microvolt[0];
-
- if (count == 1) {
- opp->u_volt_min = opp->u_volt;
- opp->u_volt_max = opp->u_volt;
- } else {
- opp->u_volt_min = microvolt[1];
- opp->u_volt_max = microvolt[2];
+ ret = -EINVAL;
+ goto free_microvolt;
}
/* Search for "opp-microamp-<name>" */
@@ -172,10 +168,59 @@ static int opp_parse_supplies(struct dev_pm_opp *opp, struct device *dev,
prop = of_find_property(opp->np, name, NULL);
}
- if (prop && !of_property_read_u32(opp->np, name, &val))
- opp->u_amp = val;
+ if (prop) {
+ icount = of_property_count_u32_elems(opp->np, name);
+ if (icount < 0) {
+ dev_err(dev, "%s: Invalid %s property (%d)\n", __func__,
+ name, icount);
+ ret = icount;
+ goto free_microvolt;
+ }
- return 0;
+ if (icount != supplies) {
+ dev_err(dev, "%s: Invalid number of elements in %s property (%d) with supplies (%d)\n",
+ __func__, name, icount, supplies);
+ ret = -EINVAL;
+ goto free_microvolt;
+ }
+
+ microamp = kmalloc_array(icount, sizeof(*microamp), GFP_KERNEL);
+ if (!microamp) {
+ ret = -ENOMEM;
+ goto free_microvolt;
+ }
+
+ ret = of_property_read_u32_array(opp->np, name, microamp,
+ icount);
+ if (ret) {
+ dev_err(dev, "%s: error parsing %s: %d\n", __func__,
+ name, ret);
+ ret = -EINVAL;
+ goto free_microamp;
+ }
+ }
+
+ for (i = 0, j = 0; i < supplies; i++) {
+ opp->supplies[i].u_volt = microvolt[j++];
+
+ if (vcount == supplies) {
+ opp->supplies[i].u_volt_min = opp->supplies[i].u_volt;
+ opp->supplies[i].u_volt_max = opp->supplies[i].u_volt;
+ } else {
+ opp->supplies[i].u_volt_min = microvolt[j++];
+ opp->supplies[i].u_volt_max = microvolt[j++];
+ }
+
+ if (microamp)
+ opp->supplies[i].u_amp = microamp[i];
+ }
+
+free_microamp:
+ kfree(microamp);
+free_microvolt:
+ kfree(microvolt);
+
+ return ret;
}
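
To make the element-count rules concrete, a hedged devicetree fragment for a device with two supplies (all values hypothetical). opp-microvolt carries one or three cells per supply, in supply order; opp-microamp carries exactly one cell per supply:

/*
 * Illustrative DT fragment, regulator_count == 2; each opp-microvolt
 * group is <target min max> for one supply, supply-0 first.
 *
 *      opp@1000000000 {
 *              opp-hz = /bits/ 64 <1000000000>;
 *              opp-microvolt = <1025000 1000000 1050000>,
 *                              <1200000 1175000 1225000>;
 *              opp-microamp = <70000 20000>;
 *      };
 */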
/**
@@ -198,7 +243,7 @@ void dev_pm_opp_of_remove_table(struct device *dev)
EXPORT_SYMBOL_GPL(dev_pm_opp_of_remove_table);
/* Returns opp descriptor node for a device, caller must do of_node_put() */
-struct device_node *_of_get_opp_desc_node(struct device *dev)
+static struct device_node *_of_get_opp_desc_node(struct device *dev)
{
/*
* TODO: Support for multiple OPP tables.
@@ -303,9 +348,9 @@ static int _opp_add_static_v2(struct device *dev, struct device_node *np)
mutex_unlock(&opp_table_lock);
pr_debug("%s: turbo:%d rate:%lu uv:%lu uvmin:%lu uvmax:%lu latency:%lu\n",
- __func__, new_opp->turbo, new_opp->rate, new_opp->u_volt,
- new_opp->u_volt_min, new_opp->u_volt_max,
- new_opp->clock_latency_ns);
+ __func__, new_opp->turbo, new_opp->rate,
+ new_opp->supplies[0].u_volt, new_opp->supplies[0].u_volt_min,
+ new_opp->supplies[0].u_volt_max, new_opp->clock_latency_ns);
/*
* Notify the changes in the availability of the operable
@@ -562,7 +607,7 @@ int dev_pm_opp_of_get_sharing_cpus(struct device *cpu_dev,
/* Get OPP descriptor node */
np = _of_get_opp_desc_node(cpu_dev);
if (!np) {
- dev_dbg(cpu_dev, "%s: Couldn't find cpu_dev node.\n", __func__);
+ dev_dbg(cpu_dev, "%s: Couldn't find opp node.\n", __func__);
return -ENOENT;
}
@@ -587,7 +632,7 @@ int dev_pm_opp_of_get_sharing_cpus(struct device *cpu_dev,
/* Get OPP descriptor node */
tmp_np = _of_get_opp_desc_node(tcpu_dev);
if (!tmp_np) {
- dev_err(tcpu_dev, "%s: Couldn't find tcpu_dev node.\n",
+ dev_err(tcpu_dev, "%s: Couldn't find opp node.\n",
__func__);
ret = -ENOENT;
goto put_cpu_node;
diff --git a/drivers/base/power/opp/opp.h b/drivers/base/power/opp/opp.h
index fabd5ca1a083..af9f2b849a66 100644
--- a/drivers/base/power/opp/opp.h
+++ b/drivers/base/power/opp/opp.h
@@ -61,10 +61,7 @@ extern struct list_head opp_tables;
* @turbo: true if turbo (boost) OPP
* @suspend: true if suspend OPP
* @rate: Frequency in hertz
- * @u_volt: Target voltage in microvolts corresponding to this OPP
- * @u_volt_min: Minimum voltage in microvolts corresponding to this OPP
- * @u_volt_max: Maximum voltage in microvolts corresponding to this OPP
- * @u_amp: Maximum current drawn by the device in microamperes
+ * @supplies: Power supplies voltage/current values
* @clock_latency_ns: Latency (in nanoseconds) of switching to this OPP's
* frequency from any other OPP's frequency.
* @opp_table: points back to the opp_table struct this opp belongs to
@@ -83,10 +80,8 @@ struct dev_pm_opp {
bool suspend;
unsigned long rate;
- unsigned long u_volt;
- unsigned long u_volt_min;
- unsigned long u_volt_max;
- unsigned long u_amp;
+ struct dev_pm_opp_supply *supplies;
+
unsigned long clock_latency_ns;
struct opp_table *opp_table;
@@ -144,7 +139,10 @@ enum opp_table_access {
* @supported_hw_count: Number of elements in supported_hw array.
* @prop_name: A name to postfix to many DT properties, while parsing them.
* @clk: Device's clock handle
- * @regulator: Supply regulator
+ * @regulators: Supply regulators
+ * @regulator_count: Number of power supply regulators
+ * @set_opp: Platform specific set_opp callback
+ * @set_opp_data: Data to be passed to set_opp callback
* @dentry: debugfs dentry pointer of the real device directory (not links).
* @dentry_name: Name of the real dentry.
*
@@ -179,7 +177,11 @@ struct opp_table {
unsigned int supported_hw_count;
const char *prop_name;
struct clk *clk;
- struct regulator *regulator;
+ struct regulator **regulators;
+ unsigned int regulator_count;
+
+ int (*set_opp)(struct dev_pm_set_opp_data *data);
+ struct dev_pm_set_opp_data *set_opp_data;
#ifdef CONFIG_DEBUG_FS
struct dentry *dentry;
@@ -190,7 +192,6 @@ struct opp_table {
/* Routines internal to opp core */
struct opp_table *_find_opp_table(struct device *dev);
struct opp_device *_add_opp_dev(const struct device *dev, struct opp_table *opp_table);
-struct device_node *_of_get_opp_desc_node(struct device *dev);
void _dev_pm_opp_remove_table(struct device *dev, bool remove_all);
struct dev_pm_opp *_allocate_opp(struct device *dev, struct opp_table **opp_table);
int _opp_add(struct device *dev, struct dev_pm_opp *new_opp, struct opp_table *opp_table);
diff --git a/drivers/base/power/power.h b/drivers/base/power/power.h
index 50e30e7b059d..a46e97e515c5 100644
--- a/drivers/base/power/power.h
+++ b/drivers/base/power/power.h
@@ -21,14 +21,22 @@ extern void pm_runtime_init(struct device *dev);
extern void pm_runtime_reinit(struct device *dev);
extern void pm_runtime_remove(struct device *dev);
+#define WAKE_IRQ_DEDICATED_ALLOCATED BIT(0)
+#define WAKE_IRQ_DEDICATED_MANAGED BIT(1)
+#define WAKE_IRQ_DEDICATED_MASK (WAKE_IRQ_DEDICATED_ALLOCATED | \
+ WAKE_IRQ_DEDICATED_MANAGED)
+
struct wake_irq {
struct device *dev;
+ unsigned int status;
int irq;
- bool dedicated_irq:1;
};
extern void dev_pm_arm_wake_irq(struct wake_irq *wirq);
extern void dev_pm_disarm_wake_irq(struct wake_irq *wirq);
+extern void dev_pm_enable_wake_irq_check(struct device *dev,
+ bool can_change_status);
+extern void dev_pm_disable_wake_irq_check(struct device *dev);
#ifdef CONFIG_PM_SLEEP
@@ -104,6 +112,15 @@ static inline void dev_pm_disarm_wake_irq(struct wake_irq *wirq)
{
}
+static inline void dev_pm_enable_wake_irq_check(struct device *dev,
+ bool can_change_status)
+{
+}
+
+static inline void dev_pm_disable_wake_irq_check(struct device *dev)
+{
+}
+
#endif
#ifdef CONFIG_PM_SLEEP
@@ -127,6 +144,11 @@ extern void device_pm_move_after(struct device *, struct device *);
extern void device_pm_move_last(struct device *);
extern void device_pm_check_callbacks(struct device *dev);
+static inline bool device_pm_initialized(struct device *dev)
+{
+ return dev->power.in_dpm_list;
+}
+
#else /* !CONFIG_PM_SLEEP */
static inline void device_pm_sleep_init(struct device *dev) {}
@@ -146,6 +168,11 @@ static inline void device_pm_move_last(struct device *dev) {}
static inline void device_pm_check_callbacks(struct device *dev) {}
+static inline bool device_pm_initialized(struct device *dev)
+{
+ return device_is_registered(dev);
+}
+
#endif /* !CONFIG_PM_SLEEP */
static inline void device_pm_init(struct device *dev)
diff --git a/drivers/base/power/qos.c b/drivers/base/power/qos.c
index 7f3646e459cb..58fcc758334e 100644
--- a/drivers/base/power/qos.c
+++ b/drivers/base/power/qos.c
@@ -856,7 +856,10 @@ int dev_pm_qos_update_user_latency_tolerance(struct device *dev, s32 val)
struct dev_pm_qos_request *req;
if (val < 0) {
- ret = -EINVAL;
+ if (val == PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT)
+ ret = 0;
+ else
+ ret = -EINVAL;
goto out;
}
req = kzalloc(sizeof(*req), GFP_KERNEL);
@@ -883,6 +886,7 @@ int dev_pm_qos_update_user_latency_tolerance(struct device *dev, s32 val)
mutex_unlock(&dev_pm_qos_mtx);
return ret;
}
+EXPORT_SYMBOL_GPL(dev_pm_qos_update_user_latency_tolerance);
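
Now that the helper is exported, a hedged in-kernel sketch (the caller is assumed to hold a valid reference on dev):

#include <linux/pm_qos.h>

/* Drop any user-space latency-tolerance constraint for the device. */
static inline int example_clear_latency_tolerance(struct device *dev)
{
        return dev_pm_qos_update_user_latency_tolerance(dev,
                        PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT);
}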
/**
* dev_pm_qos_expose_latency_tolerance - Expose latency tolerance to userspace
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index 82a081ea4317..872eac4cb1df 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -12,6 +12,8 @@
#include <linux/pm_runtime.h>
#include <linux/pm_wakeirq.h>
#include <trace/events/rpm.h>
+
+#include "../base.h"
#include "power.h"
typedef int (*pm_callback_t)(struct device *);
@@ -241,7 +243,8 @@ static int rpm_check_suspend_allowed(struct device *dev)
retval = -EACCES;
else if (atomic_read(&dev->power.usage_count) > 0)
retval = -EAGAIN;
- else if (!pm_children_suspended(dev))
+ else if (!dev->power.ignore_children &&
+ atomic_read(&dev->power.child_count))
retval = -EBUSY;
/* Pending resume requests take precedence over suspends. */
@@ -258,6 +261,42 @@ static int rpm_check_suspend_allowed(struct device *dev)
return retval;
}
+static int rpm_get_suppliers(struct device *dev)
+{
+ struct device_link *link;
+
+ list_for_each_entry_rcu(link, &dev->links.suppliers, c_node) {
+ int retval;
+
+ if (!(link->flags & DL_FLAG_PM_RUNTIME))
+ continue;
+
+ if (READ_ONCE(link->status) == DL_STATE_SUPPLIER_UNBIND ||
+ link->rpm_active)
+ continue;
+
+ retval = pm_runtime_get_sync(link->supplier);
+ if (retval < 0) {
+ pm_runtime_put_noidle(link->supplier);
+ return retval;
+ }
+ link->rpm_active = true;
+ }
+ return 0;
+}
+
+static void rpm_put_suppliers(struct device *dev)
+{
+ struct device_link *link;
+
+ list_for_each_entry_rcu(link, &dev->links.suppliers, c_node)
+ if (link->rpm_active &&
+ READ_ONCE(link->status) != DL_STATE_SUPPLIER_UNBIND) {
+ pm_runtime_put(link->supplier);
+ link->rpm_active = false;
+ }
+}
+
/**
* __rpm_callback - Run a given runtime PM callback for a given device.
* @cb: Runtime PM callback to run.
@@ -266,19 +305,57 @@ static int rpm_check_suspend_allowed(struct device *dev)
static int __rpm_callback(int (*cb)(struct device *), struct device *dev)
__releases(&dev->power.lock) __acquires(&dev->power.lock)
{
- int retval;
+ int retval, idx;
+ bool use_links = dev->power.links_count > 0;
- if (dev->power.irq_safe)
+ if (dev->power.irq_safe) {
spin_unlock(&dev->power.lock);
- else
+ } else {
spin_unlock_irq(&dev->power.lock);
+ /*
+ * Resume suppliers if necessary.
+ *
+ * The device's runtime PM status cannot change until this
+ * routine returns, so it is safe to read the status outside of
+ * the lock.
+ */
+ if (use_links && dev->power.runtime_status == RPM_RESUMING) {
+ idx = device_links_read_lock();
+
+ retval = rpm_get_suppliers(dev);
+ if (retval)
+ goto fail;
+
+ device_links_read_unlock(idx);
+ }
+ }
+
retval = cb(dev);
- if (dev->power.irq_safe)
+ if (dev->power.irq_safe) {
spin_lock(&dev->power.lock);
- else
+ } else {
+ /*
+ * If the device is suspending and the callback has returned
+ * success, drop the usage counters of the suppliers that have
+ * been reference counted on its resume.
+ *
+ * Do that if resume fails too.
+ */
+ if (use_links
+ && ((dev->power.runtime_status == RPM_SUSPENDING && !retval)
+ || (dev->power.runtime_status == RPM_RESUMING && retval))) {
+ idx = device_links_read_lock();
+
+ fail:
+ rpm_put_suppliers(dev);
+
+ device_links_read_unlock(idx);
+ }
+
spin_lock_irq(&dev->power.lock);
+ }
return retval;
}
@@ -515,7 +592,7 @@ static int rpm_suspend(struct device *dev, int rpmflags)
callback = RPM_GET_CALLBACK(dev, runtime_suspend);
- dev_pm_enable_wake_irq(dev);
+ dev_pm_enable_wake_irq_check(dev, true);
retval = rpm_callback(callback, dev);
if (retval)
goto fail;
@@ -554,7 +631,7 @@ static int rpm_suspend(struct device *dev, int rpmflags)
return retval;
fail:
- dev_pm_disable_wake_irq(dev);
+ dev_pm_disable_wake_irq_check(dev);
__update_runtime_status(dev, RPM_ACTIVE);
dev->power.deferred_resume = false;
wake_up_all(&dev->power.wait_queue);
@@ -712,8 +789,8 @@ static int rpm_resume(struct device *dev, int rpmflags)
spin_lock(&parent->power.lock);
/*
- * We can resume if the parent's runtime PM is disabled or it
- * is set to ignore children.
+ * Resume the parent if it has runtime PM enabled and has not been
+ * set to ignore its children.
*/
if (!parent->power.disable_depth
&& !parent->power.ignore_children) {
@@ -737,12 +814,12 @@ static int rpm_resume(struct device *dev, int rpmflags)
callback = RPM_GET_CALLBACK(dev, runtime_resume);
- dev_pm_disable_wake_irq(dev);
+ dev_pm_disable_wake_irq_check(dev);
retval = rpm_callback(callback, dev);
if (retval) {
__update_runtime_status(dev, RPM_SUSPENDED);
pm_runtime_cancel_pending(dev);
- dev_pm_enable_wake_irq(dev);
+ dev_pm_enable_wake_irq_check(dev, false);
} else {
no_callback:
__update_runtime_status(dev, RPM_ACTIVE);
@@ -1027,7 +1104,17 @@ int __pm_runtime_set_status(struct device *dev, unsigned int status)
goto out_set;
if (status == RPM_SUSPENDED) {
- /* It always is possible to set the status to 'suspended'. */
+ /*
+ * It is invalid to suspend a device with an active child,
+ * unless it has been set to ignore its children.
+ */
+ if (!dev->power.ignore_children &&
+ atomic_read(&dev->power.child_count)) {
+ dev_err(dev, "runtime PM trying to suspend device but active child\n");
+ error = -EBUSY;
+ goto out;
+ }
+
if (parent) {
atomic_add_unless(&parent->power.child_count, -1, 0);
notify_parent = !parent->power.ignore_children;
@@ -1447,6 +1534,94 @@ void pm_runtime_remove(struct device *dev)
}
/**
+ * pm_runtime_clean_up_links - Prepare links to consumers for driver removal.
+ * @dev: Device whose driver is going to be removed.
+ *
+ * Check links from this device to any consumers and if any of them have active
+ * runtime PM references to the device, drop the usage counter of the device
+ * (once per link).
+ *
+ * Links with the DL_FLAG_STATELESS flag set are ignored.
+ *
+ * Since the device is guaranteed to be runtime-active at the point this is
+ * called, nothing else needs to be done here.
+ *
+ * Moreover, this is called after device_links_busy() has returned 'false', so
+ * the status of each link is guaranteed to be DL_STATE_SUPPLIER_UNBIND and
+ * therefore rpm_active can't be manipulated concurrently.
+ */
+void pm_runtime_clean_up_links(struct device *dev)
+{
+ struct device_link *link;
+ int idx;
+
+ idx = device_links_read_lock();
+
+ list_for_each_entry_rcu(link, &dev->links.consumers, s_node) {
+ if (link->flags & DL_FLAG_STATELESS)
+ continue;
+
+ if (link->rpm_active) {
+ pm_runtime_put_noidle(dev);
+ link->rpm_active = false;
+ }
+ }
+
+ device_links_read_unlock(idx);
+}
+
+/**
+ * pm_runtime_get_suppliers - Resume and reference-count supplier devices.
+ * @dev: Consumer device.
+ */
+void pm_runtime_get_suppliers(struct device *dev)
+{
+ struct device_link *link;
+ int idx;
+
+ idx = device_links_read_lock();
+
+ list_for_each_entry_rcu(link, &dev->links.suppliers, c_node)
+ if (link->flags & DL_FLAG_PM_RUNTIME)
+ pm_runtime_get_sync(link->supplier);
+
+ device_links_read_unlock(idx);
+}
+
+/**
+ * pm_runtime_put_suppliers - Drop references to supplier devices.
+ * @dev: Consumer device.
+ */
+void pm_runtime_put_suppliers(struct device *dev)
+{
+ struct device_link *link;
+ int idx;
+
+ idx = device_links_read_lock();
+
+ list_for_each_entry_rcu(link, &dev->links.suppliers, c_node)
+ if (link->flags & DL_FLAG_PM_RUNTIME)
+ pm_runtime_put(link->supplier);
+
+ device_links_read_unlock(idx);
+}
+
+void pm_runtime_new_link(struct device *dev)
+{
+ spin_lock_irq(&dev->power.lock);
+ dev->power.links_count++;
+ spin_unlock_irq(&dev->power.lock);
+}
+
+void pm_runtime_drop_link(struct device *dev)
+{
+ spin_lock_irq(&dev->power.lock);
+ WARN_ON(dev->power.links_count == 0);
+ dev->power.links_count--;
+ spin_unlock_irq(&dev->power.lock);
+}
+
+/**
* pm_runtime_force_suspend - Force a device into suspend state if needed.
* @dev: Device to suspend.
*
@@ -1478,6 +1653,16 @@ int pm_runtime_force_suspend(struct device *dev)
if (ret)
goto err;
+ /*
+ * Increase the runtime PM usage count for the device's parent, in case
+ * the device is in use when system suspend is invoked. This tells
+ * pm_runtime_force_resume() to resume the parent immediately, which is
+ * needed to be able to resume its children, instead of deferring the
+ * resume to be managed via runtime PM.
+ */
+ if (dev->parent && atomic_read(&dev->power.usage_count) > 1)
+ pm_runtime_get_noresume(dev->parent);
+
pm_runtime_set_suspended(dev);
return 0;
err:
@@ -1487,16 +1672,20 @@ err:
EXPORT_SYMBOL_GPL(pm_runtime_force_suspend);
/**
- * pm_runtime_force_resume - Force a device into resume state.
+ * pm_runtime_force_resume - Force a device into resume state if needed.
* @dev: Device to resume.
*
* Prior invoking this function we expect the user to have brought the device
* into low power state by a call to pm_runtime_force_suspend(). Here we reverse
- * those actions and brings the device into full power. We update the runtime PM
- * status and re-enables runtime PM.
+ * those actions and bring the device into full power, if it is expected to be
+ * used on system resume. To distinguish that, we check whether the runtime PM
+ * usage count is greater than 1 (the PM core increases the usage count in the
+ * system PM prepare phase), as that indicates a real user (such as a subsystem,
+ * driver, userspace, etc.) is using it. If that is the case, the device is
+ * expected to be used on system resume as well, so we resume it here. Otherwise,
+ * we defer the resume to be managed via runtime PM.
*
- * Typically this function may be invoked from a system resume callback to make
- * sure the device is put into full power state.
+ * Typically this function may be invoked from a system resume callback.
*/
int pm_runtime_force_resume(struct device *dev)
{
@@ -1513,6 +1702,17 @@ int pm_runtime_force_resume(struct device *dev)
if (!pm_runtime_status_suspended(dev))
goto out;
+ /*
+ * Decrease the parent's runtime PM usage count, if we increased it
+ * during system suspend in pm_runtime_force_suspend().
+ */
+ if (atomic_read(&dev->power.usage_count) > 1) {
+ if (dev->parent)
+ pm_runtime_put_noidle(dev->parent);
+ } else {
+ goto out;
+ }
+
ret = pm_runtime_set_active(dev);
if (ret)
goto out;
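
A hedged sketch of the usual pairing (runtime callback names hypothetical): a driver reuses its runtime PM callbacks for system sleep by pointing the system sleep ops at these two helpers.

#include <linux/pm.h>
#include <linux/pm_runtime.h>

static int example_runtime_suspend(struct device *dev)
{
        /* Gate clocks, assert resets, etc. (device specific). */
        return 0;
}

static int example_runtime_resume(struct device *dev)
{
        /* Reverse of the suspend path. */
        return 0;
}

static const struct dev_pm_ops example_pm_ops = {
        SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
                                pm_runtime_force_resume)
        SET_RUNTIME_PM_OPS(example_runtime_suspend,
                           example_runtime_resume, NULL)
};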
diff --git a/drivers/base/power/sysfs.c b/drivers/base/power/sysfs.c
index a7b46798c81d..33b4b902741a 100644
--- a/drivers/base/power/sysfs.c
+++ b/drivers/base/power/sysfs.c
@@ -263,7 +263,11 @@ static ssize_t pm_qos_latency_tolerance_store(struct device *dev,
s32 value;
int ret;
- if (kstrtos32(buf, 0, &value)) {
+ if (kstrtos32(buf, 0, &value) == 0) {
+ /* Users can't write negative values directly */
+ if (value < 0)
+ return -EINVAL;
+ } else {
if (!strcmp(buf, "auto") || !strcmp(buf, "auto\n"))
value = PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT;
else if (!strcmp(buf, "any") || !strcmp(buf, "any\n"))
diff --git a/drivers/base/power/trace.c b/drivers/base/power/trace.c
index efec10b49d59..1cda505d6a85 100644
--- a/drivers/base/power/trace.c
+++ b/drivers/base/power/trace.c
@@ -10,6 +10,7 @@
#include <linux/pm-trace.h>
#include <linux/export.h>
#include <linux/rtc.h>
+#include <linux/suspend.h>
#include <linux/mc146818rtc.h>
@@ -74,6 +75,9 @@
#define DEVSEED (7919)
+bool pm_trace_rtc_abused __read_mostly;
+EXPORT_SYMBOL_GPL(pm_trace_rtc_abused);
+
static unsigned int dev_hash_value;
static int set_magic_time(unsigned int user, unsigned int file, unsigned int device)
@@ -104,6 +108,7 @@ static int set_magic_time(unsigned int user, unsigned int file, unsigned int device)
time.tm_min = (n % 20) * 3;
n /= 20;
mc146818_set_time(&time);
+ pm_trace_rtc_abused = true;
return n ? -1 : 0;
}
@@ -239,9 +244,31 @@ int show_trace_dev_match(char *buf, size_t size)
return ret;
}
+static int
+pm_trace_notify(struct notifier_block *nb, unsigned long mode, void *_unused)
+{
+ switch (mode) {
+ case PM_POST_HIBERNATION:
+ case PM_POST_SUSPEND:
+ if (pm_trace_rtc_abused) {
+ pm_trace_rtc_abused = false;
+ pr_warn("Possible incorrect RTC due to pm_trace, please use 'ntpdate' or 'rdate' to reset it.\n");
+ }
+ break;
+ default:
+ break;
+ }
+ return 0;
+}
+
+static struct notifier_block pm_trace_nb = {
+ .notifier_call = pm_trace_notify,
+};
+
static int early_resume_init(void)
{
hash_value_early_read = read_magic_time();
+ register_pm_notifier(&pm_trace_nb);
return 0;
}
diff --git a/drivers/base/power/wakeirq.c b/drivers/base/power/wakeirq.c
index 0d77cd6fd8d1..404d94c6c8bc 100644
--- a/drivers/base/power/wakeirq.c
+++ b/drivers/base/power/wakeirq.c
@@ -110,8 +110,10 @@ void dev_pm_clear_wake_irq(struct device *dev)
dev->power.wakeirq = NULL;
spin_unlock_irqrestore(&dev->power.lock, flags);
- if (wirq->dedicated_irq)
+ if (wirq->status & WAKE_IRQ_DEDICATED_ALLOCATED) {
free_irq(wirq->irq, wirq);
+ wirq->status &= ~WAKE_IRQ_DEDICATED_MASK;
+ }
kfree(wirq);
}
EXPORT_SYMBOL_GPL(dev_pm_clear_wake_irq);
@@ -179,7 +181,6 @@ int dev_pm_set_dedicated_wake_irq(struct device *dev, int irq)
wirq->dev = dev;
wirq->irq = irq;
- wirq->dedicated_irq = true;
irq_set_status_flags(irq, IRQ_NOAUTOEN);
/*
@@ -195,6 +196,8 @@ int dev_pm_set_dedicated_wake_irq(struct device *dev, int irq)
if (err)
goto err_free_irq;
+ wirq->status = WAKE_IRQ_DEDICATED_ALLOCATED;
+
return err;
err_free_irq:
@@ -210,9 +213,9 @@ EXPORT_SYMBOL_GPL(dev_pm_set_dedicated_wake_irq);
* dev_pm_enable_wake_irq - Enable device wake-up interrupt
* @dev: Device
*
- * Called from the bus code or the device driver for
- * runtime_suspend() to enable the wake-up interrupt while
- * the device is running.
+ * Optionally called from the bus code or the device driver for
+ * runtime_resume() to override the PM runtime core managed wake-up
+ * interrupt handling to enable the wake-up interrupt.
*
 * Note that for runtime_suspend() the wake-up interrupts
* should be unconditionally enabled unlike for suspend()
@@ -222,7 +225,7 @@ void dev_pm_enable_wake_irq(struct device *dev)
{
struct wake_irq *wirq = dev->power.wakeirq;
- if (wirq && wirq->dedicated_irq)
+ if (wirq && (wirq->status & WAKE_IRQ_DEDICATED_ALLOCATED))
enable_irq(wirq->irq);
}
EXPORT_SYMBOL_GPL(dev_pm_enable_wake_irq);
@@ -231,20 +234,73 @@ EXPORT_SYMBOL_GPL(dev_pm_enable_wake_irq);
* dev_pm_disable_wake_irq - Disable device wake-up interrupt
* @dev: Device
*
- * Called from the bus code or the device driver for
- * runtime_resume() to disable the wake-up interrupt while
- * the device is running.
+ * Optionally called from the bus code or the device driver for
+ * runtime_suspend() to override the PM runtime core managed wake-up
+ * interrupt handling to disable the wake-up interrupt.
*/
void dev_pm_disable_wake_irq(struct device *dev)
{
struct wake_irq *wirq = dev->power.wakeirq;
- if (wirq && wirq->dedicated_irq)
+ if (wirq && (wirq->status & WAKE_IRQ_DEDICATED_ALLOCATED))
disable_irq_nosync(wirq->irq);
}
EXPORT_SYMBOL_GPL(dev_pm_disable_wake_irq);
/**
+ * dev_pm_enable_wake_irq_check - Checks and enables wake-up interrupt
+ * @dev: Device
+ * @can_change_status: Can change wake-up interrupt status
+ *
+ * Enables wakeirq conditionally. We need to enable the wake-up interrupt
+ * lazily on the first rpm_suspend(). This is needed as the consumer device
+ * starts in RPM_SUSPENDED state, and the first pm_runtime_get() would
+ * otherwise try to disable an already disabled wakeirq. The wake-up interrupt
+ * starts disabled with IRQ_NOAUTOEN set.
+ *
+ * Should be only called from rpm_suspend() and rpm_resume() path.
+ * Caller must hold &dev->power.lock to change wirq->status
+ */
+void dev_pm_enable_wake_irq_check(struct device *dev,
+ bool can_change_status)
+{
+ struct wake_irq *wirq = dev->power.wakeirq;
+
+ if (!wirq || !(wirq->status & WAKE_IRQ_DEDICATED_MASK))
+ return;
+
+ if (likely(wirq->status & WAKE_IRQ_DEDICATED_MANAGED)) {
+ goto enable;
+ } else if (can_change_status) {
+ wirq->status |= WAKE_IRQ_DEDICATED_MANAGED;
+ goto enable;
+ }
+
+ return;
+
+enable:
+ enable_irq(wirq->irq);
+}
+
+/**
+ * dev_pm_disable_wake_irq_check - Checks and disables wake-up interrupt
+ * @dev: Device
+ *
+ * Disables wake-up interrupt conditionally based on status.
+ * Should be only called from rpm_suspend() and rpm_resume() path.
+ */
+void dev_pm_disable_wake_irq_check(struct device *dev)
+{
+ struct wake_irq *wirq = dev->power.wakeirq;
+
+ if (!wirq || !(wirq->status & WAKE_IRQ_DEDICATED_MASK))
+ return;
+
+ if (wirq->status & WAKE_IRQ_DEDICATED_MANAGED)
+ disable_irq_nosync(wirq->irq);
+}
+
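For context, a hedged probe-time sketch (the wake IRQ index is hypothetical): once a dedicated wake IRQ is registered as below, the _check() helpers above let the runtime PM core manage it, so most drivers need no explicit dev_pm_enable/disable_wake_irq() calls.

#include <linux/platform_device.h>
#include <linux/pm_wakeirq.h>
#include <linux/pm_wakeup.h>

static int example_probe(struct platform_device *pdev)
{
        int irq;

        irq = platform_get_irq(pdev, 1);        /* hypothetical wake IRQ */
        if (irq < 0)
                return irq;

        device_init_wakeup(&pdev->dev, true);
        return dev_pm_set_dedicated_wake_irq(&pdev->dev, irq);
}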
+/**
* dev_pm_arm_wake_irq - Arm device wake-up
* @wirq: Device wake-up interrupt
*
diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
index 62e4de2aa8d1..bf9ba26981a5 100644
--- a/drivers/base/power/wakeup.c
+++ b/drivers/base/power/wakeup.c
@@ -811,7 +811,7 @@ void pm_print_active_wakeup_sources(void)
rcu_read_lock();
list_for_each_entry_rcu(ws, &wakeup_sources, entry) {
if (ws->active) {
- pr_info("active wakeup source: %s\n", ws->name);
+ pr_debug("active wakeup source: %s\n", ws->name);
active = 1;
} else if (!active &&
(!last_activity_ws ||
@@ -822,7 +822,7 @@ void pm_print_active_wakeup_sources(void)
}
if (!active && last_activity_ws)
- pr_info("last active wakeup source: %s\n",
+ pr_debug("last active wakeup source: %s\n",
last_activity_ws->name);
rcu_read_unlock();
}
@@ -905,7 +905,7 @@ bool pm_get_wakeup_count(unsigned int *count, bool block)
split_counters(&cnt, &inpr);
if (inpr == 0 || signal_pending(current))
break;
-
+ pm_print_active_wakeup_sources();
schedule();
}
finish_wait(&wakeup_count_wait_queue, &wait);
diff --git a/drivers/base/regmap/regcache-lzo.c b/drivers/base/regmap/regcache-lzo.c
index 6f77d7319fc6..4ff311374c4a 100644
--- a/drivers/base/regmap/regcache-lzo.c
+++ b/drivers/base/regmap/regcache-lzo.c
@@ -236,15 +236,13 @@ static int regcache_lzo_read(struct regmap *map,
{
struct regcache_lzo_ctx *lzo_block, **lzo_blocks;
int ret, blkindex, blkpos;
- size_t blksize, tmp_dst_len;
+ size_t tmp_dst_len;
void *tmp_dst;
/* index of the compressed lzo block */
blkindex = regcache_lzo_get_blkindex(map, reg);
/* register index within the decompressed block */
blkpos = regcache_lzo_get_blkpos(map, reg);
- /* size of the compressed block */
- blksize = regcache_lzo_get_blksize(map);
lzo_blocks = map->cache;
lzo_block = lzo_blocks[blkindex];
@@ -275,15 +273,13 @@ static int regcache_lzo_write(struct regmap *map,
{
struct regcache_lzo_ctx *lzo_block, **lzo_blocks;
int ret, blkindex, blkpos;
- size_t blksize, tmp_dst_len;
+ size_t tmp_dst_len;
void *tmp_dst;
/* index of the compressed lzo block */
blkindex = regcache_lzo_get_blkindex(map, reg);
/* register index within the decompressed block */
blkpos = regcache_lzo_get_blkpos(map, reg);
- /* size of the compressed block */
- blksize = regcache_lzo_get_blksize(map);
lzo_blocks = map->cache;
lzo_block = lzo_blocks[blkindex];
diff --git a/drivers/base/soc.c b/drivers/base/soc.c
index b63f23e6ad61..dc26e5949a32 100644
--- a/drivers/base/soc.c
+++ b/drivers/base/soc.c
@@ -13,6 +13,7 @@
#include <linux/spinlock.h>
#include <linux/sys_soc.h>
#include <linux/err.h>
+#include <linux/glob.h>
static DEFINE_IDA(soc_ida);
@@ -113,6 +114,12 @@ struct soc_device *soc_device_register(struct soc_device_attribute *soc_dev_attr
struct soc_device *soc_dev;
int ret;
+ if (!soc_bus_type.p) {
+ ret = bus_register(&soc_bus_type);
+ if (ret)
+ goto out1;
+ }
+
soc_dev = kzalloc(sizeof(*soc_dev), GFP_KERNEL);
if (!soc_dev) {
ret = -ENOMEM;
@@ -156,6 +163,78 @@ void soc_device_unregister(struct soc_device *soc_dev)
static int __init soc_bus_register(void)
{
+ if (soc_bus_type.p)
+ return 0;
+
return bus_register(&soc_bus_type);
}
core_initcall(soc_bus_register);
+
+static int soc_device_match_one(struct device *dev, void *arg)
+{
+ struct soc_device *soc_dev = container_of(dev, struct soc_device, dev);
+ const struct soc_device_attribute *match = arg;
+
+ if (match->machine &&
+ (!soc_dev->attr->machine ||
+ !glob_match(match->machine, soc_dev->attr->machine)))
+ return 0;
+
+ if (match->family &&
+ (!soc_dev->attr->family ||
+ !glob_match(match->family, soc_dev->attr->family)))
+ return 0;
+
+ if (match->revision &&
+ (!soc_dev->attr->revision ||
+ !glob_match(match->revision, soc_dev->attr->revision)))
+ return 0;
+
+ if (match->soc_id &&
+ (!soc_dev->attr->soc_id ||
+ !glob_match(match->soc_id, soc_dev->attr->soc_id)))
+ return 0;
+
+ return 1;
+}
+
+/*
+ * soc_device_match - identify the SoC in the machine
+ * @matches: zero-terminated array of possible matches
+ *
+ * returns the first matching entry of the argument array, or NULL
+ * if none of them match.
+ *
+ * This function is meant as a helper in place of of_match_node()
+ * in cases where either no device tree is available or the information
+ * in a device node is insufficient to identify a particular variant
+ * by its compatible strings or other properties. For new devices,
+ * the DT binding should always provide unique compatible strings
+ * that allow the use of of_match_node() instead.
+ *
+ * The calling function can use the .data entry of the
+ * soc_device_attribute to pass a structure or function pointer for
+ * each entry.
+ */
+const struct soc_device_attribute *soc_device_match(
+ const struct soc_device_attribute *matches)
+{
+ int ret = 0;
+
+ if (!matches)
+ return NULL;
+
+ while (!ret) {
+ if (!(matches->machine || matches->family ||
+ matches->revision || matches->soc_id))
+ break;
+ ret = bus_for_each_dev(&soc_bus_type, NULL, (void *)matches,
+ soc_device_match_one);
+ if (!ret)
+ matches++;
+ else
+ return matches;
+ }
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(soc_device_match);
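
A hedged usage sketch (attribute values hypothetical): callers pass a sentinel-terminated array and act on the first match; the .data field can carry a per-entry payload if needed.

#include <linux/sys_soc.h>

static const struct soc_device_attribute example_quirks[] = {
        { .family = "Example SoC", .revision = "1.0" },
        { .family = "Example SoC", .revision = "2.*" },
        { /* sentinel */ }
};

static bool example_needs_quirk(void)
{
        return soc_device_match(example_quirks) != NULL;
}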
diff --git a/drivers/base/test/Kconfig b/drivers/base/test/Kconfig
new file mode 100644
index 000000000000..9aa0d45a60db
--- /dev/null
+++ b/drivers/base/test/Kconfig
@@ -0,0 +1,9 @@
+config TEST_ASYNC_DRIVER_PROBE
+ tristate "Build kernel module to test asynchronous driver probing"
+ depends on m
+ help
+ Enabling this option produces a kernel module that allows
+ testing asynchronous driver probing by the device core.
+ The module name will be test_async_driver_probe.ko
+
+ If unsure, say N.
diff --git a/drivers/base/test/Makefile b/drivers/base/test/Makefile
new file mode 100644
index 000000000000..90477c5fd9f9
--- /dev/null
+++ b/drivers/base/test/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_TEST_ASYNC_DRIVER_PROBE) += test_async_driver_probe.o
diff --git a/drivers/base/test/test_async_driver_probe.c b/drivers/base/test/test_async_driver_probe.c
new file mode 100644
index 000000000000..304d5c2bd5e9
--- /dev/null
+++ b/drivers/base/test/test_async_driver_probe.c
@@ -0,0 +1,169 @@
+/*
+ * Copyright (C) 2014 Google, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/hrtimer.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/time.h>
+
+#define TEST_PROBE_DELAY (5 * 1000) /* 5 sec */
+#define TEST_PROBE_THRESHOLD (TEST_PROBE_DELAY / 2)
+
+static int test_probe(struct platform_device *pdev)
+{
+ dev_info(&pdev->dev, "sleeping for %d msecs in probe\n",
+ TEST_PROBE_DELAY);
+ msleep(TEST_PROBE_DELAY);
+ dev_info(&pdev->dev, "done sleeping\n");
+
+ return 0;
+}
+
+static struct platform_driver async_driver = {
+ .driver = {
+ .name = "test_async_driver",
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
+ },
+ .probe = test_probe,
+};
+
+static struct platform_driver sync_driver = {
+ .driver = {
+ .name = "test_sync_driver",
+ .probe_type = PROBE_FORCE_SYNCHRONOUS,
+ },
+ .probe = test_probe,
+};
+
+static struct platform_device *async_dev_1, *async_dev_2;
+static struct platform_device *sync_dev_1;
+
+static int __init test_async_probe_init(void)
+{
+ ktime_t calltime, delta;
+ unsigned long long duration;
+ int error;
+
+ pr_info("registering first asynchronous device...\n");
+
+ async_dev_1 = platform_device_register_simple("test_async_driver", 1,
+ NULL, 0);
+ if (IS_ERR(async_dev_1)) {
+ error = PTR_ERR(async_dev_1);
+ pr_err("failed to create async_dev_1: %d", error);
+ return error;
+ }
+
+ pr_info("registering asynchronous driver...\n");
+ calltime = ktime_get();
+ error = platform_driver_register(&async_driver);
+ if (error) {
+ pr_err("Failed to register async_driver: %d\n", error);
+ goto err_unregister_async_dev_1;
+ }
+
+ delta = ktime_sub(ktime_get(), calltime);
+ duration = (unsigned long long) ktime_to_ms(delta);
+ pr_info("registration took %lld msecs\n", duration);
+ if (duration > TEST_PROBE_THRESHOLD) {
+ pr_err("test failed: probe took too long\n");
+ error = -ETIMEDOUT;
+ goto err_unregister_async_driver;
+ }
+
+ pr_info("registering second asynchronous device...\n");
+ calltime = ktime_get();
+ async_dev_2 = platform_device_register_simple("test_async_driver", 2,
+ NULL, 0);
+ if (IS_ERR(async_dev_2)) {
+ error = PTR_ERR(async_dev_2);
+ pr_err("failed to create async_dev_2: %d", error);
+ goto err_unregister_async_driver;
+ }
+
+ delta = ktime_sub(ktime_get(), calltime);
+ duration = (unsigned long long) ktime_to_ms(delta);
+ pr_info("registration took %lld msecs\n", duration);
+ if (duration > TEST_PROBE_THRESHOLD) {
+ pr_err("test failed: probe took too long\n");
+ error = -ETIMEDOUT;
+ goto err_unregister_async_dev_2;
+ }
+
+ pr_info("registering synchronous driver...\n");
+
+ error = platform_driver_register(&sync_driver);
+ if (error) {
+ pr_err("Failed to register async_driver: %d\n", error);
+ goto err_unregister_async_dev_2;
+ }
+
+ pr_info("registering synchronous device...\n");
+ calltime = ktime_get();
+ sync_dev_1 = platform_device_register_simple("test_sync_driver", 1,
+ NULL, 0);
+ if (IS_ERR(sync_dev_1)) {
+ error = PTR_ERR(sync_dev_1);
+ pr_err("failed to create sync_dev_1: %d", error);
+ goto err_unregister_sync_driver;
+ }
+
+ delta = ktime_sub(ktime_get(), calltime);
+ duration = (unsigned long long) ktime_to_ms(delta);
+ pr_info("registration took %lld msecs\n", duration);
+ if (duration < TEST_PROBE_THRESHOLD) {
+ pr_err("test failed: probe was too quick\n");
+ error = -ETIMEDOUT;
+ goto err_unregister_sync_dev_1;
+ }
+
+ pr_info("completed successfully");
+
+ return 0;
+
+err_unregister_sync_dev_1:
+ platform_device_unregister(sync_dev_1);
+
+err_unregister_sync_driver:
+ platform_driver_unregister(&sync_driver);
+
+err_unregister_async_dev_2:
+ platform_device_unregister(async_dev_2);
+
+err_unregister_async_driver:
+ platform_driver_unregister(&async_driver);
+
+err_unregister_async_dev_1:
+ platform_device_unregister(async_dev_1);
+
+ return error;
+}
+module_init(test_async_probe_init);
+
+static void __exit test_async_probe_exit(void)
+{
+ platform_driver_unregister(&async_driver);
+ platform_driver_unregister(&sync_driver);
+ platform_device_unregister(async_dev_1);
+ platform_device_unregister(async_dev_2);
+ platform_device_unregister(sync_dev_1);
+}
+module_exit(test_async_probe_exit);
+
+MODULE_DESCRIPTION("Test module for asynchronous driver probing");
+MODULE_AUTHOR("Dmitry Torokhov <dtor@chromium.org>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/base/topology.c b/drivers/base/topology.c
index df3c97cb4c99..d6ec1c546f5b 100644
--- a/drivers/base/topology.c
+++ b/drivers/base/topology.c
@@ -118,51 +118,19 @@ static int topology_add_dev(unsigned int cpu)
return sysfs_create_group(&dev->kobj, &topology_attr_group);
}
-static void topology_remove_dev(unsigned int cpu)
+static int topology_remove_dev(unsigned int cpu)
{
struct device *dev = get_cpu_device(cpu);
sysfs_remove_group(&dev->kobj, &topology_attr_group);
-}
-
-static int topology_cpu_callback(struct notifier_block *nfb,
- unsigned long action, void *hcpu)
-{
- unsigned int cpu = (unsigned long)hcpu;
- int rc = 0;
-
- switch (action) {
- case CPU_UP_PREPARE:
- case CPU_UP_PREPARE_FROZEN:
- rc = topology_add_dev(cpu);
- break;
- case CPU_UP_CANCELED:
- case CPU_UP_CANCELED_FROZEN:
- case CPU_DEAD:
- case CPU_DEAD_FROZEN:
- topology_remove_dev(cpu);
- break;
- }
- return notifier_from_errno(rc);
+ return 0;
}
static int topology_sysfs_init(void)
{
- int cpu;
- int rc = 0;
-
- cpu_notifier_register_begin();
-
- for_each_online_cpu(cpu) {
- rc = topology_add_dev(cpu);
- if (rc)
- goto out;
- }
- __hotcpu_notifier(topology_cpu_callback, 0);
-
-out:
- cpu_notifier_register_done();
- return rc;
+ return cpuhp_setup_state(CPUHP_TOPOLOGY_PREPARE,
+ "base/topology:prepare", topology_add_dev,
+ topology_remove_dev);
}
device_initcall(topology_sysfs_init);
diff --git a/drivers/bcma/host_pci.c b/drivers/bcma/host_pci.c
index bd46569e0e52..925842996986 100644
--- a/drivers/bcma/host_pci.c
+++ b/drivers/bcma/host_pci.c
@@ -295,6 +295,7 @@ static const struct pci_device_id bcma_pci_bridge_tbl[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4359) },
{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4360) },
{ PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, 0x4365, PCI_VENDOR_ID_DELL, 0x0016) },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, 0x4365, PCI_VENDOR_ID_DELL, 0x0018) },
{ PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, 0x4365, PCI_VENDOR_ID_FOXCONN, 0xe092) },
{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x43a0) },
{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x43a9) },
diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig
index 39dd30b6ef86..223ff2fcae7e 100644
--- a/drivers/block/Kconfig
+++ b/drivers/block/Kconfig
@@ -384,9 +384,12 @@ config BLK_DEV_RAM_DAX
allocated from highmem (only a problem for highmem systems).
config CDROM_PKTCDVD
- tristate "Packet writing on CD/DVD media"
+ tristate "Packet writing on CD/DVD media (DEPRECATED)"
depends on !UML
help
+ Note: This driver is deprecated and will be removed from the
+ kernel in the near future!
+
If you have a CDROM/DVD drive that supports packet writing, say
Y to include support. It should work with any MMC/Mt Fuji
compliant ATAPI or SCSI drive, which is just about any newer
diff --git a/drivers/block/brd.c b/drivers/block/brd.c
index 0c76d4016eeb..ad793f35632c 100644
--- a/drivers/block/brd.c
+++ b/drivers/block/brd.c
@@ -395,44 +395,9 @@ static long brd_direct_access(struct block_device *bdev, sector_t sector,
#define brd_direct_access NULL
#endif
-static int brd_ioctl(struct block_device *bdev, fmode_t mode,
- unsigned int cmd, unsigned long arg)
-{
- int error;
- struct brd_device *brd = bdev->bd_disk->private_data;
-
- if (cmd != BLKFLSBUF)
- return -ENOTTY;
-
- /*
- * ram device BLKFLSBUF has special semantics, we want to actually
- * release and destroy the ramdisk data.
- */
- mutex_lock(&brd_mutex);
- mutex_lock(&bdev->bd_mutex);
- error = -EBUSY;
- if (bdev->bd_openers <= 1) {
- /*
- * Kill the cache first, so it isn't written back to the
- * device.
- *
- * Another thread might instantiate more buffercache here,
- * but there is not much we can do to close that race.
- */
- kill_bdev(bdev);
- brd_free_pages(brd);
- error = 0;
- }
- mutex_unlock(&bdev->bd_mutex);
- mutex_unlock(&brd_mutex);
-
- return error;
-}
-
static const struct block_device_operations brd_fops = {
.owner = THIS_MODULE,
.rw_page = brd_rw_page,
- .ioctl = brd_ioctl,
.direct_access = brd_direct_access,
};
@@ -443,8 +408,8 @@ static int rd_nr = CONFIG_BLK_DEV_RAM_COUNT;
module_param(rd_nr, int, S_IRUGO);
MODULE_PARM_DESC(rd_nr, "Maximum number of brd devices");
-int rd_size = CONFIG_BLK_DEV_RAM_SIZE;
-module_param(rd_size, int, S_IRUGO);
+unsigned long rd_size = CONFIG_BLK_DEV_RAM_SIZE;
+module_param(rd_size, ulong, S_IRUGO);
MODULE_PARM_DESC(rd_size, "Size of each RAM disk in kbytes.");
static int max_part = 1;
diff --git a/drivers/block/cciss_scsi.c b/drivers/block/cciss_scsi.c
index 1537302e56e3..a18de9d727b0 100644
--- a/drivers/block/cciss_scsi.c
+++ b/drivers/block/cciss_scsi.c
@@ -260,43 +260,6 @@ scsi_cmd_stack_free(ctlr_info_t *h)
}
#if 0
-static int xmargin=8;
-static int amargin=60;
-
-static void
-print_bytes (unsigned char *c, int len, int hex, int ascii)
-{
-
- int i;
- unsigned char *x;
-
- if (hex)
- {
- x = c;
- for (i=0;i<len;i++)
- {
- if ((i % xmargin) == 0 && i>0) printk("\n");
- if ((i % xmargin) == 0) printk("0x%04x:", i);
- printk(" %02x", *x);
- x++;
- }
- printk("\n");
- }
- if (ascii)
- {
- x = c;
- for (i=0;i<len;i++)
- {
- if ((i % amargin) == 0 && i>0) printk("\n");
- if ((i % amargin) == 0) printk("0x%04x:", i);
- if (*x > 26 && *x < 128) printk("%c", *x);
- else printk(".");
- x++;
- }
- printk("\n");
- }
-}
-
static void
print_cmd(CommandList_struct *cp)
{
@@ -305,30 +268,13 @@ print_cmd(CommandList_struct *cp)
printk("sgtot:%d\n", cp->Header.SGTotal);
printk("Tag:0x%08x/0x%08x\n", cp->Header.Tag.upper,
cp->Header.Tag.lower);
- printk("LUN:0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
- cp->Header.LUN.LunAddrBytes[0],
- cp->Header.LUN.LunAddrBytes[1],
- cp->Header.LUN.LunAddrBytes[2],
- cp->Header.LUN.LunAddrBytes[3],
- cp->Header.LUN.LunAddrBytes[4],
- cp->Header.LUN.LunAddrBytes[5],
- cp->Header.LUN.LunAddrBytes[6],
- cp->Header.LUN.LunAddrBytes[7]);
+ printk("LUN:0x%8phN\n", cp->Header.LUN.LunAddrBytes);
printk("CDBLen:%d\n", cp->Request.CDBLen);
printk("Type:%d\n",cp->Request.Type.Type);
printk("Attr:%d\n",cp->Request.Type.Attribute);
printk(" Dir:%d\n",cp->Request.Type.Direction);
printk("Timeout:%d\n",cp->Request.Timeout);
- printk( "CDB: %02x %02x %02x %02x %02x %02x %02x %02x"
- " %02x %02x %02x %02x %02x %02x %02x %02x\n",
- cp->Request.CDB[0], cp->Request.CDB[1],
- cp->Request.CDB[2], cp->Request.CDB[3],
- cp->Request.CDB[4], cp->Request.CDB[5],
- cp->Request.CDB[6], cp->Request.CDB[7],
- cp->Request.CDB[8], cp->Request.CDB[9],
- cp->Request.CDB[10], cp->Request.CDB[11],
- cp->Request.CDB[12], cp->Request.CDB[13],
- cp->Request.CDB[14], cp->Request.CDB[15]),
+ printk("CDB: %16ph\n", cp->Request.CDB);
printk("edesc.Addr: 0x%08x/0%08x, Len = %d\n",
cp->ErrDesc.Addr.upper, cp->ErrDesc.Addr.lower,
cp->ErrDesc.Len);
@@ -340,9 +286,7 @@ print_cmd(CommandList_struct *cp)
printk("offense size:%d\n", cp->err_info->MoreErrInfo.Invalid_Cmd.offense_size);
printk("offense byte:%d\n", cp->err_info->MoreErrInfo.Invalid_Cmd.offense_num);
printk("offense value:%d\n", cp->err_info->MoreErrInfo.Invalid_Cmd.offense_value);
-
}
-
#endif
static int
@@ -782,8 +726,10 @@ static void complete_scsi_command(CommandList_struct *c, int timeout,
"reported\n", c);
break;
case CMD_INVALID: {
- /* print_bytes(c, sizeof(*c), 1, 0);
- print_cmd(c); */
+ /*
+ print_hex_dump(KERN_INFO, "", DUMP_PREFIX_OFFSET, 16, 1, c, sizeof(*c), false);
+ print_cmd(c);
+ */
/* We get CMD_INVALID if you address a non-existent tape drive instead
of a selection timeout (no response). You will see this if you yank
out a tape drive, then try to access it. This is kind of a shame
@@ -985,8 +931,10 @@ cciss_scsi_interpret_error(ctlr_info_t *h, CommandList_struct *c)
dev_warn(&h->pdev->dev,
"%p is reported invalid (probably means "
"target device no longer present)\n", c);
- /* print_bytes((unsigned char *) c, sizeof(*c), 1, 0);
- print_cmd(c); */
+ /*
+ print_hex_dump(KERN_INFO, "", DUMP_PREFIX_OFFSET, 16, 1, c, sizeof(*c), false);
+ print_cmd(c);
+ */
}
break;
case CMD_PROTOCOL_ERR:
diff --git a/drivers/block/drbd/drbd_actlog.c b/drivers/block/drbd/drbd_actlog.c
index 2d3d50ab74bf..8d7bcfa49c12 100644
--- a/drivers/block/drbd/drbd_actlog.c
+++ b/drivers/block/drbd/drbd_actlog.c
@@ -148,7 +148,7 @@ static int _drbd_md_sync_page_io(struct drbd_device *device,
if ((op == REQ_OP_WRITE) && !test_bit(MD_NO_FUA, &device->flags))
op_flags |= REQ_FUA | REQ_PREFLUSH;
- op_flags |= REQ_SYNC | REQ_NOIDLE;
+ op_flags |= REQ_SYNC;
bio = bio_alloc_drbd(GFP_NOIO);
bio->bi_bdev = bdev->md_bdev;
diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
index 942384f34e22..c7728dd77230 100644
--- a/drivers/block/drbd/drbd_receiver.c
+++ b/drivers/block/drbd/drbd_receiver.c
@@ -1266,7 +1266,7 @@ static void submit_one_flush(struct drbd_device *device, struct issue_flush_cont
bio->bi_bdev = device->ldev->backing_bdev;
bio->bi_private = octx;
bio->bi_end_io = one_flush_endio;
- bio_set_op_attrs(bio, REQ_OP_FLUSH, WRITE_FLUSH);
+ bio->bi_opf = REQ_OP_FLUSH | REQ_PREFLUSH;
device->flush_jif = jiffies;
set_bit(FLUSH_PENDING, &device->flags);
@@ -1648,20 +1648,8 @@ next_bio:
page_chain_for_each(page) {
unsigned len = min_t(unsigned, data_size, PAGE_SIZE);
- if (!bio_add_page(bio, page, len, 0)) {
- /* A single page must always be possible!
- * But in case it fails anyways,
- * we deal with it, and complain (below). */
- if (bio->bi_vcnt == 0) {
- drbd_err(device,
- "bio_add_page failed for len=%u, "
- "bi_vcnt=0 (bi_sector=%llu)\n",
- len, (uint64_t)bio->bi_iter.bi_sector);
- err = -ENOSPC;
- goto fail;
- }
+ if (!bio_add_page(bio, page, len, 0))
goto next_bio;
- }
data_size -= len;
sector += len >> 9;
--nr_pages;
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index e3d8e4ced4a2..a391a3cfb3fe 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -3806,14 +3806,10 @@ static int __floppy_read_block_0(struct block_device *bdev, int drive)
cbdata.drive = drive;
- bio_init(&bio);
- bio.bi_io_vec = &bio_vec;
- bio_vec.bv_page = page;
- bio_vec.bv_len = size;
- bio_vec.bv_offset = 0;
- bio.bi_vcnt = 1;
- bio.bi_iter.bi_size = size;
+ bio_init(&bio, &bio_vec, 1);
bio.bi_bdev = bdev;
+ bio_add_page(&bio, page, size, 0);
+
bio.bi_iter.bi_sector = 0;
bio.bi_flags |= (1 << BIO_QUIET);
bio.bi_private = &cbdata;
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index fa1b7a90ba11..4af818766797 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -1646,7 +1646,7 @@ static int loop_queue_rq(struct blk_mq_hw_ctx *hctx,
blk_mq_start_request(bd->rq);
if (lo->lo_state != Lo_bound)
- return -EIO;
+ return BLK_MQ_RQ_QUEUE_ERROR;
switch (req_op(cmd->rq)) {
case REQ_OP_FLUSH:
diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c
index 3cfd879267b2..f96ab717534c 100644
--- a/drivers/block/mtip32xx/mtip32xx.c
+++ b/drivers/block/mtip32xx/mtip32xx.c
@@ -2035,18 +2035,14 @@ static int exec_drive_taskfile(struct driver_data *dd,
taskout = req_task->out_size;
taskin = req_task->in_size;
/* 130560 = 512 * 0xFF*/
- if (taskin > 130560 || taskout > 130560) {
- err = -EINVAL;
- goto abort;
- }
+ if (taskin > 130560 || taskout > 130560)
+ return -EINVAL;
if (taskout) {
outbuf = memdup_user(buf + outtotal, taskout);
- if (IS_ERR(outbuf)) {
- err = PTR_ERR(outbuf);
- outbuf = NULL;
- goto abort;
- }
+ if (IS_ERR(outbuf))
+ return PTR_ERR(outbuf);
+
outbuf_dma = pci_map_single(dd->pdev,
outbuf,
taskout,
@@ -3937,8 +3933,10 @@ static int mtip_block_initialize(struct driver_data *dd)
/* Generate the disk name, implemented same as in sd.c */
do {
- if (!ida_pre_get(&rssd_index_ida, GFP_KERNEL))
+ if (!ida_pre_get(&rssd_index_ida, GFP_KERNEL)) {
+ rv = -ENOMEM;
goto ida_get_error;
+ }
spin_lock(&rssd_index_lock);
rv = ida_get_new(&rssd_index_ida, &index);
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index 7a1048755914..99c84468f154 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -41,26 +41,34 @@
#include <linux/nbd.h>
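+/*
+ * One nbd_sock per server connection; tx_lock serializes command and
+ * data transmission on that socket.
+ */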
+struct nbd_sock {
+ struct socket *sock;
+ struct mutex tx_lock;
+};
+
#define NBD_TIMEDOUT 0
#define NBD_DISCONNECT_REQUESTED 1
+#define NBD_DISCONNECTED 2
+#define NBD_RUNNING 3
struct nbd_device {
u32 flags;
unsigned long runtime_flags;
- struct socket * sock; /* If == NULL, device is not ready, yet */
+ struct nbd_sock **socks;
int magic;
struct blk_mq_tag_set tag_set;
- struct mutex tx_lock;
+ struct mutex config_lock;
struct gendisk *disk;
- int blksize;
+ int num_connections;
+ atomic_t recv_threads;
+ wait_queue_head_t recv_wq;
+ loff_t blksize;
loff_t bytesize;
- /* protects initialization and shutdown of the socket */
- spinlock_t sock_lock;
struct task_struct *task_recv;
- struct task_struct *task_send;
+ struct task_struct *task_setup;
#if IS_ENABLED(CONFIG_DEBUG_FS)
struct dentry *dbg_dir;
@@ -69,7 +77,7 @@ struct nbd_device {
struct nbd_cmd {
struct nbd_device *nbd;
- struct list_head list;
+ struct completion send_complete;
};
#if IS_ENABLED(CONFIG_DEBUG_FS)
@@ -126,7 +134,7 @@ static void nbd_size_update(struct nbd_device *nbd, struct block_device *bdev)
}
static int nbd_size_set(struct nbd_device *nbd, struct block_device *bdev,
- int blocksize, int nr_blocks)
+ loff_t blocksize, loff_t nr_blocks)
{
int ret;
@@ -135,7 +143,7 @@ static int nbd_size_set(struct nbd_device *nbd, struct block_device *bdev,
return ret;
nbd->blksize = blocksize;
- nbd->bytesize = (loff_t)blocksize * (loff_t)nr_blocks;
+ nbd->bytesize = blocksize * nr_blocks;
nbd_size_update(nbd, bdev);
@@ -159,22 +167,20 @@ static void nbd_end_request(struct nbd_cmd *cmd)
*/
static void sock_shutdown(struct nbd_device *nbd)
{
- struct socket *sock;
-
- spin_lock(&nbd->sock_lock);
+ int i;
- if (!nbd->sock) {
- spin_unlock(&nbd->sock_lock);
+ if (nbd->num_connections == 0)
+ return;
+ if (test_and_set_bit(NBD_DISCONNECTED, &nbd->runtime_flags))
return;
- }
-
- sock = nbd->sock;
- dev_warn(disk_to_dev(nbd->disk), "shutting down socket\n");
- nbd->sock = NULL;
- spin_unlock(&nbd->sock_lock);
- kernel_sock_shutdown(sock, SHUT_RDWR);
- sockfd_put(sock);
+ for (i = 0; i < nbd->num_connections; i++) {
+ struct nbd_sock *nsock = nbd->socks[i];
+ mutex_lock(&nsock->tx_lock);
+ kernel_sock_shutdown(nsock->sock, SHUT_RDWR);
+ mutex_unlock(&nsock->tx_lock);
+ }
+ dev_warn(disk_to_dev(nbd->disk), "shutting down sockets\n");
}
static enum blk_eh_timer_return nbd_xmit_timeout(struct request *req,
@@ -182,42 +188,38 @@ static enum blk_eh_timer_return nbd_xmit_timeout(struct request *req,
{
struct nbd_cmd *cmd = blk_mq_rq_to_pdu(req);
struct nbd_device *nbd = cmd->nbd;
- struct socket *sock = NULL;
-
- spin_lock(&nbd->sock_lock);
+ dev_err(nbd_to_dev(nbd), "Connection timed out, shutting down connection\n");
set_bit(NBD_TIMEDOUT, &nbd->runtime_flags);
-
- if (nbd->sock) {
- sock = nbd->sock;
- get_file(sock->file);
- }
-
- spin_unlock(&nbd->sock_lock);
- if (sock) {
- kernel_sock_shutdown(sock, SHUT_RDWR);
- sockfd_put(sock);
- }
-
req->errors++;
- dev_err(nbd_to_dev(nbd), "Connection timed out, shutting down connection\n");
+
+ /*
+ * If our disconnect packet times out then we're already holding the
+ * config_lock and could deadlock here, so just set an error and return;
+ * we'll handle shutting everything down later.
+ */
+ if (req->cmd_type == REQ_TYPE_DRV_PRIV)
+ return BLK_EH_HANDLED;
+ mutex_lock(&nbd->config_lock);
+ sock_shutdown(nbd);
+ mutex_unlock(&nbd->config_lock);
return BLK_EH_HANDLED;
}
/*
* Send or receive packet.
*/
-static int sock_xmit(struct nbd_device *nbd, int send, void *buf, int size,
- int msg_flags)
+static int sock_xmit(struct nbd_device *nbd, int index, int send, void *buf,
+ int size, int msg_flags)
{
- struct socket *sock = nbd->sock;
+ struct socket *sock = nbd->socks[index]->sock;
int result;
struct msghdr msg;
struct kvec iov;
unsigned long pflags = current->flags;
if (unlikely(!sock)) {
- dev_err(disk_to_dev(nbd->disk),
+ dev_err_ratelimited(disk_to_dev(nbd->disk),
"Attempted %s on closed socket in sock_xmit\n",
(send ? "send" : "recv"));
return -EINVAL;
@@ -254,29 +256,29 @@ static int sock_xmit(struct nbd_device *nbd, int send, void *buf, int size,
return result;
}
-static inline int sock_send_bvec(struct nbd_device *nbd, struct bio_vec *bvec,
- int flags)
+static inline int sock_send_bvec(struct nbd_device *nbd, int index,
+ struct bio_vec *bvec, int flags)
{
int result;
void *kaddr = kmap(bvec->bv_page);
- result = sock_xmit(nbd, 1, kaddr + bvec->bv_offset,
+ result = sock_xmit(nbd, index, 1, kaddr + bvec->bv_offset,
bvec->bv_len, flags);
kunmap(bvec->bv_page);
return result;
}
/* always call with the tx_lock held */
-static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd)
+static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
{
struct request *req = blk_mq_rq_from_pdu(cmd);
int result, flags;
struct nbd_request request;
unsigned long size = blk_rq_bytes(req);
+ struct bio *bio;
u32 type;
+ u32 tag = blk_mq_unique_tag(req);
- if (req->cmd_type == REQ_TYPE_DRV_PRIV)
- type = NBD_CMD_DISC;
- else if (req_op(req) == REQ_OP_DISCARD)
+ if (req_op(req) == REQ_OP_DISCARD)
type = NBD_CMD_TRIM;
else if (req_op(req) == REQ_OP_FLUSH)
type = NBD_CMD_FLUSH;
@@ -288,73 +290,89 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd)
memset(&request, 0, sizeof(request));
request.magic = htonl(NBD_REQUEST_MAGIC);
request.type = htonl(type);
- if (type != NBD_CMD_FLUSH && type != NBD_CMD_DISC) {
+ if (type != NBD_CMD_FLUSH) {
request.from = cpu_to_be64((u64)blk_rq_pos(req) << 9);
request.len = htonl(size);
}
- memcpy(request.handle, &req->tag, sizeof(req->tag));
+ memcpy(request.handle, &tag, sizeof(tag));
dev_dbg(nbd_to_dev(nbd), "request %p: sending control (%s@%llu,%uB)\n",
cmd, nbdcmd_to_ascii(type),
(unsigned long long)blk_rq_pos(req) << 9, blk_rq_bytes(req));
- result = sock_xmit(nbd, 1, &request, sizeof(request),
+ result = sock_xmit(nbd, index, 1, &request, sizeof(request),
(type == NBD_CMD_WRITE) ? MSG_MORE : 0);
if (result <= 0) {
- dev_err(disk_to_dev(nbd->disk),
+ dev_err_ratelimited(disk_to_dev(nbd->disk),
"Send control failed (result %d)\n", result);
return -EIO;
}
- if (type == NBD_CMD_WRITE) {
- struct req_iterator iter;
+ if (type != NBD_CMD_WRITE)
+ return 0;
+
+ flags = 0;
+ bio = req->bio;
+ while (bio) {
+ struct bio *next = bio->bi_next;
+ struct bvec_iter iter;
struct bio_vec bvec;
- /*
- * we are really probing at internals to determine
- * whether to set MSG_MORE or not...
- */
- rq_for_each_segment(bvec, req, iter) {
- flags = 0;
- if (!rq_iter_last(bvec, iter))
+
+ bio_for_each_segment(bvec, bio, iter) {
+ bool is_last = !next && bio_iter_last(bvec, iter);
+
+ flags = is_last ? 0 : MSG_MORE;
dev_dbg(nbd_to_dev(nbd), "request %p: sending %d bytes data\n",
cmd, bvec.bv_len);
- result = sock_send_bvec(nbd, &bvec, flags);
+ result = sock_send_bvec(nbd, index, &bvec, flags);
if (result <= 0) {
dev_err(disk_to_dev(nbd->disk),
"Send data failed (result %d)\n",
result);
return -EIO;
}
+ /*
+ * The completion might already have come in,
+ * so break for the last one instead of letting
+ * the iterator do it. This prevents use-after-free
+ * of the bio.
+ */
+ if (is_last)
+ break;
}
+ bio = next;
}
return 0;
}
-static inline int sock_recv_bvec(struct nbd_device *nbd, struct bio_vec *bvec)
+static inline int sock_recv_bvec(struct nbd_device *nbd, int index,
+ struct bio_vec *bvec)
{
int result;
void *kaddr = kmap(bvec->bv_page);
- result = sock_xmit(nbd, 0, kaddr + bvec->bv_offset, bvec->bv_len,
- MSG_WAITALL);
+ result = sock_xmit(nbd, index, 0, kaddr + bvec->bv_offset,
+ bvec->bv_len, MSG_WAITALL);
kunmap(bvec->bv_page);
return result;
}
/* NULL returned = something went wrong, inform userspace */
-static struct nbd_cmd *nbd_read_stat(struct nbd_device *nbd)
+static struct nbd_cmd *nbd_read_stat(struct nbd_device *nbd, int index)
{
int result;
struct nbd_reply reply;
struct nbd_cmd *cmd;
struct request *req = NULL;
u16 hwq;
- int tag;
+ u32 tag;
reply.magic = 0;
- result = sock_xmit(nbd, 0, &reply, sizeof(reply), MSG_WAITALL);
+ result = sock_xmit(nbd, index, 0, &reply, sizeof(reply), MSG_WAITALL);
if (result <= 0) {
- dev_err(disk_to_dev(nbd->disk),
- "Receive control failed (result %d)\n", result);
+ if (!test_bit(NBD_DISCONNECTED, &nbd->runtime_flags) &&
+ !test_bit(NBD_DISCONNECT_REQUESTED, &nbd->runtime_flags))
+ dev_err(disk_to_dev(nbd->disk),
+ "Receive control failed (result %d)\n", result);
return ERR_PTR(result);
}
@@ -364,7 +382,7 @@ static struct nbd_cmd *nbd_read_stat(struct nbd_device *nbd)
return ERR_PTR(-EPROTO);
}
- memcpy(&tag, reply.handle, sizeof(int));
+ memcpy(&tag, reply.handle, sizeof(u32));
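+ /* The handle echoes blk_mq_unique_tag(), which encodes both the
+ * hw queue index and the per-queue tag. */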
hwq = blk_mq_unique_tag_to_hwq(tag);
if (hwq < nbd->tag_set.nr_hw_queues)
@@ -376,7 +394,6 @@ static struct nbd_cmd *nbd_read_stat(struct nbd_device *nbd)
return ERR_PTR(-ENOENT);
}
cmd = blk_mq_rq_to_pdu(req);
-
if (ntohl(reply.error)) {
dev_err(disk_to_dev(nbd->disk), "Other side returned error (%d)\n",
ntohl(reply.error));
@@ -390,7 +407,7 @@ static struct nbd_cmd *nbd_read_stat(struct nbd_device *nbd)
struct bio_vec bvec;
rq_for_each_segment(bvec, req, iter) {
- result = sock_recv_bvec(nbd, &bvec);
+ result = sock_recv_bvec(nbd, index, &bvec);
if (result <= 0) {
dev_err(disk_to_dev(nbd->disk), "Receive data failed (result %d)\n",
result);
@@ -400,6 +417,9 @@ static struct nbd_cmd *nbd_read_stat(struct nbd_device *nbd)
dev_dbg(nbd_to_dev(nbd), "request %p: got %d bytes data\n",
cmd, bvec.bv_len);
}
+ } else {
+ /* See the comment in nbd_queue_rq. */
+ wait_for_completion(&cmd->send_complete);
}
return cmd;
}
@@ -418,25 +438,24 @@ static struct device_attribute pid_attr = {
.show = pid_show,
};
-static int nbd_thread_recv(struct nbd_device *nbd, struct block_device *bdev)
+struct recv_thread_args {
+ struct work_struct work;
+ struct nbd_device *nbd;
+ int index;
+};
+
+static void recv_work(struct work_struct *work)
{
+ struct recv_thread_args *args = container_of(work,
+ struct recv_thread_args,
+ work);
+ struct nbd_device *nbd = args->nbd;
struct nbd_cmd *cmd;
- int ret;
+ int ret = 0;
BUG_ON(nbd->magic != NBD_MAGIC);
-
- sk_set_memalloc(nbd->sock->sk);
-
- ret = device_create_file(disk_to_dev(nbd->disk), &pid_attr);
- if (ret) {
- dev_err(disk_to_dev(nbd->disk), "device_create_file failed!\n");
- return ret;
- }
-
- nbd_size_update(nbd, bdev);
-
while (1) {
- cmd = nbd_read_stat(nbd);
+ cmd = nbd_read_stat(nbd, args->index);
if (IS_ERR(cmd)) {
ret = PTR_ERR(cmd);
break;
@@ -445,10 +464,14 @@ static int nbd_thread_recv(struct nbd_device *nbd, struct block_device *bdev)
nbd_end_request(cmd);
}
- nbd_size_clear(nbd, bdev);
-
- device_remove_file(disk_to_dev(nbd->disk), &pid_attr);
- return ret;
+ /*
+ * We got an error, shut everybody down if this wasn't the result of a
+ * disconnect request.
+ */
+ if (ret && !test_bit(NBD_DISCONNECT_REQUESTED, &nbd->runtime_flags))
+ sock_shutdown(nbd);
+ atomic_dec(&nbd->recv_threads);
+ wake_up(&nbd->recv_wq);
}
static void nbd_clear_req(struct request *req, void *data, bool reserved)
@@ -466,51 +489,60 @@ static void nbd_clear_que(struct nbd_device *nbd)
{
BUG_ON(nbd->magic != NBD_MAGIC);
- /*
- * Because we have set nbd->sock to NULL under the tx_lock, all
- * modifications to the list must have completed by now.
- */
- BUG_ON(nbd->sock);
-
blk_mq_tagset_busy_iter(&nbd->tag_set, nbd_clear_req, NULL);
dev_dbg(disk_to_dev(nbd->disk), "queue cleared\n");
}
-static void nbd_handle_cmd(struct nbd_cmd *cmd)
+static void nbd_handle_cmd(struct nbd_cmd *cmd, int index)
{
struct request *req = blk_mq_rq_from_pdu(cmd);
struct nbd_device *nbd = cmd->nbd;
+ struct nbd_sock *nsock;
+
+ if (index >= nbd->num_connections) {
+ dev_err_ratelimited(disk_to_dev(nbd->disk),
+ "Attempted send on invalid socket\n");
+ goto error_out;
+ }
+
+ if (test_bit(NBD_DISCONNECTED, &nbd->runtime_flags)) {
+ dev_err_ratelimited(disk_to_dev(nbd->disk),
+ "Attempted send on closed socket\n");
+ goto error_out;
+ }
- if (req->cmd_type != REQ_TYPE_FS)
+ if (req->cmd_type != REQ_TYPE_FS &&
+ req->cmd_type != REQ_TYPE_DRV_PRIV)
goto error_out;
- if (rq_data_dir(req) == WRITE &&
+ if (req->cmd_type == REQ_TYPE_FS &&
+ rq_data_dir(req) == WRITE &&
(nbd->flags & NBD_FLAG_READ_ONLY)) {
- dev_err(disk_to_dev(nbd->disk),
- "Write on read-only\n");
+ dev_err_ratelimited(disk_to_dev(nbd->disk),
+ "Write on read-only\n");
goto error_out;
}
req->errors = 0;
- mutex_lock(&nbd->tx_lock);
- nbd->task_send = current;
- if (unlikely(!nbd->sock)) {
- mutex_unlock(&nbd->tx_lock);
- dev_err(disk_to_dev(nbd->disk),
- "Attempted send on closed socket\n");
+ nsock = nbd->socks[index];
+ mutex_lock(&nsock->tx_lock);
+ if (unlikely(!nsock->sock)) {
+ mutex_unlock(&nsock->tx_lock);
+ dev_err_ratelimited(disk_to_dev(nbd->disk),
+ "Attempted send on closed socket\n");
goto error_out;
}
- if (nbd_send_cmd(nbd, cmd) != 0) {
- dev_err(disk_to_dev(nbd->disk), "Request send failed\n");
+ if (nbd_send_cmd(nbd, cmd, index) != 0) {
+ dev_err_ratelimited(disk_to_dev(nbd->disk),
+ "Request send failed\n");
req->errors++;
nbd_end_request(cmd);
}
- nbd->task_send = NULL;
- mutex_unlock(&nbd->tx_lock);
+ mutex_unlock(&nsock->tx_lock);
return;
@@ -524,39 +556,70 @@ static int nbd_queue_rq(struct blk_mq_hw_ctx *hctx,
{
struct nbd_cmd *cmd = blk_mq_rq_to_pdu(bd->rq);
+ /*
+ * Since we look at the bio's to send the request over the network we
+ * need to make sure the completion work doesn't mark this request done
+ * before we are done doing our send. This keeps us from dereferencing
+ * freed data if we have particularly fast completions (ie we get the
+ * completion before we exit sock_xmit on the last bvec) or in the case
+ * that the server is misbehaving (or there was an error) before we're
+ * done sending everything over the wire.
+ */
+ init_completion(&cmd->send_complete);
blk_mq_start_request(bd->rq);
- nbd_handle_cmd(cmd);
+ nbd_handle_cmd(cmd, hctx->queue_num);
+ complete(&cmd->send_complete);
+
return BLK_MQ_RQ_QUEUE_OK;
}
-static int nbd_set_socket(struct nbd_device *nbd, struct socket *sock)
+static int nbd_add_socket(struct nbd_device *nbd, struct socket *sock)
{
- int ret = 0;
-
- spin_lock_irq(&nbd->sock_lock);
+ struct nbd_sock **socks;
+ struct nbd_sock *nsock;
- if (nbd->sock) {
- ret = -EBUSY;
- goto out;
+ if (!nbd->task_setup)
+ nbd->task_setup = current;
+ if (nbd->task_setup != current) {
+ dev_err(disk_to_dev(nbd->disk),
+ "Device being setup by another task");
+ return -EINVAL;
}
- nbd->sock = sock;
+ socks = krealloc(nbd->socks, (nbd->num_connections + 1) *
+ sizeof(struct nbd_sock *), GFP_KERNEL);
+ if (!socks)
+ return -ENOMEM;
+ nsock = kzalloc(sizeof(struct nbd_sock), GFP_KERNEL);
+ if (!nsock)
+ return -ENOMEM;
-out:
- spin_unlock_irq(&nbd->sock_lock);
+ nbd->socks = socks;
+
+ mutex_init(&nsock->tx_lock);
+ nsock->sock = sock;
+ socks[nbd->num_connections++] = nsock;
- return ret;
+ return 0;
}
/* Reset all properties of an NBD device */
static void nbd_reset(struct nbd_device *nbd)
{
+ int i;
+
+ for (i = 0; i < nbd->num_connections; i++)
+ kfree(nbd->socks[i]);
+ kfree(nbd->socks);
+ nbd->socks = NULL;
nbd->runtime_flags = 0;
nbd->blksize = 1024;
nbd->bytesize = 0;
set_capacity(nbd->disk, 0);
nbd->flags = 0;
nbd->tag_set.timeout = 0;
+ nbd->num_connections = 0;
+ nbd->task_setup = NULL;
queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, nbd->disk->queue);
}
@@ -582,48 +645,68 @@ static void nbd_parse_flags(struct nbd_device *nbd, struct block_device *bdev)
blk_queue_write_cache(nbd->disk->queue, false, false);
}
+static void send_disconnects(struct nbd_device *nbd)
+{
+ struct nbd_request request = {};
+ int i, ret;
+
+ request.magic = htonl(NBD_REQUEST_MAGIC);
+ request.type = htonl(NBD_CMD_DISC);
+
+ for (i = 0; i < nbd->num_connections; i++) {
+ ret = sock_xmit(nbd, i, 1, &request, sizeof(request), 0);
+ if (ret <= 0)
+ dev_err(disk_to_dev(nbd->disk),
+ "Send disconnect failed %d\n", ret);
+ }
+}
+
static int nbd_dev_dbg_init(struct nbd_device *nbd);
static void nbd_dev_dbg_close(struct nbd_device *nbd);
-/* Must be called with tx_lock held */
-
+/* Must be called with config_lock held */
static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
unsigned int cmd, unsigned long arg)
{
switch (cmd) {
case NBD_DISCONNECT: {
- struct request *sreq;
-
dev_info(disk_to_dev(nbd->disk), "NBD_DISCONNECT\n");
- if (!nbd->sock)
+ if (!nbd->socks)
return -EINVAL;
- sreq = blk_mq_alloc_request(bdev_get_queue(bdev), WRITE, 0);
- if (IS_ERR(sreq))
- return -ENOMEM;
-
- mutex_unlock(&nbd->tx_lock);
+ mutex_unlock(&nbd->config_lock);
fsync_bdev(bdev);
- mutex_lock(&nbd->tx_lock);
- sreq->cmd_type = REQ_TYPE_DRV_PRIV;
+ mutex_lock(&nbd->config_lock);
/* Check again after getting mutex back. */
- if (!nbd->sock) {
- blk_mq_free_request(sreq);
+ if (!nbd->socks)
return -EINVAL;
- }
- set_bit(NBD_DISCONNECT_REQUESTED, &nbd->runtime_flags);
-
- nbd_send_cmd(nbd, blk_mq_rq_to_pdu(sreq));
- blk_mq_free_request(sreq);
+ if (!test_and_set_bit(NBD_DISCONNECT_REQUESTED,
+ &nbd->runtime_flags))
+ send_disconnects(nbd);
return 0;
}
-
+
case NBD_CLEAR_SOCK:
sock_shutdown(nbd);
nbd_clear_que(nbd);
kill_bdev(bdev);
+ nbd_bdev_reset(bdev);
+ /*
+ * We want to give the run thread a chance to wait for everybody
+ * to clean up and then do its own cleanup.
+ */
+ if (!test_bit(NBD_RUNNING, &nbd->runtime_flags)) {
+ int i;
+
+ for (i = 0; i < nbd->num_connections; i++)
+ kfree(nbd->socks[i]);
+ kfree(nbd->socks);
+ nbd->socks = NULL;
+ nbd->num_connections = 0;
+ nbd->task_setup = NULL;
+ }
return 0;
case NBD_SET_SOCK: {
@@ -633,7 +716,7 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
if (!sock)
return err;
- err = nbd_set_socket(nbd, sock);
+ err = nbd_add_socket(nbd, sock);
if (!err && max_part)
bdev->bd_invalidated = 1;
@@ -648,7 +731,7 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
case NBD_SET_SIZE:
return nbd_size_set(nbd, bdev, nbd->blksize,
- arg / nbd->blksize);
+ div_s64(arg, nbd->blksize));
case NBD_SET_SIZE_BLOCKS:
return nbd_size_set(nbd, bdev, nbd->blksize, arg);
@@ -662,26 +745,61 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
return 0;
case NBD_DO_IT: {
- int error;
+ struct recv_thread_args *args;
+ int num_connections = nbd->num_connections;
+ int error = 0, i;
if (nbd->task_recv)
return -EBUSY;
- if (!nbd->sock)
+ if (!nbd->socks)
return -EINVAL;
+ if (num_connections > 1 &&
+ !(nbd->flags & NBD_FLAG_CAN_MULTI_CONN)) {
+ dev_err(disk_to_dev(nbd->disk), "server does not support multiple connections per device.\n");
+ error = -EINVAL;
+ goto out_err;
+ }
- /* We have to claim the device under the lock */
+ set_bit(NBD_RUNNING, &nbd->runtime_flags);
+ blk_mq_update_nr_hw_queues(&nbd->tag_set, nbd->num_connections);
+ args = kcalloc(num_connections, sizeof(*args), GFP_KERNEL);
+ if (!args) {
+ error = -ENOMEM;
+ goto out_err;
+ }
nbd->task_recv = current;
- mutex_unlock(&nbd->tx_lock);
+ mutex_unlock(&nbd->config_lock);
nbd_parse_flags(nbd, bdev);
+ error = device_create_file(disk_to_dev(nbd->disk), &pid_attr);
+ if (error) {
+ dev_err(disk_to_dev(nbd->disk), "device_create_file failed!\n");
+ goto out_recv;
+ }
+
+ nbd_size_update(nbd, bdev);
+
nbd_dev_dbg_init(nbd);
- error = nbd_thread_recv(nbd, bdev);
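+ /* Spawn one receive worker per connection; each wakes recv_wq on exit. */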
+ for (i = 0; i < num_connections; i++) {
+ sk_set_memalloc(nbd->socks[i]->sock->sk);
+ atomic_inc(&nbd->recv_threads);
+ INIT_WORK(&args[i].work, recv_work);
+ args[i].nbd = nbd;
+ args[i].index = i;
+ queue_work(system_long_wq, &args[i].work);
+ }
+ wait_event_interruptible(nbd->recv_wq,
+ atomic_read(&nbd->recv_threads) == 0);
+ for (i = 0; i < num_connections; i++)
+ flush_work(&args[i].work);
nbd_dev_dbg_close(nbd);
-
- mutex_lock(&nbd->tx_lock);
+ nbd_size_clear(nbd, bdev);
+ device_remove_file(disk_to_dev(nbd->disk), &pid_attr);
+out_recv:
+ mutex_lock(&nbd->config_lock);
nbd->task_recv = NULL;
-
+out_err:
sock_shutdown(nbd);
nbd_clear_que(nbd);
kill_bdev(bdev);
@@ -694,7 +812,6 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
error = -ETIMEDOUT;
nbd_reset(nbd);
-
return error;
}
@@ -726,9 +843,9 @@ static int nbd_ioctl(struct block_device *bdev, fmode_t mode,
BUG_ON(nbd->magic != NBD_MAGIC);
- mutex_lock(&nbd->tx_lock);
+ mutex_lock(&nbd->config_lock);
error = __nbd_ioctl(bdev, nbd, cmd, arg);
- mutex_unlock(&nbd->tx_lock);
+ mutex_unlock(&nbd->config_lock);
return error;
}
@@ -748,8 +865,6 @@ static int nbd_dbg_tasks_show(struct seq_file *s, void *unused)
if (nbd->task_recv)
seq_printf(s, "recv: %d\n", task_pid_nr(nbd->task_recv));
- if (nbd->task_send)
- seq_printf(s, "send: %d\n", task_pid_nr(nbd->task_send));
return 0;
}
@@ -817,7 +932,7 @@ static int nbd_dev_dbg_init(struct nbd_device *nbd)
debugfs_create_file("tasks", 0444, dir, nbd, &nbd_dbg_tasks_ops);
debugfs_create_u64("size_bytes", 0444, dir, &nbd->bytesize);
debugfs_create_u32("timeout", 0444, dir, &nbd->tag_set.timeout);
- debugfs_create_u32("blocksize", 0444, dir, &nbd->blksize);
+ debugfs_create_u64("blocksize", 0444, dir, &nbd->blksize);
debugfs_create_file("flags", 0444, dir, nbd, &nbd_dbg_flags_ops);
return 0;
@@ -873,9 +988,7 @@ static int nbd_init_request(void *data, struct request *rq,
unsigned int numa_node)
{
struct nbd_cmd *cmd = blk_mq_rq_to_pdu(rq);
-
cmd->nbd = data;
- INIT_LIST_HEAD(&cmd->list);
return 0;
}
@@ -985,13 +1098,13 @@ static int __init nbd_init(void)
for (i = 0; i < nbds_max; i++) {
struct gendisk *disk = nbd_dev[i].disk;
nbd_dev[i].magic = NBD_MAGIC;
- spin_lock_init(&nbd_dev[i].sock_lock);
- mutex_init(&nbd_dev[i].tx_lock);
+ mutex_init(&nbd_dev[i].config_lock);
disk->major = NBD_MAJOR;
disk->first_minor = i << part_shift;
disk->fops = &nbd_fops;
disk->private_data = &nbd_dev[i];
sprintf(disk->disk_name, "nbd%d", i);
+ init_waitqueue_head(&nbd_dev[i].recv_wq);
nbd_reset(&nbd_dev[i]);
add_disk(disk);
}
diff --git a/drivers/block/null_blk.c b/drivers/block/null_blk.c
index ba6f4a2e73db..4943ee22716e 100644
--- a/drivers/block/null_blk.c
+++ b/drivers/block/null_blk.c
@@ -577,6 +577,7 @@ static void null_nvm_unregister(struct nullb *nullb)
#else
static int null_nvm_register(struct nullb *nullb)
{
+ pr_err("null_blk: CONFIG_NVM needs to be enabled for LightNVM\n");
return -EINVAL;
}
static void null_nvm_unregister(struct nullb *nullb) {}
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index 90fa4ac149db..95c98de92971 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -721,7 +721,7 @@ static int pkt_generic_packet(struct pktcdvd_device *pd, struct packet_command *
rq->timeout = 60*HZ;
if (cgc->quiet)
- rq->cmd_flags |= REQ_QUIET;
+ rq->rq_flags |= RQF_QUIET;
blk_execute_rq(rq->q, pd->bdev->bd_disk, rq, 0);
if (rq->errors)
@@ -944,39 +944,6 @@ static int pkt_set_segment_merging(struct pktcdvd_device *pd, struct request_que
}
}
-/*
- * Copy all data for this packet to pkt->pages[], so that
- * a) The number of required segments for the write bio is minimized, which
- * is necessary for some scsi controllers.
- * b) The data can be used as cache to avoid read requests if we receive a
- * new write request for the same zone.
- */
-static void pkt_make_local_copy(struct packet_data *pkt, struct bio_vec *bvec)
-{
- int f, p, offs;
-
- /* Copy all data to pkt->pages[] */
- p = 0;
- offs = 0;
- for (f = 0; f < pkt->frames; f++) {
- if (bvec[f].bv_page != pkt->pages[p]) {
- void *vfrom = kmap_atomic(bvec[f].bv_page) + bvec[f].bv_offset;
- void *vto = page_address(pkt->pages[p]) + offs;
- memcpy(vto, vfrom, CD_FRAMESIZE);
- kunmap_atomic(vfrom);
- bvec[f].bv_page = pkt->pages[p];
- bvec[f].bv_offset = offs;
- } else {
- BUG_ON(bvec[f].bv_offset != offs);
- }
- offs += CD_FRAMESIZE;
- if (offs >= PAGE_SIZE) {
- offs = 0;
- p++;
- }
- }
-}
-
static void pkt_end_io_read(struct bio *bio)
{
struct packet_data *pkt = bio->bi_private;
@@ -1298,7 +1265,6 @@ try_next_bio:
static void pkt_start_write(struct pktcdvd_device *pd, struct packet_data *pkt)
{
int f;
- struct bio_vec *bvec = pkt->w_bio->bi_io_vec;
bio_reset(pkt->w_bio);
pkt->w_bio->bi_iter.bi_sector = pkt->sector;
@@ -1308,9 +1274,10 @@ static void pkt_start_write(struct pktcdvd_device *pd, struct packet_data *pkt)
/* XXX: locking? */
for (f = 0; f < pkt->frames; f++) {
- bvec[f].bv_page = pkt->pages[(f * CD_FRAMESIZE) / PAGE_SIZE];
- bvec[f].bv_offset = (f * CD_FRAMESIZE) % PAGE_SIZE;
- if (!bio_add_page(pkt->w_bio, bvec[f].bv_page, CD_FRAMESIZE, bvec[f].bv_offset))
+ struct page *page = pkt->pages[(f * CD_FRAMESIZE) / PAGE_SIZE];
+ unsigned offset = (f * CD_FRAMESIZE) % PAGE_SIZE;
+
+ if (!bio_add_page(pkt->w_bio, page, CD_FRAMESIZE, offset))
BUG();
}
pkt_dbg(2, pd, "vcnt=%d\n", pkt->w_bio->bi_vcnt);
@@ -1327,12 +1294,10 @@ static void pkt_start_write(struct pktcdvd_device *pd, struct packet_data *pkt)
pkt_dbg(2, pd, "Writing %d frames for zone %llx\n",
pkt->write_size, (unsigned long long)pkt->sector);
- if (test_bit(PACKET_MERGE_SEGS, &pd->flags) || (pkt->write_size < pkt->frames)) {
- pkt_make_local_copy(pkt, bvec);
+ if (test_bit(PACKET_MERGE_SEGS, &pd->flags) || (pkt->write_size < pkt->frames))
pkt->cache_valid = 1;
- } else {
+ else
pkt->cache_valid = 0;
- }
/* Start the write request */
atomic_set(&pkt->io_wait, 1);
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index 7b274ff4632c..36d2b9f4e836 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -3756,7 +3756,7 @@ static void rbd_watch_cb(void *arg, u64 notify_id, u64 cookie,
struct rbd_device *rbd_dev = arg;
void *p = data;
void *const end = p + data_len;
- u8 struct_v;
+ u8 struct_v = 0;
u32 len;
u32 notify_op;
int ret;
diff --git a/drivers/block/skd_main.c b/drivers/block/skd_main.c
index 3822eae102db..abf805e332e2 100644
--- a/drivers/block/skd_main.c
+++ b/drivers/block/skd_main.c
@@ -36,7 +36,6 @@
#include <linux/scatterlist.h>
#include <linux/version.h>
#include <linux/err.h>
-#include <linux/scatterlist.h>
#include <linux/aer.h>
#include <linux/ctype.h>
#include <linux/wait.h>
@@ -270,8 +269,6 @@ struct skd_device {
resource_size_t mem_phys[SKD_MAX_BARS];
u32 mem_size[SKD_MAX_BARS];
- skd_irq_type_t irq_type;
- u32 msix_count;
struct skd_msix_entry *msix_entries;
struct pci_dev *pdev;
@@ -2138,12 +2135,8 @@ static void skd_send_fitmsg(struct skd_device *skdev,
u8 *bp = (u8 *)skmsg->msg_buf;
int i;
for (i = 0; i < skmsg->length; i += 8) {
- pr_debug("%s:%s:%d msg[%2d] %02x %02x %02x %02x "
- "%02x %02x %02x %02x\n",
- skdev->name, __func__, __LINE__,
- i, bp[i + 0], bp[i + 1], bp[i + 2],
- bp[i + 3], bp[i + 4], bp[i + 5],
- bp[i + 6], bp[i + 7]);
+ pr_debug("%s:%s:%d msg[%2d] %8ph\n",
+ skdev->name, __func__, __LINE__, i, &bp[i]);
if (i == 0)
i = 64 - 8;
}
@@ -2164,7 +2157,6 @@ static void skd_send_fitmsg(struct skd_device *skdev,
qcmd |= FIT_QCMD_MSGSIZE_64;
SKD_WRITEQ(skdev, qcmd, FIT_Q_COMMAND);
-
}
static void skd_send_special_fitmsg(struct skd_device *skdev,
@@ -2177,11 +2169,8 @@ static void skd_send_special_fitmsg(struct skd_device *skdev,
int i;
for (i = 0; i < SKD_N_SPECIAL_FITMSG_BYTES; i += 8) {
- pr_debug("%s:%s:%d spcl[%2d] %02x %02x %02x %02x "
- "%02x %02x %02x %02x\n",
- skdev->name, __func__, __LINE__, i,
- bp[i + 0], bp[i + 1], bp[i + 2], bp[i + 3],
- bp[i + 4], bp[i + 5], bp[i + 6], bp[i + 7]);
+ pr_debug("%s:%s:%d spcl[%2d] %8ph\n",
+ skdev->name, __func__, __LINE__, i, &bp[i]);
if (i == 0)
i = 64 - 8;
}
@@ -2955,8 +2944,8 @@ static void skd_completion_worker(struct work_struct *work)
static void skd_isr_msg_from_dev(struct skd_device *skdev);
-irqreturn_t
-static skd_isr(int irq, void *ptr)
+static irqreturn_t
+skd_isr(int irq, void *ptr)
{
struct skd_device *skdev;
u32 intstat;
@@ -3821,10 +3810,6 @@ static irqreturn_t skd_qfull_isr(int irq, void *skd_host_data)
*/
struct skd_msix_entry {
- int have_irq;
- u32 vector;
- u32 entry;
- struct skd_device *rsp;
char isr_name[30];
};
@@ -3853,193 +3838,121 @@ static struct skd_init_msix_entry msix_entries[SKD_MAX_MSIX_COUNT] = {
{ "(Queue Full 3)", skd_qfull_isr },
};
-static void skd_release_msix(struct skd_device *skdev)
-{
- struct skd_msix_entry *qentry;
- int i;
-
- if (skdev->msix_entries) {
- for (i = 0; i < skdev->msix_count; i++) {
- qentry = &skdev->msix_entries[i];
- skdev = qentry->rsp;
-
- if (qentry->have_irq)
- devm_free_irq(&skdev->pdev->dev,
- qentry->vector, qentry->rsp);
- }
-
- kfree(skdev->msix_entries);
- }
-
- if (skdev->msix_count)
- pci_disable_msix(skdev->pdev);
-
- skdev->msix_count = 0;
- skdev->msix_entries = NULL;
-}
-
static int skd_acquire_msix(struct skd_device *skdev)
{
int i, rc;
struct pci_dev *pdev = skdev->pdev;
- struct msix_entry *entries;
- struct skd_msix_entry *qentry;
-
- entries = kzalloc(sizeof(struct msix_entry) * SKD_MAX_MSIX_COUNT,
- GFP_KERNEL);
- if (!entries)
- return -ENOMEM;
-
- for (i = 0; i < SKD_MAX_MSIX_COUNT; i++)
- entries[i].entry = i;
- rc = pci_enable_msix_exact(pdev, entries, SKD_MAX_MSIX_COUNT);
- if (rc) {
+ rc = pci_alloc_irq_vectors(pdev, SKD_MAX_MSIX_COUNT, SKD_MAX_MSIX_COUNT,
+ PCI_IRQ_MSIX);
+ if (rc < 0) {
pr_err("(%s): failed to enable MSI-X %d\n",
skd_name(skdev), rc);
- goto msix_out;
+ goto out;
}
- skdev->msix_count = SKD_MAX_MSIX_COUNT;
- skdev->msix_entries = kzalloc(sizeof(struct skd_msix_entry) *
- skdev->msix_count, GFP_KERNEL);
+ skdev->msix_entries = kcalloc(SKD_MAX_MSIX_COUNT,
+ sizeof(struct skd_msix_entry), GFP_KERNEL);
if (!skdev->msix_entries) {
rc = -ENOMEM;
pr_err("(%s): msix table allocation error\n",
skd_name(skdev));
- goto msix_out;
- }
-
- for (i = 0; i < skdev->msix_count; i++) {
- qentry = &skdev->msix_entries[i];
- qentry->vector = entries[i].vector;
- qentry->entry = entries[i].entry;
- qentry->rsp = NULL;
- qentry->have_irq = 0;
- pr_debug("%s:%s:%d %s: <%s> msix (%d) vec %d, entry %x\n",
- skdev->name, __func__, __LINE__,
- pci_name(pdev), skdev->name,
- i, qentry->vector, qentry->entry);
+ goto out;
}
/* Enable MSI-X vectors for the base queue */
- for (i = 0; i < skdev->msix_count; i++) {
- qentry = &skdev->msix_entries[i];
+ for (i = 0; i < SKD_MAX_MSIX_COUNT; i++) {
+ struct skd_msix_entry *qentry = &skdev->msix_entries[i];
+
snprintf(qentry->isr_name, sizeof(qentry->isr_name),
"%s%d-msix %s", DRV_NAME, skdev->devno,
msix_entries[i].name);
- rc = devm_request_irq(&skdev->pdev->dev, qentry->vector,
- msix_entries[i].handler, 0,
- qentry->isr_name, skdev);
+
+ rc = devm_request_irq(&skdev->pdev->dev,
+ pci_irq_vector(skdev->pdev, i),
+ msix_entries[i].handler, 0,
+ qentry->isr_name, skdev);
if (rc) {
pr_err("(%s): Unable to register(%d) MSI-X "
"handler %d: %s\n",
skd_name(skdev), rc, i, qentry->isr_name);
goto msix_out;
- } else {
- qentry->have_irq = 1;
- qentry->rsp = skdev;
}
}
+
pr_debug("%s:%s:%d %s: <%s> msix %d irq(s) enabled\n",
skdev->name, __func__, __LINE__,
- pci_name(pdev), skdev->name, skdev->msix_count);
+ pci_name(pdev), skdev->name, SKD_MAX_MSIX_COUNT);
return 0;
msix_out:
- if (entries)
- kfree(entries);
- skd_release_msix(skdev);
+ while (--i >= 0)
+ devm_free_irq(&pdev->dev, pci_irq_vector(pdev, i), skdev);
+out:
+ kfree(skdev->msix_entries);
+ skdev->msix_entries = NULL;
return rc;
}
static int skd_acquire_irq(struct skd_device *skdev)
{
+ struct pci_dev *pdev = skdev->pdev;
+ unsigned int irq_flag = PCI_IRQ_LEGACY;
int rc;
- struct pci_dev *pdev;
-
- pdev = skdev->pdev;
- skdev->msix_count = 0;
-RETRY_IRQ_TYPE:
- switch (skdev->irq_type) {
- case SKD_IRQ_MSIX:
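+ /*
+ * Try MSI-X first when requested; on failure, fall back below to a
+ * single MSI or legacy INTx vector from pci_alloc_irq_vectors().
+ */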
+ if (skd_isr_type == SKD_IRQ_MSIX) {
rc = skd_acquire_msix(skdev);
if (!rc)
- pr_info("(%s): MSI-X %d irqs enabled\n",
- skd_name(skdev), skdev->msix_count);
- else {
- pr_err(
- "(%s): failed to enable MSI-X, re-trying with MSI %d\n",
- skd_name(skdev), rc);
- skdev->irq_type = SKD_IRQ_MSI;
- goto RETRY_IRQ_TYPE;
- }
- break;
- case SKD_IRQ_MSI:
- snprintf(skdev->isr_name, sizeof(skdev->isr_name), "%s%d-msi",
- DRV_NAME, skdev->devno);
- rc = pci_enable_msi_range(pdev, 1, 1);
- if (rc > 0) {
- rc = devm_request_irq(&pdev->dev, pdev->irq, skd_isr, 0,
- skdev->isr_name, skdev);
- if (rc) {
- pci_disable_msi(pdev);
- pr_err(
- "(%s): failed to allocate the MSI interrupt %d\n",
- skd_name(skdev), rc);
- goto RETRY_IRQ_LEGACY;
- }
- pr_info("(%s): MSI irq %d enabled\n",
- skd_name(skdev), pdev->irq);
- } else {
-RETRY_IRQ_LEGACY:
- pr_err(
- "(%s): failed to enable MSI, re-trying with LEGACY %d\n",
- skd_name(skdev), rc);
- skdev->irq_type = SKD_IRQ_LEGACY;
- goto RETRY_IRQ_TYPE;
- }
- break;
- case SKD_IRQ_LEGACY:
- snprintf(skdev->isr_name, sizeof(skdev->isr_name),
- "%s%d-legacy", DRV_NAME, skdev->devno);
- rc = devm_request_irq(&pdev->dev, pdev->irq, skd_isr,
- IRQF_SHARED, skdev->isr_name, skdev);
- if (!rc)
- pr_info("(%s): LEGACY irq %d enabled\n",
- skd_name(skdev), pdev->irq);
- else
- pr_err("(%s): request LEGACY irq error %d\n",
- skd_name(skdev), rc);
- break;
- default:
- pr_info("(%s): irq_type %d invalid, re-set to %d\n",
- skd_name(skdev), skdev->irq_type, SKD_IRQ_DEFAULT);
- skdev->irq_type = SKD_IRQ_LEGACY;
- goto RETRY_IRQ_TYPE;
+ return 0;
+
+ pr_err("(%s): failed to enable MSI-X, re-trying with MSI %d\n",
+ skd_name(skdev), rc);
}
- return rc;
+
+ snprintf(skdev->isr_name, sizeof(skdev->isr_name), "%s%d", DRV_NAME,
+ skdev->devno);
+
+ if (skd_isr_type != SKD_IRQ_LEGACY)
+ irq_flag |= PCI_IRQ_MSI;
+ rc = pci_alloc_irq_vectors(pdev, 1, 1, irq_flag);
+ if (rc < 0) {
+ pr_err("(%s): failed to allocate the MSI interrupt %d\n",
+ skd_name(skdev), rc);
+ return rc;
+ }
+
+ rc = devm_request_irq(&pdev->dev, pdev->irq, skd_isr,
+ pdev->msi_enabled ? 0 : IRQF_SHARED,
+ skdev->isr_name, skdev);
+ if (rc) {
+ pci_free_irq_vectors(pdev);
+ pr_err("(%s): failed to allocate interrupt %d\n",
+ skd_name(skdev), rc);
+ return rc;
+ }
+
+ return 0;
}
static void skd_release_irq(struct skd_device *skdev)
{
- switch (skdev->irq_type) {
- case SKD_IRQ_MSIX:
- skd_release_msix(skdev);
- break;
- case SKD_IRQ_MSI:
- devm_free_irq(&skdev->pdev->dev, skdev->pdev->irq, skdev);
- pci_disable_msi(skdev->pdev);
- break;
- case SKD_IRQ_LEGACY:
- devm_free_irq(&skdev->pdev->dev, skdev->pdev->irq, skdev);
- break;
- default:
- pr_err("(%s): wrong irq type %d!",
- skd_name(skdev), skdev->irq_type);
- break;
+ struct pci_dev *pdev = skdev->pdev;
+
+ if (skdev->msix_entries) {
+ int i;
+
+ for (i = 0; i < SKD_MAX_MSIX_COUNT; i++) {
+ devm_free_irq(&pdev->dev, pci_irq_vector(pdev, i),
+ skdev);
+ }
+
+ kfree(skdev->msix_entries);
+ skdev->msix_entries = NULL;
+ } else {
+ devm_free_irq(&pdev->dev, pdev->irq, skdev);
}
+
+ pci_free_irq_vectors(pdev);
}
/*
@@ -4402,7 +4315,6 @@ static struct skd_device *skd_construct(struct pci_dev *pdev)
skdev->pdev = pdev;
skdev->devno = skd_next_devno++;
skdev->major = blk_major;
- skdev->irq_type = skd_isr_type;
sprintf(skdev->name, DRV_NAME "%d", skdev->devno);
skdev->dev_max_queue_depth = 0;
diff --git a/drivers/block/umem.c b/drivers/block/umem.c
index be90e15854ed..46f4c719fed9 100644
--- a/drivers/block/umem.c
+++ b/drivers/block/umem.c
@@ -535,7 +535,7 @@ static blk_qc_t mm_make_request(struct request_queue *q, struct bio *bio)
*card->biotail = bio;
bio->bi_next = NULL;
card->biotail = &bio->bi_next;
- if (bio->bi_opf & REQ_SYNC || !mm_check_plugged(card))
+ if (op_is_sync(bio->bi_opf) || !mm_check_plugged(card))
activate(card);
spin_unlock_irq(&card->lock);
diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c
index 4a80ee752597..726c32e35db9 100644
--- a/drivers/block/xen-blkback/blkback.c
+++ b/drivers/block/xen-blkback/blkback.c
@@ -1253,14 +1253,14 @@ static int dispatch_rw_block_io(struct xen_blkif_ring *ring,
case BLKIF_OP_WRITE:
ring->st_wr_req++;
operation = REQ_OP_WRITE;
- operation_flags = WRITE_ODIRECT;
+ operation_flags = REQ_SYNC | REQ_IDLE;
break;
case BLKIF_OP_WRITE_BARRIER:
drain = true;
case BLKIF_OP_FLUSH_DISKCACHE:
ring->st_f_req++;
operation = REQ_OP_WRITE;
- operation_flags = WRITE_FLUSH;
+ operation_flags = REQ_PREFLUSH;
break;
default:
operation = 0; /* make gcc happy */
@@ -1272,7 +1272,7 @@ static int dispatch_rw_block_io(struct xen_blkif_ring *ring,
nseg = req->operation == BLKIF_OP_INDIRECT ?
req->u.indirect.nr_segments : req->u.rw.nr_segments;
- if (unlikely(nseg == 0 && operation_flags != WRITE_FLUSH) ||
+ if (unlikely(nseg == 0 && operation_flags != REQ_PREFLUSH) ||
unlikely((req->operation != BLKIF_OP_INDIRECT) &&
(nseg > BLKIF_MAX_SEGMENTS_PER_REQUEST)) ||
unlikely((req->operation == BLKIF_OP_INDIRECT) &&
@@ -1334,7 +1334,7 @@ static int dispatch_rw_block_io(struct xen_blkif_ring *ring,
}
/* Wait on all outstanding I/O's and once that has been completed
- * issue the WRITE_FLUSH.
+ * issue the flush.
*/
if (drain)
xen_blk_drain_io(pending_req->ring);
@@ -1380,7 +1380,7 @@ static int dispatch_rw_block_io(struct xen_blkif_ring *ring,
/* This will be hit if the operation was a flush or discard. */
if (!bio) {
- BUG_ON(operation_flags != WRITE_FLUSH);
+ BUG_ON(operation_flags != REQ_PREFLUSH);
bio = bio_alloc(GFP_KERNEL, 0);
if (unlikely(bio == NULL))
diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c
index 3cc6d1d86f1e..415e79b69d34 100644
--- a/drivers/block/xen-blkback/xenbus.c
+++ b/drivers/block/xen-blkback/xenbus.c
@@ -533,13 +533,11 @@ static void xen_blkbk_discard(struct xenbus_transaction xbt, struct backend_info
struct xenbus_device *dev = be->dev;
struct xen_blkif *blkif = be->blkif;
int err;
- int state = 0, discard_enable;
+ int state = 0;
struct block_device *bdev = be->blkif->vbd.bdev;
struct request_queue *q = bdev_get_queue(bdev);
- err = xenbus_scanf(XBT_NIL, dev->nodename, "discard-enable", "%d",
- &discard_enable);
- if (err == 1 && !discard_enable)
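+ /* xenbus_read_unsigned() returns its last argument (here 1) if the node is absent or unreadable. */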
+ if (!xenbus_read_unsigned(dev->nodename, "discard-enable", 1))
return;
if (blk_queue_discard(q)) {
@@ -1039,30 +1037,24 @@ static int connect_ring(struct backend_info *be)
xenbus_dev_fatal(dev, err, "unknown fe protocol %s", protocol);
return -ENOSYS;
}
- err = xenbus_scanf(XBT_NIL, dev->otherend,
- "feature-persistent", "%u", &pers_grants);
- if (err <= 0)
- pers_grants = 0;
-
+ pers_grants = xenbus_read_unsigned(dev->otherend, "feature-persistent",
+ 0);
be->blkif->vbd.feature_gnt_persistent = pers_grants;
be->blkif->vbd.overflow_max_grants = 0;
/*
* Read the number of hardware queues from frontend.
*/
- err = xenbus_scanf(XBT_NIL, dev->otherend, "multi-queue-num-queues",
- "%u", &requested_num_queues);
- if (err < 0) {
- requested_num_queues = 1;
- } else {
- if (requested_num_queues > xenblk_max_queues
- || requested_num_queues == 0) {
- /* Buggy or malicious guest. */
- xenbus_dev_fatal(dev, err,
- "guest requested %u queues, exceeding the maximum of %u.",
- requested_num_queues, xenblk_max_queues);
- return -ENOSYS;
- }
+ requested_num_queues = xenbus_read_unsigned(dev->otherend,
+ "multi-queue-num-queues",
+ 1);
+ if (requested_num_queues > xenblk_max_queues
+ || requested_num_queues == 0) {
+ /* Buggy or malicious guest. */
+ xenbus_dev_fatal(dev, err,
+ "guest requested %u queues, exceeding the maximum of %u.",
+ requested_num_queues, xenblk_max_queues);
+ return -ENOSYS;
}
be->blkif->nr_rings = requested_num_queues;
if (xen_blkif_alloc_rings(be->blkif))
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index 9908597c5209..b2bdfa81f929 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -1758,17 +1758,13 @@ static int talk_to_blkback(struct xenbus_device *dev,
const char *message = NULL;
struct xenbus_transaction xbt;
int err;
- unsigned int i, max_page_order = 0;
- unsigned int ring_page_order = 0;
+ unsigned int i, max_page_order;
+ unsigned int ring_page_order;
- err = xenbus_scanf(XBT_NIL, info->xbdev->otherend,
- "max-ring-page-order", "%u", &max_page_order);
- if (err != 1)
- info->nr_ring_pages = 1;
- else {
- ring_page_order = min(xen_blkif_max_ring_order, max_page_order);
- info->nr_ring_pages = 1 << ring_page_order;
- }
+ max_page_order = xenbus_read_unsigned(info->xbdev->otherend,
+ "max-ring-page-order", 0);
+ ring_page_order = min(xen_blkif_max_ring_order, max_page_order);
+ info->nr_ring_pages = 1 << ring_page_order;
for (i = 0; i < info->nr_rings; i++) {
struct blkfront_ring_info *rinfo = &info->rinfo[i];
@@ -1877,18 +1873,14 @@ again:
static int negotiate_mq(struct blkfront_info *info)
{
- unsigned int backend_max_queues = 0;
- int err;
+ unsigned int backend_max_queues;
unsigned int i;
BUG_ON(info->nr_rings);
/* Check if backend supports multiple queues. */
- err = xenbus_scanf(XBT_NIL, info->xbdev->otherend,
- "multi-queue-max-queues", "%u", &backend_max_queues);
- if (err < 0)
- backend_max_queues = 1;
-
+ backend_max_queues = xenbus_read_unsigned(info->xbdev->otherend,
+ "multi-queue-max-queues", 1);
info->nr_rings = min(backend_max_queues, xen_blkif_max_queues);
/* We need at least one ring. */
if (!info->nr_rings)
@@ -2043,8 +2035,9 @@ static int blkif_recover(struct blkfront_info *info)
/* Requeue pending requests (flush or discard) */
list_del_init(&req->queuelist);
BUG_ON(req->nr_phys_segments > segs);
- blk_mq_requeue_request(req);
+ blk_mq_requeue_request(req, false);
}
+ blk_mq_start_stopped_hw_queues(info->rq, true);
blk_mq_kick_requeue_list(info->rq);
while ((bio = bio_list_pop(&info->bio_list)) != NULL) {
@@ -2195,7 +2188,6 @@ static void blkfront_setup_discard(struct blkfront_info *info)
int err;
unsigned int discard_granularity;
unsigned int discard_alignment;
- unsigned int discard_secure;
info->feature_discard = 1;
err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
@@ -2206,10 +2198,9 @@ static void blkfront_setup_discard(struct blkfront_info *info)
info->discard_granularity = discard_granularity;
info->discard_alignment = discard_alignment;
}
- err = xenbus_scanf(XBT_NIL, info->xbdev->otherend,
- "discard-secure", "%u", &discard_secure);
- if (err > 0)
- info->feature_secdiscard = !!discard_secure;
+ info->feature_secdiscard =
+ !!xenbus_read_unsigned(info->xbdev->otherend, "discard-secure",
+ 0);
}
static int blkfront_setup_indirect(struct blkfront_ring_info *rinfo)
@@ -2301,16 +2292,11 @@ out_of_memory:
*/
static void blkfront_gather_backend_features(struct blkfront_info *info)
{
- int err;
- int barrier, flush, discard, persistent;
unsigned int indirect_segments;
info->feature_flush = 0;
info->feature_fua = 0;
- err = xenbus_scanf(XBT_NIL, info->xbdev->otherend,
- "feature-barrier", "%d", &barrier);
-
/*
* If there's no "feature-barrier" defined, then it means
* we're dealing with a very old backend which writes
@@ -2318,7 +2304,7 @@ static void blkfront_gather_backend_features(struct blkfront_info *info)
*
* If there are barriers, then we use flush.
*/
- if (err > 0 && barrier) {
+ if (xenbus_read_unsigned(info->xbdev->otherend, "feature-barrier", 0)) {
info->feature_flush = 1;
info->feature_fua = 1;
}
@@ -2327,35 +2313,23 @@ static void blkfront_gather_backend_features(struct blkfront_info *info)
* And if there is "feature-flush-cache" use that above
* barriers.
*/
- err = xenbus_scanf(XBT_NIL, info->xbdev->otherend,
- "feature-flush-cache", "%d", &flush);
-
- if (err > 0 && flush) {
+ if (xenbus_read_unsigned(info->xbdev->otherend, "feature-flush-cache",
+ 0)) {
info->feature_flush = 1;
info->feature_fua = 0;
}
- err = xenbus_scanf(XBT_NIL, info->xbdev->otherend,
- "feature-discard", "%d", &discard);
-
- if (err > 0 && discard)
+ if (xenbus_read_unsigned(info->xbdev->otherend, "feature-discard", 0))
blkfront_setup_discard(info);
- err = xenbus_scanf(XBT_NIL, info->xbdev->otherend,
- "feature-persistent", "%d", &persistent);
- if (err <= 0)
- info->feature_persistent = 0;
- else
- info->feature_persistent = persistent;
+ info->feature_persistent =
+ xenbus_read_unsigned(info->xbdev->otherend,
+ "feature-persistent", 0);
- err = xenbus_scanf(XBT_NIL, info->xbdev->otherend,
- "feature-max-indirect-segments", "%u",
- &indirect_segments);
- if (err <= 0)
- info->max_indirect_segments = 0;
- else
- info->max_indirect_segments = min(indirect_segments,
- xen_blkif_max_segments);
+ indirect_segments = xenbus_read_unsigned(info->xbdev->otherend,
+ "feature-max-indirect-segments", 0);
+ info->max_indirect_segments = min(indirect_segments,
+ xen_blkif_max_segments);
}
/*
@@ -2420,11 +2394,9 @@ static void blkfront_connect(struct blkfront_info *info)
* provide this. Assume physical sector size to be the same as
* sector_size in that case.
*/
- err = xenbus_scanf(XBT_NIL, info->xbdev->otherend,
- "physical-sector-size", "%u", &physical_sector_size);
- if (err != 1)
- physical_sector_size = sector_size;
-
+ physical_sector_size = xenbus_read_unsigned(info->xbdev->otherend,
+ "physical-sector-size",
+ sector_size);
blkfront_gather_backend_features(info);
for (i = 0; i < info->nr_rings; i++) {
err = blkfront_setup_indirect(&info->rinfo[i]);
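All the xen-blkback and xen-blkfront hunks above apply one conversion: an xenbus_scanf() call plus hand-rolled error handling becomes a single xenbus_read_unsigned(), which returns a caller-supplied default when the xenstore node is absent or unparsable. A minimal sketch of the idiom, assuming a struct xenbus_device *dev; the node name and default are illustrative:

    #include <xen/xenbus.h>

    /* Read "my-node" from the peer end; fall back to 1 if the node
     * is missing or malformed. */
    unsigned int val = xenbus_read_unsigned(dev->otherend, "my-node", 1);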
diff --git a/drivers/block/zram/zcomp.c b/drivers/block/zram/zcomp.c
index 4b5cd3a7b2b6..12046f4f00e4 100644
--- a/drivers/block/zram/zcomp.c
+++ b/drivers/block/zram/zcomp.c
@@ -160,82 +160,56 @@ int zcomp_decompress(struct zcomp_strm *zstrm,
dst, &dst_len);
}
-static int __zcomp_cpu_notifier(struct zcomp *comp,
- unsigned long action, unsigned long cpu)
+int zcomp_cpu_up_prepare(unsigned int cpu, struct hlist_node *node)
{
+ struct zcomp *comp = hlist_entry(node, struct zcomp, node);
struct zcomp_strm *zstrm;
- switch (action) {
- case CPU_UP_PREPARE:
- if (WARN_ON(*per_cpu_ptr(comp->stream, cpu)))
- break;
- zstrm = zcomp_strm_alloc(comp);
- if (IS_ERR_OR_NULL(zstrm)) {
- pr_err("Can't allocate a compression stream\n");
- return NOTIFY_BAD;
- }
- *per_cpu_ptr(comp->stream, cpu) = zstrm;
- break;
- case CPU_DEAD:
- case CPU_UP_CANCELED:
- zstrm = *per_cpu_ptr(comp->stream, cpu);
- if (!IS_ERR_OR_NULL(zstrm))
- zcomp_strm_free(zstrm);
- *per_cpu_ptr(comp->stream, cpu) = NULL;
- break;
- default:
- break;
+ if (WARN_ON(*per_cpu_ptr(comp->stream, cpu)))
+ return 0;
+
+ zstrm = zcomp_strm_alloc(comp);
+ if (IS_ERR_OR_NULL(zstrm)) {
+ pr_err("Can't allocate a compression stream\n");
+ return -ENOMEM;
}
- return NOTIFY_OK;
+ *per_cpu_ptr(comp->stream, cpu) = zstrm;
+ return 0;
}
-static int zcomp_cpu_notifier(struct notifier_block *nb,
- unsigned long action, void *pcpu)
+int zcomp_cpu_dead(unsigned int cpu, struct hlist_node *node)
{
- unsigned long cpu = (unsigned long)pcpu;
- struct zcomp *comp = container_of(nb, typeof(*comp), notifier);
+ struct zcomp *comp = hlist_entry(node, struct zcomp, node);
+ struct zcomp_strm *zstrm;
- return __zcomp_cpu_notifier(comp, action, cpu);
+ zstrm = *per_cpu_ptr(comp->stream, cpu);
+ if (!IS_ERR_OR_NULL(zstrm))
+ zcomp_strm_free(zstrm);
+ *per_cpu_ptr(comp->stream, cpu) = NULL;
+ return 0;
}
static int zcomp_init(struct zcomp *comp)
{
- unsigned long cpu;
int ret;
- comp->notifier.notifier_call = zcomp_cpu_notifier;
-
comp->stream = alloc_percpu(struct zcomp_strm *);
if (!comp->stream)
return -ENOMEM;
- cpu_notifier_register_begin();
- for_each_online_cpu(cpu) {
- ret = __zcomp_cpu_notifier(comp, CPU_UP_PREPARE, cpu);
- if (ret == NOTIFY_BAD)
- goto cleanup;
- }
- __register_cpu_notifier(&comp->notifier);
- cpu_notifier_register_done();
+ ret = cpuhp_state_add_instance(CPUHP_ZCOMP_PREPARE, &comp->node);
+ if (ret < 0)
+ goto cleanup;
return 0;
cleanup:
- for_each_online_cpu(cpu)
- __zcomp_cpu_notifier(comp, CPU_UP_CANCELED, cpu);
- cpu_notifier_register_done();
- return -ENOMEM;
+ free_percpu(comp->stream);
+ return ret;
}
void zcomp_destroy(struct zcomp *comp)
{
- unsigned long cpu;
-
- cpu_notifier_register_begin();
- for_each_online_cpu(cpu)
- __zcomp_cpu_notifier(comp, CPU_UP_CANCELED, cpu);
- __unregister_cpu_notifier(&comp->notifier);
- cpu_notifier_register_done();
-
+ cpuhp_state_remove_instance(CPUHP_ZCOMP_PREPARE, &comp->node);
free_percpu(comp->stream);
kfree(comp);
}
diff --git a/drivers/block/zram/zcomp.h b/drivers/block/zram/zcomp.h
index 478cac2ed465..41c1002a7d7d 100644
--- a/drivers/block/zram/zcomp.h
+++ b/drivers/block/zram/zcomp.h
@@ -19,11 +19,12 @@ struct zcomp_strm {
/* dynamic per-device compression frontend */
struct zcomp {
struct zcomp_strm * __percpu *stream;
- struct notifier_block notifier;
-
const char *name;
+ struct hlist_node node;
};
+int zcomp_cpu_up_prepare(unsigned int cpu, struct hlist_node *node);
+int zcomp_cpu_dead(unsigned int cpu, struct hlist_node *node);
ssize_t zcomp_available_show(const char *comp, char *buf);
bool zcomp_available_algorithm(const char *comp);
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index 5497f7fc44d0..15f58ab44d0b 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -30,6 +30,7 @@
#include <linux/err.h>
#include <linux/idr.h>
#include <linux/sysfs.h>
+#include <linux/cpuhotplug.h>
#include "zram_drv.h"
@@ -1443,15 +1444,22 @@ static void destroy_devices(void)
idr_for_each(&zram_index_idr, &zram_remove_cb, NULL);
idr_destroy(&zram_index_idr);
unregister_blkdev(zram_major, "zram");
+ cpuhp_remove_multi_state(CPUHP_ZCOMP_PREPARE);
}
static int __init zram_init(void)
{
int ret;
+ ret = cpuhp_setup_state_multi(CPUHP_ZCOMP_PREPARE, "block/zram:prepare",
+ zcomp_cpu_up_prepare, zcomp_cpu_dead);
+ if (ret < 0)
+ return ret;
+
ret = class_register(&zram_control_class);
if (ret) {
pr_err("Unable to register zram-control class\n");
+ cpuhp_remove_multi_state(CPUHP_ZCOMP_PREPARE);
return ret;
}
@@ -1459,6 +1467,7 @@ static int __init zram_init(void)
if (zram_major <= 0) {
pr_err("Unable to get major number\n");
class_unregister(&zram_control_class);
+ cpuhp_remove_multi_state(CPUHP_ZCOMP_PREPARE);
return -EBUSY;
}
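The zcomp conversion above is the standard multi-instance CPU-hotplug pattern: the callbacks are registered once against a hotplug state, then each object hangs an hlist_node on that state so the callbacks run per instance on every CPU. A condensed sketch of the flow, reusing the state and callbacks from this diff:

    #include <linux/cpuhotplug.h>

    /* once, at module init: bind the callbacks to the state */
    ret = cpuhp_setup_state_multi(CPUHP_ZCOMP_PREPARE, "block/zram:prepare",
                                  zcomp_cpu_up_prepare, zcomp_cpu_dead);

    /* per zcomp instance: runs the prepare callback on each online CPU */
    ret = cpuhp_state_add_instance(CPUHP_ZCOMP_PREPARE, &comp->node);

    /* teardown mirrors setup */
    cpuhp_state_remove_instance(CPUHP_ZCOMP_PREPARE, &comp->node);
    cpuhp_remove_multi_state(CPUHP_ZCOMP_PREPARE);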
diff --git a/drivers/bluetooth/Makefile b/drivers/bluetooth/Makefile
index b1fc29a697b7..80627187c8b6 100644
--- a/drivers/bluetooth/Makefile
+++ b/drivers/bluetooth/Makefile
@@ -40,5 +40,3 @@ hci_uart-$(CONFIG_BT_HCIUART_QCA) += hci_qca.o
hci_uart-$(CONFIG_BT_HCIUART_AG6XX) += hci_ag6xx.o
hci_uart-$(CONFIG_BT_HCIUART_MRVL) += hci_mrvl.o
hci_uart-objs := $(hci_uart-y)
-
-ccflags-y += -D__CHECK_ENDIAN__
diff --git a/drivers/bluetooth/btmrvl_drv.h b/drivers/bluetooth/btmrvl_drv.h
index f742384b53f7..fc3caf4541ba 100644
--- a/drivers/bluetooth/btmrvl_drv.h
+++ b/drivers/bluetooth/btmrvl_drv.h
@@ -32,7 +32,6 @@
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
-#include <linux/slab.h>
#include <linux/of_irq.h>
#define BTM_HEADER_LEN 4
diff --git a/drivers/bluetooth/hci_bcsp.c b/drivers/bluetooth/hci_bcsp.c
index a2c921faaa12..910ec968f022 100644
--- a/drivers/bluetooth/hci_bcsp.c
+++ b/drivers/bluetooth/hci_bcsp.c
@@ -733,9 +733,7 @@ static int bcsp_open(struct hci_uart *hu)
skb_queue_head_init(&bcsp->rel);
skb_queue_head_init(&bcsp->unrel);
- init_timer(&bcsp->tbcsp);
- bcsp->tbcsp.function = bcsp_timed_event;
- bcsp->tbcsp.data = (u_long)hu;
+ setup_timer(&bcsp->tbcsp, bcsp_timed_event, (u_long)hu);
bcsp->rx_state = BCSP_W4_PKT_DELIMITER;
diff --git a/drivers/bluetooth/hci_h5.c b/drivers/bluetooth/hci_h5.c
index 0879d64b1caf..90d0456b6744 100644
--- a/drivers/bluetooth/hci_h5.c
+++ b/drivers/bluetooth/hci_h5.c
@@ -204,9 +204,7 @@ static int h5_open(struct hci_uart *hu)
h5_reset_rx(h5);
- init_timer(&h5->timer);
- h5->timer.function = h5_timed_event;
- h5->timer.data = (unsigned long)hu;
+ setup_timer(&h5->timer, h5_timed_event, (unsigned long)hu);
h5->tx_win = H5_TX_WIN_MAX;
diff --git a/drivers/bluetooth/hci_qca.c b/drivers/bluetooth/hci_qca.c
index 6c867fbc56a7..05c230719a47 100644
--- a/drivers/bluetooth/hci_qca.c
+++ b/drivers/bluetooth/hci_qca.c
@@ -438,14 +438,11 @@ static int qca_open(struct hci_uart *hu)
hu->priv = qca;
- init_timer(&qca->wake_retrans_timer);
- qca->wake_retrans_timer.function = hci_ibs_wake_retrans_timeout;
- qca->wake_retrans_timer.data = (u_long)hu;
+ setup_timer(&qca->wake_retrans_timer, hci_ibs_wake_retrans_timeout,
+ (u_long)hu);
qca->wake_retrans = IBS_WAKE_RETRANS_TIMEOUT_MS;
- init_timer(&qca->tx_idle_timer);
- qca->tx_idle_timer.function = hci_ibs_tx_idle_timeout;
- qca->tx_idle_timer.data = (u_long)hu;
+ setup_timer(&qca->tx_idle_timer, hci_ibs_tx_idle_timeout, (u_long)hu);
qca->tx_idle_delay = IBS_TX_IDLE_TIMEOUT_MS;
BT_DBG("HCI_UART_QCA open, tx_idle_delay=%u, wake_retrans=%u",
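Each Bluetooth hunk above is the same mechanical change: the three-statement init_timer()/.function/.data sequence collapses into one setup_timer() call with identical semantics. A minimal before/after sketch, assuming a struct timer_list t and the callback signature of that era:

    #include <linux/timer.h>

    /* before:
     *   init_timer(&t);
     *   t.function = my_timeout;    // void my_timeout(unsigned long)
     *   t.data = (unsigned long)hu;
     * after: */
    setup_timer(&t, my_timeout, (unsigned long)hu);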
diff --git a/drivers/bluetooth/hci_vhci.c b/drivers/bluetooth/hci_vhci.c
index c4a75a18dcae..233e850fdac7 100644
--- a/drivers/bluetooth/hci_vhci.c
+++ b/drivers/bluetooth/hci_vhci.c
@@ -181,7 +181,7 @@ static inline ssize_t vhci_get_user(struct vhci_data *data,
if (!skb)
return -ENOMEM;
- if (copy_from_iter(skb_put(skb, len), len, from) != len) {
+ if (!copy_from_iter_full(skb_put(skb, len), len, from)) {
kfree_skb(skb);
return -EFAULT;
}
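copy_from_iter_full() differs from copy_from_iter() in its failure contract: rather than returning a possibly short byte count for the caller to compare, it returns false on any short copy and reverts the iterator, so nothing is consumed on failure. The resulting pattern, as adopted above:

    if (!copy_from_iter_full(skb_put(skb, len), len, from)) {
        /* short copy: the iterator was reverted, nothing consumed */
        kfree_skb(skb);
        return -EFAULT;
    }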
diff --git a/drivers/bus/Kconfig b/drivers/bus/Kconfig
index 78751057164a..b9e8cfc93c7e 100644
--- a/drivers/bus/Kconfig
+++ b/drivers/bus/Kconfig
@@ -150,6 +150,13 @@ config TEGRA_ACONNECT
Driver for the Tegra ACONNECT bus which is used to interface with
the devices inside the Audio Processing Engine (APE) for Tegra210.
+config TEGRA_GMI
+ tristate "Tegra Generic Memory Interface bus driver"
+ depends on ARCH_TEGRA
+ help
+ Driver for the Tegra Generic Memory Interface bus which can be used
+ to attach devices such as NOR, UART, FPGA and more.
+
config UNIPHIER_SYSTEM_BUS
tristate "UniPhier System Bus driver"
depends on ARCH_UNIPHIER && OF
@@ -167,4 +174,13 @@ config VEXPRESS_CONFIG
help
Platform configuration infrastructure for the ARM Ltd.
Versatile Express.
+
+config DA8XX_MSTPRI
+ bool "TI da8xx master peripheral priority driver"
+ depends on ARCH_DAVINCI_DA8XX
+ help
+ Driver for Texas Instruments da8xx master peripheral priority
+ configuration. Allows to adjust the priorities of all master
+ peripherals.
+
endmenu
diff --git a/drivers/bus/Makefile b/drivers/bus/Makefile
index c6cfa6b2606e..cc6364bec054 100644
--- a/drivers/bus/Makefile
+++ b/drivers/bus/Makefile
@@ -19,5 +19,8 @@ obj-$(CONFIG_QCOM_EBI2) += qcom-ebi2.o
obj-$(CONFIG_SUNXI_RSB) += sunxi-rsb.o
obj-$(CONFIG_SIMPLE_PM_BUS) += simple-pm-bus.o
obj-$(CONFIG_TEGRA_ACONNECT) += tegra-aconnect.o
+obj-$(CONFIG_TEGRA_GMI) += tegra-gmi.o
obj-$(CONFIG_UNIPHIER_SYSTEM_BUS) += uniphier-system-bus.o
obj-$(CONFIG_VEXPRESS_CONFIG) += vexpress-config.o
+
+obj-$(CONFIG_DA8XX_MSTPRI) += da8xx-mstpri.o
diff --git a/drivers/bus/arm-cci.c b/drivers/bus/arm-cci.c
index 890082315054..231633328dfa 100644
--- a/drivers/bus/arm-cci.c
+++ b/drivers/bus/arm-cci.c
@@ -2190,6 +2190,9 @@ static int cci_probe_ports(struct device_node *np)
if (!of_match_node(arm_cci_ctrl_if_matches, cp))
continue;
+ if (!of_device_is_available(cp))
+ continue;
+
i = nb_ace + nb_ace_lite;
if (i >= nb_cci_ports)
@@ -2232,6 +2235,13 @@ static int cci_probe_ports(struct device_node *np)
ports[i].dn = cp;
}
+ /*
+ * If there is no CCI port that is under kernel control,
+ * return early and report probe status.
+ */
+ if (!nb_ace && !nb_ace_lite)
+ return -ENODEV;
+
/* initialize a stashed array of ACE ports to speed-up look-up */
cci_ace_init_ports();
diff --git a/drivers/bus/da8xx-mstpri.c b/drivers/bus/da8xx-mstpri.c
new file mode 100644
index 000000000000..063397f2c0db
--- /dev/null
+++ b/drivers/bus/da8xx-mstpri.c
@@ -0,0 +1,267 @@
+/*
+ * TI da8xx master peripheral priority driver
+ *
+ * Copyright (C) 2016 BayLibre SAS
+ *
+ * Author:
+ * Bartosz Golaszewski <bgolaszewski@baylibre.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/regmap.h>
+
+/*
+ * REVISIT: Linux doesn't have a good framework for the kind of performance
+ * knobs this driver controls. We can't use device tree properties as it deals
+ * with hardware configuration rather than description. We also don't want to
+ * commit to maintaining some random sysfs attributes.
+ *
+ * For now we just hardcode the register values for the boards that need
+ * some changes (as is the case for the LCD controller on da850-lcdk - the
+ * first board we support here). When Linux gets an appropriate framework,
+ * we'll easily convert the driver to it.
+ */
+
+#define DA8XX_MSTPRI0_OFFSET 0
+#define DA8XX_MSTPRI1_OFFSET 4
+#define DA8XX_MSTPRI2_OFFSET 8
+
+enum {
+ DA8XX_MSTPRI_ARM_I = 0,
+ DA8XX_MSTPRI_ARM_D,
+ DA8XX_MSTPRI_UPP,
+ DA8XX_MSTPRI_SATA,
+ DA8XX_MSTPRI_PRU0,
+ DA8XX_MSTPRI_PRU1,
+ DA8XX_MSTPRI_EDMA30TC0,
+ DA8XX_MSTPRI_EDMA30TC1,
+ DA8XX_MSTPRI_EDMA31TC0,
+ DA8XX_MSTPRI_VPIF_DMA_0,
+ DA8XX_MSTPRI_VPIF_DMA_1,
+ DA8XX_MSTPRI_EMAC,
+ DA8XX_MSTPRI_USB0CFG,
+ DA8XX_MSTPRI_USB0CDMA,
+ DA8XX_MSTPRI_UHPI,
+ DA8XX_MSTPRI_USB1,
+ DA8XX_MSTPRI_LCDC,
+};
+
+struct da8xx_mstpri_descr {
+ int reg;
+ int shift;
+ int mask;
+};
+
+static const struct da8xx_mstpri_descr da8xx_mstpri_priority_list[] = {
+ [DA8XX_MSTPRI_ARM_I] = {
+ .reg = DA8XX_MSTPRI0_OFFSET,
+ .shift = 0,
+ .mask = 0x0000000f,
+ },
+ [DA8XX_MSTPRI_ARM_D] = {
+ .reg = DA8XX_MSTPRI0_OFFSET,
+ .shift = 4,
+ .mask = 0x000000f0,
+ },
+ [DA8XX_MSTPRI_UPP] = {
+ .reg = DA8XX_MSTPRI0_OFFSET,
+ .shift = 16,
+ .mask = 0x000f0000,
+ },
+ [DA8XX_MSTPRI_SATA] = {
+ .reg = DA8XX_MSTPRI0_OFFSET,
+ .shift = 20,
+ .mask = 0x00f00000,
+ },
+ [DA8XX_MSTPRI_PRU0] = {
+ .reg = DA8XX_MSTPRI1_OFFSET,
+ .shift = 0,
+ .mask = 0x0000000f,
+ },
+ [DA8XX_MSTPRI_PRU1] = {
+ .reg = DA8XX_MSTPRI1_OFFSET,
+ .shift = 4,
+ .mask = 0x000000f0,
+ },
+ [DA8XX_MSTPRI_EDMA30TC0] = {
+ .reg = DA8XX_MSTPRI1_OFFSET,
+ .shift = 8,
+ .mask = 0x00000f00,
+ },
+ [DA8XX_MSTPRI_EDMA30TC1] = {
+ .reg = DA8XX_MSTPRI1_OFFSET,
+ .shift = 12,
+ .mask = 0x0000f000,
+ },
+ [DA8XX_MSTPRI_EDMA31TC0] = {
+ .reg = DA8XX_MSTPRI1_OFFSET,
+ .shift = 16,
+ .mask = 0x000f0000,
+ },
+ [DA8XX_MSTPRI_VPIF_DMA_0] = {
+ .reg = DA8XX_MSTPRI1_OFFSET,
+ .shift = 24,
+ .mask = 0x0f000000,
+ },
+ [DA8XX_MSTPRI_VPIF_DMA_1] = {
+ .reg = DA8XX_MSTPRI1_OFFSET,
+ .shift = 28,
+ .mask = 0xf0000000,
+ },
+ [DA8XX_MSTPRI_EMAC] = {
+ .reg = DA8XX_MSTPRI2_OFFSET,
+ .shift = 0,
+ .mask = 0x0000000f,
+ },
+ [DA8XX_MSTPRI_USB0CFG] = {
+ .reg = DA8XX_MSTPRI2_OFFSET,
+ .shift = 8,
+ .mask = 0x00000f00,
+ },
+ [DA8XX_MSTPRI_USB0CDMA] = {
+ .reg = DA8XX_MSTPRI2_OFFSET,
+ .shift = 12,
+ .mask = 0x0000f000,
+ },
+ [DA8XX_MSTPRI_UHPI] = {
+ .reg = DA8XX_MSTPRI2_OFFSET,
+ .shift = 20,
+ .mask = 0x00f00000,
+ },
+ [DA8XX_MSTPRI_USB1] = {
+ .reg = DA8XX_MSTPRI2_OFFSET,
+ .shift = 24,
+ .mask = 0x0f000000,
+ },
+ [DA8XX_MSTPRI_LCDC] = {
+ .reg = DA8XX_MSTPRI2_OFFSET,
+ .shift = 28,
+ .mask = 0xf0000000,
+ },
+};
+
+struct da8xx_mstpri_priority {
+ int which;
+ u32 val;
+};
+
+struct da8xx_mstpri_board_priorities {
+ const char *board;
+ const struct da8xx_mstpri_priority *priorities;
+ size_t numprio;
+};
+
+/*
+ * Default memory settings of da850 do not meet the throughput/latency
+ * requirements of tilcdc. This results in the image displayed being
+ * incorrect and the following warning being displayed by the LCDC
+ * drm driver:
+ *
+ * tilcdc da8xx_lcdc.0: tilcdc_crtc_irq(0x00000020): FIFO underfow
+ */
+static const struct da8xx_mstpri_priority da850_lcdk_priorities[] = {
+ {
+ .which = DA8XX_MSTPRI_LCDC,
+ .val = 0,
+ },
+ {
+ .which = DA8XX_MSTPRI_EDMA30TC1,
+ .val = 0,
+ },
+ {
+ .which = DA8XX_MSTPRI_EDMA30TC0,
+ .val = 1,
+ },
+};
+
+static const struct da8xx_mstpri_board_priorities da8xx_mstpri_board_confs[] = {
+ {
+ .board = "ti,da850-lcdk",
+ .priorities = da850_lcdk_priorities,
+ .numprio = ARRAY_SIZE(da850_lcdk_priorities),
+ },
+};
+
+static const struct da8xx_mstpri_board_priorities *
+da8xx_mstpri_get_board_prio(void)
+{
+ const struct da8xx_mstpri_board_priorities *board_prio;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(da8xx_mstpri_board_confs); i++) {
+ board_prio = &da8xx_mstpri_board_confs[i];
+
+ if (of_machine_is_compatible(board_prio->board))
+ return board_prio;
+ }
+
+ return NULL;
+}
+
+static int da8xx_mstpri_probe(struct platform_device *pdev)
+{
+ const struct da8xx_mstpri_board_priorities *prio_list;
+ const struct da8xx_mstpri_descr *prio_descr;
+ const struct da8xx_mstpri_priority *prio;
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+ void __iomem *mstpri;
+ u32 reg;
+ int i;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ mstpri = devm_ioremap_resource(dev, res);
+ if (IS_ERR(mstpri)) {
+ dev_err(dev, "unable to map MSTPRI registers\n");
+ return PTR_ERR(mstpri);
+ }
+
+ prio_list = da8xx_mstpri_get_board_prio();
+ if (!prio_list) {
+ dev_err(dev, "no master priorities defined for this board\n");
+ return -EINVAL;
+ }
+
+ for (i = 0; i < prio_list->numprio; i++) {
+ prio = &prio_list->priorities[i];
+ prio_descr = &da8xx_mstpri_priority_list[prio->which];
+
+ if (prio_descr->reg + sizeof(u32) > resource_size(res)) {
+ dev_warn(dev, "register offset out of range\n");
+ continue;
+ }
+
+ reg = readl(mstpri + prio_descr->reg);
+ reg &= ~prio_descr->mask;
+ reg |= prio->val << prio_descr->shift;
+
+ writel(reg, mstpri + prio_descr->reg);
+ }
+
+ return 0;
+}
+
+static const struct of_device_id da8xx_mstpri_of_match[] = {
+ { .compatible = "ti,da850-mstpri", },
+ { },
+};
+
+static struct platform_driver da8xx_mstpri_driver = {
+ .probe = da8xx_mstpri_probe,
+ .driver = {
+ .name = "da8xx-mstpri",
+ .of_match_table = da8xx_mstpri_of_match,
+ },
+};
+module_platform_driver(da8xx_mstpri_driver);
+
+MODULE_AUTHOR("Bartosz Golaszewski <bgolaszewski@baylibre.com>");
+MODULE_DESCRIPTION("TI da8xx master peripheral priority driver");
+MODULE_LICENSE("GPL v2");
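As a worked example of the probe loop: the da850-lcdk entry for DA8XX_MSTPRI_LCDC selects MSTPRI2 (offset 8) with shift 28 and mask 0xf0000000, and .val = 0, so the read-modify-write clears bits 31:28 and programs the LCD controller to priority 0, the highest master priority on this SoC. The starting register value below is illustrative:

    reg  = readl(mstpri + DA8XX_MSTPRI2_OFFSET);  /* e.g. 0x44444444 */
    reg &= ~0xf0000000;                           /* ->   0x04444444 */
    reg |= 0 << 28;                               /* LCDC priority 0 */
    writel(reg, mstpri + DA8XX_MSTPRI2_OFFSET);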
diff --git a/drivers/bus/tegra-gmi.c b/drivers/bus/tegra-gmi.c
new file mode 100644
index 000000000000..a6570789f7af
--- /dev/null
+++ b/drivers/bus/tegra-gmi.c
@@ -0,0 +1,284 @@
+/*
+ * Driver for NVIDIA Generic Memory Interface
+ *
+ * Copyright (C) 2016 Host Mobility AB. All rights reserved.
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/reset.h>
+
+#define TEGRA_GMI_CONFIG 0x00
+#define TEGRA_GMI_CONFIG_GO BIT(31)
+#define TEGRA_GMI_BUS_WIDTH_32BIT BIT(30)
+#define TEGRA_GMI_MUX_MODE BIT(28)
+#define TEGRA_GMI_RDY_BEFORE_DATA BIT(24)
+#define TEGRA_GMI_RDY_ACTIVE_HIGH BIT(23)
+#define TEGRA_GMI_ADV_ACTIVE_HIGH BIT(22)
+#define TEGRA_GMI_OE_ACTIVE_HIGH BIT(21)
+#define TEGRA_GMI_CS_ACTIVE_HIGH BIT(20)
+#define TEGRA_GMI_CS_SELECT(x) ((x & 0x7) << 4)
+
+#define TEGRA_GMI_TIMING0 0x10
+#define TEGRA_GMI_MUXED_WIDTH(x) ((x & 0xf) << 12)
+#define TEGRA_GMI_HOLD_WIDTH(x) ((x & 0xf) << 8)
+#define TEGRA_GMI_ADV_WIDTH(x) ((x & 0xf) << 4)
+#define TEGRA_GMI_CE_WIDTH(x) (x & 0xf)
+
+#define TEGRA_GMI_TIMING1 0x14
+#define TEGRA_GMI_WE_WIDTH(x) ((x & 0xff) << 16)
+#define TEGRA_GMI_OE_WIDTH(x) ((x & 0xff) << 8)
+#define TEGRA_GMI_WAIT_WIDTH(x) (x & 0xff)
+
+#define TEGRA_GMI_MAX_CHIP_SELECT 8
+
+struct tegra_gmi {
+ struct device *dev;
+ void __iomem *base;
+ struct clk *clk;
+ struct reset_control *rst;
+
+ u32 snor_config;
+ u32 snor_timing0;
+ u32 snor_timing1;
+};
+
+static int tegra_gmi_enable(struct tegra_gmi *gmi)
+{
+ int err;
+
+ err = clk_prepare_enable(gmi->clk);
+ if (err < 0) {
+ dev_err(gmi->dev, "failed to enable clock: %d\n", err);
+ return err;
+ }
+
+ reset_control_assert(gmi->rst);
+ usleep_range(2000, 4000);
+ reset_control_deassert(gmi->rst);
+
+ writel(gmi->snor_timing0, gmi->base + TEGRA_GMI_TIMING0);
+ writel(gmi->snor_timing1, gmi->base + TEGRA_GMI_TIMING1);
+
+ gmi->snor_config |= TEGRA_GMI_CONFIG_GO;
+ writel(gmi->snor_config, gmi->base + TEGRA_GMI_CONFIG);
+
+ return 0;
+}
+
+static void tegra_gmi_disable(struct tegra_gmi *gmi)
+{
+ u32 config;
+
+ /* stop GMI operation */
+ config = readl(gmi->base + TEGRA_GMI_CONFIG);
+ config &= ~TEGRA_GMI_CONFIG_GO;
+ writel(config, gmi->base + TEGRA_GMI_CONFIG);
+
+ reset_control_assert(gmi->rst);
+ clk_disable_unprepare(gmi->clk);
+}
+
+static int tegra_gmi_parse_dt(struct tegra_gmi *gmi)
+{
+ struct device_node *child;
+ u32 property, ranges[4];
+ int err;
+
+ child = of_get_next_available_child(gmi->dev->of_node, NULL);
+ if (!child) {
+ dev_err(gmi->dev, "no child nodes found\n");
+ return -ENODEV;
+ }
+
+ /*
+ * We currently only support one child device due to lack of
+ * chip-select address decoding, which means we only have one
+ * chip-select line from the GMI controller.
+ */
+ if (of_get_child_count(gmi->dev->of_node) > 1)
+ dev_warn(gmi->dev, "only one child device is supported\n");
+
+ if (of_property_read_bool(child, "nvidia,snor-data-width-32bit"))
+ gmi->snor_config |= TEGRA_GMI_BUS_WIDTH_32BIT;
+
+ if (of_property_read_bool(child, "nvidia,snor-mux-mode"))
+ gmi->snor_config |= TEGRA_GMI_MUX_MODE;
+
+ if (of_property_read_bool(child, "nvidia,snor-rdy-active-before-data"))
+ gmi->snor_config |= TEGRA_GMI_RDY_BEFORE_DATA;
+
+ if (of_property_read_bool(child, "nvidia,snor-rdy-active-high"))
+ gmi->snor_config |= TEGRA_GMI_RDY_ACTIVE_HIGH;
+
+ if (of_property_read_bool(child, "nvidia,snor-adv-active-high"))
+ gmi->snor_config |= TEGRA_GMI_ADV_ACTIVE_HIGH;
+
+ if (of_property_read_bool(child, "nvidia,snor-oe-active-high"))
+ gmi->snor_config |= TEGRA_GMI_OE_ACTIVE_HIGH;
+
+ if (of_property_read_bool(child, "nvidia,snor-cs-active-high"))
+ gmi->snor_config |= TEGRA_GMI_CS_ACTIVE_HIGH;
+
+ /* Decode the CS# */
+ err = of_property_read_u32_array(child, "ranges", ranges, 4);
+ if (err < 0) {
+ /* Invalid binding */
+ if (err == -EOVERFLOW) {
+ dev_err(gmi->dev,
+ "failed to decode CS: invalid ranges length\n");
+ goto error_cs;
+ }
+
+ /*
+ * If we reach here, the child node either has an empty ranges
+ * property or none at all. Attempt to decode the
+ * CS# from the reg property instead.
+ */
+ err = of_property_read_u32(child, "reg", &property);
+ if (err < 0) {
+ dev_err(gmi->dev,
+ "failed to decode CS: no reg property found\n");
+ goto error_cs;
+ }
+ } else {
+ property = ranges[1];
+ }
+
+ /* Valid chip selects are CS0-CS7 */
+ if (property >= TEGRA_GMI_MAX_CHIP_SELECT) {
+ dev_err(gmi->dev, "invalid chip select: %u\n", property);
+ err = -EINVAL;
+ goto error_cs;
+ }
+
+ gmi->snor_config |= TEGRA_GMI_CS_SELECT(property);
+
+ /* The default values that are provided below are reset values */
+ if (!of_property_read_u32(child, "nvidia,snor-muxed-width", &property))
+ gmi->snor_timing0 |= TEGRA_GMI_MUXED_WIDTH(property);
+ else
+ gmi->snor_timing0 |= TEGRA_GMI_MUXED_WIDTH(1);
+
+ if (!of_property_read_u32(child, "nvidia,snor-hold-width", &property))
+ gmi->snor_timing0 |= TEGRA_GMI_HOLD_WIDTH(property);
+ else
+ gmi->snor_timing0 |= TEGRA_GMI_HOLD_WIDTH(1);
+
+ if (!of_property_read_u32(child, "nvidia,snor-adv-width", &property))
+ gmi->snor_timing0 |= TEGRA_GMI_ADV_WIDTH(property);
+ else
+ gmi->snor_timing0 |= TEGRA_GMI_ADV_WIDTH(1);
+
+ if (!of_property_read_u32(child, "nvidia,snor-ce-width", &property))
+ gmi->snor_timing0 |= TEGRA_GMI_CE_WIDTH(property);
+ else
+ gmi->snor_timing0 |= TEGRA_GMI_CE_WIDTH(4);
+
+ if (!of_property_read_u32(child, "nvidia,snor-we-width", &property))
+ gmi->snor_timing1 |= TEGRA_GMI_WE_WIDTH(property);
+ else
+ gmi->snor_timing1 |= TEGRA_GMI_WE_WIDTH(1);
+
+ if (!of_property_read_u32(child, "nvidia,snor-oe-width", &property))
+ gmi->snor_timing1 |= TEGRA_GMI_OE_WIDTH(property);
+ else
+ gmi->snor_timing1 |= TEGRA_GMI_OE_WIDTH(1);
+
+ if (!of_property_read_u32(child, "nvidia,snor-wait-width", &property))
+ gmi->snor_timing1 |= TEGRA_GMI_WAIT_WIDTH(property);
+ else
+ gmi->snor_timing1 |= TEGRA_GMI_WAIT_WIDTH(3);
+
+error_cs:
+ of_node_put(child);
+ return err;
+}
+
+static int tegra_gmi_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct tegra_gmi *gmi;
+ struct resource *res;
+ int err;
+
+ gmi = devm_kzalloc(dev, sizeof(*gmi), GFP_KERNEL);
+ if (!gmi)
+ return -ENOMEM;
+
+ gmi->dev = dev;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ gmi->base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(gmi->base))
+ return PTR_ERR(gmi->base);
+
+ gmi->clk = devm_clk_get(dev, "gmi");
+ if (IS_ERR(gmi->clk)) {
+ dev_err(dev, "can not get clock\n");
+ return PTR_ERR(gmi->clk);
+ }
+
+ gmi->rst = devm_reset_control_get(dev, "gmi");
+ if (IS_ERR(gmi->rst)) {
+ dev_err(dev, "can not get reset\n");
+ return PTR_ERR(gmi->rst);
+ }
+
+ err = tegra_gmi_parse_dt(gmi);
+ if (err)
+ return err;
+
+ err = tegra_gmi_enable(gmi);
+ if (err < 0)
+ return err;
+
+ err = of_platform_default_populate(dev->of_node, NULL, dev);
+ if (err < 0) {
+ dev_err(dev, "fail to create devices.\n");
+ tegra_gmi_disable(gmi);
+ return err;
+ }
+
+ platform_set_drvdata(pdev, gmi);
+
+ return 0;
+}
+
+static int tegra_gmi_remove(struct platform_device *pdev)
+{
+ struct tegra_gmi *gmi = platform_get_drvdata(pdev);
+
+ of_platform_depopulate(gmi->dev);
+ tegra_gmi_disable(gmi);
+
+ return 0;
+}
+
+static const struct of_device_id tegra_gmi_id_table[] = {
+ { .compatible = "nvidia,tegra20-gmi", },
+ { .compatible = "nvidia,tegra30-gmi", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, tegra_gmi_id_table);
+
+static struct platform_driver tegra_gmi_driver = {
+ .probe = tegra_gmi_probe,
+ .remove = tegra_gmi_remove,
+ .driver = {
+ .name = "tegra-gmi",
+ .of_match_table = tegra_gmi_id_table,
+ },
+};
+module_platform_driver(tegra_gmi_driver);
+
+MODULE_AUTHOR("Mirza Krak <mirza.krak@gmail.com");
+MODULE_DESCRIPTION("NVIDIA Tegra GMI Bus Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/bus/vexpress-config.c b/drivers/bus/vexpress-config.c
index 9efdf1de4035..493e7b9fc813 100644
--- a/drivers/bus/vexpress-config.c
+++ b/drivers/bus/vexpress-config.c
@@ -171,6 +171,7 @@ static int vexpress_config_populate(struct device_node *node)
{
struct device_node *bridge;
struct device *parent;
+ int ret;
bridge = of_parse_phandle(node, "arm,vexpress,config-bridge", 0);
if (!bridge)
@@ -182,7 +183,11 @@ static int vexpress_config_populate(struct device_node *node)
if (WARN_ON(!parent))
return -ENODEV;
- return of_platform_populate(node, NULL, NULL, parent);
+ ret = of_platform_populate(node, NULL, NULL, parent);
+
+ put_device(parent);
+
+ return ret;
}
static int __init vexpress_config_init(void)
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
index dcc09739a54e..fde005ef9d36 100644
--- a/drivers/char/Kconfig
+++ b/drivers/char/Kconfig
@@ -17,7 +17,6 @@ config DEVMEM
config DEVKMEM
bool "/dev/kmem virtual device support"
- default y
help
Say Y here if you want to support the /dev/kmem device. The
/dev/kmem device is rarely used, but can be used for certain
@@ -542,6 +541,7 @@ config HANGCHECK_TIMER
config MMTIMER
tristate "MMTIMER Memory mapped RTC for SGI Altix"
depends on IA64_GENERIC || IA64_SGI_SN2
+ depends on POSIX_TIMERS
default y
help
The mmtimer device allows direct userspace access to the
@@ -578,7 +578,7 @@ config DEVPORT
source "drivers/s390/char/Kconfig"
config TILE_SROM
- bool "Character-device access via hypervisor to the Tilera SPI ROM"
+ tristate "Character-device access via hypervisor to the Tilera SPI ROM"
depends on TILE
default y
---help---
diff --git a/drivers/char/agp/alpha-agp.c b/drivers/char/agp/alpha-agp.c
index 199b8e99f7d7..737187865269 100644
--- a/drivers/char/agp/alpha-agp.c
+++ b/drivers/char/agp/alpha-agp.c
@@ -19,8 +19,7 @@ static int alpha_core_agp_vm_fault(struct vm_area_struct *vma,
unsigned long pa;
struct page *page;
- dma_addr = (unsigned long)vmf->virtual_address - vma->vm_start
- + agp->aperture.bus_base;
+ dma_addr = vmf->address - vma->vm_start + agp->aperture.bus_base;
pa = agp->ops->translate(agp, dma_addr);
if (pa == (unsigned long)-EINVAL)
diff --git a/drivers/char/hw_random/Kconfig b/drivers/char/hw_random/Kconfig
index 200dab5136a7..ceff2fc524b1 100644
--- a/drivers/char/hw_random/Kconfig
+++ b/drivers/char/hw_random/Kconfig
@@ -168,7 +168,7 @@ config HW_RANDOM_IXP4XX
config HW_RANDOM_OMAP
tristate "OMAP Random Number Generator support"
- depends on ARCH_OMAP16XX || ARCH_OMAP2PLUS
+ depends on ARCH_OMAP16XX || ARCH_OMAP2PLUS || ARCH_MVEBU
default HW_RANDOM
---help---
This driver provides kernel-side support for the Random Number
diff --git a/drivers/char/hw_random/atmel-rng.c b/drivers/char/hw_random/atmel-rng.c
index 0fcc9e69a346..661c82cde0f2 100644
--- a/drivers/char/hw_random/atmel-rng.c
+++ b/drivers/char/hw_random/atmel-rng.c
@@ -48,6 +48,16 @@ static int atmel_trng_read(struct hwrng *rng, void *buf, size_t max,
return 0;
}
+static void atmel_trng_enable(struct atmel_trng *trng)
+{
+ writel(TRNG_KEY | 1, trng->base + TRNG_CR);
+}
+
+static void atmel_trng_disable(struct atmel_trng *trng)
+{
+ writel(TRNG_KEY, trng->base + TRNG_CR);
+}
+
static int atmel_trng_probe(struct platform_device *pdev)
{
struct atmel_trng *trng;
@@ -71,7 +81,7 @@ static int atmel_trng_probe(struct platform_device *pdev)
if (ret)
return ret;
- writel(TRNG_KEY | 1, trng->base + TRNG_CR);
+ atmel_trng_enable(trng);
trng->rng.name = pdev->name;
trng->rng.read = atmel_trng_read;
@@ -84,7 +94,7 @@ static int atmel_trng_probe(struct platform_device *pdev)
return 0;
err_register:
- clk_disable(trng->clk);
+ clk_disable_unprepare(trng->clk);
return ret;
}
@@ -94,7 +104,7 @@ static int atmel_trng_remove(struct platform_device *pdev)
hwrng_unregister(&trng->rng);
- writel(TRNG_KEY, trng->base + TRNG_CR);
+ atmel_trng_disable(trng);
clk_disable_unprepare(trng->clk);
return 0;
@@ -105,6 +115,7 @@ static int atmel_trng_suspend(struct device *dev)
{
struct atmel_trng *trng = dev_get_drvdata(dev);
+ atmel_trng_disable(trng);
clk_disable_unprepare(trng->clk);
return 0;
@@ -113,8 +124,15 @@ static int atmel_trng_suspend(struct device *dev)
static int atmel_trng_resume(struct device *dev)
{
struct atmel_trng *trng = dev_get_drvdata(dev);
+ int ret;
- return clk_prepare_enable(trng->clk);
+ ret = clk_prepare_enable(trng->clk);
+ if (ret)
+ return ret;
+
+ atmel_trng_enable(trng);
+
+ return 0;
}
static const struct dev_pm_ops atmel_trng_pm_ops = {
diff --git a/drivers/char/hw_random/core.c b/drivers/char/hw_random/core.c
index d2d2c89de5b4..f9766415ff10 100644
--- a/drivers/char/hw_random/core.c
+++ b/drivers/char/hw_random/core.c
@@ -92,6 +92,7 @@ static void add_early_randomness(struct hwrng *rng)
mutex_unlock(&reading_mutex);
if (bytes_read > 0)
add_device_randomness(rng_buffer, bytes_read);
+ memset(rng_buffer, 0, size);
}
static inline void cleanup_rng(struct kref *kref)
@@ -287,6 +288,7 @@ static ssize_t rng_dev_read(struct file *filp, char __user *buf,
}
}
out:
+ memset(rng_buffer, 0, rng_buffer_size());
return ret ? : err;
out_unlock_reading:
@@ -425,6 +427,7 @@ static int hwrng_fillfn(void *unused)
/* Outside lock, sure, but y'know: randomness. */
add_hwgenerator_randomness((void *)rng_fillbuf, rc,
rc * current_quality * 8 >> 10);
+ memset(rng_fillbuf, 0, rng_buffer_size());
}
hwrng_fill = NULL;
return 0;
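The three memset() additions above share one purpose: once bytes from rng_buffer or rng_fillbuf have been handed to the entropy pool or copied to userspace, the shared buffer is scrubbed so stale random data does not linger in kernel memory. The pattern in isolation:

    /* consume the data, then wipe the shared buffer */
    add_device_randomness(rng_buffer, bytes_read);
    memset(rng_buffer, 0, rng_buffer_size());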
diff --git a/drivers/char/hw_random/meson-rng.c b/drivers/char/hw_random/meson-rng.c
index 58bef39f7286..119d698439ae 100644
--- a/drivers/char/hw_random/meson-rng.c
+++ b/drivers/char/hw_random/meson-rng.c
@@ -110,6 +110,7 @@ static const struct of_device_id meson_rng_of_match[] = {
{ .compatible = "amlogic,meson-rng", },
{},
};
+MODULE_DEVICE_TABLE(of, meson_rng_of_match);
static struct platform_driver meson_rng_driver = {
.probe = meson_rng_probe,
@@ -121,7 +122,6 @@ static struct platform_driver meson_rng_driver = {
module_platform_driver(meson_rng_driver);
-MODULE_ALIAS("platform:meson-rng");
MODULE_DESCRIPTION("Meson H/W Random Number Generator driver");
MODULE_AUTHOR("Lawrence Mok <lawrence.mok@amlogic.com>");
MODULE_AUTHOR("Neil Armstrong <narmstrong@baylibre.com>");
diff --git a/drivers/char/hw_random/msm-rng.c b/drivers/char/hw_random/msm-rng.c
index 96fb986402eb..841fee845ec9 100644
--- a/drivers/char/hw_random/msm-rng.c
+++ b/drivers/char/hw_random/msm-rng.c
@@ -90,10 +90,6 @@ static int msm_rng_read(struct hwrng *hwrng, void *data, size_t max, bool wait)
/* calculate max size bytes to transfer back to caller */
maxsize = min_t(size_t, MAX_HW_FIFO_SIZE, max);
- /* no room for word data */
- if (maxsize < WORD_SZ)
- return 0;
-
ret = clk_prepare_enable(rng->clk);
if (ret)
return ret;
diff --git a/drivers/char/hw_random/omap-rng.c b/drivers/char/hw_random/omap-rng.c
index f5c26a5f6875..3ad86fdf954e 100644
--- a/drivers/char/hw_random/omap-rng.c
+++ b/drivers/char/hw_random/omap-rng.c
@@ -28,6 +28,7 @@
#include <linux/of_device.h>
#include <linux/of_address.h>
#include <linux/interrupt.h>
+#include <linux/clk.h>
#include <asm/io.h>
@@ -63,10 +64,13 @@
#define OMAP2_RNG_OUTPUT_SIZE 0x4
#define OMAP4_RNG_OUTPUT_SIZE 0x8
+#define EIP76_RNG_OUTPUT_SIZE 0x10
enum {
- RNG_OUTPUT_L_REG = 0,
- RNG_OUTPUT_H_REG,
+ RNG_OUTPUT_0_REG = 0,
+ RNG_OUTPUT_1_REG,
+ RNG_OUTPUT_2_REG,
+ RNG_OUTPUT_3_REG,
RNG_STATUS_REG,
RNG_INTMASK_REG,
RNG_INTACK_REG,
@@ -82,7 +86,7 @@ enum {
};
static const u16 reg_map_omap2[] = {
- [RNG_OUTPUT_L_REG] = 0x0,
+ [RNG_OUTPUT_0_REG] = 0x0,
[RNG_STATUS_REG] = 0x4,
[RNG_CONFIG_REG] = 0x28,
[RNG_REV_REG] = 0x3c,
@@ -90,8 +94,8 @@ static const u16 reg_map_omap2[] = {
};
static const u16 reg_map_omap4[] = {
- [RNG_OUTPUT_L_REG] = 0x0,
- [RNG_OUTPUT_H_REG] = 0x4,
+ [RNG_OUTPUT_0_REG] = 0x0,
+ [RNG_OUTPUT_1_REG] = 0x4,
[RNG_STATUS_REG] = 0x8,
[RNG_INTMASK_REG] = 0xc,
[RNG_INTACK_REG] = 0x10,
@@ -106,6 +110,23 @@ static const u16 reg_map_omap4[] = {
[RNG_SYSCONFIG_REG] = 0x1FE4,
};
+static const u16 reg_map_eip76[] = {
+ [RNG_OUTPUT_0_REG] = 0x0,
+ [RNG_OUTPUT_1_REG] = 0x4,
+ [RNG_OUTPUT_2_REG] = 0x8,
+ [RNG_OUTPUT_3_REG] = 0xc,
+ [RNG_STATUS_REG] = 0x10,
+ [RNG_INTACK_REG] = 0x10,
+ [RNG_CONTROL_REG] = 0x14,
+ [RNG_CONFIG_REG] = 0x18,
+ [RNG_ALARMCNT_REG] = 0x1c,
+ [RNG_FROENABLE_REG] = 0x20,
+ [RNG_FRODETUNE_REG] = 0x24,
+ [RNG_ALARMMASK_REG] = 0x28,
+ [RNG_ALARMSTOP_REG] = 0x2c,
+ [RNG_REV_REG] = 0x7c,
+};
+
struct omap_rng_dev;
/**
* struct omap_rng_pdata - RNG IP block-specific data
@@ -127,6 +148,8 @@ struct omap_rng_dev {
void __iomem *base;
struct device *dev;
const struct omap_rng_pdata *pdata;
+ struct hwrng rng;
+ struct clk *clk;
};
static inline u32 omap_rng_read(struct omap_rng_dev *priv, u16 reg)
@@ -140,41 +163,35 @@ static inline void omap_rng_write(struct omap_rng_dev *priv, u16 reg,
__raw_writel(val, priv->base + priv->pdata->regs[reg]);
}
-static int omap_rng_data_present(struct hwrng *rng, int wait)
+
+static int omap_rng_do_read(struct hwrng *rng, void *data, size_t max,
+ bool wait)
{
struct omap_rng_dev *priv;
- int data, i;
+ int i, present;
priv = (struct omap_rng_dev *)rng->priv;
+ if (max < priv->pdata->data_size)
+ return 0;
+
for (i = 0; i < 20; i++) {
- data = priv->pdata->data_present(priv);
- if (data || !wait)
+ present = priv->pdata->data_present(priv);
+ if (present || !wait)
break;
- /* RNG produces data fast enough (2+ MBit/sec, even
- * during "rngtest" loads, that these delays don't
- * seem to trigger. We *could* use the RNG IRQ, but
- * that'd be higher overhead ... so why bother?
- */
+
udelay(10);
}
- return data;
-}
-
-static int omap_rng_data_read(struct hwrng *rng, u32 *data)
-{
- struct omap_rng_dev *priv;
- u32 data_size, i;
-
- priv = (struct omap_rng_dev *)rng->priv;
- data_size = priv->pdata->data_size;
+ if (!present)
+ return 0;
- for (i = 0; i < data_size / sizeof(u32); i++)
- data[i] = omap_rng_read(priv, RNG_OUTPUT_L_REG + i);
+ memcpy_fromio(data, priv->base + priv->pdata->regs[RNG_OUTPUT_0_REG],
+ priv->pdata->data_size);
if (priv->pdata->regs[RNG_INTACK_REG])
omap_rng_write(priv, RNG_INTACK_REG, RNG_REG_INTACK_RDY_MASK);
- return data_size;
+
+ return priv->pdata->data_size;
}
static int omap_rng_init(struct hwrng *rng)
@@ -193,13 +210,6 @@ static void omap_rng_cleanup(struct hwrng *rng)
priv->pdata->cleanup(priv);
}
-static struct hwrng omap_rng_ops = {
- .name = "omap",
- .data_present = omap_rng_data_present,
- .data_read = omap_rng_data_read,
- .init = omap_rng_init,
- .cleanup = omap_rng_cleanup,
-};
static inline u32 omap2_rng_data_present(struct omap_rng_dev *priv)
{
@@ -231,6 +241,38 @@ static inline u32 omap4_rng_data_present(struct omap_rng_dev *priv)
return omap_rng_read(priv, RNG_STATUS_REG) & RNG_REG_STATUS_RDY;
}
+static int eip76_rng_init(struct omap_rng_dev *priv)
+{
+ u32 val;
+
+ /* Return if RNG is already running. */
+ if (omap_rng_read(priv, RNG_CONTROL_REG) & RNG_CONTROL_ENABLE_TRNG_MASK)
+ return 0;
+
+ /* Number of 512 bit blocks of raw Noise Source output data that must
+ * be processed by either the Conditioning Function or the
+ * SP 800-90 DRBG ‘BC_DF’ functionality to yield a ‘full entropy’
+ * output value.
+ */
+ val = 0x5 << RNG_CONFIG_MIN_REFIL_CYCLES_SHIFT;
+
+ /* Number of FRO samples that are XOR-ed together into one bit to be
+ * shifted into the main shift register
+ */
+ val |= RNG_CONFIG_MAX_REFIL_CYCLES << RNG_CONFIG_MAX_REFIL_CYCLES_SHIFT;
+ omap_rng_write(priv, RNG_CONFIG_REG, val);
+
+ /* Enable all available FROs */
+ omap_rng_write(priv, RNG_FRODETUNE_REG, 0x0);
+ omap_rng_write(priv, RNG_FROENABLE_REG, RNG_REG_FROENABLE_MASK);
+
+ /* Enable TRNG */
+ val = RNG_CONTROL_ENABLE_TRNG_MASK;
+ omap_rng_write(priv, RNG_CONTROL_REG, val);
+
+ return 0;
+}
+
static int omap4_rng_init(struct omap_rng_dev *priv)
{
u32 val;
@@ -300,6 +342,14 @@ static struct omap_rng_pdata omap4_rng_pdata = {
.cleanup = omap4_rng_cleanup,
};
+static struct omap_rng_pdata eip76_rng_pdata = {
+ .regs = (u16 *)reg_map_eip76,
+ .data_size = EIP76_RNG_OUTPUT_SIZE,
+ .data_present = omap4_rng_data_present,
+ .init = eip76_rng_init,
+ .cleanup = omap4_rng_cleanup,
+};
+
static const struct of_device_id omap_rng_of_match[] = {
{
.compatible = "ti,omap2-rng",
@@ -309,6 +359,10 @@ static const struct of_device_id omap_rng_of_match[] = {
.compatible = "ti,omap4-rng",
.data = &omap4_rng_pdata,
},
+ {
+ .compatible = "inside-secure,safexcel-eip76",
+ .data = &eip76_rng_pdata,
+ },
{},
};
MODULE_DEVICE_TABLE(of, omap_rng_of_match);
@@ -327,7 +381,8 @@ static int of_get_omap_rng_device_details(struct omap_rng_dev *priv,
}
priv->pdata = match->data;
- if (of_device_is_compatible(dev->of_node, "ti,omap4-rng")) {
+ if (of_device_is_compatible(dev->of_node, "ti,omap4-rng") ||
+ of_device_is_compatible(dev->of_node, "inside-secure,safexcel-eip76")) {
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
dev_err(dev, "%s: error getting IRQ resource - %d\n",
@@ -343,6 +398,16 @@ static int of_get_omap_rng_device_details(struct omap_rng_dev *priv,
return err;
}
omap_rng_write(priv, RNG_INTMASK_REG, RNG_SHUTDOWN_OFLO_MASK);
+
+ priv->clk = of_clk_get(pdev->dev.of_node, 0);
+ if (IS_ERR(priv->clk) && PTR_ERR(priv->clk) == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+ if (!IS_ERR(priv->clk)) {
+ err = clk_prepare_enable(priv->clk);
+ if (err)
+ dev_err(&pdev->dev, "unable to enable the clk, "
+ "err = %d\n", err);
+ }
}
return 0;
}
@@ -372,7 +437,11 @@ static int omap_rng_probe(struct platform_device *pdev)
if (!priv)
return -ENOMEM;
- omap_rng_ops.priv = (unsigned long)priv;
+ priv->rng.read = omap_rng_do_read;
+ priv->rng.init = omap_rng_init;
+ priv->rng.cleanup = omap_rng_cleanup;
+
+ priv->rng.priv = (unsigned long)priv;
platform_set_drvdata(pdev, priv);
priv->dev = dev;
@@ -383,6 +452,12 @@ static int omap_rng_probe(struct platform_device *pdev)
goto err_ioremap;
}
+ priv->rng.name = devm_kstrdup(dev, dev_name(dev), GFP_KERNEL);
+ if (!priv->rng.name) {
+ ret = -ENOMEM;
+ goto err_ioremap;
+ }
+
pm_runtime_enable(&pdev->dev);
ret = pm_runtime_get_sync(&pdev->dev);
if (ret < 0) {
@@ -394,20 +469,24 @@ static int omap_rng_probe(struct platform_device *pdev)
ret = (dev->of_node) ? of_get_omap_rng_device_details(priv, pdev) :
get_omap_rng_device_details(priv);
if (ret)
- goto err_ioremap;
+ goto err_register;
- ret = hwrng_register(&omap_rng_ops);
+ ret = hwrng_register(&priv->rng);
if (ret)
goto err_register;
- dev_info(&pdev->dev, "OMAP Random Number Generator ver. %02x\n",
+ dev_info(&pdev->dev, "Random Number Generator ver. %02x\n",
omap_rng_read(priv, RNG_REV_REG));
return 0;
err_register:
priv->base = NULL;
+ pm_runtime_put_sync(&pdev->dev);
pm_runtime_disable(&pdev->dev);
+
+ if (!IS_ERR(priv->clk))
+ clk_disable_unprepare(priv->clk);
err_ioremap:
dev_err(dev, "initialization failed.\n");
return ret;
@@ -417,13 +496,16 @@ static int omap_rng_remove(struct platform_device *pdev)
{
struct omap_rng_dev *priv = platform_get_drvdata(pdev);
- hwrng_unregister(&omap_rng_ops);
+ hwrng_unregister(&priv->rng);
priv->pdata->cleanup(priv);
pm_runtime_put_sync(&pdev->dev);
pm_runtime_disable(&pdev->dev);
+ if (!IS_ERR(priv->clk))
+ clk_disable_unprepare(priv->clk);
+
return 0;
}
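Beyond adding EIP76 support, the omap-rng diff above moves to the modern hwrng contract: a single .read callback that copies up to max bytes and returns the count replaces the .data_present/.data_read pair, and the struct hwrng is embedded in the driver's private data rather than being a file-scope singleton. A minimal sketch of that shape; all names here are illustrative:

    #include <linux/hw_random.h>

    static int my_rng_read(struct hwrng *rng, void *data, size_t max, bool wait)
    {
        /* fill data with up to max random bytes; return the number
         * produced, 0 if none are ready and !wait, or -errno */
        return 0;
    }

    static struct hwrng my_rng = {
        .name = "my-rng",
        .read = my_rng_read,
    };

    /* hwrng_register(&my_rng) at probe, hwrng_unregister() at remove */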
diff --git a/drivers/char/hw_random/pic32-rng.c b/drivers/char/hw_random/pic32-rng.c
index 11dc9b7c09ce..9b5e68a71d01 100644
--- a/drivers/char/hw_random/pic32-rng.c
+++ b/drivers/char/hw_random/pic32-rng.c
@@ -62,9 +62,6 @@ static int pic32_rng_read(struct hwrng *rng, void *buf, size_t max,
u32 t;
unsigned int timeout = RNG_TIMEOUT;
- if (max < 8)
- return 0;
-
do {
t = readl(priv->base + RNGRCNT) & RCNT_MASK;
if (t == 64) {
diff --git a/drivers/char/hw_random/pseries-rng.c b/drivers/char/hw_random/pseries-rng.c
index 63ce51d09af1..d9f46b437cc2 100644
--- a/drivers/char/hw_random/pseries-rng.c
+++ b/drivers/char/hw_random/pseries-rng.c
@@ -28,7 +28,6 @@
static int pseries_rng_read(struct hwrng *rng, void *data, size_t max, bool wait)
{
u64 buffer[PLPAR_HCALL_BUFSIZE];
- size_t size = max < 8 ? max : 8;
int rc;
rc = plpar_hcall(H_RANDOM, (unsigned long *)buffer);
@@ -36,10 +35,10 @@ static int pseries_rng_read(struct hwrng *rng, void *data, size_t max, bool wait
pr_err_ratelimited("H_RANDOM call failed %d\n", rc);
return -EIO;
}
- memcpy(data, buffer, size);
+ memcpy(data, buffer, 8);
/* The hypervisor interface returns 64 bits */
- return size;
+ return 8;
}
/**
diff --git a/drivers/char/hw_random/via-rng.c b/drivers/char/hw_random/via-rng.c
index 44ce80606944..d1f5bb534e0e 100644
--- a/drivers/char/hw_random/via-rng.c
+++ b/drivers/char/hw_random/via-rng.c
@@ -70,21 +70,17 @@ enum {
* until we have 4 bytes, thus returning a u32 at a time,
* instead of the current u8-at-a-time.
*
- * Padlock instructions can generate a spurious DNA fault, so
- * we have to call them in the context of irq_ts_save/restore()
+ * Padlock instructions can generate a spurious DNA fault, but the
+ * kernel doesn't use CR0.TS, so this doesn't matter.
*/
static inline u32 xstore(u32 *addr, u32 edx_in)
{
u32 eax_out;
- int ts_state;
-
- ts_state = irq_ts_save();
asm(".byte 0x0F,0xA7,0xC0 /* xstore %%edi (addr=%0) */"
: "=m" (*addr), "=a" (eax_out), "+d" (edx_in), "+D" (addr));
- irq_ts_restore(ts_state);
return eax_out;
}
diff --git a/drivers/char/ipmi/ipmi_devintf.c b/drivers/char/ipmi/ipmi_devintf.c
index 1786574536b2..a21407de46ae 100644
--- a/drivers/char/ipmi/ipmi_devintf.c
+++ b/drivers/char/ipmi/ipmi_devintf.c
@@ -989,4 +989,3 @@ module_exit(cleanup_ipmi);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Corey Minyard <minyard@mvista.com>");
MODULE_DESCRIPTION("Linux device interface for the IPMI message handler.");
-MODULE_ALIAS("platform:ipmi_si");
diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
index fcdd886819f5..92e53acf2cd2 100644
--- a/drivers/char/ipmi/ipmi_msghandler.c
+++ b/drivers/char/ipmi/ipmi_msghandler.c
@@ -158,15 +158,16 @@ struct seq_table {
* Store the information in a msgid (long) to allow us to find a
* sequence table entry from the msgid.
*/
-#define STORE_SEQ_IN_MSGID(seq, seqid) (((seq&0xff)<<26) | (seqid&0x3ffffff))
+#define STORE_SEQ_IN_MSGID(seq, seqid) \
+ ((((seq) & 0x3f) << 26) | ((seqid) & 0x3ffffff))
#define GET_SEQ_FROM_MSGID(msgid, seq, seqid) \
do { \
- seq = ((msgid >> 26) & 0x3f); \
- seqid = (msgid & 0x3fffff); \
+ seq = (((msgid) >> 26) & 0x3f); \
+ seqid = ((msgid) & 0x3ffffff); \
} while (0)
-#define NEXT_SEQID(seqid) (((seqid) + 1) & 0x3fffff)
+#define NEXT_SEQID(seqid) (((seqid) + 1) & 0x3ffffff)
struct ipmi_channel {
unsigned char medium;
@@ -4645,3 +4646,4 @@ MODULE_AUTHOR("Corey Minyard <minyard@mvista.com>");
MODULE_DESCRIPTION("Incoming and outgoing message routing for an IPMI"
" interface.");
MODULE_VERSION(IPMI_DRIVER_VERSION);
+MODULE_SOFTDEP("post: ipmi_devintf");
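The msgid macro fix above is clearer with the bit layout written out: seq occupies the top 6 bits of a 32-bit value and seqid the low 26, so the masks must be 0x3f and 0x3ffffff throughout. The old code masked seq with 0xff (8 bits, spilling past bit 31 after the shift) and extracted/wrapped seqid with 0x3fffff (22 bits), silently discarding 4 bits of sequence space. A worked example with the corrected macros:

    /* bits 31..26: seq (6 bits)    bits 25..0: seqid (26 bits) */
    long msgid = STORE_SEQ_IN_MSGID(0x2a, 0x123456);
    /* = ((0x2a & 0x3f) << 26) | (0x123456 & 0x3ffffff) = 0xa8123456 */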
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
index a112c0146012..2a7c425ddfa7 100644
--- a/drivers/char/ipmi/ipmi_si_intf.c
+++ b/drivers/char/ipmi/ipmi_si_intf.c
@@ -789,7 +789,7 @@ static void handle_transaction_done(struct smi_info *smi_info)
smi_info->si_state = SI_NORMAL;
break;
}
- start_getting_msg_queue(smi_info);
+ start_getting_events(smi_info);
} else {
smi_info->si_state = SI_NORMAL;
}
@@ -812,7 +812,7 @@ static void handle_transaction_done(struct smi_info *smi_info)
smi_info->si_state = SI_NORMAL;
break;
}
- start_getting_msg_queue(smi_info);
+ start_getting_events(smi_info);
} else {
smi_info->si_state = SI_NORMAL;
}
@@ -1764,7 +1764,7 @@ static int parse_str(const struct hotmod_vals *v, int *val, char *name,
s = strchr(*curr, ',');
if (!s) {
- printk(KERN_WARNING PFX "No hotmod %s given.\n", name);
+ pr_warn(PFX "No hotmod %s given.\n", name);
return -EINVAL;
}
*s = '\0';
@@ -1777,7 +1777,7 @@ static int parse_str(const struct hotmod_vals *v, int *val, char *name,
}
}
- printk(KERN_WARNING PFX "Invalid hotmod %s '%s'\n", name, *curr);
+ pr_warn(PFX "Invalid hotmod %s '%s'\n", name, *curr);
return -EINVAL;
}
@@ -1788,16 +1788,12 @@ static int check_hotmod_int_op(const char *curr, const char *option,
if (strcmp(curr, name) == 0) {
if (!option) {
- printk(KERN_WARNING PFX
- "No option given for '%s'\n",
- curr);
+ pr_warn(PFX "No option given for '%s'\n", curr);
return -EINVAL;
}
*val = simple_strtoul(option, &n, 0);
if ((*n != '\0') || (*option == '\0')) {
- printk(KERN_WARNING PFX
- "Bad option given for '%s'\n",
- curr);
+ pr_warn(PFX "Bad option given for '%s'\n", curr);
return -EINVAL;
}
return 1;
@@ -1877,8 +1873,7 @@ static int hotmod_handler(const char *val, struct kernel_param *kp)
}
addr = simple_strtoul(curr, &n, 0);
if ((*n != '\0') || (*curr == '\0')) {
- printk(KERN_WARNING PFX "Invalid hotmod address"
- " '%s'\n", curr);
+ pr_warn(PFX "Invalid hotmod address '%s'\n", curr);
break;
}
@@ -1921,9 +1916,7 @@ static int hotmod_handler(const char *val, struct kernel_param *kp)
continue;
rv = -EINVAL;
- printk(KERN_WARNING PFX
- "Invalid hotmod option '%s'\n",
- curr);
+ pr_warn(PFX "Invalid hotmod option '%s'\n", curr);
goto out;
}
@@ -2003,7 +1996,7 @@ static int hardcode_find_bmc(void)
return -ENOMEM;
info->addr_source = SI_HARDCODED;
- printk(KERN_INFO PFX "probing via hardcoded address\n");
+ pr_info(PFX "probing via hardcoded address\n");
if (!si_type[i] || strcmp(si_type[i], "kcs") == 0) {
info->si_type = SI_KCS;
@@ -2012,9 +2005,8 @@ static int hardcode_find_bmc(void)
} else if (strcmp(si_type[i], "bt") == 0) {
info->si_type = SI_BT;
} else {
- printk(KERN_WARNING PFX "Interface type specified "
- "for interface %d, was invalid: %s\n",
- i, si_type[i]);
+ pr_warn(PFX "Interface type specified for interface %d, was invalid: %s\n",
+ i, si_type[i]);
kfree(info);
continue;
}
@@ -2030,9 +2022,8 @@ static int hardcode_find_bmc(void)
info->io.addr_data = addrs[i];
info->io.addr_type = IPMI_MEM_ADDR_SPACE;
} else {
- printk(KERN_WARNING PFX "Interface type specified "
- "for interface %d, but port and address were "
- "not set or set to zero.\n", i);
+ pr_warn(PFX "Interface type specified for interface %d, but port and address were not set or set to zero.\n",
+ i);
kfree(info);
continue;
}
@@ -2173,18 +2164,18 @@ static int try_init_spmi(struct SPMITable *spmi)
int rv;
if (spmi->IPMIlegacy != 1) {
- printk(KERN_INFO PFX "Bad SPMI legacy %d\n", spmi->IPMIlegacy);
+ pr_info(PFX "Bad SPMI legacy %d\n", spmi->IPMIlegacy);
return -ENODEV;
}
info = smi_info_alloc();
if (!info) {
- printk(KERN_ERR PFX "Could not allocate SI data (3)\n");
+ pr_err(PFX "Could not allocate SI data (3)\n");
return -ENOMEM;
}
info->addr_source = SI_SPMI;
- printk(KERN_INFO PFX "probing via SPMI\n");
+ pr_info(PFX "probing via SPMI\n");
/* Figure out the interface type. */
switch (spmi->InterfaceType) {
@@ -2201,8 +2192,8 @@ static int try_init_spmi(struct SPMITable *spmi)
kfree(info);
return -EIO;
default:
- printk(KERN_INFO PFX "Unknown ACPI/SPMI SI type %d\n",
- spmi->InterfaceType);
+ pr_info(PFX "Unknown ACPI/SPMI SI type %d\n",
+ spmi->InterfaceType);
kfree(info);
return -EIO;
}
@@ -2238,15 +2229,15 @@ static int try_init_spmi(struct SPMITable *spmi)
info->io.addr_type = IPMI_IO_ADDR_SPACE;
} else {
kfree(info);
- printk(KERN_WARNING PFX "Unknown ACPI I/O Address type\n");
+ pr_warn(PFX "Unknown ACPI I/O Address type\n");
return -EIO;
}
info->io.addr_data = spmi->addr.address;
pr_info("ipmi_si: SPMI: %s %#lx regsize %d spacing %d irq %d\n",
- (info->io.addr_type == IPMI_IO_ADDR_SPACE) ? "io" : "mem",
- info->io.addr_data, info->io.regsize, info->io.regspacing,
- info->irq);
+ (info->io.addr_type == IPMI_IO_ADDR_SPACE) ? "io" : "mem",
+ info->io.addr_data, info->io.regsize, info->io.regspacing,
+ info->irq);
rv = add_smi(info);
if (rv)
@@ -2356,12 +2347,12 @@ static void try_init_dmi(struct dmi_ipmi_data *ipmi_data)
info = smi_info_alloc();
if (!info) {
- printk(KERN_ERR PFX "Could not allocate SI data\n");
+ pr_err(PFX "Could not allocate SI data\n");
return;
}
info->addr_source = SI_SMBIOS;
- printk(KERN_INFO PFX "probing via SMBIOS\n");
+ pr_info(PFX "probing via SMBIOS\n");
switch (ipmi_data->type) {
case 0x01: /* KCS */
@@ -2391,8 +2382,8 @@ static void try_init_dmi(struct dmi_ipmi_data *ipmi_data)
default:
kfree(info);
- printk(KERN_WARNING PFX "Unknown SMBIOS I/O Address type: %d\n",
- ipmi_data->addr_space);
+ pr_warn(PFX "Unknown SMBIOS I/O Address type: %d\n",
+ ipmi_data->addr_space);
return;
}
info->io.addr_data = ipmi_data->base_addr;
@@ -2410,9 +2401,9 @@ static void try_init_dmi(struct dmi_ipmi_data *ipmi_data)
info->irq_setup = std_irq_setup;
pr_info("ipmi_si: SMBIOS: %s %#lx regsize %d spacing %d irq %d\n",
- (info->io.addr_type == IPMI_IO_ADDR_SPACE) ? "io" : "mem",
- info->io.addr_data, info->io.regsize, info->io.regspacing,
- info->irq);
+ (info->io.addr_type == IPMI_IO_ADDR_SPACE) ? "io" : "mem",
+ info->io.addr_data, info->io.regsize, info->io.regspacing,
+ info->irq);
if (add_smi(info))
kfree(info);
@@ -3141,9 +3132,7 @@ static int try_enable_event_buffer(struct smi_info *smi_info)
rv = wait_for_msg_done(smi_info);
if (rv) {
- printk(KERN_WARNING PFX "Error getting response from get"
- " global enables command, the event buffer is not"
- " enabled.\n");
+ pr_warn(PFX "Error getting response from get global enables command, the event buffer is not enabled.\n");
goto out;
}
@@ -3154,8 +3143,7 @@ static int try_enable_event_buffer(struct smi_info *smi_info)
resp[0] != (IPMI_NETFN_APP_REQUEST | 1) << 2 ||
resp[1] != IPMI_GET_BMC_GLOBAL_ENABLES_CMD ||
resp[2] != 0) {
- printk(KERN_WARNING PFX "Invalid return from get global"
- " enables command, cannot enable the event buffer.\n");
+ pr_warn(PFX "Invalid return from get global enables command, cannot enable the event buffer.\n");
rv = -EINVAL;
goto out;
}
@@ -3173,9 +3161,7 @@ static int try_enable_event_buffer(struct smi_info *smi_info)
rv = wait_for_msg_done(smi_info);
if (rv) {
- printk(KERN_WARNING PFX "Error getting response from set"
- " global, enables command, the event buffer is not"
- " enabled.\n");
+ pr_warn(PFX "Error getting response from set global, enables command, the event buffer is not enabled.\n");
goto out;
}
@@ -3185,8 +3171,7 @@ static int try_enable_event_buffer(struct smi_info *smi_info)
if (resp_len < 3 ||
resp[0] != (IPMI_NETFN_APP_REQUEST | 1) << 2 ||
resp[1] != IPMI_SET_BMC_GLOBAL_ENABLES_CMD) {
- printk(KERN_WARNING PFX "Invalid return from get global,"
- "enables command, not enable the event buffer.\n");
+ pr_warn(PFX "Invalid return from get global, enables command, not enable the event buffer.\n");
rv = -EINVAL;
goto out;
}
@@ -3463,8 +3448,16 @@ static int is_new_interface(struct smi_info *info)
list_for_each_entry(e, &smi_infos, link) {
if (e->io.addr_type != info->io.addr_type)
continue;
- if (e->io.addr_data == info->io.addr_data)
+ if (e->io.addr_data == info->io.addr_data) {
+ /*
+ * This is a cheap hack; ACPI doesn't have a defined
+ * slave address but SMBIOS does. Pick it up from
+ * any source that has it available.
+ */
+ if (info->slave_addr && !e->slave_addr)
+ e->slave_addr = info->slave_addr;
return 0;
+ }
}
return 1;
@@ -3474,17 +3467,18 @@ static int add_smi(struct smi_info *new_smi)
{
int rv = 0;
- printk(KERN_INFO PFX "Adding %s-specified %s state machine",
- ipmi_addr_src_to_str(new_smi->addr_source),
- si_to_str[new_smi->si_type]);
mutex_lock(&smi_infos_lock);
if (!is_new_interface(new_smi)) {
- printk(KERN_CONT " duplicate interface\n");
+ pr_info(PFX "%s-specified %s state machine: duplicate\n",
+ ipmi_addr_src_to_str(new_smi->addr_source),
+ si_to_str[new_smi->si_type]);
rv = -EBUSY;
goto out_err;
}
- printk(KERN_CONT "\n");
+ pr_info(PFX "Adding %s-specified %s state machine\n",
+ ipmi_addr_src_to_str(new_smi->addr_source),
+ si_to_str[new_smi->si_type]);
/* So we know not to free it unless we have allocated one. */
new_smi->intf = NULL;
@@ -3502,15 +3496,14 @@ static int try_smi_init(struct smi_info *new_smi)
{
int rv = 0;
int i;
+ char *init_name = NULL;
- printk(KERN_INFO PFX "Trying %s-specified %s state"
- " machine at %s address 0x%lx, slave address 0x%x,"
- " irq %d\n",
- ipmi_addr_src_to_str(new_smi->addr_source),
- si_to_str[new_smi->si_type],
- addr_space_to_str[new_smi->io.addr_type],
- new_smi->io.addr_data,
- new_smi->slave_addr, new_smi->irq);
+ pr_info(PFX "Trying %s-specified %s state machine at %s address 0x%lx, slave address 0x%x, irq %d\n",
+ ipmi_addr_src_to_str(new_smi->addr_source),
+ si_to_str[new_smi->si_type],
+ addr_space_to_str[new_smi->io.addr_type],
+ new_smi->io.addr_data,
+ new_smi->slave_addr, new_smi->irq);
switch (new_smi->si_type) {
case SI_KCS:
@@ -3531,11 +3524,30 @@ static int try_smi_init(struct smi_info *new_smi)
goto out_err;
}
+ /* Do this early so it's available for logs. */
+ if (!new_smi->dev) {
+ init_name = kasprintf(GFP_KERNEL, "ipmi_si.%d", 0);
+
+ /*
+ * If we don't already have a device from something
+ * else (like PCI), then register a new one.
+ */
+ new_smi->pdev = platform_device_alloc("ipmi_si",
+ new_smi->intf_num);
+ if (!new_smi->pdev) {
+ pr_err(PFX "Unable to allocate platform device\n");
+ goto out_err;
+ }
+ new_smi->dev = &new_smi->pdev->dev;
+ new_smi->dev->driver = &ipmi_driver.driver;
+ /* Nulled by device_add() */
+ new_smi->dev->init_name = init_name;
+ }
+
/* Allocate the state machine's data and initialize it. */
new_smi->si_sm = kmalloc(new_smi->handlers->size(), GFP_KERNEL);
if (!new_smi->si_sm) {
- printk(KERN_ERR PFX
- "Could not allocate state machine memory\n");
+ pr_err(PFX "Could not allocate state machine memory\n");
rv = -ENOMEM;
goto out_err;
}
@@ -3545,14 +3557,14 @@ static int try_smi_init(struct smi_info *new_smi)
/* Now that we know the I/O size, we can set up the I/O. */
rv = new_smi->io_setup(new_smi);
if (rv) {
- printk(KERN_ERR PFX "Could not set up I/O space\n");
+ dev_err(new_smi->dev, "Could not set up I/O space\n");
goto out_err;
}
/* Do low-level detection first. */
if (new_smi->handlers->detect(new_smi->si_sm)) {
if (new_smi->addr_source)
- printk(KERN_INFO PFX "Interface detection failed\n");
+ dev_err(new_smi->dev, "Interface detection failed\n");
rv = -ENODEV;
goto out_err;
}
@@ -3564,8 +3576,7 @@ static int try_smi_init(struct smi_info *new_smi)
rv = try_get_dev_id(new_smi);
if (rv) {
if (new_smi->addr_source)
- printk(KERN_INFO PFX "There appears to be no BMC"
- " at this location\n");
+ dev_err(new_smi->dev, "There appears to be no BMC at this location\n");
goto out_err;
}
@@ -3604,27 +3615,12 @@ static int try_smi_init(struct smi_info *new_smi)
atomic_set(&new_smi->req_events, 1);
}
- if (!new_smi->dev) {
- /*
- * If we don't already have a device from something
- * else (like PCI), then register a new one.
- */
- new_smi->pdev = platform_device_alloc("ipmi_si",
- new_smi->intf_num);
- if (!new_smi->pdev) {
- printk(KERN_ERR PFX
- "Unable to allocate platform device\n");
- goto out_err;
- }
- new_smi->dev = &new_smi->pdev->dev;
- new_smi->dev->driver = &ipmi_driver.driver;
-
+ if (new_smi->pdev) {
rv = platform_device_add(new_smi->pdev);
if (rv) {
- printk(KERN_ERR PFX
- "Unable to register system interface device:"
- " %d\n",
- rv);
+ dev_err(new_smi->dev,
+ "Unable to register system interface device: %d\n",
+ rv);
goto out_err;
}
new_smi->dev_registered = true;
@@ -3668,6 +3664,9 @@ static int try_smi_init(struct smi_info *new_smi)
dev_info(new_smi->dev, "IPMI %s interface initialized\n",
si_to_str[new_smi->si_type]);
+ WARN_ON(new_smi->dev->init_name != NULL);
+ kfree(init_name);
+
return 0;
out_err_stop_timer:
@@ -3712,8 +3711,14 @@ out_err:
if (new_smi->dev_registered) {
platform_device_unregister(new_smi->pdev);
new_smi->dev_registered = false;
+ new_smi->pdev = NULL;
+ } else if (new_smi->pdev) {
+ platform_device_put(new_smi->pdev);
+ new_smi->pdev = NULL;
}
+ kfree(init_name);
+
return rv;
}
@@ -3732,8 +3737,7 @@ static int init_ipmi_si(void)
if (si_tryplatform) {
rv = platform_driver_register(&ipmi_driver);
if (rv) {
- printk(KERN_ERR PFX "Unable to register "
- "driver: %d\n", rv);
+ pr_err(PFX "Unable to register driver: %d\n", rv);
return rv;
}
}
@@ -3753,7 +3757,7 @@ static int init_ipmi_si(void)
}
}
- printk(KERN_INFO "IPMI System Interface driver.\n");
+ pr_info("IPMI System Interface driver.\n");
/* If the user gave us a device, they presumably want us to use it */
if (!hardcode_find_bmc())
@@ -3763,8 +3767,7 @@ static int init_ipmi_si(void)
if (si_trypci) {
rv = pci_register_driver(&ipmi_pci_driver);
if (rv)
- printk(KERN_ERR PFX "Unable to register "
- "PCI driver: %d\n", rv);
+ pr_err(PFX "Unable to register PCI driver: %d\n", rv);
else
pci_registered = true;
}
@@ -3826,8 +3829,7 @@ static int init_ipmi_si(void)
if (unload_when_empty && list_empty(&smi_infos)) {
mutex_unlock(&smi_infos_lock);
cleanup_ipmi_si();
- printk(KERN_WARNING PFX
- "Unable to find any System Interface(s)\n");
+ pr_warn(PFX "Unable to find any System Interface(s)\n");
return -ENODEV;
} else {
mutex_unlock(&smi_infos_lock);
diff --git a/drivers/char/ipmi/ipmi_ssif.c b/drivers/char/ipmi/ipmi_ssif.c
index 5673ffff00be..cca6e5bc1cea 100644
--- a/drivers/char/ipmi/ipmi_ssif.c
+++ b/drivers/char/ipmi/ipmi_ssif.c
@@ -174,7 +174,6 @@ enum ssif_stat_indexes {
};
struct ssif_addr_info {
- unsigned short addr;
struct i2c_board_info binfo;
char *adapter_name;
int debug;
@@ -1154,10 +1153,6 @@ static bool ssif_dbg_probe;
module_param_named(dbg_probe, ssif_dbg_probe, bool, 0);
MODULE_PARM_DESC(dbg_probe, "Enable debugging of probing of adapters.");
-static int use_thread;
-module_param(use_thread, int, 0);
-MODULE_PARM_DESC(use_thread, "Use the thread interface.");
-
static bool ssif_tryacpi = true;
module_param_named(tryacpi, ssif_tryacpi, bool, 0);
MODULE_PARM_DESC(tryacpi, "Setting this to zero will disable the default scan of the interfaces identified via ACPI");
@@ -1405,6 +1400,34 @@ static bool check_acpi(struct ssif_info *ssif_info, struct device *dev)
return false;
}
+static int find_slave_address(struct i2c_client *client, int slave_addr)
+{
+ struct ssif_addr_info *info;
+
+ if (slave_addr)
+ return slave_addr;
+
+ /*
+ * We came in without a slave address; search the other
+ * sources to see if any of them has one. This lets us pick
+ * up an SMBIOS slave address when using ACPI.
+ */
+ list_for_each_entry(info, &ssif_infos, link) {
+ if (info->binfo.addr != client->addr)
+ continue;
+ if (info->adapter_name && client->adapter->name &&
+ strcmp_nospace(info->adapter_name,
+ client->adapter->name))
+ continue;
+ if (info->slave_addr) {
+ slave_addr = info->slave_addr;
+ break;
+ }
+ }
+
+ return slave_addr;
+}
+
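find_slave_address() compares adapter names with strcmp_nospace(), a helper defined elsewhere in this driver. A hedged reconstruction of a whitespace-insensitive comparison with the same intent (my sketch, not the driver's exact code):

#include <linux/ctype.h>

/* Returns 0 when the strings match once whitespace is skipped. */
static int strcmp_nospace_sketch(const char *s1, const char *s2)
{
	for (;;) {
		while (isspace(*s1))
			s1++;
		while (isspace(*s2))
			s2++;
		if (*s1 != *s2)
			return *s1 - *s2;
		if (!*s1)
			return 0;
		s1++;
		s2++;
	}
}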
/*
* Global enables we care about.
*/
@@ -1447,6 +1470,8 @@ static int ssif_probe(struct i2c_client *client, const struct i2c_device_id *id)
}
}
+ slave_addr = find_slave_address(client, slave_addr);
+
pr_info(PFX "Trying %s-specified SSIF interface at i2c address 0x%x, adapter %s, slave address 0x%x\n",
ipmi_addr_src_to_str(ssif_info->addr_source),
client->addr, client->adapter->name, slave_addr);
@@ -1935,7 +1960,7 @@ static int decode_dmi(const struct dmi_device *dmi_dev)
slave_addr = data[6];
}
- return new_ssif_client(myaddr, NULL, 0, 0, SI_SMBIOS);
+ return new_ssif_client(myaddr, NULL, 0, slave_addr, SI_SMBIOS);
}
static void dmi_iterator(void)
diff --git a/drivers/char/mspec.c b/drivers/char/mspec.c
index f3f92d5fcda0..a697ca0cab1e 100644
--- a/drivers/char/mspec.c
+++ b/drivers/char/mspec.c
@@ -227,7 +227,7 @@ mspec_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
* be because another thread has installed the pte first, so it
* is no problem.
*/
- vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
+ vm_insert_pfn(vma, vmf->address, pfn);
return VM_FAULT_NOPAGE;
}
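The one-liner above tracks the vm_fault rework in which the fault address became an unsigned long ->address field instead of the old void * ->virtual_address. A minimal fault handler against the new layout (demo_pfn_for() is a hypothetical device-specific lookup):

static int demo_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	unsigned long pfn = demo_pfn_for(vmf->pgoff);	/* hypothetical */

	/* ->address replaces the old (unsigned long)vmf->virtual_address */
	vm_insert_pfn(vma, vmf->address, pfn);
	return VM_FAULT_NOPAGE;
}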
diff --git a/drivers/char/pcmcia/Kconfig b/drivers/char/pcmcia/Kconfig
index 8d3dfb0c8a26..1d1e7da8ad27 100644
--- a/drivers/char/pcmcia/Kconfig
+++ b/drivers/char/pcmcia/Kconfig
@@ -43,6 +43,17 @@ config CARDMAN_4040
(http://www.omnikey.com/), or a current development version of OpenCT
(http://www.opensc-project.org/opensc).
+config SCR24X
+ tristate "SCR24x Chip Card Interface support"
+ depends on PCMCIA
+ help
+ Enable support for the SCR24x PCMCIA Chip Card Interface.
+
+ To compile this driver as a module, choose M here.
+ The module will be called scr24x_cs.
+
+ If unsure, say N.
+
config IPWIRELESS
tristate "IPWireless 3G UMTS PCMCIA card support"
depends on PCMCIA && NETDEVICES && TTY
diff --git a/drivers/char/pcmcia/Makefile b/drivers/char/pcmcia/Makefile
index 0aae20985d57..5b836bc21406 100644
--- a/drivers/char/pcmcia/Makefile
+++ b/drivers/char/pcmcia/Makefile
@@ -7,3 +7,4 @@
obj-$(CONFIG_SYNCLINK_CS) += synclink_cs.o
obj-$(CONFIG_CARDMAN_4000) += cm4000_cs.o
obj-$(CONFIG_CARDMAN_4040) += cm4040_cs.o
+obj-$(CONFIG_SCR24X) += scr24x_cs.o
diff --git a/drivers/char/pcmcia/cm4000_cs.c b/drivers/char/pcmcia/cm4000_cs.c
index c115217c79ae..e051fc8aa7d7 100644
--- a/drivers/char/pcmcia/cm4000_cs.c
+++ b/drivers/char/pcmcia/cm4000_cs.c
@@ -14,7 +14,7 @@
* (C) 2000,2001,2002,2003,2004 Omnikey AG
*
* (C) 2005-2006 Harald Welte <laforge@gnumonks.org>
- * - Adhere to Kernel CodingStyle
+ * - Adhere to Kernel process/coding-style.rst
* - Port to 2.6.13 "new" style PCMCIA
* - Check for copy_{from,to}_user return values
* - Use nonseekable_open()
@@ -151,7 +151,7 @@ static struct pcmcia_device *dev_table[CM4000_MAX_DEV];
static struct class *cmm_class;
/* This table doesn't use spaces after the comma between fields and thus
- * violates CodingStyle. However, I don't really think wrapping it around will
+ * violates process/coding-style.rst. However, I don't really think wrapping it around will
* make it any clearer to read -HW */
static unsigned char fi_di_table[10][14] = {
/*FI 00 01 02 03 04 05 06 07 08 09 10 11 12 13 */
diff --git a/drivers/char/pcmcia/scr24x_cs.c b/drivers/char/pcmcia/scr24x_cs.c
new file mode 100644
index 000000000000..f6b43d9350f0
--- /dev/null
+++ b/drivers/char/pcmcia/scr24x_cs.c
@@ -0,0 +1,373 @@
+/*
+ * SCR24x PCMCIA Smart Card Reader Driver
+ *
+ * Copyright (C) 2005-2006 TL Sudheendran
+ * Copyright (C) 2016 Lubomir Rintel
+ *
+ * Derived from "scr24x_v4.2.6_Release.tar.gz" driver by TL Sudheendran.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; see the file COPYING. If not, write to
+ * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/cdev.h>
+#include <linux/slab.h>
+#include <linux/fs.h>
+#include <linux/io.h>
+#include <linux/uaccess.h>
+
+#include <pcmcia/cistpl.h>
+#include <pcmcia/ds.h>
+
+#define CCID_HEADER_SIZE 10
+#define CCID_LENGTH_OFFSET 1
+#define CCID_MAX_LEN 271
+
+#define SCR24X_DATA(n) (1 + n)
+#define SCR24X_CMD_STATUS 7
+#define CMD_START 0x40
+#define CMD_WRITE_BYTE 0x41
+#define CMD_READ_BYTE 0x42
+#define STATUS_BUSY 0x80
+
+struct scr24x_dev {
+ struct device *dev;
+ struct cdev c_dev;
+ unsigned char buf[CCID_MAX_LEN];
+ int devno;
+ struct mutex lock;
+ struct kref refcnt;
+ u8 __iomem *regs;
+};
+
+#define SCR24X_DEVS 8
+static DECLARE_BITMAP(scr24x_minors, SCR24X_DEVS);
+
+static struct class *scr24x_class;
+static dev_t scr24x_devt;
+
+static void scr24x_delete(struct kref *kref)
+{
+ struct scr24x_dev *dev = container_of(kref, struct scr24x_dev,
+ refcnt);
+
+ kfree(dev);
+}
+
+static int scr24x_wait_ready(struct scr24x_dev *dev)
+{
+ u_char status;
+ int timeout = 100;
+
+ do {
+ status = ioread8(dev->regs + SCR24X_CMD_STATUS);
+ if (!(status & STATUS_BUSY))
+ return 0;
+
+ msleep(20);
+ } while (--timeout);
+
+ return -EIO;
+}
+
+static int scr24x_open(struct inode *inode, struct file *filp)
+{
+ struct scr24x_dev *dev = container_of(inode->i_cdev,
+ struct scr24x_dev, c_dev);
+
+ kref_get(&dev->refcnt);
+ filp->private_data = dev;
+
+ return nonseekable_open(inode, filp);
+}
+
+static int scr24x_release(struct inode *inode, struct file *filp)
+{
+ struct scr24x_dev *dev = filp->private_data;
+
+ /* We must not take the dev->lock here as scr24x_delete()
+ * might be called to remove the dev structure altogether.
+ * We don't need the lock anyway, since after the reference
+ * acquired in probe() is released in remove() the chrdev
+ * is already unregistered and no one can possibly acquire
+ * a reference via open() anymore. */
+ kref_put(&dev->refcnt, scr24x_delete);
+ return 0;
+}
+
+static int read_chunk(struct scr24x_dev *dev, size_t offset, size_t limit)
+{
+ size_t i, y;
+ int ret;
+
+ for (i = offset; i < limit; i += 5) {
+ iowrite8(CMD_READ_BYTE, dev->regs + SCR24X_CMD_STATUS);
+ ret = scr24x_wait_ready(dev);
+ if (ret < 0)
+ return ret;
+
+ for (y = 0; y < 5 && i + y < limit; y++)
+ dev->buf[i + y] = ioread8(dev->regs + SCR24X_DATA(y));
+ }
+
+ return 0;
+}
+
+static ssize_t scr24x_read(struct file *filp, char __user *buf, size_t count,
+ loff_t *ppos)
+{
+ struct scr24x_dev *dev = filp->private_data;
+ int ret;
+ int len;
+
+ if (count < CCID_HEADER_SIZE)
+ return -EINVAL;
+
+ if (mutex_lock_interruptible(&dev->lock))
+ return -ERESTARTSYS;
+
+ if (!dev->dev) {
+ ret = -ENODEV;
+ goto out;
+ }
+
+ ret = scr24x_wait_ready(dev);
+ if (ret < 0)
+ goto out;
+ len = CCID_HEADER_SIZE;
+ ret = read_chunk(dev, 0, len);
+ if (ret < 0)
+ goto out;
+
+ len += le32_to_cpu(*(__le32 *)(&dev->buf[CCID_LENGTH_OFFSET]));
+ if (len > sizeof(dev->buf)) {
+ ret = -EIO;
+ goto out;
+ }
+ ret = read_chunk(dev, CCID_HEADER_SIZE, len);
+ if (ret < 0)
+ goto out;
+
+ if (len < count)
+ count = len;
+
+ if (copy_to_user(buf, dev->buf, count)) {
+ ret = -EFAULT;
+ goto out;
+ }
+
+ ret = count;
+out:
+ mutex_unlock(&dev->lock);
+ return ret;
+}
+
+static ssize_t scr24x_write(struct file *filp, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct scr24x_dev *dev = filp->private_data;
+ size_t i, y;
+ int ret;
+
+ if (mutex_lock_interruptible(&dev->lock))
+ return -ERESTARTSYS;
+
+ if (!dev->dev) {
+ ret = -ENODEV;
+ goto out;
+ }
+
+ if (count > sizeof(dev->buf)) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (copy_from_user(dev->buf, buf, count)) {
+ ret = -EFAULT;
+ goto out;
+ }
+
+ ret = scr24x_wait_ready(dev);
+ if (ret < 0)
+ goto out;
+
+ iowrite8(CMD_START, dev->regs + SCR24X_CMD_STATUS);
+ ret = scr24x_wait_ready(dev);
+ if (ret < 0)
+ goto out;
+
+ for (i = 0; i < count; i += 5) {
+ for (y = 0; y < 5 && i + y < count; y++)
+ iowrite8(dev->buf[i + y], dev->regs + SCR24X_DATA(y));
+
+ iowrite8(CMD_WRITE_BYTE, dev->regs + SCR24X_CMD_STATUS);
+ ret = scr24x_wait_ready(dev);
+ if (ret < 0)
+ goto out;
+ }
+
+ ret = count;
+out:
+ mutex_unlock(&dev->lock);
+ return ret;
+}
+
+static const struct file_operations scr24x_fops = {
+ .owner = THIS_MODULE,
+ .read = scr24x_read,
+ .write = scr24x_write,
+ .open = scr24x_open,
+ .release = scr24x_release,
+ .llseek = no_llseek,
+};
+
+static int scr24x_config_check(struct pcmcia_device *link, void *priv_data)
+{
+ if (resource_size(link->resource[PCMCIA_IOPORT_0]) != 0x11)
+ return -ENODEV;
+ return pcmcia_request_io(link);
+}
+
+static int scr24x_probe(struct pcmcia_device *link)
+{
+ struct scr24x_dev *dev;
+ int ret;
+
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ if (!dev)
+ return -ENOMEM;
+
+ dev->devno = find_first_zero_bit(scr24x_minors, SCR24X_DEVS);
+ if (dev->devno >= SCR24X_DEVS) {
+ ret = -EBUSY;
+ goto err;
+ }
+
+ mutex_init(&dev->lock);
+ kref_init(&dev->refcnt);
+
+ link->priv = dev;
+ link->config_flags |= CONF_ENABLE_IRQ | CONF_AUTO_SET_IO;
+
+ ret = pcmcia_loop_config(link, scr24x_config_check, NULL);
+ if (ret < 0)
+ goto err;
+
+ dev->dev = &link->dev;
+ dev->regs = devm_ioport_map(&link->dev,
+ link->resource[PCMCIA_IOPORT_0]->start,
+ resource_size(link->resource[PCMCIA_IOPORT_0]));
+ if (!dev->regs) {
+ ret = -EIO;
+ goto err;
+ }
+
+ cdev_init(&dev->c_dev, &scr24x_fops);
+ dev->c_dev.owner = THIS_MODULE;
+ dev->c_dev.ops = &scr24x_fops;
+ ret = cdev_add(&dev->c_dev, MKDEV(MAJOR(scr24x_devt), dev->devno), 1);
+ if (ret < 0)
+ goto err;
+
+ ret = pcmcia_enable_device(link);
+ if (ret < 0) {
+ pcmcia_disable_device(link);
+ goto err;
+ }
+
+ device_create(scr24x_class, NULL, MKDEV(MAJOR(scr24x_devt), dev->devno),
+ NULL, "scr24x%d", dev->devno);
+
+ dev_info(&link->dev, "SCR24x Chip Card Interface\n");
+ return 0;
+
+err:
+ if (dev->devno < SCR24X_DEVS)
+ clear_bit(dev->devno, scr24x_minors);
+ kfree(dev);
+ return ret;
+}
+
+static void scr24x_remove(struct pcmcia_device *link)
+{
+ struct scr24x_dev *dev = (struct scr24x_dev *)link->priv;
+
+ device_destroy(scr24x_class, MKDEV(MAJOR(scr24x_devt), dev->devno));
+ mutex_lock(&dev->lock);
+ pcmcia_disable_device(link);
+ cdev_del(&dev->c_dev);
+ clear_bit(dev->devno, scr24x_minors);
+ dev->dev = NULL;
+ mutex_unlock(&dev->lock);
+
+ kref_put(&dev->refcnt, scr24x_delete);
+}
+
+static const struct pcmcia_device_id scr24x_ids[] = {
+ PCMCIA_DEVICE_PROD_ID12("HP", "PC Card Smart Card Reader",
+ 0x53cb94f9, 0xbfdf89a5),
+ PCMCIA_DEVICE_PROD_ID1("SCR241 PCMCIA", 0x6271efa3),
+ PCMCIA_DEVICE_PROD_ID1("SCR243 PCMCIA", 0x2054e8de),
+ PCMCIA_DEVICE_PROD_ID1("SCR24x PCMCIA", 0x54a33665),
+ PCMCIA_DEVICE_NULL
+};
+MODULE_DEVICE_TABLE(pcmcia, scr24x_ids);
+
+static struct pcmcia_driver scr24x_driver = {
+ .owner = THIS_MODULE,
+ .name = "scr24x_cs",
+ .probe = scr24x_probe,
+ .remove = scr24x_remove,
+ .id_table = scr24x_ids,
+};
+
+static int __init scr24x_init(void)
+{
+ int ret;
+
+ scr24x_class = class_create(THIS_MODULE, "scr24x");
+ if (IS_ERR(scr24x_class))
+ return PTR_ERR(scr24x_class);
+
+ ret = alloc_chrdev_region(&scr24x_devt, 0, SCR24X_DEVS, "scr24x");
+ if (ret < 0) {
+ class_destroy(scr24x_class);
+ return ret;
+ }
+
+ ret = pcmcia_register_driver(&scr24x_driver);
+ if (ret < 0) {
+ unregister_chrdev_region(scr24x_devt, SCR24X_DEVS);
+ class_destroy(scr24x_class);
+ }
+
+ return ret;
+}
+
+static void __exit scr24x_exit(void)
+{
+ pcmcia_unregister_driver(&scr24x_driver);
+ unregister_chrdev_region(scr24x_devt, SCR24X_DEVS);
+ class_destroy(scr24x_class);
+}
+
+module_init(scr24x_init);
+module_exit(scr24x_exit);
+
+MODULE_AUTHOR("Lubomir Rintel");
+MODULE_DESCRIPTION("SCR24x PCMCIA Smart Card Reader Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/char/pcmcia/synclink_cs.c b/drivers/char/pcmcia/synclink_cs.c
index d28922df01d7..a7dd5f4f2c5a 100644
--- a/drivers/char/pcmcia/synclink_cs.c
+++ b/drivers/char/pcmcia/synclink_cs.c
@@ -4248,7 +4248,6 @@ static void hdlcdev_rx(MGSLPC_INFO *info, char *buf, int size)
static const struct net_device_ops hdlcdev_ops = {
.ndo_open = hdlcdev_open,
.ndo_stop = hdlcdev_close,
- .ndo_change_mtu = hdlc_change_mtu,
.ndo_start_xmit = hdlc_start_xmit,
.ndo_do_ioctl = hdlcdev_ioctl,
.ndo_tx_timeout = hdlcdev_tx_timeout,
diff --git a/drivers/char/ppdev.c b/drivers/char/ppdev.c
index 6af1ce04b3da..02819e0703c8 100644
--- a/drivers/char/ppdev.c
+++ b/drivers/char/ppdev.c
@@ -86,6 +86,9 @@ struct pp_struct {
long default_inactivity;
};
+/* should we use PARDEVICE_MAX here? */
+static struct device *devices[PARPORT_MAX];
+
/* pp_struct.flags bitfields */
#define PP_CLAIMED (1<<0)
#define PP_EXCL (1<<1)
@@ -294,7 +297,7 @@ static int register_device(int minor, struct pp_struct *pp)
port = parport_find_number(minor);
if (!port) {
- printk(KERN_WARNING "%s: no associated port!\n", name);
+ pr_warn("%s: no associated port!\n", name);
kfree(name);
return -ENXIO;
}
@@ -305,10 +308,10 @@ static int register_device(int minor, struct pp_struct *pp)
ppdev_cb.private = pp;
pdev = parport_register_dev_model(port, name, &ppdev_cb, minor);
parport_put_port(port);
+ kfree(name);
if (!pdev) {
- printk(KERN_WARNING "%s: failed to register device!\n", name);
- kfree(name);
+ pr_warn("%s: failed to register device!\n", name);
return -ENXIO;
}
@@ -789,13 +792,29 @@ static const struct file_operations pp_fops = {
static void pp_attach(struct parport *port)
{
- device_create(ppdev_class, port->dev, MKDEV(PP_MAJOR, port->number),
- NULL, "parport%d", port->number);
+ struct device *ret;
+
+ if (devices[port->number])
+ return;
+
+ ret = device_create(ppdev_class, port->dev,
+ MKDEV(PP_MAJOR, port->number), NULL,
+ "parport%d", port->number);
+ if (IS_ERR(ret)) {
+ pr_err("Failed to create device parport%d\n",
+ port->number);
+ return;
+ }
+ devices[port->number] = ret;
}
static void pp_detach(struct parport *port)
{
+ if (!devices[port->number])
+ return;
+
device_destroy(ppdev_class, MKDEV(PP_MAJOR, port->number));
+ devices[port->number] = NULL;
}
static int pp_probe(struct pardevice *par_dev)
@@ -822,8 +841,7 @@ static int __init ppdev_init(void)
int err = 0;
if (register_chrdev(PP_MAJOR, CHRDEV, &pp_fops)) {
- printk(KERN_WARNING CHRDEV ": unable to get major %d\n",
- PP_MAJOR);
+ pr_warn(CHRDEV ": unable to get major %d\n", PP_MAJOR);
return -EIO;
}
ppdev_class = class_create(THIS_MODULE, CHRDEV);
@@ -833,11 +851,11 @@ static int __init ppdev_init(void)
}
err = parport_register_driver(&pp_driver);
if (err < 0) {
- printk(KERN_WARNING CHRDEV ": unable to register with parport\n");
+ pr_warn(CHRDEV ": unable to register with parport\n");
goto out_class;
}
- printk(KERN_INFO PP_VERSION "\n");
+ pr_info(PP_VERSION "\n");
goto out;
out_class:
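The pp_attach() fix above leans on the fact that device_create() returns an ERR_PTR()-encoded pointer on failure, never NULL, so IS_ERR() is the only valid check before caching the result for a later device_destroy(). A condensed sketch of the same guard (the names are placeholders for ppdev_class and MKDEV(PP_MAJOR, n)):

#include <linux/device.h>
#include <linux/err.h>

static struct device *demo_attach(struct class *demo_class, dev_t devt,
				  int idx)
{
	struct device *dev;

	dev = device_create(demo_class, NULL, devt, NULL, "demo%d", idx);
	if (IS_ERR(dev))
		return NULL;	/* attach is best-effort; just bail */
	return dev;
}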
diff --git a/drivers/char/snsc.c b/drivers/char/snsc.c
index 10e56323f390..ec07f0e99732 100644
--- a/drivers/char/snsc.c
+++ b/drivers/char/snsc.c
@@ -285,7 +285,7 @@ scdrv_write(struct file *file, const char __user *buf,
DECLARE_WAITQUEUE(wait, current);
if (file->f_flags & O_NONBLOCK) {
- spin_unlock(&sd->sd_wlock);
+ spin_unlock_irqrestore(&sd->sd_wlock, flags);
up(&sd->sd_wbs);
return -EAGAIN;
}
diff --git a/drivers/char/tile-srom.c b/drivers/char/tile-srom.c
index 398800edb2cc..3d4cca64b2d4 100644
--- a/drivers/char/tile-srom.c
+++ b/drivers/char/tile-srom.c
@@ -312,7 +312,8 @@ ATTRIBUTE_GROUPS(srom_dev);
static char *srom_devnode(struct device *dev, umode_t *mode)
{
- *mode = S_IRUGO | S_IWUSR;
+ if (mode)
+ *mode = 0644;
return kasprintf(GFP_KERNEL, "srom/%s", dev_name(dev));
}
diff --git a/drivers/char/tpm/Kconfig b/drivers/char/tpm/Kconfig
index 9faa0b1e7766..277186d3b668 100644
--- a/drivers/char/tpm/Kconfig
+++ b/drivers/char/tpm/Kconfig
@@ -32,7 +32,7 @@ config TCG_TIS_CORE
config TCG_TIS
tristate "TPM Interface Specification 1.2 Interface / TPM 2.0 FIFO Interface"
- depends on X86
+ depends on X86 || OF
select TCG_TIS_CORE
---help---
If you have a TPM security chip that is compliant with the
diff --git a/drivers/char/tpm/Makefile b/drivers/char/tpm/Makefile
index a385fb8c17de..a05b1ebd0b26 100644
--- a/drivers/char/tpm/Makefile
+++ b/drivers/char/tpm/Makefile
@@ -2,16 +2,10 @@
# Makefile for the kernel tpm device drivers.
#
obj-$(CONFIG_TCG_TPM) += tpm.o
-tpm-y := tpm-interface.o tpm-dev.o tpm-sysfs.o tpm-chip.o tpm2-cmd.o
-tpm-$(CONFIG_ACPI) += tpm_ppi.o
-
-ifdef CONFIG_ACPI
- tpm-y += tpm_eventlog.o tpm_acpi.o
-else
-ifdef CONFIG_TCG_IBMVTPM
- tpm-y += tpm_eventlog.o tpm_of.o
-endif
-endif
+tpm-y := tpm-interface.o tpm-dev.o tpm-sysfs.o tpm-chip.o tpm2-cmd.o \
+ tpm_eventlog.o
+tpm-$(CONFIG_ACPI) += tpm_ppi.o tpm_acpi.o
+tpm-$(CONFIG_OF) += tpm_of.o
obj-$(CONFIG_TCG_TIS_CORE) += tpm_tis_core.o
obj-$(CONFIG_TCG_TIS) += tpm_tis.o
obj-$(CONFIG_TCG_TIS_SPI) += tpm_tis_spi.o
diff --git a/drivers/char/tpm/tpm-chip.c b/drivers/char/tpm/tpm-chip.c
index e5950131bd90..a77262d31911 100644
--- a/drivers/char/tpm/tpm-chip.c
+++ b/drivers/char/tpm/tpm-chip.c
@@ -84,7 +84,7 @@ EXPORT_SYMBOL_GPL(tpm_put_ops);
*
* The returned chip has had tpm_try_get_ops() called on it and must be
* released via tpm_put_ops().
- */
+ */
struct tpm_chip *tpm_chip_find_get(int chip_num)
{
struct tpm_chip *chip, *res = NULL;
@@ -103,7 +103,7 @@ struct tpm_chip *tpm_chip_find_get(int chip_num)
}
} while (chip_prev != chip_num);
} else {
- chip = idr_find_slowpath(&dev_nums_idr, chip_num);
+ chip = idr_find(&dev_nums_idr, chip_num);
if (chip && !tpm_try_get_ops(chip))
res = chip;
}
@@ -127,6 +127,7 @@ static void tpm_dev_release(struct device *dev)
idr_remove(&dev_nums_idr, chip->dev_num);
mutex_unlock(&idr_lock);
+ kfree(chip->log.bios_event_log);
kfree(chip);
}
@@ -276,27 +277,6 @@ static void tpm_del_char_device(struct tpm_chip *chip)
up_write(&chip->ops_sem);
}
-static int tpm1_chip_register(struct tpm_chip *chip)
-{
- if (chip->flags & TPM_CHIP_FLAG_TPM2)
- return 0;
-
- tpm_sysfs_add_device(chip);
-
- chip->bios_dir = tpm_bios_log_setup(dev_name(&chip->dev));
-
- return 0;
-}
-
-static void tpm1_chip_unregister(struct tpm_chip *chip)
-{
- if (chip->flags & TPM_CHIP_FLAG_TPM2)
- return;
-
- if (chip->bios_dir)
- tpm_bios_log_teardown(chip->bios_dir);
-}
-
static void tpm_del_legacy_sysfs(struct tpm_chip *chip)
{
struct attribute **i;
@@ -363,20 +343,20 @@ int tpm_chip_register(struct tpm_chip *chip)
return rc;
}
- rc = tpm1_chip_register(chip);
- if (rc)
+ tpm_sysfs_add_device(chip);
+
+ rc = tpm_bios_log_setup(chip);
+ if (rc != 0 && rc != -ENODEV)
return rc;
tpm_add_ppi(chip);
rc = tpm_add_char_device(chip);
if (rc) {
- tpm1_chip_unregister(chip);
+ tpm_bios_log_teardown(chip);
return rc;
}
- chip->flags |= TPM_CHIP_FLAG_REGISTERED;
-
rc = tpm_add_legacy_sysfs(chip);
if (rc) {
tpm_chip_unregister(chip);
@@ -402,12 +382,8 @@ EXPORT_SYMBOL_GPL(tpm_chip_register);
*/
void tpm_chip_unregister(struct tpm_chip *chip)
{
- if (!(chip->flags & TPM_CHIP_FLAG_REGISTERED))
- return;
-
tpm_del_legacy_sysfs(chip);
-
- tpm1_chip_unregister(chip);
+ tpm_bios_log_teardown(chip);
tpm_del_char_device(chip);
}
EXPORT_SYMBOL_GPL(tpm_chip_unregister);
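With tpm1_chip_register() folded away, sysfs, the event log and the character device are all set up and torn down inside the core calls, so a low-level driver's probe path shrinks to allocate-and-register. A hedged sketch (everything except tpmm_chip_alloc() and tpm_chip_register() is a placeholder):

static int demo_tpm_probe(struct device *dev,
			  const struct tpm_class_ops *ops)
{
	struct tpm_chip *chip;

	chip = tpmm_chip_alloc(dev, ops);	/* devm-managed */
	if (IS_ERR(chip))
		return PTR_ERR(chip);

	/* Registers sysfs, the event log and /dev/tpm* in one call. */
	return tpm_chip_register(chip);
}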
diff --git a/drivers/char/tpm/tpm-interface.c b/drivers/char/tpm/tpm-interface.c
index 3a9149cf0110..a2688ac2b48f 100644
--- a/drivers/char/tpm/tpm-interface.c
+++ b/drivers/char/tpm/tpm-interface.c
@@ -29,6 +29,7 @@
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/freezer.h>
+#include <linux/pm_runtime.h>
#include "tpm.h"
#include "tpm_eventlog.h"
@@ -356,6 +357,9 @@ ssize_t tpm_transmit(struct tpm_chip *chip, const u8 *buf, size_t bufsiz,
if (!(flags & TPM_TRANSMIT_UNLOCKED))
mutex_lock(&chip->tpm_mutex);
+ if (chip->dev.parent)
+ pm_runtime_get_sync(chip->dev.parent);
+
rc = chip->ops->send(chip, (u8 *) buf, count);
if (rc < 0) {
dev_err(&chip->dev,
@@ -397,6 +401,9 @@ out_recv:
dev_err(&chip->dev,
"tpm_transmit: tpm_recv: error %zd\n", rc);
out:
+ if (chip->dev.parent)
+ pm_runtime_put_sync(chip->dev.parent);
+
if (!(flags & TPM_TRANSMIT_UNLOCKED))
mutex_unlock(&chip->tpm_mutex);
return rc;
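Each transmit now holds a runtime-PM reference on the chip's parent for the duration of the command, which is what lets a bus driver such as tpm_crb power the device down between commands. A sketch of the balanced pattern, with demo_hw_command() standing in for the real send/recv sequence:

#include <linux/pm_runtime.h>

static int demo_do_io(struct device *parent)
{
	int rc;

	if (parent)
		pm_runtime_get_sync(parent);	/* resume if suspended */

	rc = demo_hw_command();			/* hypothetical I/O */

	if (parent)
		pm_runtime_put_sync(parent);	/* allow suspend again */
	return rc;
}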
@@ -437,26 +444,29 @@ static const struct tpm_input_header tpm_getcap_header = {
.ordinal = TPM_ORD_GET_CAP
};
-ssize_t tpm_getcap(struct tpm_chip *chip, __be32 subcap_id, cap_t *cap,
+ssize_t tpm_getcap(struct tpm_chip *chip, u32 subcap_id, cap_t *cap,
const char *desc)
{
struct tpm_cmd_t tpm_cmd;
int rc;
tpm_cmd.header.in = tpm_getcap_header;
- if (subcap_id == CAP_VERSION_1_1 || subcap_id == CAP_VERSION_1_2) {
- tpm_cmd.params.getcap_in.cap = subcap_id;
+ if (subcap_id == TPM_CAP_VERSION_1_1 ||
+ subcap_id == TPM_CAP_VERSION_1_2) {
+ tpm_cmd.params.getcap_in.cap = cpu_to_be32(subcap_id);
/*subcap field not necessary */
tpm_cmd.params.getcap_in.subcap_size = cpu_to_be32(0);
tpm_cmd.header.in.length -= cpu_to_be32(sizeof(__be32));
} else {
if (subcap_id == TPM_CAP_FLAG_PERM ||
subcap_id == TPM_CAP_FLAG_VOL)
- tpm_cmd.params.getcap_in.cap = TPM_CAP_FLAG;
+ tpm_cmd.params.getcap_in.cap =
+ cpu_to_be32(TPM_CAP_FLAG);
else
- tpm_cmd.params.getcap_in.cap = TPM_CAP_PROP;
+ tpm_cmd.params.getcap_in.cap =
+ cpu_to_be32(TPM_CAP_PROP);
tpm_cmd.params.getcap_in.subcap_size = cpu_to_be32(4);
- tpm_cmd.params.getcap_in.subcap = subcap_id;
+ tpm_cmd.params.getcap_in.subcap = cpu_to_be32(subcap_id);
}
rc = tpm_transmit_cmd(chip, &tpm_cmd, TPM_INTERNAL_RESULT_SIZE, 0,
desc);
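The tpm_getcap() rework moves the byte-order conversion to the wire boundary: the capability enums now hold CPU-order values, and cpu_to_be32() is applied exactly once when the command is built. A sketch of that convention with illustrative struct and field names:

#include <linux/types.h>
#include <asm/byteorder.h>

struct demo_getcap_in {
	__be32 cap;
	__be32 subcap_size;
	__be32 subcap;
};

static void demo_fill_getcap(struct demo_getcap_in *in, u32 cap, u32 subcap)
{
	/* Constants stay native-endian everywhere else in the driver. */
	in->cap = cpu_to_be32(cap);
	in->subcap_size = cpu_to_be32(sizeof(in->subcap));
	in->subcap = cpu_to_be32(subcap);
}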
@@ -488,12 +498,14 @@ static int tpm_startup(struct tpm_chip *chip, __be16 startup_type)
int tpm_get_timeouts(struct tpm_chip *chip)
{
- struct tpm_cmd_t tpm_cmd;
+ cap_t cap;
unsigned long new_timeout[4];
unsigned long old_timeout[4];
- struct duration_t *duration_cap;
ssize_t rc;
+ if (chip->flags & TPM_CHIP_FLAG_HAVE_TIMEOUTS)
+ return 0;
+
if (chip->flags & TPM_CHIP_FLAG_TPM2) {
/* Fixed timeouts for TPM2 */
chip->timeout_a = msecs_to_jiffies(TPM2_TIMEOUT_A);
@@ -506,46 +518,30 @@ int tpm_get_timeouts(struct tpm_chip *chip)
msecs_to_jiffies(TPM2_DURATION_MEDIUM);
chip->duration[TPM_LONG] =
msecs_to_jiffies(TPM2_DURATION_LONG);
+
+ chip->flags |= TPM_CHIP_FLAG_HAVE_TIMEOUTS;
return 0;
}
- tpm_cmd.header.in = tpm_getcap_header;
- tpm_cmd.params.getcap_in.cap = TPM_CAP_PROP;
- tpm_cmd.params.getcap_in.subcap_size = cpu_to_be32(4);
- tpm_cmd.params.getcap_in.subcap = TPM_CAP_PROP_TIS_TIMEOUT;
- rc = tpm_transmit_cmd(chip, &tpm_cmd, TPM_INTERNAL_RESULT_SIZE, 0,
- NULL);
-
+ rc = tpm_getcap(chip, TPM_CAP_PROP_TIS_TIMEOUT, &cap,
+ "attempting to determine the timeouts");
if (rc == TPM_ERR_INVALID_POSTINIT) {
/* The TPM is not started, we are the first to talk to it.
Execute a startup command. */
- dev_info(&chip->dev, "Issuing TPM_STARTUP");
+ dev_info(&chip->dev, "Issuing TPM_STARTUP\n");
if (tpm_startup(chip, TPM_ST_CLEAR))
return rc;
- tpm_cmd.header.in = tpm_getcap_header;
- tpm_cmd.params.getcap_in.cap = TPM_CAP_PROP;
- tpm_cmd.params.getcap_in.subcap_size = cpu_to_be32(4);
- tpm_cmd.params.getcap_in.subcap = TPM_CAP_PROP_TIS_TIMEOUT;
- rc = tpm_transmit_cmd(chip, &tpm_cmd, TPM_INTERNAL_RESULT_SIZE,
- 0, NULL);
+ rc = tpm_getcap(chip, TPM_CAP_PROP_TIS_TIMEOUT, &cap,
+ "attempting to determine the timeouts");
}
- if (rc) {
- dev_err(&chip->dev,
- "A TPM error (%zd) occurred attempting to determine the timeouts\n",
- rc);
- goto duration;
- }
-
- if (be32_to_cpu(tpm_cmd.header.out.return_code) != 0 ||
- be32_to_cpu(tpm_cmd.header.out.length)
- != sizeof(tpm_cmd.header.out) + sizeof(u32) + 4 * sizeof(u32))
- return -EINVAL;
+ if (rc)
+ return rc;
- old_timeout[0] = be32_to_cpu(tpm_cmd.params.getcap_out.cap.timeout.a);
- old_timeout[1] = be32_to_cpu(tpm_cmd.params.getcap_out.cap.timeout.b);
- old_timeout[2] = be32_to_cpu(tpm_cmd.params.getcap_out.cap.timeout.c);
- old_timeout[3] = be32_to_cpu(tpm_cmd.params.getcap_out.cap.timeout.d);
+ old_timeout[0] = be32_to_cpu(cap.timeout.a);
+ old_timeout[1] = be32_to_cpu(cap.timeout.b);
+ old_timeout[2] = be32_to_cpu(cap.timeout.c);
+ old_timeout[3] = be32_to_cpu(cap.timeout.d);
memcpy(new_timeout, old_timeout, sizeof(new_timeout));
/*
@@ -583,29 +579,17 @@ int tpm_get_timeouts(struct tpm_chip *chip)
chip->timeout_c = usecs_to_jiffies(new_timeout[2]);
chip->timeout_d = usecs_to_jiffies(new_timeout[3]);
-duration:
- tpm_cmd.header.in = tpm_getcap_header;
- tpm_cmd.params.getcap_in.cap = TPM_CAP_PROP;
- tpm_cmd.params.getcap_in.subcap_size = cpu_to_be32(4);
- tpm_cmd.params.getcap_in.subcap = TPM_CAP_PROP_TIS_DURATION;
-
- rc = tpm_transmit_cmd(chip, &tpm_cmd, TPM_INTERNAL_RESULT_SIZE, 0,
- "attempting to determine the durations");
+ rc = tpm_getcap(chip, TPM_CAP_PROP_TIS_DURATION, &cap,
+ "attempting to determine the durations");
if (rc)
return rc;
- if (be32_to_cpu(tpm_cmd.header.out.return_code) != 0 ||
- be32_to_cpu(tpm_cmd.header.out.length)
- != sizeof(tpm_cmd.header.out) + sizeof(u32) + 3 * sizeof(u32))
- return -EINVAL;
-
- duration_cap = &tpm_cmd.params.getcap_out.cap.duration;
chip->duration[TPM_SHORT] =
- usecs_to_jiffies(be32_to_cpu(duration_cap->tpm_short));
+ usecs_to_jiffies(be32_to_cpu(cap.duration.tpm_short));
chip->duration[TPM_MEDIUM] =
- usecs_to_jiffies(be32_to_cpu(duration_cap->tpm_medium));
+ usecs_to_jiffies(be32_to_cpu(cap.duration.tpm_medium));
chip->duration[TPM_LONG] =
- usecs_to_jiffies(be32_to_cpu(duration_cap->tpm_long));
+ usecs_to_jiffies(be32_to_cpu(cap.duration.tpm_long));
/* The Broadcom BCM0102 chipset in a Dell Latitude D820 gets the above
* value wrong and apparently reports msecs rather than usecs. So we
@@ -619,6 +603,8 @@ duration:
chip->duration_adjusted = true;
dev_info(&chip->dev, "Adjusting TPM timeout parameters.");
}
+
+ chip->flags |= TPM_CHIP_FLAG_HAVE_TIMEOUTS;
return 0;
}
EXPORT_SYMBOL_GPL(tpm_get_timeouts);
@@ -726,6 +712,14 @@ int tpm_pcr_read(u32 chip_num, int pcr_idx, u8 *res_buf)
}
EXPORT_SYMBOL_GPL(tpm_pcr_read);
+#define TPM_ORD_PCR_EXTEND cpu_to_be32(20)
+#define EXTEND_PCR_RESULT_SIZE 34
+static const struct tpm_input_header pcrextend_header = {
+ .tag = TPM_TAG_RQU_COMMAND,
+ .length = cpu_to_be32(34),
+ .ordinal = TPM_ORD_PCR_EXTEND
+};
+
/**
* tpm_pcr_extend - extend pcr value with hash
* @chip_num: tpm idx # or TPM_ANY_NUM
@@ -736,14 +730,6 @@ EXPORT_SYMBOL_GPL(tpm_pcr_read);
* isn't, protect against the chip disappearing, by incrementing
* the module usage count.
*/
-#define TPM_ORD_PCR_EXTEND cpu_to_be32(20)
-#define EXTEND_PCR_RESULT_SIZE 34
-static const struct tpm_input_header pcrextend_header = {
- .tag = TPM_TAG_RQU_COMMAND,
- .length = cpu_to_be32(34),
- .ordinal = TPM_ORD_PCR_EXTEND
-};
-
int tpm_pcr_extend(u32 chip_num, int pcr_idx, const u8 *hash)
{
struct tpm_cmd_t cmd;
diff --git a/drivers/char/tpm/tpm-sysfs.c b/drivers/char/tpm/tpm-sysfs.c
index a76ab4af9fb2..848ad6580b46 100644
--- a/drivers/char/tpm/tpm-sysfs.c
+++ b/drivers/char/tpm/tpm-sysfs.c
@@ -193,7 +193,7 @@ static ssize_t caps_show(struct device *dev, struct device_attribute *attr,
be32_to_cpu(cap.manufacturer_id));
/* Try to get a TPM version 1.2 TPM_CAP_VERSION_INFO */
- rc = tpm_getcap(chip, CAP_VERSION_1_2, &cap,
+ rc = tpm_getcap(chip, TPM_CAP_VERSION_1_2, &cap,
"attempting to determine the 1.2 version");
if (!rc) {
str += sprintf(str,
@@ -204,7 +204,7 @@ static ssize_t caps_show(struct device *dev, struct device_attribute *attr,
cap.tpm_version_1_2.revMinor);
} else {
/* Otherwise just use TPM_STRUCT_VER */
- rc = tpm_getcap(chip, CAP_VERSION_1_1, &cap,
+ rc = tpm_getcap(chip, TPM_CAP_VERSION_1_1, &cap,
"attempting to determine the 1.1 version");
if (rc)
return 0;
@@ -284,6 +284,9 @@ static const struct attribute_group tpm_dev_group = {
void tpm_sysfs_add_device(struct tpm_chip *chip)
{
+ if (chip->flags & TPM_CHIP_FLAG_TPM2)
+ return;
+
/* The sysfs routines rely on an implicit tpm_try_get_ops, device_del
* is called before ops is null'd and the sysfs core synchronizes this
* removal so that no callbacks are running or can run again
diff --git a/drivers/char/tpm/tpm.h b/drivers/char/tpm/tpm.h
index 4d183c97f6a6..1ae976894257 100644
--- a/drivers/char/tpm/tpm.h
+++ b/drivers/char/tpm/tpm.h
@@ -35,11 +35,14 @@
#include <linux/cdev.h>
#include <linux/highmem.h>
+#include "tpm_eventlog.h"
+
enum tpm_const {
TPM_MINOR = 224, /* officially assigned */
TPM_BUFSIZE = 4096,
TPM_NUM_DEVICES = 65536,
TPM_RETRY = 50, /* 5 seconds */
+ TPM_NUM_EVENT_LOG_FILES = 3,
};
enum tpm_timeout {
@@ -139,10 +142,15 @@ enum tpm2_startup_types {
#define TPM_PPI_VERSION_LEN 3
enum tpm_chip_flags {
- TPM_CHIP_FLAG_REGISTERED = BIT(0),
TPM_CHIP_FLAG_TPM2 = BIT(1),
TPM_CHIP_FLAG_IRQ = BIT(2),
TPM_CHIP_FLAG_VIRTUAL = BIT(3),
+ TPM_CHIP_FLAG_HAVE_TIMEOUTS = BIT(4),
+};
+
+struct tpm_chip_seqops {
+ struct tpm_chip *chip;
+ const struct seq_operations *seqops;
};
struct tpm_chip {
@@ -156,6 +164,10 @@ struct tpm_chip {
struct rw_semaphore ops_sem;
const struct tpm_class_ops *ops;
+ struct tpm_bios_log log;
+ struct tpm_chip_seqops bin_log_seqops;
+ struct tpm_chip_seqops ascii_log_seqops;
+
unsigned int flags;
int dev_num; /* /dev/tpm# */
@@ -171,7 +183,7 @@ struct tpm_chip {
unsigned long duration[3]; /* jiffies */
bool duration_adjusted;
- struct dentry **bios_dir;
+ struct dentry *bios_dir[TPM_NUM_EVENT_LOG_FILES];
const struct attribute_group *groups[3];
unsigned int groups_cnt;
@@ -282,21 +294,20 @@ typedef union {
} cap_t;
enum tpm_capabilities {
- TPM_CAP_FLAG = cpu_to_be32(4),
- TPM_CAP_PROP = cpu_to_be32(5),
- CAP_VERSION_1_1 = cpu_to_be32(0x06),
- CAP_VERSION_1_2 = cpu_to_be32(0x1A)
+ TPM_CAP_FLAG = 4,
+ TPM_CAP_PROP = 5,
+ TPM_CAP_VERSION_1_1 = 0x06,
+ TPM_CAP_VERSION_1_2 = 0x1A,
};
enum tpm_sub_capabilities {
- TPM_CAP_PROP_PCR = cpu_to_be32(0x101),
- TPM_CAP_PROP_MANUFACTURER = cpu_to_be32(0x103),
- TPM_CAP_FLAG_PERM = cpu_to_be32(0x108),
- TPM_CAP_FLAG_VOL = cpu_to_be32(0x109),
- TPM_CAP_PROP_OWNER = cpu_to_be32(0x111),
- TPM_CAP_PROP_TIS_TIMEOUT = cpu_to_be32(0x115),
- TPM_CAP_PROP_TIS_DURATION = cpu_to_be32(0x120),
-
+ TPM_CAP_PROP_PCR = 0x101,
+ TPM_CAP_PROP_MANUFACTURER = 0x103,
+ TPM_CAP_FLAG_PERM = 0x108,
+ TPM_CAP_FLAG_VOL = 0x109,
+ TPM_CAP_PROP_OWNER = 0x111,
+ TPM_CAP_PROP_TIS_TIMEOUT = 0x115,
+ TPM_CAP_PROP_TIS_DURATION = 0x120,
};
struct tpm_getcap_params_in {
@@ -484,7 +495,7 @@ ssize_t tpm_transmit(struct tpm_chip *chip, const u8 *buf, size_t bufsiz,
unsigned int flags);
ssize_t tpm_transmit_cmd(struct tpm_chip *chip, const void *cmd, int len,
unsigned int flags, const char *desc);
-ssize_t tpm_getcap(struct tpm_chip *chip, __be32 subcap_id, cap_t *cap,
+ssize_t tpm_getcap(struct tpm_chip *chip, u32 subcap_id, cap_t *cap,
const char *desc);
int tpm_get_timeouts(struct tpm_chip *);
int tpm1_auto_startup(struct tpm_chip *chip);
diff --git a/drivers/char/tpm/tpm2-cmd.c b/drivers/char/tpm/tpm2-cmd.c
index 7df55d58c939..da5b782a9731 100644
--- a/drivers/char/tpm/tpm2-cmd.c
+++ b/drivers/char/tpm/tpm2-cmd.c
@@ -680,7 +680,7 @@ static int tpm2_unseal_cmd(struct tpm_chip *chip,
}
/**
- * tpm_unseal_trusted() - unseal the payload of a trusted key
+ * tpm2_unseal_trusted() - unseal the payload of a trusted key
* @chip_num: TPM chip to use
* @payload: the key data in clear and encrypted form
* @options: authentication values and other options
diff --git a/drivers/char/tpm/tpm_acpi.c b/drivers/char/tpm/tpm_acpi.c
index 565a9478cb94..b7718c95fd0b 100644
--- a/drivers/char/tpm/tpm_acpi.c
+++ b/drivers/char/tpm/tpm_acpi.c
@@ -6,10 +6,11 @@
* Stefan Berger <stefanb@us.ibm.com>
* Reiner Sailer <sailer@watson.ibm.com>
* Kylene Hall <kjhall@us.ibm.com>
+ * Nayna Jain <nayna@linux.vnet.ibm.com>
*
* Maintained by: <tpmdd-devel@lists.sourceforge.net>
*
- * Access to the eventlog extended by the TCG BIOS of PC platform
+ * Access to the event log extended by the TCG BIOS of PC platform
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
@@ -45,29 +46,28 @@ struct acpi_tcpa {
};
/* read binary bios log */
-int read_log(struct tpm_bios_log *log)
+int tpm_read_log_acpi(struct tpm_chip *chip)
{
struct acpi_tcpa *buff;
acpi_status status;
void __iomem *virt;
u64 len, start;
+ struct tpm_bios_log *log;
- if (log->bios_event_log != NULL) {
- printk(KERN_ERR
- "%s: ERROR - Eventlog already initialized\n",
- __func__);
- return -EFAULT;
- }
+ log = &chip->log;
+
+ /* Unfortunately, ACPI does not associate the event log with a specific
+ * TPM the way PPI does. Thus all ACPI TPMs will read the same log.
+ */
+ if (!chip->acpi_dev_handle)
+ return -ENODEV;
/* Find TCPA entry in RSDT (ACPI_LOGICAL_ADDRESSING) */
status = acpi_get_table(ACPI_SIG_TCPA, 1,
(struct acpi_table_header **)&buff);
- if (ACPI_FAILURE(status)) {
- printk(KERN_ERR "%s: ERROR - Could not get TCPA table\n",
- __func__);
- return -EIO;
- }
+ if (ACPI_FAILURE(status))
+ return -ENODEV;
switch(buff->platform_class) {
case BIOS_SERVER:
@@ -81,29 +81,29 @@ int read_log(struct tpm_bios_log *log)
break;
}
if (!len) {
- printk(KERN_ERR "%s: ERROR - TCPA log area empty\n", __func__);
+ dev_warn(&chip->dev, "%s: TCPA log area empty\n", __func__);
return -EIO;
}
/* malloc EventLog space */
log->bios_event_log = kmalloc(len, GFP_KERNEL);
- if (!log->bios_event_log) {
- printk("%s: ERROR - Not enough Memory for BIOS measurements\n",
- __func__);
+ if (!log->bios_event_log)
return -ENOMEM;
- }
log->bios_event_log_end = log->bios_event_log + len;
virt = acpi_os_map_iomem(start, len);
- if (!virt) {
- kfree(log->bios_event_log);
- printk("%s: ERROR - Unable to map memory\n", __func__);
- return -EIO;
- }
+ if (!virt)
+ goto err;
memcpy_fromio(log->bios_event_log, virt, len);
acpi_os_unmap_iomem(virt, len);
return 0;
+
+err:
+ kfree(log->bios_event_log);
+ log->bios_event_log = NULL;
+ return -EIO;
}
diff --git a/drivers/char/tpm/tpm_crb.c b/drivers/char/tpm/tpm_crb.c
index a7c870af916c..717b6b47c042 100644
--- a/drivers/char/tpm/tpm_crb.c
+++ b/drivers/char/tpm/tpm_crb.c
@@ -19,6 +19,7 @@
#include <linux/highmem.h>
#include <linux/rculist.h>
#include <linux/module.h>
+#include <linux/pm_runtime.h>
#include "tpm.h"
#define ACPI_SIG_TPM2 "TPM2"
@@ -83,7 +84,71 @@ struct crb_priv {
u32 cmd_size;
};
-static SIMPLE_DEV_PM_OPS(crb_pm, tpm_pm_suspend, tpm_pm_resume);
+/**
+ * crb_go_idle - request tpm crb device to go the idle state
+ *
+ * @dev: crb device
+ * @priv: crb private data
+ *
+ * Write CRB_CTRL_REQ_GO_IDLE to TPM_CRB_CTRL_REQ
+ * The device should respond within TIMEOUT_C by clearing the bit.
+ * However, we do not wait here, since a subsequent CMD_READY request
+ * will be handled correctly even if the idle transition has not completed.
+ *
+ * The function does nothing for devices with the ACPI start method.
+ *
+ * Return: 0 always
+ */
+static int __maybe_unused crb_go_idle(struct device *dev, struct crb_priv *priv)
+{
+ if (priv->flags & CRB_FL_ACPI_START)
+ return 0;
+
+ iowrite32(CRB_CTRL_REQ_GO_IDLE, &priv->cca->req);
+ /* we don't really care when this settles */
+
+ return 0;
+}
+
+/**
+ * crb_cmd_ready - request tpm crb device to enter ready state
+ *
+ * @dev: crb device
+ * @priv: crb private data
+ *
+ * Write CRB_CTRL_REQ_CMD_READY to TPM_CRB_CTRL_REQ
+ * and poll until the device acknowledges it by clearing the bit.
+ * The device should respond within TIMEOUT_C.
+ *
+ * The function does nothing for devices with the ACPI start method.
+ *
+ * Return: 0 on success, -ETIME on timeout.
+ */
+static int __maybe_unused crb_cmd_ready(struct device *dev,
+ struct crb_priv *priv)
+{
+ ktime_t stop, start;
+
+ if (priv->flags & CRB_FL_ACPI_START)
+ return 0;
+
+ iowrite32(CRB_CTRL_REQ_CMD_READY, &priv->cca->req);
+
+ start = ktime_get();
+ stop = ktime_add(start, ms_to_ktime(TPM2_TIMEOUT_C));
+ do {
+ if (!(ioread32(&priv->cca->req) & CRB_CTRL_REQ_CMD_READY))
+ return 0;
+ usleep_range(50, 100);
+ } while (ktime_before(ktime_get(), stop));
+
+ if (ioread32(&priv->cca->req) & CRB_CTRL_REQ_CMD_READY) {
+ dev_warn(dev, "cmdReady timed out\n");
+ return -ETIME;
+ }
+
+ return 0;
+}
static u8 crb_status(struct tpm_chip *chip)
{
@@ -196,21 +261,6 @@ static const struct tpm_class_ops tpm_crb = {
.req_complete_val = CRB_DRV_STS_COMPLETE,
};
-static int crb_init(struct acpi_device *device, struct crb_priv *priv)
-{
- struct tpm_chip *chip;
-
- chip = tpmm_chip_alloc(&device->dev, &tpm_crb);
- if (IS_ERR(chip))
- return PTR_ERR(chip);
-
- dev_set_drvdata(&chip->dev, priv);
- chip->acpi_dev_handle = device->handle;
- chip->flags = TPM_CHIP_FLAG_TPM2;
-
- return tpm_chip_register(chip);
-}
-
static int crb_check_resource(struct acpi_resource *ares, void *data)
{
struct resource *io_res = data;
@@ -249,6 +299,7 @@ static int crb_map_io(struct acpi_device *device, struct crb_priv *priv,
struct list_head resources;
struct resource io_res;
struct device *dev = &device->dev;
+ u32 pa_high, pa_low;
u64 cmd_pa;
u32 cmd_size;
u64 rsp_pa;
@@ -276,12 +327,27 @@ static int crb_map_io(struct acpi_device *device, struct crb_priv *priv,
if (IS_ERR(priv->cca))
return PTR_ERR(priv->cca);
- cmd_pa = ((u64) ioread32(&priv->cca->cmd_pa_high) << 32) |
- (u64) ioread32(&priv->cca->cmd_pa_low);
+ /*
+ * PTT HW bug workaround: wake up the device to access
+ * registers that may not have been retained.
+ */
+ ret = crb_cmd_ready(dev, priv);
+ if (ret)
+ return ret;
+
+ pa_high = ioread32(&priv->cca->cmd_pa_high);
+ pa_low = ioread32(&priv->cca->cmd_pa_low);
+ cmd_pa = ((u64)pa_high << 32) | pa_low;
cmd_size = ioread32(&priv->cca->cmd_size);
+
+ dev_dbg(dev, "cmd_hi = %X cmd_low = %X cmd_size %X\n",
+ pa_high, pa_low, cmd_size);
+
priv->cmd = crb_map_res(dev, priv, &io_res, cmd_pa, cmd_size);
- if (IS_ERR(priv->cmd))
- return PTR_ERR(priv->cmd);
+ if (IS_ERR(priv->cmd)) {
+ ret = PTR_ERR(priv->cmd);
+ goto out;
+ }
memcpy_fromio(&rsp_pa, &priv->cca->rsp_pa, 8);
rsp_pa = le64_to_cpu(rsp_pa);
@@ -289,7 +355,8 @@ static int crb_map_io(struct acpi_device *device, struct crb_priv *priv,
if (cmd_pa != rsp_pa) {
priv->rsp = crb_map_res(dev, priv, &io_res, rsp_pa, rsp_size);
- return PTR_ERR_OR_ZERO(priv->rsp);
+ ret = PTR_ERR_OR_ZERO(priv->rsp);
+ goto out;
}
/* According to the PTP specification, overlapping command and response
@@ -297,18 +364,25 @@ static int crb_map_io(struct acpi_device *device, struct crb_priv *priv,
*/
if (cmd_size != rsp_size) {
dev_err(dev, FW_BUG "overlapping command and response buffer sizes are not identical");
- return -EINVAL;
+ ret = -EINVAL;
+ goto out;
}
+
priv->cmd_size = cmd_size;
priv->rsp = priv->cmd;
- return 0;
+
+out:
+ crb_go_idle(dev, priv);
+
+ return ret;
}
static int crb_acpi_add(struct acpi_device *device)
{
struct acpi_table_tpm2 *buf;
struct crb_priv *priv;
+ struct tpm_chip *chip;
struct device *dev = &device->dev;
acpi_status status;
u32 sm;
@@ -346,7 +420,33 @@ static int crb_acpi_add(struct acpi_device *device)
if (rc)
return rc;
- return crb_init(device, priv);
+ chip = tpmm_chip_alloc(dev, &tpm_crb);
+ if (IS_ERR(chip))
+ return PTR_ERR(chip);
+
+ dev_set_drvdata(&chip->dev, priv);
+ chip->acpi_dev_handle = device->handle;
+ chip->flags = TPM_CHIP_FLAG_TPM2;
+
+ rc = crb_cmd_ready(dev, priv);
+ if (rc)
+ return rc;
+
+ pm_runtime_get_noresume(dev);
+ pm_runtime_set_active(dev);
+ pm_runtime_enable(dev);
+
+ rc = tpm_chip_register(chip);
+ if (rc) {
+ crb_go_idle(dev, priv);
+ pm_runtime_put_noidle(dev);
+ pm_runtime_disable(dev);
+ return rc;
+ }
+
+ pm_runtime_put(dev);
+
+ return 0;
}
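crb_acpi_add() uses the canonical runtime-PM bring-up order for a device that is already powered at probe: take a usage reference, declare the state active, enable runtime PM, and only drop the reference once registration can no longer fail. Condensed sketch (demo_register() is hypothetical):

static int demo_probe(struct device *dev)
{
	int rc;

	pm_runtime_get_noresume(dev);	/* block suspend during setup */
	pm_runtime_set_active(dev);	/* PM core state matches hardware */
	pm_runtime_enable(dev);

	rc = demo_register(dev);
	if (rc) {
		pm_runtime_put_noidle(dev);
		pm_runtime_disable(dev);
		return rc;
	}

	pm_runtime_put(dev);		/* idle allowed from here on */
	return 0;
}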
static int crb_acpi_remove(struct acpi_device *device)
@@ -356,9 +456,34 @@ static int crb_acpi_remove(struct acpi_device *device)
tpm_chip_unregister(chip);
+ pm_runtime_disable(dev);
+
return 0;
}
+#ifdef CONFIG_PM
+static int crb_pm_runtime_suspend(struct device *dev)
+{
+ struct tpm_chip *chip = dev_get_drvdata(dev);
+ struct crb_priv *priv = dev_get_drvdata(&chip->dev);
+
+ return crb_go_idle(dev, priv);
+}
+
+static int crb_pm_runtime_resume(struct device *dev)
+{
+ struct tpm_chip *chip = dev_get_drvdata(dev);
+ struct crb_priv *priv = dev_get_drvdata(&chip->dev);
+
+ return crb_cmd_ready(dev, priv);
+}
+#endif /* CONFIG_PM */
+
+static const struct dev_pm_ops crb_pm = {
+ SET_SYSTEM_SLEEP_PM_OPS(tpm_pm_suspend, tpm_pm_resume)
+ SET_RUNTIME_PM_OPS(crb_pm_runtime_suspend, crb_pm_runtime_resume, NULL)
+};
+
static struct acpi_device_id crb_device_ids[] = {
{"MSFT0101", 0},
{"", 0},
diff --git a/drivers/char/tpm/tpm_eventlog.c b/drivers/char/tpm/tpm_eventlog.c
index e7228863290e..11bb1138a828 100644
--- a/drivers/char/tpm/tpm_eventlog.c
+++ b/drivers/char/tpm/tpm_eventlog.c
@@ -7,10 +7,11 @@
* Stefan Berger <stefanb@us.ibm.com>
* Reiner Sailer <sailer@watson.ibm.com>
* Kylene Hall <kjhall@us.ibm.com>
+ * Nayna Jain <nayna@linux.vnet.ibm.com>
*
* Maintained by: <tpmdd-devel@lists.sourceforge.net>
*
- * Access to the eventlog created by a system's firmware / BIOS
+ * Access to the event log created by a system's firmware / BIOS
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
@@ -72,7 +73,8 @@ static const char* tcpa_pc_event_id_strings[] = {
static void *tpm_bios_measurements_start(struct seq_file *m, loff_t *pos)
{
loff_t i;
- struct tpm_bios_log *log = m->private;
+ struct tpm_chip *chip = m->private;
+ struct tpm_bios_log *log = &chip->log;
void *addr = log->bios_event_log;
void *limit = log->bios_event_log_end;
struct tcpa_event *event;
@@ -119,7 +121,8 @@ static void *tpm_bios_measurements_next(struct seq_file *m, void *v,
loff_t *pos)
{
struct tcpa_event *event = v;
- struct tpm_bios_log *log = m->private;
+ struct tpm_chip *chip = m->private;
+ struct tpm_bios_log *log = &chip->log;
void *limit = log->bios_event_log_end;
u32 converted_event_size;
u32 converted_event_type;
@@ -260,13 +263,10 @@ static int tpm_binary_bios_measurements_show(struct seq_file *m, void *v)
static int tpm_bios_measurements_release(struct inode *inode,
struct file *file)
{
- struct seq_file *seq = file->private_data;
- struct tpm_bios_log *log = seq->private;
+ struct seq_file *seq = (struct seq_file *)file->private_data;
+ struct tpm_chip *chip = (struct tpm_chip *)seq->private;
- if (log) {
- kfree(log->bios_event_log);
- kfree(log);
- }
+ put_device(&chip->dev);
return seq_release(inode, file);
}
@@ -304,151 +304,159 @@ static int tpm_ascii_bios_measurements_show(struct seq_file *m, void *v)
return 0;
}
-static const struct seq_operations tpm_ascii_b_measurments_seqops = {
+static const struct seq_operations tpm_ascii_b_measurements_seqops = {
.start = tpm_bios_measurements_start,
.next = tpm_bios_measurements_next,
.stop = tpm_bios_measurements_stop,
.show = tpm_ascii_bios_measurements_show,
};
-static const struct seq_operations tpm_binary_b_measurments_seqops = {
+static const struct seq_operations tpm_binary_b_measurements_seqops = {
.start = tpm_bios_measurements_start,
.next = tpm_bios_measurements_next,
.stop = tpm_bios_measurements_stop,
.show = tpm_binary_bios_measurements_show,
};
-static int tpm_ascii_bios_measurements_open(struct inode *inode,
+static int tpm_bios_measurements_open(struct inode *inode,
struct file *file)
{
int err;
- struct tpm_bios_log *log;
struct seq_file *seq;
-
- log = kzalloc(sizeof(struct tpm_bios_log), GFP_KERNEL);
- if (!log)
- return -ENOMEM;
-
- if ((err = read_log(log)))
- goto out_free;
+ struct tpm_chip_seqops *chip_seqops;
+ const struct seq_operations *seqops;
+ struct tpm_chip *chip;
+
+ inode_lock(inode);
+ if (!inode->i_private) {
+ inode_unlock(inode);
+ return -ENODEV;
+ }
+ chip_seqops = (struct tpm_chip_seqops *)inode->i_private;
+ seqops = chip_seqops->seqops;
+ chip = chip_seqops->chip;
+ get_device(&chip->dev);
+ inode_unlock(inode);
/* now register seq file */
- err = seq_open(file, &tpm_ascii_b_measurments_seqops);
+ err = seq_open(file, seqops);
if (!err) {
seq = file->private_data;
- seq->private = log;
- } else {
- goto out_free;
+ seq->private = chip;
}
-out:
return err;
-out_free:
- kfree(log->bios_event_log);
- kfree(log);
- goto out;
}
-static const struct file_operations tpm_ascii_bios_measurements_ops = {
- .open = tpm_ascii_bios_measurements_open,
+static const struct file_operations tpm_bios_measurements_ops = {
+ .owner = THIS_MODULE,
+ .open = tpm_bios_measurements_open,
.read = seq_read,
.llseek = seq_lseek,
.release = tpm_bios_measurements_release,
};
-static int tpm_binary_bios_measurements_open(struct inode *inode,
- struct file *file)
+static int tpm_read_log(struct tpm_chip *chip)
{
- int err;
- struct tpm_bios_log *log;
- struct seq_file *seq;
-
- log = kzalloc(sizeof(struct tpm_bios_log), GFP_KERNEL);
- if (!log)
- return -ENOMEM;
+ int rc;
- if ((err = read_log(log)))
- goto out_free;
-
- /* now register seq file */
- err = seq_open(file, &tpm_binary_b_measurments_seqops);
- if (!err) {
- seq = file->private_data;
- seq->private = log;
- } else {
- goto out_free;
+ if (chip->log.bios_event_log != NULL) {
+ dev_dbg(&chip->dev,
+ "%s: ERROR - event log already initialized\n",
+ __func__);
+ return -EFAULT;
}
-out:
- return err;
-out_free:
- kfree(log->bios_event_log);
- kfree(log);
- goto out;
-}
-
-static const struct file_operations tpm_binary_bios_measurements_ops = {
- .open = tpm_binary_bios_measurements_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = tpm_bios_measurements_release,
-};
+ rc = tpm_read_log_acpi(chip);
+ if (rc != -ENODEV)
+ return rc;
-static int is_bad(void *p)
-{
- if (!p)
- return 1;
- if (IS_ERR(p) && (PTR_ERR(p) != -ENODEV))
- return 1;
- return 0;
+ return tpm_read_log_of(chip);
}
-struct dentry **tpm_bios_log_setup(const char *name)
+/*
+ * tpm_bios_log_setup() - Read the event log from the firmware
+ * @chip: TPM chip to use.
+ *
+ * If an event log is found then the securityfs files are setup to
+ * export it to userspace, otherwise nothing is done.
+ *
+ * Returns -ENODEV if the firmware has no event log or securityfs is not
+ * supported.
+ */
+int tpm_bios_log_setup(struct tpm_chip *chip)
{
- struct dentry **ret = NULL, *tpm_dir, *bin_file, *ascii_file;
-
- tpm_dir = securityfs_create_dir(name, NULL);
- if (is_bad(tpm_dir))
- goto out;
-
- bin_file =
+ const char *name = dev_name(&chip->dev);
+ unsigned int cnt;
+ int rc = 0;
+
+ if (chip->flags & TPM_CHIP_FLAG_TPM2)
+ return 0;
+
+ rc = tpm_read_log(chip);
+ if (rc)
+ return rc;
+
+ cnt = 0;
+ chip->bios_dir[cnt] = securityfs_create_dir(name, NULL);
+ /* NOTE: securityfs_create_dir can return ENODEV if securityfs is
+ * compiled out. The caller should ignore the ENODEV return code.
+ */
+ if (IS_ERR(chip->bios_dir[cnt]))
+ goto err;
+ cnt++;
+
+ chip->bin_log_seqops.chip = chip;
+ chip->bin_log_seqops.seqops = &tpm_binary_b_measurements_seqops;
+
+ chip->bios_dir[cnt] =
securityfs_create_file("binary_bios_measurements",
- S_IRUSR | S_IRGRP, tpm_dir, NULL,
- &tpm_binary_bios_measurements_ops);
- if (is_bad(bin_file))
- goto out_tpm;
+ 0440, chip->bios_dir[0],
+ (void *)&chip->bin_log_seqops,
+ &tpm_bios_measurements_ops);
+ if (IS_ERR(chip->bios_dir[cnt]))
+ goto err;
+ cnt++;
+
+ chip->ascii_log_seqops.chip = chip;
+ chip->ascii_log_seqops.seqops = &tpm_ascii_b_measurements_seqops;
- ascii_file =
+ chip->bios_dir[cnt] =
securityfs_create_file("ascii_bios_measurements",
- S_IRUSR | S_IRGRP, tpm_dir, NULL,
- &tpm_ascii_bios_measurements_ops);
- if (is_bad(ascii_file))
- goto out_bin;
-
- ret = kmalloc(3 * sizeof(struct dentry *), GFP_KERNEL);
- if (!ret)
- goto out_ascii;
-
- ret[0] = ascii_file;
- ret[1] = bin_file;
- ret[2] = tpm_dir;
-
- return ret;
-
-out_ascii:
- securityfs_remove(ascii_file);
-out_bin:
- securityfs_remove(bin_file);
-out_tpm:
- securityfs_remove(tpm_dir);
-out:
- return NULL;
+ 0440, chip->bios_dir[0],
+ (void *)&chip->ascii_log_seqops,
+ &tpm_bios_measurements_ops);
+ if (IS_ERR(chip->bios_dir[cnt]))
+ goto err;
+ cnt++;
+
+ return 0;
+
+err:
+ rc = PTR_ERR(chip->bios_dir[cnt]);
+ chip->bios_dir[cnt] = NULL;
+ tpm_bios_log_teardown(chip);
+ return rc;
}
-void tpm_bios_log_teardown(struct dentry **lst)
+void tpm_bios_log_teardown(struct tpm_chip *chip)
{
int i;
-
- for (i = 0; i < 3; i++)
- securityfs_remove(lst[i]);
+ struct inode *inode;
+
+ /* securityfs_remove currently doesn't take care of handling sync
+ * between removal and opening of pseudo files. To handle this, a
+ * workaround is added: i_private is set to NULL here during removal
+ * and checked during open(), both within inode_lock()/unlock().
+ * This design ensures that open() either safely gets kref or fails.
+ */
+ for (i = (TPM_NUM_EVENT_LOG_FILES - 1); i >= 0; i--) {
+ if (chip->bios_dir[i]) {
+ inode = d_inode(chip->bios_dir[i]);
+ inode_lock(inode);
+ inode->i_private = NULL;
+ inode_unlock(inode);
+ securityfs_remove(chip->bios_dir[i]);
+ }
+ }
}
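The removal/open handshake above is worth stating compactly: the teardown side clears inode->i_private under inode_lock() before calling securityfs_remove(), and any open() must re-read i_private under the same lock before taking a reference. A sketch of the open side in isolation (demo_priv_to_dev() is hypothetical):

static int demo_open(struct inode *inode, struct file *file)
{
	void *priv;

	inode_lock(inode);
	priv = inode->i_private;
	if (priv)
		get_device(demo_priv_to_dev(priv));
	inode_unlock(inode);

	if (!priv)
		return -ENODEV;	/* raced with teardown */
	file->private_data = priv;
	return 0;
}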
diff --git a/drivers/char/tpm/tpm_eventlog.h b/drivers/char/tpm/tpm_eventlog.h
index 8de62b09be51..1660d74ea79a 100644
--- a/drivers/char/tpm/tpm_eventlog.h
+++ b/drivers/char/tpm/tpm_eventlog.h
@@ -73,20 +73,24 @@ enum tcpa_pc_event_ids {
HOST_TABLE_OF_DEVICES,
};
-int read_log(struct tpm_bios_log *log);
-
-#if defined(CONFIG_TCG_IBMVTPM) || defined(CONFIG_TCG_IBMVTPM_MODULE) || \
- defined(CONFIG_ACPI)
-extern struct dentry **tpm_bios_log_setup(const char *);
-extern void tpm_bios_log_teardown(struct dentry **);
+#if defined(CONFIG_ACPI)
+int tpm_read_log_acpi(struct tpm_chip *chip);
#else
-static inline struct dentry **tpm_bios_log_setup(const char *name)
+static inline int tpm_read_log_acpi(struct tpm_chip *chip)
{
- return NULL;
+ return -ENODEV;
}
-static inline void tpm_bios_log_teardown(struct dentry **dir)
+#endif
+#if defined(CONFIG_OF)
+int tpm_read_log_of(struct tpm_chip *chip);
+#else
+static inline int tpm_read_log_of(struct tpm_chip *chip)
{
+ return -ENODEV;
}
#endif
+int tpm_bios_log_setup(struct tpm_chip *chip);
+void tpm_bios_log_teardown(struct tpm_chip *chip);
+
#endif
diff --git a/drivers/char/tpm/tpm_of.c b/drivers/char/tpm/tpm_of.c
index 570f30c5c5f4..7dee42d7b5e0 100644
--- a/drivers/char/tpm/tpm_of.c
+++ b/drivers/char/tpm/tpm_of.c
@@ -2,6 +2,7 @@
* Copyright 2012 IBM Corporation
*
* Author: Ashley Lai <ashleydlai@gmail.com>
+ * Nayna Jain <nayna@linux.vnet.ibm.com>
*
* Maintained by: <tpmdd-devel@lists.sourceforge.net>
*
@@ -20,55 +21,38 @@
#include "tpm.h"
#include "tpm_eventlog.h"
-int read_log(struct tpm_bios_log *log)
+int tpm_read_log_of(struct tpm_chip *chip)
{
struct device_node *np;
const u32 *sizep;
const u64 *basep;
+ struct tpm_bios_log *log;
- if (log->bios_event_log != NULL) {
- pr_err("%s: ERROR - Eventlog already initialized\n", __func__);
- return -EFAULT;
- }
-
- np = of_find_node_by_name(NULL, "vtpm");
- if (!np) {
- pr_err("%s: ERROR - IBMVTPM not supported\n", __func__);
+ log = &chip->log;
+ if (chip->dev.parent && chip->dev.parent->of_node)
+ np = chip->dev.parent->of_node;
+ else
return -ENODEV;
- }
sizep = of_get_property(np, "linux,sml-size", NULL);
- if (sizep == NULL) {
- pr_err("%s: ERROR - SML size not found\n", __func__);
- goto cleanup_eio;
- }
- if (*sizep == 0) {
- pr_err("%s: ERROR - event log area empty\n", __func__);
- goto cleanup_eio;
- }
-
basep = of_get_property(np, "linux,sml-base", NULL);
- if (basep == NULL) {
- pr_err("%s: ERROR - SML not found\n", __func__);
- goto cleanup_eio;
+ if (sizep == NULL && basep == NULL)
+ return -ENODEV;
+ if (sizep == NULL || basep == NULL)
+ return -EIO;
+
+ if (*sizep == 0) {
+ dev_warn(&chip->dev, "%s: Event log area empty\n", __func__);
+ return -EIO;
}
log->bios_event_log = kmalloc(*sizep, GFP_KERNEL);
- if (!log->bios_event_log) {
- pr_err("%s: ERROR - Not enough memory for BIOS measurements\n",
- __func__);
- of_node_put(np);
+ if (!log->bios_event_log)
return -ENOMEM;
- }
log->bios_event_log_end = log->bios_event_log + *sizep;
memcpy(log->bios_event_log, __va(*basep), *sizep);
- of_node_put(np);
return 0;
-
-cleanup_eio:
- of_node_put(np);
- return -EIO;
}
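One caveat with the raw of_get_property() pointers used above: device-tree property cells are stored big-endian, so dereferencing them directly is only correct on big-endian hosts (the original IBM vTPM users). A hedged alternative using the typed helpers, which convert for you:

#include <linux/of.h>

static int demo_read_sml(struct device_node *np, u64 *base, u32 *size)
{
	int rc;

	rc = of_property_read_u64(np, "linux,sml-base", base);
	if (rc)
		return rc;
	return of_property_read_u32(np, "linux,sml-size", size);
}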
diff --git a/drivers/char/tpm/tpm_tis.c b/drivers/char/tpm/tpm_tis.c
index eaf5730d79eb..0127af130cb1 100644
--- a/drivers/char/tpm/tpm_tis.c
+++ b/drivers/char/tpm/tpm_tis.c
@@ -28,6 +28,8 @@
#include <linux/wait.h>
#include <linux/acpi.h>
#include <linux/freezer.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
#include "tpm.h"
#include "tpm_tis_core.h"
@@ -354,12 +356,21 @@ static int tpm_tis_plat_remove(struct platform_device *pdev)
return 0;
}
+#ifdef CONFIG_OF
+static const struct of_device_id tis_of_platform_match[] = {
+ {.compatible = "tcg,tpm-tis-mmio"},
+ {},
+};
+MODULE_DEVICE_TABLE(of, tis_of_platform_match);
+#endif
+
static struct platform_driver tis_drv = {
.probe = tpm_tis_plat_probe,
.remove = tpm_tis_plat_remove,
.driver = {
.name = "tpm_tis",
.pm = &tpm_tis_pm,
+ .of_match_table = of_match_ptr(tis_of_platform_match),
},
};
diff --git a/drivers/char/tpm/tpm_tis_core.c b/drivers/char/tpm/tpm_tis_core.c
index e3bf31b37138..7993678954a2 100644
--- a/drivers/char/tpm/tpm_tis_core.c
+++ b/drivers/char/tpm/tpm_tis_core.c
@@ -180,12 +180,19 @@ static int recv_data(struct tpm_chip *chip, u8 *buf, size_t count)
struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev);
int size = 0, burstcnt, rc;
- while (size < count &&
- wait_for_tpm_stat(chip,
+ while (size < count) {
+ rc = wait_for_tpm_stat(chip,
TPM_STS_DATA_AVAIL | TPM_STS_VALID,
chip->timeout_c,
- &priv->read_queue, true) == 0) {
- burstcnt = min_t(int, get_burstcount(chip), count - size);
+ &priv->read_queue, true);
+ if (rc < 0)
+ return rc;
+ burstcnt = get_burstcount(chip);
+ if (burstcnt < 0) {
+ dev_err(&chip->dev, "Unable to read burstcount\n");
+ return burstcnt;
+ }
+ burstcnt = min_t(int, burstcnt, count - size);
rc = tpm_tis_read_bytes(priv, TPM_DATA_FIFO(priv->locality),
burstcnt, buf + size);
@@ -229,8 +236,11 @@ static int tpm_tis_recv(struct tpm_chip *chip, u8 *buf, size_t count)
goto out;
}
- wait_for_tpm_stat(chip, TPM_STS_VALID, chip->timeout_c,
- &priv->int_queue, false);
+ if (wait_for_tpm_stat(chip, TPM_STS_VALID, chip->timeout_c,
+ &priv->int_queue, false) < 0) {
+ size = -ETIME;
+ goto out;
+ }
status = tpm_tis_status(chip);
if (status & TPM_STS_DATA_AVAIL) { /* retry? */
dev_err(&chip->dev, "Error left over data\n");
@@ -271,7 +281,13 @@ static int tpm_tis_send_data(struct tpm_chip *chip, u8 *buf, size_t len)
}
while (count < len - 1) {
- burstcnt = min_t(int, get_burstcount(chip), len - count - 1);
+ burstcnt = get_burstcount(chip);
+ if (burstcnt < 0) {
+ dev_err(&chip->dev, "Unable to read burstcount\n");
+ rc = burstcnt;
+ goto out_err;
+ }
+ burstcnt = min_t(int, burstcnt, len - count - 1);
rc = tpm_tis_write_bytes(priv, TPM_DATA_FIFO(priv->locality),
burstcnt, buf + count);
if (rc < 0)
@@ -279,8 +295,11 @@ static int tpm_tis_send_data(struct tpm_chip *chip, u8 *buf, size_t len)
count += burstcnt;
- wait_for_tpm_stat(chip, TPM_STS_VALID, chip->timeout_c,
- &priv->int_queue, false);
+ if (wait_for_tpm_stat(chip, TPM_STS_VALID, chip->timeout_c,
+ &priv->int_queue, false) < 0) {
+ rc = -ETIME;
+ goto out_err;
+ }
status = tpm_tis_status(chip);
if (!itpm && (status & TPM_STS_DATA_EXPECT) == 0) {
rc = -EIO;
@@ -293,8 +312,11 @@ static int tpm_tis_send_data(struct tpm_chip *chip, u8 *buf, size_t len)
if (rc < 0)
goto out_err;
- wait_for_tpm_stat(chip, TPM_STS_VALID, chip->timeout_c,
- &priv->int_queue, false);
+ if (wait_for_tpm_stat(chip, TPM_STS_VALID, chip->timeout_c,
+ &priv->int_queue, false) < 0) {
+ rc = -ETIME;
+ goto out_err;
+ }
status = tpm_tis_status(chip);
if (!itpm && (status & TPM_STS_DATA_EXPECT) != 0) {
rc = -EIO;
@@ -755,20 +777,20 @@ int tpm_tis_core_init(struct device *dev, struct tpm_tis_data *priv, int irq,
if (intfcaps & TPM_INTF_DATA_AVAIL_INT)
dev_dbg(dev, "\tData Avail Int Support\n");
- /* Very early on issue a command to the TPM in polling mode to make
- * sure it works. May as well use that command to set the proper
- * timeouts for the driver.
- */
- if (tpm_get_timeouts(chip)) {
- dev_err(dev, "Could not get TPM timeouts and durations\n");
- rc = -ENODEV;
- goto out_err;
- }
-
/* INTERRUPT Setup */
init_waitqueue_head(&priv->read_queue);
init_waitqueue_head(&priv->int_queue);
if (irq != -1) {
+ /* Before doing irq testing, issue a command to the TPM in polling mode
+ * to make sure it works. May as well use that command to set the
+ * proper timeouts for the driver.
+ */
+ if (tpm_get_timeouts(chip)) {
+ dev_err(dev, "Could not get TPM timeouts and durations\n");
+ rc = -ENODEV;
+ goto out_err;
+ }
+
if (irq) {
tpm_tis_probe_irq_single(chip, intmask, IRQF_SHARED,
irq);
diff --git a/drivers/char/tpm/tpm_vtpm_proxy.c b/drivers/char/tpm/tpm_vtpm_proxy.c
index 9a940332c157..5463b58af26e 100644
--- a/drivers/char/tpm/tpm_vtpm_proxy.c
+++ b/drivers/char/tpm/tpm_vtpm_proxy.c
@@ -1,5 +1,6 @@
/*
* Copyright (C) 2015, 2016 IBM Corporation
+ * Copyright (C) 2016 Intel Corporation
*
* Author: Stefan Berger <stefanb@us.ibm.com>
*
@@ -41,6 +42,7 @@ struct proxy_dev {
long state; /* internal state */
#define STATE_OPENED_FLAG BIT(0)
#define STATE_WAIT_RESPONSE_FLAG BIT(1) /* waiting for emulator response */
+#define STATE_REGISTERED_FLAG BIT(2)
size_t req_len; /* length of queued TPM request */
size_t resp_len; /* length of queued TPM response */
@@ -369,12 +371,9 @@ static void vtpm_proxy_work(struct work_struct *work)
rc = tpm_chip_register(proxy_dev->chip);
if (rc)
- goto err;
-
- return;
-
-err:
- vtpm_proxy_fops_undo_open(proxy_dev);
+ vtpm_proxy_fops_undo_open(proxy_dev);
+ else
+ proxy_dev->state |= STATE_REGISTERED_FLAG;
}
/*
@@ -515,7 +514,8 @@ static void vtpm_proxy_delete_device(struct proxy_dev *proxy_dev)
*/
vtpm_proxy_fops_undo_open(proxy_dev);
- tpm_chip_unregister(proxy_dev->chip);
+ if (proxy_dev->state & STATE_REGISTERED_FLAG)
+ tpm_chip_unregister(proxy_dev->chip);
vtpm_proxy_delete_proxy_dev(proxy_dev);
}
@@ -524,6 +524,50 @@ static void vtpm_proxy_delete_device(struct proxy_dev *proxy_dev)
* Code related to the control device /dev/vtpmx
*/
+/**
+ * vtpmx_ioc_new_dev - handler for the %VTPM_PROXY_IOC_NEW_DEV ioctl
+ * @file: /dev/vtpmx
+ * @ioctl: the ioctl number
+ * @arg: pointer to the struct vtpmx_proxy_new_dev
+ *
+ * Creates an anonymous file that is used by the process acting as a TPM to
+ * communicate with the client processes. The function will also add a new TPM
+ * device through which data is proxied to this TPM acting process. The caller
+ * will be provided with a file descriptor to communicate with the clients and
+ * major and minor numbers for the TPM device.
+ */
+static long vtpmx_ioc_new_dev(struct file *file, unsigned int ioctl,
+ unsigned long arg)
+{
+ void __user *argp = (void __user *)arg;
+ struct vtpm_proxy_new_dev __user *vtpm_new_dev_p;
+ struct vtpm_proxy_new_dev vtpm_new_dev;
+ struct file *vtpm_file;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ vtpm_new_dev_p = argp;
+
+ if (copy_from_user(&vtpm_new_dev, vtpm_new_dev_p,
+ sizeof(vtpm_new_dev)))
+ return -EFAULT;
+
+ vtpm_file = vtpm_proxy_create_device(&vtpm_new_dev);
+ if (IS_ERR(vtpm_file))
+ return PTR_ERR(vtpm_file);
+
+ if (copy_to_user(vtpm_new_dev_p, &vtpm_new_dev,
+ sizeof(vtpm_new_dev))) {
+ put_unused_fd(vtpm_new_dev.fd);
+ fput(vtpm_file);
+ return -EFAULT;
+ }
+
+ fd_install(vtpm_new_dev.fd, vtpm_file);
+ return 0;
+}
+
/*
* vtpmx_fops_ioctl: ioctl on /dev/vtpmx
*
@@ -531,34 +575,11 @@ static void vtpm_proxy_delete_device(struct proxy_dev *proxy_dev)
* Returns 0 on success, a negative error code otherwise.
*/
static long vtpmx_fops_ioctl(struct file *f, unsigned int ioctl,
- unsigned long arg)
+ unsigned long arg)
{
- void __user *argp = (void __user *)arg;
- struct vtpm_proxy_new_dev __user *vtpm_new_dev_p;
- struct vtpm_proxy_new_dev vtpm_new_dev;
- struct file *file;
-
switch (ioctl) {
case VTPM_PROXY_IOC_NEW_DEV:
- if (!capable(CAP_SYS_ADMIN))
- return -EPERM;
- vtpm_new_dev_p = argp;
- if (copy_from_user(&vtpm_new_dev, vtpm_new_dev_p,
- sizeof(vtpm_new_dev)))
- return -EFAULT;
- file = vtpm_proxy_create_device(&vtpm_new_dev);
- if (IS_ERR(file))
- return PTR_ERR(file);
- if (copy_to_user(vtpm_new_dev_p, &vtpm_new_dev,
- sizeof(vtpm_new_dev))) {
- put_unused_fd(vtpm_new_dev.fd);
- fput(file);
- return -EFAULT;
- }
-
- fd_install(vtpm_new_dev.fd, file);
- return 0;
-
+ return vtpmx_ioc_new_dev(f, ioctl, arg);
default:
return -ENOIOCTLCMD;
}
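
To show the %VTPM_PROXY_IOC_NEW_DEV flow from the other end, a minimal user-space sketch (hypothetical example, not part of the patch; struct fields as in the vtpm_proxy UAPI header):

    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <linux/vtpm_proxy.h>

    int main(void)
    {
            struct vtpm_proxy_new_dev new_dev = { .flags = 0 };
            int ctrl = open("/dev/vtpmx", O_RDWR);

            if (ctrl < 0 || ioctl(ctrl, VTPM_PROXY_IOC_NEW_DEV, &new_dev) < 0)
                    return 1;

            /* new_dev.fd is the emulator's end of the proxy; clients
             * talk to the character device at new_dev.major:new_dev.minor. */
            printf("server fd %d, TPM device %u:%u\n",
                   new_dev.fd, new_dev.major, new_dev.minor);
            return 0;
    }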
diff --git a/drivers/char/tpm/xen-tpmfront.c b/drivers/char/tpm/xen-tpmfront.c
index 62028f483bba..5aaa268f3a78 100644
--- a/drivers/char/tpm/xen-tpmfront.c
+++ b/drivers/char/tpm/xen-tpmfront.c
@@ -307,7 +307,6 @@ static int tpmfront_probe(struct xenbus_device *dev,
rv = setup_ring(dev, priv);
if (rv) {
chip = dev_get_drvdata(&dev->dev);
- tpm_chip_unregister(chip);
ring_free(priv);
return rv;
}
@@ -337,18 +336,14 @@ static int tpmfront_resume(struct xenbus_device *dev)
static void backend_changed(struct xenbus_device *dev,
enum xenbus_state backend_state)
{
- int val;
-
switch (backend_state) {
case XenbusStateInitialised:
case XenbusStateConnected:
if (dev->state == XenbusStateConnected)
break;
- if (xenbus_scanf(XBT_NIL, dev->otherend,
- "feature-protocol-v2", "%d", &val) < 0)
- val = 0;
- if (!val) {
+ if (!xenbus_read_unsigned(dev->otherend, "feature-protocol-v2",
+ 0)) {
xenbus_dev_fatal(dev, -EINVAL,
"vTPM protocol 2 required");
return;
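
The switch to xenbus_read_unsigned() above folds the scanf-plus-default dance into one call; roughly, the helper behaves like this open-coded equivalent (illustrative sketch only):

    #include <xen/xenbus.h>

    /* Parse the node as an unsigned int; fall back to the default
     * (here 0) if it is absent, unreadable, or malformed. */
    static unsigned int protocol_v2_supported(struct xenbus_device *dev)
    {
            unsigned int val;

            if (xenbus_scanf(XBT_NIL, dev->otherend,
                             "feature-protocol-v2", "%u", &val) != 1)
                    val = 0;
            return val;
    }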
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
index 5649234b7316..8b00e79c2683 100644
--- a/drivers/char/virtio_console.c
+++ b/drivers/char/virtio_console.c
@@ -152,8 +152,8 @@ struct ports_device {
spinlock_t c_ivq_lock;
spinlock_t c_ovq_lock;
- /* The current config space is stored here */
- struct virtio_console_config config;
+ /* max. number of ports this device can hold */
+ u32 max_nr_ports;
/* The virtio device we're associated with */
struct virtio_device *vdev;
@@ -1649,11 +1649,11 @@ static void handle_control_message(struct virtio_device *vdev,
break;
}
if (virtio32_to_cpu(vdev, cpkt->id) >=
- portdev->config.max_nr_ports) {
+ portdev->max_nr_ports) {
dev_warn(&portdev->vdev->dev,
"Request for adding port with "
"out-of-bound id %u, max. supported id: %u\n",
- cpkt->id, portdev->config.max_nr_ports - 1);
+ cpkt->id, portdev->max_nr_ports - 1);
break;
}
add_port(portdev, virtio32_to_cpu(vdev, cpkt->id));
@@ -1894,7 +1894,7 @@ static int init_vqs(struct ports_device *portdev)
u32 i, j, nr_ports, nr_queues;
int err;
- nr_ports = portdev->config.max_nr_ports;
+ nr_ports = portdev->max_nr_ports;
nr_queues = use_multiport(portdev) ? (nr_ports + 1) * 2 : 2;
vqs = kmalloc(nr_queues * sizeof(struct virtqueue *), GFP_KERNEL);
@@ -2047,13 +2047,13 @@ static int virtcons_probe(struct virtio_device *vdev)
}
multiport = false;
- portdev->config.max_nr_ports = 1;
+ portdev->max_nr_ports = 1;
/* Don't test MULTIPORT at all if we're rproc: not a valid feature! */
if (!is_rproc_serial(vdev) &&
virtio_cread_feature(vdev, VIRTIO_CONSOLE_F_MULTIPORT,
struct virtio_console_config, max_nr_ports,
- &portdev->config.max_nr_ports) == 0) {
+ &portdev->max_nr_ports) == 0) {
multiport = true;
}
diff --git a/drivers/clk/Kconfig b/drivers/clk/Kconfig
index 6a8ac04bedeb..56c1998ced3e 100644
--- a/drivers/clk/Kconfig
+++ b/drivers/clk/Kconfig
@@ -33,7 +33,7 @@ source "drivers/clk/versatile/Kconfig"
config COMMON_CLK_MAX77686
tristate "Clock driver for Maxim 77620/77686/77802 MFD"
- depends on MFD_MAX77686 || MFD_MAX77620
+ depends on MFD_MAX77686 || MFD_MAX77620 || COMPILE_TEST
---help---
This driver supports Maxim 77620/77686/77802 crystal oscillator
clock.
@@ -119,7 +119,7 @@ config COMMON_CLK_CS2000_CP
config COMMON_CLK_S2MPS11
tristate "Clock driver for S2MPS1X/S5M8767 MFD"
- depends on MFD_SEC_CORE
+ depends on MFD_SEC_CORE || COMPILE_TEST
---help---
This driver supports S2MPS11/S2MPS14/S5M8767 crystal oscillator
clock. These multi-function devices have two (S2MPS14) or three
diff --git a/drivers/clk/bcm/Kconfig b/drivers/clk/bcm/Kconfig
index e3eed5a78404..b5ae5311b0a2 100644
--- a/drivers/clk/bcm/Kconfig
+++ b/drivers/clk/bcm/Kconfig
@@ -1,7 +1,6 @@
config CLK_BCM_63XX
bool "Broadcom BCM63xx clock support"
depends on ARCH_BCM_63XX || COMPILE_TEST
- depends on COMMON_CLK
select COMMON_CLK_IPROC
default ARCH_BCM_63XX
help
@@ -11,27 +10,22 @@ config CLK_BCM_63XX
config CLK_BCM_KONA
bool "Broadcom Kona CCU clock support"
depends on ARCH_BCM_MOBILE || COMPILE_TEST
- depends on COMMON_CLK
- default y
+ default ARCH_BCM_MOBILE
help
Enable common clock framework support for Broadcom SoCs
using "Kona" style clock control units, including those
in the BCM281xx and BCM21664 families.
config COMMON_CLK_IPROC
- bool "Broadcom iProc clock support"
- depends on ARCH_BCM_IPROC || ARCH_BCM_63XX || COMPILE_TEST
- depends on COMMON_CLK
- default ARCH_BCM_IPROC
+ bool
help
Enable common clock framework support for Broadcom SoCs
based on the iProc architecture
-if COMMON_CLK_IPROC
-
config CLK_BCM_CYGNUS
bool "Broadcom Cygnus clock support"
depends on ARCH_BCM_CYGNUS || COMPILE_TEST
+ select COMMON_CLK_IPROC
default ARCH_BCM_CYGNUS
help
Enable common clock framework support for the Broadcom Cygnus SoC
@@ -39,6 +33,7 @@ config CLK_BCM_CYGNUS
config CLK_BCM_NSP
bool "Broadcom Northstar/Northstar Plus clock support"
depends on ARCH_BCM_5301X || ARCH_BCM_NSP || COMPILE_TEST
+ select COMMON_CLK_IPROC
default ARCH_BCM_5301X || ARCH_BCM_NSP
help
Enable common clock framework support for the Broadcom Northstar and
@@ -47,8 +42,7 @@ config CLK_BCM_NSP
config CLK_BCM_NS2
bool "Broadcom Northstar 2 clock support"
depends on ARCH_BCM_IPROC || COMPILE_TEST
+ select COMMON_CLK_IPROC
default ARCH_BCM_IPROC
help
Enable common clock framework support for the Broadcom Northstar 2 SoC
-
-endif
diff --git a/drivers/clk/bcm/clk-bcm2835.c b/drivers/clk/bcm/clk-bcm2835.c
index 8c7763fd9efc..0d14409097e7 100644
--- a/drivers/clk/bcm/clk-bcm2835.c
+++ b/drivers/clk/bcm/clk-bcm2835.c
@@ -436,6 +436,9 @@ struct bcm2835_clock_data {
const char *const *parents;
int num_mux_parents;
+ /* Bitmap encoding which parents accept rate change propagation. */
+ unsigned int set_rate_parent;
+
u32 ctl_reg;
u32 div_reg;
@@ -751,7 +754,9 @@ static void bcm2835_pll_divider_off(struct clk_hw *hw)
cprman_write(cprman, data->cm_reg,
(cprman_read(cprman, data->cm_reg) &
~data->load_mask) | data->hold_mask);
- cprman_write(cprman, data->a2w_reg, A2W_PLL_CHANNEL_DISABLE);
+ cprman_write(cprman, data->a2w_reg,
+ cprman_read(cprman, data->a2w_reg) |
+ A2W_PLL_CHANNEL_DISABLE);
spin_unlock(&cprman->regs_lock);
}
@@ -1015,10 +1020,60 @@ bcm2835_clk_is_pllc(struct clk_hw *hw)
return strncmp(clk_hw_get_name(hw), "pllc", 4) == 0;
}
+static unsigned long bcm2835_clock_choose_div_and_prate(struct clk_hw *hw,
+ int parent_idx,
+ unsigned long rate,
+ u32 *div,
+ unsigned long *prate)
+{
+ struct bcm2835_clock *clock = bcm2835_clock_from_hw(hw);
+ struct bcm2835_cprman *cprman = clock->cprman;
+ const struct bcm2835_clock_data *data = clock->data;
+ unsigned long best_rate = 0;
+ u32 curdiv, bestdiv = 0, mindiv, maxdiv;
+ struct clk_hw *parent;
+
+ parent = clk_hw_get_parent_by_index(hw, parent_idx);
+
+ if (!(BIT(parent_idx) & data->set_rate_parent)) {
+ *prate = clk_hw_get_rate(parent);
+ *div = bcm2835_clock_choose_div(hw, rate, *prate, true);
+
+ return bcm2835_clock_rate_from_divisor(clock, *prate,
+ *div);
+ }
+
+ if (data->frac_bits)
+ dev_warn(cprman->dev,
+ "frac bits are not used when propagating rate change");
+
+ /* clamp to min divider of 2 if we're dealing with a mash clock */
+ mindiv = data->is_mash_clock ? 2 : 1;
+ maxdiv = BIT(data->int_bits) - 1;
+
+ /* TODO: Be smart, and only test a subset of the available divisors. */
+ for (curdiv = mindiv; curdiv <= maxdiv; curdiv++) {
+ unsigned long tmp_rate;
+
+ tmp_rate = clk_hw_round_rate(parent, rate * curdiv);
+ tmp_rate /= curdiv;
+ if (curdiv == mindiv ||
+ (tmp_rate > best_rate && tmp_rate <= rate)) {
+ best_rate = tmp_rate;
+ bestdiv = curdiv;
+ }
+
+ if (best_rate == rate)
+ break;
+ }
+
+ *div = bestdiv << CM_DIV_FRAC_BITS;
+ *prate = bestdiv * best_rate;
+
+ return best_rate;
+}
+
static int bcm2835_clock_determine_rate(struct clk_hw *hw,
struct clk_rate_request *req)
{
- struct bcm2835_clock *clock = bcm2835_clock_from_hw(hw);
struct clk_hw *parent, *best_parent = NULL;
bool current_parent_is_pllc;
unsigned long rate, best_rate = 0;
@@ -1046,9 +1101,8 @@ static int bcm2835_clock_determine_rate(struct clk_hw *hw,
if (bcm2835_clk_is_pllc(parent) && !current_parent_is_pllc)
continue;
- prate = clk_hw_get_rate(parent);
- div = bcm2835_clock_choose_div(hw, req->rate, prate, true);
- rate = bcm2835_clock_rate_from_divisor(clock, prate, div);
+ rate = bcm2835_clock_choose_div_and_prate(hw, i, req->rate,
+ &div, &prate);
if (rate > best_rate && rate <= req->rate) {
best_parent = parent;
best_prate = prate;
@@ -1260,6 +1314,13 @@ static struct clk_hw *bcm2835_register_clock(struct bcm2835_cprman *cprman,
init.name = data->name;
init.flags = data->flags | CLK_IGNORE_UNUSED;
+ /*
+ * Pass the CLK_SET_RATE_PARENT flag if we are allowed to propagate
+ * rate changes on at least one of the parents.
+ */
+ if (data->set_rate_parent)
+ init.flags |= CLK_SET_RATE_PARENT;
+
if (data->is_vpu_clock) {
init.ops = &bcm2835_vpu_clock_clk_ops;
} else {
@@ -1596,7 +1657,7 @@ static const struct bcm2835_clk_desc clk_desc_array[] = {
.a2w_reg = A2W_PLLH_AUX,
.load_mask = CM_PLLH_LOADAUX,
.hold_mask = 0,
- .fixed_divider = 10),
+ .fixed_divider = 1),
[BCM2835_PLLH_PIX] = REGISTER_PLL_DIV(
.name = "pllh_pix",
.source_pll = "pllh",
@@ -1800,7 +1861,12 @@ static const struct bcm2835_clk_desc clk_desc_array[] = {
.ctl_reg = CM_VECCTL,
.div_reg = CM_VECDIV,
.int_bits = 4,
- .frac_bits = 0),
+ .frac_bits = 0,
+ /*
+ * Allow rate change propagation only on PLLH_AUX which is
+ * assigned index 7 in the parent array.
+ */
+ .set_rate_parent = BIT(7)),
/* dsi clocks */
[BCM2835_CLOCK_DSI0E] = REGISTER_PER_CLK(
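
Abstracted from the driver, the divisor search introduced above reduces to the following standalone sketch (round_parent() stands in for clk_hw_round_rate(); names are illustrative):

    /* Scan integer divisors and remember the one whose resulting rate
     * comes closest to the target without exceeding it. */
    static unsigned long choose_div_and_rate(unsigned long target,
                                             unsigned int mindiv,
                                             unsigned int maxdiv,
                                             unsigned long (*round_parent)(unsigned long),
                                             unsigned int *bestdiv)
    {
            unsigned long best = 0;
            unsigned int div;

            for (div = mindiv; div <= maxdiv; div++) {
                    unsigned long rate = round_parent(target * div) / div;

                    if (div == mindiv || (rate > best && rate <= target)) {
                            best = rate;
                            *bestdiv = div;
                    }
                    if (best == target)
                            break;
            }
            return best;
    }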
diff --git a/drivers/clk/clk-cdce925.c b/drivers/clk/clk-cdce925.c
index b8459c14a1b7..f793b2d9238c 100644
--- a/drivers/clk/clk-cdce925.c
+++ b/drivers/clk/clk-cdce925.c
@@ -216,7 +216,7 @@ static int cdce925_pll_prepare(struct clk_hw *hw)
nn = n * BIT(p);
/* q = int(nn/m) */
q = nn / m;
- if ((q < 16) || (1 > 64)) {
+ if ((q < 16) || (q > 63)) {
pr_debug("%s invalid q=%d\n", __func__, q);
return -EINVAL;
}
diff --git a/drivers/clk/clk-devres.c b/drivers/clk/clk-devres.c
index 8f571548870f..3a218c3a06ae 100644
--- a/drivers/clk/clk-devres.c
+++ b/drivers/clk/clk-devres.c
@@ -53,3 +53,24 @@ void devm_clk_put(struct device *dev, struct clk *clk)
WARN_ON(ret);
}
EXPORT_SYMBOL(devm_clk_put);
+
+struct clk *devm_get_clk_from_child(struct device *dev,
+ struct device_node *np, const char *con_id)
+{
+ struct clk **ptr, *clk;
+
+ ptr = devres_alloc(devm_clk_release, sizeof(*ptr), GFP_KERNEL);
+ if (!ptr)
+ return ERR_PTR(-ENOMEM);
+
+ clk = of_clk_get_by_name(np, con_id);
+ if (!IS_ERR(clk)) {
+ *ptr = clk;
+ devres_add(dev, ptr);
+ } else {
+ devres_free(ptr);
+ }
+
+ return clk;
+}
+EXPORT_SYMBOL(devm_get_clk_from_child);
diff --git a/drivers/clk/clk-gate.c b/drivers/clk/clk-gate.c
index 4e691e35483a..4e0c054a787c 100644
--- a/drivers/clk/clk-gate.c
+++ b/drivers/clk/clk-gate.c
@@ -145,8 +145,8 @@ struct clk_hw *clk_hw_register_gate(struct device *dev, const char *name,
init.name = name;
init.ops = &clk_gate_ops;
init.flags = flags | CLK_IS_BASIC;
- init.parent_names = (parent_name ? &parent_name: NULL);
- init.num_parents = (parent_name ? 1 : 0);
+ init.parent_names = parent_name ? &parent_name : NULL;
+ init.num_parents = parent_name ? 1 : 0;
/* struct clk_gate assignments */
gate->reg = reg;
diff --git a/drivers/clk/clk-oxnas.c b/drivers/clk/clk-oxnas.c
index 47649ac5d399..e51e0023fc6e 100644
--- a/drivers/clk/clk-oxnas.c
+++ b/drivers/clk/clk-oxnas.c
@@ -20,31 +20,43 @@
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/of.h>
+#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/stringify.h>
#include <linux/regmap.h>
#include <linux/mfd/syscon.h>
+#include <dt-bindings/clock/oxsemi,ox810se.h>
+#include <dt-bindings/clock/oxsemi,ox820.h>
+
/* Standard regmap gate clocks */
-struct clk_oxnas {
+struct clk_oxnas_gate {
struct clk_hw hw;
- signed char bit;
+ unsigned int bit;
struct regmap *regmap;
};
+struct oxnas_stdclk_data {
+ struct clk_hw_onecell_data *onecell_data;
+ struct clk_oxnas_gate **gates;
+ unsigned int ngates;
+ struct clk_oxnas_pll **plls;
+ unsigned int nplls;
+};
+
/* Regmap offsets */
#define CLK_STAT_REGOFFSET 0x24
#define CLK_SET_REGOFFSET 0x2c
#define CLK_CLR_REGOFFSET 0x30
-static inline struct clk_oxnas *to_clk_oxnas(struct clk_hw *hw)
+static inline struct clk_oxnas_gate *to_clk_oxnas_gate(struct clk_hw *hw)
{
- return container_of(hw, struct clk_oxnas, hw);
+ return container_of(hw, struct clk_oxnas_gate, hw);
}
-static int oxnas_clk_is_enabled(struct clk_hw *hw)
+static int oxnas_clk_gate_is_enabled(struct clk_hw *hw)
{
- struct clk_oxnas *std = to_clk_oxnas(hw);
+ struct clk_oxnas_gate *std = to_clk_oxnas_gate(hw);
int ret;
unsigned int val;
@@ -55,29 +67,29 @@ static int oxnas_clk_is_enabled(struct clk_hw *hw)
return val & BIT(std->bit);
}
-static int oxnas_clk_enable(struct clk_hw *hw)
+static int oxnas_clk_gate_enable(struct clk_hw *hw)
{
- struct clk_oxnas *std = to_clk_oxnas(hw);
+ struct clk_oxnas_gate *std = to_clk_oxnas_gate(hw);
regmap_write(std->regmap, CLK_SET_REGOFFSET, BIT(std->bit));
return 0;
}
-static void oxnas_clk_disable(struct clk_hw *hw)
+static void oxnas_clk_gate_disable(struct clk_hw *hw)
{
- struct clk_oxnas *std = to_clk_oxnas(hw);
+ struct clk_oxnas_gate *std = to_clk_oxnas_gate(hw);
regmap_write(std->regmap, CLK_CLR_REGOFFSET, BIT(std->bit));
}
-static const struct clk_ops oxnas_clk_ops = {
- .enable = oxnas_clk_enable,
- .disable = oxnas_clk_disable,
- .is_enabled = oxnas_clk_is_enabled,
+static const struct clk_ops oxnas_clk_gate_ops = {
+ .enable = oxnas_clk_gate_enable,
+ .disable = oxnas_clk_gate_disable,
+ .is_enabled = oxnas_clk_gate_is_enabled,
};
-static const char *const oxnas_clk_parents[] = {
+static const char *const osc_parents[] = {
"oscillator",
};
@@ -85,63 +97,138 @@ static const char *const eth_parents[] = {
"gmacclk",
};
-#define DECLARE_STD_CLKP(__clk, __parent) \
-static const struct clk_init_data clk_##__clk##_init = { \
- .name = __stringify(__clk), \
- .ops = &oxnas_clk_ops, \
- .parent_names = __parent, \
- .num_parents = ARRAY_SIZE(__parent), \
+#define OXNAS_GATE(_name, _bit, _parents) \
+struct clk_oxnas_gate _name = { \
+ .bit = (_bit), \
+ .hw.init = &(struct clk_init_data) { \
+ .name = #_name, \
+ .ops = &oxnas_clk_gate_ops, \
+ .parent_names = _parents, \
+ .num_parents = ARRAY_SIZE(_parents), \
+ .flags = (CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED), \
+ }, \
}
-#define DECLARE_STD_CLK(__clk) DECLARE_STD_CLKP(__clk, oxnas_clk_parents)
+static OXNAS_GATE(ox810se_leon, 0, osc_parents);
+static OXNAS_GATE(ox810se_dma_sgdma, 1, osc_parents);
+static OXNAS_GATE(ox810se_cipher, 2, osc_parents);
+static OXNAS_GATE(ox810se_sata, 4, osc_parents);
+static OXNAS_GATE(ox810se_audio, 5, osc_parents);
+static OXNAS_GATE(ox810se_usbmph, 6, osc_parents);
+static OXNAS_GATE(ox810se_etha, 7, eth_parents);
+static OXNAS_GATE(ox810se_pciea, 8, osc_parents);
+static OXNAS_GATE(ox810se_nand, 9, osc_parents);
+
+static struct clk_oxnas_gate *ox810se_gates[] = {
+ &ox810se_leon,
+ &ox810se_dma_sgdma,
+ &ox810se_cipher,
+ &ox810se_sata,
+ &ox810se_audio,
+ &ox810se_usbmph,
+ &ox810se_etha,
+ &ox810se_pciea,
+ &ox810se_nand,
+};
+
+static OXNAS_GATE(ox820_leon, 0, osc_parents);
+static OXNAS_GATE(ox820_dma_sgdma, 1, osc_parents);
+static OXNAS_GATE(ox820_cipher, 2, osc_parents);
+static OXNAS_GATE(ox820_sd, 3, osc_parents);
+static OXNAS_GATE(ox820_sata, 4, osc_parents);
+static OXNAS_GATE(ox820_audio, 5, osc_parents);
+static OXNAS_GATE(ox820_usbmph, 6, osc_parents);
+static OXNAS_GATE(ox820_etha, 7, eth_parents);
+static OXNAS_GATE(ox820_pciea, 8, osc_parents);
+static OXNAS_GATE(ox820_nand, 9, osc_parents);
+static OXNAS_GATE(ox820_ethb, 10, eth_parents);
+static OXNAS_GATE(ox820_pcieb, 11, osc_parents);
+static OXNAS_GATE(ox820_ref600, 12, osc_parents);
+static OXNAS_GATE(ox820_usbdev, 13, osc_parents);
+
+static struct clk_oxnas_gate *ox820_gates[] = {
+ &ox820_leon,
+ &ox820_dma_sgdma,
+ &ox820_cipher,
+ &ox820_sd,
+ &ox820_sata,
+ &ox820_audio,
+ &ox820_usbmph,
+ &ox820_etha,
+ &ox820_pciea,
+ &ox820_nand,
+ &ox820_ethb,
+ &ox820_pcieb,
+ &ox820_ref600,
+ &ox820_usbdev,
+};
+
+static struct clk_hw_onecell_data ox810se_hw_onecell_data = {
+ .hws = {
+ [CLK_810_LEON] = &ox810se_leon.hw,
+ [CLK_810_DMA_SGDMA] = &ox810se_dma_sgdma.hw,
+ [CLK_810_CIPHER] = &ox810se_cipher.hw,
+ [CLK_810_SATA] = &ox810se_sata.hw,
+ [CLK_810_AUDIO] = &ox810se_audio.hw,
+ [CLK_810_USBMPH] = &ox810se_usbmph.hw,
+ [CLK_810_ETHA] = &ox810se_etha.hw,
+ [CLK_810_PCIEA] = &ox810se_pciea.hw,
+ [CLK_810_NAND] = &ox810se_nand.hw,
+ },
+ .num = ARRAY_SIZE(ox810se_gates),
+};
+
+static struct clk_hw_onecell_data ox820_hw_onecell_data = {
+ .hws = {
+ [CLK_820_LEON] = &ox820_leon.hw,
+ [CLK_820_DMA_SGDMA] = &ox820_dma_sgdma.hw,
+ [CLK_820_CIPHER] = &ox820_cipher.hw,
+ [CLK_820_SD] = &ox820_sd.hw,
+ [CLK_820_SATA] = &ox820_sata.hw,
+ [CLK_820_AUDIO] = &ox820_audio.hw,
+ [CLK_820_USBMPH] = &ox820_usbmph.hw,
+ [CLK_820_ETHA] = &ox820_etha.hw,
+ [CLK_820_PCIEA] = &ox820_pciea.hw,
+ [CLK_820_NAND] = &ox820_nand.hw,
+ [CLK_820_ETHB] = &ox820_ethb.hw,
+ [CLK_820_PCIEB] = &ox820_pcieb.hw,
+ [CLK_820_REF600] = &ox820_ref600.hw,
+ [CLK_820_USBDEV] = &ox820_usbdev.hw,
+ },
+ .num = ARRAY_SIZE(ox820_gates),
+};
-/* Hardware Bit - Clock association */
-struct clk_oxnas_init_data {
- unsigned long bit;
- const struct clk_init_data *clk_init;
+static struct oxnas_stdclk_data ox810se_stdclk_data = {
+ .onecell_data = &ox810se_hw_onecell_data,
+ .gates = ox810se_gates,
+ .ngates = ARRAY_SIZE(ox810se_gates),
};
-/* Clk init data declaration */
-DECLARE_STD_CLK(leon);
-DECLARE_STD_CLK(dma_sgdma);
-DECLARE_STD_CLK(cipher);
-DECLARE_STD_CLK(sata);
-DECLARE_STD_CLK(audio);
-DECLARE_STD_CLK(usbmph);
-DECLARE_STD_CLKP(etha, eth_parents);
-DECLARE_STD_CLK(pciea);
-DECLARE_STD_CLK(nand);
-
-/* Table index is clock indice */
-static const struct clk_oxnas_init_data clk_oxnas_init[] = {
- [0] = {0, &clk_leon_init},
- [1] = {1, &clk_dma_sgdma_init},
- [2] = {2, &clk_cipher_init},
- /* Skip & Do not touch to DDR clock */
- [3] = {4, &clk_sata_init},
- [4] = {5, &clk_audio_init},
- [5] = {6, &clk_usbmph_init},
- [6] = {7, &clk_etha_init},
- [7] = {8, &clk_pciea_init},
- [8] = {9, &clk_nand_init},
+static struct oxnas_stdclk_data ox820_stdclk_data = {
+ .onecell_data = &ox820_hw_onecell_data,
+ .gates = ox820_gates,
+ .ngates = ARRAY_SIZE(ox820_gates),
};
-struct clk_oxnas_data {
- struct clk_oxnas clk_oxnas[ARRAY_SIZE(clk_oxnas_init)];
- struct clk_onecell_data onecell_data[ARRAY_SIZE(clk_oxnas_init)];
- struct clk *clks[ARRAY_SIZE(clk_oxnas_init)];
+static const struct of_device_id oxnas_stdclk_dt_ids[] = {
+ { .compatible = "oxsemi,ox810se-stdclk", &ox810se_stdclk_data },
+ { .compatible = "oxsemi,ox820-stdclk", &ox820_stdclk_data },
+ { }
};
static int oxnas_stdclk_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
- struct clk_oxnas_data *clk_oxnas;
+ const struct oxnas_stdclk_data *data;
+ const struct of_device_id *id;
struct regmap *regmap;
+ int ret;
int i;
- clk_oxnas = devm_kzalloc(&pdev->dev, sizeof(*clk_oxnas), GFP_KERNEL);
- if (!clk_oxnas)
- return -ENOMEM;
+ id = of_match_device(oxnas_stdclk_dt_ids, &pdev->dev);
+ if (!id)
+ return -ENODEV;
+ data = id->data;
regmap = syscon_node_to_regmap(of_get_parent(np));
if (IS_ERR(regmap)) {
@@ -149,32 +236,23 @@ static int oxnas_stdclk_probe(struct platform_device *pdev)
return PTR_ERR(regmap);
}
- for (i = 0; i < ARRAY_SIZE(clk_oxnas_init); i++) {
- struct clk_oxnas *_clk;
+ for (i = 0; i < data->ngates; ++i)
+ data->gates[i]->regmap = regmap;
- _clk = &clk_oxnas->clk_oxnas[i];
- _clk->bit = clk_oxnas_init[i].bit;
- _clk->hw.init = clk_oxnas_init[i].clk_init;
- _clk->regmap = regmap;
+ for (i = 0; i < data->onecell_data->num; i++) {
+ if (!data->onecell_data->hws[i])
+ continue;
- clk_oxnas->clks[i] =
- devm_clk_register(&pdev->dev, &_clk->hw);
- if (WARN_ON(IS_ERR(clk_oxnas->clks[i])))
- return PTR_ERR(clk_oxnas->clks[i]);
+ ret = devm_clk_hw_register(&pdev->dev,
+ data->onecell_data->hws[i]);
+ if (ret)
+ return ret;
}
- clk_oxnas->onecell_data->clks = clk_oxnas->clks;
- clk_oxnas->onecell_data->clk_num = ARRAY_SIZE(clk_oxnas_init);
-
- return of_clk_add_provider(np, of_clk_src_onecell_get,
- clk_oxnas->onecell_data);
+ return of_clk_add_hw_provider(np, of_clk_hw_onecell_get,
+ data->onecell_data);
}
-static const struct of_device_id oxnas_stdclk_dt_ids[] = {
- { .compatible = "oxsemi,ox810se-stdclk" },
- { }
-};
-
static struct platform_driver oxnas_stdclk_driver = {
.probe = oxnas_stdclk_probe,
.driver = {
diff --git a/drivers/clk/clk-qoriq.c b/drivers/clk/clk-qoriq.c
index 80ae2a51452d..d0bf8b1c67de 100644
--- a/drivers/clk/clk-qoriq.c
+++ b/drivers/clk/clk-qoriq.c
@@ -266,6 +266,39 @@ static const struct clockgen_muxinfo ls1043a_hwa2 = {
},
};
+static const struct clockgen_muxinfo ls1046a_hwa1 = {
+ {
+ {},
+ {},
+ { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
+ { CLKSEL_VALID, CGA_PLL1, PLL_DIV3 },
+ { CLKSEL_VALID, CGA_PLL1, PLL_DIV4 },
+ { CLKSEL_VALID, PLATFORM_PLL, PLL_DIV1 },
+ { CLKSEL_VALID, CGA_PLL2, PLL_DIV2 },
+ { CLKSEL_VALID, CGA_PLL2, PLL_DIV3 },
+ },
+};
+
+static const struct clockgen_muxinfo ls1046a_hwa2 = {
+ {
+ {},
+ { CLKSEL_VALID, CGA_PLL2, PLL_DIV1 },
+ { CLKSEL_VALID, CGA_PLL2, PLL_DIV2 },
+ { CLKSEL_VALID, CGA_PLL2, PLL_DIV3 },
+ {},
+ {},
+ { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
+ },
+};
+
+static const struct clockgen_muxinfo ls1012a_cmux = {
+ {
+ [0] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV1 },
+ {},
+ [2] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
+ }
+};
+
static const struct clockgen_muxinfo t1023_hwa1 = {
{
{},
@@ -489,6 +522,31 @@ static const struct clockgen_chipinfo chipinfo[] = {
.flags = CG_PLL_8BIT,
},
{
+ .compat = "fsl,ls1046a-clockgen",
+ .init_periph = t2080_init_periph,
+ .cmux_groups = {
+ &t1040_cmux
+ },
+ .hwaccel = {
+ &ls1046a_hwa1, &ls1046a_hwa2
+ },
+ .cmux_to_group = {
+ 0, -1
+ },
+ .pll_mask = 0x07,
+ .flags = CG_PLL_8BIT,
+ },
+ {
+ .compat = "fsl,ls1012a-clockgen",
+ .cmux_groups = {
+ &ls1012a_cmux
+ },
+ .cmux_to_group = {
+ 0, -1
+ },
+ .pll_mask = 0x03,
+ },
+ {
.compat = "fsl,ls2080a-clockgen",
.cmux_groups = {
&clockgen2_cmux_cga12, &clockgen2_cmux_cgb
@@ -1273,8 +1331,10 @@ err:
CLK_OF_DECLARE(qoriq_clockgen_1, "fsl,qoriq-clockgen-1.0", clockgen_init);
CLK_OF_DECLARE(qoriq_clockgen_2, "fsl,qoriq-clockgen-2.0", clockgen_init);
+CLK_OF_DECLARE(qoriq_clockgen_ls1012a, "fsl,ls1012a-clockgen", clockgen_init);
CLK_OF_DECLARE(qoriq_clockgen_ls1021a, "fsl,ls1021a-clockgen", clockgen_init);
CLK_OF_DECLARE(qoriq_clockgen_ls1043a, "fsl,ls1043a-clockgen", clockgen_init);
+CLK_OF_DECLARE(qoriq_clockgen_ls1046a, "fsl,ls1046a-clockgen", clockgen_init);
CLK_OF_DECLARE(qoriq_clockgen_ls2080a, "fsl,ls2080a-clockgen", clockgen_init);
/* Legacy nodes */
diff --git a/drivers/clk/clk-stm32f4.c b/drivers/clk/clk-stm32f4.c
index 02d681008401..5eb05dbf59b8 100644
--- a/drivers/clk/clk-stm32f4.c
+++ b/drivers/clk/clk-stm32f4.c
@@ -19,10 +19,14 @@
#include <linux/clk-provider.h>
#include <linux/err.h>
#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/of.h>
#include <linux/of_address.h>
+#include <linux/regmap.h>
+#include <linux/mfd/syscon.h>
#define STM32F4_RCC_PLLCFGR 0x04
#define STM32F4_RCC_CFGR 0x08
@@ -31,6 +35,8 @@
#define STM32F4_RCC_AHB3ENR 0x38
#define STM32F4_RCC_APB1ENR 0x40
#define STM32F4_RCC_APB2ENR 0x44
+#define STM32F4_RCC_BDCR 0x70
+#define STM32F4_RCC_CSR 0x74
struct stm32f4_gate_data {
u8 offset;
@@ -40,7 +46,7 @@ struct stm32f4_gate_data {
unsigned long flags;
};
-static const struct stm32f4_gate_data stm32f4_gates[] __initconst = {
+static const struct stm32f4_gate_data stm32f429_gates[] __initconst = {
{ STM32F4_RCC_AHB1ENR, 0, "gpioa", "ahb_div" },
{ STM32F4_RCC_AHB1ENR, 1, "gpiob", "ahb_div" },
{ STM32F4_RCC_AHB1ENR, 2, "gpioc", "ahb_div" },
@@ -120,26 +126,113 @@ static const struct stm32f4_gate_data stm32f4_gates[] __initconst = {
{ STM32F4_RCC_APB2ENR, 26, "ltdc", "apb2_div" },
};
-/*
- * MAX_CLKS is the maximum value in the enumeration below plus the combined
- * hweight of stm32f42xx_gate_map (plus one).
- */
-#define MAX_CLKS 74
+static const struct stm32f4_gate_data stm32f469_gates[] __initconst = {
+ { STM32F4_RCC_AHB1ENR, 0, "gpioa", "ahb_div" },
+ { STM32F4_RCC_AHB1ENR, 1, "gpiob", "ahb_div" },
+ { STM32F4_RCC_AHB1ENR, 2, "gpioc", "ahb_div" },
+ { STM32F4_RCC_AHB1ENR, 3, "gpiod", "ahb_div" },
+ { STM32F4_RCC_AHB1ENR, 4, "gpioe", "ahb_div" },
+ { STM32F4_RCC_AHB1ENR, 5, "gpiof", "ahb_div" },
+ { STM32F4_RCC_AHB1ENR, 6, "gpiog", "ahb_div" },
+ { STM32F4_RCC_AHB1ENR, 7, "gpioh", "ahb_div" },
+ { STM32F4_RCC_AHB1ENR, 8, "gpioi", "ahb_div" },
+ { STM32F4_RCC_AHB1ENR, 9, "gpioj", "ahb_div" },
+ { STM32F4_RCC_AHB1ENR, 10, "gpiok", "ahb_div" },
+ { STM32F4_RCC_AHB1ENR, 12, "crc", "ahb_div" },
+ { STM32F4_RCC_AHB1ENR, 18, "bkpsra", "ahb_div" },
+ { STM32F4_RCC_AHB1ENR, 20, "ccmdatam", "ahb_div" },
+ { STM32F4_RCC_AHB1ENR, 21, "dma1", "ahb_div" },
+ { STM32F4_RCC_AHB1ENR, 22, "dma2", "ahb_div" },
+ { STM32F4_RCC_AHB1ENR, 23, "dma2d", "ahb_div" },
+ { STM32F4_RCC_AHB1ENR, 25, "ethmac", "ahb_div" },
+ { STM32F4_RCC_AHB1ENR, 26, "ethmactx", "ahb_div" },
+ { STM32F4_RCC_AHB1ENR, 27, "ethmacrx", "ahb_div" },
+ { STM32F4_RCC_AHB1ENR, 28, "ethmacptp", "ahb_div" },
+ { STM32F4_RCC_AHB1ENR, 29, "otghs", "ahb_div" },
+ { STM32F4_RCC_AHB1ENR, 30, "otghsulpi", "ahb_div" },
+
+ { STM32F4_RCC_AHB2ENR, 0, "dcmi", "ahb_div" },
+ { STM32F4_RCC_AHB2ENR, 4, "cryp", "ahb_div" },
+ { STM32F4_RCC_AHB2ENR, 5, "hash", "ahb_div" },
+ { STM32F4_RCC_AHB2ENR, 6, "rng", "pll48" },
+ { STM32F4_RCC_AHB2ENR, 7, "otgfs", "pll48" },
+
+ { STM32F4_RCC_AHB3ENR, 0, "fmc", "ahb_div",
+ CLK_IGNORE_UNUSED },
+ { STM32F4_RCC_AHB3ENR, 1, "qspi", "ahb_div",
+ CLK_IGNORE_UNUSED },
+
+ { STM32F4_RCC_APB1ENR, 0, "tim2", "apb1_mul" },
+ { STM32F4_RCC_APB1ENR, 1, "tim3", "apb1_mul" },
+ { STM32F4_RCC_APB1ENR, 2, "tim4", "apb1_mul" },
+ { STM32F4_RCC_APB1ENR, 3, "tim5", "apb1_mul" },
+ { STM32F4_RCC_APB1ENR, 4, "tim6", "apb1_mul" },
+ { STM32F4_RCC_APB1ENR, 5, "tim7", "apb1_mul" },
+ { STM32F4_RCC_APB1ENR, 6, "tim12", "apb1_mul" },
+ { STM32F4_RCC_APB1ENR, 7, "tim13", "apb1_mul" },
+ { STM32F4_RCC_APB1ENR, 8, "tim14", "apb1_mul" },
+ { STM32F4_RCC_APB1ENR, 11, "wwdg", "apb1_div" },
+ { STM32F4_RCC_APB1ENR, 14, "spi2", "apb1_div" },
+ { STM32F4_RCC_APB1ENR, 15, "spi3", "apb1_div" },
+ { STM32F4_RCC_APB1ENR, 17, "uart2", "apb1_div" },
+ { STM32F4_RCC_APB1ENR, 18, "uart3", "apb1_div" },
+ { STM32F4_RCC_APB1ENR, 19, "uart4", "apb1_div" },
+ { STM32F4_RCC_APB1ENR, 20, "uart5", "apb1_div" },
+ { STM32F4_RCC_APB1ENR, 21, "i2c1", "apb1_div" },
+ { STM32F4_RCC_APB1ENR, 22, "i2c2", "apb1_div" },
+ { STM32F4_RCC_APB1ENR, 23, "i2c3", "apb1_div" },
+ { STM32F4_RCC_APB1ENR, 25, "can1", "apb1_div" },
+ { STM32F4_RCC_APB1ENR, 26, "can2", "apb1_div" },
+ { STM32F4_RCC_APB1ENR, 28, "pwr", "apb1_div" },
+ { STM32F4_RCC_APB1ENR, 29, "dac", "apb1_div" },
+ { STM32F4_RCC_APB1ENR, 30, "uart7", "apb1_div" },
+ { STM32F4_RCC_APB1ENR, 31, "uart8", "apb1_div" },
+
+ { STM32F4_RCC_APB2ENR, 0, "tim1", "apb2_mul" },
+ { STM32F4_RCC_APB2ENR, 1, "tim8", "apb2_mul" },
+ { STM32F4_RCC_APB2ENR, 4, "usart1", "apb2_div" },
+ { STM32F4_RCC_APB2ENR, 5, "usart6", "apb2_div" },
+ { STM32F4_RCC_APB2ENR, 8, "adc1", "apb2_div" },
+ { STM32F4_RCC_APB2ENR, 9, "adc2", "apb2_div" },
+ { STM32F4_RCC_APB2ENR, 10, "adc3", "apb2_div" },
+ { STM32F4_RCC_APB2ENR, 11, "sdio", "pll48" },
+ { STM32F4_RCC_APB2ENR, 12, "spi1", "apb2_div" },
+ { STM32F4_RCC_APB2ENR, 13, "spi4", "apb2_div" },
+ { STM32F4_RCC_APB2ENR, 14, "syscfg", "apb2_div" },
+ { STM32F4_RCC_APB2ENR, 16, "tim9", "apb2_mul" },
+ { STM32F4_RCC_APB2ENR, 17, "tim10", "apb2_mul" },
+ { STM32F4_RCC_APB2ENR, 18, "tim11", "apb2_mul" },
+ { STM32F4_RCC_APB2ENR, 20, "spi5", "apb2_div" },
+ { STM32F4_RCC_APB2ENR, 21, "spi6", "apb2_div" },
+ { STM32F4_RCC_APB2ENR, 22, "sai1", "apb2_div" },
+ { STM32F4_RCC_APB2ENR, 26, "ltdc", "apb2_div" },
+};
-enum { SYSTICK, FCLK };
+enum { SYSTICK, FCLK, CLK_LSI, CLK_LSE, CLK_HSE_RTC, CLK_RTC, END_PRIMARY_CLK };
/*
* This bitmask tells us which bit offsets (0..192) on STM32F4[23]xxx
* have gate bits associated with them. Its combined hweight is 71.
*/
-static const u64 stm32f42xx_gate_map[] = { 0x000000f17ef417ffull,
- 0x0000000000000001ull,
- 0x04777f33f6fec9ffull };
+#define MAX_GATE_MAP 3
+
+static const u64 stm32f42xx_gate_map[MAX_GATE_MAP] = { 0x000000f17ef417ffull,
+ 0x0000000000000001ull,
+ 0x04777f33f6fec9ffull };
+
+static const u64 stm32f46xx_gate_map[MAX_GATE_MAP] = { 0x000000f17ef417ffull,
+ 0x0000000000000003ull,
+ 0x0c777f33f6fec9ffull };
+
+static const u64 *stm32f4_gate_map;
+
+static struct clk_hw **clks;
-static struct clk_hw *clks[MAX_CLKS];
static DEFINE_SPINLOCK(stm32f4_clk_lock);
static void __iomem *base;
+static struct regmap *pdrm;
+
/*
* "Multiplier" device for APBx clocks.
*
@@ -256,15 +349,15 @@ static void stm32f4_rcc_register_pll(const char *hse_clk, const char *hsi_clk)
*/
static int stm32f4_rcc_lookup_clk_idx(u8 primary, u8 secondary)
{
- u64 table[ARRAY_SIZE(stm32f42xx_gate_map)];
+ u64 table[MAX_GATE_MAP];
if (primary == 1) {
- if (WARN_ON(secondary > FCLK))
+ if (WARN_ON(secondary >= END_PRIMARY_CLK))
return -EINVAL;
return secondary;
}
- memcpy(table, stm32f42xx_gate_map, sizeof(table));
+ memcpy(table, stm32f4_gate_map, sizeof(table));
/* only bits set in table can be used as indices */
if (WARN_ON(secondary >= BITS_PER_BYTE * sizeof(table) ||
@@ -276,7 +369,7 @@ static int stm32f4_rcc_lookup_clk_idx(u8 primary, u8 secondary)
table[BIT_ULL_WORD(secondary)] &=
GENMASK_ULL(secondary % BITS_PER_LONG_LONG, 0);
- return FCLK + hweight64(table[0]) +
+ return END_PRIMARY_CLK - 1 + hweight64(table[0]) +
(BIT_ULL_WORD(secondary) >= 1 ? hweight64(table[1]) : 0) +
(BIT_ULL_WORD(secondary) >= 2 ? hweight64(table[2]) : 0);
}
@@ -292,6 +385,212 @@ stm32f4_rcc_lookup_clk(struct of_phandle_args *clkspec, void *data)
return clks[i];
}
+#define to_rgclk(_rgate) container_of(_rgate, struct stm32_rgate, gate)
+
+static inline void disable_power_domain_write_protection(void)
+{
+ if (pdrm)
+ regmap_update_bits(pdrm, 0x00, (1 << 8), (1 << 8));
+}
+
+static inline void enable_power_domain_write_protection(void)
+{
+ if (pdrm)
+ regmap_update_bits(pdrm, 0x00, (1 << 8), (0 << 8));
+}
+
+static inline void software_reset_backup_domain(void)
+{
+ unsigned long val;
+
+ val = readl(base + STM32F4_RCC_BDCR);
+ writel(val | BIT(16), base + STM32F4_RCC_BDCR);
+ writel(val & ~BIT(16), base + STM32F4_RCC_BDCR);
+}
+
+struct stm32_rgate {
+ struct clk_gate gate;
+ u8 bit_rdy_idx;
+};
+
+#define RTC_TIMEOUT 1000000
+
+static int rgclk_enable(struct clk_hw *hw)
+{
+ struct clk_gate *gate = to_clk_gate(hw);
+ struct stm32_rgate *rgate = to_rgclk(gate);
+ u32 reg;
+ int ret;
+
+ disable_power_domain_write_protection();
+
+ clk_gate_ops.enable(hw);
+
+ ret = readl_relaxed_poll_timeout_atomic(gate->reg, reg,
+ reg & rgate->bit_rdy_idx, 1000, RTC_TIMEOUT);
+
+ enable_power_domain_write_protection();
+ return ret;
+}
+
+static void rgclk_disable(struct clk_hw *hw)
+{
+ clk_gate_ops.disable(hw);
+}
+
+static int rgclk_is_enabled(struct clk_hw *hw)
+{
+ return clk_gate_ops.is_enabled(hw);
+}
+
+static const struct clk_ops rgclk_ops = {
+ .enable = rgclk_enable,
+ .disable = rgclk_disable,
+ .is_enabled = rgclk_is_enabled,
+};
+
+static struct clk_hw *clk_register_rgate(struct device *dev, const char *name,
+ const char *parent_name, unsigned long flags,
+ void __iomem *reg, u8 bit_idx, u8 bit_rdy_idx,
+ u8 clk_gate_flags, spinlock_t *lock)
+{
+ struct stm32_rgate *rgate;
+ struct clk_init_data init = { NULL };
+ struct clk_hw *hw;
+ int ret;
+
+ rgate = kzalloc(sizeof(*rgate), GFP_KERNEL);
+ if (!rgate)
+ return ERR_PTR(-ENOMEM);
+
+ init.name = name;
+ init.ops = &rgclk_ops;
+ init.flags = flags;
+ init.parent_names = &parent_name;
+ init.num_parents = 1;
+
+ rgate->bit_rdy_idx = bit_rdy_idx;
+
+ rgate->gate.lock = lock;
+ rgate->gate.reg = reg;
+ rgate->gate.bit_idx = bit_idx;
+ rgate->gate.hw.init = &init;
+
+ hw = &rgate->gate.hw;
+ ret = clk_hw_register(dev, hw);
+ if (ret) {
+ kfree(rgate);
+ hw = ERR_PTR(ret);
+ }
+
+ return hw;
+}
+
+static int cclk_gate_enable(struct clk_hw *hw)
+{
+ int ret;
+
+ disable_power_domain_write_protection();
+
+ ret = clk_gate_ops.enable(hw);
+
+ enable_power_domain_write_protection();
+
+ return ret;
+}
+
+static void cclk_gate_disable(struct clk_hw *hw)
+{
+ disable_power_domain_write_protection();
+
+ clk_gate_ops.disable(hw);
+
+ enable_power_domain_write_protection();
+}
+
+static int cclk_gate_is_enabled(struct clk_hw *hw)
+{
+ return clk_gate_ops.is_enabled(hw);
+}
+
+static const struct clk_ops cclk_gate_ops = {
+ .enable = cclk_gate_enable,
+ .disable = cclk_gate_disable,
+ .is_enabled = cclk_gate_is_enabled,
+};
+
+static u8 cclk_mux_get_parent(struct clk_hw *hw)
+{
+ return clk_mux_ops.get_parent(hw);
+}
+
+static int cclk_mux_set_parent(struct clk_hw *hw, u8 index)
+{
+ int ret;
+
+ disable_power_domain_write_protection();
+
+ software_reset_backup_domain();
+
+ ret = clk_mux_ops.set_parent(hw, index);
+
+ enable_power_domain_write_protection();
+
+ return ret;
+}
+
+static const struct clk_ops cclk_mux_ops = {
+ .get_parent = cclk_mux_get_parent,
+ .set_parent = cclk_mux_set_parent,
+};
+
+static struct clk_hw *stm32_register_cclk(struct device *dev, const char *name,
+ const char * const *parent_names, int num_parents,
+ void __iomem *reg, u8 bit_idx, u8 shift, unsigned long flags,
+ spinlock_t *lock)
+{
+ struct clk_hw *hw;
+ struct clk_gate *gate;
+ struct clk_mux *mux;
+
+ gate = kzalloc(sizeof(*gate), GFP_KERNEL);
+ if (!gate) {
+ hw = ERR_PTR(-ENOMEM);
+ goto fail;
+ }
+
+ mux = kzalloc(sizeof(*mux), GFP_KERNEL);
+ if (!mux) {
+ kfree(gate);
+ hw = ERR_PTR(-ENOMEM);
+ goto fail;
+ }
+
+ gate->reg = reg;
+ gate->bit_idx = bit_idx;
+ gate->flags = 0;
+ gate->lock = lock;
+
+ mux->reg = reg;
+ mux->shift = shift;
+ mux->mask = 3;
+ mux->flags = 0;
+
+ hw = clk_hw_register_composite(dev, name, parent_names, num_parents,
+ &mux->hw, &cclk_mux_ops,
+ NULL, NULL,
+ &gate->hw, &cclk_gate_ops,
+ flags);
+
+ if (IS_ERR(hw)) {
+ kfree(gate);
+ kfree(mux);
+ }
+
+fail:
+ return hw;
+}
+
static const char *sys_parents[] __initdata = { "hsi", NULL, "pll" };
static const struct clk_div_table ahb_div_table[] = {
@@ -308,10 +607,46 @@ static const struct clk_div_table apb_div_table[] = {
{ 0 },
};
+static const char *rtc_parents[4] = {
+ "no-clock", "lse", "lsi", "hse-rtc"
+};
+
+struct stm32f4_clk_data {
+ const struct stm32f4_gate_data *gates_data;
+ const u64 *gates_map;
+ int gates_num;
+};
+
+static const struct stm32f4_clk_data stm32f429_clk_data = {
+ .gates_data = stm32f429_gates,
+ .gates_map = stm32f42xx_gate_map,
+ .gates_num = ARRAY_SIZE(stm32f429_gates),
+};
+
+static const struct stm32f4_clk_data stm32f469_clk_data = {
+ .gates_data = stm32f469_gates,
+ .gates_map = stm32f46xx_gate_map,
+ .gates_num = ARRAY_SIZE(stm32f469_gates),
+};
+
+static const struct of_device_id stm32f4_of_match[] = {
+ {
+ .compatible = "st,stm32f42xx-rcc",
+ .data = &stm32f429_clk_data
+ },
+ {
+ .compatible = "st,stm32f469-rcc",
+ .data = &stm32f469_clk_data
+ },
+ {}
+};
+
static void __init stm32f4_rcc_init(struct device_node *np)
{
const char *hse_clk;
int n;
+ const struct of_device_id *match;
+ const struct stm32f4_clk_data *data;
base = of_iomap(np, 0);
if (!base) {
@@ -319,6 +654,25 @@ static void __init stm32f4_rcc_init(struct device_node *np)
return;
}
+ pdrm = syscon_regmap_lookup_by_phandle(np, "st,syscfg");
+ if (IS_ERR(pdrm)) {
+ pdrm = NULL;
+ pr_warn("%s: Unable to get syscfg\n", __func__);
+ }
+
+ match = of_match_node(stm32f4_of_match, np);
+ if (WARN_ON(!match))
+ return;
+
+ data = match->data;
+
+ clks = kmalloc_array(data->gates_num + END_PRIMARY_CLK,
+ sizeof(*clks), GFP_KERNEL);
+ if (!clks)
+ goto fail;
+
+ stm32f4_gate_map = data->gates_map;
+
hse_clk = of_clk_get_parent_name(np, 0);
clk_register_fixed_rate_with_accuracy(NULL, "hsi", NULL, 0,
@@ -351,11 +705,15 @@ static void __init stm32f4_rcc_init(struct device_node *np)
clks[FCLK] = clk_hw_register_fixed_factor(NULL, "fclk", "ahb_div",
0, 1, 1);
- for (n = 0; n < ARRAY_SIZE(stm32f4_gates); n++) {
- const struct stm32f4_gate_data *gd = &stm32f4_gates[n];
- unsigned int secondary =
- 8 * (gd->offset - STM32F4_RCC_AHB1ENR) + gd->bit_idx;
- int idx = stm32f4_rcc_lookup_clk_idx(0, secondary);
+ for (n = 0; n < data->gates_num; n++) {
+ const struct stm32f4_gate_data *gd;
+ unsigned int secondary;
+ int idx;
+
+ gd = &data->gates_data[n];
+ secondary = 8 * (gd->offset - STM32F4_RCC_AHB1ENR) +
+ gd->bit_idx;
+ idx = stm32f4_rcc_lookup_clk_idx(0, secondary);
if (idx < 0)
goto fail;
@@ -371,9 +729,44 @@ static void __init stm32f4_rcc_init(struct device_node *np)
}
}
+ clks[CLK_LSI] = clk_register_rgate(NULL, "lsi", "clk-lsi", 0,
+ base + STM32F4_RCC_CSR, 0, 2, 0, &stm32f4_clk_lock);
+
+ if (IS_ERR(clks[CLK_LSI])) {
+ pr_err("Unable to register lsi clock\n");
+ goto fail;
+ }
+
+ clks[CLK_LSE] = clk_register_rgate(NULL, "lse", "clk-lse", 0,
+ base + STM32F4_RCC_BDCR, 0, 2, 0, &stm32f4_clk_lock);
+
+ if (IS_ERR(clks[CLK_LSE])) {
+ pr_err("Unable to register lse clock\n");
+ goto fail;
+ }
+
+ clks[CLK_HSE_RTC] = clk_hw_register_divider(NULL, "hse-rtc", "clk-hse",
+ 0, base + STM32F4_RCC_CFGR, 16, 5, 0,
+ &stm32f4_clk_lock);
+
+ if (IS_ERR(clks[CLK_HSE_RTC])) {
+ pr_err("Unable to register hse-rtc clock\n");
+ goto fail;
+ }
+
+ clks[CLK_RTC] = stm32_register_cclk(NULL, "rtc", rtc_parents, 4,
+ base + STM32F4_RCC_BDCR, 15, 8, 0, &stm32f4_clk_lock);
+
+ if (IS_ERR(clks[CLK_RTC])) {
+ pr_err("Unable to register rtc clock\n");
+ goto fail;
+ }
+
of_clk_add_hw_provider(np, stm32f4_rcc_lookup_clk, NULL);
return;
fail:
+ kfree(clks);
iounmap(base);
}
-CLK_OF_DECLARE(stm32f4_rcc, "st,stm32f42xx-rcc", stm32f4_rcc_init);
+CLK_OF_DECLARE(stm32f42xx_rcc, "st,stm32f42xx-rcc", stm32f4_rcc_init);
+CLK_OF_DECLARE(stm32f46xx_rcc, "st,stm32f469-rcc", stm32f4_rcc_init);
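
The rgate and cclk ops above all wrap their register writes in the same unprotect/modify/reprotect bracket; a compact sketch of that pattern, assuming (as in the code above) a PWR syscon regmap with the disable-backup-protection bit at register 0x00, bit 8:

    #include <linux/bits.h>
    #include <linux/regmap.h>

    /* Lift backup-domain write protection, run the operation, then
     * restore protection -- the bracket shared by rgclk_enable(),
     * the cclk gate ops and cclk_mux_set_parent(). */
    static int with_backup_domain_unlocked(struct regmap *pwr,
                                           int (*op)(void *ctx), void *ctx)
    {
            int ret;

            regmap_update_bits(pwr, 0x00, BIT(8), BIT(8));
            ret = op(ctx);
            regmap_update_bits(pwr, 0x00, BIT(8), 0);
            return ret;
    }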
diff --git a/drivers/clk/clk-wm831x.c b/drivers/clk/clk-wm831x.c
index f4fdac55727c..0621fbfb4beb 100644
--- a/drivers/clk/clk-wm831x.c
+++ b/drivers/clk/clk-wm831x.c
@@ -243,7 +243,7 @@ static int wm831x_clkout_is_prepared(struct clk_hw *hw)
if (ret < 0) {
dev_err(wm831x->dev, "Unable to read CLOCK_CONTROL_1: %d\n",
ret);
- return true;
+ return false;
}
return (ret & WM831X_CLKOUT_ENA) != 0;
diff --git a/drivers/clk/clkdev.c b/drivers/clk/clkdev.c
index 97ae60fa1584..bb8a77a5985f 100644
--- a/drivers/clk/clkdev.c
+++ b/drivers/clk/clkdev.c
@@ -448,12 +448,20 @@ EXPORT_SYMBOL(clk_register_clkdev);
*
* con_id or dev_id may be NULL as a wildcard, just as in the rest of
* clkdev.
+ *
+ * To make things easier for mass registration, we detect error clk_hws
+ * from a previous clk_hw_register_*() call, and return the error code for
+ * those. This is to permit this function to be called immediately
+ * after clk_hw_register_*().
*/
int clk_hw_register_clkdev(struct clk_hw *hw, const char *con_id,
const char *dev_id)
{
struct clk_lookup *cl;
+ if (IS_ERR(hw))
+ return PTR_ERR(hw);
+
/*
* Since dev_id can be NULL, and NULL is handled specially, we must
* pass it as either a NULL format string, or with "%s".
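
In practice the ERR_PTR pass-through lets the two registrations chain without an intermediate check; a hedged sketch with made-up clock names:

    #include <linux/clk-provider.h>
    #include <linux/clkdev.h>

    /* A failed clk_hw_register_fixed_rate() returns an ERR_PTR, which
     * clk_hw_register_clkdev() converts back into its error code. */
    static int example_register_osc(void)
    {
            struct clk_hw *hw;

            hw = clk_hw_register_fixed_rate(NULL, "example-osc", NULL, 0,
                                            12000000);
            return clk_hw_register_clkdev(hw, "example-osc", NULL);
    }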
diff --git a/drivers/clk/hisilicon/Kconfig b/drivers/clk/hisilicon/Kconfig
index 3f537a04c6a6..cbed6602172b 100644
--- a/drivers/clk/hisilicon/Kconfig
+++ b/drivers/clk/hisilicon/Kconfig
@@ -1,3 +1,11 @@
+config COMMON_CLK_HI3516CV300
+ tristate "HI3516CV300 Clock Driver"
+ depends on ARCH_HISI || COMPILE_TEST
+ select RESET_HISI
+ default ARCH_HISI
+ help
+ Build the clock driver for hi3516cv300.
+
config COMMON_CLK_HI3519
tristate "Hi3519 Clock Driver"
depends on ARCH_HISI || COMPILE_TEST
@@ -6,6 +14,14 @@ config COMMON_CLK_HI3519
help
Build the clock driver for hi3519.
+config COMMON_CLK_HI3798CV200
+ tristate "Hi3798CV200 Clock Driver"
+ depends on ARCH_HISI || COMPILE_TEST
+ select RESET_HISI
+ default ARCH_HISI
+ help
+ Build the clock driver for hi3798cv200.
+
config COMMON_CLK_HI6220
bool "Hi6220 Clock Driver"
depends on ARCH_HISI || COMPILE_TEST
@@ -23,5 +39,6 @@ config RESET_HISI
config STUB_CLK_HI6220
bool "Hi6220 Stub Clock Driver"
depends on COMMON_CLK_HI6220 && MAILBOX
+ default ARCH_HISI
help
Build the Hisilicon Hi6220 stub clock driver.
diff --git a/drivers/clk/hisilicon/Makefile b/drivers/clk/hisilicon/Makefile
index e169ec7da023..4eec5e511e4c 100644
--- a/drivers/clk/hisilicon/Makefile
+++ b/drivers/clk/hisilicon/Makefile
@@ -7,7 +7,9 @@ obj-y += clk.o clkgate-separated.o clkdivider-hi6220.o
obj-$(CONFIG_ARCH_HI3xxx) += clk-hi3620.o
obj-$(CONFIG_ARCH_HIP04) += clk-hip04.o
obj-$(CONFIG_ARCH_HIX5HD2) += clk-hix5hd2.o
+obj-$(CONFIG_COMMON_CLK_HI3516CV300) += crg-hi3516cv300.o
obj-$(CONFIG_COMMON_CLK_HI3519) += clk-hi3519.o
+obj-$(CONFIG_COMMON_CLK_HI3798CV200) += crg-hi3798cv200.o
obj-$(CONFIG_COMMON_CLK_HI6220) += clk-hi6220.o
obj-$(CONFIG_RESET_HISI) += reset.o
obj-$(CONFIG_STUB_CLK_HI6220) += clk-hi6220-stub.o
diff --git a/drivers/clk/hisilicon/crg-hi3516cv300.c b/drivers/clk/hisilicon/crg-hi3516cv300.c
new file mode 100644
index 000000000000..2007123832bb
--- /dev/null
+++ b/drivers/clk/hisilicon/crg-hi3516cv300.c
@@ -0,0 +1,330 @@
+/*
+ * Hi3516CV300 Clock and Reset Generator Driver
+ *
+ * Copyright (c) 2016 HiSilicon Technologies Co., Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <dt-bindings/clock/hi3516cv300-clock.h>
+#include <linux/clk-provider.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include "clk.h"
+#include "crg.h"
+#include "reset.h"
+
+/* hi3516CV300 core CRG */
+#define HI3516CV300_INNER_CLK_OFFSET 64
+#define HI3516CV300_FIXED_3M 65
+#define HI3516CV300_FIXED_6M 66
+#define HI3516CV300_FIXED_24M 67
+#define HI3516CV300_FIXED_49P5 68
+#define HI3516CV300_FIXED_50M 69
+#define HI3516CV300_FIXED_83P3M 70
+#define HI3516CV300_FIXED_99M 71
+#define HI3516CV300_FIXED_100M 72
+#define HI3516CV300_FIXED_148P5M 73
+#define HI3516CV300_FIXED_198M 74
+#define HI3516CV300_FIXED_297M 75
+#define HI3516CV300_UART_MUX 76
+#define HI3516CV300_FMC_MUX 77
+#define HI3516CV300_MMC0_MUX 78
+#define HI3516CV300_MMC1_MUX 79
+#define HI3516CV300_MMC2_MUX 80
+#define HI3516CV300_MMC3_MUX 81
+#define HI3516CV300_PWM_MUX 82
+#define HI3516CV300_CRG_NR_CLKS 128
+
+static const struct hisi_fixed_rate_clock hi3516cv300_fixed_rate_clks[] = {
+ { HI3516CV300_FIXED_3M, "3m", NULL, 0, 3000000, },
+ { HI3516CV300_FIXED_6M, "6m", NULL, 0, 6000000, },
+ { HI3516CV300_FIXED_24M, "24m", NULL, 0, 24000000, },
+ { HI3516CV300_FIXED_49P5, "49.5m", NULL, 0, 49500000, },
+ { HI3516CV300_FIXED_50M, "50m", NULL, 0, 50000000, },
+ { HI3516CV300_FIXED_83P3M, "83.3m", NULL, 0, 83300000, },
+ { HI3516CV300_FIXED_99M, "99m", NULL, 0, 99000000, },
+ { HI3516CV300_FIXED_100M, "100m", NULL, 0, 100000000, },
+ { HI3516CV300_FIXED_148P5M, "148.5m", NULL, 0, 148500000, },
+ { HI3516CV300_FIXED_198M, "198m", NULL, 0, 198000000, },
+ { HI3516CV300_FIXED_297M, "297m", NULL, 0, 297000000, },
+ { HI3516CV300_APB_CLK, "apb", NULL, 0, 50000000, },
+};
+
+static const char *const uart_mux_p[] = {"24m", "6m"};
+static const char *const fmc_mux_p[] = {
+ "24m", "83.3m", "148.5m", "198m", "297m"
+};
+static const char *const mmc_mux_p[] = {"49.5m"};
+static const char *const mmc2_mux_p[] = {"99m", "49.5m"};
+static const char *const pwm_mux_p[] = {"3m", "50m", "24m", "24m"};
+
+static u32 uart_mux_table[] = {0, 1};
+static u32 fmc_mux_table[] = {0, 1, 2, 3, 4};
+static u32 mmc_mux_table[] = {0};
+static u32 mmc2_mux_table[] = {0, 2};
+static u32 pwm_mux_table[] = {0, 1, 2, 3};
+
+static const struct hisi_mux_clock hi3516cv300_mux_clks[] = {
+ { HI3516CV300_UART_MUX, "uart_mux", uart_mux_p, ARRAY_SIZE(uart_mux_p),
+ CLK_SET_RATE_PARENT, 0xe4, 19, 1, 0, uart_mux_table, },
+ { HI3516CV300_FMC_MUX, "fmc_mux", fmc_mux_p, ARRAY_SIZE(fmc_mux_p),
+ CLK_SET_RATE_PARENT, 0xc0, 2, 3, 0, fmc_mux_table, },
+ { HI3516CV300_MMC0_MUX, "mmc0_mux", mmc_mux_p, ARRAY_SIZE(mmc_mux_p),
+ CLK_SET_RATE_PARENT, 0xc4, 4, 2, 0, mmc_mux_table, },
+ { HI3516CV300_MMC1_MUX, "mmc1_mux", mmc_mux_p, ARRAY_SIZE(mmc_mux_p),
+ CLK_SET_RATE_PARENT, 0xc4, 12, 2, 0, mmc_mux_table, },
+ { HI3516CV300_MMC2_MUX, "mmc2_mux", mmc2_mux_p, ARRAY_SIZE(mmc2_mux_p),
+ CLK_SET_RATE_PARENT, 0xc4, 20, 2, 0, mmc2_mux_table, },
+ { HI3516CV300_MMC3_MUX, "mmc3_mux", mmc_mux_p, ARRAY_SIZE(mmc_mux_p),
+ CLK_SET_RATE_PARENT, 0xc8, 4, 2, 0, mmc_mux_table, },
+ { HI3516CV300_PWM_MUX, "pwm_mux", pwm_mux_p, ARRAY_SIZE(pwm_mux_p),
+ CLK_SET_RATE_PARENT, 0x38, 2, 2, 0, pwm_mux_table, },
+};
+
+static const struct hisi_gate_clock hi3516cv300_gate_clks[] = {
+
+ { HI3516CV300_UART0_CLK, "clk_uart0", "uart_mux", CLK_SET_RATE_PARENT,
+ 0xe4, 15, 0, },
+ { HI3516CV300_UART1_CLK, "clk_uart1", "uart_mux", CLK_SET_RATE_PARENT,
+ 0xe4, 16, 0, },
+ { HI3516CV300_UART2_CLK, "clk_uart2", "uart_mux", CLK_SET_RATE_PARENT,
+ 0xe4, 17, 0, },
+
+ { HI3516CV300_SPI0_CLK, "clk_spi0", "100m", CLK_SET_RATE_PARENT,
+ 0xe4, 13, 0, },
+ { HI3516CV300_SPI1_CLK, "clk_spi1", "100m", CLK_SET_RATE_PARENT,
+ 0xe4, 14, 0, },
+
+ { HI3516CV300_FMC_CLK, "clk_fmc", "fmc_mux", CLK_SET_RATE_PARENT,
+ 0xc0, 1, 0, },
+ { HI3516CV300_MMC0_CLK, "clk_mmc0", "mmc0_mux", CLK_SET_RATE_PARENT,
+ 0xc4, 1, 0, },
+ { HI3516CV300_MMC1_CLK, "clk_mmc1", "mmc1_mux", CLK_SET_RATE_PARENT,
+ 0xc4, 9, 0, },
+ { HI3516CV300_MMC2_CLK, "clk_mmc2", "mmc2_mux", CLK_SET_RATE_PARENT,
+ 0xc4, 17, 0, },
+ { HI3516CV300_MMC3_CLK, "clk_mmc3", "mmc3_mux", CLK_SET_RATE_PARENT,
+ 0xc8, 1, 0, },
+
+ { HI3516CV300_ETH_CLK, "clk_eth", NULL, 0, 0xec, 1, 0, },
+
+ { HI3516CV300_DMAC_CLK, "clk_dmac", NULL, 0, 0xd8, 5, 0, },
+ { HI3516CV300_PWM_CLK, "clk_pwm", "pwm_mux", CLK_SET_RATE_PARENT,
+ 0x38, 1, 0, },
+
+ { HI3516CV300_USB2_BUS_CLK, "clk_usb2_bus", NULL, 0, 0xb8, 0, 0, },
+ { HI3516CV300_USB2_OHCI48M_CLK, "clk_usb2_ohci48m", NULL, 0,
+ 0xb8, 1, 0, },
+ { HI3516CV300_USB2_OHCI12M_CLK, "clk_usb2_ohci12m", NULL, 0,
+ 0xb8, 2, 0, },
+ { HI3516CV300_USB2_OTG_UTMI_CLK, "clk_usb2_otg_utmi", NULL, 0,
+ 0xb8, 3, 0, },
+ { HI3516CV300_USB2_HST_PHY_CLK, "clk_usb2_hst_phy", NULL, 0,
+ 0xb8, 4, 0, },
+ { HI3516CV300_USB2_UTMI0_CLK, "clk_usb2_utmi0", NULL, 0, 0xb8, 5, 0, },
+ { HI3516CV300_USB2_PHY_CLK, "clk_usb2_phy", NULL, 0, 0xb8, 7, 0, },
+};
+
+static struct hisi_clock_data *hi3516cv300_clk_register(
+ struct platform_device *pdev)
+{
+ struct hisi_clock_data *clk_data;
+ int ret;
+
+ clk_data = hisi_clk_alloc(pdev, HI3516CV300_CRG_NR_CLKS);
+ if (!clk_data)
+ return ERR_PTR(-ENOMEM);
+
+ ret = hisi_clk_register_fixed_rate(hi3516cv300_fixed_rate_clks,
+ ARRAY_SIZE(hi3516cv300_fixed_rate_clks), clk_data);
+ if (ret)
+ return ERR_PTR(ret);
+
+ ret = hisi_clk_register_mux(hi3516cv300_mux_clks,
+ ARRAY_SIZE(hi3516cv300_mux_clks), clk_data);
+ if (ret)
+ goto unregister_fixed_rate;
+
+ ret = hisi_clk_register_gate(hi3516cv300_gate_clks,
+ ARRAY_SIZE(hi3516cv300_gate_clks), clk_data);
+ if (ret)
+ goto unregister_mux;
+
+ ret = of_clk_add_provider(pdev->dev.of_node,
+ of_clk_src_onecell_get, &clk_data->clk_data);
+ if (ret)
+ goto unregister_gate;
+
+ return clk_data;
+
+unregister_gate:
+ hisi_clk_unregister_gate(hi3516cv300_gate_clks,
+ ARRAY_SIZE(hi3516cv300_gate_clks), clk_data);
+unregister_mux:
+ hisi_clk_unregister_mux(hi3516cv300_mux_clks,
+ ARRAY_SIZE(hi3516cv300_mux_clks), clk_data);
+unregister_fixed_rate:
+ hisi_clk_unregister_fixed_rate(hi3516cv300_fixed_rate_clks,
+ ARRAY_SIZE(hi3516cv300_fixed_rate_clks), clk_data);
+ return ERR_PTR(ret);
+}
+
+static void hi3516cv300_clk_unregister(struct platform_device *pdev)
+{
+ struct hisi_crg_dev *crg = platform_get_drvdata(pdev);
+
+ of_clk_del_provider(pdev->dev.of_node);
+
+ hisi_clk_unregister_gate(hi3516cv300_gate_clks,
+ ARRAY_SIZE(hi3516cv300_gate_clks), crg->clk_data);
+ hisi_clk_unregister_mux(hi3516cv300_mux_clks,
+ ARRAY_SIZE(hi3516cv300_mux_clks), crg->clk_data);
+ hisi_clk_unregister_fixed_rate(hi3516cv300_fixed_rate_clks,
+ ARRAY_SIZE(hi3516cv300_fixed_rate_clks), crg->clk_data);
+}
+
+static const struct hisi_crg_funcs hi3516cv300_crg_funcs = {
+ .register_clks = hi3516cv300_clk_register,
+ .unregister_clks = hi3516cv300_clk_unregister,
+};
+
+/* hi3516CV300 sysctrl CRG */
+#define HI3516CV300_SYSCTRL_NR_CLKS 16
+
+static const char *const wdt_mux_p[] = { "3m", "apb" };
+static u32 wdt_mux_table[] = {0, 1};
+
+static const struct hisi_mux_clock hi3516cv300_sysctrl_mux_clks[] = {
+ { HI3516CV300_WDT_CLK, "wdt", wdt_mux_p, ARRAY_SIZE(wdt_mux_p),
+ CLK_SET_RATE_PARENT, 0x0, 23, 1, 0, wdt_mux_table, },
+};
+
+static struct hisi_clock_data *hi3516cv300_sysctrl_clk_register(
+ struct platform_device *pdev)
+{
+ struct hisi_clock_data *clk_data;
+ int ret;
+
+ clk_data = hisi_clk_alloc(pdev, HI3516CV300_SYSCTRL_NR_CLKS);
+ if (!clk_data)
+ return ERR_PTR(-ENOMEM);
+
+ ret = hisi_clk_register_mux(hi3516cv300_sysctrl_mux_clks,
+ ARRAY_SIZE(hi3516cv300_sysctrl_mux_clks), clk_data);
+ if (ret)
+ return ERR_PTR(ret);
+
+ ret = of_clk_add_provider(pdev->dev.of_node,
+ of_clk_src_onecell_get, &clk_data->clk_data);
+ if (ret)
+ goto unregister_mux;
+
+ return clk_data;
+
+unregister_mux:
+ hisi_clk_unregister_mux(hi3516cv300_sysctrl_mux_clks,
+ ARRAY_SIZE(hi3516cv300_sysctrl_mux_clks), clk_data);
+ return ERR_PTR(ret);
+}
+
+static void hi3516cv300_sysctrl_clk_unregister(struct platform_device *pdev)
+{
+ struct hisi_crg_dev *crg = platform_get_drvdata(pdev);
+
+ of_clk_del_provider(pdev->dev.of_node);
+
+ hisi_clk_unregister_mux(hi3516cv300_sysctrl_mux_clks,
+ ARRAY_SIZE(hi3516cv300_sysctrl_mux_clks),
+ crg->clk_data);
+}
+
+static const struct hisi_crg_funcs hi3516cv300_sysctrl_funcs = {
+ .register_clks = hi3516cv300_sysctrl_clk_register,
+ .unregister_clks = hi3516cv300_sysctrl_clk_unregister,
+};
+
+static const struct of_device_id hi3516cv300_crg_match_table[] = {
+ {
+ .compatible = "hisilicon,hi3516cv300-crg",
+ .data = &hi3516cv300_crg_funcs
+ },
+ {
+ .compatible = "hisilicon,hi3516cv300-sysctrl",
+ .data = &hi3516cv300_sysctrl_funcs
+ },
+ { }
+};
+MODULE_DEVICE_TABLE(of, hi3516cv300_crg_match_table);
+
+static int hi3516cv300_crg_probe(struct platform_device *pdev)
+{
+ struct hisi_crg_dev *crg;
+
+ crg = devm_kmalloc(&pdev->dev, sizeof(*crg), GFP_KERNEL);
+ if (!crg)
+ return -ENOMEM;
+
+ crg->funcs = of_device_get_match_data(&pdev->dev);
+ if (!crg->funcs)
+ return -ENOENT;
+
+ crg->rstc = hisi_reset_init(pdev);
+ if (!crg->rstc)
+ return -ENOMEM;
+
+ crg->clk_data = crg->funcs->register_clks(pdev);
+ if (IS_ERR(crg->clk_data)) {
+ hisi_reset_exit(crg->rstc);
+ return PTR_ERR(crg->clk_data);
+ }
+
+ platform_set_drvdata(pdev, crg);
+ return 0;
+}
+
+static int hi3516cv300_crg_remove(struct platform_device *pdev)
+{
+ struct hisi_crg_dev *crg = platform_get_drvdata(pdev);
+
+ hisi_reset_exit(crg->rstc);
+ crg->funcs->unregister_clks(pdev);
+ return 0;
+}
+
+static struct platform_driver hi3516cv300_crg_driver = {
+ .probe = hi3516cv300_crg_probe,
+ .remove = hi3516cv300_crg_remove,
+ .driver = {
+ .name = "hi3516cv300-crg",
+ .of_match_table = hi3516cv300_crg_match_table,
+ },
+};
+
+static int __init hi3516cv300_crg_init(void)
+{
+ return platform_driver_register(&hi3516cv300_crg_driver);
+}
+core_initcall(hi3516cv300_crg_init);
+
+static void __exit hi3516cv300_crg_exit(void)
+{
+ platform_driver_unregister(&hi3516cv300_crg_driver);
+}
+module_exit(hi3516cv300_crg_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("HiSilicon Hi3516CV300 CRG Driver");
diff --git a/drivers/clk/hisilicon/crg-hi3798cv200.c b/drivers/clk/hisilicon/crg-hi3798cv200.c
new file mode 100644
index 000000000000..fc8b5bc2d50d
--- /dev/null
+++ b/drivers/clk/hisilicon/crg-hi3798cv200.c
@@ -0,0 +1,337 @@
+/*
+ * Hi3798CV200 Clock and Reset Generator Driver
+ *
+ * Copyright (c) 2016 HiSilicon Technologies Co., Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <dt-bindings/clock/histb-clock.h>
+#include <linux/clk-provider.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include "clk.h"
+#include "crg.h"
+#include "reset.h"
+
+/* hi3798CV200 core CRG */
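+/*
+ * Clock IDs from HI3798CV200_INNER_CLK_OFFSET upwards are internal to this
+ * driver and are not part of the dt-bindings header included above.
+ */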
+#define HI3798CV200_INNER_CLK_OFFSET 64
+#define HI3798CV200_FIXED_24M 65
+#define HI3798CV200_FIXED_25M 66
+#define HI3798CV200_FIXED_50M 67
+#define HI3798CV200_FIXED_75M 68
+#define HI3798CV200_FIXED_100M 69
+#define HI3798CV200_FIXED_150M 70
+#define HI3798CV200_FIXED_200M 71
+#define HI3798CV200_FIXED_250M 72
+#define HI3798CV200_FIXED_300M 73
+#define HI3798CV200_FIXED_400M 74
+#define HI3798CV200_MMC_MUX 75
+#define HI3798CV200_ETH_PUB_CLK 76
+#define HI3798CV200_ETH_BUS_CLK 77
+#define HI3798CV200_ETH_BUS0_CLK 78
+#define HI3798CV200_ETH_BUS1_CLK 79
+#define HI3798CV200_COMBPHY1_MUX 80
+
+#define HI3798CV200_CRG_NR_CLKS 128
+
+static const struct hisi_fixed_rate_clock hi3798cv200_fixed_rate_clks[] = {
+ { HISTB_OSC_CLK, "clk_osc", NULL, 0, 24000000, },
+ { HISTB_APB_CLK, "clk_apb", NULL, 0, 100000000, },
+ { HISTB_AHB_CLK, "clk_ahb", NULL, 0, 200000000, },
+ { HI3798CV200_FIXED_24M, "24m", NULL, 0, 24000000, },
+ { HI3798CV200_FIXED_25M, "25m", NULL, 0, 25000000, },
+ { HI3798CV200_FIXED_50M, "50m", NULL, 0, 50000000, },
+ { HI3798CV200_FIXED_75M, "75m", NULL, 0, 75000000, },
+ { HI3798CV200_FIXED_100M, "100m", NULL, 0, 100000000, },
+ { HI3798CV200_FIXED_150M, "150m", NULL, 0, 150000000, },
+ { HI3798CV200_FIXED_200M, "200m", NULL, 0, 200000000, },
+ { HI3798CV200_FIXED_250M, "250m", NULL, 0, 250000000, },
+};
+
+static const char *const mmc_mux_p[] = {
+ "100m", "50m", "25m", "200m", "150m" };
+static u32 mmc_mux_table[] = {0, 1, 2, 3, 6};
+
+static const char *const comphy1_mux_p[] = {
+ "100m", "25m"};
+static u32 comphy1_mux_table[] = {2, 3};
+
+static struct hisi_mux_clock hi3798cv200_mux_clks[] = {
+ { HI3798CV200_MMC_MUX, "mmc_mux", mmc_mux_p, ARRAY_SIZE(mmc_mux_p),
+ CLK_SET_RATE_PARENT, 0xa0, 8, 3, 0, mmc_mux_table, },
+ { HI3798CV200_COMBPHY1_MUX, "combphy1_mux",
+ comphy1_mux_p, ARRAY_SIZE(comphy1_mux_p),
+ CLK_SET_RATE_PARENT, 0x188, 10, 2, 0, comphy1_mux_table, },
+};
+
+static const struct hisi_gate_clock hi3798cv200_gate_clks[] = {
+ /* UART */
+ { HISTB_UART2_CLK, "clk_uart2", "75m",
+ CLK_SET_RATE_PARENT, 0x68, 4, 0, },
+ /* I2C */
+ { HISTB_I2C0_CLK, "clk_i2c0", "clk_apb",
+ CLK_SET_RATE_PARENT, 0x6C, 4, 0, },
+ { HISTB_I2C1_CLK, "clk_i2c1", "clk_apb",
+ CLK_SET_RATE_PARENT, 0x6C, 8, 0, },
+ { HISTB_I2C2_CLK, "clk_i2c2", "clk_apb",
+ CLK_SET_RATE_PARENT, 0x6C, 12, 0, },
+ { HISTB_I2C3_CLK, "clk_i2c3", "clk_apb",
+ CLK_SET_RATE_PARENT, 0x6C, 16, 0, },
+ { HISTB_I2C4_CLK, "clk_i2c4", "clk_apb",
+ CLK_SET_RATE_PARENT, 0x6C, 20, 0, },
+ /* SPI */
+ { HISTB_SPI0_CLK, "clk_spi0", "clk_apb",
+ CLK_SET_RATE_PARENT, 0x70, 0, 0, },
+ /* SDIO */
+ { HISTB_SDIO0_BIU_CLK, "clk_sdio0_biu", "200m",
+ CLK_SET_RATE_PARENT, 0x9c, 0, 0, },
+ { HISTB_SDIO0_CIU_CLK, "clk_sdio0_ciu", "mmc_mux",
+ CLK_SET_RATE_PARENT, 0x9c, 1, 0, },
+ /* EMMC */
+ { HISTB_MMC_BIU_CLK, "clk_mmc_biu", "200m",
+ CLK_SET_RATE_PARENT, 0xa0, 0, 0, },
+ { HISTB_MMC_CIU_CLK, "clk_mmc_ciu", "mmc_mux",
+ CLK_SET_RATE_PARENT, 0xa0, 1, 0, },
+	/* PCIE */
+ { HISTB_PCIE_BUS_CLK, "clk_pcie_bus", "200m",
+ CLK_SET_RATE_PARENT, 0x18c, 0, 0, },
+ { HISTB_PCIE_SYS_CLK, "clk_pcie_sys", "100m",
+ CLK_SET_RATE_PARENT, 0x18c, 1, 0, },
+ { HISTB_PCIE_PIPE_CLK, "clk_pcie_pipe", "250m",
+ CLK_SET_RATE_PARENT, 0x18c, 2, 0, },
+ { HISTB_PCIE_AUX_CLK, "clk_pcie_aux", "24m",
+ CLK_SET_RATE_PARENT, 0x18c, 3, 0, },
+ /* Ethernet */
+ { HI3798CV200_ETH_PUB_CLK, "clk_pub", NULL,
+ CLK_SET_RATE_PARENT, 0xcc, 5, 0, },
+ { HI3798CV200_ETH_BUS_CLK, "clk_bus", "clk_pub",
+ CLK_SET_RATE_PARENT, 0xcc, 0, 0, },
+ { HI3798CV200_ETH_BUS0_CLK, "clk_bus_m0", "clk_bus",
+ CLK_SET_RATE_PARENT, 0xcc, 1, 0, },
+ { HI3798CV200_ETH_BUS1_CLK, "clk_bus_m1", "clk_bus",
+ CLK_SET_RATE_PARENT, 0xcc, 2, 0, },
+ { HISTB_ETH0_MAC_CLK, "clk_mac0", "clk_bus_m0",
+ CLK_SET_RATE_PARENT, 0xcc, 3, 0, },
+ { HISTB_ETH0_MACIF_CLK, "clk_macif0", "clk_bus_m0",
+ CLK_SET_RATE_PARENT, 0xcc, 24, 0, },
+ { HISTB_ETH1_MAC_CLK, "clk_mac1", "clk_bus_m1",
+ CLK_SET_RATE_PARENT, 0xcc, 4, 0, },
+ { HISTB_ETH1_MACIF_CLK, "clk_macif1", "clk_bus_m1",
+ CLK_SET_RATE_PARENT, 0xcc, 25, 0, },
+ /* COMBPHY1 */
+ { HISTB_COMBPHY1_CLK, "clk_combphy1", "combphy1_mux",
+ CLK_SET_RATE_PARENT, 0x188, 8, 0, },
+};
+
+static struct hisi_clock_data *hi3798cv200_clk_register(
+ struct platform_device *pdev)
+{
+ struct hisi_clock_data *clk_data;
+ int ret;
+
+ clk_data = hisi_clk_alloc(pdev, HI3798CV200_CRG_NR_CLKS);
+ if (!clk_data)
+ return ERR_PTR(-ENOMEM);
+
+ ret = hisi_clk_register_fixed_rate(hi3798cv200_fixed_rate_clks,
+ ARRAY_SIZE(hi3798cv200_fixed_rate_clks),
+ clk_data);
+ if (ret)
+ return ERR_PTR(ret);
+
+ ret = hisi_clk_register_mux(hi3798cv200_mux_clks,
+ ARRAY_SIZE(hi3798cv200_mux_clks),
+ clk_data);
+ if (ret)
+ goto unregister_fixed_rate;
+
+ ret = hisi_clk_register_gate(hi3798cv200_gate_clks,
+ ARRAY_SIZE(hi3798cv200_gate_clks),
+ clk_data);
+ if (ret)
+ goto unregister_mux;
+
+ ret = of_clk_add_provider(pdev->dev.of_node,
+ of_clk_src_onecell_get, &clk_data->clk_data);
+ if (ret)
+ goto unregister_gate;
+
+ return clk_data;
+
+unregister_gate:
+	hisi_clk_unregister_gate(hi3798cv200_gate_clks,
+				ARRAY_SIZE(hi3798cv200_gate_clks),
+				clk_data);
+unregister_mux:
+	hisi_clk_unregister_mux(hi3798cv200_mux_clks,
+				ARRAY_SIZE(hi3798cv200_mux_clks),
+				clk_data);
+unregister_fixed_rate:
+	hisi_clk_unregister_fixed_rate(hi3798cv200_fixed_rate_clks,
+				ARRAY_SIZE(hi3798cv200_fixed_rate_clks),
+				clk_data);
+ return ERR_PTR(ret);
+}
+
+static void hi3798cv200_clk_unregister(struct platform_device *pdev)
+{
+ struct hisi_crg_dev *crg = platform_get_drvdata(pdev);
+
+ of_clk_del_provider(pdev->dev.of_node);
+
+ hisi_clk_unregister_gate(hi3798cv200_gate_clks,
+ ARRAY_SIZE(hi3798cv200_gate_clks),
+ crg->clk_data);
+ hisi_clk_unregister_mux(hi3798cv200_mux_clks,
+ ARRAY_SIZE(hi3798cv200_mux_clks),
+ crg->clk_data);
+ hisi_clk_unregister_fixed_rate(hi3798cv200_fixed_rate_clks,
+ ARRAY_SIZE(hi3798cv200_fixed_rate_clks),
+ crg->clk_data);
+}
+
+static const struct hisi_crg_funcs hi3798cv200_crg_funcs = {
+ .register_clks = hi3798cv200_clk_register,
+ .unregister_clks = hi3798cv200_clk_unregister,
+};
+
+/* hi3798CV200 sysctrl CRG */
+
+#define HI3798CV200_SYSCTRL_NR_CLKS 16
+
+static const struct hisi_gate_clock hi3798cv200_sysctrl_gate_clks[] = {
+ { HISTB_IR_CLK, "clk_ir", "100m",
+ CLK_SET_RATE_PARENT, 0x48, 4, 0, },
+ { HISTB_TIMER01_CLK, "clk_timer01", "24m",
+ CLK_SET_RATE_PARENT, 0x48, 6, 0, },
+ { HISTB_UART0_CLK, "clk_uart0", "75m",
+ CLK_SET_RATE_PARENT, 0x48, 10, 0, },
+};
+
+static struct hisi_clock_data *hi3798cv200_sysctrl_clk_register(
+ struct platform_device *pdev)
+{
+ struct hisi_clock_data *clk_data;
+ int ret;
+
+ clk_data = hisi_clk_alloc(pdev, HI3798CV200_SYSCTRL_NR_CLKS);
+ if (!clk_data)
+ return ERR_PTR(-ENOMEM);
+
+ ret = hisi_clk_register_gate(hi3798cv200_sysctrl_gate_clks,
+ ARRAY_SIZE(hi3798cv200_sysctrl_gate_clks),
+ clk_data);
+ if (ret)
+ return ERR_PTR(ret);
+
+ ret = of_clk_add_provider(pdev->dev.of_node,
+ of_clk_src_onecell_get, &clk_data->clk_data);
+ if (ret)
+ goto unregister_gate;
+
+ return clk_data;
+
+unregister_gate:
+ hisi_clk_unregister_gate(hi3798cv200_sysctrl_gate_clks,
+ ARRAY_SIZE(hi3798cv200_sysctrl_gate_clks),
+ clk_data);
+ return ERR_PTR(ret);
+}
+
+static void hi3798cv200_sysctrl_clk_unregister(struct platform_device *pdev)
+{
+ struct hisi_crg_dev *crg = platform_get_drvdata(pdev);
+
+ of_clk_del_provider(pdev->dev.of_node);
+
+ hisi_clk_unregister_gate(hi3798cv200_sysctrl_gate_clks,
+ ARRAY_SIZE(hi3798cv200_sysctrl_gate_clks),
+ crg->clk_data);
+}
+
+static const struct hisi_crg_funcs hi3798cv200_sysctrl_funcs = {
+ .register_clks = hi3798cv200_sysctrl_clk_register,
+ .unregister_clks = hi3798cv200_sysctrl_clk_unregister,
+};
+
+static const struct of_device_id hi3798cv200_crg_match_table[] = {
+ { .compatible = "hisilicon,hi3798cv200-crg",
+ .data = &hi3798cv200_crg_funcs },
+ { .compatible = "hisilicon,hi3798cv200-sysctrl",
+ .data = &hi3798cv200_sysctrl_funcs },
+ { }
+};
+MODULE_DEVICE_TABLE(of, hi3798cv200_crg_match_table);
+
+static int hi3798cv200_crg_probe(struct platform_device *pdev)
+{
+ struct hisi_crg_dev *crg;
+
+ crg = devm_kmalloc(&pdev->dev, sizeof(*crg), GFP_KERNEL);
+ if (!crg)
+ return -ENOMEM;
+
+ crg->funcs = of_device_get_match_data(&pdev->dev);
+ if (!crg->funcs)
+ return -ENOENT;
+
+ crg->rstc = hisi_reset_init(pdev);
+ if (!crg->rstc)
+ return -ENOMEM;
+
+ crg->clk_data = crg->funcs->register_clks(pdev);
+ if (IS_ERR(crg->clk_data)) {
+ hisi_reset_exit(crg->rstc);
+ return PTR_ERR(crg->clk_data);
+ }
+
+ platform_set_drvdata(pdev, crg);
+ return 0;
+}
+
+static int hi3798cv200_crg_remove(struct platform_device *pdev)
+{
+ struct hisi_crg_dev *crg = platform_get_drvdata(pdev);
+
+ hisi_reset_exit(crg->rstc);
+ crg->funcs->unregister_clks(pdev);
+ return 0;
+}
+
+static struct platform_driver hi3798cv200_crg_driver = {
+ .probe = hi3798cv200_crg_probe,
+ .remove = hi3798cv200_crg_remove,
+ .driver = {
+ .name = "hi3798cv200-crg",
+ .of_match_table = hi3798cv200_crg_match_table,
+ },
+};
+
+static int __init hi3798cv200_crg_init(void)
+{
+ return platform_driver_register(&hi3798cv200_crg_driver);
+}
+core_initcall(hi3798cv200_crg_init);
+
+static void __exit hi3798cv200_crg_exit(void)
+{
+ platform_driver_unregister(&hi3798cv200_crg_driver);
+}
+module_exit(hi3798cv200_crg_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("HiSilicon Hi3798CV200 CRG Driver");
diff --git a/drivers/clk/hisilicon/crg.h b/drivers/clk/hisilicon/crg.h
new file mode 100644
index 000000000000..e0739717de9a
--- /dev/null
+++ b/drivers/clk/hisilicon/crg.h
@@ -0,0 +1,34 @@
+/*
+ * HiSilicon Clock and Reset Driver Header
+ *
+ * Copyright (c) 2016 HiSilicon Limited.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __HISI_CRG_H
+#define __HISI_CRG_H
+
+struct hisi_clock_data;
+struct hisi_reset_controller;
+
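+/* per-SoC hooks called from the shared CRG probe/remove paths */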
+struct hisi_crg_funcs {
+ struct hisi_clock_data* (*register_clks)(struct platform_device *pdev);
+ void (*unregister_clks)(struct platform_device *pdev);
+};
+
+struct hisi_crg_dev {
+ struct hisi_clock_data *clk_data;
+ struct hisi_reset_controller *rstc;
+ const struct hisi_crg_funcs *funcs;
+};
+
+#endif /* __HISI_CRG_H */
diff --git a/drivers/clk/imx/clk-imx31.c b/drivers/clk/imx/clk-imx31.c
index 6a964144a5b5..cbce308aad04 100644
--- a/drivers/clk/imx/clk-imx31.c
+++ b/drivers/clk/imx/clk-imx31.c
@@ -21,6 +21,7 @@
#include <linux/io.h>
#include <linux/err.h>
#include <linux/of.h>
+#include <linux/of_address.h>
#include <soc/imx/revision.h>
#include <soc/imx/timer.h>
#include <asm/irq.h>
@@ -72,14 +73,8 @@ static struct clk ** const uart_clks[] __initconst = {
NULL
};
-static void __init _mx31_clocks_init(unsigned long fref)
+static void __init _mx31_clocks_init(void __iomem *base, unsigned long fref)
{
- void __iomem *base;
- struct device_node *np;
-
- base = ioremap(MX31_CCM_BASE_ADDR, SZ_4K);
- BUG_ON(!base);
-
clk[dummy] = imx_clk_fixed("dummy", 0);
clk[ckih] = imx_clk_fixed("ckih", fref);
clk[ckil] = imx_clk_fixed("ckil", 32768);
@@ -147,21 +142,17 @@ static void __init _mx31_clocks_init(unsigned long fref)
clk_prepare_enable(clk[iim_gate]);
mx31_revision();
clk_disable_unprepare(clk[iim_gate]);
-
- np = of_find_compatible_node(NULL, NULL, "fsl,imx31-ccm");
-
- if (np) {
- clk_data.clks = clk;
- clk_data.clk_num = ARRAY_SIZE(clk);
- of_clk_add_provider(np, of_clk_src_onecell_get, &clk_data);
- }
}
-int __init mx31_clocks_init(void)
+int __init mx31_clocks_init(unsigned long fref)
{
- u32 fref = 26000000; /* default */
+ void __iomem *base;
+
+ base = ioremap(MX31_CCM_BASE_ADDR, SZ_4K);
+ if (!base)
+ panic("%s: failed to map registers\n", __func__);
- _mx31_clocks_init(fref);
+ _mx31_clocks_init(base, fref);
clk_register_clkdev(clk[gpt_gate], "per", "imx-gpt.0");
clk_register_clkdev(clk[ipg], "ipg", "imx-gpt.0");
@@ -224,22 +215,31 @@ int __init mx31_clocks_init(void)
return 0;
}
-int __init mx31_clocks_init_dt(void)
+static void __init mx31_clocks_init_dt(struct device_node *np)
{
- struct device_node *np;
+ struct device_node *osc_np;
u32 fref = 26000000; /* default */
+ void __iomem *ccm;
- for_each_compatible_node(np, NULL, "fixed-clock") {
- if (!of_device_is_compatible(np, "fsl,imx-osc26m"))
+ for_each_compatible_node(osc_np, NULL, "fixed-clock") {
+ if (!of_device_is_compatible(osc_np, "fsl,imx-osc26m"))
continue;
- if (!of_property_read_u32(np, "clock-frequency", &fref)) {
- of_node_put(np);
+ if (!of_property_read_u32(osc_np, "clock-frequency", &fref)) {
+ of_node_put(osc_np);
break;
}
}
- _mx31_clocks_init(fref);
+ ccm = of_iomap(np, 0);
+ if (!ccm)
+ panic("%s: failed to map registers\n", __func__);
- return 0;
+ _mx31_clocks_init(ccm, fref);
+
+ clk_data.clks = clk;
+ clk_data.clk_num = ARRAY_SIZE(clk);
+ of_clk_add_provider(np, of_clk_src_onecell_get, &clk_data);
}
+
+CLK_OF_DECLARE(imx31_ccm, "fsl,imx31-ccm", mx31_clocks_init_dt);
diff --git a/drivers/clk/imx/clk-imx6q.c b/drivers/clk/imx/clk-imx6q.c
index ce8ea10407e4..42ffc1c92bab 100644
--- a/drivers/clk/imx/clk-imx6q.c
+++ b/drivers/clk/imx/clk-imx6q.c
@@ -156,10 +156,267 @@ static struct clk ** const uart_clks[] __initconst = {
NULL
};
+static int ldb_di_sel_by_clock_id(int clock_id)
+{
+ switch (clock_id) {
+ case IMX6QDL_CLK_PLL5_VIDEO_DIV:
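+		/*
+		 * The audio/video PLL post dividers do not work on i.MX6Q
+		 * rev 1.0, so pll5_video_div is not selectable there.
+		 */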
+ if (clk_on_imx6q() &&
+ imx_get_soc_revision() == IMX_CHIP_REVISION_1_0)
+ return -ENOENT;
+ return 0;
+ case IMX6QDL_CLK_PLL2_PFD0_352M:
+ return 1;
+ case IMX6QDL_CLK_PLL2_PFD2_396M:
+ return 2;
+ case IMX6QDL_CLK_MMDC_CH1_AXI:
+ return 3;
+ case IMX6QDL_CLK_PLL3_USB_OTG:
+ return 4;
+ default:
+ return -ENOENT;
+ }
+}
+
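+/*
+ * Parse the assigned-clocks/assigned-clock-parents properties to find the
+ * requested ldb_di0/1_sel parents, so init_ldb_clks() can program the muxes
+ * before they are registered read-only.
+ */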
+static void of_assigned_ldb_sels(struct device_node *node,
+ unsigned int *ldb_di0_sel,
+ unsigned int *ldb_di1_sel)
+{
+ struct of_phandle_args clkspec;
+ int index, rc, num_parents;
+ int parent, child, sel;
+
+ num_parents = of_count_phandle_with_args(node, "assigned-clock-parents",
+ "#clock-cells");
+ for (index = 0; index < num_parents; index++) {
+ rc = of_parse_phandle_with_args(node, "assigned-clock-parents",
+ "#clock-cells", index, &clkspec);
+ if (rc < 0) {
+ /* skip empty (null) phandles */
+ if (rc == -ENOENT)
+ continue;
+ else
+ return;
+ }
+ if (clkspec.np != node || clkspec.args[0] >= IMX6QDL_CLK_END) {
+ pr_err("ccm: parent clock %d not in ccm\n", index);
+ return;
+ }
+ parent = clkspec.args[0];
+
+ rc = of_parse_phandle_with_args(node, "assigned-clocks",
+ "#clock-cells", index, &clkspec);
+ if (rc < 0)
+ return;
+ if (clkspec.np != node || clkspec.args[0] >= IMX6QDL_CLK_END) {
+ pr_err("ccm: child clock %d not in ccm\n", index);
+ return;
+ }
+ child = clkspec.args[0];
+
+ if (child != IMX6QDL_CLK_LDB_DI0_SEL &&
+ child != IMX6QDL_CLK_LDB_DI1_SEL)
+ continue;
+
+ sel = ldb_di_sel_by_clock_id(parent);
+ if (sel < 0) {
+ pr_err("ccm: invalid ldb_di%d parent clock: %d\n",
+ child == IMX6QDL_CLK_LDB_DI1_SEL, parent);
+ continue;
+ }
+
+ if (child == IMX6QDL_CLK_LDB_DI0_SEL)
+ *ldb_di0_sel = sel;
+ if (child == IMX6QDL_CLK_LDB_DI1_SEL)
+ *ldb_di1_sel = sel;
+ }
+}
+
+#define CCM_CCDR 0x04
+#define CCM_CCSR 0x0c
+#define CCM_CS2CDR 0x2c
+
+#define CCDR_MMDC_CH1_MASK BIT(16)
+#define CCSR_PLL3_SW_CLK_SEL BIT(0)
+
+#define CS2CDR_LDB_DI0_CLK_SEL_SHIFT 9
+#define CS2CDR_LDB_DI1_CLK_SEL_SHIFT 12
+
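+/*
+ * Mask the handshake with the MMDC CH1 module; it must not be active
+ * while mmdc_ch1_disable() changes periph2_clk_sel.
+ */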
+static void __init imx6q_mmdc_ch1_mask_handshake(void __iomem *ccm_base)
+{
+ unsigned int reg;
+
+ reg = readl_relaxed(ccm_base + CCM_CCDR);
+ reg |= CCDR_MMDC_CH1_MASK;
+ writel_relaxed(reg, ccm_base + CCM_CCDR);
+}
+
+/*
+ * The only way to disable the MMDC_CH1 clock is to move it to pll3_sw_clk
+ * via periph2_clk2_sel and then to disable pll3_sw_clk by selecting the
+ * bypass clock source, since there is no CG bit for mmdc_ch1.
+ */
+static void mmdc_ch1_disable(void __iomem *ccm_base)
+{
+ unsigned int reg;
+
+ clk_set_parent(clk[IMX6QDL_CLK_PERIPH2_CLK2_SEL],
+ clk[IMX6QDL_CLK_PLL3_USB_OTG]);
+
+ /*
+ * Handshake with mmdc_ch1 module must be masked when changing
+ * periph2_clk_sel.
+ */
+ clk_set_parent(clk[IMX6QDL_CLK_PERIPH2], clk[IMX6QDL_CLK_PERIPH2_CLK2]);
+
+ /* Disable pll3_sw_clk by selecting the bypass clock source */
+ reg = readl_relaxed(ccm_base + CCM_CCSR);
+ reg |= CCSR_PLL3_SW_CLK_SEL;
+ writel_relaxed(reg, ccm_base + CCM_CCSR);
+}
+
+static void mmdc_ch1_reenable(void __iomem *ccm_base)
+{
+ unsigned int reg;
+
+ /* Enable pll3_sw_clk by disabling the bypass */
+ reg = readl_relaxed(ccm_base + CCM_CCSR);
+ reg &= ~CCSR_PLL3_SW_CLK_SEL;
+ writel_relaxed(reg, ccm_base + CCM_CCSR);
+
+ clk_set_parent(clk[IMX6QDL_CLK_PERIPH2], clk[IMX6QDL_CLK_PERIPH2_PRE]);
+}
+
+/*
+ * We have to follow a strict procedure when changing the LDB clock source,
+ * otherwise we risk introducing a glitch that can lock up the LDB divider.
+ * Things to keep in mind:
+ *
+ * 1. The current and new parent clock inputs to the mux must be disabled.
+ * 2. The default clock input for ldb_di0/1_clk_sel is mmdc_ch1_axi, which
+ * has no CG bit.
+ * 3. pll2_pfd2_396m cannot be gated if it is used as the memory clock.
+ * 4. In the RTL implementation of the LDB_DI_CLK_SEL muxes the top four
+ * options are in one mux and the PLL3 option along with three unused
+ * inputs is in a second mux. There is a third mux with two inputs used
+ * to decide between the first and second 4-port mux:
+ *
+ * pll5_video_div 0 --|\
+ * pll2_pfd0_352m 1 --| |_
+ * pll2_pfd2_396m 2 --| | `-|\
+ * mmdc_ch1_axi 3 --|/ | |
+ * | |--
+ * pll3_usb_otg 4 --|\ | |
+ * 5 --| |_,-|/
+ * 6 --| |
+ * 7 --|/
+ *
+ * The ldb_di0/1_clk_sel[1:0] bits control both 4-port muxes at the same time.
+ * The ldb_di0/1_clk_sel[2] bit controls the 2-port mux. The code below
+ * switches the parent to the bottom mux first and then manipulates the top
+ * mux to ensure that no glitch will enter the divider.
+ */
+static void init_ldb_clks(struct device_node *np, void __iomem *ccm_base)
+{
+ unsigned int reg;
+ unsigned int sel[2][4];
+ int i;
+
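+	/*
+	 * sel[mux][step]: current setting, detour to the bottom mux,
+	 * target value with the bottom mux still selected, final target.
+	 */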
+ reg = readl_relaxed(ccm_base + CCM_CS2CDR);
+ sel[0][0] = (reg >> CS2CDR_LDB_DI0_CLK_SEL_SHIFT) & 7;
+ sel[1][0] = (reg >> CS2CDR_LDB_DI1_CLK_SEL_SHIFT) & 7;
+
+ sel[0][3] = sel[0][2] = sel[0][1] = sel[0][0];
+ sel[1][3] = sel[1][2] = sel[1][1] = sel[1][0];
+
+ of_assigned_ldb_sels(np, &sel[0][3], &sel[1][3]);
+
+ for (i = 0; i < 2; i++) {
+ /* Warn if a glitch might have been introduced already */
+ if (sel[i][0] != 3) {
+ pr_warn("ccm: ldb_di%d_sel already changed from reset value: %d\n",
+ i, sel[i][0]);
+ }
+
+ if (sel[i][0] == sel[i][3])
+ continue;
+
+ /* Only switch to or from pll2_pfd2_396m if it is disabled */
+ if ((sel[i][0] == 2 || sel[i][3] == 2) &&
+ (clk_get_parent(clk[IMX6QDL_CLK_PERIPH_PRE]) ==
+ clk[IMX6QDL_CLK_PLL2_PFD2_396M])) {
+ pr_err("ccm: ldb_di%d_sel: couldn't disable pll2_pfd2_396m\n",
+ i);
+ sel[i][3] = sel[i][2] = sel[i][1] = sel[i][0];
+ continue;
+ }
+
+ /* First switch to the bottom mux */
+ sel[i][1] = sel[i][0] | 4;
+
+ /* Then configure the top mux before switching back to it */
+ sel[i][2] = sel[i][3] | 4;
+
+ pr_debug("ccm: switching ldb_di%d_sel: %d->%d->%d->%d\n", i,
+ sel[i][0], sel[i][1], sel[i][2], sel[i][3]);
+ }
+
+ if (sel[0][0] == sel[0][3] && sel[1][0] == sel[1][3])
+ return;
+
+ mmdc_ch1_disable(ccm_base);
+
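+	/* Walk both muxes through the glitch-free sequence computed above */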
+ for (i = 1; i < 4; i++) {
+ reg = readl_relaxed(ccm_base + CCM_CS2CDR);
+ reg &= ~((7 << CS2CDR_LDB_DI0_CLK_SEL_SHIFT) |
+ (7 << CS2CDR_LDB_DI1_CLK_SEL_SHIFT));
+ reg |= ((sel[0][i] << CS2CDR_LDB_DI0_CLK_SEL_SHIFT) |
+ (sel[1][i] << CS2CDR_LDB_DI1_CLK_SEL_SHIFT));
+ writel_relaxed(reg, ccm_base + CCM_CS2CDR);
+ }
+
+ mmdc_ch1_reenable(ccm_base);
+}
+
+#define CCM_ANALOG_PLL_VIDEO 0xa0
+#define CCM_ANALOG_PFD_480 0xf0
+#define CCM_ANALOG_PFD_528 0x100
+
+#define PLL_ENABLE BIT(13)
+
+#define PFD0_CLKGATE BIT(7)
+#define PFD1_CLKGATE BIT(15)
+#define PFD2_CLKGATE BIT(23)
+#define PFD3_CLKGATE BIT(31)
+
+static void disable_anatop_clocks(void __iomem *anatop_base)
+{
+ unsigned int reg;
+
+ /* Make sure PLL2 PFDs 0-2 are gated */
+ reg = readl_relaxed(anatop_base + CCM_ANALOG_PFD_528);
+ /* Cannot gate PFD2 if pll2_pfd2_396m is the parent of MMDC clock */
+ if (clk_get_parent(clk[IMX6QDL_CLK_PERIPH_PRE]) ==
+ clk[IMX6QDL_CLK_PLL2_PFD2_396M])
+ reg |= PFD0_CLKGATE | PFD1_CLKGATE;
+ else
+ reg |= PFD0_CLKGATE | PFD1_CLKGATE | PFD2_CLKGATE;
+ writel_relaxed(reg, anatop_base + CCM_ANALOG_PFD_528);
+
+ /* Make sure PLL3 PFDs 0-3 are gated */
+ reg = readl_relaxed(anatop_base + CCM_ANALOG_PFD_480);
+ reg |= PFD0_CLKGATE | PFD1_CLKGATE | PFD2_CLKGATE | PFD3_CLKGATE;
+ writel_relaxed(reg, anatop_base + CCM_ANALOG_PFD_480);
+
+ /* Make sure PLL5 is disabled */
+ reg = readl_relaxed(anatop_base + CCM_ANALOG_PLL_VIDEO);
+ reg &= ~PLL_ENABLE;
+ writel_relaxed(reg, anatop_base + CCM_ANALOG_PLL_VIDEO);
+}
+
static void __init imx6q_clocks_init(struct device_node *ccm_node)
{
struct device_node *np;
- void __iomem *base;
+ void __iomem *anatop_base, *base;
int i;
int ret;
@@ -172,7 +429,7 @@ static void __init imx6q_clocks_init(struct device_node *ccm_node)
clk[IMX6QDL_CLK_ANACLK2] = imx_obtain_fixed_clock("anaclk2", 0);
np = of_find_compatible_node(NULL, NULL, "fsl,imx6q-anatop");
- base = of_iomap(np, 0);
+ anatop_base = base = of_iomap(np, 0);
WARN_ON(!base);
/* Audio/video PLL post dividers do not work on i.MX6q revision 1.0 */
@@ -330,8 +587,20 @@ static void __init imx6q_clocks_init(struct device_node *ccm_node)
clk[IMX6QDL_CLK_GPU3D_SHADER_SEL] = imx_clk_mux("gpu3d_shader_sel", base + 0x18, 8, 2, gpu3d_shader_sels, ARRAY_SIZE(gpu3d_shader_sels));
clk[IMX6QDL_CLK_IPU1_SEL] = imx_clk_mux("ipu1_sel", base + 0x3c, 9, 2, ipu_sels, ARRAY_SIZE(ipu_sels));
clk[IMX6QDL_CLK_IPU2_SEL] = imx_clk_mux("ipu2_sel", base + 0x3c, 14, 2, ipu_sels, ARRAY_SIZE(ipu_sels));
- clk[IMX6QDL_CLK_LDB_DI0_SEL] = imx_clk_mux_flags("ldb_di0_sel", base + 0x2c, 9, 3, ldb_di_sels, ARRAY_SIZE(ldb_di_sels), CLK_SET_RATE_PARENT);
- clk[IMX6QDL_CLK_LDB_DI1_SEL] = imx_clk_mux_flags("ldb_di1_sel", base + 0x2c, 12, 3, ldb_di_sels, ARRAY_SIZE(ldb_di_sels), CLK_SET_RATE_PARENT);
+
+ disable_anatop_clocks(anatop_base);
+
+ imx6q_mmdc_ch1_mask_handshake(base);
+
+ /*
+ * The LDB_DI0/1_SEL muxes are registered read-only due to a hardware
+ * bug. Set the muxes to the requested values before registering the
+ * ldb_di_sel clocks.
+ */
+ init_ldb_clks(np, base);
+
+ clk[IMX6QDL_CLK_LDB_DI0_SEL] = imx_clk_mux_ldb("ldb_di0_sel", base + 0x2c, 9, 3, ldb_di_sels, ARRAY_SIZE(ldb_di_sels));
+ clk[IMX6QDL_CLK_LDB_DI1_SEL] = imx_clk_mux_ldb("ldb_di1_sel", base + 0x2c, 12, 3, ldb_di_sels, ARRAY_SIZE(ldb_di_sels));
clk[IMX6QDL_CLK_IPU1_DI0_PRE_SEL] = imx_clk_mux_flags("ipu1_di0_pre_sel", base + 0x34, 6, 3, ipu_di_pre_sels, ARRAY_SIZE(ipu_di_pre_sels), CLK_SET_RATE_PARENT);
clk[IMX6QDL_CLK_IPU1_DI1_PRE_SEL] = imx_clk_mux_flags("ipu1_di1_pre_sel", base + 0x34, 15, 3, ipu_di_pre_sels, ARRAY_SIZE(ipu_di_pre_sels), CLK_SET_RATE_PARENT);
clk[IMX6QDL_CLK_IPU2_DI0_PRE_SEL] = imx_clk_mux_flags("ipu2_di0_pre_sel", base + 0x38, 6, 3, ipu_di_pre_sels, ARRAY_SIZE(ipu_di_pre_sels), CLK_SET_RATE_PARENT);
@@ -582,12 +851,6 @@ static void __init imx6q_clocks_init(struct device_node *ccm_node)
clk_register_clkdev(clk[IMX6QDL_CLK_ENET_REF], "enet_ref", NULL);
- if ((imx_get_soc_revision() != IMX_CHIP_REVISION_1_0) ||
- clk_on_imx6dl()) {
- clk_set_parent(clk[IMX6QDL_CLK_LDB_DI0_SEL], clk[IMX6QDL_CLK_PLL5_VIDEO_DIV]);
- clk_set_parent(clk[IMX6QDL_CLK_LDB_DI1_SEL], clk[IMX6QDL_CLK_PLL5_VIDEO_DIV]);
- }
-
clk_set_rate(clk[IMX6QDL_CLK_PLL3_PFD1_540M], 540000000);
if (clk_on_imx6dl())
clk_set_parent(clk[IMX6QDL_CLK_IPU1_SEL], clk[IMX6QDL_CLK_PLL3_PFD1_540M]);
diff --git a/drivers/clk/imx/clk-imx6ul.c b/drivers/clk/imx/clk-imx6ul.c
index d1d7787ce211..75c35fb12b60 100644
--- a/drivers/clk/imx/clk-imx6ul.c
+++ b/drivers/clk/imx/clk-imx6ul.c
@@ -64,6 +64,10 @@ static const char *perclk_sels[] = { "ipg", "osc", };
static const char *lcdif_sels[] = { "lcdif_podf", "ipp_di0", "ipp_di1", "ldb_di0", "ldb_di1", };
static const char *csi_sels[] = { "osc", "pll2_pfd2_396m", "pll3_120m", "pll3_pfd1_540m", };
static const char *sim_sels[] = { "sim_podf", "ipp_di0", "ipp_di1", "ldb_di0", "ldb_di1", };
+/* epdc_pre_sels, epdc_sels, esai_sels only exist on i.MX6ULL */
+static const char *epdc_pre_sels[] = { "pll2_bus", "pll3_usb_otg", "pll5_video_div", "pll2_pfd0_352m", "pll2_pfd2_396m", "pll3_pfd2_508m", };
+static const char *esai_sels[] = { "pll4_audio_div", "pll3_pfd2_508m", "pll5_video_div", "pll3_usb_otg", };
+static const char *epdc_sels[] = { "epdc_podf", "ipp_di0", "ipp_di1", "ldb_di0", "ldb_di1", };
static struct clk *clks[IMX6UL_CLK_END];
static struct clk_onecell_data clk_data;
@@ -102,6 +106,17 @@ static u32 share_count_audio;
static u32 share_count_sai1;
static u32 share_count_sai2;
static u32 share_count_sai3;
+static u32 share_count_esai;
+
+static inline int clk_on_imx6ul(void)
+{
+ return of_machine_is_compatible("fsl,imx6ul");
+}
+
+static inline int clk_on_imx6ull(void)
+{
+ return of_machine_is_compatible("fsl,imx6ull");
+}
static void __init imx6ul_clocks_init(struct device_node *ccm_node)
{
@@ -238,12 +253,19 @@ static void __init imx6ul_clocks_init(struct device_node *ccm_node)
clks[IMX6UL_CLK_QSPI1_SEL] = imx_clk_mux("qspi1_sel", base + 0x1c, 7, 3, qspi1_sels, ARRAY_SIZE(qspi1_sels));
clks[IMX6UL_CLK_PERCLK_SEL] = imx_clk_mux("perclk_sel", base + 0x1c, 6, 1, perclk_sels, ARRAY_SIZE(perclk_sels));
clks[IMX6UL_CLK_CAN_SEL] = imx_clk_mux("can_sel", base + 0x20, 8, 2, can_sels, ARRAY_SIZE(can_sels));
+ if (clk_on_imx6ull())
+ clks[IMX6ULL_CLK_ESAI_SEL] = imx_clk_mux("esai_sel", base + 0x20, 19, 2, esai_sels, ARRAY_SIZE(esai_sels));
clks[IMX6UL_CLK_UART_SEL] = imx_clk_mux("uart_sel", base + 0x24, 6, 1, uart_sels, ARRAY_SIZE(uart_sels));
clks[IMX6UL_CLK_ENFC_SEL] = imx_clk_mux("enfc_sel", base + 0x2c, 15, 3, enfc_sels, ARRAY_SIZE(enfc_sels));
clks[IMX6UL_CLK_LDB_DI0_SEL] = imx_clk_mux("ldb_di0_sel", base + 0x2c, 9, 3, ldb_di0_sels, ARRAY_SIZE(ldb_di0_sels));
clks[IMX6UL_CLK_SPDIF_SEL] = imx_clk_mux("spdif_sel", base + 0x30, 20, 2, spdif_sels, ARRAY_SIZE(spdif_sels));
- clks[IMX6UL_CLK_SIM_PRE_SEL] = imx_clk_mux("sim_pre_sel", base + 0x34, 15, 3, sim_pre_sels, ARRAY_SIZE(sim_pre_sels));
- clks[IMX6UL_CLK_SIM_SEL] = imx_clk_mux("sim_sel", base + 0x34, 9, 3, sim_sels, ARRAY_SIZE(sim_sels));
+ if (clk_on_imx6ul()) {
+ clks[IMX6UL_CLK_SIM_PRE_SEL] = imx_clk_mux("sim_pre_sel", base + 0x34, 15, 3, sim_pre_sels, ARRAY_SIZE(sim_pre_sels));
+ clks[IMX6UL_CLK_SIM_SEL] = imx_clk_mux("sim_sel", base + 0x34, 9, 3, sim_sels, ARRAY_SIZE(sim_sels));
+ } else if (clk_on_imx6ull()) {
+ clks[IMX6ULL_CLK_EPDC_PRE_SEL] = imx_clk_mux("epdc_pre_sel", base + 0x34, 15, 3, epdc_pre_sels, ARRAY_SIZE(epdc_pre_sels));
+ clks[IMX6ULL_CLK_EPDC_SEL] = imx_clk_mux("epdc_sel", base + 0x34, 9, 3, epdc_sels, ARRAY_SIZE(epdc_sels));
+ }
clks[IMX6UL_CLK_ECSPI_SEL] = imx_clk_mux("ecspi_sel", base + 0x38, 18, 1, ecspi_sels, ARRAY_SIZE(ecspi_sels));
clks[IMX6UL_CLK_LCDIF_PRE_SEL] = imx_clk_mux("lcdif_pre_sel", base + 0x38, 15, 3, lcdif_pre_sels, ARRAY_SIZE(lcdif_pre_sels));
clks[IMX6UL_CLK_LCDIF_SEL] = imx_clk_mux("lcdif_sel", base + 0x38, 9, 3, lcdif_sels, ARRAY_SIZE(lcdif_sels));
@@ -276,6 +298,10 @@ static void __init imx6ul_clocks_init(struct device_node *ccm_node)
clks[IMX6UL_CLK_SAI3_PODF] = imx_clk_divider("sai3_podf", "sai3_pred", base + 0x28, 16, 6);
clks[IMX6UL_CLK_SAI1_PRED] = imx_clk_divider("sai1_pred", "sai1_sel", base + 0x28, 6, 3);
clks[IMX6UL_CLK_SAI1_PODF] = imx_clk_divider("sai1_podf", "sai1_pred", base + 0x28, 0, 6);
+ if (clk_on_imx6ull()) {
+ clks[IMX6ULL_CLK_ESAI_PRED] = imx_clk_divider("esai_pred", "esai_sel", base + 0x28, 9, 3);
+ clks[IMX6ULL_CLK_ESAI_PODF] = imx_clk_divider("esai_podf", "esai_pred", base + 0x28, 25, 3);
+ }
clks[IMX6UL_CLK_ENFC_PRED] = imx_clk_divider("enfc_pred", "enfc_sel", base + 0x2c, 18, 3);
clks[IMX6UL_CLK_ENFC_PODF] = imx_clk_divider("enfc_podf", "enfc_pred", base + 0x2c, 21, 6);
clks[IMX6UL_CLK_SAI2_PRED] = imx_clk_divider("sai2_pred", "sai2_sel", base + 0x2c, 6, 3);
@@ -298,9 +324,15 @@ static void __init imx6ul_clocks_init(struct device_node *ccm_node)
clks[IMX6UL_CLK_APBHDMA] = imx_clk_gate2("apbh_dma", "bch_podf", base + 0x68, 4);
clks[IMX6UL_CLK_ASRC_IPG] = imx_clk_gate2_shared("asrc_ipg", "ahb", base + 0x68, 6, &share_count_asrc);
clks[IMX6UL_CLK_ASRC_MEM] = imx_clk_gate2_shared("asrc_mem", "ahb", base + 0x68, 6, &share_count_asrc);
- clks[IMX6UL_CLK_CAAM_MEM] = imx_clk_gate2("caam_mem", "ahb", base + 0x68, 8);
- clks[IMX6UL_CLK_CAAM_ACLK] = imx_clk_gate2("caam_aclk", "ahb", base + 0x68, 10);
- clks[IMX6UL_CLK_CAAM_IPG] = imx_clk_gate2("caam_ipg", "ipg", base + 0x68, 12);
+ if (clk_on_imx6ul()) {
+ clks[IMX6UL_CLK_CAAM_MEM] = imx_clk_gate2("caam_mem", "ahb", base + 0x68, 8);
+ clks[IMX6UL_CLK_CAAM_ACLK] = imx_clk_gate2("caam_aclk", "ahb", base + 0x68, 10);
+ clks[IMX6UL_CLK_CAAM_IPG] = imx_clk_gate2("caam_ipg", "ipg", base + 0x68, 12);
+ } else if (clk_on_imx6ull()) {
+ clks[IMX6ULL_CLK_DCP_CLK] = imx_clk_gate2("dcp", "ahb", base + 0x68, 10);
+ clks[IMX6UL_CLK_ENET] = imx_clk_gate2("enet", "ipg", base + 0x68, 12);
+ clks[IMX6UL_CLK_ENET_AHB] = imx_clk_gate2("enet_ahb", "ahb", base + 0x68, 12);
+ }
clks[IMX6UL_CLK_CAN1_IPG] = imx_clk_gate2("can1_ipg", "ipg", base + 0x68, 14);
clks[IMX6UL_CLK_CAN1_SERIAL] = imx_clk_gate2("can1_serial", "can_podf", base + 0x68, 16);
clks[IMX6UL_CLK_CAN2_IPG] = imx_clk_gate2("can2_ipg", "ipg", base + 0x68, 18);
@@ -309,7 +341,10 @@ static void __init imx6ul_clocks_init(struct device_node *ccm_node)
clks[IMX6UL_CLK_GPT2_SERIAL] = imx_clk_gate2("gpt2_serial", "perclk", base + 0x68, 26);
clks[IMX6UL_CLK_UART2_IPG] = imx_clk_gate2("uart2_ipg", "ipg", base + 0x68, 28);
clks[IMX6UL_CLK_UART2_SERIAL] = imx_clk_gate2("uart2_serial", "uart_podf", base + 0x68, 28);
- clks[IMX6UL_CLK_AIPSTZ3] = imx_clk_gate2("aips_tz3", "ahb", base + 0x68, 30);
+ if (clk_on_imx6ul())
+ clks[IMX6UL_CLK_AIPSTZ3] = imx_clk_gate2("aips_tz3", "ahb", base + 0x68, 30);
+ else if (clk_on_imx6ull())
+ clks[IMX6UL_CLK_AIPSTZ3] = imx_clk_gate2("aips_tz3", "ahb", base + 0x80, 18);
/* CCGR1 */
clks[IMX6UL_CLK_ECSPI1] = imx_clk_gate2("ecspi1", "ecspi_podf", base + 0x6c, 0);
@@ -328,6 +363,11 @@ static void __init imx6ul_clocks_init(struct device_node *ccm_node)
clks[IMX6UL_CLK_UART4_SERIAL] = imx_clk_gate2("uart4_serail", "uart_podf", base + 0x6c, 24);
/* CCGR2 */
+ if (clk_on_imx6ull()) {
+ clks[IMX6ULL_CLK_ESAI_EXTAL] = imx_clk_gate2_shared("esai_extal", "esai_podf", base + 0x70, 0, &share_count_esai);
+ clks[IMX6ULL_CLK_ESAI_IPG] = imx_clk_gate2_shared("esai_ipg", "ahb", base + 0x70, 0, &share_count_esai);
+ clks[IMX6ULL_CLK_ESAI_MEM] = imx_clk_gate2_shared("esai_mem", "ahb", base + 0x70, 0, &share_count_esai);
+ }
clks[IMX6UL_CLK_CSI] = imx_clk_gate2("csi", "csi_podf", base + 0x70, 2);
clks[IMX6UL_CLK_I2C1] = imx_clk_gate2("i2c1", "perclk", base + 0x70, 6);
clks[IMX6UL_CLK_I2C2] = imx_clk_gate2("i2c2", "perclk", base + 0x70, 8);
@@ -340,8 +380,13 @@ static void __init imx6ul_clocks_init(struct device_node *ccm_node)
/* CCGR3 */
clks[IMX6UL_CLK_UART5_IPG] = imx_clk_gate2("uart5_ipg", "ipg", base + 0x74, 2);
clks[IMX6UL_CLK_UART5_SERIAL] = imx_clk_gate2("uart5_serial", "uart_podf", base + 0x74, 2);
- clks[IMX6UL_CLK_ENET] = imx_clk_gate2("enet", "ipg", base + 0x74, 4);
- clks[IMX6UL_CLK_ENET_AHB] = imx_clk_gate2("enet_ahb", "ahb", base + 0x74, 4);
+ if (clk_on_imx6ul()) {
+ clks[IMX6UL_CLK_ENET] = imx_clk_gate2("enet", "ipg", base + 0x74, 4);
+ clks[IMX6UL_CLK_ENET_AHB] = imx_clk_gate2("enet_ahb", "ahb", base + 0x74, 4);
+ } else if (clk_on_imx6ull()) {
+ clks[IMX6ULL_CLK_EPDC_ACLK] = imx_clk_gate2("epdc_aclk", "axi", base + 0x74, 4);
+ clks[IMX6ULL_CLK_EPDC_PIX] = imx_clk_gate2("epdc_pix", "epdc_podf", base + 0x74, 4);
+ }
clks[IMX6UL_CLK_UART6_IPG] = imx_clk_gate2("uart6_ipg", "ipg", base + 0x74, 6);
clks[IMX6UL_CLK_UART6_SERIAL] = imx_clk_gate2("uart6_serial", "uart_podf", base + 0x74, 6);
clks[IMX6UL_CLK_LCDIF_PIX] = imx_clk_gate2("lcdif_pix", "lcdif_podf", base + 0x74, 10);
@@ -385,8 +430,10 @@ static void __init imx6ul_clocks_init(struct device_node *ccm_node)
clks[IMX6UL_CLK_USBOH3] = imx_clk_gate2("usboh3", "ipg", base + 0x80, 0);
clks[IMX6UL_CLK_USDHC1] = imx_clk_gate2("usdhc1", "usdhc1_podf", base + 0x80, 2);
clks[IMX6UL_CLK_USDHC2] = imx_clk_gate2("usdhc2", "usdhc2_podf", base + 0x80, 4);
- clks[IMX6UL_CLK_SIM1] = imx_clk_gate2("sim1", "sim_sel", base + 0x80, 6);
- clks[IMX6UL_CLK_SIM2] = imx_clk_gate2("sim2", "sim_sel", base + 0x80, 8);
+ if (clk_on_imx6ul()) {
+ clks[IMX6UL_CLK_SIM1] = imx_clk_gate2("sim1", "sim_sel", base + 0x80, 6);
+ clks[IMX6UL_CLK_SIM2] = imx_clk_gate2("sim2", "sim_sel", base + 0x80, 8);
+ }
clks[IMX6UL_CLK_EIM] = imx_clk_gate2("eim", "eim_slow_podf", base + 0x80, 10);
clks[IMX6UL_CLK_PWM8] = imx_clk_gate2("pwm8", "perclk", base + 0x80, 16);
clks[IMX6UL_CLK_UART8_IPG] = imx_clk_gate2("uart8_ipg", "ipg", base + 0x80, 14);
@@ -441,7 +488,10 @@ static void __init imx6ul_clocks_init(struct device_node *ccm_node)
}
clk_set_parent(clks[IMX6UL_CLK_CAN_SEL], clks[IMX6UL_CLK_PLL3_60M]);
- clk_set_parent(clks[IMX6UL_CLK_SIM_PRE_SEL], clks[IMX6UL_CLK_PLL3_USB_OTG]);
+ if (clk_on_imx6ul())
+ clk_set_parent(clks[IMX6UL_CLK_SIM_PRE_SEL], clks[IMX6UL_CLK_PLL3_USB_OTG]);
+ else if (clk_on_imx6ull())
+ clk_set_parent(clks[IMX6ULL_CLK_EPDC_PRE_SEL], clks[IMX6UL_CLK_PLL3_PFD2]);
clk_set_parent(clks[IMX6UL_CLK_ENFC_SEL], clks[IMX6UL_CLK_PLL2_PFD2]);
}
diff --git a/drivers/clk/imx/clk-pllv3.c b/drivers/clk/imx/clk-pllv3.c
index 7a6acc3e4a92..ed3a2df536ea 100644
--- a/drivers/clk/imx/clk-pllv3.c
+++ b/drivers/clk/imx/clk-pllv3.c
@@ -234,6 +234,7 @@ static long clk_pllv3_av_round_rate(struct clk_hw *hw, unsigned long rate,
unsigned long max_rate = parent_rate * 54;
u32 div;
u32 mfn, mfd = 1000000;
+ u32 max_mfd = 0x3FFFFFFF;
u64 temp64;
if (rate > max_rate)
@@ -241,6 +242,9 @@ static long clk_pllv3_av_round_rate(struct clk_hw *hw, unsigned long rate,
else if (rate < min_rate)
rate = min_rate;
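+	/*
+	 * Using the parent rate as mfd (when it fits the 30-bit field)
+	 * lets mfn express the rate remainder exactly.
+	 */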
+ if (parent_rate <= max_mfd)
+ mfd = parent_rate;
+
div = rate / parent_rate;
temp64 = (u64) (rate - div * parent_rate);
temp64 *= mfd;
@@ -262,11 +266,15 @@ static int clk_pllv3_av_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long max_rate = parent_rate * 54;
u32 val, div;
u32 mfn, mfd = 1000000;
+ u32 max_mfd = 0x3FFFFFFF;
u64 temp64;
if (rate < min_rate || rate > max_rate)
return -EINVAL;
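+	/* Choose mfd the same way as clk_pllv3_av_round_rate() does */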
+ if (parent_rate <= max_mfd)
+ mfd = parent_rate;
+
div = rate / parent_rate;
temp64 = (u64) (rate - div * parent_rate);
temp64 *= mfd;
diff --git a/drivers/clk/imx/clk.h b/drivers/clk/imx/clk.h
index 3799ff82a9b4..4afad3b96a61 100644
--- a/drivers/clk/imx/clk.h
+++ b/drivers/clk/imx/clk.h
@@ -75,6 +75,14 @@ static inline struct clk *imx_clk_fixed(const char *name, int rate)
return clk_register_fixed_rate(NULL, name, NULL, 0, rate);
}
+static inline struct clk *imx_clk_mux_ldb(const char *name, void __iomem *reg,
+ u8 shift, u8 width, const char **parents, int num_parents)
+{
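+	/*
+	 * The LDB muxes are programmed glitch-free at init time (see
+	 * init_ldb_clks() in clk-imx6q.c) and must not be changed afterwards,
+	 * hence CLK_MUX_READ_ONLY.
+	 */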
+ return clk_register_mux(NULL, name, parents, num_parents,
+ CLK_SET_RATE_NO_REPARENT | CLK_SET_RATE_PARENT, reg,
+ shift, width, CLK_MUX_READ_ONLY, &imx_ccm_lock);
+}
+
static inline struct clk *imx_clk_fixed_factor(const char *name,
const char *parent, unsigned int mult, unsigned int div)
{
diff --git a/drivers/clk/keystone/pll.c b/drivers/clk/keystone/pll.c
index a26ba2184454..e7e840fb74ea 100644
--- a/drivers/clk/keystone/pll.c
+++ b/drivers/clk/keystone/pll.c
@@ -154,7 +154,7 @@ out:
}
/**
- * _of_clk_init - PLL initialisation via DT
+ * _of_pll_clk_init - PLL initialisation via DT
* @node: device tree node for this clock
* @pllctrl: If true, lower 6 bits of multiplier is in pllm register of
* pll controller, else it is in the control register0(bit 11-6)
@@ -235,7 +235,7 @@ CLK_OF_DECLARE(keystone_pll_clock, "ti,keystone,pll-clock",
of_keystone_pll_clk_init);
/**
- * of_keystone_pll_main_clk_init - Main PLL initialisation DT wrapper
+ * of_keystone_main_pll_clk_init - Main PLL initialisation DT wrapper
* @node: device tree node for this clock
*/
static void __init of_keystone_main_pll_clk_init(struct device_node *node)
@@ -267,25 +267,30 @@ static void __init of_pll_div_clk_init(struct device_node *node)
parent_name = of_clk_get_parent_name(node, 0);
if (!parent_name) {
pr_err("%s: missing parent clock\n", __func__);
+ iounmap(reg);
return;
}
if (of_property_read_u32(node, "bit-shift", &shift)) {
pr_err("%s: missing 'shift' property\n", __func__);
+ iounmap(reg);
return;
}
if (of_property_read_u32(node, "bit-mask", &mask)) {
pr_err("%s: missing 'bit-mask' property\n", __func__);
+ iounmap(reg);
return;
}
clk = clk_register_divider(NULL, clk_name, parent_name, 0, reg, shift,
mask, 0, NULL);
- if (clk)
+ if (clk) {
of_clk_add_provider(node, of_clk_src_simple_get, clk);
- else
+ } else {
pr_err("%s: error registering divider %s\n", __func__, clk_name);
+ iounmap(reg);
+ }
}
CLK_OF_DECLARE(pll_divider_clock, "ti,keystone,pll-divider-clock", of_pll_div_clk_init);
diff --git a/drivers/clk/mediatek/Kconfig b/drivers/clk/mediatek/Kconfig
index f042bd2a6a99..0bd631a41f6a 100644
--- a/drivers/clk/mediatek/Kconfig
+++ b/drivers/clk/mediatek/Kconfig
@@ -6,6 +6,49 @@ config COMMON_CLK_MEDIATEK
---help---
Mediatek SoCs' clock support.
+config COMMON_CLK_MT2701
+ bool "Clock driver for Mediatek MT2701"
+ select COMMON_CLK_MEDIATEK
+ default ARCH_MEDIATEK
+ ---help---
+ This driver supports Mediatek MT2701 basic clocks.
+
+config COMMON_CLK_MT2701_MMSYS
+ bool "Clock driver for Mediatek MT2701 mmsys"
+ select COMMON_CLK_MT2701
+ ---help---
+ This driver supports Mediatek MT2701 mmsys clocks.
+
+config COMMON_CLK_MT2701_IMGSYS
+ bool "Clock driver for Mediatek MT2701 imgsys"
+ select COMMON_CLK_MT2701
+ ---help---
+ This driver supports Mediatek MT2701 imgsys clocks.
+
+config COMMON_CLK_MT2701_VDECSYS
+ bool "Clock driver for Mediatek MT2701 vdecsys"
+ select COMMON_CLK_MT2701
+ ---help---
+ This driver supports Mediatek MT2701 vdecsys clocks.
+
+config COMMON_CLK_MT2701_HIFSYS
+ bool "Clock driver for Mediatek MT2701 hifsys"
+ select COMMON_CLK_MT2701
+ ---help---
+ This driver supports Mediatek MT2701 hifsys clocks.
+
+config COMMON_CLK_MT2701_ETHSYS
+ bool "Clock driver for Mediatek MT2701 ethsys"
+ select COMMON_CLK_MT2701
+ ---help---
+ This driver supports Mediatek MT2701 ethsys clocks.
+
+config COMMON_CLK_MT2701_BDPSYS
+ bool "Clock driver for Mediatek MT2701 bdpsys"
+ select COMMON_CLK_MT2701
+ ---help---
+ This driver supports Mediatek MT2701 bdpsys clocks.
+
config COMMON_CLK_MT8135
bool "Clock driver for Mediatek MT8135"
depends on ARCH_MEDIATEK || COMPILE_TEST
diff --git a/drivers/clk/mediatek/Makefile b/drivers/clk/mediatek/Makefile
index 32e7222e7305..19ae7ef79b57 100644
--- a/drivers/clk/mediatek/Makefile
+++ b/drivers/clk/mediatek/Makefile
@@ -1,4 +1,11 @@
obj-$(CONFIG_COMMON_CLK_MEDIATEK) += clk-mtk.o clk-pll.o clk-gate.o clk-apmixed.o
obj-$(CONFIG_RESET_CONTROLLER) += reset.o
+obj-$(CONFIG_COMMON_CLK_MT2701) += clk-mt2701.o
+obj-$(CONFIG_COMMON_CLK_MT2701_BDPSYS) += clk-mt2701-bdp.o
+obj-$(CONFIG_COMMON_CLK_MT2701_ETHSYS) += clk-mt2701-eth.o
+obj-$(CONFIG_COMMON_CLK_MT2701_HIFSYS) += clk-mt2701-hif.o
+obj-$(CONFIG_COMMON_CLK_MT2701_IMGSYS) += clk-mt2701-img.o
+obj-$(CONFIG_COMMON_CLK_MT2701_MMSYS) += clk-mt2701-mm.o
+obj-$(CONFIG_COMMON_CLK_MT2701_VDECSYS) += clk-mt2701-vdec.o
obj-$(CONFIG_COMMON_CLK_MT8135) += clk-mt8135.o
obj-$(CONFIG_COMMON_CLK_MT8173) += clk-mt8173.o
diff --git a/drivers/clk/mediatek/clk-gate.c b/drivers/clk/mediatek/clk-gate.c
index d8787bf444eb..934bf0e45e26 100644
--- a/drivers/clk/mediatek/clk-gate.c
+++ b/drivers/clk/mediatek/clk-gate.c
@@ -61,6 +61,22 @@ static void mtk_cg_clr_bit(struct clk_hw *hw)
regmap_write(cg->regmap, cg->clr_ofs, BIT(cg->bit));
}
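+
+/*
+ * Gates in subsystems with a single control register (e.g. ethsys,
+ * hifsys) have no set/clear companions; toggle the bit in sta_ofs by
+ * read-modify-write instead.
+ */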
+static void mtk_cg_set_bit_no_setclr(struct clk_hw *hw)
+{
+ struct mtk_clk_gate *cg = to_mtk_clk_gate(hw);
+ u32 cgbit = BIT(cg->bit);
+
+ regmap_update_bits(cg->regmap, cg->sta_ofs, cgbit, cgbit);
+}
+
+static void mtk_cg_clr_bit_no_setclr(struct clk_hw *hw)
+{
+ struct mtk_clk_gate *cg = to_mtk_clk_gate(hw);
+ u32 cgbit = BIT(cg->bit);
+
+ regmap_update_bits(cg->regmap, cg->sta_ofs, cgbit, 0);
+}
+
static int mtk_cg_enable(struct clk_hw *hw)
{
mtk_cg_clr_bit(hw);
@@ -85,6 +101,30 @@ static void mtk_cg_disable_inv(struct clk_hw *hw)
mtk_cg_clr_bit(hw);
}
+static int mtk_cg_enable_no_setclr(struct clk_hw *hw)
+{
+ mtk_cg_clr_bit_no_setclr(hw);
+
+ return 0;
+}
+
+static void mtk_cg_disable_no_setclr(struct clk_hw *hw)
+{
+ mtk_cg_set_bit_no_setclr(hw);
+}
+
+static int mtk_cg_enable_inv_no_setclr(struct clk_hw *hw)
+{
+ mtk_cg_set_bit_no_setclr(hw);
+
+ return 0;
+}
+
+static void mtk_cg_disable_inv_no_setclr(struct clk_hw *hw)
+{
+ mtk_cg_clr_bit_no_setclr(hw);
+}
+
const struct clk_ops mtk_clk_gate_ops_setclr = {
.is_enabled = mtk_cg_bit_is_cleared,
.enable = mtk_cg_enable,
@@ -97,6 +137,18 @@ const struct clk_ops mtk_clk_gate_ops_setclr_inv = {
.disable = mtk_cg_disable_inv,
};
+const struct clk_ops mtk_clk_gate_ops_no_setclr = {
+ .is_enabled = mtk_cg_bit_is_cleared,
+ .enable = mtk_cg_enable_no_setclr,
+ .disable = mtk_cg_disable_no_setclr,
+};
+
+const struct clk_ops mtk_clk_gate_ops_no_setclr_inv = {
+ .is_enabled = mtk_cg_bit_is_set,
+ .enable = mtk_cg_enable_inv_no_setclr,
+ .disable = mtk_cg_disable_inv_no_setclr,
+};
+
struct clk *mtk_clk_register_gate(
const char *name,
const char *parent_name,
diff --git a/drivers/clk/mediatek/clk-gate.h b/drivers/clk/mediatek/clk-gate.h
index b1821603b887..72ef89b3ad7b 100644
--- a/drivers/clk/mediatek/clk-gate.h
+++ b/drivers/clk/mediatek/clk-gate.h
@@ -36,6 +36,8 @@ static inline struct mtk_clk_gate *to_mtk_clk_gate(struct clk_hw *hw)
extern const struct clk_ops mtk_clk_gate_ops_setclr;
extern const struct clk_ops mtk_clk_gate_ops_setclr_inv;
+extern const struct clk_ops mtk_clk_gate_ops_no_setclr;
+extern const struct clk_ops mtk_clk_gate_ops_no_setclr_inv;
struct clk *mtk_clk_register_gate(
const char *name,
diff --git a/drivers/clk/mediatek/clk-mt2701-bdp.c b/drivers/clk/mediatek/clk-mt2701-bdp.c
new file mode 100644
index 000000000000..fe4964d05b5f
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt2701-bdp.c
@@ -0,0 +1,138 @@
+/*
+ * Copyright (c) 2014 MediaTek Inc.
+ * Author: Shunli Wang <shunli.wang@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/platform_device.h>
+
+#include "clk-mtk.h"
+#include "clk-gate.h"
+
+#include <dt-bindings/clock/mt2701-clk.h>
+
+static const struct mtk_gate_regs bdp0_cg_regs = {
+ .set_ofs = 0x0104,
+ .clr_ofs = 0x0108,
+ .sta_ofs = 0x0100,
+};
+
+static const struct mtk_gate_regs bdp1_cg_regs = {
+ .set_ofs = 0x0114,
+ .clr_ofs = 0x0118,
+ .sta_ofs = 0x0110,
+};
+
+#define GATE_BDP0(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &bdp0_cg_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_ops_setclr_inv, \
+ }
+
+#define GATE_BDP1(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &bdp1_cg_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_ops_setclr_inv, \
+ }
+
+static const struct mtk_gate bdp_clks[] = {
+ GATE_BDP0(CLK_BDP_BRG_BA, "brg_baclk", "mm_sel", 0),
+ GATE_BDP0(CLK_BDP_BRG_DRAM, "brg_dram", "mm_sel", 1),
+ GATE_BDP0(CLK_BDP_LARB_DRAM, "larb_dram", "mm_sel", 2),
+ GATE_BDP0(CLK_BDP_WR_VDI_PXL, "wr_vdi_pxl", "hdmi_0_deep340m", 3),
+ GATE_BDP0(CLK_BDP_WR_VDI_DRAM, "wr_vdi_dram", "mm_sel", 4),
+ GATE_BDP0(CLK_BDP_WR_B, "wr_bclk", "mm_sel", 5),
+ GATE_BDP0(CLK_BDP_DGI_IN, "dgi_in", "dpi1_sel", 6),
+ GATE_BDP0(CLK_BDP_DGI_OUT, "dgi_out", "dpi1_sel", 7),
+ GATE_BDP0(CLK_BDP_FMT_MAST_27, "fmt_mast_27", "dpi1_sel", 8),
+ GATE_BDP0(CLK_BDP_FMT_B, "fmt_bclk", "mm_sel", 9),
+ GATE_BDP0(CLK_BDP_OSD_B, "osd_bclk", "mm_sel", 10),
+ GATE_BDP0(CLK_BDP_OSD_DRAM, "osd_dram", "mm_sel", 11),
+ GATE_BDP0(CLK_BDP_OSD_AGENT, "osd_agent", "osd_sel", 12),
+ GATE_BDP0(CLK_BDP_OSD_PXL, "osd_pxl", "dpi1_sel", 13),
+ GATE_BDP0(CLK_BDP_RLE_B, "rle_bclk", "mm_sel", 14),
+ GATE_BDP0(CLK_BDP_RLE_AGENT, "rle_agent", "mm_sel", 15),
+ GATE_BDP0(CLK_BDP_RLE_DRAM, "rle_dram", "mm_sel", 16),
+ GATE_BDP0(CLK_BDP_F27M, "f27m", "di_sel", 17),
+ GATE_BDP0(CLK_BDP_F27M_VDOUT, "f27m_vdout", "di_sel", 18),
+ GATE_BDP0(CLK_BDP_F27_74_74, "f27_74_74", "di_sel", 19),
+ GATE_BDP0(CLK_BDP_F2FS, "f2fs", "di_sel", 20),
+ GATE_BDP0(CLK_BDP_F2FS74_148, "f2fs74_148", "di_sel", 21),
+ GATE_BDP0(CLK_BDP_FB, "fbclk", "mm_sel", 22),
+ GATE_BDP0(CLK_BDP_VDO_DRAM, "vdo_dram", "mm_sel", 23),
+ GATE_BDP0(CLK_BDP_VDO_2FS, "vdo_2fs", "di_sel", 24),
+ GATE_BDP0(CLK_BDP_VDO_B, "vdo_bclk", "mm_sel", 25),
+ GATE_BDP0(CLK_BDP_WR_DI_PXL, "wr_di_pxl", "di_sel", 26),
+ GATE_BDP0(CLK_BDP_WR_DI_DRAM, "wr_di_dram", "mm_sel", 27),
+ GATE_BDP0(CLK_BDP_WR_DI_B, "wr_di_bclk", "mm_sel", 28),
+ GATE_BDP0(CLK_BDP_NR_PXL, "nr_pxl", "nr_sel", 29),
+ GATE_BDP0(CLK_BDP_NR_DRAM, "nr_dram", "mm_sel", 30),
+ GATE_BDP0(CLK_BDP_NR_B, "nr_bclk", "mm_sel", 31),
+ GATE_BDP1(CLK_BDP_RX_F, "rx_fclk", "hadds2_fbclk", 0),
+ GATE_BDP1(CLK_BDP_RX_X, "rx_xclk", "clk26m", 1),
+ GATE_BDP1(CLK_BDP_RXPDT, "rxpdtclk", "hdmi_0_pix340m", 2),
+ GATE_BDP1(CLK_BDP_RX_CSCL_N, "rx_cscl_n", "clk26m", 3),
+ GATE_BDP1(CLK_BDP_RX_CSCL, "rx_cscl", "clk26m", 4),
+ GATE_BDP1(CLK_BDP_RX_DDCSCL_N, "rx_ddcscl_n", "hdmi_scl_rx", 5),
+ GATE_BDP1(CLK_BDP_RX_DDCSCL, "rx_ddcscl", "hdmi_scl_rx", 6),
+ GATE_BDP1(CLK_BDP_RX_VCO, "rx_vcoclk", "hadds2pll_294m", 7),
+ GATE_BDP1(CLK_BDP_RX_DP, "rx_dpclk", "hdmi_0_pll340m", 8),
+ GATE_BDP1(CLK_BDP_RX_P, "rx_pclk", "hdmi_0_pll340m", 9),
+ GATE_BDP1(CLK_BDP_RX_M, "rx_mclk", "hadds2pll_294m", 10),
+ GATE_BDP1(CLK_BDP_RX_PLL, "rx_pllclk", "hdmi_0_pix340m", 11),
+ GATE_BDP1(CLK_BDP_BRG_RT_B, "brg_rt_bclk", "mm_sel", 12),
+ GATE_BDP1(CLK_BDP_BRG_RT_DRAM, "brg_rt_dram", "mm_sel", 13),
+ GATE_BDP1(CLK_BDP_LARBRT_DRAM, "larbrt_dram", "mm_sel", 14),
+ GATE_BDP1(CLK_BDP_TMDS_SYN, "tmds_syn", "hdmi_0_pll340m", 15),
+ GATE_BDP1(CLK_BDP_HDMI_MON, "hdmi_mon", "hdmi_0_pll340m", 16),
+};
+
+static const struct of_device_id of_match_clk_mt2701_bdp[] = {
+ { .compatible = "mediatek,mt2701-bdpsys", },
+ {}
+};
+
+static int clk_mt2701_bdp_probe(struct platform_device *pdev)
+{
+ struct clk_onecell_data *clk_data;
+ int r;
+ struct device_node *node = pdev->dev.of_node;
+
+ clk_data = mtk_alloc_clk_data(CLK_BDP_NR);
+
+ mtk_clk_register_gates(node, bdp_clks, ARRAY_SIZE(bdp_clks),
+ clk_data);
+
+ r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+ if (r)
+ dev_err(&pdev->dev,
+ "could not register clock provider: %s: %d\n",
+ pdev->name, r);
+
+ return r;
+}
+
+static struct platform_driver clk_mt2701_bdp_drv = {
+ .probe = clk_mt2701_bdp_probe,
+ .driver = {
+ .name = "clk-mt2701-bdp",
+ .of_match_table = of_match_clk_mt2701_bdp,
+ },
+};
+
+builtin_platform_driver(clk_mt2701_bdp_drv);
diff --git a/drivers/clk/mediatek/clk-mt2701-eth.c b/drivers/clk/mediatek/clk-mt2701-eth.c
new file mode 100644
index 000000000000..877be8715afa
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt2701-eth.c
@@ -0,0 +1,80 @@
+/*
+ * Copyright (c) 2014 MediaTek Inc.
+ * Author: Shunli Wang <shunli.wang@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/platform_device.h>
+
+#include "clk-mtk.h"
+#include "clk-gate.h"
+
+#include <dt-bindings/clock/mt2701-clk.h>
+
+static const struct mtk_gate_regs eth_cg_regs = {
+ .sta_ofs = 0x0030,
+};
+
+#define GATE_ETH(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &eth_cg_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_ops_no_setclr_inv, \
+ }
+
+static const struct mtk_gate eth_clks[] = {
+ GATE_ETH(CLK_ETHSYS_HSDMA, "hsdma_clk", "ethif_sel", 5),
+ GATE_ETH(CLK_ETHSYS_ESW, "esw_clk", "ethpll_500m_ck", 6),
+ GATE_ETH(CLK_ETHSYS_GP2, "gp2_clk", "trgpll", 7),
+ GATE_ETH(CLK_ETHSYS_GP1, "gp1_clk", "ethpll_500m_ck", 8),
+ GATE_ETH(CLK_ETHSYS_PCM, "pcm_clk", "ethif_sel", 11),
+ GATE_ETH(CLK_ETHSYS_GDMA, "gdma_clk", "ethif_sel", 14),
+ GATE_ETH(CLK_ETHSYS_I2S, "i2s_clk", "ethif_sel", 17),
+ GATE_ETH(CLK_ETHSYS_CRYPTO, "crypto_clk", "ethif_sel", 29),
+};
+
+static const struct of_device_id of_match_clk_mt2701_eth[] = {
+ { .compatible = "mediatek,mt2701-ethsys", },
+ {}
+};
+
+static int clk_mt2701_eth_probe(struct platform_device *pdev)
+{
+ struct clk_onecell_data *clk_data;
+ int r;
+ struct device_node *node = pdev->dev.of_node;
+
+ clk_data = mtk_alloc_clk_data(CLK_ETHSYS_NR);
+
+ mtk_clk_register_gates(node, eth_clks, ARRAY_SIZE(eth_clks),
+ clk_data);
+
+ r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+ if (r)
+ dev_err(&pdev->dev,
+ "could not register clock provider: %s: %d\n",
+ pdev->name, r);
+
+ return r;
+}
+
+static struct platform_driver clk_mt2701_eth_drv = {
+ .probe = clk_mt2701_eth_probe,
+ .driver = {
+ .name = "clk-mt2701-eth",
+ .of_match_table = of_match_clk_mt2701_eth,
+ },
+};
+
+builtin_platform_driver(clk_mt2701_eth_drv);
diff --git a/drivers/clk/mediatek/clk-mt2701-hif.c b/drivers/clk/mediatek/clk-mt2701-hif.c
new file mode 100644
index 000000000000..18f3723be3e8
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt2701-hif.c
@@ -0,0 +1,81 @@
+/*
+ * Copyright (c) 2014 MediaTek Inc.
+ * Author: Shunli Wang <shunli.wang@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/platform_device.h>
+
+#include "clk-mtk.h"
+#include "clk-gate.h"
+
+#include <dt-bindings/clock/mt2701-clk.h>
+
+static const struct mtk_gate_regs hif_cg_regs = {
+ .sta_ofs = 0x0030,
+};
+
+#define GATE_HIF(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &hif_cg_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_ops_no_setclr_inv, \
+ }
+
+static const struct mtk_gate hif_clks[] = {
+ GATE_HIF(CLK_HIFSYS_USB0PHY, "usb0_phy_clk", "ethpll_500m_ck", 21),
+ GATE_HIF(CLK_HIFSYS_USB1PHY, "usb1_phy_clk", "ethpll_500m_ck", 22),
+ GATE_HIF(CLK_HIFSYS_PCIE0, "pcie0_clk", "ethpll_500m_ck", 24),
+ GATE_HIF(CLK_HIFSYS_PCIE1, "pcie1_clk", "ethpll_500m_ck", 25),
+ GATE_HIF(CLK_HIFSYS_PCIE2, "pcie2_clk", "ethpll_500m_ck", 26),
+};
+
+static const struct of_device_id of_match_clk_mt2701_hif[] = {
+ { .compatible = "mediatek,mt2701-hifsys", },
+ {}
+};
+
+static int clk_mt2701_hif_probe(struct platform_device *pdev)
+{
+ struct clk_onecell_data *clk_data;
+ int r;
+ struct device_node *node = pdev->dev.of_node;
+
+ clk_data = mtk_alloc_clk_data(CLK_HIFSYS_NR);
+
+ mtk_clk_register_gates(node, hif_clks, ARRAY_SIZE(hif_clks),
+ clk_data);
+
+ r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+ if (r) {
+ dev_err(&pdev->dev,
+ "could not register clock provider: %s: %d\n",
+ pdev->name, r);
+ return r;
+ }
+
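+ /* hifsys also hosts reset bits: one 32-bit reset bank at offset 0x34. */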
+ mtk_register_reset_controller(node, 1, 0x34);
+
+ return 0;
+}
+
+static struct platform_driver clk_mt2701_hif_drv = {
+ .probe = clk_mt2701_hif_probe,
+ .driver = {
+ .name = "clk-mt2701-hif",
+ .of_match_table = of_match_clk_mt2701_hif,
+ },
+};
+
+builtin_platform_driver(clk_mt2701_hif_drv);
diff --git a/drivers/clk/mediatek/clk-mt2701-img.c b/drivers/clk/mediatek/clk-mt2701-img.c
new file mode 100644
index 000000000000..b7441c98bda8
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt2701-img.c
@@ -0,0 +1,80 @@
+/*
+ * Copyright (c) 2014 MediaTek Inc.
+ * Author: Shunli Wang <shunli.wang@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/platform_device.h>
+
+#include "clk-mtk.h"
+#include "clk-gate.h"
+
+#include <dt-bindings/clock/mt2701-clk.h>
+
+static const struct mtk_gate_regs img_cg_regs = {
+ .set_ofs = 0x0004,
+ .clr_ofs = 0x0008,
+ .sta_ofs = 0x0000,
+};
+
+#define GATE_IMG(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &img_cg_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_ops_setclr, \
+ }
+
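+/*
+ * imgsys has dedicated set/clear registers, so gating is a single
+ * write without read-modify-write; a set CG bit gates the clock.
+ */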
+static const struct mtk_gate img_clks[] = {
+ GATE_IMG(CLK_IMG_SMI_COMM, "img_smi_comm", "mm_sel", 0),
+ GATE_IMG(CLK_IMG_RESZ, "img_resz", "mm_sel", 1),
+ GATE_IMG(CLK_IMG_JPGDEC_SMI, "img_jpgdec_smi", "mm_sel", 5),
+ GATE_IMG(CLK_IMG_JPGDEC, "img_jpgdec", "mm_sel", 6),
+ GATE_IMG(CLK_IMG_VENC_LT, "img_venc_lt", "mm_sel", 8),
+ GATE_IMG(CLK_IMG_VENC, "img_venc", "mm_sel", 9),
+};
+
+static const struct of_device_id of_match_clk_mt2701_img[] = {
+ { .compatible = "mediatek,mt2701-imgsys", },
+ {}
+};
+
+static int clk_mt2701_img_probe(struct platform_device *pdev)
+{
+ struct clk_onecell_data *clk_data;
+ int r;
+ struct device_node *node = pdev->dev.of_node;
+
+ clk_data = mtk_alloc_clk_data(CLK_IMG_NR);
+
+ mtk_clk_register_gates(node, img_clks, ARRAY_SIZE(img_clks),
+ clk_data);
+
+ r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+ if (r)
+ dev_err(&pdev->dev,
+ "could not register clock provider: %s: %d\n",
+ pdev->name, r);
+
+ return r;
+}
+
+static struct platform_driver clk_mt2701_img_drv = {
+ .probe = clk_mt2701_img_probe,
+ .driver = {
+ .name = "clk-mt2701-img",
+ .of_match_table = of_match_clk_mt2701_img,
+ },
+};
+
+builtin_platform_driver(clk_mt2701_img_drv);
diff --git a/drivers/clk/mediatek/clk-mt2701-mm.c b/drivers/clk/mediatek/clk-mt2701-mm.c
new file mode 100644
index 000000000000..fe1f85072fc5
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt2701-mm.c
@@ -0,0 +1,123 @@
+/*
+ * Copyright (c) 2014 MediaTek Inc.
+ * Author: Shunli Wang <shunli.wang@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/platform_device.h>
+
+#include "clk-mtk.h"
+#include "clk-gate.h"
+
+#include <dt-bindings/clock/mt2701-clk.h>
+
+static const struct mtk_gate_regs disp0_cg_regs = {
+ .set_ofs = 0x0104,
+ .clr_ofs = 0x0108,
+ .sta_ofs = 0x0100,
+};
+
+static const struct mtk_gate_regs disp1_cg_regs = {
+ .set_ofs = 0x0114,
+ .clr_ofs = 0x0118,
+ .sta_ofs = 0x0110,
+};
+
+#define GATE_DISP0(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &disp0_cg_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_ops_setclr, \
+ }
+
+#define GATE_DISP1(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &disp1_cg_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_ops_setclr, \
+ }
+
+static const struct mtk_gate mm_clks[] = {
+ GATE_DISP0(CLK_MM_SMI_COMMON, "mm_smi_comm", "mm_sel", 0),
+ GATE_DISP0(CLK_MM_SMI_LARB0, "mm_smi_larb0", "mm_sel", 1),
+ GATE_DISP0(CLK_MM_CMDQ, "mm_cmdq", "mm_sel", 2),
+ GATE_DISP0(CLK_MM_MUTEX, "mm_mutex", "mm_sel", 3),
+ GATE_DISP0(CLK_MM_DISP_COLOR, "mm_disp_color", "mm_sel", 4),
+ GATE_DISP0(CLK_MM_DISP_BLS, "mm_disp_bls", "mm_sel", 5),
+ GATE_DISP0(CLK_MM_DISP_WDMA, "mm_disp_wdma", "mm_sel", 6),
+ GATE_DISP0(CLK_MM_DISP_RDMA, "mm_disp_rdma", "mm_sel", 7),
+ GATE_DISP0(CLK_MM_DISP_OVL, "mm_disp_ovl", "mm_sel", 8),
+ GATE_DISP0(CLK_MM_MDP_TDSHP, "mm_mdp_tdshp", "mm_sel", 9),
+ GATE_DISP0(CLK_MM_MDP_WROT, "mm_mdp_wrot", "mm_sel", 10),
+ GATE_DISP0(CLK_MM_MDP_WDMA, "mm_mdp_wdma", "mm_sel", 11),
+ GATE_DISP0(CLK_MM_MDP_RSZ1, "mm_mdp_rsz1", "mm_sel", 12),
+ GATE_DISP0(CLK_MM_MDP_RSZ0, "mm_mdp_rsz0", "mm_sel", 13),
+ GATE_DISP0(CLK_MM_MDP_RDMA, "mm_mdp_rdma", "mm_sel", 14),
+ GATE_DISP0(CLK_MM_MDP_BLS_26M, "mm_mdp_bls_26m", "pwm_sel", 15),
+ GATE_DISP0(CLK_MM_CAM_MDP, "mm_cam_mdp", "mm_sel", 16),
+ GATE_DISP0(CLK_MM_FAKE_ENG, "mm_fake_eng", "mm_sel", 17),
+ GATE_DISP0(CLK_MM_MUTEX_32K, "mm_mutex_32k", "rtc_sel", 18),
+ GATE_DISP0(CLK_MM_DISP_RDMA1, "mm_disp_rdma1", "mm_sel", 19),
+ GATE_DISP0(CLK_MM_DISP_UFOE, "mm_disp_ufoe", "mm_sel", 20),
+ GATE_DISP1(CLK_MM_DSI_ENGINE, "mm_dsi_eng", "mm_sel", 0),
+ GATE_DISP1(CLK_MM_DSI_DIG, "mm_dsi_dig", "dsi0_lntc_dsi", 1),
+ GATE_DISP1(CLK_MM_DPI_DIGL, "mm_dpi_digl", "dpi0_sel", 2),
+ GATE_DISP1(CLK_MM_DPI_ENGINE, "mm_dpi_eng", "mm_sel", 3),
+ GATE_DISP1(CLK_MM_DPI1_DIGL, "mm_dpi1_digl", "dpi1_sel", 4),
+ GATE_DISP1(CLK_MM_DPI1_ENGINE, "mm_dpi1_eng", "mm_sel", 5),
+ GATE_DISP1(CLK_MM_TVE_OUTPUT, "mm_tve_output", "tve_sel", 6),
+ GATE_DISP1(CLK_MM_TVE_INPUT, "mm_tve_input", "dpi0_sel", 7),
+ GATE_DISP1(CLK_MM_HDMI_PIXEL, "mm_hdmi_pixel", "dpi1_sel", 8),
+ GATE_DISP1(CLK_MM_HDMI_PLL, "mm_hdmi_pll", "hdmi_sel", 9),
+ GATE_DISP1(CLK_MM_HDMI_AUDIO, "mm_hdmi_audio", "apll_sel", 10),
+ GATE_DISP1(CLK_MM_HDMI_SPDIF, "mm_hdmi_spdif", "apll_sel", 11),
+ GATE_DISP1(CLK_MM_TVE_FMM, "mm_tve_fmm", "mm_sel", 14),
+};
+
+static const struct of_device_id of_match_clk_mt2701_mm[] = {
+ { .compatible = "mediatek,mt2701-mmsys", },
+ {}
+};
+
+static int clk_mt2701_mm_probe(struct platform_device *pdev)
+{
+ struct clk_onecell_data *clk_data;
+ int r;
+ struct device_node *node = pdev->dev.of_node;
+
+ clk_data = mtk_alloc_clk_data(CLK_MM_NR);
+
+ mtk_clk_register_gates(node, mm_clks, ARRAY_SIZE(mm_clks),
+ clk_data);
+
+ r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+ if (r)
+ dev_err(&pdev->dev,
+ "could not register clock provider: %s: %d\n",
+ pdev->name, r);
+
+ return r;
+}
+
+static struct platform_driver clk_mt2701_mm_drv = {
+ .probe = clk_mt2701_mm_probe,
+ .driver = {
+ .name = "clk-mt2701-mm",
+ .of_match_table = of_match_clk_mt2701_mm,
+ },
+};
+
+builtin_platform_driver(clk_mt2701_mm_drv);
diff --git a/drivers/clk/mediatek/clk-mt2701-vdec.c b/drivers/clk/mediatek/clk-mt2701-vdec.c
new file mode 100644
index 000000000000..d3c0fc9d6f02
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt2701-vdec.c
@@ -0,0 +1,91 @@
+/*
+ * Copyright (c) 2014 MediaTek Inc.
+ * Author: Shunli Wang <shunli.wang@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/platform_device.h>
+
+#include "clk-mtk.h"
+#include "clk-gate.h"
+
+#include <dt-bindings/clock/mt2701-clk.h>
+
+static const struct mtk_gate_regs vdec0_cg_regs = {
+ .set_ofs = 0x0000,
+ .clr_ofs = 0x0004,
+ .sta_ofs = 0x0000,
+};
+
+static const struct mtk_gate_regs vdec1_cg_regs = {
+ .set_ofs = 0x0008,
+ .clr_ofs = 0x000c,
+ .sta_ofs = 0x0008,
+};
+
+#define GATE_VDEC0(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &vdec0_cg_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_ops_setclr_inv, \
+ }
+
+#define GATE_VDEC1(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &vdec1_cg_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_ops_setclr_inv, \
+ }
+
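+/*
+ * vdecsys uses set/clear registers with inverted polarity: writing the
+ * bit to the set register ungates the clock.
+ */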
+static const struct mtk_gate vdec_clks[] = {
+ GATE_VDEC0(CLK_VDEC_CKGEN, "vdec_cken", "vdec_sel", 0),
+ GATE_VDEC1(CLK_VDEC_LARB, "vdec_larb_cken", "mm_sel", 0),
+};
+
+static const struct of_device_id of_match_clk_mt2701_vdec[] = {
+ { .compatible = "mediatek,mt2701-vdecsys", },
+ {}
+};
+
+static int clk_mt2701_vdec_probe(struct platform_device *pdev)
+{
+ struct clk_onecell_data *clk_data;
+ int r;
+ struct device_node *node = pdev->dev.of_node;
+
+ clk_data = mtk_alloc_clk_data(CLK_VDEC_NR);
+
+ mtk_clk_register_gates(node, vdec_clks, ARRAY_SIZE(vdec_clks),
+ clk_data);
+
+ r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+ if (r)
+ dev_err(&pdev->dev,
+ "could not register clock provider: %s: %d\n",
+ pdev->name, r);
+
+ return r;
+}
+
+static struct platform_driver clk_mt2701_vdec_drv = {
+ .probe = clk_mt2701_vdec_probe,
+ .driver = {
+ .name = "clk-mt2701-vdec",
+ .of_match_table = of_match_clk_mt2701_vdec,
+ },
+};
+
+builtin_platform_driver(clk_mt2701_vdec_drv);
diff --git a/drivers/clk/mediatek/clk-mt2701.c b/drivers/clk/mediatek/clk-mt2701.c
new file mode 100644
index 000000000000..6f26e6a37a6b
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt2701.c
@@ -0,0 +1,1035 @@
+/*
+ * Copyright (c) 2014 MediaTek Inc.
+ * Author: Shunli Wang <shunli.wang@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+
+#include "clk-mtk.h"
+#include "clk-gate.h"
+
+#include <dt-bindings/clock/mt2701-clk.h>
+
+/*
+ * For some clocks we do not care about the actual rate, and the rate may
+ * differ between products or scenarios. Model such clocks with a rate of
+ * 0 to denote that the value is not a real rate.
+ */
+#define DUMMY_RATE 0
+
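+/*
+ * Serializes read-modify-write accesses to the mux and divider register
+ * words that several clocks share; handed to the composite and divider
+ * registration helpers.
+ */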
+static DEFINE_SPINLOCK(mt2701_clk_lock);
+
+static const struct mtk_fixed_clk top_fixed_clks[] = {
+ FIXED_CLK(CLK_TOP_DPI, "dpi_ck", "clk26m",
+ 108 * MHZ),
+ FIXED_CLK(CLK_TOP_DMPLL, "dmpll_ck", "clk26m",
+ 400 * MHZ),
+ FIXED_CLK(CLK_TOP_VENCPLL, "vencpll_ck", "clk26m",
+ 295750000),
+ FIXED_CLK(CLK_TOP_HDMI_0_PIX340M, "hdmi_0_pix340m", "clk26m",
+ 340 * MHZ),
+ FIXED_CLK(CLK_TOP_HDMI_0_DEEP340M, "hdmi_0_deep340m", "clk26m",
+ 340 * MHZ),
+ FIXED_CLK(CLK_TOP_HDMI_0_PLL340M, "hdmi_0_pll340m", "clk26m",
+ 340 * MHZ),
+ FIXED_CLK(CLK_TOP_HDMITX_CLKDIG_CTS, "hdmitx_dig_cts", "clk26m",
+ 300 * MHZ),
+ FIXED_CLK(CLK_TOP_HADDS2_FB, "hadds2_fbclk", "clk26m",
+ 27 * MHZ),
+ FIXED_CLK(CLK_TOP_WBG_DIG_416M, "wbg_dig_ck_416m", "clk26m",
+ 416 * MHZ),
+ FIXED_CLK(CLK_TOP_DSI0_LNTC_DSI, "dsi0_lntc_dsi", "clk26m",
+ 143 * MHZ),
+ FIXED_CLK(CLK_TOP_HDMI_SCL_RX, "hdmi_scl_rx", "clk26m",
+ 27 * MHZ),
+ FIXED_CLK(CLK_TOP_AUD_EXT1, "aud_ext1", "clk26m",
+ DUMMY_RATE),
+ FIXED_CLK(CLK_TOP_AUD_EXT2, "aud_ext2", "clk26m",
+ DUMMY_RATE),
+ FIXED_CLK(CLK_TOP_NFI1X_PAD, "nfi1x_pad", "clk26m",
+ DUMMY_RATE),
+};
+
+static const struct mtk_fixed_factor top_fixed_divs[] = {
+ FACTOR(CLK_TOP_SYSPLL, "syspll_ck", "mainpll", 1, 1),
+ FACTOR(CLK_TOP_SYSPLL_D2, "syspll_d2", "mainpll", 1, 2),
+ FACTOR(CLK_TOP_SYSPLL_D3, "syspll_d3", "mainpll", 1, 3),
+ FACTOR(CLK_TOP_SYSPLL_D5, "syspll_d5", "mainpll", 1, 5),
+ FACTOR(CLK_TOP_SYSPLL_D7, "syspll_d7", "mainpll", 1, 7),
+ FACTOR(CLK_TOP_SYSPLL1_D2, "syspll1_d2", "syspll_d2", 1, 2),
+ FACTOR(CLK_TOP_SYSPLL1_D4, "syspll1_d4", "syspll_d2", 1, 4),
+ FACTOR(CLK_TOP_SYSPLL1_D8, "syspll1_d8", "syspll_d2", 1, 8),
+ FACTOR(CLK_TOP_SYSPLL1_D16, "syspll1_d16", "syspll_d2", 1, 16),
+ FACTOR(CLK_TOP_SYSPLL2_D2, "syspll2_d2", "syspll_d3", 1, 2),
+ FACTOR(CLK_TOP_SYSPLL2_D4, "syspll2_d4", "syspll_d3", 1, 4),
+ FACTOR(CLK_TOP_SYSPLL2_D8, "syspll2_d8", "syspll_d3", 1, 8),
+ FACTOR(CLK_TOP_SYSPLL3_D2, "syspll3_d2", "syspll_d5", 1, 2),
+ FACTOR(CLK_TOP_SYSPLL3_D4, "syspll3_d4", "syspll_d5", 1, 4),
+ FACTOR(CLK_TOP_SYSPLL4_D2, "syspll4_d2", "syspll_d7", 1, 2),
+ FACTOR(CLK_TOP_SYSPLL4_D4, "syspll4_d4", "syspll_d7", 1, 4),
+
+ FACTOR(CLK_TOP_UNIVPLL, "univpll_ck", "univpll", 1, 1),
+ FACTOR(CLK_TOP_UNIVPLL_D2, "univpll_d2", "univpll", 1, 2),
+ FACTOR(CLK_TOP_UNIVPLL_D3, "univpll_d3", "univpll", 1, 3),
+ FACTOR(CLK_TOP_UNIVPLL_D5, "univpll_d5", "univpll", 1, 5),
+ FACTOR(CLK_TOP_UNIVPLL_D7, "univpll_d7", "univpll", 1, 7),
+ FACTOR(CLK_TOP_UNIVPLL_D26, "univpll_d26", "univpll", 1, 26),
+ FACTOR(CLK_TOP_UNIVPLL_D52, "univpll_d52", "univpll", 1, 52),
+ FACTOR(CLK_TOP_UNIVPLL_D108, "univpll_d108", "univpll", 1, 108),
+ FACTOR(CLK_TOP_USB_PHY48M, "usb_phy48m_ck", "univpll", 1, 26),
+ FACTOR(CLK_TOP_UNIVPLL1_D2, "univpll1_d2", "univpll_d2", 1, 2),
+ FACTOR(CLK_TOP_UNIVPLL1_D4, "univpll1_d4", "univpll_d2", 1, 4),
+ FACTOR(CLK_TOP_UNIVPLL1_D8, "univpll1_d8", "univpll_d2", 1, 8),
+ FACTOR(CLK_TOP_8BDAC, "8bdac_ck", "univpll_d2", 1, 1),
+ FACTOR(CLK_TOP_UNIVPLL2_D2, "univpll2_d2", "univpll_d3", 1, 2),
+ FACTOR(CLK_TOP_UNIVPLL2_D4, "univpll2_d4", "univpll_d3", 1, 4),
+ FACTOR(CLK_TOP_UNIVPLL2_D8, "univpll2_d8", "univpll_d3", 1, 8),
+ FACTOR(CLK_TOP_UNIVPLL2_D16, "univpll2_d16", "univpll_d3", 1, 16),
+ FACTOR(CLK_TOP_UNIVPLL2_D32, "univpll2_d32", "univpll_d3", 1, 32),
+ FACTOR(CLK_TOP_UNIVPLL3_D2, "univpll3_d2", "univpll_d5", 1, 2),
+ FACTOR(CLK_TOP_UNIVPLL3_D4, "univpll3_d4", "univpll_d5", 1, 4),
+ FACTOR(CLK_TOP_UNIVPLL3_D8, "univpll3_d8", "univpll_d5", 1, 8),
+
+ FACTOR(CLK_TOP_MSDCPLL, "msdcpll_ck", "msdcpll", 1, 1),
+ FACTOR(CLK_TOP_MSDCPLL_D2, "msdcpll_d2", "msdcpll", 1, 2),
+ FACTOR(CLK_TOP_MSDCPLL_D4, "msdcpll_d4", "msdcpll", 1, 4),
+ FACTOR(CLK_TOP_MSDCPLL_D8, "msdcpll_d8", "msdcpll", 1, 8),
+
+ FACTOR(CLK_TOP_MMPLL, "mmpll_ck", "mmpll", 1, 1),
+ FACTOR(CLK_TOP_MMPLL_D2, "mmpll_d2", "mmpll", 1, 2),
+
+ FACTOR(CLK_TOP_DMPLL_D2, "dmpll_d2", "dmpll_ck", 1, 2),
+ FACTOR(CLK_TOP_DMPLL_D4, "dmpll_d4", "dmpll_ck", 1, 4),
+ FACTOR(CLK_TOP_DMPLL_X2, "dmpll_x2", "dmpll_ck", 1, 1),
+
+ FACTOR(CLK_TOP_TVDPLL, "tvdpll_ck", "tvdpll", 1, 1),
+ FACTOR(CLK_TOP_TVDPLL_D2, "tvdpll_d2", "tvdpll", 1, 2),
+ FACTOR(CLK_TOP_TVDPLL_D4, "tvdpll_d4", "tvdpll", 1, 4),
+
+ FACTOR(CLK_TOP_VDECPLL, "vdecpll_ck", "vdecpll", 1, 1),
+ FACTOR(CLK_TOP_TVD2PLL, "tvd2pll_ck", "tvd2pll", 1, 1),
+ FACTOR(CLK_TOP_TVD2PLL_D2, "tvd2pll_d2", "tvd2pll", 1, 2),
+
+ FACTOR(CLK_TOP_MIPIPLL, "mipipll", "dpi_ck", 1, 1),
+ FACTOR(CLK_TOP_MIPIPLL_D2, "mipipll_d2", "dpi_ck", 1, 2),
+ FACTOR(CLK_TOP_MIPIPLL_D4, "mipipll_d4", "dpi_ck", 1, 4),
+
+ FACTOR(CLK_TOP_HDMIPLL, "hdmipll_ck", "hdmitx_dig_cts", 1, 1),
+ FACTOR(CLK_TOP_HDMIPLL_D2, "hdmipll_d2", "hdmitx_dig_cts", 1, 2),
+ FACTOR(CLK_TOP_HDMIPLL_D3, "hdmipll_d3", "hdmitx_dig_cts", 1, 3),
+
+ FACTOR(CLK_TOP_ARMPLL_1P3G, "armpll_1p3g_ck", "armpll", 1, 1),
+
+ FACTOR(CLK_TOP_AUDPLL, "audpll", "audpll_sel", 1, 1),
+ FACTOR(CLK_TOP_AUDPLL_D4, "audpll_d4", "audpll_sel", 1, 4),
+ FACTOR(CLK_TOP_AUDPLL_D8, "audpll_d8", "audpll_sel", 1, 8),
+ FACTOR(CLK_TOP_AUDPLL_D16, "audpll_d16", "audpll_sel", 1, 16),
+ FACTOR(CLK_TOP_AUDPLL_D24, "audpll_d24", "audpll_sel", 1, 24),
+
+ FACTOR(CLK_TOP_AUD1PLL_98M, "aud1pll_98m_ck", "aud1pll", 1, 3),
+ FACTOR(CLK_TOP_AUD2PLL_90M, "aud2pll_90m_ck", "aud2pll", 1, 3),
+ FACTOR(CLK_TOP_HADDS2PLL_98M, "hadds2pll_98m", "hadds2pll", 1, 3),
+ FACTOR(CLK_TOP_HADDS2PLL_294M, "hadds2pll_294m", "hadds2pll", 1, 1),
+ FACTOR(CLK_TOP_ETHPLL_500M, "ethpll_500m_ck", "ethpll", 1, 1),
+ FACTOR(CLK_TOP_CLK26M_D8, "clk26m_d8", "clk26m", 1, 8),
+ FACTOR(CLK_TOP_32K_INTERNAL, "32k_internal", "clk26m", 1, 793),
+ FACTOR(CLK_TOP_32K_EXTERNAL, "32k_external", "rtc32k", 1, 1),
+};
+
+static const char * const axi_parents[] = {
+ "clk26m",
+ "syspll1_d2",
+ "syspll_d5",
+ "syspll1_d4",
+ "univpll_d5",
+ "univpll2_d2",
+ "mmpll_d2",
+ "dmpll_d2"
+};
+
+static const char * const mem_parents[] = {
+ "clk26m",
+ "dmpll_ck"
+};
+
+static const char * const ddrphycfg_parents[] = {
+ "clk26m",
+ "syspll1_d8"
+};
+
+static const char * const mm_parents[] = {
+ "clk26m",
+ "vencpll_ck",
+ "syspll1_d2",
+ "syspll1_d4",
+ "univpll_d5",
+ "univpll1_d2",
+ "univpll2_d2",
+ "dmpll_ck"
+};
+
+static const char * const pwm_parents[] = {
+ "clk26m",
+ "univpll2_d4",
+ "univpll3_d2",
+ "univpll1_d4",
+};
+
+static const char * const vdec_parents[] = {
+ "clk26m",
+ "vdecpll_ck",
+ "syspll_d5",
+ "syspll1_d4",
+ "univpll_d5",
+ "univpll2_d2",
+ "vencpll_ck",
+ "msdcpll_d2",
+ "mmpll_d2"
+};
+
+static const char * const mfg_parents[] = {
+ "clk26m",
+ "mmpll_ck",
+ "dmpll_x2_ck",
+ "msdcpll_ck",
+ "clk26m",
+ "syspll_d3",
+ "univpll_d3",
+ "univpll1_d2"
+};
+
+static const char * const camtg_parents[] = {
+ "clk26m",
+ "univpll_d26",
+ "univpll2_d2",
+ "syspll3_d2",
+ "syspll3_d4",
+ "msdcpll_d2",
+ "mmpll_d2"
+};
+
+static const char * const uart_parents[] = {
+ "clk26m",
+ "univpll2_d8"
+};
+
+static const char * const spi_parents[] = {
+ "clk26m",
+ "syspll3_d2",
+ "syspll4_d2",
+ "univpll2_d4",
+ "univpll1_d8"
+};
+
+static const char * const usb20_parents[] = {
+ "clk26m",
+ "univpll1_d8",
+ "univpll3_d4"
+};
+
+static const char * const msdc30_parents[] = {
+ "clk26m",
+ "msdcpll_d2",
+ "syspll2_d2",
+ "syspll1_d4",
+ "univpll1_d4",
+ "univpll2_d4"
+};
+
+static const char * const audio_parents[] = {
+ "clk26m",
+ "syspll1_d16"
+};
+
+static const char * const aud_intbus_parents[] = {
+ "clk26m",
+ "syspll1_d4",
+ "syspll3_d2",
+ "syspll4_d2",
+ "univpll3_d2",
+ "univpll2_d4"
+};
+
+static const char * const pmicspi_parents[] = {
+ "clk26m",
+ "syspll1_d8",
+ "syspll2_d4",
+ "syspll4_d2",
+ "syspll3_d4",
+ "syspll2_d8",
+ "syspll1_d16",
+ "univpll3_d4",
+ "univpll_d26",
+ "dmpll_d2",
+ "dmpll_d4"
+};
+
+static const char * const scp_parents[] = {
+ "clk26m",
+ "syspll1_d8",
+ "dmpll_d2",
+ "dmpll_d4"
+};
+
+static const char * const dpi0_parents[] = {
+ "clk26m",
+ "mipipll",
+ "mipipll_d2",
+ "mipipll_d4",
+ "clk26m",
+ "tvdpll_ck",
+ "tvdpll_d2",
+ "tvdpll_d4"
+};
+
+static const char * const dpi1_parents[] = {
+ "clk26m",
+ "tvdpll_ck",
+ "tvdpll_d2",
+ "tvdpll_d4"
+};
+
+static const char * const tve_parents[] = {
+ "clk26m",
+ "mipipll",
+ "mipipll_d2",
+ "mipipll_d4",
+ "clk26m",
+ "tvdpll_ck",
+ "tvdpll_d2",
+ "tvdpll_d4"
+};
+
+static const char * const hdmi_parents[] = {
+ "clk26m",
+ "hdmipll_ck",
+ "hdmipll_d2",
+ "hdmipll_d3"
+};
+
+static const char * const apll_parents[] = {
+ "clk26m",
+ "audpll",
+ "audpll_d4",
+ "audpll_d8",
+ "audpll_d16",
+ "audpll_d24",
+ "clk26m",
+ "clk26m"
+};
+
+static const char * const rtc_parents[] = {
+ "32k_internal",
+ "32k_external",
+ "clk26m",
+ "univpll3_d8"
+};
+
+static const char * const nfi2x_parents[] = {
+ "clk26m",
+ "syspll2_d2",
+ "syspll_d7",
+ "univpll3_d2",
+ "syspll2_d4",
+ "univpll3_d4",
+ "syspll4_d4",
+ "clk26m"
+};
+
+static const char * const emmc_hclk_parents[] = {
+ "clk26m",
+ "syspll1_d2",
+ "syspll1_d4",
+ "syspll2_d2"
+};
+
+static const char * const flash_parents[] = {
+ "clk26m_d8",
+ "clk26m",
+ "syspll2_d8",
+ "syspll3_d4",
+ "univpll3_d4",
+ "syspll4_d2",
+ "syspll2_d4",
+ "univpll2_d4"
+};
+
+static const char * const di_parents[] = {
+ "clk26m",
+ "tvd2pll_ck",
+ "tvd2pll_d2",
+ "clk26m"
+};
+
+static const char * const nr_osd_parents[] = {
+ "clk26m",
+ "vencpll_ck",
+ "syspll1_d2",
+ "syspll1_d4",
+ "univpll_d5",
+ "univpll1_d2",
+ "univpll2_d2",
+ "dmpll_ck"
+};
+
+static const char * const hdmirx_bist_parents[] = {
+ "clk26m",
+ "syspll_d3",
+ "clk26m",
+ "syspll1_d16",
+ "syspll4_d2",
+ "syspll1_d4",
+ "vencpll_ck",
+ "clk26m"
+};
+
+static const char * const intdir_parents[] = {
+ "clk26m",
+ "mmpll_ck",
+ "syspll_d2",
+ "univpll_d2"
+};
+
+static const char * const asm_parents[] = {
+ "clk26m",
+ "univpll2_d4",
+ "univpll2_d2",
+ "syspll_d5"
+};
+
+static const char * const ms_card_parents[] = {
+ "clk26m",
+ "univpll3_d8",
+ "syspll4_d4"
+};
+
+static const char * const ethif_parents[] = {
+ "clk26m",
+ "syspll1_d2",
+ "syspll_d5",
+ "syspll1_d4",
+ "univpll_d5",
+ "univpll1_d2",
+ "dmpll_ck",
+ "dmpll_d2"
+};
+
+static const char * const hdmirx_parents[] = {
+ "clk26m",
+ "univpll_d52"
+};
+
+static const char * const cmsys_parents[] = {
+ "clk26m",
+ "syspll1_d2",
+ "univpll1_d2",
+ "univpll_d5",
+ "syspll_d5",
+ "syspll2_d2",
+ "syspll1_d4",
+ "syspll3_d2",
+ "syspll2_d4",
+ "syspll1_d8",
+ "clk26m",
+ "clk26m",
+ "clk26m",
+ "clk26m",
+ "clk26m"
+};
+
+static const char * const clk_8bdac_parents[] = {
+ "32k_internal",
+ "8bdac_ck",
+ "clk26m",
+ "clk26m"
+};
+
+static const char * const aud2dvd_parents[] = {
+ "a1sys_hp_ck",
+ "a2sys_hp_ck"
+};
+
+static const char * const padmclk_parents[] = {
+ "clk26m",
+ "univpll_d26",
+ "univpll_d52",
+ "univpll_d108",
+ "univpll2_d8",
+ "univpll2_d16",
+ "univpll2_d32"
+};
+
+static const char * const aud_mux_parents[] = {
+ "clk26m",
+ "aud1pll_98m_ck",
+ "aud2pll_90m_ck",
+ "hadds2pll_98m",
+ "audio_ext1_ck",
+ "audio_ext2_ck"
+};
+
+static const char * const aud_src_parents[] = {
+ "aud_mux1_sel",
+ "aud_mux2_sel"
+};
+
+static const char * const cpu_parents[] = {
+ "clk26m",
+ "armpll",
+ "mainpll",
+ "mmpll"
+};
+
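+/*
+ * Each MUX_GATE entry packs a parent-select field and an enable bit
+ * into one topckgen register: (reg, mux shift, mux width, gate bit).
+ * The bus, DRAM and RTC muxes are CLK_IS_CRITICAL so they are never
+ * gated.
+ */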
+static const struct mtk_composite top_muxes[] = {
+ MUX_GATE_FLAGS(CLK_TOP_AXI_SEL, "axi_sel", axi_parents,
+ 0x0040, 0, 3, 7, CLK_IS_CRITICAL),
+ MUX_GATE_FLAGS(CLK_TOP_MEM_SEL, "mem_sel", mem_parents,
+ 0x0040, 8, 1, 15, CLK_IS_CRITICAL),
+ MUX_GATE_FLAGS(CLK_TOP_DDRPHYCFG_SEL, "ddrphycfg_sel",
+ ddrphycfg_parents, 0x0040, 16, 1, 23, CLK_IS_CRITICAL),
+ MUX_GATE(CLK_TOP_MM_SEL, "mm_sel", mm_parents,
+ 0x0040, 24, 3, 31),
+
+ MUX_GATE(CLK_TOP_PWM_SEL, "pwm_sel", pwm_parents,
+ 0x0050, 0, 2, 7),
+ MUX_GATE(CLK_TOP_VDEC_SEL, "vdec_sel", vdec_parents,
+ 0x0050, 8, 4, 15),
+ MUX_GATE(CLK_TOP_MFG_SEL, "mfg_sel", mfg_parents,
+ 0x0050, 16, 3, 23),
+ MUX_GATE(CLK_TOP_CAMTG_SEL, "camtg_sel", camtg_parents,
+ 0x0050, 24, 3, 31),
+ MUX_GATE(CLK_TOP_UART_SEL, "uart_sel", uart_parents,
+ 0x0060, 0, 1, 7),
+
+ MUX_GATE(CLK_TOP_SPI0_SEL, "spi0_sel", spi_parents,
+ 0x0060, 8, 3, 15),
+ MUX_GATE(CLK_TOP_USB20_SEL, "usb20_sel", usb20_parents,
+ 0x0060, 16, 2, 23),
+ MUX_GATE(CLK_TOP_MSDC30_0_SEL, "msdc30_0_sel", msdc30_parents,
+ 0x0060, 24, 3, 31),
+
+ MUX_GATE(CLK_TOP_MSDC30_1_SEL, "msdc30_1_sel", msdc30_parents,
+ 0x0070, 0, 3, 7),
+ MUX_GATE(CLK_TOP_MSDC30_2_SEL, "msdc30_2_sel", msdc30_parents,
+ 0x0070, 8, 3, 15),
+ MUX_GATE(CLK_TOP_AUDIO_SEL, "audio_sel", audio_parents,
+ 0x0070, 16, 1, 23),
+ MUX_GATE(CLK_TOP_AUDINTBUS_SEL, "aud_intbus_sel", aud_intbus_parents,
+ 0x0070, 24, 3, 31),
+
+ MUX_GATE(CLK_TOP_PMICSPI_SEL, "pmicspi_sel", pmicspi_parents,
+ 0x0080, 0, 4, 7),
+ MUX_GATE(CLK_TOP_SCP_SEL, "scp_sel", scp_parents,
+ 0x0080, 8, 2, 15),
+ MUX_GATE(CLK_TOP_DPI0_SEL, "dpi0_sel", dpi0_parents,
+ 0x0080, 16, 3, 23),
+ MUX_GATE(CLK_TOP_DPI1_SEL, "dpi1_sel", dpi1_parents,
+ 0x0080, 24, 2, 31),
+
+ MUX_GATE(CLK_TOP_TVE_SEL, "tve_sel", tve_parents,
+ 0x0090, 0, 3, 7),
+ MUX_GATE(CLK_TOP_HDMI_SEL, "hdmi_sel", hdmi_parents,
+ 0x0090, 8, 2, 15),
+ MUX_GATE(CLK_TOP_APLL_SEL, "apll_sel", apll_parents,
+ 0x0090, 16, 3, 23),
+
+ MUX_GATE_FLAGS(CLK_TOP_RTC_SEL, "rtc_sel", rtc_parents,
+ 0x00A0, 0, 2, 7, CLK_IS_CRITICAL),
+ MUX_GATE(CLK_TOP_NFI2X_SEL, "nfi2x_sel", nfi2x_parents,
+ 0x00A0, 8, 3, 15),
+ MUX_GATE(CLK_TOP_EMMC_HCLK_SEL, "emmc_hclk_sel", emmc_hclk_parents,
+ 0x00A0, 24, 2, 31),
+
+ MUX_GATE(CLK_TOP_FLASH_SEL, "flash_sel", flash_parents,
+ 0x00B0, 0, 3, 7),
+ MUX_GATE(CLK_TOP_DI_SEL, "di_sel", di_parents,
+ 0x00B0, 8, 2, 15),
+ MUX_GATE(CLK_TOP_NR_SEL, "nr_sel", nr_osd_parents,
+ 0x00B0, 16, 3, 23),
+ MUX_GATE(CLK_TOP_OSD_SEL, "osd_sel", nr_osd_parents,
+ 0x00B0, 24, 3, 31),
+
+ MUX_GATE(CLK_TOP_HDMIRX_BIST_SEL, "hdmirx_bist_sel",
+ hdmirx_bist_parents, 0x00C0, 0, 3, 7),
+ MUX_GATE(CLK_TOP_INTDIR_SEL, "intdir_sel", intdir_parents,
+ 0x00C0, 8, 2, 15),
+ MUX_GATE(CLK_TOP_ASM_I_SEL, "asm_i_sel", asm_parents,
+ 0x00C0, 16, 2, 23),
+ MUX_GATE(CLK_TOP_ASM_M_SEL, "asm_m_sel", asm_parents,
+ 0x00C0, 24, 3, 31),
+
+ MUX_GATE(CLK_TOP_ASM_H_SEL, "asm_h_sel", asm_parents,
+ 0x00D0, 0, 2, 7),
+ MUX_GATE(CLK_TOP_MS_CARD_SEL, "ms_card_sel", ms_card_parents,
+ 0x00D0, 16, 2, 23),
+ MUX_GATE(CLK_TOP_ETHIF_SEL, "ethif_sel", ethif_parents,
+ 0x00D0, 24, 3, 31),
+
+ MUX_GATE(CLK_TOP_HDMIRX26_24_SEL, "hdmirx26_24_sel", hdmirx_parents,
+ 0x00E0, 0, 1, 7),
+ MUX_GATE(CLK_TOP_MSDC30_3_SEL, "msdc30_3_sel", msdc30_parents,
+ 0x00E0, 8, 3, 15),
+ MUX_GATE(CLK_TOP_CMSYS_SEL, "cmsys_sel", cmsys_parents,
+ 0x00E0, 16, 4, 23),
+
+ MUX_GATE(CLK_TOP_SPI1_SEL, "spi2_sel", spi_parents,
+ 0x00E0, 24, 3, 31),
+ MUX_GATE(CLK_TOP_SPI2_SEL, "spi1_sel", spi_parents,
+ 0x00F0, 0, 3, 7),
+ MUX_GATE(CLK_TOP_8BDAC_SEL, "8bdac_sel", clk_8bdac_parents,
+ 0x00F0, 8, 2, 15),
+ MUX_GATE(CLK_TOP_AUD2DVD_SEL, "aud2dvd_sel", aud2dvd_parents,
+ 0x00F0, 16, 1, 23),
+
+ MUX(CLK_TOP_PADMCLK_SEL, "padmclk_sel", padmclk_parents,
+ 0x0100, 0, 3),
+
+ MUX(CLK_TOP_AUD_MUX1_SEL, "aud_mux1_sel", aud_mux_parents,
+ 0x012c, 0, 3),
+ MUX(CLK_TOP_AUD_MUX2_SEL, "aud_mux2_sel", aud_mux_parents,
+ 0x012c, 3, 3),
+ MUX(CLK_TOP_AUDPLL_MUX_SEL, "audpll_sel", aud_mux_parents,
+ 0x012c, 6, 3),
+ MUX_GATE(CLK_TOP_AUD_K1_SRC_SEL, "aud_k1_src_sel", aud_src_parents,
+ 0x012c, 15, 1, 23),
+ MUX_GATE(CLK_TOP_AUD_K2_SRC_SEL, "aud_k2_src_sel", aud_src_parents,
+ 0x012c, 16, 1, 24),
+ MUX_GATE(CLK_TOP_AUD_K3_SRC_SEL, "aud_k3_src_sel", aud_src_parents,
+ 0x012c, 17, 1, 25),
+ MUX_GATE(CLK_TOP_AUD_K4_SRC_SEL, "aud_k4_src_sel", aud_src_parents,
+ 0x012c, 18, 1, 26),
+ MUX_GATE(CLK_TOP_AUD_K5_SRC_SEL, "aud_k5_src_sel", aud_src_parents,
+ 0x012c, 19, 1, 27),
+ MUX_GATE(CLK_TOP_AUD_K6_SRC_SEL, "aud_k6_src_sel", aud_src_parents,
+ 0x012c, 20, 1, 28),
+};
+
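+/*
+ * 8-bit adjustable dividers in the audio clock tree:
+ * DIV_ADJ(id, name, parent, reg, shift, width).
+ */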
+static const struct mtk_clk_divider top_adj_divs[] = {
+ DIV_ADJ(CLK_TOP_AUD_EXTCK1_DIV, "audio_ext1_ck", "aud_ext1",
+ 0x0120, 0, 8),
+ DIV_ADJ(CLK_TOP_AUD_EXTCK2_DIV, "audio_ext2_ck", "aud_ext2",
+ 0x0120, 8, 8),
+ DIV_ADJ(CLK_TOP_AUD_MUX1_DIV, "aud_mux1_div", "aud_mux1_sel",
+ 0x0120, 16, 8),
+ DIV_ADJ(CLK_TOP_AUD_MUX2_DIV, "aud_mux2_div", "aud_mux2_sel",
+ 0x0120, 24, 8),
+ DIV_ADJ(CLK_TOP_AUD_K1_SRC_DIV, "aud_k1_src_div", "aud_k1_src_sel",
+ 0x0124, 0, 8),
+ DIV_ADJ(CLK_TOP_AUD_K2_SRC_DIV, "aud_k2_src_div", "aud_k2_src_sel",
+ 0x0124, 8, 8),
+ DIV_ADJ(CLK_TOP_AUD_K3_SRC_DIV, "aud_k3_src_div", "aud_k3_src_sel",
+ 0x0124, 16, 8),
+ DIV_ADJ(CLK_TOP_AUD_K4_SRC_DIV, "aud_k4_src_div", "aud_k4_src_sel",
+ 0x0124, 24, 8),
+ DIV_ADJ(CLK_TOP_AUD_K5_SRC_DIV, "aud_k5_src_div", "aud_k5_src_sel",
+ 0x0128, 0, 8),
+ DIV_ADJ(CLK_TOP_AUD_K6_SRC_DIV, "aud_k6_src_div", "aud_k6_src_sel",
+ 0x0128, 8, 8),
+};
+
+static const struct mtk_gate_regs top_aud_cg_regs = {
+ .sta_ofs = 0x012C,
+};
+
+#define GATE_TOP_AUD(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &top_aud_cg_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_ops_no_setclr, \
+ }
+
+static const struct mtk_gate top_clks[] = {
+ GATE_TOP_AUD(CLK_TOP_AUD_48K_TIMING, "a1sys_hp_ck", "aud_mux1_div",
+ 21),
+ GATE_TOP_AUD(CLK_TOP_AUD_44K_TIMING, "a2sys_hp_ck", "aud_mux2_div",
+ 22),
+ GATE_TOP_AUD(CLK_TOP_AUD_I2S1_MCLK, "aud_i2s1_mclk", "aud_k1_src_div",
+ 23),
+ GATE_TOP_AUD(CLK_TOP_AUD_I2S2_MCLK, "aud_i2s2_mclk", "aud_k2_src_div",
+ 24),
+ GATE_TOP_AUD(CLK_TOP_AUD_I2S3_MCLK, "aud_i2s3_mclk", "aud_k3_src_div",
+ 25),
+ GATE_TOP_AUD(CLK_TOP_AUD_I2S4_MCLK, "aud_i2s4_mclk", "aud_k4_src_div",
+ 26),
+ GATE_TOP_AUD(CLK_TOP_AUD_I2S5_MCLK, "aud_i2s5_mclk", "aud_k5_src_div",
+ 27),
+ GATE_TOP_AUD(CLK_TOP_AUD_I2S6_MCLK, "aud_i2s6_mclk", "aud_k6_src_div",
+ 28),
+};
+
+static int mtk_topckgen_init(struct platform_device *pdev)
+{
+ struct clk_onecell_data *clk_data;
+ void __iomem *base;
+ struct device_node *node = pdev->dev.of_node;
+ struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+ base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ clk_data = mtk_alloc_clk_data(CLK_TOP_NR);
+
+ mtk_clk_register_fixed_clks(top_fixed_clks, ARRAY_SIZE(top_fixed_clks),
+ clk_data);
+
+ mtk_clk_register_factors(top_fixed_divs, ARRAY_SIZE(top_fixed_divs),
+ clk_data);
+
+ mtk_clk_register_composites(top_muxes, ARRAY_SIZE(top_muxes),
+ base, &mt2701_clk_lock, clk_data);
+
+ mtk_clk_register_dividers(top_adj_divs, ARRAY_SIZE(top_adj_divs),
+ base, &mt2701_clk_lock, clk_data);
+
+ mtk_clk_register_gates(node, top_clks, ARRAY_SIZE(top_clks),
+ clk_data);
+
+ return of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+}
+
+static const struct mtk_gate_regs infra_cg_regs = {
+ .set_ofs = 0x0040,
+ .clr_ofs = 0x0044,
+ .sta_ofs = 0x0048,
+};
+
+#define GATE_ICG(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &infra_cg_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_ops_setclr, \
+ }
+
+static const struct mtk_gate infra_clks[] = {
+ GATE_ICG(CLK_INFRA_DBG, "dbgclk", "axi_sel", 0),
+ GATE_ICG(CLK_INFRA_SMI, "smi_ck", "mm_sel", 1),
+ GATE_ICG(CLK_INFRA_QAXI_CM4, "cm4_ck", "axi_sel", 2),
+ GATE_ICG(CLK_INFRA_AUD_SPLIN_B, "audio_splin_bck", "hadds2pll_294m", 4),
+ GATE_ICG(CLK_INFRA_AUDIO, "audio_ck", "clk26m", 5),
+ GATE_ICG(CLK_INFRA_EFUSE, "efuse_ck", "clk26m", 6),
+ GATE_ICG(CLK_INFRA_L2C_SRAM, "l2c_sram_ck", "mm_sel", 7),
+ GATE_ICG(CLK_INFRA_M4U, "m4u_ck", "mem_sel", 8),
+ GATE_ICG(CLK_INFRA_CONNMCU, "connsys_bus", "wbg_dig_ck_416m", 12),
+ GATE_ICG(CLK_INFRA_TRNG, "trng_ck", "axi_sel", 13),
+ GATE_ICG(CLK_INFRA_RAMBUFIF, "rambufif_ck", "mem_sel", 14),
+ GATE_ICG(CLK_INFRA_CPUM, "cpum_ck", "mem_sel", 15),
+ GATE_ICG(CLK_INFRA_KP, "kp_ck", "axi_sel", 16),
+ GATE_ICG(CLK_INFRA_CEC, "cec_ck", "rtc_sel", 18),
+ GATE_ICG(CLK_INFRA_IRRX, "irrx_ck", "axi_sel", 19),
+ GATE_ICG(CLK_INFRA_PMICSPI, "pmicspi_ck", "pmicspi_sel", 22),
+ GATE_ICG(CLK_INFRA_PMICWRAP, "pmicwrap_ck", "axi_sel", 23),
+ GATE_ICG(CLK_INFRA_DDCCI, "ddcci_ck", "axi_sel", 24),
+};
+
+static const struct mtk_fixed_factor infra_fixed_divs[] = {
+ FACTOR(CLK_INFRA_CLK_13M, "clk13m", "clk26m", 1, 2),
+};
+
+static struct clk_onecell_data *infra_clk_data;
+
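+/*
+ * infracfg is brought up in two steps: CLK_OF_DECLARE_DRIVER registers
+ * an early provider (fixed factors only, every other slot primed with
+ * -EPROBE_DEFER) so core code can get clk13m before the platform bus
+ * exists; the platform probe below then fills in the gates and turns
+ * any slot that is still deferred into -ENOENT.
+ */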
+static void mtk_infrasys_init_early(struct device_node *node)
+{
+ int r, i;
+
+ if (!infra_clk_data) {
+ infra_clk_data = mtk_alloc_clk_data(CLK_INFRA_NR);
+
+ for (i = 0; i < CLK_INFRA_NR; i++)
+ infra_clk_data->clks[i] = ERR_PTR(-EPROBE_DEFER);
+ }
+
+ mtk_clk_register_factors(infra_fixed_divs, ARRAY_SIZE(infra_fixed_divs),
+ infra_clk_data);
+
+ r = of_clk_add_provider(node, of_clk_src_onecell_get, infra_clk_data);
+ if (r)
+ pr_err("%s(): could not register clock provider: %d\n",
+ __func__, r);
+}
+CLK_OF_DECLARE_DRIVER(mtk_infra, "mediatek,mt2701-infracfg",
+ mtk_infrasys_init_early);
+
+static int mtk_infrasys_init(struct platform_device *pdev)
+{
+ int r, i;
+ struct device_node *node = pdev->dev.of_node;
+
+ if (!infra_clk_data) {
+ infra_clk_data = mtk_alloc_clk_data(CLK_INFRA_NR);
+ } else {
+ for (i = 0; i < CLK_INFRA_NR; i++) {
+ if (infra_clk_data->clks[i] == ERR_PTR(-EPROBE_DEFER))
+ infra_clk_data->clks[i] = ERR_PTR(-ENOENT);
+ }
+ }
+
+ mtk_clk_register_gates(node, infra_clks, ARRAY_SIZE(infra_clks),
+ infra_clk_data);
+ mtk_clk_register_factors(infra_fixed_divs, ARRAY_SIZE(infra_fixed_divs),
+ infra_clk_data);
+
+ r = of_clk_add_provider(node, of_clk_src_onecell_get, infra_clk_data);
+ if (r)
+ return r;
+
+ mtk_register_reset_controller(node, 2, 0x30);
+
+ return 0;
+}
+
+static const struct mtk_gate_regs peri0_cg_regs = {
+ .set_ofs = 0x0008,
+ .clr_ofs = 0x0010,
+ .sta_ofs = 0x0018,
+};
+
+static const struct mtk_gate_regs peri1_cg_regs = {
+ .set_ofs = 0x000c,
+ .clr_ofs = 0x0014,
+ .sta_ofs = 0x001c,
+};
+
+#define GATE_PERI0(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &peri0_cg_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_ops_setclr, \
+ }
+
+#define GATE_PERI1(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &peri1_cg_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_ops_setclr, \
+ }
+
+static const struct mtk_gate peri_clks[] = {
+ GATE_PERI0(CLK_PERI_USB0_MCU, "usb0_mcu_ck", "axi_sel", 31),
+ GATE_PERI0(CLK_PERI_ETH, "eth_ck", "clk26m", 30),
+ GATE_PERI0(CLK_PERI_SPI0, "spi0_ck", "spi0_sel", 29),
+ GATE_PERI0(CLK_PERI_AUXADC, "auxadc_ck", "clk26m", 28),
+ GATE_PERI0(CLK_PERI_I2C3, "i2c3_ck", "clk26m", 27),
+ GATE_PERI0(CLK_PERI_I2C2, "i2c2_ck", "axi_sel", 26),
+ GATE_PERI0(CLK_PERI_I2C1, "i2c1_ck", "axi_sel", 25),
+ GATE_PERI0(CLK_PERI_I2C0, "i2c0_ck", "axi_sel", 24),
+ GATE_PERI0(CLK_PERI_BTIF, "btif_ck", "axi_sel", 23),
+ GATE_PERI0(CLK_PERI_UART3, "uart3_ck", "axi_sel", 22),
+ GATE_PERI0(CLK_PERI_UART2, "uart2_ck", "axi_sel", 21),
+ GATE_PERI0(CLK_PERI_UART1, "uart1_ck", "axi_sel", 20),
+ GATE_PERI0(CLK_PERI_UART0, "uart0_ck", "axi_sel", 19),
+ GATE_PERI0(CLK_PERI_NLI, "nli_ck", "axi_sel", 18),
+ GATE_PERI0(CLK_PERI_MSDC50_3, "msdc50_3_ck", "emmc_hclk_sel", 17),
+ GATE_PERI0(CLK_PERI_MSDC30_3, "msdc30_3_ck", "msdc30_3_sel", 16),
+ GATE_PERI0(CLK_PERI_MSDC30_2, "msdc30_2_ck", "msdc30_2_sel", 15),
+ GATE_PERI0(CLK_PERI_MSDC30_1, "msdc30_1_ck", "msdc30_1_sel", 14),
+ GATE_PERI0(CLK_PERI_MSDC30_0, "msdc30_0_ck", "msdc30_0_sel", 13),
+ GATE_PERI0(CLK_PERI_AP_DMA, "ap_dma_ck", "axi_sel", 12),
+ GATE_PERI0(CLK_PERI_USB1, "usb1_ck", "usb20_sel", 11),
+ GATE_PERI0(CLK_PERI_USB0, "usb0_ck", "usb20_sel", 10),
+ GATE_PERI0(CLK_PERI_PWM, "pwm_ck", "axi_sel", 9),
+ GATE_PERI0(CLK_PERI_PWM7, "pwm7_ck", "axi_sel", 8),
+ GATE_PERI0(CLK_PERI_PWM6, "pwm6_ck", "axi_sel", 7),
+ GATE_PERI0(CLK_PERI_PWM5, "pwm5_ck", "axi_sel", 6),
+ GATE_PERI0(CLK_PERI_PWM4, "pwm4_ck", "axi_sel", 5),
+ GATE_PERI0(CLK_PERI_PWM3, "pwm3_ck", "axi_sel", 4),
+ GATE_PERI0(CLK_PERI_PWM2, "pwm2_ck", "axi_sel", 3),
+ GATE_PERI0(CLK_PERI_PWM1, "pwm1_ck", "axi_sel", 2),
+ GATE_PERI0(CLK_PERI_THERM, "therm_ck", "axi_sel", 1),
+ GATE_PERI0(CLK_PERI_NFI, "nfi_ck", "nfi2x_sel", 0),
+
+ GATE_PERI1(CLK_PERI_FCI, "fci_ck", "ms_card_sel", 11),
+ GATE_PERI1(CLK_PERI_SPI2, "spi2_ck", "spi2_sel", 10),
+ GATE_PERI1(CLK_PERI_SPI1, "spi1_ck", "spi1_sel", 9),
+ GATE_PERI1(CLK_PERI_HOST89_DVD, "host89_dvd_ck", "aud2dvd_sel", 8),
+ GATE_PERI1(CLK_PERI_HOST89_SPI, "host89_spi_ck", "spi0_sel", 7),
+ GATE_PERI1(CLK_PERI_HOST89_INT, "host89_int_ck", "axi_sel", 6),
+ GATE_PERI1(CLK_PERI_FLASH, "flash_ck", "nfi2x_sel", 5),
+ GATE_PERI1(CLK_PERI_NFI_PAD, "nfi_pad_ck", "nfi1x_pad", 4),
+ GATE_PERI1(CLK_PERI_NFI_ECC, "nfi_ecc_ck", "nfi1x_pad", 3),
+ GATE_PERI1(CLK_PERI_GCPU, "gcpu_ck", "axi_sel", 2),
+ GATE_PERI1(CLK_PERI_USB_SLV, "usbslv_ck", "axi_sel", 1),
+ GATE_PERI1(CLK_PERI_USB1_MCU, "usb1_mcu_ck", "axi_sel", 0),
+};
+
+static const char * const uart_ck_sel_parents[] = {
+ "clk26m",
+ "uart_sel",
+};
+
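+/* One-bit muxes: each UART selects the 26 MHz crystal or the shared uart_sel. */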
+static const struct mtk_composite peri_muxs[] = {
+ MUX(CLK_PERI_UART0_SEL, "uart0_ck_sel", uart_ck_sel_parents,
+ 0x40c, 0, 1),
+ MUX(CLK_PERI_UART1_SEL, "uart1_ck_sel", uart_ck_sel_parents,
+ 0x40c, 1, 1),
+ MUX(CLK_PERI_UART2_SEL, "uart2_ck_sel", uart_ck_sel_parents,
+ 0x40c, 2, 1),
+ MUX(CLK_PERI_UART3_SEL, "uart3_ck_sel", uart_ck_sel_parents,
+ 0x40c, 3, 1),
+};
+
+static int mtk_pericfg_init(struct platform_device *pdev)
+{
+ struct clk_onecell_data *clk_data;
+ void __iomem *base;
+ int r;
+ struct device_node *node = pdev->dev.of_node;
+ struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+ base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ clk_data = mtk_alloc_clk_data(CLK_PERI_NR);
+
+ mtk_clk_register_gates(node, peri_clks, ARRAY_SIZE(peri_clks),
+ clk_data);
+
+ mtk_clk_register_composites(peri_muxs, ARRAY_SIZE(peri_muxs), base,
+ &mt2701_clk_lock, clk_data);
+
+ r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+ if (r)
+ return r;
+
+ mtk_register_reset_controller(node, 2, 0x0);
+
+ return 0;
+}
+
+#define MT2701_PLL_FMAX (2000 * MHZ)
+#define CON0_MT2701_RST_BAR BIT(27)
+
+#define PLL(_id, _name, _reg, _pwr_reg, _en_mask, _flags, _pcwbits, _pd_reg, \
+ _pd_shift, _tuner_reg, _pcw_reg, _pcw_shift) { \
+ .id = _id, \
+ .name = _name, \
+ .reg = _reg, \
+ .pwr_reg = _pwr_reg, \
+ .en_mask = _en_mask, \
+ .flags = _flags, \
+ .rst_bar_mask = CON0_MT2701_RST_BAR, \
+ .fmax = MT2701_PLL_FMAX, \
+ .pcwbits = _pcwbits, \
+ .pd_reg = _pd_reg, \
+ .pd_shift = _pd_shift, \
+ .tuner_reg = _tuner_reg, \
+ .pcw_reg = _pcw_reg, \
+ .pcw_shift = _pcw_shift, \
+ }
+
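+/*
+ * pcwbits gives the width of the PCW (feedback divider) field, pd_*
+ * locate the post divider, and PLL_AO marks always-on PLLs, which
+ * clk-pll.c registers with CLK_IS_CRITICAL.
+ */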
+static const struct mtk_pll_data apmixed_plls[] = {
+ PLL(CLK_APMIXED_ARMPLL, "armpll", 0x200, 0x20c, 0x80000001,
+ PLL_AO, 21, 0x204, 24, 0x0, 0x204, 0),
+ PLL(CLK_APMIXED_MAINPLL, "mainpll", 0x210, 0x21c, 0xf0000001,
+ HAVE_RST_BAR, 21, 0x210, 4, 0x0, 0x214, 0),
+ PLL(CLK_APMIXED_UNIVPLL, "univpll", 0x220, 0x22c, 0xf3000001,
+ HAVE_RST_BAR, 7, 0x220, 4, 0x0, 0x224, 14),
+ PLL(CLK_APMIXED_MMPLL, "mmpll", 0x230, 0x23c, 0x00000001, 0,
+ 21, 0x230, 4, 0x0, 0x234, 0),
+ PLL(CLK_APMIXED_MSDCPLL, "msdcpll", 0x240, 0x24c, 0x00000001, 0,
+ 21, 0x240, 4, 0x0, 0x244, 0),
+ PLL(CLK_APMIXED_TVDPLL, "tvdpll", 0x250, 0x25c, 0x00000001, 0,
+ 21, 0x250, 4, 0x0, 0x254, 0),
+ PLL(CLK_APMIXED_AUD1PLL, "aud1pll", 0x270, 0x27c, 0x00000001, 0,
+ 31, 0x270, 4, 0x0, 0x274, 0),
+ PLL(CLK_APMIXED_TRGPLL, "trgpll", 0x280, 0x28c, 0x00000001, 0,
+ 31, 0x280, 4, 0x0, 0x284, 0),
+ PLL(CLK_APMIXED_ETHPLL, "ethpll", 0x290, 0x29c, 0x00000001, 0,
+ 31, 0x290, 4, 0x0, 0x294, 0),
+ PLL(CLK_APMIXED_VDECPLL, "vdecpll", 0x2a0, 0x2ac, 0x00000001, 0,
+ 31, 0x2a0, 4, 0x0, 0x2a4, 0),
+ PLL(CLK_APMIXED_HADDS2PLL, "hadds2pll", 0x2b0, 0x2bc, 0x00000001, 0,
+ 31, 0x2b0, 4, 0x0, 0x2b4, 0),
+ PLL(CLK_APMIXED_AUD2PLL, "aud2pll", 0x2c0, 0x2cc, 0x00000001, 0,
+ 31, 0x2c0, 4, 0x0, 0x2c4, 0),
+ PLL(CLK_APMIXED_TVD2PLL, "tvd2pll", 0x2d0, 0x2dc, 0x00000001, 0,
+ 21, 0x2d0, 4, 0x0, 0x2d4, 0),
+};
+
+static int mtk_apmixedsys_init(struct platform_device *pdev)
+{
+ struct clk_onecell_data *clk_data;
+ struct device_node *node = pdev->dev.of_node;
+
+ clk_data = mtk_alloc_clk_data(CLK_APMIXED_NR);
+ if (!clk_data)
+ return -ENOMEM;
+
+ mtk_clk_register_plls(node, apmixed_plls, ARRAY_SIZE(apmixed_plls),
+ clk_data);
+
+ return of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+}
+
+static const struct of_device_id of_match_clk_mt2701[] = {
+ {
+ .compatible = "mediatek,mt2701-topckgen",
+ .data = mtk_topckgen_init,
+ }, {
+ .compatible = "mediatek,mt2701-infracfg",
+ .data = mtk_infrasys_init,
+ }, {
+ .compatible = "mediatek,mt2701-pericfg",
+ .data = mtk_pericfg_init,
+ }, {
+ .compatible = "mediatek,mt2701-apmixedsys",
+ .data = mtk_apmixedsys_init,
+ }, {
+ /* sentinel */
+ }
+};
+
+static int clk_mt2701_probe(struct platform_device *pdev)
+{
+ int (*clk_init)(struct platform_device *);
+ int r;
+
+ clk_init = of_device_get_match_data(&pdev->dev);
+ if (!clk_init)
+ return -EINVAL;
+
+ r = clk_init(pdev);
+ if (r)
+ dev_err(&pdev->dev,
+ "could not register clock provider: %s: %d\n",
+ pdev->name, r);
+
+ return r;
+}
+
+static struct platform_driver clk_mt2701_drv = {
+ .probe = clk_mt2701_probe,
+ .driver = {
+ .name = "clk-mt2701",
+ .of_match_table = of_match_clk_mt2701,
+ },
+};
+
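+/*
+ * Use arch_initcall, which runs before the device_initcall used by
+ * builtin_platform_driver(), so the providers exist before most
+ * consumers probe.
+ */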
+static int __init clk_mt2701_init(void)
+{
+ return platform_driver_register(&clk_mt2701_drv);
+}
+
+arch_initcall(clk_mt2701_init);
diff --git a/drivers/clk/mediatek/clk-mtk.c b/drivers/clk/mediatek/clk-mtk.c
index bb30f7063569..0541df78141c 100644
--- a/drivers/clk/mediatek/clk-mtk.c
+++ b/drivers/clk/mediatek/clk-mtk.c
@@ -58,6 +58,9 @@ void mtk_clk_register_fixed_clks(const struct mtk_fixed_clk *clks,
for (i = 0; i < num; i++) {
const struct mtk_fixed_clk *rc = &clks[i];
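+ /* Skip slots an earlier provider (e.g. the early infracfg init) already filled. */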
+ if (clk_data && !IS_ERR_OR_NULL(clk_data->clks[rc->id]))
+ continue;
+
clk = clk_register_fixed_rate(NULL, rc->name, rc->parent, 0,
rc->rate);
@@ -81,6 +84,9 @@ void mtk_clk_register_factors(const struct mtk_fixed_factor *clks,
for (i = 0; i < num; i++) {
const struct mtk_fixed_factor *ff = &clks[i];
+ if (clk_data && !IS_ERR_OR_NULL(clk_data->clks[ff->id]))
+ continue;
+
clk = clk_register_fixed_factor(NULL, ff->name, ff->parent_name,
CLK_SET_RATE_PARENT, ff->mult, ff->div);
@@ -116,6 +122,9 @@ int mtk_clk_register_gates(struct device_node *node,
for (i = 0; i < num; i++) {
const struct mtk_gate *gate = &clks[i];
+ if (!IS_ERR_OR_NULL(clk_data->clks[gate->id]))
+ continue;
+
clk = mtk_clk_register_gate(gate->name, gate->parent_name,
regmap,
gate->regs->set_ofs,
@@ -232,6 +241,9 @@ void mtk_clk_register_composites(const struct mtk_composite *mcs,
for (i = 0; i < num; i++) {
const struct mtk_composite *mc = &mcs[i];
+ if (clk_data && !IS_ERR_OR_NULL(clk_data->clks[mc->id]))
+ continue;
+
clk = mtk_clk_register_composite(mc, base, lock);
if (IS_ERR(clk)) {
@@ -244,3 +256,31 @@ void mtk_clk_register_composites(const struct mtk_composite *mcs,
clk_data->clks[mc->id] = clk;
}
}
+
+void mtk_clk_register_dividers(const struct mtk_clk_divider *mcds,
+ int num, void __iomem *base, spinlock_t *lock,
+ struct clk_onecell_data *clk_data)
+{
+ struct clk *clk;
+ int i;
+
+ for (i = 0; i < num; i++) {
+ const struct mtk_clk_divider *mcd = &mcds[i];
+
+ if (clk_data && !IS_ERR_OR_NULL(clk_data->clks[mcd->id]))
+ continue;
+
+ clk = clk_register_divider(NULL, mcd->name, mcd->parent_name,
+ mcd->flags, base + mcd->div_reg, mcd->div_shift,
+ mcd->div_width, mcd->clk_divider_flags, lock);
+
+ if (IS_ERR(clk)) {
+ pr_err("Failed to register clk %s: %ld\n",
+ mcd->name, PTR_ERR(clk));
+ continue;
+ }
+
+ if (clk_data)
+ clk_data->clks[mcd->id] = clk;
+ }
+}
diff --git a/drivers/clk/mediatek/clk-mtk.h b/drivers/clk/mediatek/clk-mtk.h
index 9f24fcfa304f..f5d6b70ce189 100644
--- a/drivers/clk/mediatek/clk-mtk.h
+++ b/drivers/clk/mediatek/clk-mtk.h
@@ -87,7 +87,8 @@ struct mtk_composite {
* In case the rate change propagation to parent clocks is undesirable,
* this macro allows to specify the clock flags manually.
*/
-#define MUX_GATE_FLAGS(_id, _name, _parents, _reg, _shift, _width, _gate, _flags) { \
+#define MUX_GATE_FLAGS(_id, _name, _parents, _reg, _shift, _width, \
+ _gate, _flags) { \
.id = _id, \
.name = _name, \
.mux_reg = _reg, \
@@ -106,7 +107,8 @@ struct mtk_composite {
* parent clock by default.
*/
#define MUX_GATE(_id, _name, _parents, _reg, _shift, _width, _gate) \
- MUX_GATE_FLAGS(_id, _name, _parents, _reg, _shift, _width, _gate, CLK_SET_RATE_PARENT)
+ MUX_GATE_FLAGS(_id, _name, _parents, _reg, _shift, _width, \
+ _gate, CLK_SET_RATE_PARENT)
#define MUX(_id, _name, _parents, _reg, _shift, _width) { \
.id = _id, \
@@ -121,7 +123,8 @@ struct mtk_composite {
.flags = CLK_SET_RATE_PARENT, \
}
-#define DIV_GATE(_id, _name, _parent, _gate_reg, _gate_shift, _div_reg, _div_width, _div_shift) { \
+#define DIV_GATE(_id, _name, _parent, _gate_reg, _gate_shift, _div_reg, \
+ _div_width, _div_shift) { \
.id = _id, \
.parent = _parent, \
.name = _name, \
@@ -156,12 +159,40 @@ struct mtk_gate {
const struct clk_ops *ops;
};
-int mtk_clk_register_gates(struct device_node *node, const struct mtk_gate *clks,
- int num, struct clk_onecell_data *clk_data);
+int mtk_clk_register_gates(struct device_node *node,
+ const struct mtk_gate *clks, int num,
+ struct clk_onecell_data *clk_data);
+
+struct mtk_clk_divider {
+ int id;
+ const char *name;
+ const char *parent_name;
+ unsigned long flags;
+
+ u32 div_reg;
+ unsigned char div_shift;
+ unsigned char div_width;
+ unsigned char clk_divider_flags;
+ const struct clk_div_table *clk_div_table;
+};
+
+#define DIV_ADJ(_id, _name, _parent, _reg, _shift, _width) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .div_reg = _reg, \
+ .div_shift = _shift, \
+ .div_width = _width, \
+}
+
+void mtk_clk_register_dividers(const struct mtk_clk_divider *mcds,
+ int num, void __iomem *base, spinlock_t *lock,
+ struct clk_onecell_data *clk_data);
struct clk_onecell_data *mtk_alloc_clk_data(unsigned int clk_num);
#define HAVE_RST_BAR BIT(0)
+#define PLL_AO BIT(1)
struct mtk_pll_div_table {
u32 div;
diff --git a/drivers/clk/mediatek/clk-pll.c b/drivers/clk/mediatek/clk-pll.c
index 0c2deac17ce9..a409142e9346 100644
--- a/drivers/clk/mediatek/clk-pll.c
+++ b/drivers/clk/mediatek/clk-pll.c
@@ -301,6 +301,7 @@ static struct clk *mtk_clk_register_pll(const struct mtk_pll_data *data,
pll->data = data;
init.name = data->name;
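+ /* Always-on (PLL_AO) PLLs must never be gated, so mark them critical. */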
+ init.flags = (data->flags & PLL_AO) ? CLK_IS_CRITICAL : 0;
init.ops = &mtk_pll_ops;
init.parent_names = &parent_name;
init.num_parents = 1;
diff --git a/drivers/clk/mmp/clk-of-mmp2.c b/drivers/clk/mmp/clk-of-mmp2.c
index 9adaf48aea23..0fc75c395957 100644
--- a/drivers/clk/mmp/clk-of-mmp2.c
+++ b/drivers/clk/mmp/clk-of-mmp2.c
@@ -309,19 +309,19 @@ static void __init mmp2_clk_init(struct device_node *np)
pxa_unit->mpmu_base = of_iomap(np, 0);
if (!pxa_unit->mpmu_base) {
pr_err("failed to map mpmu registers\n");
- return;
+ goto free_memory;
}
pxa_unit->apmu_base = of_iomap(np, 1);
if (!pxa_unit->apmu_base) {
pr_err("failed to map apmu registers\n");
- return;
+ goto unmap_mpmu_region;
}
pxa_unit->apbc_base = of_iomap(np, 2);
if (!pxa_unit->apbc_base) {
pr_err("failed to map apbc registers\n");
- return;
+ goto unmap_apmu_region;
}
mmp_clk_init(np, &pxa_unit->unit, MMP2_NR_CLKS);
@@ -333,6 +333,15 @@ static void __init mmp2_clk_init(struct device_node *np)
mmp2_axi_periph_clk_init(pxa_unit);
mmp2_clk_reset_init(np, pxa_unit);
+
+ return;
+
+unmap_apmu_region:
+ iounmap(pxa_unit->apmu_base);
+unmap_mpmu_region:
+ iounmap(pxa_unit->mpmu_base);
+free_memory:
+ kfree(pxa_unit);
}
CLK_OF_DECLARE(mmp2_clk, "marvell,mmp2-clock", mmp2_clk_init);
diff --git a/drivers/clk/mmp/clk-of-pxa1928.c b/drivers/clk/mmp/clk-of-pxa1928.c
index e478ff44e170..cede7b4ca3b9 100644
--- a/drivers/clk/mmp/clk-of-pxa1928.c
+++ b/drivers/clk/mmp/clk-of-pxa1928.c
@@ -216,6 +216,7 @@ static void __init pxa1928_mpmu_clk_init(struct device_node *np)
pxa_unit->mpmu_base = of_iomap(np, 0);
if (!pxa_unit->mpmu_base) {
pr_err("failed to map mpmu registers\n");
+ kfree(pxa_unit);
return;
}
@@ -234,6 +235,7 @@ static void __init pxa1928_apmu_clk_init(struct device_node *np)
pxa_unit->apmu_base = of_iomap(np, 0);
if (!pxa_unit->apmu_base) {
pr_err("failed to map apmu registers\n");
+ kfree(pxa_unit);
return;
}
@@ -254,6 +256,7 @@ static void __init pxa1928_apbc_clk_init(struct device_node *np)
pxa_unit->apbc_base = of_iomap(np, 0);
if (!pxa_unit->apbc_base) {
pr_err("failed to map apbc registers\n");
+ kfree(pxa_unit);
return;
}
diff --git a/drivers/clk/mmp/clk-of-pxa910.c b/drivers/clk/mmp/clk-of-pxa910.c
index 64d1ef49caeb..1dcabe95cb67 100644
--- a/drivers/clk/mmp/clk-of-pxa910.c
+++ b/drivers/clk/mmp/clk-of-pxa910.c
@@ -278,25 +278,25 @@ static void __init pxa910_clk_init(struct device_node *np)
pxa_unit->mpmu_base = of_iomap(np, 0);
if (!pxa_unit->mpmu_base) {
pr_err("failed to map mpmu registers\n");
- return;
+ goto free_memory;
}
pxa_unit->apmu_base = of_iomap(np, 1);
if (!pxa_unit->apmu_base) {
pr_err("failed to map apmu registers\n");
- return;
+ goto unmap_mpmu_region;
}
pxa_unit->apbc_base = of_iomap(np, 2);
if (!pxa_unit->apbc_base) {
pr_err("failed to map apbc registers\n");
- return;
+ goto unmap_apmu_region;
}
pxa_unit->apbcp_base = of_iomap(np, 3);
if (!pxa_unit->apbcp_base) {
pr_err("failed to map apbcp registers\n");
- return;
+ goto unmap_apbc_region;
}
mmp_clk_init(np, &pxa_unit->unit, PXA910_NR_CLKS);
@@ -308,6 +308,17 @@ static void __init pxa910_clk_init(struct device_node *np)
pxa910_axi_periph_clk_init(pxa_unit);
pxa910_clk_reset_init(np, pxa_unit);
+
+ return;
+
+unmap_apbc_region:
+ iounmap(pxa_unit->apbc_base);
+unmap_apmu_region:
+ iounmap(pxa_unit->apmu_base);
+unmap_mpmu_region:
+ iounmap(pxa_unit->mpmu_base);
+free_memory:
+ kfree(pxa_unit);
}
CLK_OF_DECLARE(pxa910_clk, "marvell,pxa910-clock", pxa910_clk_init);
diff --git a/drivers/clk/mvebu/ap806-system-controller.c b/drivers/clk/mvebu/ap806-system-controller.c
index 02023baf86c9..8181b919f062 100644
--- a/drivers/clk/mvebu/ap806-system-controller.c
+++ b/drivers/clk/mvebu/ap806-system-controller.c
@@ -14,7 +14,7 @@
#include <linux/clk-provider.h>
#include <linux/mfd/syscon.h>
-#include <linux/module.h>
+#include <linux/init.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
@@ -135,34 +135,17 @@ fail0:
return ret;
}
-static int ap806_syscon_clk_remove(struct platform_device *pdev)
-{
- of_clk_del_provider(pdev->dev.of_node);
- clk_unregister_fixed_factor(ap806_clks[3]);
- clk_unregister_fixed_rate(ap806_clks[2]);
- clk_unregister_fixed_rate(ap806_clks[1]);
- clk_unregister_fixed_rate(ap806_clks[0]);
-
- return 0;
-}
-
static const struct of_device_id ap806_syscon_of_match[] = {
{ .compatible = "marvell,ap806-system-controller", },
{ }
};
-MODULE_DEVICE_TABLE(of, armada8k_pcie_of_match);
static struct platform_driver ap806_syscon_driver = {
.probe = ap806_syscon_clk_probe,
- .remove = ap806_syscon_clk_remove,
.driver = {
.name = "marvell-ap806-system-controller",
.of_match_table = ap806_syscon_of_match,
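+ /* There is no remove(): forbid unbinding a live clock provider. */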
+ .suppress_bind_attrs = true,
},
};
-
-module_platform_driver(ap806_syscon_driver);
-
-MODULE_DESCRIPTION("Marvell AP806 System Controller driver");
-MODULE_AUTHOR("Thomas Petazzoni <thomas.petazzoni@free-electrons.com>");
-MODULE_LICENSE("GPL");
+builtin_platform_driver(ap806_syscon_driver);
diff --git a/drivers/clk/mvebu/cp110-system-controller.c b/drivers/clk/mvebu/cp110-system-controller.c
index f2303da7fda7..32e5b43c086f 100644
--- a/drivers/clk/mvebu/cp110-system-controller.c
+++ b/drivers/clk/mvebu/cp110-system-controller.c
@@ -30,7 +30,7 @@
#include <linux/clk-provider.h>
#include <linux/mfd/syscon.h>
-#include <linux/module.h>
+#include <linux/init.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
@@ -87,7 +87,7 @@ struct cp110_gate_clk {
u8 bit_idx;
};
-#define to_cp110_gate_clk(clk) container_of(clk, struct cp110_gate_clk, hw)
+#define to_cp110_gate_clk(hw) container_of(hw, struct cp110_gate_clk, hw)
static int cp110_gate_enable(struct clk_hw *hw)
{
@@ -123,13 +123,14 @@ static const struct clk_ops cp110_gate_ops = {
.is_enabled = cp110_gate_is_enabled,
};
-static struct clk *cp110_register_gate(const char *name,
- const char *parent_name,
- struct regmap *regmap, u8 bit_idx)
+static struct clk_hw *cp110_register_gate(const char *name,
+ const char *parent_name,
+ struct regmap *regmap, u8 bit_idx)
{
struct cp110_gate_clk *gate;
- struct clk *clk;
+ struct clk_hw *hw;
struct clk_init_data init;
+ int ret;
gate = kzalloc(sizeof(*gate), GFP_KERNEL);
if (!gate)
@@ -146,39 +147,37 @@ static struct clk *cp110_register_gate(const char *name,
gate->bit_idx = bit_idx;
gate->hw.init = &init;
- clk = clk_register(NULL, &gate->hw);
- if (IS_ERR(clk))
+ hw = &gate->hw;
+ ret = clk_hw_register(NULL, hw);
+ if (ret) {
kfree(gate);
+ hw = ERR_PTR(ret);
+ }
- return clk;
+ return hw;
}
-static void cp110_unregister_gate(struct clk *clk)
+static void cp110_unregister_gate(struct clk_hw *hw)
{
- struct clk_hw *hw;
-
- hw = __clk_get_hw(clk);
- if (!hw)
- return;
-
- clk_unregister(clk);
+ clk_hw_unregister(hw);
kfree(to_cp110_gate_clk(hw));
}
-static struct clk *cp110_of_clk_get(struct of_phandle_args *clkspec, void *data)
+static struct clk_hw *cp110_of_clk_get(struct of_phandle_args *clkspec,
+ void *data)
{
- struct clk_onecell_data *clk_data = data;
+ struct clk_hw_onecell_data *clk_data = data;
unsigned int type = clkspec->args[0];
unsigned int idx = clkspec->args[1];
if (type == CP110_CLK_TYPE_CORE) {
if (idx > CP110_MAX_CORE_CLOCKS)
return ERR_PTR(-EINVAL);
- return clk_data->clks[idx];
+ return clk_data->hws[idx];
} else if (type == CP110_CLK_TYPE_GATABLE) {
if (idx > CP110_MAX_GATABLE_CLOCKS)
return ERR_PTR(-EINVAL);
- return clk_data->clks[CP110_MAX_CORE_CLOCKS + idx];
+ return clk_data->hws[CP110_MAX_CORE_CLOCKS + idx];
}
return ERR_PTR(-EINVAL);
@@ -189,8 +188,8 @@ static int cp110_syscon_clk_probe(struct platform_device *pdev)
struct regmap *regmap;
struct device_node *np = pdev->dev.of_node;
const char *ppv2_name, *apll_name, *core_name, *eip_name, *nand_name;
- struct clk_onecell_data *cp110_clk_data;
- struct clk *clk, **cp110_clks;
+ struct clk_hw_onecell_data *cp110_clk_data;
+ struct clk_hw *hw, **cp110_clks;
u32 nand_clk_ctrl;
int i, ret;
@@ -203,80 +202,75 @@ static int cp110_syscon_clk_probe(struct platform_device *pdev)
if (ret)
return ret;
- cp110_clks = devm_kcalloc(&pdev->dev, sizeof(struct clk *),
- CP110_CLK_NUM, GFP_KERNEL);
- if (!cp110_clks)
- return -ENOMEM;
-
- cp110_clk_data = devm_kzalloc(&pdev->dev,
- sizeof(*cp110_clk_data),
+ cp110_clk_data = devm_kzalloc(&pdev->dev, sizeof(*cp110_clk_data) +
+ sizeof(struct clk_hw *) * CP110_CLK_NUM,
GFP_KERNEL);
if (!cp110_clk_data)
return -ENOMEM;
- cp110_clk_data->clks = cp110_clks;
- cp110_clk_data->clk_num = CP110_CLK_NUM;
+ cp110_clks = cp110_clk_data->hws;
+ cp110_clk_data->num = CP110_CLK_NUM;
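+ /* hws[] is a flexible array member, so the allocation above also covers the clk_hw slots. */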
- /* Register the APLL which is the root of the clk tree */
+ /* Register the APLL, which is the root of the clock tree */
of_property_read_string_index(np, "core-clock-output-names",
CP110_CORE_APLL, &apll_name);
- clk = clk_register_fixed_rate(NULL, apll_name, NULL, 0,
- 1000 * 1000 * 1000);
- if (IS_ERR(clk)) {
- ret = PTR_ERR(clk);
+ hw = clk_hw_register_fixed_rate(NULL, apll_name, NULL, 0,
+ 1000 * 1000 * 1000);
+ if (IS_ERR(hw)) {
+ ret = PTR_ERR(hw);
goto fail0;
}
- cp110_clks[CP110_CORE_APLL] = clk;
+ cp110_clks[CP110_CORE_APLL] = hw;
/* PPv2 is APLL/3 */
of_property_read_string_index(np, "core-clock-output-names",
CP110_CORE_PPV2, &ppv2_name);
- clk = clk_register_fixed_factor(NULL, ppv2_name, apll_name, 0, 1, 3);
- if (IS_ERR(clk)) {
- ret = PTR_ERR(clk);
+ hw = clk_hw_register_fixed_factor(NULL, ppv2_name, apll_name, 0, 1, 3);
+ if (IS_ERR(hw)) {
+ ret = PTR_ERR(hw);
goto fail1;
}
- cp110_clks[CP110_CORE_PPV2] = clk;
+ cp110_clks[CP110_CORE_PPV2] = hw;
/* EIP clock is APLL/2 */
of_property_read_string_index(np, "core-clock-output-names",
CP110_CORE_EIP, &eip_name);
- clk = clk_register_fixed_factor(NULL, eip_name, apll_name, 0, 1, 2);
- if (IS_ERR(clk)) {
- ret = PTR_ERR(clk);
+ hw = clk_hw_register_fixed_factor(NULL, eip_name, apll_name, 0, 1, 2);
+ if (IS_ERR(hw)) {
+ ret = PTR_ERR(hw);
goto fail2;
}
- cp110_clks[CP110_CORE_EIP] = clk;
+ cp110_clks[CP110_CORE_EIP] = hw;
/* Core clock is EIP/2 */
of_property_read_string_index(np, "core-clock-output-names",
CP110_CORE_CORE, &core_name);
- clk = clk_register_fixed_factor(NULL, core_name, eip_name, 0, 1, 2);
- if (IS_ERR(clk)) {
- ret = PTR_ERR(clk);
+ hw = clk_hw_register_fixed_factor(NULL, core_name, eip_name, 0, 1, 2);
+ if (IS_ERR(hw)) {
+ ret = PTR_ERR(hw);
goto fail3;
}
- cp110_clks[CP110_CORE_CORE] = clk;
+ cp110_clks[CP110_CORE_CORE] = hw;
/* NAND can be either APLL/2.5 or core clock */
of_property_read_string_index(np, "core-clock-output-names",
CP110_CORE_NAND, &nand_name);
if (nand_clk_ctrl & NF_CLOCK_SEL_400_MASK)
- clk = clk_register_fixed_factor(NULL, nand_name,
- apll_name, 0, 2, 5);
+ hw = clk_hw_register_fixed_factor(NULL, nand_name,
+ apll_name, 0, 2, 5);
else
- clk = clk_register_fixed_factor(NULL, nand_name,
- core_name, 0, 1, 1);
- if (IS_ERR(clk)) {
- ret = PTR_ERR(clk);
+ hw = clk_hw_register_fixed_factor(NULL, nand_name,
+ core_name, 0, 1, 1);
+ if (IS_ERR(hw)) {
+ ret = PTR_ERR(hw);
goto fail4;
}
- cp110_clks[CP110_CORE_NAND] = clk;
+ cp110_clks[CP110_CORE_NAND] = hw;
for (i = 0; i < CP110_MAX_GATABLE_CLOCKS; i++) {
const char *parent, *name;
@@ -335,16 +329,16 @@ static int cp110_syscon_clk_probe(struct platform_device *pdev)
break;
}
- clk = cp110_register_gate(name, parent, regmap, i);
- if (IS_ERR(clk)) {
- ret = PTR_ERR(clk);
+ hw = cp110_register_gate(name, parent, regmap, i);
+ if (IS_ERR(hw)) {
+ ret = PTR_ERR(hw);
goto fail_gate;
}
- cp110_clks[CP110_MAX_CORE_CLOCKS + i] = clk;
+ cp110_clks[CP110_MAX_CORE_CLOCKS + i] = hw;
}
- ret = of_clk_add_provider(np, cp110_of_clk_get, cp110_clk_data);
+ ret = of_clk_add_hw_provider(np, cp110_of_clk_get, cp110_clk_data);
if (ret)
goto fail_clk_add;
@@ -355,65 +349,36 @@ static int cp110_syscon_clk_probe(struct platform_device *pdev)
fail_clk_add:
fail_gate:
for (i = 0; i < CP110_MAX_GATABLE_CLOCKS; i++) {
- clk = cp110_clks[CP110_MAX_CORE_CLOCKS + i];
+ hw = cp110_clks[CP110_MAX_CORE_CLOCKS + i];
- if (clk)
- cp110_unregister_gate(clk);
+ if (hw)
+ cp110_unregister_gate(hw);
}
- clk_unregister_fixed_factor(cp110_clks[CP110_CORE_NAND]);
+ clk_hw_unregister_fixed_factor(cp110_clks[CP110_CORE_NAND]);
fail4:
- clk_unregister_fixed_factor(cp110_clks[CP110_CORE_CORE]);
+ clk_hw_unregister_fixed_factor(cp110_clks[CP110_CORE_CORE]);
fail3:
- clk_unregister_fixed_factor(cp110_clks[CP110_CORE_EIP]);
+ clk_hw_unregister_fixed_factor(cp110_clks[CP110_CORE_EIP]);
fail2:
- clk_unregister_fixed_factor(cp110_clks[CP110_CORE_PPV2]);
+ clk_hw_unregister_fixed_factor(cp110_clks[CP110_CORE_PPV2]);
fail1:
- clk_unregister_fixed_rate(cp110_clks[CP110_CORE_APLL]);
+ clk_hw_unregister_fixed_rate(cp110_clks[CP110_CORE_APLL]);
fail0:
return ret;
}
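
The allocation above relies on struct clk_hw_onecell_data ending in a flexible hws[] array, so the descriptor and the pointer table come from a single devm_kzalloc() call. A minimal standalone sketch of that provider pattern, using the generic of_clk_hw_onecell_get getter in place of the driver's own cp110_of_clk_get, with MY_NUM_CLKS as a placeholder:

#include <linux/clk-provider.h>
#include <linux/device.h>

#define MY_NUM_CLKS	4

static int example_add_hw_provider(struct device *dev,
				   struct device_node *np)
{
	struct clk_hw_onecell_data *data;

	/* One allocation covers the header and the hws[] table */
	data = devm_kzalloc(dev, sizeof(*data) +
			    sizeof(struct clk_hw *) * MY_NUM_CLKS,
			    GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	data->num = MY_NUM_CLKS;
	/* fill data->hws[i] with registered struct clk_hw pointers here */

	return of_clk_add_hw_provider(np, of_clk_hw_onecell_get, data);
}
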
-static int cp110_syscon_clk_remove(struct platform_device *pdev)
-{
- struct clk **cp110_clks = platform_get_drvdata(pdev);
- int i;
-
- of_clk_del_provider(pdev->dev.of_node);
-
- for (i = 0; i < CP110_MAX_GATABLE_CLOCKS; i++) {
- struct clk *clk = cp110_clks[CP110_MAX_CORE_CLOCKS + i];
-
- if (clk)
- cp110_unregister_gate(clk);
- }
-
- clk_unregister_fixed_factor(cp110_clks[CP110_CORE_NAND]);
- clk_unregister_fixed_factor(cp110_clks[CP110_CORE_CORE]);
- clk_unregister_fixed_factor(cp110_clks[CP110_CORE_EIP]);
- clk_unregister_fixed_factor(cp110_clks[CP110_CORE_PPV2]);
- clk_unregister_fixed_rate(cp110_clks[CP110_CORE_APLL]);
-
- return 0;
-}
-
static const struct of_device_id cp110_syscon_of_match[] = {
{ .compatible = "marvell,cp110-system-controller0", },
{ }
};
-MODULE_DEVICE_TABLE(of, armada8k_pcie_of_match);
static struct platform_driver cp110_syscon_driver = {
.probe = cp110_syscon_clk_probe,
- .remove = cp110_syscon_clk_remove,
.driver = {
.name = "marvell-cp110-system-controller0",
.of_match_table = cp110_syscon_of_match,
+ .suppress_bind_attrs = true,
},
};
-
-module_platform_driver(cp110_syscon_driver);
-
-MODULE_DESCRIPTION("Marvell CP110 System Controller 0 driver");
-MODULE_AUTHOR("Thomas Petazzoni <thomas.petazzoni@free-electrons.com>");
-MODULE_LICENSE("GPL");
+builtin_platform_driver(cp110_syscon_driver);
diff --git a/drivers/clk/nxp/clk-lpc18xx-ccu.c b/drivers/clk/nxp/clk-lpc18xx-ccu.c
index f7136b94fd0e..27781b49eb82 100644
--- a/drivers/clk/nxp/clk-lpc18xx-ccu.c
+++ b/drivers/clk/nxp/clk-lpc18xx-ccu.c
@@ -277,12 +277,15 @@ static void __init lpc18xx_ccu_init(struct device_node *np)
}
clk_data = kzalloc(sizeof(*clk_data), GFP_KERNEL);
- if (!clk_data)
+ if (!clk_data) {
+ iounmap(reg_base);
return;
+ }
clk_data->num = of_property_count_strings(np, "clock-names");
clk_data->name = kcalloc(clk_data->num, sizeof(char *), GFP_KERNEL);
if (!clk_data->name) {
+ iounmap(reg_base);
kfree(clk_data);
return;
}
diff --git a/drivers/clk/nxp/clk-lpc32xx.c b/drivers/clk/nxp/clk-lpc32xx.c
index 34c97353cdeb..5b98ff9076f3 100644
--- a/drivers/clk/nxp/clk-lpc32xx.c
+++ b/drivers/clk/nxp/clk-lpc32xx.c
@@ -1282,13 +1282,13 @@ static struct clk_hw_proto clk_hw_proto[LPC32XX_CLK_HW_MAX] = {
LPC32XX_DEFINE_MUX(PWM1_MUX, PWMCLK_CTRL, 1, 0x1, NULL, 0),
LPC32XX_DEFINE_DIV(PWM1_DIV, PWMCLK_CTRL, 4, 4, NULL,
- CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO),
+ CLK_DIVIDER_ONE_BASED),
LPC32XX_DEFINE_GATE(PWM1_GATE, PWMCLK_CTRL, 0, 0),
LPC32XX_DEFINE_COMPOSITE(PWM1, PWM1_MUX, PWM1_DIV, PWM1_GATE),
LPC32XX_DEFINE_MUX(PWM2_MUX, PWMCLK_CTRL, 3, 0x1, NULL, 0),
LPC32XX_DEFINE_DIV(PWM2_DIV, PWMCLK_CTRL, 8, 4, NULL,
- CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO),
+ CLK_DIVIDER_ONE_BASED),
LPC32XX_DEFINE_GATE(PWM2_GATE, PWMCLK_CTRL, 2, 0),
LPC32XX_DEFINE_COMPOSITE(PWM2, PWM2_MUX, PWM2_DIV, PWM2_GATE),
@@ -1335,8 +1335,7 @@ static struct clk_hw_proto clk_hw_proto[LPC32XX_CLK_HW_MAX] = {
LPC32XX_DEFINE_GATE(USB_DIV_GATE, USB_CTRL, 17, 0),
LPC32XX_DEFINE_COMPOSITE(USB_DIV, _NULL, USB_DIV_DIV, USB_DIV_GATE),
- LPC32XX_DEFINE_DIV(SD_DIV, MS_CTRL, 0, 4, NULL,
- CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO),
+ LPC32XX_DEFINE_DIV(SD_DIV, MS_CTRL, 0, 4, NULL, CLK_DIVIDER_ONE_BASED),
LPC32XX_DEFINE_CLK(SD_GATE, MS_CTRL, BIT(5) | BIT(9), BIT(5) | BIT(9),
0x0, BIT(5) | BIT(9), 0x0, 0x0, clk_mask_ops),
LPC32XX_DEFINE_COMPOSITE(SD, _NULL, SD_DIV, SD_GATE),
@@ -1478,6 +1477,20 @@ static struct clk * __init lpc32xx_clk_register(u32 id)
return clk;
}
+static void __init lpc32xx_clk_div_quirk(u32 reg, u32 div_mask, u32 gate)
+{
+ u32 val;
+
+ regmap_read(clk_regmap, reg, &val);
+
+ if (!(val & div_mask)) {
+ val &= ~gate;
+ val |= BIT(__ffs(div_mask));
+ }
+
+ regmap_update_bits(clk_regmap, reg, gate | div_mask, val);
+}
+
static void __init lpc32xx_clk_init(struct device_node *np)
{
unsigned int i;
@@ -1517,6 +1530,17 @@ static void __init lpc32xx_clk_init(struct device_node *np)
return;
}
+ /*
+ * The divider part of the PWM and MS clocks requires a quirk to
+ * avoid misinterpreting the formally valid zero value in the
+ * register bitfield, which actually indicates another clock gate.
+ * Instead of adding complexity to the gate clock, ensure that a
+ * zero value in the divider clock is never met at runtime.
+ */
+ lpc32xx_clk_div_quirk(LPC32XX_CLKPWR_PWMCLK_CTRL, 0xf0, BIT(0));
+ lpc32xx_clk_div_quirk(LPC32XX_CLKPWR_PWMCLK_CTRL, 0xf00, BIT(2));
+ lpc32xx_clk_div_quirk(LPC32XX_CLKPWR_MS_CTRL, 0xf, BIT(5) | BIT(9));
+
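
Traced with a hypothetical register value for the PWM1 case (div_mask = 0xf0, gate = BIT(0)), the quirk behaves as follows:

	/* assume PWMCLK_CTRL reads back as 0x01: gate on, divider field 0 */
	/* (0x01 & 0xf0) == 0, so the quirk path is taken:                 */
	/*   val &= ~BIT(0)           -> 0x00, gate the clock off          */
	/*   val |= BIT(__ffs(0xf0))  -> 0x10, program divisor 1           */
	/* regmap_update_bits(clk_regmap, reg, 0xf1, 0x10)                 */

so once CLK_DIVIDER_ALLOW_ZERO is dropped above, the divider field can never present a zero value to the clock framework.
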
for (i = 1; i < LPC32XX_CLK_MAX; i++) {
clk[i] = lpc32xx_clk_register(i);
if (IS_ERR(clk[i])) {
diff --git a/drivers/clk/pxa/clk-pxa.c b/drivers/clk/pxa/clk-pxa.c
index 29cee9e8d4d9..74f64c3c4290 100644
--- a/drivers/clk/pxa/clk-pxa.c
+++ b/drivers/clk/pxa/clk-pxa.c
@@ -18,7 +18,27 @@
#include <dt-bindings/clock/pxa-clock.h>
#include "clk-pxa.h"
-DEFINE_SPINLOCK(lock);
+#define KHz 1000
+#define MHz (1000 * 1000)
+
+#define MDREFR_K0DB4 (1 << 29) /* SDCLK0 Divide by 4 Control/Status */
+#define MDREFR_K2FREE (1 << 25) /* SDRAM Free-Running Control */
+#define MDREFR_K1FREE (1 << 24) /* SDRAM Free-Running Control */
+#define MDREFR_K0FREE (1 << 23) /* SDRAM Free-Running Control */
+#define MDREFR_SLFRSH (1 << 22) /* SDRAM Self-Refresh Control/Status */
+#define MDREFR_APD (1 << 20) /* SDRAM/SSRAM Auto-Power-Down Enable */
+#define MDREFR_K2DB2 (1 << 19) /* SDCLK2 Divide by 2 Control/Status */
+#define MDREFR_K2RUN (1 << 18) /* SDCLK2 Run Control/Status */
+#define MDREFR_K1DB2 (1 << 17) /* SDCLK1 Divide by 2 Control/Status */
+#define MDREFR_K1RUN (1 << 16) /* SDCLK1 Run Control/Status */
+#define MDREFR_E1PIN (1 << 15) /* SDCKE1 Level Control/Status */
+#define MDREFR_K0DB2 (1 << 14) /* SDCLK0 Divide by 2 Control/Status */
+#define MDREFR_K0RUN (1 << 13) /* SDCLK0 Run Control/Status */
+#define MDREFR_E0PIN (1 << 12) /* SDCKE0 Level Control/Status */
+#define MDREFR_DB2_MASK (MDREFR_K2DB2 | MDREFR_K1DB2)
+#define MDREFR_DRI_MASK 0xFFF
+
+static DEFINE_SPINLOCK(pxa_clk_lock);
static struct clk *pxa_clocks[CLK_MAX];
static struct clk_onecell_data onecell_data = {
@@ -89,7 +109,7 @@ int __init clk_pxa_cken_init(const struct desc_clk_cken *clks, int nb_clks)
pxa_clk->lp = clks[i].lp;
pxa_clk->hp = clks[i].hp;
pxa_clk->gate = clks[i].gate;
- pxa_clk->gate.lock = &lock;
+ pxa_clk->gate.lock = &pxa_clk_lock;
clk = clk_register_composite(NULL, clks[i].name,
clks[i].parent_names, 2,
&pxa_clk->hw, &cken_mux_ops,
@@ -106,3 +126,124 @@ void __init clk_pxa_dt_common_init(struct device_node *np)
{
of_clk_add_provider(np, of_clk_src_onecell_get, &onecell_data);
}
+
+void pxa2xx_core_turbo_switch(bool on)
+{
+ unsigned long flags;
+ unsigned int unused, clkcfg;
+
+ local_irq_save(flags);
+
+ asm("mrc p14, 0, %0, c6, c0, 0" : "=r" (clkcfg));
+ clkcfg &= ~CLKCFG_TURBO & ~CLKCFG_HALFTURBO;
+ if (on)
+ clkcfg |= CLKCFG_TURBO;
+ clkcfg |= CLKCFG_FCS;
+
+ asm volatile(
+ " b 2f\n"
+ " .align 5\n"
+ "1: mcr p14, 0, %1, c6, c0, 0\n"
+ " b 3f\n"
+ "2: b 1b\n"
+ "3: nop\n"
+ : "=&r" (unused)
+ : "r" (clkcfg)
+ : );
+
+ local_irq_restore(flags);
+}
+
+void pxa2xx_cpll_change(struct pxa2xx_freq *freq,
+ u32 (*mdrefr_dri)(unsigned int), void __iomem *mdrefr,
+ void __iomem *cccr)
+{
+ unsigned int clkcfg = freq->clkcfg;
+ unsigned int unused, preset_mdrefr, postset_mdrefr;
+ unsigned long flags;
+
+ local_irq_save(flags);
+
+ /* Calculate the next MDREFR. If we're slowing down the SDRAM clock
+ * we need to preset the smaller DRI before the change. If we're
+ * speeding up we need to set the larger DRI value after the change.
+ */
+ preset_mdrefr = postset_mdrefr = readl(mdrefr);
+ if ((preset_mdrefr & MDREFR_DRI_MASK) > mdrefr_dri(freq->membus_khz)) {
+ preset_mdrefr = (preset_mdrefr & ~MDREFR_DRI_MASK);
+ preset_mdrefr |= mdrefr_dri(freq->membus_khz);
+ }
+ postset_mdrefr =
+ (postset_mdrefr & ~MDREFR_DRI_MASK) |
+ mdrefr_dri(freq->membus_khz);
+
+ /* If we're dividing the memory clock by two for the SDRAM clock, this
+ * must be set prior to the change. Clearing the divide must be done
+ * after the change.
+ */
+ if (freq->div2) {
+ preset_mdrefr |= MDREFR_DB2_MASK;
+ postset_mdrefr |= MDREFR_DB2_MASK;
+ } else {
+ postset_mdrefr &= ~MDREFR_DB2_MASK;
+ }
+
+	/* Set the new CCCR and prepare CLKCFG */
+ writel(freq->cccr, cccr);
+
+ asm volatile(
+ " ldr r4, [%1]\n"
+ " b 2f\n"
+ " .align 5\n"
+ "1: str %3, [%1] /* preset the MDREFR */\n"
+ " mcr p14, 0, %2, c6, c0, 0 /* set CLKCFG[FCS] */\n"
+ " str %4, [%1] /* postset the MDREFR */\n"
+ " b 3f\n"
+ "2: b 1b\n"
+ "3: nop\n"
+ : "=&r" (unused)
+ : "r" (mdrefr), "r" (clkcfg), "r" (preset_mdrefr),
+ "r" (postset_mdrefr)
+ : "r4", "r5");
+
+ local_irq_restore(flags);
+}
+
+int pxa2xx_determine_rate(struct clk_rate_request *req,
+ struct pxa2xx_freq *freqs, int nb_freqs)
+{
+ int i, closest_below = -1, closest_above = -1;
+ unsigned long rate;
+
+ for (i = 0; i < nb_freqs; i++) {
+ rate = freqs[i].cpll;
+ if (rate == req->rate)
+ break;
+ if (rate < req->min_rate)
+ continue;
+ if (rate > req->max_rate)
+ continue;
+ if (rate <= req->rate)
+ closest_below = i;
+ if ((rate >= req->rate) && (closest_above == -1))
+ closest_above = i;
+ }
+
+ req->best_parent_hw = NULL;
+
+ if (i < nb_freqs) {
+ rate = req->rate;
+ } else if (closest_below >= 0) {
+ rate = freqs[closest_below].cpll;
+ } else if (closest_above >= 0) {
+ rate = freqs[closest_above].cpll;
+ } else {
+ pr_debug("%s(rate=%lu) no match\n", __func__, req->rate);
+ return -EINVAL;
+ }
+
+ pr_debug("%s(rate=%lu) rate=%lu\n", __func__, req->rate, rate);
+ req->rate = rate;
+
+ return 0;
+}
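
A hedged usage sketch against the pxa25x_freqs table added later in this patch: a 300 MHz request matches no entry exactly, so the closest in-range rate below it wins.

	struct clk_rate_request req = {
		.rate = 300000000,
		.min_rate = 0,
		.max_rate = ULONG_MAX,
	};

	pxa2xx_determine_rate(&req, pxa25x_freqs, ARRAY_SIZE(pxa25x_freqs));
	/* req.rate is now 298598400, the closest cpll rate below 300 MHz */
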
diff --git a/drivers/clk/pxa/clk-pxa.h b/drivers/clk/pxa/clk-pxa.h
index d1de805df867..2b90c5917b32 100644
--- a/drivers/clk/pxa/clk-pxa.h
+++ b/drivers/clk/pxa/clk-pxa.h
@@ -13,6 +13,11 @@
#ifndef _CLK_PXA_
#define _CLK_PXA_
+#define CLKCFG_TURBO 0x1
+#define CLKCFG_FCS 0x2
+#define CLKCFG_HALFTURBO 0x4
+#define CLKCFG_FASTBUS 0x8
+
#define PARENTS(name) \
static const char *const name ## _parents[] __initconst
#define MUX_RO_RATE_RO_OPS(name, clk_name) \
@@ -35,10 +40,27 @@
NULL, NULL, CLK_GET_RATE_NOCACHE); \
}
-#define RATE_RO_OPS(name, clk_name) \
+#define RATE_RO_OPS(name, clk_name) \
+ static struct clk_hw name ## _rate_hw; \
+ static const struct clk_ops name ## _rate_ops = { \
+ .recalc_rate = name ## _get_rate, \
+ }; \
+ static struct clk * __init clk_register_ ## name(void) \
+ { \
+ return clk_register_composite(NULL, clk_name, \
+ name ## _parents, \
+ ARRAY_SIZE(name ## _parents), \
+ NULL, NULL, \
+ &name ## _rate_hw, &name ## _rate_ops, \
+ NULL, NULL, CLK_GET_RATE_NOCACHE); \
+ }
+
+#define RATE_OPS(name, clk_name) \
static struct clk_hw name ## _rate_hw; \
static struct clk_ops name ## _rate_ops = { \
.recalc_rate = name ## _get_rate, \
+ .set_rate = name ## _set_rate, \
+ .determine_rate = name ## _determine_rate, \
}; \
static struct clk * __init clk_register_ ## name(void) \
{ \
@@ -50,6 +72,24 @@
NULL, NULL, CLK_GET_RATE_NOCACHE); \
}
+#define MUX_OPS(name, clk_name, flags) \
+ static struct clk_hw name ## _mux_hw; \
+ static const struct clk_ops name ## _mux_ops = { \
+ .get_parent = name ## _get_parent, \
+ .set_parent = name ## _set_parent, \
+ .determine_rate = name ## _determine_rate, \
+ }; \
+ static struct clk * __init clk_register_ ## name(void) \
+ { \
+ return clk_register_composite(NULL, clk_name, \
+ name ## _parents, \
+ ARRAY_SIZE(name ## _parents), \
+ &name ## _mux_hw, &name ## _mux_ops, \
+ NULL, NULL, \
+ NULL, NULL, \
+ CLK_GET_RATE_NOCACHE | flags); \
+ }
+
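
For reference, the later clk-pxa25x.c hunk invokes MUX_OPS(clk_pxa25x_core, "core", CLK_SET_RATE_PARENT), which expands to roughly the following (illustrative, after preprocessing):

static struct clk_hw clk_pxa25x_core_mux_hw;
static const struct clk_ops clk_pxa25x_core_mux_ops = {
	.get_parent = clk_pxa25x_core_get_parent,
	.set_parent = clk_pxa25x_core_set_parent,
	.determine_rate = clk_pxa25x_core_determine_rate,
};

static struct clk * __init clk_register_clk_pxa25x_core(void)
{
	return clk_register_composite(NULL, "core",
				      clk_pxa25x_core_parents,
				      ARRAY_SIZE(clk_pxa25x_core_parents),
				      &clk_pxa25x_core_mux_hw,
				      &clk_pxa25x_core_mux_ops,
				      NULL, NULL, NULL, NULL,
				      CLK_GET_RATE_NOCACHE |
				      CLK_SET_RATE_PARENT);
}
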
/*
* CKEN clock type
 * This clock takes its source from 2 possible parents:
@@ -95,7 +135,15 @@ struct desc_clk_cken {
PXA_CKEN(dev_id, con_id, name, parents, 1, 1, 1, 1, \
NULL, cken_reg, cken_bit, flag)
-static int dummy_clk_set_parent(struct clk_hw *hw, u8 index)
+struct pxa2xx_freq {
+ unsigned long cpll;
+ unsigned int membus_khz;
+ unsigned int cccr;
+ unsigned int div2;
+ unsigned int clkcfg;
+};
+
+static inline int dummy_clk_set_parent(struct clk_hw *hw, u8 index)
{
return 0;
}
@@ -105,4 +153,11 @@ extern void clkdev_pxa_register(int ckid, const char *con_id,
extern int clk_pxa_cken_init(const struct desc_clk_cken *clks, int nb_clks);
void clk_pxa_dt_common_init(struct device_node *np);
+void pxa2xx_core_turbo_switch(bool on);
+void pxa2xx_cpll_change(struct pxa2xx_freq *freq,
+ u32 (*mdrefr_dri)(unsigned int), void __iomem *mdrefr,
+ void __iomem *cccr);
+int pxa2xx_determine_rate(struct clk_rate_request *req,
+ struct pxa2xx_freq *freqs, int nb_freqs);
+
#endif
diff --git a/drivers/clk/pxa/clk-pxa25x.c b/drivers/clk/pxa/clk-pxa25x.c
index a98b98e2a9e4..6416c1f8e632 100644
--- a/drivers/clk/pxa/clk-pxa25x.c
+++ b/drivers/clk/pxa/clk-pxa25x.c
@@ -18,6 +18,7 @@
#include <linux/io.h>
#include <linux/of.h>
#include <mach/pxa2xx-regs.h>
+#include <mach/smemc.h>
#include <dt-bindings/clock/pxa-clock.h>
#include "clk-pxa.h"
@@ -30,6 +31,17 @@ enum {
PXA_CORE_TURBO,
};
+#define PXA25x_CLKCFG(T) \
+ (CLKCFG_FCS | \
+ ((T) ? CLKCFG_TURBO : 0))
+#define PXA25x_CCCR(N2, M, L) (N2 << 7 | M << 5 | L)
+
+#define MDCNFG_DRAC2(mdcnfg) (((mdcnfg) >> 21) & 0x3)
+#define MDCNFG_DRAC0(mdcnfg) (((mdcnfg) >> 5) & 0x3)
+
+/* Define the refresh period in mSec for the SDRAM and the number of rows */
+#define SDRAM_TREF 64 /* standard 64ms SDRAM */
+
/*
* Various clock factors driven by the CCCR register.
*/
@@ -48,6 +60,34 @@ static const char * const get_freq_khz[] = {
"core", "run", "cpll", "memory"
};
+static int get_sdram_rows(void)
+{
+ static int sdram_rows;
+ unsigned int drac2 = 0, drac0 = 0;
+ u32 mdcnfg;
+
+ if (sdram_rows)
+ return sdram_rows;
+
+ mdcnfg = readl_relaxed(MDCNFG);
+
+ if (mdcnfg & (MDCNFG_DE2 | MDCNFG_DE3))
+ drac2 = MDCNFG_DRAC2(mdcnfg);
+
+ if (mdcnfg & (MDCNFG_DE0 | MDCNFG_DE1))
+ drac0 = MDCNFG_DRAC0(mdcnfg);
+
+ sdram_rows = 1 << (11 + max(drac0, drac2));
+ return sdram_rows;
+}
+
+static u32 mdrefr_dri(unsigned int freq_khz)
+{
+ u32 interval = freq_khz * SDRAM_TREF / get_sdram_rows();
+
+ return interval / 32;
+}
+
/*
* Get the clock frequency as reflected by CCCR and the turbo flag.
* We assume these values have been applied via a fcs.
@@ -139,6 +179,21 @@ static struct desc_clk_cken pxa25x_clocks[] __initdata = {
clk_pxa25x_memory_parents, 0),
};
+/*
+ * In this table, PXA25x_CCCR(N2, M, L) has the following meaning, where:
+ * - freq_cpll = n * m * l * 3.6864 MHz
+ * - n = N2 / 2
+ * - m = 2^(M - 1), where 1 <= M <= 3
+ * - l = L_clk_mult[L], i.e. { 0, 27, 32, 36, 40, 45, 0, }[L]
+ */
+static struct pxa2xx_freq pxa25x_freqs[] = {
+ /* CPU MEMBUS CCCR DIV2 CCLKCFG */
+ { 99532800, 99500, PXA25x_CCCR(2, 1, 1), 1, PXA25x_CLKCFG(1)},
+ {199065600, 99500, PXA25x_CCCR(4, 1, 1), 0, PXA25x_CLKCFG(1)},
+ {298598400, 99500, PXA25x_CCCR(3, 2, 1), 0, PXA25x_CLKCFG(1)},
+ {398131200, 99500, PXA25x_CCCR(4, 2, 1), 0, PXA25x_CLKCFG(1)},
+};
+
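As a worked check of the encoding, the 398131200 entry above decodes from PXA25x_CCCR(4, 2, 1) as n = N2 / 2 = 2, m = 2^(M - 1) = 2 and l = L_clk_mult[1] = 27, so freq_cpll = 2 * 2 * 27 * 3686400 Hz = 398131200 Hz, matching the CPU column.
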
static u8 clk_pxa25x_core_get_parent(struct clk_hw *hw)
{
unsigned long clkcfg;
@@ -151,13 +206,24 @@ static u8 clk_pxa25x_core_get_parent(struct clk_hw *hw)
return PXA_CORE_RUN;
}
-static unsigned long clk_pxa25x_core_get_rate(struct clk_hw *hw,
- unsigned long parent_rate)
+static int clk_pxa25x_core_set_parent(struct clk_hw *hw, u8 index)
{
- return parent_rate;
+ if (index > PXA_CORE_TURBO)
+ return -EINVAL;
+
+ pxa2xx_core_turbo_switch(index == PXA_CORE_TURBO);
+
+ return 0;
+}
+
+static int clk_pxa25x_core_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
+{
+ return __clk_mux_determine_rate(hw, req);
}
+
PARENTS(clk_pxa25x_core) = { "run", "cpll" };
-MUX_RO_RATE_RO_OPS(clk_pxa25x_core, "core");
+MUX_OPS(clk_pxa25x_core, "core", CLK_SET_RATE_PARENT);
static unsigned long clk_pxa25x_run_get_rate(struct clk_hw *hw,
unsigned long parent_rate)
@@ -182,17 +248,42 @@ static unsigned long clk_pxa25x_cpll_get_rate(struct clk_hw *hw,
m = M_clk_mult[(cccr >> 5) & 0x03];
n2 = N2_clk_mult[(cccr >> 7) & 0x07];
- if (t)
- return m * l * n2 * parent_rate / 2;
- return m * l * parent_rate;
+ return m * l * n2 * parent_rate / 2;
+}
+
+static int clk_pxa25x_cpll_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
+{
+ return pxa2xx_determine_rate(req, pxa25x_freqs,
+ ARRAY_SIZE(pxa25x_freqs));
+}
+
+static int clk_pxa25x_cpll_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ int i;
+
+ pr_debug("%s(rate=%lu parent_rate=%lu)\n", __func__, rate, parent_rate);
+ for (i = 0; i < ARRAY_SIZE(pxa25x_freqs); i++)
+ if (pxa25x_freqs[i].cpll == rate)
+ break;
+
+ if (i >= ARRAY_SIZE(pxa25x_freqs))
+ return -EINVAL;
+
+ pxa2xx_cpll_change(&pxa25x_freqs[i], mdrefr_dri, MDREFR, CCCR);
+
+ return 0;
}
PARENTS(clk_pxa25x_cpll) = { "osc_3_6864mhz" };
-RATE_RO_OPS(clk_pxa25x_cpll, "cpll");
+RATE_OPS(clk_pxa25x_cpll, "cpll");
static void __init pxa25x_register_core(void)
{
- clk_register_clk_pxa25x_cpll();
- clk_register_clk_pxa25x_run();
+ clkdev_pxa_register(CLK_NONE, "cpll", NULL,
+ clk_register_clk_pxa25x_cpll());
+ clkdev_pxa_register(CLK_NONE, "run", NULL,
+ clk_register_clk_pxa25x_run());
clkdev_pxa_register(CLK_CORE, "core", NULL,
clk_register_clk_pxa25x_core());
}
@@ -214,7 +305,8 @@ static void __init pxa25x_base_clocks_init(void)
{
pxa25x_register_plls();
pxa25x_register_core();
- clk_register_clk_pxa25x_memory();
+ clkdev_pxa_register(CLK_NONE, "system_bus", NULL,
+ clk_register_clk_pxa25x_memory());
}
#define DUMMY_CLK(_con_id, _dev_id, _parent) \
@@ -230,7 +322,7 @@ static struct dummy_clk dummy_clks[] __initdata = {
DUMMY_CLK("GPIO11_CLK", NULL, "osc_3_6864mhz"),
DUMMY_CLK("GPIO12_CLK", NULL, "osc_32_768khz"),
DUMMY_CLK(NULL, "sa1100-rtc", "osc_32_768khz"),
- DUMMY_CLK("OSTIMER0", NULL, "osc_32_768khz"),
+ DUMMY_CLK("OSTIMER0", NULL, "osc_3_6864mhz"),
DUMMY_CLK("UARTCLK", "pxa2xx-ir", "STUART"),
};
diff --git a/drivers/clk/pxa/clk-pxa27x.c b/drivers/clk/pxa/clk-pxa27x.c
index c40b1804f58c..25a30194d27a 100644
--- a/drivers/clk/pxa/clk-pxa27x.c
+++ b/drivers/clk/pxa/clk-pxa27x.c
@@ -17,6 +17,8 @@
#include <linux/clkdev.h>
#include <linux/of.h>
+#include <mach/smemc.h>
+
#include <dt-bindings/clock/pxa-clock.h>
#include "clk-pxa.h"
@@ -45,11 +47,52 @@ enum {
PXA_MEM_RUN,
};
+#define PXA27x_CLKCFG(B, HT, T) \
+ (CLKCFG_FCS | \
+ ((B) ? CLKCFG_FASTBUS : 0) | \
+ ((HT) ? CLKCFG_HALFTURBO : 0) | \
+ ((T) ? CLKCFG_TURBO : 0))
+#define PXA27x_CCCR(A, L, N2) (A << 25 | N2 << 7 | L)
+
+#define MDCNFG_DRAC2(mdcnfg) (((mdcnfg) >> 21) & 0x3)
+#define MDCNFG_DRAC0(mdcnfg) (((mdcnfg) >> 5) & 0x3)
+
+/* Define the refresh period in mSec for the SDRAM and the number of rows */
+#define SDRAM_TREF 64 /* standard 64ms SDRAM */
+
static const char * const get_freq_khz[] = {
"core", "run", "cpll", "memory",
"system_bus"
};
+static int get_sdram_rows(void)
+{
+ static int sdram_rows;
+ unsigned int drac2 = 0, drac0 = 0;
+ u32 mdcnfg;
+
+ if (sdram_rows)
+ return sdram_rows;
+
+ mdcnfg = readl_relaxed(MDCNFG);
+
+ if (mdcnfg & (MDCNFG_DE2 | MDCNFG_DE3))
+ drac2 = MDCNFG_DRAC2(mdcnfg);
+
+ if (mdcnfg & (MDCNFG_DE0 | MDCNFG_DE1))
+ drac0 = MDCNFG_DRAC0(mdcnfg);
+
+ sdram_rows = 1 << (11 + max(drac0, drac2));
+ return sdram_rows;
+}
+
+static u32 mdrefr_dri(unsigned int freq_khz)
+{
+ u32 interval = freq_khz * SDRAM_TREF / get_sdram_rows();
+
+ return (interval - 31) / 32;
+}
+
/*
* Get the clock frequency as reflected by CCSR and the turbo flag.
* We assume these values have been applied via a fcs.
@@ -145,6 +188,42 @@ static struct desc_clk_cken pxa27x_clocks[] __initdata = {
};
+/*
+ * PXA270 definitions
+ *
+ * For the PXA27x:
+ * Control variables are A, L, 2N for CCCR; B, HT, T for CLKCFG.
+ *
+ * A = 0 => memory controller clock from table 3-7,
+ * A = 1 => memory controller clock = system bus clock
+ * Run mode frequency = 13 MHz * L
+ * Turbo mode frequency = 13 MHz * L * N
+ * System bus frequency = 13 MHz * L / (B + 1)
+ *
+ * In CCCR:
+ * A = 1
+ * L = 16 oscillator to run mode ratio
+ * 2N = 6 2 * (turbo mode to run mode ratio)
+ *
+ * In CCLKCFG:
+ * B = 1 Fast bus mode
+ * HT = 0 Half-Turbo mode
+ * T = 1 Turbo mode
+ *
+ * For now, just support some of the combinations in table 3-7 of the
+ * PXA27x Processor Family Developer's Manual to simplify frequency
+ * change sequences.
+ */
+static struct pxa2xx_freq pxa27x_freqs[] = {
+ {104000000, 104000, PXA27x_CCCR(1, 8, 2), 0, PXA27x_CLKCFG(1, 0, 1) },
+ {156000000, 104000, PXA27x_CCCR(1, 8, 3), 0, PXA27x_CLKCFG(1, 0, 1) },
+ {208000000, 208000, PXA27x_CCCR(0, 16, 2), 1, PXA27x_CLKCFG(0, 0, 1) },
+ {312000000, 208000, PXA27x_CCCR(1, 16, 3), 1, PXA27x_CLKCFG(1, 0, 1) },
+ {416000000, 208000, PXA27x_CCCR(1, 16, 4), 1, PXA27x_CLKCFG(1, 0, 1) },
+ {520000000, 208000, PXA27x_CCCR(1, 16, 5), 1, PXA27x_CLKCFG(1, 0, 1) },
+ {624000000, 208000, PXA27x_CCCR(1, 16, 6), 1, PXA27x_CLKCFG(1, 0, 1) },
+};
+
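As a worked check against the formulas above, the 624000000 entry uses PXA27x_CCCR(1, 16, 6): the run mode frequency is 13 MHz * L = 208 MHz (the MEMBUS column) and the turbo mode frequency is 13 MHz * L * N = 13 MHz * 16 * (6 / 2) = 624 MHz, matching the CPU column.
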
static unsigned long clk_pxa27x_cpll_get_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
@@ -162,10 +241,35 @@ static unsigned long clk_pxa27x_cpll_get_rate(struct clk_hw *hw,
L = l * parent_rate;
N = (L * n2) / 2;
- return t ? N : L;
+ return N;
+}
+
+static int clk_pxa27x_cpll_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
+{
+ return pxa2xx_determine_rate(req, pxa27x_freqs,
+ ARRAY_SIZE(pxa27x_freqs));
+}
+
+static int clk_pxa27x_cpll_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ int i;
+
+ pr_debug("%s(rate=%lu parent_rate=%lu)\n", __func__, rate, parent_rate);
+ for (i = 0; i < ARRAY_SIZE(pxa27x_freqs); i++)
+ if (pxa27x_freqs[i].cpll == rate)
+ break;
+
+ if (i >= ARRAY_SIZE(pxa27x_freqs))
+ return -EINVAL;
+
+ pxa2xx_cpll_change(&pxa27x_freqs[i], mdrefr_dri, MDREFR, CCCR);
+ return 0;
}
+
PARENTS(clk_pxa27x_cpll) = { "osc_13mhz" };
-RATE_RO_OPS(clk_pxa27x_cpll, "cpll");
+RATE_OPS(clk_pxa27x_cpll, "cpll");
static unsigned long clk_pxa27x_lcd_base_get_rate(struct clk_hw *hw,
unsigned long parent_rate)
@@ -217,31 +321,10 @@ static void __init pxa27x_register_plls(void)
clk_register_fixed_factor(NULL, "ppll_312mhz", "osc_13mhz", 0, 24, 1);
}
-static unsigned long clk_pxa27x_core_get_rate(struct clk_hw *hw,
- unsigned long parent_rate)
-{
- unsigned long clkcfg;
- unsigned int t, ht, b, osc_forced;
- unsigned long ccsr = readl(CCSR);
-
- osc_forced = ccsr & (1 << CCCR_CPDIS_BIT);
- asm("mrc\tp14, 0, %0, c6, c0, 0" : "=r" (clkcfg));
- t = clkcfg & (1 << 0);
- ht = clkcfg & (1 << 2);
- b = clkcfg & (1 << 3);
-
- if (osc_forced)
- return parent_rate;
- if (ht)
- return parent_rate / 2;
- else
- return parent_rate;
-}
-
static u8 clk_pxa27x_core_get_parent(struct clk_hw *hw)
{
unsigned long clkcfg;
- unsigned int t, ht, b, osc_forced;
+ unsigned int t, ht, osc_forced;
unsigned long ccsr = readl(CCSR);
osc_forced = ccsr & (1 << CCCR_CPDIS_BIT);
@@ -251,14 +334,30 @@ static u8 clk_pxa27x_core_get_parent(struct clk_hw *hw)
asm("mrc\tp14, 0, %0, c6, c0, 0" : "=r" (clkcfg));
t = clkcfg & (1 << 0);
ht = clkcfg & (1 << 2);
- b = clkcfg & (1 << 3);
if (ht || t)
return PXA_CORE_TURBO;
return PXA_CORE_RUN;
}
+
+static int clk_pxa27x_core_set_parent(struct clk_hw *hw, u8 index)
+{
+ if (index > PXA_CORE_TURBO)
+ return -EINVAL;
+
+ pxa2xx_core_turbo_switch(index == PXA_CORE_TURBO);
+
+ return 0;
+}
+
+static int clk_pxa27x_core_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
+{
+ return __clk_mux_determine_rate(hw, req);
+}
+
PARENTS(clk_pxa27x_core) = { "osc_13mhz", "run", "cpll" };
-MUX_RO_RATE_RO_OPS(clk_pxa27x_core, "core");
+MUX_OPS(clk_pxa27x_core, "core", CLK_SET_RATE_PARENT);
static unsigned long clk_pxa27x_run_get_rate(struct clk_hw *hw,
unsigned long parent_rate)
@@ -273,9 +372,10 @@ RATE_RO_OPS(clk_pxa27x_run, "run");
static void __init pxa27x_register_core(void)
{
- clk_register_clk_pxa27x_cpll();
- clk_register_clk_pxa27x_run();
-
+ clkdev_pxa_register(CLK_NONE, "cpll", NULL,
+ clk_register_clk_pxa27x_cpll());
+ clkdev_pxa_register(CLK_NONE, "run", NULL,
+ clk_register_clk_pxa27x_run());
clkdev_pxa_register(CLK_CORE, "core", NULL,
clk_register_clk_pxa27x_core());
}
@@ -294,9 +394,9 @@ static unsigned long clk_pxa27x_system_bus_get_rate(struct clk_hw *hw,
if (osc_forced)
return parent_rate;
if (b)
- return parent_rate / 2;
- else
return parent_rate;
+ else
+ return parent_rate / 2;
}
static u8 clk_pxa27x_system_bus_get_parent(struct clk_hw *hw)
@@ -385,8 +485,10 @@ static void __init pxa27x_base_clocks_init(void)
{
pxa27x_register_plls();
pxa27x_register_core();
- clk_register_clk_pxa27x_system_bus();
- clk_register_clk_pxa27x_memory();
+ clkdev_pxa_register(CLK_NONE, "system_bus", NULL,
+ clk_register_clk_pxa27x_system_bus());
+ clkdev_pxa_register(CLK_NONE, "memory", NULL,
+ clk_register_clk_pxa27x_memory());
clk_register_clk_pxa27x_lcd_base();
}
diff --git a/drivers/clk/qcom/Kconfig b/drivers/clk/qcom/Kconfig
index 0146d3c2547f..5fb8d7430908 100644
--- a/drivers/clk/qcom/Kconfig
+++ b/drivers/clk/qcom/Kconfig
@@ -2,6 +2,9 @@ config QCOM_GDSC
bool
select PM_GENERIC_DOMAINS if PM
+config QCOM_RPMCC
+ bool
+
config COMMON_CLK_QCOM
tristate "Support for Qualcomm's clock controllers"
depends on OF
@@ -9,6 +12,32 @@ config COMMON_CLK_QCOM
select REGMAP_MMIO
select RESET_CONTROLLER
+config QCOM_CLK_RPM
+ tristate "RPM based Clock Controller"
+ depends on COMMON_CLK_QCOM && MFD_QCOM_RPM
+ select QCOM_RPMCC
+ help
+ The RPM (Resource Power Manager) is a dedicated hardware engine for
+ managing the shared SoC resources in order to keep the lowest power
+ profile. It communicates with other hardware subsystems via shared
+ memory and accepts clock requests, aggregates the requests and turns
+ the clocks on/off or scales them on demand.
+ Say Y if you want to support the clocks exposed by the RPM on
+ platforms such as apq8064, msm8660, msm8960 etc.
+
+config QCOM_CLK_SMD_RPM
+ tristate "RPM over SMD based Clock Controller"
+ depends on COMMON_CLK_QCOM && QCOM_SMD_RPM
+ select QCOM_RPMCC
+ help
+ The RPM (Resource Power Manager) is a dedicated hardware engine for
+ managing the shared SoC resources in order to keep the lowest power
+ profile. It communicates with other hardware subsystems via shared
+ memory and accepts clock requests, aggregates the requests and turns
+ the clocks on/off or scales them on demand.
+ Say Y if you want to support the clocks exposed by the RPM on
+ platforms such as apq8016, apq8084, msm8974 etc.
+
config APQ_GCC_8084
tristate "APQ8084 Global Clock Controller"
select QCOM_GDSC
@@ -132,6 +161,14 @@ config MSM_MMCC_8974
Say Y if you want to support multimedia devices such as display,
graphics, video encode/decode, camera, etc.
+config MSM_GCC_8994
+ tristate "MSM8994 Global Clock Controller"
+ depends on COMMON_CLK_QCOM
+ help
+ Support for the global clock controller on msm8994 devices.
+ Say Y if you want to use peripheral devices such as UART, SPI,
+ i2c, USB, UFS, SD/eMMC, PCIe, etc.
+
config MSM_GCC_8996
tristate "MSM8996 Global Clock Controller"
select QCOM_GDSC
diff --git a/drivers/clk/qcom/Makefile b/drivers/clk/qcom/Makefile
index 1fb1f5476cb0..1c3e222b917b 100644
--- a/drivers/clk/qcom/Makefile
+++ b/drivers/clk/qcom/Makefile
@@ -24,8 +24,11 @@ obj-$(CONFIG_MSM_GCC_8660) += gcc-msm8660.o
obj-$(CONFIG_MSM_GCC_8916) += gcc-msm8916.o
obj-$(CONFIG_MSM_GCC_8960) += gcc-msm8960.o
obj-$(CONFIG_MSM_GCC_8974) += gcc-msm8974.o
+obj-$(CONFIG_MSM_GCC_8994) += gcc-msm8994.o
obj-$(CONFIG_MSM_GCC_8996) += gcc-msm8996.o
obj-$(CONFIG_MSM_LCC_8960) += lcc-msm8960.o
obj-$(CONFIG_MSM_MMCC_8960) += mmcc-msm8960.o
obj-$(CONFIG_MSM_MMCC_8974) += mmcc-msm8974.o
obj-$(CONFIG_MSM_MMCC_8996) += mmcc-msm8996.o
+obj-$(CONFIG_QCOM_CLK_RPM) += clk-rpm.o
+obj-$(CONFIG_QCOM_CLK_SMD_RPM) += clk-smd-rpm.o
diff --git a/drivers/clk/qcom/clk-alpha-pll.c b/drivers/clk/qcom/clk-alpha-pll.c
index e6a03eaf7a93..47a1da3739ce 100644
--- a/drivers/clk/qcom/clk-alpha-pll.c
+++ b/drivers/clk/qcom/clk-alpha-pll.c
@@ -18,17 +18,21 @@
#include <linux/delay.h>
#include "clk-alpha-pll.h"
+#include "common.h"
#define PLL_MODE 0x00
# define PLL_OUTCTRL BIT(0)
# define PLL_BYPASSNL BIT(1)
# define PLL_RESET_N BIT(2)
+# define PLL_OFFLINE_REQ BIT(7)
# define PLL_LOCK_COUNT_SHIFT 8
# define PLL_LOCK_COUNT_MASK 0x3f
# define PLL_BIAS_COUNT_SHIFT 14
# define PLL_BIAS_COUNT_MASK 0x3f
# define PLL_VOTE_FSM_ENA BIT(20)
+# define PLL_FSM_ENA BIT(20)
# define PLL_VOTE_FSM_RESET BIT(21)
+# define PLL_OFFLINE_ACK BIT(28)
# define PLL_ACTIVE_FLAG BIT(30)
# define PLL_LOCK_DET BIT(31)
@@ -46,6 +50,7 @@
#define PLL_USER_CTL_U 0x14
#define PLL_CONFIG_CTL 0x18
+#define PLL_CONFIG_CTL_U 0x20
#define PLL_TEST_CTL 0x1c
#define PLL_TEST_CTL_U 0x20
#define PLL_STATUS 0x24
@@ -55,6 +60,7 @@
*/
#define ALPHA_REG_BITWIDTH 40
#define ALPHA_BITWIDTH 32
+#define ALPHA_16BIT_MASK 0xffff
#define to_clk_alpha_pll(_hw) container_of(to_clk_regmap(_hw), \
struct clk_alpha_pll, clkr)
@@ -62,9 +68,10 @@
#define to_clk_alpha_pll_postdiv(_hw) container_of(to_clk_regmap(_hw), \
struct clk_alpha_pll_postdiv, clkr)
-static int wait_for_pll(struct clk_alpha_pll *pll)
+static int wait_for_pll(struct clk_alpha_pll *pll, u32 mask, bool inverse,
+ const char *action)
{
- u32 val, mask, off;
+ u32 val, off;
int count;
int ret;
const char *name = clk_hw_get_name(&pll->clkr.hw);
@@ -74,26 +81,148 @@ static int wait_for_pll(struct clk_alpha_pll *pll)
if (ret)
return ret;
- if (val & PLL_VOTE_FSM_ENA)
- mask = PLL_ACTIVE_FLAG;
- else
- mask = PLL_LOCK_DET;
-
- /* Wait for pll to enable. */
for (count = 100; count > 0; count--) {
ret = regmap_read(pll->clkr.regmap, off + PLL_MODE, &val);
if (ret)
return ret;
- if ((val & mask) == mask)
+ if (inverse && !(val & mask))
+ return 0;
+		else if (!inverse && (val & mask) == mask)
return 0;
udelay(1);
}
- WARN(1, "%s didn't enable after voting for it!\n", name);
+ WARN(1, "%s failed to %s!\n", name, action);
return -ETIMEDOUT;
}
+#define wait_for_pll_enable_active(pll) \
+ wait_for_pll(pll, PLL_ACTIVE_FLAG, 0, "enable")
+
+#define wait_for_pll_enable_lock(pll) \
+ wait_for_pll(pll, PLL_LOCK_DET, 0, "enable")
+
+#define wait_for_pll_disable(pll) \
+ wait_for_pll(pll, PLL_ACTIVE_FLAG, 1, "disable")
+
+#define wait_for_pll_offline(pll) \
+ wait_for_pll(pll, PLL_OFFLINE_ACK, 0, "offline")
+
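The open-coded loop above amounts to a 100 us timeout polled at 1 us intervals. As a hedged sketch (assuming the generic regmap_read_poll_timeout() helper is available in this tree), the same wait could be written as:

static int wait_for_pll_sketch(struct clk_alpha_pll *pll, u32 mask,
			       bool inverse)
{
	u32 val;

	/* Poll PLL_MODE every 1 us, for at most 100 us */
	return regmap_read_poll_timeout(pll->clkr.regmap,
					pll->offset + PLL_MODE, val,
					inverse ? !(val & mask) :
						  (val & mask) == mask,
					1, 100);
}
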
+void clk_alpha_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap,
+ const struct alpha_pll_config *config)
+{
+ u32 val, mask;
+ u32 off = pll->offset;
+
+ regmap_write(regmap, off + PLL_L_VAL, config->l);
+ regmap_write(regmap, off + PLL_ALPHA_VAL, config->alpha);
+ regmap_write(regmap, off + PLL_CONFIG_CTL, config->config_ctl_val);
+ regmap_write(regmap, off + PLL_CONFIG_CTL_U, config->config_ctl_hi_val);
+
+ val = config->main_output_mask;
+ val |= config->aux_output_mask;
+ val |= config->aux2_output_mask;
+ val |= config->early_output_mask;
+ val |= config->pre_div_val;
+ val |= config->post_div_val;
+ val |= config->vco_val;
+
+ mask = config->main_output_mask;
+ mask |= config->aux_output_mask;
+ mask |= config->aux2_output_mask;
+ mask |= config->early_output_mask;
+ mask |= config->pre_div_mask;
+ mask |= config->post_div_mask;
+ mask |= config->vco_mask;
+
+ regmap_update_bits(regmap, off + PLL_USER_CTL, mask, val);
+
+ if (pll->flags & SUPPORTS_FSM_MODE)
+ qcom_pll_set_fsm_mode(regmap, off + PLL_MODE, 6, 0);
+}
+
+static int clk_alpha_pll_hwfsm_enable(struct clk_hw *hw)
+{
+ int ret;
+ u32 val, off;
+ struct clk_alpha_pll *pll = to_clk_alpha_pll(hw);
+
+ off = pll->offset;
+ ret = regmap_read(pll->clkr.regmap, off + PLL_MODE, &val);
+ if (ret)
+ return ret;
+
+ val |= PLL_FSM_ENA;
+
+ if (pll->flags & SUPPORTS_OFFLINE_REQ)
+ val &= ~PLL_OFFLINE_REQ;
+
+ ret = regmap_write(pll->clkr.regmap, off + PLL_MODE, val);
+ if (ret)
+ return ret;
+
+ /* Make sure enable request goes through before waiting for update */
+ mb();
+
+ return wait_for_pll_enable_active(pll);
+}
+
+static void clk_alpha_pll_hwfsm_disable(struct clk_hw *hw)
+{
+ int ret;
+ u32 val, off;
+ struct clk_alpha_pll *pll = to_clk_alpha_pll(hw);
+
+ off = pll->offset;
+ ret = regmap_read(pll->clkr.regmap, off + PLL_MODE, &val);
+ if (ret)
+ return;
+
+ if (pll->flags & SUPPORTS_OFFLINE_REQ) {
+ ret = regmap_update_bits(pll->clkr.regmap, off + PLL_MODE,
+ PLL_OFFLINE_REQ, PLL_OFFLINE_REQ);
+ if (ret)
+ return;
+
+ ret = wait_for_pll_offline(pll);
+ if (ret)
+ return;
+ }
+
+ /* Disable hwfsm */
+ ret = regmap_update_bits(pll->clkr.regmap, off + PLL_MODE,
+ PLL_FSM_ENA, 0);
+ if (ret)
+ return;
+
+ wait_for_pll_disable(pll);
+}
+
+static int pll_is_enabled(struct clk_hw *hw, u32 mask)
+{
+ int ret;
+ u32 val, off;
+ struct clk_alpha_pll *pll = to_clk_alpha_pll(hw);
+
+ off = pll->offset;
+ ret = regmap_read(pll->clkr.regmap, off + PLL_MODE, &val);
+ if (ret)
+ return ret;
+
+ return !!(val & mask);
+}
+
+static int clk_alpha_pll_hwfsm_is_enabled(struct clk_hw *hw)
+{
+ return pll_is_enabled(hw, PLL_ACTIVE_FLAG);
+}
+
+static int clk_alpha_pll_is_enabled(struct clk_hw *hw)
+{
+ return pll_is_enabled(hw, PLL_LOCK_DET);
+}
+
static int clk_alpha_pll_enable(struct clk_hw *hw)
{
int ret;
@@ -112,7 +241,7 @@ static int clk_alpha_pll_enable(struct clk_hw *hw)
ret = clk_enable_regmap(hw);
if (ret)
return ret;
- return wait_for_pll(pll);
+ return wait_for_pll_enable_active(pll);
}
/* Skip if already enabled */
@@ -136,7 +265,7 @@ static int clk_alpha_pll_enable(struct clk_hw *hw)
if (ret)
return ret;
- ret = wait_for_pll(pll);
+ ret = wait_for_pll_enable_lock(pll);
if (ret)
return ret;
@@ -234,9 +363,14 @@ clk_alpha_pll_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
regmap_read(pll->clkr.regmap, off + PLL_USER_CTL, &ctl);
if (ctl & PLL_ALPHA_EN) {
regmap_read(pll->clkr.regmap, off + PLL_ALPHA_VAL, &low);
- regmap_read(pll->clkr.regmap, off + PLL_ALPHA_VAL_U, &high);
- a = (u64)high << 32 | low;
- a >>= ALPHA_REG_BITWIDTH - ALPHA_BITWIDTH;
+ if (pll->flags & SUPPORTS_16BIT_ALPHA) {
+ a = low & ALPHA_16BIT_MASK;
+ } else {
+ regmap_read(pll->clkr.regmap, off + PLL_ALPHA_VAL_U,
+ &high);
+ a = (u64)high << 32 | low;
+ a >>= ALPHA_REG_BITWIDTH - ALPHA_BITWIDTH;
+ }
}
return alpha_pll_calc_rate(prate, l, a);
@@ -257,11 +391,15 @@ static int clk_alpha_pll_set_rate(struct clk_hw *hw, unsigned long rate,
return -EINVAL;
}
- a <<= (ALPHA_REG_BITWIDTH - ALPHA_BITWIDTH);
-
regmap_write(pll->clkr.regmap, off + PLL_L_VAL, l);
- regmap_write(pll->clkr.regmap, off + PLL_ALPHA_VAL, a);
- regmap_write(pll->clkr.regmap, off + PLL_ALPHA_VAL_U, a >> 32);
+
+ if (pll->flags & SUPPORTS_16BIT_ALPHA) {
+ regmap_write(pll->clkr.regmap, off + PLL_ALPHA_VAL,
+ a & ALPHA_16BIT_MASK);
+ } else {
+ a <<= (ALPHA_REG_BITWIDTH - ALPHA_BITWIDTH);
+ regmap_write(pll->clkr.regmap, off + PLL_ALPHA_VAL_U, a >> 32);
+ }
regmap_update_bits(pll->clkr.regmap, off + PLL_USER_CTL,
PLL_VCO_MASK << PLL_VCO_SHIFT,
@@ -294,12 +432,23 @@ static long clk_alpha_pll_round_rate(struct clk_hw *hw, unsigned long rate,
const struct clk_ops clk_alpha_pll_ops = {
.enable = clk_alpha_pll_enable,
.disable = clk_alpha_pll_disable,
+ .is_enabled = clk_alpha_pll_is_enabled,
.recalc_rate = clk_alpha_pll_recalc_rate,
.round_rate = clk_alpha_pll_round_rate,
.set_rate = clk_alpha_pll_set_rate,
};
EXPORT_SYMBOL_GPL(clk_alpha_pll_ops);
+const struct clk_ops clk_alpha_pll_hwfsm_ops = {
+ .enable = clk_alpha_pll_hwfsm_enable,
+ .disable = clk_alpha_pll_hwfsm_disable,
+ .is_enabled = clk_alpha_pll_hwfsm_is_enabled,
+ .recalc_rate = clk_alpha_pll_recalc_rate,
+ .round_rate = clk_alpha_pll_round_rate,
+ .set_rate = clk_alpha_pll_set_rate,
+};
+EXPORT_SYMBOL_GPL(clk_alpha_pll_hwfsm_ops);
+
static unsigned long
clk_alpha_pll_postdiv_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
{
diff --git a/drivers/clk/qcom/clk-alpha-pll.h b/drivers/clk/qcom/clk-alpha-pll.h
index 90ce2016e1a0..d6e1ee2c7348 100644
--- a/drivers/clk/qcom/clk-alpha-pll.h
+++ b/drivers/clk/qcom/clk-alpha-pll.h
@@ -34,6 +34,10 @@ struct clk_alpha_pll {
const struct pll_vco *vco_table;
size_t num_vco;
+#define SUPPORTS_OFFLINE_REQ BIT(0)
+#define SUPPORTS_16BIT_ALPHA BIT(1)
+#define SUPPORTS_FSM_MODE BIT(2)
+ u8 flags;
struct clk_regmap clkr;
};
@@ -51,7 +55,28 @@ struct clk_alpha_pll_postdiv {
struct clk_regmap clkr;
};
+struct alpha_pll_config {
+ u32 l;
+ u32 alpha;
+ u32 config_ctl_val;
+ u32 config_ctl_hi_val;
+ u32 main_output_mask;
+ u32 aux_output_mask;
+ u32 aux2_output_mask;
+ u32 early_output_mask;
+ u32 pre_div_val;
+ u32 pre_div_mask;
+ u32 post_div_val;
+ u32 post_div_mask;
+ u32 vco_val;
+ u32 vco_mask;
+};
+
extern const struct clk_ops clk_alpha_pll_ops;
+extern const struct clk_ops clk_alpha_pll_hwfsm_ops;
extern const struct clk_ops clk_alpha_pll_postdiv_ops;
+void clk_alpha_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap,
+ const struct alpha_pll_config *config);
+
#endif
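
A hypothetical probe-time caller of the new configuration interface (all register values below are illustrative placeholders, not taken from any real PLL):

static const struct alpha_pll_config example_pll_config = {
	.l = 0x20,			/* integer multiplier */
	.alpha = 0x0,			/* no fractional part */
	.main_output_mask = BIT(0),
	.vco_val = 0x0,
	.vco_mask = 0x3 << 20,
};

static void example_pll_setup(struct clk_alpha_pll *pll,
			      struct regmap *regmap)
{
	/* One-shot hardware setup before the PLL is registered */
	clk_alpha_pll_configure(pll, regmap, &example_pll_config);
}
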
diff --git a/drivers/clk/qcom/clk-pll.c b/drivers/clk/qcom/clk-pll.c
index 5b940d629045..cb6cb8710daf 100644
--- a/drivers/clk/qcom/clk-pll.c
+++ b/drivers/clk/qcom/clk-pll.c
@@ -23,16 +23,11 @@
#include <asm/div64.h>
#include "clk-pll.h"
+#include "common.h"
#define PLL_OUTCTRL BIT(0)
#define PLL_BYPASSNL BIT(1)
#define PLL_RESET_N BIT(2)
-#define PLL_LOCK_COUNT_SHIFT 8
-#define PLL_LOCK_COUNT_MASK 0x3f
-#define PLL_BIAS_COUNT_SHIFT 14
-#define PLL_BIAS_COUNT_MASK 0x3f
-#define PLL_VOTE_FSM_ENA BIT(20)
-#define PLL_VOTE_FSM_RESET BIT(21)
static int clk_pll_enable(struct clk_hw *hw)
{
@@ -228,26 +223,6 @@ const struct clk_ops clk_pll_vote_ops = {
};
EXPORT_SYMBOL_GPL(clk_pll_vote_ops);
-static void
-clk_pll_set_fsm_mode(struct clk_pll *pll, struct regmap *regmap, u8 lock_count)
-{
- u32 val;
- u32 mask;
-
- /* De-assert reset to FSM */
- regmap_update_bits(regmap, pll->mode_reg, PLL_VOTE_FSM_RESET, 0);
-
- /* Program bias count and lock count */
- val = 1 << PLL_BIAS_COUNT_SHIFT | lock_count << PLL_LOCK_COUNT_SHIFT;
- mask = PLL_BIAS_COUNT_MASK << PLL_BIAS_COUNT_SHIFT;
- mask |= PLL_LOCK_COUNT_MASK << PLL_LOCK_COUNT_SHIFT;
- regmap_update_bits(regmap, pll->mode_reg, mask, val);
-
- /* Enable PLL FSM voting */
- regmap_update_bits(regmap, pll->mode_reg, PLL_VOTE_FSM_ENA,
- PLL_VOTE_FSM_ENA);
-}
-
static void clk_pll_configure(struct clk_pll *pll, struct regmap *regmap,
const struct pll_config *config)
{
@@ -280,7 +255,7 @@ void clk_pll_configure_sr(struct clk_pll *pll, struct regmap *regmap,
{
clk_pll_configure(pll, regmap, config);
if (fsm_mode)
- clk_pll_set_fsm_mode(pll, regmap, 8);
+ qcom_pll_set_fsm_mode(regmap, pll->mode_reg, 1, 8);
}
EXPORT_SYMBOL_GPL(clk_pll_configure_sr);
@@ -289,7 +264,7 @@ void clk_pll_configure_sr_hpm_lp(struct clk_pll *pll, struct regmap *regmap,
{
clk_pll_configure(pll, regmap, config);
if (fsm_mode)
- clk_pll_set_fsm_mode(pll, regmap, 0);
+ qcom_pll_set_fsm_mode(regmap, pll->mode_reg, 1, 0);
}
EXPORT_SYMBOL_GPL(clk_pll_configure_sr_hpm_lp);
diff --git a/drivers/clk/qcom/clk-rcg.h b/drivers/clk/qcom/clk-rcg.h
index b904c335cda4..1b3e8d265bdb 100644
--- a/drivers/clk/qcom/clk-rcg.h
+++ b/drivers/clk/qcom/clk-rcg.h
@@ -173,6 +173,7 @@ struct clk_rcg2 {
#define to_clk_rcg2(_hw) container_of(to_clk_regmap(_hw), struct clk_rcg2, clkr)
extern const struct clk_ops clk_rcg2_ops;
+extern const struct clk_ops clk_rcg2_floor_ops;
extern const struct clk_ops clk_rcg2_shared_ops;
extern const struct clk_ops clk_edp_pixel_ops;
extern const struct clk_ops clk_byte_ops;
diff --git a/drivers/clk/qcom/clk-rcg2.c b/drivers/clk/qcom/clk-rcg2.c
index a071bba8018c..1a0985ae20d2 100644
--- a/drivers/clk/qcom/clk-rcg2.c
+++ b/drivers/clk/qcom/clk-rcg2.c
@@ -47,6 +47,11 @@
#define N_REG 0xc
#define D_REG 0x10
+enum freq_policy {
+ FLOOR,
+ CEIL,
+};
+
static int clk_rcg2_is_enabled(struct clk_hw *hw)
{
struct clk_rcg2 *rcg = to_clk_rcg2(hw);
@@ -176,15 +181,26 @@ clk_rcg2_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
return calc_rate(parent_rate, m, n, mode, hid_div);
}
-static int _freq_tbl_determine_rate(struct clk_hw *hw,
- const struct freq_tbl *f, struct clk_rate_request *req)
+static int _freq_tbl_determine_rate(struct clk_hw *hw, const struct freq_tbl *f,
+ struct clk_rate_request *req,
+ enum freq_policy policy)
{
unsigned long clk_flags, rate = req->rate;
struct clk_hw *p;
struct clk_rcg2 *rcg = to_clk_rcg2(hw);
int index;
- f = qcom_find_freq(f, rate);
+ switch (policy) {
+ case FLOOR:
+ f = qcom_find_freq_floor(f, rate);
+ break;
+ case CEIL:
+ f = qcom_find_freq(f, rate);
+ break;
+ default:
+ return -EINVAL;
+	}
+
if (!f)
return -EINVAL;
@@ -221,7 +237,15 @@ static int clk_rcg2_determine_rate(struct clk_hw *hw,
{
struct clk_rcg2 *rcg = to_clk_rcg2(hw);
- return _freq_tbl_determine_rate(hw, rcg->freq_tbl, req);
+ return _freq_tbl_determine_rate(hw, rcg->freq_tbl, req, CEIL);
+}
+
+static int clk_rcg2_determine_floor_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
+{
+ struct clk_rcg2 *rcg = to_clk_rcg2(hw);
+
+ return _freq_tbl_determine_rate(hw, rcg->freq_tbl, req, FLOOR);
}
static int clk_rcg2_configure(struct clk_rcg2 *rcg, const struct freq_tbl *f)
@@ -265,12 +289,23 @@ static int clk_rcg2_configure(struct clk_rcg2 *rcg, const struct freq_tbl *f)
return update_config(rcg);
}
-static int __clk_rcg2_set_rate(struct clk_hw *hw, unsigned long rate)
+static int __clk_rcg2_set_rate(struct clk_hw *hw, unsigned long rate,
+ enum freq_policy policy)
{
struct clk_rcg2 *rcg = to_clk_rcg2(hw);
const struct freq_tbl *f;
- f = qcom_find_freq(rcg->freq_tbl, rate);
+ switch (policy) {
+ case FLOOR:
+ f = qcom_find_freq_floor(rcg->freq_tbl, rate);
+ break;
+ case CEIL:
+ f = qcom_find_freq(rcg->freq_tbl, rate);
+ break;
+ default:
+ return -EINVAL;
+	}
+
if (!f)
return -EINVAL;
@@ -280,13 +315,25 @@ static int __clk_rcg2_set_rate(struct clk_hw *hw, unsigned long rate)
static int clk_rcg2_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate)
{
- return __clk_rcg2_set_rate(hw, rate);
+ return __clk_rcg2_set_rate(hw, rate, CEIL);
+}
+
+static int clk_rcg2_set_floor_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ return __clk_rcg2_set_rate(hw, rate, FLOOR);
}
static int clk_rcg2_set_rate_and_parent(struct clk_hw *hw,
unsigned long rate, unsigned long parent_rate, u8 index)
{
- return __clk_rcg2_set_rate(hw, rate);
+ return __clk_rcg2_set_rate(hw, rate, CEIL);
+}
+
+static int clk_rcg2_set_floor_rate_and_parent(struct clk_hw *hw,
+ unsigned long rate, unsigned long parent_rate, u8 index)
+{
+ return __clk_rcg2_set_rate(hw, rate, FLOOR);
}
const struct clk_ops clk_rcg2_ops = {
@@ -300,6 +347,17 @@ const struct clk_ops clk_rcg2_ops = {
};
EXPORT_SYMBOL_GPL(clk_rcg2_ops);
+const struct clk_ops clk_rcg2_floor_ops = {
+ .is_enabled = clk_rcg2_is_enabled,
+ .get_parent = clk_rcg2_get_parent,
+ .set_parent = clk_rcg2_set_parent,
+ .recalc_rate = clk_rcg2_recalc_rate,
+ .determine_rate = clk_rcg2_determine_floor_rate,
+ .set_rate = clk_rcg2_set_floor_rate,
+ .set_rate_and_parent = clk_rcg2_set_floor_rate_and_parent,
+};
+EXPORT_SYMBOL_GPL(clk_rcg2_floor_ops);
+
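To illustrate the two policies with a hypothetical frequency table (the F() initializer and the parent indices are placeholders borrowed from other qcom drivers): for a 150 MHz request, the CEIL lookup behind clk_rcg2_ops picks the lowest entry at or above the request, while the FLOOR lookup behind clk_rcg2_floor_ops picks the highest entry at or below it, which suits consumers that must never be clocked faster than asked.

static const struct freq_tbl example_tbl[] = {
	F(19200000, P_XO, 1, 0, 0),
	F(100000000, P_GPLL0, 6, 0, 0),
	F(200000000, P_GPLL0, 3, 0, 0),
	{ }
};

/* qcom_find_freq(example_tbl, 150000000)       -> the 200 MHz entry (CEIL)
 * qcom_find_freq_floor(example_tbl, 150000000) -> the 100 MHz entry (FLOOR)
 */
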
static int clk_rcg2_shared_force_enable(struct clk_hw *hw, unsigned long rate)
{
struct clk_rcg2 *rcg = to_clk_rcg2(hw);
@@ -323,7 +381,7 @@ static int clk_rcg2_shared_force_enable(struct clk_hw *hw, unsigned long rate)
pr_err("%s: RCG did not turn on\n", name);
/* set clock rate */
- ret = __clk_rcg2_set_rate(hw, rate);
+ ret = __clk_rcg2_set_rate(hw, rate, CEIL);
if (ret)
return ret;
diff --git a/drivers/clk/qcom/clk-rpm.c b/drivers/clk/qcom/clk-rpm.c
new file mode 100644
index 000000000000..df3e5fe8442a
--- /dev/null
+++ b/drivers/clk/qcom/clk-rpm.c
@@ -0,0 +1,497 @@
+/*
+ * Copyright (c) 2016, Linaro Limited
+ * Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/err.h>
+#include <linux/export.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/mfd/qcom_rpm.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+
+#include <dt-bindings/mfd/qcom-rpm.h>
+#include <dt-bindings/clock/qcom,rpmcc.h>
+
+#define QCOM_RPM_MISC_CLK_TYPE 0x306b6c63
+#define QCOM_RPM_SCALING_ENABLE_ID 0x2
+
+#define DEFINE_CLK_RPM(_platform, _name, _active, r_id) \
+ static struct clk_rpm _platform##_##_active; \
+ static struct clk_rpm _platform##_##_name = { \
+ .rpm_clk_id = (r_id), \
+ .peer = &_platform##_##_active, \
+ .rate = INT_MAX, \
+ .hw.init = &(struct clk_init_data){ \
+ .ops = &clk_rpm_ops, \
+ .name = #_name, \
+ .parent_names = (const char *[]){ "pxo_board" }, \
+ .num_parents = 1, \
+ }, \
+ }; \
+ static struct clk_rpm _platform##_##_active = { \
+ .rpm_clk_id = (r_id), \
+ .peer = &_platform##_##_name, \
+ .active_only = true, \
+ .rate = INT_MAX, \
+ .hw.init = &(struct clk_init_data){ \
+ .ops = &clk_rpm_ops, \
+ .name = #_active, \
+ .parent_names = (const char *[]){ "pxo_board" }, \
+ .num_parents = 1, \
+ }, \
+ }
+
+#define DEFINE_CLK_RPM_PXO_BRANCH(_platform, _name, _active, r_id, r) \
+ static struct clk_rpm _platform##_##_active; \
+ static struct clk_rpm _platform##_##_name = { \
+ .rpm_clk_id = (r_id), \
+ .active_only = true, \
+ .peer = &_platform##_##_active, \
+ .rate = (r), \
+ .branch = true, \
+ .hw.init = &(struct clk_init_data){ \
+ .ops = &clk_rpm_branch_ops, \
+ .name = #_name, \
+ .parent_names = (const char *[]){ "pxo_board" }, \
+ .num_parents = 1, \
+ }, \
+ }; \
+ static struct clk_rpm _platform##_##_active = { \
+ .rpm_clk_id = (r_id), \
+ .peer = &_platform##_##_name, \
+ .rate = (r), \
+ .branch = true, \
+ .hw.init = &(struct clk_init_data){ \
+ .ops = &clk_rpm_branch_ops, \
+ .name = #_active, \
+ .parent_names = (const char *[]){ "pxo_board" }, \
+ .num_parents = 1, \
+ }, \
+ }
+
+#define DEFINE_CLK_RPM_CXO_BRANCH(_platform, _name, _active, r_id, r) \
+ static struct clk_rpm _platform##_##_active; \
+ static struct clk_rpm _platform##_##_name = { \
+ .rpm_clk_id = (r_id), \
+ .peer = &_platform##_##_active, \
+ .rate = (r), \
+ .branch = true, \
+ .hw.init = &(struct clk_init_data){ \
+ .ops = &clk_rpm_branch_ops, \
+ .name = #_name, \
+ .parent_names = (const char *[]){ "cxo_board" }, \
+ .num_parents = 1, \
+ }, \
+ }; \
+ static struct clk_rpm _platform##_##_active = { \
+ .rpm_clk_id = (r_id), \
+ .active_only = true, \
+ .peer = &_platform##_##_name, \
+ .rate = (r), \
+ .branch = true, \
+ .hw.init = &(struct clk_init_data){ \
+ .ops = &clk_rpm_branch_ops, \
+ .name = #_active, \
+ .parent_names = (const char *[]){ "cxo_board" }, \
+ .num_parents = 1, \
+ }, \
+ }
+
+#define to_clk_rpm(_hw) container_of(_hw, struct clk_rpm, hw)
+
+struct clk_rpm {
+ const int rpm_clk_id;
+ const bool active_only;
+ unsigned long rate;
+ bool enabled;
+ bool branch;
+ struct clk_rpm *peer;
+ struct clk_hw hw;
+ struct qcom_rpm *rpm;
+};
+
+struct rpm_cc {
+ struct qcom_rpm *rpm;
+ struct clk_rpm **clks;
+ size_t num_clks;
+};
+
+struct rpm_clk_desc {
+ struct clk_rpm **clks;
+ size_t num_clks;
+};
+
+static DEFINE_MUTEX(rpm_clk_lock);
+
+static int clk_rpm_handoff(struct clk_rpm *r)
+{
+ int ret;
+ u32 value = INT_MAX;
+
+ ret = qcom_rpm_write(r->rpm, QCOM_RPM_ACTIVE_STATE,
+ r->rpm_clk_id, &value, 1);
+ if (ret)
+ return ret;
+ ret = qcom_rpm_write(r->rpm, QCOM_RPM_SLEEP_STATE,
+ r->rpm_clk_id, &value, 1);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int clk_rpm_set_rate_active(struct clk_rpm *r, unsigned long rate)
+{
+ u32 value = DIV_ROUND_UP(rate, 1000); /* to kHz */
+
+ return qcom_rpm_write(r->rpm, QCOM_RPM_ACTIVE_STATE,
+ r->rpm_clk_id, &value, 1);
+}
+
+static int clk_rpm_set_rate_sleep(struct clk_rpm *r, unsigned long rate)
+{
+ u32 value = DIV_ROUND_UP(rate, 1000); /* to kHz */
+
+ return qcom_rpm_write(r->rpm, QCOM_RPM_SLEEP_STATE,
+ r->rpm_clk_id, &value, 1);
+}
+
+static void to_active_sleep(struct clk_rpm *r, unsigned long rate,
+ unsigned long *active, unsigned long *sleep)
+{
+ *active = rate;
+
+ /*
+ * Active-only clocks don't care what the rate is during sleep. So,
+ * they vote for zero.
+ */
+ if (r->active_only)
+ *sleep = 0;
+ else
+ *sleep = *active;
+}
+
+static int clk_rpm_prepare(struct clk_hw *hw)
+{
+ struct clk_rpm *r = to_clk_rpm(hw);
+ struct clk_rpm *peer = r->peer;
+ unsigned long this_rate = 0, this_sleep_rate = 0;
+ unsigned long peer_rate = 0, peer_sleep_rate = 0;
+ unsigned long active_rate, sleep_rate;
+ int ret = 0;
+
+ mutex_lock(&rpm_clk_lock);
+
+ /* Don't send requests to the RPM if the rate has not been set. */
+ if (!r->rate)
+ goto out;
+
+ to_active_sleep(r, r->rate, &this_rate, &this_sleep_rate);
+
+ /* Take peer clock's rate into account only if it's enabled. */
+ if (peer->enabled)
+ to_active_sleep(peer, peer->rate,
+ &peer_rate, &peer_sleep_rate);
+
+ active_rate = max(this_rate, peer_rate);
+
+ if (r->branch)
+ active_rate = !!active_rate;
+
+ ret = clk_rpm_set_rate_active(r, active_rate);
+ if (ret)
+ goto out;
+
+ sleep_rate = max(this_sleep_rate, peer_sleep_rate);
+ if (r->branch)
+ sleep_rate = !!sleep_rate;
+
+ ret = clk_rpm_set_rate_sleep(r, sleep_rate);
+ if (ret)
+ /* Undo the active set vote and restore it */
+ ret = clk_rpm_set_rate_active(r, peer_rate);
+
+out:
+ if (!ret)
+ r->enabled = true;
+
+ mutex_unlock(&rpm_clk_lock);
+
+ return ret;
+}
+
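To make the voting concrete with hypothetical rates: if ebi1_clk is prepared at 400 MHz while its active-only peer ebi1_a_clk is prepared at 533 MHz, to_active_sleep() yields (400 MHz, 400 MHz) for the former and (533 MHz, 0) for the latter, so the RPM is sent an active-set request of max(400, 533) = 533 MHz and a sleep-set request of max(400, 0) = 400 MHz. Once only the active-only peer remains prepared, the sleep vote drops to zero.
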
+static void clk_rpm_unprepare(struct clk_hw *hw)
+{
+ struct clk_rpm *r = to_clk_rpm(hw);
+ struct clk_rpm *peer = r->peer;
+ unsigned long peer_rate = 0, peer_sleep_rate = 0;
+ unsigned long active_rate, sleep_rate;
+ int ret;
+
+ mutex_lock(&rpm_clk_lock);
+
+ if (!r->rate)
+ goto out;
+
+ /* Take peer clock's rate into account only if it's enabled. */
+ if (peer->enabled)
+ to_active_sleep(peer, peer->rate, &peer_rate,
+ &peer_sleep_rate);
+
+ active_rate = r->branch ? !!peer_rate : peer_rate;
+ ret = clk_rpm_set_rate_active(r, active_rate);
+ if (ret)
+ goto out;
+
+ sleep_rate = r->branch ? !!peer_sleep_rate : peer_sleep_rate;
+ ret = clk_rpm_set_rate_sleep(r, sleep_rate);
+ if (ret)
+ goto out;
+
+ r->enabled = false;
+
+out:
+ mutex_unlock(&rpm_clk_lock);
+}
+
+static int clk_rpm_set_rate(struct clk_hw *hw,
+ unsigned long rate, unsigned long parent_rate)
+{
+ struct clk_rpm *r = to_clk_rpm(hw);
+ struct clk_rpm *peer = r->peer;
+ unsigned long active_rate, sleep_rate;
+ unsigned long this_rate = 0, this_sleep_rate = 0;
+ unsigned long peer_rate = 0, peer_sleep_rate = 0;
+ int ret = 0;
+
+ mutex_lock(&rpm_clk_lock);
+
+ if (!r->enabled)
+ goto out;
+
+ to_active_sleep(r, rate, &this_rate, &this_sleep_rate);
+
+ /* Take peer clock's rate into account only if it's enabled. */
+ if (peer->enabled)
+ to_active_sleep(peer, peer->rate,
+ &peer_rate, &peer_sleep_rate);
+
+ active_rate = max(this_rate, peer_rate);
+ ret = clk_rpm_set_rate_active(r, active_rate);
+ if (ret)
+ goto out;
+
+ sleep_rate = max(this_sleep_rate, peer_sleep_rate);
+ ret = clk_rpm_set_rate_sleep(r, sleep_rate);
+ if (ret)
+ goto out;
+
+ r->rate = rate;
+
+out:
+ mutex_unlock(&rpm_clk_lock);
+
+ return ret;
+}
+
+static long clk_rpm_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *parent_rate)
+{
+ /*
+ * RPM handles rate rounding and we don't have a way to
+ * know what the rate will be, so just return whatever
+ * rate is requested.
+ */
+ return rate;
+}
+
+static unsigned long clk_rpm_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct clk_rpm *r = to_clk_rpm(hw);
+
+ /*
+ * RPM handles rate rounding and we don't have a way to
+ * know what the rate will be, so just return whatever
+ * rate was set.
+ */
+ return r->rate;
+}
+
+static const struct clk_ops clk_rpm_ops = {
+ .prepare = clk_rpm_prepare,
+ .unprepare = clk_rpm_unprepare,
+ .set_rate = clk_rpm_set_rate,
+ .round_rate = clk_rpm_round_rate,
+ .recalc_rate = clk_rpm_recalc_rate,
+};
+
+static const struct clk_ops clk_rpm_branch_ops = {
+ .prepare = clk_rpm_prepare,
+ .unprepare = clk_rpm_unprepare,
+ .round_rate = clk_rpm_round_rate,
+ .recalc_rate = clk_rpm_recalc_rate,
+};
+
+/* apq8064 */
+DEFINE_CLK_RPM(apq8064, afab_clk, afab_a_clk, QCOM_RPM_APPS_FABRIC_CLK);
+DEFINE_CLK_RPM(apq8064, cfpb_clk, cfpb_a_clk, QCOM_RPM_CFPB_CLK);
+DEFINE_CLK_RPM(apq8064, daytona_clk, daytona_a_clk, QCOM_RPM_DAYTONA_FABRIC_CLK);
+DEFINE_CLK_RPM(apq8064, ebi1_clk, ebi1_a_clk, QCOM_RPM_EBI1_CLK);
+DEFINE_CLK_RPM(apq8064, mmfab_clk, mmfab_a_clk, QCOM_RPM_MM_FABRIC_CLK);
+DEFINE_CLK_RPM(apq8064, mmfpb_clk, mmfpb_a_clk, QCOM_RPM_MMFPB_CLK);
+DEFINE_CLK_RPM(apq8064, sfab_clk, sfab_a_clk, QCOM_RPM_SYS_FABRIC_CLK);
+DEFINE_CLK_RPM(apq8064, sfpb_clk, sfpb_a_clk, QCOM_RPM_SFPB_CLK);
+DEFINE_CLK_RPM(apq8064, qdss_clk, qdss_a_clk, QCOM_RPM_QDSS_CLK);
+
+static struct clk_rpm *apq8064_clks[] = {
+ [RPM_APPS_FABRIC_CLK] = &apq8064_afab_clk,
+ [RPM_APPS_FABRIC_A_CLK] = &apq8064_afab_a_clk,
+ [RPM_CFPB_CLK] = &apq8064_cfpb_clk,
+ [RPM_CFPB_A_CLK] = &apq8064_cfpb_a_clk,
+ [RPM_DAYTONA_FABRIC_CLK] = &apq8064_daytona_clk,
+ [RPM_DAYTONA_FABRIC_A_CLK] = &apq8064_daytona_a_clk,
+ [RPM_EBI1_CLK] = &apq8064_ebi1_clk,
+ [RPM_EBI1_A_CLK] = &apq8064_ebi1_a_clk,
+ [RPM_MM_FABRIC_CLK] = &apq8064_mmfab_clk,
+ [RPM_MM_FABRIC_A_CLK] = &apq8064_mmfab_a_clk,
+ [RPM_MMFPB_CLK] = &apq8064_mmfpb_clk,
+ [RPM_MMFPB_A_CLK] = &apq8064_mmfpb_a_clk,
+ [RPM_SYS_FABRIC_CLK] = &apq8064_sfab_clk,
+ [RPM_SYS_FABRIC_A_CLK] = &apq8064_sfab_a_clk,
+ [RPM_SFPB_CLK] = &apq8064_sfpb_clk,
+ [RPM_SFPB_A_CLK] = &apq8064_sfpb_a_clk,
+ [RPM_QDSS_CLK] = &apq8064_qdss_clk,
+ [RPM_QDSS_A_CLK] = &apq8064_qdss_a_clk,
+};
+
+static const struct rpm_clk_desc rpm_clk_apq8064 = {
+ .clks = apq8064_clks,
+ .num_clks = ARRAY_SIZE(apq8064_clks),
+};
+
+static const struct of_device_id rpm_clk_match_table[] = {
+ { .compatible = "qcom,rpmcc-apq8064", .data = &rpm_clk_apq8064 },
+ { }
+};
+MODULE_DEVICE_TABLE(of, rpm_clk_match_table);
+
+static struct clk_hw *qcom_rpm_clk_hw_get(struct of_phandle_args *clkspec,
+ void *data)
+{
+ struct rpm_cc *rcc = data;
+ unsigned int idx = clkspec->args[0];
+
+ if (idx >= rcc->num_clks) {
+ pr_err("%s: invalid index %u\n", __func__, idx);
+ return ERR_PTR(-EINVAL);
+ }
+
+ return rcc->clks[idx] ? &rcc->clks[idx]->hw : ERR_PTR(-ENOENT);
+}
+
+static int rpm_clk_probe(struct platform_device *pdev)
+{
+ struct rpm_cc *rcc;
+ int ret;
+ size_t num_clks, i;
+ struct qcom_rpm *rpm;
+ struct clk_rpm **rpm_clks;
+ const struct rpm_clk_desc *desc;
+
+ rpm = dev_get_drvdata(pdev->dev.parent);
+ if (!rpm) {
+ dev_err(&pdev->dev, "Unable to retrieve handle to RPM\n");
+ return -ENODEV;
+ }
+
+ desc = of_device_get_match_data(&pdev->dev);
+ if (!desc)
+ return -EINVAL;
+
+ rpm_clks = desc->clks;
+ num_clks = desc->num_clks;
+
+ rcc = devm_kzalloc(&pdev->dev, sizeof(*rcc), GFP_KERNEL);
+ if (!rcc)
+ return -ENOMEM;
+
+ rcc->clks = rpm_clks;
+ rcc->num_clks = num_clks;
+
+ for (i = 0; i < num_clks; i++) {
+ if (!rpm_clks[i])
+ continue;
+
+ rpm_clks[i]->rpm = rpm;
+
+ ret = clk_rpm_handoff(rpm_clks[i]);
+ if (ret)
+ goto err;
+ }
+
+ for (i = 0; i < num_clks; i++) {
+ if (!rpm_clks[i])
+ continue;
+
+ ret = devm_clk_hw_register(&pdev->dev, &rpm_clks[i]->hw);
+ if (ret)
+ goto err;
+ }
+
+ ret = of_clk_add_hw_provider(pdev->dev.of_node, qcom_rpm_clk_hw_get,
+ rcc);
+ if (ret)
+ goto err;
+
+ return 0;
+err:
+ dev_err(&pdev->dev, "Error registering RPM Clock driver (%d)\n", ret);
+ return ret;
+}
+
+static int rpm_clk_remove(struct platform_device *pdev)
+{
+ of_clk_del_provider(pdev->dev.of_node);
+ return 0;
+}
+
+static struct platform_driver rpm_clk_driver = {
+ .driver = {
+ .name = "qcom-clk-rpm",
+ .of_match_table = rpm_clk_match_table,
+ },
+ .probe = rpm_clk_probe,
+ .remove = rpm_clk_remove,
+};
+
+static int __init rpm_clk_init(void)
+{
+ return platform_driver_register(&rpm_clk_driver);
+}
+core_initcall(rpm_clk_init);
+
+static void __exit rpm_clk_exit(void)
+{
+ platform_driver_unregister(&rpm_clk_driver);
+}
+module_exit(rpm_clk_exit);
+
+MODULE_DESCRIPTION("Qualcomm RPM Clock Controller Driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:qcom-clk-rpm");
diff --git a/drivers/clk/qcom/clk-smd-rpm.c b/drivers/clk/qcom/clk-smd-rpm.c
new file mode 100644
index 000000000000..07e2cc6ed781
--- /dev/null
+++ b/drivers/clk/qcom/clk-smd-rpm.c
@@ -0,0 +1,578 @@
+/*
+ * Copyright (c) 2016, Linaro Limited
+ * Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/err.h>
+#include <linux/export.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/soc/qcom/smd-rpm.h>
+
+#include <dt-bindings/clock/qcom,rpmcc.h>
+#include <dt-bindings/mfd/qcom-rpm.h>
+
+#define QCOM_RPM_KEY_SOFTWARE_ENABLE 0x6e657773
+#define QCOM_RPM_KEY_PIN_CTRL_CLK_BUFFER_ENABLE_KEY 0x62636370
+#define QCOM_RPM_SMD_KEY_RATE 0x007a484b
+#define QCOM_RPM_SMD_KEY_ENABLE 0x62616e45
+#define QCOM_RPM_SMD_KEY_STATE 0x54415453
+#define QCOM_RPM_SCALING_ENABLE_ID 0x2
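+
+/*
+ * The key values above are little-endian ASCII tags as the RPM
+ * firmware reads them: 0x6e657773 is "swen", 0x62636370 is "pccb",
+ * 0x007a484b is "KHz", 0x62616e45 is "Enab" and 0x54415453 is "STAT".
+ */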
+
+#define __DEFINE_CLK_SMD_RPM(_platform, _name, _active, type, r_id, stat_id, \
+ key) \
+ static struct clk_smd_rpm _platform##_##_active; \
+ static struct clk_smd_rpm _platform##_##_name = { \
+ .rpm_res_type = (type), \
+ .rpm_clk_id = (r_id), \
+ .rpm_status_id = (stat_id), \
+ .rpm_key = (key), \
+ .peer = &_platform##_##_active, \
+ .rate = INT_MAX, \
+ .hw.init = &(struct clk_init_data){ \
+ .ops = &clk_smd_rpm_ops, \
+ .name = #_name, \
+ .parent_names = (const char *[]){ "xo_board" }, \
+ .num_parents = 1, \
+ }, \
+ }; \
+ static struct clk_smd_rpm _platform##_##_active = { \
+ .rpm_res_type = (type), \
+ .rpm_clk_id = (r_id), \
+ .rpm_status_id = (stat_id), \
+ .active_only = true, \
+ .rpm_key = (key), \
+ .peer = &_platform##_##_name, \
+ .rate = INT_MAX, \
+ .hw.init = &(struct clk_init_data){ \
+ .ops = &clk_smd_rpm_ops, \
+ .name = #_active, \
+ .parent_names = (const char *[]){ "xo_board" }, \
+ .num_parents = 1, \
+ }, \
+ }
+
+#define __DEFINE_CLK_SMD_RPM_BRANCH(_platform, _name, _active, type, r_id, \
+ stat_id, r, key) \
+ static struct clk_smd_rpm _platform##_##_active; \
+ static struct clk_smd_rpm _platform##_##_name = { \
+ .rpm_res_type = (type), \
+ .rpm_clk_id = (r_id), \
+ .rpm_status_id = (stat_id), \
+ .rpm_key = (key), \
+ .branch = true, \
+ .peer = &_platform##_##_active, \
+ .rate = (r), \
+ .hw.init = &(struct clk_init_data){ \
+ .ops = &clk_smd_rpm_branch_ops, \
+ .name = #_name, \
+ .parent_names = (const char *[]){ "xo_board" }, \
+ .num_parents = 1, \
+ }, \
+ }; \
+ static struct clk_smd_rpm _platform##_##_active = { \
+ .rpm_res_type = (type), \
+ .rpm_clk_id = (r_id), \
+ .rpm_status_id = (stat_id), \
+ .active_only = true, \
+ .rpm_key = (key), \
+ .branch = true, \
+ .peer = &_platform##_##_name, \
+ .rate = (r), \
+ .hw.init = &(struct clk_init_data){ \
+ .ops = &clk_smd_rpm_branch_ops, \
+ .name = #_active, \
+ .parent_names = (const char *[]){ "xo_board" }, \
+ .num_parents = 1, \
+ }, \
+ }
+
+#define DEFINE_CLK_SMD_RPM(_platform, _name, _active, type, r_id) \
+ __DEFINE_CLK_SMD_RPM(_platform, _name, _active, type, r_id, \
+ 0, QCOM_RPM_SMD_KEY_RATE)
+
+#define DEFINE_CLK_SMD_RPM_BRANCH(_platform, _name, _active, type, r_id, r) \
+ __DEFINE_CLK_SMD_RPM_BRANCH(_platform, _name, _active, type, \
+ r_id, 0, r, QCOM_RPM_SMD_KEY_ENABLE)
+
+#define DEFINE_CLK_SMD_RPM_QDSS(_platform, _name, _active, type, r_id) \
+ __DEFINE_CLK_SMD_RPM(_platform, _name, _active, type, r_id, \
+ 0, QCOM_RPM_SMD_KEY_STATE)
+
+#define DEFINE_CLK_SMD_RPM_XO_BUFFER(_platform, _name, _active, r_id) \
+ __DEFINE_CLK_SMD_RPM_BRANCH(_platform, _name, _active, \
+ QCOM_SMD_RPM_CLK_BUF_A, r_id, 0, 1000, \
+ QCOM_RPM_KEY_SOFTWARE_ENABLE)
+
+#define DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(_platform, _name, _active, r_id) \
+ __DEFINE_CLK_SMD_RPM_BRANCH(_platform, _name, _active, \
+ QCOM_SMD_RPM_CLK_BUF_A, r_id, 0, 1000, \
+ QCOM_RPM_KEY_PIN_CTRL_CLK_BUFFER_ENABLE_KEY)
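+
+/*
+ * Expansion example: DEFINE_CLK_SMD_RPM(msm8916, bimc_clk, bimc_a_clk,
+ * QCOM_SMD_RPM_MEM_CLK, 0) emits two peered clk_smd_rpm instances,
+ * msm8916_bimc_clk and msm8916_bimc_a_clk; the _a_ variant carries
+ * active_only = true, so its vote is dropped from the sleep set.
+ */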
+
+#define to_clk_smd_rpm(_hw) container_of(_hw, struct clk_smd_rpm, hw)
+
+struct clk_smd_rpm {
+ const int rpm_res_type;
+ const int rpm_key;
+ const int rpm_clk_id;
+ const int rpm_status_id;
+ const bool active_only;
+ bool enabled;
+ bool branch;
+ struct clk_smd_rpm *peer;
+ struct clk_hw hw;
+ unsigned long rate;
+ struct qcom_smd_rpm *rpm;
+};
+
+struct clk_smd_rpm_req {
+ __le32 key;
+ __le32 nbytes;
+ __le32 value;
+};
+
+struct rpm_cc {
+ struct qcom_smd_rpm *rpm;
+ struct clk_smd_rpm **clks;
+ size_t num_clks;
+};
+
+struct rpm_smd_clk_desc {
+ struct clk_smd_rpm **clks;
+ size_t num_clks;
+};
+
+static DEFINE_MUTEX(rpm_smd_clk_lock);
+
+static int clk_smd_rpm_handoff(struct clk_smd_rpm *r)
+{
+ int ret;
+ struct clk_smd_rpm_req req = {
+ .key = cpu_to_le32(r->rpm_key),
+ .nbytes = cpu_to_le32(sizeof(u32)),
+ .value = cpu_to_le32(INT_MAX),
+ };
+
+ ret = qcom_rpm_smd_write(r->rpm, QCOM_SMD_RPM_ACTIVE_STATE,
+ r->rpm_res_type, r->rpm_clk_id, &req,
+ sizeof(req));
+ if (ret)
+ return ret;
+ ret = qcom_rpm_smd_write(r->rpm, QCOM_SMD_RPM_SLEEP_STATE,
+ r->rpm_res_type, r->rpm_clk_id, &req,
+ sizeof(req));
+ if (ret)
+ return ret;
+
+ return 0;
+}
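+
+/*
+ * Handoff sketch: each clock's key is voted with INT_MAX on both the
+ * active and the sleep set; the RPM is expected to clamp this to the
+ * resource's supported maximum, keeping boot-critical bus and memory
+ * clocks running until Linux consumers take over with real votes.
+ */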
+
+static int clk_smd_rpm_set_rate_active(struct clk_smd_rpm *r,
+ unsigned long rate)
+{
+ struct clk_smd_rpm_req req = {
+ .key = cpu_to_le32(r->rpm_key),
+ .nbytes = cpu_to_le32(sizeof(u32)),
+ .value = cpu_to_le32(DIV_ROUND_UP(rate, 1000)), /* to kHz */
+ };
+
+ return qcom_rpm_smd_write(r->rpm, QCOM_SMD_RPM_ACTIVE_STATE,
+ r->rpm_res_type, r->rpm_clk_id, &req,
+ sizeof(req));
+}
+
+static int clk_smd_rpm_set_rate_sleep(struct clk_smd_rpm *r,
+ unsigned long rate)
+{
+ struct clk_smd_rpm_req req = {
+ .key = cpu_to_le32(r->rpm_key),
+ .nbytes = cpu_to_le32(sizeof(u32)),
+ .value = cpu_to_le32(DIV_ROUND_UP(rate, 1000)), /* to kHz */
+ };
+
+ return qcom_rpm_smd_write(r->rpm, QCOM_SMD_RPM_SLEEP_STATE,
+ r->rpm_res_type, r->rpm_clk_id, &req,
+ sizeof(req));
+}
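+
+/*
+ * Conversion example for both helpers above:
+ * DIV_ROUND_UP(19200000, 1000) = 19200, so a 19.2 MHz request goes to
+ * the RPM as 19200 kHz; rounding up means a vote never asks for less
+ * than the caller requested.
+ */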
+
+static void to_active_sleep(struct clk_smd_rpm *r, unsigned long rate,
+ unsigned long *active, unsigned long *sleep)
+{
+ *active = rate;
+
+ /*
+ * Active-only clocks don't care what the rate is during sleep. So,
+ * they vote for zero.
+ */
+ if (r->active_only)
+ *sleep = 0;
+ else
+ *sleep = *active;
+}
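+
+/*
+ * Example: a regular clock voted at 200 MHz yields
+ * *active = *sleep = 200000000, while an active-only clock yields
+ * *sleep = 0, dropping its vote whenever the subsystem sleeps.
+ */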
+
+static int clk_smd_rpm_prepare(struct clk_hw *hw)
+{
+ struct clk_smd_rpm *r = to_clk_smd_rpm(hw);
+ struct clk_smd_rpm *peer = r->peer;
+ unsigned long this_rate = 0, this_sleep_rate = 0;
+ unsigned long peer_rate = 0, peer_sleep_rate = 0;
+ unsigned long active_rate, sleep_rate;
+ int ret = 0;
+
+ mutex_lock(&rpm_smd_clk_lock);
+
+ /* Don't send requests to the RPM if the rate has not been set. */
+ if (!r->rate)
+ goto out;
+
+ to_active_sleep(r, r->rate, &this_rate, &this_sleep_rate);
+
+ /* Take peer clock's rate into account only if it's enabled. */
+ if (peer->enabled)
+ to_active_sleep(peer, peer->rate,
+ &peer_rate, &peer_sleep_rate);
+
+ active_rate = max(this_rate, peer_rate);
+
+ if (r->branch)
+ active_rate = !!active_rate;
+
+ ret = clk_smd_rpm_set_rate_active(r, active_rate);
+ if (ret)
+ goto out;
+
+ sleep_rate = max(this_sleep_rate, peer_sleep_rate);
+ if (r->branch)
+ sleep_rate = !!sleep_rate;
+
+ ret = clk_smd_rpm_set_rate_sleep(r, sleep_rate);
+ if (ret)
+ /* Undo the active set vote and restore it to peer_rate */
+ ret = clk_smd_rpm_set_rate_active(r, peer_rate);
+
+out:
+ if (!ret)
+ r->enabled = true;
+
+ mutex_unlock(&rpm_smd_clk_lock);
+
+ return ret;
+}
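+
+/*
+ * Worked example (hypothetical rates): preparing a clock set to
+ * 100 MHz whose active-only peer is enabled at 150 MHz votes
+ * max(100, 150) = 150 MHz on the active set and max(100, 0) = 100 MHz
+ * on the sleep set; branch clocks collapse both votes to 0 or 1.
+ */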
+
+static void clk_smd_rpm_unprepare(struct clk_hw *hw)
+{
+ struct clk_smd_rpm *r = to_clk_smd_rpm(hw);
+ struct clk_smd_rpm *peer = r->peer;
+ unsigned long peer_rate = 0, peer_sleep_rate = 0;
+ unsigned long active_rate, sleep_rate;
+ int ret;
+
+ mutex_lock(&rpm_smd_clk_lock);
+
+ if (!r->rate)
+ goto out;
+
+ /* Take peer clock's rate into account only if it's enabled. */
+ if (peer->enabled)
+ to_active_sleep(peer, peer->rate, &peer_rate,
+ &peer_sleep_rate);
+
+ active_rate = r->branch ? !!peer_rate : peer_rate;
+ ret = clk_smd_rpm_set_rate_active(r, active_rate);
+ if (ret)
+ goto out;
+
+ sleep_rate = r->branch ? !!peer_sleep_rate : peer_sleep_rate;
+ ret = clk_smd_rpm_set_rate_sleep(r, sleep_rate);
+ if (ret)
+ goto out;
+
+ r->enabled = false;
+
+out:
+ mutex_unlock(&rpm_smd_clk_lock);
+}
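+
+/*
+ * Continuing the example above: unpreparing that clock drops its own
+ * contribution, leaving only the peer's votes, i.e. 150 MHz on the
+ * active set and 0 on the sleep set.
+ */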
+
+static int clk_smd_rpm_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct clk_smd_rpm *r = to_clk_smd_rpm(hw);
+ struct clk_smd_rpm *peer = r->peer;
+ unsigned long active_rate, sleep_rate;
+ unsigned long this_rate = 0, this_sleep_rate = 0;
+ unsigned long peer_rate = 0, peer_sleep_rate = 0;
+ int ret = 0;
+
+ mutex_lock(&rpm_smd_clk_lock);
+
+ if (!r->enabled)
+ goto out;
+
+ to_active_sleep(r, rate, &this_rate, &this_sleep_rate);
+
+ /* Take peer clock's rate into account only if it's enabled. */
+ if (peer->enabled)
+ to_active_sleep(peer, peer->rate,
+ &peer_rate, &peer_sleep_rate);
+
+ active_rate = max(this_rate, peer_rate);
+ ret = clk_smd_rpm_set_rate_active(r, active_rate);
+ if (ret)
+ goto out;
+
+ sleep_rate = max(this_sleep_rate, peer_sleep_rate);
+ ret = clk_smd_rpm_set_rate_sleep(r, sleep_rate);
+ if (ret)
+ goto out;
+
+ r->rate = rate;
+
+out:
+ mutex_unlock(&rpm_smd_clk_lock);
+
+ return ret;
+}
+
+static long clk_smd_rpm_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *parent_rate)
+{
+ /*
+ * RPM handles rate rounding and we don't have a way to
+ * know what the rate will be, so just return whatever
+ * rate is requested.
+ */
+ return rate;
+}
+
+static unsigned long clk_smd_rpm_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct clk_smd_rpm *r = to_clk_smd_rpm(hw);
+
+ /*
+ * RPM handles rate rounding and we don't have a way to
+ * know what the rate will be, so just return whatever
+ * rate was set.
+ */
+ return r->rate;
+}
+
+static int clk_smd_rpm_enable_scaling(struct qcom_smd_rpm *rpm)
+{
+ int ret;
+ struct clk_smd_rpm_req req = {
+ .key = cpu_to_le32(QCOM_RPM_SMD_KEY_ENABLE),
+ .nbytes = cpu_to_le32(sizeof(u32)),
+ .value = cpu_to_le32(1),
+ };
+
+ ret = qcom_rpm_smd_write(rpm, QCOM_SMD_RPM_SLEEP_STATE,
+ QCOM_SMD_RPM_MISC_CLK,
+ QCOM_RPM_SCALING_ENABLE_ID, &req, sizeof(req));
+ if (ret) {
+ pr_err("RPM clock scaling (sleep set) not enabled!\n");
+ return ret;
+ }
+
+ ret = qcom_rpm_smd_write(rpm, QCOM_SMD_RPM_ACTIVE_STATE,
+ QCOM_SMD_RPM_MISC_CLK,
+ QCOM_RPM_SCALING_ENABLE_ID, &req, sizeof(req));
+ if (ret) {
+ pr_err("RPM clock scaling (active set) not enabled!\n");
+ return ret;
+ }
+
+ pr_debug("%s: RPM clock scaling is enabled\n", __func__);
+ return 0;
+}
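+
+/*
+ * Probe invokes this once, after handoff but before the clocks are
+ * registered; without the QCOM_RPM_SCALING_ENABLE_ID write the RPM
+ * apparently leaves clock scaling off, and rate votes would not take
+ * effect.
+ */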
+
+static const struct clk_ops clk_smd_rpm_ops = {
+ .prepare = clk_smd_rpm_prepare,
+ .unprepare = clk_smd_rpm_unprepare,
+ .set_rate = clk_smd_rpm_set_rate,
+ .round_rate = clk_smd_rpm_round_rate,
+ .recalc_rate = clk_smd_rpm_recalc_rate,
+};
+
+static const struct clk_ops clk_smd_rpm_branch_ops = {
+ .prepare = clk_smd_rpm_prepare,
+ .unprepare = clk_smd_rpm_unprepare,
+ .round_rate = clk_smd_rpm_round_rate,
+ .recalc_rate = clk_smd_rpm_recalc_rate,
+};
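+
+/*
+ * Note that the branch variant omits .set_rate: branch clocks are
+ * fixed-rate on/off resources, so only the enable vote taken in
+ * prepare/unprepare is meaningful.
+ */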
+
+/* msm8916 */
+DEFINE_CLK_SMD_RPM(msm8916, pcnoc_clk, pcnoc_a_clk, QCOM_SMD_RPM_BUS_CLK, 0);
+DEFINE_CLK_SMD_RPM(msm8916, snoc_clk, snoc_a_clk, QCOM_SMD_RPM_BUS_CLK, 1);
+DEFINE_CLK_SMD_RPM(msm8916, bimc_clk, bimc_a_clk, QCOM_SMD_RPM_MEM_CLK, 0);
+DEFINE_CLK_SMD_RPM_QDSS(msm8916, qdss_clk, qdss_a_clk, QCOM_SMD_RPM_MISC_CLK, 1);
+DEFINE_CLK_SMD_RPM_XO_BUFFER(msm8916, bb_clk1, bb_clk1_a, 1);
+DEFINE_CLK_SMD_RPM_XO_BUFFER(msm8916, bb_clk2, bb_clk2_a, 2);
+DEFINE_CLK_SMD_RPM_XO_BUFFER(msm8916, rf_clk1, rf_clk1_a, 4);
+DEFINE_CLK_SMD_RPM_XO_BUFFER(msm8916, rf_clk2, rf_clk2_a, 5);
+DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(msm8916, bb_clk1_pin, bb_clk1_a_pin, 1);
+DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(msm8916, bb_clk2_pin, bb_clk2_a_pin, 2);
+DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(msm8916, rf_clk1_pin, rf_clk1_a_pin, 4);
+DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(msm8916, rf_clk2_pin, rf_clk2_a_pin, 5);
+
+static struct clk_smd_rpm *msm8916_clks[] = {
+ [RPM_SMD_PCNOC_CLK] = &msm8916_pcnoc_clk,
+ [RPM_SMD_PCNOC_A_CLK] = &msm8916_pcnoc_a_clk,
+ [RPM_SMD_SNOC_CLK] = &msm8916_snoc_clk,
+ [RPM_SMD_SNOC_A_CLK] = &msm8916_snoc_a_clk,
+ [RPM_SMD_BIMC_CLK] = &msm8916_bimc_clk,
+ [RPM_SMD_BIMC_A_CLK] = &msm8916_bimc_a_clk,
+ [RPM_SMD_QDSS_CLK] = &msm8916_qdss_clk,
+ [RPM_SMD_QDSS_A_CLK] = &msm8916_qdss_a_clk,
+ [RPM_SMD_BB_CLK1] = &msm8916_bb_clk1,
+ [RPM_SMD_BB_CLK1_A] = &msm8916_bb_clk1_a,
+ [RPM_SMD_BB_CLK2] = &msm8916_bb_clk2,
+ [RPM_SMD_BB_CLK2_A] = &msm8916_bb_clk2_a,
+ [RPM_SMD_RF_CLK1] = &msm8916_rf_clk1,
+ [RPM_SMD_RF_CLK1_A] = &msm8916_rf_clk1_a,
+ [RPM_SMD_RF_CLK2] = &msm8916_rf_clk2,
+ [RPM_SMD_RF_CLK2_A] = &msm8916_rf_clk2_a,
+ [RPM_SMD_BB_CLK1_PIN] = &msm8916_bb_clk1_pin,
+ [RPM_SMD_BB_CLK1_A_PIN] = &msm8916_bb_clk1_a_pin,
+ [RPM_SMD_BB_CLK2_PIN] = &msm8916_bb_clk2_pin,
+ [RPM_SMD_BB_CLK2_A_PIN] = &msm8916_bb_clk2_a_pin,
+ [RPM_SMD_RF_CLK1_PIN] = &msm8916_rf_clk1_pin,
+ [RPM_SMD_RF_CLK1_A_PIN] = &msm8916_rf_clk1_a_pin,
+ [RPM_SMD_RF_CLK2_PIN] = &msm8916_rf_clk2_pin,
+ [RPM_SMD_RF_CLK2_A_PIN] = &msm8916_rf_clk2_a_pin,
+};
+
+static const struct rpm_smd_clk_desc rpm_clk_msm8916 = {
+ .clks = msm8916_clks,
+ .num_clks = ARRAY_SIZE(msm8916_clks),
+};
+
+static const struct of_device_id rpm_smd_clk_match_table[] = {
+ { .compatible = "qcom,rpmcc-msm8916", .data = &rpm_clk_msm8916 },
+ { }
+};
+MODULE_DEVICE_TABLE(of, rpm_smd_clk_match_table);
+
+static struct clk_hw *qcom_smdrpm_clk_hw_get(struct of_phandle_args *clkspec,
+ void *data)
+{
+ struct rpm_cc *rcc = data;
+ unsigned int idx = clkspec->args[0];
+
+ if (idx >= rcc->num_clks) {
+ pr_err("%s: invalid index %u\n", __func__, idx);
+ return ERR_PTR(-EINVAL);
+ }
+
+ return rcc->clks[idx] ? &rcc->clks[idx]->hw : ERR_PTR(-ENOENT);
+}
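+
+/*
+ * Consumer sketch (hypothetical device tree snippet):
+ *
+ *   clocks = <&rpmcc RPM_SMD_BIMC_CLK>;
+ *
+ * the cell is the index this callback uses to look up the entry in
+ * msm8916_clks[] above.
+ */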
+
+static int rpm_smd_clk_probe(struct platform_device *pdev)
+{
+ struct rpm_cc *rcc;
+ int ret;
+ size_t num_clks, i;
+ struct qcom_smd_rpm *rpm;
+ struct clk_smd_rpm **rpm_smd_clks;
+ const struct rpm_smd_clk_desc *desc;
+
+ rpm = dev_get_drvdata(pdev->dev.parent);
+ if (!rpm) {
+ dev_err(&pdev->dev, "Unable to retrieve handle to RPM\n");
+ return -ENODEV;
+ }
+
+ desc = of_device_get_match_data(&pdev->dev);
+ if (!desc)
+ return -EINVAL;
+
+ rpm_smd_clks = desc->clks;
+ num_clks = desc->num_clks;
+
+ rcc = devm_kzalloc(&pdev->dev, sizeof(*rcc), GFP_KERNEL);
+ if (!rcc)
+ return -ENOMEM;
+
+ rcc->clks = rpm_smd_clks;
+ rcc->num_clks = num_clks;
+
+ for (i = 0; i < num_clks; i++) {
+ if (!rpm_smd_clks[i])
+ continue;
+
+ rpm_smd_clks[i]->rpm = rpm;
+
+ ret = clk_smd_rpm_handoff(rpm_smd_clks[i]);
+ if (ret)
+ goto err;
+ }
+
+ ret = clk_smd_rpm_enable_scaling(rpm);
+ if (ret)
+ goto err;
+
+ for (i = 0; i < num_clks; i++) {
+ if (!rpm_smd_clks[i])
+ continue;
+
+ ret = devm_clk_hw_register(&pdev->dev, &rpm_smd_clks[i]->hw);
+ if (ret)
+ goto err;
+ }
+
+ ret = of_clk_add_hw_provider(pdev->dev.of_node, qcom_smdrpm_clk_hw_get,
+ rcc);
+ if (ret)
+ goto err;
+
+ return 0;
+err:
+ dev_err(&pdev->dev, "Error registering SMD clock driver (%d)\n", ret);
+ return ret;
+}
+
+static int rpm_smd_clk_remove(struct platform_device *pdev)
+{
+ of_clk_del_provider(pdev->dev.of_node);
+ return 0;
+}
+
+static struct platform_driver rpm_smd_clk_driver = {
+ .driver = {
+ .name = "qcom-clk-smd-rpm",
+ .of_match_table = rpm_smd_clk_match_table,
+ },
+ .probe = rpm_smd_clk_probe,
+ .remove = rpm_smd_clk_remove,
+};
+
+static int __init rpm_smd_clk_init(void)
+{
+ return platform_driver_register(&rpm_smd_clk_driver);
+}
+core_initcall(rpm_smd_clk_init);
+
+static void __exit rpm_smd_clk_exit(void)
+{
+ platform_driver_unregister(&rpm_smd_clk_driver);
+}
+module_exit(rpm_smd_clk_exit);
+
+MODULE_DESCRIPTION("Qualcomm RPM over SMD Clock Controller Driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:qcom-clk-smd-rpm");
diff --git a/drivers/clk/qcom/common.c b/drivers/clk/qcom/common.c
index fffcbaf0fba7..cfab7b400381 100644
--- a/drivers/clk/qcom/common.c
+++ b/drivers/clk/qcom/common.c
@@ -46,6 +46,22 @@ struct freq_tbl *qcom_find_freq(const struct freq_tbl *f, unsigned long rate)
}
EXPORT_SYMBOL_GPL(qcom_find_freq);
+const struct freq_tbl *qcom_find_freq_floor(const struct freq_tbl *f,
+ unsigned long rate)
+{
+ const struct freq_tbl *best = NULL;
+
+ for ( ; f->freq; f++) {
+ if (rate >= f->freq)
+ best = f;
+ else
+ break;
+ }
+
+ return best;
+}
+EXPORT_SYMBOL_GPL(qcom_find_freq_floor);
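+
+/*
+ * Example: for an ascending table of 100, 200 and 400 MHz entries, a
+ * 250 MHz request returns the 200 MHz entry (highest entry at or
+ * below the rate) and a request under 100 MHz returns NULL, whereas
+ * qcom_find_freq() rounds up instead.
+ */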
+
int qcom_find_src_index(struct clk_hw *hw, const struct parent_map *map, u8 src)
{
int i, num_parents = clk_hw_get_num_parents(hw);
@@ -74,6 +90,27 @@ qcom_cc_map(struct platform_device *pdev, const struct qcom_cc_desc *desc)
}
EXPORT_SYMBOL_GPL(qcom_cc_map);
+void
+qcom_pll_set_fsm_mode(struct regmap *map, u32 reg, u8 bias_count, u8 lock_count)
+{
+ u32 val;
+ u32 mask;
+
+ /* De-assert reset to FSM */
+ regmap_update_bits(map, reg, PLL_VOTE_FSM_RESET, 0);
+
+ /* Program bias count and lock count */
+ val = bias_count << PLL_BIAS_COUNT_SHIFT |
+ lock_count << PLL_LOCK_COUNT_SHIFT;
+ mask = PLL_BIAS_COUNT_MASK << PLL_BIAS_COUNT_SHIFT;
+ mask |= PLL_LOCK_COUNT_MASK << PLL_LOCK_COUNT_SHIFT;
+ regmap_update_bits(map, reg, mask, val);
+
+ /* Enable PLL FSM voting */
+ regmap_update_bits(map, reg, PLL_VOTE_FSM_ENA, PLL_VOTE_FSM_ENA);
+}
+EXPORT_SYMBOL_GPL(qcom_pll_set_fsm_mode);
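+
+/*
+ * Example: bias_count = 6, lock_count = 0 packs as
+ * (6 << 14) | (0 << 8) = 0x18000 under the mask
+ * (0x3f << 14) | (0x3f << 8) = 0xfff00; setting PLL_VOTE_FSM_ENA
+ * (bit 20) then hands the PLL over to hardware FSM voting.
+ */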
+
static void qcom_cc_del_clk_provider(void *data)
{
of_clk_del_provider(data);
@@ -153,15 +190,12 @@ int qcom_cc_register_board_clk(struct device *dev, const char *path,
const char *name, unsigned long rate)
{
bool add_factor = true;
- struct device_node *node;
-
- /* The RPM clock driver will add the factor clock if present */
- if (IS_ENABLED(CONFIG_QCOM_RPMCC)) {
- node = of_find_compatible_node(NULL, NULL, "qcom,rpmcc");
- if (of_device_is_available(node))
- add_factor = false;
- of_node_put(node);
- }
+
+ /*
+ * TODO: The RPM clock driver currently does not support the xo clock.
+ * When xo is added to the RPM clock driver, we should change this
+ * function to skip registration of xo factor clocks.
+ */
return _qcom_cc_register_board_clk(dev, path, name, rate, add_factor);
}
diff --git a/drivers/clk/qcom/common.h b/drivers/clk/qcom/common.h
index ae9bdeb21f29..23c1927669ba 100644
--- a/drivers/clk/qcom/common.h
+++ b/drivers/clk/qcom/common.h
@@ -22,6 +22,13 @@ struct freq_tbl;
struct clk_hw;
struct parent_map;
+#define PLL_LOCK_COUNT_SHIFT 8
+#define PLL_LOCK_COUNT_MASK 0x3f
+#define PLL_BIAS_COUNT_SHIFT 14
+#define PLL_BIAS_COUNT_MASK 0x3f
+#define PLL_VOTE_FSM_ENA BIT(20)
+#define PLL_VOTE_FSM_RESET BIT(21)
+
struct qcom_cc_desc {
const struct regmap_config *config;
struct clk_regmap **clks;
@@ -34,6 +41,10 @@ struct qcom_cc_desc {
extern const struct freq_tbl *qcom_find_freq(const struct freq_tbl *f,
unsigned long rate);
+extern const struct freq_tbl *qcom_find_freq_floor(const struct freq_tbl *f,
+ unsigned long rate);
+extern void
+qcom_pll_set_fsm_mode(struct regmap *m, u32 reg, u8 bias_count, u8 lock_count);
extern int qcom_find_src_index(struct clk_hw *hw, const struct parent_map *map,
u8 src);
diff --git a/drivers/clk/qcom/gcc-apq8084.c b/drivers/clk/qcom/gcc-apq8084.c
index 070037a29ea5..486d9610355c 100644
--- a/drivers/clk/qcom/gcc-apq8084.c
+++ b/drivers/clk/qcom/gcc-apq8084.c
@@ -1142,7 +1142,7 @@ static struct clk_rcg2 sdcc1_apps_clk_src = {
.name = "sdcc1_apps_clk_src",
.parent_names = gcc_xo_gpll0_gpll4,
.num_parents = 3,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_floor_ops,
},
};
@@ -1156,7 +1156,7 @@ static struct clk_rcg2 sdcc2_apps_clk_src = {
.name = "sdcc2_apps_clk_src",
.parent_names = gcc_xo_gpll0,
.num_parents = 2,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_floor_ops,
},
};
@@ -1170,7 +1170,7 @@ static struct clk_rcg2 sdcc3_apps_clk_src = {
.name = "sdcc3_apps_clk_src",
.parent_names = gcc_xo_gpll0,
.num_parents = 2,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_floor_ops,
},
};
@@ -1184,7 +1184,7 @@ static struct clk_rcg2 sdcc4_apps_clk_src = {
.name = "sdcc4_apps_clk_src",
.parent_names = gcc_xo_gpll0,
.num_parents = 2,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_floor_ops,
},
};
diff --git a/drivers/clk/qcom/gcc-ipq4019.c b/drivers/clk/qcom/gcc-ipq4019.c
index b593065de8db..33d09138f5e5 100644
--- a/drivers/clk/qcom/gcc-ipq4019.c
+++ b/drivers/clk/qcom/gcc-ipq4019.c
@@ -185,8 +185,7 @@ static struct clk_branch gcc_audio_pwm_clk = {
};
static const struct freq_tbl ftbl_gcc_blsp1_qup1_2_i2c_apps_clk[] = {
- F(19200000, P_XO, 1, 2, 5),
- F(24000000, P_XO, 1, 1, 2),
+ F(19050000, P_FEPLL200, 10.5, 1, 1),
{ }
};
diff --git a/drivers/clk/qcom/gcc-ipq806x.c b/drivers/clk/qcom/gcc-ipq806x.c
index 52a7d3959875..28eb200d0f1e 100644
--- a/drivers/clk/qcom/gcc-ipq806x.c
+++ b/drivers/clk/qcom/gcc-ipq806x.c
@@ -2990,11 +2990,11 @@ static int gcc_ipq806x_probe(struct platform_device *pdev)
struct regmap *regmap;
int ret;
- ret = qcom_cc_register_board_clk(dev, "cxo_board", "cxo", 19200000);
+ ret = qcom_cc_register_board_clk(dev, "cxo_board", "cxo", 25000000);
if (ret)
return ret;
- ret = qcom_cc_register_board_clk(dev, "pxo_board", "pxo", 27000000);
+ ret = qcom_cc_register_board_clk(dev, "pxo_board", "pxo", 25000000);
if (ret)
return ret;
diff --git a/drivers/clk/qcom/gcc-msm8916.c b/drivers/clk/qcom/gcc-msm8916.c
index 5c4e193164d4..628e6ca276ec 100644
--- a/drivers/clk/qcom/gcc-msm8916.c
+++ b/drivers/clk/qcom/gcc-msm8916.c
@@ -1107,7 +1107,7 @@ static struct clk_rcg2 sdcc1_apps_clk_src = {
.name = "sdcc1_apps_clk_src",
.parent_names = gcc_xo_gpll0,
.num_parents = 2,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_floor_ops,
},
};
@@ -1132,7 +1132,7 @@ static struct clk_rcg2 sdcc2_apps_clk_src = {
.name = "sdcc2_apps_clk_src",
.parent_names = gcc_xo_gpll0,
.num_parents = 2,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_floor_ops,
},
};
diff --git a/drivers/clk/qcom/gcc-msm8974.c b/drivers/clk/qcom/gcc-msm8974.c
index 00915209e7c5..348e30da4f18 100644
--- a/drivers/clk/qcom/gcc-msm8974.c
+++ b/drivers/clk/qcom/gcc-msm8974.c
@@ -872,7 +872,7 @@ static struct clk_init_data sdcc1_apps_clk_src_init = {
.name = "sdcc1_apps_clk_src",
.parent_names = gcc_xo_gpll0,
.num_parents = 2,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_floor_ops,
};
static struct clk_rcg2 sdcc1_apps_clk_src = {
@@ -894,7 +894,7 @@ static struct clk_rcg2 sdcc2_apps_clk_src = {
.name = "sdcc2_apps_clk_src",
.parent_names = gcc_xo_gpll0,
.num_parents = 2,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_floor_ops,
},
};
@@ -908,7 +908,7 @@ static struct clk_rcg2 sdcc3_apps_clk_src = {
.name = "sdcc3_apps_clk_src",
.parent_names = gcc_xo_gpll0,
.num_parents = 2,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_floor_ops,
},
};
@@ -922,7 +922,7 @@ static struct clk_rcg2 sdcc4_apps_clk_src = {
.name = "sdcc4_apps_clk_src",
.parent_names = gcc_xo_gpll0,
.num_parents = 2,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_floor_ops,
},
};
diff --git a/drivers/clk/qcom/gcc-msm8994.c b/drivers/clk/qcom/gcc-msm8994.c
new file mode 100644
index 000000000000..8afd8304a070
--- /dev/null
+++ b/drivers/clk/qcom/gcc-msm8994.c
@@ -0,0 +1,2300 @@
+/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/ctype.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
+
+#include <dt-bindings/clock/qcom,gcc-msm8994.h>
+
+#include "common.h"
+#include "clk-regmap.h"
+#include "clk-alpha-pll.h"
+#include "clk-rcg.h"
+#include "clk-branch.h"
+#include "reset.h"
+
+enum {
+ P_XO,
+ P_GPLL0,
+ P_GPLL4,
+};
+
+static const struct parent_map gcc_xo_gpll0_map[] = {
+ { P_XO, 0 },
+ { P_GPLL0, 1 },
+};
+
+static const char * const gcc_xo_gpll0[] = {
+ "xo",
+ "gpll0",
+};
+
+static const struct parent_map gcc_xo_gpll0_gpll4_map[] = {
+ { P_XO, 0 },
+ { P_GPLL0, 1 },
+ { P_GPLL4, 5 },
+};
+
+static const char * const gcc_xo_gpll0_gpll4[] = {
+ "xo",
+ "gpll0",
+ "gpll4",
+};
+
+#define F(f, s, h, m, n) { (f), (s), (2 * (h) - 1), (m), (n) }
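+
+/*
+ * The pre-divider h may be a half-integer: the macro stores 2 * h - 1,
+ * so F(171430000, P_GPLL0, 3.5, 0, 0) records a field value of 6,
+ * matching 600 MHz / 3.5 with the gpll0 rate implied by the tables
+ * below.
+ */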
+
+static struct clk_fixed_factor xo = {
+ .mult = 1,
+ .div = 1,
+ .hw.init = &(struct clk_init_data)
+ {
+ .name = "xo",
+ .parent_names = (const char *[]) { "xo_board" },
+ .num_parents = 1,
+ .ops = &clk_fixed_factor_ops,
+ },
+};
+
+static struct clk_alpha_pll gpll0_early = {
+ .offset = 0x00000,
+ .clkr = {
+ .enable_reg = 0x1480,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data)
+ {
+ .name = "gpll0_early",
+ .parent_names = (const char *[]) { "xo" },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_ops,
+ },
+ },
+};
+
+static struct clk_alpha_pll_postdiv gpll0 = {
+ .offset = 0x00000,
+ .clkr.hw.init = &(struct clk_init_data)
+ {
+ .name = "gpll0",
+ .parent_names = (const char *[]) { "gpll0_early" },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_postdiv_ops,
+ },
+};
+
+static struct clk_alpha_pll gpll4_early = {
+ .offset = 0x1dc0,
+ .clkr = {
+ .enable_reg = 0x1480,
+ .enable_mask = BIT(4),
+ .hw.init = &(struct clk_init_data)
+ {
+ .name = "gpll4_early",
+ .parent_names = (const char *[]) { "xo" },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_ops,
+ },
+ },
+};
+
+static struct clk_alpha_pll_postdiv gpll4 = {
+ .offset = 0x1dc0,
+ .clkr.hw.init = &(struct clk_init_data)
+ {
+ .name = "gpll4",
+ .parent_names = (const char *[]) { "gpll4_early" },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_postdiv_ops,
+ },
+};
+
+static struct freq_tbl ftbl_ufs_axi_clk_src[] = {
+ F(50000000, P_GPLL0, 12, 0, 0),
+ F(100000000, P_GPLL0, 6, 0, 0),
+ F(150000000, P_GPLL0, 4, 0, 0),
+ F(171430000, P_GPLL0, 3.5, 0, 0),
+ F(200000000, P_GPLL0, 3, 0, 0),
+ F(240000000, P_GPLL0, 2.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 ufs_axi_clk_src = {
+ .cmd_rcgr = 0x1d68,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_ufs_axi_clk_src,
+ .clkr.hw.init = &(struct clk_init_data)
+ {
+ .name = "ufs_axi_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct freq_tbl ftbl_usb30_master_clk_src[] = {
+ F(19200000, P_XO, 1, 0, 0),
+ F(125000000, P_GPLL0, 1, 5, 24),
+ { }
+};
+
+static struct clk_rcg2 usb30_master_clk_src = {
+ .cmd_rcgr = 0x03d4,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_usb30_master_clk_src,
+ .clkr.hw.init = &(struct clk_init_data)
+ {
+ .name = "usb30_master_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct freq_tbl ftbl_blsp_i2c_apps_clk_src[] = {
+ F(19200000, P_XO, 1, 0, 0),
+ F(50000000, P_GPLL0, 12, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 blsp1_qup1_i2c_apps_clk_src = {
+ .cmd_rcgr = 0x0660,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_blsp_i2c_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data)
+ {
+ .name = "blsp1_qup1_i2c_apps_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct freq_tbl ftbl_blspqup_spi_apps_clk_src[] = {
+ F(960000, P_XO, 10, 1, 2),
+ F(4800000, P_XO, 4, 0, 0),
+ F(9600000, P_XO, 2, 0, 0),
+ F(15000000, P_GPLL0, 10, 1, 4),
+ F(19200000, P_XO, 1, 0, 0),
+ F(24000000, P_GPLL0, 12.5, 1, 2),
+ F(25000000, P_GPLL0, 12, 1, 2),
+ F(48000000, P_GPLL0, 12.5, 0, 0),
+ F(50000000, P_GPLL0, 12, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 blsp1_qup1_spi_apps_clk_src = {
+ .cmd_rcgr = 0x064c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_blspqup_spi_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data)
+ {
+ .name = "blsp1_qup1_spi_apps_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp1_qup2_i2c_apps_clk_src = {
+ .cmd_rcgr = 0x06e0,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_blsp_i2c_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data)
+ {
+ .name = "blsp1_qup2_i2c_apps_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp1_qup2_spi_apps_clk_src = {
+ .cmd_rcgr = 0x06cc,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_blspqup_spi_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data)
+ {
+ .name = "blsp1_qup2_spi_apps_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp1_qup3_i2c_apps_clk_src = {
+ .cmd_rcgr = 0x0760,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_blsp_i2c_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data)
+ {
+ .name = "blsp1_qup3_i2c_apps_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp1_qup3_spi_apps_clk_src = {
+ .cmd_rcgr = 0x074c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_blspqup_spi_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data)
+ {
+ .name = "blsp1_qup3_spi_apps_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp1_qup4_i2c_apps_clk_src = {
+ .cmd_rcgr = 0x07e0,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_blsp_i2c_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data)
+ {
+ .name = "blsp1_qup4_i2c_apps_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp1_qup4_spi_apps_clk_src = {
+ .cmd_rcgr = 0x07cc,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_blspqup_spi_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data)
+ {
+ .name = "blsp1_qup4_spi_apps_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp1_qup5_i2c_apps_clk_src = {
+ .cmd_rcgr = 0x0860,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_blsp_i2c_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data)
+ {
+ .name = "blsp1_qup5_i2c_apps_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp1_qup5_spi_apps_clk_src = {
+ .cmd_rcgr = 0x084c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_blspqup_spi_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data)
+ {
+ .name = "blsp1_qup5_spi_apps_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp1_qup6_i2c_apps_clk_src = {
+ .cmd_rcgr = 0x08e0,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_blsp_i2c_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data)
+ {
+ .name = "blsp1_qup6_i2c_apps_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp1_qup6_spi_apps_clk_src = {
+ .cmd_rcgr = 0x08cc,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_blspqup_spi_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data)
+ {
+ .name = "blsp1_qup6_spi_apps_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct freq_tbl ftbl_blsp_uart_apps_clk_src[] = {
+ F(3686400, P_GPLL0, 1, 96, 15625),
+ F(7372800, P_GPLL0, 1, 192, 15625),
+ F(14745600, P_GPLL0, 1, 384, 15625),
+ F(16000000, P_GPLL0, 5, 2, 15),
+ F(19200000, P_XO, 1, 0, 0),
+ F(24000000, P_GPLL0, 5, 1, 5),
+ F(32000000, P_GPLL0, 1, 4, 75),
+ F(40000000, P_GPLL0, 15, 0, 0),
+ F(46400000, P_GPLL0, 1, 29, 375),
+ F(48000000, P_GPLL0, 12.5, 0, 0),
+ F(51200000, P_GPLL0, 1, 32, 375),
+ F(56000000, P_GPLL0, 1, 7, 75),
+ F(58982400, P_GPLL0, 1, 1536, 15625),
+ F(60000000, P_GPLL0, 10, 0, 0),
+ F(63160000, P_GPLL0, 9.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 blsp1_uart1_apps_clk_src = {
+ .cmd_rcgr = 0x068c,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_blsp_uart_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data)
+ {
+ .name = "blsp1_uart1_apps_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp1_uart2_apps_clk_src = {
+ .cmd_rcgr = 0x070c,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_blsp_uart_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data)
+ {
+ .name = "blsp1_uart2_apps_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp1_uart3_apps_clk_src = {
+ .cmd_rcgr = 0x078c,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_blsp_uart_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data)
+ {
+ .name = "blsp1_uart3_apps_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp1_uart4_apps_clk_src = {
+ .cmd_rcgr = 0x080c,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_blsp_uart_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data)
+ {
+ .name = "blsp1_uart4_apps_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp1_uart5_apps_clk_src = {
+ .cmd_rcgr = 0x088c,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_blsp_uart_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data)
+ {
+ .name = "blsp1_uart5_apps_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp1_uart6_apps_clk_src = {
+ .cmd_rcgr = 0x090c,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_blsp_uart_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data)
+ {
+ .name = "blsp1_uart6_apps_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp2_qup1_i2c_apps_clk_src = {
+ .cmd_rcgr = 0x09a0,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_blsp_i2c_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data)
+ {
+ .name = "blsp2_qup1_i2c_apps_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp2_qup1_spi_apps_clk_src = {
+ .cmd_rcgr = 0x098c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_blspqup_spi_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data)
+ {
+ .name = "blsp2_qup1_spi_apps_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp2_qup2_i2c_apps_clk_src = {
+ .cmd_rcgr = 0x0a20,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_blsp_i2c_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data)
+ {
+ .name = "blsp2_qup2_i2c_apps_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp2_qup2_spi_apps_clk_src = {
+ .cmd_rcgr = 0x0a0c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_blspqup_spi_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data)
+ {
+ .name = "blsp2_qup2_spi_apps_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp2_qup3_i2c_apps_clk_src = {
+ .cmd_rcgr = 0x0aa0,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_blsp_i2c_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data)
+ {
+ .name = "blsp2_qup3_i2c_apps_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp2_qup3_spi_apps_clk_src = {
+ .cmd_rcgr = 0x0a8c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_blspqup_spi_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data)
+ {
+ .name = "blsp2_qup3_spi_apps_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp2_qup4_i2c_apps_clk_src = {
+ .cmd_rcgr = 0x0b20,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_blsp_i2c_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data)
+ {
+ .name = "blsp2_qup4_i2c_apps_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp2_qup4_spi_apps_clk_src = {
+ .cmd_rcgr = 0x0b0c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_blspqup_spi_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data)
+ {
+ .name = "blsp2_qup4_spi_apps_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp2_qup5_i2c_apps_clk_src = {
+ .cmd_rcgr = 0x0ba0,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_blsp_i2c_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data)
+ {
+ .name = "blsp2_qup5_i2c_apps_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp2_qup5_spi_apps_clk_src = {
+ .cmd_rcgr = 0x0b8c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_blspqup_spi_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data)
+ {
+ .name = "blsp2_qup5_spi_apps_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp2_qup6_i2c_apps_clk_src = {
+ .cmd_rcgr = 0x0c20,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_blsp_i2c_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data)
+ {
+ .name = "blsp2_qup6_i2c_apps_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp2_qup6_spi_apps_clk_src = {
+ .cmd_rcgr = 0x0c0c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_blspqup_spi_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data)
+ {
+ .name = "blsp2_qup6_spi_apps_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp2_uart1_apps_clk_src = {
+ .cmd_rcgr = 0x09cc,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_blsp_uart_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data)
+ {
+ .name = "blsp2_uart1_apps_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp2_uart2_apps_clk_src = {
+ .cmd_rcgr = 0x0a4c,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_blsp_uart_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data)
+ {
+ .name = "blsp2_uart2_apps_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp2_uart3_apps_clk_src = {
+ .cmd_rcgr = 0x0acc,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_blsp_uart_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data)
+ {
+ .name = "blsp2_uart3_apps_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp2_uart4_apps_clk_src = {
+ .cmd_rcgr = 0x0b4c,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_blsp_uart_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data)
+ {
+ .name = "blsp2_uart4_apps_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp2_uart5_apps_clk_src = {
+ .cmd_rcgr = 0x0bcc,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_blsp_uart_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data)
+ {
+ .name = "blsp2_uart5_apps_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp2_uart6_apps_clk_src = {
+ .cmd_rcgr = 0x0c4c,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_blsp_uart_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data)
+ {
+ .name = "blsp2_uart6_apps_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct freq_tbl ftbl_gp1_clk_src[] = {
+ F(19200000, P_XO, 1, 0, 0),
+ F(100000000, P_GPLL0, 6, 0, 0),
+ F(200000000, P_GPLL0, 3, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gp1_clk_src = {
+ .cmd_rcgr = 0x1904,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_gp1_clk_src,
+ .clkr.hw.init = &(struct clk_init_data)
+ {
+ .name = "gp1_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct freq_tbl ftbl_gp2_clk_src[] = {
+ F(19200000, P_XO, 1, 0, 0),
+ F(100000000, P_GPLL0, 6, 0, 0),
+ F(200000000, P_GPLL0, 3, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gp2_clk_src = {
+ .cmd_rcgr = 0x1944,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_gp2_clk_src,
+ .clkr.hw.init = &(struct clk_init_data)
+ {
+ .name = "gp2_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct freq_tbl ftbl_gp3_clk_src[] = {
+ F(19200000, P_XO, 1, 0, 0),
+ F(100000000, P_GPLL0, 6, 0, 0),
+ F(200000000, P_GPLL0, 3, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gp3_clk_src = {
+ .cmd_rcgr = 0x1984,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_gp3_clk_src,
+ .clkr.hw.init = &(struct clk_init_data)
+ {
+ .name = "gp3_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct freq_tbl ftbl_pcie_0_aux_clk_src[] = {
+ F(1011000, P_XO, 1, 1, 19),
+ { }
+};
+
+static struct clk_rcg2 pcie_0_aux_clk_src = {
+ .cmd_rcgr = 0x1b00,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .freq_tbl = ftbl_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(struct clk_init_data)
+ {
+ .name = "pcie_0_aux_clk_src",
+ .parent_names = (const char *[]) { "xo" },
+ .num_parents = 1,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct freq_tbl ftbl_pcie_pipe_clk_src[] = {
+ F(125000000, P_XO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 pcie_0_pipe_clk_src = {
+ .cmd_rcgr = 0x1adc,
+ .hid_width = 5,
+ .freq_tbl = ftbl_pcie_pipe_clk_src,
+ .clkr.hw.init = &(struct clk_init_data)
+ {
+ .name = "pcie_0_pipe_clk_src",
+ .parent_names = (const char *[]) { "xo" },
+ .num_parents = 1,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct freq_tbl ftbl_pcie_1_aux_clk_src[] = {
+ F(1011000, P_XO, 1, 1, 19),
+ { }
+};
+
+static struct clk_rcg2 pcie_1_aux_clk_src = {
+ .cmd_rcgr = 0x1b80,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .freq_tbl = ftbl_pcie_1_aux_clk_src,
+ .clkr.hw.init = &(struct clk_init_data)
+ {
+ .name = "pcie_1_aux_clk_src",
+ .parent_names = (const char *[]) { "xo" },
+ .num_parents = 1,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 pcie_1_pipe_clk_src = {
+ .cmd_rcgr = 0x1b5c,
+ .hid_width = 5,
+ .freq_tbl = ftbl_pcie_pipe_clk_src,
+ .clkr.hw.init = &(struct clk_init_data)
+ {
+ .name = "pcie_1_pipe_clk_src",
+ .parent_names = (const char *[]) { "xo" },
+ .num_parents = 1,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct freq_tbl ftbl_pdm2_clk_src[] = {
+ F(60000000, P_GPLL0, 10, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 pdm2_clk_src = {
+ .cmd_rcgr = 0x0cd0,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_pdm2_clk_src,
+ .clkr.hw.init = &(struct clk_init_data)
+ {
+ .name = "pdm2_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct freq_tbl ftbl_sdcc1_apps_clk_src[] = {
+ F(144000, P_XO, 16, 3, 25),
+ F(400000, P_XO, 12, 1, 4),
+ F(20000000, P_GPLL0, 15, 1, 2),
+ F(25000000, P_GPLL0, 12, 1, 2),
+ F(50000000, P_GPLL0, 12, 0, 0),
+ F(100000000, P_GPLL0, 6, 0, 0),
+ F(192000000, P_GPLL4, 2, 0, 0),
+ F(384000000, P_GPLL4, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 sdcc1_apps_clk_src = {
+ .cmd_rcgr = 0x04d0,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_gpll4_map,
+ .freq_tbl = ftbl_sdcc1_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data)
+ {
+ .name = "sdcc1_apps_clk_src",
+ .parent_names = gcc_xo_gpll0_gpll4,
+ .num_parents = 3,
+ .ops = &clk_rcg2_floor_ops,
+ },
+};
+
+static struct freq_tbl ftbl_sdcc2_4_apps_clk_src[] = {
+ F(144000, P_XO, 16, 3, 25),
+ F(400000, P_XO, 12, 1, 4),
+ F(20000000, P_GPLL0, 15, 1, 2),
+ F(25000000, P_GPLL0, 12, 1, 2),
+ F(50000000, P_GPLL0, 12, 0, 0),
+ F(100000000, P_GPLL0, 6, 0, 0),
+ F(200000000, P_GPLL0, 3, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 sdcc2_apps_clk_src = {
+ .cmd_rcgr = 0x0510,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_sdcc2_4_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data)
+ {
+ .name = "sdcc2_apps_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_floor_ops,
+ },
+};
+
+static struct clk_rcg2 sdcc3_apps_clk_src = {
+ .cmd_rcgr = 0x0550,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_sdcc2_4_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data)
+ {
+ .name = "sdcc3_apps_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_floor_ops,
+ },
+};
+
+static struct clk_rcg2 sdcc4_apps_clk_src = {
+ .cmd_rcgr = 0x0590,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_sdcc2_4_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data)
+ {
+ .name = "sdcc4_apps_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_floor_ops,
+ },
+};
+
+static struct freq_tbl ftbl_tsif_ref_clk_src[] = {
+ F(105500, P_XO, 1, 1, 182),
+ { }
+};
+
+static struct clk_rcg2 tsif_ref_clk_src = {
+ .cmd_rcgr = 0x0d90,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .freq_tbl = ftbl_tsif_ref_clk_src,
+ .clkr.hw.init = &(struct clk_init_data)
+ {
+ .name = "tsif_ref_clk_src",
+ .parent_names = (const char *[]) { "xo" },
+ .num_parents = 1,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct freq_tbl ftbl_usb30_mock_utmi_clk_src[] = {
+ F(19200000, P_XO, 1, 0, 0),
+ F(60000000, P_GPLL0, 10, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 usb30_mock_utmi_clk_src = {
+ .cmd_rcgr = 0x03e8,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_usb30_mock_utmi_clk_src,
+ .clkr.hw.init = &(struct clk_init_data)
+ {
+ .name = "usb30_mock_utmi_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct freq_tbl ftbl_usb3_phy_aux_clk_src[] = {
+ F(1200000, P_XO, 16, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 usb3_phy_aux_clk_src = {
+ .cmd_rcgr = 0x1414,
+ .hid_width = 5,
+ .freq_tbl = ftbl_usb3_phy_aux_clk_src,
+ .clkr.hw.init = &(struct clk_init_data)
+ {
+ .name = "usb3_phy_aux_clk_src",
+ .parent_names = (const char *[]) { "xo" },
+ .num_parents = 1,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct freq_tbl ftbl_usb_hs_system_clk_src[] = {
+ F(75000000, P_GPLL0, 8, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 usb_hs_system_clk_src = {
+ .cmd_rcgr = 0x0490,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_usb_hs_system_clk_src,
+ .clkr.hw.init = &(struct clk_init_data)
+ {
+ .name = "usb_hs_system_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_branch gcc_blsp1_ahb_clk = {
+ .halt_reg = 0x05c4,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x1484,
+ .enable_mask = BIT(17),
+ .hw.init = &(struct clk_init_data)
+ {
+ .name = "gcc_blsp1_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_qup1_i2c_apps_clk = {
+ .halt_reg = 0x0648,
+ .clkr = {
+ .enable_reg = 0x0648,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data)
+ {
+ .name = "gcc_blsp1_qup1_i2c_apps_clk",
+ .parent_names = (const char *[]) {
+ "blsp1_qup1_i2c_apps_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_qup1_spi_apps_clk = {
+ .halt_reg = 0x0644,
+ .clkr = {
+ .enable_reg = 0x0644,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data)
+ {
+ .name = "gcc_blsp1_qup1_spi_apps_clk",
+ .parent_names = (const char *[]) {
+ "blsp1_qup1_spi_apps_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_qup2_i2c_apps_clk = {
+ .halt_reg = 0x06c8,
+ .clkr = {
+ .enable_reg = 0x06c8,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data)
+ {
+ .name = "gcc_blsp1_qup2_i2c_apps_clk",
+ .parent_names = (const char *[]) {
+ "blsp1_qup2_i2c_apps_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_qup2_spi_apps_clk = {
+ .halt_reg = 0x06c4,
+ .clkr = {
+ .enable_reg = 0x06c4,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data)
+ {
+ .name = "gcc_blsp1_qup2_spi_apps_clk",
+ .parent_names = (const char *[]) {
+ "blsp1_qup2_spi_apps_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_qup3_i2c_apps_clk = {
+ .halt_reg = 0x0748,
+ .clkr = {
+ .enable_reg = 0x0748,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data)
+ {
+ .name = "gcc_blsp1_qup3_i2c_apps_clk",
+ .parent_names = (const char *[]) {
+ "blsp1_qup3_i2c_apps_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_qup3_spi_apps_clk = {
+ .halt_reg = 0x0744,
+ .clkr = {
+ .enable_reg = 0x0744,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data)
+ {
+ .name = "gcc_blsp1_qup3_spi_apps_clk",
+ .parent_names = (const char *[]) {
+ "blsp1_qup3_spi_apps_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_qup4_i2c_apps_clk = {
+ .halt_reg = 0x07c8,
+ .clkr = {
+ .enable_reg = 0x07c8,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data)
+ {
+ .name = "gcc_blsp1_qup4_i2c_apps_clk",
+ .parent_names = (const char *[]) {
+ "blsp1_qup4_i2c_apps_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_qup4_spi_apps_clk = {
+ .halt_reg = 0x07c4,
+ .clkr = {
+ .enable_reg = 0x07c4,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data)
+ {
+ .name = "gcc_blsp1_qup4_spi_apps_clk",
+ .parent_names = (const char *[]) {
+ "blsp1_qup4_spi_apps_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_qup5_i2c_apps_clk = {
+ .halt_reg = 0x0848,
+ .clkr = {
+ .enable_reg = 0x0848,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data)
+ {
+ .name = "gcc_blsp1_qup5_i2c_apps_clk",
+ .parent_names = (const char *[]) {
+ "blsp1_qup5_i2c_apps_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_qup5_spi_apps_clk = {
+ .halt_reg = 0x0844,
+ .clkr = {
+ .enable_reg = 0x0844,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data)
+ {
+ .name = "gcc_blsp1_qup5_spi_apps_clk",
+ .parent_names = (const char *[]) {
+ "blsp1_qup5_spi_apps_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_qup6_i2c_apps_clk = {
+ .halt_reg = 0x08c8,
+ .clkr = {
+ .enable_reg = 0x08c8,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data)
+ {
+ .name = "gcc_blsp1_qup6_i2c_apps_clk",
+ .parent_names = (const char *[]) {
+ "blsp1_qup6_i2c_apps_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_qup6_spi_apps_clk = {
+ .halt_reg = 0x08c4,
+ .clkr = {
+ .enable_reg = 0x08c4,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data)
+ {
+ .name = "gcc_blsp1_qup6_spi_apps_clk",
+ .parent_names = (const char *[]) {
+ "blsp1_qup6_spi_apps_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_uart1_apps_clk = {
+ .halt_reg = 0x0684,
+ .clkr = {
+ .enable_reg = 0x0684,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data)
+ {
+ .name = "gcc_blsp1_uart1_apps_clk",
+ .parent_names = (const char *[]) {
+ "blsp1_uart1_apps_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_uart2_apps_clk = {
+ .halt_reg = 0x0704,
+ .clkr = {
+ .enable_reg = 0x0704,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data)
+ {
+ .name = "gcc_blsp1_uart2_apps_clk",
+ .parent_names = (const char *[]) {
+ "blsp1_uart2_apps_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_uart3_apps_clk = {
+ .halt_reg = 0x0784,
+ .clkr = {
+ .enable_reg = 0x0784,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data)
+ {
+ .name = "gcc_blsp1_uart3_apps_clk",
+ .parent_names = (const char *[]) {
+ "blsp1_uart3_apps_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_uart4_apps_clk = {
+ .halt_reg = 0x0804,
+ .clkr = {
+ .enable_reg = 0x0804,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data)
+ {
+ .name = "gcc_blsp1_uart4_apps_clk",
+ .parent_names = (const char *[]) {
+ "blsp1_uart4_apps_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_uart5_apps_clk = {
+ .halt_reg = 0x0884,
+ .clkr = {
+ .enable_reg = 0x0884,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data)
+ {
+ .name = "gcc_blsp1_uart5_apps_clk",
+ .parent_names = (const char *[]) {
+ "blsp1_uart5_apps_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_uart6_apps_clk = {
+ .halt_reg = 0x0904,
+ .clkr = {
+ .enable_reg = 0x0904,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data)
+ {
+ .name = "gcc_blsp1_uart6_apps_clk",
+ .parent_names = (const char *[]) {
+ "blsp1_uart6_apps_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp2_ahb_clk = {
+ .halt_reg = 0x0944,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x1484,
+ .enable_mask = BIT(15),
+ .hw.init = &(struct clk_init_data)
+ {
+ .name = "gcc_blsp2_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp2_qup1_i2c_apps_clk = {
+ .halt_reg = 0x0988,
+ .clkr = {
+ .enable_reg = 0x0988,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data)
+ {
+ .name = "gcc_blsp2_qup1_i2c_apps_clk",
+ .parent_names = (const char *[]) {
+ "blsp2_qup1_i2c_apps_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp2_qup1_spi_apps_clk = {
+ .halt_reg = 0x0984,
+ .clkr = {
+ .enable_reg = 0x0984,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data)
+ {
+ .name = "gcc_blsp2_qup1_spi_apps_clk",
+ .parent_names = (const char *[]) {
+ "blsp2_qup1_spi_apps_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp2_qup2_i2c_apps_clk = {
+ .halt_reg = 0x0a08,
+ .clkr = {
+ .enable_reg = 0x0a08,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data)
+ {
+ .name = "gcc_blsp2_qup2_i2c_apps_clk",
+ .parent_names = (const char *[]) {
+ "blsp2_qup2_i2c_apps_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp2_qup2_spi_apps_clk = {
+ .halt_reg = 0x0a04,
+ .clkr = {
+ .enable_reg = 0x0a04,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data)
+ {
+ .name = "gcc_blsp2_qup2_spi_apps_clk",
+ .parent_names = (const char *[]) {
+ "blsp2_qup2_spi_apps_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp2_qup3_i2c_apps_clk = {
+ .halt_reg = 0x0a88,
+ .clkr = {
+ .enable_reg = 0x0a88,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data)
+ {
+ .name = "gcc_blsp2_qup3_i2c_apps_clk",
+ .parent_names = (const char *[]) {
+ "blsp2_qup3_i2c_apps_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp2_qup3_spi_apps_clk = {
+ .halt_reg = 0x0a84,
+ .clkr = {
+ .enable_reg = 0x0a84,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data)
+ {
+ .name = "gcc_blsp2_qup3_spi_apps_clk",
+ .parent_names = (const char *[]) {
+ "blsp2_qup3_spi_apps_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp2_qup4_i2c_apps_clk = {
+ .halt_reg = 0x0b08,
+ .clkr = {
+ .enable_reg = 0x0b08,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data)
+ {
+ .name = "gcc_blsp2_qup4_i2c_apps_clk",
+ .parent_names = (const char *[]) {
+ "blsp2_qup4_i2c_apps_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp2_qup4_spi_apps_clk = {
+ .halt_reg = 0x0b04,
+ .clkr = {
+ .enable_reg = 0x0b04,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data)
+ {
+ .name = "gcc_blsp2_qup4_spi_apps_clk",
+ .parent_names = (const char *[]) {
+ "blsp2_qup4_spi_apps_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp2_qup5_i2c_apps_clk = {
+ .halt_reg = 0x0b88,
+ .clkr = {
+ .enable_reg = 0x0b88,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data)
+ {
+ .name = "gcc_blsp2_qup5_i2c_apps_clk",
+ .parent_names = (const char *[]) {
+ "blsp2_qup5_i2c_apps_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp2_qup5_spi_apps_clk = {
+ .halt_reg = 0x0b84,
+ .clkr = {
+ .enable_reg = 0x0b84,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data)
+ {
+ .name = "gcc_blsp2_qup5_spi_apps_clk",
+ .parent_names = (const char *[]) {
+ "blsp2_qup5_spi_apps_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp2_qup6_i2c_apps_clk = {
+ .halt_reg = 0x0c08,
+ .clkr = {
+ .enable_reg = 0x0c08,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data)
+ {
+ .name = "gcc_blsp2_qup6_i2c_apps_clk",
+ .parent_names = (const char *[]) {
+ "blsp2_qup6_i2c_apps_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp2_qup6_spi_apps_clk = {
+ .halt_reg = 0x0c04,
+ .clkr = {
+ .enable_reg = 0x0c04,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data)
+ {
+ .name = "gcc_blsp2_qup6_spi_apps_clk",
+ .parent_names = (const char *[]) {
+ "blsp2_qup6_spi_apps_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp2_uart1_apps_clk = {
+ .halt_reg = 0x09c4,
+ .clkr = {
+ .enable_reg = 0x09c4,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data)
+ {
+ .name = "gcc_blsp2_uart1_apps_clk",
+ .parent_names = (const char *[]) {
+ "blsp2_uart1_apps_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp2_uart2_apps_clk = {
+ .halt_reg = 0x0a44,
+ .clkr = {
+ .enable_reg = 0x0a44,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data)
+ {
+ .name = "gcc_blsp2_uart2_apps_clk",
+ .parent_names = (const char *[]) {
+ "blsp2_uart2_apps_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp2_uart3_apps_clk = {
+ .halt_reg = 0x0ac4,
+ .clkr = {
+ .enable_reg = 0x0ac4,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data)
+ {
+ .name = "gcc_blsp2_uart3_apps_clk",
+ .parent_names = (const char *[]) {
+ "blsp2_uart3_apps_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp2_uart4_apps_clk = {
+ .halt_reg = 0x0b44,
+ .clkr = {
+ .enable_reg = 0x0b44,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data)
+ {
+ .name = "gcc_blsp2_uart4_apps_clk",
+ .parent_names = (const char *[]) {
+ "blsp2_uart4_apps_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp2_uart5_apps_clk = {
+ .halt_reg = 0x0bc4,
+ .clkr = {
+ .enable_reg = 0x0bc4,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data)
+ {
+ .name = "gcc_blsp2_uart5_apps_clk",
+ .parent_names = (const char *[]) {
+ "blsp2_uart5_apps_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp2_uart6_apps_clk = {
+ .halt_reg = 0x0c44,
+ .clkr = {
+ .enable_reg = 0x0c44,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data)
+ {
+ .name = "gcc_blsp2_uart6_apps_clk",
+ .parent_names = (const char *[]) {
+ "blsp2_uart6_apps_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gp1_clk = {
+ .halt_reg = 0x1900,
+ .clkr = {
+ .enable_reg = 0x1900,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data)
+ {
+ .name = "gcc_gp1_clk",
+ .parent_names = (const char *[]) {
+ "gp1_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gp2_clk = {
+ .halt_reg = 0x1940,
+ .clkr = {
+ .enable_reg = 0x1940,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data)
+ {
+ .name = "gcc_gp2_clk",
+ .parent_names = (const char *[]) {
+ "gp2_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gp3_clk = {
+ .halt_reg = 0x1980,
+ .clkr = {
+ .enable_reg = 0x1980,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data)
+ {
+ .name = "gcc_gp3_clk",
+ .parent_names = (const char *[]) {
+ "gp3_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_aux_clk = {
+ .halt_reg = 0x1ad4,
+ .clkr = {
+ .enable_reg = 0x1ad4,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data)
+ {
+ .name = "gcc_pcie_0_aux_clk",
+ .parent_names = (const char *[]) {
+ "pcie_0_aux_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_pipe_clk = {
+ .halt_reg = 0x1ad8,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x1ad8,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data)
+ {
+ .name = "gcc_pcie_0_pipe_clk",
+ .parent_names = (const char *[]) {
+ "pcie_0_pipe_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_1_aux_clk = {
+ .halt_reg = 0x1b54,
+ .clkr = {
+ .enable_reg = 0x1b54,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data)
+ {
+ .name = "gcc_pcie_1_aux_clk",
+ .parent_names = (const char *[]) {
+ "pcie_1_aux_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_1_pipe_clk = {
+ .halt_reg = 0x1b58,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x1b58,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data)
+ {
+ .name = "gcc_pcie_1_pipe_clk",
+ .parent_names = (const char *[]) {
+ "pcie_1_pipe_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pdm2_clk = {
+ .halt_reg = 0x0ccc,
+ .clkr = {
+ .enable_reg = 0x0ccc,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data)
+ {
+ .name = "gcc_pdm2_clk",
+ .parent_names = (const char *[]) {
+ "pdm2_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc1_apps_clk = {
+ .halt_reg = 0x04c4,
+ .clkr = {
+ .enable_reg = 0x04c4,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data)
+ {
+ .name = "gcc_sdcc1_apps_clk",
+ .parent_names = (const char *[]) {
+ "sdcc1_apps_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc2_apps_clk = {
+ .halt_reg = 0x0504,
+ .clkr = {
+ .enable_reg = 0x0504,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data)
+ {
+ .name = "gcc_sdcc2_apps_clk",
+ .parent_names = (const char *[]) {
+ "sdcc2_apps_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc3_apps_clk = {
+ .halt_reg = 0x0544,
+ .clkr = {
+ .enable_reg = 0x0544,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data)
+ {
+ .name = "gcc_sdcc3_apps_clk",
+ .parent_names = (const char *[]) {
+ "sdcc3_apps_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc4_apps_clk = {
+ .halt_reg = 0x0584,
+ .clkr = {
+ .enable_reg = 0x0584,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data)
+ {
+ .name = "gcc_sdcc4_apps_clk",
+ .parent_names = (const char *[]) {
+ "sdcc4_apps_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sys_noc_ufs_axi_clk = {
+ .halt_reg = 0x1d7c,
+ .clkr = {
+ .enable_reg = 0x1d7c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data)
+ {
+ .name = "gcc_sys_noc_ufs_axi_clk",
+ .parent_names = (const char *[]) {
+ "ufs_axi_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sys_noc_usb3_axi_clk = {
+ .halt_reg = 0x03fc,
+ .clkr = {
+ .enable_reg = 0x03fc,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data)
+ {
+ .name = "gcc_sys_noc_usb3_axi_clk",
+ .parent_names = (const char *[]) {
+ "usb30_master_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_tsif_ref_clk = {
+ .halt_reg = 0x0d88,
+ .clkr = {
+ .enable_reg = 0x0d88,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data)
+ {
+ .name = "gcc_tsif_ref_clk",
+ .parent_names = (const char *[]) {
+ "tsif_ref_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_axi_clk = {
+ .halt_reg = 0x1d48,
+ .clkr = {
+ .enable_reg = 0x1d48,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data)
+ {
+ .name = "gcc_ufs_axi_clk",
+ .parent_names = (const char *[]) {
+ "ufs_axi_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_rx_cfg_clk = {
+ .halt_reg = 0x1d54,
+ .clkr = {
+ .enable_reg = 0x1d54,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data)
+ {
+ .name = "gcc_ufs_rx_cfg_clk",
+ .parent_names = (const char *[]) {
+ "ufs_axi_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_tx_cfg_clk = {
+ .halt_reg = 0x1d50,
+ .clkr = {
+ .enable_reg = 0x1d50,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data)
+ {
+ .name = "gcc_ufs_tx_cfg_clk",
+ .parent_names = (const char *[]) {
+ "ufs_axi_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_master_clk = {
+ .halt_reg = 0x03c8,
+ .clkr = {
+ .enable_reg = 0x03c8,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data)
+ {
+ .name = "gcc_usb30_master_clk",
+ .parent_names = (const char *[]) {
+ "usb30_master_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_mock_utmi_clk = {
+ .halt_reg = 0x03d0,
+ .clkr = {
+ .enable_reg = 0x03d0,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data)
+ {
+ .name = "gcc_usb30_mock_utmi_clk",
+ .parent_names = (const char *[]) {
+ "usb30_mock_utmi_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_phy_aux_clk = {
+ .halt_reg = 0x1408,
+ .clkr = {
+ .enable_reg = 0x1408,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data)
+ {
+ .name = "gcc_usb3_phy_aux_clk",
+ .parent_names = (const char *[]) {
+ "usb3_phy_aux_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb_hs_system_clk = {
+ .halt_reg = 0x0484,
+ .clkr = {
+ .enable_reg = 0x0484,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data)
+ {
+ .name = "gcc_usb_hs_system_clk",
+ .parent_names = (const char *[]) {
+ "usb_hs_system_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_regmap *gcc_msm8994_clocks[] = {
+ [GPLL0_EARLY] = &gpll0_early.clkr,
+ [GPLL0] = &gpll0.clkr,
+ [GPLL4_EARLY] = &gpll4_early.clkr,
+ [GPLL4] = &gpll4.clkr,
+ [UFS_AXI_CLK_SRC] = &ufs_axi_clk_src.clkr,
+ [USB30_MASTER_CLK_SRC] = &usb30_master_clk_src.clkr,
+ [BLSP1_QUP1_I2C_APPS_CLK_SRC] = &blsp1_qup1_i2c_apps_clk_src.clkr,
+ [BLSP1_QUP1_SPI_APPS_CLK_SRC] = &blsp1_qup1_spi_apps_clk_src.clkr,
+ [BLSP1_QUP2_I2C_APPS_CLK_SRC] = &blsp1_qup2_i2c_apps_clk_src.clkr,
+ [BLSP1_QUP2_SPI_APPS_CLK_SRC] = &blsp1_qup2_spi_apps_clk_src.clkr,
+ [BLSP1_QUP3_I2C_APPS_CLK_SRC] = &blsp1_qup3_i2c_apps_clk_src.clkr,
+ [BLSP1_QUP3_SPI_APPS_CLK_SRC] = &blsp1_qup3_spi_apps_clk_src.clkr,
+ [BLSP1_QUP4_I2C_APPS_CLK_SRC] = &blsp1_qup4_i2c_apps_clk_src.clkr,
+ [BLSP1_QUP4_SPI_APPS_CLK_SRC] = &blsp1_qup4_spi_apps_clk_src.clkr,
+ [BLSP1_QUP5_I2C_APPS_CLK_SRC] = &blsp1_qup5_i2c_apps_clk_src.clkr,
+ [BLSP1_QUP5_SPI_APPS_CLK_SRC] = &blsp1_qup5_spi_apps_clk_src.clkr,
+ [BLSP1_QUP6_I2C_APPS_CLK_SRC] = &blsp1_qup6_i2c_apps_clk_src.clkr,
+ [BLSP1_QUP6_SPI_APPS_CLK_SRC] = &blsp1_qup6_spi_apps_clk_src.clkr,
+ [BLSP1_UART1_APPS_CLK_SRC] = &blsp1_uart1_apps_clk_src.clkr,
+ [BLSP1_UART2_APPS_CLK_SRC] = &blsp1_uart2_apps_clk_src.clkr,
+ [BLSP1_UART3_APPS_CLK_SRC] = &blsp1_uart3_apps_clk_src.clkr,
+ [BLSP1_UART4_APPS_CLK_SRC] = &blsp1_uart4_apps_clk_src.clkr,
+ [BLSP1_UART5_APPS_CLK_SRC] = &blsp1_uart5_apps_clk_src.clkr,
+ [BLSP1_UART6_APPS_CLK_SRC] = &blsp1_uart6_apps_clk_src.clkr,
+ [BLSP2_QUP1_I2C_APPS_CLK_SRC] = &blsp2_qup1_i2c_apps_clk_src.clkr,
+ [BLSP2_QUP1_SPI_APPS_CLK_SRC] = &blsp2_qup1_spi_apps_clk_src.clkr,
+ [BLSP2_QUP2_I2C_APPS_CLK_SRC] = &blsp2_qup2_i2c_apps_clk_src.clkr,
+ [BLSP2_QUP2_SPI_APPS_CLK_SRC] = &blsp2_qup2_spi_apps_clk_src.clkr,
+ [BLSP2_QUP3_I2C_APPS_CLK_SRC] = &blsp2_qup3_i2c_apps_clk_src.clkr,
+ [BLSP2_QUP3_SPI_APPS_CLK_SRC] = &blsp2_qup3_spi_apps_clk_src.clkr,
+ [BLSP2_QUP4_I2C_APPS_CLK_SRC] = &blsp2_qup4_i2c_apps_clk_src.clkr,
+ [BLSP2_QUP4_SPI_APPS_CLK_SRC] = &blsp2_qup4_spi_apps_clk_src.clkr,
+ [BLSP2_QUP5_I2C_APPS_CLK_SRC] = &blsp2_qup5_i2c_apps_clk_src.clkr,
+ [BLSP2_QUP5_SPI_APPS_CLK_SRC] = &blsp2_qup5_spi_apps_clk_src.clkr,
+ [BLSP2_QUP6_I2C_APPS_CLK_SRC] = &blsp2_qup6_i2c_apps_clk_src.clkr,
+ [BLSP2_QUP6_SPI_APPS_CLK_SRC] = &blsp2_qup6_spi_apps_clk_src.clkr,
+ [BLSP2_UART1_APPS_CLK_SRC] = &blsp2_uart1_apps_clk_src.clkr,
+ [BLSP2_UART2_APPS_CLK_SRC] = &blsp2_uart2_apps_clk_src.clkr,
+ [BLSP2_UART3_APPS_CLK_SRC] = &blsp2_uart3_apps_clk_src.clkr,
+ [BLSP2_UART4_APPS_CLK_SRC] = &blsp2_uart4_apps_clk_src.clkr,
+ [BLSP2_UART5_APPS_CLK_SRC] = &blsp2_uart5_apps_clk_src.clkr,
+ [BLSP2_UART6_APPS_CLK_SRC] = &blsp2_uart6_apps_clk_src.clkr,
+ [GP1_CLK_SRC] = &gp1_clk_src.clkr,
+ [GP2_CLK_SRC] = &gp2_clk_src.clkr,
+ [GP3_CLK_SRC] = &gp3_clk_src.clkr,
+ [PCIE_0_AUX_CLK_SRC] = &pcie_0_aux_clk_src.clkr,
+ [PCIE_0_PIPE_CLK_SRC] = &pcie_0_pipe_clk_src.clkr,
+ [PCIE_1_AUX_CLK_SRC] = &pcie_1_aux_clk_src.clkr,
+ [PCIE_1_PIPE_CLK_SRC] = &pcie_1_pipe_clk_src.clkr,
+ [PDM2_CLK_SRC] = &pdm2_clk_src.clkr,
+ [SDCC1_APPS_CLK_SRC] = &sdcc1_apps_clk_src.clkr,
+ [SDCC2_APPS_CLK_SRC] = &sdcc2_apps_clk_src.clkr,
+ [SDCC3_APPS_CLK_SRC] = &sdcc3_apps_clk_src.clkr,
+ [SDCC4_APPS_CLK_SRC] = &sdcc4_apps_clk_src.clkr,
+ [TSIF_REF_CLK_SRC] = &tsif_ref_clk_src.clkr,
+ [USB30_MOCK_UTMI_CLK_SRC] = &usb30_mock_utmi_clk_src.clkr,
+ [USB3_PHY_AUX_CLK_SRC] = &usb3_phy_aux_clk_src.clkr,
+ [USB_HS_SYSTEM_CLK_SRC] = &usb_hs_system_clk_src.clkr,
+ [GCC_BLSP1_AHB_CLK] = &gcc_blsp1_ahb_clk.clkr,
+ [GCC_BLSP1_QUP1_I2C_APPS_CLK] = &gcc_blsp1_qup1_i2c_apps_clk.clkr,
+ [GCC_BLSP1_QUP1_SPI_APPS_CLK] = &gcc_blsp1_qup1_spi_apps_clk.clkr,
+ [GCC_BLSP1_QUP2_I2C_APPS_CLK] = &gcc_blsp1_qup2_i2c_apps_clk.clkr,
+ [GCC_BLSP1_QUP2_SPI_APPS_CLK] = &gcc_blsp1_qup2_spi_apps_clk.clkr,
+ [GCC_BLSP1_QUP3_I2C_APPS_CLK] = &gcc_blsp1_qup3_i2c_apps_clk.clkr,
+ [GCC_BLSP1_QUP3_SPI_APPS_CLK] = &gcc_blsp1_qup3_spi_apps_clk.clkr,
+ [GCC_BLSP1_QUP4_I2C_APPS_CLK] = &gcc_blsp1_qup4_i2c_apps_clk.clkr,
+ [GCC_BLSP1_QUP4_SPI_APPS_CLK] = &gcc_blsp1_qup4_spi_apps_clk.clkr,
+ [GCC_BLSP1_QUP5_I2C_APPS_CLK] = &gcc_blsp1_qup5_i2c_apps_clk.clkr,
+ [GCC_BLSP1_QUP5_SPI_APPS_CLK] = &gcc_blsp1_qup5_spi_apps_clk.clkr,
+ [GCC_BLSP1_QUP6_I2C_APPS_CLK] = &gcc_blsp1_qup6_i2c_apps_clk.clkr,
+ [GCC_BLSP1_QUP6_SPI_APPS_CLK] = &gcc_blsp1_qup6_spi_apps_clk.clkr,
+ [GCC_BLSP1_UART1_APPS_CLK] = &gcc_blsp1_uart1_apps_clk.clkr,
+ [GCC_BLSP1_UART2_APPS_CLK] = &gcc_blsp1_uart2_apps_clk.clkr,
+ [GCC_BLSP1_UART3_APPS_CLK] = &gcc_blsp1_uart3_apps_clk.clkr,
+ [GCC_BLSP1_UART4_APPS_CLK] = &gcc_blsp1_uart4_apps_clk.clkr,
+ [GCC_BLSP1_UART5_APPS_CLK] = &gcc_blsp1_uart5_apps_clk.clkr,
+ [GCC_BLSP1_UART6_APPS_CLK] = &gcc_blsp1_uart6_apps_clk.clkr,
+ [GCC_BLSP2_AHB_CLK] = &gcc_blsp2_ahb_clk.clkr,
+ [GCC_BLSP2_QUP1_I2C_APPS_CLK] = &gcc_blsp2_qup1_i2c_apps_clk.clkr,
+ [GCC_BLSP2_QUP1_SPI_APPS_CLK] = &gcc_blsp2_qup1_spi_apps_clk.clkr,
+ [GCC_BLSP2_QUP2_I2C_APPS_CLK] = &gcc_blsp2_qup2_i2c_apps_clk.clkr,
+ [GCC_BLSP2_QUP2_SPI_APPS_CLK] = &gcc_blsp2_qup2_spi_apps_clk.clkr,
+ [GCC_BLSP2_QUP3_I2C_APPS_CLK] = &gcc_blsp2_qup3_i2c_apps_clk.clkr,
+ [GCC_BLSP2_QUP3_SPI_APPS_CLK] = &gcc_blsp2_qup3_spi_apps_clk.clkr,
+ [GCC_BLSP2_QUP4_I2C_APPS_CLK] = &gcc_blsp2_qup4_i2c_apps_clk.clkr,
+ [GCC_BLSP2_QUP4_SPI_APPS_CLK] = &gcc_blsp2_qup4_spi_apps_clk.clkr,
+ [GCC_BLSP2_QUP5_I2C_APPS_CLK] = &gcc_blsp2_qup5_i2c_apps_clk.clkr,
+ [GCC_BLSP2_QUP5_SPI_APPS_CLK] = &gcc_blsp2_qup5_spi_apps_clk.clkr,
+ [GCC_BLSP2_QUP6_I2C_APPS_CLK] = &gcc_blsp2_qup6_i2c_apps_clk.clkr,
+ [GCC_BLSP2_QUP6_SPI_APPS_CLK] = &gcc_blsp2_qup6_spi_apps_clk.clkr,
+ [GCC_BLSP2_UART1_APPS_CLK] = &gcc_blsp2_uart1_apps_clk.clkr,
+ [GCC_BLSP2_UART2_APPS_CLK] = &gcc_blsp2_uart2_apps_clk.clkr,
+ [GCC_BLSP2_UART3_APPS_CLK] = &gcc_blsp2_uart3_apps_clk.clkr,
+ [GCC_BLSP2_UART4_APPS_CLK] = &gcc_blsp2_uart4_apps_clk.clkr,
+ [GCC_BLSP2_UART5_APPS_CLK] = &gcc_blsp2_uart5_apps_clk.clkr,
+ [GCC_BLSP2_UART6_APPS_CLK] = &gcc_blsp2_uart6_apps_clk.clkr,
+ [GCC_GP1_CLK] = &gcc_gp1_clk.clkr,
+ [GCC_GP2_CLK] = &gcc_gp2_clk.clkr,
+ [GCC_GP3_CLK] = &gcc_gp3_clk.clkr,
+ [GCC_PCIE_0_AUX_CLK] = &gcc_pcie_0_aux_clk.clkr,
+ [GCC_PCIE_0_PIPE_CLK] = &gcc_pcie_0_pipe_clk.clkr,
+ [GCC_PCIE_1_AUX_CLK] = &gcc_pcie_1_aux_clk.clkr,
+ [GCC_PCIE_1_PIPE_CLK] = &gcc_pcie_1_pipe_clk.clkr,
+ [GCC_PDM2_CLK] = &gcc_pdm2_clk.clkr,
+ [GCC_SDCC1_APPS_CLK] = &gcc_sdcc1_apps_clk.clkr,
+ [GCC_SDCC2_APPS_CLK] = &gcc_sdcc2_apps_clk.clkr,
+ [GCC_SDCC3_APPS_CLK] = &gcc_sdcc3_apps_clk.clkr,
+ [GCC_SDCC4_APPS_CLK] = &gcc_sdcc4_apps_clk.clkr,
+ [GCC_SYS_NOC_UFS_AXI_CLK] = &gcc_sys_noc_ufs_axi_clk.clkr,
+ [GCC_SYS_NOC_USB3_AXI_CLK] = &gcc_sys_noc_usb3_axi_clk.clkr,
+ [GCC_TSIF_REF_CLK] = &gcc_tsif_ref_clk.clkr,
+ [GCC_UFS_AXI_CLK] = &gcc_ufs_axi_clk.clkr,
+ [GCC_UFS_RX_CFG_CLK] = &gcc_ufs_rx_cfg_clk.clkr,
+ [GCC_UFS_TX_CFG_CLK] = &gcc_ufs_tx_cfg_clk.clkr,
+ [GCC_USB30_MASTER_CLK] = &gcc_usb30_master_clk.clkr,
+ [GCC_USB30_MOCK_UTMI_CLK] = &gcc_usb30_mock_utmi_clk.clkr,
+ [GCC_USB3_PHY_AUX_CLK] = &gcc_usb3_phy_aux_clk.clkr,
+ [GCC_USB_HS_SYSTEM_CLK] = &gcc_usb_hs_system_clk.clkr,
+};
+
+static const struct regmap_config gcc_msm8994_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x2000,
+ .fast_io = true,
+};
+
+static const struct qcom_cc_desc gcc_msm8994_desc = {
+ .config = &gcc_msm8994_regmap_config,
+ .clks = gcc_msm8994_clocks,
+ .num_clks = ARRAY_SIZE(gcc_msm8994_clocks),
+};
+
+static const struct of_device_id gcc_msm8994_match_table[] = {
+ { .compatible = "qcom,gcc-msm8994" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, gcc_msm8994_match_table);
+
+static int gcc_msm8994_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct clk *clk;
+
+ clk = devm_clk_register(dev, &xo.hw);
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
+
+ return qcom_cc_probe(pdev, &gcc_msm8994_desc);
+}
+
+static struct platform_driver gcc_msm8994_driver = {
+ .probe = gcc_msm8994_probe,
+ .driver = {
+ .name = "gcc-msm8994",
+ .of_match_table = gcc_msm8994_match_table,
+ },
+};
+
+static int __init gcc_msm8994_init(void)
+{
+ return platform_driver_register(&gcc_msm8994_driver);
+}
+core_initcall(gcc_msm8994_init);
+
+static void __exit gcc_msm8994_exit(void)
+{
+ platform_driver_unregister(&gcc_msm8994_driver);
+}
+module_exit(gcc_msm8994_exit);
+
+MODULE_DESCRIPTION("Qualcomm GCC MSM8994 Driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:gcc-msm8994");
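
Each of the gcc_* branch clocks above follows the same template: enable_reg/enable_mask drive the gate, halt_reg is polled by clk_branch2_ops to confirm the gate actually took effect, and CLK_SET_RATE_PARENT forwards rate requests through to the matching *_clk_src RCG. A minimal consumer-side sketch of how a peripheral driver would pick one of these up through the common clock framework — the "core" con-id and the probe context are assumptions for illustration, not taken from this patch (needs linux/clk.h and linux/platform_device.h):

	/* Hedged sketch (not from this patch): look up, rate and gate one
	 * branch clock from a peripheral driver's probe().  The clock is
	 * matched via the consumer's DT clocks/clock-names properties. */
	static int example_probe(struct platform_device *pdev)
	{
		struct clk *clk;
		int ret;

		clk = devm_clk_get(&pdev->dev, "core");	/* con-id is illustrative */
		if (IS_ERR(clk))
			return PTR_ERR(clk);

		ret = clk_set_rate(clk, 19200000);	/* reaches the RCG parent via
							 * CLK_SET_RATE_PARENT */
		if (ret)
			return ret;

		return clk_prepare_enable(clk);		/* toggles enable_reg/mask */
	}
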
diff --git a/drivers/clk/qcom/gcc-msm8996.c b/drivers/clk/qcom/gcc-msm8996.c
index fe03e6fbc7df..4b1fc1730d29 100644
--- a/drivers/clk/qcom/gcc-msm8996.c
+++ b/drivers/clk/qcom/gcc-msm8996.c
@@ -460,14 +460,22 @@ static struct clk_rcg2 sdcc1_apps_clk_src = {
.name = "sdcc1_apps_clk_src",
.parent_names = gcc_xo_gpll0_gpll4_gpll0_early_div,
.num_parents = 4,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_floor_ops,
},
};
+static struct freq_tbl ftbl_sdcc1_ice_core_clk_src[] = {
+ F(19200000, P_XO, 1, 0, 0),
+ F(150000000, P_GPLL0, 4, 0, 0),
+ F(300000000, P_GPLL0, 2, 0, 0),
+ { }
+};
+
static struct clk_rcg2 sdcc1_ice_core_clk_src = {
.cmd_rcgr = 0x13024,
.hid_width = 5,
.parent_map = gcc_xo_gpll0_gpll4_gpll0_early_div_map,
+ .freq_tbl = ftbl_sdcc1_ice_core_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "sdcc1_ice_core_clk_src",
.parent_names = gcc_xo_gpll0_gpll4_gpll0_early_div,
@@ -497,7 +505,7 @@ static struct clk_rcg2 sdcc2_apps_clk_src = {
.name = "sdcc2_apps_clk_src",
.parent_names = gcc_xo_gpll0_gpll4,
.num_parents = 3,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_floor_ops,
},
};
@@ -511,7 +519,7 @@ static struct clk_rcg2 sdcc3_apps_clk_src = {
.name = "sdcc3_apps_clk_src",
.parent_names = gcc_xo_gpll0_gpll4,
.num_parents = 3,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_floor_ops,
},
};
@@ -535,7 +543,7 @@ static struct clk_rcg2 sdcc4_apps_clk_src = {
.name = "sdcc4_apps_clk_src",
.parent_names = gcc_xo_gpll0,
.num_parents = 2,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_floor_ops,
},
};
@@ -1230,10 +1238,18 @@ static struct clk_rcg2 ufs_axi_clk_src = {
},
};
+static const struct freq_tbl ftbl_ufs_ice_core_clk_src[] = {
+ F(19200000, P_XO, 1, 0, 0),
+ F(150000000, P_GPLL0, 4, 0, 0),
+ F(300000000, P_GPLL0, 2, 0, 0),
+ { }
+};
+
static struct clk_rcg2 ufs_ice_core_clk_src = {
.cmd_rcgr = 0x76014,
.hid_width = 5,
.parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_ufs_ice_core_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "ufs_ice_core_clk_src",
.parent_names = gcc_xo_gpll0,
@@ -1242,10 +1258,19 @@ static struct clk_rcg2 ufs_ice_core_clk_src = {
},
};
+static const struct freq_tbl ftbl_qspi_ser_clk_src[] = {
+ F(75000000, P_GPLL0, 8, 0, 0),
+ F(150000000, P_GPLL0, 4, 0, 0),
+ F(256000000, P_GPLL4, 1.5, 0, 0),
+ F(300000000, P_GPLL0, 2, 0, 0),
+ { }
+};
+
static struct clk_rcg2 qspi_ser_clk_src = {
.cmd_rcgr = 0x8b00c,
.hid_width = 5,
.parent_map = gcc_xo_gpll0_gpll1_early_div_gpll1_gpll4_gpll0_early_div_map,
+ .freq_tbl = ftbl_qspi_ser_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "qspi_ser_clk_src",
.parent_names = gcc_xo_gpll0_gpll1_early_div_gpll1_gpll4_gpll0_early_div,
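
The four sdcc*_apps_clk_src hunks above switch from clk_rcg2_ops to clk_rcg2_floor_ops. The practical difference is rounding direction: with floor ops a rate request resolves to the highest supported frequency at or below the request, which matters for SD/MMC controllers where clocking the card bus above the requested maximum is not acceptable. A standalone sketch of that selection rule over a small frequency table — a hypothetical re-implementation for illustration, not the kernel's own lookup:

	/* Standalone sketch of "floor" frequency selection, modelled on what
	 * clk_rcg2_floor_ops does with a freq_tbl: pick the highest supported
	 * rate that does not exceed the request.  Illustrative only. */
	#include <stdio.h>

	static const unsigned long freq_tbl[] = { 19200000, 150000000, 300000000 };

	static unsigned long find_freq_floor(unsigned long rate)
	{
		unsigned long best = freq_tbl[0];	/* fall back to the lowest rate */

		for (unsigned int i = 0; i < sizeof(freq_tbl) / sizeof(freq_tbl[0]); i++)
			if (freq_tbl[i] <= rate)
				best = freq_tbl[i];
		return best;
	}

	int main(void)
	{
		/* a 200 MHz request resolves to 150 MHz, never 300 MHz */
		printf("%lu\n", find_freq_floor(200000000));
		return 0;
	}
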
diff --git a/drivers/clk/qcom/gdsc.c b/drivers/clk/qcom/gdsc.c
index f12d7b2bddd7..288186cce0ae 100644
--- a/drivers/clk/qcom/gdsc.c
+++ b/drivers/clk/qcom/gdsc.c
@@ -30,6 +30,7 @@
#define SW_OVERRIDE_MASK BIT(2)
#define HW_CONTROL_MASK BIT(1)
#define SW_COLLAPSE_MASK BIT(0)
+#define GMEM_CLAMP_IO_MASK BIT(0)
/* Wait 2^n CXO cycles between all states. Here, n=2 (4 cycles). */
#define EN_REST_WAIT_VAL (0x2 << 20)
@@ -55,6 +56,13 @@ static int gdsc_is_enabled(struct gdsc *sc, unsigned int reg)
return !!(val & PWR_ON_MASK);
}
+static int gdsc_hwctrl(struct gdsc *sc, bool en)
+{
+ u32 val = en ? HW_CONTROL_MASK : 0;
+
+ return regmap_update_bits(sc->regmap, sc->gdscr, HW_CONTROL_MASK, val);
+}
+
static int gdsc_toggle_logic(struct gdsc *sc, bool en)
{
int ret;
@@ -140,6 +148,18 @@ static inline void gdsc_clear_mem_on(struct gdsc *sc)
regmap_update_bits(sc->regmap, sc->cxcs[i], mask, 0);
}
+static inline void gdsc_deassert_clamp_io(struct gdsc *sc)
+{
+ regmap_update_bits(sc->regmap, sc->clamp_io_ctrl,
+ GMEM_CLAMP_IO_MASK, 0);
+}
+
+static inline void gdsc_assert_clamp_io(struct gdsc *sc)
+{
+ regmap_update_bits(sc->regmap, sc->clamp_io_ctrl,
+ GMEM_CLAMP_IO_MASK, 1);
+}
+
static int gdsc_enable(struct generic_pm_domain *domain)
{
struct gdsc *sc = domain_to_gdsc(domain);
@@ -148,6 +168,9 @@ static int gdsc_enable(struct generic_pm_domain *domain)
if (sc->pwrsts == PWRSTS_ON)
return gdsc_deassert_reset(sc);
+ if (sc->flags & CLAMP_IO)
+ gdsc_deassert_clamp_io(sc);
+
ret = gdsc_toggle_logic(sc, true);
if (ret)
return ret;
@@ -164,20 +187,39 @@ static int gdsc_enable(struct generic_pm_domain *domain)
*/
udelay(1);
+ /* Turn on HW trigger mode if supported */
+ if (sc->flags & HW_CTRL)
+ return gdsc_hwctrl(sc, true);
+
return 0;
}
static int gdsc_disable(struct generic_pm_domain *domain)
{
struct gdsc *sc = domain_to_gdsc(domain);
+ int ret;
if (sc->pwrsts == PWRSTS_ON)
return gdsc_assert_reset(sc);
+ /* Turn off HW trigger mode if supported */
+ if (sc->flags & HW_CTRL) {
+ ret = gdsc_hwctrl(sc, false);
+ if (ret < 0)
+ return ret;
+ }
+
if (sc->pwrsts & PWRSTS_OFF)
gdsc_clear_mem_on(sc);
- return gdsc_toggle_logic(sc, false);
+ ret = gdsc_toggle_logic(sc, false);
+ if (ret)
+ return ret;
+
+ if (sc->flags & CLAMP_IO)
+ gdsc_assert_clamp_io(sc);
+
+ return 0;
}
static int gdsc_init(struct gdsc *sc)
diff --git a/drivers/clk/qcom/gdsc.h b/drivers/clk/qcom/gdsc.h
index 3bf497c36bdf..39648348e5ec 100644
--- a/drivers/clk/qcom/gdsc.h
+++ b/drivers/clk/qcom/gdsc.h
@@ -39,6 +39,7 @@ struct gdsc {
struct regmap *regmap;
unsigned int gdscr;
unsigned int gds_hw_ctrl;
+ unsigned int clamp_io_ctrl;
unsigned int *cxcs;
unsigned int cxc_count;
const u8 pwrsts;
@@ -50,6 +51,8 @@ struct gdsc {
#define PWRSTS_RET_ON (PWRSTS_RET | PWRSTS_ON)
const u8 flags;
#define VOTABLE BIT(0)
+#define CLAMP_IO BIT(1)
+#define HW_CTRL BIT(2)
struct reset_controller_dev *rcdev;
unsigned int *resets;
unsigned int reset_count;
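
Taken together, the gdsc.c/gdsc.h changes thread two optional steps into the power sequence: CLAMP_IO deasserts an I/O clamp before the domain powers up and re-asserts it after power-down, while HW_CTRL hands a powered-on domain over to a hardware trigger and reclaims it before any software power-down. A standalone mock of that ordering — the puts() strings stand in for the driver's regmap writes and are purely illustrative:

	/* Standalone mock of the new GDSC enable/disable ordering.  The
	 * helpers are hypothetical; the driver itself uses regmap accessors. */
	#include <stdio.h>

	#define VOTABLE  (1u << 0)
	#define CLAMP_IO (1u << 1)
	#define HW_CTRL  (1u << 2)

	static void gdsc_enable_mock(unsigned int flags)
	{
		if (flags & CLAMP_IO)
			puts("deassert I/O clamp");
		puts("power domain on, poll PWR_ON, settle 1us");
		if (flags & HW_CTRL)
			puts("hand control to hardware trigger");
	}

	static void gdsc_disable_mock(unsigned int flags)
	{
		if (flags & HW_CTRL)
			puts("reclaim control from hardware trigger");
		puts("clear mem-on votes, power domain off");
		if (flags & CLAMP_IO)
			puts("assert I/O clamp");
	}

	int main(void)
	{
		gdsc_enable_mock(CLAMP_IO);
		gdsc_disable_mock(CLAMP_IO);
		return 0;
	}
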
diff --git a/drivers/clk/qcom/lcc-ipq806x.c b/drivers/clk/qcom/lcc-ipq806x.c
index db3998e5e2d8..977e98eadbeb 100644
--- a/drivers/clk/qcom/lcc-ipq806x.c
+++ b/drivers/clk/qcom/lcc-ipq806x.c
@@ -443,7 +443,7 @@ static int lcc_ipq806x_probe(struct platform_device *pdev)
return PTR_ERR(regmap);
/* Configure the rate of PLL4 if the bootloader hasn't already */
- val = regmap_read(regmap, 0x0, &val);
+ regmap_read(regmap, 0x0, &val);
if (!val)
clk_pll_configure_sr(&pll4, regmap, &pll4_config, true);
/* Enable PLL4 source on the LPASS Primary PLL Mux */
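
The lcc-ipq806x change is a classic output-parameter bug: regmap_read() returns a status code (0 on success) and writes the register contents through its third argument, so assigning the return value back into val overwrote the data just read and made the !val "bootloader already configured PLL4" check fire unconditionally. A standalone demonstration of the pattern, using a hypothetical stand-in for regmap_read():

	/* Demonstrates why "val = read(..., &val)" clobbers the data.
	 * fake_regmap_read() is a hypothetical stand-in, not the kernel API. */
	#include <stdio.h>

	static int fake_regmap_read(unsigned int reg, unsigned int *val)
	{
		(void)reg;
		*val = 0x1234;	/* pretend the bootloader programmed the register */
		return 0;	/* success status */
	}

	int main(void)
	{
		unsigned int val;

		val = fake_regmap_read(0x0, &val);	/* buggy: val is now 0 ... */
		printf("buggy: val = %#x\n", val);	/* ... so !val wrongly fires */

		fake_regmap_read(0x0, &val);		/* fixed: val keeps 0x1234 */
		printf("fixed: val = %#x\n", val);
		return 0;
	}
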
diff --git a/drivers/clk/qcom/mmcc-msm8996.c b/drivers/clk/qcom/mmcc-msm8996.c
index ca97e1151797..9b97246287a7 100644
--- a/drivers/clk/qcom/mmcc-msm8996.c
+++ b/drivers/clk/qcom/mmcc-msm8996.c
@@ -2945,6 +2945,7 @@ static struct gdsc venus_core0_gdsc = {
.name = "venus_core0",
},
.pwrsts = PWRSTS_OFF_ON,
+ .flags = HW_CTRL,
};
static struct gdsc venus_core1_gdsc = {
@@ -2955,6 +2956,7 @@ static struct gdsc venus_core1_gdsc = {
.name = "venus_core1",
},
.pwrsts = PWRSTS_OFF_ON,
+ .flags = HW_CTRL,
};
static struct gdsc camss_gdsc = {
@@ -3034,6 +3036,28 @@ static struct gdsc mdss_gdsc = {
.pwrsts = PWRSTS_OFF_ON,
};
+static struct gdsc gpu_gdsc = {
+ .gdscr = 0x4034,
+ .gds_hw_ctrl = 0x4038,
+ .pd = {
+ .name = "gpu",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = VOTABLE,
+};
+
+static struct gdsc gpu_gx_gdsc = {
+ .gdscr = 0x4024,
+ .clamp_io_ctrl = 0x4300,
+ .cxcs = (unsigned int []){ 0x4028 },
+ .cxc_count = 1,
+ .pd = {
+ .name = "gpu_gx",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = CLAMP_IO,
+};
+
static struct clk_regmap *mmcc_msm8996_clocks[] = {
[MMPLL0_EARLY] = &mmpll0_early.clkr,
[MMPLL0_PLL] = &mmpll0.clkr,
@@ -3223,6 +3247,8 @@ static struct gdsc *mmcc_msm8996_gdscs[] = {
[CPP_GDSC] = &cpp_gdsc,
[FD_GDSC] = &fd_gdsc,
[MDSS_GDSC] = &mdss_gdsc,
+ [GPU_GDSC] = &gpu_gdsc,
+ [GPU_GX_GDSC] = &gpu_gx_gdsc,
};
static const struct qcom_reset_map mmcc_msm8996_resets[] = {
diff --git a/drivers/clk/renesas/Kconfig b/drivers/clk/renesas/Kconfig
index 41a12d376799..2586dfa0026b 100644
--- a/drivers/clk/renesas/Kconfig
+++ b/drivers/clk/renesas/Kconfig
@@ -1,5 +1,7 @@
config CLK_RENESAS_CPG_MSSR
bool
+ default y if ARCH_R8A7743
+ default y if ARCH_R8A7745
default y if ARCH_R8A7795
default y if ARCH_R8A7796
diff --git a/drivers/clk/renesas/Makefile b/drivers/clk/renesas/Makefile
index 90dd0db7d9c6..1072f7653c0c 100644
--- a/drivers/clk/renesas/Makefile
+++ b/drivers/clk/renesas/Makefile
@@ -2,6 +2,8 @@ obj-$(CONFIG_ARCH_EMEV2) += clk-emev2.o
obj-$(CONFIG_ARCH_R7S72100) += clk-rz.o
obj-$(CONFIG_ARCH_R8A73A4) += clk-r8a73a4.o clk-div6.o
obj-$(CONFIG_ARCH_R8A7740) += clk-r8a7740.o clk-div6.o
+obj-$(CONFIG_ARCH_R8A7743) += r8a7743-cpg-mssr.o rcar-gen2-cpg.o
+obj-$(CONFIG_ARCH_R8A7745) += r8a7745-cpg-mssr.o rcar-gen2-cpg.o
obj-$(CONFIG_ARCH_R8A7778) += clk-r8a7778.o
obj-$(CONFIG_ARCH_R8A7779) += clk-r8a7779.o
obj-$(CONFIG_ARCH_R8A7790) += clk-rcar-gen2.o clk-div6.o
diff --git a/drivers/clk/renesas/clk-r8a7778.c b/drivers/clk/renesas/clk-r8a7778.c
index 40e3a501a50e..886a8380e912 100644
--- a/drivers/clk/renesas/clk-r8a7778.c
+++ b/drivers/clk/renesas/clk-r8a7778.c
@@ -12,6 +12,7 @@
#include <linux/clk/renesas.h>
#include <linux/of_address.h>
#include <linux/slab.h>
+#include <linux/soc/renesas/rcar-rst.h>
struct r8a7778_cpg {
struct clk_onecell_data data;
@@ -83,6 +84,18 @@ static void __init r8a7778_cpg_clocks_init(struct device_node *np)
struct clk **clks;
unsigned int i;
int num_clks;
+ u32 mode;
+
+ if (rcar_rst_read_mode_pins(&mode))
+ return;
+
+ BUG_ON(!(mode & BIT(19)));
+
+ cpg_mode_rates = (!!(mode & BIT(18)) << 2) |
+ (!!(mode & BIT(12)) << 1) |
+ (!!(mode & BIT(11)));
+ cpg_mode_divs = (!!(mode & BIT(2)) << 1) |
+ (!!(mode & BIT(1)));
num_clks = of_property_count_strings(np, "clock-output-names");
if (num_clks < 0) {
@@ -130,16 +143,3 @@ static void __init r8a7778_cpg_clocks_init(struct device_node *np)
CLK_OF_DECLARE(r8a7778_cpg_clks, "renesas,r8a7778-cpg-clocks",
r8a7778_cpg_clocks_init);
-
-void __init r8a7778_clocks_init(u32 mode)
-{
- BUG_ON(!(mode & BIT(19)));
-
- cpg_mode_rates = (!!(mode & BIT(18)) << 2) |
- (!!(mode & BIT(12)) << 1) |
- (!!(mode & BIT(11)));
- cpg_mode_divs = (!!(mode & BIT(2)) << 1) |
- (!!(mode & BIT(1)));
-
- of_clk_init(NULL);
-}
diff --git a/drivers/clk/renesas/clk-r8a7779.c b/drivers/clk/renesas/clk-r8a7779.c
index cf2a37df03b1..27fbfafaf2cd 100644
--- a/drivers/clk/renesas/clk-r8a7779.c
+++ b/drivers/clk/renesas/clk-r8a7779.c
@@ -18,6 +18,7 @@
#include <linux/of_address.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
+#include <linux/soc/renesas/rcar-rst.h>
#include <dt-bindings/clock/r8a7779-clock.h>
@@ -88,8 +89,6 @@ static const unsigned int cpg_plla_mult[4] __initconst = { 42, 48, 56, 64 };
* Initialization
*/
-static u32 cpg_mode __initdata;
-
static struct clk * __init
r8a7779_cpg_register_clock(struct device_node *np, struct r8a7779_cpg *cpg,
const struct cpg_clk_config *config,
@@ -127,6 +126,10 @@ static void __init r8a7779_cpg_clocks_init(struct device_node *np)
struct clk **clks;
unsigned int i, plla_mult;
int num_clks;
+ u32 mode;
+
+ if (rcar_rst_read_mode_pins(&mode))
+ return;
num_clks = of_property_count_strings(np, "clock-output-names");
if (num_clks < 0) {
@@ -148,8 +151,8 @@ static void __init r8a7779_cpg_clocks_init(struct device_node *np)
cpg->data.clks = clks;
cpg->data.clk_num = num_clks;
- config = &cpg_clk_configs[CPG_CLK_CONFIG_INDEX(cpg_mode)];
- plla_mult = cpg_plla_mult[CPG_PLLA_MULT_INDEX(cpg_mode)];
+ config = &cpg_clk_configs[CPG_CLK_CONFIG_INDEX(mode)];
+ plla_mult = cpg_plla_mult[CPG_PLLA_MULT_INDEX(mode)];
for (i = 0; i < num_clks; ++i) {
const char *name;
@@ -173,10 +176,3 @@ static void __init r8a7779_cpg_clocks_init(struct device_node *np)
}
CLK_OF_DECLARE(r8a7779_cpg_clks, "renesas,r8a7779-cpg-clocks",
r8a7779_cpg_clocks_init);
-
-void __init r8a7779_clocks_init(u32 mode)
-{
- cpg_mode = mode;
-
- of_clk_init(NULL);
-}
diff --git a/drivers/clk/renesas/clk-rcar-gen2.c b/drivers/clk/renesas/clk-rcar-gen2.c
index 00e6aba4b9c0..f39519edc645 100644
--- a/drivers/clk/renesas/clk-rcar-gen2.c
+++ b/drivers/clk/renesas/clk-rcar-gen2.c
@@ -19,6 +19,7 @@
#include <linux/of_address.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
+#include <linux/soc/renesas/rcar-rst.h>
struct rcar_gen2_cpg {
struct clk_onecell_data data;
@@ -364,6 +365,23 @@ rcar_gen2_cpg_register_clock(struct device_node *np, struct rcar_gen2_cpg *cpg,
4, 0, table, &cpg->lock);
}
+/*
+ * Reset register definitions.
+ */
+#define MODEMR 0xe6160060
+
+static u32 __init rcar_gen2_read_mode_pins(void)
+{
+ void __iomem *modemr = ioremap_nocache(MODEMR, 4);
+ u32 mode;
+
+ BUG_ON(!modemr);
+ mode = ioread32(modemr);
+ iounmap(modemr);
+
+ return mode;
+}
+
static void __init rcar_gen2_cpg_clocks_init(struct device_node *np)
{
const struct cpg_pll_config *config;
@@ -372,6 +390,13 @@ static void __init rcar_gen2_cpg_clocks_init(struct device_node *np)
unsigned int i;
int num_clks;
+ if (rcar_rst_read_mode_pins(&cpg_mode)) {
+ /* Backward-compatibility with old DT */
+ pr_warn("%s: failed to obtain mode pins from RST\n",
+ np->full_name);
+ cpg_mode = rcar_gen2_read_mode_pins();
+ }
+
num_clks = of_property_count_strings(np, "clock-output-names");
if (num_clks < 0) {
pr_err("%s: failed to count clocks\n", __func__);
@@ -420,10 +445,3 @@ static void __init rcar_gen2_cpg_clocks_init(struct device_node *np)
}
CLK_OF_DECLARE(rcar_gen2_cpg_clks, "renesas,rcar-gen2-cpg-clocks",
rcar_gen2_cpg_clocks_init);
-
-void __init rcar_gen2_clocks_init(u32 mode)
-{
- cpg_mode = mode;
-
- of_clk_init(NULL);
-}
diff --git a/drivers/clk/renesas/r8a7743-cpg-mssr.c b/drivers/clk/renesas/r8a7743-cpg-mssr.c
new file mode 100644
index 000000000000..6dc0b3082aa6
--- /dev/null
+++ b/drivers/clk/renesas/r8a7743-cpg-mssr.c
@@ -0,0 +1,270 @@
+/*
+ * r8a7743 Clock Pulse Generator / Module Standby and Software Reset
+ *
+ * Copyright (C) 2016 Cogent Embedded Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; version 2 of the License.
+ */
+
+#include <linux/device.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/soc/renesas/rcar-rst.h>
+
+#include <dt-bindings/clock/r8a7743-cpg-mssr.h>
+
+#include "renesas-cpg-mssr.h"
+#include "rcar-gen2-cpg.h"
+
+enum clk_ids {
+ /* Core Clock Outputs exported to DT */
+ LAST_DT_CORE_CLK = R8A7743_CLK_OSC,
+
+ /* External Input Clocks */
+ CLK_EXTAL,
+ CLK_USB_EXTAL,
+
+ /* Internal Core Clocks */
+ CLK_MAIN,
+ CLK_PLL0,
+ CLK_PLL1,
+ CLK_PLL3,
+ CLK_PLL1_DIV2,
+
+ /* Module Clocks */
+ MOD_CLK_BASE
+};
+
+static const struct cpg_core_clk r8a7743_core_clks[] __initconst = {
+ /* External Clock Inputs */
+ DEF_INPUT("extal", CLK_EXTAL),
+ DEF_INPUT("usb_extal", CLK_USB_EXTAL),
+
+ /* Internal Core Clocks */
+ DEF_BASE(".main", CLK_MAIN, CLK_TYPE_GEN2_MAIN, CLK_EXTAL),
+ DEF_BASE(".pll0", CLK_PLL0, CLK_TYPE_GEN2_PLL0, CLK_MAIN),
+ DEF_BASE(".pll1", CLK_PLL1, CLK_TYPE_GEN2_PLL1, CLK_MAIN),
+ DEF_BASE(".pll3", CLK_PLL3, CLK_TYPE_GEN2_PLL3, CLK_MAIN),
+
+ DEF_FIXED(".pll1_div2", CLK_PLL1_DIV2, CLK_PLL1, 2, 1),
+
+ /* Core Clock Outputs */
+ DEF_BASE("z", R8A7743_CLK_Z, CLK_TYPE_GEN2_Z, CLK_PLL0),
+ DEF_BASE("lb", R8A7743_CLK_LB, CLK_TYPE_GEN2_LB, CLK_PLL1),
+ DEF_BASE("sdh", R8A7743_CLK_SDH, CLK_TYPE_GEN2_SDH, CLK_PLL1),
+ DEF_BASE("sd0", R8A7743_CLK_SD0, CLK_TYPE_GEN2_SD0, CLK_PLL1),
+ DEF_BASE("qspi", R8A7743_CLK_QSPI, CLK_TYPE_GEN2_QSPI, CLK_PLL1_DIV2),
+ DEF_BASE("rcan", R8A7743_CLK_RCAN, CLK_TYPE_GEN2_RCAN, CLK_USB_EXTAL),
+
+ DEF_FIXED("zg", R8A7743_CLK_ZG, CLK_PLL1, 3, 1),
+ DEF_FIXED("zx", R8A7743_CLK_ZX, CLK_PLL1, 3, 1),
+ DEF_FIXED("zs", R8A7743_CLK_ZS, CLK_PLL1, 6, 1),
+ DEF_FIXED("hp", R8A7743_CLK_HP, CLK_PLL1, 12, 1),
+ DEF_FIXED("b", R8A7743_CLK_B, CLK_PLL1, 12, 1),
+ DEF_FIXED("p", R8A7743_CLK_P, CLK_PLL1, 24, 1),
+ DEF_FIXED("cl", R8A7743_CLK_CL, CLK_PLL1, 48, 1),
+ DEF_FIXED("m2", R8A7743_CLK_M2, CLK_PLL1, 8, 1),
+ DEF_FIXED("zb3", R8A7743_CLK_ZB3, CLK_PLL3, 4, 1),
+ DEF_FIXED("zb3d2", R8A7743_CLK_ZB3D2, CLK_PLL3, 8, 1),
+ DEF_FIXED("ddr", R8A7743_CLK_DDR, CLK_PLL3, 8, 1),
+ DEF_FIXED("mp", R8A7743_CLK_MP, CLK_PLL1_DIV2, 15, 1),
+ DEF_FIXED("cp", R8A7743_CLK_CP, CLK_EXTAL, 2, 1),
+ DEF_FIXED("r", R8A7743_CLK_R, CLK_PLL1, 49152, 1),
+ DEF_FIXED("osc", R8A7743_CLK_OSC, CLK_PLL1, 12288, 1),
+
+ DEF_DIV6P1("sd2", R8A7743_CLK_SD2, CLK_PLL1_DIV2, 0x078),
+ DEF_DIV6P1("sd3", R8A7743_CLK_SD3, CLK_PLL1_DIV2, 0x26c),
+ DEF_DIV6P1("mmc0", R8A7743_CLK_MMC0, CLK_PLL1_DIV2, 0x240),
+};
+
+static const struct mssr_mod_clk r8a7743_mod_clks[] __initconst = {
+ DEF_MOD("msiof0", 0, R8A7743_CLK_MP),
+ DEF_MOD("vcp0", 101, R8A7743_CLK_ZS),
+ DEF_MOD("vpc0", 103, R8A7743_CLK_ZS),
+ DEF_MOD("tmu1", 111, R8A7743_CLK_P),
+ DEF_MOD("3dg", 112, R8A7743_CLK_ZG),
+ DEF_MOD("2d-dmac", 115, R8A7743_CLK_ZS),
+ DEF_MOD("fdp1-1", 118, R8A7743_CLK_ZS),
+ DEF_MOD("fdp1-0", 119, R8A7743_CLK_ZS),
+ DEF_MOD("tmu3", 121, R8A7743_CLK_P),
+ DEF_MOD("tmu2", 122, R8A7743_CLK_P),
+ DEF_MOD("cmt0", 124, R8A7743_CLK_R),
+ DEF_MOD("tmu0", 125, R8A7743_CLK_CP),
+ DEF_MOD("vsp1du1", 127, R8A7743_CLK_ZS),
+ DEF_MOD("vsp1du0", 128, R8A7743_CLK_ZS),
+ DEF_MOD("vsp1-sy", 131, R8A7743_CLK_ZS),
+ DEF_MOD("scifa2", 202, R8A7743_CLK_MP),
+ DEF_MOD("scifa1", 203, R8A7743_CLK_MP),
+ DEF_MOD("scifa0", 204, R8A7743_CLK_MP),
+ DEF_MOD("msiof2", 205, R8A7743_CLK_MP),
+ DEF_MOD("scifb0", 206, R8A7743_CLK_MP),
+ DEF_MOD("scifb1", 207, R8A7743_CLK_MP),
+ DEF_MOD("msiof1", 208, R8A7743_CLK_MP),
+ DEF_MOD("scifb2", 216, R8A7743_CLK_MP),
+ DEF_MOD("sys-dmac1", 218, R8A7743_CLK_ZS),
+ DEF_MOD("sys-dmac0", 219, R8A7743_CLK_ZS),
+ DEF_MOD("tpu0", 304, R8A7743_CLK_CP),
+ DEF_MOD("sdhi3", 311, R8A7743_CLK_SD3),
+ DEF_MOD("sdhi2", 312, R8A7743_CLK_SD2),
+ DEF_MOD("sdhi0", 314, R8A7743_CLK_SD0),
+ DEF_MOD("mmcif0", 315, R8A7743_CLK_MMC0),
+ DEF_MOD("iic0", 318, R8A7743_CLK_HP),
+ DEF_MOD("pciec", 319, R8A7743_CLK_MP),
+ DEF_MOD("iic1", 323, R8A7743_CLK_HP),
+ DEF_MOD("usb3.0", 328, R8A7743_CLK_MP),
+ DEF_MOD("cmt1", 329, R8A7743_CLK_R),
+ DEF_MOD("usbhs-dmac0", 330, R8A7743_CLK_HP),
+ DEF_MOD("usbhs-dmac1", 331, R8A7743_CLK_HP),
+ DEF_MOD("irqc", 407, R8A7743_CLK_CP),
+ DEF_MOD("intc-sys", 408, R8A7743_CLK_ZS),
+ DEF_MOD("audio-dmac1", 501, R8A7743_CLK_HP),
+ DEF_MOD("audio-dmac0", 502, R8A7743_CLK_HP),
+ DEF_MOD("thermal", 522, CLK_EXTAL),
+ DEF_MOD("pwm", 523, R8A7743_CLK_P),
+ DEF_MOD("usb-ehci", 703, R8A7743_CLK_MP),
+ DEF_MOD("usbhs", 704, R8A7743_CLK_HP),
+ DEF_MOD("hscif2", 713, R8A7743_CLK_ZS),
+ DEF_MOD("scif5", 714, R8A7743_CLK_P),
+ DEF_MOD("scif4", 715, R8A7743_CLK_P),
+ DEF_MOD("hscif1", 716, R8A7743_CLK_ZS),
+ DEF_MOD("hscif0", 717, R8A7743_CLK_ZS),
+ DEF_MOD("scif3", 718, R8A7743_CLK_P),
+ DEF_MOD("scif2", 719, R8A7743_CLK_P),
+ DEF_MOD("scif1", 720, R8A7743_CLK_P),
+ DEF_MOD("scif0", 721, R8A7743_CLK_P),
+ DEF_MOD("du1", 723, R8A7743_CLK_ZX),
+ DEF_MOD("du0", 724, R8A7743_CLK_ZX),
+ DEF_MOD("lvds0", 726, R8A7743_CLK_ZX),
+ DEF_MOD("ipmmu-sgx", 800, R8A7743_CLK_ZX),
+ DEF_MOD("vin2", 809, R8A7743_CLK_ZG),
+ DEF_MOD("vin1", 810, R8A7743_CLK_ZG),
+ DEF_MOD("vin0", 811, R8A7743_CLK_ZG),
+ DEF_MOD("etheravb", 812, R8A7743_CLK_HP),
+ DEF_MOD("ether", 813, R8A7743_CLK_P),
+ DEF_MOD("sata1", 814, R8A7743_CLK_ZS),
+ DEF_MOD("sata0", 815, R8A7743_CLK_ZS),
+ DEF_MOD("gpio7", 904, R8A7743_CLK_CP),
+ DEF_MOD("gpio6", 905, R8A7743_CLK_CP),
+ DEF_MOD("gpio5", 907, R8A7743_CLK_CP),
+ DEF_MOD("gpio4", 908, R8A7743_CLK_CP),
+ DEF_MOD("gpio3", 909, R8A7743_CLK_CP),
+ DEF_MOD("gpio2", 910, R8A7743_CLK_CP),
+ DEF_MOD("gpio1", 911, R8A7743_CLK_CP),
+ DEF_MOD("gpio0", 912, R8A7743_CLK_CP),
+ DEF_MOD("can1", 915, R8A7743_CLK_P),
+ DEF_MOD("can0", 916, R8A7743_CLK_P),
+ DEF_MOD("qspi_mod", 917, R8A7743_CLK_QSPI),
+ DEF_MOD("i2c5", 925, R8A7743_CLK_HP),
+ DEF_MOD("iicdvfs", 926, R8A7743_CLK_CP),
+ DEF_MOD("i2c4", 927, R8A7743_CLK_HP),
+ DEF_MOD("i2c3", 928, R8A7743_CLK_HP),
+ DEF_MOD("i2c2", 929, R8A7743_CLK_HP),
+ DEF_MOD("i2c1", 930, R8A7743_CLK_HP),
+ DEF_MOD("i2c0", 931, R8A7743_CLK_HP),
+ DEF_MOD("ssi-all", 1005, R8A7743_CLK_P),
+ DEF_MOD("ssi9", 1006, MOD_CLK_ID(1005)),
+ DEF_MOD("ssi8", 1007, MOD_CLK_ID(1005)),
+ DEF_MOD("ssi7", 1008, MOD_CLK_ID(1005)),
+ DEF_MOD("ssi6", 1009, MOD_CLK_ID(1005)),
+ DEF_MOD("ssi5", 1010, MOD_CLK_ID(1005)),
+ DEF_MOD("ssi4", 1011, MOD_CLK_ID(1005)),
+ DEF_MOD("ssi3", 1012, MOD_CLK_ID(1005)),
+ DEF_MOD("ssi2", 1013, MOD_CLK_ID(1005)),
+ DEF_MOD("ssi1", 1014, MOD_CLK_ID(1005)),
+ DEF_MOD("ssi0", 1015, MOD_CLK_ID(1005)),
+ DEF_MOD("scu-all", 1017, R8A7743_CLK_P),
+ DEF_MOD("scu-dvc1", 1018, MOD_CLK_ID(1017)),
+ DEF_MOD("scu-dvc0", 1019, MOD_CLK_ID(1017)),
+ DEF_MOD("scu-ctu1-mix1", 1020, MOD_CLK_ID(1017)),
+ DEF_MOD("scu-ctu0-mix0", 1021, MOD_CLK_ID(1017)),
+ DEF_MOD("scu-src9", 1022, MOD_CLK_ID(1017)),
+ DEF_MOD("scu-src8", 1023, MOD_CLK_ID(1017)),
+ DEF_MOD("scu-src7", 1024, MOD_CLK_ID(1017)),
+ DEF_MOD("scu-src6", 1025, MOD_CLK_ID(1017)),
+ DEF_MOD("scu-src5", 1026, MOD_CLK_ID(1017)),
+ DEF_MOD("scu-src4", 1027, MOD_CLK_ID(1017)),
+ DEF_MOD("scu-src3", 1028, MOD_CLK_ID(1017)),
+ DEF_MOD("scu-src2", 1029, MOD_CLK_ID(1017)),
+ DEF_MOD("scu-src1", 1030, MOD_CLK_ID(1017)),
+ DEF_MOD("scu-src0", 1031, MOD_CLK_ID(1017)),
+ DEF_MOD("scifa3", 1106, R8A7743_CLK_MP),
+ DEF_MOD("scifa4", 1107, R8A7743_CLK_MP),
+ DEF_MOD("scifa5", 1108, R8A7743_CLK_MP),
+};
+
+static const unsigned int r8a7743_crit_mod_clks[] __initconst = {
+ MOD_CLK_ID(408), /* INTC-SYS (GIC) */
+};
+
+/*
+ * CPG Clock Data
+ */
+
+/*
+ * MD EXTAL PLL0 PLL1 PLL3
+ * 14 13 19 (MHz) *1 *1
+ *---------------------------------------------------
+ * 0 0 0 15 x172/2 x208/2 x106
+ * 0 0 1 15 x172/2 x208/2 x88
+ * 0 1 0 20 x130/2 x156/2 x80
+ * 0 1 1 20 x130/2 x156/2 x66
+ * 1 0 0 26 / 2 x200/2 x240/2 x122
+ * 1 0 1 26 / 2 x200/2 x240/2 x102
+ * 1 1 0 30 / 2 x172/2 x208/2 x106
+ * 1 1 1 30 / 2 x172/2 x208/2 x88
+ *
+ * *1 : Table 7.5a indicates VCO output (PLLx = VCO/2)
+ */
+#define CPG_PLL_CONFIG_INDEX(md) ((((md) & BIT(14)) >> 12) | \
+ (((md) & BIT(13)) >> 12) | \
+ (((md) & BIT(19)) >> 19))
+
+static const struct rcar_gen2_cpg_pll_config cpg_pll_configs[8] __initconst = {
+ /* EXTAL div PLL1 mult PLL3 mult */
+ { 1, 208, 106, },
+ { 1, 208, 88, },
+ { 1, 156, 80, },
+ { 1, 156, 66, },
+ { 2, 240, 122, },
+ { 2, 240, 102, },
+ { 2, 208, 106, },
+ { 2, 208, 88, },
+};
+
+static int __init r8a7743_cpg_mssr_init(struct device *dev)
+{
+ const struct rcar_gen2_cpg_pll_config *cpg_pll_config;
+ u32 cpg_mode;
+ int error;
+
+ error = rcar_rst_read_mode_pins(&cpg_mode);
+ if (error)
+ return error;
+
+ cpg_pll_config = &cpg_pll_configs[CPG_PLL_CONFIG_INDEX(cpg_mode)];
+
+ return rcar_gen2_cpg_init(cpg_pll_config, 2, cpg_mode);
+}
+
+const struct cpg_mssr_info r8a7743_cpg_mssr_info __initconst = {
+ /* Core Clocks */
+ .core_clks = r8a7743_core_clks,
+ .num_core_clks = ARRAY_SIZE(r8a7743_core_clks),
+ .last_dt_core_clk = LAST_DT_CORE_CLK,
+ .num_total_core_clks = MOD_CLK_BASE,
+
+ /* Module Clocks */
+ .mod_clks = r8a7743_mod_clks,
+ .num_mod_clks = ARRAY_SIZE(r8a7743_mod_clks),
+ .num_hw_mod_clks = 12 * 32,
+
+ /* Critical Module Clocks */
+ .crit_mod_clks = r8a7743_crit_mod_clks,
+ .num_crit_mod_clks = ARRAY_SIZE(r8a7743_crit_mod_clks),
+
+ /* Callbacks */
+ .init = r8a7743_cpg_mssr_init,
+ .cpg_clk_register = rcar_gen2_cpg_clk_register,
+};
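
CPG_PLL_CONFIG_INDEX() packs the three relevant mode pins into a 3-bit table index — MD14 into bit 2, MD13 into bit 1 and MD19 into bit 0 — so the eight cpg_pll_configs rows line up with the comment table above the macro. A standalone check of the packing (the macro is copied from the file; main() and its sample mode value are illustrative):

	/* Verifies the MD14/MD13/MD19 -> index packing used by the CPG data. */
	#include <stdio.h>

	#define BIT(n) (1u << (n))
	#define CPG_PLL_CONFIG_INDEX(md) ((((md) & BIT(14)) >> 12) | \
					  (((md) & BIT(13)) >> 12) | \
					  (((md) & BIT(19)) >> 19))

	int main(void)
	{
		/* MD14=1, MD13=0, MD19=1: the 26/2 MHz EXTAL row -> 0b101 = 5,
		 * i.e. the { 2, 240, 102 } entry of cpg_pll_configs */
		unsigned int mode = BIT(14) | BIT(19);

		printf("index = %u\n", CPG_PLL_CONFIG_INDEX(mode));
		return 0;
	}
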
diff --git a/drivers/clk/renesas/r8a7745-cpg-mssr.c b/drivers/clk/renesas/r8a7745-cpg-mssr.c
new file mode 100644
index 000000000000..2f15ba786c3b
--- /dev/null
+++ b/drivers/clk/renesas/r8a7745-cpg-mssr.c
@@ -0,0 +1,259 @@
+/*
+ * r8a7745 Clock Pulse Generator / Module Standby and Software Reset
+ *
+ * Copyright (C) 2016 Cogent Embedded Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; version 2 of the License.
+ */
+
+#include <linux/device.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/soc/renesas/rcar-rst.h>
+
+#include <dt-bindings/clock/r8a7745-cpg-mssr.h>
+
+#include "renesas-cpg-mssr.h"
+#include "rcar-gen2-cpg.h"
+
+enum clk_ids {
+ /* Core Clock Outputs exported to DT */
+ LAST_DT_CORE_CLK = R8A7745_CLK_OSC,
+
+ /* External Input Clocks */
+ CLK_EXTAL,
+ CLK_USB_EXTAL,
+
+ /* Internal Core Clocks */
+ CLK_MAIN,
+ CLK_PLL0,
+ CLK_PLL1,
+ CLK_PLL3,
+ CLK_PLL1_DIV2,
+
+ /* Module Clocks */
+ MOD_CLK_BASE
+};
+
+static const struct cpg_core_clk r8a7745_core_clks[] __initconst = {
+ /* External Clock Inputs */
+ DEF_INPUT("extal", CLK_EXTAL),
+ DEF_INPUT("usb_extal", CLK_USB_EXTAL),
+
+ /* Internal Core Clocks */
+ DEF_BASE(".main", CLK_MAIN, CLK_TYPE_GEN2_MAIN, CLK_EXTAL),
+ DEF_BASE(".pll0", CLK_PLL0, CLK_TYPE_GEN2_PLL0, CLK_MAIN),
+ DEF_BASE(".pll1", CLK_PLL1, CLK_TYPE_GEN2_PLL1, CLK_MAIN),
+ DEF_BASE(".pll3", CLK_PLL3, CLK_TYPE_GEN2_PLL3, CLK_MAIN),
+
+ DEF_FIXED(".pll1_div2", CLK_PLL1_DIV2, CLK_PLL1, 2, 1),
+
+ /* Core Clock Outputs */
+ DEF_BASE("lb", R8A7745_CLK_LB, CLK_TYPE_GEN2_LB, CLK_PLL1),
+ DEF_BASE("sdh", R8A7745_CLK_SDH, CLK_TYPE_GEN2_SDH, CLK_PLL1),
+ DEF_BASE("sd0", R8A7745_CLK_SD0, CLK_TYPE_GEN2_SD0, CLK_PLL1),
+ DEF_BASE("qspi", R8A7745_CLK_QSPI, CLK_TYPE_GEN2_QSPI, CLK_PLL1_DIV2),
+ DEF_BASE("rcan", R8A7745_CLK_RCAN, CLK_TYPE_GEN2_RCAN, CLK_USB_EXTAL),
+
+ DEF_FIXED("z2", R8A7745_CLK_Z2, CLK_PLL0, 1, 1),
+ DEF_FIXED("zg", R8A7745_CLK_ZG, CLK_PLL1, 6, 1),
+ DEF_FIXED("zx", R8A7745_CLK_ZX, CLK_PLL1, 3, 1),
+ DEF_FIXED("zs", R8A7745_CLK_ZS, CLK_PLL1, 6, 1),
+ DEF_FIXED("hp", R8A7745_CLK_HP, CLK_PLL1, 12, 1),
+ DEF_FIXED("b", R8A7745_CLK_B, CLK_PLL1, 12, 1),
+ DEF_FIXED("p", R8A7745_CLK_P, CLK_PLL1, 24, 1),
+ DEF_FIXED("cl", R8A7745_CLK_CL, CLK_PLL1, 48, 1),
+ DEF_FIXED("cp", R8A7745_CLK_CP, CLK_PLL1, 48, 1),
+ DEF_FIXED("m2", R8A7745_CLK_M2, CLK_PLL1, 8, 1),
+ DEF_FIXED("zb3", R8A7745_CLK_ZB3, CLK_PLL3, 4, 1),
+ DEF_FIXED("zb3d2", R8A7745_CLK_ZB3D2, CLK_PLL3, 8, 1),
+ DEF_FIXED("ddr", R8A7745_CLK_DDR, CLK_PLL3, 8, 1),
+ DEF_FIXED("mp", R8A7745_CLK_MP, CLK_PLL1_DIV2, 15, 1),
+ DEF_FIXED("cpex", R8A7745_CLK_CPEX, CLK_EXTAL, 2, 1),
+ DEF_FIXED("r", R8A7745_CLK_R, CLK_PLL1, 49152, 1),
+ DEF_FIXED("osc", R8A7745_CLK_OSC, CLK_PLL1, 12288, 1),
+
+ DEF_DIV6P1("sd2", R8A7745_CLK_SD2, CLK_PLL1_DIV2, 0x078),
+ DEF_DIV6P1("sd3", R8A7745_CLK_SD3, CLK_PLL1_DIV2, 0x26c),
+ DEF_DIV6P1("mmc0", R8A7745_CLK_MMC0, CLK_PLL1_DIV2, 0x240),
+};
+
+static const struct mssr_mod_clk r8a7745_mod_clks[] __initconst = {
+ DEF_MOD("msiof0", 0, R8A7745_CLK_MP),
+ DEF_MOD("vcp0", 101, R8A7745_CLK_ZS),
+ DEF_MOD("vpc0", 103, R8A7745_CLK_ZS),
+ DEF_MOD("tmu1", 111, R8A7745_CLK_P),
+ DEF_MOD("3dg", 112, R8A7745_CLK_ZG),
+ DEF_MOD("2d-dmac", 115, R8A7745_CLK_ZS),
+ DEF_MOD("fdp1-0", 119, R8A7745_CLK_ZS),
+ DEF_MOD("tmu3", 121, R8A7745_CLK_P),
+ DEF_MOD("tmu2", 122, R8A7745_CLK_P),
+ DEF_MOD("cmt0", 124, R8A7745_CLK_R),
+ DEF_MOD("tmu0", 125, R8A7745_CLK_CP),
+ DEF_MOD("vsp1du0", 128, R8A7745_CLK_ZS),
+ DEF_MOD("vsp1-sy", 131, R8A7745_CLK_ZS),
+ DEF_MOD("scifa2", 202, R8A7745_CLK_MP),
+ DEF_MOD("scifa1", 203, R8A7745_CLK_MP),
+ DEF_MOD("scifa0", 204, R8A7745_CLK_MP),
+ DEF_MOD("msiof2", 205, R8A7745_CLK_MP),
+ DEF_MOD("scifb0", 206, R8A7745_CLK_MP),
+ DEF_MOD("scifb1", 207, R8A7745_CLK_MP),
+ DEF_MOD("msiof1", 208, R8A7745_CLK_MP),
+ DEF_MOD("scifb2", 216, R8A7745_CLK_MP),
+ DEF_MOD("sys-dmac1", 218, R8A7745_CLK_ZS),
+ DEF_MOD("sys-dmac0", 219, R8A7745_CLK_ZS),
+ DEF_MOD("tpu0", 304, R8A7745_CLK_CP),
+ DEF_MOD("sdhi3", 311, R8A7745_CLK_SD3),
+ DEF_MOD("sdhi2", 312, R8A7745_CLK_SD2),
+ DEF_MOD("sdhi0", 314, R8A7745_CLK_SD0),
+ DEF_MOD("mmcif0", 315, R8A7745_CLK_MMC0),
+ DEF_MOD("iic0", 318, R8A7745_CLK_HP),
+ DEF_MOD("iic1", 323, R8A7745_CLK_HP),
+ DEF_MOD("cmt1", 329, R8A7745_CLK_R),
+ DEF_MOD("usbhs-dmac0", 330, R8A7745_CLK_HP),
+ DEF_MOD("usbhs-dmac1", 331, R8A7745_CLK_HP),
+ DEF_MOD("irqc", 407, R8A7745_CLK_CP),
+ DEF_MOD("intc-sys", 408, R8A7745_CLK_ZS),
+ DEF_MOD("audio-dmac0", 502, R8A7745_CLK_HP),
+ DEF_MOD("pwm", 523, R8A7745_CLK_P),
+ DEF_MOD("usb-ehci", 703, R8A7745_CLK_MP),
+ DEF_MOD("usbhs", 704, R8A7745_CLK_HP),
+ DEF_MOD("hscif2", 713, R8A7745_CLK_ZS),
+ DEF_MOD("scif5", 714, R8A7745_CLK_P),
+ DEF_MOD("scif4", 715, R8A7745_CLK_P),
+ DEF_MOD("hscif1", 716, R8A7745_CLK_ZS),
+ DEF_MOD("hscif0", 717, R8A7745_CLK_ZS),
+ DEF_MOD("scif3", 718, R8A7745_CLK_P),
+ DEF_MOD("scif2", 719, R8A7745_CLK_P),
+ DEF_MOD("scif1", 720, R8A7745_CLK_P),
+ DEF_MOD("scif0", 721, R8A7745_CLK_P),
+ DEF_MOD("du0", 724, R8A7745_CLK_ZX),
+ DEF_MOD("ipmmu-sgx", 800, R8A7745_CLK_ZX),
+ DEF_MOD("vin1", 810, R8A7745_CLK_ZG),
+ DEF_MOD("vin0", 811, R8A7745_CLK_ZG),
+ DEF_MOD("etheravb", 812, R8A7745_CLK_HP),
+ DEF_MOD("ether", 813, R8A7745_CLK_P),
+ DEF_MOD("gpio6", 905, R8A7745_CLK_CP),
+ DEF_MOD("gpio5", 907, R8A7745_CLK_CP),
+ DEF_MOD("gpio4", 908, R8A7745_CLK_CP),
+ DEF_MOD("gpio3", 909, R8A7745_CLK_CP),
+ DEF_MOD("gpio2", 910, R8A7745_CLK_CP),
+ DEF_MOD("gpio1", 911, R8A7745_CLK_CP),
+ DEF_MOD("gpio0", 912, R8A7745_CLK_CP),
+ DEF_MOD("can1", 915, R8A7745_CLK_P),
+ DEF_MOD("can0", 916, R8A7745_CLK_P),
+ DEF_MOD("qspi_mod", 917, R8A7745_CLK_QSPI),
+ DEF_MOD("i2c5", 925, R8A7745_CLK_HP),
+ DEF_MOD("i2c4", 927, R8A7745_CLK_HP),
+ DEF_MOD("i2c3", 928, R8A7745_CLK_HP),
+ DEF_MOD("i2c2", 929, R8A7745_CLK_HP),
+ DEF_MOD("i2c1", 930, R8A7745_CLK_HP),
+ DEF_MOD("i2c0", 931, R8A7745_CLK_HP),
+ DEF_MOD("ssi-all", 1005, R8A7745_CLK_P),
+ DEF_MOD("ssi9", 1006, MOD_CLK_ID(1005)),
+ DEF_MOD("ssi8", 1007, MOD_CLK_ID(1005)),
+ DEF_MOD("ssi7", 1008, MOD_CLK_ID(1005)),
+ DEF_MOD("ssi6", 1009, MOD_CLK_ID(1005)),
+ DEF_MOD("ssi5", 1010, MOD_CLK_ID(1005)),
+ DEF_MOD("ssi4", 1011, MOD_CLK_ID(1005)),
+ DEF_MOD("ssi3", 1012, MOD_CLK_ID(1005)),
+ DEF_MOD("ssi2", 1013, MOD_CLK_ID(1005)),
+ DEF_MOD("ssi1", 1014, MOD_CLK_ID(1005)),
+ DEF_MOD("ssi0", 1015, MOD_CLK_ID(1005)),
+ DEF_MOD("scu-all", 1017, R8A7745_CLK_P),
+ DEF_MOD("scu-dvc1", 1018, MOD_CLK_ID(1017)),
+ DEF_MOD("scu-dvc0", 1019, MOD_CLK_ID(1017)),
+ DEF_MOD("scu-ctu1-mix1", 1020, MOD_CLK_ID(1017)),
+ DEF_MOD("scu-ctu0-mix0", 1021, MOD_CLK_ID(1017)),
+ DEF_MOD("scu-src9", 1022, MOD_CLK_ID(1017)),
+ DEF_MOD("scu-src8", 1023, MOD_CLK_ID(1017)),
+ DEF_MOD("scu-src7", 1024, MOD_CLK_ID(1017)),
+ DEF_MOD("scu-src6", 1025, MOD_CLK_ID(1017)),
+ DEF_MOD("scu-src5", 1026, MOD_CLK_ID(1017)),
+ DEF_MOD("scu-src4", 1027, MOD_CLK_ID(1017)),
+ DEF_MOD("scu-src3", 1028, MOD_CLK_ID(1017)),
+ DEF_MOD("scu-src2", 1029, MOD_CLK_ID(1017)),
+ DEF_MOD("scu-src1", 1030, MOD_CLK_ID(1017)),
+ DEF_MOD("scu-src0", 1031, MOD_CLK_ID(1017)),
+ DEF_MOD("scifa3", 1106, R8A7745_CLK_MP),
+ DEF_MOD("scifa4", 1107, R8A7745_CLK_MP),
+ DEF_MOD("scifa5", 1108, R8A7745_CLK_MP),
+};
+
+static const unsigned int r8a7745_crit_mod_clks[] __initconst = {
+ MOD_CLK_ID(408), /* INTC-SYS (GIC) */
+};
+
+/*
+ * CPG Clock Data
+ */
+
+/*
+ * MD EXTAL PLL0 PLL1 PLL3
+ * 14 13 19 (MHz) *1 *2
+ *---------------------------------------------------
+ * 0 0 0 15 x200/3 x208/2 x106
+ * 0 0 1 15 x200/3 x208/2 x88
+ * 0 1 0 20 x150/3 x156/2 x80
+ * 0 1 1 20 x150/3 x156/2 x66
+ * 1 0 0 26 / 2 x230/3 x240/2 x122
+ * 1 0 1 26 / 2 x230/3 x240/2 x102
+ * 1 1 0 30 / 2 x200/3 x208/2 x106
+ * 1 1 1 30 / 2 x200/3 x208/2 x88
+ *
+ * *1 : Table 7.5b indicates VCO output (PLL0 = VCO/3)
+ * *2 : Table 7.5b indicates VCO output (PLL1 = VCO/2)
+ */
+#define CPG_PLL_CONFIG_INDEX(md) ((((md) & BIT(14)) >> 12) | \
+ (((md) & BIT(13)) >> 12) | \
+ (((md) & BIT(19)) >> 19))
+
+static const struct rcar_gen2_cpg_pll_config cpg_pll_configs[8] __initconst = {
+ /* EXTAL div PLL1 mult PLL3 mult PLL0 mult */
+ { 1, 208, 106, 200 },
+ { 1, 208, 88, 200 },
+ { 1, 156, 80, 150 },
+ { 1, 156, 66, 150 },
+ { 2, 240, 122, 230 },
+ { 2, 240, 102, 230 },
+ { 2, 208, 106, 200 },
+ { 2, 208, 88, 200 },
+};
+
+static int __init r8a7745_cpg_mssr_init(struct device *dev)
+{
+ const struct rcar_gen2_cpg_pll_config *cpg_pll_config;
+ u32 cpg_mode;
+ int error;
+
+ error = rcar_rst_read_mode_pins(&cpg_mode);
+ if (error)
+ return error;
+
+ cpg_pll_config = &cpg_pll_configs[CPG_PLL_CONFIG_INDEX(cpg_mode)];
+
+ return rcar_gen2_cpg_init(cpg_pll_config, 3, cpg_mode);
+}
+
+const struct cpg_mssr_info r8a7745_cpg_mssr_info __initconst = {
+ /* Core Clocks */
+ .core_clks = r8a7745_core_clks,
+ .num_core_clks = ARRAY_SIZE(r8a7745_core_clks),
+ .last_dt_core_clk = LAST_DT_CORE_CLK,
+ .num_total_core_clks = MOD_CLK_BASE,
+
+ /* Module Clocks */
+ .mod_clks = r8a7745_mod_clks,
+ .num_mod_clks = ARRAY_SIZE(r8a7745_mod_clks),
+ .num_hw_mod_clks = 12 * 32,
+
+ /* Critical Module Clocks */
+ .crit_mod_clks = r8a7745_crit_mod_clks,
+ .num_crit_mod_clks = ARRAY_SIZE(r8a7745_crit_mod_clks),
+
+ /* Callbacks */
+ .init = r8a7745_cpg_mssr_init,
+ .cpg_clk_register = rcar_gen2_cpg_clk_register,
+};
diff --git a/drivers/clk/renesas/r8a7795-cpg-mssr.c b/drivers/clk/renesas/r8a7795-cpg-mssr.c
index f255e451e8ca..50698a7d9074 100644
--- a/drivers/clk/renesas/r8a7795-cpg-mssr.c
+++ b/drivers/clk/renesas/r8a7795-cpg-mssr.c
@@ -15,6 +15,7 @@
#include <linux/device.h>
#include <linux/init.h>
#include <linux/kernel.h>
+#include <linux/soc/renesas/rcar-rst.h>
#include <dt-bindings/clock/r8a7795-cpg-mssr.h>
@@ -97,7 +98,7 @@ static const struct cpg_core_clk r8a7795_core_clks[] __initconst = {
DEF_FIXED("cp", R8A7795_CLK_CP, CLK_EXTAL, 2, 1),
DEF_DIV6P1("mso", R8A7795_CLK_MSO, CLK_PLL1_DIV4, 0x014),
- DEF_DIV6P1("hdmi", R8A7795_CLK_HDMI, CLK_PLL1_DIV2, 0x250),
+ DEF_DIV6P1("hdmi", R8A7795_CLK_HDMI, CLK_PLL1_DIV4, 0x250),
DEF_DIV6P1("canfd", R8A7795_CLK_CANFD, CLK_PLL1_DIV4, 0x244),
DEF_DIV6P1("csi0", R8A7795_CLK_CSI0, CLK_PLL1_DIV4, 0x00c),
@@ -311,7 +312,12 @@ static const struct rcar_gen3_cpg_pll_config cpg_pll_configs[16] __initconst = {
static int __init r8a7795_cpg_mssr_init(struct device *dev)
{
const struct rcar_gen3_cpg_pll_config *cpg_pll_config;
- u32 cpg_mode = rcar_gen3_read_mode_pins();
+ u32 cpg_mode;
+ int error;
+
+ error = rcar_rst_read_mode_pins(&cpg_mode);
+ if (error)
+ return error;
cpg_pll_config = &cpg_pll_configs[CPG_PLL_CONFIG_INDEX(cpg_mode)];
if (!cpg_pll_config->extal_div) {
diff --git a/drivers/clk/renesas/r8a7796-cpg-mssr.c b/drivers/clk/renesas/r8a7796-cpg-mssr.c
index eb347ed265f2..7d298c57a3e0 100644
--- a/drivers/clk/renesas/r8a7796-cpg-mssr.c
+++ b/drivers/clk/renesas/r8a7796-cpg-mssr.c
@@ -16,6 +16,7 @@
#include <linux/device.h>
#include <linux/init.h>
#include <linux/kernel.h>
+#include <linux/soc/renesas/rcar-rst.h>
#include <dt-bindings/clock/r8a7796-cpg-mssr.h>
@@ -102,6 +103,8 @@ static const struct cpg_core_clk r8a7796_core_clks[] __initconst = {
DEF_FIXED("cl", R8A7796_CLK_CL, CLK_PLL1_DIV2, 48, 1),
DEF_FIXED("cp", R8A7796_CLK_CP, CLK_EXTAL, 2, 1),
+ DEF_DIV6P1("csi0", R8A7796_CLK_CSI0, CLK_PLL1_DIV4, 0x00c),
+
DEF_DIV6_RO("osc", R8A7796_CLK_OSC, CLK_EXTAL, CPG_RCKCR, 8),
DEF_DIV6_RO("r_int", CLK_RINT, CLK_EXTAL, CPG_RCKCR, 32),
@@ -109,6 +112,14 @@ static const struct cpg_core_clk r8a7796_core_clks[] __initconst = {
};
static const struct mssr_mod_clk r8a7796_mod_clks[] __initconst = {
+ DEF_MOD("scif5", 202, R8A7796_CLK_S3D4),
+ DEF_MOD("scif4", 203, R8A7796_CLK_S3D4),
+ DEF_MOD("scif3", 204, R8A7796_CLK_S3D4),
+ DEF_MOD("scif1", 206, R8A7796_CLK_S3D4),
+ DEF_MOD("scif0", 207, R8A7796_CLK_S3D4),
+ DEF_MOD("sys-dmac2", 217, R8A7796_CLK_S0D3),
+ DEF_MOD("sys-dmac1", 218, R8A7796_CLK_S0D3),
+ DEF_MOD("sys-dmac0", 219, R8A7796_CLK_S0D3),
DEF_MOD("cmt3", 300, R8A7796_CLK_R),
DEF_MOD("cmt2", 301, R8A7796_CLK_R),
DEF_MOD("cmt1", 302, R8A7796_CLK_R),
@@ -120,7 +131,47 @@ static const struct mssr_mod_clk r8a7796_mod_clks[] __initconst = {
DEF_MOD("sdif0", 314, R8A7796_CLK_SD0),
DEF_MOD("rwdt0", 402, R8A7796_CLK_R),
DEF_MOD("intc-ap", 408, R8A7796_CLK_S3D1),
+ DEF_MOD("drif7", 508, R8A7796_CLK_S3D2),
+ DEF_MOD("drif6", 509, R8A7796_CLK_S3D2),
+ DEF_MOD("drif5", 510, R8A7796_CLK_S3D2),
+ DEF_MOD("drif4", 511, R8A7796_CLK_S3D2),
+ DEF_MOD("drif3", 512, R8A7796_CLK_S3D2),
+ DEF_MOD("drif2", 513, R8A7796_CLK_S3D2),
+ DEF_MOD("drif1", 514, R8A7796_CLK_S3D2),
+ DEF_MOD("drif0", 515, R8A7796_CLK_S3D2),
+ DEF_MOD("hscif4", 516, R8A7796_CLK_S3D1),
+ DEF_MOD("hscif3", 517, R8A7796_CLK_S3D1),
+ DEF_MOD("hscif2", 518, R8A7796_CLK_S3D1),
+ DEF_MOD("hscif1", 519, R8A7796_CLK_S3D1),
+ DEF_MOD("hscif0", 520, R8A7796_CLK_S3D1),
DEF_MOD("thermal", 522, R8A7796_CLK_CP),
+ DEF_MOD("fcpvd2", 601, R8A7796_CLK_S0D2),
+ DEF_MOD("fcpvd1", 602, R8A7796_CLK_S0D2),
+ DEF_MOD("fcpvd0", 603, R8A7796_CLK_S0D2),
+ DEF_MOD("fcpvb0", 607, R8A7796_CLK_S0D1),
+ DEF_MOD("fcpvi0", 611, R8A7796_CLK_S0D1),
+ DEF_MOD("fcpf0", 615, R8A7796_CLK_S0D1),
+ DEF_MOD("fcpci0", 617, R8A7796_CLK_S0D2),
+ DEF_MOD("fcpcs", 619, R8A7796_CLK_S0D2),
+ DEF_MOD("vspd2", 621, R8A7796_CLK_S0D2),
+ DEF_MOD("vspd1", 622, R8A7796_CLK_S0D2),
+ DEF_MOD("vspd0", 623, R8A7796_CLK_S0D2),
+ DEF_MOD("vspb", 626, R8A7796_CLK_S0D1),
+ DEF_MOD("vspi0", 631, R8A7796_CLK_S0D1),
+ DEF_MOD("csi20", 714, R8A7796_CLK_CSI0),
+ DEF_MOD("csi40", 716, R8A7796_CLK_CSI0),
+ DEF_MOD("du2", 722, R8A7796_CLK_S2D1),
+ DEF_MOD("du1", 723, R8A7796_CLK_S2D1),
+ DEF_MOD("du0", 724, R8A7796_CLK_S2D1),
+ DEF_MOD("lvds", 727, R8A7796_CLK_S2D1),
+ DEF_MOD("vin7", 804, R8A7796_CLK_S0D2),
+ DEF_MOD("vin6", 805, R8A7796_CLK_S0D2),
+ DEF_MOD("vin5", 806, R8A7796_CLK_S0D2),
+ DEF_MOD("vin4", 807, R8A7796_CLK_S0D2),
+ DEF_MOD("vin3", 808, R8A7796_CLK_S0D2),
+ DEF_MOD("vin2", 809, R8A7796_CLK_S0D2),
+ DEF_MOD("vin1", 810, R8A7796_CLK_S0D2),
+ DEF_MOD("vin0", 811, R8A7796_CLK_S0D2),
DEF_MOD("etheravb", 812, R8A7796_CLK_S0D6),
DEF_MOD("gpio7", 905, R8A7796_CLK_S3D4),
DEF_MOD("gpio6", 906, R8A7796_CLK_S3D4),
@@ -130,6 +181,13 @@ static const struct mssr_mod_clk r8a7796_mod_clks[] __initconst = {
DEF_MOD("gpio2", 910, R8A7796_CLK_S3D4),
DEF_MOD("gpio1", 911, R8A7796_CLK_S3D4),
DEF_MOD("gpio0", 912, R8A7796_CLK_S3D4),
+ DEF_MOD("i2c6", 918, R8A7796_CLK_S0D6),
+ DEF_MOD("i2c5", 919, R8A7796_CLK_S0D6),
+ DEF_MOD("i2c4", 927, R8A7796_CLK_S0D6),
+ DEF_MOD("i2c3", 928, R8A7796_CLK_S0D6),
+ DEF_MOD("i2c2", 929, R8A7796_CLK_S3D2),
+ DEF_MOD("i2c1", 930, R8A7796_CLK_S3D2),
+ DEF_MOD("i2c0", 931, R8A7796_CLK_S3D2),
};
static const unsigned int r8a7796_crit_mod_clks[] __initconst = {
@@ -190,7 +248,12 @@ static const struct rcar_gen3_cpg_pll_config cpg_pll_configs[16] __initconst = {
static int __init r8a7796_cpg_mssr_init(struct device *dev)
{
const struct rcar_gen3_cpg_pll_config *cpg_pll_config;
- u32 cpg_mode = rcar_gen3_read_mode_pins();
+ u32 cpg_mode;
+ int error;
+
+ error = rcar_rst_read_mode_pins(&cpg_mode);
+ if (error)
+ return error;
cpg_pll_config = &cpg_pll_configs[CPG_PLL_CONFIG_INDEX(cpg_mode)];
if (!cpg_pll_config->extal_div) {
diff --git a/drivers/clk/renesas/rcar-gen2-cpg.c b/drivers/clk/renesas/rcar-gen2-cpg.c
new file mode 100644
index 000000000000..123b1e622179
--- /dev/null
+++ b/drivers/clk/renesas/rcar-gen2-cpg.c
@@ -0,0 +1,371 @@
+/*
+ * R-Car Gen2 Clock Pulse Generator
+ *
+ * Copyright (C) 2016 Cogent Embedded Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ */
+
+#include <linux/bug.h>
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+
+#include "renesas-cpg-mssr.h"
+#include "rcar-gen2-cpg.h"
+
+#define CPG_FRQCRB 0x0004
+#define CPG_FRQCRB_KICK BIT(31)
+#define CPG_SDCKCR 0x0074
+#define CPG_PLL0CR 0x00d8
+#define CPG_PLL0CR_STC_SHIFT 24
+#define CPG_PLL0CR_STC_MASK (0x7f << CPG_PLL0CR_STC_SHIFT)
+#define CPG_FRQCRC 0x00e0
+#define CPG_FRQCRC_ZFC_SHIFT 8
+#define CPG_FRQCRC_ZFC_MASK (0x1f << CPG_FRQCRC_ZFC_SHIFT)
+#define CPG_ADSPCKCR 0x025c
+#define CPG_RCANCKCR 0x0270
+
+static spinlock_t cpg_lock;
+
+/*
+ * Z Clock
+ *
+ * Traits of this clock:
+ * prepare - clk_prepare only ensures that parents are prepared
+ * enable - clk_enable only ensures that parents are enabled
+ * rate - rate is adjustable. clk->rate = parent->rate * mult / 32
+ * parent - fixed parent. No clk_set_parent support
+ */
+
+struct cpg_z_clk {
+ struct clk_hw hw;
+ void __iomem *reg;
+ void __iomem *kick_reg;
+};
+
+#define to_z_clk(_hw) container_of(_hw, struct cpg_z_clk, hw)
+
+static unsigned long cpg_z_clk_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct cpg_z_clk *zclk = to_z_clk(hw);
+ unsigned int mult;
+ unsigned int val;
+
+ val = (readl(zclk->reg) & CPG_FRQCRC_ZFC_MASK) >> CPG_FRQCRC_ZFC_SHIFT;
+ mult = 32 - val;
+
+ return div_u64((u64)parent_rate * mult, 32);
+}
+
+static long cpg_z_clk_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *parent_rate)
+{
+ unsigned long prate = *parent_rate;
+ unsigned int mult;
+
+ if (!prate)
+ prate = 1;
+
+ mult = div_u64((u64)rate * 32, prate);
+ mult = clamp(mult, 1U, 32U);
+
+ return *parent_rate / 32 * mult;
+}
+
+static int cpg_z_clk_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct cpg_z_clk *zclk = to_z_clk(hw);
+ unsigned int mult;
+ u32 val, kick;
+ unsigned int i;
+
+ mult = div_u64((u64)rate * 32, parent_rate);
+ mult = clamp(mult, 1U, 32U);
+
+ if (readl(zclk->kick_reg) & CPG_FRQCRB_KICK)
+ return -EBUSY;
+
+ val = readl(zclk->reg);
+ val &= ~CPG_FRQCRC_ZFC_MASK;
+ val |= (32 - mult) << CPG_FRQCRC_ZFC_SHIFT;
+ writel(val, zclk->reg);
+
+ /*
+ * Set KICK bit in FRQCRB to update hardware setting and wait for
+ * clock change completion.
+ */
+ kick = readl(zclk->kick_reg);
+ kick |= CPG_FRQCRB_KICK;
+ writel(kick, zclk->kick_reg);
+
+ /*
+ * Note: There is no HW information about the worst case latency.
+ *
+ * Using experimental measurements, it seems that no more than
+ * ~10 iterations are needed, independently of the CPU rate.
+ * Since this value might be dependent on external xtal rate, pll1
+ * rate or even the other emulation clocks rate, use 1000 as a
+ * "super" safe value.
+ */
+ for (i = 1000; i; i--) {
+ if (!(readl(zclk->kick_reg) & CPG_FRQCRB_KICK))
+ return 0;
+
+ cpu_relax();
+ }
+
+ return -ETIMEDOUT;
+}
+
+static const struct clk_ops cpg_z_clk_ops = {
+ .recalc_rate = cpg_z_clk_recalc_rate,
+ .round_rate = cpg_z_clk_round_rate,
+ .set_rate = cpg_z_clk_set_rate,
+};
+
+static struct clk * __init cpg_z_clk_register(const char *name,
+ const char *parent_name,
+ void __iomem *base)
+{
+ struct clk_init_data init;
+ struct cpg_z_clk *zclk;
+ struct clk *clk;
+
+ zclk = kzalloc(sizeof(*zclk), GFP_KERNEL);
+ if (!zclk)
+ return ERR_PTR(-ENOMEM);
+
+ init.name = name;
+ init.ops = &cpg_z_clk_ops;
+ init.flags = 0;
+ init.parent_names = &parent_name;
+ init.num_parents = 1;
+
+ zclk->reg = base + CPG_FRQCRC;
+ zclk->kick_reg = base + CPG_FRQCRB;
+ zclk->hw.init = &init;
+
+ clk = clk_register(NULL, &zclk->hw);
+ if (IS_ERR(clk))
+ kfree(zclk);
+
+ return clk;
+}
+
+static struct clk * __init cpg_rcan_clk_register(const char *name,
+ const char *parent_name,
+ void __iomem *base)
+{
+ struct clk_fixed_factor *fixed;
+ struct clk_gate *gate;
+ struct clk *clk;
+
+ fixed = kzalloc(sizeof(*fixed), GFP_KERNEL);
+ if (!fixed)
+ return ERR_PTR(-ENOMEM);
+
+ fixed->mult = 1;
+ fixed->div = 6;
+
+ gate = kzalloc(sizeof(*gate), GFP_KERNEL);
+ if (!gate) {
+ kfree(fixed);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ gate->reg = base + CPG_RCANCKCR;
+ gate->bit_idx = 8;
+ gate->flags = CLK_GATE_SET_TO_DISABLE;
+ gate->lock = &cpg_lock;
+
+ clk = clk_register_composite(NULL, name, &parent_name, 1, NULL, NULL,
+ &fixed->hw, &clk_fixed_factor_ops,
+ &gate->hw, &clk_gate_ops, 0);
+ if (IS_ERR(clk)) {
+ kfree(gate);
+ kfree(fixed);
+ }
+
+ return clk;
+}
+
+/* ADSP divisors */
+static const struct clk_div_table cpg_adsp_div_table[] = {
+ { 1, 3 }, { 2, 4 }, { 3, 6 }, { 4, 8 },
+ { 5, 12 }, { 6, 16 }, { 7, 18 }, { 8, 24 },
+ { 10, 36 }, { 11, 48 }, { 0, 0 },
+};
+
+static struct clk * __init cpg_adsp_clk_register(const char *name,
+ const char *parent_name,
+ void __iomem *base)
+{
+ struct clk_divider *div;
+ struct clk_gate *gate;
+ struct clk *clk;
+
+ div = kzalloc(sizeof(*div), GFP_KERNEL);
+ if (!div)
+ return ERR_PTR(-ENOMEM);
+
+ div->reg = base + CPG_ADSPCKCR;
+ div->width = 4;
+ div->table = cpg_adsp_div_table;
+ div->lock = &cpg_lock;
+
+ gate = kzalloc(sizeof(*gate), GFP_KERNEL);
+ if (!gate) {
+ kfree(div);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ gate->reg = base + CPG_ADSPCKCR;
+ gate->bit_idx = 8;
+ gate->flags = CLK_GATE_SET_TO_DISABLE;
+ gate->lock = &cpg_lock;
+
+ clk = clk_register_composite(NULL, name, &parent_name, 1, NULL, NULL,
+ &div->hw, &clk_divider_ops,
+ &gate->hw, &clk_gate_ops, 0);
+ if (IS_ERR(clk)) {
+ kfree(gate);
+ kfree(div);
+ }
+
+ return clk;
+}
+
+/* SDHI divisors */
+static const struct clk_div_table cpg_sdh_div_table[] = {
+ { 0, 2 }, { 1, 3 }, { 2, 4 }, { 3, 6 },
+ { 4, 8 }, { 5, 12 }, { 6, 16 }, { 7, 18 },
+ { 8, 24 }, { 10, 36 }, { 11, 48 }, { 0, 0 },
+};
+
+static const struct clk_div_table cpg_sd01_div_table[] = {
+ { 4, 8 }, { 5, 12 }, { 6, 16 }, { 7, 18 },
+ { 8, 24 }, { 10, 36 }, { 11, 48 }, { 12, 10 },
+ { 0, 0 },
+};
+
+static const struct rcar_gen2_cpg_pll_config *cpg_pll_config __initdata;
+static unsigned int cpg_pll0_div __initdata;
+static u32 cpg_mode __initdata;
+
+struct clk * __init rcar_gen2_cpg_clk_register(struct device *dev,
+ const struct cpg_core_clk *core,
+ const struct cpg_mssr_info *info,
+ struct clk **clks,
+ void __iomem *base)
+{
+ const struct clk_div_table *table = NULL;
+ const struct clk *parent;
+ const char *parent_name;
+ unsigned int mult = 1;
+ unsigned int div = 1;
+ unsigned int shift;
+
+ parent = clks[core->parent];
+ if (IS_ERR(parent))
+ return ERR_CAST(parent);
+
+ parent_name = __clk_get_name(parent);
+
+ switch (core->type) {
+ /* R-Car Gen2 */
+ case CLK_TYPE_GEN2_MAIN:
+ div = cpg_pll_config->extal_div;
+ break;
+
+ case CLK_TYPE_GEN2_PLL0:
+ /*
+ * PLL0 is a configurable multiplier clock except on R-Car
+ * V2H/E2. Register the PLL0 clock as a fixed factor clock for
+ * now as there's no generic multiplier clock implementation and
+ * we currently have no need to change the multiplier value.
+ */
+ mult = cpg_pll_config->pll0_mult;
+ div = cpg_pll0_div;
+ if (!mult) {
+ u32 pll0cr = readl(base + CPG_PLL0CR);
+
+ mult = (((pll0cr & CPG_PLL0CR_STC_MASK) >>
+ CPG_PLL0CR_STC_SHIFT) + 1) * 2;
+ }
+ break;
+
+ case CLK_TYPE_GEN2_PLL1:
+ mult = cpg_pll_config->pll1_mult / 2;
+ break;
+
+ case CLK_TYPE_GEN2_PLL3:
+ mult = cpg_pll_config->pll3_mult;
+ break;
+
+ case CLK_TYPE_GEN2_Z:
+ return cpg_z_clk_register(core->name, parent_name, base);
+
+ case CLK_TYPE_GEN2_LB:
+ div = cpg_mode & BIT(18) ? 36 : 24;
+ break;
+
+ case CLK_TYPE_GEN2_ADSP:
+ return cpg_adsp_clk_register(core->name, parent_name, base);
+
+ case CLK_TYPE_GEN2_SDH:
+ table = cpg_sdh_div_table;
+ shift = 8;
+ break;
+
+ case CLK_TYPE_GEN2_SD0:
+ table = cpg_sd01_div_table;
+ shift = 4;
+ break;
+
+ case CLK_TYPE_GEN2_SD1:
+ table = cpg_sd01_div_table;
+ shift = 0;
+ break;
+
+ case CLK_TYPE_GEN2_QSPI:
+ div = (cpg_mode & (BIT(3) | BIT(2) | BIT(1))) == BIT(2) ?
+ 8 : 10;
+ break;
+
+ case CLK_TYPE_GEN2_RCAN:
+ return cpg_rcan_clk_register(core->name, parent_name, base);
+
+ default:
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (!table)
+ return clk_register_fixed_factor(NULL, core->name, parent_name,
+ 0, mult, div);
+ else
+ return clk_register_divider_table(NULL, core->name,
+ parent_name, 0,
+ base + CPG_SDCKCR, shift, 4,
+ 0, table, &cpg_lock);
+}
+
+int __init rcar_gen2_cpg_init(const struct rcar_gen2_cpg_pll_config *config,
+ unsigned int pll0_div, u32 mode)
+{
+ cpg_pll_config = config;
+ cpg_pll0_div = pll0_div;
+ cpg_mode = mode;
+
+ spin_lock_init(&cpg_lock);
+
+ return 0;
+}
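The Z clock implemented above is a 32-step multiplier: FRQCRC.ZFC holds 32 - mult and the output is parent * mult / 32, so set_rate rounds down to the nearest step before kicking FRQCRB. A standalone sketch of that arithmetic; the 1.5 GHz parent is an assumed figure, not a datasheet value:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t parent = 1500000000;	/* assumed PLL-derived parent rate */
	uint64_t target = 1200000000;
	unsigned int mult = (unsigned int)(target * 32 / parent);	/* 25 */

	if (mult < 1)
		mult = 1;
	if (mult > 32)
		mult = 32;

	/* same rounding as cpg_z_clk_round_rate(): ZFC = 7, rate = 1171875000 */
	printf("ZFC = %u, rate = %llu\n", 32 - mult,
	       (unsigned long long)(parent / 32 * mult));
	return 0;
}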
diff --git a/drivers/clk/renesas/rcar-gen2-cpg.h b/drivers/clk/renesas/rcar-gen2-cpg.h
new file mode 100644
index 000000000000..9eba07ff8b11
--- /dev/null
+++ b/drivers/clk/renesas/rcar-gen2-cpg.h
@@ -0,0 +1,43 @@
+/*
+ * R-Car Gen2 Clock Pulse Generator
+ *
+ * Copyright (C) 2016 Cogent Embedded Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ */
+
+#ifndef __CLK_RENESAS_RCAR_GEN2_CPG_H__
+#define __CLK_RENESAS_RCAR_GEN2_CPG_H__
+
+enum rcar_gen2_clk_types {
+ CLK_TYPE_GEN2_MAIN = CLK_TYPE_CUSTOM,
+ CLK_TYPE_GEN2_PLL0,
+ CLK_TYPE_GEN2_PLL1,
+ CLK_TYPE_GEN2_PLL3,
+ CLK_TYPE_GEN2_Z,
+ CLK_TYPE_GEN2_LB,
+ CLK_TYPE_GEN2_ADSP,
+ CLK_TYPE_GEN2_SDH,
+ CLK_TYPE_GEN2_SD0,
+ CLK_TYPE_GEN2_SD1,
+ CLK_TYPE_GEN2_QSPI,
+ CLK_TYPE_GEN2_RCAN,
+};
+
+struct rcar_gen2_cpg_pll_config {
+ unsigned int extal_div;
+ unsigned int pll1_mult;
+ unsigned int pll3_mult;
+ unsigned int pll0_mult; /* leave as zero if PLL0CR exists */
+};
+
+struct clk *rcar_gen2_cpg_clk_register(struct device *dev,
+ const struct cpg_core_clk *core,
+ const struct cpg_mssr_info *info,
+ struct clk **clks, void __iomem *base);
+int rcar_gen2_cpg_init(const struct rcar_gen2_cpg_pll_config *config,
+ unsigned int pll0_div, u32 mode);
+
+#endif
diff --git a/drivers/clk/renesas/rcar-gen3-cpg.c b/drivers/clk/renesas/rcar-gen3-cpg.c
index bb4f2f9a8c2f..742f6dc7c156 100644
--- a/drivers/clk/renesas/rcar-gen3-cpg.c
+++ b/drivers/clk/renesas/rcar-gen3-cpg.c
@@ -98,7 +98,7 @@ static int cpg_sd_clock_enable(struct clk_hw *hw)
u32 val, sd_fc;
unsigned int i;
- val = clk_readl(clock->reg);
+ val = readl(clock->reg);
sd_fc = val & CPG_SD_FC_MASK;
for (i = 0; i < clock->div_num; i++)
@@ -111,7 +111,7 @@ static int cpg_sd_clock_enable(struct clk_hw *hw)
val &= ~(CPG_SD_STP_MASK);
val |= clock->div_table[i].val & CPG_SD_STP_MASK;
- clk_writel(val, clock->reg);
+ writel(val, clock->reg);
return 0;
}
@@ -120,14 +120,14 @@ static void cpg_sd_clock_disable(struct clk_hw *hw)
{
struct sd_clock *clock = to_sd_clock(hw);
- clk_writel(clk_readl(clock->reg) | CPG_SD_STP_MASK, clock->reg);
+ writel(readl(clock->reg) | CPG_SD_STP_MASK, clock->reg);
}
static int cpg_sd_clock_is_enabled(struct clk_hw *hw)
{
struct sd_clock *clock = to_sd_clock(hw);
- return !(clk_readl(clock->reg) & CPG_SD_STP_MASK);
+ return !(readl(clock->reg) & CPG_SD_STP_MASK);
}
static unsigned long cpg_sd_clock_recalc_rate(struct clk_hw *hw,
@@ -138,7 +138,7 @@ static unsigned long cpg_sd_clock_recalc_rate(struct clk_hw *hw,
u32 val, sd_fc;
unsigned int i;
- val = clk_readl(clock->reg);
+ val = readl(clock->reg);
sd_fc = val & CPG_SD_FC_MASK;
for (i = 0; i < clock->div_num; i++)
@@ -189,10 +189,10 @@ static int cpg_sd_clock_set_rate(struct clk_hw *hw, unsigned long rate,
if (i >= clock->div_num)
return -EINVAL;
- val = clk_readl(clock->reg);
+ val = readl(clock->reg);
val &= ~(CPG_SD_STP_MASK | CPG_SD_FC_MASK);
val |= clock->div_table[i].val & (CPG_SD_STP_MASK | CPG_SD_FC_MASK);
- clk_writel(val, clock->reg);
+ writel(val, clock->reg);
return 0;
}
@@ -333,23 +333,6 @@ struct clk * __init rcar_gen3_cpg_clk_register(struct device *dev,
__clk_get_name(parent), 0, mult, div);
}
-/*
- * Reset register definitions.
- */
-#define MODEMR 0xe6160060
-
-u32 __init rcar_gen3_read_mode_pins(void)
-{
- void __iomem *modemr = ioremap_nocache(MODEMR, 4);
- u32 mode;
-
- BUG_ON(!modemr);
- mode = ioread32(modemr);
- iounmap(modemr);
-
- return mode;
-}
-
int __init rcar_gen3_cpg_init(const struct rcar_gen3_cpg_pll_config *config,
unsigned int clk_extalr)
{
diff --git a/drivers/clk/renesas/rcar-gen3-cpg.h b/drivers/clk/renesas/rcar-gen3-cpg.h
index f699085147d1..f788f481dd42 100644
--- a/drivers/clk/renesas/rcar-gen3-cpg.h
+++ b/drivers/clk/renesas/rcar-gen3-cpg.h
@@ -33,7 +33,6 @@ struct rcar_gen3_cpg_pll_config {
#define CPG_RCKCR 0x240
-u32 rcar_gen3_read_mode_pins(void);
struct clk *rcar_gen3_cpg_clk_register(struct device *dev,
const struct cpg_core_clk *core, const struct cpg_mssr_info *info,
struct clk **clks, void __iomem *base);
diff --git a/drivers/clk/renesas/renesas-cpg-mssr.c b/drivers/clk/renesas/renesas-cpg-mssr.c
index e1365e7491ae..8359ce75db7a 100644
--- a/drivers/clk/renesas/renesas-cpg-mssr.c
+++ b/drivers/clk/renesas/renesas-cpg-mssr.c
@@ -33,9 +33,9 @@
#include "clk-div6.h"
#ifdef DEBUG
-#define WARN_DEBUG(x) do { } while (0)
-#else
#define WARN_DEBUG(x) WARN_ON(x)
+#else
+#define WARN_DEBUG(x) do { } while (0)
#endif
@@ -146,12 +146,12 @@ static int cpg_mstp_clock_endisable(struct clk_hw *hw, bool enable)
enable ? "ON" : "OFF");
spin_lock_irqsave(&priv->mstp_lock, flags);
- value = clk_readl(priv->base + SMSTPCR(reg));
+ value = readl(priv->base + SMSTPCR(reg));
if (enable)
value &= ~bitmask;
else
value |= bitmask;
- clk_writel(value, priv->base + SMSTPCR(reg));
+ writel(value, priv->base + SMSTPCR(reg));
spin_unlock_irqrestore(&priv->mstp_lock, flags);
@@ -159,8 +159,7 @@ static int cpg_mstp_clock_endisable(struct clk_hw *hw, bool enable)
return 0;
for (i = 1000; i > 0; --i) {
- if (!(clk_readl(priv->base + MSTPSR(reg)) &
- bitmask))
+ if (!(readl(priv->base + MSTPSR(reg)) & bitmask))
break;
cpu_relax();
}
@@ -190,7 +189,7 @@ static int cpg_mstp_clock_is_enabled(struct clk_hw *hw)
struct cpg_mssr_priv *priv = clock->priv;
u32 value;
- value = clk_readl(priv->base + MSTPSR(clock->index / 32));
+ value = readl(priv->base + MSTPSR(clock->index / 32));
return !(value & BIT(clock->index % 32));
}
@@ -309,7 +308,7 @@ static void __init cpg_mssr_register_core_clk(const struct cpg_core_clk *core,
return;
fail:
- dev_err(dev, "Failed to register %s clock %s: %ld\n", "core,",
+ dev_err(dev, "Failed to register %s clock %s: %ld\n", "core",
core->name, PTR_ERR(clk));
}
@@ -377,7 +376,7 @@ static void __init cpg_mssr_register_mod_clk(const struct mssr_mod_clk *mod,
return;
fail:
- dev_err(dev, "Failed to register %s clock %s: %ld\n", "module,",
+ dev_err(dev, "Failed to register %s clock %s: %ld\n", "module",
mod->name, PTR_ERR(clk));
kfree(clock);
}
@@ -503,6 +502,18 @@ static int __init cpg_mssr_add_clk_domain(struct device *dev,
}
static const struct of_device_id cpg_mssr_match[] = {
+#ifdef CONFIG_ARCH_R8A7743
+ {
+ .compatible = "renesas,r8a7743-cpg-mssr",
+ .data = &r8a7743_cpg_mssr_info,
+ },
+#endif
+#ifdef CONFIG_ARCH_R8A7745
+ {
+ .compatible = "renesas,r8a7745-cpg-mssr",
+ .data = &r8a7745_cpg_mssr_info,
+ },
+#endif
#ifdef CONFIG_ARCH_R8A7795
{
.compatible = "renesas,r8a7795-cpg-mssr",
diff --git a/drivers/clk/renesas/renesas-cpg-mssr.h b/drivers/clk/renesas/renesas-cpg-mssr.h
index ee7edfaf1408..4bb7a80c6469 100644
--- a/drivers/clk/renesas/renesas-cpg-mssr.h
+++ b/drivers/clk/renesas/renesas-cpg-mssr.h
@@ -130,6 +130,8 @@ struct cpg_mssr_info {
struct clk **clks, void __iomem *base);
};
+extern const struct cpg_mssr_info r8a7743_cpg_mssr_info;
+extern const struct cpg_mssr_info r8a7745_cpg_mssr_info;
extern const struct cpg_mssr_info r8a7795_cpg_mssr_info;
extern const struct cpg_mssr_info r8a7796_cpg_mssr_info;
#endif
diff --git a/drivers/clk/rockchip/Makefile b/drivers/clk/rockchip/Makefile
index b5f2c8ed12e1..16e098c36f90 100644
--- a/drivers/clk/rockchip/Makefile
+++ b/drivers/clk/rockchip/Makefile
@@ -11,6 +11,7 @@ obj-y += clk-mmc-phase.o
obj-y += clk-ddr.o
obj-$(CONFIG_RESET_CONTROLLER) += softrst.o
+obj-y += clk-rk1108.o
obj-y += clk-rk3036.o
obj-y += clk-rk3188.o
obj-y += clk-rk3228.o
diff --git a/drivers/clk/rockchip/clk-cpu.c b/drivers/clk/rockchip/clk-cpu.c
index 05b3d73bfefa..0e09684d43a5 100644
--- a/drivers/clk/rockchip/clk-cpu.c
+++ b/drivers/clk/rockchip/clk-cpu.c
@@ -124,9 +124,18 @@ static int rockchip_cpuclk_pre_rate_change(struct rockchip_cpuclk *cpuclk,
struct clk_notifier_data *ndata)
{
const struct rockchip_cpuclk_reg_data *reg_data = cpuclk->reg_data;
+ const struct rockchip_cpuclk_rate_table *rate;
unsigned long alt_prate, alt_div;
unsigned long flags;
+ /* check validity of the new rate */
+ rate = rockchip_get_cpuclk_settings(cpuclk, ndata->new_rate);
+ if (!rate) {
+ pr_err("%s: Invalid rate: %lu for cpuclk\n",
+ __func__, ndata->new_rate);
+ return -EINVAL;
+ }
+
alt_prate = clk_get_rate(cpuclk->alt_parent);
spin_lock_irqsave(cpuclk->lock, flags);
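The added validity check runs in PRE_RATE_CHANGE notifier context, where a non-zero return aborts the whole rate change before any mux or divider is touched. A simplified sketch of the dispatch around it; to_cpuclk() stands in for the driver's container_of() helper:

static int cpuclk_notifier_cb(struct notifier_block *nb,
			      unsigned long event, void *data)
{
	struct clk_notifier_data *ndata = data;
	int ret = 0;

	if (event == PRE_RATE_CHANGE)
		ret = rockchip_cpuclk_pre_rate_change(to_cpuclk(nb), ndata);

	/* -EINVAL becomes NOTIFY_BAD, so the clk core backs out cleanly */
	return notifier_from_errno(ret);
}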
diff --git a/drivers/clk/rockchip/clk-pll.c b/drivers/clk/rockchip/clk-pll.c
index 9c1373e81683..6ed605776abd 100644
--- a/drivers/clk/rockchip/clk-pll.c
+++ b/drivers/clk/rockchip/clk-pll.c
@@ -319,7 +319,8 @@ static void rockchip_rk3036_pll_init(struct clk_hw *hw)
if (rate->fbdiv != cur.fbdiv || rate->postdiv1 != cur.postdiv1 ||
rate->refdiv != cur.refdiv || rate->postdiv2 != cur.postdiv2 ||
- rate->dsmpd != cur.dsmpd || rate->frac != cur.frac) {
+ rate->dsmpd != cur.dsmpd ||
+ (!cur.dsmpd && (rate->frac != cur.frac))) {
struct clk *parent = clk_get_parent(hw->clk);
if (!parent) {
@@ -795,7 +796,8 @@ static void rockchip_rk3399_pll_init(struct clk_hw *hw)
if (rate->fbdiv != cur.fbdiv || rate->postdiv1 != cur.postdiv1 ||
rate->refdiv != cur.refdiv || rate->postdiv2 != cur.postdiv2 ||
- rate->dsmpd != cur.dsmpd || rate->frac != cur.frac) {
+ rate->dsmpd != cur.dsmpd ||
+ (!cur.dsmpd && (rate->frac != cur.frac))) {
struct clk *parent = clk_get_parent(hw->clk);
if (!parent) {
diff --git a/drivers/clk/rockchip/clk-rk1108.c b/drivers/clk/rockchip/clk-rk1108.c
new file mode 100644
index 000000000000..92750d798e5d
--- /dev/null
+++ b/drivers/clk/rockchip/clk-rk1108.c
@@ -0,0 +1,531 @@
+/*
+ * Copyright (c) 2016 Rockchip Electronics Co. Ltd.
+ * Author: Shawn Lin <shawn.lin@rock-chips.com>
+ * Andy Yan <andy.yan@rock-chips.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/syscore_ops.h>
+#include <dt-bindings/clock/rk1108-cru.h>
+#include "clk.h"
+
+#define RK1108_GRF_SOC_STATUS0 0x480
+
+enum rk1108_plls {
+ apll, dpll, gpll,
+};
+
+static struct rockchip_pll_rate_table rk1108_pll_rates[] = {
+ /* _mhz, _refdiv, _fbdiv, _postdiv1, _postdiv2, _dsmpd, _frac */
+ RK3036_PLL_RATE(1608000000, 1, 67, 1, 1, 1, 0),
+ RK3036_PLL_RATE(1584000000, 1, 66, 1, 1, 1, 0),
+ RK3036_PLL_RATE(1560000000, 1, 65, 1, 1, 1, 0),
+ RK3036_PLL_RATE(1536000000, 1, 64, 1, 1, 1, 0),
+ RK3036_PLL_RATE(1512000000, 1, 63, 1, 1, 1, 0),
+ RK3036_PLL_RATE(1488000000, 1, 62, 1, 1, 1, 0),
+ RK3036_PLL_RATE(1464000000, 1, 61, 1, 1, 1, 0),
+ RK3036_PLL_RATE(1440000000, 1, 60, 1, 1, 1, 0),
+ RK3036_PLL_RATE(1416000000, 1, 59, 1, 1, 1, 0),
+ RK3036_PLL_RATE(1392000000, 1, 58, 1, 1, 1, 0),
+ RK3036_PLL_RATE(1368000000, 1, 57, 1, 1, 1, 0),
+ RK3036_PLL_RATE(1344000000, 1, 56, 1, 1, 1, 0),
+ RK3036_PLL_RATE(1320000000, 1, 55, 1, 1, 1, 0),
+ RK3036_PLL_RATE(1296000000, 1, 54, 1, 1, 1, 0),
+ RK3036_PLL_RATE(1272000000, 1, 53, 1, 1, 1, 0),
+ RK3036_PLL_RATE(1248000000, 1, 52, 1, 1, 1, 0),
+ RK3036_PLL_RATE(1200000000, 1, 50, 1, 1, 1, 0),
+ RK3036_PLL_RATE(1188000000, 2, 99, 1, 1, 1, 0),
+ RK3036_PLL_RATE(1104000000, 1, 46, 1, 1, 1, 0),
+ RK3036_PLL_RATE(1100000000, 12, 550, 1, 1, 1, 0),
+ RK3036_PLL_RATE(1008000000, 1, 84, 2, 1, 1, 0),
+ RK3036_PLL_RATE(1000000000, 6, 500, 2, 1, 1, 0),
+ RK3036_PLL_RATE( 984000000, 1, 82, 2, 1, 1, 0),
+ RK3036_PLL_RATE( 960000000, 1, 80, 2, 1, 1, 0),
+ RK3036_PLL_RATE( 936000000, 1, 78, 2, 1, 1, 0),
+ RK3036_PLL_RATE( 912000000, 1, 76, 2, 1, 1, 0),
+ RK3036_PLL_RATE( 900000000, 4, 300, 2, 1, 1, 0),
+ RK3036_PLL_RATE( 888000000, 1, 74, 2, 1, 1, 0),
+ RK3036_PLL_RATE( 864000000, 1, 72, 2, 1, 1, 0),
+ RK3036_PLL_RATE( 840000000, 1, 70, 2, 1, 1, 0),
+ RK3036_PLL_RATE( 816000000, 1, 68, 2, 1, 1, 0),
+ RK3036_PLL_RATE( 800000000, 6, 400, 2, 1, 1, 0),
+ RK3036_PLL_RATE( 700000000, 6, 350, 2, 1, 1, 0),
+ RK3036_PLL_RATE( 696000000, 1, 58, 2, 1, 1, 0),
+ RK3036_PLL_RATE( 600000000, 1, 75, 3, 1, 1, 0),
+ RK3036_PLL_RATE( 594000000, 2, 99, 2, 1, 1, 0),
+ RK3036_PLL_RATE( 504000000, 1, 63, 3, 1, 1, 0),
+ RK3036_PLL_RATE( 500000000, 6, 250, 2, 1, 1, 0),
+ RK3036_PLL_RATE( 408000000, 1, 68, 2, 2, 1, 0),
+ RK3036_PLL_RATE( 312000000, 1, 52, 2, 2, 1, 0),
+ RK3036_PLL_RATE( 216000000, 1, 72, 4, 2, 1, 0),
+ RK3036_PLL_RATE( 96000000, 1, 64, 4, 4, 1, 0),
+ { /* sentinel */ },
+};
+
+#define RK1108_DIV_CORE_MASK 0xf
+#define RK1108_DIV_CORE_SHIFT 4
+
+#define RK1108_CLKSEL0(_core_peri_div) \
+ { \
+ .reg = RK1108_CLKSEL_CON(1), \
+ .val = HIWORD_UPDATE(_core_peri_div, RK1108_DIV_CORE_MASK,\
+ RK1108_DIV_CORE_SHIFT) \
+ }
+
+#define RK1108_CPUCLK_RATE(_prate, _core_peri_div) \
+ { \
+ .prate = _prate, \
+ .divs = { \
+ RK1108_CLKSEL0(_core_peri_div), \
+ }, \
+ }
+
+static struct rockchip_cpuclk_rate_table rk1108_cpuclk_rates[] __initdata = {
+ RK1108_CPUCLK_RATE(816000000, 4),
+ RK1108_CPUCLK_RATE(600000000, 4),
+ RK1108_CPUCLK_RATE(312000000, 4),
+};
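RK1108_CLKSEL0() leans on the Rockchip hiword-mask register layout: the upper 16 bits of a write are a per-bit write-enable mask for the lower 16, so a field can be set without read-modify-write. Worked example for the core-peri divider value 4 used in every row above:

#define HIWORD_UPDATE(val, mask, shift) \
		((val) << (shift) | (mask) << ((shift) + 16))

/* HIWORD_UPDATE(4, 0xf, 4) == (4 << 4) | (0xf << 20) == 0x00f00040:
 * the high half write-enables bits [7:4] of CLKSEL_CON(1), the low
 * half sets them to 4; all other bits are untouched.
 */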
+
+static const struct rockchip_cpuclk_reg_data rk1108_cpuclk_data = {
+ .core_reg = RK1108_CLKSEL_CON(0),
+ .div_core_shift = 0,
+ .div_core_mask = 0x1f,
+ .mux_core_alt = 1,
+ .mux_core_main = 0,
+ .mux_core_shift = 8,
+ .mux_core_mask = 0x1,
+};
+
+PNAME(mux_pll_p) = { "xin24m", "xin24m"};
+PNAME(mux_ddrphy_p) = { "dpll_ddr", "gpll_ddr", "apll_ddr" };
+PNAME(mux_armclk_p) = { "apll_core", "gpll_core", "dpll_core" };
+PNAME(mux_usb480m_pre_p) = { "usbphy", "xin24m" };
+PNAME(mux_hdmiphy_phy_p) = { "hdmiphy", "xin24m" };
+PNAME(mux_dclk_hdmiphy_pre_p) = { "dclk_hdmiphy_src_gpll", "dclk_hdmiphy_src_dpll" };
+PNAME(mux_pll_src_4plls_p) = { "dpll", "hdmiphy", "gpll", "usb480m" };
+PNAME(mux_pll_src_3plls_p) = { "apll", "gpll", "dpll" };
+PNAME(mux_pll_src_2plls_p) = { "dpll", "gpll" };
+PNAME(mux_pll_src_apll_gpll_p) = { "apll", "gpll" };
+PNAME(mux_aclk_peri_src_p) = { "aclk_peri_src_dpll", "aclk_peri_src_gpll" };
+PNAME(mux_aclk_bus_src_p) = { "aclk_bus_src_gpll", "aclk_bus_src_apll", "aclk_bus_src_dpll" };
+PNAME(mux_mmc_src_p) = { "dpll", "gpll", "xin24m", "usb480m" };
+PNAME(mux_pll_src_dpll_gpll_usb480m_p) = { "dpll", "gpll", "usb480m" };
+PNAME(mux_uart0_p) = { "uart0_src", "uart0_frac", "xin24m" };
+PNAME(mux_uart1_p) = { "uart1_src", "uart1_frac", "xin24m" };
+PNAME(mux_uart2_p) = { "uart2_src", "uart2_frac", "xin24m" };
+PNAME(mux_sclk_macphy_p) = { "sclk_macphy_pre", "ext_gmac" };
+PNAME(mux_i2s0_pre_p) = { "i2s0_src", "i2s0_frac", "ext_i2s", "xin12m" };
+PNAME(mux_i2s_out_p) = { "i2s0_pre", "xin12m" };
+PNAME(mux_i2s1_p) = { "i2s1_src", "i2s1_frac", "xin12m" };
+PNAME(mux_i2s2_p) = { "i2s2_src", "i2s2_frac", "xin12m" };
+
+static struct rockchip_pll_clock rk1108_pll_clks[] __initdata = {
+ [apll] = PLL(pll_rk3399, PLL_APLL, "apll", mux_pll_p, 0, RK1108_PLL_CON(0),
+ RK1108_PLL_CON(3), 8, 31, 0, rk1108_pll_rates),
+ [dpll] = PLL(pll_rk3399, PLL_DPLL, "dpll", mux_pll_p, 0, RK1108_PLL_CON(8),
+ RK1108_PLL_CON(11), 8, 31, 0, NULL),
+ [gpll] = PLL(pll_rk3399, PLL_GPLL, "gpll", mux_pll_p, 0, RK1108_PLL_CON(16),
+ RK1108_PLL_CON(19), 8, 31, ROCKCHIP_PLL_SYNC_RATE, rk1108_pll_rates),
+};
+
+#define MFLAGS CLK_MUX_HIWORD_MASK
+#define DFLAGS CLK_DIVIDER_HIWORD_MASK
+#define GFLAGS (CLK_GATE_HIWORD_MASK | CLK_GATE_SET_TO_DISABLE)
+#define IFLAGS ROCKCHIP_INVERTER_HIWORD_MASK
+
+static struct rockchip_clk_branch rk1108_uart0_fracmux __initdata =
+ MUX(SCLK_UART0, "sclk_uart0", mux_uart0_p, CLK_SET_RATE_PARENT,
+ RK1108_CLKSEL_CON(13), 8, 2, MFLAGS);
+
+static struct rockchip_clk_branch rk1108_uart1_fracmux __initdata =
+ MUX(SCLK_UART1, "sclk_uart1", mux_uart1_p, CLK_SET_RATE_PARENT,
+ RK1108_CLKSEL_CON(14), 8, 2, MFLAGS);
+
+static struct rockchip_clk_branch rk1108_uart2_fracmux __initdata =
+ MUX(SCLK_UART2, "sclk_uart2", mux_uart2_p, CLK_SET_RATE_PARENT,
+ RK1108_CLKSEL_CON(15), 8, 2, MFLAGS);
+
+static struct rockchip_clk_branch rk1108_i2s0_fracmux __initdata =
+ MUX(0, "i2s0_pre", mux_i2s0_pre_p, CLK_SET_RATE_PARENT,
+ RK1108_CLKSEL_CON(5), 12, 2, MFLAGS);
+
+static struct rockchip_clk_branch rk1108_i2s1_fracmux __initdata =
+ MUX(0, "i2s1_pre", mux_i2s1_p, CLK_SET_RATE_PARENT,
+ RK1108_CLKSEL_CON(6), 12, 2, MFLAGS);
+
+static struct rockchip_clk_branch rk1108_i2s2_fracmux __initdata =
+ MUX(0, "i2s2_pre", mux_i2s2_p, CLK_SET_RATE_PARENT,
+ RK1108_CLKSEL_CON(7), 12, 2, MFLAGS);
+
+static struct rockchip_clk_branch rk1108_clk_branches[] __initdata = {
+ MUX(0, "hdmi_phy", mux_hdmiphy_phy_p, CLK_SET_RATE_PARENT,
+ RK1108_MISC_CON, 13, 2, MFLAGS),
+ MUX(0, "usb480m", mux_usb480m_pre_p, CLK_SET_RATE_PARENT,
+ RK1108_MISC_CON, 15, 2, MFLAGS),
+ /*
+ * Clock-Architecture Diagram 2
+ */
+
+ /* PD_CORE */
+ GATE(0, "dpll_core", "dpll", CLK_IGNORE_UNUSED,
+ RK1108_CLKGATE_CON(0), 1, GFLAGS),
+ GATE(0, "apll_core", "apll", CLK_IGNORE_UNUSED,
+ RK1108_CLKGATE_CON(0), 0, GFLAGS),
+ GATE(0, "gpll_core", "gpll", CLK_IGNORE_UNUSED,
+ RK1108_CLKGATE_CON(0), 2, GFLAGS),
+ COMPOSITE_NOMUX(0, "pclken_dbg", "armclk", CLK_IGNORE_UNUSED,
+ RK1108_CLKSEL_CON(1), 4, 4, DFLAGS | CLK_DIVIDER_READ_ONLY,
+ RK1108_CLKGATE_CON(0), 5, GFLAGS),
+ COMPOSITE_NOMUX(ACLK_ENMCORE, "aclkenm_core", "armclk", CLK_IGNORE_UNUSED,
+ RK1108_CLKSEL_CON(1), 0, 3, DFLAGS | CLK_DIVIDER_READ_ONLY,
+ RK1108_CLKGATE_CON(0), 4, GFLAGS),
+ GATE(ACLK_CORE, "aclk_core", "aclkenm_core", CLK_IGNORE_UNUSED,
+ RK1108_CLKGATE_CON(11), 0, GFLAGS),
+ GATE(0, "pclk_dbg", "pclken_dbg", CLK_IGNORE_UNUSED,
+ RK1108_CLKGATE_CON(11), 1, GFLAGS),
+
+ /* PD_RKVENC */
+
+ /* PD_RKVDEC */
+
+ /* PD_PMU_wrapper */
+ COMPOSITE_NOMUX(0, "pmu_24m_ena", "gpll", CLK_IGNORE_UNUSED,
+ RK1108_CLKSEL_CON(38), 0, 5, DFLAGS,
+ RK1108_CLKGATE_CON(8), 12, GFLAGS),
+ GATE(0, "pmu", "pmu_24m_ena", CLK_IGNORE_UNUSED,
+ RK1108_CLKGATE_CON(10), 0, GFLAGS),
+ GATE(0, "intmem1", "pmu_24m_ena", CLK_IGNORE_UNUSED,
+ RK1108_CLKGATE_CON(10), 1, GFLAGS),
+ GATE(0, "gpio0_pmu", "pmu_24m_ena", CLK_IGNORE_UNUSED,
+ RK1108_CLKGATE_CON(10), 2, GFLAGS),
+ GATE(0, "pmugrf", "pmu_24m_ena", CLK_IGNORE_UNUSED,
+ RK1108_CLKGATE_CON(10), 3, GFLAGS),
+ GATE(0, "pmu_noc", "pmu_24m_ena", CLK_IGNORE_UNUSED,
+ RK1108_CLKGATE_CON(10), 4, GFLAGS),
+ GATE(0, "i2c0_pmu_pclk", "pmu_24m_ena", CLK_IGNORE_UNUSED,
+ RK1108_CLKGATE_CON(10), 5, GFLAGS),
+ GATE(0, "pwm0_pmu_pclk", "pmu_24m_ena", CLK_IGNORE_UNUSED,
+ RK1108_CLKGATE_CON(10), 6, GFLAGS),
+ COMPOSITE(0, "pwm0_pmu_clk", mux_pll_src_2plls_p, CLK_IGNORE_UNUSED,
+ RK1108_CLKSEL_CON(12), 7, 1, MFLAGS, 0, 7, DFLAGS,
+ RK1108_CLKGATE_CON(8), 15, GFLAGS),
+ COMPOSITE(0, "i2c0_pmu_clk", mux_pll_src_2plls_p, CLK_IGNORE_UNUSED,
+ RK1108_CLKSEL_CON(19), 7, 1, MFLAGS, 0, 7, DFLAGS,
+ RK1108_CLKGATE_CON(8), 14, GFLAGS),
+ GATE(0, "pvtm_pmu", "xin24m", CLK_IGNORE_UNUSED,
+ RK1108_CLKGATE_CON(8), 13, GFLAGS),
+
+ /*
+ * Clock-Architecture Diagram 4
+ */
+ COMPOSITE(0, "aclk_vio0_2wrap_occ", mux_pll_src_4plls_p, CLK_IGNORE_UNUSED,
+ RK1108_CLKSEL_CON(28), 6, 2, MFLAGS, 0, 5, DFLAGS,
+ RK1108_CLKGATE_CON(6), 0, GFLAGS),
+ GATE(0, "aclk_vio0_pre", "aclk_vio0_2wrap_occ", CLK_IGNORE_UNUSED,
+ RK1108_CLKGATE_CON(17), 0, GFLAGS),
+ COMPOSITE_NOMUX(0, "hclk_vio_pre", "aclk_vio0_pre", 0,
+ RK1108_CLKSEL_CON(29), 0, 5, DFLAGS,
+ RK1108_CLKGATE_CON(7), 2, GFLAGS),
+ COMPOSITE_NOMUX(0, "pclk_vio_pre", "aclk_vio0_pre", 0,
+ RK1108_CLKSEL_CON(29), 8, 5, DFLAGS,
+ RK1108_CLKGATE_CON(7), 3, GFLAGS),
+
+ INVERTER(0, "pclk_vip", "ext_vip",
+ RK1108_CLKSEL_CON(31), 8, IFLAGS),
+ GATE(0, "pclk_isp_pre", "pclk_vip", CLK_IGNORE_UNUSED,
+ RK1108_CLKGATE_CON(7), 6, GFLAGS),
+ GATE(0, "pclk_isp", "pclk_isp_pre", CLK_IGNORE_UNUSED,
+ RK1108_CLKGATE_CON(18), 10, GFLAGS),
+ GATE(0, "dclk_hdmiphy_src_gpll", "gpll", CLK_IGNORE_UNUSED,
+ RK1108_CLKGATE_CON(6), 5, GFLAGS),
+ GATE(0, "dclk_hdmiphy_src_dpll", "dpll", CLK_IGNORE_UNUSED,
+ RK1108_CLKGATE_CON(6), 4, GFLAGS),
+ COMPOSITE_NOGATE(0, "dclk_hdmiphy", mux_dclk_hdmiphy_pre_p, 0,
+ RK1108_CLKSEL_CON(32), 6, 2, MFLAGS, 8, 6, DFLAGS),
+
+ /*
+ * Clock-Architecture Diagram 5
+ */
+
+ FACTOR(0, "xin12m", "xin24m", 0, 1, 2),
+
+ COMPOSITE(0, "i2s0_src", mux_pll_src_2plls_p, 0,
+ RK1108_CLKSEL_CON(5), 8, 1, MFLAGS, 0, 7, DFLAGS,
+ RK1108_CLKGATE_CON(2), 0, GFLAGS),
+ COMPOSITE_FRACMUX(0, "i2s1_frac", "i2s1_src", CLK_SET_RATE_PARENT,
+ RK1108_CLKSEL_CON(8), 0,
+ RK1108_CLKGATE_CON(2), 1, GFLAGS,
+ &rk1108_i2s0_fracmux),
+ GATE(SCLK_I2S0, "sclk_i2s0", "i2s0_pre", CLK_SET_RATE_PARENT,
+ RK1108_CLKGATE_CON(2), 2, GFLAGS),
+ COMPOSITE_NODIV(0, "i2s_out", mux_i2s_out_p, 0,
+ RK1108_CLKSEL_CON(5), 15, 1, MFLAGS,
+ RK1108_CLKGATE_CON(2), 3, GFLAGS),
+
+ COMPOSITE(0, "i2s1_src", mux_pll_src_2plls_p, 0,
+ RK1108_CLKSEL_CON(6), 8, 1, MFLAGS, 0, 7, DFLAGS,
+ RK1108_CLKGATE_CON(2), 4, GFLAGS),
+ COMPOSITE_FRACMUX(0, "i2s1_frac", "i2s1_src", CLK_SET_RATE_PARENT,
+ RK1108_CLKSEL_CON(9), 0,
+ RK1108_CLKGATE_CON(2), 5, GFLAGS,
+ &rk1108_i2s1_fracmux),
+ GATE(SCLK_I2S1, "sclk_i2s1", "i2s1_pre", CLK_SET_RATE_PARENT,
+ RK1108_CLKGATE_CON(2), 6, GFLAGS),
+
+ COMPOSITE(0, "i2s2_src", mux_pll_src_2plls_p, 0,
+ RK1108_CLKSEL_CON(7), 8, 1, MFLAGS, 0, 7, DFLAGS,
+ RK1108_CLKGATE_CON(2), 8, GFLAGS),
+ COMPOSITE_FRACMUX(0, "i2s2_frac", "i2s2_src", CLK_SET_RATE_PARENT,
+ RK1108_CLKSEL_CON(10), 0,
+ RK1108_CLKGATE_CON(2), 9, GFLAGS,
+ &rk1108_i2s2_fracmux),
+ GATE(SCLK_I2S2, "sclk_i2s2", "i2s2_pre", CLK_SET_RATE_PARENT,
+ RK1108_CLKGATE_CON(2), 10, GFLAGS),
+
+ /* PD_BUS */
+ GATE(0, "aclk_bus_src_gpll", "gpll", CLK_IGNORE_UNUSED,
+ RK1108_CLKGATE_CON(1), 0, GFLAGS),
+ GATE(0, "aclk_bus_src_apll", "apll", CLK_IGNORE_UNUSED,
+ RK1108_CLKGATE_CON(1), 1, GFLAGS),
+ GATE(0, "aclk_bus_src_dpll", "dpll", CLK_IGNORE_UNUSED,
+ RK1108_CLKGATE_CON(1), 2, GFLAGS),
+ COMPOSITE_NOGATE(ACLK_PRE, "aclk_bus_pre", mux_aclk_bus_src_p, 0,
+ RK1108_CLKSEL_CON(2), 8, 2, MFLAGS, 0, 5, DFLAGS),
+ COMPOSITE_NOMUX(0, "hclk_bus_pre", "aclk_bus_2wrap_occ", 0,
+ RK1108_CLKSEL_CON(3), 0, 5, DFLAGS,
+ RK1108_CLKGATE_CON(1), 4, GFLAGS),
+ COMPOSITE_NOMUX(0, "pclken_bus", "aclk_bus_2wrap_occ", 0,
+ RK1108_CLKSEL_CON(3), 8, 5, DFLAGS,
+ RK1108_CLKGATE_CON(1), 5, GFLAGS),
+ GATE(0, "pclk_bus_pre", "pclken_bus", CLK_IGNORE_UNUSED,
+ RK1108_CLKGATE_CON(1), 6, GFLAGS),
+ GATE(0, "pclk_top_pre", "pclken_bus", CLK_IGNORE_UNUSED,
+ RK1108_CLKGATE_CON(1), 7, GFLAGS),
+ GATE(0, "pclk_ddr_pre", "pclken_bus", CLK_IGNORE_UNUSED,
+ RK1108_CLKGATE_CON(1), 8, GFLAGS),
+ GATE(0, "clk_timer0", "mux_pll_p", CLK_IGNORE_UNUSED,
+ RK1108_CLKGATE_CON(1), 9, GFLAGS),
+ GATE(0, "clk_timer1", "mux_pll_p", CLK_IGNORE_UNUSED,
+ RK1108_CLKGATE_CON(1), 10, GFLAGS),
+ GATE(0, "pclk_timer", "pclk_bus_pre", CLK_IGNORE_UNUSED,
+ RK1108_CLKGATE_CON(13), 4, GFLAGS),
+
+ COMPOSITE(0, "uart0_src", mux_pll_src_dpll_gpll_usb480m_p, CLK_IGNORE_UNUSED,
+ RK1108_CLKSEL_CON(13), 12, 2, MFLAGS, 0, 7, DFLAGS,
+ RK1108_CLKGATE_CON(3), 1, GFLAGS),
+ COMPOSITE(0, "uart1_src", mux_pll_src_dpll_gpll_usb480m_p, CLK_IGNORE_UNUSED,
+ RK1108_CLKSEL_CON(14), 12, 2, MFLAGS, 0, 7, DFLAGS,
+ RK1108_CLKGATE_CON(3), 3, GFLAGS),
+ COMPOSITE(0, "uart21_src", mux_pll_src_dpll_gpll_usb480m_p, CLK_IGNORE_UNUSED,
+ RK1108_CLKSEL_CON(15), 12, 2, MFLAGS, 0, 7, DFLAGS,
+ RK1108_CLKGATE_CON(3), 5, GFLAGS),
+
+ COMPOSITE_FRACMUX(0, "uart0_frac", "uart0_src", CLK_SET_RATE_PARENT,
+ RK1108_CLKSEL_CON(16), 0,
+ RK1108_CLKGATE_CON(3), 2, GFLAGS,
+ &rk1108_uart0_fracmux),
+ COMPOSITE_FRACMUX(0, "uart1_frac", "uart1_src", CLK_SET_RATE_PARENT,
+ RK1108_CLKSEL_CON(17), 0,
+ RK1108_CLKGATE_CON(3), 4, GFLAGS,
+ &rk1108_uart1_fracmux),
+ COMPOSITE_FRACMUX(0, "uart2_frac", "uart2_src", CLK_SET_RATE_PARENT,
+ RK1108_CLKSEL_CON(18), 0,
+ RK1108_CLKGATE_CON(3), 6, GFLAGS,
+ &rk1108_uart2_fracmux),
+ GATE(PCLK_UART0, "pclk_uart0", "pclk_bus_pre", CLK_IGNORE_UNUSED,
+ RK1108_CLKGATE_CON(13), 10, GFLAGS),
+ GATE(PCLK_UART1, "pclk_uart1", "pclk_bus_pre", CLK_IGNORE_UNUSED,
+ RK1108_CLKGATE_CON(13), 11, GFLAGS),
+ GATE(PCLK_UART2, "pclk_uart2", "pclk_bus_pre", CLK_IGNORE_UNUSED,
+ RK1108_CLKGATE_CON(13), 12, GFLAGS),
+
+ COMPOSITE(0, "clk_i2c1", mux_pll_src_2plls_p, CLK_IGNORE_UNUSED,
+ RK1108_CLKSEL_CON(19), 15, 2, MFLAGS, 8, 7, DFLAGS,
+ RK1108_CLKGATE_CON(3), 7, GFLAGS),
+ COMPOSITE(0, "clk_i2c2", mux_pll_src_2plls_p, CLK_IGNORE_UNUSED,
+ RK1108_CLKSEL_CON(20), 7, 2, MFLAGS, 0, 7, DFLAGS,
+ RK1108_CLKGATE_CON(3), 8, GFLAGS),
+ COMPOSITE(0, "clk_i2c3", mux_pll_src_2plls_p, CLK_IGNORE_UNUSED,
+ RK1108_CLKSEL_CON(20), 15, 2, MFLAGS, 8, 7, DFLAGS,
+ RK1108_CLKGATE_CON(3), 9, GFLAGS),
+ GATE(0, "pclk_i2c1", "pclk_bus_pre", CLK_IGNORE_UNUSED,
+ RK1108_CLKGATE_CON(13), 0, GFLAGS),
+ GATE(0, "pclk_i2c2", "pclk_bus_pre", CLK_IGNORE_UNUSED,
+ RK1108_CLKGATE_CON(13), 1, GFLAGS),
+ GATE(0, "pclk_i2c3", "pclk_bus_pre", CLK_IGNORE_UNUSED,
+ RK1108_CLKGATE_CON(13), 2, GFLAGS),
+ COMPOSITE(0, "clk_pwm1", mux_pll_src_2plls_p, CLK_IGNORE_UNUSED,
+ RK1108_CLKSEL_CON(12), 15, 2, MFLAGS, 8, 7, DFLAGS,
+ RK1108_CLKGATE_CON(3), 10, GFLAGS),
+ GATE(0, "pclk_pwm1", "pclk_bus_pre", CLK_IGNORE_UNUSED,
+ RK1108_CLKGATE_CON(13), 6, GFLAGS),
+ GATE(0, "pclk_wdt", "pclk_bus_pre", CLK_IGNORE_UNUSED,
+ RK1108_CLKGATE_CON(13), 3, GFLAGS),
+ GATE(0, "pclk_gpio1", "pclk_bus_pre", CLK_IGNORE_UNUSED,
+ RK1108_CLKGATE_CON(13), 7, GFLAGS),
+ GATE(0, "pclk_gpio2", "pclk_bus_pre", CLK_IGNORE_UNUSED,
+ RK1108_CLKGATE_CON(13), 8, GFLAGS),
+ GATE(0, "pclk_gpio3", "pclk_bus_pre", CLK_IGNORE_UNUSED,
+ RK1108_CLKGATE_CON(13), 9, GFLAGS),
+
+ GATE(0, "pclk_grf", "pclk_bus_pre", CLK_IGNORE_UNUSED,
+ RK1108_CLKGATE_CON(14), 0, GFLAGS),
+
+ GATE(ACLK_DMAC, "aclk_dmac", "aclk_bus_pre", 0,
+ RK1108_CLKGATE_CON(12), 2, GFLAGS),
+ GATE(0, "hclk_rom", "hclk_bus_pre", CLK_IGNORE_UNUSED,
+ RK1108_CLKGATE_CON(12), 3, GFLAGS),
+ GATE(0, "aclk_intmem", "aclk_bus_pre", CLK_IGNORE_UNUSED,
+ RK1108_CLKGATE_CON(12), 1, GFLAGS),
+
+ /* PD_DDR */
+ GATE(0, "apll_ddr", "apll", CLK_IGNORE_UNUSED,
+ RK1108_CLKGATE_CON(0), 8, GFLAGS),
+ GATE(0, "dpll_ddr", "dpll", CLK_IGNORE_UNUSED,
+ RK1108_CLKGATE_CON(0), 9, GFLAGS),
+ GATE(0, "gpll_ddr", "gpll", CLK_IGNORE_UNUSED,
+ RK1108_CLKGATE_CON(0), 10, GFLAGS),
+ COMPOSITE(0, "ddrphy4x", mux_ddrphy_p, CLK_IGNORE_UNUSED,
+ RK1108_CLKSEL_CON(4), 8, 2, MFLAGS, 0, 3,
+ DFLAGS | CLK_DIVIDER_POWER_OF_TWO,
+ RK1108_CLKGATE_CON(10), 9, GFLAGS),
+ GATE(0, "ddrupctl", "ddrphy_pre", CLK_IGNORE_UNUSED,
+ RK1108_CLKGATE_CON(12), 4, GFLAGS),
+ GATE(0, "ddrc", "ddrphy", CLK_IGNORE_UNUSED,
+ RK1108_CLKGATE_CON(12), 5, GFLAGS),
+ GATE(0, "ddrmon", "ddrphy_pre", CLK_IGNORE_UNUSED,
+ RK1108_CLKGATE_CON(12), 6, GFLAGS),
+ GATE(0, "timer_clk", "xin24m", CLK_IGNORE_UNUSED,
+ RK1108_CLKGATE_CON(0), 11, GFLAGS),
+
+ /*
+ * Clock-Architecture Diagram 6
+ */
+
+ /* PD_PERI */
+ COMPOSITE_NOMUX(0, "pclk_periph_pre", "gpll", 0,
+ RK1108_CLKSEL_CON(23), 10, 5, DFLAGS,
+ RK1108_CLKGATE_CON(4), 5, GFLAGS),
+ GATE(0, "pclk_periph", "pclk_periph_pre", CLK_IGNORE_UNUSED,
+ RK1108_CLKGATE_CON(15), 13, GFLAGS),
+ COMPOSITE_NOMUX(0, "hclk_periph_pre", "gpll", 0,
+ RK1108_CLKSEL_CON(23), 5, 5, DFLAGS,
+ RK1108_CLKGATE_CON(4), 4, GFLAGS),
+ GATE(0, "hclk_periph", "hclk_periph_pre", CLK_IGNORE_UNUSED,
+ RK1108_CLKGATE_CON(15), 12, GFLAGS),
+
+ GATE(0, "aclk_peri_src_dpll", "dpll", CLK_IGNORE_UNUSED,
+ RK1108_CLKGATE_CON(4), 1, GFLAGS),
+ GATE(0, "aclk_peri_src_gpll", "gpll", CLK_IGNORE_UNUSED,
+ RK1108_CLKGATE_CON(4), 2, GFLAGS),
+ COMPOSITE(0, "aclk_periph", mux_aclk_peri_src_p, CLK_IGNORE_UNUSED,
+ RK1108_CLKSEL_CON(23), 15, 2, MFLAGS, 0, 5, DFLAGS,
+ RK1108_CLKGATE_CON(15), 11, GFLAGS),
+
+ COMPOSITE(SCLK_SDMMC, "sclk_sdmmc0", mux_mmc_src_p, 0,
+ RK1108_CLKSEL_CON(25), 8, 2, MFLAGS, 0, 8, DFLAGS,
+ RK1108_CLKGATE_CON(5), 0, GFLAGS),
+
+ COMPOSITE_NODIV(0, "sclk_sdio_src", mux_mmc_src_p, 0,
+ RK1108_CLKSEL_CON(25), 10, 2, MFLAGS,
+ RK1108_CLKGATE_CON(5), 2, GFLAGS),
+ DIV(SCLK_SDIO, "sclk_sdio", "sclk_sdio_src", 0,
+ RK1108_CLKSEL_CON(26), 0, 8, DFLAGS),
+
+ COMPOSITE_NODIV(0, "sclk_emmc_src", mux_mmc_src_p, 0,
+ RK1108_CLKSEL_CON(25), 12, 2, MFLAGS,
+ RK1108_CLKGATE_CON(5), 1, GFLAGS),
+ DIV(SCLK_EMMC, "sclk_emmc", "sclk_emmc_src", 0,
+ RK1108_CLKSEL_CON(26), 8, 8, DFLAGS),
+ GATE(HCLK_SDMMC, "hclk_sdmmc", "hclk_periph", 0, RK1108_CLKGATE_CON(15), 0, GFLAGS),
+ GATE(HCLK_SDIO, "hclk_sdio", "hclk_periph", 0, RK1108_CLKGATE_CON(15), 1, GFLAGS),
+ GATE(HCLK_EMMC, "hclk_emmc", "hclk_periph", 0, RK1108_CLKGATE_CON(15), 2, GFLAGS),
+
+ COMPOSITE(SCLK_NANDC, "sclk_nandc", mux_pll_src_2plls_p, 0,
+ RK1108_CLKSEL_CON(27), 14, 2, MFLAGS, 8, 5, DFLAGS,
+ RK1108_CLKGATE_CON(5), 3, GFLAGS),
+ GATE(HCLK_NANDC, "hclk_nandc", "hclk_periph", 0, RK1108_CLKGATE_CON(15), 3, GFLAGS),
+
+ COMPOSITE(SCLK_SFC, "sclk_sfc", mux_pll_src_2plls_p, 0,
+ RK1108_CLKSEL_CON(27), 7, 2, MFLAGS, 0, 7, DFLAGS,
+ RK1108_CLKGATE_CON(5), 4, GFLAGS),
+ GATE(HCLK_SFC, "hclk_sfc", "hclk_periph", 0, RK1108_CLKGATE_CON(15), 10, GFLAGS),
+
+ COMPOSITE(0, "sclk_macphy_pre", mux_pll_src_apll_gpll_p, 0,
+ RK1108_CLKSEL_CON(24), 12, 2, MFLAGS, 0, 5, DFLAGS,
+ RK1108_CLKGATE_CON(4), 10, GFLAGS),
+ MUX(0, "sclk_macphy", mux_sclk_macphy_p, CLK_SET_RATE_PARENT,
+ RK1108_CLKSEL_CON(24), 8, 2, MFLAGS),
+ GATE(0, "sclk_macphy_rx", "sclk_macphy", 0, RK1108_CLKGATE_CON(4), 8, GFLAGS),
+ GATE(0, "sclk_mac_ref", "sclk_macphy", 0, RK1108_CLKGATE_CON(4), 6, GFLAGS),
+ GATE(0, "sclk_mac_refout", "sclk_macphy", 0, RK1108_CLKGATE_CON(4), 7, GFLAGS),
+
+ MMC(SCLK_SDMMC_DRV, "sdmmc_drv", "sclk_sdmmc", RK1108_SDMMC_CON0, 1),
+ MMC(SCLK_SDMMC_SAMPLE, "sdmmc_sample", "sclk_sdmmc", RK1108_SDMMC_CON1, 1),
+
+ MMC(SCLK_SDIO_DRV, "sdio_drv", "sclk_sdio", RK1108_SDIO_CON0, 1),
+ MMC(SCLK_SDIO_SAMPLE, "sdio_sample", "sclk_sdio", RK1108_SDIO_CON1, 1),
+
+ MMC(SCLK_EMMC_DRV, "emmc_drv", "sclk_emmc", RK1108_EMMC_CON0, 1),
+ MMC(SCLK_EMMC_SAMPLE, "emmc_sample", "sclk_emmc", RK1108_EMMC_CON1, 1),
+};
+
+static const char *const rk1108_critical_clocks[] __initconst = {
+ "aclk_core",
+ "aclk_bus_src_gpll",
+ "aclk_periph",
+ "hclk_periph",
+ "pclk_periph",
+};
+
+static void __init rk1108_clk_init(struct device_node *np)
+{
+ struct rockchip_clk_provider *ctx;
+ void __iomem *reg_base;
+
+ reg_base = of_iomap(np, 0);
+ if (!reg_base) {
+ pr_err("%s: could not map cru region\n", __func__);
+ return;
+ }
+
+ ctx = rockchip_clk_init(np, reg_base, CLK_NR_CLKS);
+ if (IS_ERR(ctx)) {
+ pr_err("%s: rockchip clk init failed\n", __func__);
+ iounmap(reg_base);
+ return;
+ }
+
+ rockchip_clk_register_plls(ctx, rk1108_pll_clks,
+ ARRAY_SIZE(rk1108_pll_clks),
+ RK1108_GRF_SOC_STATUS0);
+ rockchip_clk_register_branches(ctx, rk1108_clk_branches,
+ ARRAY_SIZE(rk1108_clk_branches));
+ rockchip_clk_protect_critical(rk1108_critical_clocks,
+ ARRAY_SIZE(rk1108_critical_clocks));
+
+ rockchip_clk_register_armclk(ctx, ARMCLK, "armclk",
+ mux_armclk_p, ARRAY_SIZE(mux_armclk_p),
+ &rk1108_cpuclk_data, rk1108_cpuclk_rates,
+ ARRAY_SIZE(rk1108_cpuclk_rates));
+
+ rockchip_register_softrst(np, 13, reg_base + RK1108_SOFTRST_CON(0),
+ ROCKCHIP_SOFTRST_HIWORD_MASK);
+
+ rockchip_register_restart_notifier(ctx, RK1108_GLB_SRST_FST, NULL);
+
+ rockchip_clk_of_add_provider(np, ctx);
+}
+CLK_OF_DECLARE(rk1108_cru, "rockchip,rk1108-cru", rk1108_clk_init);
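The rk1108_critical_clocks[] list feeds rockchip_clk_protect_critical(), whose job (a hedged sketch of the helper in drivers/clk/rockchip/clk.c) is simply to look each name up and leave it enabled, so the late clk_disable_unused() sweep cannot gate bus clocks that no device ever claims:

#include <linux/clk.h>
#include <linux/clk-provider.h>

static void protect_critical(const char *const clocks[], int nclocks)
{
	int i;

	for (i = 0; i < nclocks; i++) {
		struct clk *clk = __clk_lookup(clocks[i]);

		/* prepare+enable once and never release */
		if (clk)
			clk_prepare_enable(clk);
	}
}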
diff --git a/drivers/clk/rockchip/clk-rk3188.c b/drivers/clk/rockchip/clk-rk3188.c
index d0e722a0e8cf..062ef4960244 100644
--- a/drivers/clk/rockchip/clk-rk3188.c
+++ b/drivers/clk/rockchip/clk-rk3188.c
@@ -89,6 +89,7 @@ static struct rockchip_pll_rate_table rk3188_pll_rates[] = {
RK3066_PLL_RATE( 504000000, 1, 84, 4),
RK3066_PLL_RATE( 456000000, 1, 76, 4),
RK3066_PLL_RATE( 408000000, 1, 68, 4),
+ RK3066_PLL_RATE( 400000000, 3, 100, 2),
RK3066_PLL_RATE( 384000000, 2, 128, 4),
RK3066_PLL_RATE( 360000000, 1, 60, 4),
RK3066_PLL_RATE( 312000000, 1, 52, 4),
@@ -306,14 +307,14 @@ static struct rockchip_clk_branch common_clk_branches[] __initdata = {
RK2928_CLKSEL_CON(26), 8, 1, MFLAGS, 0, 2, DFLAGS | CLK_DIVIDER_POWER_OF_TWO,
RK2928_CLKGATE_CON(0), 2, GFLAGS),
- GATE(0, "aclk_cpu", "aclk_cpu_pre", 0,
+ GATE(ACLK_CPU, "aclk_cpu", "aclk_cpu_pre", 0,
RK2928_CLKGATE_CON(0), 3, GFLAGS),
GATE(0, "atclk_cpu", "pclk_cpu_pre", 0,
RK2928_CLKGATE_CON(0), 6, GFLAGS),
- GATE(0, "pclk_cpu", "pclk_cpu_pre", 0,
+ GATE(PCLK_CPU, "pclk_cpu", "pclk_cpu_pre", 0,
RK2928_CLKGATE_CON(0), 5, GFLAGS),
- GATE(0, "hclk_cpu", "hclk_cpu_pre", CLK_IGNORE_UNUSED,
+ GATE(HCLK_CPU, "hclk_cpu", "hclk_cpu_pre", CLK_IGNORE_UNUSED,
RK2928_CLKGATE_CON(0), 4, GFLAGS),
COMPOSITE(0, "aclk_lcdc0_pre", mux_pll_src_cpll_gpll_p, CLK_IGNORE_UNUSED,
@@ -323,12 +324,12 @@ static struct rockchip_clk_branch common_clk_branches[] __initdata = {
RK2928_CLKSEL_CON(31), 15, 1, MFLAGS, 8, 5, DFLAGS,
RK2928_CLKGATE_CON(1), 4, GFLAGS),
- GATE(0, "aclk_peri", "aclk_peri_pre", 0,
+ GATE(ACLK_PERI, "aclk_peri", "aclk_peri_pre", 0,
RK2928_CLKGATE_CON(2), 1, GFLAGS),
- COMPOSITE_NOMUX(0, "hclk_peri", "aclk_peri_pre", 0,
+ COMPOSITE_NOMUX(HCLK_PERI, "hclk_peri", "aclk_peri_pre", 0,
RK2928_CLKSEL_CON(10), 8, 2, DFLAGS | CLK_DIVIDER_POWER_OF_TWO,
RK2928_CLKGATE_CON(2), 2, GFLAGS),
- COMPOSITE_NOMUX(0, "pclk_peri", "aclk_peri_pre", 0,
+ COMPOSITE_NOMUX(PCLK_PERI, "pclk_peri", "aclk_peri_pre", 0,
RK2928_CLKSEL_CON(10), 12, 2, DFLAGS | CLK_DIVIDER_POWER_OF_TWO,
RK2928_CLKGATE_CON(2), 3, GFLAGS),
diff --git a/drivers/clk/rockchip/clk-rk3399.c b/drivers/clk/rockchip/clk-rk3399.c
index 8387c7a40bda..3490887b0579 100644
--- a/drivers/clk/rockchip/clk-rk3399.c
+++ b/drivers/clk/rockchip/clk-rk3399.c
@@ -77,7 +77,7 @@ static struct rockchip_pll_rate_table rk3399_pll_rates[] = {
RK3036_PLL_RATE(1104000000, 1, 46, 1, 1, 1, 0),
RK3036_PLL_RATE(1100000000, 12, 550, 1, 1, 1, 0),
RK3036_PLL_RATE(1008000000, 1, 84, 2, 1, 1, 0),
- RK3036_PLL_RATE(1000000000, 6, 500, 2, 1, 1, 0),
+ RK3036_PLL_RATE(1000000000, 1, 125, 3, 1, 1, 0),
RK3036_PLL_RATE( 984000000, 1, 82, 2, 1, 1, 0),
RK3036_PLL_RATE( 960000000, 1, 80, 2, 1, 1, 0),
RK3036_PLL_RATE( 936000000, 1, 78, 2, 1, 1, 0),
@@ -87,12 +87,13 @@ static struct rockchip_pll_rate_table rk3399_pll_rates[] = {
RK3036_PLL_RATE( 864000000, 1, 72, 2, 1, 1, 0),
RK3036_PLL_RATE( 840000000, 1, 70, 2, 1, 1, 0),
RK3036_PLL_RATE( 816000000, 1, 68, 2, 1, 1, 0),
- RK3036_PLL_RATE( 800000000, 6, 400, 2, 1, 1, 0),
+ RK3036_PLL_RATE( 800000000, 1, 100, 3, 1, 1, 0),
RK3036_PLL_RATE( 700000000, 6, 350, 2, 1, 1, 0),
RK3036_PLL_RATE( 696000000, 1, 58, 2, 1, 1, 0),
RK3036_PLL_RATE( 676000000, 3, 169, 2, 1, 1, 0),
RK3036_PLL_RATE( 600000000, 1, 75, 3, 1, 1, 0),
RK3036_PLL_RATE( 594000000, 1, 99, 4, 1, 1, 0),
+ RK3036_PLL_RATE( 533250000, 8, 711, 4, 1, 1, 0),
RK3036_PLL_RATE( 504000000, 1, 63, 3, 1, 1, 0),
RK3036_PLL_RATE( 500000000, 6, 250, 2, 1, 1, 0),
RK3036_PLL_RATE( 408000000, 1, 68, 2, 2, 1, 0),
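For checking the rewritten entries: in integer mode (dsmpd == 1) an RK3036-style PLL produces 24 MHz * fbdiv / (refdiv * postdiv1 * postdiv2), so the new parameters keep the same output rates while using a smaller refdiv. A sketch of the arithmetic:

/* xin24m reference, integer (dsmpd == 1) mode only */
static unsigned long long rk3036_pll_rate(unsigned int refdiv,
					  unsigned int fbdiv,
					  unsigned int postdiv1,
					  unsigned int postdiv2)
{
	return 24000000ULL * fbdiv / refdiv / postdiv1 / postdiv2;
}

/* rk3036_pll_rate(1, 125, 3, 1) == 1000000000  (was 6, 500, 2, 1)
 * rk3036_pll_rate(1, 100, 3, 1) ==  800000000  (was 6, 400, 2, 1)
 * rk3036_pll_rate(8, 711, 4, 1) ==  533250000  (new entry)
 */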
@@ -410,11 +411,11 @@ static struct rockchip_clk_branch rk3399_clk_branches[] __initdata = {
GATE(SCLK_USB2PHY1_REF, "clk_usb2phy1_ref", "xin24m", CLK_IGNORE_UNUSED,
RK3399_CLKGATE_CON(6), 6, GFLAGS),
- GATE(0, "clk_usbphy0_480m_src", "clk_usbphy0_480m", CLK_IGNORE_UNUSED,
+ GATE(0, "clk_usbphy0_480m_src", "clk_usbphy0_480m", 0,
RK3399_CLKGATE_CON(13), 12, GFLAGS),
- GATE(0, "clk_usbphy1_480m_src", "clk_usbphy1_480m", CLK_IGNORE_UNUSED,
+ GATE(0, "clk_usbphy1_480m_src", "clk_usbphy1_480m", 0,
RK3399_CLKGATE_CON(13), 12, GFLAGS),
- MUX(0, "clk_usbphy_480m", mux_usbphy_480m_p, CLK_IGNORE_UNUSED,
+ MUX(0, "clk_usbphy_480m", mux_usbphy_480m_p, 0,
RK3399_CLKSEL_CON(14), 6, 1, MFLAGS),
MUX(0, "upll", mux_pll_src_24m_usbphy480m_p, 0,
@@ -498,7 +499,7 @@ static struct rockchip_clk_branch rk3399_clk_branches[] __initdata = {
RK3399_CLKGATE_CON(14), 10, GFLAGS),
GATE(ACLK_GIC_ADB400_CORE_L_2_GIC, "aclk_core_adb400_core_l_2_gic", "armclkl", CLK_IGNORE_UNUSED,
RK3399_CLKGATE_CON(14), 11, GFLAGS),
- GATE(SCLK_PVTM_CORE_L, "clk_pvtm_core_l", "xin24m", CLK_IGNORE_UNUSED,
+ GATE(SCLK_PVTM_CORE_L, "clk_pvtm_core_l", "xin24m", 0,
RK3399_CLKGATE_CON(0), 7, GFLAGS),
/* big core */
@@ -539,7 +540,7 @@ static struct rockchip_clk_branch rk3399_clk_branches[] __initdata = {
GATE(0, "pclk_dbg_cxcs_pd_core_b", "pclk_dbg_core_b", CLK_IGNORE_UNUSED,
RK3399_CLKGATE_CON(14), 2, GFLAGS),
- GATE(SCLK_PVTM_CORE_B, "clk_pvtm_core_b", "xin24m", CLK_IGNORE_UNUSED,
+ GATE(SCLK_PVTM_CORE_B, "clk_pvtm_core_b", "xin24m", 0,
RK3399_CLKGATE_CON(1), 7, GFLAGS),
/* gmac */
@@ -675,18 +676,18 @@ static struct rockchip_clk_branch rk3399_clk_branches[] __initdata = {
GATE(PCLK_CENTER_MAIN_NOC, "pclk_center_main_noc", "pclk_ddr", CLK_IGNORE_UNUSED,
RK3399_CLKGATE_CON(18), 10, GFLAGS),
- GATE(PCLK_DDR_MON, "pclk_ddr_mon", "pclk_ddr", CLK_IGNORE_UNUSED,
+ GATE(PCLK_DDR_MON, "pclk_ddr_mon", "pclk_ddr", 0,
RK3399_CLKGATE_CON(18), 12, GFLAGS),
GATE(PCLK_CIC, "pclk_cic", "pclk_ddr", CLK_IGNORE_UNUSED,
RK3399_CLKGATE_CON(18), 15, GFLAGS),
GATE(PCLK_DDR_SGRF, "pclk_ddr_sgrf", "pclk_ddr", CLK_IGNORE_UNUSED,
RK3399_CLKGATE_CON(19), 2, GFLAGS),
- GATE(SCLK_PVTM_DDR, "clk_pvtm_ddr", "xin24m", CLK_IGNORE_UNUSED,
+ GATE(SCLK_PVTM_DDR, "clk_pvtm_ddr", "xin24m", 0,
RK3399_CLKGATE_CON(4), 11, GFLAGS),
- GATE(SCLK_DFIMON0_TIMER, "clk_dfimon0_timer", "xin24m", CLK_IGNORE_UNUSED,
+ GATE(SCLK_DFIMON0_TIMER, "clk_dfimon0_timer", "xin24m", 0,
RK3399_CLKGATE_CON(3), 5, GFLAGS),
- GATE(SCLK_DFIMON1_TIMER, "clk_dfimon1_timer", "xin24m", CLK_IGNORE_UNUSED,
+ GATE(SCLK_DFIMON1_TIMER, "clk_dfimon1_timer", "xin24m", 0,
RK3399_CLKGATE_CON(3), 6, GFLAGS),
/* cci */
@@ -966,7 +967,7 @@ static struct rockchip_clk_branch rk3399_clk_branches[] __initdata = {
GATE(SCLK_INTMEM3, "clk_intmem3", "aclk_perilp0", CLK_IGNORE_UNUSED, RK3399_CLKGATE_CON(23), 5, GFLAGS),
GATE(SCLK_INTMEM4, "clk_intmem4", "aclk_perilp0", CLK_IGNORE_UNUSED, RK3399_CLKGATE_CON(23), 6, GFLAGS),
GATE(SCLK_INTMEM5, "clk_intmem5", "aclk_perilp0", CLK_IGNORE_UNUSED, RK3399_CLKGATE_CON(23), 7, GFLAGS),
- GATE(ACLK_DCF, "aclk_dcf", "aclk_perilp0", CLK_IGNORE_UNUSED, RK3399_CLKGATE_CON(23), 8, GFLAGS),
+ GATE(ACLK_DCF, "aclk_dcf", "aclk_perilp0", 0, RK3399_CLKGATE_CON(23), 8, GFLAGS),
GATE(ACLK_DMAC0_PERILP, "aclk_dmac0_perilp", "aclk_perilp0", 0, RK3399_CLKGATE_CON(25), 5, GFLAGS),
GATE(ACLK_DMAC1_PERILP, "aclk_dmac1_perilp", "aclk_perilp0", 0, RK3399_CLKGATE_CON(25), 6, GFLAGS),
GATE(ACLK_PERILP0_NOC, "aclk_perilp0_noc", "aclk_perilp0", CLK_IGNORE_UNUSED, RK3399_CLKGATE_CON(25), 7, GFLAGS),
@@ -980,7 +981,7 @@ static struct rockchip_clk_branch rk3399_clk_branches[] __initdata = {
GATE(HCLK_PERILP0_NOC, "hclk_perilp0_noc", "hclk_perilp0", CLK_IGNORE_UNUSED, RK3399_CLKGATE_CON(25), 8, GFLAGS),
/* pclk_perilp0 gates */
- GATE(PCLK_DCF, "pclk_dcf", "pclk_perilp0", CLK_IGNORE_UNUSED, RK3399_CLKGATE_CON(23), 9, GFLAGS),
+ GATE(PCLK_DCF, "pclk_dcf", "pclk_perilp0", 0, RK3399_CLKGATE_CON(23), 9, GFLAGS),
/* crypto */
COMPOSITE(SCLK_CRYPTO0, "clk_crypto0", mux_pll_src_cpll_gpll_ppll_p, 0,
@@ -1382,8 +1383,8 @@ static struct rockchip_clk_branch rk3399_clk_branches[] __initdata = {
/* clk_test */
/* clk_test_pre is controlled by CRU_MISC_CON[3] */
COMPOSITE_NOMUX(0, "clk_test", "clk_test_pre", CLK_IGNORE_UNUSED,
- RK3368_CLKSEL_CON(58), 0, 5, DFLAGS,
- RK3368_CLKGATE_CON(13), 11, GFLAGS),
+ RK3399_CLKSEL_CON(58), 0, 5, DFLAGS,
+ RK3399_CLKGATE_CON(13), 11, GFLAGS),
/* ddrc */
GATE(0, "clk_ddrc_lpll_src", "lpll", 0, RK3399_CLKGATE_CON(3),
diff --git a/drivers/clk/rockchip/clk.h b/drivers/clk/rockchip/clk.h
index 1653edd792a5..d67eecc4ade9 100644
--- a/drivers/clk/rockchip/clk.h
+++ b/drivers/clk/rockchip/clk.h
@@ -34,6 +34,21 @@ struct clk;
#define HIWORD_UPDATE(val, mask, shift) \
((val) << (shift) | (mask) << ((shift) + 16))
+/* RK1108 register positions (PLL_CON matches the RK2928 family below; the other offsets differ) */
+#define RK1108_PLL_CON(x) ((x) * 0x4)
+#define RK1108_CLKSEL_CON(x) ((x) * 0x4 + 0x60)
+#define RK1108_CLKGATE_CON(x) ((x) * 0x4 + 0x120)
+#define RK1108_SOFTRST_CON(x) ((x) * 0x4 + 0x180)
+#define RK1108_GLB_SRST_FST 0x1c0
+#define RK1108_GLB_SRST_SND 0x1c4
+#define RK1108_MISC_CON 0x1cc
+#define RK1108_SDMMC_CON0 0x1d8
+#define RK1108_SDMMC_CON1 0x1dc
+#define RK1108_SDIO_CON0 0x1e0
+#define RK1108_SDIO_CON1 0x1e4
+#define RK1108_EMMC_CON0 0x1e8
+#define RK1108_EMMC_CON1 0x1ec
+
#define RK2928_PLL_CON(x) ((x) * 0x4)
#define RK2928_MODE_CON 0x40
#define RK2928_CLKSEL_CON(x) ((x) * 0x4 + 0x44)
diff --git a/drivers/clk/samsung/clk-exynos5433.c b/drivers/clk/samsung/clk-exynos5433.c
index ea1608682d7f..f096bd7df40c 100644
--- a/drivers/clk/samsung/clk-exynos5433.c
+++ b/drivers/clk/samsung/clk-exynos5433.c
@@ -543,7 +543,7 @@ static const struct samsung_div_clock top_div_clks[] __initconst = {
static const struct samsung_gate_clock top_gate_clks[] __initconst = {
/* ENABLE_ACLK_TOP */
GATE(CLK_ACLK_G3D_400, "aclk_g3d_400", "div_aclk_g3d_400",
- ENABLE_ACLK_TOP, 30, 0, 0),
+ ENABLE_ACLK_TOP, 30, CLK_IS_CRITICAL, 0),
GATE(CLK_ACLK_IMEM_SSX_266, "aclk_imem_ssx_266",
"div_aclk_imem_sssx_266", ENABLE_ACLK_TOP,
29, CLK_IGNORE_UNUSED, 0),
@@ -555,25 +555,25 @@ static const struct samsung_gate_clock top_gate_clks[] __initconst = {
CLK_IGNORE_UNUSED | CLK_SET_RATE_PARENT, 0),
GATE(CLK_ACLK_IMEM_200, "aclk_imem_200", "div_aclk_imem_266",
ENABLE_ACLK_TOP, 24,
- CLK_IGNORE_UNUSED | CLK_SET_RATE_PARENT, 0),
+ CLK_IS_CRITICAL | CLK_SET_RATE_PARENT, 0),
GATE(CLK_ACLK_IMEM_266, "aclk_imem_266", "div_aclk_imem_200",
ENABLE_ACLK_TOP, 23,
CLK_IGNORE_UNUSED | CLK_SET_RATE_PARENT, 0),
GATE(CLK_ACLK_PERIC_66, "aclk_peric_66", "div_aclk_peric_66_b",
ENABLE_ACLK_TOP, 22,
- CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED, 0),
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL, 0),
GATE(CLK_ACLK_PERIS_66, "aclk_peris_66", "div_aclk_peris_66_b",
ENABLE_ACLK_TOP, 21,
- CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED, 0),
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL, 0),
GATE(CLK_ACLK_MSCL_400, "aclk_mscl_400", "div_aclk_mscl_400",
ENABLE_ACLK_TOP, 19,
- CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED, 0),
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL, 0),
GATE(CLK_ACLK_FSYS_200, "aclk_fsys_200", "div_aclk_fsys_200",
ENABLE_ACLK_TOP, 18,
- CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED, 0),
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL, 0),
GATE(CLK_ACLK_GSCL_111, "aclk_gscl_111", "div_aclk_gscl_111",
ENABLE_ACLK_TOP, 15,
- CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED, 0),
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL, 0),
GATE(CLK_ACLK_GSCL_333, "aclk_gscl_333", "div_aclk_gscl_333",
ENABLE_ACLK_TOP, 14,
CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED, 0),
@@ -582,7 +582,7 @@ static const struct samsung_gate_clock top_gate_clks[] __initconst = {
CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED, 0),
GATE(CLK_ACLK_CAM1_400, "aclk_cam1_400", "div_aclk_cam1_400",
ENABLE_ACLK_TOP, 12,
- CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED, 0),
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL, 0),
GATE(CLK_ACLK_CAM1_552, "aclk_cam1_552", "div_aclk_cam1_552",
ENABLE_ACLK_TOP, 11,
CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED, 0),
@@ -591,7 +591,7 @@ static const struct samsung_gate_clock top_gate_clks[] __initconst = {
CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED, 0),
GATE(CLK_ACLK_CAM0_400, "aclk_cam0_400", "div_aclk_cam0_400",
ENABLE_ACLK_TOP, 9,
- CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED, 0),
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL, 0),
GATE(CLK_ACLK_CAM0_552, "aclk_cam0_552", "div_aclk_cam0_552",
ENABLE_ACLK_TOP, 8,
CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED, 0),
@@ -600,19 +600,19 @@ static const struct samsung_gate_clock top_gate_clks[] __initconst = {
CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED, 0),
GATE(CLK_ACLK_ISP_400, "aclk_isp_400", "div_aclk_isp_400",
ENABLE_ACLK_TOP, 6,
- CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED, 0),
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL, 0),
GATE(CLK_ACLK_HEVC_400, "aclk_hevc_400", "div_aclk_hevc_400",
ENABLE_ACLK_TOP, 5,
- CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED, 0),
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL, 0),
GATE(CLK_ACLK_MFC_400, "aclk_mfc_400", "div_aclk_mfc_400",
ENABLE_ACLK_TOP, 3,
- CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED, 0),
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL, 0),
GATE(CLK_ACLK_G2D_266, "aclk_g2d_266", "div_aclk_g2d_266",
ENABLE_ACLK_TOP, 2,
CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED, 0),
GATE(CLK_ACLK_G2D_400, "aclk_g2d_400", "div_aclk_g2d_400",
ENABLE_ACLK_TOP, 0,
- CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED, 0),
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL, 0),
/* ENABLE_SCLK_TOP_MSCL */
GATE(CLK_SCLK_JPEG_MSCL, "sclk_jpeg_mscl", "div_sclk_jpeg",
@@ -1385,7 +1385,7 @@ static const struct samsung_gate_clock mif_gate_clks[] __initconst = {
CLK_IGNORE_UNUSED | CLK_SET_RATE_PARENT, 0),
GATE(CLK_ACLK_DISP_333, "aclk_disp_333", "div_aclk_disp_333",
ENABLE_ACLK_MIF3, 1,
- CLK_IGNORE_UNUSED | CLK_SET_RATE_PARENT, 0),
+ CLK_IS_CRITICAL | CLK_SET_RATE_PARENT, 0),
GATE(CLK_ACLK_CPIF_200, "aclk_cpif_200", "div_aclk_cpif_200",
ENABLE_ACLK_MIF3, 0,
CLK_IGNORE_UNUSED | CLK_SET_RATE_PARENT, 0),
@@ -1929,7 +1929,7 @@ CLK_OF_DECLARE(exynos5433_cmu_peris, "samsung,exynos5433-cmu-peris",
/* list of all parent clock list */
PNAME(mout_sclk_ufs_mphy_user_p) = { "oscclk", "sclk_ufs_mphy", };
-PNAME(mout_aclk_fsys_200_user_p) = { "oscclk", "div_aclk_fsys_200", };
+PNAME(mout_aclk_fsys_200_user_p) = { "oscclk", "aclk_fsys_200", };
PNAME(mout_sclk_pcie_100_user_p) = { "oscclk", "sclk_pcie_100_fsys",};
PNAME(mout_sclk_ufsunipro_user_p) = { "oscclk", "sclk_ufsunipro_fsys",};
PNAME(mout_sclk_mmc2_user_p) = { "oscclk", "sclk_mmc2_fsys", };
diff --git a/drivers/clk/st/clk-flexgen.c b/drivers/clk/st/clk-flexgen.c
index a485f3b284b9..918ba3164da9 100644
--- a/drivers/clk/st/clk-flexgen.c
+++ b/drivers/clk/st/clk-flexgen.c
@@ -329,8 +329,10 @@ static void __init st_of_flexgen_setup(struct device_node *np)
return;
parents = flexgen_get_parents(np, &num_parents);
- if (!parents)
+ if (!parents) {
+ iounmap(reg);
return;
+ }
match = of_match_node(flexgen_of_match, np);
if (match) {
@@ -394,6 +396,7 @@ static void __init st_of_flexgen_setup(struct device_node *np)
return;
err:
+ iounmap(reg);
if (clk_data)
kfree(clk_data->clks);
kfree(clk_data);
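
The flexgen fix above adds iounmap() to both the early-return path and the shared error label, so the register mapping is no longer leaked when setup fails partway through. A minimal userspace sketch of the same unwind pattern, using malloc/free as stand-ins for of_iomap/iounmap (all names here are hypothetical, not part of the patch); on success the mapping intentionally stays live, as the clock provider keeps using it:

    #include <stdio.h>
    #include <stdlib.h>

    /* Stand-ins for of_iomap()/iounmap() in this sketch. */
    static void *map_regs(void) { return malloc(64); }
    static void unmap_regs(void *reg) { free(reg); }

    static int flexgen_setup_sketch(int fail_parents)
    {
    	void *reg = map_regs();
    	char *clk_data;

    	if (!reg)
    		return -1;

    	if (fail_parents) {
    		/* Early failure: release the mapping before returning. */
    		unmap_regs(reg);
    		return -1;
    	}

    	clk_data = malloc(32);
    	if (!clk_data)
    		goto err;

    	return 0;	/* success: reg stays mapped for later use */

    err:
    	/* Shared error path: unmap first, then free partial allocations. */
    	unmap_regs(reg);
    	free(clk_data);
    	return -1;
    }

    int main(void)
    {
    	printf("early failure: %d\n", flexgen_setup_sketch(1));
    	printf("success: %d\n", flexgen_setup_sketch(0));
    	return 0;
    }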
diff --git a/drivers/clk/sunxi-ng/Kconfig b/drivers/clk/sunxi-ng/Kconfig
index 254d9526c018..8454c6e3dd65 100644
--- a/drivers/clk/sunxi-ng/Kconfig
+++ b/drivers/clk/sunxi-ng/Kconfig
@@ -35,17 +35,14 @@ config SUNXI_CCU_NK
config SUNXI_CCU_NKM
bool
- select RATIONAL
select SUNXI_CCU_GATE
config SUNXI_CCU_NKMP
bool
- select RATIONAL
select SUNXI_CCU_GATE
config SUNXI_CCU_NM
bool
- select RATIONAL
select SUNXI_CCU_FRAC
select SUNXI_CCU_GATE
@@ -56,6 +53,17 @@ config SUNXI_CCU_MP
# SoC Drivers
+config SUN50I_A64_CCU
+ bool "Support for the Allwinner A64 CCU"
+ select SUNXI_CCU_DIV
+ select SUNXI_CCU_NK
+ select SUNXI_CCU_NKM
+ select SUNXI_CCU_NKMP
+ select SUNXI_CCU_NM
+ select SUNXI_CCU_MP
+ select SUNXI_CCU_PHASE
+ default ARM64 && ARCH_SUNXI
+
config SUN6I_A31_CCU
bool "Support for the Allwinner A31/A31s CCU"
select SUNXI_CCU_DIV
diff --git a/drivers/clk/sunxi-ng/Makefile b/drivers/clk/sunxi-ng/Makefile
index 106cba27c331..24fbc6e5deb8 100644
--- a/drivers/clk/sunxi-ng/Makefile
+++ b/drivers/clk/sunxi-ng/Makefile
@@ -18,6 +18,7 @@ obj-$(CONFIG_SUNXI_CCU_NM) += ccu_nm.o
obj-$(CONFIG_SUNXI_CCU_MP) += ccu_mp.o
# SoC support
+obj-$(CONFIG_SUN50I_A64_CCU) += ccu-sun50i-a64.o
obj-$(CONFIG_SUN6I_A31_CCU) += ccu-sun6i-a31.o
obj-$(CONFIG_SUN8I_A23_CCU) += ccu-sun8i-a23.o
obj-$(CONFIG_SUN8I_A33_CCU) += ccu-sun8i-a33.o
diff --git a/drivers/clk/sunxi-ng/ccu-sun50i-a64.c b/drivers/clk/sunxi-ng/ccu-sun50i-a64.c
new file mode 100644
index 000000000000..e3c084cc6da5
--- /dev/null
+++ b/drivers/clk/sunxi-ng/ccu-sun50i-a64.c
@@ -0,0 +1,915 @@
+/*
+ * Copyright (c) 2016 Maxime Ripard. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+
+#include "ccu_common.h"
+#include "ccu_reset.h"
+
+#include "ccu_div.h"
+#include "ccu_gate.h"
+#include "ccu_mp.h"
+#include "ccu_mult.h"
+#include "ccu_nk.h"
+#include "ccu_nkm.h"
+#include "ccu_nkmp.h"
+#include "ccu_nm.h"
+#include "ccu_phase.h"
+
+#include "ccu-sun50i-a64.h"
+
+static struct ccu_nkmp pll_cpux_clk = {
+ .enable = BIT(31),
+ .lock = BIT(28),
+ .n = _SUNXI_CCU_MULT(8, 5),
+ .k = _SUNXI_CCU_MULT(4, 2),
+ .m = _SUNXI_CCU_DIV(0, 2),
+ .p = _SUNXI_CCU_DIV_MAX(16, 2, 4),
+ .common = {
+ .reg = 0x000,
+ .hw.init = CLK_HW_INIT("pll-cpux",
+ "osc24M",
+ &ccu_nkmp_ops,
+ CLK_SET_RATE_UNGATE),
+ },
+};
+
+/*
+ * The Audio PLL is supposed to have 4 outputs: 3 fixed factors from
+ * the base (2x, 4x and 8x), and one variable divider (the one true
+ * pll audio).
+ *
+ * We don't need the variable divider for now, so we just hardcode it
+ * to match the clock names.
+ */
+#define SUN50I_A64_PLL_AUDIO_REG 0x008
+
+static SUNXI_CCU_NM_WITH_GATE_LOCK(pll_audio_base_clk, "pll-audio-base",
+ "osc24M", 0x008,
+ 8, 7, /* N */
+ 0, 5, /* M */
+ BIT(31), /* gate */
+ BIT(28), /* lock */
+ CLK_SET_RATE_UNGATE);
+
+static SUNXI_CCU_NM_WITH_FRAC_GATE_LOCK(pll_video0_clk, "pll-video0",
+ "osc24M", 0x010,
+ 8, 7, /* N */
+ 0, 4, /* M */
+ BIT(24), /* frac enable */
+ BIT(25), /* frac select */
+ 270000000, /* frac rate 0 */
+ 297000000, /* frac rate 1 */
+ BIT(31), /* gate */
+ BIT(28), /* lock */
+ CLK_SET_RATE_UNGATE);
+
+static SUNXI_CCU_NM_WITH_FRAC_GATE_LOCK(pll_ve_clk, "pll-ve",
+ "osc24M", 0x018,
+ 8, 7, /* N */
+ 0, 4, /* M */
+ BIT(24), /* frac enable */
+ BIT(25), /* frac select */
+ 270000000, /* frac rate 0 */
+ 297000000, /* frac rate 1 */
+ BIT(31), /* gate */
+ BIT(28), /* lock */
+ CLK_SET_RATE_UNGATE);
+
+static SUNXI_CCU_NKM_WITH_GATE_LOCK(pll_ddr0_clk, "pll-ddr0",
+ "osc24M", 0x020,
+ 8, 5, /* N */
+ 4, 2, /* K */
+ 0, 2, /* M */
+ BIT(31), /* gate */
+ BIT(28), /* lock */
+ CLK_SET_RATE_UNGATE);
+
+static struct ccu_nk pll_periph0_clk = {
+ .enable = BIT(31),
+ .lock = BIT(28),
+ .n = _SUNXI_CCU_MULT(8, 5),
+ .k = _SUNXI_CCU_MULT_MIN(4, 2, 2),
+ .fixed_post_div = 2,
+ .common = {
+ .reg = 0x028,
+ .features = CCU_FEATURE_FIXED_POSTDIV,
+ .hw.init = CLK_HW_INIT("pll-periph0", "osc24M",
+ &ccu_nk_ops, CLK_SET_RATE_UNGATE),
+ },
+};
+
+static struct ccu_nk pll_periph1_clk = {
+ .enable = BIT(31),
+ .lock = BIT(28),
+ .n = _SUNXI_CCU_MULT(8, 5),
+ .k = _SUNXI_CCU_MULT_MIN(4, 2, 2),
+ .fixed_post_div = 2,
+ .common = {
+ .reg = 0x02c,
+ .features = CCU_FEATURE_FIXED_POSTDIV,
+ .hw.init = CLK_HW_INIT("pll-periph1", "osc24M",
+ &ccu_nk_ops, CLK_SET_RATE_UNGATE),
+ },
+};
+
+static SUNXI_CCU_NM_WITH_FRAC_GATE_LOCK(pll_video1_clk, "pll-video1",
+ "osc24M", 0x030,
+ 8, 7, /* N */
+ 0, 4, /* M */
+ BIT(24), /* frac enable */
+ BIT(25), /* frac select */
+ 270000000, /* frac rate 0 */
+ 297000000, /* frac rate 1 */
+ BIT(31), /* gate */
+ BIT(28), /* lock */
+ CLK_SET_RATE_UNGATE);
+
+static SUNXI_CCU_NM_WITH_FRAC_GATE_LOCK(pll_gpu_clk, "pll-gpu",
+ "osc24M", 0x038,
+ 8, 7, /* N */
+ 0, 4, /* M */
+ BIT(24), /* frac enable */
+ BIT(25), /* frac select */
+ 270000000, /* frac rate 0 */
+ 297000000, /* frac rate 1 */
+ BIT(31), /* gate */
+ BIT(28), /* lock */
+ CLK_SET_RATE_UNGATE);
+
+/*
+ * The output function can be changed to something more complex that
+ * we do not handle yet.
+ *
+ * Hardcode the mode so that we don't fall into that case.
+ */
+#define SUN50I_A64_PLL_MIPI_REG 0x040
+
+static struct ccu_nkm pll_mipi_clk = {
+ .enable = BIT(31),
+ .lock = BIT(28),
+ .n = _SUNXI_CCU_MULT(8, 4),
+ .k = _SUNXI_CCU_MULT_MIN(4, 2, 2),
+ .m = _SUNXI_CCU_DIV(0, 4),
+ .common = {
+ .reg = 0x040,
+ .hw.init = CLK_HW_INIT("pll-mipi", "pll-video0",
+ &ccu_nkm_ops, CLK_SET_RATE_UNGATE),
+ },
+};
+
+static SUNXI_CCU_NM_WITH_FRAC_GATE_LOCK(pll_hsic_clk, "pll-hsic",
+ "osc24M", 0x044,
+ 8, 7, /* N */
+ 0, 4, /* M */
+ BIT(24), /* frac enable */
+ BIT(25), /* frac select */
+ 270000000, /* frac rate 0 */
+ 297000000, /* frac rate 1 */
+ BIT(31), /* gate */
+ BIT(28), /* lock */
+ CLK_SET_RATE_UNGATE);
+
+static SUNXI_CCU_NM_WITH_FRAC_GATE_LOCK(pll_de_clk, "pll-de",
+ "osc24M", 0x048,
+ 8, 7, /* N */
+ 0, 4, /* M */
+ BIT(24), /* frac enable */
+ BIT(25), /* frac select */
+ 270000000, /* frac rate 0 */
+ 297000000, /* frac rate 1 */
+ BIT(31), /* gate */
+ BIT(28), /* lock */
+ CLK_SET_RATE_UNGATE);
+
+static SUNXI_CCU_NM_WITH_GATE_LOCK(pll_ddr1_clk, "pll-ddr1",
+ "osc24M", 0x04c,
+ 8, 7, /* N */
+ 0, 2, /* M */
+ BIT(31), /* gate */
+ BIT(28), /* lock */
+ CLK_SET_RATE_UNGATE);
+
+static const char * const cpux_parents[] = { "osc32k", "osc24M",
+ "pll-cpux", "pll-cpux" };
+static SUNXI_CCU_MUX(cpux_clk, "cpux", cpux_parents,
+ 0x050, 16, 2, CLK_SET_RATE_PARENT | CLK_IS_CRITICAL);
+
+static SUNXI_CCU_M(axi_clk, "axi", "cpux", 0x050, 0, 2, 0);
+
+static const char * const ahb1_parents[] = { "osc32k", "osc24M",
+ "axi", "pll-periph0" };
+static struct ccu_div ahb1_clk = {
+ .div = _SUNXI_CCU_DIV_FLAGS(4, 2, CLK_DIVIDER_POWER_OF_TWO),
+
+ .mux = {
+ .shift = 12,
+ .width = 2,
+
+ .variable_prediv = {
+ .index = 3,
+ .shift = 6,
+ .width = 2,
+ },
+ },
+
+ .common = {
+ .reg = 0x054,
+ .features = CCU_FEATURE_VARIABLE_PREDIV,
+ .hw.init = CLK_HW_INIT_PARENTS("ahb1",
+ ahb1_parents,
+ &ccu_div_ops,
+ 0),
+ },
+};
+
+static struct clk_div_table apb1_div_table[] = {
+ { .val = 0, .div = 2 },
+ { .val = 1, .div = 2 },
+ { .val = 2, .div = 4 },
+ { .val = 3, .div = 8 },
+ { /* Sentinel */ },
+};
+static SUNXI_CCU_DIV_TABLE(apb1_clk, "apb1", "ahb1",
+ 0x054, 8, 2, apb1_div_table, 0);
+
+static const char * const apb2_parents[] = { "osc32k", "osc24M",
+ "pll-periph0-2x",
+ "pll-periph0-2x" };
+static SUNXI_CCU_MP_WITH_MUX(apb2_clk, "apb2", apb2_parents, 0x058,
+ 0, 5, /* M */
+ 16, 2, /* P */
+ 24, 2, /* mux */
+ 0);
+
+static const char * const ahb2_parents[] = { "ahb1", "pll-periph0" };
+static const struct ccu_mux_fixed_prediv ahb2_fixed_predivs[] = {
+ { .index = 1, .div = 2 },
+};
+static struct ccu_mux ahb2_clk = {
+ .mux = {
+ .shift = 0,
+ .width = 1,
+ .fixed_predivs = ahb2_fixed_predivs,
+ .n_predivs = ARRAY_SIZE(ahb2_fixed_predivs),
+ },
+
+ .common = {
+ .reg = 0x05c,
+ .features = CCU_FEATURE_FIXED_PREDIV,
+ .hw.init = CLK_HW_INIT_PARENTS("ahb2",
+ ahb2_parents,
+ &ccu_mux_ops,
+ 0),
+ },
+};
+
+static SUNXI_CCU_GATE(bus_mipi_dsi_clk, "bus-mipi-dsi", "ahb1",
+ 0x060, BIT(1), 0);
+static SUNXI_CCU_GATE(bus_ce_clk, "bus-ce", "ahb1",
+ 0x060, BIT(5), 0);
+static SUNXI_CCU_GATE(bus_dma_clk, "bus-dma", "ahb1",
+ 0x060, BIT(6), 0);
+static SUNXI_CCU_GATE(bus_mmc0_clk, "bus-mmc0", "ahb1",
+ 0x060, BIT(8), 0);
+static SUNXI_CCU_GATE(bus_mmc1_clk, "bus-mmc1", "ahb1",
+ 0x060, BIT(9), 0);
+static SUNXI_CCU_GATE(bus_mmc2_clk, "bus-mmc2", "ahb1",
+ 0x060, BIT(10), 0);
+static SUNXI_CCU_GATE(bus_nand_clk, "bus-nand", "ahb1",
+ 0x060, BIT(13), 0);
+static SUNXI_CCU_GATE(bus_dram_clk, "bus-dram", "ahb1",
+ 0x060, BIT(14), 0);
+static SUNXI_CCU_GATE(bus_emac_clk, "bus-emac", "ahb2",
+ 0x060, BIT(17), 0);
+static SUNXI_CCU_GATE(bus_ts_clk, "bus-ts", "ahb1",
+ 0x060, BIT(18), 0);
+static SUNXI_CCU_GATE(bus_hstimer_clk, "bus-hstimer", "ahb1",
+ 0x060, BIT(19), 0);
+static SUNXI_CCU_GATE(bus_spi0_clk, "bus-spi0", "ahb1",
+ 0x060, BIT(20), 0);
+static SUNXI_CCU_GATE(bus_spi1_clk, "bus-spi1", "ahb1",
+ 0x060, BIT(21), 0);
+static SUNXI_CCU_GATE(bus_otg_clk, "bus-otg", "ahb1",
+ 0x060, BIT(23), 0);
+static SUNXI_CCU_GATE(bus_ehci0_clk, "bus-ehci0", "ahb1",
+ 0x060, BIT(24), 0);
+static SUNXI_CCU_GATE(bus_ehci1_clk, "bus-ehci1", "ahb2",
+ 0x060, BIT(25), 0);
+static SUNXI_CCU_GATE(bus_ohci0_clk, "bus-ohci0", "ahb1",
+ 0x060, BIT(28), 0);
+static SUNXI_CCU_GATE(bus_ohci1_clk, "bus-ohci1", "ahb2",
+ 0x060, BIT(29), 0);
+
+static SUNXI_CCU_GATE(bus_ve_clk, "bus-ve", "ahb1",
+ 0x064, BIT(0), 0);
+static SUNXI_CCU_GATE(bus_tcon0_clk, "bus-tcon0", "ahb1",
+ 0x064, BIT(3), 0);
+static SUNXI_CCU_GATE(bus_tcon1_clk, "bus-tcon1", "ahb1",
+ 0x064, BIT(4), 0);
+static SUNXI_CCU_GATE(bus_deinterlace_clk, "bus-deinterlace", "ahb1",
+ 0x064, BIT(5), 0);
+static SUNXI_CCU_GATE(bus_csi_clk, "bus-csi", "ahb1",
+ 0x064, BIT(8), 0);
+static SUNXI_CCU_GATE(bus_hdmi_clk, "bus-hdmi", "ahb1",
+ 0x064, BIT(11), 0);
+static SUNXI_CCU_GATE(bus_de_clk, "bus-de", "ahb1",
+ 0x064, BIT(12), 0);
+static SUNXI_CCU_GATE(bus_gpu_clk, "bus-gpu", "ahb1",
+ 0x064, BIT(20), 0);
+static SUNXI_CCU_GATE(bus_msgbox_clk, "bus-msgbox", "ahb1",
+ 0x064, BIT(21), 0);
+static SUNXI_CCU_GATE(bus_spinlock_clk, "bus-spinlock", "ahb1",
+ 0x064, BIT(22), 0);
+
+static SUNXI_CCU_GATE(bus_codec_clk, "bus-codec", "apb1",
+ 0x068, BIT(0), 0);
+static SUNXI_CCU_GATE(bus_spdif_clk, "bus-spdif", "apb1",
+ 0x068, BIT(1), 0);
+static SUNXI_CCU_GATE(bus_pio_clk, "bus-pio", "apb1",
+ 0x068, BIT(5), 0);
+static SUNXI_CCU_GATE(bus_ths_clk, "bus-ths", "apb1",
+ 0x068, BIT(8), 0);
+static SUNXI_CCU_GATE(bus_i2s0_clk, "bus-i2s0", "apb1",
+ 0x068, BIT(12), 0);
+static SUNXI_CCU_GATE(bus_i2s1_clk, "bus-i2s1", "apb1",
+ 0x068, BIT(13), 0);
+static SUNXI_CCU_GATE(bus_i2s2_clk, "bus-i2s2", "apb1",
+ 0x068, BIT(14), 0);
+
+static SUNXI_CCU_GATE(bus_i2c0_clk, "bus-i2c0", "apb2",
+ 0x06c, BIT(0), 0);
+static SUNXI_CCU_GATE(bus_i2c1_clk, "bus-i2c1", "apb2",
+ 0x06c, BIT(1), 0);
+static SUNXI_CCU_GATE(bus_i2c2_clk, "bus-i2c2", "apb2",
+ 0x06c, BIT(2), 0);
+static SUNXI_CCU_GATE(bus_scr_clk, "bus-scr", "apb2",
+ 0x06c, BIT(5), 0);
+static SUNXI_CCU_GATE(bus_uart0_clk, "bus-uart0", "apb2",
+ 0x06c, BIT(16), 0);
+static SUNXI_CCU_GATE(bus_uart1_clk, "bus-uart1", "apb2",
+ 0x06c, BIT(17), 0);
+static SUNXI_CCU_GATE(bus_uart2_clk, "bus-uart2", "apb2",
+ 0x06c, BIT(18), 0);
+static SUNXI_CCU_GATE(bus_uart3_clk, "bus-uart3", "apb2",
+ 0x06c, BIT(19), 0);
+static SUNXI_CCU_GATE(bus_uart4_clk, "bus-uart4", "apb2",
+ 0x06c, BIT(20), 0);
+
+static SUNXI_CCU_GATE(bus_dbg_clk, "bus-dbg", "ahb1",
+ 0x070, BIT(7), 0);
+
+static struct clk_div_table ths_div_table[] = {
+ { .val = 0, .div = 1 },
+ { .val = 1, .div = 2 },
+ { .val = 2, .div = 4 },
+ { .val = 3, .div = 6 },
+ { /* Sentinel */ },
+};
+static const char * const ths_parents[] = { "osc24M" };
+static struct ccu_div ths_clk = {
+ .enable = BIT(31),
+ .div = _SUNXI_CCU_DIV_TABLE(0, 2, ths_div_table),
+ .mux = _SUNXI_CCU_MUX(24, 2),
+ .common = {
+ .reg = 0x074,
+ .hw.init = CLK_HW_INIT_PARENTS("ths",
+ ths_parents,
+ &ccu_div_ops,
+ 0),
+ },
+};
+
+static const char * const mod0_default_parents[] = { "osc24M", "pll-periph0",
+ "pll-periph1" };
+static SUNXI_CCU_MP_WITH_MUX_GATE(nand_clk, "nand", mod0_default_parents, 0x080,
+ 0, 4, /* M */
+ 16, 2, /* P */
+ 24, 2, /* mux */
+ BIT(31), /* gate */
+ 0);
+
+static const char * const mmc_default_parents[] = { "osc24M", "pll-periph0-2x",
+ "pll-periph1-2x" };
+static SUNXI_CCU_MP_WITH_MUX_GATE(mmc0_clk, "mmc0", mmc_default_parents, 0x088,
+ 0, 4, /* M */
+ 16, 2, /* P */
+ 24, 2, /* mux */
+ BIT(31), /* gate */
+ 0);
+
+static SUNXI_CCU_MP_WITH_MUX_GATE(mmc1_clk, "mmc1", mmc_default_parents, 0x08c,
+ 0, 4, /* M */
+ 16, 2, /* P */
+ 24, 2, /* mux */
+ BIT(31), /* gate */
+ 0);
+
+static SUNXI_CCU_MP_WITH_MUX_GATE(mmc2_clk, "mmc2", mmc_default_parents, 0x090,
+ 0, 4, /* M */
+ 16, 2, /* P */
+ 24, 2, /* mux */
+ BIT(31), /* gate */
+ 0);
+
+static const char * const ts_parents[] = { "osc24M", "pll-periph0", };
+static SUNXI_CCU_MP_WITH_MUX_GATE(ts_clk, "ts", ts_parents, 0x098,
+ 0, 4, /* M */
+ 16, 2, /* P */
+ 24, 4, /* mux */
+ BIT(31), /* gate */
+ 0);
+
+static SUNXI_CCU_MP_WITH_MUX_GATE(ce_clk, "ce", mmc_default_parents, 0x09c,
+ 0, 4, /* M */
+ 16, 2, /* P */
+ 24, 2, /* mux */
+ BIT(31), /* gate */
+ 0);
+
+static SUNXI_CCU_MP_WITH_MUX_GATE(spi0_clk, "spi0", mod0_default_parents, 0x0a0,
+ 0, 4, /* M */
+ 16, 2, /* P */
+ 24, 2, /* mux */
+ BIT(31), /* gate */
+ 0);
+
+static SUNXI_CCU_MP_WITH_MUX_GATE(spi1_clk, "spi1", mod0_default_parents, 0x0a4,
+ 0, 4, /* M */
+ 16, 2, /* P */
+ 24, 2, /* mux */
+ BIT(31), /* gate */
+ 0);
+
+static const char * const i2s_parents[] = { "pll-audio-8x", "pll-audio-4x",
+ "pll-audio-2x", "pll-audio" };
+static SUNXI_CCU_MUX_WITH_GATE(i2s0_clk, "i2s0", i2s_parents,
+ 0x0b0, 16, 2, BIT(31), CLK_SET_RATE_PARENT);
+
+static SUNXI_CCU_MUX_WITH_GATE(i2s1_clk, "i2s1", i2s_parents,
+ 0x0b4, 16, 2, BIT(31), CLK_SET_RATE_PARENT);
+
+static SUNXI_CCU_MUX_WITH_GATE(i2s2_clk, "i2s2", i2s_parents,
+ 0x0b8, 16, 2, BIT(31), CLK_SET_RATE_PARENT);
+
+static SUNXI_CCU_M_WITH_GATE(spdif_clk, "spdif", "pll-audio",
+ 0x0c0, 0, 4, BIT(31), CLK_SET_RATE_PARENT);
+
+static SUNXI_CCU_GATE(usb_phy0_clk, "usb-phy0", "osc24M",
+ 0x0cc, BIT(8), 0);
+static SUNXI_CCU_GATE(usb_phy1_clk, "usb-phy1", "osc24M",
+ 0x0cc, BIT(9), 0);
+static SUNXI_CCU_GATE(usb_hsic_clk, "usb-hsic", "pll-hsic",
+ 0x0cc, BIT(10), 0);
+static SUNXI_CCU_GATE(usb_hsic_12m_clk, "usb-hsic-12M", "osc12M",
+ 0x0cc, BIT(11), 0);
+static SUNXI_CCU_GATE(usb_ohci0_clk, "usb-ohci0", "osc12M",
+ 0x0cc, BIT(16), 0);
+static SUNXI_CCU_GATE(usb_ohci1_clk, "usb-ohci1", "usb-ohci0",
+ 0x0cc, BIT(17), 0);
+
+static const char * const dram_parents[] = { "pll-ddr0", "pll-ddr1" };
+static SUNXI_CCU_M_WITH_MUX(dram_clk, "dram", dram_parents,
+ 0x0f4, 0, 4, 20, 2, CLK_IS_CRITICAL);
+
+static SUNXI_CCU_GATE(dram_ve_clk, "dram-ve", "dram",
+ 0x100, BIT(0), 0);
+static SUNXI_CCU_GATE(dram_csi_clk, "dram-csi", "dram",
+ 0x100, BIT(1), 0);
+static SUNXI_CCU_GATE(dram_deinterlace_clk, "dram-deinterlace", "dram",
+ 0x100, BIT(2), 0);
+static SUNXI_CCU_GATE(dram_ts_clk, "dram-ts", "dram",
+ 0x100, BIT(3), 0);
+
+static const char * const de_parents[] = { "pll-periph0-2x", "pll-de" };
+static SUNXI_CCU_M_WITH_MUX_GATE(de_clk, "de", de_parents,
+ 0x104, 0, 4, 24, 3, BIT(31), 0);
+
+static const char * const tcon0_parents[] = { "pll-mipi", "pll-video0-2x" };
+static const u8 tcon0_table[] = { 0, 2, };
+static SUNXI_CCU_MUX_TABLE_WITH_GATE(tcon0_clk, "tcon0", tcon0_parents,
+ tcon0_table, 0x118, 24, 3, BIT(31),
+ CLK_SET_RATE_PARENT);
+
+static const char * const tcon1_parents[] = { "pll-video0", "pll-video1" };
+static const u8 tcon1_table[] = { 0, 2, };
+static struct ccu_div tcon1_clk = {
+ .enable = BIT(31),
+ .div = _SUNXI_CCU_DIV(0, 4),
+ .mux = _SUNXI_CCU_MUX_TABLE(24, 2, tcon1_table),
+ .common = {
+ .reg = 0x11c,
+ .hw.init = CLK_HW_INIT_PARENTS("tcon1",
+ tcon1_parents,
+ &ccu_div_ops,
+ CLK_SET_RATE_PARENT),
+ },
+};
+
+static const char * const deinterlace_parents[] = { "pll-periph0", "pll-periph1" };
+static SUNXI_CCU_M_WITH_MUX_GATE(deinterlace_clk, "deinterlace", deinterlace_parents,
+ 0x124, 0, 4, 24, 3, BIT(31), 0);
+
+static SUNXI_CCU_GATE(csi_misc_clk, "csi-misc", "osc24M",
+ 0x130, BIT(31), 0);
+
+static const char * const csi_sclk_parents[] = { "pll-periph0", "pll-periph1" };
+static SUNXI_CCU_M_WITH_MUX_GATE(csi_sclk_clk, "csi-sclk", csi_sclk_parents,
+ 0x134, 16, 4, 24, 3, BIT(31), 0);
+
+static const char * const csi_mclk_parents[] = { "osc24M", "pll-video1", "pll-periph1" };
+static SUNXI_CCU_M_WITH_MUX_GATE(csi_mclk_clk, "csi-mclk", csi_mclk_parents,
+ 0x134, 0, 5, 8, 3, BIT(15), 0);
+
+static SUNXI_CCU_M_WITH_GATE(ve_clk, "ve", "pll-ve",
+ 0x13c, 16, 3, BIT(31), 0);
+
+static SUNXI_CCU_GATE(ac_dig_clk, "ac-dig", "pll-audio",
+ 0x140, BIT(31), CLK_SET_RATE_PARENT);
+
+static SUNXI_CCU_GATE(ac_dig_4x_clk, "ac-dig-4x", "pll-audio-4x",
+ 0x140, BIT(30), CLK_SET_RATE_PARENT);
+
+static SUNXI_CCU_GATE(avs_clk, "avs", "osc24M",
+ 0x144, BIT(31), 0);
+
+static const char * const hdmi_parents[] = { "pll-video0", "pll-video1" };
+static SUNXI_CCU_M_WITH_MUX_GATE(hdmi_clk, "hdmi", hdmi_parents,
+ 0x150, 0, 4, 24, 2, BIT(31), CLK_SET_RATE_PARENT);
+
+static SUNXI_CCU_GATE(hdmi_ddc_clk, "hdmi-ddc", "osc24M",
+ 0x154, BIT(31), 0);
+
+static const char * const mbus_parents[] = { "osc24M", "pll-periph0-2x",
+ "pll-ddr0", "pll-ddr1" };
+static SUNXI_CCU_M_WITH_MUX_GATE(mbus_clk, "mbus", mbus_parents,
+ 0x15c, 0, 3, 24, 2, BIT(31), CLK_IS_CRITICAL);
+
+static const char * const dsi_dphy_parents[] = { "pll-video0", "pll-periph0" };
+static const u8 dsi_dphy_table[] = { 0, 2, };
+static SUNXI_CCU_M_WITH_MUX_TABLE_GATE(dsi_dphy_clk, "dsi-dphy",
+ dsi_dphy_parents, dsi_dphy_table,
+ 0x168, 0, 4, 8, 2, BIT(31), CLK_SET_RATE_PARENT);
+
+static SUNXI_CCU_M_WITH_GATE(gpu_clk, "gpu", "pll-gpu",
+ 0x1a0, 0, 3, BIT(31), CLK_SET_RATE_PARENT);
+
+/* Fixed Factor clocks */
+static CLK_FIXED_FACTOR(osc12M_clk, "osc12M", "osc24M", 1, 2, 0);
+
+/* We hardcode the divider to 4 for now */
+static CLK_FIXED_FACTOR(pll_audio_clk, "pll-audio",
+ "pll-audio-base", 4, 1, CLK_SET_RATE_PARENT);
+static CLK_FIXED_FACTOR(pll_audio_2x_clk, "pll-audio-2x",
+ "pll-audio-base", 2, 1, CLK_SET_RATE_PARENT);
+static CLK_FIXED_FACTOR(pll_audio_4x_clk, "pll-audio-4x",
+ "pll-audio-base", 1, 1, CLK_SET_RATE_PARENT);
+static CLK_FIXED_FACTOR(pll_audio_8x_clk, "pll-audio-8x",
+ "pll-audio-base", 1, 2, CLK_SET_RATE_PARENT);
+static CLK_FIXED_FACTOR(pll_periph0_2x_clk, "pll-periph0-2x",
+ "pll-periph0", 1, 2, 0);
+static CLK_FIXED_FACTOR(pll_periph1_2x_clk, "pll-periph1-2x",
+ "pll-periph1", 1, 2, 0);
+static CLK_FIXED_FACTOR(pll_video0_2x_clk, "pll-video0-2x",
+ "pll-video0", 1, 2, CLK_SET_RATE_PARENT);
+
+static struct ccu_common *sun50i_a64_ccu_clks[] = {
+ &pll_cpux_clk.common,
+ &pll_audio_base_clk.common,
+ &pll_video0_clk.common,
+ &pll_ve_clk.common,
+ &pll_ddr0_clk.common,
+ &pll_periph0_clk.common,
+ &pll_periph1_clk.common,
+ &pll_video1_clk.common,
+ &pll_gpu_clk.common,
+ &pll_mipi_clk.common,
+ &pll_hsic_clk.common,
+ &pll_de_clk.common,
+ &pll_ddr1_clk.common,
+ &cpux_clk.common,
+ &axi_clk.common,
+ &ahb1_clk.common,
+ &apb1_clk.common,
+ &apb2_clk.common,
+ &ahb2_clk.common,
+ &bus_mipi_dsi_clk.common,
+ &bus_ce_clk.common,
+ &bus_dma_clk.common,
+ &bus_mmc0_clk.common,
+ &bus_mmc1_clk.common,
+ &bus_mmc2_clk.common,
+ &bus_nand_clk.common,
+ &bus_dram_clk.common,
+ &bus_emac_clk.common,
+ &bus_ts_clk.common,
+ &bus_hstimer_clk.common,
+ &bus_spi0_clk.common,
+ &bus_spi1_clk.common,
+ &bus_otg_clk.common,
+ &bus_ehci0_clk.common,
+ &bus_ehci1_clk.common,
+ &bus_ohci0_clk.common,
+ &bus_ohci1_clk.common,
+ &bus_ve_clk.common,
+ &bus_tcon0_clk.common,
+ &bus_tcon1_clk.common,
+ &bus_deinterlace_clk.common,
+ &bus_csi_clk.common,
+ &bus_hdmi_clk.common,
+ &bus_de_clk.common,
+ &bus_gpu_clk.common,
+ &bus_msgbox_clk.common,
+ &bus_spinlock_clk.common,
+ &bus_codec_clk.common,
+ &bus_spdif_clk.common,
+ &bus_pio_clk.common,
+ &bus_ths_clk.common,
+ &bus_i2s0_clk.common,
+ &bus_i2s1_clk.common,
+ &bus_i2s2_clk.common,
+ &bus_i2c0_clk.common,
+ &bus_i2c1_clk.common,
+ &bus_i2c2_clk.common,
+ &bus_scr_clk.common,
+ &bus_uart0_clk.common,
+ &bus_uart1_clk.common,
+ &bus_uart2_clk.common,
+ &bus_uart3_clk.common,
+ &bus_uart4_clk.common,
+ &bus_dbg_clk.common,
+ &ths_clk.common,
+ &nand_clk.common,
+ &mmc0_clk.common,
+ &mmc1_clk.common,
+ &mmc2_clk.common,
+ &ts_clk.common,
+ &ce_clk.common,
+ &spi0_clk.common,
+ &spi1_clk.common,
+ &i2s0_clk.common,
+ &i2s1_clk.common,
+ &i2s2_clk.common,
+ &spdif_clk.common,
+ &usb_phy0_clk.common,
+ &usb_phy1_clk.common,
+ &usb_hsic_clk.common,
+ &usb_hsic_12m_clk.common,
+ &usb_ohci0_clk.common,
+ &usb_ohci1_clk.common,
+ &dram_clk.common,
+ &dram_ve_clk.common,
+ &dram_csi_clk.common,
+ &dram_deinterlace_clk.common,
+ &dram_ts_clk.common,
+ &de_clk.common,
+ &tcon0_clk.common,
+ &tcon1_clk.common,
+ &deinterlace_clk.common,
+ &csi_misc_clk.common,
+ &csi_sclk_clk.common,
+ &csi_mclk_clk.common,
+ &ve_clk.common,
+ &ac_dig_clk.common,
+ &ac_dig_4x_clk.common,
+ &avs_clk.common,
+ &hdmi_clk.common,
+ &hdmi_ddc_clk.common,
+ &mbus_clk.common,
+ &dsi_dphy_clk.common,
+ &gpu_clk.common,
+};
+
+static struct clk_hw_onecell_data sun50i_a64_hw_clks = {
+ .hws = {
+ [CLK_OSC_12M] = &osc12M_clk.hw,
+ [CLK_PLL_CPUX] = &pll_cpux_clk.common.hw,
+ [CLK_PLL_AUDIO_BASE] = &pll_audio_base_clk.common.hw,
+ [CLK_PLL_AUDIO] = &pll_audio_clk.hw,
+ [CLK_PLL_AUDIO_2X] = &pll_audio_2x_clk.hw,
+ [CLK_PLL_AUDIO_4X] = &pll_audio_4x_clk.hw,
+ [CLK_PLL_AUDIO_8X] = &pll_audio_8x_clk.hw,
+ [CLK_PLL_VIDEO0] = &pll_video0_clk.common.hw,
+ [CLK_PLL_VIDEO0_2X] = &pll_video0_2x_clk.hw,
+ [CLK_PLL_VE] = &pll_ve_clk.common.hw,
+ [CLK_PLL_DDR0] = &pll_ddr0_clk.common.hw,
+ [CLK_PLL_PERIPH0] = &pll_periph0_clk.common.hw,
+ [CLK_PLL_PERIPH0_2X] = &pll_periph0_2x_clk.hw,
+ [CLK_PLL_PERIPH1] = &pll_periph1_clk.common.hw,
+ [CLK_PLL_PERIPH1_2X] = &pll_periph1_2x_clk.hw,
+ [CLK_PLL_VIDEO1] = &pll_video1_clk.common.hw,
+ [CLK_PLL_GPU] = &pll_gpu_clk.common.hw,
+ [CLK_PLL_MIPI] = &pll_mipi_clk.common.hw,
+ [CLK_PLL_HSIC] = &pll_hsic_clk.common.hw,
+ [CLK_PLL_DE] = &pll_de_clk.common.hw,
+ [CLK_PLL_DDR1] = &pll_ddr1_clk.common.hw,
+ [CLK_CPUX] = &cpux_clk.common.hw,
+ [CLK_AXI] = &axi_clk.common.hw,
+ [CLK_AHB1] = &ahb1_clk.common.hw,
+ [CLK_APB1] = &apb1_clk.common.hw,
+ [CLK_APB2] = &apb2_clk.common.hw,
+ [CLK_AHB2] = &ahb2_clk.common.hw,
+ [CLK_BUS_MIPI_DSI] = &bus_mipi_dsi_clk.common.hw,
+ [CLK_BUS_CE] = &bus_ce_clk.common.hw,
+ [CLK_BUS_DMA] = &bus_dma_clk.common.hw,
+ [CLK_BUS_MMC0] = &bus_mmc0_clk.common.hw,
+ [CLK_BUS_MMC1] = &bus_mmc1_clk.common.hw,
+ [CLK_BUS_MMC2] = &bus_mmc2_clk.common.hw,
+ [CLK_BUS_NAND] = &bus_nand_clk.common.hw,
+ [CLK_BUS_DRAM] = &bus_dram_clk.common.hw,
+ [CLK_BUS_EMAC] = &bus_emac_clk.common.hw,
+ [CLK_BUS_TS] = &bus_ts_clk.common.hw,
+ [CLK_BUS_HSTIMER] = &bus_hstimer_clk.common.hw,
+ [CLK_BUS_SPI0] = &bus_spi0_clk.common.hw,
+ [CLK_BUS_SPI1] = &bus_spi1_clk.common.hw,
+ [CLK_BUS_OTG] = &bus_otg_clk.common.hw,
+ [CLK_BUS_EHCI0] = &bus_ehci0_clk.common.hw,
+ [CLK_BUS_EHCI1] = &bus_ehci1_clk.common.hw,
+ [CLK_BUS_OHCI0] = &bus_ohci0_clk.common.hw,
+ [CLK_BUS_OHCI1] = &bus_ohci1_clk.common.hw,
+ [CLK_BUS_VE] = &bus_ve_clk.common.hw,
+ [CLK_BUS_TCON0] = &bus_tcon0_clk.common.hw,
+ [CLK_BUS_TCON1] = &bus_tcon1_clk.common.hw,
+ [CLK_BUS_DEINTERLACE] = &bus_deinterlace_clk.common.hw,
+ [CLK_BUS_CSI] = &bus_csi_clk.common.hw,
+ [CLK_BUS_HDMI] = &bus_hdmi_clk.common.hw,
+ [CLK_BUS_DE] = &bus_de_clk.common.hw,
+ [CLK_BUS_GPU] = &bus_gpu_clk.common.hw,
+ [CLK_BUS_MSGBOX] = &bus_msgbox_clk.common.hw,
+ [CLK_BUS_SPINLOCK] = &bus_spinlock_clk.common.hw,
+ [CLK_BUS_CODEC] = &bus_codec_clk.common.hw,
+ [CLK_BUS_SPDIF] = &bus_spdif_clk.common.hw,
+ [CLK_BUS_PIO] = &bus_pio_clk.common.hw,
+ [CLK_BUS_THS] = &bus_ths_clk.common.hw,
+ [CLK_BUS_I2S0] = &bus_i2s0_clk.common.hw,
+ [CLK_BUS_I2S1] = &bus_i2s1_clk.common.hw,
+ [CLK_BUS_I2S2] = &bus_i2s2_clk.common.hw,
+ [CLK_BUS_I2C0] = &bus_i2c0_clk.common.hw,
+ [CLK_BUS_I2C1] = &bus_i2c1_clk.common.hw,
+ [CLK_BUS_I2C2] = &bus_i2c2_clk.common.hw,
+ [CLK_BUS_UART0] = &bus_uart0_clk.common.hw,
+ [CLK_BUS_UART1] = &bus_uart1_clk.common.hw,
+ [CLK_BUS_UART2] = &bus_uart2_clk.common.hw,
+ [CLK_BUS_UART3] = &bus_uart3_clk.common.hw,
+ [CLK_BUS_UART4] = &bus_uart4_clk.common.hw,
+ [CLK_BUS_SCR] = &bus_scr_clk.common.hw,
+ [CLK_BUS_DBG] = &bus_dbg_clk.common.hw,
+ [CLK_THS] = &ths_clk.common.hw,
+ [CLK_NAND] = &nand_clk.common.hw,
+ [CLK_MMC0] = &mmc0_clk.common.hw,
+ [CLK_MMC1] = &mmc1_clk.common.hw,
+ [CLK_MMC2] = &mmc2_clk.common.hw,
+ [CLK_TS] = &ts_clk.common.hw,
+ [CLK_CE] = &ce_clk.common.hw,
+ [CLK_SPI0] = &spi0_clk.common.hw,
+ [CLK_SPI1] = &spi1_clk.common.hw,
+ [CLK_I2S0] = &i2s0_clk.common.hw,
+ [CLK_I2S1] = &i2s1_clk.common.hw,
+ [CLK_I2S2] = &i2s2_clk.common.hw,
+ [CLK_SPDIF] = &spdif_clk.common.hw,
+ [CLK_USB_PHY0] = &usb_phy0_clk.common.hw,
+ [CLK_USB_PHY1] = &usb_phy1_clk.common.hw,
+ [CLK_USB_HSIC] = &usb_hsic_clk.common.hw,
+ [CLK_USB_HSIC_12M] = &usb_hsic_12m_clk.common.hw,
+ [CLK_USB_OHCI0] = &usb_ohci0_clk.common.hw,
+ [CLK_USB_OHCI1] = &usb_ohci1_clk.common.hw,
+ [CLK_DRAM] = &dram_clk.common.hw,
+ [CLK_DRAM_VE] = &dram_ve_clk.common.hw,
+ [CLK_DRAM_CSI] = &dram_csi_clk.common.hw,
+ [CLK_DRAM_DEINTERLACE] = &dram_deinterlace_clk.common.hw,
+ [CLK_DRAM_TS] = &dram_ts_clk.common.hw,
+ [CLK_DE] = &de_clk.common.hw,
+ [CLK_TCON0] = &tcon0_clk.common.hw,
+ [CLK_TCON1] = &tcon1_clk.common.hw,
+ [CLK_DEINTERLACE] = &deinterlace_clk.common.hw,
+ [CLK_CSI_MISC] = &csi_misc_clk.common.hw,
+ [CLK_CSI_SCLK] = &csi_sclk_clk.common.hw,
+ [CLK_CSI_MCLK] = &csi_mclk_clk.common.hw,
+ [CLK_VE] = &ve_clk.common.hw,
+ [CLK_AC_DIG] = &ac_dig_clk.common.hw,
+ [CLK_AC_DIG_4X] = &ac_dig_4x_clk.common.hw,
+ [CLK_AVS] = &avs_clk.common.hw,
+ [CLK_HDMI] = &hdmi_clk.common.hw,
+ [CLK_HDMI_DDC] = &hdmi_ddc_clk.common.hw,
+ [CLK_MBUS] = &mbus_clk.common.hw,
+ [CLK_DSI_DPHY] = &dsi_dphy_clk.common.hw,
+ [CLK_GPU] = &gpu_clk.common.hw,
+ },
+ .num = CLK_NUMBER,
+};
+
+static struct ccu_reset_map sun50i_a64_ccu_resets[] = {
+ [RST_USB_PHY0] = { 0x0cc, BIT(0) },
+ [RST_USB_PHY1] = { 0x0cc, BIT(1) },
+ [RST_USB_HSIC] = { 0x0cc, BIT(2) },
+
+ [RST_DRAM] = { 0x0f4, BIT(31) },
+ [RST_MBUS] = { 0x0fc, BIT(31) },
+
+ [RST_BUS_MIPI_DSI] = { 0x2c0, BIT(1) },
+ [RST_BUS_CE] = { 0x2c0, BIT(5) },
+ [RST_BUS_DMA] = { 0x2c0, BIT(6) },
+ [RST_BUS_MMC0] = { 0x2c0, BIT(8) },
+ [RST_BUS_MMC1] = { 0x2c0, BIT(9) },
+ [RST_BUS_MMC2] = { 0x2c0, BIT(10) },
+ [RST_BUS_NAND] = { 0x2c0, BIT(13) },
+ [RST_BUS_DRAM] = { 0x2c0, BIT(14) },
+ [RST_BUS_EMAC] = { 0x2c0, BIT(17) },
+ [RST_BUS_TS] = { 0x2c0, BIT(18) },
+ [RST_BUS_HSTIMER] = { 0x2c0, BIT(19) },
+ [RST_BUS_SPI0] = { 0x2c0, BIT(20) },
+ [RST_BUS_SPI1] = { 0x2c0, BIT(21) },
+ [RST_BUS_OTG] = { 0x2c0, BIT(23) },
+ [RST_BUS_EHCI0] = { 0x2c0, BIT(24) },
+ [RST_BUS_EHCI1] = { 0x2c0, BIT(25) },
+ [RST_BUS_OHCI0] = { 0x2c0, BIT(28) },
+ [RST_BUS_OHCI1] = { 0x2c0, BIT(29) },
+
+ [RST_BUS_VE] = { 0x2c4, BIT(0) },
+ [RST_BUS_TCON0] = { 0x2c4, BIT(3) },
+ [RST_BUS_TCON1] = { 0x2c4, BIT(4) },
+ [RST_BUS_DEINTERLACE] = { 0x2c4, BIT(5) },
+ [RST_BUS_CSI] = { 0x2c4, BIT(8) },
+ [RST_BUS_HDMI0] = { 0x2c4, BIT(10) },
+ [RST_BUS_HDMI1] = { 0x2c4, BIT(11) },
+ [RST_BUS_DE] = { 0x2c4, BIT(12) },
+ [RST_BUS_GPU] = { 0x2c4, BIT(20) },
+ [RST_BUS_MSGBOX] = { 0x2c4, BIT(21) },
+ [RST_BUS_SPINLOCK] = { 0x2c4, BIT(22) },
+ [RST_BUS_DBG] = { 0x2c4, BIT(31) },
+
+ [RST_BUS_LVDS] = { 0x2c8, BIT(0) },
+
+ [RST_BUS_CODEC] = { 0x2d0, BIT(0) },
+ [RST_BUS_SPDIF] = { 0x2d0, BIT(1) },
+ [RST_BUS_THS] = { 0x2d0, BIT(8) },
+ [RST_BUS_I2S0] = { 0x2d0, BIT(12) },
+ [RST_BUS_I2S1] = { 0x2d0, BIT(13) },
+ [RST_BUS_I2S2] = { 0x2d0, BIT(14) },
+
+ [RST_BUS_I2C0] = { 0x2d8, BIT(0) },
+ [RST_BUS_I2C1] = { 0x2d8, BIT(1) },
+ [RST_BUS_I2C2] = { 0x2d8, BIT(2) },
+ [RST_BUS_SCR] = { 0x2d8, BIT(5) },
+ [RST_BUS_UART0] = { 0x2d8, BIT(16) },
+ [RST_BUS_UART1] = { 0x2d8, BIT(17) },
+ [RST_BUS_UART2] = { 0x2d8, BIT(18) },
+ [RST_BUS_UART3] = { 0x2d8, BIT(19) },
+ [RST_BUS_UART4] = { 0x2d8, BIT(20) },
+};
+
+static const struct sunxi_ccu_desc sun50i_a64_ccu_desc = {
+ .ccu_clks = sun50i_a64_ccu_clks,
+ .num_ccu_clks = ARRAY_SIZE(sun50i_a64_ccu_clks),
+
+ .hw_clks = &sun50i_a64_hw_clks,
+
+ .resets = sun50i_a64_ccu_resets,
+ .num_resets = ARRAY_SIZE(sun50i_a64_ccu_resets),
+};
+
+static int sun50i_a64_ccu_probe(struct platform_device *pdev)
+{
+ struct resource *res;
+ void __iomem *reg;
+ u32 val;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ reg = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(reg))
+ return PTR_ERR(reg);
+
+ /* Force the PLL-Audio-1x divider to 4 */
+ val = readl(reg + SUN50I_A64_PLL_AUDIO_REG);
+ val &= ~GENMASK(19, 16);
+ writel(val | (3 << 16), reg + SUN50I_A64_PLL_AUDIO_REG);
+
+ writel(0x515, reg + SUN50I_A64_PLL_MIPI_REG);
+
+ return sunxi_ccu_probe(pdev->dev.of_node, reg, &sun50i_a64_ccu_desc);
+}
+
+static const struct of_device_id sun50i_a64_ccu_ids[] = {
+ { .compatible = "allwinner,sun50i-a64-ccu" },
+ { }
+};
+
+static struct platform_driver sun50i_a64_ccu_driver = {
+ .probe = sun50i_a64_ccu_probe,
+ .driver = {
+ .name = "sun50i-a64-ccu",
+ .of_match_table = sun50i_a64_ccu_ids,
+ },
+};
+builtin_platform_driver(sun50i_a64_ccu_driver);
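
The probe above pins the PLL-Audio post-divider by writing 3 into bits [19:16]: the field stores the divider minus one, so 3 means divide-by-4, matching the fixed-factor "pll-audio" clock declared earlier. A small standalone sketch of that read-modify-write, with GENMASK(19, 16) open-coded for userspace (the register value below is illustrative only):

    #include <stdint.h>
    #include <stdio.h>

    #define FIELD_MASK (0xfu << 16)	/* GENMASK(19, 16) */

    int main(void)
    {
    	uint32_t val = 0x90030800;	/* illustrative PLL_AUDIO register value */

    	val &= ~FIELD_MASK;		/* clear the post-divider field */
    	val |= 3u << 16;		/* divider - 1 = 3, i.e. divide by 4 */

    	printf("new register value: 0x%08x, divider: %u\n",
    	       val, ((val >> 16) & 0xf) + 1);
    	return 0;
    }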
diff --git a/drivers/clk/sunxi-ng/ccu-sun50i-a64.h b/drivers/clk/sunxi-ng/ccu-sun50i-a64.h
new file mode 100644
index 000000000000..9b3cd24b78d2
--- /dev/null
+++ b/drivers/clk/sunxi-ng/ccu-sun50i-a64.h
@@ -0,0 +1,72 @@
+/*
+ * Copyright 2016 Maxime Ripard
+ *
+ * Maxime Ripard <maxime.ripard@free-electrons.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CCU_SUN50I_A64_H_
+#define _CCU_SUN50I_A64_H_
+
+#include <dt-bindings/clock/sun50i-a64-ccu.h>
+#include <dt-bindings/reset/sun50i-a64-ccu.h>
+
+#define CLK_OSC_12M 0
+#define CLK_PLL_CPUX 1
+#define CLK_PLL_AUDIO_BASE 2
+#define CLK_PLL_AUDIO 3
+#define CLK_PLL_AUDIO_2X 4
+#define CLK_PLL_AUDIO_4X 5
+#define CLK_PLL_AUDIO_8X 6
+#define CLK_PLL_VIDEO0 7
+#define CLK_PLL_VIDEO0_2X 8
+#define CLK_PLL_VE 9
+#define CLK_PLL_DDR0 10
+#define CLK_PLL_PERIPH0 11
+#define CLK_PLL_PERIPH0_2X 12
+#define CLK_PLL_PERIPH1 13
+#define CLK_PLL_PERIPH1_2X 14
+#define CLK_PLL_VIDEO1 15
+#define CLK_PLL_GPU 16
+#define CLK_PLL_MIPI 17
+#define CLK_PLL_HSIC 18
+#define CLK_PLL_DE 19
+#define CLK_PLL_DDR1 20
+#define CLK_CPUX 21
+#define CLK_AXI 22
+#define CLK_APB 23
+#define CLK_AHB1 24
+#define CLK_APB1 25
+#define CLK_APB2 26
+#define CLK_AHB2 27
+
+/* All the bus gates are exported */
+
+/* The first bunch of module clocks are exported */
+
+#define CLK_USB_OHCI0_12M 90
+
+#define CLK_USB_OHCI1_12M 92
+
+#define CLK_DRAM 94
+
+/* All the DRAM gates are exported */
+
+/* Some more module clocks are exported */
+
+#define CLK_MBUS 112
+
+/* And the DSI and GPU module clocks are exported */
+
+#define CLK_NUMBER (CLK_GPU + 1)
+
+#endif /* _CCU_SUN50I_A64_H_ */
diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-a23.c b/drivers/clk/sunxi-ng/ccu-sun8i-a23.c
index 2646d980087b..5c6d37bdf247 100644
--- a/drivers/clk/sunxi-ng/ccu-sun8i-a23.c
+++ b/drivers/clk/sunxi-ng/ccu-sun8i-a23.c
@@ -344,10 +344,10 @@ static SUNXI_CCU_MP_WITH_MUX_GATE(spi1_clk, "spi1", mod0_default_parents, 0x0a4,
static const char * const i2s_parents[] = { "pll-audio-8x", "pll-audio-4x",
"pll-audio-2x", "pll-audio" };
static SUNXI_CCU_MUX_WITH_GATE(i2s0_clk, "i2s0", i2s_parents,
- 0x0b0, 16, 2, BIT(31), 0);
+ 0x0b0, 16, 2, BIT(31), CLK_SET_RATE_PARENT);
static SUNXI_CCU_MUX_WITH_GATE(i2s1_clk, "i2s1", i2s_parents,
- 0x0b4, 16, 2, BIT(31), 0);
+ 0x0b4, 16, 2, BIT(31), CLK_SET_RATE_PARENT);
/* TODO: the parent for most of the USB clocks is not known */
static SUNXI_CCU_GATE(usb_phy0_clk, "usb-phy0", "osc24M",
@@ -415,7 +415,7 @@ static SUNXI_CCU_M_WITH_GATE(ve_clk, "ve", "pll-ve",
0x13c, 16, 3, BIT(31), CLK_SET_RATE_PARENT);
static SUNXI_CCU_GATE(ac_dig_clk, "ac-dig", "pll-audio",
- 0x140, BIT(31), 0);
+ 0x140, BIT(31), CLK_SET_RATE_PARENT);
static SUNXI_CCU_GATE(avs_clk, "avs", "osc24M",
0x144, BIT(31), 0);
diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-h3.c b/drivers/clk/sunxi-ng/ccu-sun8i-h3.c
index 4d70590f05e3..21c427d86f28 100644
--- a/drivers/clk/sunxi-ng/ccu-sun8i-h3.c
+++ b/drivers/clk/sunxi-ng/ccu-sun8i-h3.c
@@ -394,16 +394,16 @@ static SUNXI_CCU_MP_WITH_MUX_GATE(spi1_clk, "spi1", mod0_default_parents, 0x0a4,
static const char * const i2s_parents[] = { "pll-audio-8x", "pll-audio-4x",
"pll-audio-2x", "pll-audio" };
static SUNXI_CCU_MUX_WITH_GATE(i2s0_clk, "i2s0", i2s_parents,
- 0x0b0, 16, 2, BIT(31), 0);
+ 0x0b0, 16, 2, BIT(31), CLK_SET_RATE_PARENT);
static SUNXI_CCU_MUX_WITH_GATE(i2s1_clk, "i2s1", i2s_parents,
- 0x0b4, 16, 2, BIT(31), 0);
+ 0x0b4, 16, 2, BIT(31), CLK_SET_RATE_PARENT);
static SUNXI_CCU_MUX_WITH_GATE(i2s2_clk, "i2s2", i2s_parents,
- 0x0b8, 16, 2, BIT(31), 0);
+ 0x0b8, 16, 2, BIT(31), CLK_SET_RATE_PARENT);
static SUNXI_CCU_M_WITH_GATE(spdif_clk, "spdif", "pll-audio",
- 0x0c0, 0, 4, BIT(31), 0);
+ 0x0c0, 0, 4, BIT(31), CLK_SET_RATE_PARENT);
static SUNXI_CCU_GATE(usb_phy0_clk, "usb-phy0", "osc24M",
0x0cc, BIT(8), 0);
@@ -466,7 +466,7 @@ static SUNXI_CCU_M_WITH_GATE(ve_clk, "ve", "pll-ve",
0x13c, 16, 3, BIT(31), 0);
static SUNXI_CCU_GATE(ac_dig_clk, "ac-dig", "pll-audio",
- 0x140, BIT(31), 0);
+ 0x140, BIT(31), CLK_SET_RATE_PARENT);
static SUNXI_CCU_GATE(avs_clk, "avs", "osc24M",
0x144, BIT(31), 0);
diff --git a/drivers/clk/sunxi-ng/ccu_div.h b/drivers/clk/sunxi-ng/ccu_div.h
index 34c338832c0d..06540f7cf41c 100644
--- a/drivers/clk/sunxi-ng/ccu_div.h
+++ b/drivers/clk/sunxi-ng/ccu_div.h
@@ -20,7 +20,7 @@
#include "ccu_mux.h"
/**
- * struct _ccu_div - Internal divider description
+ * struct ccu_div_internal - Internal divider description
* @shift: Bit offset of the divider in its register
* @width: Width of the divider field in its register
* @max: Maximum value allowed for that divider. This is the
@@ -36,7 +36,7 @@
* It is basically a wrapper around the clk_divider functions
* arguments.
*/
-struct _ccu_div {
+struct ccu_div_internal {
u8 shift;
u8 width;
@@ -78,7 +78,7 @@ struct _ccu_div {
struct ccu_div {
u32 enable;
- struct _ccu_div div;
+ struct ccu_div_internal div;
struct ccu_mux_internal mux;
struct ccu_common common;
};
diff --git a/drivers/clk/sunxi-ng/ccu_frac.c b/drivers/clk/sunxi-ng/ccu_frac.c
index 5c4b10cd15b5..8b5eb7756bf7 100644
--- a/drivers/clk/sunxi-ng/ccu_frac.c
+++ b/drivers/clk/sunxi-ng/ccu_frac.c
@@ -14,7 +14,7 @@
#include "ccu_frac.h"
bool ccu_frac_helper_is_enabled(struct ccu_common *common,
- struct _ccu_frac *cf)
+ struct ccu_frac_internal *cf)
{
if (!(common->features & CCU_FEATURE_FRACTIONAL))
return false;
@@ -23,7 +23,7 @@ bool ccu_frac_helper_is_enabled(struct ccu_common *common,
}
void ccu_frac_helper_enable(struct ccu_common *common,
- struct _ccu_frac *cf)
+ struct ccu_frac_internal *cf)
{
unsigned long flags;
u32 reg;
@@ -38,7 +38,7 @@ void ccu_frac_helper_enable(struct ccu_common *common,
}
void ccu_frac_helper_disable(struct ccu_common *common,
- struct _ccu_frac *cf)
+ struct ccu_frac_internal *cf)
{
unsigned long flags;
u32 reg;
@@ -53,7 +53,7 @@ void ccu_frac_helper_disable(struct ccu_common *common,
}
bool ccu_frac_helper_has_rate(struct ccu_common *common,
- struct _ccu_frac *cf,
+ struct ccu_frac_internal *cf,
unsigned long rate)
{
if (!(common->features & CCU_FEATURE_FRACTIONAL))
@@ -63,7 +63,7 @@ bool ccu_frac_helper_has_rate(struct ccu_common *common,
}
unsigned long ccu_frac_helper_read_rate(struct ccu_common *common,
- struct _ccu_frac *cf)
+ struct ccu_frac_internal *cf)
{
u32 reg;
@@ -84,7 +84,7 @@ unsigned long ccu_frac_helper_read_rate(struct ccu_common *common,
}
int ccu_frac_helper_set_rate(struct ccu_common *common,
- struct _ccu_frac *cf,
+ struct ccu_frac_internal *cf,
unsigned long rate)
{
unsigned long flags;
diff --git a/drivers/clk/sunxi-ng/ccu_frac.h b/drivers/clk/sunxi-ng/ccu_frac.h
index e4c670b1cdfe..7b1ee380156f 100644
--- a/drivers/clk/sunxi-ng/ccu_frac.h
+++ b/drivers/clk/sunxi-ng/ccu_frac.h
@@ -18,7 +18,7 @@
#include "ccu_common.h"
-struct _ccu_frac {
+struct ccu_frac_internal {
u32 enable;
u32 select;
@@ -33,21 +33,21 @@ struct _ccu_frac {
}
bool ccu_frac_helper_is_enabled(struct ccu_common *common,
- struct _ccu_frac *cf);
+ struct ccu_frac_internal *cf);
void ccu_frac_helper_enable(struct ccu_common *common,
- struct _ccu_frac *cf);
+ struct ccu_frac_internal *cf);
void ccu_frac_helper_disable(struct ccu_common *common,
- struct _ccu_frac *cf);
+ struct ccu_frac_internal *cf);
bool ccu_frac_helper_has_rate(struct ccu_common *common,
- struct _ccu_frac *cf,
+ struct ccu_frac_internal *cf,
unsigned long rate);
unsigned long ccu_frac_helper_read_rate(struct ccu_common *common,
- struct _ccu_frac *cf);
+ struct ccu_frac_internal *cf);
int ccu_frac_helper_set_rate(struct ccu_common *common,
- struct _ccu_frac *cf,
+ struct ccu_frac_internal *cf,
unsigned long rate);
#endif /* _CCU_FRAC_H_ */
diff --git a/drivers/clk/sunxi-ng/ccu_mp.h b/drivers/clk/sunxi-ng/ccu_mp.h
index edf9215ea8cc..915625e97d98 100644
--- a/drivers/clk/sunxi-ng/ccu_mp.h
+++ b/drivers/clk/sunxi-ng/ccu_mp.h
@@ -29,8 +29,8 @@
struct ccu_mp {
u32 enable;
- struct _ccu_div m;
- struct _ccu_div p;
+ struct ccu_div_internal m;
+ struct ccu_div_internal p;
struct ccu_mux_internal mux;
struct ccu_common common;
};
diff --git a/drivers/clk/sunxi-ng/ccu_mult.c b/drivers/clk/sunxi-ng/ccu_mult.c
index 010e9424691d..678b6cb49f01 100644
--- a/drivers/clk/sunxi-ng/ccu_mult.c
+++ b/drivers/clk/sunxi-ng/ccu_mult.c
@@ -13,10 +13,23 @@
#include "ccu_gate.h"
#include "ccu_mult.h"
+struct _ccu_mult {
+ unsigned long mult, min, max;
+};
+
static void ccu_mult_find_best(unsigned long parent, unsigned long rate,
- unsigned int max_n, unsigned int *n)
+ struct _ccu_mult *mult)
{
- *n = rate / parent;
+ int _mult;
+
+ _mult = rate / parent;
+ if (_mult < mult->min)
+ _mult = mult->min;
+
+ if (_mult > mult->max)
+ _mult = mult->max;
+
+ mult->mult = _mult;
}
static unsigned long ccu_mult_round_rate(struct ccu_mux_internal *mux,
@@ -25,11 +38,13 @@ static unsigned long ccu_mult_round_rate(struct ccu_mux_internal *mux,
void *data)
{
struct ccu_mult *cm = data;
- unsigned int n;
+ struct _ccu_mult _cm;
- ccu_mult_find_best(parent_rate, rate, 1 << cm->mult.width, &n);
+ _cm.min = 1;
+ _cm.max = 1 << cm->mult.width;
+ ccu_mult_find_best(parent_rate, rate, &_cm);
- return parent_rate * n;
+ return parent_rate * _cm.mult;
}
static void ccu_mult_disable(struct clk_hw *hw)
@@ -83,21 +98,23 @@ static int ccu_mult_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate)
{
struct ccu_mult *cm = hw_to_ccu_mult(hw);
+ struct _ccu_mult _cm;
unsigned long flags;
- unsigned int n;
u32 reg;
ccu_mux_helper_adjust_parent_for_prediv(&cm->common, &cm->mux, -1,
&parent_rate);
- ccu_mult_find_best(parent_rate, rate, 1 << cm->mult.width, &n);
+ _cm.min = cm->mult.min;
+ _cm.max = 1 << cm->mult.width;
+ ccu_mult_find_best(parent_rate, rate, &_cm);
spin_lock_irqsave(cm->common.lock, flags);
reg = readl(cm->common.base + cm->common.reg);
reg &= ~GENMASK(cm->mult.width + cm->mult.shift - 1, cm->mult.shift);
- writel(reg | ((n - 1) << cm->mult.shift),
+ writel(reg | ((_cm.mult - 1) << cm->mult.shift),
cm->common.base + cm->common.reg);
spin_unlock_irqrestore(cm->common.lock, flags);
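
The new ccu_mult_find_best() clamps the integer multiplier rate/parent into [min, max] before it is programmed, instead of taking the raw quotient. A runnable sketch of that clamping, assuming a 24 MHz parent and a 5-bit multiplier field with a minimum of 2 (the limits here are illustrative):

    #include <stdio.h>

    struct mult_range { unsigned long mult, min, max; };

    static void find_best_mult(unsigned long parent, unsigned long rate,
    			   struct mult_range *m)
    {
    	long mult = rate / parent;

    	if (mult < (long)m->min)
    		mult = m->min;
    	if (mult > (long)m->max)
    		mult = m->max;
    	m->mult = mult;
    }

    int main(void)
    {
    	struct mult_range m = { .min = 2, .max = 1 << 5 };

    	find_best_mult(24000000, 24000000, &m);		/* 1x: clamped up to min */
    	printf("24 MHz request -> x%lu\n", m.mult);

    	find_best_mult(24000000, 600000000, &m);	/* 25x: within [2, 32] */
    	printf("600 MHz request -> x%lu\n", m.mult);

    	find_best_mult(24000000, 1200000000, &m);	/* 50x: clamped to max */
    	printf("1.2 GHz request -> x%lu\n", m.mult);
    	return 0;
    }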
diff --git a/drivers/clk/sunxi-ng/ccu_mult.h b/drivers/clk/sunxi-ng/ccu_mult.h
index 5d2c8dc14073..c1a2134bdc71 100644
--- a/drivers/clk/sunxi-ng/ccu_mult.h
+++ b/drivers/clk/sunxi-ng/ccu_mult.h
@@ -4,21 +4,26 @@
#include "ccu_common.h"
#include "ccu_mux.h"
-struct _ccu_mult {
+struct ccu_mult_internal {
u8 shift;
u8 width;
+ u8 min;
};
-#define _SUNXI_CCU_MULT(_shift, _width) \
- { \
- .shift = _shift, \
- .width = _width, \
+#define _SUNXI_CCU_MULT_MIN(_shift, _width, _min) \
+ { \
+ .shift = _shift, \
+ .width = _width, \
+ .min = _min, \
}
+#define _SUNXI_CCU_MULT(_shift, _width) \
+ _SUNXI_CCU_MULT_MIN(_shift, _width, 1)
+
struct ccu_mult {
u32 enable;
- struct _ccu_mult mult;
+ struct ccu_mult_internal mult;
struct ccu_mux_internal mux;
struct ccu_common common;
};
diff --git a/drivers/clk/sunxi-ng/ccu_nk.c b/drivers/clk/sunxi-ng/ccu_nk.c
index d6fafb397489..eaf0fdf78d2b 100644
--- a/drivers/clk/sunxi-ng/ccu_nk.c
+++ b/drivers/clk/sunxi-ng/ccu_nk.c
@@ -9,21 +9,24 @@
*/
#include <linux/clk-provider.h>
-#include <linux/rational.h>
#include "ccu_gate.h"
#include "ccu_nk.h"
+struct _ccu_nk {
+ unsigned long n, min_n, max_n;
+ unsigned long k, min_k, max_k;
+};
+
static void ccu_nk_find_best(unsigned long parent, unsigned long rate,
- unsigned int max_n, unsigned int max_k,
- unsigned int *n, unsigned int *k)
+ struct _ccu_nk *nk)
{
unsigned long best_rate = 0;
unsigned int best_k = 0, best_n = 0;
unsigned int _k, _n;
- for (_k = 1; _k <= max_k; _k++) {
- for (_n = 1; _n <= max_n; _n++) {
+ for (_k = nk->min_k; _k <= nk->max_k; _k++) {
+ for (_n = nk->min_n; _n <= nk->max_n; _n++) {
unsigned long tmp_rate = parent * _n * _k;
if (tmp_rate > rate)
@@ -37,8 +40,8 @@ static void ccu_nk_find_best(unsigned long parent, unsigned long rate,
}
}
- *k = best_k;
- *n = best_n;
+ nk->k = best_k;
+ nk->n = best_n;
}
static void ccu_nk_disable(struct clk_hw *hw)
@@ -89,16 +92,19 @@ static long ccu_nk_round_rate(struct clk_hw *hw, unsigned long rate,
unsigned long *parent_rate)
{
struct ccu_nk *nk = hw_to_ccu_nk(hw);
- unsigned int n, k;
+ struct _ccu_nk _nk;
if (nk->common.features & CCU_FEATURE_FIXED_POSTDIV)
rate *= nk->fixed_post_div;
- ccu_nk_find_best(*parent_rate, rate,
- 1 << nk->n.width, 1 << nk->k.width,
- &n, &k);
+ _nk.min_n = nk->n.min;
+ _nk.max_n = 1 << nk->n.width;
+ _nk.min_k = nk->k.min;
+ _nk.max_k = 1 << nk->k.width;
+
+ ccu_nk_find_best(*parent_rate, rate, &_nk);
+ rate = *parent_rate * _nk.n * _nk.k;
- rate = *parent_rate * n * k;
if (nk->common.features & CCU_FEATURE_FIXED_POSTDIV)
rate = rate / nk->fixed_post_div;
@@ -110,15 +116,18 @@ static int ccu_nk_set_rate(struct clk_hw *hw, unsigned long rate,
{
struct ccu_nk *nk = hw_to_ccu_nk(hw);
unsigned long flags;
- unsigned int n, k;
+ struct _ccu_nk _nk;
u32 reg;
if (nk->common.features & CCU_FEATURE_FIXED_POSTDIV)
rate = rate * nk->fixed_post_div;
- ccu_nk_find_best(parent_rate, rate,
- 1 << nk->n.width, 1 << nk->k.width,
- &n, &k);
+ _nk.min_n = nk->n.min;
+ _nk.max_n = 1 << nk->n.width;
+ _nk.min_k = nk->k.min;
+ _nk.max_k = 1 << nk->k.width;
+
+ ccu_nk_find_best(parent_rate, rate, &_nk);
spin_lock_irqsave(nk->common.lock, flags);
@@ -126,7 +135,7 @@ static int ccu_nk_set_rate(struct clk_hw *hw, unsigned long rate,
reg &= ~GENMASK(nk->n.width + nk->n.shift - 1, nk->n.shift);
reg &= ~GENMASK(nk->k.width + nk->k.shift - 1, nk->k.shift);
- writel(reg | ((k - 1) << nk->k.shift) | ((n - 1) << nk->n.shift),
+ writel(reg | ((_nk.k - 1) << nk->k.shift) | ((_nk.n - 1) << nk->n.shift),
nk->common.base + nk->common.reg);
spin_unlock_irqrestore(nk->common.lock, flags);
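
rational_best_approximation() could return factors whose product overshoots the requested rate; the replacement above walks every (n, k) pair and keeps the closest rate that does not exceed the target. A minimal userspace version of that search (parent, target, and field limits are illustrative):

    #include <stdio.h>

    int main(void)
    {
    	unsigned long parent = 24000000, rate = 1100000000;
    	unsigned long min_n = 1, max_n = 32, min_k = 2, max_k = 4;
    	unsigned long best = 0, best_n = 0, best_k = 0, n, k;

    	for (k = min_k; k <= max_k; k++) {
    		for (n = min_n; n <= max_n; n++) {
    			unsigned long tmp = parent * n * k;

    			if (tmp > rate)
    				continue;	/* never overshoot the target */
    			if (rate - tmp < rate - best) {
    				best = tmp;
    				best_n = n;
    				best_k = k;
    			}
    		}
    	}
    	printf("best: %lu Hz (n=%lu, k=%lu)\n", best, best_n, best_k);
    	return 0;
    }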
diff --git a/drivers/clk/sunxi-ng/ccu_nk.h b/drivers/clk/sunxi-ng/ccu_nk.h
index 4b52da0c29fe..437836b80696 100644
--- a/drivers/clk/sunxi-ng/ccu_nk.h
+++ b/drivers/clk/sunxi-ng/ccu_nk.h
@@ -30,8 +30,8 @@ struct ccu_nk {
u32 enable;
u32 lock;
- struct _ccu_mult n;
- struct _ccu_mult k;
+ struct ccu_mult_internal n;
+ struct ccu_mult_internal k;
unsigned int fixed_post_div;
diff --git a/drivers/clk/sunxi-ng/ccu_nkm.c b/drivers/clk/sunxi-ng/ccu_nkm.c
index 059fdc3b4f96..9b840a47a94d 100644
--- a/drivers/clk/sunxi-ng/ccu_nkm.c
+++ b/drivers/clk/sunxi-ng/ccu_nkm.c
@@ -9,15 +9,14 @@
*/
#include <linux/clk-provider.h>
-#include <linux/rational.h>
#include "ccu_gate.h"
#include "ccu_nkm.h"
struct _ccu_nkm {
- unsigned long n, max_n;
- unsigned long k, max_k;
- unsigned long m, max_m;
+ unsigned long n, min_n, max_n;
+ unsigned long k, min_k, max_k;
+ unsigned long m, min_m, max_m;
};
static void ccu_nkm_find_best(unsigned long parent, unsigned long rate,
@@ -27,22 +26,22 @@ static void ccu_nkm_find_best(unsigned long parent, unsigned long rate,
unsigned long best_n = 0, best_k = 0, best_m = 0;
unsigned long _n, _k, _m;
- for (_k = 1; _k <= nkm->max_k; _k++) {
- unsigned long tmp_rate;
-
- rational_best_approximation(rate / _k, parent,
- nkm->max_n, nkm->max_m, &_n, &_m);
-
- tmp_rate = parent * _n * _k / _m;
-
- if (tmp_rate > rate)
- continue;
-
- if ((rate - tmp_rate) < (rate - best_rate)) {
- best_rate = tmp_rate;
- best_n = _n;
- best_k = _k;
- best_m = _m;
+ for (_k = nkm->min_k; _k <= nkm->max_k; _k++) {
+ for (_n = nkm->min_n; _n <= nkm->max_n; _n++) {
+ for (_m = nkm->min_m; _m <= nkm->max_m; _m++) {
+ unsigned long tmp_rate;
+
+ tmp_rate = parent * _n * _k / _m;
+
+ if (tmp_rate > rate)
+ continue;
+ if ((rate - tmp_rate) < (rate - best_rate)) {
+ best_rate = tmp_rate;
+ best_n = _n;
+ best_k = _k;
+ best_m = _m;
+ }
+ }
}
}
@@ -101,8 +100,11 @@ static unsigned long ccu_nkm_round_rate(struct ccu_mux_internal *mux,
struct ccu_nkm *nkm = data;
struct _ccu_nkm _nkm;
+ _nkm.min_n = nkm->n.min;
_nkm.max_n = 1 << nkm->n.width;
+ _nkm.min_k = nkm->k.min;
_nkm.max_k = 1 << nkm->k.width;
+ _nkm.min_m = 1;
_nkm.max_m = nkm->m.max ?: 1 << nkm->m.width;
ccu_nkm_find_best(parent_rate, rate, &_nkm);
@@ -127,8 +129,11 @@ static int ccu_nkm_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long flags;
u32 reg;
+ _nkm.min_n = nkm->n.min;
_nkm.max_n = 1 << nkm->n.width;
+ _nkm.min_k = nkm->k.min;
_nkm.max_k = 1 << nkm->k.width;
+ _nkm.min_m = 1;
_nkm.max_m = nkm->m.max ?: 1 << nkm->m.width;
ccu_nkm_find_best(parent_rate, rate, &_nkm);
diff --git a/drivers/clk/sunxi-ng/ccu_nkm.h b/drivers/clk/sunxi-ng/ccu_nkm.h
index 35493fddd8ab..34580894f4d1 100644
--- a/drivers/clk/sunxi-ng/ccu_nkm.h
+++ b/drivers/clk/sunxi-ng/ccu_nkm.h
@@ -29,9 +29,9 @@ struct ccu_nkm {
u32 enable;
u32 lock;
- struct _ccu_mult n;
- struct _ccu_mult k;
- struct _ccu_div m;
+ struct ccu_mult_internal n;
+ struct ccu_mult_internal k;
+ struct ccu_div_internal m;
struct ccu_mux_internal mux;
struct ccu_common common;
diff --git a/drivers/clk/sunxi-ng/ccu_nkmp.c b/drivers/clk/sunxi-ng/ccu_nkmp.c
index 9769dee99511..684c42da3ebb 100644
--- a/drivers/clk/sunxi-ng/ccu_nkmp.c
+++ b/drivers/clk/sunxi-ng/ccu_nkmp.c
@@ -9,16 +9,15 @@
*/
#include <linux/clk-provider.h>
-#include <linux/rational.h>
#include "ccu_gate.h"
#include "ccu_nkmp.h"
struct _ccu_nkmp {
- unsigned long n, max_n;
- unsigned long k, max_k;
- unsigned long m, max_m;
- unsigned long p, max_p;
+ unsigned long n, min_n, max_n;
+ unsigned long k, min_k, max_k;
+ unsigned long m, min_m, max_m;
+ unsigned long p, min_p, max_p;
};
static void ccu_nkmp_find_best(unsigned long parent, unsigned long rate,
@@ -28,25 +27,25 @@ static void ccu_nkmp_find_best(unsigned long parent, unsigned long rate,
unsigned long best_n = 0, best_k = 0, best_m = 0, best_p = 0;
unsigned long _n, _k, _m, _p;
- for (_k = 1; _k <= nkmp->max_k; _k++) {
- for (_p = 1; _p <= nkmp->max_p; _p <<= 1) {
- unsigned long tmp_rate;
-
- rational_best_approximation(rate / _k, parent / _p,
- nkmp->max_n, nkmp->max_m,
- &_n, &_m);
-
- tmp_rate = parent * _n * _k / (_m * _p);
-
- if (tmp_rate > rate)
- continue;
-
- if ((rate - tmp_rate) < (rate - best_rate)) {
- best_rate = tmp_rate;
- best_n = _n;
- best_k = _k;
- best_m = _m;
- best_p = _p;
+ for (_k = nkmp->min_k; _k <= nkmp->max_k; _k++) {
+ for (_n = nkmp->min_n; _n <= nkmp->max_n; _n++) {
+ for (_m = nkmp->min_m; _m <= nkmp->max_m; _m++) {
+ for (_p = nkmp->min_p; _p <= nkmp->max_p; _p <<= 1) {
+ unsigned long tmp_rate;
+
+ tmp_rate = parent * _n * _k / (_m * _p);
+
+ if (tmp_rate > rate)
+ continue;
+
+ if ((rate - tmp_rate) < (rate - best_rate)) {
+ best_rate = tmp_rate;
+ best_n = _n;
+ best_k = _k;
+ best_m = _m;
+ best_p = _p;
+ }
+ }
}
}
}
@@ -108,9 +107,13 @@ static long ccu_nkmp_round_rate(struct clk_hw *hw, unsigned long rate,
struct ccu_nkmp *nkmp = hw_to_ccu_nkmp(hw);
struct _ccu_nkmp _nkmp;
+ _nkmp.min_n = nkmp->n.min;
_nkmp.max_n = 1 << nkmp->n.width;
+ _nkmp.min_k = nkmp->k.min;
_nkmp.max_k = 1 << nkmp->k.width;
+ _nkmp.min_m = 1;
_nkmp.max_m = nkmp->m.max ?: 1 << nkmp->m.width;
+ _nkmp.min_p = 1;
_nkmp.max_p = nkmp->p.max ?: 1 << ((1 << nkmp->p.width) - 1);
ccu_nkmp_find_best(*parent_rate, rate, &_nkmp);
@@ -126,9 +129,13 @@ static int ccu_nkmp_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long flags;
u32 reg;
+ _nkmp.min_n = 1;
_nkmp.max_n = 1 << nkmp->n.width;
+ _nkmp.min_k = 1;
_nkmp.max_k = 1 << nkmp->k.width;
+ _nkmp.min_m = 1;
_nkmp.max_m = nkmp->m.max ?: 1 << nkmp->m.width;
+ _nkmp.min_p = 1;
_nkmp.max_p = nkmp->p.max ?: 1 << ((1 << nkmp->p.width) - 1);
ccu_nkmp_find_best(parent_rate, rate, &_nkmp);
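
The NKMP search follows the same exhaustive pattern, with the post-divider p stepped in powers of two (p <<= 1) to match its power-of-two divider field. A short sketch with all four factors, under illustrative limits (5-bit n, 2-bit k and m, p up to 8):

    #include <stdio.h>

    int main(void)
    {
    	unsigned long parent = 24000000, rate = 100000000, best = 0;
    	unsigned long n, k, m, p;

    	for (k = 1; k <= 4; k++)
    		for (n = 1; n <= 32; n++)
    			for (m = 1; m <= 4; m++)
    				for (p = 1; p <= 8; p <<= 1) {
    					unsigned long tmp = parent * n * k / (m * p);

    					if (tmp > rate)
    						continue;
    					if (rate - tmp < rate - best)
    						best = tmp;
    				}

    	printf("closest rate not above %lu Hz: %lu Hz\n", rate, best);
    	return 0;
    }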
diff --git a/drivers/clk/sunxi-ng/ccu_nkmp.h b/drivers/clk/sunxi-ng/ccu_nkmp.h
index 5adb0c92a614..a82facbc6144 100644
--- a/drivers/clk/sunxi-ng/ccu_nkmp.h
+++ b/drivers/clk/sunxi-ng/ccu_nkmp.h
@@ -29,10 +29,10 @@ struct ccu_nkmp {
u32 enable;
u32 lock;
- struct _ccu_mult n;
- struct _ccu_mult k;
- struct _ccu_div m;
- struct _ccu_div p;
+ struct ccu_mult_internal n;
+ struct ccu_mult_internal k;
+ struct ccu_div_internal m;
+ struct ccu_div_internal p;
struct ccu_common common;
};
diff --git a/drivers/clk/sunxi-ng/ccu_nm.c b/drivers/clk/sunxi-ng/ccu_nm.c
index b61bdd8c7a7f..c9f3b6c982f0 100644
--- a/drivers/clk/sunxi-ng/ccu_nm.c
+++ b/drivers/clk/sunxi-ng/ccu_nm.c
@@ -9,12 +9,42 @@
*/
#include <linux/clk-provider.h>
-#include <linux/rational.h>
#include "ccu_frac.h"
#include "ccu_gate.h"
#include "ccu_nm.h"
+struct _ccu_nm {
+ unsigned long n, min_n, max_n;
+ unsigned long m, min_m, max_m;
+};
+
+static void ccu_nm_find_best(unsigned long parent, unsigned long rate,
+ struct _ccu_nm *nm)
+{
+ unsigned long best_rate = 0;
+ unsigned long best_n = 0, best_m = 0;
+ unsigned long _n, _m;
+
+ for (_n = nm->min_n; _n <= nm->max_n; _n++) {
+ for (_m = nm->min_m; _m <= nm->max_m; _m++) {
+ unsigned long tmp_rate = parent * _n / _m;
+
+ if (tmp_rate > rate)
+ continue;
+
+ if ((rate - tmp_rate) < (rate - best_rate)) {
+ best_rate = tmp_rate;
+ best_n = _n;
+ best_m = _m;
+ }
+ }
+ }
+
+ nm->n = best_n;
+ nm->m = best_m;
+}
+
static void ccu_nm_disable(struct clk_hw *hw)
{
struct ccu_nm *nm = hw_to_ccu_nm(hw);
@@ -61,24 +91,24 @@ static long ccu_nm_round_rate(struct clk_hw *hw, unsigned long rate,
unsigned long *parent_rate)
{
struct ccu_nm *nm = hw_to_ccu_nm(hw);
- unsigned long max_n, max_m;
- unsigned long n, m;
+ struct _ccu_nm _nm;
- max_n = 1 << nm->n.width;
- max_m = nm->m.max ?: 1 << nm->m.width;
+ _nm.min_n = nm->n.min;
+ _nm.max_n = 1 << nm->n.width;
+ _nm.min_m = 1;
+ _nm.max_m = nm->m.max ?: 1 << nm->m.width;
- rational_best_approximation(rate, *parent_rate, max_n, max_m, &n, &m);
+ ccu_nm_find_best(*parent_rate, rate, &_nm);
- return *parent_rate * n / m;
+ return *parent_rate * _nm.n / _nm.m;
}
static int ccu_nm_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate)
{
struct ccu_nm *nm = hw_to_ccu_nm(hw);
+ struct _ccu_nm _nm;
unsigned long flags;
- unsigned long max_n, max_m;
- unsigned long n, m;
u32 reg;
if (ccu_frac_helper_has_rate(&nm->common, &nm->frac, rate))
@@ -86,10 +116,12 @@ static int ccu_nm_set_rate(struct clk_hw *hw, unsigned long rate,
else
ccu_frac_helper_disable(&nm->common, &nm->frac);
- max_n = 1 << nm->n.width;
- max_m = nm->m.max ?: 1 << nm->m.width;
+ _nm.min_n = 1;
+ _nm.max_n = 1 << nm->n.width;
+ _nm.min_m = 1;
+ _nm.max_m = nm->m.max ?: 1 << nm->m.width;
- rational_best_approximation(rate, parent_rate, max_n, max_m, &n, &m);
+ ccu_nm_find_best(parent_rate, rate, &_nm);
spin_lock_irqsave(nm->common.lock, flags);
@@ -97,7 +129,7 @@ static int ccu_nm_set_rate(struct clk_hw *hw, unsigned long rate,
reg &= ~GENMASK(nm->n.width + nm->n.shift - 1, nm->n.shift);
reg &= ~GENMASK(nm->m.width + nm->m.shift - 1, nm->m.shift);
- writel(reg | ((m - 1) << nm->m.shift) | ((n - 1) << nm->n.shift),
+ writel(reg | ((_nm.m - 1) << nm->m.shift) | ((_nm.n - 1) << nm->n.shift),
nm->common.base + nm->common.reg);
spin_unlock_irqrestore(nm->common.lock, flags);
diff --git a/drivers/clk/sunxi-ng/ccu_nm.h b/drivers/clk/sunxi-ng/ccu_nm.h
index 0b7bcd33a2df..e87fd186da78 100644
--- a/drivers/clk/sunxi-ng/ccu_nm.h
+++ b/drivers/clk/sunxi-ng/ccu_nm.h
@@ -30,9 +30,9 @@ struct ccu_nm {
u32 enable;
u32 lock;
- struct _ccu_mult n;
- struct _ccu_div m;
- struct _ccu_frac frac;
+ struct ccu_mult_internal n;
+ struct ccu_div_internal m;
+ struct ccu_frac_internal frac;
struct ccu_common common;
};
diff --git a/drivers/clk/sunxi/clk-mod0.c b/drivers/clk/sunxi/clk-mod0.c
index e54266cc1c51..4417ae129ac7 100644
--- a/drivers/clk/sunxi/clk-mod0.c
+++ b/drivers/clk/sunxi/clk-mod0.c
@@ -24,7 +24,7 @@
#include "clk-factors.h"
/**
- * sun4i_get_mod0_factors() - calculates m, n factors for MOD0-style clocks
+ * sun4i_a10_get_mod0_factors() - calculates m, n factors for MOD0-style clocks
* MOD0 rate is calculated as follows
* rate = (parent_rate >> p) / (m + 1);
*/
diff --git a/drivers/clk/tegra/clk-tegra124-dfll-fcpu.c b/drivers/clk/tegra/clk-tegra124-dfll-fcpu.c
index c205809ba580..ad1c1cc829cb 100644
--- a/drivers/clk/tegra/clk-tegra124-dfll-fcpu.c
+++ b/drivers/clk/tegra/clk-tegra124-dfll-fcpu.c
@@ -20,7 +20,7 @@
#include <linux/cpu.h>
#include <linux/err.h>
#include <linux/kernel.h>
-#include <linux/module.h>
+#include <linux/init.h>
#include <linux/platform_device.h>
#include <soc/tegra/fuse.h>
@@ -148,7 +148,6 @@ static const struct of_device_id tegra124_dfll_fcpu_of_match[] = {
{ .compatible = "nvidia,tegra124-dfll", },
{ },
};
-MODULE_DEVICE_TABLE(of, tegra124_dfll_fcpu_of_match);
static const struct dev_pm_ops tegra124_dfll_pm_ops = {
SET_RUNTIME_PM_OPS(tegra_dfll_runtime_suspend,
@@ -164,20 +163,4 @@ static struct platform_driver tegra124_dfll_fcpu_driver = {
.pm = &tegra124_dfll_pm_ops,
},
};
-
-static int __init tegra124_dfll_fcpu_init(void)
-{
- return platform_driver_register(&tegra124_dfll_fcpu_driver);
-}
-module_init(tegra124_dfll_fcpu_init);
-
-static void __exit tegra124_dfll_fcpu_exit(void)
-{
- platform_driver_unregister(&tegra124_dfll_fcpu_driver);
-}
-module_exit(tegra124_dfll_fcpu_exit);
-
-MODULE_DESCRIPTION("Tegra124 DFLL clock source driver");
-MODULE_LICENSE("GPL v2");
-MODULE_AUTHOR("Aleksandr Frid <afrid@nvidia.com>");
-MODULE_AUTHOR("Paul Walmsley <pwalmsley@nvidia.com>");
+builtin_platform_driver(tegra124_dfll_fcpu_driver);
diff --git a/drivers/clk/tegra/cvb.c b/drivers/clk/tegra/cvb.c
index 624115e82ff9..da9e8e7b5ce5 100644
--- a/drivers/clk/tegra/cvb.c
+++ b/drivers/clk/tegra/cvb.c
@@ -92,19 +92,19 @@ static int build_opp_table(struct device *dev, const struct cvb_table *table,
/**
* tegra_cvb_add_opp_table - build OPP table from Tegra CVB tables
- * @cvb_tables: array of CVB tables
- * @sz: size of the previously mentioned array
+ * @dev: the struct device * for which the OPP table is built
+ * @tables: array of CVB tables
+ * @count: size of the previously mentioned array
* @process_id: process id of the HW module
* @speedo_id: speedo id of the HW module
* @speedo_value: speedo value of the HW module
- * @max_rate: highest safe clock rate
- * @opp_dev: the struct device * for which the OPP table is built
+ * @max_freq: highest safe clock rate
*
* On Tegra, a CVB table encodes the relationship between operating voltage
* and safe maximal frequency for a given module (e.g. GPU or CPU). This
* function calculates the optimal voltage-frequency operating points
* for the given arguments and exports them via the OPP library for the
- * given @opp_dev. Returns a pointer to the struct cvb_table that matched
+ * given @dev. Returns a pointer to the struct cvb_table that matched
* or an ERR_PTR on failure.
*/
const struct cvb_table *
diff --git a/drivers/clk/ti/clk-3xxx.c b/drivers/clk/ti/clk-3xxx.c
index 8831e1a05367..11d8aa3ec186 100644
--- a/drivers/clk/ti/clk-3xxx.c
+++ b/drivers/clk/ti/clk-3xxx.c
@@ -22,13 +22,6 @@
#include "clock.h"
-/*
- * DPLL5_FREQ_FOR_USBHOST: USBHOST and USBTLL are the only clocks
- * that are sourced by DPLL5, and both of these require this clock
- * to be at 120 MHz for proper operation.
- */
-#define DPLL5_FREQ_FOR_USBHOST 120000000
-
#define OMAP3430ES2_ST_DSS_IDLE_SHIFT 1
#define OMAP3430ES2_ST_HSOTGUSB_IDLE_SHIFT 5
#define OMAP3430ES2_ST_SSI_IDLE_SHIFT 8
@@ -546,14 +539,21 @@ void __init omap3_clk_lock_dpll5(void)
struct clk *dpll5_clk;
struct clk *dpll5_m2_clk;
+ /*
+ * Errata sprz319f advisory 2.1 documents a USB host clock drift issue
+ * that can be worked around using specially crafted dpll5 settings
+ * with a dpll5_m2 divider set to 8. Set the dpll5 rate to 8x the USB
+ * host clock rate, its .set_rate handler() will detect that frequency
+ * and use the errata settings.
+ */
dpll5_clk = clk_get(NULL, "dpll5_ck");
- clk_set_rate(dpll5_clk, DPLL5_FREQ_FOR_USBHOST);
+ clk_set_rate(dpll5_clk, OMAP3_DPLL5_FREQ_FOR_USBHOST * 8);
clk_prepare_enable(dpll5_clk);
- /* Program dpll5_m2_clk divider for no division */
+ /* Program dpll5_m2_clk divider */
dpll5_m2_clk = clk_get(NULL, "dpll5_m2_ck");
clk_prepare_enable(dpll5_m2_clk);
- clk_set_rate(dpll5_m2_clk, DPLL5_FREQ_FOR_USBHOST);
+ clk_set_rate(dpll5_m2_clk, OMAP3_DPLL5_FREQ_FOR_USBHOST);
clk_disable_unprepare(dpll5_m2_clk);
clk_disable_unprepare(dpll5_clk);
diff --git a/drivers/clk/ti/clk-7xx.c b/drivers/clk/ti/clk-7xx.c
index bfa17d33ef3b..9fd6043314eb 100644
--- a/drivers/clk/ti/clk-7xx.c
+++ b/drivers/clk/ti/clk-7xx.c
@@ -201,7 +201,6 @@ static struct ti_dt_clk dra7xx_clks[] = {
DT_CLK(NULL, "atl_dpll_clk_mux", "atl_dpll_clk_mux"),
DT_CLK(NULL, "atl_gfclk_mux", "atl_gfclk_mux"),
DT_CLK(NULL, "dcan1_sys_clk_mux", "dcan1_sys_clk_mux"),
- DT_CLK(NULL, "gmac_gmii_ref_clk_div", "gmac_gmii_ref_clk_div"),
DT_CLK(NULL, "gmac_rft_clk_mux", "gmac_rft_clk_mux"),
DT_CLK(NULL, "gpu_core_gclk_mux", "gpu_core_gclk_mux"),
DT_CLK(NULL, "gpu_hyd_gclk_mux", "gpu_hyd_gclk_mux"),
diff --git a/drivers/clk/ti/clk-dra7-atl.c b/drivers/clk/ti/clk-dra7-atl.c
index c77333230bdf..45d05339d583 100644
--- a/drivers/clk/ti/clk-dra7-atl.c
+++ b/drivers/clk/ti/clk-dra7-atl.c
@@ -15,7 +15,7 @@
* GNU General Public License for more details.
*/
-#include <linux/module.h>
+#include <linux/init.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/slab.h>
@@ -295,31 +295,17 @@ static int of_dra7_atl_clk_probe(struct platform_device *pdev)
return ret;
}
-static int of_dra7_atl_clk_remove(struct platform_device *pdev)
-{
- pm_runtime_disable(&pdev->dev);
-
- return 0;
-}
-
static const struct of_device_id of_dra7_atl_clk_match_tbl[] = {
{ .compatible = "ti,dra7-atl", },
{},
};
-MODULE_DEVICE_TABLE(of, of_dra7_atl_clk_match_tbl);
static struct platform_driver dra7_atl_clk_driver = {
.driver = {
.name = "dra7-atl",
+ .suppress_bind_attrs = true,
.of_match_table = of_dra7_atl_clk_match_tbl,
},
.probe = of_dra7_atl_clk_probe,
- .remove = of_dra7_atl_clk_remove,
};
-
-module_platform_driver(dra7_atl_clk_driver);
-
-MODULE_DESCRIPTION("Clock driver for DRA7 Audio Tracking Logic");
-MODULE_ALIAS("platform:dra7-atl-clock");
-MODULE_AUTHOR("Peter Ujfalusi <peter.ujfalusi@ti.com>");
-MODULE_LICENSE("GPL v2");
+builtin_platform_driver(dra7_atl_clk_driver);
diff --git a/drivers/clk/ti/clock.h b/drivers/clk/ti/clock.h
index 90f3f472ae1c..13c37f48d9d6 100644
--- a/drivers/clk/ti/clock.h
+++ b/drivers/clk/ti/clock.h
@@ -257,11 +257,20 @@ long omap2_dpll_round_rate(struct clk_hw *hw, unsigned long target_rate,
unsigned long omap3_clkoutx2_recalc(struct clk_hw *hw,
unsigned long parent_rate);
+/*
+ * OMAP3_DPLL5_FREQ_FOR_USBHOST: USBHOST and USBTLL are the only clocks
+ * that are sourced by DPLL5, and both of these require this clock
+ * to be at 120 MHz for proper operation.
+ */
+#define OMAP3_DPLL5_FREQ_FOR_USBHOST 120000000
+
unsigned long omap3_dpll_recalc(struct clk_hw *hw, unsigned long parent_rate);
int omap3_dpll4_set_rate(struct clk_hw *clk, unsigned long rate,
unsigned long parent_rate);
int omap3_dpll4_set_rate_and_parent(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate, u8 index);
+int omap3_dpll5_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate);
void omap3_clk_lock_dpll5(void);
unsigned long omap4_dpll_regm4xen_recalc(struct clk_hw *hw,
diff --git a/drivers/clk/ti/dpll.c b/drivers/clk/ti/dpll.c
index 9fc8754a6e61..4b9a419d8e14 100644
--- a/drivers/clk/ti/dpll.c
+++ b/drivers/clk/ti/dpll.c
@@ -114,6 +114,18 @@ static const struct clk_ops omap3_dpll_ck_ops = {
.round_rate = &omap2_dpll_round_rate,
};
+static const struct clk_ops omap3_dpll5_ck_ops = {
+ .enable = &omap3_noncore_dpll_enable,
+ .disable = &omap3_noncore_dpll_disable,
+ .get_parent = &omap2_init_dpll_parent,
+ .recalc_rate = &omap3_dpll_recalc,
+ .set_rate = &omap3_dpll5_set_rate,
+ .set_parent = &omap3_noncore_dpll_set_parent,
+ .set_rate_and_parent = &omap3_noncore_dpll_set_rate_and_parent,
+ .determine_rate = &omap3_noncore_dpll_determine_rate,
+ .round_rate = &omap2_dpll_round_rate,
+};
+
static const struct clk_ops omap3_dpll_per_ck_ops = {
.enable = &omap3_noncore_dpll_enable,
.disable = &omap3_noncore_dpll_disable,
@@ -474,7 +486,12 @@ static void __init of_ti_omap3_dpll_setup(struct device_node *node)
.modes = (1 << DPLL_LOW_POWER_BYPASS) | (1 << DPLL_LOCKED),
};
- of_ti_dpll_setup(node, &omap3_dpll_ck_ops, &dd);
+ if ((of_machine_is_compatible("ti,omap3630") ||
+ of_machine_is_compatible("ti,omap36xx")) &&
+ !strcmp(node->name, "dpll5_ck"))
+ of_ti_dpll_setup(node, &omap3_dpll5_ck_ops, &dd);
+ else
+ of_ti_dpll_setup(node, &omap3_dpll_ck_ops, &dd);
}
CLK_OF_DECLARE(ti_omap3_dpll_clock, "ti,omap3-dpll-clock",
of_ti_omap3_dpll_setup);
diff --git a/drivers/clk/ti/dpll3xxx.c b/drivers/clk/ti/dpll3xxx.c
index 88f2ce81ba55..4cdd28a25584 100644
--- a/drivers/clk/ti/dpll3xxx.c
+++ b/drivers/clk/ti/dpll3xxx.c
@@ -838,3 +838,70 @@ int omap3_dpll4_set_rate_and_parent(struct clk_hw *hw, unsigned long rate,
return omap3_noncore_dpll_set_rate_and_parent(hw, rate, parent_rate,
index);
}
+
+/* Apply DM3730 errata sprz319 advisory 2.1. */
+static bool omap3_dpll5_apply_errata(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct omap3_dpll5_settings {
+ unsigned int rate, m, n;
+ };
+
+ static const struct omap3_dpll5_settings precomputed[] = {
+ /*
+ * From DM3730 errata advisory 2.1, table 35 and 36.
+ * The N value is increased by 1 compared to the tables as the
+ * errata lists register values while last_rounded_field is the
+ * real divider value.
+ */
+ { 12000000, 80, 0 + 1 },
+ { 13000000, 443, 5 + 1 },
+ { 19200000, 50, 0 + 1 },
+ { 26000000, 443, 11 + 1 },
+ { 38400000, 25, 0 + 1 }
+ };
+
+ const struct omap3_dpll5_settings *d;
+ struct clk_hw_omap *clk = to_clk_hw_omap(hw);
+ struct dpll_data *dd;
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(precomputed); ++i) {
+ if (parent_rate == precomputed[i].rate)
+ break;
+ }
+
+ if (i == ARRAY_SIZE(precomputed))
+ return false;
+
+ d = &precomputed[i];
+
+ /* Update the M, N and rounded rate values and program the DPLL. */
+ dd = clk->dpll_data;
+ dd->last_rounded_m = d->m;
+ dd->last_rounded_n = d->n;
+ dd->last_rounded_rate = div_u64((u64)parent_rate * d->m, d->n);
+ omap3_noncore_dpll_program(clk, 0);
+
+ return true;
+}
+
+/**
+ * omap3_dpll5_set_rate - set rate for omap3 dpll5
+ * @hw: clock to change
+ * @rate: target rate for clock
+ * @parent_rate: rate of the parent clock
+ *
+ * Set rate for the DPLL5 clock. Apply the sprz319 advisory 2.1 on OMAP36xx if
+ * the DPLL is used for USB host (detected through the requested rate).
+ */
+int omap3_dpll5_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ if (rate == OMAP3_DPLL5_FREQ_FOR_USBHOST * 8) {
+ if (omap3_dpll5_apply_errata(hw, parent_rate))
+ return 0;
+ }
+
+ return omap3_noncore_dpll_set_rate(hw, rate, parent_rate);
+}
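A quick arithmetic check of the errata table above (host-side sketch; the harness itself is illustrative): every precomputed M/N pair puts dpll5 within a few hundred ppm of 960 MHz, i.e. 8x the 120 MHz USB host clock, so the fixed dpll5_m2 divider of 8 recovers the required rate:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	static const struct { unsigned int rate, m, n; } t[] = {
		{ 12000000,  80,  0 + 1 },
		{ 13000000, 443,  5 + 1 },
		{ 19200000,  50,  0 + 1 },
		{ 26000000, 443, 11 + 1 },
		{ 38400000,  25,  0 + 1 },
	};
	unsigned int i;

	for (i = 0; i < sizeof(t) / sizeof(t[0]); i++) {
		/* Same computation as dd->last_rounded_rate above */
		uint64_t dpll5 = (uint64_t)t[i].rate * t[i].m / t[i].n;

		printf("ref %8u Hz -> dpll5 %llu Hz -> /8 = %llu Hz\n",
		       t[i].rate, (unsigned long long)dpll5,
		       (unsigned long long)(dpll5 / 8));
	}
	return 0;
}

For the 13 MHz and 26 MHz references the result is 959833333 Hz, about 174 ppm low; the 12, 19.2 and 38.4 MHz entries hit 960 MHz exactly.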
diff --git a/drivers/clk/uniphier/Makefile b/drivers/clk/uniphier/Makefile
index f27b360329ca..665d1d65a90e 100644
--- a/drivers/clk/uniphier/Makefile
+++ b/drivers/clk/uniphier/Makefile
@@ -1,8 +1,11 @@
obj-y += clk-uniphier-core.o
+
+obj-y += clk-uniphier-cpugear.o
obj-y += clk-uniphier-fixed-factor.o
obj-y += clk-uniphier-fixed-rate.o
obj-y += clk-uniphier-gate.o
obj-y += clk-uniphier-mux.o
+
obj-y += clk-uniphier-sys.o
obj-y += clk-uniphier-mio.o
obj-y += clk-uniphier-peri.o
diff --git a/drivers/clk/uniphier/clk-uniphier-core.c b/drivers/clk/uniphier/clk-uniphier-core.c
index 26c53f7963a4..0007218ce6a0 100644
--- a/drivers/clk/uniphier/clk-uniphier-core.c
+++ b/drivers/clk/uniphier/clk-uniphier-core.c
@@ -27,6 +27,9 @@ static struct clk_hw *uniphier_clk_register(struct device *dev,
const struct uniphier_clk_data *data)
{
switch (data->type) {
+ case UNIPHIER_CLK_TYPE_CPUGEAR:
+ return uniphier_clk_register_cpugear(dev, regmap, data->name,
+ &data->data.cpugear);
case UNIPHIER_CLK_TYPE_FIXED_FACTOR:
return uniphier_clk_register_fixed_factor(dev, data->name,
&data->data.factor);
diff --git a/drivers/clk/uniphier/clk-uniphier-cpugear.c b/drivers/clk/uniphier/clk-uniphier-cpugear.c
new file mode 100644
index 000000000000..9bff26e0cbb0
--- /dev/null
+++ b/drivers/clk/uniphier/clk-uniphier-cpugear.c
@@ -0,0 +1,115 @@
+/*
+ * Copyright (C) 2016 Socionext Inc.
+ * Author: Masahiro Yamada <yamada.masahiro@socionext.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/regmap.h>
+
+#include "clk-uniphier.h"
+
+#define UNIPHIER_CLK_CPUGEAR_STAT 0 /* status */
+#define UNIPHIER_CLK_CPUGEAR_SET 4 /* set */
+#define UNIPHIER_CLK_CPUGEAR_UPD 8 /* update */
+#define UNIPHIER_CLK_CPUGEAR_UPD_BIT BIT(0)
+
+struct uniphier_clk_cpugear {
+ struct clk_hw hw;
+ struct regmap *regmap;
+ unsigned int regbase;
+ unsigned int mask;
+};
+
+#define to_uniphier_clk_cpugear(_hw) \
+ container_of(_hw, struct uniphier_clk_cpugear, hw)
+
+static int uniphier_clk_cpugear_set_parent(struct clk_hw *hw, u8 index)
+{
+ struct uniphier_clk_cpugear *gear = to_uniphier_clk_cpugear(hw);
+ int ret;
+ unsigned int val;
+
+ ret = regmap_write_bits(gear->regmap,
+ gear->regbase + UNIPHIER_CLK_CPUGEAR_SET,
+ gear->mask, index);
+ if (ret)
+ return ret;
+
+ ret = regmap_write_bits(gear->regmap,
+ gear->regbase + UNIPHIER_CLK_CPUGEAR_SET,
+ UNIPHIER_CLK_CPUGEAR_UPD_BIT,
+ UNIPHIER_CLK_CPUGEAR_UPD_BIT);
+ if (ret)
+ return ret;
+
+ return regmap_read_poll_timeout(gear->regmap,
+ gear->regbase + UNIPHIER_CLK_CPUGEAR_UPD,
+ val, !(val & UNIPHIER_CLK_CPUGEAR_UPD_BIT),
+ 0, 1);
+}
+
+static u8 uniphier_clk_cpugear_get_parent(struct clk_hw *hw)
+{
+ struct uniphier_clk_cpugear *gear = to_uniphier_clk_cpugear(hw);
+ int num_parents = clk_hw_get_num_parents(hw);
+ int ret;
+ unsigned int val;
+
+ ret = regmap_read(gear->regmap,
+ gear->regbase + UNIPHIER_CLK_CPUGEAR_STAT, &val);
+ if (ret)
+ return ret;
+
+ val &= gear->mask;
+
+ return val < num_parents ? val : -EINVAL;
+}
+
+static const struct clk_ops uniphier_clk_cpugear_ops = {
+ .determine_rate = __clk_mux_determine_rate,
+ .set_parent = uniphier_clk_cpugear_set_parent,
+ .get_parent = uniphier_clk_cpugear_get_parent,
+};
+
+struct clk_hw *uniphier_clk_register_cpugear(struct device *dev,
+ struct regmap *regmap,
+ const char *name,
+ const struct uniphier_clk_cpugear_data *data)
+{
+ struct uniphier_clk_cpugear *gear;
+ struct clk_init_data init;
+ int ret;
+
+ gear = devm_kzalloc(dev, sizeof(*gear), GFP_KERNEL);
+ if (!gear)
+ return ERR_PTR(-ENOMEM);
+
+ init.name = name;
+ init.ops = &uniphier_clk_cpugear_ops;
+ init.flags = CLK_SET_RATE_PARENT;
+ init.parent_names = data->parent_names;
+ init.num_parents = data->num_parents;
+
+ gear->regmap = regmap;
+ gear->regbase = data->regbase;
+ gear->mask = data->mask;
+ gear->hw.init = &init;
+
+ ret = devm_clk_hw_register(dev, &gear->hw);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return &gear->hw;
+}
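Because the gear is registered with CLK_SET_RATE_PARENT and __clk_mux_determine_rate, a consumer can switch CPU speed with a plain clk_set_rate() and let the clk framework pick the closest fixed-factor parent. A hypothetical consumer sketch (the "cpu-ca53" lookup name and the surrounding driver are assumptions, not part of this patch):

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

static int example_cpu_set_rate(struct device *dev, unsigned long rate)
{
	struct clk *gear;

	/* Hypothetical lookup; relies on a matching clocks/clock-names DT entry */
	gear = devm_clk_get(dev, "cpu-ca53");
	if (IS_ERR(gear))
		return PTR_ERR(gear);

	/*
	 * The clk core re-parents the mux via
	 * uniphier_clk_cpugear_set_parent(), which writes the parent index
	 * to the SET register, pulses the UPD bit, and polls it clear.
	 */
	return clk_set_rate(gear, rate);
}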
diff --git a/drivers/clk/uniphier/clk-uniphier-sys.c b/drivers/clk/uniphier/clk-uniphier-sys.c
index 5d029991047d..d049316c1c0f 100644
--- a/drivers/clk/uniphier/clk-uniphier-sys.c
+++ b/drivers/clk/uniphier/clk-uniphier-sys.c
@@ -125,16 +125,35 @@ const struct uniphier_clk_data uniphier_pxs2_sys_clk_data[] = {
};
const struct uniphier_clk_data uniphier_ld11_sys_clk_data[] = {
+ UNIPHIER_CLK_FACTOR("cpll", -1, "ref", 392, 5), /* 1960 MHz */
+ UNIPHIER_CLK_FACTOR("mpll", -1, "ref", 64, 1), /* 1600 MHz */
UNIPHIER_CLK_FACTOR("spll", -1, "ref", 80, 1), /* 2000 MHz */
+ UNIPHIER_CLK_FACTOR("vspll", -1, "ref", 80, 1), /* 2000 MHz */
UNIPHIER_CLK_FACTOR("uart", 0, "spll", 1, 34),
UNIPHIER_CLK_FACTOR("i2c", 1, "spll", 1, 40),
UNIPHIER_LD11_SYS_CLK_STDMAC(8), /* HSC, MIO */
UNIPHIER_CLK_FACTOR("usb2", -1, "ref", 24, 25),
+ /* CPU gears */
+ UNIPHIER_CLK_DIV4("cpll", 2, 3, 4, 8),
+ UNIPHIER_CLK_DIV4("mpll", 2, 3, 4, 8),
+ UNIPHIER_CLK_DIV3("spll", 3, 4, 8),
+ /* Note: both gear1 and gear4 are spll/4. This is not a bug. */
+ UNIPHIER_CLK_CPUGEAR("cpu-ca53", 33, 0x8080, 0xf, 8,
+ "cpll/2", "spll/4", "cpll/3", "spll/3",
+ "spll/4", "spll/8", "cpll/4", "cpll/8"),
+ UNIPHIER_CLK_CPUGEAR("cpu-ipp", 34, 0x8100, 0xf, 8,
+ "mpll/2", "spll/4", "mpll/3", "spll/3",
+ "spll/4", "spll/8", "mpll/4", "mpll/8"),
{ /* sentinel */ }
};
const struct uniphier_clk_data uniphier_ld20_sys_clk_data[] = {
+ UNIPHIER_CLK_FACTOR("cpll", -1, "ref", 88, 1), /* ARM: 2200 MHz */
+ UNIPHIER_CLK_FACTOR("gppll", -1, "ref", 52, 1), /* Mali: 1300 MHz */
+ UNIPHIER_CLK_FACTOR("mpll", -1, "ref", 64, 1), /* Codec: 1600 MHz */
UNIPHIER_CLK_FACTOR("spll", -1, "ref", 80, 1), /* 2000 MHz */
+ UNIPHIER_CLK_FACTOR("s2pll", -1, "ref", 88, 1), /* IPP: 2200 MHz */
+ UNIPHIER_CLK_FACTOR("vppll", -1, "ref", 504, 5), /* 2520 MHz */
UNIPHIER_CLK_FACTOR("uart", 0, "spll", 1, 34),
UNIPHIER_CLK_FACTOR("i2c", 1, "spll", 1, 40),
UNIPHIER_LD20_SYS_CLK_SD,
@@ -147,5 +166,18 @@ const struct uniphier_clk_data uniphier_ld20_sys_clk_data[] = {
UNIPHIER_CLK_GATE("usb30", 14, NULL, 0x210c, 14),
UNIPHIER_CLK_GATE("usb30-phy0", 16, NULL, 0x210c, 12),
UNIPHIER_CLK_GATE("usb30-phy1", 17, NULL, 0x210c, 13),
+ /* CPU gears */
+ UNIPHIER_CLK_DIV4("cpll", 2, 3, 4, 8),
+ UNIPHIER_CLK_DIV4("spll", 2, 3, 4, 8),
+ UNIPHIER_CLK_DIV4("s2pll", 2, 3, 4, 8),
+ UNIPHIER_CLK_CPUGEAR("cpu-ca72", 32, 0x8000, 0xf, 8,
+ "cpll/2", "spll/2", "cpll/3", "spll/3",
+ "spll/4", "spll/8", "cpll/4", "cpll/8"),
+ UNIPHIER_CLK_CPUGEAR("cpu-ca53", 33, 0x8080, 0xf, 8,
+ "cpll/2", "spll/2", "cpll/3", "spll/3",
+ "spll/4", "spll/8", "cpll/4", "cpll/8"),
+ UNIPHIER_CLK_CPUGEAR("cpu-ipp", 34, 0x8100, 0xf, 8,
+ "s2pll/2", "spll/2", "s2pll/3", "spll/3",
+ "spll/4", "spll/8", "s2pll/4", "s2pll/8"),
{ /* sentinel */ }
};
diff --git a/drivers/clk/uniphier/clk-uniphier.h b/drivers/clk/uniphier/clk-uniphier.h
index 0244dba1f4cf..01c16ecec48f 100644
--- a/drivers/clk/uniphier/clk-uniphier.h
+++ b/drivers/clk/uniphier/clk-uniphier.h
@@ -20,15 +20,24 @@ struct clk_hw;
struct device;
struct regmap;
-#define UNIPHIER_CLK_MUX_MAX_PARENTS 8
+#define UNIPHIER_CLK_CPUGEAR_MAX_PARENTS 16
+#define UNIPHIER_CLK_MUX_MAX_PARENTS 8
enum uniphier_clk_type {
+ UNIPHIER_CLK_TYPE_CPUGEAR,
UNIPHIER_CLK_TYPE_FIXED_FACTOR,
UNIPHIER_CLK_TYPE_FIXED_RATE,
UNIPHIER_CLK_TYPE_GATE,
UNIPHIER_CLK_TYPE_MUX,
};
+struct uniphier_clk_cpugear_data {
+ const char *parent_names[UNIPHIER_CLK_CPUGEAR_MAX_PARENTS];
+ unsigned int num_parents;
+ unsigned int regbase;
+ unsigned int mask;
+};
+
struct uniphier_clk_fixed_factor_data {
const char *parent_name;
unsigned int mult;
@@ -58,6 +67,7 @@ struct uniphier_clk_data {
enum uniphier_clk_type type;
int idx;
union {
+ struct uniphier_clk_cpugear_data cpugear;
struct uniphier_clk_fixed_factor_data factor;
struct uniphier_clk_fixed_rate_data rate;
struct uniphier_clk_gate_data gate;
@@ -65,6 +75,20 @@ struct uniphier_clk_data {
} data;
};
+#define UNIPHIER_CLK_CPUGEAR(_name, _idx, _regbase, _mask, \
+ _num_parents, ...) \
+ { \
+ .name = (_name), \
+ .type = UNIPHIER_CLK_TYPE_CPUGEAR, \
+ .idx = (_idx), \
+ .data.cpugear = { \
+ .parent_names = { __VA_ARGS__ }, \
+ .num_parents = (_num_parents), \
+ .regbase = (_regbase), \
+ .mask = (_mask) \
+ }, \
+ }
+
#define UNIPHIER_CLK_FACTOR(_name, _idx, _parent, _mult, _div) \
{ \
.name = (_name), \
@@ -77,7 +101,6 @@ struct uniphier_clk_data {
}, \
}
-
#define UNIPHIER_CLK_GATE(_name, _idx, _parent, _reg, _bit) \
{ \
.name = (_name), \
@@ -90,7 +113,25 @@ struct uniphier_clk_data {
}, \
}
+#define UNIPHIER_CLK_DIV(parent, div) \
+ UNIPHIER_CLK_FACTOR(parent "/" #div, -1, parent, 1, div)
+
+#define UNIPHIER_CLK_DIV2(parent, div0, div1) \
+ UNIPHIER_CLK_DIV(parent, div0), \
+ UNIPHIER_CLK_DIV(parent, div1)
+
+#define UNIPHIER_CLK_DIV3(parent, div0, div1, div2) \
+ UNIPHIER_CLK_DIV2(parent, div0, div1), \
+ UNIPHIER_CLK_DIV(parent, div2)
+
+#define UNIPHIER_CLK_DIV4(parent, div0, div1, div2, div3) \
+ UNIPHIER_CLK_DIV2(parent, div0, div1), \
+ UNIPHIER_CLK_DIV2(parent, div2, div3)
+struct clk_hw *uniphier_clk_register_cpugear(struct device *dev,
+ struct regmap *regmap,
+ const char *name,
+ const struct uniphier_clk_cpugear_data *data);
struct clk_hw *uniphier_clk_register_fixed_factor(struct device *dev,
const char *name,
const struct uniphier_clk_fixed_factor_data *data);
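The UNIPHIER_CLK_DIVn helpers used in the tables above are plain shorthand; expanding UNIPHIER_CLK_DIV4("cpll", 2, 3, 4, 8) from the LD11 data (this follows mechanically from the macro text, no assumptions) yields four fixed-factor entries:

UNIPHIER_CLK_FACTOR("cpll/2", -1, "cpll", 1, 2),
UNIPHIER_CLK_FACTOR("cpll/3", -1, "cpll", 1, 3),
UNIPHIER_CLK_FACTOR("cpll/4", -1, "cpll", 1, 4),
UNIPHIER_CLK_FACTOR("cpll/8", -1, "cpll", 1, 8),

These are the "cpll/2", "spll/4", etc. parent names consumed by the UNIPHIER_CLK_CPUGEAR entries.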
diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig
index e2c6e43cf8ca..4866f7aa32e6 100644
--- a/drivers/clocksource/Kconfig
+++ b/drivers/clocksource/Kconfig
@@ -282,6 +282,26 @@ config CLKSRC_MPS2
select CLKSRC_MMIO
select CLKSRC_OF
+config ARC_TIMERS
+ bool "Support for 32-bit TIMERn counters in ARC Cores" if COMPILE_TEST
+ depends on GENERIC_CLOCKEVENTS
+ select CLKSRC_OF
+ help
+ These are the legacy 32-bit TIMER0 and TIMER1 counters found on all ARC
+ cores (ARC700 as well as ARC HS38). TIMER0 serves as the clockevent
+ device, while TIMER1 provides the clocksource.
+
+config ARC_TIMERS_64BIT
+ bool "Support for 64-bit counters in ARC HS38 cores" if COMPILE_TEST
+ depends on GENERIC_CLOCKEVENTS
+ depends on ARC_TIMERS
+ select CLKSRC_OF
+ help
+ This enables two different 64-bit timers: RTC (for UP) and GFRC (for SMP).
+ RTC is implemented inside the core, while GFRC sits outside the core in
+ the ARConnect IP block. The driver automatically picks the appropriate one
+ as clocksource.
+
config ARM_ARCH_TIMER
bool
select CLKSRC_OF if OF
diff --git a/drivers/clocksource/Makefile b/drivers/clocksource/Makefile
index cf87f407f1ad..a14111e1f087 100644
--- a/drivers/clocksource/Makefile
+++ b/drivers/clocksource/Makefile
@@ -51,6 +51,7 @@ obj-$(CONFIG_CLKSRC_TI_32K) += timer-ti-32k.o
obj-$(CONFIG_CLKSRC_NPS) += timer-nps.o
obj-$(CONFIG_OXNAS_RPS_TIMER) += timer-oxnas-rps.o
+obj-$(CONFIG_ARC_TIMERS) += arc_timer.o
obj-$(CONFIG_ARM_ARCH_TIMER) += arm_arch_timer.o
obj-$(CONFIG_ARM_GLOBAL_TIMER) += arm_global_timer.o
obj-$(CONFIG_ARMV7M_SYSTICK) += armv7m_systick.o
diff --git a/drivers/clocksource/arc_timer.c b/drivers/clocksource/arc_timer.c
new file mode 100644
index 000000000000..a49748d826c0
--- /dev/null
+++ b/drivers/clocksource/arc_timer.c
@@ -0,0 +1,336 @@
+/*
+ * Copyright (C) 2016-17 Synopsys, Inc. (www.synopsys.com)
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/* ARC700 has two independent, programmable 32-bit timers: TIMER0 and TIMER1.
+ * Each can be programmed to count from @count to @limit and optionally
+ * interrupt. We've designated TIMER0 for clockevents and TIMER1 for the
+ * clocksource.
+ *
+ * ARCv2 based HS38 cores have RTC (in-core) and GFRC (inside ARConnect/MCIP),
+ * which are suitable for UP and SMP clocksources, respectively.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/clocksource.h>
+#include <linux/clockchips.h>
+#include <linux/cpu.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+
+#include <soc/arc/timers.h>
+#include <soc/arc/mcip.h>
+
+
+static unsigned long arc_timer_freq;
+
+static int noinline arc_get_timer_clk(struct device_node *node)
+{
+ struct clk *clk;
+ int ret;
+
+ clk = of_clk_get(node, 0);
+ if (IS_ERR(clk)) {
+ pr_err("timer missing clk");
+ return PTR_ERR(clk);
+ }
+
+ ret = clk_prepare_enable(clk);
+ if (ret) {
+ pr_err("Couldn't enable parent clk\n");
+ return ret;
+ }
+
+ arc_timer_freq = clk_get_rate(clk);
+
+ return 0;
+}
+
+/********** Clock Source Device *********/
+
+#ifdef CONFIG_ARC_TIMERS_64BIT
+
+static cycle_t arc_read_gfrc(struct clocksource *cs)
+{
+ unsigned long flags;
+ u32 l, h;
+
+ local_irq_save(flags);
+
+ __mcip_cmd(CMD_GFRC_READ_LO, 0);
+ l = read_aux_reg(ARC_REG_MCIP_READBACK);
+
+ __mcip_cmd(CMD_GFRC_READ_HI, 0);
+ h = read_aux_reg(ARC_REG_MCIP_READBACK);
+
+ local_irq_restore(flags);
+
+ return (((cycle_t)h) << 32) | l;
+}
+
+static struct clocksource arc_counter_gfrc = {
+ .name = "ARConnect GFRC",
+ .rating = 400,
+ .read = arc_read_gfrc,
+ .mask = CLOCKSOURCE_MASK(64),
+ .flags = CLOCK_SOURCE_IS_CONTINUOUS,
+};
+
+static int __init arc_cs_setup_gfrc(struct device_node *node)
+{
+ struct mcip_bcr mp;
+ int ret;
+
+ READ_BCR(ARC_REG_MCIP_BCR, mp);
+ if (!mp.gfrc) {
+ pr_warn("Global-64-bit-Ctr clocksource not detected");
+ return -ENXIO;
+ }
+
+ ret = arc_get_timer_clk(node);
+ if (ret)
+ return ret;
+
+ return clocksource_register_hz(&arc_counter_gfrc, arc_timer_freq);
+}
+CLOCKSOURCE_OF_DECLARE(arc_gfrc, "snps,archs-timer-gfrc", arc_cs_setup_gfrc);
+
+#define AUX_RTC_CTRL 0x103
+#define AUX_RTC_LOW 0x104
+#define AUX_RTC_HIGH 0x105
+
+static cycle_t arc_read_rtc(struct clocksource *cs)
+{
+ unsigned long status;
+ u32 l, h;
+
+ /*
+ * hardware has an internal state machine which tracks readout of
+ * low/high and updates the CTRL.status if
+ * - interrupt/exception taken between the two reads
+ * - high increments after low has been read
+ */
+ do {
+ l = read_aux_reg(AUX_RTC_LOW);
+ h = read_aux_reg(AUX_RTC_HIGH);
+ status = read_aux_reg(AUX_RTC_CTRL);
+ } while (!(status & _BITUL(31)));
+
+ return (((cycle_t)h) << 32) | l;
+}
+
+static struct clocksource arc_counter_rtc = {
+ .name = "ARCv2 RTC",
+ .rating = 350,
+ .read = arc_read_rtc,
+ .mask = CLOCKSOURCE_MASK(64),
+ .flags = CLOCK_SOURCE_IS_CONTINUOUS,
+};
+
+static int __init arc_cs_setup_rtc(struct device_node *node)
+{
+ struct bcr_timer timer;
+ int ret;
+
+ READ_BCR(ARC_REG_TIMERS_BCR, timer);
+ if (!timer.rtc) {
+ pr_warn("Local-64-bit-Ctr clocksource not detected");
+ return -ENXIO;
+ }
+
+ /* Local to CPU hence not usable in SMP */
+ if (IS_ENABLED(CONFIG_SMP)) {
+ pr_warn("Local-64-bit-Ctr not usable in SMP");
+ return -EINVAL;
+ }
+
+ ret = arc_get_timer_clk(node);
+ if (ret)
+ return ret;
+
+ write_aux_reg(AUX_RTC_CTRL, 1);
+
+ return clocksource_register_hz(&arc_counter_rtc, arc_timer_freq);
+}
+CLOCKSOURCE_OF_DECLARE(arc_rtc, "snps,archs-timer-rtc", arc_cs_setup_rtc);
+
+#endif
+
+/*
+ * 32-bit TIMER1 keeps counting monotonically and wraps around.
+ */
+
+static cycle_t arc_read_timer1(struct clocksource *cs)
+{
+ return (cycle_t) read_aux_reg(ARC_REG_TIMER1_CNT);
+}
+
+static struct clocksource arc_counter_timer1 = {
+ .name = "ARC Timer1",
+ .rating = 300,
+ .read = arc_read_timer1,
+ .mask = CLOCKSOURCE_MASK(32),
+ .flags = CLOCK_SOURCE_IS_CONTINUOUS,
+};
+
+static int __init arc_cs_setup_timer1(struct device_node *node)
+{
+ int ret;
+
+ /* Local to CPU hence not usable in SMP */
+ if (IS_ENABLED(CONFIG_SMP))
+ return -EINVAL;
+
+ ret = arc_get_timer_clk(node);
+ if (ret)
+ return ret;
+
+ write_aux_reg(ARC_REG_TIMER1_LIMIT, ARC_TIMERN_MAX);
+ write_aux_reg(ARC_REG_TIMER1_CNT, 0);
+ write_aux_reg(ARC_REG_TIMER1_CTRL, TIMER_CTRL_NH);
+
+ return clocksource_register_hz(&arc_counter_timer1, arc_timer_freq);
+}
+
+/********** Clock Event Device *********/
+
+static int arc_timer_irq;
+
+/*
+ * Arm the timer to interrupt after @cycles
+ * The distinction between oneshot and periodic is handled in timer_irq_handler() below
+ */
+static void arc_timer_event_setup(unsigned int cycles)
+{
+ write_aux_reg(ARC_REG_TIMER0_LIMIT, cycles);
+ write_aux_reg(ARC_REG_TIMER0_CNT, 0); /* start from 0 */
+
+ write_aux_reg(ARC_REG_TIMER0_CTRL, TIMER_CTRL_IE | TIMER_CTRL_NH);
+}
+
+
+static int arc_clkevent_set_next_event(unsigned long delta,
+ struct clock_event_device *dev)
+{
+ arc_timer_event_setup(delta);
+ return 0;
+}
+
+static int arc_clkevent_set_periodic(struct clock_event_device *dev)
+{
+ /*
+ * At X Hz, 1 sec = 1000ms -> X cycles;
+ * 10ms -> X / 100 cycles
+ */
+ arc_timer_event_setup(arc_timer_freq / HZ);
+ return 0;
+}
+
+static DEFINE_PER_CPU(struct clock_event_device, arc_clockevent_device) = {
+ .name = "ARC Timer0",
+ .features = CLOCK_EVT_FEAT_ONESHOT |
+ CLOCK_EVT_FEAT_PERIODIC,
+ .rating = 300,
+ .set_next_event = arc_clkevent_set_next_event,
+ .set_state_periodic = arc_clkevent_set_periodic,
+};
+
+static irqreturn_t timer_irq_handler(int irq, void *dev_id)
+{
+ /*
+ * Note that generic IRQ core could have passed @evt for @dev_id if
+ * irq_set_chip_and_handler() asked for handle_percpu_devid_irq()
+ */
+ struct clock_event_device *evt = this_cpu_ptr(&arc_clockevent_device);
+ int irq_reenable = clockevent_state_periodic(evt);
+
+ /*
+ * Any write to the CTRL reg ACKs the interrupt; we rewrite the
+ * [N]ot [H]alted bit so the count keeps running, and re-arm it
+ * via the [I]nterrupt [E]nable bit if periodic.
+ */
+ write_aux_reg(ARC_REG_TIMER0_CTRL, irq_reenable | TIMER_CTRL_NH);
+
+ evt->event_handler(evt);
+
+ return IRQ_HANDLED;
+}
+
+
+static int arc_timer_starting_cpu(unsigned int cpu)
+{
+ struct clock_event_device *evt = this_cpu_ptr(&arc_clockevent_device);
+
+ evt->cpumask = cpumask_of(smp_processor_id());
+
+ clockevents_config_and_register(evt, arc_timer_freq, 0, ARC_TIMERN_MAX);
+ enable_percpu_irq(arc_timer_irq, 0);
+ return 0;
+}
+
+static int arc_timer_dying_cpu(unsigned int cpu)
+{
+ disable_percpu_irq(arc_timer_irq);
+ return 0;
+}
+
+/*
+ * clockevent setup for boot CPU
+ */
+static int __init arc_clockevent_setup(struct device_node *node)
+{
+ struct clock_event_device *evt = this_cpu_ptr(&arc_clockevent_device);
+ int ret;
+
+ arc_timer_irq = irq_of_parse_and_map(node, 0);
+ if (arc_timer_irq <= 0) {
+ pr_err("clockevent: missing irq");
+ return -EINVAL;
+ }
+
+ ret = arc_get_timer_clk(node);
+ if (ret) {
+ pr_err("clockevent: missing clk");
+ return ret;
+ }
+
+ /* Needs a priori irq_set_percpu_devid() done in the intc map function */
+ ret = request_percpu_irq(arc_timer_irq, timer_irq_handler,
+ "Timer0 (per-cpu-tick)", evt);
+ if (ret) {
+ pr_err("clockevent: unable to request irq\n");
+ return ret;
+ }
+
+ ret = cpuhp_setup_state(CPUHP_AP_ARC_TIMER_STARTING,
+ "AP_ARC_TIMER_STARTING",
+ arc_timer_starting_cpu,
+ arc_timer_dying_cpu);
+ if (ret) {
+ pr_err("Failed to setup hotplug state");
+ return ret;
+ }
+ return 0;
+}
+
+static int __init arc_of_timer_init(struct device_node *np)
+{
+ static int init_count = 0;
+ int ret;
+
+ if (!init_count) {
+ init_count = 1;
+ ret = arc_clockevent_setup(np);
+ } else {
+ ret = arc_cs_setup_timer1(np);
+ }
+
+ return ret;
+}
+CLOCKSOURCE_OF_DECLARE(arc_clkevt, "snps,arc-timer", arc_of_timer_init);
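The RTC read above leans on a hardware status bit to detect a torn 64-bit read; where no such bit exists, the classic software equivalent is a hi/lo/hi retry loop. A generic sketch (read_hi()/read_lo() are hypothetical register accessors, not part of this driver):

#include <linux/types.h>

u32 read_hi(void);	/* hypothetical MMIO/aux accessor */
u32 read_lo(void);	/* hypothetical MMIO/aux accessor */

static u64 read_64bit_counter(void)
{
	u32 hi, lo, tmp;

	do {
		hi = read_hi();
		lo = read_lo();
		tmp = read_hi();	/* did the high word roll over? */
	} while (hi != tmp);

	return ((u64)hi << 32) | lo;
}

For the same reason, the GFRC read above brackets its two MCIP readback commands with local_irq_save(), so the command/readback pair cannot be split by an interrupt.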
diff --git a/drivers/clocksource/arm_arch_timer.c b/drivers/clocksource/arm_arch_timer.c
index 73c487da6d2a..02fef6830e72 100644
--- a/drivers/clocksource/arm_arch_timer.c
+++ b/drivers/clocksource/arm_arch_timer.c
@@ -81,6 +81,7 @@ static struct clock_event_device __percpu *arch_timer_evt;
static enum ppi_nr arch_timer_uses_ppi = VIRT_PPI;
static bool arch_timer_c3stop;
static bool arch_timer_mem_use_virtual;
+static bool arch_counter_suspend_stop;
static bool evtstrm_enable = IS_ENABLED(CONFIG_ARM_ARCH_TIMER_EVTSTREAM);
@@ -576,7 +577,7 @@ static struct clocksource clocksource_counter = {
.rating = 400,
.read = arch_counter_read,
.mask = CLOCKSOURCE_MASK(56),
- .flags = CLOCK_SOURCE_IS_CONTINUOUS | CLOCK_SOURCE_SUSPEND_NONSTOP,
+ .flags = CLOCK_SOURCE_IS_CONTINUOUS,
};
static struct cyclecounter cyclecounter = {
@@ -616,6 +617,8 @@ static void __init arch_counter_register(unsigned type)
arch_timer_read_counter = arch_counter_get_cntvct_mem;
}
+ if (!arch_counter_suspend_stop)
+ clocksource_counter.flags |= CLOCK_SOURCE_SUSPEND_NONSTOP;
start_count = arch_timer_read_counter();
clocksource_register_hz(&clocksource_counter, arch_timer_rate);
cyclecounter.mult = clocksource_counter.mult;
@@ -907,6 +910,10 @@ static int __init arch_timer_of_init(struct device_node *np)
of_property_read_bool(np, "arm,cpu-registers-not-fw-configured"))
arch_timer_uses_ppi = PHYS_SECURE_PPI;
+ /* On some systems, the counter stops ticking when in suspend. */
+ arch_counter_suspend_stop = of_property_read_bool(np,
+ "arm,no-tick-in-suspend");
+
return arch_timer_init();
}
CLOCKSOURCE_OF_DECLARE(armv7_arch_timer, "arm,armv7-timer", arch_timer_of_init);
@@ -964,8 +971,9 @@ static int __init arch_timer_mem_init(struct device_node *np)
}
ret = -ENXIO;
- base = arch_counter_base = of_iomap(best_frame, 0);
- if (!base) {
+ base = arch_counter_base = of_io_request_and_map(best_frame, 0,
+ "arch_mem_timer");
+ if (IS_ERR(base)) {
pr_err("arch_timer: Can't map frame's registers\n");
goto out;
}
diff --git a/drivers/clocksource/bcm2835_timer.c b/drivers/clocksource/bcm2835_timer.c
index e71acf231c89..f2f29d2be1cf 100644
--- a/drivers/clocksource/bcm2835_timer.c
+++ b/drivers/clocksource/bcm2835_timer.c
@@ -96,7 +96,7 @@ static int __init bcm2835_timer_init(struct device_node *node)
ret = of_property_read_u32(node, "clock-frequency", &freq);
if (ret) {
pr_err("Can't read clock-frequency");
- return ret;
+ goto err_iounmap;
}
system_clock = base + REG_COUNTER_LO;
@@ -108,13 +108,15 @@ static int __init bcm2835_timer_init(struct device_node *node)
irq = irq_of_parse_and_map(node, DEFAULT_TIMER);
if (irq <= 0) {
pr_err("Can't parse IRQ");
- return -EINVAL;
+ ret = -EINVAL;
+ goto err_iounmap;
}
timer = kzalloc(sizeof(*timer), GFP_KERNEL);
if (!timer) {
pr_err("Can't allocate timer struct\n");
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto err_iounmap;
}
timer->control = base + REG_CONTROL;
@@ -133,7 +135,7 @@ static int __init bcm2835_timer_init(struct device_node *node)
ret = setup_irq(irq, &timer->act);
if (ret) {
pr_err("Can't set up timer IRQ\n");
- return ret;
+ goto err_iounmap;
}
clockevents_config_and_register(&timer->evt, freq, 0xf, 0xffffffff);
@@ -141,6 +143,10 @@ static int __init bcm2835_timer_init(struct device_node *node)
pr_info("bcm2835: system timer (irq = %d)\n", irq);
return 0;
+
+err_iounmap:
+ iounmap(base);
+ return ret;
}
CLOCKSOURCE_OF_DECLARE(bcm2835, "brcm,bcm2835-system-timer",
bcm2835_timer_init);
diff --git a/drivers/clocksource/moxart_timer.c b/drivers/clocksource/moxart_timer.c
index 2a8f4705c734..7f3430654fbd 100644
--- a/drivers/clocksource/moxart_timer.c
+++ b/drivers/clocksource/moxart_timer.c
@@ -161,19 +161,22 @@ static int __init moxart_timer_init(struct device_node *node)
timer->base = of_iomap(node, 0);
if (!timer->base) {
pr_err("%s: of_iomap failed\n", node->full_name);
- return -ENXIO;
+ ret = -ENXIO;
+ goto out_free;
}
irq = irq_of_parse_and_map(node, 0);
if (irq <= 0) {
pr_err("%s: irq_of_parse_and_map failed\n", node->full_name);
- return -EINVAL;
+ ret = -EINVAL;
+ goto out_unmap;
}
clk = of_clk_get(node, 0);
if (IS_ERR(clk)) {
pr_err("%s: of_clk_get failed\n", node->full_name);
- return PTR_ERR(clk);
+ ret = PTR_ERR(clk);
+ goto out_unmap;
}
pclk = clk_get_rate(clk);
@@ -186,7 +189,8 @@ static int __init moxart_timer_init(struct device_node *node)
timer->t1_disable_val = ASPEED_TIMER1_DISABLE;
} else {
pr_err("%s: unknown platform\n", node->full_name);
- return -EINVAL;
+ ret = -EINVAL;
+ goto out_unmap;
}
timer->count_per_tick = DIV_ROUND_CLOSEST(pclk, HZ);
@@ -208,14 +212,14 @@ static int __init moxart_timer_init(struct device_node *node)
clocksource_mmio_readl_down);
if (ret) {
pr_err("%s: clocksource_mmio_init failed\n", node->full_name);
- return ret;
+ goto out_unmap;
}
ret = request_irq(irq, moxart_timer_interrupt, IRQF_TIMER,
node->name, &timer->clkevt);
if (ret) {
pr_err("%s: setup_irq failed\n", node->full_name);
- return ret;
+ goto out_unmap;
}
/* Clear match registers */
@@ -241,6 +245,12 @@ static int __init moxart_timer_init(struct device_node *node)
clockevents_config_and_register(&timer->clkevt, pclk, 0x4, 0xfffffffe);
return 0;
+
+out_unmap:
+ iounmap(timer->base);
+out_free:
+ kfree(timer);
+ return ret;
}
CLOCKSOURCE_OF_DECLARE(moxart, "moxa,moxart-timer", moxart_timer_init);
CLOCKSOURCE_OF_DECLARE(aspeed, "aspeed,ast2400-timer", moxart_timer_init);
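Both this fix and the bcm2835 one above replace early returns with a single unwind ladder, releasing resources in reverse order of acquisition. A minimal skeleton of the pattern (struct example_timer and setup_everything_else() are placeholders, not real API):

#include <linux/init.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/slab.h>

struct example_timer {
	void __iomem *base;
};

static int setup_everything_else(struct example_timer *t)
{
	return 0;	/* placeholder for irq/clk/clockevent setup */
}

static int __init example_timer_init(struct device_node *node)
{
	struct example_timer *timer;
	void __iomem *base;
	int ret;

	base = of_iomap(node, 0);
	if (!base)
		return -ENXIO;		/* nothing acquired yet */

	timer = kzalloc(sizeof(*timer), GFP_KERNEL);
	if (!timer) {
		ret = -ENOMEM;
		goto out_unmap;
	}
	timer->base = base;

	ret = setup_everything_else(timer);
	if (ret)
		goto out_free;

	return 0;

out_free:
	kfree(timer);
out_unmap:
	iounmap(base);
	return ret;
}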
diff --git a/drivers/clocksource/pxa_timer.c b/drivers/clocksource/pxa_timer.c
index 3e1cb512f3ce..9cae38eebec2 100644
--- a/drivers/clocksource/pxa_timer.c
+++ b/drivers/clocksource/pxa_timer.c
@@ -220,17 +220,16 @@ CLOCKSOURCE_OF_DECLARE(pxa_timer, "marvell,pxa-timer", pxa_timer_dt_init);
/*
* Legacy timer init for non device-tree boards.
*/
-void __init pxa_timer_nodt_init(int irq, void __iomem *base,
- unsigned long clock_tick_rate)
+void __init pxa_timer_nodt_init(int irq, void __iomem *base)
{
struct clk *clk;
timer_base = base;
clk = clk_get(NULL, "OSTIMER0");
- if (clk && !IS_ERR(clk))
+ if (clk && !IS_ERR(clk)) {
clk_prepare_enable(clk);
- else
+ pxa_timer_common_init(irq, clk_get_rate(clk));
+ } else {
pr_crit("%s: unable to get clk\n", __func__);
-
- pxa_timer_common_init(irq, clock_tick_rate);
+ }
}
diff --git a/drivers/clocksource/timer-nps.c b/drivers/clocksource/timer-nps.c
index 70c149af8ee0..8da5e93b6810 100644
--- a/drivers/clocksource/timer-nps.c
+++ b/drivers/clocksource/timer-nps.c
@@ -46,7 +46,36 @@
/* This array is per cluster of CPUs (Each NPS400 cluster got 256 CPUs) */
static void *nps_msu_reg_low_addr[NPS_CLUSTER_NUM] __read_mostly;
-static unsigned long nps_timer_rate;
+static int __init nps_get_timer_clk(struct device_node *node,
+ unsigned long *timer_freq,
+ struct clk **clk)
+{
+ int ret;
+
+ *clk = of_clk_get(node, 0);
+ ret = PTR_ERR_OR_ZERO(*clk);
+ if (ret) {
+ pr_err("timer missing clk");
+ return ret;
+ }
+
+ ret = clk_prepare_enable(*clk);
+ if (ret) {
+ pr_err("Couldn't enable parent clk\n");
+ clk_put(*clk);
+ return ret;
+ }
+
+ *timer_freq = clk_get_rate(*clk);
+ if (!(*timer_freq)) {
+ pr_err("Couldn't get clk rate\n");
+ clk_disable_unprepare(*clk);
+ clk_put(*clk);
+ return -EINVAL;
+ }
+
+ return 0;
+}
static cycle_t nps_clksrc_read(struct clocksource *clksrc)
{
@@ -55,26 +84,24 @@ static cycle_t nps_clksrc_read(struct clocksource *clksrc)
return (cycle_t)ioread32be(nps_msu_reg_low_addr[cluster]);
}
-static int __init nps_setup_clocksource(struct device_node *node,
- struct clk *clk)
+static int __init nps_setup_clocksource(struct device_node *node)
{
int ret, cluster;
+ struct clk *clk;
+ unsigned long nps_timer1_freq;
+
for (cluster = 0; cluster < NPS_CLUSTER_NUM; cluster++)
nps_msu_reg_low_addr[cluster] =
nps_host_reg((cluster << NPS_CLUSTER_OFFSET),
- NPS_MSU_BLKID, NPS_MSU_TICK_LOW);
+ NPS_MSU_BLKID, NPS_MSU_TICK_LOW);
- ret = clk_prepare_enable(clk);
- if (ret) {
- pr_err("Couldn't enable parent clock\n");
+ ret = nps_get_timer_clk(node, &nps_timer1_freq, &clk);
+ if (ret)
return ret;
- }
-
- nps_timer_rate = clk_get_rate(clk);
- ret = clocksource_mmio_init(nps_msu_reg_low_addr, "EZnps-tick",
- nps_timer_rate, 301, 32, nps_clksrc_read);
+ ret = clocksource_mmio_init(nps_msu_reg_low_addr, "nps-tick",
+ nps_timer1_freq, 300, 32, nps_clksrc_read);
if (ret) {
pr_err("Couldn't register clock source.\n");
clk_disable_unprepare(clk);
@@ -83,18 +110,175 @@ static int __init nps_setup_clocksource(struct device_node *node,
return ret;
}
-static int __init nps_timer_init(struct device_node *node)
+CLOCKSOURCE_OF_DECLARE(ezchip_nps400_clksrc, "ezchip,nps400-timer",
+ nps_setup_clocksource);
+CLOCKSOURCE_OF_DECLARE(ezchip_nps400_clk_src, "ezchip,nps400-timer1",
+ nps_setup_clocksource);
+
+#ifdef CONFIG_EZNPS_MTM_EXT
+#include <soc/nps/mtm.h>
+
+/* Timer related Aux registers */
+#define NPS_REG_TIMER0_TSI 0xFFFFF850
+#define NPS_REG_TIMER0_LIMIT 0x23
+#define NPS_REG_TIMER0_CTRL 0x22
+#define NPS_REG_TIMER0_CNT 0x21
+
+/*
+ * Interrupt Enabled (IE) - re-arm the timer
+ * Not Halted (NH) - is cleared when working with JTAG (for debug)
+ */
+#define TIMER0_CTRL_IE BIT(0)
+#define TIMER0_CTRL_NH BIT(1)
+
+static unsigned long nps_timer0_freq;
+static unsigned long nps_timer0_irq;
+
+static void nps_clkevent_rm_thread(void)
+{
+ int thread;
+ unsigned int cflags, enabled_threads;
+
+ hw_schd_save(&cflags);
+
+ enabled_threads = read_aux_reg(NPS_REG_TIMER0_TSI);
+
+ /* remove thread from TSI1 */
+ thread = read_aux_reg(CTOP_AUX_THREAD_ID);
+ enabled_threads &= ~(1 << thread);
+ write_aux_reg(NPS_REG_TIMER0_TSI, enabled_threads);
+
+ /* Acknowledge and if needed re-arm the timer */
+ if (!enabled_threads)
+ write_aux_reg(NPS_REG_TIMER0_CTRL, TIMER0_CTRL_NH);
+ else
+ write_aux_reg(NPS_REG_TIMER0_CTRL,
+ TIMER0_CTRL_IE | TIMER0_CTRL_NH);
+
+ hw_schd_restore(cflags);
+}
+
+static void nps_clkevent_add_thread(unsigned long delta)
+{
+ int thread;
+ unsigned int cflags, enabled_threads;
+
+ hw_schd_save(&cflags);
+
+ /* add thread to TSI1 */
+ thread = read_aux_reg(CTOP_AUX_THREAD_ID);
+ enabled_threads = read_aux_reg(NPS_REG_TIMER0_TSI);
+ enabled_threads |= (1 << thread);
+ write_aux_reg(NPS_REG_TIMER0_TSI, enabled_threads);
+
+ /* set next timer event */
+ write_aux_reg(NPS_REG_TIMER0_LIMIT, delta);
+ write_aux_reg(NPS_REG_TIMER0_CNT, 0);
+ write_aux_reg(NPS_REG_TIMER0_CTRL,
+ TIMER0_CTRL_IE | TIMER0_CTRL_NH);
+
+ hw_schd_restore(cflags);
+}
+
+/*
+ * Whenever anyone tries to change modes, we just mask interrupts
+ * and wait for the next event to get set.
+ */
+static int nps_clkevent_set_state(struct clock_event_device *dev)
+{
+ nps_clkevent_rm_thread();
+ disable_percpu_irq(nps_timer0_irq);
+
+ return 0;
+}
+
+static int nps_clkevent_set_next_event(unsigned long delta,
+ struct clock_event_device *dev)
+{
+ nps_clkevent_add_thread(delta);
+ enable_percpu_irq(nps_timer0_irq, IRQ_TYPE_NONE);
+
+ return 0;
+}
+
+static DEFINE_PER_CPU(struct clock_event_device, nps_clockevent_device) = {
+ .name = "NPS Timer0",
+ .features = CLOCK_EVT_FEAT_ONESHOT,
+ .rating = 300,
+ .set_next_event = nps_clkevent_set_next_event,
+ .set_state_oneshot = nps_clkevent_set_state,
+ .set_state_oneshot_stopped = nps_clkevent_set_state,
+ .set_state_shutdown = nps_clkevent_set_state,
+ .tick_resume = nps_clkevent_set_state,
+};
+
+static irqreturn_t timer_irq_handler(int irq, void *dev_id)
+{
+ struct clock_event_device *evt = dev_id;
+
+ nps_clkevent_rm_thread();
+ evt->event_handler(evt);
+
+ return IRQ_HANDLED;
+}
+
+static int nps_timer_starting_cpu(unsigned int cpu)
+{
+ struct clock_event_device *evt = this_cpu_ptr(&nps_clockevent_device);
+
+ evt->cpumask = cpumask_of(smp_processor_id());
+
+ clockevents_config_and_register(evt, nps_timer0_freq, 0, ULONG_MAX);
+ enable_percpu_irq(nps_timer0_irq, IRQ_TYPE_NONE);
+
+ return 0;
+}
+
+static int nps_timer_dying_cpu(unsigned int cpu)
+{
+ disable_percpu_irq(nps_timer0_irq);
+ return 0;
+}
+
+static int __init nps_setup_clockevent(struct device_node *node)
{
struct clk *clk;
+ int ret;
- clk = of_clk_get(node, 0);
- if (IS_ERR(clk)) {
- pr_err("Can't get timer clock.\n");
- return PTR_ERR(clk);
+ nps_timer0_irq = irq_of_parse_and_map(node, 0);
+ if (nps_timer0_irq <= 0) {
+ pr_err("clockevent: missing irq");
+ return -EINVAL;
}
- return nps_setup_clocksource(node, clk);
+ ret = nps_get_timer_clk(node, &nps_timer0_freq, &clk);
+ if (ret)
+ return ret;
+
+ /* Needs a priori irq_set_percpu_devid() done in the intc map function */
+ ret = request_percpu_irq(nps_timer0_irq, timer_irq_handler,
+ "Timer0 (per-cpu-tick)",
+ &nps_clockevent_device);
+ if (ret) {
+ pr_err("Couldn't request irq\n");
+ clk_disable_unprepare(clk);
+ return ret;
+ }
+
+ ret = cpuhp_setup_state(CPUHP_AP_ARC_TIMER_STARTING,
+ "clockevents/nps:starting",
+ nps_timer_starting_cpu,
+ nps_timer_dying_cpu);
+ if (ret) {
+ pr_err("Failed to setup hotplug state");
+ clk_disable_unprepare(clk);
+ free_percpu_irq(nps_timer0_irq, &nps_clockevent_device);
+ return ret;
+ }
+
+ return 0;
}
-CLOCKSOURCE_OF_DECLARE(ezchip_nps400_clksrc, "ezchip,nps400-timer",
- nps_timer_init);
+CLOCKSOURCE_OF_DECLARE(ezchip_nps400_clk_evt, "ezchip,nps400-timer0",
+ nps_setup_clockevent);
+#endif /* CONFIG_EZNPS_MTM_EXT */
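The TSI handling above lets several hardware threads share one TIMER0: each thread owns a bit in the TSI mask, and the interrupt stays armed only while any bit remains set. A condensed sketch of the nps_clkevent_rm_thread() logic (the accessors and CTRL bits are hypothetical stand-ins for the aux-register operations, and the scheduler save/restore is omitted):

#include <linux/bitops.h>
#include <linux/types.h>

#define CTRL_IE BIT(0)	/* re-arm timer on expiry */
#define CTRL_NH BIT(1)	/* keep counting while not halted */

u32 read_tsi(void);		/* hypothetical */
void write_tsi(u32 mask);	/* hypothetical */
void write_ctrl(u32 val);	/* hypothetical */

static void tsi_remove_self(unsigned int thread)
{
	u32 mask = read_tsi() & ~BIT(thread);

	write_tsi(mask);

	/* Ack; only re-arm IE if another thread is still waiting */
	write_ctrl(mask ? (CTRL_IE | CTRL_NH) : CTRL_NH);
}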
diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm
index d89b8afe23b6..920c469f3953 100644
--- a/drivers/cpufreq/Kconfig.arm
+++ b/drivers/cpufreq/Kconfig.arm
@@ -12,6 +12,27 @@ config ARM_BIG_LITTLE_CPUFREQ
help
This enables the Generic CPUfreq driver for ARM big.LITTLE platforms.
+config ARM_BRCMSTB_AVS_CPUFREQ
+ tristate "Broadcom STB AVS CPUfreq driver"
+ depends on ARCH_BRCMSTB || COMPILE_TEST
+ default y
+ help
+ Some Broadcom STB SoCs use a co-processor running proprietary firmware
+ ("AVS") to handle voltage and frequency scaling. This driver provides
+ a standard CPUfreq interface to the firmware.
+
+ Say Y, if you have a Broadcom SoC with AVS support for DFS or DVFS.
+
+config ARM_BRCMSTB_AVS_CPUFREQ_DEBUG
+ bool "Broadcom STB AVS CPUfreq driver sysfs debug capability"
+ depends on ARM_BRCMSTB_AVS_CPUFREQ
+ help
+ Enabling this option turns on debug support via debugfs under
+ /sys/kernel/debug/brcmstb-avs-cpufreq. It is possible to read all and
+ write some AVS mailbox registers through debugfs entries.
+
+ If in doubt, say N.
+
config ARM_DT_BL_CPUFREQ
tristate "Generic probing via DT for ARM big LITTLE CPUfreq driver"
depends on ARM_BIG_LITTLE_CPUFREQ && OF
@@ -60,14 +81,6 @@ config ARM_IMX6Q_CPUFREQ
If in doubt, say N.
-config ARM_INTEGRATOR
- tristate "CPUfreq driver for ARM Integrator CPUs"
- depends on ARCH_INTEGRATOR
- default y
- help
- This enables the CPUfreq driver for ARM Integrator CPUs.
- If in doubt, say Y.
-
config ARM_KIRKWOOD_CPUFREQ
def_bool MACH_KIRKWOOD
help
diff --git a/drivers/cpufreq/Kconfig.x86 b/drivers/cpufreq/Kconfig.x86
index adbd1de1cea5..35f71825b7f3 100644
--- a/drivers/cpufreq/Kconfig.x86
+++ b/drivers/cpufreq/Kconfig.x86
@@ -6,6 +6,7 @@ config X86_INTEL_PSTATE
bool "Intel P state control"
depends on X86
select ACPI_PROCESSOR if ACPI
+ select ACPI_CPPC_LIB if X86_64 && ACPI && SCHED_MC_PRIO
help
This driver provides a P state for Intel core processors.
The driver implements an internal governor and will become
diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile
index 0a9b6a093646..1e46c3918e7a 100644
--- a/drivers/cpufreq/Makefile
+++ b/drivers/cpufreq/Makefile
@@ -51,12 +51,12 @@ obj-$(CONFIG_ARM_BIG_LITTLE_CPUFREQ) += arm_big_little.o
# LITTLE drivers, so that it is probed last.
obj-$(CONFIG_ARM_DT_BL_CPUFREQ) += arm_big_little_dt.o
+obj-$(CONFIG_ARM_BRCMSTB_AVS_CPUFREQ) += brcmstb-avs-cpufreq.o
obj-$(CONFIG_ARCH_DAVINCI) += davinci-cpufreq.o
obj-$(CONFIG_UX500_SOC_DB8500) += dbx500-cpufreq.o
obj-$(CONFIG_ARM_EXYNOS5440_CPUFREQ) += exynos5440-cpufreq.o
obj-$(CONFIG_ARM_HIGHBANK_CPUFREQ) += highbank-cpufreq.o
obj-$(CONFIG_ARM_IMX6Q_CPUFREQ) += imx6q-cpufreq.o
-obj-$(CONFIG_ARM_INTEGRATOR) += integrator-cpufreq.o
obj-$(CONFIG_ARM_KIRKWOOD_CPUFREQ) += kirkwood-cpufreq.o
obj-$(CONFIG_ARM_MT8173_CPUFREQ) += mt8173-cpufreq.o
obj-$(CONFIG_ARM_OMAP2PLUS_CPUFREQ) += omap-cpufreq.o
diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c
index 297e9128fe9f..3a2ca0f79daf 100644
--- a/drivers/cpufreq/acpi-cpufreq.c
+++ b/drivers/cpufreq/acpi-cpufreq.c
@@ -84,7 +84,6 @@ static inline struct acpi_processor_performance *to_perf_data(struct acpi_cpufre
static struct cpufreq_driver acpi_cpufreq_driver;
static unsigned int acpi_pstate_strict;
-static struct msr __percpu *msrs;
static bool boost_state(unsigned int cpu)
{
@@ -104,11 +103,10 @@ static bool boost_state(unsigned int cpu)
return false;
}
-static void boost_set_msrs(bool enable, const struct cpumask *cpumask)
+static int boost_set_msr(bool enable)
{
- u32 cpu;
u32 msr_addr;
- u64 msr_mask;
+ u64 msr_mask, val;
switch (boot_cpu_data.x86_vendor) {
case X86_VENDOR_INTEL:
@@ -120,26 +118,31 @@ static void boost_set_msrs(bool enable, const struct cpumask *cpumask)
msr_mask = MSR_K7_HWCR_CPB_DIS;
break;
default:
- return;
+ return -EINVAL;
}
- rdmsr_on_cpus(cpumask, msr_addr, msrs);
+ rdmsrl(msr_addr, val);
- for_each_cpu(cpu, cpumask) {
- struct msr *reg = per_cpu_ptr(msrs, cpu);
- if (enable)
- reg->q &= ~msr_mask;
- else
- reg->q |= msr_mask;
- }
+ if (enable)
+ val &= ~msr_mask;
+ else
+ val |= msr_mask;
+
+ wrmsrl(msr_addr, val);
+ return 0;
+}
+
+static void boost_set_msr_each(void *p_en)
+{
+ bool enable = (bool) p_en;
- wrmsr_on_cpus(cpumask, msr_addr, msrs);
+ boost_set_msr(enable);
}
static int set_boost(int val)
{
get_online_cpus();
- boost_set_msrs(val, cpu_online_mask);
+ on_each_cpu(boost_set_msr_each, (void *)(long)val, 1);
put_online_cpus();
pr_debug("Core Boosting %sabled.\n", val ? "en" : "dis");
@@ -536,46 +539,24 @@ static void free_acpi_perf_data(void)
free_percpu(acpi_perf_data);
}
-static int boost_notify(struct notifier_block *nb, unsigned long action,
- void *hcpu)
+static int cpufreq_boost_online(unsigned int cpu)
{
- unsigned cpu = (long)hcpu;
- const struct cpumask *cpumask;
-
- cpumask = get_cpu_mask(cpu);
+ /*
+ * On the CPU_UP path we simply keep the boost-disable flag
+ * in sync with the current global state.
+ */
+ return boost_set_msr(acpi_cpufreq_driver.boost_enabled);
+}
+static int cpufreq_boost_down_prep(unsigned int cpu)
+{
/*
* Clear the boost-disable bit on the CPU_DOWN path so that
- * this cpu cannot block the remaining ones from boosting. On
- * the CPU_UP path we simply keep the boost-disable flag in
- * sync with the current global state.
+ * this cpu cannot block the remaining ones from boosting.
*/
-
- switch (action) {
- case CPU_DOWN_FAILED:
- case CPU_DOWN_FAILED_FROZEN:
- case CPU_ONLINE:
- case CPU_ONLINE_FROZEN:
- boost_set_msrs(acpi_cpufreq_driver.boost_enabled, cpumask);
- break;
-
- case CPU_DOWN_PREPARE:
- case CPU_DOWN_PREPARE_FROZEN:
- boost_set_msrs(1, cpumask);
- break;
-
- default:
- break;
- }
-
- return NOTIFY_OK;
+ return boost_set_msr(1);
}
-
-static struct notifier_block boost_nb = {
- .notifier_call = boost_notify,
-};
-
/*
* acpi_cpufreq_early_init - initialize ACPI P-States library
*
@@ -922,37 +903,35 @@ static struct cpufreq_driver acpi_cpufreq_driver = {
.attr = acpi_cpufreq_attr,
};
+static enum cpuhp_state acpi_cpufreq_online;
+
static void __init acpi_cpufreq_boost_init(void)
{
- if (boot_cpu_has(X86_FEATURE_CPB) || boot_cpu_has(X86_FEATURE_IDA)) {
- msrs = msrs_alloc();
-
- if (!msrs)
- return;
-
- acpi_cpufreq_driver.set_boost = set_boost;
- acpi_cpufreq_driver.boost_enabled = boost_state(0);
-
- cpu_notifier_register_begin();
+ int ret;
- /* Force all MSRs to the same value */
- boost_set_msrs(acpi_cpufreq_driver.boost_enabled,
- cpu_online_mask);
+ if (!(boot_cpu_has(X86_FEATURE_CPB) || boot_cpu_has(X86_FEATURE_IDA)))
+ return;
- __register_cpu_notifier(&boost_nb);
+ acpi_cpufreq_driver.set_boost = set_boost;
+ acpi_cpufreq_driver.boost_enabled = boost_state(0);
- cpu_notifier_register_done();
+ /*
+ * This calls the online callback on all online CPUs and forces all
+ * MSRs to the same value.
+ */
+ ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "cpufreq/acpi:online",
+ cpufreq_boost_online, cpufreq_boost_down_prep);
+ if (ret < 0) {
+ pr_err("acpi_cpufreq: failed to register hotplug callbacks\n");
+ return;
}
+ acpi_cpufreq_online = ret;
}
static void acpi_cpufreq_boost_exit(void)
{
- if (msrs) {
- unregister_cpu_notifier(&boost_nb);
-
- msrs_free(msrs);
- msrs = NULL;
- }
+ if (acpi_cpufreq_online > 0)
+ cpuhp_remove_state_nocalls(acpi_cpufreq_online);
}
static int __init acpi_cpufreq_init(void)
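The conversion above swaps an open-coded CPU notifier for the hotplug state machine. With CPUHP_AP_ONLINE_DYN, cpuhp_setup_state() allocates a dynamic state number, immediately runs the online callback on every CPU that is already up (which is what forces all MSRs into sync), and returns that number for later removal. A minimal sketch of the idiom (names prefixed my_ are placeholders):

#include <linux/cpuhotplug.h>
#include <linux/init.h>

static enum cpuhp_state my_hp_state;

static int my_online(unsigned int cpu)
{
	return 0;	/* bring this CPU's state in sync here */
}

static int my_prep_down(unsigned int cpu)
{
	return 0;	/* undo this CPU's state here */
}

static int __init my_driver_init(void)
{
	int ret;

	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "mydrv:online",
				my_online, my_prep_down);
	if (ret < 0)
		return ret;

	/* For dynamic states the allocated state number is returned */
	my_hp_state = ret;
	return 0;
}

static void my_driver_exit(void)
{
	cpuhp_remove_state_nocalls(my_hp_state);
}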
diff --git a/drivers/cpufreq/brcmstb-avs-cpufreq.c b/drivers/cpufreq/brcmstb-avs-cpufreq.c
new file mode 100644
index 000000000000..4fda623e55bb
--- /dev/null
+++ b/drivers/cpufreq/brcmstb-avs-cpufreq.c
@@ -0,0 +1,1057 @@
+/*
+ * CPU frequency scaling for Broadcom SoCs with AVS firmware that
+ * supports DVS or DVFS
+ *
+ * Copyright (c) 2016 Broadcom
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/*
+ * "AVS" is the name of a firmware developed at Broadcom. It derives
+ * its name from the technique called "Adaptive Voltage Scaling".
+ * Adaptive voltage scaling was the original purpose of this firmware.
+ * The AVS firmware still supports "AVS mode", where all it does is
+ * adaptive voltage scaling. However, on some newer Broadcom SoCs, the
+ * AVS Firmware, despite its unchanged name, also supports DFS mode and
+ * DVFS mode.
+ *
+ * In the context of this document and the related driver, "AVS" by
+ * itself always means the Broadcom firmware and never refers to the
+ * technique called "Adaptive Voltage Scaling".
+ *
+ * The Broadcom STB AVS CPUfreq driver provides voltage and frequency
+ * scaling on Broadcom SoCs using AVS firmware with support for DFS and
+ * DVFS. The AVS firmware is running on its own co-processor. The
+ * driver supports both uniprocessor (UP) and symmetric multiprocessor
+ * (SMP) systems which share clock and voltage across all CPUs.
+ *
+ * Actual voltage and frequency scaling is done solely by the AVS
+ * firmware. This driver does not change frequency or voltage itself.
+ * It provides a standard CPUfreq interface to the rest of the kernel
+ * and to userland. It interfaces with the AVS firmware to effect the
+ * requested changes and to report back the current system status in a
+ * way that is expected by existing tools.
+ */
+
+#include <linux/cpufreq.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include <linux/semaphore.h>
+
+#ifdef CONFIG_ARM_BRCMSTB_AVS_CPUFREQ_DEBUG
+#include <linux/ctype.h>
+#include <linux/debugfs.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#endif
+
+/* Max number of arguments AVS calls take */
+#define AVS_MAX_CMD_ARGS 4
+/*
+ * This macro is used to generate AVS parameter register offsets. For
+ * x >= AVS_MAX_CMD_ARGS, it returns 0 to protect against accidental memory
+ * access outside of the parameter range. (Offset 0 is the first parameter.)
+ */
+#define AVS_PARAM_MULT(x) ((x) < AVS_MAX_CMD_ARGS ? (x) : 0)
+
+/* AVS Mailbox Register offsets */
+#define AVS_MBOX_COMMAND 0x00
+#define AVS_MBOX_STATUS 0x04
+#define AVS_MBOX_VOLTAGE0 0x08
+#define AVS_MBOX_TEMP0 0x0c
+#define AVS_MBOX_PV0 0x10
+#define AVS_MBOX_MV0 0x14
+#define AVS_MBOX_PARAM(x) (0x18 + AVS_PARAM_MULT(x) * sizeof(u32))
+#define AVS_MBOX_REVISION 0x28
+#define AVS_MBOX_PSTATE 0x2c
+#define AVS_MBOX_HEARTBEAT 0x30
+#define AVS_MBOX_MAGIC 0x34
+#define AVS_MBOX_SIGMA_HVT 0x38
+#define AVS_MBOX_SIGMA_SVT 0x3c
+#define AVS_MBOX_VOLTAGE1 0x40
+#define AVS_MBOX_TEMP1 0x44
+#define AVS_MBOX_PV1 0x48
+#define AVS_MBOX_MV1 0x4c
+#define AVS_MBOX_FREQUENCY 0x50
+
+/* AVS Commands */
+#define AVS_CMD_AVAILABLE 0x00
+#define AVS_CMD_DISABLE 0x10
+#define AVS_CMD_ENABLE 0x11
+#define AVS_CMD_S2_ENTER 0x12
+#define AVS_CMD_S2_EXIT 0x13
+#define AVS_CMD_BBM_ENTER 0x14
+#define AVS_CMD_BBM_EXIT 0x15
+#define AVS_CMD_S3_ENTER 0x16
+#define AVS_CMD_S3_EXIT 0x17
+#define AVS_CMD_BALANCE 0x18
+/* PMAP and P-STATE commands */
+#define AVS_CMD_GET_PMAP 0x30
+#define AVS_CMD_SET_PMAP 0x31
+#define AVS_CMD_GET_PSTATE 0x40
+#define AVS_CMD_SET_PSTATE 0x41
+
+/* Different modes AVS supports (for GET_PMAP/SET_PMAP) */
+#define AVS_MODE_AVS 0x0
+#define AVS_MODE_DFS 0x1
+#define AVS_MODE_DVS 0x2
+#define AVS_MODE_DVFS 0x3
+
+/*
+ * PMAP parameter p1
+ * unused:31-24, mdiv_p0:23-16, unused:15-14, pdiv:13-10 , ndiv_int:9-0
+ */
+#define NDIV_INT_SHIFT 0
+#define NDIV_INT_MASK 0x3ff
+#define PDIV_SHIFT 10
+#define PDIV_MASK 0xf
+#define MDIV_P0_SHIFT 16
+#define MDIV_P0_MASK 0xff
+/*
+ * PMAP parameter p2
+ * mdiv_p4:31-24, mdiv_p3:23-16, mdiv_p2:15:8, mdiv_p1:7:0
+ */
+#define MDIV_P1_SHIFT 0
+#define MDIV_P1_MASK 0xff
+#define MDIV_P2_SHIFT 8
+#define MDIV_P2_MASK 0xff
+#define MDIV_P3_SHIFT 16
+#define MDIV_P3_MASK 0xff
+#define MDIV_P4_SHIFT 24
+#define MDIV_P4_MASK 0xff
+
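/*
 * Illustrative decode of the p1 layout documented above (hypothetical
 * helpers, not part of this patch; they follow mechanically from the
 * SHIFT/MASK pairs):
 */
static inline u32 pmap_ndiv_int(u32 p1)
{
	return (p1 >> NDIV_INT_SHIFT) & NDIV_INT_MASK;
}

static inline u32 pmap_pdiv(u32 p1)
{
	return (p1 >> PDIV_SHIFT) & PDIV_MASK;
}

static inline u32 pmap_mdiv_p0(u32 p1)
{
	return (p1 >> MDIV_P0_SHIFT) & MDIV_P0_MASK;
}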
+/* Different P-STATES AVS supports (for GET_PSTATE/SET_PSTATE) */
+#define AVS_PSTATE_P0 0x0
+#define AVS_PSTATE_P1 0x1
+#define AVS_PSTATE_P2 0x2
+#define AVS_PSTATE_P3 0x3
+#define AVS_PSTATE_P4 0x4
+#define AVS_PSTATE_MAX AVS_PSTATE_P4
+
+/* CPU L2 Interrupt Controller Registers */
+#define AVS_CPU_L2_SET0 0x04
+#define AVS_CPU_L2_INT_MASK BIT(31)
+
+/* AVS Command Status Values */
+#define AVS_STATUS_CLEAR 0x00
+/* Command/notification accepted */
+#define AVS_STATUS_SUCCESS 0xf0
+/* Command/notification rejected */
+#define AVS_STATUS_FAILURE 0xff
+/* Invalid command/notification (unknown) */
+#define AVS_STATUS_INVALID 0xf1
+/* Non-AVS modes are not supported */
+#define AVS_STATUS_NO_SUPP 0xf2
+/* Cannot set P-State until P-Map supplied */
+#define AVS_STATUS_NO_MAP 0xf3
+/* Cannot change P-Map after initial P-Map set */
+#define AVS_STATUS_MAP_SET 0xf4
+/* Max AVS status; higher numbers are used for debugging */
+#define AVS_STATUS_MAX 0xff
+
+/* Other AVS related constants */
+#define AVS_LOOP_LIMIT 10000
+#define AVS_TIMEOUT 300 /* in ms; expected completion is < 10ms */
+#define AVS_FIRMWARE_MAGIC 0xa11600d1
+
+#define BRCM_AVS_CPUFREQ_PREFIX "brcmstb-avs"
+#define BRCM_AVS_CPUFREQ_NAME BRCM_AVS_CPUFREQ_PREFIX "-cpufreq"
+#define BRCM_AVS_CPU_DATA "brcm,avs-cpu-data-mem"
+#define BRCM_AVS_CPU_INTR "brcm,avs-cpu-l2-intr"
+#define BRCM_AVS_HOST_INTR "sw_intr"
+
+struct pmap {
+ unsigned int mode;
+ unsigned int p1;
+ unsigned int p2;
+ unsigned int state;
+};
+
+struct private_data {
+ void __iomem *base;
+ void __iomem *avs_intr_base;
+ struct device *dev;
+#ifdef CONFIG_ARM_BRCMSTB_AVS_CPUFREQ_DEBUG
+ struct dentry *debugfs;
+#endif
+ struct completion done;
+ struct semaphore sem;
+ struct pmap pmap;
+};
+
+#ifdef CONFIG_ARM_BRCMSTB_AVS_CPUFREQ_DEBUG
+
+enum debugfs_format {
+ DEBUGFS_NORMAL,
+ DEBUGFS_FLOAT,
+ DEBUGFS_REV,
+};
+
+struct debugfs_data {
+ struct debugfs_entry *entry;
+ struct private_data *priv;
+};
+
+struct debugfs_entry {
+ char *name;
+ u32 offset;
+ fmode_t mode;
+ enum debugfs_format format;
+};
+
+#define DEBUGFS_ENTRY(name, mode, format) { \
+ #name, AVS_MBOX_##name, mode, format \
+}
+
+/*
+ * These are used for debugfs only. Otherwise we use AVS_MBOX_PARAM() directly.
+ */
+#define AVS_MBOX_PARAM1 AVS_MBOX_PARAM(0)
+#define AVS_MBOX_PARAM2 AVS_MBOX_PARAM(1)
+#define AVS_MBOX_PARAM3 AVS_MBOX_PARAM(2)
+#define AVS_MBOX_PARAM4 AVS_MBOX_PARAM(3)
+
+/*
+ * This table stores the name, access permissions and offset for each hardware
+ * register and is used to generate debugfs entries.
+ */
+static struct debugfs_entry debugfs_entries[] = {
+ DEBUGFS_ENTRY(COMMAND, S_IWUSR, DEBUGFS_NORMAL),
+ DEBUGFS_ENTRY(STATUS, S_IWUSR, DEBUGFS_NORMAL),
+ DEBUGFS_ENTRY(VOLTAGE0, 0, DEBUGFS_FLOAT),
+ DEBUGFS_ENTRY(TEMP0, 0, DEBUGFS_FLOAT),
+ DEBUGFS_ENTRY(PV0, 0, DEBUGFS_FLOAT),
+ DEBUGFS_ENTRY(MV0, 0, DEBUGFS_FLOAT),
+ DEBUGFS_ENTRY(PARAM1, S_IWUSR, DEBUGFS_NORMAL),
+ DEBUGFS_ENTRY(PARAM2, S_IWUSR, DEBUGFS_NORMAL),
+ DEBUGFS_ENTRY(PARAM3, S_IWUSR, DEBUGFS_NORMAL),
+ DEBUGFS_ENTRY(PARAM4, S_IWUSR, DEBUGFS_NORMAL),
+ DEBUGFS_ENTRY(REVISION, 0, DEBUGFS_REV),
+ DEBUGFS_ENTRY(PSTATE, 0, DEBUGFS_NORMAL),
+ DEBUGFS_ENTRY(HEARTBEAT, 0, DEBUGFS_NORMAL),
+ DEBUGFS_ENTRY(MAGIC, S_IWUSR, DEBUGFS_NORMAL),
+ DEBUGFS_ENTRY(SIGMA_HVT, 0, DEBUGFS_NORMAL),
+ DEBUGFS_ENTRY(SIGMA_SVT, 0, DEBUGFS_NORMAL),
+ DEBUGFS_ENTRY(VOLTAGE1, 0, DEBUGFS_FLOAT),
+ DEBUGFS_ENTRY(TEMP1, 0, DEBUGFS_FLOAT),
+ DEBUGFS_ENTRY(PV1, 0, DEBUGFS_FLOAT),
+ DEBUGFS_ENTRY(MV1, 0, DEBUGFS_FLOAT),
+ DEBUGFS_ENTRY(FREQUENCY, 0, DEBUGFS_NORMAL),
+};
+
+static int brcm_avs_target_index(struct cpufreq_policy *, unsigned int);
+
+static char *__strtolower(char *s)
+{
+ char *p;
+
+ for (p = s; *p; p++)
+ *p = tolower(*p);
+
+ return s;
+}
+
+#endif /* CONFIG_ARM_BRCMSTB_AVS_CPUFREQ_DEBUG */
+
+static void __iomem *__map_region(const char *name)
+{
+ struct device_node *np;
+ void __iomem *ptr;
+
+ np = of_find_compatible_node(NULL, NULL, name);
+ if (!np)
+ return NULL;
+
+ ptr = of_iomap(np, 0);
+ of_node_put(np);
+
+ return ptr;
+}
+
+static int __issue_avs_command(struct private_data *priv, int cmd, bool is_send,
+ u32 args[])
+{
+ unsigned long time_left = msecs_to_jiffies(AVS_TIMEOUT);
+ void __iomem *base = priv->base;
+ unsigned int i;
+ int ret;
+ u32 val;
+
+ ret = down_interruptible(&priv->sem);
+ if (ret)
+ return ret;
+
+ /*
+ * Make sure no other command is currently running: cmd is 0 if AVS
+ * co-processor is idle. Due to the guard above, we should almost never
+ * have to wait here.
+ */
+ for (i = 0, val = 1; val != 0 && i < AVS_LOOP_LIMIT; i++)
+ val = readl(base + AVS_MBOX_COMMAND);
+
+ /* Give the caller a chance to retry if AVS is busy. */
+ if (i == AVS_LOOP_LIMIT) {
+ ret = -EAGAIN;
+ goto out;
+ }
+
+ /* Clear status before we begin. */
+ writel(AVS_STATUS_CLEAR, base + AVS_MBOX_STATUS);
+
+ /* We need to send arguments for this command. */
+ if (args && is_send) {
+ for (i = 0; i < AVS_MAX_CMD_ARGS; i++)
+ writel(args[i], base + AVS_MBOX_PARAM(i));
+ }
+
+ /* Protect from spurious interrupts. */
+ reinit_completion(&priv->done);
+
+ /* Now issue the command & tell firmware to wake up to process it. */
+ writel(cmd, base + AVS_MBOX_COMMAND);
+ writel(AVS_CPU_L2_INT_MASK, priv->avs_intr_base + AVS_CPU_L2_SET0);
+
+ /* Wait for AVS co-processor to finish processing the command. */
+ time_left = wait_for_completion_timeout(&priv->done, time_left);
+
+ /*
+ * If the AVS status is not in the expected range, it means AVS didn't
+ * complete our command in time, and we return an error. Also, if there
+ * is no "time left", we timed out waiting for the interrupt.
+ */
+ val = readl(base + AVS_MBOX_STATUS);
+ if (time_left == 0 || val == 0 || val > AVS_STATUS_MAX) {
+ dev_err(priv->dev, "AVS command %#x didn't complete in time\n",
+ cmd);
+ dev_err(priv->dev, " Time left: %u ms, AVS status: %#x\n",
+ jiffies_to_msecs(time_left), val);
+ ret = -ETIMEDOUT;
+ goto out;
+ }
+
+ /* This command returned arguments, so we read them back. */
+ if (args && !is_send) {
+ for (i = 0; i < AVS_MAX_CMD_ARGS; i++)
+ args[i] = readl(base + AVS_MBOX_PARAM(i));
+ }
+
+ /* Clear status to tell AVS co-processor we are done. */
+ writel(AVS_STATUS_CLEAR, base + AVS_MBOX_STATUS);
+
+ /* Convert firmware errors to errno's as much as possible. */
+ switch (val) {
+ case AVS_STATUS_INVALID:
+ ret = -EINVAL;
+ break;
+ case AVS_STATUS_NO_SUPP:
+ ret = -ENOTSUPP;
+ break;
+ case AVS_STATUS_NO_MAP:
+ ret = -ENOENT;
+ break;
+ case AVS_STATUS_MAP_SET:
+ ret = -EEXIST;
+ break;
+ case AVS_STATUS_FAILURE:
+ ret = -EIO;
+ break;
+ }
+
+out:
+ up(&priv->sem);
+
+ return ret;
+}
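+
+/*
+ * Typical round trip (sketch): callers such as brcm_avs_get_pstate() pass
+ * a u32 args[AVS_MAX_CMD_ARGS] buffer; with is_send == false, the returned
+ * arguments are read back from the parameter registers once the command
+ * completes.
+ */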
+
+static irqreturn_t irq_handler(int irq, void *data)
+{
+ struct private_data *priv = data;
+
+ /* AVS command completed execution. Wake up __issue_avs_command(). */
+ complete(&priv->done);
+
+ return IRQ_HANDLED;
+}
+
+static char *brcm_avs_mode_to_string(unsigned int mode)
+{
+ switch (mode) {
+ case AVS_MODE_AVS:
+ return "AVS";
+ case AVS_MODE_DFS:
+ return "DFS";
+ case AVS_MODE_DVS:
+ return "DVS";
+ case AVS_MODE_DVFS:
+ return "DVFS";
+ }
+ return NULL;
+}
+
+static void brcm_avs_parse_p1(u32 p1, unsigned int *mdiv_p0, unsigned int *pdiv,
+ unsigned int *ndiv)
+{
+ *mdiv_p0 = (p1 >> MDIV_P0_SHIFT) & MDIV_P0_MASK;
+ *pdiv = (p1 >> PDIV_SHIFT) & PDIV_MASK;
+ *ndiv = (p1 >> NDIV_INT_SHIFT) & NDIV_INT_MASK;
+}
+
+static void brcm_avs_parse_p2(u32 p2, unsigned int *mdiv_p1,
+ unsigned int *mdiv_p2, unsigned int *mdiv_p3,
+ unsigned int *mdiv_p4)
+{
+ *mdiv_p4 = (p2 >> MDIV_P4_SHIFT) & MDIV_P4_MASK;
+ *mdiv_p3 = (p2 >> MDIV_P3_SHIFT) & MDIV_P3_MASK;
+ *mdiv_p2 = (p2 >> MDIV_P2_SHIFT) & MDIV_P2_MASK;
+ *mdiv_p1 = (p2 >> MDIV_P1_SHIFT) & MDIV_P1_MASK;
+}
+
+static int brcm_avs_get_pmap(struct private_data *priv, struct pmap *pmap)
+{
+ u32 args[AVS_MAX_CMD_ARGS];
+ int ret;
+
+ ret = __issue_avs_command(priv, AVS_CMD_GET_PMAP, false, args);
+ if (ret || !pmap)
+ return ret;
+
+ pmap->mode = args[0];
+ pmap->p1 = args[1];
+ pmap->p2 = args[2];
+ pmap->state = args[3];
+
+ return 0;
+}
+
+static int brcm_avs_set_pmap(struct private_data *priv, struct pmap *pmap)
+{
+ u32 args[AVS_MAX_CMD_ARGS];
+
+ args[0] = pmap->mode;
+ args[1] = pmap->p1;
+ args[2] = pmap->p2;
+ args[3] = pmap->state;
+
+ return __issue_avs_command(priv, AVS_CMD_SET_PMAP, true, args);
+}
+
+static int brcm_avs_get_pstate(struct private_data *priv, unsigned int *pstate)
+{
+ u32 args[AVS_MAX_CMD_ARGS];
+ int ret;
+
+ ret = __issue_avs_command(priv, AVS_CMD_GET_PSTATE, false, args);
+ if (ret)
+ return ret;
+ *pstate = args[0];
+
+ return 0;
+}
+
+static int brcm_avs_set_pstate(struct private_data *priv, unsigned int pstate)
+{
+ u32 args[AVS_MAX_CMD_ARGS];
+
+ args[0] = pstate;
+
+ return __issue_avs_command(priv, AVS_CMD_SET_PSTATE, true, args);
+}
+
+static unsigned long brcm_avs_get_voltage(void __iomem *base)
+{
+ return readl(base + AVS_MBOX_VOLTAGE1);
+}
+
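+/*
+ * The firmware presumably reports the frequency in MHz; multiplying by 1000
+ * yields the kHz units cpufreq expects (e.g. a register value of 1608 is
+ * returned as 1608000 kHz).
+ */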
+static unsigned long brcm_avs_get_frequency(void __iomem *base)
+{
+ return readl(base + AVS_MBOX_FREQUENCY) * 1000; /* in kHz */
+}
+
+/*
+ * We determine which frequencies are supported by cycling through all P-states
+ * and reading back what frequency we are running at for each P-state.
+ */
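+/*
+ * Illustrative result (frequencies hypothetical): table[0..4] hold the
+ * frequencies measured for P-states P0..P4, each with driver_data set to
+ * its P-state index, and table[5].frequency is CPUFREQ_TABLE_END.
+ */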
+static struct cpufreq_frequency_table *
+brcm_avs_get_freq_table(struct device *dev, struct private_data *priv)
+{
+ struct cpufreq_frequency_table *table;
+ unsigned int pstate;
+ int i, ret;
+
+ /* Remember P-state for later */
+ ret = brcm_avs_get_pstate(priv, &pstate);
+ if (ret)
+ return ERR_PTR(ret);
+
+	/* Reserve one extra entry for the CPUFREQ_TABLE_END terminator. */
+	table = devm_kzalloc(dev, (AVS_PSTATE_MAX + 2) * sizeof(*table),
+			     GFP_KERNEL);
+ if (!table)
+ return ERR_PTR(-ENOMEM);
+
+ for (i = AVS_PSTATE_P0; i <= AVS_PSTATE_MAX; i++) {
+ ret = brcm_avs_set_pstate(priv, i);
+ if (ret)
+ return ERR_PTR(ret);
+ table[i].frequency = brcm_avs_get_frequency(priv->base);
+ table[i].driver_data = i;
+ }
+ table[i].frequency = CPUFREQ_TABLE_END;
+
+ /* Restore P-state */
+ ret = brcm_avs_set_pstate(priv, pstate);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return table;
+}
+
+#ifdef CONFIG_ARM_BRCMSTB_AVS_CPUFREQ_DEBUG
+
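+/*
+ * Display helpers for fixed-point values: a raw reading of, say, 1325
+ * (presumably milli-units) is rendered as "1.325", i.e. MANT() = 1 and
+ * FRAC() = 325.
+ */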
+#define MANT(x) (unsigned int)(abs((x)) / 1000)
+#define FRAC(x) (unsigned int)(abs((x)) - abs((x)) / 1000 * 1000)
+
+static int brcm_avs_debug_show(struct seq_file *s, void *data)
+{
+ struct debugfs_data *dbgfs = s->private;
+ void __iomem *base;
+ u32 val, offset;
+
+ if (!dbgfs) {
+ seq_puts(s, "No device pointer\n");
+ return 0;
+ }
+
+ base = dbgfs->priv->base;
+ offset = dbgfs->entry->offset;
+ val = readl(base + offset);
+ switch (dbgfs->entry->format) {
+ case DEBUGFS_NORMAL:
+ seq_printf(s, "%u\n", val);
+ break;
+ case DEBUGFS_FLOAT:
+ seq_printf(s, "%d.%03d\n", MANT(val), FRAC(val));
+ break;
+ case DEBUGFS_REV:
+ seq_printf(s, "%c.%c.%c.%c\n", (val >> 24 & 0xff),
+ (val >> 16 & 0xff), (val >> 8 & 0xff),
+ val & 0xff);
+ break;
+ }
+ seq_printf(s, "0x%08x\n", val);
+
+ return 0;
+}
+
+#undef MANT
+#undef FRAC
+
+static ssize_t brcm_avs_seq_write(struct file *file, const char __user *buf,
+ size_t size, loff_t *ppos)
+{
+ struct seq_file *s = file->private_data;
+ struct debugfs_data *dbgfs = s->private;
+ struct private_data *priv = dbgfs->priv;
+ void __iomem *base, *avs_intr_base;
+ bool use_issue_command = false;
+ unsigned long val, offset;
+ char str[128];
+ int ret;
+ char *str_ptr = str;
+
+ if (size >= sizeof(str))
+ return -E2BIG;
+
+ memset(str, 0, sizeof(str));
+	/* copy_from_user() returns the number of bytes left, not an errno. */
+	if (copy_from_user(str, buf, size))
+		return -EFAULT;
+
+ base = priv->base;
+ avs_intr_base = priv->avs_intr_base;
+ offset = dbgfs->entry->offset;
+ /*
+ * Special case writing to "command" entry only: if the string starts
+ * with a 'c', we use the driver's __issue_avs_command() function.
+ * Otherwise, we perform a raw write. This should allow testing of raw
+ * access as well as using the higher level function. (Raw access
+ * doesn't clear the firmware return status after issuing the command.)
+ */
+ if (str_ptr[0] == 'c' && offset == AVS_MBOX_COMMAND) {
+ use_issue_command = true;
+ str_ptr++;
+ }
+ if (kstrtoul(str_ptr, 0, &val) != 0)
+ return -EINVAL;
+
+ /*
+ * Setting the P-state is a special case. We need to update the CPU
+ * frequency we report.
+ */
+	if (val == AVS_CMD_SET_PSTATE) {
+		struct cpufreq_policy *policy;
+		unsigned int pstate;
+
+		policy = cpufreq_cpu_get(smp_processor_id());
+		if (!policy)
+			return -ENODEV;
+		/* Read back the P-state we are about to set */
+		pstate = readl(base + AVS_MBOX_PARAM(0));
+		if (use_issue_command) {
+			ret = brcm_avs_target_index(policy, pstate);
+			cpufreq_cpu_put(policy);
+			return ret ? ret : size;
+		}
+		policy->cur = policy->freq_table[pstate].frequency;
+		cpufreq_cpu_put(policy);
+	}
+
+ if (use_issue_command) {
+ ret = __issue_avs_command(priv, val, false, NULL);
+ } else {
+ /* Locking here is not perfect, but is only for debug. */
+ ret = down_interruptible(&priv->sem);
+ if (ret)
+ return ret;
+
+ writel(val, base + offset);
+ /* We have to wake up the firmware to process a command. */
+ if (offset == AVS_MBOX_COMMAND)
+ writel(AVS_CPU_L2_INT_MASK,
+ avs_intr_base + AVS_CPU_L2_SET0);
+ up(&priv->sem);
+ }
+
+ return ret ? ret : size;
+}
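+
+/*
+ * Usage sketch (paths illustrative, debugfs assumed mounted at
+ * /sys/kernel/debug): writing "c0x40" to brcmstb-avs-cpufreq/command issues
+ * AVS_CMD_GET_PSTATE via __issue_avs_command(), while writing "0x40"
+ * performs a raw register write that also wakes up the firmware.
+ */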
+
+static struct debugfs_entry *__find_debugfs_entry(const char *name)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(debugfs_entries); i++)
+ if (strcasecmp(debugfs_entries[i].name, name) == 0)
+ return &debugfs_entries[i];
+
+ return NULL;
+}
+
+static int brcm_avs_debug_open(struct inode *inode, struct file *file)
+{
+ struct debugfs_data *data;
+ fmode_t fmode;
+ int ret;
+
+ /*
+ * seq_open(), which is called by single_open(), clears "write" access.
+ * We need write access to some files, so we preserve our access mode
+ * and restore it.
+ */
+ fmode = file->f_mode;
+ /*
+ * Check access permissions even for root. We don't want to be writing
+ * to read-only registers. Access for regular users has already been
+ * checked by the VFS layer.
+ */
+ if ((fmode & FMODE_WRITER) && !(inode->i_mode & S_IWUSR))
+ return -EACCES;
+
+ data = kmalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+ /*
+ * We use the same file system operations for all our debug files. To
+ * produce specific output, we look up the file name upon opening a
+ * debugfs entry and map it to a memory offset. This offset is then used
+ * in the generic "show" function to read a specific register.
+ */
+ data->entry = __find_debugfs_entry(file->f_path.dentry->d_iname);
+ data->priv = inode->i_private;
+
+ ret = single_open(file, brcm_avs_debug_show, data);
+ if (ret)
+ kfree(data);
+ file->f_mode = fmode;
+
+ return ret;
+}
+
+static int brcm_avs_debug_release(struct inode *inode, struct file *file)
+{
+ struct seq_file *seq_priv = file->private_data;
+ struct debugfs_data *data = seq_priv->private;
+
+ kfree(data);
+ return single_release(inode, file);
+}
+
+static const struct file_operations brcm_avs_debug_ops = {
+ .open = brcm_avs_debug_open,
+ .read = seq_read,
+ .write = brcm_avs_seq_write,
+ .llseek = seq_lseek,
+ .release = brcm_avs_debug_release,
+};
+
+static void brcm_avs_cpufreq_debug_init(struct platform_device *pdev)
+{
+ struct private_data *priv = platform_get_drvdata(pdev);
+ struct dentry *dir;
+ int i;
+
+ if (!priv)
+ return;
+
+ dir = debugfs_create_dir(BRCM_AVS_CPUFREQ_NAME, NULL);
+ if (IS_ERR_OR_NULL(dir))
+ return;
+ priv->debugfs = dir;
+
+ for (i = 0; i < ARRAY_SIZE(debugfs_entries); i++) {
+ /*
+ * The DEBUGFS_ENTRY macro generates uppercase strings. We
+ * convert them to lowercase before creating the debugfs
+ * entries.
+ */
+ char *entry = __strtolower(debugfs_entries[i].name);
+ fmode_t mode = debugfs_entries[i].mode;
+
+ if (!debugfs_create_file(entry, S_IFREG | S_IRUGO | mode,
+ dir, priv, &brcm_avs_debug_ops)) {
+ priv->debugfs = NULL;
+ debugfs_remove_recursive(dir);
+ break;
+ }
+ }
+}
+
+static void brcm_avs_cpufreq_debug_exit(struct platform_device *pdev)
+{
+ struct private_data *priv = platform_get_drvdata(pdev);
+
+ if (priv && priv->debugfs) {
+ debugfs_remove_recursive(priv->debugfs);
+ priv->debugfs = NULL;
+ }
+}
+
+#else
+
+static void brcm_avs_cpufreq_debug_init(struct platform_device *pdev) {}
+static void brcm_avs_cpufreq_debug_exit(struct platform_device *pdev) {}
+
+#endif /* CONFIG_ARM_BRCMSTB_AVS_CPUFREQ_DEBUG */
+
+/*
+ * To ensure the right firmware is running we need to
+ *  - check that the MAGIC matches what we expect
+ *  - check that brcm_avs_get_pmap() doesn't return -ENOTSUPP or -EINVAL
+ * We need to set up our interrupt handling before calling brcm_avs_get_pmap()!
+ */
+static bool brcm_avs_is_firmware_loaded(struct private_data *priv)
+{
+ u32 magic;
+ int rc;
+
+ rc = brcm_avs_get_pmap(priv, NULL);
+ magic = readl(priv->base + AVS_MBOX_MAGIC);
+
+ return (magic == AVS_FIRMWARE_MAGIC) && (rc != -ENOTSUPP) &&
+ (rc != -EINVAL);
+}
+
+static unsigned int brcm_avs_cpufreq_get(unsigned int cpu)
+{
+	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
+	struct private_data *priv;
+	unsigned int freq;
+
+	if (!policy)
+		return 0;
+	priv = policy->driver_data;
+	freq = brcm_avs_get_frequency(priv->base);
+	/* Drop the reference taken by cpufreq_cpu_get(). */
+	cpufreq_cpu_put(policy);
+	return freq;
+}
+
+static int brcm_avs_target_index(struct cpufreq_policy *policy,
+ unsigned int index)
+{
+ return brcm_avs_set_pstate(policy->driver_data,
+ policy->freq_table[index].driver_data);
+}
+
+static int brcm_avs_suspend(struct cpufreq_policy *policy)
+{
+ struct private_data *priv = policy->driver_data;
+
+ return brcm_avs_get_pmap(priv, &priv->pmap);
+}
+
+static int brcm_avs_resume(struct cpufreq_policy *policy)
+{
+ struct private_data *priv = policy->driver_data;
+ int ret;
+
+ ret = brcm_avs_set_pmap(priv, &priv->pmap);
+ if (ret == -EEXIST) {
+ struct platform_device *pdev = cpufreq_get_driver_data();
+ struct device *dev = &pdev->dev;
+
+ dev_warn(dev, "PMAP was already set\n");
+ ret = 0;
+ }
+
+ return ret;
+}
+
+/*
+ * All initialization code that we only want to execute once goes here. Setup
+ * code that can be re-tried on every core (if it failed before) can go into
+ * brcm_avs_cpufreq_init().
+ */
+static int brcm_avs_prepare_init(struct platform_device *pdev)
+{
+ struct private_data *priv;
+ struct device *dev;
+ int host_irq, ret;
+
+ dev = &pdev->dev;
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->dev = dev;
+ sema_init(&priv->sem, 1);
+ init_completion(&priv->done);
+ platform_set_drvdata(pdev, priv);
+
+ priv->base = __map_region(BRCM_AVS_CPU_DATA);
+ if (!priv->base) {
+ dev_err(dev, "Couldn't find property %s in device tree.\n",
+ BRCM_AVS_CPU_DATA);
+ return -ENOENT;
+ }
+
+ priv->avs_intr_base = __map_region(BRCM_AVS_CPU_INTR);
+ if (!priv->avs_intr_base) {
+ dev_err(dev, "Couldn't find property %s in device tree.\n",
+ BRCM_AVS_CPU_INTR);
+ ret = -ENOENT;
+ goto unmap_base;
+ }
+
+ host_irq = platform_get_irq_byname(pdev, BRCM_AVS_HOST_INTR);
+ if (host_irq < 0) {
+ dev_err(dev, "Couldn't find interrupt %s -- %d\n",
+ BRCM_AVS_HOST_INTR, host_irq);
+ ret = host_irq;
+ goto unmap_intr_base;
+ }
+
+ ret = devm_request_irq(dev, host_irq, irq_handler, IRQF_TRIGGER_RISING,
+ BRCM_AVS_HOST_INTR, priv);
+ if (ret) {
+ dev_err(dev, "IRQ request failed: %s (%d) -- %d\n",
+ BRCM_AVS_HOST_INTR, host_irq, ret);
+ goto unmap_intr_base;
+ }
+
+ if (brcm_avs_is_firmware_loaded(priv))
+ return 0;
+
+ dev_err(dev, "AVS firmware is not loaded or doesn't support DVFS\n");
+ ret = -ENODEV;
+
+unmap_intr_base:
+ iounmap(priv->avs_intr_base);
+unmap_base:
+ iounmap(priv->base);
+ platform_set_drvdata(pdev, NULL);
+
+ return ret;
+}
+
+static int brcm_avs_cpufreq_init(struct cpufreq_policy *policy)
+{
+ struct cpufreq_frequency_table *freq_table;
+ struct platform_device *pdev;
+ struct private_data *priv;
+ struct device *dev;
+ int ret;
+
+ pdev = cpufreq_get_driver_data();
+ priv = platform_get_drvdata(pdev);
+ policy->driver_data = priv;
+ dev = &pdev->dev;
+
+ freq_table = brcm_avs_get_freq_table(dev, priv);
+ if (IS_ERR(freq_table)) {
+ ret = PTR_ERR(freq_table);
+ dev_err(dev, "Couldn't determine frequency table (%d).\n", ret);
+ return ret;
+ }
+
+ ret = cpufreq_table_validate_and_show(policy, freq_table);
+ if (ret) {
+ dev_err(dev, "invalid frequency table: %d\n", ret);
+ return ret;
+ }
+
+ /* All cores share the same clock and thus the same policy. */
+ cpumask_setall(policy->cpus);
+
+ ret = __issue_avs_command(priv, AVS_CMD_ENABLE, false, NULL);
+ if (!ret) {
+ unsigned int pstate;
+
+ ret = brcm_avs_get_pstate(priv, &pstate);
+ if (!ret) {
+ policy->cur = freq_table[pstate].frequency;
+ dev_info(dev, "registered\n");
+ return 0;
+ }
+ }
+
+ dev_err(dev, "couldn't initialize driver (%d)\n", ret);
+
+ return ret;
+}
+
+static ssize_t show_brcm_avs_pstate(struct cpufreq_policy *policy, char *buf)
+{
+ struct private_data *priv = policy->driver_data;
+ unsigned int pstate;
+
+ if (brcm_avs_get_pstate(priv, &pstate))
+ return sprintf(buf, "<unknown>\n");
+
+ return sprintf(buf, "%u\n", pstate);
+}
+
+static ssize_t show_brcm_avs_mode(struct cpufreq_policy *policy, char *buf)
+{
+ struct private_data *priv = policy->driver_data;
+ struct pmap pmap;
+
+ if (brcm_avs_get_pmap(priv, &pmap))
+ return sprintf(buf, "<unknown>\n");
+
+ return sprintf(buf, "%s %u\n", brcm_avs_mode_to_string(pmap.mode),
+ pmap.mode);
+}
+
+static ssize_t show_brcm_avs_pmap(struct cpufreq_policy *policy, char *buf)
+{
+ unsigned int mdiv_p0, mdiv_p1, mdiv_p2, mdiv_p3, mdiv_p4;
+ struct private_data *priv = policy->driver_data;
+ unsigned int ndiv, pdiv;
+ struct pmap pmap;
+
+ if (brcm_avs_get_pmap(priv, &pmap))
+ return sprintf(buf, "<unknown>\n");
+
+ brcm_avs_parse_p1(pmap.p1, &mdiv_p0, &pdiv, &ndiv);
+ brcm_avs_parse_p2(pmap.p2, &mdiv_p1, &mdiv_p2, &mdiv_p3, &mdiv_p4);
+
+ return sprintf(buf, "0x%08x 0x%08x %u %u %u %u %u %u %u\n",
+ pmap.p1, pmap.p2, ndiv, pdiv, mdiv_p0, mdiv_p1, mdiv_p2,
+ mdiv_p3, mdiv_p4);
+}
+
+static ssize_t show_brcm_avs_voltage(struct cpufreq_policy *policy, char *buf)
+{
+ struct private_data *priv = policy->driver_data;
+
+ return sprintf(buf, "0x%08lx\n", brcm_avs_get_voltage(priv->base));
+}
+
+static ssize_t show_brcm_avs_frequency(struct cpufreq_policy *policy, char *buf)
+{
+ struct private_data *priv = policy->driver_data;
+
+ return sprintf(buf, "0x%08lx\n", brcm_avs_get_frequency(priv->base));
+}
+
+cpufreq_freq_attr_ro(brcm_avs_pstate);
+cpufreq_freq_attr_ro(brcm_avs_mode);
+cpufreq_freq_attr_ro(brcm_avs_pmap);
+cpufreq_freq_attr_ro(brcm_avs_voltage);
+cpufreq_freq_attr_ro(brcm_avs_frequency);
+
+static struct freq_attr *brcm_avs_cpufreq_attr[] = {
+ &cpufreq_freq_attr_scaling_available_freqs,
+ &brcm_avs_pstate,
+ &brcm_avs_mode,
+ &brcm_avs_pmap,
+ &brcm_avs_voltage,
+ &brcm_avs_frequency,
+ NULL
+};
+
+static struct cpufreq_driver brcm_avs_driver = {
+ .flags = CPUFREQ_NEED_INITIAL_FREQ_CHECK,
+ .verify = cpufreq_generic_frequency_table_verify,
+ .target_index = brcm_avs_target_index,
+ .get = brcm_avs_cpufreq_get,
+ .suspend = brcm_avs_suspend,
+ .resume = brcm_avs_resume,
+ .init = brcm_avs_cpufreq_init,
+ .attr = brcm_avs_cpufreq_attr,
+ .name = BRCM_AVS_CPUFREQ_PREFIX,
+};
+
+static int brcm_avs_cpufreq_probe(struct platform_device *pdev)
+{
+ int ret;
+
+ ret = brcm_avs_prepare_init(pdev);
+ if (ret)
+ return ret;
+
+ brcm_avs_driver.driver_data = pdev;
+ ret = cpufreq_register_driver(&brcm_avs_driver);
+ if (!ret)
+ brcm_avs_cpufreq_debug_init(pdev);
+
+ return ret;
+}
+
+static int brcm_avs_cpufreq_remove(struct platform_device *pdev)
+{
+ struct private_data *priv;
+ int ret;
+
+ ret = cpufreq_unregister_driver(&brcm_avs_driver);
+ if (ret)
+ return ret;
+
+ brcm_avs_cpufreq_debug_exit(pdev);
+
+ priv = platform_get_drvdata(pdev);
+ iounmap(priv->base);
+ iounmap(priv->avs_intr_base);
+ platform_set_drvdata(pdev, NULL);
+
+ return 0;
+}
+
+static const struct of_device_id brcm_avs_cpufreq_match[] = {
+ { .compatible = BRCM_AVS_CPU_DATA },
+ { }
+};
+MODULE_DEVICE_TABLE(of, brcm_avs_cpufreq_match);
+
+static struct platform_driver brcm_avs_cpufreq_platdrv = {
+ .driver = {
+ .name = BRCM_AVS_CPUFREQ_NAME,
+ .of_match_table = brcm_avs_cpufreq_match,
+ },
+ .probe = brcm_avs_cpufreq_probe,
+ .remove = brcm_avs_cpufreq_remove,
+};
+module_platform_driver(brcm_avs_cpufreq_platdrv);
+
+MODULE_AUTHOR("Markus Mayer <mmayer@broadcom.com>");
+MODULE_DESCRIPTION("CPUfreq driver for Broadcom STB AVS");
+MODULE_LICENSE("GPL");
diff --git a/drivers/cpufreq/cppc_cpufreq.c b/drivers/cpufreq/cppc_cpufreq.c
index 4852d9efe74e..e82bb3c30b92 100644
--- a/drivers/cpufreq/cppc_cpufreq.c
+++ b/drivers/cpufreq/cppc_cpufreq.c
@@ -247,3 +247,10 @@ MODULE_DESCRIPTION("CPUFreq driver based on the ACPI CPPC v5.0+ spec");
MODULE_LICENSE("GPL");
late_initcall(cppc_cpufreq_init);
+
+static const struct acpi_device_id cppc_acpi_ids[] = {
+ {ACPI_PROCESSOR_DEVICE_HID, },
+ {}
+};
+
+MODULE_DEVICE_TABLE(acpi, cppc_acpi_ids);
diff --git a/drivers/cpufreq/cpufreq-dt-platdev.c b/drivers/cpufreq/cpufreq-dt-platdev.c
index 71267626456b..bc97b6a4b1cf 100644
--- a/drivers/cpufreq/cpufreq-dt-platdev.c
+++ b/drivers/cpufreq/cpufreq-dt-platdev.c
@@ -26,6 +26,9 @@ static const struct of_device_id machines[] __initconst = {
{ .compatible = "allwinner,sun8i-a83t", },
{ .compatible = "allwinner,sun8i-h3", },
+ { .compatible = "arm,integrator-ap", },
+ { .compatible = "arm,integrator-cp", },
+
{ .compatible = "hisilicon,hi6220", },
{ .compatible = "fsl,imx27", },
@@ -34,6 +37,8 @@ static const struct of_device_id machines[] __initconst = {
{ .compatible = "fsl,imx7d", },
{ .compatible = "marvell,berlin", },
+ { .compatible = "marvell,pxa250", },
+ { .compatible = "marvell,pxa270", },
{ .compatible = "samsung,exynos3250", },
{ .compatible = "samsung,exynos4210", },
@@ -50,6 +55,8 @@ static const struct of_device_id machines[] __initconst = {
{ .compatible = "renesas,r7s72100", },
{ .compatible = "renesas,r8a73a4", },
{ .compatible = "renesas,r8a7740", },
+ { .compatible = "renesas,r8a7743", },
+ { .compatible = "renesas,r8a7745", },
{ .compatible = "renesas,r8a7778", },
{ .compatible = "renesas,r8a7779", },
{ .compatible = "renesas,r8a7790", },
@@ -72,6 +79,12 @@ static const struct of_device_id machines[] __initconst = {
{ .compatible = "sigma,tango4" },
+ { .compatible = "socionext,uniphier-pro5", },
+ { .compatible = "socionext,uniphier-pxs2", },
+ { .compatible = "socionext,uniphier-ld6b", },
+ { .compatible = "socionext,uniphier-ld11", },
+ { .compatible = "socionext,uniphier-ld20", },
+
{ .compatible = "ti,am33xx", },
{ .compatible = "ti,dra7", },
{ .compatible = "ti,omap2", },
@@ -81,6 +94,8 @@ static const struct of_device_id machines[] __initconst = {
{ .compatible = "xlnx,zynq-7000", },
+ { .compatible = "zte,zx296718", },
+
{ }
};
diff --git a/drivers/cpufreq/cpufreq-dt.c b/drivers/cpufreq/cpufreq-dt.c
index 5c07ae05d69a..269013311e79 100644
--- a/drivers/cpufreq/cpufreq-dt.c
+++ b/drivers/cpufreq/cpufreq-dt.c
@@ -28,6 +28,7 @@
#include "cpufreq-dt.h"
struct private_data {
+ struct opp_table *opp_table;
struct device *cpu_dev;
struct thermal_cooling_device *cdev;
const char *reg_name;
@@ -143,6 +144,7 @@ static int resources_available(void)
static int cpufreq_init(struct cpufreq_policy *policy)
{
struct cpufreq_frequency_table *freq_table;
+ struct opp_table *opp_table = NULL;
struct private_data *priv;
struct device *cpu_dev;
struct clk *cpu_clk;
@@ -186,8 +188,9 @@ static int cpufreq_init(struct cpufreq_policy *policy)
*/
name = find_supply_name(cpu_dev);
if (name) {
- ret = dev_pm_opp_set_regulator(cpu_dev, name);
- if (ret) {
+ opp_table = dev_pm_opp_set_regulators(cpu_dev, &name, 1);
+ if (IS_ERR(opp_table)) {
+ ret = PTR_ERR(opp_table);
dev_err(cpu_dev, "Failed to set regulator for cpu%d: %d\n",
policy->cpu, ret);
goto out_put_clk;
@@ -237,6 +240,7 @@ static int cpufreq_init(struct cpufreq_policy *policy)
}
priv->reg_name = name;
+ priv->opp_table = opp_table;
ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &freq_table);
if (ret) {
@@ -285,7 +289,7 @@ out_free_priv:
out_free_opp:
dev_pm_opp_of_cpumask_remove_table(policy->cpus);
if (name)
- dev_pm_opp_put_regulator(cpu_dev);
+ dev_pm_opp_put_regulators(opp_table);
out_put_clk:
clk_put(cpu_clk);
@@ -300,7 +304,7 @@ static int cpufreq_exit(struct cpufreq_policy *policy)
dev_pm_opp_free_cpufreq_table(priv->cpu_dev, &policy->freq_table);
dev_pm_opp_of_cpumask_remove_table(policy->related_cpus);
if (priv->reg_name)
- dev_pm_opp_put_regulator(priv->cpu_dev);
+ dev_pm_opp_put_regulators(priv->opp_table);
clk_put(policy->clk);
kfree(priv);
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 6e6c1fb60fbc..cc475eff90b3 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -1526,7 +1526,10 @@ unsigned int cpufreq_get(unsigned int cpu)
if (policy) {
down_read(&policy->rwsem);
- ret_freq = __cpufreq_get(policy);
+
+ if (!policy_is_inactive(policy))
+ ret_freq = __cpufreq_get(policy);
+
up_read(&policy->rwsem);
cpufreq_cpu_put(policy);
@@ -2254,17 +2257,19 @@ static int cpufreq_set_policy(struct cpufreq_policy *policy,
* Useful for policy notifiers which have different necessities
* at different times.
*/
-int cpufreq_update_policy(unsigned int cpu)
+void cpufreq_update_policy(unsigned int cpu)
{
struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
struct cpufreq_policy new_policy;
- int ret;
if (!policy)
- return -ENODEV;
+ return;
down_write(&policy->rwsem);
+ if (policy_is_inactive(policy))
+ goto unlock;
+
pr_debug("updating policy for CPU %u\n", cpu);
memcpy(&new_policy, policy, sizeof(*policy));
new_policy.min = policy->user_policy.min;
@@ -2275,24 +2280,20 @@ int cpufreq_update_policy(unsigned int cpu)
* -> ask driver for current freq and notify governors about a change
*/
if (cpufreq_driver->get && !cpufreq_driver->setpolicy) {
- if (cpufreq_suspended) {
- ret = -EAGAIN;
+ if (cpufreq_suspended)
goto unlock;
- }
+
new_policy.cur = cpufreq_update_current_freq(policy);
- if (WARN_ON(!new_policy.cur)) {
- ret = -EIO;
+ if (WARN_ON(!new_policy.cur))
goto unlock;
- }
}
- ret = cpufreq_set_policy(policy, &new_policy);
+ cpufreq_set_policy(policy, &new_policy);
unlock:
up_write(&policy->rwsem);
cpufreq_cpu_put(policy);
- return ret;
}
EXPORT_SYMBOL(cpufreq_update_policy);
diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c
index 13475890d792..992f7c20760f 100644
--- a/drivers/cpufreq/cpufreq_conservative.c
+++ b/drivers/cpufreq/cpufreq_conservative.c
@@ -37,16 +37,16 @@ struct cs_dbs_tuners {
#define DEF_SAMPLING_DOWN_FACTOR (1)
#define MAX_SAMPLING_DOWN_FACTOR (10)
-static inline unsigned int get_freq_target(struct cs_dbs_tuners *cs_tuners,
- struct cpufreq_policy *policy)
+static inline unsigned int get_freq_step(struct cs_dbs_tuners *cs_tuners,
+ struct cpufreq_policy *policy)
{
- unsigned int freq_target = (cs_tuners->freq_step * policy->max) / 100;
+ unsigned int freq_step = (cs_tuners->freq_step * policy->max) / 100;
/* max freq cannot be less than 100. But who knows... */
- if (unlikely(freq_target == 0))
- freq_target = DEF_FREQUENCY_STEP;
+ if (unlikely(freq_step == 0))
+ freq_step = DEF_FREQUENCY_STEP;
- return freq_target;
+ return freq_step;
}
/*
@@ -55,10 +55,10 @@ static inline unsigned int get_freq_target(struct cs_dbs_tuners *cs_tuners,
* sampling_down_factor, we check, if current idle time is more than 80%
* (default), then we try to decrease frequency
*
- * Any frequency increase takes it to the maximum frequency. Frequency reduction
- * happens at minimum steps of 5% (default) of maximum frequency
+ * Frequency updates happen at minimum steps of 5% (default) of maximum
+ * frequency
*/
-static unsigned int cs_dbs_timer(struct cpufreq_policy *policy)
+static unsigned int cs_dbs_update(struct cpufreq_policy *policy)
{
struct policy_dbs_info *policy_dbs = policy->governor_data;
struct cs_policy_dbs_info *dbs_info = to_dbs_info(policy_dbs);
@@ -66,6 +66,7 @@ static unsigned int cs_dbs_timer(struct cpufreq_policy *policy)
struct dbs_data *dbs_data = policy_dbs->dbs_data;
struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
unsigned int load = dbs_update(policy);
+ unsigned int freq_step;
/*
* break out if we 'cannot' reduce the speed as the user might
@@ -82,6 +83,23 @@ static unsigned int cs_dbs_timer(struct cpufreq_policy *policy)
if (requested_freq > policy->max || requested_freq < policy->min)
requested_freq = policy->cur;
+ freq_step = get_freq_step(cs_tuners, policy);
+
+ /*
+	 * Decrease requested_freq by one freq_step for each sampling period
+	 * during which we didn't update the frequency.
+ */
+ if (policy_dbs->idle_periods < UINT_MAX) {
+ unsigned int freq_steps = policy_dbs->idle_periods * freq_step;
+
+ if (requested_freq > freq_steps)
+ requested_freq -= freq_steps;
+ else
+ requested_freq = policy->min;
+
+ policy_dbs->idle_periods = UINT_MAX;
+ }
+
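+	/*
+	 * Illustrative numbers: with policy->max = 2000000 kHz and the
+	 * default 5% freq_step (100000 kHz), four skipped sampling periods
+	 * lower requested_freq by 400000 kHz before the load checks below.
+	 */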
/* Check for frequency increase */
if (load > dbs_data->up_threshold) {
dbs_info->down_skip = 0;
@@ -90,7 +108,7 @@ static unsigned int cs_dbs_timer(struct cpufreq_policy *policy)
if (requested_freq == policy->max)
goto out;
- requested_freq += get_freq_target(cs_tuners, policy);
+ requested_freq += freq_step;
if (requested_freq > policy->max)
requested_freq = policy->max;
@@ -106,16 +124,14 @@ static unsigned int cs_dbs_timer(struct cpufreq_policy *policy)
/* Check for frequency decrease */
if (load < cs_tuners->down_threshold) {
- unsigned int freq_target;
/*
* if we cannot reduce the frequency anymore, break out early
*/
if (requested_freq == policy->min)
goto out;
- freq_target = get_freq_target(cs_tuners, policy);
- if (requested_freq > freq_target)
- requested_freq -= freq_target;
+ if (requested_freq > freq_step)
+ requested_freq -= freq_step;
else
requested_freq = policy->min;
@@ -305,7 +321,7 @@ static void cs_start(struct cpufreq_policy *policy)
static struct dbs_governor cs_governor = {
.gov = CPUFREQ_DBS_GOVERNOR_INITIALIZER("conservative"),
.kobj_type = { .default_attrs = cs_attributes },
- .gov_dbs_timer = cs_dbs_timer,
+ .gov_dbs_update = cs_dbs_update,
.alloc = cs_alloc,
.free = cs_free,
.init = cs_init,
diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c
index 642dd0f183a8..0196467280bd 100644
--- a/drivers/cpufreq/cpufreq_governor.c
+++ b/drivers/cpufreq/cpufreq_governor.c
@@ -61,7 +61,7 @@ ssize_t store_sampling_rate(struct gov_attr_set *attr_set, const char *buf,
* entries can't be freed concurrently.
*/
list_for_each_entry(policy_dbs, &attr_set->policy_list, list) {
- mutex_lock(&policy_dbs->timer_mutex);
+ mutex_lock(&policy_dbs->update_mutex);
/*
* On 32-bit architectures this may race with the
* sample_delay_ns read in dbs_update_util_handler(), but that
@@ -76,7 +76,7 @@ ssize_t store_sampling_rate(struct gov_attr_set *attr_set, const char *buf,
* taken, so it shouldn't be significant.
*/
gov_update_sample_delay(policy_dbs, 0);
- mutex_unlock(&policy_dbs->timer_mutex);
+ mutex_unlock(&policy_dbs->update_mutex);
}
return count;
@@ -117,7 +117,7 @@ unsigned int dbs_update(struct cpufreq_policy *policy)
struct policy_dbs_info *policy_dbs = policy->governor_data;
struct dbs_data *dbs_data = policy_dbs->dbs_data;
unsigned int ignore_nice = dbs_data->ignore_nice_load;
- unsigned int max_load = 0;
+ unsigned int max_load = 0, idle_periods = UINT_MAX;
unsigned int sampling_rate, io_busy, j;
/*
@@ -215,9 +215,19 @@ unsigned int dbs_update(struct cpufreq_policy *policy)
j_cdbs->prev_load = load;
}
+ if (time_elapsed > 2 * sampling_rate) {
+ unsigned int periods = time_elapsed / sampling_rate;
+
+ if (periods < idle_periods)
+ idle_periods = periods;
+ }
+
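+		/*
+		 * Example: if time_elapsed spans five sampling periods,
+		 * periods = 5; the smallest value seen across the policy's
+		 * CPUs is kept in idle_periods.
+		 */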
if (load > max_load)
max_load = load;
}
+
+ policy_dbs->idle_periods = idle_periods;
+
return max_load;
}
EXPORT_SYMBOL_GPL(dbs_update);
@@ -236,9 +246,9 @@ static void dbs_work_handler(struct work_struct *work)
* Make sure cpufreq_governor_limits() isn't evaluating load or the
* ondemand governor isn't updating the sampling rate in parallel.
*/
- mutex_lock(&policy_dbs->timer_mutex);
- gov_update_sample_delay(policy_dbs, gov->gov_dbs_timer(policy));
- mutex_unlock(&policy_dbs->timer_mutex);
+ mutex_lock(&policy_dbs->update_mutex);
+ gov_update_sample_delay(policy_dbs, gov->gov_dbs_update(policy));
+ mutex_unlock(&policy_dbs->update_mutex);
/* Allow the utilization update handler to queue up more work. */
atomic_set(&policy_dbs->work_count, 0);
@@ -348,7 +358,7 @@ static struct policy_dbs_info *alloc_policy_dbs_info(struct cpufreq_policy *poli
return NULL;
policy_dbs->policy = policy;
- mutex_init(&policy_dbs->timer_mutex);
+ mutex_init(&policy_dbs->update_mutex);
atomic_set(&policy_dbs->work_count, 0);
init_irq_work(&policy_dbs->irq_work, dbs_irq_work);
INIT_WORK(&policy_dbs->work, dbs_work_handler);
@@ -367,7 +377,7 @@ static void free_policy_dbs_info(struct policy_dbs_info *policy_dbs,
{
int j;
- mutex_destroy(&policy_dbs->timer_mutex);
+ mutex_destroy(&policy_dbs->update_mutex);
for_each_cpu(j, policy_dbs->policy->related_cpus) {
struct cpu_dbs_info *j_cdbs = &per_cpu(cpu_dbs, j);
@@ -547,10 +557,10 @@ void cpufreq_dbs_governor_limits(struct cpufreq_policy *policy)
{
struct policy_dbs_info *policy_dbs = policy->governor_data;
- mutex_lock(&policy_dbs->timer_mutex);
+ mutex_lock(&policy_dbs->update_mutex);
cpufreq_policy_apply_limits(policy);
gov_update_sample_delay(policy_dbs, 0);
- mutex_unlock(&policy_dbs->timer_mutex);
+ mutex_unlock(&policy_dbs->update_mutex);
}
EXPORT_SYMBOL_GPL(cpufreq_dbs_governor_limits);
diff --git a/drivers/cpufreq/cpufreq_governor.h b/drivers/cpufreq/cpufreq_governor.h
index ef1037e9c92b..f5717ca070cc 100644
--- a/drivers/cpufreq/cpufreq_governor.h
+++ b/drivers/cpufreq/cpufreq_governor.h
@@ -85,7 +85,7 @@ struct policy_dbs_info {
* Per policy mutex that serializes load evaluation from limit-change
* and work-handler.
*/
- struct mutex timer_mutex;
+ struct mutex update_mutex;
u64 last_sample_time;
s64 sample_delay_ns;
@@ -97,6 +97,7 @@ struct policy_dbs_info {
struct list_head list;
/* Multiplier for increasing sample delay temporarily. */
unsigned int rate_mult;
+ unsigned int idle_periods; /* For conservative */
/* Status indicators */
bool is_shared; /* This object is used by multiple CPUs */
bool work_in_progress; /* Work is being queued up or in progress */
@@ -135,7 +136,7 @@ struct dbs_governor {
*/
struct dbs_data *gdbs_data;
- unsigned int (*gov_dbs_timer)(struct cpufreq_policy *policy);
+ unsigned int (*gov_dbs_update)(struct cpufreq_policy *policy);
struct policy_dbs_info *(*alloc)(void);
void (*free)(struct policy_dbs_info *policy_dbs);
int (*init)(struct dbs_data *dbs_data);
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index 3a1f49f5f4c6..4a017e895296 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -25,7 +25,7 @@
#define MAX_SAMPLING_DOWN_FACTOR (100000)
#define MICRO_FREQUENCY_UP_THRESHOLD (95)
#define MICRO_FREQUENCY_MIN_SAMPLE_RATE (10000)
-#define MIN_FREQUENCY_UP_THRESHOLD (11)
+#define MIN_FREQUENCY_UP_THRESHOLD (1)
#define MAX_FREQUENCY_UP_THRESHOLD (100)
static struct od_ops od_ops;
@@ -169,7 +169,7 @@ static void od_update(struct cpufreq_policy *policy)
}
}
-static unsigned int od_dbs_timer(struct cpufreq_policy *policy)
+static unsigned int od_dbs_update(struct cpufreq_policy *policy)
{
struct policy_dbs_info *policy_dbs = policy->governor_data;
struct dbs_data *dbs_data = policy_dbs->dbs_data;
@@ -191,7 +191,7 @@ static unsigned int od_dbs_timer(struct cpufreq_policy *policy)
od_update(policy);
if (dbs_info->freq_lo) {
- /* Setup timer for SUB_SAMPLE */
+ /* Setup SUB_SAMPLE */
dbs_info->sample_type = OD_SUB_SAMPLE;
return dbs_info->freq_hi_delay_us;
}
@@ -255,11 +255,11 @@ static ssize_t store_sampling_down_factor(struct gov_attr_set *attr_set,
list_for_each_entry(policy_dbs, &attr_set->policy_list, list) {
/*
* Doing this without locking might lead to using different
- * rate_mult values in od_update() and od_dbs_timer().
+ * rate_mult values in od_update() and od_dbs_update().
*/
- mutex_lock(&policy_dbs->timer_mutex);
+ mutex_lock(&policy_dbs->update_mutex);
policy_dbs->rate_mult = 1;
- mutex_unlock(&policy_dbs->timer_mutex);
+ mutex_unlock(&policy_dbs->update_mutex);
}
return count;
@@ -374,8 +374,7 @@ static int od_init(struct dbs_data *dbs_data)
dbs_data->up_threshold = MICRO_FREQUENCY_UP_THRESHOLD;
/*
* In nohz/micro accounting case we set the minimum frequency
- * not depending on HZ, but fixed (very low). The deferred
- * timer might skip some samples if idle/sleeping as needed.
+ * not depending on HZ, but fixed (very low).
*/
dbs_data->min_sampling_rate = MICRO_FREQUENCY_MIN_SAMPLE_RATE;
} else {
@@ -415,7 +414,7 @@ static struct od_ops od_ops = {
static struct dbs_governor od_dbs_gov = {
.gov = CPUFREQ_DBS_GOVERNOR_INITIALIZER("ondemand"),
.kobj_type = { .default_attrs = od_attributes },
- .gov_dbs_timer = od_dbs_timer,
+ .gov_dbs_update = od_dbs_update,
.alloc = od_alloc,
.free = od_free,
.init = od_init,
diff --git a/drivers/cpufreq/cpufreq_stats.c b/drivers/cpufreq/cpufreq_stats.c
index 06d3abdffd3a..ac284e66839c 100644
--- a/drivers/cpufreq/cpufreq_stats.c
+++ b/drivers/cpufreq/cpufreq_stats.c
@@ -41,6 +41,18 @@ static int cpufreq_stats_update(struct cpufreq_stats *stats)
return 0;
}
+static void cpufreq_stats_clear_table(struct cpufreq_stats *stats)
+{
+ unsigned int count = stats->max_state;
+
+ memset(stats->time_in_state, 0, count * sizeof(u64));
+#ifdef CONFIG_CPU_FREQ_STAT_DETAILS
+ memset(stats->trans_table, 0, count * count * sizeof(int));
+#endif
+ stats->last_time = get_jiffies_64();
+ stats->total_trans = 0;
+}
+
static ssize_t show_total_trans(struct cpufreq_policy *policy, char *buf)
{
return sprintf(buf, "%d\n", policy->stats->total_trans);
@@ -64,6 +76,14 @@ static ssize_t show_time_in_state(struct cpufreq_policy *policy, char *buf)
return len;
}
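+/*
+ * Writing any value to the new "reset" attribute clears the statistics,
+ * e.g. (illustrative): echo 1 > /sys/devices/system/cpu/cpu0/cpufreq/stats/reset
+ */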
+static ssize_t store_reset(struct cpufreq_policy *policy, const char *buf,
+ size_t count)
+{
+ /* We don't care what is written to the attribute. */
+ cpufreq_stats_clear_table(policy->stats);
+ return count;
+}
+
#ifdef CONFIG_CPU_FREQ_STAT_DETAILS
static ssize_t show_trans_table(struct cpufreq_policy *policy, char *buf)
{
@@ -113,10 +133,12 @@ cpufreq_freq_attr_ro(trans_table);
cpufreq_freq_attr_ro(total_trans);
cpufreq_freq_attr_ro(time_in_state);
+cpufreq_freq_attr_wo(reset);
static struct attribute *default_attrs[] = {
&total_trans.attr,
&time_in_state.attr,
+ &reset.attr,
#ifdef CONFIG_CPU_FREQ_STAT_DETAILS
&trans_table.attr,
#endif
diff --git a/drivers/cpufreq/integrator-cpufreq.c b/drivers/cpufreq/integrator-cpufreq.c
deleted file mode 100644
index 79e3ff2771a6..000000000000
--- a/drivers/cpufreq/integrator-cpufreq.c
+++ /dev/null
@@ -1,239 +0,0 @@
-/*
- * Copyright (C) 2001-2002 Deep Blue Solutions Ltd.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * CPU support functions
- */
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/cpufreq.h>
-#include <linux/sched.h>
-#include <linux/smp.h>
-#include <linux/init.h>
-#include <linux/io.h>
-#include <linux/platform_device.h>
-#include <linux/of.h>
-#include <linux/of_address.h>
-
-#include <asm/mach-types.h>
-#include <asm/hardware/icst.h>
-
-static void __iomem *cm_base;
-/* The cpufreq driver only use the OSC register */
-#define INTEGRATOR_HDR_OSC_OFFSET 0x08
-#define INTEGRATOR_HDR_LOCK_OFFSET 0x14
-
-static struct cpufreq_driver integrator_driver;
-
-static const struct icst_params lclk_params = {
- .ref = 24000000,
- .vco_max = ICST525_VCO_MAX_5V,
- .vco_min = ICST525_VCO_MIN,
- .vd_min = 8,
- .vd_max = 132,
- .rd_min = 24,
- .rd_max = 24,
- .s2div = icst525_s2div,
- .idx2s = icst525_idx2s,
-};
-
-static const struct icst_params cclk_params = {
- .ref = 24000000,
- .vco_max = ICST525_VCO_MAX_5V,
- .vco_min = ICST525_VCO_MIN,
- .vd_min = 12,
- .vd_max = 160,
- .rd_min = 24,
- .rd_max = 24,
- .s2div = icst525_s2div,
- .idx2s = icst525_idx2s,
-};
-
-/*
- * Validate the speed policy.
- */
-static int integrator_verify_policy(struct cpufreq_policy *policy)
-{
- struct icst_vco vco;
-
- cpufreq_verify_within_cpu_limits(policy);
-
- vco = icst_hz_to_vco(&cclk_params, policy->max * 1000);
- policy->max = icst_hz(&cclk_params, vco) / 1000;
-
- vco = icst_hz_to_vco(&cclk_params, policy->min * 1000);
- policy->min = icst_hz(&cclk_params, vco) / 1000;
-
- cpufreq_verify_within_cpu_limits(policy);
- return 0;
-}
-
-
-static int integrator_set_target(struct cpufreq_policy *policy,
- unsigned int target_freq,
- unsigned int relation)
-{
- cpumask_t cpus_allowed;
- int cpu = policy->cpu;
- struct icst_vco vco;
- struct cpufreq_freqs freqs;
- u_int cm_osc;
-
- /*
- * Save this threads cpus_allowed mask.
- */
- cpus_allowed = current->cpus_allowed;
-
- /*
- * Bind to the specified CPU. When this call returns,
- * we should be running on the right CPU.
- */
- set_cpus_allowed_ptr(current, cpumask_of(cpu));
- BUG_ON(cpu != smp_processor_id());
-
- /* get current setting */
- cm_osc = __raw_readl(cm_base + INTEGRATOR_HDR_OSC_OFFSET);
-
- if (machine_is_integrator())
- vco.s = (cm_osc >> 8) & 7;
- else if (machine_is_cintegrator())
- vco.s = 1;
- vco.v = cm_osc & 255;
- vco.r = 22;
- freqs.old = icst_hz(&cclk_params, vco) / 1000;
-
- /* icst_hz_to_vco rounds down -- so we need the next
- * larger freq in case of CPUFREQ_RELATION_L.
- */
- if (relation == CPUFREQ_RELATION_L)
- target_freq += 999;
- if (target_freq > policy->max)
- target_freq = policy->max;
- vco = icst_hz_to_vco(&cclk_params, target_freq * 1000);
- freqs.new = icst_hz(&cclk_params, vco) / 1000;
-
- if (freqs.old == freqs.new) {
- set_cpus_allowed_ptr(current, &cpus_allowed);
- return 0;
- }
-
- cpufreq_freq_transition_begin(policy, &freqs);
-
- cm_osc = __raw_readl(cm_base + INTEGRATOR_HDR_OSC_OFFSET);
-
- if (machine_is_integrator()) {
- cm_osc &= 0xfffff800;
- cm_osc |= vco.s << 8;
- } else if (machine_is_cintegrator()) {
- cm_osc &= 0xffffff00;
- }
- cm_osc |= vco.v;
-
- __raw_writel(0xa05f, cm_base + INTEGRATOR_HDR_LOCK_OFFSET);
- __raw_writel(cm_osc, cm_base + INTEGRATOR_HDR_OSC_OFFSET);
- __raw_writel(0, cm_base + INTEGRATOR_HDR_LOCK_OFFSET);
-
- /*
- * Restore the CPUs allowed mask.
- */
- set_cpus_allowed_ptr(current, &cpus_allowed);
-
- cpufreq_freq_transition_end(policy, &freqs, 0);
-
- return 0;
-}
-
-static unsigned int integrator_get(unsigned int cpu)
-{
- cpumask_t cpus_allowed;
- unsigned int current_freq;
- u_int cm_osc;
- struct icst_vco vco;
-
- cpus_allowed = current->cpus_allowed;
-
- set_cpus_allowed_ptr(current, cpumask_of(cpu));
- BUG_ON(cpu != smp_processor_id());
-
- /* detect memory etc. */
- cm_osc = __raw_readl(cm_base + INTEGRATOR_HDR_OSC_OFFSET);
-
- if (machine_is_integrator())
- vco.s = (cm_osc >> 8) & 7;
- else
- vco.s = 1;
- vco.v = cm_osc & 255;
- vco.r = 22;
-
- current_freq = icst_hz(&cclk_params, vco) / 1000; /* current freq */
-
- set_cpus_allowed_ptr(current, &cpus_allowed);
-
- return current_freq;
-}
-
-static int integrator_cpufreq_init(struct cpufreq_policy *policy)
-{
-
- /* set default policy and cpuinfo */
- policy->max = policy->cpuinfo.max_freq = 160000;
- policy->min = policy->cpuinfo.min_freq = 12000;
- policy->cpuinfo.transition_latency = 1000000; /* 1 ms, assumed */
-
- return 0;
-}
-
-static struct cpufreq_driver integrator_driver = {
- .flags = CPUFREQ_NEED_INITIAL_FREQ_CHECK,
- .verify = integrator_verify_policy,
- .target = integrator_set_target,
- .get = integrator_get,
- .init = integrator_cpufreq_init,
- .name = "integrator",
-};
-
-static int __init integrator_cpufreq_probe(struct platform_device *pdev)
-{
- struct resource *res;
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res)
- return -ENODEV;
-
- cm_base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
- if (!cm_base)
- return -ENODEV;
-
- return cpufreq_register_driver(&integrator_driver);
-}
-
-static int __exit integrator_cpufreq_remove(struct platform_device *pdev)
-{
- return cpufreq_unregister_driver(&integrator_driver);
-}
-
-static const struct of_device_id integrator_cpufreq_match[] = {
- { .compatible = "arm,core-module-integrator"},
- { },
-};
-
-MODULE_DEVICE_TABLE(of, integrator_cpufreq_match);
-
-static struct platform_driver integrator_cpufreq_driver = {
- .driver = {
- .name = "integrator-cpufreq",
- .of_match_table = integrator_cpufreq_match,
- },
- .remove = __exit_p(integrator_cpufreq_remove),
-};
-
-module_platform_driver_probe(integrator_cpufreq_driver,
- integrator_cpufreq_probe);
-
-MODULE_AUTHOR("Russell M. King");
-MODULE_DESCRIPTION("cpufreq driver for ARM Integrator CPUs");
-MODULE_LICENSE("GPL");
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 4737520ec823..6acbd4af632e 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -37,6 +37,8 @@
#include <asm/cpufeature.h>
#include <asm/intel-family.h>
+#define INTEL_CPUFREQ_TRANSITION_LATENCY 20000
+
#define ATOM_RATIOS 0x66a
#define ATOM_VIDS 0x66b
#define ATOM_TURBO_RATIOS 0x66c
@@ -44,6 +46,7 @@
#ifdef CONFIG_ACPI
#include <acpi/processor.h>
+#include <acpi/cppc_acpi.h>
#endif
#define FRAC_BITS 8
@@ -52,6 +55,8 @@
#define EXT_BITS 6
#define EXT_FRAC_BITS (EXT_BITS + FRAC_BITS)
+#define fp_ext_toint(X) ((X) >> EXT_FRAC_BITS)
+#define int_ext_tofp(X) ((int64_t)(X) << EXT_FRAC_BITS)
static inline int32_t mul_fp(int32_t x, int32_t y)
{
@@ -122,6 +127,8 @@ struct sample {
* @scaling: Scaling factor to convert frequency to cpufreq
* frequency units
* @turbo_pstate: Max Turbo P state possible for this platform
+ * @max_freq: @max_pstate frequency in cpufreq units
+ * @turbo_freq: @turbo_pstate frequency in cpufreq units
*
* Stores the per cpu model P state limits and current P state.
*/
@@ -132,6 +139,8 @@ struct pstate_data {
int max_pstate_physical;
int scaling;
int turbo_pstate;
+ unsigned int max_freq;
+ unsigned int turbo_freq;
};
/**
@@ -177,6 +186,48 @@ struct _pid {
};
/**
+ * struct perf_limits - Store user and policy limits
+ * @no_turbo: User requested turbo state from intel_pstate sysfs
+ * @turbo_disabled: Platform turbo status either from msr
+ * MSR_IA32_MISC_ENABLE or when maximum available pstate
+ * matches the maximum turbo pstate
+ * @max_perf_pct:	Effective maximum performance limit in percentage; this
+ *			is the minimum of the limit enforced by the cpufreq
+ *			policy and the user-set limit via intel_pstate sysfs
+ * @min_perf_pct:	Effective minimum performance limit in percentage; this
+ *			is the maximum of the limit enforced by the cpufreq
+ *			policy and the user-set limit via intel_pstate sysfs
+ * @max_perf:		Scaled value between 0 and 255 corresponding to
+ *			max_perf_pct; used to limit the maximum P-state
+ * @min_perf:		Scaled value between 0 and 255 corresponding to
+ *			min_perf_pct; used to limit the minimum P-state
+ * @max_policy_pct: The maximum performance in percentage enforced by
+ * cpufreq setpolicy interface
+ * @max_sysfs_pct: The maximum performance in percentage enforced by
+ * intel pstate sysfs interface, unused when per cpu
+ * controls are enforced
+ * @min_policy_pct: The minimum performance in percentage enforced by
+ * cpufreq setpolicy interface
+ * @min_sysfs_pct: The minimum performance in percentage enforced by
+ * intel pstate sysfs interface, unused when per cpu
+ * controls are enforced
+ *
+ * Storage for user and policy defined limits.
+ */
+struct perf_limits {
+ int no_turbo;
+ int turbo_disabled;
+ int max_perf_pct;
+ int min_perf_pct;
+ int32_t max_perf;
+ int32_t min_perf;
+ int max_policy_pct;
+ int max_sysfs_pct;
+ int min_policy_pct;
+ int min_sysfs_pct;
+};
+
+/**
* struct cpudata - Per CPU instance data storage
* @cpu: CPU number for this instance data
* @policy: CPUFreq policy value
@@ -194,8 +245,19 @@ struct _pid {
* @prev_cummulative_iowait: IO Wait time difference from last and
* current sample
* @sample: Storage for storing last Sample data
+ * @perf_limits: Pointer to perf_limit unique to this CPU
+ * Not all field in the structure are applicable
+ * when per cpu controls are enforced
* @acpi_perf_data: Stores ACPI perf information read from _PSS
* @valid_pss_table: Set to true for valid ACPI _PSS entries found
+ * @epp_powersave: Last saved HWP energy performance preference
+ * (EPP) or energy performance bias (EPB),
+ * when policy switched to performance
+ * @epp_policy: Last saved policy used to set EPP/EPB
+ * @epp_default: Power on default HWP energy performance
+ * preference/bias
+ * @epp_saved: Saved EPP/EPB during system suspend or CPU offline
+ * operation
*
* This structure stores per CPU instance data for all CPUs.
*/
@@ -217,11 +279,16 @@ struct cpudata {
u64 prev_tsc;
u64 prev_cummulative_iowait;
struct sample sample;
+ struct perf_limits *perf_limits;
#ifdef CONFIG_ACPI
struct acpi_processor_performance acpi_perf_data;
bool valid_pss_table;
#endif
unsigned int iowait_boost;
+ s16 epp_powersave;
+ s16 epp_policy;
+ s16 epp_default;
+ s16 epp_saved;
};
static struct cpudata **all_cpu_data;
@@ -235,7 +302,6 @@ static struct cpudata **all_cpu_data;
* @p_gain_pct: PID proportional gain
* @i_gain_pct: PID integral gain
* @d_gain_pct: PID derivative gain
- * @boost_iowait: Whether or not to use iowait boosting.
*
* Stores per CPU model static PID configuration data.
*/
@@ -247,7 +313,6 @@ struct pstate_adjust_policy {
int p_gain_pct;
int d_gain_pct;
int i_gain_pct;
- bool boost_iowait;
};
/**
@@ -291,58 +356,19 @@ static inline int32_t get_target_pstate_use_cpu_load(struct cpudata *cpu);
static struct pstate_adjust_policy pid_params __read_mostly;
static struct pstate_funcs pstate_funcs __read_mostly;
static int hwp_active __read_mostly;
+static bool per_cpu_limits __read_mostly;
#ifdef CONFIG_ACPI
static bool acpi_ppc;
#endif
-/**
- * struct perf_limits - Store user and policy limits
- * @no_turbo: User requested turbo state from intel_pstate sysfs
- * @turbo_disabled: Platform turbo status either from msr
- * MSR_IA32_MISC_ENABLE or when maximum available pstate
- * matches the maximum turbo pstate
- * @max_perf_pct: Effective maximum performance limit in percentage, this
- * is minimum of either limits enforced by cpufreq policy
- * or limits from user set limits via intel_pstate sysfs
- * @min_perf_pct: Effective minimum performance limit in percentage, this
- * is maximum of either limits enforced by cpufreq policy
- * or limits from user set limits via intel_pstate sysfs
- * @max_perf: This is a scaled value between 0 to 255 for max_perf_pct
- * This value is used to limit max pstate
- * @min_perf: This is a scaled value between 0 to 255 for min_perf_pct
- * This value is used to limit min pstate
- * @max_policy_pct: The maximum performance in percentage enforced by
- * cpufreq setpolicy interface
- * @max_sysfs_pct: The maximum performance in percentage enforced by
- * intel pstate sysfs interface
- * @min_policy_pct: The minimum performance in percentage enforced by
- * cpufreq setpolicy interface
- * @min_sysfs_pct: The minimum performance in percentage enforced by
- * intel pstate sysfs interface
- *
- * Storage for user and policy defined limits.
- */
-struct perf_limits {
- int no_turbo;
- int turbo_disabled;
- int max_perf_pct;
- int min_perf_pct;
- int32_t max_perf;
- int32_t min_perf;
- int max_policy_pct;
- int max_sysfs_pct;
- int min_policy_pct;
- int min_sysfs_pct;
-};
-
static struct perf_limits performance_limits = {
.no_turbo = 0,
.turbo_disabled = 0,
.max_perf_pct = 100,
- .max_perf = int_tofp(1),
+ .max_perf = int_ext_tofp(1),
.min_perf_pct = 100,
- .min_perf = int_tofp(1),
+ .min_perf = int_ext_tofp(1),
.max_policy_pct = 100,
.max_sysfs_pct = 100,
.min_policy_pct = 0,
@@ -353,7 +379,7 @@ static struct perf_limits powersave_limits = {
.no_turbo = 0,
.turbo_disabled = 0,
.max_perf_pct = 100,
- .max_perf = int_tofp(1),
+ .max_perf = int_ext_tofp(1),
.min_perf_pct = 0,
.min_perf = 0,
.max_policy_pct = 100,
@@ -368,6 +394,8 @@ static struct perf_limits *limits = &performance_limits;
static struct perf_limits *limits = &powersave_limits;
#endif
+static DEFINE_MUTEX(intel_pstate_limits_lock);
+
#ifdef CONFIG_ACPI
static bool intel_pstate_get_ppc_enable_status(void)
@@ -379,14 +407,67 @@ static bool intel_pstate_get_ppc_enable_status(void)
return acpi_ppc;
}
+#ifdef CONFIG_ACPI_CPPC_LIB
+
+/* The work item is needed to avoid CPU hotplug locking issues */
+static void intel_pstate_sched_itmt_work_fn(struct work_struct *work)
+{
+ sched_set_itmt_support();
+}
+
+static DECLARE_WORK(sched_itmt_work, intel_pstate_sched_itmt_work_fn);
+
+static void intel_pstate_set_itmt_prio(int cpu)
+{
+ struct cppc_perf_caps cppc_perf;
+ static u32 max_highest_perf = 0, min_highest_perf = U32_MAX;
+ int ret;
+
+ ret = cppc_get_perf_caps(cpu, &cppc_perf);
+ if (ret)
+ return;
+
+ /*
+ * The priorities can be set regardless of whether or not
+ * sched_set_itmt_support(true) has been called and it is valid to
+ * update them at any time after it has been called.
+ */
+ sched_set_itmt_core_prio(cppc_perf.highest_perf, cpu);
+
+ if (max_highest_perf <= min_highest_perf) {
+ if (cppc_perf.highest_perf > max_highest_perf)
+ max_highest_perf = cppc_perf.highest_perf;
+
+ if (cppc_perf.highest_perf < min_highest_perf)
+ min_highest_perf = cppc_perf.highest_perf;
+
+ if (max_highest_perf > min_highest_perf) {
+ /*
+ * This code can be run during CPU online under the
+ * CPU hotplug locks, so sched_set_itmt_support()
+ * cannot be called from here. Queue up a work item
+ * to invoke it.
+ */
+ schedule_work(&sched_itmt_work);
+ }
+ }
+}
+#else
+static void intel_pstate_set_itmt_prio(int cpu)
+{
+}
+#endif
+
static void intel_pstate_init_acpi_perf_limits(struct cpufreq_policy *policy)
{
struct cpudata *cpu;
int ret;
int i;
- if (hwp_active)
+ if (hwp_active) {
+ intel_pstate_set_itmt_prio(policy->cpu);
return;
+ }
if (!intel_pstate_get_ppc_enable_status())
return;
@@ -459,11 +540,11 @@ static void intel_pstate_exit_perf_limits(struct cpufreq_policy *policy)
}
#else
-static void intel_pstate_init_acpi_perf_limits(struct cpufreq_policy *policy)
+static inline void intel_pstate_init_acpi_perf_limits(struct cpufreq_policy *policy)
{
}
-static void intel_pstate_exit_perf_limits(struct cpufreq_policy *policy)
+static inline void intel_pstate_exit_perf_limits(struct cpufreq_policy *policy)
{
}
#endif
@@ -559,24 +640,252 @@ static inline void update_turbo_state(void)
cpu->pstate.max_pstate == cpu->pstate.turbo_pstate);
}
+static s16 intel_pstate_get_epb(struct cpudata *cpu_data)
+{
+ u64 epb;
+ int ret;
+
+ if (!static_cpu_has(X86_FEATURE_EPB))
+ return -ENXIO;
+
+ ret = rdmsrl_on_cpu(cpu_data->cpu, MSR_IA32_ENERGY_PERF_BIAS, &epb);
+ if (ret)
+ return (s16)ret;
+
+ return (s16)(epb & 0x0f);
+}
+
+static s16 intel_pstate_get_epp(struct cpudata *cpu_data, u64 hwp_req_data)
+{
+ s16 epp;
+
+ if (static_cpu_has(X86_FEATURE_HWP_EPP)) {
+ /*
+		 * When hwp_req_data is 0, the caller didn't read
+		 * MSR_HWP_REQUEST, so read it here to get the EPP.
+ */
+ if (!hwp_req_data) {
+ epp = rdmsrl_on_cpu(cpu_data->cpu, MSR_HWP_REQUEST,
+ &hwp_req_data);
+ if (epp)
+ return epp;
+ }
+ epp = (hwp_req_data >> 24) & 0xff;
+ } else {
+ /* When there is no EPP present, HWP uses EPB settings */
+ epp = intel_pstate_get_epb(cpu_data);
+ }
+
+ return epp;
+}
+
+static int intel_pstate_set_epb(int cpu, s16 pref)
+{
+ u64 epb;
+ int ret;
+
+ if (!static_cpu_has(X86_FEATURE_EPB))
+ return -ENXIO;
+
+ ret = rdmsrl_on_cpu(cpu, MSR_IA32_ENERGY_PERF_BIAS, &epb);
+ if (ret)
+ return ret;
+
+ epb = (epb & ~0x0f) | pref;
+ wrmsrl_on_cpu(cpu, MSR_IA32_ENERGY_PERF_BIAS, epb);
+
+ return 0;
+}
+
+/*
+ * EPP/EPB display strings corresponding to EPP index in the
+ * energy_perf_strings[]
+ * index String
+ *-------------------------------------
+ * 0 default
+ * 1 performance
+ * 2 balance_performance
+ * 3 balance_power
+ * 4 power
+ */
+static const char * const energy_perf_strings[] = {
+ "default",
+ "performance",
+ "balance_performance",
+ "balance_power",
+ "power",
+ NULL
+};
+
+static int intel_pstate_get_energy_pref_index(struct cpudata *cpu_data)
+{
+ s16 epp;
+ int index = -EINVAL;
+
+ epp = intel_pstate_get_epp(cpu_data, 0);
+ if (epp < 0)
+ return epp;
+
+ if (static_cpu_has(X86_FEATURE_HWP_EPP)) {
+ /*
+ * Range:
+ * 0x00-0x3F : Performance
+ * 0x40-0x7F : Balance performance
+ * 0x80-0xBF : Balance power
+ * 0xC0-0xFF : Power
+		 * The EPP is an 8-bit value, but our ranges restrict the
+		 * values that can be set, so effectively only the top two
+		 * bits are used here.
+ */
+ index = (epp >> 6) + 1;
+ } else if (static_cpu_has(X86_FEATURE_EPB)) {
+ /*
+ * Range:
+ * 0x00-0x03 : Performance
+ * 0x04-0x07 : Balance performance
+ * 0x08-0x0B : Balance power
+ * 0x0C-0x0F : Power
+		 * The EPB is a 4-bit value, but our ranges restrict the
+		 * values that can be set, so effectively only the top two
+		 * bits are used here.
+ */
+ index = (epp >> 2) + 1;
+ }
+
+ return index;
+}
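
For illustration, a minimal user-space sketch of the range-to-index mapping
above (the band boundaries come from the comments in this hunk; the sample
EPP values and the demo helper name are hypothetical):

    #include <stdio.h>

    static const char * const prefs[] = {
        "default", "performance", "balance_performance",
        "balance_power", "power",
    };

    /* Map a raw 8-bit EPP value to an energy_perf_strings[]-style index. */
    static int epp_to_index(unsigned int epp)
    {
        return (epp >> 6) + 1;  /* only the top two bits matter */
    }

    int main(void)
    {
        unsigned int samples[] = { 0x00, 0x3f, 0x40, 0x80, 0xc0, 0xff };
        unsigned int i;

        for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
            printf("EPP 0x%02x -> %s\n", samples[i],
                   prefs[epp_to_index(samples[i])]);
        return 0;
    }
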
+
+static int intel_pstate_set_energy_pref_index(struct cpudata *cpu_data,
+ int pref_index)
+{
+ int epp = -EINVAL;
+ int ret;
+
+ if (!pref_index)
+ epp = cpu_data->epp_default;
+
+ mutex_lock(&intel_pstate_limits_lock);
+
+ if (static_cpu_has(X86_FEATURE_HWP_EPP)) {
+ u64 value;
+
+ ret = rdmsrl_on_cpu(cpu_data->cpu, MSR_HWP_REQUEST, &value);
+ if (ret)
+ goto return_pref;
+
+ value &= ~GENMASK_ULL(31, 24);
+
+ /*
+		 * If epp is not the default, convert the index into
+		 * energy_perf_strings to an EPP value by shifting it
+		 * left by 6 bits so that only the top two bits of the
+		 * EPP are used. The resulting EPP then needs to be
+		 * shifted left by 24 bits into the EPP position in
+		 * MSR_HWP_REQUEST.
+ */
+ if (epp == -EINVAL)
+ epp = (pref_index - 1) << 6;
+
+ value |= (u64)epp << 24;
+ ret = wrmsrl_on_cpu(cpu_data->cpu, MSR_HWP_REQUEST, value);
+ } else {
+ if (epp == -EINVAL)
+ epp = (pref_index - 1) << 2;
+ ret = intel_pstate_set_epb(cpu_data->cpu, epp);
+ }
+return_pref:
+ mutex_unlock(&intel_pstate_limits_lock);
+
+ return ret;
+}
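
A sketch of the write-side bit placement used above, under the same
assumptions (hypothetical values; GENMASK_ULL(31, 24) written out as a plain
mask): a non-default preference index is shifted into the top two bits of an
8-bit EPP, which then lands in bits 31:24 of MSR_HWP_REQUEST:

    #include <stdint.h>
    #include <stdio.h>

    /* Clear bits 31:24 of the HWP request word and insert the new EPP. */
    static uint64_t hwp_req_set_epp(uint64_t value, int pref_index)
    {
        uint64_t epp = (uint64_t)(pref_index - 1) << 6; /* top two EPP bits */

        value &= ~(0xffULL << 24);      /* i.e. GENMASK_ULL(31, 24) */
        return value | (epp << 24);
    }

    int main(void)
    {
        /* "power" is index 4 -> EPP 0xc0 -> prints 0x00000000c0adbeef. */
        printf("0x%016llx\n",
               (unsigned long long)hwp_req_set_epp(0xdeadbeefULL, 4));
        return 0;
    }
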
+
+static ssize_t show_energy_performance_available_preferences(
+ struct cpufreq_policy *policy, char *buf)
+{
+ int i = 0;
+ int ret = 0;
+
+ while (energy_perf_strings[i] != NULL)
+ ret += sprintf(&buf[ret], "%s ", energy_perf_strings[i++]);
+
+ ret += sprintf(&buf[ret], "\n");
+
+ return ret;
+}
+
+cpufreq_freq_attr_ro(energy_performance_available_preferences);
+
+static ssize_t store_energy_performance_preference(
+ struct cpufreq_policy *policy, const char *buf, size_t count)
+{
+ struct cpudata *cpu_data = all_cpu_data[policy->cpu];
+ char str_preference[21];
+ int ret, i = 0;
+
+ ret = sscanf(buf, "%20s", str_preference);
+ if (ret != 1)
+ return -EINVAL;
+
+ while (energy_perf_strings[i] != NULL) {
+ if (!strcmp(str_preference, energy_perf_strings[i])) {
+ intel_pstate_set_energy_pref_index(cpu_data, i);
+ return count;
+ }
+ ++i;
+ }
+
+ return -EINVAL;
+}
+
+static ssize_t show_energy_performance_preference(
+ struct cpufreq_policy *policy, char *buf)
+{
+ struct cpudata *cpu_data = all_cpu_data[policy->cpu];
+ int preference;
+
+ preference = intel_pstate_get_energy_pref_index(cpu_data);
+ if (preference < 0)
+ return preference;
+
+ return sprintf(buf, "%s\n", energy_perf_strings[preference]);
+}
+
+cpufreq_freq_attr_rw(energy_performance_preference);
+
+static struct freq_attr *hwp_cpufreq_attrs[] = {
+ &energy_performance_preference,
+ &energy_performance_available_preferences,
+ NULL,
+};
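
With these attributes hooked up through the driver's attr pointer (see the
intel_pstate.attr assignment in intel_pstate_init() below), each policy
exposes the two files in its cpufreq sysfs directory; writing one of the
energy_perf_strings[] entries there (e.g. echo balance_power >
.../cpufreq/policy0/energy_performance_preference, path shown for
illustration) reaches store_energy_performance_preference() and from there
intel_pstate_set_energy_pref_index().
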
+
static void intel_pstate_hwp_set(const struct cpumask *cpumask)
{
int min, hw_min, max, hw_max, cpu, range, adj_range;
+ struct perf_limits *perf_limits = limits;
u64 value, cap;
for_each_cpu(cpu, cpumask) {
+ int max_perf_pct, min_perf_pct;
+ struct cpudata *cpu_data = all_cpu_data[cpu];
+ s16 epp;
+
+ if (per_cpu_limits)
+ perf_limits = all_cpu_data[cpu]->perf_limits;
+
rdmsrl_on_cpu(cpu, MSR_HWP_CAPABILITIES, &cap);
hw_min = HWP_LOWEST_PERF(cap);
hw_max = HWP_HIGHEST_PERF(cap);
range = hw_max - hw_min;
+ max_perf_pct = perf_limits->max_perf_pct;
+ min_perf_pct = perf_limits->min_perf_pct;
+
rdmsrl_on_cpu(cpu, MSR_HWP_REQUEST, &value);
- adj_range = limits->min_perf_pct * range / 100;
+ adj_range = min_perf_pct * range / 100;
min = hw_min + adj_range;
value &= ~HWP_MIN_PERF(~0L);
value |= HWP_MIN_PERF(min);
- adj_range = limits->max_perf_pct * range / 100;
+ adj_range = max_perf_pct * range / 100;
max = hw_min + adj_range;
if (limits->no_turbo) {
hw_max = HWP_GUARANTEED_PERF(cap);
@@ -586,6 +895,53 @@ static void intel_pstate_hwp_set(const struct cpumask *cpumask)
value &= ~HWP_MAX_PERF(~0L);
value |= HWP_MAX_PERF(max);
+
+ if (cpu_data->epp_policy == cpu_data->policy)
+ goto skip_epp;
+
+ cpu_data->epp_policy = cpu_data->policy;
+
+ if (cpu_data->epp_saved >= 0) {
+ epp = cpu_data->epp_saved;
+ cpu_data->epp_saved = -EINVAL;
+ goto update_epp;
+ }
+
+ if (cpu_data->policy == CPUFREQ_POLICY_PERFORMANCE) {
+ epp = intel_pstate_get_epp(cpu_data, value);
+ cpu_data->epp_powersave = epp;
+			/* If the EPP read failed, don't try to write it back */
+ if (epp < 0)
+ goto skip_epp;
+
+ epp = 0;
+ } else {
+			/* Skip setting the EPP when the saved value is invalid */
+ if (cpu_data->epp_powersave < 0)
+ goto skip_epp;
+
+ /*
+			 * There is no need to restore the EPP when it is not
+			 * zero, which means one of the following:
+			 * - the policy has not changed,
+			 * - the user has changed it manually, or
+			 * - there was an error reading the EPB.
+ */
+ epp = intel_pstate_get_epp(cpu_data, value);
+ if (epp)
+ goto skip_epp;
+
+ epp = cpu_data->epp_powersave;
+ }
+update_epp:
+ if (static_cpu_has(X86_FEATURE_HWP_EPP)) {
+ value &= ~GENMASK_ULL(31, 24);
+ value |= (u64)epp << 24;
+ } else {
+ intel_pstate_set_epb(cpu, epp);
+ }
+skip_epp:
wrmsrl_on_cpu(cpu, MSR_HWP_REQUEST, value);
}
}
@@ -598,6 +954,28 @@ static int intel_pstate_hwp_set_policy(struct cpufreq_policy *policy)
return 0;
}
+static int intel_pstate_hwp_save_state(struct cpufreq_policy *policy)
+{
+ struct cpudata *cpu_data = all_cpu_data[policy->cpu];
+
+ if (!hwp_active)
+ return 0;
+
+ cpu_data->epp_saved = intel_pstate_get_epp(cpu_data, 0);
+
+ return 0;
+}
+
+static int intel_pstate_resume(struct cpufreq_policy *policy)
+{
+ if (!hwp_active)
+ return 0;
+
+ all_cpu_data[policy->cpu]->epp_policy = 0;
+
+ return intel_pstate_hwp_set_policy(policy);
+}
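
Taken together, these two callbacks preserve the EPP across suspend:
intel_pstate_hwp_save_state() records the current EPP in epp_saved, and
intel_pstate_resume() zeroes epp_policy so that the next
intel_pstate_hwp_set() run takes the EPP path again, where the saved value
is restored and then invalidated.
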
+
static void intel_pstate_hwp_set_online_cpus(void)
{
get_online_cpus();
@@ -640,8 +1018,10 @@ static void __init intel_pstate_debug_expose_params(void)
struct dentry *debugfs_parent;
int i = 0;
- if (hwp_active)
+ if (hwp_active ||
+ pstate_funcs.get_target_pstate == get_target_pstate_use_cpu_load)
return;
+
debugfs_parent = debugfs_create_dir("pstate_snb", NULL);
if (IS_ERR_OR_NULL(debugfs_parent))
return;
@@ -714,9 +1094,12 @@ static ssize_t store_no_turbo(struct kobject *a, struct attribute *b,
if (ret != 1)
return -EINVAL;
+ mutex_lock(&intel_pstate_limits_lock);
+
update_turbo_state();
if (limits->turbo_disabled) {
pr_warn("Turbo disabled by BIOS or unavailable on processor\n");
+ mutex_unlock(&intel_pstate_limits_lock);
return -EPERM;
}
@@ -725,6 +1108,8 @@ static ssize_t store_no_turbo(struct kobject *a, struct attribute *b,
if (hwp_active)
intel_pstate_hwp_set_online_cpus();
+ mutex_unlock(&intel_pstate_limits_lock);
+
return count;
}
@@ -738,6 +1123,8 @@ static ssize_t store_max_perf_pct(struct kobject *a, struct attribute *b,
if (ret != 1)
return -EINVAL;
+ mutex_lock(&intel_pstate_limits_lock);
+
limits->max_sysfs_pct = clamp_t(int, input, 0 , 100);
limits->max_perf_pct = min(limits->max_policy_pct,
limits->max_sysfs_pct);
@@ -745,10 +1132,13 @@ static ssize_t store_max_perf_pct(struct kobject *a, struct attribute *b,
limits->max_perf_pct);
limits->max_perf_pct = max(limits->min_perf_pct,
limits->max_perf_pct);
- limits->max_perf = div_fp(limits->max_perf_pct, 100);
+ limits->max_perf = div_ext_fp(limits->max_perf_pct, 100);
if (hwp_active)
intel_pstate_hwp_set_online_cpus();
+
+ mutex_unlock(&intel_pstate_limits_lock);
+
return count;
}
@@ -762,6 +1152,8 @@ static ssize_t store_min_perf_pct(struct kobject *a, struct attribute *b,
if (ret != 1)
return -EINVAL;
+ mutex_lock(&intel_pstate_limits_lock);
+
limits->min_sysfs_pct = clamp_t(int, input, 0 , 100);
limits->min_perf_pct = max(limits->min_policy_pct,
limits->min_sysfs_pct);
@@ -769,10 +1161,13 @@ static ssize_t store_min_perf_pct(struct kobject *a, struct attribute *b,
limits->min_perf_pct);
limits->min_perf_pct = min(limits->max_perf_pct,
limits->min_perf_pct);
- limits->min_perf = div_fp(limits->min_perf_pct, 100);
+ limits->min_perf = div_ext_fp(limits->min_perf_pct, 100);
if (hwp_active)
intel_pstate_hwp_set_online_cpus();
+
+ mutex_unlock(&intel_pstate_limits_lock);
+
return count;
}
@@ -787,8 +1182,6 @@ define_one_global_ro(num_pstates);
static struct attribute *intel_pstate_attributes[] = {
&no_turbo.attr,
- &max_perf_pct.attr,
- &min_perf_pct.attr,
&turbo_pct.attr,
&num_pstates.attr,
NULL
@@ -805,9 +1198,26 @@ static void __init intel_pstate_sysfs_expose_params(void)
intel_pstate_kobject = kobject_create_and_add("intel_pstate",
&cpu_subsys.dev_root->kobj);
- BUG_ON(!intel_pstate_kobject);
+ if (WARN_ON(!intel_pstate_kobject))
+ return;
+
rc = sysfs_create_group(intel_pstate_kobject, &intel_pstate_attr_group);
- BUG_ON(rc);
+ if (WARN_ON(rc))
+ return;
+
+ /*
+	 * If per-CPU limits are enforced, there are no global limits, so
+	 * return without creating the max/min_perf_pct attributes.
+ */
+ if (per_cpu_limits)
+ return;
+
+ rc = sysfs_create_file(intel_pstate_kobject, &max_perf_pct.attr);
+ WARN_ON(rc);
+
+ rc = sysfs_create_file(intel_pstate_kobject, &min_perf_pct.attr);
+ WARN_ON(rc);
}
/************************** sysfs end ************************/
@@ -818,6 +1228,9 @@ static void intel_pstate_hwp_enable(struct cpudata *cpudata)
wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_INTERRUPT, 0x00);
wrmsrl_on_cpu(cpudata->cpu, MSR_PM_ENABLE, 0x1);
+ cpudata->epp_policy = 0;
+ if (cpudata->epp_default == -EINVAL)
+ cpudata->epp_default = intel_pstate_get_epp(cpudata, 0);
}
static int atom_get_min_pstate(void)
@@ -1045,7 +1458,6 @@ static const struct cpu_defaults silvermont_params = {
.p_gain_pct = 14,
.d_gain_pct = 0,
.i_gain_pct = 4,
- .boost_iowait = true,
},
.funcs = {
.get_max = atom_get_max_pstate,
@@ -1067,7 +1479,6 @@ static const struct cpu_defaults airmont_params = {
.p_gain_pct = 14,
.d_gain_pct = 0,
.i_gain_pct = 4,
- .boost_iowait = true,
},
.funcs = {
.get_max = atom_get_max_pstate,
@@ -1109,7 +1520,6 @@ static const struct cpu_defaults bxt_params = {
.p_gain_pct = 14,
.d_gain_pct = 0,
.i_gain_pct = 4,
- .boost_iowait = true,
},
.funcs = {
.get_max = core_get_max_pstate,
@@ -1127,20 +1537,24 @@ static void intel_pstate_get_min_max(struct cpudata *cpu, int *min, int *max)
int max_perf = cpu->pstate.turbo_pstate;
int max_perf_adj;
int min_perf;
+ struct perf_limits *perf_limits = limits;
if (limits->no_turbo || limits->turbo_disabled)
max_perf = cpu->pstate.max_pstate;
+ if (per_cpu_limits)
+ perf_limits = cpu->perf_limits;
+
/*
* performance can be limited by user through sysfs, by cpufreq
* policy, or by cpu specific default values determined through
* experimentation.
*/
- max_perf_adj = fp_toint(max_perf * limits->max_perf);
+ max_perf_adj = fp_ext_toint(max_perf * perf_limits->max_perf);
*max = clamp_t(int, max_perf_adj,
cpu->pstate.min_pstate, cpu->pstate.turbo_pstate);
- min_perf = fp_toint(max_perf * limits->min_perf);
+ min_perf = fp_ext_toint(max_perf * perf_limits->min_perf);
*min = clamp_t(int, min_perf, cpu->pstate.min_pstate, max_perf);
}
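
As a rough sketch of the extended fixed-point arithmetic used here (the
14-bit fraction width and the DEMO_ helpers are stand-ins; the driver's
FRAC_BITS/EXT_FRAC_BITS may differ):

    #include <stdio.h>

    #define DEMO_EXT_FRAC_BITS 14

    static long long demo_div_ext_fp(long long x, long long y)
    {
        return (x << DEMO_EXT_FRAC_BITS) / y;   /* percent -> fixed point */
    }

    static int demo_fp_ext_toint(long long x)
    {
        return (int)(x >> DEMO_EXT_FRAC_BITS);  /* fixed point -> integer */
    }

    int main(void)
    {
        int turbo_pstate = 32;                          /* hypothetical */
        long long max_perf = demo_div_ext_fp(75, 100);  /* 75% as a fraction */

        /* 75% of the turbo P-state, truncated: prints 24. */
        printf("max pstate = %d\n",
               demo_fp_ext_toint(turbo_pstate * max_perf));
        return 0;
    }
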
@@ -1178,6 +1592,8 @@ static void intel_pstate_get_cpu_pstates(struct cpudata *cpu)
cpu->pstate.max_pstate_physical = pstate_funcs.get_max_physical();
cpu->pstate.turbo_pstate = pstate_funcs.get_turbo();
cpu->pstate.scaling = pstate_funcs.get_scaling();
+ cpu->pstate.max_freq = cpu->pstate.max_pstate * cpu->pstate.scaling;
+ cpu->pstate.turbo_freq = cpu->pstate.turbo_pstate * cpu->pstate.scaling;
if (pstate_funcs.get_vid)
pstate_funcs.get_vid(cpu);
@@ -1316,15 +1732,19 @@ static inline int32_t get_target_pstate_use_performance(struct cpudata *cpu)
return cpu->pstate.current_pstate - pid_calc(&cpu->pid, perf_scaled);
}
-static inline void intel_pstate_update_pstate(struct cpudata *cpu, int pstate)
+static int intel_pstate_prepare_request(struct cpudata *cpu, int pstate)
{
int max_perf, min_perf;
- update_turbo_state();
-
intel_pstate_get_min_max(cpu, &min_perf, &max_perf);
pstate = clamp_t(int, pstate, min_perf, max_perf);
trace_cpu_frequency(pstate * cpu->pstate.scaling, cpu->cpu);
+ return pstate;
+}
+
+static void intel_pstate_update_pstate(struct cpudata *cpu, int pstate)
+{
+ pstate = intel_pstate_prepare_request(cpu, pstate);
if (pstate == cpu->pstate.current_pstate)
return;
@@ -1342,6 +1762,8 @@ static inline void intel_pstate_adjust_busy_pstate(struct cpudata *cpu)
target_pstate = cpu->policy == CPUFREQ_POLICY_PERFORMANCE ?
cpu->pstate.turbo_pstate : pstate_funcs.get_target_pstate(cpu);
+ update_turbo_state();
+
intel_pstate_update_pstate(cpu, target_pstate);
sample = &cpu->sample;
@@ -1362,7 +1784,7 @@ static void intel_pstate_update_util(struct update_util_data *data, u64 time,
struct cpudata *cpu = container_of(data, struct cpudata, update_util);
u64 delta_ns;
- if (pid_params.boost_iowait) {
+ if (pstate_funcs.get_target_pstate == get_target_pstate_use_cpu_load) {
if (flags & SCHED_CPUFREQ_IOWAIT) {
cpu->iowait_boost = int_tofp(1);
} else if (cpu->iowait_boost) {
@@ -1408,6 +1830,7 @@ static const struct x86_cpu_id intel_pstate_cpu_ids[] = {
ICPU(INTEL_FAM6_SKYLAKE_DESKTOP, core_params),
ICPU(INTEL_FAM6_BROADWELL_XEON_D, core_params),
ICPU(INTEL_FAM6_XEON_PHI_KNL, knl_params),
+ ICPU(INTEL_FAM6_XEON_PHI_KNM, knl_params),
ICPU(INTEL_FAM6_ATOM_GOLDMONT, bxt_params),
{}
};
@@ -1424,11 +1847,26 @@ static int intel_pstate_init_cpu(unsigned int cpunum)
{
struct cpudata *cpu;
- if (!all_cpu_data[cpunum])
- all_cpu_data[cpunum] = kzalloc(sizeof(struct cpudata),
- GFP_KERNEL);
- if (!all_cpu_data[cpunum])
- return -ENOMEM;
+ cpu = all_cpu_data[cpunum];
+
+ if (!cpu) {
+ unsigned int size = sizeof(struct cpudata);
+
+ if (per_cpu_limits)
+ size += sizeof(struct perf_limits);
+
+ cpu = kzalloc(size, GFP_KERNEL);
+ if (!cpu)
+ return -ENOMEM;
+
+ all_cpu_data[cpunum] = cpu;
+ if (per_cpu_limits)
+ cpu->perf_limits = (struct perf_limits *)(cpu + 1);
+
+ cpu->epp_default = -EINVAL;
+ cpu->epp_powersave = -EINVAL;
+ cpu->epp_saved = -EINVAL;
+ }
cpu = all_cpu_data[cpunum];
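
The per-CPU limits are carved out of the same allocation as the cpudata
itself, so one kfree() releases both. A user-space sketch of that pattern,
with hypothetical struct names:

    #include <stdlib.h>

    struct limits { int min_pct, max_pct; };
    struct cpu { int num; struct limits *per_cpu; };

    /* One allocation holds the cpu struct plus its trailing limits area. */
    static struct cpu *cpu_alloc(int with_limits)
    {
        size_t size = sizeof(struct cpu);
        struct cpu *c;

        if (with_limits)
            size += sizeof(struct limits);

        c = calloc(1, size);
        if (c && with_limits)
            c->per_cpu = (struct limits *)(c + 1); /* area right after c */
        return c;
    }

    int main(void)
    {
        struct cpu *c = cpu_alloc(1);

        if (c)
            c->per_cpu->max_pct = 100;
        free(c);    /* one free releases both parts */
        return 0;
    }
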
@@ -1487,18 +1925,57 @@ static void intel_pstate_set_performance_limits(struct perf_limits *limits)
limits->no_turbo = 0;
limits->turbo_disabled = 0;
limits->max_perf_pct = 100;
- limits->max_perf = int_tofp(1);
+ limits->max_perf = int_ext_tofp(1);
limits->min_perf_pct = 100;
- limits->min_perf = int_tofp(1);
+ limits->min_perf = int_ext_tofp(1);
limits->max_policy_pct = 100;
limits->max_sysfs_pct = 100;
limits->min_policy_pct = 0;
limits->min_sysfs_pct = 0;
}
+static void intel_pstate_update_perf_limits(struct cpufreq_policy *policy,
+ struct perf_limits *limits)
+{
+ limits->max_policy_pct = DIV_ROUND_UP(policy->max * 100,
+ policy->cpuinfo.max_freq);
+ limits->max_policy_pct = clamp_t(int, limits->max_policy_pct, 0, 100);
+ if (policy->max == policy->min) {
+ limits->min_policy_pct = limits->max_policy_pct;
+ } else {
+ limits->min_policy_pct = DIV_ROUND_UP(policy->min * 100,
+ policy->cpuinfo.max_freq);
+ limits->min_policy_pct = clamp_t(int, limits->min_policy_pct,
+ 0, 100);
+ }
+
+ /* Normalize user input to [min_policy_pct, max_policy_pct] */
+ limits->min_perf_pct = max(limits->min_policy_pct,
+ limits->min_sysfs_pct);
+ limits->min_perf_pct = min(limits->max_policy_pct,
+ limits->min_perf_pct);
+ limits->max_perf_pct = min(limits->max_policy_pct,
+ limits->max_sysfs_pct);
+ limits->max_perf_pct = max(limits->min_policy_pct,
+ limits->max_perf_pct);
+
+ /* Make sure min_perf_pct <= max_perf_pct */
+ limits->min_perf_pct = min(limits->max_perf_pct, limits->min_perf_pct);
+
+ limits->min_perf = div_ext_fp(limits->min_perf_pct, 100);
+ limits->max_perf = div_ext_fp(limits->max_perf_pct, 100);
+ limits->max_perf = round_up(limits->max_perf, EXT_FRAC_BITS);
+ limits->min_perf = round_up(limits->min_perf, EXT_FRAC_BITS);
+
+ pr_debug("cpu:%d max_perf_pct:%d min_perf_pct:%d\n", policy->cpu,
+ limits->max_perf_pct, limits->min_perf_pct);
+}
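
A worked example of the policy-to-percent conversion above, using
hypothetical frequencies and local DEMO_ stand-ins for the kernel's
DIV_ROUND_UP()/clamp_t() helpers:

    #include <stdio.h>

    #define DEMO_DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
    #define DEMO_CLAMP(v, lo, hi) \
        ((v) < (lo) ? (lo) : ((v) > (hi) ? (hi) : (v)))

    int main(void)
    {
        long cpuinfo_max = 3200000;     /* kHz, hypothetical */
        long policy_min = 800000, policy_max = 2400000;
        long min_pct, max_pct;

        max_pct = DEMO_CLAMP(DEMO_DIV_ROUND_UP(policy_max * 100, cpuinfo_max),
                             0, 100);
        min_pct = DEMO_CLAMP(DEMO_DIV_ROUND_UP(policy_min * 100, cpuinfo_max),
                             0, 100);

        /* Prints "min 25% max 75%". */
        printf("min %ld%% max %ld%%\n", min_pct, max_pct);
        return 0;
    }
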
+
static int intel_pstate_set_policy(struct cpufreq_policy *policy)
{
struct cpudata *cpu;
+ struct perf_limits *perf_limits = NULL;
if (!policy->cpuinfo.max_freq)
return -ENODEV;
@@ -1516,41 +1993,31 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
policy->max = policy->cpuinfo.max_freq;
}
- if (cpu->policy == CPUFREQ_POLICY_PERFORMANCE) {
- limits = &performance_limits;
+ if (per_cpu_limits)
+ perf_limits = cpu->perf_limits;
+
+ mutex_lock(&intel_pstate_limits_lock);
+
+ if (policy->policy == CPUFREQ_POLICY_PERFORMANCE) {
+ if (!perf_limits) {
+ limits = &performance_limits;
+ perf_limits = limits;
+ }
if (policy->max >= policy->cpuinfo.max_freq) {
pr_debug("set performance\n");
- intel_pstate_set_performance_limits(limits);
+ intel_pstate_set_performance_limits(perf_limits);
goto out;
}
} else {
pr_debug("set powersave\n");
- limits = &powersave_limits;
- }
-
- limits->min_policy_pct = (policy->min * 100) / policy->cpuinfo.max_freq;
- limits->min_policy_pct = clamp_t(int, limits->min_policy_pct, 0 , 100);
- limits->max_policy_pct = DIV_ROUND_UP(policy->max * 100,
- policy->cpuinfo.max_freq);
- limits->max_policy_pct = clamp_t(int, limits->max_policy_pct, 0 , 100);
-
- /* Normalize user input to [min_policy_pct, max_policy_pct] */
- limits->min_perf_pct = max(limits->min_policy_pct,
- limits->min_sysfs_pct);
- limits->min_perf_pct = min(limits->max_policy_pct,
- limits->min_perf_pct);
- limits->max_perf_pct = min(limits->max_policy_pct,
- limits->max_sysfs_pct);
- limits->max_perf_pct = max(limits->min_policy_pct,
- limits->max_perf_pct);
-
- /* Make sure min_perf_pct <= max_perf_pct */
- limits->min_perf_pct = min(limits->max_perf_pct, limits->min_perf_pct);
+ if (!perf_limits) {
+ limits = &powersave_limits;
+ perf_limits = limits;
+ }
- limits->min_perf = div_fp(limits->min_perf_pct, 100);
- limits->max_perf = div_fp(limits->max_perf_pct, 100);
- limits->max_perf = round_up(limits->max_perf, FRAC_BITS);
+ }
+ intel_pstate_update_perf_limits(policy, perf_limits);
out:
if (cpu->policy == CPUFREQ_POLICY_PERFORMANCE) {
/*
@@ -1565,6 +2032,8 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
intel_pstate_hwp_set_policy(policy);
+ mutex_unlock(&intel_pstate_limits_lock);
+
return 0;
}
@@ -1579,22 +2048,32 @@ static int intel_pstate_verify_policy(struct cpufreq_policy *policy)
return 0;
}
+static void intel_cpufreq_stop_cpu(struct cpufreq_policy *policy)
+{
+ intel_pstate_set_min_pstate(all_cpu_data[policy->cpu]);
+}
+
static void intel_pstate_stop_cpu(struct cpufreq_policy *policy)
{
- int cpu_num = policy->cpu;
- struct cpudata *cpu = all_cpu_data[cpu_num];
+ pr_debug("CPU %d exiting\n", policy->cpu);
- pr_debug("CPU %d exiting\n", cpu_num);
+ intel_pstate_clear_update_util_hook(policy->cpu);
+ if (hwp_active)
+ intel_pstate_hwp_save_state(policy);
+ else
+ intel_cpufreq_stop_cpu(policy);
+}
- intel_pstate_clear_update_util_hook(cpu_num);
+static int intel_pstate_cpu_exit(struct cpufreq_policy *policy)
+{
+ intel_pstate_exit_perf_limits(policy);
- if (hwp_active)
- return;
+ policy->fast_switch_possible = false;
- intel_pstate_set_min_pstate(cpu);
+ return 0;
}
-static int intel_pstate_cpu_init(struct cpufreq_policy *policy)
+static int __intel_pstate_cpu_init(struct cpufreq_policy *policy)
{
struct cpudata *cpu;
int rc;
@@ -1605,10 +2084,13 @@ static int intel_pstate_cpu_init(struct cpufreq_policy *policy)
cpu = all_cpu_data[policy->cpu];
- if (limits->min_perf_pct == 100 && limits->max_perf_pct == 100)
- policy->policy = CPUFREQ_POLICY_PERFORMANCE;
- else
- policy->policy = CPUFREQ_POLICY_POWERSAVE;
+ /*
+	 * We need sane values in cpu->perf_limits, so inherit from the
+	 * global perf_limits, which are seeded during boot with values
+	 * based on CONFIG_CPU_FREQ_DEFAULT_GOV_*.
+ */
+ if (per_cpu_limits)
+ memcpy(cpu->perf_limits, limits, sizeof(struct perf_limits));
policy->min = cpu->pstate.min_pstate * cpu->pstate.scaling;
policy->max = cpu->pstate.turbo_pstate * cpu->pstate.scaling;
@@ -1621,24 +2103,35 @@ static int intel_pstate_cpu_init(struct cpufreq_policy *policy)
policy->cpuinfo.max_freq *= cpu->pstate.scaling;
intel_pstate_init_acpi_perf_limits(policy);
- policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
cpumask_set_cpu(policy->cpu, policy->cpus);
+ policy->fast_switch_possible = true;
+
return 0;
}
-static int intel_pstate_cpu_exit(struct cpufreq_policy *policy)
+static int intel_pstate_cpu_init(struct cpufreq_policy *policy)
{
- intel_pstate_exit_perf_limits(policy);
+ int ret = __intel_pstate_cpu_init(policy);
+
+ if (ret)
+ return ret;
+
+ policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
+ if (limits->min_perf_pct == 100 && limits->max_perf_pct == 100)
+ policy->policy = CPUFREQ_POLICY_PERFORMANCE;
+ else
+ policy->policy = CPUFREQ_POLICY_POWERSAVE;
return 0;
}
-static struct cpufreq_driver intel_pstate_driver = {
+static struct cpufreq_driver intel_pstate = {
.flags = CPUFREQ_CONST_LOOPS,
.verify = intel_pstate_verify_policy,
.setpolicy = intel_pstate_set_policy,
- .resume = intel_pstate_hwp_set_policy,
+ .suspend = intel_pstate_hwp_save_state,
+ .resume = intel_pstate_resume,
.get = intel_pstate_get,
.init = intel_pstate_cpu_init,
.exit = intel_pstate_cpu_exit,
@@ -1646,6 +2139,118 @@ static struct cpufreq_driver intel_pstate_driver = {
.name = "intel_pstate",
};
+static int intel_cpufreq_verify_policy(struct cpufreq_policy *policy)
+{
+ struct cpudata *cpu = all_cpu_data[policy->cpu];
+ struct perf_limits *perf_limits = limits;
+
+ update_turbo_state();
+ policy->cpuinfo.max_freq = limits->turbo_disabled ?
+ cpu->pstate.max_freq : cpu->pstate.turbo_freq;
+
+ cpufreq_verify_within_cpu_limits(policy);
+
+ if (per_cpu_limits)
+ perf_limits = cpu->perf_limits;
+
+ intel_pstate_update_perf_limits(policy, perf_limits);
+
+ return 0;
+}
+
+static unsigned int intel_cpufreq_turbo_update(struct cpudata *cpu,
+ struct cpufreq_policy *policy,
+ unsigned int target_freq)
+{
+ unsigned int max_freq;
+
+ update_turbo_state();
+
+ max_freq = limits->no_turbo || limits->turbo_disabled ?
+ cpu->pstate.max_freq : cpu->pstate.turbo_freq;
+ policy->cpuinfo.max_freq = max_freq;
+ if (policy->max > max_freq)
+ policy->max = max_freq;
+
+ if (target_freq > max_freq)
+ target_freq = max_freq;
+
+ return target_freq;
+}
+
+static int intel_cpufreq_target(struct cpufreq_policy *policy,
+ unsigned int target_freq,
+ unsigned int relation)
+{
+ struct cpudata *cpu = all_cpu_data[policy->cpu];
+ struct cpufreq_freqs freqs;
+ int target_pstate;
+
+ freqs.old = policy->cur;
+ freqs.new = intel_cpufreq_turbo_update(cpu, policy, target_freq);
+
+ cpufreq_freq_transition_begin(policy, &freqs);
+ switch (relation) {
+ case CPUFREQ_RELATION_L:
+ target_pstate = DIV_ROUND_UP(freqs.new, cpu->pstate.scaling);
+ break;
+ case CPUFREQ_RELATION_H:
+ target_pstate = freqs.new / cpu->pstate.scaling;
+ break;
+ default:
+ target_pstate = DIV_ROUND_CLOSEST(freqs.new, cpu->pstate.scaling);
+ break;
+ }
+ target_pstate = intel_pstate_prepare_request(cpu, target_pstate);
+ if (target_pstate != cpu->pstate.current_pstate) {
+ cpu->pstate.current_pstate = target_pstate;
+ wrmsrl_on_cpu(policy->cpu, MSR_IA32_PERF_CTL,
+ pstate_funcs.get_val(cpu, target_pstate));
+ }
+ cpufreq_freq_transition_end(policy, &freqs, false);
+
+ return 0;
+}
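
The three rounding modes mirror the cpufreq relation semantics:
CPUFREQ_RELATION_L wants the lowest frequency at or above the target (round
up), CPUFREQ_RELATION_H the highest at or below it (round down). A small
sketch with hypothetical numbers and DEMO_ stand-ins for the kernel macros:

    #include <stdio.h>

    #define DEMO_DIV_ROUND_UP(n, d)      (((n) + (d) - 1) / (d))
    #define DEMO_DIV_ROUND_CLOSEST(n, d) (((n) + (d) / 2) / (d))

    int main(void)
    {
        int scaling = 100000;   /* kHz per P-state step, hypothetical */
        int freq = 1550000;     /* requested frequency in kHz */

        printf("RELATION_L -> pstate %d\n",
               DEMO_DIV_ROUND_UP(freq, scaling));           /* 16 */
        printf("RELATION_H -> pstate %d\n",
               freq / scaling);                             /* 15 */
        printf("default    -> pstate %d\n",
               DEMO_DIV_ROUND_CLOSEST(freq, scaling));      /* 16 */
        return 0;
    }
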
+
+static unsigned int intel_cpufreq_fast_switch(struct cpufreq_policy *policy,
+ unsigned int target_freq)
+{
+ struct cpudata *cpu = all_cpu_data[policy->cpu];
+ int target_pstate;
+
+ target_freq = intel_cpufreq_turbo_update(cpu, policy, target_freq);
+ target_pstate = DIV_ROUND_UP(target_freq, cpu->pstate.scaling);
+ intel_pstate_update_pstate(cpu, target_pstate);
+ return target_freq;
+}
+
+static int intel_cpufreq_cpu_init(struct cpufreq_policy *policy)
+{
+ int ret = __intel_pstate_cpu_init(policy);
+
+ if (ret)
+ return ret;
+
+ policy->cpuinfo.transition_latency = INTEL_CPUFREQ_TRANSITION_LATENCY;
+ /* This reflects the intel_pstate_get_cpu_pstates() setting. */
+ policy->cur = policy->cpuinfo.min_freq;
+
+ return 0;
+}
+
+static struct cpufreq_driver intel_cpufreq = {
+ .flags = CPUFREQ_CONST_LOOPS,
+ .verify = intel_cpufreq_verify_policy,
+ .target = intel_cpufreq_target,
+ .fast_switch = intel_cpufreq_fast_switch,
+ .init = intel_cpufreq_cpu_init,
+ .exit = intel_pstate_cpu_exit,
+ .stop_cpu = intel_cpufreq_stop_cpu,
+ .name = "intel_cpufreq",
+};
+
+static struct cpufreq_driver *intel_pstate_driver = &intel_pstate;
+
static int no_load __initdata;
static int no_hwp __initdata;
static int hwp_only __initdata;
@@ -1672,6 +2277,19 @@ static void __init copy_pid_params(struct pstate_adjust_policy *policy)
pid_params.setpoint = policy->setpoint;
}
+#ifdef CONFIG_ACPI
+static void intel_pstate_use_acpi_profile(void)
+{
+ if (acpi_gbl_FADT.preferred_profile == PM_MOBILE)
+ pstate_funcs.get_target_pstate =
+ get_target_pstate_use_cpu_load;
+}
+#else
+static void intel_pstate_use_acpi_profile(void)
+{
+}
+#endif
+
static void __init copy_cpu_funcs(struct pstate_funcs *funcs)
{
pstate_funcs.get_max = funcs->get_max;
@@ -1683,6 +2301,7 @@ static void __init copy_cpu_funcs(struct pstate_funcs *funcs)
pstate_funcs.get_vid = funcs->get_vid;
pstate_funcs.get_target_pstate = funcs->get_target_pstate;
+ intel_pstate_use_acpi_profile();
}
#ifdef CONFIG_ACPI
@@ -1796,9 +2415,20 @@ static bool __init intel_pstate_platform_pwr_mgmt_exists(void)
return false;
}
+
+static void intel_pstate_request_control_from_smm(void)
+{
+ /*
+ * It may be unsafe to request P-states control from SMM if _PPC support
+ * has not been enabled.
+ */
+ if (acpi_ppc)
+ acpi_processor_pstate_control();
+}
#else /* CONFIG_ACPI not enabled */
static inline bool intel_pstate_platform_pwr_mgmt_exists(void) { return false; }
static inline bool intel_pstate_has_acpi_ppc(void) { return false; }
+static inline void intel_pstate_request_control_from_smm(void) {}
#endif /* CONFIG_ACPI */
static const struct x86_cpu_id hwp_support_ids[] __initconst = {
@@ -1818,6 +2448,7 @@ static int __init intel_pstate_init(void)
if (x86_match_cpu(hwp_support_ids) && !no_hwp) {
copy_cpu_funcs(&core_params.funcs);
hwp_active++;
+ intel_pstate.attr = hwp_cpufreq_attrs;
goto hwp_cpu_matched;
}
@@ -1850,7 +2481,9 @@ hwp_cpu_matched:
if (!hwp_active && hwp_only)
goto out;
- rc = cpufreq_register_driver(&intel_pstate_driver);
+ intel_pstate_request_control_from_smm();
+
+ rc = cpufreq_register_driver(intel_pstate_driver);
if (rc)
goto out;
@@ -1865,7 +2498,9 @@ out:
get_online_cpus();
for_each_online_cpu(cpu) {
if (all_cpu_data[cpu]) {
- intel_pstate_clear_update_util_hook(cpu);
+ if (intel_pstate_driver == &intel_pstate)
+ intel_pstate_clear_update_util_hook(cpu);
+
kfree(all_cpu_data[cpu]);
}
}
@@ -1881,8 +2516,13 @@ static int __init intel_pstate_setup(char *str)
if (!str)
return -EINVAL;
- if (!strcmp(str, "disable"))
+ if (!strcmp(str, "disable")) {
no_load = 1;
+ } else if (!strcmp(str, "passive")) {
+ pr_info("Passive mode enabled\n");
+ intel_pstate_driver = &intel_cpufreq;
+ no_hwp = 1;
+ }
if (!strcmp(str, "no_hwp")) {
pr_info("HWP disabled\n");
no_hwp = 1;
@@ -1891,6 +2531,8 @@ static int __init intel_pstate_setup(char *str)
force_load = 1;
if (!strcmp(str, "hwp_only"))
hwp_only = 1;
+ if (!strcmp(str, "per_cpu_perf_limits"))
+ per_cpu_limits = true;
#ifdef CONFIG_ACPI
if (!strcmp(str, "support_acpi_ppc"))
diff --git a/drivers/cpufreq/powernv-cpufreq.c b/drivers/cpufreq/powernv-cpufreq.c
index d3ffde806629..37671b545880 100644
--- a/drivers/cpufreq/powernv-cpufreq.c
+++ b/drivers/cpufreq/powernv-cpufreq.c
@@ -42,6 +42,10 @@
#define PMSR_PSAFE_ENABLE (1UL << 30)
#define PMSR_SPR_EM_DISABLE (1UL << 31)
#define PMSR_MAX(x) ((x >> 32) & 0xFF)
+#define LPSTATE_SHIFT 48
+#define GPSTATE_SHIFT 56
+#define GET_LPSTATE(x) (((x) >> LPSTATE_SHIFT) & 0xFF)
+#define GET_GPSTATE(x) (((x) >> GPSTATE_SHIFT) & 0xFF)
#define MAX_RAMP_DOWN_TIME 5120
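
A minimal sketch of how these accessors decode a PMCR image (the register
value is hypothetical; pstates on POWER are signed 8-bit quantities, which
is why the handler below casts the extracted fields to s8):

    #include <stdint.h>
    #include <stdio.h>

    #define DEMO_LPSTATE_SHIFT 48
    #define DEMO_GPSTATE_SHIFT 56
    #define DEMO_GET_LPSTATE(x) (((x) >> DEMO_LPSTATE_SHIFT) & 0xFF)
    #define DEMO_GET_GPSTATE(x) (((x) >> DEMO_GPSTATE_SHIFT) & 0xFF)

    int main(void)
    {
        /* Hypothetical PMCR: global pstate 0xF3, local pstate 0xF8. */
        uint64_t pmcr = ((uint64_t)0xF3 << DEMO_GPSTATE_SHIFT) |
                        ((uint64_t)0xF8 << DEMO_LPSTATE_SHIFT);

        /* Prints "gpstate -13 lpstate -8". */
        printf("gpstate %d lpstate %d\n",
               (int8_t)DEMO_GET_GPSTATE(pmcr),
               (int8_t)DEMO_GET_LPSTATE(pmcr));
        return 0;
    }
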
/*
@@ -592,7 +596,8 @@ void gpstate_timer_handler(unsigned long data)
{
struct cpufreq_policy *policy = (struct cpufreq_policy *)data;
struct global_pstate_info *gpstates = policy->driver_data;
- int gpstate_idx;
+ int gpstate_idx, lpstate_idx;
+ unsigned long val;
unsigned int time_diff = jiffies_to_msecs(jiffies)
- gpstates->last_sampled_time;
struct powernv_smp_call_data freq_data;
@@ -600,21 +605,37 @@ void gpstate_timer_handler(unsigned long data)
if (!spin_trylock(&gpstates->gpstate_lock))
return;
+ /*
+	 * If the PMCR was last updated via fast_switch(), the value in
+	 * gpstates->last_lpstate_idx may be stale. Hence, read the PMCR
+	 * to get the correct data.
+ */
+ val = get_pmspr(SPRN_PMCR);
+ freq_data.gpstate_id = (s8)GET_GPSTATE(val);
+ freq_data.pstate_id = (s8)GET_LPSTATE(val);
+ if (freq_data.gpstate_id == freq_data.pstate_id) {
+ reset_gpstates(policy);
+ spin_unlock(&gpstates->gpstate_lock);
+ return;
+ }
+
gpstates->last_sampled_time += time_diff;
gpstates->elapsed_time += time_diff;
- freq_data.pstate_id = idx_to_pstate(gpstates->last_lpstate_idx);
- if ((gpstates->last_gpstate_idx == gpstates->last_lpstate_idx) ||
- (gpstates->elapsed_time > MAX_RAMP_DOWN_TIME)) {
+ if (gpstates->elapsed_time > MAX_RAMP_DOWN_TIME) {
gpstate_idx = pstate_to_idx(freq_data.pstate_id);
+ lpstate_idx = gpstate_idx;
reset_gpstates(policy);
gpstates->highest_lpstate_idx = gpstate_idx;
} else {
+ lpstate_idx = pstate_to_idx(freq_data.pstate_id);
gpstate_idx = calc_global_pstate(gpstates->elapsed_time,
gpstates->highest_lpstate_idx,
- gpstates->last_lpstate_idx);
+ lpstate_idx);
}
-
+ freq_data.gpstate_id = idx_to_pstate(gpstate_idx);
+ gpstates->last_gpstate_idx = gpstate_idx;
+ gpstates->last_lpstate_idx = lpstate_idx;
/*
* If local pstate is equal to global pstate, rampdown is over
* So timer is not required to be queued.
@@ -622,10 +643,6 @@ void gpstate_timer_handler(unsigned long data)
if (gpstate_idx != gpstates->last_lpstate_idx)
queue_gpstate_timer(gpstates);
- freq_data.gpstate_id = idx_to_pstate(gpstate_idx);
- gpstates->last_gpstate_idx = pstate_to_idx(freq_data.gpstate_id);
- gpstates->last_lpstate_idx = pstate_to_idx(freq_data.pstate_id);
-
spin_unlock(&gpstates->gpstate_lock);
/* Timer may get migrated to a different cpu on cpu hot unplug */
@@ -647,8 +664,14 @@ static int powernv_cpufreq_target_index(struct cpufreq_policy *policy,
if (unlikely(rebooting) && new_index != get_nominal_index())
return 0;
- if (!throttled)
+ if (!throttled) {
+		/*
+		 * We don't want to be preempted while checking if the
+		 * CPU frequency has been throttled.
+		 */
+ preempt_disable();
powernv_cpufreq_throttle_check(NULL);
+ preempt_enable();
+ }
cur_msec = jiffies_to_msecs(get_jiffies_64());
@@ -752,9 +775,12 @@ static int powernv_cpufreq_cpu_init(struct cpufreq_policy *policy)
spin_lock_init(&gpstates->gpstate_lock);
ret = cpufreq_table_validate_and_show(policy, powernv_freqs);
- if (ret < 0)
+ if (ret < 0) {
kfree(policy->driver_data);
+ return ret;
+ }
+ policy->fast_switch_possible = true;
return ret;
}
@@ -897,6 +923,20 @@ static void powernv_cpufreq_stop_cpu(struct cpufreq_policy *policy)
del_timer_sync(&gpstates->timer);
}
+static unsigned int powernv_fast_switch(struct cpufreq_policy *policy,
+ unsigned int target_freq)
+{
+ int index;
+ struct powernv_smp_call_data freq_data;
+
+ index = cpufreq_table_find_index_dl(policy, target_freq);
+ freq_data.pstate_id = powernv_freqs[index].driver_data;
+ freq_data.gpstate_id = powernv_freqs[index].driver_data;
+ set_pstate(&freq_data);
+
+ return powernv_freqs[index].frequency;
+}
+
static struct cpufreq_driver powernv_cpufreq_driver = {
.name = "powernv-cpufreq",
.flags = CPUFREQ_CONST_LOOPS,
@@ -904,6 +944,7 @@ static struct cpufreq_driver powernv_cpufreq_driver = {
.exit = powernv_cpufreq_cpu_exit,
.verify = cpufreq_generic_frequency_table_verify,
.target_index = powernv_cpufreq_target_index,
+ .fast_switch = powernv_fast_switch,
.get = powernv_cpufreq_get,
.stop_cpu = powernv_cpufreq_stop_cpu,
.attr = powernv_cpu_freq_attr,
diff --git a/drivers/cpufreq/s3c64xx-cpufreq.c b/drivers/cpufreq/s3c64xx-cpufreq.c
index 176e84cc3991..0cb9040eca49 100644
--- a/drivers/cpufreq/s3c64xx-cpufreq.c
+++ b/drivers/cpufreq/s3c64xx-cpufreq.c
@@ -107,7 +107,7 @@ static int s3c64xx_cpufreq_set_target(struct cpufreq_policy *policy,
}
#ifdef CONFIG_REGULATOR
-static void __init s3c64xx_cpufreq_config_regulator(void)
+static void s3c64xx_cpufreq_config_regulator(void)
{
int count, v, i, found;
struct cpufreq_frequency_table *freq;
diff --git a/drivers/cpuidle/cpuidle-powernv.c b/drivers/cpuidle/cpuidle-powernv.c
index 7fe442ca38f4..0835a37a5f3a 100644
--- a/drivers/cpuidle/cpuidle-powernv.c
+++ b/drivers/cpuidle/cpuidle-powernv.c
@@ -22,7 +22,7 @@
#define POWERNV_THRESHOLD_LATENCY_NS 200000
-struct cpuidle_driver powernv_idle_driver = {
+static struct cpuidle_driver powernv_idle_driver = {
.name = "powernv_idle",
.owner = THIS_MODULE,
};
diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c
index c73207abb5a4..62810ff3b00f 100644
--- a/drivers/cpuidle/cpuidle.c
+++ b/drivers/cpuidle/cpuidle.c
@@ -97,7 +97,23 @@ static int find_deepest_state(struct cpuidle_driver *drv,
return ret;
}
-#ifdef CONFIG_SUSPEND
+/**
+ * cpuidle_use_deepest_state - Set/clear governor override flag.
+ * @enable: New value of the flag.
+ *
+ * Set or clear the flag telling the current CPU to use the deepest available
+ * idle state (overriding the governors going forward if set).
+ */
+void cpuidle_use_deepest_state(bool enable)
+{
+ struct cpuidle_device *dev;
+
+ preempt_disable();
+ dev = cpuidle_get_device();
+ dev->use_deepest_state = enable;
+ preempt_enable();
+}
+
/**
* cpuidle_find_deepest_state - Find the deepest available idle state.
* @drv: cpuidle driver for the given CPU.
@@ -109,6 +125,7 @@ int cpuidle_find_deepest_state(struct cpuidle_driver *drv,
return find_deepest_state(drv, dev, UINT_MAX, 0, false);
}
+#ifdef CONFIG_SUSPEND
static void enter_freeze_proper(struct cpuidle_driver *drv,
struct cpuidle_device *dev, int index)
{
diff --git a/drivers/cpuidle/dt_idle_states.c b/drivers/cpuidle/dt_idle_states.c
index a5c111b67f37..ffca4fc0061d 100644
--- a/drivers/cpuidle/dt_idle_states.c
+++ b/drivers/cpuidle/dt_idle_states.c
@@ -38,6 +38,12 @@ static int init_state_node(struct cpuidle_state *idle_state,
* state enter function.
*/
idle_state->enter = match_id->data;
+ /*
+	 * Since this is not a "coupled" state, it's safe to assume that
+	 * interrupts won't be enabled when it exits, allowing the tick to be
+	 * frozen safely. So enter() can also serve as the enter_freeze()
+	 * callback.
+ */
+ idle_state->enter_freeze = match_id->data;
err = of_property_read_u32(state_node, "wakeup-latency-us",
&idle_state->exit_latency);
diff --git a/drivers/cpuidle/governor.c b/drivers/cpuidle/governor.c
index fb9f511cca23..4e78263e34a4 100644
--- a/drivers/cpuidle/governor.c
+++ b/drivers/cpuidle/governor.c
@@ -9,7 +9,6 @@
*/
#include <linux/mutex.h>
-#include <linux/module.h>
#include <linux/cpuidle.h>
#include "cpuidle.h"
@@ -53,14 +52,11 @@ int cpuidle_switch_governor(struct cpuidle_governor *gov)
if (cpuidle_curr_governor) {
list_for_each_entry(dev, &cpuidle_detected_devices, device_list)
cpuidle_disable_device(dev);
- module_put(cpuidle_curr_governor->owner);
}
cpuidle_curr_governor = gov;
if (gov) {
- if (!try_module_get(cpuidle_curr_governor->owner))
- return -EINVAL;
list_for_each_entry(dev, &cpuidle_detected_devices, device_list)
cpuidle_enable_device(dev);
cpuidle_install_idle_handler();
diff --git a/drivers/cpuidle/governors/ladder.c b/drivers/cpuidle/governors/ladder.c
index 63bd5a403e22..fe8f08948fcb 100644
--- a/drivers/cpuidle/governors/ladder.c
+++ b/drivers/cpuidle/governors/ladder.c
@@ -15,7 +15,6 @@
#include <linux/kernel.h>
#include <linux/cpuidle.h>
#include <linux/pm_qos.h>
-#include <linux/module.h>
#include <linux/jiffies.h>
#include <linux/tick.h>
@@ -177,7 +176,6 @@ static struct cpuidle_governor ladder_governor = {
.enable = ladder_enable_device,
.select = ladder_select_state,
.reflect = ladder_reflect,
- .owner = THIS_MODULE,
};
/**
diff --git a/drivers/cpuidle/governors/menu.c b/drivers/cpuidle/governors/menu.c
index 03d38c291de6..d9b5b9398a0f 100644
--- a/drivers/cpuidle/governors/menu.c
+++ b/drivers/cpuidle/governors/menu.c
@@ -19,7 +19,6 @@
#include <linux/tick.h>
#include <linux/sched.h>
#include <linux/math64.h>
-#include <linux/module.h>
/*
* Please note when changing the tuning values:
@@ -484,7 +483,6 @@ static struct cpuidle_governor menu_governor = {
.enable = menu_enable_device,
.select = menu_select,
.reflect = menu_reflect,
- .owner = THIS_MODULE,
};
/**
diff --git a/drivers/cpuidle/sysfs.c b/drivers/cpuidle/sysfs.c
index 832a2c3f01ff..c5adc8c9ac43 100644
--- a/drivers/cpuidle/sysfs.c
+++ b/drivers/cpuidle/sysfs.c
@@ -403,8 +403,10 @@ static int cpuidle_add_state_sysfs(struct cpuidle_device *device)
/* state statistics */
for (i = 0; i < drv->state_count; i++) {
kobj = kzalloc(sizeof(struct cpuidle_state_kobj), GFP_KERNEL);
- if (!kobj)
+ if (!kobj) {
+ ret = -ENOMEM;
goto error_state;
+ }
kobj->state = &drv->states[i];
kobj->state_usage = &device->states_usage[i];
init_completion(&kobj->kobj_unregister);
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index 4d2b81f2b223..79564785ae30 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -555,4 +555,6 @@ config CRYPTO_DEV_ROCKCHIP
source "drivers/crypto/chelsio/Kconfig"
+source "drivers/crypto/virtio/Kconfig"
+
endif # CRYPTO_HW
diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile
index ad7250fa1348..bc53cb833a06 100644
--- a/drivers/crypto/Makefile
+++ b/drivers/crypto/Makefile
@@ -32,3 +32,4 @@ obj-$(CONFIG_CRYPTO_DEV_VMX) += vmx/
obj-$(CONFIG_CRYPTO_DEV_SUN4I_SS) += sunxi-ss/
obj-$(CONFIG_CRYPTO_DEV_ROCKCHIP) += rockchip/
obj-$(CONFIG_CRYPTO_DEV_CHELSIO) += chelsio/
+obj-$(CONFIG_CRYPTO_DEV_VIRTIO) += virtio/
diff --git a/drivers/crypto/amcc/crypto4xx_core.c b/drivers/crypto/amcc/crypto4xx_core.c
index dae1e39139e9..d10b4ae5e0da 100644
--- a/drivers/crypto/amcc/crypto4xx_core.c
+++ b/drivers/crypto/amcc/crypto4xx_core.c
@@ -135,8 +135,7 @@ int crypto4xx_alloc_sa(struct crypto4xx_ctx *ctx, u32 size)
ctx->sa_out = dma_alloc_coherent(ctx->dev->core_dev->device, size * 4,
&ctx->sa_out_dma_addr, GFP_ATOMIC);
if (ctx->sa_out == NULL) {
- dma_free_coherent(ctx->dev->core_dev->device,
- ctx->sa_len * 4,
+ dma_free_coherent(ctx->dev->core_dev->device, size * 4,
ctx->sa_in, ctx->sa_in_dma_addr);
return -ENOMEM;
}
diff --git a/drivers/crypto/atmel-aes-regs.h b/drivers/crypto/atmel-aes-regs.h
index 6c2951bb70b1..0ec04407b533 100644
--- a/drivers/crypto/atmel-aes-regs.h
+++ b/drivers/crypto/atmel-aes-regs.h
@@ -28,6 +28,7 @@
#define AES_MR_OPMOD_CFB (0x3 << 12)
#define AES_MR_OPMOD_CTR (0x4 << 12)
#define AES_MR_OPMOD_GCM (0x5 << 12)
+#define AES_MR_OPMOD_XTS (0x6 << 12)
#define AES_MR_LOD (0x1 << 15)
#define AES_MR_CFBS_MASK (0x7 << 16)
#define AES_MR_CFBS_128b (0x0 << 16)
@@ -67,6 +68,9 @@
#define AES_CTRR 0x98
#define AES_GCMHR(x) (0x9c + ((x) * 0x04))
+#define AES_TWR(x) (0xc0 + ((x) * 0x04))
+#define AES_ALPHAR(x) (0xd0 + ((x) * 0x04))
+
#define AES_HW_VERSION 0xFC
#endif /* __ATMEL_AES_REGS_H__ */
diff --git a/drivers/crypto/atmel-aes.c b/drivers/crypto/atmel-aes.c
index e3d40a8dfffb..0e3d0d655b96 100644
--- a/drivers/crypto/atmel-aes.c
+++ b/drivers/crypto/atmel-aes.c
@@ -36,6 +36,7 @@
#include <crypto/scatterwalk.h>
#include <crypto/algapi.h>
#include <crypto/aes.h>
+#include <crypto/xts.h>
#include <crypto/internal/aead.h>
#include <linux/platform_data/crypto-atmel.h>
#include <dt-bindings/dma/at91.h>
@@ -68,6 +69,7 @@
#define AES_FLAGS_CFB8 (AES_MR_OPMOD_CFB | AES_MR_CFBS_8b)
#define AES_FLAGS_CTR AES_MR_OPMOD_CTR
#define AES_FLAGS_GCM AES_MR_OPMOD_GCM
+#define AES_FLAGS_XTS AES_MR_OPMOD_XTS
#define AES_FLAGS_MODE_MASK (AES_FLAGS_OPMODE_MASK | \
AES_FLAGS_ENCRYPT | \
@@ -89,6 +91,7 @@ struct atmel_aes_caps {
bool has_cfb64;
bool has_ctr32;
bool has_gcm;
+ bool has_xts;
u32 max_burst_size;
};
@@ -135,6 +138,12 @@ struct atmel_aes_gcm_ctx {
atmel_aes_fn_t ghash_resume;
};
+struct atmel_aes_xts_ctx {
+ struct atmel_aes_base_ctx base;
+
+ u32 key2[AES_KEYSIZE_256 / sizeof(u32)];
+};
+
struct atmel_aes_reqctx {
unsigned long mode;
};
@@ -282,6 +291,20 @@ static const char *atmel_aes_reg_name(u32 offset, char *tmp, size_t sz)
snprintf(tmp, sz, "GCMHR[%u]", (offset - AES_GCMHR(0)) >> 2);
break;
+ case AES_TWR(0):
+ case AES_TWR(1):
+ case AES_TWR(2):
+ case AES_TWR(3):
+ snprintf(tmp, sz, "TWR[%u]", (offset - AES_TWR(0)) >> 2);
+ break;
+
+ case AES_ALPHAR(0):
+ case AES_ALPHAR(1):
+ case AES_ALPHAR(2):
+ case AES_ALPHAR(3):
+ snprintf(tmp, sz, "ALPHAR[%u]", (offset - AES_ALPHAR(0)) >> 2);
+ break;
+
default:
snprintf(tmp, sz, "0x%02x", offset);
break;
@@ -317,7 +340,7 @@ static inline void atmel_aes_write(struct atmel_aes_dev *dd,
char tmp[16];
dev_vdbg(dd->dev, "write 0x%08x into %s\n", value,
- atmel_aes_reg_name(offset, tmp));
+ atmel_aes_reg_name(offset, tmp, sizeof(tmp)));
}
#endif /* VERBOSE_DEBUG */
@@ -453,15 +476,15 @@ static inline int atmel_aes_complete(struct atmel_aes_dev *dd, int err)
return err;
}
-static void atmel_aes_write_ctrl(struct atmel_aes_dev *dd, bool use_dma,
- const u32 *iv)
+static void atmel_aes_write_ctrl_key(struct atmel_aes_dev *dd, bool use_dma,
+ const u32 *iv, const u32 *key, int keylen)
{
u32 valmr = 0;
/* MR register must be set before IV registers */
- if (dd->ctx->keylen == AES_KEYSIZE_128)
+ if (keylen == AES_KEYSIZE_128)
valmr |= AES_MR_KEYSIZE_128;
- else if (dd->ctx->keylen == AES_KEYSIZE_192)
+ else if (keylen == AES_KEYSIZE_192)
valmr |= AES_MR_KEYSIZE_192;
else
valmr |= AES_MR_KEYSIZE_256;
@@ -478,13 +501,19 @@ static void atmel_aes_write_ctrl(struct atmel_aes_dev *dd, bool use_dma,
atmel_aes_write(dd, AES_MR, valmr);
- atmel_aes_write_n(dd, AES_KEYWR(0), dd->ctx->key,
- SIZE_IN_WORDS(dd->ctx->keylen));
+ atmel_aes_write_n(dd, AES_KEYWR(0), key, SIZE_IN_WORDS(keylen));
if (iv && (valmr & AES_MR_OPMOD_MASK) != AES_MR_OPMOD_ECB)
atmel_aes_write_block(dd, AES_IVR(0), iv);
}
+static inline void atmel_aes_write_ctrl(struct atmel_aes_dev *dd, bool use_dma,
+ const u32 *iv)
+{
+ atmel_aes_write_ctrl_key(dd, use_dma, iv,
+ dd->ctx->key, dd->ctx->keylen);
+}
/* CPU transfer */
@@ -1769,6 +1798,137 @@ static struct aead_alg aes_gcm_alg = {
};
+/* xts functions */
+
+static inline struct atmel_aes_xts_ctx *
+atmel_aes_xts_ctx_cast(struct atmel_aes_base_ctx *ctx)
+{
+ return container_of(ctx, struct atmel_aes_xts_ctx, base);
+}
+
+static int atmel_aes_xts_process_data(struct atmel_aes_dev *dd);
+
+static int atmel_aes_xts_start(struct atmel_aes_dev *dd)
+{
+ struct atmel_aes_xts_ctx *ctx = atmel_aes_xts_ctx_cast(dd->ctx);
+ struct ablkcipher_request *req = ablkcipher_request_cast(dd->areq);
+ struct atmel_aes_reqctx *rctx = ablkcipher_request_ctx(req);
+ unsigned long flags;
+ int err;
+
+ atmel_aes_set_mode(dd, rctx);
+
+ err = atmel_aes_hw_init(dd);
+ if (err)
+ return atmel_aes_complete(dd, err);
+
+ /* Compute the tweak value from req->info with ecb(aes). */
+ flags = dd->flags;
+ dd->flags &= ~AES_FLAGS_MODE_MASK;
+ dd->flags |= (AES_FLAGS_ECB | AES_FLAGS_ENCRYPT);
+ atmel_aes_write_ctrl_key(dd, false, NULL,
+ ctx->key2, ctx->base.keylen);
+ dd->flags = flags;
+
+ atmel_aes_write_block(dd, AES_IDATAR(0), req->info);
+ return atmel_aes_wait_for_data_ready(dd, atmel_aes_xts_process_data);
+}
+
+static int atmel_aes_xts_process_data(struct atmel_aes_dev *dd)
+{
+ struct ablkcipher_request *req = ablkcipher_request_cast(dd->areq);
+ bool use_dma = (req->nbytes >= ATMEL_AES_DMA_THRESHOLD);
+ u32 tweak[AES_BLOCK_SIZE / sizeof(u32)];
+ static const u32 one[AES_BLOCK_SIZE / sizeof(u32)] = {cpu_to_le32(1), };
+ u8 *tweak_bytes = (u8 *)tweak;
+ int i;
+
+ /* Read the computed ciphered tweak value. */
+ atmel_aes_read_block(dd, AES_ODATAR(0), tweak);
+ /*
+ * Hardware quirk:
+	 * the order of the ciphered tweak bytes needs to be reversed before
+	 * writing them into the TWRx registers.
+ */
+ for (i = 0; i < AES_BLOCK_SIZE/2; ++i) {
+ u8 tmp = tweak_bytes[AES_BLOCK_SIZE - 1 - i];
+
+ tweak_bytes[AES_BLOCK_SIZE - 1 - i] = tweak_bytes[i];
+ tweak_bytes[i] = tmp;
+ }
+
+ /* Process the data. */
+ atmel_aes_write_ctrl(dd, use_dma, NULL);
+ atmel_aes_write_block(dd, AES_TWR(0), tweak);
+ atmel_aes_write_block(dd, AES_ALPHAR(0), one);
+ if (use_dma)
+ return atmel_aes_dma_start(dd, req->src, req->dst, req->nbytes,
+ atmel_aes_transfer_complete);
+
+ return atmel_aes_cpu_start(dd, req->src, req->dst, req->nbytes,
+ atmel_aes_transfer_complete);
+}
+
+static int atmel_aes_xts_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
+ unsigned int keylen)
+{
+ struct atmel_aes_xts_ctx *ctx = crypto_ablkcipher_ctx(tfm);
+ int err;
+
+ err = xts_check_key(crypto_ablkcipher_tfm(tfm), key, keylen);
+ if (err)
+ return err;
+
+ memcpy(ctx->base.key, key, keylen/2);
+ memcpy(ctx->key2, key + keylen/2, keylen/2);
+ ctx->base.keylen = keylen/2;
+
+ return 0;
+}
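
Per the xts(aes) convention, the supplied key is two equal-length AES keys
concatenated: the first half ciphers the data and the second half (key2
here) ciphers the tweak. A user-space sketch of the split, with hypothetical
buffers:

    #include <stdio.h>
    #include <string.h>

    /* Split a 2*N-byte xts(aes) key into the data key and the tweak key. */
    static void xts_split_key(const unsigned char *key, unsigned int keylen,
                              unsigned char *key1, unsigned char *key2)
    {
        memcpy(key1, key, keylen / 2);              /* ciphers the data  */
        memcpy(key2, key + keylen / 2, keylen / 2); /* ciphers the tweak */
    }

    int main(void)
    {
        unsigned char key[64] = { 0 };  /* 2 x AES-256 key, hypothetical */
        unsigned char k1[32], k2[32];

        xts_split_key(key, sizeof(key), k1, k2);
        printf("split %zu-byte key into two %zu-byte halves\n",
               sizeof(key), sizeof(k1));
        return 0;
    }
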
+
+static int atmel_aes_xts_encrypt(struct ablkcipher_request *req)
+{
+ return atmel_aes_crypt(req, AES_FLAGS_XTS | AES_FLAGS_ENCRYPT);
+}
+
+static int atmel_aes_xts_decrypt(struct ablkcipher_request *req)
+{
+ return atmel_aes_crypt(req, AES_FLAGS_XTS);
+}
+
+static int atmel_aes_xts_cra_init(struct crypto_tfm *tfm)
+{
+ struct atmel_aes_xts_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ tfm->crt_ablkcipher.reqsize = sizeof(struct atmel_aes_reqctx);
+ ctx->base.start = atmel_aes_xts_start;
+
+ return 0;
+}
+
+static struct crypto_alg aes_xts_alg = {
+ .cra_name = "xts(aes)",
+ .cra_driver_name = "atmel-xts-aes",
+ .cra_priority = ATMEL_AES_PRIORITY,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct atmel_aes_xts_ctx),
+ .cra_alignmask = 0xf,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_init = atmel_aes_xts_cra_init,
+ .cra_exit = atmel_aes_cra_exit,
+ .cra_u.ablkcipher = {
+ .min_keysize = 2 * AES_MIN_KEY_SIZE,
+ .max_keysize = 2 * AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .setkey = atmel_aes_xts_setkey,
+ .encrypt = atmel_aes_xts_encrypt,
+ .decrypt = atmel_aes_xts_decrypt,
+ }
+};
+
/* Probe functions */
static int atmel_aes_buff_init(struct atmel_aes_dev *dd)
@@ -1877,6 +2037,9 @@ static void atmel_aes_unregister_algs(struct atmel_aes_dev *dd)
{
int i;
+ if (dd->caps.has_xts)
+ crypto_unregister_alg(&aes_xts_alg);
+
if (dd->caps.has_gcm)
crypto_unregister_aead(&aes_gcm_alg);
@@ -1909,8 +2072,16 @@ static int atmel_aes_register_algs(struct atmel_aes_dev *dd)
goto err_aes_gcm_alg;
}
+ if (dd->caps.has_xts) {
+ err = crypto_register_alg(&aes_xts_alg);
+ if (err)
+ goto err_aes_xts_alg;
+ }
+
return 0;
+err_aes_xts_alg:
+ crypto_unregister_aead(&aes_gcm_alg);
err_aes_gcm_alg:
crypto_unregister_alg(&aes_cfb64_alg);
err_aes_cfb64_alg:
@@ -1928,6 +2099,7 @@ static void atmel_aes_get_cap(struct atmel_aes_dev *dd)
dd->caps.has_cfb64 = 0;
dd->caps.has_ctr32 = 0;
dd->caps.has_gcm = 0;
+ dd->caps.has_xts = 0;
dd->caps.max_burst_size = 1;
/* keep only major version number */
@@ -1937,6 +2109,7 @@ static void atmel_aes_get_cap(struct atmel_aes_dev *dd)
dd->caps.has_cfb64 = 1;
dd->caps.has_ctr32 = 1;
dd->caps.has_gcm = 1;
+ dd->caps.has_xts = 1;
dd->caps.max_burst_size = 4;
break;
case 0x200:
@@ -2138,7 +2311,7 @@ aes_dd_err:
static int atmel_aes_remove(struct platform_device *pdev)
{
- static struct atmel_aes_dev *aes_dd;
+ struct atmel_aes_dev *aes_dd;
aes_dd = platform_get_drvdata(pdev);
if (!aes_dd)
diff --git a/drivers/crypto/caam/Kconfig b/drivers/crypto/caam/Kconfig
index 64bf3024b680..bc0d3569f8d9 100644
--- a/drivers/crypto/caam/Kconfig
+++ b/drivers/crypto/caam/Kconfig
@@ -74,7 +74,7 @@ config CRYPTO_DEV_FSL_CAAM_INTC_TIME_THLD
config CRYPTO_DEV_FSL_CAAM_CRYPTO_API
tristate "Register algorithm implementations with the Crypto API"
- depends on CRYPTO_DEV_FSL_CAAM && CRYPTO_DEV_FSL_CAAM_JR
+ depends on CRYPTO_DEV_FSL_CAAM_JR
default y
select CRYPTO_AEAD
select CRYPTO_AUTHENC
@@ -89,7 +89,7 @@ config CRYPTO_DEV_FSL_CAAM_CRYPTO_API
config CRYPTO_DEV_FSL_CAAM_AHASH_API
tristate "Register hash algorithm implementations with Crypto API"
- depends on CRYPTO_DEV_FSL_CAAM && CRYPTO_DEV_FSL_CAAM_JR
+ depends on CRYPTO_DEV_FSL_CAAM_JR
default y
select CRYPTO_HASH
help
@@ -101,7 +101,7 @@ config CRYPTO_DEV_FSL_CAAM_AHASH_API
config CRYPTO_DEV_FSL_CAAM_PKC_API
tristate "Register public key cryptography implementations with Crypto API"
- depends on CRYPTO_DEV_FSL_CAAM && CRYPTO_DEV_FSL_CAAM_JR
+ depends on CRYPTO_DEV_FSL_CAAM_JR
default y
select CRYPTO_RSA
help
@@ -113,7 +113,7 @@ config CRYPTO_DEV_FSL_CAAM_PKC_API
config CRYPTO_DEV_FSL_CAAM_RNG_API
tristate "Register caam device for hwrng API"
- depends on CRYPTO_DEV_FSL_CAAM && CRYPTO_DEV_FSL_CAAM_JR
+ depends on CRYPTO_DEV_FSL_CAAM_JR
default y
select CRYPTO_RNG
select HW_RANDOM
@@ -134,3 +134,6 @@ config CRYPTO_DEV_FSL_CAAM_DEBUG
help
Selecting this will enable printing of various debug
information in the CAAM driver.
+
+config CRYPTO_DEV_FSL_CAAM_CRYPTO_API_DESC
+ def_tristate CRYPTO_DEV_FSL_CAAM_CRYPTO_API
diff --git a/drivers/crypto/caam/Makefile b/drivers/crypto/caam/Makefile
index 08bf5515ae8a..6554742f357e 100644
--- a/drivers/crypto/caam/Makefile
+++ b/drivers/crypto/caam/Makefile
@@ -8,6 +8,7 @@ endif
obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM) += caam.o
obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_JR) += caam_jr.o
obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API) += caamalg.o
+obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_DESC) += caamalg_desc.o
obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_AHASH_API) += caamhash.o
obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_RNG_API) += caamrng.o
obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_PKC_API) += caam_pkc.o
diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
index 954a64c7757b..662fe94cb2f8 100644
--- a/drivers/crypto/caam/caamalg.c
+++ b/drivers/crypto/caam/caamalg.c
@@ -2,6 +2,7 @@
* caam - Freescale FSL CAAM support for crypto API
*
* Copyright 2008-2011 Freescale Semiconductor, Inc.
+ * Copyright 2016 NXP
*
* Based on talitos crypto API driver.
*
@@ -53,6 +54,7 @@
#include "error.h"
#include "sg_sw_sec4.h"
#include "key_gen.h"
+#include "caamalg_desc.h"
/*
* crypto alg
@@ -62,8 +64,6 @@
#define CAAM_MAX_KEY_SIZE (AES_MAX_KEY_SIZE + \
CTR_RFC3686_NONCE_SIZE + \
SHA512_DIGEST_SIZE * 2)
-/* max IV is max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */
-#define CAAM_MAX_IV_LENGTH 16
#define AEAD_DESC_JOB_IO_LEN (DESC_JOB_IO_LEN + CAAM_CMD_SZ * 2)
#define GCM_DESC_JOB_IO_LEN (AEAD_DESC_JOB_IO_LEN + \
@@ -71,37 +71,6 @@
#define AUTHENC_DESC_JOB_IO_LEN (AEAD_DESC_JOB_IO_LEN + \
CAAM_CMD_SZ * 5)
-/* length of descriptors text */
-#define DESC_AEAD_BASE (4 * CAAM_CMD_SZ)
-#define DESC_AEAD_ENC_LEN (DESC_AEAD_BASE + 11 * CAAM_CMD_SZ)
-#define DESC_AEAD_DEC_LEN (DESC_AEAD_BASE + 15 * CAAM_CMD_SZ)
-#define DESC_AEAD_GIVENC_LEN (DESC_AEAD_ENC_LEN + 9 * CAAM_CMD_SZ)
-
-/* Note: Nonce is counted in enckeylen */
-#define DESC_AEAD_CTR_RFC3686_LEN (4 * CAAM_CMD_SZ)
-
-#define DESC_AEAD_NULL_BASE (3 * CAAM_CMD_SZ)
-#define DESC_AEAD_NULL_ENC_LEN (DESC_AEAD_NULL_BASE + 11 * CAAM_CMD_SZ)
-#define DESC_AEAD_NULL_DEC_LEN (DESC_AEAD_NULL_BASE + 13 * CAAM_CMD_SZ)
-
-#define DESC_GCM_BASE (3 * CAAM_CMD_SZ)
-#define DESC_GCM_ENC_LEN (DESC_GCM_BASE + 16 * CAAM_CMD_SZ)
-#define DESC_GCM_DEC_LEN (DESC_GCM_BASE + 12 * CAAM_CMD_SZ)
-
-#define DESC_RFC4106_BASE (3 * CAAM_CMD_SZ)
-#define DESC_RFC4106_ENC_LEN (DESC_RFC4106_BASE + 13 * CAAM_CMD_SZ)
-#define DESC_RFC4106_DEC_LEN (DESC_RFC4106_BASE + 13 * CAAM_CMD_SZ)
-
-#define DESC_RFC4543_BASE (3 * CAAM_CMD_SZ)
-#define DESC_RFC4543_ENC_LEN (DESC_RFC4543_BASE + 11 * CAAM_CMD_SZ)
-#define DESC_RFC4543_DEC_LEN (DESC_RFC4543_BASE + 12 * CAAM_CMD_SZ)
-
-#define DESC_ABLKCIPHER_BASE (3 * CAAM_CMD_SZ)
-#define DESC_ABLKCIPHER_ENC_LEN (DESC_ABLKCIPHER_BASE + \
- 20 * CAAM_CMD_SZ)
-#define DESC_ABLKCIPHER_DEC_LEN (DESC_ABLKCIPHER_BASE + \
- 15 * CAAM_CMD_SZ)
-
#define DESC_MAX_USED_BYTES (CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN)
#define DESC_MAX_USED_LEN (DESC_MAX_USED_BYTES / CAAM_CMD_SZ)
@@ -117,8 +86,7 @@
static void dbg_dump_sg(const char *level, const char *prefix_str,
int prefix_type, int rowsize, int groupsize,
- struct scatterlist *sg, size_t tlen, bool ascii,
- bool may_sleep)
+ struct scatterlist *sg, size_t tlen, bool ascii)
{
struct scatterlist *it;
void *it_page;
@@ -152,7 +120,6 @@ static struct list_head alg_list;
struct caam_alg_entry {
int class1_alg_type;
int class2_alg_type;
- int alg_op;
bool rfc3686;
bool geniv;
};
@@ -163,52 +130,6 @@ struct caam_aead_alg {
bool registered;
};
-/* Set DK bit in class 1 operation if shared */
-static inline void append_dec_op1(u32 *desc, u32 type)
-{
- u32 *jump_cmd, *uncond_jump_cmd;
-
- /* DK bit is valid only for AES */
- if ((type & OP_ALG_ALGSEL_MASK) != OP_ALG_ALGSEL_AES) {
- append_operation(desc, type | OP_ALG_AS_INITFINAL |
- OP_ALG_DECRYPT);
- return;
- }
-
- jump_cmd = append_jump(desc, JUMP_TEST_ALL | JUMP_COND_SHRD);
- append_operation(desc, type | OP_ALG_AS_INITFINAL |
- OP_ALG_DECRYPT);
- uncond_jump_cmd = append_jump(desc, JUMP_TEST_ALL);
- set_jump_tgt_here(desc, jump_cmd);
- append_operation(desc, type | OP_ALG_AS_INITFINAL |
- OP_ALG_DECRYPT | OP_ALG_AAI_DK);
- set_jump_tgt_here(desc, uncond_jump_cmd);
-}
-
-/*
- * For aead functions, read payload and write payload,
- * both of which are specified in req->src and req->dst
- */
-static inline void aead_append_src_dst(u32 *desc, u32 msg_type)
-{
- append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
- append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH |
- KEY_VLF | msg_type | FIFOLD_TYPE_LASTBOTH);
-}
-
-/*
- * For ablkcipher encrypt and decrypt, read from req->src and
- * write to req->dst
- */
-static inline void ablkcipher_append_src_dst(u32 *desc)
-{
- append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
- append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
- append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 |
- KEY_VLF | FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);
- append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
-}
-
/*
* per-session context
*/
@@ -220,147 +141,36 @@ struct caam_ctx {
dma_addr_t sh_desc_enc_dma;
dma_addr_t sh_desc_dec_dma;
dma_addr_t sh_desc_givenc_dma;
- u32 class1_alg_type;
- u32 class2_alg_type;
- u32 alg_op;
u8 key[CAAM_MAX_KEY_SIZE];
dma_addr_t key_dma;
- unsigned int enckeylen;
- unsigned int split_key_len;
- unsigned int split_key_pad_len;
+ struct alginfo adata;
+ struct alginfo cdata;
unsigned int authsize;
};
-static void append_key_aead(u32 *desc, struct caam_ctx *ctx,
- int keys_fit_inline, bool is_rfc3686)
-{
- u32 *nonce;
- unsigned int enckeylen = ctx->enckeylen;
-
- /*
- * RFC3686 specific:
- * | ctx->key = {AUTH_KEY, ENC_KEY, NONCE}
- * | enckeylen = encryption key size + nonce size
- */
- if (is_rfc3686)
- enckeylen -= CTR_RFC3686_NONCE_SIZE;
-
- if (keys_fit_inline) {
- append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
- ctx->split_key_len, CLASS_2 |
- KEY_DEST_MDHA_SPLIT | KEY_ENC);
- append_key_as_imm(desc, (void *)ctx->key +
- ctx->split_key_pad_len, enckeylen,
- enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
- } else {
- append_key(desc, ctx->key_dma, ctx->split_key_len, CLASS_2 |
- KEY_DEST_MDHA_SPLIT | KEY_ENC);
- append_key(desc, ctx->key_dma + ctx->split_key_pad_len,
- enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
- }
-
- /* Load Counter into CONTEXT1 reg */
- if (is_rfc3686) {
- nonce = (u32 *)((void *)ctx->key + ctx->split_key_pad_len +
- enckeylen);
- append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
- LDST_CLASS_IND_CCB |
- LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
- append_move(desc,
- MOVE_SRC_OUTFIFO |
- MOVE_DEST_CLASS1CTX |
- (16 << MOVE_OFFSET_SHIFT) |
- (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
- }
-}
-
-static void init_sh_desc_key_aead(u32 *desc, struct caam_ctx *ctx,
- int keys_fit_inline, bool is_rfc3686)
-{
- u32 *key_jump_cmd;
-
- /* Note: Context registers are saved. */
- init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
-
- /* Skip if already shared */
- key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
- JUMP_COND_SHRD);
-
- append_key_aead(desc, ctx, keys_fit_inline, is_rfc3686);
-
- set_jump_tgt_here(desc, key_jump_cmd);
-}
-
static int aead_null_set_sh_desc(struct crypto_aead *aead)
{
struct caam_ctx *ctx = crypto_aead_ctx(aead);
struct device *jrdev = ctx->jrdev;
- bool keys_fit_inline = false;
- u32 *key_jump_cmd, *jump_cmd, *read_move_cmd, *write_move_cmd;
u32 *desc;
+ int rem_bytes = CAAM_DESC_BYTES_MAX - AEAD_DESC_JOB_IO_LEN -
+ ctx->adata.keylen_pad;
/*
* Job Descriptor and Shared Descriptors
* must all fit into the 64-word Descriptor h/w Buffer
*/
- if (DESC_AEAD_NULL_ENC_LEN + AEAD_DESC_JOB_IO_LEN +
- ctx->split_key_pad_len <= CAAM_DESC_BYTES_MAX)
- keys_fit_inline = true;
+ if (rem_bytes >= DESC_AEAD_NULL_ENC_LEN) {
+ ctx->adata.key_inline = true;
+ ctx->adata.key_virt = ctx->key;
+ } else {
+ ctx->adata.key_inline = false;
+ ctx->adata.key_dma = ctx->key_dma;
+ }
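/*
 * Worked example for the test above, assuming CAAM_DESC_BYTES_MAX is
 * 64 words * 4 bytes = 256 and an illustrative AEAD_DESC_JOB_IO_LEN of
 * 36 bytes: with a SHA-256 split key (keylen_pad = 64),
 *
 *   rem_bytes = 256 - 36 - 64 = 156
 *
 * so the null-cipher encap program may inline the key as long as
 * DESC_AEAD_NULL_ENC_LEN <= 156; otherwise only key_dma is embedded
 * and the SEC fetches the key from external memory at run time.
 */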
/* aead_encrypt shared descriptor */
desc = ctx->sh_desc_enc;
-
- init_sh_desc(desc, HDR_SHARE_SERIAL);
-
- /* Skip if already shared */
- key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
- JUMP_COND_SHRD);
- if (keys_fit_inline)
- append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
- ctx->split_key_len, CLASS_2 |
- KEY_DEST_MDHA_SPLIT | KEY_ENC);
- else
- append_key(desc, ctx->key_dma, ctx->split_key_len, CLASS_2 |
- KEY_DEST_MDHA_SPLIT | KEY_ENC);
- set_jump_tgt_here(desc, key_jump_cmd);
-
- /* assoclen + cryptlen = seqinlen */
- append_math_sub(desc, REG3, SEQINLEN, REG0, CAAM_CMD_SZ);
-
- /* Prepare to read and write cryptlen + assoclen bytes */
- append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
- append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
-
- /*
- * MOVE_LEN opcode is not available in all SEC HW revisions,
- * thus need to do some magic, i.e. self-patch the descriptor
- * buffer.
- */
- read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF |
- MOVE_DEST_MATH3 |
- (0x6 << MOVE_LEN_SHIFT));
- write_move_cmd = append_move(desc, MOVE_SRC_MATH3 |
- MOVE_DEST_DESCBUF |
- MOVE_WAITCOMP |
- (0x8 << MOVE_LEN_SHIFT));
-
- /* Class 2 operation */
- append_operation(desc, ctx->class2_alg_type |
- OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
-
- /* Read and write cryptlen bytes */
- aead_append_src_dst(desc, FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);
-
- set_move_tgt_here(desc, read_move_cmd);
- set_move_tgt_here(desc, write_move_cmd);
- append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
- append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO |
- MOVE_AUX_LS);
-
- /* Write ICV */
- append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB |
- LDST_SRCDST_BYTE_CONTEXT);
-
+ cnstr_shdsc_aead_null_encap(desc, &ctx->adata, ctx->authsize);
ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
desc_bytes(desc),
DMA_TO_DEVICE);
@@ -368,84 +178,22 @@ static int aead_null_set_sh_desc(struct crypto_aead *aead)
dev_err(jrdev, "unable to map shared descriptor\n");
return -ENOMEM;
}
-#ifdef DEBUG
- print_hex_dump(KERN_ERR,
- "aead null enc shdesc@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, desc,
- desc_bytes(desc), 1);
-#endif
/*
* Job Descriptor and Shared Descriptors
* must all fit into the 64-word Descriptor h/w Buffer
*/
- keys_fit_inline = false;
- if (DESC_AEAD_NULL_DEC_LEN + DESC_JOB_IO_LEN +
- ctx->split_key_pad_len <= CAAM_DESC_BYTES_MAX)
- keys_fit_inline = true;
-
- desc = ctx->sh_desc_dec;
+ if (rem_bytes >= DESC_AEAD_NULL_DEC_LEN) {
+ ctx->adata.key_inline = true;
+ ctx->adata.key_virt = ctx->key;
+ } else {
+ ctx->adata.key_inline = false;
+ ctx->adata.key_dma = ctx->key_dma;
+ }
/* aead_decrypt shared descriptor */
- init_sh_desc(desc, HDR_SHARE_SERIAL);
-
- /* Skip if already shared */
- key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
- JUMP_COND_SHRD);
- if (keys_fit_inline)
- append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
- ctx->split_key_len, CLASS_2 |
- KEY_DEST_MDHA_SPLIT | KEY_ENC);
- else
- append_key(desc, ctx->key_dma, ctx->split_key_len, CLASS_2 |
- KEY_DEST_MDHA_SPLIT | KEY_ENC);
- set_jump_tgt_here(desc, key_jump_cmd);
-
- /* Class 2 operation */
- append_operation(desc, ctx->class2_alg_type |
- OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);
-
- /* assoclen + cryptlen = seqoutlen */
- append_math_sub(desc, REG2, SEQOUTLEN, REG0, CAAM_CMD_SZ);
-
- /* Prepare to read and write cryptlen + assoclen bytes */
- append_math_add(desc, VARSEQINLEN, ZERO, REG2, CAAM_CMD_SZ);
- append_math_add(desc, VARSEQOUTLEN, ZERO, REG2, CAAM_CMD_SZ);
-
- /*
- * MOVE_LEN opcode is not available in all SEC HW revisions,
- * thus need to do some magic, i.e. self-patch the descriptor
- * buffer.
- */
- read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF |
- MOVE_DEST_MATH2 |
- (0x6 << MOVE_LEN_SHIFT));
- write_move_cmd = append_move(desc, MOVE_SRC_MATH2 |
- MOVE_DEST_DESCBUF |
- MOVE_WAITCOMP |
- (0x8 << MOVE_LEN_SHIFT));
-
- /* Read and write cryptlen bytes */
- aead_append_src_dst(desc, FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);
-
- /*
- * Insert a NOP here, since we need at least 4 instructions between
- * code patching the descriptor buffer and the location being patched.
- */
- jump_cmd = append_jump(desc, JUMP_TEST_ALL);
- set_jump_tgt_here(desc, jump_cmd);
-
- set_move_tgt_here(desc, read_move_cmd);
- set_move_tgt_here(desc, write_move_cmd);
- append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
- append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO |
- MOVE_AUX_LS);
- append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
-
- /* Load ICV */
- append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS2 |
- FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV);
-
+ desc = ctx->sh_desc_dec;
+ cnstr_shdsc_aead_null_decap(desc, &ctx->adata, ctx->authsize);
ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
desc_bytes(desc),
DMA_TO_DEVICE);
@@ -453,12 +201,6 @@ static int aead_null_set_sh_desc(struct crypto_aead *aead)
dev_err(jrdev, "unable to map shared descriptor\n");
return -ENOMEM;
}
-#ifdef DEBUG
- print_hex_dump(KERN_ERR,
- "aead null dec shdesc@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, desc,
- desc_bytes(desc), 1);
-#endif
return 0;
}
@@ -470,11 +212,11 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
unsigned int ivsize = crypto_aead_ivsize(aead);
struct caam_ctx *ctx = crypto_aead_ctx(aead);
struct device *jrdev = ctx->jrdev;
- bool keys_fit_inline;
- u32 geniv, moveiv;
u32 ctx1_iv_off = 0;
- u32 *desc;
- const bool ctr_mode = ((ctx->class1_alg_type & OP_ALG_AAI_MASK) ==
+ u32 *desc, *nonce = NULL;
+ u32 inl_mask;
+ unsigned int data_len[2];
+ const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
OP_ALG_AAI_CTR_MOD128);
const bool is_rfc3686 = alg->caam.rfc3686;
@@ -482,7 +224,7 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
return 0;
/* NULL encryption / decryption */
- if (!ctx->enckeylen)
+ if (!ctx->cdata.keylen)
return aead_null_set_sh_desc(aead);
/*
@@ -497,8 +239,14 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
* RFC3686 specific:
* CONTEXT1[255:128] = {NONCE, IV, COUNTER}
*/
- if (is_rfc3686)
+ if (is_rfc3686) {
ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
+ nonce = (u32 *)((void *)ctx->key + ctx->adata.keylen_pad +
+ ctx->cdata.keylen - CTR_RFC3686_NONCE_SIZE);
+ }
+
+ data_len[0] = ctx->adata.keylen_pad;
+ data_len[1] = ctx->cdata.keylen;
if (alg->caam.geniv)
goto skip_enc;
@@ -507,54 +255,29 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
* Job Descriptor and Shared Descriptors
* must all fit into the 64-word Descriptor h/w Buffer
*/
- keys_fit_inline = false;
- if (DESC_AEAD_ENC_LEN + AUTHENC_DESC_JOB_IO_LEN +
- ctx->split_key_pad_len + ctx->enckeylen +
- (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0) <=
- CAAM_DESC_BYTES_MAX)
- keys_fit_inline = true;
-
- /* aead_encrypt shared descriptor */
- desc = ctx->sh_desc_enc;
-
- /* Note: Context registers are saved. */
- init_sh_desc_key_aead(desc, ctx, keys_fit_inline, is_rfc3686);
-
- /* Class 2 operation */
- append_operation(desc, ctx->class2_alg_type |
- OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
-
- /* Read and write assoclen bytes */
- append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
- append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
-
- /* Skip assoc data */
- append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
-
- /* read assoc before reading payload */
- append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
- FIFOLDST_VLF);
-
- /* Load Counter into CONTEXT1 reg */
- if (is_rfc3686)
- append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
- LDST_SRCDST_BYTE_CONTEXT |
- ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
- LDST_OFFSET_SHIFT));
+ if (desc_inline_query(DESC_AEAD_ENC_LEN +
+ (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
+ AUTHENC_DESC_JOB_IO_LEN, data_len, &inl_mask,
+ ARRAY_SIZE(data_len)) < 0)
+ return -EINVAL;
- /* Class 1 operation */
- append_operation(desc, ctx->class1_alg_type |
- OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
+ if (inl_mask & 1)
+ ctx->adata.key_virt = ctx->key;
+ else
+ ctx->adata.key_dma = ctx->key_dma;
- /* Read and write cryptlen bytes */
- append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
- append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
- aead_append_src_dst(desc, FIFOLD_TYPE_MSG1OUT2);
+ if (inl_mask & 2)
+ ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
+ else
+ ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
- /* Write ICV */
- append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB |
- LDST_SRCDST_BYTE_CONTEXT);
+ ctx->adata.key_inline = !!(inl_mask & 1);
+ ctx->cdata.key_inline = !!(inl_mask & 2);
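/*
 * A simplified sketch of the policy desc_inline_query() is assumed to
 * implement (the real helper lives in desc_constr.h and handles the
 * exact pointer-word accounting): inline keys greedily while reserving
 * pointer space for the keys that will not fit, and fail only if even
 * the all-pointers layout overflows the 64-word buffer.
 */
static int inline_query_sketch(unsigned int sd_base_len, unsigned int jd_len,
			       unsigned int *data_len, u32 *inl_mask,
			       unsigned int count)
{
	int rem_bytes = CAAM_DESC_BYTES_MAX - sd_base_len - jd_len;
	unsigned int i;

	*inl_mask = 0;
	for (i = 0; i < count; i++) {
		/* reserve a pointer slot for every key left after this one */
		if (rem_bytes >= (int)(data_len[i] +
				       (count - i - 1) * CAAM_PTR_SZ)) {
			rem_bytes -= data_len[i];
			*inl_mask |= 1 << i;	/* key i goes inline */
		} else {
			rem_bytes -= CAAM_PTR_SZ; /* key i stays in memory */
		}
	}
	return rem_bytes >= 0 ? 0 : -1;
}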
+ /* aead_encrypt shared descriptor */
+ desc = ctx->sh_desc_enc;
+ cnstr_shdsc_aead_encap(desc, &ctx->cdata, &ctx->adata, ctx->authsize,
+ is_rfc3686, nonce, ctx1_iv_off);
ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
desc_bytes(desc),
DMA_TO_DEVICE);
@@ -562,79 +285,36 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
dev_err(jrdev, "unable to map shared descriptor\n");
return -ENOMEM;
}
-#ifdef DEBUG
- print_hex_dump(KERN_ERR, "aead enc shdesc@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, desc,
- desc_bytes(desc), 1);
-#endif
skip_enc:
/*
* Job Descriptor and Shared Descriptors
* must all fit into the 64-word Descriptor h/w Buffer
*/
- keys_fit_inline = false;
- if (DESC_AEAD_DEC_LEN + AUTHENC_DESC_JOB_IO_LEN +
- ctx->split_key_pad_len + ctx->enckeylen +
- (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0) <=
- CAAM_DESC_BYTES_MAX)
- keys_fit_inline = true;
-
- /* aead_decrypt shared descriptor */
- desc = ctx->sh_desc_dec;
-
- /* Note: Context registers are saved. */
- init_sh_desc_key_aead(desc, ctx, keys_fit_inline, is_rfc3686);
-
- /* Class 2 operation */
- append_operation(desc, ctx->class2_alg_type |
- OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);
+ if (desc_inline_query(DESC_AEAD_DEC_LEN +
+ (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
+ AUTHENC_DESC_JOB_IO_LEN, data_len, &inl_mask,
+ ARRAY_SIZE(data_len)) < 0)
+ return -EINVAL;
- /* Read and write assoclen bytes */
- append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
- if (alg->caam.geniv)
- append_math_add_imm_u32(desc, VARSEQOUTLEN, REG3, IMM, ivsize);
+ if (inl_mask & 1)
+ ctx->adata.key_virt = ctx->key;
else
- append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
-
- /* Skip assoc data */
- append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
-
- /* read assoc before reading payload */
- append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
- KEY_VLF);
+ ctx->adata.key_dma = ctx->key_dma;
- if (alg->caam.geniv) {
- append_seq_load(desc, ivsize, LDST_CLASS_1_CCB |
- LDST_SRCDST_BYTE_CONTEXT |
- (ctx1_iv_off << LDST_OFFSET_SHIFT));
- append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_CLASS2INFIFO |
- (ctx1_iv_off << MOVE_OFFSET_SHIFT) | ivsize);
- }
-
- /* Load Counter into CONTEXT1 reg */
- if (is_rfc3686)
- append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
- LDST_SRCDST_BYTE_CONTEXT |
- ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
- LDST_OFFSET_SHIFT));
-
- /* Choose operation */
- if (ctr_mode)
- append_operation(desc, ctx->class1_alg_type |
- OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT);
+ if (inl_mask & 2)
+ ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
else
- append_dec_op1(desc, ctx->class1_alg_type);
-
- /* Read and write cryptlen bytes */
- append_math_add(desc, VARSEQINLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
- append_math_add(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
- aead_append_src_dst(desc, FIFOLD_TYPE_MSG);
+ ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
- /* Load ICV */
- append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS2 |
- FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV);
+ ctx->adata.key_inline = !!(inl_mask & 1);
+ ctx->cdata.key_inline = !!(inl_mask & 2);
+ /* aead_decrypt shared descriptor */
+ desc = ctx->sh_desc_dec;
+ cnstr_shdsc_aead_decap(desc, &ctx->cdata, &ctx->adata, ivsize,
+ ctx->authsize, alg->caam.geniv, is_rfc3686,
+ nonce, ctx1_iv_off);
ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
desc_bytes(desc),
DMA_TO_DEVICE);
@@ -642,11 +322,6 @@ skip_enc:
dev_err(jrdev, "unable to map shared descriptor\n");
return -ENOMEM;
}
-#ifdef DEBUG
- print_hex_dump(KERN_ERR, "aead dec shdesc@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, desc,
- desc_bytes(desc), 1);
-#endif
if (!alg->caam.geniv)
goto skip_givenc;
@@ -655,93 +330,30 @@ skip_enc:
* Job Descriptor and Shared Descriptors
* must all fit into the 64-word Descriptor h/w Buffer
*/
- keys_fit_inline = false;
- if (DESC_AEAD_GIVENC_LEN + AUTHENC_DESC_JOB_IO_LEN +
- ctx->split_key_pad_len + ctx->enckeylen +
- (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0) <=
- CAAM_DESC_BYTES_MAX)
- keys_fit_inline = true;
-
- /* aead_givencrypt shared descriptor */
- desc = ctx->sh_desc_enc;
-
- /* Note: Context registers are saved. */
- init_sh_desc_key_aead(desc, ctx, keys_fit_inline, is_rfc3686);
-
- if (is_rfc3686)
- goto copy_iv;
-
- /* Generate IV */
- geniv = NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DEST_DECO |
- NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_LC1 |
- NFIFOENTRY_PTYPE_RND | (ivsize << NFIFOENTRY_DLEN_SHIFT);
- append_load_imm_u32(desc, geniv, LDST_CLASS_IND_CCB |
- LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
- append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
- append_move(desc, MOVE_WAITCOMP |
- MOVE_SRC_INFIFO | MOVE_DEST_CLASS1CTX |
- (ctx1_iv_off << MOVE_OFFSET_SHIFT) |
- (ivsize << MOVE_LEN_SHIFT));
- append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
-
-copy_iv:
- /* Copy IV to class 1 context */
- append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_OUTFIFO |
- (ctx1_iv_off << MOVE_OFFSET_SHIFT) |
- (ivsize << MOVE_LEN_SHIFT));
-
- /* Return to encryption */
- append_operation(desc, ctx->class2_alg_type |
- OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
-
- /* Read and write assoclen bytes */
- append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
- append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
-
- /* ivsize + cryptlen = seqoutlen - authsize */
- append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);
-
- /* Skip assoc data */
- append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
-
- /* read assoc before reading payload */
- append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
- KEY_VLF);
-
- /* Copy iv from outfifo to class 2 fifo */
- moveiv = NFIFOENTRY_STYPE_OFIFO | NFIFOENTRY_DEST_CLASS2 |
- NFIFOENTRY_DTYPE_MSG | (ivsize << NFIFOENTRY_DLEN_SHIFT);
- append_load_imm_u32(desc, moveiv, LDST_CLASS_IND_CCB |
- LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
- append_load_imm_u32(desc, ivsize, LDST_CLASS_2_CCB |
- LDST_SRCDST_WORD_DATASZ_REG | LDST_IMM);
-
- /* Load Counter into CONTEXT1 reg */
- if (is_rfc3686)
- append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
- LDST_SRCDST_BYTE_CONTEXT |
- ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
- LDST_OFFSET_SHIFT));
-
- /* Class 1 operation */
- append_operation(desc, ctx->class1_alg_type |
- OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
-
- /* Will write ivsize + cryptlen */
- append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
+ if (desc_inline_query(DESC_AEAD_GIVENC_LEN +
+ (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
+ AUTHENC_DESC_JOB_IO_LEN, data_len, &inl_mask,
+ ARRAY_SIZE(data_len)) < 0)
+ return -EINVAL;
- /* Not need to reload iv */
- append_seq_fifo_load(desc, ivsize,
- FIFOLD_CLASS_SKIP);
+ if (inl_mask & 1)
+ ctx->adata.key_virt = ctx->key;
+ else
+ ctx->adata.key_dma = ctx->key_dma;
- /* Will read cryptlen */
- append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
- aead_append_src_dst(desc, FIFOLD_TYPE_MSG1OUT2);
+ if (inl_mask & 2)
+ ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
+ else
+ ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
- /* Write ICV */
- append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB |
- LDST_SRCDST_BYTE_CONTEXT);
+ ctx->adata.key_inline = !!(inl_mask & 1);
+ ctx->cdata.key_inline = !!(inl_mask & 2);
+ /* aead_givencrypt shared descriptor */
+ desc = ctx->sh_desc_enc;
+ cnstr_shdsc_aead_givencap(desc, &ctx->cdata, &ctx->adata, ivsize,
+ ctx->authsize, is_rfc3686, nonce,
+ ctx1_iv_off);
ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
desc_bytes(desc),
DMA_TO_DEVICE);
@@ -749,11 +361,6 @@ copy_iv:
dev_err(jrdev, "unable to map shared descriptor\n");
return -ENOMEM;
}
-#ifdef DEBUG
- print_hex_dump(KERN_ERR, "aead givenc shdesc@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, desc,
- desc_bytes(desc), 1);
-#endif
skip_givenc:
return 0;
@@ -774,12 +381,11 @@ static int gcm_set_sh_desc(struct crypto_aead *aead)
{
struct caam_ctx *ctx = crypto_aead_ctx(aead);
struct device *jrdev = ctx->jrdev;
- bool keys_fit_inline = false;
- u32 *key_jump_cmd, *zero_payload_jump_cmd,
- *zero_assoc_jump_cmd1, *zero_assoc_jump_cmd2;
u32 *desc;
+ int rem_bytes = CAAM_DESC_BYTES_MAX - GCM_DESC_JOB_IO_LEN -
+ ctx->cdata.keylen;
- if (!ctx->enckeylen || !ctx->authsize)
+ if (!ctx->cdata.keylen || !ctx->authsize)
return 0;
/*
@@ -787,82 +393,16 @@ static int gcm_set_sh_desc(struct crypto_aead *aead)
* Job Descriptor and Shared Descriptor
* must fit into the 64-word Descriptor h/w Buffer
*/
- if (DESC_GCM_ENC_LEN + GCM_DESC_JOB_IO_LEN +
- ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
- keys_fit_inline = true;
+ if (rem_bytes >= DESC_GCM_ENC_LEN) {
+ ctx->cdata.key_inline = true;
+ ctx->cdata.key_virt = ctx->key;
+ } else {
+ ctx->cdata.key_inline = false;
+ ctx->cdata.key_dma = ctx->key_dma;
+ }
desc = ctx->sh_desc_enc;
-
- init_sh_desc(desc, HDR_SHARE_SERIAL);
-
- /* skip key loading if they are loaded due to sharing */
- key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
- JUMP_COND_SHRD | JUMP_COND_SELF);
- if (keys_fit_inline)
- append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
- ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
- else
- append_key(desc, ctx->key_dma, ctx->enckeylen,
- CLASS_1 | KEY_DEST_CLASS_REG);
- set_jump_tgt_here(desc, key_jump_cmd);
-
- /* class 1 operation */
- append_operation(desc, ctx->class1_alg_type |
- OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
-
- /* if assoclen + cryptlen is ZERO, skip to ICV write */
- append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
- zero_assoc_jump_cmd2 = append_jump(desc, JUMP_TEST_ALL |
- JUMP_COND_MATH_Z);
-
- /* if assoclen is ZERO, skip reading the assoc data */
- append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
- zero_assoc_jump_cmd1 = append_jump(desc, JUMP_TEST_ALL |
- JUMP_COND_MATH_Z);
-
- append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
-
- /* skip assoc data */
- append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
-
- /* cryptlen = seqinlen - assoclen */
- append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG3, CAAM_CMD_SZ);
-
- /* if cryptlen is ZERO jump to zero-payload commands */
- zero_payload_jump_cmd = append_jump(desc, JUMP_TEST_ALL |
- JUMP_COND_MATH_Z);
-
- /* read assoc data */
- append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
- FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
- set_jump_tgt_here(desc, zero_assoc_jump_cmd1);
-
- append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
-
- /* write encrypted data */
- append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
-
- /* read payload data */
- append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
- FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);
-
- /* jump the zero-payload commands */
- append_jump(desc, JUMP_TEST_ALL | 2);
-
- /* zero-payload commands */
- set_jump_tgt_here(desc, zero_payload_jump_cmd);
-
- /* read assoc data */
- append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
- FIFOLD_TYPE_AAD | FIFOLD_TYPE_LAST1);
-
- /* There is no input data */
- set_jump_tgt_here(desc, zero_assoc_jump_cmd2);
-
- /* write ICV */
- append_seq_store(desc, ctx->authsize, LDST_CLASS_1_CCB |
- LDST_SRCDST_BYTE_CONTEXT);
-
+ cnstr_shdsc_gcm_encap(desc, &ctx->cdata, ctx->authsize);
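/*
 * The descriptor program deleted above now lives in caamalg_desc.c so
 * that other CAAM interfaces can share it with the job-ring driver.
 * The declarations are assumed to follow this shape; caamalg_desc.h
 * is authoritative:
 *
 *   void cnstr_shdsc_gcm_encap(u32 * const desc, struct alginfo *cdata,
 *				unsigned int icvsize);
 *   void cnstr_shdsc_gcm_decap(u32 * const desc, struct alginfo *cdata,
 *				unsigned int icvsize);
 */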
ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
desc_bytes(desc),
DMA_TO_DEVICE);
@@ -870,80 +410,21 @@ static int gcm_set_sh_desc(struct crypto_aead *aead)
dev_err(jrdev, "unable to map shared descriptor\n");
return -ENOMEM;
}
-#ifdef DEBUG
- print_hex_dump(KERN_ERR, "gcm enc shdesc@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, desc,
- desc_bytes(desc), 1);
-#endif
/*
* Job Descriptor and Shared Descriptors
* must all fit into the 64-word Descriptor h/w Buffer
*/
- keys_fit_inline = false;
- if (DESC_GCM_DEC_LEN + GCM_DESC_JOB_IO_LEN +
- ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
- keys_fit_inline = true;
+ if (rem_bytes >= DESC_GCM_DEC_LEN) {
+ ctx->cdata.key_inline = true;
+ ctx->cdata.key_virt = ctx->key;
+ } else {
+ ctx->cdata.key_inline = false;
+ ctx->cdata.key_dma = ctx->key_dma;
+ }
desc = ctx->sh_desc_dec;
-
- init_sh_desc(desc, HDR_SHARE_SERIAL);
-
- /* skip key loading if they are loaded due to sharing */
- key_jump_cmd = append_jump(desc, JUMP_JSL |
- JUMP_TEST_ALL | JUMP_COND_SHRD |
- JUMP_COND_SELF);
- if (keys_fit_inline)
- append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
- ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
- else
- append_key(desc, ctx->key_dma, ctx->enckeylen,
- CLASS_1 | KEY_DEST_CLASS_REG);
- set_jump_tgt_here(desc, key_jump_cmd);
-
- /* class 1 operation */
- append_operation(desc, ctx->class1_alg_type |
- OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);
-
- /* if assoclen is ZERO, skip reading the assoc data */
- append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
- zero_assoc_jump_cmd1 = append_jump(desc, JUMP_TEST_ALL |
- JUMP_COND_MATH_Z);
-
- append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
-
- /* skip assoc data */
- append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
-
- /* read assoc data */
- append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
- FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
-
- set_jump_tgt_here(desc, zero_assoc_jump_cmd1);
-
- /* cryptlen = seqoutlen - assoclen */
- append_math_sub(desc, VARSEQINLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
-
- /* jump to zero-payload command if cryptlen is zero */
- zero_payload_jump_cmd = append_jump(desc, JUMP_TEST_ALL |
- JUMP_COND_MATH_Z);
-
- append_math_sub(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
-
- /* store encrypted data */
- append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
-
- /* read payload data */
- append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
- FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);
-
- /* zero-payload command */
- set_jump_tgt_here(desc, zero_payload_jump_cmd);
-
- /* read ICV */
- append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS1 |
- FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1);
-
+ cnstr_shdsc_gcm_decap(desc, &ctx->cdata, ctx->authsize);
ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
desc_bytes(desc),
DMA_TO_DEVICE);
@@ -951,11 +432,6 @@ static int gcm_set_sh_desc(struct crypto_aead *aead)
dev_err(jrdev, "unable to map shared descriptor\n");
return -ENOMEM;
}
-#ifdef DEBUG
- print_hex_dump(KERN_ERR, "gcm dec shdesc@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, desc,
- desc_bytes(desc), 1);
-#endif
return 0;
}
@@ -974,11 +450,11 @@ static int rfc4106_set_sh_desc(struct crypto_aead *aead)
{
struct caam_ctx *ctx = crypto_aead_ctx(aead);
struct device *jrdev = ctx->jrdev;
- bool keys_fit_inline = false;
- u32 *key_jump_cmd;
u32 *desc;
+ int rem_bytes = CAAM_DESC_BYTES_MAX - GCM_DESC_JOB_IO_LEN -
+ ctx->cdata.keylen;
- if (!ctx->enckeylen || !ctx->authsize)
+ if (!ctx->cdata.keylen || !ctx->authsize)
return 0;
/*
@@ -986,62 +462,16 @@ static int rfc4106_set_sh_desc(struct crypto_aead *aead)
* Job Descriptor and Shared Descriptor
* must fit into the 64-word Descriptor h/w Buffer
*/
- if (DESC_RFC4106_ENC_LEN + GCM_DESC_JOB_IO_LEN +
- ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
- keys_fit_inline = true;
+ if (rem_bytes >= DESC_RFC4106_ENC_LEN) {
+ ctx->cdata.key_inline = true;
+ ctx->cdata.key_virt = ctx->key;
+ } else {
+ ctx->cdata.key_inline = false;
+ ctx->cdata.key_dma = ctx->key_dma;
+ }
desc = ctx->sh_desc_enc;
-
- init_sh_desc(desc, HDR_SHARE_SERIAL);
-
- /* Skip key loading if it is loaded due to sharing */
- key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
- JUMP_COND_SHRD);
- if (keys_fit_inline)
- append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
- ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
- else
- append_key(desc, ctx->key_dma, ctx->enckeylen,
- CLASS_1 | KEY_DEST_CLASS_REG);
- set_jump_tgt_here(desc, key_jump_cmd);
-
- /* Class 1 operation */
- append_operation(desc, ctx->class1_alg_type |
- OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
-
- append_math_sub_imm_u32(desc, VARSEQINLEN, REG3, IMM, 8);
- append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
-
- /* Read assoc data */
- append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
- FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
-
- /* Skip IV */
- append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP);
-
- /* Will read cryptlen bytes */
- append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
-
- /* Workaround for erratum A-005473 (simultaneous SEQ FIFO skips) */
- append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_MSG);
-
- /* Skip assoc data */
- append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
-
- /* cryptlen = seqoutlen - assoclen */
- append_math_sub(desc, VARSEQOUTLEN, VARSEQINLEN, REG0, CAAM_CMD_SZ);
-
- /* Write encrypted data */
- append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
-
- /* Read payload data */
- append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
- FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);
-
- /* Write ICV */
- append_seq_store(desc, ctx->authsize, LDST_CLASS_1_CCB |
- LDST_SRCDST_BYTE_CONTEXT);
-
+ cnstr_shdsc_rfc4106_encap(desc, &ctx->cdata, ctx->authsize);
ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
desc_bytes(desc),
DMA_TO_DEVICE);
@@ -1049,73 +479,21 @@ static int rfc4106_set_sh_desc(struct crypto_aead *aead)
dev_err(jrdev, "unable to map shared descriptor\n");
return -ENOMEM;
}
-#ifdef DEBUG
- print_hex_dump(KERN_ERR, "rfc4106 enc shdesc@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, desc,
- desc_bytes(desc), 1);
-#endif
/*
* Job Descriptor and Shared Descriptors
* must all fit into the 64-word Descriptor h/w Buffer
*/
- keys_fit_inline = false;
- if (DESC_RFC4106_DEC_LEN + DESC_JOB_IO_LEN +
- ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
- keys_fit_inline = true;
+ if (rem_bytes >= DESC_RFC4106_DEC_LEN) {
+ ctx->cdata.key_inline = true;
+ ctx->cdata.key_virt = ctx->key;
+ } else {
+ ctx->cdata.key_inline = false;
+ ctx->cdata.key_dma = ctx->key_dma;
+ }
desc = ctx->sh_desc_dec;
-
- init_sh_desc(desc, HDR_SHARE_SERIAL);
-
- /* Skip key loading if it is loaded due to sharing */
- key_jump_cmd = append_jump(desc, JUMP_JSL |
- JUMP_TEST_ALL | JUMP_COND_SHRD);
- if (keys_fit_inline)
- append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
- ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
- else
- append_key(desc, ctx->key_dma, ctx->enckeylen,
- CLASS_1 | KEY_DEST_CLASS_REG);
- set_jump_tgt_here(desc, key_jump_cmd);
-
- /* Class 1 operation */
- append_operation(desc, ctx->class1_alg_type |
- OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);
-
- append_math_sub_imm_u32(desc, VARSEQINLEN, REG3, IMM, 8);
- append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
-
- /* Read assoc data */
- append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
- FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
-
- /* Skip IV */
- append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP);
-
- /* Will read cryptlen bytes */
- append_math_sub(desc, VARSEQINLEN, SEQOUTLEN, REG3, CAAM_CMD_SZ);
-
- /* Workaround for erratum A-005473 (simultaneous SEQ FIFO skips) */
- append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_MSG);
-
- /* Skip assoc data */
- append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
-
- /* Will write cryptlen bytes */
- append_math_sub(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
-
- /* Store payload data */
- append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
-
- /* Read encrypted data */
- append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
- FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);
-
- /* Read ICV */
- append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS1 |
- FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1);
-
+ cnstr_shdsc_rfc4106_decap(desc, &ctx->cdata, ctx->authsize);
ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
desc_bytes(desc),
DMA_TO_DEVICE);
@@ -1123,11 +501,6 @@ static int rfc4106_set_sh_desc(struct crypto_aead *aead)
dev_err(jrdev, "unable to map shared descriptor\n");
return -ENOMEM;
}
-#ifdef DEBUG
- print_hex_dump(KERN_ERR, "rfc4106 dec shdesc@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, desc,
- desc_bytes(desc), 1);
-#endif
return 0;
}
@@ -1147,12 +520,11 @@ static int rfc4543_set_sh_desc(struct crypto_aead *aead)
{
struct caam_ctx *ctx = crypto_aead_ctx(aead);
struct device *jrdev = ctx->jrdev;
- bool keys_fit_inline = false;
- u32 *key_jump_cmd;
- u32 *read_move_cmd, *write_move_cmd;
u32 *desc;
+ int rem_bytes = CAAM_DESC_BYTES_MAX - GCM_DESC_JOB_IO_LEN -
+ ctx->cdata.keylen;
- if (!ctx->enckeylen || !ctx->authsize)
+ if (!ctx->cdata.keylen || !ctx->authsize)
return 0;
/*
@@ -1160,61 +532,16 @@ static int rfc4543_set_sh_desc(struct crypto_aead *aead)
* Job Descriptor and Shared Descriptor
* must fit into the 64-word Descriptor h/w Buffer
*/
- if (DESC_RFC4543_ENC_LEN + GCM_DESC_JOB_IO_LEN +
- ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
- keys_fit_inline = true;
+ if (rem_bytes >= DESC_RFC4543_ENC_LEN) {
+ ctx->cdata.key_inline = true;
+ ctx->cdata.key_virt = ctx->key;
+ } else {
+ ctx->cdata.key_inline = false;
+ ctx->cdata.key_dma = ctx->key_dma;
+ }
desc = ctx->sh_desc_enc;
-
- init_sh_desc(desc, HDR_SHARE_SERIAL);
-
- /* Skip key loading if it is loaded due to sharing */
- key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
- JUMP_COND_SHRD);
- if (keys_fit_inline)
- append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
- ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
- else
- append_key(desc, ctx->key_dma, ctx->enckeylen,
- CLASS_1 | KEY_DEST_CLASS_REG);
- set_jump_tgt_here(desc, key_jump_cmd);
-
- /* Class 1 operation */
- append_operation(desc, ctx->class1_alg_type |
- OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
-
- /* assoclen + cryptlen = seqinlen */
- append_math_sub(desc, REG3, SEQINLEN, REG0, CAAM_CMD_SZ);
-
- /*
- * MOVE_LEN opcode is not available in all SEC HW revisions,
- * thus need to do some magic, i.e. self-patch the descriptor
- * buffer.
- */
- read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_MATH3 |
- (0x6 << MOVE_LEN_SHIFT));
- write_move_cmd = append_move(desc, MOVE_SRC_MATH3 | MOVE_DEST_DESCBUF |
- (0x8 << MOVE_LEN_SHIFT));
-
- /* Will read assoclen + cryptlen bytes */
- append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
-
- /* Will write assoclen + cryptlen bytes */
- append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
-
- /* Read and write assoclen + cryptlen bytes */
- aead_append_src_dst(desc, FIFOLD_TYPE_AAD);
-
- set_move_tgt_here(desc, read_move_cmd);
- set_move_tgt_here(desc, write_move_cmd);
- append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
- /* Move payload data to OFIFO */
- append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO);
-
- /* Write ICV */
- append_seq_store(desc, ctx->authsize, LDST_CLASS_1_CCB |
- LDST_SRCDST_BYTE_CONTEXT);
-
+ cnstr_shdsc_rfc4543_encap(desc, &ctx->cdata, ctx->authsize);
ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
desc_bytes(desc),
DMA_TO_DEVICE);
@@ -1222,77 +549,21 @@ static int rfc4543_set_sh_desc(struct crypto_aead *aead)
dev_err(jrdev, "unable to map shared descriptor\n");
return -ENOMEM;
}
-#ifdef DEBUG
- print_hex_dump(KERN_ERR, "rfc4543 enc shdesc@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, desc,
- desc_bytes(desc), 1);
-#endif
/*
* Job Descriptor and Shared Descriptors
* must all fit into the 64-word Descriptor h/w Buffer
*/
- keys_fit_inline = false;
- if (DESC_RFC4543_DEC_LEN + GCM_DESC_JOB_IO_LEN +
- ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
- keys_fit_inline = true;
+ if (rem_bytes >= DESC_RFC4543_DEC_LEN) {
+ ctx->cdata.key_inline = true;
+ ctx->cdata.key_virt = ctx->key;
+ } else {
+ ctx->cdata.key_inline = false;
+ ctx->cdata.key_dma = ctx->key_dma;
+ }
desc = ctx->sh_desc_dec;
-
- init_sh_desc(desc, HDR_SHARE_SERIAL);
-
- /* Skip key loading if it is loaded due to sharing */
- key_jump_cmd = append_jump(desc, JUMP_JSL |
- JUMP_TEST_ALL | JUMP_COND_SHRD);
- if (keys_fit_inline)
- append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
- ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
- else
- append_key(desc, ctx->key_dma, ctx->enckeylen,
- CLASS_1 | KEY_DEST_CLASS_REG);
- set_jump_tgt_here(desc, key_jump_cmd);
-
- /* Class 1 operation */
- append_operation(desc, ctx->class1_alg_type |
- OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);
-
- /* assoclen + cryptlen = seqoutlen */
- append_math_sub(desc, REG3, SEQOUTLEN, REG0, CAAM_CMD_SZ);
-
- /*
- * MOVE_LEN opcode is not available in all SEC HW revisions,
- * thus need to do some magic, i.e. self-patch the descriptor
- * buffer.
- */
- read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_MATH3 |
- (0x6 << MOVE_LEN_SHIFT));
- write_move_cmd = append_move(desc, MOVE_SRC_MATH3 | MOVE_DEST_DESCBUF |
- (0x8 << MOVE_LEN_SHIFT));
-
- /* Will read assoclen + cryptlen bytes */
- append_math_sub(desc, VARSEQINLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
-
- /* Will write assoclen + cryptlen bytes */
- append_math_sub(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
-
- /* Store payload data */
- append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
-
- /* In-snoop assoclen + cryptlen data */
- append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH | FIFOLDST_VLF |
- FIFOLD_TYPE_AAD | FIFOLD_TYPE_LAST2FLUSH1);
-
- set_move_tgt_here(desc, read_move_cmd);
- set_move_tgt_here(desc, write_move_cmd);
- append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
- /* Move payload data to OFIFO */
- append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO);
- append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
-
- /* Read ICV */
- append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS1 |
- FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1);
-
+ cnstr_shdsc_rfc4543_decap(desc, &ctx->cdata, ctx->authsize);
ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
desc_bytes(desc),
DMA_TO_DEVICE);
@@ -1300,11 +571,6 @@ static int rfc4543_set_sh_desc(struct crypto_aead *aead)
dev_err(jrdev, "unable to map shared descriptor\n");
return -ENOMEM;
}
-#ifdef DEBUG
- print_hex_dump(KERN_ERR, "rfc4543 dec shdesc@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, desc,
- desc_bytes(desc), 1);
-#endif
return 0;
}
@@ -1320,19 +586,9 @@ static int rfc4543_setauthsize(struct crypto_aead *authenc,
return 0;
}
-static u32 gen_split_aead_key(struct caam_ctx *ctx, const u8 *key_in,
- u32 authkeylen)
-{
- return gen_split_key(ctx->jrdev, ctx->key, ctx->split_key_len,
- ctx->split_key_pad_len, key_in, authkeylen,
- ctx->alg_op);
-}
-
static int aead_setkey(struct crypto_aead *aead,
const u8 *key, unsigned int keylen)
{
- /* Sizes for MDHA pads (*not* keys): MD5, SHA1, 224, 256, 384, 512 */
- static const u8 mdpadlen[] = { 16, 20, 32, 32, 64, 64 };
struct caam_ctx *ctx = crypto_aead_ctx(aead);
struct device *jrdev = ctx->jrdev;
struct crypto_authenc_keys keys;
@@ -1341,33 +597,25 @@ static int aead_setkey(struct crypto_aead *aead,
if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
goto badkey;
- /* Pick class 2 key length from algorithm submask */
- ctx->split_key_len = mdpadlen[(ctx->alg_op & OP_ALG_ALGSEL_SUBMASK) >>
- OP_ALG_ALGSEL_SHIFT] * 2;
- ctx->split_key_pad_len = ALIGN(ctx->split_key_len, 16);
-
- if (ctx->split_key_pad_len + keys.enckeylen > CAAM_MAX_KEY_SIZE)
- goto badkey;
-
#ifdef DEBUG
printk(KERN_ERR "keylen %d enckeylen %d authkeylen %d\n",
keys.authkeylen + keys.enckeylen, keys.enckeylen,
keys.authkeylen);
- printk(KERN_ERR "split_key_len %d split_key_pad_len %d\n",
- ctx->split_key_len, ctx->split_key_pad_len);
print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif
- ret = gen_split_aead_key(ctx, keys.authkey, keys.authkeylen);
+ ret = gen_split_key(ctx->jrdev, ctx->key, &ctx->adata, keys.authkey,
+ keys.authkeylen, CAAM_MAX_KEY_SIZE -
+ keys.enckeylen);
if (ret) {
goto badkey;
}
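/*
 * gen_split_key() now receives the alginfo and an explicit output
 * bound instead of precomputed split-key lengths; under the assumption
 * that it derives keylen/keylen_pad from adata->algtype internally,
 * the max_keylen argument subsumes the old
 * "split_key_pad_len + enckeylen > CAAM_MAX_KEY_SIZE" check, e.g.:
 *
 *   ret = gen_split_key(ctx->jrdev, ctx->key, &ctx->adata,
 *			 keys.authkey, keys.authkeylen,
 *			 CAAM_MAX_KEY_SIZE - keys.enckeylen);
 */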
/* append encryption key after the auth split key */
- memcpy(ctx->key + ctx->split_key_pad_len, keys.enckey, keys.enckeylen);
+ memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen);
- ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->split_key_pad_len +
+ ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->adata.keylen_pad +
keys.enckeylen, DMA_TO_DEVICE);
if (dma_mapping_error(jrdev, ctx->key_dma)) {
dev_err(jrdev, "unable to map key i/o memory\n");
@@ -1376,14 +624,14 @@ static int aead_setkey(struct crypto_aead *aead,
#ifdef DEBUG
print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
- ctx->split_key_pad_len + keys.enckeylen, 1);
+ ctx->adata.keylen_pad + keys.enckeylen, 1);
#endif
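/*
 * Resulting ctx->key layout for authenc algorithms (offsets as used by
 * the code above; the diagram itself is illustrative):
 *
 *   [ MDHA split key, padded to adata.keylen_pad | encryption key ]
 *   ^ offset 0                                   ^ adata.keylen_pad
 *
 * For RFC3686 templates the CTR nonce rides as the tail of the
 * encryption key, which is why aead_set_sh_desc() finds it at
 * adata.keylen_pad + cdata.keylen - CTR_RFC3686_NONCE_SIZE.
 */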
- ctx->enckeylen = keys.enckeylen;
+ ctx->cdata.keylen = keys.enckeylen;
ret = aead_set_sh_desc(aead);
if (ret) {
- dma_unmap_single(jrdev, ctx->key_dma, ctx->split_key_pad_len +
+ dma_unmap_single(jrdev, ctx->key_dma, ctx->adata.keylen_pad +
keys.enckeylen, DMA_TO_DEVICE);
}
@@ -1412,11 +660,11 @@ static int gcm_setkey(struct crypto_aead *aead,
dev_err(jrdev, "unable to map key i/o memory\n");
return -ENOMEM;
}
- ctx->enckeylen = keylen;
+ ctx->cdata.keylen = keylen;
ret = gcm_set_sh_desc(aead);
if (ret) {
- dma_unmap_single(jrdev, ctx->key_dma, ctx->enckeylen,
+ dma_unmap_single(jrdev, ctx->key_dma, ctx->cdata.keylen,
DMA_TO_DEVICE);
}
@@ -1444,9 +692,9 @@ static int rfc4106_setkey(struct crypto_aead *aead,
* The last four bytes of the key material are used as the salt value
* in the nonce. Update the AES key length.
*/
- ctx->enckeylen = keylen - 4;
+ ctx->cdata.keylen = keylen - 4;
- ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->enckeylen,
+ ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->cdata.keylen,
DMA_TO_DEVICE);
if (dma_mapping_error(jrdev, ctx->key_dma)) {
dev_err(jrdev, "unable to map key i/o memory\n");
@@ -1455,7 +703,7 @@ static int rfc4106_setkey(struct crypto_aead *aead,
ret = rfc4106_set_sh_desc(aead);
if (ret) {
- dma_unmap_single(jrdev, ctx->key_dma, ctx->enckeylen,
+ dma_unmap_single(jrdev, ctx->key_dma, ctx->cdata.keylen,
DMA_TO_DEVICE);
}
@@ -1483,9 +731,9 @@ static int rfc4543_setkey(struct crypto_aead *aead,
* The last four bytes of the key material are used as the salt value
* in the nonce. Update the AES key length.
*/
- ctx->enckeylen = keylen - 4;
+ ctx->cdata.keylen = keylen - 4;
- ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->enckeylen,
+ ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->cdata.keylen,
DMA_TO_DEVICE);
if (dma_mapping_error(jrdev, ctx->key_dma)) {
dev_err(jrdev, "unable to map key i/o memory\n");
@@ -1494,7 +742,7 @@ static int rfc4543_setkey(struct crypto_aead *aead,
ret = rfc4543_set_sh_desc(aead);
if (ret) {
- dma_unmap_single(jrdev, ctx->key_dma, ctx->enckeylen,
+ dma_unmap_single(jrdev, ctx->key_dma, ctx->cdata.keylen,
DMA_TO_DEVICE);
}
@@ -1505,21 +753,18 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
const u8 *key, unsigned int keylen)
{
struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
- struct ablkcipher_tfm *crt = &ablkcipher->base.crt_ablkcipher;
struct crypto_tfm *tfm = crypto_ablkcipher_tfm(ablkcipher);
const char *alg_name = crypto_tfm_alg_name(tfm);
struct device *jrdev = ctx->jrdev;
- int ret = 0;
- u32 *key_jump_cmd;
+ unsigned int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
u32 *desc;
- u8 *nonce;
- u32 geniv;
u32 ctx1_iv_off = 0;
- const bool ctr_mode = ((ctx->class1_alg_type & OP_ALG_AAI_MASK) ==
+ const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
OP_ALG_AAI_CTR_MOD128);
const bool is_rfc3686 = (ctr_mode &&
(strstr(alg_name, "rfc3686") != NULL));
+ memcpy(ctx->key, key, keylen);
#ifdef DEBUG
print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
@@ -1542,60 +787,20 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
keylen -= CTR_RFC3686_NONCE_SIZE;
}
- memcpy(ctx->key, key, keylen);
ctx->key_dma = dma_map_single(jrdev, ctx->key, keylen,
DMA_TO_DEVICE);
if (dma_mapping_error(jrdev, ctx->key_dma)) {
dev_err(jrdev, "unable to map key i/o memory\n");
return -ENOMEM;
}
- ctx->enckeylen = keylen;
+ ctx->cdata.keylen = keylen;
+ ctx->cdata.key_virt = ctx->key;
+ ctx->cdata.key_inline = true;
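/*
 * Plain blkcipher keys (at most 32 bytes plus an optional 4-byte
 * RFC3686 nonce here) always fit inline, so no desc_inline_query()
 * round is needed. The constructor below is assumed to take the IV
 * offset so one program serves both plain and RFC3686 templates:
 *
 *   void cnstr_shdsc_ablkcipher_encap(u32 * const desc,
 *				       struct alginfo *cdata,
 *				       unsigned int ivsize,
 *				       const bool is_rfc3686,
 *				       const u32 ctx1_iv_off);
 */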
/* ablkcipher_encrypt shared descriptor */
desc = ctx->sh_desc_enc;
- init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
- /* Skip if already shared */
- key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
- JUMP_COND_SHRD);
-
- /* Load class1 key only */
- append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
- ctx->enckeylen, CLASS_1 |
- KEY_DEST_CLASS_REG);
-
- /* Load nonce into CONTEXT1 reg */
- if (is_rfc3686) {
- nonce = (u8 *)key + keylen;
- append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
- LDST_CLASS_IND_CCB |
- LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
- append_move(desc, MOVE_WAITCOMP |
- MOVE_SRC_OUTFIFO |
- MOVE_DEST_CLASS1CTX |
- (16 << MOVE_OFFSET_SHIFT) |
- (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
- }
-
- set_jump_tgt_here(desc, key_jump_cmd);
-
- /* Load iv */
- append_seq_load(desc, crt->ivsize, LDST_SRCDST_BYTE_CONTEXT |
- LDST_CLASS_1_CCB | (ctx1_iv_off << LDST_OFFSET_SHIFT));
-
- /* Load counter into CONTEXT1 reg */
- if (is_rfc3686)
- append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
- LDST_SRCDST_BYTE_CONTEXT |
- ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
- LDST_OFFSET_SHIFT));
-
- /* Load operation */
- append_operation(desc, ctx->class1_alg_type |
- OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
-
- /* Perform operation */
- ablkcipher_append_src_dst(desc);
-
+ cnstr_shdsc_ablkcipher_encap(desc, &ctx->cdata, ivsize, is_rfc3686,
+ ctx1_iv_off);
ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
desc_bytes(desc),
DMA_TO_DEVICE);
@@ -1603,61 +808,11 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
dev_err(jrdev, "unable to map shared descriptor\n");
return -ENOMEM;
}
-#ifdef DEBUG
- print_hex_dump(KERN_ERR,
- "ablkcipher enc shdesc@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, desc,
- desc_bytes(desc), 1);
-#endif
+
/* ablkcipher_decrypt shared descriptor */
desc = ctx->sh_desc_dec;
-
- init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
- /* Skip if already shared */
- key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
- JUMP_COND_SHRD);
-
- /* Load class1 key only */
- append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
- ctx->enckeylen, CLASS_1 |
- KEY_DEST_CLASS_REG);
-
- /* Load nonce into CONTEXT1 reg */
- if (is_rfc3686) {
- nonce = (u8 *)key + keylen;
- append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
- LDST_CLASS_IND_CCB |
- LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
- append_move(desc, MOVE_WAITCOMP |
- MOVE_SRC_OUTFIFO |
- MOVE_DEST_CLASS1CTX |
- (16 << MOVE_OFFSET_SHIFT) |
- (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
- }
-
- set_jump_tgt_here(desc, key_jump_cmd);
-
- /* load IV */
- append_seq_load(desc, crt->ivsize, LDST_SRCDST_BYTE_CONTEXT |
- LDST_CLASS_1_CCB | (ctx1_iv_off << LDST_OFFSET_SHIFT));
-
- /* Load counter into CONTEXT1 reg */
- if (is_rfc3686)
- append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
- LDST_SRCDST_BYTE_CONTEXT |
- ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
- LDST_OFFSET_SHIFT));
-
- /* Choose operation */
- if (ctr_mode)
- append_operation(desc, ctx->class1_alg_type |
- OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT);
- else
- append_dec_op1(desc, ctx->class1_alg_type);
-
- /* Perform operation */
- ablkcipher_append_src_dst(desc);
-
+ cnstr_shdsc_ablkcipher_decap(desc, &ctx->cdata, ivsize, is_rfc3686,
+ ctx1_iv_off);
ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
desc_bytes(desc),
DMA_TO_DEVICE);
@@ -1666,76 +821,10 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
return -ENOMEM;
}
-#ifdef DEBUG
- print_hex_dump(KERN_ERR,
- "ablkcipher dec shdesc@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, desc,
- desc_bytes(desc), 1);
-#endif
/* ablkcipher_givencrypt shared descriptor */
desc = ctx->sh_desc_givenc;
-
- init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
- /* Skip if already shared */
- key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
- JUMP_COND_SHRD);
-
- /* Load class1 key only */
- append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
- ctx->enckeylen, CLASS_1 |
- KEY_DEST_CLASS_REG);
-
- /* Load Nonce into CONTEXT1 reg */
- if (is_rfc3686) {
- nonce = (u8 *)key + keylen;
- append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
- LDST_CLASS_IND_CCB |
- LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
- append_move(desc, MOVE_WAITCOMP |
- MOVE_SRC_OUTFIFO |
- MOVE_DEST_CLASS1CTX |
- (16 << MOVE_OFFSET_SHIFT) |
- (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
- }
- set_jump_tgt_here(desc, key_jump_cmd);
-
- /* Generate IV */
- geniv = NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DEST_DECO |
- NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_LC1 |
- NFIFOENTRY_PTYPE_RND | (crt->ivsize << NFIFOENTRY_DLEN_SHIFT);
- append_load_imm_u32(desc, geniv, LDST_CLASS_IND_CCB |
- LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
- append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
- append_move(desc, MOVE_WAITCOMP |
- MOVE_SRC_INFIFO |
- MOVE_DEST_CLASS1CTX |
- (crt->ivsize << MOVE_LEN_SHIFT) |
- (ctx1_iv_off << MOVE_OFFSET_SHIFT));
- append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
-
- /* Copy generated IV to memory */
- append_seq_store(desc, crt->ivsize,
- LDST_SRCDST_BYTE_CONTEXT | LDST_CLASS_1_CCB |
- (ctx1_iv_off << LDST_OFFSET_SHIFT));
-
- /* Load Counter into CONTEXT1 reg */
- if (is_rfc3686)
- append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
- LDST_SRCDST_BYTE_CONTEXT |
- ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
- LDST_OFFSET_SHIFT));
-
- if (ctx1_iv_off)
- append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | JUMP_COND_NCP |
- (1 << JUMP_OFFSET_SHIFT));
-
- /* Load operation */
- append_operation(desc, ctx->class1_alg_type |
- OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
-
- /* Perform operation */
- ablkcipher_append_src_dst(desc);
-
+ cnstr_shdsc_ablkcipher_givencap(desc, &ctx->cdata, ivsize, is_rfc3686,
+ ctx1_iv_off);
ctx->sh_desc_givenc_dma = dma_map_single(jrdev, desc,
desc_bytes(desc),
DMA_TO_DEVICE);
@@ -1743,14 +832,8 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
dev_err(jrdev, "unable to map shared descriptor\n");
return -ENOMEM;
}
-#ifdef DEBUG
- print_hex_dump(KERN_ERR,
- "ablkcipher givenc shdesc@" __stringify(__LINE__) ": ",
- DUMP_PREFIX_ADDRESS, 16, 4, desc,
- desc_bytes(desc), 1);
-#endif
- return ret;
+ return 0;
}
static int xts_ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
@@ -1758,8 +841,7 @@ static int xts_ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
{
struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
struct device *jrdev = ctx->jrdev;
- u32 *key_jump_cmd, *desc;
- __be64 sector_size = cpu_to_be64(512);
+ u32 *desc;
if (keylen != 2 * AES_MIN_KEY_SIZE && keylen != 2 * AES_MAX_KEY_SIZE) {
crypto_ablkcipher_set_flags(ablkcipher,
@@ -1774,88 +856,23 @@ static int xts_ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
dev_err(jrdev, "unable to map key i/o memory\n");
return -ENOMEM;
}
- ctx->enckeylen = keylen;
+ ctx->cdata.keylen = keylen;
+ ctx->cdata.key_virt = ctx->key;
+ ctx->cdata.key_inline = true;
/* xts_ablkcipher_encrypt shared descriptor */
desc = ctx->sh_desc_enc;
- init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
- /* Skip if already shared */
- key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
- JUMP_COND_SHRD);
-
- /* Load class1 keys only */
- append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
- ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
-
- /* Load sector size with index 40 bytes (0x28) */
- append_cmd(desc, CMD_LOAD | IMMEDIATE | LDST_SRCDST_BYTE_CONTEXT |
- LDST_CLASS_1_CCB | (0x28 << LDST_OFFSET_SHIFT) | 8);
- append_data(desc, (void *)&sector_size, 8);
-
- set_jump_tgt_here(desc, key_jump_cmd);
-
- /*
- * create sequence for loading the sector index
- * Upper 8B of IV - will be used as sector index
- * Lower 8B of IV - will be discarded
- */
- append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
- LDST_CLASS_1_CCB | (0x20 << LDST_OFFSET_SHIFT) | 8);
- append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP);
-
- /* Load operation */
- append_operation(desc, ctx->class1_alg_type | OP_ALG_AS_INITFINAL |
- OP_ALG_ENCRYPT);
-
- /* Perform operation */
- ablkcipher_append_src_dst(desc);
-
+ cnstr_shdsc_xts_ablkcipher_encap(desc, &ctx->cdata);
ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
DMA_TO_DEVICE);
if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
dev_err(jrdev, "unable to map shared descriptor\n");
return -ENOMEM;
}
-#ifdef DEBUG
- print_hex_dump(KERN_ERR,
- "xts ablkcipher enc shdesc@" __stringify(__LINE__) ": ",
- DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
-#endif
/* xts_ablkcipher_decrypt shared descriptor */
desc = ctx->sh_desc_dec;
-
- init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
- /* Skip if already shared */
- key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
- JUMP_COND_SHRD);
-
- /* Load class1 key only */
- append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
- ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
-
- /* Load sector size with index 40 bytes (0x28) */
- append_cmd(desc, CMD_LOAD | IMMEDIATE | LDST_SRCDST_BYTE_CONTEXT |
- LDST_CLASS_1_CCB | (0x28 << LDST_OFFSET_SHIFT) | 8);
- append_data(desc, (void *)&sector_size, 8);
-
- set_jump_tgt_here(desc, key_jump_cmd);
-
- /*
- * create sequence for loading the sector index
- * Upper 8B of IV - will be used as sector index
- * Lower 8B of IV - will be discarded
- */
- append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
- LDST_CLASS_1_CCB | (0x20 << LDST_OFFSET_SHIFT) | 8);
- append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP);
-
- /* Load operation */
- append_dec_op1(desc, ctx->class1_alg_type);
-
- /* Perform operation */
- ablkcipher_append_src_dst(desc);
-
+ cnstr_shdsc_xts_ablkcipher_decap(desc, &ctx->cdata);
ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
DMA_TO_DEVICE);
if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
@@ -1864,31 +881,22 @@ static int xts_ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
dev_err(jrdev, "unable to map shared descriptor\n");
return -ENOMEM;
}
-#ifdef DEBUG
- print_hex_dump(KERN_ERR,
- "xts ablkcipher dec shdesc@" __stringify(__LINE__) ": ",
- DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
-#endif
return 0;
}
/*
* aead_edesc - s/w-extended aead descriptor
- * @assoc_nents: number of segments in associated data (SPI+Seq) scatterlist
* @src_nents: number of segments in input scatterlist
* @dst_nents: number of segments in output scatterlist
- * @iv_dma: dma address of iv for checking continuity and link table
- * @desc: h/w descriptor (variable length; must not exceed MAX_CAAM_DESCSIZE)
* @sec4_sg_bytes: length of dma mapped sec4_sg space
* @sec4_sg_dma: bus physical mapped address of h/w link table
+ * @sec4_sg: pointer to h/w link table
* @hw_desc: the h/w job descriptor followed by any referenced link tables
*/
struct aead_edesc {
- int assoc_nents;
int src_nents;
int dst_nents;
- dma_addr_t iv_dma;
int sec4_sg_bytes;
dma_addr_t sec4_sg_dma;
struct sec4_sg_entry *sec4_sg;
@@ -1900,9 +908,9 @@ struct aead_edesc {
* @src_nents: number of segments in input scatterlist
* @dst_nents: number of segments in output scatterlist
* @iv_dma: dma address of iv for checking continuity and link table
- * @desc: h/w descriptor (variable length; must not exceed MAX_CAAM_DESCSIZE)
* @sec4_sg_bytes: length of dma mapped sec4_sg space
* @sec4_sg_dma: bus physical mapped address of h/w link table
+ * @sec4_sg: pointer to h/w link table
* @hw_desc: the h/w job descriptor followed by any referenced link tables
*/
struct ablkcipher_edesc {
@@ -2019,8 +1027,7 @@ static void ablkcipher_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif
- edesc = (struct ablkcipher_edesc *)((char *)desc -
- offsetof(struct ablkcipher_edesc, hw_desc));
+ edesc = container_of(desc, struct ablkcipher_edesc, hw_desc[0]);
if (err)
caam_jr_strstatus(jrdev, err);
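/*
 * container_of() is the idiomatic form of the open-coded pointer
 * arithmetic it replaces above; both recover the enclosing edesc from
 * the address of its hw_desc[] member. A self-contained equivalent:
 */
static inline struct ablkcipher_edesc *desc_to_edesc(u32 *desc)
{
	return container_of(desc, struct ablkcipher_edesc, hw_desc[0]);
}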
@@ -2031,7 +1038,7 @@ static void ablkcipher_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
edesc->src_nents > 1 ? 100 : ivsize, 1);
dbg_dump_sg(KERN_ERR, "dst @"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
- edesc->dst_nents > 1 ? 100 : req->nbytes, 1, true);
+ edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
#endif
ablkcipher_unmap(jrdev, edesc, req);
@@ -2052,8 +1059,7 @@ static void ablkcipher_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif
- edesc = (struct ablkcipher_edesc *)((char *)desc -
- offsetof(struct ablkcipher_edesc, hw_desc));
+ edesc = container_of(desc, struct ablkcipher_edesc, hw_desc[0]);
if (err)
caam_jr_strstatus(jrdev, err);
@@ -2063,7 +1069,7 @@ static void ablkcipher_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
ivsize, 1);
dbg_dump_sg(KERN_ERR, "dst @"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
- edesc->dst_nents > 1 ? 100 : req->nbytes, 1, true);
+ edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
#endif
ablkcipher_unmap(jrdev, edesc, req);
@@ -2157,7 +1163,7 @@ static void init_gcm_job(struct aead_request *req,
FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1 | 12 | last);
/* Append Salt */
if (!generic_gcm)
- append_data(desc, ctx->key + ctx->enckeylen, 4);
+ append_data(desc, ctx->key + ctx->cdata.keylen, 4);
/* Append IV */
append_data(desc, req->iv, ivsize);
/* End of blank commands */
@@ -2172,7 +1178,7 @@ static void init_authenc_job(struct aead_request *req,
struct caam_aead_alg, aead);
unsigned int ivsize = crypto_aead_ivsize(aead);
struct caam_ctx *ctx = crypto_aead_ctx(aead);
- const bool ctr_mode = ((ctx->class1_alg_type & OP_ALG_AAI_MASK) ==
+ const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
OP_ALG_AAI_CTR_MOD128);
const bool is_rfc3686 = alg->caam.rfc3686;
u32 *desc = edesc->hw_desc;
@@ -2218,15 +1224,13 @@ static void init_ablkcipher_job(u32 *sh_desc, dma_addr_t ptr,
int len, sec4_sg_index = 0;
#ifdef DEBUG
- bool may_sleep = ((req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
- CRYPTO_TFM_REQ_MAY_SLEEP)) != 0);
print_hex_dump(KERN_ERR, "presciv@"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, req->info,
ivsize, 1);
printk(KERN_ERR "asked=%d, nbytes%d\n", (int)edesc->src_nents ? 100 : req->nbytes, req->nbytes);
dbg_dump_sg(KERN_ERR, "src @"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, req->src,
- edesc->src_nents ? 100 : req->nbytes, 1, may_sleep);
+ edesc->src_nents ? 100 : req->nbytes, 1);
#endif
len = desc_len(sh_desc);
@@ -2278,14 +1282,12 @@ static void init_ablkcipher_giv_job(u32 *sh_desc, dma_addr_t ptr,
int len, sec4_sg_index = 0;
#ifdef DEBUG
- bool may_sleep = ((req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
- CRYPTO_TFM_REQ_MAY_SLEEP)) != 0);
print_hex_dump(KERN_ERR, "presciv@" __stringify(__LINE__) ": ",
DUMP_PREFIX_ADDRESS, 16, 4, req->info,
ivsize, 1);
dbg_dump_sg(KERN_ERR, "src @" __stringify(__LINE__) ": ",
DUMP_PREFIX_ADDRESS, 16, 4, req->src,
- edesc->src_nents ? 100 : req->nbytes, 1, may_sleep);
+ edesc->src_nents ? 100 : req->nbytes, 1);
#endif
len = desc_len(sh_desc);
@@ -2344,10 +1346,8 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
/* Check if data are contiguous. */
all_contig = !src_nents;
- if (!all_contig) {
- src_nents = src_nents ? : 1;
+ if (!all_contig)
sec4_sg_len = src_nents;
- }
sec4_sg_len += dst_nents;
@@ -2556,11 +1556,9 @@ static int aead_decrypt(struct aead_request *req)
int ret = 0;
#ifdef DEBUG
- bool may_sleep = ((req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
- CRYPTO_TFM_REQ_MAY_SLEEP)) != 0);
dbg_dump_sg(KERN_ERR, "dec src@"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, req->src,
- req->assoclen + req->cryptlen, 1, may_sleep);
+ req->assoclen + req->cryptlen, 1);
#endif
/* allocate extended descriptor */
@@ -2618,16 +1616,33 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
if (likely(req->src == req->dst)) {
sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
DMA_BIDIRECTIONAL);
+ if (unlikely(!sgc)) {
+ dev_err(jrdev, "unable to map source\n");
+ return ERR_PTR(-ENOMEM);
+ }
} else {
sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
DMA_TO_DEVICE);
+ if (unlikely(!sgc)) {
+ dev_err(jrdev, "unable to map source\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
sgc = dma_map_sg(jrdev, req->dst, dst_nents ? : 1,
DMA_FROM_DEVICE);
+ if (unlikely(!sgc)) {
+ dev_err(jrdev, "unable to map destination\n");
+ dma_unmap_sg(jrdev, req->src, src_nents ? : 1,
+ DMA_TO_DEVICE);
+ return ERR_PTR(-ENOMEM);
+ }
}
iv_dma = dma_map_single(jrdev, req->info, ivsize, DMA_TO_DEVICE);
if (dma_mapping_error(jrdev, iv_dma)) {
dev_err(jrdev, "unable to map IV\n");
+ caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0,
+ 0, 0, 0);
return ERR_PTR(-ENOMEM);
}
@@ -2647,6 +1662,8 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
GFP_DMA | flags);
if (!edesc) {
dev_err(jrdev, "could not allocate extended descriptor\n");
+ caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents,
+ iv_dma, ivsize, 0, 0);
return ERR_PTR(-ENOMEM);
}
@@ -2673,6 +1690,9 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
sec4_sg_bytes, DMA_TO_DEVICE);
if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
dev_err(jrdev, "unable to map S/G table\n");
+ caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents,
+ iv_dma, ivsize, 0, 0);
+ kfree(edesc);
return ERR_PTR(-ENOMEM);
}
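
[Note: the error paths added in the hunks above follow the usual unwind discipline: on each failure, undo exactly the mappings and allocations that have already succeeded (dma_unmap_sg(), caam_unmap(), kfree()) before returning -ENOMEM. A self-contained sketch of that pattern; the map/unmap helpers are stand-ins invented for the demo, not driver functions:]

	#include <stdio.h>

	/* Stand-in helpers; the real code maps/unmaps DMA scatterlists
	 * and frees the extended descriptor. */
	static int map_src(void) { return 0; }		/* 0 == success */
	static int map_dst(void) { return -1; }		/* simulate failure */
	static void unmap_src(void) { puts("unwinding: unmap src"); }

	static int setup(void)
	{
		if (map_src())
			return -1;

		if (map_dst()) {
			/* undo only what already succeeded, then bail out */
			unmap_src();
			return -1;
		}

		return 0;
	}

	int main(void)
	{
		return setup() ? 1 : 0;
	}
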
@@ -2794,11 +1814,26 @@ static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc(
if (likely(req->src == req->dst)) {
sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
DMA_BIDIRECTIONAL);
+ if (unlikely(!sgc)) {
+ dev_err(jrdev, "unable to map source\n");
+ return ERR_PTR(-ENOMEM);
+ }
} else {
sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
DMA_TO_DEVICE);
+ if (unlikely(!sgc)) {
+ dev_err(jrdev, "unable to map source\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
sgc = dma_map_sg(jrdev, req->dst, dst_nents ? : 1,
DMA_FROM_DEVICE);
+ if (unlikely(!sgc)) {
+ dev_err(jrdev, "unable to map destination\n");
+ dma_unmap_sg(jrdev, req->src, src_nents ? : 1,
+ DMA_TO_DEVICE);
+ return ERR_PTR(-ENOMEM);
+ }
}
/*
@@ -2808,6 +1843,8 @@ static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc(
iv_dma = dma_map_single(jrdev, greq->giv, ivsize, DMA_TO_DEVICE);
if (dma_mapping_error(jrdev, iv_dma)) {
dev_err(jrdev, "unable to map IV\n");
+ caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0,
+ 0, 0, 0);
return ERR_PTR(-ENOMEM);
}
@@ -2823,6 +1860,8 @@ static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc(
GFP_DMA | flags);
if (!edesc) {
dev_err(jrdev, "could not allocate extended descriptor\n");
+ caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents,
+ iv_dma, ivsize, 0, 0);
return ERR_PTR(-ENOMEM);
}
@@ -2850,6 +1889,9 @@ static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc(
sec4_sg_bytes, DMA_TO_DEVICE);
if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
dev_err(jrdev, "unable to map S/G table\n");
+ caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents,
+ iv_dma, ivsize, 0, 0);
+ kfree(edesc);
return ERR_PTR(-ENOMEM);
}
edesc->iv_dma = iv_dma;
@@ -2916,7 +1958,6 @@ struct caam_alg_template {
} template_u;
u32 class1_alg_type;
u32 class2_alg_type;
- u32 alg_op;
};
static struct caam_alg_template driver_algs[] = {
@@ -3101,7 +2142,6 @@ static struct caam_aead_alg driver_aeads[] = {
.caam = {
.class2_alg_type = OP_ALG_ALGSEL_MD5 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
},
},
{
@@ -3123,7 +2163,6 @@ static struct caam_aead_alg driver_aeads[] = {
.caam = {
.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
},
},
{
@@ -3145,7 +2184,6 @@ static struct caam_aead_alg driver_aeads[] = {
.caam = {
.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
},
},
{
@@ -3167,7 +2205,6 @@ static struct caam_aead_alg driver_aeads[] = {
.caam = {
.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
},
},
{
@@ -3189,7 +2226,6 @@ static struct caam_aead_alg driver_aeads[] = {
.caam = {
.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
},
},
{
@@ -3211,7 +2247,6 @@ static struct caam_aead_alg driver_aeads[] = {
.caam = {
.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
},
},
{
@@ -3233,7 +2268,6 @@ static struct caam_aead_alg driver_aeads[] = {
.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_MD5 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
},
},
{
@@ -3256,7 +2290,6 @@ static struct caam_aead_alg driver_aeads[] = {
.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_MD5 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
.geniv = true,
},
},
@@ -3279,7 +2312,6 @@ static struct caam_aead_alg driver_aeads[] = {
.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
},
},
{
@@ -3302,7 +2334,6 @@ static struct caam_aead_alg driver_aeads[] = {
.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
.geniv = true,
},
},
@@ -3325,7 +2356,6 @@ static struct caam_aead_alg driver_aeads[] = {
.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
},
},
{
@@ -3348,7 +2378,6 @@ static struct caam_aead_alg driver_aeads[] = {
.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
.geniv = true,
},
},
@@ -3371,7 +2400,6 @@ static struct caam_aead_alg driver_aeads[] = {
.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
},
},
{
@@ -3394,7 +2422,6 @@ static struct caam_aead_alg driver_aeads[] = {
.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
.geniv = true,
},
},
@@ -3417,7 +2444,6 @@ static struct caam_aead_alg driver_aeads[] = {
.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
},
},
{
@@ -3440,7 +2466,6 @@ static struct caam_aead_alg driver_aeads[] = {
.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
.geniv = true,
},
},
@@ -3463,7 +2488,6 @@ static struct caam_aead_alg driver_aeads[] = {
.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
},
},
{
@@ -3486,7 +2510,6 @@ static struct caam_aead_alg driver_aeads[] = {
.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
.geniv = true,
},
},
@@ -3509,7 +2532,6 @@ static struct caam_aead_alg driver_aeads[] = {
.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_MD5 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
}
},
{
@@ -3532,7 +2554,6 @@ static struct caam_aead_alg driver_aeads[] = {
.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_MD5 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
.geniv = true,
}
},
@@ -3556,7 +2577,6 @@ static struct caam_aead_alg driver_aeads[] = {
.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
},
},
{
@@ -3580,7 +2600,6 @@ static struct caam_aead_alg driver_aeads[] = {
.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
.geniv = true,
},
},
@@ -3604,7 +2623,6 @@ static struct caam_aead_alg driver_aeads[] = {
.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
},
},
{
@@ -3628,7 +2646,6 @@ static struct caam_aead_alg driver_aeads[] = {
.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
.geniv = true,
},
},
@@ -3652,7 +2669,6 @@ static struct caam_aead_alg driver_aeads[] = {
.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
},
},
{
@@ -3676,7 +2692,6 @@ static struct caam_aead_alg driver_aeads[] = {
.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
.geniv = true,
},
},
@@ -3700,7 +2715,6 @@ static struct caam_aead_alg driver_aeads[] = {
.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
},
},
{
@@ -3724,7 +2738,6 @@ static struct caam_aead_alg driver_aeads[] = {
.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
.geniv = true,
},
},
@@ -3748,7 +2761,6 @@ static struct caam_aead_alg driver_aeads[] = {
.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
},
},
{
@@ -3772,7 +2784,6 @@ static struct caam_aead_alg driver_aeads[] = {
.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
.geniv = true,
},
},
@@ -3795,7 +2806,6 @@ static struct caam_aead_alg driver_aeads[] = {
.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_MD5 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
},
},
{
@@ -3818,7 +2828,6 @@ static struct caam_aead_alg driver_aeads[] = {
.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_MD5 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
.geniv = true,
},
},
@@ -3841,7 +2850,6 @@ static struct caam_aead_alg driver_aeads[] = {
.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
},
},
{
@@ -3864,7 +2872,6 @@ static struct caam_aead_alg driver_aeads[] = {
.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
.geniv = true,
},
},
@@ -3887,7 +2894,6 @@ static struct caam_aead_alg driver_aeads[] = {
.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
},
},
{
@@ -3910,7 +2916,6 @@ static struct caam_aead_alg driver_aeads[] = {
.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
.geniv = true,
},
},
@@ -3933,7 +2938,6 @@ static struct caam_aead_alg driver_aeads[] = {
.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
},
},
{
@@ -3956,7 +2960,6 @@ static struct caam_aead_alg driver_aeads[] = {
.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
.geniv = true,
},
},
@@ -3979,7 +2982,6 @@ static struct caam_aead_alg driver_aeads[] = {
.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
},
},
{
@@ -4002,7 +3004,6 @@ static struct caam_aead_alg driver_aeads[] = {
.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
.geniv = true,
},
},
@@ -4025,7 +3026,6 @@ static struct caam_aead_alg driver_aeads[] = {
.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
},
},
{
@@ -4048,7 +3048,6 @@ static struct caam_aead_alg driver_aeads[] = {
.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
.geniv = true,
},
},
@@ -4073,7 +3072,6 @@ static struct caam_aead_alg driver_aeads[] = {
OP_ALG_AAI_CTR_MOD128,
.class2_alg_type = OP_ALG_ALGSEL_MD5 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
.rfc3686 = true,
},
},
@@ -4098,7 +3096,6 @@ static struct caam_aead_alg driver_aeads[] = {
OP_ALG_AAI_CTR_MOD128,
.class2_alg_type = OP_ALG_ALGSEL_MD5 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
.rfc3686 = true,
.geniv = true,
},
@@ -4124,7 +3121,6 @@ static struct caam_aead_alg driver_aeads[] = {
OP_ALG_AAI_CTR_MOD128,
.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
.rfc3686 = true,
},
},
@@ -4149,7 +3145,6 @@ static struct caam_aead_alg driver_aeads[] = {
OP_ALG_AAI_CTR_MOD128,
.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
.rfc3686 = true,
.geniv = true,
},
@@ -4175,7 +3170,6 @@ static struct caam_aead_alg driver_aeads[] = {
OP_ALG_AAI_CTR_MOD128,
.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
.rfc3686 = true,
},
},
@@ -4200,7 +3194,6 @@ static struct caam_aead_alg driver_aeads[] = {
OP_ALG_AAI_CTR_MOD128,
.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
.rfc3686 = true,
.geniv = true,
},
@@ -4226,7 +3219,6 @@ static struct caam_aead_alg driver_aeads[] = {
OP_ALG_AAI_CTR_MOD128,
.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
.rfc3686 = true,
},
},
@@ -4251,7 +3243,6 @@ static struct caam_aead_alg driver_aeads[] = {
OP_ALG_AAI_CTR_MOD128,
.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
.rfc3686 = true,
.geniv = true,
},
@@ -4277,7 +3268,6 @@ static struct caam_aead_alg driver_aeads[] = {
OP_ALG_AAI_CTR_MOD128,
.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
.rfc3686 = true,
},
},
@@ -4302,7 +3292,6 @@ static struct caam_aead_alg driver_aeads[] = {
OP_ALG_AAI_CTR_MOD128,
.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
.rfc3686 = true,
.geniv = true,
},
@@ -4328,7 +3317,6 @@ static struct caam_aead_alg driver_aeads[] = {
OP_ALG_AAI_CTR_MOD128,
.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
.rfc3686 = true,
},
},
@@ -4353,7 +3341,6 @@ static struct caam_aead_alg driver_aeads[] = {
OP_ALG_AAI_CTR_MOD128,
.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
.rfc3686 = true,
.geniv = true,
},
@@ -4375,9 +3362,8 @@ static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam)
}
/* copy descriptor header template value */
- ctx->class1_alg_type = OP_TYPE_CLASS1_ALG | caam->class1_alg_type;
- ctx->class2_alg_type = OP_TYPE_CLASS2_ALG | caam->class2_alg_type;
- ctx->alg_op = OP_TYPE_CLASS2_ALG | caam->alg_op;
+ ctx->cdata.algtype = OP_TYPE_CLASS1_ALG | caam->class1_alg_type;
+ ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam->class2_alg_type;
return 0;
}
@@ -4420,7 +3406,7 @@ static void caam_exit_common(struct caam_ctx *ctx)
if (ctx->key_dma &&
!dma_mapping_error(ctx->jrdev, ctx->key_dma))
dma_unmap_single(ctx->jrdev, ctx->key_dma,
- ctx->enckeylen + ctx->split_key_pad_len,
+ ctx->cdata.keylen + ctx->adata.keylen_pad,
DMA_TO_DEVICE);
caam_jr_free(ctx->jrdev);
@@ -4498,7 +3484,6 @@ static struct caam_crypto_alg *caam_alg_alloc(struct caam_alg_template
t_alg->caam.class1_alg_type = template->class1_alg_type;
t_alg->caam.class2_alg_type = template->class2_alg_type;
- t_alg->caam.alg_op = template->alg_op;
return t_alg;
}
diff --git a/drivers/crypto/caam/caamalg_desc.c b/drivers/crypto/caam/caamalg_desc.c
new file mode 100644
index 000000000000..f3f48c10b9d6
--- /dev/null
+++ b/drivers/crypto/caam/caamalg_desc.c
@@ -0,0 +1,1306 @@
+/*
+ * Shared descriptors for aead, ablkcipher algorithms
+ *
+ * Copyright 2016 NXP
+ */
+
+#include "compat.h"
+#include "desc_constr.h"
+#include "caamalg_desc.h"
+
+/*
+ * For aead functions, read payload and write payload,
+ * both of which are specified in req->src and req->dst
+ */
+static inline void aead_append_src_dst(u32 *desc, u32 msg_type)
+{
+ append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
+ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH |
+ KEY_VLF | msg_type | FIFOLD_TYPE_LASTBOTH);
+}
+
+/* Set DK bit in class 1 operation if shared */
+static inline void append_dec_op1(u32 *desc, u32 type)
+{
+ u32 *jump_cmd, *uncond_jump_cmd;
+
+ /* DK bit is valid only for AES */
+ if ((type & OP_ALG_ALGSEL_MASK) != OP_ALG_ALGSEL_AES) {
+ append_operation(desc, type | OP_ALG_AS_INITFINAL |
+ OP_ALG_DECRYPT);
+ return;
+ }
+
+ jump_cmd = append_jump(desc, JUMP_TEST_ALL | JUMP_COND_SHRD);
+ append_operation(desc, type | OP_ALG_AS_INITFINAL |
+ OP_ALG_DECRYPT);
+ uncond_jump_cmd = append_jump(desc, JUMP_TEST_ALL);
+ set_jump_tgt_here(desc, jump_cmd);
+ append_operation(desc, type | OP_ALG_AS_INITFINAL |
+ OP_ALG_DECRYPT | OP_ALG_AAI_DK);
+ set_jump_tgt_here(desc, uncond_jump_cmd);
+}
+
+/**
+ * cnstr_shdsc_aead_null_encap - IPSec ESP encapsulation shared descriptor
+ * (non-protocol) with no (null) encryption.
+ * @desc: pointer to buffer used for descriptor construction
+ * @adata: pointer to authentication transform definitions. Note that since a
+ * split key is to be used, the size of the split key itself is
+ * specified. Valid algorithm values - one of OP_ALG_ALGSEL_{MD5, SHA1,
+ * SHA224, SHA256, SHA384, SHA512} ANDed with OP_ALG_AAI_HMAC_PRECOMP.
+ * @icvsize: integrity check value (ICV) size (truncated or full)
+ *
+ * Note: Requires an MDHA split key.
+ */
+void cnstr_shdsc_aead_null_encap(u32 * const desc, struct alginfo *adata,
+ unsigned int icvsize)
+{
+ u32 *key_jump_cmd, *read_move_cmd, *write_move_cmd;
+
+ init_sh_desc(desc, HDR_SHARE_SERIAL);
+
+ /* Skip if already shared */
+ key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+ JUMP_COND_SHRD);
+ if (adata->key_inline)
+ append_key_as_imm(desc, adata->key_virt, adata->keylen_pad,
+ adata->keylen, CLASS_2 | KEY_DEST_MDHA_SPLIT |
+ KEY_ENC);
+ else
+ append_key(desc, adata->key_dma, adata->keylen, CLASS_2 |
+ KEY_DEST_MDHA_SPLIT | KEY_ENC);
+ set_jump_tgt_here(desc, key_jump_cmd);
+
+ /* assoclen + cryptlen = seqinlen */
+ append_math_sub(desc, REG3, SEQINLEN, REG0, CAAM_CMD_SZ);
+
+ /* Prepare to read and write cryptlen + assoclen bytes */
+ append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
+ append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
+
+ /*
+ * MOVE_LEN opcode is not available in all SEC HW revisions,
+ * thus need to do some magic, i.e. self-patch the descriptor
+ * buffer.
+ */
+ read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF |
+ MOVE_DEST_MATH3 |
+ (0x6 << MOVE_LEN_SHIFT));
+ write_move_cmd = append_move(desc, MOVE_SRC_MATH3 |
+ MOVE_DEST_DESCBUF |
+ MOVE_WAITCOMP |
+ (0x8 << MOVE_LEN_SHIFT));
+
+ /* Class 2 operation */
+ append_operation(desc, adata->algtype | OP_ALG_AS_INITFINAL |
+ OP_ALG_ENCRYPT);
+
+ /* Read and write cryptlen bytes */
+ aead_append_src_dst(desc, FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);
+
+ set_move_tgt_here(desc, read_move_cmd);
+ set_move_tgt_here(desc, write_move_cmd);
+ append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
+ append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO |
+ MOVE_AUX_LS);
+
+ /* Write ICV */
+ append_seq_store(desc, icvsize, LDST_CLASS_2_CCB |
+ LDST_SRCDST_BYTE_CONTEXT);
+
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR,
+ "aead null enc shdesc@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
+#endif
+}
+EXPORT_SYMBOL(cnstr_shdsc_aead_null_encap);
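
[A hedged usage sketch for the constructor above. The surrounding context (ctx, aead, split_key_len, split_key_pad_len) is assumed from a caller such as the caamalg driver and is not defined here; the concrete algorithm choice is illustrative. The alginfo field names match those used throughout this file:]

	/* Sketch only -- caller-side context is assumed, not defined here */
	struct alginfo adata = {
		.algtype    = OP_TYPE_CLASS2_ALG | OP_ALG_ALGSEL_SHA256 |
			      OP_ALG_AAI_HMAC_PRECOMP,
		.keylen     = split_key_len,	/* MDHA split key length */
		.keylen_pad = split_key_pad_len,
		.key_dma    = ctx->key_dma,	/* or .key_virt + .key_inline */
		.key_inline = false,
	};

	cnstr_shdsc_aead_null_encap(ctx->sh_desc_enc, &adata,
				    crypto_aead_authsize(aead));
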
+
+/**
+ * cnstr_shdsc_aead_null_decap - IPSec ESP decapsulation shared descriptor
+ * (non-protocol) with no (null) decryption.
+ * @desc: pointer to buffer used for descriptor construction
+ * @adata: pointer to authentication transform definitions. Note that since a
+ * split key is to be used, the size of the split key itself is
+ * specified. Valid algorithm values - one of OP_ALG_ALGSEL_{MD5, SHA1,
+ * SHA224, SHA256, SHA384, SHA512} ANDed with OP_ALG_AAI_HMAC_PRECOMP.
+ * @icvsize: integrity check value (ICV) size (truncated or full)
+ *
+ * Note: Requires an MDHA split key.
+ */
+void cnstr_shdsc_aead_null_decap(u32 * const desc, struct alginfo *adata,
+ unsigned int icvsize)
+{
+ u32 *key_jump_cmd, *read_move_cmd, *write_move_cmd, *jump_cmd;
+
+ init_sh_desc(desc, HDR_SHARE_SERIAL);
+
+ /* Skip if already shared */
+ key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+ JUMP_COND_SHRD);
+ if (adata->key_inline)
+ append_key_as_imm(desc, adata->key_virt, adata->keylen_pad,
+ adata->keylen, CLASS_2 |
+ KEY_DEST_MDHA_SPLIT | KEY_ENC);
+ else
+ append_key(desc, adata->key_dma, adata->keylen, CLASS_2 |
+ KEY_DEST_MDHA_SPLIT | KEY_ENC);
+ set_jump_tgt_here(desc, key_jump_cmd);
+
+ /* Class 2 operation */
+ append_operation(desc, adata->algtype | OP_ALG_AS_INITFINAL |
+ OP_ALG_DECRYPT | OP_ALG_ICV_ON);
+
+ /* assoclen + cryptlen = seqoutlen */
+ append_math_sub(desc, REG2, SEQOUTLEN, REG0, CAAM_CMD_SZ);
+
+ /* Prepare to read and write cryptlen + assoclen bytes */
+ append_math_add(desc, VARSEQINLEN, ZERO, REG2, CAAM_CMD_SZ);
+ append_math_add(desc, VARSEQOUTLEN, ZERO, REG2, CAAM_CMD_SZ);
+
+ /*
+ * MOVE_LEN opcode is not available in all SEC HW revisions,
+ * thus need to do some magic, i.e. self-patch the descriptor
+ * buffer.
+ */
+ read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF |
+ MOVE_DEST_MATH2 |
+ (0x6 << MOVE_LEN_SHIFT));
+ write_move_cmd = append_move(desc, MOVE_SRC_MATH2 |
+ MOVE_DEST_DESCBUF |
+ MOVE_WAITCOMP |
+ (0x8 << MOVE_LEN_SHIFT));
+
+ /* Read and write cryptlen bytes */
+ aead_append_src_dst(desc, FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);
+
+ /*
+ * Insert a NOP here, since we need at least 4 instructions between
+ * code patching the descriptor buffer and the location being patched.
+ */
+ jump_cmd = append_jump(desc, JUMP_TEST_ALL);
+ set_jump_tgt_here(desc, jump_cmd);
+
+ set_move_tgt_here(desc, read_move_cmd);
+ set_move_tgt_here(desc, write_move_cmd);
+ append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
+ append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO |
+ MOVE_AUX_LS);
+ append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
+
+ /* Load ICV */
+ append_seq_fifo_load(desc, icvsize, FIFOLD_CLASS_CLASS2 |
+ FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV);
+
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR,
+ "aead null dec shdesc@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
+#endif
+}
+EXPORT_SYMBOL(cnstr_shdsc_aead_null_decap);
+
+static void init_sh_desc_key_aead(u32 * const desc,
+ struct alginfo * const cdata,
+ struct alginfo * const adata,
+ const bool is_rfc3686, u32 *nonce)
+{
+ u32 *key_jump_cmd;
+ unsigned int enckeylen = cdata->keylen;
+
+ /* Note: Context registers are saved. */
+ init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
+
+ /* Skip if already shared */
+ key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+ JUMP_COND_SHRD);
+
+ /*
+ * RFC3686 specific:
+ * | key = {AUTH_KEY, ENC_KEY, NONCE}
+ * | enckeylen = encryption key size + nonce size
+ */
+ if (is_rfc3686)
+ enckeylen -= CTR_RFC3686_NONCE_SIZE;
+
+ if (adata->key_inline)
+ append_key_as_imm(desc, adata->key_virt, adata->keylen_pad,
+ adata->keylen, CLASS_2 |
+ KEY_DEST_MDHA_SPLIT | KEY_ENC);
+ else
+ append_key(desc, adata->key_dma, adata->keylen, CLASS_2 |
+ KEY_DEST_MDHA_SPLIT | KEY_ENC);
+
+ if (cdata->key_inline)
+ append_key_as_imm(desc, cdata->key_virt, enckeylen,
+ enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
+ else
+ append_key(desc, cdata->key_dma, enckeylen, CLASS_1 |
+ KEY_DEST_CLASS_REG);
+
+ /* Load Counter into CONTEXT1 reg */
+ if (is_rfc3686) {
+ append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
+ LDST_CLASS_IND_CCB |
+ LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
+ append_move(desc,
+ MOVE_SRC_OUTFIFO |
+ MOVE_DEST_CLASS1CTX |
+ (16 << MOVE_OFFSET_SHIFT) |
+ (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
+ }
+
+ set_jump_tgt_here(desc, key_jump_cmd);
+}
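
[The rfc3686 layout noted in the comment above -- key = {AUTH_KEY, ENC_KEY, NONCE}, with the nonce counted in enckeylen -- means the nonce is carved off the tail of the cipher key blob. A self-contained sketch of that bookkeeping; the key contents are fake:]

	#include <stdio.h>
	#include <string.h>

	#define CTR_RFC3686_NONCE_SIZE	4	/* as in the crypto headers */

	int main(void)
	{
		/* {ENC_KEY, NONCE}: 16-byte AES key followed by the nonce */
		unsigned char key[16 + CTR_RFC3686_NONCE_SIZE];
		unsigned int enckeylen = sizeof(key);
		const unsigned char *nonce;

		memset(key, 0, sizeof(key));
		memset(key + 16, 0xab, CTR_RFC3686_NONCE_SIZE);	/* fake nonce */

		enckeylen -= CTR_RFC3686_NONCE_SIZE;	/* real key length */
		nonce = key + enckeylen;		/* nonce trails the key */

		printf("enckeylen=%u nonce[0]=0x%02x\n", enckeylen, nonce[0]);
		return 0;
	}
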
+
+/**
+ * cnstr_shdsc_aead_encap - IPSec ESP encapsulation shared descriptor
+ * (non-protocol).
+ * @desc: pointer to buffer used for descriptor construction
+ * @cdata: pointer to block cipher transform definitions
+ * Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ANDed
+ * with OP_ALG_AAI_CBC or OP_ALG_AAI_CTR_MOD128.
+ * @adata: pointer to authentication transform definitions. Note that since a
+ * split key is to be used, the size of the split key itself is
+ * specified. Valid algorithm values - one of OP_ALG_ALGSEL_{MD5, SHA1,
+ * SHA224, SHA256, SHA384, SHA512} ANDed with OP_ALG_AAI_HMAC_PRECOMP.
+ * @icvsize: integrity check value (ICV) size (truncated or full)
+ * @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template
+ * @nonce: pointer to rfc3686 nonce
+ * @ctx1_iv_off: IV offset in CONTEXT1 register
+ *
+ * Note: Requires an MDHA split key.
+ */
+void cnstr_shdsc_aead_encap(u32 * const desc, struct alginfo *cdata,
+ struct alginfo *adata, unsigned int icvsize,
+ const bool is_rfc3686, u32 *nonce,
+ const u32 ctx1_iv_off)
+{
+ /* Note: Context registers are saved. */
+ init_sh_desc_key_aead(desc, cdata, adata, is_rfc3686, nonce);
+
+ /* Class 2 operation */
+ append_operation(desc, adata->algtype | OP_ALG_AS_INITFINAL |
+ OP_ALG_ENCRYPT);
+
+ /* Read and write assoclen bytes */
+ append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
+ append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
+
+ /* Skip assoc data */
+ append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
+
+ /* read assoc before reading payload */
+ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
+ FIFOLDST_VLF);
+
+ /* Load Counter into CONTEXT1 reg */
+ if (is_rfc3686)
+ append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
+ LDST_SRCDST_BYTE_CONTEXT |
+ ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
+ LDST_OFFSET_SHIFT));
+
+ /* Class 1 operation */
+ append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
+ OP_ALG_ENCRYPT);
+
+ /* Read and write cryptlen bytes */
+ append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
+ append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
+ aead_append_src_dst(desc, FIFOLD_TYPE_MSG1OUT2);
+
+ /* Write ICV */
+ append_seq_store(desc, icvsize, LDST_CLASS_2_CCB |
+ LDST_SRCDST_BYTE_CONTEXT);
+
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "aead enc shdesc@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
+#endif
+}
+EXPORT_SYMBOL(cnstr_shdsc_aead_encap);
+
+/**
+ * cnstr_shdsc_aead_decap - IPSec ESP decapsulation shared descriptor
+ * (non-protocol).
+ * @desc: pointer to buffer used for descriptor construction
+ * @cdata: pointer to block cipher transform definitions
+ * Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ANDed
+ * with OP_ALG_AAI_CBC or OP_ALG_AAI_CTR_MOD128.
+ * @adata: pointer to authentication transform definitions. Note that since a
+ * split key is to be used, the size of the split key itself is
+ * specified. Valid algorithm values - one of OP_ALG_ALGSEL_{MD5, SHA1,
+ * SHA224, SHA256, SHA384, SHA512} ANDed with OP_ALG_AAI_HMAC_PRECOMP.
+ * @ivsize: initialization vector size
+ * @icvsize: integrity check value (ICV) size (truncated or full)
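+ * @geniv: true when the IV is part of the input buffer (geniv aead algorithms)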
+ * @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template
+ * @nonce: pointer to rfc3686 nonce
+ * @ctx1_iv_off: IV offset in CONTEXT1 register
+ *
+ * Note: Requires an MDHA split key.
+ */
+void cnstr_shdsc_aead_decap(u32 * const desc, struct alginfo *cdata,
+ struct alginfo *adata, unsigned int ivsize,
+ unsigned int icvsize, const bool geniv,
+ const bool is_rfc3686, u32 *nonce,
+ const u32 ctx1_iv_off)
+{
+ /* Note: Context registers are saved. */
+ init_sh_desc_key_aead(desc, cdata, adata, is_rfc3686, nonce);
+
+ /* Class 2 operation */
+ append_operation(desc, adata->algtype | OP_ALG_AS_INITFINAL |
+ OP_ALG_DECRYPT | OP_ALG_ICV_ON);
+
+ /* Read and write assoclen bytes */
+ append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
+ if (geniv)
+ append_math_add_imm_u32(desc, VARSEQOUTLEN, REG3, IMM, ivsize);
+ else
+ append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
+
+ /* Skip assoc data */
+ append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
+
+ /* read assoc before reading payload */
+ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
+ KEY_VLF);
+
+ if (geniv) {
+ append_seq_load(desc, ivsize, LDST_CLASS_1_CCB |
+ LDST_SRCDST_BYTE_CONTEXT |
+ (ctx1_iv_off << LDST_OFFSET_SHIFT));
+ append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_CLASS2INFIFO |
+ (ctx1_iv_off << MOVE_OFFSET_SHIFT) | ivsize);
+ }
+
+ /* Load Counter into CONTEXT1 reg */
+ if (is_rfc3686)
+ append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
+ LDST_SRCDST_BYTE_CONTEXT |
+ ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
+ LDST_OFFSET_SHIFT));
+
+ /* Choose operation */
+ if (ctx1_iv_off)
+ append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
+ OP_ALG_DECRYPT);
+ else
+ append_dec_op1(desc, cdata->algtype);
+
+ /* Read and write cryptlen bytes */
+ append_math_add(desc, VARSEQINLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
+ append_math_add(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
+ aead_append_src_dst(desc, FIFOLD_TYPE_MSG);
+
+ /* Load ICV */
+ append_seq_fifo_load(desc, icvsize, FIFOLD_CLASS_CLASS2 |
+ FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV);
+
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "aead dec shdesc@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
+#endif
+}
+EXPORT_SYMBOL(cnstr_shdsc_aead_decap);
+
+/**
+ * cnstr_shdsc_aead_givencap - IPSec ESP encapsulation shared descriptor
+ * (non-protocol) with HW-generated initialization
+ * vector.
+ * @desc: pointer to buffer used for descriptor construction
+ * @cdata: pointer to block cipher transform definitions
+ * Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ANDed
+ * with OP_ALG_AAI_CBC or OP_ALG_AAI_CTR_MOD128.
+ * @adata: pointer to authentication transform definitions. Note that since a
+ * split key is to be used, the size of the split key itself is
+ * specified. Valid algorithm values - one of OP_ALG_ALGSEL_{MD5, SHA1,
+ * SHA224, SHA256, SHA384, SHA512} ANDed with OP_ALG_AAI_HMAC_PRECOMP.
+ * @ivsize: initialization vector size
+ * @icvsize: integrity check value (ICV) size (truncated or full)
+ * @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template
+ * @nonce: pointer to rfc3686 nonce
+ * @ctx1_iv_off: IV offset in CONTEXT1 register
+ *
+ * Note: Requires an MDHA split key.
+ */
+void cnstr_shdsc_aead_givencap(u32 * const desc, struct alginfo *cdata,
+ struct alginfo *adata, unsigned int ivsize,
+ unsigned int icvsize, const bool is_rfc3686,
+ u32 *nonce, const u32 ctx1_iv_off)
+{
+ u32 geniv, moveiv;
+
+ /* Note: Context registers are saved. */
+ init_sh_desc_key_aead(desc, cdata, adata, is_rfc3686, nonce);
+
+ if (is_rfc3686)
+ goto copy_iv;
+
+ /* Generate IV */
+ geniv = NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DEST_DECO |
+ NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_LC1 |
+ NFIFOENTRY_PTYPE_RND | (ivsize << NFIFOENTRY_DLEN_SHIFT);
+ append_load_imm_u32(desc, geniv, LDST_CLASS_IND_CCB |
+ LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
+ append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
+ append_move(desc, MOVE_WAITCOMP |
+ MOVE_SRC_INFIFO | MOVE_DEST_CLASS1CTX |
+ (ctx1_iv_off << MOVE_OFFSET_SHIFT) |
+ (ivsize << MOVE_LEN_SHIFT));
+ append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
+
+copy_iv:
+ /* Copy IV to class 1 context */
+ append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_OUTFIFO |
+ (ctx1_iv_off << MOVE_OFFSET_SHIFT) |
+ (ivsize << MOVE_LEN_SHIFT));
+
+ /* Return to encryption */
+ append_operation(desc, adata->algtype | OP_ALG_AS_INITFINAL |
+ OP_ALG_ENCRYPT);
+
+ /* Read and write assoclen bytes */
+ append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
+ append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
+
+ /* Skip assoc data */
+ append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
+
+ /* read assoc before reading payload */
+ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
+ KEY_VLF);
+
+ /* Copy IV from outfifo to class 2 fifo */
+ moveiv = NFIFOENTRY_STYPE_OFIFO | NFIFOENTRY_DEST_CLASS2 |
+ NFIFOENTRY_DTYPE_MSG | (ivsize << NFIFOENTRY_DLEN_SHIFT);
+ append_load_imm_u32(desc, moveiv, LDST_CLASS_IND_CCB |
+ LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
+ append_load_imm_u32(desc, ivsize, LDST_CLASS_2_CCB |
+ LDST_SRCDST_WORD_DATASZ_REG | LDST_IMM);
+
+ /* Load Counter into CONTEXT1 reg */
+ if (is_rfc3686)
+ append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
+ LDST_SRCDST_BYTE_CONTEXT |
+ ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
+ LDST_OFFSET_SHIFT));
+
+ /* Class 1 operation */
+ append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
+ OP_ALG_ENCRYPT);
+
+ /* Will write ivsize + cryptlen */
+ append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
+
+ /* No need to reload IV */
+ append_seq_fifo_load(desc, ivsize,
+ FIFOLD_CLASS_SKIP);
+
+ /* Will read cryptlen */
+ append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
+ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH | KEY_VLF |
+ FIFOLD_TYPE_MSG1OUT2 | FIFOLD_TYPE_LASTBOTH);
+ append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
+
+ /* Write ICV */
+ append_seq_store(desc, icvsize, LDST_CLASS_2_CCB |
+ LDST_SRCDST_BYTE_CONTEXT);
+
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR,
+ "aead givenc shdesc@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
+#endif
+}
+EXPORT_SYMBOL(cnstr_shdsc_aead_givencap);
+
+/**
+ * cnstr_shdsc_gcm_encap - gcm encapsulation shared descriptor
+ * @desc: pointer to buffer used for descriptor construction
+ * @cdata: pointer to block cipher transform definitions
+ * Valid algorithm values - OP_ALG_ALGSEL_AES ANDed with OP_ALG_AAI_GCM.
+ * @icvsize: integrity check value (ICV) size (truncated or full)
+ */
+void cnstr_shdsc_gcm_encap(u32 * const desc, struct alginfo *cdata,
+ unsigned int icvsize)
+{
+ u32 *key_jump_cmd, *zero_payload_jump_cmd, *zero_assoc_jump_cmd1,
+ *zero_assoc_jump_cmd2;
+
+ init_sh_desc(desc, HDR_SHARE_SERIAL);
+
+ /* skip key loading if it is loaded due to sharing */
+ key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+ JUMP_COND_SHRD | JUMP_COND_SELF);
+ if (cdata->key_inline)
+ append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
+ cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG);
+ else
+ append_key(desc, cdata->key_dma, cdata->keylen, CLASS_1 |
+ KEY_DEST_CLASS_REG);
+ set_jump_tgt_here(desc, key_jump_cmd);
+
+ /* class 1 operation */
+ append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
+ OP_ALG_ENCRYPT);
+
+ /* if assoclen + cryptlen is ZERO, skip to ICV write */
+ append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
+ zero_assoc_jump_cmd2 = append_jump(desc, JUMP_TEST_ALL |
+ JUMP_COND_MATH_Z);
+
+ /* if assoclen is ZERO, skip reading the assoc data */
+ append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
+ zero_assoc_jump_cmd1 = append_jump(desc, JUMP_TEST_ALL |
+ JUMP_COND_MATH_Z);
+
+ append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
+
+ /* skip assoc data */
+ append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
+
+ /* cryptlen = seqinlen - assoclen */
+ append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG3, CAAM_CMD_SZ);
+
+ /* if cryptlen is ZERO jump to zero-payload commands */
+ zero_payload_jump_cmd = append_jump(desc, JUMP_TEST_ALL |
+ JUMP_COND_MATH_Z);
+
+ /* read assoc data */
+ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
+ FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
+ set_jump_tgt_here(desc, zero_assoc_jump_cmd1);
+
+ append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
+
+ /* write encrypted data */
+ append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
+
+ /* read payload data */
+ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
+ FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);
+
+ /* jump over the zero-payload commands */
+ append_jump(desc, JUMP_TEST_ALL | 2);
+
+ /* zero-payload commands */
+ set_jump_tgt_here(desc, zero_payload_jump_cmd);
+
+ /* read assoc data */
+ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
+ FIFOLD_TYPE_AAD | FIFOLD_TYPE_LAST1);
+
+ /* There is no input data */
+ set_jump_tgt_here(desc, zero_assoc_jump_cmd2);
+
+ /* write ICV */
+ append_seq_store(desc, icvsize, LDST_CLASS_1_CCB |
+ LDST_SRCDST_BYTE_CONTEXT);
+
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "gcm enc shdesc@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
+#endif
+}
+EXPORT_SYMBOL(cnstr_shdsc_gcm_encap);
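
[The key_inline branch in the constructor above trades descriptor space for a DMA fetch: append_key_as_imm() embeds the key bytes in the shared descriptor, while append_key() only references them via key_dma. Whether inlining fits depends on the CAAM descriptor length limit of 64 words. A hedged sketch of that decision; the helper and its threshold arithmetic are illustrative, not the driver's actual computation:]

	#include <stdbool.h>
	#include <stdio.h>

	#define CAAM_CMD_SZ	4	/* descriptor word size, as in the driver */
	#define MAX_DESC_WORDS	64	/* CAAM descriptor length limit */

	/* Illustrative helper; the driver derives this from the real
	 * descriptor layout rather than a simple word count. */
	static bool key_fits_inline(unsigned int desc_words, unsigned int keylen)
	{
		unsigned int key_words = (keylen + CAAM_CMD_SZ - 1) / CAAM_CMD_SZ;

		/* +1 for the KEY command word itself */
		return desc_words + 1 + key_words <= MAX_DESC_WORDS;
	}

	int main(void)
	{
		printf("%d\n", key_fits_inline(40, 32));	/* 1: inline */
		printf("%d\n", key_fits_inline(62, 32));	/* 0: use key_dma */
		return 0;
	}
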
+
+/**
+ * cnstr_shdsc_gcm_decap - gcm decapsulation shared descriptor
+ * @desc: pointer to buffer used for descriptor construction
+ * @cdata: pointer to block cipher transform definitions
+ * Valid algorithm values - OP_ALG_ALGSEL_AES ANDed with OP_ALG_AAI_GCM.
+ * @icvsize: integrity check value (ICV) size (truncated or full)
+ */
+void cnstr_shdsc_gcm_decap(u32 * const desc, struct alginfo *cdata,
+ unsigned int icvsize)
+{
+ u32 *key_jump_cmd, *zero_payload_jump_cmd, *zero_assoc_jump_cmd1;
+
+ init_sh_desc(desc, HDR_SHARE_SERIAL);
+
+ /* skip key loading if it is loaded due to sharing */
+ key_jump_cmd = append_jump(desc, JUMP_JSL |
+ JUMP_TEST_ALL | JUMP_COND_SHRD |
+ JUMP_COND_SELF);
+ if (cdata->key_inline)
+ append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
+ cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG);
+ else
+ append_key(desc, cdata->key_dma, cdata->keylen, CLASS_1 |
+ KEY_DEST_CLASS_REG);
+ set_jump_tgt_here(desc, key_jump_cmd);
+
+ /* class 1 operation */
+ append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
+ OP_ALG_DECRYPT | OP_ALG_ICV_ON);
+
+ /* if assoclen is ZERO, skip reading the assoc data */
+ append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
+ zero_assoc_jump_cmd1 = append_jump(desc, JUMP_TEST_ALL |
+ JUMP_COND_MATH_Z);
+
+ append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
+
+ /* skip assoc data */
+ append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
+
+ /* read assoc data */
+ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
+ FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
+
+ set_jump_tgt_here(desc, zero_assoc_jump_cmd1);
+
+ /* cryptlen = seqoutlen - assoclen */
+ append_math_sub(desc, VARSEQINLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
+
+ /* jump to zero-payload command if cryptlen is zero */
+ zero_payload_jump_cmd = append_jump(desc, JUMP_TEST_ALL |
+ JUMP_COND_MATH_Z);
+
+ append_math_sub(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
+
+ /* store encrypted data */
+ append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
+
+ /* read payload data */
+ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
+ FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);
+
+ /* zero-payload command */
+ set_jump_tgt_here(desc, zero_payload_jump_cmd);
+
+ /* read ICV */
+ append_seq_fifo_load(desc, icvsize, FIFOLD_CLASS_CLASS1 |
+ FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1);
+
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "gcm dec shdesc@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
+#endif
+}
+EXPORT_SYMBOL(cnstr_shdsc_gcm_decap);
+
+/**
+ * cnstr_shdsc_rfc4106_encap - IPSec ESP gcm encapsulation shared descriptor
+ * (non-protocol).
+ * @desc: pointer to buffer used for descriptor construction
+ * @cdata: pointer to block cipher transform definitions
+ * Valid algorithm values - OP_ALG_ALGSEL_AES ANDed with OP_ALG_AAI_GCM.
+ * @icvsize: integrity check value (ICV) size (truncated or full)
+ */
+void cnstr_shdsc_rfc4106_encap(u32 * const desc, struct alginfo *cdata,
+ unsigned int icvsize)
+{
+ u32 *key_jump_cmd;
+
+ init_sh_desc(desc, HDR_SHARE_SERIAL);
+
+ /* Skip key loading if it is loaded due to sharing */
+ key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+ JUMP_COND_SHRD);
+ if (cdata->key_inline)
+ append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
+ cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG);
+ else
+ append_key(desc, cdata->key_dma, cdata->keylen, CLASS_1 |
+ KEY_DEST_CLASS_REG);
+ set_jump_tgt_here(desc, key_jump_cmd);
+
+ /* Class 1 operation */
+ append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
+ OP_ALG_ENCRYPT);
+
+ append_math_sub_imm_u32(desc, VARSEQINLEN, REG3, IMM, 8);
+ append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
+
+ /* Read assoc data */
+ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
+ FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
+
+ /* Skip IV */
+ append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP);
+
+ /* Will read cryptlen bytes */
+ append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
+
+ /* Workaround for erratum A-005473 (simultaneous SEQ FIFO skips) */
+ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_MSG);
+
+ /* Skip assoc data */
+ append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
+
+ /* cryptlen = seqoutlen - assoclen */
+ append_math_sub(desc, VARSEQOUTLEN, VARSEQINLEN, REG0, CAAM_CMD_SZ);
+
+ /* Write encrypted data */
+ append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
+
+ /* Read payload data */
+ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
+ FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);
+
+ /* Write ICV */
+ append_seq_store(desc, icvsize, LDST_CLASS_1_CCB |
+ LDST_SRCDST_BYTE_CONTEXT);
+
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR,
+ "rfc4106 enc shdesc@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
+#endif
+}
+EXPORT_SYMBOL(cnstr_shdsc_rfc4106_encap);
+
+/**
+ * cnstr_shdsc_rfc4106_decap - IPSec ESP gcm decapsulation shared descriptor
+ * (non-protocol).
+ * @desc: pointer to buffer used for descriptor construction
+ * @cdata: pointer to block cipher transform definitions
+ * Valid algorithm values - OP_ALG_ALGSEL_AES ANDed with OP_ALG_AAI_GCM.
+ * @icvsize: integrity check value (ICV) size (truncated or full)
+ */
+void cnstr_shdsc_rfc4106_decap(u32 * const desc, struct alginfo *cdata,
+ unsigned int icvsize)
+{
+ u32 *key_jump_cmd;
+
+ init_sh_desc(desc, HDR_SHARE_SERIAL);
+
+ /* Skip key loading if it is loaded due to sharing */
+ key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+ JUMP_COND_SHRD);
+ if (cdata->key_inline)
+ append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
+ cdata->keylen, CLASS_1 |
+ KEY_DEST_CLASS_REG);
+ else
+ append_key(desc, cdata->key_dma, cdata->keylen, CLASS_1 |
+ KEY_DEST_CLASS_REG);
+ set_jump_tgt_here(desc, key_jump_cmd);
+
+ /* Class 1 operation */
+ append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
+ OP_ALG_DECRYPT | OP_ALG_ICV_ON);
+
+ append_math_sub_imm_u32(desc, VARSEQINLEN, REG3, IMM, 8);
+ append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
+
+ /* Read assoc data */
+ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
+ FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
+
+ /* Skip IV */
+ append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP);
+
+ /* Will read cryptlen bytes */
+ append_math_sub(desc, VARSEQINLEN, SEQOUTLEN, REG3, CAAM_CMD_SZ);
+
+ /* Workaround for erratum A-005473 (simultaneous SEQ FIFO skips) */
+ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_MSG);
+
+ /* Skip assoc data */
+ append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
+
+ /* Will write cryptlen bytes */
+ append_math_sub(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
+
+ /* Store payload data */
+ append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
+
+ /* Read encrypted data */
+ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
+ FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);
+
+ /* Read ICV */
+ append_seq_fifo_load(desc, icvsize, FIFOLD_CLASS_CLASS1 |
+ FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1);
+
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR,
+ "rfc4106 dec shdesc@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
+#endif
+}
+EXPORT_SYMBOL(cnstr_shdsc_rfc4106_decap);
+
+/**
+ * cnstr_shdsc_rfc4543_encap - IPSec ESP gmac encapsulation shared descriptor
+ * (non-protocol).
+ * @desc: pointer to buffer used for descriptor construction
+ * @cdata: pointer to block cipher transform definitions
+ * Valid algorithm values - OP_ALG_ALGSEL_AES ANDed with OP_ALG_AAI_GCM.
+ * @icvsize: integrity check value (ICV) size (truncated or full)
+ */
+void cnstr_shdsc_rfc4543_encap(u32 * const desc, struct alginfo *cdata,
+ unsigned int icvsize)
+{
+ u32 *key_jump_cmd, *read_move_cmd, *write_move_cmd;
+
+ init_sh_desc(desc, HDR_SHARE_SERIAL);
+
+ /* Skip key loading if it is loaded due to sharing */
+ key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+ JUMP_COND_SHRD);
+ if (cdata->key_inline)
+ append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
+ cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG);
+ else
+ append_key(desc, cdata->key_dma, cdata->keylen, CLASS_1 |
+ KEY_DEST_CLASS_REG);
+ set_jump_tgt_here(desc, key_jump_cmd);
+
+ /* Class 1 operation */
+ append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
+ OP_ALG_ENCRYPT);
+
+ /* assoclen + cryptlen = seqinlen */
+ append_math_sub(desc, REG3, SEQINLEN, REG0, CAAM_CMD_SZ);
+
+ /*
+ * MOVE_LEN opcode is not available in all SEC HW revisions,
+ * thus need to do some magic, i.e. self-patch the descriptor
+ * buffer.
+ */
+ read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_MATH3 |
+ (0x6 << MOVE_LEN_SHIFT));
+ write_move_cmd = append_move(desc, MOVE_SRC_MATH3 | MOVE_DEST_DESCBUF |
+ (0x8 << MOVE_LEN_SHIFT));
+
+ /* Will read assoclen + cryptlen bytes */
+ append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
+
+ /* Will write assoclen + cryptlen bytes */
+ append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
+
+ /* Read and write assoclen + cryptlen bytes */
+ aead_append_src_dst(desc, FIFOLD_TYPE_AAD);
+
+ set_move_tgt_here(desc, read_move_cmd);
+ set_move_tgt_here(desc, write_move_cmd);
+ append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
+ /* Move payload data to OFIFO */
+ append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO);
+
+ /* Write ICV */
+ append_seq_store(desc, icvsize, LDST_CLASS_1_CCB |
+ LDST_SRCDST_BYTE_CONTEXT);
+
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR,
+ "rfc4543 enc shdesc@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
+#endif
+}
+EXPORT_SYMBOL(cnstr_shdsc_rfc4543_encap);
+
+/**
+ * cnstr_shdsc_rfc4543_decap - IPSec ESP gmac decapsulation shared descriptor
+ * (non-protocol).
+ * @desc: pointer to buffer used for descriptor construction
+ * @cdata: pointer to block cipher transform definitions
+ * Valid algorithm values - OP_ALG_ALGSEL_AES ANDed with OP_ALG_AAI_GCM.
+ * @icvsize: integrity check value (ICV) size (truncated or full)
+ */
+void cnstr_shdsc_rfc4543_decap(u32 * const desc, struct alginfo *cdata,
+ unsigned int icvsize)
+{
+ u32 *key_jump_cmd, *read_move_cmd, *write_move_cmd;
+
+ init_sh_desc(desc, HDR_SHARE_SERIAL);
+
+ /* Skip key loading if it is loaded due to sharing */
+ key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+ JUMP_COND_SHRD);
+ if (cdata->key_inline)
+ append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
+ cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG);
+ else
+ append_key(desc, cdata->key_dma, cdata->keylen, CLASS_1 |
+ KEY_DEST_CLASS_REG);
+ set_jump_tgt_here(desc, key_jump_cmd);
+
+ /* Class 1 operation */
+ append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
+ OP_ALG_DECRYPT | OP_ALG_ICV_ON);
+
+ /* assoclen + cryptlen = seqoutlen */
+ append_math_sub(desc, REG3, SEQOUTLEN, REG0, CAAM_CMD_SZ);
+
+ /*
+ * MOVE_LEN opcode is not available in all SEC HW revisions,
+ * thus need to do some magic, i.e. self-patch the descriptor
+ * buffer.
+ */
+ read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_MATH3 |
+ (0x6 << MOVE_LEN_SHIFT));
+ write_move_cmd = append_move(desc, MOVE_SRC_MATH3 | MOVE_DEST_DESCBUF |
+ (0x8 << MOVE_LEN_SHIFT));
+
+ /* Will read assoclen + cryptlen bytes */
+ append_math_sub(desc, VARSEQINLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
+
+ /* Will write assoclen + cryptlen bytes */
+ append_math_sub(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
+
+ /* Store payload data */
+ append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
+
+ /* In-snoop assoclen + cryptlen data */
+ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH | FIFOLDST_VLF |
+ FIFOLD_TYPE_AAD | FIFOLD_TYPE_LAST2FLUSH1);
+
+ set_move_tgt_here(desc, read_move_cmd);
+ set_move_tgt_here(desc, write_move_cmd);
+ append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
+ /* Move payload data to OFIFO */
+ append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO);
+ append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
+
+ /* Read ICV */
+ append_seq_fifo_load(desc, icvsize, FIFOLD_CLASS_CLASS1 |
+ FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1);
+
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR,
+ "rfc4543 dec shdesc@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
+#endif
+}
+EXPORT_SYMBOL(cnstr_shdsc_rfc4543_decap);
+
+/*
+ * For ablkcipher encrypt and decrypt, read from req->src and
+ * write to req->dst
+ */
+static inline void ablkcipher_append_src_dst(u32 *desc)
+{
+ append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
+ append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
+ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 |
+ KEY_VLF | FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);
+ append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
+}
+
+/**
+ * cnstr_shdsc_ablkcipher_encap - ablkcipher encapsulation shared descriptor
+ * @desc: pointer to buffer used for descriptor construction
+ * @cdata: pointer to block cipher transform definitions
+ * Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ANDed
+ * with OP_ALG_AAI_CBC or OP_ALG_AAI_CTR_MOD128.
+ * @ivsize: initialization vector size
+ * @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template
+ * @ctx1_iv_off: IV offset in CONTEXT1 register
+ */
+void cnstr_shdsc_ablkcipher_encap(u32 * const desc, struct alginfo *cdata,
+ unsigned int ivsize, const bool is_rfc3686,
+ const u32 ctx1_iv_off)
+{
+ u32 *key_jump_cmd;
+
+ init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
+ /* Skip if already shared */
+ key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+ JUMP_COND_SHRD);
+
+ /* Load class1 key only */
+ append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
+ cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG);
+
+ /* Load nonce into CONTEXT1 reg */
+ if (is_rfc3686) {
+ u8 *nonce = cdata->key_virt + cdata->keylen;
+
+ append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
+ LDST_CLASS_IND_CCB |
+ LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
+ append_move(desc, MOVE_WAITCOMP | MOVE_SRC_OUTFIFO |
+ MOVE_DEST_CLASS1CTX | (16 << MOVE_OFFSET_SHIFT) |
+ (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
+ }
+
+ set_jump_tgt_here(desc, key_jump_cmd);
+
+ /* Load iv */
+ append_seq_load(desc, ivsize, LDST_SRCDST_BYTE_CONTEXT |
+ LDST_CLASS_1_CCB | (ctx1_iv_off << LDST_OFFSET_SHIFT));
+
+ /* Load counter into CONTEXT1 reg */
+ if (is_rfc3686)
+ append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
+ LDST_SRCDST_BYTE_CONTEXT |
+ ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
+ LDST_OFFSET_SHIFT));
+
+ /* Load operation */
+ append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
+ OP_ALG_ENCRYPT);
+
+ /* Perform operation */
+ ablkcipher_append_src_dst(desc);
+
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR,
+ "ablkcipher enc shdesc@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
+#endif
+}
+EXPORT_SYMBOL(cnstr_shdsc_ablkcipher_encap);
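
[A hedged usage sketch for the ablkcipher constructor above; ctx, ablkcipher and keylen are assumed from a driver-side setkey() path and are not defined here, and the cbc(aes) values are illustrative. Note that this constructor always inlines the key (append_key_as_imm() above), so key_virt must point at valid key material:]

	/* Sketch only -- caller-side context is assumed, not defined here */
	struct alginfo cdata = {
		.algtype    = OP_TYPE_CLASS1_ALG | OP_ALG_ALGSEL_AES |
			      OP_ALG_AAI_CBC,
		.keylen     = keylen,		/* AES key length from setkey() */
		.key_virt   = ctx->key,		/* key is always inlined here */
		.key_inline = true,
	};

	cnstr_shdsc_ablkcipher_encap(ctx->sh_desc_enc, &cdata,
				     crypto_ablkcipher_ivsize(ablkcipher),
				     false /* !rfc3686 */, 0 /* ctx1_iv_off */);
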
+
+/**
+ * cnstr_shdsc_ablkcipher_decap - ablkcipher decapsulation shared descriptor
+ * @desc: pointer to buffer used for descriptor construction
+ * @cdata: pointer to block cipher transform definitions
+ * Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ANDed
+ * with OP_ALG_AAI_CBC or OP_ALG_AAI_CTR_MOD128.
+ * @ivsize: initialization vector size
+ * @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template
+ * @ctx1_iv_off: IV offset in CONTEXT1 register
+ */
+void cnstr_shdsc_ablkcipher_decap(u32 * const desc, struct alginfo *cdata,
+ unsigned int ivsize, const bool is_rfc3686,
+ const u32 ctx1_iv_off)
+{
+ u32 *key_jump_cmd;
+
+ init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
+ /* Skip if already shared */
+ key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+ JUMP_COND_SHRD);
+
+ /* Load class1 key only */
+ append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
+ cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG);
+
+ /* Load nonce into CONTEXT1 reg */
+ if (is_rfc3686) {
+ u8 *nonce = cdata->key_virt + cdata->keylen;
+
+ append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
+ LDST_CLASS_IND_CCB |
+ LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
+ append_move(desc, MOVE_WAITCOMP | MOVE_SRC_OUTFIFO |
+ MOVE_DEST_CLASS1CTX | (16 << MOVE_OFFSET_SHIFT) |
+ (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
+ }
+
+ set_jump_tgt_here(desc, key_jump_cmd);
+
+ /* load IV */
+ append_seq_load(desc, ivsize, LDST_SRCDST_BYTE_CONTEXT |
+ LDST_CLASS_1_CCB | (ctx1_iv_off << LDST_OFFSET_SHIFT));
+
+ /* Load counter into CONTEXT1 reg */
+ if (is_rfc3686)
+ append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
+ LDST_SRCDST_BYTE_CONTEXT |
+ ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
+ LDST_OFFSET_SHIFT));
+
+ /* Choose operation */
+ if (ctx1_iv_off)
+ append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
+ OP_ALG_DECRYPT);
+ else
+ append_dec_op1(desc, cdata->algtype);
+
+ /* Perform operation */
+ ablkcipher_append_src_dst(desc);
+
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR,
+ "ablkcipher dec shdesc@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
+#endif
+}
+EXPORT_SYMBOL(cnstr_shdsc_ablkcipher_decap);
+
+/**
+ * cnstr_shdsc_ablkcipher_givencap - ablkcipher encapsulation shared descriptor
+ * with HW-generated initialization vector.
+ * @desc: pointer to buffer used for descriptor construction
+ * @cdata: pointer to block cipher transform definitions
+ * Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ORed
+ * with OP_ALG_AAI_CBC.
+ * @ivsize: initialization vector size
+ * @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template
+ * @ctx1_iv_off: IV offset in CONTEXT1 register
+ */
+void cnstr_shdsc_ablkcipher_givencap(u32 * const desc, struct alginfo *cdata,
+ unsigned int ivsize, const bool is_rfc3686,
+ const u32 ctx1_iv_off)
+{
+ u32 *key_jump_cmd, geniv;
+
+ init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
+ /* Skip if already shared */
+ key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+ JUMP_COND_SHRD);
+
+ /* Load class1 key only */
+ append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
+ cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG);
+
+ /* Load nonce into CONTEXT1 reg */
+ if (is_rfc3686) {
+ u8 *nonce = cdata->key_virt + cdata->keylen;
+
+ append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
+ LDST_CLASS_IND_CCB |
+ LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
+ append_move(desc, MOVE_WAITCOMP | MOVE_SRC_OUTFIFO |
+ MOVE_DEST_CLASS1CTX | (16 << MOVE_OFFSET_SHIFT) |
+ (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
+ }
+ set_jump_tgt_here(desc, key_jump_cmd);
+
+ /* Generate IV */
+ geniv = NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DEST_DECO |
+ NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_LC1 | NFIFOENTRY_PTYPE_RND |
+ (ivsize << NFIFOENTRY_DLEN_SHIFT);
+ append_load_imm_u32(desc, geniv, LDST_CLASS_IND_CCB |
+ LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
+ append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
+ append_move(desc, MOVE_WAITCOMP | MOVE_SRC_INFIFO |
+ MOVE_DEST_CLASS1CTX | (ivsize << MOVE_LEN_SHIFT) |
+ (ctx1_iv_off << MOVE_OFFSET_SHIFT));
+ append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
+
+ /* Copy generated IV to memory */
+ append_seq_store(desc, ivsize, LDST_SRCDST_BYTE_CONTEXT |
+ LDST_CLASS_1_CCB | (ctx1_iv_off << LDST_OFFSET_SHIFT));
+
+ /* Load counter into CONTEXT1 reg */
+ if (is_rfc3686)
+ append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
+ LDST_SRCDST_BYTE_CONTEXT |
+ ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
+ LDST_OFFSET_SHIFT));
+
+ if (ctx1_iv_off)
+ append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | JUMP_COND_NCP |
+ (1 << JUMP_OFFSET_SHIFT));
+
+ /* Load operation */
+ append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
+ OP_ALG_ENCRYPT);
+
+ /* Perform operation */
+ ablkcipher_append_src_dst(desc);
+
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR,
+ "ablkcipher givenc shdesc@" __stringify(__LINE__) ": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
+#endif
+}
+EXPORT_SYMBOL(cnstr_shdsc_ablkcipher_givencap);
+
+/**
+ * cnstr_shdsc_xts_ablkcipher_encap - xts ablkcipher encapsulation shared
+ * descriptor
+ * @desc: pointer to buffer used for descriptor construction
+ * @cdata: pointer to block cipher transform definitions
+ * Valid algorithm values - OP_ALG_ALGSEL_AES ORed with OP_ALG_AAI_XTS.
+ */
+void cnstr_shdsc_xts_ablkcipher_encap(u32 * const desc, struct alginfo *cdata)
+{
+ __be64 sector_size = cpu_to_be64(512);
+ u32 *key_jump_cmd;
+
+ init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
+ /* Skip if already shared */
+ key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+ JUMP_COND_SHRD);
+
+ /* Load class1 keys only */
+ append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
+ cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG);
+
+ /* Load sector size with index 40 bytes (0x28) */
+ append_load_as_imm(desc, (void *)&sector_size, 8, LDST_CLASS_1_CCB |
+ LDST_SRCDST_BYTE_CONTEXT |
+ (0x28 << LDST_OFFSET_SHIFT));
+
+ set_jump_tgt_here(desc, key_jump_cmd);
+
+ /*
+ * create sequence for loading the sector index
+ * Upper 8B of IV - will be used as sector index
+ * Lower 8B of IV - will be discarded
+ */
+ append_seq_load(desc, 8, LDST_SRCDST_BYTE_CONTEXT | LDST_CLASS_1_CCB |
+ (0x20 << LDST_OFFSET_SHIFT));
+ append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP);
+
+ /* Load operation */
+ append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
+ OP_ALG_ENCRYPT);
+
+ /* Perform operation */
+ ablkcipher_append_src_dst(desc);
+
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR,
+ "xts ablkcipher enc shdesc@" __stringify(__LINE__) ": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
+#endif
+}
+EXPORT_SYMBOL(cnstr_shdsc_xts_ablkcipher_encap);
+
+/**
+ * cnstr_shdsc_xts_ablkcipher_decap - xts ablkcipher decapsulation shared
+ * descriptor
+ * @desc: pointer to buffer used for descriptor construction
+ * @cdata: pointer to block cipher transform definitions
+ * Valid algorithm values - OP_ALG_ALGSEL_AES ORed with OP_ALG_AAI_XTS.
+ */
+void cnstr_shdsc_xts_ablkcipher_decap(u32 * const desc, struct alginfo *cdata)
+{
+ __be64 sector_size = cpu_to_be64(512);
+ u32 *key_jump_cmd;
+
+ init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
+ /* Skip if already shared */
+ key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+ JUMP_COND_SHRD);
+
+ /* Load class1 keys only */
+ append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
+ cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG);
+
+ /* Load sector size with index 40 bytes (0x28) */
+ append_load_as_imm(desc, (void *)&sector_size, 8, LDST_CLASS_1_CCB |
+ LDST_SRCDST_BYTE_CONTEXT |
+ (0x28 << LDST_OFFSET_SHIFT));
+
+ set_jump_tgt_here(desc, key_jump_cmd);
+
+ /*
+ * create sequence for loading the sector index
+ * Upper 8B of IV - will be used as sector index
+ * Lower 8B of IV - will be discarded
+ */
+ append_seq_load(desc, 8, LDST_SRCDST_BYTE_CONTEXT | LDST_CLASS_1_CCB |
+ (0x20 << LDST_OFFSET_SHIFT));
+ append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP);
+
+ /* Load operation */
+ append_dec_op1(desc, cdata->algtype);
+
+ /* Perform operation */
+ ablkcipher_append_src_dst(desc);
+
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR,
+ "xts ablkcipher dec shdesc@" __stringify(__LINE__) ": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
+#endif
+}
+EXPORT_SYMBOL(cnstr_shdsc_xts_ablkcipher_decap);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("FSL CAAM descriptor support");
+MODULE_AUTHOR("Freescale Semiconductor - NMG/STC");
diff --git a/drivers/crypto/caam/caamalg_desc.h b/drivers/crypto/caam/caamalg_desc.h
new file mode 100644
index 000000000000..95551737333a
--- /dev/null
+++ b/drivers/crypto/caam/caamalg_desc.h
@@ -0,0 +1,97 @@
+/*
+ * Shared descriptors for aead, ablkcipher algorithms
+ *
+ * Copyright 2016 NXP
+ */
+
+#ifndef _CAAMALG_DESC_H_
+#define _CAAMALG_DESC_H_
+
+/* length of descriptors text */
+#define DESC_AEAD_BASE (4 * CAAM_CMD_SZ)
+#define DESC_AEAD_ENC_LEN (DESC_AEAD_BASE + 11 * CAAM_CMD_SZ)
+#define DESC_AEAD_DEC_LEN (DESC_AEAD_BASE + 15 * CAAM_CMD_SZ)
+#define DESC_AEAD_GIVENC_LEN (DESC_AEAD_ENC_LEN + 7 * CAAM_CMD_SZ)
+
+/* Note: Nonce is counted in cdata.keylen */
+#define DESC_AEAD_CTR_RFC3686_LEN (4 * CAAM_CMD_SZ)
+
+#define DESC_AEAD_NULL_BASE (3 * CAAM_CMD_SZ)
+#define DESC_AEAD_NULL_ENC_LEN (DESC_AEAD_NULL_BASE + 11 * CAAM_CMD_SZ)
+#define DESC_AEAD_NULL_DEC_LEN (DESC_AEAD_NULL_BASE + 13 * CAAM_CMD_SZ)
+
+#define DESC_GCM_BASE (3 * CAAM_CMD_SZ)
+#define DESC_GCM_ENC_LEN (DESC_GCM_BASE + 16 * CAAM_CMD_SZ)
+#define DESC_GCM_DEC_LEN (DESC_GCM_BASE + 12 * CAAM_CMD_SZ)
+
+#define DESC_RFC4106_BASE (3 * CAAM_CMD_SZ)
+#define DESC_RFC4106_ENC_LEN (DESC_RFC4106_BASE + 13 * CAAM_CMD_SZ)
+#define DESC_RFC4106_DEC_LEN (DESC_RFC4106_BASE + 13 * CAAM_CMD_SZ)
+
+#define DESC_RFC4543_BASE (3 * CAAM_CMD_SZ)
+#define DESC_RFC4543_ENC_LEN (DESC_RFC4543_BASE + 11 * CAAM_CMD_SZ)
+#define DESC_RFC4543_DEC_LEN (DESC_RFC4543_BASE + 12 * CAAM_CMD_SZ)
+
+#define DESC_ABLKCIPHER_BASE (3 * CAAM_CMD_SZ)
+#define DESC_ABLKCIPHER_ENC_LEN (DESC_ABLKCIPHER_BASE + \
+ 20 * CAAM_CMD_SZ)
+#define DESC_ABLKCIPHER_DEC_LEN (DESC_ABLKCIPHER_BASE + \
+ 15 * CAAM_CMD_SZ)
+
+void cnstr_shdsc_aead_null_encap(u32 * const desc, struct alginfo *adata,
+ unsigned int icvsize);
+
+void cnstr_shdsc_aead_null_decap(u32 * const desc, struct alginfo *adata,
+ unsigned int icvsize);
+
+void cnstr_shdsc_aead_encap(u32 * const desc, struct alginfo *cdata,
+ struct alginfo *adata, unsigned int icvsize,
+ const bool is_rfc3686, u32 *nonce,
+ const u32 ctx1_iv_off);
+
+void cnstr_shdsc_aead_decap(u32 * const desc, struct alginfo *cdata,
+ struct alginfo *adata, unsigned int ivsize,
+ unsigned int icvsize, const bool geniv,
+ const bool is_rfc3686, u32 *nonce,
+ const u32 ctx1_iv_off);
+
+void cnstr_shdsc_aead_givencap(u32 * const desc, struct alginfo *cdata,
+ struct alginfo *adata, unsigned int ivsize,
+ unsigned int icvsize, const bool is_rfc3686,
+ u32 *nonce, const u32 ctx1_iv_off);
+
+void cnstr_shdsc_gcm_encap(u32 * const desc, struct alginfo *cdata,
+ unsigned int icvsize);
+
+void cnstr_shdsc_gcm_decap(u32 * const desc, struct alginfo *cdata,
+ unsigned int icvsize);
+
+void cnstr_shdsc_rfc4106_encap(u32 * const desc, struct alginfo *cdata,
+ unsigned int icvsize);
+
+void cnstr_shdsc_rfc4106_decap(u32 * const desc, struct alginfo *cdata,
+ unsigned int icvsize);
+
+void cnstr_shdsc_rfc4543_encap(u32 * const desc, struct alginfo *cdata,
+ unsigned int icvsize);
+
+void cnstr_shdsc_rfc4543_decap(u32 * const desc, struct alginfo *cdata,
+ unsigned int icvsize);
+
+void cnstr_shdsc_ablkcipher_encap(u32 * const desc, struct alginfo *cdata,
+ unsigned int ivsize, const bool is_rfc3686,
+ const u32 ctx1_iv_off);
+
+void cnstr_shdsc_ablkcipher_decap(u32 * const desc, struct alginfo *cdata,
+ unsigned int ivsize, const bool is_rfc3686,
+ const u32 ctx1_iv_off);
+
+void cnstr_shdsc_ablkcipher_givencap(u32 * const desc, struct alginfo *cdata,
+ unsigned int ivsize, const bool is_rfc3686,
+ const u32 ctx1_iv_off);
+
+void cnstr_shdsc_xts_ablkcipher_encap(u32 * const desc, struct alginfo *cdata);
+
+void cnstr_shdsc_xts_ablkcipher_decap(u32 * const desc, struct alginfo *cdata);
+
+#endif /* _CAAMALG_DESC_H_ */
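
These length defines let callers size per-context descriptor buffers up front.
A minimal sizing sketch, mirroring the approach caamalg.c takes (the SKETCH_*
macro names are illustrative; DESC_JOB_IO_LEN is a define on the caamalg.c
side):

    /* Worst-case shared descriptor space: a full 64-word descriptor minus
     * the job descriptor I/O commands used alongside it. */
    #define SKETCH_MAX_USED_BYTES   (CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN)
    #define SKETCH_MAX_USED_LEN     (SKETCH_MAX_USED_BYTES / CAAM_CMD_SZ)

    u32 sh_desc_enc[SKETCH_MAX_USED_LEN] ____cacheline_aligned;
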
diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c
index 660dc206969f..e58639ea53b1 100644
--- a/drivers/crypto/caam/caamhash.c
+++ b/drivers/crypto/caam/caamhash.c
@@ -72,7 +72,7 @@
#define CAAM_MAX_HASH_DIGEST_SIZE SHA512_DIGEST_SIZE
/* length of descriptors text */
-#define DESC_AHASH_BASE (4 * CAAM_CMD_SZ)
+#define DESC_AHASH_BASE (3 * CAAM_CMD_SZ)
#define DESC_AHASH_UPDATE_LEN (6 * CAAM_CMD_SZ)
#define DESC_AHASH_UPDATE_FIRST_LEN (DESC_AHASH_BASE + 4 * CAAM_CMD_SZ)
#define DESC_AHASH_FINAL_LEN (DESC_AHASH_BASE + 5 * CAAM_CMD_SZ)
@@ -103,20 +103,15 @@ struct caam_hash_ctx {
u32 sh_desc_update_first[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
u32 sh_desc_fin[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
u32 sh_desc_digest[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
- u32 sh_desc_finup[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
dma_addr_t sh_desc_update_dma ____cacheline_aligned;
dma_addr_t sh_desc_update_first_dma;
dma_addr_t sh_desc_fin_dma;
dma_addr_t sh_desc_digest_dma;
- dma_addr_t sh_desc_finup_dma;
struct device *jrdev;
- u32 alg_type;
- u32 alg_op;
u8 key[CAAM_MAX_HASH_KEY_SIZE];
dma_addr_t key_dma;
int ctx_len;
- unsigned int split_key_len;
- unsigned int split_key_pad_len;
+ struct alginfo adata;
};
/* ahash state */
@@ -222,89 +217,54 @@ static inline int ctx_map_to_sec4_sg(u32 *desc, struct device *jrdev,
return 0;
}
-/* Common shared descriptor commands */
-static inline void append_key_ahash(u32 *desc, struct caam_hash_ctx *ctx)
-{
- append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
- ctx->split_key_len, CLASS_2 |
- KEY_DEST_MDHA_SPLIT | KEY_ENC);
-}
-
-/* Append key if it has been set */
-static inline void init_sh_desc_key_ahash(u32 *desc, struct caam_hash_ctx *ctx)
-{
- u32 *key_jump_cmd;
-
- init_sh_desc(desc, HDR_SHARE_SERIAL);
-
- if (ctx->split_key_len) {
- /* Skip if already shared */
- key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
- JUMP_COND_SHRD);
-
- append_key_ahash(desc, ctx);
-
- set_jump_tgt_here(desc, key_jump_cmd);
- }
-
- /* Propagate errors from shared to job descriptor */
- append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD);
-}
-
/*
- * For ahash read data from seqin following state->caam_ctx,
- * and write resulting class2 context to seqout, which may be state->caam_ctx
- * or req->result
+ * For ahash update, final and finup (import_ctx = true)
+ * import context, read and write to seqout
+ * For ahash update_first and digest (import_ctx = false)
+ * read and write to seqout
*/
-static inline void ahash_append_load_str(u32 *desc, int digestsize)
+static inline void ahash_gen_sh_desc(u32 *desc, u32 state, int digestsize,
+ struct caam_hash_ctx *ctx, bool import_ctx)
{
- /* Calculate remaining bytes to read */
- append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
-
- /* Read remaining bytes */
- append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_LAST2 |
- FIFOLD_TYPE_MSG | KEY_VLF);
+ u32 op = ctx->adata.algtype;
+ u32 *skip_key_load;
- /* Store class2 context bytes */
- append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
- LDST_SRCDST_BYTE_CONTEXT);
-}
+ init_sh_desc(desc, HDR_SHARE_SERIAL);
-/*
- * For ahash update, final and finup, import context, read and write to seqout
- */
-static inline void ahash_ctx_data_to_out(u32 *desc, u32 op, u32 state,
- int digestsize,
- struct caam_hash_ctx *ctx)
-{
- init_sh_desc_key_ahash(desc, ctx);
+ /* Append key if it has been set; ahash update excluded */
+ if ((state != OP_ALG_AS_UPDATE) && (ctx->adata.keylen)) {
+ /* Skip key loading if already shared */
+ skip_key_load = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+ JUMP_COND_SHRD);
- /* Import context from software */
- append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
- LDST_CLASS_2_CCB | ctx->ctx_len);
+ append_key_as_imm(desc, ctx->key, ctx->adata.keylen_pad,
+ ctx->adata.keylen, CLASS_2 |
+ KEY_DEST_MDHA_SPLIT | KEY_ENC);
- /* Class 2 operation */
- append_operation(desc, op | state | OP_ALG_ENCRYPT);
+ set_jump_tgt_here(desc, skip_key_load);
- /*
- * Load from buf and/or src and write to req->result or state->context
- */
- ahash_append_load_str(desc, digestsize);
-}
+ op |= OP_ALG_AAI_HMAC_PRECOMP;
+ }
-/* For ahash firsts and digest, read and write to seqout */
-static inline void ahash_data_to_out(u32 *desc, u32 op, u32 state,
- int digestsize, struct caam_hash_ctx *ctx)
-{
- init_sh_desc_key_ahash(desc, ctx);
+ /* If needed, import context from software */
+ if (import_ctx)
+ append_seq_load(desc, ctx->ctx_len, LDST_CLASS_2_CCB |
+ LDST_SRCDST_BYTE_CONTEXT);
/* Class 2 operation */
append_operation(desc, op | state | OP_ALG_ENCRYPT);
/*
* Load from buf and/or src and write to req->result or state->context
+ * Calculate remaining bytes to read
*/
- ahash_append_load_str(desc, digestsize);
+ append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
+ /* Read remaining bytes */
+ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_LAST2 |
+ FIFOLD_TYPE_MSG | KEY_VLF);
+ /* Store class2 context bytes */
+ append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
+ LDST_SRCDST_BYTE_CONTEXT);
}
static int ahash_set_sh_desc(struct crypto_ahash *ahash)
@@ -312,28 +272,11 @@ static int ahash_set_sh_desc(struct crypto_ahash *ahash)
struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
int digestsize = crypto_ahash_digestsize(ahash);
struct device *jrdev = ctx->jrdev;
- u32 have_key = 0;
u32 *desc;
- if (ctx->split_key_len)
- have_key = OP_ALG_AAI_HMAC_PRECOMP;
-
/* ahash_update shared descriptor */
desc = ctx->sh_desc_update;
-
- init_sh_desc(desc, HDR_SHARE_SERIAL);
-
- /* Import context from software */
- append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
- LDST_CLASS_2_CCB | ctx->ctx_len);
-
- /* Class 2 operation */
- append_operation(desc, ctx->alg_type | OP_ALG_AS_UPDATE |
- OP_ALG_ENCRYPT);
-
- /* Load data and write to result or context */
- ahash_append_load_str(desc, ctx->ctx_len);
-
+ ahash_gen_sh_desc(desc, OP_ALG_AS_UPDATE, ctx->ctx_len, ctx, true);
ctx->sh_desc_update_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
DMA_TO_DEVICE);
if (dma_mapping_error(jrdev, ctx->sh_desc_update_dma)) {
@@ -348,10 +291,7 @@ static int ahash_set_sh_desc(struct crypto_ahash *ahash)
/* ahash_update_first shared descriptor */
desc = ctx->sh_desc_update_first;
-
- ahash_data_to_out(desc, have_key | ctx->alg_type, OP_ALG_AS_INIT,
- ctx->ctx_len, ctx);
-
+ ahash_gen_sh_desc(desc, OP_ALG_AS_INIT, ctx->ctx_len, ctx, false);
ctx->sh_desc_update_first_dma = dma_map_single(jrdev, desc,
desc_bytes(desc),
DMA_TO_DEVICE);
@@ -367,10 +307,7 @@ static int ahash_set_sh_desc(struct crypto_ahash *ahash)
/* ahash_final shared descriptor */
desc = ctx->sh_desc_fin;
-
- ahash_ctx_data_to_out(desc, have_key | ctx->alg_type,
- OP_ALG_AS_FINALIZE, digestsize, ctx);
-
+ ahash_gen_sh_desc(desc, OP_ALG_AS_FINALIZE, digestsize, ctx, true);
ctx->sh_desc_fin_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
DMA_TO_DEVICE);
if (dma_mapping_error(jrdev, ctx->sh_desc_fin_dma)) {
@@ -383,30 +320,9 @@ static int ahash_set_sh_desc(struct crypto_ahash *ahash)
desc_bytes(desc), 1);
#endif
- /* ahash_finup shared descriptor */
- desc = ctx->sh_desc_finup;
-
- ahash_ctx_data_to_out(desc, have_key | ctx->alg_type,
- OP_ALG_AS_FINALIZE, digestsize, ctx);
-
- ctx->sh_desc_finup_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
- DMA_TO_DEVICE);
- if (dma_mapping_error(jrdev, ctx->sh_desc_finup_dma)) {
- dev_err(jrdev, "unable to map shared descriptor\n");
- return -ENOMEM;
- }
-#ifdef DEBUG
- print_hex_dump(KERN_ERR, "ahash finup shdesc@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, desc,
- desc_bytes(desc), 1);
-#endif
-
/* ahash_digest shared descriptor */
desc = ctx->sh_desc_digest;
-
- ahash_data_to_out(desc, have_key | ctx->alg_type, OP_ALG_AS_INITFINAL,
- digestsize, ctx);
-
+ ahash_gen_sh_desc(desc, OP_ALG_AS_INITFINAL, digestsize, ctx, false);
ctx->sh_desc_digest_dma = dma_map_single(jrdev, desc,
desc_bytes(desc),
DMA_TO_DEVICE);
@@ -424,14 +340,6 @@ static int ahash_set_sh_desc(struct crypto_ahash *ahash)
return 0;
}
-static int gen_split_hash_key(struct caam_hash_ctx *ctx, const u8 *key_in,
- u32 keylen)
-{
- return gen_split_key(ctx->jrdev, ctx->key, ctx->split_key_len,
- ctx->split_key_pad_len, key_in, keylen,
- ctx->alg_op);
-}
-
/* Digest hash size if it is too large */
static int hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in,
u32 *keylen, u8 *key_out, u32 digestsize)
@@ -467,7 +375,7 @@ static int hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in,
}
/* Job descriptor to perform unkeyed hash on key_in */
- append_operation(desc, ctx->alg_type | OP_ALG_ENCRYPT |
+ append_operation(desc, ctx->adata.algtype | OP_ALG_ENCRYPT |
OP_ALG_AS_INITFINAL);
append_seq_in_ptr(desc, src_dma, *keylen, 0);
append_seq_fifo_load(desc, *keylen, FIFOLD_CLASS_CLASS2 |
@@ -511,8 +419,6 @@ static int hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in,
static int ahash_setkey(struct crypto_ahash *ahash,
const u8 *key, unsigned int keylen)
{
- /* Sizes for MDHA pads (*not* keys): MD5, SHA1, 224, 256, 384, 512 */
- static const u8 mdpadlen[] = { 16, 20, 32, 32, 64, 64 };
struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
struct device *jrdev = ctx->jrdev;
int blocksize = crypto_tfm_alg_blocksize(&ahash->base);
@@ -537,23 +443,12 @@ static int ahash_setkey(struct crypto_ahash *ahash,
key = hashed_key;
}
- /* Pick class 2 key length from algorithm submask */
- ctx->split_key_len = mdpadlen[(ctx->alg_op & OP_ALG_ALGSEL_SUBMASK) >>
- OP_ALG_ALGSEL_SHIFT] * 2;
- ctx->split_key_pad_len = ALIGN(ctx->split_key_len, 16);
-
-#ifdef DEBUG
- printk(KERN_ERR "split_key_len %d split_key_pad_len %d\n",
- ctx->split_key_len, ctx->split_key_pad_len);
- print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
-#endif
-
- ret = gen_split_hash_key(ctx, key, keylen);
+ ret = gen_split_key(ctx->jrdev, ctx->key, &ctx->adata, key, keylen,
+ CAAM_MAX_HASH_KEY_SIZE);
if (ret)
goto bad_free_key;
- ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->split_key_pad_len,
+ ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->adata.keylen_pad,
DMA_TO_DEVICE);
if (dma_mapping_error(jrdev, ctx->key_dma)) {
dev_err(jrdev, "unable to map key i/o memory\n");
@@ -563,14 +458,15 @@ static int ahash_setkey(struct crypto_ahash *ahash,
#ifdef DEBUG
print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
- ctx->split_key_pad_len, 1);
+ ctx->adata.keylen_pad, 1);
#endif
ret = ahash_set_sh_desc(ahash);
if (ret) {
- dma_unmap_single(jrdev, ctx->key_dma, ctx->split_key_pad_len,
+ dma_unmap_single(jrdev, ctx->key_dma, ctx->adata.keylen_pad,
DMA_TO_DEVICE);
}
+
error_free_key:
kfree(hashed_key);
return ret;
@@ -639,8 +535,7 @@ static void ahash_done(struct device *jrdev, u32 *desc, u32 err,
dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif
- edesc = (struct ahash_edesc *)((char *)desc -
- offsetof(struct ahash_edesc, hw_desc));
+ edesc = container_of(desc, struct ahash_edesc, hw_desc[0]);
if (err)
caam_jr_strstatus(jrdev, err);
@@ -674,8 +569,7 @@ static void ahash_done_bi(struct device *jrdev, u32 *desc, u32 err,
dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif
- edesc = (struct ahash_edesc *)((char *)desc -
- offsetof(struct ahash_edesc, hw_desc));
+ edesc = container_of(desc, struct ahash_edesc, hw_desc[0]);
if (err)
caam_jr_strstatus(jrdev, err);
@@ -709,8 +603,7 @@ static void ahash_done_ctx_src(struct device *jrdev, u32 *desc, u32 err,
dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif
- edesc = (struct ahash_edesc *)((char *)desc -
- offsetof(struct ahash_edesc, hw_desc));
+ edesc = container_of(desc, struct ahash_edesc, hw_desc[0]);
if (err)
caam_jr_strstatus(jrdev, err);
@@ -744,8 +637,7 @@ static void ahash_done_ctx_dst(struct device *jrdev, u32 *desc, u32 err,
dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif
- edesc = (struct ahash_edesc *)((char *)desc -
- offsetof(struct ahash_edesc, hw_desc));
+ edesc = container_of(desc, struct ahash_edesc, hw_desc[0]);
if (err)
caam_jr_strstatus(jrdev, err);
@@ -1078,7 +970,7 @@ static int ahash_finup_ctx(struct ahash_request *req)
/* allocate space for base edesc and hw desc commands, link tables */
edesc = ahash_edesc_alloc(ctx, sec4_sg_src_index + mapped_nents,
- ctx->sh_desc_finup, ctx->sh_desc_finup_dma,
+ ctx->sh_desc_fin, ctx->sh_desc_fin_dma,
flags);
if (!edesc) {
dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
@@ -1683,7 +1575,6 @@ struct caam_hash_template {
unsigned int blocksize;
struct ahash_alg template_ahash;
u32 alg_type;
- u32 alg_op;
};
/* ahash descriptors */
@@ -1709,7 +1600,6 @@ static struct caam_hash_template driver_hash[] = {
},
},
.alg_type = OP_ALG_ALGSEL_SHA1,
- .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
}, {
.name = "sha224",
.driver_name = "sha224-caam",
@@ -1731,7 +1621,6 @@ static struct caam_hash_template driver_hash[] = {
},
},
.alg_type = OP_ALG_ALGSEL_SHA224,
- .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
}, {
.name = "sha256",
.driver_name = "sha256-caam",
@@ -1753,7 +1642,6 @@ static struct caam_hash_template driver_hash[] = {
},
},
.alg_type = OP_ALG_ALGSEL_SHA256,
- .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
}, {
.name = "sha384",
.driver_name = "sha384-caam",
@@ -1775,7 +1663,6 @@ static struct caam_hash_template driver_hash[] = {
},
},
.alg_type = OP_ALG_ALGSEL_SHA384,
- .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
}, {
.name = "sha512",
.driver_name = "sha512-caam",
@@ -1797,7 +1684,6 @@ static struct caam_hash_template driver_hash[] = {
},
},
.alg_type = OP_ALG_ALGSEL_SHA512,
- .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
}, {
.name = "md5",
.driver_name = "md5-caam",
@@ -1819,14 +1705,12 @@ static struct caam_hash_template driver_hash[] = {
},
},
.alg_type = OP_ALG_ALGSEL_MD5,
- .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
},
};
struct caam_hash_alg {
struct list_head entry;
int alg_type;
- int alg_op;
struct ahash_alg ahash_alg;
};
@@ -1859,10 +1743,10 @@ static int caam_hash_cra_init(struct crypto_tfm *tfm)
return PTR_ERR(ctx->jrdev);
}
/* copy descriptor header template value */
- ctx->alg_type = OP_TYPE_CLASS2_ALG | caam_hash->alg_type;
- ctx->alg_op = OP_TYPE_CLASS2_ALG | caam_hash->alg_op;
+ ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam_hash->alg_type;
- ctx->ctx_len = runninglen[(ctx->alg_op & OP_ALG_ALGSEL_SUBMASK) >>
+ ctx->ctx_len = runninglen[(ctx->adata.algtype &
+ OP_ALG_ALGSEL_SUBMASK) >>
OP_ALG_ALGSEL_SHIFT];
crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
@@ -1893,10 +1777,6 @@ static void caam_hash_cra_exit(struct crypto_tfm *tfm)
dma_unmap_single(ctx->jrdev, ctx->sh_desc_digest_dma,
desc_bytes(ctx->sh_desc_digest),
DMA_TO_DEVICE);
- if (ctx->sh_desc_finup_dma &&
- !dma_mapping_error(ctx->jrdev, ctx->sh_desc_finup_dma))
- dma_unmap_single(ctx->jrdev, ctx->sh_desc_finup_dma,
- desc_bytes(ctx->sh_desc_finup), DMA_TO_DEVICE);
caam_jr_free(ctx->jrdev);
}
@@ -1956,7 +1836,6 @@ caam_hash_alloc(struct caam_hash_template *template,
alg->cra_type = &crypto_ahash_type;
t_alg->alg_type = template->alg_type;
- t_alg->alg_op = template->alg_op;
return t_alg;
}
diff --git a/drivers/crypto/caam/caampkc.c b/drivers/crypto/caam/caampkc.c
index 851015e652b8..32100c4851dd 100644
--- a/drivers/crypto/caam/caampkc.c
+++ b/drivers/crypto/caam/caampkc.c
@@ -395,7 +395,7 @@ static int caam_rsa_set_pub_key(struct crypto_akcipher *tfm, const void *key,
unsigned int keylen)
{
struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
- struct rsa_key raw_key = {0};
+ struct rsa_key raw_key = {NULL};
struct caam_rsa_key *rsa_key = &ctx->key;
int ret;
@@ -441,7 +441,7 @@ static int caam_rsa_set_priv_key(struct crypto_akcipher *tfm, const void *key,
unsigned int keylen)
{
struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
- struct rsa_key raw_key = {0};
+ struct rsa_key raw_key = {NULL};
struct caam_rsa_key *rsa_key = &ctx->key;
int ret;
diff --git a/drivers/crypto/caam/caamrng.c b/drivers/crypto/caam/caamrng.c
index 9b92af2c7241..41398da3edf4 100644
--- a/drivers/crypto/caam/caamrng.c
+++ b/drivers/crypto/caam/caamrng.c
@@ -52,7 +52,7 @@
/* length of descriptors */
#define DESC_JOB_O_LEN (CAAM_CMD_SZ * 2 + CAAM_PTR_SZ * 2)
-#define DESC_RNG_LEN (4 * CAAM_CMD_SZ)
+#define DESC_RNG_LEN (3 * CAAM_CMD_SZ)
/* Buffer, its dma address and lock */
struct buf_data {
@@ -100,8 +100,7 @@ static void rng_done(struct device *jrdev, u32 *desc, u32 err, void *context)
{
struct buf_data *bd;
- bd = (struct buf_data *)((char *)desc -
- offsetof(struct buf_data, hw_desc));
+ bd = container_of(desc, struct buf_data, hw_desc[0]);
if (err)
caam_jr_strstatus(jrdev, err);
@@ -196,9 +195,6 @@ static inline int rng_create_sh_desc(struct caam_rng_ctx *ctx)
init_sh_desc(desc, HDR_SHARE_SERIAL);
- /* Propagate errors from shared to job descriptor */
- append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD);
-
/* Generate random bytes */
append_operation(desc, OP_ALG_ALGSEL_RNG | OP_TYPE_CLASS1_ALG);
@@ -351,7 +347,7 @@ static int __init caam_rng_init(void)
pr_err("Job Ring Device allocation for transform failed\n");
return PTR_ERR(dev);
}
- rng_ctx = kmalloc(sizeof(*rng_ctx), GFP_DMA);
+ rng_ctx = kmalloc(sizeof(*rng_ctx), GFP_DMA | GFP_KERNEL);
if (!rng_ctx) {
err = -ENOMEM;
goto free_caam_alloc;
diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c
index e483b78c6343..755109841cfd 100644
--- a/drivers/crypto/caam/ctrl.c
+++ b/drivers/crypto/caam/ctrl.c
@@ -330,8 +330,8 @@ static int caam_remove(struct platform_device *pdev)
clk_disable_unprepare(ctrlpriv->caam_ipg);
clk_disable_unprepare(ctrlpriv->caam_mem);
clk_disable_unprepare(ctrlpriv->caam_aclk);
- clk_disable_unprepare(ctrlpriv->caam_emi_slow);
-
+ if (ctrlpriv->caam_emi_slow)
+ clk_disable_unprepare(ctrlpriv->caam_emi_slow);
return 0;
}
@@ -365,11 +365,8 @@ static void kick_trng(struct platform_device *pdev, int ent_delay)
*/
val = (rd_reg32(&r4tst->rtsdctl) & RTSDCTL_ENT_DLY_MASK)
>> RTSDCTL_ENT_DLY_SHIFT;
- if (ent_delay <= val) {
- /* put RNG4 into run mode */
- clrsetbits_32(&r4tst->rtmctl, RTMCTL_PRGM, 0);
- return;
- }
+ if (ent_delay <= val)
+ goto start_rng;
val = rd_reg32(&r4tst->rtsdctl);
val = (val & ~RTSDCTL_ENT_DLY_MASK) |
@@ -381,15 +378,12 @@ static void kick_trng(struct platform_device *pdev, int ent_delay)
wr_reg32(&r4tst->rtfrqmax, RTFRQMAX_DISABLE);
/* read the control register */
val = rd_reg32(&r4tst->rtmctl);
+start_rng:
/*
* select raw sampling in both entropy shifter
- * and statistical checker
+ * and statistical checker; put RNG4 into run mode
*/
- clrsetbits_32(&val, 0, RTMCTL_SAMP_MODE_RAW_ES_SC);
- /* put RNG4 into run mode */
- clrsetbits_32(&val, RTMCTL_PRGM, 0);
- /* write back the control register */
- wr_reg32(&r4tst->rtmctl, val);
+ clrsetbits_32(&r4tst->rtmctl, RTMCTL_PRGM, RTMCTL_SAMP_MODE_RAW_ES_SC);
}
/**
@@ -482,14 +476,16 @@ static int caam_probe(struct platform_device *pdev)
}
ctrlpriv->caam_aclk = clk;
- clk = caam_drv_identify_clk(&pdev->dev, "emi_slow");
- if (IS_ERR(clk)) {
- ret = PTR_ERR(clk);
- dev_err(&pdev->dev,
- "can't identify CAAM emi_slow clk: %d\n", ret);
- return ret;
+ if (!of_machine_is_compatible("fsl,imx6ul")) {
+ clk = caam_drv_identify_clk(&pdev->dev, "emi_slow");
+ if (IS_ERR(clk)) {
+ ret = PTR_ERR(clk);
+ dev_err(&pdev->dev,
+ "can't identify CAAM emi_slow clk: %d\n", ret);
+ return ret;
+ }
+ ctrlpriv->caam_emi_slow = clk;
}
- ctrlpriv->caam_emi_slow = clk;
ret = clk_prepare_enable(ctrlpriv->caam_ipg);
if (ret < 0) {
@@ -510,11 +506,13 @@ static int caam_probe(struct platform_device *pdev)
goto disable_caam_mem;
}
- ret = clk_prepare_enable(ctrlpriv->caam_emi_slow);
- if (ret < 0) {
- dev_err(&pdev->dev, "can't enable CAAM emi slow clock: %d\n",
- ret);
- goto disable_caam_aclk;
+ if (ctrlpriv->caam_emi_slow) {
+ ret = clk_prepare_enable(ctrlpriv->caam_emi_slow);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "can't enable CAAM emi slow clock: %d\n",
+ ret);
+ goto disable_caam_aclk;
+ }
}
/* Get configuration properties from device tree */
@@ -541,13 +539,13 @@ static int caam_probe(struct platform_device *pdev)
else
BLOCK_OFFSET = PG_SIZE_64K;
- ctrlpriv->ctrl = (struct caam_ctrl __force *)ctrl;
- ctrlpriv->assure = (struct caam_assurance __force *)
- ((uint8_t *)ctrl +
+ ctrlpriv->ctrl = (struct caam_ctrl __iomem __force *)ctrl;
+ ctrlpriv->assure = (struct caam_assurance __iomem __force *)
+ ((__force uint8_t *)ctrl +
BLOCK_OFFSET * ASSURE_BLOCK_NUMBER
);
- ctrlpriv->deco = (struct caam_deco __force *)
- ((uint8_t *)ctrl +
+ ctrlpriv->deco = (struct caam_deco __iomem __force *)
+ ((__force uint8_t *)ctrl +
BLOCK_OFFSET * DECO_BLOCK_NUMBER
);
@@ -627,8 +625,8 @@ static int caam_probe(struct platform_device *pdev)
ring);
continue;
}
- ctrlpriv->jr[ring] = (struct caam_job_ring __force *)
- ((uint8_t *)ctrl +
+ ctrlpriv->jr[ring] = (struct caam_job_ring __iomem __force *)
+ ((__force uint8_t *)ctrl +
(ring + JR_BLOCK_NUMBER) *
BLOCK_OFFSET
);
@@ -641,8 +639,8 @@ static int caam_probe(struct platform_device *pdev)
!!(rd_reg32(&ctrl->perfmon.comp_parms_ms) &
CTPR_MS_QI_MASK);
if (ctrlpriv->qi_present) {
- ctrlpriv->qi = (struct caam_queue_if __force *)
- ((uint8_t *)ctrl +
+ ctrlpriv->qi = (struct caam_queue_if __iomem __force *)
+ ((__force uint8_t *)ctrl +
BLOCK_OFFSET * QI_BLOCK_NUMBER
);
/* This is all that's required to physically enable QI */
@@ -800,7 +798,7 @@ static int caam_probe(struct platform_device *pdev)
&caam_fops_u32_ro);
/* Internal covering keys (useful in non-secure mode only) */
- ctrlpriv->ctl_kek_wrap.data = &ctrlpriv->ctrl->kek[0];
+ ctrlpriv->ctl_kek_wrap.data = (__force void *)&ctrlpriv->ctrl->kek[0];
ctrlpriv->ctl_kek_wrap.size = KEK_KEY_SIZE * sizeof(u32);
ctrlpriv->ctl_kek = debugfs_create_blob("kek",
S_IRUSR |
@@ -808,7 +806,7 @@ static int caam_probe(struct platform_device *pdev)
ctrlpriv->ctl,
&ctrlpriv->ctl_kek_wrap);
- ctrlpriv->ctl_tkek_wrap.data = &ctrlpriv->ctrl->tkek[0];
+ ctrlpriv->ctl_tkek_wrap.data = (__force void *)&ctrlpriv->ctrl->tkek[0];
ctrlpriv->ctl_tkek_wrap.size = KEK_KEY_SIZE * sizeof(u32);
ctrlpriv->ctl_tkek = debugfs_create_blob("tkek",
S_IRUSR |
@@ -816,7 +814,7 @@ static int caam_probe(struct platform_device *pdev)
ctrlpriv->ctl,
&ctrlpriv->ctl_tkek_wrap);
- ctrlpriv->ctl_tdsk_wrap.data = &ctrlpriv->ctrl->tdsk[0];
+ ctrlpriv->ctl_tdsk_wrap.data = (__force void *)&ctrlpriv->ctrl->tdsk[0];
ctrlpriv->ctl_tdsk_wrap.size = KEK_KEY_SIZE * sizeof(u32);
ctrlpriv->ctl_tdsk = debugfs_create_blob("tdsk",
S_IRUSR |
@@ -833,7 +831,8 @@ caam_remove:
iounmap_ctrl:
iounmap(ctrl);
disable_caam_emi_slow:
- clk_disable_unprepare(ctrlpriv->caam_emi_slow);
+ if (ctrlpriv->caam_emi_slow)
+ clk_disable_unprepare(ctrlpriv->caam_emi_slow);
disable_caam_aclk:
clk_disable_unprepare(ctrlpriv->caam_aclk);
disable_caam_mem:
diff --git a/drivers/crypto/caam/desc.h b/drivers/crypto/caam/desc.h
index 513b6646bb36..2e6766a1573f 100644
--- a/drivers/crypto/caam/desc.h
+++ b/drivers/crypto/caam/desc.h
@@ -22,12 +22,6 @@
#define SEC4_SG_LEN_MASK 0x3fffffff /* Excludes EXT and FINAL */
#define SEC4_SG_OFFSET_MASK 0x00001fff
-struct sec4_sg_entry {
- u64 ptr;
- u32 len;
- u32 bpid_offset;
-};
-
/* Max size of any CAAM descriptor in 32-bit words, inclusive of header */
#define MAX_CAAM_DESCSIZE 64
@@ -90,8 +84,8 @@ struct sec4_sg_entry {
#define HDR_ZRO 0x00008000
/* Start Index or SharedDesc Length */
-#define HDR_START_IDX_MASK 0x3f
#define HDR_START_IDX_SHIFT 16
+#define HDR_START_IDX_MASK (0x3f << HDR_START_IDX_SHIFT)
/* If shared descriptor header, 6-bit length */
#define HDR_DESCLEN_SHR_MASK 0x3f
@@ -121,10 +115,10 @@ struct sec4_sg_entry {
#define HDR_PROP_DNR 0x00000800
/* JobDesc/SharedDesc share property */
-#define HDR_SD_SHARE_MASK 0x03
#define HDR_SD_SHARE_SHIFT 8
-#define HDR_JD_SHARE_MASK 0x07
+#define HDR_SD_SHARE_MASK (0x03 << HDR_SD_SHARE_SHIFT)
#define HDR_JD_SHARE_SHIFT 8
+#define HDR_JD_SHARE_MASK (0x07 << HDR_JD_SHARE_SHIFT)
#define HDR_SHARE_NEVER (0x00 << HDR_SD_SHARE_SHIFT)
#define HDR_SHARE_WAIT (0x01 << HDR_SD_SHARE_SHIFT)
@@ -235,7 +229,7 @@ struct sec4_sg_entry {
#define LDST_SRCDST_WORD_DECO_MATH2 (0x0a << LDST_SRCDST_SHIFT)
#define LDST_SRCDST_WORD_DECO_AAD_SZ (0x0b << LDST_SRCDST_SHIFT)
#define LDST_SRCDST_WORD_DECO_MATH3 (0x0b << LDST_SRCDST_SHIFT)
-#define LDST_SRCDST_WORD_CLASS1_ICV_SZ (0x0c << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_CLASS1_IV_SZ (0x0c << LDST_SRCDST_SHIFT)
#define LDST_SRCDST_WORD_ALTDS_CLASS1 (0x0f << LDST_SRCDST_SHIFT)
#define LDST_SRCDST_WORD_PKHA_A_SZ (0x10 << LDST_SRCDST_SHIFT)
#define LDST_SRCDST_WORD_PKHA_B_SZ (0x11 << LDST_SRCDST_SHIFT)
@@ -400,7 +394,7 @@ struct sec4_sg_entry {
#define FIFOST_TYPE_PKHA_N (0x08 << FIFOST_TYPE_SHIFT)
#define FIFOST_TYPE_PKHA_A (0x0c << FIFOST_TYPE_SHIFT)
#define FIFOST_TYPE_PKHA_B (0x0d << FIFOST_TYPE_SHIFT)
-#define FIFOST_TYPE_AF_SBOX_JKEK (0x10 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_AF_SBOX_JKEK (0x20 << FIFOST_TYPE_SHIFT)
#define FIFOST_TYPE_AF_SBOX_TKEK (0x21 << FIFOST_TYPE_SHIFT)
#define FIFOST_TYPE_PKHA_E_JKEK (0x22 << FIFOST_TYPE_SHIFT)
#define FIFOST_TYPE_PKHA_E_TKEK (0x23 << FIFOST_TYPE_SHIFT)
@@ -1107,8 +1101,8 @@ struct sec4_sg_entry {
/* For non-protocol/alg-only op commands */
#define OP_ALG_TYPE_SHIFT 24
#define OP_ALG_TYPE_MASK (0x7 << OP_ALG_TYPE_SHIFT)
-#define OP_ALG_TYPE_CLASS1 2
-#define OP_ALG_TYPE_CLASS2 4
+#define OP_ALG_TYPE_CLASS1 (2 << OP_ALG_TYPE_SHIFT)
+#define OP_ALG_TYPE_CLASS2 (4 << OP_ALG_TYPE_SHIFT)
#define OP_ALG_ALGSEL_SHIFT 16
#define OP_ALG_ALGSEL_MASK (0xff << OP_ALG_ALGSEL_SHIFT)
@@ -1249,7 +1243,7 @@ struct sec4_sg_entry {
#define OP_ALG_PKMODE_MOD_PRIMALITY 0x00f
/* PKHA mode copy-memory functions */
-#define OP_ALG_PKMODE_SRC_REG_SHIFT 13
+#define OP_ALG_PKMODE_SRC_REG_SHIFT 17
#define OP_ALG_PKMODE_SRC_REG_MASK (7 << OP_ALG_PKMODE_SRC_REG_SHIFT)
#define OP_ALG_PKMODE_DST_REG_SHIFT 10
#define OP_ALG_PKMODE_DST_REG_MASK (7 << OP_ALG_PKMODE_DST_REG_SHIFT)
diff --git a/drivers/crypto/caam/desc_constr.h b/drivers/crypto/caam/desc_constr.h
index a8cd8a78ec1f..b9c8d98ef826 100644
--- a/drivers/crypto/caam/desc_constr.h
+++ b/drivers/crypto/caam/desc_constr.h
@@ -33,38 +33,39 @@
extern bool caam_little_end;
-static inline int desc_len(u32 *desc)
+static inline int desc_len(u32 * const desc)
{
return caam32_to_cpu(*desc) & HDR_DESCLEN_MASK;
}
-static inline int desc_bytes(void *desc)
+static inline int desc_bytes(void * const desc)
{
return desc_len(desc) * CAAM_CMD_SZ;
}
-static inline u32 *desc_end(u32 *desc)
+static inline u32 *desc_end(u32 * const desc)
{
return desc + desc_len(desc);
}
-static inline void *sh_desc_pdb(u32 *desc)
+static inline void *sh_desc_pdb(u32 * const desc)
{
return desc + 1;
}
-static inline void init_desc(u32 *desc, u32 options)
+static inline void init_desc(u32 * const desc, u32 options)
{
*desc = cpu_to_caam32((options | HDR_ONE) + 1);
}
-static inline void init_sh_desc(u32 *desc, u32 options)
+static inline void init_sh_desc(u32 * const desc, u32 options)
{
PRINT_POS;
init_desc(desc, CMD_SHARED_DESC_HDR | options);
}
-static inline void init_sh_desc_pdb(u32 *desc, u32 options, size_t pdb_bytes)
+static inline void init_sh_desc_pdb(u32 * const desc, u32 options,
+ size_t pdb_bytes)
{
u32 pdb_len = (pdb_bytes + CAAM_CMD_SZ - 1) / CAAM_CMD_SZ;
@@ -72,19 +73,20 @@ static inline void init_sh_desc_pdb(u32 *desc, u32 options, size_t pdb_bytes)
options);
}
-static inline void init_job_desc(u32 *desc, u32 options)
+static inline void init_job_desc(u32 * const desc, u32 options)
{
init_desc(desc, CMD_DESC_HDR | options);
}
-static inline void init_job_desc_pdb(u32 *desc, u32 options, size_t pdb_bytes)
+static inline void init_job_desc_pdb(u32 * const desc, u32 options,
+ size_t pdb_bytes)
{
u32 pdb_len = (pdb_bytes + CAAM_CMD_SZ - 1) / CAAM_CMD_SZ;
init_job_desc(desc, (((pdb_len + 1) << HDR_START_IDX_SHIFT)) | options);
}
-static inline void append_ptr(u32 *desc, dma_addr_t ptr)
+static inline void append_ptr(u32 * const desc, dma_addr_t ptr)
{
dma_addr_t *offset = (dma_addr_t *)desc_end(desc);
@@ -94,8 +96,8 @@ static inline void append_ptr(u32 *desc, dma_addr_t ptr)
CAAM_PTR_SZ / CAAM_CMD_SZ);
}
-static inline void init_job_desc_shared(u32 *desc, dma_addr_t ptr, int len,
- u32 options)
+static inline void init_job_desc_shared(u32 * const desc, dma_addr_t ptr,
+ int len, u32 options)
{
PRINT_POS;
init_job_desc(desc, HDR_SHARED | options |
@@ -103,7 +105,7 @@ static inline void init_job_desc_shared(u32 *desc, dma_addr_t ptr, int len,
append_ptr(desc, ptr);
}
-static inline void append_data(u32 *desc, void *data, int len)
+static inline void append_data(u32 * const desc, void *data, int len)
{
u32 *offset = desc_end(desc);
@@ -114,7 +116,7 @@ static inline void append_data(u32 *desc, void *data, int len)
(len + CAAM_CMD_SZ - 1) / CAAM_CMD_SZ);
}
-static inline void append_cmd(u32 *desc, u32 command)
+static inline void append_cmd(u32 * const desc, u32 command)
{
u32 *cmd = desc_end(desc);
@@ -125,7 +127,7 @@ static inline void append_cmd(u32 *desc, u32 command)
#define append_u32 append_cmd
-static inline void append_u64(u32 *desc, u64 data)
+static inline void append_u64(u32 * const desc, u64 data)
{
u32 *offset = desc_end(desc);
@@ -142,14 +144,14 @@ static inline void append_u64(u32 *desc, u64 data)
}
/* Write command without affecting header, and return pointer to next word */
-static inline u32 *write_cmd(u32 *desc, u32 command)
+static inline u32 *write_cmd(u32 * const desc, u32 command)
{
*desc = cpu_to_caam32(command);
return desc + 1;
}
-static inline void append_cmd_ptr(u32 *desc, dma_addr_t ptr, int len,
+static inline void append_cmd_ptr(u32 * const desc, dma_addr_t ptr, int len,
u32 command)
{
append_cmd(desc, command | len);
@@ -157,7 +159,7 @@ static inline void append_cmd_ptr(u32 *desc, dma_addr_t ptr, int len,
}
/* Write length after pointer, rather than inside command */
-static inline void append_cmd_ptr_extlen(u32 *desc, dma_addr_t ptr,
+static inline void append_cmd_ptr_extlen(u32 * const desc, dma_addr_t ptr,
unsigned int len, u32 command)
{
append_cmd(desc, command);
@@ -166,7 +168,7 @@ static inline void append_cmd_ptr_extlen(u32 *desc, dma_addr_t ptr,
append_cmd(desc, len);
}
-static inline void append_cmd_data(u32 *desc, void *data, int len,
+static inline void append_cmd_data(u32 * const desc, void *data, int len,
u32 command)
{
append_cmd(desc, command | IMMEDIATE | len);
@@ -174,7 +176,7 @@ static inline void append_cmd_data(u32 *desc, void *data, int len,
}
#define APPEND_CMD_RET(cmd, op) \
-static inline u32 *append_##cmd(u32 *desc, u32 options) \
+static inline u32 *append_##cmd(u32 * const desc, u32 options) \
{ \
u32 *cmd = desc_end(desc); \
PRINT_POS; \
@@ -184,13 +186,13 @@ static inline u32 *append_##cmd(u32 *desc, u32 options) \
APPEND_CMD_RET(jump, JUMP)
APPEND_CMD_RET(move, MOVE)
-static inline void set_jump_tgt_here(u32 *desc, u32 *jump_cmd)
+static inline void set_jump_tgt_here(u32 * const desc, u32 *jump_cmd)
{
*jump_cmd = cpu_to_caam32(caam32_to_cpu(*jump_cmd) |
(desc_len(desc) - (jump_cmd - desc)));
}
-static inline void set_move_tgt_here(u32 *desc, u32 *move_cmd)
+static inline void set_move_tgt_here(u32 * const desc, u32 *move_cmd)
{
u32 val = caam32_to_cpu(*move_cmd);
@@ -200,7 +202,7 @@ static inline void set_move_tgt_here(u32 *desc, u32 *move_cmd)
}
#define APPEND_CMD(cmd, op) \
-static inline void append_##cmd(u32 *desc, u32 options) \
+static inline void append_##cmd(u32 * const desc, u32 options) \
{ \
PRINT_POS; \
append_cmd(desc, CMD_##op | options); \
@@ -208,7 +210,8 @@ static inline void append_##cmd(u32 *desc, u32 options) \
APPEND_CMD(operation, OPERATION)
#define APPEND_CMD_LEN(cmd, op) \
-static inline void append_##cmd(u32 *desc, unsigned int len, u32 options) \
+static inline void append_##cmd(u32 * const desc, unsigned int len, \
+ u32 options) \
{ \
PRINT_POS; \
append_cmd(desc, CMD_##op | len | options); \
@@ -220,8 +223,8 @@ APPEND_CMD_LEN(seq_fifo_load, SEQ_FIFO_LOAD)
APPEND_CMD_LEN(seq_fifo_store, SEQ_FIFO_STORE)
#define APPEND_CMD_PTR(cmd, op) \
-static inline void append_##cmd(u32 *desc, dma_addr_t ptr, unsigned int len, \
- u32 options) \
+static inline void append_##cmd(u32 * const desc, dma_addr_t ptr, \
+ unsigned int len, u32 options) \
{ \
PRINT_POS; \
append_cmd_ptr(desc, ptr, len, CMD_##op | options); \
@@ -231,8 +234,8 @@ APPEND_CMD_PTR(load, LOAD)
APPEND_CMD_PTR(fifo_load, FIFO_LOAD)
APPEND_CMD_PTR(fifo_store, FIFO_STORE)
-static inline void append_store(u32 *desc, dma_addr_t ptr, unsigned int len,
- u32 options)
+static inline void append_store(u32 * const desc, dma_addr_t ptr,
+ unsigned int len, u32 options)
{
u32 cmd_src;
@@ -249,7 +252,8 @@ static inline void append_store(u32 *desc, dma_addr_t ptr, unsigned int len,
}
#define APPEND_SEQ_PTR_INTLEN(cmd, op) \
-static inline void append_seq_##cmd##_ptr_intlen(u32 *desc, dma_addr_t ptr, \
+static inline void append_seq_##cmd##_ptr_intlen(u32 * const desc, \
+ dma_addr_t ptr, \
unsigned int len, \
u32 options) \
{ \
@@ -263,7 +267,7 @@ APPEND_SEQ_PTR_INTLEN(in, IN)
APPEND_SEQ_PTR_INTLEN(out, OUT)
#define APPEND_CMD_PTR_TO_IMM(cmd, op) \
-static inline void append_##cmd##_as_imm(u32 *desc, void *data, \
+static inline void append_##cmd##_as_imm(u32 * const desc, void *data, \
unsigned int len, u32 options) \
{ \
PRINT_POS; \
@@ -273,7 +277,7 @@ APPEND_CMD_PTR_TO_IMM(load, LOAD);
APPEND_CMD_PTR_TO_IMM(fifo_load, FIFO_LOAD);
#define APPEND_CMD_PTR_EXTLEN(cmd, op) \
-static inline void append_##cmd##_extlen(u32 *desc, dma_addr_t ptr, \
+static inline void append_##cmd##_extlen(u32 * const desc, dma_addr_t ptr, \
unsigned int len, u32 options) \
{ \
PRINT_POS; \
@@ -287,7 +291,7 @@ APPEND_CMD_PTR_EXTLEN(seq_out_ptr, SEQ_OUT_PTR)
* the size of its type
*/
#define APPEND_CMD_PTR_LEN(cmd, op, type) \
-static inline void append_##cmd(u32 *desc, dma_addr_t ptr, \
+static inline void append_##cmd(u32 * const desc, dma_addr_t ptr, \
type len, u32 options) \
{ \
PRINT_POS; \
@@ -304,7 +308,7 @@ APPEND_CMD_PTR_LEN(seq_out_ptr, SEQ_OUT_PTR, u32)
* from length of immediate data provided, e.g., split keys
*/
#define APPEND_CMD_PTR_TO_IMM2(cmd, op) \
-static inline void append_##cmd##_as_imm(u32 *desc, void *data, \
+static inline void append_##cmd##_as_imm(u32 * const desc, void *data, \
unsigned int data_len, \
unsigned int len, u32 options) \
{ \
@@ -315,7 +319,7 @@ static inline void append_##cmd##_as_imm(u32 *desc, void *data, \
APPEND_CMD_PTR_TO_IMM2(key, KEY);
#define APPEND_CMD_RAW_IMM(cmd, op, type) \
-static inline void append_##cmd##_imm_##type(u32 *desc, type immediate, \
+static inline void append_##cmd##_imm_##type(u32 * const desc, type immediate, \
u32 options) \
{ \
PRINT_POS; \
@@ -426,3 +430,64 @@ do { \
APPEND_MATH_IMM_u64(LSHIFT, desc, dest, src0, src1, data)
#define append_math_rshift_imm_u64(desc, dest, src0, src1, data) \
APPEND_MATH_IMM_u64(RSHIFT, desc, dest, src0, src1, data)
+
+/**
+ * struct alginfo - Container for algorithm details
+ * @algtype: algorithm selector; for valid values, see documentation of the
+ * functions where it is used.
+ * @keylen: length of the provided algorithm key, in bytes
+ * @keylen_pad: padded length of the provided algorithm key, in bytes
+ * @key_dma: dma (bus) address where the algorithm key resides; used when
+ * key_inline is false
+ * @key_virt: virtual address where the algorithm key resides; used when
+ * key_inline is true
+ * @key_inline: true - key can be inlined in the descriptor; false - key is
+ * referenced by the descriptor
+ */
+struct alginfo {
+ u32 algtype;
+ unsigned int keylen;
+ unsigned int keylen_pad;
+ union {
+ dma_addr_t key_dma;
+ void *key_virt;
+ };
+ bool key_inline;
+};
+
+/**
+ * desc_inline_query() - Provide indications on which data items can be inlined
+ * and which shall be referenced in a shared descriptor.
+ * @sd_base_len: Shared descriptor base length - bytes consumed by the commands,
+ * excluding the data items to be inlined (or corresponding
+ * pointer if an item is not inlined). Each cnstr_* function that
+ * generates descriptors should have a define specifying the
+ * corresponding length.
+ * @jd_len: Maximum length of the job descriptor(s) that will be used
+ * together with the shared descriptor.
+ * @data_len: Array of lengths of the data items that are candidates for
+ * inlining
+ * @inl_mask: 32-bit mask with bit x = 1 if data item x can be inlined, 0
+ * otherwise.
+ * @count: Number of data items (size of @data_len array); must be <= 32
+ *
+ * Return: 0 if all data items fit (inlined or referenced), negative value
+ * if not; on success, check @inl_mask to see which items can be inlined.
+ */
+static inline int desc_inline_query(unsigned int sd_base_len,
+ unsigned int jd_len, unsigned int *data_len,
+ u32 *inl_mask, unsigned int count)
+{
+ int rem_bytes = (int)(CAAM_DESC_BYTES_MAX - sd_base_len - jd_len);
+ unsigned int i;
+
+ *inl_mask = 0;
+ for (i = 0; (i < count) && (rem_bytes > 0); i++) {
+ if (rem_bytes - (int)(data_len[i] +
+ (count - i - 1) * CAAM_PTR_SZ) >= 0) {
+ rem_bytes -= data_len[i];
+ *inl_mask |= (1 << i);
+ } else {
+ rem_bytes -= CAAM_PTR_SZ;
+ }
+ }
+
+ return (rem_bytes >= 0) ? 0 : -1;
+}
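
A hedged usage sketch of desc_inline_query(), modeled on how an aead setkey
path might budget an authentication key and a cipher key (variable names are
assumptions; DESC_AEAD_ENC_LEN comes from caamalg_desc.h above and
DESC_JOB_IO_LEN is a caamalg.c define):

    unsigned int data_len[] = { adata->keylen_pad, cdata->keylen };
    u32 inl_mask;

    if (desc_inline_query(DESC_AEAD_ENC_LEN, DESC_JOB_IO_LEN, data_len,
                          &inl_mask, ARRAY_SIZE(data_len)) < 0)
            return -EINVAL;

    adata->key_inline = !!(inl_mask & 1);   /* bit 0: auth (split) key */
    cdata->key_inline = !!(inl_mask & 2);   /* bit 1: cipher key */
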
diff --git a/drivers/crypto/caam/error.c b/drivers/crypto/caam/error.c
index 33e41ea83fcc..79a0cc70717f 100644
--- a/drivers/crypto/caam/error.c
+++ b/drivers/crypto/caam/error.c
@@ -146,10 +146,9 @@ static void report_ccb_status(struct device *jrdev, const u32 status,
strlen(rng_err_id_list[err_id])) {
/* RNG-only error */
err_str = rng_err_id_list[err_id];
- } else if (err_id < ARRAY_SIZE(err_id_list))
+ } else {
err_str = err_id_list[err_id];
- else
- snprintf(err_err_code, sizeof(err_err_code), "%02x", err_id);
+ }
/*
* CCB ICV check failures are part of normal operation life;
diff --git a/drivers/crypto/caam/intern.h b/drivers/crypto/caam/intern.h
index 5d4c05074a5c..e2bcacc1a921 100644
--- a/drivers/crypto/caam/intern.h
+++ b/drivers/crypto/caam/intern.h
@@ -41,6 +41,7 @@ struct caam_drv_private_jr {
struct device *dev;
int ridx;
struct caam_job_ring __iomem *rregs; /* JobR's register space */
+ struct tasklet_struct irqtask;
int irq; /* One per queue */
/* Number of scatterlist crypt transforms active on the JobR */
diff --git a/drivers/crypto/caam/jr.c b/drivers/crypto/caam/jr.c
index 757c27f9953d..c8604dfadbf5 100644
--- a/drivers/crypto/caam/jr.c
+++ b/drivers/crypto/caam/jr.c
@@ -73,6 +73,8 @@ static int caam_jr_shutdown(struct device *dev)
ret = caam_reset_hw_jr(dev);
+ tasklet_kill(&jrp->irqtask);
+
/* Release interrupt */
free_irq(jrp->irq, dev);
@@ -128,7 +130,7 @@ static irqreturn_t caam_jr_interrupt(int irq, void *st_dev)
/*
* Check the output ring for ready responses, kick
- * the threaded irq if jobs done.
+ * tasklet if jobs done.
*/
irqstate = rd_reg32(&jrp->rregs->jrintstatus);
if (!irqstate)
@@ -150,13 +152,18 @@ static irqreturn_t caam_jr_interrupt(int irq, void *st_dev)
/* Have valid interrupt at this point, just ACK and trigger */
wr_reg32(&jrp->rregs->jrintstatus, irqstate);
- return IRQ_WAKE_THREAD;
+ preempt_disable();
+ tasklet_schedule(&jrp->irqtask);
+ preempt_enable();
+
+ return IRQ_HANDLED;
}
-static irqreturn_t caam_jr_threadirq(int irq, void *st_dev)
+/* Deferred service handler, run as interrupt-fired tasklet */
+static void caam_jr_dequeue(unsigned long devarg)
{
int hw_idx, sw_idx, i, head, tail;
- struct device *dev = st_dev;
+ struct device *dev = (struct device *)devarg;
struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
void (*usercall)(struct device *dev, u32 *desc, u32 status, void *arg);
u32 *userdesc, userstatus;
@@ -230,8 +237,6 @@ static irqreturn_t caam_jr_threadirq(int irq, void *st_dev)
/* reenable / unmask IRQs */
clrsetbits_32(&jrp->rregs->rconfig_lo, JRCFG_IMSK, 0);
-
- return IRQ_HANDLED;
}
/**
@@ -389,10 +394,11 @@ static int caam_jr_init(struct device *dev)
jrp = dev_get_drvdata(dev);
+ tasklet_init(&jrp->irqtask, caam_jr_dequeue, (unsigned long)dev);
+
/* Connect job ring interrupt handler. */
- error = request_threaded_irq(jrp->irq, caam_jr_interrupt,
- caam_jr_threadirq, IRQF_SHARED,
- dev_name(dev), dev);
+ error = request_irq(jrp->irq, caam_jr_interrupt, IRQF_SHARED,
+ dev_name(dev), dev);
if (error) {
dev_err(dev, "can't connect JobR %d interrupt (%d)\n",
jrp->ridx, jrp->irq);
@@ -454,6 +460,7 @@ out_free_inpring:
out_free_irq:
free_irq(jrp->irq, dev);
out_kill_deq:
+ tasklet_kill(&jrp->irqtask);
return error;
}
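
The conversion above follows the classic kernel deferred-work pattern; a
minimal standalone sketch of that pattern, assuming <linux/interrupt.h>
(names are illustrative, not from this patch):

    static struct tasklet_struct irqtask;

    /* Deferred handler: runs in softirq context after the hard IRQ */
    static void sketch_dequeue(unsigned long arg)
    {
            struct device *dev = (struct device *)arg;

            /* drain completed jobs, invoke user callbacks, unmask IRQs */
            dev_dbg(dev, "dequeue\n");
    }

    static irqreturn_t sketch_irq(int irq, void *data)
    {
            /* ACK hardware status here, then defer the heavy lifting */
            tasklet_schedule(&irqtask);
            return IRQ_HANDLED;
    }

At init time the tasklet is bound with tasklet_init(&irqtask, sketch_dequeue,
(unsigned long)dev), and tasklet_kill() is called on teardown after the IRQ
has been freed, exactly as the hunks above do for jrp->irqtask.
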
@@ -489,7 +496,7 @@ static int caam_jr_probe(struct platform_device *pdev)
return -ENOMEM;
}
- jrpriv->rregs = (struct caam_job_ring __force *)ctrl;
+ jrpriv->rregs = (struct caam_job_ring __iomem __force *)ctrl;
if (sizeof(dma_addr_t) == sizeof(u64))
if (of_device_is_compatible(nprop, "fsl,sec-v5.0-job-ring"))
diff --git a/drivers/crypto/caam/key_gen.c b/drivers/crypto/caam/key_gen.c
index e1eaf4ff9762..1bb2816a9b4d 100644
--- a/drivers/crypto/caam/key_gen.c
+++ b/drivers/crypto/caam/key_gen.c
@@ -10,6 +10,36 @@
#include "desc_constr.h"
#include "key_gen.h"
+/**
+ * split_key_len - Compute MDHA split key length for a given algorithm
+ * @hash: Hashing algorithm selection, one of OP_ALG_ALGSEL_* - MD5, SHA1,
+ * SHA224, SHA256, SHA384, SHA512.
+ *
+ * Return: MDHA split key length
+ */
+static inline u32 split_key_len(u32 hash)
+{
+ /* Sizes for MDHA pads (*not* keys): MD5, SHA1, 224, 256, 384, 512 */
+ static const u8 mdpadlen[] = { 16, 20, 32, 32, 64, 64 };
+ u32 idx;
+
+ idx = (hash & OP_ALG_ALGSEL_SUBMASK) >> OP_ALG_ALGSEL_SHIFT;
+
+ return (u32)(mdpadlen[idx] * 2);
+}
+
+/**
+ * split_key_pad_len - Compute MDHA split key pad length for a given algorithm
+ * @hash: Hashing algorithm selection, one of OP_ALG_ALGSEL_* - MD5, SHA1,
+ * SHA224, SHA256, SHA384, SHA512.
+ *
+ * Return: MDHA split key pad length
+ */
+static inline u32 split_key_pad_len(u32 hash)
+{
+ return ALIGN(split_key_len(hash), 16);
+}
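
A worked instance of the two helpers above:

    /*
     * For hash == OP_ALG_ALGSEL_SHA256 the submask index is 3, so
     * mdpadlen[3] == 32 and split_key_len() returns 64 bytes;
     * split_key_pad_len() then returns ALIGN(64, 16) == 64 bytes.
     */
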
+
void split_key_done(struct device *dev, u32 *desc, u32 err,
void *context)
{
@@ -41,15 +71,29 @@ Split key generation-----------------------------------------------
[06] 0x64260028 fifostr: class2 mdsplit-jdk len=40
@0xffe04000
*/
-int gen_split_key(struct device *jrdev, u8 *key_out, int split_key_len,
- int split_key_pad_len, const u8 *key_in, u32 keylen,
- u32 alg_op)
+int gen_split_key(struct device *jrdev, u8 *key_out,
+ struct alginfo * const adata, const u8 *key_in, u32 keylen,
+ int max_keylen)
{
u32 *desc;
struct split_key_result result;
dma_addr_t dma_addr_in, dma_addr_out;
int ret = -ENOMEM;
+ adata->keylen = split_key_len(adata->algtype & OP_ALG_ALGSEL_MASK);
+ adata->keylen_pad = split_key_pad_len(adata->algtype &
+ OP_ALG_ALGSEL_MASK);
+
+#ifdef DEBUG
+ dev_err(jrdev, "split keylen %d split keylen padded %d\n",
+ adata->keylen, adata->keylen_pad);
+ print_hex_dump(KERN_ERR, "ctx.key@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, key_in, keylen, 1);
+#endif
+
+ if (adata->keylen_pad > max_keylen)
+ return -EINVAL;
+
desc = kmalloc(CAAM_CMD_SZ * 6 + CAAM_PTR_SZ * 2, GFP_KERNEL | GFP_DMA);
if (!desc) {
dev_err(jrdev, "unable to allocate key input memory\n");
@@ -63,7 +107,7 @@ int gen_split_key(struct device *jrdev, u8 *key_out, int split_key_len,
goto out_free;
}
- dma_addr_out = dma_map_single(jrdev, key_out, split_key_pad_len,
+ dma_addr_out = dma_map_single(jrdev, key_out, adata->keylen_pad,
DMA_FROM_DEVICE);
if (dma_mapping_error(jrdev, dma_addr_out)) {
dev_err(jrdev, "unable to map key output memory\n");
@@ -74,7 +118,9 @@ int gen_split_key(struct device *jrdev, u8 *key_out, int split_key_len,
append_key(desc, dma_addr_in, keylen, CLASS_2 | KEY_DEST_CLASS_REG);
/* Sets MDHA up into an HMAC-INIT */
- append_operation(desc, alg_op | OP_ALG_DECRYPT | OP_ALG_AS_INIT);
+ append_operation(desc, (adata->algtype & OP_ALG_ALGSEL_MASK) |
+ OP_ALG_AAI_HMAC | OP_TYPE_CLASS2_ALG | OP_ALG_DECRYPT |
+ OP_ALG_AS_INIT);
/*
* do a FIFO_LOAD of zero, this will trigger the internal key expansion
@@ -87,7 +133,7 @@ int gen_split_key(struct device *jrdev, u8 *key_out, int split_key_len,
* FIFO_STORE with the explicit split-key content store
* (0x26 output type)
*/
- append_fifo_store(desc, dma_addr_out, split_key_len,
+ append_fifo_store(desc, dma_addr_out, adata->keylen,
LDST_CLASS_2_CCB | FIFOST_TYPE_SPLIT_KEK);
#ifdef DEBUG
@@ -108,11 +154,11 @@ int gen_split_key(struct device *jrdev, u8 *key_out, int split_key_len,
#ifdef DEBUG
print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, key_out,
- split_key_pad_len, 1);
+ adata->keylen_pad, 1);
#endif
}
- dma_unmap_single(jrdev, dma_addr_out, split_key_pad_len,
+ dma_unmap_single(jrdev, dma_addr_out, adata->keylen_pad,
DMA_FROM_DEVICE);
out_unmap_in:
dma_unmap_single(jrdev, dma_addr_in, keylen, DMA_TO_DEVICE);
diff --git a/drivers/crypto/caam/key_gen.h b/drivers/crypto/caam/key_gen.h
index c5588f6d8109..4628f389eb64 100644
--- a/drivers/crypto/caam/key_gen.h
+++ b/drivers/crypto/caam/key_gen.h
@@ -12,6 +12,6 @@ struct split_key_result {
void split_key_done(struct device *dev, u32 *desc, u32 err, void *context);
-int gen_split_key(struct device *jrdev, u8 *key_out, int split_key_len,
- int split_key_pad_len, const u8 *key_in, u32 keylen,
- u32 alg_op);
+int gen_split_key(struct device *jrdev, u8 *key_out,
+ struct alginfo * const adata, const u8 *key_in, u32 keylen,
+ int max_keylen);
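
A hedged caller sketch of the reworked interface, modeled on the caamhash.c
hunk earlier in this patch (jrdev, ctx, key and keylen are assumed caller
state); gen_split_key() now derives keylen and keylen_pad and stores them in
@adata itself:

    struct alginfo adata = {
            .algtype = OP_TYPE_CLASS2_ALG | OP_ALG_ALGSEL_SHA256,
    };
    int ret;

    ret = gen_split_key(jrdev, ctx->key, &adata, key, keylen,
                        CAAM_MAX_HASH_KEY_SIZE);
    if (ret)
            return ret;
    /* adata.keylen / adata.keylen_pad now describe the split key */
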
diff --git a/drivers/crypto/caam/sg_sw_sec4.h b/drivers/crypto/caam/sg_sw_sec4.h
index 41cd5a356d05..6afa20c4a013 100644
--- a/drivers/crypto/caam/sg_sw_sec4.h
+++ b/drivers/crypto/caam/sg_sw_sec4.h
@@ -7,7 +7,11 @@
#include "regs.h"
-struct sec4_sg_entry;
+struct sec4_sg_entry {
+ u64 ptr;
+ u32 len;
+ u32 bpid_offset;
+};
/*
* convert single dma address to h/w link table format
diff --git a/drivers/crypto/ccp/ccp-dev-v3.c b/drivers/crypto/ccp/ccp-dev-v3.c
index 8d2dbacc6161..7bc09989e18a 100644
--- a/drivers/crypto/ccp/ccp-dev-v3.c
+++ b/drivers/crypto/ccp/ccp-dev-v3.c
@@ -404,10 +404,6 @@ static int ccp_init(struct ccp_device *ccp)
goto e_pool;
}
- /* Initialize the queues used to wait for KSB space and suspend */
- init_waitqueue_head(&ccp->sb_queue);
- init_waitqueue_head(&ccp->suspend_queue);
-
dev_dbg(dev, "Starting threads...\n");
/* Create a kthread for each queue */
for (i = 0; i < ccp->cmd_q_count; i++) {
diff --git a/drivers/crypto/ccp/ccp-dev-v5.c b/drivers/crypto/ccp/ccp-dev-v5.c
index faf3cb3ddce2..e2ce8190ecc9 100644
--- a/drivers/crypto/ccp/ccp-dev-v5.c
+++ b/drivers/crypto/ccp/ccp-dev-v5.c
@@ -21,6 +21,12 @@
#include "ccp-dev.h"
+/* Allocate the requested number of contiguous LSB slots
+ * from the LSB bitmap. Look in the private range for this
+ * queue first; failing that, check the public area.
+ * If no space is available, wait around.
+ * Return: first slot number
+ */
static u32 ccp_lsb_alloc(struct ccp_cmd_queue *cmd_q, unsigned int count)
{
struct ccp_device *ccp;
@@ -50,7 +56,7 @@ static u32 ccp_lsb_alloc(struct ccp_cmd_queue *cmd_q, unsigned int count)
bitmap_set(ccp->lsbmap, start, count);
mutex_unlock(&ccp->sb_mutex);
- return start * LSB_ITEM_SIZE;
+ return start;
}
ccp->sb_avail = 0;
@@ -63,17 +69,18 @@ static u32 ccp_lsb_alloc(struct ccp_cmd_queue *cmd_q, unsigned int count)
}
}
+/* Free a number of LSB slots from the bitmap, starting at
+ * the given slot number.
+ */
static void ccp_lsb_free(struct ccp_cmd_queue *cmd_q, unsigned int start,
unsigned int count)
{
- int lsbno = start / LSB_SIZE;
-
if (!start)
return;
- if (cmd_q->lsb == lsbno) {
+ if (cmd_q->lsb == start) {
/* An entry from the private LSB */
- bitmap_clear(cmd_q->lsbmap, start % LSB_SIZE, count);
+ bitmap_clear(cmd_q->lsbmap, start, count);
} else {
/* From the shared LSBs */
struct ccp_device *ccp = cmd_q->ccp;
@@ -396,7 +403,7 @@ static int ccp5_perform_rsa(struct ccp_op *op)
CCP5_CMD_PROT(&desc) = 0;
function.raw = 0;
- CCP_RSA_SIZE(&function) = op->u.rsa.mod_size;
+ CCP_RSA_SIZE(&function) = op->u.rsa.mod_size >> 3;
CCP5_CMD_FUNCTION(&desc) = function.raw;
CCP5_CMD_LEN(&desc) = op->u.rsa.input_len;
@@ -411,10 +418,10 @@ static int ccp5_perform_rsa(struct ccp_op *op)
CCP5_CMD_DST_HI(&desc) = ccp_addr_hi(&op->dst.u.dma);
CCP5_CMD_DST_MEM(&desc) = CCP_MEMTYPE_SYSTEM;
- /* Key (Exponent) is in external memory */
- CCP5_CMD_KEY_LO(&desc) = ccp_addr_lo(&op->exp.u.dma);
- CCP5_CMD_KEY_HI(&desc) = ccp_addr_hi(&op->exp.u.dma);
- CCP5_CMD_KEY_MEM(&desc) = CCP_MEMTYPE_SYSTEM;
+ /* Exponent is in LSB memory */
+ CCP5_CMD_KEY_LO(&desc) = op->sb_key * LSB_ITEM_SIZE;
+ CCP5_CMD_KEY_HI(&desc) = 0;
+ CCP5_CMD_KEY_MEM(&desc) = CCP_MEMTYPE_SB;
return ccp5_do_cmd(&desc, op->cmd_q);
}
@@ -751,9 +758,6 @@ static int ccp5_init(struct ccp_device *ccp)
goto e_pool;
}
- /* Initialize the queue used to suspend */
- init_waitqueue_head(&ccp->suspend_queue);
-
dev_dbg(dev, "Loading LSB map...\n");
/* Copy the private LSB mask to the public registers */
status_lo = ioread32(ccp->io_regs + LSB_PRIVATE_MASK_LO_OFFSET);
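ccp_lsb_alloc() and ccp_lsb_free() now deal in slot numbers, leaving the slot-to-byte conversion (slot * LSB_ITEM_SIZE) to the point of use, as in ccp5_perform_rsa() above. The core of such a contiguous allocation maps directly onto the kernel bitmap API; a minimal sketch (the helper name and the immediate -ENOSPC are illustrative; the driver sleeps and retries instead):

    #include <linux/bitmap.h>

    static int lsb_alloc_slots(unsigned long *map, unsigned int map_bits,
                               unsigned int count)
    {
        unsigned long start;

        start = bitmap_find_next_zero_area(map, map_bits, 0, count, 0);
        if (start >= map_bits)
            return -ENOSPC;    /* driver: wait for slots to be freed */
        bitmap_set(map, start, count);
        return start;          /* slot number, not a byte offset */
    }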
diff --git a/drivers/crypto/ccp/ccp-dev.c b/drivers/crypto/ccp/ccp-dev.c
index cafa633aae10..511ab042b5e7 100644
--- a/drivers/crypto/ccp/ccp-dev.c
+++ b/drivers/crypto/ccp/ccp-dev.c
@@ -41,7 +41,7 @@ struct ccp_tasklet_data {
};
/* Human-readable error strings */
-char *ccp_error_codes[] = {
+static char *ccp_error_codes[] = {
"",
"ERR 01: ILLEGAL_ENGINE",
"ERR 02: ILLEGAL_KEY_ID",
@@ -478,6 +478,10 @@ struct ccp_device *ccp_alloc_struct(struct device *dev)
ccp->sb_count = KSB_COUNT;
ccp->sb_start = 0;
+ /* Initialize the wait queues */
+ init_waitqueue_head(&ccp->sb_queue);
+ init_waitqueue_head(&ccp->suspend_queue);
+
ccp->ord = ccp_increment_unit_ordinal();
snprintf(ccp->name, MAX_CCP_NAME_LEN, "ccp-%u", ccp->ord);
snprintf(ccp->rngname, MAX_CCP_NAME_LEN, "ccp-%u-rng", ccp->ord);
diff --git a/drivers/crypto/ccp/ccp-dev.h b/drivers/crypto/ccp/ccp-dev.h
index da5f4a678083..830f35e6005f 100644
--- a/drivers/crypto/ccp/ccp-dev.h
+++ b/drivers/crypto/ccp/ccp-dev.h
@@ -278,7 +278,7 @@ struct ccp_cmd_queue {
/* Private LSB that is assigned to this queue, or -1 if none.
* Bitmap for my private LSB, unused otherwise
*/
- unsigned int lsb;
+ int lsb;
DECLARE_BITMAP(lsbmap, PLSB_MAP_SIZE);
/* Queue processing thread */
@@ -515,7 +515,6 @@ struct ccp_op {
struct ccp_passthru_op passthru;
struct ccp_ecc_op ecc;
} u;
- struct ccp_mem key;
};
static inline u32 ccp_addr_lo(struct ccp_dma_info *info)
@@ -541,23 +540,23 @@ static inline u32 ccp_addr_hi(struct ccp_dma_info *info)
* word 7: upper 16 bits of key pointer; key memory type
*/
struct dword0 {
- __le32 soc:1;
- __le32 ioc:1;
- __le32 rsvd1:1;
- __le32 init:1;
- __le32 eom:1; /* AES/SHA only */
- __le32 function:15;
- __le32 engine:4;
- __le32 prot:1;
- __le32 rsvd2:7;
+ unsigned int soc:1;
+ unsigned int ioc:1;
+ unsigned int rsvd1:1;
+ unsigned int init:1;
+ unsigned int eom:1; /* AES/SHA only */
+ unsigned int function:15;
+ unsigned int engine:4;
+ unsigned int prot:1;
+ unsigned int rsvd2:7;
};
struct dword3 {
- __le32 src_hi:16;
- __le32 src_mem:2;
- __le32 lsb_cxt_id:8;
- __le32 rsvd1:5;
- __le32 fixed:1;
+ unsigned int src_hi:16;
+ unsigned int src_mem:2;
+ unsigned int lsb_cxt_id:8;
+ unsigned int rsvd1:5;
+ unsigned int fixed:1;
};
union dword4 {
@@ -567,18 +566,18 @@ union dword4 {
union dword5 {
struct {
- __le32 dst_hi:16;
- __le32 dst_mem:2;
- __le32 rsvd1:13;
- __le32 fixed:1;
+ unsigned int dst_hi:16;
+ unsigned int dst_mem:2;
+ unsigned int rsvd1:13;
+ unsigned int fixed:1;
} fields;
__le32 sha_len_hi;
};
struct dword7 {
- __le32 key_hi:16;
- __le32 key_mem:2;
- __le32 rsvd1:14;
+ unsigned int key_hi:16;
+ unsigned int key_mem:2;
+ unsigned int rsvd1:14;
};
struct ccp5_desc {
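Bitfields have no defined byte order, so declaring them with a restricted endian type such as __le32 is misleading, and sparse complains about it; plain unsigned int is the conventional base type, with byte swapping done only when whole words are read or written. A hedged compile-time check that each descriptor word still packs into 32 bits (BUILD_BUG_ON lives in <linux/build_bug.h> on newer trees, <linux/kernel.h> on older ones):

    static inline void ccp5_desc_layout_check(void)
    {
        BUILD_BUG_ON(sizeof(struct dword0) != sizeof(u32));
        BUILD_BUG_ON(sizeof(struct dword3) != sizeof(u32));
        BUILD_BUG_ON(sizeof(union dword4)  != sizeof(u32));
        BUILD_BUG_ON(sizeof(union dword5)  != sizeof(u32));
        BUILD_BUG_ON(sizeof(struct dword7) != sizeof(u32));
    }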
diff --git a/drivers/crypto/chelsio/Kconfig b/drivers/crypto/chelsio/Kconfig
index 4ce67fb9a880..3e104f5aa0c2 100644
--- a/drivers/crypto/chelsio/Kconfig
+++ b/drivers/crypto/chelsio/Kconfig
@@ -4,6 +4,7 @@ config CRYPTO_DEV_CHELSIO
select CRYPTO_SHA1
select CRYPTO_SHA256
select CRYPTO_SHA512
+ select CRYPTO_AUTHENC
---help---
The Chelsio Crypto Co-processor driver for T6 adapters.
diff --git a/drivers/crypto/chelsio/chcr_algo.c b/drivers/crypto/chelsio/chcr_algo.c
index e4ddb921d7b3..2ed1e24b44a8 100644
--- a/drivers/crypto/chelsio/chcr_algo.c
+++ b/drivers/crypto/chelsio/chcr_algo.c
@@ -54,6 +54,12 @@
#include <crypto/algapi.h>
#include <crypto/hash.h>
#include <crypto/sha.h>
+#include <crypto/authenc.h>
+#include <crypto/internal/aead.h>
+#include <crypto/null.h>
+#include <crypto/internal/skcipher.h>
+#include <crypto/aead.h>
+#include <crypto/scatterwalk.h>
#include <crypto/internal/hash.h>
#include "t4fw_api.h"
@@ -62,6 +68,11 @@
#include "chcr_algo.h"
#include "chcr_crypto.h"
+static inline struct chcr_aead_ctx *AEAD_CTX(struct chcr_context *ctx)
+{
+ return ctx->crypto_ctx->aeadctx;
+}
+
static inline struct ablk_ctx *ABLK_CTX(struct chcr_context *ctx)
{
return ctx->crypto_ctx->ablkctx;
@@ -72,6 +83,16 @@ static inline struct hmac_ctx *HMAC_CTX(struct chcr_context *ctx)
return ctx->crypto_ctx->hmacctx;
}
+static inline struct chcr_gcm_ctx *GCM_CTX(struct chcr_aead_ctx *gctx)
+{
+ return gctx->ctx->gcm;
+}
+
+static inline struct chcr_authenc_ctx *AUTHENC_CTX(struct chcr_aead_ctx *gctx)
+{
+ return gctx->ctx->authenc;
+}
+
static inline struct uld_ctx *ULD_CTX(struct chcr_context *ctx)
{
return ctx->dev->u_ctx;
@@ -94,12 +115,37 @@ static inline unsigned int sgl_len(unsigned int n)
return (3 * n) / 2 + (n & 1) + 2;
}
+static void chcr_verify_tag(struct aead_request *req, u8 *input, int *err)
+{
+ u8 temp[SHA512_DIGEST_SIZE];
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ int authsize = crypto_aead_authsize(tfm);
+ struct cpl_fw6_pld *fw6_pld;
+ int cmp = 0;
+
+ fw6_pld = (struct cpl_fw6_pld *)input;
+ if ((get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106) ||
+ (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_GCM)) {
+ cmp = memcmp(&fw6_pld->data[2], (fw6_pld + 1), authsize);
+ } else {
+ sg_pcopy_to_buffer(req->src, sg_nents(req->src), temp,
+ authsize, req->assoclen +
+ req->cryptlen - authsize);
+ cmp = memcmp(temp, (fw6_pld + 1), authsize);
+ }
+ if (cmp)
+ *err = -EBADMSG;
+ else
+ *err = 0;
+}
+
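chcr_verify_tag() above compares the received ICV with memcmp(), whose early exit leaks how many leading bytes matched. This is not what the patch does, but the crypto API offers a constant-time alternative; a sketch of the final comparison using crypto_memneq() from <crypto/algapi.h>:

    /* Sketch only: constant-time variant of the tag comparison. */
    static int chcr_tag_neq(const u8 *computed, const u8 *received,
                            unsigned int authsize)
    {
        return crypto_memneq(computed, received, authsize) ? -EBADMSG : 0;
    }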
/*
* chcr_handle_resp - Unmap the DMA buffers associated with the request
* @req: crypto request
*/
int chcr_handle_resp(struct crypto_async_request *req, unsigned char *input,
- int error_status)
+ int err)
{
struct crypto_tfm *tfm = req->tfm;
struct chcr_context *ctx = crypto_tfm_ctx(tfm);
@@ -109,17 +155,33 @@ int chcr_handle_resp(struct crypto_async_request *req, unsigned char *input,
unsigned int digestsize, updated_digestsize;
switch (tfm->__crt_alg->cra_flags & CRYPTO_ALG_TYPE_MASK) {
+ case CRYPTO_ALG_TYPE_AEAD:
+ ctx_req.req.aead_req = (struct aead_request *)req;
+ ctx_req.ctx.reqctx = aead_request_ctx(ctx_req.req.aead_req);
+ dma_unmap_sg(&u_ctx->lldi.pdev->dev, ctx_req.req.aead_req->dst,
+ ctx_req.ctx.reqctx->dst_nents, DMA_FROM_DEVICE);
+ if (ctx_req.ctx.reqctx->skb) {
+ kfree_skb(ctx_req.ctx.reqctx->skb);
+ ctx_req.ctx.reqctx->skb = NULL;
+ }
+ if (ctx_req.ctx.reqctx->verify == VERIFY_SW) {
+ chcr_verify_tag(ctx_req.req.aead_req, input,
+ &err);
+ ctx_req.ctx.reqctx->verify = VERIFY_HW;
+ }
+ break;
+
case CRYPTO_ALG_TYPE_BLKCIPHER:
ctx_req.req.ablk_req = (struct ablkcipher_request *)req;
ctx_req.ctx.ablk_ctx =
ablkcipher_request_ctx(ctx_req.req.ablk_req);
- if (!error_status) {
+ if (!err) {
fw6_pld = (struct cpl_fw6_pld *)input;
memcpy(ctx_req.req.ablk_req->info, &fw6_pld->data[2],
AES_BLOCK_SIZE);
}
dma_unmap_sg(&u_ctx->lldi.pdev->dev, ctx_req.req.ablk_req->dst,
- ABLK_CTX(ctx)->dst_nents, DMA_FROM_DEVICE);
+ ctx_req.ctx.ablk_ctx->dst_nents, DMA_FROM_DEVICE);
if (ctx_req.ctx.ablk_ctx->skb) {
kfree_skb(ctx_req.ctx.ablk_ctx->skb);
ctx_req.ctx.ablk_ctx->skb = NULL;
@@ -138,8 +200,10 @@ int chcr_handle_resp(struct crypto_async_request *req, unsigned char *input,
updated_digestsize = SHA256_DIGEST_SIZE;
else if (digestsize == SHA384_DIGEST_SIZE)
updated_digestsize = SHA512_DIGEST_SIZE;
- if (ctx_req.ctx.ahash_ctx->skb)
+ if (ctx_req.ctx.ahash_ctx->skb) {
+ kfree_skb(ctx_req.ctx.ahash_ctx->skb);
ctx_req.ctx.ahash_ctx->skb = NULL;
+ }
if (ctx_req.ctx.ahash_ctx->result == 1) {
ctx_req.ctx.ahash_ctx->result = 0;
memcpy(ctx_req.req.ahash_req->result, input +
@@ -150,11 +214,9 @@ int chcr_handle_resp(struct crypto_async_request *req, unsigned char *input,
sizeof(struct cpl_fw6_pld),
updated_digestsize);
}
- kfree(ctx_req.ctx.ahash_ctx->dummy_payload_ptr);
- ctx_req.ctx.ahash_ctx->dummy_payload_ptr = NULL;
break;
}
- return 0;
+ return err;
}
/*
@@ -178,40 +240,81 @@ static inline unsigned int calc_tx_flits_ofld(const struct sk_buff *skb)
return flits + sgl_len(cnt);
}
-static struct shash_desc *chcr_alloc_shash(unsigned int ds)
+static inline void get_aes_decrypt_key(unsigned char *dec_key,
+ const unsigned char *key,
+ unsigned int keylength)
+{
+ u32 temp;
+ u32 w_ring[MAX_NK];
+ int i, j, k;
+ u8 nr, nk;
+
+ switch (keylength) {
+ case AES_KEYLENGTH_128BIT:
+ nk = KEYLENGTH_4BYTES;
+ nr = NUMBER_OF_ROUNDS_10;
+ break;
+ case AES_KEYLENGTH_192BIT:
+ nk = KEYLENGTH_6BYTES;
+ nr = NUMBER_OF_ROUNDS_12;
+ break;
+ case AES_KEYLENGTH_256BIT:
+ nk = KEYLENGTH_8BYTES;
+ nr = NUMBER_OF_ROUNDS_14;
+ break;
+ default:
+ return;
+ }
+ for (i = 0; i < nk; i++)
+ w_ring[i] = be32_to_cpu(*(u32 *)&key[4 * i]);
+
+ i = 0;
+ temp = w_ring[nk - 1];
+ while (i + nk < (nr + 1) * 4) {
+ if (!(i % nk)) {
+ /* RotWord(temp) */
+ temp = (temp << 8) | (temp >> 24);
+ temp = aes_ks_subword(temp);
+ temp ^= round_constant[i / nk];
+ } else if (nk == 8 && (i % 4 == 0)) {
+ temp = aes_ks_subword(temp);
+ }
+ w_ring[i % nk] ^= temp;
+ temp = w_ring[i % nk];
+ i++;
+ }
+ i--;
+ for (k = 0, j = i % nk; k < nk; k++) {
+ *((u32 *)dec_key + k) = htonl(w_ring[j]);
+ j--;
+ if (j < 0)
+ j += nk;
+ }
+}
+
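get_aes_decrypt_key() runs the FIPS-197 key expansion (RotWord/SubWord plus the round constants) and emits the last Nk words of the schedule in reverse order, which is the round-key material the engine consumes for decryption. The design choice in this patch is to pay that cost once per key rather than once per request; later hunks cache the result in ablkctx->rrkey at setkey time:

    /* From the setkey path below; keylen is in bytes, hence keylen << 3 bits. */
    memcpy(ablkctx->key, key, keylen);
    ablkctx->enckey_len = keylen;
    get_aes_decrypt_key(ablkctx->rrkey, ablkctx->key, keylen << 3);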
+static struct crypto_shash *chcr_alloc_shash(unsigned int ds)
{
struct crypto_shash *base_hash = NULL;
- struct shash_desc *desc;
switch (ds) {
case SHA1_DIGEST_SIZE:
- base_hash = crypto_alloc_shash("sha1-generic", 0, 0);
+ base_hash = crypto_alloc_shash("sha1", 0, 0);
break;
case SHA224_DIGEST_SIZE:
- base_hash = crypto_alloc_shash("sha224-generic", 0, 0);
+ base_hash = crypto_alloc_shash("sha224", 0, 0);
break;
case SHA256_DIGEST_SIZE:
- base_hash = crypto_alloc_shash("sha256-generic", 0, 0);
+ base_hash = crypto_alloc_shash("sha256", 0, 0);
break;
case SHA384_DIGEST_SIZE:
- base_hash = crypto_alloc_shash("sha384-generic", 0, 0);
+ base_hash = crypto_alloc_shash("sha384", 0, 0);
break;
case SHA512_DIGEST_SIZE:
- base_hash = crypto_alloc_shash("sha512-generic", 0, 0);
+ base_hash = crypto_alloc_shash("sha512", 0, 0);
break;
}
- if (IS_ERR(base_hash)) {
- pr_err("Can not allocate sha-generic algo.\n");
- return (void *)base_hash;
- }
- desc = kmalloc(sizeof(*desc) + crypto_shash_descsize(base_hash),
- GFP_KERNEL);
- if (!desc)
- return ERR_PTR(-ENOMEM);
- desc->tfm = base_hash;
- desc->flags = crypto_shash_get_flags(base_hash);
- return desc;
+ return base_hash;
}
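Dropping the "-generic" suffix lets crypto_alloc_shash() resolve the highest-priority registered implementation (an accelerated one where available) instead of pinning the reference C code, and the descriptor that used to be kmalloc'd alongside the tfm is now built on the caller's stack. A minimal self-contained sketch of the resulting pattern (function name hypothetical):

    #include <crypto/hash.h>

    static int sha256_digest_once(const u8 *data, unsigned int len, u8 *out)
    {
        struct crypto_shash *tfm = crypto_alloc_shash("sha256", 0, 0);
        int err;

        if (IS_ERR(tfm))
            return PTR_ERR(tfm);
        {
            SHASH_DESC_ON_STACK(desc, tfm);

            desc->tfm = tfm;
            err = crypto_shash_digest(desc, data, len, out);
        }
        crypto_free_shash(tfm);
        return err;
    }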
static int chcr_compute_partial_hash(struct shash_desc *desc,
@@ -279,31 +382,18 @@ static inline int is_hmac(struct crypto_tfm *tfm)
struct chcr_alg_template *chcr_crypto_alg =
container_of(__crypto_ahash_alg(alg), struct chcr_alg_template,
alg.hash);
- if ((chcr_crypto_alg->type & CRYPTO_ALG_SUB_TYPE_MASK) ==
- CRYPTO_ALG_SUB_TYPE_HASH_HMAC)
+ if (chcr_crypto_alg->type == CRYPTO_ALG_TYPE_HMAC)
return 1;
return 0;
}
-static inline unsigned int ch_nents(struct scatterlist *sg,
- unsigned int *total_size)
-{
- unsigned int nents;
-
- for (nents = 0, *total_size = 0; sg; sg = sg_next(sg)) {
- nents++;
- *total_size += sg->length;
- }
- return nents;
-}
-
static void write_phys_cpl(struct cpl_rx_phys_dsgl *phys_cpl,
struct scatterlist *sg,
struct phys_sge_parm *sg_param)
{
struct phys_sge_pairs *to;
- unsigned int out_buf_size = sg_param->obsize;
- unsigned int nents = sg_param->nents, i, j, tot_len = 0;
+ int out_buf_size = sg_param->obsize;
+ unsigned int nents = sg_param->nents, i, j = 0;
phys_cpl->op_to_tid = htonl(CPL_RX_PHYS_DSGL_OPCODE_V(CPL_RX_PHYS_DSGL)
| CPL_RX_PHYS_DSGL_ISRDMA_V(0));
@@ -321,25 +411,24 @@ static void write_phys_cpl(struct cpl_rx_phys_dsgl *phys_cpl,
sizeof(struct cpl_rx_phys_dsgl));
for (i = 0; nents; to++) {
- for (j = i; (nents && (j < (8 + i))); j++, nents--) {
- to->len[j] = htons(sg->length);
+ for (j = 0; j < 8 && nents; j++, nents--) {
+ out_buf_size -= sg_dma_len(sg);
+ to->len[j] = htons(sg_dma_len(sg));
to->addr[j] = cpu_to_be64(sg_dma_address(sg));
- if (out_buf_size) {
- if (tot_len + sg_dma_len(sg) >= out_buf_size) {
- to->len[j] = htons(out_buf_size -
- tot_len);
- return;
- }
- tot_len += sg_dma_len(sg);
- }
sg = sg_next(sg);
}
}
+ if (out_buf_size) {
+ j--;
+ to--;
+ to->len[j] = htons(ntohs(to->len[j]) + (out_buf_size));
+ }
}
-static inline unsigned
-int map_writesg_phys_cpl(struct device *dev, struct cpl_rx_phys_dsgl *phys_cpl,
- struct scatterlist *sg, struct phys_sge_parm *sg_param)
+static inline int map_writesg_phys_cpl(struct device *dev,
+ struct cpl_rx_phys_dsgl *phys_cpl,
+ struct scatterlist *sg,
+ struct phys_sge_parm *sg_param)
{
if (!sg || !sg_param->nents)
return 0;
@@ -353,6 +442,14 @@ int map_writesg_phys_cpl(struct device *dev, struct cpl_rx_phys_dsgl *phys_cpl,
return 0;
}
+static inline int get_aead_subtype(struct crypto_aead *aead)
+{
+ struct aead_alg *alg = crypto_aead_alg(aead);
+ struct chcr_alg_template *chcr_crypto_alg =
+ container_of(alg, struct chcr_alg_template, alg.aead);
+ return chcr_crypto_alg->type & CRYPTO_ALG_SUB_TYPE_MASK;
+}
+
static inline int get_cryptoalg_subtype(struct crypto_tfm *tfm)
{
struct crypto_alg *alg = tfm->__crt_alg;
@@ -362,8 +459,23 @@ static inline int get_cryptoalg_subtype(struct crypto_tfm *tfm)
return chcr_crypto_alg->type & CRYPTO_ALG_SUB_TYPE_MASK;
}
+static inline void write_buffer_to_skb(struct sk_buff *skb,
+ unsigned int *frags,
+ char *bfr,
+ u8 bfr_len)
+{
+ skb->len += bfr_len;
+ skb->data_len += bfr_len;
+ skb->truesize += bfr_len;
+ get_page(virt_to_page(bfr));
+ skb_fill_page_desc(skb, *frags, virt_to_page(bfr),
+ offset_in_page(bfr), bfr_len);
+ (*frags)++;
+}
+
static inline void
-write_sg_data_page_desc(struct sk_buff *skb, unsigned int *frags,
+write_sg_to_skb(struct sk_buff *skb, unsigned int *frags,
struct scatterlist *sg, unsigned int count)
{
struct page *spage;
@@ -372,8 +484,9 @@ write_sg_data_page_desc(struct sk_buff *skb, unsigned int *frags,
skb->len += count;
skb->data_len += count;
skb->truesize += count;
+
while (count > 0) {
- if (sg && (!(sg->length)))
+ if (!sg || (!(sg->length)))
break;
spage = sg_page(sg);
get_page(spage);
@@ -389,29 +502,25 @@ static int generate_copy_rrkey(struct ablk_ctx *ablkctx,
struct _key_ctx *key_ctx)
{
if (ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC) {
- get_aes_decrypt_key(key_ctx->key, ablkctx->key,
- ablkctx->enckey_len << 3);
- memset(key_ctx->key + ablkctx->enckey_len, 0,
- CHCR_AES_MAX_KEY_LEN - ablkctx->enckey_len);
+ memcpy(key_ctx->key, ablkctx->rrkey, ablkctx->enckey_len);
} else {
memcpy(key_ctx->key,
ablkctx->key + (ablkctx->enckey_len >> 1),
ablkctx->enckey_len >> 1);
- get_aes_decrypt_key(key_ctx->key + (ablkctx->enckey_len >> 1),
- ablkctx->key, ablkctx->enckey_len << 2);
+ memcpy(key_ctx->key + (ablkctx->enckey_len >> 1),
+ ablkctx->rrkey, ablkctx->enckey_len >> 1);
}
return 0;
}
static inline void create_wreq(struct chcr_context *ctx,
- struct fw_crypto_lookaside_wr *wreq,
+ struct chcr_wr *chcr_req,
void *req, struct sk_buff *skb,
int kctx_len, int hash_sz,
- unsigned int phys_dsgl)
+ int is_iv,
+ unsigned int sc_len)
{
struct uld_ctx *u_ctx = ULD_CTX(ctx);
- struct ulp_txpkt *ulptx = (struct ulp_txpkt *)(wreq + 1);
- struct ulptx_idata *sc_imm = (struct ulptx_idata *)(ulptx + 1);
int iv_loc = IV_DSGL;
int qid = u_ctx->lldi.rxq_ids[ctx->tx_channel_id];
unsigned int immdatalen = 0, nr_frags = 0;
@@ -423,27 +532,27 @@ static inline void create_wreq(struct chcr_context *ctx,
nr_frags = skb_shinfo(skb)->nr_frags;
}
- wreq->op_to_cctx_size = FILL_WR_OP_CCTX_SIZE(immdatalen,
- (kctx_len >> 4));
- wreq->pld_size_hash_size =
+ chcr_req->wreq.op_to_cctx_size = FILL_WR_OP_CCTX_SIZE(immdatalen,
+ ((sizeof(chcr_req->key_ctx) + kctx_len) >> 4));
+ chcr_req->wreq.pld_size_hash_size =
htonl(FW_CRYPTO_LOOKASIDE_WR_PLD_SIZE_V(sgl_lengths[nr_frags]) |
FW_CRYPTO_LOOKASIDE_WR_HASH_SIZE_V(hash_sz));
- wreq->len16_pkd = htonl(FW_CRYPTO_LOOKASIDE_WR_LEN16_V(DIV_ROUND_UP(
+ chcr_req->wreq.len16_pkd =
+ htonl(FW_CRYPTO_LOOKASIDE_WR_LEN16_V(DIV_ROUND_UP(
(calc_tx_flits_ofld(skb) * 8), 16)));
- wreq->cookie = cpu_to_be64((uintptr_t)req);
- wreq->rx_chid_to_rx_q_id =
+ chcr_req->wreq.cookie = cpu_to_be64((uintptr_t)req);
+ chcr_req->wreq.rx_chid_to_rx_q_id =
FILL_WR_RX_Q_ID(ctx->dev->tx_channel_id, qid,
- (hash_sz) ? IV_NOP : iv_loc);
+ is_iv ? iv_loc : IV_NOP);
- ulptx->cmd_dest = FILL_ULPTX_CMD_DEST(ctx->dev->tx_channel_id);
- ulptx->len = htonl((DIV_ROUND_UP((calc_tx_flits_ofld(skb) * 8),
- 16) - ((sizeof(*wreq)) >> 4)));
+ chcr_req->ulptx.cmd_dest = FILL_ULPTX_CMD_DEST(ctx->dev->tx_channel_id);
+ chcr_req->ulptx.len = htonl((DIV_ROUND_UP((calc_tx_flits_ofld(skb) * 8),
+ 16) - ((sizeof(chcr_req->wreq)) >> 4)));
- sc_imm->cmd_more = FILL_CMD_MORE(immdatalen);
- sc_imm->len = cpu_to_be32(sizeof(struct cpl_tx_sec_pdu) + kctx_len +
- ((hash_sz) ? DUMMY_BYTES :
- (sizeof(struct cpl_rx_phys_dsgl) +
- phys_dsgl)) + immdatalen);
+ chcr_req->sc_imm.cmd_more = FILL_CMD_MORE(immdatalen);
+ chcr_req->sc_imm.len = cpu_to_be32(sizeof(struct cpl_tx_sec_pdu) +
+ sizeof(chcr_req->key_ctx) +
+ kctx_len + sc_len + immdatalen);
}
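create_wreq() no longer derives the ULP TX and immediate-data headers by pointer arithmetic (wreq + 1, ulptx + 1); everything is addressed through one struct chcr_wr. The layout implied by the member accesses above is roughly the following sketch, not the header's authoritative definition:

    struct chcr_wr {
        struct fw_crypto_lookaside_wr wreq;
        struct ulp_txpkt ulptx;
        struct ulptx_idata sc_imm;
        struct cpl_tx_sec_pdu sec_cpl;
        struct _key_ctx key_ctx;
    };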
/**
@@ -454,86 +563,83 @@ static inline void create_wreq(struct chcr_context *ctx,
* @op_type: encryption or decryption
*/
static struct sk_buff
-*create_cipher_wr(struct crypto_async_request *req_base,
- struct chcr_context *ctx, unsigned short qid,
+*create_cipher_wr(struct ablkcipher_request *req,
+ unsigned short qid,
unsigned short op_type)
{
- struct ablkcipher_request *req = (struct ablkcipher_request *)req_base;
struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
+ struct chcr_context *ctx = crypto_ablkcipher_ctx(tfm);
struct uld_ctx *u_ctx = ULD_CTX(ctx);
struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
struct sk_buff *skb = NULL;
- struct _key_ctx *key_ctx;
- struct fw_crypto_lookaside_wr *wreq;
- struct cpl_tx_sec_pdu *sec_cpl;
+ struct chcr_wr *chcr_req;
struct cpl_rx_phys_dsgl *phys_cpl;
- struct chcr_blkcipher_req_ctx *req_ctx = ablkcipher_request_ctx(req);
+ struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
struct phys_sge_parm sg_param;
- unsigned int frags = 0, transhdr_len, phys_dsgl, dst_bufsize = 0;
+ unsigned int frags = 0, transhdr_len, phys_dsgl;
unsigned int ivsize = crypto_ablkcipher_ivsize(tfm), kctx_len;
+ gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
+ GFP_ATOMIC;
if (!req->info)
return ERR_PTR(-EINVAL);
- ablkctx->dst_nents = ch_nents(req->dst, &dst_bufsize);
- ablkctx->enc = op_type;
-
+ reqctx->dst_nents = sg_nents_for_len(req->dst, req->nbytes);
+ if (reqctx->dst_nents <= 0) {
+ pr_err("AES:Invalid Destination sg lists\n");
+ return ERR_PTR(-EINVAL);
+ }
if ((ablkctx->enckey_len == 0) || (ivsize > AES_BLOCK_SIZE) ||
- (req->nbytes <= 0) || (req->nbytes % AES_BLOCK_SIZE))
+ (req->nbytes <= 0) || (req->nbytes % AES_BLOCK_SIZE)) {
+ pr_err("AES: Invalid value of Key Len %d nbytes %d IV Len %d\n",
+ ablkctx->enckey_len, req->nbytes, ivsize);
return ERR_PTR(-EINVAL);
+ }
- phys_dsgl = get_space_for_phys_dsgl(ablkctx->dst_nents);
+ phys_dsgl = get_space_for_phys_dsgl(reqctx->dst_nents);
- kctx_len = sizeof(*key_ctx) +
- (DIV_ROUND_UP(ablkctx->enckey_len, 16) * 16);
+ kctx_len = (DIV_ROUND_UP(ablkctx->enckey_len, 16) * 16);
transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, phys_dsgl);
- skb = alloc_skb((transhdr_len + sizeof(struct sge_opaque_hdr)),
- GFP_ATOMIC);
+ skb = alloc_skb((transhdr_len + sizeof(struct sge_opaque_hdr)), flags);
if (!skb)
return ERR_PTR(-ENOMEM);
skb_reserve(skb, sizeof(struct sge_opaque_hdr));
- wreq = (struct fw_crypto_lookaside_wr *)__skb_put(skb, transhdr_len);
-
- sec_cpl = (struct cpl_tx_sec_pdu *)((u8 *)wreq + SEC_CPL_OFFSET);
- sec_cpl->op_ivinsrtofst =
- FILL_SEC_CPL_OP_IVINSR(ctx->dev->tx_channel_id, 2, 1, 1);
-
- sec_cpl->pldlen = htonl(ivsize + req->nbytes);
- sec_cpl->aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(0, 0,
- ivsize + 1, 0);
-
- sec_cpl->cipherstop_lo_authinsert = FILL_SEC_CPL_AUTHINSERT(0, 0,
- 0, 0);
- sec_cpl->seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(op_type, 0,
+ chcr_req = (struct chcr_wr *)__skb_put(skb, transhdr_len);
+ memset(chcr_req, 0, transhdr_len);
+ chcr_req->sec_cpl.op_ivinsrtofst =
+ FILL_SEC_CPL_OP_IVINSR(ctx->dev->tx_channel_id, 2, 1);
+
+ chcr_req->sec_cpl.pldlen = htonl(ivsize + req->nbytes);
+ chcr_req->sec_cpl.aadstart_cipherstop_hi =
+ FILL_SEC_CPL_CIPHERSTOP_HI(0, 0, ivsize + 1, 0);
+
+ chcr_req->sec_cpl.cipherstop_lo_authinsert =
+ FILL_SEC_CPL_AUTHINSERT(0, 0, 0, 0);
+ chcr_req->sec_cpl.seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(op_type, 0,
ablkctx->ciph_mode,
- 0, 0, ivsize >> 1, 1);
- sec_cpl->ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 0,
+ 0, 0, ivsize >> 1);
+ chcr_req->sec_cpl.ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 0,
0, 1, phys_dsgl);
- key_ctx = (struct _key_ctx *)((u8 *)sec_cpl + sizeof(*sec_cpl));
- key_ctx->ctx_hdr = ablkctx->key_ctx_hdr;
+ chcr_req->key_ctx.ctx_hdr = ablkctx->key_ctx_hdr;
if (op_type == CHCR_DECRYPT_OP) {
- if (generate_copy_rrkey(ablkctx, key_ctx))
- goto map_fail1;
+ generate_copy_rrkey(ablkctx, &chcr_req->key_ctx);
} else {
if (ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC) {
- memcpy(key_ctx->key, ablkctx->key, ablkctx->enckey_len);
+ memcpy(chcr_req->key_ctx.key, ablkctx->key,
+ ablkctx->enckey_len);
} else {
- memcpy(key_ctx->key, ablkctx->key +
+ memcpy(chcr_req->key_ctx.key, ablkctx->key +
(ablkctx->enckey_len >> 1),
ablkctx->enckey_len >> 1);
- memcpy(key_ctx->key +
+ memcpy(chcr_req->key_ctx.key +
(ablkctx->enckey_len >> 1),
ablkctx->key,
ablkctx->enckey_len >> 1);
}
}
- phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)key_ctx + kctx_len);
-
- memcpy(ablkctx->iv, req->info, ivsize);
- sg_init_table(&ablkctx->iv_sg, 1);
- sg_set_buf(&ablkctx->iv_sg, ablkctx->iv, ivsize);
- sg_param.nents = ablkctx->dst_nents;
- sg_param.obsize = dst_bufsize;
+ phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
+ sg_param.nents = reqctx->dst_nents;
+ sg_param.obsize = req->nbytes;
sg_param.qid = qid;
sg_param.align = 1;
if (map_writesg_phys_cpl(&u_ctx->lldi.pdev->dev, phys_cpl, req->dst,
@@ -541,10 +647,12 @@ static struct sk_buff
goto map_fail1;
skb_set_transport_header(skb, transhdr_len);
- write_sg_data_page_desc(skb, &frags, &ablkctx->iv_sg, ivsize);
- write_sg_data_page_desc(skb, &frags, req->src, req->nbytes);
- create_wreq(ctx, wreq, req, skb, kctx_len, 0, phys_dsgl);
- req_ctx->skb = skb;
+ memcpy(reqctx->iv, req->info, ivsize);
+ write_buffer_to_skb(skb, &frags, reqctx->iv, ivsize);
+ write_sg_to_skb(skb, &frags, req->src, req->nbytes);
+ create_wreq(ctx, chcr_req, req, skb, kctx_len, 0, 1,
+ sizeof(struct cpl_rx_phys_dsgl) + phys_dsgl);
+ reqctx->skb = skb;
skb_get(skb);
return skb;
map_fail1:
@@ -557,15 +665,9 @@ static int chcr_aes_cbc_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
{
struct chcr_context *ctx = crypto_ablkcipher_ctx(tfm);
struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
- struct ablkcipher_alg *alg = crypto_ablkcipher_alg(tfm);
unsigned int ck_size, context_size;
u16 alignment = 0;
- if ((keylen < alg->min_keysize) || (keylen > alg->max_keysize))
- goto badkey_err;
-
- memcpy(ablkctx->key, key, keylen);
- ablkctx->enckey_len = keylen;
if (keylen == AES_KEYSIZE_128) {
ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
} else if (keylen == AES_KEYSIZE_192) {
@@ -576,7 +678,9 @@ static int chcr_aes_cbc_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
} else {
goto badkey_err;
}
-
+ memcpy(ablkctx->key, key, keylen);
+ ablkctx->enckey_len = keylen;
+ get_aes_decrypt_key(ablkctx->rrkey, ablkctx->key, keylen << 3);
context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD +
keylen + alignment) >> 4;
@@ -592,16 +696,18 @@ badkey_err:
static int cxgb4_is_crypto_q_full(struct net_device *dev, unsigned int idx)
{
- int ret = 0;
- struct sge_ofld_txq *q;
struct adapter *adap = netdev2adap(dev);
+ struct sge_uld_txq_info *txq_info =
+ adap->sge.uld_txq_info[CXGB4_TX_CRYPTO];
+ struct sge_uld_txq *txq;
+ int ret = 0;
local_bh_disable();
- q = &adap->sge.ofldtxq[idx];
- spin_lock(&q->sendq.lock);
- if (q->full)
+ txq = &txq_info->uldtxq[idx];
+ spin_lock(&txq->sendq.lock);
+ if (txq->full)
ret = -1;
- spin_unlock(&q->sendq.lock);
+ spin_unlock(&txq->sendq.lock);
local_bh_enable();
return ret;
}
@@ -610,7 +716,6 @@ static int chcr_aes_encrypt(struct ablkcipher_request *req)
{
struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
struct chcr_context *ctx = crypto_ablkcipher_ctx(tfm);
- struct crypto_async_request *req_base = &req->base;
struct uld_ctx *u_ctx = ULD_CTX(ctx);
struct sk_buff *skb;
@@ -620,8 +725,7 @@ static int chcr_aes_encrypt(struct ablkcipher_request *req)
return -EBUSY;
}
- skb = create_cipher_wr(req_base, ctx,
- u_ctx->lldi.rxq_ids[ctx->tx_channel_id],
+ skb = create_cipher_wr(req, u_ctx->lldi.rxq_ids[ctx->tx_channel_id],
CHCR_ENCRYPT_OP);
if (IS_ERR(skb)) {
pr_err("chcr : %s : Failed to form WR. No memory\n", __func__);
@@ -637,7 +741,6 @@ static int chcr_aes_decrypt(struct ablkcipher_request *req)
{
struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
struct chcr_context *ctx = crypto_ablkcipher_ctx(tfm);
- struct crypto_async_request *req_base = &req->base;
struct uld_ctx *u_ctx = ULD_CTX(ctx);
struct sk_buff *skb;
@@ -647,7 +750,7 @@ static int chcr_aes_decrypt(struct ablkcipher_request *req)
return -EBUSY;
}
- skb = create_cipher_wr(req_base, ctx, u_ctx->lldi.rxq_ids[0],
+ skb = create_cipher_wr(req, u_ctx->lldi.rxq_ids[0],
CHCR_DECRYPT_OP);
if (IS_ERR(skb)) {
pr_err("chcr : %s : Failed to form WR. No memory\n", __func__);
@@ -674,11 +777,11 @@ static int chcr_device_init(struct chcr_context *ctx)
}
u_ctx = ULD_CTX(ctx);
rxq_perchan = u_ctx->lldi.nrxq / u_ctx->lldi.nchan;
- ctx->dev->tx_channel_id = 0;
rxq_idx = ctx->dev->tx_channel_id * rxq_perchan;
rxq_idx += id % rxq_perchan;
spin_lock(&ctx->dev->lock_chcr_dev);
ctx->tx_channel_id = rxq_idx;
+ ctx->dev->tx_channel_id = !ctx->dev->tx_channel_id;
spin_unlock(&ctx->dev->lock_chcr_dev);
}
out:
@@ -727,50 +830,33 @@ static int get_alg_config(struct algo_param *params,
return 0;
}
-static inline int
-write_buffer_data_page_desc(struct chcr_ahash_req_ctx *req_ctx,
- struct sk_buff *skb, unsigned int *frags, char *bfr,
- u8 bfr_len)
+static inline void chcr_free_shash(struct crypto_shash *base_hash)
{
- void *page_ptr = NULL;
-
- skb->len += bfr_len;
- skb->data_len += bfr_len;
- skb->truesize += bfr_len;
- page_ptr = kmalloc(CHCR_HASH_MAX_BLOCK_SIZE_128, GFP_ATOMIC | GFP_DMA);
- if (!page_ptr)
- return -ENOMEM;
- get_page(virt_to_page(page_ptr));
- req_ctx->dummy_payload_ptr = page_ptr;
- memcpy(page_ptr, bfr, bfr_len);
- skb_fill_page_desc(skb, *frags, virt_to_page(page_ptr),
- offset_in_page(page_ptr), bfr_len);
- (*frags)++;
- return 0;
+ crypto_free_shash(base_hash);
}
/**
- * create_final_hash_wr - Create hash work request
+ * create_hash_wr - Create hash work request
* @req - Cipher req base
*/
-static struct sk_buff *create_final_hash_wr(struct ahash_request *req,
- struct hash_wr_param *param)
+static struct sk_buff *create_hash_wr(struct ahash_request *req,
+ struct hash_wr_param *param)
{
struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
struct chcr_context *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
struct hmac_ctx *hmacctx = HMAC_CTX(ctx);
struct sk_buff *skb = NULL;
- struct _key_ctx *key_ctx;
- struct fw_crypto_lookaside_wr *wreq;
- struct cpl_tx_sec_pdu *sec_cpl;
+ struct chcr_wr *chcr_req;
unsigned int frags = 0, transhdr_len, iopad_alignment = 0;
unsigned int digestsize = crypto_ahash_digestsize(tfm);
- unsigned int kctx_len = sizeof(*key_ctx);
+ unsigned int kctx_len = 0;
u8 hash_size_in_response = 0;
+ gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
+ GFP_ATOMIC;
iopad_alignment = KEYCTX_ALIGN_PAD(digestsize);
- kctx_len += param->alg_prm.result_size + iopad_alignment;
+ kctx_len = param->alg_prm.result_size + iopad_alignment;
if (param->opad_needed)
kctx_len += param->alg_prm.result_size + iopad_alignment;
@@ -779,54 +865,54 @@ static struct sk_buff *create_final_hash_wr(struct ahash_request *req,
else
hash_size_in_response = param->alg_prm.result_size;
transhdr_len = HASH_TRANSHDR_SIZE(kctx_len);
- skb = alloc_skb((transhdr_len + sizeof(struct sge_opaque_hdr)),
- GFP_ATOMIC);
+ skb = alloc_skb((transhdr_len + sizeof(struct sge_opaque_hdr)), flags);
if (!skb)
return skb;
skb_reserve(skb, sizeof(struct sge_opaque_hdr));
- wreq = (struct fw_crypto_lookaside_wr *)__skb_put(skb, transhdr_len);
- memset(wreq, 0, transhdr_len);
+ chcr_req = (struct chcr_wr *)__skb_put(skb, transhdr_len);
+ memset(chcr_req, 0, transhdr_len);
- sec_cpl = (struct cpl_tx_sec_pdu *)((u8 *)wreq + SEC_CPL_OFFSET);
- sec_cpl->op_ivinsrtofst =
- FILL_SEC_CPL_OP_IVINSR(ctx->dev->tx_channel_id, 2, 0, 0);
- sec_cpl->pldlen = htonl(param->bfr_len + param->sg_len);
+ chcr_req->sec_cpl.op_ivinsrtofst =
+ FILL_SEC_CPL_OP_IVINSR(ctx->dev->tx_channel_id, 2, 0);
+ chcr_req->sec_cpl.pldlen = htonl(param->bfr_len + param->sg_len);
- sec_cpl->aadstart_cipherstop_hi =
+ chcr_req->sec_cpl.aadstart_cipherstop_hi =
FILL_SEC_CPL_CIPHERSTOP_HI(0, 0, 0, 0);
- sec_cpl->cipherstop_lo_authinsert =
+ chcr_req->sec_cpl.cipherstop_lo_authinsert =
FILL_SEC_CPL_AUTHINSERT(0, 1, 0, 0);
- sec_cpl->seqno_numivs =
+ chcr_req->sec_cpl.seqno_numivs =
FILL_SEC_CPL_SCMD0_SEQNO(0, 0, 0, param->alg_prm.auth_mode,
- param->opad_needed, 0, 0);
+ param->opad_needed, 0);
- sec_cpl->ivgen_hdrlen =
+ chcr_req->sec_cpl.ivgen_hdrlen =
FILL_SEC_CPL_IVGEN_HDRLEN(param->last, param->more, 0, 1, 0, 0);
- key_ctx = (struct _key_ctx *)((u8 *)sec_cpl + sizeof(*sec_cpl));
- memcpy(key_ctx->key, req_ctx->partial_hash, param->alg_prm.result_size);
+ memcpy(chcr_req->key_ctx.key, req_ctx->partial_hash,
+ param->alg_prm.result_size);
if (param->opad_needed)
- memcpy(key_ctx->key + ((param->alg_prm.result_size <= 32) ? 32 :
- CHCR_HASH_MAX_DIGEST_SIZE),
+ memcpy(chcr_req->key_ctx.key +
+ ((param->alg_prm.result_size <= 32) ? 32 :
+ CHCR_HASH_MAX_DIGEST_SIZE),
hmacctx->opad, param->alg_prm.result_size);
- key_ctx->ctx_hdr = FILL_KEY_CTX_HDR(CHCR_KEYCTX_NO_KEY,
+ chcr_req->key_ctx.ctx_hdr = FILL_KEY_CTX_HDR(CHCR_KEYCTX_NO_KEY,
param->alg_prm.mk_size, 0,
param->opad_needed,
- (kctx_len >> 4));
- sec_cpl->scmd1 = cpu_to_be64((u64)param->scmd1);
+ ((kctx_len +
+ sizeof(chcr_req->key_ctx)) >> 4));
+ chcr_req->sec_cpl.scmd1 = cpu_to_be64((u64)param->scmd1);
skb_set_transport_header(skb, transhdr_len);
if (param->bfr_len != 0)
- write_buffer_data_page_desc(req_ctx, skb, &frags, req_ctx->bfr,
- param->bfr_len);
+ write_buffer_to_skb(skb, &frags, req_ctx->reqbfr,
+ param->bfr_len);
if (param->sg_len != 0)
- write_sg_data_page_desc(skb, &frags, req->src, param->sg_len);
+ write_sg_to_skb(skb, &frags, req->src, param->sg_len);
- create_wreq(ctx, wreq, req, skb, kctx_len, hash_size_in_response,
- 0);
+ create_wreq(ctx, chcr_req, req, skb, kctx_len, hash_size_in_response, 0,
+ DUMMY_BYTES);
req_ctx->skb = skb;
skb_get(skb);
return skb;
@@ -852,34 +938,40 @@ static int chcr_ahash_update(struct ahash_request *req)
return -EBUSY;
}
- if (nbytes + req_ctx->bfr_len >= bs) {
- remainder = (nbytes + req_ctx->bfr_len) % bs;
- nbytes = nbytes + req_ctx->bfr_len - remainder;
+ if (nbytes + req_ctx->reqlen >= bs) {
+ remainder = (nbytes + req_ctx->reqlen) % bs;
+ nbytes = nbytes + req_ctx->reqlen - remainder;
} else {
- sg_pcopy_to_buffer(req->src, sg_nents(req->src), req_ctx->bfr +
- req_ctx->bfr_len, nbytes, 0);
- req_ctx->bfr_len += nbytes;
+ sg_pcopy_to_buffer(req->src, sg_nents(req->src), req_ctx->reqbfr
+ + req_ctx->reqlen, nbytes, 0);
+ req_ctx->reqlen += nbytes;
return 0;
}
params.opad_needed = 0;
params.more = 1;
params.last = 0;
- params.sg_len = nbytes - req_ctx->bfr_len;
- params.bfr_len = req_ctx->bfr_len;
+ params.sg_len = nbytes - req_ctx->reqlen;
+ params.bfr_len = req_ctx->reqlen;
params.scmd1 = 0;
get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
req_ctx->result = 0;
req_ctx->data_len += params.sg_len + params.bfr_len;
- skb = create_final_hash_wr(req, &params);
+ skb = create_hash_wr(req, &params);
if (!skb)
return -ENOMEM;
- req_ctx->bfr_len = remainder;
- if (remainder)
+ if (remainder) {
+ u8 *temp;
+ /* Swap buffers */
+ temp = req_ctx->reqbfr;
+ req_ctx->reqbfr = req_ctx->skbfr;
+ req_ctx->skbfr = temp;
sg_pcopy_to_buffer(req->src, sg_nents(req->src),
- req_ctx->bfr, remainder, req->nbytes -
+ req_ctx->reqbfr, remainder, req->nbytes -
remainder);
+ }
+ req_ctx->reqlen = remainder;
skb->dev = u_ctx->lldi.ports[0];
set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_channel_id);
chcr_send_wr(skb);
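The reqbfr/skbfr swap above exists because write_buffer_to_skb() is zero-copy: it takes a page reference on the buffer and attaches it to the in-flight skb, so that memory must stay stable until the work request completes. Two buffers therefore alternate roles; a sketch of the pattern:

    /* Post the current partial block, then swap so new data lands in the
     * buffer that is not pinned into an in-flight skb.
     */
    u8 *temp;

    write_buffer_to_skb(skb, &frags, req_ctx->reqbfr, params.bfr_len);
    temp = req_ctx->reqbfr;
    req_ctx->reqbfr = req_ctx->skbfr;
    req_ctx->skbfr = temp;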
@@ -915,10 +1007,10 @@ static int chcr_ahash_final(struct ahash_request *req)
params.sg_len = 0;
get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
req_ctx->result = 1;
- params.bfr_len = req_ctx->bfr_len;
+ params.bfr_len = req_ctx->reqlen;
req_ctx->data_len += params.bfr_len + params.sg_len;
- if (req_ctx->bfr && (req_ctx->bfr_len == 0)) {
- create_last_hash_block(req_ctx->bfr, bs, req_ctx->data_len);
+ if (req_ctx->reqlen == 0) {
+ create_last_hash_block(req_ctx->reqbfr, bs, req_ctx->data_len);
params.last = 0;
params.more = 1;
params.scmd1 = 0;
@@ -929,7 +1021,10 @@ static int chcr_ahash_final(struct ahash_request *req)
params.last = 1;
params.more = 0;
}
- skb = create_final_hash_wr(req, &params);
+ skb = create_hash_wr(req, &params);
+ if (!skb)
+ return -ENOMEM;
+
skb->dev = u_ctx->lldi.ports[0];
set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_channel_id);
chcr_send_wr(skb);
@@ -961,12 +1056,12 @@ static int chcr_ahash_finup(struct ahash_request *req)
params.opad_needed = 0;
params.sg_len = req->nbytes;
- params.bfr_len = req_ctx->bfr_len;
+ params.bfr_len = req_ctx->reqlen;
get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
req_ctx->data_len += params.bfr_len + params.sg_len;
req_ctx->result = 1;
- if (req_ctx->bfr && (req_ctx->bfr_len + req->nbytes) == 0) {
- create_last_hash_block(req_ctx->bfr, bs, req_ctx->data_len);
+ if ((req_ctx->reqlen + req->nbytes) == 0) {
+ create_last_hash_block(req_ctx->reqbfr, bs, req_ctx->data_len);
params.last = 0;
params.more = 1;
params.scmd1 = 0;
@@ -977,9 +1072,10 @@ static int chcr_ahash_finup(struct ahash_request *req)
params.more = 0;
}
- skb = create_final_hash_wr(req, &params);
+ skb = create_hash_wr(req, &params);
if (!skb)
return -ENOMEM;
+
skb->dev = u_ctx->lldi.ports[0];
set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_channel_id);
chcr_send_wr(skb);
@@ -1021,13 +1117,13 @@ static int chcr_ahash_digest(struct ahash_request *req)
req_ctx->result = 1;
req_ctx->data_len += params.bfr_len + params.sg_len;
- if (req_ctx->bfr && req->nbytes == 0) {
- create_last_hash_block(req_ctx->bfr, bs, 0);
+ if (req->nbytes == 0) {
+ create_last_hash_block(req_ctx->reqbfr, bs, 0);
params.more = 1;
params.bfr_len = bs;
}
- skb = create_final_hash_wr(req, &params);
+ skb = create_hash_wr(req, &params);
if (!skb)
return -ENOMEM;
@@ -1042,12 +1138,12 @@ static int chcr_ahash_export(struct ahash_request *areq, void *out)
struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
struct chcr_ahash_req_ctx *state = out;
- state->bfr_len = req_ctx->bfr_len;
+ state->reqlen = req_ctx->reqlen;
state->data_len = req_ctx->data_len;
- memcpy(state->bfr, req_ctx->bfr, CHCR_HASH_MAX_BLOCK_SIZE_128);
+ memcpy(state->bfr1, req_ctx->reqbfr, req_ctx->reqlen);
memcpy(state->partial_hash, req_ctx->partial_hash,
CHCR_HASH_MAX_DIGEST_SIZE);
- return 0;
+ return 0;
}
static int chcr_ahash_import(struct ahash_request *areq, const void *in)
@@ -1055,10 +1151,11 @@ static int chcr_ahash_import(struct ahash_request *areq, const void *in)
struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
struct chcr_ahash_req_ctx *state = (struct chcr_ahash_req_ctx *)in;
- req_ctx->bfr_len = state->bfr_len;
+ req_ctx->reqlen = state->reqlen;
req_ctx->data_len = state->data_len;
- req_ctx->dummy_payload_ptr = NULL;
- memcpy(req_ctx->bfr, state->bfr, CHCR_HASH_MAX_BLOCK_SIZE_128);
+ req_ctx->reqbfr = req_ctx->bfr1;
+ req_ctx->skbfr = req_ctx->bfr2;
+ memcpy(req_ctx->bfr1, state->bfr1, CHCR_HASH_MAX_BLOCK_SIZE_128);
memcpy(req_ctx->partial_hash, state->partial_hash,
CHCR_HASH_MAX_DIGEST_SIZE);
return 0;
@@ -1073,15 +1170,16 @@ static int chcr_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
unsigned int bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
unsigned int i, err = 0, updated_digestsize;
- /*
- * use the key to calculate the ipad and opad. ipad will sent with the
+ SHASH_DESC_ON_STACK(shash, hmacctx->base_hash);
+
+ /* use the key to calculate the ipad and opad. ipad will be sent with the
* first request's data. opad will be sent with the final hash result
* ipad in hmacctx->ipad and opad in hmacctx->opad location
*/
- if (!hmacctx->desc)
- return -EINVAL;
+ shash->tfm = hmacctx->base_hash;
+ shash->flags = crypto_shash_get_flags(hmacctx->base_hash);
if (keylen > bs) {
- err = crypto_shash_digest(hmacctx->desc, key, keylen,
+ err = crypto_shash_digest(shash, key, keylen,
hmacctx->ipad);
if (err)
goto out;
@@ -1102,13 +1200,13 @@ static int chcr_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
updated_digestsize = SHA256_DIGEST_SIZE;
else if (digestsize == SHA384_DIGEST_SIZE)
updated_digestsize = SHA512_DIGEST_SIZE;
- err = chcr_compute_partial_hash(hmacctx->desc, hmacctx->ipad,
+ err = chcr_compute_partial_hash(shash, hmacctx->ipad,
hmacctx->ipad, digestsize);
if (err)
goto out;
chcr_change_order(hmacctx->ipad, updated_digestsize);
- err = chcr_compute_partial_hash(hmacctx->desc, hmacctx->opad,
+ err = chcr_compute_partial_hash(shash, hmacctx->opad,
hmacctx->opad, digestsize);
if (err)
goto out;
@@ -1122,28 +1220,29 @@ static int chcr_aes_xts_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
{
struct chcr_context *ctx = crypto_ablkcipher_ctx(tfm);
struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
- int status = 0;
unsigned short context_size = 0;
- if ((key_len == (AES_KEYSIZE_128 << 1)) ||
- (key_len == (AES_KEYSIZE_256 << 1))) {
- memcpy(ablkctx->key, key, key_len);
- ablkctx->enckey_len = key_len;
- context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD + key_len) >> 4;
- ablkctx->key_ctx_hdr =
- FILL_KEY_CTX_HDR((key_len == AES_KEYSIZE_256) ?
- CHCR_KEYCTX_CIPHER_KEY_SIZE_128 :
- CHCR_KEYCTX_CIPHER_KEY_SIZE_256,
- CHCR_KEYCTX_NO_KEY, 1,
- 0, context_size);
- ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_XTS;
- } else {
+ if ((key_len != (AES_KEYSIZE_128 << 1)) &&
+ (key_len != (AES_KEYSIZE_256 << 1))) {
crypto_tfm_set_flags((struct crypto_tfm *)tfm,
CRYPTO_TFM_RES_BAD_KEY_LEN);
ablkctx->enckey_len = 0;
- status = -EINVAL;
+ return -EINVAL;
}
- return status;
+
+ memcpy(ablkctx->key, key, key_len);
+ ablkctx->enckey_len = key_len;
+ get_aes_decrypt_key(ablkctx->rrkey, ablkctx->key, key_len << 2);
+ context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD + key_len) >> 4;
+ ablkctx->key_ctx_hdr =
+ FILL_KEY_CTX_HDR((key_len == AES_KEYSIZE_256) ?
+ CHCR_KEYCTX_CIPHER_KEY_SIZE_128 :
+ CHCR_KEYCTX_CIPHER_KEY_SIZE_256,
+ CHCR_KEYCTX_NO_KEY, 1,
+ 0, context_size);
+ ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_XTS;
+ return 0;
}
static int chcr_sha_init(struct ahash_request *areq)
@@ -1153,8 +1252,9 @@ static int chcr_sha_init(struct ahash_request *areq)
int digestsize = crypto_ahash_digestsize(tfm);
req_ctx->data_len = 0;
- req_ctx->dummy_payload_ptr = NULL;
- req_ctx->bfr_len = 0;
+ req_ctx->reqlen = 0;
+ req_ctx->reqbfr = req_ctx->bfr1;
+ req_ctx->skbfr = req_ctx->bfr2;
req_ctx->skb = NULL;
req_ctx->result = 0;
copy_hash_init_values(req_ctx->partial_hash, digestsize);
@@ -1202,29 +1302,1184 @@ static int chcr_hmac_cra_init(struct crypto_tfm *tfm)
crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
sizeof(struct chcr_ahash_req_ctx));
- hmacctx->desc = chcr_alloc_shash(digestsize);
- if (IS_ERR(hmacctx->desc))
- return PTR_ERR(hmacctx->desc);
+ hmacctx->base_hash = chcr_alloc_shash(digestsize);
+ if (IS_ERR(hmacctx->base_hash))
+ return PTR_ERR(hmacctx->base_hash);
return chcr_device_init(crypto_tfm_ctx(tfm));
}
-static void chcr_free_shash(struct shash_desc *desc)
-{
- crypto_free_shash(desc->tfm);
- kfree(desc);
-}
-
static void chcr_hmac_cra_exit(struct crypto_tfm *tfm)
{
struct chcr_context *ctx = crypto_tfm_ctx(tfm);
struct hmac_ctx *hmacctx = HMAC_CTX(ctx);
- if (hmacctx->desc) {
- chcr_free_shash(hmacctx->desc);
- hmacctx->desc = NULL;
+ if (hmacctx->base_hash) {
+ chcr_free_shash(hmacctx->base_hash);
+ hmacctx->base_hash = NULL;
+ }
+}
+
+static int chcr_copy_assoc(struct aead_request *req,
+ struct chcr_aead_ctx *ctx)
+{
+ SKCIPHER_REQUEST_ON_STACK(skreq, ctx->null);
+
+ skcipher_request_set_tfm(skreq, ctx->null);
+ skcipher_request_set_callback(skreq, aead_request_flags(req),
+ NULL, NULL);
+ skcipher_request_set_crypt(skreq, req->src, req->dst, req->assoclen,
+ NULL);
+
+ return crypto_skcipher_encrypt(skreq);
+}
+
+static unsigned char get_hmac(unsigned int authsize)
+{
+ switch (authsize) {
+ case ICV_8:
+ return CHCR_SCMD_HMAC_CTRL_PL1;
+ case ICV_10:
+ return CHCR_SCMD_HMAC_CTRL_TRUNC_RFC4366;
+ case ICV_12:
+ return CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
+ }
+ return CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
+}
+
+static struct sk_buff *create_authenc_wr(struct aead_request *req,
+ unsigned short qid,
+ int size,
+ unsigned short op_type)
+{
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ struct chcr_context *ctx = crypto_aead_ctx(tfm);
+ struct uld_ctx *u_ctx = ULD_CTX(ctx);
+ struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
+ struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
+ struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
+ struct sk_buff *skb = NULL;
+ struct chcr_wr *chcr_req;
+ struct cpl_rx_phys_dsgl *phys_cpl;
+ struct phys_sge_parm sg_param;
+ struct scatterlist *src, *dst;
+ struct scatterlist src_sg[2], dst_sg[2];
+ unsigned int frags = 0, transhdr_len;
+ unsigned int ivsize = crypto_aead_ivsize(tfm), dst_size = 0;
+ unsigned int kctx_len = 0;
+ unsigned short stop_offset = 0;
+ unsigned int assoclen = req->assoclen;
+ unsigned int authsize = crypto_aead_authsize(tfm);
+ int err = 0;
+ int null = 0;
+ gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
+ GFP_ATOMIC;
+
+ if (aeadctx->enckey_len == 0 || (req->cryptlen == 0))
+ goto err;
+
+ if (op_type && req->cryptlen < crypto_aead_authsize(tfm))
+ goto err;
+
+ if (sg_nents_for_len(req->src, req->assoclen + req->cryptlen) < 0)
+ goto err;
+ src = scatterwalk_ffwd(src_sg, req->src, req->assoclen);
+ dst = src;
+ if (req->src != req->dst) {
+ err = chcr_copy_assoc(req, aeadctx);
+ if (err)
+ return ERR_PTR(err);
+ dst = scatterwalk_ffwd(dst_sg, req->dst, req->assoclen);
+ }
+ if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_NULL) {
+ null = 1;
+ assoclen = 0;
+ }
+ reqctx->dst_nents = sg_nents_for_len(dst, req->cryptlen +
+ (op_type ? -authsize : authsize));
+ if (reqctx->dst_nents <= 0) {
+ pr_err("AUTHENC:Invalid Destination sg entries\n");
+ goto err;
+ }
+ dst_size = get_space_for_phys_dsgl(reqctx->dst_nents);
+ kctx_len = (ntohl(KEY_CONTEXT_CTX_LEN_V(aeadctx->key_ctx_hdr)) << 4)
+ - sizeof(chcr_req->key_ctx);
+ transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
+ skb = alloc_skb((transhdr_len + sizeof(struct sge_opaque_hdr)), flags);
+ if (!skb)
+ goto err;
+
+ /* LLD is going to write the sge hdr. */
+ skb_reserve(skb, sizeof(struct sge_opaque_hdr));
+
+ /* Write WR */
+ chcr_req = (struct chcr_wr *) __skb_put(skb, transhdr_len);
+ memset(chcr_req, 0, transhdr_len);
+
+ stop_offset = (op_type == CHCR_ENCRYPT_OP) ? 0 : authsize;
+
+ /*
+ * Input order is AAD, IV and payload, where the IV is included as
+ * part of the authdata. All other fields are filled according
+ * to the hardware spec.
+ */
+ chcr_req->sec_cpl.op_ivinsrtofst =
+ FILL_SEC_CPL_OP_IVINSR(ctx->dev->tx_channel_id, 2,
+ (ivsize ? (assoclen + 1) : 0));
+ chcr_req->sec_cpl.pldlen = htonl(assoclen + ivsize + req->cryptlen);
+ chcr_req->sec_cpl.aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
+ assoclen ? 1 : 0, assoclen,
+ assoclen + ivsize + 1,
+ (stop_offset & 0x1F0) >> 4);
+ chcr_req->sec_cpl.cipherstop_lo_authinsert = FILL_SEC_CPL_AUTHINSERT(
+ stop_offset & 0xF,
+ null ? 0 : assoclen + ivsize + 1,
+ stop_offset, stop_offset);
+ chcr_req->sec_cpl.seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(op_type,
+ (op_type == CHCR_ENCRYPT_OP) ? 1 : 0,
+ CHCR_SCMD_CIPHER_MODE_AES_CBC,
+ actx->auth_mode, aeadctx->hmac_ctrl,
+ ivsize >> 1);
+ chcr_req->sec_cpl.ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1,
+ 0, 1, dst_size);
+
+ chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr;
+ if (op_type == CHCR_ENCRYPT_OP)
+ memcpy(chcr_req->key_ctx.key, aeadctx->key,
+ aeadctx->enckey_len);
+ else
+ memcpy(chcr_req->key_ctx.key, actx->dec_rrkey,
+ aeadctx->enckey_len);
+
+ memcpy(chcr_req->key_ctx.key + (DIV_ROUND_UP(aeadctx->enckey_len, 16) <<
+ 4), actx->h_iopad, kctx_len -
+ (DIV_ROUND_UP(aeadctx->enckey_len, 16) << 4));
+
+ phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
+ sg_param.nents = reqctx->dst_nents;
+ sg_param.obsize = req->cryptlen + (op_type ? -authsize : authsize);
+ sg_param.qid = qid;
+ sg_param.align = 0;
+ if (map_writesg_phys_cpl(&u_ctx->lldi.pdev->dev, phys_cpl, dst,
+ &sg_param))
+ goto dstmap_fail;
+
+ skb_set_transport_header(skb, transhdr_len);
+
+ if (assoclen) {
+ /* AAD buffer in */
+ write_sg_to_skb(skb, &frags, req->src, assoclen);
+
+ }
+ write_buffer_to_skb(skb, &frags, req->iv, ivsize);
+ write_sg_to_skb(skb, &frags, src, req->cryptlen);
+ create_wreq(ctx, chcr_req, req, skb, kctx_len, size, 1,
+ sizeof(struct cpl_rx_phys_dsgl) + dst_size);
+ reqctx->skb = skb;
+ skb_get(skb);
+
+ return skb;
+dstmap_fail:
+ /* ivmap_fail: */
+ kfree_skb(skb);
+err:
+ return ERR_PTR(-EINVAL);
+}
+
+static void aes_gcm_empty_pld_pad(struct scatterlist *sg,
+ unsigned short offset)
+{
+ struct page *spage;
+ unsigned char *addr;
+
+ spage = sg_page(sg);
+ get_page(spage); /* so that it is not freed by the NIC */
+#ifdef KMAP_ATOMIC_ARGS
+ addr = kmap_atomic(spage, KM_SOFTIRQ0);
+#else
+ addr = kmap_atomic(spage);
+#endif
+ memset(addr + sg->offset, 0, offset + 1);
+
+ kunmap_atomic(addr);
+}
+
+static int set_msg_len(u8 *block, unsigned int msglen, int csize)
+{
+ __be32 data;
+
+ memset(block, 0, csize);
+ block += csize;
+
+ if (csize >= 4)
+ csize = 4;
+ else if (msglen > (unsigned int)(1 << (8 * csize)))
+ return -EOVERFLOW;
+
+ data = cpu_to_be32(msglen);
+ memcpy(block - csize, (u8 *)&data + 4 - csize, csize);
+
+ return 0;
+}
+
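set_msg_len() writes the CCM length field: it zeroes the csize-byte field at the tail of B0, caps the copy at four bytes, rejects messages too long for a narrower field, and stores the low-order bytes of the big-endian length. A worked example with L' = 3, so the length field is L = 4 bytes (other flag bits in b0[0] omitted):

    u8 b0[16] = { 0 };

    b0[0] = 3;                       /* flags byte: L' = 3, hence L = 4 */
    set_msg_len(b0 + 16 - 4, 0x1234, 4);
    /* b0[12..15] now hold 00 00 12 34: the message length, big endian */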
+static void generate_b0(struct aead_request *req,
+ struct chcr_aead_ctx *aeadctx,
+ unsigned short op_type)
+{
+ unsigned int l, lp, m;
+ int rc;
+ struct crypto_aead *aead = crypto_aead_reqtfm(req);
+ struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
+ u8 *b0 = reqctx->scratch_pad;
+
+ m = crypto_aead_authsize(aead);
+
+ memcpy(b0, reqctx->iv, 16);
+
+ lp = b0[0];
+ l = lp + 1;
+
+ /* set m, bits 3-5 */
+ *b0 |= (8 * ((m - 2) / 2));
+
+ /* set adata, bit 6, if associated data is used */
+ if (req->assoclen)
+ *b0 |= 64;
+ rc = set_msg_len(b0 + 16 - l,
+ (op_type == CHCR_DECRYPT_OP) ?
+ req->cryptlen - m : req->cryptlen, l);
+}
+
+static inline int crypto_ccm_check_iv(const u8 *iv)
+{
+ /* 2 <= L <= 8, so 1 <= L' <= 7. */
+ if (iv[0] < 1 || iv[0] > 7)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int ccm_format_packet(struct aead_request *req,
+ struct chcr_aead_ctx *aeadctx,
+ unsigned int sub_type,
+ unsigned short op_type)
+{
+ struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
+ int rc = 0;
+
+ if (req->assoclen > T5_MAX_AAD_SIZE) {
+ pr_err("CCM: Unsupported AAD data. It should be < %d\n",
+ T5_MAX_AAD_SIZE);
+ return -EINVAL;
+ }
+ if (sub_type == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309) {
+ reqctx->iv[0] = 3;
+ memcpy(reqctx->iv + 1, &aeadctx->salt[0], 3);
+ memcpy(reqctx->iv + 4, req->iv, 8);
+ memset(reqctx->iv + 12, 0, 4);
+ *((unsigned short *)(reqctx->scratch_pad + 16)) =
+ htons(req->assoclen - 8);
+ } else {
+ memcpy(reqctx->iv, req->iv, 16);
+ *((unsigned short *)(reqctx->scratch_pad + 16)) =
+ htons(req->assoclen);
+ }
+ generate_b0(req, aeadctx, op_type);
+ /* zero the ctr value */
+ memset(reqctx->iv + 15 - reqctx->iv[0], 0, reqctx->iv[0] + 1);
+ return rc;
+}
+
+static void fill_sec_cpl_for_aead(struct cpl_tx_sec_pdu *sec_cpl,
+ unsigned int dst_size,
+ struct aead_request *req,
+ unsigned short op_type,
+ struct chcr_context *chcrctx)
+{
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ unsigned int ivsize = AES_BLOCK_SIZE;
+ unsigned int cipher_mode = CHCR_SCMD_CIPHER_MODE_AES_CCM;
+ unsigned int mac_mode = CHCR_SCMD_AUTH_MODE_CBCMAC;
+ unsigned int c_id = chcrctx->dev->tx_channel_id;
+ unsigned int ccm_xtra;
+ unsigned char tag_offset = 0, auth_offset = 0;
+ unsigned char hmac_ctrl = get_hmac(crypto_aead_authsize(tfm));
+ unsigned int assoclen;
+
+ if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309)
+ assoclen = req->assoclen - 8;
+ else
+ assoclen = req->assoclen;
+ ccm_xtra = CCM_B0_SIZE +
+ ((assoclen) ? CCM_AAD_FIELD_SIZE : 0);
+
+ auth_offset = req->cryptlen ?
+ (assoclen + ivsize + 1 + ccm_xtra) : 0;
+ if (op_type == CHCR_DECRYPT_OP) {
+ if (crypto_aead_authsize(tfm) != req->cryptlen)
+ tag_offset = crypto_aead_authsize(tfm);
+ else
+ auth_offset = 0;
+ }
+
+ sec_cpl->op_ivinsrtofst = FILL_SEC_CPL_OP_IVINSR(c_id,
+ 2, (ivsize ? (assoclen + 1) : 0) +
+ ccm_xtra);
+ sec_cpl->pldlen =
+ htonl(assoclen + ivsize + req->cryptlen + ccm_xtra);
+ /* For CCM there will always be a B0 block, so AAD start is always 1 */
+ sec_cpl->aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
+ 1, assoclen + ccm_xtra, assoclen
+ + ivsize + 1 + ccm_xtra, 0);
+
+ sec_cpl->cipherstop_lo_authinsert = FILL_SEC_CPL_AUTHINSERT(0,
+ auth_offset, tag_offset,
+ (op_type == CHCR_ENCRYPT_OP) ? 0 :
+ crypto_aead_authsize(tfm));
+ sec_cpl->seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(op_type,
+ (op_type == CHCR_ENCRYPT_OP) ? 0 : 1,
+ cipher_mode, mac_mode, hmac_ctrl,
+ ivsize >> 1);
+
+ sec_cpl->ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1, 0,
+ 1, dst_size);
+}
+
+int aead_ccm_validate_input(unsigned short op_type,
+ struct aead_request *req,
+ struct chcr_aead_ctx *aeadctx,
+ unsigned int sub_type)
+{
+ if (sub_type != CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309) {
+ if (crypto_ccm_check_iv(req->iv)) {
+ pr_err("CCM: IV check fails\n");
+ return -EINVAL;
+ }
+ } else {
+ if (req->assoclen != 16 && req->assoclen != 20) {
+ pr_err("RFC4309: Invalid AAD length %d\n",
+ req->assoclen);
+ return -EINVAL;
+ }
+ }
+ if (aeadctx->enckey_len == 0) {
+ pr_err("CCM: Encryption key not set\n");
+ return -EINVAL;
+ }
+ return 0;
+}
+
+unsigned int fill_aead_req_fields(struct sk_buff *skb,
+ struct aead_request *req,
+ struct scatterlist *src,
+ unsigned int ivsize,
+ struct chcr_aead_ctx *aeadctx)
+{
+ unsigned int frags = 0;
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
+ /* b0 and aad length(if available) */
+
+ write_buffer_to_skb(skb, &frags, reqctx->scratch_pad, CCM_B0_SIZE +
+ (req->assoclen ? CCM_AAD_FIELD_SIZE : 0));
+ if (req->assoclen) {
+ if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309)
+ write_sg_to_skb(skb, &frags, req->src,
+ req->assoclen - 8);
+ else
+ write_sg_to_skb(skb, &frags, req->src, req->assoclen);
+ }
+ write_buffer_to_skb(skb, &frags, reqctx->iv, ivsize);
+ if (req->cryptlen)
+ write_sg_to_skb(skb, &frags, src, req->cryptlen);
+
+ return frags;
+}
+
+static struct sk_buff *create_aead_ccm_wr(struct aead_request *req,
+ unsigned short qid,
+ int size,
+ unsigned short op_type)
+{
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ struct chcr_context *ctx = crypto_aead_ctx(tfm);
+ struct uld_ctx *u_ctx = ULD_CTX(ctx);
+ struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
+ struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
+ struct sk_buff *skb = NULL;
+ struct chcr_wr *chcr_req;
+ struct cpl_rx_phys_dsgl *phys_cpl;
+ struct phys_sge_parm sg_param;
+ struct scatterlist *src, *dst;
+ struct scatterlist src_sg[2], dst_sg[2];
+ unsigned int frags = 0, transhdr_len, ivsize = AES_BLOCK_SIZE;
+ unsigned int dst_size = 0, kctx_len;
+ unsigned int sub_type;
+ unsigned int authsize = crypto_aead_authsize(tfm);
+ int err = 0;
+ gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
+ GFP_ATOMIC;
+
+ if (op_type && req->cryptlen < crypto_aead_authsize(tfm))
+ goto err;
+
+ if (sg_nents_for_len(req->src, req->assoclen + req->cryptlen) < 0)
+ goto err;
+ sub_type = get_aead_subtype(tfm);
+ src = scatterwalk_ffwd(src_sg, req->src, req->assoclen);
+ dst = src;
+ if (req->src != req->dst) {
+ err = chcr_copy_assoc(req, aeadctx);
+ if (err) {
+ pr_err("AAD copy to destination buffer fails\n");
+ return ERR_PTR(err);
+ }
+ dst = scatterwalk_ffwd(dst_sg, req->dst, req->assoclen);
+ }
+ reqctx->dst_nents = sg_nents_for_len(dst, req->cryptlen +
+ (op_type ? -authsize : authsize));
+ if (reqctx->dst_nents <= 0) {
+ pr_err("CCM:Invalid Destination sg entries\n");
+ goto err;
+ }
+
+ if (aead_ccm_validate_input(op_type, req, aeadctx, sub_type))
+ goto err;
+
+ dst_size = get_space_for_phys_dsgl(reqctx->dst_nents);
+ kctx_len = ((DIV_ROUND_UP(aeadctx->enckey_len, 16)) << 4) * 2;
+ transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
+ skb = alloc_skb((transhdr_len + sizeof(struct sge_opaque_hdr)), flags);
+
+ if (!skb)
+ goto err;
+
+ skb_reserve(skb, sizeof(struct sge_opaque_hdr));
+
+ chcr_req = (struct chcr_wr *) __skb_put(skb, transhdr_len);
+ memset(chcr_req, 0, transhdr_len);
+
+ fill_sec_cpl_for_aead(&chcr_req->sec_cpl, dst_size, req, op_type, ctx);
+
+ chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr;
+ memcpy(chcr_req->key_ctx.key, aeadctx->key, aeadctx->enckey_len);
+ memcpy(chcr_req->key_ctx.key + (DIV_ROUND_UP(aeadctx->enckey_len, 16) *
+ 16), aeadctx->key, aeadctx->enckey_len);
+
+ phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
+ if (ccm_format_packet(req, aeadctx, sub_type, op_type))
+ goto dstmap_fail;
+
+ sg_param.nents = reqctx->dst_nents;
+ sg_param.obsize = req->cryptlen + (op_type ? -authsize : authsize);
+ sg_param.qid = qid;
+ sg_param.align = 0;
+ if (map_writesg_phys_cpl(&u_ctx->lldi.pdev->dev, phys_cpl, dst,
+ &sg_param))
+ goto dstmap_fail;
+
+ skb_set_transport_header(skb, transhdr_len);
+ frags = fill_aead_req_fields(skb, req, src, ivsize, aeadctx);
+ create_wreq(ctx, chcr_req, req, skb, kctx_len, 0, 1,
+ sizeof(struct cpl_rx_phys_dsgl) + dst_size);
+ reqctx->skb = skb;
+ skb_get(skb);
+ return skb;
+dstmap_fail:
+ kfree_skb(skb);
+ skb = NULL;
+err:
+ return ERR_PTR(-EINVAL);
+}
+
+static struct sk_buff *create_gcm_wr(struct aead_request *req,
+ unsigned short qid,
+ int size,
+ unsigned short op_type)
+{
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ struct chcr_context *ctx = crypto_aead_ctx(tfm);
+ struct uld_ctx *u_ctx = ULD_CTX(ctx);
+ struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
+ struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
+ struct sk_buff *skb = NULL;
+ struct chcr_wr *chcr_req;
+ struct cpl_rx_phys_dsgl *phys_cpl;
+ struct phys_sge_parm sg_param;
+ struct scatterlist *src, *dst;
+ struct scatterlist src_sg[2], dst_sg[2];
+ unsigned int frags = 0, transhdr_len;
+ unsigned int ivsize = AES_BLOCK_SIZE;
+ unsigned int dst_size = 0, kctx_len;
+ unsigned char tag_offset = 0;
+ unsigned int crypt_len = 0;
+ unsigned int authsize = crypto_aead_authsize(tfm);
+ unsigned char hmac_ctrl = get_hmac(authsize);
+ int err = 0;
+ gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
+ GFP_ATOMIC;
+
+ /* validate key size */
+ if (aeadctx->enckey_len == 0)
+ goto err;
+
+ if (op_type && req->cryptlen < crypto_aead_authsize(tfm))
+ goto err;
+
+ if (sg_nents_for_len(req->src, req->assoclen + req->cryptlen) < 0)
+ goto err;
+
+ src = scatterwalk_ffwd(src_sg, req->src, req->assoclen);
+ dst = src;
+ if (req->src != req->dst) {
+ err = chcr_copy_assoc(req, aeadctx);
+ if (err)
+ return ERR_PTR(err);
+ dst = scatterwalk_ffwd(dst_sg, req->dst, req->assoclen);
+ }
+
+ if (!req->cryptlen)
+		/* A NULL payload is not supported by the hardware;
+		 * software sends one cipher block instead.
+		 */
+ crypt_len = AES_BLOCK_SIZE;
+ else
+ crypt_len = req->cryptlen;
+ reqctx->dst_nents = sg_nents_for_len(dst, req->cryptlen +
+ (op_type ? -authsize : authsize));
+ if (reqctx->dst_nents <= 0) {
+ pr_err("GCM:Invalid Destination sg entries\n");
+ goto err;
+ }
+
+ dst_size = get_space_for_phys_dsgl(reqctx->dst_nents);
+ kctx_len = ((DIV_ROUND_UP(aeadctx->enckey_len, 16)) << 4) +
+ AEAD_H_SIZE;
+ transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
+ skb = alloc_skb((transhdr_len + sizeof(struct sge_opaque_hdr)), flags);
+ if (!skb)
+ goto err;
+
+ /* NIC driver is going to write the sge hdr. */
+ skb_reserve(skb, sizeof(struct sge_opaque_hdr));
+
+ chcr_req = (struct chcr_wr *)__skb_put(skb, transhdr_len);
+ memset(chcr_req, 0, transhdr_len);
+
+ if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106)
+ req->assoclen -= 8;
+
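+	/* tag_offset is 0 on encrypt (the tag is appended) and authsize
+	 * on decrypt (the tag trails the ciphertext in the input).
+	 */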
+ tag_offset = (op_type == CHCR_ENCRYPT_OP) ? 0 : authsize;
+ chcr_req->sec_cpl.op_ivinsrtofst = FILL_SEC_CPL_OP_IVINSR(
+ ctx->dev->tx_channel_id, 2, (ivsize ?
+ (req->assoclen + 1) : 0));
+ chcr_req->sec_cpl.pldlen = htonl(req->assoclen + ivsize + crypt_len);
+ chcr_req->sec_cpl.aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
+ req->assoclen ? 1 : 0, req->assoclen,
+ req->assoclen + ivsize + 1, 0);
+ if (req->cryptlen) {
+ chcr_req->sec_cpl.cipherstop_lo_authinsert =
+ FILL_SEC_CPL_AUTHINSERT(0, req->assoclen + ivsize + 1,
+ tag_offset, tag_offset);
+ chcr_req->sec_cpl.seqno_numivs =
+ FILL_SEC_CPL_SCMD0_SEQNO(op_type, (op_type ==
+ CHCR_ENCRYPT_OP) ? 1 : 0,
+ CHCR_SCMD_CIPHER_MODE_AES_GCM,
+ CHCR_SCMD_AUTH_MODE_GHASH, hmac_ctrl,
+ ivsize >> 1);
+ } else {
+ chcr_req->sec_cpl.cipherstop_lo_authinsert =
+ FILL_SEC_CPL_AUTHINSERT(0, 0, 0, 0);
+ chcr_req->sec_cpl.seqno_numivs =
+ FILL_SEC_CPL_SCMD0_SEQNO(op_type,
+ (op_type == CHCR_ENCRYPT_OP) ?
+ 1 : 0, CHCR_SCMD_CIPHER_MODE_AES_CBC,
+ 0, 0, ivsize >> 1);
+ }
+ chcr_req->sec_cpl.ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1,
+ 0, 1, dst_size);
+ chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr;
+ memcpy(chcr_req->key_ctx.key, aeadctx->key, aeadctx->enckey_len);
+ memcpy(chcr_req->key_ctx.key + (DIV_ROUND_UP(aeadctx->enckey_len, 16) *
+ 16), GCM_CTX(aeadctx)->ghash_h, AEAD_H_SIZE);
+
+	/* Prepare the 16-byte IV block: SALT | IV | 0x00000001 */
+ if (get_aead_subtype(tfm) ==
+ CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106) {
+ memcpy(reqctx->iv, aeadctx->salt, 4);
+ memcpy(reqctx->iv + 4, req->iv, 8);
+ } else {
+ memcpy(reqctx->iv, req->iv, 12);
+ }
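+	/* Per GCM, the 32-bit invocation counter in the final IV word starts at 1. */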
+ *((unsigned int *)(reqctx->iv + 12)) = htonl(0x01);
+
+ phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
+ sg_param.nents = reqctx->dst_nents;
+ sg_param.obsize = req->cryptlen + (op_type ? -authsize : authsize);
+ sg_param.qid = qid;
+ sg_param.align = 0;
+ if (map_writesg_phys_cpl(&u_ctx->lldi.pdev->dev, phys_cpl, dst,
+ &sg_param))
+ goto dstmap_fail;
+
+ skb_set_transport_header(skb, transhdr_len);
+
+ write_sg_to_skb(skb, &frags, req->src, req->assoclen);
+
+ write_buffer_to_skb(skb, &frags, reqctx->iv, ivsize);
+
+ if (req->cryptlen) {
+ write_sg_to_skb(skb, &frags, src, req->cryptlen);
+ } else {
+ aes_gcm_empty_pld_pad(req->dst, authsize - 1);
+ write_sg_to_skb(skb, &frags, dst, crypt_len);
+ }
+
+ create_wreq(ctx, chcr_req, req, skb, kctx_len, size, 1,
+ sizeof(struct cpl_rx_phys_dsgl) + dst_size);
+ reqctx->skb = skb;
+ skb_get(skb);
+ return skb;
+
+dstmap_fail:
+ /* ivmap_fail: */
+ kfree_skb(skb);
+ skb = NULL;
+err:
+ return skb;
+}
+
+static int chcr_aead_cra_init(struct crypto_aead *tfm)
+{
+ struct chcr_context *ctx = crypto_aead_ctx(tfm);
+ struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
+
+ crypto_aead_set_reqsize(tfm, sizeof(struct chcr_aead_reqctx));
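+	/* The default null skcipher is used to copy the AAD to the
+	 * destination when src != dst (see chcr_copy_assoc()).
+	 */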
+ aeadctx->null = crypto_get_default_null_skcipher();
+ if (IS_ERR(aeadctx->null))
+ return PTR_ERR(aeadctx->null);
+ return chcr_device_init(ctx);
+}
+
+static void chcr_aead_cra_exit(struct crypto_aead *tfm)
+{
+ crypto_put_default_null_skcipher();
+}
+
+static int chcr_authenc_null_setauthsize(struct crypto_aead *tfm,
+ unsigned int authsize)
+{
+ struct chcr_aead_ctx *aeadctx = AEAD_CTX(crypto_aead_ctx(tfm));
+
+ aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NOP;
+ aeadctx->mayverify = VERIFY_HW;
+ return 0;
+}
+
+static int chcr_authenc_setauthsize(struct crypto_aead *tfm,
+ unsigned int authsize)
+{
+ struct chcr_aead_ctx *aeadctx = AEAD_CTX(crypto_aead_ctx(tfm));
+ u32 maxauth = crypto_aead_maxauthsize(tfm);
+
+	/* The SHA1 authsize in IPsec is 12, not 10, i.e. maxauthsize / 2
+	 * does not hold for SHA1. The authsize == 12 check must therefore
+	 * come before the authsize == (maxauth >> 1) check.
+	 */
+ if (authsize == ICV_4) {
+ aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL1;
+ aeadctx->mayverify = VERIFY_HW;
+ } else if (authsize == ICV_6) {
+ aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL2;
+ aeadctx->mayverify = VERIFY_HW;
+ } else if (authsize == ICV_10) {
+ aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_TRUNC_RFC4366;
+ aeadctx->mayverify = VERIFY_HW;
+ } else if (authsize == ICV_12) {
+ aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
+ aeadctx->mayverify = VERIFY_HW;
+ } else if (authsize == ICV_14) {
+ aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL3;
+ aeadctx->mayverify = VERIFY_HW;
+ } else if (authsize == (maxauth >> 1)) {
+ aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
+ aeadctx->mayverify = VERIFY_HW;
+ } else if (authsize == maxauth) {
+ aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
+ aeadctx->mayverify = VERIFY_HW;
+ } else {
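+		/* Unsupported truncation length: compute the full digest and verify it in software. */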
+ aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
+ aeadctx->mayverify = VERIFY_SW;
+ }
+ return 0;
+}
+
+static int chcr_gcm_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
+{
+ struct chcr_aead_ctx *aeadctx = AEAD_CTX(crypto_aead_ctx(tfm));
+
+ switch (authsize) {
+ case ICV_4:
+ aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL1;
+ aeadctx->mayverify = VERIFY_HW;
+ break;
+ case ICV_8:
+ aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
+ aeadctx->mayverify = VERIFY_HW;
+ break;
+ case ICV_12:
+ aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
+ aeadctx->mayverify = VERIFY_HW;
+ break;
+ case ICV_14:
+ aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL3;
+ aeadctx->mayverify = VERIFY_HW;
+ break;
+ case ICV_16:
+ aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
+ aeadctx->mayverify = VERIFY_HW;
+ break;
+ case ICV_13:
+ case ICV_15:
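+		/* 13- and 15-byte tags are verified in software against the full digest. */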
+ aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
+ aeadctx->mayverify = VERIFY_SW;
+ break;
+ default:
+		crypto_tfm_set_flags((struct crypto_tfm *)tfm,
+ CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int chcr_4106_4309_setauthsize(struct crypto_aead *tfm,
+ unsigned int authsize)
+{
+ struct chcr_aead_ctx *aeadctx = AEAD_CTX(crypto_aead_ctx(tfm));
+
+ switch (authsize) {
+ case ICV_8:
+ aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
+ aeadctx->mayverify = VERIFY_HW;
+ break;
+ case ICV_12:
+ aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
+ aeadctx->mayverify = VERIFY_HW;
+ break;
+ case ICV_16:
+ aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
+ aeadctx->mayverify = VERIFY_HW;
+ break;
+ default:
+ crypto_tfm_set_flags((struct crypto_tfm *)tfm,
+ CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int chcr_ccm_setauthsize(struct crypto_aead *tfm,
+ unsigned int authsize)
+{
+ struct chcr_aead_ctx *aeadctx = AEAD_CTX(crypto_aead_ctx(tfm));
+
+ switch (authsize) {
+ case ICV_4:
+ aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL1;
+ aeadctx->mayverify = VERIFY_HW;
+ break;
+ case ICV_6:
+ aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL2;
+ aeadctx->mayverify = VERIFY_HW;
+ break;
+ case ICV_8:
+ aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
+ aeadctx->mayverify = VERIFY_HW;
+ break;
+ case ICV_10:
+ aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_TRUNC_RFC4366;
+ aeadctx->mayverify = VERIFY_HW;
+ break;
+ case ICV_12:
+ aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
+ aeadctx->mayverify = VERIFY_HW;
+ break;
+ case ICV_14:
+ aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL3;
+ aeadctx->mayverify = VERIFY_HW;
+ break;
+ case ICV_16:
+ aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
+ aeadctx->mayverify = VERIFY_HW;
+ break;
+ default:
+ crypto_tfm_set_flags((struct crypto_tfm *)tfm,
+ CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return -EINVAL;
}
+ return 0;
}
+
+static int chcr_aead_ccm_setkey(struct crypto_aead *aead,
+ const u8 *key,
+ unsigned int keylen)
+{
+ struct chcr_context *ctx = crypto_aead_ctx(aead);
+ struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
+ unsigned char ck_size, mk_size;
+ int key_ctx_size = 0;
+
+ memcpy(aeadctx->key, key, keylen);
+ aeadctx->enckey_len = keylen;
+ key_ctx_size = sizeof(struct _key_ctx) +
+ ((DIV_ROUND_UP(keylen, 16)) << 4) * 2;
+ if (keylen == AES_KEYSIZE_128) {
+ mk_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
+ ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
+ } else if (keylen == AES_KEYSIZE_192) {
+ ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
+ mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_192;
+ } else if (keylen == AES_KEYSIZE_256) {
+ ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
+ mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
+ } else {
+ crypto_tfm_set_flags((struct crypto_tfm *)aead,
+ CRYPTO_TFM_RES_BAD_KEY_LEN);
+ aeadctx->enckey_len = 0;
+ return -EINVAL;
+ }
+ aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, mk_size, 0, 0,
+ key_ctx_size >> 4);
+ return 0;
+}
+
+static int chcr_aead_rfc4309_setkey(struct crypto_aead *aead, const u8 *key,
+ unsigned int keylen)
+{
+ struct chcr_context *ctx = crypto_aead_ctx(aead);
+ struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
+
+ if (keylen < 3) {
+ crypto_tfm_set_flags((struct crypto_tfm *)aead,
+ CRYPTO_TFM_RES_BAD_KEY_LEN);
+ aeadctx->enckey_len = 0;
+ return -EINVAL;
+ }
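+	/* RFC 4309: the last 3 key bytes are the nonce salt, not AES key material. */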
+ keylen -= 3;
+ memcpy(aeadctx->salt, key + keylen, 3);
+ return chcr_aead_ccm_setkey(aead, key, keylen);
+}
+
+static int chcr_gcm_setkey(struct crypto_aead *aead, const u8 *key,
+ unsigned int keylen)
+{
+ struct chcr_context *ctx = crypto_aead_ctx(aead);
+ struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
+ struct chcr_gcm_ctx *gctx = GCM_CTX(aeadctx);
+ struct blkcipher_desc h_desc;
+ struct scatterlist src[1];
+ unsigned int ck_size;
+ int ret = 0, key_ctx_size = 0;
+
+ if (get_aead_subtype(aead) ==
+ CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106) {
+ keylen -= 4; /* nonce/salt is present in the last 4 bytes */
+ memcpy(aeadctx->salt, key + keylen, 4);
+ }
+ if (keylen == AES_KEYSIZE_128) {
+ ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
+ } else if (keylen == AES_KEYSIZE_192) {
+ ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
+ } else if (keylen == AES_KEYSIZE_256) {
+ ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
+ } else {
+ crypto_tfm_set_flags((struct crypto_tfm *)aead,
+ CRYPTO_TFM_RES_BAD_KEY_LEN);
+ aeadctx->enckey_len = 0;
+ pr_err("GCM: Invalid key length %d", keylen);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ memcpy(aeadctx->key, key, keylen);
+ aeadctx->enckey_len = keylen;
+ key_ctx_size = sizeof(struct _key_ctx) +
+ ((DIV_ROUND_UP(keylen, 16)) << 4) +
+ AEAD_H_SIZE;
+ aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size,
+ CHCR_KEYCTX_MAC_KEY_SIZE_128,
+ 0, 0,
+ key_ctx_size >> 4);
+	/* Calculate H = CIPH(K, 0 repeated 16 times) using a synchronous
+	 * AES blkcipher; H is stored in the key context.
+	 */
+ h_desc.tfm = crypto_alloc_blkcipher("cbc(aes-generic)", 0, 0);
+ if (IS_ERR(h_desc.tfm)) {
+ aeadctx->enckey_len = 0;
+ ret = -ENOMEM;
+ goto out;
+ }
+ h_desc.flags = 0;
+ ret = crypto_blkcipher_setkey(h_desc.tfm, key, keylen);
+ if (ret) {
+ aeadctx->enckey_len = 0;
+ goto out1;
+ }
+ memset(gctx->ghash_h, 0, AEAD_H_SIZE);
+ sg_init_one(&src[0], gctx->ghash_h, AEAD_H_SIZE);
+ ret = crypto_blkcipher_encrypt(&h_desc, &src[0], &src[0], AEAD_H_SIZE);
+
+out1:
+ crypto_free_blkcipher(h_desc.tfm);
+out:
+ return ret;
+}
+
+static int chcr_authenc_setkey(struct crypto_aead *authenc, const u8 *key,
+ unsigned int keylen)
+{
+ struct chcr_context *ctx = crypto_aead_ctx(authenc);
+ struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
+ struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
+	/* keys holds both the authentication and the cipher key */
+ struct crypto_authenc_keys keys;
+ unsigned int bs;
+ unsigned int max_authsize = crypto_aead_alg(authenc)->maxauthsize;
+ int err = 0, i, key_ctx_len = 0;
+ unsigned char ck_size = 0;
+ unsigned char pad[CHCR_HASH_MAX_BLOCK_SIZE_128] = { 0 };
+ struct crypto_shash *base_hash = NULL;
+ struct algo_param param;
+ int align;
+ u8 *o_ptr = NULL;
+
+ if (crypto_authenc_extractkeys(&keys, key, keylen) != 0) {
+ crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ goto out;
+ }
+
+ if (get_alg_config(&param, max_authsize)) {
+ pr_err("chcr : Unsupported digest size\n");
+ goto out;
+ }
+ if (keys.enckeylen == AES_KEYSIZE_128) {
+ ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
+ } else if (keys.enckeylen == AES_KEYSIZE_192) {
+ ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
+ } else if (keys.enckeylen == AES_KEYSIZE_256) {
+ ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
+ } else {
+ pr_err("chcr : Unsupported cipher key\n");
+ goto out;
+ }
+
+	/* Copy only the encryption key. The authkey is used here to generate
+	 * h(ipad) and h(opad), so it is not needed afterwards. authkeylen
+	 * equals the hash digest size.
+	 */
+ memcpy(aeadctx->key, keys.enckey, keys.enckeylen);
+ aeadctx->enckey_len = keys.enckeylen;
+ get_aes_decrypt_key(actx->dec_rrkey, aeadctx->key,
+ aeadctx->enckey_len << 3);
+
+ base_hash = chcr_alloc_shash(max_authsize);
+ if (IS_ERR(base_hash)) {
+ pr_err("chcr : Base driver cannot be loaded\n");
+ goto out;
+ }
+ {
+ SHASH_DESC_ON_STACK(shash, base_hash);
+ shash->tfm = base_hash;
+ shash->flags = crypto_shash_get_flags(base_hash);
+ bs = crypto_shash_blocksize(base_hash);
+ align = KEYCTX_ALIGN_PAD(max_authsize);
+ o_ptr = actx->h_iopad + param.result_size + align;
+
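+		/* Standard HMAC key preparation: keys longer than the block
+		 * size are first hashed down to the digest size.
+		 */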
+ if (keys.authkeylen > bs) {
+ err = crypto_shash_digest(shash, keys.authkey,
+ keys.authkeylen,
+ o_ptr);
+ if (err) {
+ pr_err("chcr : Base driver cannot be loaded\n");
+ goto out;
+ }
+ keys.authkeylen = max_authsize;
+ } else
+ memcpy(o_ptr, keys.authkey, keys.authkeylen);
+
+ /* Compute the ipad-digest*/
+ memset(pad + keys.authkeylen, 0, bs - keys.authkeylen);
+ memcpy(pad, o_ptr, keys.authkeylen);
+ for (i = 0; i < bs >> 2; i++)
+ *((unsigned int *)pad + i) ^= IPAD_DATA;
+
+ if (chcr_compute_partial_hash(shash, pad, actx->h_iopad,
+ max_authsize))
+ goto out;
+ /* Compute the opad-digest */
+ memset(pad + keys.authkeylen, 0, bs - keys.authkeylen);
+ memcpy(pad, o_ptr, keys.authkeylen);
+ for (i = 0; i < bs >> 2; i++)
+ *((unsigned int *)pad + i) ^= OPAD_DATA;
+
+ if (chcr_compute_partial_hash(shash, pad, o_ptr, max_authsize))
+ goto out;
+
+ /* convert the ipad and opad digest to network order */
+ chcr_change_order(actx->h_iopad, param.result_size);
+ chcr_change_order(o_ptr, param.result_size);
+ key_ctx_len = sizeof(struct _key_ctx) +
+ ((DIV_ROUND_UP(keys.enckeylen, 16)) << 4) +
+ (param.result_size + align) * 2;
+ aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, param.mk_size,
+ 0, 1, key_ctx_len >> 4);
+ actx->auth_mode = param.auth_mode;
+ chcr_free_shash(base_hash);
+
+ return 0;
+ }
+out:
+ aeadctx->enckey_len = 0;
+ if (base_hash)
+ chcr_free_shash(base_hash);
+ return -EINVAL;
+}
+
+static int chcr_aead_digest_null_setkey(struct crypto_aead *authenc,
+ const u8 *key, unsigned int keylen)
+{
+ struct chcr_context *ctx = crypto_aead_ctx(authenc);
+ struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
+ struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
+ struct crypto_authenc_keys keys;
+
+	/* keys holds both the authentication and the cipher key */
+ int key_ctx_len = 0;
+ unsigned char ck_size = 0;
+
+ if (crypto_authenc_extractkeys(&keys, key, keylen) != 0) {
+ crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ goto out;
+ }
+ if (keys.enckeylen == AES_KEYSIZE_128) {
+ ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
+ } else if (keys.enckeylen == AES_KEYSIZE_192) {
+ ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
+ } else if (keys.enckeylen == AES_KEYSIZE_256) {
+ ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
+ } else {
+ pr_err("chcr : Unsupported cipher key\n");
+ goto out;
+ }
+ memcpy(aeadctx->key, keys.enckey, keys.enckeylen);
+ aeadctx->enckey_len = keys.enckeylen;
+ get_aes_decrypt_key(actx->dec_rrkey, aeadctx->key,
+ aeadctx->enckey_len << 3);
+ key_ctx_len = sizeof(struct _key_ctx)
+ + ((DIV_ROUND_UP(keys.enckeylen, 16)) << 4);
+
+ aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY, 0,
+ 0, key_ctx_len >> 4);
+ actx->auth_mode = CHCR_SCMD_AUTH_MODE_NOP;
+ return 0;
+out:
+ aeadctx->enckey_len = 0;
+ return -EINVAL;
+}
+
+static int chcr_aead_encrypt(struct aead_request *req)
+{
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
+
+ reqctx->verify = VERIFY_HW;
+
+ switch (get_aead_subtype(tfm)) {
+ case CRYPTO_ALG_SUB_TYPE_AEAD_AUTHENC:
+ case CRYPTO_ALG_SUB_TYPE_AEAD_NULL:
+ return chcr_aead_op(req, CHCR_ENCRYPT_OP, 0,
+ create_authenc_wr);
+ case CRYPTO_ALG_SUB_TYPE_AEAD_CCM:
+ case CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309:
+ return chcr_aead_op(req, CHCR_ENCRYPT_OP, 0,
+ create_aead_ccm_wr);
+ default:
+ return chcr_aead_op(req, CHCR_ENCRYPT_OP, 0,
+ create_gcm_wr);
+ }
+}
+
+static int chcr_aead_decrypt(struct aead_request *req)
+{
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ struct chcr_aead_ctx *aeadctx = AEAD_CTX(crypto_aead_ctx(tfm));
+ struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
+ int size;
+
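+	/* For software verification, request the full maxauthsize tag from the hardware. */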
+ if (aeadctx->mayverify == VERIFY_SW) {
+ size = crypto_aead_maxauthsize(tfm);
+ reqctx->verify = VERIFY_SW;
+ } else {
+ size = 0;
+ reqctx->verify = VERIFY_HW;
+ }
+
+ switch (get_aead_subtype(tfm)) {
+ case CRYPTO_ALG_SUB_TYPE_AEAD_AUTHENC:
+ case CRYPTO_ALG_SUB_TYPE_AEAD_NULL:
+ return chcr_aead_op(req, CHCR_DECRYPT_OP, size,
+ create_authenc_wr);
+ case CRYPTO_ALG_SUB_TYPE_AEAD_CCM:
+ case CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309:
+ return chcr_aead_op(req, CHCR_DECRYPT_OP, size,
+ create_aead_ccm_wr);
+ default:
+ return chcr_aead_op(req, CHCR_DECRYPT_OP, size,
+ create_gcm_wr);
+ }
+}
+
+static int chcr_aead_op(struct aead_request *req,
+ unsigned short op_type,
+ int size,
+ create_wr_t create_wr_fn)
+{
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ struct chcr_context *ctx = crypto_aead_ctx(tfm);
+ struct uld_ctx *u_ctx = ULD_CTX(ctx);
+ struct sk_buff *skb;
+
+ if (ctx && !ctx->dev) {
+ pr_err("chcr : %s : No crypto device.\n", __func__);
+ return -ENXIO;
+ }
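+	/* When the queue is full, only requests that may backlog are accepted. */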
+ if (cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
+ ctx->tx_channel_id)) {
+ if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
+ return -EBUSY;
+ }
+
+ /* Form a WR from req */
+ skb = create_wr_fn(req, u_ctx->lldi.rxq_ids[ctx->tx_channel_id], size,
+ op_type);
+
+ if (IS_ERR(skb) || skb == NULL) {
+ pr_err("chcr : %s : failed to form WR. No memory\n", __func__);
+ return PTR_ERR(skb);
+ }
+
+ skb->dev = u_ctx->lldi.ports[0];
+ set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_channel_id);
+ chcr_send_wr(skb);
+ return -EINPROGRESS;
+}
static struct chcr_alg_template driver_algs[] = {
/* AES-CBC */
{
@@ -1232,7 +2487,7 @@ static struct chcr_alg_template driver_algs[] = {
.is_registered = 0,
.alg.crypto = {
.cra_name = "cbc(aes)",
- .cra_driver_name = "cbc(aes-chcr)",
+ .cra_driver_name = "cbc-aes-chcr",
.cra_priority = CHCR_CRA_PRIORITY,
.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
CRYPTO_ALG_ASYNC,
@@ -1259,7 +2514,7 @@ static struct chcr_alg_template driver_algs[] = {
.is_registered = 0,
.alg.crypto = {
.cra_name = "xts(aes)",
- .cra_driver_name = "xts(aes-chcr)",
+ .cra_driver_name = "xts-aes-chcr",
.cra_priority = CHCR_CRA_PRIORITY,
.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
CRYPTO_ALG_ASYNC,
@@ -1352,7 +2607,7 @@ static struct chcr_alg_template driver_algs[] = {
.halg.digestsize = SHA1_DIGEST_SIZE,
.halg.base = {
.cra_name = "hmac(sha1)",
- .cra_driver_name = "hmac(sha1-chcr)",
+ .cra_driver_name = "hmac-sha1-chcr",
.cra_blocksize = SHA1_BLOCK_SIZE,
}
}
@@ -1364,7 +2619,7 @@ static struct chcr_alg_template driver_algs[] = {
.halg.digestsize = SHA224_DIGEST_SIZE,
.halg.base = {
.cra_name = "hmac(sha224)",
- .cra_driver_name = "hmac(sha224-chcr)",
+ .cra_driver_name = "hmac-sha224-chcr",
.cra_blocksize = SHA224_BLOCK_SIZE,
}
}
@@ -1376,7 +2631,7 @@ static struct chcr_alg_template driver_algs[] = {
.halg.digestsize = SHA256_DIGEST_SIZE,
.halg.base = {
.cra_name = "hmac(sha256)",
- .cra_driver_name = "hmac(sha256-chcr)",
+ .cra_driver_name = "hmac-sha256-chcr",
.cra_blocksize = SHA256_BLOCK_SIZE,
}
}
@@ -1388,7 +2643,7 @@ static struct chcr_alg_template driver_algs[] = {
.halg.digestsize = SHA384_DIGEST_SIZE,
.halg.base = {
.cra_name = "hmac(sha384)",
- .cra_driver_name = "hmac(sha384-chcr)",
+ .cra_driver_name = "hmac-sha384-chcr",
.cra_blocksize = SHA384_BLOCK_SIZE,
}
}
@@ -1400,11 +2655,205 @@ static struct chcr_alg_template driver_algs[] = {
.halg.digestsize = SHA512_DIGEST_SIZE,
.halg.base = {
.cra_name = "hmac(sha512)",
- .cra_driver_name = "hmac(sha512-chcr)",
+ .cra_driver_name = "hmac-sha512-chcr",
.cra_blocksize = SHA512_BLOCK_SIZE,
}
}
},
+ /* Add AEAD Algorithms */
+ {
+ .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_GCM,
+ .is_registered = 0,
+ .alg.aead = {
+ .base = {
+ .cra_name = "gcm(aes)",
+ .cra_driver_name = "gcm-aes-chcr",
+ .cra_blocksize = 1,
+ .cra_ctxsize = sizeof(struct chcr_context) +
+ sizeof(struct chcr_aead_ctx) +
+ sizeof(struct chcr_gcm_ctx),
+ },
+ .ivsize = 12,
+ .maxauthsize = GHASH_DIGEST_SIZE,
+ .setkey = chcr_gcm_setkey,
+ .setauthsize = chcr_gcm_setauthsize,
+ }
+ },
+ {
+ .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106,
+ .is_registered = 0,
+ .alg.aead = {
+ .base = {
+ .cra_name = "rfc4106(gcm(aes))",
+ .cra_driver_name = "rfc4106-gcm-aes-chcr",
+ .cra_blocksize = 1,
+ .cra_ctxsize = sizeof(struct chcr_context) +
+ sizeof(struct chcr_aead_ctx) +
+ sizeof(struct chcr_gcm_ctx),
+
+ },
+ .ivsize = 8,
+ .maxauthsize = GHASH_DIGEST_SIZE,
+ .setkey = chcr_gcm_setkey,
+ .setauthsize = chcr_4106_4309_setauthsize,
+ }
+ },
+ {
+ .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_CCM,
+ .is_registered = 0,
+ .alg.aead = {
+ .base = {
+ .cra_name = "ccm(aes)",
+ .cra_driver_name = "ccm-aes-chcr",
+ .cra_blocksize = 1,
+ .cra_ctxsize = sizeof(struct chcr_context) +
+ sizeof(struct chcr_aead_ctx),
+
+ },
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = GHASH_DIGEST_SIZE,
+ .setkey = chcr_aead_ccm_setkey,
+ .setauthsize = chcr_ccm_setauthsize,
+ }
+ },
+ {
+ .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309,
+ .is_registered = 0,
+ .alg.aead = {
+ .base = {
+ .cra_name = "rfc4309(ccm(aes))",
+ .cra_driver_name = "rfc4309-ccm-aes-chcr",
+ .cra_blocksize = 1,
+ .cra_ctxsize = sizeof(struct chcr_context) +
+ sizeof(struct chcr_aead_ctx),
+
+ },
+ .ivsize = 8,
+ .maxauthsize = GHASH_DIGEST_SIZE,
+ .setkey = chcr_aead_rfc4309_setkey,
+ .setauthsize = chcr_4106_4309_setauthsize,
+ }
+ },
+ {
+ .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_AUTHENC,
+ .is_registered = 0,
+ .alg.aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha1),cbc(aes))",
+ .cra_driver_name =
+ "authenc-hmac-sha1-cbc-aes-chcr",
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct chcr_context) +
+ sizeof(struct chcr_aead_ctx) +
+ sizeof(struct chcr_authenc_ctx),
+
+ },
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = SHA1_DIGEST_SIZE,
+ .setkey = chcr_authenc_setkey,
+ .setauthsize = chcr_authenc_setauthsize,
+ }
+ },
+ {
+ .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_AUTHENC,
+ .is_registered = 0,
+ .alg.aead = {
+ .base = {
+
+ .cra_name = "authenc(hmac(sha256),cbc(aes))",
+ .cra_driver_name =
+ "authenc-hmac-sha256-cbc-aes-chcr",
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct chcr_context) +
+ sizeof(struct chcr_aead_ctx) +
+ sizeof(struct chcr_authenc_ctx),
+
+ },
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = SHA256_DIGEST_SIZE,
+ .setkey = chcr_authenc_setkey,
+ .setauthsize = chcr_authenc_setauthsize,
+ }
+ },
+ {
+ .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_AUTHENC,
+ .is_registered = 0,
+ .alg.aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha224),cbc(aes))",
+ .cra_driver_name =
+ "authenc-hmac-sha224-cbc-aes-chcr",
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct chcr_context) +
+ sizeof(struct chcr_aead_ctx) +
+ sizeof(struct chcr_authenc_ctx),
+ },
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = SHA224_DIGEST_SIZE,
+ .setkey = chcr_authenc_setkey,
+ .setauthsize = chcr_authenc_setauthsize,
+ }
+ },
+ {
+ .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_AUTHENC,
+ .is_registered = 0,
+ .alg.aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha384),cbc(aes))",
+ .cra_driver_name =
+ "authenc-hmac-sha384-cbc-aes-chcr",
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct chcr_context) +
+ sizeof(struct chcr_aead_ctx) +
+ sizeof(struct chcr_authenc_ctx),
+
+ },
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = SHA384_DIGEST_SIZE,
+ .setkey = chcr_authenc_setkey,
+ .setauthsize = chcr_authenc_setauthsize,
+ }
+ },
+ {
+ .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_AUTHENC,
+ .is_registered = 0,
+ .alg.aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha512),cbc(aes))",
+ .cra_driver_name =
+ "authenc-hmac-sha512-cbc-aes-chcr",
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct chcr_context) +
+ sizeof(struct chcr_aead_ctx) +
+ sizeof(struct chcr_authenc_ctx),
+
+ },
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = SHA512_DIGEST_SIZE,
+ .setkey = chcr_authenc_setkey,
+ .setauthsize = chcr_authenc_setauthsize,
+ }
+ },
+ {
+ .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_NULL,
+ .is_registered = 0,
+ .alg.aead = {
+ .base = {
+ .cra_name = "authenc(digest_null,cbc(aes))",
+ .cra_driver_name =
+ "authenc-digest_null-cbc-aes-chcr",
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct chcr_context) +
+ sizeof(struct chcr_aead_ctx) +
+ sizeof(struct chcr_authenc_ctx),
+
+ },
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = 0,
+ .setkey = chcr_aead_digest_null_setkey,
+ .setauthsize = chcr_authenc_null_setauthsize,
+ }
+ },
};
/*
@@ -1422,6 +2871,11 @@ static int chcr_unregister_alg(void)
crypto_unregister_alg(
&driver_algs[i].alg.crypto);
break;
+ case CRYPTO_ALG_TYPE_AEAD:
+ if (driver_algs[i].is_registered)
+ crypto_unregister_aead(
+ &driver_algs[i].alg.aead);
+ break;
case CRYPTO_ALG_TYPE_AHASH:
if (driver_algs[i].is_registered)
crypto_unregister_ahash(
@@ -1456,6 +2910,19 @@ static int chcr_register_alg(void)
err = crypto_register_alg(&driver_algs[i].alg.crypto);
name = driver_algs[i].alg.crypto.cra_driver_name;
break;
+ case CRYPTO_ALG_TYPE_AEAD:
+ driver_algs[i].alg.aead.base.cra_priority =
+ CHCR_CRA_PRIORITY;
+ driver_algs[i].alg.aead.base.cra_flags =
+ CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC;
+ driver_algs[i].alg.aead.encrypt = chcr_aead_encrypt;
+ driver_algs[i].alg.aead.decrypt = chcr_aead_decrypt;
+ driver_algs[i].alg.aead.init = chcr_aead_cra_init;
+ driver_algs[i].alg.aead.exit = chcr_aead_cra_exit;
+ driver_algs[i].alg.aead.base.cra_module = THIS_MODULE;
+ err = crypto_register_aead(&driver_algs[i].alg.aead);
+ name = driver_algs[i].alg.aead.base.cra_driver_name;
+ break;
case CRYPTO_ALG_TYPE_AHASH:
a_hash = &driver_algs[i].alg.hash;
a_hash->update = chcr_ahash_update;
diff --git a/drivers/crypto/chelsio/chcr_algo.h b/drivers/crypto/chelsio/chcr_algo.h
index 199b0bb69b89..3c7c51f7bedf 100644
--- a/drivers/crypto/chelsio/chcr_algo.h
+++ b/drivers/crypto/chelsio/chcr_algo.h
@@ -108,30 +108,24 @@
#define IPAD_DATA 0x36363636
#define OPAD_DATA 0x5c5c5c5c
-#define TRANSHDR_SIZE(alignedkctx_len)\
- (sizeof(struct ulptx_idata) +\
- sizeof(struct ulp_txpkt) +\
- sizeof(struct fw_crypto_lookaside_wr) +\
- sizeof(struct cpl_tx_sec_pdu) +\
- (alignedkctx_len))
-#define CIPHER_TRANSHDR_SIZE(alignedkctx_len, sge_pairs) \
- (TRANSHDR_SIZE(alignedkctx_len) + sge_pairs +\
+#define TRANSHDR_SIZE(kctx_len)\
+ (sizeof(struct chcr_wr) +\
+ kctx_len)
+#define CIPHER_TRANSHDR_SIZE(kctx_len, sge_pairs) \
+ (TRANSHDR_SIZE((kctx_len)) + (sge_pairs) +\
sizeof(struct cpl_rx_phys_dsgl))
-#define HASH_TRANSHDR_SIZE(alignedkctx_len)\
- (TRANSHDR_SIZE(alignedkctx_len) + DUMMY_BYTES)
+#define HASH_TRANSHDR_SIZE(kctx_len)\
+ (TRANSHDR_SIZE(kctx_len) + DUMMY_BYTES)
-#define SEC_CPL_OFFSET (sizeof(struct fw_crypto_lookaside_wr) + \
- sizeof(struct ulp_txpkt) + \
- sizeof(struct ulptx_idata))
-#define FILL_SEC_CPL_OP_IVINSR(id, len, hldr, ofst) \
+#define FILL_SEC_CPL_OP_IVINSR(id, len, ofst) \
htonl( \
CPL_TX_SEC_PDU_OPCODE_V(CPL_TX_SEC_PDU) | \
CPL_TX_SEC_PDU_RXCHID_V((id)) | \
CPL_TX_SEC_PDU_ACKFOLLOWS_V(0) | \
CPL_TX_SEC_PDU_ULPTXLPBK_V(1) | \
CPL_TX_SEC_PDU_CPLLEN_V((len)) | \
- CPL_TX_SEC_PDU_PLACEHOLDER_V((hldr)) | \
+ CPL_TX_SEC_PDU_PLACEHOLDER_V(0) | \
CPL_TX_SEC_PDU_IVINSRTOFST_V((ofst)))
#define FILL_SEC_CPL_CIPHERSTOP_HI(a_start, a_stop, c_start, c_stop_hi) \
@@ -148,7 +142,7 @@
CPL_TX_SEC_PDU_AUTHSTOP_V((a_stop)) | \
CPL_TX_SEC_PDU_AUTHINSERT_V((a_inst)))
-#define FILL_SEC_CPL_SCMD0_SEQNO(ctrl, seq, cmode, amode, opad, size, nivs) \
+#define FILL_SEC_CPL_SCMD0_SEQNO(ctrl, seq, cmode, amode, opad, size) \
htonl( \
SCMD_SEQ_NO_CTRL_V(0) | \
SCMD_STATUS_PRESENT_V(0) | \
@@ -159,7 +153,7 @@
SCMD_AUTH_MODE_V((amode)) | \
SCMD_HMAC_CTRL_V((opad)) | \
SCMD_IV_SIZE_V((size)) | \
- SCMD_NUM_IVS_V((nivs)))
+ SCMD_NUM_IVS_V(0))
#define FILL_SEC_CPL_IVGEN_HDRLEN(last, more, ctx_in, mac, ivdrop, len) htonl( \
SCMD_ENB_DBGID_V(0) | \
@@ -264,13 +258,15 @@ enum {
* where they indicate the size of the integrity check value (ICV)
*/
enum {
- AES_CCM_ICV_4 = 4,
- AES_CCM_ICV_6 = 6,
- AES_CCM_ICV_8 = 8,
- AES_CCM_ICV_10 = 10,
- AES_CCM_ICV_12 = 12,
- AES_CCM_ICV_14 = 14,
- AES_CCM_ICV_16 = 16
+ ICV_4 = 4,
+ ICV_6 = 6,
+ ICV_8 = 8,
+ ICV_10 = 10,
+ ICV_12 = 12,
+ ICV_13 = 13,
+ ICV_14 = 14,
+ ICV_15 = 15,
+ ICV_16 = 16
};
struct hash_op_params {
@@ -394,7 +390,7 @@ static const u8 aes_sbox[256] = {
187, 22
};
-static u32 aes_ks_subword(const u32 w)
+static inline u32 aes_ks_subword(const u32 w)
{
u8 bytes[4];
@@ -412,61 +408,4 @@ static u32 round_constant[11] = {
0x1B000000, 0x36000000, 0x6C000000
};
-/* dec_key - OUTPUT - Reverse round key
- * key - INPUT - key
- * keylength - INPUT - length of the key in number of bits
- */
-static inline void get_aes_decrypt_key(unsigned char *dec_key,
- const unsigned char *key,
- unsigned int keylength)
-{
- u32 temp;
- u32 w_ring[MAX_NK];
- int i, j, k;
- u8 nr, nk;
-
- switch (keylength) {
- case AES_KEYLENGTH_128BIT:
- nk = KEYLENGTH_4BYTES;
- nr = NUMBER_OF_ROUNDS_10;
- break;
-
- case AES_KEYLENGTH_192BIT:
- nk = KEYLENGTH_6BYTES;
- nr = NUMBER_OF_ROUNDS_12;
- break;
- case AES_KEYLENGTH_256BIT:
- nk = KEYLENGTH_8BYTES;
- nr = NUMBER_OF_ROUNDS_14;
- break;
- default:
- return;
- }
- for (i = 0; i < nk; i++ )
- w_ring[i] = be32_to_cpu(*(u32 *)&key[4 * i]);
-
- i = 0;
- temp = w_ring[nk - 1];
- while(i + nk < (nr + 1) * 4) {
- if(!(i % nk)) {
- /* RotWord(temp) */
- temp = (temp << 8) | (temp >> 24);
- temp = aes_ks_subword(temp);
- temp ^= round_constant[i / nk];
- }
- else if (nk == 8 && (i % 4 == 0))
- temp = aes_ks_subword(temp);
- w_ring[i % nk] ^= temp;
- temp = w_ring[i % nk];
- i++;
- }
- i--;
- for (k = 0, j = i % nk; k < nk; k++) {
- *((u32 *)dec_key + k) = htonl(w_ring[j]);
- j--;
- if(j < 0)
- j += nk;
- }
-}
-
#endif /* __CHCR_ALGO_H__ */
diff --git a/drivers/crypto/chelsio/chcr_core.c b/drivers/crypto/chelsio/chcr_core.c
index fb5f9bbfa09c..918da8e6e2d8 100644
--- a/drivers/crypto/chelsio/chcr_core.c
+++ b/drivers/crypto/chelsio/chcr_core.c
@@ -42,6 +42,7 @@ static chcr_handler_func work_handlers[NUM_CPL_CMDS] = {
static struct cxgb4_uld_info chcr_uld_info = {
.name = DRV_MODULE_NAME,
.nrxq = MAX_ULD_QSETS,
+ .ntxq = MAX_ULD_QSETS,
.rxq_size = 1024,
.add = chcr_uld_add,
.state_change = chcr_uld_state_change,
@@ -109,14 +110,12 @@ static int cpl_fw6_pld_handler(struct chcr_dev *dev,
if (ack_err_status) {
if (CHK_MAC_ERR_BIT(ack_err_status) ||
CHK_PAD_ERR_BIT(ack_err_status))
- error_status = -EINVAL;
+ error_status = -EBADMSG;
}
/* call completion callback with failure status */
if (req) {
- if (!chcr_handle_resp(req, input, error_status))
- req->complete(req, error_status);
- else
- return -EINVAL;
+ error_status = chcr_handle_resp(req, input, error_status);
+ req->complete(req, error_status);
} else {
pr_err("Incorrect request address from the firmware\n");
return -EFAULT;
@@ -126,7 +125,7 @@ static int cpl_fw6_pld_handler(struct chcr_dev *dev,
int chcr_send_wr(struct sk_buff *skb)
{
- return cxgb4_ofld_send(skb->dev, skb);
+ return cxgb4_crypto_send(skb->dev, skb);
}
static void *chcr_uld_add(const struct cxgb4_lld_info *lld)
diff --git a/drivers/crypto/chelsio/chcr_core.h b/drivers/crypto/chelsio/chcr_core.h
index 2a5c671a4232..c7088a4e0a49 100644
--- a/drivers/crypto/chelsio/chcr_core.h
+++ b/drivers/crypto/chelsio/chcr_core.h
@@ -52,13 +52,27 @@
#define MAC_ERROR_BIT 0
#define CHK_MAC_ERR_BIT(x) (((x) >> MAC_ERROR_BIT) & 1)
+#define MAX_SALT 4
struct uld_ctx;
+struct _key_ctx {
+ __be32 ctx_hdr;
+ u8 salt[MAX_SALT];
+ __be64 reserverd;
+ unsigned char key[0];
+};
+
+struct chcr_wr {
+ struct fw_crypto_lookaside_wr wreq;
+ struct ulp_txpkt ulptx;
+ struct ulptx_idata sc_imm;
+ struct cpl_tx_sec_pdu sec_cpl;
+ struct _key_ctx key_ctx;
+};
+
struct chcr_dev {
- /* Request submited to h/w and waiting for response. */
spinlock_t lock_chcr_dev;
- struct crypto_queue pending_queue;
struct uld_ctx *u_ctx;
unsigned char tx_channel_id;
};
diff --git a/drivers/crypto/chelsio/chcr_crypto.h b/drivers/crypto/chelsio/chcr_crypto.h
index d7d75605da8b..d5af7d64a763 100644
--- a/drivers/crypto/chelsio/chcr_crypto.h
+++ b/drivers/crypto/chelsio/chcr_crypto.h
@@ -36,6 +36,14 @@
#ifndef __CHCR_CRYPTO_H__
#define __CHCR_CRYPTO_H__
+#define GHASH_BLOCK_SIZE 16
+#define GHASH_DIGEST_SIZE 16
+
+#define CCM_B0_SIZE 16
+#define CCM_AAD_FIELD_SIZE 2
+#define T5_MAX_AAD_SIZE 512
+
+
/* Define following if h/w is not dropping the AAD and IV data before
* giving the processed data
*/
@@ -63,22 +71,36 @@
#define CHCR_SCMD_AUTH_CTRL_AUTH_CIPHER 0
#define CHCR_SCMD_AUTH_CTRL_CIPHER_AUTH 1
-#define CHCR_SCMD_CIPHER_MODE_NOP 0
-#define CHCR_SCMD_CIPHER_MODE_AES_CBC 1
-#define CHCR_SCMD_CIPHER_MODE_GENERIC_AES 4
-#define CHCR_SCMD_CIPHER_MODE_AES_XTS 6
+#define CHCR_SCMD_CIPHER_MODE_NOP 0
+#define CHCR_SCMD_CIPHER_MODE_AES_CBC 1
+#define CHCR_SCMD_CIPHER_MODE_AES_GCM 2
+#define CHCR_SCMD_CIPHER_MODE_AES_CTR 3
+#define CHCR_SCMD_CIPHER_MODE_GENERIC_AES 4
+#define CHCR_SCMD_CIPHER_MODE_AES_XTS 6
+#define CHCR_SCMD_CIPHER_MODE_AES_CCM 7
#define CHCR_SCMD_AUTH_MODE_NOP 0
#define CHCR_SCMD_AUTH_MODE_SHA1 1
#define CHCR_SCMD_AUTH_MODE_SHA224 2
#define CHCR_SCMD_AUTH_MODE_SHA256 3
+#define CHCR_SCMD_AUTH_MODE_GHASH 4
#define CHCR_SCMD_AUTH_MODE_SHA512_224 5
#define CHCR_SCMD_AUTH_MODE_SHA512_256 6
#define CHCR_SCMD_AUTH_MODE_SHA512_384 7
#define CHCR_SCMD_AUTH_MODE_SHA512_512 8
+#define CHCR_SCMD_AUTH_MODE_CBCMAC 9
+#define CHCR_SCMD_AUTH_MODE_CMAC 10
#define CHCR_SCMD_HMAC_CTRL_NOP 0
#define CHCR_SCMD_HMAC_CTRL_NO_TRUNC 1
+#define CHCR_SCMD_HMAC_CTRL_TRUNC_RFC4366 2
+#define CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT 3
+#define CHCR_SCMD_HMAC_CTRL_PL1 4
+#define CHCR_SCMD_HMAC_CTRL_PL2 5
+#define CHCR_SCMD_HMAC_CTRL_PL3 6
+#define CHCR_SCMD_HMAC_CTRL_DIV2 7
+#define VERIFY_HW 0
+#define VERIFY_SW 1
#define CHCR_SCMD_IVGEN_CTRL_HW 0
#define CHCR_SCMD_IVGEN_CTRL_SW 1
@@ -106,39 +128,74 @@
#define IV_IMMEDIATE 1
#define IV_DSGL 2
+#define AEAD_H_SIZE 16
+
#define CRYPTO_ALG_SUB_TYPE_MASK 0x0f000000
#define CRYPTO_ALG_SUB_TYPE_HASH_HMAC 0x01000000
+#define CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106 0x02000000
+#define CRYPTO_ALG_SUB_TYPE_AEAD_GCM 0x03000000
+#define CRYPTO_ALG_SUB_TYPE_AEAD_AUTHENC 0x04000000
+#define CRYPTO_ALG_SUB_TYPE_AEAD_CCM 0x05000000
+#define CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309 0x06000000
+#define CRYPTO_ALG_SUB_TYPE_AEAD_NULL 0x07000000
+#define CRYPTO_ALG_SUB_TYPE_CTR 0x08000000
#define CRYPTO_ALG_TYPE_HMAC (CRYPTO_ALG_TYPE_AHASH |\
CRYPTO_ALG_SUB_TYPE_HASH_HMAC)
-#define MAX_SALT 4
#define MAX_SCRATCH_PAD_SIZE 32
#define CHCR_HASH_MAX_BLOCK_SIZE_64 64
#define CHCR_HASH_MAX_BLOCK_SIZE_128 128
/* Aligned to 128 bit boundary */
-struct _key_ctx {
- __be32 ctx_hdr;
- u8 salt[MAX_SALT];
- __be64 reserverd;
- unsigned char key[0];
-};
struct ablk_ctx {
- u8 enc;
- unsigned int processed_len;
__be32 key_ctx_hdr;
unsigned int enckey_len;
- unsigned int dst_nents;
- struct scatterlist iv_sg;
u8 key[CHCR_AES_MAX_KEY_LEN];
- u8 iv[CHCR_MAX_CRYPTO_IV_LEN];
unsigned char ciph_mode;
+ u8 rrkey[AES_MAX_KEY_SIZE];
+};
+struct chcr_aead_reqctx {
+ struct sk_buff *skb;
+ short int dst_nents;
+ u16 verify;
+ u8 iv[CHCR_MAX_CRYPTO_IV_LEN];
+ unsigned char scratch_pad[MAX_SCRATCH_PAD_SIZE];
+};
+
+struct chcr_gcm_ctx {
+ u8 ghash_h[AEAD_H_SIZE];
};
+struct chcr_authenc_ctx {
+ u8 dec_rrkey[AES_MAX_KEY_SIZE];
+ u8 h_iopad[2 * CHCR_HASH_MAX_DIGEST_SIZE];
+ unsigned char auth_mode;
+};
+
+struct __aead_ctx {
+ struct chcr_gcm_ctx gcm[0];
+ struct chcr_authenc_ctx authenc[0];
+};
+
+
+
+struct chcr_aead_ctx {
+ __be32 key_ctx_hdr;
+ unsigned int enckey_len;
+ struct crypto_skcipher *null;
+ u8 salt[MAX_SALT];
+ u8 key[CHCR_AES_MAX_KEY_LEN];
+ u16 hmac_ctrl;
+ u16 mayverify;
+ struct __aead_ctx ctx[0];
+};
+
+
+
struct hmac_ctx {
- struct shash_desc *desc;
+ struct crypto_shash *base_hash;
u8 ipad[CHCR_HASH_MAX_BLOCK_SIZE_128];
u8 opad[CHCR_HASH_MAX_BLOCK_SIZE_128];
};
@@ -146,6 +203,7 @@ struct hmac_ctx {
struct __crypto_ctx {
struct hmac_ctx hmacctx[0];
struct ablk_ctx ablkctx[0];
+ struct chcr_aead_ctx aeadctx[0];
};
struct chcr_context {
@@ -156,18 +214,22 @@ struct chcr_context {
struct chcr_ahash_req_ctx {
u32 result;
- char bfr[CHCR_HASH_MAX_BLOCK_SIZE_128];
- u8 bfr_len;
+ u8 bfr1[CHCR_HASH_MAX_BLOCK_SIZE_128];
+ u8 bfr2[CHCR_HASH_MAX_BLOCK_SIZE_128];
+ u8 *reqbfr;
+ u8 *skbfr;
+ u8 reqlen;
/* DMA the partial hash in it */
u8 partial_hash[CHCR_HASH_MAX_DIGEST_SIZE];
u64 data_len; /* Data len till time */
- void *dummy_payload_ptr;
/* SKB which is being sent to the hardware for processing */
struct sk_buff *skb;
};
struct chcr_blkcipher_req_ctx {
struct sk_buff *skb;
+ unsigned int dst_nents;
+ u8 iv[CHCR_MAX_CRYPTO_IV_LEN];
};
struct chcr_alg_template {
@@ -176,16 +238,19 @@ struct chcr_alg_template {
union {
struct crypto_alg crypto;
struct ahash_alg hash;
+ struct aead_alg aead;
} alg;
};
struct chcr_req_ctx {
union {
struct ahash_request *ahash_req;
+ struct aead_request *aead_req;
struct ablkcipher_request *ablk_req;
} req;
union {
struct chcr_ahash_req_ctx *ahash_ctx;
+ struct chcr_aead_reqctx *reqctx;
struct chcr_blkcipher_req_ctx *ablk_ctx;
} ctx;
};
@@ -195,9 +260,15 @@ struct sge_opaque_hdr {
dma_addr_t addr[MAX_SKB_FRAGS + 1];
};
-typedef struct sk_buff *(*create_wr_t)(struct crypto_async_request *req,
- struct chcr_context *ctx,
+typedef struct sk_buff *(*create_wr_t)(struct aead_request *req,
unsigned short qid,
+ int size,
unsigned short op_type);
+static int chcr_aead_op(struct aead_request *req_base,
+ unsigned short op_type,
+ int size,
+ create_wr_t create_wr_fn);
+static inline int get_aead_subtype(struct crypto_aead *aead);
+
#endif /* __CHCR_CRYPTO_H__ */
diff --git a/drivers/crypto/marvell/cesa.c b/drivers/crypto/marvell/cesa.c
index 37dadb2a4feb..6e7a5c77a00a 100644
--- a/drivers/crypto/marvell/cesa.c
+++ b/drivers/crypto/marvell/cesa.c
@@ -375,10 +375,6 @@ static int mv_cesa_dev_dma_init(struct mv_cesa_dev *cesa)
if (!dma->padding_pool)
return -ENOMEM;
- dma->iv_pool = dmam_pool_create("cesa_iv", dev, 16, 1, 0);
- if (!dma->iv_pool)
- return -ENOMEM;
-
cesa->dma = dma;
return 0;
diff --git a/drivers/crypto/marvell/cesa.h b/drivers/crypto/marvell/cesa.h
index e423d33decd4..a768da7138a1 100644
--- a/drivers/crypto/marvell/cesa.h
+++ b/drivers/crypto/marvell/cesa.h
@@ -277,7 +277,7 @@ struct mv_cesa_op_ctx {
#define CESA_TDMA_DUMMY 0
#define CESA_TDMA_DATA 1
#define CESA_TDMA_OP 2
-#define CESA_TDMA_IV 3
+#define CESA_TDMA_RESULT 3
/**
* struct mv_cesa_tdma_desc - TDMA descriptor
@@ -393,7 +393,6 @@ struct mv_cesa_dev_dma {
struct dma_pool *op_pool;
struct dma_pool *cache_pool;
struct dma_pool *padding_pool;
- struct dma_pool *iv_pool;
};
/**
@@ -839,7 +838,7 @@ mv_cesa_tdma_desc_iter_init(struct mv_cesa_tdma_chain *chain)
memset(chain, 0, sizeof(*chain));
}
-int mv_cesa_dma_add_iv_op(struct mv_cesa_tdma_chain *chain, dma_addr_t src,
+int mv_cesa_dma_add_result_op(struct mv_cesa_tdma_chain *chain, dma_addr_t src,
u32 size, u32 flags, gfp_t gfp_flags);
struct mv_cesa_op_ctx *mv_cesa_dma_add_op(struct mv_cesa_tdma_chain *chain,
diff --git a/drivers/crypto/marvell/cipher.c b/drivers/crypto/marvell/cipher.c
index d19dc9614e6e..098871a22a54 100644
--- a/drivers/crypto/marvell/cipher.c
+++ b/drivers/crypto/marvell/cipher.c
@@ -212,7 +212,8 @@ mv_cesa_ablkcipher_complete(struct crypto_async_request *req)
struct mv_cesa_req *basereq;
basereq = &creq->base;
- memcpy(ablkreq->info, basereq->chain.last->data, ivsize);
+ memcpy(ablkreq->info, basereq->chain.last->op->ctx.blkcipher.iv,
+ ivsize);
} else {
memcpy_fromio(ablkreq->info,
engine->sram + CESA_SA_CRYPT_IV_SRAM_OFFSET,
@@ -373,8 +374,9 @@ static int mv_cesa_ablkcipher_dma_req_init(struct ablkcipher_request *req,
/* Add output data for IV */
ivsize = crypto_ablkcipher_ivsize(crypto_ablkcipher_reqtfm(req));
- ret = mv_cesa_dma_add_iv_op(&basereq->chain, CESA_SA_CRYPT_IV_SRAM_OFFSET,
- ivsize, CESA_TDMA_SRC_IN_SRAM, flags);
+ ret = mv_cesa_dma_add_result_op(&basereq->chain, CESA_SA_CFG_SRAM_OFFSET,
+ CESA_SA_DATA_SRAM_OFFSET,
+ CESA_TDMA_SRC_IN_SRAM, flags);
if (ret)
goto err_free_tdma;
diff --git a/drivers/crypto/marvell/hash.c b/drivers/crypto/marvell/hash.c
index 77712b375b84..317cf029c0cf 100644
--- a/drivers/crypto/marvell/hash.c
+++ b/drivers/crypto/marvell/hash.c
@@ -311,24 +311,40 @@ static void mv_cesa_ahash_complete(struct crypto_async_request *req)
int i;
digsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(ahashreq));
- for (i = 0; i < digsize / 4; i++)
- creq->state[i] = readl_relaxed(engine->regs + CESA_IVDIG(i));
- if (creq->last_req) {
+ if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ &&
+ (creq->base.chain.last->flags & CESA_TDMA_TYPE_MSK) == CESA_TDMA_RESULT) {
+ __le32 *data = NULL;
+
/*
- * Hardware's MD5 digest is in little endian format, but
- * SHA in big endian format
+		 * Result is already in the correct endianness when the SA is
+		 * used.
*/
- if (creq->algo_le) {
- __le32 *result = (void *)ahashreq->result;
+ data = creq->base.chain.last->op->ctx.hash.hash;
+ for (i = 0; i < digsize / 4; i++)
+ creq->state[i] = cpu_to_le32(data[i]);
- for (i = 0; i < digsize / 4; i++)
- result[i] = cpu_to_le32(creq->state[i]);
- } else {
- __be32 *result = (void *)ahashreq->result;
+ memcpy(ahashreq->result, data, digsize);
+ } else {
+ for (i = 0; i < digsize / 4; i++)
+ creq->state[i] = readl_relaxed(engine->regs +
+ CESA_IVDIG(i));
+ if (creq->last_req) {
+ /*
+ * Hardware's MD5 digest is in little endian format, but
+ * SHA in big endian format
+ */
+ if (creq->algo_le) {
+ __le32 *result = (void *)ahashreq->result;
+
+ for (i = 0; i < digsize / 4; i++)
+ result[i] = cpu_to_le32(creq->state[i]);
+ } else {
+ __be32 *result = (void *)ahashreq->result;
- for (i = 0; i < digsize / 4; i++)
- result[i] = cpu_to_be32(creq->state[i]);
+ for (i = 0; i < digsize / 4; i++)
+ result[i] = cpu_to_be32(creq->state[i]);
+ }
}
}
@@ -503,6 +519,12 @@ mv_cesa_ahash_dma_last_req(struct mv_cesa_tdma_chain *chain,
CESA_SA_DESC_CFG_LAST_FRAG,
CESA_SA_DESC_CFG_FRAG_MSK);
+ ret = mv_cesa_dma_add_result_op(chain,
+ CESA_SA_CFG_SRAM_OFFSET,
+ CESA_SA_DATA_SRAM_OFFSET,
+ CESA_TDMA_SRC_IN_SRAM, flags);
+ if (ret)
+ return ERR_PTR(-ENOMEM);
return op;
}
@@ -563,6 +585,7 @@ static int mv_cesa_ahash_dma_req_init(struct ahash_request *req)
struct mv_cesa_op_ctx *op = NULL;
unsigned int frag_len;
int ret;
+ u32 type;
basereq->chain.first = NULL;
basereq->chain.last = NULL;
@@ -634,7 +657,15 @@ static int mv_cesa_ahash_dma_req_init(struct ahash_request *req)
goto err_free_tdma;
}
- if (op) {
+ /*
+ * If results are copied via DMA, this means that this
+ * request can be directly processed by the engine,
+ * without partial updates. So we can chain it at the
+ * DMA level with other requests.
+ */
+ type = basereq->chain.last->flags & CESA_TDMA_TYPE_MSK;
+
+ if (op && type != CESA_TDMA_RESULT) {
/* Add dummy desc to wait for crypto operation end */
ret = mv_cesa_dma_add_dummy_end(&basereq->chain, flags);
if (ret)
@@ -647,8 +678,10 @@ static int mv_cesa_ahash_dma_req_init(struct ahash_request *req)
else
creq->cache_ptr = 0;
- basereq->chain.last->flags |= (CESA_TDMA_END_OF_REQ |
- CESA_TDMA_BREAK_CHAIN);
+ basereq->chain.last->flags |= CESA_TDMA_END_OF_REQ;
+
+ if (type != CESA_TDMA_RESULT)
+ basereq->chain.last->flags |= CESA_TDMA_BREAK_CHAIN;
return 0;
diff --git a/drivers/crypto/marvell/tdma.c b/drivers/crypto/marvell/tdma.c
index 9fd7a5fbaa1b..4416b88eca70 100644
--- a/drivers/crypto/marvell/tdma.c
+++ b/drivers/crypto/marvell/tdma.c
@@ -69,9 +69,6 @@ void mv_cesa_dma_cleanup(struct mv_cesa_req *dreq)
if (type == CESA_TDMA_OP)
dma_pool_free(cesa_dev->dma->op_pool, tdma->op,
le32_to_cpu(tdma->src));
- else if (type == CESA_TDMA_IV)
- dma_pool_free(cesa_dev->dma->iv_pool, tdma->data,
- le32_to_cpu(tdma->dst));
tdma = tdma->next;
dma_pool_free(cesa_dev->dma->tdma_desc_pool, old_tdma,
@@ -209,29 +206,37 @@ mv_cesa_dma_add_desc(struct mv_cesa_tdma_chain *chain, gfp_t flags)
return new_tdma;
}
-int mv_cesa_dma_add_iv_op(struct mv_cesa_tdma_chain *chain, dma_addr_t src,
+int mv_cesa_dma_add_result_op(struct mv_cesa_tdma_chain *chain, dma_addr_t src,
u32 size, u32 flags, gfp_t gfp_flags)
{
-
- struct mv_cesa_tdma_desc *tdma;
- u8 *iv;
- dma_addr_t dma_handle;
+ struct mv_cesa_tdma_desc *tdma, *op_desc;
tdma = mv_cesa_dma_add_desc(chain, gfp_flags);
if (IS_ERR(tdma))
return PTR_ERR(tdma);
- iv = dma_pool_alloc(cesa_dev->dma->iv_pool, gfp_flags, &dma_handle);
- if (!iv)
- return -ENOMEM;
+ /* We re-use an existing op_desc object to retrieve the context
+ * and result instead of allocating a new one.
+ * There is at least one object of this type in a CESA crypto
+ * req, just pick the first one in the chain.
+ */
+ for (op_desc = chain->first; op_desc; op_desc = op_desc->next) {
+ u32 type = op_desc->flags & CESA_TDMA_TYPE_MSK;
+
+ if (type == CESA_TDMA_OP)
+ break;
+ }
+
+ if (!op_desc)
+ return -EIO;
tdma->byte_cnt = cpu_to_le32(size | BIT(31));
tdma->src = src;
- tdma->dst = cpu_to_le32(dma_handle);
- tdma->data = iv;
+ tdma->dst = op_desc->src;
+ tdma->op = op_desc->op;
flags &= (CESA_TDMA_DST_IN_SRAM | CESA_TDMA_SRC_IN_SRAM);
- tdma->flags = flags | CESA_TDMA_IV;
+ tdma->flags = flags | CESA_TDMA_RESULT;
return 0;
}
diff --git a/drivers/crypto/mv_cesa.c b/drivers/crypto/mv_cesa.c
index 104e9ce9400a..451fa18c1c7b 100644
--- a/drivers/crypto/mv_cesa.c
+++ b/drivers/crypto/mv_cesa.c
@@ -1073,7 +1073,7 @@ static int mv_probe(struct platform_device *pdev)
if (!res)
return -ENXIO;
- cp = kzalloc(sizeof(*cp), GFP_KERNEL);
+ cp = devm_kzalloc(&pdev->dev, sizeof(*cp), GFP_KERNEL);
if (!cp)
return -ENOMEM;
@@ -1163,7 +1163,6 @@ err_irq:
err_thread:
kthread_stop(cp->queue_th);
err:
- kfree(cp);
cpg = NULL;
return ret;
}
@@ -1187,7 +1186,6 @@ static int mv_remove(struct platform_device *pdev)
clk_put(cp->clk);
}
- kfree(cp);
cpg = NULL;
return 0;
}
diff --git a/drivers/crypto/nx/nx.c b/drivers/crypto/nx/nx.c
index 42f0f229f7f7..036057abb257 100644
--- a/drivers/crypto/nx/nx.c
+++ b/drivers/crypto/nx/nx.c
@@ -32,7 +32,6 @@
#include <linux/scatterlist.h>
#include <linux/device.h>
#include <linux/of.h>
-#include <linux/types.h>
#include <asm/hvcall.h>
#include <asm/vio.h>
diff --git a/drivers/crypto/padlock-aes.c b/drivers/crypto/padlock-aes.c
index 441e86b23571..b3869748cc6b 100644
--- a/drivers/crypto/padlock-aes.c
+++ b/drivers/crypto/padlock-aes.c
@@ -183,8 +183,8 @@ static inline void padlock_store_cword(struct cword *cword)
/*
* While the padlock instructions don't use FP/SSE registers, they
- * generate a spurious DNA fault when cr0.ts is '1'. These instructions
- * should be used only inside the irq_ts_save/restore() context
+ * generate a spurious DNA fault when CR0.TS is '1'. Fortunately,
+ * the kernel doesn't use CR0.TS.
*/
static inline void rep_xcrypt_ecb(const u8 *input, u8 *output, void *key,
@@ -298,24 +298,18 @@ static inline u8 *padlock_xcrypt_cbc(const u8 *input, u8 *output, void *key,
static void aes_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
struct aes_ctx *ctx = aes_ctx(tfm);
- int ts_state;
padlock_reset_key(&ctx->cword.encrypt);
- ts_state = irq_ts_save();
ecb_crypt(in, out, ctx->E, &ctx->cword.encrypt, 1);
- irq_ts_restore(ts_state);
padlock_store_cword(&ctx->cword.encrypt);
}
static void aes_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
struct aes_ctx *ctx = aes_ctx(tfm);
- int ts_state;
padlock_reset_key(&ctx->cword.encrypt);
- ts_state = irq_ts_save();
ecb_crypt(in, out, ctx->D, &ctx->cword.decrypt, 1);
- irq_ts_restore(ts_state);
padlock_store_cword(&ctx->cword.encrypt);
}
@@ -346,14 +340,12 @@ static int ecb_aes_encrypt(struct blkcipher_desc *desc,
struct aes_ctx *ctx = blk_aes_ctx(desc->tfm);
struct blkcipher_walk walk;
int err;
- int ts_state;
padlock_reset_key(&ctx->cword.encrypt);
blkcipher_walk_init(&walk, dst, src, nbytes);
err = blkcipher_walk_virt(desc, &walk);
- ts_state = irq_ts_save();
while ((nbytes = walk.nbytes)) {
padlock_xcrypt_ecb(walk.src.virt.addr, walk.dst.virt.addr,
ctx->E, &ctx->cword.encrypt,
@@ -361,7 +353,6 @@ static int ecb_aes_encrypt(struct blkcipher_desc *desc,
nbytes &= AES_BLOCK_SIZE - 1;
err = blkcipher_walk_done(desc, &walk, nbytes);
}
- irq_ts_restore(ts_state);
padlock_store_cword(&ctx->cword.encrypt);
@@ -375,14 +366,12 @@ static int ecb_aes_decrypt(struct blkcipher_desc *desc,
struct aes_ctx *ctx = blk_aes_ctx(desc->tfm);
struct blkcipher_walk walk;
int err;
- int ts_state;
padlock_reset_key(&ctx->cword.decrypt);
blkcipher_walk_init(&walk, dst, src, nbytes);
err = blkcipher_walk_virt(desc, &walk);
- ts_state = irq_ts_save();
while ((nbytes = walk.nbytes)) {
padlock_xcrypt_ecb(walk.src.virt.addr, walk.dst.virt.addr,
ctx->D, &ctx->cword.decrypt,
@@ -390,7 +379,6 @@ static int ecb_aes_decrypt(struct blkcipher_desc *desc,
nbytes &= AES_BLOCK_SIZE - 1;
err = blkcipher_walk_done(desc, &walk, nbytes);
}
- irq_ts_restore(ts_state);
padlock_store_cword(&ctx->cword.encrypt);
@@ -425,14 +413,12 @@ static int cbc_aes_encrypt(struct blkcipher_desc *desc,
struct aes_ctx *ctx = blk_aes_ctx(desc->tfm);
struct blkcipher_walk walk;
int err;
- int ts_state;
padlock_reset_key(&ctx->cword.encrypt);
blkcipher_walk_init(&walk, dst, src, nbytes);
err = blkcipher_walk_virt(desc, &walk);
- ts_state = irq_ts_save();
while ((nbytes = walk.nbytes)) {
u8 *iv = padlock_xcrypt_cbc(walk.src.virt.addr,
walk.dst.virt.addr, ctx->E,
@@ -442,7 +428,6 @@ static int cbc_aes_encrypt(struct blkcipher_desc *desc,
nbytes &= AES_BLOCK_SIZE - 1;
err = blkcipher_walk_done(desc, &walk, nbytes);
}
- irq_ts_restore(ts_state);
padlock_store_cword(&ctx->cword.decrypt);
@@ -456,14 +441,12 @@ static int cbc_aes_decrypt(struct blkcipher_desc *desc,
struct aes_ctx *ctx = blk_aes_ctx(desc->tfm);
struct blkcipher_walk walk;
int err;
- int ts_state;
padlock_reset_key(&ctx->cword.encrypt);
blkcipher_walk_init(&walk, dst, src, nbytes);
err = blkcipher_walk_virt(desc, &walk);
- ts_state = irq_ts_save();
while ((nbytes = walk.nbytes)) {
padlock_xcrypt_cbc(walk.src.virt.addr, walk.dst.virt.addr,
ctx->D, walk.iv, &ctx->cword.decrypt,
@@ -472,8 +455,6 @@ static int cbc_aes_decrypt(struct blkcipher_desc *desc,
err = blkcipher_walk_done(desc, &walk, nbytes);
}
- irq_ts_restore(ts_state);
-
padlock_store_cword(&ctx->cword.encrypt);
return err;
diff --git a/drivers/crypto/padlock-sha.c b/drivers/crypto/padlock-sha.c
index 8c5f90647b7a..bc72d20c32c3 100644
--- a/drivers/crypto/padlock-sha.c
+++ b/drivers/crypto/padlock-sha.c
@@ -89,7 +89,6 @@ static int padlock_sha1_finup(struct shash_desc *desc, const u8 *in,
struct sha1_state state;
unsigned int space;
unsigned int leftover;
- int ts_state;
int err;
dctx->fallback.flags = desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP;
@@ -120,14 +119,11 @@ static int padlock_sha1_finup(struct shash_desc *desc, const u8 *in,
memcpy(result, &state.state, SHA1_DIGEST_SIZE);
- /* prevent taking the spurious DNA fault with padlock. */
- ts_state = irq_ts_save();
asm volatile (".byte 0xf3,0x0f,0xa6,0xc8" /* rep xsha1 */
: \
: "c"((unsigned long)state.count + count), \
"a"((unsigned long)state.count), \
"S"(in), "D"(result));
- irq_ts_restore(ts_state);
padlock_output_block((uint32_t *)result, (uint32_t *)out, 5);
@@ -155,7 +151,6 @@ static int padlock_sha256_finup(struct shash_desc *desc, const u8 *in,
struct sha256_state state;
unsigned int space;
unsigned int leftover;
- int ts_state;
int err;
dctx->fallback.flags = desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP;
@@ -186,14 +181,11 @@ static int padlock_sha256_finup(struct shash_desc *desc, const u8 *in,
memcpy(result, &state.state, SHA256_DIGEST_SIZE);
- /* prevent taking the spurious DNA fault with padlock. */
- ts_state = irq_ts_save();
asm volatile (".byte 0xf3,0x0f,0xa6,0xd0" /* rep xsha256 */
: \
: "c"((unsigned long)state.count + count), \
"a"((unsigned long)state.count), \
"S"(in), "D"(result));
- irq_ts_restore(ts_state);
padlock_output_block((uint32_t *)result, (uint32_t *)out, 8);
@@ -312,7 +304,6 @@ static int padlock_sha1_update_nano(struct shash_desc *desc,
u8 buf[128 + PADLOCK_ALIGNMENT - STACK_ALIGN] __attribute__
((aligned(STACK_ALIGN)));
u8 *dst = PTR_ALIGN(&buf[0], PADLOCK_ALIGNMENT);
- int ts_state;
partial = sctx->count & 0x3f;
sctx->count += len;
@@ -328,23 +319,19 @@ static int padlock_sha1_update_nano(struct shash_desc *desc,
memcpy(sctx->buffer + partial, data,
done + SHA1_BLOCK_SIZE);
src = sctx->buffer;
- ts_state = irq_ts_save();
asm volatile (".byte 0xf3,0x0f,0xa6,0xc8"
: "+S"(src), "+D"(dst) \
: "a"((long)-1), "c"((unsigned long)1));
- irq_ts_restore(ts_state);
done += SHA1_BLOCK_SIZE;
src = data + done;
}
/* Process the left bytes from the input data */
if (len - done >= SHA1_BLOCK_SIZE) {
- ts_state = irq_ts_save();
asm volatile (".byte 0xf3,0x0f,0xa6,0xc8"
: "+S"(src), "+D"(dst)
: "a"((long)-1),
"c"((unsigned long)((len - done) / SHA1_BLOCK_SIZE)));
- irq_ts_restore(ts_state);
done += ((len - done) - (len - done) % SHA1_BLOCK_SIZE);
src = data + done;
}
@@ -401,7 +388,6 @@ static int padlock_sha256_update_nano(struct shash_desc *desc, const u8 *data,
u8 buf[128 + PADLOCK_ALIGNMENT - STACK_ALIGN] __attribute__
((aligned(STACK_ALIGN)));
u8 *dst = PTR_ALIGN(&buf[0], PADLOCK_ALIGNMENT);
- int ts_state;
partial = sctx->count & 0x3f;
sctx->count += len;
@@ -417,23 +403,19 @@ static int padlock_sha256_update_nano(struct shash_desc *desc, const u8 *data,
memcpy(sctx->buf + partial, data,
done + SHA256_BLOCK_SIZE);
src = sctx->buf;
- ts_state = irq_ts_save();
asm volatile (".byte 0xf3,0x0f,0xa6,0xd0"
: "+S"(src), "+D"(dst)
: "a"((long)-1), "c"((unsigned long)1));
- irq_ts_restore(ts_state);
done += SHA256_BLOCK_SIZE;
src = data + done;
}
/* Process the left bytes from input data*/
if (len - done >= SHA256_BLOCK_SIZE) {
- ts_state = irq_ts_save();
asm volatile (".byte 0xf3,0x0f,0xa6,0xd0"
: "+S"(src), "+D"(dst)
: "a"((long)-1),
"c"((unsigned long)((len - done) / 64)));
- irq_ts_restore(ts_state);
done += ((len - done) - (len - done) % 64);
src = data + done;
}
diff --git a/drivers/crypto/sahara.c b/drivers/crypto/sahara.c
index 0c49956ee0ce..1d9ecd368b5b 100644
--- a/drivers/crypto/sahara.c
+++ b/drivers/crypto/sahara.c
@@ -390,7 +390,7 @@ static void sahara_decode_status(struct sahara_dev *dev, unsigned int status)
if (status & SAHARA_STATUS_MODE_BATCH)
dev_dbg(dev->device, " - Batch Mode.\n");
else if (status & SAHARA_STATUS_MODE_DEDICATED)
- dev_dbg(dev->device, " - Decidated Mode.\n");
+ dev_dbg(dev->device, " - Dedicated Mode.\n");
else if (status & SAHARA_STATUS_MODE_DEBUG)
dev_dbg(dev->device, " - Debug Mode.\n");
diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
index 0418a2f41dc0..0bba6a19d36a 100644
--- a/drivers/crypto/talitos.c
+++ b/drivers/crypto/talitos.c
@@ -590,7 +590,7 @@ static void talitos_error(struct device *dev, u32 isr, u32 isr_lo)
if (v_lo & TALITOS_CCPSR_LO_MDTE)
dev_err(dev, "master data transfer error\n");
if (v_lo & TALITOS_CCPSR_LO_SGDLZ)
- dev_err(dev, is_sec1 ? "pointeur not complete error\n"
+ dev_err(dev, is_sec1 ? "pointer not complete error\n"
: "s/g data length zero error\n");
if (v_lo & TALITOS_CCPSR_LO_FPZ)
dev_err(dev, is_sec1 ? "parity error\n"
diff --git a/drivers/crypto/virtio/Kconfig b/drivers/crypto/virtio/Kconfig
new file mode 100644
index 000000000000..d80f73366ae2
--- /dev/null
+++ b/drivers/crypto/virtio/Kconfig
@@ -0,0 +1,10 @@
+config CRYPTO_DEV_VIRTIO
+ tristate "VirtIO crypto driver"
+ depends on VIRTIO
+ select CRYPTO_AEAD
+ select CRYPTO_AUTHENC
+ select CRYPTO_BLKCIPHER
+ default m
+ help
+ This driver provides support for virtio crypto device. If you
+ choose 'M' here, this module will be called virtio_crypto.
diff --git a/drivers/crypto/virtio/Makefile b/drivers/crypto/virtio/Makefile
new file mode 100644
index 000000000000..dd342c947ff9
--- /dev/null
+++ b/drivers/crypto/virtio/Makefile
@@ -0,0 +1,5 @@
+obj-$(CONFIG_CRYPTO_DEV_VIRTIO) += virtio_crypto.o
+virtio_crypto-objs := \
+ virtio_crypto_algs.o \
+ virtio_crypto_mgr.o \
+ virtio_crypto_core.o
diff --git a/drivers/crypto/virtio/virtio_crypto_algs.c b/drivers/crypto/virtio/virtio_crypto_algs.c
new file mode 100644
index 000000000000..c2374df9abae
--- /dev/null
+++ b/drivers/crypto/virtio/virtio_crypto_algs.c
@@ -0,0 +1,540 @@
+ /* Algorithms supported by virtio crypto device
+ *
+ * Authors: Gonglei <arei.gonglei@huawei.com>
+ *
+ * Copyright 2016 HUAWEI TECHNOLOGIES CO., LTD.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/scatterlist.h>
+#include <crypto/algapi.h>
+#include <linux/err.h>
+#include <crypto/scatterwalk.h>
+#include <linux/atomic.h>
+
+#include <uapi/linux/virtio_crypto.h>
+#include "virtio_crypto_common.h"
+
+/*
+ * The algs_lock protects the global virtio_crypto_active_devs counter
+ * below and crypto algorithm registration.
+ */
+static DEFINE_MUTEX(algs_lock);
+static unsigned int virtio_crypto_active_devs;
+
+static u64 virtio_crypto_alg_sg_nents_length(struct scatterlist *sg)
+{
+ u64 total = 0;
+
+ for (; sg; sg = sg_next(sg))
+ total += sg->length;
+
+ return total;
+}
+
+static int
+virtio_crypto_alg_validate_key(int key_len, uint32_t *alg)
+{
+ switch (key_len) {
+ case AES_KEYSIZE_128:
+ case AES_KEYSIZE_192:
+ case AES_KEYSIZE_256:
+ *alg = VIRTIO_CRYPTO_CIPHER_AES_CBC;
+ break;
+ default:
+ pr_err("virtio_crypto: Unsupported key length: %d\n",
+ key_len);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int virtio_crypto_alg_ablkcipher_init_session(
+ struct virtio_crypto_ablkcipher_ctx *ctx,
+ uint32_t alg, const uint8_t *key,
+ unsigned int keylen,
+ int encrypt)
+{
+ struct scatterlist outhdr, key_sg, inhdr, *sgs[3];
+ unsigned int tmp;
+ struct virtio_crypto *vcrypto = ctx->vcrypto;
+ int op = encrypt ? VIRTIO_CRYPTO_OP_ENCRYPT : VIRTIO_CRYPTO_OP_DECRYPT;
+ int err;
+ unsigned int num_out = 0, num_in = 0;
+
+ /*
+ * Avoid doing DMA from the stack; switch to a
+ * dynamically allocated buffer for the key.
+ */
+ uint8_t *cipher_key = kmalloc(keylen, GFP_ATOMIC);
+
+ if (!cipher_key)
+ return -ENOMEM;
+
+ memcpy(cipher_key, key, keylen);
+
+ spin_lock(&vcrypto->ctrl_lock);
+ /* Pad ctrl header */
+ vcrypto->ctrl.header.opcode =
+ cpu_to_le32(VIRTIO_CRYPTO_CIPHER_CREATE_SESSION);
+ vcrypto->ctrl.header.algo = cpu_to_le32(alg);
+ /* Set the default dataqueue id to 0 */
+ vcrypto->ctrl.header.queue_id = 0;
+
+ vcrypto->input.status = cpu_to_le32(VIRTIO_CRYPTO_ERR);
+ /* Pad cipher's parameters */
+ vcrypto->ctrl.u.sym_create_session.op_type =
+ cpu_to_le32(VIRTIO_CRYPTO_SYM_OP_CIPHER);
+ vcrypto->ctrl.u.sym_create_session.u.cipher.para.algo =
+ vcrypto->ctrl.header.algo;
+ vcrypto->ctrl.u.sym_create_session.u.cipher.para.keylen =
+ cpu_to_le32(keylen);
+ vcrypto->ctrl.u.sym_create_session.u.cipher.para.op =
+ cpu_to_le32(op);
+
+ sg_init_one(&outhdr, &vcrypto->ctrl, sizeof(vcrypto->ctrl));
+ sgs[num_out++] = &outhdr;
+
+ /* Set key */
+ sg_init_one(&key_sg, cipher_key, keylen);
+ sgs[num_out++] = &key_sg;
+
+ /* Return status and session id back */
+ sg_init_one(&inhdr, &vcrypto->input, sizeof(vcrypto->input));
+ sgs[num_out + num_in++] = &inhdr;
+
+ err = virtqueue_add_sgs(vcrypto->ctrl_vq, sgs, num_out,
+ num_in, vcrypto, GFP_ATOMIC);
+ if (err < 0) {
+ spin_unlock(&vcrypto->ctrl_lock);
+ kzfree(cipher_key);
+ return err;
+ }
+ virtqueue_kick(vcrypto->ctrl_vq);
+
+ /*
+ * The request traps into the hypervisor, so it should be
+ * handled immediately.
+ */
+ while (!virtqueue_get_buf(vcrypto->ctrl_vq, &tmp) &&
+ !virtqueue_is_broken(vcrypto->ctrl_vq))
+ cpu_relax();
+
+ if (le32_to_cpu(vcrypto->input.status) != VIRTIO_CRYPTO_OK) {
+ spin_unlock(&vcrypto->ctrl_lock);
+ pr_err("virtio_crypto: Create session failed status: %u\n",
+ le32_to_cpu(vcrypto->input.status));
+ kzfree(cipher_key);
+ return -EINVAL;
+ }
+
+ if (encrypt)
+ ctx->enc_sess_info.session_id =
+ le64_to_cpu(vcrypto->input.session_id);
+ else
+ ctx->dec_sess_info.session_id =
+ le64_to_cpu(vcrypto->input.session_id);
+
+ spin_unlock(&vcrypto->ctrl_lock);
+
+ kzfree(cipher_key);
+ return 0;
+}
+
+static int virtio_crypto_alg_ablkcipher_close_session(
+ struct virtio_crypto_ablkcipher_ctx *ctx,
+ int encrypt)
+{
+ struct scatterlist outhdr, status_sg, *sgs[2];
+ unsigned int tmp;
+ struct virtio_crypto_destroy_session_req *destroy_session;
+ struct virtio_crypto *vcrypto = ctx->vcrypto;
+ int err;
+ unsigned int num_out = 0, num_in = 0;
+
+ spin_lock(&vcrypto->ctrl_lock);
+ vcrypto->ctrl_status.status = VIRTIO_CRYPTO_ERR;
+ /* Pad ctrl header */
+ vcrypto->ctrl.header.opcode =
+ cpu_to_le32(VIRTIO_CRYPTO_CIPHER_DESTROY_SESSION);
+ /* Set the default virtqueue id to 0 */
+ vcrypto->ctrl.header.queue_id = 0;
+
+ destroy_session = &vcrypto->ctrl.u.destroy_session;
+
+ if (encrypt)
+ destroy_session->session_id =
+ cpu_to_le64(ctx->enc_sess_info.session_id);
+ else
+ destroy_session->session_id =
+ cpu_to_le64(ctx->dec_sess_info.session_id);
+
+ sg_init_one(&outhdr, &vcrypto->ctrl, sizeof(vcrypto->ctrl));
+ sgs[num_out++] = &outhdr;
+
+ /* Return status and session id back */
+ sg_init_one(&status_sg, &vcrypto->ctrl_status.status,
+ sizeof(vcrypto->ctrl_status.status));
+ sgs[num_out + num_in++] = &status_sg;
+
+ err = virtqueue_add_sgs(vcrypto->ctrl_vq, sgs, num_out,
+ num_in, vcrypto, GFP_ATOMIC);
+ if (err < 0) {
+ spin_unlock(&vcrypto->ctrl_lock);
+ return err;
+ }
+ virtqueue_kick(vcrypto->ctrl_vq);
+
+ while (!virtqueue_get_buf(vcrypto->ctrl_vq, &tmp) &&
+ !virtqueue_is_broken(vcrypto->ctrl_vq))
+ cpu_relax();
+
+ if (vcrypto->ctrl_status.status != VIRTIO_CRYPTO_OK) {
+ spin_unlock(&vcrypto->ctrl_lock);
+ pr_err("virtio_crypto: Close session failed status: %u, session_id: 0x%llx\n",
+ vcrypto->ctrl_status.status,
+ destroy_session->session_id);
+
+ return -EINVAL;
+ }
+ spin_unlock(&vcrypto->ctrl_lock);
+
+ return 0;
+}
+
+static int virtio_crypto_alg_ablkcipher_init_sessions(
+ struct virtio_crypto_ablkcipher_ctx *ctx,
+ const uint8_t *key, unsigned int keylen)
+{
+ uint32_t alg;
+ int ret;
+ struct virtio_crypto *vcrypto = ctx->vcrypto;
+
+ if (keylen > vcrypto->max_cipher_key_len) {
+ pr_err("virtio_crypto: the key is too long\n");
+ goto bad_key;
+ }
+
+ if (virtio_crypto_alg_validate_key(keylen, &alg))
+ goto bad_key;
+
+ /* Create encryption session */
+ ret = virtio_crypto_alg_ablkcipher_init_session(ctx,
+ alg, key, keylen, 1);
+ if (ret)
+ return ret;
+ /* Create decryption session */
+ ret = virtio_crypto_alg_ablkcipher_init_session(ctx,
+ alg, key, keylen, 0);
+ if (ret) {
+ virtio_crypto_alg_ablkcipher_close_session(ctx, 1);
+ return ret;
+ }
+ return 0;
+
+bad_key:
+ crypto_tfm_set_flags(ctx->tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return -EINVAL;
+}
+
+/* Note: kernel crypto API implementation */
+static int virtio_crypto_ablkcipher_setkey(struct crypto_ablkcipher *tfm,
+ const uint8_t *key,
+ unsigned int keylen)
+{
+ struct virtio_crypto_ablkcipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
+ int ret;
+
+ if (!ctx->vcrypto) {
+ /* New key */
+ int node = virtio_crypto_get_current_node();
+ struct virtio_crypto *vcrypto =
+ virtcrypto_get_dev_node(node);
+ if (!vcrypto) {
+ pr_err("virtio_crypto: Could not find a virtio device in the system");
+ return -ENODEV;
+ }
+
+ ctx->vcrypto = vcrypto;
+ } else {
+ /* Rekeying: close the previously created sessions first */
+ virtio_crypto_alg_ablkcipher_close_session(ctx, 1);
+ virtio_crypto_alg_ablkcipher_close_session(ctx, 0);
+ }
+
+ ret = virtio_crypto_alg_ablkcipher_init_sessions(ctx, key, keylen);
+ if (ret) {
+ virtcrypto_dev_put(ctx->vcrypto);
+ ctx->vcrypto = NULL;
+
+ return ret;
+ }
+
+ return 0;
+}
+
+static int
+__virtio_crypto_ablkcipher_do_req(struct virtio_crypto_request *vc_req,
+ struct ablkcipher_request *req,
+ struct data_queue *data_vq,
+ __u8 op)
+{
+ struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
+ unsigned int ivsize = crypto_ablkcipher_ivsize(tfm);
+ struct virtio_crypto_ablkcipher_ctx *ctx = vc_req->ablkcipher_ctx;
+ struct virtio_crypto *vcrypto = ctx->vcrypto;
+ struct virtio_crypto_op_data_req *req_data;
+ int src_nents, dst_nents;
+ int err;
+ unsigned long flags;
+ struct scatterlist outhdr, iv_sg, status_sg, **sgs;
+ int i;
+ u64 dst_len;
+ unsigned int num_out = 0, num_in = 0;
+ int sg_total;
+ uint8_t *iv;
+
+ src_nents = sg_nents_for_len(req->src, req->nbytes);
+ dst_nents = sg_nents(req->dst);
+
+ pr_debug("virtio_crypto: Number of sgs (src_nents: %d, dst_nents: %d)\n",
+ src_nents, dst_nents);
+
+ /* Why 3? outhdr + iv + inhdr */
+ sg_total = src_nents + dst_nents + 3;
+ sgs = kzalloc_node(sg_total * sizeof(*sgs), GFP_ATOMIC,
+ dev_to_node(&vcrypto->vdev->dev));
+ if (!sgs)
+ return -ENOMEM;
+
+ req_data = kzalloc_node(sizeof(*req_data), GFP_ATOMIC,
+ dev_to_node(&vcrypto->vdev->dev));
+ if (!req_data) {
+ kfree(sgs);
+ return -ENOMEM;
+ }
+
+ vc_req->req_data = req_data;
+ vc_req->type = VIRTIO_CRYPTO_SYM_OP_CIPHER;
+ /* Head of operation */
+ if (op) {
+ req_data->header.session_id =
+ cpu_to_le64(ctx->enc_sess_info.session_id);
+ req_data->header.opcode =
+ cpu_to_le32(VIRTIO_CRYPTO_CIPHER_ENCRYPT);
+ } else {
+ req_data->header.session_id =
+ cpu_to_le64(ctx->dec_sess_info.session_id);
+ req_data->header.opcode =
+ cpu_to_le32(VIRTIO_CRYPTO_CIPHER_DECRYPT);
+ }
+ req_data->u.sym_req.op_type = cpu_to_le32(VIRTIO_CRYPTO_SYM_OP_CIPHER);
+ req_data->u.sym_req.u.cipher.para.iv_len = cpu_to_le32(ivsize);
+ req_data->u.sym_req.u.cipher.para.src_data_len =
+ cpu_to_le32(req->nbytes);
+
+ dst_len = virtio_crypto_alg_sg_nents_length(req->dst);
+ if (unlikely(dst_len > U32_MAX)) {
+ pr_err("virtio_crypto: The dst_len is beyond U32_MAX\n");
+ err = -EINVAL;
+ goto free;
+ }
+
+ pr_debug("virtio_crypto: src_len: %u, dst_len: %llu\n",
+ req->nbytes, dst_len);
+
+ if (unlikely(req->nbytes + dst_len + ivsize +
+ sizeof(vc_req->status) > vcrypto->max_size)) {
+ pr_err("virtio_crypto: The length is too big\n");
+ err = -EINVAL;
+ goto free;
+ }
+
+ req_data->u.sym_req.u.cipher.para.dst_data_len =
+ cpu_to_le32((uint32_t)dst_len);
+
+ /* Outhdr */
+ sg_init_one(&outhdr, req_data, sizeof(*req_data));
+ sgs[num_out++] = &outhdr;
+
+ /* IV */
+
+ /*
+ * Avoid doing DMA from the stack; switch to a
+ * dynamically allocated buffer for the IV.
+ */
+ iv = kzalloc_node(ivsize, GFP_ATOMIC,
+ dev_to_node(&vcrypto->vdev->dev));
+ if (!iv) {
+ err = -ENOMEM;
+ goto free;
+ }
+ memcpy(iv, req->info, ivsize);
+ sg_init_one(&iv_sg, iv, ivsize);
+ sgs[num_out++] = &iv_sg;
+ vc_req->iv = iv;
+
+ /* Source data */
+ for (i = 0; i < src_nents; i++)
+ sgs[num_out++] = &req->src[i];
+
+ /* Destination data */
+ for (i = 0; i < dst_nents; i++)
+ sgs[num_out + num_in++] = &req->dst[i];
+
+ /* Status */
+ sg_init_one(&status_sg, &vc_req->status, sizeof(vc_req->status));
+ sgs[num_out + num_in++] = &status_sg;
+
+ vc_req->sgs = sgs;
+
+ spin_lock_irqsave(&data_vq->lock, flags);
+ err = virtqueue_add_sgs(data_vq->vq, sgs, num_out,
+ num_in, vc_req, GFP_ATOMIC);
+ virtqueue_kick(data_vq->vq);
+ spin_unlock_irqrestore(&data_vq->lock, flags);
+ if (unlikely(err < 0))
+ goto free_iv;
+
+ return 0;
+
+free_iv:
+ kzfree(iv);
+free:
+ kzfree(req_data);
+ kfree(sgs);
+ return err;
+}
+
+static int virtio_crypto_ablkcipher_encrypt(struct ablkcipher_request *req)
+{
+ struct crypto_ablkcipher *atfm = crypto_ablkcipher_reqtfm(req);
+ struct virtio_crypto_ablkcipher_ctx *ctx = crypto_ablkcipher_ctx(atfm);
+ struct virtio_crypto_request *vc_req = ablkcipher_request_ctx(req);
+ struct virtio_crypto *vcrypto = ctx->vcrypto;
+ int ret;
+ /* Use the first data virtqueue as default */
+ struct data_queue *data_vq = &vcrypto->data_vq[0];
+
+ vc_req->ablkcipher_ctx = ctx;
+ vc_req->ablkcipher_req = req;
+ ret = __virtio_crypto_ablkcipher_do_req(vc_req, req, data_vq, 1);
+ if (ret < 0) {
+ pr_err("virtio_crypto: Encryption failed!\n");
+ return ret;
+ }
+
+ return -EINPROGRESS;
+}
+
+static int virtio_crypto_ablkcipher_decrypt(struct ablkcipher_request *req)
+{
+ struct crypto_ablkcipher *atfm = crypto_ablkcipher_reqtfm(req);
+ struct virtio_crypto_ablkcipher_ctx *ctx = crypto_ablkcipher_ctx(atfm);
+ struct virtio_crypto_request *vc_req = ablkcipher_request_ctx(req);
+ struct virtio_crypto *vcrypto = ctx->vcrypto;
+ int ret;
+ /* Use the first data virtqueue as default */
+ struct data_queue *data_vq = &vcrypto->data_vq[0];
+
+ vc_req->ablkcipher_ctx = ctx;
+ vc_req->ablkcipher_req = req;
+
+ ret = __virtio_crypto_ablkcipher_do_req(vc_req, req, data_vq, 0);
+ if (ret < 0) {
+ pr_err("virtio_crypto: Decryption failed!\n");
+ return ret;
+ }
+
+ return -EINPROGRESS;
+}
+
+static int virtio_crypto_ablkcipher_init(struct crypto_tfm *tfm)
+{
+ struct virtio_crypto_ablkcipher_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ tfm->crt_ablkcipher.reqsize = sizeof(struct virtio_crypto_request);
+ ctx->tfm = tfm;
+
+ return 0;
+}
+
+static void virtio_crypto_ablkcipher_exit(struct crypto_tfm *tfm)
+{
+ struct virtio_crypto_ablkcipher_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ if (!ctx->vcrypto)
+ return;
+
+ virtio_crypto_alg_ablkcipher_close_session(ctx, 1);
+ virtio_crypto_alg_ablkcipher_close_session(ctx, 0);
+ virtcrypto_dev_put(ctx->vcrypto);
+ ctx->vcrypto = NULL;
+}
+
+static struct crypto_alg virtio_crypto_algs[] = { {
+ .cra_name = "cbc(aes)",
+ .cra_driver_name = "virtio_crypto_aes_cbc",
+ .cra_priority = 501,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct virtio_crypto_ablkcipher_ctx),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_init = virtio_crypto_ablkcipher_init,
+ .cra_exit = virtio_crypto_ablkcipher_exit,
+ .cra_u = {
+ .ablkcipher = {
+ .setkey = virtio_crypto_ablkcipher_setkey,
+ .decrypt = virtio_crypto_ablkcipher_decrypt,
+ .encrypt = virtio_crypto_ablkcipher_encrypt,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ },
+ },
+} };
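
With a priority of 501 this implementation outranks the generic software cbc(aes), so an in-kernel user requesting "cbc(aes)" through the crypto API is served by this driver once it is loaded. A minimal sketch of such a caller through the generic skcipher front end (the demo_* names and the single in-place block are illustrative, not part of this patch):

#include <crypto/skcipher.h>
#include <crypto/aes.h>
#include <linux/completion.h>
#include <linux/err.h>
#include <linux/scatterlist.h>

struct demo_result {
	struct completion done;
	int err;
};

static void demo_done(struct crypto_async_request *req, int err)
{
	struct demo_result *res = req->data;

	if (err == -EINPROGRESS)
		return;			/* backlogged; final completion follows */
	res->err = err;
	complete(&res->done);
}

/* Encrypt one AES block in place with cbc(aes); iv is AES_BLOCK_SIZE bytes. */
static int demo_cbc_aes(const u8 *key, unsigned int keylen, u8 *iv, u8 *block)
{
	struct crypto_skcipher *tfm;
	struct skcipher_request *req;
	struct scatterlist sg;
	struct demo_result res;
	int ret;

	tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	req = skcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		crypto_free_skcipher(tfm);
		return -ENOMEM;
	}

	init_completion(&res.done);
	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				      demo_done, &res);

	ret = crypto_skcipher_setkey(tfm, key, keylen);
	if (!ret) {
		sg_init_one(&sg, block, AES_BLOCK_SIZE);
		skcipher_request_set_crypt(req, &sg, &sg, AES_BLOCK_SIZE, iv);
		ret = crypto_skcipher_encrypt(req);
		if (ret == -EINPROGRESS || ret == -EBUSY) {
			wait_for_completion(&res.done);
			ret = res.err;
		}
	}

	skcipher_request_free(req);
	crypto_free_skcipher(tfm);
	return ret;
}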
+
+int virtio_crypto_algs_register(void)
+{
+ int ret = 0;
+
+ mutex_lock(&algs_lock);
+ if (++virtio_crypto_active_devs != 1)
+ goto unlock;
+
+ ret = crypto_register_algs(virtio_crypto_algs,
+ ARRAY_SIZE(virtio_crypto_algs));
+ if (ret)
+ virtio_crypto_active_devs--;
+
+unlock:
+ mutex_unlock(&algs_lock);
+ return ret;
+}
+
+void virtio_crypto_algs_unregister(void)
+{
+ mutex_lock(&algs_lock);
+ if (--virtio_crypto_active_devs != 0)
+ goto unlock;
+
+ crypto_unregister_algs(virtio_crypto_algs,
+ ARRAY_SIZE(virtio_crypto_algs));
+
+unlock:
+ mutex_unlock(&algs_lock);
+}
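
Registration is gated on a device count, so the algorithms are registered exactly once no matter how many virtio crypto devices probe, and are removed only when the last one goes away. The same idiom in isolation (the demo_* names are placeholders for illustration):

#include <linux/mutex.h>

static DEFINE_MUTEX(demo_lock);
static unsigned int demo_users;		/* protected by demo_lock */

static int demo_do_register(void);	/* placeholder: the one-time work */
static void demo_do_unregister(void);

int demo_get(void)
{
	int ret = 0;

	mutex_lock(&demo_lock);
	if (++demo_users == 1) {	/* only the first user registers */
		ret = demo_do_register();
		if (ret)
			demo_users--;	/* roll back on failure */
	}
	mutex_unlock(&demo_lock);
	return ret;
}

void demo_put(void)
{
	mutex_lock(&demo_lock);
	if (--demo_users == 0)		/* only the last user unregisters */
		demo_do_unregister();
	mutex_unlock(&demo_lock);
}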
diff --git a/drivers/crypto/virtio/virtio_crypto_common.h b/drivers/crypto/virtio/virtio_crypto_common.h
new file mode 100644
index 000000000000..3d6566b02876
--- /dev/null
+++ b/drivers/crypto/virtio/virtio_crypto_common.h
@@ -0,0 +1,128 @@
+/* Common header for Virtio crypto device.
+ *
+ * Copyright 2016 HUAWEI TECHNOLOGIES CO., LTD.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _VIRTIO_CRYPTO_COMMON_H
+#define _VIRTIO_CRYPTO_COMMON_H
+
+#include <linux/virtio.h>
+#include <linux/crypto.h>
+#include <linux/spinlock.h>
+#include <crypto/aead.h>
+#include <crypto/aes.h>
+#include <crypto/authenc.h>
+
+
+/* Internal representation of a data virtqueue */
+struct data_queue {
+ /* Virtqueue associated with this send queue */
+ struct virtqueue *vq;
+
+ /* To protect the vq operations for the dataq */
+ spinlock_t lock;
+
+ /* Name of the data virtqueue: dataq.$index */
+ char name[32];
+};
+
+struct virtio_crypto {
+ struct virtio_device *vdev;
+ struct virtqueue *ctrl_vq;
+ struct data_queue *data_vq;
+
+ /* To protect the vq operations for the controlq */
+ spinlock_t ctrl_lock;
+
+ /* Maximum of data queues supported by the device */
+ u32 max_data_queues;
+
+ /* Number of data queues currently used by the driver */
+ u32 curr_queue;
+
+ /* Maximum length of cipher key */
+ u32 max_cipher_key_len;
+ /* Maximum length of authenticated key */
+ u32 max_auth_key_len;
+ /* Maximum size of a single request */
+ u64 max_size;
+
+ /* Control VQ buffers: protected by the ctrl_lock */
+ struct virtio_crypto_op_ctrl_req ctrl;
+ struct virtio_crypto_session_input input;
+ struct virtio_crypto_inhdr ctrl_status;
+
+ unsigned long status;
+ atomic_t ref_count;
+ struct list_head list;
+ struct module *owner;
+ uint8_t dev_id;
+
+ /* Is the affinity hint set for the virtqueues? */
+ bool affinity_hint_set;
+};
+
+struct virtio_crypto_sym_session_info {
+ /* Backend session id, which comes from the host side */
+ __u64 session_id;
+};
+
+struct virtio_crypto_ablkcipher_ctx {
+ struct virtio_crypto *vcrypto;
+ struct crypto_tfm *tfm;
+
+ struct virtio_crypto_sym_session_info enc_sess_info;
+ struct virtio_crypto_sym_session_info dec_sess_info;
+};
+
+struct virtio_crypto_request {
+ /* Cipher or aead */
+ uint32_t type;
+ uint8_t status;
+ struct virtio_crypto_ablkcipher_ctx *ablkcipher_ctx;
+ struct ablkcipher_request *ablkcipher_req;
+ struct virtio_crypto_op_data_req *req_data;
+ struct scatterlist **sgs;
+ uint8_t *iv;
+};
+
+int virtcrypto_devmgr_add_dev(struct virtio_crypto *vcrypto_dev);
+struct list_head *virtcrypto_devmgr_get_head(void);
+void virtcrypto_devmgr_rm_dev(struct virtio_crypto *vcrypto_dev);
+struct virtio_crypto *virtcrypto_devmgr_get_first(void);
+int virtcrypto_dev_in_use(struct virtio_crypto *vcrypto_dev);
+int virtcrypto_dev_get(struct virtio_crypto *vcrypto_dev);
+void virtcrypto_dev_put(struct virtio_crypto *vcrypto_dev);
+int virtcrypto_dev_started(struct virtio_crypto *vcrypto_dev);
+struct virtio_crypto *virtcrypto_get_dev_node(int node);
+int virtcrypto_dev_start(struct virtio_crypto *vcrypto);
+void virtcrypto_dev_stop(struct virtio_crypto *vcrypto);
+
+static inline int virtio_crypto_get_current_node(void)
+{
+ int cpu, node;
+
+ cpu = get_cpu();
+ node = topology_physical_package_id(cpu);
+ put_cpu();
+
+ return node;
+}
+
+int virtio_crypto_algs_register(void);
+void virtio_crypto_algs_unregister(void);
+
+#endif /* _VIRTIO_CRYPTO_COMMON_H */
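
Putting the accessors together, a consumer (as the setkey path above does) borrows the least-loaded started device for the current NUMA node and returns it when finished. A compressed sketch of that usage; the setkey/exit paths in virtio_crypto_algs.c are the authoritative users of this interface:

#include "virtio_crypto_common.h"

static int demo_borrow_device(void)
{
	struct virtio_crypto *vcrypto;

	/* Takes a reference (and a module reference for the first user). */
	vcrypto = virtcrypto_get_dev_node(virtio_crypto_get_current_node());
	if (!vcrypto)
		return -ENODEV;

	/* ... create sessions, submit requests on vcrypto->data_vq ... */

	virtcrypto_dev_put(vcrypto);	/* drop the reference when done */
	return 0;
}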
diff --git a/drivers/crypto/virtio/virtio_crypto_core.c b/drivers/crypto/virtio/virtio_crypto_core.c
new file mode 100644
index 000000000000..fe70ec823b27
--- /dev/null
+++ b/drivers/crypto/virtio/virtio_crypto_core.c
@@ -0,0 +1,476 @@
+ /* Driver for Virtio crypto device.
+ *
+ * Copyright 2016 HUAWEI TECHNOLOGIES CO., LTD.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/virtio_config.h>
+#include <linux/cpu.h>
+
+#include <uapi/linux/virtio_crypto.h>
+#include "virtio_crypto_common.h"
+
+
+static void
+virtcrypto_clear_request(struct virtio_crypto_request *vc_req)
+{
+ if (vc_req) {
+ kzfree(vc_req->iv);
+ kzfree(vc_req->req_data);
+ kfree(vc_req->sgs);
+ }
+}
+
+static void virtcrypto_dataq_callback(struct virtqueue *vq)
+{
+ struct virtio_crypto *vcrypto = vq->vdev->priv;
+ struct virtio_crypto_request *vc_req;
+ unsigned long flags;
+ unsigned int len;
+ struct ablkcipher_request *ablk_req;
+ int error;
+ unsigned int qid = vq->index;
+
+ spin_lock_irqsave(&vcrypto->data_vq[qid].lock, flags);
+ do {
+ virtqueue_disable_cb(vq);
+ while ((vc_req = virtqueue_get_buf(vq, &len)) != NULL) {
+ if (vc_req->type == VIRTIO_CRYPTO_SYM_OP_CIPHER) {
+ switch (vc_req->status) {
+ case VIRTIO_CRYPTO_OK:
+ error = 0;
+ break;
+ case VIRTIO_CRYPTO_INVSESS:
+ case VIRTIO_CRYPTO_ERR:
+ error = -EINVAL;
+ break;
+ case VIRTIO_CRYPTO_BADMSG:
+ error = -EBADMSG;
+ break;
+ default:
+ error = -EIO;
+ break;
+ }
+ ablk_req = vc_req->ablkcipher_req;
+ virtcrypto_clear_request(vc_req);
+
+ spin_unlock_irqrestore(
+ &vcrypto->data_vq[qid].lock, flags);
+ /* Finish the encrypt or decrypt process */
+ ablk_req->base.complete(&ablk_req->base, error);
+ spin_lock_irqsave(
+ &vcrypto->data_vq[qid].lock, flags);
+ }
+ }
+ } while (!virtqueue_enable_cb(vq));
+ spin_unlock_irqrestore(&vcrypto->data_vq[qid].lock, flags);
+}
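
The callback above follows the standard virtio completion idiom: suppress further callbacks, drain everything the device has completed, re-enable, and loop again if a completion raced with the re-enable. Stripped of the crypto-specific handling (demo_handle_one is a placeholder consumer):

#include <linux/virtio.h>

static void demo_handle_one(void *buf, unsigned int len);	/* placeholder */

static void demo_dataq_callback(struct virtqueue *vq)
{
	unsigned int len;
	void *buf;

	do {
		virtqueue_disable_cb(vq);	/* quiet callbacks while draining */
		while ((buf = virtqueue_get_buf(vq, &len)) != NULL)
			demo_handle_one(buf, len);
	} while (!virtqueue_enable_cb(vq));	/* false: more arrived, drain again */
}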
+
+static int virtcrypto_find_vqs(struct virtio_crypto *vi)
+{
+ vq_callback_t **callbacks;
+ struct virtqueue **vqs;
+ int ret = -ENOMEM;
+ int i, total_vqs;
+ const char **names;
+
+ /*
+ * We expect 1 data virtqueue, followed by
+ * possible N-1 data queues used in multiqueue mode,
+ * followed by control vq.
+ */
+ total_vqs = vi->max_data_queues + 1;
+
+ /* Allocate space for find_vqs parameters */
+ vqs = kcalloc(total_vqs, sizeof(*vqs), GFP_KERNEL);
+ if (!vqs)
+ goto err_vq;
+ callbacks = kcalloc(total_vqs, sizeof(*callbacks), GFP_KERNEL);
+ if (!callbacks)
+ goto err_callback;
+ names = kcalloc(total_vqs, sizeof(*names), GFP_KERNEL);
+ if (!names)
+ goto err_names;
+
+ /* Parameters for control virtqueue */
+ callbacks[total_vqs - 1] = NULL;
+ names[total_vqs - 1] = "controlq";
+
+ /* Allocate/initialize parameters for data virtqueues */
+ for (i = 0; i < vi->max_data_queues; i++) {
+ callbacks[i] = virtcrypto_dataq_callback;
+ snprintf(vi->data_vq[i].name, sizeof(vi->data_vq[i].name),
+ "dataq.%d", i);
+ names[i] = vi->data_vq[i].name;
+ }
+
+ ret = vi->vdev->config->find_vqs(vi->vdev, total_vqs, vqs, callbacks,
+ names);
+ if (ret)
+ goto err_find;
+
+ vi->ctrl_vq = vqs[total_vqs - 1];
+
+ for (i = 0; i < vi->max_data_queues; i++) {
+ spin_lock_init(&vi->data_vq[i].lock);
+ vi->data_vq[i].vq = vqs[i];
+ }
+
+ kfree(names);
+ kfree(callbacks);
+ kfree(vqs);
+
+ return 0;
+
+err_find:
+ kfree(names);
+err_names:
+ kfree(callbacks);
+err_callback:
+ kfree(vqs);
+err_vq:
+ return ret;
+}
+
+static int virtcrypto_alloc_queues(struct virtio_crypto *vi)
+{
+ vi->data_vq = kcalloc(vi->max_data_queues, sizeof(*vi->data_vq),
+ GFP_KERNEL);
+ if (!vi->data_vq)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void virtcrypto_clean_affinity(struct virtio_crypto *vi, long hcpu)
+{
+ int i;
+
+ if (vi->affinity_hint_set) {
+ for (i = 0; i < vi->max_data_queues; i++)
+ virtqueue_set_affinity(vi->data_vq[i].vq, -1);
+
+ vi->affinity_hint_set = false;
+ }
+}
+
+static void virtcrypto_set_affinity(struct virtio_crypto *vcrypto)
+{
+ int i = 0;
+ int cpu;
+
+ /*
+ * In single queue mode, we don't set the cpu affinity.
+ */
+ if (vcrypto->curr_queue == 1 || vcrypto->max_data_queues == 1) {
+ virtcrypto_clean_affinity(vcrypto, -1);
+ return;
+ }
+
+ /*
+ * In multiqueue mode, we let each queue be private to one cpu
+ * by setting the affinity hint to eliminate contention.
+ *
+ * TODO: add cpu hotplug support by registering a cpu notifier.
+ */
+ for_each_online_cpu(cpu) {
+ virtqueue_set_affinity(vcrypto->data_vq[i].vq, cpu);
+ if (++i >= vcrypto->max_data_queues)
+ break;
+ }
+
+ vcrypto->affinity_hint_set = true;
+}
+
+static void virtcrypto_free_queues(struct virtio_crypto *vi)
+{
+ kfree(vi->data_vq);
+}
+
+static int virtcrypto_init_vqs(struct virtio_crypto *vi)
+{
+ int ret;
+
+ /* Allocate send & receive queues */
+ ret = virtcrypto_alloc_queues(vi);
+ if (ret)
+ goto err;
+
+ ret = virtcrypto_find_vqs(vi);
+ if (ret)
+ goto err_free;
+
+ get_online_cpus();
+ virtcrypto_set_affinity(vi);
+ put_online_cpus();
+
+ return 0;
+
+err_free:
+ virtcrypto_free_queues(vi);
+err:
+ return ret;
+}
+
+static int virtcrypto_update_status(struct virtio_crypto *vcrypto)
+{
+ u32 status;
+ int err;
+
+ virtio_cread(vcrypto->vdev,
+ struct virtio_crypto_config, status, &status);
+
+ /*
+ * Unknown status bits would be a host error and the driver
+ * should consider the device to be broken.
+ */
+ if (status & (~VIRTIO_CRYPTO_S_HW_READY)) {
+ dev_warn(&vcrypto->vdev->dev,
+ "Unknown status bits: 0x%x\n", status);
+
+ virtio_break_device(vcrypto->vdev);
+ return -EPERM;
+ }
+
+ if (vcrypto->status == status)
+ return 0;
+
+ vcrypto->status = status;
+
+ if (vcrypto->status & VIRTIO_CRYPTO_S_HW_READY) {
+ err = virtcrypto_dev_start(vcrypto);
+ if (err) {
+ dev_err(&vcrypto->vdev->dev,
+ "Failed to start virtio crypto device.\n");
+
+ return -EPERM;
+ }
+ dev_info(&vcrypto->vdev->dev, "Accelerator is ready\n");
+ } else {
+ virtcrypto_dev_stop(vcrypto);
+ dev_info(&vcrypto->vdev->dev, "Accelerator is not ready\n");
+ }
+
+ return 0;
+}
+
+static void virtcrypto_del_vqs(struct virtio_crypto *vcrypto)
+{
+ struct virtio_device *vdev = vcrypto->vdev;
+
+ virtcrypto_clean_affinity(vcrypto, -1);
+
+ vdev->config->del_vqs(vdev);
+
+ virtcrypto_free_queues(vcrypto);
+}
+
+static int virtcrypto_probe(struct virtio_device *vdev)
+{
+ int err = -EFAULT;
+ struct virtio_crypto *vcrypto;
+ u32 max_data_queues = 0, max_cipher_key_len = 0;
+ u32 max_auth_key_len = 0;
+ u64 max_size = 0;
+
+ if (!virtio_has_feature(vdev, VIRTIO_F_VERSION_1))
+ return -ENODEV;
+
+ if (!vdev->config->get) {
+ dev_err(&vdev->dev, "%s failure: config access disabled\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ if (num_possible_nodes() > 1 && dev_to_node(&vdev->dev) < 0) {
+ /*
+ * If the accelerator is connected to a node with no memory
+ * there is no point in using the accelerator since the remote
+ * memory transaction will be very slow.
+ */
+ dev_err(&vdev->dev, "Invalid NUMA configuration.\n");
+ return -EINVAL;
+ }
+
+ vcrypto = kzalloc_node(sizeof(*vcrypto), GFP_KERNEL,
+ dev_to_node(&vdev->dev));
+ if (!vcrypto)
+ return -ENOMEM;
+
+ virtio_cread(vdev, struct virtio_crypto_config,
+ max_dataqueues, &max_data_queues);
+ if (max_data_queues < 1)
+ max_data_queues = 1;
+
+ virtio_cread(vdev, struct virtio_crypto_config,
+ max_cipher_key_len, &max_cipher_key_len);
+ virtio_cread(vdev, struct virtio_crypto_config,
+ max_auth_key_len, &max_auth_key_len);
+ virtio_cread(vdev, struct virtio_crypto_config,
+ max_size, &max_size);
+
+ /* Add virtio crypto device to global table */
+ err = virtcrypto_devmgr_add_dev(vcrypto);
+ if (err) {
+ dev_err(&vdev->dev, "Failed to add new virtio crypto device.\n");
+ goto free;
+ }
+ vcrypto->owner = THIS_MODULE;
+ vdev->priv = vcrypto;
+ vcrypto->vdev = vdev;
+
+ spin_lock_init(&vcrypto->ctrl_lock);
+
+ /* Use single data queue as default */
+ vcrypto->curr_queue = 1;
+ vcrypto->max_data_queues = max_data_queues;
+ vcrypto->max_cipher_key_len = max_cipher_key_len;
+ vcrypto->max_auth_key_len = max_auth_key_len;
+ vcrypto->max_size = max_size;
+
+ dev_info(&vdev->dev,
+ "max_queues: %u, max_cipher_key_len: %u, max_auth_key_len: %u, max_size 0x%llx\n",
+ vcrypto->max_data_queues,
+ vcrypto->max_cipher_key_len,
+ vcrypto->max_auth_key_len,
+ vcrypto->max_size);
+
+ err = virtcrypto_init_vqs(vcrypto);
+ if (err) {
+ dev_err(&vdev->dev, "Failed to initialize vqs.\n");
+ goto free_dev;
+ }
+ virtio_device_ready(vdev);
+
+ err = virtcrypto_update_status(vcrypto);
+ if (err)
+ goto free_vqs;
+
+ return 0;
+
+free_vqs:
+ vcrypto->vdev->config->reset(vdev);
+ virtcrypto_del_vqs(vcrypto);
+free_dev:
+ virtcrypto_devmgr_rm_dev(vcrypto);
+free:
+ kfree(vcrypto);
+ return err;
+}
+
+static void virtcrypto_free_unused_reqs(struct virtio_crypto *vcrypto)
+{
+ struct virtio_crypto_request *vc_req;
+ int i;
+ struct virtqueue *vq;
+
+ for (i = 0; i < vcrypto->max_data_queues; i++) {
+ vq = vcrypto->data_vq[i].vq;
+ while ((vc_req = virtqueue_detach_unused_buf(vq)) != NULL) {
+ kfree(vc_req->req_data);
+ kfree(vc_req->sgs);
+ }
+ }
+}
+
+static void virtcrypto_remove(struct virtio_device *vdev)
+{
+ struct virtio_crypto *vcrypto = vdev->priv;
+
+ dev_info(&vdev->dev, "Start virtcrypto_remove.\n");
+
+ if (virtcrypto_dev_started(vcrypto))
+ virtcrypto_dev_stop(vcrypto);
+ vdev->config->reset(vdev);
+ virtcrypto_free_unused_reqs(vcrypto);
+ virtcrypto_del_vqs(vcrypto);
+ virtcrypto_devmgr_rm_dev(vcrypto);
+ kfree(vcrypto);
+}
+
+static void virtcrypto_config_changed(struct virtio_device *vdev)
+{
+ struct virtio_crypto *vcrypto = vdev->priv;
+
+ virtcrypto_update_status(vcrypto);
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int virtcrypto_freeze(struct virtio_device *vdev)
+{
+ struct virtio_crypto *vcrypto = vdev->priv;
+
+ vdev->config->reset(vdev);
+ virtcrypto_free_unused_reqs(vcrypto);
+ if (virtcrypto_dev_started(vcrypto))
+ virtcrypto_dev_stop(vcrypto);
+
+ virtcrypto_del_vqs(vcrypto);
+ return 0;
+}
+
+static int virtcrypto_restore(struct virtio_device *vdev)
+{
+ struct virtio_crypto *vcrypto = vdev->priv;
+ int err;
+
+ err = virtcrypto_init_vqs(vcrypto);
+ if (err)
+ return err;
+
+ virtio_device_ready(vdev);
+ err = virtcrypto_dev_start(vcrypto);
+ if (err) {
+ dev_err(&vdev->dev, "Failed to start virtio crypto device.\n");
+ return -EFAULT;
+ }
+
+ return 0;
+}
+#endif
+
+static unsigned int features[] = {
+ /* none */
+};
+
+static struct virtio_device_id id_table[] = {
+ { VIRTIO_ID_CRYPTO, VIRTIO_DEV_ANY_ID },
+ { 0 },
+};
+
+static struct virtio_driver virtio_crypto_driver = {
+ .driver.name = KBUILD_MODNAME,
+ .driver.owner = THIS_MODULE,
+ .feature_table = features,
+ .feature_table_size = ARRAY_SIZE(features),
+ .id_table = id_table,
+ .probe = virtcrypto_probe,
+ .remove = virtcrypto_remove,
+ .config_changed = virtcrypto_config_changed,
+#ifdef CONFIG_PM_SLEEP
+ .freeze = virtcrypto_freeze,
+ .restore = virtcrypto_restore,
+#endif
+};
+
+module_virtio_driver(virtio_crypto_driver);
+
+MODULE_DEVICE_TABLE(virtio, id_table);
+MODULE_DESCRIPTION("virtio crypto device driver");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Gonglei <arei.gonglei@huawei.com>");
diff --git a/drivers/crypto/virtio/virtio_crypto_mgr.c b/drivers/crypto/virtio/virtio_crypto_mgr.c
new file mode 100644
index 000000000000..a69ff71de2c4
--- /dev/null
+++ b/drivers/crypto/virtio/virtio_crypto_mgr.c
@@ -0,0 +1,264 @@
+ /* Management for virtio crypto devices (refer to adf_dev_mgr.c)
+ *
+ * Copyright 2016 HUAWEI TECHNOLOGIES CO., LTD.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/mutex.h>
+#include <linux/list.h>
+#include <linux/module.h>
+
+#include <uapi/linux/virtio_crypto.h>
+#include "virtio_crypto_common.h"
+
+static LIST_HEAD(virtio_crypto_table);
+static uint32_t num_devices;
+
+/* The table_lock protects the above global list and num_devices */
+static DEFINE_MUTEX(table_lock);
+
+#define VIRTIO_CRYPTO_MAX_DEVICES 32
+
+
+/*
+ * virtcrypto_devmgr_add_dev() - Add vcrypto_dev to the acceleration
+ * framework.
+ * @vcrypto_dev: Pointer to virtio crypto device.
+ *
+ * Function adds virtio crypto device to the global list.
+ * To be used by virtio crypto device specific drivers.
+ *
+ * Return: 0 on success, error code otherwise.
+ */
+int virtcrypto_devmgr_add_dev(struct virtio_crypto *vcrypto_dev)
+{
+ struct list_head *itr;
+
+ mutex_lock(&table_lock);
+ if (num_devices == VIRTIO_CRYPTO_MAX_DEVICES) {
+ pr_info("virtio_crypto: only support up to %d devices\n",
+ VIRTIO_CRYPTO_MAX_DEVICES);
+ mutex_unlock(&table_lock);
+ return -EFAULT;
+ }
+
+ list_for_each(itr, &virtio_crypto_table) {
+ struct virtio_crypto *ptr =
+ list_entry(itr, struct virtio_crypto, list);
+
+ if (ptr == vcrypto_dev) {
+ mutex_unlock(&table_lock);
+ return -EEXIST;
+ }
+ }
+ atomic_set(&vcrypto_dev->ref_count, 0);
+ list_add_tail(&vcrypto_dev->list, &virtio_crypto_table);
+ vcrypto_dev->dev_id = num_devices++;
+ mutex_unlock(&table_lock);
+ return 0;
+}
+
+struct list_head *virtcrypto_devmgr_get_head(void)
+{
+ return &virtio_crypto_table;
+}
+
+/*
+ * virtcrypto_devmgr_rm_dev() - Remove vcrypto_dev from the acceleration
+ * framework.
+ * @vcrypto_dev: Pointer to virtio crypto device.
+ *
+ * Function removes virtio crypto device from the acceleration framework.
+ * To be used by virtio crypto device specific drivers.
+ *
+ * Return: void
+ */
+void virtcrypto_devmgr_rm_dev(struct virtio_crypto *vcrypto_dev)
+{
+ mutex_lock(&table_lock);
+ list_del(&vcrypto_dev->list);
+ num_devices--;
+ mutex_unlock(&table_lock);
+}
+
+/*
+ * virtcrypto_devmgr_get_first()
+ *
+ * Function returns the first virtio crypto device from the acceleration
+ * framework.
+ *
+ * To be used by virtio crypto device specific drivers.
+ *
+ * Return: pointer to vcrypto_dev or NULL if not found.
+ */
+struct virtio_crypto *virtcrypto_devmgr_get_first(void)
+{
+ struct virtio_crypto *dev = NULL;
+
+ mutex_lock(&table_lock);
+ if (!list_empty(&virtio_crypto_table))
+ dev = list_first_entry(&virtio_crypto_table,
+ struct virtio_crypto,
+ list);
+ mutex_unlock(&table_lock);
+ return dev;
+}
+
+/*
+ * virtcrypto_dev_in_use() - Check whether vcrypto_dev is currently in use
+ * @vcrypto_dev: Pointer to virtio crypto device.
+ *
+ * To be used by virtio crypto device specific drivers.
+ *
+ * Return: 1 when device is in use, 0 otherwise.
+ */
+int virtcrypto_dev_in_use(struct virtio_crypto *vcrypto_dev)
+{
+ return atomic_read(&vcrypto_dev->ref_count) != 0;
+}
+
+/*
+ * virtcrypto_dev_get() - Increment vcrypto_dev reference count
+ * @vcrypto_dev: Pointer to virtio crypto device.
+ *
+ * Increment the vcrypto_dev refcount; if this takes the first
+ * reference for this period of use, increment the module
+ * refcount too.
+ * To be used by virtio crypto device specific drivers.
+ *
+ * Return: 0 when successful, EFAULT when it fails to bump the module refcount
+ */
+int virtcrypto_dev_get(struct virtio_crypto *vcrypto_dev)
+{
+ if (atomic_add_return(1, &vcrypto_dev->ref_count) == 1)
+ if (!try_module_get(vcrypto_dev->owner))
+ return -EFAULT;
+ return 0;
+}
+
+/*
+ * virtcrypto_dev_put() - Decrement vcrypto_dev reference count
+ * @vcrypto_dev: Pointer to virtio crypto device.
+ *
+ * Decrement the vcrypto_dev refcount; if this drops the last
+ * reference for this period of use, decrement the module
+ * refcount too.
+ * To be used by virtio crypto device specific drivers.
+ *
+ * Return: void
+ */
+void virtcrypto_dev_put(struct virtio_crypto *vcrypto_dev)
+{
+ if (atomic_sub_return(1, &vcrypto_dev->ref_count) == 0)
+ module_put(vcrypto_dev->owner);
+}
+
+/*
+ * virtcrypto_dev_started() - Check whether device has started
+ * @vcrypto_dev: Pointer to virtio crypto device.
+ *
+ * To be used by virtio crypto device specific drivers.
+ *
+ * Return: 1 when the device has started, 0 otherwise
+ */
+int virtcrypto_dev_started(struct virtio_crypto *vcrypto_dev)
+{
+ return (vcrypto_dev->status & VIRTIO_CRYPTO_S_HW_READY);
+}
+
+/*
+ * virtcrypto_get_dev_node() - Get vcrypto_dev on the node.
+ * @node: Node id on which the driver works.
+ *
+ * Function returns the least-used virtio crypto device on the node.
+ *
+ * To be used by virtio crypto device specific drivers.
+ *
+ * Return: pointer to vcrypto_dev or NULL if not found.
+ */
+struct virtio_crypto *virtcrypto_get_dev_node(int node)
+{
+ struct virtio_crypto *vcrypto_dev = NULL, *tmp_dev;
+ unsigned long best = ~0;
+ unsigned long ctr;
+
+ mutex_lock(&table_lock);
+ list_for_each_entry(tmp_dev, virtcrypto_devmgr_get_head(), list) {
+
+ if ((node == dev_to_node(&tmp_dev->vdev->dev) ||
+ dev_to_node(&tmp_dev->vdev->dev) < 0) &&
+ virtcrypto_dev_started(tmp_dev)) {
+ ctr = atomic_read(&tmp_dev->ref_count);
+ if (best > ctr) {
+ vcrypto_dev = tmp_dev;
+ best = ctr;
+ }
+ }
+ }
+
+ if (!vcrypto_dev) {
+ pr_info("virtio_crypto: Could not find a device on node %d\n",
+ node);
+ /* Get any started device */
+ list_for_each_entry(tmp_dev,
+ virtcrypto_devmgr_get_head(), list) {
+ if (virtcrypto_dev_started(tmp_dev)) {
+ vcrypto_dev = tmp_dev;
+ break;
+ }
+ }
+ }
+ mutex_unlock(&table_lock);
+ if (!vcrypto_dev)
+ return NULL;
+
+ virtcrypto_dev_get(vcrypto_dev);
+ return vcrypto_dev;
+}
+
+/*
+ * virtcrypto_dev_start() - Start virtio crypto device
+ * @vcrypto: Pointer to virtio crypto device.
+ *
+ * Function notifies all the registered services that the virtio crypto device
+ * is ready to be used.
+ * To be used by virtio crypto device specific drivers.
+ *
+ * Return: 0 on success, EFAULT when registering algorithms fails
+ */
+int virtcrypto_dev_start(struct virtio_crypto *vcrypto)
+{
+ if (virtio_crypto_algs_register()) {
+ pr_err("virtio_crypto: Failed to register crypto algs\n");
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+/*
+ * virtcrypto_dev_stop() - Stop virtio crypto device
+ * @vcrypto: Pointer to virtio crypto device.
+ *
+ * Function notifies all the registered services that the virtio crypto device
+ * is going to be stopped.
+ * To be used by virtio crypto device specific drivers.
+ *
+ * Return: void
+ */
+void virtcrypto_dev_stop(struct virtio_crypto *vcrypto)
+{
+ virtio_crypto_algs_unregister();
+}
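
Seen from the transport side, the manager is driven in a fixed order; virtcrypto_probe()/virtcrypto_remove() in virtio_crypto_core.c are the real callers (where dev_start is triggered by the status update), and the sketch below only condenses that sequence:

#include "virtio_crypto_common.h"

static int demo_bring_up(struct virtio_crypto *vcrypto)
{
	int err;

	err = virtcrypto_devmgr_add_dev(vcrypto);	/* enter the global table */
	if (err)
		return err;

	err = virtcrypto_dev_start(vcrypto);	/* first active device registers algs */
	if (err)
		virtcrypto_devmgr_rm_dev(vcrypto);
	return err;
}

static void demo_tear_down(struct virtio_crypto *vcrypto)
{
	if (virtcrypto_dev_started(vcrypto))
		virtcrypto_dev_stop(vcrypto);	/* last active device unregisters algs */
	virtcrypto_devmgr_rm_dev(vcrypto);
}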
diff --git a/drivers/crypto/vmx/Makefile b/drivers/crypto/vmx/Makefile
index de6e241b0866..55f7c392582f 100644
--- a/drivers/crypto/vmx/Makefile
+++ b/drivers/crypto/vmx/Makefile
@@ -10,10 +10,12 @@ endif
quiet_cmd_perl = PERL $@
cmd_perl = $(PERL) $(<) $(TARGET) > $(@)
-$(src)/aesp8-ppc.S: $(src)/aesp8-ppc.pl
- $(call cmd,perl)
+targets += aesp8-ppc.S ghashp8-ppc.S
+
+$(obj)/aesp8-ppc.S: $(src)/aesp8-ppc.pl FORCE
+ $(call if_changed,perl)
-$(src)/ghashp8-ppc.S: $(src)/ghashp8-ppc.pl
- $(call cmd,perl)
+$(obj)/ghashp8-ppc.S: $(src)/ghashp8-ppc.pl FORCE
+ $(call if_changed,perl)
-.PRECIOUS: $(obj)/aesp8-ppc.S $(obj)/ghashp8-ppc.S
+clean-files := aesp8-ppc.S ghashp8-ppc.S
diff --git a/drivers/dax/dax.c b/drivers/dax/dax.c
index 286447a83dab..ed758b74ddf0 100644
--- a/drivers/dax/dax.c
+++ b/drivers/dax/dax.c
@@ -75,6 +75,73 @@ struct dax_dev {
struct resource res[0];
};
+static ssize_t id_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct dax_region *dax_region;
+ ssize_t rc = -ENXIO;
+
+ device_lock(dev);
+ dax_region = dev_get_drvdata(dev);
+ if (dax_region)
+ rc = sprintf(buf, "%d\n", dax_region->id);
+ device_unlock(dev);
+
+ return rc;
+}
+static DEVICE_ATTR_RO(id);
+
+static ssize_t region_size_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct dax_region *dax_region;
+ ssize_t rc = -ENXIO;
+
+ device_lock(dev);
+ dax_region = dev_get_drvdata(dev);
+ if (dax_region)
+ rc = sprintf(buf, "%llu\n", (unsigned long long)
+ resource_size(&dax_region->res));
+ device_unlock(dev);
+
+ return rc;
+}
+static struct device_attribute dev_attr_region_size = __ATTR(size, 0444,
+ region_size_show, NULL);
+
+static ssize_t align_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct dax_region *dax_region;
+ ssize_t rc = -ENXIO;
+
+ device_lock(dev);
+ dax_region = dev_get_drvdata(dev);
+ if (dax_region)
+ rc = sprintf(buf, "%u\n", dax_region->align);
+ device_unlock(dev);
+
+ return rc;
+}
+static DEVICE_ATTR_RO(align);
+
+static struct attribute *dax_region_attributes[] = {
+ &dev_attr_region_size.attr,
+ &dev_attr_align.attr,
+ &dev_attr_id.attr,
+ NULL,
+};
+
+static const struct attribute_group dax_region_attribute_group = {
+ .name = "dax_region",
+ .attrs = dax_region_attributes,
+};
+
+static const struct attribute_group *dax_region_attribute_groups[] = {
+ &dax_region_attribute_group,
+ NULL,
+};
+
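
From userspace these attributes surface under the parent device's sysfs directory as dax_region/id, dax_region/size and dax_region/align. A hypothetical reader (the path below is illustrative only; the real one depends on where the parent device lives):

#include <stdio.h>

int main(void)
{
	char line[64];
	/* Illustrative path; substitute the actual parent device directory. */
	FILE *f = fopen("/sys/bus/nd/devices/region0/dax_region/size", "r");

	if (!f) {
		perror("open dax_region/size");
		return 1;
	}
	if (fgets(line, sizeof(line), f))
		printf("dax region size: %s", line);
	fclose(f);
	return 0;
}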
static struct inode *dax_alloc_inode(struct super_block *sb)
{
return kmem_cache_alloc(dax_cache, GFP_KERNEL);
@@ -200,12 +267,31 @@ void dax_region_put(struct dax_region *dax_region)
}
EXPORT_SYMBOL_GPL(dax_region_put);
+static void dax_region_unregister(void *region)
+{
+ struct dax_region *dax_region = region;
+
+ sysfs_remove_groups(&dax_region->dev->kobj,
+ dax_region_attribute_groups);
+ dax_region_put(dax_region);
+}
+
struct dax_region *alloc_dax_region(struct device *parent, int region_id,
struct resource *res, unsigned int align, void *addr,
unsigned long pfn_flags)
{
struct dax_region *dax_region;
+ /*
+ * The DAX core assumes that it can store its private data in
+ * parent->driver_data. This WARN is a reminder / safeguard for
+ * developers of device-dax drivers.
+ */
+ if (dev_get_drvdata(parent)) {
+ dev_WARN(parent, "dax core failed to setup private data\n");
+ return NULL;
+ }
+
if (!IS_ALIGNED(res->start, align)
|| !IS_ALIGNED(resource_size(res), align))
return NULL;
@@ -214,6 +300,7 @@ struct dax_region *alloc_dax_region(struct device *parent, int region_id,
if (!dax_region)
return NULL;
+ dev_set_drvdata(parent, dax_region);
memcpy(&dax_region->res, res, sizeof(*res));
dax_region->pfn_flags = pfn_flags;
kref_init(&dax_region->kref);
@@ -222,7 +309,14 @@ struct dax_region *alloc_dax_region(struct device *parent, int region_id,
dax_region->align = align;
dax_region->dev = parent;
dax_region->base = addr;
+ if (sysfs_create_groups(&parent->kobj, dax_region_attribute_groups)) {
+ kfree(dax_region);
+ return NULL;
+ }
+ kref_get(&dax_region->kref);
+ if (devm_add_action_or_reset(parent, dax_region_unregister, dax_region))
+ return NULL;
return dax_region;
}
EXPORT_SYMBOL_GPL(alloc_dax_region);
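
The lifetime handling here is the devm action pattern: take an extra reference for the registration, then hand a matching release to devm_add_action_or_reset(), which either arms it to run at device unbind or, if adding the action fails, runs it immediately so the caller needs no manual error path. In generic form (the demo_* names are placeholders):

#include <linux/device.h>
#include <linux/kref.h>
#include <linux/slab.h>

struct demo_obj {
	struct kref kref;
};

static void demo_release(struct kref *kref)
{
	kfree(container_of(kref, struct demo_obj, kref));
}

static void demo_unregister(void *data)
{
	struct demo_obj *obj = data;

	kref_put(&obj->kref, demo_release);	/* drop the devm-owned reference */
}

static int demo_setup(struct device *parent, struct demo_obj *obj)
{
	kref_get(&obj->kref);	/* reference owned by the devm action */

	/* On failure this runs demo_unregister(obj) before returning. */
	return devm_add_action_or_reset(parent, demo_unregister, obj);
}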
@@ -328,7 +422,6 @@ static phys_addr_t pgoff_to_phys(struct dax_dev *dax_dev, pgoff_t pgoff,
static int __dax_dev_fault(struct dax_dev *dax_dev, struct vm_area_struct *vma,
struct vm_fault *vmf)
{
- unsigned long vaddr = (unsigned long) vmf->virtual_address;
struct device *dev = &dax_dev->dev;
struct dax_region *dax_region;
int rc = VM_FAULT_SIGBUS;
@@ -353,7 +446,7 @@ static int __dax_dev_fault(struct dax_dev *dax_dev, struct vm_area_struct *vma,
pfn = phys_to_pfn_t(phys, dax_region->pfn_flags);
- rc = vm_insert_mixed(vma, vaddr, pfn);
+ rc = vm_insert_mixed(vma, vmf->address, pfn);
if (rc == -ENOMEM)
return VM_FAULT_OOM;
diff --git a/drivers/dax/pmem.c b/drivers/dax/pmem.c
index 73c6ce93a0d9..033f49b31fdc 100644
--- a/drivers/dax/pmem.c
+++ b/drivers/dax/pmem.c
@@ -89,7 +89,8 @@ static int dax_pmem_probe(struct device *dev)
pfn_sb = nd_pfn->pfn_sb;
if (!devm_request_mem_region(dev, nsio->res.start,
- resource_size(&nsio->res), dev_name(dev))) {
+ resource_size(&nsio->res),
+ dev_name(&ndns->dev))) {
dev_warn(dev, "could not reserve region %pR\n", &nsio->res);
return -EBUSY;
}
diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c
index bf3ea7603a58..a324801d6a66 100644
--- a/drivers/devfreq/devfreq.c
+++ b/drivers/devfreq/devfreq.c
@@ -850,7 +850,7 @@ err_out:
EXPORT_SYMBOL(devfreq_add_governor);
/**
- * devfreq_remove_device() - Remove devfreq feature from a device.
+ * devfreq_remove_governor() - Remove a registered devfreq governor.
* @governor: the devfreq governor to be removed
*/
int devfreq_remove_governor(struct devfreq_governor *governor)
diff --git a/drivers/devfreq/event/exynos-nocp.c b/drivers/devfreq/event/exynos-nocp.c
index 49e712aca0c1..5c3e7b11e8a6 100644
--- a/drivers/devfreq/event/exynos-nocp.c
+++ b/drivers/devfreq/event/exynos-nocp.c
@@ -190,6 +190,7 @@ static const struct of_device_id exynos_nocp_id_match[] = {
{ .compatible = "samsung,exynos5420-nocp", },
{ /* sentinel */ },
};
+MODULE_DEVICE_TABLE(of, exynos_nocp_id_match);
static struct regmap_config exynos_nocp_regmap_config = {
.reg_bits = 32,
diff --git a/drivers/devfreq/event/exynos-ppmu.c b/drivers/devfreq/event/exynos-ppmu.c
index f55cf0eb2a66..107eb91a9415 100644
--- a/drivers/devfreq/event/exynos-ppmu.c
+++ b/drivers/devfreq/event/exynos-ppmu.c
@@ -15,7 +15,6 @@
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/mutex.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/suspend.h>
@@ -34,7 +33,6 @@ struct exynos_ppmu {
unsigned int num_events;
struct device *dev;
- struct mutex lock;
struct exynos_ppmu_data ppmu;
};
@@ -90,8 +88,6 @@ struct __exynos_ppmu_events {
PPMU_EVENT(d1-cpu),
PPMU_EVENT(d1-general),
PPMU_EVENT(d1-rt),
-
- { /* sentinel */ },
};
static int exynos_ppmu_find_ppmu_id(struct devfreq_event_dev *edev)
@@ -351,6 +347,7 @@ static const struct of_device_id exynos_ppmu_id_match[] = {
},
{ /* sentinel */ },
};
+MODULE_DEVICE_TABLE(of, exynos_ppmu_id_match);
static struct devfreq_event_ops *exynos_bus_get_ops(struct device_node *np)
{
@@ -463,7 +460,6 @@ static int exynos_ppmu_probe(struct platform_device *pdev)
if (!info)
return -ENOMEM;
- mutex_init(&info->lock);
info->dev = &pdev->dev;
/* Parse dt data to get resource */
diff --git a/drivers/devfreq/event/rockchip-dfi.c b/drivers/devfreq/event/rockchip-dfi.c
index 43fcc5a7f515..22b113363ffc 100644
--- a/drivers/devfreq/event/rockchip-dfi.c
+++ b/drivers/devfreq/event/rockchip-dfi.c
@@ -188,6 +188,7 @@ static const struct of_device_id rockchip_dfi_id_match[] = {
{ .compatible = "rockchip,rk3399-dfi" },
{ },
};
+MODULE_DEVICE_TABLE(of, rockchip_dfi_id_match);
static int rockchip_dfi_probe(struct platform_device *pdev)
{
diff --git a/drivers/devfreq/exynos-bus.c b/drivers/devfreq/exynos-bus.c
index 29866f7e6d7e..a8ed7792ece2 100644
--- a/drivers/devfreq/exynos-bus.c
+++ b/drivers/devfreq/exynos-bus.c
@@ -35,7 +35,7 @@ struct exynos_bus {
unsigned int edev_count;
struct mutex lock;
- struct dev_pm_opp *curr_opp;
+ unsigned long curr_freq;
struct regulator *regulator;
struct clk *clk;
@@ -99,7 +99,7 @@ static int exynos_bus_target(struct device *dev, unsigned long *freq, u32 flags)
{
struct exynos_bus *bus = dev_get_drvdata(dev);
struct dev_pm_opp *new_opp;
- unsigned long old_freq, new_freq, old_volt, new_volt, tol;
+ unsigned long old_freq, new_freq, new_volt, tol;
int ret = 0;
/* Get new opp-bus instance according to new bus clock */
@@ -113,8 +113,7 @@ static int exynos_bus_target(struct device *dev, unsigned long *freq, u32 flags)
new_freq = dev_pm_opp_get_freq(new_opp);
new_volt = dev_pm_opp_get_voltage(new_opp);
- old_freq = dev_pm_opp_get_freq(bus->curr_opp);
- old_volt = dev_pm_opp_get_voltage(bus->curr_opp);
+ old_freq = bus->curr_freq;
rcu_read_unlock();
if (old_freq == new_freq)
@@ -146,7 +145,7 @@ static int exynos_bus_target(struct device *dev, unsigned long *freq, u32 flags)
goto out;
}
}
- bus->curr_opp = new_opp;
+ bus->curr_freq = new_freq;
dev_dbg(dev, "Set the frequency of bus (%lukHz -> %lukHz)\n",
old_freq/1000, new_freq/1000);
@@ -163,9 +162,7 @@ static int exynos_bus_get_dev_status(struct device *dev,
struct devfreq_event_data edata;
int ret;
- rcu_read_lock();
- stat->current_frequency = dev_pm_opp_get_freq(bus->curr_opp);
- rcu_read_unlock();
+ stat->current_frequency = bus->curr_freq;
ret = exynos_bus_get_event(bus, &edata);
if (ret < 0) {
@@ -226,7 +223,7 @@ static int exynos_bus_passive_target(struct device *dev, unsigned long *freq,
}
new_freq = dev_pm_opp_get_freq(new_opp);
- old_freq = dev_pm_opp_get_freq(bus->curr_opp);
+ old_freq = bus->curr_freq;
rcu_read_unlock();
if (old_freq == new_freq)
@@ -242,7 +239,7 @@ static int exynos_bus_passive_target(struct device *dev, unsigned long *freq,
}
*freq = new_freq;
- bus->curr_opp = new_opp;
+ bus->curr_freq = new_freq;
dev_dbg(dev, "Set the frequency of bus (%lukHz -> %lukHz)\n",
old_freq/1000, new_freq/1000);
@@ -335,6 +332,7 @@ static int exynos_bus_parse_of(struct device_node *np,
struct exynos_bus *bus)
{
struct device *dev = bus->dev;
+ struct dev_pm_opp *opp;
unsigned long rate;
int ret;
@@ -352,22 +350,23 @@ static int exynos_bus_parse_of(struct device_node *np,
}
/* Get the freq and voltage from OPP table to scale the bus freq */
- rcu_read_lock();
ret = dev_pm_opp_of_add_table(dev);
if (ret < 0) {
dev_err(dev, "failed to get OPP table\n");
- rcu_read_unlock();
goto err_clk;
}
rate = clk_get_rate(bus->clk);
- bus->curr_opp = devfreq_recommended_opp(dev, &rate, 0);
- if (IS_ERR(bus->curr_opp)) {
+
+ rcu_read_lock();
+ opp = devfreq_recommended_opp(dev, &rate, 0);
+ if (IS_ERR(opp)) {
dev_err(dev, "failed to find dev_pm_opp\n");
rcu_read_unlock();
- ret = PTR_ERR(bus->curr_opp);
+ ret = PTR_ERR(opp);
goto err_opp;
}
+ bus->curr_freq = dev_pm_opp_get_freq(opp);
rcu_read_unlock();
return 0;
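
The conversion works because, in this era of the OPP library, a struct dev_pm_opp pointer is only valid inside the rcu_read_lock() section that produced it; caching the plain frequency value avoids a dangling pointer when the OPP table changes. The lookup pattern the code settles on, as a standalone sketch (demo_cache_freq is illustrative):

#include <linux/devfreq.h>
#include <linux/pm_opp.h>
#include <linux/rcupdate.h>

static int demo_cache_freq(struct device *dev, unsigned long *rate,
			   unsigned long *out_freq)
{
	struct dev_pm_opp *opp;

	rcu_read_lock();
	opp = devfreq_recommended_opp(dev, rate, 0);
	if (IS_ERR(opp)) {
		rcu_read_unlock();
		return PTR_ERR(opp);
	}
	/* Copy the scalar out while the opp pointer is still protected. */
	*out_freq = dev_pm_opp_get_freq(opp);
	rcu_read_unlock();

	return 0;
}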
diff --git a/drivers/devfreq/rk3399_dmc.c b/drivers/devfreq/rk3399_dmc.c
index e24b73d66659..27d2f349b53c 100644
--- a/drivers/devfreq/rk3399_dmc.c
+++ b/drivers/devfreq/rk3399_dmc.c
@@ -80,7 +80,6 @@ struct rk3399_dmcfreq {
struct regulator *vdd_center;
unsigned long rate, target_rate;
unsigned long volt, target_volt;
- struct dev_pm_opp *curr_opp;
};
static int rk3399_dmcfreq_target(struct device *dev, unsigned long *freq,
@@ -102,9 +101,6 @@ static int rk3399_dmcfreq_target(struct device *dev, unsigned long *freq,
target_rate = dev_pm_opp_get_freq(opp);
target_volt = dev_pm_opp_get_voltage(opp);
- dmcfreq->rate = dev_pm_opp_get_freq(dmcfreq->curr_opp);
- dmcfreq->volt = dev_pm_opp_get_voltage(dmcfreq->curr_opp);
-
rcu_read_unlock();
if (dmcfreq->rate == target_rate)
@@ -165,7 +161,9 @@ static int rk3399_dmcfreq_target(struct device *dev, unsigned long *freq,
if (err)
dev_err(dev, "Cannot to set vol %lu uV\n", target_volt);
- dmcfreq->curr_opp = opp;
+ dmcfreq->rate = target_rate;
+ dmcfreq->volt = target_volt;
+
out:
mutex_unlock(&dmcfreq->lock);
return err;
@@ -414,7 +412,6 @@ static int rk3399_dmcfreq_probe(struct platform_device *pdev)
*/
if (dev_pm_opp_of_add_table(dev)) {
dev_err(dev, "Invalid operating-points in device tree.\n");
- rcu_read_unlock();
return -EINVAL;
}
@@ -431,12 +428,13 @@ static int rk3399_dmcfreq_probe(struct platform_device *pdev)
rcu_read_unlock();
return PTR_ERR(opp);
}
+ data->rate = dev_pm_opp_get_freq(opp);
+ data->volt = dev_pm_opp_get_voltage(opp);
rcu_read_unlock();
- data->curr_opp = opp;
rk3399_devfreq_dmc_profile.initial_freq = data->rate;
- data->devfreq = devfreq_add_device(dev,
+ data->devfreq = devm_devfreq_add_device(dev,
&rk3399_devfreq_dmc_profile,
"simple_ondemand",
&data->ondemand_data);
@@ -454,6 +452,7 @@ static const struct of_device_id rk3399dmc_devfreq_of_match[] = {
{ .compatible = "rockchip,rk3399-dmc" },
{ },
};
+MODULE_DEVICE_TABLE(of, rk3399dmc_devfreq_of_match);
static struct platform_driver rk3399_dmcfreq_driver = {
.probe = rk3399_dmcfreq_probe,
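
The MODULE_DEVICE_TABLE(of, ...) additions in these devfreq drivers are what make module autoloading work: the macro exports the compatible strings as module aliases, so udev can modprobe the driver when a matching device-tree node appears. The bare idiom (the compatible string here is a placeholder):

#include <linux/module.h>
#include <linux/of.h>

static const struct of_device_id demo_of_match[] = {
	{ .compatible = "vendor,demo-device" },	/* placeholder compatible */
	{ /* sentinel */ },
};
/* Emits "of:N*T*Cvendor,demo-device" style aliases into modinfo. */
MODULE_DEVICE_TABLE(of, demo_of_match);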
diff --git a/drivers/dma-buf/Kconfig b/drivers/dma-buf/Kconfig
index 2585821b24ab..ed3b785bae37 100644
--- a/drivers/dma-buf/Kconfig
+++ b/drivers/dma-buf/Kconfig
@@ -7,7 +7,7 @@ config SYNC_FILE
select DMA_SHARED_BUFFER
---help---
The Sync File Framework adds explicit synchronization via
- userspace. It enables send/receive 'struct fence' objects to/from
+ userspace. It enables send/receive 'struct dma_fence' objects to/from
userspace via Sync File fds for synchronization between drivers via
userspace components. It has been ported from Android.
diff --git a/drivers/dma-buf/Makefile b/drivers/dma-buf/Makefile
index 210a10bfad2b..c33bf8863147 100644
--- a/drivers/dma-buf/Makefile
+++ b/drivers/dma-buf/Makefile
@@ -1,3 +1,3 @@
-obj-y := dma-buf.o fence.o reservation.o seqno-fence.o fence-array.o
+obj-y := dma-buf.o dma-fence.o dma-fence-array.o reservation.o seqno-fence.o
obj-$(CONFIG_SYNC_FILE) += sync_file.o
obj-$(CONFIG_SW_SYNC) += sw_sync.o sync_debug.o
diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
index cf04d249a6a4..e72e64484131 100644
--- a/drivers/dma-buf/dma-buf.c
+++ b/drivers/dma-buf/dma-buf.c
@@ -25,7 +25,7 @@
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/dma-buf.h>
-#include <linux/fence.h>
+#include <linux/dma-fence.h>
#include <linux/anon_inodes.h>
#include <linux/export.h>
#include <linux/debugfs.h>
@@ -124,7 +124,7 @@ static loff_t dma_buf_llseek(struct file *file, loff_t offset, int whence)
return base + offset;
}
-static void dma_buf_poll_cb(struct fence *fence, struct fence_cb *cb)
+static void dma_buf_poll_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
{
struct dma_buf_poll_cb_t *dcb = (struct dma_buf_poll_cb_t *)cb;
unsigned long flags;
@@ -140,7 +140,7 @@ static unsigned int dma_buf_poll(struct file *file, poll_table *poll)
struct dma_buf *dmabuf;
struct reservation_object *resv;
struct reservation_object_list *fobj;
- struct fence *fence_excl;
+ struct dma_fence *fence_excl;
unsigned long events;
unsigned shared_count, seq;
@@ -187,20 +187,20 @@ retry:
spin_unlock_irq(&dmabuf->poll.lock);
if (events & pevents) {
- if (!fence_get_rcu(fence_excl)) {
+ if (!dma_fence_get_rcu(fence_excl)) {
/* force a recheck */
events &= ~pevents;
dma_buf_poll_cb(NULL, &dcb->cb);
- } else if (!fence_add_callback(fence_excl, &dcb->cb,
- dma_buf_poll_cb)) {
+ } else if (!dma_fence_add_callback(fence_excl, &dcb->cb,
+ dma_buf_poll_cb)) {
events &= ~pevents;
- fence_put(fence_excl);
+ dma_fence_put(fence_excl);
} else {
/*
* No callback queued, wake up any additional
* waiters.
*/
- fence_put(fence_excl);
+ dma_fence_put(fence_excl);
dma_buf_poll_cb(NULL, &dcb->cb);
}
}
@@ -222,9 +222,9 @@ retry:
goto out;
for (i = 0; i < shared_count; ++i) {
- struct fence *fence = rcu_dereference(fobj->shared[i]);
+ struct dma_fence *fence = rcu_dereference(fobj->shared[i]);
- if (!fence_get_rcu(fence)) {
+ if (!dma_fence_get_rcu(fence)) {
/*
* fence refcount dropped to zero, this means
* that fobj has been freed
@@ -235,13 +235,13 @@ retry:
dma_buf_poll_cb(NULL, &dcb->cb);
break;
}
- if (!fence_add_callback(fence, &dcb->cb,
- dma_buf_poll_cb)) {
- fence_put(fence);
+ if (!dma_fence_add_callback(fence, &dcb->cb,
+ dma_buf_poll_cb)) {
+ dma_fence_put(fence);
events &= ~POLLOUT;
break;
}
- fence_put(fence);
+ dma_fence_put(fence);
}
/* No callback queued, wake up any additional waiters. */
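
Beyond the rename, this hunk shows the canonical dma_fence callback dance: pin the fence, try to attach a callback, and treat a non-zero return from dma_fence_add_callback() as "already signalled", completing inline. Schematically (demo_cb is a placeholder; dma_buf_poll_cb above is the real thing):

#include <linux/dma-fence.h>

static void demo_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
{
	/* Runs from the signalling context; keep this short and atomic-safe. */
}

static void demo_watch(struct dma_fence *fence, struct dma_fence_cb *cb)
{
	if (!dma_fence_get_rcu(fence))
		return;			/* fence was already freed under us */

	if (dma_fence_add_callback(fence, cb, demo_cb))
		demo_cb(fence, cb);	/* already signalled: complete inline */

	dma_fence_put(fence);
}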
diff --git a/drivers/dma-buf/fence-array.c b/drivers/dma-buf/dma-fence-array.c
index f1989fcaf354..67eb7c8fb88c 100644
--- a/drivers/dma-buf/fence-array.c
+++ b/drivers/dma-buf/dma-fence-array.c
@@ -1,5 +1,5 @@
/*
- * fence-array: aggregate fences to be waited together
+ * dma-fence-array: aggregate fences to be waited together
*
* Copyright (C) 2016 Collabora Ltd
* Copyright (C) 2016 Advanced Micro Devices, Inc.
@@ -19,35 +19,34 @@
#include <linux/export.h>
#include <linux/slab.h>
-#include <linux/fence-array.h>
+#include <linux/dma-fence-array.h>
-static void fence_array_cb_func(struct fence *f, struct fence_cb *cb);
-
-static const char *fence_array_get_driver_name(struct fence *fence)
+static const char *dma_fence_array_get_driver_name(struct dma_fence *fence)
{
- return "fence_array";
+ return "dma_fence_array";
}
-static const char *fence_array_get_timeline_name(struct fence *fence)
+static const char *dma_fence_array_get_timeline_name(struct dma_fence *fence)
{
return "unbound";
}
-static void fence_array_cb_func(struct fence *f, struct fence_cb *cb)
+static void dma_fence_array_cb_func(struct dma_fence *f,
+ struct dma_fence_cb *cb)
{
- struct fence_array_cb *array_cb =
- container_of(cb, struct fence_array_cb, cb);
- struct fence_array *array = array_cb->array;
+ struct dma_fence_array_cb *array_cb =
+ container_of(cb, struct dma_fence_array_cb, cb);
+ struct dma_fence_array *array = array_cb->array;
if (atomic_dec_and_test(&array->num_pending))
- fence_signal(&array->base);
- fence_put(&array->base);
+ dma_fence_signal(&array->base);
+ dma_fence_put(&array->base);
}
-static bool fence_array_enable_signaling(struct fence *fence)
+static bool dma_fence_array_enable_signaling(struct dma_fence *fence)
{
- struct fence_array *array = to_fence_array(fence);
- struct fence_array_cb *cb = (void *)(&array[1]);
+ struct dma_fence_array *array = to_dma_fence_array(fence);
+ struct dma_fence_array_cb *cb = (void *)(&array[1]);
unsigned i;
for (i = 0; i < array->num_fences; ++i) {
@@ -60,10 +59,10 @@ static bool fence_array_enable_signaling(struct fence *fence)
* until we signal the array as complete (but that is now
* insufficient).
*/
- fence_get(&array->base);
- if (fence_add_callback(array->fences[i], &cb[i].cb,
- fence_array_cb_func)) {
- fence_put(&array->base);
+ dma_fence_get(&array->base);
+ if (dma_fence_add_callback(array->fences[i], &cb[i].cb,
+ dma_fence_array_cb_func)) {
+ dma_fence_put(&array->base);
if (atomic_dec_and_test(&array->num_pending))
return false;
}
@@ -72,69 +71,71 @@ static bool fence_array_enable_signaling(struct fence *fence)
return true;
}
-static bool fence_array_signaled(struct fence *fence)
+static bool dma_fence_array_signaled(struct dma_fence *fence)
{
- struct fence_array *array = to_fence_array(fence);
+ struct dma_fence_array *array = to_dma_fence_array(fence);
return atomic_read(&array->num_pending) <= 0;
}
-static void fence_array_release(struct fence *fence)
+static void dma_fence_array_release(struct dma_fence *fence)
{
- struct fence_array *array = to_fence_array(fence);
+ struct dma_fence_array *array = to_dma_fence_array(fence);
unsigned i;
for (i = 0; i < array->num_fences; ++i)
- fence_put(array->fences[i]);
+ dma_fence_put(array->fences[i]);
kfree(array->fences);
- fence_free(fence);
+ dma_fence_free(fence);
}
-const struct fence_ops fence_array_ops = {
- .get_driver_name = fence_array_get_driver_name,
- .get_timeline_name = fence_array_get_timeline_name,
- .enable_signaling = fence_array_enable_signaling,
- .signaled = fence_array_signaled,
- .wait = fence_default_wait,
- .release = fence_array_release,
+const struct dma_fence_ops dma_fence_array_ops = {
+ .get_driver_name = dma_fence_array_get_driver_name,
+ .get_timeline_name = dma_fence_array_get_timeline_name,
+ .enable_signaling = dma_fence_array_enable_signaling,
+ .signaled = dma_fence_array_signaled,
+ .wait = dma_fence_default_wait,
+ .release = dma_fence_array_release,
};
-EXPORT_SYMBOL(fence_array_ops);
+EXPORT_SYMBOL(dma_fence_array_ops);
/**
- * fence_array_create - Create a custom fence array
+ * dma_fence_array_create - Create a custom fence array
* @num_fences: [in] number of fences to add in the array
* @fences: [in] array containing the fences
* @context: [in] fence context to use
* @seqno: [in] sequence number to use
* @signal_on_any: [in] signal on any fence in the array
*
- * Allocate a fence_array object and initialize the base fence with fence_init().
+ * Allocate a dma_fence_array object and initialize the base fence with
+ * dma_fence_init().
* In case of error it returns NULL.
*
* The caller should allocate the fences array with num_fences entries
* and fill it with the fences it wants to add to the object. Ownership of this
- * array is taken and fence_put() is used on each fence on release.
+ * array is taken and dma_fence_put() is used on each fence on release.
*
* If @signal_on_any is true the fence array signals if any fence in the array
* signals, otherwise it signals when all fences in the array signal.
*/
-struct fence_array *fence_array_create(int num_fences, struct fence **fences,
- u64 context, unsigned seqno,
- bool signal_on_any)
+struct dma_fence_array *dma_fence_array_create(int num_fences,
+ struct dma_fence **fences,
+ u64 context, unsigned seqno,
+ bool signal_on_any)
{
- struct fence_array *array;
+ struct dma_fence_array *array;
size_t size = sizeof(*array);
/* Allocate the callback structures behind the array. */
- size += num_fences * sizeof(struct fence_array_cb);
+ size += num_fences * sizeof(struct dma_fence_array_cb);
array = kzalloc(size, GFP_KERNEL);
if (!array)
return NULL;
spin_lock_init(&array->lock);
- fence_init(&array->base, &fence_array_ops, &array->lock,
- context, seqno);
+ dma_fence_init(&array->base, &dma_fence_array_ops, &array->lock,
+ context, seqno);
array->num_fences = num_fences;
atomic_set(&array->num_pending, signal_on_any ? 1 : num_fences);
@@ -142,4 +143,4 @@ struct fence_array *fence_array_create(int num_fences, struct fence **fences,
return array;
}
-EXPORT_SYMBOL(fence_array_create);
+EXPORT_SYMBOL(dma_fence_array_create);
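
Annotation: read together with the kerneldoc above, a hypothetical all-of-two aggregation looks like the sketch below (inside an imagined helper returning int; a and b are fences the caller already holds references to, and ownership of the fences[] allocation passes to the array on success):

struct dma_fence **fences;
struct dma_fence_array *array;

fences = kmalloc_array(2, sizeof(*fences), GFP_KERNEL);
if (!fences)
	return -ENOMEM;
fences[0] = a;	/* references owned by the array from here on */
fences[1] = b;

array = dma_fence_array_create(2, fences, dma_fence_context_alloc(1),
			       1, false /* signal when all signal */);
if (!array) {
	kfree(fences);	/* creation failed: ownership did not transfer */
	return -ENOMEM;
}

dma_fence_wait(&array->base, false);	/* blocks until both signal */
dma_fence_put(&array->base);	/* ->release drops a and b for us */
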
diff --git a/drivers/dma-buf/fence.c b/drivers/dma-buf/dma-fence.c
index 4d51f9e83fa8..0212af7997d9 100644
--- a/drivers/dma-buf/fence.c
+++ b/drivers/dma-buf/dma-fence.c
@@ -21,13 +21,13 @@
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/atomic.h>
-#include <linux/fence.h>
+#include <linux/dma-fence.h>
#define CREATE_TRACE_POINTS
-#include <trace/events/fence.h>
+#include <trace/events/dma_fence.h>
-EXPORT_TRACEPOINT_SYMBOL(fence_annotate_wait_on);
-EXPORT_TRACEPOINT_SYMBOL(fence_emit);
+EXPORT_TRACEPOINT_SYMBOL(dma_fence_annotate_wait_on);
+EXPORT_TRACEPOINT_SYMBOL(dma_fence_emit);
/*
* fence context counter: each execution context should have its own
@@ -35,39 +35,41 @@ EXPORT_TRACEPOINT_SYMBOL(fence_emit);
* context or not. One device can have multiple separate contexts,
* and they're used if some engine can run independently of another.
*/
-static atomic64_t fence_context_counter = ATOMIC64_INIT(0);
+static atomic64_t dma_fence_context_counter = ATOMIC64_INIT(0);
/**
- * fence_context_alloc - allocate an array of fence contexts
+ * dma_fence_context_alloc - allocate an array of fence contexts
* @num: [in] amount of contexts to allocate
*
* This function returns the first of the @num contiguous fence context indices allocated.
* The fence context is used for setting fence->context to a unique number.
*/
-u64 fence_context_alloc(unsigned num)
+u64 dma_fence_context_alloc(unsigned num)
{
BUG_ON(!num);
- return atomic64_add_return(num, &fence_context_counter) - num;
+ return atomic64_add_return(num, &dma_fence_context_counter) - num;
}
-EXPORT_SYMBOL(fence_context_alloc);
+EXPORT_SYMBOL(dma_fence_context_alloc);
/**
- * fence_signal_locked - signal completion of a fence
+ * dma_fence_signal_locked - signal completion of a fence
* @fence: the fence to signal
*
* Signal completion for software callbacks on a fence, this will unblock
- * fence_wait() calls and run all the callbacks added with
- * fence_add_callback(). Can be called multiple times, but since a fence
+ * dma_fence_wait() calls and run all the callbacks added with
+ * dma_fence_add_callback(). Can be called multiple times, but since a fence
* can only go from unsignaled to signaled state, it will only be effective
* the first time.
*
- * Unlike fence_signal, this function must be called with fence->lock held.
+ * Unlike dma_fence_signal, this function must be called with fence->lock held.
*/
-int fence_signal_locked(struct fence *fence)
+int dma_fence_signal_locked(struct dma_fence *fence)
{
- struct fence_cb *cur, *tmp;
+ struct dma_fence_cb *cur, *tmp;
int ret = 0;
+ lockdep_assert_held(fence->lock);
+
if (WARN_ON(!fence))
return -EINVAL;
@@ -76,15 +78,15 @@ int fence_signal_locked(struct fence *fence)
smp_mb__before_atomic();
}
- if (test_and_set_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
+ if (test_and_set_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
ret = -EINVAL;
/*
- * we might have raced with the unlocked fence_signal,
+ * we might have raced with the unlocked dma_fence_signal,
* still run through all callbacks
*/
} else
- trace_fence_signaled(fence);
+ trace_dma_fence_signaled(fence);
list_for_each_entry_safe(cur, tmp, &fence->cb_list, node) {
list_del_init(&cur->node);
@@ -92,19 +94,19 @@ int fence_signal_locked(struct fence *fence)
}
return ret;
}
-EXPORT_SYMBOL(fence_signal_locked);
+EXPORT_SYMBOL(dma_fence_signal_locked);
/**
- * fence_signal - signal completion of a fence
+ * dma_fence_signal - signal completion of a fence
* @fence: the fence to signal
*
* Signal completion for software callbacks on a fence, this will unblock
- * fence_wait() calls and run all the callbacks added with
- * fence_add_callback(). Can be called multiple times, but since a fence
+ * dma_fence_wait() calls and run all the callbacks added with
+ * dma_fence_add_callback(). Can be called multiple times, but since a fence
* can only go from unsignaled to signaled state, it will only be effective
* the first time.
*/
-int fence_signal(struct fence *fence)
+int dma_fence_signal(struct dma_fence *fence)
{
unsigned long flags;
@@ -116,13 +118,13 @@ int fence_signal(struct fence *fence)
smp_mb__before_atomic();
}
- if (test_and_set_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags))
+ if (test_and_set_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
return -EINVAL;
- trace_fence_signaled(fence);
+ trace_dma_fence_signaled(fence);
- if (test_bit(FENCE_FLAG_ENABLE_SIGNAL_BIT, &fence->flags)) {
- struct fence_cb *cur, *tmp;
+ if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &fence->flags)) {
+ struct dma_fence_cb *cur, *tmp;
spin_lock_irqsave(fence->lock, flags);
list_for_each_entry_safe(cur, tmp, &fence->cb_list, node) {
@@ -133,10 +135,10 @@ int fence_signal(struct fence *fence)
}
return 0;
}
-EXPORT_SYMBOL(fence_signal);
+EXPORT_SYMBOL(dma_fence_signal);
/**
- * fence_wait_timeout - sleep until the fence gets signaled
+ * dma_fence_wait_timeout - sleep until the fence gets signaled
* or until timeout elapses
* @fence: [in] the fence to wait on
* @intr: [in] if true, do an interruptible wait
@@ -152,78 +154,76 @@ EXPORT_SYMBOL(fence_signal);
* freed before return, resulting in undefined behavior.
*/
signed long
-fence_wait_timeout(struct fence *fence, bool intr, signed long timeout)
+dma_fence_wait_timeout(struct dma_fence *fence, bool intr, signed long timeout)
{
signed long ret;
if (WARN_ON(timeout < 0))
return -EINVAL;
- if (timeout == 0)
- return fence_is_signaled(fence);
-
- trace_fence_wait_start(fence);
+ trace_dma_fence_wait_start(fence);
ret = fence->ops->wait(fence, intr, timeout);
- trace_fence_wait_end(fence);
+ trace_dma_fence_wait_end(fence);
return ret;
}
-EXPORT_SYMBOL(fence_wait_timeout);
+EXPORT_SYMBOL(dma_fence_wait_timeout);
-void fence_release(struct kref *kref)
+void dma_fence_release(struct kref *kref)
{
- struct fence *fence =
- container_of(kref, struct fence, refcount);
+ struct dma_fence *fence =
+ container_of(kref, struct dma_fence, refcount);
- trace_fence_destroy(fence);
+ trace_dma_fence_destroy(fence);
BUG_ON(!list_empty(&fence->cb_list));
if (fence->ops->release)
fence->ops->release(fence);
else
- fence_free(fence);
+ dma_fence_free(fence);
}
-EXPORT_SYMBOL(fence_release);
+EXPORT_SYMBOL(dma_fence_release);
-void fence_free(struct fence *fence)
+void dma_fence_free(struct dma_fence *fence)
{
kfree_rcu(fence, rcu);
}
-EXPORT_SYMBOL(fence_free);
+EXPORT_SYMBOL(dma_fence_free);
/**
- * fence_enable_sw_signaling - enable signaling on fence
+ * dma_fence_enable_sw_signaling - enable signaling on fence
* @fence: [in] the fence to enable
*
* this will request that software signaling be enabled, to make the fence
* complete as soon as possible.
*/
-void fence_enable_sw_signaling(struct fence *fence)
+void dma_fence_enable_sw_signaling(struct dma_fence *fence)
{
unsigned long flags;
- if (!test_and_set_bit(FENCE_FLAG_ENABLE_SIGNAL_BIT, &fence->flags) &&
- !test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
- trace_fence_enable_signal(fence);
+ if (!test_and_set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
+ &fence->flags) &&
+ !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
+ trace_dma_fence_enable_signal(fence);
spin_lock_irqsave(fence->lock, flags);
if (!fence->ops->enable_signaling(fence))
- fence_signal_locked(fence);
+ dma_fence_signal_locked(fence);
spin_unlock_irqrestore(fence->lock, flags);
}
}
-EXPORT_SYMBOL(fence_enable_sw_signaling);
+EXPORT_SYMBOL(dma_fence_enable_sw_signaling);
/**
- * fence_add_callback - add a callback to be called when the fence
+ * dma_fence_add_callback - add a callback to be called when the fence
* is signaled
* @fence: [in] the fence to wait on
* @cb: [in] the callback to register
* @func: [in] the function to call
*
- * cb will be initialized by fence_add_callback, no initialization
+ * cb will be initialized by dma_fence_add_callback; no initialization
* by the caller is required. Any number of callbacks can be registered
* to a fence, but a callback can only be registered to one fence at a time.
*
@@ -232,15 +232,15 @@ EXPORT_SYMBOL(fence_enable_sw_signaling);
* *not* call the callback)
*
* Add a software callback to the fence. Same restrictions apply to
- * refcount as it does to fence_wait, however the caller doesn't need to
+ * refcount as it does to dma_fence_wait, however the caller doesn't need to
* keep a refcount to fence afterwards: when software access is enabled,
* the creator of the fence is required to keep the fence alive until
- * after it signals with fence_signal. The callback itself can be called
+ * after it signals with dma_fence_signal. The callback itself can be called
* from irq context.
*
*/
-int fence_add_callback(struct fence *fence, struct fence_cb *cb,
- fence_func_t func)
+int dma_fence_add_callback(struct dma_fence *fence, struct dma_fence_cb *cb,
+ dma_fence_func_t func)
{
unsigned long flags;
int ret = 0;
@@ -249,22 +249,23 @@ int fence_add_callback(struct fence *fence, struct fence_cb *cb,
if (WARN_ON(!fence || !func))
return -EINVAL;
- if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
+ if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
INIT_LIST_HEAD(&cb->node);
return -ENOENT;
}
spin_lock_irqsave(fence->lock, flags);
- was_set = test_and_set_bit(FENCE_FLAG_ENABLE_SIGNAL_BIT, &fence->flags);
+ was_set = test_and_set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
+ &fence->flags);
- if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags))
+ if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
ret = -ENOENT;
else if (!was_set) {
- trace_fence_enable_signal(fence);
+ trace_dma_fence_enable_signal(fence);
if (!fence->ops->enable_signaling(fence)) {
- fence_signal_locked(fence);
+ dma_fence_signal_locked(fence);
ret = -ENOENT;
}
}
@@ -278,10 +279,10 @@ int fence_add_callback(struct fence *fence, struct fence_cb *cb,
return ret;
}
-EXPORT_SYMBOL(fence_add_callback);
+EXPORT_SYMBOL(dma_fence_add_callback);
/**
- * fence_remove_callback - remove a callback from the signaling list
+ * dma_fence_remove_callback - remove a callback from the signaling list
* @fence: [in] the fence to wait on
* @cb: [in] the callback to remove
*
@@ -296,7 +297,7 @@ EXPORT_SYMBOL(fence_add_callback);
* with a reference held to the fence.
*/
bool
-fence_remove_callback(struct fence *fence, struct fence_cb *cb)
+dma_fence_remove_callback(struct dma_fence *fence, struct dma_fence_cb *cb)
{
unsigned long flags;
bool ret;
@@ -311,15 +312,15 @@ fence_remove_callback(struct fence *fence, struct fence_cb *cb)
return ret;
}
-EXPORT_SYMBOL(fence_remove_callback);
+EXPORT_SYMBOL(dma_fence_remove_callback);
struct default_wait_cb {
- struct fence_cb base;
+ struct dma_fence_cb base;
struct task_struct *task;
};
static void
-fence_default_wait_cb(struct fence *fence, struct fence_cb *cb)
+dma_fence_default_wait_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
{
struct default_wait_cb *wait =
container_of(cb, struct default_wait_cb, base);
@@ -328,25 +329,27 @@ fence_default_wait_cb(struct fence *fence, struct fence_cb *cb)
}
/**
- * fence_default_wait - default sleep until the fence gets signaled
+ * dma_fence_default_wait - default sleep until the fence gets signaled
* or until timeout elapses
* @fence: [in] the fence to wait on
* @intr: [in] if true, do an interruptible wait
* @timeout: [in] timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT
*
* Returns -ERESTARTSYS if interrupted, 0 if the wait timed out, or the
- * remaining timeout in jiffies on success.
+ * remaining timeout in jiffies on success. If timeout is zero, one is
+ * returned when the fence is already signaled, for consistency with other
+ * functions taking a jiffies timeout.
*/
signed long
-fence_default_wait(struct fence *fence, bool intr, signed long timeout)
+dma_fence_default_wait(struct dma_fence *fence, bool intr, signed long timeout)
{
struct default_wait_cb cb;
unsigned long flags;
- signed long ret = timeout;
+ signed long ret = timeout ? timeout : 1;
bool was_set;
- if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags))
- return timeout;
+ if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
+ return ret;
spin_lock_irqsave(fence->lock, flags);
@@ -355,25 +358,26 @@ fence_default_wait(struct fence *fence, bool intr, signed long timeout)
goto out;
}
- was_set = test_and_set_bit(FENCE_FLAG_ENABLE_SIGNAL_BIT, &fence->flags);
+ was_set = test_and_set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
+ &fence->flags);
- if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags))
+ if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
goto out;
if (!was_set) {
- trace_fence_enable_signal(fence);
+ trace_dma_fence_enable_signal(fence);
if (!fence->ops->enable_signaling(fence)) {
- fence_signal_locked(fence);
+ dma_fence_signal_locked(fence);
goto out;
}
}
- cb.base.func = fence_default_wait_cb;
+ cb.base.func = dma_fence_default_wait_cb;
cb.task = current;
list_add(&cb.base.node, &fence->cb_list);
- while (!test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags) && ret > 0) {
+ while (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags) && ret > 0) {
if (intr)
__set_current_state(TASK_INTERRUPTIBLE);
else
@@ -395,28 +399,34 @@ out:
spin_unlock_irqrestore(fence->lock, flags);
return ret;
}
-EXPORT_SYMBOL(fence_default_wait);
+EXPORT_SYMBOL(dma_fence_default_wait);
static bool
-fence_test_signaled_any(struct fence **fences, uint32_t count)
+dma_fence_test_signaled_any(struct dma_fence **fences, uint32_t count,
+ uint32_t *idx)
{
int i;
for (i = 0; i < count; ++i) {
- struct fence *fence = fences[i];
- if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags))
+ struct dma_fence *fence = fences[i];
+ if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
+ if (idx)
+ *idx = i;
return true;
+ }
}
return false;
}
/**
- * fence_wait_any_timeout - sleep until any fence gets signaled
+ * dma_fence_wait_any_timeout - sleep until any fence gets signaled
* or until timeout elapses
* @fences: [in] array of fences to wait on
* @count: [in] number of fences to wait on
* @intr: [in] if true, do an interruptible wait
* @timeout: [in] timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT
+ * @idx: [out] the first signaled fence index, meaningful only on
+ * positive return
*
* Returns -EINVAL on custom fence wait implementation, -ERESTARTSYS if
* interrupted, 0 if the wait timed out, or the remaining timeout in jiffies
@@ -427,8 +437,8 @@ fence_test_signaled_any(struct fence **fences, uint32_t count)
* fence might be freed before return, resulting in undefined behavior.
*/
signed long
-fence_wait_any_timeout(struct fence **fences, uint32_t count,
- bool intr, signed long timeout)
+dma_fence_wait_any_timeout(struct dma_fence **fences, uint32_t count,
+ bool intr, signed long timeout, uint32_t *idx)
{
struct default_wait_cb *cb;
signed long ret = timeout;
@@ -439,8 +449,11 @@ fence_wait_any_timeout(struct fence **fences, uint32_t count,
if (timeout == 0) {
for (i = 0; i < count; ++i)
- if (fence_is_signaled(fences[i]))
+ if (dma_fence_is_signaled(fences[i])) {
+ if (idx)
+ *idx = i;
return 1;
+ }
return 0;
}
@@ -452,17 +465,19 @@ fence_wait_any_timeout(struct fence **fences, uint32_t count,
}
for (i = 0; i < count; ++i) {
- struct fence *fence = fences[i];
+ struct dma_fence *fence = fences[i];
- if (fence->ops->wait != fence_default_wait) {
+ if (fence->ops->wait != dma_fence_default_wait) {
ret = -EINVAL;
goto fence_rm_cb;
}
cb[i].task = current;
- if (fence_add_callback(fence, &cb[i].base,
- fence_default_wait_cb)) {
+ if (dma_fence_add_callback(fence, &cb[i].base,
+ dma_fence_default_wait_cb)) {
/* This fence is already signaled */
+ if (idx)
+ *idx = i;
goto fence_rm_cb;
}
}
@@ -473,7 +488,7 @@ fence_wait_any_timeout(struct fence **fences, uint32_t count,
else
set_current_state(TASK_UNINTERRUPTIBLE);
- if (fence_test_signaled_any(fences, count))
+ if (dma_fence_test_signaled_any(fences, count, idx))
break;
ret = schedule_timeout(ret);
@@ -486,34 +501,34 @@ fence_wait_any_timeout(struct fence **fences, uint32_t count,
fence_rm_cb:
while (i-- > 0)
- fence_remove_callback(fences[i], &cb[i].base);
+ dma_fence_remove_callback(fences[i], &cb[i].base);
err_free_cb:
kfree(cb);
return ret;
}
-EXPORT_SYMBOL(fence_wait_any_timeout);
+EXPORT_SYMBOL(dma_fence_wait_any_timeout);
/**
- * fence_init - Initialize a custom fence.
+ * dma_fence_init - Initialize a custom fence.
* @fence: [in] the fence to initialize
- * @ops: [in] the fence_ops for operations on this fence
+ * @ops: [in] the dma_fence_ops for operations on this fence
* @lock: [in] the irqsafe spinlock to use for locking this fence
* @context: [in] the execution context this fence is run on
* @seqno: [in] a linear increasing sequence number for this context
*
* Initializes an allocated fence, the caller doesn't have to keep its
* refcount after committing with this fence, but it will need to hold a
- * refcount again if fence_ops.enable_signaling gets called. This can
+ * refcount again if dma_fence_ops.enable_signaling gets called. This can
* be used for implementing other types of fence.
*
* context and seqno are used for easy comparison between fences, allowing
- * to check which fence is later by simply using fence_later.
+ * to check which fence is later by simply using dma_fence_later.
*/
void
-fence_init(struct fence *fence, const struct fence_ops *ops,
- spinlock_t *lock, u64 context, unsigned seqno)
+dma_fence_init(struct dma_fence *fence, const struct dma_fence_ops *ops,
+ spinlock_t *lock, u64 context, unsigned seqno)
{
BUG_ON(!lock);
BUG_ON(!ops || !ops->wait || !ops->enable_signaling ||
@@ -527,6 +542,6 @@ fence_init(struct fence *fence, const struct fence_ops *ops,
fence->seqno = seqno;
fence->flags = 0UL;
- trace_fence_init(fence);
+ trace_dma_fence_init(fence);
}
-EXPORT_SYMBOL(fence_init);
+EXPORT_SYMBOL(dma_fence_init);
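
Annotation: the BUG_ON() in dma_fence_init() (last hunk) still demands get_driver_name, get_timeline_name, enable_signaling and wait, so the smallest conceivable provider under the renamed API looks roughly like this sketch (all names hypothetical):

static const char *my_driver_name(struct dma_fence *f)
{
	return "my_driver";
}

static const char *my_timeline_name(struct dma_fence *f)
{
	return "my_timeline";
}

static bool my_enable_signaling(struct dma_fence *f)
{
	return true;	/* hardware assumed to always signal eventually */
}

static const struct dma_fence_ops my_fence_ops = {
	.get_driver_name   = my_driver_name,
	.get_timeline_name = my_timeline_name,
	.enable_signaling  = my_enable_signaling,
	.wait              = dma_fence_default_wait,
};

/* fence = kzalloc(sizeof(*fence), GFP_KERNEL); lock is an irqsafe
 * spinlock shared by all fences of this context */
dma_fence_init(fence, &my_fence_ops, &lock,
	       dma_fence_context_alloc(1), 1);
/* ... later, typically from the driver's completion interrupt: */
dma_fence_signal(fence);	/* wakes dma_fence_default_wait() sleepers */
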
diff --git a/drivers/dma-buf/reservation.c b/drivers/dma-buf/reservation.c
index 723d8af988e5..393817e849ed 100644
--- a/drivers/dma-buf/reservation.c
+++ b/drivers/dma-buf/reservation.c
@@ -102,17 +102,17 @@ EXPORT_SYMBOL(reservation_object_reserve_shared);
static void
reservation_object_add_shared_inplace(struct reservation_object *obj,
struct reservation_object_list *fobj,
- struct fence *fence)
+ struct dma_fence *fence)
{
u32 i;
- fence_get(fence);
+ dma_fence_get(fence);
preempt_disable();
write_seqcount_begin(&obj->seq);
for (i = 0; i < fobj->shared_count; ++i) {
- struct fence *old_fence;
+ struct dma_fence *old_fence;
old_fence = rcu_dereference_protected(fobj->shared[i],
reservation_object_held(obj));
@@ -123,7 +123,7 @@ reservation_object_add_shared_inplace(struct reservation_object *obj,
write_seqcount_end(&obj->seq);
preempt_enable();
- fence_put(old_fence);
+ dma_fence_put(old_fence);
return;
}
}
@@ -143,12 +143,12 @@ static void
reservation_object_add_shared_replace(struct reservation_object *obj,
struct reservation_object_list *old,
struct reservation_object_list *fobj,
- struct fence *fence)
+ struct dma_fence *fence)
{
unsigned i;
- struct fence *old_fence = NULL;
+ struct dma_fence *old_fence = NULL;
- fence_get(fence);
+ dma_fence_get(fence);
if (!old) {
RCU_INIT_POINTER(fobj->shared[0], fence);
@@ -165,7 +165,7 @@ reservation_object_add_shared_replace(struct reservation_object *obj,
fobj->shared_count = old->shared_count;
for (i = 0; i < old->shared_count; ++i) {
- struct fence *check;
+ struct dma_fence *check;
check = rcu_dereference_protected(old->shared[i],
reservation_object_held(obj));
@@ -196,7 +196,7 @@ done:
kfree_rcu(old, rcu);
if (old_fence)
- fence_put(old_fence);
+ dma_fence_put(old_fence);
}
/**
@@ -208,7 +208,7 @@ done:
* reservation_object_reserve_shared() has been called.
*/
void reservation_object_add_shared_fence(struct reservation_object *obj,
- struct fence *fence)
+ struct dma_fence *fence)
{
struct reservation_object_list *old, *fobj = obj->staged;
@@ -231,9 +231,9 @@ EXPORT_SYMBOL(reservation_object_add_shared_fence);
* Add a fence to the exclusive slot. The obj->lock must be held.
*/
void reservation_object_add_excl_fence(struct reservation_object *obj,
- struct fence *fence)
+ struct dma_fence *fence)
{
- struct fence *old_fence = reservation_object_get_excl(obj);
+ struct dma_fence *old_fence = reservation_object_get_excl(obj);
struct reservation_object_list *old;
u32 i = 0;
@@ -242,7 +242,7 @@ void reservation_object_add_excl_fence(struct reservation_object *obj,
i = old->shared_count;
if (fence)
- fence_get(fence);
+ dma_fence_get(fence);
preempt_disable();
write_seqcount_begin(&obj->seq);
@@ -255,11 +255,11 @@ void reservation_object_add_excl_fence(struct reservation_object *obj,
/* inplace update, no shared fences */
while (i--)
- fence_put(rcu_dereference_protected(old->shared[i],
+ dma_fence_put(rcu_dereference_protected(old->shared[i],
reservation_object_held(obj)));
if (old_fence)
- fence_put(old_fence);
+ dma_fence_put(old_fence);
}
EXPORT_SYMBOL(reservation_object_add_excl_fence);
@@ -276,26 +276,32 @@ EXPORT_SYMBOL(reservation_object_add_excl_fence);
* Zero or -errno
*/
int reservation_object_get_fences_rcu(struct reservation_object *obj,
- struct fence **pfence_excl,
+ struct dma_fence **pfence_excl,
unsigned *pshared_count,
- struct fence ***pshared)
+ struct dma_fence ***pshared)
{
- unsigned shared_count = 0;
- unsigned retry = 1;
- struct fence **shared = NULL, *fence_excl = NULL;
- int ret = 0;
+ struct dma_fence **shared = NULL;
+ struct dma_fence *fence_excl;
+ unsigned int shared_count;
+ int ret = 1;
- while (retry) {
+ do {
struct reservation_object_list *fobj;
unsigned seq;
+ unsigned int i;
- seq = read_seqcount_begin(&obj->seq);
+ shared_count = i = 0;
rcu_read_lock();
+ seq = read_seqcount_begin(&obj->seq);
+
+ fence_excl = rcu_dereference(obj->fence_excl);
+ if (fence_excl && !dma_fence_get_rcu(fence_excl))
+ goto unlock;
fobj = rcu_dereference(obj->fence);
if (fobj) {
- struct fence **nshared;
+ struct dma_fence **nshared;
size_t sz = sizeof(*shared) * fobj->shared_max;
nshared = krealloc(shared, sz,
@@ -309,52 +315,37 @@ int reservation_object_get_fences_rcu(struct reservation_object *obj,
}
ret = -ENOMEM;
- shared_count = 0;
break;
}
shared = nshared;
- memcpy(shared, fobj->shared, sz);
shared_count = fobj->shared_count;
- } else
- shared_count = 0;
- fence_excl = rcu_dereference(obj->fence_excl);
-
- retry = read_seqcount_retry(&obj->seq, seq);
- if (retry)
- goto unlock;
-
- if (!fence_excl || fence_get_rcu(fence_excl)) {
- unsigned i;
for (i = 0; i < shared_count; ++i) {
- if (fence_get_rcu(shared[i]))
- continue;
-
- /* uh oh, refcount failed, abort and retry */
- while (i--)
- fence_put(shared[i]);
-
- if (fence_excl) {
- fence_put(fence_excl);
- fence_excl = NULL;
- }
-
- retry = 1;
- break;
+ shared[i] = rcu_dereference(fobj->shared[i]);
+ if (!dma_fence_get_rcu(shared[i]))
+ break;
}
- } else
- retry = 1;
+ }
+ if (i != shared_count || read_seqcount_retry(&obj->seq, seq)) {
+ while (i--)
+ dma_fence_put(shared[i]);
+ dma_fence_put(fence_excl);
+ goto unlock;
+ }
+
+ ret = 0;
unlock:
rcu_read_unlock();
- }
- *pshared_count = shared_count;
- if (shared_count)
- *pshared = shared;
- else {
- *pshared = NULL;
+ } while (ret);
+
+ if (!shared_count) {
kfree(shared);
+ shared = NULL;
}
+
+ *pshared_count = shared_count;
+ *pshared = shared;
*pfence_excl = fence_excl;
return ret;
@@ -377,12 +368,9 @@ long reservation_object_wait_timeout_rcu(struct reservation_object *obj,
bool wait_all, bool intr,
unsigned long timeout)
{
- struct fence *fence;
+ struct dma_fence *fence;
unsigned seq, shared_count, i = 0;
- long ret = timeout;
-
- if (!timeout)
- return reservation_object_test_signaled_rcu(obj, wait_all);
+ long ret = timeout ? timeout : 1;
retry:
fence = NULL;
@@ -397,20 +385,18 @@ retry:
if (fobj)
shared_count = fobj->shared_count;
- if (read_seqcount_retry(&obj->seq, seq))
- goto unlock_retry;
-
for (i = 0; i < shared_count; ++i) {
- struct fence *lfence = rcu_dereference(fobj->shared[i]);
+ struct dma_fence *lfence = rcu_dereference(fobj->shared[i]);
- if (test_bit(FENCE_FLAG_SIGNALED_BIT, &lfence->flags))
+ if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
+ &lfence->flags))
continue;
- if (!fence_get_rcu(lfence))
+ if (!dma_fence_get_rcu(lfence))
goto unlock_retry;
- if (fence_is_signaled(lfence)) {
- fence_put(lfence);
+ if (dma_fence_is_signaled(lfence)) {
+ dma_fence_put(lfence);
continue;
}
@@ -420,18 +406,16 @@ retry:
}
if (!shared_count) {
- struct fence *fence_excl = rcu_dereference(obj->fence_excl);
-
- if (read_seqcount_retry(&obj->seq, seq))
- goto unlock_retry;
+ struct dma_fence *fence_excl = rcu_dereference(obj->fence_excl);
if (fence_excl &&
- !test_bit(FENCE_FLAG_SIGNALED_BIT, &fence_excl->flags)) {
- if (!fence_get_rcu(fence_excl))
+ !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
+ &fence_excl->flags)) {
+ if (!dma_fence_get_rcu(fence_excl))
goto unlock_retry;
- if (fence_is_signaled(fence_excl))
- fence_put(fence_excl);
+ if (dma_fence_is_signaled(fence_excl))
+ dma_fence_put(fence_excl);
else
fence = fence_excl;
}
@@ -439,8 +423,13 @@ retry:
rcu_read_unlock();
if (fence) {
- ret = fence_wait_timeout(fence, intr, ret);
- fence_put(fence);
+ if (read_seqcount_retry(&obj->seq, seq)) {
+ dma_fence_put(fence);
+ goto retry;
+ }
+
+ ret = dma_fence_wait_timeout(fence, intr, ret);
+ dma_fence_put(fence);
if (ret > 0 && wait_all && (i + 1 < shared_count))
goto retry;
}
@@ -454,18 +443,18 @@ EXPORT_SYMBOL_GPL(reservation_object_wait_timeout_rcu);
static inline int
-reservation_object_test_signaled_single(struct fence *passed_fence)
+reservation_object_test_signaled_single(struct dma_fence *passed_fence)
{
- struct fence *fence, *lfence = passed_fence;
+ struct dma_fence *fence, *lfence = passed_fence;
int ret = 1;
- if (!test_bit(FENCE_FLAG_SIGNALED_BIT, &lfence->flags)) {
- fence = fence_get_rcu(lfence);
+ if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &lfence->flags)) {
+ fence = dma_fence_get_rcu(lfence);
if (!fence)
return -1;
- ret = !!fence_is_signaled(fence);
- fence_put(fence);
+ ret = !!dma_fence_is_signaled(fence);
+ dma_fence_put(fence);
}
return ret;
}
@@ -484,12 +473,13 @@ bool reservation_object_test_signaled_rcu(struct reservation_object *obj,
bool test_all)
{
unsigned seq, shared_count;
- int ret = true;
+ int ret;
+ rcu_read_lock();
retry:
+ ret = true;
shared_count = 0;
seq = read_seqcount_begin(&obj->seq);
- rcu_read_lock();
if (test_all) {
unsigned i;
@@ -500,46 +490,35 @@ retry:
if (fobj)
shared_count = fobj->shared_count;
- if (read_seqcount_retry(&obj->seq, seq))
- goto unlock_retry;
-
for (i = 0; i < shared_count; ++i) {
- struct fence *fence = rcu_dereference(fobj->shared[i]);
+ struct dma_fence *fence = rcu_dereference(fobj->shared[i]);
ret = reservation_object_test_signaled_single(fence);
if (ret < 0)
- goto unlock_retry;
+ goto retry;
else if (!ret)
break;
}
- /*
- * There could be a read_seqcount_retry here, but nothing cares
- * about whether it's the old or newer fence pointers that are
- * signaled. That race could still have happened after checking
- * read_seqcount_retry. If you care, use ww_mutex_lock.
- */
+ if (read_seqcount_retry(&obj->seq, seq))
+ goto retry;
}
if (!shared_count) {
- struct fence *fence_excl = rcu_dereference(obj->fence_excl);
-
- if (read_seqcount_retry(&obj->seq, seq))
- goto unlock_retry;
+ struct dma_fence *fence_excl = rcu_dereference(obj->fence_excl);
if (fence_excl) {
ret = reservation_object_test_signaled_single(
fence_excl);
if (ret < 0)
- goto unlock_retry;
+ goto retry;
+
+ if (read_seqcount_retry(&obj->seq, seq))
+ goto retry;
}
}
rcu_read_unlock();
return ret;
-
-unlock_retry:
- rcu_read_unlock();
- goto retry;
}
EXPORT_SYMBOL_GPL(reservation_object_test_signaled_rcu);
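
Annotation: all three rewritten readers above converge on the same lockless discipline, and it differs from the code they replace in one important way: references are taken first and the seqcount is validated afterwards, instead of validating before the dma_fence_get_rcu() calls. A stripped-down sketch of that ordering (an assumed simplification of the loops above, exclusive fence only):

struct dma_fence *fence;
unsigned int seq;

rcu_read_lock();
retry:
seq = read_seqcount_begin(&obj->seq);

fence = rcu_dereference(obj->fence_excl);
if (fence && !dma_fence_get_rcu(fence))
	goto retry;			/* refcount raced to zero */

if (read_seqcount_retry(&obj->seq, seq)) {
	dma_fence_put(fence);		/* NULL-safe */
	goto retry;			/* a writer slipped in between */
}
rcu_read_unlock();
/* fence (possibly NULL) is now a stable reference */
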
diff --git a/drivers/dma-buf/seqno-fence.c b/drivers/dma-buf/seqno-fence.c
index 71127f8f1626..f47112a64763 100644
--- a/drivers/dma-buf/seqno-fence.c
+++ b/drivers/dma-buf/seqno-fence.c
@@ -21,35 +21,35 @@
#include <linux/export.h>
#include <linux/seqno-fence.h>
-static const char *seqno_fence_get_driver_name(struct fence *fence)
+static const char *seqno_fence_get_driver_name(struct dma_fence *fence)
{
struct seqno_fence *seqno_fence = to_seqno_fence(fence);
return seqno_fence->ops->get_driver_name(fence);
}
-static const char *seqno_fence_get_timeline_name(struct fence *fence)
+static const char *seqno_fence_get_timeline_name(struct dma_fence *fence)
{
struct seqno_fence *seqno_fence = to_seqno_fence(fence);
return seqno_fence->ops->get_timeline_name(fence);
}
-static bool seqno_enable_signaling(struct fence *fence)
+static bool seqno_enable_signaling(struct dma_fence *fence)
{
struct seqno_fence *seqno_fence = to_seqno_fence(fence);
return seqno_fence->ops->enable_signaling(fence);
}
-static bool seqno_signaled(struct fence *fence)
+static bool seqno_signaled(struct dma_fence *fence)
{
struct seqno_fence *seqno_fence = to_seqno_fence(fence);
return seqno_fence->ops->signaled && seqno_fence->ops->signaled(fence);
}
-static void seqno_release(struct fence *fence)
+static void seqno_release(struct dma_fence *fence)
{
struct seqno_fence *f = to_seqno_fence(fence);
@@ -57,18 +57,18 @@ static void seqno_release(struct fence *fence)
if (f->ops->release)
f->ops->release(fence);
else
- fence_free(&f->base);
+ dma_fence_free(&f->base);
}
-static signed long seqno_wait(struct fence *fence, bool intr,
- signed long timeout)
+static signed long seqno_wait(struct dma_fence *fence, bool intr,
+ signed long timeout)
{
struct seqno_fence *f = to_seqno_fence(fence);
return f->ops->wait(fence, intr, timeout);
}
-const struct fence_ops seqno_fence_ops = {
+const struct dma_fence_ops seqno_fence_ops = {
.get_driver_name = seqno_fence_get_driver_name,
.get_timeline_name = seqno_fence_get_timeline_name,
.enable_signaling = seqno_enable_signaling,
diff --git a/drivers/dma-buf/sw_sync.c b/drivers/dma-buf/sw_sync.c
index 62e8e6dc7953..69c5ff36e2f9 100644
--- a/drivers/dma-buf/sw_sync.c
+++ b/drivers/dma-buf/sw_sync.c
@@ -68,9 +68,9 @@ struct sw_sync_create_fence_data {
#define SW_SYNC_IOC_INC _IOW(SW_SYNC_IOC_MAGIC, 1, __u32)
-static const struct fence_ops timeline_fence_ops;
+static const struct dma_fence_ops timeline_fence_ops;
-static inline struct sync_pt *fence_to_sync_pt(struct fence *fence)
+static inline struct sync_pt *dma_fence_to_sync_pt(struct dma_fence *fence)
{
if (fence->ops != &timeline_fence_ops)
return NULL;
@@ -84,7 +84,7 @@ static inline struct sync_pt *fence_to_sync_pt(struct fence *fence)
* Creates a new sync_timeline. Returns the sync_timeline object or NULL in
* case of error.
*/
-struct sync_timeline *sync_timeline_create(const char *name)
+static struct sync_timeline *sync_timeline_create(const char *name)
{
struct sync_timeline *obj;
@@ -93,7 +93,7 @@ struct sync_timeline *sync_timeline_create(const char *name)
return NULL;
kref_init(&obj->kref);
- obj->context = fence_context_alloc(1);
+ obj->context = dma_fence_context_alloc(1);
strlcpy(obj->name, name, sizeof(obj->name));
INIT_LIST_HEAD(&obj->child_list_head);
@@ -146,7 +146,7 @@ static void sync_timeline_signal(struct sync_timeline *obj, unsigned int inc)
list_for_each_entry_safe(pt, next, &obj->active_list_head,
active_list) {
- if (fence_is_signaled_locked(&pt->base))
+ if (dma_fence_is_signaled_locked(&pt->base))
list_del_init(&pt->active_list);
}
@@ -179,30 +179,30 @@ static struct sync_pt *sync_pt_create(struct sync_timeline *obj, int size,
spin_lock_irqsave(&obj->child_list_lock, flags);
sync_timeline_get(obj);
- fence_init(&pt->base, &timeline_fence_ops, &obj->child_list_lock,
- obj->context, value);
+ dma_fence_init(&pt->base, &timeline_fence_ops, &obj->child_list_lock,
+ obj->context, value);
list_add_tail(&pt->child_list, &obj->child_list_head);
INIT_LIST_HEAD(&pt->active_list);
spin_unlock_irqrestore(&obj->child_list_lock, flags);
return pt;
}
-static const char *timeline_fence_get_driver_name(struct fence *fence)
+static const char *timeline_fence_get_driver_name(struct dma_fence *fence)
{
return "sw_sync";
}
-static const char *timeline_fence_get_timeline_name(struct fence *fence)
+static const char *timeline_fence_get_timeline_name(struct dma_fence *fence)
{
- struct sync_timeline *parent = fence_parent(fence);
+ struct sync_timeline *parent = dma_fence_parent(fence);
return parent->name;
}
-static void timeline_fence_release(struct fence *fence)
+static void timeline_fence_release(struct dma_fence *fence)
{
- struct sync_pt *pt = fence_to_sync_pt(fence);
- struct sync_timeline *parent = fence_parent(fence);
+ struct sync_pt *pt = dma_fence_to_sync_pt(fence);
+ struct sync_timeline *parent = dma_fence_parent(fence);
unsigned long flags;
spin_lock_irqsave(fence->lock, flags);
@@ -212,20 +212,20 @@ static void timeline_fence_release(struct fence *fence)
spin_unlock_irqrestore(fence->lock, flags);
sync_timeline_put(parent);
- fence_free(fence);
+ dma_fence_free(fence);
}
-static bool timeline_fence_signaled(struct fence *fence)
+static bool timeline_fence_signaled(struct dma_fence *fence)
{
- struct sync_timeline *parent = fence_parent(fence);
+ struct sync_timeline *parent = dma_fence_parent(fence);
return (fence->seqno > parent->value) ? false : true;
}
-static bool timeline_fence_enable_signaling(struct fence *fence)
+static bool timeline_fence_enable_signaling(struct dma_fence *fence)
{
- struct sync_pt *pt = fence_to_sync_pt(fence);
- struct sync_timeline *parent = fence_parent(fence);
+ struct sync_pt *pt = dma_fence_to_sync_pt(fence);
+ struct sync_timeline *parent = dma_fence_parent(fence);
if (timeline_fence_signaled(fence))
return false;
@@ -234,26 +234,26 @@ static bool timeline_fence_enable_signaling(struct fence *fence)
return true;
}
-static void timeline_fence_value_str(struct fence *fence,
+static void timeline_fence_value_str(struct dma_fence *fence,
char *str, int size)
{
snprintf(str, size, "%d", fence->seqno);
}
-static void timeline_fence_timeline_value_str(struct fence *fence,
+static void timeline_fence_timeline_value_str(struct dma_fence *fence,
char *str, int size)
{
- struct sync_timeline *parent = fence_parent(fence);
+ struct sync_timeline *parent = dma_fence_parent(fence);
snprintf(str, size, "%d", parent->value);
}
-static const struct fence_ops timeline_fence_ops = {
+static const struct dma_fence_ops timeline_fence_ops = {
.get_driver_name = timeline_fence_get_driver_name,
.get_timeline_name = timeline_fence_get_timeline_name,
.enable_signaling = timeline_fence_enable_signaling,
.signaled = timeline_fence_signaled,
- .wait = fence_default_wait,
+ .wait = dma_fence_default_wait,
.release = timeline_fence_release,
.fence_value_str = timeline_fence_value_str,
.timeline_value_str = timeline_fence_timeline_value_str,
@@ -316,8 +316,8 @@ static long sw_sync_ioctl_create_fence(struct sync_timeline *obj,
}
sync_file = sync_file_create(&pt->base);
+ dma_fence_put(&pt->base);
if (!sync_file) {
- fence_put(&pt->base);
err = -ENOMEM;
goto err;
}
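
Annotation: the last sw_sync hunk is not a rename but a refcount fix. sync_file_create() (see the sync_file.c change further down) now grabs its own reference with dma_fence_get() instead of consuming the caller's, so the creation reference must be dropped unconditionally, not only on the error path. The resulting idiom, annotated:

sync_file = sync_file_create(&pt->base);	/* takes its own reference */
dma_fence_put(&pt->base);	/* always drop the creation reference */
if (!sync_file) {
	err = -ENOMEM;		/* fence already released above */
	goto err;
}
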
diff --git a/drivers/dma-buf/sync_debug.c b/drivers/dma-buf/sync_debug.c
index 2dd4c3db6caa..48b20e34fb6d 100644
--- a/drivers/dma-buf/sync_debug.c
+++ b/drivers/dma-buf/sync_debug.c
@@ -71,12 +71,13 @@ static const char *sync_status_str(int status)
return "error";
}
-static void sync_print_fence(struct seq_file *s, struct fence *fence, bool show)
+static void sync_print_fence(struct seq_file *s,
+ struct dma_fence *fence, bool show)
{
int status = 1;
- struct sync_timeline *parent = fence_parent(fence);
+ struct sync_timeline *parent = dma_fence_parent(fence);
- if (fence_is_signaled_locked(fence))
+ if (dma_fence_is_signaled_locked(fence))
status = fence->status;
seq_printf(s, " %s%sfence %s",
@@ -135,10 +136,10 @@ static void sync_print_sync_file(struct seq_file *s,
int i;
seq_printf(s, "[%p] %s: %s\n", sync_file, sync_file->name,
- sync_status_str(!fence_is_signaled(sync_file->fence)));
+ sync_status_str(!dma_fence_is_signaled(sync_file->fence)));
- if (fence_is_array(sync_file->fence)) {
- struct fence_array *array = to_fence_array(sync_file->fence);
+ if (dma_fence_is_array(sync_file->fence)) {
+ struct dma_fence_array *array = to_dma_fence_array(sync_file->fence);
for (i = 0; i < array->num_fences; ++i)
sync_print_fence(s, array->fences[i], true);
diff --git a/drivers/dma-buf/sync_debug.h b/drivers/dma-buf/sync_debug.h
index d269aa6783aa..26fe8b9907b3 100644
--- a/drivers/dma-buf/sync_debug.h
+++ b/drivers/dma-buf/sync_debug.h
@@ -15,7 +15,7 @@
#include <linux/list.h>
#include <linux/spinlock.h>
-#include <linux/fence.h>
+#include <linux/dma-fence.h>
#include <linux/sync_file.h>
#include <uapi/linux/sync_file.h>
@@ -45,10 +45,9 @@ struct sync_timeline {
struct list_head sync_timeline_list;
};
-static inline struct sync_timeline *fence_parent(struct fence *fence)
+static inline struct sync_timeline *dma_fence_parent(struct dma_fence *fence)
{
- return container_of(fence->lock, struct sync_timeline,
- child_list_lock);
+ return container_of(fence->lock, struct sync_timeline, child_list_lock);
}
/**
@@ -58,7 +57,7 @@ static inline struct sync_timeline *fence_parent(struct fence *fence)
* @active_list: sync timeline active child's list
*/
struct sync_pt {
- struct fence base;
+ struct dma_fence base;
struct list_head child_list;
struct list_head active_list;
};
diff --git a/drivers/dma-buf/sync_file.c b/drivers/dma-buf/sync_file.c
index b29a9e817320..6d802f2d2881 100644
--- a/drivers/dma-buf/sync_file.c
+++ b/drivers/dma-buf/sync_file.c
@@ -54,7 +54,7 @@ err:
return NULL;
}
-static void fence_check_cb_func(struct fence *f, struct fence_cb *cb)
+static void fence_check_cb_func(struct dma_fence *f, struct dma_fence_cb *cb)
{
struct sync_file *sync_file;
@@ -71,7 +71,7 @@ static void fence_check_cb_func(struct fence *f, struct fence_cb *cb)
* takes ownership of @fence. The sync_file can be released with
* fput(sync_file->file). Returns the sync_file or NULL in case of error.
*/
-struct sync_file *sync_file_create(struct fence *fence)
+struct sync_file *sync_file_create(struct dma_fence *fence)
{
struct sync_file *sync_file;
@@ -79,7 +79,7 @@ struct sync_file *sync_file_create(struct fence *fence)
if (!sync_file)
return NULL;
- sync_file->fence = fence;
+ sync_file->fence = dma_fence_get(fence);
snprintf(sync_file->name, sizeof(sync_file->name), "%s-%s%llu-%d",
fence->ops->get_driver_name(fence),
@@ -121,16 +121,16 @@ err:
* Ensures @fd references a valid sync_file and returns a fence that
* represents all fence in the sync_file. On error NULL is returned.
*/
-struct fence *sync_file_get_fence(int fd)
+struct dma_fence *sync_file_get_fence(int fd)
{
struct sync_file *sync_file;
- struct fence *fence;
+ struct dma_fence *fence;
sync_file = sync_file_fdget(fd);
if (!sync_file)
return NULL;
- fence = fence_get(sync_file->fence);
+ fence = dma_fence_get(sync_file->fence);
fput(sync_file->file);
return fence;
@@ -138,22 +138,23 @@ struct fence *sync_file_get_fence(int fd)
EXPORT_SYMBOL(sync_file_get_fence);
static int sync_file_set_fence(struct sync_file *sync_file,
- struct fence **fences, int num_fences)
+ struct dma_fence **fences, int num_fences)
{
- struct fence_array *array;
+ struct dma_fence_array *array;
/*
* The references for the fences in the new sync_file are taken
* in add_fence() during the merge procedure, so for num_fences == 1
* we already own a new reference to the fence. For num_fences > 1
- * we own the reference of the fence_array creation.
+ * we own the reference of the dma_fence_array creation.
*/
if (num_fences == 1) {
sync_file->fence = fences[0];
kfree(fences);
} else {
- array = fence_array_create(num_fences, fences,
- fence_context_alloc(1), 1, false);
+ array = dma_fence_array_create(num_fences, fences,
+ dma_fence_context_alloc(1),
+ 1, false);
if (!array)
return -ENOMEM;
@@ -163,10 +164,11 @@ static int sync_file_set_fence(struct sync_file *sync_file,
return 0;
}
-static struct fence **get_fences(struct sync_file *sync_file, int *num_fences)
+static struct dma_fence **get_fences(struct sync_file *sync_file,
+ int *num_fences)
{
- if (fence_is_array(sync_file->fence)) {
- struct fence_array *array = to_fence_array(sync_file->fence);
+ if (dma_fence_is_array(sync_file->fence)) {
+ struct dma_fence_array *array = to_dma_fence_array(sync_file->fence);
*num_fences = array->num_fences;
return array->fences;
@@ -176,12 +178,13 @@ static struct fence **get_fences(struct sync_file *sync_file, int *num_fences)
return &sync_file->fence;
}
-static void add_fence(struct fence **fences, int *i, struct fence *fence)
+static void add_fence(struct dma_fence **fences,
+ int *i, struct dma_fence *fence)
{
fences[*i] = fence;
- if (!fence_is_signaled(fence)) {
- fence_get(fence);
+ if (!dma_fence_is_signaled(fence)) {
+ dma_fence_get(fence);
(*i)++;
}
}
@@ -200,7 +203,7 @@ static struct sync_file *sync_file_merge(const char *name, struct sync_file *a,
struct sync_file *b)
{
struct sync_file *sync_file;
- struct fence **fences, **nfences, **a_fences, **b_fences;
+ struct dma_fence **fences, **nfences, **a_fences, **b_fences;
int i, i_a, i_b, num_fences, a_num_fences, b_num_fences;
sync_file = sync_file_alloc();
@@ -226,8 +229,8 @@ static struct sync_file *sync_file_merge(const char *name, struct sync_file *a,
* and sync_file_create, this is a reasonable assumption.
*/
for (i = i_a = i_b = 0; i_a < a_num_fences && i_b < b_num_fences; ) {
- struct fence *pt_a = a_fences[i_a];
- struct fence *pt_b = b_fences[i_b];
+ struct dma_fence *pt_a = a_fences[i_a];
+ struct dma_fence *pt_b = b_fences[i_b];
if (pt_a->context < pt_b->context) {
add_fence(fences, &i, pt_a);
@@ -255,7 +258,7 @@ static struct sync_file *sync_file_merge(const char *name, struct sync_file *a,
add_fence(fences, &i, b_fences[i_b]);
if (i == 0)
- fences[i++] = fence_get(a_fences[0]);
+ fences[i++] = dma_fence_get(a_fences[0]);
if (num_fences > i) {
nfences = krealloc(fences, i * sizeof(*fences),
@@ -286,8 +289,8 @@ static void sync_file_free(struct kref *kref)
kref);
if (test_bit(POLL_ENABLED, &sync_file->fence->flags))
- fence_remove_callback(sync_file->fence, &sync_file->cb);
- fence_put(sync_file->fence);
+ dma_fence_remove_callback(sync_file->fence, &sync_file->cb);
+ dma_fence_put(sync_file->fence);
kfree(sync_file);
}
@@ -305,14 +308,13 @@ static unsigned int sync_file_poll(struct file *file, poll_table *wait)
poll_wait(file, &sync_file->wq, wait);
- if (!poll_does_not_wait(wait) &&
- !test_and_set_bit(POLL_ENABLED, &sync_file->fence->flags)) {
- if (fence_add_callback(sync_file->fence, &sync_file->cb,
- fence_check_cb_func) < 0)
+ if (!test_and_set_bit(POLL_ENABLED, &sync_file->fence->flags)) {
+ if (dma_fence_add_callback(sync_file->fence, &sync_file->cb,
+ fence_check_cb_func) < 0)
wake_up_all(&sync_file->wq);
}
- return fence_is_signaled(sync_file->fence) ? POLLIN : 0;
+ return dma_fence_is_signaled(sync_file->fence) ? POLLIN : 0;
}
static long sync_file_ioctl_merge(struct sync_file *sync_file,
@@ -370,14 +372,14 @@ err_put_fd:
return err;
}
-static void sync_fill_fence_info(struct fence *fence,
+static void sync_fill_fence_info(struct dma_fence *fence,
struct sync_fence_info *info)
{
strlcpy(info->obj_name, fence->ops->get_timeline_name(fence),
sizeof(info->obj_name));
strlcpy(info->driver_name, fence->ops->get_driver_name(fence),
sizeof(info->driver_name));
- if (fence_is_signaled(fence))
+ if (dma_fence_is_signaled(fence))
info->status = fence->status >= 0 ? 1 : fence->status;
else
info->status = 0;
@@ -389,7 +391,7 @@ static long sync_file_ioctl_fence_info(struct sync_file *sync_file,
{
struct sync_file_info info;
struct sync_fence_info *fence_info = NULL;
- struct fence **fences;
+ struct dma_fence **fences;
__u32 size;
int num_fences, ret, i;
@@ -429,7 +431,7 @@ static long sync_file_ioctl_fence_info(struct sync_file *sync_file,
no_fences:
strlcpy(info.name, sync_file->name, sizeof(info.name));
- info.status = fence_is_signaled(sync_file->fence);
+ info.status = dma_fence_is_signaled(sync_file->fence);
info.num_fences = num_fences;
if (copy_to_user((void __user *)arg, &info, sizeof(info)))
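
Annotation: with sync_file_create() now reference-neutral, a hypothetical driver round trip through the fd interface looks like this sketch (error handling abbreviated, return values of the waits elided):

struct sync_file *sf;
struct dma_fence *in;
int fd;

sf = sync_file_create(fence);	/* takes its own reference */
dma_fence_put(fence);		/* caller's reference no longer needed */
if (!sf)
	return -ENOMEM;

fd = get_unused_fd_flags(O_CLOEXEC);
if (fd < 0) {
	fput(sf->file);		/* release drops the fence reference */
	return fd;
}
fd_install(fd, sf->file);	/* hand the fence to userspace */

/* ... later, resolve an fd (ours or one passed back in) to a fence: */
in = sync_file_get_fence(fd);	/* +1 reference, or NULL */
if (in) {
	dma_fence_wait(in, true);
	dma_fence_put(in);
}
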
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 141aefbe37ec..263495d0adbd 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -436,6 +436,20 @@ config STE_DMA40
help
Support for ST-Ericsson DMA40 controller
+config ST_FDMA
+ tristate "ST FDMA dmaengine support"
+ depends on ARCH_STI
+ depends on REMOTEPROC
+ select ST_SLIM_REMOTEPROC
+ select DMA_ENGINE
+ select DMA_VIRTUAL_CHANNELS
+ help
+ Enable support for ST FDMA controller.
+	  It supports 16 independent DMA channels and accepts up to 32 DMA requests.
+
+ Say Y here if you have such a chipset.
+ If unsure, say N.
+
config STM32_DMA
bool "STMicroelectronics STM32 DMA support"
depends on ARCH_STM32 || COMPILE_TEST
@@ -480,7 +494,7 @@ config TEGRA20_APB_DMA
or vice versa. It does not support memory to memory data transfer.
config TEGRA210_ADMA
- bool "NVIDIA Tegra210 ADMA support"
+ tristate "NVIDIA Tegra210 ADMA support"
depends on (ARCH_TEGRA_210_SOC || COMPILE_TEST) && PM_CLK
select DMA_ENGINE
select DMA_VIRTUAL_CHANNELS
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index e4dc9cac7ee8..a4fa3360e609 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -67,6 +67,7 @@ obj-$(CONFIG_TI_DMA_CROSSBAR) += ti-dma-crossbar.o
obj-$(CONFIG_TI_EDMA) += edma.o
obj-$(CONFIG_XGENE_DMA) += xgene-dma.o
obj-$(CONFIG_ZX_DMA) += zx296702_dma.o
+obj-$(CONFIG_ST_FDMA) += st_fdma.o
obj-y += qcom/
obj-y += xilinx/
diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c
index 939a7c31f760..0b7c6ce629a6 100644
--- a/drivers/dma/amba-pl08x.c
+++ b/drivers/dma/amba-pl08x.c
@@ -1793,6 +1793,13 @@ bool pl08x_filter_id(struct dma_chan *chan, void *chan_id)
}
EXPORT_SYMBOL_GPL(pl08x_filter_id);
+static bool pl08x_filter_fn(struct dma_chan *chan, void *chan_id)
+{
+ struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
+
+ return plchan->cd == chan_id;
+}
+
/*
* Just check that the device is there and active
* TODO: turn this bit on/off depending on the number of physical channels
@@ -2307,6 +2314,10 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
ret = -EINVAL;
goto out_no_platdata;
}
+ } else {
+ pl08x->slave.filter.map = pl08x->pd->slave_map;
+ pl08x->slave.filter.mapcnt = pl08x->pd->slave_map_len;
+ pl08x->slave.filter.fn = pl08x_filter_fn;
}
/* By default, AHB1 only. If dualmaster, from platform */
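
Annotation: the pl08x hunks wire the driver into the dmaengine filter-map mechanism for the non-DT case: dma_request_chan() walks the slave_map table from platform data and calls pl08x_filter_fn() with the matching param, which the new filter compares against the channel data pointer. A hypothetical board-file fragment (board_chan_data and the device names are invented):

static const struct dma_slave_map board_slave_map[] = {
	/* devname, slave, param (a struct pl08x_channel_data *) */
	{ "uart0", "tx", &board_chan_data[0] },
	{ "uart0", "rx", &board_chan_data[1] },
};

static struct pl08x_platform_data board_pl08x_pd = {
	.slave_map	= board_slave_map,
	.slave_map_len	= ARRAY_SIZE(board_slave_map),
	/* ... channel data, memcpy config, bus master setup ... */
};
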
diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c
index a4c8f80db29d..1baf3404a365 100644
--- a/drivers/dma/at_hdmac.c
+++ b/drivers/dma/at_hdmac.c
@@ -111,9 +111,8 @@ static struct at_desc *atc_alloc_descriptor(struct dma_chan *chan,
struct at_dma *atdma = to_at_dma(chan->device);
dma_addr_t phys;
- desc = dma_pool_alloc(atdma->dma_desc_pool, gfp_flags, &phys);
+ desc = dma_pool_zalloc(atdma->dma_desc_pool, gfp_flags, &phys);
if (desc) {
- memset(desc, 0, sizeof(struct at_desc));
INIT_LIST_HEAD(&desc->tx_list);
dma_async_tx_descriptor_init(&desc->txd, chan);
/* txd.flags will be overwritten in prep functions */
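
Annotation: this hunk, like the at_xdmac one right after it, is the standard dma_pool_zalloc() conversion: the helper zeroes the allocation itself, so the open-coded memset() (and the risk of forgetting it) goes away. Schematically:

/* before */
desc = dma_pool_alloc(pool, gfp_flags, &phys);
if (desc)
	memset(desc, 0, sizeof(*desc));

/* after: one call, same semantics */
desc = dma_pool_zalloc(pool, gfp_flags, &phys);
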
diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c
index b7d7f2d443a1..7d4e0bcda9af 100644
--- a/drivers/dma/at_xdmac.c
+++ b/drivers/dma/at_xdmac.c
@@ -221,7 +221,6 @@ struct at_xdmac {
int irq;
struct clk *clk;
u32 save_gim;
- u32 save_gs;
struct dma_pool *at_xdmac_desc_pool;
struct at_xdmac_chan chan[0];
};
@@ -444,9 +443,8 @@ static struct at_xdmac_desc *at_xdmac_alloc_desc(struct dma_chan *chan,
struct at_xdmac *atxdmac = to_at_xdmac(chan->device);
dma_addr_t phys;
- desc = dma_pool_alloc(atxdmac->at_xdmac_desc_pool, gfp_flags, &phys);
+ desc = dma_pool_zalloc(atxdmac->at_xdmac_desc_pool, gfp_flags, &phys);
if (desc) {
- memset(desc, 0, sizeof(*desc));
INIT_LIST_HEAD(&desc->descs_list);
dma_async_tx_descriptor_init(&desc->tx_dma_desc, chan);
desc->tx_dma_desc.tx_submit = at_xdmac_tx_submit;
@@ -1896,7 +1894,6 @@ static int atmel_xdmac_resume(struct device *dev)
}
at_xdmac_write(atxdmac, AT_XDMAC_GIE, atxdmac->save_gim);
- at_xdmac_write(atxdmac, AT_XDMAC_GE, atxdmac->save_gs);
list_for_each_entry_safe(chan, _chan, &atxdmac->dma.channels, device_node) {
atchan = to_at_xdmac_chan(chan);
at_xdmac_chan_write(atchan, AT_XDMAC_CC, atchan->save_cc);
diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
index cf76fc6149e5..451f899f74e4 100644
--- a/drivers/dma/dmatest.c
+++ b/drivers/dma/dmatest.c
@@ -164,7 +164,9 @@ struct dmatest_thread {
struct task_struct *task;
struct dma_chan *chan;
u8 **srcs;
+ u8 **usrcs;
u8 **dsts;
+ u8 **udsts;
enum dma_transaction_type type;
bool done;
};
@@ -431,6 +433,7 @@ static int dmatest_func(void *data)
ktime_t comparetime = ktime_set(0, 0);
s64 runtime = 0;
unsigned long long total_len = 0;
+ u8 align = 0;
set_freezable();
@@ -441,20 +444,24 @@ static int dmatest_func(void *data)
params = &info->params;
chan = thread->chan;
dev = chan->device;
- if (thread->type == DMA_MEMCPY)
+ if (thread->type == DMA_MEMCPY) {
+ align = dev->copy_align;
src_cnt = dst_cnt = 1;
- else if (thread->type == DMA_SG)
+ } else if (thread->type == DMA_SG) {
+ align = dev->copy_align;
src_cnt = dst_cnt = sg_buffers;
- else if (thread->type == DMA_XOR) {
+ } else if (thread->type == DMA_XOR) {
/* force odd to ensure dst = src */
src_cnt = min_odd(params->xor_sources | 1, dev->max_xor);
dst_cnt = 1;
+ align = dev->xor_align;
} else if (thread->type == DMA_PQ) {
/* force odd to ensure dst = src */
src_cnt = min_odd(params->pq_sources | 1, dma_maxpq(dev, 0));
dst_cnt = 2;
+ align = dev->pq_align;
- pq_coefs = kmalloc(params->pq_sources+1, GFP_KERNEL);
+ pq_coefs = kmalloc(params->pq_sources + 1, GFP_KERNEL);
if (!pq_coefs)
goto err_thread_type;
@@ -463,23 +470,47 @@ static int dmatest_func(void *data)
} else
goto err_thread_type;
- thread->srcs = kcalloc(src_cnt+1, sizeof(u8 *), GFP_KERNEL);
+ thread->srcs = kcalloc(src_cnt + 1, sizeof(u8 *), GFP_KERNEL);
if (!thread->srcs)
goto err_srcs;
+
+ thread->usrcs = kcalloc(src_cnt + 1, sizeof(u8 *), GFP_KERNEL);
+ if (!thread->usrcs)
+ goto err_usrcs;
+
for (i = 0; i < src_cnt; i++) {
- thread->srcs[i] = kmalloc(params->buf_size, GFP_KERNEL);
- if (!thread->srcs[i])
+ thread->usrcs[i] = kmalloc(params->buf_size + align,
+ GFP_KERNEL);
+ if (!thread->usrcs[i])
goto err_srcbuf;
+
+ /* align srcs to alignment restriction */
+ if (align)
+ thread->srcs[i] = PTR_ALIGN(thread->usrcs[i], align);
+ else
+ thread->srcs[i] = thread->usrcs[i];
}
thread->srcs[i] = NULL;
- thread->dsts = kcalloc(dst_cnt+1, sizeof(u8 *), GFP_KERNEL);
+ thread->dsts = kcalloc(dst_cnt + 1, sizeof(u8 *), GFP_KERNEL);
if (!thread->dsts)
goto err_dsts;
+
+ thread->udsts = kcalloc(dst_cnt + 1, sizeof(u8 *), GFP_KERNEL);
+ if (!thread->udsts)
+ goto err_udsts;
+
for (i = 0; i < dst_cnt; i++) {
- thread->dsts[i] = kmalloc(params->buf_size, GFP_KERNEL);
- if (!thread->dsts[i])
+ thread->udsts[i] = kmalloc(params->buf_size + align,
+ GFP_KERNEL);
+ if (!thread->udsts[i])
goto err_dstbuf;
+
+ /* align dsts to alignment restriction */
+ if (align)
+ thread->dsts[i] = PTR_ALIGN(thread->udsts[i], align);
+ else
+ thread->dsts[i] = thread->udsts[i];
}
thread->dsts[i] = NULL;
@@ -498,20 +529,11 @@ static int dmatest_func(void *data)
dma_addr_t srcs[src_cnt];
dma_addr_t *dsts;
unsigned int src_off, dst_off, len;
- u8 align = 0;
struct scatterlist tx_sg[src_cnt];
struct scatterlist rx_sg[src_cnt];
total_tests++;
- /* honor alignment restrictions */
- if (thread->type == DMA_MEMCPY || thread->type == DMA_SG)
- align = dev->copy_align;
- else if (thread->type == DMA_XOR)
- align = dev->xor_align;
- else if (thread->type == DMA_PQ)
- align = dev->pq_align;
-
if (1 << align > params->buf_size) {
pr_err("%u-byte buffer too small for %d-byte alignment\n",
params->buf_size, 1 << align);
@@ -549,7 +571,7 @@ static int dmatest_func(void *data)
filltime = ktime_add(filltime, diff);
}
- um = dmaengine_get_unmap_data(dev->dev, src_cnt+dst_cnt,
+ um = dmaengine_get_unmap_data(dev->dev, src_cnt + dst_cnt,
GFP_KERNEL);
if (!um) {
failed_tests++;
@@ -729,13 +751,17 @@ static int dmatest_func(void *data)
ret = 0;
err_dstbuf:
- for (i = 0; thread->dsts[i]; i++)
- kfree(thread->dsts[i]);
+ for (i = 0; thread->udsts[i]; i++)
+ kfree(thread->udsts[i]);
+ kfree(thread->udsts);
+err_udsts:
kfree(thread->dsts);
err_dsts:
err_srcbuf:
- for (i = 0; thread->srcs[i]; i++)
- kfree(thread->srcs[i]);
+ for (i = 0; thread->usrcs[i]; i++)
+ kfree(thread->usrcs[i]);
+ kfree(thread->usrcs);
+err_usrcs:
kfree(thread->srcs);
err_srcs:
kfree(pq_coefs);
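The dmatest change above over-allocates every test buffer by the engine's required alignment and keeps two pointer arrays: usrcs/udsts hold the raw kmalloc() pointers, which are the only ones kfree() may be given, while srcs/dsts hold the PTR_ALIGN()ed pointers the engine actually uses. A minimal sketch of the same pattern, with an illustrative helper name:

#include <linux/kernel.h>        /* PTR_ALIGN() */
#include <linux/slab.h>          /* kmalloc(), kfree() */

/*
 * Sketch: return a pointer aligned to 'align' bytes (a power of two)
 * by over-allocating. *raw receives the pointer that must later be
 * passed to kfree(); freeing the aligned pointer would be a bug.
 */
static void *demo_alloc_aligned(size_t size, size_t align, void **raw)
{
        void *buf = kmalloc(size + align, GFP_KERNEL);

        if (!buf)
                return NULL;
        *raw = buf;
        return align ? PTR_ALIGN(buf, align) : buf;
}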
diff --git a/drivers/dma/dw/core.c b/drivers/dma/dw/core.c
index c2c0a613cb7a..e5adf5d1c34f 100644
--- a/drivers/dma/dw/core.c
+++ b/drivers/dma/dw/core.c
@@ -1569,7 +1569,7 @@ int dw_dma_probe(struct dw_dma_chip *chip)
(dwc_params >> DWC_PARAMS_MBLK_EN & 0x1) == 0;
} else {
dwc->block_size = pdata->block_size;
- dwc->nollp = pdata->is_nollp;
+ dwc->nollp = !pdata->multi_block[i];
}
}
diff --git a/drivers/dma/dw/platform.c b/drivers/dma/dw/platform.c
index 5bda0eb9f393..b1655e40cfa2 100644
--- a/drivers/dma/dw/platform.c
+++ b/drivers/dma/dw/platform.c
@@ -102,7 +102,7 @@ dw_dma_parse_dt(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
struct dw_dma_platform_data *pdata;
- u32 tmp, arr[DW_DMA_MAX_NR_MASTERS];
+ u32 tmp, arr[DW_DMA_MAX_NR_MASTERS], mb[DW_DMA_MAX_NR_CHANNELS];
u32 nr_masters;
u32 nr_channels;
@@ -118,6 +118,8 @@ dw_dma_parse_dt(struct platform_device *pdev)
if (of_property_read_u32(np, "dma-channels", &nr_channels))
return NULL;
+ if (nr_channels > DW_DMA_MAX_NR_CHANNELS)
+ return NULL;
pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
if (!pdata)
@@ -129,6 +131,12 @@ dw_dma_parse_dt(struct platform_device *pdev)
if (of_property_read_bool(np, "is_private"))
pdata->is_private = true;
+ /*
+ * All known devices that use DT for configuration support
+ * memory-to-memory transfers, so enable it by default.
+ */
+ pdata->is_memcpy = true;
+
if (!of_property_read_u32(np, "chan_allocation_order", &tmp))
pdata->chan_allocation_order = (unsigned char)tmp;
@@ -146,6 +154,14 @@ dw_dma_parse_dt(struct platform_device *pdev)
pdata->data_width[tmp] = BIT(arr[tmp] & 0x07);
}
+ if (!of_property_read_u32_array(np, "multi-block", mb, nr_channels)) {
+ for (tmp = 0; tmp < nr_channels; tmp++)
+ pdata->multi_block[tmp] = mb[tmp];
+ } else {
+ for (tmp = 0; tmp < nr_channels; tmp++)
+ pdata->multi_block[tmp] = 1;
+ }
+
return pdata;
}
#else
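Two things happen in the dw/platform.c hunks: nr_channels is bounds-checked before it is used to size the on-stack mb[] array, and the optional "multi-block" property falls back to enabling multi-block on every channel when it is absent. A hedged sketch of the read-array-or-default idiom (property name and bound are illustrative):

#include <linux/of.h>

#define DEMO_MAX_CHANNELS 8      /* illustrative upper bound */

/* 'nr' must already be validated against DEMO_MAX_CHANNELS. */
static void demo_read_per_chan(struct device_node *np, u32 *cfg, u32 nr)
{
        u32 vals[DEMO_MAX_CHANNELS];
        u32 i;

        /* of_property_read_u32_array() returns 0 only on success. */
        if (!of_property_read_u32_array(np, "demo-per-chan", vals, nr)) {
                for (i = 0; i < nr; i++)
                        cfg[i] = vals[i];
        } else {
                for (i = 0; i < nr; i++)
                        cfg[i] = 1;     /* default: feature enabled */
        }
}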
diff --git a/drivers/dma/dw/regs.h b/drivers/dma/dw/regs.h
index f65dd104479f..4e0128c62704 100644
--- a/drivers/dma/dw/regs.h
+++ b/drivers/dma/dw/regs.h
@@ -12,7 +12,8 @@
#include <linux/interrupt.h>
#include <linux/dmaengine.h>
-#define DW_DMA_MAX_NR_CHANNELS 8
+#include "internal.h"
+
#define DW_DMA_MAX_NR_REQUESTS 16
/* flow controller */
diff --git a/drivers/dma/edma.c b/drivers/dma/edma.c
index 77242b37ef87..3879f80a4815 100644
--- a/drivers/dma/edma.c
+++ b/drivers/dma/edma.c
@@ -2451,6 +2451,9 @@ static int edma_pm_resume(struct device *dev)
int i;
s8 (*queue_priority_mapping)[2];
+ /* Re-initialize the dummy slot with the dummy param set */
+ edma_write_slot(ecc, ecc->dummy_slot, &dummy_paramset);
+
queue_priority_mapping = ecc->info->queue_priority_mapping;
/* Event queue priority mapping */
diff --git a/drivers/dma/fsl_raid.c b/drivers/dma/fsl_raid.c
index db2f9e1653a2..90d29f90acfb 100644
--- a/drivers/dma/fsl_raid.c
+++ b/drivers/dma/fsl_raid.c
@@ -881,6 +881,7 @@ static struct of_device_id fsl_re_ids[] = {
{ .compatible = "fsl,raideng-v1.0", },
{}
};
+MODULE_DEVICE_TABLE(of, fsl_re_ids);
static struct platform_driver fsl_re_driver = {
.driver = {
diff --git a/drivers/dma/hsu/pci.c b/drivers/dma/hsu/pci.c
index b51639f045ed..4875fa428e81 100644
--- a/drivers/dma/hsu/pci.c
+++ b/drivers/dma/hsu/pci.c
@@ -77,13 +77,15 @@ static int hsu_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (!chip)
return -ENOMEM;
+ ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES);
+ if (ret < 0)
+ return ret;
+
chip->dev = &pdev->dev;
chip->regs = pcim_iomap_table(pdev)[0];
chip->length = pci_resource_len(pdev, 0);
chip->offset = HSU_PCI_CHAN_OFFSET;
- chip->irq = pdev->irq;
-
- pci_enable_msi(pdev);
+ chip->irq = pci_irq_vector(pdev, 0);
ret = hsu_dma_probe(chip);
if (ret)
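The hsu/pci.c hunk moves the driver to the vector-allocation API: pci_enable_msi() was called without checking its result and pdev->irq was used directly, whereas pci_alloc_irq_vectors() makes the probe fail when no vector of any type can be had, and pci_irq_vector() resolves the Linux IRQ number. The probe-side shape, sketched:

#include <linux/pci.h>

static int demo_setup_irq(struct pci_dev *pdev, int *irq)
{
        int nvec;

        /* Exactly one vector; MSI-X, MSI or legacy INTx all accepted. */
        nvec = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES);
        if (nvec < 0)
                return nvec;

        /* Translate vector 0 into a Linux IRQ number. */
        *irq = pci_irq_vector(pdev, 0);
        return 0;
}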
diff --git a/drivers/dma/img-mdc-dma.c b/drivers/dma/img-mdc-dma.c
index 624f1e1e9c55..54db1411ce73 100644
--- a/drivers/dma/img-mdc-dma.c
+++ b/drivers/dma/img-mdc-dma.c
@@ -292,7 +292,7 @@ static struct dma_async_tx_descriptor *mdc_prep_dma_memcpy(
struct mdc_dma *mdma = mchan->mdma;
struct mdc_tx_desc *mdesc;
struct mdc_hw_list_desc *curr, *prev = NULL;
- dma_addr_t curr_phys, prev_phys;
+ dma_addr_t curr_phys;
if (!len)
return NULL;
@@ -324,7 +324,6 @@ static struct dma_async_tx_descriptor *mdc_prep_dma_memcpy(
xfer_size);
prev = curr;
- prev_phys = curr_phys;
mdesc->list_len++;
src += xfer_size;
@@ -375,7 +374,7 @@ static struct dma_async_tx_descriptor *mdc_prep_dma_cyclic(
struct mdc_dma *mdma = mchan->mdma;
struct mdc_tx_desc *mdesc;
struct mdc_hw_list_desc *curr, *prev = NULL;
- dma_addr_t curr_phys, prev_phys;
+ dma_addr_t curr_phys;
if (!buf_len && !period_len)
return NULL;
@@ -430,7 +429,6 @@ static struct dma_async_tx_descriptor *mdc_prep_dma_cyclic(
}
prev = curr;
- prev_phys = curr_phys;
mdesc->list_len++;
buf_addr += xfer_size;
@@ -458,7 +456,7 @@ static struct dma_async_tx_descriptor *mdc_prep_slave_sg(
struct mdc_tx_desc *mdesc;
struct scatterlist *sg;
struct mdc_hw_list_desc *curr, *prev = NULL;
- dma_addr_t curr_phys, prev_phys;
+ dma_addr_t curr_phys;
unsigned int i;
if (!sgl)
@@ -509,7 +507,6 @@ static struct dma_async_tx_descriptor *mdc_prep_slave_sg(
}
prev = curr;
- prev_phys = curr_phys;
mdesc->list_len++;
mdesc->list_xfer_size += xfer_size;
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
index b9629b2bfc05..d1651a50c349 100644
--- a/drivers/dma/imx-sdma.c
+++ b/drivers/dma/imx-sdma.c
@@ -298,6 +298,7 @@ struct sdma_engine;
* @event_id1 for channels that use 2 events
* @word_size peripheral access size
* @buf_tail ID of the buffer that was processed
+ * @buf_ptail ID of the previous buffer that was processed
* @num_bd max NUM_BD. number of descriptors currently handling
*/
struct sdma_channel {
@@ -309,6 +310,7 @@ struct sdma_channel {
unsigned int event_id1;
enum dma_slave_buswidth word_size;
unsigned int buf_tail;
+ unsigned int buf_ptail;
unsigned int num_bd;
unsigned int period_len;
struct sdma_buffer_descriptor *bd;
@@ -700,6 +702,8 @@ static void sdma_update_channel_loop(struct sdma_channel *sdmac)
sdmac->chn_real_count = bd->mode.count;
bd->mode.status |= BD_DONE;
bd->mode.count = sdmac->period_len;
+ sdmac->buf_ptail = sdmac->buf_tail;
+ sdmac->buf_tail = (sdmac->buf_tail + 1) % sdmac->num_bd;
/*
* The callback is called from the interrupt context in order
@@ -710,9 +714,6 @@ static void sdma_update_channel_loop(struct sdma_channel *sdmac)
dmaengine_desc_get_callback_invoke(&sdmac->desc, NULL);
- sdmac->buf_tail++;
- sdmac->buf_tail %= sdmac->num_bd;
-
if (error)
sdmac->status = old_status;
}
@@ -1186,6 +1187,8 @@ static struct dma_async_tx_descriptor *sdma_prep_slave_sg(
sdmac->flags = 0;
sdmac->buf_tail = 0;
+ sdmac->buf_ptail = 0;
+ sdmac->chn_real_count = 0;
dev_dbg(sdma->dev, "setting up %d entries for channel %d.\n",
sg_len, channel);
@@ -1288,6 +1291,8 @@ static struct dma_async_tx_descriptor *sdma_prep_dma_cyclic(
sdmac->status = DMA_IN_PROGRESS;
sdmac->buf_tail = 0;
+ sdmac->buf_ptail = 0;
+ sdmac->chn_real_count = 0;
sdmac->period_len = period_len;
sdmac->flags |= IMX_DMA_SG_LOOP;
@@ -1385,7 +1390,7 @@ static enum dma_status sdma_tx_status(struct dma_chan *chan,
u32 residue;
if (sdmac->flags & IMX_DMA_SG_LOOP)
- residue = (sdmac->num_bd - sdmac->buf_tail) *
+ residue = (sdmac->num_bd - sdmac->buf_ptail) *
sdmac->period_len - sdmac->chn_real_count;
else
residue = sdmac->chn_count - sdmac->chn_real_count;
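In the imx-sdma hunks, buf_tail now advances before the callback runs, so by the time a client queries the transfer status the tail already points at the next buffer. The residue is therefore derived from buf_ptail, the buffer that actually completed, together with chn_real_count, the byte count the hardware reported for it; both are reset whenever a new transfer is prepared. As a sketch, the cyclic residue is:

        /*
         * Periods not yet consumed, counted from the last completed
         * buffer, minus the bytes the hardware already reported for it.
         */
        residue = (num_bd - buf_ptail) * period_len - chn_real_count;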
diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c
index 49386ce04bf5..a371b07a0981 100644
--- a/drivers/dma/ioat/dma.c
+++ b/drivers/dma/ioat/dma.c
@@ -39,6 +39,7 @@
#include "../dmaengine.h"
static char *chanerr_str[] = {
+ "DMA Transfer Source Address Error",
"DMA Transfer Destination Address Error",
"Next Descriptor Address Error",
"Descriptor Error",
@@ -66,7 +67,6 @@ static char *chanerr_str[] = {
"Result Guard Tag verification Error",
"Result Application Tag verification Error",
"Result Reference Tag verification Error",
- NULL
};
static void ioat_eh(struct ioatdma_chan *ioat_chan);
@@ -75,13 +75,10 @@ static void ioat_print_chanerrs(struct ioatdma_chan *ioat_chan, u32 chanerr)
{
int i;
- for (i = 0; i < 32; i++) {
+ for (i = 0; i < ARRAY_SIZE(chanerr_str); i++) {
if ((chanerr >> i) & 1) {
- if (chanerr_str[i]) {
- dev_err(to_dev(ioat_chan), "Err(%d): %s\n",
- i, chanerr_str[i]);
- } else
- break;
+ dev_err(to_dev(ioat_chan), "Err(%d): %s\n",
+ i, chanerr_str[i]);
}
}
}
@@ -341,15 +338,12 @@ ioat_alloc_ring_ent(struct dma_chan *chan, int idx, gfp_t flags)
{
struct ioat_dma_descriptor *hw;
struct ioat_ring_ent *desc;
- struct ioatdma_device *ioat_dma;
struct ioatdma_chan *ioat_chan = to_ioat_chan(chan);
int chunk;
dma_addr_t phys;
u8 *pos;
off_t offs;
- ioat_dma = to_ioatdma_device(chan->device);
-
chunk = idx / IOAT_DESCS_PER_2M;
idx &= (IOAT_DESCS_PER_2M - 1);
offs = idx * IOAT_DESC_SZ;
@@ -614,11 +608,8 @@ static void __cleanup(struct ioatdma_chan *ioat_chan, dma_addr_t phys_complete)
tx = &desc->txd;
if (tx->cookie) {
- struct dmaengine_result res;
-
dma_cookie_complete(tx);
dma_descriptor_unmap(tx);
- res.result = DMA_TRANS_NOERROR;
dmaengine_desc_get_callback_invoke(tx, NULL);
tx->callback = NULL;
tx->callback_result = NULL;
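The chanerr_str fix is twofold: the table gained the missing bit-0 entry ("DMA Transfer Source Address Error"), realigning every error bit with its string, and the decode loop is now bounded by ARRAY_SIZE() rather than a NULL sentinel, so a set bit past the table can neither read out of bounds nor silently end the loop early. The generic pattern, sketched with an illustrative table:

#include <linux/bitops.h>
#include <linux/kernel.h>
#include <linux/printk.h>

static const char * const demo_err_str[] = {
        "source address error",         /* bit 0 */
        "destination address error",    /* bit 1 */
        "descriptor error",             /* bit 2 */
};

static void demo_print_errs(u32 status)
{
        unsigned int i;

        /* Decode only the bits we have strings for. */
        for (i = 0; i < ARRAY_SIZE(demo_err_str); i++)
                if (status & BIT(i))
                        pr_err("Err(%u): %s\n", i, demo_err_str[i]);
}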
diff --git a/drivers/dma/ioat/init.c b/drivers/dma/ioat/init.c
index 015f7110b96d..90eddd9f07e4 100644
--- a/drivers/dma/ioat/init.c
+++ b/drivers/dma/ioat/init.c
@@ -340,11 +340,13 @@ static int ioat_dma_self_test(struct ioatdma_device *ioat_dma)
dma_src = dma_map_single(dev, src, IOAT_TEST_SIZE, DMA_TO_DEVICE);
if (dma_mapping_error(dev, dma_src)) {
dev_err(dev, "mapping src buffer failed\n");
+ err = -ENOMEM;
goto free_resources;
}
dma_dest = dma_map_single(dev, dest, IOAT_TEST_SIZE, DMA_FROM_DEVICE);
if (dma_mapping_error(dev, dma_dest)) {
dev_err(dev, "mapping dest buffer failed\n");
+ err = -ENOMEM;
goto unmap_src;
}
flags = DMA_PREP_INTERRUPT;
@@ -827,16 +829,20 @@ static int ioat_xor_val_self_test(struct ioatdma_device *ioat_dma)
op = IOAT_OP_XOR;
dest_dma = dma_map_page(dev, dest, 0, PAGE_SIZE, DMA_FROM_DEVICE);
- if (dma_mapping_error(dev, dest_dma))
+ if (dma_mapping_error(dev, dest_dma)) {
+ err = -ENOMEM;
goto free_resources;
+ }
for (i = 0; i < IOAT_NUM_SRC_TEST; i++)
dma_srcs[i] = DMA_ERROR_CODE;
for (i = 0; i < IOAT_NUM_SRC_TEST; i++) {
dma_srcs[i] = dma_map_page(dev, xor_srcs[i], 0, PAGE_SIZE,
DMA_TO_DEVICE);
- if (dma_mapping_error(dev, dma_srcs[i]))
+ if (dma_mapping_error(dev, dma_srcs[i])) {
+ err = -ENOMEM;
goto dma_unmap;
+ }
}
tx = dma->device_prep_dma_xor(dma_chan, dest_dma, dma_srcs,
IOAT_NUM_SRC_TEST, PAGE_SIZE,
@@ -904,8 +910,10 @@ static int ioat_xor_val_self_test(struct ioatdma_device *ioat_dma)
for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++) {
dma_srcs[i] = dma_map_page(dev, xor_val_srcs[i], 0, PAGE_SIZE,
DMA_TO_DEVICE);
- if (dma_mapping_error(dev, dma_srcs[i]))
+ if (dma_mapping_error(dev, dma_srcs[i])) {
+ err = -ENOMEM;
goto dma_unmap;
+ }
}
tx = dma->device_prep_dma_xor_val(dma_chan, dma_srcs,
IOAT_NUM_SRC_TEST + 1, PAGE_SIZE,
@@ -957,8 +965,10 @@ static int ioat_xor_val_self_test(struct ioatdma_device *ioat_dma)
for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++) {
dma_srcs[i] = dma_map_page(dev, xor_val_srcs[i], 0, PAGE_SIZE,
DMA_TO_DEVICE);
- if (dma_mapping_error(dev, dma_srcs[i]))
+ if (dma_mapping_error(dev, dma_srcs[i])) {
+ err = -ENOMEM;
goto dma_unmap;
+ }
}
tx = dma->device_prep_dma_xor_val(dma_chan, dma_srcs,
IOAT_NUM_SRC_TEST + 1, PAGE_SIZE,
@@ -1071,7 +1081,6 @@ static int ioat3_dma_probe(struct ioatdma_device *ioat_dma, int dca)
struct dma_device *dma;
struct dma_chan *c;
struct ioatdma_chan *ioat_chan;
- bool is_raid_device = false;
int err;
u16 val16;
@@ -1095,7 +1104,6 @@ static int ioat3_dma_probe(struct ioatdma_device *ioat_dma, int dca)
ioat_dma->cap &= ~(IOAT_CAP_XOR|IOAT_CAP_PQ);
if (ioat_dma->cap & IOAT_CAP_XOR) {
- is_raid_device = true;
dma->max_xor = 8;
dma_cap_set(DMA_XOR, dma->cap_mask);
@@ -1106,7 +1114,6 @@ static int ioat3_dma_probe(struct ioatdma_device *ioat_dma, int dca)
}
if (ioat_dma->cap & IOAT_CAP_PQ) {
- is_raid_device = true;
dma->device_prep_dma_pq = ioat_prep_pq;
dma->device_prep_dma_pq_val = ioat_prep_pq_val;
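The ioat/init.c hunks all plug the same hole: on a dma_mapping_error() the self-test jumped to its cleanup label with err still holding whatever an earlier step left behind, often 0, so a mapping failure could be reported as success. Setting err = -ENOMEM immediately before the goto is the standard shape:

#include <linux/dma-mapping.h>

static int demo_map_one(struct device *dev, struct page *page)
{
        dma_addr_t addr;
        int err = 0;

        addr = dma_map_page(dev, page, 0, PAGE_SIZE, DMA_TO_DEVICE);
        if (dma_mapping_error(dev, addr)) {
                err = -ENOMEM;  /* never fall through with a stale 0 */
                goto out;
        }

        dma_unmap_page(dev, addr, PAGE_SIZE, DMA_TO_DEVICE);
out:
        return err;
}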
diff --git a/drivers/dma/ioat/registers.h b/drivers/dma/ioat/registers.h
index 48fa4cf9f64a..2f3bbc88ff2a 100644
--- a/drivers/dma/ioat/registers.h
+++ b/drivers/dma/ioat/registers.h
@@ -106,8 +106,6 @@
#define IOAT_DMA_COMP_V1 0x0001 /* Compatibility with DMA version 1 */
#define IOAT_DMA_COMP_V2 0x0002 /* Compatibility with DMA version 2 */
-/* IOAT1 define left for i7300_idle driver to not fail compiling */
-#define IOAT1_CHANSTS_OFFSET 0x04
#define IOAT_CHANSTS_OFFSET 0x08 /* 64-bit Channel Status Register */
#define IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR (~0x3fULL)
#define IOAT_CHANSTS_SOFT_ERR 0x10ULL
diff --git a/drivers/dma/k3dma.c b/drivers/dma/k3dma.c
index aabcb7934b05..01e25c68dd5a 100644
--- a/drivers/dma/k3dma.c
+++ b/drivers/dma/k3dma.c
@@ -458,13 +458,12 @@ static struct k3_dma_desc_sw *k3_dma_alloc_desc_resource(int num,
if (!ds)
return NULL;
- ds->desc_hw = dma_pool_alloc(d->pool, GFP_NOWAIT, &ds->desc_hw_lli);
+ ds->desc_hw = dma_pool_zalloc(d->pool, GFP_NOWAIT, &ds->desc_hw_lli);
if (!ds->desc_hw) {
dev_dbg(chan->device->dev, "vch %p: dma alloc fail\n", &c->vc);
kfree(ds);
return NULL;
}
- memset(ds->desc_hw, 0, sizeof(struct k3_desc_hw) * num);
ds->desc_num = num;
return ds;
}
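dma_pool_zalloc() allocates a block from the pool and zeroes the whole block in one call, which is what the removed alloc-plus-memset pair was approximating (the pch_dma hunk further down makes the equivalent pci_pool_zalloc() conversion). A minimal usage sketch with illustrative pool geometry:

#include <linux/dmapool.h>
#include <linux/sizes.h>

static void *demo_alloc_desc(struct device *dev, struct dma_pool **poolp,
                             dma_addr_t *phys)
{
        /* Illustrative pool: 4 KiB blocks, 32-byte aligned, no boundary. */
        struct dma_pool *pool = dma_pool_create("demo", dev, SZ_4K, 32, 0);

        if (!pool)
                return NULL;
        *poolp = pool;

        /* One call allocates a block and zeroes it. */
        return dma_pool_zalloc(pool, GFP_NOWAIT, phys);
}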
diff --git a/drivers/dma/mic_x100_dma.c b/drivers/dma/mic_x100_dma.c
index 818255844a3c..5ba5714d0b7c 100644
--- a/drivers/dma/mic_x100_dma.c
+++ b/drivers/dma/mic_x100_dma.c
@@ -554,9 +554,7 @@ static int mic_dma_init(struct mic_dma_device *mic_dma_dev,
int ret;
for (i = first_chan; i < first_chan + MIC_DMA_NUM_CHAN; i++) {
- unsigned long data;
ch = &mic_dma_dev->mic_ch[i];
- data = (unsigned long)ch;
ch->ch_num = i;
ch->owner = owner;
spin_lock_init(&ch->cleanup_lock);
diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c
index 23f75285a4d9..0cb951b743a6 100644
--- a/drivers/dma/mv_xor.c
+++ b/drivers/dma/mv_xor.c
@@ -68,6 +68,36 @@ static void mv_desc_init(struct mv_xor_desc_slot *desc,
hw_desc->byte_count = byte_count;
}
+/* Populate the descriptor */
+static void mv_xor_config_sg_ll_desc(struct mv_xor_desc_slot *desc,
+ dma_addr_t dma_src, dma_addr_t dma_dst,
+ u32 len, struct mv_xor_desc_slot *prev)
+{
+ struct mv_xor_desc *hw_desc = desc->hw_desc;
+
+ hw_desc->status = XOR_DESC_DMA_OWNED;
+ hw_desc->phy_next_desc = 0;
+ /* Configure for XOR with only one src address -> MEMCPY */
+ hw_desc->desc_command = XOR_DESC_OPERATION_XOR | (0x1 << 0);
+ hw_desc->phy_dest_addr = dma_dst;
+ hw_desc->phy_src_addr[0] = dma_src;
+ hw_desc->byte_count = len;
+
+ if (prev) {
+ struct mv_xor_desc *hw_prev = prev->hw_desc;
+
+ hw_prev->phy_next_desc = desc->async_tx.phys;
+ }
+}
+
+static void mv_xor_desc_config_eod(struct mv_xor_desc_slot *desc)
+{
+ struct mv_xor_desc *hw_desc = desc->hw_desc;
+
+ /* Enable end-of-descriptor interrupt */
+ hw_desc->desc_command |= XOR_DESC_EOD_INT_EN;
+}
+
static void mv_desc_set_mode(struct mv_xor_desc_slot *desc)
{
struct mv_xor_desc *hw_desc = desc->hw_desc;
@@ -228,8 +258,13 @@ mv_chan_clean_completed_slots(struct mv_xor_chan *mv_chan)
list_for_each_entry_safe(iter, _iter, &mv_chan->completed_slots,
node) {
- if (async_tx_test_ack(&iter->async_tx))
+ if (async_tx_test_ack(&iter->async_tx)) {
list_move_tail(&iter->node, &mv_chan->free_slots);
+ if (!list_empty(&iter->sg_tx_list)) {
+ list_splice_tail_init(&iter->sg_tx_list,
+ &mv_chan->free_slots);
+ }
+ }
}
return 0;
}
@@ -244,11 +279,20 @@ mv_desc_clean_slot(struct mv_xor_desc_slot *desc,
/* the client is allowed to attach dependent operations
* until 'ack' is set
*/
- if (!async_tx_test_ack(&desc->async_tx))
+ if (!async_tx_test_ack(&desc->async_tx)) {
/* move this slot to the completed_slots */
list_move_tail(&desc->node, &mv_chan->completed_slots);
- else
+ if (!list_empty(&desc->sg_tx_list)) {
+ list_splice_tail_init(&desc->sg_tx_list,
+ &mv_chan->completed_slots);
+ }
+ } else {
list_move_tail(&desc->node, &mv_chan->free_slots);
+ if (!list_empty(&desc->sg_tx_list)) {
+ list_splice_tail_init(&desc->sg_tx_list,
+ &mv_chan->free_slots);
+ }
+ }
return 0;
}
@@ -450,6 +494,7 @@ static int mv_xor_alloc_chan_resources(struct dma_chan *chan)
dma_async_tx_descriptor_init(&slot->async_tx, chan);
slot->async_tx.tx_submit = mv_xor_tx_submit;
INIT_LIST_HEAD(&slot->node);
+ INIT_LIST_HEAD(&slot->sg_tx_list);
dma_desc = mv_chan->dma_desc_pool;
slot->async_tx.phys = dma_desc + idx * MV_XOR_SLOT_SIZE;
slot->idx = idx++;
@@ -617,6 +662,132 @@ mv_xor_prep_dma_interrupt(struct dma_chan *chan, unsigned long flags)
return mv_xor_prep_dma_xor(chan, dest, &src, 1, len, flags);
}
+/**
+ * mv_xor_prep_dma_sg - prepare descriptors for a memory sg transaction
+ * @chan: DMA channel
+ * @dst_sg: Destination scatter list
+ * @dst_sg_len: Number of entries in destination scatter list
+ * @src_sg: Source scatter list
+ * @src_sg_len: Number of entries in source scatter list
+ * @flags: transfer ack flags
+ *
+ * Return: Async transaction descriptor on success and NULL on failure
+ */
+static struct dma_async_tx_descriptor *
+mv_xor_prep_dma_sg(struct dma_chan *chan, struct scatterlist *dst_sg,
+ unsigned int dst_sg_len, struct scatterlist *src_sg,
+ unsigned int src_sg_len, unsigned long flags)
+{
+ struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
+ struct mv_xor_desc_slot *new;
+ struct mv_xor_desc_slot *first = NULL;
+ struct mv_xor_desc_slot *prev = NULL;
+ size_t len, dst_avail, src_avail;
+ dma_addr_t dma_dst, dma_src;
+ int desc_cnt = 0;
+ int ret;
+
+ dev_dbg(mv_chan_to_devp(mv_chan),
+ "%s dst_sg_len: %d src_sg_len: %d flags: %ld\n",
+ __func__, dst_sg_len, src_sg_len, flags);
+
+ dst_avail = sg_dma_len(dst_sg);
+ src_avail = sg_dma_len(src_sg);
+
+ /* Run until we are out of scatterlist entries */
+ while (true) {
+ /* Allocate and populate the descriptor */
+ desc_cnt++;
+ new = mv_chan_alloc_slot(mv_chan);
+ if (!new) {
+ dev_err(mv_chan_to_devp(mv_chan),
+ "Out of descriptors (desc_cnt=%d)!\n",
+ desc_cnt);
+ goto err;
+ }
+
+ len = min_t(size_t, src_avail, dst_avail);
+ len = min_t(size_t, len, MV_XOR_MAX_BYTE_COUNT);
+ if (len == 0)
+ goto fetch;
+
+ if (len < MV_XOR_MIN_BYTE_COUNT) {
+ dev_err(mv_chan_to_devp(mv_chan),
+ "Transfer size of %zu too small!\n", len);
+ goto err;
+ }
+
+ dma_dst = sg_dma_address(dst_sg) + sg_dma_len(dst_sg) -
+ dst_avail;
+ dma_src = sg_dma_address(src_sg) + sg_dma_len(src_sg) -
+ src_avail;
+
+ /* Check if a new window needs to be added for 'dst' */
+ ret = mv_xor_add_io_win(mv_chan, dma_dst);
+ if (ret)
+ goto err;
+
+ /* Check if a new window needs to be added for 'src' */
+ ret = mv_xor_add_io_win(mv_chan, dma_src);
+ if (ret)
+ goto err;
+
+ /* Populate the descriptor */
+ mv_xor_config_sg_ll_desc(new, dma_src, dma_dst, len, prev);
+ prev = new;
+ dst_avail -= len;
+ src_avail -= len;
+
+ if (!first)
+ first = new;
+ else
+ list_move_tail(&new->node, &first->sg_tx_list);
+
+fetch:
+ /* Fetch the next dst scatterlist entry */
+ if (dst_avail == 0) {
+ if (dst_sg_len == 0)
+ break;
+
+ /* Fetch the next entry: if there are no more, we are done */
+ dst_sg = sg_next(dst_sg);
+ if (dst_sg == NULL)
+ break;
+
+ dst_sg_len--;
+ dst_avail = sg_dma_len(dst_sg);
+ }
+
+ /* Fetch the next src scatterlist entry */
+ if (src_avail == 0) {
+ if (src_sg_len == 0)
+ break;
+
+ /* Fetch the next entry: if there are no more, we are done */
+ src_sg = sg_next(src_sg);
+ if (src_sg == NULL)
+ break;
+
+ src_sg_len--;
+ src_avail = sg_dma_len(src_sg);
+ }
+ }
+
+ /* Set the EOD flag in the last descriptor */
+ mv_xor_desc_config_eod(new);
+ first->async_tx.flags = flags;
+
+ return &first->async_tx;
+
+err:
+ /* Cleanup: Move all descriptors back into the free list */
+ spin_lock_bh(&mv_chan->lock);
+ mv_desc_clean_slot(first, mv_chan);
+ spin_unlock_bh(&mv_chan->lock);
+
+ return NULL;
+}
+
static void mv_xor_free_chan_resources(struct dma_chan *chan)
{
struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
@@ -1083,6 +1254,8 @@ mv_xor_channel_add(struct mv_xor_device *xordev,
dma_dev->device_prep_dma_interrupt = mv_xor_prep_dma_interrupt;
if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask))
dma_dev->device_prep_dma_memcpy = mv_xor_prep_dma_memcpy;
+ if (dma_has_cap(DMA_SG, dma_dev->cap_mask))
+ dma_dev->device_prep_dma_sg = mv_xor_prep_dma_sg;
if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
dma_dev->max_xor = 8;
dma_dev->device_prep_dma_xor = mv_xor_prep_dma_xor;
@@ -1132,10 +1305,11 @@ mv_xor_channel_add(struct mv_xor_device *xordev,
goto err_free_irq;
}
- dev_info(&pdev->dev, "Marvell XOR (%s): ( %s%s%s)\n",
+ dev_info(&pdev->dev, "Marvell XOR (%s): ( %s%s%s%s)\n",
mv_chan->op_in_desc ? "Descriptor Mode" : "Registers Mode",
dma_has_cap(DMA_XOR, dma_dev->cap_mask) ? "xor " : "",
dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask) ? "cpy " : "",
+ dma_has_cap(DMA_SG, dma_dev->cap_mask) ? "sg " : "",
dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask) ? "intr " : "");
dma_async_device_register(dma_dev);
@@ -1378,6 +1552,7 @@ static int mv_xor_probe(struct platform_device *pdev)
dma_cap_zero(cap_mask);
dma_cap_set(DMA_MEMCPY, cap_mask);
+ dma_cap_set(DMA_SG, cap_mask);
dma_cap_set(DMA_XOR, cap_mask);
dma_cap_set(DMA_INTERRUPT, cap_mask);
@@ -1455,12 +1630,7 @@ static struct platform_driver mv_xor_driver = {
},
};
-
-static int __init mv_xor_init(void)
-{
- return platform_driver_register(&mv_xor_driver);
-}
-device_initcall(mv_xor_init);
+builtin_platform_driver(mv_xor_driver);
/*
MODULE_AUTHOR("Saeed Bishara <saeed@marvell.com>");
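mv_xor_prep_dma_sg() above walks the source and destination scatterlists in lockstep: each hardware descriptor covers min(bytes left in the current src entry, bytes left in the current dst entry, hardware maximum), and whichever side runs dry advances to its next entry until either list is exhausted. Stripped of the driver specifics, the loop skeleton looks roughly like this (HW_MAX_LEN and emit_hw_desc() are hypothetical):

#include <linux/kernel.h>        /* min3() */
#include <linux/scatterlist.h>
#include <linux/sizes.h>

#define HW_MAX_LEN ((size_t)SZ_16M)     /* hypothetical per-desc limit */

/* Hypothetical: program one hardware descriptor. */
static void emit_hw_desc(dma_addr_t src, dma_addr_t dst, size_t len);

static void demo_sg_walk(struct scatterlist *src_sg,
                         struct scatterlist *dst_sg)
{
        size_t src_avail = sg_dma_len(src_sg);
        size_t dst_avail = sg_dma_len(dst_sg);

        while (true) {
                size_t len = min3(src_avail, dst_avail, HW_MAX_LEN);

                if (len) {
                        /* Offset from the end of each entry's span. */
                        dma_addr_t src = sg_dma_address(src_sg) +
                                         sg_dma_len(src_sg) - src_avail;
                        dma_addr_t dst = sg_dma_address(dst_sg) +
                                         sg_dma_len(dst_sg) - dst_avail;

                        emit_hw_desc(src, dst, len);
                        src_avail -= len;
                        dst_avail -= len;
                }

                /* Advance whichever scatterlist entry is exhausted. */
                if (!dst_avail) {
                        dst_sg = sg_next(dst_sg);
                        if (!dst_sg)
                                break;
                        dst_avail = sg_dma_len(dst_sg);
                }
                if (!src_avail) {
                        src_sg = sg_next(src_sg);
                        if (!src_sg)
                                break;
                        src_avail = sg_dma_len(src_sg);
                }
        }
}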
diff --git a/drivers/dma/mv_xor.h b/drivers/dma/mv_xor.h
index 88eeab222a23..cf921dd6af73 100644
--- a/drivers/dma/mv_xor.h
+++ b/drivers/dma/mv_xor.h
@@ -148,6 +148,7 @@ struct mv_xor_chan {
*/
struct mv_xor_desc_slot {
struct list_head node;
+ struct list_head sg_tx_list;
enum dma_transaction_type type;
void *hw_desc;
u16 idx;
diff --git a/drivers/dma/nbpfaxi.c b/drivers/dma/nbpfaxi.c
index 09de71519d37..3f45b9bdf201 100644
--- a/drivers/dma/nbpfaxi.c
+++ b/drivers/dma/nbpfaxi.c
@@ -225,6 +225,8 @@ struct nbpf_channel {
struct nbpf_device {
struct dma_device dma_dev;
void __iomem *base;
+ u32 max_burst_mem_read;
+ u32 max_burst_mem_write;
struct clk *clk;
const struct nbpf_config *config;
unsigned int eirq;
@@ -425,10 +427,33 @@ static void nbpf_chan_configure(struct nbpf_channel *chan)
nbpf_chan_write(chan, NBPF_CHAN_CFG, NBPF_CHAN_CFG_DMS | chan->dmarq_cfg);
}
-static u32 nbpf_xfer_ds(struct nbpf_device *nbpf, size_t size)
+static u32 nbpf_xfer_ds(struct nbpf_device *nbpf, size_t size,
+ enum dma_transfer_direction direction)
{
+ int max_burst = nbpf->config->buffer_size * 8;
+
+ if (nbpf->max_burst_mem_read || nbpf->max_burst_mem_write) {
+ switch (direction) {
+ case DMA_MEM_TO_MEM:
+ max_burst = min_not_zero(nbpf->max_burst_mem_read,
+ nbpf->max_burst_mem_write);
+ break;
+ case DMA_MEM_TO_DEV:
+ if (nbpf->max_burst_mem_read)
+ max_burst = nbpf->max_burst_mem_read;
+ break;
+ case DMA_DEV_TO_MEM:
+ if (nbpf->max_burst_mem_write)
+ max_burst = nbpf->max_burst_mem_write;
+ break;
+ case DMA_DEV_TO_DEV:
+ default:
+ break;
+ }
+ }
+
/* Maximum supported bursts depend on the buffer size */
- return min_t(int, __ffs(size), ilog2(nbpf->config->buffer_size * 8));
+ return min_t(int, __ffs(size), ilog2(max_burst));
}
static size_t nbpf_xfer_size(struct nbpf_device *nbpf,
@@ -458,7 +483,7 @@ static size_t nbpf_xfer_size(struct nbpf_device *nbpf,
size = burst;
}
- return nbpf_xfer_ds(nbpf, size);
+ return nbpf_xfer_ds(nbpf, size, DMA_TRANS_NONE);
}
/*
@@ -507,7 +532,7 @@ static int nbpf_prep_one(struct nbpf_link_desc *ldesc,
* transfers we enable the SBE bit and terminate the transfer in our
* .device_pause handler.
*/
- mem_xfer = nbpf_xfer_ds(chan->nbpf, size);
+ mem_xfer = nbpf_xfer_ds(chan->nbpf, size, direction);
switch (direction) {
case DMA_DEV_TO_MEM:
@@ -1313,6 +1338,11 @@ static int nbpf_probe(struct platform_device *pdev)
if (IS_ERR(nbpf->clk))
return PTR_ERR(nbpf->clk);
+ of_property_read_u32(np, "max-burst-mem-read",
+ &nbpf->max_burst_mem_read);
+ of_property_read_u32(np, "max-burst-mem-write",
+ &nbpf->max_burst_mem_write);
+
nbpf->config = cfg;
for (i = 0; irqs < ARRAY_SIZE(irqbuf); i++) {
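Both new nbpf properties are optional: of_property_read_u32() leaves its output untouched on failure, so the zero-initialized fields naturally read as "no DT-imposed limit", and min_not_zero() later picks the stricter of the two limits while ignoring zeros. The idiom, sketched:

#include <linux/kernel.h>        /* min_not_zero() */
#include <linux/of.h>

static u32 demo_mem2mem_limit(struct device_node *np)
{
        u32 max_rd = 0, max_wr = 0;     /* 0 == no limit configured */

        /* Optional properties: the defaults survive a failed read. */
        of_property_read_u32(np, "max-burst-mem-read", &max_rd);
        of_property_read_u32(np, "max-burst-mem-write", &max_wr);

        /* Stricter of the two nonzero limits; 0 when neither is set. */
        return min_not_zero(max_rd, max_wr);
}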
diff --git a/drivers/dma/omap-dma.c b/drivers/dma/omap-dma.c
index 7ca27d4b1c54..ac68666cd3f4 100644
--- a/drivers/dma/omap-dma.c
+++ b/drivers/dma/omap-dma.c
@@ -166,6 +166,9 @@ enum {
CSDP_DST_BURST_16 = 1 << 14,
CSDP_DST_BURST_32 = 2 << 14,
CSDP_DST_BURST_64 = 3 << 14,
+ CSDP_WRITE_NON_POSTED = 0 << 16,
+ CSDP_WRITE_POSTED = 1 << 16,
+ CSDP_WRITE_LAST_NON_POSTED = 2 << 16,
CICR_TOUT_IE = BIT(0), /* OMAP1 only */
CICR_DROP_IE = BIT(1),
@@ -422,7 +425,30 @@ static void omap_dma_start(struct omap_chan *c, struct omap_desc *d)
c->running = true;
}
-static void omap_dma_stop(struct omap_chan *c)
+static void omap_dma_drain_chan(struct omap_chan *c)
+{
+ int i;
+ u32 val;
+
+ /* Wait for sDMA FIFO to drain */
+ for (i = 0; ; i++) {
+ val = omap_dma_chan_read(c, CCR);
+ if (!(val & (CCR_RD_ACTIVE | CCR_WR_ACTIVE)))
+ break;
+
+ if (i > 100)
+ break;
+
+ udelay(5);
+ }
+
+ if (val & (CCR_RD_ACTIVE | CCR_WR_ACTIVE))
+ dev_err(c->vc.chan.device->dev,
+ "DMA drain did not complete on lch %d\n",
+ c->dma_ch);
+}
+
+static int omap_dma_stop(struct omap_chan *c)
{
struct omap_dmadev *od = to_omap_dma_dev(c->vc.chan.device);
uint32_t val;
@@ -435,7 +461,6 @@ static void omap_dma_stop(struct omap_chan *c)
val = omap_dma_chan_read(c, CCR);
if (od->plat->errata & DMA_ERRATA_i541 && val & CCR_TRIGGER_SRC) {
uint32_t sysconfig;
- unsigned i;
sysconfig = omap_dma_glbl_read(od, OCP_SYSCONFIG);
val = sysconfig & ~DMA_SYSCONFIG_MIDLEMODE_MASK;
@@ -446,27 +471,19 @@ static void omap_dma_stop(struct omap_chan *c)
val &= ~CCR_ENABLE;
omap_dma_chan_write(c, CCR, val);
- /* Wait for sDMA FIFO to drain */
- for (i = 0; ; i++) {
- val = omap_dma_chan_read(c, CCR);
- if (!(val & (CCR_RD_ACTIVE | CCR_WR_ACTIVE)))
- break;
-
- if (i > 100)
- break;
-
- udelay(5);
- }
-
- if (val & (CCR_RD_ACTIVE | CCR_WR_ACTIVE))
- dev_err(c->vc.chan.device->dev,
- "DMA drain did not complete on lch %d\n",
- c->dma_ch);
+ if (!(c->ccr & CCR_BUFFERING_DISABLE))
+ omap_dma_drain_chan(c);
omap_dma_glbl_write(od, OCP_SYSCONFIG, sysconfig);
} else {
+ if (!(val & CCR_ENABLE))
+ return -EINVAL;
+
val &= ~CCR_ENABLE;
omap_dma_chan_write(c, CCR, val);
+
+ if (!(c->ccr & CCR_BUFFERING_DISABLE))
+ omap_dma_drain_chan(c);
}
mb();
@@ -481,8 +498,8 @@ static void omap_dma_stop(struct omap_chan *c)
omap_dma_chan_write(c, CLNK_CTRL, val);
}
-
c->running = false;
+ return 0;
}
static void omap_dma_start_sg(struct omap_chan *c, struct omap_desc *d)
@@ -836,6 +853,8 @@ static enum dma_status omap_dma_tx_status(struct dma_chan *chan,
} else {
txstate->residue = 0;
}
+ if (ret == DMA_IN_PROGRESS && c->paused)
+ ret = DMA_PAUSED;
spin_unlock_irqrestore(&c->vc.lock, flags);
return ret;
@@ -865,15 +884,18 @@ static struct dma_async_tx_descriptor *omap_dma_prep_slave_sg(
unsigned i, es, en, frame_bytes;
bool ll_failed = false;
u32 burst;
+ u32 port_window, port_window_bytes;
if (dir == DMA_DEV_TO_MEM) {
dev_addr = c->cfg.src_addr;
dev_width = c->cfg.src_addr_width;
burst = c->cfg.src_maxburst;
+ port_window = c->cfg.src_port_window_size;
} else if (dir == DMA_MEM_TO_DEV) {
dev_addr = c->cfg.dst_addr;
dev_width = c->cfg.dst_addr_width;
burst = c->cfg.dst_maxburst;
+ port_window = c->cfg.dst_port_window_size;
} else {
dev_err(chan->device->dev, "%s: bad direction?\n", __func__);
return NULL;
@@ -894,6 +916,12 @@ static struct dma_async_tx_descriptor *omap_dma_prep_slave_sg(
return NULL;
}
+ /* When the port_window is used, one frame must cover the window */
+ if (port_window) {
+ burst = port_window;
+ port_window_bytes = port_window * es_bytes[es];
+ }
+
/* Now allocate and setup the descriptor. */
d = kzalloc(sizeof(*d) + sglen * sizeof(d->sg[0]), GFP_ATOMIC);
if (!d)
@@ -905,11 +933,45 @@ static struct dma_async_tx_descriptor *omap_dma_prep_slave_sg(
d->ccr = c->ccr | CCR_SYNC_FRAME;
if (dir == DMA_DEV_TO_MEM) {
- d->ccr |= CCR_DST_AMODE_POSTINC | CCR_SRC_AMODE_CONSTANT;
d->csdp = CSDP_DST_BURST_64 | CSDP_DST_PACKED;
+
+ d->ccr |= CCR_DST_AMODE_POSTINC;
+ if (port_window) {
+ d->ccr |= CCR_SRC_AMODE_DBLIDX;
+ d->ei = 1;
+ /*
+ * One frame covers the port_window; by configuring the source
+ * frame index to -1 * (port_window - 1) we instruct the sDMA
+ * to move back to the start of the window after each frame is
+ * processed.
+ */
+ d->fi = -(port_window_bytes - 1);
+
+ if (port_window_bytes >= 64)
+ d->csdp = CSDP_SRC_BURST_64 | CSDP_SRC_PACKED;
+ else if (port_window_bytes >= 32)
+ d->csdp = CSDP_SRC_BURST_32 | CSDP_SRC_PACKED;
+ else if (port_window_bytes >= 16)
+ d->csdp = CSDP_SRC_BURST_16 | CSDP_SRC_PACKED;
+ } else {
+ d->ccr |= CCR_SRC_AMODE_CONSTANT;
+ }
} else {
- d->ccr |= CCR_DST_AMODE_CONSTANT | CCR_SRC_AMODE_POSTINC;
d->csdp = CSDP_SRC_BURST_64 | CSDP_SRC_PACKED;
+
+ d->ccr |= CCR_SRC_AMODE_POSTINC;
+ if (port_window) {
+ d->ccr |= CCR_DST_AMODE_DBLIDX;
+
+ if (port_window_bytes >= 64)
+ d->csdp = CSDP_DST_BURST_64 | CSDP_DST_PACKED;
+ else if (port_window_bytes >= 32)
+ d->csdp = CSDP_DST_BURST_32 | CSDP_DST_PACKED;
+ else if (port_window_bytes >= 16)
+ d->csdp = CSDP_DST_BURST_16 | CSDP_DST_PACKED;
+ } else {
+ d->ccr |= CCR_DST_AMODE_CONSTANT;
+ }
}
d->cicr = CICR_DROP_IE | CICR_BLOCK_IE;
@@ -927,6 +989,9 @@ static struct dma_async_tx_descriptor *omap_dma_prep_slave_sg(
d->ccr |= CCR_TRIGGER_SRC;
d->cicr |= CICR_MISALIGNED_ERR_IE | CICR_TRANS_ERR_IE;
+
+ if (port_window)
+ d->csdp |= CSDP_WRITE_LAST_NON_POSTED;
}
if (od->plat->errata & DMA_ERRATA_PARALLEL_CHANNELS)
d->clnk_ctrl = c->dma_ch;
@@ -952,6 +1017,16 @@ static struct dma_async_tx_descriptor *omap_dma_prep_slave_sg(
osg->addr = sg_dma_address(sgent);
osg->en = en;
osg->fn = sg_dma_len(sgent) / frame_bytes;
+ if (port_window && dir == DMA_MEM_TO_DEV) {
+ osg->ei = 1;
+ /*
+ * One frame covers the port_window; by configuring the frame
+ * index to -1 * (port_window - 1) we instruct the sDMA to move
+ * back to the start of the window after each frame is
+ * processed.
+ */
+ osg->fi = -(port_window_bytes - 1);
+ }
if (d->using_ll) {
osg->t2_desc = dma_pool_alloc(od->desc_pool, GFP_ATOMIC,
@@ -1247,10 +1322,8 @@ static int omap_dma_terminate_all(struct dma_chan *chan)
omap_dma_stop(c);
}
- if (c->cyclic) {
- c->cyclic = false;
- c->paused = false;
- }
+ c->cyclic = false;
+ c->paused = false;
vchan_get_all_descriptors(&c->vc, &head);
spin_unlock_irqrestore(&c->vc.lock, flags);
@@ -1269,28 +1342,66 @@ static void omap_dma_synchronize(struct dma_chan *chan)
static int omap_dma_pause(struct dma_chan *chan)
{
struct omap_chan *c = to_omap_dma_chan(chan);
+ struct omap_dmadev *od = to_omap_dma_dev(chan->device);
+ unsigned long flags;
+ int ret = -EINVAL;
+ bool can_pause = false;
- /* Pause/Resume only allowed with cyclic mode */
- if (!c->cyclic)
- return -EINVAL;
+ spin_lock_irqsave(&od->irq_lock, flags);
- if (!c->paused) {
- omap_dma_stop(c);
- c->paused = true;
+ if (!c->desc)
+ goto out;
+
+ if (c->cyclic)
+ can_pause = true;
+
+ /*
+ * We do not allow DMA_MEM_TO_DEV transfers to be paused.
+ * From the AM572x TRM, 16.1.4.18 Disabling a Channel During Transfer:
+ * "When a channel is disabled during a transfer, the channel undergoes
+ * an abort, unless it is hardware-source-synchronized …".
+ * A source-synchronised channel is one where the fetching of data is
+ * under control of the device. In other words, a device-to-memory
+ * transfer. So, a destination-synchronised channel (which would be a
+ * memory-to-device transfer) undergoes an abort if the CCR_ENABLE
+ * bit is cleared.
+ * From 16.1.4.20.4.6.2 Abort: "If an abort trigger occurs, the channel
+ * aborts immediately after completion of current read/write
+ * transactions and then the FIFO is cleaned up." The term "cleaned up"
+ * is not defined. TI recommends to check that RD_ACTIVE and WR_ACTIVE
+ * are both clear _before_ disabling the channel, otherwise data loss
+ * will occur.
+ * The problem is that if the channel is active, device activity can
+ * start new DMA between our reading those bits as clear and our write
+ * to DMA_CCR clearing the enable bit reaching the hardware. If the DMA
+ * hardware can't drain the data in its FIFO to the
+ * destination, then data loss "might" occur (say if we write to a UART
+ * and the UART is not accepting any further data).
+ */
+ else if (c->desc->dir == DMA_DEV_TO_MEM)
+ can_pause = true;
+
+ if (can_pause && !c->paused) {
+ ret = omap_dma_stop(c);
+ if (!ret)
+ c->paused = true;
}
+out:
+ spin_unlock_irqrestore(&od->irq_lock, flags);
- return 0;
+ return ret;
}
static int omap_dma_resume(struct dma_chan *chan)
{
struct omap_chan *c = to_omap_dma_chan(chan);
+ struct omap_dmadev *od = to_omap_dma_dev(chan->device);
+ unsigned long flags;
+ int ret = -EINVAL;
- /* Pause/Resume only allowed with cyclic mode */
- if (!c->cyclic)
- return -EINVAL;
+ spin_lock_irqsave(&od->irq_lock, flags);
- if (c->paused) {
+ if (c->paused && c->desc) {
mb();
/* Restore channel link register */
@@ -1298,9 +1409,11 @@ static int omap_dma_resume(struct dma_chan *chan)
omap_dma_start(c, c->desc);
c->paused = false;
+ ret = 0;
}
+ spin_unlock_irqrestore(&od->irq_lock, flags);
- return 0;
+ return ret;
}
static int omap_dma_chan_init(struct omap_dmadev *od)
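After the omap-dma rework, pausing is no longer restricted to cyclic transfers but is still refused for MEM_TO_DEV (where clearing CCR_ENABLE aborts the channel, per the TRM excerpt above), and tx_status() reports DMA_PAUSED for a paused channel. A client should therefore check dmaengine_pause()'s return value instead of assuming it succeeded; a hedged sketch:

#include <linux/dmaengine.h>

static int demo_pause_and_report(struct dma_chan *chan, dma_cookie_t cookie)
{
        struct dma_tx_state state;
        int ret;

        ret = dmaengine_pause(chan);
        if (ret)
                return ret;     /* e.g. -EINVAL for a MEM_TO_DEV transfer */

        if (dmaengine_tx_status(chan, cookie, &state) == DMA_PAUSED)
                pr_debug("paused, residue %u bytes\n", state.residue);

        return 0;
}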
diff --git a/drivers/dma/pch_dma.c b/drivers/dma/pch_dma.c
index df95727dc2fb..f9028e9d0dfc 100644
--- a/drivers/dma/pch_dma.c
+++ b/drivers/dma/pch_dma.c
@@ -417,10 +417,8 @@ static dma_cookie_t pd_tx_submit(struct dma_async_tx_descriptor *txd)
{
struct pch_dma_desc *desc = to_pd_desc(txd);
struct pch_dma_chan *pd_chan = to_pd_chan(txd->chan);
- dma_cookie_t cookie;
spin_lock(&pd_chan->lock);
- cookie = dma_cookie_assign(txd);
if (list_empty(&pd_chan->active_list)) {
list_add_tail(&desc->desc_node, &pd_chan->active_list);
@@ -439,9 +437,8 @@ static struct pch_dma_desc *pdc_alloc_desc(struct dma_chan *chan, gfp_t flags)
struct pch_dma *pd = to_pd(chan->device);
dma_addr_t addr;
- desc = pci_pool_alloc(pd->pool, flags, &addr);
+ desc = pci_pool_zalloc(pd->pool, flags, &addr);
if (desc) {
- memset(desc, 0, sizeof(struct pch_dma_desc));
INIT_LIST_HEAD(&desc->tx_list);
dma_async_tx_descriptor_init(&desc->txd, chan);
desc->txd.tx_submit = pd_tx_submit;
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
index 030fe05ed43b..87fd01539fcb 100644
--- a/drivers/dma/pl330.c
+++ b/drivers/dma/pl330.c
@@ -570,7 +570,8 @@ static inline u32 _emit_ADDH(unsigned dry_run, u8 buf[],
buf[0] = CMD_DMAADDH;
buf[0] |= (da << 1);
- *((__le16 *)&buf[1]) = cpu_to_le16(val);
+ buf[1] = val;
+ buf[2] = val >> 8;
PL330_DBGCMD_DUMP(SZ_DMAADDH, "\tDMAADDH %s %u\n",
da == 1 ? "DA" : "SA", val);
@@ -724,7 +725,10 @@ static inline u32 _emit_MOV(unsigned dry_run, u8 buf[],
buf[0] = CMD_DMAMOV;
buf[1] = dst;
- *((__le32 *)&buf[2]) = cpu_to_le32(val);
+ buf[2] = val;
+ buf[3] = val >> 8;
+ buf[4] = val >> 16;
+ buf[5] = val >> 24;
PL330_DBGCMD_DUMP(SZ_DMAMOV, "\tDMAMOV %s 0x%x\n",
dst == SAR ? "SAR" : (dst == DAR ? "DAR" : "CCR"), val);
@@ -899,10 +903,11 @@ static inline u32 _emit_GO(unsigned dry_run, u8 buf[],
buf[0] = CMD_DMAGO;
buf[0] |= (ns << 1);
-
buf[1] = chan & 0x7;
-
- *((__le32 *)&buf[2]) = cpu_to_le32(addr);
+ buf[2] = addr;
+ buf[3] = addr >> 8;
+ buf[4] = addr >> 16;
+ buf[5] = addr >> 24;
return SZ_DMAGO;
}
@@ -1883,11 +1888,8 @@ static int dmac_alloc_resources(struct pl330_dmac *pl330)
static int pl330_add(struct pl330_dmac *pl330)
{
- void __iomem *regs;
int i, ret;
- regs = pl330->base;
-
/* Check if we can handle this DMAC */
if ((pl330->pcfg.periph_id & 0xfffff) != PERIPH_ID_VAL) {
dev_err(pl330->ddma.dev, "PERIPH_ID 0x%x !\n",
@@ -2263,6 +2265,11 @@ static int pl330_get_current_xferred_count(struct dma_pl330_chan *pch,
}
pm_runtime_mark_last_busy(pch->dmac->ddma.dev);
pm_runtime_put_autosuspend(pl330->ddma.dev);
+
+ /* If DMAMOV hasn't finished yet, SAR/DAR can be zero */
+ if (!val)
+ return 0;
+
return val - addr;
}
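The pl330 hunks replace casts like *((__le32 *)&buf[2]) = cpu_to_le32(val), which perform potentially unaligned stores into the microcode buffer, with byte-at-a-time stores that are alignment- and endian-safe on every architecture. The same effect as a helper, sketched (the kernel's put_unaligned_le32() is an equivalent alternative):

#include <linux/types.h>

/* Store a 32-bit value little-endian at any (even unaligned) address. */
static inline void demo_emit_le32(u8 *buf, u32 val)
{
        buf[0] = val;
        buf[1] = val >> 8;
        buf[2] = val >> 16;
        buf[3] = val >> 24;
}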
diff --git a/drivers/dma/pxa_dma.c b/drivers/dma/pxa_dma.c
index 3f56f9ca4482..b53fb618bbf6 100644
--- a/drivers/dma/pxa_dma.c
+++ b/drivers/dma/pxa_dma.c
@@ -413,15 +413,6 @@ static inline void pxad_init_debugfs(struct pxad_device *pdev) {}
static inline void pxad_cleanup_debugfs(struct pxad_device *pdev) {}
#endif
-/*
- * In the transition phase where legacy pxa handling is done at the same time as
- * mmp_dma, the DMA physical channel split between the 2 DMA providers is done
- * through legacy_reserved. Legacy code reserves DMA channels by settings
- * corresponding bits in legacy_reserved.
- */
-static u32 legacy_reserved;
-static u32 legacy_unavailable;
-
static struct pxad_phy *lookup_phy(struct pxad_chan *pchan)
{
int prio, i;
@@ -442,14 +433,10 @@ static struct pxad_phy *lookup_phy(struct pxad_chan *pchan)
for (i = 0; i < pdev->nr_chans; i++) {
if (prio != (i & 0xf) >> 2)
continue;
- if ((i < 32) && (legacy_reserved & BIT(i)))
- continue;
phy = &pdev->phys[i];
if (!phy->vchan) {
phy->vchan = pchan;
found = phy;
- if (i < 32)
- legacy_unavailable |= BIT(i);
goto out_unlock;
}
}
@@ -469,7 +456,6 @@ static void pxad_free_phy(struct pxad_chan *chan)
struct pxad_device *pdev = to_pxad_dev(chan->vc.chan.device);
unsigned long flags;
u32 reg;
- int i;
dev_dbg(&chan->vc.chan.dev->device,
"%s(): freeing\n", __func__);
@@ -483,9 +469,6 @@ static void pxad_free_phy(struct pxad_chan *chan)
}
spin_lock_irqsave(&pdev->phy_lock, flags);
- for (i = 0; i < 32; i++)
- if (chan->phy == &pdev->phys[i])
- legacy_unavailable &= ~BIT(i);
chan->phy->vchan = NULL;
chan->phy = NULL;
spin_unlock_irqrestore(&pdev->phy_lock, flags);
@@ -739,8 +722,6 @@ static irqreturn_t pxad_int_handler(int irq, void *dev_id)
i = __ffs(dint);
dint &= (dint - 1);
phy = &pdev->phys[i];
- if ((i < 32) && (legacy_reserved & BIT(i)))
- continue;
if (pxad_chan_handler(irq, phy) == IRQ_HANDLED)
ret = IRQ_HANDLED;
}
@@ -1522,15 +1503,6 @@ bool pxad_filter_fn(struct dma_chan *chan, void *param)
}
EXPORT_SYMBOL_GPL(pxad_filter_fn);
-int pxad_toggle_reserved_channel(int legacy_channel)
-{
- if (legacy_unavailable & (BIT(legacy_channel)))
- return -EBUSY;
- legacy_reserved ^= BIT(legacy_channel);
- return 0;
-}
-EXPORT_SYMBOL_GPL(pxad_toggle_reserved_channel);
-
module_platform_driver(pxad_driver);
MODULE_DESCRIPTION("Marvell PXA Peripheral DMA Driver");
diff --git a/drivers/dma/qcom/hidma.c b/drivers/dma/qcom/hidma.c
index e244e10a94b5..3c982c96b4b7 100644
--- a/drivers/dma/qcom/hidma.c
+++ b/drivers/dma/qcom/hidma.c
@@ -56,6 +56,7 @@
#include <linux/irq.h>
#include <linux/atomic.h>
#include <linux/pm_runtime.h>
+#include <linux/msi.h>
#include "../dmaengine.h"
#include "hidma.h"
@@ -70,6 +71,7 @@
#define HIDMA_ERR_INFO_SW 0xFF
#define HIDMA_ERR_CODE_UNEXPECTED_TERMINATE 0x0
#define HIDMA_NR_DEFAULT_DESC 10
+#define HIDMA_MSI_INTS 11
static inline struct hidma_dev *to_hidma_dev(struct dma_device *dmadev)
{
@@ -553,6 +555,17 @@ static irqreturn_t hidma_chirq_handler(int chirq, void *arg)
return hidma_ll_inthandler(chirq, lldev);
}
+#ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN
+static irqreturn_t hidma_chirq_handler_msi(int chirq, void *arg)
+{
+ struct hidma_lldev **lldevp = arg;
+ struct hidma_dev *dmadev = to_hidma_dev_from_lldev(lldevp);
+
+ return hidma_ll_inthandler_msi(chirq, *lldevp,
+ 1 << (chirq - dmadev->msi_virqbase));
+}
+#endif
+
static ssize_t hidma_show_values(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -567,8 +580,13 @@ static ssize_t hidma_show_values(struct device *dev,
return strlen(buf);
}
-static int hidma_create_sysfs_entry(struct hidma_dev *dev, char *name,
- int mode)
+static inline void hidma_sysfs_uninit(struct hidma_dev *dev)
+{
+ device_remove_file(dev->ddev.dev, dev->chid_attrs);
+}
+
+static struct device_attribute*
+hidma_create_sysfs_entry(struct hidma_dev *dev, char *name, int mode)
{
struct device_attribute *attrs;
char *name_copy;
@@ -576,18 +594,125 @@ static int hidma_create_sysfs_entry(struct hidma_dev *dev, char *name,
attrs = devm_kmalloc(dev->ddev.dev, sizeof(struct device_attribute),
GFP_KERNEL);
if (!attrs)
- return -ENOMEM;
+ return NULL;
name_copy = devm_kstrdup(dev->ddev.dev, name, GFP_KERNEL);
if (!name_copy)
- return -ENOMEM;
+ return NULL;
attrs->attr.name = name_copy;
attrs->attr.mode = mode;
attrs->show = hidma_show_values;
sysfs_attr_init(&attrs->attr);
- return device_create_file(dev->ddev.dev, attrs);
+ return attrs;
+}
+
+static int hidma_sysfs_init(struct hidma_dev *dev)
+{
+ dev->chid_attrs = hidma_create_sysfs_entry(dev, "chid", S_IRUGO);
+ if (!dev->chid_attrs)
+ return -ENOMEM;
+
+ return device_create_file(dev->ddev.dev, dev->chid_attrs);
+}
+
+#ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN
+static void hidma_write_msi_msg(struct msi_desc *desc, struct msi_msg *msg)
+{
+ struct device *dev = msi_desc_to_dev(desc);
+ struct hidma_dev *dmadev = dev_get_drvdata(dev);
+
+ if (!desc->platform.msi_index) {
+ writel(msg->address_lo, dmadev->dev_evca + 0x118);
+ writel(msg->address_hi, dmadev->dev_evca + 0x11C);
+ writel(msg->data, dmadev->dev_evca + 0x120);
+ }
+}
+#endif
+
+static void hidma_free_msis(struct hidma_dev *dmadev)
+{
+#ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN
+ struct device *dev = dmadev->ddev.dev;
+ struct msi_desc *desc;
+
+ /* Release the handlers before freeing the MSI vectors below */
+ for_each_msi_entry(desc, dev)
+ devm_free_irq(dev, desc->irq, &dmadev->lldev);
+
+ platform_msi_domain_free_irqs(dev);
+#endif
+}
+
+static int hidma_request_msi(struct hidma_dev *dmadev,
+ struct platform_device *pdev)
+{
+#ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN
+ int rc;
+ struct msi_desc *desc;
+ struct msi_desc *failed_desc = NULL;
+
+ rc = platform_msi_domain_alloc_irqs(&pdev->dev, HIDMA_MSI_INTS,
+ hidma_write_msi_msg);
+ if (rc)
+ return rc;
+
+ for_each_msi_entry(desc, &pdev->dev) {
+ if (!desc->platform.msi_index)
+ dmadev->msi_virqbase = desc->irq;
+
+ rc = devm_request_irq(&pdev->dev, desc->irq,
+ hidma_chirq_handler_msi,
+ 0, "qcom-hidma-msi",
+ &dmadev->lldev);
+ if (rc) {
+ failed_desc = desc;
+ break;
+ }
+ }
+
+ if (rc) {
+ /* free allocated MSI interrupts above */
+ for_each_msi_entry(desc, &pdev->dev) {
+ if (desc == failed_desc)
+ break;
+ devm_free_irq(&pdev->dev, desc->irq,
+ &dmadev->lldev);
+ }
+ } else {
+ /* All MSIs were requested; switch the hardware into MSI mode */
+ hidma_ll_setup_irq(dmadev->lldev, true);
+ }
+ if (rc)
+ dev_warn(&pdev->dev,
+ "failed to request MSI irq, falling back to wired IRQ\n");
+ return rc;
+#else
+ return -EINVAL;
+#endif
+}
+
+static bool hidma_msi_capable(struct device *dev)
+{
+ struct acpi_device *adev = ACPI_COMPANION(dev);
+ const char *of_compat;
+ int ret = -EINVAL;
+
+ if (!adev || acpi_disabled) {
+ ret = device_property_read_string(dev, "compatible",
+ &of_compat);
+ if (ret)
+ return false;
+
+ ret = strcmp(of_compat, "qcom,hidma-1.1");
+ } else {
+#ifdef CONFIG_ACPI
+ ret = strcmp(acpi_device_hid(adev), "QCOM8062");
+#endif
+ }
+ return ret == 0;
}
static int hidma_probe(struct platform_device *pdev)
@@ -599,6 +724,7 @@ static int hidma_probe(struct platform_device *pdev)
void __iomem *evca;
void __iomem *trca;
int rc;
+ bool msi;
pm_runtime_set_autosuspend_delay(&pdev->dev, HIDMA_AUTOSUSPEND_TIMEOUT);
pm_runtime_use_autosuspend(&pdev->dev);
@@ -660,6 +786,12 @@ static int hidma_probe(struct platform_device *pdev)
dmadev->ddev.device_terminate_all = hidma_terminate_all;
dmadev->ddev.copy_align = 8;
+ /*
+ * Determine the MSI capability of the platform. Old HW doesn't
+ * support MSI.
+ */
+ msi = hidma_msi_capable(&pdev->dev);
+
device_property_read_u32(&pdev->dev, "desc-count",
&dmadev->nr_descriptors);
@@ -688,10 +820,17 @@ static int hidma_probe(struct platform_device *pdev)
goto dmafree;
}
- rc = devm_request_irq(&pdev->dev, chirq, hidma_chirq_handler, 0,
- "qcom-hidma", dmadev->lldev);
- if (rc)
- goto uninit;
+ platform_set_drvdata(pdev, dmadev);
+ if (msi)
+ rc = hidma_request_msi(dmadev, pdev);
+
+ if (!msi || rc) {
+ hidma_ll_setup_irq(dmadev->lldev, false);
+ rc = devm_request_irq(&pdev->dev, chirq, hidma_chirq_handler,
+ 0, "qcom-hidma", dmadev->lldev);
+ if (rc)
+ goto uninit;
+ }
INIT_LIST_HEAD(&dmadev->ddev.channels);
rc = hidma_chan_init(dmadev, 0);
@@ -705,14 +844,16 @@ static int hidma_probe(struct platform_device *pdev)
dmadev->irq = chirq;
tasklet_init(&dmadev->task, hidma_issue_task, (unsigned long)dmadev);
hidma_debug_init(dmadev);
- hidma_create_sysfs_entry(dmadev, "chid", S_IRUGO);
+ hidma_sysfs_init(dmadev);
dev_info(&pdev->dev, "HI-DMA engine driver registration complete\n");
- platform_set_drvdata(pdev, dmadev);
pm_runtime_mark_last_busy(dmadev->ddev.dev);
pm_runtime_put_autosuspend(dmadev->ddev.dev);
return 0;
uninit:
+ if (msi)
+ hidma_free_msis(dmadev);
+
hidma_debug_uninit(dmadev);
hidma_ll_uninit(dmadev->lldev);
dmafree:
@@ -730,8 +871,13 @@ static int hidma_remove(struct platform_device *pdev)
pm_runtime_get_sync(dmadev->ddev.dev);
dma_async_device_unregister(&dmadev->ddev);
- devm_free_irq(dmadev->ddev.dev, dmadev->irq, dmadev->lldev);
+ if (!dmadev->lldev->msi_support)
+ devm_free_irq(dmadev->ddev.dev, dmadev->irq, dmadev->lldev);
+ else
+ hidma_free_msis(dmadev);
+
tasklet_kill(&dmadev->task);
+ hidma_sysfs_uninit(dmadev);
hidma_debug_uninit(dmadev);
hidma_ll_uninit(dmadev->lldev);
hidma_free(dmadev);
@@ -746,12 +892,15 @@ static int hidma_remove(struct platform_device *pdev)
#if IS_ENABLED(CONFIG_ACPI)
static const struct acpi_device_id hidma_acpi_ids[] = {
{"QCOM8061"},
+ {"QCOM8062"},
{},
};
+MODULE_DEVICE_TABLE(acpi, hidma_acpi_ids);
#endif
static const struct of_device_id hidma_match[] = {
{.compatible = "qcom,hidma-1.0",},
+ {.compatible = "qcom,hidma-1.1",},
{},
};
MODULE_DEVICE_TABLE(of, hidma_match);
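The hidma probe now follows a detect/try/fall-back shape: MSI capability is inferred from the compatible string or ACPI HID, platform MSI vectors are requested when it is present, and any failure drops back to the wired interrupt so pre-MSI hardware keeps working. A reduced sketch under the same assumptions (the capability predicate, vector count, and message-writer are illustrative):

#include <linux/interrupt.h>
#include <linux/msi.h>

#define DEMO_NR_VECTORS 11      /* illustrative vector count */

/* Hypothetical: platform-specific MSI capability test. */
static bool demo_msi_capable(struct device *dev);
/* Hypothetical: write the MSI message into device registers. */
static void demo_write_msi_msg(struct msi_desc *desc, struct msi_msg *msg);

static int demo_setup_irqs(struct device *dev, int wired_irq,
                           irq_handler_t handler, void *data)
{
        bool msi = demo_msi_capable(dev);
        int rc = -EINVAL;

        if (msi)
                rc = platform_msi_domain_alloc_irqs(dev, DEMO_NR_VECTORS,
                                                    demo_write_msi_msg);

        if (!msi || rc) {
                /* Wired-IRQ fallback keeps older hardware working. */
                rc = devm_request_irq(dev, wired_irq, handler, 0,
                                      "demo-dma", data);
        }

        return rc;
}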
diff --git a/drivers/dma/qcom/hidma.h b/drivers/dma/qcom/hidma.h
index e52e20716303..c7d014235c32 100644
--- a/drivers/dma/qcom/hidma.h
+++ b/drivers/dma/qcom/hidma.h
@@ -46,6 +46,7 @@ struct hidma_tre {
};
struct hidma_lldev {
+ bool msi_support; /* flag indicating MSI support */
bool initialized; /* initialized flag */
u8 trch_state; /* trch_state of the device */
u8 evch_state; /* evch_state of the device */
@@ -58,7 +59,7 @@ struct hidma_lldev {
void __iomem *evca; /* Event Channel address */
struct hidma_tre
**pending_tre_list; /* Pointers to pending TREs */
- s32 pending_tre_count; /* Number of TREs pending */
+ atomic_t pending_tre_count; /* Number of TREs pending */
void *tre_ring; /* TRE ring */
dma_addr_t tre_dma; /* TRE ring to be shared with HW */
@@ -114,6 +115,7 @@ struct hidma_dev {
int irq;
int chidx;
u32 nr_descriptors;
+ int msi_virqbase;
struct hidma_lldev *lldev;
void __iomem *dev_trca;
@@ -128,6 +130,9 @@ struct hidma_dev {
struct dentry *debugfs;
struct dentry *stats;
+ /* sysfs entry for the channel id */
+ struct device_attribute *chid_attrs;
+
/* Task delivering issue_pending */
struct tasklet_struct task;
};
@@ -145,12 +150,14 @@ int hidma_ll_disable(struct hidma_lldev *lldev);
int hidma_ll_enable(struct hidma_lldev *llhndl);
void hidma_ll_set_transfer_params(struct hidma_lldev *llhndl, u32 tre_ch,
dma_addr_t src, dma_addr_t dest, u32 len, u32 flags);
+void hidma_ll_setup_irq(struct hidma_lldev *lldev, bool msi);
int hidma_ll_setup(struct hidma_lldev *lldev);
struct hidma_lldev *hidma_ll_init(struct device *dev, u32 max_channels,
void __iomem *trca, void __iomem *evca,
u8 chidx);
int hidma_ll_uninit(struct hidma_lldev *llhndl);
irqreturn_t hidma_ll_inthandler(int irq, void *arg);
+irqreturn_t hidma_ll_inthandler_msi(int irq, void *arg, int cause);
void hidma_cleanup_pending_tre(struct hidma_lldev *llhndl, u8 err_info,
u8 err_code);
int hidma_debug_init(struct hidma_dev *dmadev);
diff --git a/drivers/dma/qcom/hidma_dbg.c b/drivers/dma/qcom/hidma_dbg.c
index fa827e5ffd68..3bdcb8056a36 100644
--- a/drivers/dma/qcom/hidma_dbg.c
+++ b/drivers/dma/qcom/hidma_dbg.c
@@ -74,7 +74,8 @@ static void hidma_ll_devstats(struct seq_file *s, void *llhndl)
seq_printf(s, "tre_ring_handle=%pap\n", &lldev->tre_dma);
seq_printf(s, "tre_ring_size = 0x%x\n", lldev->tre_ring_size);
seq_printf(s, "tre_processed_off = 0x%x\n", lldev->tre_processed_off);
- seq_printf(s, "pending_tre_count=%d\n", lldev->pending_tre_count);
+ seq_printf(s, "pending_tre_count=%d\n",
+ atomic_read(&lldev->pending_tre_count));
seq_printf(s, "evca=%p\n", lldev->evca);
seq_printf(s, "evre_ring=%p\n", lldev->evre_ring);
seq_printf(s, "evre_ring_handle=%pap\n", &lldev->evre_dma);
@@ -164,7 +165,6 @@ static const struct file_operations hidma_dma_fops = {
void hidma_debug_uninit(struct hidma_dev *dmadev)
{
debugfs_remove_recursive(dmadev->debugfs);
- debugfs_remove_recursive(dmadev->stats);
}
int hidma_debug_init(struct hidma_dev *dmadev)
diff --git a/drivers/dma/qcom/hidma_ll.c b/drivers/dma/qcom/hidma_ll.c
index 3224f24c577b..6645bdf0d151 100644
--- a/drivers/dma/qcom/hidma_ll.c
+++ b/drivers/dma/qcom/hidma_ll.c
@@ -198,13 +198,16 @@ static void hidma_ll_tre_complete(unsigned long arg)
}
}
-static int hidma_post_completed(struct hidma_lldev *lldev, int tre_iterator,
- u8 err_info, u8 err_code)
+static int hidma_post_completed(struct hidma_lldev *lldev, u8 err_info,
+ u8 err_code)
{
struct hidma_tre *tre;
unsigned long flags;
+ u32 tre_iterator;
spin_lock_irqsave(&lldev->lock, flags);
+
+ tre_iterator = lldev->tre_processed_off;
tre = lldev->pending_tre_list[tre_iterator / HIDMA_TRE_SIZE];
if (!tre) {
spin_unlock_irqrestore(&lldev->lock, flags);
@@ -218,12 +221,14 @@ static int hidma_post_completed(struct hidma_lldev *lldev, int tre_iterator,
* Keep track of pending TREs that SW is expecting to receive
* from HW. We got one now. Decrement our counter.
*/
- lldev->pending_tre_count--;
- if (lldev->pending_tre_count < 0) {
+ if (atomic_dec_return(&lldev->pending_tre_count) < 0) {
dev_warn(lldev->dev, "tre count mismatch on completion");
- lldev->pending_tre_count = 0;
+ atomic_set(&lldev->pending_tre_count, 0);
}
+ HIDMA_INCREMENT_ITERATOR(tre_iterator, HIDMA_TRE_SIZE,
+ lldev->tre_ring_size);
+ lldev->tre_processed_off = tre_iterator;
spin_unlock_irqrestore(&lldev->lock, flags);
tre->err_info = err_info;
@@ -245,13 +250,11 @@ static int hidma_post_completed(struct hidma_lldev *lldev, int tre_iterator,
static int hidma_handle_tre_completion(struct hidma_lldev *lldev)
{
u32 evre_ring_size = lldev->evre_ring_size;
- u32 tre_ring_size = lldev->tre_ring_size;
u32 err_info, err_code, evre_write_off;
- u32 tre_iterator, evre_iterator;
+ u32 evre_iterator;
u32 num_completed = 0;
evre_write_off = readl_relaxed(lldev->evca + HIDMA_EVCA_WRITE_PTR_REG);
- tre_iterator = lldev->tre_processed_off;
evre_iterator = lldev->evre_processed_off;
if ((evre_write_off > evre_ring_size) ||
@@ -274,12 +277,9 @@ static int hidma_handle_tre_completion(struct hidma_lldev *lldev)
err_code =
(cfg >> HIDMA_EVRE_CODE_BIT_POS) & HIDMA_EVRE_CODE_MASK;
- if (hidma_post_completed(lldev, tre_iterator, err_info,
- err_code))
+ if (hidma_post_completed(lldev, err_info, err_code))
break;
- HIDMA_INCREMENT_ITERATOR(tre_iterator, HIDMA_TRE_SIZE,
- tre_ring_size);
HIDMA_INCREMENT_ITERATOR(evre_iterator, HIDMA_EVRE_SIZE,
evre_ring_size);
@@ -291,21 +291,22 @@ static int hidma_handle_tre_completion(struct hidma_lldev *lldev)
evre_write_off =
readl_relaxed(lldev->evca + HIDMA_EVCA_WRITE_PTR_REG);
num_completed++;
+
+ /*
+ * An error interrupt might have arrived while we were
+ * processing the completion interrupt.
+ */
+ if (!hidma_ll_isenabled(lldev))
+ break;
}
if (num_completed) {
u32 evre_read_off = (lldev->evre_processed_off +
HIDMA_EVRE_SIZE * num_completed);
- u32 tre_read_off = (lldev->tre_processed_off +
- HIDMA_TRE_SIZE * num_completed);
-
evre_read_off = evre_read_off % evre_ring_size;
- tre_read_off = tre_read_off % tre_ring_size;
-
writel(evre_read_off, lldev->evca + HIDMA_EVCA_DOORBELL_REG);
/* record the last processed tre offset */
- lldev->tre_processed_off = tre_read_off;
lldev->evre_processed_off = evre_read_off;
}
@@ -315,27 +316,10 @@ static int hidma_handle_tre_completion(struct hidma_lldev *lldev)
void hidma_cleanup_pending_tre(struct hidma_lldev *lldev, u8 err_info,
u8 err_code)
{
- u32 tre_iterator;
- u32 tre_ring_size = lldev->tre_ring_size;
- int num_completed = 0;
- u32 tre_read_off;
-
- tre_iterator = lldev->tre_processed_off;
- while (lldev->pending_tre_count) {
- if (hidma_post_completed(lldev, tre_iterator, err_info,
- err_code))
+ while (atomic_read(&lldev->pending_tre_count)) {
+ if (hidma_post_completed(lldev, err_info, err_code))
break;
- HIDMA_INCREMENT_ITERATOR(tre_iterator, HIDMA_TRE_SIZE,
- tre_ring_size);
- num_completed++;
}
- tre_read_off = (lldev->tre_processed_off +
- HIDMA_TRE_SIZE * num_completed);
-
- tre_read_off = tre_read_off % tre_ring_size;
-
- /* record the last processed tre offset */
- lldev->tre_processed_off = tre_read_off;
}
static int hidma_ll_reset(struct hidma_lldev *lldev)
@@ -412,12 +396,24 @@ static int hidma_ll_reset(struct hidma_lldev *lldev)
* requests traditionally to the destination, this concept does not apply
* here for this HW.
*/
-irqreturn_t hidma_ll_inthandler(int chirq, void *arg)
+static void hidma_ll_int_handler_internal(struct hidma_lldev *lldev, int cause)
{
- struct hidma_lldev *lldev = arg;
- u32 status;
- u32 enable;
- u32 cause;
+ if (cause & HIDMA_ERR_INT_MASK) {
+ dev_err(lldev->dev, "error 0x%x, disabling...\n",
+ cause);
+
+ /* Clear out pending interrupts */
+ writel(cause, lldev->evca + HIDMA_EVCA_IRQ_CLR_REG);
+
+ /* No further submissions. */
+ hidma_ll_disable(lldev);
+
+ /* The driver completes the txn and notifies the client. */
+ hidma_cleanup_pending_tre(lldev, 0xFF,
+ HIDMA_EVRE_STATUS_ERROR);
+
+ return;
+ }
/*
* Fine tuned for this HW...
@@ -426,35 +422,28 @@ irqreturn_t hidma_ll_inthandler(int chirq, void *arg)
* read and write accessors are used for performance reasons due to
* interrupt delivery guarantees. Do not copy this code blindly and
* expect that to work.
+ *
+ * Try to consume as many EVREs as possible.
*/
+ hidma_handle_tre_completion(lldev);
+
+ /* We consumed TREs or there are pending TREs or EVREs. */
+ writel_relaxed(cause, lldev->evca + HIDMA_EVCA_IRQ_CLR_REG);
+}
+
+irqreturn_t hidma_ll_inthandler(int chirq, void *arg)
+{
+ struct hidma_lldev *lldev = arg;
+ u32 status;
+ u32 enable;
+ u32 cause;
+
status = readl_relaxed(lldev->evca + HIDMA_EVCA_IRQ_STAT_REG);
enable = readl_relaxed(lldev->evca + HIDMA_EVCA_IRQ_EN_REG);
cause = status & enable;
while (cause) {
- if (cause & HIDMA_ERR_INT_MASK) {
- dev_err(lldev->dev, "error 0x%x, disabling...\n",
- cause);
-
- /* Clear out pending interrupts */
- writel(cause, lldev->evca + HIDMA_EVCA_IRQ_CLR_REG);
-
- /* No further submissions. */
- hidma_ll_disable(lldev);
-
- /* Driver completes the txn and intimates the client.*/
- hidma_cleanup_pending_tre(lldev, 0xFF,
- HIDMA_EVRE_STATUS_ERROR);
- goto out;
- }
-
- /*
- * Try to consume as many EVREs as possible.
- */
- hidma_handle_tre_completion(lldev);
-
- /* We consumed TREs or there are pending TREs or EVREs. */
- writel_relaxed(cause, lldev->evca + HIDMA_EVCA_IRQ_CLR_REG);
+ hidma_ll_int_handler_internal(lldev, cause);
/*
* Another interrupt might have arrived while we are
@@ -465,7 +454,14 @@ irqreturn_t hidma_ll_inthandler(int chirq, void *arg)
cause = status & enable;
}
-out:
+ return IRQ_HANDLED;
+}
+
+irqreturn_t hidma_ll_inthandler_msi(int chirq, void *arg, int cause)
+{
+ struct hidma_lldev *lldev = arg;
+
+ hidma_ll_int_handler_internal(lldev, cause);
return IRQ_HANDLED;
}
@@ -548,7 +544,7 @@ void hidma_ll_queue_request(struct hidma_lldev *lldev, u32 tre_ch)
tre->err_code = 0;
tre->err_info = 0;
tre->queued = 1;
- lldev->pending_tre_count++;
+ atomic_inc(&lldev->pending_tre_count);
lldev->tre_write_offset = (lldev->tre_write_offset + HIDMA_TRE_SIZE)
% lldev->tre_ring_size;
spin_unlock_irqrestore(&lldev->lock, flags);
@@ -564,19 +560,8 @@ int hidma_ll_disable(struct hidma_lldev *lldev)
u32 val;
int ret;
- val = readl(lldev->evca + HIDMA_EVCA_CTRLSTS_REG);
- lldev->evch_state = HIDMA_CH_STATE(val);
- val = readl(lldev->trca + HIDMA_TRCA_CTRLSTS_REG);
- lldev->trch_state = HIDMA_CH_STATE(val);
-
- /* already suspended by this OS */
- if ((lldev->trch_state == HIDMA_CH_SUSPENDED) ||
- (lldev->evch_state == HIDMA_CH_SUSPENDED))
- return 0;
-
- /* already stopped by the manager */
- if ((lldev->trch_state == HIDMA_CH_STOPPED) ||
- (lldev->evch_state == HIDMA_CH_STOPPED))
+ /* The channel needs to be in working state */
+ if (!hidma_ll_isenabled(lldev))
return 0;
val = readl(lldev->trca + HIDMA_TRCA_CTRLSTS_REG);
@@ -654,7 +639,7 @@ int hidma_ll_setup(struct hidma_lldev *lldev)
u32 val;
u32 nr_tres = lldev->nr_tres;
- lldev->pending_tre_count = 0;
+ atomic_set(&lldev->pending_tre_count, 0);
lldev->tre_processed_off = 0;
lldev->evre_processed_off = 0;
lldev->tre_write_offset = 0;
@@ -691,17 +676,36 @@ int hidma_ll_setup(struct hidma_lldev *lldev)
writel(HIDMA_EVRE_SIZE * nr_tres,
lldev->evca + HIDMA_EVCA_RING_LEN_REG);
- /* support IRQ only for now */
+ /* configure interrupts */
+ hidma_ll_setup_irq(lldev, lldev->msi_support);
+
+ return hidma_ll_enable(lldev);
+}
+
+void hidma_ll_setup_irq(struct hidma_lldev *lldev, bool msi)
+{
+ u32 val;
+
+ lldev->msi_support = msi;
+
+ /* disable interrupts again after reset */
+ writel(0, lldev->evca + HIDMA_EVCA_IRQ_CLR_REG);
+ writel(0, lldev->evca + HIDMA_EVCA_IRQ_EN_REG);
+
+ /* route interrupts through the wired IRQ line unless MSI is in use */
val = readl(lldev->evca + HIDMA_EVCA_INTCTRL_REG);
val &= ~0xF;
- val |= 0x1;
+ if (!lldev->msi_support)
+ val |= 0x1;
writel(val, lldev->evca + HIDMA_EVCA_INTCTRL_REG);
/* clear all pending interrupts and enable them */
writel(ENABLE_IRQS, lldev->evca + HIDMA_EVCA_IRQ_CLR_REG);
writel(ENABLE_IRQS, lldev->evca + HIDMA_EVCA_IRQ_EN_REG);
-
- return hidma_ll_enable(lldev);
}
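
A caller-side view of the new helper may help; this is a hypothetical usage sketch, not from the patch. The low nibble of INTCTRL selects wired-IRQ delivery (1) and is left clear for MSI:

	/* Hypothetical usage: pick the delivery mode after a channel reset. */
	hidma_ll_setup_irq(lldev, false);	/* wired IRQ: INTCTRL low nibble = 1 */
	hidma_ll_setup_irq(lldev, true);	/* MSI: low nibble left at 0 */
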
struct hidma_lldev *hidma_ll_init(struct device *dev, u32 nr_tres,
@@ -816,7 +820,7 @@ int hidma_ll_uninit(struct hidma_lldev *lldev)
tasklet_kill(&lldev->task);
memset(lldev->trepool, 0, required_bytes);
lldev->trepool = NULL;
- lldev->pending_tre_count = 0;
+ atomic_set(&lldev->pending_tre_count, 0);
lldev->tre_write_offset = 0;
rc = hidma_ll_reset(lldev);
diff --git a/drivers/dma/qcom/hidma_mgmt.c b/drivers/dma/qcom/hidma_mgmt.c
index 82f36e466083..f847d32cc4b5 100644
--- a/drivers/dma/qcom/hidma_mgmt.c
+++ b/drivers/dma/qcom/hidma_mgmt.c
@@ -282,6 +282,7 @@ static const struct acpi_device_id hidma_mgmt_acpi_ids[] = {
{"QCOM8060"},
{},
};
+MODULE_DEVICE_TABLE(acpi, hidma_mgmt_acpi_ids);
#endif
static const struct of_device_id hidma_mgmt_match[] = {
@@ -375,8 +376,15 @@ static int __init hidma_mgmt_of_populate_channels(struct device_node *np)
ret = PTR_ERR(new_pdev);
goto out;
}
+ of_node_get(child);
+ new_pdev->dev.of_node = child;
of_dma_configure(&new_pdev->dev, child);
-
+ /*
+ * It is assumed that calling of_msi_configure is safe on
+ * platforms with or without MSI support.
+ */
+ of_msi_configure(&new_pdev->dev, child);
+ of_node_put(child);
kfree(res);
res = NULL;
}
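
The of_node_get()/of_node_put() pairing above, and the dropped of_node_put() in the hunk below, likely follow the usual OF lifetime rule (assumed rationale, not stated in the patch): for_each_matching_node() manages its own per-iteration reference, so an extra put inside that loop would underflow the count, while a device that caches a node pointer must pin it first. A minimal sketch:

	#include <linux/of.h>
	#include <linux/platform_device.h>

	/* Hypothetical helper: pin the node for as long as the pointer lives. */
	static void attach_of_node(struct platform_device *pdev,
				   struct device_node *child)
	{
		of_node_get(child);		/* +1 for the cached pointer */
		pdev->dev.of_node = child;
	}
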
@@ -395,7 +403,6 @@ static int __init hidma_mgmt_init(void)
for_each_matching_node(child, hidma_mgmt_match) {
/* device tree based firmware here */
hidma_mgmt_of_populate_channels(child);
- of_node_put(child);
}
#endif
platform_driver_register(&hidma_mgmt_driver);
diff --git a/drivers/dma/s3c24xx-dma.c b/drivers/dma/s3c24xx-dma.c
index 3c579abbabb7..f04c4702d98b 100644
--- a/drivers/dma/s3c24xx-dma.c
+++ b/drivers/dma/s3c24xx-dma.c
@@ -289,16 +289,11 @@ static
struct s3c24xx_dma_phy *s3c24xx_dma_get_phy(struct s3c24xx_dma_chan *s3cchan)
{
struct s3c24xx_dma_engine *s3cdma = s3cchan->host;
- const struct s3c24xx_dma_platdata *pdata = s3cdma->pdata;
- struct s3c24xx_dma_channel *cdata;
struct s3c24xx_dma_phy *phy = NULL;
unsigned long flags;
int i;
int ret;
- if (s3cchan->slave)
- cdata = &pdata->channels[s3cchan->id];
-
for (i = 0; i < s3cdma->pdata->num_phy_channels; i++) {
phy = &s3cdma->phy_chans[i];
diff --git a/drivers/dma/sh/usb-dmac.c b/drivers/dma/sh/usb-dmac.c
index 06ecdc38cee0..72c649713ace 100644
--- a/drivers/dma/sh/usb-dmac.c
+++ b/drivers/dma/sh/usb-dmac.c
@@ -652,7 +652,6 @@ static bool usb_dmac_chan_filter(struct dma_chan *chan, void *arg)
static struct dma_chan *usb_dmac_of_xlate(struct of_phandle_args *dma_spec,
struct of_dma *ofdma)
{
- struct usb_dmac_chan *uchan;
struct dma_chan *chan;
dma_cap_mask_t mask;
@@ -667,8 +666,6 @@ static struct dma_chan *usb_dmac_of_xlate(struct of_phandle_args *dma_spec,
if (!chan)
return NULL;
- uchan = to_usb_dmac_chan(chan);
-
return chan;
}
diff --git a/drivers/dma/sirf-dma.c b/drivers/dma/sirf-dma.c
index 8f62edad51be..a0733ac3edb1 100644
--- a/drivers/dma/sirf-dma.c
+++ b/drivers/dma/sirf-dma.c
@@ -1011,7 +1011,6 @@ static int __maybe_unused sirfsoc_dma_pm_suspend(struct device *dev)
{
struct sirfsoc_dma *sdma = dev_get_drvdata(dev);
struct sirfsoc_dma_regs *save = &sdma->regs_save;
- struct sirfsoc_dma_desc *sdesc;
struct sirfsoc_dma_chan *schan;
int ch;
int ret;
@@ -1044,9 +1043,6 @@ static int __maybe_unused sirfsoc_dma_pm_suspend(struct device *dev)
schan = &sdma->channels[ch];
if (list_empty(&schan->active))
continue;
- sdesc = list_first_entry(&schan->active,
- struct sirfsoc_dma_desc,
- node);
save->ctrl[ch] = readl_relaxed(sdma->base +
ch * 0x10 + SIRFSOC_DMA_CH_CTRL);
}
diff --git a/drivers/dma/st_fdma.c b/drivers/dma/st_fdma.c
new file mode 100644
index 000000000000..bfb79bd0c6de
--- /dev/null
+++ b/drivers/dma/st_fdma.c
@@ -0,0 +1,889 @@
+/*
+ * DMA driver for STMicroelectronics STi FDMA controller
+ *
+ * Copyright (C) 2014 STMicroelectronics
+ *
+ * Author: Ludovic Barre <Ludovic.barre@st.com>
+ * Peter Griffin <peter.griffin@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/of_dma.h>
+#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <linux/remoteproc.h>
+
+#include "st_fdma.h"
+
+static inline struct st_fdma_chan *to_st_fdma_chan(struct dma_chan *c)
+{
+ return container_of(c, struct st_fdma_chan, vchan.chan);
+}
+
+static struct st_fdma_desc *to_st_fdma_desc(struct virt_dma_desc *vd)
+{
+ return container_of(vd, struct st_fdma_desc, vdesc);
+}
+
+static int st_fdma_dreq_get(struct st_fdma_chan *fchan)
+{
+ struct st_fdma_dev *fdev = fchan->fdev;
+ u32 req_line_cfg = fchan->cfg.req_line;
+ u32 dreq_line;
+ int try = 0;
+
+ /*
+ * dreq_mask is shared by all channels of the FDMA, so accesses must
+ * be atomic. If the mask changes between the check and
+ * test_and_set_bit(), we retry.
+ */
+ do {
+ if (fdev->dreq_mask == ~0L) {
+ dev_err(fdev->dev, "No req lines available\n");
+ return -EINVAL;
+ }
+
+ if (try || req_line_cfg >= ST_FDMA_NR_DREQS) {
+ dev_err(fdev->dev, "Invalid or used req line\n");
+ return -EINVAL;
+ }
+ dreq_line = req_line_cfg;
+
+ try++;
+ } while (test_and_set_bit(dreq_line, &fdev->dreq_mask));
+
+ dev_dbg(fdev->dev, "get dreq_line:%d mask:%#lx\n",
+ dreq_line, fdev->dreq_mask);
+
+ return dreq_line;
+}
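
The core of the claim loop above is the atomic test-and-set; a generic sketch with illustrative names (not driver code): a non-zero return from test_and_set_bit() means another channel won the race, and the function above then fails the request on its second pass rather than spinning.

	#include <linux/bitops.h>

	static int claim_req_line(unsigned long *mask, unsigned int line)
	{
		if (test_and_set_bit(line, mask))
			return -EBUSY;	/* lost the race: line already taken */
		return line;		/* bit is now owned by this channel */
	}
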
+
+static void st_fdma_dreq_put(struct st_fdma_chan *fchan)
+{
+ struct st_fdma_dev *fdev = fchan->fdev;
+
+ dev_dbg(fdev->dev, "put dreq_line:%#x\n", fchan->dreq_line);
+ clear_bit(fchan->dreq_line, &fdev->dreq_mask);
+}
+
+static void st_fdma_xfer_desc(struct st_fdma_chan *fchan)
+{
+ struct virt_dma_desc *vdesc;
+ unsigned long nbytes, ch_cmd, cmd;
+
+ vdesc = vchan_next_desc(&fchan->vchan);
+ if (!vdesc)
+ return;
+
+ fchan->fdesc = to_st_fdma_desc(vdesc);
+ nbytes = fchan->fdesc->node[0].desc->nbytes;
+ cmd = FDMA_CMD_START(fchan->vchan.chan.chan_id);
+ ch_cmd = fchan->fdesc->node[0].pdesc | FDMA_CH_CMD_STA_START;
+
+ /* start the channel for the descriptor */
+ fnode_write(fchan, nbytes, FDMA_CNTN_OFST);
+ fchan_write(fchan, ch_cmd, FDMA_CH_CMD_OFST);
+ writel(cmd,
+ fchan->fdev->slim_rproc->peri + FDMA_CMD_SET_OFST);
+
+ dev_dbg(fchan->fdev->dev, "start chan:%d\n", fchan->vchan.chan.chan_id);
+}
+
+static void st_fdma_ch_sta_update(struct st_fdma_chan *fchan,
+ unsigned long int_sta)
+{
+ unsigned long ch_sta, ch_err;
+ int ch_id = fchan->vchan.chan.chan_id;
+ struct st_fdma_dev *fdev = fchan->fdev;
+
+ ch_sta = fchan_read(fchan, FDMA_CH_CMD_OFST);
+ ch_err = ch_sta & FDMA_CH_CMD_ERR_MASK;
+ ch_sta &= FDMA_CH_CMD_STA_MASK;
+
+ if (int_sta & FDMA_INT_STA_ERR) {
+ dev_warn(fdev->dev, "chan:%d, error:%ld\n", ch_id, ch_err);
+ fchan->status = DMA_ERROR;
+ return;
+ }
+
+ switch (ch_sta) {
+ case FDMA_CH_CMD_STA_PAUSED:
+ fchan->status = DMA_PAUSED;
+ break;
+
+ case FDMA_CH_CMD_STA_RUNNING:
+ fchan->status = DMA_IN_PROGRESS;
+ break;
+ }
+}
+
+static irqreturn_t st_fdma_irq_handler(int irq, void *dev_id)
+{
+ struct st_fdma_dev *fdev = dev_id;
+ irqreturn_t ret = IRQ_NONE;
+ struct st_fdma_chan *fchan = &fdev->chans[0];
+ unsigned long int_sta, clr;
+
+ int_sta = fdma_read(fdev, FDMA_INT_STA_OFST);
+ clr = int_sta;
+
+ for (; int_sta != 0; int_sta >>= 2, fchan++) {
+ if (!(int_sta & (FDMA_INT_STA_CH | FDMA_INT_STA_ERR)))
+ continue;
+
+ spin_lock(&fchan->vchan.lock);
+ st_fdma_ch_sta_update(fchan, int_sta);
+
+ if (fchan->fdesc) {
+ if (!fchan->fdesc->iscyclic) {
+ list_del(&fchan->fdesc->vdesc.node);
+ vchan_cookie_complete(&fchan->fdesc->vdesc);
+ fchan->fdesc = NULL;
+ fchan->status = DMA_COMPLETE;
+ } else {
+ vchan_cyclic_callback(&fchan->fdesc->vdesc);
+ }
+
+ /* Start the next descriptor (if available) */
+ if (!fchan->fdesc)
+ st_fdma_xfer_desc(fchan);
+ }
+
+ spin_unlock(&fchan->vchan.lock);
+ ret = IRQ_HANDLED;
+ }
+
+ fdma_write(fdev, clr, FDMA_INT_CLR_OFST);
+
+ return ret;
+}
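
The shift-by-two in the handler's loop reflects the status layout: each channel owns a two-bit field (completion, error) in INT_STA. A standalone worked example of the walk, with a made-up status word:

	#include <stdio.h>

	#define INT_STA_CH  0x1
	#define INT_STA_ERR 0x2

	int main(void)
	{
		unsigned long int_sta = 0x9;	/* 0b1001: ch0 done, ch1 error */
		int ch = 0;

		for (; int_sta; int_sta >>= 2, ch++) {
			if (int_sta & INT_STA_CH)
				printf("channel %d: completion\n", ch);
			if (int_sta & INT_STA_ERR)
				printf("channel %d: error\n", ch);
		}
		return 0;
	}
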
+
+static struct dma_chan *st_fdma_of_xlate(struct of_phandle_args *dma_spec,
+ struct of_dma *ofdma)
+{
+ struct st_fdma_dev *fdev = ofdma->of_dma_data;
+ struct dma_chan *chan;
+ struct st_fdma_chan *fchan;
+ int ret;
+
+ if (dma_spec->args_count < 1)
+ return ERR_PTR(-EINVAL);
+
+ if (fdev->dma_device.dev->of_node != dma_spec->np)
+ return ERR_PTR(-EINVAL);
+
+ ret = rproc_boot(fdev->slim_rproc->rproc);
+ if (ret == -ENOENT)
+ return ERR_PTR(-EPROBE_DEFER);
+ else if (ret)
+ return ERR_PTR(ret);
+
+ chan = dma_get_any_slave_channel(&fdev->dma_device);
+ if (!chan)
+ goto err_chan;
+
+ fchan = to_st_fdma_chan(chan);
+
+ fchan->cfg.of_node = dma_spec->np;
+ fchan->cfg.req_line = dma_spec->args[0];
+ fchan->cfg.req_ctrl = 0;
+ fchan->cfg.type = ST_FDMA_TYPE_FREE_RUN;
+
+ if (dma_spec->args_count > 1)
+ fchan->cfg.req_ctrl = dma_spec->args[1]
+ & FDMA_REQ_CTRL_CFG_MASK;
+
+ if (dma_spec->args_count > 2)
+ fchan->cfg.type = dma_spec->args[2];
+
+ if (fchan->cfg.type == ST_FDMA_TYPE_FREE_RUN) {
+ fchan->dreq_line = 0;
+ } else {
+ fchan->dreq_line = st_fdma_dreq_get(fchan);
+ if (IS_ERR_VALUE(fchan->dreq_line)) {
+ chan = ERR_PTR(fchan->dreq_line);
+ goto err_chan;
+ }
+ }
+
+ dev_dbg(fdev->dev, "xlate req_line:%d type:%d req_ctrl:%#lx\n",
+ fchan->cfg.req_line, fchan->cfg.type, fchan->cfg.req_ctrl);
+
+ return chan;
+
+err_chan:
+ rproc_shutdown(fdev->slim_rproc->rproc);
+ return chan;
+}
+
+static void st_fdma_free_desc(struct virt_dma_desc *vdesc)
+{
+ struct st_fdma_desc *fdesc;
+ int i;
+
+ fdesc = to_st_fdma_desc(vdesc);
+ for (i = 0; i < fdesc->n_nodes; i++)
+ dma_pool_free(fdesc->fchan->node_pool, fdesc->node[i].desc,
+ fdesc->node[i].pdesc);
+ kfree(fdesc);
+}
+
+static struct st_fdma_desc *st_fdma_alloc_desc(struct st_fdma_chan *fchan,
+ int sg_len)
+{
+ struct st_fdma_desc *fdesc;
+ int i;
+
+ fdesc = kzalloc(sizeof(*fdesc) +
+ sizeof(struct st_fdma_sw_node) * sg_len, GFP_NOWAIT);
+ if (!fdesc)
+ return NULL;
+
+ fdesc->fchan = fchan;
+ fdesc->n_nodes = sg_len;
+ for (i = 0; i < sg_len; i++) {
+ fdesc->node[i].desc = dma_pool_alloc(fchan->node_pool,
+ GFP_NOWAIT, &fdesc->node[i].pdesc);
+ if (!fdesc->node[i].desc)
+ goto err;
+ }
+ return fdesc;
+
+err:
+ while (--i >= 0)
+ dma_pool_free(fchan->node_pool, fdesc->node[i].desc,
+ fdesc->node[i].pdesc);
+ kfree(fdesc);
+ return NULL;
+}
+
+static int st_fdma_alloc_chan_res(struct dma_chan *chan)
+{
+ struct st_fdma_chan *fchan = to_st_fdma_chan(chan);
+
+ /* Create the dma pool for descriptor allocation */
+ fchan->node_pool = dma_pool_create(dev_name(&chan->dev->device),
+ fchan->fdev->dev,
+ sizeof(struct st_fdma_hw_node),
+ __alignof__(struct st_fdma_hw_node),
+ 0);
+
+ if (!fchan->node_pool) {
+ dev_err(fchan->fdev->dev, "unable to allocate desc pool\n");
+ return -ENOMEM;
+ }
+
+ dev_dbg(fchan->fdev->dev, "alloc ch_id:%d type:%d\n",
+ fchan->vchan.chan.chan_id, fchan->cfg.type);
+
+ return 0;
+}
+
+static void st_fdma_free_chan_res(struct dma_chan *chan)
+{
+ struct st_fdma_chan *fchan = to_st_fdma_chan(chan);
+ struct rproc *rproc = fchan->fdev->slim_rproc->rproc;
+ unsigned long flags;
+
+ LIST_HEAD(head);
+
+ dev_dbg(fchan->fdev->dev, "%s: freeing chan:%d\n",
+ __func__, fchan->vchan.chan.chan_id);
+
+ if (fchan->cfg.type != ST_FDMA_TYPE_FREE_RUN)
+ st_fdma_dreq_put(fchan);
+
+ spin_lock_irqsave(&fchan->vchan.lock, flags);
+ fchan->fdesc = NULL;
+ spin_unlock_irqrestore(&fchan->vchan.lock, flags);
+
+ dma_pool_destroy(fchan->node_pool);
+ fchan->node_pool = NULL;
+ memset(&fchan->cfg, 0, sizeof(struct st_fdma_cfg));
+
+ rproc_shutdown(rproc);
+}
+
+static struct dma_async_tx_descriptor *st_fdma_prep_dma_memcpy(
+ struct dma_chan *chan, dma_addr_t dst, dma_addr_t src,
+ size_t len, unsigned long flags)
+{
+ struct st_fdma_chan *fchan;
+ struct st_fdma_desc *fdesc;
+ struct st_fdma_hw_node *hw_node;
+
+ if (!len)
+ return NULL;
+
+ fchan = to_st_fdma_chan(chan);
+
+ /* We only require a single descriptor */
+ fdesc = st_fdma_alloc_desc(fchan, 1);
+ if (!fdesc) {
+ dev_err(fchan->fdev->dev, "no memory for desc\n");
+ return NULL;
+ }
+
+ hw_node = fdesc->node[0].desc;
+ hw_node->next = 0;
+ hw_node->control = FDMA_NODE_CTRL_REQ_MAP_FREE_RUN;
+ hw_node->control |= FDMA_NODE_CTRL_SRC_INCR;
+ hw_node->control |= FDMA_NODE_CTRL_DST_INCR;
+ hw_node->control |= FDMA_NODE_CTRL_INT_EON;
+ hw_node->nbytes = len;
+ hw_node->saddr = src;
+ hw_node->daddr = dst;
+ hw_node->generic.length = len;
+ hw_node->generic.sstride = 0;
+ hw_node->generic.dstride = 0;
+
+ return vchan_tx_prep(&fchan->vchan, &fdesc->vdesc, flags);
+}
+
+static int config_reqctrl(struct st_fdma_chan *fchan,
+ enum dma_transfer_direction direction)
+{
+ u32 maxburst = 0, addr = 0;
+ enum dma_slave_buswidth width;
+ int ch_id = fchan->vchan.chan.chan_id;
+ struct st_fdma_dev *fdev = fchan->fdev;
+
+ switch (direction) {
+
+ case DMA_DEV_TO_MEM:
+ fchan->cfg.req_ctrl &= ~FDMA_REQ_CTRL_WNR;
+ maxburst = fchan->scfg.src_maxburst;
+ width = fchan->scfg.src_addr_width;
+ addr = fchan->scfg.src_addr;
+ break;
+
+ case DMA_MEM_TO_DEV:
+ fchan->cfg.req_ctrl |= FDMA_REQ_CTRL_WNR;
+ maxburst = fchan->scfg.dst_maxburst;
+ width = fchan->scfg.dst_addr_width;
+ addr = fchan->scfg.dst_addr;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ fchan->cfg.req_ctrl &= ~FDMA_REQ_CTRL_OPCODE_MASK;
+
+ switch (width) {
+
+ case DMA_SLAVE_BUSWIDTH_1_BYTE:
+ fchan->cfg.req_ctrl |= FDMA_REQ_CTRL_OPCODE_LD_ST1;
+ break;
+
+ case DMA_SLAVE_BUSWIDTH_2_BYTES:
+ fchan->cfg.req_ctrl |= FDMA_REQ_CTRL_OPCODE_LD_ST2;
+ break;
+
+ case DMA_SLAVE_BUSWIDTH_4_BYTES:
+ fchan->cfg.req_ctrl |= FDMA_REQ_CTRL_OPCODE_LD_ST4;
+ break;
+
+ case DMA_SLAVE_BUSWIDTH_8_BYTES:
+ fchan->cfg.req_ctrl |= FDMA_REQ_CTRL_OPCODE_LD_ST8;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ fchan->cfg.req_ctrl &= ~FDMA_REQ_CTRL_NUM_OPS_MASK;
+ fchan->cfg.req_ctrl |= FDMA_REQ_CTRL_NUM_OPS(maxburst - 1);
+ dreq_write(fchan, fchan->cfg.req_ctrl, FDMA_REQ_CTRL_OFST);
+
+ fchan->cfg.dev_addr = addr;
+ fchan->cfg.dir = direction;
+
+ dev_dbg(fdev->dev, "chan:%d config_reqctrl:%#x req_ctrl:%#lx\n",
+ ch_id, addr, fchan->cfg.req_ctrl);
+
+ return 0;
+}
+
+static void fill_hw_node(struct st_fdma_hw_node *hw_node,
+ struct st_fdma_chan *fchan,
+ enum dma_transfer_direction direction)
+{
+ if (direction == DMA_MEM_TO_DEV) {
+ hw_node->control |= FDMA_NODE_CTRL_SRC_INCR;
+ hw_node->control |= FDMA_NODE_CTRL_DST_STATIC;
+ hw_node->daddr = fchan->cfg.dev_addr;
+ } else {
+ hw_node->control |= FDMA_NODE_CTRL_SRC_STATIC;
+ hw_node->control |= FDMA_NODE_CTRL_DST_INCR;
+ hw_node->saddr = fchan->cfg.dev_addr;
+ }
+
+ hw_node->generic.sstride = 0;
+ hw_node->generic.dstride = 0;
+}
+
+static inline struct st_fdma_chan *st_fdma_prep_common(struct dma_chan *chan,
+ size_t len, enum dma_transfer_direction direction)
+{
+ struct st_fdma_chan *fchan;
+
+ if (!chan || !len)
+ return NULL;
+
+ fchan = to_st_fdma_chan(chan);
+
+ if (!is_slave_direction(direction)) {
+ dev_err(fchan->fdev->dev, "bad direction?\n");
+ return NULL;
+ }
+
+ return fchan;
+}
+
+static struct dma_async_tx_descriptor *st_fdma_prep_dma_cyclic(
+ struct dma_chan *chan, dma_addr_t buf_addr, size_t len,
+ size_t period_len, enum dma_transfer_direction direction,
+ unsigned long flags)
+{
+ struct st_fdma_chan *fchan;
+ struct st_fdma_desc *fdesc;
+ int sg_len, i;
+
+ fchan = st_fdma_prep_common(chan, len, direction);
+ if (!fchan)
+ return NULL;
+
+ if (!period_len)
+ return NULL;
+
+ if (config_reqctrl(fchan, direction)) {
+ dev_err(fchan->fdev->dev, "bad width or direction\n");
+ return NULL;
+ }
+
+ /* the buffer length must be a multiple of period_len */
+ if (len % period_len != 0) {
+ dev_err(fchan->fdev->dev, "len is not multiple of period\n");
+ return NULL;
+ }
+
+ sg_len = len / period_len;
+ fdesc = st_fdma_alloc_desc(fchan, sg_len);
+ if (!fdesc) {
+ dev_err(fchan->fdev->dev, "no memory for desc\n");
+ return NULL;
+ }
+
+ fdesc->iscyclic = true;
+
+ for (i = 0; i < sg_len; i++) {
+ struct st_fdma_hw_node *hw_node = fdesc->node[i].desc;
+
+ hw_node->next = fdesc->node[(i + 1) % sg_len].pdesc;
+
+ hw_node->control =
+ FDMA_NODE_CTRL_REQ_MAP_DREQ(fchan->dreq_line);
+ hw_node->control |= FDMA_NODE_CTRL_INT_EON;
+
+ fill_hw_node(hw_node, fchan, direction);
+
+ if (direction == DMA_MEM_TO_DEV)
+ hw_node->saddr = buf_addr + (i * period_len);
+ else
+ hw_node->daddr = buf_addr + (i * period_len);
+
+ hw_node->nbytes = period_len;
+ hw_node->generic.length = period_len;
+ }
+
+ return vchan_tx_prep(&fchan->vchan, &fdesc->vdesc, flags);
+}
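
The `(i + 1) % sg_len` link is what makes the transfer cyclic: the last node's next pointer wraps back to the first. A standalone arithmetic sketch of the resulting chain (illustrative only):

	#include <stdio.h>

	int main(void)
	{
		int sg_len = 3, i;

		for (i = 0; i < sg_len; i++)
			printf("node%d.next -> node%d\n", i, (i + 1) % sg_len);
		/* node0 -> node1 -> node2 -> node0: a ring */
		return 0;
	}
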
+
+static struct dma_async_tx_descriptor *st_fdma_prep_slave_sg(
+ struct dma_chan *chan, struct scatterlist *sgl,
+ unsigned int sg_len, enum dma_transfer_direction direction,
+ unsigned long flags, void *context)
+{
+ struct st_fdma_chan *fchan;
+ struct st_fdma_desc *fdesc;
+ struct st_fdma_hw_node *hw_node;
+ struct scatterlist *sg;
+ int i;
+
+ fchan = st_fdma_prep_common(chan, sg_len, direction);
+ if (!fchan)
+ return NULL;
+
+ if (!sgl)
+ return NULL;
+
+ fdesc = st_fdma_alloc_desc(fchan, sg_len);
+ if (!fdesc) {
+ dev_err(fchan->fdev->dev, "no memory for desc\n");
+ return NULL;
+ }
+
+ fdesc->iscyclic = false;
+
+ for_each_sg(sgl, sg, sg_len, i) {
+ hw_node = fdesc->node[i].desc;
+
+ hw_node->next = fdesc->node[(i + 1) % sg_len].pdesc;
+ hw_node->control = FDMA_NODE_CTRL_REQ_MAP_DREQ(fchan->dreq_line);
+
+ fill_hw_node(hw_node, fchan, direction);
+
+ if (direction == DMA_MEM_TO_DEV)
+ hw_node->saddr = sg_dma_address(sg);
+ else
+ hw_node->daddr = sg_dma_address(sg);
+
+ hw_node->nbytes = sg_dma_len(sg);
+ hw_node->generic.length = sg_dma_len(sg);
+ }
+
+ /* interrupt at end of last node */
+ hw_node->control |= FDMA_NODE_CTRL_INT_EON;
+
+ return vchan_tx_prep(&fchan->vchan, &fdesc->vdesc, flags);
+}
+
+static size_t st_fdma_desc_residue(struct st_fdma_chan *fchan,
+ struct virt_dma_desc *vdesc,
+ bool in_progress)
+{
+ struct st_fdma_desc *fdesc = fchan->fdesc;
+ size_t residue = 0;
+ dma_addr_t cur_addr = 0;
+ int i;
+
+ if (in_progress) {
+ cur_addr = fchan_read(fchan, FDMA_CH_CMD_OFST);
+ cur_addr &= FDMA_CH_CMD_DATA_MASK;
+ }
+
+ for (i = fchan->fdesc->n_nodes - 1; i >= 0; i--) {
+ if (cur_addr == fdesc->node[i].pdesc) {
+ residue += fnode_read(fchan, FDMA_CNTN_OFST);
+ break;
+ }
+ residue += fdesc->node[i].desc->nbytes;
+ }
+
+ return residue;
+}
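
A worked numeric example of the residue walk above, with illustrative values: nodes the hardware has not reached yet contribute their full byte count, and the in-flight node contributes its live FDMA_CNTN counter.

	#include <stdio.h>

	int main(void)
	{
		unsigned int nbytes[3] = { 4096, 4096, 4096 };
		int cur = 1;			/* hw busy on node 1 */
		unsigned int live = 1500;	/* live count for node 1 */
		unsigned int residue = 0;
		int i;

		for (i = 2; i >= 0; i--) {
			if (i == cur) {
				residue += live;
				break;
			}
			residue += nbytes[i];
		}
		printf("residue = %u\n", residue);	/* 4096 + 1500 = 5596 */
		return 0;
	}
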
+
+static enum dma_status st_fdma_tx_status(struct dma_chan *chan,
+ dma_cookie_t cookie,
+ struct dma_tx_state *txstate)
+{
+ struct st_fdma_chan *fchan = to_st_fdma_chan(chan);
+ struct virt_dma_desc *vd;
+ enum dma_status ret;
+ unsigned long flags;
+
+ ret = dma_cookie_status(chan, cookie, txstate);
+ if (ret == DMA_COMPLETE || !txstate)
+ return ret;
+
+ spin_lock_irqsave(&fchan->vchan.lock, flags);
+ vd = vchan_find_desc(&fchan->vchan, cookie);
+ if (fchan->fdesc && cookie == fchan->fdesc->vdesc.tx.cookie)
+ txstate->residue = st_fdma_desc_residue(fchan, vd, true);
+ else if (vd)
+ txstate->residue = st_fdma_desc_residue(fchan, vd, false);
+ else
+ txstate->residue = 0;
+
+ spin_unlock_irqrestore(&fchan->vchan.lock, flags);
+
+ return ret;
+}
+
+static void st_fdma_issue_pending(struct dma_chan *chan)
+{
+ struct st_fdma_chan *fchan = to_st_fdma_chan(chan);
+ unsigned long flags;
+
+ spin_lock_irqsave(&fchan->vchan.lock, flags);
+
+ if (vchan_issue_pending(&fchan->vchan) && !fchan->fdesc)
+ st_fdma_xfer_desc(fchan);
+
+ spin_unlock_irqrestore(&fchan->vchan.lock, flags);
+}
+
+static int st_fdma_pause(struct dma_chan *chan)
+{
+ unsigned long flags;
+ LIST_HEAD(head);
+ struct st_fdma_chan *fchan = to_st_fdma_chan(chan);
+ int ch_id = fchan->vchan.chan.chan_id;
+ unsigned long cmd = FDMA_CMD_PAUSE(ch_id);
+
+ dev_dbg(fchan->fdev->dev, "pause chan:%d\n", ch_id);
+
+ spin_lock_irqsave(&fchan->vchan.lock, flags);
+ if (fchan->fdesc)
+ fdma_write(fchan->fdev, cmd, FDMA_CMD_SET_OFST);
+ spin_unlock_irqrestore(&fchan->vchan.lock, flags);
+
+ return 0;
+}
+
+static int st_fdma_resume(struct dma_chan *chan)
+{
+ unsigned long flags;
+ unsigned long val;
+ struct st_fdma_chan *fchan = to_st_fdma_chan(chan);
+ int ch_id = fchan->vchan.chan.chan_id;
+
+ dev_dbg(fchan->fdev->dev, "resume chan:%d\n", ch_id);
+
+ spin_lock_irqsave(&fchan->vchan.lock, flags);
+ if (fchan->fdesc) {
+ val = fchan_read(fchan, FDMA_CH_CMD_OFST);
+ val &= FDMA_CH_CMD_DATA_MASK;
+ fchan_write(fchan, val, FDMA_CH_CMD_OFST);
+ }
+ spin_unlock_irqrestore(&fchan->vchan.lock, flags);
+
+ return 0;
+}
+
+static int st_fdma_terminate_all(struct dma_chan *chan)
+{
+ unsigned long flags;
+ LIST_HEAD(head);
+ struct st_fdma_chan *fchan = to_st_fdma_chan(chan);
+ int ch_id = fchan->vchan.chan.chan_id;
+ unsigned long cmd = FDMA_CMD_PAUSE(ch_id);
+
+ dev_dbg(fchan->fdev->dev, "terminate chan:%d\n", ch_id);
+
+ spin_lock_irqsave(&fchan->vchan.lock, flags);
+ fdma_write(fchan->fdev, cmd, FDMA_CMD_SET_OFST);
+ fchan->fdesc = NULL;
+ vchan_get_all_descriptors(&fchan->vchan, &head);
+ spin_unlock_irqrestore(&fchan->vchan.lock, flags);
+ vchan_dma_desc_free_list(&fchan->vchan, &head);
+
+ return 0;
+}
+
+static int st_fdma_slave_config(struct dma_chan *chan,
+ struct dma_slave_config *slave_cfg)
+{
+ struct st_fdma_chan *fchan = to_st_fdma_chan(chan);
+
+ memcpy(&fchan->scfg, slave_cfg, sizeof(fchan->scfg));
+ return 0;
+}
+
+static const struct st_fdma_driverdata fdma_mpe31_stih407_11 = {
+ .name = "STiH407",
+ .id = 0,
+};
+
+static const struct st_fdma_driverdata fdma_mpe31_stih407_12 = {
+ .name = "STiH407",
+ .id = 1,
+};
+
+static const struct st_fdma_driverdata fdma_mpe31_stih407_13 = {
+ .name = "STiH407",
+ .id = 2,
+};
+
+static const struct of_device_id st_fdma_match[] = {
+ { .compatible = "st,stih407-fdma-mpe31-11"
+ , .data = &fdma_mpe31_stih407_11 },
+ { .compatible = "st,stih407-fdma-mpe31-12"
+ , .data = &fdma_mpe31_stih407_12 },
+ { .compatible = "st,stih407-fdma-mpe31-13"
+ , .data = &fdma_mpe31_stih407_13 },
+ {},
+};
+MODULE_DEVICE_TABLE(of, st_fdma_match);
+
+static int st_fdma_parse_dt(struct platform_device *pdev,
+ const struct st_fdma_driverdata *drvdata,
+ struct st_fdma_dev *fdev)
+{
+ snprintf(fdev->fw_name, FW_NAME_SIZE, "fdma_%s_%d.elf",
+ drvdata->name, drvdata->id);
+
+ return of_property_read_u32(pdev->dev.of_node, "dma-channels",
+ &fdev->nr_channels);
+}
+#define FDMA_DMA_BUSWIDTHS (BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
+ BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
+ BIT(DMA_SLAVE_BUSWIDTH_3_BYTES) | \
+ BIT(DMA_SLAVE_BUSWIDTH_4_BYTES))
+
+static void st_fdma_free(struct st_fdma_dev *fdev)
+{
+ struct st_fdma_chan *fchan;
+ int i;
+
+ for (i = 0; i < fdev->nr_channels; i++) {
+ fchan = &fdev->chans[i];
+ list_del(&fchan->vchan.chan.device_node);
+ tasklet_kill(&fchan->vchan.task);
+ }
+}
+
+static int st_fdma_probe(struct platform_device *pdev)
+{
+ struct st_fdma_dev *fdev;
+ const struct of_device_id *match;
+ struct device_node *np = pdev->dev.of_node;
+ const struct st_fdma_driverdata *drvdata;
+ int ret, i;
+
+ match = of_match_device(st_fdma_match, &pdev->dev);
+ if (!match || !match->data) {
+ dev_err(&pdev->dev, "No device match found\n");
+ return -ENODEV;
+ }
+
+ drvdata = match->data;
+
+ fdev = devm_kzalloc(&pdev->dev, sizeof(*fdev), GFP_KERNEL);
+ if (!fdev)
+ return -ENOMEM;
+
+ ret = st_fdma_parse_dt(pdev, drvdata, fdev);
+ if (ret) {
+ dev_err(&pdev->dev, "unable to find platform data\n");
+ goto err;
+ }
+
+ fdev->chans = devm_kcalloc(&pdev->dev, fdev->nr_channels,
+ sizeof(struct st_fdma_chan), GFP_KERNEL);
+ if (!fdev->chans)
+ return -ENOMEM;
+
+ fdev->dev = &pdev->dev;
+ fdev->drvdata = drvdata;
+ platform_set_drvdata(pdev, fdev);
+
+ fdev->irq = platform_get_irq(pdev, 0);
+ if (fdev->irq < 0) {
+ dev_err(&pdev->dev, "Failed to get irq resource\n");
+ return -EINVAL;
+ }
+
+ ret = devm_request_irq(&pdev->dev, fdev->irq, st_fdma_irq_handler, 0,
+ dev_name(&pdev->dev), fdev);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to request irq (%d)\n", ret);
+ goto err;
+ }
+
+ fdev->slim_rproc = st_slim_rproc_alloc(pdev, fdev->fw_name);
+ if (IS_ERR(fdev->slim_rproc)) {
+ ret = PTR_ERR(fdev->slim_rproc);
+ dev_err(&pdev->dev, "slim_rproc_alloc failed (%d)\n", ret);
+ goto err;
+ }
+
+ /* Initialise list of FDMA channels */
+ INIT_LIST_HEAD(&fdev->dma_device.channels);
+ for (i = 0; i < fdev->nr_channels; i++) {
+ struct st_fdma_chan *fchan = &fdev->chans[i];
+
+ fchan->fdev = fdev;
+ fchan->vchan.desc_free = st_fdma_free_desc;
+ vchan_init(&fchan->vchan, &fdev->dma_device);
+ }
+
+ /* Initialise the FDMA dreq (reserve 0 & 31 for FDMA use) */
+ fdev->dreq_mask = BIT(0) | BIT(31);
+
+ dma_cap_set(DMA_SLAVE, fdev->dma_device.cap_mask);
+ dma_cap_set(DMA_CYCLIC, fdev->dma_device.cap_mask);
+ dma_cap_set(DMA_MEMCPY, fdev->dma_device.cap_mask);
+
+ fdev->dma_device.dev = &pdev->dev;
+ fdev->dma_device.device_alloc_chan_resources = st_fdma_alloc_chan_res;
+ fdev->dma_device.device_free_chan_resources = st_fdma_free_chan_res;
+ fdev->dma_device.device_prep_dma_cyclic = st_fdma_prep_dma_cyclic;
+ fdev->dma_device.device_prep_slave_sg = st_fdma_prep_slave_sg;
+ fdev->dma_device.device_prep_dma_memcpy = st_fdma_prep_dma_memcpy;
+ fdev->dma_device.device_tx_status = st_fdma_tx_status;
+ fdev->dma_device.device_issue_pending = st_fdma_issue_pending;
+ fdev->dma_device.device_terminate_all = st_fdma_terminate_all;
+ fdev->dma_device.device_config = st_fdma_slave_config;
+ fdev->dma_device.device_pause = st_fdma_pause;
+ fdev->dma_device.device_resume = st_fdma_resume;
+
+ fdev->dma_device.src_addr_widths = FDMA_DMA_BUSWIDTHS;
+ fdev->dma_device.dst_addr_widths = FDMA_DMA_BUSWIDTHS;
+ fdev->dma_device.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
+ fdev->dma_device.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
+
+ ret = dma_async_device_register(&fdev->dma_device);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "Failed to register DMA device (%d)\n", ret);
+ goto err_rproc;
+ }
+
+ ret = of_dma_controller_register(np, st_fdma_of_xlate, fdev);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "Failed to register controller (%d)\n", ret);
+ goto err_dma_dev;
+ }
+
+ dev_info(&pdev->dev, "ST FDMA engine driver, irq:%d\n", fdev->irq);
+
+ return 0;
+
+err_dma_dev:
+ dma_async_device_unregister(&fdev->dma_device);
+err_rproc:
+ st_fdma_free(fdev);
+ st_slim_rproc_put(fdev->slim_rproc);
+err:
+ return ret;
+}
+
+static int st_fdma_remove(struct platform_device *pdev)
+{
+ struct st_fdma_dev *fdev = platform_get_drvdata(pdev);
+
+ devm_free_irq(&pdev->dev, fdev->irq, fdev);
+ st_slim_rproc_put(fdev->slim_rproc);
+ of_dma_controller_free(pdev->dev.of_node);
+ dma_async_device_unregister(&fdev->dma_device);
+
+ return 0;
+}
+
+static struct platform_driver st_fdma_platform_driver = {
+ .driver = {
+ .name = DRIVER_NAME,
+ .of_match_table = st_fdma_match,
+ },
+ .probe = st_fdma_probe,
+ .remove = st_fdma_remove,
+};
+module_platform_driver(st_fdma_platform_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("STMicroelectronics FDMA engine driver");
+MODULE_AUTHOR("Ludovic.barre <Ludovic.barre@st.com>");
+MODULE_AUTHOR("Peter Griffin <peter.griffin@linaro.org>");
+MODULE_ALIAS("platform: " DRIVER_NAME);
diff --git a/drivers/dma/st_fdma.h b/drivers/dma/st_fdma.h
new file mode 100644
index 000000000000..c58e00d4ab37
--- /dev/null
+++ b/drivers/dma/st_fdma.h
@@ -0,0 +1,249 @@
+/*
+ * DMA driver header for STMicroelectronics STi FDMA controller
+ *
+ * Copyright (C) 2014 STMicroelectronics
+ *
+ * Author: Ludovic Barre <Ludovic.barre@st.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+#ifndef __DMA_ST_FDMA_H
+#define __DMA_ST_FDMA_H
+
+#include <linux/dmaengine.h>
+#include <linux/dmapool.h>
+#include <linux/io.h>
+#include <linux/remoteproc/st_slim_rproc.h>
+#include "virt-dma.h"
+
+#define ST_FDMA_NR_DREQS 32
+#define FW_NAME_SIZE 30
+#define DRIVER_NAME "st-fdma"
+
+/**
+ * struct st_fdma_generic_node - Free running/paced generic node
+ *
+ * @length: Length in bytes of a line in a 2D mem to mem
+ * @sstride: Stride, in bytes, between source lines in a 2D data move
+ * @dstride: Stride, in bytes, between destination lines in a 2D data move
+ */
+struct st_fdma_generic_node {
+ u32 length;
+ u32 sstride;
+ u32 dstride;
+};
+
+/**
+ * struct st_fdma_hw_node - Node structure used by fdma hw
+ *
+ * @next: Pointer to next node
+ * @control: Transfer Control Parameters
+ * @nbytes: Number of Bytes to read
+ * @saddr: Source address
+ * @daddr: Destination address
+ *
+ * @generic: generic node for the free-running/paced transfer types.
+ * Two other transfer types are possible, but not yet implemented.
+ *
+ * The NODE structures must be aligned to a 32-byte boundary.
+ */
+struct st_fdma_hw_node {
+ u32 next;
+ u32 control;
+ u32 nbytes;
+ u32 saddr;
+ u32 daddr;
+ union {
+ struct st_fdma_generic_node generic;
+ };
+} __aligned(32);
+
+/*
+ * node control parameters
+ */
+#define FDMA_NODE_CTRL_REQ_MAP_MASK GENMASK(4, 0)
+#define FDMA_NODE_CTRL_REQ_MAP_FREE_RUN 0x0
+#define FDMA_NODE_CTRL_REQ_MAP_DREQ(n) ((n)&FDMA_NODE_CTRL_REQ_MAP_MASK)
+#define FDMA_NODE_CTRL_REQ_MAP_EXT FDMA_NODE_CTRL_REQ_MAP_MASK
+#define FDMA_NODE_CTRL_SRC_MASK GENMASK(6, 5)
+#define FDMA_NODE_CTRL_SRC_STATIC BIT(5)
+#define FDMA_NODE_CTRL_SRC_INCR BIT(6)
+#define FDMA_NODE_CTRL_DST_MASK GENMASK(8, 7)
+#define FDMA_NODE_CTRL_DST_STATIC BIT(7)
+#define FDMA_NODE_CTRL_DST_INCR BIT(8)
+#define FDMA_NODE_CTRL_SECURE BIT(15)
+#define FDMA_NODE_CTRL_PAUSE_EON BIT(30)
+#define FDMA_NODE_CTRL_INT_EON BIT(31)
+
+/**
+ * struct st_fdma_sw_node - descriptor structure for link list
+ *
+ * @pdesc: Physical address of desc
+ * @node: link used for putting this into a channel queue
+ */
+struct st_fdma_sw_node {
+ dma_addr_t pdesc;
+ struct st_fdma_hw_node *desc;
+};
+
+#define NAME_SZ 10
+
+struct st_fdma_driverdata {
+ u32 id;
+ char name[NAME_SZ];
+};
+
+struct st_fdma_desc {
+ struct virt_dma_desc vdesc;
+ struct st_fdma_chan *fchan;
+ bool iscyclic;
+ unsigned int n_nodes;
+ struct st_fdma_sw_node node[];
+};
+
+enum st_fdma_type {
+ ST_FDMA_TYPE_FREE_RUN,
+ ST_FDMA_TYPE_PACED,
+};
+
+struct st_fdma_cfg {
+ struct device_node *of_node;
+ enum st_fdma_type type;
+ dma_addr_t dev_addr;
+ enum dma_transfer_direction dir;
+ int req_line; /* request line */
+ long req_ctrl; /* Request control */
+};
+
+struct st_fdma_chan {
+ struct st_fdma_dev *fdev;
+ struct dma_pool *node_pool;
+ struct dma_slave_config scfg;
+ struct st_fdma_cfg cfg;
+
+ int dreq_line;
+
+ struct virt_dma_chan vchan;
+ struct st_fdma_desc *fdesc;
+ enum dma_status status;
+};
+
+struct st_fdma_dev {
+ struct device *dev;
+ const struct st_fdma_driverdata *drvdata;
+ struct dma_device dma_device;
+
+ struct st_slim_rproc *slim_rproc;
+
+ int irq;
+
+ struct st_fdma_chan *chans;
+
+ spinlock_t dreq_lock;
+ unsigned long dreq_mask;
+
+ u32 nr_channels;
+ char fw_name[FW_NAME_SIZE];
+};
+
+/* Peripheral registers */
+
+#define FDMA_CMD_STA_OFST 0xFC0
+#define FDMA_CMD_SET_OFST 0xFC4
+#define FDMA_CMD_CLR_OFST 0xFC8
+#define FDMA_CMD_MASK_OFST 0xFCC
+#define FDMA_CMD_START(ch) (0x1 << (ch << 1))
+#define FDMA_CMD_PAUSE(ch) (0x2 << (ch << 1))
+#define FDMA_CMD_FLUSH(ch) (0x3 << (ch << 1))
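
Each channel takes a two-bit command field, so `ch << 1` positions the opcode. A standalone check of the encoding:

	#include <stdio.h>

	#define FDMA_CMD_START(ch) (0x1 << ((ch) << 1))
	#define FDMA_CMD_PAUSE(ch) (0x2 << ((ch) << 1))

	int main(void)
	{
		printf("start ch2 = 0x%x\n", FDMA_CMD_START(2));	/* 0x10 */
		printf("pause ch2 = 0x%x\n", FDMA_CMD_PAUSE(2));	/* 0x20 */
		return 0;
	}
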
+
+#define FDMA_INT_STA_OFST 0xFD0
+#define FDMA_INT_STA_CH 0x1
+#define FDMA_INT_STA_ERR 0x2
+
+#define FDMA_INT_SET_OFST 0xFD4
+#define FDMA_INT_CLR_OFST 0xFD8
+#define FDMA_INT_MASK_OFST 0xFDC
+
+#define fdma_read(fdev, name) \
+ readl((fdev)->slim_rproc->peri + name)
+
+#define fdma_write(fdev, val, name) \
+ writel((val), (fdev)->slim_rproc->peri + name)
+
+/* fchan interface (dmem) */
+#define FDMA_CH_CMD_OFST 0x200
+#define FDMA_CH_CMD_STA_MASK GENMASK(1, 0)
+#define FDMA_CH_CMD_STA_IDLE (0x0)
+#define FDMA_CH_CMD_STA_START (0x1)
+#define FDMA_CH_CMD_STA_RUNNING (0x2)
+#define FDMA_CH_CMD_STA_PAUSED (0x3)
+#define FDMA_CH_CMD_ERR_MASK GENMASK(4, 2)
+#define FDMA_CH_CMD_ERR_INT (0x0 << 2)
+#define FDMA_CH_CMD_ERR_NAND (0x1 << 2)
+#define FDMA_CH_CMD_ERR_MCHI (0x2 << 2)
+#define FDMA_CH_CMD_DATA_MASK GENMASK(31, 5)
+#define fchan_read(fchan, name) \
+ readl((fchan)->fdev->slim_rproc->mem[ST_SLIM_DMEM].cpu_addr \
+ + (fchan)->vchan.chan.chan_id * 0x4 \
+ + name)
+
+#define fchan_write(fchan, val, name) \
+ writel((val), (fchan)->fdev->slim_rproc->mem[ST_SLIM_DMEM].cpu_addr \
+ + (fchan)->vchan.chan.chan_id * 0x4 \
+ + name)
+
+/* req interface */
+#define FDMA_REQ_CTRL_OFST 0x240
+#define dreq_write(fchan, val, name) \
+ writel((val), (fchan)->fdev->slim_rproc->mem[ST_SLIM_DMEM].cpu_addr \
+ + fchan->dreq_line * 0x04 \
+ + name)
+/* node interface */
+#define FDMA_NODE_SZ 128
+#define FDMA_PTRN_OFST 0x800
+#define FDMA_CNTN_OFST 0x808
+#define FDMA_SADDRN_OFST 0x80c
+#define FDMA_DADDRN_OFST 0x810
+#define fnode_read(fchan, name) \
+ readl((fchan)->fdev->slim_rproc->mem[ST_SLIM_DMEM].cpu_addr \
+ + (fchan)->vchan.chan.chan_id * FDMA_NODE_SZ \
+ + name)
+
+#define fnode_write(fchan, val, name) \
+ writel((val), (fchan)->fdev->slim_rproc->mem[ST_SLIM_DMEM].cpu_addr \
+ + (fchan)->vchan.chan.chan_id * FDMA_NODE_SZ \
+ + name)
+
+/*
+ * request control bits
+ */
+#define FDMA_REQ_CTRL_NUM_OPS_MASK GENMASK(31, 24)
+#define FDMA_REQ_CTRL_NUM_OPS(n) (FDMA_REQ_CTRL_NUM_OPS_MASK & \
+ ((n) << 24))
+#define FDMA_REQ_CTRL_INITIATOR_MASK BIT(22)
+#define FDMA_REQ_CTRL_INIT0 (0x0 << 22)
+#define FDMA_REQ_CTRL_INIT1 (0x1 << 22)
+#define FDMA_REQ_CTRL_INC_ADDR_ON BIT(21)
+#define FDMA_REQ_CTRL_DATA_SWAP_ON BIT(17)
+#define FDMA_REQ_CTRL_WNR BIT(14)
+#define FDMA_REQ_CTRL_OPCODE_MASK GENMASK(7, 4)
+#define FDMA_REQ_CTRL_OPCODE_LD_ST1 (0x0 << 4)
+#define FDMA_REQ_CTRL_OPCODE_LD_ST2 (0x1 << 4)
+#define FDMA_REQ_CTRL_OPCODE_LD_ST4 (0x2 << 4)
+#define FDMA_REQ_CTRL_OPCODE_LD_ST8 (0x3 << 4)
+#define FDMA_REQ_CTRL_OPCODE_LD_ST16 (0x4 << 4)
+#define FDMA_REQ_CTRL_OPCODE_LD_ST32 (0x5 << 4)
+#define FDMA_REQ_CTRL_OPCODE_LD_ST64 (0x6 << 4)
+#define FDMA_REQ_CTRL_HOLDOFF_MASK GENMASK(2, 0)
+#define FDMA_REQ_CTRL_HOLDOFF(n) ((n) & FDMA_REQ_CTRL_HOLDOFF_MASK)
+
+/* bits used by client to configure request control */
+#define FDMA_REQ_CTRL_CFG_MASK (FDMA_REQ_CTRL_HOLDOFF_MASK | \
+ FDMA_REQ_CTRL_DATA_SWAP_ON | \
+ FDMA_REQ_CTRL_INC_ADDR_ON | \
+ FDMA_REQ_CTRL_INITIATOR_MASK)
+
+#endif /* __DMA_ST_FDMA_H */
diff --git a/drivers/dma/stm32-dma.c b/drivers/dma/stm32-dma.c
index 307547f4848d..3688d0873a3e 100644
--- a/drivers/dma/stm32-dma.c
+++ b/drivers/dma/stm32-dma.c
@@ -527,13 +527,12 @@ static irqreturn_t stm32_dma_chan_irq(int irq, void *devid)
{
struct stm32_dma_chan *chan = devid;
struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan);
- u32 status, scr, sfcr;
+ u32 status, scr;
spin_lock(&chan->vchan.lock);
status = stm32_dma_irq_status(chan);
scr = stm32_dma_read(dmadev, STM32_DMA_SCR(chan->id));
- sfcr = stm32_dma_read(dmadev, STM32_DMA_SFCR(chan->id));
if ((status & STM32_DMA_TCI) && (scr & STM32_DMA_SCR_TCIE)) {
stm32_dma_irq_clear(chan, STM32_DMA_TCI);
@@ -574,15 +573,12 @@ static int stm32_dma_set_xfer_param(struct stm32_dma_chan *chan,
int src_bus_width, dst_bus_width;
int src_burst_size, dst_burst_size;
u32 src_maxburst, dst_maxburst;
- dma_addr_t src_addr, dst_addr;
u32 dma_scr = 0;
src_addr_width = chan->dma_sconfig.src_addr_width;
dst_addr_width = chan->dma_sconfig.dst_addr_width;
src_maxburst = chan->dma_sconfig.src_maxburst;
dst_maxburst = chan->dma_sconfig.dst_maxburst;
- src_addr = chan->dma_sconfig.src_addr;
- dst_addr = chan->dma_sconfig.dst_addr;
switch (direction) {
case DMA_MEM_TO_DEV:
diff --git a/drivers/dma/zx296702_dma.c b/drivers/dma/zx296702_dma.c
index 245d759d5ffc..380276d078b2 100644
--- a/drivers/dma/zx296702_dma.c
+++ b/drivers/dma/zx296702_dma.c
@@ -435,13 +435,12 @@ static struct zx_dma_desc_sw *zx_alloc_desc_resource(int num,
if (!ds)
return NULL;
- ds->desc_hw = dma_pool_alloc(d->pool, GFP_NOWAIT, &ds->desc_hw_lli);
+ ds->desc_hw = dma_pool_zalloc(d->pool, GFP_NOWAIT, &ds->desc_hw_lli);
if (!ds->desc_hw) {
dev_dbg(chan->device->dev, "vch %p: dma alloc fail\n", &c->vc);
kfree(ds);
return NULL;
}
- memset(ds->desc_hw, 0, sizeof(struct zx_desc_hw) * num);
ds->desc_num = num;
return ds;
}
diff --git a/drivers/edac/altera_edac.c b/drivers/edac/altera_edac.c
index 58d3e2b39b5b..c5a5b91f37f0 100644
--- a/drivers/edac/altera_edac.c
+++ b/drivers/edac/altera_edac.c
@@ -35,7 +35,6 @@
#include <linux/uaccess.h>
#include "altera_edac.h"
-#include "edac_core.h"
#include "edac_module.h"
#define EDAC_MOD_STR "altera_edac"
@@ -153,13 +152,17 @@ static ssize_t altr_sdr_mc_err_inject_write(struct file *file,
if (count == 3) {
edac_printk(KERN_ALERT, EDAC_MC,
"Inject Double bit error\n");
+ local_irq_disable();
regmap_write(drvdata->mc_vbase, priv->ce_ue_trgr_offset,
(read_reg | priv->ue_set_mask));
+ local_irq_enable();
} else {
edac_printk(KERN_ALERT, EDAC_MC,
"Inject Single bit error\n");
+ local_irq_disable();
regmap_write(drvdata->mc_vbase, priv->ce_ue_trgr_offset,
(read_reg | priv->ce_set_mask));
+ local_irq_enable();
}
ptemp[0] = 0x5A5A5A5A;
diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
index ee181c53626f..260251177830 100644
--- a/drivers/edac/amd64_edac.c
+++ b/drivers/edac/amd64_edac.c
@@ -164,8 +164,23 @@ static inline int amd64_read_dct_pci_cfg(struct amd64_pvt *pvt, u8 dct,
* other archs, we might not have access to the caches directly.
*/
+static inline void __f17h_set_scrubval(struct amd64_pvt *pvt, u32 scrubval)
+{
+ /*
+ * Fam17h supports scrub values between 0x5 and 0x14. Also, the values
+ * are shifted down by 0x5, so scrubval 0x5 is written to the register
+ * as 0x0, scrubval 0x6 as 0x1, etc.
+ */
+ if (scrubval >= 0x5 && scrubval <= 0x14) {
+ scrubval -= 0x5;
+ pci_write_bits32(pvt->F6, F17H_SCR_LIMIT_ADDR, scrubval, 0xF);
+ pci_write_bits32(pvt->F6, F17H_SCR_BASE_ADDR, 1, 0x1);
+ } else {
+ pci_write_bits32(pvt->F6, F17H_SCR_BASE_ADDR, 0, 0x1);
+ }
+}
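
A standalone check of the Fam17h mapping described in the comment above: accepted scrub values 0x5..0x14 are stored biased down by 0x5 in a 4-bit field, and the read path (see get_scrub_rate() below) adds 0x5 back.

	#include <stdio.h>

	int main(void)
	{
		unsigned int scrubval = 0x6;
		unsigned int reg = (scrubval - 0x5) & 0xF;	/* written as 0x1 */
		unsigned int back = (reg & 0xF) + 0x5;		/* read back 0x6 */

		printf("reg=0x%x readback=0x%x\n", reg, back);
		return 0;
	}
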
/*
- * scan the scrub rate mapping table for a close or matching bandwidth value to
+ * Scan the scrub rate mapping table for a close or matching bandwidth value to
* issue. If requested is too big, then use last maximum value found.
*/
static int __set_scrub_rate(struct amd64_pvt *pvt, u32 new_bw, u32 min_rate)
@@ -196,7 +211,9 @@ static int __set_scrub_rate(struct amd64_pvt *pvt, u32 new_bw, u32 min_rate)
scrubval = scrubrates[i].scrubval;
- if (pvt->fam == 0x15 && pvt->model == 0x60) {
+ if (pvt->fam == 0x17) {
+ __f17h_set_scrubval(pvt, scrubval);
+ } else if (pvt->fam == 0x15 && pvt->model == 0x60) {
f15h_select_dct(pvt, 0);
pci_write_bits32(pvt->F2, F15H_M60H_SCRCTRL, scrubval, 0x001F);
f15h_select_dct(pvt, 1);
@@ -233,18 +250,34 @@ static int set_scrub_rate(struct mem_ctl_info *mci, u32 bw)
static int get_scrub_rate(struct mem_ctl_info *mci)
{
struct amd64_pvt *pvt = mci->pvt_info;
- u32 scrubval = 0;
int i, retval = -EINVAL;
+ u32 scrubval = 0;
- if (pvt->fam == 0x15) {
+ switch (pvt->fam) {
+ case 0x15:
/* Erratum #505 */
if (pvt->model < 0x10)
f15h_select_dct(pvt, 0);
if (pvt->model == 0x60)
amd64_read_pci_cfg(pvt->F2, F15H_M60H_SCRCTRL, &scrubval);
- } else
+ break;
+
+ case 0x17:
+ amd64_read_pci_cfg(pvt->F6, F17H_SCR_BASE_ADDR, &scrubval);
+ if (scrubval & BIT(0)) {
+ amd64_read_pci_cfg(pvt->F6, F17H_SCR_LIMIT_ADDR, &scrubval);
+ scrubval &= 0xF;
+ scrubval += 0x5;
+ } else {
+ scrubval = 0;
+ }
+ break;
+
+ default:
amd64_read_pci_cfg(pvt->F3, SCRCTRL, &scrubval);
+ break;
+ }
scrubval = scrubval & 0x001F;
@@ -682,15 +715,33 @@ static int get_channel_from_ecc_syndrome(struct mem_ctl_info *, u16);
*/
static unsigned long determine_edac_cap(struct amd64_pvt *pvt)
{
- u8 bit;
unsigned long edac_cap = EDAC_FLAG_NONE;
+ u8 bit;
- bit = (pvt->fam > 0xf || pvt->ext_model >= K8_REV_F)
- ? 19
- : 17;
+ if (pvt->umc) {
+ u8 i, umc_en_mask = 0, dimm_ecc_en_mask = 0;
- if (pvt->dclr0 & BIT(bit))
- edac_cap = EDAC_FLAG_SECDED;
+ for (i = 0; i < NUM_UMCS; i++) {
+ if (!(pvt->umc[i].sdp_ctrl & UMC_SDP_INIT))
+ continue;
+
+ umc_en_mask |= BIT(i);
+
+ /* UMC Configuration bit 12 (DimmEccEn) */
+ if (pvt->umc[i].umc_cfg & BIT(12))
+ dimm_ecc_en_mask |= BIT(i);
+ }
+
+ if (umc_en_mask == dimm_ecc_en_mask)
+ edac_cap = EDAC_FLAG_SECDED;
+ } else {
+ bit = (pvt->fam > 0xf || pvt->ext_model >= K8_REV_F)
+ ? 19
+ : 17;
+
+ if (pvt->dclr0 & BIT(bit))
+ edac_cap = EDAC_FLAG_SECDED;
+ }
return edac_cap;
}
@@ -729,8 +780,75 @@ static void debug_dump_dramcfg_low(struct amd64_pvt *pvt, u32 dclr, int chan)
(dclr & BIT(15)) ? "yes" : "no");
}
+static void debug_display_dimm_sizes_df(struct amd64_pvt *pvt, u8 ctrl)
+{
+ u32 *dcsb = ctrl ? pvt->csels[1].csbases : pvt->csels[0].csbases;
+ int dimm, size0, size1;
+
+ edac_printk(KERN_DEBUG, EDAC_MC, "UMC%d chip selects:\n", ctrl);
+
+ for (dimm = 0; dimm < 4; dimm++) {
+ size0 = 0;
+
+ if (dcsb[dimm*2] & DCSB_CS_ENABLE)
+ size0 = pvt->ops->dbam_to_cs(pvt, ctrl, 0, dimm);
+
+ size1 = 0;
+ if (dcsb[dimm*2 + 1] & DCSB_CS_ENABLE)
+ size1 = pvt->ops->dbam_to_cs(pvt, ctrl, 0, dimm);
+
+ amd64_info(EDAC_MC ": %d: %5dMB %d: %5dMB\n",
+ dimm * 2, size0,
+ dimm * 2 + 1, size1);
+ }
+}
+
+static void __dump_misc_regs_df(struct amd64_pvt *pvt)
+{
+ struct amd64_umc *umc;
+ u32 i, tmp, umc_base;
+
+ for (i = 0; i < NUM_UMCS; i++) {
+ umc_base = get_umc_base(i);
+ umc = &pvt->umc[i];
+
+ edac_dbg(1, "UMC%d DIMM cfg: 0x%x\n", i, umc->dimm_cfg);
+ edac_dbg(1, "UMC%d UMC cfg: 0x%x\n", i, umc->umc_cfg);
+ edac_dbg(1, "UMC%d SDP ctrl: 0x%x\n", i, umc->sdp_ctrl);
+ edac_dbg(1, "UMC%d ECC ctrl: 0x%x\n", i, umc->ecc_ctrl);
+
+ amd_smn_read(pvt->mc_node_id, umc_base + UMCCH_ECC_BAD_SYMBOL, &tmp);
+ edac_dbg(1, "UMC%d ECC bad symbol: 0x%x\n", i, tmp);
+
+ amd_smn_read(pvt->mc_node_id, umc_base + UMCCH_UMC_CAP, &tmp);
+ edac_dbg(1, "UMC%d UMC cap: 0x%x\n", i, tmp);
+ edac_dbg(1, "UMC%d UMC cap high: 0x%x\n", i, umc->umc_cap_hi);
+
+ edac_dbg(1, "UMC%d ECC capable: %s, ChipKill ECC capable: %s\n",
+ i, (umc->umc_cap_hi & BIT(30)) ? "yes" : "no",
+ (umc->umc_cap_hi & BIT(31)) ? "yes" : "no");
+ edac_dbg(1, "UMC%d All DIMMs support ECC: %s\n",
+ i, (umc->umc_cfg & BIT(12)) ? "yes" : "no");
+ edac_dbg(1, "UMC%d x4 DIMMs present: %s\n",
+ i, (umc->dimm_cfg & BIT(6)) ? "yes" : "no");
+ edac_dbg(1, "UMC%d x16 DIMMs present: %s\n",
+ i, (umc->dimm_cfg & BIT(7)) ? "yes" : "no");
+
+ if (pvt->dram_type == MEM_LRDDR4) {
+ amd_smn_read(pvt->mc_node_id, umc_base + UMCCH_ADDR_CFG, &tmp);
+ edac_dbg(1, "UMC%d LRDIMM %dx rank multiply\n",
+ i, 1 << ((tmp >> 4) & 0x3));
+ }
+
+ debug_display_dimm_sizes_df(pvt, i);
+ }
+
+ edac_dbg(1, "F0x104 (DRAM Hole Address): 0x%08x, base: 0x%08x\n",
+ pvt->dhar, dhar_base(pvt));
+}
+
/* Display and decode various NB registers for debug purposes. */
-static void dump_misc_regs(struct amd64_pvt *pvt)
+static void __dump_misc_regs(struct amd64_pvt *pvt)
{
edac_dbg(1, "F3xE8 (NB Cap): 0x%08x\n", pvt->nbcap);
@@ -750,8 +868,6 @@ static void dump_misc_regs(struct amd64_pvt *pvt)
(pvt->fam == 0xf) ? k8_dhar_offset(pvt)
: f10_dhar_offset(pvt));
- edac_dbg(1, " DramHoleValid: %s\n", dhar_valid(pvt) ? "yes" : "no");
-
debug_display_dimm_sizes(pvt, 0);
/* everything below this point is Fam10h and above */
@@ -760,13 +876,25 @@ static void dump_misc_regs(struct amd64_pvt *pvt)
debug_display_dimm_sizes(pvt, 1);
- amd64_info("using %s syndromes.\n", ((pvt->ecc_sym_sz == 8) ? "x8" : "x4"));
-
/* Only if NOT ganged does dclr1 have valid info */
if (!dct_ganging_enabled(pvt))
debug_dump_dramcfg_low(pvt, pvt->dclr1, 1);
}
+/* Display and decode various NB registers for debug purposes. */
+static void dump_misc_regs(struct amd64_pvt *pvt)
+{
+ if (pvt->umc)
+ __dump_misc_regs_df(pvt);
+ else
+ __dump_misc_regs(pvt);
+
+ edac_dbg(1, " DramHoleValid: %s\n", dhar_valid(pvt) ? "yes" : "no");
+
+ amd64_info("using %s syndromes.\n",
+ ((pvt->ecc_sym_sz == 8) ? "x8" : "x4"));
+}
+
/*
* See BKDG, F2x[1,0][5C:40], F2[1,0][6C:60]
*/
@@ -789,46 +917,78 @@ static void prep_chip_selects(struct amd64_pvt *pvt)
*/
static void read_dct_base_mask(struct amd64_pvt *pvt)
{
- int cs;
+ int base_reg0, base_reg1, mask_reg0, mask_reg1, cs;
prep_chip_selects(pvt);
+ if (pvt->umc) {
+ base_reg0 = get_umc_base(0) + UMCCH_BASE_ADDR;
+ base_reg1 = get_umc_base(1) + UMCCH_BASE_ADDR;
+ mask_reg0 = get_umc_base(0) + UMCCH_ADDR_MASK;
+ mask_reg1 = get_umc_base(1) + UMCCH_ADDR_MASK;
+ } else {
+ base_reg0 = DCSB0;
+ base_reg1 = DCSB1;
+ mask_reg0 = DCSM0;
+ mask_reg1 = DCSM1;
+ }
+
for_each_chip_select(cs, 0, pvt) {
- int reg0 = DCSB0 + (cs * 4);
- int reg1 = DCSB1 + (cs * 4);
+ int reg0 = base_reg0 + (cs * 4);
+ int reg1 = base_reg1 + (cs * 4);
u32 *base0 = &pvt->csels[0].csbases[cs];
u32 *base1 = &pvt->csels[1].csbases[cs];
- if (!amd64_read_dct_pci_cfg(pvt, 0, reg0, base0))
- edac_dbg(0, " DCSB0[%d]=0x%08x reg: F2x%x\n",
- cs, *base0, reg0);
+ if (pvt->umc) {
+ if (!amd_smn_read(pvt->mc_node_id, reg0, base0))
+ edac_dbg(0, " DCSB0[%d]=0x%08x reg: 0x%x\n",
+ cs, *base0, reg0);
- if (pvt->fam == 0xf)
- continue;
+ if (!amd_smn_read(pvt->mc_node_id, reg1, base1))
+ edac_dbg(0, " DCSB1[%d]=0x%08x reg: 0x%x\n",
+ cs, *base1, reg1);
+ } else {
+ if (!amd64_read_dct_pci_cfg(pvt, 0, reg0, base0))
+ edac_dbg(0, " DCSB0[%d]=0x%08x reg: F2x%x\n",
+ cs, *base0, reg0);
+
+ if (pvt->fam == 0xf)
+ continue;
- if (!amd64_read_dct_pci_cfg(pvt, 1, reg0, base1))
- edac_dbg(0, " DCSB1[%d]=0x%08x reg: F2x%x\n",
- cs, *base1, (pvt->fam == 0x10) ? reg1
+ if (!amd64_read_dct_pci_cfg(pvt, 1, reg0, base1))
+ edac_dbg(0, " DCSB1[%d]=0x%08x reg: F2x%x\n",
+ cs, *base1, (pvt->fam == 0x10) ? reg1
: reg0);
+ }
}
for_each_chip_select_mask(cs, 0, pvt) {
- int reg0 = DCSM0 + (cs * 4);
- int reg1 = DCSM1 + (cs * 4);
+ int reg0 = mask_reg0 + (cs * 4);
+ int reg1 = mask_reg1 + (cs * 4);
u32 *mask0 = &pvt->csels[0].csmasks[cs];
u32 *mask1 = &pvt->csels[1].csmasks[cs];
- if (!amd64_read_dct_pci_cfg(pvt, 0, reg0, mask0))
- edac_dbg(0, " DCSM0[%d]=0x%08x reg: F2x%x\n",
- cs, *mask0, reg0);
+ if (pvt->umc) {
+ if (!amd_smn_read(pvt->mc_node_id, reg0, mask0))
+ edac_dbg(0, " DCSM0[%d]=0x%08x reg: 0x%x\n",
+ cs, *mask0, reg0);
- if (pvt->fam == 0xf)
- continue;
+ if (!amd_smn_read(pvt->mc_node_id, reg1, mask1))
+ edac_dbg(0, " DCSM1[%d]=0x%08x reg: 0x%x\n",
+ cs, *mask1, reg1);
+ } else {
+ if (!amd64_read_dct_pci_cfg(pvt, 0, reg0, mask0))
+ edac_dbg(0, " DCSM0[%d]=0x%08x reg: F2x%x\n",
+ cs, *mask0, reg0);
+
+ if (pvt->fam == 0xf)
+ continue;
- if (!amd64_read_dct_pci_cfg(pvt, 1, reg0, mask1))
- edac_dbg(0, " DCSM1[%d]=0x%08x reg: F2x%x\n",
- cs, *mask1, (pvt->fam == 0x10) ? reg1
+ if (!amd64_read_dct_pci_cfg(pvt, 1, reg0, mask1))
+ edac_dbg(0, " DCSM1[%d]=0x%08x reg: F2x%x\n",
+ cs, *mask1, (pvt->fam == 0x10) ? reg1
: reg0);
+ }
}
}
@@ -881,6 +1041,15 @@ static void determine_memory_type(struct amd64_pvt *pvt)
case 0x16:
goto ddr3;
+ case 0x17:
+ if ((pvt->umc[0].dimm_cfg | pvt->umc[1].dimm_cfg) & BIT(5))
+ pvt->dram_type = MEM_LRDDR4;
+ else if ((pvt->umc[0].dimm_cfg | pvt->umc[1].dimm_cfg) & BIT(4))
+ pvt->dram_type = MEM_RDDR4;
+ else
+ pvt->dram_type = MEM_DDR4;
+ return;
+
default:
WARN(1, KERN_ERR "%s: Family??? 0x%x\n", __func__, pvt->fam);
pvt->dram_type = MEM_EMPTY;
@@ -1210,6 +1379,19 @@ static int f1x_early_channel_count(struct amd64_pvt *pvt)
return channels;
}
+static int f17_early_channel_count(struct amd64_pvt *pvt)
+{
+ int i, channels = 0;
+
+ /* SDP Control bit 31 (SdpInit) is clear for unused UMC channels */
+ for (i = 0; i < NUM_UMCS; i++)
+ channels += !!(pvt->umc[i].sdp_ctrl & UMC_SDP_INIT);
+
+ amd64_info("MCT channel count: %d\n", channels);
+
+ return channels;
+}
+
static int ddr3_cs_size(unsigned i, bool dct_width)
{
unsigned shift = 0;
@@ -1337,6 +1519,23 @@ static int f16_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
return ddr3_cs_size(cs_mode, false);
}
+static int f17_base_addr_to_cs_size(struct amd64_pvt *pvt, u8 umc,
+ unsigned int cs_mode, int csrow_nr)
+{
+ u32 base_addr = pvt->csels[umc].csbases[csrow_nr];
+
+ /* Each mask is used for every two base addresses. */
+ u32 addr_mask = pvt->csels[umc].csmasks[csrow_nr >> 1];
+
+ /* Register [31:1] = Address [39:9]. Size is in kBs here. */
+ u32 size = ((addr_mask >> 1) - (base_addr >> 1) + 1) >> 1;
+
+ edac_dbg(1, "BaseAddr: 0x%x, AddrMask: 0x%x\n", base_addr, addr_mask);
+
+ /* Return size in MBs. */
+ return size >> 10;
+}
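
A worked example of the size computation above, using made-up register values: base/mask bits [31:1] map to address bits [39:9], so the difference counts 512-byte units; the final >>1 yields kB and >>10 yields MB.

	#include <stdio.h>

	int main(void)
	{
		unsigned int base_addr = 0x0;
		unsigned int addr_mask = 0x1fffffe;	/* hypothetical */
		unsigned int size_kb =
			((addr_mask >> 1) - (base_addr >> 1) + 1) >> 1;

		printf("%u MB\n", size_kb >> 10);	/* 8192 MB */
		return 0;
	}
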
+
static void read_dram_ctl_register(struct amd64_pvt *pvt)
{
@@ -1897,8 +2096,9 @@ static void debug_display_dimm_sizes(struct amd64_pvt *pvt, u8 ctrl)
size0 = 0;
if (dcsb[dimm*2] & DCSB_CS_ENABLE)
- /* For f15m60h, need multiplier for LRDIMM cs_size
- * calculation. We pass 'dimm' value to the dbam_to_cs
+ /*
+ * For F15m60h, we need multiplier for LRDIMM cs_size
+ * calculation. We pass dimm value to the dbam_to_cs
* mapper so we can find the multiplier from the
* corresponding DCSM.
*/
@@ -1989,6 +2189,15 @@ static struct amd64_family_type family_types[] = {
.dbam_to_cs = f16_dbam_to_chip_select,
}
},
+ [F17_CPUS] = {
+ .ctl_name = "F17h",
+ .f0_id = PCI_DEVICE_ID_AMD_17H_DF_F0,
+ .f6_id = PCI_DEVICE_ID_AMD_17H_DF_F6,
+ .ops = {
+ .early_channel_count = f17_early_channel_count,
+ .dbam_to_cs = f17_base_addr_to_cs_size,
+ }
+ },
};
/*
@@ -2155,7 +2364,7 @@ static int get_channel_from_ecc_syndrome(struct mem_ctl_info *mci, u16 syndrome)
return map_err_sym_to_channel(err_sym, pvt->ecc_sym_sz);
}
-static void __log_bus_error(struct mem_ctl_info *mci, struct err_info *err,
+static void __log_ecc_error(struct mem_ctl_info *mci, struct err_info *err,
u8 ecc_type)
{
enum hw_event_mc_err_type err_type;
@@ -2165,6 +2374,8 @@ static void __log_bus_error(struct mem_ctl_info *mci, struct err_info *err,
err_type = HW_EVENT_ERR_CORRECTED;
else if (ecc_type == 1)
err_type = HW_EVENT_ERR_UNCORRECTED;
+ else if (ecc_type == 3)
+ err_type = HW_EVENT_ERR_DEFERRED;
else {
WARN(1, "Something is rotten in the state of Denmark.\n");
return;
@@ -2181,7 +2392,13 @@ static void __log_bus_error(struct mem_ctl_info *mci, struct err_info *err,
string = "Failed to map error addr to a csrow";
break;
case ERR_CHANNEL:
- string = "unknown syndrome - possible error reporting race";
+ string = "Unknown syndrome - possible error reporting race";
+ break;
+ case ERR_SYND:
+ string = "MCA_SYND not valid - unknown syndrome and csrow";
+ break;
+ case ERR_NORM_ADDR:
+ string = "Cannot decode normalized address";
break;
default:
string = "WTF error";
@@ -2227,36 +2444,127 @@ static inline void decode_bus_error(int node_id, struct mce *m)
pvt->ops->map_sysaddr_to_csrow(mci, sys_addr, &err);
- __log_bus_error(mci, &err, ecc_type);
+ __log_ecc_error(mci, &err, ecc_type);
+}
+
+/*
+ * To find the UMC channel represented by this bank we need to match on its
+ * instance_id. The instance_id of a bank is held in the lower 32 bits of its
+ * IPID.
+ */
+static int find_umc_channel(struct amd64_pvt *pvt, struct mce *m)
+{
+ u32 umc_instance_id[] = {0x50f00, 0x150f00};
+ u32 instance_id = m->ipid & GENMASK(31, 0);
+ int i, channel = -1;
+
+ for (i = 0; i < ARRAY_SIZE(umc_instance_id); i++)
+ if (umc_instance_id[i] == instance_id)
+ channel = i;
+
+ return channel;
+}
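
A standalone sketch of the lookup above, with a made-up IPID value: mask the IPID to its low 32 bits and match against the two known per-channel instance IDs.

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		uint64_t ipid = 0xdead00000150f00ULL;	/* hypothetical MCA IPID */
		uint32_t ids[] = { 0x50f00, 0x150f00 };
		uint32_t inst = (uint32_t)(ipid & 0xffffffffULL);
		int i, channel = -1;

		for (i = 0; i < 2; i++)
			if (ids[i] == inst)
				channel = i;
		printf("channel = %d\n", channel);	/* 1 */
		return 0;
	}
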
+
+static void decode_umc_error(int node_id, struct mce *m)
+{
+ u8 ecc_type = (m->status >> 45) & 0x3;
+ struct mem_ctl_info *mci;
+ struct amd64_pvt *pvt;
+ struct err_info err;
+ u64 sys_addr;
+
+ mci = edac_mc_find(node_id);
+ if (!mci)
+ return;
+
+ pvt = mci->pvt_info;
+
+ memset(&err, 0, sizeof(err));
+
+ if (m->status & MCI_STATUS_DEFERRED)
+ ecc_type = 3;
+
+ err.channel = find_umc_channel(pvt, m);
+ if (err.channel < 0) {
+ err.err_code = ERR_CHANNEL;
+ goto log_error;
+ }
+
+ if (umc_normaddr_to_sysaddr(m->addr, pvt->mc_node_id, err.channel, &sys_addr)) {
+ err.err_code = ERR_NORM_ADDR;
+ goto log_error;
+ }
+
+ error_address_to_page_and_offset(sys_addr, &err);
+
+ if (!(m->status & MCI_STATUS_SYNDV)) {
+ err.err_code = ERR_SYND;
+ goto log_error;
+ }
+
+ if (ecc_type == 2) {
+ u8 length = (m->synd >> 18) & 0x3f;
+
+ if (length)
+ err.syndrome = (m->synd >> 32) & GENMASK(length - 1, 0);
+ else
+ err.err_code = ERR_CHANNEL;
+ }
+
+ err.csrow = m->synd & 0x7;
+
+log_error:
+ __log_ecc_error(mci, &err, ecc_type);
}
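
A worked sketch of the syndrome extraction in decode_umc_error() above, with made-up MCA_SYND contents: bits [23:18] give the valid syndrome length, and the syndrome itself starts at bit 32 and is masked to that length.

	#include <stdio.h>
	#include <stdint.h>

	#define GENMASK64(h, l) (((~0ULL) << (l)) & (~0ULL >> (63 - (h))))

	int main(void)
	{
		uint64_t synd = (0x8ULL << 18) | (0xabULL << 32);
		uint8_t length = (synd >> 18) & 0x3f;	/* 8 valid bits */
		uint64_t syndrome = (synd >> 32) & GENMASK64(length - 1, 0);

		printf("len=%u syndrome=0x%llx\n", length,
		       (unsigned long long)syndrome);	/* len=8 0xab */
		return 0;
	}
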
/*
* Use pvt->F3 which contains the F3 CPU PCI device to get the related
* F1 (AddrMap) and F2 (Dct) devices. Return negative value on error.
+ * Reserve F0 and F6 on systems with a UMC.
*/
-static int reserve_mc_sibling_devs(struct amd64_pvt *pvt, u16 f1_id, u16 f2_id)
-{
+static int
+reserve_mc_sibling_devs(struct amd64_pvt *pvt, u16 pci_id1, u16 pci_id2)
+{
+ if (pvt->umc) {
+ pvt->F0 = pci_get_related_function(pvt->F3->vendor, pci_id1, pvt->F3);
+ if (!pvt->F0) {
+ amd64_err("F0 not found, device 0x%x (broken BIOS?)\n", pci_id1);
+ return -ENODEV;
+ }
+
+ pvt->F6 = pci_get_related_function(pvt->F3->vendor, pci_id2, pvt->F3);
+ if (!pvt->F6) {
+ pci_dev_put(pvt->F0);
+ pvt->F0 = NULL;
+
+ amd64_err("F6 not found: device 0x%x (broken BIOS?)\n", pci_id2);
+ return -ENODEV;
+ }
+
+ edac_dbg(1, "F0: %s\n", pci_name(pvt->F0));
+ edac_dbg(1, "F3: %s\n", pci_name(pvt->F3));
+ edac_dbg(1, "F6: %s\n", pci_name(pvt->F6));
+
+ return 0;
+ }
+
/* Reserve the ADDRESS MAP Device */
- pvt->F1 = pci_get_related_function(pvt->F3->vendor, f1_id, pvt->F3);
+ pvt->F1 = pci_get_related_function(pvt->F3->vendor, pci_id1, pvt->F3);
if (!pvt->F1) {
- amd64_err("error address map device not found: "
- "vendor %x device 0x%x (broken BIOS?)\n",
- PCI_VENDOR_ID_AMD, f1_id);
+ amd64_err("F1 not found: device 0x%x (broken BIOS?)\n", pci_id1);
return -ENODEV;
}
/* Reserve the DCT Device */
- pvt->F2 = pci_get_related_function(pvt->F3->vendor, f2_id, pvt->F3);
+ pvt->F2 = pci_get_related_function(pvt->F3->vendor, pci_id2, pvt->F3);
if (!pvt->F2) {
pci_dev_put(pvt->F1);
pvt->F1 = NULL;
- amd64_err("error F2 device not found: "
- "vendor %x device 0x%x (broken BIOS?)\n",
- PCI_VENDOR_ID_AMD, f2_id);
-
+ amd64_err("F2 not found: device 0x%x (broken BIOS?)\n", pci_id2);
return -ENODEV;
}
+
edac_dbg(1, "F1: %s\n", pci_name(pvt->F1));
edac_dbg(1, "F2: %s\n", pci_name(pvt->F2));
edac_dbg(1, "F3: %s\n", pci_name(pvt->F3));
@@ -2266,8 +2574,69 @@ static int reserve_mc_sibling_devs(struct amd64_pvt *pvt, u16 f1_id, u16 f2_id)
static void free_mc_sibling_devs(struct amd64_pvt *pvt)
{
- pci_dev_put(pvt->F1);
- pci_dev_put(pvt->F2);
+ if (pvt->umc) {
+ pci_dev_put(pvt->F0);
+ pci_dev_put(pvt->F6);
+ } else {
+ pci_dev_put(pvt->F1);
+ pci_dev_put(pvt->F2);
+ }
+}
+
+static void determine_ecc_sym_sz(struct amd64_pvt *pvt)
+{
+ pvt->ecc_sym_sz = 4;
+
+ if (pvt->umc) {
+ u8 i;
+
+ for (i = 0; i < NUM_UMCS; i++) {
+ /* Check enabled channels only: */
+ if ((pvt->umc[i].sdp_ctrl & UMC_SDP_INIT) &&
+ (pvt->umc[i].ecc_ctrl & BIT(7))) {
+ pvt->ecc_sym_sz = 8;
+ break;
+ }
+ }
+
+ return;
+ }
+
+ if (pvt->fam >= 0x10) {
+ u32 tmp;
+
+ amd64_read_pci_cfg(pvt->F3, EXT_NB_MCA_CFG, &tmp);
+ /* F16h has only DCT0, so no need to read dbam1. */
+ if (pvt->fam != 0x16)
+ amd64_read_dct_pci_cfg(pvt, 1, DBAM0, &pvt->dbam1);
+
+ /* F10h, revD and later can do x8 ECC too. */
+ if ((pvt->fam > 0x10 || pvt->model > 7) && tmp & BIT(25))
+ pvt->ecc_sym_sz = 8;
+ }
+}
+
+/*
+ * Retrieve the hardware registers of the memory controller.
+ */
+static void __read_mc_regs_df(struct amd64_pvt *pvt)
+{
+ u8 nid = pvt->mc_node_id;
+ struct amd64_umc *umc;
+ u32 i, umc_base;
+
+ /* Read registers from each UMC */
+ for (i = 0; i < NUM_UMCS; i++) {
+
+ umc_base = get_umc_base(i);
+ umc = &pvt->umc[i];
+
+ amd_smn_read(nid, umc_base + UMCCH_DIMM_CFG, &umc->dimm_cfg);
+ amd_smn_read(nid, umc_base + UMCCH_UMC_CFG, &umc->umc_cfg);
+ amd_smn_read(nid, umc_base + UMCCH_SDP_CTRL, &umc->sdp_ctrl);
+ amd_smn_read(nid, umc_base + UMCCH_ECC_CTRL, &umc->ecc_ctrl);
+ amd_smn_read(nid, umc_base + UMCCH_UMC_CAP_HI, &umc->umc_cap_hi);
+ }
}
/*
@@ -2276,24 +2645,31 @@ static void free_mc_sibling_devs(struct amd64_pvt *pvt)
*/
static void read_mc_regs(struct amd64_pvt *pvt)
{
- unsigned range;
+ unsigned int range;
u64 msr_val;
- u32 tmp;
/*
* Retrieve TOP_MEM and TOP_MEM2; no masking off of reserved bits since
- * those are Read-As-Zero
+ * those are Read-As-Zero.
*/
rdmsrl(MSR_K8_TOP_MEM1, pvt->top_mem);
edac_dbg(0, " TOP_MEM: 0x%016llx\n", pvt->top_mem);
- /* check first whether TOP_MEM2 is enabled */
+ /* Check first whether TOP_MEM2 is enabled: */
rdmsrl(MSR_K8_SYSCFG, msr_val);
- if (msr_val & (1U << 21)) {
+ if (msr_val & BIT(21)) {
rdmsrl(MSR_K8_TOP_MEM2, pvt->top_mem2);
edac_dbg(0, " TOP_MEM2: 0x%016llx\n", pvt->top_mem2);
- } else
+ } else {
edac_dbg(0, " TOP_MEM2 disabled\n");
+ }
+
+ if (pvt->umc) {
+ __read_mc_regs_df(pvt);
+ amd64_read_pci_cfg(pvt->F0, DF_DHAR, &pvt->dhar);
+
+ goto skip;
+ }
amd64_read_pci_cfg(pvt->F3, NBCAP, &pvt->nbcap);
@@ -2322,8 +2698,6 @@ static void read_mc_regs(struct amd64_pvt *pvt)
dram_dst_node(pvt, range));
}
- read_dct_base_mask(pvt);
-
amd64_read_pci_cfg(pvt->F1, DHAR, &pvt->dhar);
amd64_read_dct_pci_cfg(pvt, 0, DBAM0, &pvt->dbam0);
@@ -2337,20 +2711,14 @@ static void read_mc_regs(struct amd64_pvt *pvt)
amd64_read_dct_pci_cfg(pvt, 1, DCHR0, &pvt->dchr1);
}
- pvt->ecc_sym_sz = 4;
+skip:
+ read_dct_base_mask(pvt);
+
determine_memory_type(pvt);
edac_dbg(1, " DIMM type: %s\n", edac_mem_types[pvt->dram_type]);
- if (pvt->fam >= 0x10) {
- amd64_read_pci_cfg(pvt->F3, EXT_NB_MCA_CFG, &tmp);
- /* F16h has only DCT0, so no need to read dbam1 */
- if (pvt->fam != 0x16)
- amd64_read_dct_pci_cfg(pvt, 1, DBAM0, &pvt->dbam1);
+ determine_ecc_sym_sz(pvt);
- /* F10h, revD and later can do x8 ECC too */
- if ((pvt->fam > 0x10 || pvt->model > 7) && tmp & BIT(25))
- pvt->ecc_sym_sz = 8;
- }
dump_misc_regs(pvt);
}
@@ -2420,20 +2788,22 @@ static u32 get_csrow_nr_pages(struct amd64_pvt *pvt, u8 dct, int csrow_nr)
static int init_csrows(struct mem_ctl_info *mci)
{
struct amd64_pvt *pvt = mci->pvt_info;
+ enum edac_type edac_mode = EDAC_NONE;
struct csrow_info *csrow;
struct dimm_info *dimm;
- enum edac_type edac_mode;
int i, j, empty = 1;
int nr_pages = 0;
u32 val;
- amd64_read_pci_cfg(pvt->F3, NBCFG, &val);
+ if (!pvt->umc) {
+ amd64_read_pci_cfg(pvt->F3, NBCFG, &val);
- pvt->nbcfg = val;
+ pvt->nbcfg = val;
- edac_dbg(0, "node %d, NBCFG=0x%08x[ChipKillEccCap: %d|DramEccEn: %d]\n",
- pvt->mc_node_id, val,
- !!(val & NBCFG_CHIPKILL), !!(val & NBCFG_ECC_ENABLE));
+ edac_dbg(0, "node %d, NBCFG=0x%08x[ChipKillEccCap: %d|DramEccEn: %d]\n",
+ pvt->mc_node_id, val,
+ !!(val & NBCFG_CHIPKILL), !!(val & NBCFG_ECC_ENABLE));
+ }
/*
* We iterate over DCT0 here but we look at DCT1 in parallel, if needed.
@@ -2469,14 +2839,18 @@ static int init_csrows(struct mem_ctl_info *mci)
edac_dbg(1, "Total csrow%d pages: %u\n", i, nr_pages);
- /*
- * determine whether CHIPKILL or JUST ECC or NO ECC is operating
- */
- if (pvt->nbcfg & NBCFG_ECC_ENABLE)
- edac_mode = (pvt->nbcfg & NBCFG_CHIPKILL) ?
- EDAC_S4ECD4ED : EDAC_SECDED;
- else
- edac_mode = EDAC_NONE;
+ /* Determine DIMM ECC mode: */
+ if (pvt->umc) {
+ if (mci->edac_ctl_cap & EDAC_FLAG_S4ECD4ED)
+ edac_mode = EDAC_S4ECD4ED;
+ else if (mci->edac_ctl_cap & EDAC_FLAG_SECDED)
+ edac_mode = EDAC_SECDED;
+
+ } else if (pvt->nbcfg & NBCFG_ECC_ENABLE) {
+ edac_mode = (pvt->nbcfg & NBCFG_CHIPKILL)
+ ? EDAC_S4ECD4ED
+ : EDAC_SECDED;
+ }
for (j = 0; j < pvt->channel_count; j++) {
dimm = csrow->channels[j]->dimm;
@@ -2539,7 +2913,7 @@ static int toggle_ecc_err_reporting(struct ecc_settings *s, u16 nid, bool on)
if (!zalloc_cpumask_var(&cmask, GFP_KERNEL)) {
amd64_warn("%s: error allocating mask\n", __func__);
- return false;
+ return -ENOMEM;
}
get_cpus_on_this_dct_cpumask(cmask, nid);
@@ -2627,7 +3001,6 @@ static void restore_ecc_error_reporting(struct ecc_settings *s, u16 nid,
{
u32 value, mask = 0x3; /* UECC/CECC enable */
-
if (!s->nbctl_valid)
return;
@@ -2663,20 +3036,50 @@ static const char *ecc_msg =
static bool ecc_enabled(struct pci_dev *F3, u16 nid)
{
- u32 value;
- u8 ecc_en = 0;
bool nb_mce_en = false;
+ u8 ecc_en = 0, i;
+ u32 value;
- amd64_read_pci_cfg(F3, NBCFG, &value);
+ if (boot_cpu_data.x86 >= 0x17) {
+ u8 umc_en_mask = 0, ecc_en_mask = 0;
- ecc_en = !!(value & NBCFG_ECC_ENABLE);
- amd64_info("DRAM ECC %s.\n", (ecc_en ? "enabled" : "disabled"));
+ for (i = 0; i < NUM_UMCS; i++) {
+ u32 base = get_umc_base(i);
+
+ /* Only check enabled UMCs. */
+ if (amd_smn_read(nid, base + UMCCH_SDP_CTRL, &value))
+ continue;
+
+ if (!(value & UMC_SDP_INIT))
+ continue;
+
+ umc_en_mask |= BIT(i);
- nb_mce_en = nb_mce_bank_enabled_on_node(nid);
- if (!nb_mce_en)
- amd64_notice("NB MCE bank disabled, set MSR "
- "0x%08x[4] on node %d to enable.\n",
- MSR_IA32_MCG_CTL, nid);
+ if (amd_smn_read(nid, base + UMCCH_UMC_CAP_HI, &value))
+ continue;
+
+ if (value & UMC_ECC_ENABLED)
+ ecc_en_mask |= BIT(i);
+ }
+
+ /* Check whether at least one UMC is enabled: */
+ if (umc_en_mask)
+ ecc_en = umc_en_mask == ecc_en_mask;
+
+ /* Assume UMC MCA banks are enabled. */
+ nb_mce_en = true;
+ } else {
+ amd64_read_pci_cfg(F3, NBCFG, &value);
+
+ ecc_en = !!(value & NBCFG_ECC_ENABLE);
+
+ nb_mce_en = nb_mce_bank_enabled_on_node(nid);
+ if (!nb_mce_en)
+ amd64_notice("NB MCE bank disabled, set MSR 0x%08x[4] on node %d to enable.\n",
+ MSR_IA32_MCG_CTL, nid);
+ }
+
+ amd64_info("DRAM ECC %s.\n", (ecc_en ? "enabled" : "disabled"));
if (!ecc_en || !nb_mce_en) {
amd64_notice("%s", ecc_msg);
@@ -2685,6 +3088,27 @@ static bool ecc_enabled(struct pci_dev *F3, u16 nid)
return true;
}
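The F17h branch above reduces to a mask compare: DRAM ECC only counts as enabled when every UMC that passed SDP init also reports UMC_ECC_ENABLED. A minimal standalone sketch of that policy (plain C, bit values borrowed from the constants defined later in this patch; illustrative only, not part of the patch):

	#include <stdbool.h>
	#include <stdint.h>

	#define NUM_UMCS	2
	#define UMC_SDP_INIT	(1u << 31)
	#define UMC_ECC_ENABLED	(1u << 30)

	static bool df_ecc_enabled(const uint32_t sdp_ctrl[NUM_UMCS],
				   const uint32_t umc_cap_hi[NUM_UMCS])
	{
		uint8_t umc_en_mask = 0, ecc_en_mask = 0;
		int i;

		for (i = 0; i < NUM_UMCS; i++) {
			/* Skip channels that never finished SDP init. */
			if (!(sdp_ctrl[i] & UMC_SDP_INIT))
				continue;

			umc_en_mask |= 1 << i;

			if (umc_cap_hi[i] & UMC_ECC_ENABLED)
				ecc_en_mask |= 1 << i;
		}

		/* At least one UMC up, and all enabled ones have ECC. */
		return umc_en_mask && umc_en_mask == ecc_en_mask;
	}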
+static inline void
+f17h_determine_edac_ctl_cap(struct mem_ctl_info *mci, struct amd64_pvt *pvt)
+{
+ u8 i, ecc_en = 1, cpk_en = 1;
+
+ for (i = 0; i < NUM_UMCS; i++) {
+ if (pvt->umc[i].sdp_ctrl & UMC_SDP_INIT) {
+ ecc_en &= !!(pvt->umc[i].umc_cap_hi & UMC_ECC_ENABLED);
+ cpk_en &= !!(pvt->umc[i].umc_cap_hi & UMC_ECC_CHIPKILL_CAP);
+ }
+ }
+
+ /* Set chipkill only if ECC is enabled: */
+ if (ecc_en) {
+ mci->edac_ctl_cap |= EDAC_FLAG_SECDED;
+
+ if (cpk_en)
+ mci->edac_ctl_cap |= EDAC_FLAG_S4ECD4ED;
+ }
+}
+
static void setup_mci_misc_attrs(struct mem_ctl_info *mci,
struct amd64_family_type *fam)
{
@@ -2693,17 +3117,21 @@ static void setup_mci_misc_attrs(struct mem_ctl_info *mci,
mci->mtype_cap = MEM_FLAG_DDR2 | MEM_FLAG_RDDR2;
mci->edac_ctl_cap = EDAC_FLAG_NONE;
- if (pvt->nbcap & NBCAP_SECDED)
- mci->edac_ctl_cap |= EDAC_FLAG_SECDED;
+ if (pvt->umc) {
+ f17h_determine_edac_ctl_cap(mci, pvt);
+ } else {
+ if (pvt->nbcap & NBCAP_SECDED)
+ mci->edac_ctl_cap |= EDAC_FLAG_SECDED;
- if (pvt->nbcap & NBCAP_CHIPKILL)
- mci->edac_ctl_cap |= EDAC_FLAG_S4ECD4ED;
+ if (pvt->nbcap & NBCAP_CHIPKILL)
+ mci->edac_ctl_cap |= EDAC_FLAG_S4ECD4ED;
+ }
mci->edac_cap = determine_edac_cap(pvt);
mci->mod_name = EDAC_MOD_STR;
mci->mod_ver = EDAC_AMD64_VERSION;
mci->ctl_name = fam->ctl_name;
- mci->dev_name = pci_name(pvt->F2);
+ mci->dev_name = pci_name(pvt->F3);
mci->ctl_page_to_phys = NULL;
/* memory scrubber interface */
@@ -2759,6 +3187,11 @@ static struct amd64_family_type *per_family_init(struct amd64_pvt *pvt)
pvt->ops = &family_types[F16_CPUS].ops;
break;
+ case 0x17:
+ fam_type = &family_types[F17_CPUS];
+ pvt->ops = &family_types[F17_CPUS].ops;
+ break;
+
default:
amd64_err("Unsupported family!\n");
return NULL;
@@ -2789,6 +3222,7 @@ static int init_one_instance(unsigned int nid)
struct mem_ctl_info *mci = NULL;
struct edac_mc_layer layers[2];
struct amd64_pvt *pvt = NULL;
+ u16 pci_id1, pci_id2;
int err = 0, ret;
ret = -ENOMEM;
@@ -2804,10 +3238,23 @@ static int init_one_instance(unsigned int nid)
if (!fam_type)
goto err_free;
- ret = -ENODEV;
- err = reserve_mc_sibling_devs(pvt, fam_type->f1_id, fam_type->f2_id);
+ if (pvt->fam >= 0x17) {
+ pvt->umc = kcalloc(NUM_UMCS, sizeof(struct amd64_umc), GFP_KERNEL);
+ if (!pvt->umc) {
+ ret = -ENOMEM;
+ goto err_free;
+ }
+
+ pci_id1 = fam_type->f0_id;
+ pci_id2 = fam_type->f6_id;
+ } else {
+ pci_id1 = fam_type->f1_id;
+ pci_id2 = fam_type->f2_id;
+ }
+
+ err = reserve_mc_sibling_devs(pvt, pci_id1, pci_id2);
if (err)
- goto err_free;
+ goto err_post_init;
read_mc_regs(pvt);
@@ -2857,7 +3304,10 @@ static int init_one_instance(unsigned int nid)
if (report_gart_errors)
amd_report_gart_errors(true);
- amd_register_ecc_decoder(decode_bus_error);
+ if (pvt->umc)
+ amd_register_ecc_decoder(decode_umc_error);
+ else
+ amd_register_ecc_decoder(decode_bus_error);
return 0;
@@ -2867,6 +3317,10 @@ err_add_mc:
err_siblings:
free_mc_sibling_devs(pvt);
+err_post_init:
+ if (pvt->fam >= 0x17)
+ kfree(pvt->umc);
+
err_free:
kfree(pvt);
@@ -2893,7 +3347,11 @@ static int probe_one_instance(unsigned int nid)
if (!ecc_enable_override)
goto err_enable;
- amd64_warn("Forcing ECC on!\n");
+ if (boot_cpu_data.x86 >= 0x17) {
+		amd64_warn("Forcing ECC on is not recommended on newer systems. Please enable ECC in BIOS.\n");
+ goto err_enable;
+	} else {
+		amd64_warn("Forcing ECC on!\n");
+	}
if (!enable_ecc_error_reporting(s, nid, F3))
goto err_enable;
@@ -2902,7 +3360,9 @@ static int probe_one_instance(unsigned int nid)
ret = init_one_instance(nid);
if (ret < 0) {
amd64_err("Error probing instance: %d\n", nid);
- restore_ecc_error_reporting(s, nid, F3);
+
+ if (boot_cpu_data.x86 < 0x17)
+ restore_ecc_error_reporting(s, nid, F3);
}
return ret;
@@ -2938,7 +3398,11 @@ static void remove_one_instance(unsigned int nid)
/* unregister from EDAC MCE */
amd_report_gart_errors(false);
- amd_unregister_ecc_decoder(decode_bus_error);
+
+ if (pvt->umc)
+ amd_unregister_ecc_decoder(decode_umc_error);
+ else
+ amd_unregister_ecc_decoder(decode_bus_error);
kfree(ecc_stngs[nid]);
ecc_stngs[nid] = NULL;
@@ -2963,7 +3427,10 @@ static void setup_pci_device(void)
return;
pvt = mci->pvt_info;
- pci_ctl = edac_pci_create_generic_ctl(&pvt->F2->dev, EDAC_MOD_STR);
+ if (pvt->umc)
+ pci_ctl = edac_pci_create_generic_ctl(&pvt->F0->dev, EDAC_MOD_STR);
+ else
+ pci_ctl = edac_pci_create_generic_ctl(&pvt->F2->dev, EDAC_MOD_STR);
if (!pci_ctl) {
pr_warn("%s(): Unable to create PCI control\n", __func__);
pr_warn("%s(): PCI error report via EDAC not set\n", __func__);
@@ -2975,6 +3442,7 @@ static const struct x86_cpu_id amd64_cpuids[] = {
{ X86_VENDOR_AMD, 0x10, X86_MODEL_ANY, X86_FEATURE_ANY, 0 },
{ X86_VENDOR_AMD, 0x15, X86_MODEL_ANY, X86_FEATURE_ANY, 0 },
{ X86_VENDOR_AMD, 0x16, X86_MODEL_ANY, X86_FEATURE_ANY, 0 },
+ { X86_VENDOR_AMD, 0x17, X86_MODEL_ANY, X86_FEATURE_ANY, 0 },
{ }
};
MODULE_DEVICE_TABLE(x86cpu, amd64_cpuids);
diff --git a/drivers/edac/amd64_edac.h b/drivers/edac/amd64_edac.h
index c08870479054..496603d8f3d2 100644
--- a/drivers/edac/amd64_edac.h
+++ b/drivers/edac/amd64_edac.h
@@ -17,7 +17,7 @@
#include <linux/mmzone.h>
#include <linux/edac.h>
#include <asm/msr.h>
-#include "edac_core.h"
+#include "edac_module.h"
#include "mce_amd.h"
#define amd64_debug(fmt, arg...) \
@@ -30,10 +30,10 @@
edac_printk(KERN_NOTICE, "amd64", fmt, ##arg)
#define amd64_warn(fmt, arg...) \
- edac_printk(KERN_WARNING, "amd64", fmt, ##arg)
+ edac_printk(KERN_WARNING, "amd64", "Warning: " fmt, ##arg)
#define amd64_err(fmt, arg...) \
- edac_printk(KERN_ERR, "amd64", fmt, ##arg)
+ edac_printk(KERN_ERR, "amd64", "Error: " fmt, ##arg)
#define amd64_mc_warn(mci, fmt, arg...) \
edac_mc_chipset_printk(mci, KERN_WARNING, "amd64", fmt, ##arg)
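A note on the resulting log format: since edac_printk() expands to printk(level "EDAC " prefix ": " fmt, ...), the prefixes added here mean a call such as

	amd64_warn("Forcing ECC on!\n");

should now come out roughly as

	EDAC amd64: Warning: Forcing ECC on!

(the exact rendering still depends on loglevel and printk decoration).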
@@ -118,6 +118,8 @@
#define PCI_DEVICE_ID_AMD_16H_NB_F2 0x1532
#define PCI_DEVICE_ID_AMD_16H_M30H_NB_F1 0x1581
#define PCI_DEVICE_ID_AMD_16H_M30H_NB_F2 0x1582
+#define PCI_DEVICE_ID_AMD_17H_DF_F0 0x1460
+#define PCI_DEVICE_ID_AMD_17H_DF_F6 0x1466
/*
* Function 1 - Address Map
@@ -202,6 +204,8 @@
#define DCT_SEL_HI 0x114
#define F15H_M60H_SCRCTRL 0x1C8
+#define F17H_SCR_BASE_ADDR 0x48
+#define F17H_SCR_LIMIT_ADDR 0x4C
/*
* Function 3 - Misc Control
@@ -248,6 +252,31 @@
/* MSRs */
#define MSR_MCGCTL_NBE BIT(4)
+/* F17h */
+
+/* F0: */
+#define DF_DHAR 0x104
+
+/* UMC CH register offsets */
+#define UMCCH_BASE_ADDR 0x0
+#define UMCCH_ADDR_MASK 0x20
+#define UMCCH_ADDR_CFG 0x30
+#define UMCCH_DIMM_CFG 0x80
+#define UMCCH_UMC_CFG 0x100
+#define UMCCH_SDP_CTRL 0x104
+#define UMCCH_ECC_CTRL 0x14C
+#define UMCCH_ECC_BAD_SYMBOL 0xD90
+#define UMCCH_UMC_CAP 0xDF0
+#define UMCCH_UMC_CAP_HI 0xDF4
+
+/* UMC CH bitfields */
+#define UMC_ECC_CHIPKILL_CAP BIT(31)
+#define UMC_ECC_ENABLED BIT(30)
+
+#define UMC_SDP_INIT BIT(31)
+
+#define NUM_UMCS 2
+
enum amd_families {
K8_CPUS = 0,
F10_CPUS,
@@ -256,6 +285,7 @@ enum amd_families {
F15_M60H_CPUS,
F16_CPUS,
F16_M30H_CPUS,
+ F17_CPUS,
NUM_FAMILIES,
};
@@ -288,11 +318,19 @@ struct chip_select {
u8 m_cnt;
};
+struct amd64_umc {
+ u32 dimm_cfg; /* DIMM Configuration reg */
+ u32 umc_cfg; /* Configuration reg */
+ u32 sdp_ctrl; /* SDP Control reg */
+ u32 ecc_ctrl; /* DRAM ECC Control reg */
+ u32 umc_cap_hi; /* Capabilities High reg */
+};
+
struct amd64_pvt {
struct low_ops *ops;
/* pci_device handles which we utilize */
- struct pci_dev *F1, *F2, *F3;
+ struct pci_dev *F0, *F1, *F2, *F3, *F6;
u16 mc_node_id; /* MC index of this MC node */
u8 fam; /* CPU family */
@@ -335,6 +373,8 @@ struct amd64_pvt {
/* cache the dram_type */
enum mem_type dram_type;
+
+ struct amd64_umc *umc; /* UMC registers */
};
enum err_codes {
@@ -342,6 +382,8 @@ enum err_codes {
ERR_NODE = -1,
ERR_CSROW = -2,
ERR_CHANNEL = -3,
+ ERR_SYND = -4,
+ ERR_NORM_ADDR = -5,
};
struct err_info {
@@ -354,6 +396,12 @@ struct err_info {
u32 offset;
};
+static inline u32 get_umc_base(u8 channel)
+{
+ /* ch0: 0x50000, ch1: 0x150000 */
+ return 0x50000 + (!!channel << 20);
+}
+
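A quick sanity check of the address math (illustrative only): the SMN address handed to amd_smn_read() in __read_mc_regs_df() is this channel base plus a register offset, e.g.

	get_umc_base(0) + UMCCH_SDP_CTRL	/* 0x50000  + 0x104 == 0x50104  */
	get_umc_base(1) + UMCCH_SDP_CTRL	/* 0x150000 + 0x104 == 0x150104 */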
static inline u64 get_dram_base(struct amd64_pvt *pvt, u8 i)
{
u64 addr = ((u64)pvt->ranges[i].base.lo & 0xffff0000) << 8;
@@ -422,7 +470,7 @@ struct low_ops {
struct amd64_family_type {
const char *ctl_name;
- u16 f1_id, f2_id;
+ u16 f0_id, f1_id, f2_id, f6_id;
struct low_ops ops;
};
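With the struct widened this way, a family_types[] entry pairing up the new Fam17h IDs would look roughly like the sketch below (a sketch only; the .ops wiring is elided and not taken from this patch):

	[F17_CPUS] = {
		.ctl_name = "F17h",
		.f0_id = PCI_DEVICE_ID_AMD_17H_DF_F0,
		.f6_id = PCI_DEVICE_ID_AMD_17H_DF_F6,
		/* .ops = { family-specific low_ops callbacks } */
	},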
diff --git a/drivers/edac/amd76x_edac.c b/drivers/edac/amd76x_edac.c
index 3a501b530e11..a7450275ad28 100644
--- a/drivers/edac/amd76x_edac.c
+++ b/drivers/edac/amd76x_edac.c
@@ -17,7 +17,7 @@
#include <linux/pci.h>
#include <linux/pci_ids.h>
#include <linux/edac.h>
-#include "edac_core.h"
+#include "edac_module.h"
#define AMD76X_REVISION " Ver: 2.0.2"
#define EDAC_MOD_STR "amd76x_edac"
diff --git a/drivers/edac/amd8111_edac.c b/drivers/edac/amd8111_edac.c
index 2b63f7c2d6d2..b5786cfded3a 100644
--- a/drivers/edac/amd8111_edac.c
+++ b/drivers/edac/amd8111_edac.c
@@ -29,7 +29,6 @@
#include <linux/pci_ids.h>
#include <asm/io.h>
-#include "edac_core.h"
#include "edac_module.h"
#include "amd8111_edac.h"
diff --git a/drivers/edac/amd8131_edac.c b/drivers/edac/amd8131_edac.c
index a5c680561c73..8851c33d7d24 100644
--- a/drivers/edac/amd8131_edac.c
+++ b/drivers/edac/amd8131_edac.c
@@ -29,7 +29,6 @@
#include <linux/edac.h>
#include <linux/pci_ids.h>
-#include "edac_core.h"
#include "edac_module.h"
#include "amd8131_edac.h"
diff --git a/drivers/edac/cell_edac.c b/drivers/edac/cell_edac.c
index a9259b069dcd..bc1f3416400e 100644
--- a/drivers/edac/cell_edac.c
+++ b/drivers/edac/cell_edac.c
@@ -19,7 +19,7 @@
#include <asm/machdep.h>
#include <asm/cell-regs.h>
-#include "edac_core.h"
+#include "edac_module.h"
struct cell_edac_priv
{
diff --git a/drivers/edac/cpc925_edac.c b/drivers/edac/cpc925_edac.c
index 682288ced4ac..837b62c4993d 100644
--- a/drivers/edac/cpc925_edac.c
+++ b/drivers/edac/cpc925_edac.c
@@ -27,7 +27,6 @@
#include <linux/platform_device.h>
#include <linux/gfp.h>
-#include "edac_core.h"
#include "edac_module.h"
#define CPC925_EDAC_REVISION " Ver: 1.0.0"
diff --git a/drivers/edac/e752x_edac.c b/drivers/edac/e752x_edac.c
index b2d71388172b..1a352cae1f52 100644
--- a/drivers/edac/e752x_edac.c
+++ b/drivers/edac/e752x_edac.c
@@ -24,7 +24,7 @@
#include <linux/pci.h>
#include <linux/pci_ids.h>
#include <linux/edac.h>
-#include "edac_core.h"
+#include "edac_module.h"
#define E752X_REVISION " Ver: 2.0.2"
#define EDAC_MOD_STR "e752x_edac"
diff --git a/drivers/edac/e7xxx_edac.c b/drivers/edac/e7xxx_edac.c
index ece3aef16bb1..67ef07aed923 100644
--- a/drivers/edac/e7xxx_edac.c
+++ b/drivers/edac/e7xxx_edac.c
@@ -30,7 +30,7 @@
#include <linux/pci.h>
#include <linux/pci_ids.h>
#include <linux/edac.h>
-#include "edac_core.h"
+#include "edac_module.h"
#define E7XXX_REVISION " Ver: 2.0.2"
#define EDAC_MOD_STR "e7xxx_edac"
diff --git a/drivers/edac/edac_device.c b/drivers/edac/edac_device.c
index a97900333e2d..de4d5d08af9e 100644
--- a/drivers/edac/edac_device.c
+++ b/drivers/edac/edac_device.c
@@ -12,23 +12,20 @@
* 19 Jan 2007
*/
+#include <asm/page.h>
+#include <asm/uaccess.h>
+#include <linux/ctype.h>
+#include <linux/highmem.h>
+#include <linux/init.h>
+#include <linux/jiffies.h>
#include <linux/module.h>
-#include <linux/types.h>
+#include <linux/slab.h>
#include <linux/smp.h>
-#include <linux/init.h>
+#include <linux/spinlock.h>
#include <linux/sysctl.h>
-#include <linux/highmem.h>
#include <linux/timer.h>
-#include <linux/slab.h>
-#include <linux/jiffies.h>
-#include <linux/spinlock.h>
-#include <linux/list.h>
-#include <linux/ctype.h>
-#include <linux/workqueue.h>
-#include <asm/uaccess.h>
-#include <asm/page.h>
-#include "edac_core.h"
+#include "edac_device.h"
#include "edac_module.h"
/* lock for the list: 'edac_device_list', manipulation of this list
@@ -50,21 +47,6 @@ static void edac_device_dump_device(struct edac_device_ctl_info *edac_dev)
}
#endif /* CONFIG_EDAC_DEBUG */
-
-/*
- * edac_device_alloc_ctl_info()
- * Allocate a new edac device control info structure
- *
- * The control structure is allocated in complete chunk
- * from the OS. It is in turn sub allocated to the
- * various objects that compose the structure
- *
- * The structure has a 'nr_instance' array within itself.
- * Each instance represents a major component
- * Example: L1 cache and L2 cache are 2 instance components
- *
- * Within each instance is an array of 'nr_blocks' blockoffsets
- */
struct edac_device_ctl_info *edac_device_alloc_ctl_info(
unsigned sz_private,
char *edac_device_name, unsigned nr_instances,
@@ -244,11 +226,6 @@ struct edac_device_ctl_info *edac_device_alloc_ctl_info(
}
EXPORT_SYMBOL_GPL(edac_device_alloc_ctl_info);
-/*
- * edac_device_free_ctl_info()
- * frees the memory allocated by the edac_device_alloc_ctl_info()
- * function
- */
void edac_device_free_ctl_info(struct edac_device_ctl_info *ctl_info)
{
edac_device_unregister_sysfs_main_kobj(ctl_info);
@@ -460,12 +437,6 @@ void edac_device_reset_delay_period(struct edac_device_ctl_info *edac_dev,
edac_mod_work(&edac_dev->work, jiffs);
}
-/*
- * edac_device_alloc_index: Allocate a unique device index number
- *
- * Return:
- * allocated index number
- */
int edac_device_alloc_index(void)
{
static atomic_t device_indexes = ATOMIC_INIT(0);
@@ -474,17 +445,6 @@ int edac_device_alloc_index(void)
}
EXPORT_SYMBOL_GPL(edac_device_alloc_index);
-/**
- * edac_device_add_device: Insert the 'edac_dev' structure into the
- * edac_device global list and create sysfs entries associated with
- * edac_device structure.
- * @edac_device: pointer to the edac_device structure to be added to the list
- * 'edac_device' structure.
- *
- * Return:
- * 0 Success
- * !0 Failure
- */
int edac_device_add_device(struct edac_device_ctl_info *edac_dev)
{
edac_dbg(0, "\n");
@@ -541,19 +501,6 @@ fail0:
}
EXPORT_SYMBOL_GPL(edac_device_add_device);
-/**
- * edac_device_del_device:
- * Remove sysfs entries for specified edac_device structure and
- * then remove edac_device structure from global list
- *
- * @dev:
- * Pointer to 'struct device' representing edac_device
- * structure to remove.
- *
- * Return:
- * Pointer to removed edac_device structure,
- * OR NULL if device not found.
- */
struct edac_device_ctl_info *edac_device_del_device(struct device *dev)
{
struct edac_device_ctl_info *edac_dev;
@@ -608,10 +555,6 @@ static inline int edac_device_get_panic_on_ue(struct edac_device_ctl_info
return edac_dev->panic_on_ue;
}
-/*
- * edac_device_handle_ce
- * perform a common output and handling of an 'edac_dev' CE event
- */
void edac_device_handle_ce(struct edac_device_ctl_info *edac_dev,
int inst_nr, int block_nr, const char *msg)
{
@@ -654,10 +597,6 @@ void edac_device_handle_ce(struct edac_device_ctl_info *edac_dev,
}
EXPORT_SYMBOL_GPL(edac_device_handle_ce);
-/*
- * edac_device_handle_ue
- * perform a common output and handling of an 'edac_dev' UE event
- */
void edac_device_handle_ue(struct edac_device_ctl_info *edac_dev,
int inst_nr, int block_nr, const char *msg)
{
diff --git a/drivers/edac/edac_core.h b/drivers/edac/edac_device.h
index 4861542163d7..1aaba74ae411 100644
--- a/drivers/edac/edac_core.h
+++ b/drivers/edac/edac_device.h
@@ -1,5 +1,5 @@
/*
- * Defines, structures, APIs for edac_core module
+ * Defines, structures, APIs for edac_device
*
* (C) 2007 Linux Networx (http://lnxi.com)
* This file may be distributed under the terms of the
@@ -15,86 +15,22 @@
* Refactored for multi-source files:
* Doug Thompson <norsk5@xmission.com>
*
+ * Please look at Documentation/driver-api/edac.rst for more info about
+ * EDAC core structs and functions.
*/
-#ifndef _EDAC_CORE_H_
-#define _EDAC_CORE_H_
+#ifndef _EDAC_DEVICE_H_
+#define _EDAC_DEVICE_H_
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/module.h>
-#include <linux/spinlock.h>
-#include <linux/smp.h>
-#include <linux/pci.h>
-#include <linux/time.h>
-#include <linux/nmi.h>
-#include <linux/rcupdate.h>
#include <linux/completion.h>
+#include <linux/device.h>
+#include <linux/edac.h>
#include <linux/kobject.h>
-#include <linux/platform_device.h>
+#include <linux/list.h>
+#include <linux/types.h>
+#include <linux/sysfs.h>
#include <linux/workqueue.h>
-#include <linux/edac.h>
-
-#define EDAC_DEVICE_NAME_LEN 31
-#define EDAC_ATTRIB_VALUE_LEN 15
-
-#if PAGE_SHIFT < 20
-#define PAGES_TO_MiB(pages) ((pages) >> (20 - PAGE_SHIFT))
-#define MiB_TO_PAGES(mb) ((mb) << (20 - PAGE_SHIFT))
-#else /* PAGE_SHIFT > 20 */
-#define PAGES_TO_MiB(pages) ((pages) << (PAGE_SHIFT - 20))
-#define MiB_TO_PAGES(mb) ((mb) >> (PAGE_SHIFT - 20))
-#endif
-
-#define edac_printk(level, prefix, fmt, arg...) \
- printk(level "EDAC " prefix ": " fmt, ##arg)
-
-#define edac_mc_printk(mci, level, fmt, arg...) \
- printk(level "EDAC MC%d: " fmt, mci->mc_idx, ##arg)
-
-#define edac_mc_chipset_printk(mci, level, prefix, fmt, arg...) \
- printk(level "EDAC " prefix " MC%d: " fmt, mci->mc_idx, ##arg)
-
-#define edac_device_printk(ctl, level, fmt, arg...) \
- printk(level "EDAC DEVICE%d: " fmt, ctl->dev_idx, ##arg)
-
-#define edac_pci_printk(ctl, level, fmt, arg...) \
- printk(level "EDAC PCI%d: " fmt, ctl->pci_idx, ##arg)
-/* prefixes for edac_printk() and edac_mc_printk() */
-#define EDAC_MC "MC"
-#define EDAC_PCI "PCI"
-#define EDAC_DEBUG "DEBUG"
-
-extern const char * const edac_mem_types[];
-
-#ifdef CONFIG_EDAC_DEBUG
-extern int edac_debug_level;
-
-#define edac_dbg(level, fmt, ...) \
-do { \
- if (level <= edac_debug_level) \
- edac_printk(KERN_DEBUG, EDAC_DEBUG, \
- "%s: " fmt, __func__, ##__VA_ARGS__); \
-} while (0)
-
-#else /* !CONFIG_EDAC_DEBUG */
-
-#define edac_dbg(level, fmt, ...) \
-do { \
- if (0) \
- edac_printk(KERN_DEBUG, EDAC_DEBUG, \
- "%s: " fmt, __func__, ##__VA_ARGS__); \
-} while (0)
-
-#endif /* !CONFIG_EDAC_DEBUG */
-
-#define PCI_VEND_DEV(vend, dev) PCI_VENDOR_ID_ ## vend, \
- PCI_DEVICE_ID_ ## vend ## _ ## dev
-
-#define edac_dev_name(dev) (dev)->dev_name
-
-#define to_mci(k) container_of(k, struct mem_ctl_info, dev)
/*
* The following are the structures to provide for a generic
@@ -321,197 +257,64 @@ extern struct edac_device_ctl_info *edac_device_alloc_ctl_info(
extern void edac_device_free_ctl_info(struct edac_device_ctl_info *ctl_info);
-#ifdef CONFIG_PCI
-
-struct edac_pci_counter {
- atomic_t pe_count;
- atomic_t npe_count;
-};
-
-/*
- * Abstract edac_pci control info structure
+/**
+ * edac_device_add_device: Insert the 'edac_dev' structure into the
+ * edac_device global list and create sysfs entries associated with
+ * edac_device structure.
+ *
+ * @edac_dev: pointer to the edac_device structure to be added to the
+ *	global 'edac_device' list.
*
+ * Returns:
+ * 0 on Success, or an error code on failure
*/
-struct edac_pci_ctl_info {
- /* for global list of edac_pci_ctl_info structs */
- struct list_head link;
-
- int pci_idx;
-
- struct bus_type *edac_subsys; /* pointer to subsystem */
-
- /* the internal state of this controller instance */
- int op_state;
- /* work struct for this instance */
- struct delayed_work work;
-
- /* pointer to edac polling checking routine:
- * If NOT NULL: points to polling check routine
- * If NULL: Then assumes INTERRUPT operation, where
- * MC driver will receive events
- */
- void (*edac_check) (struct edac_pci_ctl_info * edac_dev);
-
- struct device *dev; /* pointer to device structure */
-
- const char *mod_name; /* module name */
- const char *ctl_name; /* edac controller name */
- const char *dev_name; /* pci/platform/etc... name */
-
- void *pvt_info; /* pointer to 'private driver' info */
-
- unsigned long start_time; /* edac_pci load start time (jiffies) */
-
- struct completion complete;
-
- /* sysfs top name under 'edac' directory
- * and instance name:
- * cpu/cpu0/...
- * cpu/cpu1/...
- * cpu/cpu2/...
- * ...
- */
- char name[EDAC_DEVICE_NAME_LEN + 1];
-
- /* Event counters for the this whole EDAC Device */
- struct edac_pci_counter counters;
-
- /* edac sysfs device control for the 'name'
- * device this structure controls
- */
- struct kobject kobj;
- struct completion kobj_complete;
-};
-
-#define to_edac_pci_ctl_work(w) \
- container_of(w, struct edac_pci_ctl_info,work)
-
-/* write all or some bits in a byte-register*/
-static inline void pci_write_bits8(struct pci_dev *pdev, int offset, u8 value,
- u8 mask)
-{
- if (mask != 0xff) {
- u8 buf;
-
- pci_read_config_byte(pdev, offset, &buf);
- value &= mask;
- buf &= ~mask;
- value |= buf;
- }
-
- pci_write_config_byte(pdev, offset, value);
-}
-
-/* write all or some bits in a word-register*/
-static inline void pci_write_bits16(struct pci_dev *pdev, int offset,
- u16 value, u16 mask)
-{
- if (mask != 0xffff) {
- u16 buf;
-
- pci_read_config_word(pdev, offset, &buf);
- value &= mask;
- buf &= ~mask;
- value |= buf;
- }
-
- pci_write_config_word(pdev, offset, value);
-}
+extern int edac_device_add_device(struct edac_device_ctl_info *edac_dev);
-/*
- * pci_write_bits32
+/**
+ * edac_device_del_device:
+ * Remove sysfs entries for specified edac_device structure and
+ * then remove edac_device structure from global list
*
- * edac local routine to do pci_write_config_dword, but adds
- * a mask parameter. If mask is all ones, ignore the mask.
- * Otherwise utilize the mask to isolate specified bits
+ * @dev:
+ * Pointer to struct &device representing the edac device
+ * structure to remove.
*
- * write all or some bits in a dword-register
+ * Returns:
+ * Pointer to removed edac_device structure,
+ * or %NULL if device not found.
*/
-static inline void pci_write_bits32(struct pci_dev *pdev, int offset,
- u32 value, u32 mask)
-{
- if (mask != 0xffffffff) {
- u32 buf;
-
- pci_read_config_dword(pdev, offset, &buf);
- value &= mask;
- buf &= ~mask;
- value |= buf;
- }
-
- pci_write_config_dword(pdev, offset, value);
-}
-
-#endif /* CONFIG_PCI */
-
-struct mem_ctl_info *edac_mc_alloc(unsigned mc_num,
- unsigned n_layers,
- struct edac_mc_layer *layers,
- unsigned sz_pvt);
-extern int edac_mc_add_mc_with_groups(struct mem_ctl_info *mci,
- const struct attribute_group **groups);
-#define edac_mc_add_mc(mci) edac_mc_add_mc_with_groups(mci, NULL)
-extern void edac_mc_free(struct mem_ctl_info *mci);
-extern struct mem_ctl_info *edac_mc_find(int idx);
-extern struct mem_ctl_info *find_mci_by_dev(struct device *dev);
-extern struct mem_ctl_info *edac_mc_del_mc(struct device *dev);
-extern int edac_mc_find_csrow_by_page(struct mem_ctl_info *mci,
- unsigned long page);
-
-void edac_raw_mc_handle_error(const enum hw_event_mc_err_type type,
- struct mem_ctl_info *mci,
- struct edac_raw_error_desc *e);
-
-void edac_mc_handle_error(const enum hw_event_mc_err_type type,
- struct mem_ctl_info *mci,
- const u16 error_count,
- const unsigned long page_frame_number,
- const unsigned long offset_in_page,
- const unsigned long syndrome,
- const int top_layer,
- const int mid_layer,
- const int low_layer,
- const char *msg,
- const char *other_detail);
+extern struct edac_device_ctl_info *edac_device_del_device(struct device *dev);
-/*
- * edac_device APIs
+/**
+ * edac_device_handle_ue():
+ * perform a common output and handling of an 'edac_dev' UE event
+ *
+ * @edac_dev: pointer to struct &edac_device_ctl_info
+ * @inst_nr: number of the instance where the UE error happened
+ * @block_nr: number of the block where the UE error happened
+ * @msg: message to be printed
*/
-extern int edac_device_add_device(struct edac_device_ctl_info *edac_dev);
-extern struct edac_device_ctl_info *edac_device_del_device(struct device *dev);
extern void edac_device_handle_ue(struct edac_device_ctl_info *edac_dev,
int inst_nr, int block_nr, const char *msg);
+/**
+ * edac_device_handle_ce():
+ * perform a common output and handling of an 'edac_dev' CE event
+ *
+ * @edac_dev: pointer to struct &edac_device_ctl_info
+ * @inst_nr: number of the instance where the CE error happened
+ * @block_nr: number of the block where the CE error happened
+ * @msg: message to be printed
+ */
extern void edac_device_handle_ce(struct edac_device_ctl_info *edac_dev,
int inst_nr, int block_nr, const char *msg);
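A typical call site passes instance/block coordinates plus a short message, along these lines (values illustrative):

	edac_device_handle_ce(edac_dev, 0, 0, "L2 cache single-bit error");
	edac_device_handle_ue(edac_dev, 0, 0, "L2 cache double-bit error");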
-extern int edac_device_alloc_index(void);
-extern const char *edac_layer_name[];
-/*
- * edac_pci APIs
- */
-extern struct edac_pci_ctl_info *edac_pci_alloc_ctl_info(unsigned int sz_pvt,
- const char *edac_pci_name);
-
-extern void edac_pci_free_ctl_info(struct edac_pci_ctl_info *pci);
-
-extern void edac_pci_reset_delay_period(struct edac_pci_ctl_info *pci,
- unsigned long value);
-
-extern int edac_pci_alloc_index(void);
-extern int edac_pci_add_device(struct edac_pci_ctl_info *pci, int edac_idx);
-extern struct edac_pci_ctl_info *edac_pci_del_device(struct device *dev);
-
-extern struct edac_pci_ctl_info *edac_pci_create_generic_ctl(
- struct device *dev,
- const char *mod_name);
-
-extern void edac_pci_release_generic_ctl(struct edac_pci_ctl_info *pci);
-extern int edac_pci_create_sysfs(struct edac_pci_ctl_info *pci);
-extern void edac_pci_remove_sysfs(struct edac_pci_ctl_info *pci);
-
-/*
- * edac misc APIs
+/**
+ * edac_device_alloc_index: Allocate a unique device index number
+ *
+ * Returns:
+ * allocated index number
*/
-extern char *edac_op_state_to_string(int op_state);
+extern int edac_device_alloc_index(void);
+extern const char *edac_layer_name[];
-#endif /* _EDAC_CORE_H_ */
+#endif
diff --git a/drivers/edac/edac_device_sysfs.c b/drivers/edac/edac_device_sysfs.c
index 93da1a45c716..0e7ea3591b78 100644
--- a/drivers/edac/edac_device_sysfs.c
+++ b/drivers/edac/edac_device_sysfs.c
@@ -1,7 +1,7 @@
/*
* file for managing the edac_device subsystem of devices for EDAC
*
- * (C) 2007 SoftwareBitMaker
+ * (C) 2007 SoftwareBitMaker
*
* This file may be distributed under the terms of the
* GNU General Public License.
@@ -15,7 +15,7 @@
#include <linux/slab.h>
#include <linux/edac.h>
-#include "edac_core.h"
+#include "edac_device.h"
#include "edac_module.h"
#define EDAC_DEVICE_SYMLINK "device"
diff --git a/drivers/edac/edac_mc.c b/drivers/edac/edac_mc.c
index c3ee3ad98a63..5f2c717f8053 100644
--- a/drivers/edac/edac_mc.c
+++ b/drivers/edac/edac_mc.c
@@ -30,7 +30,7 @@
#include <linux/bitops.h>
#include <asm/uaccess.h>
#include <asm/page.h>
-#include "edac_core.h"
+#include "edac_mc.h"
#include "edac_module.h"
#include <ras/ras_event.h>
@@ -239,30 +239,6 @@ static void _edac_mc_free(struct mem_ctl_info *mci)
kfree(mci);
}
-/**
- * edac_mc_alloc: Allocate and partially fill a struct mem_ctl_info structure
- * @mc_num: Memory controller number
- * @n_layers: Number of MC hierarchy layers
- * layers: Describes each layer as seen by the Memory Controller
- * @size_pvt: size of private storage needed
- *
- *
- * Everything is kmalloc'ed as one big chunk - more efficient.
- * Only can be used if all structures have the same lifetime - otherwise
- * you have to allocate and initialize your own structures.
- *
- * Use edac_mc_free() to free mc structures allocated by this function.
- *
- * NOTE: drivers handle multi-rank memories in different ways: in some
- * drivers, one multi-rank memory stick is mapped as one entry, while, in
- * others, a single multi-rank memory stick would be mapped into several
- * entries. Currently, this function will allocate multiple struct dimm_info
- * on such scenarios, as grouping the multiple ranks require drivers change.
- *
- * Returns:
- * On failure: NULL
- * On success: struct mem_ctl_info pointer
- */
struct mem_ctl_info *edac_mc_alloc(unsigned mc_num,
unsigned n_layers,
struct edac_mc_layer *layers,
@@ -460,11 +436,6 @@ error:
}
EXPORT_SYMBOL_GPL(edac_mc_alloc);
-/**
- * edac_mc_free
- * 'Free' a previously allocated 'mci' structure
- * @mci: pointer to a struct mem_ctl_info structure
- */
void edac_mc_free(struct mem_ctl_info *mci)
{
edac_dbg(1, "\n");
@@ -482,15 +453,8 @@ void edac_mc_free(struct mem_ctl_info *mci)
}
EXPORT_SYMBOL_GPL(edac_mc_free);
-
-/**
- * find_mci_by_dev
- *
- * scan list of controllers looking for the one that manages
- * the 'dev' device
- * @dev: pointer to a struct device related with the MCI
- */
-struct mem_ctl_info *find_mci_by_dev(struct device *dev)
+/* Caller must hold mem_ctls_mutex */
+static struct mem_ctl_info *__find_mci_by_dev(struct device *dev)
{
struct mem_ctl_info *mci;
struct list_head *item;
@@ -506,6 +470,24 @@ struct mem_ctl_info *find_mci_by_dev(struct device *dev)
return NULL;
}
+
+/**
+ * find_mci_by_dev
+ *
+ * scan list of controllers looking for the one that manages
+ * the 'dev' device
+ * @dev: pointer to a struct device related with the MCI
+ */
+struct mem_ctl_info *find_mci_by_dev(struct device *dev)
+{
+ struct mem_ctl_info *ret;
+
+ mutex_lock(&mem_ctls_mutex);
+ ret = __find_mci_by_dev(dev);
+ mutex_unlock(&mem_ctls_mutex);
+
+ return ret;
+}
EXPORT_SYMBOL_GPL(find_mci_by_dev);
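The __find_mci_by_dev()/find_mci_by_dev() split follows a common kernel locking pattern: keep the raw lookup static for callers that already hold the mutex, and export only the locking wrapper. A generic sketch of the pattern (hypothetical obj/obj_list names):

	#include <linux/list.h>
	#include <linux/mutex.h>

	static DEFINE_MUTEX(obj_mutex);
	static LIST_HEAD(obj_list);

	struct obj {
		int key;
		struct list_head link;
	};

	/* Caller must hold obj_mutex. */
	static struct obj *__find_obj(int key)
	{
		struct obj *o;

		list_for_each_entry(o, &obj_list, link)
			if (o->key == key)
				return o;

		return NULL;
	}

	struct obj *find_obj(int key)
	{
		struct obj *o;

		mutex_lock(&obj_mutex);
		o = __find_obj(key);
		mutex_unlock(&obj_mutex);

		return o;
	}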
/*
@@ -588,7 +570,7 @@ static int add_mc_to_global_list(struct mem_ctl_info *mci)
insert_before = &mc_devices;
- p = find_mci_by_dev(mci->pdev);
+ p = __find_mci_by_dev(mci->pdev);
if (unlikely(p != NULL))
goto fail0;
@@ -635,44 +617,30 @@ static int del_mc_from_global_list(struct mem_ctl_info *mci)
return handlers;
}
-/**
- * edac_mc_find: Search for a mem_ctl_info structure whose index is 'idx'.
- *
- * If found, return a pointer to the structure.
- * Else return NULL.
- *
- * Caller must hold mem_ctls_mutex.
- */
struct mem_ctl_info *edac_mc_find(int idx)
{
+ struct mem_ctl_info *mci = NULL;
struct list_head *item;
- struct mem_ctl_info *mci;
+
+ mutex_lock(&mem_ctls_mutex);
list_for_each(item, &mc_devices) {
mci = list_entry(item, struct mem_ctl_info, link);
if (mci->mc_idx >= idx) {
- if (mci->mc_idx == idx)
- return mci;
-
+			if (mci->mc_idx == idx)
+				goto unlock;
+
break;
}
}
- return NULL;
+unlock:
+ mutex_unlock(&mem_ctls_mutex);
+ return mci;
}
EXPORT_SYMBOL(edac_mc_find);
-/**
- * edac_mc_add_mc_with_groups: Insert the 'mci' structure into the mci
- * global list and create sysfs entries associated with mci structure
- * @mci: pointer to the mci structure to be added to the list
- * @groups: optional attribute groups for the driver-specific sysfs entries
- *
- * Return:
- * 0 Success
- * !0 Failure
- */
/* FIXME - should a warning be printed if no error detection? correction? */
int edac_mc_add_mc_with_groups(struct mem_ctl_info *mci,
@@ -763,13 +731,6 @@ fail0:
}
EXPORT_SYMBOL_GPL(edac_mc_add_mc_with_groups);
-/**
- * edac_mc_del_mc: Remove sysfs entries for specified mci structure and
- * remove mci structure from global list
- * @pdev: Pointer to 'struct device' representing mci structure to remove.
- *
- * Return pointer to removed mci structure, or NULL if device not found.
- */
struct mem_ctl_info *edac_mc_del_mc(struct device *dev)
{
struct mem_ctl_info *mci;
@@ -779,7 +740,7 @@ struct mem_ctl_info *edac_mc_del_mc(struct device *dev)
mutex_lock(&mem_ctls_mutex);
/* find the requested mci struct in the global list */
- mci = find_mci_by_dev(dev);
+ mci = __find_mci_by_dev(dev);
if (mci == NULL) {
mutex_unlock(&mem_ctls_mutex);
return NULL;
@@ -1033,18 +994,6 @@ static void edac_ue_error(struct mem_ctl_info *mci,
edac_inc_ue_error(mci, enable_per_layer_report, pos, error_count);
}
-/**
- * edac_raw_mc_handle_error - reports a memory event to userspace without doing
- * anything to discover the error location
- *
- * @type: severity of the error (CE/UE/Fatal)
- * @mci: a struct mem_ctl_info pointer
- * @e: error description
- *
- * This raw function is used internally by edac_mc_handle_error(). It should
- * only be called directly when the hardware error come directly from BIOS,
- * like in the case of APEI GHES driver.
- */
void edac_raw_mc_handle_error(const enum hw_event_mc_err_type type,
struct mem_ctl_info *mci,
struct edac_raw_error_desc *e)
@@ -1074,24 +1023,6 @@ void edac_raw_mc_handle_error(const enum hw_event_mc_err_type type,
}
EXPORT_SYMBOL_GPL(edac_raw_mc_handle_error);
-/**
- * edac_mc_handle_error - reports a memory event to userspace
- *
- * @type: severity of the error (CE/UE/Fatal)
- * @mci: a struct mem_ctl_info pointer
- * @error_count: Number of errors of the same type
- * @page_frame_number: mem page where the error occurred
- * @offset_in_page: offset of the error inside the page
- * @syndrome: ECC syndrome
- * @top_layer: Memory layer[0] position
- * @mid_layer: Memory layer[1] position
- * @low_layer: Memory layer[2] position
- * @msg: Message meaningful to the end users that
- * explains the event
- * @other_detail: Technical details about the event that
- * may help hardware manufacturers and
- * EDAC developers to analyse the event
- */
void edac_mc_handle_error(const enum hw_event_mc_err_type type,
struct mem_ctl_info *mci,
const u16 error_count,
diff --git a/drivers/edac/edac_mc.h b/drivers/edac/edac_mc.h
new file mode 100644
index 000000000000..50fc1dc9c0d8
--- /dev/null
+++ b/drivers/edac/edac_mc.h
@@ -0,0 +1,245 @@
+/*
+ * Defines, structures, APIs for edac_mc module
+ *
+ * (C) 2007 Linux Networx (http://lnxi.com)
+ * This file may be distributed under the terms of the
+ * GNU General Public License.
+ *
+ * Written by Thayne Harbaugh
+ * Based on work by Dan Hollis <goemon at anime dot net> and others.
+ * http://www.anime.net/~goemon/linux-ecc/
+ *
+ * NMI handling support added by
+ * Dave Peterson <dsp@llnl.gov> <dave_peterson@pobox.com>
+ *
+ * Refactored for multi-source files:
+ * Doug Thompson <norsk5@xmission.com>
+ *
+ * Please look at Documentation/driver-api/edac.rst for more info about
+ * EDAC core structs and functions.
+ */
+
+#ifndef _EDAC_MC_H_
+#define _EDAC_MC_H_
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/spinlock.h>
+#include <linux/smp.h>
+#include <linux/pci.h>
+#include <linux/time.h>
+#include <linux/nmi.h>
+#include <linux/rcupdate.h>
+#include <linux/completion.h>
+#include <linux/kobject.h>
+#include <linux/platform_device.h>
+#include <linux/workqueue.h>
+#include <linux/edac.h>
+
+#if PAGE_SHIFT < 20
+#define PAGES_TO_MiB(pages) ((pages) >> (20 - PAGE_SHIFT))
+#define MiB_TO_PAGES(mb) ((mb) << (20 - PAGE_SHIFT))
+#else /* PAGE_SHIFT >= 20 */
+#define PAGES_TO_MiB(pages) ((pages) << (PAGE_SHIFT - 20))
+#define MiB_TO_PAGES(mb) ((mb) >> (PAGE_SHIFT - 20))
+#endif
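For the common PAGE_SHIFT == 12 case (4 KiB pages), both macros reduce to shifts by 8:

	PAGES_TO_MiB(512)	/* 512 >> 8 == 2, i.e. 2 MiB */
	MiB_TO_PAGES(2)		/* 2 << 8 == 512 pages       */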
+
+#define edac_printk(level, prefix, fmt, arg...) \
+ printk(level "EDAC " prefix ": " fmt, ##arg)
+
+#define edac_mc_printk(mci, level, fmt, arg...) \
+ printk(level "EDAC MC%d: " fmt, mci->mc_idx, ##arg)
+
+#define edac_mc_chipset_printk(mci, level, prefix, fmt, arg...) \
+ printk(level "EDAC " prefix " MC%d: " fmt, mci->mc_idx, ##arg)
+
+#define edac_device_printk(ctl, level, fmt, arg...) \
+ printk(level "EDAC DEVICE%d: " fmt, ctl->dev_idx, ##arg)
+
+#define edac_pci_printk(ctl, level, fmt, arg...) \
+ printk(level "EDAC PCI%d: " fmt, ctl->pci_idx, ##arg)
+
+/* prefixes for edac_printk() and edac_mc_printk() */
+#define EDAC_MC "MC"
+#define EDAC_PCI "PCI"
+#define EDAC_DEBUG "DEBUG"
+
+extern const char * const edac_mem_types[];
+
+#ifdef CONFIG_EDAC_DEBUG
+extern int edac_debug_level;
+
+#define edac_dbg(level, fmt, ...) \
+do { \
+ if (level <= edac_debug_level) \
+ edac_printk(KERN_DEBUG, EDAC_DEBUG, \
+ "%s: " fmt, __func__, ##__VA_ARGS__); \
+} while (0)
+
+#else /* !CONFIG_EDAC_DEBUG */
+
+#define edac_dbg(level, fmt, ...) \
+do { \
+ if (0) \
+ edac_printk(KERN_DEBUG, EDAC_DEBUG, \
+ "%s: " fmt, __func__, ##__VA_ARGS__); \
+} while (0)
+
+#endif /* !CONFIG_EDAC_DEBUG */
+
+#define PCI_VEND_DEV(vend, dev) PCI_VENDOR_ID_ ## vend, \
+ PCI_DEVICE_ID_ ## vend ## _ ## dev
+
+#define edac_dev_name(dev) (dev)->dev_name
+
+#define to_mci(k) container_of(k, struct mem_ctl_info, dev)
+
+/**
+ * edac_mc_alloc() - Allocate and partially fill a struct &mem_ctl_info.
+ *
+ * @mc_num: Memory controller number
+ * @n_layers: Number of MC hierarchy layers
+ * @layers: Describes each layer as seen by the Memory Controller
+ * @sz_pvt: size of private storage needed
+ *
+ *
+ * Everything is kmalloc'ed as one big chunk - more efficient.
+ * It can only be used if all structures have the same lifetime - otherwise
+ * you have to allocate and initialize your own structures.
+ *
+ * Use edac_mc_free() to free mc structures allocated by this function.
+ *
+ * .. note::
+ *
+ * drivers handle multi-rank memories in different ways: in some
+ * drivers, one multi-rank memory stick is mapped as one entry, while, in
+ * others, a single multi-rank memory stick would be mapped into several
+ * entries. Currently, this function will allocate multiple struct dimm_info
+ * in such scenarios, as grouping the multiple ranks requires driver changes.
+ *
+ * Returns:
+ *	On success, a pointer to the allocated struct mem_ctl_info;
+ * %NULL otherwise
+ */
+struct mem_ctl_info *edac_mc_alloc(unsigned mc_num,
+ unsigned n_layers,
+ struct edac_mc_layer *layers,
+ unsigned sz_pvt);
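A typical two-layer allocation, roughly as csrow-based drivers do it (nid, nr_csrows, nr_channels and struct my_pvt are illustrative placeholders):

	struct edac_mc_layer layers[2];
	struct mem_ctl_info *mci;

	layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
	layers[0].size = nr_csrows;
	layers[0].is_virt_csrow = true;
	layers[1].type = EDAC_MC_LAYER_CHANNEL;
	layers[1].size = nr_channels;
	layers[1].is_virt_csrow = false;

	mci = edac_mc_alloc(nid, ARRAY_SIZE(layers), layers, sizeof(struct my_pvt));
	if (!mci)
		return -ENOMEM;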
+
+/**
+ * edac_mc_add_mc_with_groups() - Insert the @mci structure into the mci
+ * global list and create sysfs entries associated with @mci structure.
+ *
+ * @mci: pointer to the mci structure to be added to the list
+ * @groups: optional attribute groups for the driver-specific sysfs entries
+ *
+ * Returns:
+ * 0 on Success, or an error code on failure
+ */
+extern int edac_mc_add_mc_with_groups(struct mem_ctl_info *mci,
+ const struct attribute_group **groups);
+#define edac_mc_add_mc(mci) edac_mc_add_mc_with_groups(mci, NULL)
+
+/**
+ * edac_mc_free() - Frees a previously allocated @mci structure
+ *
+ * @mci: pointer to a struct mem_ctl_info structure
+ */
+extern void edac_mc_free(struct mem_ctl_info *mci);
+
+/**
+ * edac_mc_find() - Search for a mem_ctl_info structure whose index is @idx.
+ *
+ * @idx: index to seek
+ *
+ * If found, return a pointer to the structure.
+ * Else return NULL.
+ */
+extern struct mem_ctl_info *edac_mc_find(int idx);
+
+/**
+ * find_mci_by_dev() - Scan list of controllers looking for the one that
+ * manages the @dev device.
+ *
+ * @dev: pointer to a struct device related with the MCI
+ *
+ * Returns: a pointer to struct &mem_ctl_info on success;
+ * %NULL otherwise.
+ */
+extern struct mem_ctl_info *find_mci_by_dev(struct device *dev);
+
+/**
+ * edac_mc_del_mc() - Remove sysfs entries for mci structure associated with
+ * @dev and remove mci structure from global list.
+ *
+ * @dev: Pointer to struct &device representing mci structure to remove.
+ *
+ * Returns: pointer to removed mci structure, or %NULL if device not found.
+ */
+extern struct mem_ctl_info *edac_mc_del_mc(struct device *dev);
+
+/**
+ * edac_mc_find_csrow_by_page() - Ancillary routine to identify what csrow
+ * contains a memory page.
+ *
+ * @mci: pointer to a struct mem_ctl_info structure
+ * @page: memory page to find
+ *
+ * Returns: the csrow number on success; -1 if not found.
+ */
+extern int edac_mc_find_csrow_by_page(struct mem_ctl_info *mci,
+ unsigned long page);
+
+/**
+ * edac_raw_mc_handle_error() - Reports a memory event to userspace without
+ * doing anything to discover the error location.
+ *
+ * @type: severity of the error (CE/UE/Fatal)
+ * @mci: a struct mem_ctl_info pointer
+ * @e: error description
+ *
+ * This raw function is used internally by edac_mc_handle_error(). It should
+ * only be called directly when the hardware error comes directly from BIOS,
+ * like in the case of APEI GHES driver.
+ */
+void edac_raw_mc_handle_error(const enum hw_event_mc_err_type type,
+ struct mem_ctl_info *mci,
+ struct edac_raw_error_desc *e);
+
+/**
+ * edac_mc_handle_error() - Reports a memory event to userspace.
+ *
+ * @type: severity of the error (CE/UE/Fatal)
+ * @mci: a struct mem_ctl_info pointer
+ * @error_count: Number of errors of the same type
+ * @page_frame_number: mem page where the error occurred
+ * @offset_in_page: offset of the error inside the page
+ * @syndrome: ECC syndrome
+ * @top_layer: Memory layer[0] position
+ * @mid_layer: Memory layer[1] position
+ * @low_layer: Memory layer[2] position
+ * @msg: Message meaningful to the end users that
+ * explains the event
+ * @other_detail: Technical details about the event that
+ * may help hardware manufacturers and
+ * EDAC developers to analyse the event
+ */
+void edac_mc_handle_error(const enum hw_event_mc_err_type type,
+ struct mem_ctl_info *mci,
+ const u16 error_count,
+ const unsigned long page_frame_number,
+ const unsigned long offset_in_page,
+ const unsigned long syndrome,
+ const int top_layer,
+ const int mid_layer,
+ const int low_layer,
+ const char *msg,
+ const char *other_detail);
+
+/*
+ * edac misc APIs
+ */
+extern char *edac_op_state_to_string(int op_state);
+
+#endif /* _EDAC_MC_H_ */
diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c
index 4e0f8e720ad9..39dbab7d62f1 100644
--- a/drivers/edac/edac_mc_sysfs.c
+++ b/drivers/edac/edac_mc_sysfs.c
@@ -19,7 +19,7 @@
#include <linux/pm_runtime.h>
#include <linux/uaccess.h>
-#include "edac_core.h"
+#include "edac_mc.h"
#include "edac_module.h"
/* MC EDAC Controls, setable by module parameter, and sysfs */
diff --git a/drivers/edac/edac_module.c b/drivers/edac/edac_module.c
index 5f8543be995a..172598a27d7d 100644
--- a/drivers/edac/edac_module.c
+++ b/drivers/edac/edac_module.c
@@ -12,7 +12,7 @@
*/
#include <linux/edac.h>
-#include "edac_core.h"
+#include "edac_mc.h"
#include "edac_module.h"
#define EDAC_VERSION "Ver: 3.0.0"
diff --git a/drivers/edac/edac_module.h b/drivers/edac/edac_module.h
index cfaacb99c973..014871e169cc 100644
--- a/drivers/edac/edac_module.h
+++ b/drivers/edac/edac_module.h
@@ -10,7 +10,9 @@
#ifndef __EDAC_MODULE_H__
#define __EDAC_MODULE_H__
-#include "edac_core.h"
+#include "edac_mc.h"
+#include "edac_pci.h"
+#include "edac_device.h"
/*
* INTERNAL EDAC MODULE:
diff --git a/drivers/edac/edac_pci.c b/drivers/edac/edac_pci.c
index 8f2f2899a7a2..4e9d5632041a 100644
--- a/drivers/edac/edac_pci.c
+++ b/drivers/edac/edac_pci.c
@@ -9,35 +9,25 @@
* or implied.
*
*/
+#include <asm/page.h>
+#include <asm/uaccess.h>
+#include <linux/ctype.h>
+#include <linux/highmem.h>
+#include <linux/init.h>
#include <linux/module.h>
-#include <linux/types.h>
+#include <linux/slab.h>
#include <linux/smp.h>
-#include <linux/init.h>
+#include <linux/spinlock.h>
#include <linux/sysctl.h>
-#include <linux/highmem.h>
#include <linux/timer.h>
-#include <linux/slab.h>
-#include <linux/spinlock.h>
-#include <linux/list.h>
-#include <linux/ctype.h>
-#include <linux/workqueue.h>
-#include <asm/uaccess.h>
-#include <asm/page.h>
-#include "edac_core.h"
+#include "edac_pci.h"
#include "edac_module.h"
static DEFINE_MUTEX(edac_pci_ctls_mutex);
static LIST_HEAD(edac_pci_list);
static atomic_t pci_indexes = ATOMIC_INIT(0);
-/*
- * edac_pci_alloc_ctl_info
- *
- * The alloc() function for the 'edac_pci' control info
- * structure. The chip driver will allocate one of these for each
- * edac_pci it is going to control/register with the EDAC CORE.
- */
struct edac_pci_ctl_info *edac_pci_alloc_ctl_info(unsigned int sz_pvt,
const char *edac_pci_name)
{
@@ -68,16 +58,6 @@ struct edac_pci_ctl_info *edac_pci_alloc_ctl_info(unsigned int sz_pvt,
}
EXPORT_SYMBOL_GPL(edac_pci_alloc_ctl_info);
-/*
- * edac_pci_free_ctl_info()
- *
- * Last action on the pci control structure.
- *
- * call the remove sysfs information, which will unregister
- * this control struct's kobj. When that kobj's ref count
- * goes to zero, its release function will be call and then
- * kfree() the memory.
- */
void edac_pci_free_ctl_info(struct edac_pci_ctl_info *pci)
{
edac_dbg(1, "\n");
@@ -215,31 +195,12 @@ static void edac_pci_workq_function(struct work_struct *work_req)
mutex_unlock(&edac_pci_ctls_mutex);
}
-/*
- * edac_pci_alloc_index: Allocate a unique PCI index number
- *
- * Return:
- * allocated index number
- *
- */
int edac_pci_alloc_index(void)
{
return atomic_inc_return(&pci_indexes) - 1;
}
EXPORT_SYMBOL_GPL(edac_pci_alloc_index);
-/*
- * edac_pci_add_device: Insert the 'edac_dev' structure into the
- * edac_pci global list and create sysfs entries associated with
- * edac_pci structure.
- * @pci: pointer to the edac_device structure to be added to the list
- * @edac_idx: A unique numeric identifier to be assigned to the
- * 'edac_pci' structure.
- *
- * Return:
- * 0 Success
- * !0 Failure
- */
int edac_pci_add_device(struct edac_pci_ctl_info *pci, int edac_idx)
{
edac_dbg(0, "\n");
@@ -285,19 +246,6 @@ fail0:
}
EXPORT_SYMBOL_GPL(edac_pci_add_device);
-/*
- * edac_pci_del_device()
- * Remove sysfs entries for specified edac_pci structure and
- * then remove edac_pci structure from global list
- *
- * @dev:
- * Pointer to 'struct device' representing edac_pci structure
- * to remove
- *
- * Return:
- * Pointer to removed edac_pci structure,
- * or NULL if device not found
- */
struct edac_pci_ctl_info *edac_pci_del_device(struct device *dev)
{
struct edac_pci_ctl_info *pci;
@@ -351,17 +299,6 @@ struct edac_pci_gen_data {
int edac_idx;
};
-/*
- * edac_pci_create_generic_ctl
- *
- * A generic constructor for a PCI parity polling device
- * Some systems have more than one domain of PCI busses.
- * For systems with one domain, then this API will
- * provide for a generic poller.
- *
- * This routine calls the edac_pci_alloc_ctl_info() for
- * the generic device, with default values
- */
struct edac_pci_ctl_info *edac_pci_create_generic_ctl(struct device *dev,
const char *mod_name)
{
@@ -394,11 +331,6 @@ struct edac_pci_ctl_info *edac_pci_create_generic_ctl(struct device *dev,
}
EXPORT_SYMBOL_GPL(edac_pci_create_generic_ctl);
-/*
- * edac_pci_release_generic_ctl
- *
- * The release function of a generic EDAC PCI polling device
- */
void edac_pci_release_generic_ctl(struct edac_pci_ctl_info *pci)
{
edac_dbg(0, "pci mod=%s\n", pci->mod_name);
diff --git a/drivers/edac/edac_pci.h b/drivers/edac/edac_pci.h
new file mode 100644
index 000000000000..5175f5724cfa
--- /dev/null
+++ b/drivers/edac/edac_pci.h
@@ -0,0 +1,271 @@
+/*
+ * Defines, structures, APIs for edac_pci and edac_pci_sysfs
+ *
+ * (C) 2007 Linux Networx (http://lnxi.com)
+ * This file may be distributed under the terms of the
+ * GNU General Public License.
+ *
+ * Written by Thayne Harbaugh
+ * Based on work by Dan Hollis <goemon at anime dot net> and others.
+ * http://www.anime.net/~goemon/linux-ecc/
+ *
+ * NMI handling support added by
+ * Dave Peterson <dsp@llnl.gov> <dave_peterson@pobox.com>
+ *
+ * Refactored for multi-source files:
+ * Doug Thompson <norsk5@xmission.com>
+ *
+ * Please look at Documentation/driver-api/edac.rst for more info about
+ * EDAC core structs and functions.
+ */
+
+#ifndef _EDAC_PCI_H_
+#define _EDAC_PCI_H_
+
+#include <linux/completion.h>
+#include <linux/device.h>
+#include <linux/edac.h>
+#include <linux/kobject.h>
+#include <linux/list.h>
+#include <linux/pci.h>
+#include <linux/types.h>
+#include <linux/workqueue.h>
+
+#ifdef CONFIG_PCI
+
+struct edac_pci_counter {
+ atomic_t pe_count;
+ atomic_t npe_count;
+};
+
+/*
+ * Abstract edac_pci control info structure
+ *
+ */
+struct edac_pci_ctl_info {
+ /* for global list of edac_pci_ctl_info structs */
+ struct list_head link;
+
+ int pci_idx;
+
+ struct bus_type *edac_subsys; /* pointer to subsystem */
+
+ /* the internal state of this controller instance */
+ int op_state;
+ /* work struct for this instance */
+ struct delayed_work work;
+
+ /* pointer to edac polling checking routine:
+ * If NOT NULL: points to polling check routine
+ * If NULL: Then assumes INTERRUPT operation, where
+ * MC driver will receive events
+ */
+ void (*edac_check) (struct edac_pci_ctl_info * edac_dev);
+
+ struct device *dev; /* pointer to device structure */
+
+ const char *mod_name; /* module name */
+ const char *ctl_name; /* edac controller name */
+ const char *dev_name; /* pci/platform/etc... name */
+
+ void *pvt_info; /* pointer to 'private driver' info */
+
+ unsigned long start_time; /* edac_pci load start time (jiffies) */
+
+ struct completion complete;
+
+ /* sysfs top name under 'edac' directory
+ * and instance name:
+ * cpu/cpu0/...
+ * cpu/cpu1/...
+ * cpu/cpu2/...
+ * ...
+ */
+ char name[EDAC_DEVICE_NAME_LEN + 1];
+
+	/* Event counters for this whole EDAC Device */
+ struct edac_pci_counter counters;
+
+ /* edac sysfs device control for the 'name'
+ * device this structure controls
+ */
+ struct kobject kobj;
+};
+
+#define to_edac_pci_ctl_work(w) \
+ container_of(w, struct edac_pci_ctl_info,work)
+
+/* write all or some bits in a byte-register*/
+static inline void pci_write_bits8(struct pci_dev *pdev, int offset, u8 value,
+ u8 mask)
+{
+ if (mask != 0xff) {
+ u8 buf;
+
+ pci_read_config_byte(pdev, offset, &buf);
+ value &= mask;
+ buf &= ~mask;
+ value |= buf;
+ }
+
+ pci_write_config_byte(pdev, offset, value);
+}
+
+/* write all or some bits in a word-register*/
+static inline void pci_write_bits16(struct pci_dev *pdev, int offset,
+ u16 value, u16 mask)
+{
+ if (mask != 0xffff) {
+ u16 buf;
+
+ pci_read_config_word(pdev, offset, &buf);
+ value &= mask;
+ buf &= ~mask;
+ value |= buf;
+ }
+
+ pci_write_config_word(pdev, offset, value);
+}
+
+/*
+ * pci_write_bits32
+ *
+ * edac local routine to do pci_write_config_dword, but adds
+ * a mask parameter. If mask is all ones, ignore the mask.
+ * Otherwise utilize the mask to isolate specified bits
+ *
+ * write all or some bits in a dword-register
+ */
+static inline void pci_write_bits32(struct pci_dev *pdev, int offset,
+ u32 value, u32 mask)
+{
+ if (mask != 0xffffffff) {
+ u32 buf;
+
+ pci_read_config_dword(pdev, offset, &buf);
+ value &= mask;
+ buf &= ~mask;
+ value |= buf;
+ }
+
+ pci_write_config_dword(pdev, offset, value);
+}
+
+#endif /* CONFIG_PCI */
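A usage sketch of the read-modify-write helpers above (offset 0x40 and the bit values are made up for illustration; pdev is an already-acquired struct pci_dev *):

	/* Set bits [1:0] of the byte register at 0x40, preserving the rest. */
	pci_write_bits8(pdev, 0x40, 0x3, 0x3);

	/* An all-ones mask skips the read-back and writes the byte outright. */
	pci_write_bits8(pdev, 0x40, 0xa5, 0xff);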
+
+/*
+ * edac_pci APIs
+ */
+
+/**
+ * edac_pci_alloc_ctl_info:
+ * The alloc() function for the 'edac_pci' control info
+ * structure.
+ *
+ * @sz_pvt: size of the private info at struct &edac_pci_ctl_info
+ * @edac_pci_name: name of the PCI device
+ *
+ * The chip driver will allocate one of these for each
+ * edac_pci it is going to control/register with the EDAC CORE.
+ *
+ * Returns: a pointer to struct &edac_pci_ctl_info on success; %NULL otherwise.
+ */
+extern struct edac_pci_ctl_info *edac_pci_alloc_ctl_info(unsigned int sz_pvt,
+ const char *edac_pci_name);
+
+/**
+ * edac_pci_free_ctl_info():
+ * Last action on the pci control structure.
+ *
+ * @pci: pointer to struct &edac_pci_ctl_info
+ *
+ * Calls the remove sysfs information, which will unregister
+ * this control struct's kobj. When that kobj's ref count
+ * goes to zero, its release function will be called and then
+ * kfree() the memory.
+ */
+extern void edac_pci_free_ctl_info(struct edac_pci_ctl_info *pci);
+
+/**
+ * edac_pci_alloc_index: Allocate a unique PCI index number
+ *
+ * Returns:
+ * allocated index number
+ *
+ */
+extern int edac_pci_alloc_index(void);
+
+/**
+ * edac_pci_add_device(): Insert the 'edac_dev' structure into the
+ * edac_pci global list and create sysfs entries associated with
+ * edac_pci structure.
+ *
+ * @pci: pointer to the edac_pci structure to be added to the list
+ * @edac_idx: A unique numeric identifier to be assigned to the
+ * 'edac_pci' structure.
+ *
+ * Returns:
+ * 0 on Success, or an error code on failure
+ */
+extern int edac_pci_add_device(struct edac_pci_ctl_info *pci, int edac_idx);
+
+/**
+ * edac_pci_del_device()
+ * Remove sysfs entries for specified edac_pci structure and
+ * then remove edac_pci structure from global list
+ *
+ * @dev:
+ * Pointer to 'struct device' representing edac_pci structure
+ * to remove
+ *
+ * Returns:
+ * Pointer to removed edac_pci structure,
+ * or %NULL if device not found
+ */
+extern struct edac_pci_ctl_info *edac_pci_del_device(struct device *dev);
+
+/**
+ * edac_pci_create_generic_ctl()
+ * A generic constructor for a PCI parity polling device
+ * Some systems have more than one domain of PCI busses.
+ * For systems with one domain, then this API will
+ * provide for a generic poller.
+ *
+ * @dev: pointer to struct &device;
+ * @mod_name: name of the PCI device
+ *
+ * This routine calls the edac_pci_alloc_ctl_info() for
+ * the generic device, with default values
+ *
+ * Returns: Pointer to struct &edac_pci_ctl_info on success, %NULL on
+ * failure.
+ */
+extern struct edac_pci_ctl_info *edac_pci_create_generic_ctl(
+ struct device *dev,
+ const char *mod_name);
+
+/**
+ * edac_pci_release_generic_ctl():
+ * The release function of a generic EDAC PCI polling device.
+ *
+ * @pci: pointer to struct &edac_pci_ctl_info
+ */
+extern void edac_pci_release_generic_ctl(struct edac_pci_ctl_info *pci);
+
+/**
+ * edac_pci_create_sysfs():
+ * Create the controls/attributes for the specified EDAC PCI device.
+ *
+ * @pci: pointer to struct &edac_pci_ctl_info
+ */
+extern int edac_pci_create_sysfs(struct edac_pci_ctl_info *pci);
+
+/**
+ * edac_pci_remove_sysfs():
+ * Remove the controls and attributes for this EDAC PCI device.
+ *
+ * @pci: pointer to struct &edac_pci_ctl_info
+ */
+extern void edac_pci_remove_sysfs(struct edac_pci_ctl_info *pci);
+
+#endif
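
A minimal usage sketch of the edac_pci lifecycle declared above (not
part of the patch; error handling is trimmed, all names are invented,
and it follows the alloc/add/del/free pattern the comments describe):

	static int example_pci_probe(struct platform_device *pdev)
	{
		struct edac_pci_ctl_info *pci;

		/* no private data, so sz_pvt = 0 */
		pci = edac_pci_alloc_ctl_info(0, "example_pci");
		if (!pci)
			return -ENOMEM;

		pci->dev = &pdev->dev;
		dev_set_drvdata(pci->dev, pci);

		if (edac_pci_add_device(pci, edac_pci_alloc_index())) {
			edac_pci_free_ctl_info(pci);
			return -ENODEV;
		}
		return 0;
	}

	static int example_pci_remove(struct platform_device *pdev)
	{
		struct edac_pci_ctl_info *pci = edac_pci_del_device(&pdev->dev);

		if (pci)
			edac_pci_free_ctl_info(pci);
		return 0;
	}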
diff --git a/drivers/edac/edac_pci_sysfs.c b/drivers/edac/edac_pci_sysfs.c
index 6e3428ba400f..72c9eb9fdffb 100644
--- a/drivers/edac/edac_pci_sysfs.c
+++ b/drivers/edac/edac_pci_sysfs.c
@@ -11,7 +11,7 @@
#include <linux/slab.h>
#include <linux/ctype.h>
-#include "edac_core.h"
+#include "edac_pci.h"
#include "edac_module.h"
#define EDAC_PCI_SYMLINK "device"
@@ -418,12 +418,6 @@ static void edac_pci_main_kobj_teardown(void)
}
}
-/*
- *
- * edac_pci_create_sysfs
- *
- * Create the controls/attributes for the specified EDAC PCI device
- */
int edac_pci_create_sysfs(struct edac_pci_ctl_info *pci)
{
int err;
@@ -459,11 +453,6 @@ unregister_cleanup:
return err;
}
-/*
- * edac_pci_remove_sysfs
- *
- * remove the controls and attributes for this EDAC PCI device
- */
void edac_pci_remove_sysfs(struct edac_pci_ctl_info *pci)
{
edac_dbg(0, "index=%d\n", pci->pci_idx);
diff --git a/drivers/edac/fsl_ddr_edac.c b/drivers/edac/fsl_ddr_edac.c
index 9774f52f0c3e..4e9608a958e7 100644
--- a/drivers/edac/fsl_ddr_edac.c
+++ b/drivers/edac/fsl_ddr_edac.c
@@ -28,7 +28,6 @@
#include <linux/of_device.h>
#include <linux/of_address.h>
#include "edac_module.h"
-#include "edac_core.h"
#include "fsl_ddr_edac.h"
#define EDAC_MOD_STR "fsl_ddr_edac"
diff --git a/drivers/edac/ghes_edac.c b/drivers/edac/ghes_edac.c
index e3fa4390f846..4e61a6229dd2 100644
--- a/drivers/edac/ghes_edac.c
+++ b/drivers/edac/ghes_edac.c
@@ -14,7 +14,7 @@
#include <acpi/ghes.h>
#include <linux/edac.h>
#include <linux/dmi.h>
-#include "edac_core.h"
+#include "edac_module.h"
#include <ras/ras_event.h>
#define GHES_EDAC_REVISION " Ver: 1.0.0"
diff --git a/drivers/edac/highbank_l2_edac.c b/drivers/edac/highbank_l2_edac.c
index 2f193668ebc7..cd9a2bb7c548 100644
--- a/drivers/edac/highbank_l2_edac.c
+++ b/drivers/edac/highbank_l2_edac.c
@@ -21,7 +21,6 @@
#include <linux/platform_device.h>
#include <linux/of_platform.h>
-#include "edac_core.h"
#include "edac_module.h"
#define SR_CLR_SB_ECC_INTR 0x0
diff --git a/drivers/edac/highbank_mc_edac.c b/drivers/edac/highbank_mc_edac.c
index 11260cc3360e..0e7e0a404d89 100644
--- a/drivers/edac/highbank_mc_edac.c
+++ b/drivers/edac/highbank_mc_edac.c
@@ -22,7 +22,6 @@
#include <linux/of_platform.h>
#include <linux/uaccess.h>
-#include "edac_core.h"
#include "edac_module.h"
/* DDR Ctrlr Error Registers */
diff --git a/drivers/edac/i3000_edac.c b/drivers/edac/i3000_edac.c
index 5cb36a6022cc..5306240570d7 100644
--- a/drivers/edac/i3000_edac.c
+++ b/drivers/edac/i3000_edac.c
@@ -14,7 +14,7 @@
#include <linux/pci.h>
#include <linux/pci_ids.h>
#include <linux/edac.h>
-#include "edac_core.h"
+#include "edac_module.h"
#define I3000_REVISION "1.1"
diff --git a/drivers/edac/i3200_edac.c b/drivers/edac/i3200_edac.c
index 1f453382258a..77c58d201a30 100644
--- a/drivers/edac/i3200_edac.c
+++ b/drivers/edac/i3200_edac.c
@@ -13,7 +13,7 @@
#include <linux/pci_ids.h>
#include <linux/edac.h>
#include <linux/io.h>
-#include "edac_core.h"
+#include "edac_module.h"
#include <linux/io-64-nonatomic-lo-hi.h>
diff --git a/drivers/edac/i5000_edac.c b/drivers/edac/i5000_edac.c
index 72e07e3cf718..1670d27bcac8 100644
--- a/drivers/edac/i5000_edac.c
+++ b/drivers/edac/i5000_edac.c
@@ -22,7 +22,7 @@
#include <linux/edac.h>
#include <asm/mmzone.h>
-#include "edac_core.h"
+#include "edac_module.h"
/*
* Alter this version for the I5000 module when modifications are made
diff --git a/drivers/edac/i5100_edac.c b/drivers/edac/i5100_edac.c
index c655162caf08..a8334c4acea7 100644
--- a/drivers/edac/i5100_edac.c
+++ b/drivers/edac/i5100_edac.c
@@ -29,7 +29,6 @@
#include <linux/mmzone.h>
#include <linux/debugfs.h>
-#include "edac_core.h"
#include "edac_module.h"
/* register addresses */
diff --git a/drivers/edac/i5400_edac.c b/drivers/edac/i5400_edac.c
index 6ef6ad1ba16e..abf6ef22e220 100644
--- a/drivers/edac/i5400_edac.c
+++ b/drivers/edac/i5400_edac.c
@@ -32,7 +32,7 @@
#include <linux/edac.h>
#include <linux/mmzone.h>
-#include "edac_core.h"
+#include "edac_module.h"
/*
* Alter this version for the I5400 module when modifications are made
diff --git a/drivers/edac/i7300_edac.c b/drivers/edac/i7300_edac.c
index dcac982fdc7a..0a912bf6de00 100644
--- a/drivers/edac/i7300_edac.c
+++ b/drivers/edac/i7300_edac.c
@@ -26,7 +26,7 @@
#include <linux/edac.h>
#include <linux/mmzone.h>
-#include "edac_core.h"
+#include "edac_module.h"
/*
* Alter this version for the I7300 module when modifications are made
diff --git a/drivers/edac/i7core_edac.c b/drivers/edac/i7core_edac.c
index 8a68a5e943ea..69b5adead0ad 100644
--- a/drivers/edac/i7core_edac.c
+++ b/drivers/edac/i7core_edac.c
@@ -39,7 +39,7 @@
#include <asm/processor.h>
#include <asm/div64.h>
-#include "edac_core.h"
+#include "edac_module.h"
/* Static vars */
static LIST_HEAD(i7core_edac_list);
diff --git a/drivers/edac/i82443bxgx_edac.c b/drivers/edac/i82443bxgx_edac.c
index 4d4110364f02..cb61a5b7d080 100644
--- a/drivers/edac/i82443bxgx_edac.c
+++ b/drivers/edac/i82443bxgx_edac.c
@@ -29,7 +29,7 @@
#include <linux/edac.h>
-#include "edac_core.h"
+#include "edac_module.h"
#define I82443_REVISION "0.1"
diff --git a/drivers/edac/i82860_edac.c b/drivers/edac/i82860_edac.c
index ee1078cd3b96..236c813227fc 100644
--- a/drivers/edac/i82860_edac.c
+++ b/drivers/edac/i82860_edac.c
@@ -14,7 +14,7 @@
#include <linux/pci.h>
#include <linux/pci_ids.h>
#include <linux/edac.h>
-#include "edac_core.h"
+#include "edac_module.h"
#define I82860_REVISION " Ver: 2.0.2"
#define EDAC_MOD_STR "i82860_edac"
diff --git a/drivers/edac/i82875p_edac.c b/drivers/edac/i82875p_edac.c
index c26a513f8869..e286b7e74c7a 100644
--- a/drivers/edac/i82875p_edac.c
+++ b/drivers/edac/i82875p_edac.c
@@ -18,7 +18,7 @@
#include <linux/pci.h>
#include <linux/pci_ids.h>
#include <linux/edac.h>
-#include "edac_core.h"
+#include "edac_module.h"
#define I82875P_REVISION " Ver: 2.0.2"
#define EDAC_MOD_STR "i82875p_edac"
diff --git a/drivers/edac/i82975x_edac.c b/drivers/edac/i82975x_edac.c
index 35ab66c623a3..7baa8ace267b 100644
--- a/drivers/edac/i82975x_edac.c
+++ b/drivers/edac/i82975x_edac.c
@@ -14,7 +14,7 @@
#include <linux/pci.h>
#include <linux/pci_ids.h>
#include <linux/edac.h>
-#include "edac_core.h"
+#include "edac_module.h"
#define I82975X_REVISION " Ver: 1.0.0"
#define EDAC_MOD_STR "i82975x_edac"
diff --git a/drivers/edac/ie31200_edac.c b/drivers/edac/ie31200_edac.c
index 1c88d9707495..2733fb5938a4 100644
--- a/drivers/edac/ie31200_edac.c
+++ b/drivers/edac/ie31200_edac.c
@@ -41,7 +41,7 @@
#include <linux/edac.h>
#include <linux/io-64-nonatomic-lo-hi.h>
-#include "edac_core.h"
+#include "edac_module.h"
#define IE31200_REVISION "1.0"
#define EDAC_MOD_STR "ie31200_edac"
diff --git a/drivers/edac/layerscape_edac.c b/drivers/edac/layerscape_edac.c
index 6c59d897ad12..94cac7686a56 100644
--- a/drivers/edac/layerscape_edac.c
+++ b/drivers/edac/layerscape_edac.c
@@ -16,7 +16,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-#include "edac_core.h"
+#include "edac_module.h"
#include "fsl_ddr_edac.h"
static const struct of_device_id fsl_ddr_mc_err_of_match[] = {
diff --git a/drivers/edac/mce_amd.c b/drivers/edac/mce_amd.c
index daaac2c79ca7..34208f38c5b1 100644
--- a/drivers/edac/mce_amd.c
+++ b/drivers/edac/mce_amd.c
@@ -8,7 +8,7 @@ static struct amd_decoder_ops *fam_ops;
static u8 xec_mask = 0xf;
static bool report_gart_errors;
-static void (*nb_bus_decoder)(int node_id, struct mce *m);
+static void (*decode_dram_ecc)(int node_id, struct mce *m);
void amd_report_gart_errors(bool v)
{
@@ -18,16 +18,16 @@ EXPORT_SYMBOL_GPL(amd_report_gart_errors);
void amd_register_ecc_decoder(void (*f)(int, struct mce *))
{
- nb_bus_decoder = f;
+ decode_dram_ecc = f;
}
EXPORT_SYMBOL_GPL(amd_register_ecc_decoder);
void amd_unregister_ecc_decoder(void (*f)(int, struct mce *))
{
- if (nb_bus_decoder) {
- WARN_ON(nb_bus_decoder != f);
+ if (decode_dram_ecc) {
+ WARN_ON(decode_dram_ecc != f);
- nb_bus_decoder = NULL;
+ decode_dram_ecc = NULL;
}
}
EXPORT_SYMBOL_GPL(amd_unregister_ecc_decoder);
@@ -763,8 +763,8 @@ static void decode_mc4_mce(struct mce *m)
pr_cont("%s.\n", mc4_mce_desc[xec]);
- if (nb_bus_decoder)
- nb_bus_decoder(node_id, m);
+ if (decode_dram_ecc)
+ decode_dram_ecc(node_id, m);
return;
}
break;
@@ -851,7 +851,7 @@ static void decode_mc6_mce(struct mce *m)
/* Decode errors according to Scalable MCA specification */
static void decode_smca_errors(struct mce *m)
{
- struct smca_hwid_mcatype *type;
+ struct smca_hwid *hwid;
unsigned int bank_type;
const char *ip_name;
u8 xec = XEC(m->status, xec_mask);
@@ -862,21 +862,28 @@ static void decode_smca_errors(struct mce *m)
if (boot_cpu_data.x86 >= 0x17 && m->bank == 4)
pr_emerg(HW_ERR "Bank 4 is reserved on Fam17h.\n");
- type = smca_banks[m->bank].type;
- if (!type)
+ hwid = smca_banks[m->bank].hwid;
+ if (!hwid)
return;
- bank_type = type->bank_type;
- ip_name = smca_bank_names[bank_type].long_name;
+ bank_type = hwid->bank_type;
+ ip_name = smca_get_long_name(bank_type);
pr_emerg(HW_ERR "%s Extended Error Code: %d\n", ip_name, xec);
/* Only print the decode of valid error codes */
if (xec < smca_mce_descs[bank_type].num_descs &&
- (type->xec_bitmap & BIT_ULL(xec))) {
+ (hwid->xec_bitmap & BIT_ULL(xec))) {
pr_emerg(HW_ERR "%s Error: ", ip_name);
pr_cont("%s.\n", smca_mce_descs[bank_type].descs[xec]);
}
+
+ /*
+ * amd_get_nb_id() returns the last level cache id.
+ * The last level cache on Fam17h is 1 level below the node.
+ */
+ if (bank_type == SMCA_UMC && xec == 0 && decode_dram_ecc)
+ decode_dram_ecc(amd_get_nb_id(m->extcpu) >> 1, m);
}
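
A short worked reading of the '>> 1' above (an inference from the code,
not stated in the patch): amd_get_nb_id() returns the last-level-cache
id, and a Fam17h node contains two last-level caches, so halving the
LLC id yields the node id that decode_dram_ecc() expects:

	/* e.g. a CPU whose LLC id is 5 lives on node 5 >> 1 = 2 */
	decode_dram_ecc(amd_get_nb_id(m->extcpu) >> 1, m);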
static inline void amd_decode_err_code(u16 ec)
@@ -957,10 +964,13 @@ int amd_decode_mce(struct notifier_block *nb, unsigned long val, void *data)
((m->status & MCI_STATUS_PCC) ? "PCC" : "-"),
((m->status & MCI_STATUS_ADDRV) ? "AddrV" : "-"));
- if (c->x86 >= 0x15)
- pr_cont("|%s|%s",
- ((m->status & MCI_STATUS_DEFERRED) ? "Deferred" : "-"),
- ((m->status & MCI_STATUS_POISON) ? "Poison" : "-"));
+ if (c->x86 >= 0x15) {
+ pr_cont("|%s", (m->status & MCI_STATUS_DEFERRED ? "Deferred" : "-"));
+
+ /* F15h, bank4, bit 43 is part of McaStatSubCache. */
+ if (c->x86 != 0x15 || m->bank != 4)
+ pr_cont("|%s", (m->status & MCI_STATUS_POISON ? "Poison" : "-"));
+ }
if (boot_cpu_has(X86_FEATURE_SMCA)) {
u32 low, high;
diff --git a/drivers/edac/mpc85xx_edac.c b/drivers/edac/mpc85xx_edac.c
index ff0567526ee3..8f66cbed70b7 100644
--- a/drivers/edac/mpc85xx_edac.c
+++ b/drivers/edac/mpc85xx_edac.c
@@ -25,7 +25,6 @@
#include <linux/of_platform.h>
#include <linux/of_device.h>
#include "edac_module.h"
-#include "edac_core.h"
#include "mpc85xx_edac.h"
#include "fsl_ddr_edac.h"
@@ -300,6 +299,22 @@ err:
return res;
}
+static int mpc85xx_pci_err_remove(struct platform_device *op)
+{
+ struct edac_pci_ctl_info *pci = dev_get_drvdata(&op->dev);
+ struct mpc85xx_pci_pdata *pdata = pci->pvt_info;
+
+ edac_dbg(0, "\n");
+
+ out_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_ADDR, orig_pci_err_cap_dr);
+ out_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_EN, orig_pci_err_en);
+
+ edac_pci_del_device(&op->dev);
+ edac_pci_free_ctl_info(pci);
+
+ return 0;
+}
+
static const struct platform_device_id mpc85xx_pci_err_match[] = {
{
.name = "mpc85xx-pci-edac"
@@ -309,6 +324,7 @@ static const struct platform_device_id mpc85xx_pci_err_match[] = {
static struct platform_driver mpc85xx_pci_err_driver = {
.probe = mpc85xx_pci_err_probe,
+ .remove = mpc85xx_pci_err_remove,
.id_table = mpc85xx_pci_err_match,
.driver = {
.name = "mpc85xx_pci_err",
diff --git a/drivers/edac/mv64x60_edac.c b/drivers/edac/mv64x60_edac.c
index cb9b8577acbc..14b7e7b71eaa 100644
--- a/drivers/edac/mv64x60_edac.c
+++ b/drivers/edac/mv64x60_edac.c
@@ -17,7 +17,6 @@
#include <linux/edac.h>
#include <linux/gfp.h>
-#include "edac_core.h"
#include "edac_module.h"
#include "mv64x60_edac.h"
diff --git a/drivers/edac/octeon_edac-l2c.c b/drivers/edac/octeon_edac-l2c.c
index afea7fc625cc..c33059e9b0be 100644
--- a/drivers/edac/octeon_edac-l2c.c
+++ b/drivers/edac/octeon_edac-l2c.c
@@ -16,7 +16,6 @@
#include <asm/octeon/cvmx.h>
-#include "edac_core.h"
#include "edac_module.h"
#define EDAC_MOD_STR "octeon-l2c"
diff --git a/drivers/edac/octeon_edac-lmc.c b/drivers/edac/octeon_edac-lmc.c
index cda6dab5067a..9c1ffe3e912b 100644
--- a/drivers/edac/octeon_edac-lmc.c
+++ b/drivers/edac/octeon_edac-lmc.c
@@ -19,7 +19,6 @@
#include <asm/octeon/octeon.h>
#include <asm/octeon/cvmx-lmcx-defs.h>
-#include "edac_core.h"
#include "edac_module.h"
#define OCTEON_MAX_MC 4
diff --git a/drivers/edac/octeon_edac-pc.c b/drivers/edac/octeon_edac-pc.c
index 2ab6cf24c959..754eced59c32 100644
--- a/drivers/edac/octeon_edac-pc.c
+++ b/drivers/edac/octeon_edac-pc.c
@@ -15,7 +15,6 @@
#include <linux/io.h>
#include <linux/edac.h>
-#include "edac_core.h"
#include "edac_module.h"
#include <asm/octeon/cvmx.h>
diff --git a/drivers/edac/octeon_edac-pci.c b/drivers/edac/octeon_edac-pci.c
index 9ca73cec74e7..28b238eecefc 100644
--- a/drivers/edac/octeon_edac-pci.c
+++ b/drivers/edac/octeon_edac-pci.c
@@ -18,7 +18,6 @@
#include <asm/octeon/cvmx-pci-defs.h>
#include <asm/octeon/octeon.h>
-#include "edac_core.h"
#include "edac_module.h"
static void octeon_pci_poll(struct edac_pci_ctl_info *pci)
diff --git a/drivers/edac/pasemi_edac.c b/drivers/edac/pasemi_edac.c
index 9c971b575530..199f2c80480d 100644
--- a/drivers/edac/pasemi_edac.c
+++ b/drivers/edac/pasemi_edac.c
@@ -26,7 +26,7 @@
#include <linux/pci.h>
#include <linux/pci_ids.h>
#include <linux/edac.h>
-#include "edac_core.h"
+#include "edac_module.h"
#define MODULE_NAME "pasemi_edac"
diff --git a/drivers/edac/ppc4xx_edac.c b/drivers/edac/ppc4xx_edac.c
index 691ce25e9010..e55e92590106 100644
--- a/drivers/edac/ppc4xx_edac.c
+++ b/drivers/edac/ppc4xx_edac.c
@@ -21,7 +21,7 @@
#include <asm/dcr.h>
-#include "edac_core.h"
+#include "edac_module.h"
#include "ppc4xx_edac.h"
/*
diff --git a/drivers/edac/r82600_edac.c b/drivers/edac/r82600_edac.c
index 8f936bc7a010..978916625ced 100644
--- a/drivers/edac/r82600_edac.c
+++ b/drivers/edac/r82600_edac.c
@@ -20,7 +20,7 @@
#include <linux/pci.h>
#include <linux/pci_ids.h>
#include <linux/edac.h>
-#include "edac_core.h"
+#include "edac_module.h"
#define R82600_REVISION " Ver: 2.0.2"
#define EDAC_MOD_STR "r82600_edac"
diff --git a/drivers/edac/sb_edac.c b/drivers/edac/sb_edac.c
index 54775221a01f..54ae6dc45ab2 100644
--- a/drivers/edac/sb_edac.c
+++ b/drivers/edac/sb_edac.c
@@ -23,10 +23,11 @@
#include <linux/math64.h>
#include <linux/mod_devicetable.h>
#include <asm/cpu_device_id.h>
+#include <asm/intel-family.h>
#include <asm/processor.h>
#include <asm/mce.h>
-#include "edac_core.h"
+#include "edac_module.h"
/* Static vars */
static LIST_HEAD(sbridge_edac_list);
@@ -3365,12 +3366,13 @@ fail0:
{ X86_VENDOR_INTEL, 6, model, 0, (unsigned long)&table }
static const struct x86_cpu_id sbridge_cpuids[] = {
- ICPU(0x2d, pci_dev_descr_sbridge_table), /* SANDY_BRIDGE */
- ICPU(0x3e, pci_dev_descr_ibridge_table), /* IVY_BRIDGE */
- ICPU(0x3f, pci_dev_descr_haswell_table), /* HASWELL */
- ICPU(0x4f, pci_dev_descr_broadwell_table), /* BROADWELL */
- ICPU(0x56, pci_dev_descr_broadwell_table), /* BROADWELL-DE */
- ICPU(0x57, pci_dev_descr_knl_table), /* KNIGHTS_LANDING */
+ ICPU(INTEL_FAM6_SANDYBRIDGE_X, pci_dev_descr_sbridge_table),
+ ICPU(INTEL_FAM6_IVYBRIDGE_X, pci_dev_descr_ibridge_table),
+ ICPU(INTEL_FAM6_HASWELL_X, pci_dev_descr_haswell_table),
+ ICPU(INTEL_FAM6_BROADWELL_X, pci_dev_descr_broadwell_table),
+ ICPU(INTEL_FAM6_BROADWELL_XEON_D, pci_dev_descr_broadwell_table),
+ ICPU(INTEL_FAM6_XEON_PHI_KNL, pci_dev_descr_knl_table),
+ ICPU(INTEL_FAM6_XEON_PHI_KNM, pci_dev_descr_knl_table),
{ }
};
MODULE_DEVICE_TABLE(x86cpu, sbridge_cpuids);
diff --git a/drivers/edac/skx_edac.c b/drivers/edac/skx_edac.c
index 0ff4878c2aa1..79ef675e4d6f 100644
--- a/drivers/edac/skx_edac.c
+++ b/drivers/edac/skx_edac.c
@@ -25,10 +25,11 @@
#include <linux/math64.h>
#include <linux/mod_devicetable.h>
#include <asm/cpu_device_id.h>
+#include <asm/intel-family.h>
#include <asm/processor.h>
#include <asm/mce.h>
-#include "edac_core.h"
+#include "edac_module.h"
#define SKX_REVISION " Ver: 1.0 "
@@ -262,8 +263,8 @@ fail:
return -ENODEV;
}
-const struct x86_cpu_id skx_cpuids[] = {
- { X86_VENDOR_INTEL, 6, 0x55, 0, 0 }, /* Skylake */
+static const struct x86_cpu_id skx_cpuids[] = {
+ { X86_VENDOR_INTEL, 6, INTEL_FAM6_SKYLAKE_X, 0, 0 },
{ }
};
MODULE_DEVICE_TABLE(x86cpu, skx_cpuids);
@@ -1036,7 +1037,7 @@ static void skx_remove(void)
* search for all the devices we need
* check which DIMMs are present.
*/
-int __init skx_init(void)
+static int __init skx_init(void)
{
const struct x86_cpu_id *id;
const struct munit *m;
diff --git a/drivers/edac/synopsys_edac.c b/drivers/edac/synopsys_edac.c
index fc153aea2f6c..1c01dec78ec3 100644
--- a/drivers/edac/synopsys_edac.c
+++ b/drivers/edac/synopsys_edac.c
@@ -23,7 +23,7 @@
#include <linux/module.h>
#include <linux/platform_device.h>
-#include "edac_core.h"
+#include "edac_module.h"
/* Number of cs_rows needed per memory controller */
#define SYNPS_EDAC_NR_CSROWS 1
diff --git a/drivers/edac/tile_edac.c b/drivers/edac/tile_edac.c
index 71381642ce2a..8a33a87e67f1 100644
--- a/drivers/edac/tile_edac.c
+++ b/drivers/edac/tile_edac.c
@@ -30,7 +30,7 @@
#include <hv/hypervisor.h>
#include <hv/drv_mshim_intf.h>
-#include "edac_core.h"
+#include "edac_module.h"
#define DRV_NAME "tile-edac"
diff --git a/drivers/edac/x38_edac.c b/drivers/edac/x38_edac.c
index 314cf5cf268c..03c97a4bf590 100644
--- a/drivers/edac/x38_edac.c
+++ b/drivers/edac/x38_edac.c
@@ -16,7 +16,7 @@
#include <linux/edac.h>
#include <linux/io-64-nonatomic-lo-hi.h>
-#include "edac_core.h"
+#include "edac_module.h"
#define X38_REVISION "1.1"
diff --git a/drivers/edac/xgene_edac.c b/drivers/edac/xgene_edac.c
index bf19b6e3bd12..6c270d9d304a 100644
--- a/drivers/edac/xgene_edac.c
+++ b/drivers/edac/xgene_edac.c
@@ -28,7 +28,6 @@
#include <linux/of_address.h>
#include <linux/regmap.h>
-#include "edac_core.h"
#include "edac_module.h"
#define EDAC_MOD_STR "xgene_edac"
@@ -1602,16 +1601,16 @@ static void xgene_edac_pa_report(struct edac_device_ctl_info *edac_dev)
dev_err(edac_dev->dev, "IOB PA read data RAM error\n");
if (reg & IOBPA_M_RDATA_CORRUPT_MASK)
dev_err(edac_dev->dev,
- "Mutilple IOB PA read data RAM error\n");
+ "Multiple IOB PA read data RAM error\n");
if (reg & IOBPA_WDATA_CORRUPT_MASK)
dev_err(edac_dev->dev, "IOB PA write data RAM error\n");
if (reg & IOBPA_M_WDATA_CORRUPT_MASK)
dev_err(edac_dev->dev,
- "Mutilple IOB PA write data RAM error\n");
+ "Multiple IOB PA write data RAM error\n");
if (reg & IOBPA_TRANS_CORRUPT_MASK)
dev_err(edac_dev->dev, "IOB PA transaction error\n");
if (reg & IOBPA_M_TRANS_CORRUPT_MASK)
- dev_err(edac_dev->dev, "Mutilple IOB PA transaction error\n");
+ dev_err(edac_dev->dev, "Multiple IOB PA transaction error\n");
if (reg & IOBPA_REQIDRAM_CORRUPT_MASK)
dev_err(edac_dev->dev, "IOB PA transaction ID RAM error\n");
if (reg & IOBPA_M_REQIDRAM_CORRUPT_MASK)
diff --git a/drivers/extcon/extcon-arizona.c b/drivers/extcon/extcon-arizona.c
index 56e6c4c7c60d..d836d4ce5ee4 100644
--- a/drivers/extcon/extcon-arizona.c
+++ b/drivers/extcon/extcon-arizona.c
@@ -274,9 +274,10 @@ static void arizona_extcon_pulse_micbias(struct arizona_extcon_info *info)
struct arizona *arizona = info->arizona;
const char *widget = arizona_extcon_get_micbias(info);
struct snd_soc_dapm_context *dapm = arizona->dapm;
+ struct snd_soc_component *component = snd_soc_dapm_to_component(dapm);
int ret;
- ret = snd_soc_dapm_force_enable_pin(dapm, widget);
+ ret = snd_soc_component_force_enable_pin(component, widget);
if (ret != 0)
dev_warn(arizona->dev, "Failed to enable %s: %d\n",
widget, ret);
@@ -284,7 +285,7 @@ static void arizona_extcon_pulse_micbias(struct arizona_extcon_info *info)
snd_soc_dapm_sync(dapm);
if (!arizona->pdata.micd_force_micbias) {
- ret = snd_soc_dapm_disable_pin(arizona->dapm, widget);
+ ret = snd_soc_component_disable_pin(component, widget);
if (ret != 0)
dev_warn(arizona->dev, "Failed to disable %s: %d\n",
widget, ret);
@@ -349,6 +350,7 @@ static void arizona_stop_mic(struct arizona_extcon_info *info)
struct arizona *arizona = info->arizona;
const char *widget = arizona_extcon_get_micbias(info);
struct snd_soc_dapm_context *dapm = arizona->dapm;
+ struct snd_soc_component *component = snd_soc_dapm_to_component(dapm);
bool change;
int ret;
@@ -356,7 +358,7 @@ static void arizona_stop_mic(struct arizona_extcon_info *info)
ARIZONA_MICD_ENA, 0,
&change);
- ret = snd_soc_dapm_disable_pin(dapm, widget);
+ ret = snd_soc_component_disable_pin(component, widget);
if (ret != 0)
dev_warn(arizona->dev,
"Failed to disable %s: %d\n",
diff --git a/drivers/extcon/extcon-usb-gpio.c b/drivers/extcon/extcon-usb-gpio.c
index a27d350f69e3..d589c5feff3d 100644
--- a/drivers/extcon/extcon-usb-gpio.c
+++ b/drivers/extcon/extcon-usb-gpio.c
@@ -24,7 +24,6 @@
#include <linux/module.h>
#include <linux/of_gpio.h>
#include <linux/platform_device.h>
-#include <linux/pm_wakeirq.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/acpi.h>
@@ -36,7 +35,9 @@ struct usb_extcon_info {
struct extcon_dev *edev;
struct gpio_desc *id_gpiod;
+ struct gpio_desc *vbus_gpiod;
int id_irq;
+ int vbus_irq;
unsigned long debounce_jiffies;
struct delayed_work wq_detcable;
@@ -48,31 +49,47 @@ static const unsigned int usb_extcon_cable[] = {
EXTCON_NONE,
};
+/*
+ * "USB" = VBUS and "USB-HOST" = !ID. "USB" and "USB-HOST" cannot
+ * both be active at the same time, so if "USB-HOST" is active
+ * (i.e. ID is 0) we keep "USB" inactive even if VBUS is on.
+ *
+ * State | ID | VBUS
+ * ----------------------------------------
+ * [1] USB | H | H
+ * [2] none | H | L
+ * [3] USB-HOST | L | H
+ * [4] USB-HOST | L | L
+ *
+ * In case we have only one of these signals:
+ * - VBUS only - we want to distinguish between [1] and [2], so ID is always 1.
+ * - ID only - we want to distinguish between [1] and [4], so VBUS = ID.
+ */
static void usb_extcon_detect_cable(struct work_struct *work)
{
- int id;
+ int id, vbus;
struct usb_extcon_info *info = container_of(to_delayed_work(work),
struct usb_extcon_info,
wq_detcable);
- /* check ID and update cable state */
- id = gpiod_get_value_cansleep(info->id_gpiod);
- if (id) {
- /*
- * ID = 1 means USB HOST cable detached.
- * As we don't have event for USB peripheral cable attached,
- * we simulate USB peripheral attach here.
- */
+ /* check ID and VBUS and update cable state */
+ id = info->id_gpiod ?
+ gpiod_get_value_cansleep(info->id_gpiod) : 1;
+ vbus = info->vbus_gpiod ?
+ gpiod_get_value_cansleep(info->vbus_gpiod) : id;
+
+ /* at first we clean states which are no longer active */
+ if (id)
extcon_set_state_sync(info->edev, EXTCON_USB_HOST, false);
- extcon_set_state_sync(info->edev, EXTCON_USB, true);
- } else {
- /*
- * ID = 0 means USB HOST cable attached.
- * As we don't have event for USB peripheral cable detached,
- * we simulate USB peripheral detach here.
- */
+ if (!vbus)
extcon_set_state_sync(info->edev, EXTCON_USB, false);
+
+ if (!id) {
extcon_set_state_sync(info->edev, EXTCON_USB_HOST, true);
+ } else {
+ if (vbus)
+ extcon_set_state_sync(info->edev, EXTCON_USB, true);
}
}
@@ -101,12 +118,21 @@ static int usb_extcon_probe(struct platform_device *pdev)
return -ENOMEM;
info->dev = dev;
- info->id_gpiod = devm_gpiod_get(&pdev->dev, "id", GPIOD_IN);
- if (IS_ERR(info->id_gpiod)) {
- dev_err(dev, "failed to get ID GPIO\n");
- return PTR_ERR(info->id_gpiod);
+ info->id_gpiod = devm_gpiod_get_optional(&pdev->dev, "id", GPIOD_IN);
+ info->vbus_gpiod = devm_gpiod_get_optional(&pdev->dev, "vbus",
+ GPIOD_IN);
+
+ if (!info->id_gpiod && !info->vbus_gpiod) {
+ dev_err(dev, "failed to get gpios\n");
+ return -ENODEV;
}
+ if (IS_ERR(info->id_gpiod))
+ return PTR_ERR(info->id_gpiod);
+
+ if (IS_ERR(info->vbus_gpiod))
+ return PTR_ERR(info->vbus_gpiod);
+
info->edev = devm_extcon_dev_allocate(dev, usb_extcon_cable);
if (IS_ERR(info->edev)) {
dev_err(dev, "failed to allocate extcon device\n");
@@ -119,32 +145,56 @@ static int usb_extcon_probe(struct platform_device *pdev)
return ret;
}
- ret = gpiod_set_debounce(info->id_gpiod,
- USB_GPIO_DEBOUNCE_MS * 1000);
+ if (info->id_gpiod)
+ ret = gpiod_set_debounce(info->id_gpiod,
+ USB_GPIO_DEBOUNCE_MS * 1000);
+ if (!ret && info->vbus_gpiod)
+ ret = gpiod_set_debounce(info->vbus_gpiod,
+ USB_GPIO_DEBOUNCE_MS * 1000);
+
if (ret < 0)
info->debounce_jiffies = msecs_to_jiffies(USB_GPIO_DEBOUNCE_MS);
INIT_DELAYED_WORK(&info->wq_detcable, usb_extcon_detect_cable);
- info->id_irq = gpiod_to_irq(info->id_gpiod);
- if (info->id_irq < 0) {
- dev_err(dev, "failed to get ID IRQ\n");
- return info->id_irq;
+ if (info->id_gpiod) {
+ info->id_irq = gpiod_to_irq(info->id_gpiod);
+ if (info->id_irq < 0) {
+ dev_err(dev, "failed to get ID IRQ\n");
+ return info->id_irq;
+ }
+
+ ret = devm_request_threaded_irq(dev, info->id_irq, NULL,
+ usb_irq_handler,
+ IRQF_TRIGGER_RISING |
+ IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+ pdev->name, info);
+ if (ret < 0) {
+ dev_err(dev, "failed to request handler for ID IRQ\n");
+ return ret;
+ }
}
- ret = devm_request_threaded_irq(dev, info->id_irq, NULL,
- usb_irq_handler,
- IRQF_TRIGGER_RISING |
- IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
- pdev->name, info);
- if (ret < 0) {
- dev_err(dev, "failed to request handler for ID IRQ\n");
- return ret;
+ if (info->vbus_gpiod) {
+ info->vbus_irq = gpiod_to_irq(info->vbus_gpiod);
+ if (info->vbus_irq < 0) {
+ dev_err(dev, "failed to get VBUS IRQ\n");
+ return info->vbus_irq;
+ }
+
+ ret = devm_request_threaded_irq(dev, info->vbus_irq, NULL,
+ usb_irq_handler,
+ IRQF_TRIGGER_RISING |
+ IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+ pdev->name, info);
+ if (ret < 0) {
+ dev_err(dev, "failed to request handler for VBUS IRQ\n");
+ return ret;
+ }
}
platform_set_drvdata(pdev, info);
device_init_wakeup(dev, true);
- dev_pm_set_wake_irq(dev, info->id_irq);
/* Perform initial detection */
usb_extcon_detect_cable(&info->wq_detcable.work);
@@ -157,8 +207,6 @@ static int usb_extcon_remove(struct platform_device *pdev)
struct usb_extcon_info *info = platform_get_drvdata(pdev);
cancel_delayed_work_sync(&info->wq_detcable);
-
- dev_pm_clear_wake_irq(&pdev->dev);
device_init_wakeup(&pdev->dev, false);
return 0;
@@ -170,12 +218,32 @@ static int usb_extcon_suspend(struct device *dev)
struct usb_extcon_info *info = dev_get_drvdata(dev);
int ret = 0;
+ if (device_may_wakeup(dev)) {
+ if (info->id_gpiod) {
+ ret = enable_irq_wake(info->id_irq);
+ if (ret)
+ return ret;
+ }
+ if (info->vbus_gpiod) {
+ ret = enable_irq_wake(info->vbus_irq);
+ if (ret) {
+ if (info->id_gpiod)
+ disable_irq_wake(info->id_irq);
+
+ return ret;
+ }
+ }
+ }
+
/*
* We don't want to process any IRQs after this point
* as GPIOs used behind I2C subsystem might not be
* accessible until resume completes. So disable IRQ.
*/
- disable_irq(info->id_irq);
+ if (info->id_gpiod)
+ disable_irq(info->id_irq);
+ if (info->vbus_gpiod)
+ disable_irq(info->vbus_irq);
return ret;
}
@@ -185,7 +253,28 @@ static int usb_extcon_resume(struct device *dev)
struct usb_extcon_info *info = dev_get_drvdata(dev);
int ret = 0;
- enable_irq(info->id_irq);
+ if (device_may_wakeup(dev)) {
+ if (info->id_gpiod) {
+ ret = disable_irq_wake(info->id_irq);
+ if (ret)
+ return ret;
+ }
+ if (info->vbus_gpiod) {
+ ret = disable_irq_wake(info->vbus_irq);
+ if (ret) {
+ if (info->id_gpiod)
+ enable_irq_wake(info->id_irq);
+
+ return ret;
+ }
+ }
+ }
+
+ if (info->id_gpiod)
+ enable_irq(info->id_irq);
+ if (info->vbus_gpiod)
+ enable_irq(info->vbus_irq);
+
if (!device_may_wakeup(dev))
queue_delayed_work(system_power_efficient_wq,
&info->wq_detcable, 0);
diff --git a/drivers/firewire/net.c b/drivers/firewire/net.c
index 15475892af0c..5d3640264f2d 100644
--- a/drivers/firewire/net.c
+++ b/drivers/firewire/net.c
@@ -1368,15 +1368,6 @@ static netdev_tx_t fwnet_tx(struct sk_buff *skb, struct net_device *net)
return NETDEV_TX_OK;
}
-static int fwnet_change_mtu(struct net_device *net, int new_mtu)
-{
- if (new_mtu < 68)
- return -EINVAL;
-
- net->mtu = new_mtu;
- return 0;
-}
-
static const struct ethtool_ops fwnet_ethtool_ops = {
.get_link = ethtool_op_get_link,
};
@@ -1385,7 +1376,6 @@ static const struct net_device_ops fwnet_netdev_ops = {
.ndo_open = fwnet_open,
.ndo_stop = fwnet_stop,
.ndo_start_xmit = fwnet_tx,
- .ndo_change_mtu = fwnet_change_mtu,
};
static void fwnet_init_dev(struct net_device *net)
@@ -1454,7 +1444,6 @@ static int fwnet_probe(struct fw_unit *unit,
struct net_device *net;
bool allocated_netdev = false;
struct fwnet_device *dev;
- unsigned max_mtu;
int ret;
union fwnet_hwaddr *ha;
@@ -1493,13 +1482,9 @@ static int fwnet_probe(struct fw_unit *unit,
goto out;
dev->local_fifo = dev->handler.offset;
- /*
- * Use the RFC 2734 default 1500 octets or the maximum payload
- * as initial MTU
- */
- max_mtu = (1 << (card->max_receive + 1))
- - sizeof(struct rfc2734_header) - IEEE1394_GASP_HDR_SIZE;
- net->mtu = min(1500U, max_mtu);
+ net->mtu = 1500U;
+ net->min_mtu = ETH_MIN_MTU;
+ net->max_mtu = 0xfff;
/* Set our hardware address while we're at it */
ha = (union fwnet_hwaddr *)net->dev_addr;
diff --git a/drivers/firmware/Kconfig b/drivers/firmware/Kconfig
index bca172d42c74..1867f0d1389b 100644
--- a/drivers/firmware/Kconfig
+++ b/drivers/firmware/Kconfig
@@ -8,6 +8,17 @@ menu "Firmware Drivers"
config ARM_PSCI_FW
bool
+config ARM_PSCI_CHECKER
+ bool "ARM PSCI checker"
+ depends on ARM_PSCI_FW && HOTPLUG_CPU && !TORTURE_TEST
+ help
+ Run the PSCI checker during startup. This checks that hotplug and
+ suspend operations work correctly when using PSCI.
+
+ The torture tests may interfere with the PSCI checker by turning CPUs
+ on and off through hotplug, so for now torture tests and PSCI checker
+ are mutually exclusive.
+
config ARM_SCPI_PROTOCOL
tristate "ARM System Control and Power Interface (SCPI) Message Protocol"
depends on MAILBOX
@@ -203,6 +214,21 @@ config QCOM_SCM_64
def_bool y
depends on QCOM_SCM && ARM64
+config TI_SCI_PROTOCOL
+ tristate "TI System Control Interface (TISCI) Message Protocol"
+ depends on TI_MESSAGE_MANAGER
+ help
+ The TI System Control Interface (TISCI) Message Protocol is used to
+ manage compute systems such as ARM, DSP, etc. via the system
+ controller in complex System on Chip (SoC) designs such as those
+ found in certain Keystone-generation SoCs from TI.
+
+ The system controller provides various facilities, including power
+ management support.
+
+ This protocol library is used by client drivers to access the
+ features provided by the system controller.
+
config HAVE_ARM_SMCCC
bool
@@ -210,5 +236,6 @@ source "drivers/firmware/broadcom/Kconfig"
source "drivers/firmware/google/Kconfig"
source "drivers/firmware/efi/Kconfig"
source "drivers/firmware/meson/Kconfig"
+source "drivers/firmware/tegra/Kconfig"
endmenu
diff --git a/drivers/firmware/Makefile b/drivers/firmware/Makefile
index 898ac41fa8b3..a37f12e8d137 100644
--- a/drivers/firmware/Makefile
+++ b/drivers/firmware/Makefile
@@ -2,6 +2,7 @@
# Makefile for the linux kernel.
#
obj-$(CONFIG_ARM_PSCI_FW) += psci.o
+obj-$(CONFIG_ARM_PSCI_CHECKER) += psci_checker.o
obj-$(CONFIG_ARM_SCPI_PROTOCOL) += arm_scpi.o
obj-$(CONFIG_ARM_SCPI_POWER_DOMAIN) += scpi_pm_domain.o
obj-$(CONFIG_DMI) += dmi_scan.o
@@ -20,9 +21,11 @@ obj-$(CONFIG_QCOM_SCM) += qcom_scm.o
obj-$(CONFIG_QCOM_SCM_64) += qcom_scm-64.o
obj-$(CONFIG_QCOM_SCM_32) += qcom_scm-32.o
CFLAGS_qcom_scm-32.o :=$(call as-instr,.arch armv7-a\n.arch_extension sec,-DREQUIRES_SEC=1) -march=armv7-a
+obj-$(CONFIG_TI_SCI_PROTOCOL) += ti_sci.o
obj-y += broadcom/
obj-y += meson/
obj-$(CONFIG_GOOGLE_FIRMWARE) += google/
obj-$(CONFIG_EFI) += efi/
obj-$(CONFIG_UEFI_CPER) += efi/
+obj-y += tegra/
diff --git a/drivers/firmware/arm_scpi.c b/drivers/firmware/arm_scpi.c
index ce2bc2a38101..70e13230d8db 100644
--- a/drivers/firmware/arm_scpi.c
+++ b/drivers/firmware/arm_scpi.c
@@ -50,20 +50,27 @@
#define CMD_TOKEN_ID_MASK 0xff
#define CMD_DATA_SIZE_SHIFT 16
#define CMD_DATA_SIZE_MASK 0x1ff
+#define CMD_LEGACY_DATA_SIZE_SHIFT 20
+#define CMD_LEGACY_DATA_SIZE_MASK 0x1ff
#define PACK_SCPI_CMD(cmd_id, tx_sz) \
((((cmd_id) & CMD_ID_MASK) << CMD_ID_SHIFT) | \
(((tx_sz) & CMD_DATA_SIZE_MASK) << CMD_DATA_SIZE_SHIFT))
#define ADD_SCPI_TOKEN(cmd, token) \
((cmd) |= (((token) & CMD_TOKEN_ID_MASK) << CMD_TOKEN_ID_SHIFT))
+#define PACK_LEGACY_SCPI_CMD(cmd_id, tx_sz) \
+ ((((cmd_id) & CMD_ID_MASK) << CMD_ID_SHIFT) | \
+ (((tx_sz) & CMD_LEGACY_DATA_SIZE_MASK) << CMD_LEGACY_DATA_SIZE_SHIFT))
#define CMD_SIZE(cmd) (((cmd) >> CMD_DATA_SIZE_SHIFT) & CMD_DATA_SIZE_MASK)
+#define CMD_LEGACY_SIZE(cmd) (((cmd) >> CMD_LEGACY_DATA_SIZE_SHIFT) & \
+ CMD_LEGACY_DATA_SIZE_MASK)
#define CMD_UNIQ_MASK (CMD_TOKEN_ID_MASK << CMD_TOKEN_ID_SHIFT | CMD_ID_MASK)
#define CMD_XTRACT_UNIQ(cmd) ((cmd) & CMD_UNIQ_MASK)
#define SCPI_SLOT 0
#define MAX_DVFS_DOMAINS 8
-#define MAX_DVFS_OPPS 8
+#define MAX_DVFS_OPPS 16
#define DVFS_LATENCY(hdr) (le32_to_cpu(hdr) >> 16)
#define DVFS_OPP_COUNT(hdr) ((le32_to_cpu(hdr) >> 8) & 0xff)
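
A worked packing example for the macros above (editorial; assumes
CMD_ID_SHIFT is 0 as defined earlier in this file). For cmd_id = 0x15
and tx_sz = 8:

	PACK_SCPI_CMD(0x15, 8)        = 0x15 | (8 << 16) = 0x00080015
	PACK_LEGACY_SCPI_CMD(0x15, 8) = 0x15 | (8 << 20) = 0x00800015

The legacy protocol keeps the 9-bit payload size at bits 20-28 rather
than bits 16-24, which is why CMD_SIZE() and CMD_LEGACY_SIZE() extract
it with different shifts.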
@@ -99,6 +106,7 @@ enum scpi_error_codes {
SCPI_ERR_MAX
};
+/* SCPI Standard commands */
enum scpi_std_cmd {
SCPI_CMD_INVALID = 0x00,
SCPI_CMD_SCPI_READY = 0x01,
@@ -132,6 +140,108 @@ enum scpi_std_cmd {
SCPI_CMD_COUNT
};
+/* SCPI Legacy Commands */
+enum legacy_scpi_std_cmd {
+ LEGACY_SCPI_CMD_INVALID = 0x00,
+ LEGACY_SCPI_CMD_SCPI_READY = 0x01,
+ LEGACY_SCPI_CMD_SCPI_CAPABILITIES = 0x02,
+ LEGACY_SCPI_CMD_EVENT = 0x03,
+ LEGACY_SCPI_CMD_SET_CSS_PWR_STATE = 0x04,
+ LEGACY_SCPI_CMD_GET_CSS_PWR_STATE = 0x05,
+ LEGACY_SCPI_CMD_CFG_PWR_STATE_STAT = 0x06,
+ LEGACY_SCPI_CMD_GET_PWR_STATE_STAT = 0x07,
+ LEGACY_SCPI_CMD_SYS_PWR_STATE = 0x08,
+ LEGACY_SCPI_CMD_L2_READY = 0x09,
+ LEGACY_SCPI_CMD_SET_AP_TIMER = 0x0a,
+ LEGACY_SCPI_CMD_CANCEL_AP_TIME = 0x0b,
+ LEGACY_SCPI_CMD_DVFS_CAPABILITIES = 0x0c,
+ LEGACY_SCPI_CMD_GET_DVFS_INFO = 0x0d,
+ LEGACY_SCPI_CMD_SET_DVFS = 0x0e,
+ LEGACY_SCPI_CMD_GET_DVFS = 0x0f,
+ LEGACY_SCPI_CMD_GET_DVFS_STAT = 0x10,
+ LEGACY_SCPI_CMD_SET_RTC = 0x11,
+ LEGACY_SCPI_CMD_GET_RTC = 0x12,
+ LEGACY_SCPI_CMD_CLOCK_CAPABILITIES = 0x13,
+ LEGACY_SCPI_CMD_SET_CLOCK_INDEX = 0x14,
+ LEGACY_SCPI_CMD_SET_CLOCK_VALUE = 0x15,
+ LEGACY_SCPI_CMD_GET_CLOCK_VALUE = 0x16,
+ LEGACY_SCPI_CMD_PSU_CAPABILITIES = 0x17,
+ LEGACY_SCPI_CMD_SET_PSU = 0x18,
+ LEGACY_SCPI_CMD_GET_PSU = 0x19,
+ LEGACY_SCPI_CMD_SENSOR_CAPABILITIES = 0x1a,
+ LEGACY_SCPI_CMD_SENSOR_INFO = 0x1b,
+ LEGACY_SCPI_CMD_SENSOR_VALUE = 0x1c,
+ LEGACY_SCPI_CMD_SENSOR_CFG_PERIODIC = 0x1d,
+ LEGACY_SCPI_CMD_SENSOR_CFG_BOUNDS = 0x1e,
+ LEGACY_SCPI_CMD_SENSOR_ASYNC_VALUE = 0x1f,
+ LEGACY_SCPI_CMD_COUNT
+};
+
+/* List all commands that are required to go through the high priority link */
+static int legacy_hpriority_cmds[] = {
+ LEGACY_SCPI_CMD_GET_CSS_PWR_STATE,
+ LEGACY_SCPI_CMD_CFG_PWR_STATE_STAT,
+ LEGACY_SCPI_CMD_GET_PWR_STATE_STAT,
+ LEGACY_SCPI_CMD_SET_DVFS,
+ LEGACY_SCPI_CMD_GET_DVFS,
+ LEGACY_SCPI_CMD_SET_RTC,
+ LEGACY_SCPI_CMD_GET_RTC,
+ LEGACY_SCPI_CMD_SET_CLOCK_INDEX,
+ LEGACY_SCPI_CMD_SET_CLOCK_VALUE,
+ LEGACY_SCPI_CMD_GET_CLOCK_VALUE,
+ LEGACY_SCPI_CMD_SET_PSU,
+ LEGACY_SCPI_CMD_GET_PSU,
+ LEGACY_SCPI_CMD_SENSOR_CFG_PERIODIC,
+ LEGACY_SCPI_CMD_SENSOR_CFG_BOUNDS,
+};
+
+/* List all commands used by this driver, used as indexes */
+enum scpi_drv_cmds {
+ CMD_SCPI_CAPABILITIES = 0,
+ CMD_GET_CLOCK_INFO,
+ CMD_GET_CLOCK_VALUE,
+ CMD_SET_CLOCK_VALUE,
+ CMD_GET_DVFS,
+ CMD_SET_DVFS,
+ CMD_GET_DVFS_INFO,
+ CMD_SENSOR_CAPABILITIES,
+ CMD_SENSOR_INFO,
+ CMD_SENSOR_VALUE,
+ CMD_SET_DEVICE_PWR_STATE,
+ CMD_GET_DEVICE_PWR_STATE,
+ CMD_MAX_COUNT,
+};
+
+static int scpi_std_commands[CMD_MAX_COUNT] = {
+ SCPI_CMD_SCPI_CAPABILITIES,
+ SCPI_CMD_GET_CLOCK_INFO,
+ SCPI_CMD_GET_CLOCK_VALUE,
+ SCPI_CMD_SET_CLOCK_VALUE,
+ SCPI_CMD_GET_DVFS,
+ SCPI_CMD_SET_DVFS,
+ SCPI_CMD_GET_DVFS_INFO,
+ SCPI_CMD_SENSOR_CAPABILITIES,
+ SCPI_CMD_SENSOR_INFO,
+ SCPI_CMD_SENSOR_VALUE,
+ SCPI_CMD_SET_DEVICE_PWR_STATE,
+ SCPI_CMD_GET_DEVICE_PWR_STATE,
+};
+
+static int scpi_legacy_commands[CMD_MAX_COUNT] = {
+ LEGACY_SCPI_CMD_SCPI_CAPABILITIES,
+ -1, /* GET_CLOCK_INFO */
+ LEGACY_SCPI_CMD_GET_CLOCK_VALUE,
+ LEGACY_SCPI_CMD_SET_CLOCK_VALUE,
+ LEGACY_SCPI_CMD_GET_DVFS,
+ LEGACY_SCPI_CMD_SET_DVFS,
+ LEGACY_SCPI_CMD_GET_DVFS_INFO,
+ LEGACY_SCPI_CMD_SENSOR_CAPABILITIES,
+ LEGACY_SCPI_CMD_SENSOR_INFO,
+ LEGACY_SCPI_CMD_SENSOR_VALUE,
+ -1, /* SET_DEVICE_PWR_STATE */
+ -1, /* GET_DEVICE_PWR_STATE */
+};
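
An illustrative consequence of the indirection above (names taken from
the tables; behaviour follows from scpi_send_message() later in this
patch): callers now pass a driver-local index rather than a raw command
id, for example

	ret = scpi_send_message(CMD_GET_CLOCK_INFO, &le_clk_id,
				sizeof(le_clk_id), &clk, sizeof(clk));
	/* -EOPNOTSUPP on legacy firmware, where the table entry is -1 */

so unsupported legacy commands fail cleanly instead of being sent to
firmware that does not implement them.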
+
struct scpi_xfer {
u32 slot; /* has to be first element */
u32 cmd;
@@ -160,7 +270,10 @@ struct scpi_chan {
struct scpi_drvinfo {
u32 protocol_version;
u32 firmware_version;
+ bool is_legacy;
int num_chans;
+ int *commands;
+ DECLARE_BITMAP(cmd_priority, LEGACY_SCPI_CMD_COUNT);
atomic_t next_chan;
struct scpi_ops *scpi_ops;
struct scpi_chan *channels;
@@ -177,6 +290,11 @@ struct scpi_shared_mem {
u8 payload[0];
} __packed;
+struct legacy_scpi_shared_mem {
+ __le32 status;
+ u8 payload[0];
+} __packed;
+
struct scp_capabilities {
__le32 protocol_version;
__le32 event_version;
@@ -202,6 +320,12 @@ struct clk_set_value {
__le32 rate;
} __packed;
+struct legacy_clk_set_value {
+ __le32 rate;
+ __le16 id;
+ __le16 reserved;
+} __packed;
+
struct dvfs_info {
__le32 header;
struct {
@@ -273,19 +397,43 @@ static void scpi_process_cmd(struct scpi_chan *ch, u32 cmd)
return;
}
- list_for_each_entry(t, &ch->rx_pending, node)
- if (CMD_XTRACT_UNIQ(t->cmd) == CMD_XTRACT_UNIQ(cmd)) {
- list_del(&t->node);
- match = t;
- break;
- }
+ /* The SCP firmware does not echo the command type back in legacy
+ * mode, so treat the head of the pending RX list as the matching
+ * command when the list is not empty. In TX-only mode it is empty.
+ */
+ if (scpi_info->is_legacy) {
+ match = list_first_entry(&ch->rx_pending, struct scpi_xfer,
+ node);
+ list_del(&match->node);
+ } else {
+ list_for_each_entry(t, &ch->rx_pending, node)
+ if (CMD_XTRACT_UNIQ(t->cmd) == CMD_XTRACT_UNIQ(cmd)) {
+ list_del(&t->node);
+ match = t;
+ break;
+ }
+ }
/* check if wait_for_completion is in progress or timed-out */
if (match && !completion_done(&match->done)) {
- struct scpi_shared_mem *mem = ch->rx_payload;
- unsigned int len = min(match->rx_len, CMD_SIZE(cmd));
+ unsigned int len;
+
+ if (scpi_info->is_legacy) {
+ struct legacy_scpi_shared_mem *mem = ch->rx_payload;
+
+ /* The RX length is not reported by the legacy firmware */
+ len = match->rx_len;
+
+ match->status = le32_to_cpu(mem->status);
+ memcpy_fromio(match->rx_buf, mem->payload, len);
+ } else {
+ struct scpi_shared_mem *mem = ch->rx_payload;
+
+ len = min(match->rx_len, CMD_SIZE(cmd));
+
+ match->status = le32_to_cpu(mem->status);
+ memcpy_fromio(match->rx_buf, mem->payload, len);
+ }
- match->status = le32_to_cpu(mem->status);
- memcpy_fromio(match->rx_buf, mem->payload, len);
if (match->rx_len > len)
memset(match->rx_buf + len, 0, match->rx_len - len);
complete(&match->done);
@@ -297,7 +445,10 @@ static void scpi_handle_remote_msg(struct mbox_client *c, void *msg)
{
struct scpi_chan *ch = container_of(c, struct scpi_chan, cl);
struct scpi_shared_mem *mem = ch->rx_payload;
- u32 cmd = le32_to_cpu(mem->command);
+ u32 cmd = 0;
+
+ if (!scpi_info->is_legacy)
+ cmd = le32_to_cpu(mem->command);
scpi_process_cmd(ch, cmd);
}
@@ -309,8 +460,13 @@ static void scpi_tx_prepare(struct mbox_client *c, void *msg)
struct scpi_chan *ch = container_of(c, struct scpi_chan, cl);
struct scpi_shared_mem *mem = (struct scpi_shared_mem *)ch->tx_payload;
- if (t->tx_buf)
- memcpy_toio(mem->payload, t->tx_buf, t->tx_len);
+ if (t->tx_buf) {
+ if (scpi_info->is_legacy)
+ memcpy_toio(ch->tx_payload, t->tx_buf, t->tx_len);
+ else
+ memcpy_toio(mem->payload, t->tx_buf, t->tx_len);
+ }
+
if (t->rx_buf) {
if (!(++ch->token))
++ch->token;
@@ -319,7 +475,9 @@ static void scpi_tx_prepare(struct mbox_client *c, void *msg)
list_add_tail(&t->node, &ch->rx_pending);
spin_unlock_irqrestore(&ch->rx_lock, flags);
}
- mem->command = cpu_to_le32(t->cmd);
+
+ if (!scpi_info->is_legacy)
+ mem->command = cpu_to_le32(t->cmd);
}
static struct scpi_xfer *get_scpi_xfer(struct scpi_chan *ch)
@@ -344,23 +502,38 @@ static void put_scpi_xfer(struct scpi_xfer *t, struct scpi_chan *ch)
mutex_unlock(&ch->xfers_lock);
}
-static int scpi_send_message(u8 cmd, void *tx_buf, unsigned int tx_len,
+static int scpi_send_message(u8 idx, void *tx_buf, unsigned int tx_len,
void *rx_buf, unsigned int rx_len)
{
int ret;
u8 chan;
+ u8 cmd;
struct scpi_xfer *msg;
struct scpi_chan *scpi_chan;
- chan = atomic_inc_return(&scpi_info->next_chan) % scpi_info->num_chans;
+ if (scpi_info->commands[idx] < 0)
+ return -EOPNOTSUPP;
+
+ cmd = scpi_info->commands[idx];
+
+ if (scpi_info->is_legacy)
+ chan = test_bit(cmd, scpi_info->cmd_priority) ? 1 : 0;
+ else
+ chan = atomic_inc_return(&scpi_info->next_chan) %
+ scpi_info->num_chans;
scpi_chan = scpi_info->channels + chan;
msg = get_scpi_xfer(scpi_chan);
if (!msg)
return -ENOMEM;
- msg->slot = BIT(SCPI_SLOT);
- msg->cmd = PACK_SCPI_CMD(cmd, tx_len);
+ if (scpi_info->is_legacy) {
+ msg->cmd = PACK_LEGACY_SCPI_CMD(cmd, tx_len);
+ msg->slot = msg->cmd;
+ } else {
+ msg->slot = BIT(SCPI_SLOT);
+ msg->cmd = PACK_SCPI_CMD(cmd, tx_len);
+ }
msg->tx_buf = tx_buf;
msg->tx_len = tx_len;
msg->rx_buf = rx_buf;
@@ -397,7 +570,7 @@ scpi_clk_get_range(u16 clk_id, unsigned long *min, unsigned long *max)
struct clk_get_info clk;
__le16 le_clk_id = cpu_to_le16(clk_id);
- ret = scpi_send_message(SCPI_CMD_GET_CLOCK_INFO, &le_clk_id,
+ ret = scpi_send_message(CMD_GET_CLOCK_INFO, &le_clk_id,
sizeof(le_clk_id), &clk, sizeof(clk));
if (!ret) {
*min = le32_to_cpu(clk.min_rate);
@@ -412,8 +585,9 @@ static unsigned long scpi_clk_get_val(u16 clk_id)
struct clk_get_value clk;
__le16 le_clk_id = cpu_to_le16(clk_id);
- ret = scpi_send_message(SCPI_CMD_GET_CLOCK_VALUE, &le_clk_id,
+ ret = scpi_send_message(CMD_GET_CLOCK_VALUE, &le_clk_id,
sizeof(le_clk_id), &clk, sizeof(clk));
+
return ret ? ret : le32_to_cpu(clk.rate);
}
@@ -425,7 +599,19 @@ static int scpi_clk_set_val(u16 clk_id, unsigned long rate)
.rate = cpu_to_le32(rate)
};
- return scpi_send_message(SCPI_CMD_SET_CLOCK_VALUE, &clk, sizeof(clk),
+ return scpi_send_message(CMD_SET_CLOCK_VALUE, &clk, sizeof(clk),
+ &stat, sizeof(stat));
+}
+
+static int legacy_scpi_clk_set_val(u16 clk_id, unsigned long rate)
+{
+ int stat;
+ struct legacy_clk_set_value clk = {
+ .id = cpu_to_le16(clk_id),
+ .rate = cpu_to_le32(rate)
+ };
+
+ return scpi_send_message(CMD_SET_CLOCK_VALUE, &clk, sizeof(clk),
&stat, sizeof(stat));
}
@@ -434,8 +620,9 @@ static int scpi_dvfs_get_idx(u8 domain)
int ret;
u8 dvfs_idx;
- ret = scpi_send_message(SCPI_CMD_GET_DVFS, &domain, sizeof(domain),
+ ret = scpi_send_message(CMD_GET_DVFS, &domain, sizeof(domain),
&dvfs_idx, sizeof(dvfs_idx));
+
return ret ? ret : dvfs_idx;
}
@@ -444,7 +631,7 @@ static int scpi_dvfs_set_idx(u8 domain, u8 index)
int stat;
struct dvfs_set dvfs = {domain, index};
- return scpi_send_message(SCPI_CMD_SET_DVFS, &dvfs, sizeof(dvfs),
+ return scpi_send_message(CMD_SET_DVFS, &dvfs, sizeof(dvfs),
&stat, sizeof(stat));
}
@@ -468,9 +655,8 @@ static struct scpi_dvfs_info *scpi_dvfs_get_info(u8 domain)
if (scpi_info->dvfs[domain]) /* data already populated */
return scpi_info->dvfs[domain];
- ret = scpi_send_message(SCPI_CMD_GET_DVFS_INFO, &domain, sizeof(domain),
+ ret = scpi_send_message(CMD_GET_DVFS_INFO, &domain, sizeof(domain),
&buf, sizeof(buf));
-
if (ret)
return ERR_PTR(ret);
@@ -503,7 +689,7 @@ static int scpi_sensor_get_capability(u16 *sensors)
struct sensor_capabilities cap_buf;
int ret;
- ret = scpi_send_message(SCPI_CMD_SENSOR_CAPABILITIES, NULL, 0, &cap_buf,
+ ret = scpi_send_message(CMD_SENSOR_CAPABILITIES, NULL, 0, &cap_buf,
sizeof(cap_buf));
if (!ret)
*sensors = le16_to_cpu(cap_buf.sensors);
@@ -517,7 +703,7 @@ static int scpi_sensor_get_info(u16 sensor_id, struct scpi_sensor_info *info)
struct _scpi_sensor_info _info;
int ret;
- ret = scpi_send_message(SCPI_CMD_SENSOR_INFO, &id, sizeof(id),
+ ret = scpi_send_message(CMD_SENSOR_INFO, &id, sizeof(id),
&_info, sizeof(_info));
if (!ret) {
memcpy(info, &_info, sizeof(*info));
@@ -533,7 +719,7 @@ static int scpi_sensor_get_value(u16 sensor, u64 *val)
struct sensor_value buf;
int ret;
- ret = scpi_send_message(SCPI_CMD_SENSOR_VALUE, &id, sizeof(id),
+ ret = scpi_send_message(CMD_SENSOR_VALUE, &id, sizeof(id),
&buf, sizeof(buf));
if (!ret)
*val = (u64)le32_to_cpu(buf.hi_val) << 32 |
@@ -548,7 +734,7 @@ static int scpi_device_get_power_state(u16 dev_id)
u8 pstate;
__le16 id = cpu_to_le16(dev_id);
- ret = scpi_send_message(SCPI_CMD_GET_DEVICE_PWR_STATE, &id,
+ ret = scpi_send_message(CMD_GET_DEVICE_PWR_STATE, &id,
sizeof(id), &pstate, sizeof(pstate));
return ret ? ret : pstate;
}
@@ -561,7 +747,7 @@ static int scpi_device_set_power_state(u16 dev_id, u8 pstate)
.pstate = pstate,
};
- return scpi_send_message(SCPI_CMD_SET_DEVICE_PWR_STATE, &dev_set,
+ return scpi_send_message(CMD_SET_DEVICE_PWR_STATE, &dev_set,
sizeof(dev_set), &stat, sizeof(stat));
}
@@ -591,12 +777,16 @@ static int scpi_init_versions(struct scpi_drvinfo *info)
int ret;
struct scp_capabilities caps;
- ret = scpi_send_message(SCPI_CMD_SCPI_CAPABILITIES, NULL, 0,
+ ret = scpi_send_message(CMD_SCPI_CAPABILITIES, NULL, 0,
&caps, sizeof(caps));
if (!ret) {
info->protocol_version = le32_to_cpu(caps.protocol_version);
info->firmware_version = le32_to_cpu(caps.platform_version);
}
+ /* Ignore error if not implemented */
+ if (scpi_info->is_legacy && ret == -EOPNOTSUPP)
+ return 0;
+
return ret;
}
@@ -681,6 +871,11 @@ static int scpi_alloc_xfer_list(struct device *dev, struct scpi_chan *ch)
return 0;
}
+static const struct of_device_id legacy_scpi_of_match[] = {
+ {.compatible = "arm,scpi-pre-1.0"},
+ {},
+};
+
static int scpi_probe(struct platform_device *pdev)
{
int count, idx, ret;
@@ -693,6 +888,9 @@ static int scpi_probe(struct platform_device *pdev)
if (!scpi_info)
return -ENOMEM;
+ if (of_match_device(legacy_scpi_of_match, &pdev->dev))
+ scpi_info->is_legacy = true;
+
count = of_count_phandle_with_args(np, "mboxes", "#mbox-cells");
if (count < 0) {
dev_err(dev, "no mboxes property in '%s'\n", np->full_name);
@@ -755,8 +953,21 @@ err:
scpi_info->channels = scpi_chan;
scpi_info->num_chans = count;
+ scpi_info->commands = scpi_std_commands;
+
platform_set_drvdata(pdev, scpi_info);
+ if (scpi_info->is_legacy) {
+ /* Replace with legacy variants */
+ scpi_ops.clk_set_val = legacy_scpi_clk_set_val;
+ scpi_info->commands = scpi_legacy_commands;
+
+ /* Fill priority bitmap */
+ for (idx = 0; idx < ARRAY_SIZE(legacy_hpriority_cmds); idx++)
+ set_bit(legacy_hpriority_cmds[idx],
+ scpi_info->cmd_priority);
+ }
+
ret = scpi_init_versions(scpi_info);
if (ret) {
dev_err(dev, "incorrect or no SCP firmware found\n");
@@ -781,6 +992,7 @@ err:
static const struct of_device_id scpi_of_match[] = {
{.compatible = "arm,scpi"},
+ {.compatible = "arm,scpi-pre-1.0"},
{},
};
diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
index 88bebe1968b7..54be60ead08f 100644
--- a/drivers/firmware/dmi_scan.c
+++ b/drivers/firmware/dmi_scan.c
@@ -560,7 +560,7 @@ static int __init dmi_present(const u8 *buf)
dmi_ver >> 16, (dmi_ver >> 8) & 0xFF);
}
dmi_format_ids(dmi_ids_string, sizeof(dmi_ids_string));
- printk(KERN_DEBUG "DMI: %s\n", dmi_ids_string);
+ pr_info("DMI: %s\n", dmi_ids_string);
return 0;
}
}
@@ -588,7 +588,7 @@ static int __init dmi_smbios3_present(const u8 *buf)
dmi_ver >> 16, (dmi_ver >> 8) & 0xFF,
dmi_ver & 0xFF);
dmi_format_ids(dmi_ids_string, sizeof(dmi_ids_string));
- pr_debug("DMI: %s\n", dmi_ids_string);
+ pr_info("DMI: %s\n", dmi_ids_string);
return 0;
}
}
diff --git a/drivers/firmware/efi/Kconfig b/drivers/firmware/efi/Kconfig
index c981be17d3c0..2e78b0b96d74 100644
--- a/drivers/firmware/efi/Kconfig
+++ b/drivers/firmware/efi/Kconfig
@@ -129,7 +129,25 @@ config EFI_TEST
Say Y here to enable the runtime services support via /dev/efi_test.
If unsure, say N.
+config APPLE_PROPERTIES
+ bool "Apple Device Properties"
+ depends on EFI_STUB && X86
+ select EFI_DEV_PATH_PARSER
+ select UCS2_STRING
+ help
+ Retrieve properties from EFI on Apple Macs and assign them to
+ devices, allowing for improved support of Apple hardware.
+ Properties that would otherwise be missing include the
+ Thunderbolt Device ROM and GPU configuration data.
+
+ If unsure, say Y if you have a Mac. Otherwise N.
+
endmenu
config UEFI_CPER
bool
+
+config EFI_DEV_PATH_PARSER
+ bool
+ depends on ACPI
+ default n
diff --git a/drivers/firmware/efi/Makefile b/drivers/firmware/efi/Makefile
index c8a439f6d715..ad67342313ed 100644
--- a/drivers/firmware/efi/Makefile
+++ b/drivers/firmware/efi/Makefile
@@ -21,6 +21,8 @@ obj-$(CONFIG_EFI_STUB) += libstub/
obj-$(CONFIG_EFI_FAKE_MEMMAP) += fake_mem.o
obj-$(CONFIG_EFI_BOOTLOADER_CONTROL) += efibc.o
obj-$(CONFIG_EFI_TEST) += test/
+obj-$(CONFIG_EFI_DEV_PATH_PARSER) += dev-path-parser.o
+obj-$(CONFIG_APPLE_PROPERTIES) += apple-properties.o
arm-obj-$(CONFIG_EFI) := arm-init.o arm-runtime.o
obj-$(CONFIG_ARM) += $(arm-obj-y)
diff --git a/drivers/firmware/efi/apple-properties.c b/drivers/firmware/efi/apple-properties.c
new file mode 100644
index 000000000000..c473f4c5ca34
--- /dev/null
+++ b/drivers/firmware/efi/apple-properties.c
@@ -0,0 +1,248 @@
+/*
+ * apple-properties.c - EFI device properties on Macs
+ * Copyright (C) 2016 Lukas Wunner <lukas@wunner.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License (version 2) as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#define pr_fmt(fmt) "apple-properties: " fmt
+
+#include <linux/bootmem.h>
+#include <linux/dmi.h>
+#include <linux/efi.h>
+#include <linux/property.h>
+#include <linux/slab.h>
+#include <linux/ucs2_string.h>
+#include <asm/setup.h>
+
+static bool dump_properties __initdata;
+
+static int __init dump_properties_enable(char *arg)
+{
+ dump_properties = true;
+ return 0;
+}
+
+__setup("dump_apple_properties", dump_properties_enable);
+
+struct dev_header {
+ u32 len;
+ u32 prop_count;
+ struct efi_dev_path path[0];
+ /*
+ * followed by key/value pairs, each key and value preceded by u32 len,
+ * len includes itself, value may be empty (in which case its len is 4)
+ */
+};
+
+struct properties_header {
+ u32 len;
+ u32 version;
+ u32 dev_count;
+ struct dev_header dev_header[0];
+};
+
+static u8 one __initdata = 1;
+
+static void __init unmarshal_key_value_pairs(struct dev_header *dev_header,
+ struct device *dev, void *ptr,
+ struct property_entry entry[])
+{
+ int i;
+
+ for (i = 0; i < dev_header->prop_count; i++) {
+ int remaining = dev_header->len - (ptr - (void *)dev_header);
+ u32 key_len, val_len;
+ char *key;
+
+ if (sizeof(key_len) > remaining)
+ break;
+
+ key_len = *(typeof(key_len) *)ptr;
+ if (key_len + sizeof(val_len) > remaining ||
+ key_len < sizeof(key_len) + sizeof(efi_char16_t) ||
+ *(efi_char16_t *)(ptr + sizeof(key_len)) == 0) {
+ dev_err(dev, "invalid property name len at %#zx\n",
+ ptr - (void *)dev_header);
+ break;
+ }
+
+ val_len = *(typeof(val_len) *)(ptr + key_len);
+ if (key_len + val_len > remaining ||
+ val_len < sizeof(val_len)) {
+ dev_err(dev, "invalid property val len at %#zx\n",
+ ptr - (void *)dev_header + key_len);
+ break;
+ }
+
+ /* 4 bytes to accommodate UTF-8 code points + null byte */
+ key = kzalloc((key_len - sizeof(key_len)) * 4 + 1, GFP_KERNEL);
+ if (!key) {
+ dev_err(dev, "cannot allocate property name\n");
+ break;
+ }
+ ucs2_as_utf8(key, ptr + sizeof(key_len),
+ key_len - sizeof(key_len));
+
+ entry[i].name = key;
+ entry[i].is_array = true;
+ entry[i].length = val_len - sizeof(val_len);
+ entry[i].pointer.raw_data = ptr + key_len + sizeof(val_len);
+ if (!entry[i].length) {
+ /* driver core doesn't accept empty properties */
+ entry[i].length = 1;
+ entry[i].pointer.raw_data = &one;
+ }
+
+ if (dump_properties) {
+ dev_info(dev, "property: %s\n", entry[i].name);
+ print_hex_dump(KERN_INFO, pr_fmt(), DUMP_PREFIX_OFFSET,
+ 16, 1, entry[i].pointer.raw_data,
+ entry[i].length, true);
+ }
+
+ ptr += key_len + val_len;
+ }
+
+ if (i != dev_header->prop_count) {
+ dev_err(dev, "got %d device properties, expected %u\n", i,
+ dev_header->prop_count);
+ print_hex_dump(KERN_ERR, pr_fmt(), DUMP_PREFIX_OFFSET,
+ 16, 1, dev_header, dev_header->len, true);
+ return;
+ }
+
+ dev_info(dev, "assigning %d device properties\n", i);
+}
+
+static int __init unmarshal_devices(struct properties_header *properties)
+{
+ size_t offset = offsetof(struct properties_header, dev_header[0]);
+
+ while (offset + sizeof(struct dev_header) < properties->len) {
+ struct dev_header *dev_header = (void *)properties + offset;
+ struct property_entry *entry = NULL;
+ struct device *dev;
+ size_t len;
+ int ret, i;
+ void *ptr;
+
+ if (offset + dev_header->len > properties->len ||
+ dev_header->len <= sizeof(*dev_header)) {
+ pr_err("invalid len in dev_header at %#zx\n", offset);
+ return -EINVAL;
+ }
+
+ ptr = dev_header->path;
+ len = dev_header->len - sizeof(*dev_header);
+
+ dev = efi_get_device_by_path((struct efi_dev_path **)&ptr, &len);
+ if (IS_ERR(dev)) {
+ pr_err("device path parse error %ld at %#zx:\n",
+ PTR_ERR(dev), ptr - (void *)dev_header);
+ print_hex_dump(KERN_ERR, pr_fmt(), DUMP_PREFIX_OFFSET,
+ 16, 1, dev_header, dev_header->len, true);
+ dev = NULL;
+ goto skip_device;
+ }
+
+ entry = kcalloc(dev_header->prop_count + 1, sizeof(*entry),
+ GFP_KERNEL);
+ if (!entry) {
+ dev_err(dev, "cannot allocate properties\n");
+ goto skip_device;
+ }
+
+ unmarshal_key_value_pairs(dev_header, dev, ptr, entry);
+ if (!entry[0].name)
+ goto skip_device;
+
+ ret = device_add_properties(dev, entry); /* makes deep copy */
+ if (ret)
+ dev_err(dev, "error %d assigning properties\n", ret);
+
+ for (i = 0; entry[i].name; i++)
+ kfree(entry[i].name);
+
+skip_device:
+ kfree(entry);
+ put_device(dev);
+ offset += dev_header->len;
+ }
+
+ return 0;
+}
+
+static int __init map_properties(void)
+{
+ struct properties_header *properties;
+ struct setup_data *data;
+ u32 data_len;
+ u64 pa_data;
+ int ret;
+
+ if (!dmi_match(DMI_SYS_VENDOR, "Apple Inc.") &&
+ !dmi_match(DMI_SYS_VENDOR, "Apple Computer, Inc."))
+ return 0;
+
+ pa_data = boot_params.hdr.setup_data;
+ while (pa_data) {
+ data = ioremap(pa_data, sizeof(*data));
+ if (!data) {
+ pr_err("cannot map setup_data header\n");
+ return -ENOMEM;
+ }
+
+ if (data->type != SETUP_APPLE_PROPERTIES) {
+ pa_data = data->next;
+ iounmap(data);
+ continue;
+ }
+
+ data_len = data->len;
+ iounmap(data);
+
+ data = ioremap(pa_data, sizeof(*data) + data_len);
+ if (!data) {
+ pr_err("cannot map setup_data payload\n");
+ return -ENOMEM;
+ }
+
+ properties = (struct properties_header *)data->data;
+ if (properties->version != 1) {
+ pr_err("unsupported version:\n");
+ print_hex_dump(KERN_ERR, pr_fmt(), DUMP_PREFIX_OFFSET,
+ 16, 1, properties, data_len, true);
+ ret = -ENOTSUPP;
+ } else if (properties->len != data_len) {
+ pr_err("length mismatch, expected %u\n", data_len);
+ print_hex_dump(KERN_ERR, pr_fmt(), DUMP_PREFIX_OFFSET,
+ 16, 1, properties, data_len, true);
+ ret = -EINVAL;
+ } else
+ ret = unmarshal_devices(properties);
+
+ /*
+ * Can only free the setup_data payload but not its header
+ * to avoid breaking the chain of ->next pointers.
+ */
+ data->len = 0;
+ iounmap(data);
+ free_bootmem_late(pa_data + sizeof(*data), data_len);
+
+ return ret;
+ }
+ return 0;
+}
+
+fs_initcall(map_properties);
diff --git a/drivers/firmware/efi/arm-init.c b/drivers/firmware/efi/arm-init.c
index 8efe13075c92..f853ad2c4ca0 100644
--- a/drivers/firmware/efi/arm-init.c
+++ b/drivers/firmware/efi/arm-init.c
@@ -244,8 +244,10 @@ void __init efi_init(void)
"Unexpected EFI_MEMORY_DESCRIPTOR version %ld",
efi.memmap.desc_version);
- if (uefi_init() < 0)
+ if (uefi_init() < 0) {
+ efi_memmap_unmap();
return;
+ }
reserve_regions();
efi_memattr_init();
diff --git a/drivers/firmware/efi/arm-runtime.c b/drivers/firmware/efi/arm-runtime.c
index 7c75a8d9091a..349dc3e1e52e 100644
--- a/drivers/firmware/efi/arm-runtime.c
+++ b/drivers/firmware/efi/arm-runtime.c
@@ -39,7 +39,7 @@ static struct mm_struct efi_mm = {
.mmlist = LIST_HEAD_INIT(efi_mm.mmlist),
};
-#ifdef CONFIG_ARM64_PTDUMP
+#ifdef CONFIG_ARM64_PTDUMP_DEBUGFS
#include <asm/ptdump.h>
static struct ptdump_info efi_ptdump_info = {
@@ -53,7 +53,7 @@ static struct ptdump_info efi_ptdump_info = {
static int __init ptdump_init(void)
{
- return ptdump_register(&efi_ptdump_info, "efi_page_tables");
+ return ptdump_debugfs_register(&efi_ptdump_info, "efi_page_tables");
}
device_initcall(ptdump_init);
diff --git a/drivers/firmware/efi/dev-path-parser.c b/drivers/firmware/efi/dev-path-parser.c
new file mode 100644
index 000000000000..85d1834ee9b7
--- /dev/null
+++ b/drivers/firmware/efi/dev-path-parser.c
@@ -0,0 +1,203 @@
+/*
+ * dev-path-parser.c - EFI Device Path parser
+ * Copyright (C) 2016 Lukas Wunner <lukas@wunner.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License (version 2) as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/acpi.h>
+#include <linux/efi.h>
+#include <linux/pci.h>
+
+struct acpi_hid_uid {
+ struct acpi_device_id hid[2];
+ char uid[11]; /* UINT_MAX + null byte */
+};
+
+static int __init match_acpi_dev(struct device *dev, void *data)
+{
+ struct acpi_hid_uid hid_uid = *(struct acpi_hid_uid *)data;
+ struct acpi_device *adev = to_acpi_device(dev);
+
+ if (acpi_match_device_ids(adev, hid_uid.hid))
+ return 0;
+
+ if (adev->pnp.unique_id)
+ return !strcmp(adev->pnp.unique_id, hid_uid.uid);
+ else
+ return !strcmp("0", hid_uid.uid);
+}
+
+static long __init parse_acpi_path(struct efi_dev_path *node,
+ struct device *parent, struct device **child)
+{
+ struct acpi_hid_uid hid_uid = {};
+ struct device *phys_dev;
+
+ if (node->length != 12)
+ return -EINVAL;
+
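+	/*
+	 * Decode the compressed EISA ID: three 5-bit characters (stored as
+	 * 1-26, mapped to 'A'-'Z') followed by a 16-bit hex product ID.
+	 */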
+ sprintf(hid_uid.hid[0].id, "%c%c%c%04X",
+ 'A' + ((node->acpi.hid >> 10) & 0x1f) - 1,
+ 'A' + ((node->acpi.hid >> 5) & 0x1f) - 1,
+ 'A' + ((node->acpi.hid >> 0) & 0x1f) - 1,
+ node->acpi.hid >> 16);
+ sprintf(hid_uid.uid, "%u", node->acpi.uid);
+
+ *child = bus_find_device(&acpi_bus_type, NULL, &hid_uid,
+ match_acpi_dev);
+ if (!*child)
+ return -ENODEV;
+
+ phys_dev = acpi_get_first_physical_node(to_acpi_device(*child));
+ if (phys_dev) {
+ get_device(phys_dev);
+ put_device(*child);
+ *child = phys_dev;
+ }
+
+ return 0;
+}
+
+static int __init match_pci_dev(struct device *dev, void *data)
+{
+ unsigned int devfn = *(unsigned int *)data;
+
+ return dev_is_pci(dev) && to_pci_dev(dev)->devfn == devfn;
+}
+
+static long __init parse_pci_path(struct efi_dev_path *node,
+ struct device *parent, struct device **child)
+{
+ unsigned int devfn;
+
+ if (node->length != 6)
+ return -EINVAL;
+ if (!parent)
+ return -EINVAL;
+
+ devfn = PCI_DEVFN(node->pci.dev, node->pci.fn);
+
+ *child = device_find_child(parent, &devfn, match_pci_dev);
+ if (!*child)
+ return -ENODEV;
+
+ return 0;
+}
+
+/*
+ * Insert parsers for further node types here.
+ *
+ * Each parser takes a pointer to the @node and to the @parent (will be NULL
+ * for the first device path node). If a device corresponding to @node was
+ * found below @parent, its reference count should be incremented and the
+ * device returned in @child.
+ *
+ * The return value should be 0 on success or a negative int on failure.
+ * The special return values 0x01 (EFI_DEV_END_INSTANCE) and 0xFF
+ * (EFI_DEV_END_ENTIRE) signal the end of the device path; only
+ * parse_end_path() is supposed to return them.
+ *
+ * Be sure to validate the node length and contents before commencing the
+ * search for a device.
+ */
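+
+/*
+ * Illustrative sketch only (not part of this patch): a parser for a
+ * further node type would follow the same shape as the ones above,
+ * here with an imaginary node field and match_foo_dev() helper:
+ *
+ *	static long __init parse_foo_path(struct efi_dev_path *node,
+ *			struct device *parent, struct device **child)
+ *	{
+ *		if (node->length != FOO_NODE_LEN)
+ *			return -EINVAL;
+ *		*child = device_find_child(parent, &node->foo, match_foo_dev);
+ *		return *child ? 0 : -ENODEV;
+ *	}
+ */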
+
+static long __init parse_end_path(struct efi_dev_path *node,
+ struct device *parent, struct device **child)
+{
+ if (node->length != 4)
+ return -EINVAL;
+ if (node->sub_type != EFI_DEV_END_INSTANCE &&
+ node->sub_type != EFI_DEV_END_ENTIRE)
+ return -EINVAL;
+ if (!parent)
+ return -ENODEV;
+
+ *child = get_device(parent);
+ return node->sub_type;
+}
+
+/**
+ * efi_get_device_by_path - find device by EFI Device Path
+ * @node: EFI Device Path
+ * @len: maximum length of EFI Device Path in bytes
+ *
+ * Parse a series of EFI Device Path nodes at @node and find the corresponding
+ * device. If the device was found, its reference count is incremented and a
+ * pointer to it is returned. The caller needs to drop the reference with
+ * put_device() after use. The @node pointer is updated to point to the
+ * location immediately after the "End of Hardware Device Path" node.
+ *
+ * If another Device Path instance follows, @len is decremented by the number
+ * of bytes consumed. Otherwise @len is set to %0.
+ *
+ * If a Device Path node is malformed or its corresponding device is not found,
+ * @node is updated to point to this offending node and an ERR_PTR is returned.
+ *
+ * If @len is initially %0, the function returns %NULL. Thus, to iterate over
+ * all instances in a path, the following idiom may be used:
+ *
+ * while (!IS_ERR_OR_NULL(dev = efi_get_device_by_path(&node, &len))) {
+ * // do something with dev
+ * put_device(dev);
+ * }
+ * if (IS_ERR(dev))
+ * // report error
+ *
+ * Devices can only be found if they're already instantiated. Most buses
+ * instantiate devices in the "subsys" initcall level, hence the earliest
+ * initcall level in which this function should be called is "fs".
+ *
+ * Returns the device on success or
+ * %ERR_PTR(-ENODEV) if no device was found,
+ * %ERR_PTR(-EINVAL) if a node is malformed or exceeds @len,
+ * %ERR_PTR(-ENOTSUPP) if support for a node type is not yet implemented.
+ */
+struct device * __init efi_get_device_by_path(struct efi_dev_path **node,
+ size_t *len)
+{
+ struct device *parent = NULL, *child;
+ long ret = 0;
+
+ if (!*len)
+ return NULL;
+
+ while (!ret) {
+ if (*len < 4 || *len < (*node)->length)
+ ret = -EINVAL;
+ else if ((*node)->type == EFI_DEV_ACPI &&
+ (*node)->sub_type == EFI_DEV_BASIC_ACPI)
+ ret = parse_acpi_path(*node, parent, &child);
+ else if ((*node)->type == EFI_DEV_HW &&
+ (*node)->sub_type == EFI_DEV_PCI)
+ ret = parse_pci_path(*node, parent, &child);
+ else if (((*node)->type == EFI_DEV_END_PATH ||
+ (*node)->type == EFI_DEV_END_PATH2))
+ ret = parse_end_path(*node, parent, &child);
+ else
+ ret = -ENOTSUPP;
+
+ put_device(parent);
+ if (ret < 0)
+ return ERR_PTR(ret);
+
+ parent = child;
+ *len -= (*node)->length;
+ *node = (void *)*node + (*node)->length;
+ }
+
+ if (ret == EFI_DEV_END_ENTIRE)
+ *len = 0;
+
+ return child;
+}
diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
index 1ac199cd75e7..92914801e388 100644
--- a/drivers/firmware/efi/efi.c
+++ b/drivers/firmware/efi/efi.c
@@ -23,7 +23,10 @@
#include <linux/of.h>
#include <linux/of_fdt.h>
#include <linux/io.h>
+#include <linux/kexec.h>
#include <linux/platform_device.h>
+#include <linux/random.h>
+#include <linux/reboot.h>
#include <linux/slab.h>
#include <linux/acpi.h>
#include <linux/ucs2_string.h>
@@ -48,6 +51,7 @@ struct efi __read_mostly efi = {
.esrt = EFI_INVALID_TABLE_ADDR,
.properties_table = EFI_INVALID_TABLE_ADDR,
.mem_attr_table = EFI_INVALID_TABLE_ADDR,
+ .rng_seed = EFI_INVALID_TABLE_ADDR,
};
EXPORT_SYMBOL(efi);
@@ -259,8 +263,10 @@ static __init int efivar_ssdt_load(void)
}
data = kmalloc(size, GFP_KERNEL);
- if (!data)
+ if (!data) {
+ ret = -ENOMEM;
goto free_entry;
+ }
ret = efivar_entry_get(entry, NULL, &size, data);
if (ret) {
@@ -438,6 +444,7 @@ static __initdata efi_config_table_type_t common_tables[] = {
{EFI_SYSTEM_RESOURCE_TABLE_GUID, "ESRT", &efi.esrt},
{EFI_PROPERTIES_TABLE_GUID, "PROP", &efi.properties_table},
{EFI_MEMORY_ATTRIBUTES_TABLE_GUID, "MEMATTR", &efi.mem_attr_table},
+ {LINUX_EFI_RANDOM_SEED_TABLE_GUID, "RNG", &efi.rng_seed},
{NULL_GUID, NULL, NULL},
};
@@ -499,6 +506,29 @@ int __init efi_config_parse_tables(void *config_tables, int count, int sz,
pr_cont("\n");
set_bit(EFI_CONFIG_TABLES, &efi.flags);
+ if (efi.rng_seed != EFI_INVALID_TABLE_ADDR) {
+ struct linux_efi_random_seed *seed;
+ u32 size = 0;
+
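+ /*
+ * The payload size is only known from the header, so map twice:
+ * once for the header alone, then again for header plus seed bits.
+ */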
+ seed = early_memremap(efi.rng_seed, sizeof(*seed));
+ if (seed != NULL) {
+ size = seed->size;
+ early_memunmap(seed, sizeof(*seed));
+ } else {
+ pr_err("Could not map UEFI random seed!\n");
+ }
+ if (size > 0) {
+ seed = early_memremap(efi.rng_seed,
+ sizeof(*seed) + size);
+ if (seed != NULL) {
+ add_device_randomness(seed->bits, seed->size);
+ early_memunmap(seed, sizeof(*seed) + size);
+ } else {
+ pr_err("Could not map UEFI random seed!\n");
+ }
+ }
+ }
+
/* Parse the EFI Properties table if it exists */
if (efi.properties_table != EFI_INVALID_TABLE_ADDR) {
efi_properties_table_t *tbl;
@@ -822,3 +852,47 @@ int efi_status_to_err(efi_status_t status)
return err;
}
+
+#ifdef CONFIG_KEXEC
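+/*
+ * Refresh the seed with new random bytes before kexec, so the next
+ * kernel does not reuse entropy this kernel has already credited.
+ */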
+static int update_efi_random_seed(struct notifier_block *nb,
+ unsigned long code, void *unused)
+{
+ struct linux_efi_random_seed *seed;
+ u32 size = 0;
+
+ if (!kexec_in_progress)
+ return NOTIFY_DONE;
+
+ seed = memremap(efi.rng_seed, sizeof(*seed), MEMREMAP_WB);
+ if (seed != NULL) {
+ size = min(seed->size, 32U);
+ memunmap(seed);
+ } else {
+ pr_err("Could not map UEFI random seed!\n");
+ }
+ if (size > 0) {
+ seed = memremap(efi.rng_seed, sizeof(*seed) + size,
+ MEMREMAP_WB);
+ if (seed != NULL) {
+ seed->size = size;
+ get_random_bytes(seed->bits, seed->size);
+ memunmap(seed);
+ } else {
+ pr_err("Could not map UEFI random seed!\n");
+ }
+ }
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block efi_random_seed_nb = {
+ .notifier_call = update_efi_random_seed,
+};
+
+static int register_update_efi_random_seed(void)
+{
+ if (efi.rng_seed == EFI_INVALID_TABLE_ADDR)
+ return 0;
+ return register_reboot_notifier(&efi_random_seed_nb);
+}
+late_initcall(register_update_efi_random_seed);
+#endif
diff --git a/drivers/firmware/efi/libstub/Makefile b/drivers/firmware/efi/libstub/Makefile
index 5e23e2d305e7..d564d25df8ab 100644
--- a/drivers/firmware/efi/libstub/Makefile
+++ b/drivers/firmware/efi/libstub/Makefile
@@ -6,7 +6,7 @@
#
cflags-$(CONFIG_X86_32) := -march=i386
cflags-$(CONFIG_X86_64) := -mcmodel=small
-cflags-$(CONFIG_X86) += -m$(BITS) -D__KERNEL__ $(LINUX_INCLUDE) -O2 \
+cflags-$(CONFIG_X86) += -m$(BITS) -D__KERNEL__ -O2 \
-fPIC -fno-strict-aliasing -mno-red-zone \
-mno-mmx -mno-sse
@@ -36,11 +36,11 @@ arm-deps := fdt_rw.c fdt_ro.c fdt_wip.c fdt.c fdt_empty_tree.c fdt_sw.c sort.c
$(obj)/lib-%.o: $(srctree)/lib/%.c FORCE
$(call if_changed_rule,cc_o_c)
-lib-$(CONFIG_EFI_ARMSTUB) += arm-stub.o fdt.o string.o \
+lib-$(CONFIG_EFI_ARMSTUB) += arm-stub.o fdt.o string.o random.o \
$(patsubst %.c,lib-%.o,$(arm-deps))
lib-$(CONFIG_ARM) += arm32-stub.o
-lib-$(CONFIG_ARM64) += arm64-stub.o random.o
+lib-$(CONFIG_ARM64) += arm64-stub.o
CFLAGS_arm64-stub.o := -DTEXT_OFFSET=$(TEXT_OFFSET)
#
diff --git a/drivers/firmware/efi/libstub/arm-stub.c b/drivers/firmware/efi/libstub/arm-stub.c
index 993aa56755f6..b4f7d78f9e8b 100644
--- a/drivers/firmware/efi/libstub/arm-stub.c
+++ b/drivers/firmware/efi/libstub/arm-stub.c
@@ -340,6 +340,8 @@ unsigned long efi_entry(void *handle, efi_system_table_t *sys_table,
if (status != EFI_SUCCESS)
pr_efi_err(sys_table, "Failed initrd from command line!\n");
+ efi_random_get_seed(sys_table);
+
new_fdt_addr = fdt_addr;
status = allocate_new_fdt_and_exit_boot(sys_table, handle,
&new_fdt_addr, dram_base + MAX_FDT_OFFSET,
diff --git a/drivers/firmware/efi/libstub/efi-stub-helper.c b/drivers/firmware/efi/libstub/efi-stub-helper.c
index aded10662020..757badc1debb 100644
--- a/drivers/firmware/efi/libstub/efi-stub-helper.c
+++ b/drivers/firmware/efi/libstub/efi-stub-helper.c
@@ -32,15 +32,6 @@
static unsigned long __chunk_size = EFI_READ_CHUNK_SIZE;
-/*
- * Allow the platform to override the allocation granularity: this allows
- * systems that have the capability to run with a larger page size to deal
- * with the allocations for initrd and fdt more efficiently.
- */
-#ifndef EFI_ALLOC_ALIGN
-#define EFI_ALLOC_ALIGN EFI_PAGE_SIZE
-#endif
-
#define EFI_MMAP_NR_SLACK_SLOTS 8
struct file_info {
@@ -186,14 +177,16 @@ efi_status_t efi_high_alloc(efi_system_table_t *sys_table_arg,
goto fail;
/*
- * Enforce minimum alignment that EFI requires when requesting
- * a specific address. We are doing page-based allocations,
- * so we must be aligned to a page.
+ * Enforce minimum alignment that EFI or Linux requires when
+ * requesting a specific address. We are doing page-based (or
+ * larger) allocations, and both the address and size must meet
+ * alignment constraints.
*/
if (align < EFI_ALLOC_ALIGN)
align = EFI_ALLOC_ALIGN;
- nr_pages = round_up(size, EFI_ALLOC_ALIGN) / EFI_PAGE_SIZE;
+ size = round_up(size, EFI_ALLOC_ALIGN);
+ nr_pages = size / EFI_PAGE_SIZE;
again:
for (i = 0; i < map_size / desc_size; i++) {
efi_memory_desc_t *desc;
@@ -208,7 +201,7 @@ again:
continue;
start = desc->phys_addr;
- end = start + desc->num_pages * (1UL << EFI_PAGE_SHIFT);
+ end = start + desc->num_pages * EFI_PAGE_SIZE;
if (end > max)
end = max;
@@ -278,14 +271,16 @@ efi_status_t efi_low_alloc(efi_system_table_t *sys_table_arg,
goto fail;
/*
- * Enforce minimum alignment that EFI requires when requesting
- * a specific address. We are doing page-based allocations,
- * so we must be aligned to a page.
+ * Enforce minimum alignment that EFI or Linux requires when
+ * requesting a specific address. We are doing page-based (or
+ * larger) allocations, and both the address and size must meet
+ * alignment constraints.
*/
if (align < EFI_ALLOC_ALIGN)
align = EFI_ALLOC_ALIGN;
- nr_pages = round_up(size, EFI_ALLOC_ALIGN) / EFI_PAGE_SIZE;
+ size = round_up(size, EFI_ALLOC_ALIGN);
+ nr_pages = size / EFI_PAGE_SIZE;
for (i = 0; i < map_size / desc_size; i++) {
efi_memory_desc_t *desc;
unsigned long m = (unsigned long)map;
@@ -300,7 +295,7 @@ efi_status_t efi_low_alloc(efi_system_table_t *sys_table_arg,
continue;
start = desc->phys_addr;
- end = start + desc->num_pages * (1UL << EFI_PAGE_SHIFT);
+ end = start + desc->num_pages * EFI_PAGE_SIZE;
/*
* Don't allocate at 0x0. It will confuse code that
diff --git a/drivers/firmware/efi/libstub/efistub.h b/drivers/firmware/efi/libstub/efistub.h
index ee49cd23ee63..b98824e3800a 100644
--- a/drivers/firmware/efi/libstub/efistub.h
+++ b/drivers/firmware/efi/libstub/efistub.h
@@ -15,6 +15,15 @@
*/
#undef __init
+/*
+ * Allow the platform to override the allocation granularity: this allows
+ * systems that have the capability to run with a larger page size to deal
+ * with the allocations for initrd and fdt more efficiently.
+ */
+#ifndef EFI_ALLOC_ALIGN
+#define EFI_ALLOC_ALIGN EFI_PAGE_SIZE
+#endif
+
void efi_char16_printk(efi_system_table_t *, efi_char16_t *);
efi_status_t efi_open_volume(efi_system_table_t *sys_table_arg, void *__image,
@@ -62,4 +71,6 @@ efi_status_t efi_random_alloc(efi_system_table_t *sys_table_arg,
efi_status_t check_platform_features(efi_system_table_t *sys_table_arg);
+efi_status_t efi_random_get_seed(efi_system_table_t *sys_table_arg);
+
#endif
diff --git a/drivers/firmware/efi/libstub/random.c b/drivers/firmware/efi/libstub/random.c
index 0c9f58c5ba50..7e72954d5860 100644
--- a/drivers/firmware/efi/libstub/random.c
+++ b/drivers/firmware/efi/libstub/random.c
@@ -8,6 +8,7 @@
*/
#include <linux/efi.h>
+#include <linux/log2.h>
#include <asm/efi.h>
#include "efistub.h"
@@ -41,21 +42,23 @@ efi_status_t efi_get_random_bytes(efi_system_table_t *sys_table_arg,
*/
static unsigned long get_entry_num_slots(efi_memory_desc_t *md,
unsigned long size,
- unsigned long align)
+ unsigned long align_shift)
{
- u64 start, end;
+ unsigned long align = 1UL << align_shift;
+ u64 first_slot, last_slot, region_end;
if (md->type != EFI_CONVENTIONAL_MEMORY)
return 0;
- start = round_up(md->phys_addr, align);
- end = round_down(md->phys_addr + md->num_pages * EFI_PAGE_SIZE - size,
- align);
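+ /* Clamp the inclusive end of the region to what fits in unsigned long. */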
+ region_end = min((u64)ULONG_MAX,
+ md->phys_addr + md->num_pages * EFI_PAGE_SIZE - 1);
- if (start > end)
+ first_slot = round_up(md->phys_addr, align);
+ last_slot = round_down(region_end - size + 1, align);
+
+ if (first_slot > last_slot)
return 0;
- return (end - start + 1) / align;
+ return ((unsigned long)(last_slot - first_slot) >> align_shift) + 1;
}
/*
@@ -98,7 +101,7 @@ efi_status_t efi_random_alloc(efi_system_table_t *sys_table_arg,
efi_memory_desc_t *md = (void *)memory_map + map_offset;
unsigned long slots;
- slots = get_entry_num_slots(md, size, align);
+ slots = get_entry_num_slots(md, size, ilog2(align));
MD_NUM_SLOTS(md) = slots;
total_slots += slots;
}
@@ -141,3 +144,51 @@ efi_status_t efi_random_alloc(efi_system_table_t *sys_table_arg,
return status;
}
+
+#define RANDOM_SEED_SIZE 32
+
+efi_status_t efi_random_get_seed(efi_system_table_t *sys_table_arg)
+{
+ efi_guid_t rng_proto = EFI_RNG_PROTOCOL_GUID;
+ efi_guid_t rng_algo_raw = EFI_RNG_ALGORITHM_RAW;
+ efi_guid_t rng_table_guid = LINUX_EFI_RANDOM_SEED_TABLE_GUID;
+ struct efi_rng_protocol *rng;
+ struct linux_efi_random_seed *seed;
+ efi_status_t status;
+
+ status = efi_call_early(locate_protocol, &rng_proto, NULL,
+ (void **)&rng);
+ if (status != EFI_SUCCESS)
+ return status;
+
+ status = efi_call_early(allocate_pool, EFI_RUNTIME_SERVICES_DATA,
+ sizeof(*seed) + RANDOM_SEED_SIZE,
+ (void **)&seed);
+ if (status != EFI_SUCCESS)
+ return status;
+
+ status = rng->get_rng(rng, &rng_algo_raw, RANDOM_SEED_SIZE,
+ seed->bits);
+ if (status == EFI_UNSUPPORTED)
+ /*
+ * Use whatever algorithm we have available if the raw algorithm
+ * is not implemented.
+ */
+ status = rng->get_rng(rng, NULL, RANDOM_SEED_SIZE,
+ seed->bits);
+
+ if (status != EFI_SUCCESS)
+ goto err_freepool;
+
+ seed->size = RANDOM_SEED_SIZE;
+ status = efi_call_early(install_configuration_table, &rng_table_guid,
+ seed);
+ if (status != EFI_SUCCESS)
+ goto err_freepool;
+
+ return EFI_SUCCESS;
+
+err_freepool:
+ efi_call_early(free_pool, seed);
+ return status;
+}
diff --git a/drivers/firmware/efi/test/efi_test.c b/drivers/firmware/efi/test/efi_test.c
index f61bb52be318..8cd578f62059 100644
--- a/drivers/firmware/efi/test/efi_test.c
+++ b/drivers/firmware/efi/test/efi_test.c
@@ -8,7 +8,6 @@
*
*/
-#include <linux/version.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/init.h>
@@ -156,7 +155,7 @@ static long efi_runtime_get_variable(unsigned long arg)
{
struct efi_getvariable __user *getvariable_user;
struct efi_getvariable getvariable;
- unsigned long datasize, prev_datasize, *dz;
+ unsigned long datasize = 0, prev_datasize, *dz;
efi_guid_t vendor_guid, *vd = NULL;
efi_status_t status;
efi_char16_t *name = NULL;
@@ -266,14 +265,10 @@ static long efi_runtime_set_variable(unsigned long arg)
return rv;
}
- data = kmalloc(setvariable.data_size, GFP_KERNEL);
- if (!data) {
+ data = memdup_user(setvariable.data, setvariable.data_size);
+ if (IS_ERR(data)) {
kfree(name);
- return -ENOMEM;
- }
- if (copy_from_user(data, setvariable.data, setvariable.data_size)) {
- rv = -EFAULT;
- goto out;
+ return PTR_ERR(data);
}
status = efi.set_variable(name, &vendor_guid,
@@ -429,7 +424,7 @@ static long efi_runtime_get_nextvariablename(unsigned long arg)
efi_guid_t *vd = NULL;
efi_guid_t vendor_guid;
efi_char16_t *name = NULL;
- int rv;
+ int rv = 0;
getnextvariablename_user = (struct efi_getnextvariablename __user *)arg;
diff --git a/drivers/firmware/psci.c b/drivers/firmware/psci.c
index 8263429e21b8..6c60a5087caf 100644
--- a/drivers/firmware/psci.c
+++ b/drivers/firmware/psci.c
@@ -630,7 +630,7 @@ int __init psci_dt_init(void)
np = of_find_matching_node_and_match(NULL, psci_of_match, &matched_np);
- if (!np)
+ if (!np || !of_device_is_available(np))
return -ENODEV;
init_fn = (psci_initcall_t)matched_np->data;
diff --git a/drivers/firmware/psci_checker.c b/drivers/firmware/psci_checker.c
new file mode 100644
index 000000000000..44bdb78f837b
--- /dev/null
+++ b/drivers/firmware/psci_checker.c
@@ -0,0 +1,490 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Copyright (C) 2016 ARM Limited
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/atomic.h>
+#include <linux/completion.h>
+#include <linux/cpu.h>
+#include <linux/cpuidle.h>
+#include <linux/cpu_pm.h>
+#include <linux/kernel.h>
+#include <linux/kthread.h>
+#include <linux/module.h>
+#include <linux/preempt.h>
+#include <linux/psci.h>
+#include <linux/slab.h>
+#include <linux/tick.h>
+#include <linux/topology.h>
+
+#include <asm/cpuidle.h>
+
+#include <uapi/linux/psci.h>
+
+#define NUM_SUSPEND_CYCLE (10)
+
+static unsigned int nb_available_cpus;
+static int tos_resident_cpu = -1;
+
+static atomic_t nb_active_threads;
+static struct completion suspend_threads_started =
+ COMPLETION_INITIALIZER(suspend_threads_started);
+static struct completion suspend_threads_done =
+ COMPLETION_INITIALIZER(suspend_threads_done);
+
+/*
+ * We assume that PSCI operations are used if they are available. This is not
+ * necessarily true on arm64, since the decision is based on the
+ * "enable-method" property of each CPU in the DT, but given that there is no
+ * arch-specific way to check this, we assume that the DT is sensible.
+ */
+static int psci_ops_check(void)
+{
+ int migrate_type = -1;
+ int cpu;
+
+ if (!(psci_ops.cpu_off && psci_ops.cpu_on && psci_ops.cpu_suspend)) {
+ pr_warn("Missing PSCI operations, aborting tests\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (psci_ops.migrate_info_type)
+ migrate_type = psci_ops.migrate_info_type();
+
+ if (migrate_type == PSCI_0_2_TOS_UP_MIGRATE ||
+ migrate_type == PSCI_0_2_TOS_UP_NO_MIGRATE) {
+ /* There is a UP Trusted OS, find on which core it resides. */
+ for_each_online_cpu(cpu)
+ if (psci_tos_resident_on(cpu)) {
+ tos_resident_cpu = cpu;
+ break;
+ }
+ if (tos_resident_cpu == -1)
+ pr_warn("UP Trusted OS resides on no online CPU\n");
+ }
+
+ return 0;
+}
+
+static int find_clusters(const struct cpumask *cpus,
+ const struct cpumask **clusters)
+{
+ unsigned int nb = 0;
+ cpumask_var_t tmp;
+
+ if (!alloc_cpumask_var(&tmp, GFP_KERNEL))
+ return -ENOMEM;
+ cpumask_copy(tmp, cpus);
+
+ while (!cpumask_empty(tmp)) {
+ const struct cpumask *cluster =
+ topology_core_cpumask(cpumask_any(tmp));
+
+ clusters[nb++] = cluster;
+ cpumask_andnot(tmp, tmp, cluster);
+ }
+
+ free_cpumask_var(tmp);
+ return nb;
+}
+
+/*
+ * offlined_cpus is a temporary array but passing it as an argument avoids
+ * multiple allocations.
+ */
+static unsigned int down_and_up_cpus(const struct cpumask *cpus,
+ struct cpumask *offlined_cpus)
+{
+ int cpu;
+ int err = 0;
+
+ cpumask_clear(offlined_cpus);
+
+ /* Try to power down all CPUs in the mask. */
+ for_each_cpu(cpu, cpus) {
+ int ret = cpu_down(cpu);
+
+ /*
+ * cpu_down() checks the number of online CPUs before the TOS
+ * resident CPU.
+ */
+ if (cpumask_weight(offlined_cpus) + 1 == nb_available_cpus) {
+ if (ret != -EBUSY) {
+ pr_err("Unexpected return code %d while trying "
+ "to power down last online CPU %d\n",
+ ret, cpu);
+ ++err;
+ }
+ } else if (cpu == tos_resident_cpu) {
+ if (ret != -EPERM) {
+ pr_err("Unexpected return code %d while trying "
+ "to power down TOS resident CPU %d\n",
+ ret, cpu);
+ ++err;
+ }
+ } else if (ret != 0) {
+ pr_err("Error occurred (%d) while trying "
+ "to power down CPU %d\n", ret, cpu);
+ ++err;
+ }
+
+ if (ret == 0)
+ cpumask_set_cpu(cpu, offlined_cpus);
+ }
+
+ /* Try to power up all the CPUs that have been offlined. */
+ for_each_cpu(cpu, offlined_cpus) {
+ int ret = cpu_up(cpu);
+
+ if (ret != 0) {
+ pr_err("Error occurred (%d) while trying "
+ "to power up CPU %d\n", ret, cpu);
+ ++err;
+ } else {
+ cpumask_clear_cpu(cpu, offlined_cpus);
+ }
+ }
+
+ /*
+ * Something went bad at some point and some CPUs could not be turned
+ * back on.
+ */
+ WARN_ON(!cpumask_empty(offlined_cpus) ||
+ num_online_cpus() != nb_available_cpus);
+
+ return err;
+}
+
+static int hotplug_tests(void)
+{
+ int err;
+ cpumask_var_t offlined_cpus;
+ int i, nb_cluster;
+ const struct cpumask **clusters;
+ char *page_buf;
+
+ err = -ENOMEM;
+ if (!alloc_cpumask_var(&offlined_cpus, GFP_KERNEL))
+ return err;
+ /* We may have up to nb_available_cpus clusters. */
+ clusters = kmalloc_array(nb_available_cpus, sizeof(*clusters),
+ GFP_KERNEL);
+ if (!clusters)
+ goto out_free_cpus;
+ page_buf = (char *)__get_free_page(GFP_KERNEL);
+ if (!page_buf)
+ goto out_free_clusters;
+
+ err = 0;
+ nb_cluster = find_clusters(cpu_online_mask, clusters);
+
+ /*
+ * Of course the last CPU cannot be powered down; cpu_down() should
+ * refuse to do so.
+ */
+ pr_info("Trying to turn off and on again all CPUs\n");
+ err += down_and_up_cpus(cpu_online_mask, offlined_cpus);
+
+ /*
+ * Take down CPUs by cluster this time. When the last CPU is turned
+ * off, the cluster itself should shut down.
+ */
+ for (i = 0; i < nb_cluster; ++i) {
+ int cluster_id =
+ topology_physical_package_id(cpumask_any(clusters[i]));
+ ssize_t len = cpumap_print_to_pagebuf(true, page_buf,
+ clusters[i]);
+ /* Remove trailing newline. */
+ page_buf[len - 1] = '\0';
+ pr_info("Trying to turn off and on again cluster %d "
+ "(CPUs %s)\n", cluster_id, page_buf);
+ err += down_and_up_cpus(clusters[i], offlined_cpus);
+ }
+
+ free_page((unsigned long)page_buf);
+out_free_clusters:
+ kfree(clusters);
+out_free_cpus:
+ free_cpumask_var(offlined_cpus);
+ return err;
+}
+
+static void dummy_callback(unsigned long ignored) {}
+
+static int suspend_cpu(int index, bool broadcast)
+{
+ int ret;
+
+ arch_cpu_idle_enter();
+
+ if (broadcast) {
+ /*
+ * The local timer will be shut down, we need to enter tick
+ * broadcast.
+ */
+ ret = tick_broadcast_enter();
+ if (ret) {
+ /*
+ * In the absence of hardware broadcast mechanism,
+ * this CPU might be used to broadcast wakeups, which
+ * may be why entering tick broadcast has failed.
+ * There is little the kernel can do to work around
+ * that, so enter WFI instead (idle state 0).
+ */
+ cpu_do_idle();
+ ret = 0;
+ goto out_arch_exit;
+ }
+ }
+
+ /*
+ * Replicate the common ARM cpuidle enter function
+ * (arm_enter_idle_state).
+ */
+ ret = CPU_PM_CPU_IDLE_ENTER(arm_cpuidle_suspend, index);
+
+ if (broadcast)
+ tick_broadcast_exit();
+
+out_arch_exit:
+ arch_cpu_idle_exit();
+
+ return ret;
+}
+
+static int suspend_test_thread(void *arg)
+{
+ int cpu = (long)arg;
+ int i, nb_suspend = 0, nb_shallow_sleep = 0, nb_err = 0;
+ struct sched_param sched_priority = { .sched_priority = MAX_RT_PRIO-1 };
+ struct cpuidle_device *dev;
+ struct cpuidle_driver *drv;
+ /* No need for an actual callback, we just want to wake up the CPU. */
+ struct timer_list wakeup_timer =
+ TIMER_INITIALIZER(dummy_callback, 0, 0);
+
+ /* Wait for the main thread to give the start signal. */
+ wait_for_completion(&suspend_threads_started);
+
+ /* Set maximum priority to preempt all other threads on this CPU. */
+ if (sched_setscheduler_nocheck(current, SCHED_FIFO, &sched_priority))
+ pr_warn("Failed to set suspend thread scheduler on CPU %d\n",
+ cpu);
+
+ dev = this_cpu_read(cpuidle_devices);
+ drv = cpuidle_get_cpu_driver(dev);
+
+ pr_info("CPU %d entering suspend cycles, states 1 through %d\n",
+ cpu, drv->state_count - 1);
+
+ for (i = 0; i < NUM_SUSPEND_CYCLE; ++i) {
+ int index;
+ /*
+ * Test all possible states, except 0 (which is usually WFI and
+ * doesn't use PSCI).
+ */
+ for (index = 1; index < drv->state_count; ++index) {
+ struct cpuidle_state *state = &drv->states[index];
+ bool broadcast = state->flags & CPUIDLE_FLAG_TIMER_STOP;
+ int ret;
+
+ /*
+ * Set the timer to wake this CPU up in some time (which
+ * should be largely sufficient for entering suspend).
+ * If the local tick is disabled when entering suspend,
+ * suspend_cpu() takes care of switching to a broadcast
+ * tick, so the timer will still wake us up.
+ */
+ mod_timer(&wakeup_timer, jiffies +
+ usecs_to_jiffies(state->target_residency));
+
+ /* IRQs must be disabled during suspend operations. */
+ local_irq_disable();
+
+ ret = suspend_cpu(index, broadcast);
+
+ /*
+ * We have woken up. Re-enable IRQs to handle any
+ * pending interrupt, do not wait until the end of the
+ * loop.
+ */
+ local_irq_enable();
+
+ if (ret == index) {
+ ++nb_suspend;
+ } else if (ret >= 0) {
+ /* We did not enter the expected state. */
+ ++nb_shallow_sleep;
+ } else {
+ pr_err("Failed to suspend CPU %d: error %d "
+ "(requested state %d, cycle %d)\n",
+ cpu, ret, index, i);
+ ++nb_err;
+ }
+ }
+ }
+
+ /*
+ * Disable the timer to make sure that the timer will not trigger
+ * later.
+ */
+ del_timer(&wakeup_timer);
+
+ if (atomic_dec_return_relaxed(&nb_active_threads) == 0)
+ complete(&suspend_threads_done);
+
+ /* Give up on RT scheduling and wait for termination. */
+ sched_priority.sched_priority = 0;
+ if (sched_setscheduler_nocheck(current, SCHED_NORMAL, &sched_priority))
+ pr_warn("Failed to set suspend thread scheduler on CPU %d\n",
+ cpu);
+ for (;;) {
+ /* Needs to be set first to avoid missing a wakeup. */
+ set_current_state(TASK_INTERRUPTIBLE);
+ if (kthread_should_stop()) {
+ __set_current_state(TASK_RUNNING);
+ break;
+ }
+ schedule();
+ }
+
+ pr_info("CPU %d suspend test results: success %d, shallow states %d, errors %d\n",
+ cpu, nb_suspend, nb_shallow_sleep, nb_err);
+
+ return nb_err;
+}
+
+static int suspend_tests(void)
+{
+ int i, cpu, err = 0;
+ struct task_struct **threads;
+ int nb_threads = 0;
+
+ threads = kmalloc_array(nb_available_cpus, sizeof(*threads),
+ GFP_KERNEL);
+ if (!threads)
+ return -ENOMEM;
+
+ /*
+ * Stop cpuidle to prevent the idle tasks from entering a deep sleep
+ * mode, as it might interfere with the suspend threads on other CPUs.
+ * This does not prevent the suspend threads from using cpuidle (only
+ * the idle tasks check this status). Take the idle lock so that
+ * the cpuidle driver and device look-up can be carried out safely.
+ */
+ cpuidle_pause_and_lock();
+
+ for_each_online_cpu(cpu) {
+ struct task_struct *thread;
+ /* Check that cpuidle is available on that CPU. */
+ struct cpuidle_device *dev = per_cpu(cpuidle_devices, cpu);
+ struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);
+
+ if (!dev || !drv) {
+ pr_warn("cpuidle not available on CPU %d, ignoring\n",
+ cpu);
+ continue;
+ }
+
+ thread = kthread_create_on_cpu(suspend_test_thread,
+ (void *)(long)cpu, cpu,
+ "psci_suspend_test");
+ if (IS_ERR(thread))
+ pr_err("Failed to create kthread on CPU %d\n", cpu);
+ else
+ threads[nb_threads++] = thread;
+ }
+
+ if (nb_threads < 1) {
+ err = -ENODEV;
+ goto out;
+ }
+
+ atomic_set(&nb_active_threads, nb_threads);
+
+ /*
+ * Wake up the suspend threads. To avoid the main thread being preempted
+ * before all the threads have been unparked, the suspend threads will
+ * wait for the completion of suspend_threads_started.
+ */
+ for (i = 0; i < nb_threads; ++i)
+ wake_up_process(threads[i]);
+ complete_all(&suspend_threads_started);
+
+ wait_for_completion(&suspend_threads_done);
+
+
+ /* Stop and destroy all threads, get return status. */
+ for (i = 0; i < nb_threads; ++i)
+ err += kthread_stop(threads[i]);
+ out:
+ cpuidle_resume_and_unlock();
+ kfree(threads);
+ return err;
+}
+
+static int __init psci_checker(void)
+{
+ int ret;
+
+ /*
+ * Since we're in an initcall, we assume that all the CPUs that can
+ * be onlined have been onlined.
+ *
+ * The tests assume that hotplug is enabled but nobody else is using it;
+ * otherwise the results will be unpredictable. However, since there
+ * is no userspace yet in initcalls, that should be fine, as long as
+ * no torture test is running at the same time (see Kconfig).
+ */
+ nb_available_cpus = num_online_cpus();
+
+ /* Check PSCI operations are set up and working. */
+ ret = psci_ops_check();
+ if (ret)
+ return ret;
+
+ pr_info("PSCI checker started using %u CPUs\n", nb_available_cpus);
+
+ pr_info("Starting hotplug tests\n");
+ ret = hotplug_tests();
+ if (ret == 0)
+ pr_info("Hotplug tests passed OK\n");
+ else if (ret > 0)
+ pr_err("%d error(s) encountered in hotplug tests\n", ret);
+ else {
+ pr_err("Out of memory\n");
+ return ret;
+ }
+
+ pr_info("Starting suspend tests (%d cycles per state)\n",
+ NUM_SUSPEND_CYCLE);
+ ret = suspend_tests();
+ if (ret == 0)
+ pr_info("Suspend tests passed OK\n");
+ else if (ret > 0)
+ pr_err("%d error(s) encountered in suspend tests\n", ret);
+ else {
+ switch (ret) {
+ case -ENOMEM:
+ pr_err("Out of memory\n");
+ break;
+ case -ENODEV:
+ pr_warn("Could not start suspend tests on any CPU\n");
+ break;
+ }
+ }
+
+ pr_info("PSCI checker completed\n");
+ return ret < 0 ? ret : 0;
+}
+late_initcall(psci_checker);
diff --git a/drivers/firmware/qcom_scm.c b/drivers/firmware/qcom_scm.c
index d95c70227c05..893f953eaccf 100644
--- a/drivers/firmware/qcom_scm.c
+++ b/drivers/firmware/qcom_scm.c
@@ -28,6 +28,10 @@
#include "qcom_scm.h"
+#define SCM_HAS_CORE_CLK BIT(0)
+#define SCM_HAS_IFACE_CLK BIT(1)
+#define SCM_HAS_BUS_CLK BIT(2)
+
struct qcom_scm {
struct device *dev;
struct clk *core_clk;
@@ -323,32 +327,40 @@ EXPORT_SYMBOL(qcom_scm_is_available);
static int qcom_scm_probe(struct platform_device *pdev)
{
struct qcom_scm *scm;
+ unsigned long clks;
int ret;
scm = devm_kzalloc(&pdev->dev, sizeof(*scm), GFP_KERNEL);
if (!scm)
return -ENOMEM;
- scm->core_clk = devm_clk_get(&pdev->dev, "core");
- if (IS_ERR(scm->core_clk)) {
- if (PTR_ERR(scm->core_clk) == -EPROBE_DEFER)
+ clks = (unsigned long)of_device_get_match_data(&pdev->dev);
+ if (clks & SCM_HAS_CORE_CLK) {
+ scm->core_clk = devm_clk_get(&pdev->dev, "core");
+ if (IS_ERR(scm->core_clk)) {
+ if (PTR_ERR(scm->core_clk) != -EPROBE_DEFER)
+ dev_err(&pdev->dev,
+ "failed to acquire core clk\n");
return PTR_ERR(scm->core_clk);
-
- scm->core_clk = NULL;
+ }
}
- if (of_device_is_compatible(pdev->dev.of_node, "qcom,scm")) {
+ if (clks & SCM_HAS_IFACE_CLK) {
scm->iface_clk = devm_clk_get(&pdev->dev, "iface");
if (IS_ERR(scm->iface_clk)) {
if (PTR_ERR(scm->iface_clk) != -EPROBE_DEFER)
- dev_err(&pdev->dev, "failed to acquire iface clk\n");
+ dev_err(&pdev->dev,
+ "failed to acquire iface clk\n");
return PTR_ERR(scm->iface_clk);
}
+ }
+ if (clks & SCM_HAS_BUS_CLK) {
scm->bus_clk = devm_clk_get(&pdev->dev, "bus");
if (IS_ERR(scm->bus_clk)) {
if (PTR_ERR(scm->bus_clk) != -EPROBE_DEFER)
- dev_err(&pdev->dev, "failed to acquire bus clk\n");
+ dev_err(&pdev->dev,
+ "failed to acquire bus clk\n");
return PTR_ERR(scm->bus_clk);
}
}
@@ -356,7 +368,9 @@ static int qcom_scm_probe(struct platform_device *pdev)
scm->reset.ops = &qcom_scm_pas_reset_ops;
scm->reset.nr_resets = 1;
scm->reset.of_node = pdev->dev.of_node;
- reset_controller_register(&scm->reset);
+ ret = devm_reset_controller_register(&pdev->dev, &scm->reset);
+ if (ret)
+ return ret;
/* vote for max clk rate for highest performance */
ret = clk_set_rate(scm->core_clk, INT_MAX);
@@ -372,10 +386,23 @@ static int qcom_scm_probe(struct platform_device *pdev)
}
static const struct of_device_id qcom_scm_dt_match[] = {
- { .compatible = "qcom,scm-apq8064",},
- { .compatible = "qcom,scm-msm8660",},
- { .compatible = "qcom,scm-msm8960",},
- { .compatible = "qcom,scm",},
+ { .compatible = "qcom,scm-apq8064",
+ .data = (void *) SCM_HAS_CORE_CLK,
+ },
+ { .compatible = "qcom,scm-msm8660",
+ .data = (void *) SCM_HAS_CORE_CLK,
+ },
+ { .compatible = "qcom,scm-msm8960",
+ .data = (void *) SCM_HAS_CORE_CLK,
+ },
+ { .compatible = "qcom,scm-msm8996",
+ .data = NULL, /* no clocks */
+ },
+ { .compatible = "qcom,scm",
+ .data = (void *)(SCM_HAS_CORE_CLK
+ | SCM_HAS_IFACE_CLK
+ | SCM_HAS_BUS_CLK),
+ },
{}
};
diff --git a/drivers/firmware/tegra/Kconfig b/drivers/firmware/tegra/Kconfig
new file mode 100644
index 000000000000..ff2730d5c468
--- /dev/null
+++ b/drivers/firmware/tegra/Kconfig
@@ -0,0 +1,25 @@
+menu "Tegra firmware driver"
+
+config TEGRA_IVC
+ bool "Tegra IVC protocol"
+ depends on ARCH_TEGRA
+ help
+ The IVC (Inter-VM Communication) protocol is part of the IPC
+ (Inter Processor Communication) framework on Tegra. It maintains the
+ data and the different communication channels in SysRAM or RAM and
+ keeps the content synchronized between the host CPU and remote
+ processors.
+
+config TEGRA_BPMP
+ bool "Tegra BPMP driver"
+ depends on ARCH_TEGRA && TEGRA_HSP_MBOX && TEGRA_IVC
+ help
+ BPMP (Boot and Power Management Processor) is designed to off-load
+ the PM functions, which include clock/DVFS/thermal/power, from the
+ CPU. It needs HSP as the hardware synchronization and notification
+ module and the IVC module as the message communication protocol.
+
+ This driver manages the IPC interface between host CPU and the
+ firmware running on BPMP.
+
+endmenu
diff --git a/drivers/firmware/tegra/Makefile b/drivers/firmware/tegra/Makefile
new file mode 100644
index 000000000000..e34a2f79e1ad
--- /dev/null
+++ b/drivers/firmware/tegra/Makefile
@@ -0,0 +1,2 @@
+obj-$(CONFIG_TEGRA_BPMP) += bpmp.o
+obj-$(CONFIG_TEGRA_IVC) += ivc.o
diff --git a/drivers/firmware/tegra/bpmp.c b/drivers/firmware/tegra/bpmp.c
new file mode 100644
index 000000000000..4ff02d310868
--- /dev/null
+++ b/drivers/firmware/tegra/bpmp.c
@@ -0,0 +1,868 @@
+/*
+ * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include <linux/clk/tegra.h>
+#include <linux/genalloc.h>
+#include <linux/mailbox_client.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/semaphore.h>
+
+#include <soc/tegra/bpmp.h>
+#include <soc/tegra/bpmp-abi.h>
+#include <soc/tegra/ivc.h>
+
+#define MSG_ACK BIT(0)
+#define MSG_RING BIT(1)
+
+static inline struct tegra_bpmp *
+mbox_client_to_bpmp(struct mbox_client *client)
+{
+ return container_of(client, struct tegra_bpmp, mbox.client);
+}
+
+struct tegra_bpmp *tegra_bpmp_get(struct device *dev)
+{
+ struct platform_device *pdev;
+ struct tegra_bpmp *bpmp;
+ struct device_node *np;
+
+ np = of_parse_phandle(dev->of_node, "nvidia,bpmp", 0);
+ if (!np)
+ return ERR_PTR(-ENOENT);
+
+ pdev = of_find_device_by_node(np);
+ if (!pdev) {
+ bpmp = ERR_PTR(-ENODEV);
+ goto put;
+ }
+
+ bpmp = platform_get_drvdata(pdev);
+ if (!bpmp) {
+ bpmp = ERR_PTR(-EPROBE_DEFER);
+ put_device(&pdev->dev);
+ goto put;
+ }
+
+put:
+ of_node_put(np);
+ return bpmp;
+}
+EXPORT_SYMBOL_GPL(tegra_bpmp_get);
+
+void tegra_bpmp_put(struct tegra_bpmp *bpmp)
+{
+ if (bpmp)
+ put_device(bpmp->dev);
+}
+EXPORT_SYMBOL_GPL(tegra_bpmp_put);
+
+static int tegra_bpmp_channel_get_index(struct tegra_bpmp_channel *channel)
+{
+ return channel - channel->bpmp->channels;
+}
+
+static int
+tegra_bpmp_channel_get_thread_index(struct tegra_bpmp_channel *channel)
+{
+ struct tegra_bpmp *bpmp = channel->bpmp;
+ unsigned int offset, count;
+ int index;
+
+ offset = bpmp->soc->channels.thread.offset;
+ count = bpmp->soc->channels.thread.count;
+
+ index = tegra_bpmp_channel_get_index(channel);
+ if (index < 0)
+ return index;
+
+ if (index < offset || index >= offset + count)
+ return -EINVAL;
+
+ return index - offset;
+}
+
+static struct tegra_bpmp_channel *
+tegra_bpmp_channel_get_thread(struct tegra_bpmp *bpmp, unsigned int index)
+{
+ unsigned int offset = bpmp->soc->channels.thread.offset;
+ unsigned int count = bpmp->soc->channels.thread.count;
+
+ if (index >= count)
+ return NULL;
+
+ return &bpmp->channels[offset + index];
+}
+
+static struct tegra_bpmp_channel *
+tegra_bpmp_channel_get_tx(struct tegra_bpmp *bpmp)
+{
+ unsigned int offset = bpmp->soc->channels.cpu_tx.offset;
+
+ return &bpmp->channels[offset + smp_processor_id()];
+}
+
+static struct tegra_bpmp_channel *
+tegra_bpmp_channel_get_rx(struct tegra_bpmp *bpmp)
+{
+ unsigned int offset = bpmp->soc->channels.cpu_rx.offset;
+
+ return &bpmp->channels[offset];
+}
+
+static bool tegra_bpmp_message_valid(const struct tegra_bpmp_message *msg)
+{
+ return (msg->tx.size <= MSG_DATA_MIN_SZ) &&
+ (msg->rx.size <= MSG_DATA_MIN_SZ) &&
+ (msg->tx.size == 0 || msg->tx.data) &&
+ (msg->rx.size == 0 || msg->rx.data);
+}
+
+static bool tegra_bpmp_master_acked(struct tegra_bpmp_channel *channel)
+{
+ void *frame;
+
+ frame = tegra_ivc_read_get_next_frame(channel->ivc);
+ if (IS_ERR(frame)) {
+ channel->ib = NULL;
+ return false;
+ }
+
+ channel->ib = frame;
+
+ return true;
+}
+
+static int tegra_bpmp_wait_ack(struct tegra_bpmp_channel *channel)
+{
+ unsigned long timeout = channel->bpmp->soc->channels.cpu_tx.timeout;
+ ktime_t end;
+
+ end = ktime_add_us(ktime_get(), timeout);
+
+ do {
+ if (tegra_bpmp_master_acked(channel))
+ return 0;
+ } while (ktime_before(ktime_get(), end));
+
+ return -ETIMEDOUT;
+}
+
+static bool tegra_bpmp_master_free(struct tegra_bpmp_channel *channel)
+{
+ void *frame;
+
+ frame = tegra_ivc_write_get_next_frame(channel->ivc);
+ if (IS_ERR(frame)) {
+ channel->ob = NULL;
+ return false;
+ }
+
+ channel->ob = frame;
+
+ return true;
+}
+
+static int tegra_bpmp_wait_master_free(struct tegra_bpmp_channel *channel)
+{
+ unsigned long timeout = channel->bpmp->soc->channels.cpu_tx.timeout;
+ ktime_t start, now;
+
+ start = ns_to_ktime(local_clock());
+
+ do {
+ if (tegra_bpmp_master_free(channel))
+ return 0;
+
+ now = ns_to_ktime(local_clock());
+ } while (ktime_us_delta(now, start) < timeout);
+
+ return -ETIMEDOUT;
+}
+
+static ssize_t __tegra_bpmp_channel_read(struct tegra_bpmp_channel *channel,
+ void *data, size_t size)
+{
+ if (data && size > 0)
+ memcpy(data, channel->ib->data, size);
+
+ return tegra_ivc_read_advance(channel->ivc);
+}
+
+static ssize_t tegra_bpmp_channel_read(struct tegra_bpmp_channel *channel,
+ void *data, size_t size)
+{
+ struct tegra_bpmp *bpmp = channel->bpmp;
+ unsigned long flags;
+ ssize_t err;
+ int index;
+
+ index = tegra_bpmp_channel_get_thread_index(channel);
+ if (index < 0)
+ return index;
+
+ spin_lock_irqsave(&bpmp->lock, flags);
+ err = __tegra_bpmp_channel_read(channel, data, size);
+ clear_bit(index, bpmp->threaded.allocated);
+ spin_unlock_irqrestore(&bpmp->lock, flags);
+
+ up(&bpmp->threaded.lock);
+
+ return err;
+}
+
+static ssize_t __tegra_bpmp_channel_write(struct tegra_bpmp_channel *channel,
+ unsigned int mrq, unsigned long flags,
+ const void *data, size_t size)
+{
+ channel->ob->code = mrq;
+ channel->ob->flags = flags;
+
+ if (data && size > 0)
+ memcpy(channel->ob->data, data, size);
+
+ return tegra_ivc_write_advance(channel->ivc);
+}
+
+static struct tegra_bpmp_channel *
+tegra_bpmp_write_threaded(struct tegra_bpmp *bpmp, unsigned int mrq,
+ const void *data, size_t size)
+{
+ unsigned long timeout = bpmp->soc->channels.thread.timeout;
+ unsigned int count = bpmp->soc->channels.thread.count;
+ struct tegra_bpmp_channel *channel;
+ unsigned long flags;
+ unsigned int index;
+ int err;
+
+ err = down_timeout(&bpmp->threaded.lock, usecs_to_jiffies(timeout));
+ if (err < 0)
+ return ERR_PTR(err);
+
+ spin_lock_irqsave(&bpmp->lock, flags);
+
+ index = find_first_zero_bit(bpmp->threaded.allocated, count);
+ if (index == count) {
+ channel = ERR_PTR(-EBUSY);
+ goto unlock;
+ }
+
+ channel = tegra_bpmp_channel_get_thread(bpmp, index);
+ if (!channel) {
+ channel = ERR_PTR(-EINVAL);
+ goto unlock;
+ }
+
+ if (!tegra_bpmp_master_free(channel)) {
+ channel = ERR_PTR(-EBUSY);
+ goto unlock;
+ }
+
+ set_bit(index, bpmp->threaded.allocated);
+
+ err = __tegra_bpmp_channel_write(channel, mrq, MSG_ACK | MSG_RING,
+ data, size);
+ if (err < 0) {
+ clear_bit(index, bpmp->threaded.allocated);
+ goto unlock;
+ }
+
+ set_bit(index, bpmp->threaded.busy);
+
+unlock:
+ spin_unlock_irqrestore(&bpmp->lock, flags);
+ return channel;
+}
+
+static ssize_t tegra_bpmp_channel_write(struct tegra_bpmp_channel *channel,
+ unsigned int mrq, unsigned long flags,
+ const void *data, size_t size)
+{
+ int err;
+
+ err = tegra_bpmp_wait_master_free(channel);
+ if (err < 0)
+ return err;
+
+ return __tegra_bpmp_channel_write(channel, mrq, flags, data, size);
+}
+
+int tegra_bpmp_transfer_atomic(struct tegra_bpmp *bpmp,
+ struct tegra_bpmp_message *msg)
+{
+ struct tegra_bpmp_channel *channel;
+ int err;
+
+ if (WARN_ON(!irqs_disabled()))
+ return -EPERM;
+
+ if (!tegra_bpmp_message_valid(msg))
+ return -EINVAL;
+
+ channel = tegra_bpmp_channel_get_tx(bpmp);
+
+ err = tegra_bpmp_channel_write(channel, msg->mrq, MSG_ACK,
+ msg->tx.data, msg->tx.size);
+ if (err < 0)
+ return err;
+
+ err = mbox_send_message(bpmp->mbox.channel, NULL);
+ if (err < 0)
+ return err;
+
+ mbox_client_txdone(bpmp->mbox.channel, 0);
+
+ err = tegra_bpmp_wait_ack(channel);
+ if (err < 0)
+ return err;
+
+ return __tegra_bpmp_channel_read(channel, msg->rx.data, msg->rx.size);
+}
+EXPORT_SYMBOL_GPL(tegra_bpmp_transfer_atomic);
+
+int tegra_bpmp_transfer(struct tegra_bpmp *bpmp,
+ struct tegra_bpmp_message *msg)
+{
+ struct tegra_bpmp_channel *channel;
+ unsigned long timeout;
+ int err;
+
+ if (WARN_ON(irqs_disabled()))
+ return -EPERM;
+
+ if (!tegra_bpmp_message_valid(msg))
+ return -EINVAL;
+
+ channel = tegra_bpmp_write_threaded(bpmp, msg->mrq, msg->tx.data,
+ msg->tx.size);
+ if (IS_ERR(channel))
+ return PTR_ERR(channel);
+
+ err = mbox_send_message(bpmp->mbox.channel, NULL);
+ if (err < 0)
+ return err;
+
+ mbox_client_txdone(bpmp->mbox.channel, 0);
+
+ timeout = usecs_to_jiffies(bpmp->soc->channels.thread.timeout);
+
+ err = wait_for_completion_timeout(&channel->completion, timeout);
+ if (err == 0)
+ return -ETIMEDOUT;
+
+ return tegra_bpmp_channel_read(channel, msg->rx.data, msg->rx.size);
+}
+EXPORT_SYMBOL_GPL(tegra_bpmp_transfer);
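+
+/*
+ * Illustrative sketch only (not part of this patch): a client typically
+ * fills in a tegra_bpmp_message and sleeps in tegra_bpmp_transfer()
+ * until the firmware replies, e.g. for MRQ_PING:
+ *
+ *	struct mrq_ping_request request = { .challenge = 1 };
+ *	struct mrq_ping_response response;
+ *	struct tegra_bpmp_message msg = {
+ *		.mrq = MRQ_PING,
+ *		.tx = { .data = &request, .size = sizeof(request) },
+ *		.rx = { .data = &response, .size = sizeof(response) },
+ *	};
+ *	int err = tegra_bpmp_transfer(bpmp, &msg);
+ */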
+
+static struct tegra_bpmp_mrq *tegra_bpmp_find_mrq(struct tegra_bpmp *bpmp,
+ unsigned int mrq)
+{
+ struct tegra_bpmp_mrq *entry;
+
+ list_for_each_entry(entry, &bpmp->mrqs, list)
+ if (entry->mrq == mrq)
+ return entry;
+
+ return NULL;
+}
+
+static void tegra_bpmp_mrq_return(struct tegra_bpmp_channel *channel,
+ int code, const void *data, size_t size)
+{
+ unsigned long flags = channel->ib->flags;
+ struct tegra_bpmp *bpmp = channel->bpmp;
+ struct tegra_bpmp_mb_data *frame;
+ int err;
+
+ if (WARN_ON(size > MSG_DATA_MIN_SZ))
+ return;
+
+ err = tegra_ivc_read_advance(channel->ivc);
+ if (WARN_ON(err < 0))
+ return;
+
+ if ((flags & MSG_ACK) == 0)
+ return;
+
+ frame = tegra_ivc_write_get_next_frame(channel->ivc);
+ if (WARN_ON(IS_ERR(frame)))
+ return;
+
+ frame->code = code;
+
+ if (data && size > 0)
+ memcpy(frame->data, data, size);
+
+ err = tegra_ivc_write_advance(channel->ivc);
+ if (WARN_ON(err < 0))
+ return;
+
+ if (flags & MSG_RING) {
+ err = mbox_send_message(bpmp->mbox.channel, NULL);
+ if (WARN_ON(err < 0))
+ return;
+
+ mbox_client_txdone(bpmp->mbox.channel, 0);
+ }
+}
+
+static void tegra_bpmp_handle_mrq(struct tegra_bpmp *bpmp,
+ unsigned int mrq,
+ struct tegra_bpmp_channel *channel)
+{
+ struct tegra_bpmp_mrq *entry;
+ u32 zero = 0;
+
+ spin_lock(&bpmp->lock);
+
+ entry = tegra_bpmp_find_mrq(bpmp, mrq);
+ if (!entry) {
+ spin_unlock(&bpmp->lock);
+ tegra_bpmp_mrq_return(channel, -EINVAL, &zero, sizeof(zero));
+ return;
+ }
+
+ entry->handler(mrq, channel, entry->data);
+
+ spin_unlock(&bpmp->lock);
+}
+
+int tegra_bpmp_request_mrq(struct tegra_bpmp *bpmp, unsigned int mrq,
+ tegra_bpmp_mrq_handler_t handler, void *data)
+{
+ struct tegra_bpmp_mrq *entry;
+ unsigned long flags;
+
+ if (!handler)
+ return -EINVAL;
+
+ entry = devm_kzalloc(bpmp->dev, sizeof(*entry), GFP_KERNEL);
+ if (!entry)
+ return -ENOMEM;
+
+ spin_lock_irqsave(&bpmp->lock, flags);
+
+ entry->mrq = mrq;
+ entry->handler = handler;
+ entry->data = data;
+ list_add(&entry->list, &bpmp->mrqs);
+
+ spin_unlock_irqrestore(&bpmp->lock, flags);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(tegra_bpmp_request_mrq);
+
+void tegra_bpmp_free_mrq(struct tegra_bpmp *bpmp, unsigned int mrq, void *data)
+{
+ struct tegra_bpmp_mrq *entry;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bpmp->lock, flags);
+
+ entry = tegra_bpmp_find_mrq(bpmp, mrq);
+ if (!entry)
+ goto unlock;
+
+ list_del(&entry->list);
+ devm_kfree(bpmp->dev, entry);
+
+unlock:
+ spin_unlock_irqrestore(&bpmp->lock, flags);
+}
+EXPORT_SYMBOL_GPL(tegra_bpmp_free_mrq);
+
+static void tegra_bpmp_mrq_handle_ping(unsigned int mrq,
+ struct tegra_bpmp_channel *channel,
+ void *data)
+{
+ struct mrq_ping_request *request;
+ struct mrq_ping_response response;
+
+ request = (struct mrq_ping_request *)channel->ib->data;
+
+ memset(&response, 0, sizeof(response));
+ response.reply = request->challenge << 1;
+
+ tegra_bpmp_mrq_return(channel, 0, &response, sizeof(response));
+}
+
+static int tegra_bpmp_ping(struct tegra_bpmp *bpmp)
+{
+ struct mrq_ping_response response;
+ struct mrq_ping_request request;
+ struct tegra_bpmp_message msg;
+ unsigned long flags;
+ ktime_t start, end;
+ int err;
+
+ memset(&request, 0, sizeof(request));
+ request.challenge = 1;
+
+ memset(&response, 0, sizeof(response));
+
+ memset(&msg, 0, sizeof(msg));
+ msg.mrq = MRQ_PING;
+ msg.tx.data = &request;
+ msg.tx.size = sizeof(request);
+ msg.rx.data = &response;
+ msg.rx.size = sizeof(response);
+
+ local_irq_save(flags);
+ start = ktime_get();
+ err = tegra_bpmp_transfer_atomic(bpmp, &msg);
+ end = ktime_get();
+ local_irq_restore(flags);
+
+ if (!err)
+ dev_dbg(bpmp->dev,
+ "ping ok: challenge: %u, response: %u, time: %lld\n",
+ request.challenge, response.reply,
+ ktime_to_us(ktime_sub(end, start)));
+
+ return err;
+}
+
+static int tegra_bpmp_get_firmware_tag(struct tegra_bpmp *bpmp, char *tag,
+ size_t size)
+{
+ struct mrq_query_tag_request request;
+ struct tegra_bpmp_message msg;
+ unsigned long flags;
+ dma_addr_t phys;
+ void *virt;
+ int err;
+
+ virt = dma_alloc_coherent(bpmp->dev, MSG_DATA_MIN_SZ, &phys,
+ GFP_KERNEL | GFP_DMA32);
+ if (!virt)
+ return -ENOMEM;
+
+ memset(&request, 0, sizeof(request));
+ request.addr = phys;
+
+ memset(&msg, 0, sizeof(msg));
+ msg.mrq = MRQ_QUERY_TAG;
+ msg.tx.data = &request;
+ msg.tx.size = sizeof(request);
+
+ local_irq_save(flags);
+ err = tegra_bpmp_transfer_atomic(bpmp, &msg);
+ local_irq_restore(flags);
+
+ if (err == 0)
+ strlcpy(tag, virt, size);
+
+ dma_free_coherent(bpmp->dev, MSG_DATA_MIN_SZ, virt, phys);
+
+ return err;
+}
+
+static void tegra_bpmp_channel_signal(struct tegra_bpmp_channel *channel)
+{
+ unsigned long flags = channel->ob->flags;
+
+ if ((flags & MSG_RING) == 0)
+ return;
+
+ complete(&channel->completion);
+}
+
+static void tegra_bpmp_handle_rx(struct mbox_client *client, void *data)
+{
+ struct tegra_bpmp *bpmp = mbox_client_to_bpmp(client);
+ struct tegra_bpmp_channel *channel;
+ unsigned int i, count;
+ unsigned long *busy;
+
+ channel = tegra_bpmp_channel_get_rx(bpmp);
+ count = bpmp->soc->channels.thread.count;
+ busy = bpmp->threaded.busy;
+
+ if (tegra_bpmp_master_acked(channel))
+ tegra_bpmp_handle_mrq(bpmp, channel->ib->code, channel);
+
+ spin_lock(&bpmp->lock);
+
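+ /* Signal completion of any threaded transfers that have been acked. */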
+ for_each_set_bit(i, busy, count) {
+ struct tegra_bpmp_channel *channel;
+
+ channel = tegra_bpmp_channel_get_thread(bpmp, i);
+ if (!channel)
+ continue;
+
+ if (tegra_bpmp_master_acked(channel)) {
+ tegra_bpmp_channel_signal(channel);
+ clear_bit(i, busy);
+ }
+ }
+
+ spin_unlock(&bpmp->lock);
+}
+
+static void tegra_bpmp_ivc_notify(struct tegra_ivc *ivc, void *data)
+{
+ struct tegra_bpmp *bpmp = data;
+ int err;
+
+ if (WARN_ON(bpmp->mbox.channel == NULL))
+ return;
+
+ err = mbox_send_message(bpmp->mbox.channel, NULL);
+ if (err < 0)
+ return;
+
+ mbox_client_txdone(bpmp->mbox.channel, 0);
+}
+
+static int tegra_bpmp_channel_init(struct tegra_bpmp_channel *channel,
+ struct tegra_bpmp *bpmp,
+ unsigned int index)
+{
+ size_t message_size, queue_size;
+ unsigned int offset;
+ int err;
+
+ channel->ivc = devm_kzalloc(bpmp->dev, sizeof(*channel->ivc),
+ GFP_KERNEL);
+ if (!channel->ivc)
+ return -ENOMEM;
+
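+ /* Channels occupy consecutive, queue-sized slots in the shared memory. */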
+ message_size = tegra_ivc_align(MSG_MIN_SZ);
+ queue_size = tegra_ivc_total_queue_size(message_size);
+ offset = queue_size * index;
+
+ err = tegra_ivc_init(channel->ivc, NULL,
+ bpmp->rx.virt + offset, bpmp->rx.phys + offset,
+ bpmp->tx.virt + offset, bpmp->tx.phys + offset,
+ 1, message_size, tegra_bpmp_ivc_notify,
+ bpmp);
+ if (err < 0) {
+ dev_err(bpmp->dev, "failed to setup IVC for channel %u: %d\n",
+ index, err);
+ return err;
+ }
+
+ init_completion(&channel->completion);
+ channel->bpmp = bpmp;
+
+ return 0;
+}
+
+static void tegra_bpmp_channel_reset(struct tegra_bpmp_channel *channel)
+{
+ /* reset the channel state */
+ tegra_ivc_reset(channel->ivc);
+
+ /* sync the channel state with BPMP */
+ while (tegra_ivc_notified(channel->ivc))
+ ;
+}
+
+static void tegra_bpmp_channel_cleanup(struct tegra_bpmp_channel *channel)
+{
+ tegra_ivc_cleanup(channel->ivc);
+}
+
+static int tegra_bpmp_probe(struct platform_device *pdev)
+{
+ struct tegra_bpmp_channel *channel;
+ struct tegra_bpmp *bpmp;
+ unsigned int i;
+ char tag[32];
+ size_t size;
+ int err;
+
+ bpmp = devm_kzalloc(&pdev->dev, sizeof(*bpmp), GFP_KERNEL);
+ if (!bpmp)
+ return -ENOMEM;
+
+ bpmp->soc = of_device_get_match_data(&pdev->dev);
+ bpmp->dev = &pdev->dev;
+
+ bpmp->tx.pool = of_gen_pool_get(pdev->dev.of_node, "shmem", 0);
+ if (!bpmp->tx.pool) {
+ dev_err(&pdev->dev, "TX shmem pool not found\n");
+ return -ENOMEM;
+ }
+
+ bpmp->tx.virt = gen_pool_dma_alloc(bpmp->tx.pool, 4096, &bpmp->tx.phys);
+ if (!bpmp->tx.virt) {
+ dev_err(&pdev->dev, "failed to allocate from TX pool\n");
+ return -ENOMEM;
+ }
+
+ bpmp->rx.pool = of_gen_pool_get(pdev->dev.of_node, "shmem", 1);
+ if (!bpmp->rx.pool) {
+ dev_err(&pdev->dev, "RX shmem pool not found\n");
+ err = -ENOMEM;
+ goto free_tx;
+ }
+
+ bpmp->rx.virt = gen_pool_dma_alloc(bpmp->rx.pool, 4096, &bpmp->rx.phys);
+ if (!bpmp->rx.virt) {
+ dev_err(&pdev->dev, "failed to allocate from RX pool\n");
+ err = -ENOMEM;
+ goto free_tx;
+ }
+
+ INIT_LIST_HEAD(&bpmp->mrqs);
+ spin_lock_init(&bpmp->lock);
+
+ bpmp->threaded.count = bpmp->soc->channels.thread.count;
+ sema_init(&bpmp->threaded.lock, bpmp->threaded.count);
+
+ size = BITS_TO_LONGS(bpmp->threaded.count) * sizeof(long);
+
+ bpmp->threaded.allocated = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
+ if (!bpmp->threaded.allocated) {
+ err = -ENOMEM;
+ goto free_rx;
+ }
+
+ bpmp->threaded.busy = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
+ if (!bpmp->threaded.busy) {
+ err = -ENOMEM;
+ goto free_rx;
+ }
+
+ bpmp->num_channels = bpmp->soc->channels.cpu_tx.count +
+ bpmp->soc->channels.thread.count +
+ bpmp->soc->channels.cpu_rx.count;
+
+ bpmp->channels = devm_kcalloc(&pdev->dev, bpmp->num_channels,
+ sizeof(*channel), GFP_KERNEL);
+ if (!bpmp->channels) {
+ err = -ENOMEM;
+ goto free_rx;
+ }
+
+ /* message channel initialization */
+ for (i = 0; i < bpmp->num_channels; i++) {
+ struct tegra_bpmp_channel *channel = &bpmp->channels[i];
+
+ err = tegra_bpmp_channel_init(channel, bpmp, i);
+ if (err < 0)
+ goto cleanup_channels;
+ }
+
+ /* mbox registration */
+ bpmp->mbox.client.dev = &pdev->dev;
+ bpmp->mbox.client.rx_callback = tegra_bpmp_handle_rx;
+ bpmp->mbox.client.tx_block = false;
+ bpmp->mbox.client.knows_txdone = false;
+
+ bpmp->mbox.channel = mbox_request_channel(&bpmp->mbox.client, 0);
+ if (IS_ERR(bpmp->mbox.channel)) {
+ err = PTR_ERR(bpmp->mbox.channel);
+ dev_err(&pdev->dev, "failed to get HSP mailbox: %d\n", err);
+ goto cleanup_channels;
+ }
+
+ /* reset message channels */
+ for (i = 0; i < bpmp->num_channels; i++) {
+ struct tegra_bpmp_channel *channel = &bpmp->channels[i];
+
+ tegra_bpmp_channel_reset(channel);
+ }
+
+ err = tegra_bpmp_request_mrq(bpmp, MRQ_PING,
+ tegra_bpmp_mrq_handle_ping, bpmp);
+ if (err < 0)
+ goto free_mbox;
+
+ err = tegra_bpmp_ping(bpmp);
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to ping BPMP: %d\n", err);
+ goto free_mrq;
+ }
+
+ err = tegra_bpmp_get_firmware_tag(bpmp, tag, sizeof(tag) - 1);
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to get firmware tag: %d\n", err);
+ goto free_mrq;
+ }
+
+ dev_info(&pdev->dev, "firmware: %s\n", tag);
+
+ err = of_platform_default_populate(pdev->dev.of_node, NULL, &pdev->dev);
+ if (err < 0)
+ goto free_mrq;
+
+ err = tegra_bpmp_init_clocks(bpmp);
+ if (err < 0)
+ goto free_mrq;
+
+ err = tegra_bpmp_init_resets(bpmp);
+ if (err < 0)
+ goto free_mrq;
+
+ platform_set_drvdata(pdev, bpmp);
+
+ return 0;
+
+free_mrq:
+ tegra_bpmp_free_mrq(bpmp, MRQ_PING, bpmp);
+free_mbox:
+ mbox_free_channel(bpmp->mbox.channel);
+cleanup_channels:
+ while (i--)
+ tegra_bpmp_channel_cleanup(&bpmp->channels[i]);
+free_rx:
+ gen_pool_free(bpmp->rx.pool, (unsigned long)bpmp->rx.virt, 4096);
+free_tx:
+ gen_pool_free(bpmp->tx.pool, (unsigned long)bpmp->tx.virt, 4096);
+ return err;
+}
+
+static const struct tegra_bpmp_soc tegra186_soc = {
+ .channels = {
+ .cpu_tx = {
+ .offset = 0,
+ .count = 6,
+ .timeout = 60 * USEC_PER_SEC,
+ },
+ .thread = {
+ .offset = 6,
+ .count = 7,
+ .timeout = 600 * USEC_PER_SEC,
+ },
+ .cpu_rx = {
+ .offset = 13,
+ .count = 1,
+ .timeout = 0,
+ },
+ },
+ .num_resets = 193,
+};
+
+static const struct of_device_id tegra_bpmp_match[] = {
+ { .compatible = "nvidia,tegra186-bpmp", .data = &tegra186_soc },
+ { }
+};
+
+static struct platform_driver tegra_bpmp_driver = {
+ .driver = {
+ .name = "tegra-bpmp",
+ .of_match_table = tegra_bpmp_match,
+ },
+ .probe = tegra_bpmp_probe,
+};
+
+static int __init tegra_bpmp_init(void)
+{
+ return platform_driver_register(&tegra_bpmp_driver);
+}
+core_initcall(tegra_bpmp_init);
diff --git a/drivers/firmware/tegra/ivc.c b/drivers/firmware/tegra/ivc.c
new file mode 100644
index 000000000000..29ecfd815320
--- /dev/null
+++ b/drivers/firmware/tegra/ivc.c
@@ -0,0 +1,695 @@
+/*
+ * Copyright (c) 2014-2016, NVIDIA CORPORATION. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include <soc/tegra/ivc.h>
+
+#define TEGRA_IVC_ALIGN 64
+
+/*
+ * IVC channel reset protocol.
+ *
+ * Each end uses the tx.state field of the header it transmits through to
+ * indicate its synchronization state.
+ */
+enum tegra_ivc_state {
+ /*
+ * This value is zero for backwards compatibility with services that
+ * assume channels to be initially zeroed. Such channels are in an
+ * initially valid state, but cannot be asynchronously reset, and must
+ * maintain a valid state at all times.
+ *
+ * The transmitting end can enter the established state from the sync or
+ * ack state when it observes the receiving endpoint in the ack or
+ * established state, indicating that it has cleared the counters in our
+ * rx.channel.
+ */
+ TEGRA_IVC_STATE_ESTABLISHED = 0,
+
+ /*
+ * If an endpoint is observed in the sync state, the remote endpoint is
+ * allowed to clear the counters it owns asynchronously with respect to
+ * the current endpoint. Therefore, the current endpoint is no longer
+ * allowed to communicate.
+ */
+ TEGRA_IVC_STATE_SYNC,
+
+ /*
+ * When the transmitting end observes the receiving end in the sync
+ * state, it can clear the w_count and r_count and transition to the ack
+ * state. If the remote endpoint observes us in the ack state, it can
+ * return to the established state once it has cleared its counters.
+ */
+ TEGRA_IVC_STATE_ACK
+};
+
+/*
+ * This structure is divided into two cache-aligned parts: the first is only
+ * written through the tx.channel pointer, while the second is only written
+ * through the rx.channel pointer. This delineates ownership of the cache
+ * lines, which is critical to performance and necessary in non-cache coherent
+ * implementations.
+ */
+struct tegra_ivc_header {
+ union {
+ struct {
+ /* fields owned by the transmitting end */
+ u32 count;
+ u32 state;
+ };
+
+ u8 pad[TEGRA_IVC_ALIGN];
+ } tx;
+
+ union {
+ /* fields owned by the receiving end */
+ u32 count;
+ u8 pad[TEGRA_IVC_ALIGN];
+ } rx;
+};
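+
+/*
+ * Illustrative layout note (assumptions: num_frames frames of frame_size
+ * bytes each, both TEGRA_IVC_ALIGN-aligned): a queue occupies
+ *
+ *	sizeof(struct tegra_ivc_header) + num_frames * frame_size
+ *
+ * bytes of shared memory, with frame i starting immediately after the
+ * header at (void *)(header + 1) + i * frame_size, as computed by
+ * tegra_ivc_frame_virt() below.
+ */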
+
+static inline void tegra_ivc_invalidate(struct tegra_ivc *ivc, dma_addr_t phys)
+{
+ if (!ivc->peer)
+ return;
+
+ dma_sync_single_for_cpu(ivc->peer, phys, TEGRA_IVC_ALIGN,
+ DMA_FROM_DEVICE);
+}
+
+static inline void tegra_ivc_flush(struct tegra_ivc *ivc, dma_addr_t phys)
+{
+ if (!ivc->peer)
+ return;
+
+ dma_sync_single_for_device(ivc->peer, phys, TEGRA_IVC_ALIGN,
+ DMA_TO_DEVICE);
+}
+
+static inline bool tegra_ivc_empty(struct tegra_ivc *ivc,
+ struct tegra_ivc_header *header)
+{
+ /*
+ * This function performs multiple checks on the same values with
+ * security implications, so create snapshots with ACCESS_ONCE() to
+ * ensure that these checks use the same values.
+ */
+ u32 tx = ACCESS_ONCE(header->tx.count);
+ u32 rx = ACCESS_ONCE(header->rx.count);
+
+ /*
+ * Perform an over-full check to prevent denial of service attacks
+ * where a server could be easily fooled into believing that there's
+ * an extremely large number of frames ready, since receivers are not
+ * expected to check for full or over-full conditions.
+ *
+ * Although the channel isn't empty, this is an invalid case caused by
+ * a potentially malicious peer, so returning empty is safer, because
+ * it gives the impression that the channel has gone silent.
+ */
+ if (tx - rx > ivc->num_frames)
+ return true;
+
+ return tx == rx;
+}
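+
+/*
+ * Worked example (illustrative values only): the counters are free-running
+ * u32 values, so unsigned subtraction handles wrap-around. With
+ * num_frames = 16, tx = 0x00000002 and rx = 0xfffffffe, tx - rx = 4 frames
+ * are in flight even though tx < rx numerically. A malicious peer that
+ * writes tx = rx + 17 makes tx - rx = 17 > num_frames, and the check above
+ * reports the channel as empty instead.
+ */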
+
+static inline bool tegra_ivc_full(struct tegra_ivc *ivc,
+ struct tegra_ivc_header *header)
+{
+ u32 tx = ACCESS_ONCE(header->tx.count);
+ u32 rx = ACCESS_ONCE(header->rx.count);
+
+ /*
+ * Invalid cases where the counters indicate that the queue is over
+ * capacity also appear full.
+ */
+ return tx - rx >= ivc->num_frames;
+}
+
+static inline u32 tegra_ivc_available(struct tegra_ivc *ivc,
+ struct tegra_ivc_header *header)
+{
+ u32 tx = ACCESS_ONCE(header->tx.count);
+ u32 rx = ACCESS_ONCE(header->rx.count);
+
+ /*
+ * This function isn't expected to be used in scenarios where an
+ * over-full situation can lead to denial of service attacks. See the
+ * comment in tegra_ivc_empty() for an explanation about special
+ * over-full considerations.
+ */
+ return tx - rx;
+}
+
+static inline void tegra_ivc_advance_tx(struct tegra_ivc *ivc)
+{
+ ACCESS_ONCE(ivc->tx.channel->tx.count) =
+ ACCESS_ONCE(ivc->tx.channel->tx.count) + 1;
+
+ if (ivc->tx.position == ivc->num_frames - 1)
+ ivc->tx.position = 0;
+ else
+ ivc->tx.position++;
+}
+
+static inline void tegra_ivc_advance_rx(struct tegra_ivc *ivc)
+{
+ ACCESS_ONCE(ivc->rx.channel->rx.count) =
+ ACCESS_ONCE(ivc->rx.channel->rx.count) + 1;
+
+ if (ivc->rx.position == ivc->num_frames - 1)
+ ivc->rx.position = 0;
+ else
+ ivc->rx.position++;
+}
+
+static inline int tegra_ivc_check_read(struct tegra_ivc *ivc)
+{
+ unsigned int offset = offsetof(struct tegra_ivc_header, tx.count);
+
+ /*
+ * tx.channel->state is set locally, so it is not synchronized with
+ * state from the remote peer. The remote peer cannot reset its
+ * transmit counters until we've acknowledged its synchronization
+ * request, so no additional synchronization is required because an
+ * asynchronous transition of rx.channel->state to
+ * TEGRA_IVC_STATE_ACK is not allowed.
+ */
+ if (ivc->tx.channel->tx.state != TEGRA_IVC_STATE_ESTABLISHED)
+ return -ECONNRESET;
+
+ /*
+ * Avoid unnecessary invalidations when performing repeated accesses
+ * to an IVC channel by checking the old queue pointers first.
+ *
+ * Synchronization is only necessary when these pointers indicate
+ * empty or full.
+ */
+ if (!tegra_ivc_empty(ivc, ivc->rx.channel))
+ return 0;
+
+ tegra_ivc_invalidate(ivc, ivc->rx.phys + offset);
+
+ if (tegra_ivc_empty(ivc, ivc->rx.channel))
+ return -ENOSPC;
+
+ return 0;
+}
+
+static inline int tegra_ivc_check_write(struct tegra_ivc *ivc)
+{
+ unsigned int offset = offsetof(struct tegra_ivc_header, rx.count);
+
+ if (ivc->tx.channel->tx.state != TEGRA_IVC_STATE_ESTABLISHED)
+ return -ECONNRESET;
+
+ if (!tegra_ivc_full(ivc, ivc->tx.channel))
+ return 0;
+
+ tegra_ivc_invalidate(ivc, ivc->tx.phys + offset);
+
+ if (tegra_ivc_full(ivc, ivc->tx.channel))
+ return -ENOSPC;
+
+ return 0;
+}
+
+static void *tegra_ivc_frame_virt(struct tegra_ivc *ivc,
+ struct tegra_ivc_header *header,
+ unsigned int frame)
+{
+ if (WARN_ON(frame >= ivc->num_frames))
+ return ERR_PTR(-EINVAL);
+
+ return (void *)(header + 1) + ivc->frame_size * frame;
+}
+
+static inline dma_addr_t tegra_ivc_frame_phys(struct tegra_ivc *ivc,
+ dma_addr_t phys,
+ unsigned int frame)
+{
+ unsigned long offset;
+
+ offset = sizeof(struct tegra_ivc_header) + ivc->frame_size * frame;
+
+ return phys + offset;
+}
+
+static inline void tegra_ivc_invalidate_frame(struct tegra_ivc *ivc,
+ dma_addr_t phys,
+ unsigned int frame,
+ unsigned int offset,
+ size_t size)
+{
+ if (!ivc->peer || WARN_ON(frame >= ivc->num_frames))
+ return;
+
+ phys = tegra_ivc_frame_phys(ivc, phys, frame) + offset;
+
+ dma_sync_single_for_cpu(ivc->peer, phys, size, DMA_FROM_DEVICE);
+}
+
+static inline void tegra_ivc_flush_frame(struct tegra_ivc *ivc,
+ dma_addr_t phys,
+ unsigned int frame,
+ unsigned int offset,
+ size_t size)
+{
+ if (!ivc->peer || WARN_ON(frame >= ivc->num_frames))
+ return;
+
+ phys = tegra_ivc_frame_phys(ivc, phys, frame) + offset;
+
+ dma_sync_single_for_device(ivc->peer, phys, size, DMA_TO_DEVICE);
+}
+
+/* directly peek at the next frame rx'ed */
+void *tegra_ivc_read_get_next_frame(struct tegra_ivc *ivc)
+{
+ int err;
+
+ if (WARN_ON(ivc == NULL))
+ return ERR_PTR(-EINVAL);
+
+ err = tegra_ivc_check_read(ivc);
+ if (err < 0)
+ return ERR_PTR(err);
+
+ /*
+ * Order observation of ivc->rx.position potentially indicating new
+ * data before data read.
+ */
+ smp_rmb();
+
+ tegra_ivc_invalidate_frame(ivc, ivc->rx.phys, ivc->rx.position, 0,
+ ivc->frame_size);
+
+ return tegra_ivc_frame_virt(ivc, ivc->rx.channel, ivc->rx.position);
+}
+EXPORT_SYMBOL(tegra_ivc_read_get_next_frame);
+
+int tegra_ivc_read_advance(struct tegra_ivc *ivc)
+{
+ unsigned int rx = offsetof(struct tegra_ivc_header, rx.count);
+ unsigned int tx = offsetof(struct tegra_ivc_header, tx.count);
+ int err;
+
+ /*
+ * No read barriers or synchronization here: the caller is expected to
+ * have already observed the channel non-empty. This check is just to
+ * catch programming errors.
+ */
+ err = tegra_ivc_check_read(ivc);
+ if (err < 0)
+ return err;
+
+ tegra_ivc_advance_rx(ivc);
+
+ tegra_ivc_flush(ivc, ivc->rx.phys + rx);
+
+ /*
+ * Ensure our write to ivc->rx.position occurs before our read from
+ * ivc->tx.position.
+ */
+ smp_mb();
+
+ /*
+ * Notify only upon transition from full to non-full. The available
+ * count can only asynchronously increase, so the worst possible
+ * side-effect will be a spurious notification.
+ */
+ tegra_ivc_invalidate(ivc, ivc->rx.phys + tx);
+
+ if (tegra_ivc_available(ivc, ivc->rx.channel) == ivc->num_frames - 1)
+ ivc->notify(ivc, ivc->notify_data);
+
+ return 0;
+}
+EXPORT_SYMBOL(tegra_ivc_read_advance);
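+
+/*
+ * Edge-notification example (illustrative): with num_frames = 8 the channel
+ * is full when tx - rx = 8, i.e. tegra_ivc_available() returns 8. After one
+ * tegra_ivc_read_advance() the count drops to 7, which is num_frames - 1,
+ * so the notify callback fires exactly once on the full-to-non-full edge;
+ * subsequent reads from a non-full channel do not notify.
+ */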
+
+/* directly poke at the next frame to be tx'ed */
+void *tegra_ivc_write_get_next_frame(struct tegra_ivc *ivc)
+{
+ int err;
+
+ err = tegra_ivc_check_write(ivc);
+ if (err < 0)
+ return ERR_PTR(err);
+
+ return tegra_ivc_frame_virt(ivc, ivc->tx.channel, ivc->tx.position);
+}
+EXPORT_SYMBOL(tegra_ivc_write_get_next_frame);
+
+/* advance the tx buffer */
+int tegra_ivc_write_advance(struct tegra_ivc *ivc)
+{
+ unsigned int tx = offsetof(struct tegra_ivc_header, tx.count);
+ unsigned int rx = offsetof(struct tegra_ivc_header, rx.count);
+ int err;
+
+ err = tegra_ivc_check_write(ivc);
+ if (err < 0)
+ return err;
+
+ tegra_ivc_flush_frame(ivc, ivc->tx.phys, ivc->tx.position, 0,
+ ivc->frame_size);
+
+ /*
+ * Order any possible stores to the frame before update of
+ * ivc->tx.position.
+ */
+ smp_wmb();
+
+ tegra_ivc_advance_tx(ivc);
+ tegra_ivc_flush(ivc, ivc->tx.phys + tx);
+
+ /*
+ * Ensure our write to ivc->tx.position occurs before our read from
+ * ivc->rx.position.
+ */
+ smp_mb();
+
+ /*
+ * Notify only upon transition from empty to non-empty. The available
+ * count can only asynchronously decrease, so the worst possible
+ * side-effect will be a spurious notification.
+ */
+ tegra_ivc_invalidate(ivc, ivc->tx.phys + rx);
+
+ if (tegra_ivc_available(ivc, ivc->tx.channel) == 1)
+ ivc->notify(ivc, ivc->notify_data);
+
+ return 0;
+}
+EXPORT_SYMBOL(tegra_ivc_write_advance);
+
+void tegra_ivc_reset(struct tegra_ivc *ivc)
+{
+ unsigned int offset = offsetof(struct tegra_ivc_header, tx.count);
+
+ ivc->tx.channel->tx.state = TEGRA_IVC_STATE_SYNC;
+ tegra_ivc_flush(ivc, ivc->tx.phys + offset);
+ ivc->notify(ivc, ivc->notify_data);
+}
+EXPORT_SYMBOL(tegra_ivc_reset);
+
+/*
+ * =======================================================
+ * IVC State Transition Table - see tegra_ivc_notified()
+ * =======================================================
+ *
+ * local remote action
+ * ----- ------ -----------------------------------
+ * SYNC EST <none>
+ * SYNC ACK reset counters; move to EST; notify
+ * SYNC SYNC reset counters; move to ACK; notify
+ * ACK EST move to EST; notify
+ * ACK ACK move to EST; notify
+ * ACK SYNC reset counters; move to ACK; notify
+ * EST EST <none>
+ * EST ACK <none>
+ * EST SYNC reset counters; move to ACK; notify
+ *
+ * ===============================================================
+ */
+
+int tegra_ivc_notified(struct tegra_ivc *ivc)
+{
+ unsigned int offset = offsetof(struct tegra_ivc_header, tx.count);
+ enum tegra_ivc_state state;
+
+ /* Copy the receiver's state out of shared memory. */
+ tegra_ivc_invalidate(ivc, ivc->rx.phys + offset);
+ state = ACCESS_ONCE(ivc->rx.channel->tx.state);
+
+ if (state == TEGRA_IVC_STATE_SYNC) {
+ offset = offsetof(struct tegra_ivc_header, tx.count);
+
+ /*
+ * Order observation of TEGRA_IVC_STATE_SYNC before stores
+ * clearing tx.channel.
+ */
+ smp_rmb();
+
+ /*
+ * Reset tx.channel counters. The remote end is in the SYNC
+ * state and won't make progress until we change our state,
+ * so the counters are not in use at this time.
+ */
+ ivc->tx.channel->tx.count = 0;
+ ivc->rx.channel->rx.count = 0;
+
+ ivc->tx.position = 0;
+ ivc->rx.position = 0;
+
+ /*
+ * Ensure that counters appear cleared before new state can be
+ * observed.
+ */
+ smp_wmb();
+
+ /*
+ * Move to ACK state. We have just cleared our counters, so it
+ * is now safe for the remote end to start using these values.
+ */
+ ivc->tx.channel->tx.state = TEGRA_IVC_STATE_ACK;
+ tegra_ivc_flush(ivc, ivc->tx.phys + offset);
+
+ /*
+ * Notify remote end to observe state transition.
+ */
+ ivc->notify(ivc, ivc->notify_data);
+
+ } else if (ivc->tx.channel->tx.state == TEGRA_IVC_STATE_SYNC &&
+ state == TEGRA_IVC_STATE_ACK) {
+ offset = offsetof(struct tegra_ivc_header, tx.count);
+
+ /*
+ * Order observation of TEGRA_IVC_STATE_SYNC before stores clearing
+ * tx.channel.
+ */
+ smp_rmb();
+
+ /*
+ * Reset tx.channel counters. The remote end is in the ACK
+ * state and won't make progress until we change our state,
+ * so the counters are not in use at this time.
+ */
+ ivc->tx.channel->tx.count = 0;
+ ivc->rx.channel->rx.count = 0;
+
+ ivc->tx.position = 0;
+ ivc->rx.position = 0;
+
+ /*
+ * Ensure that counters appear cleared before new state can be
+ * observed.
+ */
+ smp_wmb();
+
+ /*
+ * Move to ESTABLISHED state. We know that the remote end has
+ * already cleared its counters, so it is safe to start
+ * writing/reading on this channel.
+ */
+ ivc->tx.channel->tx.state = TEGRA_IVC_STATE_ESTABLISHED;
+ tegra_ivc_flush(ivc, ivc->tx.phys + offset);
+
+ /*
+ * Notify remote end to observe state transition.
+ */
+ ivc->notify(ivc, ivc->notify_data);
+
+ } else if (ivc->tx.channel->tx.state == TEGRA_IVC_STATE_ACK) {
+ offset = offsetof(struct tegra_ivc_header, tx.count);
+
+ /*
+ * At this point, we have observed the peer to be in either
+ * the ACK or ESTABLISHED state. Next, order observation of
+ * peer state before storing to tx.channel.
+ */
+ smp_rmb();
+
+ /*
+ * Move to ESTABLISHED state. We know that we have previously
+ * cleared our counters, and we know that the remote end has
+ * cleared its counters, so it is safe to start writing/reading
+ * on this channel.
+ */
+ ivc->tx.channel->tx.state = TEGRA_IVC_STATE_ESTABLISHED;
+ tegra_ivc_flush(ivc, ivc->tx.phys + offset);
+
+ /*
+ * Notify remote end to observe state transition.
+ */
+ ivc->notify(ivc, ivc->notify_data);
+
+ } else {
+ /*
+ * There is no need to handle any further action. Either the
+ * channel is already fully established, or we are waiting for
+ * the remote end to catch up with our current state. Refer
+ * to the diagram in "IVC State Transition Table" above.
+ */
+ }
+
+ if (ivc->tx.channel->tx.state != TEGRA_IVC_STATE_ESTABLISHED)
+ return -EAGAIN;
+
+ return 0;
+}
+EXPORT_SYMBOL(tegra_ivc_notified);
+
+size_t tegra_ivc_align(size_t size)
+{
+ return ALIGN(size, TEGRA_IVC_ALIGN);
+}
+EXPORT_SYMBOL(tegra_ivc_align);
+
+unsigned tegra_ivc_total_queue_size(unsigned queue_size)
+{
+ if (!IS_ALIGNED(queue_size, TEGRA_IVC_ALIGN)) {
+ pr_err("%s: queue_size (%u) must be %u-byte aligned\n",
+ __func__, queue_size, TEGRA_IVC_ALIGN);
+ return 0;
+ }
+
+ return queue_size + sizeof(struct tegra_ivc_header);
+}
+EXPORT_SYMBOL(tegra_ivc_total_queue_size);
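+
+/*
+ * Worked example (illustrative values): for 16 frames of 64 bytes each,
+ * queue_size = 16 * 64 = 1024 bytes, which is TEGRA_IVC_ALIGN-aligned, so
+ * tegra_ivc_total_queue_size() returns 1024 + sizeof(struct
+ * tegra_ivc_header) = 1024 + 128 = 1152 bytes per direction.
+ */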
+
+static int tegra_ivc_check_params(unsigned long rx, unsigned long tx,
+ unsigned int num_frames, size_t frame_size)
+{
+ BUILD_BUG_ON(!IS_ALIGNED(offsetof(struct tegra_ivc_header, tx.count),
+ TEGRA_IVC_ALIGN));
+ BUILD_BUG_ON(!IS_ALIGNED(offsetof(struct tegra_ivc_header, rx.count),
+ TEGRA_IVC_ALIGN));
+ BUILD_BUG_ON(!IS_ALIGNED(sizeof(struct tegra_ivc_header),
+ TEGRA_IVC_ALIGN));
+
+ if ((uint64_t)num_frames * (uint64_t)frame_size >= 0x100000000UL) {
+ pr_err("num_frames * frame_size overflows\n");
+ return -EINVAL;
+ }
+
+ if (!IS_ALIGNED(frame_size, TEGRA_IVC_ALIGN)) {
+ pr_err("frame size not adequately aligned: %zu\n", frame_size);
+ return -EINVAL;
+ }
+
+ /*
+ * The headers must at least be aligned enough for counters
+ * to be accessed atomically.
+ */
+ if (!IS_ALIGNED(rx, TEGRA_IVC_ALIGN)) {
+ pr_err("IVC channel start not aligned: %#lx\n", rx);
+ return -EINVAL;
+ }
+
+ if (!IS_ALIGNED(tx, TEGRA_IVC_ALIGN)) {
+ pr_err("IVC channel start not aligned: %#lx\n", tx);
+ return -EINVAL;
+ }
+
+ if (rx < tx) {
+ if (rx + frame_size * num_frames > tx) {
+ pr_err("queue regions overlap: %#lx + %zx > %#lx\n",
+ rx, frame_size * num_frames, tx);
+ return -EINVAL;
+ }
+ } else {
+ if (tx + frame_size * num_frames > rx) {
+ pr_err("queue regions overlap: %#lx + %zx > %#lx\n",
+ tx, frame_size * num_frames, rx);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+int tegra_ivc_init(struct tegra_ivc *ivc, struct device *peer, void *rx,
+ dma_addr_t rx_phys, void *tx, dma_addr_t tx_phys,
+ unsigned int num_frames, size_t frame_size,
+ void (*notify)(struct tegra_ivc *ivc, void *data),
+ void *data)
+{
+ size_t queue_size;
+ int err;
+
+ if (WARN_ON(!ivc || !notify))
+ return -EINVAL;
+
+ /*
+ * All sizes that can be returned by communication functions should
+ * fit in an int.
+ */
+ if (frame_size > INT_MAX)
+ return -E2BIG;
+
+ err = tegra_ivc_check_params((unsigned long)rx, (unsigned long)tx,
+ num_frames, frame_size);
+ if (err < 0)
+ return err;
+
+ queue_size = tegra_ivc_total_queue_size(num_frames * frame_size);
+
+ if (peer) {
+ ivc->rx.phys = dma_map_single(peer, rx, queue_size,
+ DMA_BIDIRECTIONAL);
+ if (ivc->rx.phys == DMA_ERROR_CODE)
+ return -ENOMEM;
+
+ ivc->tx.phys = dma_map_single(peer, tx, queue_size,
+ DMA_BIDIRECTIONAL);
+ if (ivc->tx.phys == DMA_ERROR_CODE) {
+ dma_unmap_single(peer, ivc->rx.phys, queue_size,
+ DMA_BIDIRECTIONAL);
+ return -ENOMEM;
+ }
+ } else {
+ ivc->rx.phys = rx_phys;
+ ivc->tx.phys = tx_phys;
+ }
+
+ ivc->rx.channel = rx;
+ ivc->tx.channel = tx;
+ ivc->peer = peer;
+ ivc->notify = notify;
+ ivc->notify_data = data;
+ ivc->frame_size = frame_size;
+ ivc->num_frames = num_frames;
+
+ /*
+ * These values aren't necessarily correct until the channel has been
+ * reset.
+ */
+ ivc->tx.position = 0;
+ ivc->rx.position = 0;
+
+ return 0;
+}
+EXPORT_SYMBOL(tegra_ivc_init);
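+
+/*
+ * A minimal usage sketch (hypothetical caller; the names and sizes below
+ * are assumptions for illustration, not part of this API beyond the calls
+ * shown):
+ *
+ *	static void my_notify(struct tegra_ivc *ivc, void *data)
+ *	{
+ *		... ring a doorbell so the remote end gets an interrupt ...
+ *	}
+ *
+ *	err = tegra_ivc_init(&ivc, NULL, rx, rx_phys, tx, tx_phys,
+ *			     16, 64, my_notify, NULL);
+ *	if (err < 0)
+ *		return err;
+ *
+ *	tegra_ivc_reset(&ivc);
+ *
+ * The notification handler then calls tegra_ivc_notified() until it
+ * returns 0, after which frames can be exchanged with the
+ * tegra_ivc_{read,write}_get_next_frame() and *_advance() helpers.
+ */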
+
+void tegra_ivc_cleanup(struct tegra_ivc *ivc)
+{
+ if (ivc->peer) {
+ size_t size = tegra_ivc_total_queue_size(ivc->num_frames *
+ ivc->frame_size);
+
+ dma_unmap_single(ivc->peer, ivc->rx.phys, size,
+ DMA_BIDIRECTIONAL);
+ dma_unmap_single(ivc->peer, ivc->tx.phys, size,
+ DMA_BIDIRECTIONAL);
+ }
+}
+EXPORT_SYMBOL(tegra_ivc_cleanup);
diff --git a/drivers/firmware/ti_sci.c b/drivers/firmware/ti_sci.c
new file mode 100644
index 000000000000..874ff32db366
--- /dev/null
+++ b/drivers/firmware/ti_sci.c
@@ -0,0 +1,1991 @@
+/*
+ * Texas Instruments System Control Interface Protocol Driver
+ *
+ * Copyright (C) 2015-2016 Texas Instruments Incorporated - http://www.ti.com/
+ * Nishanth Menon
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/bitmap.h>
+#include <linux/debugfs.h>
+#include <linux/export.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/mailbox_client.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/semaphore.h>
+#include <linux/slab.h>
+#include <linux/soc/ti/ti-msgmgr.h>
+#include <linux/soc/ti/ti_sci_protocol.h>
+#include <linux/reboot.h>
+
+#include "ti_sci.h"
+
+/* List of all TI SCI devices active in system */
+static LIST_HEAD(ti_sci_list);
+/* Protection for the entire list */
+static DEFINE_MUTEX(ti_sci_list_mutex);
+
+/**
+ * struct ti_sci_xfer - Structure representing a message flow
+ * @tx_message: Transmit message
+ * @rx_len: Receive message length
+ * @xfer_buf: Preallocated buffer to store receive message
+ * Since we work with a request-ACK protocol, we can
+ * reuse the same buffer for the rx path that we
+ * use for the tx path.
+ * @done: completion event
+ */
+struct ti_sci_xfer {
+ struct ti_msgmgr_message tx_message;
+ u8 rx_len;
+ u8 *xfer_buf;
+ struct completion done;
+};
+
+/**
+ * struct ti_sci_xfers_info - Structure to manage transfer information
+ * @sem_xfer_count: Counting semaphore for managing the maximum number
+ * of simultaneous messages.
+ * @xfer_block: Preallocated Message array
+ * @xfer_alloc_table: Bitmap table for allocated messages.
+ * Index of this bitmap table is also used for message
+ * sequence identifier.
+ * @xfer_lock: Protection for message allocation
+ */
+struct ti_sci_xfers_info {
+ struct semaphore sem_xfer_count;
+ struct ti_sci_xfer *xfer_block;
+ unsigned long *xfer_alloc_table;
+ /* protect transfer allocation */
+ spinlock_t xfer_lock;
+};
+
+/**
+ * struct ti_sci_desc - Description of SoC integration
+ * @host_id: Host identifier representing the compute entity
+ * @max_rx_timeout_ms: Timeout for communication with SoC (in milliseconds)
+ * @max_msgs: Maximum number of messages that can be pending
+ * simultaneously in the system
+ * @max_msg_size: Maximum size of data per message that can be handled.
+ */
+struct ti_sci_desc {
+ u8 host_id;
+ int max_rx_timeout_ms;
+ int max_msgs;
+ int max_msg_size;
+};
+
+/**
+ * struct ti_sci_info - Structure representing a TI SCI instance
+ * @dev: Device pointer
+ * @nb: Reboot Notifier block
+ * @desc: SoC description for this instance
+ * @d: Debugfs file entry
+ * @debug_region: Memory region where the debug messages are available
+ * @debug_buffer: Buffer allocated to copy debug messages.
+ * @debug_region_size: Debug region size
+ * @handle: Instance of TI SCI handle to send to clients.
+ * @cl: Mailbox Client
+ * @chan_tx: Transmit mailbox channel
+ * @chan_rx: Receive mailbox channel
+ * @minfo: Message info
+ * @node: list head
+ * @users: Number of users of this instance
+ */
+struct ti_sci_info {
+ struct device *dev;
+ struct notifier_block nb;
+ const struct ti_sci_desc *desc;
+ struct dentry *d;
+ void __iomem *debug_region;
+ char *debug_buffer;
+ size_t debug_region_size;
+ struct ti_sci_handle handle;
+ struct mbox_client cl;
+ struct mbox_chan *chan_tx;
+ struct mbox_chan *chan_rx;
+ struct ti_sci_xfers_info minfo;
+ struct list_head node;
+ /* protected by ti_sci_list_mutex */
+ int users;
+};
+
+#define cl_to_ti_sci_info(c) container_of(c, struct ti_sci_info, cl)
+#define handle_to_ti_sci_info(h) container_of(h, struct ti_sci_info, handle)
+#define reboot_to_ti_sci_info(n) container_of(n, struct ti_sci_info, nb)
+
+#ifdef CONFIG_DEBUG_FS
+
+/**
+ * ti_sci_debug_show() - Helper to dump the debug log
+ * @s: sequence file pointer
+ * @unused: unused.
+ *
+ * Return: 0
+ */
+static int ti_sci_debug_show(struct seq_file *s, void *unused)
+{
+ struct ti_sci_info *info = s->private;
+
+ memcpy_fromio(info->debug_buffer, info->debug_region,
+ info->debug_region_size);
+ /*
+ * We don't trust firmware to leave a NUL-terminated last byte (hence
+ * we have allocated 1 extra 0 byte). Since we cannot guarantee any
+ * specific data format for debug messages, we just present the data
+ * in the buffer as is - we expect the messages to be self-explanatory.
+ */
+ seq_puts(s, info->debug_buffer);
+ return 0;
+}
+
+/**
+ * ti_sci_debug_open() - debug file open
+ * @inode: inode pointer
+ * @file: file pointer
+ *
+ * Return: result of single_open
+ */
+static int ti_sci_debug_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, ti_sci_debug_show, inode->i_private);
+}
+
+/* log file operations */
+static const struct file_operations ti_sci_debug_fops = {
+ .open = ti_sci_debug_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+/**
+ * ti_sci_debugfs_create() - Create log debug file
+ * @pdev: platform device pointer
+ * @info: Pointer to SCI entity information
+ *
+ * Return: 0 if all went fine, else corresponding error.
+ */
+static int ti_sci_debugfs_create(struct platform_device *pdev,
+ struct ti_sci_info *info)
+{
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+ char debug_name[50] = "ti_sci_debug@";
+
+ /* Debug region is optional */
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "debug_messages");
+ info->debug_region = devm_ioremap_resource(dev, res);
+ if (IS_ERR(info->debug_region))
+ return 0;
+ info->debug_region_size = resource_size(res);
+
+ info->debug_buffer = devm_kcalloc(dev, info->debug_region_size + 1,
+ sizeof(char), GFP_KERNEL);
+ if (!info->debug_buffer)
+ return -ENOMEM;
+ /* Set up NUL termination */
+ info->debug_buffer[info->debug_region_size] = 0;
+
+ info->d = debugfs_create_file(strncat(debug_name, dev_name(dev),
+ sizeof(debug_name) -
+ strlen(debug_name) - 1),
+ 0444, NULL, info, &ti_sci_debug_fops);
+ if (IS_ERR(info->d))
+ return PTR_ERR(info->d);
+
+ dev_dbg(dev, "Debug region => %p, size = %zu bytes, resource: %pr\n",
+ info->debug_region, info->debug_region_size, res);
+ return 0;
+}
+
+/**
+ * ti_sci_debugfs_destroy() - clean up log debug file
+ * @pdev: platform device pointer
+ * @info: Pointer to SCI entity information
+ */
+static void ti_sci_debugfs_destroy(struct platform_device *pdev,
+ struct ti_sci_info *info)
+{
+ if (IS_ERR(info->debug_region))
+ return;
+
+ debugfs_remove(info->d);
+}
+#else /* CONFIG_DEBUG_FS */
+static inline int ti_sci_debugfs_create(struct platform_device *dev,
+ struct ti_sci_info *info)
+{
+ return 0;
+}
+
+static inline void ti_sci_debugfs_destroy(struct platform_device *dev,
+ struct ti_sci_info *info)
+{
+}
+#endif /* CONFIG_DEBUG_FS */
+
+/**
+ * ti_sci_dump_header_dbg() - Helper to dump a message header.
+ * @dev: Device pointer corresponding to the SCI entity
+ * @hdr: pointer to header.
+ */
+static inline void ti_sci_dump_header_dbg(struct device *dev,
+ struct ti_sci_msg_hdr *hdr)
+{
+ dev_dbg(dev, "MSGHDR:type=0x%04x host=0x%02x seq=0x%02x flags=0x%08x\n",
+ hdr->type, hdr->host, hdr->seq, hdr->flags);
+}
+
+/**
+ * ti_sci_rx_callback() - mailbox client callback for receive messages
+ * @cl: client pointer
+ * @m: mailbox message
+ *
+ * Processes one received message to appropriate transfer information and
+ * signals completion of the transfer.
+ *
+ * NOTE: This function will be invoked in IRQ context, hence it should
+ * do as little work as possible.
+ */
+static void ti_sci_rx_callback(struct mbox_client *cl, void *m)
+{
+ struct ti_sci_info *info = cl_to_ti_sci_info(cl);
+ struct device *dev = info->dev;
+ struct ti_sci_xfers_info *minfo = &info->minfo;
+ struct ti_msgmgr_message *mbox_msg = m;
+ struct ti_sci_msg_hdr *hdr = (struct ti_sci_msg_hdr *)mbox_msg->buf;
+ struct ti_sci_xfer *xfer;
+ u8 xfer_id;
+
+ xfer_id = hdr->seq;
+
+ /*
+ * Are we even expecting this?
+ * NOTE: barriers were implicit in locks used for modifying the bitmap
+ */
+ if (!test_bit(xfer_id, minfo->xfer_alloc_table)) {
+ dev_err(dev, "Message for %d is not expected!\n", xfer_id);
+ return;
+ }
+
+ xfer = &minfo->xfer_block[xfer_id];
+
+ /* Is the message of valid length? */
+ if (mbox_msg->len > info->desc->max_msg_size) {
+ dev_err(dev, "Unable to handle %d xfer(max %d)\n",
+ mbox_msg->len, info->desc->max_msg_size);
+ ti_sci_dump_header_dbg(dev, hdr);
+ return;
+ }
+ if (mbox_msg->len < xfer->rx_len) {
+ dev_err(dev, "Recv xfer %d < expected %d length\n",
+ mbox_msg->len, xfer->rx_len);
+ ti_sci_dump_header_dbg(dev, hdr);
+ return;
+ }
+
+ ti_sci_dump_header_dbg(dev, hdr);
+ /* Take a copy to the rx buffer.. */
+ memcpy(xfer->xfer_buf, mbox_msg->buf, xfer->rx_len);
+ complete(&xfer->done);
+}
+
+/**
+ * ti_sci_get_one_xfer() - Allocate one message
+ * @info: Pointer to SCI entity information
+ * @msg_type: Message type
+ * @msg_flags: Flag to set for the message
+ * @tx_message_size: transmit message size
+ * @rx_message_size: receive message size
+ *
+ * Helper function which is used by various command functions that are
+ * exposed to clients of this driver for allocating a message traffic event.
+ *
+ * This function can sleep depending on pending requests already in the system
+ * for the SCI entity. Further, this also holds a spinlock to maintain integrity
+ * of internal data structures.
+ *
+ * Return: Valid ti_sci_xfer on success, else an ERR_PTR-encoded error.
+ */
+static struct ti_sci_xfer *ti_sci_get_one_xfer(struct ti_sci_info *info,
+ u16 msg_type, u32 msg_flags,
+ size_t tx_message_size,
+ size_t rx_message_size)
+{
+ struct ti_sci_xfers_info *minfo = &info->minfo;
+ struct ti_sci_xfer *xfer;
+ struct ti_sci_msg_hdr *hdr;
+ unsigned long flags;
+ unsigned long bit_pos;
+ u8 xfer_id;
+ int ret;
+ int timeout;
+
+ /* Ensure we have sane transfer sizes */
+ if (rx_message_size > info->desc->max_msg_size ||
+ tx_message_size > info->desc->max_msg_size ||
+ rx_message_size < sizeof(*hdr) || tx_message_size < sizeof(*hdr))
+ return ERR_PTR(-ERANGE);
+
+ /*
+ * Ensure we have only controlled number of pending messages.
+ * Ideally, we might just have to wait for a single message; be
+ * conservative and wait 5 times that.
+ */
+ timeout = msecs_to_jiffies(info->desc->max_rx_timeout_ms) * 5;
+ ret = down_timeout(&minfo->sem_xfer_count, timeout);
+ if (ret < 0)
+ return ERR_PTR(ret);
+
+ /* Keep the locked section as small as possible */
+ spin_lock_irqsave(&minfo->xfer_lock, flags);
+ bit_pos = find_first_zero_bit(minfo->xfer_alloc_table,
+ info->desc->max_msgs);
+ set_bit(bit_pos, minfo->xfer_alloc_table);
+ spin_unlock_irqrestore(&minfo->xfer_lock, flags);
+
+ /*
+ * We already ensured in probe that we have no more messages than can
+ * fit in hdr.seq - NOTE: this gives us predictable O(1) access
+ * latencies, BUT it opens us to risk if the remote misbehaves with
+ * corrupted message sequence responses. If that happens, we are
+ * going to be messed up anyway.
+ */
+ xfer_id = (u8)bit_pos;
+
+ xfer = &minfo->xfer_block[xfer_id];
+
+ hdr = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;
+ xfer->tx_message.len = tx_message_size;
+ xfer->rx_len = (u8)rx_message_size;
+
+ reinit_completion(&xfer->done);
+
+ hdr->seq = xfer_id;
+ hdr->type = msg_type;
+ hdr->host = info->desc->host_id;
+ hdr->flags = msg_flags;
+
+ return xfer;
+}
+
+/**
+ * ti_sci_put_one_xfer() - Release a message
+ * @minfo: transfer info pointer
+ * @xfer: message that was reserved by ti_sci_get_one_xfer
+ *
+ * This holds a spinlock to maintain integrity of internal data structures.
+ */
+static void ti_sci_put_one_xfer(struct ti_sci_xfers_info *minfo,
+ struct ti_sci_xfer *xfer)
+{
+ unsigned long flags;
+ struct ti_sci_msg_hdr *hdr;
+ u8 xfer_id;
+
+ hdr = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;
+ xfer_id = hdr->seq;
+
+ /*
+ * Keep the locked section as small as possible
+ * NOTE: we might be able to get away with an smp_mb() and no lock
+ * here, but just be conservative and symmetric.
+ */
+ spin_lock_irqsave(&minfo->xfer_lock, flags);
+ clear_bit(xfer_id, minfo->xfer_alloc_table);
+ spin_unlock_irqrestore(&minfo->xfer_lock, flags);
+
+ /* Increment the count for the next user to get through */
+ up(&minfo->sem_xfer_count);
+}
+
+/**
+ * ti_sci_do_xfer() - Do one transfer
+ * @info: Pointer to SCI entity information
+ * @xfer: Transfer to initiate and wait for response
+ *
+ * Return: -ETIMEDOUT in case of no response, if transmit error,
+ * return corresponding error, else if all goes well,
+ * return 0.
+ */
+static inline int ti_sci_do_xfer(struct ti_sci_info *info,
+ struct ti_sci_xfer *xfer)
+{
+ int ret;
+ int timeout;
+ struct device *dev = info->dev;
+
+ ret = mbox_send_message(info->chan_tx, &xfer->tx_message);
+ if (ret < 0)
+ return ret;
+
+ ret = 0;
+
+ /* And we wait for the response. */
+ timeout = msecs_to_jiffies(info->desc->max_rx_timeout_ms);
+ if (!wait_for_completion_timeout(&xfer->done, timeout)) {
+ dev_err(dev, "Mbox timedout in resp(caller: %pF)\n",
+ (void *)_RET_IP_);
+ ret = -ETIMEDOUT;
+ }
+ /*
+ * NOTE: we might prefer not to need the mailbox ticker to manage the
+ * transfer queueing since the protocol layer queues things by itself.
+ * Unfortunately, we have to kick the mailbox framework after we have
+ * received our message.
+ */
+ mbox_client_txdone(info->chan_tx, ret);
+
+ return ret;
+}
+
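+/*
+ * Illustrative command flow (a sketch of the pattern that the
+ * ti_sci_cmd_*() helpers below all follow; TI_SCI_MSG_EXAMPLE is a
+ * hypothetical message type used only for this comment):
+ *
+ *	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_EXAMPLE,
+ *				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
+ *				   sizeof(*req), sizeof(*resp));
+ *	req = (struct ... *)xfer->xfer_buf;
+ *	... fill in request fields ...
+ *	ret = ti_sci_do_xfer(info, xfer);
+ *	resp = (struct ... *)xfer->xfer_buf;
+ *	ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;
+ *	ti_sci_put_one_xfer(&info->minfo, xfer);
+ */
+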
+/**
+ * ti_sci_cmd_get_revision() - command to get the revision of the SCI entity
+ * @info: Pointer to SCI entity information
+ *
+ * Updates the SCI information in the internal data structure.
+ *
+ * Return: 0 if all went fine, else return appropriate error.
+ */
+static int ti_sci_cmd_get_revision(struct ti_sci_info *info)
+{
+ struct device *dev = info->dev;
+ struct ti_sci_handle *handle = &info->handle;
+ struct ti_sci_version_info *ver = &handle->version;
+ struct ti_sci_msg_resp_version *rev_info;
+ struct ti_sci_xfer *xfer;
+ int ret;
+
+ /* No need to set up flags since it is expected to respond */
+ xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_VERSION,
+ 0x0, sizeof(struct ti_sci_msg_hdr),
+ sizeof(*rev_info));
+ if (IS_ERR(xfer)) {
+ ret = PTR_ERR(xfer);
+ dev_err(dev, "Message alloc failed(%d)\n", ret);
+ return ret;
+ }
+
+ rev_info = (struct ti_sci_msg_resp_version *)xfer->xfer_buf;
+
+ ret = ti_sci_do_xfer(info, xfer);
+ if (ret) {
+ dev_err(dev, "Mbox send fail %d\n", ret);
+ goto fail;
+ }
+
+ ver->abi_major = rev_info->abi_major;
+ ver->abi_minor = rev_info->abi_minor;
+ ver->firmware_revision = rev_info->firmware_revision;
+ strncpy(ver->firmware_description, rev_info->firmware_description,
+ sizeof(ver->firmware_description));
+
+fail:
+ ti_sci_put_one_xfer(&info->minfo, xfer);
+ return ret;
+}
+
+/**
+ * ti_sci_is_response_ack() - Generic ACK/NACK message checkup
+ * @r: pointer to response buffer
+ *
+ * Return: true if the response was an ACK, else returns false.
+ */
+static inline bool ti_sci_is_response_ack(void *r)
+{
+ struct ti_sci_msg_hdr *hdr = r;
+
+ return hdr->flags & TI_SCI_FLAG_RESP_GENERIC_ACK ? true : false;
+}
+
+/**
+ * ti_sci_set_device_state() - Set device state helper
+ * @handle: pointer to TI SCI handle
+ * @id: Device identifier
+ * @flags: flags to setup for the device
+ * @state: State to move the device to
+ *
+ * Return: 0 if all went well, else returns appropriate error value.
+ */
+static int ti_sci_set_device_state(const struct ti_sci_handle *handle,
+ u32 id, u32 flags, u8 state)
+{
+ struct ti_sci_info *info;
+ struct ti_sci_msg_req_set_device_state *req;
+ struct ti_sci_msg_hdr *resp;
+ struct ti_sci_xfer *xfer;
+ struct device *dev;
+ int ret = 0;
+
+ if (IS_ERR(handle))
+ return PTR_ERR(handle);
+ if (!handle)
+ return -EINVAL;
+
+ info = handle_to_ti_sci_info(handle);
+ dev = info->dev;
+
+ xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_SET_DEVICE_STATE,
+ flags | TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
+ sizeof(*req), sizeof(*resp));
+ if (IS_ERR(xfer)) {
+ ret = PTR_ERR(xfer);
+ dev_err(dev, "Message alloc failed(%d)\n", ret);
+ return ret;
+ }
+ req = (struct ti_sci_msg_req_set_device_state *)xfer->xfer_buf;
+ req->id = id;
+ req->state = state;
+
+ ret = ti_sci_do_xfer(info, xfer);
+ if (ret) {
+ dev_err(dev, "Mbox send fail %d\n", ret);
+ goto fail;
+ }
+
+ resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
+
+ ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;
+
+fail:
+ ti_sci_put_one_xfer(&info->minfo, xfer);
+
+ return ret;
+}
+
+/**
+ * ti_sci_get_device_state() - Get device state helper
+ * @handle: Handle to the device
+ * @id: Device Identifier
+ * @clcnt: Pointer to Context Loss Count
+ * @resets: pointer to resets
+ * @p_state: pointer to p_state
+ * @c_state: pointer to c_state
+ *
+ * Return: 0 if all went fine, else return appropriate error.
+ */
+static int ti_sci_get_device_state(const struct ti_sci_handle *handle,
+ u32 id, u32 *clcnt, u32 *resets,
+ u8 *p_state, u8 *c_state)
+{
+ struct ti_sci_info *info;
+ struct ti_sci_msg_req_get_device_state *req;
+ struct ti_sci_msg_resp_get_device_state *resp;
+ struct ti_sci_xfer *xfer;
+ struct device *dev;
+ int ret = 0;
+
+ if (IS_ERR(handle))
+ return PTR_ERR(handle);
+ if (!handle)
+ return -EINVAL;
+
+ if (!clcnt && !resets && !p_state && !c_state)
+ return -EINVAL;
+
+ info = handle_to_ti_sci_info(handle);
+ dev = info->dev;
+
+ /* Response is expected, so no need of any special flags */
+ xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_GET_DEVICE_STATE,
+ 0, sizeof(*req), sizeof(*resp));
+ if (IS_ERR(xfer)) {
+ ret = PTR_ERR(xfer);
+ dev_err(dev, "Message alloc failed(%d)\n", ret);
+ return ret;
+ }
+ req = (struct ti_sci_msg_req_get_device_state *)xfer->xfer_buf;
+ req->id = id;
+
+ ret = ti_sci_do_xfer(info, xfer);
+ if (ret) {
+ dev_err(dev, "Mbox send fail %d\n", ret);
+ goto fail;
+ }
+
+ resp = (struct ti_sci_msg_resp_get_device_state *)xfer->xfer_buf;
+ if (!ti_sci_is_response_ack(resp)) {
+ ret = -ENODEV;
+ goto fail;
+ }
+
+ if (clcnt)
+ *clcnt = resp->context_loss_count;
+ if (resets)
+ *resets = resp->resets;
+ if (p_state)
+ *p_state = resp->programmed_state;
+ if (c_state)
+ *c_state = resp->current_state;
+fail:
+ ti_sci_put_one_xfer(&info->minfo, xfer);
+
+ return ret;
+}
+
+/**
+ * ti_sci_cmd_get_device() - command to request for device managed by TISCI
+ * @handle: Pointer to TISCI handle as retrieved by *ti_sci_get_handle
+ * @id: Device Identifier
+ *
+ * Request for the device - NOTE: the client MUST maintain integrity of
+ * usage count by balancing get_device with put_device. No refcounting is
+ * managed by driver for that purpose.
+ *
+ * NOTE: The request is for exclusive access for the processor.
+ *
+ * Return: 0 if all went fine, else return appropriate error.
+ */
+static int ti_sci_cmd_get_device(const struct ti_sci_handle *handle, u32 id)
+{
+ return ti_sci_set_device_state(handle, id,
+ MSG_FLAG_DEVICE_EXCLUSIVE,
+ MSG_DEVICE_SW_STATE_ON);
+}
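+
+/*
+ * Balanced usage sketch (hypothetical client code, for illustration of the
+ * NOTE above):
+ *
+ *	ret = ti_sci_cmd_get_device(handle, dev_id);
+ *	if (ret)
+ *		return ret;
+ *	... use the device ...
+ *	ti_sci_cmd_put_device(handle, dev_id);
+ *
+ * Every get must be balanced by a put; this driver keeps no reference
+ * count on the client's behalf.
+ */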
+
+/**
+ * ti_sci_cmd_idle_device() - Command to idle a device managed by TISCI
+ * @handle: Pointer to TISCI handle as retrieved by *ti_sci_get_handle
+ * @id: Device Identifier
+ *
+ * Request to idle the device - NOTE: the client MUST maintain integrity of
+ * usage count by balancing get_device with put_device. No refcounting is
+ * managed by driver for that purpose.
+ *
+ * Return: 0 if all went fine, else return appropriate error.
+ */
+static int ti_sci_cmd_idle_device(const struct ti_sci_handle *handle, u32 id)
+{
+ return ti_sci_set_device_state(handle, id,
+ MSG_FLAG_DEVICE_EXCLUSIVE,
+ MSG_DEVICE_SW_STATE_RETENTION);
+}
+
+/**
+ * ti_sci_cmd_put_device() - command to release a device managed by TISCI
+ * @handle: Pointer to TISCI handle as retrieved by *ti_sci_get_handle
+ * @id: Device Identifier
+ *
+ * Release the device - NOTE: the client MUST maintain integrity of
+ * usage count by balancing get_device with put_device. No refcounting is
+ * managed by driver for that purpose.
+ *
+ * Return: 0 if all went fine, else return appropriate error.
+ */
+static int ti_sci_cmd_put_device(const struct ti_sci_handle *handle, u32 id)
+{
+ return ti_sci_set_device_state(handle, id,
+ 0, MSG_DEVICE_SW_STATE_AUTO_OFF);
+}
+
+/**
+ * ti_sci_cmd_dev_is_valid() - Is the device valid
+ * @handle: Pointer to TISCI handle as retrieved by *ti_sci_get_handle
+ * @id: Device Identifier
+ *
+ * Return: 0 if all went fine and the device ID is valid, else return
+ * appropriate error.
+ */
+static int ti_sci_cmd_dev_is_valid(const struct ti_sci_handle *handle, u32 id)
+{
+ u8 unused;
+
+ /* check the device state which will also tell us if the ID is valid */
+ return ti_sci_get_device_state(handle, id, NULL, NULL, NULL, &unused);
+}
+
+/**
+ * ti_sci_cmd_dev_get_clcnt() - Get context loss counter
+ * @handle: Pointer to TISCI handle
+ * @id: Device Identifier
+ * @count: Pointer to Context Loss counter to populate
+ *
+ * Return: 0 if all went fine, else return appropriate error.
+ */
+static int ti_sci_cmd_dev_get_clcnt(const struct ti_sci_handle *handle, u32 id,
+ u32 *count)
+{
+ return ti_sci_get_device_state(handle, id, count, NULL, NULL, NULL);
+}
+
+/**
+ * ti_sci_cmd_dev_is_idle() - Check if the device is requested to be idle
+ * @handle: Pointer to TISCI handle
+ * @id: Device Identifier
+ * @r_state: true if requested to be idle
+ *
+ * Return: 0 if all went fine, else return appropriate error.
+ */
+static int ti_sci_cmd_dev_is_idle(const struct ti_sci_handle *handle, u32 id,
+ bool *r_state)
+{
+ int ret;
+ u8 state;
+
+ if (!r_state)
+ return -EINVAL;
+
+ ret = ti_sci_get_device_state(handle, id, NULL, NULL, &state, NULL);
+ if (ret)
+ return ret;
+
+ *r_state = (state == MSG_DEVICE_SW_STATE_RETENTION);
+
+ return 0;
+}
+
+/**
+ * ti_sci_cmd_dev_is_stop() - Check if the device is requested to be stopped
+ * @handle: Pointer to TISCI handle
+ * @id: Device Identifier
+ * @r_state: true if requested to be stopped
+ * @curr_state: true if currently stopped.
+ *
+ * Return: 0 if all went fine, else return appropriate error.
+ */
+static int ti_sci_cmd_dev_is_stop(const struct ti_sci_handle *handle, u32 id,
+ bool *r_state, bool *curr_state)
+{
+ int ret;
+ u8 p_state, c_state;
+
+ if (!r_state && !curr_state)
+ return -EINVAL;
+
+ ret =
+ ti_sci_get_device_state(handle, id, NULL, NULL, &p_state, &c_state);
+ if (ret)
+ return ret;
+
+ if (r_state)
+ *r_state = (p_state == MSG_DEVICE_SW_STATE_AUTO_OFF);
+ if (curr_state)
+ *curr_state = (c_state == MSG_DEVICE_HW_STATE_OFF);
+
+ return 0;
+}
+
+/**
+ * ti_sci_cmd_dev_is_on() - Check if the device is requested to be ON
+ * @handle: Pointer to TISCI handle
+ * @id: Device Identifier
+ * @r_state: true if requested to be ON
+ * @curr_state: true if currently ON and active
+ *
+ * Return: 0 if all went fine, else return appropriate error.
+ */
+static int ti_sci_cmd_dev_is_on(const struct ti_sci_handle *handle, u32 id,
+ bool *r_state, bool *curr_state)
+{
+ int ret;
+ u8 p_state, c_state;
+
+ if (!r_state && !curr_state)
+ return -EINVAL;
+
+ ret =
+ ti_sci_get_device_state(handle, id, NULL, NULL, &p_state, &c_state);
+ if (ret)
+ return ret;
+
+ if (r_state)
+ *r_state = (p_state == MSG_DEVICE_SW_STATE_ON);
+ if (curr_state)
+ *curr_state = (c_state == MSG_DEVICE_HW_STATE_ON);
+
+ return 0;
+}
+
+/**
+ * ti_sci_cmd_dev_is_trans() - Check if the device is currently transitioning
+ * @handle: Pointer to TISCI handle
+ * @id: Device Identifier
+ * @curr_state: true if currently transitioning.
+ *
+ * Return: 0 if all went fine, else return appropriate error.
+ */
+static int ti_sci_cmd_dev_is_trans(const struct ti_sci_handle *handle, u32 id,
+ bool *curr_state)
+{
+ int ret;
+ u8 state;
+
+ if (!curr_state)
+ return -EINVAL;
+
+ ret = ti_sci_get_device_state(handle, id, NULL, NULL, NULL, &state);
+ if (ret)
+ return ret;
+
+ *curr_state = (state == MSG_DEVICE_HW_STATE_TRANS);
+
+ return 0;
+}
+
+/**
+ * ti_sci_cmd_set_device_resets() - command to set resets for device managed
+ * by TISCI
+ * @handle: Pointer to TISCI handle as retrieved by *ti_sci_get_handle
+ * @id: Device Identifier
+ * @reset_state: Device specific reset bit field
+ *
+ * Return: 0 if all went fine, else return appropriate error.
+ */
+static int ti_sci_cmd_set_device_resets(const struct ti_sci_handle *handle,
+ u32 id, u32 reset_state)
+{
+ struct ti_sci_info *info;
+ struct ti_sci_msg_req_set_device_resets *req;
+ struct ti_sci_msg_hdr *resp;
+ struct ti_sci_xfer *xfer;
+ struct device *dev;
+ int ret = 0;
+
+ if (IS_ERR(handle))
+ return PTR_ERR(handle);
+ if (!handle)
+ return -EINVAL;
+
+ info = handle_to_ti_sci_info(handle);
+ dev = info->dev;
+
+ xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_SET_DEVICE_RESETS,
+ TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
+ sizeof(*req), sizeof(*resp));
+ if (IS_ERR(xfer)) {
+ ret = PTR_ERR(xfer);
+ dev_err(dev, "Message alloc failed(%d)\n", ret);
+ return ret;
+ }
+ req = (struct ti_sci_msg_req_set_device_resets *)xfer->xfer_buf;
+ req->id = id;
+ req->resets = reset_state;
+
+ ret = ti_sci_do_xfer(info, xfer);
+ if (ret) {
+ dev_err(dev, "Mbox send fail %d\n", ret);
+ goto fail;
+ }
+
+ resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
+
+ ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;
+
+fail:
+ ti_sci_put_one_xfer(&info->minfo, xfer);
+
+ return ret;
+}
+
+/**
+ * ti_sci_cmd_get_device_resets() - Get reset state for device managed
+ * by TISCI
+ * @handle: Pointer to TISCI handle
+ * @id: Device Identifier
+ * @reset_state: Pointer to reset state to populate
+ *
+ * Return: 0 if all went fine, else return appropriate error.
+ */
+static int ti_sci_cmd_get_device_resets(const struct ti_sci_handle *handle,
+ u32 id, u32 *reset_state)
+{
+ return ti_sci_get_device_state(handle, id, NULL, reset_state, NULL,
+ NULL);
+}
+
+/**
+ * ti_sci_set_clock_state() - Set clock state helper
+ * @handle: pointer to TI SCI handle
+ * @dev_id: Device identifier this request is for
+ * @clk_id: Clock identifier for the device for this request.
+ * Each device has its own set of clock inputs. This indexes
+ * which clock input to modify.
+ * @flags: Header flags as needed
+ * @state: State to request for the clock.
+ *
+ * Return: 0 if all went well, else returns appropriate error value.
+ */
+static int ti_sci_set_clock_state(const struct ti_sci_handle *handle,
+ u32 dev_id, u8 clk_id,
+ u32 flags, u8 state)
+{
+ struct ti_sci_info *info;
+ struct ti_sci_msg_req_set_clock_state *req;
+ struct ti_sci_msg_hdr *resp;
+ struct ti_sci_xfer *xfer;
+ struct device *dev;
+ int ret = 0;
+
+ if (IS_ERR(handle))
+ return PTR_ERR(handle);
+ if (!handle)
+ return -EINVAL;
+
+ info = handle_to_ti_sci_info(handle);
+ dev = info->dev;
+
+ xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_SET_CLOCK_STATE,
+ flags | TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
+ sizeof(*req), sizeof(*resp));
+ if (IS_ERR(xfer)) {
+ ret = PTR_ERR(xfer);
+ dev_err(dev, "Message alloc failed(%d)\n", ret);
+ return ret;
+ }
+ req = (struct ti_sci_msg_req_set_clock_state *)xfer->xfer_buf;
+ req->dev_id = dev_id;
+ req->clk_id = clk_id;
+ req->request_state = state;
+
+ ret = ti_sci_do_xfer(info, xfer);
+ if (ret) {
+ dev_err(dev, "Mbox send fail %d\n", ret);
+ goto fail;
+ }
+
+ resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
+
+ ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;
+
+fail:
+ ti_sci_put_one_xfer(&info->minfo, xfer);
+
+ return ret;
+}
+
+/**
+ * ti_sci_cmd_get_clock_state() - Get clock state helper
+ * @handle: pointer to TI SCI handle
+ * @dev_id: Device identifier this request is for
+ * @clk_id: Clock identifier for the device for this request.
+ * Each device has its own set of clock inputs. This indexes
+ * which clock input to modify.
+ * @programmed_state: State requested for clock to move to
+ * @current_state: State that the clock is currently in
+ *
+ * Return: 0 if all went well, else returns appropriate error value.
+ */
+static int ti_sci_cmd_get_clock_state(const struct ti_sci_handle *handle,
+ u32 dev_id, u8 clk_id,
+ u8 *programmed_state, u8 *current_state)
+{
+ struct ti_sci_info *info;
+ struct ti_sci_msg_req_get_clock_state *req;
+ struct ti_sci_msg_resp_get_clock_state *resp;
+ struct ti_sci_xfer *xfer;
+ struct device *dev;
+ int ret = 0;
+
+ if (IS_ERR(handle))
+ return PTR_ERR(handle);
+ if (!handle)
+ return -EINVAL;
+
+ if (!programmed_state && !current_state)
+ return -EINVAL;
+
+ info = handle_to_ti_sci_info(handle);
+ dev = info->dev;
+
+ xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_GET_CLOCK_STATE,
+ TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
+ sizeof(*req), sizeof(*resp));
+ if (IS_ERR(xfer)) {
+ ret = PTR_ERR(xfer);
+ dev_err(dev, "Message alloc failed(%d)\n", ret);
+ return ret;
+ }
+ req = (struct ti_sci_msg_req_get_clock_state *)xfer->xfer_buf;
+ req->dev_id = dev_id;
+ req->clk_id = clk_id;
+
+ ret = ti_sci_do_xfer(info, xfer);
+ if (ret) {
+ dev_err(dev, "Mbox send fail %d\n", ret);
+ goto fail;
+ }
+
+ resp = (struct ti_sci_msg_resp_get_clock_state *)xfer->xfer_buf;
+
+ if (!ti_sci_is_response_ack(resp)) {
+ ret = -ENODEV;
+ goto fail;
+ }
+
+ if (programmed_state)
+ *programmed_state = resp->programmed_state;
+ if (current_state)
+ *current_state = resp->current_state;
+
+fail:
+ ti_sci_put_one_xfer(&info->minfo, xfer);
+
+ return ret;
+}
+
+/**
+ * ti_sci_cmd_get_clock() - Get control of a clock from TI SCI
+ * @handle: pointer to TI SCI handle
+ * @dev_id: Device identifier this request is for
+ * @clk_id: Clock identifier for the device for this request.
+ * Each device has its own set of clock inputs. This indexes
+ * which clock input to modify.
+ * @needs_ssc: 'true' if Spread Spectrum clock is desired, else 'false'
+ * @can_change_freq: 'true' if frequency change is desired, else 'false'
+ * @enable_input_term: 'true' if input termination is desired, else 'false'
+ *
+ * Return: 0 if all went well, else returns appropriate error value.
+ */
+static int ti_sci_cmd_get_clock(const struct ti_sci_handle *handle, u32 dev_id,
+ u8 clk_id, bool needs_ssc, bool can_change_freq,
+ bool enable_input_term)
+{
+ u32 flags = 0;
+
+ flags |= needs_ssc ? MSG_FLAG_CLOCK_ALLOW_SSC : 0;
+ flags |= can_change_freq ? MSG_FLAG_CLOCK_ALLOW_FREQ_CHANGE : 0;
+ flags |= enable_input_term ? MSG_FLAG_CLOCK_INPUT_TERM : 0;
+
+ return ti_sci_set_clock_state(handle, dev_id, clk_id, flags,
+ MSG_CLOCK_SW_STATE_REQ);
+}
+
+/**
+ * ti_sci_cmd_idle_clock() - Idle a clock which is in our control
+ * @handle: pointer to TI SCI handle
+ * @dev_id: Device identifier this request is for
+ * @clk_id: Clock identifier for the device for this request.
+ * Each device has its own set of clock inputs. This indexes
+ * which clock input to modify.
+ *
+ * NOTE: This clock must have been requested by get_clock previously.
+ *
+ * Return: 0 if all went well, else returns appropriate error value.
+ */
+static int ti_sci_cmd_idle_clock(const struct ti_sci_handle *handle,
+ u32 dev_id, u8 clk_id)
+{
+ return ti_sci_set_clock_state(handle, dev_id, clk_id, 0,
+ MSG_CLOCK_SW_STATE_UNREQ);
+}
+
+/**
+ * ti_sci_cmd_put_clock() - Release a clock from our control back to TISCI
+ * @handle: pointer to TI SCI handle
+ * @dev_id: Device identifier this request is for
+ * @clk_id: Clock identifier for the device for this request.
+ * Each device has its own set of clock inputs. This indexes
+ * which clock input to modify.
+ *
+ * NOTE: This clock must have been requested by get_clock previously.
+ *
+ * Return: 0 if all went well, else returns appropriate error value.
+ */
+static int ti_sci_cmd_put_clock(const struct ti_sci_handle *handle,
+ u32 dev_id, u8 clk_id)
+{
+ return ti_sci_set_clock_state(handle, dev_id, clk_id, 0,
+ MSG_CLOCK_SW_STATE_AUTO);
+}
+
+/**
+ * ti_sci_cmd_clk_is_auto() - Is the clock being auto managed
+ * @handle: pointer to TI SCI handle
+ * @dev_id: Device identifier this request is for
+ * @clk_id: Clock identifier for the device for this request.
+ * Each device has its own set of clock inputs. This indexes
+ * which clock input to modify.
+ * @req_state: state indicating if the clock is auto managed
+ *
+ * Return: 0 if all went well, else returns appropriate error value.
+ */
+static int ti_sci_cmd_clk_is_auto(const struct ti_sci_handle *handle,
+ u32 dev_id, u8 clk_id, bool *req_state)
+{
+ u8 state = 0;
+ int ret;
+
+ if (!req_state)
+ return -EINVAL;
+
+ ret = ti_sci_cmd_get_clock_state(handle, dev_id, clk_id, &state, NULL);
+ if (ret)
+ return ret;
+
+ *req_state = (state == MSG_CLOCK_SW_STATE_AUTO);
+ return 0;
+}
+
+/**
+ * ti_sci_cmd_clk_is_on() - Is the clock ON
+ * @handle: pointer to TI SCI handle
+ * @dev_id: Device identifier this request is for
+ * @clk_id: Clock identifier for the device for this request.
+ * Each device has its own set of clock inputs. This indexes
+ * which clock input to modify.
+ * @req_state: state indicating if the clock is managed by us and enabled
+ * @curr_state: state indicating if the clock is ready for operation
+ *
+ * Return: 0 if all went well, else returns appropriate error value.
+ */
+static int ti_sci_cmd_clk_is_on(const struct ti_sci_handle *handle, u32 dev_id,
+ u8 clk_id, bool *req_state, bool *curr_state)
+{
+ u8 c_state = 0, r_state = 0;
+ int ret;
+
+ if (!req_state && !curr_state)
+ return -EINVAL;
+
+ ret = ti_sci_cmd_get_clock_state(handle, dev_id, clk_id,
+ &r_state, &c_state);
+ if (ret)
+ return ret;
+
+ if (req_state)
+ *req_state = (r_state == MSG_CLOCK_SW_STATE_REQ);
+ if (curr_state)
+ *curr_state = (c_state == MSG_CLOCK_HW_STATE_READY);
+ return 0;
+}
+
+/**
+ * ti_sci_cmd_clk_is_off() - Is the clock OFF
+ * @handle: pointer to TI SCI handle
+ * @dev_id: Device identifier this request is for
+ * @clk_id: Clock identifier for the device for this request.
+ * Each device has its own set of clock inputs. This indexes
+ * which clock input to modify.
+ * @req_state: state indicating if the clock is managed by us and disabled
+ * @curr_state: state indicating if the clock is NOT ready for operation
+ *
+ * Return: 0 if all went well, else returns appropriate error value.
+ */
+static int ti_sci_cmd_clk_is_off(const struct ti_sci_handle *handle, u32 dev_id,
+ u8 clk_id, bool *req_state, bool *curr_state)
+{
+ u8 c_state = 0, r_state = 0;
+ int ret;
+
+ if (!req_state && !curr_state)
+ return -EINVAL;
+
+ ret = ti_sci_cmd_get_clock_state(handle, dev_id, clk_id,
+ &r_state, &c_state);
+ if (ret)
+ return ret;
+
+ if (req_state)
+ *req_state = (r_state == MSG_CLOCK_SW_STATE_UNREQ);
+ if (curr_state)
+ *curr_state = (c_state == MSG_CLOCK_HW_STATE_NOT_READY);
+ return 0;
+}
+
+/**
+ * ti_sci_cmd_clk_set_parent() - Set the clock source of a specific device clock
+ * @handle: pointer to TI SCI handle
+ * @dev_id: Device identifier this request is for
+ * @clk_id: Clock identifier for the device for this request.
+ * Each device has its own set of clock inputs. This indexes
+ * which clock input to modify.
+ * @parent_id: Parent clock identifier to set
+ *
+ * Return: 0 if all went well, else returns appropriate error value.
+ */
+static int ti_sci_cmd_clk_set_parent(const struct ti_sci_handle *handle,
+ u32 dev_id, u8 clk_id, u8 parent_id)
+{
+ struct ti_sci_info *info;
+ struct ti_sci_msg_req_set_clock_parent *req;
+ struct ti_sci_msg_hdr *resp;
+ struct ti_sci_xfer *xfer;
+ struct device *dev;
+ int ret = 0;
+
+ if (IS_ERR(handle))
+ return PTR_ERR(handle);
+ if (!handle)
+ return -EINVAL;
+
+ info = handle_to_ti_sci_info(handle);
+ dev = info->dev;
+
+ xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_SET_CLOCK_PARENT,
+ TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
+ sizeof(*req), sizeof(*resp));
+ if (IS_ERR(xfer)) {
+ ret = PTR_ERR(xfer);
+ dev_err(dev, "Message alloc failed(%d)\n", ret);
+ return ret;
+ }
+ req = (struct ti_sci_msg_req_set_clock_parent *)xfer->xfer_buf;
+ req->dev_id = dev_id;
+ req->clk_id = clk_id;
+ req->parent_id = parent_id;
+
+ ret = ti_sci_do_xfer(info, xfer);
+ if (ret) {
+ dev_err(dev, "Mbox send fail %d\n", ret);
+ goto fail;
+ }
+
+ resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
+
+ ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;
+
+fail:
+ ti_sci_put_one_xfer(&info->minfo, xfer);
+
+ return ret;
+}
+
+/**
+ * ti_sci_cmd_clk_get_parent() - Get current parent clock source
+ * @handle: pointer to TI SCI handle
+ * @dev_id: Device identifier this request is for
+ * @clk_id: Clock identifier for the device for this request.
+ *		Each device has its own set of clock inputs. This indexes
+ * which clock input to modify.
+ * @parent_id: Current clock parent
+ *
+ * Return: 0 if all went well, else returns appropriate error value.
+ */
+static int ti_sci_cmd_clk_get_parent(const struct ti_sci_handle *handle,
+ u32 dev_id, u8 clk_id, u8 *parent_id)
+{
+ struct ti_sci_info *info;
+ struct ti_sci_msg_req_get_clock_parent *req;
+ struct ti_sci_msg_resp_get_clock_parent *resp;
+ struct ti_sci_xfer *xfer;
+ struct device *dev;
+ int ret = 0;
+
+ if (IS_ERR(handle))
+ return PTR_ERR(handle);
+ if (!handle || !parent_id)
+ return -EINVAL;
+
+ info = handle_to_ti_sci_info(handle);
+ dev = info->dev;
+
+ xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_GET_CLOCK_PARENT,
+ TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
+ sizeof(*req), sizeof(*resp));
+ if (IS_ERR(xfer)) {
+ ret = PTR_ERR(xfer);
+ dev_err(dev, "Message alloc failed(%d)\n", ret);
+ return ret;
+ }
+ req = (struct ti_sci_msg_req_get_clock_parent *)xfer->xfer_buf;
+ req->dev_id = dev_id;
+ req->clk_id = clk_id;
+
+ ret = ti_sci_do_xfer(info, xfer);
+ if (ret) {
+ dev_err(dev, "Mbox send fail %d\n", ret);
+ goto fail;
+ }
+
+ resp = (struct ti_sci_msg_resp_get_clock_parent *)xfer->xfer_buf;
+
+ if (!ti_sci_is_response_ack(resp))
+ ret = -ENODEV;
+ else
+ *parent_id = resp->parent_id;
+
+fail:
+ ti_sci_put_one_xfer(&info->minfo, xfer);
+
+ return ret;
+}
+
+/**
+ * ti_sci_cmd_clk_get_num_parents() - Get num parents of the current clk source
+ * @handle: pointer to TI SCI handle
+ * @dev_id: Device identifier this request is for
+ * @clk_id: Clock identifier for the device for this request.
+ *		Each device has its own set of clock inputs. This indexes
+ * which clock input to modify.
+ * @num_parents: Returns the number of parents of the current clock.
+ *
+ * Return: 0 if all went well, else returns appropriate error value.
+ */
+static int ti_sci_cmd_clk_get_num_parents(const struct ti_sci_handle *handle,
+ u32 dev_id, u8 clk_id,
+ u8 *num_parents)
+{
+ struct ti_sci_info *info;
+ struct ti_sci_msg_req_get_clock_num_parents *req;
+ struct ti_sci_msg_resp_get_clock_num_parents *resp;
+ struct ti_sci_xfer *xfer;
+ struct device *dev;
+ int ret = 0;
+
+ if (IS_ERR(handle))
+ return PTR_ERR(handle);
+ if (!handle || !num_parents)
+ return -EINVAL;
+
+ info = handle_to_ti_sci_info(handle);
+ dev = info->dev;
+
+ xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_GET_NUM_CLOCK_PARENTS,
+ TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
+ sizeof(*req), sizeof(*resp));
+ if (IS_ERR(xfer)) {
+ ret = PTR_ERR(xfer);
+ dev_err(dev, "Message alloc failed(%d)\n", ret);
+ return ret;
+ }
+ req = (struct ti_sci_msg_req_get_clock_num_parents *)xfer->xfer_buf;
+ req->dev_id = dev_id;
+ req->clk_id = clk_id;
+
+ ret = ti_sci_do_xfer(info, xfer);
+ if (ret) {
+ dev_err(dev, "Mbox send fail %d\n", ret);
+ goto fail;
+ }
+
+ resp = (struct ti_sci_msg_resp_get_clock_num_parents *)xfer->xfer_buf;
+
+ if (!ti_sci_is_response_ack(resp))
+ ret = -ENODEV;
+ else
+ *num_parents = resp->num_parents;
+
+fail:
+ ti_sci_put_one_xfer(&info->minfo, xfer);
+
+ return ret;
+}
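+
+/*
+ * Example (illustrative sketch only, not part of the driver): a client
+ * could enumerate and switch clock parents through the ops wired up in
+ * ti_sci_setup_ops() below. The function name is hypothetical.
+ */
+static int __maybe_unused example_walk_clk_parents(const struct ti_sci_handle *handle,
+						   u32 dev_id, u8 clk_id)
+{
+	const struct ti_sci_clk_ops *cops = &handle->ops.clk_ops;
+	u8 num_parents, parent;
+	int ret;
+
+	ret = cops->get_num_parents(handle, dev_id, clk_id, &num_parents);
+	if (ret)
+		return ret;
+
+	/* Read back the currently selected parent index */
+	ret = cops->get_parent(handle, dev_id, clk_id, &parent);
+	if (ret || !num_parents)
+		return ret ? ret : -ENODEV;
+
+	/* Select the last available parent, purely as a demonstration */
+	return cops->set_parent(handle, dev_id, clk_id, num_parents - 1);
+}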
+
+/**
+ * ti_sci_cmd_clk_get_match_freq() - Find a good match for frequency
+ * @handle: pointer to TI SCI handle
+ * @dev_id: Device identifier this request is for
+ * @clk_id: Clock identifier for the device for this request.
+ *		Each device has its own set of clock inputs. This indexes
+ * which clock input to modify.
+ * @min_freq: The minimum allowable frequency in Hz. This is the minimum
+ * allowable programmed frequency and does not account for clock
+ * tolerances and jitter.
+ * @target_freq:	The target clock frequency in Hz. A frequency will be
+ *		found as close to this target frequency as possible.
+ * @max_freq: The maximum allowable frequency in Hz. This is the maximum
+ * allowable programmed frequency and does not account for clock
+ * tolerances and jitter.
+ * @match_freq: Frequency match in Hz response.
+ *
+ * Return: 0 if all went well, else returns appropriate error value.
+ */
+static int ti_sci_cmd_clk_get_match_freq(const struct ti_sci_handle *handle,
+ u32 dev_id, u8 clk_id, u64 min_freq,
+ u64 target_freq, u64 max_freq,
+ u64 *match_freq)
+{
+ struct ti_sci_info *info;
+ struct ti_sci_msg_req_query_clock_freq *req;
+ struct ti_sci_msg_resp_query_clock_freq *resp;
+ struct ti_sci_xfer *xfer;
+ struct device *dev;
+ int ret = 0;
+
+ if (IS_ERR(handle))
+ return PTR_ERR(handle);
+ if (!handle || !match_freq)
+ return -EINVAL;
+
+ info = handle_to_ti_sci_info(handle);
+ dev = info->dev;
+
+ xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_QUERY_CLOCK_FREQ,
+ TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
+ sizeof(*req), sizeof(*resp));
+ if (IS_ERR(xfer)) {
+ ret = PTR_ERR(xfer);
+ dev_err(dev, "Message alloc failed(%d)\n", ret);
+ return ret;
+ }
+ req = (struct ti_sci_msg_req_query_clock_freq *)xfer->xfer_buf;
+ req->dev_id = dev_id;
+ req->clk_id = clk_id;
+ req->min_freq_hz = min_freq;
+ req->target_freq_hz = target_freq;
+ req->max_freq_hz = max_freq;
+
+ ret = ti_sci_do_xfer(info, xfer);
+ if (ret) {
+ dev_err(dev, "Mbox send fail %d\n", ret);
+ goto fail;
+ }
+
+ resp = (struct ti_sci_msg_resp_query_clock_freq *)xfer->xfer_buf;
+
+ if (!ti_sci_is_response_ack(resp))
+ ret = -ENODEV;
+ else
+ *match_freq = resp->freq_hz;
+
+fail:
+ ti_sci_put_one_xfer(&info->minfo, xfer);
+
+ return ret;
+}
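+
+/*
+ * Example (illustrative sketch only, not part of the driver): query the
+ * achievable frequency first, then program it, through the ops wired up
+ * in ti_sci_setup_ops() below. The +/- 10% window is an arbitrary choice.
+ */
+static int __maybe_unused example_set_nearest_freq(const struct ti_sci_handle *handle,
+						   u32 dev_id, u8 clk_id,
+						   u64 target)
+{
+	const struct ti_sci_clk_ops *cops = &handle->ops.clk_ops;
+	u64 min = target - target / 10;
+	u64 max = target + target / 10;
+	u64 match;
+	int ret;
+
+	ret = cops->get_best_match_freq(handle, dev_id, clk_id, min, target,
+					max, &match);
+	if (ret)
+		return ret;
+
+	/* Commit to the frequency the firmware said it can achieve */
+	return cops->set_freq(handle, dev_id, clk_id, min, match, max);
+}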
+
+/**
+ * ti_sci_cmd_clk_set_freq() - Set a frequency for clock
+ * @handle: pointer to TI SCI handle
+ * @dev_id: Device identifier this request is for
+ * @clk_id: Clock identifier for the device for this request.
+ *		Each device has its own set of clock inputs. This indexes
+ * which clock input to modify.
+ * @min_freq: The minimum allowable frequency in Hz. This is the minimum
+ * allowable programmed frequency and does not account for clock
+ * tolerances and jitter.
+ * @target_freq:	The target clock frequency in Hz. The clock will be
+ *		programmed at a rate as close to this target frequency as possible.
+ * @max_freq: The maximum allowable frequency in Hz. This is the maximum
+ * allowable programmed frequency and does not account for clock
+ * tolerances and jitter.
+ *
+ * Return: 0 if all went well, else returns appropriate error value.
+ */
+static int ti_sci_cmd_clk_set_freq(const struct ti_sci_handle *handle,
+ u32 dev_id, u8 clk_id, u64 min_freq,
+ u64 target_freq, u64 max_freq)
+{
+ struct ti_sci_info *info;
+ struct ti_sci_msg_req_set_clock_freq *req;
+ struct ti_sci_msg_hdr *resp;
+ struct ti_sci_xfer *xfer;
+ struct device *dev;
+ int ret = 0;
+
+ if (IS_ERR(handle))
+ return PTR_ERR(handle);
+ if (!handle)
+ return -EINVAL;
+
+ info = handle_to_ti_sci_info(handle);
+ dev = info->dev;
+
+ xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_SET_CLOCK_FREQ,
+ TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
+ sizeof(*req), sizeof(*resp));
+ if (IS_ERR(xfer)) {
+ ret = PTR_ERR(xfer);
+ dev_err(dev, "Message alloc failed(%d)\n", ret);
+ return ret;
+ }
+ req = (struct ti_sci_msg_req_set_clock_freq *)xfer->xfer_buf;
+ req->dev_id = dev_id;
+ req->clk_id = clk_id;
+ req->min_freq_hz = min_freq;
+ req->target_freq_hz = target_freq;
+ req->max_freq_hz = max_freq;
+
+ ret = ti_sci_do_xfer(info, xfer);
+ if (ret) {
+ dev_err(dev, "Mbox send fail %d\n", ret);
+ goto fail;
+ }
+
+ resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
+
+ ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;
+
+fail:
+ ti_sci_put_one_xfer(&info->minfo, xfer);
+
+ return ret;
+}
+
+/**
+ * ti_sci_cmd_clk_get_freq() - Get current frequency
+ * @handle: pointer to TI SCI handle
+ * @dev_id: Device identifier this request is for
+ * @clk_id: Clock identifier for the device for this request.
+ *		Each device has its own set of clock inputs. This indexes
+ * which clock input to modify.
+ * @freq:	Current frequency in Hz
+ *
+ * Return: 0 if all went well, else returns appropriate error value.
+ */
+static int ti_sci_cmd_clk_get_freq(const struct ti_sci_handle *handle,
+ u32 dev_id, u8 clk_id, u64 *freq)
+{
+ struct ti_sci_info *info;
+ struct ti_sci_msg_req_get_clock_freq *req;
+ struct ti_sci_msg_resp_get_clock_freq *resp;
+ struct ti_sci_xfer *xfer;
+ struct device *dev;
+ int ret = 0;
+
+ if (IS_ERR(handle))
+ return PTR_ERR(handle);
+ if (!handle || !freq)
+ return -EINVAL;
+
+ info = handle_to_ti_sci_info(handle);
+ dev = info->dev;
+
+ xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_GET_CLOCK_FREQ,
+ TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
+ sizeof(*req), sizeof(*resp));
+ if (IS_ERR(xfer)) {
+ ret = PTR_ERR(xfer);
+ dev_err(dev, "Message alloc failed(%d)\n", ret);
+ return ret;
+ }
+ req = (struct ti_sci_msg_req_get_clock_freq *)xfer->xfer_buf;
+ req->dev_id = dev_id;
+ req->clk_id = clk_id;
+
+ ret = ti_sci_do_xfer(info, xfer);
+ if (ret) {
+ dev_err(dev, "Mbox send fail %d\n", ret);
+ goto fail;
+ }
+
+ resp = (struct ti_sci_msg_resp_get_clock_freq *)xfer->xfer_buf;
+
+ if (!ti_sci_is_response_ack(resp))
+ ret = -ENODEV;
+ else
+ *freq = resp->freq_hz;
+
+fail:
+ ti_sci_put_one_xfer(&info->minfo, xfer);
+
+ return ret;
+}
+
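+/**
+ * ti_sci_cmd_core_reboot() - Command to request a system reset
+ * @handle:	pointer to TI SCI handle
+ *
+ * Return: 0 if all went well, else returns appropriate error value.
+ */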
+static int ti_sci_cmd_core_reboot(const struct ti_sci_handle *handle)
+{
+ struct ti_sci_info *info;
+ struct ti_sci_msg_req_reboot *req;
+ struct ti_sci_msg_hdr *resp;
+ struct ti_sci_xfer *xfer;
+ struct device *dev;
+ int ret = 0;
+
+ if (IS_ERR(handle))
+ return PTR_ERR(handle);
+ if (!handle)
+ return -EINVAL;
+
+ info = handle_to_ti_sci_info(handle);
+ dev = info->dev;
+
+ xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_SYS_RESET,
+ TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
+ sizeof(*req), sizeof(*resp));
+ if (IS_ERR(xfer)) {
+ ret = PTR_ERR(xfer);
+ dev_err(dev, "Message alloc failed(%d)\n", ret);
+ return ret;
+ }
+ req = (struct ti_sci_msg_req_reboot *)xfer->xfer_buf;
+
+ ret = ti_sci_do_xfer(info, xfer);
+ if (ret) {
+ dev_err(dev, "Mbox send fail %d\n", ret);
+ goto fail;
+ }
+
+ resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
+
+ if (!ti_sci_is_response_ack(resp))
+ ret = -ENODEV;
+ else
+ ret = 0;
+
+fail:
+ ti_sci_put_one_xfer(&info->minfo, xfer);
+
+ return ret;
+}
+
+/*
+ * ti_sci_setup_ops() - Setup the operations structures
+ * @info:	pointer to TISCI instance
+ */
+static void ti_sci_setup_ops(struct ti_sci_info *info)
+{
+ struct ti_sci_ops *ops = &info->handle.ops;
+ struct ti_sci_core_ops *core_ops = &ops->core_ops;
+ struct ti_sci_dev_ops *dops = &ops->dev_ops;
+ struct ti_sci_clk_ops *cops = &ops->clk_ops;
+
+ core_ops->reboot_device = ti_sci_cmd_core_reboot;
+
+ dops->get_device = ti_sci_cmd_get_device;
+ dops->idle_device = ti_sci_cmd_idle_device;
+ dops->put_device = ti_sci_cmd_put_device;
+
+ dops->is_valid = ti_sci_cmd_dev_is_valid;
+ dops->get_context_loss_count = ti_sci_cmd_dev_get_clcnt;
+ dops->is_idle = ti_sci_cmd_dev_is_idle;
+ dops->is_stop = ti_sci_cmd_dev_is_stop;
+ dops->is_on = ti_sci_cmd_dev_is_on;
+ dops->is_transitioning = ti_sci_cmd_dev_is_trans;
+ dops->set_device_resets = ti_sci_cmd_set_device_resets;
+ dops->get_device_resets = ti_sci_cmd_get_device_resets;
+
+ cops->get_clock = ti_sci_cmd_get_clock;
+ cops->idle_clock = ti_sci_cmd_idle_clock;
+ cops->put_clock = ti_sci_cmd_put_clock;
+ cops->is_auto = ti_sci_cmd_clk_is_auto;
+ cops->is_on = ti_sci_cmd_clk_is_on;
+ cops->is_off = ti_sci_cmd_clk_is_off;
+
+ cops->set_parent = ti_sci_cmd_clk_set_parent;
+ cops->get_parent = ti_sci_cmd_clk_get_parent;
+ cops->get_num_parents = ti_sci_cmd_clk_get_num_parents;
+
+ cops->get_best_match_freq = ti_sci_cmd_clk_get_match_freq;
+ cops->set_freq = ti_sci_cmd_clk_set_freq;
+ cops->get_freq = ti_sci_cmd_clk_get_freq;
+}
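+
+/*
+ * Example (illustrative sketch only, not part of the driver): with the
+ * ops table filled in above, a consumer can power a device up and verify
+ * the transition. The dev_ops signatures are assumed by analogy with the
+ * clock queries in this file; the function name is hypothetical.
+ */
+static int __maybe_unused example_power_up_device(const struct ti_sci_handle *handle,
+						  u32 dev_id)
+{
+	const struct ti_sci_dev_ops *dops = &handle->ops.dev_ops;
+	bool requested, is_on;
+	int ret;
+
+	ret = dops->get_device(handle, dev_id);
+	if (ret)
+		return ret;
+
+	ret = dops->is_on(handle, dev_id, &requested, &is_on);
+	if (ret || !is_on) {
+		/* Back out the request if the device did not come up */
+		dops->put_device(handle, dev_id);
+		return ret ? ret : -ENODEV;
+	}
+
+	return 0;
+}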
+
+/**
+ * ti_sci_get_handle() - Get the TI SCI handle for a device
+ * @dev: Pointer to device for which we want SCI handle
+ *
+ * NOTE: The function does not track individual clients of the framework;
+ * this is expected to be maintained by the caller of the TI SCI protocol
+ * library. Each successful ti_sci_get_handle() must be balanced with a
+ * matching ti_sci_put_handle().
+ *
+ * Return: pointer to handle if successful, else:
+ * -EPROBE_DEFER if the instance is not ready
+ * -ENODEV if the required node handler is missing
+ * -EINVAL if invalid conditions are encountered.
+ */
+const struct ti_sci_handle *ti_sci_get_handle(struct device *dev)
+{
+ struct device_node *ti_sci_np;
+ struct list_head *p;
+ struct ti_sci_handle *handle = NULL;
+ struct ti_sci_info *info;
+
+ if (!dev) {
+ pr_err("I need a device pointer\n");
+ return ERR_PTR(-EINVAL);
+ }
+ ti_sci_np = of_get_parent(dev->of_node);
+ if (!ti_sci_np) {
+ dev_err(dev, "No OF information\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ mutex_lock(&ti_sci_list_mutex);
+ list_for_each(p, &ti_sci_list) {
+ info = list_entry(p, struct ti_sci_info, node);
+ if (ti_sci_np == info->dev->of_node) {
+ handle = &info->handle;
+ info->users++;
+ break;
+ }
+ }
+ mutex_unlock(&ti_sci_list_mutex);
+ of_node_put(ti_sci_np);
+
+ if (!handle)
+ return ERR_PTR(-EPROBE_DEFER);
+
+ return handle;
+}
+EXPORT_SYMBOL_GPL(ti_sci_get_handle);
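+
+/*
+ * Example (illustrative sketch only, not part of the driver): a consumer
+ * driver's probe acquiring and releasing the handle explicitly. The
+ * function name is hypothetical.
+ */
+static int __maybe_unused example_consumer_probe(struct platform_device *pdev)
+{
+	const struct ti_sci_handle *handle;
+
+	handle = ti_sci_get_handle(&pdev->dev);
+	if (IS_ERR(handle))
+		return PTR_ERR(handle);	/* may be -EPROBE_DEFER */
+
+	dev_info(&pdev->dev, "TISCI ABI %d.%d\n",
+		 handle->version.abi_major, handle->version.abi_minor);
+
+	/* ... use handle->ops here ... */
+
+	return ti_sci_put_handle(handle);
+}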
+
+/**
+ * ti_sci_put_handle() - Release the handle acquired by ti_sci_get_handle
+ * @handle: Handle acquired by ti_sci_get_handle
+ *
+ * NOTE: The function does not track individual clients of the framework;
+ * this is expected to be maintained by the caller of the TI SCI protocol
+ * library. Each call to ti_sci_put_handle() must balance a successful
+ * ti_sci_get_handle().
+ *
+ * Return: 0 if successfully released;
+ * if an error pointer was passed, the error value is returned back;
+ * if NULL was passed, -EINVAL is returned.
+ */
+int ti_sci_put_handle(const struct ti_sci_handle *handle)
+{
+ struct ti_sci_info *info;
+
+ if (IS_ERR(handle))
+ return PTR_ERR(handle);
+ if (!handle)
+ return -EINVAL;
+
+ info = handle_to_ti_sci_info(handle);
+ mutex_lock(&ti_sci_list_mutex);
+ if (!WARN_ON(!info->users))
+ info->users--;
+ mutex_unlock(&ti_sci_list_mutex);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ti_sci_put_handle);
+
+static void devm_ti_sci_release(struct device *dev, void *res)
+{
+ const struct ti_sci_handle **ptr = res;
+ const struct ti_sci_handle *handle = *ptr;
+ int ret;
+
+ ret = ti_sci_put_handle(handle);
+ if (ret)
+ dev_err(dev, "failed to put handle %d\n", ret);
+}
+
+/**
+ * devm_ti_sci_get_handle() - Managed get handle
+ * @dev:	device for which we want the SCI handle.
+ *
+ * NOTE: This releases the handle once the device resources are
+ * no longer needed; it MUST NOT be released with ti_sci_put_handle().
+ * The function does not track individual clients of the framework
+ * and is expected to be maintained by caller of TI SCI protocol library.
+ *
+ * Return: pointer to the handle if successful, else corresponding error pointer.
+ */
+const struct ti_sci_handle *devm_ti_sci_get_handle(struct device *dev)
+{
+ const struct ti_sci_handle **ptr;
+ const struct ti_sci_handle *handle;
+
+ ptr = devres_alloc(devm_ti_sci_release, sizeof(*ptr), GFP_KERNEL);
+ if (!ptr)
+ return ERR_PTR(-ENOMEM);
+ handle = ti_sci_get_handle(dev);
+
+ if (!IS_ERR(handle)) {
+ *ptr = handle;
+ devres_add(dev, ptr);
+ } else {
+ devres_free(ptr);
+ }
+
+ return handle;
+}
+EXPORT_SYMBOL_GPL(devm_ti_sci_get_handle);
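+
+/*
+ * Example (illustrative sketch only, not part of the driver): the managed
+ * variant ties the handle's lifetime to the consumer device, so no
+ * explicit put is needed. The function name is hypothetical.
+ */
+static int __maybe_unused example_devm_consumer_probe(struct platform_device *pdev)
+{
+	const struct ti_sci_handle *handle;
+
+	handle = devm_ti_sci_get_handle(&pdev->dev);
+	if (IS_ERR(handle))
+		return PTR_ERR(handle);
+
+	/* handle is released automatically when the device is unbound */
+	return 0;
+}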
+
+static int tisci_reboot_handler(struct notifier_block *nb, unsigned long mode,
+ void *cmd)
+{
+ struct ti_sci_info *info = reboot_to_ti_sci_info(nb);
+ const struct ti_sci_handle *handle = &info->handle;
+
+ ti_sci_cmd_core_reboot(handle);
+
+	/* Whether the call fails or passes, we should not have gotten here */
+ return NOTIFY_BAD;
+}
+
+/* Description for K2G */
+static const struct ti_sci_desc ti_sci_pmmc_k2g_desc = {
+ .host_id = 2,
+ /* Conservative duration */
+ .max_rx_timeout_ms = 1000,
+	/* Limited by MBOX_TX_QUEUE_LEN. K2G can handle up to 128 messages! */
+ .max_msgs = 20,
+ .max_msg_size = 64,
+};
+
+static const struct of_device_id ti_sci_of_match[] = {
+ {.compatible = "ti,k2g-sci", .data = &ti_sci_pmmc_k2g_desc},
+ { /* Sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, ti_sci_of_match);
+
+static int ti_sci_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ const struct of_device_id *of_id;
+ const struct ti_sci_desc *desc;
+ struct ti_sci_xfer *xfer;
+ struct ti_sci_info *info = NULL;
+ struct ti_sci_xfers_info *minfo;
+ struct mbox_client *cl;
+ int ret = -EINVAL;
+ int i;
+ int reboot = 0;
+
+ of_id = of_match_device(ti_sci_of_match, dev);
+ if (!of_id) {
+ dev_err(dev, "OF data missing\n");
+ return -EINVAL;
+ }
+ desc = of_id->data;
+
+ info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+
+ info->dev = dev;
+ info->desc = desc;
+ reboot = of_property_read_bool(dev->of_node,
+ "ti,system-reboot-controller");
+ INIT_LIST_HEAD(&info->node);
+ minfo = &info->minfo;
+
+ /*
+	 * Pre-allocate messages.
+	 * NEVER allocate more than what we can indicate in hdr.seq;
+	 * if we have a data description bug, force a fix.
+ */
+ if (WARN_ON(desc->max_msgs >=
+ 1 << 8 * sizeof(((struct ti_sci_msg_hdr *)0)->seq)))
+ return -EINVAL;
+
+ minfo->xfer_block = devm_kcalloc(dev,
+ desc->max_msgs,
+ sizeof(*minfo->xfer_block),
+ GFP_KERNEL);
+ if (!minfo->xfer_block)
+ return -ENOMEM;
+
+ minfo->xfer_alloc_table = devm_kzalloc(dev,
+ BITS_TO_LONGS(desc->max_msgs)
+ * sizeof(unsigned long),
+ GFP_KERNEL);
+ if (!minfo->xfer_alloc_table)
+ return -ENOMEM;
+ bitmap_zero(minfo->xfer_alloc_table, desc->max_msgs);
+
+ /* Pre-initialize the buffer pointer to pre-allocated buffers */
+ for (i = 0, xfer = minfo->xfer_block; i < desc->max_msgs; i++, xfer++) {
+ xfer->xfer_buf = devm_kcalloc(dev, 1, desc->max_msg_size,
+ GFP_KERNEL);
+ if (!xfer->xfer_buf)
+ return -ENOMEM;
+
+ xfer->tx_message.buf = xfer->xfer_buf;
+ init_completion(&xfer->done);
+ }
+
+ ret = ti_sci_debugfs_create(pdev, info);
+ if (ret)
+ dev_warn(dev, "Failed to create debug file\n");
+
+ platform_set_drvdata(pdev, info);
+
+ cl = &info->cl;
+ cl->dev = dev;
+ cl->tx_block = false;
+ cl->rx_callback = ti_sci_rx_callback;
+ cl->knows_txdone = true;
+
+ spin_lock_init(&minfo->xfer_lock);
+ sema_init(&minfo->sem_xfer_count, desc->max_msgs);
+
+ info->chan_rx = mbox_request_channel_byname(cl, "rx");
+ if (IS_ERR(info->chan_rx)) {
+ ret = PTR_ERR(info->chan_rx);
+ goto out;
+ }
+
+ info->chan_tx = mbox_request_channel_byname(cl, "tx");
+ if (IS_ERR(info->chan_tx)) {
+ ret = PTR_ERR(info->chan_tx);
+ goto out;
+ }
+ ret = ti_sci_cmd_get_revision(info);
+ if (ret) {
+ dev_err(dev, "Unable to communicate with TISCI(%d)\n", ret);
+ goto out;
+ }
+
+ ti_sci_setup_ops(info);
+
+ if (reboot) {
+ info->nb.notifier_call = tisci_reboot_handler;
+ info->nb.priority = 128;
+
+ ret = register_restart_handler(&info->nb);
+ if (ret) {
+ dev_err(dev, "reboot registration fail(%d)\n", ret);
+ return ret;
+ }
+ }
+
+ dev_info(dev, "ABI: %d.%d (firmware rev 0x%04x '%s')\n",
+ info->handle.version.abi_major, info->handle.version.abi_minor,
+ info->handle.version.firmware_revision,
+ info->handle.version.firmware_description);
+
+ mutex_lock(&ti_sci_list_mutex);
+ list_add_tail(&info->node, &ti_sci_list);
+ mutex_unlock(&ti_sci_list_mutex);
+
+ return of_platform_populate(dev->of_node, NULL, NULL, dev);
+out:
+ if (!IS_ERR(info->chan_tx))
+ mbox_free_channel(info->chan_tx);
+ if (!IS_ERR(info->chan_rx))
+ mbox_free_channel(info->chan_rx);
+ debugfs_remove(info->d);
+ return ret;
+}
+
+static int ti_sci_remove(struct platform_device *pdev)
+{
+ struct ti_sci_info *info;
+ struct device *dev = &pdev->dev;
+ int ret = 0;
+
+ of_platform_depopulate(dev);
+
+ info = platform_get_drvdata(pdev);
+
+ if (info->nb.notifier_call)
+ unregister_restart_handler(&info->nb);
+
+ mutex_lock(&ti_sci_list_mutex);
+ if (info->users)
+ ret = -EBUSY;
+ else
+ list_del(&info->node);
+ mutex_unlock(&ti_sci_list_mutex);
+
+ if (!ret) {
+ ti_sci_debugfs_destroy(pdev, info);
+
+ /* Safe to free channels since no more users */
+ mbox_free_channel(info->chan_tx);
+ mbox_free_channel(info->chan_rx);
+ }
+
+ return ret;
+}
+
+static struct platform_driver ti_sci_driver = {
+ .probe = ti_sci_probe,
+ .remove = ti_sci_remove,
+ .driver = {
+ .name = "ti-sci",
+ .of_match_table = of_match_ptr(ti_sci_of_match),
+ },
+};
+module_platform_driver(ti_sci_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("TI System Control Interface(SCI) driver");
+MODULE_AUTHOR("Nishanth Menon");
+MODULE_ALIAS("platform:ti-sci");
diff --git a/drivers/firmware/ti_sci.h b/drivers/firmware/ti_sci.h
new file mode 100644
index 000000000000..9b611e9e6f6d
--- /dev/null
+++ b/drivers/firmware/ti_sci.h
@@ -0,0 +1,492 @@
+/*
+ * Texas Instruments System Control Interface (TISCI) Protocol
+ *
+ * Communication protocol with TI SCI hardware
+ * The system works in a message-response protocol.
+ * See: http://processors.wiki.ti.com/index.php/TISCI for details
+ *
+ * Copyright (C) 2015-2016 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the
+ * distribution.
+ *
+ * Neither the name of Texas Instruments Incorporated nor the names of
+ * its contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef __TI_SCI_H
+#define __TI_SCI_H
+
+/* Generic Messages */
+#define TI_SCI_MSG_ENABLE_WDT 0x0000
+#define TI_SCI_MSG_WAKE_RESET 0x0001
+#define TI_SCI_MSG_VERSION 0x0002
+#define TI_SCI_MSG_WAKE_REASON 0x0003
+#define TI_SCI_MSG_GOODBYE 0x0004
+#define TI_SCI_MSG_SYS_RESET 0x0005
+
+/* Device requests */
+#define TI_SCI_MSG_SET_DEVICE_STATE 0x0200
+#define TI_SCI_MSG_GET_DEVICE_STATE 0x0201
+#define TI_SCI_MSG_SET_DEVICE_RESETS 0x0202
+
+/* Clock requests */
+#define TI_SCI_MSG_SET_CLOCK_STATE 0x0100
+#define TI_SCI_MSG_GET_CLOCK_STATE 0x0101
+#define TI_SCI_MSG_SET_CLOCK_PARENT 0x0102
+#define TI_SCI_MSG_GET_CLOCK_PARENT 0x0103
+#define TI_SCI_MSG_GET_NUM_CLOCK_PARENTS 0x0104
+#define TI_SCI_MSG_SET_CLOCK_FREQ 0x010c
+#define TI_SCI_MSG_QUERY_CLOCK_FREQ 0x010d
+#define TI_SCI_MSG_GET_CLOCK_FREQ 0x010e
+
+/**
+ * struct ti_sci_msg_hdr - Generic Message Header for all messages and responses
+ * @type: Type of messages: One of TI_SCI_MSG* values
+ * @host: Host of the message
+ * @seq: Message identifier indicating a transfer sequence
+ * @flags: Flag for the message
+ */
+struct ti_sci_msg_hdr {
+ u16 type;
+ u8 host;
+ u8 seq;
+#define TI_SCI_MSG_FLAG(val) (1 << (val))
+#define TI_SCI_FLAG_REQ_GENERIC_NORESPONSE 0x0
+#define TI_SCI_FLAG_REQ_ACK_ON_RECEIVED TI_SCI_MSG_FLAG(0)
+#define TI_SCI_FLAG_REQ_ACK_ON_PROCESSED TI_SCI_MSG_FLAG(1)
+#define TI_SCI_FLAG_RESP_GENERIC_NACK 0x0
+#define TI_SCI_FLAG_RESP_GENERIC_ACK TI_SCI_MSG_FLAG(1)
+ /* Additional Flags */
+ u32 flags;
+} __packed;
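+
+/*
+ * Example (illustrative sketch only): checking a response header for an
+ * ACK reduces to testing the generic ACK flag; the driver's
+ * ti_sci_is_response_ack() presumably performs this same test.
+ */
+static inline bool example_hdr_is_ack(const struct ti_sci_msg_hdr *hdr)
+{
+	return hdr->flags & TI_SCI_FLAG_RESP_GENERIC_ACK;
+}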
+
+/**
+ * struct ti_sci_msg_resp_version - Response to a version request message
+ * @hdr: Generic header
+ * @firmware_description: String describing the firmware
+ * @firmware_revision: Firmware revision
+ * @abi_major: Major version of the ABI that firmware supports
+ * @abi_minor: Minor version of the ABI that firmware supports
+ *
+ * In general, ABI version changes follow the rule that minor version increments
+ * are backward compatible. Major revision changes in ABI may not be
+ * backward compatible.
+ *
+ * Response to a generic message with message type TI_SCI_MSG_VERSION
+ */
+struct ti_sci_msg_resp_version {
+ struct ti_sci_msg_hdr hdr;
+ char firmware_description[32];
+ u16 firmware_revision;
+ u8 abi_major;
+ u8 abi_minor;
+} __packed;
+
+/**
+ * struct ti_sci_msg_req_reboot - Reboot the SoC
+ * @hdr: Generic Header
+ *
+ * Request type is TI_SCI_MSG_SYS_RESET, responded with a generic
+ * ACK/NACK message.
+ */
+struct ti_sci_msg_req_reboot {
+ struct ti_sci_msg_hdr hdr;
+} __packed;
+
+/**
+ * struct ti_sci_msg_req_set_device_state - Set the desired state of the device
+ * @hdr: Generic header
+ * @id: Indicates which device to modify
+ * @reserved: Reserved space in message, must be 0 for backward compatibility
+ * @state: The desired state of the device.
+ *
+ * Certain flags can also be set to alter the device state:
+ * + MSG_FLAG_DEVICE_WAKE_ENABLED - Configure the device to be a wake source.
+ * The meaning of this flag will vary slightly from device to device and from
+ * SoC to SoC but it generally allows the device to wake the SoC out of deep
+ * suspend states.
+ * + MSG_FLAG_DEVICE_RESET_ISO - Enable reset isolation for this device.
+ * + MSG_FLAG_DEVICE_EXCLUSIVE - Claim this device exclusively. When passed
+ * with STATE_RETENTION or STATE_ON, it will claim the device exclusively.
+ * If another host already has this device set to STATE_RETENTION or STATE_ON,
+ * the message will fail. Once successful, other hosts attempting to set
+ * STATE_RETENTION or STATE_ON will fail.
+ *
+ * Request type is TI_SCI_MSG_SET_DEVICE_STATE, responded with a generic
+ * ACK/NACK message.
+ */
+struct ti_sci_msg_req_set_device_state {
+ /* Additional hdr->flags options */
+#define MSG_FLAG_DEVICE_WAKE_ENABLED TI_SCI_MSG_FLAG(8)
+#define MSG_FLAG_DEVICE_RESET_ISO TI_SCI_MSG_FLAG(9)
+#define MSG_FLAG_DEVICE_EXCLUSIVE TI_SCI_MSG_FLAG(10)
+ struct ti_sci_msg_hdr hdr;
+ u32 id;
+ u32 reserved;
+
+#define MSG_DEVICE_SW_STATE_AUTO_OFF 0
+#define MSG_DEVICE_SW_STATE_RETENTION 1
+#define MSG_DEVICE_SW_STATE_ON 2
+ u8 state;
+} __packed;
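+
+/*
+ * Example (illustrative sketch only): populating a request that turns a
+ * device on as an exclusive wake source. The helper name is hypothetical.
+ */
+static inline void example_fill_set_device_state(struct ti_sci_msg_req_set_device_state *req,
+						 u32 id)
+{
+	req->hdr.flags |= MSG_FLAG_DEVICE_EXCLUSIVE | MSG_FLAG_DEVICE_WAKE_ENABLED;
+	req->id = id;
+	req->reserved = 0;	/* must stay 0 for backward compatibility */
+	req->state = MSG_DEVICE_SW_STATE_ON;
+}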
+
+/**
+ * struct ti_sci_msg_req_get_device_state - Request to get device state.
+ * @hdr: Generic header
+ * @id: Device Identifier
+ *
+ * Request type is TI_SCI_MSG_GET_DEVICE_STATE, responded to with device
+ * state information.
+ */
+struct ti_sci_msg_req_get_device_state {
+ struct ti_sci_msg_hdr hdr;
+ u32 id;
+} __packed;
+
+/**
+ * struct ti_sci_msg_resp_get_device_state - Response to get device request.
+ * @hdr: Generic header
+ * @context_loss_count: Indicates how many times the device has lost context. A
+ * driver can use this monotonic counter to determine if the device has
+ * lost context since the last time this message was exchanged.
+ * @resets: Programmed state of the reset lines.
+ * @programmed_state: The state as programmed by set_device.
+ * - Uses the MSG_DEVICE_SW_* macros
+ * @current_state: The actual state of the hardware.
+ *
+ * Response to request TI_SCI_MSG_GET_DEVICE_STATE.
+ */
+struct ti_sci_msg_resp_get_device_state {
+ struct ti_sci_msg_hdr hdr;
+ u32 context_loss_count;
+ u32 resets;
+ u8 programmed_state;
+#define MSG_DEVICE_HW_STATE_OFF 0
+#define MSG_DEVICE_HW_STATE_ON 1
+#define MSG_DEVICE_HW_STATE_TRANS 2
+ u8 current_state;
+} __packed;
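+
+/*
+ * Example (illustrative sketch only): a device is effectively ON when the
+ * programmed software state and the actual hardware state agree. The
+ * helper name is hypothetical.
+ */
+static inline bool example_device_is_on(const struct ti_sci_msg_resp_get_device_state *resp)
+{
+	return resp->programmed_state == MSG_DEVICE_SW_STATE_ON &&
+	       resp->current_state == MSG_DEVICE_HW_STATE_ON;
+}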
+
+/**
+ * struct ti_sci_msg_req_set_device_resets - Set the desired resets
+ * configuration of the device
+ * @hdr: Generic header
+ * @id: Indicates which device to modify
+ * @resets: A bit field of resets for the device. The meaning, behavior,
+ * and usage of the reset flags are device specific. 0 for a bit
+ * indicates releasing the reset represented by that bit while 1
+ * indicates keeping it held.
+ *
+ * Request type is TI_SCI_MSG_SET_DEVICE_RESETS, responded with a generic
+ * ACK/NACK message.
+ */
+struct ti_sci_msg_req_set_device_resets {
+ struct ti_sci_msg_hdr hdr;
+ u32 id;
+ u32 resets;
+} __packed;
+
+/**
+ * struct ti_sci_msg_req_set_clock_state - Request to setup a Clock state
+ * @hdr:	Generic Header. Certain clock-specific flags can also be set:
+ * MSG_FLAG_CLOCK_ALLOW_SSC: Allow this clock to be modified
+ * via spread spectrum clocking.
+ * MSG_FLAG_CLOCK_ALLOW_FREQ_CHANGE: Allow this clock's
+ * frequency to be changed while it is running so long as it
+ * is within the min/max limits.
+ * MSG_FLAG_CLOCK_INPUT_TERM: Enable input termination, this
+ * is only applicable to clock inputs on the SoC pseudo-device.
+ * @dev_id: Device identifier this request is for
+ * @clk_id: Clock identifier for the device for this request.
+ *		Each device has its own set of clock inputs. This indexes
+ * which clock input to modify.
+ * @request_state: Request the state for the clock to be set to.
+ * MSG_CLOCK_SW_STATE_UNREQ: The IP does not require this clock,
+ * it can be disabled, regardless of the state of the device
+ * MSG_CLOCK_SW_STATE_AUTO: Allow the System Controller to
+ * automatically manage the state of this clock. If the device
+ * is enabled, then the clock is enabled. If the device is set
+ * to off or retention, then the clock is internally set as not
+ *		being required by the device (default).
+ * MSG_CLOCK_SW_STATE_REQ: Configure the clock to be enabled,
+ * regardless of the state of the device.
+ *
+ * Normally, all required clocks are managed by the TISCI entity; this is
+ * used only for specific control *IF* required. The auto-managed state is
+ * MSG_CLOCK_SW_STATE_AUTO; in the other states, the TISCI entity assumes
+ * the remote host will control the clock explicitly.
+ *
+ * Request type is TI_SCI_MSG_SET_CLOCK_STATE, response is a generic
+ * ACK or NACK message.
+ */
+struct ti_sci_msg_req_set_clock_state {
+ /* Additional hdr->flags options */
+#define MSG_FLAG_CLOCK_ALLOW_SSC TI_SCI_MSG_FLAG(8)
+#define MSG_FLAG_CLOCK_ALLOW_FREQ_CHANGE TI_SCI_MSG_FLAG(9)
+#define MSG_FLAG_CLOCK_INPUT_TERM TI_SCI_MSG_FLAG(10)
+ struct ti_sci_msg_hdr hdr;
+ u32 dev_id;
+ u8 clk_id;
+#define MSG_CLOCK_SW_STATE_UNREQ 0
+#define MSG_CLOCK_SW_STATE_AUTO 1
+#define MSG_CLOCK_SW_STATE_REQ 2
+ u8 request_state;
+} __packed;
+
+/**
+ * struct ti_sci_msg_req_get_clock_state - Request for clock state
+ * @hdr: Generic Header
+ * @dev_id: Device identifier this request is for
+ * @clk_id: Clock identifier for the device for this request.
+ *		Each device has its own set of clock inputs. This indexes
+ * which clock input to get state of.
+ *
+ * Request type is TI_SCI_MSG_GET_CLOCK_STATE, response is state
+ * of the clock
+ */
+struct ti_sci_msg_req_get_clock_state {
+ struct ti_sci_msg_hdr hdr;
+ u32 dev_id;
+ u8 clk_id;
+} __packed;
+
+/**
+ * struct ti_sci_msg_resp_get_clock_state - Response to get clock state
+ * @hdr: Generic Header
+ * @programmed_state: Any programmed state of the clock. This is one of
+ * MSG_CLOCK_SW_STATE* values.
+ * @current_state: Current state of the clock. This is one of:
+ * MSG_CLOCK_HW_STATE_NOT_READY: Clock is not ready
+ * MSG_CLOCK_HW_STATE_READY: Clock is ready
+ *
+ * Response to TI_SCI_MSG_GET_CLOCK_STATE.
+ */
+struct ti_sci_msg_resp_get_clock_state {
+ struct ti_sci_msg_hdr hdr;
+ u8 programmed_state;
+#define MSG_CLOCK_HW_STATE_NOT_READY 0
+#define MSG_CLOCK_HW_STATE_READY 1
+ u8 current_state;
+} __packed;
+
+/**
+ * struct ti_sci_msg_req_set_clock_parent - Set the clock parent
+ * @hdr: Generic Header
+ * @dev_id: Device identifier this request is for
+ * @clk_id: Clock identifier for the device for this request.
+ *		Each device has its own set of clock inputs. This indexes
+ * which clock input to modify.
+ * @parent_id: The new clock parent is selectable by an index via this
+ * parameter.
+ *
+ * Request type is TI_SCI_MSG_SET_CLOCK_PARENT, response is generic
+ * ACK / NACK message.
+ */
+struct ti_sci_msg_req_set_clock_parent {
+ struct ti_sci_msg_hdr hdr;
+ u32 dev_id;
+ u8 clk_id;
+ u8 parent_id;
+} __packed;
+
+/**
+ * struct ti_sci_msg_req_get_clock_parent - Get the clock parent
+ * @hdr: Generic Header
+ * @dev_id: Device identifier this request is for
+ * @clk_id: Clock identifier for the device for this request.
+ *		Each device has its own set of clock inputs. This indexes
+ * which clock input to get the parent for.
+ *
+ * Request type is TI_SCI_MSG_GET_CLOCK_PARENT, response is parent information
+ */
+struct ti_sci_msg_req_get_clock_parent {
+ struct ti_sci_msg_hdr hdr;
+ u32 dev_id;
+ u8 clk_id;
+} __packed;
+
+/**
+ * struct ti_sci_msg_resp_get_clock_parent - Response with clock parent
+ * @hdr: Generic Header
+ * @parent_id: The current clock parent
+ *
+ * Response to TI_SCI_MSG_GET_CLOCK_PARENT.
+ */
+struct ti_sci_msg_resp_get_clock_parent {
+ struct ti_sci_msg_hdr hdr;
+ u8 parent_id;
+} __packed;
+
+/**
+ * struct ti_sci_msg_req_get_clock_num_parents - Request to get clock parents
+ * @hdr: Generic header
+ * @dev_id: Device identifier this request is for
+ * @clk_id: Clock identifier for the device for this request.
+ *
+ * This request provides information about how many clock parent options
+ * are available for a given clock to a device. This is typically used
+ * for input clocks.
+ *
+ * Request type is TI_SCI_MSG_GET_NUM_CLOCK_PARENTS, response is appropriate
+ * message, or NACK in case of inability to satisfy request.
+ */
+struct ti_sci_msg_req_get_clock_num_parents {
+ struct ti_sci_msg_hdr hdr;
+ u32 dev_id;
+ u8 clk_id;
+} __packed;
+
+/**
+ * struct ti_sci_msg_resp_get_clock_num_parents - Response for get clk parents
+ * @hdr: Generic header
+ * @num_parents: Number of clock parents
+ *
+ * Response to TI_SCI_MSG_GET_NUM_CLOCK_PARENTS
+ */
+struct ti_sci_msg_resp_get_clock_num_parents {
+ struct ti_sci_msg_hdr hdr;
+ u8 num_parents;
+} __packed;
+
+/**
+ * struct ti_sci_msg_req_query_clock_freq - Request to query a frequency
+ * @hdr: Generic Header
+ * @dev_id: Device identifier this request is for
+ * @min_freq_hz: The minimum allowable frequency in Hz. This is the minimum
+ * allowable programmed frequency and does not account for clock
+ * tolerances and jitter.
+ * @target_freq_hz: The target clock frequency. A frequency will be found
+ * as close to this target frequency as possible.
+ * @max_freq_hz: The maximum allowable frequency in Hz. This is the maximum
+ * allowable programmed frequency and does not account for clock
+ * tolerances and jitter.
+ * @clk_id: Clock identifier for the device for this request.
+ *
+ * NOTE: Normally clock frequency management is automatically done by the
+ * TISCI entity. For specific requests, TISCI evaluates its capability to
+ * achieve the requested frequency within the provided range and responds
+ * with a result message.
+ *
+ * Request type is TI_SCI_MSG_QUERY_CLOCK_FREQ, response is appropriate message,
+ * or NACK in case of inability to satisfy request.
+ */
+struct ti_sci_msg_req_query_clock_freq {
+ struct ti_sci_msg_hdr hdr;
+ u32 dev_id;
+ u64 min_freq_hz;
+ u64 target_freq_hz;
+ u64 max_freq_hz;
+ u8 clk_id;
+} __packed;
+
+/**
+ * struct ti_sci_msg_resp_query_clock_freq - Response to a clock frequency query
+ * @hdr: Generic Header
+ * @freq_hz: Frequency that is the best match in Hz.
+ *
+ * Response to request type TI_SCI_MSG_QUERY_CLOCK_FREQ. NOTE: if the request
+ * cannot be satisfied, the message will be of type NACK.
+ */
+struct ti_sci_msg_resp_query_clock_freq {
+ struct ti_sci_msg_hdr hdr;
+ u64 freq_hz;
+} __packed;
+
+/**
+ * struct ti_sci_msg_req_set_clock_freq - Request to setup a clock frequency
+ * @hdr: Generic Header
+ * @dev_id: Device identifier this request is for
+ * @min_freq_hz: The minimum allowable frequency in Hz. This is the minimum
+ * allowable programmed frequency and does not account for clock
+ * tolerances and jitter.
+ * @target_freq_hz: The target clock frequency. The clock will be programmed
+ * at a rate as close to this target frequency as possible.
+ * @max_freq_hz: The maximum allowable frequency in Hz. This is the maximum
+ * allowable programmed frequency and does not account for clock
+ * tolerances and jitter.
+ * @clk_id: Clock identifier for the device for this request.
+ *
+ * NOTE: Normally clock frequency management is automatically done by the
+ * TISCI entity. For specific requests, TISCI evaluates its capability to
+ * achieve the requested range and responds with a success/failure message.
+ *
+ * This sets the desired frequency for a clock within an allowable
+ * range. This message will fail on an enabled clock unless
+ * MSG_FLAG_CLOCK_ALLOW_FREQ_CHANGE is set for the clock. Additionally,
+ * if other clocks have their frequency modified due to this message,
+ * they also must have the MSG_FLAG_CLOCK_ALLOW_FREQ_CHANGE or be disabled.
+ *
+ * Calling set frequency on a clock input to the SoC pseudo-device will
+ * inform the PMMC of that clock's frequency. Setting a frequency of
+ * zero will indicate the clock is disabled.
+ *
+ * Calling set frequency on clock outputs from the SoC pseudo-device will
+ * function similarly to setting the clock frequency on a device.
+ *
+ * Request type is TI_SCI_MSG_SET_CLOCK_FREQ, response is a generic ACK/NACK
+ * message.
+ */
+struct ti_sci_msg_req_set_clock_freq {
+ struct ti_sci_msg_hdr hdr;
+ u32 dev_id;
+ u64 min_freq_hz;
+ u64 target_freq_hz;
+ u64 max_freq_hz;
+ u8 clk_id;
+} __packed;
+
+/**
+ * struct ti_sci_msg_req_get_clock_freq - Request to get the clock frequency
+ * @hdr: Generic Header
+ * @dev_id: Device identifier this request is for
+ * @clk_id: Clock identifier for the device for this request.
+ *
+ * NOTE: Normally clock frequency management is automatically done by the
+ * TISCI entity. In some cases, clock frequencies are configured by the host.
+ *
+ * Request type is TI_SCI_MSG_GET_CLOCK_FREQ, responded to with the
+ * frequency that the clock is currently running at.
+ */
+struct ti_sci_msg_req_get_clock_freq {
+ struct ti_sci_msg_hdr hdr;
+ u32 dev_id;
+ u8 clk_id;
+} __packed;
+
+/**
+ * struct ti_sci_msg_resp_get_clock_freq - Response of clock frequency request
+ * @hdr: Generic Header
+ * @freq_hz:	Frequency that the clock is currently running at, in Hz.
+ *
+ * Response to request type TI_SCI_MSG_GET_CLOCK_FREQ.
+ */
+struct ti_sci_msg_resp_get_clock_freq {
+ struct ti_sci_msg_hdr hdr;
+ u64 freq_hz;
+} __packed;
+
+#endif /* __TI_SCI_H */
diff --git a/drivers/fpga/Kconfig b/drivers/fpga/Kconfig
index cd84934774cc..ce861a2853a4 100644
--- a/drivers/fpga/Kconfig
+++ b/drivers/fpga/Kconfig
@@ -13,12 +13,26 @@ config FPGA
if FPGA
+config FPGA_REGION
+ tristate "FPGA Region"
+ depends on OF && FPGA_BRIDGE
+ help
+ FPGA Regions allow loading FPGA images under control of
+ the Device Tree.
+
config FPGA_MGR_SOCFPGA
tristate "Altera SOCFPGA FPGA Manager"
- depends on ARCH_SOCFPGA
+ depends on ARCH_SOCFPGA || COMPILE_TEST
help
FPGA manager driver support for Altera SOCFPGA.
+config FPGA_MGR_SOCFPGA_A10
+ tristate "Altera SoCFPGA Arria10"
+ depends on ARCH_SOCFPGA || COMPILE_TEST
+ select REGMAP_MMIO
+ help
+ FPGA manager driver support for Altera Arria10 SoCFPGA.
+
config FPGA_MGR_ZYNQ_FPGA
tristate "Xilinx Zynq FPGA"
depends on ARCH_ZYNQ || COMPILE_TEST
@@ -26,6 +40,29 @@ config FPGA_MGR_ZYNQ_FPGA
help
FPGA manager driver support for Xilinx Zynq FPGAs.
+config FPGA_BRIDGE
+ tristate "FPGA Bridge Framework"
+ depends on OF
+ help
+ Say Y here if you want to support bridges connected between host
+ processors and FPGAs or between FPGAs.
+
+config SOCFPGA_FPGA_BRIDGE
+ tristate "Altera SoCFPGA FPGA Bridges"
+ depends on ARCH_SOCFPGA && FPGA_BRIDGE
+ help
+ Say Y to enable drivers for FPGA bridges for Altera SOCFPGA
+ devices.
+
+config ALTERA_FREEZE_BRIDGE
+ tristate "Altera FPGA Freeze Bridge"
+ depends on ARCH_SOCFPGA && FPGA_BRIDGE
+ help
+ Say Y to enable drivers for Altera FPGA Freeze bridges. A
+ freeze bridge is a bridge that exists in the FPGA fabric to
+ isolate one region of the FPGA from the busses while that
+ region is being reprogrammed.
+
endif # FPGA
endmenu
diff --git a/drivers/fpga/Makefile b/drivers/fpga/Makefile
index 8d83fc6b1613..8df07bcf42a6 100644
--- a/drivers/fpga/Makefile
+++ b/drivers/fpga/Makefile
@@ -7,4 +7,13 @@ obj-$(CONFIG_FPGA) += fpga-mgr.o
# FPGA Manager Drivers
obj-$(CONFIG_FPGA_MGR_SOCFPGA) += socfpga.o
+obj-$(CONFIG_FPGA_MGR_SOCFPGA_A10) += socfpga-a10.o
obj-$(CONFIG_FPGA_MGR_ZYNQ_FPGA) += zynq-fpga.o
+
+# FPGA Bridge Drivers
+obj-$(CONFIG_FPGA_BRIDGE) += fpga-bridge.o
+obj-$(CONFIG_SOCFPGA_FPGA_BRIDGE) += altera-hps2fpga.o altera-fpga2sdram.o
+obj-$(CONFIG_ALTERA_FREEZE_BRIDGE) += altera-freeze-bridge.o
+
+# High Level Interfaces
+obj-$(CONFIG_FPGA_REGION) += fpga-region.o
diff --git a/drivers/fpga/altera-fpga2sdram.c b/drivers/fpga/altera-fpga2sdram.c
new file mode 100644
index 000000000000..d4eeb74388da
--- /dev/null
+++ b/drivers/fpga/altera-fpga2sdram.c
@@ -0,0 +1,180 @@
+/*
+ * FPGA to SDRAM Bridge Driver for Altera SoCFPGA Devices
+ *
+ * Copyright (C) 2013-2016 Altera Corporation, All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+/*
+ * This driver manages a bridge between an FPGA and the SDRAM used by the ARM
+ * host processor system (HPS).
+ *
+ * The bridge contains 4 read ports, 4 write ports, and 6 command ports.
+ * Reconfiguring these ports requires that no SDRAM transactions occur during
+ * reconfiguration. The code reconfiguring the ports cannot run out of SDRAM
+ * nor can the FPGA access the SDRAM during reconfiguration. This driver does
+ * not support reconfiguring the ports. The ports are configured by code
+ * running out of on-chip RAM before Linux is started and the configuration
+ * is passed in a handoff register in the system manager.
+ *
+ * This driver supports enabling and disabling of the configured ports, which
+ * allows for safe reprogramming of the FPGA, assuming that the new FPGA image
+ * uses the same port configuration. Bridges must be disabled before
+ * reprogramming the FPGA and re-enabled after the FPGA has been programmed.
+ */
+
+#include <linux/fpga/fpga-bridge.h>
+#include <linux/kernel.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/regmap.h>
+
+#define ALT_SDR_CTL_FPGAPORTRST_OFST 0x80
+#define ALT_SDR_CTL_FPGAPORTRST_PORTRSTN_MSK 0x00003fff
+#define ALT_SDR_CTL_FPGAPORTRST_RD_SHIFT 0
+#define ALT_SDR_CTL_FPGAPORTRST_WR_SHIFT 4
+#define ALT_SDR_CTL_FPGAPORTRST_CTRL_SHIFT 8
+
+/*
+ * From the Cyclone V HPS Memory Map document:
+ * These registers are used to store handoff information between the
+ * preloader and the OS. These 8 registers can be used to store any
+ * information. The contents of these registers have no impact on
+ * the state of the HPS hardware.
+ */
+#define SYSMGR_ISWGRP_HANDOFF3 (0x8C)
+
+#define F2S_BRIDGE_NAME "fpga2sdram"
+
+struct alt_fpga2sdram_data {
+ struct device *dev;
+ struct regmap *sdrctl;
+ int mask;
+};
+
+static int alt_fpga2sdram_enable_show(struct fpga_bridge *bridge)
+{
+ struct alt_fpga2sdram_data *priv = bridge->priv;
+ int value;
+
+ regmap_read(priv->sdrctl, ALT_SDR_CTL_FPGAPORTRST_OFST, &value);
+
+ return (value & priv->mask) == priv->mask;
+}
+
+static inline int _alt_fpga2sdram_enable_set(struct alt_fpga2sdram_data *priv,
+ bool enable)
+{
+ return regmap_update_bits(priv->sdrctl, ALT_SDR_CTL_FPGAPORTRST_OFST,
+ priv->mask, enable ? priv->mask : 0);
+}
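+
+/*
+ * Example (illustrative sketch only): a handoff mask enabling read ports
+ * 0-1 and write port 0 would be composed from the shifts above. In
+ * practice the mask is read from SYSMGR_ISWGRP_HANDOFF3 at probe time.
+ */
+static inline int example_f2s_port_mask(void)
+{
+	return (0x3 << ALT_SDR_CTL_FPGAPORTRST_RD_SHIFT) |	/* read ports 0-1 */
+	       (0x1 << ALT_SDR_CTL_FPGAPORTRST_WR_SHIFT);	/* write port 0 */
+}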
+
+static int alt_fpga2sdram_enable_set(struct fpga_bridge *bridge, bool enable)
+{
+ return _alt_fpga2sdram_enable_set(bridge->priv, enable);
+}
+
+struct prop_map {
+ char *prop_name;
+ u32 *prop_value;
+ u32 prop_max;
+};
+
+static const struct fpga_bridge_ops altera_fpga2sdram_br_ops = {
+ .enable_set = alt_fpga2sdram_enable_set,
+ .enable_show = alt_fpga2sdram_enable_show,
+};
+
+static const struct of_device_id altera_fpga_of_match[] = {
+ { .compatible = "altr,socfpga-fpga2sdram-bridge" },
+ {},
+};
+
+static int alt_fpga_bridge_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct alt_fpga2sdram_data *priv;
+ u32 enable;
+ struct regmap *sysmgr;
+ int ret = 0;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->dev = dev;
+
+ priv->sdrctl = syscon_regmap_lookup_by_compatible("altr,sdr-ctl");
+ if (IS_ERR(priv->sdrctl)) {
+ dev_err(dev, "regmap for altr,sdr-ctl lookup failed.\n");
+ return PTR_ERR(priv->sdrctl);
+ }
+
+ sysmgr = syscon_regmap_lookup_by_compatible("altr,sys-mgr");
+ if (IS_ERR(sysmgr)) {
+ dev_err(dev, "regmap for altr,sys-mgr lookup failed.\n");
+ return PTR_ERR(sysmgr);
+ }
+
+ /* Get f2s bridge configuration saved in handoff register */
+ regmap_read(sysmgr, SYSMGR_ISWGRP_HANDOFF3, &priv->mask);
+
+ ret = fpga_bridge_register(dev, F2S_BRIDGE_NAME,
+ &altera_fpga2sdram_br_ops, priv);
+ if (ret)
+ return ret;
+
+ dev_info(dev, "driver initialized with handoff %08x\n", priv->mask);
+
+ if (!of_property_read_u32(dev->of_node, "bridge-enable", &enable)) {
+ if (enable > 1) {
+ dev_warn(dev, "invalid bridge-enable %u > 1\n", enable);
+ } else {
+ dev_info(dev, "%s bridge\n",
+ (enable ? "enabling" : "disabling"));
+ ret = _alt_fpga2sdram_enable_set(priv, enable);
+ if (ret) {
+ fpga_bridge_unregister(&pdev->dev);
+ return ret;
+ }
+ }
+ }
+
+ return ret;
+}
+
+static int alt_fpga_bridge_remove(struct platform_device *pdev)
+{
+ fpga_bridge_unregister(&pdev->dev);
+
+ return 0;
+}
+
+MODULE_DEVICE_TABLE(of, altera_fpga_of_match);
+
+static struct platform_driver altera_fpga_driver = {
+ .probe = alt_fpga_bridge_probe,
+ .remove = alt_fpga_bridge_remove,
+ .driver = {
+ .name = "altera_fpga2sdram_bridge",
+ .of_match_table = of_match_ptr(altera_fpga_of_match),
+ },
+};
+
+module_platform_driver(altera_fpga_driver);
+
+MODULE_DESCRIPTION("Altera SoCFPGA FPGA to SDRAM Bridge");
+MODULE_AUTHOR("Alan Tull <atull@opensource.altera.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/fpga/altera-freeze-bridge.c b/drivers/fpga/altera-freeze-bridge.c
new file mode 100644
index 000000000000..8dcd9fb22cb9
--- /dev/null
+++ b/drivers/fpga/altera-freeze-bridge.c
@@ -0,0 +1,273 @@
+/*
+ * FPGA Freeze Bridge Controller
+ *
+ * Copyright (C) 2016 Altera Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/of_device.h>
+#include <linux/module.h>
+#include <linux/fpga/fpga-bridge.h>
+
+#define FREEZE_CSR_STATUS_OFFSET 0
+#define FREEZE_CSR_CTRL_OFFSET 4
+#define FREEZE_CSR_ILLEGAL_REQ_OFFSET 8
+#define FREEZE_CSR_REG_VERSION 12
+
+#define FREEZE_CSR_SUPPORTED_VERSION 2
+
+#define FREEZE_CSR_STATUS_FREEZE_REQ_DONE BIT(0)
+#define FREEZE_CSR_STATUS_UNFREEZE_REQ_DONE BIT(1)
+
+#define FREEZE_CSR_CTRL_FREEZE_REQ BIT(0)
+#define FREEZE_CSR_CTRL_RESET_REQ BIT(1)
+#define FREEZE_CSR_CTRL_UNFREEZE_REQ BIT(2)
+
+#define FREEZE_BRIDGE_NAME "freeze"
+
+struct altera_freeze_br_data {
+ struct device *dev;
+ void __iomem *base_addr;
+ bool enable;
+};
+
+/*
+ * Poll status until status bit is set or we have a timeout.
+ */
+static int altera_freeze_br_req_ack(struct altera_freeze_br_data *priv,
+ u32 timeout, u32 req_ack)
+{
+ struct device *dev = priv->dev;
+ void __iomem *csr_illegal_req_addr = priv->base_addr +
+ FREEZE_CSR_ILLEGAL_REQ_OFFSET;
+ u32 status, illegal, ctrl;
+ int ret = -ETIMEDOUT;
+
+ do {
+ illegal = readl(csr_illegal_req_addr);
+ if (illegal) {
+ dev_err(dev, "illegal request detected 0x%x", illegal);
+
+ writel(1, csr_illegal_req_addr);
+
+ illegal = readl(csr_illegal_req_addr);
+ if (illegal)
+ dev_err(dev, "illegal request not cleared 0x%x",
+ illegal);
+
+ ret = -EINVAL;
+ break;
+ }
+
+ status = readl(priv->base_addr + FREEZE_CSR_STATUS_OFFSET);
+ dev_dbg(dev, "%s %x %x\n", __func__, status, req_ack);
+ status &= req_ack;
+ if (status) {
+ ctrl = readl(priv->base_addr + FREEZE_CSR_CTRL_OFFSET);
+ dev_dbg(dev, "%s request %x acknowledged %x %x\n",
+ __func__, req_ack, status, ctrl);
+ ret = 0;
+ break;
+ }
+
+ udelay(1);
+ } while (timeout--);
+
+ if (ret == -ETIMEDOUT)
+ dev_err(dev, "%s timeout waiting for 0x%x\n",
+ __func__, req_ack);
+
+ return ret;
+}
+
+static int altera_freeze_br_do_freeze(struct altera_freeze_br_data *priv,
+ u32 timeout)
+{
+ struct device *dev = priv->dev;
+ void __iomem *csr_ctrl_addr = priv->base_addr +
+ FREEZE_CSR_CTRL_OFFSET;
+ u32 status;
+ int ret;
+
+ status = readl(priv->base_addr + FREEZE_CSR_STATUS_OFFSET);
+
+ dev_dbg(dev, "%s %d %d\n", __func__, status, readl(csr_ctrl_addr));
+
+ if (status & FREEZE_CSR_STATUS_FREEZE_REQ_DONE) {
+ dev_dbg(dev, "%s bridge already disabled %d\n",
+ __func__, status);
+ return 0;
+ } else if (!(status & FREEZE_CSR_STATUS_UNFREEZE_REQ_DONE)) {
+ dev_err(dev, "%s bridge not enabled %d\n", __func__, status);
+ return -EINVAL;
+ }
+
+ writel(FREEZE_CSR_CTRL_FREEZE_REQ, csr_ctrl_addr);
+
+ ret = altera_freeze_br_req_ack(priv, timeout,
+ FREEZE_CSR_STATUS_FREEZE_REQ_DONE);
+
+ if (ret)
+ writel(0, csr_ctrl_addr);
+ else
+ writel(FREEZE_CSR_CTRL_RESET_REQ, csr_ctrl_addr);
+
+ return ret;
+}
+
+static int altera_freeze_br_do_unfreeze(struct altera_freeze_br_data *priv,
+ u32 timeout)
+{
+ struct device *dev = priv->dev;
+ void __iomem *csr_ctrl_addr = priv->base_addr +
+ FREEZE_CSR_CTRL_OFFSET;
+ u32 status;
+ int ret;
+
+ writel(0, csr_ctrl_addr);
+
+ status = readl(priv->base_addr + FREEZE_CSR_STATUS_OFFSET);
+
+ dev_dbg(dev, "%s %d %d\n", __func__, status, readl(csr_ctrl_addr));
+
+ if (status & FREEZE_CSR_STATUS_UNFREEZE_REQ_DONE) {
+ dev_dbg(dev, "%s bridge already enabled %d\n",
+ __func__, status);
+ return 0;
+ } else if (!(status & FREEZE_CSR_STATUS_FREEZE_REQ_DONE)) {
+ dev_err(dev, "%s bridge not frozen %d\n", __func__, status);
+ return -EINVAL;
+ }
+
+ writel(FREEZE_CSR_CTRL_UNFREEZE_REQ, csr_ctrl_addr);
+
+ ret = altera_freeze_br_req_ack(priv, timeout,
+ FREEZE_CSR_STATUS_UNFREEZE_REQ_DONE);
+
+ status = readl(priv->base_addr + FREEZE_CSR_STATUS_OFFSET);
+
+ dev_dbg(dev, "%s %d %d\n", __func__, status, readl(csr_ctrl_addr));
+
+ writel(0, csr_ctrl_addr);
+
+ return ret;
+}
+
+/*
+ * enable = 1 : allow traffic through the bridge
+ * enable = 0 : disable traffic through the bridge
+ */
+static int altera_freeze_br_enable_set(struct fpga_bridge *bridge,
+ bool enable)
+{
+ struct altera_freeze_br_data *priv = bridge->priv;
+ struct fpga_image_info *info = bridge->info;
+ u32 timeout = 0;
+ int ret;
+
+ if (enable) {
+ if (info)
+ timeout = info->enable_timeout_us;
+
+ ret = altera_freeze_br_do_unfreeze(bridge->priv, timeout);
+ } else {
+ if (info)
+ timeout = info->disable_timeout_us;
+
+ ret = altera_freeze_br_do_freeze(bridge->priv, timeout);
+ }
+
+ if (!ret)
+ priv->enable = enable;
+
+ return ret;
+}
+
+static int altera_freeze_br_enable_show(struct fpga_bridge *bridge)
+{
+ struct altera_freeze_br_data *priv = bridge->priv;
+
+ return priv->enable;
+}
+
+static struct fpga_bridge_ops altera_freeze_br_br_ops = {
+ .enable_set = altera_freeze_br_enable_set,
+ .enable_show = altera_freeze_br_enable_show,
+};
+
+static const struct of_device_id altera_freeze_br_of_match[] = {
+ { .compatible = "altr,freeze-bridge-controller", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, altera_freeze_br_of_match);
+
+static int altera_freeze_br_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *np = pdev->dev.of_node;
+ struct altera_freeze_br_data *priv;
+ struct resource *res;
+ u32 status, revision;
+
+ if (!np)
+ return -ENODEV;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->dev = dev;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ priv->base_addr = devm_ioremap_resource(dev, res);
+ if (IS_ERR(priv->base_addr))
+ return PTR_ERR(priv->base_addr);
+
+ status = readl(priv->base_addr + FREEZE_CSR_STATUS_OFFSET);
+ if (status & FREEZE_CSR_STATUS_UNFREEZE_REQ_DONE)
+ priv->enable = 1;
+
+ revision = readl(priv->base_addr + FREEZE_CSR_REG_VERSION);
+ if (revision != FREEZE_CSR_SUPPORTED_VERSION)
+ dev_warn(dev,
+ "%s Freeze Controller unexpected revision %d != %d\n",
+ __func__, revision, FREEZE_CSR_SUPPORTED_VERSION);
+
+ return fpga_bridge_register(dev, FREEZE_BRIDGE_NAME,
+ &altera_freeze_br_br_ops, priv);
+}
+
+static int altera_freeze_br_remove(struct platform_device *pdev)
+{
+ fpga_bridge_unregister(&pdev->dev);
+
+ return 0;
+}
+
+static struct platform_driver altera_freeze_br_driver = {
+ .probe = altera_freeze_br_probe,
+ .remove = altera_freeze_br_remove,
+ .driver = {
+ .name = "altera_freeze_br",
+ .of_match_table = of_match_ptr(altera_freeze_br_of_match),
+ },
+};
+
+module_platform_driver(altera_freeze_br_driver);
+
+MODULE_DESCRIPTION("Altera Freeze Bridge");
+MODULE_AUTHOR("Alan Tull <atull@opensource.altera.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/fpga/altera-hps2fpga.c b/drivers/fpga/altera-hps2fpga.c
new file mode 100644
index 000000000000..4b354c79be31
--- /dev/null
+++ b/drivers/fpga/altera-hps2fpga.c
@@ -0,0 +1,222 @@
+/*
+ * FPGA to/from HPS Bridge Driver for Altera SoCFPGA Devices
+ *
+ * Copyright (C) 2013-2016 Altera Corporation, All Rights Reserved.
+ *
+ * Includes this patch from the mailing list:
+ * fpga: altera-hps2fpga: fix HPS2FPGA bridge visibility to L3 masters
+ * Signed-off-by: Anatolij Gustschin <agust@denx.de>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+/*
+ * This driver manages bridges on an Altera SOCFPGA between the ARM host
+ * processor system (HPS) and the embedded FPGA.
+ *
+ * This driver supports enabling and disabling of the configured ports, which
+ * allows for safe reprogramming of the FPGA, assuming that the new FPGA image
+ * uses the same port configuration. Bridges must be disabled before
+ * reprogramming the FPGA and re-enabled after the FPGA has been programmed.
+ */
+
+#include <linux/clk.h>
+#include <linux/fpga/fpga-bridge.h>
+#include <linux/kernel.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/regmap.h>
+#include <linux/reset.h>
+#include <linux/spinlock.h>
+
+#define ALT_L3_REMAP_OFST 0x0
+#define ALT_L3_REMAP_MPUZERO_MSK 0x00000001
+#define ALT_L3_REMAP_H2F_MSK 0x00000008
+#define ALT_L3_REMAP_LWH2F_MSK 0x00000010
+
+#define HPS2FPGA_BRIDGE_NAME "hps2fpga"
+#define LWHPS2FPGA_BRIDGE_NAME "lwhps2fpga"
+#define FPGA2HPS_BRIDGE_NAME "fpga2hps"
+
+struct altera_hps2fpga_data {
+ const char *name;
+ struct reset_control *bridge_reset;
+ struct regmap *l3reg;
+ unsigned int remap_mask;
+ struct clk *clk;
+};
+
+static int alt_hps2fpga_enable_show(struct fpga_bridge *bridge)
+{
+ struct altera_hps2fpga_data *priv = bridge->priv;
+
+ return reset_control_status(priv->bridge_reset);
+}
+
+/* The L3 REMAP register is write only, so keep a cached value. */
+static unsigned int l3_remap_shadow;
+static spinlock_t l3_remap_lock;
+
+static int _alt_hps2fpga_enable_set(struct altera_hps2fpga_data *priv,
+ bool enable)
+{
+ unsigned long flags;
+ int ret;
+
+ /* bring bridge out of reset */
+ if (enable)
+ ret = reset_control_deassert(priv->bridge_reset);
+ else
+ ret = reset_control_assert(priv->bridge_reset);
+ if (ret)
+ return ret;
+
+ /* Allow bridge to be visible to L3 masters or not */
+ if (priv->remap_mask) {
+ spin_lock_irqsave(&l3_remap_lock, flags);
+ l3_remap_shadow |= ALT_L3_REMAP_MPUZERO_MSK;
+
+ if (enable)
+ l3_remap_shadow |= priv->remap_mask;
+ else
+ l3_remap_shadow &= ~priv->remap_mask;
+
+ ret = regmap_write(priv->l3reg, ALT_L3_REMAP_OFST,
+ l3_remap_shadow);
+ spin_unlock_irqrestore(&l3_remap_lock, flags);
+ }
+
+ return ret;
+}
+
+static int alt_hps2fpga_enable_set(struct fpga_bridge *bridge, bool enable)
+{
+ return _alt_hps2fpga_enable_set(bridge->priv, enable);
+}
+
+static const struct fpga_bridge_ops altera_hps2fpga_br_ops = {
+ .enable_set = alt_hps2fpga_enable_set,
+ .enable_show = alt_hps2fpga_enable_show,
+};
+
+static struct altera_hps2fpga_data hps2fpga_data = {
+ .name = HPS2FPGA_BRIDGE_NAME,
+ .remap_mask = ALT_L3_REMAP_H2F_MSK,
+};
+
+static struct altera_hps2fpga_data lwhps2fpga_data = {
+ .name = LWHPS2FPGA_BRIDGE_NAME,
+ .remap_mask = ALT_L3_REMAP_LWH2F_MSK,
+};
+
+static struct altera_hps2fpga_data fpga2hps_data = {
+ .name = FPGA2HPS_BRIDGE_NAME,
+};
+
+static const struct of_device_id altera_fpga_of_match[] = {
+ { .compatible = "altr,socfpga-hps2fpga-bridge",
+ .data = &hps2fpga_data },
+ { .compatible = "altr,socfpga-lwhps2fpga-bridge",
+ .data = &lwhps2fpga_data },
+ { .compatible = "altr,socfpga-fpga2hps-bridge",
+ .data = &fpga2hps_data },
+ {},
+};
+
+static int alt_fpga_bridge_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct altera_hps2fpga_data *priv;
+ const struct of_device_id *of_id;
+ u32 enable;
+ int ret;
+
+ of_id = of_match_device(altera_fpga_of_match, dev);
+ if (!of_id)
+ return -ENODEV;
+
+ priv = (struct altera_hps2fpga_data *)of_id->data;
+
+ priv->bridge_reset = of_reset_control_get_by_index(dev->of_node, 0);
+ if (IS_ERR(priv->bridge_reset)) {
+ dev_err(dev, "Could not get %s reset control\n", priv->name);
+ return PTR_ERR(priv->bridge_reset);
+ }
+
+ if (priv->remap_mask) {
+ priv->l3reg = syscon_regmap_lookup_by_compatible("altr,l3regs");
+ if (IS_ERR(priv->l3reg)) {
+ dev_err(dev, "regmap for altr,l3regs lookup failed\n");
+ return PTR_ERR(priv->l3reg);
+ }
+ }
+
+ priv->clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(priv->clk)) {
+ dev_err(dev, "no clock specified\n");
+ return PTR_ERR(priv->clk);
+ }
+
+ ret = clk_prepare_enable(priv->clk);
+ if (ret) {
+ dev_err(dev, "could not enable clock\n");
+ return -EBUSY;
+ }
+
+ if (!of_property_read_u32(dev->of_node, "bridge-enable", &enable)) {
+ if (enable > 1) {
+ dev_warn(dev, "invalid bridge-enable %u > 1\n", enable);
+ } else {
+ dev_info(dev, "%s bridge\n",
+ (enable ? "enabling" : "disabling"));
+
+ ret = _alt_hps2fpga_enable_set(priv, enable);
+ if (ret)
+ goto err;
+ }
+ }
+
+ ret = fpga_bridge_register(dev, priv->name, &altera_hps2fpga_br_ops,
+ priv);
+ if (ret)
+ goto err;
+
+ return 0;
+
+err:
+ clk_disable_unprepare(priv->clk);
+
+ return ret;
+}
+
+static int alt_fpga_bridge_remove(struct platform_device *pdev)
+{
+ struct fpga_bridge *bridge = platform_get_drvdata(pdev);
+ struct altera_hps2fpga_data *priv = bridge->priv;
+
+ fpga_bridge_unregister(&pdev->dev);
+
+ clk_disable_unprepare(priv->clk);
+
+ return 0;
+}
+
+MODULE_DEVICE_TABLE(of, altera_fpga_of_match);
+
+static struct platform_driver alt_fpga_bridge_driver = {
+ .probe = alt_fpga_bridge_probe,
+ .remove = alt_fpga_bridge_remove,
+ .driver = {
+ .name = "altera_hps2fpga_bridge",
+ .of_match_table = of_match_ptr(altera_fpga_of_match),
+ },
+};
+
+module_platform_driver(alt_fpga_bridge_driver);
+
+MODULE_DESCRIPTION("Altera SoCFPGA HPS to FPGA Bridge");
+MODULE_AUTHOR("Alan Tull <atull@opensource.altera.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/fpga/fpga-bridge.c b/drivers/fpga/fpga-bridge.c
new file mode 100644
index 000000000000..33ee83e6373c
--- /dev/null
+++ b/drivers/fpga/fpga-bridge.c
@@ -0,0 +1,395 @@
+/*
+ * FPGA Bridge Framework Driver
+ *
+ * Copyright (C) 2013-2016 Altera Corporation, All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#include <linux/fpga/fpga-bridge.h>
+#include <linux/idr.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+
+static DEFINE_IDA(fpga_bridge_ida);
+static struct class *fpga_bridge_class;
+
+/* Lock for adding/removing bridges to/from linked lists */
+static DEFINE_SPINLOCK(bridge_list_lock);
+
+static int fpga_bridge_of_node_match(struct device *dev, const void *data)
+{
+ return dev->of_node == data;
+}
+
+/**
+ * fpga_bridge_enable - Enable transactions on the bridge
+ *
+ * @bridge: FPGA bridge
+ *
+ * Return: 0 for success, error code otherwise.
+ */
+int fpga_bridge_enable(struct fpga_bridge *bridge)
+{
+ dev_dbg(&bridge->dev, "enable\n");
+
+ if (bridge->br_ops && bridge->br_ops->enable_set)
+ return bridge->br_ops->enable_set(bridge, 1);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(fpga_bridge_enable);
+
+/**
+ * fpga_bridge_disable - Disable transactions on the bridge
+ *
+ * @bridge: FPGA bridge
+ *
+ * Return: 0 for success, error code otherwise.
+ */
+int fpga_bridge_disable(struct fpga_bridge *bridge)
+{
+ dev_dbg(&bridge->dev, "disable\n");
+
+ if (bridge->br_ops && bridge->br_ops->enable_set)
+ return bridge->br_ops->enable_set(bridge, 0);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(fpga_bridge_disable);
+
+/**
+ * of_fpga_bridge_get - get an exclusive reference to an FPGA bridge
+ *
+ * @np: node pointer of a FPGA bridge
+ * @info: fpga image specific information
+ *
+ * Return fpga_bridge struct if successful.
+ * Return -EBUSY if someone already has a reference to the bridge.
+ * Return -ENODEV if @np is not an FPGA bridge.
+ */
+struct fpga_bridge *of_fpga_bridge_get(struct device_node *np,
+ struct fpga_image_info *info)
+{
+ struct device *dev;
+ struct fpga_bridge *bridge;
+ int ret = -ENODEV;
+
+ dev = class_find_device(fpga_bridge_class, NULL, np,
+ fpga_bridge_of_node_match);
+ if (!dev)
+ goto err_dev;
+
+ bridge = to_fpga_bridge(dev);
+ if (!bridge)
+ goto err_dev;
+
+ bridge->info = info;
+
+ if (!mutex_trylock(&bridge->mutex)) {
+ ret = -EBUSY;
+ goto err_dev;
+ }
+
+ if (!try_module_get(dev->parent->driver->owner))
+ goto err_ll_mod;
+
+ dev_dbg(&bridge->dev, "get\n");
+
+ return bridge;
+
+err_ll_mod:
+ mutex_unlock(&bridge->mutex);
+err_dev:
+ put_device(dev);
+ return ERR_PTR(ret);
+}
+EXPORT_SYMBOL_GPL(of_fpga_bridge_get);
+
+/**
+ * fpga_bridge_put - release a reference to a bridge
+ *
+ * @bridge: FPGA bridge
+ */
+void fpga_bridge_put(struct fpga_bridge *bridge)
+{
+ dev_dbg(&bridge->dev, "put\n");
+
+ bridge->info = NULL;
+ module_put(bridge->dev.parent->driver->owner);
+ mutex_unlock(&bridge->mutex);
+ put_device(&bridge->dev);
+}
+EXPORT_SYMBOL_GPL(fpga_bridge_put);
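+
+/*
+ * Minimal usage sketch for the get/put pairing above; @np is assumed to be
+ * a bridge node and a NULL fpga_image_info is assumed acceptable here:
+ *
+ *     struct fpga_bridge *br = of_fpga_bridge_get(np, NULL);
+ *
+ *     if (!IS_ERR(br)) {
+ *         fpga_bridge_disable(br);
+ *         ... reprogram the FPGA ...
+ *         fpga_bridge_enable(br);
+ *         fpga_bridge_put(br);
+ *     }
+ */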
+
+/**
+ * fpga_bridges_enable - enable bridges in a list
+ * @bridge_list: list of FPGA bridges
+ *
+ * Enable each bridge in the list. If list is empty, do nothing.
+ *
+ * Return 0 for success or empty bridge list; return error code otherwise.
+ */
+int fpga_bridges_enable(struct list_head *bridge_list)
+{
+ struct fpga_bridge *bridge;
+ struct list_head *node;
+ int ret;
+
+ list_for_each(node, bridge_list) {
+ bridge = list_entry(node, struct fpga_bridge, node);
+ ret = fpga_bridge_enable(bridge);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(fpga_bridges_enable);
+
+/**
+ * fpga_bridges_disable - disable bridges in a list
+ *
+ * @bridge_list: list of FPGA bridges
+ *
+ * Disable each bridge in the list. If list is empty, do nothing.
+ *
+ * Return 0 for success or empty bridge list; return error code otherwise.
+ */
+int fpga_bridges_disable(struct list_head *bridge_list)
+{
+ struct fpga_bridge *bridge;
+ struct list_head *node;
+ int ret;
+
+ list_for_each(node, bridge_list) {
+ bridge = list_entry(node, struct fpga_bridge, node);
+ ret = fpga_bridge_disable(bridge);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(fpga_bridges_disable);
+
+/**
+ * fpga_bridges_put - put bridges
+ *
+ * @bridge_list: list of FPGA bridges
+ *
+ * For each bridge in the list, put the bridge and remove it from the list.
+ * If list is empty, do nothing.
+ */
+void fpga_bridges_put(struct list_head *bridge_list)
+{
+ struct fpga_bridge *bridge;
+ struct list_head *node, *next;
+ unsigned long flags;
+
+ list_for_each_safe(node, next, bridge_list) {
+ bridge = list_entry(node, struct fpga_bridge, node);
+
+ fpga_bridge_put(bridge);
+
+ spin_lock_irqsave(&bridge_list_lock, flags);
+ list_del(&bridge->node);
+ spin_unlock_irqrestore(&bridge_list_lock, flags);
+ }
+}
+EXPORT_SYMBOL_GPL(fpga_bridges_put);
+
+/**
+ * fpga_bridge_get_to_list - get a bridge, add it to a list
+ *
+ * @np: node pointer of a FPGA bridge
+ * @info: fpga image specific information
+ * @bridge_list: list of FPGA bridges
+ *
+ * Get an exclusive reference to the bridge and add it to the list.
+ *
+ * Return 0 for success, error code from of_fpga_bridge_get() otherwise.
+ */
+int fpga_bridge_get_to_list(struct device_node *np,
+ struct fpga_image_info *info,
+ struct list_head *bridge_list)
+{
+ struct fpga_bridge *bridge;
+ unsigned long flags;
+
+ bridge = of_fpga_bridge_get(np, info);
+ if (IS_ERR(bridge))
+ return PTR_ERR(bridge);
+
+ spin_lock_irqsave(&bridge_list_lock, flags);
+ list_add(&bridge->node, bridge_list);
+ spin_unlock_irqrestore(&bridge_list_lock, flags);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(fpga_bridge_get_to_list);
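+
+/*
+ * Sketch of the list-based flow (hypothetical caller; @np and @info come
+ * from the caller's context):
+ *
+ *     LIST_HEAD(bridge_list);
+ *
+ *     if (!fpga_bridge_get_to_list(np, info, &bridge_list)) {
+ *         fpga_bridges_disable(&bridge_list);
+ *         ... reprogram the FPGA ...
+ *         fpga_bridges_enable(&bridge_list);
+ *         fpga_bridges_put(&bridge_list);
+ *     }
+ */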
+
+static ssize_t name_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct fpga_bridge *bridge = to_fpga_bridge(dev);
+
+ return sprintf(buf, "%s\n", bridge->name);
+}
+
+static ssize_t state_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct fpga_bridge *bridge = to_fpga_bridge(dev);
+ int enable = 1;
+
+ if (bridge->br_ops && bridge->br_ops->enable_show)
+ enable = bridge->br_ops->enable_show(bridge);
+
+ return sprintf(buf, "%s\n", enable ? "enabled" : "disabled");
+}
+
+static DEVICE_ATTR_RO(name);
+static DEVICE_ATTR_RO(state);
+
+static struct attribute *fpga_bridge_attrs[] = {
+ &dev_attr_name.attr,
+ &dev_attr_state.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(fpga_bridge);
+
+/**
+ * fpga_bridge_register - register an FPGA bridge driver
+ * @dev: FPGA bridge device from pdev
+ * @name: FPGA bridge name
+ * @br_ops: pointer to structure of fpga bridge ops
+ * @priv: FPGA bridge private data
+ *
+ * Return: 0 for success, error code otherwise.
+ */
+int fpga_bridge_register(struct device *dev, const char *name,
+ const struct fpga_bridge_ops *br_ops, void *priv)
+{
+ struct fpga_bridge *bridge;
+ int id, ret = 0;
+
+ if (!name || !strlen(name)) {
+ dev_err(dev, "Attempt to register with no name!\n");
+ return -EINVAL;
+ }
+
+ bridge = kzalloc(sizeof(*bridge), GFP_KERNEL);
+ if (!bridge)
+ return -ENOMEM;
+
+ id = ida_simple_get(&fpga_bridge_ida, 0, 0, GFP_KERNEL);
+ if (id < 0) {
+ ret = id;
+ goto error_kfree;
+ }
+
+ mutex_init(&bridge->mutex);
+ INIT_LIST_HEAD(&bridge->node);
+
+ bridge->name = name;
+ bridge->br_ops = br_ops;
+ bridge->priv = priv;
+
+ device_initialize(&bridge->dev);
+ bridge->dev.class = fpga_bridge_class;
+ bridge->dev.parent = dev;
+ bridge->dev.of_node = dev->of_node;
+ bridge->dev.id = id;
+ dev_set_drvdata(dev, bridge);
+
+ ret = dev_set_name(&bridge->dev, "br%d", id);
+ if (ret)
+ goto error_device;
+
+ ret = device_add(&bridge->dev);
+ if (ret)
+ goto error_device;
+
+ of_platform_populate(dev->of_node, NULL, NULL, dev);
+
+ dev_info(bridge->dev.parent, "fpga bridge [%s] registered\n",
+ bridge->name);
+
+ return 0;
+
+error_device:
+ ida_simple_remove(&fpga_bridge_ida, id);
+error_kfree:
+ kfree(bridge);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(fpga_bridge_register);
+
+/**
+ * fpga_bridge_unregister - unregister an FPGA bridge driver
+ * @dev: FPGA bridge device from pdev
+ */
+void fpga_bridge_unregister(struct device *dev)
+{
+ struct fpga_bridge *bridge = dev_get_drvdata(dev);
+
+ /*
+ * If the low level driver provides a method for putting bridge into
+ * a desired state upon unregister, do it.
+ */
+ if (bridge->br_ops && bridge->br_ops->fpga_bridge_remove)
+ bridge->br_ops->fpga_bridge_remove(bridge);
+
+ device_unregister(&bridge->dev);
+}
+EXPORT_SYMBOL_GPL(fpga_bridge_unregister);
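+
+/*
+ * For a low level driver, registration follows the platform driver shape
+ * used by altera-hps2fpga.c; a minimal sketch (my_br_ops and my_priv are
+ * placeholders, not symbols defined in this patch):
+ *
+ *     static int my_br_probe(struct platform_device *pdev)
+ *     {
+ *         return fpga_bridge_register(&pdev->dev, "my-bridge",
+ *                                     &my_br_ops, my_priv);
+ *     }
+ *
+ *     static int my_br_remove(struct platform_device *pdev)
+ *     {
+ *         fpga_bridge_unregister(&pdev->dev);
+ *         return 0;
+ *     }
+ */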
+
+static void fpga_bridge_dev_release(struct device *dev)
+{
+ struct fpga_bridge *bridge = to_fpga_bridge(dev);
+
+ ida_simple_remove(&fpga_bridge_ida, bridge->dev.id);
+ kfree(bridge);
+}
+
+static int __init fpga_bridge_dev_init(void)
+{
+ fpga_bridge_class = class_create(THIS_MODULE, "fpga_bridge");
+ if (IS_ERR(fpga_bridge_class))
+ return PTR_ERR(fpga_bridge_class);
+
+ fpga_bridge_class->dev_groups = fpga_bridge_groups;
+ fpga_bridge_class->dev_release = fpga_bridge_dev_release;
+
+ return 0;
+}
+
+static void __exit fpga_bridge_dev_exit(void)
+{
+ class_destroy(fpga_bridge_class);
+ ida_destroy(&fpga_bridge_ida);
+}
+
+MODULE_DESCRIPTION("FPGA Bridge Driver");
+MODULE_AUTHOR("Alan Tull <atull@opensource.altera.com>");
+MODULE_LICENSE("GPL v2");
+
+subsys_initcall(fpga_bridge_dev_init);
+module_exit(fpga_bridge_dev_exit);
diff --git a/drivers/fpga/fpga-mgr.c b/drivers/fpga/fpga-mgr.c
index 953dc9195937..f0a69d3e60a5 100644
--- a/drivers/fpga/fpga-mgr.c
+++ b/drivers/fpga/fpga-mgr.c
@@ -32,19 +32,20 @@ static struct class *fpga_mgr_class;
/**
* fpga_mgr_buf_load - load fpga from image in buffer
* @mgr: fpga manager
- * @flags: flags setting fpga confuration modes
+ * @info: fpga image specific information
* @buf: buffer contain fpga image
* @count: byte count of buf
*
* Step the low level fpga manager through the device-specific steps of getting
* an FPGA ready to be configured, writing the image to it, then doing whatever
* post-configuration steps necessary. This code assumes the caller got the
- * mgr pointer from of_fpga_mgr_get() and checked that it is not an error code.
+ * mgr pointer from of_fpga_mgr_get() or fpga_mgr_get() and checked that it is
+ * not an error code.
*
* Return: 0 on success, negative error code otherwise.
*/
-int fpga_mgr_buf_load(struct fpga_manager *mgr, u32 flags, const char *buf,
- size_t count)
+int fpga_mgr_buf_load(struct fpga_manager *mgr, struct fpga_image_info *info,
+ const char *buf, size_t count)
{
struct device *dev = &mgr->dev;
int ret;
@@ -52,10 +53,12 @@ int fpga_mgr_buf_load(struct fpga_manager *mgr, u32 flags, const char *buf,
/*
* Call the low level driver's write_init function. This will do the
* device-specific things to get the FPGA into the state where it is
- * ready to receive an FPGA image.
+ * ready to receive an FPGA image. The low level driver only gets to
+ * see the first initial_header_size bytes in the buffer.
*/
mgr->state = FPGA_MGR_STATE_WRITE_INIT;
- ret = mgr->mops->write_init(mgr, flags, buf, count);
+ ret = mgr->mops->write_init(mgr, info, buf,
+ min(mgr->mops->initial_header_size, count));
if (ret) {
dev_err(dev, "Error preparing FPGA for writing\n");
mgr->state = FPGA_MGR_STATE_WRITE_INIT_ERR;
@@ -78,7 +81,7 @@ int fpga_mgr_buf_load(struct fpga_manager *mgr, u32 flags, const char *buf,
* steps to finish and set the FPGA into operating mode.
*/
mgr->state = FPGA_MGR_STATE_WRITE_COMPLETE;
- ret = mgr->mops->write_complete(mgr, flags);
+ ret = mgr->mops->write_complete(mgr, info);
if (ret) {
dev_err(dev, "Error after writing image data to FPGA\n");
mgr->state = FPGA_MGR_STATE_WRITE_COMPLETE_ERR;
@@ -93,17 +96,19 @@ EXPORT_SYMBOL_GPL(fpga_mgr_buf_load);
/**
* fpga_mgr_firmware_load - request firmware and load to fpga
* @mgr: fpga manager
- * @flags: flags setting fpga confuration modes
+ * @info: fpga image specific information
* @image_name: name of image file on the firmware search path
*
* Request an FPGA image using the firmware class, then write out to the FPGA.
* Update the state before each step to provide info on what step failed if
* there is a failure. This code assumes the caller got the mgr pointer
- * from of_fpga_mgr_get() and checked that it is not an error code.
+ * from of_fpga_mgr_get() or fpga_mgr_get() and checked that it is not an error
+ * code.
*
* Return: 0 on success, negative error code otherwise.
*/
-int fpga_mgr_firmware_load(struct fpga_manager *mgr, u32 flags,
+int fpga_mgr_firmware_load(struct fpga_manager *mgr,
+ struct fpga_image_info *info,
const char *image_name)
{
struct device *dev = &mgr->dev;
@@ -121,7 +126,7 @@ int fpga_mgr_firmware_load(struct fpga_manager *mgr, u32 flags,
return ret;
}
- ret = fpga_mgr_buf_load(mgr, flags, fw->data, fw->size);
+ ret = fpga_mgr_buf_load(mgr, info, fw->data, fw->size);
release_firmware(fw);
@@ -181,30 +186,11 @@ static struct attribute *fpga_mgr_attrs[] = {
};
ATTRIBUTE_GROUPS(fpga_mgr);
-static int fpga_mgr_of_node_match(struct device *dev, const void *data)
-{
- return dev->of_node == data;
-}
-
-/**
- * of_fpga_mgr_get - get an exclusive reference to a fpga mgr
- * @node: device node
- *
- * Given a device node, get an exclusive reference to a fpga mgr.
- *
- * Return: fpga manager struct or IS_ERR() condition containing error code.
- */
-struct fpga_manager *of_fpga_mgr_get(struct device_node *node)
+struct fpga_manager *__fpga_mgr_get(struct device *dev)
{
struct fpga_manager *mgr;
- struct device *dev;
int ret = -ENODEV;
- dev = class_find_device(fpga_mgr_class, NULL, node,
- fpga_mgr_of_node_match);
- if (!dev)
- return ERR_PTR(-ENODEV);
-
mgr = to_fpga_manager(dev);
if (!mgr)
goto err_dev;
@@ -226,6 +212,55 @@ err_dev:
put_device(dev);
return ERR_PTR(ret);
}
+
+static int fpga_mgr_dev_match(struct device *dev, const void *data)
+{
+ return dev->parent == data;
+}
+
+/**
+ * fpga_mgr_get - get an exclusive reference to an FPGA manager
+ * @dev: parent device that fpga mgr was registered with
+ *
+ * Given a device, get an exclusive reference to an FPGA manager.
+ *
+ * Return: fpga manager struct or IS_ERR() condition containing error code.
+ */
+struct fpga_manager *fpga_mgr_get(struct device *dev)
+{
+ struct device *mgr_dev = class_find_device(fpga_mgr_class, NULL, dev,
+ fpga_mgr_dev_match);
+ if (!mgr_dev)
+ return ERR_PTR(-ENODEV);
+
+ return __fpga_mgr_get(mgr_dev);
+}
+EXPORT_SYMBOL_GPL(fpga_mgr_get);
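+
+/*
+ * Usage sketch for the new device-based lookup; parent_dev is whatever
+ * device fpga_mgr_register() was called with, @info is caller-filled, and
+ * "soc_image.rbf" is a made-up firmware name:
+ *
+ *     struct fpga_manager *mgr = fpga_mgr_get(parent_dev);
+ *
+ *     if (!IS_ERR(mgr)) {
+ *         fpga_mgr_firmware_load(mgr, info, "soc_image.rbf");
+ *         fpga_mgr_put(mgr);
+ *     }
+ */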
+
+static int fpga_mgr_of_node_match(struct device *dev, const void *data)
+{
+ return dev->of_node == data;
+}
+
+/**
+ * of_fpga_mgr_get - get an exclusive reference to an FPGA manager
+ * @node: device node
+ *
+ * Given a device node, get an exclusive reference to an FPGA manager.
+ *
+ * Return: fpga manager struct or IS_ERR() condition containing error code.
+ */
+struct fpga_manager *of_fpga_mgr_get(struct device_node *node)
+{
+ struct device *dev;
+
+ dev = class_find_device(fpga_mgr_class, NULL, node,
+ fpga_mgr_of_node_match);
+ if (!dev)
+ return ERR_PTR(-ENODEV);
+
+ return __fpga_mgr_get(dev);
+}
EXPORT_SYMBOL_GPL(of_fpga_mgr_get);
/**
diff --git a/drivers/fpga/fpga-region.c b/drivers/fpga/fpga-region.c
new file mode 100644
index 000000000000..3222fdbad75a
--- /dev/null
+++ b/drivers/fpga/fpga-region.c
@@ -0,0 +1,603 @@
+/*
+ * FPGA Region - Device Tree support for FPGA programming under Linux
+ *
+ * Copyright (C) 2013-2016 Altera Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/fpga/fpga-bridge.h>
+#include <linux/fpga/fpga-mgr.h>
+#include <linux/idr.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+
+/**
+ * struct fpga_region - FPGA Region structure
+ * @dev: FPGA Region device
+ * @mutex: enforces exclusive reference to region
+ * @bridge_list: list of FPGA bridges specified in region
+ * @info: fpga image specific information
+ */
+struct fpga_region {
+ struct device dev;
+ struct mutex mutex; /* for exclusive reference to region */
+ struct list_head bridge_list;
+ struct fpga_image_info *info;
+};
+
+#define to_fpga_region(d) container_of(d, struct fpga_region, dev)
+
+static DEFINE_IDA(fpga_region_ida);
+static struct class *fpga_region_class;
+
+static const struct of_device_id fpga_region_of_match[] = {
+ { .compatible = "fpga-region", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, fpga_region_of_match);
+
+static int fpga_region_of_node_match(struct device *dev, const void *data)
+{
+ return dev->of_node == data;
+}
+
+/**
+ * fpga_region_find - find FPGA region
+ * @np: device node of FPGA Region
+ * Caller will need to put_device(&region->dev) when done.
+ * Returns FPGA Region struct or NULL
+ */
+static struct fpga_region *fpga_region_find(struct device_node *np)
+{
+ struct device *dev;
+
+ dev = class_find_device(fpga_region_class, NULL, np,
+ fpga_region_of_node_match);
+ if (!dev)
+ return NULL;
+
+ return to_fpga_region(dev);
+}
+
+/**
+ * fpga_region_get - get an exclusive reference to an FPGA region
+ * @region: FPGA Region struct
+ *
+ * Caller should call fpga_region_put() when done with region.
+ *
+ * Return fpga_region struct if successful.
+ * Return -EBUSY if someone already has a reference to the region.
+ * Return -ENODEV if the region's parent driver module cannot be taken.
+ */
+static struct fpga_region *fpga_region_get(struct fpga_region *region)
+{
+ struct device *dev = &region->dev;
+
+ if (!mutex_trylock(&region->mutex)) {
+ dev_dbg(dev, "%s: FPGA Region already in use\n", __func__);
+ return ERR_PTR(-EBUSY);
+ }
+
+ get_device(dev);
+ of_node_get(dev->of_node);
+ if (!try_module_get(dev->parent->driver->owner)) {
+ of_node_put(dev->of_node);
+ put_device(dev);
+ mutex_unlock(&region->mutex);
+ return ERR_PTR(-ENODEV);
+ }
+
+ dev_dbg(&region->dev, "get\n");
+
+ return region;
+}
+
+/**
+ * fpga_region_put - release a reference to a region
+ *
+ * @region: FPGA region
+ */
+static void fpga_region_put(struct fpga_region *region)
+{
+ struct device *dev = &region->dev;
+
+ dev_dbg(&region->dev, "put\n");
+
+ module_put(dev->parent->driver->owner);
+ of_node_put(dev->of_node);
+ put_device(dev);
+ mutex_unlock(&region->mutex);
+}
+
+/**
+ * fpga_region_get_manager - get exclusive reference for FPGA manager
+ * @region: FPGA region
+ *
+ * Get FPGA Manager from "fpga-mgr" property or from ancestor region.
+ *
+ * Caller should call fpga_mgr_put() when done with manager.
+ *
+ * Return: fpga manager struct or IS_ERR() condition containing error code.
+ */
+static struct fpga_manager *fpga_region_get_manager(struct fpga_region *region)
+{
+ struct device *dev = &region->dev;
+ struct device_node *np = dev->of_node;
+ struct device_node *mgr_node;
+ struct fpga_manager *mgr;
+
+ of_node_get(np);
+ while (np) {
+ if (of_device_is_compatible(np, "fpga-region")) {
+ mgr_node = of_parse_phandle(np, "fpga-mgr", 0);
+ if (mgr_node) {
+ mgr = of_fpga_mgr_get(mgr_node);
+ of_node_put(np);
+ return mgr;
+ }
+ }
+ np = of_get_next_parent(np);
+ }
+ of_node_put(np);
+
+ return ERR_PTR(-EINVAL);
+}
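+
+/*
+ * Illustrative device tree shape for the walk above (names invented):
+ *
+ *     base_region: fpga-region {
+ *         compatible = "fpga-region";
+ *         fpga-mgr = <&fpga_mgr0>;
+ *
+ *         child_region: fpga-region {
+ *             compatible = "fpga-region";
+ *         };
+ *     };
+ *
+ * A lookup on child_region finds no local "fpga-mgr" phandle and borrows
+ * the manager from base_region.
+ */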
+
+/**
+ * fpga_region_get_bridges - create a list of bridges
+ * @region: FPGA region
+ * @overlay: device node of the overlay
+ *
+ * Create a list of bridges including the parent bridge and the bridges
+ * specified by "fpga-bridges" property. Note that the
+ * fpga_bridges_enable/disable/put functions are all fine with an empty list
+ * if that happens.
+ *
+ * Caller should call fpga_bridges_put(&region->bridge_list) when
+ * done with the bridges.
+ *
+ * Return 0 for success (even if there are no bridges specified)
+ * or -EBUSY if any of the bridges are in use.
+ */
+static int fpga_region_get_bridges(struct fpga_region *region,
+ struct device_node *overlay)
+{
+ struct device *dev = &region->dev;
+ struct device_node *region_np = dev->of_node;
+ struct device_node *br, *np, *parent_br = NULL;
+ int i, ret;
+
+ /* If parent is a bridge, add to list */
+ ret = fpga_bridge_get_to_list(region_np->parent, region->info,
+ &region->bridge_list);
+ if (ret == -EBUSY)
+ return ret;
+
+ if (!ret)
+ parent_br = region_np->parent;
+
+ /* If overlay has a list of bridges, use it. */
+ br = of_parse_phandle(overlay, "fpga-bridges", 0);
+ if (br) {
+ of_node_put(br);
+ np = overlay;
+ } else {
+ np = region_np;
+ }
+
+ for (i = 0; ; i++) {
+ br = of_parse_phandle(np, "fpga-bridges", i);
+ if (!br)
+ break;
+
+ /* If parent bridge is in list, skip it. */
+ if (br == parent_br) {
+ of_node_put(br);
+ continue;
+ }
+
+ /* If node is a bridge, get it and add to list */
+ ret = fpga_bridge_get_to_list(br, region->info,
+ &region->bridge_list);
+ of_node_put(br);
+
+ /* If any of the bridges are in use, give up */
+ if (ret == -EBUSY) {
+ fpga_bridges_put(&region->bridge_list);
+ return -EBUSY;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * fpga_region_program_fpga - program FPGA
+ * @region: FPGA region
+ * @firmware_name: name of FPGA image firmware file
+ * @overlay: device node of the overlay
+ *
+ * Program an FPGA using information in the device tree.
+ * Function assumes that there is a firmware-name property.
+ *
+ * Return 0 for success or negative error code.
+ */
+static int fpga_region_program_fpga(struct fpga_region *region,
+ const char *firmware_name,
+ struct device_node *overlay)
+{
+ struct fpga_manager *mgr;
+ int ret;
+
+ region = fpga_region_get(region);
+ if (IS_ERR(region)) {
+ pr_err("failed to get fpga region\n");
+ return PTR_ERR(region);
+ }
+
+ mgr = fpga_region_get_manager(region);
+ if (IS_ERR(mgr)) {
+ pr_err("failed to get fpga region manager\n");
+ ret = PTR_ERR(mgr);
+ goto err_put_region;
+ }
+
+ ret = fpga_region_get_bridges(region, overlay);
+ if (ret) {
+ pr_err("failed to get fpga region bridges\n");
+ goto err_put_mgr;
+ }
+
+ ret = fpga_bridges_disable(&region->bridge_list);
+ if (ret) {
+ pr_err("failed to disable region bridges\n");
+ goto err_put_br;
+ }
+
+ ret = fpga_mgr_firmware_load(mgr, region->info, firmware_name);
+ if (ret) {
+ pr_err("failed to load fpga image\n");
+ goto err_put_br;
+ }
+
+ ret = fpga_bridges_enable(&region->bridge_list);
+ if (ret) {
+ pr_err("failed to enable region bridges\n");
+ goto err_put_br;
+ }
+
+ fpga_mgr_put(mgr);
+ fpga_region_put(region);
+
+ return 0;
+
+err_put_br:
+ fpga_bridges_put(&region->bridge_list);
+err_put_mgr:
+ fpga_mgr_put(mgr);
+err_put_region:
+ fpga_region_put(region);
+
+ return ret;
+}
+
+/**
+ * child_regions_with_firmware
+ * @overlay: device node of the overlay
+ *
+ * If the overlay adds child FPGA regions, they are not allowed to have
+ * a firmware-name property.
+ *
+ * Return 0 for OK or -EINVAL if child FPGA region adds firmware-name.
+ */
+static int child_regions_with_firmware(struct device_node *overlay)
+{
+ struct device_node *child_region;
+ const char *child_firmware_name;
+ int ret = 0;
+
+ of_node_get(overlay);
+
+ child_region = of_find_matching_node(overlay, fpga_region_of_match);
+ while (child_region) {
+ if (!of_property_read_string(child_region, "firmware-name",
+ &child_firmware_name)) {
+ ret = -EINVAL;
+ break;
+ }
+ child_region = of_find_matching_node(child_region,
+ fpga_region_of_match);
+ }
+
+ if (ret)
+ pr_err("firmware-name not allowed in child FPGA region: %s\n",
+ child_region->full_name);
+
+ of_node_put(child_region);
+
+ return ret;
+}
+
+/**
+ * fpga_region_notify_pre_apply - pre-apply overlay notification
+ *
+ * @region: FPGA region that the overlay was applied to
+ * @nd: overlay notification data
+ *
+ * Called when an overlay targeting an FPGA Region is about to be
+ * applied. Function will check the properties that will be added to the FPGA
+ * region. If the checks pass, it will program the FPGA.
+ *
+ * The checks are:
+ * The overlay must add either firmware-name or external-fpga-config property
+ * to the FPGA Region.
+ *
+ * firmware-name : program the FPGA
+ * external-fpga-config : FPGA is already programmed
+ *
+ * The overlay can add other FPGA regions, but child FPGA regions cannot have a
+ * firmware-name property since those regions don't exist yet.
+ *
+ * If the overlay breaks the rules, the notifier returns an error and the
+ * overlay is rejected before it goes into the main tree.
+ *
+ * Returns 0 for success or negative error code for failure.
+ */
+static int fpga_region_notify_pre_apply(struct fpga_region *region,
+ struct of_overlay_notify_data *nd)
+{
+ const char *firmware_name = NULL;
+ struct fpga_image_info *info;
+ int ret;
+
+ info = devm_kzalloc(&region->dev, sizeof(*info), GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+
+ region->info = info;
+
+ /* Reject overlay if child FPGA Regions have firmware-name property */
+ ret = child_regions_with_firmware(nd->overlay);
+ if (ret)
+ return ret;
+
+ /* Read FPGA region properties from the overlay */
+ if (of_property_read_bool(nd->overlay, "partial-fpga-config"))
+ info->flags |= FPGA_MGR_PARTIAL_RECONFIG;
+
+ if (of_property_read_bool(nd->overlay, "external-fpga-config"))
+ info->flags |= FPGA_MGR_EXTERNAL_CONFIG;
+
+ of_property_read_string(nd->overlay, "firmware-name", &firmware_name);
+
+ of_property_read_u32(nd->overlay, "region-unfreeze-timeout-us",
+ &info->enable_timeout_us);
+
+ of_property_read_u32(nd->overlay, "region-freeze-timeout-us",
+ &info->disable_timeout_us);
+
+ /* If FPGA was externally programmed, don't specify firmware */
+ if ((info->flags & FPGA_MGR_EXTERNAL_CONFIG) && firmware_name) {
+ pr_err("error: specified firmware and external-fpga-config");
+ return -EINVAL;
+ }
+
+ /* FPGA is already configured externally. We're done. */
+ if (info->flags & FPGA_MGR_EXTERNAL_CONFIG)
+ return 0;
+
+ /* If we got this far, we should be programming the FPGA */
+ if (!firmware_name) {
+ pr_err("should specify firmware-name or external-fpga-config\n");
+ return -EINVAL;
+ }
+
+ return fpga_region_program_fpga(region, firmware_name, nd->overlay);
+}
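+
+/*
+ * For illustration (property values are invented, not from this patch), an
+ * overlay accepted by the checks above might carry:
+ *
+ *     firmware-name = "soc_image.rbf";
+ *     partial-fpga-config;
+ *     region-unfreeze-timeout-us = <1000>;
+ *
+ * while an overlay combining firmware-name with external-fpga-config is
+ * rejected with -EINVAL.
+ */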
+
+/**
+ * fpga_region_notify_post_remove - post-remove overlay notification
+ *
+ * @region: FPGA region that was targeted by the overlay that was removed
+ * @nd: overlay notification data
+ *
+ * Called after an overlay has been removed if the overlay's target was an
+ * FPGA region.
+ */
+static void fpga_region_notify_post_remove(struct fpga_region *region,
+ struct of_overlay_notify_data *nd)
+{
+ fpga_bridges_disable(&region->bridge_list);
+ fpga_bridges_put(&region->bridge_list);
+ devm_kfree(&region->dev, region->info);
+ region->info = NULL;
+}
+
+/**
+ * of_fpga_region_notify - reconfig notifier for dynamic DT changes
+ * @nb: notifier block
+ * @action: notifier action
+ * @arg: reconfig data
+ *
+ * This notifier handles programming an FPGA when a "firmware-name" property is
+ * added to a fpga-region.
+ *
+ * Returns NOTIFY_OK or error if FPGA programming fails.
+ */
+static int of_fpga_region_notify(struct notifier_block *nb,
+ unsigned long action, void *arg)
+{
+ struct of_overlay_notify_data *nd = arg;
+ struct fpga_region *region;
+ int ret;
+
+ switch (action) {
+ case OF_OVERLAY_PRE_APPLY:
+ pr_debug("%s OF_OVERLAY_PRE_APPLY\n", __func__);
+ break;
+ case OF_OVERLAY_POST_APPLY:
+ pr_debug("%s OF_OVERLAY_POST_APPLY\n", __func__);
+ return NOTIFY_OK; /* not for us */
+ case OF_OVERLAY_PRE_REMOVE:
+ pr_debug("%s OF_OVERLAY_PRE_REMOVE\n", __func__);
+ return NOTIFY_OK; /* not for us */
+ case OF_OVERLAY_POST_REMOVE:
+ pr_debug("%s OF_OVERLAY_POST_REMOVE\n", __func__);
+ break;
+ default: /* should not happen */
+ return NOTIFY_OK;
+ }
+
+ region = fpga_region_find(nd->target);
+ if (!region)
+ return NOTIFY_OK;
+
+ ret = 0;
+ switch (action) {
+ case OF_OVERLAY_PRE_APPLY:
+ ret = fpga_region_notify_pre_apply(region, nd);
+ break;
+
+ case OF_OVERLAY_POST_REMOVE:
+ fpga_region_notify_post_remove(region, nd);
+ break;
+ }
+
+ put_device(&region->dev);
+
+ if (ret)
+ return notifier_from_errno(ret);
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block fpga_region_of_nb = {
+ .notifier_call = of_fpga_region_notify,
+};
+
+static int fpga_region_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ struct fpga_region *region;
+ int id, ret = 0;
+
+ region = kzalloc(sizeof(*region), GFP_KERNEL);
+ if (!region)
+ return -ENOMEM;
+
+ id = ida_simple_get(&fpga_region_ida, 0, 0, GFP_KERNEL);
+ if (id < 0) {
+ ret = id;
+ goto err_kfree;
+ }
+
+ mutex_init(&region->mutex);
+ INIT_LIST_HEAD(&region->bridge_list);
+
+ device_initialize(&region->dev);
+ region->dev.class = fpga_region_class;
+ region->dev.parent = dev;
+ region->dev.of_node = np;
+ region->dev.id = id;
+ dev_set_drvdata(dev, region);
+
+ ret = dev_set_name(&region->dev, "region%d", id);
+ if (ret)
+ goto err_remove;
+
+ ret = device_add(&region->dev);
+ if (ret)
+ goto err_remove;
+
+ of_platform_populate(np, fpga_region_of_match, NULL, &region->dev);
+
+ dev_info(dev, "FPGA Region probed\n");
+
+ return 0;
+
+err_remove:
+ ida_simple_remove(&fpga_region_ida, id);
+err_kfree:
+ kfree(region);
+
+ return ret;
+}
+
+static int fpga_region_remove(struct platform_device *pdev)
+{
+ struct fpga_region *region = platform_get_drvdata(pdev);
+
+ device_unregister(&region->dev);
+
+ return 0;
+}
+
+static struct platform_driver fpga_region_driver = {
+ .probe = fpga_region_probe,
+ .remove = fpga_region_remove,
+ .driver = {
+ .name = "fpga-region",
+ .of_match_table = of_match_ptr(fpga_region_of_match),
+ },
+};
+
+static void fpga_region_dev_release(struct device *dev)
+{
+ struct fpga_region *region = to_fpga_region(dev);
+
+ ida_simple_remove(&fpga_region_ida, region->dev.id);
+ kfree(region);
+}
+
+/**
+ * fpga_region_init - init function for fpga_region class
+ * Creates the fpga_region class and registers a reconfig notifier.
+ */
+static int __init fpga_region_init(void)
+{
+ int ret;
+
+ fpga_region_class = class_create(THIS_MODULE, "fpga_region");
+ if (IS_ERR(fpga_region_class))
+ return PTR_ERR(fpga_region_class);
+
+ fpga_region_class->dev_release = fpga_region_dev_release;
+
+ ret = of_overlay_notifier_register(&fpga_region_of_nb);
+ if (ret)
+ goto err_class;
+
+ ret = platform_driver_register(&fpga_region_driver);
+ if (ret)
+ goto err_plat;
+
+ return 0;
+
+err_plat:
+ of_overlay_notifier_unregister(&fpga_region_of_nb);
+err_class:
+ class_destroy(fpga_region_class);
+ ida_destroy(&fpga_region_ida);
+ return ret;
+}
+
+static void __exit fpga_region_exit(void)
+{
+ platform_driver_unregister(&fpga_region_driver);
+ of_overlay_notifier_unregister(&fpga_region_of_nb);
+ class_destroy(fpga_region_class);
+ ida_destroy(&fpga_region_ida);
+}
+
+subsys_initcall(fpga_region_init);
+module_exit(fpga_region_exit);
+
+MODULE_DESCRIPTION("FPGA Region");
+MODULE_AUTHOR("Alan Tull <atull@opensource.altera.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/fpga/socfpga-a10.c b/drivers/fpga/socfpga-a10.c
new file mode 100644
index 000000000000..f8770af0f6b5
--- /dev/null
+++ b/drivers/fpga/socfpga-a10.c
@@ -0,0 +1,557 @@
+/*
+ * FPGA Manager Driver for Altera Arria10 SoCFPGA
+ *
+ * Copyright (C) 2015-2016 Altera Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/delay.h>
+#include <linux/fpga/fpga-mgr.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/regmap.h>
+
+#define A10_FPGAMGR_DCLKCNT_OFST 0x08
+#define A10_FPGAMGR_DCLKSTAT_OFST 0x0c
+#define A10_FPGAMGR_IMGCFG_CTL_00_OFST 0x70
+#define A10_FPGAMGR_IMGCFG_CTL_01_OFST 0x74
+#define A10_FPGAMGR_IMGCFG_CTL_02_OFST 0x78
+#define A10_FPGAMGR_IMGCFG_STAT_OFST 0x80
+
+#define A10_FPGAMGR_DCLKSTAT_DCLKDONE BIT(0)
+
+#define A10_FPGAMGR_IMGCFG_CTL_00_S2F_NENABLE_NCONFIG BIT(0)
+#define A10_FPGAMGR_IMGCFG_CTL_00_S2F_NENABLE_NSTATUS BIT(1)
+#define A10_FPGAMGR_IMGCFG_CTL_00_S2F_NENABLE_CONDONE BIT(2)
+#define A10_FPGAMGR_IMGCFG_CTL_00_S2F_NCONFIG BIT(8)
+#define A10_FPGAMGR_IMGCFG_CTL_00_S2F_NSTATUS_OE BIT(16)
+#define A10_FPGAMGR_IMGCFG_CTL_00_S2F_CONDONE_OE BIT(24)
+
+#define A10_FPGAMGR_IMGCFG_CTL_01_S2F_NENABLE_CONFIG BIT(0)
+#define A10_FPGAMGR_IMGCFG_CTL_01_S2F_PR_REQUEST BIT(16)
+#define A10_FPGAMGR_IMGCFG_CTL_01_S2F_NCE BIT(24)
+
+#define A10_FPGAMGR_IMGCFG_CTL_02_EN_CFG_CTRL BIT(0)
+#define A10_FPGAMGR_IMGCFG_CTL_02_CDRATIO_MASK (BIT(16) | BIT(17))
+#define A10_FPGAMGR_IMGCFG_CTL_02_CDRATIO_SHIFT 16
+#define A10_FPGAMGR_IMGCFG_CTL_02_CFGWIDTH BIT(24)
+#define A10_FPGAMGR_IMGCFG_CTL_02_CFGWIDTH_SHIFT 24
+
+#define A10_FPGAMGR_IMGCFG_STAT_F2S_CRC_ERROR BIT(0)
+#define A10_FPGAMGR_IMGCFG_STAT_F2S_EARLY_USERMODE BIT(1)
+#define A10_FPGAMGR_IMGCFG_STAT_F2S_USERMODE BIT(2)
+#define A10_FPGAMGR_IMGCFG_STAT_F2S_NSTATUS_PIN BIT(4)
+#define A10_FPGAMGR_IMGCFG_STAT_F2S_CONDONE_PIN BIT(6)
+#define A10_FPGAMGR_IMGCFG_STAT_F2S_PR_READY BIT(9)
+#define A10_FPGAMGR_IMGCFG_STAT_F2S_PR_DONE BIT(10)
+#define A10_FPGAMGR_IMGCFG_STAT_F2S_PR_ERROR BIT(11)
+#define A10_FPGAMGR_IMGCFG_STAT_F2S_NCONFIG_PIN BIT(12)
+#define A10_FPGAMGR_IMGCFG_STAT_F2S_MSEL_MASK (BIT(16) | BIT(17) | BIT(18))
+#define A10_FPGAMGR_IMGCFG_STAT_F2S_MSEL_SHIFT 16
+
+/* FPGA CD Ratio Value */
+#define CDRATIO_x1 0x0
+#define CDRATIO_x2 0x1
+#define CDRATIO_x4 0x2
+#define CDRATIO_x8 0x3
+
+/* Configuration width 16/32 bit */
+#define CFGWDTH_32 1
+#define CFGWDTH_16 0
+
+/*
+ * struct a10_fpga_priv - private data for fpga manager
+ * @regmap: regmap for register access
+ * @fpga_data_addr: iomap for single address data register to FPGA
+ * @clk: clock
+ */
+struct a10_fpga_priv {
+ struct regmap *regmap;
+ void __iomem *fpga_data_addr;
+ struct clk *clk;
+};
+
+static bool socfpga_a10_fpga_writeable_reg(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case A10_FPGAMGR_DCLKCNT_OFST:
+ case A10_FPGAMGR_DCLKSTAT_OFST:
+ case A10_FPGAMGR_IMGCFG_CTL_00_OFST:
+ case A10_FPGAMGR_IMGCFG_CTL_01_OFST:
+ case A10_FPGAMGR_IMGCFG_CTL_02_OFST:
+ return true;
+ }
+ return false;
+}
+
+static bool socfpga_a10_fpga_readable_reg(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case A10_FPGAMGR_DCLKCNT_OFST:
+ case A10_FPGAMGR_DCLKSTAT_OFST:
+ case A10_FPGAMGR_IMGCFG_CTL_00_OFST:
+ case A10_FPGAMGR_IMGCFG_CTL_01_OFST:
+ case A10_FPGAMGR_IMGCFG_CTL_02_OFST:
+ case A10_FPGAMGR_IMGCFG_STAT_OFST:
+ return true;
+ }
+ return false;
+}
+
+static const struct regmap_config socfpga_a10_fpga_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .writeable_reg = socfpga_a10_fpga_writeable_reg,
+ .readable_reg = socfpga_a10_fpga_readable_reg,
+ .max_register = A10_FPGAMGR_IMGCFG_STAT_OFST,
+ .cache_type = REGCACHE_NONE,
+};
+
+/*
+ * from the register map description of cdratio in imgcfg_ctrl_02:
+ * Normal Configuration : 32bit Passive Parallel
+ * Partial Reconfiguration : 16bit Passive Parallel
+ */
+static void socfpga_a10_fpga_set_cfg_width(struct a10_fpga_priv *priv,
+ int width)
+{
+ width <<= A10_FPGAMGR_IMGCFG_CTL_02_CFGWIDTH_SHIFT;
+
+ regmap_update_bits(priv->regmap, A10_FPGAMGR_IMGCFG_CTL_02_OFST,
+ A10_FPGAMGR_IMGCFG_CTL_02_CFGWIDTH, width);
+}
+
+static void socfpga_a10_fpga_generate_dclks(struct a10_fpga_priv *priv,
+ u32 count)
+{
+ u32 val;
+
+ /* Clear any existing DONE status. */
+ regmap_write(priv->regmap, A10_FPGAMGR_DCLKSTAT_OFST,
+ A10_FPGAMGR_DCLKSTAT_DCLKDONE);
+
+ /* Issue the requested number of DCLKs. */
+ regmap_write(priv->regmap, A10_FPGAMGR_DCLKCNT_OFST, count);
+
+ /* Wait until the DCLK count is done. */
+ regmap_read_poll_timeout(priv->regmap, A10_FPGAMGR_DCLKSTAT_OFST, val,
+ val, 1, 100);
+
+ /* Clear DONE status. */
+ regmap_write(priv->regmap, A10_FPGAMGR_DCLKSTAT_OFST,
+ A10_FPGAMGR_DCLKSTAT_DCLKDONE);
+}
+
+#define RBF_ENCRYPTION_MODE_OFFSET 69
+#define RBF_DECOMPRESS_OFFSET 229
+
+static int socfpga_a10_fpga_encrypted(u32 *buf32, size_t buf32_size)
+{
+ if (buf32_size < RBF_ENCRYPTION_MODE_OFFSET + 1)
+ return -EINVAL;
+
+ /* Is the bitstream encrypted? */
+ return ((buf32[RBF_ENCRYPTION_MODE_OFFSET] >> 2) & 3) != 0;
+}
+
+static int socfpga_a10_fpga_compressed(u32 *buf32, size_t buf32_size)
+{
+ if (buf32_size < RBF_DECOMPRESS_OFFSET + 1)
+ return -EINVAL;
+
+ /* Is the bitstream compressed? */
+ return !((buf32[RBF_DECOMPRESS_OFFSET] >> 1) & 1);
+}
+
+static unsigned int socfpga_a10_fpga_get_cd_ratio(unsigned int cfg_width,
+ bool encrypt, bool compress)
+{
+ unsigned int cd_ratio;
+
+ /*
+ * cd ratio is dependent on cfg width and whether the bitstream
+ * is encrypted and/or compressed.
+ *
+ * | width | encr. | compr. | cd ratio |
+ * | 16 | 0 | 0 | 1 |
+ * | 16 | 0 | 1 | 4 |
+ * | 16 | 1 | 0 | 2 |
+ * | 16 | 1 | 1 | 4 |
+ * | 32 | 0 | 0 | 1 |
+ * | 32 | 0 | 1 | 8 |
+ * | 32 | 1 | 0 | 4 |
+ * | 32 | 1 | 1 | 8 |
+ */
+ if (!compress && !encrypt)
+ return CDRATIO_x1;
+
+ if (compress)
+ cd_ratio = CDRATIO_x4;
+ else
+ cd_ratio = CDRATIO_x2;
+
+ /* If 32 bit, double the cd ratio by incrementing the field */
+ if (cfg_width == CFGWDTH_32)
+ cd_ratio += 1;
+
+ return cd_ratio;
+}
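+
+/*
+ * Worked example for the table above: a compressed, unencrypted bitstream
+ * starts at CDRATIO_x4; CFGWDTH_32 bumps the ratio one step to CDRATIO_x8,
+ * matching the width=32, encr.=0, compr.=1 row.
+ */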
+
+static int socfpga_a10_fpga_set_cdratio(struct fpga_manager *mgr,
+ unsigned int cfg_width,
+ const char *buf, size_t count)
+{
+ struct a10_fpga_priv *priv = mgr->priv;
+ unsigned int cd_ratio;
+ int encrypt, compress;
+
+ encrypt = socfpga_a10_fpga_encrypted((u32 *)buf, count / 4);
+ if (encrypt < 0)
+ return -EINVAL;
+
+ compress = socfpga_a10_fpga_compressed((u32 *)buf, count / 4);
+ if (compress < 0)
+ return -EINVAL;
+
+ cd_ratio = socfpga_a10_fpga_get_cd_ratio(cfg_width, encrypt, compress);
+
+ regmap_update_bits(priv->regmap, A10_FPGAMGR_IMGCFG_CTL_02_OFST,
+ A10_FPGAMGR_IMGCFG_CTL_02_CDRATIO_MASK,
+ cd_ratio << A10_FPGAMGR_IMGCFG_CTL_02_CDRATIO_SHIFT);
+
+ return 0;
+}
+
+static u32 socfpga_a10_fpga_read_stat(struct a10_fpga_priv *priv)
+{
+ u32 val;
+
+ regmap_read(priv->regmap, A10_FPGAMGR_IMGCFG_STAT_OFST, &val);
+
+ return val;
+}
+
+static int socfpga_a10_fpga_wait_for_pr_ready(struct a10_fpga_priv *priv)
+{
+ u32 reg, i;
+
+ for (i = 0; i < 10 ; i++) {
+ reg = socfpga_a10_fpga_read_stat(priv);
+
+ if (reg & A10_FPGAMGR_IMGCFG_STAT_F2S_PR_ERROR)
+ return -EINVAL;
+
+ if (reg & A10_FPGAMGR_IMGCFG_STAT_F2S_PR_READY)
+ return 0;
+ }
+
+ return -ETIMEDOUT;
+}
+
+static int socfpga_a10_fpga_wait_for_pr_done(struct a10_fpga_priv *priv)
+{
+ u32 reg, i;
+
+ for (i = 0; i < 10 ; i++) {
+ reg = socfpga_a10_fpga_read_stat(priv);
+
+ if (reg & A10_FPGAMGR_IMGCFG_STAT_F2S_PR_ERROR)
+ return -EINVAL;
+
+ if (reg & A10_FPGAMGR_IMGCFG_STAT_F2S_PR_DONE)
+ return 0;
+ }
+
+ return -ETIMEDOUT;
+}
+
+/* Start the FPGA programming by initializing the FPGA Manager */
+static int socfpga_a10_fpga_write_init(struct fpga_manager *mgr,
+ struct fpga_image_info *info,
+ const char *buf, size_t count)
+{
+ struct a10_fpga_priv *priv = mgr->priv;
+ unsigned int cfg_width;
+ u32 msel, stat, mask;
+ int ret;
+
+ if (info->flags & FPGA_MGR_PARTIAL_RECONFIG) {
+ cfg_width = CFGWDTH_16;
+ } else {
+ dev_err(&mgr->dev, "Only partial reconfiguration supported.\n");
+ return -EINVAL;
+ }
+
+ /* Check for passive parallel (msel == 000 or 001) */
+ msel = socfpga_a10_fpga_read_stat(priv);
+ msel &= A10_FPGAMGR_IMGCFG_STAT_F2S_MSEL_MASK;
+ msel >>= A10_FPGAMGR_IMGCFG_STAT_F2S_MSEL_SHIFT;
+ if ((msel != 0) && (msel != 1)) {
+ dev_dbg(&mgr->dev, "Fail: invalid msel=%d\n", msel);
+ return -EINVAL;
+ }
+
+ /* Make sure no external devices are interfering */
+ stat = socfpga_a10_fpga_read_stat(priv);
+ mask = A10_FPGAMGR_IMGCFG_STAT_F2S_NCONFIG_PIN |
+ A10_FPGAMGR_IMGCFG_STAT_F2S_NSTATUS_PIN;
+ if ((stat & mask) != mask)
+ return -EINVAL;
+
+ /* Set cfg width */
+ socfpga_a10_fpga_set_cfg_width(priv, cfg_width);
+
+ /* Determine cd ratio from bitstream header and set cd ratio */
+ ret = socfpga_a10_fpga_set_cdratio(mgr, cfg_width, buf, count);
+ if (ret)
+ return ret;
+
+ /*
+ * Clear s2f_nce to enable chip select. Leave pr_request
+ * unasserted and override disabled.
+ */
+ regmap_write(priv->regmap, A10_FPGAMGR_IMGCFG_CTL_01_OFST,
+ A10_FPGAMGR_IMGCFG_CTL_01_S2F_NENABLE_CONFIG);
+
+ /* Set cfg_ctrl to enable s2f dclk and data */
+ regmap_update_bits(priv->regmap, A10_FPGAMGR_IMGCFG_CTL_02_OFST,
+ A10_FPGAMGR_IMGCFG_CTL_02_EN_CFG_CTRL,
+ A10_FPGAMGR_IMGCFG_CTL_02_EN_CFG_CTRL);
+
+ /*
+ * Disable overrides not needed for pr.
+ * s2f_config==1 leaves reset deasserted.
+ */
+ regmap_write(priv->regmap, A10_FPGAMGR_IMGCFG_CTL_00_OFST,
+ A10_FPGAMGR_IMGCFG_CTL_00_S2F_NENABLE_NCONFIG |
+ A10_FPGAMGR_IMGCFG_CTL_00_S2F_NENABLE_NSTATUS |
+ A10_FPGAMGR_IMGCFG_CTL_00_S2F_NENABLE_CONDONE |
+ A10_FPGAMGR_IMGCFG_CTL_00_S2F_NCONFIG);
+
+ /* Enable override for data, dclk, nce, and pr_request to CSS */
+ regmap_update_bits(priv->regmap, A10_FPGAMGR_IMGCFG_CTL_01_OFST,
+ A10_FPGAMGR_IMGCFG_CTL_01_S2F_NENABLE_CONFIG, 0);
+
+ /* Send some clocks to clear out any errors */
+ socfpga_a10_fpga_generate_dclks(priv, 256);
+
+ /* Assert pr_request */
+ regmap_update_bits(priv->regmap, A10_FPGAMGR_IMGCFG_CTL_01_OFST,
+ A10_FPGAMGR_IMGCFG_CTL_01_S2F_PR_REQUEST,
+ A10_FPGAMGR_IMGCFG_CTL_01_S2F_PR_REQUEST);
+
+ /* Provide 2048 DCLKs before starting the config data streaming. */
+ socfpga_a10_fpga_generate_dclks(priv, 0x7ff);
+
+ /* Wait for pr_ready */
+ return socfpga_a10_fpga_wait_for_pr_ready(priv);
+}
+
+/*
+ * write data to the FPGA data register
+ */
+static int socfpga_a10_fpga_write(struct fpga_manager *mgr, const char *buf,
+ size_t count)
+{
+ struct a10_fpga_priv *priv = mgr->priv;
+ u32 *buffer_32 = (u32 *)buf;
+ size_t i = 0;
+
+ if (!count)
+ return -EINVAL;
+
+ /* Write out the complete 32-bit chunks */
+ while (count >= sizeof(u32)) {
+ writel(buffer_32[i++], priv->fpga_data_addr);
+ count -= sizeof(u32);
+ }
+
+ /* Write out remaining non 32-bit chunks */
+ switch (count) {
+ case 3:
+ writel(buffer_32[i++] & 0x00ffffff, priv->fpga_data_addr);
+ break;
+ case 2:
+ writel(buffer_32[i++] & 0x0000ffff, priv->fpga_data_addr);
+ break;
+ case 1:
+ writel(buffer_32[i++] & 0x000000ff, priv->fpga_data_addr);
+ break;
+ case 0:
+ break;
+ default:
+ /* This will never happen */
+ return -EFAULT;
+ }
+
+ return 0;
+}
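+
+/*
+ * Example of the tail handling above: for count == 5, one full word is
+ * written, then the final byte goes out via the "case 1" path with the
+ * upper 24 bits masked to zero.
+ */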
+
+static int socfpga_a10_fpga_write_complete(struct fpga_manager *mgr,
+ struct fpga_image_info *info)
+{
+ struct a10_fpga_priv *priv = mgr->priv;
+ u32 reg;
+ int ret;
+
+ /* Wait for pr_done */
+ ret = socfpga_a10_fpga_wait_for_pr_done(priv);
+
+ /* Clear pr_request */
+ regmap_update_bits(priv->regmap, A10_FPGAMGR_IMGCFG_CTL_01_OFST,
+ A10_FPGAMGR_IMGCFG_CTL_01_S2F_PR_REQUEST, 0);
+
+ /* Send some clocks to clear out any errors */
+ socfpga_a10_fpga_generate_dclks(priv, 256);
+
+ /* Disable s2f dclk and data */
+ regmap_update_bits(priv->regmap, A10_FPGAMGR_IMGCFG_CTL_02_OFST,
+ A10_FPGAMGR_IMGCFG_CTL_02_EN_CFG_CTRL, 0);
+
+ /* Deassert chip select */
+ regmap_update_bits(priv->regmap, A10_FPGAMGR_IMGCFG_CTL_01_OFST,
+ A10_FPGAMGR_IMGCFG_CTL_01_S2F_NCE,
+ A10_FPGAMGR_IMGCFG_CTL_01_S2F_NCE);
+
+ /* Disable data, dclk, nce, and pr_request override to CSS */
+ regmap_update_bits(priv->regmap, A10_FPGAMGR_IMGCFG_CTL_01_OFST,
+ A10_FPGAMGR_IMGCFG_CTL_01_S2F_NENABLE_CONFIG,
+ A10_FPGAMGR_IMGCFG_CTL_01_S2F_NENABLE_CONFIG);
+
+ /* Return any errors regarding pr_done or pr_error */
+ if (ret)
+ return ret;
+
+ /* Final check */
+ reg = socfpga_a10_fpga_read_stat(priv);
+
+ if (((reg & A10_FPGAMGR_IMGCFG_STAT_F2S_USERMODE) == 0) ||
+ ((reg & A10_FPGAMGR_IMGCFG_STAT_F2S_CONDONE_PIN) == 0) ||
+ ((reg & A10_FPGAMGR_IMGCFG_STAT_F2S_NSTATUS_PIN) == 0)) {
+ dev_dbg(&mgr->dev,
+ "Timeout in final check. Status=%08xf\n", reg);
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+static enum fpga_mgr_states socfpga_a10_fpga_state(struct fpga_manager *mgr)
+{
+ struct a10_fpga_priv *priv = mgr->priv;
+ u32 reg = socfpga_a10_fpga_read_stat(priv);
+
+ if (reg & A10_FPGAMGR_IMGCFG_STAT_F2S_USERMODE)
+ return FPGA_MGR_STATE_OPERATING;
+
+ if (reg & A10_FPGAMGR_IMGCFG_STAT_F2S_PR_READY)
+ return FPGA_MGR_STATE_WRITE;
+
+ if (reg & A10_FPGAMGR_IMGCFG_STAT_F2S_CRC_ERROR)
+ return FPGA_MGR_STATE_WRITE_COMPLETE_ERR;
+
+ if ((reg & A10_FPGAMGR_IMGCFG_STAT_F2S_NSTATUS_PIN) == 0)
+ return FPGA_MGR_STATE_RESET;
+
+ return FPGA_MGR_STATE_UNKNOWN;
+}
+
+static const struct fpga_manager_ops socfpga_a10_fpga_mgr_ops = {
+ .initial_header_size = (RBF_DECOMPRESS_OFFSET + 1) * 4,
+ .state = socfpga_a10_fpga_state,
+ .write_init = socfpga_a10_fpga_write_init,
+ .write = socfpga_a10_fpga_write,
+ .write_complete = socfpga_a10_fpga_write_complete,
+};
+
+static int socfpga_a10_fpga_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct a10_fpga_priv *priv;
+ void __iomem *reg_base;
+ struct resource *res;
+ int ret;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ /* First mmio base is for register access */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ reg_base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(reg_base))
+ return PTR_ERR(reg_base);
+
+ /* Second mmio base is for writing FPGA image data */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ priv->fpga_data_addr = devm_ioremap_resource(dev, res);
+ if (IS_ERR(priv->fpga_data_addr))
+ return PTR_ERR(priv->fpga_data_addr);
+
+ /* regmap for register access */
+ priv->regmap = devm_regmap_init_mmio(dev, reg_base,
+ &socfpga_a10_fpga_regmap_config);
+ if (IS_ERR(priv->regmap))
+ return -ENODEV;
+
+ priv->clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(priv->clk)) {
+ dev_err(dev, "no clock specified\n");
+ return PTR_ERR(priv->clk);
+ }
+
+ ret = clk_prepare_enable(priv->clk);
+ if (ret) {
+ dev_err(dev, "could not enable clock\n");
+ return -EBUSY;
+ }
+
+ return fpga_mgr_register(dev, "SoCFPGA Arria10 FPGA Manager",
+ &socfpga_a10_fpga_mgr_ops, priv);
+}
+
+static int socfpga_a10_fpga_remove(struct platform_device *pdev)
+{
+ struct fpga_manager *mgr = platform_get_drvdata(pdev);
+ struct a10_fpga_priv *priv = mgr->priv;
+
+ fpga_mgr_unregister(&pdev->dev);
+ clk_disable_unprepare(priv->clk);
+
+ return 0;
+}
+
+static const struct of_device_id socfpga_a10_fpga_of_match[] = {
+ { .compatible = "altr,socfpga-a10-fpga-mgr", },
+ {},
+};
+
+MODULE_DEVICE_TABLE(of, socfpga_a10_fpga_of_match);
+
+static struct platform_driver socfpga_a10_fpga_driver = {
+ .probe = socfpga_a10_fpga_probe,
+ .remove = socfpga_a10_fpga_remove,
+ .driver = {
+ .name = "socfpga_a10_fpga_manager",
+ .of_match_table = socfpga_a10_fpga_of_match,
+ },
+};
+
+module_platform_driver(socfpga_a10_fpga_driver);
+
+MODULE_AUTHOR("Alan Tull <atull@opensource.altera.com>");
+MODULE_DESCRIPTION("SoCFPGA Arria10 FPGA Manager");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/fpga/socfpga.c b/drivers/fpga/socfpga.c
index 27d2ff28132c..b6672e66cda6 100644
--- a/drivers/fpga/socfpga.c
+++ b/drivers/fpga/socfpga.c
@@ -407,13 +407,14 @@ static int socfpga_fpga_reset(struct fpga_manager *mgr)
/*
* Prepare the FPGA to receive the configuration data.
*/
-static int socfpga_fpga_ops_configure_init(struct fpga_manager *mgr, u32 flags,
+static int socfpga_fpga_ops_configure_init(struct fpga_manager *mgr,
+ struct fpga_image_info *info,
const char *buf, size_t count)
{
struct socfpga_fpga_priv *priv = mgr->priv;
int ret;
- if (flags & FPGA_MGR_PARTIAL_RECONFIG) {
+ if (info->flags & FPGA_MGR_PARTIAL_RECONFIG) {
dev_err(&mgr->dev, "Partial reconfiguration not supported.\n");
return -EINVAL;
}
@@ -478,7 +479,7 @@ static int socfpga_fpga_ops_configure_write(struct fpga_manager *mgr,
}
static int socfpga_fpga_ops_configure_complete(struct fpga_manager *mgr,
- u32 flags)
+ struct fpga_image_info *info)
{
struct socfpga_fpga_priv *priv = mgr->priv;
u32 status;
diff --git a/drivers/fpga/zynq-fpga.c b/drivers/fpga/zynq-fpga.c
index c2fb4120bd62..1812bf7614e1 100644
--- a/drivers/fpga/zynq-fpga.c
+++ b/drivers/fpga/zynq-fpga.c
@@ -118,7 +118,6 @@
#define FPGA_RST_NONE_MASK 0x0
struct zynq_fpga_priv {
- struct device *dev;
int irq;
struct clk *clk;
@@ -175,7 +174,8 @@ static irqreturn_t zynq_fpga_isr(int irq, void *data)
return IRQ_HANDLED;
}
-static int zynq_fpga_ops_write_init(struct fpga_manager *mgr, u32 flags,
+static int zynq_fpga_ops_write_init(struct fpga_manager *mgr,
+ struct fpga_image_info *info,
const char *buf, size_t count)
{
struct zynq_fpga_priv *priv;
@@ -189,7 +189,7 @@ static int zynq_fpga_ops_write_init(struct fpga_manager *mgr, u32 flags,
return err;
/* don't globally reset PL if we're doing partial reconfig */
- if (!(flags & FPGA_MGR_PARTIAL_RECONFIG)) {
+ if (!(info->flags & FPGA_MGR_PARTIAL_RECONFIG)) {
/* assert AXI interface resets */
regmap_write(priv->slcr, SLCR_FPGA_RST_CTRL_OFFSET,
FPGA_RST_ALL_MASK);
@@ -217,7 +217,7 @@ static int zynq_fpga_ops_write_init(struct fpga_manager *mgr, u32 flags,
INIT_POLL_DELAY,
INIT_POLL_TIMEOUT);
if (err) {
- dev_err(priv->dev, "Timeout waiting for PCFG_INIT");
+ dev_err(&mgr->dev, "Timeout waiting for PCFG_INIT\n");
goto out_err;
}
@@ -231,7 +231,7 @@ static int zynq_fpga_ops_write_init(struct fpga_manager *mgr, u32 flags,
INIT_POLL_DELAY,
INIT_POLL_TIMEOUT);
if (err) {
- dev_err(priv->dev, "Timeout waiting for !PCFG_INIT");
+ dev_err(&mgr->dev, "Timeout waiting for !PCFG_INIT\n");
goto out_err;
}
@@ -245,7 +245,7 @@ static int zynq_fpga_ops_write_init(struct fpga_manager *mgr, u32 flags,
INIT_POLL_DELAY,
INIT_POLL_TIMEOUT);
if (err) {
- dev_err(priv->dev, "Timeout waiting for PCFG_INIT");
+ dev_err(&mgr->dev, "Timeout waiting for PCFG_INIT\n");
goto out_err;
}
}
@@ -262,7 +262,7 @@ static int zynq_fpga_ops_write_init(struct fpga_manager *mgr, u32 flags,
/* check that we have room in the command queue */
status = zynq_fpga_read(priv, STATUS_OFFSET);
if (status & STATUS_DMA_Q_F) {
- dev_err(priv->dev, "DMA command queue full");
+ dev_err(&mgr->dev, "DMA command queue full\n");
err = -EBUSY;
goto out_err;
}
@@ -295,7 +295,8 @@ static int zynq_fpga_ops_write(struct fpga_manager *mgr,
in_count = count;
priv = mgr->priv;
- kbuf = dma_alloc_coherent(priv->dev, count, &dma_addr, GFP_KERNEL);
+ kbuf = dma_alloc_coherent(mgr->dev.parent, count, &dma_addr,
+ GFP_KERNEL);
if (!kbuf)
return -ENOMEM;
@@ -331,19 +332,19 @@ static int zynq_fpga_ops_write(struct fpga_manager *mgr,
zynq_fpga_write(priv, INT_STS_OFFSET, intr_status);
if (!((intr_status & IXR_D_P_DONE_MASK) == IXR_D_P_DONE_MASK)) {
- dev_err(priv->dev, "Error configuring FPGA");
+ dev_err(&mgr->dev, "Error configuring FPGA\n");
err = -EFAULT;
}
clk_disable(priv->clk);
out_free:
- dma_free_coherent(priv->dev, in_count, kbuf, dma_addr);
-
+ dma_free_coherent(mgr->dev.parent, count, kbuf, dma_addr);
return err;
}
-static int zynq_fpga_ops_write_complete(struct fpga_manager *mgr, u32 flags)
+static int zynq_fpga_ops_write_complete(struct fpga_manager *mgr,
+ struct fpga_image_info *info)
{
struct zynq_fpga_priv *priv = mgr->priv;
int err;
@@ -364,7 +365,7 @@ static int zynq_fpga_ops_write_complete(struct fpga_manager *mgr, u32 flags)
return err;
/* for the partial reconfig case we didn't touch the level shifters */
- if (!(flags & FPGA_MGR_PARTIAL_RECONFIG)) {
+ if (!(info->flags & FPGA_MGR_PARTIAL_RECONFIG)) {
/* enable level shifters from PL to PS */
regmap_write(priv->slcr, SLCR_LVL_SHFTR_EN_OFFSET,
LVL_SHFTR_ENABLE_PL_TO_PS);
@@ -416,8 +417,6 @@ static int zynq_fpga_probe(struct platform_device *pdev)
if (!priv)
return -ENOMEM;
- priv->dev = dev;
-
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
priv->io_base = devm_ioremap_resource(dev, res);
if (IS_ERR(priv->io_base))
@@ -426,7 +425,7 @@ static int zynq_fpga_probe(struct platform_device *pdev)
priv->slcr = syscon_regmap_lookup_by_phandle(dev->of_node,
"syscon");
if (IS_ERR(priv->slcr)) {
- dev_err(dev, "unable to get zynq-slcr regmap");
+ dev_err(dev, "unable to get zynq-slcr regmap\n");
return PTR_ERR(priv->slcr);
}
@@ -434,38 +433,41 @@ static int zynq_fpga_probe(struct platform_device *pdev)
priv->irq = platform_get_irq(pdev, 0);
if (priv->irq < 0) {
- dev_err(dev, "No IRQ available");
+ dev_err(dev, "No IRQ available\n");
return priv->irq;
}
- err = devm_request_irq(dev, priv->irq, zynq_fpga_isr, 0,
- dev_name(dev), priv);
- if (err) {
- dev_err(dev, "unable to request IRQ");
- return err;
- }
-
priv->clk = devm_clk_get(dev, "ref_clk");
if (IS_ERR(priv->clk)) {
- dev_err(dev, "input clock not found");
+ dev_err(dev, "input clock not found\n");
return PTR_ERR(priv->clk);
}
err = clk_prepare_enable(priv->clk);
if (err) {
- dev_err(dev, "unable to enable clock");
+ dev_err(dev, "unable to enable clock\n");
return err;
}
/* unlock the device */
zynq_fpga_write(priv, UNLOCK_OFFSET, UNLOCK_MASK);
+ zynq_fpga_write(priv, INT_MASK_OFFSET, 0xFFFFFFFF);
+ zynq_fpga_write(priv, INT_STS_OFFSET, IXR_ALL_MASK);
+ err = devm_request_irq(dev, priv->irq, zynq_fpga_isr, 0, dev_name(dev),
+ priv);
+ if (err) {
+ dev_err(dev, "unable to request IRQ\n");
+ clk_disable_unprepare(priv->clk);
+ return err;
+ }
+
clk_disable(priv->clk);
err = fpga_mgr_register(dev, "Xilinx Zynq FPGA Manager",
&zynq_fpga_ops, priv);
if (err) {
- dev_err(dev, "unable to register FPGA manager");
+ dev_err(dev, "unable to register FPGA manager\n");
clk_unprepare(priv->clk);
return err;
}
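
Editor's note: the write_complete conversion above replaces the bare u32 flags argument with the richer struct fpga_image_info. A minimal sketch of a callback written against the new signature; only the flags field is taken from the hunks, everything else is illustrative:

#include <linux/fpga/fpga-mgr.h>

static int example_ops_write_complete(struct fpga_manager *mgr,
				      struct fpga_image_info *info)
{
	/* Partial reconfiguration leaves the rest of the fabric alone */
	if (info->flags & FPGA_MGR_PARTIAL_RECONFIG)
		return 0;

	/*
	 * Full reconfiguration: redo post-programming fixups here, like
	 * the PL-to-PS level-shifter writes the zynq driver performs.
	 */
	return 0;
}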
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index ed37e5908b91..d5d36549ecc1 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -167,7 +167,7 @@ config GPIO_DWAPB
config GPIO_EM
tristate "Emma Mobile GPIO"
- depends on ARM && OF_GPIO
+ depends on (ARCH_EMEV2 || COMPILE_TEST) && OF_GPIO
help
Say yes here to support GPIO on Renesas Emma Mobile SoCs.
@@ -451,7 +451,7 @@ config GPIO_VR41XX
config GPIO_VX855
tristate "VIA VX855/VX875 GPIO"
- depends on PCI
+ depends on (X86 || COMPILE_TEST) && PCI
select MFD_CORE
select MFD_VX855
help
@@ -520,6 +520,7 @@ config GPIO_ZYNQ
config GPIO_ZX
bool "ZTE ZX GPIO support"
+ depends on ARCH_ZX || COMPILE_TEST
select GPIOLIB_IRQCHIP
help
Say yes here to support the GPIO device on ZTE ZX SoCs.
@@ -603,7 +604,7 @@ config GPIO_IT87
config GPIO_SCH
tristate "Intel SCH/TunnelCreek/Centerton/Quark X1000 GPIO"
- depends on PCI
+ depends on (X86 || COMPILE_TEST) && PCI
select MFD_CORE
select LPC_SCH
help
@@ -777,16 +778,13 @@ config GPIO_PCF857X
platform-neutral GPIO calls.
config GPIO_SX150X
- bool "Semtech SX150x I2C GPIO expander"
- depends on I2C=y
- select GPIOLIB_IRQCHIP
+ bool "Semtech SX150x I2C GPIO expander (deprecated)"
+ depends on PINCTRL && I2C=y
+ select PINCTRL_SX150X
default n
help
- Say yes here to provide support for Semtech SX150-series I2C
- GPIO expanders. Compatible models include:
-
- 8 bits: sx1508q
- 16 bits: sx1509q
+ Say yes here to provide support for Semtech SX150x-series I2C
+ GPIO expanders. The GPIO driver has been replaced by a pinctrl version.
config GPIO_TPIC2810
tristate "TPIC2810 8-Bit I2C GPO expander"
@@ -798,6 +796,7 @@ config GPIO_TPIC2810
config GPIO_TS4900
tristate "Technologic Systems FPGA I2C GPIO"
+ depends on SOC_IMX6 || COMPILE_TEST
select REGMAP_I2C
help
Say yes here to enable the GPIO driver for Technologic's FPGA core.
@@ -814,6 +813,14 @@ config GPIO_ADP5520
This option enables support for on-chip GPIO found
on Analog Devices ADP5520 PMICs.
+config GPIO_ALTERA_A10SR
+ tristate "Altera Arria10 System Resource GPIO"
+ depends on MFD_ALTERA_A10SR
+ help
+ Driver for the Arria10 Development Kit GPIO expansion, which
+ includes reads of pushbuttons and DIP switches as well
+ as writes to LEDs.
+
config GPIO_ARIZONA
tristate "Wolfson Microelectronics Arizona class devices"
depends on MFD_ARIZONA
@@ -822,7 +829,7 @@ config GPIO_ARIZONA
config GPIO_CRYSTAL_COVE
tristate "GPIO support for Crystal Cove PMIC"
- depends on INTEL_SOC_PMIC
+ depends on (X86 || COMPILE_TEST) && INTEL_SOC_PMIC
select GPIOLIB_IRQCHIP
help
Support for GPIO pins on Crystal Cove PMIC.
@@ -835,6 +842,7 @@ config GPIO_CRYSTAL_COVE
config GPIO_CS5535
tristate "AMD CS5535/CS5536 GPIO support"
+ depends on X86 || MIPS || COMPILE_TEST
depends on MFD_CS5535
help
The AMD CS5535 and CS5536 southbridges support 28 GPIO pins that
@@ -927,7 +935,7 @@ config GPIO_MAX77620
config GPIO_MSIC
bool "Intel MSIC mixed signal gpio support"
- depends on MFD_INTEL_MSIC
+ depends on (X86 || COMPILE_TEST) && MFD_INTEL_MSIC
help
Enable support for GPIO on Intel MSIC controllers found in
Intel MID devices
@@ -1028,7 +1036,7 @@ config GPIO_UCB1400
config GPIO_WHISKEY_COVE
tristate "GPIO support for Whiskey Cove PMIC"
- depends on INTEL_SOC_PMIC
+ depends on (X86 || COMPILE_TEST) && INTEL_SOC_PMIC
select GPIOLIB_IRQCHIP
help
Support for GPIO pins on Whiskey Cove PMIC.
@@ -1067,6 +1075,7 @@ menu "PCI GPIO expanders"
config GPIO_AMD8111
tristate "AMD 8111 GPIO driver"
+ depends on X86 || COMPILE_TEST
help
The AMD 8111 south bridge contains 32 GPIO pins which can be used.
@@ -1108,6 +1117,7 @@ config GPIO_MERRIFIELD
config GPIO_ML_IOH
tristate "OKI SEMICONDUCTOR ML7213 IOH GPIO support"
+ depends on X86 || COMPILE_TEST
select GENERIC_IRQ_CHIP
help
ML7213 is a companion chip for the Intel Atom E6xx series.
diff --git a/drivers/gpio/Makefile b/drivers/gpio/Makefile
index d074c2299393..a7676b82de6f 100644
--- a/drivers/gpio/Makefile
+++ b/drivers/gpio/Makefile
@@ -5,6 +5,7 @@ ccflags-$(CONFIG_DEBUG_GPIO) += -DDEBUG
obj-$(CONFIG_GPIOLIB) += devres.o
obj-$(CONFIG_GPIOLIB) += gpiolib.o
obj-$(CONFIG_GPIOLIB) += gpiolib-legacy.o
+obj-$(CONFIG_GPIOLIB) += gpiolib-devprop.o
obj-$(CONFIG_OF_GPIO) += gpiolib-of.o
obj-$(CONFIG_GPIO_SYSFS) += gpiolib-sysfs.o
obj-$(CONFIG_GPIO_ACPI) += gpiolib-acpi.o
@@ -24,6 +25,7 @@ obj-$(CONFIG_GPIO_ADNP) += gpio-adnp.o
obj-$(CONFIG_GPIO_ADP5520) += gpio-adp5520.o
obj-$(CONFIG_GPIO_ADP5588) += gpio-adp5588.o
obj-$(CONFIG_GPIO_ALTERA) += gpio-altera.o
+obj-$(CONFIG_GPIO_ALTERA_A10SR) += gpio-altera-a10sr.o
obj-$(CONFIG_GPIO_AMD8111) += gpio-amd8111.o
obj-$(CONFIG_GPIO_AMDPT) += gpio-amdpt.o
obj-$(CONFIG_GPIO_ARIZONA) += gpio-arizona.o
@@ -102,7 +104,6 @@ obj-$(CONFIG_GPIO_SPEAR_SPICS) += gpio-spear-spics.o
obj-$(CONFIG_GPIO_STA2X11) += gpio-sta2x11.o
obj-$(CONFIG_GPIO_STMPE) += gpio-stmpe.o
obj-$(CONFIG_GPIO_STP_XWAY) += gpio-stp-xway.o
-obj-$(CONFIG_GPIO_SX150X) += gpio-sx150x.o
obj-$(CONFIG_GPIO_SYSCON) += gpio-syscon.o
obj-$(CONFIG_GPIO_TB10X) += gpio-tb10x.o
obj-$(CONFIG_GPIO_TC3589X) += gpio-tc3589x.o
diff --git a/drivers/gpio/gpio-adnp.c b/drivers/gpio/gpio-adnp.c
index 8ff7b0d3eac6..89863ea25de1 100644
--- a/drivers/gpio/gpio-adnp.c
+++ b/drivers/gpio/gpio-adnp.c
@@ -468,17 +468,19 @@ static int adnp_irq_setup(struct adnp *adnp)
return err;
}
- err = gpiochip_irqchip_add(chip,
- &adnp_irq_chip,
- 0,
- handle_simple_irq,
- IRQ_TYPE_NONE);
+ err = gpiochip_irqchip_add_nested(chip,
+ &adnp_irq_chip,
+ 0,
+ handle_simple_irq,
+ IRQ_TYPE_NONE);
if (err) {
dev_err(chip->parent,
"could not connect irqchip to gpiochip\n");
return err;
}
+ gpiochip_set_nested_irqchip(chip, &adnp_irq_chip, adnp->client->irq);
+
return 0;
}
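
Editor's note: the adnp conversion above, like the crystalcove, max732x, mcp23s08, pca953x, pcf857x and stmpe hunks further down, swaps the chained irqchip calls for their nested variants: these expanders sit behind I2C, so the demultiplexing handler runs in a thread and may sleep. A condensed sketch of the pattern, with every example_* name standing in for a driver's own objects:

#include <linux/gpio/driver.h>
#include <linux/i2c.h>
#include <linux/interrupt.h>
#include <linux/irq.h>

static irqreturn_t example_isr(int irq, void *data);

static int example_irq_setup(struct gpio_chip *chip,
			     struct irq_chip *irqchip,
			     struct i2c_client *client, void *priv)
{
	int err;

	/* Threaded handler: the demux path issues sleeping I2C reads */
	err = devm_request_threaded_irq(&client->dev, client->irq, NULL,
					example_isr, IRQF_ONESHOT,
					dev_name(&client->dev), priv);
	if (err)
		return err;

	err = gpiochip_irqchip_add_nested(chip, irqchip, 0,
					  handle_simple_irq, IRQ_TYPE_NONE);
	if (err)
		return err;

	/* Record the parent IRQ that the nested child handlers hang off */
	gpiochip_set_nested_irqchip(chip, irqchip, client->irq);
	return 0;
}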
diff --git a/drivers/gpio/gpio-altera-a10sr.c b/drivers/gpio/gpio-altera-a10sr.c
new file mode 100644
index 000000000000..9e1a138fed53
--- /dev/null
+++ b/drivers/gpio/gpio-altera-a10sr.c
@@ -0,0 +1,130 @@
+/*
+ * Copyright Intel Corporation (C) 2014-2016. All Rights Reserved
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * GPIO driver for Altera Arria10 MAX5 System Resource Chip
+ *
+ * Adapted from gpio-tps65910.c
+ */
+
+#include <linux/gpio/driver.h>
+#include <linux/mfd/altera-a10sr.h>
+#include <linux/module.h>
+
+/**
+ * struct altr_a10sr_gpio - Altera MAX5 GPIO device private data structure
+ * @gp: instance of the gpio_chip
+ * @regmap: the regmap from the parent device.
+ */
+struct altr_a10sr_gpio {
+ struct gpio_chip gp;
+ struct regmap *regmap;
+};
+
+static int altr_a10sr_gpio_get(struct gpio_chip *chip, unsigned int offset)
+{
+ struct altr_a10sr_gpio *gpio = gpiochip_get_data(chip);
+ int ret, val;
+
+ ret = regmap_read(gpio->regmap, ALTR_A10SR_PBDSW_REG, &val);
+ if (ret < 0)
+ return ret;
+
+ return !!(val & BIT(offset - ALTR_A10SR_LED_VALID_SHIFT));
+}
+
+static void altr_a10sr_gpio_set(struct gpio_chip *chip, unsigned int offset,
+ int value)
+{
+ struct altr_a10sr_gpio *gpio = gpiochip_get_data(chip);
+
+ regmap_update_bits(gpio->regmap, ALTR_A10SR_LED_REG,
+ BIT(ALTR_A10SR_LED_VALID_SHIFT + offset),
+ value ? BIT(ALTR_A10SR_LED_VALID_SHIFT + offset)
+ : 0);
+}
+
+static int altr_a10sr_gpio_direction_input(struct gpio_chip *gc,
+ unsigned int nr)
+{
+ if (nr >= (ALTR_A10SR_IN_VALID_RANGE_LO - ALTR_A10SR_LED_VALID_SHIFT))
+ return 0;
+ return -EINVAL;
+}
+
+static int altr_a10sr_gpio_direction_output(struct gpio_chip *gc,
+ unsigned int nr, int value)
+{
+ if (nr <= (ALTR_A10SR_OUT_VALID_RANGE_HI - ALTR_A10SR_LED_VALID_SHIFT))
+ return 0;
+ return -EINVAL;
+}
+
+static struct gpio_chip altr_a10sr_gc = {
+ .label = "altr_a10sr_gpio",
+ .owner = THIS_MODULE,
+ .get = altr_a10sr_gpio_get,
+ .set = altr_a10sr_gpio_set,
+ .direction_input = altr_a10sr_gpio_direction_input,
+ .direction_output = altr_a10sr_gpio_direction_output,
+ .can_sleep = true,
+ .ngpio = 12,
+ .base = -1,
+};
+
+static int altr_a10sr_gpio_probe(struct platform_device *pdev)
+{
+ struct altr_a10sr_gpio *gpio;
+ int ret;
+ struct altr_a10sr *a10sr = dev_get_drvdata(pdev->dev.parent);
+
+ gpio = devm_kzalloc(&pdev->dev, sizeof(*gpio), GFP_KERNEL);
+ if (!gpio)
+ return -ENOMEM;
+
+ gpio->regmap = a10sr->regmap;
+
+ gpio->gp = altr_a10sr_gc;
+
+ gpio->gp.of_node = pdev->dev.of_node;
+
+ ret = devm_gpiochip_add_data(&pdev->dev, &gpio->gp, gpio);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Could not register gpiochip, %d\n", ret);
+ return ret;
+ }
+
+ platform_set_drvdata(pdev, gpio);
+
+ return 0;
+}
+
+static const struct of_device_id altr_a10sr_gpio_of_match[] = {
+ { .compatible = "altr,a10sr-gpio" },
+ { },
+};
+MODULE_DEVICE_TABLE(of, altr_a10sr_gpio_of_match);
+
+static struct platform_driver altr_a10sr_gpio_driver = {
+ .probe = altr_a10sr_gpio_probe,
+ .driver = {
+ .name = "altr_a10sr_gpio",
+ .of_match_table = of_match_ptr(altr_a10sr_gpio_of_match),
+ },
+};
+module_platform_driver(altr_a10sr_gpio_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Thor Thayer <tthayer@opensource.altera.com>");
+MODULE_DESCRIPTION("Altera Arria10 System Resource Chip GPIO");
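
Editor's note: a hypothetical consumer of the new chip through the descriptor API; the "pb" connection ID and the consuming device are illustrative, not something the driver above defines. Since the chip sets can_sleep (its regmap I/O may block), the _cansleep accessor is mandatory:

#include <linux/device.h>
#include <linux/gpio/consumer.h>

static int example_read_pushbutton(struct device *dev)
{
	struct gpio_desc *pb;

	/* resolves a "pb-gpios" property on the consumer's firmware node */
	pb = devm_gpiod_get(dev, "pb", GPIOD_IN);
	if (IS_ERR(pb))
		return PTR_ERR(pb);

	return gpiod_get_value_cansleep(pb);
}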
diff --git a/drivers/gpio/gpio-arizona.c b/drivers/gpio/gpio-arizona.c
index 482462889c8f..1f91557717a6 100644
--- a/drivers/gpio/gpio-arizona.c
+++ b/drivers/gpio/gpio-arizona.c
@@ -137,15 +137,10 @@ static int arizona_gpio_probe(struct platform_device *pdev)
if (ret < 0) {
dev_err(&pdev->dev, "Could not register gpiochip, %d\n",
ret);
- goto err;
+ return ret;
}
- platform_set_drvdata(pdev, arizona_gpio);
-
- return ret;
-
-err:
- return ret;
+ return 0;
}
static struct platform_driver arizona_gpio_driver = {
diff --git a/drivers/gpio/gpio-axp209.c b/drivers/gpio/gpio-axp209.c
index d9c2a517c6df..4a346b7b4172 100644
--- a/drivers/gpio/gpio-axp209.c
+++ b/drivers/gpio/gpio-axp209.c
@@ -64,13 +64,9 @@ static int axp20x_gpio_get(struct gpio_chip *chip, unsigned offset)
{
struct axp20x_gpio *gpio = gpiochip_get_data(chip);
unsigned int val;
- int reg, ret;
-
- reg = axp20x_gpio_get_reg(offset);
- if (reg < 0)
- return reg;
+ int ret;
- ret = regmap_read(gpio->regmap, reg, &val);
+ ret = regmap_read(gpio->regmap, AXP20X_GPIO20_SS, &val);
if (ret)
return ret;
diff --git a/drivers/gpio/gpio-crystalcove.c b/drivers/gpio/gpio-crystalcove.c
index 7c446d118cd6..2197368cc899 100644
--- a/drivers/gpio/gpio-crystalcove.c
+++ b/drivers/gpio/gpio-crystalcove.c
@@ -351,8 +351,8 @@ static int crystalcove_gpio_probe(struct platform_device *pdev)
return retval;
}
- gpiochip_irqchip_add(&cg->chip, &crystalcove_irqchip, 0,
- handle_simple_irq, IRQ_TYPE_NONE);
+ gpiochip_irqchip_add_nested(&cg->chip, &crystalcove_irqchip, 0,
+ handle_simple_irq, IRQ_TYPE_NONE);
retval = request_threaded_irq(irq, NULL, crystalcove_gpio_irq_handler,
IRQF_ONESHOT, KBUILD_MODNAME, cg);
@@ -362,6 +362,8 @@ static int crystalcove_gpio_probe(struct platform_device *pdev)
return retval;
}
+ gpiochip_set_nested_irqchip(&cg->chip, &crystalcove_irqchip, irq);
+
return 0;
}
diff --git a/drivers/gpio/gpio-davinci.c b/drivers/gpio/gpio-davinci.c
index dd262f00295d..9191056548fe 100644
--- a/drivers/gpio/gpio-davinci.c
+++ b/drivers/gpio/gpio-davinci.c
@@ -40,6 +40,7 @@ struct davinci_gpio_regs {
typedef struct irq_chip *(*gpio_get_irq_chip_cb_t)(unsigned int irq);
#define BINTEN 0x8 /* GPIO Interrupt Per-Bank Enable Register */
+#define MAX_LABEL_SIZE 20
static void __iomem *gpio_base;
@@ -201,6 +202,7 @@ static int davinci_gpio_probe(struct platform_device *pdev)
struct davinci_gpio_regs __iomem *regs;
struct device *dev = &pdev->dev;
struct resource *res;
+ char label[MAX_LABEL_SIZE];
pdata = davinci_gpio_get_pdata(pdev);
if (!pdata) {
@@ -237,7 +239,10 @@ static int davinci_gpio_probe(struct platform_device *pdev)
return PTR_ERR(gpio_base);
for (i = 0, base = 0; base < ngpio; i++, base += 32) {
- chips[i].chip.label = "DaVinci";
+ snprintf(label, MAX_LABEL_SIZE, "davinci_gpio.%d", i);
+ chips[i].chip.label = devm_kstrdup(dev, label, GFP_KERNEL);
+ if (!chips[i].chip.label)
+ return -ENOMEM;
chips[i].chip.direction_input = davinci_direction_in;
chips[i].chip.get = davinci_gpio_get;
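
Editor's note: the davinci hunk above gives each bank a distinct label in place of the shared "DaVinci", which matters for label-based lookups. A sketch of such a lookup; the match helper is illustrative, gpiochip_find() is the real API:

#include <linux/gpio/driver.h>
#include <linux/string.h>

static int example_match_label(struct gpio_chip *chip, void *data)
{
	return !strcmp(chip->label, data);
}

/* each bank can now be singled out, e.g.:
 *	gpiochip_find("davinci_gpio.1", example_match_label);
 */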
diff --git a/drivers/gpio/gpio-dln2.c b/drivers/gpio/gpio-dln2.c
index f7a60a441e95..5d38b08d1ee2 100644
--- a/drivers/gpio/gpio-dln2.c
+++ b/drivers/gpio/gpio-dln2.c
@@ -467,7 +467,6 @@ static int dln2_gpio_probe(struct platform_device *pdev)
dln2->gpio.base = -1;
dln2->gpio.ngpio = pins;
dln2->gpio.can_sleep = true;
- dln2->gpio.irq_not_threaded = true;
dln2->gpio.set = dln2_gpio_set;
dln2->gpio.get = dln2_gpio_get;
dln2->gpio.request = dln2_gpio_request;
diff --git a/drivers/gpio/gpio-etraxfs.c b/drivers/gpio/gpio-etraxfs.c
index 00b022c9acb3..a254d5b07b94 100644
--- a/drivers/gpio/gpio-etraxfs.c
+++ b/drivers/gpio/gpio-etraxfs.c
@@ -471,9 +471,4 @@ static struct platform_driver etraxfs_gpio_driver = {
.probe = etraxfs_gpio_probe,
};
-static int __init etraxfs_gpio_init(void)
-{
- return platform_driver_register(&etraxfs_gpio_driver);
-}
-
-device_initcall(etraxfs_gpio_init);
+builtin_platform_driver(etraxfs_gpio_driver);
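
Editor's note: builtin_platform_driver() is pure boilerplate reduction; modulo the exact macro plumbing in <linux/platform_device.h>, it expands to the same init stub the hunk deletes:

static int __init etraxfs_gpio_driver_init(void)
{
	return platform_driver_register(&etraxfs_gpio_driver);
}
device_initcall(etraxfs_gpio_driver_init);

The gpio-intel-mid and gpio-mb86s7x hunks below make the same substitution with builtin_pci_driver() and builtin_platform_driver() respectively.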
diff --git a/drivers/gpio/gpio-htc-egpio.c b/drivers/gpio/gpio-htc-egpio.c
index 0b4df6051097..271356effb2e 100644
--- a/drivers/gpio/gpio-htc-egpio.c
+++ b/drivers/gpio/gpio-htc-egpio.c
@@ -17,7 +17,7 @@
#include <linux/platform_data/gpio-htc-egpio.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
-#include <linux/module.h>
+#include <linux/init.h>
struct egpio_chip {
int reg_start;
@@ -160,10 +160,14 @@ static int egpio_get(struct gpio_chip *chip, unsigned offset)
bit = egpio_bit(ei, offset);
reg = egpio->reg_start + egpio_pos(ei, offset);
- value = egpio_readw(ei, reg);
- pr_debug("readw(%p + %x) = %x\n",
- ei->base_addr, reg << ei->bus_shift, value);
- return !!(value & bit);
+ if (test_bit(offset, &egpio->is_out)) {
+ return !!(egpio->cached_values & (1 << offset));
+ } else {
+ value = egpio_readw(ei, reg);
+ pr_debug("readw(%p + %x) = %x\n",
+ ei->base_addr, reg << ei->bus_shift, value);
+ return !!(value & bit);
+ }
}
static int egpio_direction_input(struct gpio_chip *chip, unsigned offset)
@@ -225,6 +229,15 @@ static int egpio_direction_output(struct gpio_chip *chip,
}
}
+static int egpio_get_direction(struct gpio_chip *chip, unsigned offset)
+{
+ struct egpio_chip *egpio;
+
+ egpio = gpiochip_get_data(chip);
+
+ return !test_bit(offset, &egpio->is_out);
+}
+
static void egpio_write_cache(struct egpio_info *ei)
{
int i;
@@ -327,6 +340,7 @@ static int __init egpio_probe(struct platform_device *pdev)
chip->set = egpio_set;
chip->direction_input = egpio_direction_input;
chip->direction_output = egpio_direction_output;
+ chip->get_direction = egpio_get_direction;
chip->base = pdata->chip[i].gpio_base;
chip->ngpio = pdata->chip[i].num_gpios;
@@ -367,24 +381,6 @@ fail:
return ret;
}
-static int __exit egpio_remove(struct platform_device *pdev)
-{
- struct egpio_info *ei = platform_get_drvdata(pdev);
- unsigned int irq, irq_end;
-
- if (ei->chained_irq) {
- irq_end = ei->irq_start + ei->nirqs;
- for (irq = ei->irq_start; irq < irq_end; irq++) {
- irq_set_chip_and_handler(irq, NULL, NULL);
- irq_set_status_flags(irq, IRQ_NOREQUEST | IRQ_NOPROBE);
- }
- irq_set_chained_handler(ei->chained_irq, NULL);
- device_init_wakeup(&pdev->dev, 0);
- }
-
- return 0;
-}
-
#ifdef CONFIG_PM
static int egpio_suspend(struct platform_device *pdev, pm_message_t state)
{
@@ -416,8 +412,8 @@ static int egpio_resume(struct platform_device *pdev)
static struct platform_driver egpio_driver = {
.driver = {
.name = "htc-egpio",
+ .suppress_bind_attrs = true,
},
- .remove = __exit_p(egpio_remove),
.suspend = egpio_suspend,
.resume = egpio_resume,
};
@@ -426,15 +422,5 @@ static int __init egpio_init(void)
{
return platform_driver_probe(&egpio_driver, egpio_probe);
}
-
-static void __exit egpio_exit(void)
-{
- platform_driver_unregister(&egpio_driver);
-}
-
/* start early for dependencies */
subsys_initcall(egpio_init);
-module_exit(egpio_exit)
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Kevin O'Connor <kevin@koconnor.net>");
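
Editor's note: the reworked egpio_get() returns cached_values for output lines instead of reading the register back, so the set path must keep that cache coherent. egpio_set() itself is not shown in this diff; the sketch below is an assumption about the bookkeeping it has to perform, not a copy of it:

#include <linux/bitops.h>

/* hypothetical helper; in the real driver this logic lives in egpio_set() */
static void example_cache_output_value(unsigned long *cached_values,
				       unsigned int offset, int value)
{
	if (value)
		*cached_values |= BIT(offset);
	else
		*cached_values &= ~BIT(offset);
	/* ...followed by writing the cached word out to the register */
}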
diff --git a/drivers/gpio/gpio-intel-mid.c b/drivers/gpio/gpio-intel-mid.c
index 164de64b11fc..a1e44c221f66 100644
--- a/drivers/gpio/gpio-intel-mid.c
+++ b/drivers/gpio/gpio-intel-mid.c
@@ -421,9 +421,4 @@ static struct pci_driver intel_gpio_driver = {
},
};
-static int __init intel_gpio_init(void)
-{
- return pci_register_driver(&intel_gpio_driver);
-}
-
-device_initcall(intel_gpio_init);
+builtin_pci_driver(intel_gpio_driver);
diff --git a/drivers/gpio/gpio-max732x.c b/drivers/gpio/gpio-max732x.c
index a9aaf9d822b4..4ea4c6a1313b 100644
--- a/drivers/gpio/gpio-max732x.c
+++ b/drivers/gpio/gpio-max732x.c
@@ -520,20 +520,19 @@ static int max732x_irq_setup(struct max732x_chip *chip,
client->irq);
return ret;
}
- ret = gpiochip_irqchip_add(&chip->gpio_chip,
- &max732x_irq_chip,
- irq_base,
- handle_simple_irq,
- IRQ_TYPE_NONE);
+ ret = gpiochip_irqchip_add_nested(&chip->gpio_chip,
+ &max732x_irq_chip,
+ irq_base,
+ handle_simple_irq,
+ IRQ_TYPE_NONE);
if (ret) {
dev_err(&client->dev,
"could not connect irqchip to gpiochip\n");
return ret;
}
- gpiochip_set_chained_irqchip(&chip->gpio_chip,
- &max732x_irq_chip,
- client->irq,
- NULL);
+ gpiochip_set_nested_irqchip(&chip->gpio_chip,
+ &max732x_irq_chip,
+ client->irq);
}
return 0;
diff --git a/drivers/gpio/gpio-max77620.c b/drivers/gpio/gpio-max77620.c
index b46b436cb97f..ec8de4190db9 100644
--- a/drivers/gpio/gpio-max77620.c
+++ b/drivers/gpio/gpio-max77620.c
@@ -21,9 +21,6 @@ struct max77620_gpio {
struct gpio_chip gpio_chip;
struct regmap *rmap;
struct device *dev;
- int gpio_irq;
- int irq_base;
- int gpio_base;
};
static const struct regmap_irq max77620_gpio_irqs[] = {
@@ -254,7 +251,6 @@ static int max77620_gpio_probe(struct platform_device *pdev)
mgpio->rmap = chip->rmap;
mgpio->dev = &pdev->dev;
- mgpio->gpio_irq = gpio_irq;
mgpio->gpio_chip.label = pdev->name;
mgpio->gpio_chip.parent = &pdev->dev;
@@ -268,7 +264,6 @@ static int max77620_gpio_probe(struct platform_device *pdev)
mgpio->gpio_chip.ngpio = MAX77620_GPIO_NR;
mgpio->gpio_chip.can_sleep = 1;
mgpio->gpio_chip.base = -1;
- mgpio->irq_base = -1;
#ifdef CONFIG_OF_GPIO
mgpio->gpio_chip.of_node = pdev->dev.parent->of_node;
#endif
@@ -281,9 +276,8 @@ static int max77620_gpio_probe(struct platform_device *pdev)
return ret;
}
- mgpio->gpio_base = mgpio->gpio_chip.base;
- ret = devm_regmap_add_irq_chip(&pdev->dev, chip->rmap, mgpio->gpio_irq,
- IRQF_ONESHOT, mgpio->irq_base,
+ ret = devm_regmap_add_irq_chip(&pdev->dev, chip->rmap, gpio_irq,
+ IRQF_ONESHOT, -1,
&max77620_gpio_irq_chip,
&chip->gpio_irq_data);
if (ret < 0) {
@@ -296,6 +290,7 @@ static int max77620_gpio_probe(struct platform_device *pdev)
static const struct platform_device_id max77620_gpio_devtype[] = {
{ .name = "max77620-gpio", },
+ { .name = "max20024-gpio", },
{},
};
MODULE_DEVICE_TABLE(platform, max77620_gpio_devtype);
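
Editor's note: the consolidated call above passes the parent IRQ straight through and an irq_base of -1; a negative base makes the IRQ core allocate a descriptor range dynamically (irq_alloc_descs() with irq < 0), so the driver no longer needs to cache either value. Minimal sketch of the call shape, with the driver state abbreviated:

#include <linux/interrupt.h>
#include <linux/regmap.h>

static int example_add_irq_chip(struct device *dev, struct regmap *map,
				int parent_irq,
				const struct regmap_irq_chip *irq_chip,
				struct regmap_irq_chip_data **irq_data)
{
	/* irq_base == -1: let the IRQ core pick the descriptors */
	return devm_regmap_add_irq_chip(dev, map, parent_irq, IRQF_ONESHOT,
					-1, irq_chip, irq_data);
}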
diff --git a/drivers/gpio/gpio-mb86s7x.c b/drivers/gpio/gpio-mb86s7x.c
index d55af50e7034..ffb73f688ae1 100644
--- a/drivers/gpio/gpio-mb86s7x.c
+++ b/drivers/gpio/gpio-mb86s7x.c
@@ -217,8 +217,4 @@ static struct platform_driver mb86s70_gpio_driver = {
.remove = mb86s70_gpio_remove,
};
-static int __init mb86s70_gpio_init(void)
-{
- return platform_driver_register(&mb86s70_gpio_driver);
-}
-device_initcall(mb86s70_gpio_init);
+builtin_platform_driver(mb86s70_gpio_driver);
diff --git a/drivers/gpio/gpio-mcp23s08.c b/drivers/gpio/gpio-mcp23s08.c
index 99d37b56c258..504550665091 100644
--- a/drivers/gpio/gpio-mcp23s08.c
+++ b/drivers/gpio/gpio-mcp23s08.c
@@ -473,21 +473,20 @@ static int mcp23s08_irq_setup(struct mcp23s08 *mcp)
return err;
}
- err = gpiochip_irqchip_add(chip,
- &mcp23s08_irq_chip,
- 0,
- handle_simple_irq,
- IRQ_TYPE_NONE);
+ err = gpiochip_irqchip_add_nested(chip,
+ &mcp23s08_irq_chip,
+ 0,
+ handle_simple_irq,
+ IRQ_TYPE_NONE);
if (err) {
dev_err(chip->parent,
"could not connect irqchip to gpiochip: %d\n", err);
return err;
}
- gpiochip_set_chained_irqchip(chip,
- &mcp23s08_irq_chip,
- mcp->irq,
- NULL);
+ gpiochip_set_nested_irqchip(chip,
+ &mcp23s08_irq_chip,
+ mcp->irq);
return 0;
}
diff --git a/drivers/gpio/gpio-merrifield.c b/drivers/gpio/gpio-merrifield.c
index 45b51278b8ee..69e0f4ace465 100644
--- a/drivers/gpio/gpio-merrifield.c
+++ b/drivers/gpio/gpio-merrifield.c
@@ -11,6 +11,7 @@
#include <linux/bitops.h>
#include <linux/gpio/driver.h>
+#include <linux/gpio.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
@@ -161,6 +162,34 @@ static int mrfld_gpio_direction_output(struct gpio_chip *chip,
return 0;
}
+static int mrfld_gpio_get_direction(struct gpio_chip *chip, unsigned int offset)
+{
+ void __iomem *gpdr = gpio_reg(chip, offset, GPDR);
+
+ return (readl(gpdr) & BIT(offset % 32)) ? GPIOF_DIR_OUT : GPIOF_DIR_IN;
+}
+
+static int mrfld_gpio_set_debounce(struct gpio_chip *chip, unsigned int offset,
+ unsigned int debounce)
+{
+ struct mrfld_gpio *priv = gpiochip_get_data(chip);
+ void __iomem *gfbr = gpio_reg(chip, offset, GFBR);
+ unsigned long flags;
+ u32 value;
+
+ raw_spin_lock_irqsave(&priv->lock, flags);
+
+ if (debounce)
+ value = readl(gfbr) & ~BIT(offset % 32);
+ else
+ value = readl(gfbr) | BIT(offset % 32);
+ writel(value, gfbr);
+
+ raw_spin_unlock_irqrestore(&priv->lock, flags);
+
+ return 0;
+}
+
static void mrfld_irq_ack(struct irq_data *d)
{
struct mrfld_gpio *priv = irq_data_get_irq_chip_data(d);
@@ -384,6 +413,8 @@ static int mrfld_gpio_probe(struct pci_dev *pdev, const struct pci_device_id *id
priv->chip.direction_output = mrfld_gpio_direction_output;
priv->chip.get = mrfld_gpio_get;
priv->chip.set = mrfld_gpio_set;
+ priv->chip.get_direction = mrfld_gpio_get_direction;
+ priv->chip.set_debounce = mrfld_gpio_set_debounce;
priv->chip.base = gpio_base;
priv->chip.ngpio = MRFLD_NGPIO;
priv->chip.can_sleep = false;
@@ -411,7 +442,7 @@ static int mrfld_gpio_probe(struct pci_dev *pdev, const struct pci_device_id *id
}
retval = gpiochip_irqchip_add(&priv->chip, &mrfld_irqchip, irq_base,
- handle_simple_irq, IRQ_TYPE_NONE);
+ handle_bad_irq, IRQ_TYPE_NONE);
if (retval) {
dev_err(&pdev->dev, "could not connect irqchip to gpiochip\n");
return retval;
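
Editor's note: with ->set_debounce wired up, consumers can request hardware debouncing through the generic descriptor API. Note the inverted register sense in the hunk above: clearing the GFBR bit enables the debounce filter, setting it bypasses it. A hypothetical consumer ("btn" is illustrative):

#include <linux/gpio/consumer.h>

static int example_enable_debounce(struct gpio_desc *btn)
{
	/* the debounce period is given in microseconds */
	return gpiod_set_debounce(btn, 10000);	/* 10 ms */
}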
diff --git a/drivers/gpio/gpio-mxs.c b/drivers/gpio/gpio-mxs.c
index ee1724806f46..1e8fde8cb803 100644
--- a/drivers/gpio/gpio-mxs.c
+++ b/drivers/gpio/gpio-mxs.c
@@ -87,10 +87,15 @@ static int mxs_gpio_set_irq_type(struct irq_data *d, unsigned int type)
u32 val;
u32 pin_mask = 1 << d->hwirq;
struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
+ struct irq_chip_type *ct = irq_data_get_chip_type(d);
struct mxs_gpio_port *port = gc->private;
void __iomem *pin_addr;
int edge;
+ if (!(ct->type & type))
+ if (irq_setup_alt_chip(d, type))
+ return -EINVAL;
+
port->both_edges &= ~pin_mask;
switch (type) {
case IRQ_TYPE_EDGE_BOTH:
@@ -119,10 +124,13 @@ static int mxs_gpio_set_irq_type(struct irq_data *d, unsigned int type)
/* set level or edge */
pin_addr = port->base + PINCTRL_IRQLEV(port);
- if (edge & GPIO_INT_LEV_MASK)
+ if (edge & GPIO_INT_LEV_MASK) {
writel(pin_mask, pin_addr + MXS_SET);
- else
+ writel(pin_mask, port->base + PINCTRL_IRQEN(port) + MXS_SET);
+ } else {
writel(pin_mask, pin_addr + MXS_CLR);
+ writel(pin_mask, port->base + PINCTRL_PIN2IRQ(port) + MXS_SET);
+ }
/* set polarity */
pin_addr = port->base + PINCTRL_IRQPOL(port);
@@ -202,21 +210,37 @@ static int __init mxs_gpio_init_gc(struct mxs_gpio_port *port, int irq_base)
struct irq_chip_generic *gc;
struct irq_chip_type *ct;
- gc = irq_alloc_generic_chip("gpio-mxs", 1, irq_base,
+ gc = irq_alloc_generic_chip("gpio-mxs", 2, irq_base,
port->base, handle_level_irq);
if (!gc)
return -ENOMEM;
gc->private = port;
- ct = gc->chip_types;
+ ct = &gc->chip_types[0];
+ ct->type = IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_LEVEL_LOW;
+ ct->chip.irq_ack = irq_gc_ack_set_bit;
+ ct->chip.irq_mask = irq_gc_mask_disable_reg;
+ ct->chip.irq_unmask = irq_gc_unmask_enable_reg;
+ ct->chip.irq_set_type = mxs_gpio_set_irq_type;
+ ct->chip.irq_set_wake = mxs_gpio_set_wake_irq;
+ ct->chip.flags = IRQCHIP_SET_TYPE_MASKED;
+ ct->regs.ack = PINCTRL_IRQSTAT(port) + MXS_CLR;
+ ct->regs.enable = PINCTRL_PIN2IRQ(port) + MXS_SET;
+ ct->regs.disable = PINCTRL_PIN2IRQ(port) + MXS_CLR;
+
+ ct = &gc->chip_types[1];
+ ct->type = IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING;
ct->chip.irq_ack = irq_gc_ack_set_bit;
- ct->chip.irq_mask = irq_gc_mask_clr_bit;
- ct->chip.irq_unmask = irq_gc_mask_set_bit;
+ ct->chip.irq_mask = irq_gc_mask_disable_reg;
+ ct->chip.irq_unmask = irq_gc_unmask_enable_reg;
ct->chip.irq_set_type = mxs_gpio_set_irq_type;
ct->chip.irq_set_wake = mxs_gpio_set_wake_irq;
+ ct->chip.flags = IRQCHIP_SET_TYPE_MASKED;
ct->regs.ack = PINCTRL_IRQSTAT(port) + MXS_CLR;
- ct->regs.mask = PINCTRL_IRQEN(port);
+ ct->regs.enable = PINCTRL_IRQEN(port) + MXS_SET;
+ ct->regs.disable = PINCTRL_IRQEN(port) + MXS_CLR;
+ ct->handler = handle_level_irq;
irq_setup_generic_chip(gc, IRQ_MSK(32), IRQ_GC_INIT_NESTED_LOCK,
IRQ_NOREQUEST, 0);
@@ -297,11 +321,8 @@ static int mxs_gpio_probe(struct platform_device *pdev)
}
port->base = base;
- /*
- * select the pin interrupt functionality but initially
- * disable the interrupts
- */
- writel(~0U, port->base + PINCTRL_PIN2IRQ(port));
+ /* initially disable the interrupts */
+ writel(0, port->base + PINCTRL_PIN2IRQ(port));
writel(0, port->base + PINCTRL_IRQEN(port));
/* clear address has to be used to clear IRQSTAT bits */
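
Editor's note: the mxs rework splits the generic chip into two irq_chip_type flavours, level and edge, each with its own enable register, and irq_setup_alt_chip() switches between them when a trigger request does not match the currently selected flavour. A condensed sketch of that selection step, with the driver's register programming elided:

#include <linux/irq.h>

static int example_set_irq_type(struct irq_data *d, unsigned int type)
{
	struct irq_chip_type *ct = irq_data_get_chip_type(d);

	/* current chip_type cannot produce this trigger: try the other */
	if (!(ct->type & type))
		if (irq_setup_alt_chip(d, type))
			return -EINVAL;

	/* ...then program polarity/level registers as the driver does */
	return 0;
}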
diff --git a/drivers/gpio/gpio-pca953x.c b/drivers/gpio/gpio-pca953x.c
index fe731f094257..d5d72d84b719 100644
--- a/drivers/gpio/gpio-pca953x.c
+++ b/drivers/gpio/gpio-pca953x.c
@@ -74,6 +74,7 @@ static const struct i2c_device_id pca953x_id[] = {
{ "max7312", 16 | PCA953X_TYPE | PCA_INT, },
{ "max7313", 16 | PCA953X_TYPE | PCA_INT, },
{ "max7315", 8 | PCA953X_TYPE | PCA_INT, },
+ { "max7318", 16 | PCA953X_TYPE | PCA_INT, },
{ "pca6107", 8 | PCA953X_TYPE | PCA_INT, },
{ "tca6408", 8 | PCA953X_TYPE | PCA_INT, },
{ "tca6416", 16 | PCA953X_TYPE | PCA_INT, },
@@ -635,20 +636,20 @@ static int pca953x_irq_setup(struct pca953x_chip *chip,
return ret;
}
- ret = gpiochip_irqchip_add(&chip->gpio_chip,
- &pca953x_irq_chip,
- irq_base,
- handle_simple_irq,
- IRQ_TYPE_NONE);
+ ret = gpiochip_irqchip_add_nested(&chip->gpio_chip,
+ &pca953x_irq_chip,
+ irq_base,
+ handle_simple_irq,
+ IRQ_TYPE_NONE);
if (ret) {
dev_err(&client->dev,
"could not connect irqchip to gpiochip\n");
return ret;
}
- gpiochip_set_chained_irqchip(&chip->gpio_chip,
- &pca953x_irq_chip,
- client->irq, NULL);
+ gpiochip_set_nested_irqchip(&chip->gpio_chip,
+ &pca953x_irq_chip,
+ client->irq);
}
return 0;
@@ -907,6 +908,7 @@ static const struct of_device_id pca953x_dt_ids[] = {
{ .compatible = "maxim,max7312", .data = OF_953X(16, PCA_INT), },
{ .compatible = "maxim,max7313", .data = OF_953X(16, PCA_INT), },
{ .compatible = "maxim,max7315", .data = OF_953X( 8, PCA_INT), },
+ { .compatible = "maxim,max7318", .data = OF_953X(16, PCA_INT), },
{ .compatible = "ti,pca6107", .data = OF_953X( 8, PCA_INT), },
{ .compatible = "ti,pca9536", .data = OF_953X( 4, 0), },
diff --git a/drivers/gpio/gpio-pcf857x.c b/drivers/gpio/gpio-pcf857x.c
index d168410e2338..895af42a4513 100644
--- a/drivers/gpio/gpio-pcf857x.c
+++ b/drivers/gpio/gpio-pcf857x.c
@@ -378,9 +378,10 @@ static int pcf857x_probe(struct i2c_client *client,
/* Enable irqchip if we have an interrupt */
if (client->irq) {
- status = gpiochip_irqchip_add(&gpio->chip, &pcf857x_irq_chip,
- 0, handle_level_irq,
- IRQ_TYPE_NONE);
+ status = gpiochip_irqchip_add_nested(&gpio->chip,
+ &pcf857x_irq_chip,
+ 0, handle_level_irq,
+ IRQ_TYPE_NONE);
if (status) {
dev_err(&client->dev, "cannot add irqchip\n");
goto fail;
@@ -393,8 +394,8 @@ static int pcf857x_probe(struct i2c_client *client,
if (status)
goto fail;
- gpiochip_set_chained_irqchip(&gpio->chip, &pcf857x_irq_chip,
- client->irq, NULL);
+ gpiochip_set_nested_irqchip(&gpio->chip, &pcf857x_irq_chip,
+ client->irq);
gpio->irq_parent = client->irq;
}
diff --git a/drivers/gpio/gpio-pl061.c b/drivers/gpio/gpio-pl061.c
index 6e3c1430616f..0a6bfd2b06e5 100644
--- a/drivers/gpio/gpio-pl061.c
+++ b/drivers/gpio/gpio-pl061.c
@@ -23,7 +23,6 @@
#include <linux/gpio.h>
#include <linux/device.h>
#include <linux/amba/bus.h>
-#include <linux/amba/pl061.h>
#include <linux/slab.h>
#include <linux/pinctrl/consumer.h>
#include <linux/pm.h>
@@ -50,11 +49,12 @@ struct pl061_context_save_regs {
};
#endif
-struct pl061_gpio {
+struct pl061 {
spinlock_t lock;
void __iomem *base;
struct gpio_chip gc;
+ int parent_irq;
#ifdef CONFIG_PM
struct pl061_context_save_regs csave_regs;
@@ -63,22 +63,22 @@ struct pl061_gpio {
static int pl061_get_direction(struct gpio_chip *gc, unsigned offset)
{
- struct pl061_gpio *chip = gpiochip_get_data(gc);
+ struct pl061 *pl061 = gpiochip_get_data(gc);
- return !(readb(chip->base + GPIODIR) & BIT(offset));
+ return !(readb(pl061->base + GPIODIR) & BIT(offset));
}
static int pl061_direction_input(struct gpio_chip *gc, unsigned offset)
{
- struct pl061_gpio *chip = gpiochip_get_data(gc);
+ struct pl061 *pl061 = gpiochip_get_data(gc);
unsigned long flags;
unsigned char gpiodir;
- spin_lock_irqsave(&chip->lock, flags);
- gpiodir = readb(chip->base + GPIODIR);
+ spin_lock_irqsave(&pl061->lock, flags);
+ gpiodir = readb(pl061->base + GPIODIR);
gpiodir &= ~(BIT(offset));
- writeb(gpiodir, chip->base + GPIODIR);
- spin_unlock_irqrestore(&chip->lock, flags);
+ writeb(gpiodir, pl061->base + GPIODIR);
+ spin_unlock_irqrestore(&pl061->lock, flags);
return 0;
}
@@ -86,44 +86,44 @@ static int pl061_direction_input(struct gpio_chip *gc, unsigned offset)
static int pl061_direction_output(struct gpio_chip *gc, unsigned offset,
int value)
{
- struct pl061_gpio *chip = gpiochip_get_data(gc);
+ struct pl061 *pl061 = gpiochip_get_data(gc);
unsigned long flags;
unsigned char gpiodir;
- spin_lock_irqsave(&chip->lock, flags);
- writeb(!!value << offset, chip->base + (BIT(offset + 2)));
- gpiodir = readb(chip->base + GPIODIR);
+ spin_lock_irqsave(&pl061->lock, flags);
+ writeb(!!value << offset, pl061->base + (BIT(offset + 2)));
+ gpiodir = readb(pl061->base + GPIODIR);
gpiodir |= BIT(offset);
- writeb(gpiodir, chip->base + GPIODIR);
+ writeb(gpiodir, pl061->base + GPIODIR);
/*
* gpio value is set again because the pl061 doesn't allow setting the
* value of a gpio pin before it has been configured in OUT mode.
*/
- writeb(!!value << offset, chip->base + (BIT(offset + 2)));
- spin_unlock_irqrestore(&chip->lock, flags);
+ writeb(!!value << offset, pl061->base + (BIT(offset + 2)));
+ spin_unlock_irqrestore(&pl061->lock, flags);
return 0;
}
static int pl061_get_value(struct gpio_chip *gc, unsigned offset)
{
- struct pl061_gpio *chip = gpiochip_get_data(gc);
+ struct pl061 *pl061 = gpiochip_get_data(gc);
- return !!readb(chip->base + (BIT(offset + 2)));
+ return !!readb(pl061->base + (BIT(offset + 2)));
}
static void pl061_set_value(struct gpio_chip *gc, unsigned offset, int value)
{
- struct pl061_gpio *chip = gpiochip_get_data(gc);
+ struct pl061 *pl061 = gpiochip_get_data(gc);
- writeb(!!value << offset, chip->base + (BIT(offset + 2)));
+ writeb(!!value << offset, pl061->base + (BIT(offset + 2)));
}
static int pl061_irq_type(struct irq_data *d, unsigned trigger)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
- struct pl061_gpio *chip = gpiochip_get_data(gc);
+ struct pl061 *pl061 = gpiochip_get_data(gc);
int offset = irqd_to_hwirq(d);
unsigned long flags;
u8 gpiois, gpioibe, gpioiev;
@@ -143,11 +143,11 @@ static int pl061_irq_type(struct irq_data *d, unsigned trigger)
}
- spin_lock_irqsave(&chip->lock, flags);
+ spin_lock_irqsave(&pl061->lock, flags);
- gpioiev = readb(chip->base + GPIOIEV);
- gpiois = readb(chip->base + GPIOIS);
- gpioibe = readb(chip->base + GPIOIBE);
+ gpioiev = readb(pl061->base + GPIOIEV);
+ gpiois = readb(pl061->base + GPIOIS);
+ gpioibe = readb(pl061->base + GPIOIBE);
if (trigger & (IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_LEVEL_LOW)) {
bool polarity = trigger & IRQ_TYPE_LEVEL_HIGH;
@@ -199,11 +199,11 @@ static int pl061_irq_type(struct irq_data *d, unsigned trigger)
offset);
}
- writeb(gpiois, chip->base + GPIOIS);
- writeb(gpioibe, chip->base + GPIOIBE);
- writeb(gpioiev, chip->base + GPIOIEV);
+ writeb(gpiois, pl061->base + GPIOIS);
+ writeb(gpioibe, pl061->base + GPIOIBE);
+ writeb(gpioiev, pl061->base + GPIOIEV);
- spin_unlock_irqrestore(&chip->lock, flags);
+ spin_unlock_irqrestore(&pl061->lock, flags);
return 0;
}
@@ -213,12 +213,12 @@ static void pl061_irq_handler(struct irq_desc *desc)
unsigned long pending;
int offset;
struct gpio_chip *gc = irq_desc_get_handler_data(desc);
- struct pl061_gpio *chip = gpiochip_get_data(gc);
+ struct pl061 *pl061 = gpiochip_get_data(gc);
struct irq_chip *irqchip = irq_desc_get_chip(desc);
chained_irq_enter(irqchip, desc);
- pending = readb(chip->base + GPIOMIS);
+ pending = readb(pl061->base + GPIOMIS);
if (pending) {
for_each_set_bit(offset, &pending, PL061_GPIO_NR)
generic_handle_irq(irq_find_mapping(gc->irqdomain,
@@ -231,27 +231,27 @@ static void pl061_irq_handler(struct irq_desc *desc)
static void pl061_irq_mask(struct irq_data *d)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
- struct pl061_gpio *chip = gpiochip_get_data(gc);
+ struct pl061 *pl061 = gpiochip_get_data(gc);
u8 mask = BIT(irqd_to_hwirq(d) % PL061_GPIO_NR);
u8 gpioie;
- spin_lock(&chip->lock);
- gpioie = readb(chip->base + GPIOIE) & ~mask;
- writeb(gpioie, chip->base + GPIOIE);
- spin_unlock(&chip->lock);
+ spin_lock(&pl061->lock);
+ gpioie = readb(pl061->base + GPIOIE) & ~mask;
+ writeb(gpioie, pl061->base + GPIOIE);
+ spin_unlock(&pl061->lock);
}
static void pl061_irq_unmask(struct irq_data *d)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
- struct pl061_gpio *chip = gpiochip_get_data(gc);
+ struct pl061 *pl061 = gpiochip_get_data(gc);
u8 mask = BIT(irqd_to_hwirq(d) % PL061_GPIO_NR);
u8 gpioie;
- spin_lock(&chip->lock);
- gpioie = readb(chip->base + GPIOIE) | mask;
- writeb(gpioie, chip->base + GPIOIE);
- spin_unlock(&chip->lock);
+ spin_lock(&pl061->lock);
+ gpioie = readb(pl061->base + GPIOIE) | mask;
+ writeb(gpioie, pl061->base + GPIOIE);
+ spin_unlock(&pl061->lock);
}
/**
@@ -265,19 +265,20 @@ static void pl061_irq_unmask(struct irq_data *d)
static void pl061_irq_ack(struct irq_data *d)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
- struct pl061_gpio *chip = gpiochip_get_data(gc);
+ struct pl061 *pl061 = gpiochip_get_data(gc);
u8 mask = BIT(irqd_to_hwirq(d) % PL061_GPIO_NR);
- spin_lock(&chip->lock);
- writeb(mask, chip->base + GPIOIC);
- spin_unlock(&chip->lock);
+ spin_lock(&pl061->lock);
+ writeb(mask, pl061->base + GPIOIC);
+ spin_unlock(&pl061->lock);
}
static int pl061_irq_set_wake(struct irq_data *d, unsigned int state)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct pl061 *pl061 = gpiochip_get_data(gc);
- return irq_set_irq_wake(gc->irq_parent, state);
+ return irq_set_irq_wake(pl061->parent_irq, state);
}
static struct irq_chip pl061_irqchip = {
@@ -292,81 +293,60 @@ static struct irq_chip pl061_irqchip = {
static int pl061_probe(struct amba_device *adev, const struct amba_id *id)
{
struct device *dev = &adev->dev;
- struct pl061_platform_data *pdata = dev_get_platdata(dev);
- struct pl061_gpio *chip;
- int ret, irq, i, irq_base;
+ struct pl061 *pl061;
+ int ret, irq;
- chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL);
- if (chip == NULL)
+ pl061 = devm_kzalloc(dev, sizeof(*pl061), GFP_KERNEL);
+ if (pl061 == NULL)
return -ENOMEM;
- if (pdata) {
- chip->gc.base = pdata->gpio_base;
- irq_base = pdata->irq_base;
- if (irq_base <= 0) {
- dev_err(&adev->dev, "invalid IRQ base in pdata\n");
- return -ENODEV;
- }
- } else {
- chip->gc.base = -1;
- irq_base = 0;
- }
-
- chip->base = devm_ioremap_resource(dev, &adev->res);
- if (IS_ERR(chip->base))
- return PTR_ERR(chip->base);
+ pl061->base = devm_ioremap_resource(dev, &adev->res);
+ if (IS_ERR(pl061->base))
+ return PTR_ERR(pl061->base);
- spin_lock_init(&chip->lock);
+ spin_lock_init(&pl061->lock);
if (of_property_read_bool(dev->of_node, "gpio-ranges")) {
- chip->gc.request = gpiochip_generic_request;
- chip->gc.free = gpiochip_generic_free;
+ pl061->gc.request = gpiochip_generic_request;
+ pl061->gc.free = gpiochip_generic_free;
}
- chip->gc.get_direction = pl061_get_direction;
- chip->gc.direction_input = pl061_direction_input;
- chip->gc.direction_output = pl061_direction_output;
- chip->gc.get = pl061_get_value;
- chip->gc.set = pl061_set_value;
- chip->gc.ngpio = PL061_GPIO_NR;
- chip->gc.label = dev_name(dev);
- chip->gc.parent = dev;
- chip->gc.owner = THIS_MODULE;
-
- ret = gpiochip_add_data(&chip->gc, chip);
+ pl061->gc.base = -1;
+ pl061->gc.get_direction = pl061_get_direction;
+ pl061->gc.direction_input = pl061_direction_input;
+ pl061->gc.direction_output = pl061_direction_output;
+ pl061->gc.get = pl061_get_value;
+ pl061->gc.set = pl061_set_value;
+ pl061->gc.ngpio = PL061_GPIO_NR;
+ pl061->gc.label = dev_name(dev);
+ pl061->gc.parent = dev;
+ pl061->gc.owner = THIS_MODULE;
+
+ ret = gpiochip_add_data(&pl061->gc, pl061);
if (ret)
return ret;
/*
* irq_chip support
*/
- writeb(0, chip->base + GPIOIE); /* disable irqs */
+ writeb(0, pl061->base + GPIOIE); /* disable irqs */
irq = adev->irq[0];
if (irq < 0) {
dev_err(&adev->dev, "invalid IRQ\n");
return -ENODEV;
}
+ pl061->parent_irq = irq;
- ret = gpiochip_irqchip_add(&chip->gc, &pl061_irqchip,
- irq_base, handle_bad_irq,
+ ret = gpiochip_irqchip_add(&pl061->gc, &pl061_irqchip,
+ 0, handle_bad_irq,
IRQ_TYPE_NONE);
if (ret) {
dev_info(&adev->dev, "could not add irqchip\n");
return ret;
}
- gpiochip_set_chained_irqchip(&chip->gc, &pl061_irqchip,
+ gpiochip_set_chained_irqchip(&pl061->gc, &pl061_irqchip,
irq, pl061_irq_handler);
- for (i = 0; i < PL061_GPIO_NR; i++) {
- if (pdata) {
- if (pdata->directions & (BIT(i)))
- pl061_direction_output(&chip->gc, i,
- pdata->values & (BIT(i)));
- else
- pl061_direction_input(&chip->gc, i);
- }
- }
-
- amba_set_drvdata(adev, chip);
+ amba_set_drvdata(adev, pl061);
dev_info(&adev->dev, "PL061 GPIO chip @%pa registered\n",
&adev->res.start);
@@ -376,20 +356,20 @@ static int pl061_probe(struct amba_device *adev, const struct amba_id *id)
#ifdef CONFIG_PM
static int pl061_suspend(struct device *dev)
{
- struct pl061_gpio *chip = dev_get_drvdata(dev);
+ struct pl061 *pl061 = dev_get_drvdata(dev);
int offset;
- chip->csave_regs.gpio_data = 0;
- chip->csave_regs.gpio_dir = readb(chip->base + GPIODIR);
- chip->csave_regs.gpio_is = readb(chip->base + GPIOIS);
- chip->csave_regs.gpio_ibe = readb(chip->base + GPIOIBE);
- chip->csave_regs.gpio_iev = readb(chip->base + GPIOIEV);
- chip->csave_regs.gpio_ie = readb(chip->base + GPIOIE);
+ pl061->csave_regs.gpio_data = 0;
+ pl061->csave_regs.gpio_dir = readb(pl061->base + GPIODIR);
+ pl061->csave_regs.gpio_is = readb(pl061->base + GPIOIS);
+ pl061->csave_regs.gpio_ibe = readb(pl061->base + GPIOIBE);
+ pl061->csave_regs.gpio_iev = readb(pl061->base + GPIOIEV);
+ pl061->csave_regs.gpio_ie = readb(pl061->base + GPIOIE);
for (offset = 0; offset < PL061_GPIO_NR; offset++) {
- if (chip->csave_regs.gpio_dir & (BIT(offset)))
- chip->csave_regs.gpio_data |=
- pl061_get_value(&chip->gc, offset) << offset;
+ if (pl061->csave_regs.gpio_dir & (BIT(offset)))
+ pl061->csave_regs.gpio_data |=
+ pl061_get_value(&pl061->gc, offset) << offset;
}
return 0;
@@ -397,22 +377,22 @@ static int pl061_suspend(struct device *dev)
static int pl061_resume(struct device *dev)
{
- struct pl061_gpio *chip = dev_get_drvdata(dev);
+ struct pl061 *pl061 = dev_get_drvdata(dev);
int offset;
for (offset = 0; offset < PL061_GPIO_NR; offset++) {
- if (chip->csave_regs.gpio_dir & (BIT(offset)))
- pl061_direction_output(&chip->gc, offset,
- chip->csave_regs.gpio_data &
+ if (pl061->csave_regs.gpio_dir & (BIT(offset)))
+ pl061_direction_output(&pl061->gc, offset,
+ pl061->csave_regs.gpio_data &
(BIT(offset)));
else
- pl061_direction_input(&chip->gc, offset);
+ pl061_direction_input(&pl061->gc, offset);
}
- writeb(chip->csave_regs.gpio_is, chip->base + GPIOIS);
- writeb(chip->csave_regs.gpio_ibe, chip->base + GPIOIBE);
- writeb(chip->csave_regs.gpio_iev, chip->base + GPIOIEV);
- writeb(chip->csave_regs.gpio_ie, chip->base + GPIOIE);
+ writeb(pl061->csave_regs.gpio_is, pl061->base + GPIOIS);
+ writeb(pl061->csave_regs.gpio_ibe, pl061->base + GPIOIBE);
+ writeb(pl061->csave_regs.gpio_iev, pl061->base + GPIOIEV);
+ writeb(pl061->csave_regs.gpio_ie, pl061->base + GPIOIE);
return 0;
}
diff --git a/drivers/gpio/gpio-stmpe.c b/drivers/gpio/gpio-stmpe.c
index 5b0042776ec7..16cbc5702865 100644
--- a/drivers/gpio/gpio-stmpe.c
+++ b/drivers/gpio/gpio-stmpe.c
@@ -413,7 +413,7 @@ static irqreturn_t stmpe_gpio_irq(int irq, void *dev)
stmpe->partnum != STMPE1801) {
stmpe_reg_write(stmpe, statmsbreg + i, status[i]);
stmpe_reg_write(stmpe,
- stmpe->regs[STMPE_IDX_GPEDR_LSB + i],
+ stmpe->regs[STMPE_IDX_GPEDR_MSB] + i,
status[i]);
}
}
@@ -484,21 +484,20 @@ static int stmpe_gpio_probe(struct platform_device *pdev)
if (stmpe_gpio->norequest_mask & BIT(i))
clear_bit(i, stmpe_gpio->chip.irq_valid_mask);
}
- ret = gpiochip_irqchip_add(&stmpe_gpio->chip,
- &stmpe_gpio_irq_chip,
- 0,
- handle_simple_irq,
- IRQ_TYPE_NONE);
+ ret = gpiochip_irqchip_add_nested(&stmpe_gpio->chip,
+ &stmpe_gpio_irq_chip,
+ 0,
+ handle_simple_irq,
+ IRQ_TYPE_NONE);
if (ret) {
dev_err(&pdev->dev,
"could not connect irqchip to gpiochip\n");
goto out_disable;
}
- gpiochip_set_chained_irqchip(&stmpe_gpio->chip,
- &stmpe_gpio_irq_chip,
- irq,
- NULL);
+ gpiochip_set_nested_irqchip(&stmpe_gpio->chip,
+ &stmpe_gpio_irq_chip,
+ irq);
}
platform_set_drvdata(pdev, stmpe_gpio);
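
Editor's note: the one-line stmpe_gpio_irq change above fixes a subtle indexing bug: regs[] translates logical register indices into per-variant hardware addresses, and the per-bank offset i must be added to the address that lookup yields, not to the table index. A hypothetical helper showing the two readings of the expression:

static u8 example_gpedr_reg(const u8 *regs, int idx_gpedr_msb, int bank)
{
	/*
	 * regs[idx_gpedr_msb + bank] would index an unrelated table
	 * entry; the fix adds the bank offset to the looked-up
	 * register address instead:
	 */
	return regs[idx_gpedr_msb] + bank;
}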
diff --git a/drivers/gpio/gpio-sx150x.c b/drivers/gpio/gpio-sx150x.c
deleted file mode 100644
index af95de89db01..000000000000
--- a/drivers/gpio/gpio-sx150x.c
+++ /dev/null
@@ -1,792 +0,0 @@
-/* Copyright (c) 2010, Code Aurora Forum. All rights reserved.
- *
- * Driver for Semtech SX150X I2C GPIO Expanders
- *
- * Author: Gregory Bean <gbean@codeaurora.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
- */
-#include <linux/gpio.h>
-#include <linux/i2c.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/irq.h>
-#include <linux/mutex.h>
-#include <linux/slab.h>
-#include <linux/of.h>
-#include <linux/of_address.h>
-#include <linux/of_irq.h>
-#include <linux/of_gpio.h>
-#include <linux/of_device.h>
-
-#define NO_UPDATE_PENDING -1
-
-/* The chip models of sx150x */
-#define SX150X_123 0
-#define SX150X_456 1
-#define SX150X_789 2
-
-struct sx150x_123_pri {
- u8 reg_pld_mode;
- u8 reg_pld_table0;
- u8 reg_pld_table1;
- u8 reg_pld_table2;
- u8 reg_pld_table3;
- u8 reg_pld_table4;
- u8 reg_advance;
-};
-
-struct sx150x_456_pri {
- u8 reg_pld_mode;
- u8 reg_pld_table0;
- u8 reg_pld_table1;
- u8 reg_pld_table2;
- u8 reg_pld_table3;
- u8 reg_pld_table4;
- u8 reg_advance;
-};
-
-struct sx150x_789_pri {
- u8 reg_drain;
- u8 reg_polarity;
- u8 reg_clock;
- u8 reg_misc;
- u8 reg_reset;
- u8 ngpios;
-};
-
-struct sx150x_device_data {
- u8 model;
- u8 reg_pullup;
- u8 reg_pulldn;
- u8 reg_dir;
- u8 reg_data;
- u8 reg_irq_mask;
- u8 reg_irq_src;
- u8 reg_sense;
- u8 ngpios;
- union {
- struct sx150x_123_pri x123;
- struct sx150x_456_pri x456;
- struct sx150x_789_pri x789;
- } pri;
-};
-
-/**
- * struct sx150x_platform_data - config data for SX150x driver
- * @gpio_base: The index number of the first GPIO assigned to this
- * GPIO expander. The expander will create a block of
- * consecutively numbered gpios beginning at the given base,
- * with the size of the block depending on the model of the
- * expander chip.
- * @oscio_is_gpo: If set to true, the driver will configure OSCIO as a GPO
- * instead of as an oscillator, increasing the size of the
- * GP(I)O pool created by this expander by one. The
- * output-only GPO pin will be added at the end of the block.
- * @io_pullup_ena: A bit-mask which enables or disables the pull-up resistor
- * for each IO line in the expander. Setting the bit at
- * position n will enable the pull-up for the IO at
- * the corresponding offset. For chips with fewer than
- * 16 IO pins, high-end bits are ignored.
- * @io_pulldn_ena: A bit-mask which enables-or disables the pull-down
- * resistor for each IO line in the expander. Setting the
- * bit at position n will enable the pull-down for the IO at
- * the corresponding offset. For chips with fewer than
- * 16 IO pins, high-end bits are ignored.
- * @io_polarity: A bit-mask which enables polarity inversion for each IO line
- * in the expander. Setting the bit at position n inverts
- * the polarity of that IO line, while clearing it results
- * in normal polarity. For chips with fewer than 16 IO pins,
- * high-end bits are ignored.
- * @irq_summary: The 'summary IRQ' line to which the GPIO expander's INT line
- * is connected, via which it reports interrupt events
- * across all GPIO lines. This must be a real,
- * pre-existing IRQ line.
- * Setting this value < 0 disables the irq_chip functionality
- * of the driver.
- * @irq_base: The first 'virtual IRQ' line at which our block of GPIO-based
- * IRQ lines will appear. Similarly to gpio_base, the expander
- * will create a block of irqs beginning at this number.
- * This value is ignored if irq_summary is < 0.
- * @reset_during_probe: If set to true, the driver will trigger a full
- * reset of the chip at the beginning of the probe
- * in order to place it in a known state.
- */
-struct sx150x_platform_data {
- unsigned gpio_base;
- bool oscio_is_gpo;
- u16 io_pullup_ena;
- u16 io_pulldn_ena;
- u16 io_polarity;
- int irq_summary;
- unsigned irq_base;
- bool reset_during_probe;
-};
-
-struct sx150x_chip {
- struct gpio_chip gpio_chip;
- struct i2c_client *client;
- const struct sx150x_device_data *dev_cfg;
- int irq_summary;
- int irq_base;
- int irq_update;
- u32 irq_sense;
- u32 irq_masked;
- u32 dev_sense;
- u32 dev_masked;
- struct irq_chip irq_chip;
- struct mutex lock;
-};
-
-static const struct sx150x_device_data sx150x_devices[] = {
- [0] = { /* sx1508q */
- .model = SX150X_789,
- .reg_pullup = 0x03,
- .reg_pulldn = 0x04,
- .reg_dir = 0x07,
- .reg_data = 0x08,
- .reg_irq_mask = 0x09,
- .reg_irq_src = 0x0c,
- .reg_sense = 0x0b,
- .pri.x789 = {
- .reg_drain = 0x05,
- .reg_polarity = 0x06,
- .reg_clock = 0x0f,
- .reg_misc = 0x10,
- .reg_reset = 0x7d,
- },
- .ngpios = 8,
- },
- [1] = { /* sx1509q */
- .model = SX150X_789,
- .reg_pullup = 0x07,
- .reg_pulldn = 0x09,
- .reg_dir = 0x0f,
- .reg_data = 0x11,
- .reg_irq_mask = 0x13,
- .reg_irq_src = 0x19,
- .reg_sense = 0x17,
- .pri.x789 = {
- .reg_drain = 0x0b,
- .reg_polarity = 0x0d,
- .reg_clock = 0x1e,
- .reg_misc = 0x1f,
- .reg_reset = 0x7d,
- },
- .ngpios = 16
- },
- [2] = { /* sx1506q */
- .model = SX150X_456,
- .reg_pullup = 0x05,
- .reg_pulldn = 0x07,
- .reg_dir = 0x03,
- .reg_data = 0x01,
- .reg_irq_mask = 0x09,
- .reg_irq_src = 0x0f,
- .reg_sense = 0x0d,
- .pri.x456 = {
- .reg_pld_mode = 0x21,
- .reg_pld_table0 = 0x23,
- .reg_pld_table1 = 0x25,
- .reg_pld_table2 = 0x27,
- .reg_pld_table3 = 0x29,
- .reg_pld_table4 = 0x2b,
- .reg_advance = 0xad,
- },
- .ngpios = 16
- },
- [3] = { /* sx1502q */
- .model = SX150X_123,
- .reg_pullup = 0x02,
- .reg_pulldn = 0x03,
- .reg_dir = 0x01,
- .reg_data = 0x00,
- .reg_irq_mask = 0x05,
- .reg_irq_src = 0x08,
- .reg_sense = 0x07,
- .pri.x123 = {
- .reg_pld_mode = 0x10,
- .reg_pld_table0 = 0x11,
- .reg_pld_table1 = 0x12,
- .reg_pld_table2 = 0x13,
- .reg_pld_table3 = 0x14,
- .reg_pld_table4 = 0x15,
- .reg_advance = 0xad,
- },
- .ngpios = 8,
- },
-};
-
-static const struct i2c_device_id sx150x_id[] = {
- {"sx1508q", 0},
- {"sx1509q", 1},
- {"sx1506q", 2},
- {"sx1502q", 3},
- {}
-};
-
-static const struct of_device_id sx150x_of_match[] = {
- { .compatible = "semtech,sx1508q" },
- { .compatible = "semtech,sx1509q" },
- { .compatible = "semtech,sx1506q" },
- { .compatible = "semtech,sx1502q" },
- {},
-};
-
-static s32 sx150x_i2c_write(struct i2c_client *client, u8 reg, u8 val)
-{
- s32 err = i2c_smbus_write_byte_data(client, reg, val);
-
- if (err < 0)
- dev_warn(&client->dev,
- "i2c write fail: can't write %02x to %02x: %d\n",
- val, reg, err);
- return err;
-}
-
-static s32 sx150x_i2c_read(struct i2c_client *client, u8 reg, u8 *val)
-{
- s32 err = i2c_smbus_read_byte_data(client, reg);
-
- if (err >= 0)
- *val = err;
- else
- dev_warn(&client->dev,
- "i2c read fail: can't read from %02x: %d\n",
- reg, err);
- return err;
-}
-
-static inline bool offset_is_oscio(struct sx150x_chip *chip, unsigned offset)
-{
- return (chip->dev_cfg->ngpios == offset);
-}
-
-/*
- * These utility functions solve the common problem of locating and setting
- * configuration bits. Configuration bits are grouped into registers
- * whose indexes increase downwards. For example, with eight-bit registers,
- * sixteen gpios would have their config bits grouped in the following order:
- * REGISTER N-1 [ f e d c b a 9 8 ]
- * N [ 7 6 5 4 3 2 1 0 ]
- *
- * For multi-bit configurations, the pattern gets wider:
- * REGISTER N-3 [ f f e e d d c c ]
- * N-2 [ b b a a 9 9 8 8 ]
- * N-1 [ 7 7 6 6 5 5 4 4 ]
- * N [ 3 3 2 2 1 1 0 0 ]
- *
- * Given the address of the starting register 'N', the index of the gpio
- * whose configuration we seek to change, and the width in bits of that
- * configuration, these functions allow us to locate the correct
- * register and mask the correct bits.
- */
-static inline void sx150x_find_cfg(u8 offset, u8 width,
- u8 *reg, u8 *mask, u8 *shift)
-{
- *reg -= offset * width / 8;
- *mask = (1 << width) - 1;
- *shift = (offset * width) % 8;
- *mask <<= *shift;
-}
-
-static s32 sx150x_write_cfg(struct sx150x_chip *chip,
- u8 offset, u8 width, u8 reg, u8 val)
-{
- u8 mask;
- u8 data;
- u8 shift;
- s32 err;
-
- sx150x_find_cfg(offset, width, &reg, &mask, &shift);
- err = sx150x_i2c_read(chip->client, reg, &data);
- if (err < 0)
- return err;
-
- data &= ~mask;
- data |= (val << shift) & mask;
- return sx150x_i2c_write(chip->client, reg, data);
-}
-
-static int sx150x_get_io(struct sx150x_chip *chip, unsigned offset)
-{
- u8 reg = chip->dev_cfg->reg_data;
- u8 mask;
- u8 data;
- u8 shift;
- s32 err;
-
- sx150x_find_cfg(offset, 1, &reg, &mask, &shift);
- err = sx150x_i2c_read(chip->client, reg, &data);
- if (err >= 0)
- err = (data & mask) != 0 ? 1 : 0;
-
- return err;
-}
-
-static void sx150x_set_oscio(struct sx150x_chip *chip, int val)
-{
- sx150x_i2c_write(chip->client,
- chip->dev_cfg->pri.x789.reg_clock,
- (val ? 0x1f : 0x10));
-}
-
-static void sx150x_set_io(struct sx150x_chip *chip, unsigned offset, int val)
-{
- sx150x_write_cfg(chip,
- offset,
- 1,
- chip->dev_cfg->reg_data,
- (val ? 1 : 0));
-}
-
-static int sx150x_io_input(struct sx150x_chip *chip, unsigned offset)
-{
- return sx150x_write_cfg(chip,
- offset,
- 1,
- chip->dev_cfg->reg_dir,
- 1);
-}
-
-static int sx150x_io_output(struct sx150x_chip *chip, unsigned offset, int val)
-{
- int err;
-
- err = sx150x_write_cfg(chip,
- offset,
- 1,
- chip->dev_cfg->reg_data,
- (val ? 1 : 0));
- if (err >= 0)
- err = sx150x_write_cfg(chip,
- offset,
- 1,
- chip->dev_cfg->reg_dir,
- 0);
- return err;
-}
-
-static int sx150x_gpio_get(struct gpio_chip *gc, unsigned offset)
-{
- struct sx150x_chip *chip = gpiochip_get_data(gc);
- int status = -EINVAL;
-
- if (!offset_is_oscio(chip, offset)) {
- mutex_lock(&chip->lock);
- status = sx150x_get_io(chip, offset);
- mutex_unlock(&chip->lock);
- }
-
- return (status < 0) ? status : !!status;
-}
-
-static void sx150x_gpio_set(struct gpio_chip *gc, unsigned offset, int val)
-{
- struct sx150x_chip *chip = gpiochip_get_data(gc);
-
- mutex_lock(&chip->lock);
- if (offset_is_oscio(chip, offset))
- sx150x_set_oscio(chip, val);
- else
- sx150x_set_io(chip, offset, val);
- mutex_unlock(&chip->lock);
-}
-
-static int sx150x_gpio_set_single_ended(struct gpio_chip *gc,
- unsigned offset,
- enum single_ended_mode mode)
-{
- struct sx150x_chip *chip = gpiochip_get_data(gc);
-
- /* On the SX160X 789 we can set open drain */
- if (chip->dev_cfg->model != SX150X_789)
- return -ENOTSUPP;
-
- if (mode == LINE_MODE_PUSH_PULL)
- return sx150x_write_cfg(chip,
- offset,
- 1,
- chip->dev_cfg->pri.x789.reg_drain,
- 0);
-
- if (mode == LINE_MODE_OPEN_DRAIN)
- return sx150x_write_cfg(chip,
- offset,
- 1,
- chip->dev_cfg->pri.x789.reg_drain,
- 1);
- return -ENOTSUPP;
-}
-
-static int sx150x_gpio_direction_input(struct gpio_chip *gc, unsigned offset)
-{
- struct sx150x_chip *chip = gpiochip_get_data(gc);
- int status = -EINVAL;
-
- if (!offset_is_oscio(chip, offset)) {
- mutex_lock(&chip->lock);
- status = sx150x_io_input(chip, offset);
- mutex_unlock(&chip->lock);
- }
- return status;
-}
-
-static int sx150x_gpio_direction_output(struct gpio_chip *gc,
- unsigned offset,
- int val)
-{
- struct sx150x_chip *chip = gpiochip_get_data(gc);
- int status = 0;
-
- if (!offset_is_oscio(chip, offset)) {
- mutex_lock(&chip->lock);
- status = sx150x_io_output(chip, offset, val);
- mutex_unlock(&chip->lock);
- }
- return status;
-}
-
-static void sx150x_irq_mask(struct irq_data *d)
-{
- struct sx150x_chip *chip = gpiochip_get_data(irq_data_get_irq_chip_data(d));
- unsigned n = d->hwirq;
-
- chip->irq_masked |= (1 << n);
- chip->irq_update = n;
-}
-
-static void sx150x_irq_unmask(struct irq_data *d)
-{
- struct sx150x_chip *chip = gpiochip_get_data(irq_data_get_irq_chip_data(d));
- unsigned n = d->hwirq;
-
- chip->irq_masked &= ~(1 << n);
- chip->irq_update = n;
-}
-
-static int sx150x_irq_set_type(struct irq_data *d, unsigned int flow_type)
-{
- struct sx150x_chip *chip = gpiochip_get_data(irq_data_get_irq_chip_data(d));
- unsigned n, val = 0;
-
- if (flow_type & (IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_LEVEL_LOW))
- return -EINVAL;
-
- n = d->hwirq;
-
- if (flow_type & IRQ_TYPE_EDGE_RISING)
- val |= 0x1;
- if (flow_type & IRQ_TYPE_EDGE_FALLING)
- val |= 0x2;
-
- chip->irq_sense &= ~(3UL << (n * 2));
- chip->irq_sense |= val << (n * 2);
- chip->irq_update = n;
- return 0;
-}
-
-static irqreturn_t sx150x_irq_thread_fn(int irq, void *dev_id)
-{
- struct sx150x_chip *chip = (struct sx150x_chip *)dev_id;
- unsigned nhandled = 0;
- unsigned sub_irq;
- unsigned n;
- s32 err;
- u8 val;
- int i;
-
- for (i = (chip->dev_cfg->ngpios / 8) - 1; i >= 0; --i) {
- err = sx150x_i2c_read(chip->client,
- chip->dev_cfg->reg_irq_src - i,
- &val);
- if (err < 0)
- continue;
-
- sx150x_i2c_write(chip->client,
- chip->dev_cfg->reg_irq_src - i,
- val);
- for (n = 0; n < 8; ++n) {
- if (val & (1 << n)) {
- sub_irq = irq_find_mapping(
- chip->gpio_chip.irqdomain,
- (i * 8) + n);
- handle_nested_irq(sub_irq);
- ++nhandled;
- }
- }
- }
-
- return (nhandled > 0 ? IRQ_HANDLED : IRQ_NONE);
-}
-
-static void sx150x_irq_bus_lock(struct irq_data *d)
-{
- struct sx150x_chip *chip = gpiochip_get_data(irq_data_get_irq_chip_data(d));
-
- mutex_lock(&chip->lock);
-}
-
-static void sx150x_irq_bus_sync_unlock(struct irq_data *d)
-{
- struct sx150x_chip *chip = gpiochip_get_data(irq_data_get_irq_chip_data(d));
- unsigned n;
-
- if (chip->irq_update == NO_UPDATE_PENDING)
- goto out;
-
- n = chip->irq_update;
- chip->irq_update = NO_UPDATE_PENDING;
-
- /* Avoid updates if nothing changed */
- if (chip->dev_sense == chip->irq_sense &&
- chip->dev_masked == chip->irq_masked)
- goto out;
-
- chip->dev_sense = chip->irq_sense;
- chip->dev_masked = chip->irq_masked;
-
- if (chip->irq_masked & (1 << n)) {
- sx150x_write_cfg(chip, n, 1, chip->dev_cfg->reg_irq_mask, 1);
- sx150x_write_cfg(chip, n, 2, chip->dev_cfg->reg_sense, 0);
- } else {
- sx150x_write_cfg(chip, n, 1, chip->dev_cfg->reg_irq_mask, 0);
- sx150x_write_cfg(chip, n, 2, chip->dev_cfg->reg_sense,
- chip->irq_sense >> (n * 2));
- }
-out:
- mutex_unlock(&chip->lock);
-}
-
-static void sx150x_init_chip(struct sx150x_chip *chip,
- struct i2c_client *client,
- kernel_ulong_t driver_data,
- struct sx150x_platform_data *pdata)
-{
- mutex_init(&chip->lock);
-
- chip->client = client;
- chip->dev_cfg = &sx150x_devices[driver_data];
- chip->gpio_chip.parent = &client->dev;
- chip->gpio_chip.label = client->name;
- chip->gpio_chip.direction_input = sx150x_gpio_direction_input;
- chip->gpio_chip.direction_output = sx150x_gpio_direction_output;
- chip->gpio_chip.get = sx150x_gpio_get;
- chip->gpio_chip.set = sx150x_gpio_set;
- chip->gpio_chip.set_single_ended = sx150x_gpio_set_single_ended;
- chip->gpio_chip.base = pdata->gpio_base;
- chip->gpio_chip.can_sleep = true;
- chip->gpio_chip.ngpio = chip->dev_cfg->ngpios;
-#ifdef CONFIG_OF_GPIO
- chip->gpio_chip.of_node = client->dev.of_node;
- chip->gpio_chip.of_gpio_n_cells = 2;
-#endif
- if (pdata->oscio_is_gpo)
- ++chip->gpio_chip.ngpio;
-
- chip->irq_chip.name = client->name;
- chip->irq_chip.irq_mask = sx150x_irq_mask;
- chip->irq_chip.irq_unmask = sx150x_irq_unmask;
- chip->irq_chip.irq_set_type = sx150x_irq_set_type;
- chip->irq_chip.irq_bus_lock = sx150x_irq_bus_lock;
- chip->irq_chip.irq_bus_sync_unlock = sx150x_irq_bus_sync_unlock;
- chip->irq_summary = -1;
- chip->irq_base = -1;
- chip->irq_masked = ~0;
- chip->irq_sense = 0;
- chip->dev_masked = ~0;
- chip->dev_sense = 0;
- chip->irq_update = NO_UPDATE_PENDING;
-}
-
-static int sx150x_init_io(struct sx150x_chip *chip, u8 base, u16 cfg)
-{
- int err = 0;
- unsigned n;
-
- for (n = 0; err >= 0 && n < (chip->dev_cfg->ngpios / 8); ++n)
- err = sx150x_i2c_write(chip->client, base - n, cfg >> (n * 8));
- return err;
-}
-
-static int sx150x_reset(struct sx150x_chip *chip)
-{
- int err;
-
- err = i2c_smbus_write_byte_data(chip->client,
- chip->dev_cfg->pri.x789.reg_reset,
- 0x12);
- if (err < 0)
- return err;
-
- err = i2c_smbus_write_byte_data(chip->client,
- chip->dev_cfg->pri.x789.reg_reset,
- 0x34);
- return err;
-}
-
-static int sx150x_init_hw(struct sx150x_chip *chip,
- struct sx150x_platform_data *pdata)
-{
- int err = 0;
-
- if (pdata->reset_during_probe) {
- err = sx150x_reset(chip);
- if (err < 0)
- return err;
- }
-
- if (chip->dev_cfg->model == SX150X_789)
- err = sx150x_i2c_write(chip->client,
- chip->dev_cfg->pri.x789.reg_misc,
- 0x01);
- else if (chip->dev_cfg->model == SX150X_456)
- err = sx150x_i2c_write(chip->client,
- chip->dev_cfg->pri.x456.reg_advance,
- 0x04);
- else
- err = sx150x_i2c_write(chip->client,
- chip->dev_cfg->pri.x123.reg_advance,
- 0x00);
- if (err < 0)
- return err;
-
- err = sx150x_init_io(chip, chip->dev_cfg->reg_pullup,
- pdata->io_pullup_ena);
- if (err < 0)
- return err;
-
- err = sx150x_init_io(chip, chip->dev_cfg->reg_pulldn,
- pdata->io_pulldn_ena);
- if (err < 0)
- return err;
-
- if (chip->dev_cfg->model == SX150X_789) {
- err = sx150x_init_io(chip,
- chip->dev_cfg->pri.x789.reg_polarity,
- pdata->io_polarity);
- if (err < 0)
- return err;
- } else if (chip->dev_cfg->model == SX150X_456) {
- /* Set all pins to work in normal mode */
- err = sx150x_init_io(chip,
- chip->dev_cfg->pri.x456.reg_pld_mode,
- 0);
- if (err < 0)
- return err;
- } else {
- /* Set all pins to work in normal mode */
- err = sx150x_init_io(chip,
- chip->dev_cfg->pri.x123.reg_pld_mode,
- 0);
- if (err < 0)
- return err;
- }
-
-
- if (pdata->oscio_is_gpo)
- sx150x_set_oscio(chip, 0);
-
- return err;
-}
-
-static int sx150x_install_irq_chip(struct sx150x_chip *chip,
- int irq_summary,
- int irq_base)
-{
- int err;
-
- chip->irq_summary = irq_summary;
- chip->irq_base = irq_base;
-
- /* Add gpio chip to irq subsystem */
- err = gpiochip_irqchip_add(&chip->gpio_chip,
- &chip->irq_chip, chip->irq_base,
- handle_edge_irq, IRQ_TYPE_EDGE_BOTH);
- if (err) {
- dev_err(&chip->client->dev,
- "could not connect irqchip to gpiochip\n");
- return err;
- }
-
- err = devm_request_threaded_irq(&chip->client->dev,
- irq_summary, NULL, sx150x_irq_thread_fn,
- IRQF_ONESHOT | IRQF_SHARED | IRQF_TRIGGER_FALLING,
- chip->irq_chip.name, chip);
- if (err < 0) {
- chip->irq_summary = -1;
- chip->irq_base = -1;
- }
-
- return err;
-}
-
-static int sx150x_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
-{
- static const u32 i2c_funcs = I2C_FUNC_SMBUS_BYTE_DATA |
- I2C_FUNC_SMBUS_WRITE_WORD_DATA;
- struct sx150x_platform_data *pdata;
- struct sx150x_chip *chip;
- int rc;
-
- pdata = dev_get_platdata(&client->dev);
- if (!pdata)
- return -EINVAL;
-
- if (!i2c_check_functionality(client->adapter, i2c_funcs))
- return -ENOSYS;
-
- chip = devm_kzalloc(&client->dev,
- sizeof(struct sx150x_chip), GFP_KERNEL);
- if (!chip)
- return -ENOMEM;
-
- sx150x_init_chip(chip, client, id->driver_data, pdata);
- rc = sx150x_init_hw(chip, pdata);
- if (rc < 0)
- return rc;
-
- rc = devm_gpiochip_add_data(&client->dev, &chip->gpio_chip, chip);
- if (rc)
- return rc;
-
- if (pdata->irq_summary >= 0) {
- rc = sx150x_install_irq_chip(chip,
- pdata->irq_summary,
- pdata->irq_base);
- if (rc < 0)
- return rc;
- }
-
- i2c_set_clientdata(client, chip);
-
- return 0;
-}
-
-static struct i2c_driver sx150x_driver = {
- .driver = {
- .name = "sx150x",
- .of_match_table = of_match_ptr(sx150x_of_match),
- },
- .probe = sx150x_probe,
- .id_table = sx150x_id,
-};
-
-static int __init sx150x_init(void)
-{
- return i2c_add_driver(&sx150x_driver);
-}
-subsys_initcall(sx150x_init);
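
For context on the removal above: the sx150x driver is a textbook instance of the slow-bus irqchip pattern, where irq_mask()/irq_unmask()/irq_set_type() may run in atomic context and therefore only update cached state, and the sleeping I2C writes are flushed from irq_bus_sync_unlock(). A minimal sketch of that pattern, with invented names (not the removed driver verbatim):

	#include <linux/bits.h>
	#include <linux/irq.h>
	#include <linux/mutex.h>
	#include <linux/types.h>

	struct expander {			/* illustrative only */
		struct mutex lock;
		u16 irq_masked;		/* cached mask, safe to touch atomically */
		u16 dev_masked;		/* last mask actually written to HW */
	};

	int expander_write_mask(struct expander *chip, u16 mask); /* may sleep */

	static void expander_irq_bus_lock(struct irq_data *d)
	{
		struct expander *chip = irq_data_get_irq_chip_data(d);

		mutex_lock(&chip->lock);
	}

	static void expander_irq_mask(struct irq_data *d)
	{
		struct expander *chip = irq_data_get_irq_chip_data(d);

		chip->irq_masked |= BIT(d->hwirq);	/* cache only, no I2C */
	}

	static void expander_irq_bus_sync_unlock(struct irq_data *d)
	{
		struct expander *chip = irq_data_get_irq_chip_data(d);

		/* Flush the cache to hardware only if it changed. */
		if (chip->irq_masked != chip->dev_masked) {
			expander_write_mask(chip, chip->irq_masked);
			chip->dev_masked = chip->irq_masked;
		}
		mutex_unlock(&chip->lock);
	}
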
diff --git a/drivers/gpio/gpio-tc3589x.c b/drivers/gpio/gpio-tc3589x.c
index d6e21f1a70a9..be97101c2c9a 100644
--- a/drivers/gpio/gpio-tc3589x.c
+++ b/drivers/gpio/gpio-tc3589x.c
@@ -337,21 +337,20 @@ static int tc3589x_gpio_probe(struct platform_device *pdev)
return ret;
}
- ret = gpiochip_irqchip_add(&tc3589x_gpio->chip,
- &tc3589x_gpio_irq_chip,
- 0,
- handle_simple_irq,
- IRQ_TYPE_NONE);
+ ret = gpiochip_irqchip_add_nested(&tc3589x_gpio->chip,
+ &tc3589x_gpio_irq_chip,
+ 0,
+ handle_simple_irq,
+ IRQ_TYPE_NONE);
if (ret) {
dev_err(&pdev->dev,
"could not connect irqchip to gpiochip\n");
return ret;
}
- gpiochip_set_chained_irqchip(&tc3589x_gpio->chip,
- &tc3589x_gpio_irq_chip,
- irq,
- NULL);
+ gpiochip_set_nested_irqchip(&tc3589x_gpio->chip,
+ &tc3589x_gpio_irq_chip,
+ irq);
platform_set_drvdata(pdev, tc3589x_gpio);
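
This tc3589x hunk is a first user of the nested-irqchip API introduced by the gpiolib.c changes further down in this diff: gpiochip_irqchip_add_nested() marks the chip as nested instead of chained, and gpiochip_set_nested_irqchip() records the parent IRQ without installing a chained flow handler. Probe-time sketch of the idiom (hypothetical names; the driver requests the parent IRQ itself as a threaded interrupt):

	ret = gpiochip_irqchip_add_nested(&chip->gpio_chip, &chip->irq_chip,
					  0, handle_simple_irq, IRQ_TYPE_NONE);
	if (ret)
		return ret;

	/* The driver owns the sleeping demux thread for the parent IRQ. */
	ret = devm_request_threaded_irq(dev, parent_irq, NULL, demux_thread_fn,
					IRQF_ONESHOT, dev_name(dev), chip);
	if (ret)
		return ret;

	/* Tell gpiolib which parent IRQ this nested chip hangs off. */
	gpiochip_set_nested_irqchip(&chip->gpio_chip, &chip->irq_chip,
				    parent_irq);

The gpio-wcove conversion below follows the same shape.
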
diff --git a/drivers/gpio/gpio-tps65218.c b/drivers/gpio/gpio-tps65218.c
index d779307a9685..46e6dcc089cb 100644
--- a/drivers/gpio/gpio-tps65218.c
+++ b/drivers/gpio/gpio-tps65218.c
@@ -16,6 +16,7 @@
#include <linux/errno.h>
#include <linux/gpio/driver.h>
#include <linux/platform_device.h>
+#include <linux/regmap.h>
#include <linux/mfd/tps65218.h>
struct tps65218_gpio {
@@ -30,7 +31,7 @@ static int tps65218_gpio_get(struct gpio_chip *gc, unsigned offset)
unsigned int val;
int ret;
- ret = tps65218_reg_read(tps65218, TPS65218_REG_ENABLE2, &val);
+ ret = regmap_read(tps65218->regmap, TPS65218_REG_ENABLE2, &val);
if (ret)
return ret;
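
The tps65218 conversion drops a driver-private register accessor in favor of the generic regmap API exposed by the MFD parent. Sketch of the read idiom (with tps standing in for the parent mfd data; note that writes on this particular PMIC still go through its own password-protected helpers, so the update_bits line below is the generic regmap idiom rather than this driver's actual write path):

	unsigned int val;
	int ret;

	ret = regmap_read(tps->regmap, TPS65218_REG_ENABLE2, &val);
	if (ret)
		return ret;

	/* Generic locked read-modify-write via regmap: */
	ret = regmap_update_bits(tps->regmap, TPS65218_REG_ENABLE2,
				 BIT(offset), value ? BIT(offset) : 0);
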
diff --git a/drivers/gpio/gpio-vf610.c b/drivers/gpio/gpio-vf610.c
index 3edb09cb9ee0..521fbe338589 100644
--- a/drivers/gpio/gpio-vf610.c
+++ b/drivers/gpio/gpio-vf610.c
@@ -283,8 +283,4 @@ static struct platform_driver vf610_gpio_driver = {
.probe = vf610_gpio_probe,
};
-static int __init gpio_vf610_init(void)
-{
- return platform_driver_register(&vf610_gpio_driver);
-}
-device_initcall(gpio_vf610_init);
+builtin_platform_driver(vf610_gpio_driver);
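
builtin_platform_driver() is pure shorthand for the boilerplate deleted above; per include/linux/platform_device.h it expands to roughly:

	static int __init vf610_gpio_driver_init(void)
	{
		return platform_driver_register(&vf610_gpio_driver);
	}
	device_initcall(vf610_gpio_driver_init);

That is the same device_initcall() registration as before, with no module exit path, which is why the macro only suits drivers that cannot be built as modules.
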
diff --git a/drivers/gpio/gpio-wcove.c b/drivers/gpio/gpio-wcove.c
index d0ddba7a9d08..34baee5b1dd6 100644
--- a/drivers/gpio/gpio-wcove.c
+++ b/drivers/gpio/gpio-wcove.c
@@ -426,8 +426,8 @@ static int wcove_gpio_probe(struct platform_device *pdev)
return ret;
}
- ret = gpiochip_irqchip_add(&wg->chip, &wcove_irqchip, 0,
- handle_simple_irq, IRQ_TYPE_NONE);
+ ret = gpiochip_irqchip_add_nested(&wg->chip, &wcove_irqchip, 0,
+ handle_simple_irq, IRQ_TYPE_NONE);
if (ret) {
dev_err(dev, "Failed to add irqchip: %d\n", ret);
return ret;
@@ -446,6 +446,8 @@ static int wcove_gpio_probe(struct platform_device *pdev)
return ret;
}
+ gpiochip_set_nested_irqchip(&wg->chip, &wcove_irqchip, virq);
+
return 0;
}
diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c
index 72a4b326fd0d..a3faefa44f68 100644
--- a/drivers/gpio/gpiolib-acpi.c
+++ b/drivers/gpio/gpiolib-acpi.c
@@ -468,7 +468,8 @@ static int acpi_gpio_property_lookup(struct fwnode_handle *fwnode,
int ret;
memset(&args, 0, sizeof(args));
- ret = acpi_node_get_property_reference(fwnode, propname, index, &args);
+ ret = __acpi_node_get_property_reference(fwnode, propname, index, 3,
+ &args);
if (ret) {
struct acpi_device *adev = to_acpi_device_node(fwnode);
@@ -483,13 +484,13 @@ static int acpi_gpio_property_lookup(struct fwnode_handle *fwnode,
* on returned args.
*/
lookup->adev = args.adev;
- if (args.nargs >= 2) {
- lookup->index = args.args[0];
- lookup->pin_index = args.args[1];
- /* 3rd argument, if present is used to specify active_low. */
- if (args.nargs >= 3)
- lookup->active_low = !!args.args[2];
- }
+ if (args.nargs != 3)
+ return -EPROTO;
+
+ lookup->index = args.args[0];
+ lookup->pin_index = args.args[1];
+ lookup->active_low = !!args.args[2];
+
return 0;
}
@@ -859,6 +860,77 @@ static void acpi_gpiochip_free_regions(struct acpi_gpio_chip *achip)
}
}
+static struct gpio_desc *acpi_gpiochip_parse_own_gpio(
+ struct acpi_gpio_chip *achip, struct fwnode_handle *fwnode,
+ const char **name, unsigned int *lflags, unsigned int *dflags)
+{
+ struct gpio_chip *chip = achip->chip;
+ struct gpio_desc *desc;
+ u32 gpios[2];
+ int ret;
+
+ *lflags = 0;
+ *dflags = 0;
+ *name = NULL;
+
+ ret = fwnode_property_read_u32_array(fwnode, "gpios", gpios,
+ ARRAY_SIZE(gpios));
+ if (ret < 0)
+ return ERR_PTR(ret);
+
+ ret = acpi_gpiochip_pin_to_gpio_offset(chip->gpiodev, gpios[0]);
+ if (ret < 0)
+ return ERR_PTR(ret);
+
+ desc = gpiochip_get_desc(chip, ret);
+ if (IS_ERR(desc))
+ return desc;
+
+ if (gpios[1])
+ *lflags |= GPIO_ACTIVE_LOW;
+
+ if (fwnode_property_present(fwnode, "input"))
+ *dflags |= GPIOD_IN;
+ else if (fwnode_property_present(fwnode, "output-low"))
+ *dflags |= GPIOD_OUT_LOW;
+ else if (fwnode_property_present(fwnode, "output-high"))
+ *dflags |= GPIOD_OUT_HIGH;
+ else
+ return ERR_PTR(-EINVAL);
+
+ fwnode_property_read_string(fwnode, "line-name", name);
+
+ return desc;
+}
+
+static void acpi_gpiochip_scan_gpios(struct acpi_gpio_chip *achip)
+{
+ struct gpio_chip *chip = achip->chip;
+ struct fwnode_handle *fwnode;
+
+ device_for_each_child_node(chip->parent, fwnode) {
+ unsigned int lflags, dflags;
+ struct gpio_desc *desc;
+ const char *name;
+ int ret;
+
+ if (!fwnode_property_present(fwnode, "gpio-hog"))
+ continue;
+
+ desc = acpi_gpiochip_parse_own_gpio(achip, fwnode, &name,
+ &lflags, &dflags);
+ if (IS_ERR(desc))
+ continue;
+
+ ret = gpiod_hog(desc, name, lflags, dflags);
+ if (ret) {
+ dev_err(chip->parent, "Failed to hog GPIO\n");
+ fwnode_handle_put(fwnode);
+ return;
+ }
+ }
+}
+
void acpi_gpiochip_add(struct gpio_chip *chip)
{
struct acpi_gpio_chip *acpi_gpio;
@@ -889,7 +961,11 @@ void acpi_gpiochip_add(struct gpio_chip *chip)
return;
}
+ if (!chip->names)
+ devprop_gpiochip_set_names(chip);
+
acpi_gpiochip_request_regions(acpi_gpio);
+ acpi_gpiochip_scan_gpios(acpi_gpio);
acpi_walk_dep_device_list(handle);
}
@@ -918,18 +994,27 @@ void acpi_gpiochip_remove(struct gpio_chip *chip)
kfree(acpi_gpio);
}
-static unsigned int acpi_gpio_package_count(const union acpi_object *obj)
+static int acpi_gpio_package_count(const union acpi_object *obj)
{
const union acpi_object *element = obj->package.elements;
const union acpi_object *end = element + obj->package.count;
unsigned int count = 0;
while (element < end) {
- if (element->type == ACPI_TYPE_LOCAL_REFERENCE)
+ switch (element->type) {
+ case ACPI_TYPE_LOCAL_REFERENCE:
+ element += 3;
+ /* Fallthrough */
+ case ACPI_TYPE_INTEGER:
+ element++;
count++;
+ break;
- element++;
+ default:
+ return -EPROTO;
+ }
}
+
return count;
}
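
The reworked count relies on a subtle fallthrough: a local reference advances the cursor by four elements in total (the reference itself plus its three integer arguments: index, pin, active_low), while a bare integer, a hole marking an absent GPIO, advances by one; any other element type is a protocol error. Written without the fallthrough, the same walk reads (illustrative rewrite of the loop body only, not a proposed change):

	while (element < end) {
		if (element->type == ACPI_TYPE_LOCAL_REFERENCE)
			element += 4;	/* reference + index, pin, active_low */
		else if (element->type == ACPI_TYPE_INTEGER)
			element += 1;	/* integer hole, no GPIO present */
		else
			return -EPROTO;
		count++;
	}

This matches the stricter lookup earlier in the same file, which now rejects references that do not carry exactly three arguments.
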
diff --git a/drivers/gpio/gpiolib-devprop.c b/drivers/gpio/gpiolib-devprop.c
new file mode 100644
index 000000000000..27f383bda7d9
--- /dev/null
+++ b/drivers/gpio/gpiolib-devprop.c
@@ -0,0 +1,67 @@
+/*
+ * Device property helpers for GPIO chips.
+ *
+ * Copyright (C) 2016, Intel Corporation
+ * Author: Mika Westerberg <mika.westerberg@linux.intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/property.h>
+#include <linux/slab.h>
+#include <linux/gpio/consumer.h>
+#include <linux/gpio/driver.h>
+
+#include "gpiolib.h"
+
+/**
+ * devprop_gpiochip_set_names - Set GPIO line names using device properties
+ * @chip: GPIO chip whose lines should be named, if possible
+ *
+ * Looks for device property "gpio-line-names" and if it exists assigns
+ * GPIO line names for the chip. The memory allocated for the assigned
+ * names belong to the underlying firmware node and should not be released
+ * by the caller.
+ */
+void devprop_gpiochip_set_names(struct gpio_chip *chip)
+{
+ struct gpio_device *gdev = chip->gpiodev;
+ const char **names;
+ int ret, i;
+
+ if (!chip->parent) {
+ dev_warn(&gdev->dev, "GPIO chip parent is NULL\n");
+ return;
+ }
+
+ ret = device_property_read_string_array(chip->parent, "gpio-line-names",
+ NULL, 0);
+ if (ret < 0)
+ return;
+
+ if (ret != gdev->ngpio) {
+ dev_warn(chip->parent,
+ "names %d do not match number of GPIOs %d\n", ret,
+ gdev->ngpio);
+ return;
+ }
+
+ names = kcalloc(gdev->ngpio, sizeof(*names), GFP_KERNEL);
+ if (!names)
+ return;
+
+ ret = device_property_read_string_array(chip->parent, "gpio-line-names",
+ names, gdev->ngpio);
+ if (ret < 0) {
+ dev_warn(chip->parent, "failed to read GPIO line names\n");
+ kfree(names);
+ return;
+ }
+
+ for (i = 0; i < gdev->ngpio; i++)
+ gdev->descs[i].name = names[i];
+
+ kfree(names);
+}
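
The new helper uses the standard two-pass idiom for string-array device properties: a first call with a NULL buffer returns the element count, a second call fills the array. Because it goes through the unified device-property layer rather than raw OF calls, the same code names lines for both DT and ACPI _DSD, which is what lets of_gpiochip_set_names() be deleted below. The idiom in isolation (error handling abbreviated):

	const char **strs;
	int n;

	n = device_property_read_string_array(dev, "gpio-line-names", NULL, 0);
	if (n < 0)
		return;		/* property absent or malformed */

	strs = kcalloc(n, sizeof(*strs), GFP_KERNEL);
	if (!strs)
		return;

	n = device_property_read_string_array(dev, "gpio-line-names", strs, n);
	/*
	 * The strings live in the firmware node itself; only the pointer
	 * array must be freed once the names have been copied out.
	 */
	kfree(strs);
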
diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c
index 193f15d50bba..92b185f19232 100644
--- a/drivers/gpio/gpiolib-of.c
+++ b/drivers/gpio/gpiolib-of.c
@@ -226,51 +226,6 @@ static struct gpio_desc *of_parse_own_gpio(struct device_node *np,
}
/**
- * of_gpiochip_set_names() - set up the names of the lines
- * @chip: GPIO chip whose lines should be named, if possible
- */
-static void of_gpiochip_set_names(struct gpio_chip *gc)
-{
- struct gpio_device *gdev = gc->gpiodev;
- struct device_node *np = gc->of_node;
- int i;
- int nstrings;
-
- nstrings = of_property_count_strings(np, "gpio-line-names");
- if (nstrings <= 0)
- /* Lines names not present */
- return;
-
- /* This is normally not what you want */
- if (gdev->ngpio != nstrings)
- dev_info(&gdev->dev, "gpio-line-names specifies %d line "
- "names but there are %d lines on the chip\n",
- nstrings, gdev->ngpio);
-
- /*
- * Make sure to not index beyond the end of the number of descriptors
- * of the GPIO device.
- */
- for (i = 0; i < gdev->ngpio; i++) {
- const char *name;
- int ret;
-
- ret = of_property_read_string_index(np,
- "gpio-line-names",
- i,
- &name);
- if (ret) {
- if (ret != -ENODATA)
- dev_err(&gdev->dev,
- "unable to name line %d: %d\n",
- i, ret);
- break;
- }
- gdev->descs[i].name = name;
- }
-}
-
-/**
* of_gpiochip_scan_gpios - Scan gpio-controller for gpio definitions
* @chip: gpio chip to act on
*
@@ -296,8 +251,10 @@ static int of_gpiochip_scan_gpios(struct gpio_chip *chip)
continue;
ret = gpiod_hog(desc, name, lflags, dflags);
- if (ret < 0)
+ if (ret < 0) {
+ of_node_put(np);
return ret;
+ }
}
return 0;
@@ -526,7 +483,7 @@ int of_gpiochip_add(struct gpio_chip *chip)
/* If the chip defines names itself, these take precedence */
if (!chip->names)
- of_gpiochip_set_names(chip);
+ devprop_gpiochip_set_names(chip);
of_node_get(chip->of_node);
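
The of_node_put() added in of_gpiochip_scan_gpios() follows the standard rule for child-node iterators: the loop holds a reference on the node for the current iteration and drops it when advancing, so any early return must drop it by hand. Schematically (generic pattern, not the exact loop from this file):

	for_each_available_child_of_node(parent, np) {
		ret = do_something(np);
		if (ret) {
			of_node_put(np);	/* iterator will not run again */
			return ret;
		}
	}	/* normal termination: references already balanced */
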
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index 868128a676ba..f4c26c7826cd 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -986,7 +986,8 @@ static int gpio_chrdev_open(struct inode *inode, struct file *filp)
return -ENODEV;
get_device(&gdev->dev);
filp->private_data = gdev;
- return 0;
+
+ return nonseekable_open(inode, filp);
}
/**
@@ -1011,7 +1012,7 @@ static const struct file_operations gpio_fileops = {
.release = gpio_chrdev_release,
.open = gpio_chrdev_open,
.owner = THIS_MODULE,
- .llseek = noop_llseek,
+ .llseek = no_llseek,
.unlocked_ioctl = gpio_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = gpio_ioctl_compat,
@@ -1512,7 +1513,7 @@ static bool gpiochip_irqchip_irq_valid(const struct gpio_chip *gpiochip,
}
/**
- * gpiochip_set_chained_irqchip() - sets a chained irqchip to a gpiochip
+ * gpiochip_set_cascaded_irqchip() - connects a cascaded irqchip to a gpiochip
* @gpiochip: the gpiochip to set the irqchip chain to
* @irqchip: the irqchip to chain to the gpiochip
* @parent_irq: the irq number corresponding to the parent IRQ for this
@@ -1521,10 +1522,10 @@ static bool gpiochip_irqchip_irq_valid(const struct gpio_chip *gpiochip,
* coming out of the gpiochip. If the interrupt is nested rather than
* cascaded, pass NULL in this handler argument
*/
-void gpiochip_set_chained_irqchip(struct gpio_chip *gpiochip,
- struct irq_chip *irqchip,
- int parent_irq,
- irq_flow_handler_t parent_handler)
+static void gpiochip_set_cascaded_irqchip(struct gpio_chip *gpiochip,
+ struct irq_chip *irqchip,
+ int parent_irq,
+ irq_flow_handler_t parent_handler)
{
unsigned int offset;
@@ -1548,7 +1549,7 @@ void gpiochip_set_chained_irqchip(struct gpio_chip *gpiochip,
irq_set_chained_handler_and_data(parent_irq, parent_handler,
gpiochip);
- gpiochip->irq_parent = parent_irq;
+ gpiochip->irq_chained_parent = parent_irq;
}
/* Set the parent IRQ for all affected IRQs */
@@ -1559,9 +1560,48 @@ void gpiochip_set_chained_irqchip(struct gpio_chip *gpiochip,
parent_irq);
}
}
+
+/**
+ * gpiochip_set_chained_irqchip() - connects a chained irqchip to a gpiochip
+ * @gpiochip: the gpiochip to set the irqchip chain to
+ * @irqchip: the irqchip to chain to the gpiochip
+ * @parent_irq: the irq number corresponding to the parent IRQ for this
+ * chained irqchip
+ * @parent_handler: the parent interrupt handler for the accumulated IRQ
+ * coming out of the gpiochip. If the interrupt is nested rather than
+ * cascaded, pass NULL in this handler argument
+ */
+void gpiochip_set_chained_irqchip(struct gpio_chip *gpiochip,
+ struct irq_chip *irqchip,
+ int parent_irq,
+ irq_flow_handler_t parent_handler)
+{
+ gpiochip_set_cascaded_irqchip(gpiochip, irqchip, parent_irq,
+ parent_handler);
+}
EXPORT_SYMBOL_GPL(gpiochip_set_chained_irqchip);
/**
+ * gpiochip_set_nested_irqchip() - connects a nested irqchip to a gpiochip
+ * @gpiochip: the gpiochip to set the irqchip nested handler to
+ * @irqchip: the irqchip to nest to the gpiochip
+ * @parent_irq: the irq number corresponding to the parent IRQ for this
+ * nested irqchip
+ */
+void gpiochip_set_nested_irqchip(struct gpio_chip *gpiochip,
+ struct irq_chip *irqchip,
+ int parent_irq)
+{
+ if (!gpiochip->irq_nested) {
+ chip_err(gpiochip, "tried to nest a chained gpiochip\n");
+ return;
+ }
+ gpiochip_set_cascaded_irqchip(gpiochip, irqchip, parent_irq,
+ NULL);
+}
+EXPORT_SYMBOL_GPL(gpiochip_set_nested_irqchip);
+
+/**
* gpiochip_irq_map() - maps an IRQ into a GPIO irqchip
* @d: the irqdomain used by this irqchip
* @irq: the global irq number used by this GPIO irqchip irq
@@ -1583,8 +1623,8 @@ static int gpiochip_irq_map(struct irq_domain *d, unsigned int irq,
*/
irq_set_lockdep_class(irq, chip->lock_key);
irq_set_chip_and_handler(irq, chip->irqchip, chip->irq_handler);
- /* Chips that can sleep need nested thread handlers */
- if (chip->can_sleep && !chip->irq_not_threaded)
+ /* Chips that use nested thread handlers have them marked */
+ if (chip->irq_nested)
irq_set_nested_thread(irq, 1);
irq_set_noprobe(irq);
@@ -1602,7 +1642,7 @@ static void gpiochip_irq_unmap(struct irq_domain *d, unsigned int irq)
{
struct gpio_chip *chip = d->host_data;
- if (chip->can_sleep)
+ if (chip->irq_nested)
irq_set_nested_thread(irq, 0);
irq_set_chip_and_handler(irq, NULL, NULL);
irq_set_chip_data(irq, NULL);
@@ -1657,9 +1697,9 @@ static void gpiochip_irqchip_remove(struct gpio_chip *gpiochip)
acpi_gpiochip_free_interrupts(gpiochip);
- if (gpiochip->irq_parent) {
- irq_set_chained_handler(gpiochip->irq_parent, NULL);
- irq_set_handler_data(gpiochip->irq_parent, NULL);
+ if (gpiochip->irq_chained_parent) {
+ irq_set_chained_handler(gpiochip->irq_chained_parent, NULL);
+ irq_set_handler_data(gpiochip->irq_chained_parent, NULL);
}
/* Remove all IRQ mappings and delete the domain */
@@ -1683,7 +1723,7 @@ static void gpiochip_irqchip_remove(struct gpio_chip *gpiochip)
}
/**
- * gpiochip_irqchip_add() - adds an irqchip to a gpiochip
+ * _gpiochip_irqchip_add() - adds an irqchip to a gpiochip
* @gpiochip: the gpiochip to add the irqchip to
* @irqchip: the irqchip to add to the gpiochip
* @first_irq: if not dynamically assigned, the base (first) IRQ to
@@ -1691,6 +1731,8 @@ static void gpiochip_irqchip_remove(struct gpio_chip *gpiochip)
* @handler: the irq handler to use (often a predefined irq core function)
* @type: the default type for IRQs on this irqchip, pass IRQ_TYPE_NONE
* to have the core avoid setting up any default type in the hardware.
+ * @nested: whether this is a nested irqchip calling handle_nested_irq()
+ * in its IRQ handler
* @lock_key: lockdep class
*
* This function closely associates a certain irqchip with a certain
@@ -1712,6 +1754,7 @@ int _gpiochip_irqchip_add(struct gpio_chip *gpiochip,
unsigned int first_irq,
irq_flow_handler_t handler,
unsigned int type,
+ bool nested,
struct lock_class_key *lock_key)
{
struct device_node *of_node;
@@ -1726,6 +1769,7 @@ int _gpiochip_irqchip_add(struct gpio_chip *gpiochip,
pr_err("missing gpiochip .dev parent pointer\n");
return -EINVAL;
}
+ gpiochip->irq_nested = nested;
of_node = gpiochip->parent->of_node;
#ifdef CONFIG_OF_GPIO
/*
@@ -2223,6 +2267,7 @@ EXPORT_SYMBOL_GPL(gpiod_direction_input);
static int _gpiod_direction_output_raw(struct gpio_desc *desc, int value)
{
struct gpio_chip *gc = desc->gdev->chip;
+ int val = !!value;
int ret;
/* GPIOs used for IRQs shall not be set as output */
@@ -2242,7 +2287,7 @@ static int _gpiod_direction_output_raw(struct gpio_desc *desc, int value)
goto set_output_value;
}
/* Emulate open drain by not actively driving the line high */
- if (value)
+ if (val)
return gpiod_direction_input(desc);
}
else if (test_bit(FLAG_OPEN_SOURCE, &desc->flags)) {
@@ -2253,7 +2298,7 @@ static int _gpiod_direction_output_raw(struct gpio_desc *desc, int value)
goto set_output_value;
}
/* Emulate open source by not actively driving the line low */
- if (!value)
+ if (!val)
return gpiod_direction_input(desc);
} else {
/* Make sure to disable open drain/source hardware, if any */
@@ -2271,10 +2316,10 @@ set_output_value:
return -EIO;
}
- ret = gc->direction_output(gc, gpio_chip_hwgpio(desc), value);
+ ret = gc->direction_output(gc, gpio_chip_hwgpio(desc), val);
if (!ret)
set_bit(FLAG_IS_OUT, &desc->flags);
- trace_gpio_value(desc_to_gpio(desc), 0, value);
+ trace_gpio_value(desc_to_gpio(desc), 0, val);
trace_gpio_direction(desc_to_gpio(desc), 0, ret);
return ret;
}
@@ -2314,6 +2359,8 @@ int gpiod_direction_output(struct gpio_desc *desc, int value)
VALIDATE_DESC(desc);
if (test_bit(FLAG_ACTIVE_LOW, &desc->flags))
value = !value;
+ else
+ value = !!value;
return _gpiod_direction_output_raw(desc, value);
}
EXPORT_SYMBOL_GPL(gpiod_direction_output);
@@ -2758,6 +2805,15 @@ int gpiochip_lock_as_irq(struct gpio_chip *chip, unsigned int offset)
}
set_bit(FLAG_USED_AS_IRQ, &desc->flags);
+
+ /*
+ * If the consumer has not set up a label (such as when the
+ * IRQ is referenced from .to_irq()) we set up a label here
+ * so it is clear this is used as an interrupt.
+ */
+ if (!desc->label)
+ desc_set_label(desc, "interrupt");
+
return 0;
}
EXPORT_SYMBOL_GPL(gpiochip_lock_as_irq);
@@ -2772,10 +2828,17 @@ EXPORT_SYMBOL_GPL(gpiochip_lock_as_irq);
*/
void gpiochip_unlock_as_irq(struct gpio_chip *chip, unsigned int offset)
{
- if (offset >= chip->ngpio)
+ struct gpio_desc *desc;
+
+ desc = gpiochip_get_desc(chip, offset);
+ if (IS_ERR(desc))
return;
- clear_bit(FLAG_USED_AS_IRQ, &chip->gpiodev->descs[offset].flags);
+ clear_bit(FLAG_USED_AS_IRQ, &desc->flags);
+
+ /* If we only had this marking, erase it */
+ if (desc->label && !strcmp(desc->label, "interrupt"))
+ desc_set_label(desc, NULL);
}
EXPORT_SYMBOL_GPL(gpiochip_unlock_as_irq);
@@ -3170,7 +3233,7 @@ static int gpiod_configure_flags(struct gpio_desc *desc, const char *con_id,
/* Process flags */
if (dflags & GPIOD_FLAGS_BIT_DIR_OUT)
status = gpiod_direction_output(desc,
- dflags & GPIOD_FLAGS_BIT_DIR_VAL);
+ !!(dflags & GPIOD_FLAGS_BIT_DIR_VAL));
else
status = gpiod_direction_input(desc);
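
The val = !!value churn in _gpiod_direction_output_raw(), and the matching !!() in gpiod_configure_flags(), canonicalize the caller-supplied int once so that driver .direction_output() callbacks only ever see 0 or 1; previously a caller passing, say, 42 leaked that value straight through to the driver. In effect:

	int value = 42;		/* any non-zero means "drive high" */
	int val = !!value;	/* canonical boolean: val == 1 */

	gc->direction_output(gc, gpio_chip_hwgpio(desc), val);
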
diff --git a/drivers/gpio/gpiolib.h b/drivers/gpio/gpiolib.h
index 346fbda39220..d10eaf520860 100644
--- a/drivers/gpio/gpiolib.h
+++ b/drivers/gpio/gpiolib.h
@@ -209,6 +209,8 @@ static int __maybe_unused gpio_chip_hwgpio(const struct gpio_desc *desc)
return desc - &desc->gdev->descs[0];
}
+void devprop_gpiochip_set_names(struct gpio_chip *chip);
+
/* With descriptor prefix */
#define gpiod_emerg(desc, fmt, ...) \
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index 483059a22b1b..ebfe8404c25f 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -12,6 +12,7 @@ menuconfig DRM
select I2C
select I2C_ALGOBIT
select DMA_SHARED_BUFFER
+ select SYNC_FILE
help
Kernel-level support for the Direct Rendering Infrastructure (DRI)
introduced in XFree86 4.0. If you say Y here, you need to select
@@ -33,6 +34,20 @@ config DRM_DP_AUX_CHARDEV
read and write values to arbitrary DPCD registers on the DP aux
channel.
+config DRM_DEBUG_MM
+ bool "Insert extra checks and debug info into the DRM range managers"
+ default n
+ depends on DRM=y
+ depends on STACKTRACE_SUPPORT
+ select STACKDEPOT
+ help
+ Enable allocation tracking of memory manager and leak detection on
+ shutdown.
+
+ Recommended for driver developers only.
+
+ If in doubt, say "N".
+
config DRM_KMS_HELPER
tristate
depends on DRM
@@ -223,6 +238,12 @@ source "drivers/gpu/drm/hisilicon/Kconfig"
source "drivers/gpu/drm/mediatek/Kconfig"
+source "drivers/gpu/drm/zte/Kconfig"
+
+source "drivers/gpu/drm/mxsfb/Kconfig"
+
+source "drivers/gpu/drm/meson/Kconfig"
+
# Keep legacy drivers last
menuconfig DRM_LEGACY
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index 25c720454017..b9ae4280de9d 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -9,13 +9,14 @@ drm-y := drm_auth.o drm_bufs.o drm_cache.o \
drm_scatter.o drm_pci.o \
drm_platform.o drm_sysfs.o drm_hashtab.o drm_mm.o \
drm_crtc.o drm_fourcc.o drm_modes.o drm_edid.o \
- drm_info.o drm_debugfs.o drm_encoder_slave.o \
+ drm_info.o drm_encoder_slave.o \
drm_trace_points.o drm_global.o drm_prime.o \
drm_rect.o drm_vma_manager.o drm_flip_work.o \
drm_modeset_lock.o drm_atomic.o drm_bridge.o \
drm_framebuffer.o drm_connector.o drm_blend.o \
drm_encoder.o drm_mode_object.o drm_property.o \
- drm_plane.o drm_color_mgmt.o
+ drm_plane.o drm_color_mgmt.o drm_print.o \
+ drm_dumb_buffers.o drm_mode_config.o
drm-$(CONFIG_COMPAT) += drm_ioc32.o
drm-$(CONFIG_DRM_GEM_CMA_HELPER) += drm_gem_cma_helper.o
@@ -23,6 +24,7 @@ drm-$(CONFIG_PCI) += ati_pcigart.o
drm-$(CONFIG_DRM_PANEL) += drm_panel.o
drm-$(CONFIG_OF) += drm_of.o
drm-$(CONFIG_AGP) += drm_agpsupport.o
+drm-$(CONFIG_DEBUG_FS) += drm_debugfs.o drm_debugfs_crc.o
drm_kms_helper-y := drm_crtc_helper.o drm_dp_helper.o drm_probe_helper.o \
drm_plane_helper.o drm_dp_mst_topology.o drm_atomic_helper.o \
@@ -79,6 +81,7 @@ obj-$(CONFIG_DRM_TEGRA) += tegra/
obj-$(CONFIG_DRM_STI) += sti/
obj-$(CONFIG_DRM_IMX) += imx/
obj-$(CONFIG_DRM_MEDIATEK) += mediatek/
+obj-$(CONFIG_DRM_MESON) += meson/
obj-y += i2c/
obj-y += panel/
obj-y += bridge/
@@ -86,3 +89,5 @@ obj-$(CONFIG_DRM_FSL_DCU) += fsl-dcu/
obj-$(CONFIG_DRM_ETNAVIV) += etnaviv/
obj-$(CONFIG_DRM_ARCPGU)+= arc/
obj-y += hisilicon/
+obj-$(CONFIG_DRM_ZTE) += zte/
+obj-$(CONFIG_DRM_MXSFB) += mxsfb/
diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile
index 248a05d02917..41bd2bf28f4c 100644
--- a/drivers/gpu/drm/amd/amdgpu/Makefile
+++ b/drivers/gpu/drm/amd/amdgpu/Makefile
@@ -24,7 +24,7 @@ amdgpu-y += amdgpu_device.o amdgpu_kms.o \
atombios_encoders.o amdgpu_sa.o atombios_i2c.o \
amdgpu_prime.o amdgpu_vm.o amdgpu_ib.o amdgpu_pll.o \
amdgpu_ucode.o amdgpu_bo_list.o amdgpu_ctx.o amdgpu_sync.o \
- amdgpu_gtt_mgr.o
+ amdgpu_gtt_mgr.o amdgpu_vram_mgr.o
# add asic specific block
amdgpu-$(CONFIG_DRM_AMDGPU_CIK)+= cik.o cik_ih.o kv_smc.o kv_dpm.o \
diff --git a/drivers/gpu/drm/amd/amdgpu/ObjectID.h b/drivers/gpu/drm/amd/amdgpu/ObjectID.h
index b8d66670bb17..06192698bd96 100644
--- a/drivers/gpu/drm/amd/amdgpu/ObjectID.h
+++ b/drivers/gpu/drm/amd/amdgpu/ObjectID.h
@@ -90,7 +90,6 @@
#define ENCODER_OBJECT_ID_INTERNAL_VCE 0x24
#define ENCODER_OBJECT_ID_INTERNAL_UNIPHY3 0x25
#define ENCODER_OBJECT_ID_INTERNAL_AMCLK 0x27
-#define ENCODER_OBJECT_ID_VIRTUAL 0x28
#define ENCODER_OBJECT_ID_GENERAL_EXTERNAL_DVO 0xFF
@@ -120,7 +119,6 @@
#define CONNECTOR_OBJECT_ID_eDP 0x14
#define CONNECTOR_OBJECT_ID_MXM 0x15
#define CONNECTOR_OBJECT_ID_LVDS_eDP 0x16
-#define CONNECTOR_OBJECT_ID_VIRTUAL 0x17
/* deleted */
@@ -149,7 +147,6 @@
#define GRAPH_OBJECT_ENUM_ID5 0x05
#define GRAPH_OBJECT_ENUM_ID6 0x06
#define GRAPH_OBJECT_ENUM_ID7 0x07
-#define GRAPH_OBJECT_ENUM_VIRTUAL 0x08
/****************************************************/
/* Graphics Object ID Bit definition */
@@ -411,10 +408,6 @@
GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
ENCODER_OBJECT_ID_HDMI_ANX9805 << OBJECT_ID_SHIFT)
-#define ENCODER_VIRTUAL_ENUM_VIRTUAL ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
- GRAPH_OBJECT_ENUM_VIRTUAL << ENUM_ID_SHIFT |\
- ENCODER_OBJECT_ID_VIRTUAL << OBJECT_ID_SHIFT)
-
/****************************************************/
/* Connector Object ID definition - Shared with BIOS */
/****************************************************/
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 05c2850c04b0..63208e5c1588 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -34,7 +34,7 @@
#include <linux/kref.h>
#include <linux/interval_tree.h>
#include <linux/hashtable.h>
-#include <linux/fence.h>
+#include <linux/dma-fence.h>
#include <ttm/ttm_bo_api.h>
#include <ttm/ttm_bo_driver.h>
@@ -53,7 +53,11 @@
#include "amdgpu_ucode.h"
#include "amdgpu_ttm.h"
#include "amdgpu_gds.h"
+#include "amdgpu_sync.h"
+#include "amdgpu_ring.h"
+#include "amdgpu_vm.h"
#include "amd_powerplay.h"
+#include "amdgpu_dpm.h"
#include "amdgpu_acp.h"
#include "gpu_scheduler.h"
@@ -88,15 +92,16 @@ extern int amdgpu_vm_debug;
extern int amdgpu_sched_jobs;
extern int amdgpu_sched_hw_submission;
extern int amdgpu_powerplay;
-extern int amdgpu_powercontainment;
+extern int amdgpu_no_evict;
+extern int amdgpu_direct_gma_size;
extern unsigned amdgpu_pcie_gen_cap;
extern unsigned amdgpu_pcie_lane_cap;
extern unsigned amdgpu_cg_mask;
extern unsigned amdgpu_pg_mask;
extern char *amdgpu_disable_cu;
-extern int amdgpu_sclk_deep_sleep_en;
extern char *amdgpu_virtual_display;
extern unsigned amdgpu_pp_feature_mask;
+extern int amdgpu_vram_page_split;
#define AMDGPU_WAIT_IDLE_TIMEOUT_IN_MS 3000
#define AMDGPU_MAX_USEC_TIMEOUT 100000 /* 100 ms */
@@ -107,12 +112,6 @@ extern unsigned amdgpu_pp_feature_mask;
#define AMDGPUFB_CONN_LIMIT 4
#define AMDGPU_BIOS_NUM_SCRATCH 8
-/* max number of rings */
-#define AMDGPU_MAX_RINGS 16
-#define AMDGPU_MAX_GFX_RINGS 1
-#define AMDGPU_MAX_COMPUTE_RINGS 8
-#define AMDGPU_MAX_VCE_RINGS 3
-
/* max number of IP instances */
#define AMDGPU_MAX_SDMA_INSTANCES 2
@@ -152,8 +151,6 @@ extern unsigned amdgpu_pp_feature_mask;
struct amdgpu_device;
struct amdgpu_ib;
-struct amdgpu_vm;
-struct amdgpu_ring;
struct amdgpu_cs_parser;
struct amdgpu_job;
struct amdgpu_irq_src;
@@ -198,21 +195,38 @@ int amdgpu_wait_for_idle(struct amdgpu_device *adev,
bool amdgpu_is_idle(struct amdgpu_device *adev,
enum amd_ip_block_type block_type);
+#define AMDGPU_MAX_IP_NUM 16
+
+struct amdgpu_ip_block_status {
+ bool valid;
+ bool sw;
+ bool hw;
+ bool late_initialized;
+ bool hang;
+};
+
struct amdgpu_ip_block_version {
- enum amd_ip_block_type type;
- u32 major;
- u32 minor;
- u32 rev;
+ const enum amd_ip_block_type type;
+ const u32 major;
+ const u32 minor;
+ const u32 rev;
const struct amd_ip_funcs *funcs;
};
+struct amdgpu_ip_block {
+ struct amdgpu_ip_block_status status;
+ const struct amdgpu_ip_block_version *version;
+};
+
int amdgpu_ip_block_version_cmp(struct amdgpu_device *adev,
enum amd_ip_block_type type,
u32 major, u32 minor);
-const struct amdgpu_ip_block_version * amdgpu_get_ip_block(
- struct amdgpu_device *adev,
- enum amd_ip_block_type type);
+struct amdgpu_ip_block * amdgpu_get_ip_block(struct amdgpu_device *adev,
+ enum amd_ip_block_type type);
+
+int amdgpu_ip_block_add(struct amdgpu_device *adev,
+ const struct amdgpu_ip_block_version *ip_block_version);
/* provided by hw blocks that can move/clear data. e.g., gfx or sdma */
struct amdgpu_buffer_funcs {
@@ -286,47 +300,6 @@ struct amdgpu_ih_funcs {
void (*set_rptr)(struct amdgpu_device *adev);
};
-/* provided by hw blocks that expose a ring buffer for commands */
-struct amdgpu_ring_funcs {
- /* ring read/write ptr handling */
- u32 (*get_rptr)(struct amdgpu_ring *ring);
- u32 (*get_wptr)(struct amdgpu_ring *ring);
- void (*set_wptr)(struct amdgpu_ring *ring);
- /* validating and patching of IBs */
- int (*parse_cs)(struct amdgpu_cs_parser *p, uint32_t ib_idx);
- /* command emit functions */
- void (*emit_ib)(struct amdgpu_ring *ring,
- struct amdgpu_ib *ib,
- unsigned vm_id, bool ctx_switch);
- void (*emit_fence)(struct amdgpu_ring *ring, uint64_t addr,
- uint64_t seq, unsigned flags);
- void (*emit_pipeline_sync)(struct amdgpu_ring *ring);
- void (*emit_vm_flush)(struct amdgpu_ring *ring, unsigned vm_id,
- uint64_t pd_addr);
- void (*emit_hdp_flush)(struct amdgpu_ring *ring);
- void (*emit_hdp_invalidate)(struct amdgpu_ring *ring);
- void (*emit_gds_switch)(struct amdgpu_ring *ring, uint32_t vmid,
- uint32_t gds_base, uint32_t gds_size,
- uint32_t gws_base, uint32_t gws_size,
- uint32_t oa_base, uint32_t oa_size);
- /* testing functions */
- int (*test_ring)(struct amdgpu_ring *ring);
- int (*test_ib)(struct amdgpu_ring *ring, long timeout);
- /* insert NOP packets */
- void (*insert_nop)(struct amdgpu_ring *ring, uint32_t count);
- /* pad the indirect buffer to the necessary number of dw */
- void (*pad_ib)(struct amdgpu_ring *ring, struct amdgpu_ib *ib);
- unsigned (*init_cond_exec)(struct amdgpu_ring *ring);
- void (*patch_cond_exec)(struct amdgpu_ring *ring, unsigned offset);
- /* note usage for clock and power gating */
- void (*begin_use)(struct amdgpu_ring *ring);
- void (*end_use)(struct amdgpu_ring *ring);
- void (*emit_switch_buffer) (struct amdgpu_ring *ring);
- void (*emit_cntxcntl) (struct amdgpu_ring *ring, uint32_t flags);
- unsigned (*get_emit_ib_size) (struct amdgpu_ring *ring);
- unsigned (*get_dma_frame_size) (struct amdgpu_ring *ring);
-};
-
/*
* BIOS.
*/
@@ -364,47 +337,6 @@ struct amdgpu_clock {
};
/*
- * Fences.
- */
-struct amdgpu_fence_driver {
- uint64_t gpu_addr;
- volatile uint32_t *cpu_addr;
- /* sync_seq is protected by ring emission lock */
- uint32_t sync_seq;
- atomic_t last_seq;
- bool initialized;
- struct amdgpu_irq_src *irq_src;
- unsigned irq_type;
- struct timer_list fallback_timer;
- unsigned num_fences_mask;
- spinlock_t lock;
- struct fence **fences;
-};
-
-/* some special values for the owner field */
-#define AMDGPU_FENCE_OWNER_UNDEFINED ((void*)0ul)
-#define AMDGPU_FENCE_OWNER_VM ((void*)1ul)
-
-#define AMDGPU_FENCE_FLAG_64BIT (1 << 0)
-#define AMDGPU_FENCE_FLAG_INT (1 << 1)
-
-int amdgpu_fence_driver_init(struct amdgpu_device *adev);
-void amdgpu_fence_driver_fini(struct amdgpu_device *adev);
-void amdgpu_fence_driver_force_completion(struct amdgpu_device *adev);
-
-int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring,
- unsigned num_hw_submission);
-int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
- struct amdgpu_irq_src *irq_src,
- unsigned irq_type);
-void amdgpu_fence_driver_suspend(struct amdgpu_device *adev);
-void amdgpu_fence_driver_resume(struct amdgpu_device *adev);
-int amdgpu_fence_emit(struct amdgpu_ring *ring, struct fence **fence);
-void amdgpu_fence_process(struct amdgpu_ring *ring);
-int amdgpu_fence_wait_empty(struct amdgpu_ring *ring);
-unsigned amdgpu_fence_count_emitted(struct amdgpu_ring *ring);
-
-/*
* BO.
*/
struct amdgpu_bo_list_entry {
@@ -427,7 +359,7 @@ struct amdgpu_bo_va_mapping {
struct amdgpu_bo_va {
/* protected by bo being reserved */
struct list_head bo_list;
- struct fence *last_pt_update;
+ struct dma_fence *last_pt_update;
unsigned ref_count;
/* protected by vm mutex and spinlock */
@@ -465,7 +397,6 @@ struct amdgpu_bo {
*/
struct list_head va;
/* Constant after initialization */
- struct amdgpu_device *adev;
struct drm_gem_object gem_base;
struct amdgpu_bo *parent;
struct amdgpu_bo *shadow;
@@ -544,7 +475,7 @@ struct amdgpu_sa_bo {
struct amdgpu_sa_manager *manager;
unsigned soffset;
unsigned eoffset;
- struct fence *fence;
+ struct dma_fence *fence;
};
/*
@@ -562,27 +493,6 @@ int amdgpu_mode_dumb_create(struct drm_file *file_priv,
int amdgpu_mode_dumb_mmap(struct drm_file *filp,
struct drm_device *dev,
uint32_t handle, uint64_t *offset_p);
-/*
- * Synchronization
- */
-struct amdgpu_sync {
- DECLARE_HASHTABLE(fences, 4);
- struct fence *last_vm_update;
-};
-
-void amdgpu_sync_create(struct amdgpu_sync *sync);
-int amdgpu_sync_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync,
- struct fence *f);
-int amdgpu_sync_resv(struct amdgpu_device *adev,
- struct amdgpu_sync *sync,
- struct reservation_object *resv,
- void *owner);
-struct fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync,
- struct amdgpu_ring *ring);
-struct fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync);
-void amdgpu_sync_free(struct amdgpu_sync *sync);
-int amdgpu_sync_init(void);
-void amdgpu_sync_fini(void);
int amdgpu_fence_slab_init(void);
void amdgpu_fence_slab_fini(void);
@@ -704,10 +614,10 @@ struct amdgpu_flip_work {
uint64_t base;
struct drm_pending_vblank_event *event;
struct amdgpu_bo *old_abo;
- struct fence *excl;
+ struct dma_fence *excl;
unsigned shared_count;
- struct fence **shared;
- struct fence_cb cb;
+ struct dma_fence **shared;
+ struct dma_fence_cb cb;
bool async;
};
@@ -724,14 +634,6 @@ struct amdgpu_ib {
uint32_t flags;
};
-enum amdgpu_ring_type {
- AMDGPU_RING_TYPE_GFX,
- AMDGPU_RING_TYPE_COMPUTE,
- AMDGPU_RING_TYPE_SDMA,
- AMDGPU_RING_TYPE_UVD,
- AMDGPU_RING_TYPE_VCE
-};
-
extern const struct amd_sched_backend_ops amdgpu_sched_ops;
int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
@@ -743,214 +645,7 @@ void amdgpu_job_free_resources(struct amdgpu_job *job);
void amdgpu_job_free(struct amdgpu_job *job);
int amdgpu_job_submit(struct amdgpu_job *job, struct amdgpu_ring *ring,
struct amd_sched_entity *entity, void *owner,
- struct fence **f);
-
-struct amdgpu_ring {
- struct amdgpu_device *adev;
- const struct amdgpu_ring_funcs *funcs;
- struct amdgpu_fence_driver fence_drv;
- struct amd_gpu_scheduler sched;
-
- struct amdgpu_bo *ring_obj;
- volatile uint32_t *ring;
- unsigned rptr_offs;
- unsigned wptr;
- unsigned wptr_old;
- unsigned ring_size;
- unsigned max_dw;
- int count_dw;
- uint64_t gpu_addr;
- uint32_t align_mask;
- uint32_t ptr_mask;
- bool ready;
- u32 nop;
- u32 idx;
- u32 me;
- u32 pipe;
- u32 queue;
- struct amdgpu_bo *mqd_obj;
- u32 doorbell_index;
- bool use_doorbell;
- unsigned wptr_offs;
- unsigned fence_offs;
- uint64_t current_ctx;
- enum amdgpu_ring_type type;
- char name[16];
- unsigned cond_exe_offs;
- u64 cond_exe_gpu_addr;
- volatile u32 *cond_exe_cpu_addr;
-#if defined(CONFIG_DEBUG_FS)
- struct dentry *ent;
-#endif
-};
-
-/*
- * VM
- */
-
-/* maximum number of VMIDs */
-#define AMDGPU_NUM_VM 16
-
-/* Maximum number of PTEs the hardware can write with one command */
-#define AMDGPU_VM_MAX_UPDATE_SIZE 0x3FFFF
-
-/* number of entries in page table */
-#define AMDGPU_VM_PTE_COUNT (1 << amdgpu_vm_block_size)
-
-/* PTBs (Page Table Blocks) need to be aligned to 32K */
-#define AMDGPU_VM_PTB_ALIGN_SIZE 32768
-
-/* LOG2 number of continuous pages for the fragment field */
-#define AMDGPU_LOG2_PAGES_PER_FRAG 4
-
-#define AMDGPU_PTE_VALID (1 << 0)
-#define AMDGPU_PTE_SYSTEM (1 << 1)
-#define AMDGPU_PTE_SNOOPED (1 << 2)
-
-/* VI only */
-#define AMDGPU_PTE_EXECUTABLE (1 << 4)
-
-#define AMDGPU_PTE_READABLE (1 << 5)
-#define AMDGPU_PTE_WRITEABLE (1 << 6)
-
-#define AMDGPU_PTE_FRAG(x) ((x & 0x1f) << 7)
-
-/* How to program VM fault handling */
-#define AMDGPU_VM_FAULT_STOP_NEVER 0
-#define AMDGPU_VM_FAULT_STOP_FIRST 1
-#define AMDGPU_VM_FAULT_STOP_ALWAYS 2
-
-struct amdgpu_vm_pt {
- struct amdgpu_bo_list_entry entry;
- uint64_t addr;
- uint64_t shadow_addr;
-};
-
-struct amdgpu_vm {
- /* tree of virtual addresses mapped */
- struct rb_root va;
-
- /* protecting invalidated */
- spinlock_t status_lock;
-
- /* BOs moved, but not yet updated in the PT */
- struct list_head invalidated;
-
- /* BOs cleared in the PT because of a move */
- struct list_head cleared;
-
- /* BO mappings freed, but not yet updated in the PT */
- struct list_head freed;
-
- /* contains the page directory */
- struct amdgpu_bo *page_directory;
- unsigned max_pde_used;
- struct fence *page_directory_fence;
- uint64_t last_eviction_counter;
-
- /* array of page tables, one for each page directory entry */
- struct amdgpu_vm_pt *page_tables;
-
- /* for id and flush management per ring */
- struct amdgpu_vm_id *ids[AMDGPU_MAX_RINGS];
-
- /* protecting freed */
- spinlock_t freed_lock;
-
- /* Scheduler entity for page table updates */
- struct amd_sched_entity entity;
-
- /* client id */
- u64 client_id;
-};
-
-struct amdgpu_vm_id {
- struct list_head list;
- struct fence *first;
- struct amdgpu_sync active;
- struct fence *last_flush;
- atomic64_t owner;
-
- uint64_t pd_gpu_addr;
- /* last flushed PD/PT update */
- struct fence *flushed_updates;
-
- uint32_t current_gpu_reset_count;
-
- uint32_t gds_base;
- uint32_t gds_size;
- uint32_t gws_base;
- uint32_t gws_size;
- uint32_t oa_base;
- uint32_t oa_size;
-};
-
-struct amdgpu_vm_manager {
- /* Handling of VMIDs */
- struct mutex lock;
- unsigned num_ids;
- struct list_head ids_lru;
- struct amdgpu_vm_id ids[AMDGPU_NUM_VM];
-
- /* Handling of VM fences */
- u64 fence_context;
- unsigned seqno[AMDGPU_MAX_RINGS];
-
- uint32_t max_pfn;
- /* vram base address for page table entry */
- u64 vram_base_offset;
- /* is vm enabled? */
- bool enabled;
- /* vm pte handling */
- const struct amdgpu_vm_pte_funcs *vm_pte_funcs;
- struct amdgpu_ring *vm_pte_rings[AMDGPU_MAX_RINGS];
- unsigned vm_pte_num_rings;
- atomic_t vm_pte_next_ring;
- /* client id counter */
- atomic64_t client_counter;
-};
-
-void amdgpu_vm_manager_init(struct amdgpu_device *adev);
-void amdgpu_vm_manager_fini(struct amdgpu_device *adev);
-int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm);
-void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm);
-void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm,
- struct list_head *validated,
- struct amdgpu_bo_list_entry *entry);
-void amdgpu_vm_get_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
- struct list_head *duplicates);
-void amdgpu_vm_move_pt_bos_in_lru(struct amdgpu_device *adev,
- struct amdgpu_vm *vm);
-int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
- struct amdgpu_sync *sync, struct fence *fence,
- struct amdgpu_job *job);
-int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job);
-void amdgpu_vm_reset_id(struct amdgpu_device *adev, unsigned vm_id);
-int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
- struct amdgpu_vm *vm);
-int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
- struct amdgpu_vm *vm);
-int amdgpu_vm_clear_invalids(struct amdgpu_device *adev, struct amdgpu_vm *vm,
- struct amdgpu_sync *sync);
-int amdgpu_vm_bo_update(struct amdgpu_device *adev,
- struct amdgpu_bo_va *bo_va,
- bool clear);
-void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
- struct amdgpu_bo *bo);
-struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
- struct amdgpu_bo *bo);
-struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
- struct amdgpu_vm *vm,
- struct amdgpu_bo *bo);
-int amdgpu_vm_bo_map(struct amdgpu_device *adev,
- struct amdgpu_bo_va *bo_va,
- uint64_t addr, uint64_t offset,
- uint64_t size, uint32_t flags);
-int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
- struct amdgpu_bo_va *bo_va,
- uint64_t addr);
-void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
- struct amdgpu_bo_va *bo_va);
+ struct dma_fence **f);
/*
* context related structures
@@ -958,7 +653,7 @@ void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
struct amdgpu_ctx_ring {
uint64_t sequence;
- struct fence **fences;
+ struct dma_fence **fences;
struct amd_sched_entity entity;
};
@@ -967,7 +662,7 @@ struct amdgpu_ctx {
struct amdgpu_device *adev;
unsigned reset_counter;
spinlock_t ring_lock;
- struct fence **fences;
+ struct dma_fence **fences;
struct amdgpu_ctx_ring rings[AMDGPU_MAX_RINGS];
bool preamble_presented;
};
@@ -983,8 +678,8 @@ struct amdgpu_ctx *amdgpu_ctx_get(struct amdgpu_fpriv *fpriv, uint32_t id);
int amdgpu_ctx_put(struct amdgpu_ctx *ctx);
uint64_t amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx, struct amdgpu_ring *ring,
- struct fence *fence);
-struct fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
+ struct dma_fence *fence);
+struct dma_fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
struct amdgpu_ring *ring, uint64_t seq);
int amdgpu_ctx_ioctl(struct drm_device *dev, void *data,
@@ -1094,6 +789,16 @@ struct amdgpu_scratch {
/*
* GFX configurations
*/
+#define AMDGPU_GFX_MAX_SE 4
+#define AMDGPU_GFX_MAX_SH_PER_SE 2
+
+struct amdgpu_rb_config {
+ uint32_t rb_backend_disable;
+ uint32_t user_rb_backend_disable;
+ uint32_t raster_config;
+ uint32_t raster_config_1;
+};
+
struct amdgpu_gca_config {
unsigned max_shader_engines;
unsigned max_tile_pipes;
@@ -1122,6 +827,8 @@ struct amdgpu_gca_config {
uint32_t tile_mode_array[32];
uint32_t macrotile_mode_array[16];
+
+ struct amdgpu_rb_config rb_config[AMDGPU_GFX_MAX_SE][AMDGPU_GFX_MAX_SH_PER_SE];
};
struct amdgpu_cu_info {
@@ -1134,6 +841,9 @@ struct amdgpu_gfx_funcs {
/* get the gpu clock counter */
uint64_t (*get_gpu_clock_counter)(struct amdgpu_device *adev);
void (*select_se_sh)(struct amdgpu_device *adev, u32 se_num, u32 sh_num, u32 instance);
+ void (*read_wave_data)(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t *dst, int *no_fields);
+ void (*read_wave_vgprs)(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t thread, uint32_t start, uint32_t size, uint32_t *dst);
+ void (*read_wave_sgprs)(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t start, uint32_t size, uint32_t *dst);
};
struct amdgpu_gfx {
@@ -1182,23 +892,13 @@ struct amdgpu_gfx {
int amdgpu_ib_get(struct amdgpu_device *adev, struct amdgpu_vm *vm,
unsigned size, struct amdgpu_ib *ib);
void amdgpu_ib_free(struct amdgpu_device *adev, struct amdgpu_ib *ib,
- struct fence *f);
+ struct dma_fence *f);
int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
- struct amdgpu_ib *ib, struct fence *last_vm_update,
- struct amdgpu_job *job, struct fence **f);
+ struct amdgpu_ib *ib, struct dma_fence *last_vm_update,
+ struct amdgpu_job *job, struct dma_fence **f);
int amdgpu_ib_pool_init(struct amdgpu_device *adev);
void amdgpu_ib_pool_fini(struct amdgpu_device *adev);
int amdgpu_ib_ring_tests(struct amdgpu_device *adev);
-int amdgpu_ring_alloc(struct amdgpu_ring *ring, unsigned ndw);
-void amdgpu_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count);
-void amdgpu_ring_generic_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib);
-void amdgpu_ring_commit(struct amdgpu_ring *ring);
-void amdgpu_ring_undo(struct amdgpu_ring *ring);
-int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
- unsigned ring_size, u32 nop, u32 align_mask,
- struct amdgpu_irq_src *irq_src, unsigned irq_type,
- enum amdgpu_ring_type ring_type);
-void amdgpu_ring_fini(struct amdgpu_ring *ring);
/*
* CS.
@@ -1226,7 +926,7 @@ struct amdgpu_cs_parser {
struct amdgpu_bo_list *bo_list;
struct amdgpu_bo_list_entry vm_pd;
struct list_head validated;
- struct fence *fence;
+ struct dma_fence *fence;
uint64_t bytes_moved_threshold;
uint64_t bytes_moved;
struct amdgpu_bo_list_entry *evictable;
@@ -1246,7 +946,7 @@ struct amdgpu_job {
struct amdgpu_ring *ring;
struct amdgpu_sync sync;
struct amdgpu_ib *ibs;
- struct fence *fence; /* the hw fence */
+ struct dma_fence *fence; /* the hw fence */
uint32_t preamble_status;
uint32_t num_ibs;
void *owner;
@@ -1295,354 +995,6 @@ struct amdgpu_wb {
int amdgpu_wb_get(struct amdgpu_device *adev, u32 *wb);
void amdgpu_wb_free(struct amdgpu_device *adev, u32 wb);
-
-
-enum amdgpu_int_thermal_type {
- THERMAL_TYPE_NONE,
- THERMAL_TYPE_EXTERNAL,
- THERMAL_TYPE_EXTERNAL_GPIO,
- THERMAL_TYPE_RV6XX,
- THERMAL_TYPE_RV770,
- THERMAL_TYPE_ADT7473_WITH_INTERNAL,
- THERMAL_TYPE_EVERGREEN,
- THERMAL_TYPE_SUMO,
- THERMAL_TYPE_NI,
- THERMAL_TYPE_SI,
- THERMAL_TYPE_EMC2103_WITH_INTERNAL,
- THERMAL_TYPE_CI,
- THERMAL_TYPE_KV,
-};
-
-enum amdgpu_dpm_auto_throttle_src {
- AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL,
- AMDGPU_DPM_AUTO_THROTTLE_SRC_EXTERNAL
-};
-
-enum amdgpu_dpm_event_src {
- AMDGPU_DPM_EVENT_SRC_ANALOG = 0,
- AMDGPU_DPM_EVENT_SRC_EXTERNAL = 1,
- AMDGPU_DPM_EVENT_SRC_DIGITAL = 2,
- AMDGPU_DPM_EVENT_SRC_ANALOG_OR_EXTERNAL = 3,
- AMDGPU_DPM_EVENT_SRC_DIGIAL_OR_EXTERNAL = 4
-};
-
-#define AMDGPU_MAX_VCE_LEVELS 6
-
-enum amdgpu_vce_level {
- AMDGPU_VCE_LEVEL_AC_ALL = 0, /* AC, All cases */
- AMDGPU_VCE_LEVEL_DC_EE = 1, /* DC, entropy encoding */
- AMDGPU_VCE_LEVEL_DC_LL_LOW = 2, /* DC, low latency queue, res <= 720 */
- AMDGPU_VCE_LEVEL_DC_LL_HIGH = 3, /* DC, low latency queue, 1080 >= res > 720 */
- AMDGPU_VCE_LEVEL_DC_GP_LOW = 4, /* DC, general purpose queue, res <= 720 */
- AMDGPU_VCE_LEVEL_DC_GP_HIGH = 5, /* DC, general purpose queue, 1080 >= res > 720 */
-};
-
-struct amdgpu_ps {
- u32 caps; /* vbios flags */
- u32 class; /* vbios flags */
- u32 class2; /* vbios flags */
- /* UVD clocks */
- u32 vclk;
- u32 dclk;
- /* VCE clocks */
- u32 evclk;
- u32 ecclk;
- bool vce_active;
- enum amdgpu_vce_level vce_level;
- /* asic priv */
- void *ps_priv;
-};
-
-struct amdgpu_dpm_thermal {
- /* thermal interrupt work */
- struct work_struct work;
- /* low temperature threshold */
- int min_temp;
- /* high temperature threshold */
- int max_temp;
- /* was last interrupt low to high or high to low */
- bool high_to_low;
- /* interrupt source */
- struct amdgpu_irq_src irq;
-};
-
-enum amdgpu_clk_action
-{
- AMDGPU_SCLK_UP = 1,
- AMDGPU_SCLK_DOWN
-};
-
-struct amdgpu_blacklist_clocks
-{
- u32 sclk;
- u32 mclk;
- enum amdgpu_clk_action action;
-};
-
-struct amdgpu_clock_and_voltage_limits {
- u32 sclk;
- u32 mclk;
- u16 vddc;
- u16 vddci;
-};
-
-struct amdgpu_clock_array {
- u32 count;
- u32 *values;
-};
-
-struct amdgpu_clock_voltage_dependency_entry {
- u32 clk;
- u16 v;
-};
-
-struct amdgpu_clock_voltage_dependency_table {
- u32 count;
- struct amdgpu_clock_voltage_dependency_entry *entries;
-};
-
-union amdgpu_cac_leakage_entry {
- struct {
- u16 vddc;
- u32 leakage;
- };
- struct {
- u16 vddc1;
- u16 vddc2;
- u16 vddc3;
- };
-};
-
-struct amdgpu_cac_leakage_table {
- u32 count;
- union amdgpu_cac_leakage_entry *entries;
-};
-
-struct amdgpu_phase_shedding_limits_entry {
- u16 voltage;
- u32 sclk;
- u32 mclk;
-};
-
-struct amdgpu_phase_shedding_limits_table {
- u32 count;
- struct amdgpu_phase_shedding_limits_entry *entries;
-};
-
-struct amdgpu_uvd_clock_voltage_dependency_entry {
- u32 vclk;
- u32 dclk;
- u16 v;
-};
-
-struct amdgpu_uvd_clock_voltage_dependency_table {
- u8 count;
- struct amdgpu_uvd_clock_voltage_dependency_entry *entries;
-};
-
-struct amdgpu_vce_clock_voltage_dependency_entry {
- u32 ecclk;
- u32 evclk;
- u16 v;
-};
-
-struct amdgpu_vce_clock_voltage_dependency_table {
- u8 count;
- struct amdgpu_vce_clock_voltage_dependency_entry *entries;
-};
-
-struct amdgpu_ppm_table {
- u8 ppm_design;
- u16 cpu_core_number;
- u32 platform_tdp;
- u32 small_ac_platform_tdp;
- u32 platform_tdc;
- u32 small_ac_platform_tdc;
- u32 apu_tdp;
- u32 dgpu_tdp;
- u32 dgpu_ulv_power;
- u32 tj_max;
-};
-
-struct amdgpu_cac_tdp_table {
- u16 tdp;
- u16 configurable_tdp;
- u16 tdc;
- u16 battery_power_limit;
- u16 small_power_limit;
- u16 low_cac_leakage;
- u16 high_cac_leakage;
- u16 maximum_power_delivery_limit;
-};
-
-struct amdgpu_dpm_dynamic_state {
- struct amdgpu_clock_voltage_dependency_table vddc_dependency_on_sclk;
- struct amdgpu_clock_voltage_dependency_table vddci_dependency_on_mclk;
- struct amdgpu_clock_voltage_dependency_table vddc_dependency_on_mclk;
- struct amdgpu_clock_voltage_dependency_table mvdd_dependency_on_mclk;
- struct amdgpu_clock_voltage_dependency_table vddc_dependency_on_dispclk;
- struct amdgpu_uvd_clock_voltage_dependency_table uvd_clock_voltage_dependency_table;
- struct amdgpu_vce_clock_voltage_dependency_table vce_clock_voltage_dependency_table;
- struct amdgpu_clock_voltage_dependency_table samu_clock_voltage_dependency_table;
- struct amdgpu_clock_voltage_dependency_table acp_clock_voltage_dependency_table;
- struct amdgpu_clock_voltage_dependency_table vddgfx_dependency_on_sclk;
- struct amdgpu_clock_array valid_sclk_values;
- struct amdgpu_clock_array valid_mclk_values;
- struct amdgpu_clock_and_voltage_limits max_clock_voltage_on_dc;
- struct amdgpu_clock_and_voltage_limits max_clock_voltage_on_ac;
- u32 mclk_sclk_ratio;
- u32 sclk_mclk_delta;
- u16 vddc_vddci_delta;
- u16 min_vddc_for_pcie_gen2;
- struct amdgpu_cac_leakage_table cac_leakage_table;
- struct amdgpu_phase_shedding_limits_table phase_shedding_limits_table;
- struct amdgpu_ppm_table *ppm_table;
- struct amdgpu_cac_tdp_table *cac_tdp_table;
-};
-
-struct amdgpu_dpm_fan {
- u16 t_min;
- u16 t_med;
- u16 t_high;
- u16 pwm_min;
- u16 pwm_med;
- u16 pwm_high;
- u8 t_hyst;
- u32 cycle_delay;
- u16 t_max;
- u8 control_mode;
- u16 default_max_fan_pwm;
- u16 default_fan_output_sensitivity;
- u16 fan_output_sensitivity;
- bool ucode_fan_control;
-};
-
-enum amdgpu_pcie_gen {
- AMDGPU_PCIE_GEN1 = 0,
- AMDGPU_PCIE_GEN2 = 1,
- AMDGPU_PCIE_GEN3 = 2,
- AMDGPU_PCIE_GEN_INVALID = 0xffff
-};
-
-enum amdgpu_dpm_forced_level {
- AMDGPU_DPM_FORCED_LEVEL_AUTO = 0,
- AMDGPU_DPM_FORCED_LEVEL_LOW = 1,
- AMDGPU_DPM_FORCED_LEVEL_HIGH = 2,
- AMDGPU_DPM_FORCED_LEVEL_MANUAL = 3,
-};
-
-struct amdgpu_vce_state {
- /* vce clocks */
- u32 evclk;
- u32 ecclk;
- /* gpu clocks */
- u32 sclk;
- u32 mclk;
- u8 clk_idx;
- u8 pstate;
-};
-
-struct amdgpu_dpm_funcs {
- int (*get_temperature)(struct amdgpu_device *adev);
- int (*pre_set_power_state)(struct amdgpu_device *adev);
- int (*set_power_state)(struct amdgpu_device *adev);
- void (*post_set_power_state)(struct amdgpu_device *adev);
- void (*display_configuration_changed)(struct amdgpu_device *adev);
- u32 (*get_sclk)(struct amdgpu_device *adev, bool low);
- u32 (*get_mclk)(struct amdgpu_device *adev, bool low);
- void (*print_power_state)(struct amdgpu_device *adev, struct amdgpu_ps *ps);
- void (*debugfs_print_current_performance_level)(struct amdgpu_device *adev, struct seq_file *m);
- int (*force_performance_level)(struct amdgpu_device *adev, enum amdgpu_dpm_forced_level level);
- bool (*vblank_too_short)(struct amdgpu_device *adev);
- void (*powergate_uvd)(struct amdgpu_device *adev, bool gate);
- void (*powergate_vce)(struct amdgpu_device *adev, bool gate);
- void (*enable_bapm)(struct amdgpu_device *adev, bool enable);
- void (*set_fan_control_mode)(struct amdgpu_device *adev, u32 mode);
- u32 (*get_fan_control_mode)(struct amdgpu_device *adev);
- int (*set_fan_speed_percent)(struct amdgpu_device *adev, u32 speed);
- int (*get_fan_speed_percent)(struct amdgpu_device *adev, u32 *speed);
- int (*force_clock_level)(struct amdgpu_device *adev, enum pp_clock_type type, uint32_t mask);
- int (*print_clock_levels)(struct amdgpu_device *adev, enum pp_clock_type type, char *buf);
- int (*get_sclk_od)(struct amdgpu_device *adev);
- int (*set_sclk_od)(struct amdgpu_device *adev, uint32_t value);
- int (*get_mclk_od)(struct amdgpu_device *adev);
- int (*set_mclk_od)(struct amdgpu_device *adev, uint32_t value);
-};
-
-struct amdgpu_dpm {
- struct amdgpu_ps *ps;
- /* number of valid power states */
- int num_ps;
- /* current power state that is active */
- struct amdgpu_ps *current_ps;
- /* requested power state */
- struct amdgpu_ps *requested_ps;
- /* boot up power state */
- struct amdgpu_ps *boot_ps;
- /* default uvd power state */
- struct amdgpu_ps *uvd_ps;
- /* vce requirements */
- struct amdgpu_vce_state vce_states[AMDGPU_MAX_VCE_LEVELS];
- enum amdgpu_vce_level vce_level;
- enum amd_pm_state_type state;
- enum amd_pm_state_type user_state;
- u32 platform_caps;
- u32 voltage_response_time;
- u32 backbias_response_time;
- void *priv;
- u32 new_active_crtcs;
- int new_active_crtc_count;
- u32 current_active_crtcs;
- int current_active_crtc_count;
- struct amdgpu_dpm_dynamic_state dyn_state;
- struct amdgpu_dpm_fan fan;
- u32 tdp_limit;
- u32 near_tdp_limit;
- u32 near_tdp_limit_adjusted;
- u32 sq_ramping_threshold;
- u32 cac_leakage;
- u16 tdp_od_limit;
- u32 tdp_adjustment;
- u16 load_line_slope;
- bool power_control;
- bool ac_power;
- /* special states active */
- bool thermal_active;
- bool uvd_active;
- bool vce_active;
- /* thermal handling */
- struct amdgpu_dpm_thermal thermal;
- /* forced levels */
- enum amdgpu_dpm_forced_level forced_level;
-};
-
-struct amdgpu_pm {
- struct mutex mutex;
- u32 current_sclk;
- u32 current_mclk;
- u32 default_sclk;
- u32 default_mclk;
- struct amdgpu_i2c_chan *i2c_bus;
- /* internal thermal controller on rv6xx+ */
- enum amdgpu_int_thermal_type int_thermal_type;
- struct device *int_hwmon_dev;
- /* fan control parameters */
- bool no_fan;
- u8 fan_pulses_per_revolution;
- u8 fan_min_rpm;
- u8 fan_max_rpm;
- /* dpm */
- bool dpm_enabled;
- bool sysfs_initialized;
- struct amdgpu_dpm dpm;
- const struct firmware *fw; /* SMC firmware */
- uint32_t fw_version;
- const struct amdgpu_dpm_funcs *funcs;
- uint32_t pcie_gen_mask;
- uint32_t pcie_mlw_mask;
- struct amd_pp_display_configuration pm_display_cfg;/* set by DAL */
-};
-
void amdgpu_get_pcie_info(struct amdgpu_device *adev);
/*
@@ -1863,6 +1215,8 @@ int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp);
int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp);
int amdgpu_cs_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *filp);
+int amdgpu_cs_wait_fences_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *filp);
int amdgpu_gem_metadata_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp);
@@ -1940,14 +1294,6 @@ typedef void (*amdgpu_wreg_t)(struct amdgpu_device*, uint32_t, uint32_t);
typedef uint32_t (*amdgpu_block_rreg_t)(struct amdgpu_device*, uint32_t, uint32_t);
typedef void (*amdgpu_block_wreg_t)(struct amdgpu_device*, uint32_t, uint32_t, uint32_t);
-struct amdgpu_ip_block_status {
- bool valid;
- bool sw;
- bool hw;
- bool late_initialized;
- bool hang;
-};
-
struct amdgpu_device {
struct device *dev;
struct drm_device *ddev;
@@ -1986,6 +1332,7 @@ struct amdgpu_device {
/* BIOS */
uint8_t *bios;
+ uint32_t bios_size;
bool is_atom_bios;
struct amdgpu_bo *stollen_vga_memory;
uint32_t bios_scratch[AMDGPU_BIOS_NUM_SCRATCH];
@@ -2103,9 +1450,8 @@ struct amdgpu_device {
/* GDS */
struct amdgpu_gds gds;
- const struct amdgpu_ip_block_version *ip_blocks;
+ struct amdgpu_ip_block ip_blocks[AMDGPU_MAX_IP_NUM];
int num_ip_blocks;
- struct amdgpu_ip_block_status *ip_block_status;
struct mutex mn_lock;
DECLARE_HASHTABLE(mn_hash, 7);
@@ -2128,6 +1474,11 @@ struct amdgpu_device {
};
+static inline struct amdgpu_device *amdgpu_ttm_adev(struct ttm_bo_device *bdev)
+{
+ return container_of(bdev, struct amdgpu_device, mman.bdev);
+}
+
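The new container_of() helper recovers the amdgpu_device from a ttm_bo_device, replacing the bo->adev back-pointer that this series removes from amdgpu_bo. A minimal usage sketch, assuming a struct amdgpu_bo *bo as in the amdgpu_cs.c hunks further down:

        struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);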
bool amdgpu_device_is_px(struct drm_device *dev);
int amdgpu_device_init(struct amdgpu_device *adev,
struct drm_device *ddev,
@@ -2279,15 +1630,12 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
#define amdgpu_ring_pad_ib(r, ib) ((r)->funcs->pad_ib((r), (ib)))
#define amdgpu_ring_init_cond_exec(r) (r)->funcs->init_cond_exec((r))
#define amdgpu_ring_patch_cond_exec(r,o) (r)->funcs->patch_cond_exec((r),(o))
-#define amdgpu_ring_get_emit_ib_size(r) (r)->funcs->get_emit_ib_size((r))
-#define amdgpu_ring_get_dma_frame_size(r) (r)->funcs->get_dma_frame_size((r))
#define amdgpu_ih_get_wptr(adev) (adev)->irq.ih_funcs->get_wptr((adev))
#define amdgpu_ih_decode_iv(adev, iv) (adev)->irq.ih_funcs->decode_iv((adev), (iv))
#define amdgpu_ih_set_rptr(adev) (adev)->irq.ih_funcs->set_rptr((adev))
#define amdgpu_display_set_vga_render_state(adev, r) (adev)->mode_info.funcs->set_vga_render_state((adev), (r))
#define amdgpu_display_vblank_get_counter(adev, crtc) (adev)->mode_info.funcs->vblank_get_counter((adev), (crtc))
#define amdgpu_display_vblank_wait(adev, crtc) (adev)->mode_info.funcs->vblank_wait((adev), (crtc))
-#define amdgpu_display_is_display_hung(adev) (adev)->mode_info.funcs->is_display_hung((adev))
#define amdgpu_display_backlight_set_level(adev, e, l) (adev)->mode_info.funcs->backlight_set_level((e), (l))
#define amdgpu_display_backlight_get_level(adev, e) (adev)->mode_info.funcs->backlight_get_level((e))
#define amdgpu_display_hpd_sense(adev, h) (adev)->mode_info.funcs->hpd_sense((adev), (h))
@@ -2302,108 +1650,8 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
#define amdgpu_display_resume_mc_access(adev, s) (adev)->mode_info.funcs->resume_mc_access((adev), (s))
#define amdgpu_emit_copy_buffer(adev, ib, s, d, b) (adev)->mman.buffer_funcs->emit_copy_buffer((ib), (s), (d), (b))
#define amdgpu_emit_fill_buffer(adev, ib, s, d, b) (adev)->mman.buffer_funcs->emit_fill_buffer((ib), (s), (d), (b))
-#define amdgpu_dpm_pre_set_power_state(adev) (adev)->pm.funcs->pre_set_power_state((adev))
-#define amdgpu_dpm_set_power_state(adev) (adev)->pm.funcs->set_power_state((adev))
-#define amdgpu_dpm_post_set_power_state(adev) (adev)->pm.funcs->post_set_power_state((adev))
-#define amdgpu_dpm_display_configuration_changed(adev) (adev)->pm.funcs->display_configuration_changed((adev))
-#define amdgpu_dpm_print_power_state(adev, ps) (adev)->pm.funcs->print_power_state((adev), (ps))
-#define amdgpu_dpm_vblank_too_short(adev) (adev)->pm.funcs->vblank_too_short((adev))
-#define amdgpu_dpm_enable_bapm(adev, e) (adev)->pm.funcs->enable_bapm((adev), (e))
#define amdgpu_gfx_get_gpu_clock_counter(adev) (adev)->gfx.funcs->get_gpu_clock_counter((adev))
#define amdgpu_gfx_select_se_sh(adev, se, sh, instance) (adev)->gfx.funcs->select_se_sh((adev), (se), (sh), (instance))
-
-#define amdgpu_dpm_read_sensor(adev, idx, value) \
- ((adev)->pp_enabled ? \
- (adev)->powerplay.pp_funcs->read_sensor(adev->powerplay.pp_handle, (idx), (value)) : \
- -EINVAL)
-
-#define amdgpu_dpm_get_temperature(adev) \
- ((adev)->pp_enabled ? \
- (adev)->powerplay.pp_funcs->get_temperature((adev)->powerplay.pp_handle) : \
- (adev)->pm.funcs->get_temperature((adev)))
-
-#define amdgpu_dpm_set_fan_control_mode(adev, m) \
- ((adev)->pp_enabled ? \
- (adev)->powerplay.pp_funcs->set_fan_control_mode((adev)->powerplay.pp_handle, (m)) : \
- (adev)->pm.funcs->set_fan_control_mode((adev), (m)))
-
-#define amdgpu_dpm_get_fan_control_mode(adev) \
- ((adev)->pp_enabled ? \
- (adev)->powerplay.pp_funcs->get_fan_control_mode((adev)->powerplay.pp_handle) : \
- (adev)->pm.funcs->get_fan_control_mode((adev)))
-
-#define amdgpu_dpm_set_fan_speed_percent(adev, s) \
- ((adev)->pp_enabled ? \
- (adev)->powerplay.pp_funcs->set_fan_speed_percent((adev)->powerplay.pp_handle, (s)) : \
- (adev)->pm.funcs->set_fan_speed_percent((adev), (s)))
-
-#define amdgpu_dpm_get_fan_speed_percent(adev, s) \
- ((adev)->pp_enabled ? \
- (adev)->powerplay.pp_funcs->get_fan_speed_percent((adev)->powerplay.pp_handle, (s)) : \
- (adev)->pm.funcs->get_fan_speed_percent((adev), (s)))
-
-#define amdgpu_dpm_get_sclk(adev, l) \
- ((adev)->pp_enabled ? \
- (adev)->powerplay.pp_funcs->get_sclk((adev)->powerplay.pp_handle, (l)) : \
- (adev)->pm.funcs->get_sclk((adev), (l)))
-
-#define amdgpu_dpm_get_mclk(adev, l) \
- ((adev)->pp_enabled ? \
- (adev)->powerplay.pp_funcs->get_mclk((adev)->powerplay.pp_handle, (l)) : \
- (adev)->pm.funcs->get_mclk((adev), (l)))
-
-
-#define amdgpu_dpm_force_performance_level(adev, l) \
- ((adev)->pp_enabled ? \
- (adev)->powerplay.pp_funcs->force_performance_level((adev)->powerplay.pp_handle, (l)) : \
- (adev)->pm.funcs->force_performance_level((adev), (l)))
-
-#define amdgpu_dpm_powergate_uvd(adev, g) \
- ((adev)->pp_enabled ? \
- (adev)->powerplay.pp_funcs->powergate_uvd((adev)->powerplay.pp_handle, (g)) : \
- (adev)->pm.funcs->powergate_uvd((adev), (g)))
-
-#define amdgpu_dpm_powergate_vce(adev, g) \
- ((adev)->pp_enabled ? \
- (adev)->powerplay.pp_funcs->powergate_vce((adev)->powerplay.pp_handle, (g)) : \
- (adev)->pm.funcs->powergate_vce((adev), (g)))
-
-#define amdgpu_dpm_get_current_power_state(adev) \
- (adev)->powerplay.pp_funcs->get_current_power_state((adev)->powerplay.pp_handle)
-
-#define amdgpu_dpm_get_performance_level(adev) \
- (adev)->powerplay.pp_funcs->get_performance_level((adev)->powerplay.pp_handle)
-
-#define amdgpu_dpm_get_pp_num_states(adev, data) \
- (adev)->powerplay.pp_funcs->get_pp_num_states((adev)->powerplay.pp_handle, data)
-
-#define amdgpu_dpm_get_pp_table(adev, table) \
- (adev)->powerplay.pp_funcs->get_pp_table((adev)->powerplay.pp_handle, table)
-
-#define amdgpu_dpm_set_pp_table(adev, buf, size) \
- (adev)->powerplay.pp_funcs->set_pp_table((adev)->powerplay.pp_handle, buf, size)
-
-#define amdgpu_dpm_print_clock_levels(adev, type, buf) \
- (adev)->powerplay.pp_funcs->print_clock_levels((adev)->powerplay.pp_handle, type, buf)
-
-#define amdgpu_dpm_force_clock_level(adev, type, level) \
- (adev)->powerplay.pp_funcs->force_clock_level((adev)->powerplay.pp_handle, type, level)
-
-#define amdgpu_dpm_get_sclk_od(adev) \
- (adev)->powerplay.pp_funcs->get_sclk_od((adev)->powerplay.pp_handle)
-
-#define amdgpu_dpm_set_sclk_od(adev, value) \
- (adev)->powerplay.pp_funcs->set_sclk_od((adev)->powerplay.pp_handle, value)
-
-#define amdgpu_dpm_get_mclk_od(adev) \
- ((adev)->powerplay.pp_funcs->get_mclk_od((adev)->powerplay.pp_handle))
-
-#define amdgpu_dpm_set_mclk_od(adev, value) \
- ((adev)->powerplay.pp_funcs->set_mclk_od((adev)->powerplay.pp_handle, value))
-
-#define amdgpu_dpm_dispatch_task(adev, event_id, input, output) \
- (adev)->powerplay.pp_funcs->dispatch_tasks((adev)->powerplay.pp_handle, (event_id), (input), (output))
-
#define amdgpu_gds_switch(adev, r, v, d, w, a) (adev)->gds.funcs->patch_gds_switch((r), (v), (d), (w), (a))
/* Common functions */
@@ -2434,8 +1682,6 @@ uint32_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm,
void amdgpu_vram_location(struct amdgpu_device *adev, struct amdgpu_mc *mc, u64 base);
void amdgpu_gtt_location(struct amdgpu_device *adev, struct amdgpu_mc *mc);
void amdgpu_ttm_set_active_vram_size(struct amdgpu_device *adev, u64 size);
-u64 amdgpu_ttm_get_gtt_mem_size(struct amdgpu_device *adev);
-int amdgpu_ttm_global_init(struct amdgpu_device *adev);
int amdgpu_ttm_init(struct amdgpu_device *adev);
void amdgpu_ttm_fini(struct amdgpu_device *adev);
void amdgpu_program_register_sequence(struct amdgpu_device *adev,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
index 2057683f7b59..06879d1dcabd 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
@@ -265,14 +265,14 @@ static int acp_hw_init(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- const struct amdgpu_ip_block_version *ip_version =
+ const struct amdgpu_ip_block *ip_block =
amdgpu_get_ip_block(adev, AMD_IP_BLOCK_TYPE_ACP);
- if (!ip_version)
+ if (!ip_block)
return -EINVAL;
r = amd_acp_hw_init(adev->acp.cgs_device,
- ip_version->major, ip_version->minor);
+ ip_block->version->major, ip_block->version->minor);
/* -ENODEV means board uses AZ rather than ACP */
if (r == -ENODEV)
return 0;
@@ -459,7 +459,7 @@ static int acp_set_powergating_state(void *handle,
return 0;
}
-const struct amd_ip_funcs acp_ip_funcs = {
+static const struct amd_ip_funcs acp_ip_funcs = {
.name = "acp_ip",
.early_init = acp_early_init,
.late_init = NULL,
@@ -475,3 +475,12 @@ const struct amd_ip_funcs acp_ip_funcs = {
.set_clockgating_state = acp_set_clockgating_state,
.set_powergating_state = acp_set_powergating_state,
};
+
+const struct amdgpu_ip_block_version acp_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_ACP,
+ .major = 2,
+ .minor = 2,
+ .rev = 0,
+ .funcs = &acp_ip_funcs,
+};
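With IP blocks now published as const struct amdgpu_ip_block_version objects like acp_ip_block above, an ASIC table registers them through amdgpu_ip_block_add() (introduced in the amdgpu_device.c hunk below). A hedged sketch of a call site; the surrounding ASIC-init function is hypothetical:

        /* hypothetical ASIC-init call site; error handling as in the real tables */
        r = amdgpu_ip_block_add(adev, &acp_ip_block);
        if (r)
                return r;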
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.h
index 8a396313c86f..a288ce25c176 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.h
@@ -37,6 +37,6 @@ struct amdgpu_acp {
struct acp_pm_domain *acp_genpd;
};
-extern const struct amd_ip_funcs acp_ip_funcs;
+extern const struct amdgpu_ip_block_version acp_ip_block;
#endif /* __AMDGPU_ACP_H__ */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
index 8e6bf548d689..56a86dd5789e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
@@ -1115,49 +1115,6 @@ int amdgpu_atombios_get_memory_pll_dividers(struct amdgpu_device *adev,
return 0;
}
-uint32_t amdgpu_atombios_get_engine_clock(struct amdgpu_device *adev)
-{
- GET_ENGINE_CLOCK_PS_ALLOCATION args;
- int index = GetIndexIntoMasterTable(COMMAND, GetEngineClock);
-
- amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
- return le32_to_cpu(args.ulReturnEngineClock);
-}
-
-uint32_t amdgpu_atombios_get_memory_clock(struct amdgpu_device *adev)
-{
- GET_MEMORY_CLOCK_PS_ALLOCATION args;
- int index = GetIndexIntoMasterTable(COMMAND, GetMemoryClock);
-
- amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
- return le32_to_cpu(args.ulReturnMemoryClock);
-}
-
-void amdgpu_atombios_set_engine_clock(struct amdgpu_device *adev,
- uint32_t eng_clock)
-{
- SET_ENGINE_CLOCK_PS_ALLOCATION args;
- int index = GetIndexIntoMasterTable(COMMAND, SetEngineClock);
-
- args.ulTargetEngineClock = cpu_to_le32(eng_clock); /* 10 khz */
-
- amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
-}
-
-void amdgpu_atombios_set_memory_clock(struct amdgpu_device *adev,
- uint32_t mem_clock)
-{
- SET_MEMORY_CLOCK_PS_ALLOCATION args;
- int index = GetIndexIntoMasterTable(COMMAND, SetMemoryClock);
-
- if (adev->flags & AMD_IS_APU)
- return;
-
- args.ulTargetMemoryClock = cpu_to_le32(mem_clock); /* 10 khz */
-
- amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
-}
-
void amdgpu_atombios_set_engine_dram_timings(struct amdgpu_device *adev,
u32 eng_clock, u32 mem_clock)
{
@@ -1256,45 +1213,6 @@ int amdgpu_atombios_get_leakage_vddc_based_on_leakage_idx(struct amdgpu_device *
return amdgpu_atombios_get_max_vddc(adev, VOLTAGE_TYPE_VDDC, leakage_idx, voltage);
}
-void amdgpu_atombios_set_voltage(struct amdgpu_device *adev,
- u16 voltage_level,
- u8 voltage_type)
-{
- union set_voltage args;
- int index = GetIndexIntoMasterTable(COMMAND, SetVoltage);
- u8 frev, crev, volt_index = voltage_level;
-
- if (!amdgpu_atom_parse_cmd_header(adev->mode_info.atom_context, index, &frev, &crev))
- return;
-
- /* 0xff01 is a flag rather then an actual voltage */
- if (voltage_level == 0xff01)
- return;
-
- switch (crev) {
- case 1:
- args.v1.ucVoltageType = voltage_type;
- args.v1.ucVoltageMode = SET_ASIC_VOLTAGE_MODE_ALL_SOURCE;
- args.v1.ucVoltageIndex = volt_index;
- break;
- case 2:
- args.v2.ucVoltageType = voltage_type;
- args.v2.ucVoltageMode = SET_ASIC_VOLTAGE_MODE_SET_VOLTAGE;
- args.v2.usVoltageLevel = cpu_to_le16(voltage_level);
- break;
- case 3:
- args.v3.ucVoltageType = voltage_type;
- args.v3.ucVoltageMode = ATOM_SET_VOLTAGE;
- args.v3.usVoltageLevel = cpu_to_le16(voltage_level);
- break;
- default:
- DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
- return;
- }
-
- amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
-}
-
int amdgpu_atombios_get_leakage_id_from_vbios(struct amdgpu_device *adev,
u16 *leakage_id)
{
@@ -1784,6 +1702,19 @@ void amdgpu_atombios_scratch_regs_restore(struct amdgpu_device *adev)
WREG32(mmBIOS_SCRATCH_0 + i, adev->bios_scratch[i]);
}
+void amdgpu_atombios_scratch_regs_engine_hung(struct amdgpu_device *adev,
+ bool hung)
+{
+ u32 tmp = RREG32(mmBIOS_SCRATCH_3);
+
+ if (hung)
+ tmp |= ATOM_S3_ASIC_GUI_ENGINE_HUNG;
+ else
+ tmp &= ~ATOM_S3_ASIC_GUI_ENGINE_HUNG;
+
+ WREG32(mmBIOS_SCRATCH_3, tmp);
+}
+
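amdgpu_atombios_scratch_regs_engine_hung() does a read-modify-write of BIOS scratch register 3 so the VBIOS can see across a reset that the GUI engine is hung. A sketch of the intended bracketing, with amdgpu_asic_reset() standing in for whichever reset path the caller actually uses:

        /* hypothetical caller: flag the hang, reset, then clear the flag */
        amdgpu_atombios_scratch_regs_engine_hung(adev, true);
        r = amdgpu_asic_reset(adev);
        amdgpu_atombios_scratch_regs_engine_hung(adev, false);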
/* Atom needs data in little endian format
* so swap as appropriate when copying data to
* or from atom. Note that atom operates on
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h
index 17356151db38..70e9acef5d9c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h
@@ -163,16 +163,6 @@ int amdgpu_atombios_get_memory_pll_dividers(struct amdgpu_device *adev,
bool strobe_mode,
struct atom_mpll_param *mpll_param);
-uint32_t amdgpu_atombios_get_engine_clock(struct amdgpu_device *adev);
-uint32_t amdgpu_atombios_get_memory_clock(struct amdgpu_device *adev);
-void amdgpu_atombios_set_engine_clock(struct amdgpu_device *adev,
- uint32_t eng_clock);
-void amdgpu_atombios_set_memory_clock(struct amdgpu_device *adev,
- uint32_t mem_clock);
-void amdgpu_atombios_set_voltage(struct amdgpu_device *adev,
- u16 voltage_level,
- u8 voltage_type);
-
void amdgpu_atombios_set_engine_dram_timings(struct amdgpu_device *adev,
u32 eng_clock, u32 mem_clock);
@@ -206,6 +196,8 @@ void amdgpu_atombios_scratch_regs_lock(struct amdgpu_device *adev, bool lock);
void amdgpu_atombios_scratch_regs_init(struct amdgpu_device *adev);
void amdgpu_atombios_scratch_regs_save(struct amdgpu_device *adev);
void amdgpu_atombios_scratch_regs_restore(struct amdgpu_device *adev);
+void amdgpu_atombios_scratch_regs_engine_hung(struct amdgpu_device *adev,
+ bool hung);
void amdgpu_atombios_copy_swap(u8 *dst, u8 *src, u8 num_bytes, bool to_le);
int amdgpu_atombios_get_max_vddc(struct amdgpu_device *adev, u8 voltage_type,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c
index 345305235349..cc97eee93226 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c
@@ -33,7 +33,7 @@ static int amdgpu_benchmark_do_move(struct amdgpu_device *adev, unsigned size,
{
unsigned long start_jiffies;
unsigned long end_jiffies;
- struct fence *fence = NULL;
+ struct dma_fence *fence = NULL;
int i, r;
start_jiffies = jiffies;
@@ -43,17 +43,17 @@ static int amdgpu_benchmark_do_move(struct amdgpu_device *adev, unsigned size,
false);
if (r)
goto exit_do_move;
- r = fence_wait(fence, false);
+ r = dma_fence_wait(fence, false);
if (r)
goto exit_do_move;
- fence_put(fence);
+ dma_fence_put(fence);
}
end_jiffies = jiffies;
r = jiffies_to_msecs(end_jiffies - start_jiffies);
exit_do_move:
if (fence)
- fence_put(fence);
+ dma_fence_put(fence);
return r;
}
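The struct fence to struct dma_fence rename applied here repeats mechanically through the rest of the series: the type, fence_wait()/fence_wait_timeout(), fence_get() and fence_put() all gain a dma_ prefix with unchanged semantics. The reference-counting pattern itself does not change:

        struct dma_fence *fence = NULL;
        /* ... something hands us a referenced fence ... */
        r = dma_fence_wait(fence, false);   /* false: uninterruptible wait */
        dma_fence_put(fence);               /* drop our reference */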
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
index 2b6afe123f3d..8ec1967a850b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
@@ -70,10 +70,11 @@ static bool igp_read_bios_from_vram(struct amdgpu_device *adev)
return false;
}
adev->bios = kmalloc(size, GFP_KERNEL);
- if (adev->bios == NULL) {
+ if (!adev->bios) {
iounmap(bios);
return false;
}
+ adev->bios_size = size;
memcpy_fromio(adev->bios, bios, size);
iounmap(bios);
return true;
@@ -103,6 +104,7 @@ bool amdgpu_read_bios(struct amdgpu_device *adev)
pci_unmap_rom(adev->pdev, bios);
return false;
}
+ adev->bios_size = size;
memcpy_fromio(adev->bios, bios, size);
pci_unmap_rom(adev->pdev, bios);
return true;
@@ -135,6 +137,7 @@ static bool amdgpu_read_bios_from_rom(struct amdgpu_device *adev)
DRM_ERROR("no memory to allocate for BIOS\n");
return false;
}
+ adev->bios_size = len;
/* read complete BIOS */
return amdgpu_asic_read_bios_from_rom(adev, adev->bios, len);
@@ -159,6 +162,7 @@ static bool amdgpu_read_platform_bios(struct amdgpu_device *adev)
if (adev->bios == NULL) {
return false;
}
+ adev->bios_size = size;
return true;
}
@@ -273,6 +277,7 @@ static bool amdgpu_atrm_get_bios(struct amdgpu_device *adev)
kfree(adev->bios);
return false;
}
+ adev->bios_size = size;
return true;
}
#else
@@ -300,8 +305,9 @@ static bool amdgpu_acpi_vfct_bios(struct amdgpu_device *adev)
GOP_VBIOS_CONTENT *vbios;
VFCT_IMAGE_HEADER *vhdr;
- if (!ACPI_SUCCESS(acpi_get_table_with_size("VFCT", 1, &hdr, &tbl_size)))
+ if (!ACPI_SUCCESS(acpi_get_table("VFCT", 1, &hdr)))
return false;
+ tbl_size = hdr->length;
if (tbl_size < sizeof(UEFI_ACPI_VFCT)) {
DRM_ERROR("ACPI VFCT table present but broken (too short #1)\n");
goto out_unmap;
@@ -334,6 +340,7 @@ static bool amdgpu_acpi_vfct_bios(struct amdgpu_device *adev)
}
adev->bios = kmemdup(&vbios->VbiosContent, vhdr->ImageLength, GFP_KERNEL);
+ adev->bios_size = vhdr->ImageLength;
ret = !!adev->bios;
out_unmap:
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
index 662976292535..9ada56c16a58 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
@@ -146,7 +146,8 @@ static int amdgpu_cgs_alloc_gpu_mem(struct cgs_device *cgs_device,
switch(type) {
case CGS_GPU_MEM_TYPE__VISIBLE_CONTIG_FB:
case CGS_GPU_MEM_TYPE__VISIBLE_FB:
- flags = AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
+ flags = AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
+ AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
domain = AMDGPU_GEM_DOMAIN_VRAM;
if (max_offset > adev->mc.real_vram_size)
return -EINVAL;
@@ -157,7 +158,8 @@ static int amdgpu_cgs_alloc_gpu_mem(struct cgs_device *cgs_device,
break;
case CGS_GPU_MEM_TYPE__INVISIBLE_CONTIG_FB:
case CGS_GPU_MEM_TYPE__INVISIBLE_FB:
- flags = AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
+ flags = AMDGPU_GEM_CREATE_NO_CPU_ACCESS |
+ AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
domain = AMDGPU_GEM_DOMAIN_VRAM;
if (adev->mc.visible_vram_size < adev->mc.real_vram_size) {
place.fpfn =
@@ -240,7 +242,7 @@ static int amdgpu_cgs_gmap_gpu_mem(struct cgs_device *cgs_device, cgs_handle_t h
r = amdgpu_bo_reserve(obj, false);
if (unlikely(r != 0))
return r;
- r = amdgpu_bo_pin_restricted(obj, AMDGPU_GEM_DOMAIN_GTT,
+ r = amdgpu_bo_pin_restricted(obj, obj->prefered_domains,
min_offset, max_offset, mcaddr);
amdgpu_bo_unreserve(obj);
return r;
@@ -624,11 +626,11 @@ static int amdgpu_cgs_set_clockgating_state(struct cgs_device *cgs_device,
int i, r = -1;
for (i = 0; i < adev->num_ip_blocks; i++) {
- if (!adev->ip_block_status[i].valid)
+ if (!adev->ip_blocks[i].status.valid)
continue;
- if (adev->ip_blocks[i].type == block_type) {
- r = adev->ip_blocks[i].funcs->set_clockgating_state(
+ if (adev->ip_blocks[i].version->type == block_type) {
+ r = adev->ip_blocks[i].version->funcs->set_clockgating_state(
(void *)adev,
state);
break;
@@ -645,11 +647,11 @@ static int amdgpu_cgs_set_powergating_state(struct cgs_device *cgs_device,
int i, r = -1;
for (i = 0; i < adev->num_ip_blocks; i++) {
- if (!adev->ip_block_status[i].valid)
+ if (!adev->ip_blocks[i].status.valid)
continue;
- if (adev->ip_blocks[i].type == block_type) {
- r = adev->ip_blocks[i].funcs->set_powergating_state(
+ if (adev->ip_blocks[i].version->type == block_type) {
+ r = adev->ip_blocks[i].version->funcs->set_powergating_state(
(void *)adev,
state);
break;
@@ -685,15 +687,21 @@ static uint32_t fw_type_convert(struct cgs_device *cgs_device, uint32_t fw_type)
result = AMDGPU_UCODE_ID_CP_MEC1;
break;
case CGS_UCODE_ID_CP_MEC_JT2:
- if (adev->asic_type == CHIP_TONGA || adev->asic_type == CHIP_POLARIS11
- || adev->asic_type == CHIP_POLARIS10)
- result = AMDGPU_UCODE_ID_CP_MEC2;
- else
+ /* For VI, JT2 should be the same as JT1, because:
+ 1. MEC2 and MEC1 use exactly the same FW.
+ 2. JT2 is not patched, but JT1 is.
+ */
+ if (adev->asic_type >= CHIP_TOPAZ)
result = AMDGPU_UCODE_ID_CP_MEC1;
+ else
+ result = AMDGPU_UCODE_ID_CP_MEC2;
break;
case CGS_UCODE_ID_RLC_G:
result = AMDGPU_UCODE_ID_RLC_G;
break;
+ case CGS_UCODE_ID_STORAGE:
+ result = AMDGPU_UCODE_ID_STORAGE;
+ break;
default:
DRM_ERROR("Firmware type not supported\n");
}
@@ -715,7 +723,7 @@ static uint16_t amdgpu_get_firmware_version(struct cgs_device *cgs_device,
enum cgs_ucode_id type)
{
CGS_FUNC_ADEV;
- uint16_t fw_version;
+ uint16_t fw_version = 0;
switch (type) {
case CGS_UCODE_ID_SDMA0:
@@ -745,9 +753,11 @@ static uint16_t amdgpu_get_firmware_version(struct cgs_device *cgs_device,
case CGS_UCODE_ID_RLC_G:
fw_version = adev->gfx.rlc_fw_version;
break;
+ case CGS_UCODE_ID_STORAGE:
+ break;
default:
DRM_ERROR("firmware type %d do not have version\n", type);
- fw_version = 0;
+ break;
}
return fw_version;
}
@@ -776,12 +786,18 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
if ((type == CGS_UCODE_ID_CP_MEC_JT1) ||
(type == CGS_UCODE_ID_CP_MEC_JT2)) {
- gpu_addr += le32_to_cpu(header->jt_offset) << 2;
+ gpu_addr += ALIGN(le32_to_cpu(header->header.ucode_size_bytes), PAGE_SIZE);
data_size = le32_to_cpu(header->jt_size) << 2;
}
- info->mc_addr = gpu_addr;
+
+ info->kptr = ucode->kaddr;
info->image_size = data_size;
+ info->mc_addr = gpu_addr;
info->version = (uint16_t)le32_to_cpu(header->header.ucode_version);
+
+ if (CGS_UCODE_ID_CP_MEC == type)
+ info->image_size = (header->jt_offset) << 2;
+
info->fw_version = amdgpu_get_firmware_version(cgs_device, type);
info->feature_version = (uint16_t)le32_to_cpu(header->ucode_feature_version);
} else {
@@ -860,6 +876,12 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
return 0;
}
+static int amdgpu_cgs_is_virtualization_enabled(void *cgs_device)
+{
+ CGS_FUNC_ADEV;
+ return amdgpu_sriov_vf(adev);
+}
+
static int amdgpu_cgs_query_system_info(struct cgs_device *cgs_device,
struct cgs_system_info *sys_info)
{
@@ -1213,6 +1235,7 @@ static const struct cgs_ops amdgpu_cgs_ops = {
amdgpu_cgs_notify_dpm_enabled,
amdgpu_cgs_call_acpi_method,
amdgpu_cgs_query_system_info,
+ amdgpu_cgs_is_virtualization_enabled
};
static const struct cgs_os_ops amdgpu_cgs_os_ops = {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
index 086aa5c9c634..8d1cf2d3e663 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
@@ -1517,88 +1517,6 @@ static const struct drm_connector_funcs amdgpu_connector_edp_funcs = {
.force = amdgpu_connector_dvi_force,
};
-static struct drm_encoder *
-amdgpu_connector_virtual_encoder(struct drm_connector *connector)
-{
- int enc_id = connector->encoder_ids[0];
- struct drm_encoder *encoder;
- int i;
- for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
- if (connector->encoder_ids[i] == 0)
- break;
-
- encoder = drm_encoder_find(connector->dev, connector->encoder_ids[i]);
- if (!encoder)
- continue;
-
- if (encoder->encoder_type == DRM_MODE_ENCODER_VIRTUAL)
- return encoder;
- }
-
- /* pick the first one */
- if (enc_id)
- return drm_encoder_find(connector->dev, enc_id);
- return NULL;
-}
-
-static int amdgpu_connector_virtual_get_modes(struct drm_connector *connector)
-{
- struct drm_encoder *encoder = amdgpu_connector_best_single_encoder(connector);
-
- if (encoder) {
- amdgpu_connector_add_common_modes(encoder, connector);
- }
-
- return 0;
-}
-
-static int amdgpu_connector_virtual_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
-{
- return MODE_OK;
-}
-
-static int
-amdgpu_connector_virtual_dpms(struct drm_connector *connector, int mode)
-{
- return 0;
-}
-
-static enum drm_connector_status
-
-amdgpu_connector_virtual_detect(struct drm_connector *connector, bool force)
-{
- return connector_status_connected;
-}
-
-static int
-amdgpu_connector_virtual_set_property(struct drm_connector *connector,
- struct drm_property *property,
- uint64_t val)
-{
- return 0;
-}
-
-static void amdgpu_connector_virtual_force(struct drm_connector *connector)
-{
- return;
-}
-
-static const struct drm_connector_helper_funcs amdgpu_connector_virtual_helper_funcs = {
- .get_modes = amdgpu_connector_virtual_get_modes,
- .mode_valid = amdgpu_connector_virtual_mode_valid,
- .best_encoder = amdgpu_connector_virtual_encoder,
-};
-
-static const struct drm_connector_funcs amdgpu_connector_virtual_funcs = {
- .dpms = amdgpu_connector_virtual_dpms,
- .detect = amdgpu_connector_virtual_detect,
- .fill_modes = drm_helper_probe_single_connector_modes,
- .set_property = amdgpu_connector_virtual_set_property,
- .destroy = amdgpu_connector_destroy,
- .force = amdgpu_connector_virtual_force,
-};
-
void
amdgpu_connector_add(struct amdgpu_device *adev,
uint32_t connector_id,
@@ -1983,17 +1901,6 @@ amdgpu_connector_add(struct amdgpu_device *adev,
connector->interlace_allowed = false;
connector->doublescan_allowed = false;
break;
- case DRM_MODE_CONNECTOR_VIRTUAL:
- amdgpu_dig_connector = kzalloc(sizeof(struct amdgpu_connector_atom_dig), GFP_KERNEL);
- if (!amdgpu_dig_connector)
- goto failed;
- amdgpu_connector->con_priv = amdgpu_dig_connector;
- drm_connector_init(dev, &amdgpu_connector->base, &amdgpu_connector_virtual_funcs, connector_type);
- drm_connector_helper_add(&amdgpu_connector->base, &amdgpu_connector_virtual_helper_funcs);
- subpixel_order = SubPixelHorizontalRGB;
- connector->interlace_allowed = false;
- connector->doublescan_allowed = false;
- break;
}
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 82dc8d20e28a..29d6d84d1c28 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -355,6 +355,7 @@ static void amdgpu_cs_report_moved_bytes(struct amdgpu_device *adev,
static int amdgpu_cs_bo_validate(struct amdgpu_cs_parser *p,
struct amdgpu_bo *bo)
{
+ struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
u64 initial_bytes_moved;
uint32_t domain;
int r;
@@ -372,9 +373,9 @@ static int amdgpu_cs_bo_validate(struct amdgpu_cs_parser *p,
retry:
amdgpu_ttm_placement_from_domain(bo, domain);
- initial_bytes_moved = atomic64_read(&bo->adev->num_bytes_moved);
+ initial_bytes_moved = atomic64_read(&adev->num_bytes_moved);
r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
- p->bytes_moved += atomic64_read(&bo->adev->num_bytes_moved) -
+ p->bytes_moved += atomic64_read(&adev->num_bytes_moved) -
initial_bytes_moved;
if (unlikely(r == -ENOMEM) && domain != bo->allowed_domains) {
@@ -387,9 +388,9 @@ retry:
/* Last resort, try to evict something from the current working set */
static bool amdgpu_cs_try_evict(struct amdgpu_cs_parser *p,
- struct amdgpu_bo_list_entry *lobj)
+ struct amdgpu_bo *validated)
{
- uint32_t domain = lobj->robj->allowed_domains;
+ uint32_t domain = validated->allowed_domains;
int r;
if (!p->evictable)
@@ -400,11 +401,12 @@ static bool amdgpu_cs_try_evict(struct amdgpu_cs_parser *p,
struct amdgpu_bo_list_entry *candidate = p->evictable;
struct amdgpu_bo *bo = candidate->robj;
+ struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
u64 initial_bytes_moved;
uint32_t other;
/* If we reached our current BO we can forget it */
- if (candidate == lobj)
+ if (candidate->robj == validated)
break;
other = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
@@ -420,9 +422,9 @@ static bool amdgpu_cs_try_evict(struct amdgpu_cs_parser *p,
/* Good we can try to move this BO somewhere else */
amdgpu_ttm_placement_from_domain(bo, other);
- initial_bytes_moved = atomic64_read(&bo->adev->num_bytes_moved);
+ initial_bytes_moved = atomic64_read(&adev->num_bytes_moved);
r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
- p->bytes_moved += atomic64_read(&bo->adev->num_bytes_moved) -
+ p->bytes_moved += atomic64_read(&adev->num_bytes_moved) -
initial_bytes_moved;
if (unlikely(r))
@@ -437,6 +439,23 @@ static bool amdgpu_cs_try_evict(struct amdgpu_cs_parser *p,
return false;
}
+static int amdgpu_cs_validate(void *param, struct amdgpu_bo *bo)
+{
+ struct amdgpu_cs_parser *p = param;
+ int r;
+
+ do {
+ r = amdgpu_cs_bo_validate(p, bo);
+ } while (r == -ENOMEM && amdgpu_cs_try_evict(p, bo));
+ if (r)
+ return r;
+
+ if (bo->shadow)
+ r = amdgpu_cs_bo_validate(p, bo->shadow);
+
+ return r;
+}
+
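amdgpu_cs_validate() folds the eviction retry loop and the shadow-BO validation into one per-BO callback, shared by amdgpu_cs_list_validate() and the new amdgpu_vm_validate_pt_bos() call below. The assumed prototype it is passed to, inferred from that call site (the authoritative declaration lives in amdgpu_vm.h):

        /* sketch of the assumed callback contract */
        int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
                                      int (*validate)(void *param, struct amdgpu_bo *bo),
                                      void *param);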
static int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p,
struct list_head *validated)
{
@@ -464,18 +483,10 @@ static int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p,
if (p->evictable == lobj)
p->evictable = NULL;
- do {
- r = amdgpu_cs_bo_validate(p, bo);
- } while (r == -ENOMEM && amdgpu_cs_try_evict(p, lobj));
+ r = amdgpu_cs_validate(p, bo);
if (r)
return r;
- if (bo->shadow) {
- r = amdgpu_cs_bo_validate(p, bo);
- if (r)
- return r;
- }
-
if (binding_userptr) {
drm_free_large(lobj->user_pages);
lobj->user_pages = NULL;
@@ -594,14 +605,19 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
list_splice(&need_pages, &p->validated);
}
- amdgpu_vm_get_pt_bos(p->adev, &fpriv->vm, &duplicates);
-
p->bytes_moved_threshold = amdgpu_cs_get_threshold_for_moves(p->adev);
p->bytes_moved = 0;
p->evictable = list_last_entry(&p->validated,
struct amdgpu_bo_list_entry,
tv.head);
+ r = amdgpu_vm_validate_pt_bos(p->adev, &fpriv->vm,
+ amdgpu_cs_validate, p);
+ if (r) {
+ DRM_ERROR("amdgpu_vm_validate_pt_bos() failed.\n");
+ goto error_validate;
+ }
+
r = amdgpu_cs_list_validate(p, &duplicates);
if (r) {
DRM_ERROR("amdgpu_cs_list_validate(duplicates) failed.\n");
@@ -720,7 +736,7 @@ static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error, bo
ttm_eu_backoff_reservation(&parser->ticket,
&parser->validated);
}
- fence_put(parser->fence);
+ dma_fence_put(parser->fence);
if (parser->ctx)
amdgpu_ctx_put(parser->ctx);
@@ -757,7 +773,7 @@ static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p,
if (p->bo_list) {
for (i = 0; i < p->bo_list->num_entries; i++) {
- struct fence *f;
+ struct dma_fence *f;
/* ignore duplicates */
bo = p->bo_list->array[i].robj;
@@ -807,13 +823,14 @@ static int amdgpu_cs_ib_vm_chunk(struct amdgpu_device *adev,
/* Only for UVD/VCE VM emulation */
if (ring->funcs->parse_cs) {
- p->job->vm = NULL;
for (i = 0; i < p->job->num_ibs; i++) {
r = amdgpu_ring_parse_cs(ring, p, i);
if (r)
return r;
}
- } else {
+ }
+
+ if (p->job->vm) {
p->job->vm_pd_addr = amdgpu_bo_gpu_offset(vm->page_directory);
r = amdgpu_bo_vm_update_pte(p, vm);
@@ -824,16 +841,6 @@ static int amdgpu_cs_ib_vm_chunk(struct amdgpu_device *adev,
return amdgpu_cs_sync_rings(p);
}
-static int amdgpu_cs_handle_lockup(struct amdgpu_device *adev, int r)
-{
- if (r == -EDEADLK) {
- r = amdgpu_gpu_reset(adev);
- if (!r)
- r = -EAGAIN;
- }
- return r;
-}
-
static int amdgpu_cs_ib_fill(struct amdgpu_device *adev,
struct amdgpu_cs_parser *parser)
{
@@ -902,7 +909,7 @@ static int amdgpu_cs_ib_fill(struct amdgpu_device *adev,
offset = ((uint64_t)m->it.start) * AMDGPU_GPU_PAGE_SIZE;
kptr += chunk_ib->va_start - offset;
- r = amdgpu_ib_get(adev, NULL, chunk_ib->ib_bytes, ib);
+ r = amdgpu_ib_get(adev, vm, chunk_ib->ib_bytes, ib);
if (r) {
DRM_ERROR("Failed to get ib !\n");
return r;
@@ -917,9 +924,9 @@ static int amdgpu_cs_ib_fill(struct amdgpu_device *adev,
return r;
}
- ib->gpu_addr = chunk_ib->va_start;
}
+ ib->gpu_addr = chunk_ib->va_start;
ib->length_dw = chunk_ib->ib_bytes / 4;
ib->flags = chunk_ib->flags;
j++;
@@ -927,8 +934,8 @@ static int amdgpu_cs_ib_fill(struct amdgpu_device *adev,
/* UVD & VCE fw doesn't support user fences */
if (parser->job->uf_addr && (
- parser->job->ring->type == AMDGPU_RING_TYPE_UVD ||
- parser->job->ring->type == AMDGPU_RING_TYPE_VCE))
+ parser->job->ring->funcs->type == AMDGPU_RING_TYPE_UVD ||
+ parser->job->ring->funcs->type == AMDGPU_RING_TYPE_VCE))
return -EINVAL;
return 0;
@@ -957,7 +964,7 @@ static int amdgpu_cs_dependencies(struct amdgpu_device *adev,
for (j = 0; j < num_deps; ++j) {
struct amdgpu_ring *ring;
struct amdgpu_ctx *ctx;
- struct fence *fence;
+ struct dma_fence *fence;
r = amdgpu_cs_get_ring(adev, deps[j].ip_type,
deps[j].ip_instance,
@@ -979,7 +986,7 @@ static int amdgpu_cs_dependencies(struct amdgpu_device *adev,
} else if (fence) {
r = amdgpu_sync_fence(adev, &p->job->sync,
fence);
- fence_put(fence);
+ dma_fence_put(fence);
amdgpu_ctx_put(ctx);
if (r)
return r;
@@ -1009,7 +1016,7 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
job->owner = p->filp;
job->fence_ctx = entity->fence_context;
- p->fence = fence_get(&job->base.s_fence->finished);
+ p->fence = dma_fence_get(&job->base.s_fence->finished);
cs->out.handle = amdgpu_ctx_add_fence(p->ctx, ring, p->fence);
job->uf_sequence = cs->out.handle;
amdgpu_job_free_resources(job);
@@ -1037,29 +1044,29 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
r = amdgpu_cs_parser_init(&parser, data);
if (r) {
DRM_ERROR("Failed to initialize parser !\n");
- amdgpu_cs_parser_fini(&parser, r, false);
- r = amdgpu_cs_handle_lockup(adev, r);
- return r;
- }
- r = amdgpu_cs_parser_bos(&parser, data);
- if (r == -ENOMEM)
- DRM_ERROR("Not enough memory for command submission!\n");
- else if (r && r != -ERESTARTSYS)
- DRM_ERROR("Failed to process the buffer list %d!\n", r);
- else if (!r) {
- reserved_buffers = true;
- r = amdgpu_cs_ib_fill(adev, &parser);
+ goto out;
}
- if (!r) {
- r = amdgpu_cs_dependencies(adev, &parser);
- if (r)
- DRM_ERROR("Failed in the dependencies handling %d!\n", r);
+ r = amdgpu_cs_parser_bos(&parser, data);
+ if (r) {
+ if (r == -ENOMEM)
+ DRM_ERROR("Not enough memory for command submission!\n");
+ else if (r != -ERESTARTSYS)
+ DRM_ERROR("Failed to process the buffer list %d!\n", r);
+ goto out;
}
+ reserved_buffers = true;
+ r = amdgpu_cs_ib_fill(adev, &parser);
if (r)
goto out;
+ r = amdgpu_cs_dependencies(adev, &parser);
+ if (r) {
+ DRM_ERROR("Failed in the dependencies handling %d!\n", r);
+ goto out;
+ }
+
for (i = 0; i < parser.job->num_ibs; i++)
trace_amdgpu_cs(&parser, i);
@@ -1071,7 +1078,6 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
out:
amdgpu_cs_parser_fini(&parser, r, reserved_buffers);
- r = amdgpu_cs_handle_lockup(adev, r);
return r;
}
@@ -1092,7 +1098,7 @@ int amdgpu_cs_wait_ioctl(struct drm_device *dev, void *data,
unsigned long timeout = amdgpu_gem_timeout(wait->in.timeout);
struct amdgpu_ring *ring = NULL;
struct amdgpu_ctx *ctx;
- struct fence *fence;
+ struct dma_fence *fence;
long r;
r = amdgpu_cs_get_ring(adev, wait->in.ip_type, wait->in.ip_instance,
@@ -1108,8 +1114,8 @@ int amdgpu_cs_wait_ioctl(struct drm_device *dev, void *data,
if (IS_ERR(fence))
r = PTR_ERR(fence);
else if (fence) {
- r = fence_wait_timeout(fence, true, timeout);
- fence_put(fence);
+ r = dma_fence_wait_timeout(fence, true, timeout);
+ dma_fence_put(fence);
} else
r = 1;
@@ -1124,6 +1130,180 @@ int amdgpu_cs_wait_ioctl(struct drm_device *dev, void *data,
}
/**
+ * amdgpu_cs_get_fence - helper to get fence from drm_amdgpu_fence
+ *
+ * @adev: amdgpu device
+ * @filp: file private
+ * @user: drm_amdgpu_fence copied from user space
+ */
+static struct dma_fence *amdgpu_cs_get_fence(struct amdgpu_device *adev,
+ struct drm_file *filp,
+ struct drm_amdgpu_fence *user)
+{
+ struct amdgpu_ring *ring;
+ struct amdgpu_ctx *ctx;
+ struct dma_fence *fence;
+ int r;
+
+ r = amdgpu_cs_get_ring(adev, user->ip_type, user->ip_instance,
+ user->ring, &ring);
+ if (r)
+ return ERR_PTR(r);
+
+ ctx = amdgpu_ctx_get(filp->driver_priv, user->ctx_id);
+ if (ctx == NULL)
+ return ERR_PTR(-EINVAL);
+
+ fence = amdgpu_ctx_get_fence(ctx, ring, user->seq_no);
+ amdgpu_ctx_put(ctx);
+
+ return fence;
+}
+
+/**
+ * amdgpu_cs_wait_all_fences - wait on all fences to signal
+ *
+ * @adev: amdgpu device
+ * @filp: file private
+ * @wait: wait parameters
+ * @fences: array of drm_amdgpu_fence
+ */
+static int amdgpu_cs_wait_all_fences(struct amdgpu_device *adev,
+ struct drm_file *filp,
+ union drm_amdgpu_wait_fences *wait,
+ struct drm_amdgpu_fence *fences)
+{
+ uint32_t fence_count = wait->in.fence_count;
+ unsigned int i;
+ long r = 1;
+
+ for (i = 0; i < fence_count; i++) {
+ struct dma_fence *fence;
+ unsigned long timeout = amdgpu_gem_timeout(wait->in.timeout_ns);
+
+ fence = amdgpu_cs_get_fence(adev, filp, &fences[i]);
+ if (IS_ERR(fence))
+ return PTR_ERR(fence);
+ else if (!fence)
+ continue;
+
+ r = dma_fence_wait_timeout(fence, true, timeout);
+ if (r < 0)
+ return r;
+
+ if (r == 0)
+ break;
+ }
+
+ memset(wait, 0, sizeof(*wait));
+ wait->out.status = (r > 0);
+
+ return 0;
+}
+
+/**
+ * amdgpu_cs_wait_any_fence - wait on any fence to signal
+ *
+ * @adev: amdgpu device
+ * @filp: file private
+ * @wait: wait parameters
+ * @fences: array of drm_amdgpu_fence
+ */
+static int amdgpu_cs_wait_any_fence(struct amdgpu_device *adev,
+ struct drm_file *filp,
+ union drm_amdgpu_wait_fences *wait,
+ struct drm_amdgpu_fence *fences)
+{
+ unsigned long timeout = amdgpu_gem_timeout(wait->in.timeout_ns);
+ uint32_t fence_count = wait->in.fence_count;
+ uint32_t first = ~0;
+ struct dma_fence **array;
+ unsigned int i;
+ long r;
+
+ /* Prepare the fence array */
+ array = kcalloc(fence_count, sizeof(struct dma_fence *), GFP_KERNEL);
+
+ if (array == NULL)
+ return -ENOMEM;
+
+ for (i = 0; i < fence_count; i++) {
+ struct dma_fence *fence;
+
+ fence = amdgpu_cs_get_fence(adev, filp, &fences[i]);
+ if (IS_ERR(fence)) {
+ r = PTR_ERR(fence);
+ goto err_free_fence_array;
+ } else if (fence) {
+ array[i] = fence;
+ } else { /* NULL, the fence has already been signaled */
+ r = 1;
+ goto out;
+ }
+ }
+
+ r = dma_fence_wait_any_timeout(array, fence_count, true, timeout,
+ &first);
+ if (r < 0)
+ goto err_free_fence_array;
+
+out:
+ memset(wait, 0, sizeof(*wait));
+ wait->out.status = (r > 0);
+ wait->out.first_signaled = first;
+ /* set return value 0 to indicate success */
+ r = 0;
+
+err_free_fence_array:
+ for (i = 0; i < fence_count; i++)
+ dma_fence_put(array[i]);
+ kfree(array);
+
+ return r;
+}
+
+/**
+ * amdgpu_cs_wait_fences_ioctl - wait for multiple command submissions to finish
+ *
+ * @dev: drm device
+ * @data: data from userspace
+ * @filp: file private
+ */
+int amdgpu_cs_wait_fences_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *filp)
+{
+ struct amdgpu_device *adev = dev->dev_private;
+ union drm_amdgpu_wait_fences *wait = data;
+ uint32_t fence_count = wait->in.fence_count;
+ struct drm_amdgpu_fence *fences_user;
+ struct drm_amdgpu_fence *fences;
+ int r;
+
+ /* Get the fences from userspace */
+ fences = kmalloc_array(fence_count, sizeof(struct drm_amdgpu_fence),
+ GFP_KERNEL);
+ if (fences == NULL)
+ return -ENOMEM;
+
+ fences_user = (void __user *)(unsigned long)(wait->in.fences);
+ if (copy_from_user(fences, fences_user,
+ sizeof(struct drm_amdgpu_fence) * fence_count)) {
+ r = -EFAULT;
+ goto err_free_fences;
+ }
+
+ if (wait->in.wait_all)
+ r = amdgpu_cs_wait_all_fences(adev, filp, wait, fences);
+ else
+ r = amdgpu_cs_wait_any_fence(adev, filp, wait, fences);
+
+err_free_fences:
+ kfree(fences);
+
+ return r;
+}
+
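From userspace, the new ioctl takes a union drm_amdgpu_wait_fences whose input half carries a pointer to an array of struct drm_amdgpu_fence plus fence_count, wait_all and timeout_ns, and whose output half returns status and first_signaled. A hedged sketch of a caller, assuming the DRM_IOCTL_AMDGPU_WAIT_FENCES wrapper added alongside this series and libdrm's drmIoctl(); ctx_id, seq0 and seq1 are placeholders for values returned by earlier submissions:

        struct drm_amdgpu_fence fences[2] = {
                { .ctx_id = ctx_id, .ip_type = AMDGPU_HW_IP_GFX, .ring = 0, .seq_no = seq0 },
                { .ctx_id = ctx_id, .ip_type = AMDGPU_HW_IP_DMA, .ring = 0, .seq_no = seq1 },
        };
        union drm_amdgpu_wait_fences args = {
                .in.fences = (uintptr_t)fences,
                .in.fence_count = 2,
                .in.wait_all = 0,                /* 0: return when any one fence signals */
                .in.timeout_ns = 1000000000ull,  /* 1 second */
        };
        r = drmIoctl(fd, DRM_IOCTL_AMDGPU_WAIT_FENCES, &args);
        /* on success, args.out.status != 0 means a fence signaled within the
         * timeout, and args.out.first_signaled indexes the first one to do so */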
+/**
* amdgpu_cs_find_bo_va - find bo_va for VM address
*
* @parser: command submission parser context
@@ -1196,6 +1376,15 @@ int amdgpu_cs_sysvm_access_required(struct amdgpu_cs_parser *parser)
r = amdgpu_ttm_bind(&bo->tbo, &bo->tbo.mem);
if (unlikely(r))
return r;
+
+ if (bo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS)
+ continue;
+
+ bo->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
+ amdgpu_ttm_placement_from_domain(bo, bo->allowed_domains);
+ r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
+ if (unlikely(r))
+ return r;
}
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
index a5e2fcbef0f0..400c66ba4c6b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
@@ -35,7 +35,7 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev, struct amdgpu_ctx *ctx)
kref_init(&ctx->refcount);
spin_lock_init(&ctx->ring_lock);
ctx->fences = kcalloc(amdgpu_sched_jobs * AMDGPU_MAX_RINGS,
- sizeof(struct fence*), GFP_KERNEL);
+ sizeof(struct dma_fence*), GFP_KERNEL);
if (!ctx->fences)
return -ENOMEM;
@@ -55,18 +55,18 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev, struct amdgpu_ctx *ctx)
r = amd_sched_entity_init(&ring->sched, &ctx->rings[i].entity,
rq, amdgpu_sched_jobs);
if (r)
- break;
+ goto failed;
}
- if (i < adev->num_rings) {
- for (j = 0; j < i; j++)
- amd_sched_entity_fini(&adev->rings[j]->sched,
- &ctx->rings[j].entity);
- kfree(ctx->fences);
- ctx->fences = NULL;
- return r;
- }
return 0;
+
+failed:
+ for (j = 0; j < i; j++)
+ amd_sched_entity_fini(&adev->rings[j]->sched,
+ &ctx->rings[j].entity);
+ kfree(ctx->fences);
+ ctx->fences = NULL;
+ return r;
}
static void amdgpu_ctx_fini(struct amdgpu_ctx *ctx)
@@ -79,7 +79,7 @@ static void amdgpu_ctx_fini(struct amdgpu_ctx *ctx)
for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
for (j = 0; j < amdgpu_sched_jobs; ++j)
- fence_put(ctx->rings[i].fences[j]);
+ dma_fence_put(ctx->rings[i].fences[j]);
kfree(ctx->fences);
ctx->fences = NULL;
@@ -241,39 +241,39 @@ int amdgpu_ctx_put(struct amdgpu_ctx *ctx)
}
uint64_t amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx, struct amdgpu_ring *ring,
- struct fence *fence)
+ struct dma_fence *fence)
{
struct amdgpu_ctx_ring *cring = & ctx->rings[ring->idx];
uint64_t seq = cring->sequence;
unsigned idx = 0;
- struct fence *other = NULL;
+ struct dma_fence *other = NULL;
idx = seq & (amdgpu_sched_jobs - 1);
other = cring->fences[idx];
if (other) {
signed long r;
- r = fence_wait_timeout(other, false, MAX_SCHEDULE_TIMEOUT);
+ r = dma_fence_wait_timeout(other, false, MAX_SCHEDULE_TIMEOUT);
if (r < 0)
DRM_ERROR("Error (%ld) waiting for fence!\n", r);
}
- fence_get(fence);
+ dma_fence_get(fence);
spin_lock(&ctx->ring_lock);
cring->fences[idx] = fence;
cring->sequence++;
spin_unlock(&ctx->ring_lock);
- fence_put(other);
+ dma_fence_put(other);
return seq;
}
-struct fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
- struct amdgpu_ring *ring, uint64_t seq)
+struct dma_fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
+ struct amdgpu_ring *ring, uint64_t seq)
{
struct amdgpu_ctx_ring *cring = & ctx->rings[ring->idx];
- struct fence *fence;
+ struct dma_fence *fence;
spin_lock(&ctx->ring_lock);
@@ -288,7 +288,7 @@ struct fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
return NULL;
}
- fence = fence_get(cring->fences[seq & (amdgpu_sched_jobs - 1)]);
+ fence = dma_fence_get(cring->fences[seq & (amdgpu_sched_jobs - 1)]);
spin_unlock(&ctx->ring_lock);
return fence;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index e41d4baebf86..60bd4afe45c8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -264,7 +264,8 @@ static int amdgpu_vram_scratch_init(struct amdgpu_device *adev)
if (adev->vram_scratch.robj == NULL) {
r = amdgpu_bo_create(adev, AMDGPU_GPU_PAGE_SIZE,
PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_VRAM,
- AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
+ AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
+ AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
NULL, NULL, &adev->vram_scratch.robj);
if (r) {
return r;
@@ -442,13 +443,9 @@ void amdgpu_doorbell_get_kfd_info(struct amdgpu_device *adev,
static void amdgpu_wb_fini(struct amdgpu_device *adev)
{
if (adev->wb.wb_obj) {
- if (!amdgpu_bo_reserve(adev->wb.wb_obj, false)) {
- amdgpu_bo_kunmap(adev->wb.wb_obj);
- amdgpu_bo_unpin(adev->wb.wb_obj);
- amdgpu_bo_unreserve(adev->wb.wb_obj);
- }
- amdgpu_bo_unref(&adev->wb.wb_obj);
- adev->wb.wb = NULL;
+ amdgpu_bo_free_kernel(&adev->wb.wb_obj,
+ &adev->wb.gpu_addr,
+ (void **)&adev->wb.wb);
adev->wb.wb_obj = NULL;
}
}
@@ -467,33 +464,14 @@ static int amdgpu_wb_init(struct amdgpu_device *adev)
int r;
if (adev->wb.wb_obj == NULL) {
- r = amdgpu_bo_create(adev, AMDGPU_MAX_WB * 4, PAGE_SIZE, true,
- AMDGPU_GEM_DOMAIN_GTT, 0, NULL, NULL,
- &adev->wb.wb_obj);
+ r = amdgpu_bo_create_kernel(adev, AMDGPU_MAX_WB * 4,
+ PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
+ &adev->wb.wb_obj, &adev->wb.gpu_addr,
+ (void **)&adev->wb.wb);
if (r) {
dev_warn(adev->dev, "(%d) create WB bo failed\n", r);
return r;
}
- r = amdgpu_bo_reserve(adev->wb.wb_obj, false);
- if (unlikely(r != 0)) {
- amdgpu_wb_fini(adev);
- return r;
- }
- r = amdgpu_bo_pin(adev->wb.wb_obj, AMDGPU_GEM_DOMAIN_GTT,
- &adev->wb.gpu_addr);
- if (r) {
- amdgpu_bo_unreserve(adev->wb.wb_obj);
- dev_warn(adev->dev, "(%d) pin WB bo failed\n", r);
- amdgpu_wb_fini(adev);
- return r;
- }
- r = amdgpu_bo_kmap(adev->wb.wb_obj, (void **)&adev->wb.wb);
- amdgpu_bo_unreserve(adev->wb.wb_obj);
- if (r) {
- dev_warn(adev->dev, "(%d) map WB bo failed\n", r);
- amdgpu_wb_fini(adev);
- return r;
- }
adev->wb.num_wb = AMDGPU_MAX_WB;
memset(&adev->wb.used, 0, sizeof(adev->wb.used));
@@ -1038,6 +1016,13 @@ static void amdgpu_check_arguments(struct amdgpu_device *adev)
amdgpu_vm_block_size);
amdgpu_vm_block_size = 9;
}
+
+ if (amdgpu_vram_page_split != -1 && (amdgpu_vram_page_split < 16 ||
+ !amdgpu_check_pot_argument(amdgpu_vram_page_split))) {
+ dev_warn(adev->dev, "invalid VRAM page split (%d)\n",
+ amdgpu_vram_page_split);
+ amdgpu_vram_page_split = 1024;
+ }
}
/**
@@ -1112,11 +1097,11 @@ int amdgpu_set_clockgating_state(struct amdgpu_device *adev,
int i, r = 0;
for (i = 0; i < adev->num_ip_blocks; i++) {
- if (!adev->ip_block_status[i].valid)
+ if (!adev->ip_blocks[i].status.valid)
continue;
- if (adev->ip_blocks[i].type == block_type) {
- r = adev->ip_blocks[i].funcs->set_clockgating_state((void *)adev,
- state);
+ if (adev->ip_blocks[i].version->type == block_type) {
+ r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
+ state);
if (r)
return r;
break;
@@ -1132,11 +1117,11 @@ int amdgpu_set_powergating_state(struct amdgpu_device *adev,
int i, r = 0;
for (i = 0; i < adev->num_ip_blocks; i++) {
- if (!adev->ip_block_status[i].valid)
+ if (!adev->ip_blocks[i].status.valid)
continue;
- if (adev->ip_blocks[i].type == block_type) {
- r = adev->ip_blocks[i].funcs->set_powergating_state((void *)adev,
- state);
+ if (adev->ip_blocks[i].version->type == block_type) {
+ r = adev->ip_blocks[i].version->funcs->set_powergating_state((void *)adev,
+ state);
if (r)
return r;
break;
@@ -1151,10 +1136,10 @@ int amdgpu_wait_for_idle(struct amdgpu_device *adev,
int i, r;
for (i = 0; i < adev->num_ip_blocks; i++) {
- if (!adev->ip_block_status[i].valid)
+ if (!adev->ip_blocks[i].status.valid)
continue;
- if (adev->ip_blocks[i].type == block_type) {
- r = adev->ip_blocks[i].funcs->wait_for_idle((void *)adev);
+ if (adev->ip_blocks[i].version->type == block_type) {
+ r = adev->ip_blocks[i].version->funcs->wait_for_idle((void *)adev);
if (r)
return r;
break;
@@ -1170,23 +1155,22 @@ bool amdgpu_is_idle(struct amdgpu_device *adev,
int i;
for (i = 0; i < adev->num_ip_blocks; i++) {
- if (!adev->ip_block_status[i].valid)
+ if (!adev->ip_blocks[i].status.valid)
continue;
- if (adev->ip_blocks[i].type == block_type)
- return adev->ip_blocks[i].funcs->is_idle((void *)adev);
+ if (adev->ip_blocks[i].version->type == block_type)
+ return adev->ip_blocks[i].version->funcs->is_idle((void *)adev);
}
return true;
}
-const struct amdgpu_ip_block_version * amdgpu_get_ip_block(
- struct amdgpu_device *adev,
- enum amd_ip_block_type type)
+struct amdgpu_ip_block * amdgpu_get_ip_block(struct amdgpu_device *adev,
+ enum amd_ip_block_type type)
{
int i;
for (i = 0; i < adev->num_ip_blocks; i++)
- if (adev->ip_blocks[i].type == type)
+ if (adev->ip_blocks[i].version->type == type)
return &adev->ip_blocks[i];
return NULL;
@@ -1207,38 +1191,75 @@ int amdgpu_ip_block_version_cmp(struct amdgpu_device *adev,
enum amd_ip_block_type type,
u32 major, u32 minor)
{
- const struct amdgpu_ip_block_version *ip_block;
- ip_block = amdgpu_get_ip_block(adev, type);
+ struct amdgpu_ip_block *ip_block = amdgpu_get_ip_block(adev, type);
- if (ip_block && ((ip_block->major > major) ||
- ((ip_block->major == major) &&
- (ip_block->minor >= minor))))
+ if (ip_block && ((ip_block->version->major > major) ||
+ ((ip_block->version->major == major) &&
+ (ip_block->version->minor >= minor))))
return 0;
return 1;
}
-static void amdgpu_whether_enable_virtual_display(struct amdgpu_device *adev)
+/**
+ * amdgpu_ip_block_add
+ *
+ * @adev: amdgpu_device pointer
+ * @ip_block_version: pointer to the IP to add
+ *
+ * Adds the IP block driver information to the collection of IPs
+ * on the asic.
+ */
+int amdgpu_ip_block_add(struct amdgpu_device *adev,
+ const struct amdgpu_ip_block_version *ip_block_version)
+{
+ if (!ip_block_version)
+ return -EINVAL;
+
+ adev->ip_blocks[adev->num_ip_blocks++].version = ip_block_version;
+
+ return 0;
+}
+
+static void amdgpu_device_enable_virtual_display(struct amdgpu_device *adev)
{
adev->enable_virtual_display = false;
if (amdgpu_virtual_display) {
struct drm_device *ddev = adev->ddev;
const char *pci_address_name = pci_name(ddev->pdev);
- char *pciaddstr, *pciaddstr_tmp, *pciaddname;
+ char *pciaddstr, *pciaddstr_tmp, *pciaddname_tmp, *pciaddname;
pciaddstr = kstrdup(amdgpu_virtual_display, GFP_KERNEL);
pciaddstr_tmp = pciaddstr;
- while ((pciaddname = strsep(&pciaddstr_tmp, ";"))) {
+ while ((pciaddname_tmp = strsep(&pciaddstr_tmp, ";"))) {
+ pciaddname = strsep(&pciaddname_tmp, ",");
if (!strcmp(pci_address_name, pciaddname)) {
+ long num_crtc;
+ int res = -1;
+
adev->enable_virtual_display = true;
+
+ if (pciaddname_tmp)
+ res = kstrtol(pciaddname_tmp, 10,
+ &num_crtc);
+
+ if (!res) {
+ if (num_crtc < 1)
+ num_crtc = 1;
+ if (num_crtc > 6)
+ num_crtc = 6;
+ adev->mode_info.num_crtc = num_crtc;
+ } else {
+ adev->mode_info.num_crtc = 1;
+ }
break;
}
}
- DRM_INFO("virtual display string:%s, %s:virtual_display:%d\n",
- amdgpu_virtual_display, pci_address_name,
- adev->enable_virtual_display);
+ DRM_INFO("virtual display string:%s, %s:virtual_display:%d, num_crtc:%d\n",
+ amdgpu_virtual_display, pci_address_name,
+ adev->enable_virtual_display, adev->mode_info.num_crtc);
kfree(pciaddstr);
}
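The virtual_display module parameter now takes an optional per-device CRTC count: entries are "domain:bus:slot.func[,num_crtc]" separated by ';', with num_crtc clamped to the 1-6 range and defaulting to 1 when omitted or unparsable. For example, with hypothetical PCI addresses:

        amdgpu.virtual_display=0000:01:00.0,2;0000:02:00.0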
@@ -1248,7 +1269,7 @@ static int amdgpu_early_init(struct amdgpu_device *adev)
{
int i, r;
- amdgpu_whether_enable_virtual_display(adev);
+ amdgpu_device_enable_virtual_display(adev);
switch (adev->asic_type) {
case CHIP_TOPAZ:
@@ -1300,33 +1321,24 @@ static int amdgpu_early_init(struct amdgpu_device *adev)
return -EINVAL;
}
- adev->ip_block_status = kcalloc(adev->num_ip_blocks,
- sizeof(struct amdgpu_ip_block_status), GFP_KERNEL);
- if (adev->ip_block_status == NULL)
- return -ENOMEM;
-
- if (adev->ip_blocks == NULL) {
- DRM_ERROR("No IP blocks found!\n");
- return r;
- }
-
for (i = 0; i < adev->num_ip_blocks; i++) {
if ((amdgpu_ip_block_mask & (1 << i)) == 0) {
DRM_ERROR("disabled ip block: %d\n", i);
- adev->ip_block_status[i].valid = false;
+ adev->ip_blocks[i].status.valid = false;
} else {
- if (adev->ip_blocks[i].funcs->early_init) {
- r = adev->ip_blocks[i].funcs->early_init((void *)adev);
+ if (adev->ip_blocks[i].version->funcs->early_init) {
+ r = adev->ip_blocks[i].version->funcs->early_init((void *)adev);
if (r == -ENOENT) {
- adev->ip_block_status[i].valid = false;
+ adev->ip_blocks[i].status.valid = false;
} else if (r) {
- DRM_ERROR("early_init of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r);
+ DRM_ERROR("early_init of IP block <%s> failed %d\n",
+ adev->ip_blocks[i].version->funcs->name, r);
return r;
} else {
- adev->ip_block_status[i].valid = true;
+ adev->ip_blocks[i].status.valid = true;
}
} else {
- adev->ip_block_status[i].valid = true;
+ adev->ip_blocks[i].status.valid = true;
}
}
}
@@ -1342,22 +1354,23 @@ static int amdgpu_init(struct amdgpu_device *adev)
int i, r;
for (i = 0; i < adev->num_ip_blocks; i++) {
- if (!adev->ip_block_status[i].valid)
+ if (!adev->ip_blocks[i].status.valid)
continue;
- r = adev->ip_blocks[i].funcs->sw_init((void *)adev);
+ r = adev->ip_blocks[i].version->funcs->sw_init((void *)adev);
if (r) {
- DRM_ERROR("sw_init of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r);
+ DRM_ERROR("sw_init of IP block <%s> failed %d\n",
+ adev->ip_blocks[i].version->funcs->name, r);
return r;
}
- adev->ip_block_status[i].sw = true;
+ adev->ip_blocks[i].status.sw = true;
/* need to do gmc hw init early so we can allocate gpu mem */
- if (adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_GMC) {
+ if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
r = amdgpu_vram_scratch_init(adev);
if (r) {
DRM_ERROR("amdgpu_vram_scratch_init failed %d\n", r);
return r;
}
- r = adev->ip_blocks[i].funcs->hw_init((void *)adev);
+ r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
if (r) {
DRM_ERROR("hw_init %d failed %d\n", i, r);
return r;
@@ -1367,22 +1380,23 @@ static int amdgpu_init(struct amdgpu_device *adev)
DRM_ERROR("amdgpu_wb_init failed %d\n", r);
return r;
}
- adev->ip_block_status[i].hw = true;
+ adev->ip_blocks[i].status.hw = true;
}
}
for (i = 0; i < adev->num_ip_blocks; i++) {
- if (!adev->ip_block_status[i].sw)
+ if (!adev->ip_blocks[i].status.sw)
continue;
/* gmc hw init is done early */
- if (adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_GMC)
+ if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC)
continue;
- r = adev->ip_blocks[i].funcs->hw_init((void *)adev);
+ r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
if (r) {
- DRM_ERROR("hw_init of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r);
+ DRM_ERROR("hw_init of IP block <%s> failed %d\n",
+ adev->ip_blocks[i].version->funcs->name, r);
return r;
}
- adev->ip_block_status[i].hw = true;
+ adev->ip_blocks[i].status.hw = true;
}
return 0;
@@ -1393,25 +1407,26 @@ static int amdgpu_late_init(struct amdgpu_device *adev)
int i = 0, r;
for (i = 0; i < adev->num_ip_blocks; i++) {
- if (!adev->ip_block_status[i].valid)
+ if (!adev->ip_blocks[i].status.valid)
continue;
- if (adev->ip_blocks[i].funcs->late_init) {
- r = adev->ip_blocks[i].funcs->late_init((void *)adev);
+ if (adev->ip_blocks[i].version->funcs->late_init) {
+ r = adev->ip_blocks[i].version->funcs->late_init((void *)adev);
if (r) {
- DRM_ERROR("late_init of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r);
+ DRM_ERROR("late_init of IP block <%s> failed %d\n",
+ adev->ip_blocks[i].version->funcs->name, r);
return r;
}
- adev->ip_block_status[i].late_initialized = true;
+ adev->ip_blocks[i].status.late_initialized = true;
}
/* skip CG for VCE/UVD, it's handled specially */
- if (adev->ip_blocks[i].type != AMD_IP_BLOCK_TYPE_UVD &&
- adev->ip_blocks[i].type != AMD_IP_BLOCK_TYPE_VCE) {
+ if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
+ adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE) {
/* enable clockgating to save power */
- r = adev->ip_blocks[i].funcs->set_clockgating_state((void *)adev,
- AMD_CG_STATE_GATE);
+ r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
+ AMD_CG_STATE_GATE);
if (r) {
DRM_ERROR("set_clockgating_state(gate) of IP block <%s> failed %d\n",
- adev->ip_blocks[i].funcs->name, r);
+ adev->ip_blocks[i].version->funcs->name, r);
return r;
}
}
@@ -1426,68 +1441,77 @@ static int amdgpu_fini(struct amdgpu_device *adev)
/* need to disable SMC first */
for (i = 0; i < adev->num_ip_blocks; i++) {
- if (!adev->ip_block_status[i].hw)
+ if (!adev->ip_blocks[i].status.hw)
continue;
- if (adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_SMC) {
+ if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
/* ungate blocks before hw fini so that we can shut down the blocks safely */
- r = adev->ip_blocks[i].funcs->set_clockgating_state((void *)adev,
- AMD_CG_STATE_UNGATE);
+ r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
+ AMD_CG_STATE_UNGATE);
if (r) {
DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n",
- adev->ip_blocks[i].funcs->name, r);
+ adev->ip_blocks[i].version->funcs->name, r);
return r;
}
- r = adev->ip_blocks[i].funcs->hw_fini((void *)adev);
+ r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
/* XXX handle errors */
if (r) {
DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
- adev->ip_blocks[i].funcs->name, r);
+ adev->ip_blocks[i].version->funcs->name, r);
}
- adev->ip_block_status[i].hw = false;
+ adev->ip_blocks[i].status.hw = false;
break;
}
}
for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
- if (!adev->ip_block_status[i].hw)
+ if (!adev->ip_blocks[i].status.hw)
continue;
- if (adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_GMC) {
+ if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
amdgpu_wb_fini(adev);
amdgpu_vram_scratch_fini(adev);
}
- /* ungate blocks before hw fini so that we can shutdown the blocks safely */
- r = adev->ip_blocks[i].funcs->set_clockgating_state((void *)adev,
- AMD_CG_STATE_UNGATE);
- if (r) {
- DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r);
- return r;
+
+ if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
+ adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE) {
+ /* ungate blocks before hw fini so that we can shut down the blocks safely */
+ r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
+ AMD_CG_STATE_UNGATE);
+ if (r) {
+ DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n",
+ adev->ip_blocks[i].version->funcs->name, r);
+ return r;
+ }
}
- r = adev->ip_blocks[i].funcs->hw_fini((void *)adev);
+
+ r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
/* XXX handle errors */
if (r) {
- DRM_DEBUG("hw_fini of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r);
+ DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
+ adev->ip_blocks[i].version->funcs->name, r);
}
- adev->ip_block_status[i].hw = false;
+
+ adev->ip_blocks[i].status.hw = false;
}
for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
- if (!adev->ip_block_status[i].sw)
+ if (!adev->ip_blocks[i].status.sw)
continue;
- r = adev->ip_blocks[i].funcs->sw_fini((void *)adev);
+ r = adev->ip_blocks[i].version->funcs->sw_fini((void *)adev);
/* XXX handle errors */
if (r) {
- DRM_DEBUG("sw_fini of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r);
+ DRM_DEBUG("sw_fini of IP block <%s> failed %d\n",
+ adev->ip_blocks[i].version->funcs->name, r);
}
- adev->ip_block_status[i].sw = false;
- adev->ip_block_status[i].valid = false;
+ adev->ip_blocks[i].status.sw = false;
+ adev->ip_blocks[i].status.valid = false;
}
for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
- if (!adev->ip_block_status[i].late_initialized)
+ if (!adev->ip_blocks[i].status.late_initialized)
continue;
- if (adev->ip_blocks[i].funcs->late_fini)
- adev->ip_blocks[i].funcs->late_fini((void *)adev);
- adev->ip_block_status[i].late_initialized = false;
+ if (adev->ip_blocks[i].version->funcs->late_fini)
+ adev->ip_blocks[i].version->funcs->late_fini((void *)adev);
+ adev->ip_blocks[i].status.late_initialized = false;
}
return 0;
@@ -1505,21 +1529,23 @@ int amdgpu_suspend(struct amdgpu_device *adev)
}
for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
- if (!adev->ip_block_status[i].valid)
+ if (!adev->ip_blocks[i].status.valid)
continue;
/* ungate blocks so that suspend can properly shut them down */
if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_SMC) {
- r = adev->ip_blocks[i].funcs->set_clockgating_state((void *)adev,
- AMD_CG_STATE_UNGATE);
+ r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
+ AMD_CG_STATE_UNGATE);
if (r) {
- DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r);
+ DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n",
+ adev->ip_blocks[i].version->funcs->name, r);
}
}
/* XXX handle errors */
- r = adev->ip_blocks[i].funcs->suspend(adev);
+ r = adev->ip_blocks[i].version->funcs->suspend(adev);
/* XXX handle errors */
if (r) {
- DRM_ERROR("suspend of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r);
+ DRM_ERROR("suspend of IP block <%s> failed %d\n",
+ adev->ip_blocks[i].version->funcs->name, r);
}
}
@@ -1531,11 +1557,12 @@ static int amdgpu_resume(struct amdgpu_device *adev)
int i, r;
for (i = 0; i < adev->num_ip_blocks; i++) {
- if (!adev->ip_block_status[i].valid)
+ if (!adev->ip_blocks[i].status.valid)
continue;
- r = adev->ip_blocks[i].funcs->resume(adev);
+ r = adev->ip_blocks[i].version->funcs->resume(adev);
if (r) {
- DRM_ERROR("resume of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r);
+ DRM_ERROR("resume of IP block <%s> failed %d\n",
+ adev->ip_blocks[i].version->funcs->name, r);
return r;
}
}
@@ -1586,7 +1613,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
adev->vm_manager.vm_pte_funcs = NULL;
adev->vm_manager.vm_pte_num_rings = 0;
adev->gart.gart_funcs = NULL;
- adev->fence_context = fence_context_alloc(AMDGPU_MAX_RINGS);
+ adev->fence_context = dma_fence_context_alloc(AMDGPU_MAX_RINGS);
adev->smc_rreg = &amdgpu_invalid_rreg;
adev->smc_wreg = &amdgpu_invalid_wreg;
@@ -1846,8 +1873,6 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
amdgpu_fence_driver_fini(adev);
amdgpu_fbdev_fini(adev);
r = amdgpu_fini(adev);
- kfree(adev->ip_block_status);
- adev->ip_block_status = NULL;
adev->accel_working = false;
/* free i2c buses */
amdgpu_i2c_fini(adev);
@@ -1943,7 +1968,10 @@ int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon)
r = amdgpu_suspend(adev);
- /* evict remaining vram memory */
+ /* evict remaining vram memory
+ * This second call to evict vram is to evict the gart page table
+ * using the CPU.
+ */
amdgpu_bo_evict_vram(adev);
amdgpu_atombios_scratch_regs_save(adev);
@@ -2085,13 +2113,13 @@ static bool amdgpu_check_soft_reset(struct amdgpu_device *adev)
bool asic_hang = false;
for (i = 0; i < adev->num_ip_blocks; i++) {
- if (!adev->ip_block_status[i].valid)
+ if (!adev->ip_blocks[i].status.valid)
continue;
- if (adev->ip_blocks[i].funcs->check_soft_reset)
- adev->ip_block_status[i].hang =
- adev->ip_blocks[i].funcs->check_soft_reset(adev);
- if (adev->ip_block_status[i].hang) {
- DRM_INFO("IP block:%d is hang!\n", i);
+ if (adev->ip_blocks[i].version->funcs->check_soft_reset)
+ adev->ip_blocks[i].status.hang =
+ adev->ip_blocks[i].version->funcs->check_soft_reset(adev);
+ if (adev->ip_blocks[i].status.hang) {
+ DRM_INFO("IP block:%s is hung!\n", adev->ip_blocks[i].version->funcs->name);
asic_hang = true;
}
}
@@ -2103,11 +2131,11 @@ static int amdgpu_pre_soft_reset(struct amdgpu_device *adev)
int i, r = 0;
for (i = 0; i < adev->num_ip_blocks; i++) {
- if (!adev->ip_block_status[i].valid)
+ if (!adev->ip_blocks[i].status.valid)
continue;
- if (adev->ip_block_status[i].hang &&
- adev->ip_blocks[i].funcs->pre_soft_reset) {
- r = adev->ip_blocks[i].funcs->pre_soft_reset(adev);
+ if (adev->ip_blocks[i].status.hang &&
+ adev->ip_blocks[i].version->funcs->pre_soft_reset) {
+ r = adev->ip_blocks[i].version->funcs->pre_soft_reset(adev);
if (r)
return r;
}
@@ -2121,13 +2149,13 @@ static bool amdgpu_need_full_reset(struct amdgpu_device *adev)
int i;
for (i = 0; i < adev->num_ip_blocks; i++) {
- if (!adev->ip_block_status[i].valid)
+ if (!adev->ip_blocks[i].status.valid)
continue;
- if ((adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_GMC) ||
- (adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_SMC) ||
- (adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_ACP) ||
- (adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_DCE)) {
- if (adev->ip_block_status[i].hang) {
+ if ((adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) ||
+ (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) ||
+ (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_ACP) ||
+ (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE)) {
+ if (adev->ip_blocks[i].status.hang) {
DRM_INFO("Some block need full reset!\n");
return true;
}
@@ -2141,11 +2169,11 @@ static int amdgpu_soft_reset(struct amdgpu_device *adev)
int i, r = 0;
for (i = 0; i < adev->num_ip_blocks; i++) {
- if (!adev->ip_block_status[i].valid)
+ if (!adev->ip_blocks[i].status.valid)
continue;
- if (adev->ip_block_status[i].hang &&
- adev->ip_blocks[i].funcs->soft_reset) {
- r = adev->ip_blocks[i].funcs->soft_reset(adev);
+ if (adev->ip_blocks[i].status.hang &&
+ adev->ip_blocks[i].version->funcs->soft_reset) {
+ r = adev->ip_blocks[i].version->funcs->soft_reset(adev);
if (r)
return r;
}
@@ -2159,11 +2187,11 @@ static int amdgpu_post_soft_reset(struct amdgpu_device *adev)
int i, r = 0;
for (i = 0; i < adev->num_ip_blocks; i++) {
- if (!adev->ip_block_status[i].valid)
+ if (!adev->ip_blocks[i].status.valid)
continue;
- if (adev->ip_block_status[i].hang &&
- adev->ip_blocks[i].funcs->post_soft_reset)
- r = adev->ip_blocks[i].funcs->post_soft_reset(adev);
+ if (adev->ip_blocks[i].status.hang &&
+ adev->ip_blocks[i].version->funcs->post_soft_reset)
+ r = adev->ip_blocks[i].version->funcs->post_soft_reset(adev);
if (r)
return r;
}
@@ -2182,7 +2210,7 @@ bool amdgpu_need_backup(struct amdgpu_device *adev)
static int amdgpu_recover_vram_from_shadow(struct amdgpu_device *adev,
struct amdgpu_ring *ring,
struct amdgpu_bo *bo,
- struct fence **fence)
+ struct dma_fence **fence)
{
uint32_t domain;
int r;
@@ -2298,30 +2326,30 @@ retry:
if (need_full_reset && amdgpu_need_backup(adev)) {
struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
struct amdgpu_bo *bo, *tmp;
- struct fence *fence = NULL, *next = NULL;
+ struct dma_fence *fence = NULL, *next = NULL;
DRM_INFO("recover vram bo from shadow\n");
mutex_lock(&adev->shadow_list_lock);
list_for_each_entry_safe(bo, tmp, &adev->shadow_list, shadow_list) {
amdgpu_recover_vram_from_shadow(adev, ring, bo, &next);
if (fence) {
- r = fence_wait(fence, false);
+ r = dma_fence_wait(fence, false);
if (r) {
WARN(r, "recovery from shadow isn't comleted\n");
break;
}
}
- fence_put(fence);
+ dma_fence_put(fence);
fence = next;
}
mutex_unlock(&adev->shadow_list_lock);
if (fence) {
- r = fence_wait(fence, false);
+ r = dma_fence_wait(fence, false);
if (r)
WARN(r, "recovery from shadow isn't comleted\n");
}
- fence_put(fence);
+ dma_fence_put(fence);
}
for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
struct amdgpu_ring *ring = adev->rings[i];
@@ -2471,9 +2499,6 @@ int amdgpu_debugfs_add_files(struct amdgpu_device *adev,
adev->debugfs_count = i;
#if defined(CONFIG_DEBUG_FS)
drm_debugfs_create_files(files, nfiles,
- adev->ddev->control->debugfs_root,
- adev->ddev->control);
- drm_debugfs_create_files(files, nfiles,
adev->ddev->primary->debugfs_root,
adev->ddev->primary);
#endif
@@ -2488,9 +2513,6 @@ static void amdgpu_debugfs_remove_files(struct amdgpu_device *adev)
for (i = 0; i < adev->debugfs_count; i++) {
drm_debugfs_remove_files(adev->debugfs[i].files,
adev->debugfs[i].num_files,
- adev->ddev->control);
- drm_debugfs_remove_files(adev->debugfs[i].files,
- adev->debugfs[i].num_files,
adev->ddev->primary);
}
#endif
@@ -2501,7 +2523,7 @@ static void amdgpu_debugfs_remove_files(struct amdgpu_device *adev)
static ssize_t amdgpu_debugfs_regs_read(struct file *f, char __user *buf,
size_t size, loff_t *pos)
{
- struct amdgpu_device *adev = f->f_inode->i_private;
+ struct amdgpu_device *adev = file_inode(f)->i_private;
ssize_t result = 0;
int r;
bool pm_pg_lock, use_bank;
@@ -2517,6 +2539,13 @@ static ssize_t amdgpu_debugfs_regs_read(struct file *f, char __user *buf,
se_bank = (*pos >> 24) & 0x3FF;
sh_bank = (*pos >> 34) & 0x3FF;
instance_bank = (*pos >> 44) & 0x3FF;
+
+ if (se_bank == 0x3FF)
+ se_bank = 0xFFFFFFFF;
+ if (sh_bank == 0x3FF)
+ sh_bank = 0xFFFFFFFF;
+ if (instance_bank == 0x3FF)
+ instance_bank = 0xFFFFFFFF;
use_bank = 1;
} else {
use_bank = 0;
@@ -2525,8 +2554,8 @@ static ssize_t amdgpu_debugfs_regs_read(struct file *f, char __user *buf,
*pos &= 0x3FFFF;
if (use_bank) {
- if (sh_bank >= adev->gfx.config.max_sh_per_se ||
- se_bank >= adev->gfx.config.max_shader_engines)
+ if ((sh_bank != 0xFFFFFFFF && sh_bank >= adev->gfx.config.max_sh_per_se) ||
+ (se_bank != 0xFFFFFFFF && se_bank >= adev->gfx.config.max_shader_engines))
return -EINVAL;
mutex_lock(&adev->grbm_idx_mutex);
amdgpu_gfx_select_se_sh(adev, se_bank,
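
For reference, the loff_t consumed above packs: bits 0-17 the register offset, bit 23 the PG lock flag, 10-bit SE/SH/instance banks at bits 24/34/44 (0x3FF decoded to 0xFFFFFFFF, i.e. broadcast), and bit 62 the bank-select enable. A hypothetical encoder mirroring that decode:

	#include <stdint.h>

	/* Not part of the patch: builds the file offset understood by the
	 * amdgpu_regs read/write handlers. Pass 0x3FF per bank to broadcast. */
	uint64_t amdgpu_regs_pos(uint32_t reg, int pg_lock,
				 uint32_t se, uint32_t sh, uint32_t inst)
	{
		uint64_t pos = reg & 0x3FFFF;

		if (pg_lock)
			pos |= 1ULL << 23;
		pos |= (uint64_t)(se & 0x3FF) << 24;
		pos |= (uint64_t)(sh & 0x3FF) << 34;
		pos |= (uint64_t)(inst & 0x3FF) << 44;
		pos |= 1ULL << 62;	/* enable banked access */
		return pos;
	}
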
@@ -2570,13 +2599,48 @@ end:
static ssize_t amdgpu_debugfs_regs_write(struct file *f, const char __user *buf,
size_t size, loff_t *pos)
{
- struct amdgpu_device *adev = f->f_inode->i_private;
+ struct amdgpu_device *adev = file_inode(f)->i_private;
ssize_t result = 0;
int r;
+ bool pm_pg_lock, use_bank;
+ unsigned instance_bank, sh_bank, se_bank;
if (size & 0x3 || *pos & 0x3)
return -EINVAL;
+ /* are we writing registers for which a PG lock is necessary? */
+ pm_pg_lock = (*pos >> 23) & 1;
+
+ if (*pos & (1ULL << 62)) {
+ se_bank = (*pos >> 24) & 0x3FF;
+ sh_bank = (*pos >> 34) & 0x3FF;
+ instance_bank = (*pos >> 44) & 0x3FF;
+
+ if (se_bank == 0x3FF)
+ se_bank = 0xFFFFFFFF;
+ if (sh_bank == 0x3FF)
+ sh_bank = 0xFFFFFFFF;
+ if (instance_bank == 0x3FF)
+ instance_bank = 0xFFFFFFFF;
+ use_bank = 1;
+ } else {
+ use_bank = 0;
+ }
+
+ *pos &= 0x3FFFF;
+
+ if (use_bank) {
+ if ((sh_bank != 0xFFFFFFFF && sh_bank >= adev->gfx.config.max_sh_per_se) ||
+ (se_bank != 0xFFFFFFFF && se_bank >= adev->gfx.config.max_shader_engines))
+ return -EINVAL;
+ mutex_lock(&adev->grbm_idx_mutex);
+ amdgpu_gfx_select_se_sh(adev, se_bank,
+ sh_bank, instance_bank);
+ }
+
+ if (pm_pg_lock)
+ mutex_lock(&adev->pm.mutex);
+
while (size) {
uint32_t value;
@@ -2595,13 +2659,21 @@ static ssize_t amdgpu_debugfs_regs_write(struct file *f, const char __user *buf,
size -= 4;
}
+ if (use_bank) {
+ amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+ mutex_unlock(&adev->grbm_idx_mutex);
+ }
+
+ if (pm_pg_lock)
+ mutex_unlock(&adev->pm.mutex);
+
return result;
}
static ssize_t amdgpu_debugfs_regs_pcie_read(struct file *f, char __user *buf,
size_t size, loff_t *pos)
{
- struct amdgpu_device *adev = f->f_inode->i_private;
+ struct amdgpu_device *adev = file_inode(f)->i_private;
ssize_t result = 0;
int r;
@@ -2628,7 +2700,7 @@ static ssize_t amdgpu_debugfs_regs_pcie_read(struct file *f, char __user *buf,
static ssize_t amdgpu_debugfs_regs_pcie_write(struct file *f, const char __user *buf,
size_t size, loff_t *pos)
{
- struct amdgpu_device *adev = f->f_inode->i_private;
+ struct amdgpu_device *adev = file_inode(f)->i_private;
ssize_t result = 0;
int r;
@@ -2656,7 +2728,7 @@ static ssize_t amdgpu_debugfs_regs_pcie_write(struct file *f, const char __user
static ssize_t amdgpu_debugfs_regs_didt_read(struct file *f, char __user *buf,
size_t size, loff_t *pos)
{
- struct amdgpu_device *adev = f->f_inode->i_private;
+ struct amdgpu_device *adev = file_inode(f)->i_private;
ssize_t result = 0;
int r;
@@ -2683,7 +2755,7 @@ static ssize_t amdgpu_debugfs_regs_didt_read(struct file *f, char __user *buf,
static ssize_t amdgpu_debugfs_regs_didt_write(struct file *f, const char __user *buf,
size_t size, loff_t *pos)
{
- struct amdgpu_device *adev = f->f_inode->i_private;
+ struct amdgpu_device *adev = file_inode(f)->i_private;
ssize_t result = 0;
int r;
@@ -2711,7 +2783,7 @@ static ssize_t amdgpu_debugfs_regs_didt_write(struct file *f, const char __user
static ssize_t amdgpu_debugfs_regs_smc_read(struct file *f, char __user *buf,
size_t size, loff_t *pos)
{
- struct amdgpu_device *adev = f->f_inode->i_private;
+ struct amdgpu_device *adev = file_inode(f)->i_private;
ssize_t result = 0;
int r;
@@ -2738,7 +2810,7 @@ static ssize_t amdgpu_debugfs_regs_smc_read(struct file *f, char __user *buf,
static ssize_t amdgpu_debugfs_regs_smc_write(struct file *f, const char __user *buf,
size_t size, loff_t *pos)
{
- struct amdgpu_device *adev = f->f_inode->i_private;
+ struct amdgpu_device *adev = file_inode(f)->i_private;
ssize_t result = 0;
int r;
@@ -2766,7 +2838,7 @@ static ssize_t amdgpu_debugfs_regs_smc_write(struct file *f, const char __user *
static ssize_t amdgpu_debugfs_gca_config_read(struct file *f, char __user *buf,
size_t size, loff_t *pos)
{
- struct amdgpu_device *adev = f->f_inode->i_private;
+ struct amdgpu_device *adev = file_inode(f)->i_private;
ssize_t result = 0;
int r;
uint32_t *config, no_regs = 0;
@@ -2836,7 +2908,7 @@ static ssize_t amdgpu_debugfs_gca_config_read(struct file *f, char __user *buf,
static ssize_t amdgpu_debugfs_sensor_read(struct file *f, char __user *buf,
size_t size, loff_t *pos)
{
- struct amdgpu_device *adev = f->f_inode->i_private;
+ struct amdgpu_device *adev = file_inode(f)->i_private;
int idx, r;
int32_t value;
@@ -2857,6 +2929,116 @@ static ssize_t amdgpu_debugfs_sensor_read(struct file *f, char __user *buf,
return !r ? 4 : r;
}
+static ssize_t amdgpu_debugfs_wave_read(struct file *f, char __user *buf,
+ size_t size, loff_t *pos)
+{
+ struct amdgpu_device *adev = file_inode(f)->i_private;
+ int r, x;
+ ssize_t result = 0;
+ uint32_t offset, se, sh, cu, wave, simd, data[32];
+
+ if (size & 3 || *pos & 3)
+ return -EINVAL;
+
+ /* decode offset */
+ offset = (*pos & 0x7F);
+ se = ((*pos >> 7) & 0xFF);
+ sh = ((*pos >> 15) & 0xFF);
+ cu = ((*pos >> 23) & 0xFF);
+ wave = ((*pos >> 31) & 0xFF);
+ simd = ((*pos >> 37) & 0xFF);
+
+ /* switch to the specific se/sh/cu */
+ mutex_lock(&adev->grbm_idx_mutex);
+ amdgpu_gfx_select_se_sh(adev, se, sh, cu);
+
+ x = 0;
+ if (adev->gfx.funcs->read_wave_data)
+ adev->gfx.funcs->read_wave_data(adev, simd, wave, data, &x);
+
+ amdgpu_gfx_select_se_sh(adev, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF);
+ mutex_unlock(&adev->grbm_idx_mutex);
+
+ if (!x)
+ return -EINVAL;
+
+ while (size && (offset < x * 4)) {
+ uint32_t value;
+
+ value = data[offset >> 2];
+ r = put_user(value, (uint32_t *)buf);
+ if (r)
+ return r;
+
+ result += 4;
+ buf += 4;
+ offset += 4;
+ size -= 4;
+ }
+
+ return result;
+}
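
The wave file packs its read position the same way: a 7-bit byte offset, then 8-bit SE/SH/CU/wave/SIMD fields at bit positions 7, 15, 23, 31 and 37 as decoded above (note the 8-bit wave and simd fields overlap at bits 37-38 in this decode). A hypothetical encoder mirroring those shifts:

	#include <stdint.h>

	/* Not part of the patch: position encoder for the amdgpu_wave file. */
	uint64_t amdgpu_wave_pos(uint32_t off, uint32_t se, uint32_t sh,
				 uint32_t cu, uint32_t wave, uint32_t simd)
	{
		return (uint64_t)(off & 0x7F) |
		       ((uint64_t)(se & 0xFF) << 7) |
		       ((uint64_t)(sh & 0xFF) << 15) |
		       ((uint64_t)(cu & 0xFF) << 23) |
		       ((uint64_t)(wave & 0xFF) << 31) |
		       ((uint64_t)(simd & 0xFF) << 37);
	}
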
+
+static ssize_t amdgpu_debugfs_gpr_read(struct file *f, char __user *buf,
+ size_t size, loff_t *pos)
+{
+ struct amdgpu_device *adev = file_inode(f)->i_private;
+ int r;
+ ssize_t result = 0;
+ uint32_t offset, se, sh, cu, wave, simd, thread, bank, *data;
+
+ if (size & 3 || *pos & 3)
+ return -EINVAL;
+
+ /* decode offset */
+ offset = (*pos & 0xFFF); /* in dwords */
+ se = ((*pos >> 12) & 0xFF);
+ sh = ((*pos >> 20) & 0xFF);
+ cu = ((*pos >> 28) & 0xFF);
+ wave = ((*pos >> 36) & 0xFF);
+ simd = ((*pos >> 44) & 0xFF);
+ thread = ((*pos >> 52) & 0xFF);
+ bank = ((*pos >> 60) & 1);
+
+ data = kmalloc_array(1024, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ /* switch to the specific se/sh/cu */
+ mutex_lock(&adev->grbm_idx_mutex);
+ amdgpu_gfx_select_se_sh(adev, se, sh, cu);
+
+ if (bank == 0) {
+ if (adev->gfx.funcs->read_wave_vgprs)
+ adev->gfx.funcs->read_wave_vgprs(adev, simd, wave, thread, offset, size >> 2, data);
+ } else {
+ if (adev->gfx.funcs->read_wave_sgprs)
+ adev->gfx.funcs->read_wave_sgprs(adev, simd, wave, offset, size >> 2, data);
+ }
+
+ amdgpu_gfx_select_se_sh(adev, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF);
+ mutex_unlock(&adev->grbm_idx_mutex);
+
+ while (size) {
+ uint32_t value;
+
+ value = data[offset++];
+ r = put_user(value, (uint32_t *)buf);
+ if (r) {
+ result = r;
+ goto err;
+ }
+
+ result += 4;
+ buf += 4;
+ size -= 4;
+ }
+
+err:
+ kfree(data);
+ return result;
+}
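
Userspace selects the register bank with bit 60 of the position (0 = VGPR, 1 = SGPR), puts the dword offset in bits 0-11, and SE/SH/CU/wave/SIMD/thread at bits 12/20/28/36/44/52. A sketch that reads the first 16 SGPRs of wave 0 on SE0/SH0/CU0/SIMD0, assuming the usual DRM debugfs path:

	#include <fcntl.h>
	#include <stdint.h>
	#include <unistd.h>

	int read_sgprs(uint32_t data[16])
	{
		uint64_t pos = 1ULL << 60;	/* bank = SGPR, all other fields 0 */
		int fd = open("/sys/kernel/debug/dri/0/amdgpu_gpr", O_RDONLY);
		ssize_t n;

		if (fd < 0)
			return -1;
		n = pread(fd, data, 16 * 4, pos);
		close(fd);
		return n == 16 * 4 ? 0 : -1;
	}
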
+
static const struct file_operations amdgpu_debugfs_regs_fops = {
.owner = THIS_MODULE,
.read = amdgpu_debugfs_regs_read,
@@ -2894,6 +3076,17 @@ static const struct file_operations amdgpu_debugfs_sensors_fops = {
.llseek = default_llseek
};
+static const struct file_operations amdgpu_debugfs_wave_fops = {
+ .owner = THIS_MODULE,
+ .read = amdgpu_debugfs_wave_read,
+ .llseek = default_llseek
+};
+
+static const struct file_operations amdgpu_debugfs_gpr_fops = {
+ .owner = THIS_MODULE,
+ .read = amdgpu_debugfs_gpr_read,
+ .llseek = default_llseek
+};
+
static const struct file_operations *debugfs_regs[] = {
&amdgpu_debugfs_regs_fops,
&amdgpu_debugfs_regs_didt_fops,
@@ -2901,6 +3094,8 @@ static const struct file_operations *debugfs_regs[] = {
&amdgpu_debugfs_regs_smc_fops,
&amdgpu_debugfs_gca_config_fops,
&amdgpu_debugfs_sensors_fops,
+ &amdgpu_debugfs_wave_fops,
+ &amdgpu_debugfs_gpr_fops,
};
static const char *debugfs_regs_names[] = {
@@ -2910,6 +3105,8 @@ static const char *debugfs_regs_names[] = {
"amdgpu_regs_smc",
"amdgpu_gca_config",
"amdgpu_sensors",
+ "amdgpu_wave",
+ "amdgpu_gpr",
};
static int amdgpu_debugfs_regs_init(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
index 083e2b429872..581601ca6b89 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
@@ -35,29 +35,29 @@
#include <drm/drm_crtc_helper.h>
#include <drm/drm_edid.h>
-static void amdgpu_flip_callback(struct fence *f, struct fence_cb *cb)
+static void amdgpu_flip_callback(struct dma_fence *f, struct dma_fence_cb *cb)
{
struct amdgpu_flip_work *work =
container_of(cb, struct amdgpu_flip_work, cb);
- fence_put(f);
+ dma_fence_put(f);
schedule_work(&work->flip_work.work);
}
static bool amdgpu_flip_handle_fence(struct amdgpu_flip_work *work,
- struct fence **f)
+ struct dma_fence **f)
{
- struct fence *fence= *f;
+ struct dma_fence *fence = *f;
if (fence == NULL)
return false;
*f = NULL;
- if (!fence_add_callback(fence, &work->cb, amdgpu_flip_callback))
+ if (!dma_fence_add_callback(fence, &work->cb, amdgpu_flip_callback))
return true;
- fence_put(fence);
+ dma_fence_put(fence);
return false;
}
@@ -68,9 +68,9 @@ static void amdgpu_flip_work_func(struct work_struct *__work)
struct amdgpu_flip_work *work =
container_of(delayed_work, struct amdgpu_flip_work, flip_work);
struct amdgpu_device *adev = work->adev;
- struct amdgpu_crtc *amdgpuCrtc = adev->mode_info.crtcs[work->crtc_id];
+ struct amdgpu_crtc *amdgpu_crtc = adev->mode_info.crtcs[work->crtc_id];
- struct drm_crtc *crtc = &amdgpuCrtc->base;
+ struct drm_crtc *crtc = &amdgpu_crtc->base;
unsigned long flags;
unsigned i;
int vpos, hpos;
@@ -85,14 +85,14 @@ static void amdgpu_flip_work_func(struct work_struct *__work)
/* Wait until we're out of the vertical blank period before the one
* targeted by the flip
*/
- if (amdgpuCrtc->enabled &&
+ if (amdgpu_crtc->enabled &&
(amdgpu_get_crtc_scanoutpos(adev->ddev, work->crtc_id, 0,
&vpos, &hpos, NULL, NULL,
&crtc->hwmode)
& (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
(DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
(int)(work->target_vblank -
- amdgpu_get_vblank_counter_kms(adev->ddev, amdgpuCrtc->crtc_id)) > 0) {
+ amdgpu_get_vblank_counter_kms(adev->ddev, amdgpu_crtc->crtc_id)) > 0) {
schedule_delayed_work(&work->flip_work, usecs_to_jiffies(1000));
return;
}
@@ -104,12 +104,12 @@ static void amdgpu_flip_work_func(struct work_struct *__work)
adev->mode_info.funcs->page_flip(adev, work->crtc_id, work->base, work->async);
/* Set the flip status */
- amdgpuCrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
+ amdgpu_crtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
DRM_DEBUG_DRIVER("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_SUBMITTED, work: %p,\n",
- amdgpuCrtc->crtc_id, amdgpuCrtc, work);
+ amdgpu_crtc->crtc_id, amdgpu_crtc, work);
}
@@ -187,7 +187,7 @@ int amdgpu_crtc_page_flip_target(struct drm_crtc *crtc,
goto cleanup;
}
- r = amdgpu_bo_pin_restricted(new_abo, AMDGPU_GEM_DOMAIN_VRAM, 0, 0, &base);
+ r = amdgpu_bo_pin(new_abo, AMDGPU_GEM_DOMAIN_VRAM, &base);
if (unlikely(r != 0)) {
r = -EINVAL;
DRM_ERROR("failed to pin new abo buffer before flip\n");
@@ -244,9 +244,9 @@ unreserve:
cleanup:
amdgpu_bo_unref(&work->old_abo);
- fence_put(work->excl);
+ dma_fence_put(work->excl);
for (i = 0; i < work->shared_count; ++i)
- fence_put(work->shared[i]);
+ dma_fence_put(work->shared[i]);
kfree(work->shared);
kfree(work);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c
index 14f57d9915e3..6ca0333ca4c0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c
@@ -553,9 +553,10 @@ int amdgpu_parse_extended_power_table(struct amdgpu_device *adev)
entry = (ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record *)
((u8 *)entry + sizeof(ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record));
}
- for (i = 0; i < states->numEntries; i++) {
- if (i >= AMDGPU_MAX_VCE_LEVELS)
- break;
+ adev->pm.dpm.num_of_vce_states =
+ states->numEntries > AMD_MAX_VCE_LEVELS ?
+ AMD_MAX_VCE_LEVELS : states->numEntries;
+ for (i = 0; i < adev->pm.dpm.num_of_vce_states; i++) {
vce_clk = (VCEClockInfo *)
((u8 *)&array->entries[0] +
(state_entry->ucVCEClockInfoIndex * sizeof(VCEClockInfo)));
@@ -955,3 +956,12 @@ u8 amdgpu_encode_pci_lane_width(u32 lanes)
return encoded_lanes[lanes];
}
+
+struct amd_vce_state*
+amdgpu_get_vce_clock_state(struct amdgpu_device *adev, unsigned idx)
+{
+ if (idx < adev->pm.dpm.num_of_vce_states)
+ return &adev->pm.dpm.vce_states[idx];
+
+ return NULL;
+}
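
Callers index the table cached by the parser above through this helper (or, later in this patch, through the amdgpu_dpm_get_vce_clock_state() wrapper). An illustrative dump, assuming amd_vce_state carries evclk/ecclk fields:

	/* Illustrative only: walk the cached VCE states. */
	static void amdgpu_dump_vce_states(struct amdgpu_device *adev)
	{
		unsigned int i;

		for (i = 0; i < adev->pm.dpm.num_of_vce_states; i++) {
			struct amd_vce_state *state =
				amdgpu_get_vce_clock_state(adev, i);

			if (state)
				DRM_INFO("vce state %u: evclk=%u ecclk=%u\n",
					 i, state->evclk, state->ecclk);
		}
	}
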
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h
index 3738a96c2619..955d6f21e2b3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h
@@ -23,6 +23,453 @@
#ifndef __AMDGPU_DPM_H__
#define __AMDGPU_DPM_H__
+enum amdgpu_int_thermal_type {
+ THERMAL_TYPE_NONE,
+ THERMAL_TYPE_EXTERNAL,
+ THERMAL_TYPE_EXTERNAL_GPIO,
+ THERMAL_TYPE_RV6XX,
+ THERMAL_TYPE_RV770,
+ THERMAL_TYPE_ADT7473_WITH_INTERNAL,
+ THERMAL_TYPE_EVERGREEN,
+ THERMAL_TYPE_SUMO,
+ THERMAL_TYPE_NI,
+ THERMAL_TYPE_SI,
+ THERMAL_TYPE_EMC2103_WITH_INTERNAL,
+ THERMAL_TYPE_CI,
+ THERMAL_TYPE_KV,
+};
+
+enum amdgpu_dpm_auto_throttle_src {
+ AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL,
+ AMDGPU_DPM_AUTO_THROTTLE_SRC_EXTERNAL
+};
+
+enum amdgpu_dpm_event_src {
+ AMDGPU_DPM_EVENT_SRC_ANALOG = 0,
+ AMDGPU_DPM_EVENT_SRC_EXTERNAL = 1,
+ AMDGPU_DPM_EVENT_SRC_DIGITAL = 2,
+ AMDGPU_DPM_EVENT_SRC_ANALOG_OR_EXTERNAL = 3,
+ AMDGPU_DPM_EVENT_SRC_DIGIAL_OR_EXTERNAL = 4
+};
+
+#define SCLK_DEEP_SLEEP_MASK 0x8
+
+struct amdgpu_ps {
+ u32 caps; /* vbios flags */
+ u32 class; /* vbios flags */
+ u32 class2; /* vbios flags */
+ /* UVD clocks */
+ u32 vclk;
+ u32 dclk;
+ /* VCE clocks */
+ u32 evclk;
+ u32 ecclk;
+ bool vce_active;
+ enum amd_vce_level vce_level;
+ /* asic priv */
+ void *ps_priv;
+};
+
+struct amdgpu_dpm_thermal {
+ /* thermal interrupt work */
+ struct work_struct work;
+ /* low temperature threshold */
+ int min_temp;
+ /* high temperature threshold */
+ int max_temp;
+ /* was last interrupt low to high or high to low */
+ bool high_to_low;
+ /* interrupt source */
+ struct amdgpu_irq_src irq;
+};
+
+enum amdgpu_clk_action
+{
+ AMDGPU_SCLK_UP = 1,
+ AMDGPU_SCLK_DOWN
+};
+
+struct amdgpu_blacklist_clocks
+{
+ u32 sclk;
+ u32 mclk;
+ enum amdgpu_clk_action action;
+};
+
+struct amdgpu_clock_and_voltage_limits {
+ u32 sclk;
+ u32 mclk;
+ u16 vddc;
+ u16 vddci;
+};
+
+struct amdgpu_clock_array {
+ u32 count;
+ u32 *values;
+};
+
+struct amdgpu_clock_voltage_dependency_entry {
+ u32 clk;
+ u16 v;
+};
+
+struct amdgpu_clock_voltage_dependency_table {
+ u32 count;
+ struct amdgpu_clock_voltage_dependency_entry *entries;
+};
+
+union amdgpu_cac_leakage_entry {
+ struct {
+ u16 vddc;
+ u32 leakage;
+ };
+ struct {
+ u16 vddc1;
+ u16 vddc2;
+ u16 vddc3;
+ };
+};
+
+struct amdgpu_cac_leakage_table {
+ u32 count;
+ union amdgpu_cac_leakage_entry *entries;
+};
+
+struct amdgpu_phase_shedding_limits_entry {
+ u16 voltage;
+ u32 sclk;
+ u32 mclk;
+};
+
+struct amdgpu_phase_shedding_limits_table {
+ u32 count;
+ struct amdgpu_phase_shedding_limits_entry *entries;
+};
+
+struct amdgpu_uvd_clock_voltage_dependency_entry {
+ u32 vclk;
+ u32 dclk;
+ u16 v;
+};
+
+struct amdgpu_uvd_clock_voltage_dependency_table {
+ u8 count;
+ struct amdgpu_uvd_clock_voltage_dependency_entry *entries;
+};
+
+struct amdgpu_vce_clock_voltage_dependency_entry {
+ u32 ecclk;
+ u32 evclk;
+ u16 v;
+};
+
+struct amdgpu_vce_clock_voltage_dependency_table {
+ u8 count;
+ struct amdgpu_vce_clock_voltage_dependency_entry *entries;
+};
+
+struct amdgpu_ppm_table {
+ u8 ppm_design;
+ u16 cpu_core_number;
+ u32 platform_tdp;
+ u32 small_ac_platform_tdp;
+ u32 platform_tdc;
+ u32 small_ac_platform_tdc;
+ u32 apu_tdp;
+ u32 dgpu_tdp;
+ u32 dgpu_ulv_power;
+ u32 tj_max;
+};
+
+struct amdgpu_cac_tdp_table {
+ u16 tdp;
+ u16 configurable_tdp;
+ u16 tdc;
+ u16 battery_power_limit;
+ u16 small_power_limit;
+ u16 low_cac_leakage;
+ u16 high_cac_leakage;
+ u16 maximum_power_delivery_limit;
+};
+
+struct amdgpu_dpm_dynamic_state {
+ struct amdgpu_clock_voltage_dependency_table vddc_dependency_on_sclk;
+ struct amdgpu_clock_voltage_dependency_table vddci_dependency_on_mclk;
+ struct amdgpu_clock_voltage_dependency_table vddc_dependency_on_mclk;
+ struct amdgpu_clock_voltage_dependency_table mvdd_dependency_on_mclk;
+ struct amdgpu_clock_voltage_dependency_table vddc_dependency_on_dispclk;
+ struct amdgpu_uvd_clock_voltage_dependency_table uvd_clock_voltage_dependency_table;
+ struct amdgpu_vce_clock_voltage_dependency_table vce_clock_voltage_dependency_table;
+ struct amdgpu_clock_voltage_dependency_table samu_clock_voltage_dependency_table;
+ struct amdgpu_clock_voltage_dependency_table acp_clock_voltage_dependency_table;
+ struct amdgpu_clock_voltage_dependency_table vddgfx_dependency_on_sclk;
+ struct amdgpu_clock_array valid_sclk_values;
+ struct amdgpu_clock_array valid_mclk_values;
+ struct amdgpu_clock_and_voltage_limits max_clock_voltage_on_dc;
+ struct amdgpu_clock_and_voltage_limits max_clock_voltage_on_ac;
+ u32 mclk_sclk_ratio;
+ u32 sclk_mclk_delta;
+ u16 vddc_vddci_delta;
+ u16 min_vddc_for_pcie_gen2;
+ struct amdgpu_cac_leakage_table cac_leakage_table;
+ struct amdgpu_phase_shedding_limits_table phase_shedding_limits_table;
+ struct amdgpu_ppm_table *ppm_table;
+ struct amdgpu_cac_tdp_table *cac_tdp_table;
+};
+
+struct amdgpu_dpm_fan {
+ u16 t_min;
+ u16 t_med;
+ u16 t_high;
+ u16 pwm_min;
+ u16 pwm_med;
+ u16 pwm_high;
+ u8 t_hyst;
+ u32 cycle_delay;
+ u16 t_max;
+ u8 control_mode;
+ u16 default_max_fan_pwm;
+ u16 default_fan_output_sensitivity;
+ u16 fan_output_sensitivity;
+ bool ucode_fan_control;
+};
+
+enum amdgpu_pcie_gen {
+ AMDGPU_PCIE_GEN1 = 0,
+ AMDGPU_PCIE_GEN2 = 1,
+ AMDGPU_PCIE_GEN3 = 2,
+ AMDGPU_PCIE_GEN_INVALID = 0xffff
+};
+
+enum amdgpu_dpm_forced_level {
+ AMDGPU_DPM_FORCED_LEVEL_AUTO = 0,
+ AMDGPU_DPM_FORCED_LEVEL_LOW = 1,
+ AMDGPU_DPM_FORCED_LEVEL_HIGH = 2,
+ AMDGPU_DPM_FORCED_LEVEL_MANUAL = 3,
+};
+
+struct amdgpu_dpm_funcs {
+ int (*get_temperature)(struct amdgpu_device *adev);
+ int (*pre_set_power_state)(struct amdgpu_device *adev);
+ int (*set_power_state)(struct amdgpu_device *adev);
+ void (*post_set_power_state)(struct amdgpu_device *adev);
+ void (*display_configuration_changed)(struct amdgpu_device *adev);
+ u32 (*get_sclk)(struct amdgpu_device *adev, bool low);
+ u32 (*get_mclk)(struct amdgpu_device *adev, bool low);
+ void (*print_power_state)(struct amdgpu_device *adev, struct amdgpu_ps *ps);
+ void (*debugfs_print_current_performance_level)(struct amdgpu_device *adev, struct seq_file *m);
+ int (*force_performance_level)(struct amdgpu_device *adev, enum amdgpu_dpm_forced_level level);
+ bool (*vblank_too_short)(struct amdgpu_device *adev);
+ void (*powergate_uvd)(struct amdgpu_device *adev, bool gate);
+ void (*powergate_vce)(struct amdgpu_device *adev, bool gate);
+ void (*enable_bapm)(struct amdgpu_device *adev, bool enable);
+ void (*set_fan_control_mode)(struct amdgpu_device *adev, u32 mode);
+ u32 (*get_fan_control_mode)(struct amdgpu_device *adev);
+ int (*set_fan_speed_percent)(struct amdgpu_device *adev, u32 speed);
+ int (*get_fan_speed_percent)(struct amdgpu_device *adev, u32 *speed);
+ int (*force_clock_level)(struct amdgpu_device *adev, enum pp_clock_type type, uint32_t mask);
+ int (*print_clock_levels)(struct amdgpu_device *adev, enum pp_clock_type type, char *buf);
+ int (*get_sclk_od)(struct amdgpu_device *adev);
+ int (*set_sclk_od)(struct amdgpu_device *adev, uint32_t value);
+ int (*get_mclk_od)(struct amdgpu_device *adev);
+ int (*set_mclk_od)(struct amdgpu_device *adev, uint32_t value);
+ int (*check_state_equal)(struct amdgpu_device *adev,
+ struct amdgpu_ps *cps,
+ struct amdgpu_ps *rps,
+ bool *equal);
+
+ struct amd_vce_state* (*get_vce_clock_state)(struct amdgpu_device *adev, unsigned idx);
+};
+
+#define amdgpu_dpm_pre_set_power_state(adev) (adev)->pm.funcs->pre_set_power_state((adev))
+#define amdgpu_dpm_set_power_state(adev) (adev)->pm.funcs->set_power_state((adev))
+#define amdgpu_dpm_post_set_power_state(adev) (adev)->pm.funcs->post_set_power_state((adev))
+#define amdgpu_dpm_display_configuration_changed(adev) (adev)->pm.funcs->display_configuration_changed((adev))
+#define amdgpu_dpm_print_power_state(adev, ps) (adev)->pm.funcs->print_power_state((adev), (ps))
+#define amdgpu_dpm_vblank_too_short(adev) (adev)->pm.funcs->vblank_too_short((adev))
+#define amdgpu_dpm_enable_bapm(adev, e) (adev)->pm.funcs->enable_bapm((adev), (e))
+
+#define amdgpu_dpm_read_sensor(adev, idx, value) \
+ ((adev)->pp_enabled ? \
+ (adev)->powerplay.pp_funcs->read_sensor(adev->powerplay.pp_handle, (idx), (value)) : \
+ -EINVAL)
+
+#define amdgpu_dpm_get_temperature(adev) \
+ ((adev)->pp_enabled ? \
+ (adev)->powerplay.pp_funcs->get_temperature((adev)->powerplay.pp_handle) : \
+ (adev)->pm.funcs->get_temperature((adev)))
+
+#define amdgpu_dpm_set_fan_control_mode(adev, m) \
+ ((adev)->pp_enabled ? \
+ (adev)->powerplay.pp_funcs->set_fan_control_mode((adev)->powerplay.pp_handle, (m)) : \
+ (adev)->pm.funcs->set_fan_control_mode((adev), (m)))
+
+#define amdgpu_dpm_get_fan_control_mode(adev) \
+ ((adev)->pp_enabled ? \
+ (adev)->powerplay.pp_funcs->get_fan_control_mode((adev)->powerplay.pp_handle) : \
+ (adev)->pm.funcs->get_fan_control_mode((adev)))
+
+#define amdgpu_dpm_set_fan_speed_percent(adev, s) \
+ ((adev)->pp_enabled ? \
+ (adev)->powerplay.pp_funcs->set_fan_speed_percent((adev)->powerplay.pp_handle, (s)) : \
+ (adev)->pm.funcs->set_fan_speed_percent((adev), (s)))
+
+#define amdgpu_dpm_get_fan_speed_percent(adev, s) \
+ ((adev)->pp_enabled ? \
+ (adev)->powerplay.pp_funcs->get_fan_speed_percent((adev)->powerplay.pp_handle, (s)) : \
+ (adev)->pm.funcs->get_fan_speed_percent((adev), (s)))
+
+#define amdgpu_dpm_get_fan_speed_rpm(adev, s) \
+ ((adev)->pp_enabled ? \
+ (adev)->powerplay.pp_funcs->get_fan_speed_rpm((adev)->powerplay.pp_handle, (s)) : \
+ -EINVAL)
+
+#define amdgpu_dpm_get_sclk(adev, l) \
+ ((adev)->pp_enabled ? \
+ (adev)->powerplay.pp_funcs->get_sclk((adev)->powerplay.pp_handle, (l)) : \
+ (adev)->pm.funcs->get_sclk((adev), (l)))
+
+#define amdgpu_dpm_get_mclk(adev, l) \
+ ((adev)->pp_enabled ? \
+ (adev)->powerplay.pp_funcs->get_mclk((adev)->powerplay.pp_handle, (l)) : \
+ (adev)->pm.funcs->get_mclk((adev), (l)))
+
+
+#define amdgpu_dpm_force_performance_level(adev, l) \
+ ((adev)->pp_enabled ? \
+ (adev)->powerplay.pp_funcs->force_performance_level((adev)->powerplay.pp_handle, (l)) : \
+ (adev)->pm.funcs->force_performance_level((adev), (l)))
+
+#define amdgpu_dpm_powergate_uvd(adev, g) \
+ ((adev)->pp_enabled ? \
+ (adev)->powerplay.pp_funcs->powergate_uvd((adev)->powerplay.pp_handle, (g)) : \
+ (adev)->pm.funcs->powergate_uvd((adev), (g)))
+
+#define amdgpu_dpm_powergate_vce(adev, g) \
+ ((adev)->pp_enabled ? \
+ (adev)->powerplay.pp_funcs->powergate_vce((adev)->powerplay.pp_handle, (g)) : \
+ (adev)->pm.funcs->powergate_vce((adev), (g)))
+
+#define amdgpu_dpm_get_current_power_state(adev) \
+ (adev)->powerplay.pp_funcs->get_current_power_state((adev)->powerplay.pp_handle)
+
+#define amdgpu_dpm_get_performance_level(adev) \
+ (adev)->powerplay.pp_funcs->get_performance_level((adev)->powerplay.pp_handle)
+
+#define amdgpu_dpm_get_pp_num_states(adev, data) \
+ (adev)->powerplay.pp_funcs->get_pp_num_states((adev)->powerplay.pp_handle, data)
+
+#define amdgpu_dpm_get_pp_table(adev, table) \
+ (adev)->powerplay.pp_funcs->get_pp_table((adev)->powerplay.pp_handle, table)
+
+#define amdgpu_dpm_set_pp_table(adev, buf, size) \
+ (adev)->powerplay.pp_funcs->set_pp_table((adev)->powerplay.pp_handle, buf, size)
+
+#define amdgpu_dpm_print_clock_levels(adev, type, buf) \
+ (adev)->powerplay.pp_funcs->print_clock_levels((adev)->powerplay.pp_handle, type, buf)
+
+#define amdgpu_dpm_force_clock_level(adev, type, level) \
+ (adev)->powerplay.pp_funcs->force_clock_level((adev)->powerplay.pp_handle, type, level)
+
+#define amdgpu_dpm_get_sclk_od(adev) \
+ (adev)->powerplay.pp_funcs->get_sclk_od((adev)->powerplay.pp_handle)
+
+#define amdgpu_dpm_set_sclk_od(adev, value) \
+ (adev)->powerplay.pp_funcs->set_sclk_od((adev)->powerplay.pp_handle, value)
+
+#define amdgpu_dpm_get_mclk_od(adev) \
+ ((adev)->powerplay.pp_funcs->get_mclk_od((adev)->powerplay.pp_handle))
+
+#define amdgpu_dpm_set_mclk_od(adev, value) \
+ ((adev)->powerplay.pp_funcs->set_mclk_od((adev)->powerplay.pp_handle, value))
+
+#define amdgpu_dpm_dispatch_task(adev, event_id, input, output) \
+ (adev)->powerplay.pp_funcs->dispatch_tasks((adev)->powerplay.pp_handle, (event_id), (input), (output))
+
+#define amgdpu_dpm_check_state_equal(adev, cps, rps, equal) (adev)->pm.funcs->check_state_equal((adev), (cps),(rps),(equal))
+
+#define amdgpu_dpm_get_vce_clock_state(adev, i) \
+ ((adev)->pp_enabled ? \
+ (adev)->powerplay.pp_funcs->get_vce_clock_state((adev)->powerplay.pp_handle, (i)) : \
+ (adev)->pm.funcs->get_vce_clock_state((adev), (i)))
+
+struct amdgpu_dpm {
+ struct amdgpu_ps *ps;
+ /* number of valid power states */
+ int num_ps;
+ /* current power state that is active */
+ struct amdgpu_ps *current_ps;
+ /* requested power state */
+ struct amdgpu_ps *requested_ps;
+ /* boot up power state */
+ struct amdgpu_ps *boot_ps;
+ /* default uvd power state */
+ struct amdgpu_ps *uvd_ps;
+ /* vce requirements */
+ u32 num_of_vce_states;
+ struct amd_vce_state vce_states[AMD_MAX_VCE_LEVELS];
+ enum amd_vce_level vce_level;
+ enum amd_pm_state_type state;
+ enum amd_pm_state_type user_state;
+ enum amd_pm_state_type last_state;
+ enum amd_pm_state_type last_user_state;
+ u32 platform_caps;
+ u32 voltage_response_time;
+ u32 backbias_response_time;
+ void *priv;
+ u32 new_active_crtcs;
+ int new_active_crtc_count;
+ u32 current_active_crtcs;
+ int current_active_crtc_count;
+ struct amdgpu_dpm_dynamic_state dyn_state;
+ struct amdgpu_dpm_fan fan;
+ u32 tdp_limit;
+ u32 near_tdp_limit;
+ u32 near_tdp_limit_adjusted;
+ u32 sq_ramping_threshold;
+ u32 cac_leakage;
+ u16 tdp_od_limit;
+ u32 tdp_adjustment;
+ u16 load_line_slope;
+ bool power_control;
+ bool ac_power;
+ /* special states active */
+ bool thermal_active;
+ bool uvd_active;
+ bool vce_active;
+ /* thermal handling */
+ struct amdgpu_dpm_thermal thermal;
+ /* forced levels */
+ enum amdgpu_dpm_forced_level forced_level;
+};
+
+struct amdgpu_pm {
+ struct mutex mutex;
+ u32 current_sclk;
+ u32 current_mclk;
+ u32 default_sclk;
+ u32 default_mclk;
+ struct amdgpu_i2c_chan *i2c_bus;
+ /* internal thermal controller on rv6xx+ */
+ enum amdgpu_int_thermal_type int_thermal_type;
+ struct device *int_hwmon_dev;
+ /* fan control parameters */
+ bool no_fan;
+ u8 fan_pulses_per_revolution;
+ u8 fan_min_rpm;
+ u8 fan_max_rpm;
+ /* dpm */
+ bool dpm_enabled;
+ bool sysfs_initialized;
+ struct amdgpu_dpm dpm;
+ const struct firmware *fw; /* SMC firmware */
+ uint32_t fw_version;
+ const struct amdgpu_dpm_funcs *funcs;
+ uint32_t pcie_gen_mask;
+ uint32_t pcie_mlw_mask;
+ struct amd_pp_display_configuration pm_display_cfg;/* set by DAL */
+};
+
#define R600_SSTU_DFLT 0
#define R600_SST_DFLT 0x00C8
@@ -82,4 +529,7 @@ u16 amdgpu_get_pcie_lane_support(struct amdgpu_device *adev,
u16 default_lanes);
u8 amdgpu_encode_pci_lane_width(u32 lanes);
+struct amd_vce_state*
+amdgpu_get_vce_clock_state(struct amdgpu_device *adev, unsigned idx);
+
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index e0890deccb2f..8cb937b2bfcc 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -58,9 +58,10 @@
* - 3.6.0 - kmd involves use CONTEXT_CONTROL in ring buffer.
* - 3.7.0 - Add support for VCE clock list packet
* - 3.8.0 - Add support raster config init in the kernel
+ * - 3.9.0 - Add support for memory query info about VRAM and GTT.
*/
#define KMS_DRIVER_MAJOR 3
-#define KMS_DRIVER_MINOR 8
+#define KMS_DRIVER_MINOR 9
#define KMS_DRIVER_PATCHLEVEL 0
int amdgpu_vram_limit = 0;
@@ -85,12 +86,13 @@ int amdgpu_vm_size = 64;
int amdgpu_vm_block_size = -1;
int amdgpu_vm_fault_stop = 0;
int amdgpu_vm_debug = 0;
+int amdgpu_vram_page_split = 1024;
int amdgpu_exp_hw_support = 0;
int amdgpu_sched_jobs = 32;
int amdgpu_sched_hw_submission = 2;
int amdgpu_powerplay = -1;
-int amdgpu_powercontainment = 1;
-int amdgpu_sclk_deep_sleep_en = 1;
+int amdgpu_no_evict = 0;
+int amdgpu_direct_gma_size = 0;
unsigned amdgpu_pcie_gen_cap = 0;
unsigned amdgpu_pcie_lane_cap = 0;
unsigned amdgpu_cg_mask = 0xffffffff;
@@ -165,6 +167,9 @@ module_param_named(vm_fault_stop, amdgpu_vm_fault_stop, int, 0444);
MODULE_PARM_DESC(vm_debug, "Debug VM handling (0 = disabled (default), 1 = enabled)");
module_param_named(vm_debug, amdgpu_vm_debug, int, 0644);
+MODULE_PARM_DESC(vram_page_split, "Number of pages after we split VRAM allocations (default 1024, -1 = disable)");
+module_param_named(vram_page_split, amdgpu_vram_page_split, int, 0444);
+
MODULE_PARM_DESC(exp_hw_support, "experimental hw support (1 = enable, 0 = disable (default))");
module_param_named(exp_hw_support, amdgpu_exp_hw_support, int, 0444);
@@ -177,14 +182,14 @@ module_param_named(sched_hw_submission, amdgpu_sched_hw_submission, int, 0444);
MODULE_PARM_DESC(powerplay, "Powerplay component (1 = enable, 0 = disable, -1 = auto (default))");
module_param_named(powerplay, amdgpu_powerplay, int, 0444);
-MODULE_PARM_DESC(powercontainment, "Power Containment (1 = enable (default), 0 = disable)");
-module_param_named(powercontainment, amdgpu_powercontainment, int, 0444);
-
MODULE_PARM_DESC(ppfeaturemask, "all power features enabled (default)");
module_param_named(ppfeaturemask, amdgpu_pp_feature_mask, int, 0444);
-MODULE_PARM_DESC(sclkdeepsleep, "SCLK Deep Sleep (1 = enable (default), 0 = disable)");
-module_param_named(sclkdeepsleep, amdgpu_sclk_deep_sleep_en, int, 0444);
+MODULE_PARM_DESC(no_evict, "Support pinning request from user space (1 = enable, 0 = disable (default))");
+module_param_named(no_evict, amdgpu_no_evict, int, 0444);
+
+MODULE_PARM_DESC(direct_gma_size, "Direct GMA size in megabytes (max 96MB)");
+module_param_named(direct_gma_size, amdgpu_direct_gma_size, int, 0444);
MODULE_PARM_DESC(pcie_gen_cap, "PCIE Gen Caps (0: autodetect (default))");
module_param_named(pcie_gen_cap, amdgpu_pcie_gen_cap, uint, 0444);
@@ -201,7 +206,8 @@ module_param_named(pg_mask, amdgpu_pg_mask, uint, 0444);
MODULE_PARM_DESC(disable_cu, "Disable CUs (se.sh.cu,...)");
module_param_named(disable_cu, amdgpu_disable_cu, charp, 0444);
-MODULE_PARM_DESC(virtual_display, "Enable virtual display feature (the virtual_display will be set like xxxx:xx:xx.x;xxxx:xx:xx.x)");
+MODULE_PARM_DESC(virtual_display,
+ "Enable virtual display feature (the virtual_display will be set like xxxx:xx:xx.x,x;xxxx:xx:xx.x,x)");
module_param_named(virtual_display, amdgpu_virtual_display, charp, 0444);
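
With the extended format each PCI address may carry a CRTC count, so e.g. modprobe amdgpu virtual_display="0000:26:00.0,4;0000:26:00.1" (hypothetical addresses, quoted so the shell keeps the ';') enables two virtual displays with 4 and 1 CRTCs respectively.
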
static const struct pci_device_id pciidlist[] = {
@@ -381,6 +387,7 @@ static const struct pci_device_id pciidlist[] = {
{0x1002, 0x6939, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TONGA},
/* fiji */
{0x1002, 0x7300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_FIJI},
+ {0x1002, 0x730F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_FIJI},
/* carrizo */
{0x1002, 0x9870, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CARRIZO|AMD_IS_APU},
{0x1002, 0x9874, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CARRIZO|AMD_IS_APU},
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
index 9fb8aa4d6bae..24629bec181a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
@@ -75,27 +75,21 @@ amdgpufb_release(struct fb_info *info, int user)
static struct fb_ops amdgpufb_ops = {
.owner = THIS_MODULE,
+ DRM_FB_HELPER_DEFAULT_OPS,
.fb_open = amdgpufb_open,
.fb_release = amdgpufb_release,
- .fb_check_var = drm_fb_helper_check_var,
- .fb_set_par = drm_fb_helper_set_par,
.fb_fillrect = drm_fb_helper_cfb_fillrect,
.fb_copyarea = drm_fb_helper_cfb_copyarea,
.fb_imageblit = drm_fb_helper_cfb_imageblit,
- .fb_pan_display = drm_fb_helper_pan_display,
- .fb_blank = drm_fb_helper_blank,
- .fb_setcmap = drm_fb_helper_setcmap,
- .fb_debug_enter = drm_fb_helper_debug_enter,
- .fb_debug_leave = drm_fb_helper_debug_leave,
};
-int amdgpu_align_pitch(struct amdgpu_device *adev, int width, int bpp, bool tiled)
+int amdgpu_align_pitch(struct amdgpu_device *adev, int width, int cpp, bool tiled)
{
int aligned = width;
int pitch_mask = 0;
- switch (bpp / 8) {
+ switch (cpp) {
case 1:
pitch_mask = 255;
break;
@@ -110,7 +104,7 @@ int amdgpu_align_pitch(struct amdgpu_device *adev, int width, int bpp, bool tile
aligned += pitch_mask;
aligned &= ~pitch_mask;
- return aligned;
+ return aligned * cpp;
}
static void amdgpufb_destroy_pinned_object(struct drm_gem_object *gobj)
@@ -139,20 +133,21 @@ static int amdgpufb_create_pinned_object(struct amdgpu_fbdev *rfbdev,
int ret;
int aligned_size, size;
int height = mode_cmd->height;
- u32 bpp, depth;
+ u32 cpp;
- drm_fb_get_bpp_depth(mode_cmd->pixel_format, &depth, &bpp);
+ cpp = drm_format_plane_cpp(mode_cmd->pixel_format, 0);
/* need to align pitch with crtc limits */
- mode_cmd->pitches[0] = amdgpu_align_pitch(adev, mode_cmd->width, bpp,
- fb_tiled) * ((bpp + 1) / 8);
+ mode_cmd->pitches[0] = amdgpu_align_pitch(adev, mode_cmd->width, cpp,
+ fb_tiled);
height = ALIGN(mode_cmd->height, 8);
size = mode_cmd->pitches[0] * height;
aligned_size = ALIGN(size, PAGE_SIZE);
ret = amdgpu_gem_object_create(adev, aligned_size, 0,
AMDGPU_GEM_DOMAIN_VRAM,
- AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
+ AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
+ AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
true, &gobj);
if (ret) {
printk(KERN_ERR "failed to allocate framebuffer (%d)\n",
@@ -176,7 +171,7 @@ static int amdgpufb_create_pinned_object(struct amdgpu_fbdev *rfbdev,
}
- ret = amdgpu_bo_pin_restricted(abo, AMDGPU_GEM_DOMAIN_VRAM, 0, 0, NULL);
+ ret = amdgpu_bo_pin(abo, AMDGPU_GEM_DOMAIN_VRAM, NULL);
if (ret) {
amdgpu_bo_unreserve(abo);
goto out_unref;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
index 77b34ec92632..7b60fb79c3a6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
@@ -48,7 +48,7 @@
*/
struct amdgpu_fence {
- struct fence base;
+ struct dma_fence base;
/* RB, DMA, etc. */
struct amdgpu_ring *ring;
@@ -74,8 +74,8 @@ void amdgpu_fence_slab_fini(void)
/*
* Cast helper
*/
-static const struct fence_ops amdgpu_fence_ops;
-static inline struct amdgpu_fence *to_amdgpu_fence(struct fence *f)
+static const struct dma_fence_ops amdgpu_fence_ops;
+static inline struct amdgpu_fence *to_amdgpu_fence(struct dma_fence *f)
{
struct amdgpu_fence *__f = container_of(f, struct amdgpu_fence, base);
@@ -131,11 +131,11 @@ static u32 amdgpu_fence_read(struct amdgpu_ring *ring)
* Emits a fence command on the requested ring (all asics).
* Returns 0 on success, -ENOMEM on failure.
*/
-int amdgpu_fence_emit(struct amdgpu_ring *ring, struct fence **f)
+int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f)
{
struct amdgpu_device *adev = ring->adev;
struct amdgpu_fence *fence;
- struct fence *old, **ptr;
+ struct dma_fence *old, **ptr;
uint32_t seq;
fence = kmem_cache_alloc(amdgpu_fence_slab, GFP_KERNEL);
@@ -144,10 +144,10 @@ int amdgpu_fence_emit(struct amdgpu_ring *ring, struct fence **f)
seq = ++ring->fence_drv.sync_seq;
fence->ring = ring;
- fence_init(&fence->base, &amdgpu_fence_ops,
- &ring->fence_drv.lock,
- adev->fence_context + ring->idx,
- seq);
+ dma_fence_init(&fence->base, &amdgpu_fence_ops,
+ &ring->fence_drv.lock,
+ adev->fence_context + ring->idx,
+ seq);
amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr,
seq, AMDGPU_FENCE_FLAG_INT);
@@ -156,12 +156,12 @@ int amdgpu_fence_emit(struct amdgpu_ring *ring, struct fence **f)
* emitting the fence would mess up the hardware ring buffer.
*/
old = rcu_dereference_protected(*ptr, 1);
- if (old && !fence_is_signaled(old)) {
+ if (old && !dma_fence_is_signaled(old)) {
DRM_INFO("rcu slot is busy\n");
- fence_wait(old, false);
+ dma_fence_wait(old, false);
}
- rcu_assign_pointer(*ptr, fence_get(&fence->base));
+ rcu_assign_pointer(*ptr, dma_fence_get(&fence->base));
*f = &fence->base;
@@ -212,7 +212,7 @@ void amdgpu_fence_process(struct amdgpu_ring *ring)
seq &= drv->num_fences_mask;
do {
- struct fence *fence, **ptr;
+ struct dma_fence *fence, **ptr;
++last_seq;
last_seq &= drv->num_fences_mask;
@@ -225,13 +225,13 @@ void amdgpu_fence_process(struct amdgpu_ring *ring)
if (!fence)
continue;
- r = fence_signal(fence);
+ r = dma_fence_signal(fence);
if (!r)
- FENCE_TRACE(fence, "signaled from irq context\n");
+ DMA_FENCE_TRACE(fence, "signaled from irq context\n");
else
BUG();
- fence_put(fence);
+ dma_fence_put(fence);
} while (last_seq != seq);
}
@@ -261,7 +261,7 @@ static void amdgpu_fence_fallback(unsigned long arg)
int amdgpu_fence_wait_empty(struct amdgpu_ring *ring)
{
uint64_t seq = ACCESS_ONCE(ring->fence_drv.sync_seq);
- struct fence *fence, **ptr;
+ struct dma_fence *fence, **ptr;
int r;
if (!seq)
@@ -270,14 +270,14 @@ int amdgpu_fence_wait_empty(struct amdgpu_ring *ring)
ptr = &ring->fence_drv.fences[seq & ring->fence_drv.num_fences_mask];
rcu_read_lock();
fence = rcu_dereference(*ptr);
- if (!fence || !fence_get_rcu(fence)) {
+ if (!fence || !dma_fence_get_rcu(fence)) {
rcu_read_unlock();
return 0;
}
rcu_read_unlock();
- r = fence_wait(fence, false);
- fence_put(fence);
+ r = dma_fence_wait(fence, false);
+ dma_fence_put(fence);
return r;
}
@@ -382,24 +382,27 @@ int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring,
if (!ring->fence_drv.fences)
return -ENOMEM;
- timeout = msecs_to_jiffies(amdgpu_lockup_timeout);
- if (timeout == 0) {
- /*
- * FIXME:
- * Delayed workqueue cannot use it directly,
- * so the scheduler will not use delayed workqueue if
- * MAX_SCHEDULE_TIMEOUT is set.
- * Currently keep it simple and silly.
- */
- timeout = MAX_SCHEDULE_TIMEOUT;
- }
- r = amd_sched_init(&ring->sched, &amdgpu_sched_ops,
- num_hw_submission,
- timeout, ring->name);
- if (r) {
- DRM_ERROR("Failed to create scheduler on ring %s.\n",
- ring->name);
- return r;
+ /* No need to set up the GPU scheduler for KIQ ring */
+ if (ring->funcs->type != AMDGPU_RING_TYPE_KIQ) {
+ timeout = msecs_to_jiffies(amdgpu_lockup_timeout);
+ if (timeout == 0) {
+ /*
+ * FIXME:
+ * Delayed workqueue cannot use it directly,
+ * so the scheduler will not use delayed workqueue if
+ * MAX_SCHEDULE_TIMEOUT is set.
+ * Currently keep it simple and silly.
+ */
+ timeout = MAX_SCHEDULE_TIMEOUT;
+ }
+ r = amd_sched_init(&ring->sched, &amdgpu_sched_ops,
+ num_hw_submission,
+ timeout, ring->name);
+ if (r) {
+ DRM_ERROR("Failed to create scheduler on ring %s.\n",
+ ring->name);
+ return r;
+ }
}
return 0;
@@ -453,7 +456,7 @@ void amdgpu_fence_driver_fini(struct amdgpu_device *adev)
amd_sched_fini(&ring->sched);
del_timer_sync(&ring->fence_drv.fallback_timer);
for (j = 0; j <= ring->fence_drv.num_fences_mask; ++j)
- fence_put(ring->fence_drv.fences[j]);
+ dma_fence_put(ring->fence_drv.fences[j]);
kfree(ring->fence_drv.fences);
ring->fence_drv.fences = NULL;
ring->fence_drv.initialized = false;
@@ -542,12 +545,12 @@ void amdgpu_fence_driver_force_completion(struct amdgpu_device *adev)
* Common fence implementation
*/
-static const char *amdgpu_fence_get_driver_name(struct fence *fence)
+static const char *amdgpu_fence_get_driver_name(struct dma_fence *fence)
{
return "amdgpu";
}
-static const char *amdgpu_fence_get_timeline_name(struct fence *f)
+static const char *amdgpu_fence_get_timeline_name(struct dma_fence *f)
{
struct amdgpu_fence *fence = to_amdgpu_fence(f);
return (const char *)fence->ring->name;
@@ -561,7 +564,7 @@ static const char *amdgpu_fence_get_timeline_name(struct fence *f)
* to fence_queue that checks if this fence is signaled, and if so it
* signals the fence and removes itself.
*/
-static bool amdgpu_fence_enable_signaling(struct fence *f)
+static bool amdgpu_fence_enable_signaling(struct dma_fence *f)
{
struct amdgpu_fence *fence = to_amdgpu_fence(f);
struct amdgpu_ring *ring = fence->ring;
@@ -569,7 +572,7 @@ static bool amdgpu_fence_enable_signaling(struct fence *f)
if (!timer_pending(&ring->fence_drv.fallback_timer))
amdgpu_fence_schedule_fallback(ring);
- FENCE_TRACE(&fence->base, "armed on ring %i!\n", ring->idx);
+ DMA_FENCE_TRACE(&fence->base, "armed on ring %i!\n", ring->idx);
return true;
}
@@ -583,7 +586,7 @@ static bool amdgpu_fence_enable_signaling(struct fence *f)
*/
static void amdgpu_fence_free(struct rcu_head *rcu)
{
- struct fence *f = container_of(rcu, struct fence, rcu);
+ struct dma_fence *f = container_of(rcu, struct dma_fence, rcu);
struct amdgpu_fence *fence = to_amdgpu_fence(f);
kmem_cache_free(amdgpu_fence_slab, fence);
}
@@ -596,16 +599,16 @@ static void amdgpu_fence_free(struct rcu_head *rcu)
* This function is called when the reference count becomes zero.
* It just RCU schedules freeing up the fence.
*/
-static void amdgpu_fence_release(struct fence *f)
+static void amdgpu_fence_release(struct dma_fence *f)
{
call_rcu(&f->rcu, amdgpu_fence_free);
}
-static const struct fence_ops amdgpu_fence_ops = {
+static const struct dma_fence_ops amdgpu_fence_ops = {
.get_driver_name = amdgpu_fence_get_driver_name,
.get_timeline_name = amdgpu_fence_get_timeline_name,
.enable_signaling = amdgpu_fence_enable_signaling,
- .wait = fence_default_wait,
+ .wait = dma_fence_default_wait,
.release = amdgpu_fence_release,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
index 21a1242fc13b..964d2a946ed5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
@@ -126,7 +126,8 @@ int amdgpu_gart_table_vram_alloc(struct amdgpu_device *adev)
if (adev->gart.robj == NULL) {
r = amdgpu_bo_create(adev, adev->gart.table_size,
PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_VRAM,
- AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
+ AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
+ AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
NULL, NULL, &adev->gart.robj);
if (r) {
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index a7ea9a3b454e..cd62f6ffde2a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -116,10 +116,11 @@ void amdgpu_gem_force_release(struct amdgpu_device *adev)
* Called from drm_gem_handle_create(), which appears in both the new and
* open ioctl cases.
*/
-int amdgpu_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_priv)
+int amdgpu_gem_object_open(struct drm_gem_object *obj,
+ struct drm_file *file_priv)
{
struct amdgpu_bo *abo = gem_to_amdgpu_bo(obj);
- struct amdgpu_device *adev = abo->adev;
+ struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev);
struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
struct amdgpu_vm *vm = &fpriv->vm;
struct amdgpu_bo_va *bo_va;
@@ -142,7 +143,7 @@ void amdgpu_gem_object_close(struct drm_gem_object *obj,
struct drm_file *file_priv)
{
struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
- struct amdgpu_device *adev = bo->adev;
+ struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
struct amdgpu_vm *vm = &fpriv->vm;
@@ -407,10 +408,8 @@ int amdgpu_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
return -ENOENT;
}
robj = gem_to_amdgpu_bo(gobj);
- if (timeout == 0)
- ret = reservation_object_test_signaled_rcu(robj->tbo.resv, true);
- else
- ret = reservation_object_wait_timeout_rcu(robj->tbo.resv, true, true, timeout);
+ ret = reservation_object_wait_timeout_rcu(robj->tbo.resv, true, true,
+ timeout);
/* ret == 0 means not signaled,
* ret > 0 means signaled
@@ -470,6 +469,16 @@ out:
return r;
}
+static int amdgpu_gem_va_check(void *param, struct amdgpu_bo *bo)
+{
+ unsigned domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
+
+ /* If anything is swapped out, don't swap it in here;
+ * just abort and wait for the next CS. */
+
+ return domain == AMDGPU_GEM_DOMAIN_CPU ? -ERESTARTSYS : 0;
+}
+
/**
* amdgpu_gem_va_update_vm - update the bo_va in its VM
*
@@ -480,7 +489,8 @@ out:
* vital here, so they are not reported back to userspace.
*/
static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
- struct amdgpu_bo_va *bo_va, uint32_t operation)
+ struct amdgpu_bo_va *bo_va,
+ uint32_t operation)
{
struct ttm_validate_buffer tv, *entry;
struct amdgpu_bo_list_entry vm_pd;
@@ -503,7 +513,6 @@ static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
if (r)
goto error_print;
- amdgpu_vm_get_pt_bos(adev, bo_va->vm, &duplicates);
list_for_each_entry(entry, &list, head) {
domain = amdgpu_mem_type_to_domain(entry->bo->mem.mem_type);
/* if anything is swapped out don't swap it in here,
@@ -511,13 +520,10 @@ static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
if (domain == AMDGPU_GEM_DOMAIN_CPU)
goto error_unreserve;
}
- list_for_each_entry(entry, &duplicates, head) {
- domain = amdgpu_mem_type_to_domain(entry->bo->mem.mem_type);
- /* if anything is swapped out don't swap it in here,
- just abort and wait for the next CS */
- if (domain == AMDGPU_GEM_DOMAIN_CPU)
- goto error_unreserve;
- }
+ r = amdgpu_vm_validate_pt_bos(adev, bo_va->vm, amdgpu_gem_va_check,
+ NULL);
+ if (r)
+ goto error_unreserve;
r = amdgpu_vm_update_page_directory(adev, bo_va->vm);
if (r)
@@ -538,8 +544,6 @@ error_print:
DRM_ERROR("Couldn't update BO_VA (%d)\n", r);
}
-
-
int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp)
{
@@ -549,7 +553,8 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
struct amdgpu_fpriv *fpriv = filp->driver_priv;
struct amdgpu_bo *abo;
struct amdgpu_bo_va *bo_va;
- struct ttm_validate_buffer tv, tv_pd;
+ struct amdgpu_bo_list_entry vm_pd;
+ struct ttm_validate_buffer tv;
struct ww_acquire_ctx ticket;
struct list_head list, duplicates;
uint32_t invalid_flags, va_flags = 0;
@@ -594,9 +599,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
tv.shared = true;
list_add(&tv.head, &list);
- tv_pd.bo = &fpriv->vm.page_directory->tbo;
- tv_pd.shared = true;
- list_add(&tv_pd.head, &list);
+ amdgpu_vm_get_pd_bo(&fpriv->vm, &list, &vm_pd);
r = ttm_eu_reserve_buffers(&ticket, &list, true, &duplicates);
if (r) {
@@ -704,7 +707,8 @@ int amdgpu_mode_dumb_create(struct drm_file *file_priv,
uint32_t handle;
int r;
- args->pitch = amdgpu_align_pitch(adev, args->width, args->bpp, 0) * ((args->bpp + 1) / 8);
+ args->pitch = amdgpu_align_pitch(adev, args->width,
+ DIV_ROUND_UP(args->bpp, 8), 0);
args->size = (u64)args->pitch * args->height;
args->size = ALIGN(args->size, PAGE_SIZE);
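The dumb-create hunk above fixes the bytes-per-pixel rounding: (bpp + 1) / 8 only matches the ceiling for depths that are a multiple of 8 (or one below it), while DIV_ROUND_UP(bpp, 8) is the correct ceiling division; the conversion also now happens once, inside the amdgpu_align_pitch() call. A userspace-style illustration of the difference (plain C, not driver code):

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned bpp;

	for (bpp = 8; bpp <= 32; bpp += 4)
		printf("bpp=%2u  old=(bpp+1)/8 -> %u  new=DIV_ROUND_UP -> %u\n",
		       bpp, (bpp + 1) / 8, DIV_ROUND_UP(bpp, 8));
	/* e.g. bpp=12: old yields 1 byte per pixel, new correctly yields 2 */
	return 0;
}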
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
index a074edd95c70..01a42b6a69a4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
@@ -24,6 +24,7 @@
*/
#include <drm/drmP.h>
#include "amdgpu.h"
+#include "amdgpu_gfx.h"
/*
* GPU scratch registers helpers function.
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
index 51321e154c09..e02044086445 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
@@ -27,6 +27,7 @@
int amdgpu_gfx_scratch_get(struct amdgpu_device *adev, uint32_t *reg);
void amdgpu_gfx_scratch_free(struct amdgpu_device *adev, uint32_t reg);
-unsigned amdgpu_gfx_parse_disable_cu(unsigned *mask, unsigned max_se, unsigned max_sh);
+void amdgpu_gfx_parse_disable_cu(unsigned *mask, unsigned max_se,
+ unsigned max_sh);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
index f86c84427778..00f46b0e076d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
@@ -164,10 +164,13 @@ static int amdgpu_gtt_mgr_new(struct ttm_mem_type_manager *man,
spin_unlock(&mgr->lock);
node = kzalloc(sizeof(*node), GFP_KERNEL);
- if (!node)
- return -ENOMEM;
+ if (!node) {
+ r = -ENOMEM;
+ goto err_out;
+ }
node->start = AMDGPU_BO_INVALID_OFFSET;
+ node->size = mem->num_pages;
mem->mm_node = node;
if (place->fpfn || place->lpfn || place->flags & TTM_PL_FLAG_TOPDOWN) {
@@ -175,12 +178,20 @@ static int amdgpu_gtt_mgr_new(struct ttm_mem_type_manager *man,
if (unlikely(r)) {
kfree(node);
mem->mm_node = NULL;
+ r = 0;
+ goto err_out;
}
} else {
mem->start = node->start;
}
return 0;
+err_out:
+ spin_lock(&mgr->lock);
+ mgr->available += mem->num_pages;
+ spin_unlock(&mgr->lock);
+
+ return r;
}
/**
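The error paths added to amdgpu_gtt_mgr_new() exist because the pages were already charged against mgr->available before this point (in context truncated above), so every failure must credit them back; note the ranged-allocation failure even resets r to 0 so TTM can fall back to another placement while the refund still happens. The take-first, roll-back-on-error shape as a self-contained sketch (my_mgr and my_node are illustrative):

#include <linux/slab.h>
#include <linux/spinlock.h>

struct my_mgr {
	spinlock_t lock;
	unsigned long available;	/* free page budget */
};

struct my_node {
	unsigned long start, size;
};

static int reserve_pages(struct my_mgr *mgr, unsigned long num_pages)
{
	struct my_node *node;
	int r = 0;

	spin_lock(&mgr->lock);
	if (mgr->available < num_pages) {
		spin_unlock(&mgr->lock);
		return -ENOSPC;
	}
	mgr->available -= num_pages;	/* charge up front */
	spin_unlock(&mgr->lock);

	node = kzalloc(sizeof(*node), GFP_KERNEL);
	if (!node) {
		r = -ENOMEM;
		goto err_out;
	}
	node->size = num_pages;
	/* ... later setup steps would also 'goto err_out' on failure ... */
	return 0;

err_out:
	spin_lock(&mgr->lock);
	mgr->available += num_pages;	/* credit the pages back */
	spin_unlock(&mgr->lock);
	return r;
}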
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
index 6a6c86c9c169..216a9572d946 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
@@ -89,7 +89,7 @@ int amdgpu_ib_get(struct amdgpu_device *adev, struct amdgpu_vm *vm,
* Free an IB (all asics).
*/
void amdgpu_ib_free(struct amdgpu_device *adev, struct amdgpu_ib *ib,
- struct fence *f)
+ struct dma_fence *f)
{
amdgpu_sa_bo_free(adev, &ib->sa_bo, f);
}
@@ -116,8 +116,8 @@ void amdgpu_ib_free(struct amdgpu_device *adev, struct amdgpu_ib *ib,
* to SI there was just a DE IB.
*/
int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
- struct amdgpu_ib *ibs, struct fence *last_vm_update,
- struct amdgpu_job *job, struct fence **f)
+ struct amdgpu_ib *ibs, struct dma_fence *last_vm_update,
+ struct amdgpu_job *job, struct dma_fence **f)
{
struct amdgpu_device *adev = ring->adev;
struct amdgpu_ib *ib = &ibs[0];
@@ -152,8 +152,8 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
return -EINVAL;
}
- alloc_size = amdgpu_ring_get_dma_frame_size(ring) +
- num_ibs * amdgpu_ring_get_emit_ib_size(ring);
+ alloc_size = ring->funcs->emit_frame_size + num_ibs *
+ ring->funcs->emit_ib_size;
r = amdgpu_ring_alloc(ring, alloc_size);
if (r) {
@@ -161,7 +161,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
return r;
}
- if (ring->type == AMDGPU_RING_TYPE_SDMA && ring->funcs->init_cond_exec)
+ if (ring->funcs->init_cond_exec)
patch_offset = amdgpu_ring_init_cond_exec(ring);
if (vm) {
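The allocation-size hunk above swaps two accessor calls for the constant per-ring emit sizes this series introduces, and the cond_exec hunk drops the SDMA-only test because the callback pointer is simply NULL on rings that lack it. A tiny sketch of the size computation (illustrative function; the constants live in the amdgpu_ring_funcs table added later in this patch):

/* Worst-case ring dwords for one submission: fixed frame overhead
 * plus one IB-emit packet per IB. */
static unsigned submission_alloc_size(unsigned emit_frame_size,
				      unsigned emit_ib_size,
				      unsigned num_ibs)
{
	return emit_frame_size + num_ibs * emit_ib_size;
}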
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
index 9fa809876339..fb902932f571 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
@@ -424,15 +424,6 @@ int amdgpu_irq_get(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
return 0;
}
-bool amdgpu_irq_get_delayed(struct amdgpu_device *adev,
- struct amdgpu_irq_src *src,
- unsigned type)
-{
- if ((type >= src->num_types) || !src->enabled_types)
- return false;
- return atomic_inc_return(&src->enabled_types[type]) == 1;
-}
-
/**
* amdgpu_irq_put - disable interrupt
*
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h
index f016464035b8..1642f4108297 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h
@@ -88,9 +88,6 @@ int amdgpu_irq_update(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
unsigned type);
int amdgpu_irq_get(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
unsigned type);
-bool amdgpu_irq_get_delayed(struct amdgpu_device *adev,
- struct amdgpu_irq_src *src,
- unsigned type);
int amdgpu_irq_put(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
unsigned type);
bool amdgpu_irq_enabled(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
index 8c5807994073..a0de6286c453 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
@@ -81,7 +81,7 @@ int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, unsigned size,
void amdgpu_job_free_resources(struct amdgpu_job *job)
{
- struct fence *f;
+ struct dma_fence *f;
unsigned i;
/* use sched fence if available */
@@ -95,7 +95,7 @@ static void amdgpu_job_free_cb(struct amd_sched_job *s_job)
{
struct amdgpu_job *job = container_of(s_job, struct amdgpu_job, base);
- fence_put(job->fence);
+ dma_fence_put(job->fence);
amdgpu_sync_free(&job->sync);
kfree(job);
}
@@ -104,14 +104,14 @@ void amdgpu_job_free(struct amdgpu_job *job)
{
amdgpu_job_free_resources(job);
- fence_put(job->fence);
+ dma_fence_put(job->fence);
amdgpu_sync_free(&job->sync);
kfree(job);
}
int amdgpu_job_submit(struct amdgpu_job *job, struct amdgpu_ring *ring,
struct amd_sched_entity *entity, void *owner,
- struct fence **f)
+ struct dma_fence **f)
{
int r;
job->ring = ring;
@@ -125,19 +125,19 @@ int amdgpu_job_submit(struct amdgpu_job *job, struct amdgpu_ring *ring,
job->owner = owner;
job->fence_ctx = entity->fence_context;
- *f = fence_get(&job->base.s_fence->finished);
+ *f = dma_fence_get(&job->base.s_fence->finished);
amdgpu_job_free_resources(job);
amd_sched_entity_push_job(&job->base);
return 0;
}
-static struct fence *amdgpu_job_dependency(struct amd_sched_job *sched_job)
+static struct dma_fence *amdgpu_job_dependency(struct amd_sched_job *sched_job)
{
struct amdgpu_job *job = to_amdgpu_job(sched_job);
struct amdgpu_vm *vm = job->vm;
- struct fence *fence = amdgpu_sync_get_fence(&job->sync);
+ struct dma_fence *fence = amdgpu_sync_get_fence(&job->sync);
if (fence == NULL && vm && !job->vm_id) {
struct amdgpu_ring *ring = job->ring;
@@ -155,9 +155,9 @@ static struct fence *amdgpu_job_dependency(struct amd_sched_job *sched_job)
return fence;
}
-static struct fence *amdgpu_job_run(struct amd_sched_job *sched_job)
+static struct dma_fence *amdgpu_job_run(struct amd_sched_job *sched_job)
{
- struct fence *fence = NULL;
+ struct dma_fence *fence = NULL;
struct amdgpu_job *job;
int r;
@@ -176,8 +176,8 @@ static struct fence *amdgpu_job_run(struct amd_sched_job *sched_job)
DRM_ERROR("Error scheduling IBs (%d)\n", r);
/* if gpu reset, hw fence will be replaced here */
- fence_put(job->fence);
- job->fence = fence_get(fence);
+ dma_fence_put(job->fence);
+ job->fence = dma_fence_get(fence);
amdgpu_job_free_resources(job);
return fence;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index 3938fca1ea8e..9af87eaf8ee3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -308,10 +308,10 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
}
for (i = 0; i < adev->num_ip_blocks; i++) {
- if (adev->ip_blocks[i].type == type &&
- adev->ip_block_status[i].valid) {
- ip.hw_ip_version_major = adev->ip_blocks[i].major;
- ip.hw_ip_version_minor = adev->ip_blocks[i].minor;
+ if (adev->ip_blocks[i].version->type == type &&
+ adev->ip_blocks[i].status.valid) {
+ ip.hw_ip_version_major = adev->ip_blocks[i].version->major;
+ ip.hw_ip_version_minor = adev->ip_blocks[i].version->minor;
ip.capabilities_flags = 0;
ip.available_rings = ring_mask;
ip.ib_start_alignment = ib_start_alignment;
@@ -347,8 +347,8 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
}
for (i = 0; i < adev->num_ip_blocks; i++)
- if (adev->ip_blocks[i].type == type &&
- adev->ip_block_status[i].valid &&
+ if (adev->ip_blocks[i].version->type == type &&
+ adev->ip_blocks[i].status.valid &&
count < AMDGPU_HW_IP_INSTANCE_MAX_COUNT)
count++;
@@ -413,6 +413,36 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
return copy_to_user(out, &vram_gtt,
min((size_t)size, sizeof(vram_gtt))) ? -EFAULT : 0;
}
+ case AMDGPU_INFO_MEMORY: {
+ struct drm_amdgpu_memory_info mem;
+
+ memset(&mem, 0, sizeof(mem));
+ mem.vram.total_heap_size = adev->mc.real_vram_size;
+ mem.vram.usable_heap_size =
+ adev->mc.real_vram_size - adev->vram_pin_size;
+ mem.vram.heap_usage = atomic64_read(&adev->vram_usage);
+ mem.vram.max_allocation = mem.vram.usable_heap_size * 3 / 4;
+
+ mem.cpu_accessible_vram.total_heap_size =
+ adev->mc.visible_vram_size;
+ mem.cpu_accessible_vram.usable_heap_size =
+ adev->mc.visible_vram_size -
+ (adev->vram_pin_size - adev->invisible_pin_size);
+ mem.cpu_accessible_vram.heap_usage =
+ atomic64_read(&adev->vram_vis_usage);
+ mem.cpu_accessible_vram.max_allocation =
+ mem.cpu_accessible_vram.usable_heap_size * 3 / 4;
+
+ mem.gtt.total_heap_size = adev->mc.gtt_size;
+ mem.gtt.usable_heap_size =
+ adev->mc.gtt_size - adev->gart_pin_size;
+ mem.gtt.heap_usage = atomic64_read(&adev->gtt_usage);
+ mem.gtt.max_allocation = mem.gtt.usable_heap_size * 3 / 4;
+
+ return copy_to_user(out, &mem,
+ min((size_t)size, sizeof(mem)))
+ ? -EFAULT : 0;
+ }
case AMDGPU_INFO_READ_MMR_REG: {
unsigned n, alloc_size;
uint32_t *regs;
@@ -475,6 +505,8 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
dev_info.ids_flags = 0;
if (adev->flags & AMD_IS_APU)
dev_info.ids_flags |= AMDGPU_IDS_FLAGS_FUSION;
+ if (amdgpu_sriov_vf(adev))
+ dev_info.ids_flags |= AMDGPU_IDS_FLAGS_PREEMPTION;
dev_info.virtual_address_offset = AMDGPU_VA_RESERVED_SIZE;
dev_info.virtual_address_max = (uint64_t)adev->vm_manager.max_pfn * AMDGPU_GPU_PAGE_SIZE;
dev_info.virtual_address_alignment = max((int)PAGE_SIZE, AMDGPU_GPU_PAGE_SIZE);
@@ -494,6 +526,50 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
return copy_to_user(out, &dev_info,
min((size_t)size, sizeof(dev_info))) ? -EFAULT : 0;
}
+ case AMDGPU_INFO_VCE_CLOCK_TABLE: {
+ unsigned i;
+ struct drm_amdgpu_info_vce_clock_table vce_clk_table = {};
+ struct amd_vce_state *vce_state;
+
+ for (i = 0; i < AMDGPU_VCE_CLOCK_TABLE_ENTRIES; i++) {
+ vce_state = amdgpu_dpm_get_vce_clock_state(adev, i);
+ if (vce_state) {
+ vce_clk_table.entries[i].sclk = vce_state->sclk;
+ vce_clk_table.entries[i].mclk = vce_state->mclk;
+ vce_clk_table.entries[i].eclk = vce_state->evclk;
+ vce_clk_table.num_valid_entries++;
+ }
+ }
+
+ return copy_to_user(out, &vce_clk_table,
+ min((size_t)size, sizeof(vce_clk_table))) ? -EFAULT : 0;
+ }
+ case AMDGPU_INFO_VBIOS: {
+ uint32_t bios_size = adev->bios_size;
+
+ switch (info->vbios_info.type) {
+ case AMDGPU_INFO_VBIOS_SIZE:
+ return copy_to_user(out, &bios_size,
+ min((size_t)size, sizeof(bios_size)))
+ ? -EFAULT : 0;
+ case AMDGPU_INFO_VBIOS_IMAGE: {
+ uint8_t *bios;
+ uint32_t bios_offset = info->vbios_info.offset;
+
+ if (bios_offset >= bios_size)
+ return -EINVAL;
+
+ bios = adev->bios + bios_offset;
+ return copy_to_user(out, bios,
+ min((size_t)size, (size_t)(bios_size - bios_offset)))
+ ? -EFAULT : 0;
+ }
+ default:
+ DRM_DEBUG_KMS("Invalid request %d\n",
+ info->vbios_info.type);
+ return -EINVAL;
+ }
+ }
default:
DRM_DEBUG_KMS("Invalid request %d\n", info->query);
return -EINVAL;
@@ -775,6 +851,7 @@ const struct drm_ioctl_desc amdgpu_ioctls_kms[] = {
DRM_IOCTL_DEF_DRV(AMDGPU_CS, amdgpu_cs_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(AMDGPU_INFO, amdgpu_info_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(AMDGPU_WAIT_CS, amdgpu_cs_wait_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(AMDGPU_WAIT_FENCES, amdgpu_cs_wait_fences_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(AMDGPU_GEM_METADATA, amdgpu_gem_metadata_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(AMDGPU_GEM_VA, amdgpu_gem_va_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(AMDGPU_GEM_OP, amdgpu_gem_op_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
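The new AMDGPU_INFO_MEMORY query reports three heaps (VRAM, CPU-visible VRAM, GTT), each with a total size, a usable size net of pinned allocations, the current usage counter, and a soft max_allocation cap of three quarters of the usable size. Worked numbers for a hypothetical 4 GiB card with 512 MiB pinned (figures are mine, purely illustrative):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t total = 4ULL << 30;	/* 4 GiB of VRAM */
	uint64_t pinned = 512ULL << 20;	/* 512 MiB pinned */
	uint64_t usable = total - pinned;
	uint64_t max_alloc = usable * 3 / 4;	/* same 3/4 rule as above */

	printf("usable:    %llu MiB\n", (unsigned long long)(usable >> 20));
	printf("max alloc: %llu MiB\n", (unsigned long long)(max_alloc >> 20));
	return 0;	/* usable: 3584 MiB, max alloc: 2688 MiB */
}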
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
index 32fa7b7913f7..7ea3cacf9f9f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
@@ -285,7 +285,7 @@ free_rmn:
int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr)
{
unsigned long end = addr + amdgpu_bo_size(bo) - 1;
- struct amdgpu_device *adev = bo->adev;
+ struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
struct amdgpu_mn *rmn;
struct amdgpu_mn_node *node = NULL;
struct list_head bos;
@@ -340,7 +340,7 @@ int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr)
*/
void amdgpu_mn_unregister(struct amdgpu_bo *bo)
{
- struct amdgpu_device *adev = bo->adev;
+ struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
struct amdgpu_mn *rmn;
struct list_head *head;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
index 7b0eff7d060b..202b4176b74e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
@@ -271,8 +271,6 @@ struct amdgpu_display_funcs {
u32 (*vblank_get_counter)(struct amdgpu_device *adev, int crtc);
/* wait for vblank */
void (*vblank_wait)(struct amdgpu_device *adev, int crtc);
- /* is dce hung */
- bool (*is_display_hung)(struct amdgpu_device *adev);
/* set backlight level */
void (*backlight_set_level)(struct amdgpu_encoder *amdgpu_encoder,
u8 level);
@@ -341,8 +339,6 @@ struct amdgpu_mode_info {
int num_dig; /* number of dig blocks */
int disp_priority;
const struct amdgpu_display_funcs *funcs;
- struct hrtimer vblank_timer;
- enum amdgpu_interrupt_state vsync_timer_enabled;
};
#define AMDGPU_MAX_BL_LEVEL 0xFF
@@ -413,6 +409,9 @@ struct amdgpu_crtc {
u32 wm_high;
u32 lb_vblank_lead_lines;
struct drm_display_mode hw_mode;
+ /* for virtual dce */
+ struct hrtimer vblank_timer;
+ enum amdgpu_interrupt_state vsync_timer_enabled;
};
struct amdgpu_encoder_atom_dig {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index f3efb1c5dae9..bf79b73e1538 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -88,18 +88,19 @@ static void amdgpu_update_memory_usage(struct amdgpu_device *adev,
static void amdgpu_ttm_bo_destroy(struct ttm_buffer_object *tbo)
{
+ struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev);
struct amdgpu_bo *bo;
bo = container_of(tbo, struct amdgpu_bo, tbo);
- amdgpu_update_memory_usage(bo->adev, &bo->tbo.mem, NULL);
+ amdgpu_update_memory_usage(adev, &bo->tbo.mem, NULL);
drm_gem_object_release(&bo->gem_base);
amdgpu_bo_unref(&bo->parent);
if (!list_empty(&bo->shadow_list)) {
- mutex_lock(&bo->adev->shadow_list_lock);
+ mutex_lock(&adev->shadow_list_lock);
list_del_init(&bo->shadow_list);
- mutex_unlock(&bo->adev->shadow_list_lock);
+ mutex_unlock(&adev->shadow_list_lock);
}
kfree(bo->metadata);
kfree(bo);
@@ -121,20 +122,14 @@ static void amdgpu_ttm_placement_init(struct amdgpu_device *adev,
if (domain & AMDGPU_GEM_DOMAIN_VRAM) {
unsigned visible_pfn = adev->mc.visible_vram_size >> PAGE_SHIFT;
+ unsigned lpfn = 0;
- if (flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS &&
- !(flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) &&
- adev->mc.visible_vram_size < adev->mc.real_vram_size) {
- places[c].fpfn = visible_pfn;
- places[c].lpfn = 0;
- places[c].flags = TTM_PL_FLAG_WC |
- TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_VRAM |
- TTM_PL_FLAG_TOPDOWN;
- c++;
- }
+ /* This forces a reallocation if the flag wasn't set before */
+ if (flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS)
+ lpfn = adev->mc.real_vram_size >> PAGE_SHIFT;
places[c].fpfn = 0;
- places[c].lpfn = 0;
+ places[c].lpfn = lpfn;
places[c].flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
TTM_PL_FLAG_VRAM;
if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)
@@ -205,8 +200,10 @@ static void amdgpu_ttm_placement_init(struct amdgpu_device *adev,
void amdgpu_ttm_placement_from_domain(struct amdgpu_bo *abo, u32 domain)
{
- amdgpu_ttm_placement_init(abo->adev, &abo->placement,
- abo->placements, domain, abo->flags);
+ struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev);
+
+ amdgpu_ttm_placement_init(adev, &abo->placement, abo->placements,
+ domain, abo->flags);
}
static void amdgpu_fill_placement_to_bo(struct amdgpu_bo *bo,
@@ -245,7 +242,8 @@ int amdgpu_bo_create_kernel(struct amdgpu_device *adev,
int r;
r = amdgpu_bo_create(adev, size, align, true, domain,
- AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
+ AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
+ AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
NULL, NULL, bo_ptr);
if (r) {
dev_err(adev->dev, "(%d) failed to allocate kernel bo\n", r);
@@ -351,7 +349,6 @@ int amdgpu_bo_create_restricted(struct amdgpu_device *adev,
kfree(bo);
return r;
}
- bo->adev = adev;
INIT_LIST_HEAD(&bo->shadow_list);
INIT_LIST_HEAD(&bo->va);
bo->prefered_domains = domain & (AMDGPU_GEM_DOMAIN_VRAM |
@@ -374,39 +371,36 @@ int amdgpu_bo_create_restricted(struct amdgpu_device *adev,
amdgpu_fill_placement_to_bo(bo, placement);
/* Kernel allocations are uninterruptible */
+
+ if (!resv) {
+ bool locked;
+
+ reservation_object_init(&bo->tbo.ttm_resv);
+ locked = ww_mutex_trylock(&bo->tbo.ttm_resv.lock);
+ WARN_ON(!locked);
+ }
r = ttm_bo_init(&adev->mman.bdev, &bo->tbo, size, type,
&bo->placement, page_align, !kernel, NULL,
- acc_size, sg, resv, &amdgpu_ttm_bo_destroy);
- if (unlikely(r != 0)) {
+ acc_size, sg, resv ? resv : &bo->tbo.ttm_resv,
+ &amdgpu_ttm_bo_destroy);
+ if (unlikely(r != 0))
return r;
- }
if (flags & AMDGPU_GEM_CREATE_VRAM_CLEARED &&
bo->tbo.mem.placement & TTM_PL_FLAG_VRAM) {
- struct fence *fence;
-
- if (adev->mman.buffer_funcs_ring == NULL ||
- !adev->mman.buffer_funcs_ring->ready) {
- r = -EBUSY;
- goto fail_free;
- }
-
- r = amdgpu_bo_reserve(bo, false);
- if (unlikely(r != 0))
- goto fail_free;
+ struct dma_fence *fence;
- amdgpu_ttm_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_VRAM);
- r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
- if (unlikely(r != 0))
+ r = amdgpu_fill_buffer(bo, 0, bo->tbo.resv, &fence);
+ if (unlikely(r))
goto fail_unreserve;
- amdgpu_fill_buffer(bo, 0, bo->tbo.resv, &fence);
amdgpu_bo_fence(bo, fence, false);
- amdgpu_bo_unreserve(bo);
- fence_put(bo->tbo.moving);
- bo->tbo.moving = fence_get(fence);
- fence_put(fence);
+ dma_fence_put(bo->tbo.moving);
+ bo->tbo.moving = dma_fence_get(fence);
+ dma_fence_put(fence);
}
+ if (!resv)
+ ww_mutex_unlock(&bo->tbo.resv->lock);
*bo_ptr = bo;
trace_amdgpu_bo_create(bo);
@@ -414,8 +408,7 @@ int amdgpu_bo_create_restricted(struct amdgpu_device *adev,
return 0;
fail_unreserve:
- amdgpu_bo_unreserve(bo);
-fail_free:
+ ww_mutex_unlock(&bo->tbo.resv->lock);
amdgpu_bo_unref(&bo);
return r;
}
@@ -491,7 +484,7 @@ int amdgpu_bo_backup_to_shadow(struct amdgpu_device *adev,
struct amdgpu_ring *ring,
struct amdgpu_bo *bo,
struct reservation_object *resv,
- struct fence **fence,
+ struct dma_fence **fence,
bool direct)
{
@@ -523,7 +516,7 @@ int amdgpu_bo_restore_from_shadow(struct amdgpu_device *adev,
struct amdgpu_ring *ring,
struct amdgpu_bo *bo,
struct reservation_object *resv,
- struct fence **fence,
+ struct dma_fence **fence,
bool direct)
{
@@ -616,6 +609,7 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
u64 min_offset, u64 max_offset,
u64 *gpu_addr)
{
+ struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
int r, i;
unsigned fpfn, lpfn;
@@ -643,18 +637,20 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
return 0;
}
+
+ bo->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
amdgpu_ttm_placement_from_domain(bo, domain);
for (i = 0; i < bo->placement.num_placement; i++) {
/* force to pin into visible video ram */
if ((bo->placements[i].flags & TTM_PL_FLAG_VRAM) &&
!(bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS) &&
(!max_offset || max_offset >
- bo->adev->mc.visible_vram_size)) {
+ adev->mc.visible_vram_size)) {
if (WARN_ON_ONCE(min_offset >
- bo->adev->mc.visible_vram_size))
+ adev->mc.visible_vram_size))
return -EINVAL;
fpfn = min_offset >> PAGE_SHIFT;
- lpfn = bo->adev->mc.visible_vram_size >> PAGE_SHIFT;
+ lpfn = adev->mc.visible_vram_size >> PAGE_SHIFT;
} else {
fpfn = min_offset >> PAGE_SHIFT;
lpfn = max_offset >> PAGE_SHIFT;
@@ -669,12 +665,12 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
if (unlikely(r)) {
- dev_err(bo->adev->dev, "%p pin failed\n", bo);
+ dev_err(adev->dev, "%p pin failed\n", bo);
goto error;
}
r = amdgpu_ttm_bind(&bo->tbo, &bo->tbo.mem);
if (unlikely(r)) {
- dev_err(bo->adev->dev, "%p bind failed\n", bo);
+ dev_err(adev->dev, "%p bind failed\n", bo);
goto error;
}
@@ -682,11 +678,11 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
if (gpu_addr != NULL)
*gpu_addr = amdgpu_bo_gpu_offset(bo);
if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
- bo->adev->vram_pin_size += amdgpu_bo_size(bo);
+ adev->vram_pin_size += amdgpu_bo_size(bo);
if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)
- bo->adev->invisible_pin_size += amdgpu_bo_size(bo);
+ adev->invisible_pin_size += amdgpu_bo_size(bo);
} else if (domain == AMDGPU_GEM_DOMAIN_GTT) {
- bo->adev->gart_pin_size += amdgpu_bo_size(bo);
+ adev->gart_pin_size += amdgpu_bo_size(bo);
}
error:
@@ -700,10 +696,11 @@ int amdgpu_bo_pin(struct amdgpu_bo *bo, u32 domain, u64 *gpu_addr)
int amdgpu_bo_unpin(struct amdgpu_bo *bo)
{
+ struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
int r, i;
if (!bo->pin_count) {
- dev_warn(bo->adev->dev, "%p unpin not necessary\n", bo);
+ dev_warn(adev->dev, "%p unpin not necessary\n", bo);
return 0;
}
bo->pin_count--;
@@ -715,16 +712,16 @@ int amdgpu_bo_unpin(struct amdgpu_bo *bo)
}
r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
if (unlikely(r)) {
- dev_err(bo->adev->dev, "%p validate failed for unpin\n", bo);
+ dev_err(adev->dev, "%p validate failed for unpin\n", bo);
goto error;
}
if (bo->tbo.mem.mem_type == TTM_PL_VRAM) {
- bo->adev->vram_pin_size -= amdgpu_bo_size(bo);
+ adev->vram_pin_size -= amdgpu_bo_size(bo);
if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)
- bo->adev->invisible_pin_size -= amdgpu_bo_size(bo);
+ adev->invisible_pin_size -= amdgpu_bo_size(bo);
} else if (bo->tbo.mem.mem_type == TTM_PL_TT) {
- bo->adev->gart_pin_size -= amdgpu_bo_size(bo);
+ adev->gart_pin_size -= amdgpu_bo_size(bo);
}
error:
@@ -854,6 +851,7 @@ int amdgpu_bo_get_metadata(struct amdgpu_bo *bo, void *buffer,
void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
struct ttm_mem_reg *new_mem)
{
+ struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
struct amdgpu_bo *abo;
struct ttm_mem_reg *old_mem = &bo->mem;
@@ -861,21 +859,21 @@ void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
return;
abo = container_of(bo, struct amdgpu_bo, tbo);
- amdgpu_vm_bo_invalidate(abo->adev, abo);
+ amdgpu_vm_bo_invalidate(adev, abo);
/* update statistics */
if (!new_mem)
return;
/* move_notify is called before move happens */
- amdgpu_update_memory_usage(abo->adev, &bo->mem, new_mem);
+ amdgpu_update_memory_usage(adev, &bo->mem, new_mem);
trace_amdgpu_ttm_bo_move(abo, new_mem->mem_type, old_mem->mem_type);
}
int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
{
- struct amdgpu_device *adev;
+ struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
struct amdgpu_bo *abo;
unsigned long offset, size, lpfn;
int i, r;
@@ -884,13 +882,14 @@ int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
return 0;
abo = container_of(bo, struct amdgpu_bo, tbo);
- adev = abo->adev;
if (bo->mem.mem_type != TTM_PL_VRAM)
return 0;
size = bo->mem.num_pages << PAGE_SHIFT;
offset = bo->mem.start << PAGE_SHIFT;
- if ((offset + size) <= adev->mc.visible_vram_size)
+ /* TODO: figure out how to map scattered VRAM to the CPU */
+ if ((offset + size) <= adev->mc.visible_vram_size &&
+ (abo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS))
return 0;
/* Can't move a pinned BO to visible VRAM */
@@ -898,6 +897,7 @@ int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
return -EINVAL;
/* hurrah, the memory is not visible! */
+ abo->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_VRAM);
lpfn = adev->mc.visible_vram_size >> PAGE_SHIFT;
for (i = 0; i < abo->placement.num_placement; i++) {
@@ -931,7 +931,7 @@ int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
* @shared: true if fence should be added shared
*
*/
-void amdgpu_bo_fence(struct amdgpu_bo *bo, struct fence *fence,
+void amdgpu_bo_fence(struct amdgpu_bo *bo, struct dma_fence *fence,
bool shared)
{
struct reservation_object *resv = bo->tbo.resv;
@@ -959,6 +959,8 @@ u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo)
WARN_ON_ONCE(!ww_mutex_is_locked(&bo->tbo.resv->lock) &&
!bo->pin_count);
WARN_ON_ONCE(bo->tbo.mem.start == AMDGPU_BO_INVALID_OFFSET);
+ WARN_ON_ONCE(bo->tbo.mem.mem_type == TTM_PL_VRAM &&
+ !(bo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS));
return bo->tbo.offset;
}
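The object.c hunks replace the old two-placement visible/invisible VRAM trick with a single placement whose last-page frame number (lpfn) is only bounded when AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS is set; pinning and CPU page faults then set that flag and revalidate, forcing a move into a contiguous, mappable range. A condensed sketch of the lpfn choice (illustrative helper, simplified from amdgpu_ttm_placement_init() above):

/* To TTM, lpfn == 0 means "no upper bound"; a contiguous BO is capped
 * at the end of VRAM so it cannot be scattered across the aperture. */
static unsigned vram_placement_lpfn(u64 bo_flags, u64 real_vram_size)
{
	if (bo_flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS)
		return real_vram_size >> PAGE_SHIFT;
	return 0;
}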
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
index 8255034d73eb..5cbf59ec0f68 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
@@ -71,12 +71,13 @@ static inline unsigned amdgpu_mem_type_to_domain(u32 mem_type)
*/
static inline int amdgpu_bo_reserve(struct amdgpu_bo *bo, bool no_intr)
{
+ struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
int r;
r = ttm_bo_reserve(&bo->tbo, !no_intr, false, NULL);
if (unlikely(r != 0)) {
if (r != -ERESTARTSYS)
- dev_err(bo->adev->dev, "%p reserve failed\n", bo);
+ dev_err(adev->dev, "%p reserve failed\n", bo);
return r;
}
return 0;
@@ -156,19 +157,19 @@ int amdgpu_bo_get_metadata(struct amdgpu_bo *bo, void *buffer,
void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
struct ttm_mem_reg *new_mem);
int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo);
-void amdgpu_bo_fence(struct amdgpu_bo *bo, struct fence *fence,
+void amdgpu_bo_fence(struct amdgpu_bo *bo, struct dma_fence *fence,
bool shared);
u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo);
int amdgpu_bo_backup_to_shadow(struct amdgpu_device *adev,
struct amdgpu_ring *ring,
struct amdgpu_bo *bo,
struct reservation_object *resv,
- struct fence **fence, bool direct);
+ struct dma_fence **fence, bool direct);
int amdgpu_bo_restore_from_shadow(struct amdgpu_device *adev,
struct amdgpu_ring *ring,
struct amdgpu_bo *bo,
struct reservation_object *resv,
- struct fence **fence,
+ struct dma_fence **fence,
bool direct);
@@ -200,7 +201,7 @@ int amdgpu_sa_bo_new(struct amdgpu_sa_manager *sa_manager,
unsigned size, unsigned align);
void amdgpu_sa_bo_free(struct amdgpu_device *adev,
struct amdgpu_sa_bo **sa_bo,
- struct fence *fence);
+ struct dma_fence *fence);
#if defined(CONFIG_DEBUG_FS)
void amdgpu_sa_bo_dump_debug_info(struct amdgpu_sa_manager *sa_manager,
struct seq_file *m);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
index accc908bdc88..723ae682bf25 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
@@ -737,6 +737,21 @@ static ssize_t amdgpu_hwmon_get_pwm1(struct device *dev,
return sprintf(buf, "%i\n", speed);
}
+static ssize_t amdgpu_hwmon_get_fan1_input(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct amdgpu_device *adev = dev_get_drvdata(dev);
+ int err;
+ u32 speed;
+
+ err = amdgpu_dpm_get_fan_speed_rpm(adev, &speed);
+ if (err)
+ return err;
+
+ return sprintf(buf, "%i\n", speed);
+}
+
static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, amdgpu_hwmon_show_temp, NULL, 0);
static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, amdgpu_hwmon_show_temp_thresh, NULL, 0);
static SENSOR_DEVICE_ATTR(temp1_crit_hyst, S_IRUGO, amdgpu_hwmon_show_temp_thresh, NULL, 1);
@@ -744,6 +759,7 @@ static SENSOR_DEVICE_ATTR(pwm1, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_pwm1, amdgpu
static SENSOR_DEVICE_ATTR(pwm1_enable, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_pwm1_enable, amdgpu_hwmon_set_pwm1_enable, 0);
static SENSOR_DEVICE_ATTR(pwm1_min, S_IRUGO, amdgpu_hwmon_get_pwm1_min, NULL, 0);
static SENSOR_DEVICE_ATTR(pwm1_max, S_IRUGO, amdgpu_hwmon_get_pwm1_max, NULL, 0);
+static SENSOR_DEVICE_ATTR(fan1_input, S_IRUGO, amdgpu_hwmon_get_fan1_input, NULL, 0);
static struct attribute *hwmon_attributes[] = {
&sensor_dev_attr_temp1_input.dev_attr.attr,
@@ -753,6 +769,7 @@ static struct attribute *hwmon_attributes[] = {
&sensor_dev_attr_pwm1_enable.dev_attr.attr,
&sensor_dev_attr_pwm1_min.dev_attr.attr,
&sensor_dev_attr_pwm1_max.dev_attr.attr,
+ &sensor_dev_attr_fan1_input.dev_attr.attr,
NULL
};
@@ -804,6 +821,10 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj,
attr == &sensor_dev_attr_pwm1_min.dev_attr.attr))
return 0;
+ /* requires powerplay */
+ if (attr == &sensor_dev_attr_fan1_input.dev_attr.attr)
+ return 0;
+
return effective_mode;
}
@@ -986,10 +1007,10 @@ restart_search:
static void amdgpu_dpm_change_power_state_locked(struct amdgpu_device *adev)
{
- int i;
struct amdgpu_ps *ps;
enum amd_pm_state_type dpm_state;
int ret;
+ bool equal;
/* if dpm init failed */
if (!adev->pm.dpm_enabled)
@@ -1009,46 +1030,6 @@ static void amdgpu_dpm_change_power_state_locked(struct amdgpu_device *adev)
else
return;
- /* no need to reprogram if nothing changed unless we are on BTC+ */
- if (adev->pm.dpm.current_ps == adev->pm.dpm.requested_ps) {
- /* vce just modifies an existing state so force a change */
- if (ps->vce_active != adev->pm.dpm.vce_active)
- goto force;
- if (adev->flags & AMD_IS_APU) {
- /* for APUs if the num crtcs changed but state is the same,
- * all we need to do is update the display configuration.
- */
- if (adev->pm.dpm.new_active_crtcs != adev->pm.dpm.current_active_crtcs) {
- /* update display watermarks based on new power state */
- amdgpu_display_bandwidth_update(adev);
- /* update displays */
- amdgpu_dpm_display_configuration_changed(adev);
- adev->pm.dpm.current_active_crtcs = adev->pm.dpm.new_active_crtcs;
- adev->pm.dpm.current_active_crtc_count = adev->pm.dpm.new_active_crtc_count;
- }
- return;
- } else {
- /* for BTC+ if the num crtcs hasn't changed and state is the same,
- * nothing to do, if the num crtcs is > 1 and state is the same,
- * update display configuration.
- */
- if (adev->pm.dpm.new_active_crtcs ==
- adev->pm.dpm.current_active_crtcs) {
- return;
- } else if ((adev->pm.dpm.current_active_crtc_count > 1) &&
- (adev->pm.dpm.new_active_crtc_count > 1)) {
- /* update display watermarks based on new power state */
- amdgpu_display_bandwidth_update(adev);
- /* update displays */
- amdgpu_dpm_display_configuration_changed(adev);
- adev->pm.dpm.current_active_crtcs = adev->pm.dpm.new_active_crtcs;
- adev->pm.dpm.current_active_crtc_count = adev->pm.dpm.new_active_crtc_count;
- return;
- }
- }
- }
-
-force:
if (amdgpu_dpm == 1) {
printk("switching from power state:\n");
amdgpu_dpm_print_power_state(adev, adev->pm.dpm.current_ps);
@@ -1059,31 +1040,21 @@ force:
/* update whether vce is active */
ps->vce_active = adev->pm.dpm.vce_active;
+ amdgpu_dpm_display_configuration_changed(adev);
+
ret = amdgpu_dpm_pre_set_power_state(adev);
if (ret)
return;
- /* update display watermarks based on new power state */
- amdgpu_display_bandwidth_update(adev);
+ if (amgdpu_dpm_check_state_equal(adev, adev->pm.dpm.current_ps,
+ adev->pm.dpm.requested_ps, &equal))
+ equal = false;
- /* wait for the rings to drain */
- for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
- struct amdgpu_ring *ring = adev->rings[i];
- if (ring && ring->ready)
- amdgpu_fence_wait_empty(ring);
- }
+ if (equal)
+ return;
- /* program the new power state */
amdgpu_dpm_set_power_state(adev);
-
- /* update current power state */
- adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps;
-
amdgpu_dpm_post_set_power_state(adev);
- /* update displays */
- amdgpu_dpm_display_configuration_changed(adev);
-
adev->pm.dpm.current_active_crtcs = adev->pm.dpm.new_active_crtcs;
adev->pm.dpm.current_active_crtc_count = adev->pm.dpm.new_active_crtc_count;
@@ -1135,7 +1106,7 @@ void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable)
mutex_lock(&adev->pm.mutex);
adev->pm.dpm.vce_active = true;
/* XXX select vce level based on ring/task */
- adev->pm.dpm.vce_level = AMDGPU_VCE_LEVEL_AC_ALL;
+ adev->pm.dpm.vce_level = AMD_VCE_LEVEL_AC_ALL;
mutex_unlock(&adev->pm.mutex);
} else {
mutex_lock(&adev->pm.mutex);
@@ -1276,20 +1247,20 @@ void amdgpu_pm_compute_clocks(struct amdgpu_device *adev)
struct drm_device *ddev = adev->ddev;
struct drm_crtc *crtc;
struct amdgpu_crtc *amdgpu_crtc;
+ int i = 0;
if (!adev->pm.dpm_enabled)
return;
- if (adev->pp_enabled) {
- int i = 0;
+ amdgpu_display_bandwidth_update(adev);
- amdgpu_display_bandwidth_update(adev);
- for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
- struct amdgpu_ring *ring = adev->rings[i];
- if (ring && ring->ready)
- amdgpu_fence_wait_empty(ring);
- }
+ for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
+ struct amdgpu_ring *ring = adev->rings[i];
+ if (ring && ring->ready)
+ amdgpu_fence_wait_empty(ring);
+ }
+ if (adev->pp_enabled) {
amdgpu_dpm_dispatch_task(adev, AMD_PP_EVENT_DISPLAY_CONFIG_CHANGE, NULL, NULL);
} else {
mutex_lock(&adev->pm.mutex);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c
index 7532ff822aa7..fc592c2b0e16 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c
@@ -155,9 +155,6 @@ static int amdgpu_pp_sw_init(void *handle)
ret = adev->powerplay.ip_funcs->sw_init(
adev->powerplay.pp_handle);
- if (adev->pp_enabled)
- adev->pm.dpm_enabled = true;
-
return ret;
}
@@ -187,6 +184,9 @@ static int amdgpu_pp_hw_init(void *handle)
ret = adev->powerplay.ip_funcs->hw_init(
adev->powerplay.pp_handle);
+ if ((amdgpu_dpm != 0) && !amdgpu_sriov_vf(adev))
+ adev->pm.dpm_enabled = true;
+
return ret;
}
@@ -299,7 +299,7 @@ static int amdgpu_pp_soft_reset(void *handle)
return ret;
}
-const struct amd_ip_funcs amdgpu_pp_ip_funcs = {
+static const struct amd_ip_funcs amdgpu_pp_ip_funcs = {
.name = "amdgpu_powerplay",
.early_init = amdgpu_pp_early_init,
.late_init = amdgpu_pp_late_init,
@@ -316,3 +316,12 @@ const struct amd_ip_funcs amdgpu_pp_ip_funcs = {
.set_clockgating_state = amdgpu_pp_set_clockgating_state,
.set_powergating_state = amdgpu_pp_set_powergating_state,
};
+
+const struct amdgpu_ip_block_version amdgpu_pp_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_SMC,
+ .major = 1,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &amdgpu_pp_ip_funcs,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.h
index da5cf47cfd99..c0c4bfdcdb14 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.h
@@ -23,11 +23,11 @@
*
*/
-#ifndef __AMDGPU_POPWERPLAY_H__
-#define __AMDGPU_POPWERPLAY_H__
+#ifndef __AMDGPU_POWERPLAY_H__
+#define __AMDGPU_POWERPLAY_H__
#include "amd_shared.h"
-extern const struct amd_ip_funcs amdgpu_pp_ip_funcs;
+extern const struct amdgpu_ip_block_version amdgpu_pp_ip_block;
-#endif /* __AMDSOC_DM_H__ */
+#endif /* __AMDGPU_POWERPLAY_H__ */
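Making amdgpu_pp_ip_funcs static and exporting an amdgpu_ip_block_version instead matches this series' wider rework: each IP block now carries its type and version alongside its callbacks (see the ip_blocks[i].version accesses in the kms.c hunks earlier). A sketch of declaring such a block; the amdgpu_ip_block_add() registration helper is my assumption about how this series wires blocks up, not shown in this excerpt:

static const struct amd_ip_funcs my_ip_funcs = {
	.name = "my_block",
	/* .early_init, .hw_init, .suspend, ... callbacks go here */
};

const struct amdgpu_ip_block_version my_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_SMC,
	.major = 1,
	.minor = 0,
	.rev = 0,
	.funcs = &my_ip_funcs,
};

/* assumed registration during ASIC init:
 *	r = amdgpu_ip_block_add(adev, &my_ip_block);
 */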
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
index 3cb5e903cd62..a47628395914 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
@@ -65,7 +65,7 @@ int amdgpu_ring_alloc(struct amdgpu_ring *ring, unsigned ndw)
{
/* Align requested size with padding so unlock_commit can
* pad safely */
- ndw = (ndw + ring->align_mask) & ~ring->align_mask;
+ ndw = (ndw + ring->funcs->align_mask) & ~ring->funcs->align_mask;
/* Make sure we aren't trying to allocate more space
* than the maximum for one submission
@@ -94,7 +94,7 @@ void amdgpu_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
int i;
for (i = 0; i < count; i++)
- amdgpu_ring_write(ring, ring->nop);
+ amdgpu_ring_write(ring, ring->funcs->nop);
}
/**
 * amdgpu_ring_generic_pad_ib - pad IB with NOP packets
@@ -106,8 +106,8 @@ void amdgpu_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
*/
void amdgpu_ring_generic_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib)
{
- while (ib->length_dw & ring->align_mask)
- ib->ptr[ib->length_dw++] = ring->nop;
+ while (ib->length_dw & ring->funcs->align_mask)
+ ib->ptr[ib->length_dw++] = ring->funcs->nop;
}
/**
@@ -125,8 +125,9 @@ void amdgpu_ring_commit(struct amdgpu_ring *ring)
uint32_t count;
/* We pad to match fetch size */
- count = ring->align_mask + 1 - (ring->wptr & ring->align_mask);
- count %= ring->align_mask + 1;
+ count = ring->funcs->align_mask + 1 -
+ (ring->wptr & ring->funcs->align_mask);
+ count %= ring->funcs->align_mask + 1;
ring->funcs->insert_nop(ring, count);
mb();
@@ -163,9 +164,8 @@ void amdgpu_ring_undo(struct amdgpu_ring *ring)
* Returns 0 on success, error on failure.
*/
int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
- unsigned max_dw, u32 nop, u32 align_mask,
- struct amdgpu_irq_src *irq_src, unsigned irq_type,
- enum amdgpu_ring_type ring_type)
+ unsigned max_dw, struct amdgpu_irq_src *irq_src,
+ unsigned irq_type)
{
int r;
@@ -216,9 +216,6 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
ring->ring_size = roundup_pow_of_two(max_dw * 4 *
amdgpu_sched_hw_submission);
- ring->align_mask = align_mask;
- ring->nop = nop;
- ring->type = ring_type;
/* Allocate ring buffer */
if (ring->ring_obj == NULL) {
@@ -283,7 +280,7 @@ void amdgpu_ring_fini(struct amdgpu_ring *ring)
static ssize_t amdgpu_debugfs_ring_read(struct file *f, char __user *buf,
size_t size, loff_t *pos)
{
- struct amdgpu_ring *ring = (struct amdgpu_ring*)f->f_inode->i_private;
+ struct amdgpu_ring *ring = file_inode(f)->i_private;
int r, i;
uint32_t value, result, early[3];
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
new file mode 100644
index 000000000000..574f0b79c690
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
@@ -0,0 +1,186 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Christian König
+ */
+#ifndef __AMDGPU_RING_H__
+#define __AMDGPU_RING_H__
+
+#include "gpu_scheduler.h"
+
+/* max number of rings */
+#define AMDGPU_MAX_RINGS 16
+#define AMDGPU_MAX_GFX_RINGS 1
+#define AMDGPU_MAX_COMPUTE_RINGS 8
+#define AMDGPU_MAX_VCE_RINGS 3
+
+/* some special values for the owner field */
+#define AMDGPU_FENCE_OWNER_UNDEFINED ((void*)0ul)
+#define AMDGPU_FENCE_OWNER_VM ((void*)1ul)
+
+#define AMDGPU_FENCE_FLAG_64BIT (1 << 0)
+#define AMDGPU_FENCE_FLAG_INT (1 << 1)
+
+enum amdgpu_ring_type {
+ AMDGPU_RING_TYPE_GFX,
+ AMDGPU_RING_TYPE_COMPUTE,
+ AMDGPU_RING_TYPE_SDMA,
+ AMDGPU_RING_TYPE_UVD,
+ AMDGPU_RING_TYPE_VCE,
+ AMDGPU_RING_TYPE_KIQ
+};
+
+struct amdgpu_device;
+struct amdgpu_ring;
+struct amdgpu_ib;
+struct amdgpu_cs_parser;
+
+/*
+ * Fences.
+ */
+struct amdgpu_fence_driver {
+ uint64_t gpu_addr;
+ volatile uint32_t *cpu_addr;
+ /* sync_seq is protected by ring emission lock */
+ uint32_t sync_seq;
+ atomic_t last_seq;
+ bool initialized;
+ struct amdgpu_irq_src *irq_src;
+ unsigned irq_type;
+ struct timer_list fallback_timer;
+ unsigned num_fences_mask;
+ spinlock_t lock;
+ struct dma_fence **fences;
+};
+
+int amdgpu_fence_driver_init(struct amdgpu_device *adev);
+void amdgpu_fence_driver_fini(struct amdgpu_device *adev);
+void amdgpu_fence_driver_force_completion(struct amdgpu_device *adev);
+
+int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring,
+ unsigned num_hw_submission);
+int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
+ struct amdgpu_irq_src *irq_src,
+ unsigned irq_type);
+void amdgpu_fence_driver_suspend(struct amdgpu_device *adev);
+void amdgpu_fence_driver_resume(struct amdgpu_device *adev);
+int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **fence);
+void amdgpu_fence_process(struct amdgpu_ring *ring);
+int amdgpu_fence_wait_empty(struct amdgpu_ring *ring);
+unsigned amdgpu_fence_count_emitted(struct amdgpu_ring *ring);
+
+/*
+ * Rings.
+ */
+
+/* provided by hw blocks that expose a ring buffer for commands */
+struct amdgpu_ring_funcs {
+ enum amdgpu_ring_type type;
+ uint32_t align_mask;
+ u32 nop;
+
+ /* ring read/write ptr handling */
+ u32 (*get_rptr)(struct amdgpu_ring *ring);
+ u32 (*get_wptr)(struct amdgpu_ring *ring);
+ void (*set_wptr)(struct amdgpu_ring *ring);
+ /* validating and patching of IBs */
+ int (*parse_cs)(struct amdgpu_cs_parser *p, uint32_t ib_idx);
+ /* constants to calculate how many DW are needed for an emit */
+ unsigned emit_frame_size;
+ unsigned emit_ib_size;
+ /* command emit functions */
+ void (*emit_ib)(struct amdgpu_ring *ring,
+ struct amdgpu_ib *ib,
+ unsigned vm_id, bool ctx_switch);
+ void (*emit_fence)(struct amdgpu_ring *ring, uint64_t addr,
+ uint64_t seq, unsigned flags);
+ void (*emit_pipeline_sync)(struct amdgpu_ring *ring);
+ void (*emit_vm_flush)(struct amdgpu_ring *ring, unsigned vm_id,
+ uint64_t pd_addr);
+ void (*emit_hdp_flush)(struct amdgpu_ring *ring);
+ void (*emit_hdp_invalidate)(struct amdgpu_ring *ring);
+ void (*emit_gds_switch)(struct amdgpu_ring *ring, uint32_t vmid,
+ uint32_t gds_base, uint32_t gds_size,
+ uint32_t gws_base, uint32_t gws_size,
+ uint32_t oa_base, uint32_t oa_size);
+ /* testing functions */
+ int (*test_ring)(struct amdgpu_ring *ring);
+ int (*test_ib)(struct amdgpu_ring *ring, long timeout);
+ /* insert NOP packets */
+ void (*insert_nop)(struct amdgpu_ring *ring, uint32_t count);
+ /* pad the indirect buffer to the necessary number of dw */
+ void (*pad_ib)(struct amdgpu_ring *ring, struct amdgpu_ib *ib);
+ unsigned (*init_cond_exec)(struct amdgpu_ring *ring);
+ void (*patch_cond_exec)(struct amdgpu_ring *ring, unsigned offset);
+ /* note usage for clock and power gating */
+ void (*begin_use)(struct amdgpu_ring *ring);
+ void (*end_use)(struct amdgpu_ring *ring);
+ void (*emit_switch_buffer) (struct amdgpu_ring *ring);
+ void (*emit_cntxcntl) (struct amdgpu_ring *ring, uint32_t flags);
+};
+
+struct amdgpu_ring {
+ struct amdgpu_device *adev;
+ const struct amdgpu_ring_funcs *funcs;
+ struct amdgpu_fence_driver fence_drv;
+ struct amd_gpu_scheduler sched;
+
+ struct amdgpu_bo *ring_obj;
+ volatile uint32_t *ring;
+ unsigned rptr_offs;
+ unsigned wptr;
+ unsigned wptr_old;
+ unsigned ring_size;
+ unsigned max_dw;
+ int count_dw;
+ uint64_t gpu_addr;
+ uint32_t ptr_mask;
+ bool ready;
+ u32 idx;
+ u32 me;
+ u32 pipe;
+ u32 queue;
+ struct amdgpu_bo *mqd_obj;
+ u32 doorbell_index;
+ bool use_doorbell;
+ unsigned wptr_offs;
+ unsigned fence_offs;
+ uint64_t current_ctx;
+ char name[16];
+ unsigned cond_exe_offs;
+ u64 cond_exe_gpu_addr;
+ volatile u32 *cond_exe_cpu_addr;
+#if defined(CONFIG_DEBUG_FS)
+ struct dentry *ent;
+#endif
+};
+
+int amdgpu_ring_alloc(struct amdgpu_ring *ring, unsigned ndw);
+void amdgpu_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count);
+void amdgpu_ring_generic_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib);
+void amdgpu_ring_commit(struct amdgpu_ring *ring);
+void amdgpu_ring_undo(struct amdgpu_ring *ring);
+int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
+ unsigned ring_size, struct amdgpu_irq_src *irq_src,
+ unsigned irq_type);
+void amdgpu_ring_fini(struct amdgpu_ring *ring);
+
+#endif
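With type, align_mask and nop moved into amdgpu_ring_funcs, a ring backend describes itself once in a static table instead of passing those values to amdgpu_ring_init(). A sketch of how a backend fills the table and initializes a ring under the new, shorter signature (callbacks elided; all values illustrative):

static const struct amdgpu_ring_funcs my_ring_funcs = {
	.type = AMDGPU_RING_TYPE_GFX,
	.align_mask = 0xff,	/* pad submissions to the 256-dword fetch */
	.nop = 0x80000000,	/* illustrative NOP packet for this engine */
	.emit_frame_size = 64,	/* illustrative worst-case dwords per frame */
	.emit_ib_size = 4,	/* illustrative dwords per IB emit */
	/* .get_rptr/.get_wptr/.set_wptr/.emit_ib/... filled per engine */
};

	/* in the engine's setup code, before ring init: */
	ring->funcs = &my_ring_funcs;
	r = amdgpu_ring_init(adev, ring, 1024 /* max_dw */, irq_src, irq_type);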
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
index d8af37a845f4..de9f919ae336 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
@@ -147,7 +147,7 @@ static void amdgpu_sa_bo_remove_locked(struct amdgpu_sa_bo *sa_bo)
}
list_del_init(&sa_bo->olist);
list_del_init(&sa_bo->flist);
- fence_put(sa_bo->fence);
+ dma_fence_put(sa_bo->fence);
kfree(sa_bo);
}
@@ -161,7 +161,7 @@ static void amdgpu_sa_bo_try_free(struct amdgpu_sa_manager *sa_manager)
sa_bo = list_entry(sa_manager->hole->next, struct amdgpu_sa_bo, olist);
list_for_each_entry_safe_from(sa_bo, tmp, &sa_manager->olist, olist) {
if (sa_bo->fence == NULL ||
- !fence_is_signaled(sa_bo->fence)) {
+ !dma_fence_is_signaled(sa_bo->fence)) {
return;
}
amdgpu_sa_bo_remove_locked(sa_bo);
@@ -244,7 +244,7 @@ static bool amdgpu_sa_event(struct amdgpu_sa_manager *sa_manager,
}
static bool amdgpu_sa_bo_next_hole(struct amdgpu_sa_manager *sa_manager,
- struct fence **fences,
+ struct dma_fence **fences,
unsigned *tries)
{
struct amdgpu_sa_bo *best_bo = NULL;
@@ -272,7 +272,7 @@ static bool amdgpu_sa_bo_next_hole(struct amdgpu_sa_manager *sa_manager,
sa_bo = list_first_entry(&sa_manager->flist[i],
struct amdgpu_sa_bo, flist);
- if (!fence_is_signaled(sa_bo->fence)) {
+ if (!dma_fence_is_signaled(sa_bo->fence)) {
fences[i] = sa_bo->fence;
continue;
}
@@ -314,7 +314,7 @@ int amdgpu_sa_bo_new(struct amdgpu_sa_manager *sa_manager,
struct amdgpu_sa_bo **sa_bo,
unsigned size, unsigned align)
{
- struct fence *fences[AMDGPU_SA_NUM_FENCE_LISTS];
+ struct dma_fence *fences[AMDGPU_SA_NUM_FENCE_LISTS];
unsigned tries[AMDGPU_SA_NUM_FENCE_LISTS];
unsigned count;
int i, r;
@@ -327,9 +327,8 @@ int amdgpu_sa_bo_new(struct amdgpu_sa_manager *sa_manager,
return -EINVAL;
*sa_bo = kmalloc(sizeof(struct amdgpu_sa_bo), GFP_KERNEL);
- if ((*sa_bo) == NULL) {
+ if (!(*sa_bo))
return -ENOMEM;
- }
(*sa_bo)->manager = sa_manager;
(*sa_bo)->fence = NULL;
INIT_LIST_HEAD(&(*sa_bo)->olist);
@@ -356,14 +355,15 @@ int amdgpu_sa_bo_new(struct amdgpu_sa_manager *sa_manager,
for (i = 0, count = 0; i < AMDGPU_SA_NUM_FENCE_LISTS; ++i)
if (fences[i])
- fences[count++] = fence_get(fences[i]);
+ fences[count++] = dma_fence_get(fences[i]);
if (count) {
spin_unlock(&sa_manager->wq.lock);
- t = fence_wait_any_timeout(fences, count, false,
- MAX_SCHEDULE_TIMEOUT);
+ t = dma_fence_wait_any_timeout(fences, count, false,
+ MAX_SCHEDULE_TIMEOUT,
+ NULL);
for (i = 0; i < count; ++i)
- fence_put(fences[i]);
+ dma_fence_put(fences[i]);
r = (t > 0) ? 0 : t;
spin_lock(&sa_manager->wq.lock);
@@ -384,7 +384,7 @@ int amdgpu_sa_bo_new(struct amdgpu_sa_manager *sa_manager,
}
void amdgpu_sa_bo_free(struct amdgpu_device *adev, struct amdgpu_sa_bo **sa_bo,
- struct fence *fence)
+ struct dma_fence *fence)
{
struct amdgpu_sa_manager *sa_manager;
@@ -394,10 +394,10 @@ void amdgpu_sa_bo_free(struct amdgpu_device *adev, struct amdgpu_sa_bo **sa_bo,
sa_manager = (*sa_bo)->manager;
spin_lock(&sa_manager->wq.lock);
- if (fence && !fence_is_signaled(fence)) {
+ if (fence && !dma_fence_is_signaled(fence)) {
uint32_t idx;
- (*sa_bo)->fence = fence_get(fence);
+ (*sa_bo)->fence = dma_fence_get(fence);
idx = fence->context % AMDGPU_SA_NUM_FENCE_LISTS;
list_add_tail(&(*sa_bo)->flist, &sa_manager->flist[idx]);
} else {
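dma_fence_wait_any_timeout() takes one more argument than the old fence_wait_any_timeout(): an optional out-parameter reporting the index of the first signaled fence. The suballocator passes NULL above because it only needs the wakeup; a caller that cares would use it like this (sketch; a and b are assumed to be held fence references):

	struct dma_fence *fences[2] = { a, b };
	uint32_t idx;
	signed long t;

	t = dma_fence_wait_any_timeout(fences, 2, false,
				       msecs_to_jiffies(100), &idx);
	if (t > 0)	/* idx is only valid when a fence actually signaled */
		pr_info("fence %u signaled first\n", idx);
	else if (t == 0)
		pr_info("timed out\n");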
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
index 5c8d3022fb87..ed814e6d0207 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
@@ -34,7 +34,7 @@
struct amdgpu_sync_entry {
struct hlist_node node;
- struct fence *fence;
+ struct dma_fence *fence;
};
static struct kmem_cache *amdgpu_sync_slab;
@@ -60,7 +60,8 @@ void amdgpu_sync_create(struct amdgpu_sync *sync)
*
* Test if the fence was issued by us.
*/
-static bool amdgpu_sync_same_dev(struct amdgpu_device *adev, struct fence *f)
+static bool amdgpu_sync_same_dev(struct amdgpu_device *adev,
+ struct dma_fence *f)
{
struct amd_sched_fence *s_fence = to_amd_sched_fence(f);
@@ -81,7 +82,7 @@ static bool amdgpu_sync_same_dev(struct amdgpu_device *adev, struct fence *f)
*
* Extract who originally created the fence.
*/
-static void *amdgpu_sync_get_owner(struct fence *f)
+static void *amdgpu_sync_get_owner(struct dma_fence *f)
{
struct amd_sched_fence *s_fence = to_amd_sched_fence(f);
@@ -99,13 +100,14 @@ static void *amdgpu_sync_get_owner(struct fence *f)
*
* Either keep the existing fence or the new one, depending which one is later.
*/
-static void amdgpu_sync_keep_later(struct fence **keep, struct fence *fence)
+static void amdgpu_sync_keep_later(struct dma_fence **keep,
+ struct dma_fence *fence)
{
- if (*keep && fence_is_later(*keep, fence))
+ if (*keep && dma_fence_is_later(*keep, fence))
return;
- fence_put(*keep);
- *keep = fence_get(fence);
+ dma_fence_put(*keep);
+ *keep = dma_fence_get(fence);
}
/**
@@ -117,7 +119,7 @@ static void amdgpu_sync_keep_later(struct fence **keep, struct fence *fence)
* Tries to add the fence to an existing hash entry. Returns true when an entry
* was found, false otherwise.
*/
-static bool amdgpu_sync_add_later(struct amdgpu_sync *sync, struct fence *f)
+static bool amdgpu_sync_add_later(struct amdgpu_sync *sync, struct dma_fence *f)
{
struct amdgpu_sync_entry *e;
@@ -139,7 +141,7 @@ static bool amdgpu_sync_add_later(struct amdgpu_sync *sync, struct fence *f)
*
*/
int amdgpu_sync_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync,
- struct fence *f)
+ struct dma_fence *f)
{
struct amdgpu_sync_entry *e;
@@ -158,7 +160,7 @@ int amdgpu_sync_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync,
return -ENOMEM;
hash_add(sync->fences, &e->node, f->context);
- e->fence = fence_get(f);
+ e->fence = dma_fence_get(f);
return 0;
}
@@ -177,7 +179,7 @@ int amdgpu_sync_resv(struct amdgpu_device *adev,
void *owner)
{
struct reservation_object_list *flist;
- struct fence *f;
+ struct dma_fence *f;
void *fence_owner;
unsigned i;
int r = 0;
@@ -231,15 +233,15 @@ int amdgpu_sync_resv(struct amdgpu_device *adev,
* Returns the next fence not signaled yet without removing it from the sync
* object.
*/
-struct fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync,
- struct amdgpu_ring *ring)
+struct dma_fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync,
+ struct amdgpu_ring *ring)
{
struct amdgpu_sync_entry *e;
struct hlist_node *tmp;
int i;
hash_for_each_safe(sync->fences, i, tmp, e, node) {
- struct fence *f = e->fence;
+ struct dma_fence *f = e->fence;
struct amd_sched_fence *s_fence = to_amd_sched_fence(f);
if (ring && s_fence) {
@@ -247,16 +249,16 @@ struct fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync,
* when they are scheduled.
*/
if (s_fence->sched == &ring->sched) {
- if (fence_is_signaled(&s_fence->scheduled))
+ if (dma_fence_is_signaled(&s_fence->scheduled))
continue;
return &s_fence->scheduled;
}
}
- if (fence_is_signaled(f)) {
+ if (dma_fence_is_signaled(f)) {
hash_del(&e->node);
- fence_put(f);
+ dma_fence_put(f);
kmem_cache_free(amdgpu_sync_slab, e);
continue;
}
@@ -274,11 +276,11 @@ struct fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync,
*
 * Gets and removes the next not-yet-signaled fence from the sync object.
*/
-struct fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync)
+struct dma_fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync)
{
struct amdgpu_sync_entry *e;
struct hlist_node *tmp;
- struct fence *f;
+ struct dma_fence *f;
int i;
hash_for_each_safe(sync->fences, i, tmp, e, node) {
@@ -288,10 +290,10 @@ struct fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync)
hash_del(&e->node);
kmem_cache_free(amdgpu_sync_slab, e);
- if (!fence_is_signaled(f))
+ if (!dma_fence_is_signaled(f))
return f;
- fence_put(f);
+ dma_fence_put(f);
}
return NULL;
}
@@ -311,11 +313,11 @@ void amdgpu_sync_free(struct amdgpu_sync *sync)
hash_for_each_safe(sync->fences, i, tmp, e, node) {
hash_del(&e->node);
- fence_put(e->fence);
+ dma_fence_put(e->fence);
kmem_cache_free(amdgpu_sync_slab, e);
}
- fence_put(sync->last_vm_update);
+ dma_fence_put(sync->last_vm_update);
}
/**
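For readers tracing the container logic: entries are hashed by the fence
context (see hash_add(sync->fences, &e->node, f->context) above), so an
amdgpu_sync holds at most one fence per context and a later fence from the
same context replaces the earlier one. A minimal sketch of that invariant,
assuming only the hashtable and dma-fence helpers already used in this file
(the example_* names are hypothetical, not part of the patch):

/* Sketch only: keep one fence per context, preferring the later one. */
struct example_entry {
	struct hlist_node node;
	struct dma_fence *fence;
};

static bool example_add_later(struct amdgpu_sync *sync, struct dma_fence *f)
{
	struct example_entry *e;

	/* Walk only the hash bucket for this fence context. */
	hash_for_each_possible(sync->fences, e, node, f->context) {
		if (e->fence->context != f->context)
			continue;

		/* Same context: keep whichever fence signals later. */
		if (dma_fence_is_later(f, e->fence)) {
			dma_fence_put(e->fence);
			e->fence = dma_fence_get(f);
		}
		return true;
	}
	return false;
}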
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.h
new file mode 100644
index 000000000000..605be266e07f
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.h
@@ -0,0 +1,56 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Christian König
+ */
+#ifndef __AMDGPU_SYNC_H__
+#define __AMDGPU_SYNC_H__
+
+#include <linux/hashtable.h>
+
+struct dma_fence;
+struct reservation_object;
+struct amdgpu_device;
+struct amdgpu_ring;
+
+/*
+ * Container for fences used to sync command submissions.
+ */
+struct amdgpu_sync {
+ DECLARE_HASHTABLE(fences, 4);
+ struct dma_fence *last_vm_update;
+};
+
+void amdgpu_sync_create(struct amdgpu_sync *sync);
+int amdgpu_sync_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync,
+ struct dma_fence *f);
+int amdgpu_sync_resv(struct amdgpu_device *adev,
+ struct amdgpu_sync *sync,
+ struct reservation_object *resv,
+ void *owner);
+struct dma_fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync,
+ struct amdgpu_ring *ring);
+struct dma_fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync);
+void amdgpu_sync_free(struct amdgpu_sync *sync);
+int amdgpu_sync_init(void);
+void amdgpu_sync_fini(void);
+
+#endif
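A hedged usage sketch of the interface declared above (not part of the
patch): collect all dependencies of a submission, then rely on the caller
to drain them. AMDGPU_FENCE_OWNER_UNDEFINED is assumed from the surrounding
amdgpu headers, and error handling is trimmed to the essentials:

/* Illustration only: gather implicit (reservation) and explicit fences. */
static int example_collect_deps(struct amdgpu_device *adev,
				struct amdgpu_sync *sync,
				struct reservation_object *resv,
				struct dma_fence *extra)
{
	int r;

	amdgpu_sync_create(sync);

	/* Every fence currently attached to the reservation object ... */
	r = amdgpu_sync_resv(adev, sync, resv, AMDGPU_FENCE_OWNER_UNDEFINED);
	if (r)
		goto err;

	/* ... plus one explicit dependency. */
	r = amdgpu_sync_fence(adev, sync, extra);
	if (r)
		goto err;

	return 0;

err:
	amdgpu_sync_free(sync);
	return r;
}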
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c
index b827c75e95de..e05a24325eeb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c
@@ -78,7 +78,7 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
void *gtt_map, *vram_map;
void **gtt_start, **gtt_end;
void **vram_start, **vram_end;
- struct fence *fence = NULL;
+ struct dma_fence *fence = NULL;
r = amdgpu_bo_create(adev, size, PAGE_SIZE, true,
AMDGPU_GEM_DOMAIN_GTT, 0, NULL,
@@ -118,13 +118,13 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
goto out_lclean_unpin;
}
- r = fence_wait(fence, false);
+ r = dma_fence_wait(fence, false);
if (r) {
DRM_ERROR("Failed to wait for GTT->VRAM fence %d\n", i);
goto out_lclean_unpin;
}
- fence_put(fence);
+ dma_fence_put(fence);
r = amdgpu_bo_kmap(vram_obj, &vram_map);
if (r) {
@@ -163,13 +163,13 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
goto out_lclean_unpin;
}
- r = fence_wait(fence, false);
+ r = dma_fence_wait(fence, false);
if (r) {
DRM_ERROR("Failed to wait for VRAM->GTT fence %d\n", i);
goto out_lclean_unpin;
}
- fence_put(fence);
+ dma_fence_put(fence);
r = amdgpu_bo_kmap(gtt_obj[i], &gtt_map);
if (r) {
@@ -216,7 +216,7 @@ out_lclean:
amdgpu_bo_unref(&gtt_obj[i]);
}
if (fence)
- fence_put(fence);
+ dma_fence_put(fence);
break;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
index 067e5e683bb3..bb964a8ff938 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
@@ -104,7 +104,7 @@ TRACE_EVENT(amdgpu_cs_ioctl,
__field(struct amdgpu_device *, adev)
__field(struct amd_sched_job *, sched_job)
__field(struct amdgpu_ib *, ib)
- __field(struct fence *, fence)
+ __field(struct dma_fence *, fence)
__field(char *, ring_name)
__field(u32, num_ibs)
),
@@ -129,7 +129,7 @@ TRACE_EVENT(amdgpu_sched_run_job,
__field(struct amdgpu_device *, adev)
__field(struct amd_sched_job *, sched_job)
__field(struct amdgpu_ib *, ib)
- __field(struct fence *, fence)
+ __field(struct dma_fence *, fence)
__field(char *, ring_name)
__field(u32, num_ibs)
),
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index dcaf691f56b5..8e35c1ff59e3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -34,7 +34,6 @@
#include <ttm/ttm_placement.h>
#include <ttm/ttm_module.h>
#include <ttm/ttm_page_alloc.h>
-#include <ttm/ttm_memory.h>
#include <drm/drmP.h>
#include <drm/amdgpu_drm.h>
#include <linux/seq_file.h>
@@ -51,16 +50,6 @@
static int amdgpu_ttm_debugfs_init(struct amdgpu_device *adev);
static void amdgpu_ttm_debugfs_fini(struct amdgpu_device *adev);
-static struct amdgpu_device *amdgpu_get_adev(struct ttm_bo_device *bdev)
-{
- struct amdgpu_mman *mman;
- struct amdgpu_device *adev;
-
- mman = container_of(bdev, struct amdgpu_mman, bdev);
- adev = container_of(mman, struct amdgpu_device, mman);
- return adev;
-}
-
/*
* Global memory.
@@ -75,7 +64,7 @@ static void amdgpu_ttm_mem_global_release(struct drm_global_reference *ref)
ttm_mem_global_release(ref->object);
}
-int amdgpu_ttm_global_init(struct amdgpu_device *adev)
+static int amdgpu_ttm_global_init(struct amdgpu_device *adev)
{
struct drm_global_reference *global_ref;
struct amdgpu_ring *ring;
@@ -150,7 +139,7 @@ static int amdgpu_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
{
struct amdgpu_device *adev;
- adev = amdgpu_get_adev(bdev);
+ adev = amdgpu_ttm_adev(bdev);
switch (type) {
case TTM_PL_SYSTEM:
@@ -168,7 +157,7 @@ static int amdgpu_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
break;
case TTM_PL_VRAM:
/* "On-card" video ram */
- man->func = &ttm_bo_manager_func;
+ man->func = &amdgpu_vram_mgr_func;
man->gpu_offset = adev->mc.vram_start;
man->flags = TTM_MEMTYPE_FLAG_FIXED |
TTM_MEMTYPE_FLAG_MAPPABLE;
@@ -195,6 +184,7 @@ static int amdgpu_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
struct ttm_placement *placement)
{
+ struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
struct amdgpu_bo *abo;
static struct ttm_place placements = {
.fpfn = 0,
@@ -213,7 +203,7 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
abo = container_of(bo, struct amdgpu_bo, tbo);
switch (bo->mem.mem_type) {
case TTM_PL_VRAM:
- if (abo->adev->mman.buffer_funcs_ring->ready == false) {
+ if (adev->mman.buffer_funcs_ring->ready == false) {
amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_CPU);
} else {
amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_GTT);
@@ -229,7 +219,7 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
* allocating address space for the BO.
*/
abo->placements[i].lpfn =
- abo->adev->mc.gtt_size >> PAGE_SHIFT;
+ adev->mc.gtt_size >> PAGE_SHIFT;
}
}
break;
@@ -260,63 +250,115 @@ static void amdgpu_move_null(struct ttm_buffer_object *bo,
new_mem->mm_node = NULL;
}
-static int amdgpu_move_blit(struct ttm_buffer_object *bo,
- bool evict, bool no_wait_gpu,
- struct ttm_mem_reg *new_mem,
- struct ttm_mem_reg *old_mem)
+static int amdgpu_mm_node_addr(struct ttm_buffer_object *bo,
+ struct drm_mm_node *mm_node,
+ struct ttm_mem_reg *mem,
+ uint64_t *addr)
{
- struct amdgpu_device *adev;
- struct amdgpu_ring *ring;
- uint64_t old_start, new_start;
- struct fence *fence;
int r;
- adev = amdgpu_get_adev(bo->bdev);
- ring = adev->mman.buffer_funcs_ring;
-
- switch (old_mem->mem_type) {
+ switch (mem->mem_type) {
case TTM_PL_TT:
- r = amdgpu_ttm_bind(bo, old_mem);
+ r = amdgpu_ttm_bind(bo, mem);
if (r)
return r;
case TTM_PL_VRAM:
- old_start = (u64)old_mem->start << PAGE_SHIFT;
- old_start += bo->bdev->man[old_mem->mem_type].gpu_offset;
+ *addr = mm_node->start << PAGE_SHIFT;
+ *addr += bo->bdev->man[mem->mem_type].gpu_offset;
break;
default:
- DRM_ERROR("Unknown placement %d\n", old_mem->mem_type);
+ DRM_ERROR("Unknown placement %d\n", mem->mem_type);
return -EINVAL;
}
- switch (new_mem->mem_type) {
- case TTM_PL_TT:
- r = amdgpu_ttm_bind(bo, new_mem);
- if (r)
- return r;
- case TTM_PL_VRAM:
- new_start = (u64)new_mem->start << PAGE_SHIFT;
- new_start += bo->bdev->man[new_mem->mem_type].gpu_offset;
- break;
- default:
- DRM_ERROR("Unknown placement %d\n", old_mem->mem_type);
- return -EINVAL;
- }
+ return 0;
+}
+
+static int amdgpu_move_blit(struct ttm_buffer_object *bo,
+ bool evict, bool no_wait_gpu,
+ struct ttm_mem_reg *new_mem,
+ struct ttm_mem_reg *old_mem)
+{
+ struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
+ struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
+
+ struct drm_mm_node *old_mm, *new_mm;
+ uint64_t old_start, old_size, new_start, new_size;
+ unsigned long num_pages;
+ struct dma_fence *fence = NULL;
+ int r;
+
+ BUILD_BUG_ON((PAGE_SIZE % AMDGPU_GPU_PAGE_SIZE) != 0);
+
if (!ring->ready) {
DRM_ERROR("Trying to move memory with ring turned off.\n");
return -EINVAL;
}
- BUILD_BUG_ON((PAGE_SIZE % AMDGPU_GPU_PAGE_SIZE) != 0);
+ old_mm = old_mem->mm_node;
+ r = amdgpu_mm_node_addr(bo, old_mm, old_mem, &old_start);
+ if (r)
+ return r;
+ old_size = old_mm->size;
+
- r = amdgpu_copy_buffer(ring, old_start, new_start,
- new_mem->num_pages * PAGE_SIZE, /* bytes */
- bo->resv, &fence, false);
+ new_mm = new_mem->mm_node;
+ r = amdgpu_mm_node_addr(bo, new_mm, new_mem, &new_start);
if (r)
return r;
+ new_size = new_mm->size;
+
+ num_pages = new_mem->num_pages;
+ while (num_pages) {
+ unsigned long cur_pages = min(old_size, new_size);
+ struct dma_fence *next;
+
+ r = amdgpu_copy_buffer(ring, old_start, new_start,
+ cur_pages * PAGE_SIZE,
+ bo->resv, &next, false);
+ if (r)
+ goto error;
+
+ dma_fence_put(fence);
+ fence = next;
+
+ num_pages -= cur_pages;
+ if (!num_pages)
+ break;
+
+ old_size -= cur_pages;
+ if (!old_size) {
+ r = amdgpu_mm_node_addr(bo, ++old_mm, old_mem,
+ &old_start);
+ if (r)
+ goto error;
+ old_size = old_mm->size;
+ } else {
+ old_start += cur_pages * PAGE_SIZE;
+ }
+
+ new_size -= cur_pages;
+ if (!new_size) {
+ r = amdgpu_mm_node_addr(bo, ++new_mm, new_mem,
+ &new_start);
+ if (r)
+ goto error;
+
+ new_size = new_mm->size;
+ } else {
+ new_start += cur_pages * PAGE_SIZE;
+ }
+ }
r = ttm_bo_pipeline_move(bo, fence, evict, new_mem);
- fence_put(fence);
+ dma_fence_put(fence);
+ return r;
+
+error:
+ if (fence)
+ dma_fence_wait(fence, false);
+ dma_fence_put(fence);
return r;
}
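The rewritten amdgpu_move_blit() above drives two independent cursors over
the source and destination drm_mm node lists, copying min(old, new) pages
per iteration and refreshing whichever cursor runs dry. A reduced sketch of
just the cursor advance, with the actual GPU copy stubbed out (example_*
names are hypothetical; node addresses are simplified to start << PAGE_SHIFT):

/* Sketch of the dual-cursor walk; no fences, no error paths. */
static void example_walk(struct drm_mm_node *old_mm,
			 struct drm_mm_node *new_mm,
			 unsigned long num_pages)
{
	uint64_t old_start = old_mm->start << PAGE_SHIFT;
	uint64_t new_start = new_mm->start << PAGE_SHIFT;
	unsigned long old_size = old_mm->size;
	unsigned long new_size = new_mm->size;

	while (num_pages) {
		unsigned long cur = min3(old_size, new_size, num_pages);

		/* example_copy(old_start, new_start, cur * PAGE_SIZE); */

		num_pages -= cur;
		if (!num_pages)
			break;

		old_size -= cur;
		if (!old_size) {
			++old_mm;	/* source node exhausted, advance */
			old_start = old_mm->start << PAGE_SHIFT;
			old_size = old_mm->size;
		} else {
			old_start += cur * PAGE_SIZE;
		}

		new_size -= cur;
		if (!new_size) {
			++new_mm;	/* destination node exhausted, advance */
			new_start = new_mm->start << PAGE_SHIFT;
			new_size = new_mm->size;
		} else {
			new_start += cur * PAGE_SIZE;
		}
	}
}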
@@ -332,7 +374,7 @@ static int amdgpu_move_vram_ram(struct ttm_buffer_object *bo,
struct ttm_placement placement;
int r;
- adev = amdgpu_get_adev(bo->bdev);
+ adev = amdgpu_ttm_adev(bo->bdev);
tmp_mem = *new_mem;
tmp_mem.mm_node = NULL;
placement.num_placement = 1;
@@ -379,7 +421,7 @@ static int amdgpu_move_ram_vram(struct ttm_buffer_object *bo,
struct ttm_place placements;
int r;
- adev = amdgpu_get_adev(bo->bdev);
+ adev = amdgpu_ttm_adev(bo->bdev);
tmp_mem = *new_mem;
tmp_mem.mm_node = NULL;
placement.num_placement = 1;
@@ -422,7 +464,7 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo,
if (WARN_ON_ONCE(abo->pin_count > 0))
return -EINVAL;
- adev = amdgpu_get_adev(bo->bdev);
+ adev = amdgpu_ttm_adev(bo->bdev);
/* remember the eviction */
if (evict)
@@ -475,7 +517,7 @@ memcpy:
static int amdgpu_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
- struct amdgpu_device *adev = amdgpu_get_adev(bdev);
+ struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
mem->bus.addr = NULL;
mem->bus.offset = 0;
@@ -607,7 +649,7 @@ release_pages:
/* prepare the sg table with the user pages */
static int amdgpu_ttm_tt_pin_userptr(struct ttm_tt *ttm)
{
- struct amdgpu_device *adev = amdgpu_get_adev(ttm->bdev);
+ struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev);
struct amdgpu_ttm_tt *gtt = (void *)ttm;
unsigned nents;
int r;
@@ -639,7 +681,7 @@ release_sg:
static void amdgpu_ttm_tt_unpin_userptr(struct ttm_tt *ttm)
{
- struct amdgpu_device *adev = amdgpu_get_adev(ttm->bdev);
+ struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev);
struct amdgpu_ttm_tt *gtt = (void *)ttm;
struct sg_page_iter sg_iter;
@@ -799,7 +841,7 @@ static struct ttm_tt *amdgpu_ttm_tt_create(struct ttm_bo_device *bdev,
struct amdgpu_device *adev;
struct amdgpu_ttm_tt *gtt;
- adev = amdgpu_get_adev(bdev);
+ adev = amdgpu_ttm_adev(bdev);
gtt = kzalloc(sizeof(struct amdgpu_ttm_tt), GFP_KERNEL);
if (gtt == NULL) {
@@ -843,7 +885,7 @@ static int amdgpu_ttm_tt_populate(struct ttm_tt *ttm)
return 0;
}
- adev = amdgpu_get_adev(ttm->bdev);
+ adev = amdgpu_ttm_adev(ttm->bdev);
#ifdef CONFIG_SWIOTLB
if (swiotlb_nr_tbl()) {
@@ -889,7 +931,7 @@ static void amdgpu_ttm_tt_unpopulate(struct ttm_tt *ttm)
if (slave)
return;
- adev = amdgpu_get_adev(ttm->bdev);
+ adev = amdgpu_ttm_adev(ttm->bdev);
#ifdef CONFIG_SWIOTLB
if (swiotlb_nr_tbl()) {
@@ -1012,7 +1054,7 @@ uint32_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm,
static void amdgpu_ttm_lru_removal(struct ttm_buffer_object *tbo)
{
- struct amdgpu_device *adev = amdgpu_get_adev(tbo->bdev);
+ struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev);
unsigned i, j;
for (i = 0; i < AMDGPU_TTM_LRU_SIZE; ++i) {
@@ -1029,7 +1071,7 @@ static void amdgpu_ttm_lru_removal(struct ttm_buffer_object *tbo)
static struct amdgpu_mman_lru *amdgpu_ttm_lru(struct ttm_buffer_object *tbo)
{
- struct amdgpu_device *adev = amdgpu_get_adev(tbo->bdev);
+ struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev);
unsigned log2_size = min(ilog2(tbo->num_pages),
AMDGPU_TTM_LRU_SIZE - 1);
@@ -1060,12 +1102,37 @@ static struct list_head *amdgpu_ttm_swap_lru_tail(struct ttm_buffer_object *tbo)
return res;
}
+static bool amdgpu_ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
+ const struct ttm_place *place)
+{
+ if (bo->mem.mem_type == TTM_PL_VRAM &&
+ bo->mem.start == AMDGPU_BO_INVALID_OFFSET) {
+ unsigned long num_pages = bo->mem.num_pages;
+ struct drm_mm_node *node = bo->mem.mm_node;
+
+ /* Check each drm MM node individually */
+ while (num_pages) {
+ if (place->fpfn < (node->start + node->size) &&
+ !(place->lpfn && place->lpfn <= node->start))
+ return true;
+
+ num_pages -= node->size;
+ ++node;
+ }
+
+ return false;
+ }
+
+ return ttm_bo_eviction_valuable(bo, place);
+}
+
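The eviction check above only declares moving a scattered VRAM BO valuable
if at least one backing node overlaps the requested window. The interval
test in isolation, as a sketch (lpfn == 0 meaning "no upper bound" follows
the TTM place convention):

/* Does node [start, start + size) intersect place [fpfn, lpfn)? */
static bool example_node_overlaps(const struct drm_mm_node *node,
				  const struct ttm_place *place)
{
	if (place->fpfn >= node->start + node->size)
		return false;	/* node ends before the window begins */
	if (place->lpfn && place->lpfn <= node->start)
		return false;	/* node begins after the window ends */
	return true;
}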
static struct ttm_bo_driver amdgpu_bo_driver = {
.ttm_tt_create = &amdgpu_ttm_tt_create,
.ttm_tt_populate = &amdgpu_ttm_tt_populate,
.ttm_tt_unpopulate = &amdgpu_ttm_tt_unpopulate,
.invalidate_caches = &amdgpu_invalidate_caches,
.init_mem_type = &amdgpu_init_mem_type,
+ .eviction_valuable = amdgpu_ttm_bo_eviction_valuable,
.evict_flags = &amdgpu_evict_flags,
.move = &amdgpu_bo_move,
.verify_access = &amdgpu_verify_access,
@@ -1083,6 +1150,10 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
unsigned i, j;
int r;
+ r = amdgpu_ttm_global_init(adev);
+ if (r)
+ return r;
/* No other users of the address space, so set it to 0 */
r = ttm_bo_device_init(&adev->mman.bdev,
adev->mman.bo_global_ref.ref.object,
@@ -1119,7 +1190,8 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
r = amdgpu_bo_create(adev, 256 * 1024, PAGE_SIZE, true,
AMDGPU_GEM_DOMAIN_VRAM,
- AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
+ AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
+ AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
NULL, NULL, &adev->stollen_vga_memory);
if (r) {
return r;
@@ -1247,7 +1319,7 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring,
uint64_t dst_offset,
uint32_t byte_count,
struct reservation_object *resv,
- struct fence **fence, bool direct_submit)
+ struct dma_fence **fence, bool direct_submit)
{
struct amdgpu_device *adev = ring->adev;
struct amdgpu_job *job;
@@ -1294,7 +1366,7 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring,
if (direct_submit) {
r = amdgpu_ib_schedule(ring, job->num_ibs, job->ibs,
NULL, NULL, fence);
- job->fence = fence_get(*fence);
+ job->fence = dma_fence_get(*fence);
if (r)
DRM_ERROR("Error scheduling IBs (%d)\n", r);
amdgpu_job_free(job);
@@ -1313,28 +1385,40 @@ error_free:
}
int amdgpu_fill_buffer(struct amdgpu_bo *bo,
- uint32_t src_data,
- struct reservation_object *resv,
- struct fence **fence)
+ uint32_t src_data,
+ struct reservation_object *resv,
+ struct dma_fence **fence)
{
- struct amdgpu_device *adev = bo->adev;
- struct amdgpu_job *job;
+ struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
+ uint32_t max_bytes = adev->mman.buffer_funcs->fill_max_bytes;
struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
- uint32_t max_bytes, byte_count;
- uint64_t dst_offset;
+ struct drm_mm_node *mm_node;
+ unsigned long num_pages;
unsigned int num_loops, num_dw;
- unsigned int i;
+
+ struct amdgpu_job *job;
int r;
- byte_count = bo->tbo.num_pages << PAGE_SHIFT;
- max_bytes = adev->mman.buffer_funcs->fill_max_bytes;
- num_loops = DIV_ROUND_UP(byte_count, max_bytes);
+ if (!ring->ready) {
+ DRM_ERROR("Trying to clear memory with ring turned off.\n");
+ return -EINVAL;
+ }
+
+ num_pages = bo->tbo.num_pages;
+ mm_node = bo->tbo.mem.mm_node;
+ num_loops = 0;
+ while (num_pages) {
+ uint32_t byte_count = mm_node->size << PAGE_SHIFT;
+
+ num_loops += DIV_ROUND_UP(byte_count, max_bytes);
+ num_pages -= mm_node->size;
+ ++mm_node;
+ }
num_dw = num_loops * adev->mman.buffer_funcs->fill_num_dw;
/* for IB padding */
- while (num_dw & 0x7)
- num_dw++;
+ num_dw += 64;
r = amdgpu_job_alloc_with_ib(adev, num_dw * 4, &job);
if (r)
@@ -1342,28 +1426,43 @@ int amdgpu_fill_buffer(struct amdgpu_bo *bo,
if (resv) {
r = amdgpu_sync_resv(adev, &job->sync, resv,
- AMDGPU_FENCE_OWNER_UNDEFINED);
+ AMDGPU_FENCE_OWNER_UNDEFINED);
if (r) {
DRM_ERROR("sync failed (%d).\n", r);
goto error_free;
}
}
- dst_offset = bo->tbo.mem.start << PAGE_SHIFT;
- for (i = 0; i < num_loops; i++) {
- uint32_t cur_size_in_bytes = min(byte_count, max_bytes);
+ num_pages = bo->tbo.num_pages;
+ mm_node = bo->tbo.mem.mm_node;
- amdgpu_emit_fill_buffer(adev, &job->ibs[0], src_data,
- dst_offset, cur_size_in_bytes);
+ while (num_pages) {
+ uint32_t byte_count = mm_node->size << PAGE_SHIFT;
+ uint64_t dst_addr;
- dst_offset += cur_size_in_bytes;
- byte_count -= cur_size_in_bytes;
+ r = amdgpu_mm_node_addr(&bo->tbo, mm_node,
+ &bo->tbo.mem, &dst_addr);
+ if (r)
+ return r;
+
+ while (byte_count) {
+ uint32_t cur_size_in_bytes = min(byte_count, max_bytes);
+
+ amdgpu_emit_fill_buffer(adev, &job->ibs[0], src_data,
+ dst_addr, cur_size_in_bytes);
+
+ dst_addr += cur_size_in_bytes;
+ byte_count -= cur_size_in_bytes;
+ }
+
+ num_pages -= mm_node->size;
+ ++mm_node;
}
amdgpu_ring_pad_ib(ring, &job->ibs[0]);
WARN_ON(job->ibs[0].length_dw > num_dw);
r = amdgpu_job_submit(job, ring, &adev->mman.entity,
- AMDGPU_FENCE_OWNER_UNDEFINED, fence);
+ AMDGPU_FENCE_OWNER_UNDEFINED, fence);
if (r)
goto error_free;
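amdgpu_fill_buffer() above now sizes the IB with a first pass over the
drm_mm nodes before emitting the fill commands in a second pass. A
condensed sketch of the sizing pass (fill_max_bytes is taken from the
buffer funcs exactly as in the patch; example_* is hypothetical):

/* One fill command per max_bytes-sized chunk of every node. */
static unsigned example_count_loops(struct drm_mm_node *mm_node,
				    unsigned long num_pages,
				    uint32_t max_bytes)
{
	unsigned num_loops = 0;

	while (num_pages) {
		uint64_t byte_count = (uint64_t)mm_node->size << PAGE_SHIFT;

		num_loops += DIV_ROUND_UP(byte_count, max_bytes);
		num_pages -= mm_node->size;
		++mm_node;
	}
	return num_loops;
}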
@@ -1412,7 +1511,7 @@ static const struct drm_info_list amdgpu_ttm_debugfs_list[] = {
static ssize_t amdgpu_ttm_vram_read(struct file *f, char __user *buf,
size_t size, loff_t *pos)
{
- struct amdgpu_device *adev = f->f_inode->i_private;
+ struct amdgpu_device *adev = file_inode(f)->i_private;
ssize_t result = 0;
int r;
@@ -1456,7 +1555,7 @@ static const struct file_operations amdgpu_ttm_vram_fops = {
static ssize_t amdgpu_ttm_gtt_read(struct file *f, char __user *buf,
size_t size, loff_t *pos)
{
- struct amdgpu_device *adev = f->f_inode->i_private;
+ struct amdgpu_device *adev = file_inode(f)->i_private;
ssize_t result = 0;
int r;
@@ -1554,8 +1653,3 @@ static void amdgpu_ttm_debugfs_fini(struct amdgpu_device *adev)
#endif
}
-
-u64 amdgpu_ttm_get_gtt_mem_size(struct amdgpu_device *adev)
-{
- return ttm_get_kernel_zone_memory_size(adev->mman.mem_global_ref.object);
-}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
index 9812c805326c..98ee384f0fca 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
@@ -66,6 +66,7 @@ struct amdgpu_mman {
};
extern const struct ttm_mem_type_manager_func amdgpu_gtt_mgr_func;
+extern const struct ttm_mem_type_manager_func amdgpu_vram_mgr_func;
int amdgpu_gtt_mgr_alloc(struct ttm_mem_type_manager *man,
struct ttm_buffer_object *tbo,
@@ -77,11 +78,11 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring,
uint64_t dst_offset,
uint32_t byte_count,
struct reservation_object *resv,
- struct fence **fence, bool direct_submit);
+ struct dma_fence **fence, bool direct_submit);
int amdgpu_fill_buffer(struct amdgpu_bo *bo,
uint32_t src_data,
struct reservation_object *resv,
- struct fence **fence);
+ struct dma_fence **fence);
int amdgpu_mmap(struct file *filp, struct vm_area_struct *vma);
bool amdgpu_ttm_is_bound(struct ttm_tt *ttm);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
index cb3d252f3c78..0f0b38191fac 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
@@ -228,6 +228,9 @@ static int amdgpu_ucode_init_single_fw(struct amdgpu_firmware_info *ucode,
ucode->mc_addr = mc_addr;
ucode->kaddr = kptr;
+ if (ucode->ucode_id == AMDGPU_UCODE_ID_STORAGE)
+ return 0;
+
header = (const struct common_firmware_header *)ucode->fw->data;
memcpy(ucode->kaddr, (void *)((uint8_t *)ucode->fw->data +
le32_to_cpu(header->ucode_array_offset_bytes)),
@@ -236,6 +239,31 @@ static int amdgpu_ucode_init_single_fw(struct amdgpu_firmware_info *ucode,
return 0;
}
+static int amdgpu_ucode_patch_jt(struct amdgpu_firmware_info *ucode,
+ uint64_t mc_addr, void *kptr)
+{
+ const struct gfx_firmware_header_v1_0 *header = NULL;
+ const struct common_firmware_header *comm_hdr = NULL;
+ uint8_t *src_addr = NULL;
+ uint8_t *dst_addr = NULL;
+
+ if (!ucode->fw)
+ return 0;
+
+ comm_hdr = (const struct common_firmware_header *)ucode->fw->data;
+ header = (const struct gfx_firmware_header_v1_0 *)ucode->fw->data;
+ dst_addr = ucode->kaddr +
+ ALIGN(le32_to_cpu(comm_hdr->ucode_size_bytes),
+ PAGE_SIZE);
+ src_addr = (uint8_t *)ucode->fw->data +
+ le32_to_cpu(comm_hdr->ucode_array_offset_bytes) +
+ (le32_to_cpu(header->jt_offset) * 4);
+ memcpy(dst_addr, src_addr, le32_to_cpu(header->jt_size) * 4);
+
+ return 0;
+}
+
+
int amdgpu_ucode_init_bo(struct amdgpu_device *adev)
{
struct amdgpu_bo **bo = &adev->firmware.fw_buf;
@@ -247,7 +275,8 @@ int amdgpu_ucode_init_bo(struct amdgpu_device *adev)
const struct common_firmware_header *header = NULL;
err = amdgpu_bo_create(adev, adev->firmware.fw_size, PAGE_SIZE, true,
- AMDGPU_GEM_DOMAIN_GTT, 0, NULL, NULL, bo);
+ amdgpu_sriov_vf(adev) ? AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT,
+ 0, NULL, NULL, bo);
if (err) {
dev_err(adev->dev, "(%d) Firmware buffer allocate failed\n", err);
goto failed;
@@ -259,7 +288,8 @@ int amdgpu_ucode_init_bo(struct amdgpu_device *adev)
goto failed_reserve;
}
- err = amdgpu_bo_pin(*bo, AMDGPU_GEM_DOMAIN_GTT, &fw_mc_addr);
+ err = amdgpu_bo_pin(*bo, amdgpu_sriov_vf(adev) ? AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT,
+ &fw_mc_addr);
if (err) {
dev_err(adev->dev, "(%d) Firmware buffer pin failed\n", err);
goto failed_pin;
@@ -279,6 +309,13 @@ int amdgpu_ucode_init_bo(struct amdgpu_device *adev)
header = (const struct common_firmware_header *)ucode->fw->data;
amdgpu_ucode_init_single_fw(ucode, fw_mc_addr + fw_offset,
fw_buf_ptr + fw_offset);
+ if (i == AMDGPU_UCODE_ID_CP_MEC1) {
+ const struct gfx_firmware_header_v1_0 *cp_hdr;
+ cp_hdr = (const struct gfx_firmware_header_v1_0 *)ucode->fw->data;
+ amdgpu_ucode_patch_jt(ucode, fw_mc_addr + fw_offset,
+ fw_buf_ptr + fw_offset);
+ fw_offset += ALIGN(le32_to_cpu(cp_hdr->jt_size) << 2, PAGE_SIZE);
+ }
fw_offset += ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
}
}
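The CP MEC jump-table patching above lands the JT in the firmware BO right
after the page-aligned ucode image. The offset arithmetic restated as a
sketch (header layout per gfx_firmware_header_v1_0 as used in the patch;
jt_offset and jt_size are in dwords):

/* Destination: BO base + ALIGN(ucode size); source: fw image + JT. */
static void example_jt_copy(const struct common_firmware_header *comm_hdr,
			    const struct gfx_firmware_header_v1_0 *header,
			    const uint8_t *fw_data, uint8_t *bo_kaddr)
{
	uint8_t *dst = bo_kaddr +
		ALIGN(le32_to_cpu(comm_hdr->ucode_size_bytes), PAGE_SIZE);
	const uint8_t *src = fw_data +
		le32_to_cpu(comm_hdr->ucode_array_offset_bytes) +
		le32_to_cpu(header->jt_offset) * 4;

	memcpy(dst, src, le32_to_cpu(header->jt_size) * 4);
}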
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
index e468be4e28fa..a8a4230729f9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
@@ -130,6 +130,7 @@ enum AMDGPU_UCODE_ID {
AMDGPU_UCODE_ID_CP_MEC1,
AMDGPU_UCODE_ID_CP_MEC2,
AMDGPU_UCODE_ID_RLC_G,
+ AMDGPU_UCODE_ID_STORAGE,
AMDGPU_UCODE_ID_MAXIMUM,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
index e3281cacc586..a81dfaeeb8c0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
@@ -333,7 +333,7 @@ void amdgpu_uvd_free_handles(struct amdgpu_device *adev, struct drm_file *filp)
for (i = 0; i < adev->uvd.max_handles; ++i) {
uint32_t handle = atomic_read(&adev->uvd.handles[i]);
if (handle != 0 && adev->uvd.filp[i] == filp) {
- struct fence *fence;
+ struct dma_fence *fence;
r = amdgpu_uvd_get_destroy_msg(ring, handle,
false, &fence);
@@ -342,8 +342,8 @@ void amdgpu_uvd_free_handles(struct amdgpu_device *adev, struct drm_file *filp)
continue;
}
- fence_wait(fence, false);
- fence_put(fence);
+ dma_fence_wait(fence, false);
+ dma_fence_put(fence);
adev->uvd.filp[i] = NULL;
atomic_set(&adev->uvd.handles[i], 0);
@@ -360,6 +360,18 @@ static void amdgpu_uvd_force_into_uvd_segment(struct amdgpu_bo *abo)
}
}
+static u64 amdgpu_uvd_get_addr_from_ctx(struct amdgpu_uvd_cs_ctx *ctx)
+{
+ uint32_t lo, hi;
+ uint64_t addr;
+
+ lo = amdgpu_get_ib_value(ctx->parser, ctx->ib_idx, ctx->data0);
+ hi = amdgpu_get_ib_value(ctx->parser, ctx->ib_idx, ctx->data1);
+ addr = ((uint64_t)lo) | (((uint64_t)hi) << 32);
+
+ return addr;
+}
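The helper above merely reassembles a 64-bit GPU address from the two
32-bit dwords that UVD command buffers carry in data0/data1; the same
composition in isolation:

/* lo occupies bits 0-31, hi bits 32-63. */
static inline uint64_t example_make_addr(uint32_t lo, uint32_t hi)
{
	return ((uint64_t)lo) | (((uint64_t)hi) << 32);
}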
+
/**
* amdgpu_uvd_cs_pass1 - first parsing round
*
@@ -372,14 +384,10 @@ static int amdgpu_uvd_cs_pass1(struct amdgpu_uvd_cs_ctx *ctx)
{
struct amdgpu_bo_va_mapping *mapping;
struct amdgpu_bo *bo;
- uint32_t cmd, lo, hi;
- uint64_t addr;
+ uint32_t cmd;
+ uint64_t addr = amdgpu_uvd_get_addr_from_ctx(ctx);
int r = 0;
- lo = amdgpu_get_ib_value(ctx->parser, ctx->ib_idx, ctx->data0);
- hi = amdgpu_get_ib_value(ctx->parser, ctx->ib_idx, ctx->data1);
- addr = ((uint64_t)lo) | (((uint64_t)hi) << 32);
-
mapping = amdgpu_cs_find_mapping(ctx->parser, addr, &bo);
if (mapping == NULL) {
DRM_ERROR("Can't find BO for addr 0x%08Lx\n", addr);
@@ -698,18 +706,16 @@ static int amdgpu_uvd_cs_pass2(struct amdgpu_uvd_cs_ctx *ctx)
{
struct amdgpu_bo_va_mapping *mapping;
struct amdgpu_bo *bo;
- uint32_t cmd, lo, hi;
+ uint32_t cmd;
uint64_t start, end;
- uint64_t addr;
+ uint64_t addr = amdgpu_uvd_get_addr_from_ctx(ctx);
int r;
- lo = amdgpu_get_ib_value(ctx->parser, ctx->ib_idx, ctx->data0);
- hi = amdgpu_get_ib_value(ctx->parser, ctx->ib_idx, ctx->data1);
- addr = ((uint64_t)lo) | (((uint64_t)hi) << 32);
-
mapping = amdgpu_cs_find_mapping(ctx->parser, addr, &bo);
- if (mapping == NULL)
+ if (mapping == NULL) {
+ DRM_ERROR("Can't find BO for addr 0x%08Lx\n", addr);
return -EINVAL;
+ }
start = amdgpu_bo_gpu_offset(bo);
@@ -876,6 +882,9 @@ int amdgpu_uvd_ring_parse_cs(struct amdgpu_cs_parser *parser, uint32_t ib_idx)
struct amdgpu_ib *ib = &parser->job->ibs[ib_idx];
int r;
+ parser->job->vm = NULL;
+ ib->gpu_addr = amdgpu_sa_bo_gpu_addr(ib->sa_bo);
+
if (ib->length_dw % 16) {
DRM_ERROR("UVD IB length (%d) not 16 dwords aligned!\n",
ib->length_dw);
@@ -890,10 +899,13 @@ int amdgpu_uvd_ring_parse_cs(struct amdgpu_cs_parser *parser, uint32_t ib_idx)
ctx.buf_sizes = buf_sizes;
ctx.ib_idx = ib_idx;
- /* first round, make sure the buffers are actually in the UVD segment */
- r = amdgpu_uvd_cs_packets(&ctx, amdgpu_uvd_cs_pass1);
- if (r)
- return r;
+ /* first round only required on chips without UVD 64 bit address support */
+ if (!parser->adev->uvd.address_64_bit) {
+ /* first round, make sure the buffers are actually in the UVD segment */
+ r = amdgpu_uvd_cs_packets(&ctx, amdgpu_uvd_cs_pass1);
+ if (r)
+ return r;
+ }
/* second round, patch buffer addresses into the command stream */
r = amdgpu_uvd_cs_packets(&ctx, amdgpu_uvd_cs_pass2);
@@ -909,14 +921,14 @@ int amdgpu_uvd_ring_parse_cs(struct amdgpu_cs_parser *parser, uint32_t ib_idx)
}
static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
- bool direct, struct fence **fence)
+ bool direct, struct dma_fence **fence)
{
struct ttm_validate_buffer tv;
struct ww_acquire_ctx ticket;
struct list_head head;
struct amdgpu_job *job;
struct amdgpu_ib *ib;
- struct fence *f = NULL;
+ struct dma_fence *f = NULL;
struct amdgpu_device *adev = ring->adev;
uint64_t addr;
int i, r;
@@ -931,7 +943,7 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
if (r)
return r;
- if (!bo->adev->uvd.address_64_bit) {
+ if (!ring->adev->uvd.address_64_bit) {
amdgpu_ttm_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_VRAM);
amdgpu_uvd_force_into_uvd_segment(bo);
}
@@ -960,7 +972,7 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
if (direct) {
r = amdgpu_ib_schedule(ring, 1, ib, NULL, NULL, &f);
- job->fence = fence_get(f);
+ job->fence = dma_fence_get(f);
if (r)
goto err_free;
@@ -975,9 +987,9 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
ttm_eu_fence_buffer_objects(&ticket, &head, f);
if (fence)
- *fence = fence_get(f);
+ *fence = dma_fence_get(f);
amdgpu_bo_unref(&bo);
- fence_put(f);
+ dma_fence_put(f);
return 0;
@@ -993,7 +1005,7 @@ err:
crash the vcpu so just try to emit a dummy create/destroy msg to
avoid this */
int amdgpu_uvd_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
- struct fence **fence)
+ struct dma_fence **fence)
{
struct amdgpu_device *adev = ring->adev;
struct amdgpu_bo *bo;
@@ -1002,7 +1014,8 @@ int amdgpu_uvd_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
r = amdgpu_bo_create(adev, 1024, PAGE_SIZE, true,
AMDGPU_GEM_DOMAIN_VRAM,
- AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
+ AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
+ AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
NULL, NULL, &bo);
if (r)
return r;
@@ -1042,7 +1055,7 @@ int amdgpu_uvd_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
}
int amdgpu_uvd_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
- bool direct, struct fence **fence)
+ bool direct, struct dma_fence **fence)
{
struct amdgpu_device *adev = ring->adev;
struct amdgpu_bo *bo;
@@ -1051,7 +1064,8 @@ int amdgpu_uvd_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
r = amdgpu_bo_create(adev, 1024, PAGE_SIZE, true,
AMDGPU_GEM_DOMAIN_VRAM,
- AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
+ AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
+ AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
NULL, NULL, &bo);
if (r)
return r;
@@ -1128,7 +1142,7 @@ void amdgpu_uvd_ring_end_use(struct amdgpu_ring *ring)
*/
int amdgpu_uvd_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
- struct fence *fence;
+ struct dma_fence *fence;
long r;
r = amdgpu_uvd_get_create_msg(ring, 1, NULL);
@@ -1143,7 +1157,7 @@ int amdgpu_uvd_ring_test_ib(struct amdgpu_ring *ring, long timeout)
goto error;
}
- r = fence_wait_timeout(fence, false, timeout);
+ r = dma_fence_wait_timeout(fence, false, timeout);
if (r == 0) {
DRM_ERROR("amdgpu: IB test timed out.\n");
r = -ETIMEDOUT;
@@ -1154,7 +1168,7 @@ int amdgpu_uvd_ring_test_ib(struct amdgpu_ring *ring, long timeout)
r = 0;
}
- fence_put(fence);
+ dma_fence_put(fence);
error:
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h
index c850009602d1..6249ba1bde2a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h
@@ -29,9 +29,9 @@ int amdgpu_uvd_sw_fini(struct amdgpu_device *adev);
int amdgpu_uvd_suspend(struct amdgpu_device *adev);
int amdgpu_uvd_resume(struct amdgpu_device *adev);
int amdgpu_uvd_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
- struct fence **fence);
+ struct dma_fence **fence);
int amdgpu_uvd_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
- bool direct, struct fence **fence);
+ bool direct, struct dma_fence **fence);
void amdgpu_uvd_free_handles(struct amdgpu_device *adev,
struct drm_file *filp);
int amdgpu_uvd_ring_parse_cs(struct amdgpu_cs_parser *parser, uint32_t ib_idx);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
index 7fe8fd884f06..69b66b9e7f57 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
@@ -157,7 +157,8 @@ int amdgpu_vce_sw_init(struct amdgpu_device *adev, unsigned long size)
r = amdgpu_bo_create(adev, size, PAGE_SIZE, true,
AMDGPU_GEM_DOMAIN_VRAM,
- AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
+ AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
+ AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
NULL, NULL, &adev->vce.vcpu_bo);
if (r) {
dev_err(adev->dev, "(%d) failed to allocate VCE bo\n", r);
@@ -395,12 +396,12 @@ void amdgpu_vce_free_handles(struct amdgpu_device *adev, struct drm_file *filp)
* Open up a stream for HW test
*/
int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
- struct fence **fence)
+ struct dma_fence **fence)
{
const unsigned ib_size_dw = 1024;
struct amdgpu_job *job;
struct amdgpu_ib *ib;
- struct fence *f = NULL;
+ struct dma_fence *f = NULL;
uint64_t dummy;
int i, r;
@@ -450,14 +451,14 @@ int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
ib->ptr[i] = 0x0;
r = amdgpu_ib_schedule(ring, 1, ib, NULL, NULL, &f);
- job->fence = fence_get(f);
+ job->fence = dma_fence_get(f);
if (r)
goto err;
amdgpu_job_free(job);
if (fence)
- *fence = fence_get(f);
- fence_put(f);
+ *fence = dma_fence_get(f);
+ dma_fence_put(f);
return 0;
err:
@@ -476,12 +477,12 @@ err:
* Close up a stream for HW test or if userspace failed to do so
*/
int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
- bool direct, struct fence **fence)
+ bool direct, struct dma_fence **fence)
{
const unsigned ib_size_dw = 1024;
struct amdgpu_job *job;
struct amdgpu_ib *ib;
- struct fence *f = NULL;
+ struct dma_fence *f = NULL;
int i, r;
r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
@@ -513,7 +514,7 @@ int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
if (direct) {
r = amdgpu_ib_schedule(ring, 1, ib, NULL, NULL, &f);
- job->fence = fence_get(f);
+ job->fence = dma_fence_get(f);
if (r)
goto err;
@@ -526,8 +527,8 @@ int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
}
if (fence)
- *fence = fence_get(f);
- fence_put(f);
+ *fence = dma_fence_get(f);
+ dma_fence_put(f);
return 0;
err:
@@ -641,6 +642,9 @@ int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx)
uint32_t *size = &tmp;
int i, r, idx = 0;
+ p->job->vm = NULL;
+ ib->gpu_addr = amdgpu_sa_bo_gpu_addr(ib->sa_bo);
+
r = amdgpu_cs_sysvm_access_required(p);
if (r)
return r;
@@ -788,6 +792,96 @@ out:
}
/**
+ * amdgpu_vce_ring_parse_cs_vm - parse the command stream in VM mode
+ *
+ * @p: parser context
+ * @ib_idx: index of the IB to parse
+ *
+ */
+int amdgpu_vce_ring_parse_cs_vm(struct amdgpu_cs_parser *p, uint32_t ib_idx)
+{
+ struct amdgpu_ib *ib = &p->job->ibs[ib_idx];
+ int session_idx = -1;
+ uint32_t destroyed = 0;
+ uint32_t created = 0;
+ uint32_t allocated = 0;
+ uint32_t tmp, handle = 0;
+ int i, r = 0, idx = 0;
+
+ while (idx < ib->length_dw) {
+ uint32_t len = amdgpu_get_ib_value(p, ib_idx, idx);
+ uint32_t cmd = amdgpu_get_ib_value(p, ib_idx, idx + 1);
+
+ if ((len < 8) || (len & 3)) {
+ DRM_ERROR("invalid VCE command length (%d)!\n", len);
+ r = -EINVAL;
+ goto out;
+ }
+
+ switch (cmd) {
+ case 0x00000001: /* session */
+ handle = amdgpu_get_ib_value(p, ib_idx, idx + 2);
+ session_idx = amdgpu_vce_validate_handle(p, handle,
+ &allocated);
+ if (session_idx < 0) {
+ r = session_idx;
+ goto out;
+ }
+ break;
+
+ case 0x01000001: /* create */
+ created |= 1 << session_idx;
+ if (destroyed & (1 << session_idx)) {
+ destroyed &= ~(1 << session_idx);
+ allocated |= 1 << session_idx;
+
+ } else if (!(allocated & (1 << session_idx))) {
+ DRM_ERROR("Handle already in use!\n");
+ r = -EINVAL;
+ goto out;
+ }
+
+ break;
+
+ case 0x02000001: /* destroy */
+ destroyed |= 1 << session_idx;
+ break;
+
+ default:
+ break;
+ }
+
+ if (session_idx == -1) {
+ DRM_ERROR("no session command at start of IB\n");
+ r = -EINVAL;
+ goto out;
+ }
+
+ idx += len / 4;
+ }
+
+ if (allocated & ~created) {
+ DRM_ERROR("New session without create command!\n");
+ r = -ENOENT;
+ }
+
+out:
+ if (!r) {
+ /* No error, free all destroyed handle slots */
+ tmp = destroyed;
+ amdgpu_ib_free(p->adev, ib, NULL);
+ } else {
+ /* Error during parsing, free all allocated handle slots */
+ tmp = allocated;
+ }
+
+ for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i)
+ if (tmp & (1 << i))
+ atomic_set(&p->adev->vce.handles[i], 0);
+
+ return r;
+}
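The VM-mode parser above tracks VCE sessions purely with bitmasks; a
reduced model of the created/destroyed/allocated bookkeeping (session
lookup elided, example_* hypothetical, command opcodes as in the patch):

/* A create following a destroy in the same IB re-allocates the slot. */
static int example_track_session(uint32_t *created, uint32_t *destroyed,
				 uint32_t *allocated, int session_idx,
				 uint32_t cmd)
{
	uint32_t bit = 1u << session_idx;

	switch (cmd) {
	case 0x01000001:			/* create */
		*created |= bit;
		if (*destroyed & bit) {
			*destroyed &= ~bit;	/* destroy + create: reuse */
			*allocated |= bit;
		} else if (!(*allocated & bit)) {
			return -EINVAL;		/* handle already in use */
		}
		break;
	case 0x02000001:			/* destroy */
		*destroyed |= bit;
		break;
	default:
		break;
	}
	return 0;
}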
+
+/**
* amdgpu_vce_ring_emit_ib - execute indirect buffer
*
* @ring: engine to use
@@ -823,18 +917,6 @@ void amdgpu_vce_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
amdgpu_ring_write(ring, VCE_CMD_END);
}
-unsigned amdgpu_vce_ring_get_emit_ib_size(struct amdgpu_ring *ring)
-{
- return
- 4; /* amdgpu_vce_ring_emit_ib */
-}
-
-unsigned amdgpu_vce_ring_get_dma_frame_size(struct amdgpu_ring *ring)
-{
- return
- 6; /* amdgpu_vce_ring_emit_fence x1 no user fence */
-}
-
/**
* amdgpu_vce_ring_test_ring - test if VCE ring is working
*
@@ -883,7 +965,7 @@ int amdgpu_vce_ring_test_ring(struct amdgpu_ring *ring)
*/
int amdgpu_vce_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
- struct fence *fence = NULL;
+ struct dma_fence *fence = NULL;
long r;
/* skip vce ring1/2 ib test for now, since it's not reliable */
@@ -902,7 +984,7 @@ int amdgpu_vce_ring_test_ib(struct amdgpu_ring *ring, long timeout)
goto error;
}
- r = fence_wait_timeout(fence, false, timeout);
+ r = dma_fence_wait_timeout(fence, false, timeout);
if (r == 0) {
DRM_ERROR("amdgpu: IB test timed out.\n");
r = -ETIMEDOUT;
@@ -913,6 +995,6 @@ int amdgpu_vce_ring_test_ib(struct amdgpu_ring *ring, long timeout)
r = 0;
}
error:
- fence_put(fence);
+ dma_fence_put(fence);
return r;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h
index 12729d2852df..d98041f7508d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h
@@ -29,11 +29,12 @@ int amdgpu_vce_sw_fini(struct amdgpu_device *adev);
int amdgpu_vce_suspend(struct amdgpu_device *adev);
int amdgpu_vce_resume(struct amdgpu_device *adev);
int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
- struct fence **fence);
+ struct dma_fence **fence);
int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
- bool direct, struct fence **fence);
+ bool direct, struct dma_fence **fence);
void amdgpu_vce_free_handles(struct amdgpu_device *adev, struct drm_file *filp);
int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx);
+int amdgpu_vce_ring_parse_cs_vm(struct amdgpu_cs_parser *p, uint32_t ib_idx);
void amdgpu_vce_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib,
unsigned vm_id, bool ctx_switch);
void amdgpu_vce_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 968c4260d7a7..1dda9321bd5a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -25,7 +25,7 @@
* Alex Deucher
* Jerome Glisse
*/
-#include <linux/fence-array.h>
+#include <linux/dma-fence-array.h>
#include <drm/drmP.h>
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
@@ -116,38 +116,43 @@ void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm,
}
/**
- * amdgpu_vm_get_bos - add the vm BOs to a duplicates list
+ * amdgpu_vm_validate_pt_bos - validate the page table BOs
*
* @adev: amdgpu device pointer
* @vm: vm providing the BOs
- * @duplicates: head of duplicates list
+ * @validate: callback to do the validation
+ * @param: parameter for the validation callback
*
- * Add the page directory to the BO duplicates list
- * for command submission.
+ * Validate the page table BOs on command submission if necessary.
*/
-void amdgpu_vm_get_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
- struct list_head *duplicates)
+int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+ int (*validate)(void *p, struct amdgpu_bo *bo),
+ void *param)
{
uint64_t num_evictions;
unsigned i;
+ int r;
/* We only need to validate the page tables
* if they aren't already valid.
*/
num_evictions = atomic64_read(&adev->num_evictions);
if (num_evictions == vm->last_eviction_counter)
- return;
+ return 0;
/* add the vm page table to the list */
for (i = 0; i <= vm->max_pde_used; ++i) {
- struct amdgpu_bo_list_entry *entry = &vm->page_tables[i].entry;
+ struct amdgpu_bo *bo = vm->page_tables[i].bo;
- if (!entry->robj)
+ if (!bo)
continue;
- list_add(&entry->tv.head, duplicates);
+ r = validate(param, bo);
+ if (r)
+ return r;
}
+ return 0;
}
/**
@@ -166,12 +171,12 @@ void amdgpu_vm_move_pt_bos_in_lru(struct amdgpu_device *adev,
spin_lock(&glob->lru_lock);
for (i = 0; i <= vm->max_pde_used; ++i) {
- struct amdgpu_bo_list_entry *entry = &vm->page_tables[i].entry;
+ struct amdgpu_bo *bo = vm->page_tables[i].bo;
- if (!entry->robj)
+ if (!bo)
continue;
- ttm_bo_move_to_lru_tail(&entry->robj->tbo);
+ ttm_bo_move_to_lru_tail(&bo->tbo);
}
spin_unlock(&glob->lru_lock);
}
@@ -194,14 +199,14 @@ static bool amdgpu_vm_is_gpu_reset(struct amdgpu_device *adev,
* Allocate an id for the vm, adding fences to the sync obj as necessary.
*/
int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
- struct amdgpu_sync *sync, struct fence *fence,
+ struct amdgpu_sync *sync, struct dma_fence *fence,
struct amdgpu_job *job)
{
struct amdgpu_device *adev = ring->adev;
uint64_t fence_context = adev->fence_context + ring->idx;
- struct fence *updates = sync->last_vm_update;
+ struct dma_fence *updates = sync->last_vm_update;
struct amdgpu_vm_id *id, *idle;
- struct fence **fences;
+ struct dma_fence **fences;
unsigned i;
int r = 0;
@@ -225,17 +230,17 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
if (&idle->list == &adev->vm_manager.ids_lru) {
u64 fence_context = adev->vm_manager.fence_context + ring->idx;
unsigned seqno = ++adev->vm_manager.seqno[ring->idx];
- struct fence_array *array;
+ struct dma_fence_array *array;
unsigned j;
for (j = 0; j < i; ++j)
- fence_get(fences[j]);
+ dma_fence_get(fences[j]);
- array = fence_array_create(i, fences, fence_context,
+ array = dma_fence_array_create(i, fences, fence_context,
seqno, true);
if (!array) {
for (j = 0; j < i; ++j)
- fence_put(fences[j]);
+ dma_fence_put(fences[j]);
kfree(fences);
r = -ENOMEM;
goto error;
@@ -243,7 +248,7 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
r = amdgpu_sync_fence(ring->adev, sync, &array->base);
- fence_put(&array->base);
+ dma_fence_put(&array->base);
if (r)
goto error;
@@ -257,7 +262,7 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
/* Check if we can use a VMID already assigned to this VM */
i = ring->idx;
do {
- struct fence *flushed;
+ struct dma_fence *flushed;
id = vm->ids[i++];
if (i == AMDGPU_MAX_RINGS)
@@ -279,12 +284,12 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
continue;
if (id->last_flush->context != fence_context &&
- !fence_is_signaled(id->last_flush))
+ !dma_fence_is_signaled(id->last_flush))
continue;
flushed = id->flushed_updates;
if (updates &&
- (!flushed || fence_is_later(updates, flushed)))
+ (!flushed || dma_fence_is_later(updates, flushed)))
continue;
/* Good we can use this VMID. Remember this submission as
@@ -315,14 +320,14 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
if (r)
goto error;
- fence_put(id->first);
- id->first = fence_get(fence);
+ dma_fence_put(id->first);
+ id->first = dma_fence_get(fence);
- fence_put(id->last_flush);
+ dma_fence_put(id->last_flush);
id->last_flush = NULL;
- fence_put(id->flushed_updates);
- id->flushed_updates = fence_get(updates);
+ dma_fence_put(id->flushed_updates);
+ id->flushed_updates = dma_fence_get(updates);
id->pd_gpu_addr = job->vm_pd_addr;
id->current_gpu_reset_count = atomic_read(&adev->gpu_reset_counter);
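The VMID grab above now aggregates the idle per-ring fences into a
dma_fence_array before syncing on it. A minimal sketch of that
construction, assuming <linux/dma-fence-array.h>: on success the array
takes ownership of the fences[] storage and of the references taken here,
on failure we unwind both:

/* Build one fence that signals when any (signal_on_any=true) input does. */
static struct dma_fence *example_make_array(struct dma_fence **fences,
					    unsigned count, u64 context,
					    unsigned seqno)
{
	struct dma_fence_array *array;
	unsigned j;

	for (j = 0; j < count; ++j)
		dma_fence_get(fences[j]);

	array = dma_fence_array_create(count, fences, context, seqno, true);
	if (!array) {
		for (j = 0; j < count; ++j)
			dma_fence_put(fences[j]);
		kfree(fences);
		return NULL;
	}
	return &array->base;
}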
@@ -341,9 +346,9 @@ error:
static bool amdgpu_vm_ring_has_compute_vm_bug(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
- const struct amdgpu_ip_block_version *ip_block;
+ const struct amdgpu_ip_block *ip_block;
- if (ring->type != AMDGPU_RING_TYPE_COMPUTE)
+ if (ring->funcs->type != AMDGPU_RING_TYPE_COMPUTE)
/* only compute rings */
return false;
@@ -351,10 +356,10 @@ static bool amdgpu_vm_ring_has_compute_vm_bug(struct amdgpu_ring *ring)
if (!ip_block)
return false;
- if (ip_block->major <= 7) {
+ if (ip_block->version->major <= 7) {
/* gfx7 has no workaround */
return true;
- } else if (ip_block->major == 8) {
+ } else if (ip_block->version->major == 8) {
if (adev->gfx.mec_fw_version >= 673)
/* gfx8 is fixed in MEC firmware 673 */
return false;
@@ -393,7 +398,7 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job)
if (ring->funcs->emit_vm_flush && (job->vm_needs_flush ||
amdgpu_vm_is_gpu_reset(adev, id))) {
- struct fence *fence;
+ struct dma_fence *fence;
trace_amdgpu_vm_flush(job->vm_pd_addr, ring->idx, job->vm_id);
amdgpu_ring_emit_vm_flush(ring, job->vm_id, job->vm_pd_addr);
@@ -403,7 +408,7 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job)
return r;
mutex_lock(&adev->vm_manager.lock);
- fence_put(id->last_flush);
+ dma_fence_put(id->last_flush);
id->last_flush = fence;
mutex_unlock(&adev->vm_manager.lock);
}
@@ -525,70 +530,6 @@ static void amdgpu_vm_do_copy_ptes(struct amdgpu_pte_update_params *params,
}
/**
- * amdgpu_vm_clear_bo - initially clear the page dir/table
- *
- * @adev: amdgpu_device pointer
- * @bo: bo to clear
- *
- * need to reserve bo first before calling it.
- */
-static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
- struct amdgpu_vm *vm,
- struct amdgpu_bo *bo)
-{
- struct amdgpu_ring *ring;
- struct fence *fence = NULL;
- struct amdgpu_job *job;
- struct amdgpu_pte_update_params params;
- unsigned entries;
- uint64_t addr;
- int r;
-
- ring = container_of(vm->entity.sched, struct amdgpu_ring, sched);
-
- r = reservation_object_reserve_shared(bo->tbo.resv);
- if (r)
- return r;
-
- r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
- if (r)
- goto error;
-
- r = amdgpu_ttm_bind(&bo->tbo, &bo->tbo.mem);
- if (r)
- goto error;
-
- addr = amdgpu_bo_gpu_offset(bo);
- entries = amdgpu_bo_size(bo) / 8;
-
- r = amdgpu_job_alloc_with_ib(adev, 64, &job);
- if (r)
- goto error;
-
- memset(&params, 0, sizeof(params));
- params.adev = adev;
- params.ib = &job->ibs[0];
- amdgpu_vm_do_set_ptes(&params, addr, 0, entries, 0, 0);
- amdgpu_ring_pad_ib(ring, &job->ibs[0]);
-
- WARN_ON(job->ibs[0].length_dw > 64);
- r = amdgpu_job_submit(job, ring, &vm->entity,
- AMDGPU_FENCE_OWNER_VM, &fence);
- if (r)
- goto error_free;
-
- amdgpu_bo_fence(bo, fence, true);
- fence_put(fence);
- return 0;
-
-error_free:
- amdgpu_job_free(job);
-
-error:
- return r;
-}
-
-/**
* amdgpu_vm_map_gart - Resolve gart mapping of addr
*
* @pages_addr: optional DMA address to use for lookup
@@ -612,32 +553,35 @@ static uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr)
return result;
}
-static int amdgpu_vm_update_pd_or_shadow(struct amdgpu_device *adev,
- struct amdgpu_vm *vm,
- bool shadow)
+/**
+ * amdgpu_vm_update_page_directory - make sure that page directory is valid
+ *
+ * @adev: amdgpu_device pointer
+ * @vm: requested vm
+ *
+ * Allocates new page tables if necessary
+ * and updates the page directory.
+ * Returns 0 for success, error for failure.
+ */
+int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
+ struct amdgpu_vm *vm)
{
+ struct amdgpu_bo *shadow;
struct amdgpu_ring *ring;
- struct amdgpu_bo *pd = shadow ? vm->page_directory->shadow :
- vm->page_directory;
- uint64_t pd_addr;
+ uint64_t pd_addr, shadow_addr;
uint32_t incr = AMDGPU_VM_PTE_COUNT * 8;
- uint64_t last_pde = ~0, last_pt = ~0;
+ uint64_t last_pde = ~0, last_pt = ~0, last_shadow = ~0;
unsigned count = 0, pt_idx, ndw;
struct amdgpu_job *job;
struct amdgpu_pte_update_params params;
- struct fence *fence = NULL;
+ struct dma_fence *fence = NULL;
int r;
- if (!pd)
- return 0;
-
- r = amdgpu_ttm_bind(&pd->tbo, &pd->tbo.mem);
- if (r)
- return r;
-
- pd_addr = amdgpu_bo_gpu_offset(pd);
ring = container_of(vm->entity.sched, struct amdgpu_ring, sched);
+ shadow = vm->page_directory->shadow;
/* padding, etc. */
ndw = 64;
@@ -645,6 +589,17 @@ static int amdgpu_vm_update_pd_or_shadow(struct amdgpu_device *adev,
/* assume the worst case */
ndw += vm->max_pde_used * 6;
+ pd_addr = amdgpu_bo_gpu_offset(vm->page_directory);
+ if (shadow) {
+ r = amdgpu_ttm_bind(&shadow->tbo, &shadow->tbo.mem);
+ if (r)
+ return r;
+ shadow_addr = amdgpu_bo_gpu_offset(shadow);
+ ndw *= 2;
+ } else {
+ shadow_addr = 0;
+ }
+
r = amdgpu_job_alloc_with_ib(adev, ndw * 4, &job);
if (r)
return r;
@@ -655,30 +610,26 @@ static int amdgpu_vm_update_pd_or_shadow(struct amdgpu_device *adev,
/* walk over the address space and update the page directory */
for (pt_idx = 0; pt_idx <= vm->max_pde_used; ++pt_idx) {
- struct amdgpu_bo *bo = vm->page_tables[pt_idx].entry.robj;
+ struct amdgpu_bo *bo = vm->page_tables[pt_idx].bo;
uint64_t pde, pt;
if (bo == NULL)
continue;
if (bo->shadow) {
- struct amdgpu_bo *shadow = bo->shadow;
+ struct amdgpu_bo *pt_shadow = bo->shadow;
- r = amdgpu_ttm_bind(&shadow->tbo, &shadow->tbo.mem);
+ r = amdgpu_ttm_bind(&pt_shadow->tbo,
+ &pt_shadow->tbo.mem);
if (r)
return r;
}
pt = amdgpu_bo_gpu_offset(bo);
- if (!shadow) {
- if (vm->page_tables[pt_idx].addr == pt)
- continue;
- vm->page_tables[pt_idx].addr = pt;
- } else {
- if (vm->page_tables[pt_idx].shadow_addr == pt)
- continue;
- vm->page_tables[pt_idx].shadow_addr = pt;
- }
+ if (vm->page_tables[pt_idx].addr == pt)
+ continue;
+
+ vm->page_tables[pt_idx].addr = pt;
pde = pd_addr + pt_idx * 8;
if (((last_pde + 8 * count) != pde) ||
@@ -686,6 +637,13 @@ static int amdgpu_vm_update_pd_or_shadow(struct amdgpu_device *adev,
(count == AMDGPU_VM_MAX_UPDATE_SIZE)) {
if (count) {
+ if (shadow)
+ amdgpu_vm_do_set_ptes(&params,
+ last_shadow,
+ last_pt, count,
+ incr,
+ AMDGPU_PTE_VALID);
+
amdgpu_vm_do_set_ptes(&params, last_pde,
last_pt, count, incr,
AMDGPU_PTE_VALID);
@@ -693,34 +651,44 @@ static int amdgpu_vm_update_pd_or_shadow(struct amdgpu_device *adev,
count = 1;
last_pde = pde;
+ last_shadow = shadow_addr + pt_idx * 8;
last_pt = pt;
} else {
++count;
}
}
- if (count)
+ if (count) {
+ if (vm->page_directory->shadow)
+ amdgpu_vm_do_set_ptes(&params, last_shadow, last_pt,
+ count, incr, AMDGPU_PTE_VALID);
+
amdgpu_vm_do_set_ptes(&params, last_pde, last_pt,
count, incr, AMDGPU_PTE_VALID);
+ }
- if (params.ib->length_dw != 0) {
- amdgpu_ring_pad_ib(ring, params.ib);
- amdgpu_sync_resv(adev, &job->sync, pd->tbo.resv,
+ if (params.ib->length_dw == 0) {
+ amdgpu_job_free(job);
+ return 0;
+ }
+
+ amdgpu_ring_pad_ib(ring, params.ib);
+ amdgpu_sync_resv(adev, &job->sync, vm->page_directory->tbo.resv,
+ AMDGPU_FENCE_OWNER_VM);
+ if (shadow)
+ amdgpu_sync_resv(adev, &job->sync, shadow->tbo.resv,
AMDGPU_FENCE_OWNER_VM);
- WARN_ON(params.ib->length_dw > ndw);
- r = amdgpu_job_submit(job, ring, &vm->entity,
- AMDGPU_FENCE_OWNER_VM, &fence);
- if (r)
- goto error_free;
- amdgpu_bo_fence(pd, fence, true);
- fence_put(vm->page_directory_fence);
- vm->page_directory_fence = fence_get(fence);
- fence_put(fence);
+ WARN_ON(params.ib->length_dw > ndw);
+ r = amdgpu_job_submit(job, ring, &vm->entity,
+ AMDGPU_FENCE_OWNER_VM, &fence);
+ if (r)
+ goto error_free;
- } else {
- amdgpu_job_free(job);
- }
+ amdgpu_bo_fence(vm->page_directory, fence, true);
+ dma_fence_put(vm->page_directory_fence);
+ vm->page_directory_fence = dma_fence_get(fence);
+ dma_fence_put(fence);
return 0;
@@ -729,29 +697,6 @@ error_free:
return r;
}
-/*
- * amdgpu_vm_update_pdes - make sure that page directory is valid
- *
- * @adev: amdgpu_device pointer
- * @vm: requested vm
- * @start: start of GPU address range
- * @end: end of GPU address range
- *
- * Allocates new page tables if necessary
- * and updates the page directory.
- * Returns 0 for success, error for failure.
- */
-int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
- struct amdgpu_vm *vm)
-{
- int r;
-
- r = amdgpu_vm_update_pd_or_shadow(adev, vm, true);
- if (r)
- return r;
- return amdgpu_vm_update_pd_or_shadow(adev, vm, false);
-}
-
/**
* amdgpu_vm_update_ptes - make sure that page tables are valid
*
@@ -781,11 +726,11 @@ static void amdgpu_vm_update_ptes(struct amdgpu_pte_update_params *params,
/* initialize the variables */
addr = start;
pt_idx = addr >> amdgpu_vm_block_size;
- pt = vm->page_tables[pt_idx].entry.robj;
+ pt = vm->page_tables[pt_idx].bo;
if (params->shadow) {
if (!pt->shadow)
return;
- pt = vm->page_tables[pt_idx].entry.robj->shadow;
+ pt = pt->shadow;
}
if ((addr & ~mask) == (end & ~mask))
nptes = end - addr;
@@ -804,11 +749,11 @@ static void amdgpu_vm_update_ptes(struct amdgpu_pte_update_params *params,
/* walk over the address space and update the page tables */
while (addr < end) {
pt_idx = addr >> amdgpu_vm_block_size;
- pt = vm->page_tables[pt_idx].entry.robj;
+ pt = vm->page_tables[pt_idx].bo;
if (params->shadow) {
if (!pt->shadow)
return;
- pt = vm->page_tables[pt_idx].entry.robj->shadow;
+ pt = pt->shadow;
}
if ((addr & ~mask) == (end & ~mask))
@@ -929,20 +874,20 @@ static void amdgpu_vm_frag_ptes(struct amdgpu_pte_update_params *params,
* Returns 0 for success, -EINVAL for failure.
*/
static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
- struct fence *exclusive,
+ struct dma_fence *exclusive,
uint64_t src,
dma_addr_t *pages_addr,
struct amdgpu_vm *vm,
uint64_t start, uint64_t last,
uint32_t flags, uint64_t addr,
- struct fence **fence)
+ struct dma_fence **fence)
{
struct amdgpu_ring *ring;
void *owner = AMDGPU_FENCE_OWNER_VM;
unsigned nptes, ncmds, ndw;
struct amdgpu_job *job;
struct amdgpu_pte_update_params params;
- struct fence *f = NULL;
+ struct dma_fence *f = NULL;
int r;
memset(&params, 0, sizeof(params));
@@ -1045,10 +990,10 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
amdgpu_bo_fence(vm->page_directory, f, true);
if (fence) {
- fence_put(*fence);
- *fence = fence_get(f);
+ dma_fence_put(*fence);
+ *fence = dma_fence_get(f);
}
- fence_put(f);
+ dma_fence_put(f);
return 0;
error_free:
@@ -1065,8 +1010,8 @@ error_free:
* @pages_addr: DMA addresses to use for mapping
* @vm: requested vm
* @mapping: mapped range and flags to use for the update
- * @addr: addr to set the area to
* @flags: HW flags for the mapping
+ * @nodes: array of drm_mm_nodes with the MC addresses
* @fence: optional resulting fence
*
* Split the mapping into smaller chunks so that each update fits
@@ -1074,17 +1019,16 @@ error_free:
* Returns 0 for success, -EINVAL for failure.
*/
static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
- struct fence *exclusive,
+ struct dma_fence *exclusive,
uint32_t gtt_flags,
dma_addr_t *pages_addr,
struct amdgpu_vm *vm,
struct amdgpu_bo_va_mapping *mapping,
- uint32_t flags, uint64_t addr,
- struct fence **fence)
+ uint32_t flags,
+ struct drm_mm_node *nodes,
+ struct dma_fence **fence)
{
- const uint64_t max_size = 64ULL * 1024ULL * 1024ULL / AMDGPU_GPU_PAGE_SIZE;
-
- uint64_t src = 0, start = mapping->it.start;
+ uint64_t pfn, src = 0, start = mapping->it.start;
int r;
/* normally, bo_va->flags only contains READABLE and WRITEABLE bits go here
@@ -1097,23 +1041,40 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
trace_amdgpu_vm_bo_update(mapping);
- if (pages_addr) {
- if (flags == gtt_flags)
- src = adev->gart.table_addr + (addr >> 12) * 8;
- addr = 0;
+ pfn = mapping->offset >> PAGE_SHIFT;
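+ /* advance to the drm_mm node containing the first page of the
+ * mapping; pfn becomes the page offset inside that node */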
+ if (nodes) {
+ while (pfn >= nodes->size) {
+ pfn -= nodes->size;
+ ++nodes;
+ }
}
- addr += mapping->offset;
- if (!pages_addr || src)
- return amdgpu_vm_bo_update_mapping(adev, exclusive,
- src, pages_addr, vm,
- start, mapping->it.last,
- flags, addr, fence);
+ do {
+ uint64_t max_entries;
+ uint64_t addr, last;
- while (start != mapping->it.last + 1) {
- uint64_t last;
+ if (nodes) {
+ addr = nodes->start << PAGE_SHIFT;
+ max_entries = (nodes->size - pfn) *
+ (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE);
+ } else {
+ addr = 0;
+ max_entries = S64_MAX;
+ }
- last = min((uint64_t)mapping->it.last, start + max_size - 1);
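+ /* GTT BOs either get their PTEs copied straight out of the GART
+ * table (when the flags match) or are mapped via the per-page DMA
+ * addresses, capped at 16K entries per update */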
+ if (pages_addr) {
+ if (flags == gtt_flags)
+ src = adev->gart.table_addr +
+ (addr >> AMDGPU_GPU_PAGE_SHIFT) * 8;
+ else
+ max_entries = min(max_entries, 16ull * 1024ull);
+ addr = 0;
+ } else if (flags & AMDGPU_PTE_VALID) {
+ addr += adev->vm_manager.vram_base_offset;
+ }
+ addr += pfn << PAGE_SHIFT;
+
+ last = min((uint64_t)mapping->it.last, start + max_entries - 1);
r = amdgpu_vm_bo_update_mapping(adev, exclusive,
src, pages_addr, vm,
start, last, flags, addr,
@@ -1121,9 +1082,14 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
if (r)
return r;
+ pfn += last - start + 1;
+ if (nodes && nodes->size == pfn) {
+ pfn = 0;
+ ++nodes;
+ }
start = last + 1;
- addr += max_size * AMDGPU_GPU_PAGE_SIZE;
- }
+
+ } while (unlikely(start != mapping->it.last + 1));
return 0;
}
@@ -1147,40 +1113,30 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
dma_addr_t *pages_addr = NULL;
uint32_t gtt_flags, flags;
struct ttm_mem_reg *mem;
- struct fence *exclusive;
- uint64_t addr;
+ struct drm_mm_node *nodes;
+ struct dma_fence *exclusive;
int r;
if (clear) {
mem = NULL;
- addr = 0;
+ nodes = NULL;
exclusive = NULL;
} else {
struct ttm_dma_tt *ttm;
mem = &bo_va->bo->tbo.mem;
- addr = (u64)mem->start << PAGE_SHIFT;
- switch (mem->mem_type) {
- case TTM_PL_TT:
+ nodes = mem->mm_node;
+ if (mem->mem_type == TTM_PL_TT) {
ttm = container_of(bo_va->bo->tbo.ttm, struct
ttm_dma_tt, ttm);
pages_addr = ttm->dma_address;
- break;
-
- case TTM_PL_VRAM:
- addr += adev->vm_manager.vram_base_offset;
- break;
-
- default:
- break;
}
-
exclusive = reservation_object_get_excl(bo_va->bo->tbo.resv);
}
flags = amdgpu_ttm_tt_pte_flags(adev, bo_va->bo->tbo.ttm, mem);
gtt_flags = (amdgpu_ttm_is_bound(bo_va->bo->tbo.ttm) &&
- adev == bo_va->bo->adev) ? flags : 0;
+ adev == amdgpu_ttm_adev(bo_va->bo->tbo.bdev)) ? flags : 0;
spin_lock(&vm->status_lock);
if (!list_empty(&bo_va->vm_status))
@@ -1190,7 +1146,7 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
list_for_each_entry(mapping, &bo_va->invalids, list) {
r = amdgpu_vm_bo_split_mapping(adev, exclusive,
gtt_flags, pages_addr, vm,
- mapping, flags, addr,
+ mapping, flags, nodes,
&bo_va->last_pt_update);
if (r)
return r;
@@ -1405,18 +1361,18 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
/* walk over the address space and allocate the page tables */
for (pt_idx = saddr; pt_idx <= eaddr; ++pt_idx) {
struct reservation_object *resv = vm->page_directory->tbo.resv;
- struct amdgpu_bo_list_entry *entry;
struct amdgpu_bo *pt;
- entry = &vm->page_tables[pt_idx].entry;
- if (entry->robj)
+ if (vm->page_tables[pt_idx].bo)
continue;
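+ /* VRAM_CLEARED zeroes the new page table at allocation time, so no
+ * separate clear step is needed anymore */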
r = amdgpu_bo_create(adev, AMDGPU_VM_PTE_COUNT * 8,
AMDGPU_GPU_PAGE_SIZE, true,
AMDGPU_GEM_DOMAIN_VRAM,
AMDGPU_GEM_CREATE_NO_CPU_ACCESS |
- AMDGPU_GEM_CREATE_SHADOW,
+ AMDGPU_GEM_CREATE_SHADOW |
+ AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS |
+ AMDGPU_GEM_CREATE_VRAM_CLEARED,
NULL, resv, &pt);
if (r)
goto error_free;
@@ -1426,27 +1382,7 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
*/
pt->parent = amdgpu_bo_ref(vm->page_directory);
- r = amdgpu_vm_clear_bo(adev, vm, pt);
- if (r) {
- amdgpu_bo_unref(&pt->shadow);
- amdgpu_bo_unref(&pt);
- goto error_free;
- }
-
- if (pt->shadow) {
- r = amdgpu_vm_clear_bo(adev, vm, pt->shadow);
- if (r) {
- amdgpu_bo_unref(&pt->shadow);
- amdgpu_bo_unref(&pt);
- goto error_free;
- }
- }
-
- entry->robj = pt;
- entry->priority = 0;
- entry->tv.bo = &entry->robj->tbo;
- entry->tv.shared = true;
- entry->user_pages = NULL;
+ vm->page_tables[pt_idx].bo = pt;
vm->page_tables[pt_idx].addr = 0;
}
@@ -1547,7 +1483,7 @@ void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
kfree(mapping);
}
- fence_put(bo_va->last_pt_update);
+ dma_fence_put(bo_va->last_pt_update);
kfree(bo_va);
}
@@ -1626,7 +1562,9 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
r = amdgpu_bo_create(adev, pd_size, align, true,
AMDGPU_GEM_DOMAIN_VRAM,
AMDGPU_GEM_CREATE_NO_CPU_ACCESS |
- AMDGPU_GEM_CREATE_SHADOW,
+ AMDGPU_GEM_CREATE_SHADOW |
+ AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS |
+ AMDGPU_GEM_CREATE_VRAM_CLEARED,
NULL, NULL, &vm->page_directory);
if (r)
goto error_free_sched_entity;
@@ -1635,24 +1573,11 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
if (r)
goto error_free_page_directory;
- r = amdgpu_vm_clear_bo(adev, vm, vm->page_directory);
- if (r)
- goto error_unreserve;
-
- if (vm->page_directory->shadow) {
- r = amdgpu_vm_clear_bo(adev, vm, vm->page_directory->shadow);
- if (r)
- goto error_unreserve;
- }
-
vm->last_eviction_counter = atomic64_read(&adev->num_evictions);
amdgpu_bo_unreserve(vm->page_directory);
return 0;
-error_unreserve:
- amdgpu_bo_unreserve(vm->page_directory);
-
error_free_page_directory:
amdgpu_bo_unref(&vm->page_directory->shadow);
amdgpu_bo_unref(&vm->page_directory);
@@ -1697,7 +1622,7 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
}
for (i = 0; i < amdgpu_vm_num_pdes(adev); i++) {
- struct amdgpu_bo *pt = vm->page_tables[i].entry.robj;
+ struct amdgpu_bo *pt = vm->page_tables[i].bo;
if (!pt)
continue;
@@ -1709,7 +1634,7 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
amdgpu_bo_unref(&vm->page_directory->shadow);
amdgpu_bo_unref(&vm->page_directory);
- fence_put(vm->page_directory_fence);
+ dma_fence_put(vm->page_directory_fence);
}
/**
@@ -1733,7 +1658,8 @@ void amdgpu_vm_manager_init(struct amdgpu_device *adev)
&adev->vm_manager.ids_lru);
}
- adev->vm_manager.fence_context = fence_context_alloc(AMDGPU_MAX_RINGS);
+ adev->vm_manager.fence_context =
+ dma_fence_context_alloc(AMDGPU_MAX_RINGS);
for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
adev->vm_manager.seqno[i] = 0;
@@ -1755,9 +1681,9 @@ void amdgpu_vm_manager_fini(struct amdgpu_device *adev)
for (i = 0; i < AMDGPU_NUM_VM; ++i) {
struct amdgpu_vm_id *id = &adev->vm_manager.ids[i];
- fence_put(adev->vm_manager.ids[i].first);
+ dma_fence_put(adev->vm_manager.ids[i].first);
amdgpu_sync_free(&adev->vm_manager.ids[i].active);
- fence_put(id->flushed_updates);
- fence_put(id->last_flush);
+ dma_fence_put(id->flushed_updates);
+ dma_fence_put(id->last_flush);
}
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
new file mode 100644
index 000000000000..adbc2f5e5c7f
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
@@ -0,0 +1,205 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Christian König
+ */
+#ifndef __AMDGPU_VM_H__
+#define __AMDGPU_VM_H__
+
+#include <linux/rbtree.h>
+
+#include "gpu_scheduler.h"
+#include "amdgpu_sync.h"
+#include "amdgpu_ring.h"
+
+struct amdgpu_bo_va;
+struct amdgpu_job;
+struct amdgpu_bo_list_entry;
+
+/*
+ * GPUVM handling
+ */
+
+/* maximum number of VMIDs */
+#define AMDGPU_NUM_VM 16
+
+/* Maximum number of PTEs the hardware can write with one command */
+#define AMDGPU_VM_MAX_UPDATE_SIZE 0x3FFFF
+
+/* number of entries in page table */
+#define AMDGPU_VM_PTE_COUNT (1 << amdgpu_vm_block_size)
+
+/* PTBs (Page Table Blocks) need to be aligned to 32K */
+#define AMDGPU_VM_PTB_ALIGN_SIZE 32768
+
+/* LOG2 number of continuous pages for the fragment field */
+#define AMDGPU_LOG2_PAGES_PER_FRAG 4
+
+#define AMDGPU_PTE_VALID (1 << 0)
+#define AMDGPU_PTE_SYSTEM (1 << 1)
+#define AMDGPU_PTE_SNOOPED (1 << 2)
+
+/* VI only */
+#define AMDGPU_PTE_EXECUTABLE (1 << 4)
+
+#define AMDGPU_PTE_READABLE (1 << 5)
+#define AMDGPU_PTE_WRITEABLE (1 << 6)
+
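+/* bits 7..11 of a PTE hold the log2 fragment size in pages */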
+#define AMDGPU_PTE_FRAG(x) ((x & 0x1f) << 7)
+
+/* How to program VM fault handling */
+#define AMDGPU_VM_FAULT_STOP_NEVER 0
+#define AMDGPU_VM_FAULT_STOP_FIRST 1
+#define AMDGPU_VM_FAULT_STOP_ALWAYS 2
+
+struct amdgpu_vm_pt {
+ struct amdgpu_bo *bo;
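+ /* last address written into the parent PDE, used to skip redundant
+ * directory updates */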
+ uint64_t addr;
+};
+
+struct amdgpu_vm {
+ /* tree of virtual addresses mapped */
+ struct rb_root va;
+
+ /* protecting invalidated */
+ spinlock_t status_lock;
+
+ /* BOs moved, but not yet updated in the PT */
+ struct list_head invalidated;
+
+ /* BOs cleared in the PT because of a move */
+ struct list_head cleared;
+
+ /* BO mappings freed, but not yet updated in the PT */
+ struct list_head freed;
+
+ /* contains the page directory */
+ struct amdgpu_bo *page_directory;
+ unsigned max_pde_used;
+ struct dma_fence *page_directory_fence;
+ uint64_t last_eviction_counter;
+
+ /* array of page tables, one for each page directory entry */
+ struct amdgpu_vm_pt *page_tables;
+
+ /* for id and flush management per ring */
+ struct amdgpu_vm_id *ids[AMDGPU_MAX_RINGS];
+
+ /* protecting freed */
+ spinlock_t freed_lock;
+
+ /* Scheduler entity for page table updates */
+ struct amd_sched_entity entity;
+
+ /* client id */
+ u64 client_id;
+};
+
+struct amdgpu_vm_id {
+ struct list_head list;
+ struct dma_fence *first;
+ struct amdgpu_sync active;
+ struct dma_fence *last_flush;
+ atomic64_t owner;
+
+ uint64_t pd_gpu_addr;
+ /* last flushed PD/PT update */
+ struct dma_fence *flushed_updates;
+
+ uint32_t current_gpu_reset_count;
+
+ uint32_t gds_base;
+ uint32_t gds_size;
+ uint32_t gws_base;
+ uint32_t gws_size;
+ uint32_t oa_base;
+ uint32_t oa_size;
+};
+
+struct amdgpu_vm_manager {
+ /* Handling of VMIDs */
+ struct mutex lock;
+ unsigned num_ids;
+ struct list_head ids_lru;
+ struct amdgpu_vm_id ids[AMDGPU_NUM_VM];
+
+ /* Handling of VM fences */
+ u64 fence_context;
+ unsigned seqno[AMDGPU_MAX_RINGS];
+
+ uint32_t max_pfn;
+ /* vram base address for page table entry */
+ u64 vram_base_offset;
+ /* is vm enabled? */
+ bool enabled;
+ /* vm pte handling */
+ const struct amdgpu_vm_pte_funcs *vm_pte_funcs;
+ struct amdgpu_ring *vm_pte_rings[AMDGPU_MAX_RINGS];
+ unsigned vm_pte_num_rings;
+ atomic_t vm_pte_next_ring;
+ /* client id counter */
+ atomic64_t client_counter;
+};
+
+void amdgpu_vm_manager_init(struct amdgpu_device *adev);
+void amdgpu_vm_manager_fini(struct amdgpu_device *adev);
+int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm);
+void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm);
+void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm,
+ struct list_head *validated,
+ struct amdgpu_bo_list_entry *entry);
+int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+ int (*callback)(void *p, struct amdgpu_bo *bo),
+ void *param);
+void amdgpu_vm_move_pt_bos_in_lru(struct amdgpu_device *adev,
+ struct amdgpu_vm *vm);
+int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
+ struct amdgpu_sync *sync, struct dma_fence *fence,
+ struct amdgpu_job *job);
+int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job);
+void amdgpu_vm_reset_id(struct amdgpu_device *adev, unsigned vm_id);
+int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
+ struct amdgpu_vm *vm);
+int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
+ struct amdgpu_vm *vm);
+int amdgpu_vm_clear_invalids(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+ struct amdgpu_sync *sync);
+int amdgpu_vm_bo_update(struct amdgpu_device *adev,
+ struct amdgpu_bo_va *bo_va,
+ bool clear);
+void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
+ struct amdgpu_bo *bo);
+struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
+ struct amdgpu_bo *bo);
+struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
+ struct amdgpu_vm *vm,
+ struct amdgpu_bo *bo);
+int amdgpu_vm_bo_map(struct amdgpu_device *adev,
+ struct amdgpu_bo_va *bo_va,
+ uint64_t addr, uint64_t offset,
+ uint64_t size, uint32_t flags);
+int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
+ struct amdgpu_bo_va *bo_va,
+ uint64_t addr);
+void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
+ struct amdgpu_bo_va *bo_va);
+
+#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
new file mode 100644
index 000000000000..d710226a0fff
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
@@ -0,0 +1,222 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Christian König
+ */
+
+#include <drm/drmP.h>
+#include "amdgpu.h"
+
+struct amdgpu_vram_mgr {
+ struct drm_mm mm;
+ spinlock_t lock;
+};
+
+/**
+ * amdgpu_vram_mgr_init - init VRAM manager and DRM MM
+ *
+ * @man: TTM memory type manager
+ * @p_size: maximum size of VRAM
+ *
+ * Allocate and initialize the VRAM manager.
+ */
+static int amdgpu_vram_mgr_init(struct ttm_mem_type_manager *man,
+ unsigned long p_size)
+{
+ struct amdgpu_vram_mgr *mgr;
+
+ mgr = kzalloc(sizeof(*mgr), GFP_KERNEL);
+ if (!mgr)
+ return -ENOMEM;
+
+ drm_mm_init(&mgr->mm, 0, p_size);
+ spin_lock_init(&mgr->lock);
+ man->priv = mgr;
+ return 0;
+}
+
+/**
+ * amdgpu_vram_mgr_fini - free and destroy VRAM manager
+ *
+ * @man: TTM memory type manager
+ *
+ * Destroy and free the VRAM manager, returns -EBUSY if ranges are still
+ * allocated inside it.
+ */
+static int amdgpu_vram_mgr_fini(struct ttm_mem_type_manager *man)
+{
+ struct amdgpu_vram_mgr *mgr = man->priv;
+
+ spin_lock(&mgr->lock);
+ if (!drm_mm_clean(&mgr->mm)) {
+ spin_unlock(&mgr->lock);
+ return -EBUSY;
+ }
+
+ drm_mm_takedown(&mgr->mm);
+ spin_unlock(&mgr->lock);
+ kfree(mgr);
+ man->priv = NULL;
+ return 0;
+}
+
+/**
+ * amdgpu_vram_mgr_new - allocate new ranges
+ *
+ * @man: TTM memory type manager
+ * @tbo: TTM BO we need this range for
+ * @place: placement flags and restrictions
+ * @mem: the resulting mem object
+ *
+ * Allocate VRAM for the given BO.
+ */
+static int amdgpu_vram_mgr_new(struct ttm_mem_type_manager *man,
+ struct ttm_buffer_object *tbo,
+ const struct ttm_place *place,
+ struct ttm_mem_reg *mem)
+{
+ struct amdgpu_bo *bo = container_of(tbo, struct amdgpu_bo, tbo);
+ struct amdgpu_vram_mgr *mgr = man->priv;
+ struct drm_mm *mm = &mgr->mm;
+ struct drm_mm_node *nodes;
+ enum drm_mm_search_flags sflags = DRM_MM_SEARCH_DEFAULT;
+ enum drm_mm_allocator_flags aflags = DRM_MM_CREATE_DEFAULT;
+ unsigned long lpfn, num_nodes, pages_per_node, pages_left;
+ unsigned i;
+ int r;
+
+ lpfn = place->lpfn;
+ if (!lpfn)
+ lpfn = man->size;
+
+ if (bo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS ||
+ place->lpfn || amdgpu_vram_page_split == -1) {
+ pages_per_node = ~0ul;
+ num_nodes = 1;
+ } else {
+ pages_per_node = max((uint32_t)amdgpu_vram_page_split,
+ mem->page_alignment);
+ num_nodes = DIV_ROUND_UP(mem->num_pages, pages_per_node);
+ }
+
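+ /* splitting allocations into pages_per_node chunks avoids the need
+ * for large contiguous VRAM areas; a single node is only forced for
+ * contiguous BOs, restricted placements or when splitting is off */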
+ nodes = kcalloc(num_nodes, sizeof(*nodes), GFP_KERNEL);
+ if (!nodes)
+ return -ENOMEM;
+
+ if (place->flags & TTM_PL_FLAG_TOPDOWN) {
+ sflags = DRM_MM_SEARCH_BELOW;
+ aflags = DRM_MM_CREATE_TOP;
+ }
+
+ pages_left = mem->num_pages;
+
+ spin_lock(&mgr->lock);
+ for (i = 0; i < num_nodes; ++i) {
+ unsigned long pages = min(pages_left, pages_per_node);
+ uint32_t alignment = mem->page_alignment;
+
+ if (pages == pages_per_node)
+ alignment = pages_per_node;
+ else
+ sflags |= DRM_MM_SEARCH_BEST;
+
+ r = drm_mm_insert_node_in_range_generic(mm, &nodes[i], pages,
+ alignment, 0,
+ place->fpfn, lpfn,
+ sflags, aflags);
+ if (unlikely(r))
+ goto error;
+
+ pages_left -= pages;
+ }
+ spin_unlock(&mgr->lock);
+
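+ /* with more than one node there is no single GPU offset, so report
+ * an invalid start and let users walk the node list instead */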
+ mem->start = num_nodes == 1 ? nodes[0].start : AMDGPU_BO_INVALID_OFFSET;
+ mem->mm_node = nodes;
+
+ return 0;
+
+error:
+ while (i--)
+ drm_mm_remove_node(&nodes[i]);
+ spin_unlock(&mgr->lock);
+
+ kfree(nodes);
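+ /* map -ENOSPC to success without a node so TTM tries eviction
+ * instead of failing the allocation outright */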
+ return r == -ENOSPC ? 0 : r;
+}
+
+/**
+ * amdgpu_vram_mgr_del - free ranges
+ *
+ * @man: TTM memory type manager
+ * @mem: TTM memory object
+ *
+ * Free the allocated VRAM again.
+ */
+static void amdgpu_vram_mgr_del(struct ttm_mem_type_manager *man,
+ struct ttm_mem_reg *mem)
+{
+ struct amdgpu_vram_mgr *mgr = man->priv;
+ struct drm_mm_node *nodes = mem->mm_node;
+ unsigned pages = mem->num_pages;
+
+ if (!mem->mm_node)
+ return;
+
+ spin_lock(&mgr->lock);
+ while (pages) {
+ pages -= nodes->size;
+ drm_mm_remove_node(nodes);
+ ++nodes;
+ }
+ spin_unlock(&mgr->lock);
+
+ kfree(mem->mm_node);
+ mem->mm_node = NULL;
+}
+
+/**
+ * amdgpu_vram_mgr_debug - dump VRAM table
+ *
+ * @man: TTM memory type manager
+ * @prefix: text prefix
+ *
+ * Dump the table content using printk.
+ */
+static void amdgpu_vram_mgr_debug(struct ttm_mem_type_manager *man,
+ const char *prefix)
+{
+ struct amdgpu_vram_mgr *mgr = man->priv;
+
+ spin_lock(&mgr->lock);
+ drm_mm_debug_table(&mgr->mm, prefix);
+ spin_unlock(&mgr->lock);
+}
+
+const struct ttm_mem_type_manager_func amdgpu_vram_mgr_func = {
+ amdgpu_vram_mgr_init,
+ amdgpu_vram_mgr_fini,
+ amdgpu_vram_mgr_new,
+ amdgpu_vram_mgr_del,
+ amdgpu_vram_mgr_debug
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_crtc.c b/drivers/gpu/drm/amd/amdgpu/atombios_crtc.c
index f7d236f95e74..8c9bc75a9c2d 100644
--- a/drivers/gpu/drm/amd/amdgpu/atombios_crtc.c
+++ b/drivers/gpu/drm/amd/amdgpu/atombios_crtc.c
@@ -31,6 +31,7 @@
#include "atom.h"
#include "atom-bits.h"
#include "atombios_encoders.h"
+#include "atombios_crtc.h"
#include "amdgpu_atombios.h"
#include "amdgpu_pll.h"
#include "amdgpu_connectors.h"
diff --git a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
index 5be788b269e2..bda9e3de191e 100644
--- a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
@@ -887,9 +887,6 @@ static void ci_dpm_powergate_uvd(struct amdgpu_device *adev, bool gate)
{
struct ci_power_info *pi = ci_get_pi(adev);
- if (pi->uvd_power_gated == gate)
- return;
-
pi->uvd_power_gated = gate;
ci_update_uvd_dpm(adev, gate);
@@ -960,6 +957,12 @@ static void ci_apply_state_adjust_rules(struct amdgpu_device *adev,
sclk = ps->performance_levels[0].sclk;
}
+ if (adev->pm.pm_display_cfg.min_core_set_clock > sclk)
+ sclk = adev->pm.pm_display_cfg.min_core_set_clock;
+
+ if (adev->pm.pm_display_cfg.min_mem_set_clock > mclk)
+ mclk = adev->pm.pm_display_cfg.min_mem_set_clock;
+
if (rps->vce_active) {
if (sclk < adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].sclk)
sclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].sclk;
@@ -2201,6 +2204,11 @@ static int ci_upload_firmware(struct amdgpu_device *adev)
struct ci_power_info *pi = ci_get_pi(adev);
int i, ret;
+ if (amdgpu_ci_is_smc_running(adev)) {
+ DRM_INFO("smc is running, no need to load smc firmware\n");
+ return 0;
+ }
+
for (i = 0; i < adev->usec_timeout; i++) {
if (RREG32_SMC(ixRCU_UC_EVENTS) & RCU_UC_EVENTS__boot_seq_done_MASK)
break;
@@ -4190,8 +4198,10 @@ static int ci_update_uvd_dpm(struct amdgpu_device *adev, bool gate)
{
struct ci_power_info *pi = ci_get_pi(adev);
u32 tmp;
+ int ret = 0;
if (!gate) {
+ /* turn the clocks on when decoding */
if (pi->caps_uvd_dpm ||
(adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count <= 0))
pi->smc_state_table.UvdBootLevel = 0;
@@ -4203,9 +4213,14 @@ static int ci_update_uvd_dpm(struct amdgpu_device *adev, bool gate)
tmp &= ~DPM_TABLE_475__UvdBootLevel_MASK;
tmp |= (pi->smc_state_table.UvdBootLevel << DPM_TABLE_475__UvdBootLevel__SHIFT);
WREG32_SMC(ixDPM_TABLE_475, tmp);
+ ret = ci_enable_uvd_dpm(adev, true);
+ } else {
+ ret = ci_enable_uvd_dpm(adev, false);
+ if (ret)
+ return ret;
}
- return ci_enable_uvd_dpm(adev, !gate);
+ return ret;
}
static u8 ci_get_vce_boot_level(struct amdgpu_device *adev)
@@ -4247,13 +4262,12 @@ static int ci_update_vce_dpm(struct amdgpu_device *adev,
ret = ci_enable_vce_dpm(adev, true);
} else {
+ ret = ci_enable_vce_dpm(adev, false);
+ if (ret)
+ return ret;
/* turn the clocks off when not encoding */
ret = amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
AMD_CG_STATE_GATE);
- if (ret)
- return ret;
-
- ret = ci_enable_vce_dpm(adev, false);
}
}
return ret;
@@ -5219,6 +5233,7 @@ static void ci_update_current_ps(struct amdgpu_device *adev,
pi->current_rps = *rps;
pi->current_ps = *new_ps;
pi->current_rps.ps_priv = &pi->current_ps;
+ adev->pm.dpm.current_ps = &pi->current_rps;
}
static void ci_update_requested_ps(struct amdgpu_device *adev,
@@ -5230,6 +5245,7 @@ static void ci_update_requested_ps(struct amdgpu_device *adev,
pi->requested_rps = *rps;
pi->requested_ps = *new_ps;
pi->requested_rps.ps_priv = &pi->requested_ps;
+ adev->pm.dpm.requested_ps = &pi->requested_rps;
}
static int ci_dpm_pre_set_power_state(struct amdgpu_device *adev)
@@ -5267,8 +5283,6 @@ static int ci_dpm_enable(struct amdgpu_device *adev)
struct amdgpu_ps *boot_ps = adev->pm.dpm.boot_ps;
int ret;
- if (amdgpu_ci_is_smc_running(adev))
- return -EINVAL;
if (pi->voltage_control != CISLANDS_VOLTAGE_CONTROL_NONE) {
ci_enable_voltage_control(adev);
ret = ci_construct_voltage_tables(adev);
@@ -5689,7 +5703,7 @@ static int ci_parse_power_table(struct amdgpu_device *adev)
adev->pm.dpm.num_ps = state_array->ucNumEntries;
/* fill in the vce power states */
- for (i = 0; i < AMDGPU_MAX_VCE_LEVELS; i++) {
+ for (i = 0; i < adev->pm.dpm.num_of_vce_states; i++) {
u32 sclk, mclk;
clock_array_index = adev->pm.dpm.vce_states[i].clk_idx;
clock_info = (union pplib_clock_info *)
@@ -5874,7 +5888,7 @@ static int ci_dpm_init(struct amdgpu_device *adev)
pi->pcie_dpm_key_disabled = 0;
pi->thermal_sclk_dpm_enabled = 0;
- if (amdgpu_sclk_deep_sleep_en)
+ if (amdgpu_pp_feature_mask & SCLK_DEEP_SLEEP_MASK)
pi->caps_sclk_ds = true;
else
pi->caps_sclk_ds = false;
@@ -5977,7 +5991,7 @@ static int ci_dpm_init(struct amdgpu_device *adev)
tmp |= CNB_PWRMGT_CNTL__DPM_ENABLED_MASK;
break;
default:
- DRM_ERROR("Invalid PCC GPIO: %u!\n", gpio.shift);
+ DRM_INFO("Invalid PCC GPIO: %u!\n", gpio.shift);
break;
}
WREG32_SMC(ixCNB_PWRMGT_CNTL, tmp);
@@ -6069,7 +6083,7 @@ ci_dpm_debugfs_print_current_performance_level(struct amdgpu_device *adev,
activity_percent = activity_percent > 100 ? 100 : activity_percent;
}
- seq_printf(m, "uvd %sabled\n", pi->uvd_enabled ? "en" : "dis");
+ seq_printf(m, "uvd %sabled\n", pi->uvd_power_gated ? "dis" : "en");
seq_printf(m, "vce %sabled\n", rps->vce_active ? "en" : "dis");
seq_printf(m, "power level avg sclk: %u mclk: %u\n",
sclk, mclk);
@@ -6094,6 +6108,56 @@ static void ci_dpm_print_power_state(struct amdgpu_device *adev,
amdgpu_dpm_print_ps_status(adev, rps);
}
+static inline bool ci_are_power_levels_equal(const struct ci_pl *ci_cpl1,
+ const struct ci_pl *ci_cpl2)
+{
+ return ((ci_cpl1->mclk == ci_cpl2->mclk) &&
+ (ci_cpl1->sclk == ci_cpl2->sclk) &&
+ (ci_cpl1->pcie_gen == ci_cpl2->pcie_gen) &&
+ (ci_cpl1->pcie_lane == ci_cpl2->pcie_lane));
+}
+
+static int ci_check_state_equal(struct amdgpu_device *adev,
+ struct amdgpu_ps *cps,
+ struct amdgpu_ps *rps,
+ bool *equal)
+{
+ struct ci_ps *ci_cps;
+ struct ci_ps *ci_rps;
+ int i;
+
+ if (adev == NULL || cps == NULL || rps == NULL || equal == NULL)
+ return -EINVAL;
+
+ ci_cps = ci_get_ps(cps);
+ ci_rps = ci_get_ps(rps);
+
+ if (ci_cps == NULL) {
+ *equal = false;
+ return 0;
+ }
+
+ if (ci_cps->performance_level_count != ci_rps->performance_level_count) {
+ *equal = false;
+ return 0;
+ }
+
+ for (i = 0; i < ci_cps->performance_level_count; i++) {
+ if (!ci_are_power_levels_equal(&(ci_cps->performance_levels[i]),
+ &(ci_rps->performance_levels[i]))) {
+ *equal = false;
+ return 0;
+ }
+ }
+
+ /* If all performance levels are the same, try to use the UVD clocks to break the tie. */
+ *equal = ((cps->vclk == rps->vclk) && (cps->dclk == rps->dclk));
+ *equal &= ((cps->evclk == rps->evclk) && (cps->ecclk == rps->ecclk));
+
+ return 0;
+}
+
static u32 ci_dpm_get_sclk(struct amdgpu_device *adev, bool low)
{
struct ci_power_info *pi = ci_get_pi(adev);
@@ -6289,12 +6353,19 @@ static int ci_dpm_suspend(void *handle)
if (adev->pm.dpm_enabled) {
mutex_lock(&adev->pm.mutex);
- /* disable dpm */
- ci_dpm_disable(adev);
- /* reset the power state */
- adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps = adev->pm.dpm.boot_ps;
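+ /* rather than tearing DPM down, drop the thermal interrupts and
+ * park the hardware in the boot state; resume restores the saved
+ * user state */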
+ amdgpu_irq_put(adev, &adev->pm.dpm.thermal.irq,
+ AMDGPU_THERMAL_IRQ_LOW_TO_HIGH);
+ amdgpu_irq_put(adev, &adev->pm.dpm.thermal.irq,
+ AMDGPU_THERMAL_IRQ_HIGH_TO_LOW);
+ adev->pm.dpm.last_user_state = adev->pm.dpm.user_state;
+ adev->pm.dpm.last_state = adev->pm.dpm.state;
+ adev->pm.dpm.user_state = POWER_STATE_TYPE_INTERNAL_BOOT;
+ adev->pm.dpm.state = POWER_STATE_TYPE_INTERNAL_BOOT;
mutex_unlock(&adev->pm.mutex);
+ amdgpu_pm_compute_clocks(adev);
+
}
+
return 0;
}
@@ -6312,6 +6383,8 @@ static int ci_dpm_resume(void *handle)
adev->pm.dpm_enabled = false;
else
adev->pm.dpm_enabled = true;
+ adev->pm.dpm.user_state = adev->pm.dpm.last_user_state;
+ adev->pm.dpm.state = adev->pm.dpm.last_state;
mutex_unlock(&adev->pm.mutex);
if (adev->pm.dpm_enabled)
amdgpu_pm_compute_clocks(adev);
@@ -6646,6 +6719,8 @@ static const struct amdgpu_dpm_funcs ci_dpm_funcs = {
.set_sclk_od = ci_dpm_set_sclk_od,
.get_mclk_od = ci_dpm_get_mclk_od,
.set_mclk_od = ci_dpm_set_mclk_od,
+ .check_state_equal = ci_check_state_equal,
+ .get_vce_clock_state = amdgpu_get_vce_clock_state,
};
static void ci_dpm_set_dpm_funcs(struct amdgpu_device *adev)
@@ -6664,3 +6739,12 @@ static void ci_dpm_set_irq_funcs(struct amdgpu_device *adev)
adev->pm.dpm.thermal.irq.num_types = AMDGPU_THERMAL_IRQ_LAST;
adev->pm.dpm.thermal.irq.funcs = &ci_dpm_irq_funcs;
}
+
+const struct amdgpu_ip_block_version ci_dpm_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_SMC,
+ .major = 7,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &ci_dpm_ip_funcs,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/cik.c b/drivers/gpu/drm/amd/amdgpu/cik.c
index a845b6a93b79..302df85893ab 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik.c
@@ -1189,18 +1189,6 @@ static int cik_gpu_pci_config_reset(struct amdgpu_device *adev)
return r;
}
-static void cik_set_bios_scratch_engine_hung(struct amdgpu_device *adev, bool hung)
-{
- u32 tmp = RREG32(mmBIOS_SCRATCH_3);
-
- if (hung)
- tmp |= ATOM_S3_ASIC_GUI_ENGINE_HUNG;
- else
- tmp &= ~ATOM_S3_ASIC_GUI_ENGINE_HUNG;
-
- WREG32(mmBIOS_SCRATCH_3, tmp);
-}
-
/**
* cik_asic_reset - soft reset GPU
*
@@ -1213,11 +1201,12 @@ static void cik_set_bios_scratch_engine_hung(struct amdgpu_device *adev, bool hu
static int cik_asic_reset(struct amdgpu_device *adev)
{
int r;
- cik_set_bios_scratch_engine_hung(adev, true);
+
+ amdgpu_atombios_scratch_regs_engine_hung(adev, true);
r = cik_gpu_pci_config_reset(adev);
- cik_set_bios_scratch_engine_hung(adev, false);
+ amdgpu_atombios_scratch_regs_engine_hung(adev, false);
return r;
}
@@ -1641,745 +1630,6 @@ static void cik_detect_hw_virtualization(struct amdgpu_device *adev)
adev->virtualization.virtual_caps |= AMDGPU_PASSTHROUGH_MODE;
}
-static const struct amdgpu_ip_block_version bonaire_ip_blocks[] =
-{
- /* ORDER MATTERS! */
- {
- .type = AMD_IP_BLOCK_TYPE_COMMON,
- .major = 1,
- .minor = 0,
- .rev = 0,
- .funcs = &cik_common_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_GMC,
- .major = 7,
- .minor = 0,
- .rev = 0,
- .funcs = &gmc_v7_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_IH,
- .major = 2,
- .minor = 0,
- .rev = 0,
- .funcs = &cik_ih_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_SMC,
- .major = 7,
- .minor = 0,
- .rev = 0,
- .funcs = &amdgpu_pp_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_DCE,
- .major = 8,
- .minor = 2,
- .rev = 0,
- .funcs = &dce_v8_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_GFX,
- .major = 7,
- .minor = 2,
- .rev = 0,
- .funcs = &gfx_v7_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_SDMA,
- .major = 2,
- .minor = 0,
- .rev = 0,
- .funcs = &cik_sdma_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_UVD,
- .major = 4,
- .minor = 2,
- .rev = 0,
- .funcs = &uvd_v4_2_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_VCE,
- .major = 2,
- .minor = 0,
- .rev = 0,
- .funcs = &vce_v2_0_ip_funcs,
- },
-};
-
-static const struct amdgpu_ip_block_version bonaire_ip_blocks_vd[] =
-{
- /* ORDER MATTERS! */
- {
- .type = AMD_IP_BLOCK_TYPE_COMMON,
- .major = 1,
- .minor = 0,
- .rev = 0,
- .funcs = &cik_common_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_GMC,
- .major = 7,
- .minor = 0,
- .rev = 0,
- .funcs = &gmc_v7_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_IH,
- .major = 2,
- .minor = 0,
- .rev = 0,
- .funcs = &cik_ih_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_SMC,
- .major = 7,
- .minor = 0,
- .rev = 0,
- .funcs = &amdgpu_pp_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_DCE,
- .major = 8,
- .minor = 2,
- .rev = 0,
- .funcs = &dce_virtual_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_GFX,
- .major = 7,
- .minor = 2,
- .rev = 0,
- .funcs = &gfx_v7_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_SDMA,
- .major = 2,
- .minor = 0,
- .rev = 0,
- .funcs = &cik_sdma_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_UVD,
- .major = 4,
- .minor = 2,
- .rev = 0,
- .funcs = &uvd_v4_2_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_VCE,
- .major = 2,
- .minor = 0,
- .rev = 0,
- .funcs = &vce_v2_0_ip_funcs,
- },
-};
-
-static const struct amdgpu_ip_block_version hawaii_ip_blocks[] =
-{
- /* ORDER MATTERS! */
- {
- .type = AMD_IP_BLOCK_TYPE_COMMON,
- .major = 1,
- .minor = 0,
- .rev = 0,
- .funcs = &cik_common_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_GMC,
- .major = 7,
- .minor = 0,
- .rev = 0,
- .funcs = &gmc_v7_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_IH,
- .major = 2,
- .minor = 0,
- .rev = 0,
- .funcs = &cik_ih_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_SMC,
- .major = 7,
- .minor = 0,
- .rev = 0,
- .funcs = &amdgpu_pp_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_DCE,
- .major = 8,
- .minor = 5,
- .rev = 0,
- .funcs = &dce_v8_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_GFX,
- .major = 7,
- .minor = 3,
- .rev = 0,
- .funcs = &gfx_v7_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_SDMA,
- .major = 2,
- .minor = 0,
- .rev = 0,
- .funcs = &cik_sdma_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_UVD,
- .major = 4,
- .minor = 2,
- .rev = 0,
- .funcs = &uvd_v4_2_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_VCE,
- .major = 2,
- .minor = 0,
- .rev = 0,
- .funcs = &vce_v2_0_ip_funcs,
- },
-};
-
-static const struct amdgpu_ip_block_version hawaii_ip_blocks_vd[] =
-{
- /* ORDER MATTERS! */
- {
- .type = AMD_IP_BLOCK_TYPE_COMMON,
- .major = 1,
- .minor = 0,
- .rev = 0,
- .funcs = &cik_common_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_GMC,
- .major = 7,
- .minor = 0,
- .rev = 0,
- .funcs = &gmc_v7_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_IH,
- .major = 2,
- .minor = 0,
- .rev = 0,
- .funcs = &cik_ih_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_SMC,
- .major = 7,
- .minor = 0,
- .rev = 0,
- .funcs = &amdgpu_pp_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_DCE,
- .major = 8,
- .minor = 5,
- .rev = 0,
- .funcs = &dce_virtual_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_GFX,
- .major = 7,
- .minor = 3,
- .rev = 0,
- .funcs = &gfx_v7_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_SDMA,
- .major = 2,
- .minor = 0,
- .rev = 0,
- .funcs = &cik_sdma_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_UVD,
- .major = 4,
- .minor = 2,
- .rev = 0,
- .funcs = &uvd_v4_2_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_VCE,
- .major = 2,
- .minor = 0,
- .rev = 0,
- .funcs = &vce_v2_0_ip_funcs,
- },
-};
-
-static const struct amdgpu_ip_block_version kabini_ip_blocks[] =
-{
- /* ORDER MATTERS! */
- {
- .type = AMD_IP_BLOCK_TYPE_COMMON,
- .major = 1,
- .minor = 0,
- .rev = 0,
- .funcs = &cik_common_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_GMC,
- .major = 7,
- .minor = 0,
- .rev = 0,
- .funcs = &gmc_v7_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_IH,
- .major = 2,
- .minor = 0,
- .rev = 0,
- .funcs = &cik_ih_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_SMC,
- .major = 7,
- .minor = 0,
- .rev = 0,
- .funcs = &amdgpu_pp_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_DCE,
- .major = 8,
- .minor = 3,
- .rev = 0,
- .funcs = &dce_v8_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_GFX,
- .major = 7,
- .minor = 2,
- .rev = 0,
- .funcs = &gfx_v7_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_SDMA,
- .major = 2,
- .minor = 0,
- .rev = 0,
- .funcs = &cik_sdma_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_UVD,
- .major = 4,
- .minor = 2,
- .rev = 0,
- .funcs = &uvd_v4_2_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_VCE,
- .major = 2,
- .minor = 0,
- .rev = 0,
- .funcs = &vce_v2_0_ip_funcs,
- },
-};
-
-static const struct amdgpu_ip_block_version kabini_ip_blocks_vd[] =
-{
- /* ORDER MATTERS! */
- {
- .type = AMD_IP_BLOCK_TYPE_COMMON,
- .major = 1,
- .minor = 0,
- .rev = 0,
- .funcs = &cik_common_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_GMC,
- .major = 7,
- .minor = 0,
- .rev = 0,
- .funcs = &gmc_v7_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_IH,
- .major = 2,
- .minor = 0,
- .rev = 0,
- .funcs = &cik_ih_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_SMC,
- .major = 7,
- .minor = 0,
- .rev = 0,
- .funcs = &amdgpu_pp_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_DCE,
- .major = 8,
- .minor = 3,
- .rev = 0,
- .funcs = &dce_virtual_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_GFX,
- .major = 7,
- .minor = 2,
- .rev = 0,
- .funcs = &gfx_v7_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_SDMA,
- .major = 2,
- .minor = 0,
- .rev = 0,
- .funcs = &cik_sdma_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_UVD,
- .major = 4,
- .minor = 2,
- .rev = 0,
- .funcs = &uvd_v4_2_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_VCE,
- .major = 2,
- .minor = 0,
- .rev = 0,
- .funcs = &vce_v2_0_ip_funcs,
- },
-};
-
-static const struct amdgpu_ip_block_version mullins_ip_blocks[] =
-{
- /* ORDER MATTERS! */
- {
- .type = AMD_IP_BLOCK_TYPE_COMMON,
- .major = 1,
- .minor = 0,
- .rev = 0,
- .funcs = &cik_common_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_GMC,
- .major = 7,
- .minor = 0,
- .rev = 0,
- .funcs = &gmc_v7_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_IH,
- .major = 2,
- .minor = 0,
- .rev = 0,
- .funcs = &cik_ih_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_SMC,
- .major = 7,
- .minor = 0,
- .rev = 0,
- .funcs = &amdgpu_pp_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_DCE,
- .major = 8,
- .minor = 3,
- .rev = 0,
- .funcs = &dce_v8_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_GFX,
- .major = 7,
- .minor = 2,
- .rev = 0,
- .funcs = &gfx_v7_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_SDMA,
- .major = 2,
- .minor = 0,
- .rev = 0,
- .funcs = &cik_sdma_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_UVD,
- .major = 4,
- .minor = 2,
- .rev = 0,
- .funcs = &uvd_v4_2_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_VCE,
- .major = 2,
- .minor = 0,
- .rev = 0,
- .funcs = &vce_v2_0_ip_funcs,
- },
-};
-
-static const struct amdgpu_ip_block_version mullins_ip_blocks_vd[] =
-{
- /* ORDER MATTERS! */
- {
- .type = AMD_IP_BLOCK_TYPE_COMMON,
- .major = 1,
- .minor = 0,
- .rev = 0,
- .funcs = &cik_common_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_GMC,
- .major = 7,
- .minor = 0,
- .rev = 0,
- .funcs = &gmc_v7_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_IH,
- .major = 2,
- .minor = 0,
- .rev = 0,
- .funcs = &cik_ih_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_SMC,
- .major = 7,
- .minor = 0,
- .rev = 0,
- .funcs = &amdgpu_pp_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_DCE,
- .major = 8,
- .minor = 3,
- .rev = 0,
- .funcs = &dce_virtual_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_GFX,
- .major = 7,
- .minor = 2,
- .rev = 0,
- .funcs = &gfx_v7_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_SDMA,
- .major = 2,
- .minor = 0,
- .rev = 0,
- .funcs = &cik_sdma_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_UVD,
- .major = 4,
- .minor = 2,
- .rev = 0,
- .funcs = &uvd_v4_2_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_VCE,
- .major = 2,
- .minor = 0,
- .rev = 0,
- .funcs = &vce_v2_0_ip_funcs,
- },
-};
-
-static const struct amdgpu_ip_block_version kaveri_ip_blocks[] =
-{
- /* ORDER MATTERS! */
- {
- .type = AMD_IP_BLOCK_TYPE_COMMON,
- .major = 1,
- .minor = 0,
- .rev = 0,
- .funcs = &cik_common_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_GMC,
- .major = 7,
- .minor = 0,
- .rev = 0,
- .funcs = &gmc_v7_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_IH,
- .major = 2,
- .minor = 0,
- .rev = 0,
- .funcs = &cik_ih_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_SMC,
- .major = 7,
- .minor = 0,
- .rev = 0,
- .funcs = &amdgpu_pp_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_DCE,
- .major = 8,
- .minor = 1,
- .rev = 0,
- .funcs = &dce_v8_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_GFX,
- .major = 7,
- .minor = 1,
- .rev = 0,
- .funcs = &gfx_v7_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_SDMA,
- .major = 2,
- .minor = 0,
- .rev = 0,
- .funcs = &cik_sdma_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_UVD,
- .major = 4,
- .minor = 2,
- .rev = 0,
- .funcs = &uvd_v4_2_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_VCE,
- .major = 2,
- .minor = 0,
- .rev = 0,
- .funcs = &vce_v2_0_ip_funcs,
- },
-};
-
-static const struct amdgpu_ip_block_version kaveri_ip_blocks_vd[] =
-{
- /* ORDER MATTERS! */
- {
- .type = AMD_IP_BLOCK_TYPE_COMMON,
- .major = 1,
- .minor = 0,
- .rev = 0,
- .funcs = &cik_common_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_GMC,
- .major = 7,
- .minor = 0,
- .rev = 0,
- .funcs = &gmc_v7_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_IH,
- .major = 2,
- .minor = 0,
- .rev = 0,
- .funcs = &cik_ih_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_SMC,
- .major = 7,
- .minor = 0,
- .rev = 0,
- .funcs = &amdgpu_pp_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_DCE,
- .major = 8,
- .minor = 1,
- .rev = 0,
- .funcs = &dce_virtual_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_GFX,
- .major = 7,
- .minor = 1,
- .rev = 0,
- .funcs = &gfx_v7_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_SDMA,
- .major = 2,
- .minor = 0,
- .rev = 0,
- .funcs = &cik_sdma_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_UVD,
- .major = 4,
- .minor = 2,
- .rev = 0,
- .funcs = &uvd_v4_2_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_VCE,
- .major = 2,
- .minor = 0,
- .rev = 0,
- .funcs = &vce_v2_0_ip_funcs,
- },
-};
-
-int cik_set_ip_blocks(struct amdgpu_device *adev)
-{
- if (adev->enable_virtual_display) {
- switch (adev->asic_type) {
- case CHIP_BONAIRE:
- adev->ip_blocks = bonaire_ip_blocks_vd;
- adev->num_ip_blocks = ARRAY_SIZE(bonaire_ip_blocks_vd);
- break;
- case CHIP_HAWAII:
- adev->ip_blocks = hawaii_ip_blocks_vd;
- adev->num_ip_blocks = ARRAY_SIZE(hawaii_ip_blocks_vd);
- break;
- case CHIP_KAVERI:
- adev->ip_blocks = kaveri_ip_blocks_vd;
- adev->num_ip_blocks = ARRAY_SIZE(kaveri_ip_blocks_vd);
- break;
- case CHIP_KABINI:
- adev->ip_blocks = kabini_ip_blocks_vd;
- adev->num_ip_blocks = ARRAY_SIZE(kabini_ip_blocks_vd);
- break;
- case CHIP_MULLINS:
- adev->ip_blocks = mullins_ip_blocks_vd;
- adev->num_ip_blocks = ARRAY_SIZE(mullins_ip_blocks_vd);
- break;
- default:
- /* FIXME: not supported yet */
- return -EINVAL;
- }
- } else {
- switch (adev->asic_type) {
- case CHIP_BONAIRE:
- adev->ip_blocks = bonaire_ip_blocks;
- adev->num_ip_blocks = ARRAY_SIZE(bonaire_ip_blocks);
- break;
- case CHIP_HAWAII:
- adev->ip_blocks = hawaii_ip_blocks;
- adev->num_ip_blocks = ARRAY_SIZE(hawaii_ip_blocks);
- break;
- case CHIP_KAVERI:
- adev->ip_blocks = kaveri_ip_blocks;
- adev->num_ip_blocks = ARRAY_SIZE(kaveri_ip_blocks);
- break;
- case CHIP_KABINI:
- adev->ip_blocks = kabini_ip_blocks;
- adev->num_ip_blocks = ARRAY_SIZE(kabini_ip_blocks);
- break;
- case CHIP_MULLINS:
- adev->ip_blocks = mullins_ip_blocks;
- adev->num_ip_blocks = ARRAY_SIZE(mullins_ip_blocks);
- break;
- default:
- /* FIXME: not supported yet */
- return -EINVAL;
- }
- }
-
- return 0;
-}
-
static const struct amdgpu_asic_funcs cik_asic_funcs =
{
.read_disabled_bios = &cik_read_disabled_bios,
@@ -2612,7 +1862,7 @@ static int cik_common_set_powergating_state(void *handle,
return 0;
}
-const struct amd_ip_funcs cik_common_ip_funcs = {
+static const struct amd_ip_funcs cik_common_ip_funcs = {
.name = "cik_common",
.early_init = cik_common_early_init,
.late_init = NULL,
@@ -2628,3 +1878,79 @@ const struct amd_ip_funcs cik_common_ip_funcs = {
.set_clockgating_state = cik_common_set_clockgating_state,
.set_powergating_state = cik_common_set_powergating_state,
};
+
+static const struct amdgpu_ip_block_version cik_common_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_COMMON,
+ .major = 1,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &cik_common_ip_funcs,
+};
+
+int cik_set_ip_blocks(struct amdgpu_device *adev)
+{
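+ /* IP blocks must be added in hardware init order */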
+ switch (adev->asic_type) {
+ case CHIP_BONAIRE:
+ amdgpu_ip_block_add(adev, &cik_common_ip_block);
+ amdgpu_ip_block_add(adev, &gmc_v7_0_ip_block);
+ amdgpu_ip_block_add(adev, &cik_ih_ip_block);
+ amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
+ if (adev->enable_virtual_display)
+ amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
+ else
+ amdgpu_ip_block_add(adev, &dce_v8_2_ip_block);
+ amdgpu_ip_block_add(adev, &gfx_v7_2_ip_block);
+ amdgpu_ip_block_add(adev, &cik_sdma_ip_block);
+ amdgpu_ip_block_add(adev, &uvd_v4_2_ip_block);
+ amdgpu_ip_block_add(adev, &vce_v2_0_ip_block);
+ break;
+ case CHIP_HAWAII:
+ amdgpu_ip_block_add(adev, &cik_common_ip_block);
+ amdgpu_ip_block_add(adev, &gmc_v7_0_ip_block);
+ amdgpu_ip_block_add(adev, &cik_ih_ip_block);
+ amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
+ if (adev->enable_virtual_display)
+ amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
+ else
+ amdgpu_ip_block_add(adev, &dce_v8_5_ip_block);
+ amdgpu_ip_block_add(adev, &gfx_v7_3_ip_block);
+ amdgpu_ip_block_add(adev, &cik_sdma_ip_block);
+ amdgpu_ip_block_add(adev, &uvd_v4_2_ip_block);
+ amdgpu_ip_block_add(adev, &vce_v2_0_ip_block);
+ break;
+ case CHIP_KAVERI:
+ amdgpu_ip_block_add(adev, &cik_common_ip_block);
+ amdgpu_ip_block_add(adev, &gmc_v7_0_ip_block);
+ amdgpu_ip_block_add(adev, &cik_ih_ip_block);
+ amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
+ if (adev->enable_virtual_display)
+ amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
+ else
+ amdgpu_ip_block_add(adev, &dce_v8_1_ip_block);
+ amdgpu_ip_block_add(adev, &gfx_v7_1_ip_block);
+ amdgpu_ip_block_add(adev, &cik_sdma_ip_block);
+ amdgpu_ip_block_add(adev, &uvd_v4_2_ip_block);
+ amdgpu_ip_block_add(adev, &vce_v2_0_ip_block);
+ break;
+ case CHIP_KABINI:
+ case CHIP_MULLINS:
+ amdgpu_ip_block_add(adev, &cik_common_ip_block);
+ amdgpu_ip_block_add(adev, &gmc_v7_0_ip_block);
+ amdgpu_ip_block_add(adev, &cik_ih_ip_block);
+ amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
+ if (adev->enable_virtual_display)
+ amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
+ else
+ amdgpu_ip_block_add(adev, &dce_v8_3_ip_block);
+ amdgpu_ip_block_add(adev, &gfx_v7_2_ip_block);
+ amdgpu_ip_block_add(adev, &cik_sdma_ip_block);
+ amdgpu_ip_block_add(adev, &uvd_v4_2_ip_block);
+ amdgpu_ip_block_add(adev, &vce_v2_0_ip_block);
+ break;
+ default:
+ /* FIXME: not supported yet */
+ return -EINVAL;
+ }
+ return 0;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/cik.h b/drivers/gpu/drm/amd/amdgpu/cik.h
index 5ebd2d7a0327..c4989f51ecef 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik.h
+++ b/drivers/gpu/drm/amd/amdgpu/cik.h
@@ -24,8 +24,6 @@
#ifndef __CIK_H__
#define __CIK_H__
-extern const struct amd_ip_funcs cik_common_ip_funcs;
-
void cik_srbm_select(struct amdgpu_device *adev,
u32 me, u32 pipe, u32 queue, u32 vmid);
int cik_set_ip_blocks(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/cik_ih.c b/drivers/gpu/drm/amd/amdgpu/cik_ih.c
index be3d6f79a864..319b32cdea84 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik_ih.c
@@ -413,7 +413,7 @@ static int cik_ih_set_powergating_state(void *handle,
return 0;
}
-const struct amd_ip_funcs cik_ih_ip_funcs = {
+static const struct amd_ip_funcs cik_ih_ip_funcs = {
.name = "cik_ih",
.early_init = cik_ih_early_init,
.late_init = NULL,
@@ -441,3 +441,12 @@ static void cik_ih_set_interrupt_funcs(struct amdgpu_device *adev)
if (adev->irq.ih_funcs == NULL)
adev->irq.ih_funcs = &cik_ih_funcs;
}
+
+const struct amdgpu_ip_block_version cik_ih_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_IH,
+ .major = 2,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &cik_ih_ip_funcs,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/cik_ih.h b/drivers/gpu/drm/amd/amdgpu/cik_ih.h
index 6b0f375ec244..1d9ddee2868e 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik_ih.h
+++ b/drivers/gpu/drm/amd/amdgpu/cik_ih.h
@@ -24,6 +24,6 @@
#ifndef __CIK_IH_H__
#define __CIK_IH_H__
-extern const struct amd_ip_funcs cik_ih_ip_funcs;
+extern const struct amdgpu_ip_block_version cik_ih_ip_block;
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
index cb952acc7133..4c34dbc7a254 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
@@ -206,10 +206,10 @@ static void cik_sdma_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
for (i = 0; i < count; i++)
if (sdma && sdma->burst_nop && (i == 0))
- amdgpu_ring_write(ring, ring->nop |
+ amdgpu_ring_write(ring, ring->funcs->nop |
SDMA_NOP_COUNT(count - 1));
else
- amdgpu_ring_write(ring, ring->nop);
+ amdgpu_ring_write(ring, ring->funcs->nop);
}
/**
@@ -622,7 +622,7 @@ static int cik_sdma_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
struct amdgpu_device *adev = ring->adev;
struct amdgpu_ib ib;
- struct fence *f = NULL;
+ struct dma_fence *f = NULL;
unsigned index;
u32 tmp = 0;
u64 gpu_addr;
@@ -655,7 +655,7 @@ static int cik_sdma_ring_test_ib(struct amdgpu_ring *ring, long timeout)
if (r)
goto err1;
- r = fence_wait_timeout(f, false, timeout);
+ r = dma_fence_wait_timeout(f, false, timeout);
if (r == 0) {
DRM_ERROR("amdgpu: IB test timed out\n");
r = -ETIMEDOUT;
@@ -675,7 +675,7 @@ static int cik_sdma_ring_test_ib(struct amdgpu_ring *ring, long timeout)
err1:
amdgpu_ib_free(adev, &ib, NULL);
- fence_put(f);
+ dma_fence_put(f);
err0:
amdgpu_wb_free(adev, index);
return r;
@@ -848,22 +848,6 @@ static void cik_sdma_ring_emit_vm_flush(struct amdgpu_ring *ring,
amdgpu_ring_write(ring, (0xfff << 16) | 10); /* retry count, poll interval */
}
-static unsigned cik_sdma_ring_get_emit_ib_size(struct amdgpu_ring *ring)
-{
- return
- 7 + 4; /* cik_sdma_ring_emit_ib */
-}
-
-static unsigned cik_sdma_ring_get_dma_frame_size(struct amdgpu_ring *ring)
-{
- return
- 6 + /* cik_sdma_ring_emit_hdp_flush */
- 3 + /* cik_sdma_ring_emit_hdp_invalidate */
- 6 + /* cik_sdma_ring_emit_pipeline_sync */
- 12 + /* cik_sdma_ring_emit_vm_flush */
- 9 + 9 + 9; /* cik_sdma_ring_emit_fence x3 for user fence, vm fence */
-}
-
static void cik_enable_sdma_mgcg(struct amdgpu_device *adev,
bool enable)
{
@@ -959,11 +943,10 @@ static int cik_sdma_sw_init(void *handle)
ring->ring_obj = NULL;
sprintf(ring->name, "sdma%d", i);
r = amdgpu_ring_init(adev, ring, 1024,
- SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0), 0xf,
&adev->sdma.trap_irq,
(i == 0) ?
- AMDGPU_SDMA_IRQ_TRAP0 : AMDGPU_SDMA_IRQ_TRAP1,
- AMDGPU_RING_TYPE_SDMA);
+ AMDGPU_SDMA_IRQ_TRAP0 :
+ AMDGPU_SDMA_IRQ_TRAP1);
if (r)
return r;
}
@@ -1207,7 +1190,7 @@ static int cik_sdma_set_powergating_state(void *handle,
return 0;
}
-const struct amd_ip_funcs cik_sdma_ip_funcs = {
+static const struct amd_ip_funcs cik_sdma_ip_funcs = {
.name = "cik_sdma",
.early_init = cik_sdma_early_init,
.late_init = NULL,
@@ -1225,10 +1208,19 @@ const struct amd_ip_funcs cik_sdma_ip_funcs = {
};
static const struct amdgpu_ring_funcs cik_sdma_ring_funcs = {
+ .type = AMDGPU_RING_TYPE_SDMA,
+ .align_mask = 0xf,
+ .nop = SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0),
.get_rptr = cik_sdma_ring_get_rptr,
.get_wptr = cik_sdma_ring_get_wptr,
.set_wptr = cik_sdma_ring_set_wptr,
- .parse_cs = NULL,
+ .emit_frame_size =
+ 6 + /* cik_sdma_ring_emit_hdp_flush */
+ 3 + /* cik_sdma_ring_emit_hdp_invalidate */
+ 6 + /* cik_sdma_ring_emit_pipeline_sync */
+ 12 + /* cik_sdma_ring_emit_vm_flush */
+ 9 + 9 + 9, /* cik_sdma_ring_emit_fence x3 for user fence, vm fence */
+ .emit_ib_size = 7 + 4, /* cik_sdma_ring_emit_ib */
.emit_ib = cik_sdma_ring_emit_ib,
.emit_fence = cik_sdma_ring_emit_fence,
.emit_pipeline_sync = cik_sdma_ring_emit_pipeline_sync,
@@ -1239,8 +1231,6 @@ static const struct amdgpu_ring_funcs cik_sdma_ring_funcs = {
.test_ib = cik_sdma_ring_test_ib,
.insert_nop = cik_sdma_ring_insert_nop,
.pad_ib = cik_sdma_ring_pad_ib,
- .get_emit_ib_size = cik_sdma_ring_get_emit_ib_size,
- .get_dma_frame_size = cik_sdma_ring_get_dma_frame_size,
};
static void cik_sdma_set_ring_funcs(struct amdgpu_device *adev)
@@ -1352,3 +1342,12 @@ static void cik_sdma_set_vm_pte_funcs(struct amdgpu_device *adev)
adev->vm_manager.vm_pte_num_rings = adev->sdma.num_instances;
}
}
+
+const struct amdgpu_ip_block_version cik_sdma_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_SDMA,
+ .major = 2,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &cik_sdma_ip_funcs,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/cik_sdma.h b/drivers/gpu/drm/amd/amdgpu/cik_sdma.h
index 027727c677b8..a4a8fe01410b 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik_sdma.h
+++ b/drivers/gpu/drm/amd/amdgpu/cik_sdma.h
@@ -24,6 +24,6 @@
#ifndef __CIK_SDMA_H__
#define __CIK_SDMA_H__
-extern const struct amd_ip_funcs cik_sdma_ip_funcs;
+extern const struct amdgpu_ip_block_version cik_sdma_ip_block;
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/cikd.h b/drivers/gpu/drm/amd/amdgpu/cikd.h
index 8659852aea9e..6cbd913fd12e 100644
--- a/drivers/gpu/drm/amd/amdgpu/cikd.h
+++ b/drivers/gpu/drm/amd/amdgpu/cikd.h
@@ -43,6 +43,14 @@
#define CRTC4_REGISTER_OFFSET (0x477c - 0x1b7c)
#define CRTC5_REGISTER_OFFSET (0x4a7c - 0x1b7c)
+/* hpd instance offsets */
+#define HPD0_REGISTER_OFFSET (0x1807 - 0x1807)
+#define HPD1_REGISTER_OFFSET (0x180a - 0x1807)
+#define HPD2_REGISTER_OFFSET (0x180d - 0x1807)
+#define HPD3_REGISTER_OFFSET (0x1810 - 0x1807)
+#define HPD4_REGISTER_OFFSET (0x1813 - 0x1807)
+#define HPD5_REGISTER_OFFSET (0x1816 - 0x1807)
+
#define BONAIRE_GB_ADDR_CONFIG_GOLDEN 0x12010001
#define HAWAII_GB_ADDR_CONFIG_GOLDEN 0x12011003
diff --git a/drivers/gpu/drm/amd/amdgpu/cz_dpm.c b/drivers/gpu/drm/amd/amdgpu/cz_dpm.c
index 3c082e143730..ba2b66be9022 100644
--- a/drivers/gpu/drm/amd/amdgpu/cz_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/cz_dpm.c
@@ -438,7 +438,7 @@ static int cz_dpm_init(struct amdgpu_device *adev)
pi->caps_td_ramping = true;
pi->caps_tcp_ramping = true;
}
- if (amdgpu_sclk_deep_sleep_en)
+ if (amdgpu_pp_feature_mask & SCLK_DEEP_SLEEP_MASK)
pi->caps_sclk_ds = true;
else
pi->caps_sclk_ds = false;
@@ -1250,7 +1250,8 @@ static void cz_update_current_ps(struct amdgpu_device *adev,
pi->current_ps = *ps;
pi->current_rps = *rps;
- pi->current_rps.ps_priv = ps;
+ pi->current_rps.ps_priv = &pi->current_ps;
+ adev->pm.dpm.current_ps = &pi->current_rps;
}
@@ -1262,7 +1263,8 @@ static void cz_update_requested_ps(struct amdgpu_device *adev,
pi->requested_ps = *ps;
pi->requested_rps = *rps;
- pi->requested_rps.ps_priv = ps;
+ pi->requested_rps.ps_priv = &pi->requested_ps;
+ adev->pm.dpm.requested_ps = &pi->requested_rps;
}
@@ -2109,9 +2111,8 @@ static void cz_dpm_powergate_uvd(struct amdgpu_device *adev, bool gate)
if (gate) {
if (pi->caps_uvd_pg) {
- /* disable clockgating so we can properly shut down the block */
ret = amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
- AMD_CG_STATE_UNGATE);
+ AMD_CG_STATE_GATE);
if (ret) {
DRM_ERROR("UVD DPM Power Gating failed to set clockgating state\n");
return;
@@ -2157,9 +2158,8 @@ static void cz_dpm_powergate_uvd(struct amdgpu_device *adev, bool gate)
return;
}
- /* enable clockgating. hw will dynamically gate/ungate clocks on the fly */
ret = amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
- AMD_CG_STATE_GATE);
+ AMD_CG_STATE_UNGATE);
if (ret) {
DRM_ERROR("UVD DPM Power Gating Failed to set clockgating state\n");
return;
@@ -2257,6 +2257,18 @@ static void cz_dpm_powergate_vce(struct amdgpu_device *adev, bool gate)
}
}
+static int cz_check_state_equal(struct amdgpu_device *adev,
+ struct amdgpu_ps *cps,
+ struct amdgpu_ps *rps,
+ bool *equal)
+{
+ if (equal == NULL)
+ return -EINVAL;
+
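+ /* always report states as different so every request triggers a
+ * full state transition */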
+ *equal = false;
+ return 0;
+}
+
const struct amd_ip_funcs cz_dpm_ip_funcs = {
.name = "cz_dpm",
.early_init = cz_dpm_early_init,
@@ -2289,6 +2301,7 @@ static const struct amdgpu_dpm_funcs cz_dpm_funcs = {
.vblank_too_short = NULL,
.powergate_uvd = cz_dpm_powergate_uvd,
.powergate_vce = cz_dpm_powergate_vce,
+ .check_state_equal = cz_check_state_equal,
};
static void cz_dpm_set_funcs(struct amdgpu_device *adev)
@@ -2296,3 +2309,12 @@ static void cz_dpm_set_funcs(struct amdgpu_device *adev)
if (NULL == adev->pm.funcs)
adev->pm.funcs = &cz_dpm_funcs;
}
+
+const struct amdgpu_ip_block_version cz_dpm_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_SMC,
+ .major = 8,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &cz_dpm_ip_funcs,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/cz_ih.c b/drivers/gpu/drm/amd/amdgpu/cz_ih.c
index 3d23a70b6432..fe7cbb24da7b 100644
--- a/drivers/gpu/drm/amd/amdgpu/cz_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/cz_ih.c
@@ -394,7 +394,7 @@ static int cz_ih_set_powergating_state(void *handle,
return 0;
}
-const struct amd_ip_funcs cz_ih_ip_funcs = {
+static const struct amd_ip_funcs cz_ih_ip_funcs = {
.name = "cz_ih",
.early_init = cz_ih_early_init,
.late_init = NULL,
@@ -423,3 +423,11 @@ static void cz_ih_set_interrupt_funcs(struct amdgpu_device *adev)
adev->irq.ih_funcs = &cz_ih_funcs;
}
+const struct amdgpu_ip_block_version cz_ih_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_IH,
+ .major = 3,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &cz_ih_ip_funcs,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/cz_ih.h b/drivers/gpu/drm/amd/amdgpu/cz_ih.h
index fc4057a2ecb9..14be7753221b 100644
--- a/drivers/gpu/drm/amd/amdgpu/cz_ih.h
+++ b/drivers/gpu/drm/amd/amdgpu/cz_ih.h
@@ -24,6 +24,6 @@
#ifndef __CZ_IH_H__
#define __CZ_IH_H__
-extern const struct amd_ip_funcs cz_ih_ip_funcs;
+extern const struct amdgpu_ip_block_version cz_ih_ip_block;
#endif /* __CZ_IH_H__ */
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
index 9260caef74fa..9999dc71b998 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
@@ -31,6 +31,7 @@
#include "atombios_encoders.h"
#include "amdgpu_pll.h"
#include "amdgpu_connectors.h"
+#include "dce_v10_0.h"
#include "dce/dce_10_0_d.h"
#include "dce/dce_10_0_sh_mask.h"
@@ -330,33 +331,12 @@ static int dce_v10_0_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
static bool dce_v10_0_hpd_sense(struct amdgpu_device *adev,
enum amdgpu_hpd_id hpd)
{
- int idx;
bool connected = false;
- switch (hpd) {
- case AMDGPU_HPD_1:
- idx = 0;
- break;
- case AMDGPU_HPD_2:
- idx = 1;
- break;
- case AMDGPU_HPD_3:
- idx = 2;
- break;
- case AMDGPU_HPD_4:
- idx = 3;
- break;
- case AMDGPU_HPD_5:
- idx = 4;
- break;
- case AMDGPU_HPD_6:
- idx = 5;
- break;
- default:
+ if (hpd >= adev->mode_info.num_hpd)
return connected;
- }
- if (RREG32(mmDC_HPD_INT_STATUS + hpd_offsets[idx]) &
+ if (RREG32(mmDC_HPD_INT_STATUS + hpd_offsets[hpd]) &
DC_HPD_INT_STATUS__DC_HPD_SENSE_MASK)
connected = true;
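
This hunk, and the matching ones below for the other HPD helpers and for dce_v11_0/dce_v6_0, collapses a six-case switch into a bounds check plus direct array indexing. That only works because the AMDGPU_HPD_n enum values are contiguous starting at 0, which is the assumption the patch relies on. A self-contained sketch of the pattern, with invented offsets:

#include <stdbool.h>
#include <stdio.h>

enum hpd_id { HPD_1, HPD_2, HPD_3, HPD_4, HPD_5, HPD_6, HPD_LAST };

/* per-pin register offsets; values here are made up for the demo */
static const unsigned int hpd_offsets[] = { 0x0, 0x8, 0x10, 0x18, 0x20, 0x28 };

static unsigned int mock_mmio[0x30];

static bool hpd_sense(unsigned int hpd, unsigned int num_hpd)
{
	if (hpd >= num_hpd)	/* replaces the switch's default: case */
		return false;
	/* kernel: RREG32(mmDC_HPD_INT_STATUS + hpd_offsets[hpd]) & SENSE_MASK */
	return mock_mmio[hpd_offsets[hpd]] & 0x1;
}

int main(void)
{
	mock_mmio[hpd_offsets[HPD_3]] = 0x1;	/* pretend HPD3 senses a cable */
	printf("%d %d\n", hpd_sense(HPD_3, HPD_LAST), hpd_sense(7, HPD_LAST));
	return 0;
}

Besides being shorter, the bounds check honours per-ASIC pin counts: an id that used to be a valid switch case is now rejected when the ASIC exposes fewer than six HPD pins.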
@@ -376,37 +356,16 @@ static void dce_v10_0_hpd_set_polarity(struct amdgpu_device *adev,
{
u32 tmp;
bool connected = dce_v10_0_hpd_sense(adev, hpd);
- int idx;
- switch (hpd) {
- case AMDGPU_HPD_1:
- idx = 0;
- break;
- case AMDGPU_HPD_2:
- idx = 1;
- break;
- case AMDGPU_HPD_3:
- idx = 2;
- break;
- case AMDGPU_HPD_4:
- idx = 3;
- break;
- case AMDGPU_HPD_5:
- idx = 4;
- break;
- case AMDGPU_HPD_6:
- idx = 5;
- break;
- default:
+ if (hpd >= adev->mode_info.num_hpd)
return;
- }
- tmp = RREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[idx]);
+ tmp = RREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd]);
if (connected)
tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_POLARITY, 0);
else
tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_POLARITY, 1);
- WREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[idx], tmp);
+ WREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd], tmp);
}
/**
@@ -422,33 +381,12 @@ static void dce_v10_0_hpd_init(struct amdgpu_device *adev)
struct drm_device *dev = adev->ddev;
struct drm_connector *connector;
u32 tmp;
- int idx;
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
- switch (amdgpu_connector->hpd.hpd) {
- case AMDGPU_HPD_1:
- idx = 0;
- break;
- case AMDGPU_HPD_2:
- idx = 1;
- break;
- case AMDGPU_HPD_3:
- idx = 2;
- break;
- case AMDGPU_HPD_4:
- idx = 3;
- break;
- case AMDGPU_HPD_5:
- idx = 4;
- break;
- case AMDGPU_HPD_6:
- idx = 5;
- break;
- default:
+ if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd)
continue;
- }
if (connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
connector->connector_type == DRM_MODE_CONNECTOR_LVDS) {
@@ -457,24 +395,24 @@ static void dce_v10_0_hpd_init(struct amdgpu_device *adev)
* https://bugzilla.redhat.com/show_bug.cgi?id=726143
* also avoid interrupt storms during dpms.
*/
- tmp = RREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[idx]);
+ tmp = RREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_EN, 0);
- WREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[idx], tmp);
+ WREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);
continue;
}
- tmp = RREG32(mmDC_HPD_CONTROL + hpd_offsets[idx]);
+ tmp = RREG32(mmDC_HPD_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
tmp = REG_SET_FIELD(tmp, DC_HPD_CONTROL, DC_HPD_EN, 1);
- WREG32(mmDC_HPD_CONTROL + hpd_offsets[idx], tmp);
+ WREG32(mmDC_HPD_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);
- tmp = RREG32(mmDC_HPD_TOGGLE_FILT_CNTL + hpd_offsets[idx]);
+ tmp = RREG32(mmDC_HPD_TOGGLE_FILT_CNTL + hpd_offsets[amdgpu_connector->hpd.hpd]);
tmp = REG_SET_FIELD(tmp, DC_HPD_TOGGLE_FILT_CNTL,
DC_HPD_CONNECT_INT_DELAY,
AMDGPU_HPD_CONNECT_INT_DELAY_IN_MS);
tmp = REG_SET_FIELD(tmp, DC_HPD_TOGGLE_FILT_CNTL,
DC_HPD_DISCONNECT_INT_DELAY,
AMDGPU_HPD_DISCONNECT_INT_DELAY_IN_MS);
- WREG32(mmDC_HPD_TOGGLE_FILT_CNTL + hpd_offsets[idx], tmp);
+ WREG32(mmDC_HPD_TOGGLE_FILT_CNTL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);
dce_v10_0_hpd_set_polarity(adev, amdgpu_connector->hpd.hpd);
amdgpu_irq_get(adev, &adev->hpd_irq,
@@ -495,37 +433,16 @@ static void dce_v10_0_hpd_fini(struct amdgpu_device *adev)
struct drm_device *dev = adev->ddev;
struct drm_connector *connector;
u32 tmp;
- int idx;
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
- switch (amdgpu_connector->hpd.hpd) {
- case AMDGPU_HPD_1:
- idx = 0;
- break;
- case AMDGPU_HPD_2:
- idx = 1;
- break;
- case AMDGPU_HPD_3:
- idx = 2;
- break;
- case AMDGPU_HPD_4:
- idx = 3;
- break;
- case AMDGPU_HPD_5:
- idx = 4;
- break;
- case AMDGPU_HPD_6:
- idx = 5;
- break;
- default:
+ if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd)
continue;
- }
- tmp = RREG32(mmDC_HPD_CONTROL + hpd_offsets[idx]);
+ tmp = RREG32(mmDC_HPD_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
tmp = REG_SET_FIELD(tmp, DC_HPD_CONTROL, DC_HPD_EN, 0);
- WREG32(mmDC_HPD_CONTROL + hpd_offsets[idx], tmp);
+ WREG32(mmDC_HPD_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);
amdgpu_irq_put(adev, &adev->hpd_irq,
amdgpu_connector->hpd.hpd);
@@ -2115,7 +2032,7 @@ static int dce_v10_0_crtc_do_set_base(struct drm_crtc *crtc,
u32 tmp, viewport_w, viewport_h;
int r;
bool bypass_lut = false;
- char *format_name;
+ struct drm_format_name_buf format_name;
/* no fb bound */
if (!atomic && !crtc->primary->fb) {
@@ -2227,9 +2144,8 @@ static int dce_v10_0_crtc_do_set_base(struct drm_crtc *crtc,
bypass_lut = true;
break;
default:
- format_name = drm_get_format_name(target_fb->pixel_format);
- DRM_ERROR("Unsupported screen format %s\n", format_name);
- kfree(format_name);
+ DRM_ERROR("Unsupported screen format %s\n",
+ drm_get_format_name(target_fb->pixel_format, &format_name));
return -EINVAL;
}
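
The format_name change tracks a DRM API update: drm_get_format_name() now fills a caller-provided drm_format_name_buf instead of returning a kmalloc'd string that every error path had to kfree(). A stand-alone mock of the new calling convention (the struct and helper here only imitate the DRM shapes):

#include <stdint.h>
#include <stdio.h>

struct drm_format_name_buf {
	char str[32];
};

static const char *get_format_name(uint32_t format,
				   struct drm_format_name_buf *buf)
{
	/* the real helper decodes the fourcc; a fixed rendering suffices here */
	snprintf(buf->str, sizeof(buf->str), "fourcc-%08x", format);
	return buf->str;
}

int main(void)
{
	struct drm_format_name_buf name;	/* lives on the caller's stack */

	/* no allocation, no kfree(), safe in error paths;
	 * 0x34325258 is the XRGB8888 fourcc ('XR24') */
	fprintf(stderr, "Unsupported screen format %s\n",
		get_format_name(0x34325258, &name));
	return 0;
}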
@@ -2577,6 +2493,9 @@ static int dce_v10_0_cursor_move_locked(struct drm_crtc *crtc,
struct amdgpu_device *adev = crtc->dev->dev_private;
int xorigin = 0, yorigin = 0;
+ amdgpu_crtc->cursor_x = x;
+ amdgpu_crtc->cursor_y = y;
+
/* avivo cursors are offset into the total surface */
x += crtc->x;
y += crtc->y;
@@ -2593,11 +2512,6 @@ static int dce_v10_0_cursor_move_locked(struct drm_crtc *crtc,
WREG32(mmCUR_POSITION + amdgpu_crtc->crtc_offset, (x << 16) | y);
WREG32(mmCUR_HOT_SPOT + amdgpu_crtc->crtc_offset, (xorigin << 16) | yorigin);
- WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
- ((amdgpu_crtc->cursor_width - 1) << 16) | (amdgpu_crtc->cursor_height - 1));
-
- amdgpu_crtc->cursor_x = x;
- amdgpu_crtc->cursor_y = y;
return 0;
}
@@ -2623,6 +2537,7 @@ static int dce_v10_0_crtc_cursor_set2(struct drm_crtc *crtc,
int32_t hot_y)
{
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+ struct amdgpu_device *adev = crtc->dev->dev_private;
struct drm_gem_object *obj;
struct amdgpu_bo *aobj;
int ret;
@@ -2661,9 +2576,6 @@ static int dce_v10_0_crtc_cursor_set2(struct drm_crtc *crtc,
return ret;
}
- amdgpu_crtc->cursor_width = width;
- amdgpu_crtc->cursor_height = height;
-
dce_v10_0_lock_cursor(crtc, true);
if (hot_x != amdgpu_crtc->cursor_hot_x ||
@@ -2679,6 +2591,14 @@ static int dce_v10_0_crtc_cursor_set2(struct drm_crtc *crtc,
amdgpu_crtc->cursor_hot_y = hot_y;
}
+ if (width != amdgpu_crtc->cursor_width ||
+ height != amdgpu_crtc->cursor_height) {
+ WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
+ (width - 1) << 16 | (height - 1));
+ amdgpu_crtc->cursor_width = width;
+ amdgpu_crtc->cursor_height = height;
+ }
+
dce_v10_0_show_cursor(crtc);
dce_v10_0_lock_cursor(crtc, false);
@@ -2700,6 +2620,7 @@ unpin:
static void dce_v10_0_cursor_reset(struct drm_crtc *crtc)
{
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+ struct amdgpu_device *adev = crtc->dev->dev_private;
if (amdgpu_crtc->cursor_bo) {
dce_v10_0_lock_cursor(crtc, true);
@@ -2707,6 +2628,10 @@ static void dce_v10_0_cursor_reset(struct drm_crtc *crtc)
dce_v10_0_cursor_move_locked(crtc, amdgpu_crtc->cursor_x,
amdgpu_crtc->cursor_y);
+ WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
+ (amdgpu_crtc->cursor_width - 1) << 16 |
+ (amdgpu_crtc->cursor_height - 1));
+
dce_v10_0_show_cursor(crtc);
dce_v10_0_lock_cursor(crtc, false);
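
The cursor hunks above move the CUR_SIZE programming out of the per-move path: the size register is written only when the dimensions actually change, and cursor_reset() replays the cached value once the hardware state has been lost. A small sketch of that caching, with mmio_write() standing in for WREG32():

#include <stdio.h>

struct crtc_state {
	int cursor_width, cursor_height;
};

static void mmio_write(const char *reg, unsigned int val)
{
	printf("WREG32 %s = 0x%08x\n", reg, val);
}

static void cursor_set(struct crtc_state *c, int w, int h)
{
	if (w != c->cursor_width || h != c->cursor_height) {
		mmio_write("CUR_SIZE", (unsigned int)((w - 1) << 16 | (h - 1)));
		c->cursor_width = w;
		c->cursor_height = h;
	}
}

static void cursor_reset(struct crtc_state *c)
{
	/* register content is stale after reset; replay the cached size */
	mmio_write("CUR_SIZE", (unsigned int)((c->cursor_width - 1) << 16 |
					      (c->cursor_height - 1)));
}

int main(void)
{
	struct crtc_state c = { 0, 0 };

	cursor_set(&c, 64, 64);	/* programs the register */
	cursor_set(&c, 64, 64);	/* skipped: no change */
	cursor_reset(&c);	/* replayed from the cache */
	return 0;
}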
@@ -3548,7 +3473,7 @@ static int dce_v10_0_set_powergating_state(void *handle,
return 0;
}
-const struct amd_ip_funcs dce_v10_0_ip_funcs = {
+static const struct amd_ip_funcs dce_v10_0_ip_funcs = {
.name = "dce_v10_0",
.early_init = dce_v10_0_early_init,
.late_init = NULL,
@@ -3833,7 +3758,6 @@ static const struct amdgpu_display_funcs dce_v10_0_display_funcs = {
.bandwidth_update = &dce_v10_0_bandwidth_update,
.vblank_get_counter = &dce_v10_0_vblank_get_counter,
.vblank_wait = &dce_v10_0_vblank_wait,
- .is_display_hung = &dce_v10_0_is_display_hung,
.backlight_set_level = &amdgpu_atombios_encoder_set_backlight_level,
.backlight_get_level = &amdgpu_atombios_encoder_get_backlight_level,
.hpd_sense = &dce_v10_0_hpd_sense,
@@ -3879,3 +3803,21 @@ static void dce_v10_0_set_irq_funcs(struct amdgpu_device *adev)
adev->hpd_irq.num_types = AMDGPU_HPD_LAST;
adev->hpd_irq.funcs = &dce_v10_0_hpd_irq_funcs;
}
+
+const struct amdgpu_ip_block_version dce_v10_0_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_DCE,
+ .major = 10,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &dce_v10_0_ip_funcs,
+};
+
+const struct amdgpu_ip_block_version dce_v10_1_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_DCE,
+ .major = 10,
+ .minor = 1,
+ .rev = 0,
+ .funcs = &dce_v10_0_ip_funcs,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.h b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.h
index e3dc04d293e4..7a0747789f1d 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.h
@@ -24,7 +24,9 @@
#ifndef __DCE_V10_0_H__
#define __DCE_V10_0_H__
-extern const struct amd_ip_funcs dce_v10_0_ip_funcs;
+
+extern const struct amdgpu_ip_block_version dce_v10_0_ip_block;
+extern const struct amdgpu_ip_block_version dce_v10_1_ip_block;
void dce_v10_0_disable_dce(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
index 367739bd1927..b3d62b909f43 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
@@ -31,6 +31,7 @@
#include "atombios_encoders.h"
#include "amdgpu_pll.h"
#include "amdgpu_connectors.h"
+#include "dce_v11_0.h"
#include "dce/dce_11_0_d.h"
#include "dce/dce_11_0_sh_mask.h"
@@ -346,33 +347,12 @@ static int dce_v11_0_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
static bool dce_v11_0_hpd_sense(struct amdgpu_device *adev,
enum amdgpu_hpd_id hpd)
{
- int idx;
bool connected = false;
- switch (hpd) {
- case AMDGPU_HPD_1:
- idx = 0;
- break;
- case AMDGPU_HPD_2:
- idx = 1;
- break;
- case AMDGPU_HPD_3:
- idx = 2;
- break;
- case AMDGPU_HPD_4:
- idx = 3;
- break;
- case AMDGPU_HPD_5:
- idx = 4;
- break;
- case AMDGPU_HPD_6:
- idx = 5;
- break;
- default:
+ if (hpd >= adev->mode_info.num_hpd)
return connected;
- }
- if (RREG32(mmDC_HPD_INT_STATUS + hpd_offsets[idx]) &
+ if (RREG32(mmDC_HPD_INT_STATUS + hpd_offsets[hpd]) &
DC_HPD_INT_STATUS__DC_HPD_SENSE_MASK)
connected = true;
@@ -392,37 +372,16 @@ static void dce_v11_0_hpd_set_polarity(struct amdgpu_device *adev,
{
u32 tmp;
bool connected = dce_v11_0_hpd_sense(adev, hpd);
- int idx;
- switch (hpd) {
- case AMDGPU_HPD_1:
- idx = 0;
- break;
- case AMDGPU_HPD_2:
- idx = 1;
- break;
- case AMDGPU_HPD_3:
- idx = 2;
- break;
- case AMDGPU_HPD_4:
- idx = 3;
- break;
- case AMDGPU_HPD_5:
- idx = 4;
- break;
- case AMDGPU_HPD_6:
- idx = 5;
- break;
- default:
+ if (hpd >= adev->mode_info.num_hpd)
return;
- }
- tmp = RREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[idx]);
+ tmp = RREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd]);
if (connected)
tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_POLARITY, 0);
else
tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_POLARITY, 1);
- WREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[idx], tmp);
+ WREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd], tmp);
}
/**
@@ -438,33 +397,12 @@ static void dce_v11_0_hpd_init(struct amdgpu_device *adev)
struct drm_device *dev = adev->ddev;
struct drm_connector *connector;
u32 tmp;
- int idx;
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
- switch (amdgpu_connector->hpd.hpd) {
- case AMDGPU_HPD_1:
- idx = 0;
- break;
- case AMDGPU_HPD_2:
- idx = 1;
- break;
- case AMDGPU_HPD_3:
- idx = 2;
- break;
- case AMDGPU_HPD_4:
- idx = 3;
- break;
- case AMDGPU_HPD_5:
- idx = 4;
- break;
- case AMDGPU_HPD_6:
- idx = 5;
- break;
- default:
+ if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd)
continue;
- }
if (connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
connector->connector_type == DRM_MODE_CONNECTOR_LVDS) {
@@ -473,24 +411,24 @@ static void dce_v11_0_hpd_init(struct amdgpu_device *adev)
* https://bugzilla.redhat.com/show_bug.cgi?id=726143
* also avoid interrupt storms during dpms.
*/
- tmp = RREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[idx]);
+ tmp = RREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_EN, 0);
- WREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[idx], tmp);
+ WREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);
continue;
}
- tmp = RREG32(mmDC_HPD_CONTROL + hpd_offsets[idx]);
+ tmp = RREG32(mmDC_HPD_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
tmp = REG_SET_FIELD(tmp, DC_HPD_CONTROL, DC_HPD_EN, 1);
- WREG32(mmDC_HPD_CONTROL + hpd_offsets[idx], tmp);
+ WREG32(mmDC_HPD_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);
- tmp = RREG32(mmDC_HPD_TOGGLE_FILT_CNTL + hpd_offsets[idx]);
+ tmp = RREG32(mmDC_HPD_TOGGLE_FILT_CNTL + hpd_offsets[amdgpu_connector->hpd.hpd]);
tmp = REG_SET_FIELD(tmp, DC_HPD_TOGGLE_FILT_CNTL,
DC_HPD_CONNECT_INT_DELAY,
AMDGPU_HPD_CONNECT_INT_DELAY_IN_MS);
tmp = REG_SET_FIELD(tmp, DC_HPD_TOGGLE_FILT_CNTL,
DC_HPD_DISCONNECT_INT_DELAY,
AMDGPU_HPD_DISCONNECT_INT_DELAY_IN_MS);
- WREG32(mmDC_HPD_TOGGLE_FILT_CNTL + hpd_offsets[idx], tmp);
+ WREG32(mmDC_HPD_TOGGLE_FILT_CNTL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);
dce_v11_0_hpd_set_polarity(adev, amdgpu_connector->hpd.hpd);
amdgpu_irq_get(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd);
@@ -510,37 +448,16 @@ static void dce_v11_0_hpd_fini(struct amdgpu_device *adev)
struct drm_device *dev = adev->ddev;
struct drm_connector *connector;
u32 tmp;
- int idx;
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
- switch (amdgpu_connector->hpd.hpd) {
- case AMDGPU_HPD_1:
- idx = 0;
- break;
- case AMDGPU_HPD_2:
- idx = 1;
- break;
- case AMDGPU_HPD_3:
- idx = 2;
- break;
- case AMDGPU_HPD_4:
- idx = 3;
- break;
- case AMDGPU_HPD_5:
- idx = 4;
- break;
- case AMDGPU_HPD_6:
- idx = 5;
- break;
- default:
+ if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd)
continue;
- }
- tmp = RREG32(mmDC_HPD_CONTROL + hpd_offsets[idx]);
+ tmp = RREG32(mmDC_HPD_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
tmp = REG_SET_FIELD(tmp, DC_HPD_CONTROL, DC_HPD_EN, 0);
- WREG32(mmDC_HPD_CONTROL + hpd_offsets[idx], tmp);
+ WREG32(mmDC_HPD_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);
amdgpu_irq_put(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd);
}
@@ -2096,7 +2013,7 @@ static int dce_v11_0_crtc_do_set_base(struct drm_crtc *crtc,
u32 tmp, viewport_w, viewport_h;
int r;
bool bypass_lut = false;
- char *format_name;
+ struct drm_format_name_buf format_name;
/* no fb bound */
if (!atomic && !crtc->primary->fb) {
@@ -2208,9 +2125,8 @@ static int dce_v11_0_crtc_do_set_base(struct drm_crtc *crtc,
bypass_lut = true;
break;
default:
- format_name = drm_get_format_name(target_fb->pixel_format);
- DRM_ERROR("Unsupported screen format %s\n", format_name);
- kfree(format_name);
+ DRM_ERROR("Unsupported screen format %s\n",
+ drm_get_format_name(target_fb->pixel_format, &format_name));
return -EINVAL;
}
@@ -2593,6 +2509,9 @@ static int dce_v11_0_cursor_move_locked(struct drm_crtc *crtc,
struct amdgpu_device *adev = crtc->dev->dev_private;
int xorigin = 0, yorigin = 0;
+ amdgpu_crtc->cursor_x = x;
+ amdgpu_crtc->cursor_y = y;
+
/* avivo cursors are offset into the total surface */
x += crtc->x;
y += crtc->y;
@@ -2609,11 +2528,6 @@ static int dce_v11_0_cursor_move_locked(struct drm_crtc *crtc,
WREG32(mmCUR_POSITION + amdgpu_crtc->crtc_offset, (x << 16) | y);
WREG32(mmCUR_HOT_SPOT + amdgpu_crtc->crtc_offset, (xorigin << 16) | yorigin);
- WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
- ((amdgpu_crtc->cursor_width - 1) << 16) | (amdgpu_crtc->cursor_height - 1));
-
- amdgpu_crtc->cursor_x = x;
- amdgpu_crtc->cursor_y = y;
return 0;
}
@@ -2639,6 +2553,7 @@ static int dce_v11_0_crtc_cursor_set2(struct drm_crtc *crtc,
int32_t hot_y)
{
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+ struct amdgpu_device *adev = crtc->dev->dev_private;
struct drm_gem_object *obj;
struct amdgpu_bo *aobj;
int ret;
@@ -2677,9 +2592,6 @@ static int dce_v11_0_crtc_cursor_set2(struct drm_crtc *crtc,
return ret;
}
- amdgpu_crtc->cursor_width = width;
- amdgpu_crtc->cursor_height = height;
-
dce_v11_0_lock_cursor(crtc, true);
if (hot_x != amdgpu_crtc->cursor_hot_x ||
@@ -2695,6 +2607,14 @@ static int dce_v11_0_crtc_cursor_set2(struct drm_crtc *crtc,
amdgpu_crtc->cursor_hot_y = hot_y;
}
+ if (width != amdgpu_crtc->cursor_width ||
+ height != amdgpu_crtc->cursor_height) {
+ WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
+ (width - 1) << 16 | (height - 1));
+ amdgpu_crtc->cursor_width = width;
+ amdgpu_crtc->cursor_height = height;
+ }
+
dce_v11_0_show_cursor(crtc);
dce_v11_0_lock_cursor(crtc, false);
@@ -2716,6 +2636,7 @@ unpin:
static void dce_v11_0_cursor_reset(struct drm_crtc *crtc)
{
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+ struct amdgpu_device *adev = crtc->dev->dev_private;
if (amdgpu_crtc->cursor_bo) {
dce_v11_0_lock_cursor(crtc, true);
@@ -2723,6 +2644,10 @@ static void dce_v11_0_cursor_reset(struct drm_crtc *crtc)
dce_v11_0_cursor_move_locked(crtc, amdgpu_crtc->cursor_x,
amdgpu_crtc->cursor_y);
+ WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
+ (amdgpu_crtc->cursor_width - 1) << 16 |
+ (amdgpu_crtc->cursor_height - 1));
+
dce_v11_0_show_cursor(crtc);
dce_v11_0_lock_cursor(crtc, false);
@@ -3605,7 +3530,7 @@ static int dce_v11_0_set_powergating_state(void *handle,
return 0;
}
-const struct amd_ip_funcs dce_v11_0_ip_funcs = {
+static const struct amd_ip_funcs dce_v11_0_ip_funcs = {
.name = "dce_v11_0",
.early_init = dce_v11_0_early_init,
.late_init = NULL,
@@ -3889,7 +3814,6 @@ static const struct amdgpu_display_funcs dce_v11_0_display_funcs = {
.bandwidth_update = &dce_v11_0_bandwidth_update,
.vblank_get_counter = &dce_v11_0_vblank_get_counter,
.vblank_wait = &dce_v11_0_vblank_wait,
- .is_display_hung = &dce_v11_0_is_display_hung,
.backlight_set_level = &amdgpu_atombios_encoder_set_backlight_level,
.backlight_get_level = &amdgpu_atombios_encoder_get_backlight_level,
.hpd_sense = &dce_v11_0_hpd_sense,
@@ -3935,3 +3859,21 @@ static void dce_v11_0_set_irq_funcs(struct amdgpu_device *adev)
adev->hpd_irq.num_types = AMDGPU_HPD_LAST;
adev->hpd_irq.funcs = &dce_v11_0_hpd_irq_funcs;
}
+
+const struct amdgpu_ip_block_version dce_v11_0_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_DCE,
+ .major = 11,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &dce_v11_0_ip_funcs,
+};
+
+const struct amdgpu_ip_block_version dce_v11_2_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_DCE,
+ .major = 11,
+ .minor = 2,
+ .rev = 0,
+ .funcs = &dce_v11_0_ip_funcs,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.h b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.h
index 1f58a65ba2ef..0d878ca3acba 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.h
@@ -24,7 +24,8 @@
#ifndef __DCE_V11_0_H__
#define __DCE_V11_0_H__
-extern const struct amd_ip_funcs dce_v11_0_ip_funcs;
+extern const struct amdgpu_ip_block_version dce_v11_0_ip_block;
+extern const struct amdgpu_ip_block_version dce_v11_2_ip_block;
void dce_v11_0_disable_dce(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
index 15f9fc0514b2..b4e4ec630e8c 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
@@ -30,8 +30,19 @@
#include "atombios_encoders.h"
#include "amdgpu_pll.h"
#include "amdgpu_connectors.h"
-#include "si/si_reg.h"
-#include "si/sid.h"
+
+#include "bif/bif_3_0_d.h"
+#include "bif/bif_3_0_sh_mask.h"
+#include "oss/oss_1_0_d.h"
+#include "oss/oss_1_0_sh_mask.h"
+#include "gca/gfx_6_0_d.h"
+#include "gca/gfx_6_0_sh_mask.h"
+#include "gmc/gmc_6_0_d.h"
+#include "gmc/gmc_6_0_sh_mask.h"
+#include "dce/dce_6_0_d.h"
+#include "dce/dce_6_0_sh_mask.h"
+#include "gca/gfx_7_2_enum.h"
+#include "si_enums.h"
static void dce_v6_0_set_display_funcs(struct amdgpu_device *adev);
static void dce_v6_0_set_irq_funcs(struct amdgpu_device *adev);
@@ -46,6 +57,16 @@ static const u32 crtc_offsets[6] =
SI_CRTC5_REGISTER_OFFSET
};
+static const u32 hpd_offsets[] =
+{
+ mmDC_HPD1_INT_STATUS - mmDC_HPD1_INT_STATUS,
+ mmDC_HPD2_INT_STATUS - mmDC_HPD1_INT_STATUS,
+ mmDC_HPD3_INT_STATUS - mmDC_HPD1_INT_STATUS,
+ mmDC_HPD4_INT_STATUS - mmDC_HPD1_INT_STATUS,
+ mmDC_HPD5_INT_STATUS - mmDC_HPD1_INT_STATUS,
+ mmDC_HPD6_INT_STATUS - mmDC_HPD1_INT_STATUS,
+};
+
static const uint32_t dig_offsets[] = {
SI_CRTC0_REGISTER_OFFSET,
SI_CRTC1_REGISTER_OFFSET,
@@ -63,46 +84,37 @@ static const struct {
uint32_t hpd;
} interrupt_status_offsets[6] = { {
- .reg = DISP_INTERRUPT_STATUS,
+ .reg = mmDISP_INTERRUPT_STATUS,
.vblank = DISP_INTERRUPT_STATUS__LB_D1_VBLANK_INTERRUPT_MASK,
.vline = DISP_INTERRUPT_STATUS__LB_D1_VLINE_INTERRUPT_MASK,
.hpd = DISP_INTERRUPT_STATUS__DC_HPD1_INTERRUPT_MASK
}, {
- .reg = DISP_INTERRUPT_STATUS_CONTINUE,
+ .reg = mmDISP_INTERRUPT_STATUS_CONTINUE,
.vblank = DISP_INTERRUPT_STATUS_CONTINUE__LB_D2_VBLANK_INTERRUPT_MASK,
.vline = DISP_INTERRUPT_STATUS_CONTINUE__LB_D2_VLINE_INTERRUPT_MASK,
.hpd = DISP_INTERRUPT_STATUS_CONTINUE__DC_HPD2_INTERRUPT_MASK
}, {
- .reg = DISP_INTERRUPT_STATUS_CONTINUE2,
+ .reg = mmDISP_INTERRUPT_STATUS_CONTINUE2,
.vblank = DISP_INTERRUPT_STATUS_CONTINUE2__LB_D3_VBLANK_INTERRUPT_MASK,
.vline = DISP_INTERRUPT_STATUS_CONTINUE2__LB_D3_VLINE_INTERRUPT_MASK,
.hpd = DISP_INTERRUPT_STATUS_CONTINUE2__DC_HPD3_INTERRUPT_MASK
}, {
- .reg = DISP_INTERRUPT_STATUS_CONTINUE3,
+ .reg = mmDISP_INTERRUPT_STATUS_CONTINUE3,
.vblank = DISP_INTERRUPT_STATUS_CONTINUE3__LB_D4_VBLANK_INTERRUPT_MASK,
.vline = DISP_INTERRUPT_STATUS_CONTINUE3__LB_D4_VLINE_INTERRUPT_MASK,
.hpd = DISP_INTERRUPT_STATUS_CONTINUE3__DC_HPD4_INTERRUPT_MASK
}, {
- .reg = DISP_INTERRUPT_STATUS_CONTINUE4,
+ .reg = mmDISP_INTERRUPT_STATUS_CONTINUE4,
.vblank = DISP_INTERRUPT_STATUS_CONTINUE4__LB_D5_VBLANK_INTERRUPT_MASK,
.vline = DISP_INTERRUPT_STATUS_CONTINUE4__LB_D5_VLINE_INTERRUPT_MASK,
.hpd = DISP_INTERRUPT_STATUS_CONTINUE4__DC_HPD5_INTERRUPT_MASK
}, {
- .reg = DISP_INTERRUPT_STATUS_CONTINUE5,
+ .reg = mmDISP_INTERRUPT_STATUS_CONTINUE5,
.vblank = DISP_INTERRUPT_STATUS_CONTINUE5__LB_D6_VBLANK_INTERRUPT_MASK,
.vline = DISP_INTERRUPT_STATUS_CONTINUE5__LB_D6_VLINE_INTERRUPT_MASK,
.hpd = DISP_INTERRUPT_STATUS_CONTINUE5__DC_HPD6_INTERRUPT_MASK
} };
-static const uint32_t hpd_int_control_offsets[6] = {
- DC_HPD1_INT_CONTROL,
- DC_HPD2_INT_CONTROL,
- DC_HPD3_INT_CONTROL,
- DC_HPD4_INT_CONTROL,
- DC_HPD5_INT_CONTROL,
- DC_HPD6_INT_CONTROL,
-};
-
static u32 dce_v6_0_audio_endpt_rreg(struct amdgpu_device *adev,
u32 block_offset, u32 reg)
{
@@ -118,7 +130,7 @@ static void dce_v6_0_audio_endpt_wreg(struct amdgpu_device *adev,
static bool dce_v6_0_is_in_vblank(struct amdgpu_device *adev, int crtc)
{
- if (RREG32(EVERGREEN_CRTC_STATUS + crtc_offsets[crtc]) & EVERGREEN_CRTC_V_BLANK)
+ if (RREG32(mmCRTC_STATUS + crtc_offsets[crtc]) & CRTC_STATUS__CRTC_V_BLANK_MASK)
return true;
else
return false;
@@ -128,8 +140,8 @@ static bool dce_v6_0_is_counter_moving(struct amdgpu_device *adev, int crtc)
{
u32 pos1, pos2;
- pos1 = RREG32(EVERGREEN_CRTC_STATUS_POSITION + crtc_offsets[crtc]);
- pos2 = RREG32(EVERGREEN_CRTC_STATUS_POSITION + crtc_offsets[crtc]);
+ pos1 = RREG32(mmCRTC_STATUS_POSITION + crtc_offsets[crtc]);
+ pos2 = RREG32(mmCRTC_STATUS_POSITION + crtc_offsets[crtc]);
if (pos1 != pos2)
return true;
@@ -151,7 +163,7 @@ static void dce_v6_0_vblank_wait(struct amdgpu_device *adev, int crtc)
if (crtc >= adev->mode_info.num_crtc)
return;
- if (!(RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[crtc]) & EVERGREEN_CRTC_MASTER_EN))
+ if (!(RREG32(mmCRTC_CONTROL + crtc_offsets[crtc]) & CRTC_CONTROL__CRTC_MASTER_EN_MASK))
return;
/* depending on when we hit vblank, we may be close to active; if so,
@@ -179,7 +191,7 @@ static u32 dce_v6_0_vblank_get_counter(struct amdgpu_device *adev, int crtc)
if (crtc >= adev->mode_info.num_crtc)
return 0;
else
- return RREG32(CRTC_STATUS_FRAME_COUNT + crtc_offsets[crtc]);
+ return RREG32(mmCRTC_STATUS_FRAME_COUNT + crtc_offsets[crtc]);
}
static void dce_v6_0_pageflip_interrupt_init(struct amdgpu_device *adev)
@@ -219,16 +231,16 @@ static void dce_v6_0_page_flip(struct amdgpu_device *adev,
struct amdgpu_crtc *amdgpu_crtc = adev->mode_info.crtcs[crtc_id];
/* flip at hsync for async, default is vsync */
- WREG32(EVERGREEN_GRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset, async ?
- EVERGREEN_GRPH_SURFACE_UPDATE_H_RETRACE_EN : 0);
+ WREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset, async ?
+ GRPH_FLIP_CONTROL__GRPH_SURFACE_UPDATE_H_RETRACE_EN_MASK : 0);
/* update the scanout addresses */
- WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
+ WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
upper_32_bits(crtc_base));
- WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
+ WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
(u32)crtc_base);
/* post the write */
- RREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset);
+ RREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset);
}
static int dce_v6_0_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
@@ -236,8 +248,8 @@ static int dce_v6_0_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
{
if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
return -EINVAL;
- *vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END + crtc_offsets[crtc]);
- *position = RREG32(EVERGREEN_CRTC_STATUS_POSITION + crtc_offsets[crtc]);
+ *vbl = RREG32(mmCRTC_V_BLANK_START_END + crtc_offsets[crtc]);
+ *position = RREG32(mmCRTC_STATUS_POSITION + crtc_offsets[crtc]);
return 0;
@@ -257,34 +269,11 @@ static bool dce_v6_0_hpd_sense(struct amdgpu_device *adev,
{
bool connected = false;
- switch (hpd) {
- case AMDGPU_HPD_1:
- if (RREG32(DC_HPD1_INT_STATUS) & DC_HPDx_SENSE)
- connected = true;
- break;
- case AMDGPU_HPD_2:
- if (RREG32(DC_HPD2_INT_STATUS) & DC_HPDx_SENSE)
- connected = true;
- break;
- case AMDGPU_HPD_3:
- if (RREG32(DC_HPD3_INT_STATUS) & DC_HPDx_SENSE)
- connected = true;
- break;
- case AMDGPU_HPD_4:
- if (RREG32(DC_HPD4_INT_STATUS) & DC_HPDx_SENSE)
- connected = true;
- break;
- case AMDGPU_HPD_5:
- if (RREG32(DC_HPD5_INT_STATUS) & DC_HPDx_SENSE)
- connected = true;
- break;
- case AMDGPU_HPD_6:
- if (RREG32(DC_HPD6_INT_STATUS) & DC_HPDx_SENSE)
- connected = true;
- break;
- default:
- break;
- }
+ if (hpd >= adev->mode_info.num_hpd)
+ return connected;
+
+ if (RREG32(mmDC_HPD1_INT_STATUS + hpd_offsets[hpd]) & DC_HPD1_INT_STATUS__DC_HPD1_SENSE_MASK)
+ connected = true;
return connected;
}
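
dce_v6_0 gets the same treatment, driven by the hpd_offsets table added near the top of the file: per-pin register addresses are expressed as deltas from the HPD1 instance, so the first entry is deliberately zero and pin n is addressed as base + hpd_offsets[n]. A sketch with made-up register numbers (the real mmDC_HPD*_INT_STATUS values come from the generated headers):

#include <stdio.h>

/* invented addresses; only the delta arithmetic matters */
#define mmDC_HPD1_INT_STATUS 0x1807
#define mmDC_HPD2_INT_STATUS 0x180f
#define mmDC_HPD3_INT_STATUS 0x1817

static const unsigned int hpd_offsets[] = {
	mmDC_HPD1_INT_STATUS - mmDC_HPD1_INT_STATUS,	/* 0 by construction */
	mmDC_HPD2_INT_STATUS - mmDC_HPD1_INT_STATUS,
	mmDC_HPD3_INT_STATUS - mmDC_HPD1_INT_STATUS,
};

int main(void)
{
	for (unsigned int i = 0; i < 3; i++)
		printf("HPD%u status reg: 0x%x\n", i + 1,
		       mmDC_HPD1_INT_STATUS + hpd_offsets[i]);
	return 0;
}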
@@ -303,58 +292,15 @@ static void dce_v6_0_hpd_set_polarity(struct amdgpu_device *adev,
u32 tmp;
bool connected = dce_v6_0_hpd_sense(adev, hpd);
- switch (hpd) {
- case AMDGPU_HPD_1:
- tmp = RREG32(DC_HPD1_INT_CONTROL);
- if (connected)
- tmp &= ~DC_HPDx_INT_POLARITY;
- else
- tmp |= DC_HPDx_INT_POLARITY;
- WREG32(DC_HPD1_INT_CONTROL, tmp);
- break;
- case AMDGPU_HPD_2:
- tmp = RREG32(DC_HPD2_INT_CONTROL);
- if (connected)
- tmp &= ~DC_HPDx_INT_POLARITY;
- else
- tmp |= DC_HPDx_INT_POLARITY;
- WREG32(DC_HPD2_INT_CONTROL, tmp);
- break;
- case AMDGPU_HPD_3:
- tmp = RREG32(DC_HPD3_INT_CONTROL);
- if (connected)
- tmp &= ~DC_HPDx_INT_POLARITY;
- else
- tmp |= DC_HPDx_INT_POLARITY;
- WREG32(DC_HPD3_INT_CONTROL, tmp);
- break;
- case AMDGPU_HPD_4:
- tmp = RREG32(DC_HPD4_INT_CONTROL);
- if (connected)
- tmp &= ~DC_HPDx_INT_POLARITY;
- else
- tmp |= DC_HPDx_INT_POLARITY;
- WREG32(DC_HPD4_INT_CONTROL, tmp);
- break;
- case AMDGPU_HPD_5:
- tmp = RREG32(DC_HPD5_INT_CONTROL);
- if (connected)
- tmp &= ~DC_HPDx_INT_POLARITY;
- else
- tmp |= DC_HPDx_INT_POLARITY;
- WREG32(DC_HPD5_INT_CONTROL, tmp);
- break;
- case AMDGPU_HPD_6:
- tmp = RREG32(DC_HPD6_INT_CONTROL);
- if (connected)
- tmp &= ~DC_HPDx_INT_POLARITY;
- else
- tmp |= DC_HPDx_INT_POLARITY;
- WREG32(DC_HPD6_INT_CONTROL, tmp);
- break;
- default:
- break;
- }
+ if (hpd >= adev->mode_info.num_hpd)
+ return;
+
+ tmp = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd]);
+ if (connected)
+ tmp &= ~DC_HPD1_INT_CONTROL__DC_HPD1_INT_POLARITY_MASK;
+ else
+ tmp |= DC_HPD1_INT_CONTROL__DC_HPD1_INT_POLARITY_MASK;
+ WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd], tmp);
}
/**
@@ -369,34 +315,17 @@ static void dce_v6_0_hpd_init(struct amdgpu_device *adev)
{
struct drm_device *dev = adev->ddev;
struct drm_connector *connector;
- u32 tmp = DC_HPDx_CONNECTION_TIMER(0x9c4) |
- DC_HPDx_RX_INT_TIMER(0xfa) | DC_HPDx_EN;
+ u32 tmp;
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
- switch (amdgpu_connector->hpd.hpd) {
- case AMDGPU_HPD_1:
- WREG32(DC_HPD1_CONTROL, tmp);
- break;
- case AMDGPU_HPD_2:
- WREG32(DC_HPD2_CONTROL, tmp);
- break;
- case AMDGPU_HPD_3:
- WREG32(DC_HPD3_CONTROL, tmp);
- break;
- case AMDGPU_HPD_4:
- WREG32(DC_HPD4_CONTROL, tmp);
- break;
- case AMDGPU_HPD_5:
- WREG32(DC_HPD5_CONTROL, tmp);
- break;
- case AMDGPU_HPD_6:
- WREG32(DC_HPD6_CONTROL, tmp);
- break;
- default:
- break;
- }
+ if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd)
+ continue;
+
+ tmp = RREG32(mmDC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
+ tmp |= DC_HPD1_CONTROL__DC_HPD1_EN_MASK;
+ WREG32(mmDC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);
if (connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
connector->connector_type == DRM_MODE_CONNECTOR_LVDS) {
@@ -405,34 +334,9 @@ static void dce_v6_0_hpd_init(struct amdgpu_device *adev)
* https://bugzilla.redhat.com/show_bug.cgi?id=726143
* also avoid interrupt storms during dpms.
*/
- u32 dc_hpd_int_cntl_reg, dc_hpd_int_cntl;
-
- switch (amdgpu_connector->hpd.hpd) {
- case AMDGPU_HPD_1:
- dc_hpd_int_cntl_reg = DC_HPD1_INT_CONTROL;
- break;
- case AMDGPU_HPD_2:
- dc_hpd_int_cntl_reg = DC_HPD2_INT_CONTROL;
- break;
- case AMDGPU_HPD_3:
- dc_hpd_int_cntl_reg = DC_HPD3_INT_CONTROL;
- break;
- case AMDGPU_HPD_4:
- dc_hpd_int_cntl_reg = DC_HPD4_INT_CONTROL;
- break;
- case AMDGPU_HPD_5:
- dc_hpd_int_cntl_reg = DC_HPD5_INT_CONTROL;
- break;
- case AMDGPU_HPD_6:
- dc_hpd_int_cntl_reg = DC_HPD6_INT_CONTROL;
- break;
- default:
- continue;
- }
-
- dc_hpd_int_cntl = RREG32(dc_hpd_int_cntl_reg);
- dc_hpd_int_cntl &= ~DC_HPDx_INT_EN;
- WREG32(dc_hpd_int_cntl_reg, dc_hpd_int_cntl);
+ tmp = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
+ tmp &= ~DC_HPD1_INT_CONTROL__DC_HPD1_INT_EN_MASK;
+ WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);
continue;
}
@@ -454,46 +358,25 @@ static void dce_v6_0_hpd_fini(struct amdgpu_device *adev)
{
struct drm_device *dev = adev->ddev;
struct drm_connector *connector;
+ u32 tmp;
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
- switch (amdgpu_connector->hpd.hpd) {
- case AMDGPU_HPD_1:
- WREG32(DC_HPD1_CONTROL, 0);
- break;
- case AMDGPU_HPD_2:
- WREG32(DC_HPD2_CONTROL, 0);
- break;
- case AMDGPU_HPD_3:
- WREG32(DC_HPD3_CONTROL, 0);
- break;
- case AMDGPU_HPD_4:
- WREG32(DC_HPD4_CONTROL, 0);
- break;
- case AMDGPU_HPD_5:
- WREG32(DC_HPD5_CONTROL, 0);
- break;
- case AMDGPU_HPD_6:
- WREG32(DC_HPD6_CONTROL, 0);
- break;
- default:
- break;
- }
+ if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd)
+ continue;
+
+ tmp = RREG32(mmDC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
+ tmp &= ~DC_HPD1_CONTROL__DC_HPD1_EN_MASK;
+ WREG32(mmDC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);
+
amdgpu_irq_put(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd);
}
}
static u32 dce_v6_0_hpd_get_gpio_reg(struct amdgpu_device *adev)
{
- return SI_DC_GPIO_HPD_A;
-}
-
-static bool dce_v6_0_is_display_hung(struct amdgpu_device *adev)
-{
- DRM_INFO("xxxx: dce_v6_0_is_display_hung ----no imp!!!!!\n");
-
- return true;
+ return mmDC_GPIO_HPD_A;
}
static u32 evergreen_get_vblank_counter(struct amdgpu_device* adev, int crtc)
@@ -501,7 +384,7 @@ static u32 evergreen_get_vblank_counter(struct amdgpu_device* adev, int crtc)
if (crtc >= adev->mode_info.num_crtc)
return 0;
else
- return RREG32(CRTC_STATUS_FRAME_COUNT + crtc_offsets[crtc]);
+ return RREG32(mmCRTC_STATUS_FRAME_COUNT + crtc_offsets[crtc]);
}
static void dce_v6_0_stop_mc_access(struct amdgpu_device *adev,
@@ -510,25 +393,25 @@ static void dce_v6_0_stop_mc_access(struct amdgpu_device *adev,
u32 crtc_enabled, tmp, frame_count;
int i, j;
- save->vga_render_control = RREG32(VGA_RENDER_CONTROL);
- save->vga_hdp_control = RREG32(VGA_HDP_CONTROL);
+ save->vga_render_control = RREG32(mmVGA_RENDER_CONTROL);
+ save->vga_hdp_control = RREG32(mmVGA_HDP_CONTROL);
/* disable VGA render */
- WREG32(VGA_RENDER_CONTROL, 0);
+ WREG32(mmVGA_RENDER_CONTROL, 0);
/* blank the display controllers */
for (i = 0; i < adev->mode_info.num_crtc; i++) {
- crtc_enabled = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]) & EVERGREEN_CRTC_MASTER_EN;
+ crtc_enabled = RREG32(mmCRTC_CONTROL + crtc_offsets[i]) & CRTC_CONTROL__CRTC_MASTER_EN_MASK;
if (crtc_enabled) {
save->crtc_enabled[i] = true;
- tmp = RREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i]);
+ tmp = RREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i]);
- if (!(tmp & EVERGREEN_CRTC_BLANK_DATA_EN)) {
+ if (!(tmp & CRTC_BLANK_CONTROL__CRTC_BLANK_DATA_EN_MASK)) {
dce_v6_0_vblank_wait(adev, i);
- WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
- tmp |= EVERGREEN_CRTC_BLANK_DATA_EN;
- WREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i], tmp);
- WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 0);
+ WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1);
+ tmp |= CRTC_BLANK_CONTROL__CRTC_BLANK_DATA_EN_MASK;
+ WREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i], tmp);
+ WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0);
}
/* wait for the next frame */
frame_count = evergreen_get_vblank_counter(adev, i);
@@ -539,11 +422,11 @@ static void dce_v6_0_stop_mc_access(struct amdgpu_device *adev,
}
/* XXX this is a hack to avoid strange behavior with EFI on certain systems */
- WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
- tmp = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]);
- tmp &= ~EVERGREEN_CRTC_MASTER_EN;
- WREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i], tmp);
- WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 0);
+ WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1);
+ tmp = RREG32(mmCRTC_CONTROL + crtc_offsets[i]);
+ tmp &= ~CRTC_CONTROL__CRTC_MASTER_EN_MASK;
+ WREG32(mmCRTC_CONTROL + crtc_offsets[i], tmp);
+ WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0);
save->crtc_enabled[i] = false;
/* ***** */
} else {
@@ -560,41 +443,40 @@ static void dce_v6_0_resume_mc_access(struct amdgpu_device *adev,
/* update crtc base addresses */
for (i = 0; i < adev->mode_info.num_crtc; i++) {
- WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + crtc_offsets[i],
+ WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + crtc_offsets[i],
upper_32_bits(adev->mc.vram_start));
- WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + crtc_offsets[i],
+ WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS_HIGH + crtc_offsets[i],
upper_32_bits(adev->mc.vram_start));
- WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + crtc_offsets[i],
+ WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + crtc_offsets[i],
(u32)adev->mc.vram_start);
- WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + crtc_offsets[i],
+ WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS + crtc_offsets[i],
(u32)adev->mc.vram_start);
}
- WREG32(EVERGREEN_VGA_MEMORY_BASE_ADDRESS_HIGH, upper_32_bits(adev->mc.vram_start));
- WREG32(EVERGREEN_VGA_MEMORY_BASE_ADDRESS, (u32)adev->mc.vram_start);
+ WREG32(mmVGA_MEMORY_BASE_ADDRESS_HIGH, upper_32_bits(adev->mc.vram_start));
+ WREG32(mmVGA_MEMORY_BASE_ADDRESS, (u32)adev->mc.vram_start);
/* unlock regs and wait for update */
for (i = 0; i < adev->mode_info.num_crtc; i++) {
if (save->crtc_enabled[i]) {
- tmp = RREG32(EVERGREEN_MASTER_UPDATE_MODE + crtc_offsets[i]);
- if ((tmp & 0x7) != 3) {
+ tmp = RREG32(mmMASTER_UPDATE_MODE + crtc_offsets[i]);
+ if ((tmp & 0x7) != 0) {
tmp &= ~0x7;
- tmp |= 0x3;
- WREG32(EVERGREEN_MASTER_UPDATE_MODE + crtc_offsets[i], tmp);
+ WREG32(mmMASTER_UPDATE_MODE + crtc_offsets[i], tmp);
}
- tmp = RREG32(EVERGREEN_GRPH_UPDATE + crtc_offsets[i]);
- if (tmp & EVERGREEN_GRPH_UPDATE_LOCK) {
- tmp &= ~EVERGREEN_GRPH_UPDATE_LOCK;
- WREG32(EVERGREEN_GRPH_UPDATE + crtc_offsets[i], tmp);
+ tmp = RREG32(mmGRPH_UPDATE + crtc_offsets[i]);
+ if (tmp & GRPH_UPDATE__GRPH_UPDATE_LOCK_MASK) {
+ tmp &= ~GRPH_UPDATE__GRPH_UPDATE_LOCK_MASK;
+ WREG32(mmGRPH_UPDATE + crtc_offsets[i], tmp);
}
- tmp = RREG32(EVERGREEN_MASTER_UPDATE_LOCK + crtc_offsets[i]);
+ tmp = RREG32(mmMASTER_UPDATE_LOCK + crtc_offsets[i]);
if (tmp & 1) {
tmp &= ~1;
- WREG32(EVERGREEN_MASTER_UPDATE_LOCK + crtc_offsets[i], tmp);
+ WREG32(mmMASTER_UPDATE_LOCK + crtc_offsets[i], tmp);
}
for (j = 0; j < adev->usec_timeout; j++) {
- tmp = RREG32(EVERGREEN_GRPH_UPDATE + crtc_offsets[i]);
- if ((tmp & EVERGREEN_GRPH_SURFACE_UPDATE_PENDING) == 0)
+ tmp = RREG32(mmGRPH_UPDATE + crtc_offsets[i]);
+ if ((tmp & GRPH_UPDATE__GRPH_SURFACE_UPDATE_PENDING_MASK) == 0)
break;
udelay(1);
}
@@ -602,19 +484,62 @@ static void dce_v6_0_resume_mc_access(struct amdgpu_device *adev,
}
/* Unlock vga access */
- WREG32(VGA_HDP_CONTROL, save->vga_hdp_control);
+ WREG32(mmVGA_HDP_CONTROL, save->vga_hdp_control);
mdelay(1);
- WREG32(VGA_RENDER_CONTROL, save->vga_render_control);
+ WREG32(mmVGA_RENDER_CONTROL, save->vga_render_control);
}
static void dce_v6_0_set_vga_render_state(struct amdgpu_device *adev,
bool render)
{
- if (!render)
- WREG32(R_000300_VGA_RENDER_CONTROL,
- RREG32(R_000300_VGA_RENDER_CONTROL) & C_000300_VGA_VSTATUS_CNTL);
+ if (!render)
+ WREG32(mmVGA_RENDER_CONTROL,
+ RREG32(mmVGA_RENDER_CONTROL) & VGA_VSTATUS_CNTL);
+
+}
+
+static int dce_v6_0_get_num_crtc(struct amdgpu_device *adev)
+{
+ int num_crtc = 0;
+
+ switch (adev->asic_type) {
+ case CHIP_TAHITI:
+ case CHIP_PITCAIRN:
+ case CHIP_VERDE:
+ num_crtc = 6;
+ break;
+ case CHIP_OLAND:
+ num_crtc = 2;
+ break;
+ default:
+ num_crtc = 0;
+ }
+ return num_crtc;
+}
+
+void dce_v6_0_disable_dce(struct amdgpu_device *adev)
+{
+ /* Disable VGA render and any enabled CRTCs, if the ASIC has a DCE engine */
+ if (amdgpu_atombios_has_dce_engine_info(adev)) {
+ u32 tmp;
+ int crtc_enabled, i;
+
+ dce_v6_0_set_vga_render_state(adev, false);
+ /* Disable the CRTCs */
+ for (i = 0; i < dce_v6_0_get_num_crtc(adev); i++) {
+ crtc_enabled = RREG32(mmCRTC_CONTROL + crtc_offsets[i]) &
+ CRTC_CONTROL__CRTC_MASTER_EN_MASK;
+ if (crtc_enabled) {
+ WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1);
+ tmp = RREG32(mmCRTC_CONTROL + crtc_offsets[i]);
+ tmp &= ~CRTC_CONTROL__CRTC_MASTER_EN_MASK;
+ WREG32(mmCRTC_CONTROL + crtc_offsets[i], tmp);
+ WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0);
+ }
+ }
+ }
}
static void dce_v6_0_program_fmt(struct drm_encoder *encoder)
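
dce_v6_0_disable_dce() follows the same discipline as the stop_mc_access path above: take CRTC_UPDATE_LOCK, clear CRTC_MASTER_EN with a read-modify-write, then drop the lock so the double-buffered registers latch in one step. A mock of that sequence against an in-memory register file:

#include <stdio.h>

enum { CRTC_UPDATE_LOCK, CRTC_CONTROL, NREGS };
#define CRTC_MASTER_EN_MASK 0x1u

/* pretend register file: CRTC starts out enabled */
static unsigned int regs[NREGS] = { 0, CRTC_MASTER_EN_MASK };

static void crtc_disable(void)
{
	regs[CRTC_UPDATE_LOCK] = 1;			/* freeze double-buffered regs */
	regs[CRTC_CONTROL] &= ~CRTC_MASTER_EN_MASK;	/* RMW: clear only the enable bit */
	regs[CRTC_UPDATE_LOCK] = 0;			/* latch the update */
}

int main(void)
{
	if (regs[CRTC_CONTROL] & CRTC_MASTER_EN_MASK)
		crtc_disable();
	printf("CRTC_CONTROL = 0x%x\n", regs[CRTC_CONTROL]);
	return 0;
}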
@@ -647,19 +572,23 @@ static void dce_v6_0_program_fmt(struct drm_encoder *encoder)
case 6:
if (dither == AMDGPU_FMT_DITHER_ENABLE)
/* XXX sort out optimal dither settings */
- tmp |= (FMT_FRAME_RANDOM_ENABLE | FMT_HIGHPASS_RANDOM_ENABLE |
- FMT_SPATIAL_DITHER_EN);
+ tmp |= (FMT_BIT_DEPTH_CONTROL__FMT_FRAME_RANDOM_ENABLE_MASK |
+ FMT_BIT_DEPTH_CONTROL__FMT_HIGHPASS_RANDOM_ENABLE_MASK |
+ FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_EN_MASK);
else
- tmp |= FMT_TRUNCATE_EN;
+ tmp |= FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_EN_MASK;
break;
case 8:
if (dither == AMDGPU_FMT_DITHER_ENABLE)
/* XXX sort out optimal dither settings */
- tmp |= (FMT_FRAME_RANDOM_ENABLE | FMT_HIGHPASS_RANDOM_ENABLE |
- FMT_RGB_RANDOM_ENABLE |
- FMT_SPATIAL_DITHER_EN | FMT_SPATIAL_DITHER_DEPTH);
+ tmp |= (FMT_BIT_DEPTH_CONTROL__FMT_FRAME_RANDOM_ENABLE_MASK |
+ FMT_BIT_DEPTH_CONTROL__FMT_HIGHPASS_RANDOM_ENABLE_MASK |
+ FMT_BIT_DEPTH_CONTROL__FMT_RGB_RANDOM_ENABLE_MASK |
+ FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_EN_MASK |
+ FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_DEPTH_MASK);
else
- tmp |= (FMT_TRUNCATE_EN | FMT_TRUNCATE_DEPTH);
+ tmp |= (FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_EN_MASK |
+ FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_DEPTH_MASK);
break;
case 10:
default:
@@ -667,7 +596,7 @@ static void dce_v6_0_program_fmt(struct drm_encoder *encoder)
break;
}
- WREG32(FMT_BIT_DEPTH_CONTROL + amdgpu_crtc->crtc_offset, tmp);
+ WREG32(mmFMT_BIT_DEPTH_CONTROL + amdgpu_crtc->crtc_offset, tmp);
}
/**
@@ -681,7 +610,7 @@ static void dce_v6_0_program_fmt(struct drm_encoder *encoder)
*/
static u32 si_get_number_of_dram_channels(struct amdgpu_device *adev)
{
- u32 tmp = RREG32(MC_SHARED_CHMAP);
+ u32 tmp = RREG32(mmMC_SHARED_CHMAP);
switch ((tmp & MC_SHARED_CHMAP__NOOFCHAN_MASK) >> MC_SHARED_CHMAP__NOOFCHAN__SHIFT) {
case 0:
@@ -1178,28 +1107,28 @@ static void dce_v6_0_program_watermarks(struct amdgpu_device *adev,
}
/* select wm A */
- arb_control3 = RREG32(DPG_PIPE_ARBITRATION_CONTROL3 + amdgpu_crtc->crtc_offset);
+ arb_control3 = RREG32(mmDPG_PIPE_ARBITRATION_CONTROL3 + amdgpu_crtc->crtc_offset);
tmp = arb_control3;
tmp &= ~LATENCY_WATERMARK_MASK(3);
tmp |= LATENCY_WATERMARK_MASK(1);
- WREG32(DPG_PIPE_ARBITRATION_CONTROL3 + amdgpu_crtc->crtc_offset, tmp);
- WREG32(DPG_PIPE_LATENCY_CONTROL + amdgpu_crtc->crtc_offset,
- (LATENCY_LOW_WATERMARK(latency_watermark_a) |
- LATENCY_HIGH_WATERMARK(line_time)));
+ WREG32(mmDPG_PIPE_ARBITRATION_CONTROL3 + amdgpu_crtc->crtc_offset, tmp);
+ WREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset,
+ ((latency_watermark_a << DPG_PIPE_URGENCY_CONTROL__URGENCY_LOW_WATERMARK__SHIFT) |
+ (line_time << DPG_PIPE_URGENCY_CONTROL__URGENCY_HIGH_WATERMARK__SHIFT)));
/* select wm B */
- tmp = RREG32(DPG_PIPE_ARBITRATION_CONTROL3 + amdgpu_crtc->crtc_offset);
+ tmp = RREG32(mmDPG_PIPE_ARBITRATION_CONTROL3 + amdgpu_crtc->crtc_offset);
tmp &= ~LATENCY_WATERMARK_MASK(3);
tmp |= LATENCY_WATERMARK_MASK(2);
- WREG32(DPG_PIPE_ARBITRATION_CONTROL3 + amdgpu_crtc->crtc_offset, tmp);
- WREG32(DPG_PIPE_LATENCY_CONTROL + amdgpu_crtc->crtc_offset,
- (LATENCY_LOW_WATERMARK(latency_watermark_b) |
- LATENCY_HIGH_WATERMARK(line_time)));
+ WREG32(mmDPG_PIPE_ARBITRATION_CONTROL3 + amdgpu_crtc->crtc_offset, tmp);
+ WREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset,
+ ((latency_watermark_b << DPG_PIPE_URGENCY_CONTROL__URGENCY_LOW_WATERMARK__SHIFT) |
+ (line_time << DPG_PIPE_URGENCY_CONTROL__URGENCY_HIGH_WATERMARK__SHIFT)));
/* restore original selection */
- WREG32(DPG_PIPE_ARBITRATION_CONTROL3 + amdgpu_crtc->crtc_offset, arb_control3);
+ WREG32(mmDPG_PIPE_ARBITRATION_CONTROL3 + amdgpu_crtc->crtc_offset, arb_control3);
/* write the priority marks */
- WREG32(PRIORITY_A_CNT + amdgpu_crtc->crtc_offset, priority_a_cnt);
- WREG32(PRIORITY_B_CNT + amdgpu_crtc->crtc_offset, priority_b_cnt);
+ WREG32(mmPRIORITY_A_CNT + amdgpu_crtc->crtc_offset, priority_a_cnt);
+ WREG32(mmPRIORITY_B_CNT + amdgpu_crtc->crtc_offset, priority_b_cnt);
/* save values for DPM */
amdgpu_crtc->line_time = line_time;
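
The watermark hunk swaps the old helper macros for the generated __SHIFT/_MASK constants, packing the low and high urgency watermarks into a single 32-bit register value. A minimal sketch of that composition (the shift positions are stand-ins, not the real header values):

#include <stdio.h>

#define URGENCY_LOW_WATERMARK__SHIFT 0
#define URGENCY_HIGH_WATERMARK__SHIFT 16

static unsigned int pack_urgency(unsigned int low, unsigned int high)
{
	return (low << URGENCY_LOW_WATERMARK__SHIFT) |
	       (high << URGENCY_HIGH_WATERMARK__SHIFT);
}

int main(void)
{
	/* latency watermark in the low field, line time in the high one */
	printf("DPG_PIPE_URGENCY_CONTROL = 0x%08x\n",
	       pack_urgency(0x2ab, 0x3f0));
	return 0;
}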
@@ -1217,7 +1146,7 @@ static u32 dce_v6_0_line_buffer_adjust(struct amdgpu_device *adev,
/*
* Line Buffer Setup
* There are 3 line buffers, each one shared by 2 display controllers.
- * DC_LB_MEMORY_SPLIT controls how that line buffer is shared between
+ * mmDC_LB_MEMORY_SPLIT controls how that line buffer is shared between
* the display controllers. The partitioning is done via one of four
* preset allocations specified in bits 21:20:
* 0 - half lb
@@ -1240,14 +1169,14 @@ static u32 dce_v6_0_line_buffer_adjust(struct amdgpu_device *adev,
buffer_alloc = 0;
}
- WREG32(DC_LB_MEMORY_SPLIT + amdgpu_crtc->crtc_offset,
+ WREG32(mmDC_LB_MEMORY_SPLIT + amdgpu_crtc->crtc_offset,
DC_LB_MEMORY_CONFIG(tmp));
- WREG32(PIPE0_DMIF_BUFFER_CONTROL + pipe_offset,
- DMIF_BUFFERS_ALLOCATED(buffer_alloc));
+ WREG32(mmPIPE0_DMIF_BUFFER_CONTROL + pipe_offset,
+ (buffer_alloc << PIPE0_DMIF_BUFFER_CONTROL__DMIF_BUFFERS_ALLOCATED__SHIFT));
for (i = 0; i < adev->usec_timeout; i++) {
- if (RREG32(PIPE0_DMIF_BUFFER_CONTROL + pipe_offset) &
- DMIF_BUFFERS_ALLOCATED_COMPLETED)
+ if (RREG32(mmPIPE0_DMIF_BUFFER_CONTROL + pipe_offset) &
+ PIPE0_DMIF_BUFFER_CONTROL__DMIF_BUFFERS_ALLOCATION_COMPLETED_MASK)
break;
udelay(1);
}
@@ -1489,12 +1418,12 @@ static void dce_v6_0_afmt_fini(struct amdgpu_device *adev)
static const u32 vga_control_regs[6] =
{
- AVIVO_D1VGA_CONTROL,
- AVIVO_D2VGA_CONTROL,
- EVERGREEN_D3VGA_CONTROL,
- EVERGREEN_D4VGA_CONTROL,
- EVERGREEN_D5VGA_CONTROL,
- EVERGREEN_D6VGA_CONTROL,
+ mmD1VGA_CONTROL,
+ mmD2VGA_CONTROL,
+ mmD3VGA_CONTROL,
+ mmD4VGA_CONTROL,
+ mmD5VGA_CONTROL,
+ mmD6VGA_CONTROL,
};
static void dce_v6_0_vga_enable(struct drm_crtc *crtc, bool enable)
@@ -1514,7 +1443,7 @@ static void dce_v6_0_grph_enable(struct drm_crtc *crtc, bool enable)
struct drm_device *dev = crtc->dev;
struct amdgpu_device *adev = dev->dev_private;
- WREG32(EVERGREEN_GRPH_ENABLE + amdgpu_crtc->crtc_offset, enable ? 1 : 0);
+ WREG32(mmGRPH_ENABLE + amdgpu_crtc->crtc_offset, enable ? 1 : 0);
}
static int dce_v6_0_crtc_do_set_base(struct drm_crtc *crtc,
@@ -1530,10 +1459,11 @@ static int dce_v6_0_crtc_do_set_base(struct drm_crtc *crtc,
struct amdgpu_bo *abo;
uint64_t fb_location, tiling_flags;
uint32_t fb_format, fb_pitch_pixels, pipe_config;
- u32 fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_NONE);
+ u32 fb_swap = GRPH_ENDIAN_SWAP(GRPH_ENDIAN_NONE);
u32 viewport_w, viewport_h;
int r;
bool bypass_lut = false;
+ struct drm_format_name_buf format_name;
/* no fb bound */
if (!atomic && !crtc->primary->fb) {
@@ -1573,71 +1503,71 @@ static int dce_v6_0_crtc_do_set_base(struct drm_crtc *crtc,
switch (target_fb->pixel_format) {
case DRM_FORMAT_C8:
- fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_8BPP) |
- EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_INDEXED));
+ fb_format = (GRPH_DEPTH(GRPH_DEPTH_8BPP) |
+ GRPH_FORMAT(GRPH_FORMAT_INDEXED));
break;
case DRM_FORMAT_XRGB4444:
case DRM_FORMAT_ARGB4444:
- fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_16BPP) |
- EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_ARGB4444));
+ fb_format = (GRPH_DEPTH(GRPH_DEPTH_16BPP) |
+ GRPH_FORMAT(GRPH_FORMAT_ARGB4444));
#ifdef __BIG_ENDIAN
- fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_8IN16);
+ fb_swap = GRPH_ENDIAN_SWAP(GRPH_ENDIAN_8IN16);
#endif
break;
case DRM_FORMAT_XRGB1555:
case DRM_FORMAT_ARGB1555:
- fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_16BPP) |
- EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_ARGB1555));
+ fb_format = (GRPH_DEPTH(GRPH_DEPTH_16BPP) |
+ GRPH_FORMAT(GRPH_FORMAT_ARGB1555));
#ifdef __BIG_ENDIAN
- fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_8IN16);
+ fb_swap = GRPH_ENDIAN_SWAP(GRPH_ENDIAN_8IN16);
#endif
break;
case DRM_FORMAT_BGRX5551:
case DRM_FORMAT_BGRA5551:
- fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_16BPP) |
- EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_BGRA5551));
+ fb_format = (GRPH_DEPTH(GRPH_DEPTH_16BPP) |
+ GRPH_FORMAT(GRPH_FORMAT_BGRA5551));
#ifdef __BIG_ENDIAN
- fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_8IN16);
+ fb_swap = GRPH_ENDIAN_SWAP(GRPH_ENDIAN_8IN16);
#endif
break;
case DRM_FORMAT_RGB565:
- fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_16BPP) |
- EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_ARGB565));
+ fb_format = (GRPH_DEPTH(GRPH_DEPTH_16BPP) |
+ GRPH_FORMAT(GRPH_FORMAT_ARGB565));
#ifdef __BIG_ENDIAN
- fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_8IN16);
+ fb_swap = GRPH_ENDIAN_SWAP(GRPH_ENDIAN_8IN16);
#endif
break;
case DRM_FORMAT_XRGB8888:
case DRM_FORMAT_ARGB8888:
- fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_32BPP) |
- EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_ARGB8888));
+ fb_format = (GRPH_DEPTH(GRPH_DEPTH_32BPP) |
+ GRPH_FORMAT(GRPH_FORMAT_ARGB8888));
#ifdef __BIG_ENDIAN
- fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_8IN32);
+ fb_swap = GRPH_ENDIAN_SWAP(GRPH_ENDIAN_8IN32);
#endif
break;
case DRM_FORMAT_XRGB2101010:
case DRM_FORMAT_ARGB2101010:
- fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_32BPP) |
- EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_ARGB2101010));
+ fb_format = (GRPH_DEPTH(GRPH_DEPTH_32BPP) |
+ GRPH_FORMAT(GRPH_FORMAT_ARGB2101010));
#ifdef __BIG_ENDIAN
- fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_8IN32);
+ fb_swap = GRPH_ENDIAN_SWAP(GRPH_ENDIAN_8IN32);
#endif
/* Greater than 8 bpc fb needs to bypass hw-lut to retain precision */
bypass_lut = true;
break;
case DRM_FORMAT_BGRX1010102:
case DRM_FORMAT_BGRA1010102:
- fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_32BPP) |
- EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_BGRA1010102));
+ fb_format = (GRPH_DEPTH(GRPH_DEPTH_32BPP) |
+ GRPH_FORMAT(GRPH_FORMAT_BGRA1010102));
#ifdef __BIG_ENDIAN
- fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_8IN32);
+ fb_swap = GRPH_ENDIAN_SWAP(GRPH_ENDIAN_8IN32);
#endif
/* Greater than 8 bpc fb needs to bypass hw-lut to retain precision */
bypass_lut = true;
break;
default:
DRM_ERROR("Unsupported screen format %s\n",
- drm_get_format_name(target_fb->pixel_format));
+ drm_get_format_name(target_fb->pixel_format, &format_name));
return -EINVAL;
}
@@ -1650,75 +1580,75 @@ static int dce_v6_0_crtc_do_set_base(struct drm_crtc *crtc,
tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
- fb_format |= EVERGREEN_GRPH_NUM_BANKS(num_banks);
- fb_format |= EVERGREEN_GRPH_ARRAY_MODE(EVERGREEN_GRPH_ARRAY_2D_TILED_THIN1);
- fb_format |= EVERGREEN_GRPH_TILE_SPLIT(tile_split);
- fb_format |= EVERGREEN_GRPH_BANK_WIDTH(bankw);
- fb_format |= EVERGREEN_GRPH_BANK_HEIGHT(bankh);
- fb_format |= EVERGREEN_GRPH_MACRO_TILE_ASPECT(mtaspect);
+ fb_format |= GRPH_NUM_BANKS(num_banks);
+ fb_format |= GRPH_ARRAY_MODE(GRPH_ARRAY_2D_TILED_THIN1);
+ fb_format |= GRPH_TILE_SPLIT(tile_split);
+ fb_format |= GRPH_BANK_WIDTH(bankw);
+ fb_format |= GRPH_BANK_HEIGHT(bankh);
+ fb_format |= GRPH_MACRO_TILE_ASPECT(mtaspect);
} else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == ARRAY_1D_TILED_THIN1) {
- fb_format |= EVERGREEN_GRPH_ARRAY_MODE(EVERGREEN_GRPH_ARRAY_1D_TILED_THIN1);
+ fb_format |= GRPH_ARRAY_MODE(GRPH_ARRAY_1D_TILED_THIN1);
}
pipe_config = AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
- fb_format |= SI_GRPH_PIPE_CONFIG(pipe_config);
+ fb_format |= GRPH_PIPE_CONFIG(pipe_config);
dce_v6_0_vga_enable(crtc, false);
/* Make sure surface address is updated at vertical blank rather than
* horizontal blank
*/
- WREG32(EVERGREEN_GRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset, 0);
+ WREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset, 0);
- WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
+ WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
upper_32_bits(fb_location));
- WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
+ WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
upper_32_bits(fb_location));
- WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
- (u32)fb_location & EVERGREEN_GRPH_SURFACE_ADDRESS_MASK);
- WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
- (u32) fb_location & EVERGREEN_GRPH_SURFACE_ADDRESS_MASK);
- WREG32(EVERGREEN_GRPH_CONTROL + amdgpu_crtc->crtc_offset, fb_format);
- WREG32(EVERGREEN_GRPH_SWAP_CONTROL + amdgpu_crtc->crtc_offset, fb_swap);
+ WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
+ (u32)fb_location & GRPH_PRIMARY_SURFACE_ADDRESS__GRPH_PRIMARY_SURFACE_ADDRESS_MASK);
+ WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
+ (u32) fb_location & GRPH_PRIMARY_SURFACE_ADDRESS__GRPH_PRIMARY_SURFACE_ADDRESS_MASK);
+ WREG32(mmGRPH_CONTROL + amdgpu_crtc->crtc_offset, fb_format);
+ WREG32(mmGRPH_SWAP_CNTL + amdgpu_crtc->crtc_offset, fb_swap);
/*
* The LUT only has 256 slots for indexing by an 8 bpc fb. Bypass the LUT
* for > 8 bpc scanout to avoid truncation of fb indices to 8 msb's, to
* retain the full precision throughout the pipeline.
*/
- WREG32_P(EVERGREEN_GRPH_LUT_10BIT_BYPASS_CONTROL + amdgpu_crtc->crtc_offset,
- (bypass_lut ? EVERGREEN_LUT_10BIT_BYPASS_EN : 0),
- ~EVERGREEN_LUT_10BIT_BYPASS_EN);
+ WREG32_P(mmGRPH_LUT_10BIT_BYPASS + amdgpu_crtc->crtc_offset,
+ (bypass_lut ? GRPH_LUT_10BIT_BYPASS__GRPH_LUT_10BIT_BYPASS_EN_MASK : 0),
+ ~GRPH_LUT_10BIT_BYPASS__GRPH_LUT_10BIT_BYPASS_EN_MASK);
if (bypass_lut)
DRM_DEBUG_KMS("Bypassing hardware LUT due to 10 bit fb scanout.\n");
- WREG32(EVERGREEN_GRPH_SURFACE_OFFSET_X + amdgpu_crtc->crtc_offset, 0);
- WREG32(EVERGREEN_GRPH_SURFACE_OFFSET_Y + amdgpu_crtc->crtc_offset, 0);
- WREG32(EVERGREEN_GRPH_X_START + amdgpu_crtc->crtc_offset, 0);
- WREG32(EVERGREEN_GRPH_Y_START + amdgpu_crtc->crtc_offset, 0);
- WREG32(EVERGREEN_GRPH_X_END + amdgpu_crtc->crtc_offset, target_fb->width);
- WREG32(EVERGREEN_GRPH_Y_END + amdgpu_crtc->crtc_offset, target_fb->height);
+ WREG32(mmGRPH_SURFACE_OFFSET_X + amdgpu_crtc->crtc_offset, 0);
+ WREG32(mmGRPH_SURFACE_OFFSET_Y + amdgpu_crtc->crtc_offset, 0);
+ WREG32(mmGRPH_X_START + amdgpu_crtc->crtc_offset, 0);
+ WREG32(mmGRPH_Y_START + amdgpu_crtc->crtc_offset, 0);
+ WREG32(mmGRPH_X_END + amdgpu_crtc->crtc_offset, target_fb->width);
+ WREG32(mmGRPH_Y_END + amdgpu_crtc->crtc_offset, target_fb->height);
fb_pitch_pixels = target_fb->pitches[0] / (target_fb->bits_per_pixel / 8);
- WREG32(EVERGREEN_GRPH_PITCH + amdgpu_crtc->crtc_offset, fb_pitch_pixels);
+ WREG32(mmGRPH_PITCH + amdgpu_crtc->crtc_offset, fb_pitch_pixels);
dce_v6_0_grph_enable(crtc, true);
- WREG32(EVERGREEN_DESKTOP_HEIGHT + amdgpu_crtc->crtc_offset,
+ WREG32(mmDESKTOP_HEIGHT + amdgpu_crtc->crtc_offset,
target_fb->height);
x &= ~3;
y &= ~1;
- WREG32(EVERGREEN_VIEWPORT_START + amdgpu_crtc->crtc_offset,
+ WREG32(mmVIEWPORT_START + amdgpu_crtc->crtc_offset,
(x << 16) | y);
viewport_w = crtc->mode.hdisplay;
viewport_h = (crtc->mode.vdisplay + 1) & ~1;
- WREG32(EVERGREEN_VIEWPORT_SIZE + amdgpu_crtc->crtc_offset,
+ WREG32(mmVIEWPORT_SIZE + amdgpu_crtc->crtc_offset,
(viewport_w << 16) | viewport_h);
/* set pageflip to happen anywhere in vblank interval */
- WREG32(EVERGREEN_MASTER_UPDATE_MODE + amdgpu_crtc->crtc_offset, 0);
+ WREG32(mmMASTER_UPDATE_MODE + amdgpu_crtc->crtc_offset, 0);
if (!atomic && fb && fb != crtc->primary->fb) {
amdgpu_fb = to_amdgpu_framebuffer(fb);
@@ -1745,10 +1675,10 @@ static void dce_v6_0_set_interleave(struct drm_crtc *crtc,
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
if (mode->flags & DRM_MODE_FLAG_INTERLACE)
- WREG32(EVERGREEN_DATA_FORMAT + amdgpu_crtc->crtc_offset,
- EVERGREEN_INTERLEAVE_EN);
+ WREG32(mmDATA_FORMAT + amdgpu_crtc->crtc_offset,
+ INTERLEAVE_EN);
else
- WREG32(EVERGREEN_DATA_FORMAT + amdgpu_crtc->crtc_offset, 0);
+ WREG32(mmDATA_FORMAT + amdgpu_crtc->crtc_offset, 0);
}
static void dce_v6_0_crtc_load_lut(struct drm_crtc *crtc)
@@ -1761,54 +1691,52 @@ static void dce_v6_0_crtc_load_lut(struct drm_crtc *crtc)
DRM_DEBUG_KMS("%d\n", amdgpu_crtc->crtc_id);
- WREG32(NI_INPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset,
- (NI_INPUT_CSC_GRPH_MODE(NI_INPUT_CSC_BYPASS) |
- NI_INPUT_CSC_OVL_MODE(NI_INPUT_CSC_BYPASS)));
- WREG32(NI_PRESCALE_GRPH_CONTROL + amdgpu_crtc->crtc_offset,
- NI_GRPH_PRESCALE_BYPASS);
- WREG32(NI_PRESCALE_OVL_CONTROL + amdgpu_crtc->crtc_offset,
- NI_OVL_PRESCALE_BYPASS);
- WREG32(NI_INPUT_GAMMA_CONTROL + amdgpu_crtc->crtc_offset,
- (NI_GRPH_INPUT_GAMMA_MODE(NI_INPUT_GAMMA_USE_LUT) |
- NI_OVL_INPUT_GAMMA_MODE(NI_INPUT_GAMMA_USE_LUT)));
-
+ WREG32(mmINPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset,
+ ((0 << INPUT_CSC_CONTROL__INPUT_CSC_GRPH_MODE__SHIFT) |
+ (0 << INPUT_CSC_CONTROL__INPUT_CSC_OVL_MODE__SHIFT)));
+ WREG32(mmPRESCALE_GRPH_CONTROL + amdgpu_crtc->crtc_offset,
+ PRESCALE_GRPH_CONTROL__GRPH_PRESCALE_BYPASS_MASK);
+ WREG32(mmPRESCALE_OVL_CONTROL + amdgpu_crtc->crtc_offset,
+ PRESCALE_OVL_CONTROL__OVL_PRESCALE_BYPASS_MASK);
+ WREG32(mmINPUT_GAMMA_CONTROL + amdgpu_crtc->crtc_offset,
+ ((0 << INPUT_GAMMA_CONTROL__GRPH_INPUT_GAMMA_MODE__SHIFT) |
+ (0 << INPUT_GAMMA_CONTROL__OVL_INPUT_GAMMA_MODE__SHIFT)));
- WREG32(EVERGREEN_DC_LUT_CONTROL + amdgpu_crtc->crtc_offset, 0);
- WREG32(EVERGREEN_DC_LUT_BLACK_OFFSET_BLUE + amdgpu_crtc->crtc_offset, 0);
- WREG32(EVERGREEN_DC_LUT_BLACK_OFFSET_GREEN + amdgpu_crtc->crtc_offset, 0);
- WREG32(EVERGREEN_DC_LUT_BLACK_OFFSET_RED + amdgpu_crtc->crtc_offset, 0);
- WREG32(EVERGREEN_DC_LUT_WHITE_OFFSET_BLUE + amdgpu_crtc->crtc_offset, 0xffff);
- WREG32(EVERGREEN_DC_LUT_WHITE_OFFSET_GREEN + amdgpu_crtc->crtc_offset, 0xffff);
- WREG32(EVERGREEN_DC_LUT_WHITE_OFFSET_RED + amdgpu_crtc->crtc_offset, 0xffff);
- WREG32(EVERGREEN_DC_LUT_RW_MODE + amdgpu_crtc->crtc_offset, 0);
- WREG32(EVERGREEN_DC_LUT_WRITE_EN_MASK + amdgpu_crtc->crtc_offset, 0x00000007);
-
- WREG32(EVERGREEN_DC_LUT_RW_INDEX + amdgpu_crtc->crtc_offset, 0);
+ WREG32(mmDC_LUT_CONTROL + amdgpu_crtc->crtc_offset, 0);
+ WREG32(mmDC_LUT_BLACK_OFFSET_BLUE + amdgpu_crtc->crtc_offset, 0);
+ WREG32(mmDC_LUT_BLACK_OFFSET_GREEN + amdgpu_crtc->crtc_offset, 0);
+ WREG32(mmDC_LUT_BLACK_OFFSET_RED + amdgpu_crtc->crtc_offset, 0);
+ WREG32(mmDC_LUT_WHITE_OFFSET_BLUE + amdgpu_crtc->crtc_offset, 0xffff);
+ WREG32(mmDC_LUT_WHITE_OFFSET_GREEN + amdgpu_crtc->crtc_offset, 0xffff);
+ WREG32(mmDC_LUT_WHITE_OFFSET_RED + amdgpu_crtc->crtc_offset, 0xffff);
+ WREG32(mmDC_LUT_RW_MODE + amdgpu_crtc->crtc_offset, 0);
+ WREG32(mmDC_LUT_WRITE_EN_MASK + amdgpu_crtc->crtc_offset, 0x00000007);
+ WREG32(mmDC_LUT_RW_INDEX + amdgpu_crtc->crtc_offset, 0);
for (i = 0; i < 256; i++) {
- WREG32(EVERGREEN_DC_LUT_30_COLOR + amdgpu_crtc->crtc_offset,
+ WREG32(mmDC_LUT_30_COLOR + amdgpu_crtc->crtc_offset,
(amdgpu_crtc->lut_r[i] << 20) |
(amdgpu_crtc->lut_g[i] << 10) |
(amdgpu_crtc->lut_b[i] << 0));
}
- WREG32(NI_DEGAMMA_CONTROL + amdgpu_crtc->crtc_offset,
- (NI_GRPH_DEGAMMA_MODE(NI_DEGAMMA_BYPASS) |
- NI_OVL_DEGAMMA_MODE(NI_DEGAMMA_BYPASS) |
- NI_ICON_DEGAMMA_MODE(NI_DEGAMMA_BYPASS) |
- NI_CURSOR_DEGAMMA_MODE(NI_DEGAMMA_BYPASS)));
- WREG32(NI_GAMUT_REMAP_CONTROL + amdgpu_crtc->crtc_offset,
- (NI_GRPH_GAMUT_REMAP_MODE(NI_GAMUT_REMAP_BYPASS) |
- NI_OVL_GAMUT_REMAP_MODE(NI_GAMUT_REMAP_BYPASS)));
- WREG32(NI_REGAMMA_CONTROL + amdgpu_crtc->crtc_offset,
- (NI_GRPH_REGAMMA_MODE(NI_REGAMMA_BYPASS) |
- NI_OVL_REGAMMA_MODE(NI_REGAMMA_BYPASS)));
- WREG32(NI_OUTPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset,
- (NI_OUTPUT_CSC_GRPH_MODE(0) |
- NI_OUTPUT_CSC_OVL_MODE(NI_OUTPUT_CSC_BYPASS)));
+ WREG32(mmDEGAMMA_CONTROL + amdgpu_crtc->crtc_offset,
+ ((0 << DEGAMMA_CONTROL__GRPH_DEGAMMA_MODE__SHIFT) |
+ (0 << DEGAMMA_CONTROL__OVL_DEGAMMA_MODE__SHIFT) |
+ ICON_DEGAMMA_MODE(0) |
+ (0 << DEGAMMA_CONTROL__CURSOR_DEGAMMA_MODE__SHIFT)));
+ WREG32(mmGAMUT_REMAP_CONTROL + amdgpu_crtc->crtc_offset,
+ ((0 << GAMUT_REMAP_CONTROL__GRPH_GAMUT_REMAP_MODE__SHIFT) |
+ (0 << GAMUT_REMAP_CONTROL__OVL_GAMUT_REMAP_MODE__SHIFT)));
+ WREG32(mmREGAMMA_CONTROL + amdgpu_crtc->crtc_offset,
+ ((0 << REGAMMA_CONTROL__GRPH_REGAMMA_MODE__SHIFT) |
+ (0 << REGAMMA_CONTROL__OVL_REGAMMA_MODE__SHIFT)));
+ WREG32(mmOUTPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset,
+ ((0 << OUTPUT_CSC_CONTROL__OUTPUT_CSC_GRPH_MODE__SHIFT) |
+ (0 << OUTPUT_CSC_CONTROL__OUTPUT_CSC_OVL_MODE__SHIFT)));
/* XXX match this to the depth of the crtc fmt block, move to modeset? */
WREG32(0x1a50 + amdgpu_crtc->crtc_offset, 0);
@@ -1887,12 +1815,12 @@ static void dce_v6_0_lock_cursor(struct drm_crtc *crtc, bool lock)
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
uint32_t cur_lock;
- cur_lock = RREG32(EVERGREEN_CUR_UPDATE + amdgpu_crtc->crtc_offset);
+ cur_lock = RREG32(mmCUR_UPDATE + amdgpu_crtc->crtc_offset);
if (lock)
- cur_lock |= EVERGREEN_CURSOR_UPDATE_LOCK;
+ cur_lock |= CUR_UPDATE__CURSOR_UPDATE_LOCK_MASK;
else
- cur_lock &= ~EVERGREEN_CURSOR_UPDATE_LOCK;
- WREG32(EVERGREEN_CUR_UPDATE + amdgpu_crtc->crtc_offset, cur_lock);
+ cur_lock &= ~CUR_UPDATE__CURSOR_UPDATE_LOCK_MASK;
+ WREG32(mmCUR_UPDATE + amdgpu_crtc->crtc_offset, cur_lock);
}
static void dce_v6_0_hide_cursor(struct drm_crtc *crtc)
@@ -1900,9 +1828,9 @@ static void dce_v6_0_hide_cursor(struct drm_crtc *crtc)
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
struct amdgpu_device *adev = crtc->dev->dev_private;
- WREG32_IDX(EVERGREEN_CUR_CONTROL + amdgpu_crtc->crtc_offset,
- EVERGREEN_CURSOR_MODE(EVERGREEN_CURSOR_24_8_PRE_MULT) |
- EVERGREEN_CURSOR_URGENT_CONTROL(EVERGREEN_CURSOR_URGENT_1_2));
+ WREG32_IDX(mmCUR_CONTROL + amdgpu_crtc->crtc_offset,
+ (CURSOR_24_8_PRE_MULT << CUR_CONTROL__CURSOR_MODE__SHIFT) |
+ (CURSOR_URGENT_1_2 << CUR_CONTROL__CURSOR_URGENT_CONTROL__SHIFT));
}
@@ -1912,15 +1840,15 @@ static void dce_v6_0_show_cursor(struct drm_crtc *crtc)
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
struct amdgpu_device *adev = crtc->dev->dev_private;
- WREG32(EVERGREEN_CUR_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
+ WREG32(mmCUR_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
upper_32_bits(amdgpu_crtc->cursor_addr));
- WREG32(EVERGREEN_CUR_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
+ WREG32(mmCUR_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
lower_32_bits(amdgpu_crtc->cursor_addr));
- WREG32_IDX(EVERGREEN_CUR_CONTROL + amdgpu_crtc->crtc_offset,
- EVERGREEN_CURSOR_EN |
- EVERGREEN_CURSOR_MODE(EVERGREEN_CURSOR_24_8_PRE_MULT) |
- EVERGREEN_CURSOR_URGENT_CONTROL(EVERGREEN_CURSOR_URGENT_1_2));
+ WREG32_IDX(mmCUR_CONTROL + amdgpu_crtc->crtc_offset,
+ CUR_CONTROL__CURSOR_EN_MASK |
+ (CURSOR_24_8_PRE_MULT << CUR_CONTROL__CURSOR_MODE__SHIFT) |
+ (CURSOR_URGENT_1_2 << CUR_CONTROL__CURSOR_URGENT_CONTROL__SHIFT));
}
@@ -1931,7 +1859,8 @@ static int dce_v6_0_cursor_move_locked(struct drm_crtc *crtc,
struct amdgpu_device *adev = crtc->dev->dev_private;
int xorigin = 0, yorigin = 0;
- int w = amdgpu_crtc->cursor_width;
+ amdgpu_crtc->cursor_x = x;
+ amdgpu_crtc->cursor_y = y;
/* avivo cursors are offset into the total surface */
x += crtc->x;
@@ -1947,13 +1876,9 @@ static int dce_v6_0_cursor_move_locked(struct drm_crtc *crtc,
y = 0;
}
- WREG32(EVERGREEN_CUR_POSITION + amdgpu_crtc->crtc_offset, (x << 16) | y);
- WREG32(EVERGREEN_CUR_HOT_SPOT + amdgpu_crtc->crtc_offset, (xorigin << 16) | yorigin);
- WREG32(EVERGREEN_CUR_SIZE + amdgpu_crtc->crtc_offset,
- ((w - 1) << 16) | (amdgpu_crtc->cursor_height - 1));
+ WREG32(mmCUR_POSITION + amdgpu_crtc->crtc_offset, (x << 16) | y);
+ WREG32(mmCUR_HOT_SPOT + amdgpu_crtc->crtc_offset, (xorigin << 16) | yorigin);
- amdgpu_crtc->cursor_x = x;
- amdgpu_crtc->cursor_y = y;
return 0;
}
@@ -1978,6 +1903,7 @@ static int dce_v6_0_crtc_cursor_set2(struct drm_crtc *crtc,
int32_t hot_y)
{
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+ struct amdgpu_device *adev = crtc->dev->dev_private;
struct drm_gem_object *obj;
struct amdgpu_bo *aobj;
int ret;
@@ -2016,9 +1942,6 @@ static int dce_v6_0_crtc_cursor_set2(struct drm_crtc *crtc,
return ret;
}
- amdgpu_crtc->cursor_width = width;
- amdgpu_crtc->cursor_height = height;
-
dce_v6_0_lock_cursor(crtc, true);
if (hot_x != amdgpu_crtc->cursor_hot_x ||
@@ -2034,6 +1957,14 @@ static int dce_v6_0_crtc_cursor_set2(struct drm_crtc *crtc,
amdgpu_crtc->cursor_hot_y = hot_y;
}
+ if (width != amdgpu_crtc->cursor_width ||
+ height != amdgpu_crtc->cursor_height) {
+ WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
+ (width - 1) << 16 | (height - 1));
+ amdgpu_crtc->cursor_width = width;
+ amdgpu_crtc->cursor_height = height;
+ }
+
dce_v6_0_show_cursor(crtc);
dce_v6_0_lock_cursor(crtc, false);
@@ -2055,6 +1986,7 @@ unpin:
static void dce_v6_0_cursor_reset(struct drm_crtc *crtc)
{
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+ struct amdgpu_device *adev = crtc->dev->dev_private;
if (amdgpu_crtc->cursor_bo) {
dce_v6_0_lock_cursor(crtc, true);
@@ -2062,6 +1994,10 @@ static void dce_v6_0_cursor_reset(struct drm_crtc *crtc)
dce_v6_0_cursor_move_locked(crtc, amdgpu_crtc->cursor_x,
amdgpu_crtc->cursor_y);
+ WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
+ (amdgpu_crtc->cursor_width - 1) << 16 |
+ (amdgpu_crtc->cursor_height - 1));
+
dce_v6_0_show_cursor(crtc);
dce_v6_0_lock_cursor(crtc, false);
}
@@ -2117,13 +2053,13 @@ static void dce_v6_0_crtc_dpms(struct drm_crtc *crtc, int mode)
type = amdgpu_crtc_idx_to_irq_type(adev, amdgpu_crtc->crtc_id);
amdgpu_irq_update(adev, &adev->crtc_irq, type);
amdgpu_irq_update(adev, &adev->pageflip_irq, type);
- drm_vblank_post_modeset(dev, amdgpu_crtc->crtc_id);
+ drm_crtc_vblank_on(crtc);
dce_v6_0_crtc_load_lut(crtc);
break;
case DRM_MODE_DPMS_STANDBY:
case DRM_MODE_DPMS_SUSPEND:
case DRM_MODE_DPMS_OFF:
- drm_vblank_pre_modeset(dev, amdgpu_crtc->crtc_id);
+ drm_crtc_vblank_off(crtc);
if (amdgpu_crtc->enabled)
amdgpu_atombios_crtc_blank(crtc, ATOM_ENABLE);
amdgpu_atombios_crtc_enable(crtc, ATOM_DISABLE);
@@ -2338,21 +2274,20 @@ static int dce_v6_0_early_init(void *handle)
dce_v6_0_set_display_funcs(adev);
dce_v6_0_set_irq_funcs(adev);
+ adev->mode_info.num_crtc = dce_v6_0_get_num_crtc(adev);
+
switch (adev->asic_type) {
case CHIP_TAHITI:
case CHIP_PITCAIRN:
case CHIP_VERDE:
- adev->mode_info.num_crtc = 6;
adev->mode_info.num_hpd = 6;
adev->mode_info.num_dig = 6;
break;
case CHIP_OLAND:
- adev->mode_info.num_crtc = 2;
adev->mode_info.num_hpd = 2;
adev->mode_info.num_dig = 2;
break;
default:
- /* FIXME: not supported yet */
return -EINVAL;
}
@@ -2556,14 +2491,14 @@ static void dce_v6_0_set_crtc_vblank_interrupt_state(struct amdgpu_device *adev,
switch (state) {
case AMDGPU_IRQ_STATE_DISABLE:
- interrupt_mask = RREG32(INT_MASK + reg_block);
+ interrupt_mask = RREG32(mmINT_MASK + reg_block);
interrupt_mask &= ~VBLANK_INT_MASK;
- WREG32(INT_MASK + reg_block, interrupt_mask);
+ WREG32(mmINT_MASK + reg_block, interrupt_mask);
break;
case AMDGPU_IRQ_STATE_ENABLE:
- interrupt_mask = RREG32(INT_MASK + reg_block);
+ interrupt_mask = RREG32(mmINT_MASK + reg_block);
interrupt_mask |= VBLANK_INT_MASK;
- WREG32(INT_MASK + reg_block, interrupt_mask);
+ WREG32(mmINT_MASK + reg_block, interrupt_mask);
break;
default:
break;
@@ -2582,42 +2517,23 @@ static int dce_v6_0_set_hpd_interrupt_state(struct amdgpu_device *adev,
unsigned type,
enum amdgpu_interrupt_state state)
{
- u32 dc_hpd_int_cntl_reg, dc_hpd_int_cntl;
+ u32 dc_hpd_int_cntl;
- switch (type) {
- case AMDGPU_HPD_1:
- dc_hpd_int_cntl_reg = DC_HPD1_INT_CONTROL;
- break;
- case AMDGPU_HPD_2:
- dc_hpd_int_cntl_reg = DC_HPD2_INT_CONTROL;
- break;
- case AMDGPU_HPD_3:
- dc_hpd_int_cntl_reg = DC_HPD3_INT_CONTROL;
- break;
- case AMDGPU_HPD_4:
- dc_hpd_int_cntl_reg = DC_HPD4_INT_CONTROL;
- break;
- case AMDGPU_HPD_5:
- dc_hpd_int_cntl_reg = DC_HPD5_INT_CONTROL;
- break;
- case AMDGPU_HPD_6:
- dc_hpd_int_cntl_reg = DC_HPD6_INT_CONTROL;
- break;
- default:
+ if (type >= adev->mode_info.num_hpd) {
DRM_DEBUG("invalid hdp %d\n", type);
return 0;
}
switch (state) {
case AMDGPU_IRQ_STATE_DISABLE:
- dc_hpd_int_cntl = RREG32(dc_hpd_int_cntl_reg);
- dc_hpd_int_cntl &= ~(DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN);
- WREG32(dc_hpd_int_cntl_reg, dc_hpd_int_cntl);
+ dc_hpd_int_cntl = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[type]);
+ dc_hpd_int_cntl &= ~DC_HPDx_INT_EN;
+ WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[type], dc_hpd_int_cntl);
break;
case AMDGPU_IRQ_STATE_ENABLE:
- dc_hpd_int_cntl = RREG32(dc_hpd_int_cntl_reg);
- dc_hpd_int_cntl |= (DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN);
- WREG32(dc_hpd_int_cntl_reg, dc_hpd_int_cntl);
+ dc_hpd_int_cntl = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[type]);
+ dc_hpd_int_cntl |= DC_HPDx_INT_EN;
+ WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[type], dc_hpd_int_cntl);
break;
default:
break;
@@ -2685,7 +2601,7 @@ static int dce_v6_0_crtc_irq(struct amdgpu_device *adev,
switch (entry->src_data) {
case 0: /* vblank */
if (disp_int & interrupt_status_offsets[crtc].vblank)
- WREG32(VBLANK_STATUS + crtc_offsets[crtc], VBLANK_ACK);
+ WREG32(mmVBLANK_STATUS + crtc_offsets[crtc], VBLANK_ACK);
else
DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
@@ -2696,7 +2612,7 @@ static int dce_v6_0_crtc_irq(struct amdgpu_device *adev,
break;
case 1: /* vline */
if (disp_int & interrupt_status_offsets[crtc].vline)
- WREG32(VLINE_STATUS + crtc_offsets[crtc], VLINE_ACK);
+ WREG32(mmVLINE_STATUS + crtc_offsets[crtc], VLINE_ACK);
else
DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
@@ -2722,12 +2638,12 @@ static int dce_v6_0_set_pageflip_interrupt_state(struct amdgpu_device *adev,
return -EINVAL;
}
- reg = RREG32(GRPH_INT_CONTROL + crtc_offsets[type]);
+ reg = RREG32(mmGRPH_INTERRUPT_CONTROL + crtc_offsets[type]);
if (state == AMDGPU_IRQ_STATE_DISABLE)
- WREG32(GRPH_INT_CONTROL + crtc_offsets[type],
+ WREG32(mmGRPH_INTERRUPT_CONTROL + crtc_offsets[type],
reg & ~GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK);
else
- WREG32(GRPH_INT_CONTROL + crtc_offsets[type],
+ WREG32(mmGRPH_INTERRUPT_CONTROL + crtc_offsets[type],
reg | GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK);
return 0;
@@ -2750,9 +2666,9 @@ static int dce_v6_0_pageflip_irq(struct amdgpu_device *adev,
return -EINVAL;
}
- if (RREG32(GRPH_INT_STATUS + crtc_offsets[crtc_id]) &
+ if (RREG32(mmGRPH_INTERRUPT_STATUS + crtc_offsets[crtc_id]) &
GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_OCCURRED_MASK)
- WREG32(GRPH_INT_STATUS + crtc_offsets[crtc_id],
+ WREG32(mmGRPH_INTERRUPT_STATUS + crtc_offsets[crtc_id],
GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_CLEAR_MASK);
/* IRQ could occur when in initial stage */
@@ -2790,7 +2706,7 @@ static int dce_v6_0_hpd_irq(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
{
- uint32_t disp_int, mask, int_control, tmp;
+ uint32_t disp_int, mask, tmp;
unsigned hpd;
if (entry->src_data >= adev->mode_info.num_hpd) {
@@ -2801,12 +2717,11 @@ static int dce_v6_0_hpd_irq(struct amdgpu_device *adev,
hpd = entry->src_data;
disp_int = RREG32(interrupt_status_offsets[hpd].reg);
mask = interrupt_status_offsets[hpd].hpd;
- int_control = hpd_int_control_offsets[hpd];
if (disp_int & mask) {
- tmp = RREG32(int_control);
+ tmp = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd]);
tmp |= DC_HPD1_INT_CONTROL__DC_HPD1_INT_ACK_MASK;
- WREG32(int_control, tmp);
+ WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd], tmp);
schedule_work(&adev->hotplug_work);
DRM_INFO("IH: HPD%d\n", hpd + 1);
}
@@ -2827,7 +2742,7 @@ static int dce_v6_0_set_powergating_state(void *handle,
return 0;
}
-const struct amd_ip_funcs dce_v6_0_ip_funcs = {
+static const struct amd_ip_funcs dce_v6_0_ip_funcs = {
.name = "dce_v6_0",
.early_init = dce_v6_0_early_init,
.late_init = NULL,
@@ -3122,7 +3037,6 @@ static const struct amdgpu_display_funcs dce_v6_0_display_funcs = {
.bandwidth_update = &dce_v6_0_bandwidth_update,
.vblank_get_counter = &dce_v6_0_vblank_get_counter,
.vblank_wait = &dce_v6_0_vblank_wait,
- .is_display_hung = &dce_v6_0_is_display_hung,
.backlight_set_level = &amdgpu_atombios_encoder_set_backlight_level,
.backlight_get_level = &amdgpu_atombios_encoder_get_backlight_level,
.hpd_sense = &dce_v6_0_hpd_sense,
@@ -3168,3 +3082,21 @@ static void dce_v6_0_set_irq_funcs(struct amdgpu_device *adev)
adev->hpd_irq.num_types = AMDGPU_HPD_LAST;
adev->hpd_irq.funcs = &dce_v6_0_hpd_irq_funcs;
}
+
+const struct amdgpu_ip_block_version dce_v6_0_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_DCE,
+ .major = 6,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &dce_v6_0_ip_funcs,
+};
+
+const struct amdgpu_ip_block_version dce_v6_4_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_DCE,
+ .major = 6,
+ .minor = 4,
+ .rev = 0,
+ .funcs = &dce_v6_0_ip_funcs,
+};
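
Editor's note: the dce_v6_0.c hunks above repeatedly replace a per-pad switch
statement with a base register plus an hpd_offsets[] table guarded by a range
check against adev->mode_info.num_hpd. A minimal standalone sketch of that
pattern follows; the register name, base value, and offsets are illustrative
placeholders, not the real DCE register map.

/* Sketch of the switch-to-offset-table refactor used throughout this series. */
#include <stdio.h>

#define mmDC_HPD1_INT_CONTROL 0x1800u        /* hypothetical base offset */

static const unsigned int hpd_offsets[] = {  /* hypothetical per-pad strides */
	0x00, 0x0c, 0x18, 0x24, 0x30, 0x3c
};

/* Old style: one switch arm per pad picked the register.
 * New style: bounds-check, then index the table. */
static unsigned int hpd_int_control_reg(unsigned int hpd, unsigned int num_hpd)
{
	if (hpd >= num_hpd)
		return 0; /* invalid pad, mirrors the new range checks */
	return mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd];
}

int main(void)
{
	unsigned int i;

	for (i = 0; i < 6; i++)
		printf("HPD%u INT_CONTROL at 0x%x\n", i + 1,
		       hpd_int_control_reg(i, 6));
	return 0;
}

The same table indexing shows up again in the dce_v8_0.c hunks below, which is
why both files can drop their hpd_int_control_offsets arrays and switch blocks.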
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.h b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.h
index 6a5528105bb6..7b546b596de1 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.h
@@ -24,6 +24,9 @@
#ifndef __DCE_V6_0_H__
#define __DCE_V6_0_H__
-extern const struct amd_ip_funcs dce_v6_0_ip_funcs;
+extern const struct amdgpu_ip_block_version dce_v6_0_ip_block;
+extern const struct amdgpu_ip_block_version dce_v6_4_ip_block;
+
+void dce_v6_0_disable_dce(struct amdgpu_device *adev);
#endif
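
Editor's note: the header now exports version-tagged amdgpu_ip_block_version
structs instead of bare amd_ip_funcs, so several DCE revisions can share one
funcs table and differ only in major/minor. A hedged standalone mock of that
pattern is below; the struct layout imitates amdgpu_ip_block_version, and the
assumption that OLAND selects the 6.4 variant is illustrative, not taken from
this diff.

#include <stdio.h>

struct ip_funcs {
	const char *name;
};

struct ip_block_version {
	int major, minor, rev;
	const struct ip_funcs *funcs;
};

static const struct ip_funcs dce_funcs = { .name = "dce_v6_0" };

/* Two versions, one implementation, as with dce_v6_0/dce_v6_4 above. */
static const struct ip_block_version dce_v6_0_block = { 6, 0, 0, &dce_funcs };
static const struct ip_block_version dce_v6_4_block = { 6, 4, 0, &dce_funcs };

static const struct ip_block_version *pick_dce(int is_oland)
{
	/* hypothetical per-ASIC selection */
	return is_oland ? &dce_v6_4_block : &dce_v6_0_block;
}

int main(void)
{
	const struct ip_block_version *b = pick_dce(1);

	printf("selected %s v%d.%d rev %d\n",
	       b->funcs->name, b->major, b->minor, b->rev);
	return 0;
}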
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
index 8c4d808db0f1..584abe834a3c 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
@@ -31,6 +31,7 @@
#include "atombios_encoders.h"
#include "amdgpu_pll.h"
#include "amdgpu_connectors.h"
+#include "dce_v8_0.h"
#include "dce/dce_8_0_d.h"
#include "dce/dce_8_0_sh_mask.h"
@@ -56,6 +57,16 @@ static const u32 crtc_offsets[6] =
CRTC5_REGISTER_OFFSET
};
+static const u32 hpd_offsets[] =
+{
+ HPD0_REGISTER_OFFSET,
+ HPD1_REGISTER_OFFSET,
+ HPD2_REGISTER_OFFSET,
+ HPD3_REGISTER_OFFSET,
+ HPD4_REGISTER_OFFSET,
+ HPD5_REGISTER_OFFSET
+};
+
static const uint32_t dig_offsets[] = {
CRTC0_REGISTER_OFFSET,
CRTC1_REGISTER_OFFSET,
@@ -104,15 +115,6 @@ static const struct {
.hpd = DISP_INTERRUPT_STATUS_CONTINUE5__DC_HPD6_INTERRUPT_MASK
} };
-static const uint32_t hpd_int_control_offsets[6] = {
- mmDC_HPD1_INT_CONTROL,
- mmDC_HPD2_INT_CONTROL,
- mmDC_HPD3_INT_CONTROL,
- mmDC_HPD4_INT_CONTROL,
- mmDC_HPD5_INT_CONTROL,
- mmDC_HPD6_INT_CONTROL,
-};
-
static u32 dce_v8_0_audio_endpt_rreg(struct amdgpu_device *adev,
u32 block_offset, u32 reg)
{
@@ -278,34 +280,12 @@ static bool dce_v8_0_hpd_sense(struct amdgpu_device *adev,
{
bool connected = false;
- switch (hpd) {
- case AMDGPU_HPD_1:
- if (RREG32(mmDC_HPD1_INT_STATUS) & DC_HPD1_INT_STATUS__DC_HPD1_SENSE_MASK)
- connected = true;
- break;
- case AMDGPU_HPD_2:
- if (RREG32(mmDC_HPD2_INT_STATUS) & DC_HPD2_INT_STATUS__DC_HPD2_SENSE_MASK)
- connected = true;
- break;
- case AMDGPU_HPD_3:
- if (RREG32(mmDC_HPD3_INT_STATUS) & DC_HPD3_INT_STATUS__DC_HPD3_SENSE_MASK)
- connected = true;
- break;
- case AMDGPU_HPD_4:
- if (RREG32(mmDC_HPD4_INT_STATUS) & DC_HPD4_INT_STATUS__DC_HPD4_SENSE_MASK)
- connected = true;
- break;
- case AMDGPU_HPD_5:
- if (RREG32(mmDC_HPD5_INT_STATUS) & DC_HPD5_INT_STATUS__DC_HPD5_SENSE_MASK)
- connected = true;
- break;
- case AMDGPU_HPD_6:
- if (RREG32(mmDC_HPD6_INT_STATUS) & DC_HPD6_INT_STATUS__DC_HPD6_SENSE_MASK)
- connected = true;
- break;
- default:
- break;
- }
+ if (hpd >= adev->mode_info.num_hpd)
+ return connected;
+
+ if (RREG32(mmDC_HPD1_INT_STATUS + hpd_offsets[hpd]) &
+ DC_HPD1_INT_STATUS__DC_HPD1_SENSE_MASK)
+ connected = true;
return connected;
}
@@ -324,58 +304,15 @@ static void dce_v8_0_hpd_set_polarity(struct amdgpu_device *adev,
u32 tmp;
bool connected = dce_v8_0_hpd_sense(adev, hpd);
- switch (hpd) {
- case AMDGPU_HPD_1:
- tmp = RREG32(mmDC_HPD1_INT_CONTROL);
- if (connected)
- tmp &= ~DC_HPD1_INT_CONTROL__DC_HPD1_INT_POLARITY_MASK;
- else
- tmp |= DC_HPD1_INT_CONTROL__DC_HPD1_INT_POLARITY_MASK;
- WREG32(mmDC_HPD1_INT_CONTROL, tmp);
- break;
- case AMDGPU_HPD_2:
- tmp = RREG32(mmDC_HPD2_INT_CONTROL);
- if (connected)
- tmp &= ~DC_HPD2_INT_CONTROL__DC_HPD2_INT_POLARITY_MASK;
- else
- tmp |= DC_HPD2_INT_CONTROL__DC_HPD2_INT_POLARITY_MASK;
- WREG32(mmDC_HPD2_INT_CONTROL, tmp);
- break;
- case AMDGPU_HPD_3:
- tmp = RREG32(mmDC_HPD3_INT_CONTROL);
- if (connected)
- tmp &= ~DC_HPD3_INT_CONTROL__DC_HPD3_INT_POLARITY_MASK;
- else
- tmp |= DC_HPD3_INT_CONTROL__DC_HPD3_INT_POLARITY_MASK;
- WREG32(mmDC_HPD3_INT_CONTROL, tmp);
- break;
- case AMDGPU_HPD_4:
- tmp = RREG32(mmDC_HPD4_INT_CONTROL);
- if (connected)
- tmp &= ~DC_HPD4_INT_CONTROL__DC_HPD4_INT_POLARITY_MASK;
- else
- tmp |= DC_HPD4_INT_CONTROL__DC_HPD4_INT_POLARITY_MASK;
- WREG32(mmDC_HPD4_INT_CONTROL, tmp);
- break;
- case AMDGPU_HPD_5:
- tmp = RREG32(mmDC_HPD5_INT_CONTROL);
- if (connected)
- tmp &= ~DC_HPD5_INT_CONTROL__DC_HPD5_INT_POLARITY_MASK;
- else
- tmp |= DC_HPD5_INT_CONTROL__DC_HPD5_INT_POLARITY_MASK;
- WREG32(mmDC_HPD5_INT_CONTROL, tmp);
- break;
- case AMDGPU_HPD_6:
- tmp = RREG32(mmDC_HPD6_INT_CONTROL);
- if (connected)
- tmp &= ~DC_HPD6_INT_CONTROL__DC_HPD6_INT_POLARITY_MASK;
- else
- tmp |= DC_HPD6_INT_CONTROL__DC_HPD6_INT_POLARITY_MASK;
- WREG32(mmDC_HPD6_INT_CONTROL, tmp);
- break;
- default:
- break;
- }
+ if (hpd >= adev->mode_info.num_hpd)
+ return;
+
+ tmp = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd]);
+ if (connected)
+ tmp &= ~DC_HPD1_INT_CONTROL__DC_HPD1_INT_POLARITY_MASK;
+ else
+ tmp |= DC_HPD1_INT_CONTROL__DC_HPD1_INT_POLARITY_MASK;
+ WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd], tmp);
}
/**
@@ -390,35 +327,17 @@ static void dce_v8_0_hpd_init(struct amdgpu_device *adev)
{
struct drm_device *dev = adev->ddev;
struct drm_connector *connector;
- u32 tmp = (0x9c4 << DC_HPD1_CONTROL__DC_HPD1_CONNECTION_TIMER__SHIFT) |
- (0xfa << DC_HPD1_CONTROL__DC_HPD1_RX_INT_TIMER__SHIFT) |
- DC_HPD1_CONTROL__DC_HPD1_EN_MASK;
+ u32 tmp;
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
- switch (amdgpu_connector->hpd.hpd) {
- case AMDGPU_HPD_1:
- WREG32(mmDC_HPD1_CONTROL, tmp);
- break;
- case AMDGPU_HPD_2:
- WREG32(mmDC_HPD2_CONTROL, tmp);
- break;
- case AMDGPU_HPD_3:
- WREG32(mmDC_HPD3_CONTROL, tmp);
- break;
- case AMDGPU_HPD_4:
- WREG32(mmDC_HPD4_CONTROL, tmp);
- break;
- case AMDGPU_HPD_5:
- WREG32(mmDC_HPD5_CONTROL, tmp);
- break;
- case AMDGPU_HPD_6:
- WREG32(mmDC_HPD6_CONTROL, tmp);
- break;
- default:
- break;
- }
+ if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd)
+ continue;
+
+ tmp = RREG32(mmDC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
+ tmp |= DC_HPD1_CONTROL__DC_HPD1_EN_MASK;
+ WREG32(mmDC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);
if (connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
connector->connector_type == DRM_MODE_CONNECTOR_LVDS) {
@@ -427,34 +346,9 @@ static void dce_v8_0_hpd_init(struct amdgpu_device *adev)
* https://bugzilla.redhat.com/show_bug.cgi?id=726143
* also avoid interrupt storms during dpms.
*/
- u32 dc_hpd_int_cntl_reg, dc_hpd_int_cntl;
-
- switch (amdgpu_connector->hpd.hpd) {
- case AMDGPU_HPD_1:
- dc_hpd_int_cntl_reg = mmDC_HPD1_INT_CONTROL;
- break;
- case AMDGPU_HPD_2:
- dc_hpd_int_cntl_reg = mmDC_HPD2_INT_CONTROL;
- break;
- case AMDGPU_HPD_3:
- dc_hpd_int_cntl_reg = mmDC_HPD3_INT_CONTROL;
- break;
- case AMDGPU_HPD_4:
- dc_hpd_int_cntl_reg = mmDC_HPD4_INT_CONTROL;
- break;
- case AMDGPU_HPD_5:
- dc_hpd_int_cntl_reg = mmDC_HPD5_INT_CONTROL;
- break;
- case AMDGPU_HPD_6:
- dc_hpd_int_cntl_reg = mmDC_HPD6_INT_CONTROL;
- break;
- default:
- continue;
- }
-
- dc_hpd_int_cntl = RREG32(dc_hpd_int_cntl_reg);
- dc_hpd_int_cntl &= ~DC_HPD1_INT_CONTROL__DC_HPD1_INT_EN_MASK;
- WREG32(dc_hpd_int_cntl_reg, dc_hpd_int_cntl);
+ tmp = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
+ tmp &= ~DC_HPD1_INT_CONTROL__DC_HPD1_INT_EN_MASK;
+ WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);
continue;
}
@@ -475,32 +369,18 @@ static void dce_v8_0_hpd_fini(struct amdgpu_device *adev)
{
struct drm_device *dev = adev->ddev;
struct drm_connector *connector;
+ u32 tmp;
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
- switch (amdgpu_connector->hpd.hpd) {
- case AMDGPU_HPD_1:
- WREG32(mmDC_HPD1_CONTROL, 0);
- break;
- case AMDGPU_HPD_2:
- WREG32(mmDC_HPD2_CONTROL, 0);
- break;
- case AMDGPU_HPD_3:
- WREG32(mmDC_HPD3_CONTROL, 0);
- break;
- case AMDGPU_HPD_4:
- WREG32(mmDC_HPD4_CONTROL, 0);
- break;
- case AMDGPU_HPD_5:
- WREG32(mmDC_HPD5_CONTROL, 0);
- break;
- case AMDGPU_HPD_6:
- WREG32(mmDC_HPD6_CONTROL, 0);
- break;
- default:
- break;
- }
+ if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd)
+ continue;
+
+ tmp = RREG32(mmDC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
+ tmp &= ~DC_HPD1_CONTROL__DC_HPD1_EN_MASK;
+ WREG32(mmDC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);
+
amdgpu_irq_put(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd);
}
}
@@ -2030,7 +1910,7 @@ static int dce_v8_0_crtc_do_set_base(struct drm_crtc *crtc,
u32 viewport_w, viewport_h;
int r;
bool bypass_lut = false;
- char *format_name;
+ struct drm_format_name_buf format_name;
/* no fb bound */
if (!atomic && !crtc->primary->fb) {
@@ -2135,9 +2015,8 @@ static int dce_v8_0_crtc_do_set_base(struct drm_crtc *crtc,
bypass_lut = true;
break;
default:
- format_name = drm_get_format_name(target_fb->pixel_format);
- DRM_ERROR("Unsupported screen format %s\n", format_name);
- kfree(format_name);
+ DRM_ERROR("Unsupported screen format %s\n",
+ drm_get_format_name(target_fb->pixel_format, &format_name));
return -EINVAL;
}
@@ -2465,6 +2344,9 @@ static int dce_v8_0_cursor_move_locked(struct drm_crtc *crtc,
struct amdgpu_device *adev = crtc->dev->dev_private;
int xorigin = 0, yorigin = 0;
+ amdgpu_crtc->cursor_x = x;
+ amdgpu_crtc->cursor_y = y;
+
/* avivo cursors are offset into the total surface */
x += crtc->x;
y += crtc->y;
@@ -2481,11 +2363,6 @@ static int dce_v8_0_cursor_move_locked(struct drm_crtc *crtc,
WREG32(mmCUR_POSITION + amdgpu_crtc->crtc_offset, (x << 16) | y);
WREG32(mmCUR_HOT_SPOT + amdgpu_crtc->crtc_offset, (xorigin << 16) | yorigin);
- WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
- ((amdgpu_crtc->cursor_width - 1) << 16) | (amdgpu_crtc->cursor_height - 1));
-
- amdgpu_crtc->cursor_x = x;
- amdgpu_crtc->cursor_y = y;
return 0;
}
@@ -2511,6 +2388,7 @@ static int dce_v8_0_crtc_cursor_set2(struct drm_crtc *crtc,
int32_t hot_y)
{
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+ struct amdgpu_device *adev = crtc->dev->dev_private;
struct drm_gem_object *obj;
struct amdgpu_bo *aobj;
int ret;
@@ -2549,9 +2427,6 @@ static int dce_v8_0_crtc_cursor_set2(struct drm_crtc *crtc,
return ret;
}
- amdgpu_crtc->cursor_width = width;
- amdgpu_crtc->cursor_height = height;
-
dce_v8_0_lock_cursor(crtc, true);
if (hot_x != amdgpu_crtc->cursor_hot_x ||
@@ -2567,6 +2442,14 @@ static int dce_v8_0_crtc_cursor_set2(struct drm_crtc *crtc,
amdgpu_crtc->cursor_hot_y = hot_y;
}
+ if (width != amdgpu_crtc->cursor_width ||
+ height != amdgpu_crtc->cursor_height) {
+ WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
+ (width - 1) << 16 | (height - 1));
+ amdgpu_crtc->cursor_width = width;
+ amdgpu_crtc->cursor_height = height;
+ }
+
dce_v8_0_show_cursor(crtc);
dce_v8_0_lock_cursor(crtc, false);
@@ -2588,6 +2471,7 @@ unpin:
static void dce_v8_0_cursor_reset(struct drm_crtc *crtc)
{
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+ struct amdgpu_device *adev = crtc->dev->dev_private;
if (amdgpu_crtc->cursor_bo) {
dce_v8_0_lock_cursor(crtc, true);
@@ -2595,6 +2479,10 @@ static void dce_v8_0_cursor_reset(struct drm_crtc *crtc)
dce_v8_0_cursor_move_locked(crtc, amdgpu_crtc->cursor_x,
amdgpu_crtc->cursor_y);
+ WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
+ (amdgpu_crtc->cursor_width - 1) << 16 |
+ (amdgpu_crtc->cursor_height - 1));
+
dce_v8_0_show_cursor(crtc);
dce_v8_0_lock_cursor(crtc, false);
@@ -3198,42 +3086,23 @@ static int dce_v8_0_set_hpd_interrupt_state(struct amdgpu_device *adev,
unsigned type,
enum amdgpu_interrupt_state state)
{
- u32 dc_hpd_int_cntl_reg, dc_hpd_int_cntl;
+ u32 dc_hpd_int_cntl;
- switch (type) {
- case AMDGPU_HPD_1:
- dc_hpd_int_cntl_reg = mmDC_HPD1_INT_CONTROL;
- break;
- case AMDGPU_HPD_2:
- dc_hpd_int_cntl_reg = mmDC_HPD2_INT_CONTROL;
- break;
- case AMDGPU_HPD_3:
- dc_hpd_int_cntl_reg = mmDC_HPD3_INT_CONTROL;
- break;
- case AMDGPU_HPD_4:
- dc_hpd_int_cntl_reg = mmDC_HPD4_INT_CONTROL;
- break;
- case AMDGPU_HPD_5:
- dc_hpd_int_cntl_reg = mmDC_HPD5_INT_CONTROL;
- break;
- case AMDGPU_HPD_6:
- dc_hpd_int_cntl_reg = mmDC_HPD6_INT_CONTROL;
- break;
- default:
+ if (type >= adev->mode_info.num_hpd) {
DRM_DEBUG("invalid hdp %d\n", type);
return 0;
}
switch (state) {
case AMDGPU_IRQ_STATE_DISABLE:
- dc_hpd_int_cntl = RREG32(dc_hpd_int_cntl_reg);
+ dc_hpd_int_cntl = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[type]);
dc_hpd_int_cntl &= ~DC_HPD1_INT_CONTROL__DC_HPD1_INT_EN_MASK;
- WREG32(dc_hpd_int_cntl_reg, dc_hpd_int_cntl);
+ WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[type], dc_hpd_int_cntl);
break;
case AMDGPU_IRQ_STATE_ENABLE:
- dc_hpd_int_cntl = RREG32(dc_hpd_int_cntl_reg);
+ dc_hpd_int_cntl = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[type]);
dc_hpd_int_cntl |= DC_HPD1_INT_CONTROL__DC_HPD1_INT_EN_MASK;
- WREG32(dc_hpd_int_cntl_reg, dc_hpd_int_cntl);
+ WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[type], dc_hpd_int_cntl);
break;
default:
break;
@@ -3406,7 +3275,7 @@ static int dce_v8_0_hpd_irq(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
{
- uint32_t disp_int, mask, int_control, tmp;
+ uint32_t disp_int, mask, tmp;
unsigned hpd;
if (entry->src_data >= adev->mode_info.num_hpd) {
@@ -3417,12 +3286,11 @@ static int dce_v8_0_hpd_irq(struct amdgpu_device *adev,
hpd = entry->src_data;
disp_int = RREG32(interrupt_status_offsets[hpd].reg);
mask = interrupt_status_offsets[hpd].hpd;
- int_control = hpd_int_control_offsets[hpd];
if (disp_int & mask) {
- tmp = RREG32(int_control);
+ tmp = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd]);
tmp |= DC_HPD1_INT_CONTROL__DC_HPD1_INT_ACK_MASK;
- WREG32(int_control, tmp);
+ WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd], tmp);
schedule_work(&adev->hotplug_work);
DRM_DEBUG("IH: HPD%d\n", hpd + 1);
}
@@ -3443,7 +3311,7 @@ static int dce_v8_0_set_powergating_state(void *handle,
return 0;
}
-const struct amd_ip_funcs dce_v8_0_ip_funcs = {
+static const struct amd_ip_funcs dce_v8_0_ip_funcs = {
.name = "dce_v8_0",
.early_init = dce_v8_0_early_init,
.late_init = NULL,
@@ -3727,7 +3595,6 @@ static const struct amdgpu_display_funcs dce_v8_0_display_funcs = {
.bandwidth_update = &dce_v8_0_bandwidth_update,
.vblank_get_counter = &dce_v8_0_vblank_get_counter,
.vblank_wait = &dce_v8_0_vblank_wait,
- .is_display_hung = &dce_v8_0_is_display_hung,
.backlight_set_level = &amdgpu_atombios_encoder_set_backlight_level,
.backlight_get_level = &amdgpu_atombios_encoder_get_backlight_level,
.hpd_sense = &dce_v8_0_hpd_sense,
@@ -3773,3 +3640,48 @@ static void dce_v8_0_set_irq_funcs(struct amdgpu_device *adev)
adev->hpd_irq.num_types = AMDGPU_HPD_LAST;
adev->hpd_irq.funcs = &dce_v8_0_hpd_irq_funcs;
}
+
+const struct amdgpu_ip_block_version dce_v8_0_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_DCE,
+ .major = 8,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &dce_v8_0_ip_funcs,
+};
+
+const struct amdgpu_ip_block_version dce_v8_1_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_DCE,
+ .major = 8,
+ .minor = 1,
+ .rev = 0,
+ .funcs = &dce_v8_0_ip_funcs,
+};
+
+const struct amdgpu_ip_block_version dce_v8_2_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_DCE,
+ .major = 8,
+ .minor = 2,
+ .rev = 0,
+ .funcs = &dce_v8_0_ip_funcs,
+};
+
+const struct amdgpu_ip_block_version dce_v8_3_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_DCE,
+ .major = 8,
+ .minor = 3,
+ .rev = 0,
+ .funcs = &dce_v8_0_ip_funcs,
+};
+
+const struct amdgpu_ip_block_version dce_v8_5_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_DCE,
+ .major = 8,
+ .minor = 5,
+ .rev = 0,
+ .funcs = &dce_v8_0_ip_funcs,
+};
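
Editor's note: both the DCE 6 and DCE 8 hunks move the CUR_SIZE write out of
the per-move path in cursor_move_locked(): the size is now programmed in
cursor_set2() only when it changes, and replayed in cursor_reset(). A minimal
sketch of that cache-and-compare pattern follows; the register write is
stubbed and the sizes are arbitrary.

#include <stdio.h>

struct crtc_cursor {
	int width, height;
};

/* stands in for WREG32(mmCUR_SIZE + crtc_offset, ...) */
static void write_cur_size(int w, int h)
{
	printf("CUR_SIZE <- 0x%08x\n",
	       (unsigned int)(((w - 1) << 16) | (h - 1)));
}

/* Program the register only when the cached size is stale. */
static void cursor_set_size(struct crtc_cursor *c, int w, int h)
{
	if (w != c->width || h != c->height) {
		write_cur_size(w, h);
		c->width = w;
		c->height = h;
	}
}

/* cursor_reset() replays the cached size after the hardware state is lost. */
static void cursor_reset(const struct crtc_cursor *c)
{
	write_cur_size(c->width, c->height);
}

int main(void)
{
	struct crtc_cursor c = { 0, 0 };

	cursor_set_size(&c, 64, 64);   /* writes */
	cursor_set_size(&c, 64, 64);   /* skipped: unchanged */
	cursor_set_size(&c, 128, 64);  /* writes */
	cursor_reset(&c);              /* replays 128x64 */
	return 0;
}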
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.h b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.h
index 7d0770c3a49b..13b802dd946a 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.h
@@ -24,7 +24,11 @@
#ifndef __DCE_V8_0_H__
#define __DCE_V8_0_H__
-extern const struct amd_ip_funcs dce_v8_0_ip_funcs;
+extern const struct amdgpu_ip_block_version dce_v8_0_ip_block;
+extern const struct amdgpu_ip_block_version dce_v8_1_ip_block;
+extern const struct amdgpu_ip_block_version dce_v8_2_ip_block;
+extern const struct amdgpu_ip_block_version dce_v8_3_ip_block;
+extern const struct amdgpu_ip_block_version dce_v8_5_ip_block;
void dce_v8_0_disable_dce(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
index c2bd9f045532..e4a5a5ac0ff3 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
@@ -27,6 +27,9 @@
#include "atom.h"
#include "amdgpu_pll.h"
#include "amdgpu_connectors.h"
+#ifdef CONFIG_DRM_AMDGPU_SI
+#include "dce_v6_0.h"
+#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
#include "dce_v8_0.h"
#endif
@@ -34,11 +37,13 @@
#include "dce_v11_0.h"
#include "dce_virtual.h"
+#define DCE_VIRTUAL_VBLANK_PERIOD 16666666
+
+
static void dce_virtual_set_display_funcs(struct amdgpu_device *adev);
static void dce_virtual_set_irq_funcs(struct amdgpu_device *adev);
-static int dce_virtual_pageflip_irq(struct amdgpu_device *adev,
- struct amdgpu_irq_src *source,
- struct amdgpu_iv_entry *entry);
+static int dce_virtual_connector_encoder_init(struct amdgpu_device *adev,
+ int index);
/**
* dce_virtual_vblank_wait - vblank wait asic callback.
@@ -90,15 +95,18 @@ static u32 dce_virtual_hpd_get_gpio_reg(struct amdgpu_device *adev)
return 0;
}
-static bool dce_virtual_is_display_hung(struct amdgpu_device *adev)
-{
- return false;
-}
-
static void dce_virtual_stop_mc_access(struct amdgpu_device *adev,
struct amdgpu_mode_mc_save *save)
{
switch (adev->asic_type) {
+#ifdef CONFIG_DRM_AMDGPU_SI
+ case CHIP_TAHITI:
+ case CHIP_PITCAIRN:
+ case CHIP_VERDE:
+ case CHIP_OLAND:
+ dce_v6_0_disable_dce(adev);
+ break;
+#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
case CHIP_BONAIRE:
case CHIP_HAWAII:
@@ -119,6 +127,9 @@ static void dce_virtual_stop_mc_access(struct amdgpu_device *adev,
dce_v11_0_disable_dce(adev);
break;
case CHIP_TOPAZ:
+#ifdef CONFIG_DRM_AMDGPU_SI
+ case CHIP_HAINAN:
+#endif
/* no DCE */
return;
default:
@@ -195,16 +206,15 @@ static void dce_virtual_crtc_dpms(struct drm_crtc *crtc, int mode)
switch (mode) {
case DRM_MODE_DPMS_ON:
amdgpu_crtc->enabled = true;
- /* Make sure VBLANK and PFLIP interrupts are still enabled */
+ /* Make sure VBLANK interrupts are still enabled */
type = amdgpu_crtc_idx_to_irq_type(adev, amdgpu_crtc->crtc_id);
amdgpu_irq_update(adev, &adev->crtc_irq, type);
- amdgpu_irq_update(adev, &adev->pageflip_irq, type);
- drm_vblank_on(dev, amdgpu_crtc->crtc_id);
+ drm_crtc_vblank_on(crtc);
break;
case DRM_MODE_DPMS_STANDBY:
case DRM_MODE_DPMS_SUSPEND:
case DRM_MODE_DPMS_OFF:
- drm_vblank_off(dev, amdgpu_crtc->crtc_id);
+ drm_crtc_vblank_off(crtc);
amdgpu_crtc->enabled = false;
break;
}
@@ -264,24 +274,6 @@ static bool dce_virtual_crtc_mode_fixup(struct drm_crtc *crtc,
const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
- struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
- struct drm_device *dev = crtc->dev;
- struct drm_encoder *encoder;
-
- /* assign the encoder to the amdgpu crtc to avoid repeated lookups later */
- list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
- if (encoder->crtc == crtc) {
- amdgpu_crtc->encoder = encoder;
- amdgpu_crtc->connector = amdgpu_get_connector_for_encoder(encoder);
- break;
- }
- }
- if ((amdgpu_crtc->encoder == NULL) || (amdgpu_crtc->connector == NULL)) {
- amdgpu_crtc->encoder = NULL;
- amdgpu_crtc->connector = NULL;
- return false;
- }
-
return true;
}
@@ -341,6 +333,7 @@ static int dce_virtual_crtc_init(struct amdgpu_device *adev, int index)
amdgpu_crtc->pll_id = ATOM_PPLL_INVALID;
amdgpu_crtc->encoder = NULL;
amdgpu_crtc->connector = NULL;
+ amdgpu_crtc->vsync_timer_enabled = AMDGPU_IRQ_STATE_DISABLE;
drm_crtc_helper_add(&amdgpu_crtc->base, &dce_virtual_crtc_helper_funcs);
return 0;
@@ -350,48 +343,121 @@ static int dce_virtual_early_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- adev->mode_info.vsync_timer_enabled = AMDGPU_IRQ_STATE_DISABLE;
dce_virtual_set_display_funcs(adev);
dce_virtual_set_irq_funcs(adev);
- adev->mode_info.num_crtc = 1;
adev->mode_info.num_hpd = 1;
adev->mode_info.num_dig = 1;
return 0;
}
-static bool dce_virtual_get_connector_info(struct amdgpu_device *adev)
+static struct drm_encoder *
+dce_virtual_encoder(struct drm_connector *connector)
{
- struct amdgpu_i2c_bus_rec ddc_bus;
- struct amdgpu_router router;
- struct amdgpu_hpd hpd;
+ int enc_id = connector->encoder_ids[0];
+ struct drm_encoder *encoder;
+ int i;
- /* look up gpio for ddc, hpd */
- ddc_bus.valid = false;
- hpd.hpd = AMDGPU_HPD_NONE;
- /* needed for aux chan transactions */
- ddc_bus.hpd = hpd.hpd;
+ for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
+ if (connector->encoder_ids[i] == 0)
+ break;
- memset(&router, 0, sizeof(router));
- router.ddc_valid = false;
- router.cd_valid = false;
- amdgpu_display_add_connector(adev,
- 0,
- ATOM_DEVICE_CRT1_SUPPORT,
- DRM_MODE_CONNECTOR_VIRTUAL, &ddc_bus,
- CONNECTOR_OBJECT_ID_VIRTUAL,
- &hpd,
- &router);
+ encoder = drm_encoder_find(connector->dev, connector->encoder_ids[i]);
+ if (!encoder)
+ continue;
- amdgpu_display_add_encoder(adev, ENCODER_VIRTUAL_ENUM_VIRTUAL,
- ATOM_DEVICE_CRT1_SUPPORT,
- 0);
+ if (encoder->encoder_type == DRM_MODE_ENCODER_VIRTUAL)
+ return encoder;
+ }
- amdgpu_link_encoder_connector(adev->ddev);
+ /* pick the first one */
+ if (enc_id)
+ return drm_encoder_find(connector->dev, enc_id);
+ return NULL;
+}
+
+static int dce_virtual_get_modes(struct drm_connector *connector)
+{
+ struct drm_device *dev = connector->dev;
+ struct drm_display_mode *mode = NULL;
+ unsigned i;
+ static const struct mode_size {
+ int w;
+ int h;
+ } common_modes[17] = {
+ { 640, 480},
+ { 720, 480},
+ { 800, 600},
+ { 848, 480},
+ {1024, 768},
+ {1152, 768},
+ {1280, 720},
+ {1280, 800},
+ {1280, 854},
+ {1280, 960},
+ {1280, 1024},
+ {1440, 900},
+ {1400, 1050},
+ {1680, 1050},
+ {1600, 1200},
+ {1920, 1080},
+ {1920, 1200}
+ };
+
+ for (i = 0; i < 17; i++) {
+ mode = drm_cvt_mode(dev, common_modes[i].w, common_modes[i].h, 60, false, false, false);
+ drm_mode_probed_add(connector, mode);
+ }
- return true;
+ return 0;
+}
+
+static int dce_virtual_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ return MODE_OK;
}
+static int
+dce_virtual_dpms(struct drm_connector *connector, int mode)
+{
+ return 0;
+}
+
+static int
+dce_virtual_set_property(struct drm_connector *connector,
+ struct drm_property *property,
+ uint64_t val)
+{
+ return 0;
+}
+
+static void dce_virtual_destroy(struct drm_connector *connector)
+{
+ drm_connector_unregister(connector);
+ drm_connector_cleanup(connector);
+ kfree(connector);
+}
+
+static void dce_virtual_force(struct drm_connector *connector)
+{
+ return;
+}
+
+static const struct drm_connector_helper_funcs dce_virtual_connector_helper_funcs = {
+ .get_modes = dce_virtual_get_modes,
+ .mode_valid = dce_virtual_mode_valid,
+ .best_encoder = dce_virtual_encoder,
+};
+
+static const struct drm_connector_funcs dce_virtual_connector_funcs = {
+ .dpms = dce_virtual_dpms,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .set_property = dce_virtual_set_property,
+ .destroy = dce_virtual_destroy,
+ .force = dce_virtual_force,
+};
+
static int dce_virtual_sw_init(void *handle)
{
int r, i;
@@ -420,16 +486,16 @@ static int dce_virtual_sw_init(void *handle)
adev->ddev->mode_config.max_width = 16384;
adev->ddev->mode_config.max_height = 16384;
- /* allocate crtcs */
+ /* allocate crtcs, encoders, connectors */
for (i = 0; i < adev->mode_info.num_crtc; i++) {
r = dce_virtual_crtc_init(adev, i);
if (r)
return r;
+ r = dce_virtual_connector_encoder_init(adev, i);
+ if (r)
+ return r;
}
- dce_virtual_get_connector_info(adev);
- amdgpu_print_display_setup(adev->ddev);
-
drm_kms_helper_poll_init(adev->ddev);
adev->mode_info.mode_config_initialized = true;
@@ -496,7 +562,7 @@ static int dce_virtual_set_powergating_state(void *handle,
return 0;
}
-const struct amd_ip_funcs dce_virtual_ip_funcs = {
+static const struct amd_ip_funcs dce_virtual_ip_funcs = {
.name = "dce_virtual",
.early_init = dce_virtual_early_init,
.late_init = NULL,
@@ -526,8 +592,8 @@ static void dce_virtual_encoder_commit(struct drm_encoder *encoder)
static void
dce_virtual_encoder_mode_set(struct drm_encoder *encoder,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
{
return;
}
@@ -547,10 +613,6 @@ static bool dce_virtual_encoder_mode_fixup(struct drm_encoder *encoder,
const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
-
- /* set the active encoder to connector routing */
- amdgpu_encoder_set_active_device(encoder);
-
return true;
}
@@ -576,45 +638,40 @@ static const struct drm_encoder_funcs dce_virtual_encoder_funcs = {
.destroy = dce_virtual_encoder_destroy,
};
-static void dce_virtual_encoder_add(struct amdgpu_device *adev,
- uint32_t encoder_enum,
- uint32_t supported_device,
- u16 caps)
+static int dce_virtual_connector_encoder_init(struct amdgpu_device *adev,
+ int index)
{
- struct drm_device *dev = adev->ddev;
struct drm_encoder *encoder;
- struct amdgpu_encoder *amdgpu_encoder;
-
- /* see if we already added it */
- list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
- amdgpu_encoder = to_amdgpu_encoder(encoder);
- if (amdgpu_encoder->encoder_enum == encoder_enum) {
- amdgpu_encoder->devices |= supported_device;
- return;
- }
+ struct drm_connector *connector;
+
+ /* add a new encoder */
+ encoder = kzalloc(sizeof(struct drm_encoder), GFP_KERNEL);
+ if (!encoder)
+ return -ENOMEM;
+ encoder->possible_crtcs = 1 << index;
+ drm_encoder_init(adev->ddev, encoder, &dce_virtual_encoder_funcs,
+ DRM_MODE_ENCODER_VIRTUAL, NULL);
+ drm_encoder_helper_add(encoder, &dce_virtual_encoder_helper_funcs);
+ connector = kzalloc(sizeof(struct drm_connector), GFP_KERNEL);
+ if (!connector) {
+ kfree(encoder);
+ return -ENOMEM;
}
- /* add a new one */
- amdgpu_encoder = kzalloc(sizeof(struct amdgpu_encoder), GFP_KERNEL);
- if (!amdgpu_encoder)
- return;
+ /* add a new connector */
+ drm_connector_init(adev->ddev, connector, &dce_virtual_connector_funcs,
+ DRM_MODE_CONNECTOR_VIRTUAL);
+ drm_connector_helper_add(connector, &dce_virtual_connector_helper_funcs);
+ connector->display_info.subpixel_order = SubPixelHorizontalRGB;
+ connector->interlace_allowed = false;
+ connector->doublescan_allowed = false;
+ drm_connector_register(connector);
- encoder = &amdgpu_encoder->base;
- encoder->possible_crtcs = 0x1;
- amdgpu_encoder->enc_priv = NULL;
- amdgpu_encoder->encoder_enum = encoder_enum;
- amdgpu_encoder->encoder_id = (encoder_enum & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT;
- amdgpu_encoder->devices = supported_device;
- amdgpu_encoder->rmx_type = RMX_OFF;
- amdgpu_encoder->underscan_type = UNDERSCAN_OFF;
- amdgpu_encoder->is_ext_encoder = false;
- amdgpu_encoder->caps = caps;
-
- drm_encoder_init(dev, encoder, &dce_virtual_encoder_funcs,
- DRM_MODE_ENCODER_VIRTUAL, NULL);
- drm_encoder_helper_add(encoder, &dce_virtual_encoder_helper_funcs);
- DRM_INFO("[FM]encoder: %d is VIRTUAL\n", amdgpu_encoder->encoder_id);
+ /* link them */
+ drm_mode_connector_attach_encoder(connector, encoder);
+
+ return 0;
}
static const struct amdgpu_display_funcs dce_virtual_display_funcs = {
@@ -622,7 +679,6 @@ static const struct amdgpu_display_funcs dce_virtual_display_funcs = {
.bandwidth_update = &dce_virtual_bandwidth_update,
.vblank_get_counter = &dce_virtual_vblank_get_counter,
.vblank_wait = &dce_virtual_vblank_wait,
- .is_display_hung = &dce_virtual_is_display_hung,
.backlight_set_level = NULL,
.backlight_get_level = NULL,
.hpd_sense = &dce_virtual_hpd_sense,
@@ -630,8 +686,8 @@ static const struct amdgpu_display_funcs dce_virtual_display_funcs = {
.hpd_get_gpio_reg = &dce_virtual_hpd_get_gpio_reg,
.page_flip = &dce_virtual_page_flip,
.page_flip_get_scanoutpos = &dce_virtual_crtc_get_scanoutpos,
- .add_encoder = &dce_virtual_encoder_add,
- .add_connector = &amdgpu_connector_add,
+ .add_encoder = NULL,
+ .add_connector = NULL,
.stop_mc_access = &dce_virtual_stop_mc_access,
.resume_mc_access = &dce_virtual_resume_mc_access,
};
@@ -642,107 +698,13 @@ static void dce_virtual_set_display_funcs(struct amdgpu_device *adev)
adev->mode_info.funcs = &dce_virtual_display_funcs;
}
-static enum hrtimer_restart dce_virtual_vblank_timer_handle(struct hrtimer *vblank_timer)
-{
- struct amdgpu_mode_info *mode_info = container_of(vblank_timer, struct amdgpu_mode_info ,vblank_timer);
- struct amdgpu_device *adev = container_of(mode_info, struct amdgpu_device ,mode_info);
- unsigned crtc = 0;
- drm_handle_vblank(adev->ddev, crtc);
- dce_virtual_pageflip_irq(adev, NULL, NULL);
- hrtimer_start(vblank_timer, ktime_set(0, DCE_VIRTUAL_VBLANK_PERIOD), HRTIMER_MODE_REL);
- return HRTIMER_NORESTART;
-}
-
-static void dce_virtual_set_crtc_vblank_interrupt_state(struct amdgpu_device *adev,
- int crtc,
- enum amdgpu_interrupt_state state)
-{
- if (crtc >= adev->mode_info.num_crtc) {
- DRM_DEBUG("invalid crtc %d\n", crtc);
- return;
- }
-
- if (state && !adev->mode_info.vsync_timer_enabled) {
- DRM_DEBUG("Enable software vsync timer\n");
- hrtimer_init(&adev->mode_info.vblank_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- hrtimer_set_expires(&adev->mode_info.vblank_timer, ktime_set(0, DCE_VIRTUAL_VBLANK_PERIOD));
- adev->mode_info.vblank_timer.function = dce_virtual_vblank_timer_handle;
- hrtimer_start(&adev->mode_info.vblank_timer, ktime_set(0, DCE_VIRTUAL_VBLANK_PERIOD), HRTIMER_MODE_REL);
- } else if (!state && adev->mode_info.vsync_timer_enabled) {
- DRM_DEBUG("Disable software vsync timer\n");
- hrtimer_cancel(&adev->mode_info.vblank_timer);
- }
-
- adev->mode_info.vsync_timer_enabled = state;
- DRM_DEBUG("[FM]set crtc %d vblank interrupt state %d\n", crtc, state);
-}
-
-
-static int dce_virtual_set_crtc_irq_state(struct amdgpu_device *adev,
- struct amdgpu_irq_src *source,
- unsigned type,
- enum amdgpu_interrupt_state state)
-{
- switch (type) {
- case AMDGPU_CRTC_IRQ_VBLANK1:
- dce_virtual_set_crtc_vblank_interrupt_state(adev, 0, state);
- break;
- default:
- break;
- }
- return 0;
-}
-
-static void dce_virtual_crtc_vblank_int_ack(struct amdgpu_device *adev,
- int crtc)
-{
- if (crtc >= adev->mode_info.num_crtc) {
- DRM_DEBUG("invalid crtc %d\n", crtc);
- return;
- }
-}
-
-static int dce_virtual_crtc_irq(struct amdgpu_device *adev,
- struct amdgpu_irq_src *source,
- struct amdgpu_iv_entry *entry)
-{
- unsigned crtc = 0;
- unsigned irq_type = AMDGPU_CRTC_IRQ_VBLANK1;
-
- dce_virtual_crtc_vblank_int_ack(adev, crtc);
-
- if (amdgpu_irq_enabled(adev, source, irq_type)) {
- drm_handle_vblank(adev->ddev, crtc);
- }
- dce_virtual_pageflip_irq(adev, NULL, NULL);
- DRM_DEBUG("IH: D%d vblank\n", crtc + 1);
- return 0;
-}
-
-static int dce_virtual_set_pageflip_irq_state(struct amdgpu_device *adev,
- struct amdgpu_irq_src *src,
- unsigned type,
- enum amdgpu_interrupt_state state)
-{
- if (type >= adev->mode_info.num_crtc) {
- DRM_ERROR("invalid pageflip crtc %d\n", type);
- return -EINVAL;
- }
- DRM_DEBUG("[FM]set pageflip irq type %d state %d\n", type, state);
-
- return 0;
-}
-
-static int dce_virtual_pageflip_irq(struct amdgpu_device *adev,
- struct amdgpu_irq_src *source,
- struct amdgpu_iv_entry *entry)
+static int dce_virtual_pageflip(struct amdgpu_device *adev,
+ unsigned crtc_id)
{
unsigned long flags;
- unsigned crtc_id = 0;
struct amdgpu_crtc *amdgpu_crtc;
struct amdgpu_flip_work *works;
- crtc_id = 0;
amdgpu_crtc = adev->mode_info.crtcs[crtc_id];
if (crtc_id >= adev->mode_info.num_crtc) {
@@ -781,22 +743,79 @@ static int dce_virtual_pageflip_irq(struct amdgpu_device *adev,
return 0;
}
+static enum hrtimer_restart dce_virtual_vblank_timer_handle(struct hrtimer *vblank_timer)
+{
+ struct amdgpu_crtc *amdgpu_crtc = container_of(vblank_timer,
+ struct amdgpu_crtc, vblank_timer);
+ struct drm_device *ddev = amdgpu_crtc->base.dev;
+ struct amdgpu_device *adev = ddev->dev_private;
+
+ drm_handle_vblank(ddev, amdgpu_crtc->crtc_id);
+ dce_virtual_pageflip(adev, amdgpu_crtc->crtc_id);
+ hrtimer_start(vblank_timer, ktime_set(0, DCE_VIRTUAL_VBLANK_PERIOD),
+ HRTIMER_MODE_REL);
+
+ return HRTIMER_NORESTART;
+}
+
+static void dce_virtual_set_crtc_vblank_interrupt_state(struct amdgpu_device *adev,
+ int crtc,
+ enum amdgpu_interrupt_state state)
+{
+ if (crtc >= adev->mode_info.num_crtc) {
+ DRM_DEBUG("invalid crtc %d\n", crtc);
+ return;
+ }
+
+ if (state && !adev->mode_info.crtcs[crtc]->vsync_timer_enabled) {
+ DRM_DEBUG("Enable software vsync timer\n");
+ hrtimer_init(&adev->mode_info.crtcs[crtc]->vblank_timer,
+ CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ hrtimer_set_expires(&adev->mode_info.crtcs[crtc]->vblank_timer,
+ ktime_set(0, DCE_VIRTUAL_VBLANK_PERIOD));
+ adev->mode_info.crtcs[crtc]->vblank_timer.function =
+ dce_virtual_vblank_timer_handle;
+ hrtimer_start(&adev->mode_info.crtcs[crtc]->vblank_timer,
+ ktime_set(0, DCE_VIRTUAL_VBLANK_PERIOD), HRTIMER_MODE_REL);
+ } else if (!state && adev->mode_info.crtcs[crtc]->vsync_timer_enabled) {
+ DRM_DEBUG("Disable software vsync timer\n");
+ hrtimer_cancel(&adev->mode_info.crtcs[crtc]->vblank_timer);
+ }
+
+ adev->mode_info.crtcs[crtc]->vsync_timer_enabled = state;
+ DRM_DEBUG("[FM]set crtc %d vblank interrupt state %d\n", crtc, state);
+}
+
+
+static int dce_virtual_set_crtc_irq_state(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ unsigned type,
+ enum amdgpu_interrupt_state state)
+{
+ if (type > AMDGPU_CRTC_IRQ_VBLANK6)
+ return -EINVAL;
+
+ dce_virtual_set_crtc_vblank_interrupt_state(adev, type, state);
+
+ return 0;
+}
+
static const struct amdgpu_irq_src_funcs dce_virtual_crtc_irq_funcs = {
.set = dce_virtual_set_crtc_irq_state,
- .process = dce_virtual_crtc_irq,
-};
-
-static const struct amdgpu_irq_src_funcs dce_virtual_pageflip_irq_funcs = {
- .set = dce_virtual_set_pageflip_irq_state,
- .process = dce_virtual_pageflip_irq,
+ .process = NULL,
};
static void dce_virtual_set_irq_funcs(struct amdgpu_device *adev)
{
adev->crtc_irq.num_types = AMDGPU_CRTC_IRQ_LAST;
adev->crtc_irq.funcs = &dce_virtual_crtc_irq_funcs;
-
- adev->pageflip_irq.num_types = AMDGPU_PAGEFLIP_IRQ_LAST;
- adev->pageflip_irq.funcs = &dce_virtual_pageflip_irq_funcs;
}
+const struct amdgpu_ip_block_version dce_virtual_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_DCE,
+ .major = 1,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &dce_virtual_ip_funcs,
+};
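
Editor's note: dce_virtual now emulates vblank per CRTC with an hrtimer that
fires every DCE_VIRTUAL_VBLANK_PERIOD (16666666 ns, roughly 60 Hz) and re-arms
itself from the handler, each expiry driving drm_handle_vblank() plus the
pageflip completion. The userspace mock below sketches only the timing idea
with nanosleep(); all names besides the period value are illustrative.

#include <stdio.h>
#include <time.h>

#define DCE_VIRTUAL_VBLANK_PERIOD_NS 16666666L

static void handle_vblank(int crtc, unsigned long frame)
{
	/* kernel: drm_handle_vblank() + dce_virtual_pageflip() */
	printf("crtc %d: simulated vblank %lu\n", crtc, frame);
}

int main(void)
{
	struct timespec period = { 0, DCE_VIRTUAL_VBLANK_PERIOD_NS };
	unsigned long frame;

	for (frame = 0; frame < 5; frame++) {
		handle_vblank(0, frame);
		nanosleep(&period, NULL); /* kernel: hrtimer re-armed instead */
	}
	return 0;
}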
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_virtual.h b/drivers/gpu/drm/amd/amdgpu/dce_virtual.h
index e239243f6ebc..ed422012c8c6 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_virtual.h
+++ b/drivers/gpu/drm/amd/amdgpu/dce_virtual.h
@@ -24,8 +24,7 @@
#ifndef __DCE_VIRTUAL_H__
#define __DCE_VIRTUAL_H__
-extern const struct amd_ip_funcs dce_virtual_ip_funcs;
-#define DCE_VIRTUAL_VBLANK_PERIOD 16666666
+extern const struct amdgpu_ip_block_version dce_virtual_ip_block;
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
index 40abb6b81c09..b323f5ef64d2 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
@@ -26,15 +26,18 @@
#include "amdgpu_gfx.h"
#include "amdgpu_ucode.h"
#include "si/clearstate_si.h"
-#include "si/sid.h"
-
-#define GFX6_NUM_GFX_RINGS 1
-#define GFX6_NUM_COMPUTE_RINGS 2
-#define STATIC_PER_CU_PG_ENABLE (1 << 3)
-#define DYN_PER_CU_PG_ENABLE (1 << 2)
-#define RLC_SAVE_AND_RESTORE_STARTING_OFFSET 0x90
-#define RLC_CLEAR_STATE_DESCRIPTOR_OFFSET 0x3D
-
+#include "bif/bif_3_0_d.h"
+#include "bif/bif_3_0_sh_mask.h"
+#include "oss/oss_1_0_d.h"
+#include "oss/oss_1_0_sh_mask.h"
+#include "gca/gfx_6_0_d.h"
+#include "gca/gfx_6_0_sh_mask.h"
+#include "gmc/gmc_6_0_d.h"
+#include "gmc/gmc_6_0_sh_mask.h"
+#include "dce/dce_6_0_d.h"
+#include "dce/dce_6_0_sh_mask.h"
+#include "gca/gfx_7_2_enum.h"
+#include "si_enums.h"
static void gfx_v6_0_set_ring_funcs(struct amdgpu_device *adev);
static void gfx_v6_0_set_irq_funcs(struct amdgpu_device *adev);
@@ -70,6 +73,15 @@ static void gfx_v6_0_get_csb_buffer(struct amdgpu_device *adev, volatile u32 *bu
//static void gfx_v6_0_init_cp_pg_table(struct amdgpu_device *adev);
static void gfx_v6_0_init_pg(struct amdgpu_device *adev);
+#define ARRAY_MODE(x) ((x) << GB_TILE_MODE0__ARRAY_MODE__SHIFT)
+#define PIPE_CONFIG(x) ((x) << GB_TILE_MODE0__PIPE_CONFIG__SHIFT)
+#define TILE_SPLIT(x) ((x) << GB_TILE_MODE0__TILE_SPLIT__SHIFT)
+#define MICRO_TILE_MODE(x) ((x) << 0)
+#define SAMPLE_SPLIT(x) ((x) << GB_TILE_MODE0__SAMPLE_SPLIT__SHIFT)
+#define BANK_WIDTH(x) ((x) << 14)
+#define BANK_HEIGHT(x) ((x) << 16)
+#define MACRO_TILE_ASPECT(x) ((x) << 18)
+#define NUM_BANKS(x) ((x) << 20)
static const u32 verde_rlc_save_restore_register_list[] =
{
@@ -399,487 +411,882 @@ static void gfx_v6_0_tiling_mode_table_init(struct amdgpu_device *adev)
break;
}
- if (adev->asic_type == CHIP_VERDE ||
- adev->asic_type == CHIP_OLAND ||
- adev->asic_type == CHIP_HAINAN) {
+ if (adev->asic_type == CHIP_VERDE) {
for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) {
switch (reg_offset) {
case 0:
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
PIPE_CONFIG(ADDR_SURF_P4_8x16) |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
- NUM_BANKS(ADDR_SURF_16_BANK) |
BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
+ NUM_BANKS(ADDR_SURF_16_BANK));
break;
- case 1:
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+ case 1:
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
PIPE_CONFIG(ADDR_SURF_P4_8x16) |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
- NUM_BANKS(ADDR_SURF_16_BANK) |
BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
+ NUM_BANKS(ADDR_SURF_16_BANK));
break;
case 2:
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
PIPE_CONFIG(ADDR_SURF_P4_8x16) |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
- NUM_BANKS(ADDR_SURF_16_BANK) |
BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
+ NUM_BANKS(ADDR_SURF_16_BANK));
break;
- case 3:
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+ case 3:
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
PIPE_CONFIG(ADDR_SURF_P4_8x16) |
- TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
- NUM_BANKS(ADDR_SURF_16_BANK) |
BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+ NUM_BANKS(ADDR_SURF_8_BANK) |
+ TILE_SPLIT(split_equal_to_row_size));
break;
- case 4:
- gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
- MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+ case 4:
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P4_8x16));
+ break;
+ case 5:
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
PIPE_CONFIG(ADDR_SURF_P4_8x16) |
- TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
- NUM_BANKS(ADDR_SURF_16_BANK) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+ NUM_BANKS(ADDR_SURF_4_BANK));
break;
- case 5:
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+ case 6:
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
PIPE_CONFIG(ADDR_SURF_P4_8x16) |
- TILE_SPLIT(split_equal_to_row_size) |
- NUM_BANKS(ADDR_SURF_16_BANK) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+ NUM_BANKS(ADDR_SURF_4_BANK));
break;
- case 6:
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+ case 7:
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
PIPE_CONFIG(ADDR_SURF_P4_8x16) |
- TILE_SPLIT(split_equal_to_row_size) |
- NUM_BANKS(ADDR_SURF_16_BANK) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_1KB) |
BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+ NUM_BANKS(ADDR_SURF_2_BANK));
+ break;
+ case 8:
+ gb_tile_moden = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED));
+ break;
+ case 9:
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P4_8x16));
break;
- case 7:
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+ case 10:
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
PIPE_CONFIG(ADDR_SURF_P4_8x16) |
- TILE_SPLIT(split_equal_to_row_size) |
- NUM_BANKS(ADDR_SURF_16_BANK) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
+ NUM_BANKS(ADDR_SURF_16_BANK));
break;
- case 8:
- gb_tile_moden = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
- MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
+ case 11:
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
PIPE_CONFIG(ADDR_SURF_P4_8x16) |
- TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
- NUM_BANKS(ADDR_SURF_16_BANK) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+ NUM_BANKS(ADDR_SURF_16_BANK));
break;
- case 9:
- gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
- MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
+ case 12:
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
PIPE_CONFIG(ADDR_SURF_P4_8x16) |
- TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
- NUM_BANKS(ADDR_SURF_16_BANK) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+ NUM_BANKS(ADDR_SURF_16_BANK));
+ break;
+ case 13:
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P4_8x16));
break;
- case 10:
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
+ case 14:
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
PIPE_CONFIG(ADDR_SURF_P4_8x16) |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
- NUM_BANKS(ADDR_SURF_16_BANK) |
BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+ NUM_BANKS(ADDR_SURF_16_BANK));
break;
- case 11:
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
+ case 15:
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
PIPE_CONFIG(ADDR_SURF_P4_8x16) |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
- NUM_BANKS(ADDR_SURF_16_BANK) |
BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+ NUM_BANKS(ADDR_SURF_16_BANK));
break;
- case 12:
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
+ case 16:
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
PIPE_CONFIG(ADDR_SURF_P4_8x16) |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+ NUM_BANKS(ADDR_SURF_16_BANK));
+ break;
+ case 17:
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
NUM_BANKS(ADDR_SURF_16_BANK) |
+ TILE_SPLIT(split_equal_to_row_size));
+ break;
+ case 18:
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_1D_TILED_THICK) |
+ PIPE_CONFIG(ADDR_SURF_P4_8x16));
+ break;
+ case 19:
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_XTHICK) |
+ PIPE_CONFIG(ADDR_SURF_P4_8x16) |
BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ TILE_SPLIT(split_equal_to_row_size));
break;
- case 13:
- gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
- MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ case 20:
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_THICK) |
PIPE_CONFIG(ADDR_SURF_P4_8x16) |
- TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
NUM_BANKS(ADDR_SURF_16_BANK) |
+ TILE_SPLIT(split_equal_to_row_size));
+ break;
+ case 21:
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+ NUM_BANKS(ADDR_SURF_8_BANK));
+ break;
+ case 22:
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+ NUM_BANKS(ADDR_SURF_8_BANK));
break;
- case 14:
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ case 23:
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
PIPE_CONFIG(ADDR_SURF_P4_8x16) |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
- NUM_BANKS(ADDR_SURF_16_BANK) |
BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+ NUM_BANKS(ADDR_SURF_4_BANK));
break;
- case 15:
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ case 24:
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
PIPE_CONFIG(ADDR_SURF_P4_8x16) |
- TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
- NUM_BANKS(ADDR_SURF_16_BANK) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+ NUM_BANKS(ADDR_SURF_4_BANK));
break;
- case 16:
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ case 25:
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_1KB) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+ NUM_BANKS(ADDR_SURF_2_BANK));
+ break;
+ case 26:
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_1KB) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+ NUM_BANKS(ADDR_SURF_2_BANK));
+ break;
+ case 27:
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_1KB) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+ NUM_BANKS(ADDR_SURF_2_BANK));
+ break;
+ case 28:
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_1KB) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+ NUM_BANKS(ADDR_SURF_2_BANK));
+ break;
+ case 29:
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_1KB) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+ NUM_BANKS(ADDR_SURF_2_BANK));
+ break;
+ case 30:
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+ NUM_BANKS(ADDR_SURF_2_BANK));
+ break;
+ default:
+ continue;
+ }
+ adev->gfx.config.tile_mode_array[reg_offset] = gb_tile_moden;
+ WREG32(mmGB_TILE_MODE0 + reg_offset, gb_tile_moden);
+ }
+ } else if (adev->asic_type == CHIP_OLAND ||
+ adev->asic_type == CHIP_HAINAN) {
+ for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) {
+ switch (reg_offset) {
+ case 0:
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P2) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
+ NUM_BANKS(ADDR_SURF_16_BANK));
+ break;
+ case 1:
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P2) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
+ NUM_BANKS(ADDR_SURF_16_BANK));
+ break;
+ case 2:
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P2) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
+ NUM_BANKS(ADDR_SURF_16_BANK));
+ break;
+ case 3:
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P2) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+ NUM_BANKS(ADDR_SURF_8_BANK) |
+ TILE_SPLIT(split_equal_to_row_size));
+ break;
+ case 4:
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P2));
+ break;
+ case 5:
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P2) |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
- NUM_BANKS(ADDR_SURF_16_BANK) |
BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+ NUM_BANKS(ADDR_SURF_8_BANK));
break;
- case 17:
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P4_8x16) |
- TILE_SPLIT(split_equal_to_row_size) |
- NUM_BANKS(ADDR_SURF_16_BANK) |
+ case 6:
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P2) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+ NUM_BANKS(ADDR_SURF_8_BANK));
+ break;
+ case 7:
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P2) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_1KB) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+ NUM_BANKS(ADDR_SURF_4_BANK));
+ break;
+ case 8:
+ gb_tile_moden = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED));
+ break;
+ case 9:
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P2));
+ break;
+ case 10:
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P2) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
+ NUM_BANKS(ADDR_SURF_16_BANK));
+ break;
+ case 11:
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P2) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+ NUM_BANKS(ADDR_SURF_16_BANK));
+ break;
+ case 12:
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P2) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+ NUM_BANKS(ADDR_SURF_16_BANK));
break;
- case 21:
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+ case 13:
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P2));
+ break;
+ case 14:
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P2) |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
- NUM_BANKS(ADDR_SURF_16_BANK) |
- BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+ NUM_BANKS(ADDR_SURF_16_BANK));
break;
- case 22:
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+ case 15:
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P2) |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+ NUM_BANKS(ADDR_SURF_16_BANK));
+ break;
+ case 16:
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P2) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+ NUM_BANKS(ADDR_SURF_16_BANK));
+ break;
+ case 17:
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P2) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
NUM_BANKS(ADDR_SURF_16_BANK) |
+ TILE_SPLIT(split_equal_to_row_size));
+ break;
+ case 18:
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_1D_TILED_THICK) |
+ PIPE_CONFIG(ADDR_SURF_P2));
+ break;
+ case 19:
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_XTHICK) |
+ PIPE_CONFIG(ADDR_SURF_P2) |
BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ TILE_SPLIT(split_equal_to_row_size));
+ break;
+ case 20:
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_THICK) |
+ PIPE_CONFIG(ADDR_SURF_P2) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ TILE_SPLIT(split_equal_to_row_size));
+ break;
+ case 21:
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P2) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+ NUM_BANKS(ADDR_SURF_8_BANK));
break;
- case 23:
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+ case 22:
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P2) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+ NUM_BANKS(ADDR_SURF_8_BANK));
+ break;
+ case 23:
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P2) |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
- NUM_BANKS(ADDR_SURF_16_BANK) |
BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+ NUM_BANKS(ADDR_SURF_8_BANK));
break;
- case 24:
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+ case 24:
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P2) |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
- NUM_BANKS(ADDR_SURF_16_BANK) |
BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+ NUM_BANKS(ADDR_SURF_8_BANK));
break;
- case 25:
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+ case 25:
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P2) |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_1KB) |
- NUM_BANKS(ADDR_SURF_8_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+ NUM_BANKS(ADDR_SURF_4_BANK));
+ break;
+ case 26:
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P2) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_1KB) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+ NUM_BANKS(ADDR_SURF_4_BANK));
+ break;
+ case 27:
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P2) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_1KB) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+ NUM_BANKS(ADDR_SURF_4_BANK));
+ break;
+ case 28:
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P2) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_1KB) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+ NUM_BANKS(ADDR_SURF_4_BANK));
+ break;
+ case 29:
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P2) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_1KB) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+ NUM_BANKS(ADDR_SURF_4_BANK));
+ break;
+ case 30:
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P2) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1));
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+ NUM_BANKS(ADDR_SURF_4_BANK));
break;
default:
- gb_tile_moden = 0;
- break;
+ continue;
}
adev->gfx.config.tile_mode_array[reg_offset] = gb_tile_moden;
- WREG32(GB_TILE_MODE0 + reg_offset, gb_tile_moden);
+ WREG32(mmGB_TILE_MODE0 + reg_offset, gb_tile_moden);
}
} else if ((adev->asic_type == CHIP_TAHITI) || (adev->asic_type == CHIP_PITCAIRN)) {
for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) {
switch (reg_offset) {
- case 0: /* non-AA compressed depth or any compressed stencil */
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+ case 0:
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
- NUM_BANKS(ADDR_SURF_16_BANK) |
BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+ NUM_BANKS(ADDR_SURF_16_BANK));
break;
- case 1: /* 2xAA/4xAA compressed depth only */
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+ case 1:
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
- NUM_BANKS(ADDR_SURF_16_BANK) |
BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+ NUM_BANKS(ADDR_SURF_16_BANK));
break;
- case 2: /* 8xAA compressed depth only */
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+ case 2:
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
- NUM_BANKS(ADDR_SURF_16_BANK) |
BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+ NUM_BANKS(ADDR_SURF_16_BANK));
break;
- case 3: /* 2xAA/4xAA compressed depth with stencil (for depth buffer) */
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+ case 3:
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
- TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
- NUM_BANKS(ADDR_SURF_16_BANK) |
BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+ NUM_BANKS(ADDR_SURF_4_BANK) |
+ TILE_SPLIT(split_equal_to_row_size));
break;
- case 4: /* Maps w/ a dimension less than the 2D macro-tile dimensions (for mipmapped depth textures) */
- gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
- MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
- TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
- NUM_BANKS(ADDR_SURF_16_BANK) |
- BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+ case 4:
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16));
break;
- case 5: /* Uncompressed 16bpp depth - and stencil buffer allocated with it */
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+ case 5:
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
- TILE_SPLIT(split_equal_to_row_size) |
- NUM_BANKS(ADDR_SURF_16_BANK) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+ NUM_BANKS(ADDR_SURF_2_BANK));
break;
- case 6: /* Uncompressed 32bpp depth - and stencil buffer allocated with it */
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+ case 6:
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
- TILE_SPLIT(split_equal_to_row_size) |
- NUM_BANKS(ADDR_SURF_16_BANK) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1));
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+ NUM_BANKS(ADDR_SURF_2_BANK));
break;
- case 7: /* Uncompressed 8bpp stencil without depth (drivers typically do not use) */
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
- TILE_SPLIT(split_equal_to_row_size) |
- NUM_BANKS(ADDR_SURF_16_BANK) |
+ case 7:
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_1KB) |
BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+ NUM_BANKS(ADDR_SURF_2_BANK));
break;
- case 8: /* 1D and 1D Array Surfaces */
- gb_tile_moden = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
- MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
- TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
- NUM_BANKS(ADDR_SURF_16_BANK) |
- BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+ case 8:
+ gb_tile_moden = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED));
break;
- case 9: /* Displayable maps. */
- gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
- MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
- TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
- NUM_BANKS(ADDR_SURF_16_BANK) |
- BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+ case 9:
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16));
break;
- case 10: /* Display 8bpp. */
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
+ case 10:
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
- NUM_BANKS(ADDR_SURF_16_BANK) |
BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+ NUM_BANKS(ADDR_SURF_16_BANK));
break;
- case 11: /* Display 16bpp. */
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
+ case 11:
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
- NUM_BANKS(ADDR_SURF_16_BANK) |
BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+ NUM_BANKS(ADDR_SURF_16_BANK));
break;
- case 12: /* Display 32bpp. */
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
+ case 12:
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
- NUM_BANKS(ADDR_SURF_16_BANK) |
BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1));
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+ NUM_BANKS(ADDR_SURF_16_BANK));
break;
- case 13: /* Thin. */
- gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
- MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
- TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
- NUM_BANKS(ADDR_SURF_16_BANK) |
- BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+ case 13:
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16));
break;
- case 14: /* Thin 8 bpp. */
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ case 14:
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
- NUM_BANKS(ADDR_SURF_16_BANK) |
BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1));
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+ NUM_BANKS(ADDR_SURF_16_BANK));
break;
- case 15: /* Thin 16 bpp. */
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ case 15:
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
- NUM_BANKS(ADDR_SURF_16_BANK) |
BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1));
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+ NUM_BANKS(ADDR_SURF_16_BANK));
break;
- case 16: /* Thin 32 bpp. */
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ case 16:
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
- NUM_BANKS(ADDR_SURF_16_BANK) |
BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1));
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+ NUM_BANKS(ADDR_SURF_16_BANK));
break;
- case 17: /* Thin 64 bpp. */
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ case 17:
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
- TILE_SPLIT(split_equal_to_row_size) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
NUM_BANKS(ADDR_SURF_16_BANK) |
+ TILE_SPLIT(split_equal_to_row_size));
+ break;
+ case 18:
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_1D_TILED_THICK) |
+ PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16));
+ break;
+ case 19:
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_XTHICK) |
+ PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1));
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ TILE_SPLIT(split_equal_to_row_size));
break;
- case 21: /* 8 bpp PRT. */
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ case 20:
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_THICK) |
PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
- TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
NUM_BANKS(ADDR_SURF_16_BANK) |
- BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+ TILE_SPLIT(split_equal_to_row_size));
break;
- case 22: /* 16 bpp PRT */
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ case 21:
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+ NUM_BANKS(ADDR_SURF_4_BANK));
+ break;
+ case 22:
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
- NUM_BANKS(ADDR_SURF_16_BANK) |
BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+ NUM_BANKS(ADDR_SURF_4_BANK));
break;
- case 23: /* 32 bpp PRT */
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ case 23:
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
- NUM_BANKS(ADDR_SURF_16_BANK) |
BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+ NUM_BANKS(ADDR_SURF_2_BANK));
break;
- case 24: /* 64 bpp PRT */
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ case 24:
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
- NUM_BANKS(ADDR_SURF_16_BANK) |
BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+ NUM_BANKS(ADDR_SURF_2_BANK));
break;
- case 25: /* 128 bpp PRT */
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+ case 25:
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P4_8x16) |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_1KB) |
- NUM_BANKS(ADDR_SURF_8_BANK) |
BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1));
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+ NUM_BANKS(ADDR_SURF_2_BANK));
break;
- default:
- gb_tile_moden = 0;
+ case 26:
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_1KB) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+ NUM_BANKS(ADDR_SURF_2_BANK));
+ break;
+ case 27:
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_1KB) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+ NUM_BANKS(ADDR_SURF_2_BANK));
+ break;
+ case 28:
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_1KB) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+ NUM_BANKS(ADDR_SURF_2_BANK));
+ break;
+ case 29:
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_1KB) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+ NUM_BANKS(ADDR_SURF_2_BANK));
+ break;
+ case 30:
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+ NUM_BANKS(ADDR_SURF_2_BANK));
break;
+ default:
+ continue;
}
adev->gfx.config.tile_mode_array[reg_offset] = gb_tile_moden;
- WREG32(GB_TILE_MODE0 + reg_offset, gb_tile_moden);
+ WREG32(mmGB_TILE_MODE0 + reg_offset, gb_tile_moden);
}
} else {
@@ -894,19 +1301,23 @@ static void gfx_v6_0_select_se_sh(struct amdgpu_device *adev, u32 se_num,
u32 data;
if (instance == 0xffffffff)
- data = INSTANCE_BROADCAST_WRITES;
+ data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES, 1);
else
- data = INSTANCE_INDEX(instance);
+ data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_INDEX, instance);
if ((se_num == 0xffffffff) && (sh_num == 0xffffffff))
- data |= SH_BROADCAST_WRITES | SE_BROADCAST_WRITES;
+ data |= GRBM_GFX_INDEX__SH_BROADCAST_WRITES_MASK |
+ GRBM_GFX_INDEX__SE_BROADCAST_WRITES_MASK;
else if (se_num == 0xffffffff)
- data |= SE_BROADCAST_WRITES | SH_INDEX(sh_num);
+ data |= GRBM_GFX_INDEX__SE_BROADCAST_WRITES_MASK |
+ (sh_num << GRBM_GFX_INDEX__SH_INDEX__SHIFT);
else if (sh_num == 0xffffffff)
- data |= SH_BROADCAST_WRITES | SE_INDEX(se_num);
+ data |= GRBM_GFX_INDEX__SH_BROADCAST_WRITES_MASK |
+ (se_num << GRBM_GFX_INDEX__SE_INDEX__SHIFT);
else
- data |= SH_INDEX(sh_num) | SE_INDEX(se_num);
- WREG32(GRBM_GFX_INDEX, data);
+ data |= (sh_num << GRBM_GFX_INDEX__SH_INDEX__SHIFT) |
+ (se_num << GRBM_GFX_INDEX__SE_INDEX__SHIFT);
+ WREG32(mmGRBM_GFX_INDEX, data);
}
static u32 gfx_v6_0_create_bitmask(u32 bit_width)
@@ -920,11 +1331,11 @@ static u32 gfx_v6_0_get_rb_disabled(struct amdgpu_device *adev,
{
u32 data, mask;
- data = RREG32(CC_RB_BACKEND_DISABLE);
- data &= BACKEND_DISABLE_MASK;
- data |= RREG32(GC_USER_RB_BACKEND_DISABLE);
+ data = RREG32(mmCC_RB_BACKEND_DISABLE);
+ data &= CC_RB_BACKEND_DISABLE__BACKEND_DISABLE_MASK;
+ data |= RREG32(mmGC_USER_RB_BACKEND_DISABLE);
- data >>= BACKEND_DISABLE_SHIFT;
+ data >>= CC_RB_BACKEND_DISABLE__BACKEND_DISABLE__SHIFT;
mask = gfx_v6_0_create_bitmask(max_rb_num_per_se / sh_per_se);
@@ -936,14 +1347,23 @@ static void gfx_v6_0_raster_config(struct amdgpu_device *adev, u32 *rconf)
switch (adev->asic_type) {
case CHIP_TAHITI:
case CHIP_PITCAIRN:
- *rconf |= RB_XSEL2(2) | RB_XSEL | PKR_MAP(2) | PKR_YSEL(1) |
- SE_MAP(2) | SE_XSEL(2) | SE_YSEL(2);
+ *rconf |=
+ (2 << PA_SC_RASTER_CONFIG__RB_XSEL2__SHIFT) |
+ (1 << PA_SC_RASTER_CONFIG__RB_XSEL__SHIFT) |
+ (2 << PA_SC_RASTER_CONFIG__PKR_MAP__SHIFT) |
+ (1 << PA_SC_RASTER_CONFIG__PKR_YSEL__SHIFT) |
+ (2 << PA_SC_RASTER_CONFIG__SE_MAP__SHIFT) |
+ (2 << PA_SC_RASTER_CONFIG__SE_XSEL__SHIFT) |
+ (2 << PA_SC_RASTER_CONFIG__SE_YSEL__SHIFT);
break;
case CHIP_VERDE:
- *rconf |= RB_XSEL | PKR_MAP(2) | PKR_YSEL(1);
+ *rconf |=
+ (1 << PA_SC_RASTER_CONFIG__RB_XSEL__SHIFT) |
+ (2 << PA_SC_RASTER_CONFIG__PKR_MAP__SHIFT) |
+ (1 << PA_SC_RASTER_CONFIG__PKR_YSEL__SHIFT);
break;
case CHIP_OLAND:
- *rconf |= RB_YSEL;
+ *rconf |= (1 << PA_SC_RASTER_CONFIG__RB_YSEL__SHIFT);
break;
case CHIP_HAINAN:
*rconf |= 0x0;
@@ -981,24 +1401,24 @@ static void gfx_v6_0_write_harvested_raster_configs(struct amdgpu_device *adev,
int idx = (se / 2) * 2;
if ((num_se > 1) && (!se_mask[idx] || !se_mask[idx + 1])) {
- raster_config_se &= ~SE_MAP_MASK;
+ raster_config_se &= ~PA_SC_RASTER_CONFIG__SE_MAP_MASK;
if (!se_mask[idx]) {
- raster_config_se |= SE_MAP(RASTER_CONFIG_SE_MAP_3);
+ raster_config_se |= RASTER_CONFIG_SE_MAP_3 << PA_SC_RASTER_CONFIG__SE_MAP__SHIFT;
} else {
- raster_config_se |= SE_MAP(RASTER_CONFIG_SE_MAP_0);
+ raster_config_se |= RASTER_CONFIG_SE_MAP_0 << PA_SC_RASTER_CONFIG__SE_MAP__SHIFT;
}
}
pkr0_mask &= rb_mask;
pkr1_mask &= rb_mask;
if (rb_per_se > 2 && (!pkr0_mask || !pkr1_mask)) {
- raster_config_se &= ~PKR_MAP_MASK;
+ raster_config_se &= ~PA_SC_RASTER_CONFIG__PKR_MAP_MASK;
if (!pkr0_mask) {
- raster_config_se |= PKR_MAP(RASTER_CONFIG_PKR_MAP_3);
+ raster_config_se |= RASTER_CONFIG_PKR_MAP_3 << PA_SC_RASTER_CONFIG__PKR_MAP__SHIFT;
} else {
- raster_config_se |= PKR_MAP(RASTER_CONFIG_PKR_MAP_0);
+ raster_config_se |= RASTER_CONFIG_PKR_MAP_0 << PA_SC_RASTER_CONFIG__PKR_MAP__SHIFT;
}
}
@@ -1009,14 +1429,14 @@ static void gfx_v6_0_write_harvested_raster_configs(struct amdgpu_device *adev,
rb0_mask &= rb_mask;
rb1_mask &= rb_mask;
if (!rb0_mask || !rb1_mask) {
- raster_config_se &= ~RB_MAP_PKR0_MASK;
+ raster_config_se &= ~PA_SC_RASTER_CONFIG__RB_MAP_PKR0_MASK;
if (!rb0_mask) {
raster_config_se |=
- RB_MAP_PKR0(RASTER_CONFIG_RB_MAP_3);
+ RASTER_CONFIG_RB_MAP_3 << PA_SC_RASTER_CONFIG__RB_MAP_PKR0__SHIFT;
} else {
raster_config_se |=
- RB_MAP_PKR0(RASTER_CONFIG_RB_MAP_0);
+ RASTER_CONFIG_RB_MAP_0 << PA_SC_RASTER_CONFIG__RB_MAP_PKR0__SHIFT;
}
}
@@ -1026,14 +1446,14 @@ static void gfx_v6_0_write_harvested_raster_configs(struct amdgpu_device *adev,
rb0_mask &= rb_mask;
rb1_mask &= rb_mask;
if (!rb0_mask || !rb1_mask) {
- raster_config_se &= ~RB_MAP_PKR1_MASK;
+ raster_config_se &= ~PA_SC_RASTER_CONFIG__RB_MAP_PKR1_MASK;
if (!rb0_mask) {
raster_config_se |=
- RB_MAP_PKR1(RASTER_CONFIG_RB_MAP_3);
+ RASTER_CONFIG_RB_MAP_3 << PA_SC_RASTER_CONFIG__RB_MAP_PKR1__SHIFT;
} else {
raster_config_se |=
- RB_MAP_PKR1(RASTER_CONFIG_RB_MAP_0);
+ RASTER_CONFIG_RB_MAP_0 << PA_SC_RASTER_CONFIG__RB_MAP_PKR1__SHIFT;
}
}
}
@@ -1041,7 +1461,7 @@ static void gfx_v6_0_write_harvested_raster_configs(struct amdgpu_device *adev,
/* GRBM_GFX_INDEX has a different offset on SI */
gfx_v6_0_select_se_sh(adev, se, 0xffffffff, 0xffffffff);
- WREG32(PA_SC_RASTER_CONFIG, raster_config_se);
+ WREG32(mmPA_SC_RASTER_CONFIG, raster_config_se);
}
/* GRBM_GFX_INDEX has a different offset on SI */
@@ -1063,7 +1483,7 @@ static void gfx_v6_0_setup_rb(struct amdgpu_device *adev,
for (j = 0; j < sh_per_se; j++) {
gfx_v6_0_select_se_sh(adev, i, j, 0xffffffff);
data = gfx_v6_0_get_rb_disabled(adev, max_rb_num_per_se, sh_per_se);
- disabled_rbs |= data << ((i * sh_per_se + j) * TAHITI_RB_BITMAP_WIDTH_PER_SH);
+ disabled_rbs |= data << ((i * sh_per_se + j) * 2);
}
}
gfx_v6_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
@@ -1105,7 +1525,7 @@ static void gfx_v6_0_setup_rb(struct amdgpu_device *adev,
if (!adev->gfx.config.backend_enable_mask ||
adev->gfx.config.num_rbs >= num_rb_pipes)
- WREG32(PA_SC_RASTER_CONFIG, data);
+ WREG32(mmPA_SC_RASTER_CONFIG, data);
else
gfx_v6_0_write_harvested_raster_configs(adev, data,
adev->gfx.config.backend_enable_mask,
@@ -1124,11 +1544,11 @@ static u32 gfx_v6_0_get_cu_enabled(struct amdgpu_device *adev, u32 cu_per_sh)
{
u32 data, mask;
- data = RREG32(CC_GC_SHADER_ARRAY_CONFIG);
- data &= INACTIVE_CUS_MASK;
- data |= RREG32(GC_USER_SHADER_ARRAY_CONFIG);
+ data = RREG32(mmCC_GC_SHADER_ARRAY_CONFIG);
+ data &= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_CUS_MASK;
+ data |= RREG32(mmGC_USER_SHADER_ARRAY_CONFIG);
- data >>= INACTIVE_CUS_SHIFT;
+ data >>= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_CUS__SHIFT;
mask = gfx_v6_0_create_bitmask(cu_per_sh);
@@ -1148,7 +1568,7 @@ static void gfx_v6_0_setup_spi(struct amdgpu_device *adev,
for (i = 0; i < se_num; i++) {
for (j = 0; j < sh_per_se; j++) {
gfx_v6_0_select_se_sh(adev, i, j, 0xffffffff);
- data = RREG32(SPI_STATIC_THREAD_MGMT_3);
+ data = RREG32(mmSPI_STATIC_THREAD_MGMT_3);
active_cu = gfx_v6_0_get_cu_enabled(adev, cu_per_sh);
mask = 1;
@@ -1156,7 +1576,7 @@ static void gfx_v6_0_setup_spi(struct amdgpu_device *adev,
mask <<= k;
if (active_cu & mask) {
data &= ~mask;
- WREG32(SPI_STATIC_THREAD_MGMT_3, data);
+ WREG32(mmSPI_STATIC_THREAD_MGMT_3, data);
break;
}
}
@@ -1209,7 +1629,6 @@ static void gfx_v6_0_gpu_init(struct amdgpu_device *adev)
adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
gb_addr_config = TAHITI_GB_ADDR_CONFIG_GOLDEN;
break;
-
case CHIP_VERDE:
adev->gfx.config.max_shader_engines = 1;
adev->gfx.config.max_tile_pipes = 4;
@@ -1266,18 +1685,18 @@ static void gfx_v6_0_gpu_init(struct amdgpu_device *adev)
break;
}
- WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff));
- WREG32(SRBM_INT_CNTL, 1);
- WREG32(SRBM_INT_ACK, 1);
+ WREG32(mmGRBM_CNTL, (0xff << GRBM_CNTL__READ_TIMEOUT__SHIFT));
+ WREG32(mmSRBM_INT_CNTL, 1);
+ WREG32(mmSRBM_INT_ACK, 1);
- WREG32(BIF_FB_EN, FB_READ_EN | FB_WRITE_EN);
+ WREG32(mmBIF_FB_EN, BIF_FB_EN__FB_READ_EN_MASK | BIF_FB_EN__FB_WRITE_EN_MASK);
- mc_shared_chmap = RREG32(MC_SHARED_CHMAP);
- mc_arb_ramcfg = RREG32(MC_ARB_RAMCFG);
+ mc_shared_chmap = RREG32(mmMC_SHARED_CHMAP);
+ mc_arb_ramcfg = RREG32(mmMC_ARB_RAMCFG);
adev->gfx.config.num_tile_pipes = adev->gfx.config.max_tile_pipes;
adev->gfx.config.mem_max_burst_length_bytes = 256;
- tmp = (mc_arb_ramcfg & NOOFCOLS_MASK) >> NOOFCOLS_SHIFT;
+ tmp = (mc_arb_ramcfg & MC_ARB_RAMCFG__NOOFCOLS_MASK) >> MC_ARB_RAMCFG__NOOFCOLS__SHIFT;
adev->gfx.config.mem_row_size_in_kb = (4 * (1 << (8 + tmp))) / 1024;
if (adev->gfx.config.mem_row_size_in_kb > 4)
adev->gfx.config.mem_row_size_in_kb = 4;
@@ -1285,32 +1704,33 @@ static void gfx_v6_0_gpu_init(struct amdgpu_device *adev)
adev->gfx.config.num_gpus = 1;
adev->gfx.config.multi_gpu_tile_size = 64;
- gb_addr_config &= ~ROW_SIZE_MASK;
+ gb_addr_config &= ~GB_ADDR_CONFIG__ROW_SIZE_MASK;
switch (adev->gfx.config.mem_row_size_in_kb) {
case 1:
default:
- gb_addr_config |= ROW_SIZE(0);
+ gb_addr_config |= 0 << GB_ADDR_CONFIG__ROW_SIZE__SHIFT;
break;
case 2:
- gb_addr_config |= ROW_SIZE(1);
+ gb_addr_config |= 1 << GB_ADDR_CONFIG__ROW_SIZE__SHIFT;
break;
case 4:
- gb_addr_config |= ROW_SIZE(2);
+ gb_addr_config |= 2 << GB_ADDR_CONFIG__ROW_SIZE__SHIFT;
break;
}
adev->gfx.config.gb_addr_config = gb_addr_config;
- WREG32(GB_ADDR_CONFIG, gb_addr_config);
- WREG32(DMIF_ADDR_CONFIG, gb_addr_config);
- WREG32(DMIF_ADDR_CALC, gb_addr_config);
- WREG32(HDP_ADDR_CONFIG, gb_addr_config);
- WREG32(DMA_TILING_CONFIG + DMA0_REGISTER_OFFSET, gb_addr_config);
- WREG32(DMA_TILING_CONFIG + DMA1_REGISTER_OFFSET, gb_addr_config);
+ WREG32(mmGB_ADDR_CONFIG, gb_addr_config);
+ WREG32(mmDMIF_ADDR_CONFIG, gb_addr_config);
+ WREG32(mmDMIF_ADDR_CALC, gb_addr_config);
+ WREG32(mmHDP_ADDR_CONFIG, gb_addr_config);
+ WREG32(mmDMA_TILING_CONFIG + DMA0_REGISTER_OFFSET, gb_addr_config);
+ WREG32(mmDMA_TILING_CONFIG + DMA1_REGISTER_OFFSET, gb_addr_config);
+
#if 0
if (adev->has_uvd) {
- WREG32(UVD_UDEC_ADDR_CONFIG, gb_addr_config);
- WREG32(UVD_UDEC_DB_ADDR_CONFIG, gb_addr_config);
- WREG32(UVD_UDEC_DBW_ADDR_CONFIG, gb_addr_config);
+ WREG32(mmUVD_UDEC_ADDR_CONFIG, gb_addr_config);
+ WREG32(mmUVD_UDEC_DB_ADDR_CONFIG, gb_addr_config);
+ WREG32(mmUVD_UDEC_DBW_ADDR_CONFIG, gb_addr_config);
}
#endif
gfx_v6_0_tiling_mode_table_init(adev);
@@ -1325,45 +1745,48 @@ static void gfx_v6_0_gpu_init(struct amdgpu_device *adev)
gfx_v6_0_get_cu_info(adev);
- WREG32(CP_QUEUE_THRESHOLDS, (ROQ_IB1_START(0x16) |
- ROQ_IB2_START(0x2b)));
- WREG32(CP_MEQ_THRESHOLDS, MEQ1_START(0x30) | MEQ2_START(0x60));
+ WREG32(mmCP_QUEUE_THRESHOLDS, ((0x16 << CP_QUEUE_THRESHOLDS__ROQ_IB1_START__SHIFT) |
+ (0x2b << CP_QUEUE_THRESHOLDS__ROQ_IB2_START__SHIFT)));
+ WREG32(mmCP_MEQ_THRESHOLDS, (0x30 << CP_MEQ_THRESHOLDS__MEQ1_START__SHIFT) |
+ (0x60 << CP_MEQ_THRESHOLDS__MEQ2_START__SHIFT));
- sx_debug_1 = RREG32(SX_DEBUG_1);
- WREG32(SX_DEBUG_1, sx_debug_1);
+ sx_debug_1 = RREG32(mmSX_DEBUG_1);
+ WREG32(mmSX_DEBUG_1, sx_debug_1);
- WREG32(SPI_CONFIG_CNTL_1, VTX_DONE_DELAY(4));
+ WREG32(mmSPI_CONFIG_CNTL_1, (4 << SPI_CONFIG_CNTL_1__VTX_DONE_DELAY__SHIFT));
- WREG32(PA_SC_FIFO_SIZE, (SC_FRONTEND_PRIM_FIFO_SIZE(adev->gfx.config.sc_prim_fifo_size_frontend) |
- SC_BACKEND_PRIM_FIFO_SIZE(adev->gfx.config.sc_prim_fifo_size_backend) |
- SC_HIZ_TILE_FIFO_SIZE(adev->gfx.config.sc_hiz_tile_fifo_size) |
- SC_EARLYZ_TILE_FIFO_SIZE(adev->gfx.config.sc_earlyz_tile_fifo_size)));
+ WREG32(mmPA_SC_FIFO_SIZE, ((adev->gfx.config.sc_prim_fifo_size_frontend << PA_SC_FIFO_SIZE__SC_FRONTEND_PRIM_FIFO_SIZE__SHIFT) |
+ (adev->gfx.config.sc_prim_fifo_size_backend << PA_SC_FIFO_SIZE__SC_BACKEND_PRIM_FIFO_SIZE__SHIFT) |
+ (adev->gfx.config.sc_hiz_tile_fifo_size << PA_SC_FIFO_SIZE__SC_HIZ_TILE_FIFO_SIZE__SHIFT) |
+ (adev->gfx.config.sc_earlyz_tile_fifo_size << PA_SC_FIFO_SIZE__SC_EARLYZ_TILE_FIFO_SIZE__SHIFT)));
- WREG32(VGT_NUM_INSTANCES, 1);
- WREG32(CP_PERFMON_CNTL, 0);
- WREG32(SQ_CONFIG, 0);
- WREG32(PA_SC_FORCE_EOV_MAX_CNTS, (FORCE_EOV_MAX_CLK_CNT(4095) |
- FORCE_EOV_MAX_REZ_CNT(255)));
+ WREG32(mmVGT_NUM_INSTANCES, 1);
+ WREG32(mmCP_PERFMON_CNTL, 0);
+ WREG32(mmSQ_CONFIG, 0);
+ WREG32(mmPA_SC_FORCE_EOV_MAX_CNTS, ((4095 << PA_SC_FORCE_EOV_MAX_CNTS__FORCE_EOV_MAX_CLK_CNT__SHIFT) |
+ (255 << PA_SC_FORCE_EOV_MAX_CNTS__FORCE_EOV_MAX_REZ_CNT__SHIFT)));
- WREG32(VGT_CACHE_INVALIDATION, CACHE_INVALIDATION(VC_AND_TC) |
- AUTO_INVLD_EN(ES_AND_GS_AUTO));
+ WREG32(mmVGT_CACHE_INVALIDATION,
+ (VC_AND_TC << VGT_CACHE_INVALIDATION__CACHE_INVALIDATION__SHIFT) |
+ (ES_AND_GS_AUTO << VGT_CACHE_INVALIDATION__AUTO_INVLD_EN__SHIFT));
- WREG32(VGT_GS_VERTEX_REUSE, 16);
- WREG32(PA_SC_LINE_STIPPLE_STATE, 0);
+ WREG32(mmVGT_GS_VERTEX_REUSE, 16);
+ WREG32(mmPA_SC_LINE_STIPPLE_STATE, 0);
- WREG32(CB_PERFCOUNTER0_SELECT0, 0);
- WREG32(CB_PERFCOUNTER0_SELECT1, 0);
- WREG32(CB_PERFCOUNTER1_SELECT0, 0);
- WREG32(CB_PERFCOUNTER1_SELECT1, 0);
- WREG32(CB_PERFCOUNTER2_SELECT0, 0);
- WREG32(CB_PERFCOUNTER2_SELECT1, 0);
- WREG32(CB_PERFCOUNTER3_SELECT0, 0);
- WREG32(CB_PERFCOUNTER3_SELECT1, 0);
+ WREG32(mmCB_PERFCOUNTER0_SELECT0, 0);
+ WREG32(mmCB_PERFCOUNTER0_SELECT1, 0);
+ WREG32(mmCB_PERFCOUNTER1_SELECT0, 0);
+ WREG32(mmCB_PERFCOUNTER1_SELECT1, 0);
+ WREG32(mmCB_PERFCOUNTER2_SELECT0, 0);
+ WREG32(mmCB_PERFCOUNTER2_SELECT1, 0);
+ WREG32(mmCB_PERFCOUNTER3_SELECT0, 0);
+ WREG32(mmCB_PERFCOUNTER3_SELECT1, 0);
- hdp_host_path_cntl = RREG32(HDP_HOST_PATH_CNTL);
- WREG32(HDP_HOST_PATH_CNTL, hdp_host_path_cntl);
+ hdp_host_path_cntl = RREG32(mmHDP_HOST_PATH_CNTL);
+ WREG32(mmHDP_HOST_PATH_CNTL, hdp_host_path_cntl);
- WREG32(PA_CL_ENHANCE, CLIP_VTX_REORDER_ENA | NUM_CLIP_SEQ(3));
+ WREG32(mmPA_CL_ENHANCE, PA_CL_ENHANCE__CLIP_VTX_REORDER_ENA_MASK |
+ (3 << PA_CL_ENHANCE__NUM_CLIP_SEQ__SHIFT));
udelay(50);
}
@@ -1374,7 +1797,7 @@ static void gfx_v6_0_scratch_init(struct amdgpu_device *adev)
int i;
adev->gfx.scratch.num_reg = 7;
- adev->gfx.scratch.reg_base = SCRATCH_REG0;
+ adev->gfx.scratch.reg_base = mmSCRATCH_REG0;
for (i = 0; i < adev->gfx.scratch.num_reg; i++) {
adev->gfx.scratch.free[i] = true;
adev->gfx.scratch.reg[i] = adev->gfx.scratch.reg_base + i;
@@ -1430,11 +1853,18 @@ static void gfx_v6_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(1) |
WRITE_DATA_DST_SEL(0)));
- amdgpu_ring_write(ring, HDP_MEM_COHERENCY_FLUSH_CNTL);
+ amdgpu_ring_write(ring, mmHDP_MEM_COHERENCY_FLUSH_CNTL);
amdgpu_ring_write(ring, 0);
amdgpu_ring_write(ring, 0x1);
}
+static void gfx_v6_0_ring_emit_vgt_flush(struct amdgpu_ring *ring)
+{
+ amdgpu_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE, 0));
+ amdgpu_ring_write(ring, EVENT_TYPE(VGT_FLUSH) |
+ EVENT_INDEX(0));
+}
+
/**
* gfx_v6_0_ring_emit_hdp_invalidate - emit an hdp invalidate on the cp
*
@@ -1448,7 +1878,7 @@ static void gfx_v6_0_ring_emit_hdp_invalidate(struct amdgpu_ring *ring)
amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(1) |
WRITE_DATA_DST_SEL(0)));
- amdgpu_ring_write(ring, HDP_DEBUG0);
+ amdgpu_ring_write(ring, mmHDP_DEBUG0);
amdgpu_ring_write(ring, 0);
amdgpu_ring_write(ring, 0x1);
}
@@ -1460,7 +1890,7 @@ static void gfx_v6_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
bool int_sel = flags & AMDGPU_FENCE_FLAG_INT;
/* flush read cache over gart */
amdgpu_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
- amdgpu_ring_write(ring, (CP_COHER_CNTL2 - PACKET3_SET_CONFIG_REG_START));
+ amdgpu_ring_write(ring, (mmCP_COHER_CNTL2 - PACKET3_SET_CONFIG_REG_START));
amdgpu_ring_write(ring, 0);
amdgpu_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
amdgpu_ring_write(ring, PACKET3_TCL1_ACTION_ENA |
@@ -1475,7 +1905,8 @@ static void gfx_v6_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
amdgpu_ring_write(ring, EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) | EVENT_INDEX(5));
amdgpu_ring_write(ring, addr & 0xfffffffc);
amdgpu_ring_write(ring, (upper_32_bits(addr) & 0xffff) |
- DATA_SEL(write64bit ? 2 : 1) | INT_SEL(int_sel ? 2 : 0));
+ ((write64bit ? 2 : 1) << CP_EOP_DONE_DATA_CNTL__DATA_SEL__SHIFT) |
+ ((int_sel ? 2 : 0) << CP_EOP_DONE_DATA_CNTL__INT_SEL__SHIFT));
amdgpu_ring_write(ring, lower_32_bits(seq));
amdgpu_ring_write(ring, upper_32_bits(seq));
}
@@ -1522,7 +1953,7 @@ static int gfx_v6_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
struct amdgpu_device *adev = ring->adev;
struct amdgpu_ib ib;
- struct fence *f = NULL;
+ struct dma_fence *f = NULL;
uint32_t scratch;
uint32_t tmp = 0;
long r;
@@ -1548,7 +1979,7 @@ static int gfx_v6_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
if (r)
goto err2;
- r = fence_wait_timeout(f, false, timeout);
+ r = dma_fence_wait_timeout(f, false, timeout);
if (r == 0) {
DRM_ERROR("amdgpu: IB test timed out\n");
r = -ETIMEDOUT;
@@ -1569,7 +2000,7 @@ static int gfx_v6_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
err2:
amdgpu_ib_free(adev, &ib, NULL);
- fence_put(f);
+ dma_fence_put(f);
err1:
amdgpu_gfx_scratch_free(adev, scratch);
return r;
@@ -1578,11 +2009,13 @@ err1:
static void gfx_v6_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable)
{
int i;
- if (enable)
- WREG32(CP_ME_CNTL, 0);
- else {
- WREG32(CP_ME_CNTL, (CP_ME_HALT | CP_PFP_HALT | CP_CE_HALT));
- WREG32(SCRATCH_UMSK, 0);
+ if (enable) {
+ WREG32(mmCP_ME_CNTL, 0);
+ } else {
+ WREG32(mmCP_ME_CNTL, (CP_ME_CNTL__ME_HALT_MASK |
+ CP_ME_CNTL__PFP_HALT_MASK |
+ CP_ME_CNTL__CE_HALT_MASK));
+ WREG32(mmSCRATCH_UMSK, 0);
for (i = 0; i < adev->gfx.num_gfx_rings; i++)
adev->gfx.gfx_ring[i].ready = false;
for (i = 0; i < adev->gfx.num_compute_rings; i++)
@@ -1616,34 +2049,33 @@ static int gfx_v6_0_cp_gfx_load_microcode(struct amdgpu_device *adev)
fw_data = (const __le32 *)
(adev->gfx.pfp_fw->data + le32_to_cpu(pfp_hdr->header.ucode_array_offset_bytes));
fw_size = le32_to_cpu(pfp_hdr->header.ucode_size_bytes) / 4;
- WREG32(CP_PFP_UCODE_ADDR, 0);
+ WREG32(mmCP_PFP_UCODE_ADDR, 0);
for (i = 0; i < fw_size; i++)
- WREG32(CP_PFP_UCODE_DATA, le32_to_cpup(fw_data++));
- WREG32(CP_PFP_UCODE_ADDR, 0);
+ WREG32(mmCP_PFP_UCODE_DATA, le32_to_cpup(fw_data++));
+ WREG32(mmCP_PFP_UCODE_ADDR, 0);
/* CE */
fw_data = (const __le32 *)
(adev->gfx.ce_fw->data + le32_to_cpu(ce_hdr->header.ucode_array_offset_bytes));
fw_size = le32_to_cpu(ce_hdr->header.ucode_size_bytes) / 4;
- WREG32(CP_CE_UCODE_ADDR, 0);
+ WREG32(mmCP_CE_UCODE_ADDR, 0);
for (i = 0; i < fw_size; i++)
- WREG32(CP_CE_UCODE_DATA, le32_to_cpup(fw_data++));
- WREG32(CP_CE_UCODE_ADDR, 0);
+ WREG32(mmCP_CE_UCODE_DATA, le32_to_cpup(fw_data++));
+ WREG32(mmCP_CE_UCODE_ADDR, 0);
/* ME */
fw_data = (const __be32 *)
(adev->gfx.me_fw->data + le32_to_cpu(me_hdr->header.ucode_array_offset_bytes));
fw_size = le32_to_cpu(me_hdr->header.ucode_size_bytes) / 4;
- WREG32(CP_ME_RAM_WADDR, 0);
+ WREG32(mmCP_ME_RAM_WADDR, 0);
for (i = 0; i < fw_size; i++)
- WREG32(CP_ME_RAM_DATA, le32_to_cpup(fw_data++));
- WREG32(CP_ME_RAM_WADDR, 0);
+ WREG32(mmCP_ME_RAM_DATA, le32_to_cpup(fw_data++));
+ WREG32(mmCP_ME_RAM_WADDR, 0);
-
- WREG32(CP_PFP_UCODE_ADDR, 0);
- WREG32(CP_CE_UCODE_ADDR, 0);
- WREG32(CP_ME_RAM_WADDR, 0);
- WREG32(CP_ME_RAM_RADDR, 0);
+ WREG32(mmCP_PFP_UCODE_ADDR, 0);
+ WREG32(mmCP_CE_UCODE_ADDR, 0);
+ WREG32(mmCP_ME_RAM_WADDR, 0);
+ WREG32(mmCP_ME_RAM_RADDR, 0);
return 0;
}
@@ -1720,14 +2152,14 @@ static int gfx_v6_0_cp_gfx_resume(struct amdgpu_device *adev)
int r;
u64 rptr_addr;
- WREG32(CP_SEM_WAIT_TIMER, 0x0);
- WREG32(CP_SEM_INCOMPLETE_TIMER_CNTL, 0x0);
+ WREG32(mmCP_SEM_WAIT_TIMER, 0x0);
+ WREG32(mmCP_SEM_INCOMPLETE_TIMER_CNTL, 0x0);
/* Set the write pointer delay */
- WREG32(CP_RB_WPTR_DELAY, 0);
+ WREG32(mmCP_RB_WPTR_DELAY, 0);
- WREG32(CP_DEBUG, 0);
- WREG32(SCRATCH_ADDR, 0);
+ WREG32(mmCP_DEBUG, 0);
+ WREG32(mmSCRATCH_ADDR, 0);
/* ring 0 - compute and gfx */
/* Set ring buffer size */
@@ -1738,24 +2170,24 @@ static int gfx_v6_0_cp_gfx_resume(struct amdgpu_device *adev)
#ifdef __BIG_ENDIAN
tmp |= BUF_SWAP_32BIT;
#endif
- WREG32(CP_RB0_CNTL, tmp);
+ WREG32(mmCP_RB0_CNTL, tmp);
/* Initialize the ring buffer's read and write pointers */
- WREG32(CP_RB0_CNTL, tmp | RB_RPTR_WR_ENA);
+ WREG32(mmCP_RB0_CNTL, tmp | CP_RB0_CNTL__RB_RPTR_WR_ENA_MASK);
ring->wptr = 0;
- WREG32(CP_RB0_WPTR, ring->wptr);
+ WREG32(mmCP_RB0_WPTR, ring->wptr);
/* set the wb address whether it's enabled or not */
rptr_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4);
- WREG32(CP_RB0_RPTR_ADDR, lower_32_bits(rptr_addr));
- WREG32(CP_RB0_RPTR_ADDR_HI, upper_32_bits(rptr_addr) & 0xFF);
+ WREG32(mmCP_RB0_RPTR_ADDR, lower_32_bits(rptr_addr));
+ WREG32(mmCP_RB0_RPTR_ADDR_HI, upper_32_bits(rptr_addr) & 0xFF);
- WREG32(SCRATCH_UMSK, 0);
+ WREG32(mmSCRATCH_UMSK, 0);
mdelay(1);
- WREG32(CP_RB0_CNTL, tmp);
+ WREG32(mmCP_RB0_CNTL, tmp);
- WREG32(CP_RB0_BASE, ring->gpu_addr >> 8);
+ WREG32(mmCP_RB0_BASE, ring->gpu_addr >> 8);
/* start the rings */
gfx_v6_0_cp_gfx_start(adev);
@@ -1779,11 +2211,11 @@ static u32 gfx_v6_0_ring_get_wptr(struct amdgpu_ring *ring)
struct amdgpu_device *adev = ring->adev;
if (ring == &adev->gfx.gfx_ring[0])
- return RREG32(CP_RB0_WPTR);
+ return RREG32(mmCP_RB0_WPTR);
else if (ring == &adev->gfx.compute_ring[0])
- return RREG32(CP_RB1_WPTR);
+ return RREG32(mmCP_RB1_WPTR);
else if (ring == &adev->gfx.compute_ring[1])
- return RREG32(CP_RB2_WPTR);
+ return RREG32(mmCP_RB2_WPTR);
else
BUG();
}
@@ -1792,8 +2224,8 @@ static void gfx_v6_0_ring_set_wptr_gfx(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
- WREG32(CP_RB0_WPTR, ring->wptr);
- (void)RREG32(CP_RB0_WPTR);
+ WREG32(mmCP_RB0_WPTR, ring->wptr);
+ (void)RREG32(mmCP_RB0_WPTR);
}
static void gfx_v6_0_ring_set_wptr_compute(struct amdgpu_ring *ring)
@@ -1801,11 +2233,11 @@ static void gfx_v6_0_ring_set_wptr_compute(struct amdgpu_ring *ring)
struct amdgpu_device *adev = ring->adev;
if (ring == &adev->gfx.compute_ring[0]) {
- WREG32(CP_RB1_WPTR, ring->wptr);
- (void)RREG32(CP_RB1_WPTR);
+ WREG32(mmCP_RB1_WPTR, ring->wptr);
+ (void)RREG32(mmCP_RB1_WPTR);
} else if (ring == &adev->gfx.compute_ring[1]) {
- WREG32(CP_RB2_WPTR, ring->wptr);
- (void)RREG32(CP_RB2_WPTR);
+ WREG32(mmCP_RB2_WPTR, ring->wptr);
+ (void)RREG32(mmCP_RB2_WPTR);
} else {
BUG();
}
@@ -1817,7 +2249,7 @@ static int gfx_v6_0_cp_compute_resume(struct amdgpu_device *adev)
struct amdgpu_ring *ring;
u32 tmp;
u32 rb_bufsz;
- int r;
+ int i, r;
u64 rptr_addr;
/* ring1 - compute only */
@@ -1829,19 +2261,19 @@ static int gfx_v6_0_cp_compute_resume(struct amdgpu_device *adev)
#ifdef __BIG_ENDIAN
tmp |= BUF_SWAP_32BIT;
#endif
- WREG32(CP_RB1_CNTL, tmp);
+ WREG32(mmCP_RB1_CNTL, tmp);
- WREG32(CP_RB1_CNTL, tmp | RB_RPTR_WR_ENA);
+ WREG32(mmCP_RB1_CNTL, tmp | CP_RB1_CNTL__RB_RPTR_WR_ENA_MASK);
ring->wptr = 0;
- WREG32(CP_RB1_WPTR, ring->wptr);
+ WREG32(mmCP_RB1_WPTR, ring->wptr);
rptr_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4);
- WREG32(CP_RB1_RPTR_ADDR, lower_32_bits(rptr_addr));
- WREG32(CP_RB1_RPTR_ADDR_HI, upper_32_bits(rptr_addr) & 0xFF);
+ WREG32(mmCP_RB1_RPTR_ADDR, lower_32_bits(rptr_addr));
+ WREG32(mmCP_RB1_RPTR_ADDR_HI, upper_32_bits(rptr_addr) & 0xFF);
mdelay(1);
- WREG32(CP_RB1_CNTL, tmp);
- WREG32(CP_RB1_BASE, ring->gpu_addr >> 8);
+ WREG32(mmCP_RB1_CNTL, tmp);
+ WREG32(mmCP_RB1_BASE, ring->gpu_addr >> 8);
ring = &adev->gfx.compute_ring[1];
rb_bufsz = order_base_2(ring->ring_size / 8);
@@ -1849,32 +2281,27 @@ static int gfx_v6_0_cp_compute_resume(struct amdgpu_device *adev)
#ifdef __BIG_ENDIAN
tmp |= BUF_SWAP_32BIT;
#endif
- WREG32(CP_RB2_CNTL, tmp);
+ WREG32(mmCP_RB2_CNTL, tmp);
- WREG32(CP_RB2_CNTL, tmp | RB_RPTR_WR_ENA);
+ WREG32(mmCP_RB2_CNTL, tmp | CP_RB2_CNTL__RB_RPTR_WR_ENA_MASK);
ring->wptr = 0;
- WREG32(CP_RB2_WPTR, ring->wptr);
+ WREG32(mmCP_RB2_WPTR, ring->wptr);
rptr_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4);
- WREG32(CP_RB2_RPTR_ADDR, lower_32_bits(rptr_addr));
- WREG32(CP_RB2_RPTR_ADDR_HI, upper_32_bits(rptr_addr) & 0xFF);
+ WREG32(mmCP_RB2_RPTR_ADDR, lower_32_bits(rptr_addr));
+ WREG32(mmCP_RB2_RPTR_ADDR_HI, upper_32_bits(rptr_addr) & 0xFF);
mdelay(1);
- WREG32(CP_RB2_CNTL, tmp);
- WREG32(CP_RB2_BASE, ring->gpu_addr >> 8);
-
- adev->gfx.compute_ring[0].ready = true;
- adev->gfx.compute_ring[1].ready = true;
+ WREG32(mmCP_RB2_CNTL, tmp);
+ WREG32(mmCP_RB2_BASE, ring->gpu_addr >> 8);
- r = amdgpu_ring_test_ring(&adev->gfx.compute_ring[0]);
- if (r) {
- adev->gfx.compute_ring[0].ready = false;
- return r;
- }
+ adev->gfx.compute_ring[0].ready = false;
+ adev->gfx.compute_ring[1].ready = false;
- r = amdgpu_ring_test_ring(&adev->gfx.compute_ring[1]);
- if (r) {
- adev->gfx.compute_ring[1].ready = false;
- return r;
+ for (i = 0; i < 2; i++) {
+ r = amdgpu_ring_test_ring(&adev->gfx.compute_ring[i]);
+ if (r)
+ return r;
+ adev->gfx.compute_ring[i].ready = true;
}
return 0;
@@ -1892,24 +2319,26 @@ static int gfx_v6_0_cp_load_microcode(struct amdgpu_device *adev)
static void gfx_v6_0_enable_gui_idle_interrupt(struct amdgpu_device *adev,
bool enable)
-{
- u32 tmp = RREG32(CP_INT_CNTL_RING0);
+{
+ u32 tmp = RREG32(mmCP_INT_CNTL_RING0);
u32 mask;
int i;
if (enable)
- tmp |= (CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
+ tmp |= (CP_INT_CNTL__CNTX_BUSY_INT_ENABLE_MASK |
+ CP_INT_CNTL__CNTX_EMPTY_INT_ENABLE_MASK);
else
- tmp &= ~(CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
- WREG32(CP_INT_CNTL_RING0, tmp);
+ tmp &= ~(CP_INT_CNTL__CNTX_BUSY_INT_ENABLE_MASK |
+ CP_INT_CNTL__CNTX_EMPTY_INT_ENABLE_MASK);
+ WREG32(mmCP_INT_CNTL_RING0, tmp);
if (!enable) {
/* read a gfx register */
- tmp = RREG32(DB_DEPTH_INFO);
+ tmp = RREG32(mmDB_DEPTH_INFO);
mask = RLC_BUSY_STATUS | GFX_POWER_STATUS | GFX_CLOCK_STATUS | GFX_LS_STATUS;
for (i = 0; i < adev->usec_timeout; i++) {
- if ((RREG32(RLC_STAT) & mask) == (GFX_CLOCK_STATUS | GFX_POWER_STATUS))
+ if ((RREG32(mmRLC_STAT) & mask) == (GFX_CLOCK_STATUS | GFX_POWER_STATUS))
break;
udelay(1);
}
@@ -1940,7 +2369,7 @@ static int gfx_v6_0_cp_resume(struct amdgpu_device *adev)
static void gfx_v6_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
{
- int usepfp = (ring->type == AMDGPU_RING_TYPE_GFX);
+ int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
uint32_t seq = ring->fence_drv.sync_seq;
uint64_t addr = ring->fence_drv.gpu_addr;
@@ -1966,16 +2395,16 @@ static void gfx_v6_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
static void gfx_v6_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
unsigned vm_id, uint64_t pd_addr)
{
- int usepfp = (ring->type == AMDGPU_RING_TYPE_GFX);
+ int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
/* write new base address */
amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(1) |
WRITE_DATA_DST_SEL(0)));
if (vm_id < 8) {
- amdgpu_ring_write(ring, (VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vm_id ));
+ amdgpu_ring_write(ring, (mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vm_id));
} else {
- amdgpu_ring_write(ring, (VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + (vm_id - 8)));
+ amdgpu_ring_write(ring, (mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + (vm_id - 8)));
}
amdgpu_ring_write(ring, 0);
amdgpu_ring_write(ring, pd_addr >> 12);
@@ -1984,7 +2413,7 @@ static void gfx_v6_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(1) |
WRITE_DATA_DST_SEL(0)));
- amdgpu_ring_write(ring, VM_INVALIDATE_REQUEST);
+ amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST);
amdgpu_ring_write(ring, 0);
amdgpu_ring_write(ring, 1 << vm_id);
@@ -1992,7 +2421,7 @@ static void gfx_v6_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
amdgpu_ring_write(ring, (WAIT_REG_MEM_FUNCTION(0) | /* always */
WAIT_REG_MEM_ENGINE(0))); /* me */
- amdgpu_ring_write(ring, VM_INVALIDATE_REQUEST);
+ amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST);
amdgpu_ring_write(ring, 0);
amdgpu_ring_write(ring, 0); /* ref */
amdgpu_ring_write(ring, 0); /* mask */
@@ -2071,7 +2500,6 @@ static int gfx_v6_0_rlc_init(struct amdgpu_device *adev)
if (src_ptr) {
/* save restore block */
if (adev->gfx.rlc.save_restore_obj == NULL) {
-
r = amdgpu_bo_create(adev, dws * 4, PAGE_SIZE, true,
AMDGPU_GEM_DOMAIN_VRAM,
AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
@@ -2166,20 +2594,12 @@ static int gfx_v6_0_rlc_init(struct amdgpu_device *adev)
static void gfx_v6_0_enable_lbpw(struct amdgpu_device *adev, bool enable)
{
- u32 tmp;
-
- tmp = RREG32(RLC_LB_CNTL);
- if (enable)
- tmp |= LOAD_BALANCE_ENABLE;
- else
- tmp &= ~LOAD_BALANCE_ENABLE;
- WREG32(RLC_LB_CNTL, tmp);
+ WREG32_FIELD(RLC_LB_CNTL, LOAD_BALANCE_ENABLE, enable ? 1 : 0);
if (!enable) {
gfx_v6_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
- WREG32(SPI_LB_CU_MASK, 0x00ff);
+ WREG32(mmSPI_LB_CU_MASK, 0x00ff);
}
-
}
static void gfx_v6_0_wait_for_rlc_serdes(struct amdgpu_device *adev)
@@ -2187,13 +2607,13 @@ static void gfx_v6_0_wait_for_rlc_serdes(struct amdgpu_device *adev)
int i;
for (i = 0; i < adev->usec_timeout; i++) {
- if (RREG32(RLC_SERDES_MASTER_BUSY_0) == 0)
+ if (RREG32(mmRLC_SERDES_MASTER_BUSY_0) == 0)
break;
udelay(1);
}
for (i = 0; i < adev->usec_timeout; i++) {
- if (RREG32(RLC_SERDES_MASTER_BUSY_1) == 0)
+ if (RREG32(mmRLC_SERDES_MASTER_BUSY_1) == 0)
break;
udelay(1);
}
@@ -2203,20 +2623,20 @@ static void gfx_v6_0_update_rlc(struct amdgpu_device *adev, u32 rlc)
{
u32 tmp;
- tmp = RREG32(RLC_CNTL);
+ tmp = RREG32(mmRLC_CNTL);
if (tmp != rlc)
- WREG32(RLC_CNTL, rlc);
+ WREG32(mmRLC_CNTL, rlc);
}
static u32 gfx_v6_0_halt_rlc(struct amdgpu_device *adev)
{
u32 data, orig;
- orig = data = RREG32(RLC_CNTL);
+ orig = data = RREG32(mmRLC_CNTL);
- if (data & RLC_ENABLE) {
- data &= ~RLC_ENABLE;
- WREG32(RLC_CNTL, data);
+ if (data & RLC_CNTL__RLC_ENABLE_F32_MASK) {
+ data &= ~RLC_CNTL__RLC_ENABLE_F32_MASK;
+ WREG32(mmRLC_CNTL, data);
gfx_v6_0_wait_for_rlc_serdes(adev);
}
@@ -2226,7 +2646,7 @@ static u32 gfx_v6_0_halt_rlc(struct amdgpu_device *adev)
static void gfx_v6_0_rlc_stop(struct amdgpu_device *adev)
{
- WREG32(RLC_CNTL, 0);
+ WREG32(mmRLC_CNTL, 0);
gfx_v6_0_enable_gui_idle_interrupt(adev, false);
gfx_v6_0_wait_for_rlc_serdes(adev);
@@ -2234,7 +2654,7 @@ static void gfx_v6_0_rlc_stop(struct amdgpu_device *adev)
static void gfx_v6_0_rlc_start(struct amdgpu_device *adev)
{
- WREG32(RLC_CNTL, RLC_ENABLE);
+ WREG32(mmRLC_CNTL, RLC_CNTL__RLC_ENABLE_F32_MASK);
gfx_v6_0_enable_gui_idle_interrupt(adev, true);
@@ -2243,13 +2663,9 @@ static void gfx_v6_0_rlc_start(struct amdgpu_device *adev)
static void gfx_v6_0_rlc_reset(struct amdgpu_device *adev)
{
- u32 tmp = RREG32(GRBM_SOFT_RESET);
-
- tmp |= SOFT_RESET_RLC;
- WREG32(GRBM_SOFT_RESET, tmp);
+ WREG32_FIELD(GRBM_SOFT_RESET, SOFT_RESET_RLC, 1);
udelay(50);
- tmp &= ~SOFT_RESET_RLC;
- WREG32(GRBM_SOFT_RESET, tmp);
+ WREG32_FIELD(GRBM_SOFT_RESET, SOFT_RESET_RLC, 0);
udelay(50);
}
@@ -2258,11 +2674,12 @@ static bool gfx_v6_0_lbpw_supported(struct amdgpu_device *adev)
u32 tmp;
/* Enable LBPW only for DDR3 */
- tmp = RREG32(MC_SEQ_MISC0);
+ tmp = RREG32(mmMC_SEQ_MISC0);
if ((tmp & 0xF0000000) == 0xB0000000)
return true;
return false;
}
+
static void gfx_v6_0_init_cg(struct amdgpu_device *adev)
{
}
@@ -2283,15 +2700,15 @@ static int gfx_v6_0_rlc_resume(struct amdgpu_device *adev)
gfx_v6_0_init_pg(adev);
gfx_v6_0_init_cg(adev);
- WREG32(RLC_RL_BASE, 0);
- WREG32(RLC_RL_SIZE, 0);
- WREG32(RLC_LB_CNTL, 0);
- WREG32(RLC_LB_CNTR_MAX, 0xffffffff);
- WREG32(RLC_LB_CNTR_INIT, 0);
- WREG32(RLC_LB_INIT_CU_MASK, 0xffffffff);
+ WREG32(mmRLC_RL_BASE, 0);
+ WREG32(mmRLC_RL_SIZE, 0);
+ WREG32(mmRLC_LB_CNTL, 0);
+ WREG32(mmRLC_LB_CNTR_MAX, 0xffffffff);
+ WREG32(mmRLC_LB_CNTR_INIT, 0);
+ WREG32(mmRLC_LB_INIT_CU_MASK, 0xffffffff);
- WREG32(RLC_MC_CNTL, 0);
- WREG32(RLC_UCODE_CNTL, 0);
+ WREG32(mmRLC_MC_CNTL, 0);
+ WREG32(mmRLC_UCODE_CNTL, 0);
hdr = (const struct rlc_firmware_header_v1_0 *)adev->gfx.rlc_fw->data;
fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
@@ -2301,10 +2718,10 @@ static int gfx_v6_0_rlc_resume(struct amdgpu_device *adev)
amdgpu_ucode_print_rlc_hdr(&hdr->header);
for (i = 0; i < fw_size; i++) {
- WREG32(RLC_UCODE_ADDR, i);
- WREG32(RLC_UCODE_DATA, le32_to_cpup(fw_data++));
+ WREG32(mmRLC_UCODE_ADDR, i);
+ WREG32(mmRLC_UCODE_DATA, le32_to_cpup(fw_data++));
}
- WREG32(RLC_UCODE_ADDR, 0);
+ WREG32(mmRLC_UCODE_ADDR, 0);
gfx_v6_0_enable_lbpw(adev, gfx_v6_0_lbpw_supported(adev));
gfx_v6_0_rlc_start(adev);
@@ -2316,38 +2733,38 @@ static void gfx_v6_0_enable_cgcg(struct amdgpu_device *adev, bool enable)
{
u32 data, orig, tmp;
- orig = data = RREG32(RLC_CGCG_CGLS_CTRL);
+ orig = data = RREG32(mmRLC_CGCG_CGLS_CTRL);
if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG)) {
gfx_v6_0_enable_gui_idle_interrupt(adev, true);
- WREG32(RLC_GCPM_GENERAL_3, 0x00000080);
+ WREG32(mmRLC_GCPM_GENERAL_3, 0x00000080);
tmp = gfx_v6_0_halt_rlc(adev);
- WREG32(RLC_SERDES_WR_MASTER_MASK_0, 0xffffffff);
- WREG32(RLC_SERDES_WR_MASTER_MASK_1, 0xffffffff);
- WREG32(RLC_SERDES_WR_CTRL, 0x00b000ff);
+ WREG32(mmRLC_SERDES_WR_MASTER_MASK_0, 0xffffffff);
+ WREG32(mmRLC_SERDES_WR_MASTER_MASK_1, 0xffffffff);
+ WREG32(mmRLC_SERDES_WR_CTRL, 0x00b000ff);
gfx_v6_0_wait_for_rlc_serdes(adev);
gfx_v6_0_update_rlc(adev, tmp);
- WREG32(RLC_SERDES_WR_CTRL, 0x007000ff);
+ WREG32(mmRLC_SERDES_WR_CTRL, 0x007000ff);
- data |= CGCG_EN | CGLS_EN;
+ data |= RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK | RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK;
} else {
gfx_v6_0_enable_gui_idle_interrupt(adev, false);
- RREG32(CB_CGTT_SCLK_CTRL);
- RREG32(CB_CGTT_SCLK_CTRL);
- RREG32(CB_CGTT_SCLK_CTRL);
- RREG32(CB_CGTT_SCLK_CTRL);
+ RREG32(mmCB_CGTT_SCLK_CTRL);
+ RREG32(mmCB_CGTT_SCLK_CTRL);
+ RREG32(mmCB_CGTT_SCLK_CTRL);
+ RREG32(mmCB_CGTT_SCLK_CTRL);
- data &= ~(CGCG_EN | CGLS_EN);
+ data &= ~(RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK | RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK);
}
if (orig != data)
- WREG32(RLC_CGCG_CGLS_CTRL, data);
+ WREG32(mmRLC_CGCG_CGLS_CTRL, data);
}
@@ -2357,51 +2774,51 @@ static void gfx_v6_0_enable_mgcg(struct amdgpu_device *adev, bool enable)
u32 data, orig, tmp = 0;
if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG)) {
- orig = data = RREG32(CGTS_SM_CTRL_REG);
+ orig = data = RREG32(mmCGTS_SM_CTRL_REG);
data = 0x96940200;
if (orig != data)
- WREG32(CGTS_SM_CTRL_REG, data);
+ WREG32(mmCGTS_SM_CTRL_REG, data);
if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CP_LS) {
- orig = data = RREG32(CP_MEM_SLP_CNTL);
- data |= CP_MEM_LS_EN;
+ orig = data = RREG32(mmCP_MEM_SLP_CNTL);
+ data |= CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK;
if (orig != data)
- WREG32(CP_MEM_SLP_CNTL, data);
+ WREG32(mmCP_MEM_SLP_CNTL, data);
}
- orig = data = RREG32(RLC_CGTT_MGCG_OVERRIDE);
+ orig = data = RREG32(mmRLC_CGTT_MGCG_OVERRIDE);
data &= 0xffffffc0;
if (orig != data)
- WREG32(RLC_CGTT_MGCG_OVERRIDE, data);
+ WREG32(mmRLC_CGTT_MGCG_OVERRIDE, data);
tmp = gfx_v6_0_halt_rlc(adev);
- WREG32(RLC_SERDES_WR_MASTER_MASK_0, 0xffffffff);
- WREG32(RLC_SERDES_WR_MASTER_MASK_1, 0xffffffff);
- WREG32(RLC_SERDES_WR_CTRL, 0x00d000ff);
+ WREG32(mmRLC_SERDES_WR_MASTER_MASK_0, 0xffffffff);
+ WREG32(mmRLC_SERDES_WR_MASTER_MASK_1, 0xffffffff);
+ WREG32(mmRLC_SERDES_WR_CTRL, 0x00d000ff);
gfx_v6_0_update_rlc(adev, tmp);
} else {
- orig = data = RREG32(RLC_CGTT_MGCG_OVERRIDE);
+ orig = data = RREG32(mmRLC_CGTT_MGCG_OVERRIDE);
data |= 0x00000003;
if (orig != data)
- WREG32(RLC_CGTT_MGCG_OVERRIDE, data);
+ WREG32(mmRLC_CGTT_MGCG_OVERRIDE, data);
- data = RREG32(CP_MEM_SLP_CNTL);
- if (data & CP_MEM_LS_EN) {
- data &= ~CP_MEM_LS_EN;
- WREG32(CP_MEM_SLP_CNTL, data);
+ data = RREG32(mmCP_MEM_SLP_CNTL);
+ if (data & CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK) {
+ data &= ~CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK;
+ WREG32(mmCP_MEM_SLP_CNTL, data);
}
- orig = data = RREG32(CGTS_SM_CTRL_REG);
- data |= LS_OVERRIDE | OVERRIDE;
+ orig = data = RREG32(mmCGTS_SM_CTRL_REG);
+ data |= CGTS_SM_CTRL_REG__LS_OVERRIDE_MASK | CGTS_SM_CTRL_REG__OVERRIDE_MASK;
if (orig != data)
- WREG32(CGTS_SM_CTRL_REG, data);
+ WREG32(mmCGTS_SM_CTRL_REG, data);
tmp = gfx_v6_0_halt_rlc(adev);
- WREG32(RLC_SERDES_WR_MASTER_MASK_0, 0xffffffff);
- WREG32(RLC_SERDES_WR_MASTER_MASK_1, 0xffffffff);
- WREG32(RLC_SERDES_WR_CTRL, 0x00e000ff);
+ WREG32(mmRLC_SERDES_WR_MASTER_MASK_0, 0xffffffff);
+ WREG32(mmRLC_SERDES_WR_MASTER_MASK_1, 0xffffffff);
+ WREG32(mmRLC_SERDES_WR_CTRL, 0x00e000ff);
gfx_v6_0_update_rlc(adev, tmp);
}
@@ -2421,6 +2838,7 @@ static void gfx_v6_0_update_cg(struct amdgpu_device *adev,
gfx_v6_0_enable_gui_idle_interrupt(adev, true);
}
*/
+
static void gfx_v6_0_enable_sclk_slowdown_on_pu(struct amdgpu_device *adev,
bool enable)
{
@@ -2435,13 +2853,13 @@ static void gfx_v6_0_enable_cp_pg(struct amdgpu_device *adev, bool enable)
{
u32 data, orig;
- orig = data = RREG32(RLC_PG_CNTL);
+ orig = data = RREG32(mmRLC_PG_CNTL);
if (enable && (adev->pg_flags & AMD_PG_SUPPORT_CP))
data &= ~0x8000;
else
data |= 0x8000;
if (orig != data)
- WREG32(RLC_PG_CNTL, data);
+ WREG32(mmRLC_PG_CNTL, data);
}
static void gfx_v6_0_enable_gds_pg(struct amdgpu_device *adev, bool enable)
@@ -2518,26 +2936,13 @@ static void gfx_v6_0_init_cp_pg_table(struct amdgpu_device *adev)
static void gfx_v6_0_enable_gfx_cgpg(struct amdgpu_device *adev,
bool enable)
{
-
- u32 tmp;
-
if (enable && (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG)) {
- tmp = RLC_PUD(0x10) | RLC_PDD(0x10) | RLC_TTPD(0x10) | RLC_MSD(0x10);
- WREG32(RLC_TTOP_D, tmp);
-
- tmp = RREG32(RLC_PG_CNTL);
- tmp |= GFX_PG_ENABLE;
- WREG32(RLC_PG_CNTL, tmp);
-
- tmp = RREG32(RLC_AUTO_PG_CTRL);
- tmp |= AUTO_PG_EN;
- WREG32(RLC_AUTO_PG_CTRL, tmp);
+ WREG32(mmRLC_TTOP_D, RLC_PUD(0x10) | RLC_PDD(0x10) | RLC_TTPD(0x10) | RLC_MSD(0x10));
+ WREG32_FIELD(RLC_PG_CNTL, GFX_POWER_GATING_ENABLE, 1);
+ WREG32_FIELD(RLC_AUTO_PG_CTRL, AUTO_PG_EN, 1);
} else {
- tmp = RREG32(RLC_AUTO_PG_CTRL);
- tmp &= ~AUTO_PG_EN;
- WREG32(RLC_AUTO_PG_CTRL, tmp);
-
- tmp = RREG32(DB_RENDER_CONTROL);
+ WREG32_FIELD(RLC_AUTO_PG_CTRL, AUTO_PG_EN, 0);
+ (void)RREG32(mmDB_RENDER_CONTROL);
}
}
@@ -2550,8 +2955,8 @@ static u32 gfx_v6_0_get_cu_active_bitmap(struct amdgpu_device *adev,
mutex_lock(&adev->grbm_idx_mutex);
gfx_v6_0_select_se_sh(adev, se, sh, 0xffffffff);
- tmp = RREG32(CC_GC_SHADER_ARRAY_CONFIG);
- tmp1 = RREG32(GC_USER_SHADER_ARRAY_CONFIG);
+ tmp = RREG32(mmCC_GC_SHADER_ARRAY_CONFIG);
+ tmp1 = RREG32(mmGC_USER_SHADER_ARRAY_CONFIG);
gfx_v6_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
mutex_unlock(&adev->grbm_idx_mutex);
@@ -2594,12 +2999,8 @@ static void gfx_v6_0_init_ao_cu_mask(struct amdgpu_device *adev)
}
}
- WREG32(RLC_PG_AO_CU_MASK, tmp);
-
- tmp = RREG32(RLC_MAX_PG_CU);
- tmp &= ~MAX_PU_CU_MASK;
- tmp |= MAX_PU_CU(active_cu_number);
- WREG32(RLC_MAX_PG_CU, tmp);
+ WREG32(mmRLC_PG_AO_CU_MASK, tmp);
+ WREG32_FIELD(RLC_MAX_PG_CU, MAX_POWERED_UP_CU, active_cu_number);
}
static void gfx_v6_0_enable_gfx_static_mgpg(struct amdgpu_device *adev,
@@ -2607,13 +3008,13 @@ static void gfx_v6_0_enable_gfx_static_mgpg(struct amdgpu_device *adev,
{
u32 data, orig;
- orig = data = RREG32(RLC_PG_CNTL);
+ orig = data = RREG32(mmRLC_PG_CNTL);
if (enable && (adev->pg_flags & AMD_PG_SUPPORT_GFX_SMG))
- data |= STATIC_PER_CU_PG_ENABLE;
+ data |= RLC_PG_CNTL__STATIC_PER_CU_PG_ENABLE_MASK;
else
- data &= ~STATIC_PER_CU_PG_ENABLE;
+ data &= ~RLC_PG_CNTL__STATIC_PER_CU_PG_ENABLE_MASK;
if (orig != data)
- WREG32(RLC_PG_CNTL, data);
+ WREG32(mmRLC_PG_CNTL, data);
}
static void gfx_v6_0_enable_gfx_dynamic_mgpg(struct amdgpu_device *adev,
@@ -2621,33 +3022,28 @@ static void gfx_v6_0_enable_gfx_dynamic_mgpg(struct amdgpu_device *adev,
{
u32 data, orig;
- orig = data = RREG32(RLC_PG_CNTL);
+ orig = data = RREG32(mmRLC_PG_CNTL);
if (enable && (adev->pg_flags & AMD_PG_SUPPORT_GFX_DMG))
- data |= DYN_PER_CU_PG_ENABLE;
+ data |= RLC_PG_CNTL__DYN_PER_CU_PG_ENABLE_MASK;
else
- data &= ~DYN_PER_CU_PG_ENABLE;
+ data &= ~RLC_PG_CNTL__DYN_PER_CU_PG_ENABLE_MASK;
if (orig != data)
- WREG32(RLC_PG_CNTL, data);
+ WREG32(mmRLC_PG_CNTL, data);
}
static void gfx_v6_0_init_gfx_cgpg(struct amdgpu_device *adev)
{
u32 tmp;
- WREG32(RLC_SAVE_AND_RESTORE_BASE, adev->gfx.rlc.save_restore_gpu_addr >> 8);
-
- tmp = RREG32(RLC_PG_CNTL);
- tmp |= GFX_PG_SRC;
- WREG32(RLC_PG_CNTL, tmp);
-
- WREG32(RLC_CLEAR_STATE_RESTORE_BASE, adev->gfx.rlc.clear_state_gpu_addr >> 8);
+ WREG32(mmRLC_SAVE_AND_RESTORE_BASE, adev->gfx.rlc.save_restore_gpu_addr >> 8);
+ WREG32_FIELD(RLC_PG_CNTL, GFX_POWER_GATING_SRC, 1);
+ WREG32(mmRLC_CLEAR_STATE_RESTORE_BASE, adev->gfx.rlc.clear_state_gpu_addr >> 8);
- tmp = RREG32(RLC_AUTO_PG_CTRL);
-
- tmp &= ~GRBM_REG_SGIT_MASK;
- tmp |= GRBM_REG_SGIT(0x700);
- tmp &= ~PG_AFTER_GRBM_REG_ST_MASK;
- WREG32(RLC_AUTO_PG_CTRL, tmp);
+ tmp = RREG32(mmRLC_AUTO_PG_CTRL);
+ tmp &= ~RLC_AUTO_PG_CTRL__GRBM_REG_SAVE_GFX_IDLE_THRESHOLD_MASK;
+ tmp |= (0x700 << RLC_AUTO_PG_CTRL__GRBM_REG_SAVE_GFX_IDLE_THRESHOLD__SHIFT);
+ tmp &= ~RLC_AUTO_PG_CTRL__PG_AFTER_GRBM_REG_SAVE_THRESHOLD_MASK;
+ WREG32(mmRLC_AUTO_PG_CTRL, tmp);
}
static void gfx_v6_0_update_gfx_pg(struct amdgpu_device *adev, bool enable)
@@ -2703,7 +3099,6 @@ static void gfx_v6_0_get_csb_buffer(struct amdgpu_device *adev,
buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
-
buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CONTEXT_CONTROL, 1));
buffer[count++] = cpu_to_le32(0x80000000);
buffer[count++] = cpu_to_le32(0x80000000);
@@ -2723,7 +3118,7 @@ static void gfx_v6_0_get_csb_buffer(struct amdgpu_device *adev,
}
buffer[count++] = cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, 1));
- buffer[count++] = cpu_to_le32(PA_SC_RASTER_CONFIG - PACKET3_SET_CONTEXT_REG_START);
+ buffer[count++] = cpu_to_le32(mmPA_SC_RASTER_CONFIG - PACKET3_SET_CONTEXT_REG_START);
switch (adev->asic_type) {
case CHIP_TAHITI:
@@ -2766,16 +3161,16 @@ static void gfx_v6_0_init_pg(struct amdgpu_device *adev)
gfx_v6_0_enable_cp_pg(adev, true);
gfx_v6_0_enable_gds_pg(adev, true);
} else {
- WREG32(RLC_SAVE_AND_RESTORE_BASE, adev->gfx.rlc.save_restore_gpu_addr >> 8);
- WREG32(RLC_CLEAR_STATE_RESTORE_BASE, adev->gfx.rlc.clear_state_gpu_addr >> 8);
+ WREG32(mmRLC_SAVE_AND_RESTORE_BASE, adev->gfx.rlc.save_restore_gpu_addr >> 8);
+ WREG32(mmRLC_CLEAR_STATE_RESTORE_BASE, adev->gfx.rlc.clear_state_gpu_addr >> 8);
}
gfx_v6_0_init_ao_cu_mask(adev);
gfx_v6_0_update_gfx_pg(adev, true);
} else {
- WREG32(RLC_SAVE_AND_RESTORE_BASE, adev->gfx.rlc.save_restore_gpu_addr >> 8);
- WREG32(RLC_CLEAR_STATE_RESTORE_BASE, adev->gfx.rlc.clear_state_gpu_addr >> 8);
+ WREG32(mmRLC_SAVE_AND_RESTORE_BASE, adev->gfx.rlc.save_restore_gpu_addr >> 8);
+ WREG32(mmRLC_CLEAR_STATE_RESTORE_BASE, adev->gfx.rlc.clear_state_gpu_addr >> 8);
}
}
@@ -2800,50 +3195,86 @@ static uint64_t gfx_v6_0_get_gpu_clock_counter(struct amdgpu_device *adev)
uint64_t clock;
mutex_lock(&adev->gfx.gpu_clock_mutex);
- WREG32(RLC_CAPTURE_GPU_CLOCK_COUNT, 1);
- clock = (uint64_t)RREG32(RLC_GPU_CLOCK_COUNT_LSB) |
- ((uint64_t)RREG32(RLC_GPU_CLOCK_COUNT_MSB) << 32ULL);
+ WREG32(mmRLC_CAPTURE_GPU_CLOCK_COUNT, 1);
+ clock = (uint64_t)RREG32(mmRLC_GPU_CLOCK_COUNT_LSB) |
+ ((uint64_t)RREG32(mmRLC_GPU_CLOCK_COUNT_MSB) << 32ULL);
mutex_unlock(&adev->gfx.gpu_clock_mutex);
return clock;
}
static void gfx_v6_ring_emit_cntxcntl(struct amdgpu_ring *ring, uint32_t flags)
{
+ if (flags & AMDGPU_HAVE_CTX_SWITCH)
+ gfx_v6_0_ring_emit_vgt_flush(ring);
amdgpu_ring_write(ring, PACKET3(PACKET3_CONTEXT_CONTROL, 1));
amdgpu_ring_write(ring, 0x80000000);
amdgpu_ring_write(ring, 0);
}
-static unsigned gfx_v6_0_ring_get_emit_ib_size(struct amdgpu_ring *ring)
+
+static uint32_t wave_read_ind(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t address)
{
- return
- 6; /* gfx_v6_0_ring_emit_ib */
+ WREG32(mmSQ_IND_INDEX,
+ (wave << SQ_IND_INDEX__WAVE_ID__SHIFT) |
+ (simd << SQ_IND_INDEX__SIMD_ID__SHIFT) |
+ (address << SQ_IND_INDEX__INDEX__SHIFT) |
+ (SQ_IND_INDEX__FORCE_READ_MASK));
+ return RREG32(mmSQ_IND_DATA);
}
-static unsigned gfx_v6_0_ring_get_dma_frame_size_gfx(struct amdgpu_ring *ring)
+static void wave_read_regs(struct amdgpu_device *adev, uint32_t simd,
+ uint32_t wave, uint32_t thread,
+ uint32_t regno, uint32_t num, uint32_t *out)
{
- return
- 5 + /* gfx_v6_0_ring_emit_hdp_flush */
- 5 + /* gfx_v6_0_ring_emit_hdp_invalidate */
- 14 + 14 + 14 + /* gfx_v6_0_ring_emit_fence x3 for user fence, vm fence */
- 7 + 4 + /* gfx_v6_0_ring_emit_pipeline_sync */
- 17 + 6 + /* gfx_v6_0_ring_emit_vm_flush */
- 3; /* gfx_v6_ring_emit_cntxcntl */
+ WREG32(mmSQ_IND_INDEX,
+ (wave << SQ_IND_INDEX__WAVE_ID__SHIFT) |
+ (simd << SQ_IND_INDEX__SIMD_ID__SHIFT) |
+ (regno << SQ_IND_INDEX__INDEX__SHIFT) |
+ (thread << SQ_IND_INDEX__THREAD_ID__SHIFT) |
+ (SQ_IND_INDEX__FORCE_READ_MASK) |
+ (SQ_IND_INDEX__AUTO_INCR_MASK));
+ while (num--)
+ *(out++) = RREG32(mmSQ_IND_DATA);
}
-static unsigned gfx_v6_0_ring_get_dma_frame_size_compute(struct amdgpu_ring *ring)
+static void gfx_v6_0_read_wave_data(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t *dst, int *no_fields)
{
- return
- 5 + /* gfx_v6_0_ring_emit_hdp_flush */
- 5 + /* gfx_v6_0_ring_emit_hdp_invalidate */
- 7 + /* gfx_v6_0_ring_emit_pipeline_sync */
- 17 + /* gfx_v6_0_ring_emit_vm_flush */
- 14 + 14 + 14; /* gfx_v6_0_ring_emit_fence x3 for user fence, vm fence */
+ /* type 0 wave data */
+ dst[(*no_fields)++] = 0;
+ dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_STATUS);
+ dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_PC_LO);
+ dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_PC_HI);
+ dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_EXEC_LO);
+ dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_EXEC_HI);
+ dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_HW_ID);
+ dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_INST_DW0);
+ dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_INST_DW1);
+ dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_GPR_ALLOC);
+ dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_LDS_ALLOC);
+ dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TRAPSTS);
+ dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_IB_STS);
+ dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TBA_LO);
+ dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TBA_HI);
+ dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TMA_LO);
+ dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TMA_HI);
+ dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_IB_DBG0);
+ dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_M0);
+}
+
+static void gfx_v6_0_read_wave_sgprs(struct amdgpu_device *adev, uint32_t simd,
+ uint32_t wave, uint32_t start,
+ uint32_t size, uint32_t *dst)
+{
+ wave_read_regs(
+ adev, simd, wave, 0,
+ start + SQIND_WAVE_SGPRS_OFFSET, size, dst);
}
static const struct amdgpu_gfx_funcs gfx_v6_0_gfx_funcs = {
.get_gpu_clock_counter = &gfx_v6_0_get_gpu_clock_counter,
.select_se_sh = &gfx_v6_0_select_se_sh,
+ .read_wave_data = &gfx_v6_0_read_wave_data,
+ .read_wave_sgprs = &gfx_v6_0_read_wave_sgprs,
};
static int gfx_v6_0_early_init(void *handle)
@@ -2896,9 +3327,7 @@ static int gfx_v6_0_sw_init(void *handle)
ring->ring_obj = NULL;
sprintf(ring->name, "gfx");
r = amdgpu_ring_init(adev, ring, 1024,
- 0x80000000, 0xf,
- &adev->gfx.eop_irq, AMDGPU_CP_IRQ_GFX_EOP,
- AMDGPU_RING_TYPE_GFX);
+ &adev->gfx.eop_irq, AMDGPU_CP_IRQ_GFX_EOP);
if (r)
return r;
}
@@ -2920,9 +3349,7 @@ static int gfx_v6_0_sw_init(void *handle)
sprintf(ring->name, "comp %d.%d.%d", ring->me, ring->pipe, ring->queue);
irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP + ring->pipe;
r = amdgpu_ring_init(adev, ring, 1024,
- 0x80000000, 0xf,
- &adev->gfx.eop_irq, irq_type,
- AMDGPU_RING_TYPE_COMPUTE);
+ &adev->gfx.eop_irq, irq_type);
if (r)
return r;
}
@@ -2998,7 +3425,7 @@ static bool gfx_v6_0_is_idle(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- if (RREG32(GRBM_STATUS) & GRBM_STATUS__GUI_ACTIVE_MASK)
+ if (RREG32(mmGRBM_STATUS) & GRBM_STATUS__GUI_ACTIVE_MASK)
return false;
else
return true;
@@ -3029,14 +3456,14 @@ static void gfx_v6_0_set_gfx_eop_interrupt_state(struct amdgpu_device *adev,
switch (state) {
case AMDGPU_IRQ_STATE_DISABLE:
- cp_int_cntl = RREG32(CP_INT_CNTL_RING0);
- cp_int_cntl &= ~CP_INT_CNTL_RING__TIME_STAMP_INT_ENABLE_MASK;
- WREG32(CP_INT_CNTL_RING0, cp_int_cntl);
+ cp_int_cntl = RREG32(mmCP_INT_CNTL_RING0);
+ cp_int_cntl &= ~CP_INT_CNTL_RING0__TIME_STAMP_INT_ENABLE_MASK;
+ WREG32(mmCP_INT_CNTL_RING0, cp_int_cntl);
break;
case AMDGPU_IRQ_STATE_ENABLE:
- cp_int_cntl = RREG32(CP_INT_CNTL_RING0);
- cp_int_cntl |= CP_INT_CNTL_RING__TIME_STAMP_INT_ENABLE_MASK;
- WREG32(CP_INT_CNTL_RING0, cp_int_cntl);
+ cp_int_cntl = RREG32(mmCP_INT_CNTL_RING0);
+ cp_int_cntl |= CP_INT_CNTL_RING0__TIME_STAMP_INT_ENABLE_MASK;
+ WREG32(mmCP_INT_CNTL_RING0, cp_int_cntl);
break;
default:
break;
@@ -3051,27 +3478,27 @@ static void gfx_v6_0_set_compute_eop_interrupt_state(struct amdgpu_device *adev,
switch (state) {
case AMDGPU_IRQ_STATE_DISABLE:
if (ring == 0) {
- cp_int_cntl = RREG32(CP_INT_CNTL_RING1);
- cp_int_cntl &= ~CP_INT_CNTL_RING__TIME_STAMP_INT_ENABLE_MASK;
- WREG32(CP_INT_CNTL_RING1, cp_int_cntl);
+ cp_int_cntl = RREG32(mmCP_INT_CNTL_RING1);
+ cp_int_cntl &= ~CP_INT_CNTL_RING1__TIME_STAMP_INT_ENABLE_MASK;
+ WREG32(mmCP_INT_CNTL_RING1, cp_int_cntl);
break;
} else {
- cp_int_cntl = RREG32(CP_INT_CNTL_RING2);
- cp_int_cntl &= ~CP_INT_CNTL_RING__TIME_STAMP_INT_ENABLE_MASK;
- WREG32(CP_INT_CNTL_RING2, cp_int_cntl);
+ cp_int_cntl = RREG32(mmCP_INT_CNTL_RING2);
+ cp_int_cntl &= ~CP_INT_CNTL_RING2__TIME_STAMP_INT_ENABLE_MASK;
+ WREG32(mmCP_INT_CNTL_RING2, cp_int_cntl);
break;
}
case AMDGPU_IRQ_STATE_ENABLE:
if (ring == 0) {
- cp_int_cntl = RREG32(CP_INT_CNTL_RING1);
- cp_int_cntl |= CP_INT_CNTL_RING__TIME_STAMP_INT_ENABLE_MASK;
- WREG32(CP_INT_CNTL_RING1, cp_int_cntl);
+ cp_int_cntl = RREG32(mmCP_INT_CNTL_RING1);
+ cp_int_cntl |= CP_INT_CNTL_RING1__TIME_STAMP_INT_ENABLE_MASK;
+ WREG32(mmCP_INT_CNTL_RING1, cp_int_cntl);
break;
} else {
- cp_int_cntl = RREG32(CP_INT_CNTL_RING2);
- cp_int_cntl |= CP_INT_CNTL_RING__TIME_STAMP_INT_ENABLE_MASK;
- WREG32(CP_INT_CNTL_RING2, cp_int_cntl);
+ cp_int_cntl = RREG32(mmCP_INT_CNTL_RING2);
+ cp_int_cntl |= CP_INT_CNTL_RING2__TIME_STAMP_INT_ENABLE_MASK;
+ WREG32(mmCP_INT_CNTL_RING2, cp_int_cntl);
break;
}
@@ -3092,14 +3519,14 @@ static int gfx_v6_0_set_priv_reg_fault_state(struct amdgpu_device *adev,
switch (state) {
case AMDGPU_IRQ_STATE_DISABLE:
- cp_int_cntl = RREG32(CP_INT_CNTL_RING0);
+ cp_int_cntl = RREG32(mmCP_INT_CNTL_RING0);
cp_int_cntl &= ~CP_INT_CNTL_RING0__PRIV_REG_INT_ENABLE_MASK;
- WREG32(CP_INT_CNTL_RING0, cp_int_cntl);
+ WREG32(mmCP_INT_CNTL_RING0, cp_int_cntl);
break;
case AMDGPU_IRQ_STATE_ENABLE:
- cp_int_cntl = RREG32(CP_INT_CNTL_RING0);
+ cp_int_cntl = RREG32(mmCP_INT_CNTL_RING0);
cp_int_cntl |= CP_INT_CNTL_RING0__PRIV_REG_INT_ENABLE_MASK;
- WREG32(CP_INT_CNTL_RING0, cp_int_cntl);
+ WREG32(mmCP_INT_CNTL_RING0, cp_int_cntl);
break;
default:
break;
@@ -3117,14 +3544,14 @@ static int gfx_v6_0_set_priv_inst_fault_state(struct amdgpu_device *adev,
switch (state) {
case AMDGPU_IRQ_STATE_DISABLE:
- cp_int_cntl = RREG32(CP_INT_CNTL_RING0);
+ cp_int_cntl = RREG32(mmCP_INT_CNTL_RING0);
cp_int_cntl &= ~CP_INT_CNTL_RING0__PRIV_INSTR_INT_ENABLE_MASK;
- WREG32(CP_INT_CNTL_RING0, cp_int_cntl);
+ WREG32(mmCP_INT_CNTL_RING0, cp_int_cntl);
break;
case AMDGPU_IRQ_STATE_ENABLE:
- cp_int_cntl = RREG32(CP_INT_CNTL_RING0);
+ cp_int_cntl = RREG32(mmCP_INT_CNTL_RING0);
cp_int_cntl |= CP_INT_CNTL_RING0__PRIV_INSTR_INT_ENABLE_MASK;
- WREG32(CP_INT_CNTL_RING0, cp_int_cntl);
+ WREG32(mmCP_INT_CNTL_RING0, cp_int_cntl);
break;
default:
break;
@@ -3164,7 +3591,7 @@ static int gfx_v6_0_eop_irq(struct amdgpu_device *adev,
break;
case 1:
case 2:
- amdgpu_fence_process(&adev->gfx.compute_ring[entry->ring_id -1]);
+ amdgpu_fence_process(&adev->gfx.compute_ring[entry->ring_id - 1]);
break;
default:
break;
@@ -3237,7 +3664,7 @@ static int gfx_v6_0_set_powergating_state(void *handle,
return 0;
}
-const struct amd_ip_funcs gfx_v6_0_ip_funcs = {
+static const struct amd_ip_funcs gfx_v6_0_ip_funcs = {
.name = "gfx_v6_0",
.early_init = gfx_v6_0_early_init,
.late_init = NULL,
@@ -3255,10 +3682,20 @@ const struct amd_ip_funcs gfx_v6_0_ip_funcs = {
};
static const struct amdgpu_ring_funcs gfx_v6_0_ring_funcs_gfx = {
+ .type = AMDGPU_RING_TYPE_GFX,
+ .align_mask = 0xff,
+ .nop = 0x80000000,
.get_rptr = gfx_v6_0_ring_get_rptr,
.get_wptr = gfx_v6_0_ring_get_wptr,
.set_wptr = gfx_v6_0_ring_set_wptr_gfx,
- .parse_cs = NULL,
+ .emit_frame_size =
+ 5 + /* gfx_v6_0_ring_emit_hdp_flush */
+ 5 + /* gfx_v6_0_ring_emit_hdp_invalidate */
+ 14 + 14 + 14 + /* gfx_v6_0_ring_emit_fence x3 for user fence, vm fence */
+ 7 + 4 + /* gfx_v6_0_ring_emit_pipeline_sync */
+ 17 + 6 + /* gfx_v6_0_ring_emit_vm_flush */
+ 3 + 2, /* gfx_v6_ring_emit_cntxcntl including vgt flush */
+ .emit_ib_size = 6, /* gfx_v6_0_ring_emit_ib */
.emit_ib = gfx_v6_0_ring_emit_ib,
.emit_fence = gfx_v6_0_ring_emit_fence,
.emit_pipeline_sync = gfx_v6_0_ring_emit_pipeline_sync,
@@ -3269,15 +3706,22 @@ static const struct amdgpu_ring_funcs gfx_v6_0_ring_funcs_gfx = {
.test_ib = gfx_v6_0_ring_test_ib,
.insert_nop = amdgpu_ring_insert_nop,
.emit_cntxcntl = gfx_v6_ring_emit_cntxcntl,
- .get_emit_ib_size = gfx_v6_0_ring_get_emit_ib_size,
- .get_dma_frame_size = gfx_v6_0_ring_get_dma_frame_size_gfx,
};
static const struct amdgpu_ring_funcs gfx_v6_0_ring_funcs_compute = {
+ .type = AMDGPU_RING_TYPE_COMPUTE,
+ .align_mask = 0xff,
+ .nop = 0x80000000,
.get_rptr = gfx_v6_0_ring_get_rptr,
.get_wptr = gfx_v6_0_ring_get_wptr,
.set_wptr = gfx_v6_0_ring_set_wptr_compute,
- .parse_cs = NULL,
+ .emit_frame_size =
+ 5 + /* gfx_v6_0_ring_emit_hdp_flush */
+ 5 + /* gfx_v6_0_ring_emit_hdp_invalidate */
+ 7 + /* gfx_v6_0_ring_emit_pipeline_sync */
+ 17 + /* gfx_v6_0_ring_emit_vm_flush */
+ 14 + 14 + 14, /* gfx_v6_0_ring_emit_fence x3 for user fence, vm fence */
+ .emit_ib_size = 6, /* gfx_v6_0_ring_emit_ib */
.emit_ib = gfx_v6_0_ring_emit_ib,
.emit_fence = gfx_v6_0_ring_emit_fence,
.emit_pipeline_sync = gfx_v6_0_ring_emit_pipeline_sync,
@@ -3287,8 +3731,6 @@ static const struct amdgpu_ring_funcs gfx_v6_0_ring_funcs_compute = {
.test_ring = gfx_v6_0_ring_test_ring,
.test_ib = gfx_v6_0_ring_test_ib,
.insert_nop = amdgpu_ring_insert_nop,
- .get_emit_ib_size = gfx_v6_0_ring_get_emit_ib_size,
- .get_dma_frame_size = gfx_v6_0_ring_get_dma_frame_size_compute,
};
static void gfx_v6_0_set_ring_funcs(struct amdgpu_device *adev)
@@ -3360,3 +3802,12 @@ static void gfx_v6_0_get_cu_info(struct amdgpu_device *adev)
cu_info->number = active_cu_number;
cu_info->ao_cu_mask = ao_cu_mask;
}
+
+const struct amdgpu_ip_block_version gfx_v6_0_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_GFX,
+ .major = 6,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &gfx_v6_0_ip_funcs,
+};
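The read_wave_data()/read_wave_sgprs() callbacks added above all go through the same SQ_IND_INDEX/SQ_IND_DATA indirect-access idiom: pack the wave, SIMD, and register selectors into the index register, then read values back through the data register, optionally with hardware auto-increment. Below is a minimal user-space sketch of that packing; the shift/mask values and the register stubs are illustrative stand-ins, not the real SI register layout.

#include <stdint.h>
#include <stdio.h>

/* Illustrative field layout only -- stand-ins for the generated
 * SQ_IND_INDEX__*__SHIFT/_MASK constants, not the real SI values. */
#define WAVE_ID_SHIFT    0
#define SIMD_ID_SHIFT    4
#define INDEX_SHIFT      8
#define THREAD_ID_SHIFT 24
#define FORCE_READ_MASK (1u << 30)
#define AUTO_INCR_MASK  (1u << 31)

static uint32_t sq_ind_index;          /* fake index register */
static uint32_t fake_sq_data = 0x1000; /* fake data register */

static void WREG32(uint32_t val) { sq_ind_index = val; }

static uint32_t RREG32(void)
{
	/* auto-increment the INDEX field the way the hardware would */
	if (sq_ind_index & AUTO_INCR_MASK)
		sq_ind_index += 1u << INDEX_SHIFT;
	return fake_sq_data++;
}

/* pack the selector the same way wave_read_regs() does above */
static void wave_read_regs(uint32_t simd, uint32_t wave, uint32_t thread,
			   uint32_t regno, uint32_t num, uint32_t *out)
{
	WREG32((wave << WAVE_ID_SHIFT) |
	       (simd << SIMD_ID_SHIFT) |
	       (regno << INDEX_SHIFT) |
	       (thread << THREAD_ID_SHIFT) |
	       FORCE_READ_MASK | AUTO_INCR_MASK);
	while (num--)
		*(out++) = RREG32();
}

int main(void)
{
	uint32_t sgprs[4];
	int i;

	wave_read_regs(1, 2, 0, 0x200, 4, sgprs);
	for (i = 0; i < 4; i++)
		printf("sgpr[%d] = 0x%x\n", i, sgprs[i]);
	return 0;
}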
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.h b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.h
index b9657e72b248..ced6fc42f688 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.h
@@ -24,6 +24,6 @@
#ifndef __GFX_V6_0_H__
#define __GFX_V6_0_H__
-extern const struct amd_ip_funcs gfx_v6_0_ip_funcs;
+extern const struct amdgpu_ip_block_version gfx_v6_0_ip_block;
#endif
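The fence to dma_fence conversion threaded through these files preserves the wait-with-timeout return convention the IB tests depend on: dma_fence_wait_timeout() returns the remaining timeout on success, 0 on timeout, and a negative error code on failure. A self-contained sketch of that three-way handling, with a stub standing in for the real dma_fence API:

#include <stdio.h>

#define ETIMEDOUT 110

/* stand-in for dma_fence_wait_timeout(): returns remaining time on
 * success, 0 on timeout, negative on error -- the convention the
 * IB-test error paths above rely on */
static long fake_fence_wait_timeout(long timeout)
{
	return timeout / 2; /* pretend the fence signaled halfway in */
}

static int ib_test(long timeout)
{
	long r = fake_fence_wait_timeout(timeout);

	if (r == 0) {
		fprintf(stderr, "IB test timed out\n");
		return -ETIMEDOUT;
	} else if (r < 0) {
		fprintf(stderr, "fence wait failed (%ld)\n", r);
		return (int)r;
	}
	return 0; /* fence signaled within the budget */
}

int main(void)
{
	printf("ib_test: %d\n", ib_test(100));
	return 0;
}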
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
index 71116da9e782..c4e14015ec5b 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
@@ -2077,9 +2077,9 @@ static int gfx_v7_0_ring_test_ring(struct amdgpu_ring *ring)
static void gfx_v7_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
{
u32 ref_and_mask;
- int usepfp = ring->type == AMDGPU_RING_TYPE_COMPUTE ? 0 : 1;
+ int usepfp = ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE ? 0 : 1;
- if (ring->type == AMDGPU_RING_TYPE_COMPUTE) {
+ if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) {
switch (ring->me) {
case 1:
ref_and_mask = GPU_HDP_FLUSH_DONE__CP2_MASK << ring->pipe;
@@ -2105,6 +2105,18 @@ static void gfx_v7_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
amdgpu_ring_write(ring, 0x20); /* poll interval */
}
+static void gfx_v7_0_ring_emit_vgt_flush(struct amdgpu_ring *ring)
+{
+ amdgpu_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE, 0));
+ amdgpu_ring_write(ring, EVENT_TYPE(VS_PARTIAL_FLUSH) |
+ EVENT_INDEX(4));
+
+ amdgpu_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE, 0));
+ amdgpu_ring_write(ring, EVENT_TYPE(VGT_FLUSH) |
+ EVENT_INDEX(0));
+}
+
/**
* gfx_v7_0_ring_emit_hdp_invalidate - emit an hdp invalidate on the cp
*
@@ -2260,6 +2272,7 @@ static void gfx_v7_ring_emit_cntxcntl(struct amdgpu_ring *ring, uint32_t flags)
dw2 |= 0x80000000; /* set load_enable otherwise this package is just NOPs */
if (flags & AMDGPU_HAVE_CTX_SWITCH) {
+ gfx_v7_0_ring_emit_vgt_flush(ring);
/* set load_global_config & load_global_uconfig */
dw2 |= 0x8001;
/* set load_cs_sh_regs */
@@ -2286,7 +2299,7 @@ static int gfx_v7_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
struct amdgpu_device *adev = ring->adev;
struct amdgpu_ib ib;
- struct fence *f = NULL;
+ struct dma_fence *f = NULL;
uint32_t scratch;
uint32_t tmp = 0;
long r;
@@ -2312,7 +2325,7 @@ static int gfx_v7_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
if (r)
goto err2;
- r = fence_wait_timeout(f, false, timeout);
+ r = dma_fence_wait_timeout(f, false, timeout);
if (r == 0) {
DRM_ERROR("amdgpu: IB test timed out\n");
r = -ETIMEDOUT;
@@ -2333,7 +2346,7 @@ static int gfx_v7_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
err2:
amdgpu_ib_free(adev, &ib, NULL);
- fence_put(f);
+ dma_fence_put(f);
err1:
amdgpu_gfx_scratch_free(adev, scratch);
return r;
@@ -3222,7 +3235,7 @@ static int gfx_v7_0_cp_resume(struct amdgpu_device *adev)
*/
static void gfx_v7_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
{
- int usepfp = (ring->type == AMDGPU_RING_TYPE_GFX);
+ int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
uint32_t seq = ring->fence_drv.sync_seq;
uint64_t addr = ring->fence_drv.gpu_addr;
@@ -3262,7 +3275,7 @@ static void gfx_v7_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
static void gfx_v7_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
unsigned vm_id, uint64_t pd_addr)
{
- int usepfp = (ring->type == AMDGPU_RING_TYPE_GFX);
+ int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(usepfp) |
@@ -3391,7 +3404,8 @@ static int gfx_v7_0_rlc_init(struct amdgpu_device *adev)
if (adev->gfx.rlc.save_restore_obj == NULL) {
r = amdgpu_bo_create(adev, dws * 4, PAGE_SIZE, true,
AMDGPU_GEM_DOMAIN_VRAM,
- AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
+ AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
+ AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
NULL, NULL,
&adev->gfx.rlc.save_restore_obj);
if (r) {
@@ -3435,7 +3449,8 @@ static int gfx_v7_0_rlc_init(struct amdgpu_device *adev)
if (adev->gfx.rlc.clear_state_obj == NULL) {
r = amdgpu_bo_create(adev, dws * 4, PAGE_SIZE, true,
AMDGPU_GEM_DOMAIN_VRAM,
- AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
+ AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
+ AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
NULL, NULL,
&adev->gfx.rlc.clear_state_obj);
if (r) {
@@ -3475,7 +3490,8 @@ static int gfx_v7_0_rlc_init(struct amdgpu_device *adev)
if (adev->gfx.rlc.cp_table_obj == NULL) {
r = amdgpu_bo_create(adev, adev->gfx.rlc.cp_table_size, PAGE_SIZE, true,
AMDGPU_GEM_DOMAIN_VRAM,
- AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
+ AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
+ AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
NULL, NULL,
&adev->gfx.rlc.cp_table_obj);
if (r) {
@@ -4354,44 +4370,69 @@ static void gfx_v7_0_ring_emit_gds_switch(struct amdgpu_ring *ring,
amdgpu_ring_write(ring, (1 << (oa_size + oa_base)) - (1 << oa_base));
}
-static unsigned gfx_v7_0_ring_get_emit_ib_size_gfx(struct amdgpu_ring *ring)
+static uint32_t wave_read_ind(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t address)
{
- return
- 4; /* gfx_v7_0_ring_emit_ib_gfx */
+ WREG32(mmSQ_IND_INDEX,
+ (wave << SQ_IND_INDEX__WAVE_ID__SHIFT) |
+ (simd << SQ_IND_INDEX__SIMD_ID__SHIFT) |
+ (address << SQ_IND_INDEX__INDEX__SHIFT) |
+ (SQ_IND_INDEX__FORCE_READ_MASK));
+ return RREG32(mmSQ_IND_DATA);
}
-static unsigned gfx_v7_0_ring_get_dma_frame_size_gfx(struct amdgpu_ring *ring)
+static void wave_read_regs(struct amdgpu_device *adev, uint32_t simd,
+ uint32_t wave, uint32_t thread,
+ uint32_t regno, uint32_t num, uint32_t *out)
{
- return
- 20 + /* gfx_v7_0_ring_emit_gds_switch */
- 7 + /* gfx_v7_0_ring_emit_hdp_flush */
- 5 + /* gfx_v7_0_ring_emit_hdp_invalidate */
- 12 + 12 + 12 + /* gfx_v7_0_ring_emit_fence_gfx x3 for user fence, vm fence */
- 7 + 4 + /* gfx_v7_0_ring_emit_pipeline_sync */
- 17 + 6 + /* gfx_v7_0_ring_emit_vm_flush */
- 3; /* gfx_v7_ring_emit_cntxcntl */
+ WREG32(mmSQ_IND_INDEX,
+ (wave << SQ_IND_INDEX__WAVE_ID__SHIFT) |
+ (simd << SQ_IND_INDEX__SIMD_ID__SHIFT) |
+ (regno << SQ_IND_INDEX__INDEX__SHIFT) |
+ (thread << SQ_IND_INDEX__THREAD_ID__SHIFT) |
+ (SQ_IND_INDEX__FORCE_READ_MASK) |
+ (SQ_IND_INDEX__AUTO_INCR_MASK));
+ while (num--)
+ *(out++) = RREG32(mmSQ_IND_DATA);
}
-static unsigned gfx_v7_0_ring_get_emit_ib_size_compute(struct amdgpu_ring *ring)
+static void gfx_v7_0_read_wave_data(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t *dst, int *no_fields)
{
- return
- 4; /* gfx_v7_0_ring_emit_ib_compute */
+ /* type 0 wave data */
+ dst[(*no_fields)++] = 0;
+ dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_STATUS);
+ dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_PC_LO);
+ dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_PC_HI);
+ dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_EXEC_LO);
+ dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_EXEC_HI);
+ dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_HW_ID);
+ dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_INST_DW0);
+ dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_INST_DW1);
+ dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_GPR_ALLOC);
+ dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_LDS_ALLOC);
+ dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TRAPSTS);
+ dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_IB_STS);
+ dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TBA_LO);
+ dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TBA_HI);
+ dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TMA_LO);
+ dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TMA_HI);
+ dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_IB_DBG0);
+ dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_M0);
}
-static unsigned gfx_v7_0_ring_get_dma_frame_size_compute(struct amdgpu_ring *ring)
+static void gfx_v7_0_read_wave_sgprs(struct amdgpu_device *adev, uint32_t simd,
+ uint32_t wave, uint32_t start,
+ uint32_t size, uint32_t *dst)
{
- return
- 20 + /* gfx_v7_0_ring_emit_gds_switch */
- 7 + /* gfx_v7_0_ring_emit_hdp_flush */
- 5 + /* gfx_v7_0_ring_emit_hdp_invalidate */
- 7 + /* gfx_v7_0_ring_emit_pipeline_sync */
- 17 + /* gfx_v7_0_ring_emit_vm_flush */
- 7 + 7 + 7; /* gfx_v7_0_ring_emit_fence_compute x3 for user fence, vm fence */
+ wave_read_regs(
+ adev, simd, wave, 0,
+ start + SQIND_WAVE_SGPRS_OFFSET, size, dst);
}
static const struct amdgpu_gfx_funcs gfx_v7_0_gfx_funcs = {
.get_gpu_clock_counter = &gfx_v7_0_get_gpu_clock_counter,
.select_se_sh = &gfx_v7_0_select_se_sh,
+ .read_wave_data = &gfx_v7_0_read_wave_data,
+ .read_wave_sgprs = &gfx_v7_0_read_wave_sgprs,
};
static const struct amdgpu_rlc_funcs gfx_v7_0_rlc_funcs = {
@@ -4643,9 +4684,7 @@ static int gfx_v7_0_sw_init(void *handle)
ring->ring_obj = NULL;
sprintf(ring->name, "gfx");
r = amdgpu_ring_init(adev, ring, 1024,
- PACKET3(PACKET3_NOP, 0x3FFF), 0xf,
- &adev->gfx.eop_irq, AMDGPU_CP_IRQ_GFX_EOP,
- AMDGPU_RING_TYPE_GFX);
+ &adev->gfx.eop_irq, AMDGPU_CP_IRQ_GFX_EOP);
if (r)
return r;
}
@@ -4670,9 +4709,7 @@ static int gfx_v7_0_sw_init(void *handle)
irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP + ring->pipe;
/* type-2 packets are deprecated on MEC, use type-3 instead */
r = amdgpu_ring_init(adev, ring, 1024,
- PACKET3(PACKET3_NOP, 0x3FFF), 0xf,
- &adev->gfx.eop_irq, irq_type,
- AMDGPU_RING_TYPE_COMPUTE);
+ &adev->gfx.eop_irq, irq_type);
if (r)
return r;
}
@@ -5123,7 +5160,7 @@ static int gfx_v7_0_set_powergating_state(void *handle,
return 0;
}
-const struct amd_ip_funcs gfx_v7_0_ip_funcs = {
+static const struct amd_ip_funcs gfx_v7_0_ip_funcs = {
.name = "gfx_v7_0",
.early_init = gfx_v7_0_early_init,
.late_init = gfx_v7_0_late_init,
@@ -5141,10 +5178,21 @@ const struct amd_ip_funcs gfx_v7_0_ip_funcs = {
};
static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_gfx = {
+ .type = AMDGPU_RING_TYPE_GFX,
+ .align_mask = 0xff,
+ .nop = PACKET3(PACKET3_NOP, 0x3FFF),
.get_rptr = gfx_v7_0_ring_get_rptr,
.get_wptr = gfx_v7_0_ring_get_wptr_gfx,
.set_wptr = gfx_v7_0_ring_set_wptr_gfx,
- .parse_cs = NULL,
+ .emit_frame_size =
+ 20 + /* gfx_v7_0_ring_emit_gds_switch */
+ 7 + /* gfx_v7_0_ring_emit_hdp_flush */
+ 5 + /* gfx_v7_0_ring_emit_hdp_invalidate */
+ 12 + 12 + 12 + /* gfx_v7_0_ring_emit_fence_gfx x3 for user fence, vm fence */
+ 7 + 4 + /* gfx_v7_0_ring_emit_pipeline_sync */
+ 17 + 6 + /* gfx_v7_0_ring_emit_vm_flush */
+ 3 + 4, /* gfx_v7_ring_emit_cntxcntl including vgt flush */
+ .emit_ib_size = 4, /* gfx_v7_0_ring_emit_ib_gfx */
.emit_ib = gfx_v7_0_ring_emit_ib_gfx,
.emit_fence = gfx_v7_0_ring_emit_fence_gfx,
.emit_pipeline_sync = gfx_v7_0_ring_emit_pipeline_sync,
@@ -5157,15 +5205,23 @@ static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_gfx = {
.insert_nop = amdgpu_ring_insert_nop,
.pad_ib = amdgpu_ring_generic_pad_ib,
.emit_cntxcntl = gfx_v7_ring_emit_cntxcntl,
- .get_emit_ib_size = gfx_v7_0_ring_get_emit_ib_size_gfx,
- .get_dma_frame_size = gfx_v7_0_ring_get_dma_frame_size_gfx,
};
static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_compute = {
+ .type = AMDGPU_RING_TYPE_COMPUTE,
+ .align_mask = 0xff,
+ .nop = PACKET3(PACKET3_NOP, 0x3FFF),
.get_rptr = gfx_v7_0_ring_get_rptr,
.get_wptr = gfx_v7_0_ring_get_wptr_compute,
.set_wptr = gfx_v7_0_ring_set_wptr_compute,
- .parse_cs = NULL,
+ .emit_frame_size =
+ 20 + /* gfx_v7_0_ring_emit_gds_switch */
+ 7 + /* gfx_v7_0_ring_emit_hdp_flush */
+ 5 + /* gfx_v7_0_ring_emit_hdp_invalidate */
+ 7 + /* gfx_v7_0_ring_emit_pipeline_sync */
+ 17 + /* gfx_v7_0_ring_emit_vm_flush */
+ 7 + 7 + 7, /* gfx_v7_0_ring_emit_fence_compute x3 for user fence, vm fence */
+ .emit_ib_size = 4, /* gfx_v7_0_ring_emit_ib_compute */
.emit_ib = gfx_v7_0_ring_emit_ib_compute,
.emit_fence = gfx_v7_0_ring_emit_fence_compute,
.emit_pipeline_sync = gfx_v7_0_ring_emit_pipeline_sync,
@@ -5177,8 +5233,6 @@ static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_compute = {
.test_ib = gfx_v7_0_ring_test_ib,
.insert_nop = amdgpu_ring_insert_nop,
.pad_ib = amdgpu_ring_generic_pad_ib,
- .get_emit_ib_size = gfx_v7_0_ring_get_emit_ib_size_compute,
- .get_dma_frame_size = gfx_v7_0_ring_get_dma_frame_size_compute,
};
static void gfx_v7_0_set_ring_funcs(struct amdgpu_device *adev)
@@ -5289,3 +5343,39 @@ static void gfx_v7_0_get_cu_info(struct amdgpu_device *adev)
cu_info->number = active_cu_number;
cu_info->ao_cu_mask = ao_cu_mask;
}
+
+const struct amdgpu_ip_block_version gfx_v7_0_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_GFX,
+ .major = 7,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &gfx_v7_0_ip_funcs,
+};
+
+const struct amdgpu_ip_block_version gfx_v7_1_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_GFX,
+ .major = 7,
+ .minor = 1,
+ .rev = 0,
+ .funcs = &gfx_v7_0_ip_funcs,
+};
+
+const struct amdgpu_ip_block_version gfx_v7_2_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_GFX,
+ .major = 7,
+ .minor = 2,
+ .rev = 0,
+ .funcs = &gfx_v7_0_ip_funcs,
+};
+
+const struct amdgpu_ip_block_version gfx_v7_3_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_GFX,
+ .major = 7,
+ .minor = 3,
+ .rev = 0,
+ .funcs = &gfx_v7_0_ip_funcs,
+};
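Both gfx_v6_0 and gfx_v7_0 drop the per-ring get_emit_ib_size()/get_dma_frame_size() callbacks in favour of constant .emit_ib_size/.emit_frame_size fields in amdgpu_ring_funcs: the worst-case dword counts are known at build time, so a function call per submission buys nothing. A toy model of how a constant worst-case frame size lets a submit path pre-reserve ring space follows; the struct and field names are illustrative, not the amdgpu API.

#include <stdio.h>
#include <stdbool.h>

/* illustrative mirror of the new constant-size fields */
struct ring_funcs {
	unsigned emit_frame_size; /* worst-case dwords per frame */
	unsigned emit_ib_size;    /* dwords per indirect buffer */
};

struct ring {
	const struct ring_funcs *funcs;
	unsigned wptr, rptr, size; /* all in dwords, size power of two */
};

/* reserve worst-case space up front so the emit path cannot overflow */
static bool ring_alloc_frame(const struct ring *r, unsigned num_ibs)
{
	unsigned need = r->funcs->emit_frame_size +
			num_ibs * r->funcs->emit_ib_size;
	unsigned free = (r->rptr - r->wptr - 1) & (r->size - 1);

	return need <= free;
}

int main(void)
{
	/* the per-op dword counts summed in the gfx_v6_0 table above */
	static const struct ring_funcs gfx_funcs = {
		.emit_frame_size = 5 + 5 + 14 + 14 + 14 + 7 + 4 + 17 + 6 + 3 + 2,
		.emit_ib_size = 6,
	};
	struct ring r = { .funcs = &gfx_funcs, .wptr = 0, .rptr = 0, .size = 1024 };

	printf("frame fits: %s\n", ring_alloc_frame(&r, 1) ? "yes" : "no");
	return 0;
}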
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.h b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.h
index 94e3ea147c26..2f5164cc0e53 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.h
@@ -24,6 +24,9 @@
#ifndef __GFX_V7_0_H__
#define __GFX_V7_0_H__
-extern const struct amd_ip_funcs gfx_v7_0_ip_funcs;
+extern const struct amdgpu_ip_block_version gfx_v7_0_ip_block;
+extern const struct amdgpu_ip_block_version gfx_v7_1_ip_block;
+extern const struct amdgpu_ip_block_version gfx_v7_2_ip_block;
+extern const struct amdgpu_ip_block_version gfx_v7_3_ip_block;
#endif
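A recurring simplification in the gfx_v6_0 hunks is replacing open-coded read/modify/write sequences with WREG32_FIELD(), which updates a single named field of a register. A self-contained approximation of that helper, using a fake register file and made-up mask/shift constants rather than the generated SI headers:

#include <stdint.h>
#include <stdio.h>

/* fake 16-entry register file standing in for MMIO */
static uint32_t regs[16];

static uint32_t rreg32(unsigned reg)             { return regs[reg]; }
static void     wreg32(unsigned reg, uint32_t v) { regs[reg] = v; }

/* illustrative field layout, not the real GRBM_SOFT_RESET bits */
#define GRBM_SOFT_RESET          3
#define SOFT_RESET_RLC_MASK   0x04
#define SOFT_RESET_RLC_SHIFT     2

/* read-modify-write one named field, in the spirit of the kernel's
 * WREG32_FIELD() macro used throughout the hunks above */
#define WREG32_FIELD(reg, field, val)					\
	wreg32(reg, (rreg32(reg) & ~field##_MASK) |			\
		    (((uint32_t)(val) << field##_SHIFT) & field##_MASK))

int main(void)
{
	WREG32_FIELD(GRBM_SOFT_RESET, SOFT_RESET_RLC, 1); /* assert reset */
	printf("reg = 0x%x\n", rreg32(GRBM_SOFT_RESET));
	WREG32_FIELD(GRBM_SOFT_RESET, SOFT_RESET_RLC, 0); /* deassert */
	printf("reg = 0x%x\n", rreg32(GRBM_SOFT_RESET));
	return 0;
}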
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index bb97182dc749..d0ec00986f38 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -25,6 +25,7 @@
#include "amdgpu.h"
#include "amdgpu_gfx.h"
#include "vi.h"
+#include "vi_structs.h"
#include "vid.h"
#include "amdgpu_ucode.h"
#include "amdgpu_atombios.h"
@@ -167,6 +168,7 @@ static const u32 golden_settings_tonga_a11[] =
mmPA_SC_ENHANCE, 0xffffffff, 0x20000001,
mmPA_SC_FIFO_DEPTH_CNTL, 0x000003ff, 0x000000fc,
mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000,
+ mmRLC_CGCG_CGLS_CTRL, 0x00000003, 0x0000003c,
mmSQ_RANDOM_WAVE_PRI, 0x001fffff, 0x000006fd,
mmTA_CNTL_AUX, 0x000f000f, 0x000b0000,
mmTCC_CTRL, 0x00100000, 0xf31fff7f,
@@ -797,7 +799,7 @@ static int gfx_v8_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
struct amdgpu_device *adev = ring->adev;
struct amdgpu_ib ib;
- struct fence *f = NULL;
+ struct dma_fence *f = NULL;
uint32_t scratch;
uint32_t tmp = 0;
long r;
@@ -823,7 +825,7 @@ static int gfx_v8_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
if (r)
goto err2;
- r = fence_wait_timeout(f, false, timeout);
+ r = dma_fence_wait_timeout(f, false, timeout);
if (r == 0) {
DRM_ERROR("amdgpu: IB test timed out.\n");
r = -ETIMEDOUT;
@@ -843,7 +845,7 @@ static int gfx_v8_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
}
err2:
amdgpu_ib_free(adev, &ib, NULL);
- fence_put(f);
+ dma_fence_put(f);
err1:
amdgpu_gfx_scratch_free(adev, scratch);
return r;
@@ -1057,6 +1059,19 @@ static int gfx_v8_0_init_microcode(struct amdgpu_device *adev)
adev->firmware.fw_size +=
ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
+ /* we need to account for the JT (CP jump table) as well */
+ cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
+ adev->firmware.fw_size +=
+ ALIGN(le32_to_cpu(cp_hdr->jt_size) << 2, PAGE_SIZE);
+
+ if (amdgpu_sriov_vf(adev)) {
+ info = &adev->firmware.ucode[AMDGPU_UCODE_ID_STORAGE];
+ info->ucode_id = AMDGPU_UCODE_ID_STORAGE;
+ info->fw = adev->gfx.mec_fw;
+ adev->firmware.fw_size +=
+ ALIGN(64 * PAGE_SIZE, PAGE_SIZE);
+ }
+
if (adev->gfx.mec2_fw) {
info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC2];
info->ucode_id = AMDGPU_UCODE_ID_CP_MEC2;
@@ -1126,34 +1141,8 @@ static void gfx_v8_0_get_csb_buffer(struct amdgpu_device *adev,
buffer[count++] = cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, 2));
buffer[count++] = cpu_to_le32(mmPA_SC_RASTER_CONFIG -
PACKET3_SET_CONTEXT_REG_START);
- switch (adev->asic_type) {
- case CHIP_TONGA:
- case CHIP_POLARIS10:
- buffer[count++] = cpu_to_le32(0x16000012);
- buffer[count++] = cpu_to_le32(0x0000002A);
- break;
- case CHIP_POLARIS11:
- buffer[count++] = cpu_to_le32(0x16000012);
- buffer[count++] = cpu_to_le32(0x00000000);
- break;
- case CHIP_FIJI:
- buffer[count++] = cpu_to_le32(0x3a00161a);
- buffer[count++] = cpu_to_le32(0x0000002e);
- break;
- case CHIP_TOPAZ:
- case CHIP_CARRIZO:
- buffer[count++] = cpu_to_le32(0x00000002);
- buffer[count++] = cpu_to_le32(0x00000000);
- break;
- case CHIP_STONEY:
- buffer[count++] = cpu_to_le32(0x00000000);
- buffer[count++] = cpu_to_le32(0x00000000);
- break;
- default:
- buffer[count++] = cpu_to_le32(0x00000000);
- buffer[count++] = cpu_to_le32(0x00000000);
- break;
- }
+ buffer[count++] = cpu_to_le32(adev->gfx.config.rb_config[0][0].raster_config);
+ buffer[count++] = cpu_to_le32(adev->gfx.config.rb_config[0][0].raster_config_1);
buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_END_CLEAR_STATE);
@@ -1272,7 +1261,8 @@ static int gfx_v8_0_rlc_init(struct amdgpu_device *adev)
if (adev->gfx.rlc.clear_state_obj == NULL) {
r = amdgpu_bo_create(adev, dws * 4, PAGE_SIZE, true,
AMDGPU_GEM_DOMAIN_VRAM,
- AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
+ AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
+ AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
NULL, NULL,
&adev->gfx.rlc.clear_state_obj);
if (r) {
@@ -1314,7 +1304,8 @@ static int gfx_v8_0_rlc_init(struct amdgpu_device *adev)
if (adev->gfx.rlc.cp_table_obj == NULL) {
r = amdgpu_bo_create(adev, adev->gfx.rlc.cp_table_size, PAGE_SIZE, true,
AMDGPU_GEM_DOMAIN_VRAM,
- AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
+ AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
+ AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
NULL, NULL,
&adev->gfx.rlc.cp_table_obj);
if (r) {
@@ -1382,7 +1373,7 @@ static int gfx_v8_0_mec_init(struct amdgpu_device *adev)
if (adev->gfx.mec.hpd_eop_obj == NULL) {
r = amdgpu_bo_create(adev,
- adev->gfx.mec.num_mec *adev->gfx.mec.num_pipe * MEC_HPD_SIZE * 2,
+ adev->gfx.mec.num_queue * MEC_HPD_SIZE,
PAGE_SIZE, true,
AMDGPU_GEM_DOMAIN_GTT, 0, NULL, NULL,
&adev->gfx.mec.hpd_eop_obj);
@@ -1411,7 +1402,7 @@ static int gfx_v8_0_mec_init(struct amdgpu_device *adev)
return r;
}
- memset(hpd, 0, adev->gfx.mec.num_mec *adev->gfx.mec.num_pipe * MEC_HPD_SIZE * 2);
+ memset(hpd, 0, adev->gfx.mec.num_queue * MEC_HPD_SIZE);
amdgpu_bo_kunmap(adev->gfx.mec.hpd_eop_obj);
amdgpu_bo_unreserve(adev->gfx.mec.hpd_eop_obj);
@@ -1574,7 +1565,7 @@ static int gfx_v8_0_do_edc_gpr_workarounds(struct amdgpu_device *adev)
{
struct amdgpu_ring *ring = &adev->gfx.compute_ring[0];
struct amdgpu_ib ib;
- struct fence *f = NULL;
+ struct dma_fence *f = NULL;
int r, i;
u32 tmp;
unsigned total_size, vgpr_offset, sgpr_offset;
@@ -1707,7 +1698,7 @@ static int gfx_v8_0_do_edc_gpr_workarounds(struct amdgpu_device *adev)
}
/* wait for the GPU to finish processing the IB */
- r = fence_wait(f, false);
+ r = dma_fence_wait(f, false);
if (r) {
DRM_ERROR("amdgpu: fence wait failed (%d).\n", r);
goto fail;
@@ -1728,7 +1719,7 @@ static int gfx_v8_0_do_edc_gpr_workarounds(struct amdgpu_device *adev)
fail:
amdgpu_ib_free(adev, &ib, NULL);
- fence_put(f);
+ dma_fence_put(f);
return r;
}
@@ -2044,10 +2035,8 @@ static int gfx_v8_0_sw_init(void *handle)
ring->doorbell_index = AMDGPU_DOORBELL_GFX_RING0;
}
- r = amdgpu_ring_init(adev, ring, 1024,
- PACKET3(PACKET3_NOP, 0x3FFF), 0xf,
- &adev->gfx.eop_irq, AMDGPU_CP_IRQ_GFX_EOP,
- AMDGPU_RING_TYPE_GFX);
+ r = amdgpu_ring_init(adev, ring, 1024, &adev->gfx.eop_irq,
+ AMDGPU_CP_IRQ_GFX_EOP);
if (r)
return r;
}
@@ -2071,10 +2060,8 @@ static int gfx_v8_0_sw_init(void *handle)
sprintf(ring->name, "comp_%d.%d.%d", ring->me, ring->pipe, ring->queue);
irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP + ring->pipe;
/* type-2 packets are deprecated on MEC, use type-3 instead */
- r = amdgpu_ring_init(adev, ring, 1024,
- PACKET3(PACKET3_NOP, 0x3FFF), 0xf,
- &adev->gfx.eop_irq, irq_type,
- AMDGPU_RING_TYPE_COMPUTE);
+ r = amdgpu_ring_init(adev, ring, 1024, &adev->gfx.eop_irq,
+ irq_type);
if (r)
return r;
}
@@ -3678,6 +3665,21 @@ static void gfx_v8_0_setup_rb(struct amdgpu_device *adev)
num_rb_pipes);
}
+ /* cache the values for userspace */
+ for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
+ for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
+ gfx_v8_0_select_se_sh(adev, i, j, 0xffffffff);
+ adev->gfx.config.rb_config[i][j].rb_backend_disable =
+ RREG32(mmCC_RB_BACKEND_DISABLE);
+ adev->gfx.config.rb_config[i][j].user_rb_backend_disable =
+ RREG32(mmGC_USER_RB_BACKEND_DISABLE);
+ adev->gfx.config.rb_config[i][j].raster_config =
+ RREG32(mmPA_SC_RASTER_CONFIG);
+ adev->gfx.config.rb_config[i][j].raster_config_1 =
+ RREG32(mmPA_SC_RASTER_CONFIG_1);
+ }
+ }
+ gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
mutex_unlock(&adev->grbm_idx_mutex);
}
@@ -3904,7 +3906,7 @@ static int gfx_v8_0_init_save_restore_list(struct amdgpu_device *adev)
int list_size;
unsigned int *register_list_format =
kmalloc(adev->gfx.rlc.reg_list_format_size_bytes, GFP_KERNEL);
- if (register_list_format == NULL)
+ if (!register_list_format)
return -ENOMEM;
memcpy(register_list_format, adev->gfx.rlc.register_list_format,
adev->gfx.rlc.reg_list_format_size_bytes);
@@ -3947,8 +3949,12 @@ static int gfx_v8_0_init_save_restore_list(struct amdgpu_device *adev)
temp = mmRLC_SRM_INDEX_CNTL_ADDR_0;
data = mmRLC_SRM_INDEX_CNTL_DATA_0;
for (i = 0; i < sizeof(unique_indices) / sizeof(int); i++) {
- amdgpu_mm_wreg(adev, temp + i, unique_indices[i] & 0x3FFFF, false);
- amdgpu_mm_wreg(adev, data + i, unique_indices[i] >> 20, false);
+ if (unique_indices[i] != 0) {
+ amdgpu_mm_wreg(adev, temp + i,
+ unique_indices[i] & 0x3FFFF, false);
+ amdgpu_mm_wreg(adev, data + i,
+ unique_indices[i] >> 20, false);
+ }
}
kfree(register_list_format);
@@ -3964,20 +3970,17 @@ static void gfx_v8_0_init_power_gating(struct amdgpu_device *adev)
{
uint32_t data;
- if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_PG |
- AMD_PG_SUPPORT_GFX_SMG |
- AMD_PG_SUPPORT_GFX_DMG)) {
- WREG32_FIELD(CP_RB_WPTR_POLL_CNTL, IDLE_POLL_COUNT, 0x60);
+ WREG32_FIELD(CP_RB_WPTR_POLL_CNTL, IDLE_POLL_COUNT, 0x60);
- data = REG_SET_FIELD(0, RLC_PG_DELAY, POWER_UP_DELAY, 0x10);
- data = REG_SET_FIELD(data, RLC_PG_DELAY, POWER_DOWN_DELAY, 0x10);
- data = REG_SET_FIELD(data, RLC_PG_DELAY, CMD_PROPAGATE_DELAY, 0x10);
- data = REG_SET_FIELD(data, RLC_PG_DELAY, MEM_SLEEP_DELAY, 0x10);
- WREG32(mmRLC_PG_DELAY, data);
+ data = REG_SET_FIELD(0, RLC_PG_DELAY, POWER_UP_DELAY, 0x10);
+ data = REG_SET_FIELD(data, RLC_PG_DELAY, POWER_DOWN_DELAY, 0x10);
+ data = REG_SET_FIELD(data, RLC_PG_DELAY, CMD_PROPAGATE_DELAY, 0x10);
+ data = REG_SET_FIELD(data, RLC_PG_DELAY, MEM_SLEEP_DELAY, 0x10);
+ WREG32(mmRLC_PG_DELAY, data);
+
+ WREG32_FIELD(RLC_PG_DELAY_2, SERDES_CMD_DELAY, 0x3);
+ WREG32_FIELD(RLC_AUTO_PG_CTRL, GRBM_REG_SAVE_GFX_IDLE_THRESHOLD, 0x55f0);
- WREG32_FIELD(RLC_PG_DELAY_2, SERDES_CMD_DELAY, 0x3);
- WREG32_FIELD(RLC_AUTO_PG_CTRL, GRBM_REG_SAVE_GFX_IDLE_THRESHOLD, 0x55f0);
- }
}
static void cz_enable_sck_slow_down_on_power_up(struct amdgpu_device *adev,
@@ -3994,41 +3997,37 @@ static void cz_enable_sck_slow_down_on_power_down(struct amdgpu_device *adev,
static void cz_enable_cp_power_gating(struct amdgpu_device *adev, bool enable)
{
- WREG32_FIELD(RLC_PG_CNTL, CP_PG_DISABLE, enable ? 1 : 0);
+ WREG32_FIELD(RLC_PG_CNTL, CP_PG_DISABLE, enable ? 0 : 1);
}
static void gfx_v8_0_init_pg(struct amdgpu_device *adev)
{
- if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_PG |
- AMD_PG_SUPPORT_GFX_SMG |
- AMD_PG_SUPPORT_GFX_DMG |
- AMD_PG_SUPPORT_CP |
- AMD_PG_SUPPORT_GDS |
- AMD_PG_SUPPORT_RLC_SMU_HS)) {
+ if ((adev->asic_type == CHIP_CARRIZO) ||
+ (adev->asic_type == CHIP_STONEY)) {
gfx_v8_0_init_csb(adev);
gfx_v8_0_init_save_restore_list(adev);
gfx_v8_0_enable_save_restore_machine(adev);
-
- if ((adev->asic_type == CHIP_CARRIZO) ||
- (adev->asic_type == CHIP_STONEY)) {
- WREG32(mmRLC_JUMP_TABLE_RESTORE, adev->gfx.rlc.cp_table_gpu_addr >> 8);
- gfx_v8_0_init_power_gating(adev);
- WREG32(mmRLC_PG_ALWAYS_ON_CU_MASK, adev->gfx.cu_info.ao_cu_mask);
- if (adev->pg_flags & AMD_PG_SUPPORT_RLC_SMU_HS) {
- cz_enable_sck_slow_down_on_power_up(adev, true);
- cz_enable_sck_slow_down_on_power_down(adev, true);
- } else {
- cz_enable_sck_slow_down_on_power_up(adev, false);
- cz_enable_sck_slow_down_on_power_down(adev, false);
- }
- if (adev->pg_flags & AMD_PG_SUPPORT_CP)
- cz_enable_cp_power_gating(adev, true);
- else
- cz_enable_cp_power_gating(adev, false);
- } else if (adev->asic_type == CHIP_POLARIS11) {
- gfx_v8_0_init_power_gating(adev);
+ WREG32(mmRLC_JUMP_TABLE_RESTORE, adev->gfx.rlc.cp_table_gpu_addr >> 8);
+ gfx_v8_0_init_power_gating(adev);
+ WREG32(mmRLC_PG_ALWAYS_ON_CU_MASK, adev->gfx.cu_info.ao_cu_mask);
+ if (adev->pg_flags & AMD_PG_SUPPORT_RLC_SMU_HS) {
+ cz_enable_sck_slow_down_on_power_up(adev, true);
+ cz_enable_sck_slow_down_on_power_down(adev, true);
+ } else {
+ cz_enable_sck_slow_down_on_power_up(adev, false);
+ cz_enable_sck_slow_down_on_power_down(adev, false);
}
+ if (adev->pg_flags & AMD_PG_SUPPORT_CP)
+ cz_enable_cp_power_gating(adev, true);
+ else
+ cz_enable_cp_power_gating(adev, false);
+ } else if (adev->asic_type == CHIP_POLARIS11) {
+ gfx_v8_0_init_csb(adev);
+ gfx_v8_0_init_save_restore_list(adev);
+ gfx_v8_0_enable_save_restore_machine(adev);
+ gfx_v8_0_init_power_gating(adev);
}
}
static void gfx_v8_0_rlc_stop(struct amdgpu_device *adev)
@@ -4330,7 +4329,7 @@ static int gfx_v8_0_cp_gfx_resume(struct amdgpu_device *adev)
struct amdgpu_ring *ring;
u32 tmp;
u32 rb_bufsz;
- u64 rb_addr, rptr_addr;
+ u64 rb_addr, rptr_addr, wptr_gpu_addr;
int r;
/* Set the write pointer delay */
@@ -4361,6 +4360,9 @@ static int gfx_v8_0_cp_gfx_resume(struct amdgpu_device *adev)
WREG32(mmCP_RB0_RPTR_ADDR, lower_32_bits(rptr_addr));
WREG32(mmCP_RB0_RPTR_ADDR_HI, upper_32_bits(rptr_addr) & 0xFF);
+ wptr_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
+ WREG32(mmCP_RB_WPTR_POLL_ADDR_LO, lower_32_bits(wptr_gpu_addr));
+ WREG32(mmCP_RB_WPTR_POLL_ADDR_HI, upper_32_bits(wptr_gpu_addr));
mdelay(1);
WREG32(mmCP_RB0_CNTL, tmp);
@@ -4466,267 +4468,6 @@ static int gfx_v8_0_cp_compute_load_microcode(struct amdgpu_device *adev)
return 0;
}
-struct vi_mqd {
- uint32_t header; /* ordinal0 */
- uint32_t compute_dispatch_initiator; /* ordinal1 */
- uint32_t compute_dim_x; /* ordinal2 */
- uint32_t compute_dim_y; /* ordinal3 */
- uint32_t compute_dim_z; /* ordinal4 */
- uint32_t compute_start_x; /* ordinal5 */
- uint32_t compute_start_y; /* ordinal6 */
- uint32_t compute_start_z; /* ordinal7 */
- uint32_t compute_num_thread_x; /* ordinal8 */
- uint32_t compute_num_thread_y; /* ordinal9 */
- uint32_t compute_num_thread_z; /* ordinal10 */
- uint32_t compute_pipelinestat_enable; /* ordinal11 */
- uint32_t compute_perfcount_enable; /* ordinal12 */
- uint32_t compute_pgm_lo; /* ordinal13 */
- uint32_t compute_pgm_hi; /* ordinal14 */
- uint32_t compute_tba_lo; /* ordinal15 */
- uint32_t compute_tba_hi; /* ordinal16 */
- uint32_t compute_tma_lo; /* ordinal17 */
- uint32_t compute_tma_hi; /* ordinal18 */
- uint32_t compute_pgm_rsrc1; /* ordinal19 */
- uint32_t compute_pgm_rsrc2; /* ordinal20 */
- uint32_t compute_vmid; /* ordinal21 */
- uint32_t compute_resource_limits; /* ordinal22 */
- uint32_t compute_static_thread_mgmt_se0; /* ordinal23 */
- uint32_t compute_static_thread_mgmt_se1; /* ordinal24 */
- uint32_t compute_tmpring_size; /* ordinal25 */
- uint32_t compute_static_thread_mgmt_se2; /* ordinal26 */
- uint32_t compute_static_thread_mgmt_se3; /* ordinal27 */
- uint32_t compute_restart_x; /* ordinal28 */
- uint32_t compute_restart_y; /* ordinal29 */
- uint32_t compute_restart_z; /* ordinal30 */
- uint32_t compute_thread_trace_enable; /* ordinal31 */
- uint32_t compute_misc_reserved; /* ordinal32 */
- uint32_t compute_dispatch_id; /* ordinal33 */
- uint32_t compute_threadgroup_id; /* ordinal34 */
- uint32_t compute_relaunch; /* ordinal35 */
- uint32_t compute_wave_restore_addr_lo; /* ordinal36 */
- uint32_t compute_wave_restore_addr_hi; /* ordinal37 */
- uint32_t compute_wave_restore_control; /* ordinal38 */
- uint32_t reserved9; /* ordinal39 */
- uint32_t reserved10; /* ordinal40 */
- uint32_t reserved11; /* ordinal41 */
- uint32_t reserved12; /* ordinal42 */
- uint32_t reserved13; /* ordinal43 */
- uint32_t reserved14; /* ordinal44 */
- uint32_t reserved15; /* ordinal45 */
- uint32_t reserved16; /* ordinal46 */
- uint32_t reserved17; /* ordinal47 */
- uint32_t reserved18; /* ordinal48 */
- uint32_t reserved19; /* ordinal49 */
- uint32_t reserved20; /* ordinal50 */
- uint32_t reserved21; /* ordinal51 */
- uint32_t reserved22; /* ordinal52 */
- uint32_t reserved23; /* ordinal53 */
- uint32_t reserved24; /* ordinal54 */
- uint32_t reserved25; /* ordinal55 */
- uint32_t reserved26; /* ordinal56 */
- uint32_t reserved27; /* ordinal57 */
- uint32_t reserved28; /* ordinal58 */
- uint32_t reserved29; /* ordinal59 */
- uint32_t reserved30; /* ordinal60 */
- uint32_t reserved31; /* ordinal61 */
- uint32_t reserved32; /* ordinal62 */
- uint32_t reserved33; /* ordinal63 */
- uint32_t reserved34; /* ordinal64 */
- uint32_t compute_user_data_0; /* ordinal65 */
- uint32_t compute_user_data_1; /* ordinal66 */
- uint32_t compute_user_data_2; /* ordinal67 */
- uint32_t compute_user_data_3; /* ordinal68 */
- uint32_t compute_user_data_4; /* ordinal69 */
- uint32_t compute_user_data_5; /* ordinal70 */
- uint32_t compute_user_data_6; /* ordinal71 */
- uint32_t compute_user_data_7; /* ordinal72 */
- uint32_t compute_user_data_8; /* ordinal73 */
- uint32_t compute_user_data_9; /* ordinal74 */
- uint32_t compute_user_data_10; /* ordinal75 */
- uint32_t compute_user_data_11; /* ordinal76 */
- uint32_t compute_user_data_12; /* ordinal77 */
- uint32_t compute_user_data_13; /* ordinal78 */
- uint32_t compute_user_data_14; /* ordinal79 */
- uint32_t compute_user_data_15; /* ordinal80 */
- uint32_t cp_compute_csinvoc_count_lo; /* ordinal81 */
- uint32_t cp_compute_csinvoc_count_hi; /* ordinal82 */
- uint32_t reserved35; /* ordinal83 */
- uint32_t reserved36; /* ordinal84 */
- uint32_t reserved37; /* ordinal85 */
- uint32_t cp_mqd_query_time_lo; /* ordinal86 */
- uint32_t cp_mqd_query_time_hi; /* ordinal87 */
- uint32_t cp_mqd_connect_start_time_lo; /* ordinal88 */
- uint32_t cp_mqd_connect_start_time_hi; /* ordinal89 */
- uint32_t cp_mqd_connect_end_time_lo; /* ordinal90 */
- uint32_t cp_mqd_connect_end_time_hi; /* ordinal91 */
- uint32_t cp_mqd_connect_end_wf_count; /* ordinal92 */
- uint32_t cp_mqd_connect_end_pq_rptr; /* ordinal93 */
- uint32_t cp_mqd_connect_end_pq_wptr; /* ordinal94 */
- uint32_t cp_mqd_connect_end_ib_rptr; /* ordinal95 */
- uint32_t reserved38; /* ordinal96 */
- uint32_t reserved39; /* ordinal97 */
- uint32_t cp_mqd_save_start_time_lo; /* ordinal98 */
- uint32_t cp_mqd_save_start_time_hi; /* ordinal99 */
- uint32_t cp_mqd_save_end_time_lo; /* ordinal100 */
- uint32_t cp_mqd_save_end_time_hi; /* ordinal101 */
- uint32_t cp_mqd_restore_start_time_lo; /* ordinal102 */
- uint32_t cp_mqd_restore_start_time_hi; /* ordinal103 */
- uint32_t cp_mqd_restore_end_time_lo; /* ordinal104 */
- uint32_t cp_mqd_restore_end_time_hi; /* ordinal105 */
- uint32_t reserved40; /* ordinal106 */
- uint32_t reserved41; /* ordinal107 */
- uint32_t gds_cs_ctxsw_cnt0; /* ordinal108 */
- uint32_t gds_cs_ctxsw_cnt1; /* ordinal109 */
- uint32_t gds_cs_ctxsw_cnt2; /* ordinal110 */
- uint32_t gds_cs_ctxsw_cnt3; /* ordinal111 */
- uint32_t reserved42; /* ordinal112 */
- uint32_t reserved43; /* ordinal113 */
- uint32_t cp_pq_exe_status_lo; /* ordinal114 */
- uint32_t cp_pq_exe_status_hi; /* ordinal115 */
- uint32_t cp_packet_id_lo; /* ordinal116 */
- uint32_t cp_packet_id_hi; /* ordinal117 */
- uint32_t cp_packet_exe_status_lo; /* ordinal118 */
- uint32_t cp_packet_exe_status_hi; /* ordinal119 */
- uint32_t gds_save_base_addr_lo; /* ordinal120 */
- uint32_t gds_save_base_addr_hi; /* ordinal121 */
- uint32_t gds_save_mask_lo; /* ordinal122 */
- uint32_t gds_save_mask_hi; /* ordinal123 */
- uint32_t ctx_save_base_addr_lo; /* ordinal124 */
- uint32_t ctx_save_base_addr_hi; /* ordinal125 */
- uint32_t reserved44; /* ordinal126 */
- uint32_t reserved45; /* ordinal127 */
- uint32_t cp_mqd_base_addr_lo; /* ordinal128 */
- uint32_t cp_mqd_base_addr_hi; /* ordinal129 */
- uint32_t cp_hqd_active; /* ordinal130 */
- uint32_t cp_hqd_vmid; /* ordinal131 */
- uint32_t cp_hqd_persistent_state; /* ordinal132 */
- uint32_t cp_hqd_pipe_priority; /* ordinal133 */
- uint32_t cp_hqd_queue_priority; /* ordinal134 */
- uint32_t cp_hqd_quantum; /* ordinal135 */
- uint32_t cp_hqd_pq_base_lo; /* ordinal136 */
- uint32_t cp_hqd_pq_base_hi; /* ordinal137 */
- uint32_t cp_hqd_pq_rptr; /* ordinal138 */
- uint32_t cp_hqd_pq_rptr_report_addr_lo; /* ordinal139 */
- uint32_t cp_hqd_pq_rptr_report_addr_hi; /* ordinal140 */
- uint32_t cp_hqd_pq_wptr_poll_addr; /* ordinal141 */
- uint32_t cp_hqd_pq_wptr_poll_addr_hi; /* ordinal142 */
- uint32_t cp_hqd_pq_doorbell_control; /* ordinal143 */
- uint32_t cp_hqd_pq_wptr; /* ordinal144 */
- uint32_t cp_hqd_pq_control; /* ordinal145 */
- uint32_t cp_hqd_ib_base_addr_lo; /* ordinal146 */
- uint32_t cp_hqd_ib_base_addr_hi; /* ordinal147 */
- uint32_t cp_hqd_ib_rptr; /* ordinal148 */
- uint32_t cp_hqd_ib_control; /* ordinal149 */
- uint32_t cp_hqd_iq_timer; /* ordinal150 */
- uint32_t cp_hqd_iq_rptr; /* ordinal151 */
- uint32_t cp_hqd_dequeue_request; /* ordinal152 */
- uint32_t cp_hqd_dma_offload; /* ordinal153 */
- uint32_t cp_hqd_sema_cmd; /* ordinal154 */
- uint32_t cp_hqd_msg_type; /* ordinal155 */
- uint32_t cp_hqd_atomic0_preop_lo; /* ordinal156 */
- uint32_t cp_hqd_atomic0_preop_hi; /* ordinal157 */
- uint32_t cp_hqd_atomic1_preop_lo; /* ordinal158 */
- uint32_t cp_hqd_atomic1_preop_hi; /* ordinal159 */
- uint32_t cp_hqd_hq_status0; /* ordinal160 */
- uint32_t cp_hqd_hq_control0; /* ordinal161 */
- uint32_t cp_mqd_control; /* ordinal162 */
- uint32_t cp_hqd_hq_status1; /* ordinal163 */
- uint32_t cp_hqd_hq_control1; /* ordinal164 */
- uint32_t cp_hqd_eop_base_addr_lo; /* ordinal165 */
- uint32_t cp_hqd_eop_base_addr_hi; /* ordinal166 */
- uint32_t cp_hqd_eop_control; /* ordinal167 */
- uint32_t cp_hqd_eop_rptr; /* ordinal168 */
- uint32_t cp_hqd_eop_wptr; /* ordinal169 */
- uint32_t cp_hqd_eop_done_events; /* ordinal170 */
- uint32_t cp_hqd_ctx_save_base_addr_lo; /* ordinal171 */
- uint32_t cp_hqd_ctx_save_base_addr_hi; /* ordinal172 */
- uint32_t cp_hqd_ctx_save_control; /* ordinal173 */
- uint32_t cp_hqd_cntl_stack_offset; /* ordinal174 */
- uint32_t cp_hqd_cntl_stack_size; /* ordinal175 */
- uint32_t cp_hqd_wg_state_offset; /* ordinal176 */
- uint32_t cp_hqd_ctx_save_size; /* ordinal177 */
- uint32_t cp_hqd_gds_resource_state; /* ordinal178 */
- uint32_t cp_hqd_error; /* ordinal179 */
- uint32_t cp_hqd_eop_wptr_mem; /* ordinal180 */
- uint32_t cp_hqd_eop_dones; /* ordinal181 */
- uint32_t reserved46; /* ordinal182 */
- uint32_t reserved47; /* ordinal183 */
- uint32_t reserved48; /* ordinal184 */
- uint32_t reserved49; /* ordinal185 */
- uint32_t reserved50; /* ordinal186 */
- uint32_t reserved51; /* ordinal187 */
- uint32_t reserved52; /* ordinal188 */
- uint32_t reserved53; /* ordinal189 */
- uint32_t reserved54; /* ordinal190 */
- uint32_t reserved55; /* ordinal191 */
- uint32_t iqtimer_pkt_header; /* ordinal192 */
- uint32_t iqtimer_pkt_dw0; /* ordinal193 */
- uint32_t iqtimer_pkt_dw1; /* ordinal194 */
- uint32_t iqtimer_pkt_dw2; /* ordinal195 */
- uint32_t iqtimer_pkt_dw3; /* ordinal196 */
- uint32_t iqtimer_pkt_dw4; /* ordinal197 */
- uint32_t iqtimer_pkt_dw5; /* ordinal198 */
- uint32_t iqtimer_pkt_dw6; /* ordinal199 */
- uint32_t iqtimer_pkt_dw7; /* ordinal200 */
- uint32_t iqtimer_pkt_dw8; /* ordinal201 */
- uint32_t iqtimer_pkt_dw9; /* ordinal202 */
- uint32_t iqtimer_pkt_dw10; /* ordinal203 */
- uint32_t iqtimer_pkt_dw11; /* ordinal204 */
- uint32_t iqtimer_pkt_dw12; /* ordinal205 */
- uint32_t iqtimer_pkt_dw13; /* ordinal206 */
- uint32_t iqtimer_pkt_dw14; /* ordinal207 */
- uint32_t iqtimer_pkt_dw15; /* ordinal208 */
- uint32_t iqtimer_pkt_dw16; /* ordinal209 */
- uint32_t iqtimer_pkt_dw17; /* ordinal210 */
- uint32_t iqtimer_pkt_dw18; /* ordinal211 */
- uint32_t iqtimer_pkt_dw19; /* ordinal212 */
- uint32_t iqtimer_pkt_dw20; /* ordinal213 */
- uint32_t iqtimer_pkt_dw21; /* ordinal214 */
- uint32_t iqtimer_pkt_dw22; /* ordinal215 */
- uint32_t iqtimer_pkt_dw23; /* ordinal216 */
- uint32_t iqtimer_pkt_dw24; /* ordinal217 */
- uint32_t iqtimer_pkt_dw25; /* ordinal218 */
- uint32_t iqtimer_pkt_dw26; /* ordinal219 */
- uint32_t iqtimer_pkt_dw27; /* ordinal220 */
- uint32_t iqtimer_pkt_dw28; /* ordinal221 */
- uint32_t iqtimer_pkt_dw29; /* ordinal222 */
- uint32_t iqtimer_pkt_dw30; /* ordinal223 */
- uint32_t iqtimer_pkt_dw31; /* ordinal224 */
- uint32_t reserved56; /* ordinal225 */
- uint32_t reserved57; /* ordinal226 */
- uint32_t reserved58; /* ordinal227 */
- uint32_t set_resources_header; /* ordinal228 */
- uint32_t set_resources_dw1; /* ordinal229 */
- uint32_t set_resources_dw2; /* ordinal230 */
- uint32_t set_resources_dw3; /* ordinal231 */
- uint32_t set_resources_dw4; /* ordinal232 */
- uint32_t set_resources_dw5; /* ordinal233 */
- uint32_t set_resources_dw6; /* ordinal234 */
- uint32_t set_resources_dw7; /* ordinal235 */
- uint32_t reserved59; /* ordinal236 */
- uint32_t reserved60; /* ordinal237 */
- uint32_t reserved61; /* ordinal238 */
- uint32_t reserved62; /* ordinal239 */
- uint32_t reserved63; /* ordinal240 */
- uint32_t reserved64; /* ordinal241 */
- uint32_t reserved65; /* ordinal242 */
- uint32_t reserved66; /* ordinal243 */
- uint32_t reserved67; /* ordinal244 */
- uint32_t reserved68; /* ordinal245 */
- uint32_t reserved69; /* ordinal246 */
- uint32_t reserved70; /* ordinal247 */
- uint32_t reserved71; /* ordinal248 */
- uint32_t reserved72; /* ordinal249 */
- uint32_t reserved73; /* ordinal250 */
- uint32_t reserved74; /* ordinal251 */
- uint32_t reserved75; /* ordinal252 */
- uint32_t reserved76; /* ordinal253 */
- uint32_t reserved77; /* ordinal254 */
- uint32_t reserved78; /* ordinal255 */
-
- uint32_t reserved_t[256]; /* Reserve 256 dword buffer used by ucode */
-};
-
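The 250-line inline MQD layout is dropped wholesale here, and later hunks switch to the cp_hqd_pq_wptr_poll_addr_lo field name, which implies the definition now comes from a shared header rather than being open-coded per file. A guess at the replacement include (the header name is an assumption, not shown in this diff):

#include "vi_structs.h"	/* assumed new home of struct vi_mqd */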
static void gfx_v8_0_cp_compute_fini(struct amdgpu_device *adev)
{
int i, r;
@@ -4760,34 +4501,7 @@ static int gfx_v8_0_cp_compute_resume(struct amdgpu_device *adev)
u32 *buf;
struct vi_mqd *mqd;
- /* init the pipes */
- mutex_lock(&adev->srbm_mutex);
- for (i = 0; i < (adev->gfx.mec.num_pipe * adev->gfx.mec.num_mec); i++) {
- int me = (i < 4) ? 1 : 2;
- int pipe = (i < 4) ? i : (i - 4);
-
- eop_gpu_addr = adev->gfx.mec.hpd_eop_gpu_addr + (i * MEC_HPD_SIZE);
- eop_gpu_addr >>= 8;
-
- vi_srbm_select(adev, me, pipe, 0, 0);
-
- /* write the EOP addr */
- WREG32(mmCP_HQD_EOP_BASE_ADDR, eop_gpu_addr);
- WREG32(mmCP_HQD_EOP_BASE_ADDR_HI, upper_32_bits(eop_gpu_addr));
-
- /* set the VMID assigned */
- WREG32(mmCP_HQD_VMID, 0);
-
- /* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */
- tmp = RREG32(mmCP_HQD_EOP_CONTROL);
- tmp = REG_SET_FIELD(tmp, CP_HQD_EOP_CONTROL, EOP_SIZE,
- (order_base_2(MEC_HPD_SIZE / 4) - 1));
- WREG32(mmCP_HQD_EOP_CONTROL, tmp);
- }
- vi_srbm_select(adev, 0, 0, 0, 0);
- mutex_unlock(&adev->srbm_mutex);
-
- /* init the queues. Just two for now. */
+ /* init the queues. */
for (i = 0; i < adev->gfx.num_compute_rings; i++) {
struct amdgpu_ring *ring = &adev->gfx.compute_ring[i];
@@ -4839,6 +4553,22 @@ static int gfx_v8_0_cp_compute_resume(struct amdgpu_device *adev)
ring->pipe,
ring->queue, 0);
+ eop_gpu_addr = adev->gfx.mec.hpd_eop_gpu_addr + (i * MEC_HPD_SIZE);
+ eop_gpu_addr >>= 8;
+
+ /* write the EOP addr */
+ WREG32(mmCP_HQD_EOP_BASE_ADDR, eop_gpu_addr);
+ WREG32(mmCP_HQD_EOP_BASE_ADDR_HI, upper_32_bits(eop_gpu_addr));
+
+ /* set the VMID assigned */
+ WREG32(mmCP_HQD_VMID, 0);
+
+ /* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */
+ tmp = RREG32(mmCP_HQD_EOP_CONTROL);
+ tmp = REG_SET_FIELD(tmp, CP_HQD_EOP_CONTROL, EOP_SIZE,
+ (order_base_2(MEC_HPD_SIZE / 4) - 1));
+ WREG32(mmCP_HQD_EOP_CONTROL, tmp);
+
/* disable wptr polling */
tmp = RREG32(mmCP_PQ_WPTR_POLL_CNTL);
tmp = REG_SET_FIELD(tmp, CP_PQ_WPTR_POLL_CNTL, EN, 0);
@@ -4922,9 +4652,9 @@ static int gfx_v8_0_cp_compute_resume(struct amdgpu_device *adev)
/* only used if CP_PQ_WPTR_POLL_CNTL.CP_PQ_WPTR_POLL_CNTL__EN_MASK=1 */
wb_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
- mqd->cp_hqd_pq_wptr_poll_addr = wb_gpu_addr & 0xfffffffc;
+ mqd->cp_hqd_pq_wptr_poll_addr_lo = wb_gpu_addr & 0xfffffffc;
mqd->cp_hqd_pq_wptr_poll_addr_hi = upper_32_bits(wb_gpu_addr) & 0xffff;
- WREG32(mmCP_HQD_PQ_WPTR_POLL_ADDR, mqd->cp_hqd_pq_wptr_poll_addr);
+ WREG32(mmCP_HQD_PQ_WPTR_POLL_ADDR, mqd->cp_hqd_pq_wptr_poll_addr_lo);
WREG32(mmCP_HQD_PQ_WPTR_POLL_ADDR_HI,
mqd->cp_hqd_pq_wptr_poll_addr_hi);
@@ -5095,6 +4825,10 @@ static int gfx_v8_0_hw_fini(void *handle)
amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);
amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0);
+ if (amdgpu_sriov_vf(adev)) {
+ pr_debug("For SRIOV client, shouldn't do anything.\n");
+ return 0;
+ }
gfx_v8_0_cp_enable(adev, false);
gfx_v8_0_rlc_stop(adev);
gfx_v8_0_cp_compute_fini(adev);
@@ -5437,9 +5171,70 @@ static void gfx_v8_0_ring_emit_gds_switch(struct amdgpu_ring *ring,
amdgpu_ring_write(ring, (1 << (oa_size + oa_base)) - (1 << oa_base));
}
+static uint32_t wave_read_ind(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t address)
+{
+ WREG32(mmSQ_IND_INDEX,
+ (wave << SQ_IND_INDEX__WAVE_ID__SHIFT) |
+ (simd << SQ_IND_INDEX__SIMD_ID__SHIFT) |
+ (address << SQ_IND_INDEX__INDEX__SHIFT) |
+ (SQ_IND_INDEX__FORCE_READ_MASK));
+ return RREG32(mmSQ_IND_DATA);
+}
+
+static void wave_read_regs(struct amdgpu_device *adev, uint32_t simd,
+ uint32_t wave, uint32_t thread,
+ uint32_t regno, uint32_t num, uint32_t *out)
+{
+ WREG32(mmSQ_IND_INDEX,
+ (wave << SQ_IND_INDEX__WAVE_ID__SHIFT) |
+ (simd << SQ_IND_INDEX__SIMD_ID__SHIFT) |
+ (regno << SQ_IND_INDEX__INDEX__SHIFT) |
+ (thread << SQ_IND_INDEX__THREAD_ID__SHIFT) |
+ (SQ_IND_INDEX__FORCE_READ_MASK) |
+ (SQ_IND_INDEX__AUTO_INCR_MASK));
+ while (num--)
+ *(out++) = RREG32(mmSQ_IND_DATA);
+}
+
+static void gfx_v8_0_read_wave_data(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t *dst, int *no_fields)
+{
+ /* type 0 wave data */
+ dst[(*no_fields)++] = 0;
+ dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_STATUS);
+ dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_PC_LO);
+ dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_PC_HI);
+ dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_EXEC_LO);
+ dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_EXEC_HI);
+ dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_HW_ID);
+ dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_INST_DW0);
+ dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_INST_DW1);
+ dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_GPR_ALLOC);
+ dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_LDS_ALLOC);
+ dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TRAPSTS);
+ dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_IB_STS);
+ dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TBA_LO);
+ dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TBA_HI);
+ dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TMA_LO);
+ dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TMA_HI);
+ dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_IB_DBG0);
+ dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_M0);
+}
+
+static void gfx_v8_0_read_wave_sgprs(struct amdgpu_device *adev, uint32_t simd,
+ uint32_t wave, uint32_t start,
+ uint32_t size, uint32_t *dst)
+{
+ wave_read_regs(
+ adev, simd, wave, 0,
+ start + SQIND_WAVE_SGPRS_OFFSET, size, dst);
+}
+
static const struct amdgpu_gfx_funcs gfx_v8_0_gfx_funcs = {
.get_gpu_clock_counter = &gfx_v8_0_get_gpu_clock_counter,
.select_se_sh = &gfx_v8_0_select_se_sh,
+ .read_wave_data = &gfx_v8_0_read_wave_data,
+ .read_wave_sgprs = &gfx_v8_0_read_wave_sgprs,
};
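For context, a hypothetical caller of the two new hooks; the buffer size and the way no_fields is consumed are assumptions, only the calling convention is taken from the code above:

/* Sketch: dump "type 0" wave data through the new gfx_funcs hooks. */
uint32_t fields[32];
int n = 0;

if (adev->gfx.funcs->read_wave_data)
	adev->gfx.funcs->read_wave_data(adev, simd, wave, fields, &n);
/* fields[0] is the type tag (0); n entries are now valid. */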
static int gfx_v8_0_early_init(void *handle)
@@ -5541,14 +5336,11 @@ static int gfx_v8_0_set_powergating_state(void *handle,
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
bool enable = (state == AMD_PG_STATE_GATE) ? true : false;
- if (!(adev->pg_flags & AMD_PG_SUPPORT_GFX_PG))
- return 0;
-
switch (adev->asic_type) {
case CHIP_CARRIZO:
case CHIP_STONEY:
- if (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG)
- cz_update_gfx_cg_power_gating(adev, enable);
+ cz_update_gfx_cg_power_gating(adev, enable);
if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_SMG) && enable)
gfx_v8_0_enable_gfx_static_mg_power_gating(adev, true);
@@ -5891,29 +5683,24 @@ static void gfx_v8_0_update_coarse_grain_clock_gating(struct amdgpu_device *adev
adev->gfx.rlc.funcs->enter_safe_mode(adev);
if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG)) {
- /* 1 enable cntx_empty_int_enable/cntx_busy_int_enable/
- * Cmp_busy/GFX_Idle interrupts
- */
- gfx_v8_0_enable_gui_idle_interrupt(adev, true);
-
temp1 = data1 = RREG32(mmRLC_CGTT_MGCG_OVERRIDE);
data1 &= ~RLC_CGTT_MGCG_OVERRIDE__CGCG_MASK;
if (temp1 != data1)
WREG32(mmRLC_CGTT_MGCG_OVERRIDE, data1);
- /* 2 wait for RLC_SERDES_CU_MASTER & RLC_SERDES_NONCU_MASTER idle */
+ /* 1 - wait for RLC_SERDES_CU_MASTER & RLC_SERDES_NONCU_MASTER idle */
gfx_v8_0_wait_for_rlc_serdes(adev);
- /* 3 - clear cgcg override */
+ /* 2 - clear cgcg override */
gfx_v8_0_send_serdes_cmd(adev, BPM_REG_CGCG_OVERRIDE, CLE_BPM_SERDES_CMD);
/* wait for RLC_SERDES_CU_MASTER & RLC_SERDES_NONCU_MASTER idle */
gfx_v8_0_wait_for_rlc_serdes(adev);
- /* 4 - write cmd to set CGLS */
+ /* 3 - write cmd to set CGLS */
gfx_v8_0_send_serdes_cmd(adev, BPM_REG_CGLS_EN, SET_BPM_SERDES_CMD);
- /* 5 - enable cgcg */
+ /* 4 - enable cgcg */
data |= RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK;
if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS) {
@@ -5931,6 +5718,11 @@ static void gfx_v8_0_update_coarse_grain_clock_gating(struct amdgpu_device *adev
if (temp != data)
WREG32(mmRLC_CGCG_CGLS_CTRL, data);
+
+ /* 5 enable cntx_empty_int_enable/cntx_busy_int_enable/
+ * Cmp_busy/GFX_Idle interrupts
+ */
+ gfx_v8_0_enable_gui_idle_interrupt(adev, true);
} else {
/* disable cntx_empty_int_enable & GFX Idle interrupt */
gfx_v8_0_enable_gui_idle_interrupt(adev, false);
@@ -5993,25 +5785,49 @@ static int gfx_v8_0_update_gfx_clock_gating(struct amdgpu_device *adev,
static int gfx_v8_0_tonga_update_gfx_clock_gating(struct amdgpu_device *adev,
enum amd_clockgating_state state)
{
- uint32_t msg_id, pp_state;
+ uint32_t msg_id, pp_state = 0;
+ uint32_t pp_support_state = 0;
void *pp_handle = adev->powerplay.pp_handle;
- if (state == AMD_CG_STATE_UNGATE)
- pp_state = 0;
- else
- pp_state = PP_STATE_CG | PP_STATE_LS;
+ if (adev->cg_flags & (AMD_CG_SUPPORT_GFX_CGCG | AMD_CG_SUPPORT_GFX_CGLS)) {
+ if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS) {
+ pp_support_state = PP_STATE_SUPPORT_LS;
+ pp_state = PP_STATE_LS;
+ }
+ if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG) {
+ pp_support_state |= PP_STATE_SUPPORT_CG;
+ pp_state |= PP_STATE_CG;
+ }
+ if (state == AMD_CG_STATE_UNGATE)
+ pp_state = 0;
+
+ msg_id = PP_CG_MSG_ID(PP_GROUP_GFX,
+ PP_BLOCK_GFX_CG,
+ pp_support_state,
+ pp_state);
+ amd_set_clockgating_by_smu(pp_handle, msg_id);
+ }
- msg_id = PP_CG_MSG_ID(PP_GROUP_GFX,
- PP_BLOCK_GFX_CG,
- PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
- pp_state);
- amd_set_clockgating_by_smu(pp_handle, msg_id);
+ if (adev->cg_flags & (AMD_CG_SUPPORT_GFX_MGCG | AMD_CG_SUPPORT_GFX_MGLS)) {
+ if (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGLS) {
+ pp_support_state = PP_STATE_SUPPORT_LS;
+ pp_state = PP_STATE_LS;
+ }
- msg_id = PP_CG_MSG_ID(PP_GROUP_GFX,
- PP_BLOCK_GFX_MG,
- PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
- pp_state);
- amd_set_clockgating_by_smu(pp_handle, msg_id);
+ if (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG) {
+ pp_support_state |= PP_STATE_SUPPORT_CG;
+ pp_state |= PP_STATE_CG;
+ }
+
+ if (state == AMD_CG_STATE_UNGATE)
+ pp_state = 0;
+
+ msg_id = PP_CG_MSG_ID(PP_GROUP_GFX,
+ PP_BLOCK_GFX_MG,
+ pp_support_state,
+ pp_state);
+ amd_set_clockgating_by_smu(pp_handle, msg_id);
+ }
return 0;
}
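The rework above changes the contract with the SMU: pp_support_state advertises only what the ASIC actually supports, pp_state requests the gating level, and UNGATE clears the request without clearing the capability bits. The same logic in a standalone, compilable form (flag values are illustrative, not the driver's):

#include <stdio.h>

#define SUPPORT_CG (1u << 0)	/* stand-ins for AMD_CG_SUPPORT_GFX_CGCG/CGLS */
#define SUPPORT_LS (1u << 1)

int main(void)
{
	unsigned int cg_flags = SUPPORT_CG;	/* ASIC supports CG but not LS */
	int ungate = 1;
	unsigned int support = 0, state = 0;

	if (cg_flags & SUPPORT_LS) { support |= SUPPORT_LS; state |= SUPPORT_LS; }
	if (cg_flags & SUPPORT_CG) { support |= SUPPORT_CG; state |= SUPPORT_CG; }
	if (ungate)
		state = 0;	/* capability stays advertised, gating is off */

	printf("support=0x%x state=0x%x\n", support, state);
	return 0;
}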
@@ -6019,43 +5835,98 @@ static int gfx_v8_0_tonga_update_gfx_clock_gating(struct amdgpu_device *adev,
static int gfx_v8_0_polaris_update_gfx_clock_gating(struct amdgpu_device *adev,
enum amd_clockgating_state state)
{
- uint32_t msg_id, pp_state;
+ uint32_t msg_id, pp_state = 0;
+ uint32_t pp_support_state = 0;
void *pp_handle = adev->powerplay.pp_handle;
- if (state == AMD_CG_STATE_UNGATE)
- pp_state = 0;
- else
- pp_state = PP_STATE_CG | PP_STATE_LS;
+ if (adev->cg_flags & (AMD_CG_SUPPORT_GFX_CGCG | AMD_CG_SUPPORT_GFX_CGLS)) {
+ if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS) {
+ pp_support_state = PP_STATE_SUPPORT_LS;
+ pp_state = PP_STATE_LS;
+ }
+ if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG) {
+ pp_support_state |= PP_STATE_SUPPORT_CG;
+ pp_state |= PP_STATE_CG;
+ }
+ if (state == AMD_CG_STATE_UNGATE)
+ pp_state = 0;
+
+ msg_id = PP_CG_MSG_ID(PP_GROUP_GFX,
+ PP_BLOCK_GFX_CG,
+ pp_support_state,
+ pp_state);
+ amd_set_clockgating_by_smu(pp_handle, msg_id);
+ }
- msg_id = PP_CG_MSG_ID(PP_GROUP_GFX,
- PP_BLOCK_GFX_CG,
- PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
- pp_state);
- amd_set_clockgating_by_smu(pp_handle, msg_id);
+ if (adev->cg_flags & (AMD_CG_SUPPORT_GFX_3D_CGCG | AMD_CG_SUPPORT_GFX_3D_CGLS)) {
+ if (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGLS) {
+ pp_support_state = PP_STATE_SUPPORT_LS;
+ pp_state = PP_STATE_LS;
+ }
+ if (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGCG) {
+ pp_support_state |= PP_STATE_SUPPORT_CG;
+ pp_state |= PP_STATE_CG;
+ }
+ if (state == AMD_CG_STATE_UNGATE)
+ pp_state = 0;
+
+ msg_id = PP_CG_MSG_ID(PP_GROUP_GFX,
+ PP_BLOCK_GFX_3D,
+ pp_support_state,
+ pp_state);
+ amd_set_clockgating_by_smu(pp_handle, msg_id);
+ }
- msg_id = PP_CG_MSG_ID(PP_GROUP_GFX,
- PP_BLOCK_GFX_3D,
- PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
- pp_state);
- amd_set_clockgating_by_smu(pp_handle, msg_id);
+ if (adev->cg_flags & (AMD_CG_SUPPORT_GFX_MGCG | AMD_CG_SUPPORT_GFX_MGLS)) {
+ if (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGLS) {
+ pp_support_state = PP_STATE_SUPPORT_LS;
+ pp_state = PP_STATE_LS;
+ }
- msg_id = PP_CG_MSG_ID(PP_GROUP_GFX,
- PP_BLOCK_GFX_MG,
- PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
- pp_state);
- amd_set_clockgating_by_smu(pp_handle, msg_id);
+ if (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG) {
+ pp_support_state |= PP_STATE_SUPPORT_CG;
+ pp_state |= PP_STATE_CG;
+ }
- msg_id = PP_CG_MSG_ID(PP_GROUP_GFX,
- PP_BLOCK_GFX_RLC,
- PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
- pp_state);
- amd_set_clockgating_by_smu(pp_handle, msg_id);
+ if (state == AMD_CG_STATE_UNGATE)
+ pp_state = 0;
+
+ msg_id = PP_CG_MSG_ID(PP_GROUP_GFX,
+ PP_BLOCK_GFX_MG,
+ pp_support_state,
+ pp_state);
+ amd_set_clockgating_by_smu(pp_handle, msg_id);
+ }
+
+ if (adev->cg_flags & AMD_CG_SUPPORT_GFX_RLC_LS) {
+ pp_support_state = PP_STATE_SUPPORT_LS;
- msg_id = PP_CG_MSG_ID(PP_GROUP_GFX,
+ if (state == AMD_CG_STATE_UNGATE)
+ pp_state = 0;
+ else
+ pp_state = PP_STATE_LS;
+
+ msg_id = PP_CG_MSG_ID(PP_GROUP_GFX,
+ PP_BLOCK_GFX_RLC,
+ pp_support_state,
+ pp_state);
+ amd_set_clockgating_by_smu(pp_handle, msg_id);
+ }
+
+ if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CP_LS) {
+ pp_support_state = PP_STATE_SUPPORT_LS;
+
+ if (state == AMD_CG_STATE_UNGATE)
+ pp_state = 0;
+ else
+ pp_state = PP_STATE_LS;
+ msg_id = PP_CG_MSG_ID(PP_GROUP_GFX,
PP_BLOCK_GFX_CP,
- PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
+ pp_support_state,
pp_state);
- amd_set_clockgating_by_smu(pp_handle, msg_id);
+ amd_set_clockgating_by_smu(pp_handle, msg_id);
+ }
return 0;
}
@@ -6119,7 +5990,7 @@ static void gfx_v8_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
{
u32 ref_and_mask, reg_mem_engine;
- if (ring->type == AMDGPU_RING_TYPE_COMPUTE) {
+ if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) {
switch (ring->me) {
case 1:
ref_and_mask = GPU_HDP_FLUSH_DONE__CP2_MASK << ring->pipe;
@@ -6147,6 +6018,18 @@ static void gfx_v8_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
amdgpu_ring_write(ring, 0x20); /* poll interval */
}
+static void gfx_v8_0_ring_emit_vgt_flush(struct amdgpu_ring *ring)
+{
+ amdgpu_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE, 0));
+ amdgpu_ring_write(ring, EVENT_TYPE(VS_PARTIAL_FLUSH) |
+ EVENT_INDEX(4));
+
+ amdgpu_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE, 0));
+ amdgpu_ring_write(ring, EVENT_TYPE(VGT_FLUSH) |
+ EVENT_INDEX(0));
+}
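gfx_v8_0_ring_emit_vgt_flush() writes raw PM4 type-3 packets. As a worked example of the header those macros produce, assuming the usual radeon/amdgpu PACKET3 layout (type in bits [31:30], dword count in [29:16], opcode in [15:8]):

#include <stdio.h>

#define PACKET_TYPE3		3u
#define PACKET3(op, n)		((PACKET_TYPE3 << 30) | \
				 (((op) & 0xFF) << 8) | \
				 (((n) & 0x3FFF) << 16))
#define PACKET3_EVENT_WRITE	0x46	/* PM4 EVENT_WRITE opcode */

int main(void)
{
	/* EVENT_WRITE with zero extra payload dwords, as emitted above */
	printf("0x%08x\n", PACKET3(PACKET3_EVENT_WRITE, 0));	/* 0xc0004600 */
	return 0;
}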
+
static void gfx_v8_0_ring_emit_hdp_invalidate(struct amdgpu_ring *ring)
{
amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
@@ -6221,7 +6104,7 @@ static void gfx_v8_0_ring_emit_fence_gfx(struct amdgpu_ring *ring, u64 addr,
static void gfx_v8_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
{
- int usepfp = (ring->type == AMDGPU_RING_TYPE_GFX);
+ int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
uint32_t seq = ring->fence_drv.sync_seq;
uint64_t addr = ring->fence_drv.gpu_addr;
@@ -6239,11 +6122,7 @@ static void gfx_v8_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
static void gfx_v8_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
unsigned vm_id, uint64_t pd_addr)
{
- int usepfp = (ring->type == AMDGPU_RING_TYPE_GFX);
-
- /* GFX8 emits 128 dw nop to prevent DE do vm_flush before CE finish CEIB */
- if (usepfp)
- amdgpu_ring_insert_nop(ring, 128);
+ int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(usepfp) |
@@ -6336,6 +6215,7 @@ static void gfx_v8_ring_emit_cntxcntl(struct amdgpu_ring *ring, uint32_t flags)
dw2 |= 0x80000000; /* set load_enable otherwise this package is just NOPs */
if (flags & AMDGPU_HAVE_CTX_SWITCH) {
+ gfx_v8_0_ring_emit_vgt_flush(ring);
/* set load_global_config & load_global_uconfig */
dw2 |= 0x8001;
/* set load_cs_sh_regs */
@@ -6359,42 +6239,6 @@ static void gfx_v8_ring_emit_cntxcntl(struct amdgpu_ring *ring, uint32_t flags)
amdgpu_ring_write(ring, 0);
}
-static unsigned gfx_v8_0_ring_get_emit_ib_size_gfx(struct amdgpu_ring *ring)
-{
- return
- 4; /* gfx_v8_0_ring_emit_ib_gfx */
-}
-
-static unsigned gfx_v8_0_ring_get_dma_frame_size_gfx(struct amdgpu_ring *ring)
-{
- return
- 20 + /* gfx_v8_0_ring_emit_gds_switch */
- 7 + /* gfx_v8_0_ring_emit_hdp_flush */
- 5 + /* gfx_v8_0_ring_emit_hdp_invalidate */
- 6 + 6 + 6 +/* gfx_v8_0_ring_emit_fence_gfx x3 for user fence, vm fence */
- 7 + /* gfx_v8_0_ring_emit_pipeline_sync */
- 256 + 19 + /* gfx_v8_0_ring_emit_vm_flush */
- 2 + /* gfx_v8_ring_emit_sb */
- 3; /* gfx_v8_ring_emit_cntxcntl */
-}
-
-static unsigned gfx_v8_0_ring_get_emit_ib_size_compute(struct amdgpu_ring *ring)
-{
- return
- 4; /* gfx_v8_0_ring_emit_ib_compute */
-}
-
-static unsigned gfx_v8_0_ring_get_dma_frame_size_compute(struct amdgpu_ring *ring)
-{
- return
- 20 + /* gfx_v8_0_ring_emit_gds_switch */
- 7 + /* gfx_v8_0_ring_emit_hdp_flush */
- 5 + /* gfx_v8_0_ring_emit_hdp_invalidate */
- 7 + /* gfx_v8_0_ring_emit_pipeline_sync */
- 17 + /* gfx_v8_0_ring_emit_vm_flush */
- 7 + 7 + 7; /* gfx_v8_0_ring_emit_fence_compute x3 for user fence, vm fence */
-}
-
static void gfx_v8_0_set_gfx_eop_interrupt_state(struct amdgpu_device *adev,
enum amdgpu_interrupt_state state)
{
@@ -6540,7 +6384,7 @@ static int gfx_v8_0_priv_inst_irq(struct amdgpu_device *adev,
return 0;
}
-const struct amd_ip_funcs gfx_v8_0_ip_funcs = {
+static const struct amd_ip_funcs gfx_v8_0_ip_funcs = {
.name = "gfx_v8_0",
.early_init = gfx_v8_0_early_init,
.late_init = gfx_v8_0_late_init,
@@ -6561,10 +6405,22 @@ const struct amd_ip_funcs gfx_v8_0_ip_funcs = {
};
static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_gfx = {
+ .type = AMDGPU_RING_TYPE_GFX,
+ .align_mask = 0xff,
+ .nop = PACKET3(PACKET3_NOP, 0x3FFF),
.get_rptr = gfx_v8_0_ring_get_rptr,
.get_wptr = gfx_v8_0_ring_get_wptr_gfx,
.set_wptr = gfx_v8_0_ring_set_wptr_gfx,
- .parse_cs = NULL,
+ .emit_frame_size =
+ 20 + /* gfx_v8_0_ring_emit_gds_switch */
+ 7 + /* gfx_v8_0_ring_emit_hdp_flush */
+ 5 + /* gfx_v8_0_ring_emit_hdp_invalidate */
+ 6 + 6 + 6 +/* gfx_v8_0_ring_emit_fence_gfx x3 for user fence, vm fence */
+ 7 + /* gfx_v8_0_ring_emit_pipeline_sync */
+ 128 + 19 + /* gfx_v8_0_ring_emit_vm_flush */
+ 2 + /* gfx_v8_ring_emit_sb */
+ 3 + 4, /* gfx_v8_ring_emit_cntxcntl including vgt flush */
+ .emit_ib_size = 4, /* gfx_v8_0_ring_emit_ib_gfx */
.emit_ib = gfx_v8_0_ring_emit_ib_gfx,
.emit_fence = gfx_v8_0_ring_emit_fence_gfx,
.emit_pipeline_sync = gfx_v8_0_ring_emit_pipeline_sync,
@@ -6578,15 +6434,23 @@ static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_gfx = {
.pad_ib = amdgpu_ring_generic_pad_ib,
.emit_switch_buffer = gfx_v8_ring_emit_sb,
.emit_cntxcntl = gfx_v8_ring_emit_cntxcntl,
- .get_emit_ib_size = gfx_v8_0_ring_get_emit_ib_size_gfx,
- .get_dma_frame_size = gfx_v8_0_ring_get_dma_frame_size_gfx,
};
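The new emit_frame_size constant replaces the per-call get_dma_frame_size hooks with a worst-case dword count fixed at build time. Summing the terms above as a quick sanity check:

#include <stdio.h>

int main(void)
{
	/* terms copied from gfx_v8_0_ring_funcs_gfx.emit_frame_size */
	int frame = 20 + 7 + 5 + (6 + 6 + 6) + 7 + (128 + 19) + 2 + (3 + 4);
	printf("%d dwords reserved per gfx frame\n", frame);	/* 213 */
	return 0;
}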
static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_compute = {
+ .type = AMDGPU_RING_TYPE_COMPUTE,
+ .align_mask = 0xff,
+ .nop = PACKET3(PACKET3_NOP, 0x3FFF),
.get_rptr = gfx_v8_0_ring_get_rptr,
.get_wptr = gfx_v8_0_ring_get_wptr_compute,
.set_wptr = gfx_v8_0_ring_set_wptr_compute,
- .parse_cs = NULL,
+ .emit_frame_size =
+ 20 + /* gfx_v8_0_ring_emit_gds_switch */
+ 7 + /* gfx_v8_0_ring_emit_hdp_flush */
+ 5 + /* gfx_v8_0_ring_emit_hdp_invalidate */
+ 7 + /* gfx_v8_0_ring_emit_pipeline_sync */
+ 17 + /* gfx_v8_0_ring_emit_vm_flush */
+ 7 + 7 + 7, /* gfx_v8_0_ring_emit_fence_compute x3 for user fence, vm fence */
+ .emit_ib_size = 4, /* gfx_v8_0_ring_emit_ib_compute */
.emit_ib = gfx_v8_0_ring_emit_ib_compute,
.emit_fence = gfx_v8_0_ring_emit_fence_compute,
.emit_pipeline_sync = gfx_v8_0_ring_emit_pipeline_sync,
@@ -6598,8 +6462,6 @@ static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_compute = {
.test_ib = gfx_v8_0_ring_test_ib,
.insert_nop = amdgpu_ring_insert_nop,
.pad_ib = amdgpu_ring_generic_pad_ib,
- .get_emit_ib_size = gfx_v8_0_ring_get_emit_ib_size_compute,
- .get_dma_frame_size = gfx_v8_0_ring_get_dma_frame_size_compute,
};
static void gfx_v8_0_set_ring_funcs(struct amdgpu_device *adev)
@@ -6752,3 +6614,21 @@ static void gfx_v8_0_get_cu_info(struct amdgpu_device *adev)
cu_info->number = active_cu_number;
cu_info->ao_cu_mask = ao_cu_mask;
}
+
+const struct amdgpu_ip_block_version gfx_v8_0_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_GFX,
+ .major = 8,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &gfx_v8_0_ip_funcs,
+};
+
+const struct amdgpu_ip_block_version gfx_v8_1_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_GFX,
+ .major = 8,
+ .minor = 1,
+ .rev = 0,
+ .funcs = &gfx_v8_0_ip_funcs,
+};
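With gfx_v8_0_ip_funcs now static, external code is expected to hold on to these versioned block descriptors instead. A hypothetical registration call from SoC setup code; amdgpu_ip_block_add() belongs to the same rework but does not appear in this hunk, so treat the name as an assumption:

/* Sketch: vi.c-style SoC init registering the gfx block by version. */
int r;

r = amdgpu_ip_block_add(adev, &gfx_v8_0_ip_block);
if (r)
	return r;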
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.h b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.h
index ebed1f829297..788cc3ab584b 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.h
@@ -24,6 +24,7 @@
#ifndef __GFX_V8_0_H__
#define __GFX_V8_0_H__
-extern const struct amd_ip_funcs gfx_v8_0_ip_funcs;
+extern const struct amdgpu_ip_block_version gfx_v8_0_ip_block;
+extern const struct amdgpu_ip_block_version gfx_v8_1_ip_block;
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
index b13c8aaec078..45a573e63d4a 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
@@ -1,4 +1,3 @@
-
/*
* Copyright 2014 Advanced Micro Devices, Inc.
*
@@ -26,7 +25,16 @@
#include "amdgpu.h"
#include "gmc_v6_0.h"
#include "amdgpu_ucode.h"
-#include "si/sid.h"
+
+#include "bif/bif_3_0_d.h"
+#include "bif/bif_3_0_sh_mask.h"
+#include "oss/oss_1_0_d.h"
+#include "oss/oss_1_0_sh_mask.h"
+#include "gmc/gmc_6_0_d.h"
+#include "gmc/gmc_6_0_sh_mask.h"
+#include "dce/dce_6_0_d.h"
+#include "dce/dce_6_0_sh_mask.h"
+#include "si_enums.h"
static void gmc_v6_0_set_gart_funcs(struct amdgpu_device *adev);
static void gmc_v6_0_set_irq_funcs(struct amdgpu_device *adev);
@@ -37,6 +45,16 @@ MODULE_FIRMWARE("radeon/pitcairn_mc.bin");
MODULE_FIRMWARE("radeon/verde_mc.bin");
MODULE_FIRMWARE("radeon/oland_mc.bin");
+#define MC_SEQ_MISC0__MT__MASK 0xf0000000
+#define MC_SEQ_MISC0__MT__GDDR1 0x10000000
+#define MC_SEQ_MISC0__MT__DDR2 0x20000000
+#define MC_SEQ_MISC0__MT__GDDR3 0x30000000
+#define MC_SEQ_MISC0__MT__GDDR4 0x40000000
+#define MC_SEQ_MISC0__MT__GDDR5 0x50000000
+#define MC_SEQ_MISC0__MT__HBM 0x60000000
+#define MC_SEQ_MISC0__MT__DDR3 0xB0000000
+
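The block of MC_SEQ_MISC0__MT__* defines added above encodes the DRAM flavor in the register's top nibble. A standalone illustration of the decode (the readback value is invented):

#include <stdio.h>
#include <stdint.h>

#define MC_SEQ_MISC0__MT__MASK	0xf0000000u
#define MC_SEQ_MISC0__MT__GDDR5	0x50000000u

int main(void)
{
	uint32_t misc0 = 0x5abc1234;	/* pretend RREG32(mmMC_SEQ_MISC0) result */

	if ((misc0 & MC_SEQ_MISC0__MT__MASK) == MC_SEQ_MISC0__MT__GDDR5)
		printf("vram type: GDDR5\n");
	return 0;
}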
static const u32 crtc_offsets[6] =
{
SI_CRTC0_REGISTER_OFFSET,
@@ -57,14 +75,14 @@ static void gmc_v6_0_mc_stop(struct amdgpu_device *adev,
gmc_v6_0_wait_for_idle((void *)adev);
- blackout = RREG32(MC_SHARED_BLACKOUT_CNTL);
- if (REG_GET_FIELD(blackout, mmMC_SHARED_BLACKOUT_CNTL, xxBLACKOUT_MODE) != 1) {
+ blackout = RREG32(mmMC_SHARED_BLACKOUT_CNTL);
+ if (REG_GET_FIELD(blackout, MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE) != 1) {
/* Block CPU access */
- WREG32(BIF_FB_EN, 0);
+ WREG32(mmBIF_FB_EN, 0);
/* blackout the MC */
blackout = REG_SET_FIELD(blackout,
- mmMC_SHARED_BLACKOUT_CNTL, xxBLACKOUT_MODE, 0);
- WREG32(MC_SHARED_BLACKOUT_CNTL, blackout | 1);
+ MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE, 0);
+ WREG32(mmMC_SHARED_BLACKOUT_CNTL, blackout | 1);
}
/* wait for the MC to settle */
udelay(100);
@@ -77,13 +95,13 @@ static void gmc_v6_0_mc_resume(struct amdgpu_device *adev,
u32 tmp;
/* unblackout the MC */
- tmp = RREG32(MC_SHARED_BLACKOUT_CNTL);
- tmp = REG_SET_FIELD(tmp, mmMC_SHARED_BLACKOUT_CNTL, xxBLACKOUT_MODE, 0);
- WREG32(MC_SHARED_BLACKOUT_CNTL, tmp);
+ tmp = RREG32(mmMC_SHARED_BLACKOUT_CNTL);
+ tmp = REG_SET_FIELD(tmp, MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE, 0);
+ WREG32(mmMC_SHARED_BLACKOUT_CNTL, tmp);
/* allow CPU access */
- tmp = REG_SET_FIELD(0, mmBIF_FB_EN, xxFB_READ_EN, 1);
- tmp = REG_SET_FIELD(tmp, mmBIF_FB_EN, xxFB_WRITE_EN, 1);
- WREG32(BIF_FB_EN, tmp);
+ tmp = REG_SET_FIELD(0, BIF_FB_EN, FB_READ_EN, 1);
+ tmp = REG_SET_FIELD(tmp, BIF_FB_EN, FB_WRITE_EN, 1);
+ WREG32(mmBIF_FB_EN, tmp);
if (adev->mode_info.num_crtc)
amdgpu_display_resume_mc_access(adev, save);
@@ -158,37 +176,37 @@ static int gmc_v6_0_mc_load_microcode(struct amdgpu_device *adev)
new_fw_data = (const __le32 *)
(adev->mc.fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
- running = RREG32(MC_SEQ_SUP_CNTL) & RUN_MASK;
+ running = RREG32(mmMC_SEQ_SUP_CNTL) & MC_SEQ_SUP_CNTL__RUN_MASK;
if (running == 0) {
/* reset the engine and set to writable */
- WREG32(MC_SEQ_SUP_CNTL, 0x00000008);
- WREG32(MC_SEQ_SUP_CNTL, 0x00000010);
+ WREG32(mmMC_SEQ_SUP_CNTL, 0x00000008);
+ WREG32(mmMC_SEQ_SUP_CNTL, 0x00000010);
/* load mc io regs */
for (i = 0; i < regs_size; i++) {
- WREG32(MC_SEQ_IO_DEBUG_INDEX, le32_to_cpup(new_io_mc_regs++));
- WREG32(MC_SEQ_IO_DEBUG_DATA, le32_to_cpup(new_io_mc_regs++));
+ WREG32(mmMC_SEQ_IO_DEBUG_INDEX, le32_to_cpup(new_io_mc_regs++));
+ WREG32(mmMC_SEQ_IO_DEBUG_DATA, le32_to_cpup(new_io_mc_regs++));
}
/* load the MC ucode */
for (i = 0; i < ucode_size; i++) {
- WREG32(MC_SEQ_SUP_PGM, le32_to_cpup(new_fw_data++));
+ WREG32(mmMC_SEQ_SUP_PGM, le32_to_cpup(new_fw_data++));
}
/* put the engine back into the active state */
- WREG32(MC_SEQ_SUP_CNTL, 0x00000008);
- WREG32(MC_SEQ_SUP_CNTL, 0x00000004);
- WREG32(MC_SEQ_SUP_CNTL, 0x00000001);
+ WREG32(mmMC_SEQ_SUP_CNTL, 0x00000008);
+ WREG32(mmMC_SEQ_SUP_CNTL, 0x00000004);
+ WREG32(mmMC_SEQ_SUP_CNTL, 0x00000001);
/* wait for training to complete */
for (i = 0; i < adev->usec_timeout; i++) {
- if (RREG32(MC_SEQ_TRAIN_WAKEUP_CNTL) & TRAIN_DONE_D0)
+ if (RREG32(mmMC_SEQ_TRAIN_WAKEUP_CNTL) & MC_SEQ_TRAIN_WAKEUP_CNTL__TRAIN_DONE_D0_MASK)
break;
udelay(1);
}
for (i = 0; i < adev->usec_timeout; i++) {
- if (RREG32(MC_SEQ_TRAIN_WAKEUP_CNTL) & TRAIN_DONE_D1)
+ if (RREG32(mmMC_SEQ_TRAIN_WAKEUP_CNTL) & MC_SEQ_TRAIN_WAKEUP_CNTL__TRAIN_DONE_D1_MASK)
break;
udelay(1);
}
@@ -225,7 +243,7 @@ static void gmc_v6_0_mc_program(struct amdgpu_device *adev)
WREG32((0xb08 + j), 0x00000000);
WREG32((0xb09 + j), 0x00000000);
}
- WREG32(HDP_REG_COHERENCY_FLUSH_CNTL, 0);
+ WREG32(mmHDP_REG_COHERENCY_FLUSH_CNTL, 0);
gmc_v6_0_mc_stop(adev, &save);
@@ -233,24 +251,24 @@ static void gmc_v6_0_mc_program(struct amdgpu_device *adev)
dev_warn(adev->dev, "Wait for MC idle timedout !\n");
}
- WREG32(VGA_HDP_CONTROL, VGA_MEMORY_DISABLE);
+ WREG32(mmVGA_HDP_CONTROL, VGA_HDP_CONTROL__VGA_MEMORY_DISABLE_MASK);
/* Update configuration */
- WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
+ WREG32(mmMC_VM_SYSTEM_APERTURE_LOW_ADDR,
adev->mc.vram_start >> 12);
- WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
+ WREG32(mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
adev->mc.vram_end >> 12);
- WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR,
+ WREG32(mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR,
adev->vram_scratch.gpu_addr >> 12);
tmp = ((adev->mc.vram_end >> 24) & 0xFFFF) << 16;
tmp |= ((adev->mc.vram_start >> 24) & 0xFFFF);
- WREG32(MC_VM_FB_LOCATION, tmp);
+ WREG32(mmMC_VM_FB_LOCATION, tmp);
/* XXX double check these! */
- WREG32(HDP_NONSURFACE_BASE, (adev->mc.vram_start >> 8));
- WREG32(HDP_NONSURFACE_INFO, (2 << 7) | (1 << 30));
- WREG32(HDP_NONSURFACE_SIZE, 0x3FFFFFFF);
- WREG32(MC_VM_AGP_BASE, 0);
- WREG32(MC_VM_AGP_TOP, 0x0FFFFFFF);
- WREG32(MC_VM_AGP_BOT, 0x0FFFFFFF);
+ WREG32(mmHDP_NONSURFACE_BASE, (adev->mc.vram_start >> 8));
+ WREG32(mmHDP_NONSURFACE_INFO, (2 << 7) | (1 << 30));
+ WREG32(mmHDP_NONSURFACE_SIZE, 0x3FFFFFFF);
+ WREG32(mmMC_VM_AGP_BASE, 0);
+ WREG32(mmMC_VM_AGP_TOP, 0x0FFFFFFF);
+ WREG32(mmMC_VM_AGP_BOT, 0x0FFFFFFF);
if (gmc_v6_0_wait_for_idle((void *)adev)) {
dev_warn(adev->dev, "Wait for MC idle timedout !\n");
@@ -265,16 +283,16 @@ static int gmc_v6_0_mc_init(struct amdgpu_device *adev)
u32 tmp;
int chansize, numchan;
- tmp = RREG32(MC_ARB_RAMCFG);
- if (tmp & CHANSIZE_OVERRIDE) {
+ tmp = RREG32(mmMC_ARB_RAMCFG);
+ if (tmp & (1 << 11)) {
chansize = 16;
- } else if (tmp & CHANSIZE_MASK) {
+ } else if (tmp & MC_ARB_RAMCFG__CHANSIZE_MASK) {
chansize = 64;
} else {
chansize = 32;
}
- tmp = RREG32(MC_SHARED_CHMAP);
- switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
+ tmp = RREG32(mmMC_SHARED_CHMAP);
+ switch ((tmp & MC_SHARED_CHMAP__NOOFCHAN_MASK) >> MC_SHARED_CHMAP__NOOFCHAN__SHIFT) {
case 0:
default:
numchan = 1;
@@ -309,15 +327,15 @@ static int gmc_v6_0_mc_init(struct amdgpu_device *adev)
adev->mc.aper_base = pci_resource_start(adev->pdev, 0);
adev->mc.aper_size = pci_resource_len(adev->pdev, 0);
/* size in MB on si */
- adev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE) * 1024ULL * 1024ULL;
- adev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE) * 1024ULL * 1024ULL;
+ adev->mc.mc_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL;
+ adev->mc.real_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL;
adev->mc.visible_vram_size = adev->mc.aper_size;
/* unless the user has overridden it, set the gart
* size to 1024 MB or the vram size, whichever is larger.
*/
if (amdgpu_gart_size == -1)
- adev->mc.gtt_size = amdgpu_ttm_get_gtt_mem_size(adev);
+ adev->mc.gtt_size = max((1024ULL << 20), adev->mc.mc_vram_size);
else
adev->mc.gtt_size = (uint64_t)amdgpu_gart_size << 20;
@@ -329,9 +347,9 @@ static int gmc_v6_0_mc_init(struct amdgpu_device *adev)
static void gmc_v6_0_gart_flush_gpu_tlb(struct amdgpu_device *adev,
uint32_t vmid)
{
- WREG32(HDP_MEM_COHERENCY_FLUSH_CNTL, 0);
+ WREG32(mmHDP_MEM_COHERENCY_FLUSH_CNTL, 0);
- WREG32(VM_INVALIDATE_REQUEST, 1 << vmid);
+ WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid);
}
static int gmc_v6_0_gart_set_pte_pde(struct amdgpu_device *adev,
@@ -355,20 +373,20 @@ static void gmc_v6_0_set_fault_enable_default(struct amdgpu_device *adev,
{
u32 tmp;
- tmp = RREG32(VM_CONTEXT1_CNTL);
- tmp = REG_SET_FIELD(tmp, mmVM_CONTEXT1_CNTL,
- xxRANGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
- tmp = REG_SET_FIELD(tmp, mmVM_CONTEXT1_CNTL,
- xxDUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
- tmp = REG_SET_FIELD(tmp, mmVM_CONTEXT1_CNTL,
- xxPDE0_PROTECTION_FAULT_ENABLE_DEFAULT, value);
- tmp = REG_SET_FIELD(tmp, mmVM_CONTEXT1_CNTL,
- xxVALID_PROTECTION_FAULT_ENABLE_DEFAULT, value);
- tmp = REG_SET_FIELD(tmp, mmVM_CONTEXT1_CNTL,
- xxREAD_PROTECTION_FAULT_ENABLE_DEFAULT, value);
- tmp = REG_SET_FIELD(tmp, mmVM_CONTEXT1_CNTL,
- xxWRITE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
- WREG32(VM_CONTEXT1_CNTL, tmp);
+ tmp = RREG32(mmVM_CONTEXT1_CNTL);
+ tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
+ RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
+ tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
+ DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
+ tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
+ PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, value);
+ tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
+ VALID_PROTECTION_FAULT_ENABLE_DEFAULT, value);
+ tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
+ READ_PROTECTION_FAULT_ENABLE_DEFAULT, value);
+ tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
+ WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
+ WREG32(mmVM_CONTEXT1_CNTL, tmp);
}
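This file-wide conversion trades the old sid.h-style xx-prefixed field names for the generated REG__FIELD_MASK / REG__FIELD__SHIFT pairs that REG_SET_FIELD pastes together. A self-contained sketch of that expansion (the macro body mirrors the kernel's token pasting; the sample mask and shift values are made up):

#include <stdio.h>
#include <stdint.h>

#define VM_CONTEXT1_CNTL__PAGE_TABLE_DEPTH_MASK		0x00000006u
#define VM_CONTEXT1_CNTL__PAGE_TABLE_DEPTH__SHIFT	0x1

#define REG_SET_FIELD(orig, reg, field, val) \
	(((orig) & ~reg##__##field##_MASK) | \
	 (((val) << reg##__##field##__SHIFT) & reg##__##field##_MASK))

int main(void)
{
	uint32_t tmp = 0;

	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PAGE_TABLE_DEPTH, 1);
	printf("0x%08x\n", tmp);	/* 0x00000002 */
	return 0;
}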
static int gmc_v6_0_gart_enable(struct amdgpu_device *adev)
@@ -383,33 +401,39 @@ static int gmc_v6_0_gart_enable(struct amdgpu_device *adev)
if (r)
return r;
/* Setup TLB control */
- WREG32(MC_VM_MX_L1_TLB_CNTL,
+ WREG32(mmMC_VM_MX_L1_TLB_CNTL,
(0xA << 7) |
- ENABLE_L1_TLB |
- ENABLE_L1_FRAGMENT_PROCESSING |
- SYSTEM_ACCESS_MODE_NOT_IN_SYS |
- ENABLE_ADVANCED_DRIVER_MODEL |
- SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU);
+ MC_VM_MX_L1_TLB_CNTL__ENABLE_L1_TLB_MASK |
+ MC_VM_MX_L1_TLB_CNTL__ENABLE_L1_FRAGMENT_PROCESSING_MASK |
+ MC_VM_MX_L1_TLB_CNTL__SYSTEM_ACCESS_MODE_MASK |
+ MC_VM_MX_L1_TLB_CNTL__ENABLE_ADVANCED_DRIVER_MODEL_MASK |
+ (0UL << MC_VM_MX_L1_TLB_CNTL__SYSTEM_APERTURE_UNMAPPED_ACCESS__SHIFT));
/* Setup L2 cache */
- WREG32(VM_L2_CNTL, ENABLE_L2_CACHE |
- ENABLE_L2_FRAGMENT_PROCESSING |
- ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
- ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE |
- EFFECTIVE_L2_QUEUE_SIZE(7) |
- CONTEXT1_IDENTITY_ACCESS_MODE(1));
- WREG32(VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS | INVALIDATE_L2_CACHE);
- WREG32(VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY |
- BANK_SELECT(4) |
- L2_CACHE_BIGK_FRAGMENT_SIZE(4));
+ WREG32(mmVM_L2_CNTL,
+ VM_L2_CNTL__ENABLE_L2_CACHE_MASK |
+ VM_L2_CNTL__ENABLE_L2_FRAGMENT_PROCESSING_MASK |
+ VM_L2_CNTL__ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE_MASK |
+ VM_L2_CNTL__ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE_MASK |
+ (7UL << VM_L2_CNTL__EFFECTIVE_L2_QUEUE_SIZE__SHIFT) |
+ (1UL << VM_L2_CNTL__CONTEXT1_IDENTITY_ACCESS_MODE__SHIFT));
+ WREG32(mmVM_L2_CNTL2,
+ VM_L2_CNTL2__INVALIDATE_ALL_L1_TLBS_MASK |
+ VM_L2_CNTL2__INVALIDATE_L2_CACHE_MASK);
+ WREG32(mmVM_L2_CNTL3,
+ VM_L2_CNTL3__L2_CACHE_BIGK_ASSOCIATIVITY_MASK |
+ (4UL << VM_L2_CNTL3__BANK_SELECT__SHIFT) |
+ (4UL << VM_L2_CNTL3__L2_CACHE_BIGK_FRAGMENT_SIZE__SHIFT));
/* setup context0 */
- WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, adev->mc.gtt_start >> 12);
- WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, adev->mc.gtt_end >> 12);
- WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, adev->gart.table_addr >> 12);
- WREG32(VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
+ WREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR, adev->mc.gtt_start >> 12);
+ WREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR, adev->mc.gtt_end >> 12);
+ WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR, adev->gart.table_addr >> 12);
+ WREG32(mmVM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
(u32)(adev->dummy_page.addr >> 12));
- WREG32(VM_CONTEXT0_CNTL2, 0);
- WREG32(VM_CONTEXT0_CNTL, (ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) |
- RANGE_PROTECTION_FAULT_ENABLE_DEFAULT));
+ WREG32(mmVM_CONTEXT0_CNTL2, 0);
+ WREG32(mmVM_CONTEXT0_CNTL,
+ VM_CONTEXT0_CNTL__ENABLE_CONTEXT_MASK |
+ (0UL << VM_CONTEXT0_CNTL__PAGE_TABLE_DEPTH__SHIFT) |
+ VM_CONTEXT0_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK);
WREG32(0x575, 0);
WREG32(0x576, 0);
@@ -417,39 +441,41 @@ static int gmc_v6_0_gart_enable(struct amdgpu_device *adev)
/* empty context1-15 */
/* set vm size, must be a multiple of 4 */
- WREG32(VM_CONTEXT1_PAGE_TABLE_START_ADDR, 0);
- WREG32(VM_CONTEXT1_PAGE_TABLE_END_ADDR, adev->vm_manager.max_pfn - 1);
+ WREG32(mmVM_CONTEXT1_PAGE_TABLE_START_ADDR, 0);
+ WREG32(mmVM_CONTEXT1_PAGE_TABLE_END_ADDR, adev->vm_manager.max_pfn - 1);
/* Assign the pt base to something valid for now; the pts used for
* the VMs are determined by the application and set up and assigned
* on the fly in the vm part of radeon_gart.c
*/
for (i = 1; i < 16; i++) {
if (i < 8)
- WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + i,
+ WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + i,
adev->gart.table_addr >> 12);
else
- WREG32(VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + i - 8,
+ WREG32(mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + i - 8,
adev->gart.table_addr >> 12);
}
/* enable context1-15 */
- WREG32(VM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR,
+ WREG32(mmVM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR,
(u32)(adev->dummy_page.addr >> 12));
- WREG32(VM_CONTEXT1_CNTL2, 4);
- WREG32(VM_CONTEXT1_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(1) |
- PAGE_TABLE_BLOCK_SIZE(amdgpu_vm_block_size - 9) |
- RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT |
- RANGE_PROTECTION_FAULT_ENABLE_DEFAULT |
- DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT |
- DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT |
- PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT |
- PDE0_PROTECTION_FAULT_ENABLE_DEFAULT |
- VALID_PROTECTION_FAULT_ENABLE_INTERRUPT |
- VALID_PROTECTION_FAULT_ENABLE_DEFAULT |
- READ_PROTECTION_FAULT_ENABLE_INTERRUPT |
- READ_PROTECTION_FAULT_ENABLE_DEFAULT |
- WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT |
- WRITE_PROTECTION_FAULT_ENABLE_DEFAULT);
+ WREG32(mmVM_CONTEXT1_CNTL2, 4);
+ WREG32(mmVM_CONTEXT1_CNTL,
+ VM_CONTEXT1_CNTL__ENABLE_CONTEXT_MASK |
+ (1UL << VM_CONTEXT1_CNTL__PAGE_TABLE_DEPTH__SHIFT) |
+ ((amdgpu_vm_block_size - 9) << VM_CONTEXT1_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT) |
+ VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
+ VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK |
+ VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
+ VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK |
+ VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
+ VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK |
+ VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
+ VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK |
+ VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
+ VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT_MASK |
+ VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
+ VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK);
gmc_v6_0_gart_flush_gpu_tlb(adev, 0);
dev_info(adev->dev, "PCIE GART of %uM enabled (table at 0x%016llX).\n",
@@ -488,19 +514,22 @@ static void gmc_v6_0_gart_disable(struct amdgpu_device *adev)
}*/
/* Disable all tables */
- WREG32(VM_CONTEXT0_CNTL, 0);
- WREG32(VM_CONTEXT1_CNTL, 0);
+ WREG32(mmVM_CONTEXT0_CNTL, 0);
+ WREG32(mmVM_CONTEXT1_CNTL, 0);
/* Setup TLB control */
- WREG32(MC_VM_MX_L1_TLB_CNTL, SYSTEM_ACCESS_MODE_NOT_IN_SYS |
- SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU);
+ WREG32(mmMC_VM_MX_L1_TLB_CNTL,
+ MC_VM_MX_L1_TLB_CNTL__SYSTEM_ACCESS_MODE_MASK |
+ (0UL << MC_VM_MX_L1_TLB_CNTL__SYSTEM_APERTURE_UNMAPPED_ACCESS__SHIFT));
/* Setup L2 cache */
- WREG32(VM_L2_CNTL, ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
- ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE |
- EFFECTIVE_L2_QUEUE_SIZE(7) |
- CONTEXT1_IDENTITY_ACCESS_MODE(1));
- WREG32(VM_L2_CNTL2, 0);
- WREG32(VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY |
- L2_CACHE_BIGK_FRAGMENT_SIZE(0));
+ WREG32(mmVM_L2_CNTL,
+ VM_L2_CNTL__ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE_MASK |
+ VM_L2_CNTL__ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE_MASK |
+ (7UL << VM_L2_CNTL__EFFECTIVE_L2_QUEUE_SIZE__SHIFT) |
+ (1UL << VM_L2_CNTL__CONTEXT1_IDENTITY_ACCESS_MODE__SHIFT));
+ WREG32(mmVM_L2_CNTL2, 0);
+ WREG32(mmVM_L2_CNTL3,
+ VM_L2_CNTL3__L2_CACHE_BIGK_ASSOCIATIVITY_MASK |
+ (0UL << VM_L2_CNTL3__L2_CACHE_BIGK_FRAGMENT_SIZE__SHIFT));
amdgpu_gart_table_vram_unpin(adev);
}
@@ -523,7 +552,7 @@ static int gmc_v6_0_vm_init(struct amdgpu_device *adev)
/* base offset of vram pages */
if (adev->flags & AMD_IS_APU) {
- u64 tmp = RREG32(MC_VM_FB_OFFSET);
+ u64 tmp = RREG32(mmMC_VM_FB_OFFSET);
tmp <<= 22;
adev->vm_manager.vram_base_offset = tmp;
} else
@@ -540,19 +569,19 @@ static void gmc_v6_0_vm_decode_fault(struct amdgpu_device *adev,
u32 status, u32 addr, u32 mc_client)
{
u32 mc_id;
- u32 vmid = REG_GET_FIELD(status, mmVM_CONTEXT1_PROTECTION_FAULT_STATUS, xxVMID);
- u32 protections = REG_GET_FIELD(status, mmVM_CONTEXT1_PROTECTION_FAULT_STATUS,
- xxPROTECTIONS);
+ u32 vmid = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS, VMID);
+ u32 protections = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
+ PROTECTIONS);
char block[5] = { mc_client >> 24, (mc_client >> 16) & 0xff,
(mc_client >> 8) & 0xff, mc_client & 0xff, 0 };
- mc_id = REG_GET_FIELD(status, mmVM_CONTEXT1_PROTECTION_FAULT_STATUS,
- xxMEMORY_CLIENT_ID);
+ mc_id = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
+ MEMORY_CLIENT_ID);
dev_err(adev->dev, "VM fault (0x%02x, vmid %d) at page %u, %s from '%s' (0x%08x) (%d)\n",
protections, vmid, addr,
- REG_GET_FIELD(status, mmVM_CONTEXT1_PROTECTION_FAULT_STATUS,
- xxMEMORY_CLIENT_RW) ?
+ REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
+ MEMORY_CLIENT_RW) ?
"write" : "read", block, mc_client, mc_id);
}
@@ -655,7 +684,7 @@ static void gmc_v6_0_enable_hdp_mgcg(struct amdgpu_device *adev,
{
u32 orig, data;
- orig = data = RREG32(HDP_HOST_PATH_CNTL);
+ orig = data = RREG32(mmHDP_HOST_PATH_CNTL);
if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_HDP_MGCG))
data = REG_SET_FIELD(data, HDP_HOST_PATH_CNTL, CLOCK_GATING_DIS, 0);
@@ -663,7 +692,7 @@ static void gmc_v6_0_enable_hdp_mgcg(struct amdgpu_device *adev,
data = REG_SET_FIELD(data, HDP_HOST_PATH_CNTL, CLOCK_GATING_DIS, 1);
if (orig != data)
- WREG32(HDP_HOST_PATH_CNTL, data);
+ WREG32(mmHDP_HOST_PATH_CNTL, data);
}
static void gmc_v6_0_enable_hdp_ls(struct amdgpu_device *adev,
@@ -671,7 +700,7 @@ static void gmc_v6_0_enable_hdp_ls(struct amdgpu_device *adev,
{
u32 orig, data;
- orig = data = RREG32(HDP_MEM_POWER_LS);
+ orig = data = RREG32(mmHDP_MEM_POWER_LS);
if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_HDP_LS))
data = REG_SET_FIELD(data, HDP_MEM_POWER_LS, LS_ENABLE, 1);
@@ -679,7 +708,7 @@ static void gmc_v6_0_enable_hdp_ls(struct amdgpu_device *adev,
data = REG_SET_FIELD(data, HDP_MEM_POWER_LS, LS_ENABLE, 0);
if (orig != data)
- WREG32(HDP_MEM_POWER_LS, data);
+ WREG32(mmHDP_MEM_POWER_LS, data);
}
*/
@@ -713,7 +742,7 @@ static int gmc_v6_0_early_init(void *handle)
if (adev->flags & AMD_IS_APU) {
adev->mc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
} else {
- u32 tmp = RREG32(MC_SEQ_MISC0);
+ u32 tmp = RREG32(mmMC_SEQ_MISC0);
tmp &= MC_SEQ_MISC0__MT__MASK;
adev->mc.vram_type = gmc_v6_0_convert_vram_type(tmp);
}
@@ -766,11 +795,6 @@ static int gmc_v6_0_sw_init(void *handle)
return r;
}
- r = amdgpu_ttm_global_init(adev);
- if (r) {
- return r;
- }
-
r = gmc_v6_0_mc_init(adev);
if (r)
return r;
@@ -879,7 +903,7 @@ static int gmc_v6_0_resume(void *handle)
static bool gmc_v6_0_is_idle(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- u32 tmp = RREG32(SRBM_STATUS);
+ u32 tmp = RREG32(mmSRBM_STATUS);
if (tmp & (SRBM_STATUS__MCB_BUSY_MASK | SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
SRBM_STATUS__MCC_BUSY_MASK | SRBM_STATUS__MCD_BUSY_MASK | SRBM_STATUS__VMC_BUSY_MASK))
@@ -895,7 +919,7 @@ static int gmc_v6_0_wait_for_idle(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
for (i = 0; i < adev->usec_timeout; i++) {
- tmp = RREG32(SRBM_STATUS) & (SRBM_STATUS__MCB_BUSY_MASK |
+ tmp = RREG32(mmSRBM_STATUS) & (SRBM_STATUS__MCB_BUSY_MASK |
SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
SRBM_STATUS__MCC_BUSY_MASK |
SRBM_STATUS__MCD_BUSY_MASK |
@@ -913,17 +937,17 @@ static int gmc_v6_0_soft_reset(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct amdgpu_mode_mc_save save;
u32 srbm_soft_reset = 0;
- u32 tmp = RREG32(SRBM_STATUS);
+ u32 tmp = RREG32(mmSRBM_STATUS);
if (tmp & SRBM_STATUS__VMC_BUSY_MASK)
srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
- mmSRBM_SOFT_RESET, xxSOFT_RESET_VMC, 1);
+ SRBM_SOFT_RESET, SOFT_RESET_VMC, 1);
if (tmp & (SRBM_STATUS__MCB_BUSY_MASK | SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
SRBM_STATUS__MCC_BUSY_MASK | SRBM_STATUS__MCD_BUSY_MASK)) {
if (!(adev->flags & AMD_IS_APU))
srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
- mmSRBM_SOFT_RESET, xxSOFT_RESET_MC, 1);
+ SRBM_SOFT_RESET, SOFT_RESET_MC, 1);
}
if (srbm_soft_reset) {
@@ -933,17 +957,17 @@ static int gmc_v6_0_soft_reset(void *handle)
}
- tmp = RREG32(SRBM_SOFT_RESET);
+ tmp = RREG32(mmSRBM_SOFT_RESET);
tmp |= srbm_soft_reset;
dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
- WREG32(SRBM_SOFT_RESET, tmp);
- tmp = RREG32(SRBM_SOFT_RESET);
+ WREG32(mmSRBM_SOFT_RESET, tmp);
+ tmp = RREG32(mmSRBM_SOFT_RESET);
udelay(50);
tmp &= ~srbm_soft_reset;
- WREG32(SRBM_SOFT_RESET, tmp);
- tmp = RREG32(SRBM_SOFT_RESET);
+ WREG32(mmSRBM_SOFT_RESET, tmp);
+ tmp = RREG32(mmSRBM_SOFT_RESET);
udelay(50);
@@ -969,20 +993,20 @@ static int gmc_v6_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
switch (state) {
case AMDGPU_IRQ_STATE_DISABLE:
- tmp = RREG32(VM_CONTEXT0_CNTL);
+ tmp = RREG32(mmVM_CONTEXT0_CNTL);
tmp &= ~bits;
- WREG32(VM_CONTEXT0_CNTL, tmp);
- tmp = RREG32(VM_CONTEXT1_CNTL);
+ WREG32(mmVM_CONTEXT0_CNTL, tmp);
+ tmp = RREG32(mmVM_CONTEXT1_CNTL);
tmp &= ~bits;
- WREG32(VM_CONTEXT1_CNTL, tmp);
+ WREG32(mmVM_CONTEXT1_CNTL, tmp);
break;
case AMDGPU_IRQ_STATE_ENABLE:
- tmp = RREG32(VM_CONTEXT0_CNTL);
+ tmp = RREG32(mmVM_CONTEXT0_CNTL);
tmp |= bits;
- WREG32(VM_CONTEXT0_CNTL, tmp);
- tmp = RREG32(VM_CONTEXT1_CNTL);
+ WREG32(mmVM_CONTEXT0_CNTL, tmp);
+ tmp = RREG32(mmVM_CONTEXT1_CNTL);
tmp |= bits;
- WREG32(VM_CONTEXT1_CNTL, tmp);
+ WREG32(mmVM_CONTEXT1_CNTL, tmp);
break;
default:
break;
@@ -997,9 +1021,9 @@ static int gmc_v6_0_process_interrupt(struct amdgpu_device *adev,
{
u32 addr, status;
- addr = RREG32(VM_CONTEXT1_PROTECTION_FAULT_ADDR);
- status = RREG32(VM_CONTEXT1_PROTECTION_FAULT_STATUS);
- WREG32_P(VM_CONTEXT1_CNTL2, 1, ~1);
+ addr = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_ADDR);
+ status = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_STATUS);
+ WREG32_P(mmVM_CONTEXT1_CNTL2, 1, ~1);
if (!addr && !status)
return 0;
@@ -1007,13 +1031,15 @@ static int gmc_v6_0_process_interrupt(struct amdgpu_device *adev,
if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_FIRST)
gmc_v6_0_set_fault_enable_default(adev, false);
- dev_err(adev->dev, "GPU fault detected: %d 0x%08x\n",
- entry->src_id, entry->src_data);
- dev_err(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n",
- addr);
- dev_err(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
- status);
- gmc_v6_0_vm_decode_fault(adev, status, addr, 0);
+ if (printk_ratelimit()) {
+ dev_err(adev->dev, "GPU fault detected: %d 0x%08x\n",
+ entry->src_id, entry->src_data);
+ dev_err(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n",
+ addr);
+ dev_err(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
+ status);
+ gmc_v6_0_vm_decode_fault(adev, status, addr, 0);
+ }
return 0;
}
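
Wrapping the fault dump in printk_ratelimit() keeps a misbehaving application from flooding dmesg, since a broken shader can raise thousands of identical VM faults per second. printk_ratelimit() shares one global ratelimit state across all callers; a per-callsite variant is sketched below under the assumption that <linux/ratelimit.h> is available (the helper name is hypothetical, not part of this patch):

#include <linux/device.h>
#include <linux/ratelimit.h>

static DEFINE_RATELIMIT_STATE(vm_fault_rs, DEFAULT_RATELIMIT_INTERVAL,
			      DEFAULT_RATELIMIT_BURST);

/* Hypothetical helper: report a fault unless we are over the rate limit. */
static void report_vm_fault(struct device *dev, u32 addr, u32 status)
{
	if (!__ratelimit(&vm_fault_rs))
		return;	/* suppressed: burst exhausted for this interval */

	dev_err(dev, "VM fault at page 0x%08x, status 0x%08x\n", addr, status);
}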
@@ -1030,7 +1056,7 @@ static int gmc_v6_0_set_powergating_state(void *handle,
return 0;
}
-const struct amd_ip_funcs gmc_v6_0_ip_funcs = {
+static const struct amd_ip_funcs gmc_v6_0_ip_funcs = {
.name = "gmc_v6_0",
.early_init = gmc_v6_0_early_init,
.late_init = gmc_v6_0_late_init,
@@ -1069,3 +1095,11 @@ static void gmc_v6_0_set_irq_funcs(struct amdgpu_device *adev)
adev->mc.vm_fault.funcs = &gmc_v6_0_irq_funcs;
}
+const struct amdgpu_ip_block_version gmc_v6_0_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_GMC,
+ .major = 6,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &gmc_v6_0_ip_funcs,
+};
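
Making gmc_v6_0_ip_funcs static and exporting a versioned descriptor instead means the raw function table is no longer part of the driver's cross-file API; consumers match blocks by type and version. A hypothetical lookup, assuming the ip_blocks[] array layout this series introduces (the helper name is invented for illustration):

/* Hypothetical: find the registered block of a given type and report
 * whether it is at least the requested major version.
 */
static bool ip_block_at_least(struct amdgpu_device *adev,
			      enum amd_ip_block_type type, u32 major)
{
	int i;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		const struct amdgpu_ip_block_version *v =
			adev->ip_blocks[i].version;

		if (v->type == type)
			return v->major >= major;
	}
	return false;
}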
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.h b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.h
index 42c4fc676cd4..ed2f64dec47a 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.h
@@ -24,6 +24,6 @@
#ifndef __GMC_V6_0_H__
#define __GMC_V6_0_H__
-extern const struct amd_ip_funcs gmc_v6_0_ip_funcs;
+extern const struct amdgpu_ip_block_version gmc_v6_0_ip_block;
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
index aa0c4b964621..273b16fb9459 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
@@ -385,7 +385,7 @@ static int gmc_v7_0_mc_init(struct amdgpu_device *adev)
* size equal to 1024 MB or the VRAM size, whichever is larger.
*/
if (amdgpu_gart_size == -1)
- adev->mc.gtt_size = amdgpu_ttm_get_gtt_mem_size(adev);
+ adev->mc.gtt_size = max((1024ULL << 20), adev->mc.mc_vram_size);
else
adev->mc.gtt_size = (uint64_t)amdgpu_gart_size << 20;
@@ -711,7 +711,7 @@ static void gmc_v7_0_vm_decode_fault(struct amdgpu_device *adev,
mc_id = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
MEMORY_CLIENT_ID);
- printk("VM fault (0x%02x, vmid %d) at page %u, %s from '%s' (0x%08x) (%d)\n",
+ dev_err(adev->dev, "VM fault (0x%02x, vmid %d) at page %u, %s from '%s' (0x%08x) (%d)\n",
protections, vmid, addr,
REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
MEMORY_CLIENT_RW) ?
@@ -945,11 +945,6 @@ static int gmc_v7_0_sw_init(void *handle)
return r;
}
- r = amdgpu_ttm_global_init(adev);
- if (r) {
- return r;
- }
-
r = gmc_v7_0_mc_init(adev);
if (r)
return r;
@@ -1198,13 +1193,15 @@ static int gmc_v7_0_process_interrupt(struct amdgpu_device *adev,
if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_FIRST)
gmc_v7_0_set_fault_enable_default(adev, false);
- dev_err(adev->dev, "GPU fault detected: %d 0x%08x\n",
- entry->src_id, entry->src_data);
- dev_err(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n",
- addr);
- dev_err(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
- status);
- gmc_v7_0_vm_decode_fault(adev, status, addr, mc_client);
+ if (printk_ratelimit()) {
+ dev_err(adev->dev, "GPU fault detected: %d 0x%08x\n",
+ entry->src_id, entry->src_data);
+ dev_err(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n",
+ addr);
+ dev_err(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
+ status);
+ gmc_v7_0_vm_decode_fault(adev, status, addr, mc_client);
+ }
return 0;
}
@@ -1235,7 +1232,7 @@ static int gmc_v7_0_set_powergating_state(void *handle,
return 0;
}
-const struct amd_ip_funcs gmc_v7_0_ip_funcs = {
+static const struct amd_ip_funcs gmc_v7_0_ip_funcs = {
.name = "gmc_v7_0",
.early_init = gmc_v7_0_early_init,
.late_init = gmc_v7_0_late_init,
@@ -1273,3 +1270,21 @@ static void gmc_v7_0_set_irq_funcs(struct amdgpu_device *adev)
adev->mc.vm_fault.num_types = 1;
adev->mc.vm_fault.funcs = &gmc_v7_0_irq_funcs;
}
+
+const struct amdgpu_ip_block_version gmc_v7_0_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_GMC,
+ .major = 7,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &gmc_v7_0_ip_funcs,
+};
+
+const struct amdgpu_ip_block_version gmc_v7_4_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_GMC,
+ .major = 7,
+ .minor = 4,
+ .rev = 0,
+ .funcs = &gmc_v7_0_ip_funcs,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.h b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.h
index 0b386b5d2f7a..ebce2966c1c7 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.h
@@ -24,6 +24,7 @@
#ifndef __GMC_V7_0_H__
#define __GMC_V7_0_H__
-extern const struct amd_ip_funcs gmc_v7_0_ip_funcs;
+extern const struct amdgpu_ip_block_version gmc_v7_0_ip_block;
+extern const struct amdgpu_ip_block_version gmc_v7_4_ip_block;
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
index a16b2201d52c..0daac3a5be79 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
@@ -472,7 +472,7 @@ static int gmc_v8_0_mc_init(struct amdgpu_device *adev)
* size equal to 1024 MB or the VRAM size, whichever is larger.
*/
if (amdgpu_gart_size == -1)
- adev->mc.gtt_size = amdgpu_ttm_get_gtt_mem_size(adev);
+ adev->mc.gtt_size = max((1024ULL << 20), adev->mc.mc_vram_size);
else
adev->mc.gtt_size = (uint64_t)amdgpu_gart_size << 20;
@@ -837,7 +837,7 @@ static void gmc_v8_0_vm_decode_fault(struct amdgpu_device *adev,
mc_id = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
MEMORY_CLIENT_ID);
- printk("VM fault (0x%02x, vmid %d) at page %u, %s from '%s' (0x%08x) (%d)\n",
+ dev_err(adev->dev, "VM fault (0x%02x, vmid %d) at page %u, %s from '%s' (0x%08x) (%d)\n",
protections, vmid, addr,
REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
MEMORY_CLIENT_RW) ?
@@ -952,11 +952,6 @@ static int gmc_v8_0_sw_init(void *handle)
return r;
}
- r = amdgpu_ttm_global_init(adev);
- if (r) {
- return r;
- }
-
r = gmc_v8_0_mc_init(adev);
if (r)
return r;
@@ -1242,13 +1237,15 @@ static int gmc_v8_0_process_interrupt(struct amdgpu_device *adev,
if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_FIRST)
gmc_v8_0_set_fault_enable_default(adev, false);
- dev_err(adev->dev, "GPU fault detected: %d 0x%08x\n",
- entry->src_id, entry->src_data);
- dev_err(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n",
- addr);
- dev_err(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
- status);
- gmc_v8_0_vm_decode_fault(adev, status, addr, mc_client);
+ if (printk_ratelimit()) {
+ dev_err(adev->dev, "GPU fault detected: %d 0x%08x\n",
+ entry->src_id, entry->src_data);
+ dev_err(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n",
+ addr);
+ dev_err(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
+ status);
+ gmc_v8_0_vm_decode_fault(adev, status, addr, mc_client);
+ }
return 0;
}
@@ -1437,7 +1434,7 @@ static int gmc_v8_0_set_powergating_state(void *handle,
return 0;
}
-const struct amd_ip_funcs gmc_v8_0_ip_funcs = {
+static const struct amd_ip_funcs gmc_v8_0_ip_funcs = {
.name = "gmc_v8_0",
.early_init = gmc_v8_0_early_init,
.late_init = gmc_v8_0_late_init,
@@ -1478,3 +1475,30 @@ static void gmc_v8_0_set_irq_funcs(struct amdgpu_device *adev)
adev->mc.vm_fault.num_types = 1;
adev->mc.vm_fault.funcs = &gmc_v8_0_irq_funcs;
}
+
+const struct amdgpu_ip_block_version gmc_v8_0_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_GMC,
+ .major = 8,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &gmc_v8_0_ip_funcs,
+};
+
+const struct amdgpu_ip_block_version gmc_v8_1_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_GMC,
+ .major = 8,
+ .minor = 1,
+ .rev = 0,
+ .funcs = &gmc_v8_0_ip_funcs,
+};
+
+const struct amdgpu_ip_block_version gmc_v8_5_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_GMC,
+ .major = 8,
+ .minor = 5,
+ .rev = 0,
+ .funcs = &gmc_v8_0_ip_funcs,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.h b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.h
index fc5001a8119d..19b8a8aed204 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.h
@@ -24,6 +24,8 @@
#ifndef __GMC_V8_0_H__
#define __GMC_V8_0_H__
-extern const struct amd_ip_funcs gmc_v8_0_ip_funcs;
+extern const struct amdgpu_ip_block_version gmc_v8_0_ip_block;
+extern const struct amdgpu_ip_block_version gmc_v8_1_ip_block;
+extern const struct amdgpu_ip_block_version gmc_v8_5_ip_block;
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/iceland_ih.c b/drivers/gpu/drm/amd/amdgpu/iceland_ih.c
index 3b8906ce3511..ac21bb7bc0f3 100644
--- a/drivers/gpu/drm/amd/amdgpu/iceland_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/iceland_ih.c
@@ -392,7 +392,7 @@ static int iceland_ih_set_powergating_state(void *handle,
return 0;
}
-const struct amd_ip_funcs iceland_ih_ip_funcs = {
+static const struct amd_ip_funcs iceland_ih_ip_funcs = {
.name = "iceland_ih",
.early_init = iceland_ih_early_init,
.late_init = NULL,
@@ -421,3 +421,11 @@ static void iceland_ih_set_interrupt_funcs(struct amdgpu_device *adev)
adev->irq.ih_funcs = &iceland_ih_funcs;
}
+const struct amdgpu_ip_block_version iceland_ih_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_IH,
+ .major = 2,
+ .minor = 4,
+ .rev = 0,
+ .funcs = &iceland_ih_ip_funcs,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/iceland_ih.h b/drivers/gpu/drm/amd/amdgpu/iceland_ih.h
index 57558cddfbcb..3235f4277548 100644
--- a/drivers/gpu/drm/amd/amdgpu/iceland_ih.h
+++ b/drivers/gpu/drm/amd/amdgpu/iceland_ih.h
@@ -24,6 +24,6 @@
#ifndef __ICELAND_IH_H__
#define __ICELAND_IH_H__
-extern const struct amd_ip_funcs iceland_ih_ip_funcs;
+extern const struct amdgpu_ip_block_version iceland_ih_ip_block;
#endif /* __ICELAND_IH_H__ */
diff --git a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
index 71d2856222fa..5a1bc358bcb1 100644
--- a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
@@ -2796,7 +2796,7 @@ static int kv_parse_power_table(struct amdgpu_device *adev)
adev->pm.dpm.num_ps = state_array->ucNumEntries;
/* fill in the vce power states */
- for (i = 0; i < AMDGPU_MAX_VCE_LEVELS; i++) {
+ for (i = 0; i < adev->pm.dpm.num_of_vce_states; i++) {
u32 sclk;
clock_array_index = adev->pm.dpm.vce_states[i].clk_idx;
clock_info = (union pplib_clock_info *)
@@ -2845,7 +2845,7 @@ static int kv_dpm_init(struct amdgpu_device *adev)
pi->caps_tcp_ramping = true;
}
- if (amdgpu_sclk_deep_sleep_en)
+ if (amdgpu_pp_feature_mask & SCLK_DEEP_SLEEP_MASK)
pi->caps_sclk_ds = true;
else
pi->caps_sclk_ds = false;
@@ -3245,6 +3245,18 @@ static int kv_dpm_set_powergating_state(void *handle,
return 0;
}
+static int kv_check_state_equal(struct amdgpu_device *adev,
+ struct amdgpu_ps *cps,
+ struct amdgpu_ps *rps,
+ bool *equal)
+{
+ if (equal == NULL)
+ return -EINVAL;
+
+ *equal = false;
+ return 0;
+}
+
const struct amd_ip_funcs kv_dpm_ip_funcs = {
.name = "kv_dpm",
.early_init = kv_dpm_early_init,
@@ -3275,6 +3287,8 @@ static const struct amdgpu_dpm_funcs kv_dpm_funcs = {
.force_performance_level = &kv_dpm_force_performance_level,
.powergate_uvd = &kv_dpm_powergate_uvd,
.enable_bapm = &kv_dpm_enable_bapm,
+ .get_vce_clock_state = amdgpu_get_vce_clock_state,
+ .check_state_equal = kv_check_state_equal,
};
static void kv_dpm_set_dpm_funcs(struct amdgpu_device *adev)
@@ -3293,3 +3307,12 @@ static void kv_dpm_set_irq_funcs(struct amdgpu_device *adev)
adev->pm.dpm.thermal.irq.num_types = AMDGPU_THERMAL_IRQ_LAST;
adev->pm.dpm.thermal.irq.funcs = &kv_dpm_irq_funcs;
}
+
+const struct amdgpu_ip_block_version kv_dpm_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_SMC,
+ .major = 7,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &kv_dpm_ip_funcs,
+};
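
kv_check_state_equal() deliberately always reports "not equal": on these APUs the DPM core should never skip a requested transition. A sketch of how a caller is expected to consult the new callback (wrapper and field names are assumptions, not the exact core code):

/* Sketch: skip hardware reprogramming when the requested power state
 * compares equal to the current one.
 */
static void dpm_change_state(struct amdgpu_device *adev,
			     struct amdgpu_ps *cur, struct amdgpu_ps *req)
{
	bool equal = false;

	if (adev->pm.funcs->check_state_equal &&
	    !adev->pm.funcs->check_state_equal(adev, cur, req, &equal) &&
	    equal)
		return;			/* same state, nothing to program */

	/* ... perform the full state switch ... */
}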
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
index 565dab3c7218..fbe74a33899c 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
@@ -232,10 +232,10 @@ static void sdma_v2_4_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
for (i = 0; i < count; i++)
if (sdma && sdma->burst_nop && (i == 0))
- amdgpu_ring_write(ring, ring->nop |
+ amdgpu_ring_write(ring, ring->funcs->nop |
SDMA_PKT_NOP_HEADER_COUNT(count - 1));
else
- amdgpu_ring_write(ring, ring->nop);
+ amdgpu_ring_write(ring, ring->funcs->nop);
}
/**
@@ -668,7 +668,7 @@ static int sdma_v2_4_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
struct amdgpu_device *adev = ring->adev;
struct amdgpu_ib ib;
- struct fence *f = NULL;
+ struct dma_fence *f = NULL;
unsigned index;
u32 tmp = 0;
u64 gpu_addr;
@@ -705,7 +705,7 @@ static int sdma_v2_4_ring_test_ib(struct amdgpu_ring *ring, long timeout)
if (r)
goto err1;
- r = fence_wait_timeout(f, false, timeout);
+ r = dma_fence_wait_timeout(f, false, timeout);
if (r == 0) {
DRM_ERROR("amdgpu: IB test timed out\n");
r = -ETIMEDOUT;
@@ -725,7 +725,7 @@ static int sdma_v2_4_ring_test_ib(struct amdgpu_ring *ring, long timeout)
err1:
amdgpu_ib_free(adev, &ib, NULL);
- fence_put(f);
+ dma_fence_put(f);
err0:
amdgpu_wb_free(adev, index);
return r;
@@ -775,11 +775,11 @@ static void sdma_v2_4_vm_write_pte(struct amdgpu_ib *ib, uint64_t pe,
unsigned ndw = count * 2;
ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
- SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR);
+ SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR);
ib->ptr[ib->length_dw++] = pe;
ib->ptr[ib->length_dw++] = upper_32_bits(pe);
ib->ptr[ib->length_dw++] = ndw;
- for (; ndw > 0; ndw -= 2, --count, pe += 8) {
+ for (; ndw > 0; ndw -= 2) {
ib->ptr[ib->length_dw++] = lower_32_bits(value);
ib->ptr[ib->length_dw++] = upper_32_bits(value);
value += incr;
@@ -902,22 +902,6 @@ static void sdma_v2_4_ring_emit_vm_flush(struct amdgpu_ring *ring,
SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10)); /* retry count, poll interval */
}
-static unsigned sdma_v2_4_ring_get_emit_ib_size(struct amdgpu_ring *ring)
-{
- return
- 7 + 6; /* sdma_v2_4_ring_emit_ib */
-}
-
-static unsigned sdma_v2_4_ring_get_dma_frame_size(struct amdgpu_ring *ring)
-{
- return
- 6 + /* sdma_v2_4_ring_emit_hdp_flush */
- 3 + /* sdma_v2_4_ring_emit_hdp_invalidate */
- 6 + /* sdma_v2_4_ring_emit_pipeline_sync */
- 12 + /* sdma_v2_4_ring_emit_vm_flush */
- 10 + 10 + 10; /* sdma_v2_4_ring_emit_fence x3 for user fence, vm fence */
-}
-
static int sdma_v2_4_early_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -965,11 +949,10 @@ static int sdma_v2_4_sw_init(void *handle)
ring->use_doorbell = false;
sprintf(ring->name, "sdma%d", i);
r = amdgpu_ring_init(adev, ring, 1024,
- SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP), 0xf,
&adev->sdma.trap_irq,
(i == 0) ?
- AMDGPU_SDMA_IRQ_TRAP0 : AMDGPU_SDMA_IRQ_TRAP1,
- AMDGPU_RING_TYPE_SDMA);
+ AMDGPU_SDMA_IRQ_TRAP0 :
+ AMDGPU_SDMA_IRQ_TRAP1);
if (r)
return r;
}
@@ -1204,7 +1187,7 @@ static int sdma_v2_4_set_powergating_state(void *handle,
return 0;
}
-const struct amd_ip_funcs sdma_v2_4_ip_funcs = {
+static const struct amd_ip_funcs sdma_v2_4_ip_funcs = {
.name = "sdma_v2_4",
.early_init = sdma_v2_4_early_init,
.late_init = NULL,
@@ -1222,10 +1205,19 @@ const struct amd_ip_funcs sdma_v2_4_ip_funcs = {
};
static const struct amdgpu_ring_funcs sdma_v2_4_ring_funcs = {
+ .type = AMDGPU_RING_TYPE_SDMA,
+ .align_mask = 0xf,
+ .nop = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP),
.get_rptr = sdma_v2_4_ring_get_rptr,
.get_wptr = sdma_v2_4_ring_get_wptr,
.set_wptr = sdma_v2_4_ring_set_wptr,
- .parse_cs = NULL,
+ .emit_frame_size =
+ 6 + /* sdma_v2_4_ring_emit_hdp_flush */
+ 3 + /* sdma_v2_4_ring_emit_hdp_invalidate */
+ 6 + /* sdma_v2_4_ring_emit_pipeline_sync */
+ 12 + /* sdma_v2_4_ring_emit_vm_flush */
+ 10 + 10 + 10, /* sdma_v2_4_ring_emit_fence x3 for user fence, vm fence */
+ .emit_ib_size = 7 + 6, /* sdma_v2_4_ring_emit_ib */
.emit_ib = sdma_v2_4_ring_emit_ib,
.emit_fence = sdma_v2_4_ring_emit_fence,
.emit_pipeline_sync = sdma_v2_4_ring_emit_pipeline_sync,
@@ -1236,8 +1228,6 @@ static const struct amdgpu_ring_funcs sdma_v2_4_ring_funcs = {
.test_ib = sdma_v2_4_ring_test_ib,
.insert_nop = sdma_v2_4_ring_insert_nop,
.pad_ib = sdma_v2_4_ring_pad_ib,
- .get_emit_ib_size = sdma_v2_4_ring_get_emit_ib_size,
- .get_dma_frame_size = sdma_v2_4_ring_get_dma_frame_size,
};
static void sdma_v2_4_set_ring_funcs(struct amdgpu_device *adev)
@@ -1350,3 +1340,12 @@ static void sdma_v2_4_set_vm_pte_funcs(struct amdgpu_device *adev)
adev->vm_manager.vm_pte_num_rings = adev->sdma.num_instances;
}
}
+
+const struct amdgpu_ip_block_version sdma_v2_4_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_SDMA,
+ .major = 2,
+ .minor = 4,
+ .rev = 0,
+ .funcs = &sdma_v2_4_ip_funcs,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.h b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.h
index 07349f5ee10f..28b433729216 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.h
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.h
@@ -24,6 +24,6 @@
#ifndef __SDMA_V2_4_H__
#define __SDMA_V2_4_H__
-extern const struct amd_ip_funcs sdma_v2_4_ip_funcs;
+extern const struct amdgpu_ip_block_version sdma_v2_4_ip_block;
#endif
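
The struct fence to struct dma_fence conversion in this file is part of a tree-wide rename (fence_* becomes dma_fence_*, from <linux/dma-fence.h>); behavior is unchanged. A minimal sketch of the renamed wait-and-release pattern the IB test uses:

#include <linux/dma-fence.h>
#include <linux/errno.h>

/* Wait for a fence with a jiffies timeout, then drop our reference.
 * Returns 0 on success, -ETIMEDOUT on timeout, or a negative error.
 */
static int wait_ib_fence(struct dma_fence *f, long timeout)
{
	long r = dma_fence_wait_timeout(f, false, timeout);

	dma_fence_put(f);
	if (r == 0)
		return -ETIMEDOUT;
	return r < 0 ? r : 0;
}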
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
index a9d10941fb53..1170a64a3184 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
@@ -392,10 +392,10 @@ static void sdma_v3_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
for (i = 0; i < count; i++)
if (sdma && sdma->burst_nop && (i == 0))
- amdgpu_ring_write(ring, ring->nop |
+ amdgpu_ring_write(ring, ring->funcs->nop |
SDMA_PKT_NOP_HEADER_COUNT(count - 1));
else
- amdgpu_ring_write(ring, ring->nop);
+ amdgpu_ring_write(ring, ring->funcs->nop);
}
/**
@@ -871,7 +871,7 @@ static int sdma_v3_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
struct amdgpu_device *adev = ring->adev;
struct amdgpu_ib ib;
- struct fence *f = NULL;
+ struct dma_fence *f = NULL;
unsigned index;
u32 tmp = 0;
u64 gpu_addr;
@@ -908,7 +908,7 @@ static int sdma_v3_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
if (r)
goto err1;
- r = fence_wait_timeout(f, false, timeout);
+ r = dma_fence_wait_timeout(f, false, timeout);
if (r == 0) {
DRM_ERROR("amdgpu: IB test timed out\n");
r = -ETIMEDOUT;
@@ -927,7 +927,7 @@ static int sdma_v3_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
}
err1:
amdgpu_ib_free(adev, &ib, NULL);
- fence_put(f);
+ dma_fence_put(f);
err0:
amdgpu_wb_free(adev, index);
return r;
@@ -977,11 +977,11 @@ static void sdma_v3_0_vm_write_pte(struct amdgpu_ib *ib, uint64_t pe,
unsigned ndw = count * 2;
ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
- SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR);
+ SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR);
ib->ptr[ib->length_dw++] = lower_32_bits(pe);
ib->ptr[ib->length_dw++] = upper_32_bits(pe);
ib->ptr[ib->length_dw++] = ndw;
- for (; ndw > 0; ndw -= 2, --count, pe += 8) {
+ for (; ndw > 0; ndw -= 2) {
ib->ptr[ib->length_dw++] = lower_32_bits(value);
ib->ptr[ib->length_dw++] = upper_32_bits(value);
value += incr;
@@ -1104,22 +1104,6 @@ static void sdma_v3_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10)); /* retry count, poll interval */
}
-static unsigned sdma_v3_0_ring_get_emit_ib_size(struct amdgpu_ring *ring)
-{
- return
- 7 + 6; /* sdma_v3_0_ring_emit_ib */
-}
-
-static unsigned sdma_v3_0_ring_get_dma_frame_size(struct amdgpu_ring *ring)
-{
- return
- 6 + /* sdma_v3_0_ring_emit_hdp_flush */
- 3 + /* sdma_v3_0_ring_emit_hdp_invalidate */
- 6 + /* sdma_v3_0_ring_emit_pipeline_sync */
- 12 + /* sdma_v3_0_ring_emit_vm_flush */
- 10 + 10 + 10; /* sdma_v3_0_ring_emit_fence x3 for user fence, vm fence */
-}
-
static int sdma_v3_0_early_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -1177,11 +1161,10 @@ static int sdma_v3_0_sw_init(void *handle)
sprintf(ring->name, "sdma%d", i);
r = amdgpu_ring_init(adev, ring, 1024,
- SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP), 0xf,
&adev->sdma.trap_irq,
(i == 0) ?
- AMDGPU_SDMA_IRQ_TRAP0 : AMDGPU_SDMA_IRQ_TRAP1,
- AMDGPU_RING_TYPE_SDMA);
+ AMDGPU_SDMA_IRQ_TRAP0 :
+ AMDGPU_SDMA_IRQ_TRAP1);
if (r)
return r;
}
@@ -1544,7 +1527,7 @@ static int sdma_v3_0_set_powergating_state(void *handle,
return 0;
}
-const struct amd_ip_funcs sdma_v3_0_ip_funcs = {
+static const struct amd_ip_funcs sdma_v3_0_ip_funcs = {
.name = "sdma_v3_0",
.early_init = sdma_v3_0_early_init,
.late_init = NULL,
@@ -1565,10 +1548,19 @@ const struct amd_ip_funcs sdma_v3_0_ip_funcs = {
};
static const struct amdgpu_ring_funcs sdma_v3_0_ring_funcs = {
+ .type = AMDGPU_RING_TYPE_SDMA,
+ .align_mask = 0xf,
+ .nop = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP),
.get_rptr = sdma_v3_0_ring_get_rptr,
.get_wptr = sdma_v3_0_ring_get_wptr,
.set_wptr = sdma_v3_0_ring_set_wptr,
- .parse_cs = NULL,
+ .emit_frame_size =
+ 6 + /* sdma_v3_0_ring_emit_hdp_flush */
+ 3 + /* sdma_v3_0_ring_emit_hdp_invalidate */
+ 6 + /* sdma_v3_0_ring_emit_pipeline_sync */
+ 12 + /* sdma_v3_0_ring_emit_vm_flush */
+ 10 + 10 + 10, /* sdma_v3_0_ring_emit_fence x3 for user fence, vm fence */
+ .emit_ib_size = 7 + 6, /* sdma_v3_0_ring_emit_ib */
.emit_ib = sdma_v3_0_ring_emit_ib,
.emit_fence = sdma_v3_0_ring_emit_fence,
.emit_pipeline_sync = sdma_v3_0_ring_emit_pipeline_sync,
@@ -1579,8 +1571,6 @@ static const struct amdgpu_ring_funcs sdma_v3_0_ring_funcs = {
.test_ib = sdma_v3_0_ring_test_ib,
.insert_nop = sdma_v3_0_ring_insert_nop,
.pad_ib = sdma_v3_0_ring_pad_ib,
- .get_emit_ib_size = sdma_v3_0_ring_get_emit_ib_size,
- .get_dma_frame_size = sdma_v3_0_ring_get_dma_frame_size,
};
static void sdma_v3_0_set_ring_funcs(struct amdgpu_device *adev)
@@ -1693,3 +1683,21 @@ static void sdma_v3_0_set_vm_pte_funcs(struct amdgpu_device *adev)
adev->vm_manager.vm_pte_num_rings = adev->sdma.num_instances;
}
}
+
+const struct amdgpu_ip_block_version sdma_v3_0_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_SDMA,
+ .major = 3,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &sdma_v3_0_ip_funcs,
+};
+
+const struct amdgpu_ip_block_version sdma_v3_1_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_SDMA,
+ .major = 3,
+ .minor = 1,
+ .rev = 0,
+ .funcs = &sdma_v3_0_ip_funcs,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.h b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.h
index 0cb9698a3054..7aa223d35f1b 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.h
@@ -24,6 +24,7 @@
#ifndef __SDMA_V3_0_H__
#define __SDMA_V3_0_H__
-extern const struct amd_ip_funcs sdma_v3_0_ip_funcs;
+extern const struct amdgpu_ip_block_version sdma_v3_0_ip_block;
+extern const struct amdgpu_ip_block_version sdma_v3_1_ip_block;
#endif
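
Turning the get_emit_ib_size()/get_dma_frame_size() callbacks into the constant emit_frame_size/emit_ib_size fields lets common code size its ring reservations without an indirect call. A sketch of the intended use, with a hypothetical wrapper name (amdgpu_ring_alloc() itself is real):

/* Hypothetical wrapper: reserve worst-case ring space for one frame
 * plus its indirect buffers before any dwords are emitted.
 */
static int ring_reserve_frame(struct amdgpu_ring *ring, unsigned num_ibs)
{
	unsigned ndw = ring->funcs->emit_frame_size +
		       num_ibs * ring->funcs->emit_ib_size;

	return amdgpu_ring_alloc(ring, ndw);
}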
diff --git a/drivers/gpu/drm/amd/amdgpu/si.c b/drivers/gpu/drm/amd/amdgpu/si.c
index dc9511c5ecb8..c46b0159007d 100644
--- a/drivers/gpu/drm/amd/amdgpu/si.c
+++ b/drivers/gpu/drm/amd/amdgpu/si.c
@@ -39,16 +39,18 @@
#include "si_dma.h"
#include "dce_v6_0.h"
#include "si.h"
+#include "dce_virtual.h"
static const u32 tahiti_golden_registers[] =
{
+ 0x17bc, 0x00000030, 0x00000011,
0x2684, 0x00010000, 0x00018208,
0x260c, 0xffffffff, 0x00000000,
0x260d, 0xf00fffff, 0x00000400,
0x260e, 0x0002021c, 0x00020200,
0x031e, 0x00000080, 0x00000000,
- 0x340c, 0x000300c0, 0x00800040,
- 0x360c, 0x000300c0, 0x00800040,
+ 0x340c, 0x000000c0, 0x00800040,
+ 0x360c, 0x000000c0, 0x00800040,
0x16ec, 0x000000f0, 0x00000070,
0x16f0, 0x00200000, 0x50100000,
0x1c0c, 0x31000311, 0x00000011,
@@ -59,7 +61,7 @@ static const u32 tahiti_golden_registers[] =
0x22c4, 0x0000ff0f, 0x00000000,
0xa293, 0x07ffffff, 0x4e000000,
0xa0d4, 0x3f3f3fff, 0x2a00126a,
- 0x000c, 0x000000ff, 0x0040,
+ 0x000c, 0xffffffff, 0x0040,
0x000d, 0x00000040, 0x00004040,
0x2440, 0x07ffffff, 0x03000000,
0x23a2, 0x01ff1f3f, 0x00000000,
@@ -72,7 +74,11 @@ static const u32 tahiti_golden_registers[] =
0x2234, 0xffffffff, 0x000fff40,
0x2235, 0x0000001f, 0x00000010,
0x0504, 0x20000000, 0x20fffed8,
- 0x0570, 0x000c0fc0, 0x000c0400
+ 0x0570, 0x000c0fc0, 0x000c0400,
+ 0x052c, 0x0fffffff, 0xffffffff,
+ 0x052d, 0x0fffffff, 0x0fffffff,
+ 0x052e, 0x0fffffff, 0x0fffffff,
+ 0x052f, 0x0fffffff, 0x0fffffff
};
static const u32 tahiti_golden_registers2[] =
@@ -82,16 +88,18 @@ static const u32 tahiti_golden_registers2[] =
static const u32 tahiti_golden_rlc_registers[] =
{
+ 0x263e, 0xffffffff, 0x12011003,
0x3109, 0xffffffff, 0x00601005,
0x311f, 0xffffffff, 0x10104040,
0x3122, 0xffffffff, 0x0100000a,
0x30c5, 0xffffffff, 0x00000800,
0x30c3, 0xffffffff, 0x800000f4,
- 0x3d2a, 0xffffffff, 0x00000000
+ 0x3d2a, 0x00000008, 0x00000000
};
static const u32 pitcairn_golden_registers[] =
{
+ 0x17bc, 0x00000030, 0x00000011,
0x2684, 0x00010000, 0x00018208,
0x260c, 0xffffffff, 0x00000000,
0x260d, 0xf00fffff, 0x00000400,
@@ -109,7 +117,7 @@ static const u32 pitcairn_golden_registers[] =
0x22c4, 0x0000ff0f, 0x00000000,
0xa293, 0x07ffffff, 0x4e000000,
0xa0d4, 0x3f3f3fff, 0x2a00126a,
- 0x000c, 0x000000ff, 0x0040,
+ 0x000c, 0xffffffff, 0x0040,
0x000d, 0x00000040, 0x00004040,
0x2440, 0x07ffffff, 0x03000000,
0x2418, 0x0000007f, 0x00000020,
@@ -118,11 +126,16 @@ static const u32 pitcairn_golden_registers[] =
0x2b04, 0xffffffff, 0x00000000,
0x2b03, 0xffffffff, 0x32761054,
0x2235, 0x0000001f, 0x00000010,
- 0x0570, 0x000c0fc0, 0x000c0400
+ 0x0570, 0x000c0fc0, 0x000c0400,
+ 0x052c, 0x0fffffff, 0xffffffff,
+ 0x052d, 0x0fffffff, 0x0fffffff,
+ 0x052e, 0x0fffffff, 0x0fffffff,
+ 0x052f, 0x0fffffff, 0x0fffffff
};
static const u32 pitcairn_golden_rlc_registers[] =
{
+ 0x263e, 0xffffffff, 0x12011003,
0x3109, 0xffffffff, 0x00601004,
0x311f, 0xffffffff, 0x10102020,
0x3122, 0xffffffff, 0x01000020,
@@ -132,133 +145,134 @@ static const u32 pitcairn_golden_rlc_registers[] =
static const u32 verde_pg_init[] =
{
- 0xd4f, 0xffffffff, 0x40000,
- 0xd4e, 0xffffffff, 0x200010ff,
- 0xd4f, 0xffffffff, 0x0,
- 0xd4f, 0xffffffff, 0x0,
- 0xd4f, 0xffffffff, 0x0,
- 0xd4f, 0xffffffff, 0x0,
- 0xd4f, 0xffffffff, 0x0,
- 0xd4f, 0xffffffff, 0x7007,
- 0xd4e, 0xffffffff, 0x300010ff,
- 0xd4f, 0xffffffff, 0x0,
- 0xd4f, 0xffffffff, 0x0,
- 0xd4f, 0xffffffff, 0x0,
- 0xd4f, 0xffffffff, 0x0,
- 0xd4f, 0xffffffff, 0x0,
- 0xd4f, 0xffffffff, 0x400000,
- 0xd4e, 0xffffffff, 0x100010ff,
- 0xd4f, 0xffffffff, 0x0,
- 0xd4f, 0xffffffff, 0x0,
- 0xd4f, 0xffffffff, 0x0,
- 0xd4f, 0xffffffff, 0x0,
- 0xd4f, 0xffffffff, 0x0,
- 0xd4f, 0xffffffff, 0x120200,
- 0xd4e, 0xffffffff, 0x500010ff,
- 0xd4f, 0xffffffff, 0x0,
- 0xd4f, 0xffffffff, 0x0,
- 0xd4f, 0xffffffff, 0x0,
- 0xd4f, 0xffffffff, 0x0,
- 0xd4f, 0xffffffff, 0x0,
- 0xd4f, 0xffffffff, 0x1e1e16,
- 0xd4e, 0xffffffff, 0x600010ff,
- 0xd4f, 0xffffffff, 0x0,
- 0xd4f, 0xffffffff, 0x0,
- 0xd4f, 0xffffffff, 0x0,
- 0xd4f, 0xffffffff, 0x0,
- 0xd4f, 0xffffffff, 0x0,
- 0xd4f, 0xffffffff, 0x171f1e,
- 0xd4e, 0xffffffff, 0x700010ff,
- 0xd4f, 0xffffffff, 0x0,
- 0xd4f, 0xffffffff, 0x0,
- 0xd4f, 0xffffffff, 0x0,
- 0xd4f, 0xffffffff, 0x0,
- 0xd4f, 0xffffffff, 0x0,
- 0xd4f, 0xffffffff, 0x0,
- 0xd4e, 0xffffffff, 0x9ff,
- 0xd40, 0xffffffff, 0x0,
- 0xd41, 0xffffffff, 0x10000800,
- 0xd41, 0xffffffff, 0xf,
- 0xd41, 0xffffffff, 0xf,
- 0xd40, 0xffffffff, 0x4,
- 0xd41, 0xffffffff, 0x1000051e,
- 0xd41, 0xffffffff, 0xffff,
- 0xd41, 0xffffffff, 0xffff,
- 0xd40, 0xffffffff, 0x8,
- 0xd41, 0xffffffff, 0x80500,
- 0xd40, 0xffffffff, 0x12,
- 0xd41, 0xffffffff, 0x9050c,
- 0xd40, 0xffffffff, 0x1d,
- 0xd41, 0xffffffff, 0xb052c,
- 0xd40, 0xffffffff, 0x2a,
- 0xd41, 0xffffffff, 0x1053e,
- 0xd40, 0xffffffff, 0x2d,
- 0xd41, 0xffffffff, 0x10546,
- 0xd40, 0xffffffff, 0x30,
- 0xd41, 0xffffffff, 0xa054e,
- 0xd40, 0xffffffff, 0x3c,
- 0xd41, 0xffffffff, 0x1055f,
- 0xd40, 0xffffffff, 0x3f,
- 0xd41, 0xffffffff, 0x10567,
- 0xd40, 0xffffffff, 0x42,
- 0xd41, 0xffffffff, 0x1056f,
- 0xd40, 0xffffffff, 0x45,
- 0xd41, 0xffffffff, 0x10572,
- 0xd40, 0xffffffff, 0x48,
- 0xd41, 0xffffffff, 0x20575,
- 0xd40, 0xffffffff, 0x4c,
- 0xd41, 0xffffffff, 0x190801,
- 0xd40, 0xffffffff, 0x67,
- 0xd41, 0xffffffff, 0x1082a,
- 0xd40, 0xffffffff, 0x6a,
- 0xd41, 0xffffffff, 0x1b082d,
- 0xd40, 0xffffffff, 0x87,
- 0xd41, 0xffffffff, 0x310851,
- 0xd40, 0xffffffff, 0xba,
- 0xd41, 0xffffffff, 0x891,
- 0xd40, 0xffffffff, 0xbc,
- 0xd41, 0xffffffff, 0x893,
- 0xd40, 0xffffffff, 0xbe,
- 0xd41, 0xffffffff, 0x20895,
- 0xd40, 0xffffffff, 0xc2,
- 0xd41, 0xffffffff, 0x20899,
- 0xd40, 0xffffffff, 0xc6,
- 0xd41, 0xffffffff, 0x2089d,
- 0xd40, 0xffffffff, 0xca,
- 0xd41, 0xffffffff, 0x8a1,
- 0xd40, 0xffffffff, 0xcc,
- 0xd41, 0xffffffff, 0x8a3,
- 0xd40, 0xffffffff, 0xce,
- 0xd41, 0xffffffff, 0x308a5,
- 0xd40, 0xffffffff, 0xd3,
- 0xd41, 0xffffffff, 0x6d08cd,
- 0xd40, 0xffffffff, 0x142,
- 0xd41, 0xffffffff, 0x2000095a,
- 0xd41, 0xffffffff, 0x1,
- 0xd40, 0xffffffff, 0x144,
- 0xd41, 0xffffffff, 0x301f095b,
- 0xd40, 0xffffffff, 0x165,
- 0xd41, 0xffffffff, 0xc094d,
- 0xd40, 0xffffffff, 0x173,
- 0xd41, 0xffffffff, 0xf096d,
- 0xd40, 0xffffffff, 0x184,
- 0xd41, 0xffffffff, 0x15097f,
- 0xd40, 0xffffffff, 0x19b,
- 0xd41, 0xffffffff, 0xc0998,
- 0xd40, 0xffffffff, 0x1a9,
- 0xd41, 0xffffffff, 0x409a7,
- 0xd40, 0xffffffff, 0x1af,
- 0xd41, 0xffffffff, 0xcdc,
- 0xd40, 0xffffffff, 0x1b1,
- 0xd41, 0xffffffff, 0x800,
- 0xd42, 0xffffffff, 0x6c9b2000,
- 0xd44, 0xfc00, 0x2000,
- 0xd51, 0xffffffff, 0xfc0,
- 0xa35, 0x00000100, 0x100
+ 0x0d4f, 0xffffffff, 0x40000,
+ 0x0d4e, 0xffffffff, 0x200010ff,
+ 0x0d4f, 0xffffffff, 0x0,
+ 0x0d4f, 0xffffffff, 0x0,
+ 0x0d4f, 0xffffffff, 0x0,
+ 0x0d4f, 0xffffffff, 0x0,
+ 0x0d4f, 0xffffffff, 0x0,
+ 0x0d4f, 0xffffffff, 0x7007,
+ 0x0d4e, 0xffffffff, 0x300010ff,
+ 0x0d4f, 0xffffffff, 0x0,
+ 0x0d4f, 0xffffffff, 0x0,
+ 0x0d4f, 0xffffffff, 0x0,
+ 0x0d4f, 0xffffffff, 0x0,
+ 0x0d4f, 0xffffffff, 0x0,
+ 0x0d4f, 0xffffffff, 0x400000,
+ 0x0d4e, 0xffffffff, 0x100010ff,
+ 0x0d4f, 0xffffffff, 0x0,
+ 0x0d4f, 0xffffffff, 0x0,
+ 0x0d4f, 0xffffffff, 0x0,
+ 0x0d4f, 0xffffffff, 0x0,
+ 0x0d4f, 0xffffffff, 0x0,
+ 0x0d4f, 0xffffffff, 0x120200,
+ 0x0d4e, 0xffffffff, 0x500010ff,
+ 0x0d4f, 0xffffffff, 0x0,
+ 0x0d4f, 0xffffffff, 0x0,
+ 0x0d4f, 0xffffffff, 0x0,
+ 0x0d4f, 0xffffffff, 0x0,
+ 0x0d4f, 0xffffffff, 0x0,
+ 0x0d4f, 0xffffffff, 0x1e1e16,
+ 0x0d4e, 0xffffffff, 0x600010ff,
+ 0x0d4f, 0xffffffff, 0x0,
+ 0x0d4f, 0xffffffff, 0x0,
+ 0x0d4f, 0xffffffff, 0x0,
+ 0x0d4f, 0xffffffff, 0x0,
+ 0x0d4f, 0xffffffff, 0x0,
+ 0x0d4f, 0xffffffff, 0x171f1e,
+ 0x0d4e, 0xffffffff, 0x700010ff,
+ 0x0d4f, 0xffffffff, 0x0,
+ 0x0d4f, 0xffffffff, 0x0,
+ 0x0d4f, 0xffffffff, 0x0,
+ 0x0d4f, 0xffffffff, 0x0,
+ 0x0d4f, 0xffffffff, 0x0,
+ 0x0d4f, 0xffffffff, 0x0,
+ 0x0d4e, 0xffffffff, 0x9ff,
+ 0x0d40, 0xffffffff, 0x0,
+ 0x0d41, 0xffffffff, 0x10000800,
+ 0x0d41, 0xffffffff, 0xf,
+ 0x0d41, 0xffffffff, 0xf,
+ 0x0d40, 0xffffffff, 0x4,
+ 0x0d41, 0xffffffff, 0x1000051e,
+ 0x0d41, 0xffffffff, 0xffff,
+ 0x0d41, 0xffffffff, 0xffff,
+ 0x0d40, 0xffffffff, 0x8,
+ 0x0d41, 0xffffffff, 0x80500,
+ 0x0d40, 0xffffffff, 0x12,
+ 0x0d41, 0xffffffff, 0x9050c,
+ 0x0d40, 0xffffffff, 0x1d,
+ 0x0d41, 0xffffffff, 0xb052c,
+ 0x0d40, 0xffffffff, 0x2a,
+ 0x0d41, 0xffffffff, 0x1053e,
+ 0x0d40, 0xffffffff, 0x2d,
+ 0x0d41, 0xffffffff, 0x10546,
+ 0x0d40, 0xffffffff, 0x30,
+ 0x0d41, 0xffffffff, 0xa054e,
+ 0x0d40, 0xffffffff, 0x3c,
+ 0x0d41, 0xffffffff, 0x1055f,
+ 0x0d40, 0xffffffff, 0x3f,
+ 0x0d41, 0xffffffff, 0x10567,
+ 0x0d40, 0xffffffff, 0x42,
+ 0x0d41, 0xffffffff, 0x1056f,
+ 0x0d40, 0xffffffff, 0x45,
+ 0x0d41, 0xffffffff, 0x10572,
+ 0x0d40, 0xffffffff, 0x48,
+ 0x0d41, 0xffffffff, 0x20575,
+ 0x0d40, 0xffffffff, 0x4c,
+ 0x0d41, 0xffffffff, 0x190801,
+ 0x0d40, 0xffffffff, 0x67,
+ 0x0d41, 0xffffffff, 0x1082a,
+ 0x0d40, 0xffffffff, 0x6a,
+ 0x0d41, 0xffffffff, 0x1b082d,
+ 0x0d40, 0xffffffff, 0x87,
+ 0x0d41, 0xffffffff, 0x310851,
+ 0x0d40, 0xffffffff, 0xba,
+ 0x0d41, 0xffffffff, 0x891,
+ 0x0d40, 0xffffffff, 0xbc,
+ 0x0d41, 0xffffffff, 0x893,
+ 0x0d40, 0xffffffff, 0xbe,
+ 0x0d41, 0xffffffff, 0x20895,
+ 0x0d40, 0xffffffff, 0xc2,
+ 0x0d41, 0xffffffff, 0x20899,
+ 0x0d40, 0xffffffff, 0xc6,
+ 0x0d41, 0xffffffff, 0x2089d,
+ 0x0d40, 0xffffffff, 0xca,
+ 0x0d41, 0xffffffff, 0x8a1,
+ 0x0d40, 0xffffffff, 0xcc,
+ 0x0d41, 0xffffffff, 0x8a3,
+ 0x0d40, 0xffffffff, 0xce,
+ 0x0d41, 0xffffffff, 0x308a5,
+ 0x0d40, 0xffffffff, 0xd3,
+ 0x0d41, 0xffffffff, 0x6d08cd,
+ 0x0d40, 0xffffffff, 0x142,
+ 0x0d41, 0xffffffff, 0x2000095a,
+ 0x0d41, 0xffffffff, 0x1,
+ 0x0d40, 0xffffffff, 0x144,
+ 0x0d41, 0xffffffff, 0x301f095b,
+ 0x0d40, 0xffffffff, 0x165,
+ 0x0d41, 0xffffffff, 0xc094d,
+ 0x0d40, 0xffffffff, 0x173,
+ 0x0d41, 0xffffffff, 0xf096d,
+ 0x0d40, 0xffffffff, 0x184,
+ 0x0d41, 0xffffffff, 0x15097f,
+ 0x0d40, 0xffffffff, 0x19b,
+ 0x0d41, 0xffffffff, 0xc0998,
+ 0x0d40, 0xffffffff, 0x1a9,
+ 0x0d41, 0xffffffff, 0x409a7,
+ 0x0d40, 0xffffffff, 0x1af,
+ 0x0d41, 0xffffffff, 0xcdc,
+ 0x0d40, 0xffffffff, 0x1b1,
+ 0x0d41, 0xffffffff, 0x800,
+ 0x0d42, 0xffffffff, 0x6c9b2000,
+ 0x0d44, 0xfc00, 0x2000,
+ 0x0d51, 0xffffffff, 0xfc0,
+ 0x0a35, 0x00000100, 0x100
};
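
Aside from new entries, this hunk only zero-pads the register offsets to four hex digits; the values are untouched. Each row of these tables is an {offset, and_mask, or_mask} triplet. A sketch of the read-modify-write the usual amdgpu register-sequence programmer applies per triplet (function name abbreviated here; semantics follow the helper in amdgpu_device.c):

/* Apply {reg, and_mask, or_mask} triplets: a full mask writes the value
 * verbatim, anything else is a read-modify-write of the masked bits.
 */
static void program_register_sequence(struct amdgpu_device *adev,
				      const u32 *regs, u32 array_size)
{
	u32 i, reg, and_mask, or_mask, tmp;

	for (i = 0; i < array_size; i += 3) {
		reg = regs[i];
		and_mask = regs[i + 1];
		or_mask = regs[i + 2];

		if (and_mask == 0xffffffff) {
			tmp = or_mask;
		} else {
			tmp = RREG32(reg);
			tmp &= ~and_mask;
			tmp |= or_mask;
		}
		WREG32(reg, tmp);
	}
}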
static const u32 verde_golden_rlc_registers[] =
{
+ 0x263e, 0xffffffff, 0x02010002,
0x3109, 0xffffffff, 0x033f1005,
0x311f, 0xffffffff, 0x10808020,
0x3122, 0xffffffff, 0x00800008,
@@ -268,65 +282,45 @@ static const u32 verde_golden_rlc_registers[] =
static const u32 verde_golden_registers[] =
{
+ 0x17bc, 0x00000030, 0x00000011,
0x2684, 0x00010000, 0x00018208,
0x260c, 0xffffffff, 0x00000000,
0x260d, 0xf00fffff, 0x00000400,
0x260e, 0x0002021c, 0x00020200,
0x031e, 0x00000080, 0x00000000,
0x340c, 0x000300c0, 0x00800040,
- 0x340c, 0x000300c0, 0x00800040,
- 0x360c, 0x000300c0, 0x00800040,
0x360c, 0x000300c0, 0x00800040,
0x16ec, 0x000000f0, 0x00000070,
0x16f0, 0x00200000, 0x50100000,
-
0x1c0c, 0x31000311, 0x00000011,
0x0ab9, 0x00073ffe, 0x000022a2,
- 0x0ab9, 0x00073ffe, 0x000022a2,
- 0x0ab9, 0x00073ffe, 0x000022a2,
- 0x0903, 0x000007ff, 0x00000000,
0x0903, 0x000007ff, 0x00000000,
- 0x0903, 0x000007ff, 0x00000000,
- 0x2285, 0xf000001f, 0x00000007,
0x2285, 0xf000001f, 0x00000007,
- 0x2285, 0xf000001f, 0x00000007,
- 0x2285, 0xffffffff, 0x00ffffff,
+ 0x22c9, 0xffffffff, 0x00ffffff,
0x22c4, 0x0000ff0f, 0x00000000,
-
0xa293, 0x07ffffff, 0x4e000000,
0xa0d4, 0x3f3f3fff, 0x0000124a,
- 0xa0d4, 0x3f3f3fff, 0x0000124a,
- 0xa0d4, 0x3f3f3fff, 0x0000124a,
- 0x000c, 0x000000ff, 0x0040,
+ 0x000c, 0xffffffff, 0x0040,
0x000d, 0x00000040, 0x00004040,
0x2440, 0x07ffffff, 0x03000000,
- 0x2440, 0x07ffffff, 0x03000000,
- 0x23a2, 0x01ff1f3f, 0x00000000,
- 0x23a3, 0x01ff1f3f, 0x00000000,
0x23a2, 0x01ff1f3f, 0x00000000,
0x23a1, 0x01ff1f3f, 0x00000000,
- 0x23a1, 0x01ff1f3f, 0x00000000,
-
- 0x23a1, 0x01ff1f3f, 0x00000000,
0x2418, 0x0000007f, 0x00000020,
0x2542, 0x00010000, 0x00010000,
- 0x2b01, 0x000003ff, 0x00000003,
- 0x2b05, 0x000003ff, 0x00000003,
0x2b05, 0x000003ff, 0x00000003,
0x2b04, 0xffffffff, 0x00000000,
- 0x2b04, 0xffffffff, 0x00000000,
- 0x2b04, 0xffffffff, 0x00000000,
- 0x2b03, 0xffffffff, 0x00001032,
0x2b03, 0xffffffff, 0x00001032,
- 0x2b03, 0xffffffff, 0x00001032,
- 0x2235, 0x0000001f, 0x00000010,
- 0x2235, 0x0000001f, 0x00000010,
0x2235, 0x0000001f, 0x00000010,
- 0x0570, 0x000c0fc0, 0x000c0400
+ 0x0570, 0x000c0fc0, 0x000c0400,
+ 0x052c, 0x0fffffff, 0xffffffff,
+ 0x052d, 0x0fffffff, 0x0fffffff,
+ 0x052e, 0x0fffffff, 0x0fffffff,
+ 0x052f, 0x0fffffff, 0x0fffffff
};
static const u32 oland_golden_registers[] =
{
+ 0x17bc, 0x00000030, 0x00000011,
0x2684, 0x00010000, 0x00018208,
0x260c, 0xffffffff, 0x00000000,
0x260d, 0xf00fffff, 0x00000400,
@@ -335,7 +329,7 @@ static const u32 oland_golden_registers[] =
0x340c, 0x000300c0, 0x00800040,
0x360c, 0x000300c0, 0x00800040,
0x16ec, 0x000000f0, 0x00000070,
- 0x16f9, 0x00200000, 0x50100000,
+ 0x16f0, 0x00200000, 0x50100000,
0x1c0c, 0x31000311, 0x00000011,
0x0ab9, 0x00073ffe, 0x000022a2,
0x0903, 0x000007ff, 0x00000000,
@@ -344,7 +338,7 @@ static const u32 oland_golden_registers[] =
0x22c4, 0x0000ff0f, 0x00000000,
0xa293, 0x07ffffff, 0x4e000000,
0xa0d4, 0x3f3f3fff, 0x00000082,
- 0x000c, 0x000000ff, 0x0040,
+ 0x000c, 0xffffffff, 0x0040,
0x000d, 0x00000040, 0x00004040,
0x2440, 0x07ffffff, 0x03000000,
0x2418, 0x0000007f, 0x00000020,
@@ -353,11 +347,16 @@ static const u32 oland_golden_registers[] =
0x2b04, 0xffffffff, 0x00000000,
0x2b03, 0xffffffff, 0x00003210,
0x2235, 0x0000001f, 0x00000010,
- 0x0570, 0x000c0fc0, 0x000c0400
+ 0x0570, 0x000c0fc0, 0x000c0400,
+ 0x052c, 0x0fffffff, 0xffffffff,
+ 0x052d, 0x0fffffff, 0x0fffffff,
+ 0x052e, 0x0fffffff, 0x0fffffff,
+ 0x052f, 0x0fffffff, 0x0fffffff
};
static const u32 oland_golden_rlc_registers[] =
{
+ 0x263e, 0xffffffff, 0x02010002,
0x3109, 0xffffffff, 0x00601005,
0x311f, 0xffffffff, 0x10104040,
0x3122, 0xffffffff, 0x0100000a,
@@ -367,22 +366,27 @@ static const u32 oland_golden_rlc_registers[] =
static const u32 hainan_golden_registers[] =
{
+ 0x17bc, 0x00000030, 0x00000011,
0x2684, 0x00010000, 0x00018208,
0x260c, 0xffffffff, 0x00000000,
0x260d, 0xf00fffff, 0x00000400,
0x260e, 0x0002021c, 0x00020200,
- 0x4595, 0xff000fff, 0x00000100,
+ 0x031e, 0x00000080, 0x00000000,
+ 0x3430, 0xff000fff, 0x00000100,
0x340c, 0x000300c0, 0x00800040,
0x3630, 0xff000fff, 0x00000100,
0x360c, 0x000300c0, 0x00800040,
+ 0x16ec, 0x000000f0, 0x00000070,
+ 0x16f0, 0x00200000, 0x50100000,
+ 0x1c0c, 0x31000311, 0x00000011,
0x0ab9, 0x00073ffe, 0x000022a2,
0x0903, 0x000007ff, 0x00000000,
0x2285, 0xf000001f, 0x00000007,
0x22c9, 0xffffffff, 0x00ffffff,
0x22c4, 0x0000ff0f, 0x00000000,
- 0xa393, 0x07ffffff, 0x4e000000,
+ 0xa293, 0x07ffffff, 0x4e000000,
0xa0d4, 0x3f3f3fff, 0x00000000,
- 0x000c, 0x000000ff, 0x0040,
+ 0x000c, 0xffffffff, 0x0040,
0x000d, 0x00000040, 0x00004040,
0x2440, 0x03e00000, 0x03600000,
0x2418, 0x0000007f, 0x00000020,
@@ -391,12 +395,16 @@ static const u32 hainan_golden_registers[] =
0x2b04, 0xffffffff, 0x00000000,
0x2b03, 0xffffffff, 0x00003210,
0x2235, 0x0000001f, 0x00000010,
- 0x0570, 0x000c0fc0, 0x000c0400
+ 0x0570, 0x000c0fc0, 0x000c0400,
+ 0x052c, 0x0fffffff, 0xffffffff,
+ 0x052d, 0x0fffffff, 0x0fffffff,
+ 0x052e, 0x0fffffff, 0x0fffffff,
+ 0x052f, 0x0fffffff, 0x0fffffff
};
static const u32 hainan_golden_registers2[] =
{
- 0x263e, 0xffffffff, 0x02010001
+ 0x263e, 0xffffffff, 0x2011003
};
static const u32 tahiti_mgcg_cgcg_init[] =
@@ -512,18 +520,18 @@ static const u32 tahiti_mgcg_cgcg_init[] =
0x21c2, 0xffffffff, 0x00900100,
0x311e, 0xffffffff, 0x00000080,
0x3101, 0xffffffff, 0x0020003f,
- 0xc, 0xffffffff, 0x0000001c,
- 0xd, 0x000f0000, 0x000f0000,
- 0x583, 0xffffffff, 0x00000100,
- 0x409, 0xffffffff, 0x00000100,
- 0x40b, 0x00000101, 0x00000000,
- 0x82a, 0xffffffff, 0x00000104,
- 0x993, 0x000c0000, 0x000c0000,
- 0x992, 0x000c0000, 0x000c0000,
+ 0x000c, 0xffffffff, 0x0000001c,
+ 0x000d, 0x000f0000, 0x000f0000,
+ 0x0583, 0xffffffff, 0x00000100,
+ 0x0409, 0xffffffff, 0x00000100,
+ 0x040b, 0x00000101, 0x00000000,
+ 0x082a, 0xffffffff, 0x00000104,
+ 0x0993, 0x000c0000, 0x000c0000,
+ 0x0992, 0x000c0000, 0x000c0000,
0x1579, 0xff000fff, 0x00000100,
0x157a, 0x00000001, 0x00000001,
- 0xbd4, 0x00000001, 0x00000001,
- 0xc33, 0xc0000fff, 0x00000104,
+ 0x0bd4, 0x00000001, 0x00000001,
+ 0x0c33, 0xc0000fff, 0x00000104,
0x3079, 0x00000001, 0x00000001,
0x3430, 0xfffffff0, 0x00000100,
0x3630, 0xfffffff0, 0x00000100
@@ -611,16 +619,16 @@ static const u32 pitcairn_mgcg_cgcg_init[] =
0x21c2, 0xffffffff, 0x00900100,
0x311e, 0xffffffff, 0x00000080,
0x3101, 0xffffffff, 0x0020003f,
- 0xc, 0xffffffff, 0x0000001c,
- 0xd, 0x000f0000, 0x000f0000,
- 0x583, 0xffffffff, 0x00000100,
- 0x409, 0xffffffff, 0x00000100,
- 0x40b, 0x00000101, 0x00000000,
- 0x82a, 0xffffffff, 0x00000104,
+ 0x000c, 0xffffffff, 0x0000001c,
+ 0x000d, 0x000f0000, 0x000f0000,
+ 0x0583, 0xffffffff, 0x00000100,
+ 0x0409, 0xffffffff, 0x00000100,
+ 0x040b, 0x00000101, 0x00000000,
+ 0x082a, 0xffffffff, 0x00000104,
0x1579, 0xff000fff, 0x00000100,
0x157a, 0x00000001, 0x00000001,
- 0xbd4, 0x00000001, 0x00000001,
- 0xc33, 0xc0000fff, 0x00000104,
+ 0x0bd4, 0x00000001, 0x00000001,
+ 0x0c33, 0xc0000fff, 0x00000104,
0x3079, 0x00000001, 0x00000001,
0x3430, 0xfffffff0, 0x00000100,
0x3630, 0xfffffff0, 0x00000100
@@ -708,18 +716,18 @@ static const u32 verde_mgcg_cgcg_init[] =
0x21c2, 0xffffffff, 0x00900100,
0x311e, 0xffffffff, 0x00000080,
0x3101, 0xffffffff, 0x0020003f,
- 0xc, 0xffffffff, 0x0000001c,
- 0xd, 0x000f0000, 0x000f0000,
- 0x583, 0xffffffff, 0x00000100,
- 0x409, 0xffffffff, 0x00000100,
- 0x40b, 0x00000101, 0x00000000,
- 0x82a, 0xffffffff, 0x00000104,
- 0x993, 0x000c0000, 0x000c0000,
- 0x992, 0x000c0000, 0x000c0000,
+ 0x000c, 0xffffffff, 0x0000001c,
+ 0x000d, 0x000f0000, 0x000f0000,
+ 0x0583, 0xffffffff, 0x00000100,
+ 0x0409, 0xffffffff, 0x00000100,
+ 0x040b, 0x00000101, 0x00000000,
+ 0x082a, 0xffffffff, 0x00000104,
+ 0x0993, 0x000c0000, 0x000c0000,
+ 0x0992, 0x000c0000, 0x000c0000,
0x1579, 0xff000fff, 0x00000100,
0x157a, 0x00000001, 0x00000001,
- 0xbd4, 0x00000001, 0x00000001,
- 0xc33, 0xc0000fff, 0x00000104,
+ 0x0bd4, 0x00000001, 0x00000001,
+ 0x0c33, 0xc0000fff, 0x00000104,
0x3079, 0x00000001, 0x00000001,
0x3430, 0xfffffff0, 0x00000100,
0x3630, 0xfffffff0, 0x00000100
@@ -787,18 +795,18 @@ static const u32 oland_mgcg_cgcg_init[] =
0x21c2, 0xffffffff, 0x00900100,
0x311e, 0xffffffff, 0x00000080,
0x3101, 0xffffffff, 0x0020003f,
- 0xc, 0xffffffff, 0x0000001c,
- 0xd, 0x000f0000, 0x000f0000,
- 0x583, 0xffffffff, 0x00000100,
- 0x409, 0xffffffff, 0x00000100,
- 0x40b, 0x00000101, 0x00000000,
- 0x82a, 0xffffffff, 0x00000104,
- 0x993, 0x000c0000, 0x000c0000,
- 0x992, 0x000c0000, 0x000c0000,
+ 0x000c, 0xffffffff, 0x0000001c,
+ 0x000d, 0x000f0000, 0x000f0000,
+ 0x0583, 0xffffffff, 0x00000100,
+ 0x0409, 0xffffffff, 0x00000100,
+ 0x040b, 0x00000101, 0x00000000,
+ 0x082a, 0xffffffff, 0x00000104,
+ 0x0993, 0x000c0000, 0x000c0000,
+ 0x0992, 0x000c0000, 0x000c0000,
0x1579, 0xff000fff, 0x00000100,
0x157a, 0x00000001, 0x00000001,
- 0xbd4, 0x00000001, 0x00000001,
- 0xc33, 0xc0000fff, 0x00000104,
+ 0x0bd4, 0x00000001, 0x00000001,
+ 0x0c33, 0xc0000fff, 0x00000104,
0x3079, 0x00000001, 0x00000001,
0x3430, 0xfffffff0, 0x00000100,
0x3630, 0xfffffff0, 0x00000100
@@ -866,15 +874,15 @@ static const u32 hainan_mgcg_cgcg_init[] =
0x21c2, 0xffffffff, 0x00900100,
0x311e, 0xffffffff, 0x00000080,
0x3101, 0xffffffff, 0x0020003f,
- 0xc, 0xffffffff, 0x0000001c,
- 0xd, 0x000f0000, 0x000f0000,
- 0x583, 0xffffffff, 0x00000100,
- 0x409, 0xffffffff, 0x00000100,
- 0x82a, 0xffffffff, 0x00000104,
- 0x993, 0x000c0000, 0x000c0000,
- 0x992, 0x000c0000, 0x000c0000,
- 0xbd4, 0x00000001, 0x00000001,
- 0xc33, 0xc0000fff, 0x00000104,
+ 0x000c, 0xffffffff, 0x0000001c,
+ 0x000d, 0x000f0000, 0x000f0000,
+ 0x0583, 0xffffffff, 0x00000100,
+ 0x0409, 0xffffffff, 0x00000100,
+ 0x082a, 0xffffffff, 0x00000104,
+ 0x0993, 0x000c0000, 0x000c0000,
+ 0x0992, 0x000c0000, 0x000c0000,
+ 0x0bd4, 0x00000001, 0x00000001,
+ 0x0c33, 0xc0000fff, 0x00000104,
0x3079, 0x00000001, 0x00000001,
0x3430, 0xfffffff0, 0x00000100,
0x3630, 0xfffffff0, 0x00000100
@@ -905,7 +913,7 @@ static void si_pcie_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
}
-u32 si_pciep_rreg(struct amdgpu_device *adev, u32 reg)
+static u32 si_pciep_rreg(struct amdgpu_device *adev, u32 reg)
{
unsigned long flags;
u32 r;
@@ -918,7 +926,7 @@ u32 si_pciep_rreg(struct amdgpu_device *adev, u32 reg)
return r;
}
-void si_pciep_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
+static void si_pciep_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
unsigned long flags;
@@ -1178,6 +1186,8 @@ static int si_common_early_init(void *handle)
AMD_CG_SUPPORT_HDP_LS |
AMD_CG_SUPPORT_HDP_MGCG;
adev->pg_flags = 0;
+ adev->external_rev_id = (adev->rev_id == 0) ? 1 :
+ (adev->rev_id == 1) ? 5 : 6;
break;
case CHIP_PITCAIRN:
adev->cg_flags =
@@ -1197,6 +1207,7 @@ static int si_common_early_init(void *handle)
AMD_CG_SUPPORT_HDP_LS |
AMD_CG_SUPPORT_HDP_MGCG;
adev->pg_flags = 0;
+ adev->external_rev_id = adev->rev_id + 20;
break;
case CHIP_VERDE:
@@ -1218,7 +1229,7 @@ static int si_common_early_init(void *handle)
AMD_CG_SUPPORT_HDP_MGCG;
adev->pg_flags = 0;
//???
- adev->external_rev_id = adev->rev_id + 0x14;
+ adev->external_rev_id = adev->rev_id + 40;
break;
case CHIP_OLAND:
adev->cg_flags =
@@ -1237,6 +1248,7 @@ static int si_common_early_init(void *handle)
AMD_CG_SUPPORT_HDP_LS |
AMD_CG_SUPPORT_HDP_MGCG;
adev->pg_flags = 0;
+ adev->external_rev_id = 60;
break;
case CHIP_HAINAN:
adev->cg_flags =
@@ -1254,6 +1266,7 @@ static int si_common_early_init(void *handle)
AMD_CG_SUPPORT_HDP_LS |
AMD_CG_SUPPORT_HDP_MGCG;
adev->pg_flags = 0;
+ adev->external_rev_id = 70;
break;
default:
@@ -1811,7 +1824,7 @@ static int si_common_set_powergating_state(void *handle,
return 0;
}
-const struct amd_ip_funcs si_common_ip_funcs = {
+static const struct amd_ip_funcs si_common_ip_funcs = {
.name = "si_common",
.early_init = si_common_early_init,
.late_init = NULL,
@@ -1828,119 +1841,13 @@ const struct amd_ip_funcs si_common_ip_funcs = {
.set_powergating_state = si_common_set_powergating_state,
};
-static const struct amdgpu_ip_block_version verde_ip_blocks[] =
+static const struct amdgpu_ip_block_version si_common_ip_block =
{
- {
- .type = AMD_IP_BLOCK_TYPE_COMMON,
- .major = 1,
- .minor = 0,
- .rev = 0,
- .funcs = &si_common_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_GMC,
- .major = 6,
- .minor = 0,
- .rev = 0,
- .funcs = &gmc_v6_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_IH,
- .major = 1,
- .minor = 0,
- .rev = 0,
- .funcs = &si_ih_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_SMC,
- .major = 6,
- .minor = 0,
- .rev = 0,
- .funcs = &amdgpu_pp_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_DCE,
- .major = 6,
- .minor = 0,
- .rev = 0,
- .funcs = &dce_v6_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_GFX,
- .major = 6,
- .minor = 0,
- .rev = 0,
- .funcs = &gfx_v6_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_SDMA,
- .major = 1,
- .minor = 0,
- .rev = 0,
- .funcs = &si_dma_ip_funcs,
- },
-/* {
- .type = AMD_IP_BLOCK_TYPE_UVD,
- .major = 3,
- .minor = 1,
- .rev = 0,
- .funcs = &si_null_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_VCE,
- .major = 1,
- .minor = 0,
- .rev = 0,
- .funcs = &si_null_ip_funcs,
- },
- */
-};
-
-
-static const struct amdgpu_ip_block_version hainan_ip_blocks[] =
-{
- {
- .type = AMD_IP_BLOCK_TYPE_COMMON,
- .major = 1,
- .minor = 0,
- .rev = 0,
- .funcs = &si_common_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_GMC,
- .major = 6,
- .minor = 0,
- .rev = 0,
- .funcs = &gmc_v6_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_IH,
- .major = 1,
- .minor = 0,
- .rev = 0,
- .funcs = &si_ih_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_SMC,
- .major = 6,
- .minor = 0,
- .rev = 0,
- .funcs = &amdgpu_pp_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_GFX,
- .major = 6,
- .minor = 0,
- .rev = 0,
- .funcs = &gfx_v6_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_SDMA,
- .major = 1,
- .minor = 0,
- .rev = 0,
- .funcs = &si_dma_ip_funcs,
- },
+ .type = AMD_IP_BLOCK_TYPE_COMMON,
+ .major = 1,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &si_common_ip_funcs,
};
int si_set_ip_blocks(struct amdgpu_device *adev)
@@ -1949,13 +1856,42 @@ int si_set_ip_blocks(struct amdgpu_device *adev)
case CHIP_VERDE:
case CHIP_TAHITI:
case CHIP_PITCAIRN:
+ amdgpu_ip_block_add(adev, &si_common_ip_block);
+ amdgpu_ip_block_add(adev, &gmc_v6_0_ip_block);
+ amdgpu_ip_block_add(adev, &si_ih_ip_block);
+ amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
+ if (adev->enable_virtual_display)
+ amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
+ else
+ amdgpu_ip_block_add(adev, &dce_v6_0_ip_block);
+ amdgpu_ip_block_add(adev, &gfx_v6_0_ip_block);
+ amdgpu_ip_block_add(adev, &si_dma_ip_block);
+ /* amdgpu_ip_block_add(adev, &uvd_v3_1_ip_block); */
+ /* amdgpu_ip_block_add(adev, &vce_v1_0_ip_block); */
+ break;
case CHIP_OLAND:
- adev->ip_blocks = verde_ip_blocks;
- adev->num_ip_blocks = ARRAY_SIZE(verde_ip_blocks);
+ amdgpu_ip_block_add(adev, &si_common_ip_block);
+ amdgpu_ip_block_add(adev, &gmc_v6_0_ip_block);
+ amdgpu_ip_block_add(adev, &si_ih_ip_block);
+ amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
+ if (adev->enable_virtual_display)
+ amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
+ else
+ amdgpu_ip_block_add(adev, &dce_v6_4_ip_block);
+ amdgpu_ip_block_add(adev, &gfx_v6_0_ip_block);
+ amdgpu_ip_block_add(adev, &si_dma_ip_block);
+ /* amdgpu_ip_block_add(adev, &uvd_v3_1_ip_block); */
+ /* amdgpu_ip_block_add(adev, &vce_v1_0_ip_block); */
break;
case CHIP_HAINAN:
- adev->ip_blocks = hainan_ip_blocks;
- adev->num_ip_blocks = ARRAY_SIZE(hainan_ip_blocks);
+ amdgpu_ip_block_add(adev, &si_common_ip_block);
+ amdgpu_ip_block_add(adev, &gmc_v6_0_ip_block);
+ amdgpu_ip_block_add(adev, &si_ih_ip_block);
+ amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
+ if (adev->enable_virtual_display)
+ amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
+ amdgpu_ip_block_add(adev, &gfx_v6_0_ip_block);
+ amdgpu_ip_block_add(adev, &si_dma_ip_block);
break;
default:
BUG();
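
si_set_ip_blocks() now registers blocks one at a time instead of pointing adev at a fixed array, which is what makes the per-chip choice between dce_v6_0 and the virtual display block above possible. A plausible minimal amdgpu_ip_block_add(), assuming adev keeps a fixed-size array of registered blocks (the real helper lives in amdgpu_device.c and may differ in detail):

/* Sketch, not the verbatim helper: append one versioned block. */
int amdgpu_ip_block_add(struct amdgpu_device *adev,
			const struct amdgpu_ip_block_version *ip_block_version)
{
	if (!ip_block_version)
		return -EINVAL;

	adev->ip_blocks[adev->num_ip_blocks++].version = ip_block_version;

	return 0;
}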
diff --git a/drivers/gpu/drm/amd/amdgpu/si.h b/drivers/gpu/drm/amd/amdgpu/si.h
index 959d7b63e0e5..589225080c24 100644
--- a/drivers/gpu/drm/amd/amdgpu/si.h
+++ b/drivers/gpu/drm/amd/amdgpu/si.h
@@ -24,8 +24,6 @@
#ifndef __SI_H__
#define __SI_H__
-extern const struct amd_ip_funcs si_common_ip_funcs;
-
void si_srbm_select(struct amdgpu_device *adev,
u32 me, u32 pipe, u32 queue, u32 vmid);
int si_set_ip_blocks(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/si_dma.c b/drivers/gpu/drm/amd/amdgpu/si_dma.c
index de358193a8f9..3dd552ae0b59 100644
--- a/drivers/gpu/drm/amd/amdgpu/si_dma.c
+++ b/drivers/gpu/drm/amd/amdgpu/si_dma.c
@@ -274,7 +274,7 @@ static int si_dma_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
struct amdgpu_device *adev = ring->adev;
struct amdgpu_ib ib;
- struct fence *f = NULL;
+ struct dma_fence *f = NULL;
unsigned index;
u32 tmp = 0;
u64 gpu_addr;
@@ -305,7 +305,7 @@ static int si_dma_ring_test_ib(struct amdgpu_ring *ring, long timeout)
if (r)
goto err1;
- r = fence_wait_timeout(f, false, timeout);
+ r = dma_fence_wait_timeout(f, false, timeout);
if (r == 0) {
DRM_ERROR("amdgpu: IB test timed out\n");
r = -ETIMEDOUT;
@@ -325,7 +325,7 @@ static int si_dma_ring_test_ib(struct amdgpu_ring *ring, long timeout)
err1:
amdgpu_ib_free(adev, &ib, NULL);
- fence_put(f);
+ dma_fence_put(f);
err0:
amdgpu_wb_free(adev, index);
return r;
@@ -495,22 +495,6 @@ static void si_dma_ring_emit_vm_flush(struct amdgpu_ring *ring,
amdgpu_ring_write(ring, (0 << 28) | 0x20); /* func(always) | poll interval */
}
-static unsigned si_dma_ring_get_emit_ib_size(struct amdgpu_ring *ring)
-{
- return
- 7 + 3; /* si_dma_ring_emit_ib */
-}
-
-static unsigned si_dma_ring_get_dma_frame_size(struct amdgpu_ring *ring)
-{
- return
- 3 + /* si_dma_ring_emit_hdp_flush */
- 3 + /* si_dma_ring_emit_hdp_invalidate */
- 6 + /* si_dma_ring_emit_pipeline_sync */
- 12 + /* si_dma_ring_emit_vm_flush */
- 9 + 9 + 9; /* si_dma_ring_emit_fence x3 for user fence, vm fence */
-}
-
static int si_dma_early_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -547,11 +531,10 @@ static int si_dma_sw_init(void *handle)
ring->use_doorbell = false;
sprintf(ring->name, "sdma%d", i);
r = amdgpu_ring_init(adev, ring, 1024,
- DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0, 0), 0xf,
&adev->sdma.trap_irq,
(i == 0) ?
- AMDGPU_SDMA_IRQ_TRAP0 : AMDGPU_SDMA_IRQ_TRAP1,
- AMDGPU_RING_TYPE_SDMA);
+ AMDGPU_SDMA_IRQ_TRAP0 :
+ AMDGPU_SDMA_IRQ_TRAP1);
if (r)
return r;
}
@@ -762,7 +745,7 @@ static int si_dma_set_powergating_state(void *handle,
return 0;
}
-const struct amd_ip_funcs si_dma_ip_funcs = {
+static const struct amd_ip_funcs si_dma_ip_funcs = {
.name = "si_dma",
.early_init = si_dma_early_init,
.late_init = NULL,
@@ -780,10 +763,19 @@ const struct amd_ip_funcs si_dma_ip_funcs = {
};
static const struct amdgpu_ring_funcs si_dma_ring_funcs = {
+ .type = AMDGPU_RING_TYPE_SDMA,
+ .align_mask = 0xf,
+ .nop = DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0, 0),
.get_rptr = si_dma_ring_get_rptr,
.get_wptr = si_dma_ring_get_wptr,
.set_wptr = si_dma_ring_set_wptr,
- .parse_cs = NULL,
+ .emit_frame_size =
+ 3 + /* si_dma_ring_emit_hdp_flush */
+ 3 + /* si_dma_ring_emit_hdp_invalidate */
+ 6 + /* si_dma_ring_emit_pipeline_sync */
+ 12 + /* si_dma_ring_emit_vm_flush */
+ 9 + 9 + 9, /* si_dma_ring_emit_fence x3 for user fence, vm fence */
+ .emit_ib_size = 7 + 3, /* si_dma_ring_emit_ib */
.emit_ib = si_dma_ring_emit_ib,
.emit_fence = si_dma_ring_emit_fence,
.emit_pipeline_sync = si_dma_ring_emit_pipeline_sync,
@@ -794,8 +786,6 @@ static const struct amdgpu_ring_funcs si_dma_ring_funcs = {
.test_ib = si_dma_ring_test_ib,
.insert_nop = amdgpu_ring_insert_nop,
.pad_ib = si_dma_ring_pad_ib,
- .get_emit_ib_size = si_dma_ring_get_emit_ib_size,
- .get_dma_frame_size = si_dma_ring_get_dma_frame_size,
};
static void si_dma_set_ring_funcs(struct amdgpu_device *adev)
@@ -913,3 +903,12 @@ static void si_dma_set_vm_pte_funcs(struct amdgpu_device *adev)
adev->vm_manager.vm_pte_num_rings = adev->sdma.num_instances;
}
}
+
+const struct amdgpu_ip_block_version si_dma_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_SDMA,
+ .major = 1,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &si_dma_ip_funcs,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/si_dma.h b/drivers/gpu/drm/amd/amdgpu/si_dma.h
index 3a3e0c78a54b..5ac1b8452fb4 100644
--- a/drivers/gpu/drm/amd/amdgpu/si_dma.h
+++ b/drivers/gpu/drm/amd/amdgpu/si_dma.h
@@ -24,6 +24,6 @@
#ifndef __SI_DMA_H__
#define __SI_DMA_H__
-extern const struct amd_ip_funcs si_dma_ip_funcs;
+extern const struct amdgpu_ip_block_version si_dma_ip_block;
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/si_dpm.c b/drivers/gpu/drm/amd/amdgpu/si_dpm.c
index d6f85b1a0b93..6c65a1a2de79 100644
--- a/drivers/gpu/drm/amd/amdgpu/si_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/si_dpm.c
@@ -3171,6 +3171,7 @@ static void ni_update_current_ps(struct amdgpu_device *adev,
eg_pi->current_rps = *rps;
ni_pi->current_ps = *new_ps;
eg_pi->current_rps.ps_priv = &ni_pi->current_ps;
+ adev->pm.dpm.current_ps = &eg_pi->current_rps;
}
static void ni_update_requested_ps(struct amdgpu_device *adev,
@@ -3183,6 +3184,7 @@ static void ni_update_requested_ps(struct amdgpu_device *adev,
eg_pi->requested_rps = *rps;
ni_pi->requested_ps = *new_ps;
eg_pi->requested_rps.ps_priv = &ni_pi->requested_ps;
+ adev->pm.dpm.requested_ps = &eg_pi->requested_rps;
}
static void ni_set_uvd_clock_before_set_eng_clock(struct amdgpu_device *adev,
@@ -3504,6 +3506,7 @@ static void si_apply_state_adjust_rules(struct amdgpu_device *adev,
(adev->pdev->revision == 0x80) ||
(adev->pdev->revision == 0x81) ||
(adev->pdev->revision == 0x83) ||
+ (adev->pdev->revision == 0x87) ||
(adev->pdev->device == 0x6604) ||
(adev->pdev->device == 0x6605)) {
max_sclk = 75000;
@@ -7347,7 +7350,7 @@ static int si_parse_power_table(struct amdgpu_device *adev)
adev->pm.dpm.num_ps = state_array->ucNumEntries;
/* fill in the vce power states */
- for (i = 0; i < AMDGPU_MAX_VCE_LEVELS; i++) {
+ for (i = 0; i < adev->pm.dpm.num_of_vce_states; i++) {
u32 sclk, mclk;
clock_array_index = adev->pm.dpm.vce_states[i].clk_idx;
clock_info = (union pplib_clock_info *)
@@ -7713,6 +7716,7 @@ static int si_dpm_init_microcode(struct amdgpu_device *adev)
(adev->pdev->revision == 0x80) ||
(adev->pdev->revision == 0x81) ||
(adev->pdev->revision == 0x83) ||
+ (adev->pdev->revision == 0x87) ||
(adev->pdev->device == 0x6604) ||
(adev->pdev->device == 0x6605))
chip_name = "oland_k";
@@ -7986,6 +7990,57 @@ static int si_dpm_early_init(void *handle)
return 0;
}
+static inline bool si_are_power_levels_equal(const struct rv7xx_pl *si_cpl1,
+ const struct rv7xx_pl *si_cpl2)
+{
+ return ((si_cpl1->mclk == si_cpl2->mclk) &&
+ (si_cpl1->sclk == si_cpl2->sclk) &&
+ (si_cpl1->pcie_gen == si_cpl2->pcie_gen) &&
+ (si_cpl1->vddc == si_cpl2->vddc) &&
+ (si_cpl1->vddci == si_cpl2->vddci));
+}
+
+static int si_check_state_equal(struct amdgpu_device *adev,
+ struct amdgpu_ps *cps,
+ struct amdgpu_ps *rps,
+ bool *equal)
+{
+ struct si_ps *si_cps;
+ struct si_ps *si_rps;
+ int i;
+
+ if (adev == NULL || cps == NULL || rps == NULL || equal == NULL)
+ return -EINVAL;
+
+ si_cps = si_get_ps(cps);
+ si_rps = si_get_ps(rps);
+
+ if (si_cps == NULL || si_rps == NULL) {
+ printk(KERN_WARNING "si_cps or si_rps is NULL\n");
+ *equal = false;
+ return 0;
+ }
+
+ if (si_cps->performance_level_count != si_rps->performance_level_count) {
+ *equal = false;
+ return 0;
+ }
+
+ for (i = 0; i < si_cps->performance_level_count; i++) {
+ if (!si_are_power_levels_equal(&(si_cps->performance_levels[i]),
+ &(si_rps->performance_levels[i]))) {
+ *equal = false;
+ return 0;
+ }
+ }
+
+ /* If all performance levels are the same, try to use the UVD clocks to break the tie. */
+ *equal = ((cps->vclk == rps->vclk) && (cps->dclk == rps->dclk));
+ *equal &= ((cps->evclk == rps->evclk) && (cps->ecclk == rps->ecclk));
+
+ return 0;
+}
+
const struct amd_ip_funcs si_dpm_ip_funcs = {
.name = "si_dpm",
@@ -8020,6 +8075,8 @@ static const struct amdgpu_dpm_funcs si_dpm_funcs = {
.get_fan_control_mode = &si_dpm_get_fan_control_mode,
.set_fan_speed_percent = &si_dpm_set_fan_speed_percent,
.get_fan_speed_percent = &si_dpm_get_fan_speed_percent,
+ .check_state_equal = &si_check_state_equal,
+ .get_vce_clock_state = amdgpu_get_vce_clock_state,
};
static void si_dpm_set_dpm_funcs(struct amdgpu_device *adev)
@@ -8039,3 +8096,11 @@ static void si_dpm_set_irq_funcs(struct amdgpu_device *adev)
adev->pm.dpm.thermal.irq.funcs = &si_dpm_irq_funcs;
}
+const struct amdgpu_ip_block_version si_dpm_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_SMC,
+ .major = 6,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &si_dpm_ip_funcs,
+};
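The new check_state_equal hook lets the DPM core skip a full power-state reprogram when the requested state already matches the current one: the states compare equal only if every performance level matches on mclk, sclk, PCIe gen, vddc, and vddci, with the UVD/VCE clocks used as a tie-breaker. A hedged sketch of the caller side; the dispatch shown is illustrative, only the hook and the current_ps/requested_ps bookkeeping come from this diff:

    static int dpm_set_power_state(struct amdgpu_device *adev)
    {
        bool equal = false;

        /* Ask the backend whether anything would actually change. */
        if (adev->pm.funcs->check_state_equal &&
            !adev->pm.funcs->check_state_equal(adev,
                                               adev->pm.dpm.current_ps,
                                               adev->pm.dpm.requested_ps,
                                               &equal) &&
            equal)
                return 0;  /* states match, skip the reprogram */

        /* ... otherwise perform the full state transition ... */
        return 0;
    }

This also explains why ni_update_current_ps/ni_update_requested_ps now publish their rps pointers into adev->pm.dpm: the comparison needs both sides visible to the core.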
diff --git a/drivers/gpu/drm/amd/amdgpu/si_enums.h b/drivers/gpu/drm/amd/amdgpu/si_enums.h
new file mode 100644
index 000000000000..fde2086246fa
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/si_enums.h
@@ -0,0 +1,272 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef SI_ENUMS_H
+#define SI_ENUMS_H
+
+#define VBLANK_INT_MASK (1 << 0)
+#define DC_HPDx_INT_EN (1 << 16)
+#define VBLANK_ACK (1 << 4)
+#define VLINE_ACK (1 << 4)
+
+#define CURSOR_WIDTH 64
+#define CURSOR_HEIGHT 64
+
+#define VGA_VSTATUS_CNTL 0xFFFCFFFF
+#define PRIORITY_MARK_MASK 0x7fff
+#define PRIORITY_OFF (1 << 16)
+#define PRIORITY_ALWAYS_ON (1 << 20)
+#define INTERLEAVE_EN (1 << 0)
+
+#define LATENCY_WATERMARK_MASK(x) ((x) << 16)
+#define DC_LB_MEMORY_CONFIG(x) ((x) << 20)
+#define ICON_DEGAMMA_MODE(x) (((x) & 0x3) << 8)
+
+#define GRPH_ENDIAN_SWAP(x) (((x) & 0x3) << 0)
+#define GRPH_ENDIAN_NONE 0
+#define GRPH_ENDIAN_8IN16 1
+#define GRPH_ENDIAN_8IN32 2
+#define GRPH_ENDIAN_8IN64 3
+
+#define GRPH_DEPTH(x) (((x) & 0x3) << 0)
+#define GRPH_DEPTH_8BPP 0
+#define GRPH_DEPTH_16BPP 1
+#define GRPH_DEPTH_32BPP 2
+
+#define GRPH_FORMAT(x) (((x) & 0x7) << 8)
+#define GRPH_FORMAT_INDEXED 0
+#define GRPH_FORMAT_ARGB1555 0
+#define GRPH_FORMAT_ARGB565 1
+#define GRPH_FORMAT_ARGB4444 2
+#define GRPH_FORMAT_AI88 3
+#define GRPH_FORMAT_MONO16 4
+#define GRPH_FORMAT_BGRA5551 5
+#define GRPH_FORMAT_ARGB8888 0
+#define GRPH_FORMAT_ARGB2101010 1
+#define GRPH_FORMAT_32BPP_DIG 2
+#define GRPH_FORMAT_8B_ARGB2101010 3
+#define GRPH_FORMAT_BGRA1010102 4
+#define GRPH_FORMAT_8B_BGRA1010102 5
+#define GRPH_FORMAT_RGB111110 6
+#define GRPH_FORMAT_BGR101111 7
+
+#define GRPH_NUM_BANKS(x) (((x) & 0x3) << 2)
+#define GRPH_ARRAY_MODE(x) (((x) & 0x7) << 20)
+#define GRPH_ARRAY_LINEAR_GENERAL 0
+#define GRPH_ARRAY_LINEAR_ALIGNED 1
+#define GRPH_ARRAY_1D_TILED_THIN1 2
+#define GRPH_ARRAY_2D_TILED_THIN1 4
+#define GRPH_TILE_SPLIT(x) (((x) & 0x7) << 13)
+#define GRPH_BANK_WIDTH(x) (((x) & 0x3) << 6)
+#define GRPH_BANK_HEIGHT(x) (((x) & 0x3) << 11)
+#define GRPH_MACRO_TILE_ASPECT(x) (((x) & 0x3) << 18)
+#define GRPH_ARRAY_MODE(x) (((x) & 0x7) << 20)
+#define GRPH_PIPE_CONFIG(x) (((x) & 0x1f) << 24)
+
+#define CURSOR_EN (1 << 0)
+#define CURSOR_MODE(x) (((x) & 0x3) << 8)
+#define CURSOR_MONO 0
+#define CURSOR_24_1 1
+#define CURSOR_24_8_PRE_MULT 2
+#define CURSOR_24_8_UNPRE_MULT 3
+#define CURSOR_2X_MAGNIFY (1 << 16)
+#define CURSOR_FORCE_MC_ON (1 << 20)
+#define CURSOR_URGENT_CONTROL(x) (((x) & 0x7) << 24)
+#define CURSOR_URGENT_ALWAYS 0
+#define CURSOR_URGENT_1_8 1
+#define CURSOR_URGENT_1_4 2
+#define CURSOR_URGENT_3_8 3
+#define CURSOR_URGENT_1_2 4
+#define CURSOR_UPDATE_PENDING (1 << 0)
+#define CURSOR_UPDATE_TAKEN (1 << 1)
+#define CURSOR_UPDATE_LOCK (1 << 16)
+#define CURSOR_DISABLE_MULTIPLE_UPDATE (1 << 24)
+
+#define AMDGPU_NUM_OF_VMIDS 8
+#define SI_CRTC0_REGISTER_OFFSET 0
+#define SI_CRTC1_REGISTER_OFFSET 0x300
+#define SI_CRTC2_REGISTER_OFFSET 0x2600
+#define SI_CRTC3_REGISTER_OFFSET 0x2900
+#define SI_CRTC4_REGISTER_OFFSET 0x2c00
+#define SI_CRTC5_REGISTER_OFFSET 0x2f00
+
+#define DMA0_REGISTER_OFFSET 0x000
+#define DMA1_REGISTER_OFFSET 0x200
+#define ES_AND_GS_AUTO 3
+#define RADEON_PACKET_TYPE3 3
+#define CE_PARTITION_BASE 3
+#define BUF_SWAP_32BIT (2 << 16)
+
+#define GFX_POWER_STATUS (1 << 1)
+#define GFX_CLOCK_STATUS (1 << 2)
+#define GFX_LS_STATUS (1 << 3)
+#define RLC_BUSY_STATUS (1 << 0)
+
+#define RLC_PUD(x) ((x) << 0)
+#define RLC_PUD_MASK (0xff << 0)
+#define RLC_PDD(x) ((x) << 8)
+#define RLC_PDD_MASK (0xff << 8)
+#define RLC_TTPD(x) ((x) << 16)
+#define RLC_TTPD_MASK (0xff << 16)
+#define RLC_MSD(x) ((x) << 24)
+#define RLC_MSD_MASK (0xff << 24)
+#define WRITE_DATA_ENGINE_SEL(x) ((x) << 30)
+#define WRITE_DATA_DST_SEL(x) ((x) << 8)
+#define EVENT_TYPE(x) ((x) << 0)
+#define EVENT_INDEX(x) ((x) << 8)
+#define WAIT_REG_MEM_MEM_SPACE(x) ((x) << 4)
+#define WAIT_REG_MEM_FUNCTION(x) ((x) << 0)
+#define WAIT_REG_MEM_ENGINE(x) ((x) << 8)
+
+#define GFX6_NUM_GFX_RINGS 1
+#define GFX6_NUM_COMPUTE_RINGS 2
+#define RLC_SAVE_AND_RESTORE_STARTING_OFFSET 0x90
+#define RLC_CLEAR_STATE_DESCRIPTOR_OFFSET 0x3D
+
+#define TAHITI_GB_ADDR_CONFIG_GOLDEN 0x12011003
+#define VERDE_GB_ADDR_CONFIG_GOLDEN 0x12010002
+#define HAINAN_GB_ADDR_CONFIG_GOLDEN 0x02010001
+
+#define PACKET3(op, n) ((RADEON_PACKET_TYPE3 << 30) | \
+ (((op) & 0xFF) << 8) | \
+ ((n) & 0x3FFF) << 16)
+#define PACKET3_COMPUTE(op, n) (PACKET3(op, n) | 1 << 1)
+#define PACKET3_NOP 0x10
+#define PACKET3_SET_BASE 0x11
+#define PACKET3_BASE_INDEX(x) ((x) << 0)
+#define PACKET3_CLEAR_STATE 0x12
+#define PACKET3_INDEX_BUFFER_SIZE 0x13
+#define PACKET3_DISPATCH_DIRECT 0x15
+#define PACKET3_DISPATCH_INDIRECT 0x16
+#define PACKET3_ALLOC_GDS 0x1B
+#define PACKET3_WRITE_GDS_RAM 0x1C
+#define PACKET3_ATOMIC_GDS 0x1D
+#define PACKET3_ATOMIC 0x1E
+#define PACKET3_OCCLUSION_QUERY 0x1F
+#define PACKET3_SET_PREDICATION 0x20
+#define PACKET3_REG_RMW 0x21
+#define PACKET3_COND_EXEC 0x22
+#define PACKET3_PRED_EXEC 0x23
+#define PACKET3_DRAW_INDIRECT 0x24
+#define PACKET3_DRAW_INDEX_INDIRECT 0x25
+#define PACKET3_INDEX_BASE 0x26
+#define PACKET3_DRAW_INDEX_2 0x27
+#define PACKET3_CONTEXT_CONTROL 0x28
+#define PACKET3_INDEX_TYPE 0x2A
+#define PACKET3_DRAW_INDIRECT_MULTI 0x2C
+#define PACKET3_DRAW_INDEX_AUTO 0x2D
+#define PACKET3_DRAW_INDEX_IMMD 0x2E
+#define PACKET3_NUM_INSTANCES 0x2F
+#define PACKET3_DRAW_INDEX_MULTI_AUTO 0x30
+#define PACKET3_INDIRECT_BUFFER_CONST 0x31
+#define PACKET3_INDIRECT_BUFFER 0x3F
+#define PACKET3_STRMOUT_BUFFER_UPDATE 0x34
+#define PACKET3_DRAW_INDEX_OFFSET_2 0x35
+#define PACKET3_DRAW_INDEX_MULTI_ELEMENT 0x36
+#define PACKET3_WRITE_DATA 0x37
+#define PACKET3_DRAW_INDEX_INDIRECT_MULTI 0x38
+#define PACKET3_MEM_SEMAPHORE 0x39
+#define PACKET3_MPEG_INDEX 0x3A
+#define PACKET3_COPY_DW 0x3B
+#define PACKET3_WAIT_REG_MEM 0x3C
+#define PACKET3_MEM_WRITE 0x3D
+#define PACKET3_COPY_DATA 0x40
+#define PACKET3_CP_DMA 0x41
+# define PACKET3_CP_DMA_DST_SEL(x) ((x) << 20)
+# define PACKET3_CP_DMA_ENGINE(x) ((x) << 27)
+# define PACKET3_CP_DMA_SRC_SEL(x) ((x) << 29)
+# define PACKET3_CP_DMA_CP_SYNC (1 << 31)
+# define PACKET3_CP_DMA_DIS_WC (1 << 21)
+# define PACKET3_CP_DMA_CMD_SRC_SWAP(x) ((x) << 22)
+# define PACKET3_CP_DMA_CMD_DST_SWAP(x) ((x) << 24)
+# define PACKET3_CP_DMA_CMD_SAS (1 << 26)
+# define PACKET3_CP_DMA_CMD_DAS (1 << 27)
+# define PACKET3_CP_DMA_CMD_SAIC (1 << 28)
+# define PACKET3_CP_DMA_CMD_DAIC (1 << 29)
+# define PACKET3_CP_DMA_CMD_RAW_WAIT (1 << 30)
+#define PACKET3_PFP_SYNC_ME 0x42
+#define PACKET3_SURFACE_SYNC 0x43
+# define PACKET3_DEST_BASE_0_ENA (1 << 0)
+# define PACKET3_DEST_BASE_1_ENA (1 << 1)
+# define PACKET3_CB0_DEST_BASE_ENA (1 << 6)
+# define PACKET3_CB1_DEST_BASE_ENA (1 << 7)
+# define PACKET3_CB2_DEST_BASE_ENA (1 << 8)
+# define PACKET3_CB3_DEST_BASE_ENA (1 << 9)
+# define PACKET3_CB4_DEST_BASE_ENA (1 << 10)
+# define PACKET3_CB5_DEST_BASE_ENA (1 << 11)
+# define PACKET3_CB6_DEST_BASE_ENA (1 << 12)
+# define PACKET3_CB7_DEST_BASE_ENA (1 << 13)
+# define PACKET3_DB_DEST_BASE_ENA (1 << 14)
+# define PACKET3_DEST_BASE_2_ENA (1 << 19)
+# define PACKET3_DEST_BASE_3_ENA (1 << 21)
+# define PACKET3_TCL1_ACTION_ENA (1 << 22)
+# define PACKET3_TC_ACTION_ENA (1 << 23)
+# define PACKET3_CB_ACTION_ENA (1 << 25)
+# define PACKET3_DB_ACTION_ENA (1 << 26)
+# define PACKET3_SH_KCACHE_ACTION_ENA (1 << 27)
+# define PACKET3_SH_ICACHE_ACTION_ENA (1 << 29)
+#define PACKET3_ME_INITIALIZE 0x44
+#define PACKET3_ME_INITIALIZE_DEVICE_ID(x) ((x) << 16)
+#define PACKET3_COND_WRITE 0x45
+#define PACKET3_EVENT_WRITE 0x46
+#define PACKET3_EVENT_WRITE_EOP 0x47
+#define PACKET3_EVENT_WRITE_EOS 0x48
+#define PACKET3_PREAMBLE_CNTL 0x4A
+# define PACKET3_PREAMBLE_BEGIN_CLEAR_STATE (2 << 28)
+# define PACKET3_PREAMBLE_END_CLEAR_STATE (3 << 28)
+#define PACKET3_ONE_REG_WRITE 0x57
+#define PACKET3_LOAD_CONFIG_REG 0x5F
+#define PACKET3_LOAD_CONTEXT_REG 0x60
+#define PACKET3_LOAD_SH_REG 0x61
+#define PACKET3_SET_CONFIG_REG 0x68
+#define PACKET3_SET_CONFIG_REG_START 0x00002000
+#define PACKET3_SET_CONFIG_REG_END 0x00002c00
+#define PACKET3_SET_CONTEXT_REG 0x69
+#define PACKET3_SET_CONTEXT_REG_START 0x000a000
+#define PACKET3_SET_CONTEXT_REG_END 0x000a400
+#define PACKET3_SET_CONTEXT_REG_INDIRECT 0x73
+#define PACKET3_SET_RESOURCE_INDIRECT 0x74
+#define PACKET3_SET_SH_REG 0x76
+#define PACKET3_SET_SH_REG_START 0x00002c00
+#define PACKET3_SET_SH_REG_END 0x00003000
+#define PACKET3_SET_SH_REG_OFFSET 0x77
+#define PACKET3_ME_WRITE 0x7A
+#define PACKET3_SCRATCH_RAM_WRITE 0x7D
+#define PACKET3_SCRATCH_RAM_READ 0x7E
+#define PACKET3_CE_WRITE 0x7F
+#define PACKET3_LOAD_CONST_RAM 0x80
+#define PACKET3_WRITE_CONST_RAM 0x81
+#define PACKET3_WRITE_CONST_RAM_OFFSET 0x82
+#define PACKET3_DUMP_CONST_RAM 0x83
+#define PACKET3_INCREMENT_CE_COUNTER 0x84
+#define PACKET3_INCREMENT_DE_COUNTER 0x85
+#define PACKET3_WAIT_ON_CE_COUNTER 0x86
+#define PACKET3_WAIT_ON_DE_COUNTER 0x87
+#define PACKET3_WAIT_ON_DE_COUNTER_DIFF 0x88
+#define PACKET3_SET_CE_DE_COUNTERS 0x89
+#define PACKET3_WAIT_ON_AVAIL_BUFFER 0x8A
+#define PACKET3_SWITCH_BUFFER 0x8B
+#define PACKET3_SEM_WAIT_ON_SIGNAL (0x1 << 12)
+#define PACKET3_SEM_SEL_SIGNAL (0x6 << 29)
+#define PACKET3_SEM_SEL_WAIT (0x7 << 29)
+
+#endif
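Most of this header is a straight import of the SI register and packet definitions. The PACKET3 macro is the one piece worth unpacking: a type-3 command packet header stores the packet type in bits 31:30, the count field in bits 29:16 (body dwords minus one, per the usual PM4 convention), and the opcode in bits 15:8. A standalone worked example in plain C, independent of the driver:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        /* PACKET3(PACKET3_WRITE_DATA, 3) from the header above:
         * type 3 in bits 31:30, count 3 in bits 29:16, opcode 0x37
         * in bits 15:8. */
        uint32_t hdr = (3u << 30) | ((3u & 0x3FFF) << 16) | ((0x37u & 0xFF) << 8);

        assert(hdr == 0xC0033700u);
        return 0;
    }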
diff --git a/drivers/gpu/drm/amd/amdgpu/si_ih.c b/drivers/gpu/drm/amd/amdgpu/si_ih.c
index 8fae3d4a2360..db0f36846661 100644
--- a/drivers/gpu/drm/amd/amdgpu/si_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/si_ih.c
@@ -268,7 +268,7 @@ static int si_ih_set_powergating_state(void *handle,
return 0;
}
-const struct amd_ip_funcs si_ih_ip_funcs = {
+static const struct amd_ip_funcs si_ih_ip_funcs = {
.name = "si_ih",
.early_init = si_ih_early_init,
.late_init = NULL,
@@ -297,3 +297,11 @@ static void si_ih_set_interrupt_funcs(struct amdgpu_device *adev)
adev->irq.ih_funcs = &si_ih_funcs;
}
+const struct amdgpu_ip_block_version si_ih_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_IH,
+ .major = 1,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &si_ih_ip_funcs,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/si_ih.h b/drivers/gpu/drm/amd/amdgpu/si_ih.h
index f3e3a954369c..42e64a53e24f 100644
--- a/drivers/gpu/drm/amd/amdgpu/si_ih.h
+++ b/drivers/gpu/drm/amd/amdgpu/si_ih.h
@@ -24,6 +24,6 @@
#ifndef __SI_IH_H__
#define __SI_IH_H__
-extern const struct amd_ip_funcs si_ih_ip_funcs;
+extern const struct amdgpu_ip_block_version si_ih_ip_block;
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/tonga_ih.c b/drivers/gpu/drm/amd/amdgpu/tonga_ih.c
index b4ea229bb449..52b71ee58793 100644
--- a/drivers/gpu/drm/amd/amdgpu/tonga_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/tonga_ih.c
@@ -455,7 +455,7 @@ static int tonga_ih_set_powergating_state(void *handle,
return 0;
}
-const struct amd_ip_funcs tonga_ih_ip_funcs = {
+static const struct amd_ip_funcs tonga_ih_ip_funcs = {
.name = "tonga_ih",
.early_init = tonga_ih_early_init,
.late_init = NULL,
@@ -487,3 +487,11 @@ static void tonga_ih_set_interrupt_funcs(struct amdgpu_device *adev)
adev->irq.ih_funcs = &tonga_ih_funcs;
}
+const struct amdgpu_ip_block_version tonga_ih_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_IH,
+ .major = 3,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &tonga_ih_ip_funcs,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/tonga_ih.h b/drivers/gpu/drm/amd/amdgpu/tonga_ih.h
index 7392d70fa4a7..499027eee5c2 100644
--- a/drivers/gpu/drm/amd/amdgpu/tonga_ih.h
+++ b/drivers/gpu/drm/amd/amdgpu/tonga_ih.h
@@ -24,6 +24,6 @@
#ifndef __TONGA_IH_H__
#define __TONGA_IH_H__
-extern const struct amd_ip_funcs tonga_ih_ip_funcs;
+extern const struct amdgpu_ip_block_version tonga_ih_ip_block;
-#endif /* __CZ_IH_H__ */
+#endif /* __TONGA_IH_H__ */
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
index f6c941550b8f..96444e4d862a 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
@@ -36,13 +36,17 @@
#include "bif/bif_4_1_d.h"
+#include "smu/smu_7_0_1_d.h"
+#include "smu/smu_7_0_1_sh_mask.h"
+
static void uvd_v4_2_mc_resume(struct amdgpu_device *adev);
static void uvd_v4_2_init_cg(struct amdgpu_device *adev);
static void uvd_v4_2_set_ring_funcs(struct amdgpu_device *adev);
static void uvd_v4_2_set_irq_funcs(struct amdgpu_device *adev);
static int uvd_v4_2_start(struct amdgpu_device *adev);
static void uvd_v4_2_stop(struct amdgpu_device *adev);
-
+static int uvd_v4_2_set_clockgating_state(void *handle,
+ enum amd_clockgating_state state);
/**
* uvd_v4_2_ring_get_rptr - get read pointer
*
@@ -116,8 +120,7 @@ static int uvd_v4_2_sw_init(void *handle)
ring = &adev->uvd.ring;
sprintf(ring->name, "uvd");
- r = amdgpu_ring_init(adev, ring, 512, PACKET0(mmUVD_NO_OP, 0), 0xf,
- &adev->uvd.irq, 0, AMDGPU_RING_TYPE_UVD);
+ r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.irq, 0);
return r;
}
@@ -152,9 +155,9 @@ static int uvd_v4_2_hw_init(void *handle)
uint32_t tmp;
int r;
- /* raise clocks while booting up the VCPU */
- amdgpu_asic_set_uvd_clocks(adev, 53300, 40000);
-
+ uvd_v4_2_init_cg(adev);
+ uvd_v4_2_set_clockgating_state(adev, AMD_CG_STATE_GATE);
+ amdgpu_asic_set_uvd_clocks(adev, 10000, 10000);
r = uvd_v4_2_start(adev);
if (r)
goto done;
@@ -194,8 +197,6 @@ static int uvd_v4_2_hw_init(void *handle)
amdgpu_ring_commit(ring);
done:
- /* lower clocks again */
- amdgpu_asic_set_uvd_clocks(adev, 0, 0);
if (!r)
DRM_INFO("UVD initialized successfully.\n");
@@ -272,9 +273,6 @@ static int uvd_v4_2_start(struct amdgpu_device *adev)
uvd_v4_2_mc_resume(adev);
- /* disable clock gating */
- WREG32(mmUVD_CGC_GATE, 0);
-
/* disable interrupt */
WREG32_P(mmUVD_MASTINT_EN, 0, ~(1 << 1));
@@ -526,20 +524,6 @@ static void uvd_v4_2_ring_emit_ib(struct amdgpu_ring *ring,
amdgpu_ring_write(ring, ib->length_dw);
}
-static unsigned uvd_v4_2_ring_get_emit_ib_size(struct amdgpu_ring *ring)
-{
- return
- 4; /* uvd_v4_2_ring_emit_ib */
-}
-
-static unsigned uvd_v4_2_ring_get_dma_frame_size(struct amdgpu_ring *ring)
-{
- return
- 2 + /* uvd_v4_2_ring_emit_hdp_flush */
- 2 + /* uvd_v4_2_ring_emit_hdp_invalidate */
- 14; /* uvd_v4_2_ring_emit_fence x1 no user fence */
-}
-
/**
* uvd_v4_2_mc_resume - memory controller programming
*
@@ -580,8 +564,6 @@ static void uvd_v4_2_mc_resume(struct amdgpu_device *adev)
WREG32(mmUVD_UDEC_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
WREG32(mmUVD_UDEC_DB_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
WREG32(mmUVD_UDEC_DBW_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
-
- uvd_v4_2_init_cg(adev);
}
static void uvd_v4_2_enable_mgcg(struct amdgpu_device *adev,
@@ -591,7 +573,7 @@ static void uvd_v4_2_enable_mgcg(struct amdgpu_device *adev,
if (enable && (adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG)) {
data = RREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL);
- data = 0xfff;
+ data |= 0xfff;
WREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL, data);
orig = data = RREG32(mmUVD_CGC_CTRL);
@@ -615,6 +597,8 @@ static void uvd_v4_2_set_dcm(struct amdgpu_device *adev,
{
u32 tmp, tmp2;
+ WREG32_FIELD(UVD_CGC_GATE, REGS, 0);
+
tmp = RREG32(mmUVD_CGC_CTRL);
tmp &= ~(UVD_CGC_CTRL__CLK_OFF_DELAY_MASK | UVD_CGC_CTRL__CLK_GATE_DLY_TIMER_MASK);
tmp |= UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK |
@@ -738,7 +722,7 @@ static int uvd_v4_2_set_powergating_state(void *handle,
}
}
-const struct amd_ip_funcs uvd_v4_2_ip_funcs = {
+static const struct amd_ip_funcs uvd_v4_2_ip_funcs = {
.name = "uvd_v4_2",
.early_init = uvd_v4_2_early_init,
.late_init = NULL,
@@ -756,10 +740,18 @@ const struct amd_ip_funcs uvd_v4_2_ip_funcs = {
};
static const struct amdgpu_ring_funcs uvd_v4_2_ring_funcs = {
+ .type = AMDGPU_RING_TYPE_UVD,
+ .align_mask = 0xf,
+ .nop = PACKET0(mmUVD_NO_OP, 0),
.get_rptr = uvd_v4_2_ring_get_rptr,
.get_wptr = uvd_v4_2_ring_get_wptr,
.set_wptr = uvd_v4_2_ring_set_wptr,
.parse_cs = amdgpu_uvd_ring_parse_cs,
+ .emit_frame_size =
+ 2 + /* uvd_v4_2_ring_emit_hdp_flush */
+ 2 + /* uvd_v4_2_ring_emit_hdp_invalidate */
+ 14, /* uvd_v4_2_ring_emit_fence x1 no user fence */
+ .emit_ib_size = 4, /* uvd_v4_2_ring_emit_ib */
.emit_ib = uvd_v4_2_ring_emit_ib,
.emit_fence = uvd_v4_2_ring_emit_fence,
.emit_hdp_flush = uvd_v4_2_ring_emit_hdp_flush,
@@ -770,8 +762,6 @@ static const struct amdgpu_ring_funcs uvd_v4_2_ring_funcs = {
.pad_ib = amdgpu_ring_generic_pad_ib,
.begin_use = amdgpu_uvd_ring_begin_use,
.end_use = amdgpu_uvd_ring_end_use,
- .get_emit_ib_size = uvd_v4_2_ring_get_emit_ib_size,
- .get_dma_frame_size = uvd_v4_2_ring_get_dma_frame_size,
};
static void uvd_v4_2_set_ring_funcs(struct amdgpu_device *adev)
@@ -789,3 +779,12 @@ static void uvd_v4_2_set_irq_funcs(struct amdgpu_device *adev)
adev->uvd.irq.num_types = 1;
adev->uvd.irq.funcs = &uvd_v4_2_irq_funcs;
}
+
+const struct amdgpu_ip_block_version uvd_v4_2_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_UVD,
+ .major = 4,
+ .minor = 2,
+ .rev = 0,
+ .funcs = &uvd_v4_2_ip_funcs,
+};
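Two of the uvd_v4_2 changes above are easy to miss. First, clock-gating setup moves out of mc_resume into hw_init, and the boot clocks drop from 53300/40000 to 10000/10000 now that gating is configured before the VCPU starts. Second, `data = 0xfff` becomes `data |= 0xfff` in uvd_v4_2_enable_mgcg: the old code blindly overwrote UVD_CGC_MEM_CTRL, clobbering every field above bit 11, while the new code does a proper read-modify-write. The generic pattern, sketched with stub MMIO helpers standing in for the driver's RREG32/WREG32:

    #include <stdint.h>

    /* Illustrative MMIO stubs, not the driver's accessors. */
    static uint32_t fake_reg;
    static uint32_t rreg(uint32_t reg) { (void)reg; return fake_reg; }
    static void wreg(uint32_t reg, uint32_t v) { (void)reg; fake_reg = v; }

    /* Set only the bits in `mask`; every other field keeps its value. */
    static void reg_set_bits(uint32_t reg, uint32_t mask)
    {
        uint32_t v = rreg(reg); /* read the current register contents */
        v |= mask;              /* modify just the requested bits */
        wreg(reg, v);           /* write back with other fields intact */
    }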
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.h b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.h
index 0a615dd50840..8a0444bb8b95 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.h
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.h
@@ -24,6 +24,6 @@
#ifndef __UVD_V4_2_H__
#define __UVD_V4_2_H__
-extern const struct amd_ip_funcs uvd_v4_2_ip_funcs;
+extern const struct amdgpu_ip_block_version uvd_v4_2_ip_block;
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
index 400c16fe579e..a79e283590fb 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
@@ -33,12 +33,17 @@
#include "oss/oss_2_0_sh_mask.h"
#include "bif/bif_5_0_d.h"
#include "vi.h"
+#include "smu/smu_7_1_2_d.h"
+#include "smu/smu_7_1_2_sh_mask.h"
static void uvd_v5_0_set_ring_funcs(struct amdgpu_device *adev);
static void uvd_v5_0_set_irq_funcs(struct amdgpu_device *adev);
static int uvd_v5_0_start(struct amdgpu_device *adev);
static void uvd_v5_0_stop(struct amdgpu_device *adev);
-
+static int uvd_v5_0_set_clockgating_state(void *handle,
+ enum amd_clockgating_state state);
+static void uvd_v5_0_enable_mgcg(struct amdgpu_device *adev,
+ bool enable);
/**
* uvd_v5_0_ring_get_rptr - get read pointer
*
@@ -112,8 +117,7 @@ static int uvd_v5_0_sw_init(void *handle)
ring = &adev->uvd.ring;
sprintf(ring->name, "uvd");
- r = amdgpu_ring_init(adev, ring, 512, PACKET0(mmUVD_NO_OP, 0), 0xf,
- &adev->uvd.irq, 0, AMDGPU_RING_TYPE_UVD);
+ r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.irq, 0);
return r;
}
@@ -148,9 +152,6 @@ static int uvd_v5_0_hw_init(void *handle)
uint32_t tmp;
int r;
- /* raise clocks while booting up the VCPU */
- amdgpu_asic_set_uvd_clocks(adev, 53300, 40000);
-
r = uvd_v5_0_start(adev);
if (r)
goto done;
@@ -188,11 +189,7 @@ static int uvd_v5_0_hw_init(void *handle)
amdgpu_ring_write(ring, 3);
amdgpu_ring_commit(ring);
-
done:
- /* lower clocks again */
- amdgpu_asic_set_uvd_clocks(adev, 0, 0);
-
if (!r)
DRM_INFO("UVD initialized successfully.\n");
@@ -225,6 +222,7 @@ static int uvd_v5_0_suspend(void *handle)
r = uvd_v5_0_hw_fini(adev);
if (r)
return r;
+ uvd_v5_0_set_clockgating_state(adev, AMD_CG_STATE_GATE);
r = amdgpu_uvd_suspend(adev);
if (r)
@@ -312,8 +310,9 @@ static int uvd_v5_0_start(struct amdgpu_device *adev)
uvd_v5_0_mc_resume(adev);
- /* disable clock gating */
- WREG32(mmUVD_CGC_GATE, 0);
+ amdgpu_asic_set_uvd_clocks(adev, 10000, 10000);
+ uvd_v5_0_set_clockgating_state(adev, AMD_CG_STATE_UNGATE);
+ uvd_v5_0_enable_mgcg(adev, true);
/* disable interrupt */
WREG32_P(mmUVD_MASTINT_EN, 0, ~(1 << 1));
@@ -577,20 +576,6 @@ static void uvd_v5_0_ring_emit_ib(struct amdgpu_ring *ring,
amdgpu_ring_write(ring, ib->length_dw);
}
-static unsigned uvd_v5_0_ring_get_emit_ib_size(struct amdgpu_ring *ring)
-{
- return
- 6; /* uvd_v5_0_ring_emit_ib */
-}
-
-static unsigned uvd_v5_0_ring_get_dma_frame_size(struct amdgpu_ring *ring)
-{
- return
- 2 + /* uvd_v5_0_ring_emit_hdp_flush */
- 2 + /* uvd_v5_0_ring_emit_hdp_invalidate */
- 14; /* uvd_v5_0_ring_emit_fence x1 no user fence */
-}
-
static bool uvd_v5_0_is_idle(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -641,16 +626,12 @@ static int uvd_v5_0_process_interrupt(struct amdgpu_device *adev,
return 0;
}
-static void uvd_v5_0_set_sw_clock_gating(struct amdgpu_device *adev)
+static void uvd_v5_0_enable_clock_gating(struct amdgpu_device *adev, bool enable)
{
- uint32_t data, data1, data2, suvd_flags;
+ uint32_t data1, data3, suvd_flags;
- data = RREG32(mmUVD_CGC_CTRL);
data1 = RREG32(mmUVD_SUVD_CGC_GATE);
- data2 = RREG32(mmUVD_SUVD_CGC_CTRL);
-
- data &= ~(UVD_CGC_CTRL__CLK_OFF_DELAY_MASK |
- UVD_CGC_CTRL__CLK_GATE_DLY_TIMER_MASK);
+ data3 = RREG32(mmUVD_CGC_GATE);
suvd_flags = UVD_SUVD_CGC_GATE__SRE_MASK |
UVD_SUVD_CGC_GATE__SIT_MASK |
@@ -658,6 +639,51 @@ static void uvd_v5_0_set_sw_clock_gating(struct amdgpu_device *adev)
UVD_SUVD_CGC_GATE__SCM_MASK |
UVD_SUVD_CGC_GATE__SDB_MASK;
+ if (enable) {
+ data3 |= (UVD_CGC_GATE__SYS_MASK |
+ UVD_CGC_GATE__UDEC_MASK |
+ UVD_CGC_GATE__MPEG2_MASK |
+ UVD_CGC_GATE__RBC_MASK |
+ UVD_CGC_GATE__LMI_MC_MASK |
+ UVD_CGC_GATE__IDCT_MASK |
+ UVD_CGC_GATE__MPRD_MASK |
+ UVD_CGC_GATE__MPC_MASK |
+ UVD_CGC_GATE__LBSI_MASK |
+ UVD_CGC_GATE__LRBBM_MASK |
+ UVD_CGC_GATE__UDEC_RE_MASK |
+ UVD_CGC_GATE__UDEC_CM_MASK |
+ UVD_CGC_GATE__UDEC_IT_MASK |
+ UVD_CGC_GATE__UDEC_DB_MASK |
+ UVD_CGC_GATE__UDEC_MP_MASK |
+ UVD_CGC_GATE__WCB_MASK |
+ UVD_CGC_GATE__JPEG_MASK |
+ UVD_CGC_GATE__SCPU_MASK);
+ /* only when PG is enabled can we gate the clock to the VCPU */
+ if (adev->pg_flags & AMD_PG_SUPPORT_UVD)
+ data3 |= UVD_CGC_GATE__VCPU_MASK;
+ data3 &= ~UVD_CGC_GATE__REGS_MASK;
+ data1 |= suvd_flags;
+ } else {
+ data3 = 0;
+ data1 = 0;
+ }
+
+ WREG32(mmUVD_SUVD_CGC_GATE, data1);
+ WREG32(mmUVD_CGC_GATE, data3);
+}
+
+static void uvd_v5_0_set_sw_clock_gating(struct amdgpu_device *adev)
+{
+ uint32_t data, data2;
+
+ data = RREG32(mmUVD_CGC_CTRL);
+ data2 = RREG32(mmUVD_SUVD_CGC_CTRL);
+
+ data &= ~(UVD_CGC_CTRL__CLK_OFF_DELAY_MASK |
+ UVD_CGC_CTRL__CLK_GATE_DLY_TIMER_MASK);
+
data |= UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK |
(1 << REG_FIELD_SHIFT(UVD_CGC_CTRL, CLK_GATE_DLY_TIMER)) |
(4 << REG_FIELD_SHIFT(UVD_CGC_CTRL, CLK_OFF_DELAY));
@@ -688,11 +714,8 @@ static void uvd_v5_0_set_sw_clock_gating(struct amdgpu_device *adev)
UVD_SUVD_CGC_CTRL__SMP_MODE_MASK |
UVD_SUVD_CGC_CTRL__SCM_MODE_MASK |
UVD_SUVD_CGC_CTRL__SDB_MODE_MASK);
- data1 |= suvd_flags;
WREG32(mmUVD_CGC_CTRL, data);
- WREG32(mmUVD_CGC_GATE, 0);
- WREG32(mmUVD_SUVD_CGC_GATE, data1);
WREG32(mmUVD_SUVD_CGC_CTRL, data2);
}
@@ -737,6 +760,32 @@ static void uvd_v5_0_set_hw_clock_gating(struct amdgpu_device *adev)
}
#endif
+static void uvd_v5_0_enable_mgcg(struct amdgpu_device *adev,
+ bool enable)
+{
+ u32 orig, data;
+
+ if (enable && (adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG)) {
+ data = RREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL);
+ data |= 0xfff;
+ WREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL, data);
+
+ orig = data = RREG32(mmUVD_CGC_CTRL);
+ data |= UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
+ if (orig != data)
+ WREG32(mmUVD_CGC_CTRL, data);
+ } else {
+ data = RREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL);
+ data &= ~0xfff;
+ WREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL, data);
+
+ orig = data = RREG32(mmUVD_CGC_CTRL);
+ data &= ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
+ if (orig != data)
+ WREG32(mmUVD_CGC_CTRL, data);
+ }
+}
+
static int uvd_v5_0_set_clockgating_state(void *handle,
enum amd_clockgating_state state)
{
@@ -752,17 +801,18 @@ static int uvd_v5_0_set_clockgating_state(void *handle,
curstate = state;
if (enable) {
- /* disable HW gating and enable Sw gating */
- uvd_v5_0_set_sw_clock_gating(adev);
- } else {
/* wait for STATUS to clear */
if (uvd_v5_0_wait_for_idle(handle))
return -EBUSY;
+ uvd_v5_0_enable_clock_gating(adev, true);
/* enable HW gates because UVD is idle */
/* uvd_v5_0_set_hw_clock_gating(adev); */
+ } else {
+ uvd_v5_0_enable_clock_gating(adev, false);
}
+ uvd_v5_0_set_sw_clock_gating(adev);
return 0;
}
@@ -789,7 +839,7 @@ static int uvd_v5_0_set_powergating_state(void *handle,
}
}
-const struct amd_ip_funcs uvd_v5_0_ip_funcs = {
+static const struct amd_ip_funcs uvd_v5_0_ip_funcs = {
.name = "uvd_v5_0",
.early_init = uvd_v5_0_early_init,
.late_init = NULL,
@@ -807,10 +857,18 @@ const struct amd_ip_funcs uvd_v5_0_ip_funcs = {
};
static const struct amdgpu_ring_funcs uvd_v5_0_ring_funcs = {
+ .type = AMDGPU_RING_TYPE_UVD,
+ .align_mask = 0xf,
+ .nop = PACKET0(mmUVD_NO_OP, 0),
.get_rptr = uvd_v5_0_ring_get_rptr,
.get_wptr = uvd_v5_0_ring_get_wptr,
.set_wptr = uvd_v5_0_ring_set_wptr,
.parse_cs = amdgpu_uvd_ring_parse_cs,
+ .emit_frame_size =
+ 2 + /* uvd_v5_0_ring_emit_hdp_flush */
+ 2 + /* uvd_v5_0_ring_emit_hdp_invalidate */
+ 14, /* uvd_v5_0_ring_emit_fence x1 no user fence */
+ .emit_ib_size = 6, /* uvd_v5_0_ring_emit_ib */
.emit_ib = uvd_v5_0_ring_emit_ib,
.emit_fence = uvd_v5_0_ring_emit_fence,
.emit_hdp_flush = uvd_v5_0_ring_emit_hdp_flush,
@@ -821,8 +879,6 @@ static const struct amdgpu_ring_funcs uvd_v5_0_ring_funcs = {
.pad_ib = amdgpu_ring_generic_pad_ib,
.begin_use = amdgpu_uvd_ring_begin_use,
.end_use = amdgpu_uvd_ring_end_use,
- .get_emit_ib_size = uvd_v5_0_ring_get_emit_ib_size,
- .get_dma_frame_size = uvd_v5_0_ring_get_dma_frame_size,
};
static void uvd_v5_0_set_ring_funcs(struct amdgpu_device *adev)
@@ -840,3 +896,12 @@ static void uvd_v5_0_set_irq_funcs(struct amdgpu_device *adev)
adev->uvd.irq.num_types = 1;
adev->uvd.irq.funcs = &uvd_v5_0_irq_funcs;
}
+
+const struct amdgpu_ip_block_version uvd_v5_0_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_UVD,
+ .major = 5,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &uvd_v5_0_ip_funcs,
+};
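The uvd_v5_0 gating rework splits responsibilities: uvd_v5_0_enable_clock_gating now owns the UVD_CGC_GATE/UVD_SUVD_CGC_GATE enable bits (gating the VCPU clock only when UVD power gating is supported, and never gating the register clock), while uvd_v5_0_set_sw_clock_gating keeps only the CGC_CTRL mode programming and runs on both the gate and ungate paths. Reduced to its control flow; a sketch with stubbed helpers, not the driver code:

    #include <stdbool.h>

    static bool uvd_idle(void) { return true; }             /* stub */
    static void enable_clock_gating(bool on) { (void)on; }  /* stub */
    static void set_sw_clock_gating(void) { }               /* stub */

    static int set_clockgating(bool gate)
    {
        if (gate) {
            if (!uvd_idle())
                return -1;              /* -EBUSY in the driver */
            enable_clock_gating(true);  /* gate only once idle */
        } else {
            enable_clock_gating(false);
        }
        set_sw_clock_gating();  /* mode bits reprogrammed either way */
        return 0;
    }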
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.h b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.h
index e3b3c49fa5de..2eaaea793ac5 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.h
@@ -24,6 +24,6 @@
#ifndef __UVD_V5_0_H__
#define __UVD_V5_0_H__
-extern const struct amd_ip_funcs uvd_v5_0_ip_funcs;
+extern const struct amdgpu_ip_block_version uvd_v5_0_ip_block;
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
index ab3df6d75656..ba0bbf7138dc 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
@@ -42,6 +42,10 @@ static void uvd_v6_0_set_irq_funcs(struct amdgpu_device *adev);
static int uvd_v6_0_start(struct amdgpu_device *adev);
static void uvd_v6_0_stop(struct amdgpu_device *adev);
static void uvd_v6_0_set_sw_clock_gating(struct amdgpu_device *adev);
+static int uvd_v6_0_set_clockgating_state(void *handle,
+ enum amd_clockgating_state state);
+static void uvd_v6_0_enable_mgcg(struct amdgpu_device *adev,
+ bool enable);
/**
* uvd_v6_0_ring_get_rptr - get read pointer
@@ -116,8 +120,7 @@ static int uvd_v6_0_sw_init(void *handle)
ring = &adev->uvd.ring;
sprintf(ring->name, "uvd");
- r = amdgpu_ring_init(adev, ring, 512, PACKET0(mmUVD_NO_OP, 0), 0xf,
- &adev->uvd.irq, 0, AMDGPU_RING_TYPE_UVD);
+ r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.irq, 0);
return r;
}
@@ -394,11 +397,11 @@ static int uvd_v6_0_start(struct amdgpu_device *adev)
lmi_swap_cntl = 0;
mp_swap_cntl = 0;
+ amdgpu_asic_set_uvd_clocks(adev, 10000, 10000);
+ uvd_v6_0_set_clockgating_state(adev, AMD_CG_STATE_UNGATE);
+ uvd_v6_0_enable_mgcg(adev, true);
uvd_v6_0_mc_resume(adev);
- /* disable clock gating */
- WREG32_FIELD(UVD_CGC_CTRL, DYN_CLOCK_MODE, 0);
-
/* disable interrupt */
WREG32_FIELD(UVD_MASTINT_EN, VCPU_EN, 0);
@@ -725,31 +728,6 @@ static void uvd_v6_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
amdgpu_ring_write(ring, 0xE);
}
-static unsigned uvd_v6_0_ring_get_emit_ib_size(struct amdgpu_ring *ring)
-{
- return
- 8; /* uvd_v6_0_ring_emit_ib */
-}
-
-static unsigned uvd_v6_0_ring_get_dma_frame_size(struct amdgpu_ring *ring)
-{
- return
- 2 + /* uvd_v6_0_ring_emit_hdp_flush */
- 2 + /* uvd_v6_0_ring_emit_hdp_invalidate */
- 10 + /* uvd_v6_0_ring_emit_pipeline_sync */
- 14; /* uvd_v6_0_ring_emit_fence x1 no user fence */
-}
-
-static unsigned uvd_v6_0_ring_get_dma_frame_size_vm(struct amdgpu_ring *ring)
-{
- return
- 2 + /* uvd_v6_0_ring_emit_hdp_flush */
- 2 + /* uvd_v6_0_ring_emit_hdp_invalidate */
- 10 + /* uvd_v6_0_ring_emit_pipeline_sync */
- 20 + /* uvd_v6_0_ring_emit_vm_flush */
- 14 + 14; /* uvd_v6_0_ring_emit_fence x2 vm fence */
-}
-
static bool uvd_v6_0_is_idle(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -862,22 +840,72 @@ static int uvd_v6_0_process_interrupt(struct amdgpu_device *adev,
return 0;
}
+static void uvd_v6_0_enable_clock_gating(struct amdgpu_device *adev, bool enable)
+{
+ uint32_t data1, data3;
+
+ data1 = RREG32(mmUVD_SUVD_CGC_GATE);
+ data3 = RREG32(mmUVD_CGC_GATE);
+
+ data1 |= UVD_SUVD_CGC_GATE__SRE_MASK |
+ UVD_SUVD_CGC_GATE__SIT_MASK |
+ UVD_SUVD_CGC_GATE__SMP_MASK |
+ UVD_SUVD_CGC_GATE__SCM_MASK |
+ UVD_SUVD_CGC_GATE__SDB_MASK |
+ UVD_SUVD_CGC_GATE__SRE_H264_MASK |
+ UVD_SUVD_CGC_GATE__SRE_HEVC_MASK |
+ UVD_SUVD_CGC_GATE__SIT_H264_MASK |
+ UVD_SUVD_CGC_GATE__SIT_HEVC_MASK |
+ UVD_SUVD_CGC_GATE__SCM_H264_MASK |
+ UVD_SUVD_CGC_GATE__SCM_HEVC_MASK |
+ UVD_SUVD_CGC_GATE__SDB_H264_MASK |
+ UVD_SUVD_CGC_GATE__SDB_HEVC_MASK;
+
+ if (enable) {
+ data3 |= (UVD_CGC_GATE__SYS_MASK |
+ UVD_CGC_GATE__UDEC_MASK |
+ UVD_CGC_GATE__MPEG2_MASK |
+ UVD_CGC_GATE__RBC_MASK |
+ UVD_CGC_GATE__LMI_MC_MASK |
+ UVD_CGC_GATE__LMI_UMC_MASK |
+ UVD_CGC_GATE__IDCT_MASK |
+ UVD_CGC_GATE__MPRD_MASK |
+ UVD_CGC_GATE__MPC_MASK |
+ UVD_CGC_GATE__LBSI_MASK |
+ UVD_CGC_GATE__LRBBM_MASK |
+ UVD_CGC_GATE__UDEC_RE_MASK |
+ UVD_CGC_GATE__UDEC_CM_MASK |
+ UVD_CGC_GATE__UDEC_IT_MASK |
+ UVD_CGC_GATE__UDEC_DB_MASK |
+ UVD_CGC_GATE__UDEC_MP_MASK |
+ UVD_CGC_GATE__WCB_MASK |
+ UVD_CGC_GATE__JPEG_MASK |
+ UVD_CGC_GATE__SCPU_MASK |
+ UVD_CGC_GATE__JPEG2_MASK);
+ /* only when PG is enabled can we gate the clock to the VCPU */
+ if (adev->pg_flags & AMD_PG_SUPPORT_UVD)
+ data3 |= UVD_CGC_GATE__VCPU_MASK;
+
+ data3 &= ~UVD_CGC_GATE__REGS_MASK;
+ } else {
+ data3 = 0;
+ }
+
+ WREG32(mmUVD_SUVD_CGC_GATE, data1);
+ WREG32(mmUVD_CGC_GATE, data3);
+}
+
static void uvd_v6_0_set_sw_clock_gating(struct amdgpu_device *adev)
{
- uint32_t data, data1, data2, suvd_flags;
+ uint32_t data, data2;
data = RREG32(mmUVD_CGC_CTRL);
- data1 = RREG32(mmUVD_SUVD_CGC_GATE);
data2 = RREG32(mmUVD_SUVD_CGC_CTRL);
+
data &= ~(UVD_CGC_CTRL__CLK_OFF_DELAY_MASK |
UVD_CGC_CTRL__CLK_GATE_DLY_TIMER_MASK);
- suvd_flags = UVD_SUVD_CGC_GATE__SRE_MASK |
- UVD_SUVD_CGC_GATE__SIT_MASK |
- UVD_SUVD_CGC_GATE__SMP_MASK |
- UVD_SUVD_CGC_GATE__SCM_MASK |
- UVD_SUVD_CGC_GATE__SDB_MASK;
data |= UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK |
(1 << REG_FIELD_SHIFT(UVD_CGC_CTRL, CLK_GATE_DLY_TIMER)) |
@@ -910,11 +938,8 @@ static void uvd_v6_0_set_sw_clock_gating(struct amdgpu_device *adev)
UVD_SUVD_CGC_CTRL__SMP_MODE_MASK |
UVD_SUVD_CGC_CTRL__SCM_MODE_MASK |
UVD_SUVD_CGC_CTRL__SDB_MODE_MASK);
- data1 |= suvd_flags;
WREG32(mmUVD_CGC_CTRL, data);
- WREG32(mmUVD_CGC_GATE, 0);
- WREG32(mmUVD_SUVD_CGC_GATE, data1);
WREG32(mmUVD_SUVD_CGC_CTRL, data2);
}
@@ -961,44 +986,53 @@ static void uvd_v6_0_set_hw_clock_gating(struct amdgpu_device *adev)
}
#endif
-static void uvd_v6_set_bypass_mode(struct amdgpu_device *adev, bool enable)
+static void uvd_v6_0_enable_mgcg(struct amdgpu_device *adev,
+ bool enable)
{
- u32 tmp = RREG32_SMC(ixGCK_DFS_BYPASS_CNTL);
+ u32 orig, data;
- if (enable)
- tmp |= (GCK_DFS_BYPASS_CNTL__BYPASSDCLK_MASK |
- GCK_DFS_BYPASS_CNTL__BYPASSVCLK_MASK);
- else
- tmp &= ~(GCK_DFS_BYPASS_CNTL__BYPASSDCLK_MASK |
- GCK_DFS_BYPASS_CNTL__BYPASSVCLK_MASK);
+ if (enable && (adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG)) {
+ data = RREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL);
+ data |= 0xfff;
+ WREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL, data);
- WREG32_SMC(ixGCK_DFS_BYPASS_CNTL, tmp);
+ orig = data = RREG32(mmUVD_CGC_CTRL);
+ data |= UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
+ if (orig != data)
+ WREG32(mmUVD_CGC_CTRL, data);
+ } else {
+ data = RREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL);
+ data &= ~0xfff;
+ WREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL, data);
+
+ orig = data = RREG32(mmUVD_CGC_CTRL);
+ data &= ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
+ if (orig != data)
+ WREG32(mmUVD_CGC_CTRL, data);
+ }
}
static int uvd_v6_0_set_clockgating_state(void *handle,
enum amd_clockgating_state state)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- if (adev->asic_type == CHIP_FIJI ||
- adev->asic_type == CHIP_POLARIS10)
- uvd_v6_set_bypass_mode(adev, state == AMD_CG_STATE_GATE ? true : false);
+ bool enable = (state == AMD_CG_STATE_GATE) ? true : false;
if (!(adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG))
return 0;
- if (state == AMD_CG_STATE_GATE) {
- /* disable HW gating and enable Sw gating */
- uvd_v6_0_set_sw_clock_gating(adev);
- } else {
+ if (enable) {
/* wait for STATUS to clear */
if (uvd_v6_0_wait_for_idle(handle))
return -EBUSY;
-
+ uvd_v6_0_enable_clock_gating(adev, true);
/* enable HW gates because UVD is idle */
/* uvd_v6_0_set_hw_clock_gating(adev); */
+ } else {
+ /* disable HW gating and enable SW gating */
+ uvd_v6_0_enable_clock_gating(adev, false);
}
-
+ uvd_v6_0_set_sw_clock_gating(adev);
return 0;
}
@@ -1027,7 +1061,7 @@ static int uvd_v6_0_set_powergating_state(void *handle,
}
}
-const struct amd_ip_funcs uvd_v6_0_ip_funcs = {
+static const struct amd_ip_funcs uvd_v6_0_ip_funcs = {
.name = "uvd_v6_0",
.early_init = uvd_v6_0_early_init,
.late_init = NULL,
@@ -1048,10 +1082,19 @@ const struct amd_ip_funcs uvd_v6_0_ip_funcs = {
};
static const struct amdgpu_ring_funcs uvd_v6_0_ring_phys_funcs = {
+ .type = AMDGPU_RING_TYPE_UVD,
+ .align_mask = 0xf,
+ .nop = PACKET0(mmUVD_NO_OP, 0),
.get_rptr = uvd_v6_0_ring_get_rptr,
.get_wptr = uvd_v6_0_ring_get_wptr,
.set_wptr = uvd_v6_0_ring_set_wptr,
.parse_cs = amdgpu_uvd_ring_parse_cs,
+ .emit_frame_size =
+ 2 + /* uvd_v6_0_ring_emit_hdp_flush */
+ 2 + /* uvd_v6_0_ring_emit_hdp_invalidate */
+ 10 + /* uvd_v6_0_ring_emit_pipeline_sync */
+ 14, /* uvd_v6_0_ring_emit_fence x1 no user fence */
+ .emit_ib_size = 8, /* uvd_v6_0_ring_emit_ib */
.emit_ib = uvd_v6_0_ring_emit_ib,
.emit_fence = uvd_v6_0_ring_emit_fence,
.emit_hdp_flush = uvd_v6_0_ring_emit_hdp_flush,
@@ -1062,15 +1105,22 @@ static const struct amdgpu_ring_funcs uvd_v6_0_ring_phys_funcs = {
.pad_ib = amdgpu_ring_generic_pad_ib,
.begin_use = amdgpu_uvd_ring_begin_use,
.end_use = amdgpu_uvd_ring_end_use,
- .get_emit_ib_size = uvd_v6_0_ring_get_emit_ib_size,
- .get_dma_frame_size = uvd_v6_0_ring_get_dma_frame_size,
};
static const struct amdgpu_ring_funcs uvd_v6_0_ring_vm_funcs = {
+ .type = AMDGPU_RING_TYPE_UVD,
+ .align_mask = 0xf,
+ .nop = PACKET0(mmUVD_NO_OP, 0),
.get_rptr = uvd_v6_0_ring_get_rptr,
.get_wptr = uvd_v6_0_ring_get_wptr,
.set_wptr = uvd_v6_0_ring_set_wptr,
- .parse_cs = NULL,
+ .emit_frame_size =
+ 2 + /* uvd_v6_0_ring_emit_hdp_flush */
+ 2 + /* uvd_v6_0_ring_emit_hdp_invalidate */
+ 10 + /* uvd_v6_0_ring_emit_pipeline_sync */
+ 20 + /* uvd_v6_0_ring_emit_vm_flush */
+ 14 + 14, /* uvd_v6_0_ring_emit_fence x2 vm fence */
+ .emit_ib_size = 8, /* uvd_v6_0_ring_emit_ib */
.emit_ib = uvd_v6_0_ring_emit_ib,
.emit_fence = uvd_v6_0_ring_emit_fence,
.emit_vm_flush = uvd_v6_0_ring_emit_vm_flush,
@@ -1083,8 +1133,6 @@ static const struct amdgpu_ring_funcs uvd_v6_0_ring_vm_funcs = {
.pad_ib = amdgpu_ring_generic_pad_ib,
.begin_use = amdgpu_uvd_ring_begin_use,
.end_use = amdgpu_uvd_ring_end_use,
- .get_emit_ib_size = uvd_v6_0_ring_get_emit_ib_size,
- .get_dma_frame_size = uvd_v6_0_ring_get_dma_frame_size_vm,
};
static void uvd_v6_0_set_ring_funcs(struct amdgpu_device *adev)
@@ -1108,3 +1156,30 @@ static void uvd_v6_0_set_irq_funcs(struct amdgpu_device *adev)
adev->uvd.irq.num_types = 1;
adev->uvd.irq.funcs = &uvd_v6_0_irq_funcs;
}
+
+const struct amdgpu_ip_block_version uvd_v6_0_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_UVD,
+ .major = 6,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &uvd_v6_0_ip_funcs,
+};
+
+const struct amdgpu_ip_block_version uvd_v6_2_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_UVD,
+ .major = 6,
+ .minor = 2,
+ .rev = 0,
+ .funcs = &uvd_v6_0_ip_funcs,
+};
+
+const struct amdgpu_ip_block_version uvd_v6_3_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_UVD,
+ .major = 6,
+ .minor = 3,
+ .rev = 0,
+ .funcs = &uvd_v6_0_ip_funcs,
+};
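Note that uvd_v6_0_ip_block, uvd_v6_2_ip_block, and uvd_v6_3_ip_block all point at the same uvd_v6_0_ip_funcs; only the version numbers differ, so each ASIC can record the exact UVD revision it carries while sharing one implementation. Illustrative registration using the amdgpu_ip_block_add helper this series introduces; the chip names below are placeholders, not real enum values:

    /* Hypothetical ASIC hook-up: pick the wrapper matching the
     * hardware revision, dispatch stays identical. */
    switch (adev->asic_type) {
    case CHIP_FOO:
        amdgpu_ip_block_add(adev, &uvd_v6_0_ip_block);
        break;
    case CHIP_BAR:
        amdgpu_ip_block_add(adev, &uvd_v6_3_ip_block);
        break;
    }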
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.h b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.h
index 6b92a2352986..d3d48c6428cd 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.h
@@ -24,6 +24,8 @@
#ifndef __UVD_V6_0_H__
#define __UVD_V6_0_H__
-extern const struct amd_ip_funcs uvd_v6_0_ip_funcs;
+extern const struct amdgpu_ip_block_version uvd_v6_0_ip_block;
+extern const struct amdgpu_ip_block_version uvd_v6_2_ip_block;
+extern const struct amdgpu_ip_block_version uvd_v6_3_ip_block;
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
index 76e64ad04a53..38ed903dd6f8 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
@@ -224,8 +224,8 @@ static int vce_v2_0_sw_init(void *handle)
for (i = 0; i < adev->vce.num_rings; i++) {
ring = &adev->vce.ring[i];
sprintf(ring->name, "vce%d", i);
- r = amdgpu_ring_init(adev, ring, 512, VCE_CMD_NO_OP, 0xf,
- &adev->vce.irq, 0, AMDGPU_RING_TYPE_VCE);
+ r = amdgpu_ring_init(adev, ring, 512,
+ &adev->vce.irq, 0);
if (r)
return r;
}
@@ -592,7 +592,7 @@ static int vce_v2_0_set_powergating_state(void *handle,
return vce_v2_0_start(adev);
}
-const struct amd_ip_funcs vce_v2_0_ip_funcs = {
+static const struct amd_ip_funcs vce_v2_0_ip_funcs = {
.name = "vce_v2_0",
.early_init = vce_v2_0_early_init,
.late_init = NULL,
@@ -610,10 +610,15 @@ const struct amd_ip_funcs vce_v2_0_ip_funcs = {
};
static const struct amdgpu_ring_funcs vce_v2_0_ring_funcs = {
+ .type = AMDGPU_RING_TYPE_VCE,
+ .align_mask = 0xf,
+ .nop = VCE_CMD_NO_OP,
.get_rptr = vce_v2_0_ring_get_rptr,
.get_wptr = vce_v2_0_ring_get_wptr,
.set_wptr = vce_v2_0_ring_set_wptr,
.parse_cs = amdgpu_vce_ring_parse_cs,
+ .emit_frame_size = 6, /* amdgpu_vce_ring_emit_fence x1 no user fence */
+ .emit_ib_size = 4, /* amdgpu_vce_ring_emit_ib */
.emit_ib = amdgpu_vce_ring_emit_ib,
.emit_fence = amdgpu_vce_ring_emit_fence,
.test_ring = amdgpu_vce_ring_test_ring,
@@ -622,8 +627,6 @@ static const struct amdgpu_ring_funcs vce_v2_0_ring_funcs = {
.pad_ib = amdgpu_ring_generic_pad_ib,
.begin_use = amdgpu_vce_ring_begin_use,
.end_use = amdgpu_vce_ring_end_use,
- .get_emit_ib_size = amdgpu_vce_ring_get_emit_ib_size,
- .get_dma_frame_size = amdgpu_vce_ring_get_dma_frame_size,
};
static void vce_v2_0_set_ring_funcs(struct amdgpu_device *adev)
@@ -644,3 +647,12 @@ static void vce_v2_0_set_irq_funcs(struct amdgpu_device *adev)
adev->vce.irq.num_types = 1;
adev->vce.irq.funcs = &vce_v2_0_irq_funcs;
};
+
+const struct amdgpu_ip_block_version vce_v2_0_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_VCE,
+ .major = 2,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &vce_v2_0_ip_funcs,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v2_0.h b/drivers/gpu/drm/amd/amdgpu/vce_v2_0.h
index 0d2ae8a01acd..4d15167654a6 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v2_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v2_0.h
@@ -24,6 +24,6 @@
#ifndef __VCE_V2_0_H__
#define __VCE_V2_0_H__
-extern const struct amd_ip_funcs vce_v2_0_ip_funcs;
+extern const struct amdgpu_ip_block_version vce_v2_0_ip_block;
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
index 6feed726e299..6b3293a1c7b8 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
@@ -134,7 +134,7 @@ static void vce_v3_0_set_vce_sw_clock_gating(struct amdgpu_device *adev,
accessible but the firmware will throttle the clocks on the
fly as necessary.
*/
- if (gated) {
+ if (!gated) {
data = RREG32(mmVCE_CLOCK_GATING_B);
data |= 0x1ff;
data &= ~0xef0000;
@@ -395,8 +395,7 @@ static int vce_v3_0_sw_init(void *handle)
for (i = 0; i < adev->vce.num_rings; i++) {
ring = &adev->vce.ring[i];
sprintf(ring->name, "vce%d", i);
- r = amdgpu_ring_init(adev, ring, 512, VCE_CMD_NO_OP, 0xf,
- &adev->vce.irq, 0, AMDGPU_RING_TYPE_VCE);
+ r = amdgpu_ring_init(adev, ring, 512, &adev->vce.irq, 0);
if (r)
return r;
}
@@ -814,28 +813,7 @@ static void vce_v3_0_emit_pipeline_sync(struct amdgpu_ring *ring)
amdgpu_ring_write(ring, seq);
}
-static unsigned vce_v3_0_ring_get_emit_ib_size(struct amdgpu_ring *ring)
-{
- return
- 5; /* vce_v3_0_ring_emit_ib */
-}
-
-static unsigned vce_v3_0_ring_get_dma_frame_size(struct amdgpu_ring *ring)
-{
- return
- 4 + /* vce_v3_0_emit_pipeline_sync */
- 6; /* amdgpu_vce_ring_emit_fence x1 no user fence */
-}
-
-static unsigned vce_v3_0_ring_get_dma_frame_size_vm(struct amdgpu_ring *ring)
-{
- return
- 6 + /* vce_v3_0_emit_vm_flush */
- 4 + /* vce_v3_0_emit_pipeline_sync */
- 6 + 6; /* amdgpu_vce_ring_emit_fence x2 vm fence */
-}
-
-const struct amd_ip_funcs vce_v3_0_ip_funcs = {
+static const struct amd_ip_funcs vce_v3_0_ip_funcs = {
.name = "vce_v3_0",
.early_init = vce_v3_0_early_init,
.late_init = NULL,
@@ -856,10 +834,17 @@ const struct amd_ip_funcs vce_v3_0_ip_funcs = {
};
static const struct amdgpu_ring_funcs vce_v3_0_ring_phys_funcs = {
+ .type = AMDGPU_RING_TYPE_VCE,
+ .align_mask = 0xf,
+ .nop = VCE_CMD_NO_OP,
.get_rptr = vce_v3_0_ring_get_rptr,
.get_wptr = vce_v3_0_ring_get_wptr,
.set_wptr = vce_v3_0_ring_set_wptr,
.parse_cs = amdgpu_vce_ring_parse_cs,
+ .emit_frame_size =
+ 4 + /* vce_v3_0_emit_pipeline_sync */
+ 6, /* amdgpu_vce_ring_emit_fence x1 no user fence */
+ .emit_ib_size = 4, /* amdgpu_vce_ring_emit_ib */
.emit_ib = amdgpu_vce_ring_emit_ib,
.emit_fence = amdgpu_vce_ring_emit_fence,
.test_ring = amdgpu_vce_ring_test_ring,
@@ -868,15 +853,21 @@ static const struct amdgpu_ring_funcs vce_v3_0_ring_phys_funcs = {
.pad_ib = amdgpu_ring_generic_pad_ib,
.begin_use = amdgpu_vce_ring_begin_use,
.end_use = amdgpu_vce_ring_end_use,
- .get_emit_ib_size = vce_v3_0_ring_get_emit_ib_size,
- .get_dma_frame_size = vce_v3_0_ring_get_dma_frame_size,
};
static const struct amdgpu_ring_funcs vce_v3_0_ring_vm_funcs = {
+ .type = AMDGPU_RING_TYPE_VCE,
+ .align_mask = 0xf,
+ .nop = VCE_CMD_NO_OP,
.get_rptr = vce_v3_0_ring_get_rptr,
.get_wptr = vce_v3_0_ring_get_wptr,
.set_wptr = vce_v3_0_ring_set_wptr,
- .parse_cs = NULL,
+ .parse_cs = amdgpu_vce_ring_parse_cs_vm,
+ .emit_frame_size =
+ 6 + /* vce_v3_0_emit_vm_flush */
+ 4 + /* vce_v3_0_emit_pipeline_sync */
+ 6 + 6, /* amdgpu_vce_ring_emit_fence x2 vm fence */
+ .emit_ib_size = 5, /* vce_v3_0_ring_emit_ib */
.emit_ib = vce_v3_0_ring_emit_ib,
.emit_vm_flush = vce_v3_0_emit_vm_flush,
.emit_pipeline_sync = vce_v3_0_emit_pipeline_sync,
@@ -887,8 +878,6 @@ static const struct amdgpu_ring_funcs vce_v3_0_ring_vm_funcs = {
.pad_ib = amdgpu_ring_generic_pad_ib,
.begin_use = amdgpu_vce_ring_begin_use,
.end_use = amdgpu_vce_ring_end_use,
- .get_emit_ib_size = vce_v3_0_ring_get_emit_ib_size,
- .get_dma_frame_size = vce_v3_0_ring_get_dma_frame_size_vm,
};
static void vce_v3_0_set_ring_funcs(struct amdgpu_device *adev)
@@ -916,3 +905,30 @@ static void vce_v3_0_set_irq_funcs(struct amdgpu_device *adev)
adev->vce.irq.num_types = 1;
adev->vce.irq.funcs = &vce_v3_0_irq_funcs;
};
+
+const struct amdgpu_ip_block_version vce_v3_0_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_VCE,
+ .major = 3,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &vce_v3_0_ip_funcs,
+};
+
+const struct amdgpu_ip_block_version vce_v3_1_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_VCE,
+ .major = 3,
+ .minor = 1,
+ .rev = 0,
+ .funcs = &vce_v3_0_ip_funcs,
+};
+
+const struct amdgpu_ip_block_version vce_v3_4_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_VCE,
+ .major = 3,
+ .minor = 4,
+ .rev = 0,
+ .funcs = &vce_v3_0_ip_funcs,
+};
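The one-line polarity fix at the top of the vce_v3_0 diff is the important part: in vce_v3_0_set_vce_sw_clock_gating, the branch that forces the VCE clocks on (setting the 0x1ff enable bits) must run on the ungated path; with `if (gated)` the driver was gating clocks when asked to ungate and vice versa. The skeleton of the corrected function, for reference; register writes elided, names illustrative:

    static void set_vce_sw_clock_gating(bool gated)
    {
        if (!gated) {
            /* ungated: set the clock-required bits so all VCE
             * sub-blocks keep their clocks running */
        } else {
            /* gated: clear the enables so idle sub-blocks can be
             * clock gated by the hardware/firmware */
        }
    }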
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.h b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.h
index b45af65da81f..08b908c7de0f 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.h
@@ -24,6 +24,8 @@
#ifndef __VCE_V3_0_H__
#define __VCE_V3_0_H__
-extern const struct amd_ip_funcs vce_v3_0_ip_funcs;
+extern const struct amdgpu_ip_block_version vce_v3_0_ip_block;
+extern const struct amdgpu_ip_block_version vce_v3_1_ip_block;
+extern const struct amdgpu_ip_block_version vce_v3_4_ip_block;
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c
index f62f1a74f890..bf088d6d9bf1 100644
--- a/drivers/gpu/drm/amd/amdgpu/vi.c
+++ b/drivers/gpu/drm/amd/amdgpu/vi.c
@@ -123,8 +123,8 @@ static u32 vi_smc_rreg(struct amdgpu_device *adev, u32 reg)
u32 r;
spin_lock_irqsave(&adev->smc_idx_lock, flags);
- WREG32(mmSMC_IND_INDEX_0, (reg));
- r = RREG32(mmSMC_IND_DATA_0);
+ WREG32(mmSMC_IND_INDEX_11, (reg));
+ r = RREG32(mmSMC_IND_DATA_11);
spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
return r;
}
@@ -134,8 +134,8 @@ static void vi_smc_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
unsigned long flags;
spin_lock_irqsave(&adev->smc_idx_lock, flags);
- WREG32(mmSMC_IND_INDEX_0, (reg));
- WREG32(mmSMC_IND_DATA_0, (v));
+ WREG32(mmSMC_IND_INDEX_11, (reg));
+ WREG32(mmSMC_IND_DATA_11, (v));
spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
}
@@ -439,12 +439,12 @@ static bool vi_read_bios_from_rom(struct amdgpu_device *adev,
/* take the smc lock since we are using the smc index */
spin_lock_irqsave(&adev->smc_idx_lock, flags);
/* set rom index to 0 */
- WREG32(mmSMC_IND_INDEX_0, ixROM_INDEX);
- WREG32(mmSMC_IND_DATA_0, 0);
+ WREG32(mmSMC_IND_INDEX_11, ixROM_INDEX);
+ WREG32(mmSMC_IND_DATA_11, 0);
/* set index to data for continuous read */
- WREG32(mmSMC_IND_INDEX_0, ixROM_DATA);
+ WREG32(mmSMC_IND_INDEX_11, ixROM_DATA);
for (i = 0; i < length_dw; i++)
- dw_ptr[i] = RREG32(mmSMC_IND_DATA_0);
+ dw_ptr[i] = RREG32(mmSMC_IND_DATA_11);
spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
return true;
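All three hunks above move the SMC indirect accesses from the SMC_IND_INDEX_0/SMC_IND_DATA_0 pair to pair 11, presumably so the driver's traffic cannot collide with other components that use the lower index/data pairs. The access pattern itself is the classic index/data window, which is why every use sits under smc_idx_lock: the index write and the data access must stay paired. A self-contained sketch with stand-in MMIO and locking stubs:

    #include <stdint.h>

    /* Stand-ins; the driver uses RREG32/WREG32 under smc_idx_lock
     * (spin_lock_irqsave / spin_unlock_irqrestore). */
    static uint32_t regs[16];
    static uint32_t mmio_read(uint32_t reg) { return regs[reg & 15]; }
    static void mmio_write(uint32_t reg, uint32_t v) { regs[reg & 15] = v; }
    static void lock(void) { }
    static void unlock(void) { }

    static uint32_t smc_read(uint32_t idx_reg, uint32_t data_reg, uint32_t smc_reg)
    {
        uint32_t v;

        lock();                       /* keep the index+data pair atomic */
        mmio_write(idx_reg, smc_reg); /* select the SMC register */
        v = mmio_read(data_reg);      /* read through the data window */
        unlock();
        return v;
    }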
@@ -558,21 +558,100 @@ static const struct amdgpu_allowed_register_entry vi_allowed_read_registers[] =
{mmPA_SC_RASTER_CONFIG_1, false, true},
};
-static uint32_t vi_read_indexed_register(struct amdgpu_device *adev, u32 se_num,
- u32 sh_num, u32 reg_offset)
-{
- uint32_t val;
+static uint32_t vi_get_register_value(struct amdgpu_device *adev,
+ bool indexed, u32 se_num,
+ u32 sh_num, u32 reg_offset)
+{
+ if (indexed) {
+ uint32_t val;
+ unsigned se_idx = (se_num == 0xffffffff) ? 0 : se_num;
+ unsigned sh_idx = (sh_num == 0xffffffff) ? 0 : sh_num;
+
+ switch (reg_offset) {
+ case mmCC_RB_BACKEND_DISABLE:
+ return adev->gfx.config.rb_config[se_idx][sh_idx].rb_backend_disable;
+ case mmGC_USER_RB_BACKEND_DISABLE:
+ return adev->gfx.config.rb_config[se_idx][sh_idx].user_rb_backend_disable;
+ case mmPA_SC_RASTER_CONFIG:
+ return adev->gfx.config.rb_config[se_idx][sh_idx].raster_config;
+ case mmPA_SC_RASTER_CONFIG_1:
+ return adev->gfx.config.rb_config[se_idx][sh_idx].raster_config_1;
+ }
- mutex_lock(&adev->grbm_idx_mutex);
- if (se_num != 0xffffffff || sh_num != 0xffffffff)
- amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff);
+ mutex_lock(&adev->grbm_idx_mutex);
+ if (se_num != 0xffffffff || sh_num != 0xffffffff)
+ amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff);
- val = RREG32(reg_offset);
+ val = RREG32(reg_offset);
- if (se_num != 0xffffffff || sh_num != 0xffffffff)
- amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
- mutex_unlock(&adev->grbm_idx_mutex);
- return val;
+ if (se_num != 0xffffffff || sh_num != 0xffffffff)
+ amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+ mutex_unlock(&adev->grbm_idx_mutex);
+ return val;
+ } else {
+ unsigned idx;
+
+ switch (reg_offset) {
+ case mmGB_ADDR_CONFIG:
+ return adev->gfx.config.gb_addr_config;
+ case mmMC_ARB_RAMCFG:
+ return adev->gfx.config.mc_arb_ramcfg;
+ case mmGB_TILE_MODE0:
+ case mmGB_TILE_MODE1:
+ case mmGB_TILE_MODE2:
+ case mmGB_TILE_MODE3:
+ case mmGB_TILE_MODE4:
+ case mmGB_TILE_MODE5:
+ case mmGB_TILE_MODE6:
+ case mmGB_TILE_MODE7:
+ case mmGB_TILE_MODE8:
+ case mmGB_TILE_MODE9:
+ case mmGB_TILE_MODE10:
+ case mmGB_TILE_MODE11:
+ case mmGB_TILE_MODE12:
+ case mmGB_TILE_MODE13:
+ case mmGB_TILE_MODE14:
+ case mmGB_TILE_MODE15:
+ case mmGB_TILE_MODE16:
+ case mmGB_TILE_MODE17:
+ case mmGB_TILE_MODE18:
+ case mmGB_TILE_MODE19:
+ case mmGB_TILE_MODE20:
+ case mmGB_TILE_MODE21:
+ case mmGB_TILE_MODE22:
+ case mmGB_TILE_MODE23:
+ case mmGB_TILE_MODE24:
+ case mmGB_TILE_MODE25:
+ case mmGB_TILE_MODE26:
+ case mmGB_TILE_MODE27:
+ case mmGB_TILE_MODE28:
+ case mmGB_TILE_MODE29:
+ case mmGB_TILE_MODE30:
+ case mmGB_TILE_MODE31:
+ idx = (reg_offset - mmGB_TILE_MODE0);
+ return adev->gfx.config.tile_mode_array[idx];
+ case mmGB_MACROTILE_MODE0:
+ case mmGB_MACROTILE_MODE1:
+ case mmGB_MACROTILE_MODE2:
+ case mmGB_MACROTILE_MODE3:
+ case mmGB_MACROTILE_MODE4:
+ case mmGB_MACROTILE_MODE5:
+ case mmGB_MACROTILE_MODE6:
+ case mmGB_MACROTILE_MODE7:
+ case mmGB_MACROTILE_MODE8:
+ case mmGB_MACROTILE_MODE9:
+ case mmGB_MACROTILE_MODE10:
+ case mmGB_MACROTILE_MODE11:
+ case mmGB_MACROTILE_MODE12:
+ case mmGB_MACROTILE_MODE13:
+ case mmGB_MACROTILE_MODE14:
+ case mmGB_MACROTILE_MODE15:
+ idx = (reg_offset - mmGB_MACROTILE_MODE0);
+ return adev->gfx.config.macrotile_mode_array[idx];
+ default:
+ return RREG32(reg_offset);
+ }
+ }
}
static int vi_read_register(struct amdgpu_device *adev, u32 se_num,
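vi_get_register_value extends the old indexed-read helper with a second job: reads of static configuration registers (GB_ADDR_CONFIG, the tile and macrotile mode arrays, the per-SE raster config) are answered from the copies cached in adev->gfx.config at init time instead of touching the hardware, which avoids GRBM index traffic and returns sane values regardless of gating state. The caching idea in miniature; types, names, and the register base are illustrative:

    #include <stdint.h>

    #define TILE_MODE_BASE 0x2644  /* illustrative, not the real offset */
    #define NUM_TILE_MODES 32

    static uint32_t tile_mode_cache[NUM_TILE_MODES];  /* filled at init */
    static uint32_t mmio_read(uint32_t reg) { (void)reg; return 0; }  /* stub */

    static uint32_t read_config_reg(uint32_t reg)
    {
        if (reg >= TILE_MODE_BASE && reg < TILE_MODE_BASE + NUM_TILE_MODES)
            return tile_mode_cache[reg - TILE_MODE_BASE];  /* cached */
        return mmio_read(reg);  /* anything else still hits hardware */
    }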
@@ -607,10 +686,9 @@ static int vi_read_register(struct amdgpu_device *adev, u32 se_num,
if (reg_offset != asic_register_entry->reg_offset)
continue;
if (!asic_register_entry->untouched)
- *value = asic_register_entry->grbm_indexed ?
- vi_read_indexed_register(adev, se_num,
- sh_num, reg_offset) :
- RREG32(reg_offset);
+ *value = vi_get_register_value(adev,
+ asic_register_entry->grbm_indexed,
+ se_num, sh_num, reg_offset);
return 0;
}
}
@@ -620,10 +698,9 @@ static int vi_read_register(struct amdgpu_device *adev, u32 se_num,
continue;
if (!vi_allowed_read_registers[i].untouched)
- *value = vi_allowed_read_registers[i].grbm_indexed ?
- vi_read_indexed_register(adev, se_num,
- sh_num, reg_offset) :
- RREG32(reg_offset);
+ *value = vi_get_register_value(adev,
+ vi_allowed_read_registers[i].grbm_indexed,
+ se_num, sh_num, reg_offset);
return 0;
}
return -EINVAL;
@@ -654,18 +731,6 @@ static int vi_gpu_pci_config_reset(struct amdgpu_device *adev)
return -EINVAL;
}
-static void vi_set_bios_scratch_engine_hung(struct amdgpu_device *adev, bool hung)
-{
- u32 tmp = RREG32(mmBIOS_SCRATCH_3);
-
- if (hung)
- tmp |= ATOM_S3_ASIC_GUI_ENGINE_HUNG;
- else
- tmp &= ~ATOM_S3_ASIC_GUI_ENGINE_HUNG;
-
- WREG32(mmBIOS_SCRATCH_3, tmp);
-}
-
/**
* vi_asic_reset - soft reset GPU
*
@@ -679,11 +744,11 @@ static int vi_asic_reset(struct amdgpu_device *adev)
{
int r;
- vi_set_bios_scratch_engine_hung(adev, true);
+ amdgpu_atombios_scratch_regs_engine_hung(adev, true);
r = vi_gpu_pci_config_reset(adev);
- vi_set_bios_scratch_engine_hung(adev, false);
+ amdgpu_atombios_scratch_regs_engine_hung(adev, false);
return r;
}
@@ -783,734 +848,6 @@ static void vi_enable_doorbell_aperture(struct amdgpu_device *adev,
WREG32(mmBIF_DOORBELL_APER_EN, tmp);
}
-/* topaz has no DCE, UVD, VCE */
-static const struct amdgpu_ip_block_version topaz_ip_blocks[] =
-{
- /* ORDER MATTERS! */
- {
- .type = AMD_IP_BLOCK_TYPE_COMMON,
- .major = 2,
- .minor = 0,
- .rev = 0,
- .funcs = &vi_common_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_GMC,
- .major = 7,
- .minor = 4,
- .rev = 0,
- .funcs = &gmc_v7_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_IH,
- .major = 2,
- .minor = 4,
- .rev = 0,
- .funcs = &iceland_ih_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_SMC,
- .major = 7,
- .minor = 1,
- .rev = 0,
- .funcs = &amdgpu_pp_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_GFX,
- .major = 8,
- .minor = 0,
- .rev = 0,
- .funcs = &gfx_v8_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_SDMA,
- .major = 2,
- .minor = 4,
- .rev = 0,
- .funcs = &sdma_v2_4_ip_funcs,
- },
-};
-
-static const struct amdgpu_ip_block_version topaz_ip_blocks_vd[] =
-{
- /* ORDER MATTERS! */
- {
- .type = AMD_IP_BLOCK_TYPE_COMMON,
- .major = 2,
- .minor = 0,
- .rev = 0,
- .funcs = &vi_common_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_GMC,
- .major = 7,
- .minor = 4,
- .rev = 0,
- .funcs = &gmc_v7_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_IH,
- .major = 2,
- .minor = 4,
- .rev = 0,
- .funcs = &iceland_ih_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_SMC,
- .major = 7,
- .minor = 1,
- .rev = 0,
- .funcs = &amdgpu_pp_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_DCE,
- .major = 1,
- .minor = 0,
- .rev = 0,
- .funcs = &dce_virtual_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_GFX,
- .major = 8,
- .minor = 0,
- .rev = 0,
- .funcs = &gfx_v8_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_SDMA,
- .major = 2,
- .minor = 4,
- .rev = 0,
- .funcs = &sdma_v2_4_ip_funcs,
- },
-};
-
-static const struct amdgpu_ip_block_version tonga_ip_blocks[] =
-{
- /* ORDER MATTERS! */
- {
- .type = AMD_IP_BLOCK_TYPE_COMMON,
- .major = 2,
- .minor = 0,
- .rev = 0,
- .funcs = &vi_common_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_GMC,
- .major = 8,
- .minor = 0,
- .rev = 0,
- .funcs = &gmc_v8_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_IH,
- .major = 3,
- .minor = 0,
- .rev = 0,
- .funcs = &tonga_ih_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_SMC,
- .major = 7,
- .minor = 1,
- .rev = 0,
- .funcs = &amdgpu_pp_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_DCE,
- .major = 10,
- .minor = 0,
- .rev = 0,
- .funcs = &dce_v10_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_GFX,
- .major = 8,
- .minor = 0,
- .rev = 0,
- .funcs = &gfx_v8_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_SDMA,
- .major = 3,
- .minor = 0,
- .rev = 0,
- .funcs = &sdma_v3_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_UVD,
- .major = 5,
- .minor = 0,
- .rev = 0,
- .funcs = &uvd_v5_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_VCE,
- .major = 3,
- .minor = 0,
- .rev = 0,
- .funcs = &vce_v3_0_ip_funcs,
- },
-};
-
-static const struct amdgpu_ip_block_version tonga_ip_blocks_vd[] =
-{
- /* ORDER MATTERS! */
- {
- .type = AMD_IP_BLOCK_TYPE_COMMON,
- .major = 2,
- .minor = 0,
- .rev = 0,
- .funcs = &vi_common_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_GMC,
- .major = 8,
- .minor = 0,
- .rev = 0,
- .funcs = &gmc_v8_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_IH,
- .major = 3,
- .minor = 0,
- .rev = 0,
- .funcs = &tonga_ih_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_SMC,
- .major = 7,
- .minor = 1,
- .rev = 0,
- .funcs = &amdgpu_pp_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_DCE,
- .major = 10,
- .minor = 0,
- .rev = 0,
- .funcs = &dce_virtual_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_GFX,
- .major = 8,
- .minor = 0,
- .rev = 0,
- .funcs = &gfx_v8_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_SDMA,
- .major = 3,
- .minor = 0,
- .rev = 0,
- .funcs = &sdma_v3_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_UVD,
- .major = 5,
- .minor = 0,
- .rev = 0,
- .funcs = &uvd_v5_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_VCE,
- .major = 3,
- .minor = 0,
- .rev = 0,
- .funcs = &vce_v3_0_ip_funcs,
- },
-};
-
-static const struct amdgpu_ip_block_version fiji_ip_blocks[] =
-{
- /* ORDER MATTERS! */
- {
- .type = AMD_IP_BLOCK_TYPE_COMMON,
- .major = 2,
- .minor = 0,
- .rev = 0,
- .funcs = &vi_common_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_GMC,
- .major = 8,
- .minor = 5,
- .rev = 0,
- .funcs = &gmc_v8_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_IH,
- .major = 3,
- .minor = 0,
- .rev = 0,
- .funcs = &tonga_ih_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_SMC,
- .major = 7,
- .minor = 1,
- .rev = 0,
- .funcs = &amdgpu_pp_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_DCE,
- .major = 10,
- .minor = 1,
- .rev = 0,
- .funcs = &dce_v10_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_GFX,
- .major = 8,
- .minor = 0,
- .rev = 0,
- .funcs = &gfx_v8_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_SDMA,
- .major = 3,
- .minor = 0,
- .rev = 0,
- .funcs = &sdma_v3_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_UVD,
- .major = 6,
- .minor = 0,
- .rev = 0,
- .funcs = &uvd_v6_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_VCE,
- .major = 3,
- .minor = 0,
- .rev = 0,
- .funcs = &vce_v3_0_ip_funcs,
- },
-};
-
-static const struct amdgpu_ip_block_version fiji_ip_blocks_vd[] =
-{
- /* ORDER MATTERS! */
- {
- .type = AMD_IP_BLOCK_TYPE_COMMON,
- .major = 2,
- .minor = 0,
- .rev = 0,
- .funcs = &vi_common_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_GMC,
- .major = 8,
- .minor = 5,
- .rev = 0,
- .funcs = &gmc_v8_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_IH,
- .major = 3,
- .minor = 0,
- .rev = 0,
- .funcs = &tonga_ih_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_SMC,
- .major = 7,
- .minor = 1,
- .rev = 0,
- .funcs = &amdgpu_pp_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_DCE,
- .major = 10,
- .minor = 1,
- .rev = 0,
- .funcs = &dce_virtual_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_GFX,
- .major = 8,
- .minor = 0,
- .rev = 0,
- .funcs = &gfx_v8_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_SDMA,
- .major = 3,
- .minor = 0,
- .rev = 0,
- .funcs = &sdma_v3_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_UVD,
- .major = 6,
- .minor = 0,
- .rev = 0,
- .funcs = &uvd_v6_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_VCE,
- .major = 3,
- .minor = 0,
- .rev = 0,
- .funcs = &vce_v3_0_ip_funcs,
- },
-};
-
-static const struct amdgpu_ip_block_version polaris11_ip_blocks[] =
-{
- /* ORDER MATTERS! */
- {
- .type = AMD_IP_BLOCK_TYPE_COMMON,
- .major = 2,
- .minor = 0,
- .rev = 0,
- .funcs = &vi_common_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_GMC,
- .major = 8,
- .minor = 1,
- .rev = 0,
- .funcs = &gmc_v8_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_IH,
- .major = 3,
- .minor = 1,
- .rev = 0,
- .funcs = &tonga_ih_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_SMC,
- .major = 7,
- .minor = 2,
- .rev = 0,
- .funcs = &amdgpu_pp_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_DCE,
- .major = 11,
- .minor = 2,
- .rev = 0,
- .funcs = &dce_v11_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_GFX,
- .major = 8,
- .minor = 0,
- .rev = 0,
- .funcs = &gfx_v8_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_SDMA,
- .major = 3,
- .minor = 1,
- .rev = 0,
- .funcs = &sdma_v3_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_UVD,
- .major = 6,
- .minor = 3,
- .rev = 0,
- .funcs = &uvd_v6_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_VCE,
- .major = 3,
- .minor = 4,
- .rev = 0,
- .funcs = &vce_v3_0_ip_funcs,
- },
-};
-
-static const struct amdgpu_ip_block_version polaris11_ip_blocks_vd[] =
-{
- /* ORDER MATTERS! */
- {
- .type = AMD_IP_BLOCK_TYPE_COMMON,
- .major = 2,
- .minor = 0,
- .rev = 0,
- .funcs = &vi_common_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_GMC,
- .major = 8,
- .minor = 1,
- .rev = 0,
- .funcs = &gmc_v8_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_IH,
- .major = 3,
- .minor = 1,
- .rev = 0,
- .funcs = &tonga_ih_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_SMC,
- .major = 7,
- .minor = 2,
- .rev = 0,
- .funcs = &amdgpu_pp_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_DCE,
- .major = 11,
- .minor = 2,
- .rev = 0,
- .funcs = &dce_virtual_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_GFX,
- .major = 8,
- .minor = 0,
- .rev = 0,
- .funcs = &gfx_v8_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_SDMA,
- .major = 3,
- .minor = 1,
- .rev = 0,
- .funcs = &sdma_v3_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_UVD,
- .major = 6,
- .minor = 3,
- .rev = 0,
- .funcs = &uvd_v6_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_VCE,
- .major = 3,
- .minor = 4,
- .rev = 0,
- .funcs = &vce_v3_0_ip_funcs,
- },
-};
-
-static const struct amdgpu_ip_block_version cz_ip_blocks[] =
-{
- /* ORDER MATTERS! */
- {
- .type = AMD_IP_BLOCK_TYPE_COMMON,
- .major = 2,
- .minor = 0,
- .rev = 0,
- .funcs = &vi_common_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_GMC,
- .major = 8,
- .minor = 0,
- .rev = 0,
- .funcs = &gmc_v8_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_IH,
- .major = 3,
- .minor = 0,
- .rev = 0,
- .funcs = &cz_ih_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_SMC,
- .major = 8,
- .minor = 0,
- .rev = 0,
- .funcs = &amdgpu_pp_ip_funcs
- },
- {
- .type = AMD_IP_BLOCK_TYPE_DCE,
- .major = 11,
- .minor = 0,
- .rev = 0,
- .funcs = &dce_v11_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_GFX,
- .major = 8,
- .minor = 0,
- .rev = 0,
- .funcs = &gfx_v8_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_SDMA,
- .major = 3,
- .minor = 0,
- .rev = 0,
- .funcs = &sdma_v3_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_UVD,
- .major = 6,
- .minor = 0,
- .rev = 0,
- .funcs = &uvd_v6_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_VCE,
- .major = 3,
- .minor = 0,
- .rev = 0,
- .funcs = &vce_v3_0_ip_funcs,
- },
-#if defined(CONFIG_DRM_AMD_ACP)
- {
- .type = AMD_IP_BLOCK_TYPE_ACP,
- .major = 2,
- .minor = 2,
- .rev = 0,
- .funcs = &acp_ip_funcs,
- },
-#endif
-};
-
-static const struct amdgpu_ip_block_version cz_ip_blocks_vd[] =
-{
- /* ORDER MATTERS! */
- {
- .type = AMD_IP_BLOCK_TYPE_COMMON,
- .major = 2,
- .minor = 0,
- .rev = 0,
- .funcs = &vi_common_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_GMC,
- .major = 8,
- .minor = 0,
- .rev = 0,
- .funcs = &gmc_v8_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_IH,
- .major = 3,
- .minor = 0,
- .rev = 0,
- .funcs = &cz_ih_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_SMC,
- .major = 8,
- .minor = 0,
- .rev = 0,
- .funcs = &amdgpu_pp_ip_funcs
- },
- {
- .type = AMD_IP_BLOCK_TYPE_DCE,
- .major = 11,
- .minor = 0,
- .rev = 0,
- .funcs = &dce_virtual_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_GFX,
- .major = 8,
- .minor = 0,
- .rev = 0,
- .funcs = &gfx_v8_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_SDMA,
- .major = 3,
- .minor = 0,
- .rev = 0,
- .funcs = &sdma_v3_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_UVD,
- .major = 6,
- .minor = 0,
- .rev = 0,
- .funcs = &uvd_v6_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_VCE,
- .major = 3,
- .minor = 0,
- .rev = 0,
- .funcs = &vce_v3_0_ip_funcs,
- },
-#if defined(CONFIG_DRM_AMD_ACP)
- {
- .type = AMD_IP_BLOCK_TYPE_ACP,
- .major = 2,
- .minor = 2,
- .rev = 0,
- .funcs = &acp_ip_funcs,
- },
-#endif
-};
-
-int vi_set_ip_blocks(struct amdgpu_device *adev)
-{
- if (adev->enable_virtual_display) {
- switch (adev->asic_type) {
- case CHIP_TOPAZ:
- adev->ip_blocks = topaz_ip_blocks_vd;
- adev->num_ip_blocks = ARRAY_SIZE(topaz_ip_blocks_vd);
- break;
- case CHIP_FIJI:
- adev->ip_blocks = fiji_ip_blocks_vd;
- adev->num_ip_blocks = ARRAY_SIZE(fiji_ip_blocks_vd);
- break;
- case CHIP_TONGA:
- adev->ip_blocks = tonga_ip_blocks_vd;
- adev->num_ip_blocks = ARRAY_SIZE(tonga_ip_blocks_vd);
- break;
- case CHIP_POLARIS11:
- case CHIP_POLARIS10:
- adev->ip_blocks = polaris11_ip_blocks_vd;
- adev->num_ip_blocks = ARRAY_SIZE(polaris11_ip_blocks_vd);
- break;
-
- case CHIP_CARRIZO:
- case CHIP_STONEY:
- adev->ip_blocks = cz_ip_blocks_vd;
- adev->num_ip_blocks = ARRAY_SIZE(cz_ip_blocks_vd);
- break;
- default:
- /* FIXME: not supported yet */
- return -EINVAL;
- }
- } else {
- switch (adev->asic_type) {
- case CHIP_TOPAZ:
- adev->ip_blocks = topaz_ip_blocks;
- adev->num_ip_blocks = ARRAY_SIZE(topaz_ip_blocks);
- break;
- case CHIP_FIJI:
- adev->ip_blocks = fiji_ip_blocks;
- adev->num_ip_blocks = ARRAY_SIZE(fiji_ip_blocks);
- break;
- case CHIP_TONGA:
- adev->ip_blocks = tonga_ip_blocks;
- adev->num_ip_blocks = ARRAY_SIZE(tonga_ip_blocks);
- break;
- case CHIP_POLARIS11:
- case CHIP_POLARIS10:
- adev->ip_blocks = polaris11_ip_blocks;
- adev->num_ip_blocks = ARRAY_SIZE(polaris11_ip_blocks);
- break;
- case CHIP_CARRIZO:
- case CHIP_STONEY:
- adev->ip_blocks = cz_ip_blocks;
- adev->num_ip_blocks = ARRAY_SIZE(cz_ip_blocks);
- break;
- default:
- /* FIXME: not supported yet */
- return -EINVAL;
- }
- }
-
- return 0;
-}
-
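
All of the deleted tables and the deleted vi_set_ip_blocks() implemented a whole-array binding: each ASIC (and each ASIC with virtual display) needed its own complete const array, duplicating the common entries, and the device simply pointed at one of them. A simplified model of that scheme, with types reduced to the essentials:

#include <stddef.h>

struct ip_block { int type, major, minor; };

struct device {
	const struct ip_block *ip_blocks;
	size_t num_ip_blocks;
};

static const struct ip_block tonga_blocks[] = {
	{ /* COMMON */ 0, 2, 0 },
	{ /* GMC    */ 1, 8, 0 },
	/* ... one entry per IP, order matters ... */
};

static void bind_static(struct device *dev)
{
	/* the old style: swap in one fixed array per ASIC variant */
	dev->ip_blocks = tonga_blocks;
	dev->num_ip_blocks = sizeof(tonga_blocks) / sizeof(tonga_blocks[0]);
}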
#define ATI_REV_ID_FUSE_MACRO__ADDRESS 0xC0014044
#define ATI_REV_ID_FUSE_MACRO__SHIFT 9
#define ATI_REV_ID_FUSE_MACRO__MASK 0x00001E00
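
The three macros above follow the usual MASK/SHIFT convention: mask the fuse word, then shift the field down to bit 0. A self-contained sketch of the extraction (the macro values are copied from the lines above; the function name and fuse_word parameter are invented, since the consuming code sits outside this hunk):

#include <stdint.h>

#define REV_ID_MASK  0x00001E00u   /* ATI_REV_ID_FUSE_MACRO__MASK */
#define REV_ID_SHIFT 9             /* ATI_REV_ID_FUSE_MACRO__SHIFT */

static uint32_t rev_id_from_fuse(uint32_t fuse_word)
{
	return (fuse_word & REV_ID_MASK) >> REV_ID_SHIFT;
}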
@@ -1589,22 +926,71 @@ static int vi_common_early_init(void *handle)
AMD_CG_SUPPORT_HDP_LS |
AMD_CG_SUPPORT_ROM_MGCG |
AMD_CG_SUPPORT_MC_MGCG |
- AMD_CG_SUPPORT_MC_LS;
+ AMD_CG_SUPPORT_MC_LS |
+ AMD_CG_SUPPORT_UVD_MGCG;
adev->pg_flags = 0;
adev->external_rev_id = adev->rev_id + 0x3c;
break;
case CHIP_TONGA:
- adev->cg_flags = AMD_CG_SUPPORT_UVD_MGCG;
+ adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
+ AMD_CG_SUPPORT_GFX_CGCG |
+ AMD_CG_SUPPORT_GFX_CGLS |
+ AMD_CG_SUPPORT_SDMA_MGCG |
+ AMD_CG_SUPPORT_SDMA_LS |
+ AMD_CG_SUPPORT_BIF_LS |
+ AMD_CG_SUPPORT_HDP_MGCG |
+ AMD_CG_SUPPORT_HDP_LS |
+ AMD_CG_SUPPORT_ROM_MGCG |
+ AMD_CG_SUPPORT_MC_MGCG |
+ AMD_CG_SUPPORT_MC_LS |
+ AMD_CG_SUPPORT_DRM_LS |
+ AMD_CG_SUPPORT_UVD_MGCG;
adev->pg_flags = 0;
adev->external_rev_id = adev->rev_id + 0x14;
break;
case CHIP_POLARIS11:
- adev->cg_flags = 0;
+ adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
+ AMD_CG_SUPPORT_GFX_RLC_LS |
+ AMD_CG_SUPPORT_GFX_CP_LS |
+ AMD_CG_SUPPORT_GFX_CGCG |
+ AMD_CG_SUPPORT_GFX_CGLS |
+ AMD_CG_SUPPORT_GFX_3D_CGCG |
+ AMD_CG_SUPPORT_GFX_3D_CGLS |
+ AMD_CG_SUPPORT_SDMA_MGCG |
+ AMD_CG_SUPPORT_SDMA_LS |
+ AMD_CG_SUPPORT_BIF_MGCG |
+ AMD_CG_SUPPORT_BIF_LS |
+ AMD_CG_SUPPORT_HDP_MGCG |
+ AMD_CG_SUPPORT_HDP_LS |
+ AMD_CG_SUPPORT_ROM_MGCG |
+ AMD_CG_SUPPORT_MC_MGCG |
+ AMD_CG_SUPPORT_MC_LS |
+ AMD_CG_SUPPORT_DRM_LS |
+ AMD_CG_SUPPORT_UVD_MGCG |
+ AMD_CG_SUPPORT_VCE_MGCG;
adev->pg_flags = 0;
adev->external_rev_id = adev->rev_id + 0x5A;
break;
case CHIP_POLARIS10:
- adev->cg_flags = 0;
+ adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
+ AMD_CG_SUPPORT_GFX_RLC_LS |
+ AMD_CG_SUPPORT_GFX_CP_LS |
+ AMD_CG_SUPPORT_GFX_CGCG |
+ AMD_CG_SUPPORT_GFX_CGLS |
+ AMD_CG_SUPPORT_GFX_3D_CGCG |
+ AMD_CG_SUPPORT_GFX_3D_CGLS |
+ AMD_CG_SUPPORT_SDMA_MGCG |
+ AMD_CG_SUPPORT_SDMA_LS |
+ AMD_CG_SUPPORT_BIF_MGCG |
+ AMD_CG_SUPPORT_BIF_LS |
+ AMD_CG_SUPPORT_HDP_MGCG |
+ AMD_CG_SUPPORT_HDP_LS |
+ AMD_CG_SUPPORT_ROM_MGCG |
+ AMD_CG_SUPPORT_MC_MGCG |
+ AMD_CG_SUPPORT_MC_LS |
+ AMD_CG_SUPPORT_DRM_LS |
+ AMD_CG_SUPPORT_UVD_MGCG |
+ AMD_CG_SUPPORT_VCE_MGCG;
adev->pg_flags = 0;
adev->external_rev_id = adev->rev_id + 0x50;
break;
@@ -1631,6 +1017,7 @@ static int vi_common_early_init(void *handle)
adev->pg_flags |= AMD_PG_SUPPORT_GFX_PG |
AMD_PG_SUPPORT_GFX_SMG |
AMD_PG_SUPPORT_GFX_PIPELINE |
+ AMD_PG_SUPPORT_CP |
AMD_PG_SUPPORT_UVD |
AMD_PG_SUPPORT_VCE;
}
@@ -1656,6 +1043,7 @@ static int vi_common_early_init(void *handle)
adev->pg_flags = AMD_PG_SUPPORT_GFX_PG |
AMD_PG_SUPPORT_GFX_SMG |
AMD_PG_SUPPORT_GFX_PIPELINE |
+ AMD_PG_SUPPORT_CP |
AMD_PG_SUPPORT_UVD |
AMD_PG_SUPPORT_VCE;
adev->external_rev_id = adev->rev_id + 0x61;
@@ -1815,57 +1203,118 @@ static void vi_update_rom_medium_grain_clock_gating(struct amdgpu_device *adev,
static int vi_common_set_clockgating_state_by_smu(void *handle,
enum amd_clockgating_state state)
{
- uint32_t msg_id, pp_state;
+ uint32_t msg_id, pp_state = 0;
+ uint32_t pp_support_state = 0;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
void *pp_handle = adev->powerplay.pp_handle;
- if (state == AMD_CG_STATE_UNGATE)
- pp_state = 0;
- else
- pp_state = PP_STATE_CG | PP_STATE_LS;
-
- msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
- PP_BLOCK_SYS_MC,
- PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
- pp_state);
- amd_set_clockgating_by_smu(pp_handle, msg_id);
-
- msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
- PP_BLOCK_SYS_SDMA,
- PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
- pp_state);
- amd_set_clockgating_by_smu(pp_handle, msg_id);
-
- msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
- PP_BLOCK_SYS_HDP,
- PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
- pp_state);
- amd_set_clockgating_by_smu(pp_handle, msg_id);
-
- msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
- PP_BLOCK_SYS_BIF,
- PP_STATE_SUPPORT_LS,
- pp_state);
- amd_set_clockgating_by_smu(pp_handle, msg_id);
-
- msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
- PP_BLOCK_SYS_BIF,
- PP_STATE_SUPPORT_CG,
- pp_state);
- amd_set_clockgating_by_smu(pp_handle, msg_id);
-
- msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
- PP_BLOCK_SYS_DRM,
- PP_STATE_SUPPORT_LS,
- pp_state);
- amd_set_clockgating_by_smu(pp_handle, msg_id);
-
- msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
- PP_BLOCK_SYS_ROM,
- PP_STATE_SUPPORT_CG,
- pp_state);
- amd_set_clockgating_by_smu(pp_handle, msg_id);
+ if (adev->cg_flags & (AMD_CG_SUPPORT_MC_LS | AMD_CG_SUPPORT_MC_MGCG)) {
+ if (adev->cg_flags & AMD_CG_SUPPORT_MC_LS) {
+ pp_support_state = AMD_CG_SUPPORT_MC_LS;
+ pp_state = PP_STATE_LS;
+ }
+ if (adev->cg_flags & AMD_CG_SUPPORT_MC_MGCG) {
+ pp_support_state |= AMD_CG_SUPPORT_MC_MGCG;
+ pp_state |= PP_STATE_CG;
+ }
+ if (state == AMD_CG_STATE_UNGATE)
+ pp_state = 0;
+ msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
+ PP_BLOCK_SYS_MC,
+ pp_support_state,
+ pp_state);
+ amd_set_clockgating_by_smu(pp_handle, msg_id);
+ }
+
+ if (adev->cg_flags & (AMD_CG_SUPPORT_SDMA_LS | AMD_CG_SUPPORT_SDMA_MGCG)) {
+ if (adev->cg_flags & AMD_CG_SUPPORT_SDMA_LS) {
+ pp_support_state = AMD_CG_SUPPORT_SDMA_LS;
+ pp_state = PP_STATE_LS;
+ }
+ if (adev->cg_flags & AMD_CG_SUPPORT_SDMA_MGCG) {
+ pp_support_state |= AMD_CG_SUPPORT_SDMA_MGCG;
+ pp_state |= PP_STATE_CG;
+ }
+ if (state == AMD_CG_STATE_UNGATE)
+ pp_state = 0;
+ msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
+ PP_BLOCK_SYS_SDMA,
+ pp_support_state,
+ pp_state);
+ amd_set_clockgating_by_smu(pp_handle, msg_id);
+ }
+
+ if (adev->cg_flags & (AMD_CG_SUPPORT_HDP_LS | AMD_CG_SUPPORT_HDP_MGCG)) {
+ if (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS) {
+ pp_support_state = AMD_CG_SUPPORT_HDP_LS;
+ pp_state = PP_STATE_LS;
+ }
+ if (adev->cg_flags & AMD_CG_SUPPORT_HDP_MGCG) {
+ pp_support_state |= AMD_CG_SUPPORT_HDP_MGCG;
+ pp_state |= PP_STATE_CG;
+ }
+ if (state == AMD_CG_STATE_UNGATE)
+ pp_state = 0;
+ msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
+ PP_BLOCK_SYS_HDP,
+ pp_support_state,
+ pp_state);
+ amd_set_clockgating_by_smu(pp_handle, msg_id);
+ }
+
+
+ if (adev->cg_flags & AMD_CG_SUPPORT_BIF_LS) {
+ if (state == AMD_CG_STATE_UNGATE)
+ pp_state = 0;
+ else
+ pp_state = PP_STATE_LS;
+ msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
+ PP_BLOCK_SYS_BIF,
+ PP_STATE_SUPPORT_LS,
+ pp_state);
+ amd_set_clockgating_by_smu(pp_handle, msg_id);
+ }
+ if (adev->cg_flags & AMD_CG_SUPPORT_BIF_MGCG) {
+ if (state == AMD_CG_STATE_UNGATE)
+ pp_state = 0;
+ else
+ pp_state = PP_STATE_CG;
+
+ msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
+ PP_BLOCK_SYS_BIF,
+ PP_STATE_SUPPORT_CG,
+ pp_state);
+ amd_set_clockgating_by_smu(pp_handle, msg_id);
+ }
+
+ if (adev->cg_flags & AMD_CG_SUPPORT_DRM_LS) {
+
+ if (state == AMD_CG_STATE_UNGATE)
+ pp_state = 0;
+ else
+ pp_state = PP_STATE_LS;
+
+ msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
+ PP_BLOCK_SYS_DRM,
+ PP_STATE_SUPPORT_LS,
+ pp_state);
+ amd_set_clockgating_by_smu(pp_handle, msg_id);
+ }
+
+ if (adev->cg_flags & AMD_CG_SUPPORT_ROM_MGCG) {
+
+ if (state == AMD_CG_STATE_UNGATE)
+ pp_state = 0;
+ else
+ pp_state = PP_STATE_CG;
+
+ msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
+ PP_BLOCK_SYS_ROM,
+ PP_STATE_SUPPORT_CG,
+ pp_state);
+ amd_set_clockgating_by_smu(pp_handle, msg_id);
+ }
return 0;
}
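
The rewritten function above sends one SMU message per hardware block, and only for blocks whose cg_flags bits are actually set, packing group, block id, supported states, and the requested state into a single message word. The real encoding lives in the PP_CG_MSG_ID macro; the field widths below are assumptions chosen only to make the packing idea concrete:

#include <stdint.h>

#define PP_STATE_CG 0x01u
#define PP_STATE_LS 0x02u

static uint32_t cg_msg_id(uint32_t group, uint32_t block,
			  uint32_t support, uint32_t state)
{
	/* e.g. cg_msg_id(GROUP_SYS, BLOCK_MC, support, PP_STATE_CG | PP_STATE_LS) */
	return (group << 24) | (block << 16) | (support << 8) | state;
}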
@@ -1910,7 +1359,7 @@ static int vi_common_set_powergating_state(void *handle,
return 0;
}
-const struct amd_ip_funcs vi_common_ip_funcs = {
+static const struct amd_ip_funcs vi_common_ip_funcs = {
.name = "vi_common",
.early_init = vi_common_early_init,
.late_init = NULL,
@@ -1927,3 +1376,110 @@ const struct amd_ip_funcs vi_common_ip_funcs = {
.set_powergating_state = vi_common_set_powergating_state,
};
+static const struct amdgpu_ip_block_version vi_common_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_COMMON,
+ .major = 1,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &vi_common_ip_funcs,
+};
+
+int vi_set_ip_blocks(struct amdgpu_device *adev)
+{
+ switch (adev->asic_type) {
+ case CHIP_TOPAZ:
+ /* topaz has no DCE, UVD, VCE */
+ amdgpu_ip_block_add(adev, &vi_common_ip_block);
+ amdgpu_ip_block_add(adev, &gmc_v7_4_ip_block);
+ amdgpu_ip_block_add(adev, &iceland_ih_ip_block);
+ amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
+ if (adev->enable_virtual_display)
+ amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
+ amdgpu_ip_block_add(adev, &gfx_v8_0_ip_block);
+ amdgpu_ip_block_add(adev, &sdma_v2_4_ip_block);
+ break;
+ case CHIP_FIJI:
+ amdgpu_ip_block_add(adev, &vi_common_ip_block);
+ amdgpu_ip_block_add(adev, &gmc_v8_5_ip_block);
+ amdgpu_ip_block_add(adev, &tonga_ih_ip_block);
+ amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
+ if (adev->enable_virtual_display)
+ amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
+ else
+ amdgpu_ip_block_add(adev, &dce_v10_1_ip_block);
+ amdgpu_ip_block_add(adev, &gfx_v8_0_ip_block);
+ amdgpu_ip_block_add(adev, &sdma_v3_0_ip_block);
+ amdgpu_ip_block_add(adev, &uvd_v6_0_ip_block);
+ amdgpu_ip_block_add(adev, &vce_v3_0_ip_block);
+ break;
+ case CHIP_TONGA:
+ amdgpu_ip_block_add(adev, &vi_common_ip_block);
+ amdgpu_ip_block_add(adev, &gmc_v8_0_ip_block);
+ amdgpu_ip_block_add(adev, &tonga_ih_ip_block);
+ amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
+ if (adev->enable_virtual_display)
+ amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
+ else
+ amdgpu_ip_block_add(adev, &dce_v10_0_ip_block);
+ amdgpu_ip_block_add(adev, &gfx_v8_0_ip_block);
+ amdgpu_ip_block_add(adev, &sdma_v3_0_ip_block);
+ amdgpu_ip_block_add(adev, &uvd_v5_0_ip_block);
+ amdgpu_ip_block_add(adev, &vce_v3_0_ip_block);
+ break;
+ case CHIP_POLARIS11:
+ case CHIP_POLARIS10:
+ amdgpu_ip_block_add(adev, &vi_common_ip_block);
+ amdgpu_ip_block_add(adev, &gmc_v8_1_ip_block);
+ amdgpu_ip_block_add(adev, &tonga_ih_ip_block);
+ amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
+ if (adev->enable_virtual_display)
+ amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
+ else
+ amdgpu_ip_block_add(adev, &dce_v11_2_ip_block);
+ amdgpu_ip_block_add(adev, &gfx_v8_0_ip_block);
+ amdgpu_ip_block_add(adev, &sdma_v3_1_ip_block);
+ amdgpu_ip_block_add(adev, &uvd_v6_3_ip_block);
+ amdgpu_ip_block_add(adev, &vce_v3_4_ip_block);
+ break;
+ case CHIP_CARRIZO:
+ amdgpu_ip_block_add(adev, &vi_common_ip_block);
+ amdgpu_ip_block_add(adev, &gmc_v8_0_ip_block);
+ amdgpu_ip_block_add(adev, &cz_ih_ip_block);
+ amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
+ if (adev->enable_virtual_display)
+ amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
+ else
+ amdgpu_ip_block_add(adev, &dce_v11_0_ip_block);
+ amdgpu_ip_block_add(adev, &gfx_v8_0_ip_block);
+ amdgpu_ip_block_add(adev, &sdma_v3_0_ip_block);
+ amdgpu_ip_block_add(adev, &uvd_v6_0_ip_block);
+ amdgpu_ip_block_add(adev, &vce_v3_1_ip_block);
+#if defined(CONFIG_DRM_AMD_ACP)
+ amdgpu_ip_block_add(adev, &acp_ip_block);
+#endif
+ break;
+ case CHIP_STONEY:
+ amdgpu_ip_block_add(adev, &vi_common_ip_block);
+ amdgpu_ip_block_add(adev, &gmc_v8_0_ip_block);
+ amdgpu_ip_block_add(adev, &cz_ih_ip_block);
+ amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
+ if (adev->enable_virtual_display)
+ amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
+ else
+ amdgpu_ip_block_add(adev, &dce_v11_0_ip_block);
+ amdgpu_ip_block_add(adev, &gfx_v8_1_ip_block);
+ amdgpu_ip_block_add(adev, &sdma_v3_0_ip_block);
+ amdgpu_ip_block_add(adev, &uvd_v6_2_ip_block);
+ amdgpu_ip_block_add(adev, &vce_v3_4_ip_block);
+#if defined(CONFIG_DRM_AMD_ACP)
+ amdgpu_ip_block_add(adev, &acp_ip_block);
+#endif
+ break;
+ default:
+ /* FIXME: not supported yet */
+ return -EINVAL;
+ }
+
+ return 0;
+}
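
The new vi_set_ip_blocks() above registers blocks one call at a time, so shared sequences (COMMON, GMC, IH, SMC, ...) are written once per ASIC and the virtual-display choice becomes a single if/else instead of a duplicated table. A compilable model of the calling pattern; amdgpu_ip_block_add()'s real signature lives in amdgpu.h, and this only mirrors what the calls here make visible:

#include <stddef.h>

#define MAX_BLOCKS 16

struct ip_block_version { int type, major, minor, rev; };

struct device {
	struct ip_block_version blocks[MAX_BLOCKS];
	size_t num_blocks;
};

static int ip_block_add(struct device *dev, const struct ip_block_version *v)
{
	if (dev->num_blocks >= MAX_BLOCKS)
		return -1;                       /* table full */
	dev->blocks[dev->num_blocks++] = *v;     /* call order is preserved */
	return 0;
}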
diff --git a/drivers/gpu/drm/amd/amdgpu/vi.h b/drivers/gpu/drm/amd/amdgpu/vi.h
index 502094042462..575d7aed5d32 100644
--- a/drivers/gpu/drm/amd/amdgpu/vi.h
+++ b/drivers/gpu/drm/amd/amdgpu/vi.h
@@ -24,8 +24,6 @@
#ifndef __VI_H__
#define __VI_H__
-extern const struct amd_ip_funcs vi_common_ip_funcs;
-
void vi_srbm_select(struct amdgpu_device *adev,
u32 me, u32 pipe, u32 queue, u32 vmid);
int vi_set_ip_blocks(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/include/amd_shared.h b/drivers/gpu/drm/amd/include/amd_shared.h
index bec8125bceb0..c02469ada9f1 100644
--- a/drivers/gpu/drm/amd/include/amd_shared.h
+++ b/drivers/gpu/drm/amd/include/amd_shared.h
@@ -84,6 +84,29 @@ enum amd_powergating_state {
AMD_PG_STATE_UNGATE,
};
+struct amd_vce_state {
+ /* vce clocks */
+ u32 evclk;
+ u32 ecclk;
+ /* gpu clocks */
+ u32 sclk;
+ u32 mclk;
+ u8 clk_idx;
+ u8 pstate;
+};
+
+
+#define AMD_MAX_VCE_LEVELS 6
+
+enum amd_vce_level {
+ AMD_VCE_LEVEL_AC_ALL = 0, /* AC, All cases */
+ AMD_VCE_LEVEL_DC_EE = 1, /* DC, entropy encoding */
+ AMD_VCE_LEVEL_DC_LL_LOW = 2, /* DC, low latency queue, res <= 720 */
+ AMD_VCE_LEVEL_DC_LL_HIGH = 3, /* DC, low latency queue, 1080 >= res > 720 */
+ AMD_VCE_LEVEL_DC_GP_LOW = 4, /* DC, general purpose queue, res <= 720 */
+ AMD_VCE_LEVEL_DC_GP_HIGH = 5, /* DC, general purpose queue, 1080 >= res > 720 */
+};
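
The struct and enum just added pair naturally: a table of amd_vce_state entries indexed by amd_vce_level, consulted when picking clocks for a given VCE workload. A sketch of that lookup under those assumptions; the clock numbers are invented and carry no meaning beyond illustration:

#include <stdint.h>

struct vce_state { uint32_t evclk, ecclk, sclk, mclk; };

enum vce_level { VCE_LEVEL_AC_ALL = 0, VCE_LEVEL_DC_EE = 1 };

static const struct vce_state vce_states[6] = {
	[VCE_LEVEL_AC_ALL] = { .evclk = 600000, .sclk = 800000 },
	[VCE_LEVEL_DC_EE]  = { .evclk = 300000, .sclk = 400000 },
};

static uint32_t vce_evclk_for(enum vce_level lvl)
{
	return vce_states[lvl].evclk;   /* pick clocks for the active level */
}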
+
/* CG flags */
#define AMD_CG_SUPPORT_GFX_MGCG (1 << 0)
#define AMD_CG_SUPPORT_GFX_MGLS (1 << 1)
@@ -103,6 +126,10 @@ enum amd_powergating_state {
#define AMD_CG_SUPPORT_HDP_LS (1 << 15)
#define AMD_CG_SUPPORT_HDP_MGCG (1 << 16)
#define AMD_CG_SUPPORT_ROM_MGCG (1 << 17)
+#define AMD_CG_SUPPORT_DRM_LS (1 << 18)
+#define AMD_CG_SUPPORT_BIF_MGCG (1 << 19)
+#define AMD_CG_SUPPORT_GFX_3D_CGCG (1 << 20)
+#define AMD_CG_SUPPORT_GFX_3D_CGLS (1 << 21)
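
Each new flag appended above must occupy a bit no earlier define uses, since the whole set shares one 32-bit cg_flags word. A cheap compile-time overlap guard, purely illustrative and not something the kernel header actually carries:

#define CG_DRM_LS       (1u << 18)
#define CG_BIF_MGCG     (1u << 19)
#define CG_GFX_3D_CGCG  (1u << 20)
#define CG_GFX_3D_CGLS  (1u << 21)

_Static_assert((CG_DRM_LS & CG_BIF_MGCG) == 0 &&
	       (CG_GFX_3D_CGCG & CG_GFX_3D_CGLS) == 0,
	       "CG flag bits must not overlap");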
/* PG flags */
#define AMD_PG_SUPPORT_GFX_PG (1 << 0)
diff --git a/drivers/gpu/drm/amd/include/asic_reg/bif/bif_3_0_d.h b/drivers/gpu/drm/amd/include/asic_reg/bif/bif_3_0_d.h
new file mode 100644
index 000000000000..7138fbf7256a
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/bif/bif_3_0_d.h
@@ -0,0 +1,661 @@
+/*
+ *
+ * Copyright (C) 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef BIF_3_0_D_H
+#define BIF_3_0_D_H
+
+#define ixPB0_DFT_DEBUG_CTRL_REG0 0x1300C
+#define ixPB0_DFT_JIT_INJ_REG0 0x13000
+#define ixPB0_DFT_JIT_INJ_REG1 0x13004
+#define ixPB0_DFT_JIT_INJ_REG2 0x13008
+#define ixPB0_GLB_CTRL_REG0 0x10004
+#define ixPB0_GLB_CTRL_REG1 0x10008
+#define ixPB0_GLB_CTRL_REG2 0x1000C
+#define ixPB0_GLB_CTRL_REG3 0x10010
+#define ixPB0_GLB_CTRL_REG4 0x10014
+#define ixPB0_GLB_CTRL_REG5 0x10018
+#define ixPB0_GLB_OVRD_REG0 0x10030
+#define ixPB0_GLB_OVRD_REG1 0x10034
+#define ixPB0_GLB_OVRD_REG2 0x10038
+#define ixPB0_GLB_SCI_STAT_OVRD_REG0 0x1001C
+#define ixPB0_GLB_SCI_STAT_OVRD_REG1 0x10020
+#define ixPB0_GLB_SCI_STAT_OVRD_REG2 0x10024
+#define ixPB0_GLB_SCI_STAT_OVRD_REG3 0x10028
+#define ixPB0_GLB_SCI_STAT_OVRD_REG4 0x1002C
+#define ixPB0_HW_DEBUG 0x12004
+#define ixPB0_PIF_CNTL 0x0010
+#define ixPB0_PIF_CNTL2 0x0014
+#define ixPB0_PIF_HW_DEBUG 0x0002
+#define ixPB0_PIF_PAIRING 0x0011
+#define ixPB0_PIF_PDNB_OVERRIDE_0 0x0020
+#define ixPB0_PIF_PDNB_OVERRIDE_10 0x0032
+#define ixPB0_PIF_PDNB_OVERRIDE_1 0x0021
+#define ixPB0_PIF_PDNB_OVERRIDE_11 0x0033
+#define ixPB0_PIF_PDNB_OVERRIDE_12 0x0034
+#define ixPB0_PIF_PDNB_OVERRIDE_13 0x0035
+#define ixPB0_PIF_PDNB_OVERRIDE_14 0x0036
+#define ixPB0_PIF_PDNB_OVERRIDE_15 0x0037
+#define ixPB0_PIF_PDNB_OVERRIDE_2 0x0022
+#define ixPB0_PIF_PDNB_OVERRIDE_3 0x0023
+#define ixPB0_PIF_PDNB_OVERRIDE_4 0x0024
+#define ixPB0_PIF_PDNB_OVERRIDE_5 0x0025
+#define ixPB0_PIF_PDNB_OVERRIDE_6 0x0026
+#define ixPB0_PIF_PDNB_OVERRIDE_7 0x0027
+#define ixPB0_PIF_PDNB_OVERRIDE_8 0x0030
+#define ixPB0_PIF_PDNB_OVERRIDE_9 0x0031
+#define ixPB0_PIF_PWRDOWN_0 0x0012
+#define ixPB0_PIF_PWRDOWN_1 0x0013
+#define ixPB0_PIF_PWRDOWN_2 0x0017
+#define ixPB0_PIF_PWRDOWN_3 0x0018
+#define ixPB0_PIF_SC_CTL 0x0016
+#define ixPB0_PIF_SCRATCH 0x0001
+#define ixPB0_PIF_SEQ_STATUS_0 0x0028
+#define ixPB0_PIF_SEQ_STATUS_10 0x003A
+#define ixPB0_PIF_SEQ_STATUS_1 0x0029
+#define ixPB0_PIF_SEQ_STATUS_11 0x003B
+#define ixPB0_PIF_SEQ_STATUS_12 0x003C
+#define ixPB0_PIF_SEQ_STATUS_13 0x003D
+#define ixPB0_PIF_SEQ_STATUS_14 0x003E
+#define ixPB0_PIF_SEQ_STATUS_15 0x003F
+#define ixPB0_PIF_SEQ_STATUS_2 0x002A
+#define ixPB0_PIF_SEQ_STATUS_3 0x002B
+#define ixPB0_PIF_SEQ_STATUS_4 0x002C
+#define ixPB0_PIF_SEQ_STATUS_5 0x002D
+#define ixPB0_PIF_SEQ_STATUS_6 0x002E
+#define ixPB0_PIF_SEQ_STATUS_7 0x002F
+#define ixPB0_PIF_SEQ_STATUS_8 0x0038
+#define ixPB0_PIF_SEQ_STATUS_9 0x0039
+#define ixPB0_PIF_TXPHYSTATUS 0x0015
+#define ixPB0_PLL_LC0_CTRL_REG0 0x14480
+#define ixPB0_PLL_LC0_OVRD_REG0 0x14490
+#define ixPB0_PLL_LC0_OVRD_REG1 0x14494
+#define ixPB0_PLL_LC0_SCI_STAT_OVRD_REG0 0x14500
+#define ixPB0_PLL_LC1_SCI_STAT_OVRD_REG0 0x14504
+#define ixPB0_PLL_LC2_SCI_STAT_OVRD_REG0 0x14508
+#define ixPB0_PLL_LC3_SCI_STAT_OVRD_REG0 0x1450C
+#define ixPB0_PLL_RO0_CTRL_REG0 0x14440
+#define ixPB0_PLL_RO0_OVRD_REG0 0x14450
+#define ixPB0_PLL_RO0_OVRD_REG1 0x14454
+#define ixPB0_PLL_RO0_SCI_STAT_OVRD_REG0 0x14460
+#define ixPB0_PLL_RO1_SCI_STAT_OVRD_REG0 0x14464
+#define ixPB0_PLL_RO2_SCI_STAT_OVRD_REG0 0x14468
+#define ixPB0_PLL_RO3_SCI_STAT_OVRD_REG0 0x1446C
+#define ixPB0_PLL_RO_GLB_CTRL_REG0 0x14000
+#define ixPB0_PLL_RO_GLB_OVRD_REG0 0x14010
+#define ixPB0_RX_GLB_CTRL_REG0 0x16000
+#define ixPB0_RX_GLB_CTRL_REG1 0x16004
+#define ixPB0_RX_GLB_CTRL_REG2 0x16008
+#define ixPB0_RX_GLB_CTRL_REG3 0x1600C
+#define ixPB0_RX_GLB_CTRL_REG4 0x16010
+#define ixPB0_RX_GLB_CTRL_REG5 0x16014
+#define ixPB0_RX_GLB_CTRL_REG6 0x16018
+#define ixPB0_RX_GLB_CTRL_REG7 0x1601C
+#define ixPB0_RX_GLB_CTRL_REG8 0x16020
+#define ixPB0_RX_GLB_OVRD_REG0 0x16030
+#define ixPB0_RX_GLB_OVRD_REG1 0x16034
+#define ixPB0_RX_GLB_SCI_STAT_OVRD_REG0 0x16028
+#define ixPB0_RX_LANE0_CTRL_REG0 0x16440
+#define ixPB0_RX_LANE0_SCI_STAT_OVRD_REG0 0x16448
+#define ixPB0_RX_LANE10_CTRL_REG0 0x17500
+#define ixPB0_RX_LANE10_SCI_STAT_OVRD_REG0 0x17508
+#define ixPB0_RX_LANE11_CTRL_REG0 0x17600
+#define ixPB0_RX_LANE11_SCI_STAT_OVRD_REG0 0x17608
+#define ixPB0_RX_LANE12_CTRL_REG0 0x17840
+#define ixPB0_RX_LANE12_SCI_STAT_OVRD_REG0 0x17848
+#define ixPB0_RX_LANE13_CTRL_REG0 0x17880
+#define ixPB0_RX_LANE13_SCI_STAT_OVRD_REG0 0x17888
+#define ixPB0_RX_LANE14_CTRL_REG0 0x17900
+#define ixPB0_RX_LANE14_SCI_STAT_OVRD_REG0 0x17908
+#define ixPB0_RX_LANE15_CTRL_REG0 0x17A00
+#define ixPB0_RX_LANE15_SCI_STAT_OVRD_REG0 0x17A08
+#define ixPB0_RX_LANE1_CTRL_REG0 0x16480
+#define ixPB0_RX_LANE1_SCI_STAT_OVRD_REG0 0x16488
+#define ixPB0_RX_LANE2_CTRL_REG0 0x16500
+#define ixPB0_RX_LANE2_SCI_STAT_OVRD_REG0 0x16508
+#define ixPB0_RX_LANE3_CTRL_REG0 0x16600
+#define ixPB0_RX_LANE3_SCI_STAT_OVRD_REG0 0x16608
+#define ixPB0_RX_LANE4_CTRL_REG0 0x16800
+#define ixPB0_RX_LANE4_SCI_STAT_OVRD_REG0 0x16848
+#define ixPB0_RX_LANE5_CTRL_REG0 0x16880
+#define ixPB0_RX_LANE5_SCI_STAT_OVRD_REG0 0x16888
+#define ixPB0_RX_LANE6_CTRL_REG0 0x16900
+#define ixPB0_RX_LANE6_SCI_STAT_OVRD_REG0 0x16908
+#define ixPB0_RX_LANE7_CTRL_REG0 0x16A00
+#define ixPB0_RX_LANE7_SCI_STAT_OVRD_REG0 0x16A08
+#define ixPB0_RX_LANE8_CTRL_REG0 0x17440
+#define ixPB0_RX_LANE8_SCI_STAT_OVRD_REG0 0x17448
+#define ixPB0_RX_LANE9_CTRL_REG0 0x17480
+#define ixPB0_RX_LANE9_SCI_STAT_OVRD_REG0 0x17488
+#define ixPB0_STRAP_GLB_REG0 0x12020
+#define ixPB0_STRAP_PLL_REG0 0x12030
+#define ixPB0_STRAP_RX_REG0 0x12028
+#define ixPB0_STRAP_RX_REG1 0x1202C
+#define ixPB0_STRAP_TX_REG0 0x12024
+#define ixPB0_TX_GLB_COEFF_ACCEPT_TABLE_REG0 0x18014
+#define ixPB0_TX_GLB_COEFF_ACCEPT_TABLE_REG1 0x18018
+#define ixPB0_TX_GLB_COEFF_ACCEPT_TABLE_REG2 0x1801C
+#define ixPB0_TX_GLB_COEFF_ACCEPT_TABLE_REG3 0x18020
+#define ixPB0_TX_GLB_CTRL_REG0 0x18000
+#define ixPB0_TX_GLB_LANE_SKEW_CTRL 0x18004
+#define ixPB0_TX_GLB_OVRD_REG0 0x18030
+#define ixPB0_TX_GLB_OVRD_REG1 0x18034
+#define ixPB0_TX_GLB_OVRD_REG2 0x18038
+#define ixPB0_TX_GLB_OVRD_REG3 0x1803C
+#define ixPB0_TX_GLB_OVRD_REG4 0x18040
+#define ixPB0_TX_GLB_SCI_STAT_OVRD_REG0 0x18010
+#define ixPB0_TX_LANE0_CTRL_REG0 0x18440
+#define ixPB0_TX_LANE0_OVRD_REG0 0x18444
+#define ixPB0_TX_LANE0_SCI_STAT_OVRD_REG0 0x18448
+#define ixPB0_TX_LANE10_CTRL_REG0 0x19500
+#define ixPB0_TX_LANE10_OVRD_REG0 0x19504
+#define ixPB0_TX_LANE10_SCI_STAT_OVRD_REG0 0x19508
+#define ixPB0_TX_LANE11_CTRL_REG0 0x19600
+#define ixPB0_TX_LANE11_OVRD_REG0 0x19604
+#define ixPB0_TX_LANE11_SCI_STAT_OVRD_REG0 0x19608
+#define ixPB0_TX_LANE12_CTRL_REG0 0x19840
+#define ixPB0_TX_LANE12_OVRD_REG0 0x19844
+#define ixPB0_TX_LANE12_SCI_STAT_OVRD_REG0 0x19848
+#define ixPB0_TX_LANE13_CTRL_REG0 0x19880
+#define ixPB0_TX_LANE13_OVRD_REG0 0x19884
+#define ixPB0_TX_LANE13_SCI_STAT_OVRD_REG0 0x19888
+#define ixPB0_TX_LANE14_CTRL_REG0 0x19900
+#define ixPB0_TX_LANE14_OVRD_REG0 0x19904
+#define ixPB0_TX_LANE14_SCI_STAT_OVRD_REG0 0x19908
+#define ixPB0_TX_LANE15_CTRL_REG0 0x19A00
+#define ixPB0_TX_LANE15_OVRD_REG0 0x19A04
+#define ixPB0_TX_LANE15_SCI_STAT_OVRD_REG0 0x19A08
+#define ixPB0_TX_LANE1_CTRL_REG0 0x18480
+#define ixPB0_TX_LANE1_OVRD_REG0 0x18484
+#define ixPB0_TX_LANE1_SCI_STAT_OVRD_REG0 0x18488
+#define ixPB0_TX_LANE2_CTRL_REG0 0x18500
+#define ixPB0_TX_LANE2_OVRD_REG0 0x18504
+#define ixPB0_TX_LANE2_SCI_STAT_OVRD_REG0 0x18508
+#define ixPB0_TX_LANE3_CTRL_REG0 0x18600
+#define ixPB0_TX_LANE3_OVRD_REG0 0x18604
+#define ixPB0_TX_LANE3_SCI_STAT_OVRD_REG0 0x18608
+#define ixPB0_TX_LANE4_CTRL_REG0 0x18840
+#define ixPB0_TX_LANE4_OVRD_REG0 0x18844
+#define ixPB0_TX_LANE4_SCI_STAT_OVRD_REG0 0x18848
+#define ixPB0_TX_LANE5_CTRL_REG0 0x18880
+#define ixPB0_TX_LANE5_OVRD_REG0 0x18884
+#define ixPB0_TX_LANE5_SCI_STAT_OVRD_REG0 0x18888
+#define ixPB0_TX_LANE6_CTRL_REG0 0x18900
+#define ixPB0_TX_LANE6_OVRD_REG0 0x18904
+#define ixPB0_TX_LANE6_SCI_STAT_OVRD_REG0 0x18908
+#define ixPB0_TX_LANE7_CTRL_REG0 0x18A00
+#define ixPB0_TX_LANE7_OVRD_REG0 0x18A04
+#define ixPB0_TX_LANE7_SCI_STAT_OVRD_REG0 0x18A08
+#define ixPB0_TX_LANE8_CTRL_REG0 0x19440
+#define ixPB0_TX_LANE8_OVRD_REG0 0x19444
+#define ixPB0_TX_LANE8_SCI_STAT_OVRD_REG0 0x19448
+#define ixPB0_TX_LANE9_CTRL_REG0 0x19480
+#define ixPB0_TX_LANE9_OVRD_REG0 0x19484
+#define ixPB0_TX_LANE9_SCI_STAT_OVRD_REG0 0x19488
+#define ixPB1_DFT_DEBUG_CTRL_REG0 0x1300C
+#define ixPB1_DFT_JIT_INJ_REG0 0x13000
+#define ixPB1_DFT_JIT_INJ_REG1 0x13004
+#define ixPB1_DFT_JIT_INJ_REG2 0x13008
+#define ixPB1_GLB_CTRL_REG0 0x10004
+#define ixPB1_GLB_CTRL_REG1 0x10008
+#define ixPB1_GLB_CTRL_REG2 0x1000C
+#define ixPB1_GLB_CTRL_REG3 0x10010
+#define ixPB1_GLB_CTRL_REG4 0x10014
+#define ixPB1_GLB_CTRL_REG5 0x10018
+#define ixPB1_GLB_OVRD_REG0 0x10030
+#define ixPB1_GLB_OVRD_REG1 0x10034
+#define ixPB1_GLB_OVRD_REG2 0x10038
+#define ixPB1_GLB_SCI_STAT_OVRD_REG0 0x1001C
+#define ixPB1_GLB_SCI_STAT_OVRD_REG1 0x10020
+#define ixPB1_GLB_SCI_STAT_OVRD_REG2 0x10024
+#define ixPB1_GLB_SCI_STAT_OVRD_REG3 0x10028
+#define ixPB1_GLB_SCI_STAT_OVRD_REG4 0x1002C
+#define ixPB1_HW_DEBUG 0x12004
+#define ixPB1_PIF_CNTL 0x0010
+#define ixPB1_PIF_CNTL2 0x0014
+#define ixPB1_PIF_HW_DEBUG 0x0002
+#define ixPB1_PIF_PAIRING 0x0011
+#define ixPB1_PIF_PDNB_OVERRIDE_0 0x0020
+#define ixPB1_PIF_PDNB_OVERRIDE_10 0x0032
+#define ixPB1_PIF_PDNB_OVERRIDE_1 0x0021
+#define ixPB1_PIF_PDNB_OVERRIDE_11 0x0033
+#define ixPB1_PIF_PDNB_OVERRIDE_12 0x0034
+#define ixPB1_PIF_PDNB_OVERRIDE_13 0x0035
+#define ixPB1_PIF_PDNB_OVERRIDE_14 0x0036
+#define ixPB1_PIF_PDNB_OVERRIDE_15 0x0037
+#define ixPB1_PIF_PDNB_OVERRIDE_2 0x0022
+#define ixPB1_PIF_PDNB_OVERRIDE_3 0x0023
+#define ixPB1_PIF_PDNB_OVERRIDE_4 0x0024
+#define ixPB1_PIF_PDNB_OVERRIDE_5 0x0025
+#define ixPB1_PIF_PDNB_OVERRIDE_6 0x0026
+#define ixPB1_PIF_PDNB_OVERRIDE_7 0x0027
+#define ixPB1_PIF_PDNB_OVERRIDE_8 0x0030
+#define ixPB1_PIF_PDNB_OVERRIDE_9 0x0031
+#define ixPB1_PIF_PWRDOWN_0 0x0012
+#define ixPB1_PIF_PWRDOWN_1 0x0013
+#define ixPB1_PIF_PWRDOWN_2 0x0017
+#define ixPB1_PIF_PWRDOWN_3 0x0018
+#define ixPB1_PIF_SC_CTL 0x0016
+#define ixPB1_PIF_SCRATCH 0x0001
+#define ixPB1_PIF_SEQ_STATUS_0 0x0028
+#define ixPB1_PIF_SEQ_STATUS_10 0x003A
+#define ixPB1_PIF_SEQ_STATUS_1 0x0029
+#define ixPB1_PIF_SEQ_STATUS_11 0x003B
+#define ixPB1_PIF_SEQ_STATUS_12 0x003C
+#define ixPB1_PIF_SEQ_STATUS_13 0x003D
+#define ixPB1_PIF_SEQ_STATUS_14 0x003E
+#define ixPB1_PIF_SEQ_STATUS_15 0x003F
+#define ixPB1_PIF_SEQ_STATUS_2 0x002A
+#define ixPB1_PIF_SEQ_STATUS_3 0x002B
+#define ixPB1_PIF_SEQ_STATUS_4 0x002C
+#define ixPB1_PIF_SEQ_STATUS_5 0x002D
+#define ixPB1_PIF_SEQ_STATUS_6 0x002E
+#define ixPB1_PIF_SEQ_STATUS_7 0x002F
+#define ixPB1_PIF_SEQ_STATUS_8 0x0038
+#define ixPB1_PIF_SEQ_STATUS_9 0x0039
+#define ixPB1_PIF_TXPHYSTATUS 0x0015
+#define ixPB1_PLL_LC0_CTRL_REG0 0x14480
+#define ixPB1_PLL_LC0_OVRD_REG0 0x14490
+#define ixPB1_PLL_LC0_OVRD_REG1 0x14494
+#define ixPB1_PLL_LC0_SCI_STAT_OVRD_REG0 0x14500
+#define ixPB1_PLL_LC1_SCI_STAT_OVRD_REG0 0x14504
+#define ixPB1_PLL_LC2_SCI_STAT_OVRD_REG0 0x14508
+#define ixPB1_PLL_LC3_SCI_STAT_OVRD_REG0 0x1450C
+#define ixPB1_PLL_RO0_CTRL_REG0 0x14440
+#define ixPB1_PLL_RO0_OVRD_REG0 0x14450
+#define ixPB1_PLL_RO0_OVRD_REG1 0x14454
+#define ixPB1_PLL_RO0_SCI_STAT_OVRD_REG0 0x14460
+#define ixPB1_PLL_RO1_SCI_STAT_OVRD_REG0 0x14464
+#define ixPB1_PLL_RO2_SCI_STAT_OVRD_REG0 0x14468
+#define ixPB1_PLL_RO3_SCI_STAT_OVRD_REG0 0x1446C
+#define ixPB1_PLL_RO_GLB_CTRL_REG0 0x14000
+#define ixPB1_PLL_RO_GLB_OVRD_REG0 0x14010
+#define ixPB1_RX_GLB_CTRL_REG0 0x16000
+#define ixPB1_RX_GLB_CTRL_REG1 0x16004
+#define ixPB1_RX_GLB_CTRL_REG2 0x16008
+#define ixPB1_RX_GLB_CTRL_REG3 0x1600C
+#define ixPB1_RX_GLB_CTRL_REG4 0x16010
+#define ixPB1_RX_GLB_CTRL_REG5 0x16014
+#define ixPB1_RX_GLB_CTRL_REG6 0x16018
+#define ixPB1_RX_GLB_CTRL_REG7 0x1601C
+#define ixPB1_RX_GLB_CTRL_REG8 0x16020
+#define ixPB1_RX_GLB_OVRD_REG0 0x16030
+#define ixPB1_RX_GLB_OVRD_REG1 0x16034
+#define ixPB1_RX_GLB_SCI_STAT_OVRD_REG0 0x16028
+#define ixPB1_RX_LANE0_CTRL_REG0 0x16440
+#define ixPB1_RX_LANE0_SCI_STAT_OVRD_REG0 0x16448
+#define ixPB1_RX_LANE10_CTRL_REG0 0x17500
+#define ixPB1_RX_LANE10_SCI_STAT_OVRD_REG0 0x17508
+#define ixPB1_RX_LANE11_CTRL_REG0 0x17600
+#define ixPB1_RX_LANE11_SCI_STAT_OVRD_REG0 0x17608
+#define ixPB1_RX_LANE12_CTRL_REG0 0x17840
+#define ixPB1_RX_LANE12_SCI_STAT_OVRD_REG0 0x17848
+#define ixPB1_RX_LANE13_CTRL_REG0 0x17880
+#define ixPB1_RX_LANE13_SCI_STAT_OVRD_REG0 0x17888
+#define ixPB1_RX_LANE14_CTRL_REG0 0x17900
+#define ixPB1_RX_LANE14_SCI_STAT_OVRD_REG0 0x17908
+#define ixPB1_RX_LANE15_CTRL_REG0 0x17A00
+#define ixPB1_RX_LANE15_SCI_STAT_OVRD_REG0 0x17A08
+#define ixPB1_RX_LANE1_CTRL_REG0 0x16480
+#define ixPB1_RX_LANE1_SCI_STAT_OVRD_REG0 0x16488
+#define ixPB1_RX_LANE2_CTRL_REG0 0x16500
+#define ixPB1_RX_LANE2_SCI_STAT_OVRD_REG0 0x16508
+#define ixPB1_RX_LANE3_CTRL_REG0 0x16600
+#define ixPB1_RX_LANE3_SCI_STAT_OVRD_REG0 0x16608
+#define ixPB1_RX_LANE4_CTRL_REG0 0x16800
+#define ixPB1_RX_LANE4_SCI_STAT_OVRD_REG0 0x16848
+#define ixPB1_RX_LANE5_CTRL_REG0 0x16880
+#define ixPB1_RX_LANE5_SCI_STAT_OVRD_REG0 0x16888
+#define ixPB1_RX_LANE6_CTRL_REG0 0x16900
+#define ixPB1_RX_LANE6_SCI_STAT_OVRD_REG0 0x16908
+#define ixPB1_RX_LANE7_CTRL_REG0 0x16A00
+#define ixPB1_RX_LANE7_SCI_STAT_OVRD_REG0 0x16A08
+#define ixPB1_RX_LANE8_CTRL_REG0 0x17440
+#define ixPB1_RX_LANE8_SCI_STAT_OVRD_REG0 0x17448
+#define ixPB1_RX_LANE9_CTRL_REG0 0x17480
+#define ixPB1_RX_LANE9_SCI_STAT_OVRD_REG0 0x17488
+#define ixPB1_STRAP_GLB_REG0 0x12020
+#define ixPB1_STRAP_PLL_REG0 0x12030
+#define ixPB1_STRAP_RX_REG0 0x12028
+#define ixPB1_STRAP_RX_REG1 0x1202C
+#define ixPB1_STRAP_TX_REG0 0x12024
+#define ixPB1_TX_GLB_COEFF_ACCEPT_TABLE_REG0 0x18014
+#define ixPB1_TX_GLB_COEFF_ACCEPT_TABLE_REG1 0x18018
+#define ixPB1_TX_GLB_COEFF_ACCEPT_TABLE_REG2 0x1801C
+#define ixPB1_TX_GLB_COEFF_ACCEPT_TABLE_REG3 0x18020
+#define ixPB1_TX_GLB_CTRL_REG0 0x18000
+#define ixPB1_TX_GLB_LANE_SKEW_CTRL 0x18004
+#define ixPB1_TX_GLB_OVRD_REG0 0x18030
+#define ixPB1_TX_GLB_OVRD_REG1 0x18034
+#define ixPB1_TX_GLB_OVRD_REG2 0x18038
+#define ixPB1_TX_GLB_OVRD_REG3 0x1803C
+#define ixPB1_TX_GLB_OVRD_REG4 0x18040
+#define ixPB1_TX_GLB_SCI_STAT_OVRD_REG0 0x18010
+#define ixPB1_TX_LANE0_CTRL_REG0 0x18440
+#define ixPB1_TX_LANE0_OVRD_REG0 0x18444
+#define ixPB1_TX_LANE0_SCI_STAT_OVRD_REG0 0x18448
+#define ixPB1_TX_LANE10_CTRL_REG0 0x19500
+#define ixPB1_TX_LANE10_OVRD_REG0 0x19504
+#define ixPB1_TX_LANE10_SCI_STAT_OVRD_REG0 0x19508
+#define ixPB1_TX_LANE11_CTRL_REG0 0x19600
+#define ixPB1_TX_LANE11_OVRD_REG0 0x19604
+#define ixPB1_TX_LANE11_SCI_STAT_OVRD_REG0 0x19608
+#define ixPB1_TX_LANE12_CTRL_REG0 0x19840
+#define ixPB1_TX_LANE12_OVRD_REG0 0x19844
+#define ixPB1_TX_LANE12_SCI_STAT_OVRD_REG0 0x19848
+#define ixPB1_TX_LANE13_CTRL_REG0 0x19880
+#define ixPB1_TX_LANE13_OVRD_REG0 0x19884
+#define ixPB1_TX_LANE13_SCI_STAT_OVRD_REG0 0x19888
+#define ixPB1_TX_LANE14_CTRL_REG0 0x19900
+#define ixPB1_TX_LANE14_OVRD_REG0 0x19904
+#define ixPB1_TX_LANE14_SCI_STAT_OVRD_REG0 0x19908
+#define ixPB1_TX_LANE15_CTRL_REG0 0x19A00
+#define ixPB1_TX_LANE15_OVRD_REG0 0x19A04
+#define ixPB1_TX_LANE15_SCI_STAT_OVRD_REG0 0x19A08
+#define ixPB1_TX_LANE1_CTRL_REG0 0x18480
+#define ixPB1_TX_LANE1_OVRD_REG0 0x18484
+#define ixPB1_TX_LANE1_SCI_STAT_OVRD_REG0 0x18488
+#define ixPB1_TX_LANE2_CTRL_REG0 0x18500
+#define ixPB1_TX_LANE2_OVRD_REG0 0x18504
+#define ixPB1_TX_LANE2_SCI_STAT_OVRD_REG0 0x18508
+#define ixPB1_TX_LANE3_CTRL_REG0 0x18600
+#define ixPB1_TX_LANE3_OVRD_REG0 0x18604
+#define ixPB1_TX_LANE3_SCI_STAT_OVRD_REG0 0x18608
+#define ixPB1_TX_LANE4_CTRL_REG0 0x18840
+#define ixPB1_TX_LANE4_OVRD_REG0 0x18844
+#define ixPB1_TX_LANE4_SCI_STAT_OVRD_REG0 0x18848
+#define ixPB1_TX_LANE5_CTRL_REG0 0x18880
+#define ixPB1_TX_LANE5_OVRD_REG0 0x18884
+#define ixPB1_TX_LANE5_SCI_STAT_OVRD_REG0 0x18888
+#define ixPB1_TX_LANE6_CTRL_REG0 0x18900
+#define ixPB1_TX_LANE6_OVRD_REG0 0x18904
+#define ixPB1_TX_LANE6_SCI_STAT_OVRD_REG0 0x18908
+#define ixPB1_TX_LANE7_CTRL_REG0 0x18A00
+#define ixPB1_TX_LANE7_OVRD_REG0 0x18A04
+#define ixPB1_TX_LANE7_SCI_STAT_OVRD_REG0 0x18A08
+#define ixPB1_TX_LANE8_CTRL_REG0 0x19440
+#define ixPB1_TX_LANE8_OVRD_REG0 0x19444
+#define ixPB1_TX_LANE8_SCI_STAT_OVRD_REG0 0x19448
+#define ixPB1_TX_LANE9_CTRL_REG0 0x19480
+#define ixPB1_TX_LANE9_OVRD_REG0 0x19484
+#define ixPB1_TX_LANE9_SCI_STAT_OVRD_REG0 0x19488
+#define ixPCIE_BUS_CNTL 0x0021
+#define ixPCIE_CFG_CNTL 0x003C
+#define ixPCIE_CI_CNTL 0x0020
+#define ixPCIE_CNTL 0x0010
+#define ixPCIE_CNTL2 0x001C
+#define ixPCIE_CONFIG_CNTL 0x0011
+#define ixPCIE_DEBUG_CNTL 0x0012
+#define ixPCIE_ERR_CNTL 0x006A
+#define ixPCIE_F0_DPA_CAP 0x00E0
+#define ixPCIE_F0_DPA_CNTL 0x00E5
+#define ixPCIE_F0_DPA_LATENCY_INDICATOR 0x00E4
+#define ixPCIE_F0_DPA_SUBSTATE_PWR_ALLOC_0 0x00E7
+#define ixPCIE_F0_DPA_SUBSTATE_PWR_ALLOC_1 0x00E8
+#define ixPCIE_F0_DPA_SUBSTATE_PWR_ALLOC_2 0x00E9
+#define ixPCIE_F0_DPA_SUBSTATE_PWR_ALLOC_3 0x00EA
+#define ixPCIE_F0_DPA_SUBSTATE_PWR_ALLOC_4 0x00EB
+#define ixPCIE_F0_DPA_SUBSTATE_PWR_ALLOC_5 0x00EC
+#define ixPCIE_F0_DPA_SUBSTATE_PWR_ALLOC_6 0x00ED
+#define ixPCIE_F0_DPA_SUBSTATE_PWR_ALLOC_7 0x00EE
+#define ixPCIE_FC_CPL 0x0062
+#define ixPCIE_FC_NP 0x0061
+#define ixPCIE_FC_P 0x0060
+#define ixPCIE_HW_DEBUG 0x0002
+#define ixPCIE_I2C_REG_ADDR_EXPAND 0x003A
+#define ixPCIE_I2C_REG_DATA 0x003B
+#define ixPCIE_INT_CNTL 0x001A
+#define ixPCIE_INT_STATUS 0x001B
+#define ixPCIE_LC_BEST_EQ_SETTINGS 0x00B9
+#define ixPCIE_LC_BW_CHANGE_CNTL 0x00B2
+#define ixPCIE_LC_CDR_CNTL 0x00B3
+#define ixPCIE_LC_CNTL 0x00A0
+#define ixPCIE_LC_CNTL2 0x00B1
+#define ixPCIE_LC_CNTL3 0x00B5
+#define ixPCIE_LC_CNTL4 0x00B6
+#define ixPCIE_LC_CNTL5 0x00B7
+#define ixPCIE_LC_FORCE_COEFF 0x00B8
+#define ixPCIE_LC_FORCE_EQ_REQ_COEFF 0x00BA
+#define ixPCIE_LC_LANE_CNTL 0x00B4
+#define ixPCIE_LC_LINK_WIDTH_CNTL 0x00A2
+#define ixPCIE_LC_N_FTS_CNTL 0x00A3
+#define ixPCIE_LC_SPEED_CNTL 0x00A4
+#define ixPCIE_LC_STATE0 0x00A5
+#define ixPCIE_LC_STATE10 0x0026
+#define ixPCIE_LC_STATE1 0x00A6
+#define ixPCIE_LC_STATE11 0x0027
+#define ixPCIE_LC_STATE2 0x00A7
+#define ixPCIE_LC_STATE3 0x00A8
+#define ixPCIE_LC_STATE4 0x00A9
+#define ixPCIE_LC_STATE5 0x00AA
+#define ixPCIE_LC_STATE6 0x0022
+#define ixPCIE_LC_STATE7 0x0023
+#define ixPCIE_LC_STATE8 0x0024
+#define ixPCIE_LC_STATE9 0x0025
+#define ixPCIE_LC_STATUS1 0x0028
+#define ixPCIE_LC_STATUS2 0x0029
+#define ixPCIE_LC_TRAINING_CNTL 0x00A1
+#define ixPCIE_P_BUF_STATUS 0x0041
+#define ixPCIE_P_CNTL 0x0040
+#define ixPCIE_P_DECODER_STATUS 0x0042
+#define ixPCIE_PERF_CNTL_EVENT0_PORT_SEL 0x0093
+#define ixPCIE_PERF_CNTL_EVENT1_PORT_SEL 0x0094
+#define ixPCIE_PERF_CNTL_MST_C_CLK 0x0087
+#define ixPCIE_PERF_CNTL_MST_R_CLK 0x0084
+#define ixPCIE_PERF_CNTL_SLV_NS_C_CLK 0x0090
+#define ixPCIE_PERF_CNTL_SLV_R_CLK 0x008A
+#define ixPCIE_PERF_CNTL_SLV_S_C_CLK 0x008D
+#define ixPCIE_PERF_CNTL_TXCLK 0x0081
+#define ixPCIE_PERF_CNTL_TXCLK2 0x0095
+#define ixPCIE_PERF_COUNT0_MST_C_CLK 0x0088
+#define ixPCIE_PERF_COUNT0_MST_R_CLK 0x0085
+#define ixPCIE_PERF_COUNT0_SLV_NS_C_CLK 0x0091
+#define ixPCIE_PERF_COUNT0_SLV_R_CLK 0x008B
+#define ixPCIE_PERF_COUNT0_SLV_S_C_CLK 0x008E
+#define ixPCIE_PERF_COUNT0_TXCLK 0x0082
+#define ixPCIE_PERF_COUNT0_TXCLK2 0x0096
+#define ixPCIE_PERF_COUNT1_MST_C_CLK 0x0089
+#define ixPCIE_PERF_COUNT1_MST_R_CLK 0x0086
+#define ixPCIE_PERF_COUNT1_SLV_NS_C_CLK 0x0092
+#define ixPCIE_PERF_COUNT1_SLV_R_CLK 0x008C
+#define ixPCIE_PERF_COUNT1_SLV_S_C_CLK 0x008F
+#define ixPCIE_PERF_COUNT1_TXCLK 0x0083
+#define ixPCIE_PERF_COUNT1_TXCLK2 0x0097
+#define ixPCIE_PERF_COUNT_CNTL 0x0080
+#define ixPCIEP_HW_DEBUG 0x0002
+#define ixPCIE_P_MISC_STATUS 0x0043
+#define ixPCIEP_PORT_CNTL 0x0010
+#define ixPCIE_P_PORT_LANE_STATUS 0x0050
+#define ixPCIE_PRBS_CLR 0x00C8
+#define ixPCIE_PRBS_ERRCNT_0 0x00D0
+#define ixPCIE_PRBS_ERRCNT_10 0x00DA
+#define ixPCIE_PRBS_ERRCNT_1 0x00D1
+#define ixPCIE_PRBS_ERRCNT_11 0x00DB
+#define ixPCIE_PRBS_ERRCNT_12 0x00DC
+#define ixPCIE_PRBS_ERRCNT_13 0x00DD
+#define ixPCIE_PRBS_ERRCNT_14 0x00DE
+#define ixPCIE_PRBS_ERRCNT_15 0x00DF
+#define ixPCIE_PRBS_ERRCNT_2 0x00D2
+#define ixPCIE_PRBS_ERRCNT_3 0x00D3
+#define ixPCIE_PRBS_ERRCNT_4 0x00D4
+#define ixPCIE_PRBS_ERRCNT_5 0x00D5
+#define ixPCIE_PRBS_ERRCNT_6 0x00D6
+#define ixPCIE_PRBS_ERRCNT_7 0x00D7
+#define ixPCIE_PRBS_ERRCNT_8 0x00D8
+#define ixPCIE_PRBS_ERRCNT_9 0x00D9
+#define ixPCIE_PRBS_FREERUN 0x00CB
+#define ixPCIE_PRBS_HI_BITCNT 0x00CF
+#define ixPCIE_PRBS_LO_BITCNT 0x00CE
+#define ixPCIE_PRBS_MISC 0x00CC
+#define ixPCIE_PRBS_STATUS1 0x00C9
+#define ixPCIE_PRBS_STATUS2 0x00CA
+#define ixPCIE_PRBS_USER_PATTERN 0x00CD
+#define ixPCIE_P_RCV_L0S_FTS_DET 0x0050
+#define ixPCIEP_RESERVED 0x0000
+#define ixPCIEP_SCRATCH 0x0001
+#define ixPCIEP_STRAP_LC 0x00C0
+#define ixPCIEP_STRAP_MISC 0x00C1
+#define ixPCIE_RESERVED 0x0000
+#define ixPCIE_RX_CNTL 0x0070
+#define ixPCIE_RX_CNTL2 0x001D
+#define ixPCIE_RX_CNTL3 0x0074
+#define ixPCIE_RX_CREDITS_ALLOCATED_CPL 0x0082
+#define ixPCIE_RX_CREDITS_ALLOCATED_NP 0x0081
+#define ixPCIE_RX_CREDITS_ALLOCATED_P 0x0080
+#define ixPCIE_RX_EXPECTED_SEQNUM 0x0071
+#define ixPCIE_RX_LAST_TLP0 0x0031
+#define ixPCIE_RX_LAST_TLP1 0x0032
+#define ixPCIE_RX_LAST_TLP2 0x0033
+#define ixPCIE_RX_LAST_TLP3 0x0034
+#define ixPCIE_RX_NUM_NAK 0x000E
+#define ixPCIE_RX_NUM_NAK_GENERATED 0x000F
+#define ixPCIE_RX_VENDOR_SPECIFIC 0x0072
+#define ixPCIE_SCRATCH 0x0001
+#define ixPCIE_STRAP_F0 0x00B0
+#define ixPCIE_STRAP_F1 0x00B1
+#define ixPCIE_STRAP_F2 0x00B2
+#define ixPCIE_STRAP_F3 0x00B3
+#define ixPCIE_STRAP_F4 0x00B4
+#define ixPCIE_STRAP_F5 0x00B5
+#define ixPCIE_STRAP_F6 0x00B6
+#define ixPCIE_STRAP_F7 0x00B7
+#define ixPCIE_STRAP_I2C_BD 0x00C4
+#define ixPCIE_STRAP_MISC 0x00C0
+#define ixPCIE_STRAP_MISC2 0x00C1
+#define ixPCIE_STRAP_PI 0x00C2
+#define ixPCIE_TX_ACK_LATENCY_LIMIT 0x0026
+#define ixPCIE_TX_CNTL 0x0020
+#define ixPCIE_TX_CREDITS_ADVT_CPL 0x0032
+#define ixPCIE_TX_CREDITS_ADVT_NP 0x0031
+#define ixPCIE_TX_CREDITS_ADVT_P 0x0030
+#define ixPCIE_TX_CREDITS_FCU_THRESHOLD 0x0037
+#define ixPCIE_TX_CREDITS_INIT_CPL 0x0035
+#define ixPCIE_TX_CREDITS_INIT_NP 0x0034
+#define ixPCIE_TX_CREDITS_INIT_P 0x0033
+#define ixPCIE_TX_CREDITS_STATUS 0x0036
+#define ixPCIE_TX_LAST_TLP0 0x0035
+#define ixPCIE_TX_LAST_TLP1 0x0036
+#define ixPCIE_TX_LAST_TLP2 0x0037
+#define ixPCIE_TX_LAST_TLP3 0x0038
+#define ixPCIE_TX_REPLAY 0x0025
+#define ixPCIE_TX_REQUESTER_ID 0x0021
+#define ixPCIE_TX_REQUEST_NUM_CNTL 0x0023
+#define ixPCIE_TX_SEQ 0x0024
+#define ixPCIE_TX_VENDOR_SPECIFIC 0x0022
+#define ixPCIE_WPR_CNTL 0x0030
+#define mmBACO_CNTL 0x14E5
+#define mmBF_ANA_ISO_CNTL 0x14C7
+#define mmBIF_BACO_DEBUG 0x14DF
+#define mmBIF_BACO_DEBUG_LATCH 0x14DC
+#define mmBIF_BACO_MSIC 0x14DE
+#define mmBIF_BUSNUM_CNTL1 0x1525
+#define mmBIF_BUSNUM_CNTL2 0x152B
+#define mmBIF_BUSNUM_LIST0 0x1526
+#define mmBIF_BUSNUM_LIST1 0x1527
+#define mmBIF_BUSY_DELAY_CNTR 0x1529
+#define mmBIF_CLK_PDWN_DELAY_TIMER 0x151F
+#define mmBIF_DEBUG_CNTL 0x151C
+#define mmBIF_DEBUG_MUX 0x151D
+#define mmBIF_DEBUG_OUT 0x151E
+#define mmBIF_DEVFUNCNUM_LIST0 0x14E8
+#define mmBIF_DEVFUNCNUM_LIST1 0x14E7
+#define mmBIF_FB_EN 0x1524
+#define mmBIF_FEATURES_CONTROL_MISC 0x14C2
+#define mmBIF_PERFCOUNTER0_RESULT 0x152D
+#define mmBIF_PERFCOUNTER1_RESULT 0x152E
+#define mmBIF_PERFMON_CNTL 0x152C
+#define mmBIF_PIF_TXCLK_SWITCH_TIMER 0x152F
+#define mmBIF_RESET_EN 0x1511
+#define mmBIF_SCRATCH0 0x150E
+#define mmBIF_SCRATCH1 0x150F
+#define mmBIF_SSA_DISP_LOWER 0x14D2
+#define mmBIF_SSA_DISP_UPPER 0x14D3
+#define mmBIF_SSA_GFX0_LOWER 0x14CA
+#define mmBIF_SSA_GFX0_UPPER 0x14CB
+#define mmBIF_SSA_GFX1_LOWER 0x14CC
+#define mmBIF_SSA_GFX1_UPPER 0x14CD
+#define mmBIF_SSA_GFX2_LOWER 0x14CE
+#define mmBIF_SSA_GFX2_UPPER 0x14CF
+#define mmBIF_SSA_GFX3_LOWER 0x14D0
+#define mmBIF_SSA_GFX3_UPPER 0x14D1
+#define mmBIF_SSA_MC_LOWER 0x14D4
+#define mmBIF_SSA_MC_UPPER 0x14D5
+#define mmBIF_SSA_PWR_STATUS 0x14C8
+#define mmBIF_XDMA_HI 0x14C1
+#define mmBIF_XDMA_LO 0x14C0
+#define mmBIOS_SCRATCH_0 0x05C9
+#define mmBIOS_SCRATCH_10 0x05D3
+#define mmBIOS_SCRATCH_1 0x05CA
+#define mmBIOS_SCRATCH_11 0x05D4
+#define mmBIOS_SCRATCH_12 0x05D5
+#define mmBIOS_SCRATCH_13 0x05D6
+#define mmBIOS_SCRATCH_14 0x05D7
+#define mmBIOS_SCRATCH_15 0x05D8
+#define mmBIOS_SCRATCH_2 0x05CB
+#define mmBIOS_SCRATCH_3 0x05CC
+#define mmBIOS_SCRATCH_4 0x05CD
+#define mmBIOS_SCRATCH_5 0x05CE
+#define mmBIOS_SCRATCH_6 0x05CF
+#define mmBIOS_SCRATCH_7 0x05D0
+#define mmBIOS_SCRATCH_8 0x05D1
+#define mmBIOS_SCRATCH_9 0x05D2
+#define mmBUS_CNTL 0x1508
+#define mmCAPTURE_HOST_BUSNUM 0x153C
+#define mmCLKREQB_PAD_CNTL 0x1521
+#define mmCONFIG_APER_SIZE 0x150C
+#define mmCONFIG_CNTL 0x1509
+#define mmCONFIG_F0_BASE 0x150B
+#define mmCONFIG_MEMSIZE 0x150A
+#define mmCONFIG_REG_APER_SIZE 0x150D
+#define mmHDP_MEM_COHERENCY_FLUSH_CNTL 0x1520
+#define mmHDP_REG_COHERENCY_FLUSH_CNTL 0x1528
+#define mmHOST_BUSNUM 0x153D
+#define mmHW_DEBUG 0x1515
+#define mmIMPCTL_RESET 0x14F5
+#define mmINTERRUPT_CNTL 0x151A
+#define mmINTERRUPT_CNTL2 0x151B
+#define mmMASTER_CREDIT_CNTL 0x1516
+#define mmMM_CFGREGS_CNTL 0x1513
+#define mmMM_DATA 0x0001
+#define mmMM_INDEX 0x0000
+#define mmMM_INDEX_HI 0x0006
+#define mmNEW_REFCLKB_TIMER 0x14EA
+#define mmNEW_REFCLKB_TIMER_1 0x14E9
+#define mmPCIE_DATA 0x000D
+#define mmPCIE_INDEX 0x000C
+#define mmPEER0_FB_OFFSET_HI 0x14F3
+#define mmPEER0_FB_OFFSET_LO 0x14F2
+#define mmPEER1_FB_OFFSET_HI 0x14F1
+#define mmPEER1_FB_OFFSET_LO 0x14F0
+#define mmPEER2_FB_OFFSET_HI 0x14EF
+#define mmPEER2_FB_OFFSET_LO 0x14EE
+#define mmPEER3_FB_OFFSET_HI 0x14ED
+#define mmPEER3_FB_OFFSET_LO 0x14EC
+#define mmPEER_REG_RANGE0 0x153E
+#define mmPEER_REG_RANGE1 0x153F
+#define mmSLAVE_HANG_ERROR 0x153B
+#define mmSLAVE_HANG_PROTECTION_CNTL 0x1536
+#define mmSLAVE_REQ_CREDIT_CNTL 0x1517
+#define mmSMBCLK_PAD_CNTL 0x1523
+#define mmSMBDAT_PAD_CNTL 0x1522
+#define mmSMBUS_BACO_DUMMY 0x14C6
+
+#endif
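
The header just added mixes two offset namespaces: mm* offsets are direct MMIO registers, while ix* offsets sit behind an index/data pair and are reached by writing the index first. A sketch of that indirect idiom, assuming the mmPCIE_INDEX/mmPCIE_DATA pair defined above is the gateway (the fake mmio array stands in for the device's register file and real RREG32/WREG32 accessors):

#include <stdint.h>

#define PCIE_INDEX 0x000Cu   /* mmPCIE_INDEX from the header above */
#define PCIE_DATA  0x000Du   /* mmPCIE_DATA  from the header above */

static uint32_t mmio[0x10];  /* fake register file for illustration */

static uint32_t read_indirect(uint32_t ix_offset)
{
	mmio[PCIE_INDEX] = ix_offset;   /* WREG32(mmPCIE_INDEX, ix_offset) */
	return mmio[PCIE_DATA];         /* RREG32(mmPCIE_DATA) */
}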
diff --git a/drivers/gpu/drm/amd/include/asic_reg/bif/bif_3_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/bif/bif_3_0_sh_mask.h
new file mode 100644
index 000000000000..e94445acf3c6
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/bif/bif_3_0_sh_mask.h
@@ -0,0 +1,8127 @@
+/*
+ *
+ * Copyright (C) 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef BIF_3_0_SH_MASK_H
+#define BIF_3_0_SH_MASK_H
+
+#define BACO_CNTL__BACO_ANA_ISO_DIS_MASK 0x00000080L
+#define BACO_CNTL__BACO_ANA_ISO_DIS__SHIFT 0x00000007
+#define BACO_CNTL__BACO_BCLK_OFF_MASK 0x00000002L
+#define BACO_CNTL__BACO_BCLK_OFF__SHIFT 0x00000001
+#define BACO_CNTL__BACO_EN_MASK 0x00000001L
+#define BACO_CNTL__BACO_EN__SHIFT 0x00000000
+#define BACO_CNTL__BACO_HANG_PROTECTION_EN_MASK 0x00000020L
+#define BACO_CNTL__BACO_HANG_PROTECTION_EN__SHIFT 0x00000005
+#define BACO_CNTL__BACO_ISO_DIS_MASK 0x00000004L
+#define BACO_CNTL__BACO_ISO_DIS__SHIFT 0x00000002
+#define BACO_CNTL__BACO_MODE_MASK 0x00000040L
+#define BACO_CNTL__BACO_MODE__SHIFT 0x00000006
+#define BACO_CNTL__BACO_POWER_OFF_MASK 0x00000008L
+#define BACO_CNTL__BACO_POWER_OFF__SHIFT 0x00000003
+#define BACO_CNTL__BACO_RESET_EN_MASK 0x00000010L
+#define BACO_CNTL__BACO_RESET_EN__SHIFT 0x00000004
+#define BACO_CNTL__PWRGOOD_BF_MASK 0x00000200L
+#define BACO_CNTL__PWRGOOD_BF__SHIFT 0x00000009
+#define BACO_CNTL__PWRGOOD_DVO_MASK 0x00001000L
+#define BACO_CNTL__PWRGOOD_DVO__SHIFT 0x0000000c
+#define BACO_CNTL__PWRGOOD_GPIO_MASK 0x00000400L
+#define BACO_CNTL__PWRGOOD_GPIO__SHIFT 0x0000000a
+#define BACO_CNTL__PWRGOOD_MEM_MASK 0x00000800L
+#define BACO_CNTL__PWRGOOD_MEM__SHIFT 0x0000000b
+#define BACO_CNTL__RCU_BIF_CONFIG_DONE_MASK 0x00000100L
+#define BACO_CNTL__RCU_BIF_CONFIG_DONE__SHIFT 0x00000008
+#define BF_ANA_ISO_CNTL__BF_ANA_ISO_DIS_MASK_MASK 0x00000001L
+#define BF_ANA_ISO_CNTL__BF_ANA_ISO_DIS_MASK__SHIFT 0x00000000
+#define BF_ANA_ISO_CNTL__BF_VDDC_ISO_DIS_MASK_MASK 0x00000002L
+#define BF_ANA_ISO_CNTL__BF_VDDC_ISO_DIS_MASK__SHIFT 0x00000001
+#define BIF_BACO_DEBUG__BIF_BACO_SCANDUMP_FLG_MASK 0x00000001L
+#define BIF_BACO_DEBUG__BIF_BACO_SCANDUMP_FLG__SHIFT 0x00000000
+#define BIF_BACO_DEBUG_LATCH__BIF_BACO_LATCH_FLG_MASK 0x00000001L
+#define BIF_BACO_DEBUG_LATCH__BIF_BACO_LATCH_FLG__SHIFT 0x00000000
+#define BIF_BACO_MSIC__BIF_XTALIN_SEL_MASK 0x00000001L
+#define BIF_BACO_MSIC__BIF_XTALIN_SEL__SHIFT 0x00000000
+#define BIF_BUSNUM_CNTL1__ID_MASK_MASK 0x000000ffL
+#define BIF_BUSNUM_CNTL1__ID_MASK__SHIFT 0x00000000
+#define BIF_BUSNUM_CNTL2__AUTOUPDATE_EN_MASK 0x00000100L
+#define BIF_BUSNUM_CNTL2__AUTOUPDATE_EN__SHIFT 0x00000008
+#define BIF_BUSNUM_CNTL2__AUTOUPDATE_SEL_MASK 0x000000ffL
+#define BIF_BUSNUM_CNTL2__AUTOUPDATE_SEL__SHIFT 0x00000000
+#define BIF_BUSNUM_CNTL2__ERROR_MULTIPLE_ID_MATCH_MASK 0x00020000L
+#define BIF_BUSNUM_CNTL2__ERROR_MULTIPLE_ID_MATCH__SHIFT 0x00000011
+#define BIF_BUSNUM_CNTL2__HDPREG_CNTL_MASK 0x00010000L
+#define BIF_BUSNUM_CNTL2__HDPREG_CNTL__SHIFT 0x00000010
+#define BIF_BUSNUM_LIST0__ID0_MASK 0x000000ffL
+#define BIF_BUSNUM_LIST0__ID0__SHIFT 0x00000000
+#define BIF_BUSNUM_LIST0__ID1_MASK 0x0000ff00L
+#define BIF_BUSNUM_LIST0__ID1__SHIFT 0x00000008
+#define BIF_BUSNUM_LIST0__ID2_MASK 0x00ff0000L
+#define BIF_BUSNUM_LIST0__ID2__SHIFT 0x00000010
+#define BIF_BUSNUM_LIST0__ID3_MASK 0xff000000L
+#define BIF_BUSNUM_LIST0__ID3__SHIFT 0x00000018
+#define BIF_BUSNUM_LIST1__ID4_MASK 0x000000ffL
+#define BIF_BUSNUM_LIST1__ID4__SHIFT 0x00000000
+#define BIF_BUSNUM_LIST1__ID5_MASK 0x0000ff00L
+#define BIF_BUSNUM_LIST1__ID5__SHIFT 0x00000008
+#define BIF_BUSNUM_LIST1__ID6_MASK 0x00ff0000L
+#define BIF_BUSNUM_LIST1__ID6__SHIFT 0x00000010
+#define BIF_BUSNUM_LIST1__ID7_MASK 0xff000000L
+#define BIF_BUSNUM_LIST1__ID7__SHIFT 0x00000018
+#define BIF_BUSY_DELAY_CNTR__DELAY_CNT_MASK 0x0000003fL
+#define BIF_BUSY_DELAY_CNTR__DELAY_CNT__SHIFT 0x00000000
+#define BIF_CLK_PDWN_DELAY_TIMER__TIMER_MASK 0x000003ffL
+#define BIF_CLK_PDWN_DELAY_TIMER__TIMER__SHIFT 0x00000000
+#define BIF_DEBUG_CNTL__DEBUG_BYTESEL_BLK1_MASK 0x00000010L
+#define BIF_DEBUG_CNTL__DEBUG_BYTESEL_BLK1__SHIFT 0x00000004
+#define BIF_DEBUG_CNTL__DEBUG_BYTESEL_BLK2_MASK 0x00000020L
+#define BIF_DEBUG_CNTL__DEBUG_BYTESEL_BLK2__SHIFT 0x00000005
+#define BIF_DEBUG_CNTL__DEBUG_EN_MASK 0x00000001L
+#define BIF_DEBUG_CNTL__DEBUG_EN__SHIFT 0x00000000
+#define BIF_DEBUG_CNTL__DEBUG_IDSEL_BLK1_MASK 0x00001f00L
+#define BIF_DEBUG_CNTL__DEBUG_IDSEL_BLK1__SHIFT 0x00000008
+#define BIF_DEBUG_CNTL__DEBUG_IDSEL_BLK2_MASK 0x001f0000L
+#define BIF_DEBUG_CNTL__DEBUG_IDSEL_BLK2__SHIFT 0x00000010
+#define BIF_DEBUG_CNTL__DEBUG_IDSEL_XSP_MASK 0x01000000L
+#define BIF_DEBUG_CNTL__DEBUG_IDSEL_XSP__SHIFT 0x00000018
+#define BIF_DEBUG_CNTL__DEBUG_MULTIBLOCKEN_MASK 0x00000002L
+#define BIF_DEBUG_CNTL__DEBUG_MULTIBLOCKEN__SHIFT 0x00000001
+#define BIF_DEBUG_CNTL__DEBUG_OUT_EN_MASK 0x00000004L
+#define BIF_DEBUG_CNTL__DEBUG_OUT_EN__SHIFT 0x00000002
+#define BIF_DEBUG_CNTL__DEBUG_PAD_SEL_MASK 0x00000008L
+#define BIF_DEBUG_CNTL__DEBUG_PAD_SEL__SHIFT 0x00000003
+#define BIF_DEBUG_CNTL__DEBUG_SWAP_MASK 0x00000080L
+#define BIF_DEBUG_CNTL__DEBUG_SWAP__SHIFT 0x00000007
+#define BIF_DEBUG_CNTL__DEBUG_SYNC_CLKSEL_MASK 0xc0000000L
+#define BIF_DEBUG_CNTL__DEBUG_SYNC_CLKSEL__SHIFT 0x0000001e
+#define BIF_DEBUG_CNTL__DEBUG_SYNC_EN_MASK 0x00000040L
+#define BIF_DEBUG_CNTL__DEBUG_SYNC_EN__SHIFT 0x00000006
+#define BIF_DEBUG_MUX__DEBUG_MUX_BLK1_MASK 0x0000003fL
+#define BIF_DEBUG_MUX__DEBUG_MUX_BLK1__SHIFT 0x00000000
+#define BIF_DEBUG_MUX__DEBUG_MUX_BLK2_MASK 0x00003f00L
+#define BIF_DEBUG_MUX__DEBUG_MUX_BLK2__SHIFT 0x00000008
+#define BIF_DEBUG_OUT__DEBUG_OUTPUT_MASK 0x0001ffffL
+#define BIF_DEBUG_OUT__DEBUG_OUTPUT__SHIFT 0x00000000
+#define BIF_DEVFUNCNUM_LIST0__DEVFUNC_ID0_MASK 0x000000ffL
+#define BIF_DEVFUNCNUM_LIST0__DEVFUNC_ID0__SHIFT 0x00000000
+#define BIF_DEVFUNCNUM_LIST0__DEVFUNC_ID1_MASK 0x0000ff00L
+#define BIF_DEVFUNCNUM_LIST0__DEVFUNC_ID1__SHIFT 0x00000008
+#define BIF_DEVFUNCNUM_LIST0__DEVFUNC_ID2_MASK 0x00ff0000L
+#define BIF_DEVFUNCNUM_LIST0__DEVFUNC_ID2__SHIFT 0x00000010
+#define BIF_DEVFUNCNUM_LIST0__DEVFUNC_ID3_MASK 0xff000000L
+#define BIF_DEVFUNCNUM_LIST0__DEVFUNC_ID3__SHIFT 0x00000018
+#define BIF_DEVFUNCNUM_LIST1__DEVFUNC_ID4_MASK 0x000000ffL
+#define BIF_DEVFUNCNUM_LIST1__DEVFUNC_ID4__SHIFT 0x00000000
+#define BIF_DEVFUNCNUM_LIST1__DEVFUNC_ID5_MASK 0x0000ff00L
+#define BIF_DEVFUNCNUM_LIST1__DEVFUNC_ID5__SHIFT 0x00000008
+#define BIF_DEVFUNCNUM_LIST1__DEVFUNC_ID6_MASK 0x00ff0000L
+#define BIF_DEVFUNCNUM_LIST1__DEVFUNC_ID6__SHIFT 0x00000010
+#define BIF_DEVFUNCNUM_LIST1__DEVFUNC_ID7_MASK 0xff000000L
+#define BIF_DEVFUNCNUM_LIST1__DEVFUNC_ID7__SHIFT 0x00000018
+#define BIF_FB_EN__FB_READ_EN_MASK 0x00000001L
+#define BIF_FB_EN__FB_READ_EN__SHIFT 0x00000000
+#define BIF_FB_EN__FB_WRITE_EN_MASK 0x00000002L
+#define BIF_FB_EN__FB_WRITE_EN__SHIFT 0x00000001
+#define BIF_FEATURES_CONTROL_MISC__BIF_MST_CPL_EP_DIS_MASK 0x00000008L
+#define BIF_FEATURES_CONTROL_MISC__BIF_MST_CPL_EP_DIS__SHIFT 0x00000003
+#define BIF_FEATURES_CONTROL_MISC__BIF_SLV_REQ_EP_DIS_MASK 0x00000004L
+#define BIF_FEATURES_CONTROL_MISC__BIF_SLV_REQ_EP_DIS__SHIFT 0x00000002
+#define BIF_FEATURES_CONTROL_MISC__IGNORE_BE_CHECK_GASKET_COMB_DIS_MASK 0x00000100L
+#define BIF_FEATURES_CONTROL_MISC__IGNORE_BE_CHECK_GASKET_COMB_DIS__SHIFT 0x00000008
+#define BIF_FEATURES_CONTROL_MISC__MST_BIF_REQ_EP_DIS_MASK 0x00000001L
+#define BIF_FEATURES_CONTROL_MISC__MST_BIF_REQ_EP_DIS__SHIFT 0x00000000
+#define BIF_FEATURES_CONTROL_MISC__PLL_SWITCH_IMPCTL_CAL_DONE_DIS_MASK 0x00000080L
+#define BIF_FEATURES_CONTROL_MISC__PLL_SWITCH_IMPCTL_CAL_DONE_DIS__SHIFT 0x00000007
+#define BIF_FEATURES_CONTROL_MISC__POST_PSN_ONLY_PKT_REPORT_UR_ALL_DIS_MASK 0x00000020L
+#define BIF_FEATURES_CONTROL_MISC__POST_PSN_ONLY_PKT_REPORT_UR_ALL_DIS__SHIFT 0x00000005
+#define BIF_FEATURES_CONTROL_MISC__POST_PSN_ONLY_PKT_REPORT_UR_PART_DIS_MASK 0x00000040L
+#define BIF_FEATURES_CONTROL_MISC__POST_PSN_ONLY_PKT_REPORT_UR_PART_DIS__SHIFT 0x00000006
+#define BIF_FEATURES_CONTROL_MISC__SLV_BIF_CPL_EP_DIS_MASK 0x00000002L
+#define BIF_FEATURES_CONTROL_MISC__SLV_BIF_CPL_EP_DIS__SHIFT 0x00000001
+#define BIF_FEATURES_CONTROL_MISC__UR_PSN_PKT_REPORT_POISON_DIS_MASK 0x00000010L
+#define BIF_FEATURES_CONTROL_MISC__UR_PSN_PKT_REPORT_POISON_DIS__SHIFT 0x00000004
+#define BIF_PERFCOUNTER0_RESULT__PERFCOUNTER_RESULT_MASK 0xffffffffL
+#define BIF_PERFCOUNTER0_RESULT__PERFCOUNTER_RESULT__SHIFT 0x00000000
+#define BIF_PERFCOUNTER1_RESULT__PERFCOUNTER_RESULT_MASK 0xffffffffL
+#define BIF_PERFCOUNTER1_RESULT__PERFCOUNTER_RESULT__SHIFT 0x00000000
+#define BIF_PERFMON_CNTL__PERFCOUNTER_EN_MASK 0x00000001L
+#define BIF_PERFMON_CNTL__PERFCOUNTER_EN__SHIFT 0x00000000
+#define BIF_PERFMON_CNTL__PERFCOUNTER_RESET0_MASK 0x00000002L
+#define BIF_PERFMON_CNTL__PERFCOUNTER_RESET0__SHIFT 0x00000001
+#define BIF_PERFMON_CNTL__PERFCOUNTER_RESET1_MASK 0x00000004L
+#define BIF_PERFMON_CNTL__PERFCOUNTER_RESET1__SHIFT 0x00000002
+#define BIF_PERFMON_CNTL__PERF_SEL0_MASK 0x00001f00L
+#define BIF_PERFMON_CNTL__PERF_SEL0__SHIFT 0x00000008
+#define BIF_PERFMON_CNTL__PERF_SEL1_MASK 0x0003e000L
+#define BIF_PERFMON_CNTL__PERF_SEL1__SHIFT 0x0000000d
+#define BIF_PIF_TXCLK_SWITCH_TIMER__PLL0_ACK_TIMER_MASK 0x00000007L
+#define BIF_PIF_TXCLK_SWITCH_TIMER__PLL0_ACK_TIMER__SHIFT 0x00000000
+#define BIF_PIF_TXCLK_SWITCH_TIMER__PLL1_ACK_TIMER_MASK 0x00000038L
+#define BIF_PIF_TXCLK_SWITCH_TIMER__PLL1_ACK_TIMER__SHIFT 0x00000003
+#define BIF_PIF_TXCLK_SWITCH_TIMER__PLL_SWITCH_TIMER_MASK 0x000003c0L
+#define BIF_PIF_TXCLK_SWITCH_TIMER__PLL_SWITCH_TIMER__SHIFT 0x00000006
+#define BIF_RESET_EN__BIF_COR_RESET_EN_MASK 0x00400000L
+#define BIF_RESET_EN__BIF_COR_RESET_EN__SHIFT 0x00000016
+#define BIF_RESET_EN__CFG_RESET_EN_MASK 0x00000040L
+#define BIF_RESET_EN__CFG_RESET_EN__SHIFT 0x00000006
+#define BIF_RESET_EN__CFG_RESET_PULSE_WIDTH_MASK 0x0003f000L
+#define BIF_RESET_EN__CFG_RESET_PULSE_WIDTH__SHIFT 0x0000000c
+#define BIF_RESET_EN__COR_RESET_EN_MASK 0x00000008L
+#define BIF_RESET_EN__COR_RESET_EN__SHIFT 0x00000003
+#define BIF_RESET_EN__DRV_RESET_DELAY_SEL_MASK 0x000c0000L
+#define BIF_RESET_EN__DRV_RESET_DELAY_SEL__SHIFT 0x00000012
+#define BIF_RESET_EN__DRV_RESET_EN_MASK 0x00000080L
+#define BIF_RESET_EN__DRV_RESET_EN__SHIFT 0x00000007
+#define BIF_RESET_EN__FUNC0_FLR_EN_MASK 0x00800000L
+#define BIF_RESET_EN__FUNC0_FLR_EN__SHIFT 0x00000017
+#define BIF_RESET_EN__FUNC0_RESET_DELAY_SEL_MASK 0x0c000000L
+#define BIF_RESET_EN__FUNC0_RESET_DELAY_SEL__SHIFT 0x0000001a
+#define BIF_RESET_EN__FUNC1_FLR_EN_MASK 0x01000000L
+#define BIF_RESET_EN__FUNC1_FLR_EN__SHIFT 0x00000018
+#define BIF_RESET_EN__FUNC1_RESET_DELAY_SEL_MASK 0x30000000L
+#define BIF_RESET_EN__FUNC1_RESET_DELAY_SEL__SHIFT 0x0000001c
+#define BIF_RESET_EN__FUNC2_FLR_EN_MASK 0x02000000L
+#define BIF_RESET_EN__FUNC2_FLR_EN__SHIFT 0x00000019
+#define BIF_RESET_EN__FUNC2_RESET_DELAY_SEL_MASK 0xc0000000L
+#define BIF_RESET_EN__FUNC2_RESET_DELAY_SEL__SHIFT 0x0000001e
+#define BIF_RESET_EN__HOT_RESET_EN_MASK 0x00000200L
+#define BIF_RESET_EN__HOT_RESET_EN__SHIFT 0x00000009
+#define BIF_RESET_EN__LINK_DISABLE_RESET_EN_MASK 0x00000400L
+#define BIF_RESET_EN__LINK_DISABLE_RESET_EN__SHIFT 0x0000000a
+#define BIF_RESET_EN__LINK_DOWN_RESET_EN_MASK 0x00000800L
+#define BIF_RESET_EN__LINK_DOWN_RESET_EN__SHIFT 0x0000000b
+#define BIF_RESET_EN__PHY_RESET_EN_MASK 0x00000004L
+#define BIF_RESET_EN__PHY_RESET_EN__SHIFT 0x00000002
+#define BIF_RESET_EN__PIF_RSTB_EN_MASK 0x00100000L
+#define BIF_RESET_EN__PIF_RSTB_EN__SHIFT 0x00000014
+#define BIF_RESET_EN__PIF_STRAP_ALLVALID_EN_MASK 0x00200000L
+#define BIF_RESET_EN__PIF_STRAP_ALLVALID_EN__SHIFT 0x00000015
+#define BIF_RESET_EN__REG_RESET_EN_MASK 0x00000010L
+#define BIF_RESET_EN__REG_RESET_EN__SHIFT 0x00000004
+#define BIF_RESET_EN__RESET_CFGREG_ONLY_EN_MASK 0x00000100L
+#define BIF_RESET_EN__RESET_CFGREG_ONLY_EN__SHIFT 0x00000008
+#define BIF_RESET_EN__SOFT_RST_MODE_MASK 0x00000002L
+#define BIF_RESET_EN__SOFT_RST_MODE__SHIFT 0x00000001
+#define BIF_RESET_EN__STY_RESET_EN_MASK 0x00000020L
+#define BIF_RESET_EN__STY_RESET_EN__SHIFT 0x00000005
+#define BIF_SCRATCH0__BIF_SCRATCH0_MASK 0xffffffffL
+#define BIF_SCRATCH0__BIF_SCRATCH0__SHIFT 0x00000000
+#define BIF_SCRATCH1__BIF_SCRATCH1_MASK 0xffffffffL
+#define BIF_SCRATCH1__BIF_SCRATCH1__SHIFT 0x00000000
+#define BIF_SSA_DISP_LOWER__SSA_DISP_LOWER_MASK 0x0003fffcL
+#define BIF_SSA_DISP_LOWER__SSA_DISP_LOWER__SHIFT 0x00000002
+#define BIF_SSA_DISP_LOWER__SSA_DISP_REG_CMP_EN_MASK 0x40000000L
+#define BIF_SSA_DISP_LOWER__SSA_DISP_REG_CMP_EN__SHIFT 0x0000001e
+#define BIF_SSA_DISP_LOWER__SSA_DISP_REG_STALL_EN_MASK 0x80000000L
+#define BIF_SSA_DISP_LOWER__SSA_DISP_REG_STALL_EN__SHIFT 0x0000001f
+#define BIF_SSA_DISP_UPPER__SSA_DISP_UPPER_MASK 0x0003fffcL
+#define BIF_SSA_DISP_UPPER__SSA_DISP_UPPER__SHIFT 0x00000002
+#define BIF_SSA_GFX0_LOWER__SSA_GFX0_LOWER_MASK 0x0003fffcL
+#define BIF_SSA_GFX0_LOWER__SSA_GFX0_LOWER__SHIFT 0x00000002
+#define BIF_SSA_GFX0_LOWER__SSA_GFX0_REG_CMP_EN_MASK 0x40000000L
+#define BIF_SSA_GFX0_LOWER__SSA_GFX0_REG_CMP_EN__SHIFT 0x0000001e
+#define BIF_SSA_GFX0_LOWER__SSA_GFX0_REG_STALL_EN_MASK 0x80000000L
+#define BIF_SSA_GFX0_LOWER__SSA_GFX0_REG_STALL_EN__SHIFT 0x0000001f
+#define BIF_SSA_GFX0_UPPER__SSA_GFX0_UPPER_MASK 0x0003fffcL
+#define BIF_SSA_GFX0_UPPER__SSA_GFX0_UPPER__SHIFT 0x00000002
+#define BIF_SSA_GFX1_LOWER__SSA_GFX1_LOWER_MASK 0x0003fffcL
+#define BIF_SSA_GFX1_LOWER__SSA_GFX1_LOWER__SHIFT 0x00000002
+#define BIF_SSA_GFX1_LOWER__SSA_GFX1_REG_CMP_EN_MASK 0x40000000L
+#define BIF_SSA_GFX1_LOWER__SSA_GFX1_REG_CMP_EN__SHIFT 0x0000001e
+#define BIF_SSA_GFX1_LOWER__SSA_GFX1_REG_STALL_EN_MASK 0x80000000L
+#define BIF_SSA_GFX1_LOWER__SSA_GFX1_REG_STALL_EN__SHIFT 0x0000001f
+#define BIF_SSA_GFX1_UPPER__SSA_GFX1_UPPER_MASK 0x0003fffcL
+#define BIF_SSA_GFX1_UPPER__SSA_GFX1_UPPER__SHIFT 0x00000002
+#define BIF_SSA_GFX2_LOWER__SSA_GFX2_LOWER_MASK 0x0003fffcL
+#define BIF_SSA_GFX2_LOWER__SSA_GFX2_LOWER__SHIFT 0x00000002
+#define BIF_SSA_GFX2_LOWER__SSA_GFX2_REG_CMP_EN_MASK 0x40000000L
+#define BIF_SSA_GFX2_LOWER__SSA_GFX2_REG_CMP_EN__SHIFT 0x0000001e
+#define BIF_SSA_GFX2_LOWER__SSA_GFX2_REG_STALL_EN_MASK 0x80000000L
+#define BIF_SSA_GFX2_LOWER__SSA_GFX2_REG_STALL_EN__SHIFT 0x0000001f
+#define BIF_SSA_GFX2_UPPER__SSA_GFX2_UPPER_MASK 0x0003fffcL
+#define BIF_SSA_GFX2_UPPER__SSA_GFX2_UPPER__SHIFT 0x00000002
+#define BIF_SSA_GFX3_LOWER__SSA_GFX3_LOWER_MASK 0x0003fffcL
+#define BIF_SSA_GFX3_LOWER__SSA_GFX3_LOWER__SHIFT 0x00000002
+#define BIF_SSA_GFX3_LOWER__SSA_GFX3_REG_CMP_EN_MASK 0x40000000L
+#define BIF_SSA_GFX3_LOWER__SSA_GFX3_REG_CMP_EN__SHIFT 0x0000001e
+#define BIF_SSA_GFX3_LOWER__SSA_GFX3_REG_STALL_EN_MASK 0x80000000L
+#define BIF_SSA_GFX3_LOWER__SSA_GFX3_REG_STALL_EN__SHIFT 0x0000001f
+#define BIF_SSA_GFX3_UPPER__SSA_GFX3_UPPER_MASK 0x0003fffcL
+#define BIF_SSA_GFX3_UPPER__SSA_GFX3_UPPER__SHIFT 0x00000002
+#define BIF_SSA_MC_LOWER__SSA_MC_FB_STALL_EN_MASK 0x20000000L
+#define BIF_SSA_MC_LOWER__SSA_MC_FB_STALL_EN__SHIFT 0x0000001d
+#define BIF_SSA_MC_LOWER__SSA_MC_LOWER_MASK 0x0003fffcL
+#define BIF_SSA_MC_LOWER__SSA_MC_LOWER__SHIFT 0x00000002
+#define BIF_SSA_MC_LOWER__SSA_MC_REG_CMP_EN_MASK 0x40000000L
+#define BIF_SSA_MC_LOWER__SSA_MC_REG_CMP_EN__SHIFT 0x0000001e
+#define BIF_SSA_MC_LOWER__SSA_MC_REG_STALL_EN_MASK 0x80000000L
+#define BIF_SSA_MC_LOWER__SSA_MC_REG_STALL_EN__SHIFT 0x0000001f
+#define BIF_SSA_MC_UPPER__SSA_MC_UPPER_MASK 0x0003fffcL
+#define BIF_SSA_MC_UPPER__SSA_MC_UPPER__SHIFT 0x00000002
+#define BIF_SSA_PWR_STATUS__SSA_DISP_PWR_STATUS_MASK 0x00000002L
+#define BIF_SSA_PWR_STATUS__SSA_DISP_PWR_STATUS__SHIFT 0x00000001
+#define BIF_SSA_PWR_STATUS__SSA_GFX_PWR_STATUS_MASK 0x00000001L
+#define BIF_SSA_PWR_STATUS__SSA_GFX_PWR_STATUS__SHIFT 0x00000000
+#define BIF_SSA_PWR_STATUS__SSA_MC_PWR_STATUS_MASK 0x00000004L
+#define BIF_SSA_PWR_STATUS__SSA_MC_PWR_STATUS__SHIFT 0x00000002
+#define BIF_XDMA_HI__BIF_XDMA_UPPER_BOUND_MASK 0x1fffffffL
+#define BIF_XDMA_HI__BIF_XDMA_UPPER_BOUND__SHIFT 0x00000000
+#define BIF_XDMA_LO__BIF_XDMA_APER_EN_MASK 0x80000000L
+#define BIF_XDMA_LO__BIF_XDMA_APER_EN__SHIFT 0x0000001f
+#define BIF_XDMA_LO__BIF_XDMA_LOWER_BOUND_MASK 0x1fffffffL
+#define BIF_XDMA_LO__BIF_XDMA_LOWER_BOUND__SHIFT 0x00000000
+#define BIOS_SCRATCH_0__BIOS_SCRATCH_0_MASK 0xffffffffL
+#define BIOS_SCRATCH_0__BIOS_SCRATCH_0__SHIFT 0x00000000
+#define BIOS_SCRATCH_10__BIOS_SCRATCH_10_MASK 0xffffffffL
+#define BIOS_SCRATCH_10__BIOS_SCRATCH_10__SHIFT 0x00000000
+#define BIOS_SCRATCH_11__BIOS_SCRATCH_11_MASK 0xffffffffL
+#define BIOS_SCRATCH_11__BIOS_SCRATCH_11__SHIFT 0x00000000
+#define BIOS_SCRATCH_12__BIOS_SCRATCH_12_MASK 0xffffffffL
+#define BIOS_SCRATCH_12__BIOS_SCRATCH_12__SHIFT 0x00000000
+#define BIOS_SCRATCH_13__BIOS_SCRATCH_13_MASK 0xffffffffL
+#define BIOS_SCRATCH_13__BIOS_SCRATCH_13__SHIFT 0x00000000
+#define BIOS_SCRATCH_14__BIOS_SCRATCH_14_MASK 0xffffffffL
+#define BIOS_SCRATCH_14__BIOS_SCRATCH_14__SHIFT 0x00000000
+#define BIOS_SCRATCH_15__BIOS_SCRATCH_15_MASK 0xffffffffL
+#define BIOS_SCRATCH_15__BIOS_SCRATCH_15__SHIFT 0x00000000
+#define BIOS_SCRATCH_1__BIOS_SCRATCH_1_MASK 0xffffffffL
+#define BIOS_SCRATCH_1__BIOS_SCRATCH_1__SHIFT 0x00000000
+#define BIOS_SCRATCH_2__BIOS_SCRATCH_2_MASK 0xffffffffL
+#define BIOS_SCRATCH_2__BIOS_SCRATCH_2__SHIFT 0x00000000
+#define BIOS_SCRATCH_3__BIOS_SCRATCH_3_MASK 0xffffffffL
+#define BIOS_SCRATCH_3__BIOS_SCRATCH_3__SHIFT 0x00000000
+#define BIOS_SCRATCH_4__BIOS_SCRATCH_4_MASK 0xffffffffL
+#define BIOS_SCRATCH_4__BIOS_SCRATCH_4__SHIFT 0x00000000
+#define BIOS_SCRATCH_5__BIOS_SCRATCH_5_MASK 0xffffffffL
+#define BIOS_SCRATCH_5__BIOS_SCRATCH_5__SHIFT 0x00000000
+#define BIOS_SCRATCH_6__BIOS_SCRATCH_6_MASK 0xffffffffL
+#define BIOS_SCRATCH_6__BIOS_SCRATCH_6__SHIFT 0x00000000
+#define BIOS_SCRATCH_7__BIOS_SCRATCH_7_MASK 0xffffffffL
+#define BIOS_SCRATCH_7__BIOS_SCRATCH_7__SHIFT 0x00000000
+#define BIOS_SCRATCH_8__BIOS_SCRATCH_8_MASK 0xffffffffL
+#define BIOS_SCRATCH_8__BIOS_SCRATCH_8__SHIFT 0x00000000
+#define BIOS_SCRATCH_9__BIOS_SCRATCH_9_MASK 0xffffffffL
+#define BIOS_SCRATCH_9__BIOS_SCRATCH_9__SHIFT 0x00000000
+#define BUS_CNTL__BIF_ERR_RTR_BKPRESSURE_EN_MASK 0x00000100L
+#define BUS_CNTL__BIF_ERR_RTR_BKPRESSURE_EN__SHIFT 0x00000008
+#define BUS_CNTL__BIOS_ROM_DIS_MASK 0x00000002L
+#define BUS_CNTL__BIOS_ROM_DIS__SHIFT 0x00000001
+#define BUS_CNTL__BIOS_ROM_WRT_EN_MASK 0x00000001L
+#define BUS_CNTL__BIOS_ROM_WRT_EN__SHIFT 0x00000000
+#define BUS_CNTL__PMI_BM_DIS_MASK 0x00000010L
+#define BUS_CNTL__PMI_BM_DIS__SHIFT 0x00000004
+#define BUS_CNTL__PMI_INT_DIS_MASK 0x00000020L
+#define BUS_CNTL__PMI_INT_DIS__SHIFT 0x00000005
+#define BUS_CNTL__PMI_IO_DIS_MASK 0x00000004L
+#define BUS_CNTL__PMI_IO_DIS__SHIFT 0x00000002
+#define BUS_CNTL__PMI_MEM_DIS_MASK 0x00000008L
+#define BUS_CNTL__PMI_MEM_DIS__SHIFT 0x00000003
+#define BUS_CNTL__RD_STALL_IO_WR_MASK 0x00040000L
+#define BUS_CNTL__RD_STALL_IO_WR__SHIFT 0x00000012
+#define BUS_CNTL__SET_AZ_TC_MASK 0x00001c00L
+#define BUS_CNTL__SET_AZ_TC__SHIFT 0x0000000a
+#define BUS_CNTL__SET_MC_TC_MASK 0x0000e000L
+#define BUS_CNTL__SET_MC_TC__SHIFT 0x0000000d
+#define BUS_CNTL__VGA_MEM_COHERENCY_DIS_MASK 0x00000080L
+#define BUS_CNTL__VGA_MEM_COHERENCY_DIS__SHIFT 0x00000007
+#define BUS_CNTL__VGA_REG_COHERENCY_DIS_MASK 0x00000040L
+#define BUS_CNTL__VGA_REG_COHERENCY_DIS__SHIFT 0x00000006
+#define BUS_CNTL__ZERO_BE_RD_EN_MASK 0x00020000L
+#define BUS_CNTL__ZERO_BE_RD_EN__SHIFT 0x00000011
+#define BUS_CNTL__ZERO_BE_WR_EN_MASK 0x00010000L
+#define BUS_CNTL__ZERO_BE_WR_EN__SHIFT 0x00000010
+#define CAPTURE_HOST_BUSNUM__CHECK_EN_MASK 0x00000001L
+#define CAPTURE_HOST_BUSNUM__CHECK_EN__SHIFT 0x00000000
+#define CLKREQB_PAD_CNTL__CLKREQB_PAD_A_MASK 0x00000001L
+#define CLKREQB_PAD_CNTL__CLKREQB_PAD_A__SHIFT 0x00000000
+#define CLKREQB_PAD_CNTL__CLKREQB_PAD_CNTL_EN_MASK 0x00001000L
+#define CLKREQB_PAD_CNTL__CLKREQB_PAD_CNTL_EN__SHIFT 0x0000000c
+#define CLKREQB_PAD_CNTL__CLKREQB_PAD_MODE_MASK 0x00000004L
+#define CLKREQB_PAD_CNTL__CLKREQB_PAD_MODE__SHIFT 0x00000002
+#define CLKREQB_PAD_CNTL__CLKREQB_PAD_SCHMEN_MASK 0x00000800L
+#define CLKREQB_PAD_CNTL__CLKREQB_PAD_SCHMEN__SHIFT 0x0000000b
+#define CLKREQB_PAD_CNTL__CLKREQB_PAD_SEL_MASK 0x00000002L
+#define CLKREQB_PAD_CNTL__CLKREQB_PAD_SEL__SHIFT 0x00000001
+#define CLKREQB_PAD_CNTL__CLKREQB_PAD_SLEWN_MASK 0x00000200L
+#define CLKREQB_PAD_CNTL__CLKREQB_PAD_SLEWN__SHIFT 0x00000009
+#define CLKREQB_PAD_CNTL__CLKREQB_PAD_SN0_MASK 0x00000020L
+#define CLKREQB_PAD_CNTL__CLKREQB_PAD_SN0__SHIFT 0x00000005
+#define CLKREQB_PAD_CNTL__CLKREQB_PAD_SN1_MASK 0x00000040L
+#define CLKREQB_PAD_CNTL__CLKREQB_PAD_SN1__SHIFT 0x00000006
+#define CLKREQB_PAD_CNTL__CLKREQB_PAD_SN2_MASK 0x00000080L
+#define CLKREQB_PAD_CNTL__CLKREQB_PAD_SN2__SHIFT 0x00000007
+#define CLKREQB_PAD_CNTL__CLKREQB_PAD_SN3_MASK 0x00000100L
+#define CLKREQB_PAD_CNTL__CLKREQB_PAD_SN3__SHIFT 0x00000008
+#define CLKREQB_PAD_CNTL__CLKREQB_PAD_SPARE_MASK 0x00000018L
+#define CLKREQB_PAD_CNTL__CLKREQB_PAD_SPARE__SHIFT 0x00000003
+#define CLKREQB_PAD_CNTL__CLKREQB_PAD_WAKE_MASK 0x00000400L
+#define CLKREQB_PAD_CNTL__CLKREQB_PAD_WAKE__SHIFT 0x0000000a
+#define CONFIG_APER_SIZE__APER_SIZE_MASK 0xffffffffL
+#define CONFIG_APER_SIZE__APER_SIZE__SHIFT 0x00000000
+#define CONFIG_CNTL__CFG_VGA_RAM_EN_MASK 0x00000001L
+#define CONFIG_CNTL__CFG_VGA_RAM_EN__SHIFT 0x00000000
+#define CONFIG_CNTL__GENMO_MONO_ADDRESS_B_MASK 0x00000004L
+#define CONFIG_CNTL__GENMO_MONO_ADDRESS_B__SHIFT 0x00000002
+#define CONFIG_CNTL__GRPH_ADRSEL_MASK 0x00000018L
+#define CONFIG_CNTL__GRPH_ADRSEL__SHIFT 0x00000003
+#define CONFIG_CNTL__VGA_DIS_MASK 0x00000002L
+#define CONFIG_CNTL__VGA_DIS__SHIFT 0x00000001
+#define CONFIG_F0_BASE__F0_BASE_MASK 0xffffffffL
+#define CONFIG_F0_BASE__F0_BASE__SHIFT 0x00000000
+#define CONFIG_MEMSIZE__CONFIG_MEMSIZE_MASK 0xffffffffL
+#define CONFIG_MEMSIZE__CONFIG_MEMSIZE__SHIFT 0x00000000
+#define CONFIG_REG_APER_SIZE__REG_APER_SIZE_MASK 0x000fffffL
+#define CONFIG_REG_APER_SIZE__REG_APER_SIZE__SHIFT 0x00000000
+#define HDP_MEM_COHERENCY_FLUSH_CNTL__HDP_MEM_FLUSH_ADDR_MASK 0x00000001L
+#define HDP_MEM_COHERENCY_FLUSH_CNTL__HDP_MEM_FLUSH_ADDR__SHIFT 0x00000000
+#define HDP_REG_COHERENCY_FLUSH_CNTL__HDP_REG_FLUSH_ADDR_MASK 0x00000001L
+#define HDP_REG_COHERENCY_FLUSH_CNTL__HDP_REG_FLUSH_ADDR__SHIFT 0x00000000
+#define HOST_BUSNUM__HOST_ID_MASK 0x0000ffffL
+#define HOST_BUSNUM__HOST_ID__SHIFT 0x00000000
+#define HW_DEBUG__HW_00_DEBUG_MASK 0x00000001L
+#define HW_DEBUG__HW_00_DEBUG__SHIFT 0x00000000
+#define HW_DEBUG__HW_01_DEBUG_MASK 0x00000002L
+#define HW_DEBUG__HW_01_DEBUG__SHIFT 0x00000001
+#define HW_DEBUG__HW_02_DEBUG_MASK 0x00000004L
+#define HW_DEBUG__HW_02_DEBUG__SHIFT 0x00000002
+#define HW_DEBUG__HW_03_DEBUG_MASK 0x00000008L
+#define HW_DEBUG__HW_03_DEBUG__SHIFT 0x00000003
+#define HW_DEBUG__HW_04_DEBUG_MASK 0x00000010L
+#define HW_DEBUG__HW_04_DEBUG__SHIFT 0x00000004
+#define HW_DEBUG__HW_05_DEBUG_MASK 0x00000020L
+#define HW_DEBUG__HW_05_DEBUG__SHIFT 0x00000005
+#define HW_DEBUG__HW_06_DEBUG_MASK 0x00000040L
+#define HW_DEBUG__HW_06_DEBUG__SHIFT 0x00000006
+#define HW_DEBUG__HW_07_DEBUG_MASK 0x00000080L
+#define HW_DEBUG__HW_07_DEBUG__SHIFT 0x00000007
+#define HW_DEBUG__HW_08_DEBUG_MASK 0x00000100L
+#define HW_DEBUG__HW_08_DEBUG__SHIFT 0x00000008
+#define HW_DEBUG__HW_09_DEBUG_MASK 0x00000200L
+#define HW_DEBUG__HW_09_DEBUG__SHIFT 0x00000009
+#define HW_DEBUG__HW_10_DEBUG_MASK 0x00000400L
+#define HW_DEBUG__HW_10_DEBUG__SHIFT 0x0000000a
+#define HW_DEBUG__HW_11_DEBUG_MASK 0x00000800L
+#define HW_DEBUG__HW_11_DEBUG__SHIFT 0x0000000b
+#define HW_DEBUG__HW_12_DEBUG_MASK 0x00001000L
+#define HW_DEBUG__HW_12_DEBUG__SHIFT 0x0000000c
+#define HW_DEBUG__HW_13_DEBUG_MASK 0x00002000L
+#define HW_DEBUG__HW_13_DEBUG__SHIFT 0x0000000d
+#define HW_DEBUG__HW_14_DEBUG_MASK 0x00004000L
+#define HW_DEBUG__HW_14_DEBUG__SHIFT 0x0000000e
+#define HW_DEBUG__HW_15_DEBUG_MASK 0x00008000L
+#define HW_DEBUG__HW_15_DEBUG__SHIFT 0x0000000f
+#define HW_DEBUG__HW_16_DEBUG_MASK 0x00010000L
+#define HW_DEBUG__HW_16_DEBUG__SHIFT 0x00000010
+#define HW_DEBUG__HW_17_DEBUG_MASK 0x00020000L
+#define HW_DEBUG__HW_17_DEBUG__SHIFT 0x00000011
+#define HW_DEBUG__HW_18_DEBUG_MASK 0x00040000L
+#define HW_DEBUG__HW_18_DEBUG__SHIFT 0x00000012
+#define HW_DEBUG__HW_19_DEBUG_MASK 0x00080000L
+#define HW_DEBUG__HW_19_DEBUG__SHIFT 0x00000013
+#define HW_DEBUG__HW_20_DEBUG_MASK 0x00100000L
+#define HW_DEBUG__HW_20_DEBUG__SHIFT 0x00000014
+#define HW_DEBUG__HW_21_DEBUG_MASK 0x00200000L
+#define HW_DEBUG__HW_21_DEBUG__SHIFT 0x00000015
+#define HW_DEBUG__HW_22_DEBUG_MASK 0x00400000L
+#define HW_DEBUG__HW_22_DEBUG__SHIFT 0x00000016
+#define HW_DEBUG__HW_23_DEBUG_MASK 0x00800000L
+#define HW_DEBUG__HW_23_DEBUG__SHIFT 0x00000017
+#define HW_DEBUG__HW_24_DEBUG_MASK 0x01000000L
+#define HW_DEBUG__HW_24_DEBUG__SHIFT 0x00000018
+#define HW_DEBUG__HW_25_DEBUG_MASK 0x02000000L
+#define HW_DEBUG__HW_25_DEBUG__SHIFT 0x00000019
+#define HW_DEBUG__HW_26_DEBUG_MASK 0x04000000L
+#define HW_DEBUG__HW_26_DEBUG__SHIFT 0x0000001a
+#define HW_DEBUG__HW_27_DEBUG_MASK 0x08000000L
+#define HW_DEBUG__HW_27_DEBUG__SHIFT 0x0000001b
+#define HW_DEBUG__HW_28_DEBUG_MASK 0x10000000L
+#define HW_DEBUG__HW_28_DEBUG__SHIFT 0x0000001c
+#define HW_DEBUG__HW_29_DEBUG_MASK 0x20000000L
+#define HW_DEBUG__HW_29_DEBUG__SHIFT 0x0000001d
+#define HW_DEBUG__HW_30_DEBUG_MASK 0x40000000L
+#define HW_DEBUG__HW_30_DEBUG__SHIFT 0x0000001e
+#define HW_DEBUG__HW_31_DEBUG_MASK 0x80000000L
+#define HW_DEBUG__HW_31_DEBUG__SHIFT 0x0000001f
+#define IMPCTL_RESET__IMP_SW_RESET_MASK 0x00000001L
+#define IMPCTL_RESET__IMP_SW_RESET__SHIFT 0x00000000
+#define INTERRUPT_CNTL2__IH_DUMMY_RD_ADDR_MASK 0xffffffffL
+#define INTERRUPT_CNTL2__IH_DUMMY_RD_ADDR__SHIFT 0x00000000
+#define INTERRUPT_CNTL__GEN_GPIO_INT_EN_MASK 0x00001e00L
+#define INTERRUPT_CNTL__GEN_GPIO_INT_EN__SHIFT 0x00000009
+#define INTERRUPT_CNTL__GEN_IH_INT_EN_MASK 0x00000100L
+#define INTERRUPT_CNTL__GEN_IH_INT_EN__SHIFT 0x00000008
+#define INTERRUPT_CNTL__IH_DUMMY_RD_EN_MASK 0x00000002L
+#define INTERRUPT_CNTL__IH_DUMMY_RD_EN__SHIFT 0x00000001
+#define INTERRUPT_CNTL__IH_DUMMY_RD_OVERRIDE_MASK 0x00000001L
+#define INTERRUPT_CNTL__IH_DUMMY_RD_OVERRIDE__SHIFT 0x00000000
+#define INTERRUPT_CNTL__IH_INTR_DLY_CNTR_MASK 0x000000f0L
+#define INTERRUPT_CNTL__IH_INTR_DLY_CNTR__SHIFT 0x00000004
+#define INTERRUPT_CNTL__IH_REQ_NONSNOOP_EN_MASK 0x00000008L
+#define INTERRUPT_CNTL__IH_REQ_NONSNOOP_EN__SHIFT 0x00000003
+#define INTERRUPT_CNTL__SELECT_INT_GPIO_OUTPUT_MASK 0x00006000L
+#define INTERRUPT_CNTL__SELECT_INT_GPIO_OUTPUT__SHIFT 0x0000000d
+#define MASTER_CREDIT_CNTL__BIF_AZ_RDRET_CREDIT_MASK 0x003f0000L
+#define MASTER_CREDIT_CNTL__BIF_AZ_RDRET_CREDIT__SHIFT 0x00000010
+#define MASTER_CREDIT_CNTL__BIF_MC_RDRET_CREDIT_MASK 0x0000003fL
+#define MASTER_CREDIT_CNTL__BIF_MC_RDRET_CREDIT__SHIFT 0x00000000
+#define MM_CFGREGS_CNTL__MM_CFG_FUNC_SEL_MASK 0x00000007L
+#define MM_CFGREGS_CNTL__MM_CFG_FUNC_SEL__SHIFT 0x00000000
+#define MM_CFGREGS_CNTL__MM_WR_TO_CFG_EN_MASK 0x00000008L
+#define MM_CFGREGS_CNTL__MM_WR_TO_CFG_EN__SHIFT 0x00000003
+#define MM_DATA__MM_DATA_MASK 0xffffffffL
+#define MM_DATA__MM_DATA__SHIFT 0x00000000
+#define MM_INDEX_HI__MM_OFFSET_HI_MASK 0xffffffffL
+#define MM_INDEX_HI__MM_OFFSET_HI__SHIFT 0x00000000
+#define MM_INDEX__MM_APER_MASK 0x80000000L
+#define MM_INDEX__MM_APER__SHIFT 0x0000001f
+#define MM_INDEX__MM_OFFSET_MASK 0x7fffffffL
+#define MM_INDEX__MM_OFFSET__SHIFT 0x00000000
+#define NEW_REFCLKB_TIMER_1__PHY_PLL_PDWN_TIMER_MASK 0x000003ffL
+#define NEW_REFCLKB_TIMER_1__PHY_PLL_PDWN_TIMER__SHIFT 0x00000000
+#define NEW_REFCLKB_TIMER_1__PLL0_PDNB_EN_MASK 0x00000400L
+#define NEW_REFCLKB_TIMER_1__PLL0_PDNB_EN__SHIFT 0x0000000a
+#define NEW_REFCLKB_TIMER__REFCLK_ON_MASK 0x00200000L
+#define NEW_REFCLKB_TIMER__REFCLK_ON__SHIFT 0x00000015
+#define NEW_REFCLKB_TIMER__REG_STOP_REFCLK_EN_MASK 0x00000001L
+#define NEW_REFCLKB_TIMER__REG_STOP_REFCLK_EN__SHIFT 0x00000000
+#define NEW_REFCLKB_TIMER__STOP_REFCLK_TIMER_MASK 0x001ffffeL
+#define NEW_REFCLKB_TIMER__STOP_REFCLK_TIMER__SHIFT 0x00000001
+#define PB0_DFT_DEBUG_CTRL_REG0__DFT_PHY_DEBUG_EN_MASK 0x00000001L
+#define PB0_DFT_DEBUG_CTRL_REG0__DFT_PHY_DEBUG_EN__SHIFT 0x00000000
+#define PB0_DFT_DEBUG_CTRL_REG0__DFT_PHY_DEBUG_MODE_MASK 0x0000003eL
+#define PB0_DFT_DEBUG_CTRL_REG0__DFT_PHY_DEBUG_MODE__SHIFT 0x00000001
+#define PB0_DFT_JIT_INJ_REG0__DFT_CLK_PER_STEP_MASK 0x00000f00L
+#define PB0_DFT_JIT_INJ_REG0__DFT_CLK_PER_STEP__SHIFT 0x00000008
+#define PB0_DFT_JIT_INJ_REG0__DFT_DECR_SWP_EN_MASK 0x00800000L
+#define PB0_DFT_JIT_INJ_REG0__DFT_DECR_SWP_EN__SHIFT 0x00000017
+#define PB0_DFT_JIT_INJ_REG0__DFT_INCR_SWP_EN_MASK 0x00400000L
+#define PB0_DFT_JIT_INJ_REG0__DFT_INCR_SWP_EN__SHIFT 0x00000016
+#define PB0_DFT_JIT_INJ_REG0__DFT_NUM_STEPS_MASK 0x0000001fL
+#define PB0_DFT_JIT_INJ_REG0__DFT_NUM_STEPS__SHIFT 0x00000000
+#define PB0_DFT_JIT_INJ_REG0__DFT_RECOVERY_TIME_MASK 0xff000000L
+#define PB0_DFT_JIT_INJ_REG0__DFT_RECOVERY_TIME__SHIFT 0x00000018
+#define PB0_DFT_JIT_INJ_REG1__DFT_BLOCK_EN_MASK 0x00010000L
+#define PB0_DFT_JIT_INJ_REG1__DFT_BLOCK_EN__SHIFT 0x00000010
+#define PB0_DFT_JIT_INJ_REG1__DFT_BYPASS_EN_MASK 0x00000100L
+#define PB0_DFT_JIT_INJ_REG1__DFT_BYPASS_EN__SHIFT 0x00000008
+#define PB0_DFT_JIT_INJ_REG1__DFT_BYPASS_VALUE_MASK 0x000000ffL
+#define PB0_DFT_JIT_INJ_REG1__DFT_BYPASS_VALUE__SHIFT 0x00000000
+#define PB0_DFT_JIT_INJ_REG2__DFT_LANE_EN_MASK 0x0000ffffL
+#define PB0_DFT_JIT_INJ_REG2__DFT_LANE_EN__SHIFT 0x00000000
+#define PB0_GLB_CTRL_REG0__BACKUP_MASK 0x0000ffffL
+#define PB0_GLB_CTRL_REG0__BACKUP__SHIFT 0x00000000
+#define PB0_GLB_CTRL_REG0__CFG_IDLEDET_TH_MASK 0x00030000L
+#define PB0_GLB_CTRL_REG0__CFG_IDLEDET_TH__SHIFT 0x00000010
+#define PB0_GLB_CTRL_REG0__DBG_RX2TXBYP_SEL_MASK 0x00700000L
+#define PB0_GLB_CTRL_REG0__DBG_RX2TXBYP_SEL__SHIFT 0x00000014
+#define PB0_GLB_CTRL_REG0__DBG_RXFEBYP_EN_MASK 0x00800000L
+#define PB0_GLB_CTRL_REG0__DBG_RXFEBYP_EN__SHIFT 0x00000017
+#define PB0_GLB_CTRL_REG0__DBG_RXPRBS_CLR_MASK 0x01000000L
+#define PB0_GLB_CTRL_REG0__DBG_RXPRBS_CLR__SHIFT 0x00000018
+#define PB0_GLB_CTRL_REG0__DBG_RXTOGGLE_EN_MASK 0x02000000L
+#define PB0_GLB_CTRL_REG0__DBG_RXTOGGLE_EN__SHIFT 0x00000019
+#define PB0_GLB_CTRL_REG0__DBG_TX2RXLBACK_EN_MASK 0x04000000L
+#define PB0_GLB_CTRL_REG0__DBG_TX2RXLBACK_EN__SHIFT 0x0000001a
+#define PB0_GLB_CTRL_REG0__TXCFG_CMGOOD_RANGE_MASK 0xc0000000L
+#define PB0_GLB_CTRL_REG0__TXCFG_CMGOOD_RANGE__SHIFT 0x0000001e
+#define PB0_GLB_CTRL_REG1__PLL_CFG_DISPCLK_DIV_MASK 0x80000000L
+#define PB0_GLB_CTRL_REG1__PLL_CFG_DISPCLK_DIV__SHIFT 0x0000001f
+#define PB0_GLB_CTRL_REG1__RXDBG_CDR_FR_BYP_EN_MASK 0x00000001L
+#define PB0_GLB_CTRL_REG1__RXDBG_CDR_FR_BYP_EN__SHIFT 0x00000000
+#define PB0_GLB_CTRL_REG1__RXDBG_CDR_FR_BYP_VAL_MASK 0x0000007eL
+#define PB0_GLB_CTRL_REG1__RXDBG_CDR_FR_BYP_VAL__SHIFT 0x00000001
+#define PB0_GLB_CTRL_REG1__RXDBG_CDR_PH_BYP_EN_MASK 0x00000080L
+#define PB0_GLB_CTRL_REG1__RXDBG_CDR_PH_BYP_EN__SHIFT 0x00000007
+#define PB0_GLB_CTRL_REG1__RXDBG_CDR_PH_BYP_VAL_MASK 0x00003f00L
+#define PB0_GLB_CTRL_REG1__RXDBG_CDR_PH_BYP_VAL__SHIFT 0x00000008
+#define PB0_GLB_CTRL_REG1__RXDBG_D0TH_BYP_EN_MASK 0x00004000L
+#define PB0_GLB_CTRL_REG1__RXDBG_D0TH_BYP_EN__SHIFT 0x0000000e
+#define PB0_GLB_CTRL_REG1__RXDBG_D0TH_BYP_VAL_MASK 0x003f8000L
+#define PB0_GLB_CTRL_REG1__RXDBG_D0TH_BYP_VAL__SHIFT 0x0000000f
+#define PB0_GLB_CTRL_REG1__RXDBG_D1TH_BYP_EN_MASK 0x00400000L
+#define PB0_GLB_CTRL_REG1__RXDBG_D1TH_BYP_EN__SHIFT 0x00000016
+#define PB0_GLB_CTRL_REG1__RXDBG_D1TH_BYP_VAL_MASK 0x3f800000L
+#define PB0_GLB_CTRL_REG1__RXDBG_D1TH_BYP_VAL__SHIFT 0x00000017
+#define PB0_GLB_CTRL_REG1__TST_LOSPDTST_EN_MASK 0x40000000L
+#define PB0_GLB_CTRL_REG1__TST_LOSPDTST_EN__SHIFT 0x0000001e
+#define PB0_GLB_CTRL_REG2__RXDBG_D2TH_BYP_EN_MASK 0x00000001L
+#define PB0_GLB_CTRL_REG2__RXDBG_D2TH_BYP_EN__SHIFT 0x00000000
+#define PB0_GLB_CTRL_REG2__RXDBG_D2TH_BYP_VAL_MASK 0x000000feL
+#define PB0_GLB_CTRL_REG2__RXDBG_D2TH_BYP_VAL__SHIFT 0x00000001
+#define PB0_GLB_CTRL_REG2__RXDBG_D3TH_BYP_EN_MASK 0x00000100L
+#define PB0_GLB_CTRL_REG2__RXDBG_D3TH_BYP_EN__SHIFT 0x00000008
+#define PB0_GLB_CTRL_REG2__RXDBG_D3TH_BYP_VAL_MASK 0x0000fe00L
+#define PB0_GLB_CTRL_REG2__RXDBG_D3TH_BYP_VAL__SHIFT 0x00000009
+#define PB0_GLB_CTRL_REG2__RXDBG_DXTH_BYP_EN_MASK 0x00010000L
+#define PB0_GLB_CTRL_REG2__RXDBG_DXTH_BYP_EN__SHIFT 0x00000010
+#define PB0_GLB_CTRL_REG2__RXDBG_DXTH_BYP_VAL_MASK 0x00fe0000L
+#define PB0_GLB_CTRL_REG2__RXDBG_DXTH_BYP_VAL__SHIFT 0x00000011
+#define PB0_GLB_CTRL_REG2__RXDBG_ETH_BYP_EN_MASK 0x01000000L
+#define PB0_GLB_CTRL_REG2__RXDBG_ETH_BYP_EN__SHIFT 0x00000018
+#define PB0_GLB_CTRL_REG2__RXDBG_ETH_BYP_VAL_MASK 0xfe000000L
+#define PB0_GLB_CTRL_REG2__RXDBG_ETH_BYP_VAL__SHIFT 0x00000019
+#define PB0_GLB_CTRL_REG3__BG_CFG_LC_REG_VREF0_SEL_MASK 0x00000060L
+#define PB0_GLB_CTRL_REG3__BG_CFG_LC_REG_VREF0_SEL__SHIFT 0x00000005
+#define PB0_GLB_CTRL_REG3__BG_CFG_LC_REG_VREF1_SEL_MASK 0x00000180L
+#define PB0_GLB_CTRL_REG3__BG_CFG_LC_REG_VREF1_SEL__SHIFT 0x00000007
+#define PB0_GLB_CTRL_REG3__BG_CFG_RO_REG_VREF_SEL_MASK 0x00000600L
+#define PB0_GLB_CTRL_REG3__BG_CFG_RO_REG_VREF_SEL__SHIFT 0x00000009
+#define PB0_GLB_CTRL_REG3__BG_DBG_ANALOG_SEL_MASK 0x0001c000L
+#define PB0_GLB_CTRL_REG3__BG_DBG_ANALOG_SEL__SHIFT 0x0000000e
+#define PB0_GLB_CTRL_REG3__BG_DBG_IREFBYP_EN_MASK 0x00001000L
+#define PB0_GLB_CTRL_REG3__BG_DBG_IREFBYP_EN__SHIFT 0x0000000c
+#define PB0_GLB_CTRL_REG3__BG_DBG_VREFBYP_EN_MASK 0x00000800L
+#define PB0_GLB_CTRL_REG3__BG_DBG_VREFBYP_EN__SHIFT 0x0000000b
+#define PB0_GLB_CTRL_REG3__DBG_DLL_CLK_SEL_MASK 0x001c0000L
+#define PB0_GLB_CTRL_REG3__DBG_DLL_CLK_SEL__SHIFT 0x00000012
+#define PB0_GLB_CTRL_REG3__DBG_RXLEQ_DCATTN_BYP_OVR_DISABLE_MASK 0x80000000L
+#define PB0_GLB_CTRL_REG3__DBG_RXLEQ_DCATTN_BYP_OVR_DISABLE__SHIFT 0x0000001f
+#define PB0_GLB_CTRL_REG3__DBG_RXPI_OFFSET_BYP_EN_MASK 0x00400000L
+#define PB0_GLB_CTRL_REG3__DBG_RXPI_OFFSET_BYP_EN__SHIFT 0x00000016
+#define PB0_GLB_CTRL_REG3__DBG_RXPI_OFFSET_BYP_VAL_MASK 0x07800000L
+#define PB0_GLB_CTRL_REG3__DBG_RXPI_OFFSET_BYP_VAL__SHIFT 0x00000017
+#define PB0_GLB_CTRL_REG3__DBG_RXSWAPDX_BYP_EN_MASK 0x08000000L
+#define PB0_GLB_CTRL_REG3__DBG_RXSWAPDX_BYP_EN__SHIFT 0x0000001b
+#define PB0_GLB_CTRL_REG3__DBG_RXSWAPDX_BYP_VAL_MASK 0x70000000L
+#define PB0_GLB_CTRL_REG3__DBG_RXSWAPDX_BYP_VAL__SHIFT 0x0000001c
+#define PB0_GLB_CTRL_REG3__PLL_DISPCLK_CMOS_SEL_MASK 0x00200000L
+#define PB0_GLB_CTRL_REG3__PLL_DISPCLK_CMOS_SEL__SHIFT 0x00000015
+#define PB0_GLB_CTRL_REG3__RXDBG_SEL_MASK 0x0000001fL
+#define PB0_GLB_CTRL_REG3__RXDBG_SEL__SHIFT 0x00000000
+#define PB0_GLB_CTRL_REG4__DBG_RXAPU_EXEC_MASK 0x03c00000L
+#define PB0_GLB_CTRL_REG4__DBG_RXAPU_EXEC__SHIFT 0x00000016
+#define PB0_GLB_CTRL_REG4__DBG_RXAPU_INST_MASK 0x0000ffffL
+#define PB0_GLB_CTRL_REG4__DBG_RXAPU_INST__SHIFT 0x00000000
+#define PB0_GLB_CTRL_REG4__DBG_RXDFEMUX_BYP_EN_MASK 0x00040000L
+#define PB0_GLB_CTRL_REG4__DBG_RXDFEMUX_BYP_EN__SHIFT 0x00000012
+#define PB0_GLB_CTRL_REG4__DBG_RXDFEMUX_BYP_VAL_MASK 0x00030000L
+#define PB0_GLB_CTRL_REG4__DBG_RXDFEMUX_BYP_VAL__SHIFT 0x00000010
+#define PB0_GLB_CTRL_REG4__DBG_RXDLL_VREG_REF_SEL_MASK 0x04000000L
+#define PB0_GLB_CTRL_REG4__DBG_RXDLL_VREG_REF_SEL__SHIFT 0x0000001a
+#define PB0_GLB_CTRL_REG4__DBG_RXRDATA_GATING_DISABLE_MASK 0x10000000L
+#define PB0_GLB_CTRL_REG4__DBG_RXRDATA_GATING_DISABLE__SHIFT 0x0000001c
+#define PB0_GLB_CTRL_REG4__PWRGOOD_OVRD_MASK 0x08000000L
+#define PB0_GLB_CTRL_REG4__PWRGOOD_OVRD__SHIFT 0x0000001b
+#define PB0_GLB_CTRL_REG5__DBG_RXAPU_MODE_MASK 0x000000ffL
+#define PB0_GLB_CTRL_REG5__DBG_RXAPU_MODE__SHIFT 0x00000000
+#define PB0_GLB_OVRD_REG0__TXPDTERM_VAL_OVRD_VAL_MASK 0x0000ffffL
+#define PB0_GLB_OVRD_REG0__TXPDTERM_VAL_OVRD_VAL__SHIFT 0x00000000
+#define PB0_GLB_OVRD_REG0__TXPUTERM_VAL_OVRD_VAL_MASK 0xffff0000L
+#define PB0_GLB_OVRD_REG0__TXPUTERM_VAL_OVRD_VAL__SHIFT 0x00000010
+#define PB0_GLB_OVRD_REG1__RXTERM_VAL_OVRD_EN_MASK 0x00008000L
+#define PB0_GLB_OVRD_REG1__RXTERM_VAL_OVRD_EN__SHIFT 0x0000000f
+#define PB0_GLB_OVRD_REG1__RXTERM_VAL_OVRD_VAL_MASK 0xffff0000L
+#define PB0_GLB_OVRD_REG1__RXTERM_VAL_OVRD_VAL__SHIFT 0x00000010
+#define PB0_GLB_OVRD_REG1__TST_LOSPDTST_RST_OVRD_EN_MASK 0x00000004L
+#define PB0_GLB_OVRD_REG1__TST_LOSPDTST_RST_OVRD_EN__SHIFT 0x00000002
+#define PB0_GLB_OVRD_REG1__TST_LOSPDTST_RST_OVRD_VAL_MASK 0x00000008L
+#define PB0_GLB_OVRD_REG1__TST_LOSPDTST_RST_OVRD_VAL__SHIFT 0x00000003
+#define PB0_GLB_OVRD_REG1__TXPDTERM_VAL_OVRD_EN_MASK 0x00000001L
+#define PB0_GLB_OVRD_REG1__TXPDTERM_VAL_OVRD_EN__SHIFT 0x00000000
+#define PB0_GLB_OVRD_REG1__TXPUTERM_VAL_OVRD_EN_MASK 0x00000002L
+#define PB0_GLB_OVRD_REG1__TXPUTERM_VAL_OVRD_EN__SHIFT 0x00000001
+#define PB0_GLB_OVRD_REG2__BG_PWRON_OVRD_EN_MASK 0x00000001L
+#define PB0_GLB_OVRD_REG2__BG_PWRON_OVRD_EN__SHIFT 0x00000000
+#define PB0_GLB_OVRD_REG2__BG_PWRON_OVRD_VAL_MASK 0x00000002L
+#define PB0_GLB_OVRD_REG2__BG_PWRON_OVRD_VAL__SHIFT 0x00000001
+#define PB0_GLB_SCI_STAT_OVRD_REG0__IGNR_ALL_SCI_UPDT_L0T3_MASK 0x00000001L
+#define PB0_GLB_SCI_STAT_OVRD_REG0__IGNR_ALL_SCI_UPDT_L0T3__SHIFT 0x00000000
+#define PB0_GLB_SCI_STAT_OVRD_REG0__IGNR_ALL_SCI_UPDT_L12T15_MASK 0x00000008L
+#define PB0_GLB_SCI_STAT_OVRD_REG0__IGNR_ALL_SCI_UPDT_L12T15__SHIFT 0x00000003
+#define PB0_GLB_SCI_STAT_OVRD_REG0__IGNR_ALL_SCI_UPDT_L4T7_MASK 0x00000002L
+#define PB0_GLB_SCI_STAT_OVRD_REG0__IGNR_ALL_SCI_UPDT_L4T7__SHIFT 0x00000001
+#define PB0_GLB_SCI_STAT_OVRD_REG0__IGNR_ALL_SCI_UPDT_L8T11_MASK 0x00000004L
+#define PB0_GLB_SCI_STAT_OVRD_REG0__IGNR_ALL_SCI_UPDT_L8T11__SHIFT 0x00000002
+#define PB0_GLB_SCI_STAT_OVRD_REG0__IGNR_IMPCAL_ACTIVE_SCI_UPDT_MASK 0x00000010L
+#define PB0_GLB_SCI_STAT_OVRD_REG0__IGNR_IMPCAL_ACTIVE_SCI_UPDT__SHIFT 0x00000004
+#define PB0_GLB_SCI_STAT_OVRD_REG0__IMPCAL_ACTIVE_MASK 0x00100000L
+#define PB0_GLB_SCI_STAT_OVRD_REG0__IMPCAL_ACTIVE__SHIFT 0x00000014
+#define PB0_GLB_SCI_STAT_OVRD_REG0__RXIMP_MASK 0x000f0000L
+#define PB0_GLB_SCI_STAT_OVRD_REG0__RXIMP__SHIFT 0x00000010
+#define PB0_GLB_SCI_STAT_OVRD_REG0__TXNIMP_MASK 0x00000f00L
+#define PB0_GLB_SCI_STAT_OVRD_REG0__TXNIMP__SHIFT 0x00000008
+#define PB0_GLB_SCI_STAT_OVRD_REG0__TXPIMP_MASK 0x0000f000L
+#define PB0_GLB_SCI_STAT_OVRD_REG0__TXPIMP__SHIFT 0x0000000c
+#define PB0_GLB_SCI_STAT_OVRD_REG1__DLL_LOCK_0_MASK 0x00001000L
+#define PB0_GLB_SCI_STAT_OVRD_REG1__DLL_LOCK_0__SHIFT 0x0000000c
+#define PB0_GLB_SCI_STAT_OVRD_REG1__DLL_LOCK_1_MASK 0x00002000L
+#define PB0_GLB_SCI_STAT_OVRD_REG1__DLL_LOCK_1__SHIFT 0x0000000d
+#define PB0_GLB_SCI_STAT_OVRD_REG1__DLL_LOCK_2_MASK 0x00004000L
+#define PB0_GLB_SCI_STAT_OVRD_REG1__DLL_LOCK_2__SHIFT 0x0000000e
+#define PB0_GLB_SCI_STAT_OVRD_REG1__DLL_LOCK_3_MASK 0x00008000L
+#define PB0_GLB_SCI_STAT_OVRD_REG1__DLL_LOCK_3__SHIFT 0x0000000f
+#define PB0_GLB_SCI_STAT_OVRD_REG1__FREQDIV_0_MASK 0x000c0000L
+#define PB0_GLB_SCI_STAT_OVRD_REG1__FREQDIV_0__SHIFT 0x00000012
+#define PB0_GLB_SCI_STAT_OVRD_REG1__FREQDIV_1_MASK 0x00c00000L
+#define PB0_GLB_SCI_STAT_OVRD_REG1__FREQDIV_1__SHIFT 0x00000016
+#define PB0_GLB_SCI_STAT_OVRD_REG1__FREQDIV_2_MASK 0x0c000000L
+#define PB0_GLB_SCI_STAT_OVRD_REG1__FREQDIV_2__SHIFT 0x0000001a
+#define PB0_GLB_SCI_STAT_OVRD_REG1__FREQDIV_3_MASK 0xc0000000L
+#define PB0_GLB_SCI_STAT_OVRD_REG1__FREQDIV_3__SHIFT 0x0000001e
+#define PB0_GLB_SCI_STAT_OVRD_REG1__IGNR_DLL_LOCK_SCI_UPDT_L0T3_MASK 0x00000004L
+#define PB0_GLB_SCI_STAT_OVRD_REG1__IGNR_DLL_LOCK_SCI_UPDT_L0T3__SHIFT 0x00000002
+#define PB0_GLB_SCI_STAT_OVRD_REG1__IGNR_FREQDIV_SCI_UPDT_L0T3_MASK 0x00000002L
+#define PB0_GLB_SCI_STAT_OVRD_REG1__IGNR_FREQDIV_SCI_UPDT_L0T3__SHIFT 0x00000001
+#define PB0_GLB_SCI_STAT_OVRD_REG1__IGNR_MODE_SCI_UPDT_L0T3_MASK 0x00000001L
+#define PB0_GLB_SCI_STAT_OVRD_REG1__IGNR_MODE_SCI_UPDT_L0T3__SHIFT 0x00000000
+#define PB0_GLB_SCI_STAT_OVRD_REG1__MODE_0_MASK 0x00030000L
+#define PB0_GLB_SCI_STAT_OVRD_REG1__MODE_0__SHIFT 0x00000010
+#define PB0_GLB_SCI_STAT_OVRD_REG1__MODE_1_MASK 0x00300000L
+#define PB0_GLB_SCI_STAT_OVRD_REG1__MODE_1__SHIFT 0x00000014
+#define PB0_GLB_SCI_STAT_OVRD_REG1__MODE_2_MASK 0x03000000L
+#define PB0_GLB_SCI_STAT_OVRD_REG1__MODE_2__SHIFT 0x00000018
+#define PB0_GLB_SCI_STAT_OVRD_REG1__MODE_3_MASK 0x30000000L
+#define PB0_GLB_SCI_STAT_OVRD_REG1__MODE_3__SHIFT 0x0000001c
+#define PB0_GLB_SCI_STAT_OVRD_REG2__DLL_LOCK_4_MASK 0x00001000L
+#define PB0_GLB_SCI_STAT_OVRD_REG2__DLL_LOCK_4__SHIFT 0x0000000c
+#define PB0_GLB_SCI_STAT_OVRD_REG2__DLL_LOCK_5_MASK 0x00002000L
+#define PB0_GLB_SCI_STAT_OVRD_REG2__DLL_LOCK_5__SHIFT 0x0000000d
+#define PB0_GLB_SCI_STAT_OVRD_REG2__DLL_LOCK_6_MASK 0x00004000L
+#define PB0_GLB_SCI_STAT_OVRD_REG2__DLL_LOCK_6__SHIFT 0x0000000e
+#define PB0_GLB_SCI_STAT_OVRD_REG2__DLL_LOCK_7_MASK 0x00008000L
+#define PB0_GLB_SCI_STAT_OVRD_REG2__DLL_LOCK_7__SHIFT 0x0000000f
+#define PB0_GLB_SCI_STAT_OVRD_REG2__FREQDIV_4_MASK 0x000c0000L
+#define PB0_GLB_SCI_STAT_OVRD_REG2__FREQDIV_4__SHIFT 0x00000012
+#define PB0_GLB_SCI_STAT_OVRD_REG2__FREQDIV_5_MASK 0x00c00000L
+#define PB0_GLB_SCI_STAT_OVRD_REG2__FREQDIV_5__SHIFT 0x00000016
+#define PB0_GLB_SCI_STAT_OVRD_REG2__FREQDIV_6_MASK 0x0c000000L
+#define PB0_GLB_SCI_STAT_OVRD_REG2__FREQDIV_6__SHIFT 0x0000001a
+#define PB0_GLB_SCI_STAT_OVRD_REG2__FREQDIV_7_MASK 0xc0000000L
+#define PB0_GLB_SCI_STAT_OVRD_REG2__FREQDIV_7__SHIFT 0x0000001e
+#define PB0_GLB_SCI_STAT_OVRD_REG2__IGNR_DLL_LOCK_SCI_UPDT_L4T7_MASK 0x00000004L
+#define PB0_GLB_SCI_STAT_OVRD_REG2__IGNR_DLL_LOCK_SCI_UPDT_L4T7__SHIFT 0x00000002
+#define PB0_GLB_SCI_STAT_OVRD_REG2__IGNR_FREQDIV_SCI_UPDT_L4T7_MASK 0x00000002L
+#define PB0_GLB_SCI_STAT_OVRD_REG2__IGNR_FREQDIV_SCI_UPDT_L4T7__SHIFT 0x00000001
+#define PB0_GLB_SCI_STAT_OVRD_REG2__IGNR_MODE_SCI_UPDT_L4T7_MASK 0x00000001L
+#define PB0_GLB_SCI_STAT_OVRD_REG2__IGNR_MODE_SCI_UPDT_L4T7__SHIFT 0x00000000
+#define PB0_GLB_SCI_STAT_OVRD_REG2__MODE_4_MASK 0x00030000L
+#define PB0_GLB_SCI_STAT_OVRD_REG2__MODE_4__SHIFT 0x00000010
+#define PB0_GLB_SCI_STAT_OVRD_REG2__MODE_5_MASK 0x00300000L
+#define PB0_GLB_SCI_STAT_OVRD_REG2__MODE_5__SHIFT 0x00000014
+#define PB0_GLB_SCI_STAT_OVRD_REG2__MODE_6_MASK 0x03000000L
+#define PB0_GLB_SCI_STAT_OVRD_REG2__MODE_6__SHIFT 0x00000018
+#define PB0_GLB_SCI_STAT_OVRD_REG2__MODE_7_MASK 0x30000000L
+#define PB0_GLB_SCI_STAT_OVRD_REG2__MODE_7__SHIFT 0x0000001c
+#define PB0_GLB_SCI_STAT_OVRD_REG3__DLL_LOCK_10_MASK 0x00004000L
+#define PB0_GLB_SCI_STAT_OVRD_REG3__DLL_LOCK_10__SHIFT 0x0000000e
+#define PB0_GLB_SCI_STAT_OVRD_REG3__DLL_LOCK_11_MASK 0x00008000L
+#define PB0_GLB_SCI_STAT_OVRD_REG3__DLL_LOCK_11__SHIFT 0x0000000f
+#define PB0_GLB_SCI_STAT_OVRD_REG3__DLL_LOCK_8_MASK 0x00001000L
+#define PB0_GLB_SCI_STAT_OVRD_REG3__DLL_LOCK_8__SHIFT 0x0000000c
+#define PB0_GLB_SCI_STAT_OVRD_REG3__DLL_LOCK_9_MASK 0x00002000L
+#define PB0_GLB_SCI_STAT_OVRD_REG3__DLL_LOCK_9__SHIFT 0x0000000d
+#define PB0_GLB_SCI_STAT_OVRD_REG3__FREQDIV_10_MASK 0x0c000000L
+#define PB0_GLB_SCI_STAT_OVRD_REG3__FREQDIV_10__SHIFT 0x0000001a
+#define PB0_GLB_SCI_STAT_OVRD_REG3__FREQDIV_11_MASK 0xc0000000L
+#define PB0_GLB_SCI_STAT_OVRD_REG3__FREQDIV_11__SHIFT 0x0000001e
+#define PB0_GLB_SCI_STAT_OVRD_REG3__FREQDIV_8_MASK 0x000c0000L
+#define PB0_GLB_SCI_STAT_OVRD_REG3__FREQDIV_8__SHIFT 0x00000012
+#define PB0_GLB_SCI_STAT_OVRD_REG3__FREQDIV_9_MASK 0x00c00000L
+#define PB0_GLB_SCI_STAT_OVRD_REG3__FREQDIV_9__SHIFT 0x00000016
+#define PB0_GLB_SCI_STAT_OVRD_REG3__IGNR_DLL_LOCK_SCI_UPDT_L8T11_MASK 0x00000004L
+#define PB0_GLB_SCI_STAT_OVRD_REG3__IGNR_DLL_LOCK_SCI_UPDT_L8T11__SHIFT 0x00000002
+#define PB0_GLB_SCI_STAT_OVRD_REG3__IGNR_FREQDIV_SCI_UPDT_L8T11_MASK 0x00000002L
+#define PB0_GLB_SCI_STAT_OVRD_REG3__IGNR_FREQDIV_SCI_UPDT_L8T11__SHIFT 0x00000001
+#define PB0_GLB_SCI_STAT_OVRD_REG3__IGNR_MODE_SCI_UPDT_L8T11_MASK 0x00000001L
+#define PB0_GLB_SCI_STAT_OVRD_REG3__IGNR_MODE_SCI_UPDT_L8T11__SHIFT 0x00000000
+#define PB0_GLB_SCI_STAT_OVRD_REG3__MODE_10_MASK 0x03000000L
+#define PB0_GLB_SCI_STAT_OVRD_REG3__MODE_10__SHIFT 0x00000018
+#define PB0_GLB_SCI_STAT_OVRD_REG3__MODE_11_MASK 0x30000000L
+#define PB0_GLB_SCI_STAT_OVRD_REG3__MODE_11__SHIFT 0x0000001c
+#define PB0_GLB_SCI_STAT_OVRD_REG3__MODE_8_MASK 0x00030000L
+#define PB0_GLB_SCI_STAT_OVRD_REG3__MODE_8__SHIFT 0x00000010
+#define PB0_GLB_SCI_STAT_OVRD_REG3__MODE_9_MASK 0x00300000L
+#define PB0_GLB_SCI_STAT_OVRD_REG3__MODE_9__SHIFT 0x00000014
+#define PB0_GLB_SCI_STAT_OVRD_REG4__DLL_LOCK_12_MASK 0x00001000L
+#define PB0_GLB_SCI_STAT_OVRD_REG4__DLL_LOCK_12__SHIFT 0x0000000c
+#define PB0_GLB_SCI_STAT_OVRD_REG4__DLL_LOCK_13_MASK 0x00002000L
+#define PB0_GLB_SCI_STAT_OVRD_REG4__DLL_LOCK_13__SHIFT 0x0000000d
+#define PB0_GLB_SCI_STAT_OVRD_REG4__DLL_LOCK_14_MASK 0x00004000L
+#define PB0_GLB_SCI_STAT_OVRD_REG4__DLL_LOCK_14__SHIFT 0x0000000e
+#define PB0_GLB_SCI_STAT_OVRD_REG4__DLL_LOCK_15_MASK 0x00008000L
+#define PB0_GLB_SCI_STAT_OVRD_REG4__DLL_LOCK_15__SHIFT 0x0000000f
+#define PB0_GLB_SCI_STAT_OVRD_REG4__FREQDIV_12_MASK 0x000c0000L
+#define PB0_GLB_SCI_STAT_OVRD_REG4__FREQDIV_12__SHIFT 0x00000012
+#define PB0_GLB_SCI_STAT_OVRD_REG4__FREQDIV_13_MASK 0x00c00000L
+#define PB0_GLB_SCI_STAT_OVRD_REG4__FREQDIV_13__SHIFT 0x00000016
+#define PB0_GLB_SCI_STAT_OVRD_REG4__FREQDIV_14_MASK 0x0c000000L
+#define PB0_GLB_SCI_STAT_OVRD_REG4__FREQDIV_14__SHIFT 0x0000001a
+#define PB0_GLB_SCI_STAT_OVRD_REG4__FREQDIV_15_MASK 0xc0000000L
+#define PB0_GLB_SCI_STAT_OVRD_REG4__FREQDIV_15__SHIFT 0x0000001e
+#define PB0_GLB_SCI_STAT_OVRD_REG4__IGNR_DLL_LOCK_SCI_UPDT_L12T15_MASK 0x00000004L
+#define PB0_GLB_SCI_STAT_OVRD_REG4__IGNR_DLL_LOCK_SCI_UPDT_L12T15__SHIFT 0x00000002
+#define PB0_GLB_SCI_STAT_OVRD_REG4__IGNR_FREQDIV_SCI_UPDT_L12T15_MASK 0x00000002L
+#define PB0_GLB_SCI_STAT_OVRD_REG4__IGNR_FREQDIV_SCI_UPDT_L12T15__SHIFT 0x00000001
+#define PB0_GLB_SCI_STAT_OVRD_REG4__IGNR_MODE_SCI_UPDT_L12T15_MASK 0x00000001L
+#define PB0_GLB_SCI_STAT_OVRD_REG4__IGNR_MODE_SCI_UPDT_L12T15__SHIFT 0x00000000
+#define PB0_GLB_SCI_STAT_OVRD_REG4__MODE_12_MASK 0x00030000L
+#define PB0_GLB_SCI_STAT_OVRD_REG4__MODE_12__SHIFT 0x00000010
+#define PB0_GLB_SCI_STAT_OVRD_REG4__MODE_13_MASK 0x00300000L
+#define PB0_GLB_SCI_STAT_OVRD_REG4__MODE_13__SHIFT 0x00000014
+#define PB0_GLB_SCI_STAT_OVRD_REG4__MODE_14_MASK 0x03000000L
+#define PB0_GLB_SCI_STAT_OVRD_REG4__MODE_14__SHIFT 0x00000018
+#define PB0_GLB_SCI_STAT_OVRD_REG4__MODE_15_MASK 0x30000000L
+#define PB0_GLB_SCI_STAT_OVRD_REG4__MODE_15__SHIFT 0x0000001c
+#define PB0_HW_DEBUG__PB0_HW_00_DEBUG_MASK 0x00000001L
+#define PB0_HW_DEBUG__PB0_HW_00_DEBUG__SHIFT 0x00000000
+#define PB0_HW_DEBUG__PB0_HW_01_DEBUG_MASK 0x00000002L
+#define PB0_HW_DEBUG__PB0_HW_01_DEBUG__SHIFT 0x00000001
+#define PB0_HW_DEBUG__PB0_HW_02_DEBUG_MASK 0x00000004L
+#define PB0_HW_DEBUG__PB0_HW_02_DEBUG__SHIFT 0x00000002
+#define PB0_HW_DEBUG__PB0_HW_03_DEBUG_MASK 0x00000008L
+#define PB0_HW_DEBUG__PB0_HW_03_DEBUG__SHIFT 0x00000003
+#define PB0_HW_DEBUG__PB0_HW_04_DEBUG_MASK 0x00000010L
+#define PB0_HW_DEBUG__PB0_HW_04_DEBUG__SHIFT 0x00000004
+#define PB0_HW_DEBUG__PB0_HW_05_DEBUG_MASK 0x00000020L
+#define PB0_HW_DEBUG__PB0_HW_05_DEBUG__SHIFT 0x00000005
+#define PB0_HW_DEBUG__PB0_HW_06_DEBUG_MASK 0x00000040L
+#define PB0_HW_DEBUG__PB0_HW_06_DEBUG__SHIFT 0x00000006
+#define PB0_HW_DEBUG__PB0_HW_07_DEBUG_MASK 0x00000080L
+#define PB0_HW_DEBUG__PB0_HW_07_DEBUG__SHIFT 0x00000007
+#define PB0_HW_DEBUG__PB0_HW_08_DEBUG_MASK 0x00000100L
+#define PB0_HW_DEBUG__PB0_HW_08_DEBUG__SHIFT 0x00000008
+#define PB0_HW_DEBUG__PB0_HW_09_DEBUG_MASK 0x00000200L
+#define PB0_HW_DEBUG__PB0_HW_09_DEBUG__SHIFT 0x00000009
+#define PB0_HW_DEBUG__PB0_HW_10_DEBUG_MASK 0x00000400L
+#define PB0_HW_DEBUG__PB0_HW_10_DEBUG__SHIFT 0x0000000a
+#define PB0_HW_DEBUG__PB0_HW_11_DEBUG_MASK 0x00000800L
+#define PB0_HW_DEBUG__PB0_HW_11_DEBUG__SHIFT 0x0000000b
+#define PB0_HW_DEBUG__PB0_HW_12_DEBUG_MASK 0x00001000L
+#define PB0_HW_DEBUG__PB0_HW_12_DEBUG__SHIFT 0x0000000c
+#define PB0_HW_DEBUG__PB0_HW_13_DEBUG_MASK 0x00002000L
+#define PB0_HW_DEBUG__PB0_HW_13_DEBUG__SHIFT 0x0000000d
+#define PB0_HW_DEBUG__PB0_HW_14_DEBUG_MASK 0x00004000L
+#define PB0_HW_DEBUG__PB0_HW_14_DEBUG__SHIFT 0x0000000e
+#define PB0_HW_DEBUG__PB0_HW_15_DEBUG_MASK 0x00008000L
+#define PB0_HW_DEBUG__PB0_HW_15_DEBUG__SHIFT 0x0000000f
+#define PB0_HW_DEBUG__PB0_HW_16_DEBUG_MASK 0x00010000L
+#define PB0_HW_DEBUG__PB0_HW_16_DEBUG__SHIFT 0x00000010
+#define PB0_HW_DEBUG__PB0_HW_17_DEBUG_MASK 0x00020000L
+#define PB0_HW_DEBUG__PB0_HW_17_DEBUG__SHIFT 0x00000011
+#define PB0_HW_DEBUG__PB0_HW_18_DEBUG_MASK 0x00040000L
+#define PB0_HW_DEBUG__PB0_HW_18_DEBUG__SHIFT 0x00000012
+#define PB0_HW_DEBUG__PB0_HW_19_DEBUG_MASK 0x00080000L
+#define PB0_HW_DEBUG__PB0_HW_19_DEBUG__SHIFT 0x00000013
+#define PB0_HW_DEBUG__PB0_HW_20_DEBUG_MASK 0x00100000L
+#define PB0_HW_DEBUG__PB0_HW_20_DEBUG__SHIFT 0x00000014
+#define PB0_HW_DEBUG__PB0_HW_21_DEBUG_MASK 0x00200000L
+#define PB0_HW_DEBUG__PB0_HW_21_DEBUG__SHIFT 0x00000015
+#define PB0_HW_DEBUG__PB0_HW_22_DEBUG_MASK 0x00400000L
+#define PB0_HW_DEBUG__PB0_HW_22_DEBUG__SHIFT 0x00000016
+#define PB0_HW_DEBUG__PB0_HW_23_DEBUG_MASK 0x00800000L
+#define PB0_HW_DEBUG__PB0_HW_23_DEBUG__SHIFT 0x00000017
+#define PB0_HW_DEBUG__PB0_HW_24_DEBUG_MASK 0x01000000L
+#define PB0_HW_DEBUG__PB0_HW_24_DEBUG__SHIFT 0x00000018
+#define PB0_HW_DEBUG__PB0_HW_25_DEBUG_MASK 0x02000000L
+#define PB0_HW_DEBUG__PB0_HW_25_DEBUG__SHIFT 0x00000019
+#define PB0_HW_DEBUG__PB0_HW_26_DEBUG_MASK 0x04000000L
+#define PB0_HW_DEBUG__PB0_HW_26_DEBUG__SHIFT 0x0000001a
+#define PB0_HW_DEBUG__PB0_HW_27_DEBUG_MASK 0x08000000L
+#define PB0_HW_DEBUG__PB0_HW_27_DEBUG__SHIFT 0x0000001b
+#define PB0_HW_DEBUG__PB0_HW_28_DEBUG_MASK 0x10000000L
+#define PB0_HW_DEBUG__PB0_HW_28_DEBUG__SHIFT 0x0000001c
+#define PB0_HW_DEBUG__PB0_HW_29_DEBUG_MASK 0x20000000L
+#define PB0_HW_DEBUG__PB0_HW_29_DEBUG__SHIFT 0x0000001d
+#define PB0_HW_DEBUG__PB0_HW_30_DEBUG_MASK 0x40000000L
+#define PB0_HW_DEBUG__PB0_HW_30_DEBUG__SHIFT 0x0000001e
+#define PB0_HW_DEBUG__PB0_HW_31_DEBUG_MASK 0x80000000L
+#define PB0_HW_DEBUG__PB0_HW_31_DEBUG__SHIFT 0x0000001f
+#define PB0_PIF_CNTL2__RXDETECT_OVERRIDE_EN_MASK 0x00000080L
+#define PB0_PIF_CNTL2__RXDETECT_OVERRIDE_EN__SHIFT 0x00000007
+#define PB0_PIF_CNTL2__RXDETECT_OVERRIDE_VAL_0_MASK 0x00000100L
+#define PB0_PIF_CNTL2__RXDETECT_OVERRIDE_VAL_0__SHIFT 0x00000008
+#define PB0_PIF_CNTL2__RXDETECT_OVERRIDE_VAL_10_MASK 0x00040000L
+#define PB0_PIF_CNTL2__RXDETECT_OVERRIDE_VAL_10__SHIFT 0x00000012
+#define PB0_PIF_CNTL2__RXDETECT_OVERRIDE_VAL_11_MASK 0x00080000L
+#define PB0_PIF_CNTL2__RXDETECT_OVERRIDE_VAL_11__SHIFT 0x00000013
+#define PB0_PIF_CNTL2__RXDETECT_OVERRIDE_VAL_12_MASK 0x00100000L
+#define PB0_PIF_CNTL2__RXDETECT_OVERRIDE_VAL_12__SHIFT 0x00000014
+#define PB0_PIF_CNTL2__RXDETECT_OVERRIDE_VAL_13_MASK 0x00200000L
+#define PB0_PIF_CNTL2__RXDETECT_OVERRIDE_VAL_13__SHIFT 0x00000015
+#define PB0_PIF_CNTL2__RXDETECT_OVERRIDE_VAL_14_MASK 0x00400000L
+#define PB0_PIF_CNTL2__RXDETECT_OVERRIDE_VAL_14__SHIFT 0x00000016
+#define PB0_PIF_CNTL2__RXDETECT_OVERRIDE_VAL_15_MASK 0x00800000L
+#define PB0_PIF_CNTL2__RXDETECT_OVERRIDE_VAL_15__SHIFT 0x00000017
+#define PB0_PIF_CNTL2__RXDETECT_OVERRIDE_VAL_1_MASK 0x00000200L
+#define PB0_PIF_CNTL2__RXDETECT_OVERRIDE_VAL_1__SHIFT 0x00000009
+#define PB0_PIF_CNTL2__RXDETECT_OVERRIDE_VAL_2_MASK 0x00000400L
+#define PB0_PIF_CNTL2__RXDETECT_OVERRIDE_VAL_2__SHIFT 0x0000000a
+#define PB0_PIF_CNTL2__RXDETECT_OVERRIDE_VAL_3_MASK 0x00000800L
+#define PB0_PIF_CNTL2__RXDETECT_OVERRIDE_VAL_3__SHIFT 0x0000000b
+#define PB0_PIF_CNTL2__RXDETECT_OVERRIDE_VAL_4_MASK 0x00001000L
+#define PB0_PIF_CNTL2__RXDETECT_OVERRIDE_VAL_4__SHIFT 0x0000000c
+#define PB0_PIF_CNTL2__RXDETECT_OVERRIDE_VAL_5_MASK 0x00002000L
+#define PB0_PIF_CNTL2__RXDETECT_OVERRIDE_VAL_5__SHIFT 0x0000000d
+#define PB0_PIF_CNTL2__RXDETECT_OVERRIDE_VAL_6_MASK 0x00004000L
+#define PB0_PIF_CNTL2__RXDETECT_OVERRIDE_VAL_6__SHIFT 0x0000000e
+#define PB0_PIF_CNTL2__RXDETECT_OVERRIDE_VAL_7_MASK 0x00008000L
+#define PB0_PIF_CNTL2__RXDETECT_OVERRIDE_VAL_7__SHIFT 0x0000000f
+#define PB0_PIF_CNTL2__RXDETECT_OVERRIDE_VAL_8_MASK 0x00010000L
+#define PB0_PIF_CNTL2__RXDETECT_OVERRIDE_VAL_8__SHIFT 0x00000010
+#define PB0_PIF_CNTL2__RXDETECT_OVERRIDE_VAL_9_MASK 0x00020000L
+#define PB0_PIF_CNTL2__RXDETECT_OVERRIDE_VAL_9__SHIFT 0x00000011
+#define PB0_PIF_CNTL2__RXDETECT_SAMPL_TIME_MASK 0x00000006L
+#define PB0_PIF_CNTL2__RXDETECT_SAMPL_TIME__SHIFT 0x00000001
+#define PB0_PIF_CNTL2__RXPHYSTATUS_DELAY_MASK 0x07000000L
+#define PB0_PIF_CNTL2__RXPHYSTATUS_DELAY__SHIFT 0x00000018
+#define PB0_PIF_CNTL__DA_FIFO_RESET_0_MASK 0x00000002L
+#define PB0_PIF_CNTL__DA_FIFO_RESET_0__SHIFT 0x00000001
+#define PB0_PIF_CNTL__DA_FIFO_RESET_1_MASK 0x00000020L
+#define PB0_PIF_CNTL__DA_FIFO_RESET_1__SHIFT 0x00000005
+#define PB0_PIF_CNTL__DA_FIFO_RESET_2_MASK 0x00000200L
+#define PB0_PIF_CNTL__DA_FIFO_RESET_2__SHIFT 0x00000009
+#define PB0_PIF_CNTL__DA_FIFO_RESET_3_MASK 0x00002000L
+#define PB0_PIF_CNTL__DA_FIFO_RESET_3__SHIFT 0x0000000d
+#define PB0_PIF_CNTL__DIVINIT_MODE_MASK 0x00000100L
+#define PB0_PIF_CNTL__DIVINIT_MODE__SHIFT 0x00000008
+#define PB0_PIF_CNTL__EI_CYCLE_OFF_TIME_MASK 0x00700000L
+#define PB0_PIF_CNTL__EI_CYCLE_OFF_TIME__SHIFT 0x00000014
+#define PB0_PIF_CNTL__EI_DET_CYCLE_MODE_MASK 0x00000010L
+#define PB0_PIF_CNTL__EI_DET_CYCLE_MODE__SHIFT 0x00000004
+#define PB0_PIF_CNTL__EXIT_L0S_INIT_DIS_MASK 0x00800000L
+#define PB0_PIF_CNTL__EXIT_L0S_INIT_DIS__SHIFT 0x00000017
+#define PB0_PIF_CNTL__EXTEND_WAIT_FOR_RAMPUP_MASK 0x10000000L
+#define PB0_PIF_CNTL__EXTEND_WAIT_FOR_RAMPUP__SHIFT 0x0000001c
+#define PB0_PIF_CNTL__IGNORE_TxDataValid_EP_DIS_MASK 0x20000000L
+#define PB0_PIF_CNTL__IGNORE_TxDataValid_EP_DIS__SHIFT 0x0000001d
+#define PB0_PIF_CNTL__LS2_EXIT_TIME_MASK 0x000e0000L
+#define PB0_PIF_CNTL__LS2_EXIT_TIME__SHIFT 0x00000011
+#define PB0_PIF_CNTL__PHYCMD_CR_EN_MODE_MASK 0x00000008L
+#define PB0_PIF_CNTL__PHYCMD_CR_EN_MODE__SHIFT 0x00000003
+#define PB0_PIF_CNTL__PHY_CR_EN_MODE_MASK 0x00000004L
+#define PB0_PIF_CNTL__PHY_CR_EN_MODE__SHIFT 0x00000002
+#define PB0_PIF_CNTL__PLL_BINDING_ENABLE_MASK 0x00000400L
+#define PB0_PIF_CNTL__PLL_BINDING_ENABLE__SHIFT 0x0000000a
+#define PB0_PIF_CNTL__RXDETECT_FIFO_RESET_MODE_MASK 0x00000040L
+#define PB0_PIF_CNTL__RXDETECT_FIFO_RESET_MODE__SHIFT 0x00000006
+#define PB0_PIF_CNTL__RXDETECT_TX_PWR_MODE_MASK 0x00000080L
+#define PB0_PIF_CNTL__RXDETECT_TX_PWR_MODE__SHIFT 0x00000007
+#define PB0_PIF_CNTL__RXEN_GATER_MASK 0x0f000000L
+#define PB0_PIF_CNTL__RXEN_GATER__SHIFT 0x00000018
+#define PB0_PIF_CNTL__SC_CALIB_DONE_CNTL_MASK 0x00000800L
+#define PB0_PIF_CNTL__SC_CALIB_DONE_CNTL__SHIFT 0x0000000b
+#define PB0_PIF_CNTL__SERIAL_CFG_ENABLE_MASK 0x00000001L
+#define PB0_PIF_CNTL__SERIAL_CFG_ENABLE__SHIFT 0x00000000
+#define PB0_PIF_CNTL__TXGND_TIME_MASK 0x00010000L
+#define PB0_PIF_CNTL__TXGND_TIME__SHIFT 0x00000010
+#define PB0_PIF_HW_DEBUG__PB0_PIF_HW_00_DEBUG_MASK 0x00000001L
+#define PB0_PIF_HW_DEBUG__PB0_PIF_HW_00_DEBUG__SHIFT 0x00000000
+#define PB0_PIF_HW_DEBUG__PB0_PIF_HW_01_DEBUG_MASK 0x00000002L
+#define PB0_PIF_HW_DEBUG__PB0_PIF_HW_01_DEBUG__SHIFT 0x00000001
+#define PB0_PIF_HW_DEBUG__PB0_PIF_HW_02_DEBUG_MASK 0x00000004L
+#define PB0_PIF_HW_DEBUG__PB0_PIF_HW_02_DEBUG__SHIFT 0x00000002
+#define PB0_PIF_HW_DEBUG__PB0_PIF_HW_03_DEBUG_MASK 0x00000008L
+#define PB0_PIF_HW_DEBUG__PB0_PIF_HW_03_DEBUG__SHIFT 0x00000003
+#define PB0_PIF_HW_DEBUG__PB0_PIF_HW_04_DEBUG_MASK 0x00000010L
+#define PB0_PIF_HW_DEBUG__PB0_PIF_HW_04_DEBUG__SHIFT 0x00000004
+#define PB0_PIF_HW_DEBUG__PB0_PIF_HW_05_DEBUG_MASK 0x00000020L
+#define PB0_PIF_HW_DEBUG__PB0_PIF_HW_05_DEBUG__SHIFT 0x00000005
+#define PB0_PIF_HW_DEBUG__PB0_PIF_HW_06_DEBUG_MASK 0x00000040L
+#define PB0_PIF_HW_DEBUG__PB0_PIF_HW_06_DEBUG__SHIFT 0x00000006
+#define PB0_PIF_HW_DEBUG__PB0_PIF_HW_07_DEBUG_MASK 0x00000080L
+#define PB0_PIF_HW_DEBUG__PB0_PIF_HW_07_DEBUG__SHIFT 0x00000007
+#define PB0_PIF_HW_DEBUG__PB0_PIF_HW_08_DEBUG_MASK 0x00000100L
+#define PB0_PIF_HW_DEBUG__PB0_PIF_HW_08_DEBUG__SHIFT 0x00000008
+#define PB0_PIF_HW_DEBUG__PB0_PIF_HW_09_DEBUG_MASK 0x00000200L
+#define PB0_PIF_HW_DEBUG__PB0_PIF_HW_09_DEBUG__SHIFT 0x00000009
+#define PB0_PIF_HW_DEBUG__PB0_PIF_HW_10_DEBUG_MASK 0x00000400L
+#define PB0_PIF_HW_DEBUG__PB0_PIF_HW_10_DEBUG__SHIFT 0x0000000a
+#define PB0_PIF_HW_DEBUG__PB0_PIF_HW_11_DEBUG_MASK 0x00000800L
+#define PB0_PIF_HW_DEBUG__PB0_PIF_HW_11_DEBUG__SHIFT 0x0000000b
+#define PB0_PIF_HW_DEBUG__PB0_PIF_HW_12_DEBUG_MASK 0x00001000L
+#define PB0_PIF_HW_DEBUG__PB0_PIF_HW_12_DEBUG__SHIFT 0x0000000c
+#define PB0_PIF_HW_DEBUG__PB0_PIF_HW_13_DEBUG_MASK 0x00002000L
+#define PB0_PIF_HW_DEBUG__PB0_PIF_HW_13_DEBUG__SHIFT 0x0000000d
+#define PB0_PIF_HW_DEBUG__PB0_PIF_HW_14_DEBUG_MASK 0x00004000L
+#define PB0_PIF_HW_DEBUG__PB0_PIF_HW_14_DEBUG__SHIFT 0x0000000e
+#define PB0_PIF_HW_DEBUG__PB0_PIF_HW_15_DEBUG_MASK 0x00008000L
+#define PB0_PIF_HW_DEBUG__PB0_PIF_HW_15_DEBUG__SHIFT 0x0000000f
+#define PB0_PIF_PAIRING__MULTI_PIF_MASK 0x02000000L
+#define PB0_PIF_PAIRING__MULTI_PIF__SHIFT 0x00000019
+#define PB0_PIF_PAIRING__X16_LANE_15_0_MASK 0x00100000L
+#define PB0_PIF_PAIRING__X16_LANE_15_0__SHIFT 0x00000014
+#define PB0_PIF_PAIRING__X2_LANE_1_0_MASK 0x00000001L
+#define PB0_PIF_PAIRING__X2_LANE_1_0__SHIFT 0x00000000
+#define PB0_PIF_PAIRING__X2_LANE_11_10_MASK 0x00000020L
+#define PB0_PIF_PAIRING__X2_LANE_11_10__SHIFT 0x00000005
+#define PB0_PIF_PAIRING__X2_LANE_13_12_MASK 0x00000040L
+#define PB0_PIF_PAIRING__X2_LANE_13_12__SHIFT 0x00000006
+#define PB0_PIF_PAIRING__X2_LANE_15_14_MASK 0x00000080L
+#define PB0_PIF_PAIRING__X2_LANE_15_14__SHIFT 0x00000007
+#define PB0_PIF_PAIRING__X2_LANE_3_2_MASK 0x00000002L
+#define PB0_PIF_PAIRING__X2_LANE_3_2__SHIFT 0x00000001
+#define PB0_PIF_PAIRING__X2_LANE_5_4_MASK 0x00000004L
+#define PB0_PIF_PAIRING__X2_LANE_5_4__SHIFT 0x00000002
+#define PB0_PIF_PAIRING__X2_LANE_7_6_MASK 0x00000008L
+#define PB0_PIF_PAIRING__X2_LANE_7_6__SHIFT 0x00000003
+#define PB0_PIF_PAIRING__X2_LANE_9_8_MASK 0x00000010L
+#define PB0_PIF_PAIRING__X2_LANE_9_8__SHIFT 0x00000004
+#define PB0_PIF_PAIRING__X4_LANE_11_8_MASK 0x00000400L
+#define PB0_PIF_PAIRING__X4_LANE_11_8__SHIFT 0x0000000a
+#define PB0_PIF_PAIRING__X4_LANE_15_12_MASK 0x00000800L
+#define PB0_PIF_PAIRING__X4_LANE_15_12__SHIFT 0x0000000b
+#define PB0_PIF_PAIRING__X4_LANE_3_0_MASK 0x00000100L
+#define PB0_PIF_PAIRING__X4_LANE_3_0__SHIFT 0x00000008
+#define PB0_PIF_PAIRING__X4_LANE_7_4_MASK 0x00000200L
+#define PB0_PIF_PAIRING__X4_LANE_7_4__SHIFT 0x00000009
+#define PB0_PIF_PAIRING__X8_LANE_15_8_MASK 0x00020000L
+#define PB0_PIF_PAIRING__X8_LANE_15_8__SHIFT 0x00000011
+#define PB0_PIF_PAIRING__X8_LANE_7_0_MASK 0x00010000L
+#define PB0_PIF_PAIRING__X8_LANE_7_0__SHIFT 0x00000010
+#define PB0_PIF_PDNB_OVERRIDE_0__RXEN_OVERRIDE_EN_0_MASK 0x00000100L
+#define PB0_PIF_PDNB_OVERRIDE_0__RXEN_OVERRIDE_EN_0__SHIFT 0x00000008
+#define PB0_PIF_PDNB_OVERRIDE_0__RXEN_OVERRIDE_VAL_0_MASK 0x00000200L
+#define PB0_PIF_PDNB_OVERRIDE_0__RXEN_OVERRIDE_VAL_0__SHIFT 0x00000009
+#define PB0_PIF_PDNB_OVERRIDE_0__RX_PDNB_OVERRIDE_EN_0_MASK 0x00000010L
+#define PB0_PIF_PDNB_OVERRIDE_0__RX_PDNB_OVERRIDE_EN_0__SHIFT 0x00000004
+#define PB0_PIF_PDNB_OVERRIDE_0__RX_PDNB_OVERRIDE_VAL_0_MASK 0x000000e0L
+#define PB0_PIF_PDNB_OVERRIDE_0__RX_PDNB_OVERRIDE_VAL_0__SHIFT 0x00000005
+#define PB0_PIF_PDNB_OVERRIDE_0__RXPWR_OVERRIDE_EN_0_MASK 0x00004000L
+#define PB0_PIF_PDNB_OVERRIDE_0__RXPWR_OVERRIDE_EN_0__SHIFT 0x0000000e
+#define PB0_PIF_PDNB_OVERRIDE_0__RXPWR_OVERRIDE_VAL_0_MASK 0x00038000L
+#define PB0_PIF_PDNB_OVERRIDE_0__RXPWR_OVERRIDE_VAL_0__SHIFT 0x0000000f
+#define PB0_PIF_PDNB_OVERRIDE_0__TX_PDNB_OVERRIDE_EN_0_MASK 0x00000001L
+#define PB0_PIF_PDNB_OVERRIDE_0__TX_PDNB_OVERRIDE_EN_0__SHIFT 0x00000000
+#define PB0_PIF_PDNB_OVERRIDE_0__TX_PDNB_OVERRIDE_VAL_0_MASK 0x0000000eL
+#define PB0_PIF_PDNB_OVERRIDE_0__TX_PDNB_OVERRIDE_VAL_0__SHIFT 0x00000001
+#define PB0_PIF_PDNB_OVERRIDE_0__TXPWR_OVERRIDE_EN_0_MASK 0x00000400L
+#define PB0_PIF_PDNB_OVERRIDE_0__TXPWR_OVERRIDE_EN_0__SHIFT 0x0000000a
+#define PB0_PIF_PDNB_OVERRIDE_0__TXPWR_OVERRIDE_VAL_0_MASK 0x00003800L
+#define PB0_PIF_PDNB_OVERRIDE_0__TXPWR_OVERRIDE_VAL_0__SHIFT 0x0000000b
+#define PB0_PIF_PDNB_OVERRIDE_10__RXEN_OVERRIDE_EN_10_MASK 0x00000100L
+#define PB0_PIF_PDNB_OVERRIDE_10__RXEN_OVERRIDE_EN_10__SHIFT 0x00000008
+#define PB0_PIF_PDNB_OVERRIDE_10__RXEN_OVERRIDE_VAL_10_MASK 0x00000200L
+#define PB0_PIF_PDNB_OVERRIDE_10__RXEN_OVERRIDE_VAL_10__SHIFT 0x00000009
+#define PB0_PIF_PDNB_OVERRIDE_10__RX_PDNB_OVERRIDE_EN_10_MASK 0x00000010L
+#define PB0_PIF_PDNB_OVERRIDE_10__RX_PDNB_OVERRIDE_EN_10__SHIFT 0x00000004
+#define PB0_PIF_PDNB_OVERRIDE_10__RX_PDNB_OVERRIDE_VAL_10_MASK 0x000000e0L
+#define PB0_PIF_PDNB_OVERRIDE_10__RX_PDNB_OVERRIDE_VAL_10__SHIFT 0x00000005
+#define PB0_PIF_PDNB_OVERRIDE_10__RXPWR_OVERRIDE_EN_10_MASK 0x00004000L
+#define PB0_PIF_PDNB_OVERRIDE_10__RXPWR_OVERRIDE_EN_10__SHIFT 0x0000000e
+#define PB0_PIF_PDNB_OVERRIDE_10__RXPWR_OVERRIDE_VAL_10_MASK 0x00038000L
+#define PB0_PIF_PDNB_OVERRIDE_10__RXPWR_OVERRIDE_VAL_10__SHIFT 0x0000000f
+#define PB0_PIF_PDNB_OVERRIDE_10__TX_PDNB_OVERRIDE_EN_10_MASK 0x00000001L
+#define PB0_PIF_PDNB_OVERRIDE_10__TX_PDNB_OVERRIDE_EN_10__SHIFT 0x00000000
+#define PB0_PIF_PDNB_OVERRIDE_10__TX_PDNB_OVERRIDE_VAL_10_MASK 0x0000000eL
+#define PB0_PIF_PDNB_OVERRIDE_10__TX_PDNB_OVERRIDE_VAL_10__SHIFT 0x00000001
+#define PB0_PIF_PDNB_OVERRIDE_10__TXPWR_OVERRIDE_EN_10_MASK 0x00000400L
+#define PB0_PIF_PDNB_OVERRIDE_10__TXPWR_OVERRIDE_EN_10__SHIFT 0x0000000a
+#define PB0_PIF_PDNB_OVERRIDE_10__TXPWR_OVERRIDE_VAL_10_MASK 0x00003800L
+#define PB0_PIF_PDNB_OVERRIDE_10__TXPWR_OVERRIDE_VAL_10__SHIFT 0x0000000b
+#define PB0_PIF_PDNB_OVERRIDE_11__RXEN_OVERRIDE_EN_11_MASK 0x00000100L
+#define PB0_PIF_PDNB_OVERRIDE_11__RXEN_OVERRIDE_EN_11__SHIFT 0x00000008
+#define PB0_PIF_PDNB_OVERRIDE_11__RXEN_OVERRIDE_VAL_11_MASK 0x00000200L
+#define PB0_PIF_PDNB_OVERRIDE_11__RXEN_OVERRIDE_VAL_11__SHIFT 0x00000009
+#define PB0_PIF_PDNB_OVERRIDE_11__RX_PDNB_OVERRIDE_EN_11_MASK 0x00000010L
+#define PB0_PIF_PDNB_OVERRIDE_11__RX_PDNB_OVERRIDE_EN_11__SHIFT 0x00000004
+#define PB0_PIF_PDNB_OVERRIDE_11__RX_PDNB_OVERRIDE_VAL_11_MASK 0x000000e0L
+#define PB0_PIF_PDNB_OVERRIDE_11__RX_PDNB_OVERRIDE_VAL_11__SHIFT 0x00000005
+#define PB0_PIF_PDNB_OVERRIDE_11__RXPWR_OVERRIDE_EN_11_MASK 0x00004000L
+#define PB0_PIF_PDNB_OVERRIDE_11__RXPWR_OVERRIDE_EN_11__SHIFT 0x0000000e
+#define PB0_PIF_PDNB_OVERRIDE_11__RXPWR_OVERRIDE_VAL_11_MASK 0x00038000L
+#define PB0_PIF_PDNB_OVERRIDE_11__RXPWR_OVERRIDE_VAL_11__SHIFT 0x0000000f
+#define PB0_PIF_PDNB_OVERRIDE_11__TX_PDNB_OVERRIDE_EN_11_MASK 0x00000001L
+#define PB0_PIF_PDNB_OVERRIDE_11__TX_PDNB_OVERRIDE_EN_11__SHIFT 0x00000000
+#define PB0_PIF_PDNB_OVERRIDE_11__TX_PDNB_OVERRIDE_VAL_11_MASK 0x0000000eL
+#define PB0_PIF_PDNB_OVERRIDE_11__TX_PDNB_OVERRIDE_VAL_11__SHIFT 0x00000001
+#define PB0_PIF_PDNB_OVERRIDE_11__TXPWR_OVERRIDE_EN_11_MASK 0x00000400L
+#define PB0_PIF_PDNB_OVERRIDE_11__TXPWR_OVERRIDE_EN_11__SHIFT 0x0000000a
+#define PB0_PIF_PDNB_OVERRIDE_11__TXPWR_OVERRIDE_VAL_11_MASK 0x00003800L
+#define PB0_PIF_PDNB_OVERRIDE_11__TXPWR_OVERRIDE_VAL_11__SHIFT 0x0000000b
+#define PB0_PIF_PDNB_OVERRIDE_12__RXEN_OVERRIDE_EN_12_MASK 0x00000100L
+#define PB0_PIF_PDNB_OVERRIDE_12__RXEN_OVERRIDE_EN_12__SHIFT 0x00000008
+#define PB0_PIF_PDNB_OVERRIDE_12__RXEN_OVERRIDE_VAL_12_MASK 0x00000200L
+#define PB0_PIF_PDNB_OVERRIDE_12__RXEN_OVERRIDE_VAL_12__SHIFT 0x00000009
+#define PB0_PIF_PDNB_OVERRIDE_12__RX_PDNB_OVERRIDE_EN_12_MASK 0x00000010L
+#define PB0_PIF_PDNB_OVERRIDE_12__RX_PDNB_OVERRIDE_EN_12__SHIFT 0x00000004
+#define PB0_PIF_PDNB_OVERRIDE_12__RX_PDNB_OVERRIDE_VAL_12_MASK 0x000000e0L
+#define PB0_PIF_PDNB_OVERRIDE_12__RX_PDNB_OVERRIDE_VAL_12__SHIFT 0x00000005
+#define PB0_PIF_PDNB_OVERRIDE_12__RXPWR_OVERRIDE_EN_12_MASK 0x00004000L
+#define PB0_PIF_PDNB_OVERRIDE_12__RXPWR_OVERRIDE_EN_12__SHIFT 0x0000000e
+#define PB0_PIF_PDNB_OVERRIDE_12__RXPWR_OVERRIDE_VAL_12_MASK 0x00038000L
+#define PB0_PIF_PDNB_OVERRIDE_12__RXPWR_OVERRIDE_VAL_12__SHIFT 0x0000000f
+#define PB0_PIF_PDNB_OVERRIDE_12__TX_PDNB_OVERRIDE_EN_12_MASK 0x00000001L
+#define PB0_PIF_PDNB_OVERRIDE_12__TX_PDNB_OVERRIDE_EN_12__SHIFT 0x00000000
+#define PB0_PIF_PDNB_OVERRIDE_12__TX_PDNB_OVERRIDE_VAL_12_MASK 0x0000000eL
+#define PB0_PIF_PDNB_OVERRIDE_12__TX_PDNB_OVERRIDE_VAL_12__SHIFT 0x00000001
+#define PB0_PIF_PDNB_OVERRIDE_12__TXPWR_OVERRIDE_EN_12_MASK 0x00000400L
+#define PB0_PIF_PDNB_OVERRIDE_12__TXPWR_OVERRIDE_EN_12__SHIFT 0x0000000a
+#define PB0_PIF_PDNB_OVERRIDE_12__TXPWR_OVERRIDE_VAL_12_MASK 0x00003800L
+#define PB0_PIF_PDNB_OVERRIDE_12__TXPWR_OVERRIDE_VAL_12__SHIFT 0x0000000b
+#define PB0_PIF_PDNB_OVERRIDE_13__RXEN_OVERRIDE_EN_13_MASK 0x00000100L
+#define PB0_PIF_PDNB_OVERRIDE_13__RXEN_OVERRIDE_EN_13__SHIFT 0x00000008
+#define PB0_PIF_PDNB_OVERRIDE_13__RXEN_OVERRIDE_VAL_13_MASK 0x00000200L
+#define PB0_PIF_PDNB_OVERRIDE_13__RXEN_OVERRIDE_VAL_13__SHIFT 0x00000009
+#define PB0_PIF_PDNB_OVERRIDE_13__RX_PDNB_OVERRIDE_EN_13_MASK 0x00000010L
+#define PB0_PIF_PDNB_OVERRIDE_13__RX_PDNB_OVERRIDE_EN_13__SHIFT 0x00000004
+#define PB0_PIF_PDNB_OVERRIDE_13__RX_PDNB_OVERRIDE_VAL_13_MASK 0x000000e0L
+#define PB0_PIF_PDNB_OVERRIDE_13__RX_PDNB_OVERRIDE_VAL_13__SHIFT 0x00000005
+#define PB0_PIF_PDNB_OVERRIDE_13__RXPWR_OVERRIDE_EN_13_MASK 0x00004000L
+#define PB0_PIF_PDNB_OVERRIDE_13__RXPWR_OVERRIDE_EN_13__SHIFT 0x0000000e
+#define PB0_PIF_PDNB_OVERRIDE_13__RXPWR_OVERRIDE_VAL_13_MASK 0x00038000L
+#define PB0_PIF_PDNB_OVERRIDE_13__RXPWR_OVERRIDE_VAL_13__SHIFT 0x0000000f
+#define PB0_PIF_PDNB_OVERRIDE_13__TX_PDNB_OVERRIDE_EN_13_MASK 0x00000001L
+#define PB0_PIF_PDNB_OVERRIDE_13__TX_PDNB_OVERRIDE_EN_13__SHIFT 0x00000000
+#define PB0_PIF_PDNB_OVERRIDE_13__TX_PDNB_OVERRIDE_VAL_13_MASK 0x0000000eL
+#define PB0_PIF_PDNB_OVERRIDE_13__TX_PDNB_OVERRIDE_VAL_13__SHIFT 0x00000001
+#define PB0_PIF_PDNB_OVERRIDE_13__TXPWR_OVERRIDE_EN_13_MASK 0x00000400L
+#define PB0_PIF_PDNB_OVERRIDE_13__TXPWR_OVERRIDE_EN_13__SHIFT 0x0000000a
+#define PB0_PIF_PDNB_OVERRIDE_13__TXPWR_OVERRIDE_VAL_13_MASK 0x00003800L
+#define PB0_PIF_PDNB_OVERRIDE_13__TXPWR_OVERRIDE_VAL_13__SHIFT 0x0000000b
+#define PB0_PIF_PDNB_OVERRIDE_14__RXEN_OVERRIDE_EN_14_MASK 0x00000100L
+#define PB0_PIF_PDNB_OVERRIDE_14__RXEN_OVERRIDE_EN_14__SHIFT 0x00000008
+#define PB0_PIF_PDNB_OVERRIDE_14__RXEN_OVERRIDE_VAL_14_MASK 0x00000200L
+#define PB0_PIF_PDNB_OVERRIDE_14__RXEN_OVERRIDE_VAL_14__SHIFT 0x00000009
+#define PB0_PIF_PDNB_OVERRIDE_14__RX_PDNB_OVERRIDE_EN_14_MASK 0x00000010L
+#define PB0_PIF_PDNB_OVERRIDE_14__RX_PDNB_OVERRIDE_EN_14__SHIFT 0x00000004
+#define PB0_PIF_PDNB_OVERRIDE_14__RX_PDNB_OVERRIDE_VAL_14_MASK 0x000000e0L
+#define PB0_PIF_PDNB_OVERRIDE_14__RX_PDNB_OVERRIDE_VAL_14__SHIFT 0x00000005
+#define PB0_PIF_PDNB_OVERRIDE_14__RXPWR_OVERRIDE_EN_14_MASK 0x00004000L
+#define PB0_PIF_PDNB_OVERRIDE_14__RXPWR_OVERRIDE_EN_14__SHIFT 0x0000000e
+#define PB0_PIF_PDNB_OVERRIDE_14__RXPWR_OVERRIDE_VAL_14_MASK 0x00038000L
+#define PB0_PIF_PDNB_OVERRIDE_14__RXPWR_OVERRIDE_VAL_14__SHIFT 0x0000000f
+#define PB0_PIF_PDNB_OVERRIDE_14__TX_PDNB_OVERRIDE_EN_14_MASK 0x00000001L
+#define PB0_PIF_PDNB_OVERRIDE_14__TX_PDNB_OVERRIDE_EN_14__SHIFT 0x00000000
+#define PB0_PIF_PDNB_OVERRIDE_14__TX_PDNB_OVERRIDE_VAL_14_MASK 0x0000000eL
+#define PB0_PIF_PDNB_OVERRIDE_14__TX_PDNB_OVERRIDE_VAL_14__SHIFT 0x00000001
+#define PB0_PIF_PDNB_OVERRIDE_14__TXPWR_OVERRIDE_EN_14_MASK 0x00000400L
+#define PB0_PIF_PDNB_OVERRIDE_14__TXPWR_OVERRIDE_EN_14__SHIFT 0x0000000a
+#define PB0_PIF_PDNB_OVERRIDE_14__TXPWR_OVERRIDE_VAL_14_MASK 0x00003800L
+#define PB0_PIF_PDNB_OVERRIDE_14__TXPWR_OVERRIDE_VAL_14__SHIFT 0x0000000b
+#define PB0_PIF_PDNB_OVERRIDE_15__RXEN_OVERRIDE_EN_15_MASK 0x00000100L
+#define PB0_PIF_PDNB_OVERRIDE_15__RXEN_OVERRIDE_EN_15__SHIFT 0x00000008
+#define PB0_PIF_PDNB_OVERRIDE_15__RXEN_OVERRIDE_VAL_15_MASK 0x00000200L
+#define PB0_PIF_PDNB_OVERRIDE_15__RXEN_OVERRIDE_VAL_15__SHIFT 0x00000009
+#define PB0_PIF_PDNB_OVERRIDE_15__RX_PDNB_OVERRIDE_EN_15_MASK 0x00000010L
+#define PB0_PIF_PDNB_OVERRIDE_15__RX_PDNB_OVERRIDE_EN_15__SHIFT 0x00000004
+#define PB0_PIF_PDNB_OVERRIDE_15__RX_PDNB_OVERRIDE_VAL_15_MASK 0x000000e0L
+#define PB0_PIF_PDNB_OVERRIDE_15__RX_PDNB_OVERRIDE_VAL_15__SHIFT 0x00000005
+#define PB0_PIF_PDNB_OVERRIDE_15__RXPWR_OVERRIDE_EN_15_MASK 0x00004000L
+#define PB0_PIF_PDNB_OVERRIDE_15__RXPWR_OVERRIDE_EN_15__SHIFT 0x0000000e
+#define PB0_PIF_PDNB_OVERRIDE_15__RXPWR_OVERRIDE_VAL_15_MASK 0x00038000L
+#define PB0_PIF_PDNB_OVERRIDE_15__RXPWR_OVERRIDE_VAL_15__SHIFT 0x0000000f
+#define PB0_PIF_PDNB_OVERRIDE_15__TX_PDNB_OVERRIDE_EN_15_MASK 0x00000001L
+#define PB0_PIF_PDNB_OVERRIDE_15__TX_PDNB_OVERRIDE_EN_15__SHIFT 0x00000000
+#define PB0_PIF_PDNB_OVERRIDE_15__TX_PDNB_OVERRIDE_VAL_15_MASK 0x0000000eL
+#define PB0_PIF_PDNB_OVERRIDE_15__TX_PDNB_OVERRIDE_VAL_15__SHIFT 0x00000001
+#define PB0_PIF_PDNB_OVERRIDE_15__TXPWR_OVERRIDE_EN_15_MASK 0x00000400L
+#define PB0_PIF_PDNB_OVERRIDE_15__TXPWR_OVERRIDE_EN_15__SHIFT 0x0000000a
+#define PB0_PIF_PDNB_OVERRIDE_15__TXPWR_OVERRIDE_VAL_15_MASK 0x00003800L
+#define PB0_PIF_PDNB_OVERRIDE_15__TXPWR_OVERRIDE_VAL_15__SHIFT 0x0000000b
+#define PB0_PIF_PDNB_OVERRIDE_1__RXEN_OVERRIDE_EN_1_MASK 0x00000100L
+#define PB0_PIF_PDNB_OVERRIDE_1__RXEN_OVERRIDE_EN_1__SHIFT 0x00000008
+#define PB0_PIF_PDNB_OVERRIDE_1__RXEN_OVERRIDE_VAL_1_MASK 0x00000200L
+#define PB0_PIF_PDNB_OVERRIDE_1__RXEN_OVERRIDE_VAL_1__SHIFT 0x00000009
+#define PB0_PIF_PDNB_OVERRIDE_1__RX_PDNB_OVERRIDE_EN_1_MASK 0x00000010L
+#define PB0_PIF_PDNB_OVERRIDE_1__RX_PDNB_OVERRIDE_EN_1__SHIFT 0x00000004
+#define PB0_PIF_PDNB_OVERRIDE_1__RX_PDNB_OVERRIDE_VAL_1_MASK 0x000000e0L
+#define PB0_PIF_PDNB_OVERRIDE_1__RX_PDNB_OVERRIDE_VAL_1__SHIFT 0x00000005
+#define PB0_PIF_PDNB_OVERRIDE_1__RXPWR_OVERRIDE_EN_1_MASK 0x00004000L
+#define PB0_PIF_PDNB_OVERRIDE_1__RXPWR_OVERRIDE_EN_1__SHIFT 0x0000000e
+#define PB0_PIF_PDNB_OVERRIDE_1__RXPWR_OVERRIDE_VAL_1_MASK 0x00038000L
+#define PB0_PIF_PDNB_OVERRIDE_1__RXPWR_OVERRIDE_VAL_1__SHIFT 0x0000000f
+#define PB0_PIF_PDNB_OVERRIDE_1__TX_PDNB_OVERRIDE_EN_1_MASK 0x00000001L
+#define PB0_PIF_PDNB_OVERRIDE_1__TX_PDNB_OVERRIDE_EN_1__SHIFT 0x00000000
+#define PB0_PIF_PDNB_OVERRIDE_1__TX_PDNB_OVERRIDE_VAL_1_MASK 0x0000000eL
+#define PB0_PIF_PDNB_OVERRIDE_1__TX_PDNB_OVERRIDE_VAL_1__SHIFT 0x00000001
+#define PB0_PIF_PDNB_OVERRIDE_1__TXPWR_OVERRIDE_EN_1_MASK 0x00000400L
+#define PB0_PIF_PDNB_OVERRIDE_1__TXPWR_OVERRIDE_EN_1__SHIFT 0x0000000a
+#define PB0_PIF_PDNB_OVERRIDE_1__TXPWR_OVERRIDE_VAL_1_MASK 0x00003800L
+#define PB0_PIF_PDNB_OVERRIDE_1__TXPWR_OVERRIDE_VAL_1__SHIFT 0x0000000b
+#define PB0_PIF_PDNB_OVERRIDE_2__RXEN_OVERRIDE_EN_2_MASK 0x00000100L
+#define PB0_PIF_PDNB_OVERRIDE_2__RXEN_OVERRIDE_EN_2__SHIFT 0x00000008
+#define PB0_PIF_PDNB_OVERRIDE_2__RXEN_OVERRIDE_VAL_2_MASK 0x00000200L
+#define PB0_PIF_PDNB_OVERRIDE_2__RXEN_OVERRIDE_VAL_2__SHIFT 0x00000009
+#define PB0_PIF_PDNB_OVERRIDE_2__RX_PDNB_OVERRIDE_EN_2_MASK 0x00000010L
+#define PB0_PIF_PDNB_OVERRIDE_2__RX_PDNB_OVERRIDE_EN_2__SHIFT 0x00000004
+#define PB0_PIF_PDNB_OVERRIDE_2__RX_PDNB_OVERRIDE_VAL_2_MASK 0x000000e0L
+#define PB0_PIF_PDNB_OVERRIDE_2__RX_PDNB_OVERRIDE_VAL_2__SHIFT 0x00000005
+#define PB0_PIF_PDNB_OVERRIDE_2__RXPWR_OVERRIDE_EN_2_MASK 0x00004000L
+#define PB0_PIF_PDNB_OVERRIDE_2__RXPWR_OVERRIDE_EN_2__SHIFT 0x0000000e
+#define PB0_PIF_PDNB_OVERRIDE_2__RXPWR_OVERRIDE_VAL_2_MASK 0x00038000L
+#define PB0_PIF_PDNB_OVERRIDE_2__RXPWR_OVERRIDE_VAL_2__SHIFT 0x0000000f
+#define PB0_PIF_PDNB_OVERRIDE_2__TX_PDNB_OVERRIDE_EN_2_MASK 0x00000001L
+#define PB0_PIF_PDNB_OVERRIDE_2__TX_PDNB_OVERRIDE_EN_2__SHIFT 0x00000000
+#define PB0_PIF_PDNB_OVERRIDE_2__TX_PDNB_OVERRIDE_VAL_2_MASK 0x0000000eL
+#define PB0_PIF_PDNB_OVERRIDE_2__TX_PDNB_OVERRIDE_VAL_2__SHIFT 0x00000001
+#define PB0_PIF_PDNB_OVERRIDE_2__TXPWR_OVERRIDE_EN_2_MASK 0x00000400L
+#define PB0_PIF_PDNB_OVERRIDE_2__TXPWR_OVERRIDE_EN_2__SHIFT 0x0000000a
+#define PB0_PIF_PDNB_OVERRIDE_2__TXPWR_OVERRIDE_VAL_2_MASK 0x00003800L
+#define PB0_PIF_PDNB_OVERRIDE_2__TXPWR_OVERRIDE_VAL_2__SHIFT 0x0000000b
+#define PB0_PIF_PDNB_OVERRIDE_3__RXEN_OVERRIDE_EN_3_MASK 0x00000100L
+#define PB0_PIF_PDNB_OVERRIDE_3__RXEN_OVERRIDE_EN_3__SHIFT 0x00000008
+#define PB0_PIF_PDNB_OVERRIDE_3__RXEN_OVERRIDE_VAL_3_MASK 0x00000200L
+#define PB0_PIF_PDNB_OVERRIDE_3__RXEN_OVERRIDE_VAL_3__SHIFT 0x00000009
+#define PB0_PIF_PDNB_OVERRIDE_3__RX_PDNB_OVERRIDE_EN_3_MASK 0x00000010L
+#define PB0_PIF_PDNB_OVERRIDE_3__RX_PDNB_OVERRIDE_EN_3__SHIFT 0x00000004
+#define PB0_PIF_PDNB_OVERRIDE_3__RX_PDNB_OVERRIDE_VAL_3_MASK 0x000000e0L
+#define PB0_PIF_PDNB_OVERRIDE_3__RX_PDNB_OVERRIDE_VAL_3__SHIFT 0x00000005
+#define PB0_PIF_PDNB_OVERRIDE_3__RXPWR_OVERRIDE_EN_3_MASK 0x00004000L
+#define PB0_PIF_PDNB_OVERRIDE_3__RXPWR_OVERRIDE_EN_3__SHIFT 0x0000000e
+#define PB0_PIF_PDNB_OVERRIDE_3__RXPWR_OVERRIDE_VAL_3_MASK 0x00038000L
+#define PB0_PIF_PDNB_OVERRIDE_3__RXPWR_OVERRIDE_VAL_3__SHIFT 0x0000000f
+#define PB0_PIF_PDNB_OVERRIDE_3__TX_PDNB_OVERRIDE_EN_3_MASK 0x00000001L
+#define PB0_PIF_PDNB_OVERRIDE_3__TX_PDNB_OVERRIDE_EN_3__SHIFT 0x00000000
+#define PB0_PIF_PDNB_OVERRIDE_3__TX_PDNB_OVERRIDE_VAL_3_MASK 0x0000000eL
+#define PB0_PIF_PDNB_OVERRIDE_3__TX_PDNB_OVERRIDE_VAL_3__SHIFT 0x00000001
+#define PB0_PIF_PDNB_OVERRIDE_3__TXPWR_OVERRIDE_EN_3_MASK 0x00000400L
+#define PB0_PIF_PDNB_OVERRIDE_3__TXPWR_OVERRIDE_EN_3__SHIFT 0x0000000a
+#define PB0_PIF_PDNB_OVERRIDE_3__TXPWR_OVERRIDE_VAL_3_MASK 0x00003800L
+#define PB0_PIF_PDNB_OVERRIDE_3__TXPWR_OVERRIDE_VAL_3__SHIFT 0x0000000b
+#define PB0_PIF_PDNB_OVERRIDE_4__RXEN_OVERRIDE_EN_4_MASK 0x00000100L
+#define PB0_PIF_PDNB_OVERRIDE_4__RXEN_OVERRIDE_EN_4__SHIFT 0x00000008
+#define PB0_PIF_PDNB_OVERRIDE_4__RXEN_OVERRIDE_VAL_4_MASK 0x00000200L
+#define PB0_PIF_PDNB_OVERRIDE_4__RXEN_OVERRIDE_VAL_4__SHIFT 0x00000009
+#define PB0_PIF_PDNB_OVERRIDE_4__RX_PDNB_OVERRIDE_EN_4_MASK 0x00000010L
+#define PB0_PIF_PDNB_OVERRIDE_4__RX_PDNB_OVERRIDE_EN_4__SHIFT 0x00000004
+#define PB0_PIF_PDNB_OVERRIDE_4__RX_PDNB_OVERRIDE_VAL_4_MASK 0x000000e0L
+#define PB0_PIF_PDNB_OVERRIDE_4__RX_PDNB_OVERRIDE_VAL_4__SHIFT 0x00000005
+#define PB0_PIF_PDNB_OVERRIDE_4__RXPWR_OVERRIDE_EN_4_MASK 0x00004000L
+#define PB0_PIF_PDNB_OVERRIDE_4__RXPWR_OVERRIDE_EN_4__SHIFT 0x0000000e
+#define PB0_PIF_PDNB_OVERRIDE_4__RXPWR_OVERRIDE_VAL_4_MASK 0x00038000L
+#define PB0_PIF_PDNB_OVERRIDE_4__RXPWR_OVERRIDE_VAL_4__SHIFT 0x0000000f
+#define PB0_PIF_PDNB_OVERRIDE_4__TX_PDNB_OVERRIDE_EN_4_MASK 0x00000001L
+#define PB0_PIF_PDNB_OVERRIDE_4__TX_PDNB_OVERRIDE_EN_4__SHIFT 0x00000000
+#define PB0_PIF_PDNB_OVERRIDE_4__TX_PDNB_OVERRIDE_VAL_4_MASK 0x0000000eL
+#define PB0_PIF_PDNB_OVERRIDE_4__TX_PDNB_OVERRIDE_VAL_4__SHIFT 0x00000001
+#define PB0_PIF_PDNB_OVERRIDE_4__TXPWR_OVERRIDE_EN_4_MASK 0x00000400L
+#define PB0_PIF_PDNB_OVERRIDE_4__TXPWR_OVERRIDE_EN_4__SHIFT 0x0000000a
+#define PB0_PIF_PDNB_OVERRIDE_4__TXPWR_OVERRIDE_VAL_4_MASK 0x00003800L
+#define PB0_PIF_PDNB_OVERRIDE_4__TXPWR_OVERRIDE_VAL_4__SHIFT 0x0000000b
+#define PB0_PIF_PDNB_OVERRIDE_5__RXEN_OVERRIDE_EN_5_MASK 0x00000100L
+#define PB0_PIF_PDNB_OVERRIDE_5__RXEN_OVERRIDE_EN_5__SHIFT 0x00000008
+#define PB0_PIF_PDNB_OVERRIDE_5__RXEN_OVERRIDE_VAL_5_MASK 0x00000200L
+#define PB0_PIF_PDNB_OVERRIDE_5__RXEN_OVERRIDE_VAL_5__SHIFT 0x00000009
+#define PB0_PIF_PDNB_OVERRIDE_5__RX_PDNB_OVERRIDE_EN_5_MASK 0x00000010L
+#define PB0_PIF_PDNB_OVERRIDE_5__RX_PDNB_OVERRIDE_EN_5__SHIFT 0x00000004
+#define PB0_PIF_PDNB_OVERRIDE_5__RX_PDNB_OVERRIDE_VAL_5_MASK 0x000000e0L
+#define PB0_PIF_PDNB_OVERRIDE_5__RX_PDNB_OVERRIDE_VAL_5__SHIFT 0x00000005
+#define PB0_PIF_PDNB_OVERRIDE_5__RXPWR_OVERRIDE_EN_5_MASK 0x00004000L
+#define PB0_PIF_PDNB_OVERRIDE_5__RXPWR_OVERRIDE_EN_5__SHIFT 0x0000000e
+#define PB0_PIF_PDNB_OVERRIDE_5__RXPWR_OVERRIDE_VAL_5_MASK 0x00038000L
+#define PB0_PIF_PDNB_OVERRIDE_5__RXPWR_OVERRIDE_VAL_5__SHIFT 0x0000000f
+#define PB0_PIF_PDNB_OVERRIDE_5__TX_PDNB_OVERRIDE_EN_5_MASK 0x00000001L
+#define PB0_PIF_PDNB_OVERRIDE_5__TX_PDNB_OVERRIDE_EN_5__SHIFT 0x00000000
+#define PB0_PIF_PDNB_OVERRIDE_5__TX_PDNB_OVERRIDE_VAL_5_MASK 0x0000000eL
+#define PB0_PIF_PDNB_OVERRIDE_5__TX_PDNB_OVERRIDE_VAL_5__SHIFT 0x00000001
+#define PB0_PIF_PDNB_OVERRIDE_5__TXPWR_OVERRIDE_EN_5_MASK 0x00000400L
+#define PB0_PIF_PDNB_OVERRIDE_5__TXPWR_OVERRIDE_EN_5__SHIFT 0x0000000a
+#define PB0_PIF_PDNB_OVERRIDE_5__TXPWR_OVERRIDE_VAL_5_MASK 0x00003800L
+#define PB0_PIF_PDNB_OVERRIDE_5__TXPWR_OVERRIDE_VAL_5__SHIFT 0x0000000b
+#define PB0_PIF_PDNB_OVERRIDE_6__RXEN_OVERRIDE_EN_6_MASK 0x00000100L
+#define PB0_PIF_PDNB_OVERRIDE_6__RXEN_OVERRIDE_EN_6__SHIFT 0x00000008
+#define PB0_PIF_PDNB_OVERRIDE_6__RXEN_OVERRIDE_VAL_6_MASK 0x00000200L
+#define PB0_PIF_PDNB_OVERRIDE_6__RXEN_OVERRIDE_VAL_6__SHIFT 0x00000009
+#define PB0_PIF_PDNB_OVERRIDE_6__RX_PDNB_OVERRIDE_EN_6_MASK 0x00000010L
+#define PB0_PIF_PDNB_OVERRIDE_6__RX_PDNB_OVERRIDE_EN_6__SHIFT 0x00000004
+#define PB0_PIF_PDNB_OVERRIDE_6__RX_PDNB_OVERRIDE_VAL_6_MASK 0x000000e0L
+#define PB0_PIF_PDNB_OVERRIDE_6__RX_PDNB_OVERRIDE_VAL_6__SHIFT 0x00000005
+#define PB0_PIF_PDNB_OVERRIDE_6__RXPWR_OVERRIDE_EN_6_MASK 0x00004000L
+#define PB0_PIF_PDNB_OVERRIDE_6__RXPWR_OVERRIDE_EN_6__SHIFT 0x0000000e
+#define PB0_PIF_PDNB_OVERRIDE_6__RXPWR_OVERRIDE_VAL_6_MASK 0x00038000L
+#define PB0_PIF_PDNB_OVERRIDE_6__RXPWR_OVERRIDE_VAL_6__SHIFT 0x0000000f
+#define PB0_PIF_PDNB_OVERRIDE_6__TX_PDNB_OVERRIDE_EN_6_MASK 0x00000001L
+#define PB0_PIF_PDNB_OVERRIDE_6__TX_PDNB_OVERRIDE_EN_6__SHIFT 0x00000000
+#define PB0_PIF_PDNB_OVERRIDE_6__TX_PDNB_OVERRIDE_VAL_6_MASK 0x0000000eL
+#define PB0_PIF_PDNB_OVERRIDE_6__TX_PDNB_OVERRIDE_VAL_6__SHIFT 0x00000001
+#define PB0_PIF_PDNB_OVERRIDE_6__TXPWR_OVERRIDE_EN_6_MASK 0x00000400L
+#define PB0_PIF_PDNB_OVERRIDE_6__TXPWR_OVERRIDE_EN_6__SHIFT 0x0000000a
+#define PB0_PIF_PDNB_OVERRIDE_6__TXPWR_OVERRIDE_VAL_6_MASK 0x00003800L
+#define PB0_PIF_PDNB_OVERRIDE_6__TXPWR_OVERRIDE_VAL_6__SHIFT 0x0000000b
+#define PB0_PIF_PDNB_OVERRIDE_7__RXEN_OVERRIDE_EN_7_MASK 0x00000100L
+#define PB0_PIF_PDNB_OVERRIDE_7__RXEN_OVERRIDE_EN_7__SHIFT 0x00000008
+#define PB0_PIF_PDNB_OVERRIDE_7__RXEN_OVERRIDE_VAL_7_MASK 0x00000200L
+#define PB0_PIF_PDNB_OVERRIDE_7__RXEN_OVERRIDE_VAL_7__SHIFT 0x00000009
+#define PB0_PIF_PDNB_OVERRIDE_7__RX_PDNB_OVERRIDE_EN_7_MASK 0x00000010L
+#define PB0_PIF_PDNB_OVERRIDE_7__RX_PDNB_OVERRIDE_EN_7__SHIFT 0x00000004
+#define PB0_PIF_PDNB_OVERRIDE_7__RX_PDNB_OVERRIDE_VAL_7_MASK 0x000000e0L
+#define PB0_PIF_PDNB_OVERRIDE_7__RX_PDNB_OVERRIDE_VAL_7__SHIFT 0x00000005
+#define PB0_PIF_PDNB_OVERRIDE_7__RXPWR_OVERRIDE_EN_7_MASK 0x00004000L
+#define PB0_PIF_PDNB_OVERRIDE_7__RXPWR_OVERRIDE_EN_7__SHIFT 0x0000000e
+#define PB0_PIF_PDNB_OVERRIDE_7__RXPWR_OVERRIDE_VAL_7_MASK 0x00038000L
+#define PB0_PIF_PDNB_OVERRIDE_7__RXPWR_OVERRIDE_VAL_7__SHIFT 0x0000000f
+#define PB0_PIF_PDNB_OVERRIDE_7__TX_PDNB_OVERRIDE_EN_7_MASK 0x00000001L
+#define PB0_PIF_PDNB_OVERRIDE_7__TX_PDNB_OVERRIDE_EN_7__SHIFT 0x00000000
+#define PB0_PIF_PDNB_OVERRIDE_7__TX_PDNB_OVERRIDE_VAL_7_MASK 0x0000000eL
+#define PB0_PIF_PDNB_OVERRIDE_7__TX_PDNB_OVERRIDE_VAL_7__SHIFT 0x00000001
+#define PB0_PIF_PDNB_OVERRIDE_7__TXPWR_OVERRIDE_EN_7_MASK 0x00000400L
+#define PB0_PIF_PDNB_OVERRIDE_7__TXPWR_OVERRIDE_EN_7__SHIFT 0x0000000a
+#define PB0_PIF_PDNB_OVERRIDE_7__TXPWR_OVERRIDE_VAL_7_MASK 0x00003800L
+#define PB0_PIF_PDNB_OVERRIDE_7__TXPWR_OVERRIDE_VAL_7__SHIFT 0x0000000b
+#define PB0_PIF_PDNB_OVERRIDE_8__RXEN_OVERRIDE_EN_8_MASK 0x00000100L
+#define PB0_PIF_PDNB_OVERRIDE_8__RXEN_OVERRIDE_EN_8__SHIFT 0x00000008
+#define PB0_PIF_PDNB_OVERRIDE_8__RXEN_OVERRIDE_VAL_8_MASK 0x00000200L
+#define PB0_PIF_PDNB_OVERRIDE_8__RXEN_OVERRIDE_VAL_8__SHIFT 0x00000009
+#define PB0_PIF_PDNB_OVERRIDE_8__RX_PDNB_OVERRIDE_EN_8_MASK 0x00000010L
+#define PB0_PIF_PDNB_OVERRIDE_8__RX_PDNB_OVERRIDE_EN_8__SHIFT 0x00000004
+#define PB0_PIF_PDNB_OVERRIDE_8__RX_PDNB_OVERRIDE_VAL_8_MASK 0x000000e0L
+#define PB0_PIF_PDNB_OVERRIDE_8__RX_PDNB_OVERRIDE_VAL_8__SHIFT 0x00000005
+#define PB0_PIF_PDNB_OVERRIDE_8__RXPWR_OVERRIDE_EN_8_MASK 0x00004000L
+#define PB0_PIF_PDNB_OVERRIDE_8__RXPWR_OVERRIDE_EN_8__SHIFT 0x0000000e
+#define PB0_PIF_PDNB_OVERRIDE_8__RXPWR_OVERRIDE_VAL_8_MASK 0x00038000L
+#define PB0_PIF_PDNB_OVERRIDE_8__RXPWR_OVERRIDE_VAL_8__SHIFT 0x0000000f
+#define PB0_PIF_PDNB_OVERRIDE_8__TX_PDNB_OVERRIDE_EN_8_MASK 0x00000001L
+#define PB0_PIF_PDNB_OVERRIDE_8__TX_PDNB_OVERRIDE_EN_8__SHIFT 0x00000000
+#define PB0_PIF_PDNB_OVERRIDE_8__TX_PDNB_OVERRIDE_VAL_8_MASK 0x0000000eL
+#define PB0_PIF_PDNB_OVERRIDE_8__TX_PDNB_OVERRIDE_VAL_8__SHIFT 0x00000001
+#define PB0_PIF_PDNB_OVERRIDE_8__TXPWR_OVERRIDE_EN_8_MASK 0x00000400L
+#define PB0_PIF_PDNB_OVERRIDE_8__TXPWR_OVERRIDE_EN_8__SHIFT 0x0000000a
+#define PB0_PIF_PDNB_OVERRIDE_8__TXPWR_OVERRIDE_VAL_8_MASK 0x00003800L
+#define PB0_PIF_PDNB_OVERRIDE_8__TXPWR_OVERRIDE_VAL_8__SHIFT 0x0000000b
+#define PB0_PIF_PDNB_OVERRIDE_9__RXEN_OVERRIDE_EN_9_MASK 0x00000100L
+#define PB0_PIF_PDNB_OVERRIDE_9__RXEN_OVERRIDE_EN_9__SHIFT 0x00000008
+#define PB0_PIF_PDNB_OVERRIDE_9__RXEN_OVERRIDE_VAL_9_MASK 0x00000200L
+#define PB0_PIF_PDNB_OVERRIDE_9__RXEN_OVERRIDE_VAL_9__SHIFT 0x00000009
+#define PB0_PIF_PDNB_OVERRIDE_9__RX_PDNB_OVERRIDE_EN_9_MASK 0x00000010L
+#define PB0_PIF_PDNB_OVERRIDE_9__RX_PDNB_OVERRIDE_EN_9__SHIFT 0x00000004
+#define PB0_PIF_PDNB_OVERRIDE_9__RX_PDNB_OVERRIDE_VAL_9_MASK 0x000000e0L
+#define PB0_PIF_PDNB_OVERRIDE_9__RX_PDNB_OVERRIDE_VAL_9__SHIFT 0x00000005
+#define PB0_PIF_PDNB_OVERRIDE_9__RXPWR_OVERRIDE_EN_9_MASK 0x00004000L
+#define PB0_PIF_PDNB_OVERRIDE_9__RXPWR_OVERRIDE_EN_9__SHIFT 0x0000000e
+#define PB0_PIF_PDNB_OVERRIDE_9__RXPWR_OVERRIDE_VAL_9_MASK 0x00038000L
+#define PB0_PIF_PDNB_OVERRIDE_9__RXPWR_OVERRIDE_VAL_9__SHIFT 0x0000000f
+#define PB0_PIF_PDNB_OVERRIDE_9__TX_PDNB_OVERRIDE_EN_9_MASK 0x00000001L
+#define PB0_PIF_PDNB_OVERRIDE_9__TX_PDNB_OVERRIDE_EN_9__SHIFT 0x00000000
+#define PB0_PIF_PDNB_OVERRIDE_9__TX_PDNB_OVERRIDE_VAL_9_MASK 0x0000000eL
+#define PB0_PIF_PDNB_OVERRIDE_9__TX_PDNB_OVERRIDE_VAL_9__SHIFT 0x00000001
+#define PB0_PIF_PDNB_OVERRIDE_9__TXPWR_OVERRIDE_EN_9_MASK 0x00000400L
+#define PB0_PIF_PDNB_OVERRIDE_9__TXPWR_OVERRIDE_EN_9__SHIFT 0x0000000a
+#define PB0_PIF_PDNB_OVERRIDE_9__TXPWR_OVERRIDE_VAL_9_MASK 0x00003800L
+#define PB0_PIF_PDNB_OVERRIDE_9__TXPWR_OVERRIDE_VAL_9__SHIFT 0x0000000b
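+/*
+ * Illustrative note (an added sketch; the gating behaviour is an
+ * assumption about the hardware, not stated by this header): the
+ * override registers pair each *_OVERRIDE_EN bit with a *_OVERRIDE_VAL
+ * field, where VAL presumably takes effect only while EN is set, e.g.:
+ *
+ *   v |= PB0_PIF_PDNB_OVERRIDE_0__TXPWR_OVERRIDE_EN_0_MASK;
+ *   v = (v & ~PB0_PIF_PDNB_OVERRIDE_0__TXPWR_OVERRIDE_VAL_0_MASK) |
+ *       ((1U << PB0_PIF_PDNB_OVERRIDE_0__TXPWR_OVERRIDE_VAL_0__SHIFT) &
+ *        PB0_PIF_PDNB_OVERRIDE_0__TXPWR_OVERRIDE_VAL_0_MASK);
+ */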
+#define PB0_PIF_PWRDOWN_0__FORCE_RXEN_IN_L0s_0_MASK 0x00000008L
+#define PB0_PIF_PWRDOWN_0__FORCE_RXEN_IN_L0s_0__SHIFT 0x00000003
+#define PB0_PIF_PWRDOWN_0__PLL_POWER_STATE_IN_OFF_0_MASK 0x00001c00L
+#define PB0_PIF_PWRDOWN_0__PLL_POWER_STATE_IN_OFF_0__SHIFT 0x0000000a
+#define PB0_PIF_PWRDOWN_0__PLL_POWER_STATE_IN_TXS2_0_MASK 0x00000380L
+#define PB0_PIF_PWRDOWN_0__PLL_POWER_STATE_IN_TXS2_0__SHIFT 0x00000007
+#define PB0_PIF_PWRDOWN_0__PLLPWR_OVERRIDE_EN_0_MASK 0x10000000L
+#define PB0_PIF_PWRDOWN_0__PLLPWR_OVERRIDE_EN_0__SHIFT 0x0000001c
+#define PB0_PIF_PWRDOWN_0__PLLPWR_OVERRIDE_VAL_0_MASK 0xe0000000L
+#define PB0_PIF_PWRDOWN_0__PLLPWR_OVERRIDE_VAL_0__SHIFT 0x0000001d
+#define PB0_PIF_PWRDOWN_0__PLL_RAMP_UP_TIME_0_MASK 0x07000000L
+#define PB0_PIF_PWRDOWN_0__PLL_RAMP_UP_TIME_0__SHIFT 0x00000018
+#define PB0_PIF_PWRDOWN_0__RX_POWER_STATE_IN_RXS2_0_MASK 0x00000070L
+#define PB0_PIF_PWRDOWN_0__RX_POWER_STATE_IN_RXS2_0__SHIFT 0x00000004
+#define PB0_PIF_PWRDOWN_0__TX2P5CLK_CLOCK_GATING_EN_0_MASK 0x00010000L
+#define PB0_PIF_PWRDOWN_0__TX2P5CLK_CLOCK_GATING_EN_0__SHIFT 0x00000010
+#define PB0_PIF_PWRDOWN_0__TX_POWER_STATE_IN_TXS2_0_MASK 0x00000007L
+#define PB0_PIF_PWRDOWN_0__TX_POWER_STATE_IN_TXS2_0__SHIFT 0x00000000
+#define PB0_PIF_PWRDOWN_1__FORCE_RXEN_IN_L0s_1_MASK 0x00000008L
+#define PB0_PIF_PWRDOWN_1__FORCE_RXEN_IN_L0s_1__SHIFT 0x00000003
+#define PB0_PIF_PWRDOWN_1__PLL_POWER_STATE_IN_OFF_1_MASK 0x00001c00L
+#define PB0_PIF_PWRDOWN_1__PLL_POWER_STATE_IN_OFF_1__SHIFT 0x0000000a
+#define PB0_PIF_PWRDOWN_1__PLL_POWER_STATE_IN_TXS2_1_MASK 0x00000380L
+#define PB0_PIF_PWRDOWN_1__PLL_POWER_STATE_IN_TXS2_1__SHIFT 0x00000007
+#define PB0_PIF_PWRDOWN_1__PLLPWR_OVERRIDE_EN_1_MASK 0x10000000L
+#define PB0_PIF_PWRDOWN_1__PLLPWR_OVERRIDE_EN_1__SHIFT 0x0000001c
+#define PB0_PIF_PWRDOWN_1__PLLPWR_OVERRIDE_VAL_1_MASK 0xe0000000L
+#define PB0_PIF_PWRDOWN_1__PLLPWR_OVERRIDE_VAL_1__SHIFT 0x0000001d
+#define PB0_PIF_PWRDOWN_1__PLL_RAMP_UP_TIME_1_MASK 0x07000000L
+#define PB0_PIF_PWRDOWN_1__PLL_RAMP_UP_TIME_1__SHIFT 0x00000018
+#define PB0_PIF_PWRDOWN_1__RX_POWER_STATE_IN_RXS2_1_MASK 0x00000070L
+#define PB0_PIF_PWRDOWN_1__RX_POWER_STATE_IN_RXS2_1__SHIFT 0x00000004
+#define PB0_PIF_PWRDOWN_1__TX2P5CLK_CLOCK_GATING_EN_1_MASK 0x00010000L
+#define PB0_PIF_PWRDOWN_1__TX2P5CLK_CLOCK_GATING_EN_1__SHIFT 0x00000010
+#define PB0_PIF_PWRDOWN_1__TX_POWER_STATE_IN_TXS2_1_MASK 0x00000007L
+#define PB0_PIF_PWRDOWN_1__TX_POWER_STATE_IN_TXS2_1__SHIFT 0x00000000
+#define PB0_PIF_PWRDOWN_2__FORCE_RXEN_IN_L0s_2_MASK 0x00000008L
+#define PB0_PIF_PWRDOWN_2__FORCE_RXEN_IN_L0s_2__SHIFT 0x00000003
+#define PB0_PIF_PWRDOWN_2__PLL_POWER_STATE_IN_OFF_2_MASK 0x00001c00L
+#define PB0_PIF_PWRDOWN_2__PLL_POWER_STATE_IN_OFF_2__SHIFT 0x0000000a
+#define PB0_PIF_PWRDOWN_2__PLL_POWER_STATE_IN_TXS2_2_MASK 0x00000380L
+#define PB0_PIF_PWRDOWN_2__PLL_POWER_STATE_IN_TXS2_2__SHIFT 0x00000007
+#define PB0_PIF_PWRDOWN_2__PLLPWR_OVERRIDE_EN_2_MASK 0x10000000L
+#define PB0_PIF_PWRDOWN_2__PLLPWR_OVERRIDE_EN_2__SHIFT 0x0000001c
+#define PB0_PIF_PWRDOWN_2__PLLPWR_OVERRIDE_VAL_2_MASK 0xe0000000L
+#define PB0_PIF_PWRDOWN_2__PLLPWR_OVERRIDE_VAL_2__SHIFT 0x0000001d
+#define PB0_PIF_PWRDOWN_2__PLL_RAMP_UP_TIME_2_MASK 0x07000000L
+#define PB0_PIF_PWRDOWN_2__PLL_RAMP_UP_TIME_2__SHIFT 0x00000018
+#define PB0_PIF_PWRDOWN_2__RX_POWER_STATE_IN_RXS2_2_MASK 0x00000070L
+#define PB0_PIF_PWRDOWN_2__RX_POWER_STATE_IN_RXS2_2__SHIFT 0x00000004
+#define PB0_PIF_PWRDOWN_2__TX2P5CLK_CLOCK_GATING_EN_2_MASK 0x00010000L
+#define PB0_PIF_PWRDOWN_2__TX2P5CLK_CLOCK_GATING_EN_2__SHIFT 0x00000010
+#define PB0_PIF_PWRDOWN_2__TX_POWER_STATE_IN_TXS2_2_MASK 0x00000007L
+#define PB0_PIF_PWRDOWN_2__TX_POWER_STATE_IN_TXS2_2__SHIFT 0x00000000
+#define PB0_PIF_PWRDOWN_3__FORCE_RXEN_IN_L0s_3_MASK 0x00000008L
+#define PB0_PIF_PWRDOWN_3__FORCE_RXEN_IN_L0s_3__SHIFT 0x00000003
+#define PB0_PIF_PWRDOWN_3__PLL_POWER_STATE_IN_OFF_3_MASK 0x00001c00L
+#define PB0_PIF_PWRDOWN_3__PLL_POWER_STATE_IN_OFF_3__SHIFT 0x0000000a
+#define PB0_PIF_PWRDOWN_3__PLL_POWER_STATE_IN_TXS2_3_MASK 0x00000380L
+#define PB0_PIF_PWRDOWN_3__PLL_POWER_STATE_IN_TXS2_3__SHIFT 0x00000007
+#define PB0_PIF_PWRDOWN_3__PLLPWR_OVERRIDE_EN_3_MASK 0x10000000L
+#define PB0_PIF_PWRDOWN_3__PLLPWR_OVERRIDE_EN_3__SHIFT 0x0000001c
+#define PB0_PIF_PWRDOWN_3__PLLPWR_OVERRIDE_VAL_3_MASK 0xe0000000L
+#define PB0_PIF_PWRDOWN_3__PLLPWR_OVERRIDE_VAL_3__SHIFT 0x0000001d
+#define PB0_PIF_PWRDOWN_3__PLL_RAMP_UP_TIME_3_MASK 0x07000000L
+#define PB0_PIF_PWRDOWN_3__PLL_RAMP_UP_TIME_3__SHIFT 0x00000018
+#define PB0_PIF_PWRDOWN_3__RX_POWER_STATE_IN_RXS2_3_MASK 0x00000070L
+#define PB0_PIF_PWRDOWN_3__RX_POWER_STATE_IN_RXS2_3__SHIFT 0x00000004
+#define PB0_PIF_PWRDOWN_3__TX2P5CLK_CLOCK_GATING_EN_3_MASK 0x00010000L
+#define PB0_PIF_PWRDOWN_3__TX2P5CLK_CLOCK_GATING_EN_3__SHIFT 0x00000010
+#define PB0_PIF_PWRDOWN_3__TX_POWER_STATE_IN_TXS2_3_MASK 0x00000007L
+#define PB0_PIF_PWRDOWN_3__TX_POWER_STATE_IN_TXS2_3__SHIFT 0x00000000
+#define PB0_PIF_SC_CTL__SC_CALIBRATION_MASK 0x00000001L
+#define PB0_PIF_SC_CTL__SC_CALIBRATION__SHIFT 0x00000000
+#define PB0_PIF_SC_CTL__SC_ENTER_L1_FROM_L0_MASK 0x00000020L
+#define PB0_PIF_SC_CTL__SC_ENTER_L1_FROM_L0__SHIFT 0x00000005
+#define PB0_PIF_SC_CTL__SC_ENTER_L1_FROM_L0S_MASK 0x00000010L
+#define PB0_PIF_SC_CTL__SC_ENTER_L1_FROM_L0S__SHIFT 0x00000004
+#define PB0_PIF_SC_CTL__SC_EXIT_L1_TO_L0_MASK 0x00000008L
+#define PB0_PIF_SC_CTL__SC_EXIT_L1_TO_L0__SHIFT 0x00000003
+#define PB0_PIF_SC_CTL__SC_EXIT_L1_TO_L0S_MASK 0x00000004L
+#define PB0_PIF_SC_CTL__SC_EXIT_L1_TO_L0S__SHIFT 0x00000002
+#define PB0_PIF_SC_CTL__SC_LANE_0_RESUME_MASK 0x00010000L
+#define PB0_PIF_SC_CTL__SC_LANE_0_RESUME__SHIFT 0x00000010
+#define PB0_PIF_SC_CTL__SC_LANE_10_RESUME_MASK 0x04000000L
+#define PB0_PIF_SC_CTL__SC_LANE_10_RESUME__SHIFT 0x0000001a
+#define PB0_PIF_SC_CTL__SC_LANE_11_RESUME_MASK 0x08000000L
+#define PB0_PIF_SC_CTL__SC_LANE_11_RESUME__SHIFT 0x0000001b
+#define PB0_PIF_SC_CTL__SC_LANE_12_RESUME_MASK 0x10000000L
+#define PB0_PIF_SC_CTL__SC_LANE_12_RESUME__SHIFT 0x0000001c
+#define PB0_PIF_SC_CTL__SC_LANE_13_RESUME_MASK 0x20000000L
+#define PB0_PIF_SC_CTL__SC_LANE_13_RESUME__SHIFT 0x0000001d
+#define PB0_PIF_SC_CTL__SC_LANE_14_RESUME_MASK 0x40000000L
+#define PB0_PIF_SC_CTL__SC_LANE_14_RESUME__SHIFT 0x0000001e
+#define PB0_PIF_SC_CTL__SC_LANE_15_RESUME_MASK 0x80000000L
+#define PB0_PIF_SC_CTL__SC_LANE_15_RESUME__SHIFT 0x0000001f
+#define PB0_PIF_SC_CTL__SC_LANE_1_RESUME_MASK 0x00020000L
+#define PB0_PIF_SC_CTL__SC_LANE_1_RESUME__SHIFT 0x00000011
+#define PB0_PIF_SC_CTL__SC_LANE_2_RESUME_MASK 0x00040000L
+#define PB0_PIF_SC_CTL__SC_LANE_2_RESUME__SHIFT 0x00000012
+#define PB0_PIF_SC_CTL__SC_LANE_3_RESUME_MASK 0x00080000L
+#define PB0_PIF_SC_CTL__SC_LANE_3_RESUME__SHIFT 0x00000013
+#define PB0_PIF_SC_CTL__SC_LANE_4_RESUME_MASK 0x00100000L
+#define PB0_PIF_SC_CTL__SC_LANE_4_RESUME__SHIFT 0x00000014
+#define PB0_PIF_SC_CTL__SC_LANE_5_RESUME_MASK 0x00200000L
+#define PB0_PIF_SC_CTL__SC_LANE_5_RESUME__SHIFT 0x00000015
+#define PB0_PIF_SC_CTL__SC_LANE_6_RESUME_MASK 0x00400000L
+#define PB0_PIF_SC_CTL__SC_LANE_6_RESUME__SHIFT 0x00000016
+#define PB0_PIF_SC_CTL__SC_LANE_7_RESUME_MASK 0x00800000L
+#define PB0_PIF_SC_CTL__SC_LANE_7_RESUME__SHIFT 0x00000017
+#define PB0_PIF_SC_CTL__SC_LANE_8_RESUME_MASK 0x01000000L
+#define PB0_PIF_SC_CTL__SC_LANE_8_RESUME__SHIFT 0x00000018
+#define PB0_PIF_SC_CTL__SC_LANE_9_RESUME_MASK 0x02000000L
+#define PB0_PIF_SC_CTL__SC_LANE_9_RESUME__SHIFT 0x00000019
+#define PB0_PIF_SC_CTL__SC_PHASE_1_MASK 0x00000100L
+#define PB0_PIF_SC_CTL__SC_PHASE_1__SHIFT 0x00000008
+#define PB0_PIF_SC_CTL__SC_PHASE_2_MASK 0x00000200L
+#define PB0_PIF_SC_CTL__SC_PHASE_2__SHIFT 0x00000009
+#define PB0_PIF_SC_CTL__SC_PHASE_3_MASK 0x00000400L
+#define PB0_PIF_SC_CTL__SC_PHASE_3__SHIFT 0x0000000a
+#define PB0_PIF_SC_CTL__SC_PHASE_4_MASK 0x00000800L
+#define PB0_PIF_SC_CTL__SC_PHASE_4__SHIFT 0x0000000b
+#define PB0_PIF_SC_CTL__SC_PHASE_5_MASK 0x00001000L
+#define PB0_PIF_SC_CTL__SC_PHASE_5__SHIFT 0x0000000c
+#define PB0_PIF_SC_CTL__SC_PHASE_6_MASK 0x00002000L
+#define PB0_PIF_SC_CTL__SC_PHASE_6__SHIFT 0x0000000d
+#define PB0_PIF_SC_CTL__SC_PHASE_7_MASK 0x00004000L
+#define PB0_PIF_SC_CTL__SC_PHASE_7__SHIFT 0x0000000e
+#define PB0_PIF_SC_CTL__SC_PHASE_8_MASK 0x00008000L
+#define PB0_PIF_SC_CTL__SC_PHASE_8__SHIFT 0x0000000f
+#define PB0_PIF_SC_CTL__SC_RXDETECT_MASK 0x00000002L
+#define PB0_PIF_SC_CTL__SC_RXDETECT__SHIFT 0x00000001
+#define PB0_PIF_SC_CTL__SC_SPEED_CHANGE_MASK 0x00000040L
+#define PB0_PIF_SC_CTL__SC_SPEED_CHANGE__SHIFT 0x00000006
+#define PB0_PIF_SCRATCH__PIF_SCRATCH_MASK 0xffffffffL
+#define PB0_PIF_SCRATCH__PIF_SCRATCH__SHIFT 0x00000000
+#define PB0_PIF_SEQ_STATUS_0__SEQ_CALIBRATION_0_MASK 0x00000001L
+#define PB0_PIF_SEQ_STATUS_0__SEQ_CALIBRATION_0__SHIFT 0x00000000
+#define PB0_PIF_SEQ_STATUS_0__SEQ_ENTER_L1_FROM_L0_0_MASK 0x00000020L
+#define PB0_PIF_SEQ_STATUS_0__SEQ_ENTER_L1_FROM_L0_0__SHIFT 0x00000005
+#define PB0_PIF_SEQ_STATUS_0__SEQ_ENTER_L1_FROM_L0S_0_MASK 0x00000010L
+#define PB0_PIF_SEQ_STATUS_0__SEQ_ENTER_L1_FROM_L0S_0__SHIFT 0x00000004
+#define PB0_PIF_SEQ_STATUS_0__SEQ_EXIT_L1_TO_L0_0_MASK 0x00000008L
+#define PB0_PIF_SEQ_STATUS_0__SEQ_EXIT_L1_TO_L0_0__SHIFT 0x00000003
+#define PB0_PIF_SEQ_STATUS_0__SEQ_EXIT_L1_TO_L0S_0_MASK 0x00000004L
+#define PB0_PIF_SEQ_STATUS_0__SEQ_EXIT_L1_TO_L0S_0__SHIFT 0x00000002
+#define PB0_PIF_SEQ_STATUS_0__SEQ_PHASE_0_MASK 0x00000700L
+#define PB0_PIF_SEQ_STATUS_0__SEQ_PHASE_0__SHIFT 0x00000008
+#define PB0_PIF_SEQ_STATUS_0__SEQ_RXDETECT_0_MASK 0x00000002L
+#define PB0_PIF_SEQ_STATUS_0__SEQ_RXDETECT_0__SHIFT 0x00000001
+#define PB0_PIF_SEQ_STATUS_0__SEQ_SPEED_CHANGE_0_MASK 0x00000040L
+#define PB0_PIF_SEQ_STATUS_0__SEQ_SPEED_CHANGE_0__SHIFT 0x00000006
+#define PB0_PIF_SEQ_STATUS_10__SEQ_CALIBRATION_10_MASK 0x00000001L
+#define PB0_PIF_SEQ_STATUS_10__SEQ_CALIBRATION_10__SHIFT 0x00000000
+#define PB0_PIF_SEQ_STATUS_10__SEQ_ENTER_L1_FROM_L0_10_MASK 0x00000020L
+#define PB0_PIF_SEQ_STATUS_10__SEQ_ENTER_L1_FROM_L0_10__SHIFT 0x00000005
+#define PB0_PIF_SEQ_STATUS_10__SEQ_ENTER_L1_FROM_L0S_10_MASK 0x00000010L
+#define PB0_PIF_SEQ_STATUS_10__SEQ_ENTER_L1_FROM_L0S_10__SHIFT 0x00000004
+#define PB0_PIF_SEQ_STATUS_10__SEQ_EXIT_L1_TO_L0_10_MASK 0x00000008L
+#define PB0_PIF_SEQ_STATUS_10__SEQ_EXIT_L1_TO_L0_10__SHIFT 0x00000003
+#define PB0_PIF_SEQ_STATUS_10__SEQ_EXIT_L1_TO_L0S_10_MASK 0x00000004L
+#define PB0_PIF_SEQ_STATUS_10__SEQ_EXIT_L1_TO_L0S_10__SHIFT 0x00000002
+#define PB0_PIF_SEQ_STATUS_10__SEQ_PHASE_10_MASK 0x00000700L
+#define PB0_PIF_SEQ_STATUS_10__SEQ_PHASE_10__SHIFT 0x00000008
+#define PB0_PIF_SEQ_STATUS_10__SEQ_RXDETECT_10_MASK 0x00000002L
+#define PB0_PIF_SEQ_STATUS_10__SEQ_RXDETECT_10__SHIFT 0x00000001
+#define PB0_PIF_SEQ_STATUS_10__SEQ_SPEED_CHANGE_10_MASK 0x00000040L
+#define PB0_PIF_SEQ_STATUS_10__SEQ_SPEED_CHANGE_10__SHIFT 0x00000006
+#define PB0_PIF_SEQ_STATUS_11__SEQ_CALIBRATION_11_MASK 0x00000001L
+#define PB0_PIF_SEQ_STATUS_11__SEQ_CALIBRATION_11__SHIFT 0x00000000
+#define PB0_PIF_SEQ_STATUS_11__SEQ_ENTER_L1_FROM_L0_11_MASK 0x00000020L
+#define PB0_PIF_SEQ_STATUS_11__SEQ_ENTER_L1_FROM_L0_11__SHIFT 0x00000005
+#define PB0_PIF_SEQ_STATUS_11__SEQ_ENTER_L1_FROM_L0S_11_MASK 0x00000010L
+#define PB0_PIF_SEQ_STATUS_11__SEQ_ENTER_L1_FROM_L0S_11__SHIFT 0x00000004
+#define PB0_PIF_SEQ_STATUS_11__SEQ_EXIT_L1_TO_L0_11_MASK 0x00000008L
+#define PB0_PIF_SEQ_STATUS_11__SEQ_EXIT_L1_TO_L0_11__SHIFT 0x00000003
+#define PB0_PIF_SEQ_STATUS_11__SEQ_EXIT_L1_TO_L0S_11_MASK 0x00000004L
+#define PB0_PIF_SEQ_STATUS_11__SEQ_EXIT_L1_TO_L0S_11__SHIFT 0x00000002
+#define PB0_PIF_SEQ_STATUS_11__SEQ_PHASE_11_MASK 0x00000700L
+#define PB0_PIF_SEQ_STATUS_11__SEQ_PHASE_11__SHIFT 0x00000008
+#define PB0_PIF_SEQ_STATUS_11__SEQ_RXDETECT_11_MASK 0x00000002L
+#define PB0_PIF_SEQ_STATUS_11__SEQ_RXDETECT_11__SHIFT 0x00000001
+#define PB0_PIF_SEQ_STATUS_11__SEQ_SPEED_CHANGE_11_MASK 0x00000040L
+#define PB0_PIF_SEQ_STATUS_11__SEQ_SPEED_CHANGE_11__SHIFT 0x00000006
+#define PB0_PIF_SEQ_STATUS_12__SEQ_CALIBRATION_12_MASK 0x00000001L
+#define PB0_PIF_SEQ_STATUS_12__SEQ_CALIBRATION_12__SHIFT 0x00000000
+#define PB0_PIF_SEQ_STATUS_12__SEQ_ENTER_L1_FROM_L0_12_MASK 0x00000020L
+#define PB0_PIF_SEQ_STATUS_12__SEQ_ENTER_L1_FROM_L0_12__SHIFT 0x00000005
+#define PB0_PIF_SEQ_STATUS_12__SEQ_ENTER_L1_FROM_L0S_12_MASK 0x00000010L
+#define PB0_PIF_SEQ_STATUS_12__SEQ_ENTER_L1_FROM_L0S_12__SHIFT 0x00000004
+#define PB0_PIF_SEQ_STATUS_12__SEQ_EXIT_L1_TO_L0_12_MASK 0x00000008L
+#define PB0_PIF_SEQ_STATUS_12__SEQ_EXIT_L1_TO_L0_12__SHIFT 0x00000003
+#define PB0_PIF_SEQ_STATUS_12__SEQ_EXIT_L1_TO_L0S_12_MASK 0x00000004L
+#define PB0_PIF_SEQ_STATUS_12__SEQ_EXIT_L1_TO_L0S_12__SHIFT 0x00000002
+#define PB0_PIF_SEQ_STATUS_12__SEQ_PHASE_12_MASK 0x00000700L
+#define PB0_PIF_SEQ_STATUS_12__SEQ_PHASE_12__SHIFT 0x00000008
+#define PB0_PIF_SEQ_STATUS_12__SEQ_RXDETECT_12_MASK 0x00000002L
+#define PB0_PIF_SEQ_STATUS_12__SEQ_RXDETECT_12__SHIFT 0x00000001
+#define PB0_PIF_SEQ_STATUS_12__SEQ_SPEED_CHANGE_12_MASK 0x00000040L
+#define PB0_PIF_SEQ_STATUS_12__SEQ_SPEED_CHANGE_12__SHIFT 0x00000006
+#define PB0_PIF_SEQ_STATUS_13__SEQ_CALIBRATION_13_MASK 0x00000001L
+#define PB0_PIF_SEQ_STATUS_13__SEQ_CALIBRATION_13__SHIFT 0x00000000
+#define PB0_PIF_SEQ_STATUS_13__SEQ_ENTER_L1_FROM_L0_13_MASK 0x00000020L
+#define PB0_PIF_SEQ_STATUS_13__SEQ_ENTER_L1_FROM_L0_13__SHIFT 0x00000005
+#define PB0_PIF_SEQ_STATUS_13__SEQ_ENTER_L1_FROM_L0S_13_MASK 0x00000010L
+#define PB0_PIF_SEQ_STATUS_13__SEQ_ENTER_L1_FROM_L0S_13__SHIFT 0x00000004
+#define PB0_PIF_SEQ_STATUS_13__SEQ_EXIT_L1_TO_L0_13_MASK 0x00000008L
+#define PB0_PIF_SEQ_STATUS_13__SEQ_EXIT_L1_TO_L0_13__SHIFT 0x00000003
+#define PB0_PIF_SEQ_STATUS_13__SEQ_EXIT_L1_TO_L0S_13_MASK 0x00000004L
+#define PB0_PIF_SEQ_STATUS_13__SEQ_EXIT_L1_TO_L0S_13__SHIFT 0x00000002
+#define PB0_PIF_SEQ_STATUS_13__SEQ_PHASE_13_MASK 0x00000700L
+#define PB0_PIF_SEQ_STATUS_13__SEQ_PHASE_13__SHIFT 0x00000008
+#define PB0_PIF_SEQ_STATUS_13__SEQ_RXDETECT_13_MASK 0x00000002L
+#define PB0_PIF_SEQ_STATUS_13__SEQ_RXDETECT_13__SHIFT 0x00000001
+#define PB0_PIF_SEQ_STATUS_13__SEQ_SPEED_CHANGE_13_MASK 0x00000040L
+#define PB0_PIF_SEQ_STATUS_13__SEQ_SPEED_CHANGE_13__SHIFT 0x00000006
+#define PB0_PIF_SEQ_STATUS_14__SEQ_CALIBRATION_14_MASK 0x00000001L
+#define PB0_PIF_SEQ_STATUS_14__SEQ_CALIBRATION_14__SHIFT 0x00000000
+#define PB0_PIF_SEQ_STATUS_14__SEQ_ENTER_L1_FROM_L0_14_MASK 0x00000020L
+#define PB0_PIF_SEQ_STATUS_14__SEQ_ENTER_L1_FROM_L0_14__SHIFT 0x00000005
+#define PB0_PIF_SEQ_STATUS_14__SEQ_ENTER_L1_FROM_L0S_14_MASK 0x00000010L
+#define PB0_PIF_SEQ_STATUS_14__SEQ_ENTER_L1_FROM_L0S_14__SHIFT 0x00000004
+#define PB0_PIF_SEQ_STATUS_14__SEQ_EXIT_L1_TO_L0_14_MASK 0x00000008L
+#define PB0_PIF_SEQ_STATUS_14__SEQ_EXIT_L1_TO_L0_14__SHIFT 0x00000003
+#define PB0_PIF_SEQ_STATUS_14__SEQ_EXIT_L1_TO_L0S_14_MASK 0x00000004L
+#define PB0_PIF_SEQ_STATUS_14__SEQ_EXIT_L1_TO_L0S_14__SHIFT 0x00000002
+#define PB0_PIF_SEQ_STATUS_14__SEQ_PHASE_14_MASK 0x00000700L
+#define PB0_PIF_SEQ_STATUS_14__SEQ_PHASE_14__SHIFT 0x00000008
+#define PB0_PIF_SEQ_STATUS_14__SEQ_RXDETECT_14_MASK 0x00000002L
+#define PB0_PIF_SEQ_STATUS_14__SEQ_RXDETECT_14__SHIFT 0x00000001
+#define PB0_PIF_SEQ_STATUS_14__SEQ_SPEED_CHANGE_14_MASK 0x00000040L
+#define PB0_PIF_SEQ_STATUS_14__SEQ_SPEED_CHANGE_14__SHIFT 0x00000006
+#define PB0_PIF_SEQ_STATUS_15__SEQ_CALIBRATION_15_MASK 0x00000001L
+#define PB0_PIF_SEQ_STATUS_15__SEQ_CALIBRATION_15__SHIFT 0x00000000
+#define PB0_PIF_SEQ_STATUS_15__SEQ_ENTER_L1_FROM_L0_15_MASK 0x00000020L
+#define PB0_PIF_SEQ_STATUS_15__SEQ_ENTER_L1_FROM_L0_15__SHIFT 0x00000005
+#define PB0_PIF_SEQ_STATUS_15__SEQ_ENTER_L1_FROM_L0S_15_MASK 0x00000010L
+#define PB0_PIF_SEQ_STATUS_15__SEQ_ENTER_L1_FROM_L0S_15__SHIFT 0x00000004
+#define PB0_PIF_SEQ_STATUS_15__SEQ_EXIT_L1_TO_L0_15_MASK 0x00000008L
+#define PB0_PIF_SEQ_STATUS_15__SEQ_EXIT_L1_TO_L0_15__SHIFT 0x00000003
+#define PB0_PIF_SEQ_STATUS_15__SEQ_EXIT_L1_TO_L0S_15_MASK 0x00000004L
+#define PB0_PIF_SEQ_STATUS_15__SEQ_EXIT_L1_TO_L0S_15__SHIFT 0x00000002
+#define PB0_PIF_SEQ_STATUS_15__SEQ_PHASE_15_MASK 0x00000700L
+#define PB0_PIF_SEQ_STATUS_15__SEQ_PHASE_15__SHIFT 0x00000008
+#define PB0_PIF_SEQ_STATUS_15__SEQ_RXDETECT_15_MASK 0x00000002L
+#define PB0_PIF_SEQ_STATUS_15__SEQ_RXDETECT_15__SHIFT 0x00000001
+#define PB0_PIF_SEQ_STATUS_15__SEQ_SPEED_CHANGE_15_MASK 0x00000040L
+#define PB0_PIF_SEQ_STATUS_15__SEQ_SPEED_CHANGE_15__SHIFT 0x00000006
+#define PB0_PIF_SEQ_STATUS_1__SEQ_CALIBRATION_1_MASK 0x00000001L
+#define PB0_PIF_SEQ_STATUS_1__SEQ_CALIBRATION_1__SHIFT 0x00000000
+#define PB0_PIF_SEQ_STATUS_1__SEQ_ENTER_L1_FROM_L0_1_MASK 0x00000020L
+#define PB0_PIF_SEQ_STATUS_1__SEQ_ENTER_L1_FROM_L0_1__SHIFT 0x00000005
+#define PB0_PIF_SEQ_STATUS_1__SEQ_ENTER_L1_FROM_L0S_1_MASK 0x00000010L
+#define PB0_PIF_SEQ_STATUS_1__SEQ_ENTER_L1_FROM_L0S_1__SHIFT 0x00000004
+#define PB0_PIF_SEQ_STATUS_1__SEQ_EXIT_L1_TO_L0_1_MASK 0x00000008L
+#define PB0_PIF_SEQ_STATUS_1__SEQ_EXIT_L1_TO_L0_1__SHIFT 0x00000003
+#define PB0_PIF_SEQ_STATUS_1__SEQ_EXIT_L1_TO_L0S_1_MASK 0x00000004L
+#define PB0_PIF_SEQ_STATUS_1__SEQ_EXIT_L1_TO_L0S_1__SHIFT 0x00000002
+#define PB0_PIF_SEQ_STATUS_1__SEQ_PHASE_1_MASK 0x00000700L
+#define PB0_PIF_SEQ_STATUS_1__SEQ_PHASE_1__SHIFT 0x00000008
+#define PB0_PIF_SEQ_STATUS_1__SEQ_RXDETECT_1_MASK 0x00000002L
+#define PB0_PIF_SEQ_STATUS_1__SEQ_RXDETECT_1__SHIFT 0x00000001
+#define PB0_PIF_SEQ_STATUS_1__SEQ_SPEED_CHANGE_1_MASK 0x00000040L
+#define PB0_PIF_SEQ_STATUS_1__SEQ_SPEED_CHANGE_1__SHIFT 0x00000006
+#define PB0_PIF_SEQ_STATUS_2__SEQ_CALIBRATION_2_MASK 0x00000001L
+#define PB0_PIF_SEQ_STATUS_2__SEQ_CALIBRATION_2__SHIFT 0x00000000
+#define PB0_PIF_SEQ_STATUS_2__SEQ_ENTER_L1_FROM_L0_2_MASK 0x00000020L
+#define PB0_PIF_SEQ_STATUS_2__SEQ_ENTER_L1_FROM_L0_2__SHIFT 0x00000005
+#define PB0_PIF_SEQ_STATUS_2__SEQ_ENTER_L1_FROM_L0S_2_MASK 0x00000010L
+#define PB0_PIF_SEQ_STATUS_2__SEQ_ENTER_L1_FROM_L0S_2__SHIFT 0x00000004
+#define PB0_PIF_SEQ_STATUS_2__SEQ_EXIT_L1_TO_L0_2_MASK 0x00000008L
+#define PB0_PIF_SEQ_STATUS_2__SEQ_EXIT_L1_TO_L0_2__SHIFT 0x00000003
+#define PB0_PIF_SEQ_STATUS_2__SEQ_EXIT_L1_TO_L0S_2_MASK 0x00000004L
+#define PB0_PIF_SEQ_STATUS_2__SEQ_EXIT_L1_TO_L0S_2__SHIFT 0x00000002
+#define PB0_PIF_SEQ_STATUS_2__SEQ_PHASE_2_MASK 0x00000700L
+#define PB0_PIF_SEQ_STATUS_2__SEQ_PHASE_2__SHIFT 0x00000008
+#define PB0_PIF_SEQ_STATUS_2__SEQ_RXDETECT_2_MASK 0x00000002L
+#define PB0_PIF_SEQ_STATUS_2__SEQ_RXDETECT_2__SHIFT 0x00000001
+#define PB0_PIF_SEQ_STATUS_2__SEQ_SPEED_CHANGE_2_MASK 0x00000040L
+#define PB0_PIF_SEQ_STATUS_2__SEQ_SPEED_CHANGE_2__SHIFT 0x00000006
+#define PB0_PIF_SEQ_STATUS_3__SEQ_CALIBRATION_3_MASK 0x00000001L
+#define PB0_PIF_SEQ_STATUS_3__SEQ_CALIBRATION_3__SHIFT 0x00000000
+#define PB0_PIF_SEQ_STATUS_3__SEQ_ENTER_L1_FROM_L0_3_MASK 0x00000020L
+#define PB0_PIF_SEQ_STATUS_3__SEQ_ENTER_L1_FROM_L0_3__SHIFT 0x00000005
+#define PB0_PIF_SEQ_STATUS_3__SEQ_ENTER_L1_FROM_L0S_3_MASK 0x00000010L
+#define PB0_PIF_SEQ_STATUS_3__SEQ_ENTER_L1_FROM_L0S_3__SHIFT 0x00000004
+#define PB0_PIF_SEQ_STATUS_3__SEQ_EXIT_L1_TO_L0_3_MASK 0x00000008L
+#define PB0_PIF_SEQ_STATUS_3__SEQ_EXIT_L1_TO_L0_3__SHIFT 0x00000003
+#define PB0_PIF_SEQ_STATUS_3__SEQ_EXIT_L1_TO_L0S_3_MASK 0x00000004L
+#define PB0_PIF_SEQ_STATUS_3__SEQ_EXIT_L1_TO_L0S_3__SHIFT 0x00000002
+#define PB0_PIF_SEQ_STATUS_3__SEQ_PHASE_3_MASK 0x00000700L
+#define PB0_PIF_SEQ_STATUS_3__SEQ_PHASE_3__SHIFT 0x00000008
+#define PB0_PIF_SEQ_STATUS_3__SEQ_RXDETECT_3_MASK 0x00000002L
+#define PB0_PIF_SEQ_STATUS_3__SEQ_RXDETECT_3__SHIFT 0x00000001
+#define PB0_PIF_SEQ_STATUS_3__SEQ_SPEED_CHANGE_3_MASK 0x00000040L
+#define PB0_PIF_SEQ_STATUS_3__SEQ_SPEED_CHANGE_3__SHIFT 0x00000006
+#define PB0_PIF_SEQ_STATUS_4__SEQ_CALIBRATION_4_MASK 0x00000001L
+#define PB0_PIF_SEQ_STATUS_4__SEQ_CALIBRATION_4__SHIFT 0x00000000
+#define PB0_PIF_SEQ_STATUS_4__SEQ_ENTER_L1_FROM_L0_4_MASK 0x00000020L
+#define PB0_PIF_SEQ_STATUS_4__SEQ_ENTER_L1_FROM_L0_4__SHIFT 0x00000005
+#define PB0_PIF_SEQ_STATUS_4__SEQ_ENTER_L1_FROM_L0S_4_MASK 0x00000010L
+#define PB0_PIF_SEQ_STATUS_4__SEQ_ENTER_L1_FROM_L0S_4__SHIFT 0x00000004
+#define PB0_PIF_SEQ_STATUS_4__SEQ_EXIT_L1_TO_L0_4_MASK 0x00000008L
+#define PB0_PIF_SEQ_STATUS_4__SEQ_EXIT_L1_TO_L0_4__SHIFT 0x00000003
+#define PB0_PIF_SEQ_STATUS_4__SEQ_EXIT_L1_TO_L0S_4_MASK 0x00000004L
+#define PB0_PIF_SEQ_STATUS_4__SEQ_EXIT_L1_TO_L0S_4__SHIFT 0x00000002
+#define PB0_PIF_SEQ_STATUS_4__SEQ_PHASE_4_MASK 0x00000700L
+#define PB0_PIF_SEQ_STATUS_4__SEQ_PHASE_4__SHIFT 0x00000008
+#define PB0_PIF_SEQ_STATUS_4__SEQ_RXDETECT_4_MASK 0x00000002L
+#define PB0_PIF_SEQ_STATUS_4__SEQ_RXDETECT_4__SHIFT 0x00000001
+#define PB0_PIF_SEQ_STATUS_4__SEQ_SPEED_CHANGE_4_MASK 0x00000040L
+#define PB0_PIF_SEQ_STATUS_4__SEQ_SPEED_CHANGE_4__SHIFT 0x00000006
+#define PB0_PIF_SEQ_STATUS_5__SEQ_CALIBRATION_5_MASK 0x00000001L
+#define PB0_PIF_SEQ_STATUS_5__SEQ_CALIBRATION_5__SHIFT 0x00000000
+#define PB0_PIF_SEQ_STATUS_5__SEQ_ENTER_L1_FROM_L0_5_MASK 0x00000020L
+#define PB0_PIF_SEQ_STATUS_5__SEQ_ENTER_L1_FROM_L0_5__SHIFT 0x00000005
+#define PB0_PIF_SEQ_STATUS_5__SEQ_ENTER_L1_FROM_L0S_5_MASK 0x00000010L
+#define PB0_PIF_SEQ_STATUS_5__SEQ_ENTER_L1_FROM_L0S_5__SHIFT 0x00000004
+#define PB0_PIF_SEQ_STATUS_5__SEQ_EXIT_L1_TO_L0_5_MASK 0x00000008L
+#define PB0_PIF_SEQ_STATUS_5__SEQ_EXIT_L1_TO_L0_5__SHIFT 0x00000003
+#define PB0_PIF_SEQ_STATUS_5__SEQ_EXIT_L1_TO_L0S_5_MASK 0x00000004L
+#define PB0_PIF_SEQ_STATUS_5__SEQ_EXIT_L1_TO_L0S_5__SHIFT 0x00000002
+#define PB0_PIF_SEQ_STATUS_5__SEQ_PHASE_5_MASK 0x00000700L
+#define PB0_PIF_SEQ_STATUS_5__SEQ_PHASE_5__SHIFT 0x00000008
+#define PB0_PIF_SEQ_STATUS_5__SEQ_RXDETECT_5_MASK 0x00000002L
+#define PB0_PIF_SEQ_STATUS_5__SEQ_RXDETECT_5__SHIFT 0x00000001
+#define PB0_PIF_SEQ_STATUS_5__SEQ_SPEED_CHANGE_5_MASK 0x00000040L
+#define PB0_PIF_SEQ_STATUS_5__SEQ_SPEED_CHANGE_5__SHIFT 0x00000006
+#define PB0_PIF_SEQ_STATUS_6__SEQ_CALIBRATION_6_MASK 0x00000001L
+#define PB0_PIF_SEQ_STATUS_6__SEQ_CALIBRATION_6__SHIFT 0x00000000
+#define PB0_PIF_SEQ_STATUS_6__SEQ_ENTER_L1_FROM_L0_6_MASK 0x00000020L
+#define PB0_PIF_SEQ_STATUS_6__SEQ_ENTER_L1_FROM_L0_6__SHIFT 0x00000005
+#define PB0_PIF_SEQ_STATUS_6__SEQ_ENTER_L1_FROM_L0S_6_MASK 0x00000010L
+#define PB0_PIF_SEQ_STATUS_6__SEQ_ENTER_L1_FROM_L0S_6__SHIFT 0x00000004
+#define PB0_PIF_SEQ_STATUS_6__SEQ_EXIT_L1_TO_L0_6_MASK 0x00000008L
+#define PB0_PIF_SEQ_STATUS_6__SEQ_EXIT_L1_TO_L0_6__SHIFT 0x00000003
+#define PB0_PIF_SEQ_STATUS_6__SEQ_EXIT_L1_TO_L0S_6_MASK 0x00000004L
+#define PB0_PIF_SEQ_STATUS_6__SEQ_EXIT_L1_TO_L0S_6__SHIFT 0x00000002
+#define PB0_PIF_SEQ_STATUS_6__SEQ_PHASE_6_MASK 0x00000700L
+#define PB0_PIF_SEQ_STATUS_6__SEQ_PHASE_6__SHIFT 0x00000008
+#define PB0_PIF_SEQ_STATUS_6__SEQ_RXDETECT_6_MASK 0x00000002L
+#define PB0_PIF_SEQ_STATUS_6__SEQ_RXDETECT_6__SHIFT 0x00000001
+#define PB0_PIF_SEQ_STATUS_6__SEQ_SPEED_CHANGE_6_MASK 0x00000040L
+#define PB0_PIF_SEQ_STATUS_6__SEQ_SPEED_CHANGE_6__SHIFT 0x00000006
+#define PB0_PIF_SEQ_STATUS_7__SEQ_CALIBRATION_7_MASK 0x00000001L
+#define PB0_PIF_SEQ_STATUS_7__SEQ_CALIBRATION_7__SHIFT 0x00000000
+#define PB0_PIF_SEQ_STATUS_7__SEQ_ENTER_L1_FROM_L0_7_MASK 0x00000020L
+#define PB0_PIF_SEQ_STATUS_7__SEQ_ENTER_L1_FROM_L0_7__SHIFT 0x00000005
+#define PB0_PIF_SEQ_STATUS_7__SEQ_ENTER_L1_FROM_L0S_7_MASK 0x00000010L
+#define PB0_PIF_SEQ_STATUS_7__SEQ_ENTER_L1_FROM_L0S_7__SHIFT 0x00000004
+#define PB0_PIF_SEQ_STATUS_7__SEQ_EXIT_L1_TO_L0_7_MASK 0x00000008L
+#define PB0_PIF_SEQ_STATUS_7__SEQ_EXIT_L1_TO_L0_7__SHIFT 0x00000003
+#define PB0_PIF_SEQ_STATUS_7__SEQ_EXIT_L1_TO_L0S_7_MASK 0x00000004L
+#define PB0_PIF_SEQ_STATUS_7__SEQ_EXIT_L1_TO_L0S_7__SHIFT 0x00000002
+#define PB0_PIF_SEQ_STATUS_7__SEQ_PHASE_7_MASK 0x00000700L
+#define PB0_PIF_SEQ_STATUS_7__SEQ_PHASE_7__SHIFT 0x00000008
+#define PB0_PIF_SEQ_STATUS_7__SEQ_RXDETECT_7_MASK 0x00000002L
+#define PB0_PIF_SEQ_STATUS_7__SEQ_RXDETECT_7__SHIFT 0x00000001
+#define PB0_PIF_SEQ_STATUS_7__SEQ_SPEED_CHANGE_7_MASK 0x00000040L
+#define PB0_PIF_SEQ_STATUS_7__SEQ_SPEED_CHANGE_7__SHIFT 0x00000006
+#define PB0_PIF_SEQ_STATUS_8__SEQ_CALIBRATION_8_MASK 0x00000001L
+#define PB0_PIF_SEQ_STATUS_8__SEQ_CALIBRATION_8__SHIFT 0x00000000
+#define PB0_PIF_SEQ_STATUS_8__SEQ_ENTER_L1_FROM_L0_8_MASK 0x00000020L
+#define PB0_PIF_SEQ_STATUS_8__SEQ_ENTER_L1_FROM_L0_8__SHIFT 0x00000005
+#define PB0_PIF_SEQ_STATUS_8__SEQ_ENTER_L1_FROM_L0S_8_MASK 0x00000010L
+#define PB0_PIF_SEQ_STATUS_8__SEQ_ENTER_L1_FROM_L0S_8__SHIFT 0x00000004
+#define PB0_PIF_SEQ_STATUS_8__SEQ_EXIT_L1_TO_L0_8_MASK 0x00000008L
+#define PB0_PIF_SEQ_STATUS_8__SEQ_EXIT_L1_TO_L0_8__SHIFT 0x00000003
+#define PB0_PIF_SEQ_STATUS_8__SEQ_EXIT_L1_TO_L0S_8_MASK 0x00000004L
+#define PB0_PIF_SEQ_STATUS_8__SEQ_EXIT_L1_TO_L0S_8__SHIFT 0x00000002
+#define PB0_PIF_SEQ_STATUS_8__SEQ_PHASE_8_MASK 0x00000700L
+#define PB0_PIF_SEQ_STATUS_8__SEQ_PHASE_8__SHIFT 0x00000008
+#define PB0_PIF_SEQ_STATUS_8__SEQ_RXDETECT_8_MASK 0x00000002L
+#define PB0_PIF_SEQ_STATUS_8__SEQ_RXDETECT_8__SHIFT 0x00000001
+#define PB0_PIF_SEQ_STATUS_8__SEQ_SPEED_CHANGE_8_MASK 0x00000040L
+#define PB0_PIF_SEQ_STATUS_8__SEQ_SPEED_CHANGE_8__SHIFT 0x00000006
+#define PB0_PIF_SEQ_STATUS_9__SEQ_CALIBRATION_9_MASK 0x00000001L
+#define PB0_PIF_SEQ_STATUS_9__SEQ_CALIBRATION_9__SHIFT 0x00000000
+#define PB0_PIF_SEQ_STATUS_9__SEQ_ENTER_L1_FROM_L0_9_MASK 0x00000020L
+#define PB0_PIF_SEQ_STATUS_9__SEQ_ENTER_L1_FROM_L0_9__SHIFT 0x00000005
+#define PB0_PIF_SEQ_STATUS_9__SEQ_ENTER_L1_FROM_L0S_9_MASK 0x00000010L
+#define PB0_PIF_SEQ_STATUS_9__SEQ_ENTER_L1_FROM_L0S_9__SHIFT 0x00000004
+#define PB0_PIF_SEQ_STATUS_9__SEQ_EXIT_L1_TO_L0_9_MASK 0x00000008L
+#define PB0_PIF_SEQ_STATUS_9__SEQ_EXIT_L1_TO_L0_9__SHIFT 0x00000003
+#define PB0_PIF_SEQ_STATUS_9__SEQ_EXIT_L1_TO_L0S_9_MASK 0x00000004L
+#define PB0_PIF_SEQ_STATUS_9__SEQ_EXIT_L1_TO_L0S_9__SHIFT 0x00000002
+#define PB0_PIF_SEQ_STATUS_9__SEQ_PHASE_9_MASK 0x00000700L
+#define PB0_PIF_SEQ_STATUS_9__SEQ_PHASE_9__SHIFT 0x00000008
+#define PB0_PIF_SEQ_STATUS_9__SEQ_RXDETECT_9_MASK 0x00000002L
+#define PB0_PIF_SEQ_STATUS_9__SEQ_RXDETECT_9__SHIFT 0x00000001
+#define PB0_PIF_SEQ_STATUS_9__SEQ_SPEED_CHANGE_9_MASK 0x00000040L
+#define PB0_PIF_SEQ_STATUS_9__SEQ_SPEED_CHANGE_9__SHIFT 0x00000006
+#define PB0_PIF_TXPHYSTATUS__TXPHYSTATUS_0_MASK 0x00000001L
+#define PB0_PIF_TXPHYSTATUS__TXPHYSTATUS_0__SHIFT 0x00000000
+#define PB0_PIF_TXPHYSTATUS__TXPHYSTATUS_10_MASK 0x00000400L
+#define PB0_PIF_TXPHYSTATUS__TXPHYSTATUS_10__SHIFT 0x0000000a
+#define PB0_PIF_TXPHYSTATUS__TXPHYSTATUS_11_MASK 0x00000800L
+#define PB0_PIF_TXPHYSTATUS__TXPHYSTATUS_11__SHIFT 0x0000000b
+#define PB0_PIF_TXPHYSTATUS__TXPHYSTATUS_12_MASK 0x00001000L
+#define PB0_PIF_TXPHYSTATUS__TXPHYSTATUS_12__SHIFT 0x0000000c
+#define PB0_PIF_TXPHYSTATUS__TXPHYSTATUS_13_MASK 0x00002000L
+#define PB0_PIF_TXPHYSTATUS__TXPHYSTATUS_13__SHIFT 0x0000000d
+#define PB0_PIF_TXPHYSTATUS__TXPHYSTATUS_14_MASK 0x00004000L
+#define PB0_PIF_TXPHYSTATUS__TXPHYSTATUS_14__SHIFT 0x0000000e
+#define PB0_PIF_TXPHYSTATUS__TXPHYSTATUS_15_MASK 0x00008000L
+#define PB0_PIF_TXPHYSTATUS__TXPHYSTATUS_15__SHIFT 0x0000000f
+#define PB0_PIF_TXPHYSTATUS__TXPHYSTATUS_1_MASK 0x00000002L
+#define PB0_PIF_TXPHYSTATUS__TXPHYSTATUS_1__SHIFT 0x00000001
+#define PB0_PIF_TXPHYSTATUS__TXPHYSTATUS_2_MASK 0x00000004L
+#define PB0_PIF_TXPHYSTATUS__TXPHYSTATUS_2__SHIFT 0x00000002
+#define PB0_PIF_TXPHYSTATUS__TXPHYSTATUS_3_MASK 0x00000008L
+#define PB0_PIF_TXPHYSTATUS__TXPHYSTATUS_3__SHIFT 0x00000003
+#define PB0_PIF_TXPHYSTATUS__TXPHYSTATUS_4_MASK 0x00000010L
+#define PB0_PIF_TXPHYSTATUS__TXPHYSTATUS_4__SHIFT 0x00000004
+#define PB0_PIF_TXPHYSTATUS__TXPHYSTATUS_5_MASK 0x00000020L
+#define PB0_PIF_TXPHYSTATUS__TXPHYSTATUS_5__SHIFT 0x00000005
+#define PB0_PIF_TXPHYSTATUS__TXPHYSTATUS_6_MASK 0x00000040L
+#define PB0_PIF_TXPHYSTATUS__TXPHYSTATUS_6__SHIFT 0x00000006
+#define PB0_PIF_TXPHYSTATUS__TXPHYSTATUS_7_MASK 0x00000080L
+#define PB0_PIF_TXPHYSTATUS__TXPHYSTATUS_7__SHIFT 0x00000007
+#define PB0_PIF_TXPHYSTATUS__TXPHYSTATUS_8_MASK 0x00000100L
+#define PB0_PIF_TXPHYSTATUS__TXPHYSTATUS_8__SHIFT 0x00000008
+#define PB0_PIF_TXPHYSTATUS__TXPHYSTATUS_9_MASK 0x00000200L
+#define PB0_PIF_TXPHYSTATUS__TXPHYSTATUS_9__SHIFT 0x00000009
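+/*
+ * Illustrative note (an added sketch, not part of the generated header):
+ * TXPHYSTATUS carries one status bit per lane at bit position N, so a
+ * single lane is most simply tested through its named mask, e.g.:
+ *
+ *   if (v & PB0_PIF_TXPHYSTATUS__TXPHYSTATUS_3_MASK)
+ *           ; /* lane 3 reports TX PHY status */
+ */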
+#define PB0_PLL_LC0_CTRL_REG0__PLL_DBG_LC_ANALOG_SEL_0_MASK 0x00000003L
+#define PB0_PLL_LC0_CTRL_REG0__PLL_DBG_LC_ANALOG_SEL_0__SHIFT 0x00000000
+#define PB0_PLL_LC0_CTRL_REG0__PLL_DBG_LC_EXT_RESET_EN_0_MASK 0x00000004L
+#define PB0_PLL_LC0_CTRL_REG0__PLL_DBG_LC_EXT_RESET_EN_0__SHIFT 0x00000002
+#define PB0_PLL_LC0_CTRL_REG0__PLL_DBG_LC_VCTL_ADC_EN_0_MASK 0x00000008L
+#define PB0_PLL_LC0_CTRL_REG0__PLL_DBG_LC_VCTL_ADC_EN_0__SHIFT 0x00000003
+#define PB0_PLL_LC0_CTRL_REG0__PLL_TST_LC_USAMPLE_EN_0_MASK 0x00000010L
+#define PB0_PLL_LC0_CTRL_REG0__PLL_TST_LC_USAMPLE_EN_0__SHIFT 0x00000004
+#define PB0_PLL_LC0_OVRD_REG0__PLL_CFG_LC_BW_CNTRL_OVRD_EN_0_MASK 0x00000008L
+#define PB0_PLL_LC0_OVRD_REG0__PLL_CFG_LC_BW_CNTRL_OVRD_EN_0__SHIFT 0x00000003
+#define PB0_PLL_LC0_OVRD_REG0__PLL_CFG_LC_BW_CNTRL_OVRD_VAL_0_MASK 0x00000007L
+#define PB0_PLL_LC0_OVRD_REG0__PLL_CFG_LC_BW_CNTRL_OVRD_VAL_0__SHIFT 0x00000000
+#define PB0_PLL_LC0_OVRD_REG0__PLL_CFG_LC_CORECLK_DIV_OVRD_EN_0_MASK 0x00000080L
+#define PB0_PLL_LC0_OVRD_REG0__PLL_CFG_LC_CORECLK_DIV_OVRD_EN_0__SHIFT 0x00000007
+#define PB0_PLL_LC0_OVRD_REG0__PLL_CFG_LC_CORECLK_DIV_OVRD_VAL_0_MASK 0x00000070L
+#define PB0_PLL_LC0_OVRD_REG0__PLL_CFG_LC_CORECLK_DIV_OVRD_VAL_0__SHIFT 0x00000004
+#define PB0_PLL_LC0_OVRD_REG0__PLL_CFG_LC_CORECLK_EN_OVRD_EN_0_MASK 0x00000200L
+#define PB0_PLL_LC0_OVRD_REG0__PLL_CFG_LC_CORECLK_EN_OVRD_EN_0__SHIFT 0x00000009
+#define PB0_PLL_LC0_OVRD_REG0__PLL_CFG_LC_CORECLK_EN_OVRD_VAL_0_MASK 0x00000100L
+#define PB0_PLL_LC0_OVRD_REG0__PLL_CFG_LC_CORECLK_EN_OVRD_VAL_0__SHIFT 0x00000008
+#define PB0_PLL_LC0_OVRD_REG0__PLL_CFG_LC_FBDIV_OVRD_EN_0_MASK 0x00040000L
+#define PB0_PLL_LC0_OVRD_REG0__PLL_CFG_LC_FBDIV_OVRD_EN_0__SHIFT 0x00000012
+#define PB0_PLL_LC0_OVRD_REG0__PLL_CFG_LC_FBDIV_OVRD_VAL_0_MASK 0x0003fc00L
+#define PB0_PLL_LC0_OVRD_REG0__PLL_CFG_LC_FBDIV_OVRD_VAL_0__SHIFT 0x0000000a
+#define PB0_PLL_LC0_OVRD_REG0__PLL_CFG_LC_LF_CNTRL_OVRD_EN_0_MASK 0x10000000L
+#define PB0_PLL_LC0_OVRD_REG0__PLL_CFG_LC_LF_CNTRL_OVRD_EN_0__SHIFT 0x0000001c
+#define PB0_PLL_LC0_OVRD_REG0__PLL_CFG_LC_LF_CNTRL_OVRD_VAL_0_MASK 0x0ff80000L
+#define PB0_PLL_LC0_OVRD_REG0__PLL_CFG_LC_LF_CNTRL_OVRD_VAL_0__SHIFT 0x00000013
+#define PB0_PLL_LC0_OVRD_REG0__PLL_CFG_LC_REFDIV_OVRD_EN_0_MASK 0x80000000L
+#define PB0_PLL_LC0_OVRD_REG0__PLL_CFG_LC_REFDIV_OVRD_EN_0__SHIFT 0x0000001f
+#define PB0_PLL_LC0_OVRD_REG0__PLL_CFG_LC_REFDIV_OVRD_VAL_0_MASK 0x60000000L
+#define PB0_PLL_LC0_OVRD_REG0__PLL_CFG_LC_REFDIV_OVRD_VAL_0__SHIFT 0x0000001d
+#define PB0_PLL_LC0_OVRD_REG1__PLL_CFG_LC_REFCLK_SRC_OVRD_EN_0_MASK 0x00000008L
+#define PB0_PLL_LC0_OVRD_REG1__PLL_CFG_LC_REFCLK_SRC_OVRD_EN_0__SHIFT 0x00000003
+#define PB0_PLL_LC0_OVRD_REG1__PLL_CFG_LC_REFCLK_SRC_OVRD_VAL_0_MASK 0x00000007L
+#define PB0_PLL_LC0_OVRD_REG1__PLL_CFG_LC_REFCLK_SRC_OVRD_VAL_0__SHIFT 0x00000000
+#define PB0_PLL_LC0_OVRD_REG1__PLL_CFG_LC_VCO_TUNE_OVRD_EN_0_MASK 0x00040000L
+#define PB0_PLL_LC0_OVRD_REG1__PLL_CFG_LC_VCO_TUNE_OVRD_EN_0__SHIFT 0x00000012
+#define PB0_PLL_LC0_OVRD_REG1__PLL_CFG_LC_VCO_TUNE_OVRD_VAL_0_MASK 0x0003c000L
+#define PB0_PLL_LC0_OVRD_REG1__PLL_CFG_LC_VCO_TUNE_OVRD_VAL_0__SHIFT 0x0000000e
+#define PB0_PLL_LC0_OVRD_REG1__PLL_LC_HSCLK_LEFT_EN_OVRD_EN_0_MASK 0x00000020L
+#define PB0_PLL_LC0_OVRD_REG1__PLL_LC_HSCLK_LEFT_EN_OVRD_EN_0__SHIFT 0x00000005
+#define PB0_PLL_LC0_OVRD_REG1__PLL_LC_HSCLK_LEFT_EN_OVRD_VAL_0_MASK 0x00000010L
+#define PB0_PLL_LC0_OVRD_REG1__PLL_LC_HSCLK_LEFT_EN_OVRD_VAL_0__SHIFT 0x00000004
+#define PB0_PLL_LC0_OVRD_REG1__PLL_LC_HSCLK_RIGHT_EN_OVRD_EN_0_MASK 0x00000080L
+#define PB0_PLL_LC0_OVRD_REG1__PLL_LC_HSCLK_RIGHT_EN_OVRD_EN_0__SHIFT 0x00000007
+#define PB0_PLL_LC0_OVRD_REG1__PLL_LC_HSCLK_RIGHT_EN_OVRD_VAL_0_MASK 0x00000040L
+#define PB0_PLL_LC0_OVRD_REG1__PLL_LC_HSCLK_RIGHT_EN_OVRD_VAL_0__SHIFT 0x00000006
+#define PB0_PLL_LC0_OVRD_REG1__PLL_LC_PWRON_OVRD_EN_0_MASK 0x00000200L
+#define PB0_PLL_LC0_OVRD_REG1__PLL_LC_PWRON_OVRD_EN_0__SHIFT 0x00000009
+#define PB0_PLL_LC0_OVRD_REG1__PLL_LC_PWRON_OVRD_VAL_0_MASK 0x00000100L
+#define PB0_PLL_LC0_OVRD_REG1__PLL_LC_PWRON_OVRD_VAL_0__SHIFT 0x00000008
+#define PB0_PLL_LC0_SCI_STAT_OVRD_REG0__PLL_LC0_FREQMODE_MASK 0x00000300L
+#define PB0_PLL_LC0_SCI_STAT_OVRD_REG0__PLL_LC0_FREQMODE__SHIFT 0x00000008
+#define PB0_PLL_LC0_SCI_STAT_OVRD_REG0__PLL_LC0_IGNR_FREQMODE_SCI_UPDT_MASK 0x00000002L
+#define PB0_PLL_LC0_SCI_STAT_OVRD_REG0__PLL_LC0_IGNR_FREQMODE_SCI_UPDT__SHIFT 0x00000001
+#define PB0_PLL_LC0_SCI_STAT_OVRD_REG0__PLL_LC0_IGNR_PLLPWR_SCI_UPDT_MASK 0x00000001L
+#define PB0_PLL_LC0_SCI_STAT_OVRD_REG0__PLL_LC0_IGNR_PLLPWR_SCI_UPDT__SHIFT 0x00000000
+#define PB0_PLL_LC0_SCI_STAT_OVRD_REG0__PLL_LC0_PLLPWR_MASK 0x00000070L
+#define PB0_PLL_LC0_SCI_STAT_OVRD_REG0__PLL_LC0_PLLPWR__SHIFT 0x00000004
+#define PB0_PLL_LC1_SCI_STAT_OVRD_REG0__PLL_LC1_FREQMODE_MASK 0x00000300L
+#define PB0_PLL_LC1_SCI_STAT_OVRD_REG0__PLL_LC1_FREQMODE__SHIFT 0x00000008
+#define PB0_PLL_LC1_SCI_STAT_OVRD_REG0__PLL_LC1_IGNR_FREQMODE_SCI_UPDT_MASK 0x00000002L
+#define PB0_PLL_LC1_SCI_STAT_OVRD_REG0__PLL_LC1_IGNR_FREQMODE_SCI_UPDT__SHIFT 0x00000001
+#define PB0_PLL_LC1_SCI_STAT_OVRD_REG0__PLL_LC1_IGNR_PLLPWR_SCI_UPDT_MASK 0x00000001L
+#define PB0_PLL_LC1_SCI_STAT_OVRD_REG0__PLL_LC1_IGNR_PLLPWR_SCI_UPDT__SHIFT 0x00000000
+#define PB0_PLL_LC1_SCI_STAT_OVRD_REG0__PLL_LC1_PLLPWR_MASK 0x00000070L
+#define PB0_PLL_LC1_SCI_STAT_OVRD_REG0__PLL_LC1_PLLPWR__SHIFT 0x00000004
+#define PB0_PLL_LC2_SCI_STAT_OVRD_REG0__PLL_LC2_FREQMODE_MASK 0x00000300L
+#define PB0_PLL_LC2_SCI_STAT_OVRD_REG0__PLL_LC2_FREQMODE__SHIFT 0x00000008
+#define PB0_PLL_LC2_SCI_STAT_OVRD_REG0__PLL_LC2_IGNR_FREQMODE_SCI_UPDT_MASK 0x00000002L
+#define PB0_PLL_LC2_SCI_STAT_OVRD_REG0__PLL_LC2_IGNR_FREQMODE_SCI_UPDT__SHIFT 0x00000001
+#define PB0_PLL_LC2_SCI_STAT_OVRD_REG0__PLL_LC2_IGNR_PLLPWR_SCI_UPDT_MASK 0x00000001L
+#define PB0_PLL_LC2_SCI_STAT_OVRD_REG0__PLL_LC2_IGNR_PLLPWR_SCI_UPDT__SHIFT 0x00000000
+#define PB0_PLL_LC2_SCI_STAT_OVRD_REG0__PLL_LC2_PLLPWR_MASK 0x00000070L
+#define PB0_PLL_LC2_SCI_STAT_OVRD_REG0__PLL_LC2_PLLPWR__SHIFT 0x00000004
+#define PB0_PLL_LC3_SCI_STAT_OVRD_REG0__PLL_LC3_FREQMODE_MASK 0x00000300L
+#define PB0_PLL_LC3_SCI_STAT_OVRD_REG0__PLL_LC3_FREQMODE__SHIFT 0x00000008
+#define PB0_PLL_LC3_SCI_STAT_OVRD_REG0__PLL_LC3_IGNR_FREQMODE_SCI_UPDT_MASK 0x00000002L
+#define PB0_PLL_LC3_SCI_STAT_OVRD_REG0__PLL_LC3_IGNR_FREQMODE_SCI_UPDT__SHIFT 0x00000001
+#define PB0_PLL_LC3_SCI_STAT_OVRD_REG0__PLL_LC3_IGNR_PLLPWR_SCI_UPDT_MASK 0x00000001L
+#define PB0_PLL_LC3_SCI_STAT_OVRD_REG0__PLL_LC3_IGNR_PLLPWR_SCI_UPDT__SHIFT 0x00000000
+#define PB0_PLL_LC3_SCI_STAT_OVRD_REG0__PLL_LC3_PLLPWR_MASK 0x00000070L
+#define PB0_PLL_LC3_SCI_STAT_OVRD_REG0__PLL_LC3_PLLPWR__SHIFT 0x00000004
+#define PB0_PLL_RO0_CTRL_REG0__PLL_DBG_RO_ANALOG_SEL_0_MASK 0x00000003L
+#define PB0_PLL_RO0_CTRL_REG0__PLL_DBG_RO_ANALOG_SEL_0__SHIFT 0x00000000
+#define PB0_PLL_RO0_CTRL_REG0__PLL_DBG_RO_EXT_RESET_EN_0_MASK 0x00000004L
+#define PB0_PLL_RO0_CTRL_REG0__PLL_DBG_RO_EXT_RESET_EN_0__SHIFT 0x00000002
+#define PB0_PLL_RO0_CTRL_REG0__PLL_DBG_RO_LF_CNTRL_0_MASK 0x000007f0L
+#define PB0_PLL_RO0_CTRL_REG0__PLL_DBG_RO_LF_CNTRL_0__SHIFT 0x00000004
+#define PB0_PLL_RO0_CTRL_REG0__PLL_DBG_RO_VCTL_ADC_EN_0_MASK 0x00000008L
+#define PB0_PLL_RO0_CTRL_REG0__PLL_DBG_RO_VCTL_ADC_EN_0__SHIFT 0x00000003
+#define PB0_PLL_RO0_CTRL_REG0__PLL_TST_RO_USAMPLE_EN_0_MASK 0x00000800L
+#define PB0_PLL_RO0_CTRL_REG0__PLL_TST_RO_USAMPLE_EN_0__SHIFT 0x0000000b
+#define PB0_PLL_RO0_OVRD_REG0__PLL_CFG_RO_BW_CNTRL_OVRD_EN_0_MASK 0x00000100L
+#define PB0_PLL_RO0_OVRD_REG0__PLL_CFG_RO_BW_CNTRL_OVRD_EN_0__SHIFT 0x00000008
+#define PB0_PLL_RO0_OVRD_REG0__PLL_CFG_RO_BW_CNTRL_OVRD_VAL_0_MASK 0x000000ffL
+#define PB0_PLL_RO0_OVRD_REG0__PLL_CFG_RO_BW_CNTRL_OVRD_VAL_0__SHIFT 0x00000000
+#define PB0_PLL_RO0_OVRD_REG0__PLL_CFG_RO_CORECLK_DIV_OVRD_EN_0_MASK 0x00001000L
+#define PB0_PLL_RO0_OVRD_REG0__PLL_CFG_RO_CORECLK_DIV_OVRD_EN_0__SHIFT 0x0000000c
+#define PB0_PLL_RO0_OVRD_REG0__PLL_CFG_RO_CORECLK_DIV_OVRD_VAL_0_MASK 0x00000e00L
+#define PB0_PLL_RO0_OVRD_REG0__PLL_CFG_RO_CORECLK_DIV_OVRD_VAL_0__SHIFT 0x00000009
+#define PB0_PLL_RO0_OVRD_REG0__PLL_CFG_RO_CORECLK_EN_OVRD_EN_0_MASK 0x00004000L
+#define PB0_PLL_RO0_OVRD_REG0__PLL_CFG_RO_CORECLK_EN_OVRD_EN_0__SHIFT 0x0000000e
+#define PB0_PLL_RO0_OVRD_REG0__PLL_CFG_RO_CORECLK_EN_OVRD_VAL_0_MASK 0x00002000L
+#define PB0_PLL_RO0_OVRD_REG0__PLL_CFG_RO_CORECLK_EN_OVRD_VAL_0__SHIFT 0x0000000d
+#define PB0_PLL_RO0_OVRD_REG0__PLL_CFG_RO_FBDIV_OVRD_EN_0_MASK 0x10000000L
+#define PB0_PLL_RO0_OVRD_REG0__PLL_CFG_RO_FBDIV_OVRD_EN_0__SHIFT 0x0000001c
+#define PB0_PLL_RO0_OVRD_REG0__PLL_CFG_RO_FBDIV_OVRD_VAL_0_MASK 0x0fff8000L
+#define PB0_PLL_RO0_OVRD_REG0__PLL_CFG_RO_FBDIV_OVRD_VAL_0__SHIFT 0x0000000f
+#define PB0_PLL_RO0_OVRD_REG0__PLL_CFG_RO_VTOI_BIAS_CNTRL_OVRD_EN_0_MASK 0x80000000L
+#define PB0_PLL_RO0_OVRD_REG0__PLL_CFG_RO_VTOI_BIAS_CNTRL_OVRD_EN_0__SHIFT 0x0000001f
+#define PB0_PLL_RO0_OVRD_REG0__PLL_CFG_RO_VTOI_BIAS_CNTRL_OVRD_VAL_0_MASK 0x40000000L
+#define PB0_PLL_RO0_OVRD_REG0__PLL_CFG_RO_VTOI_BIAS_CNTRL_OVRD_VAL_0__SHIFT 0x0000001e
+#define PB0_PLL_RO0_OVRD_REG1__PLL_CFG_RO_REFCLK_SRC_OVRD_EN_0_MASK 0x00400000L
+#define PB0_PLL_RO0_OVRD_REG1__PLL_CFG_RO_REFCLK_SRC_OVRD_EN_0__SHIFT 0x00000016
+#define PB0_PLL_RO0_OVRD_REG1__PLL_CFG_RO_REFCLK_SRC_OVRD_VAL_0_MASK 0x00380000L
+#define PB0_PLL_RO0_OVRD_REG1__PLL_CFG_RO_REFCLK_SRC_OVRD_VAL_0__SHIFT 0x00000013
+#define PB0_PLL_RO0_OVRD_REG1__PLL_CFG_RO_REFDIV_OVRD_EN_0_MASK 0x00000020L
+#define PB0_PLL_RO0_OVRD_REG1__PLL_CFG_RO_REFDIV_OVRD_EN_0__SHIFT 0x00000005
+#define PB0_PLL_RO0_OVRD_REG1__PLL_CFG_RO_REFDIV_OVRD_VAL_0_MASK 0x0000001fL
+#define PB0_PLL_RO0_OVRD_REG1__PLL_CFG_RO_REFDIV_OVRD_VAL_0__SHIFT 0x00000000
+#define PB0_PLL_RO0_OVRD_REG1__PLL_CFG_RO_VCO_MODE_OVRD_EN_0_MASK 0x00000100L
+#define PB0_PLL_RO0_OVRD_REG1__PLL_CFG_RO_VCO_MODE_OVRD_EN_0__SHIFT 0x00000008
+#define PB0_PLL_RO0_OVRD_REG1__PLL_CFG_RO_VCO_MODE_OVRD_VAL_0_MASK 0x000000c0L
+#define PB0_PLL_RO0_OVRD_REG1__PLL_CFG_RO_VCO_MODE_OVRD_VAL_0__SHIFT 0x00000006
+#define PB0_PLL_RO0_OVRD_REG1__PLL_RO_HSCLK_LEFT_EN_OVRD_EN_0_MASK 0x00000400L
+#define PB0_PLL_RO0_OVRD_REG1__PLL_RO_HSCLK_LEFT_EN_OVRD_EN_0__SHIFT 0x0000000a
+#define PB0_PLL_RO0_OVRD_REG1__PLL_RO_HSCLK_LEFT_EN_OVRD_VAL_0_MASK 0x00000200L
+#define PB0_PLL_RO0_OVRD_REG1__PLL_RO_HSCLK_LEFT_EN_OVRD_VAL_0__SHIFT 0x00000009
+#define PB0_PLL_RO0_OVRD_REG1__PLL_RO_HSCLK_RIGHT_EN_OVRD_EN_0_MASK 0x00001000L
+#define PB0_PLL_RO0_OVRD_REG1__PLL_RO_HSCLK_RIGHT_EN_OVRD_EN_0__SHIFT 0x0000000c
+#define PB0_PLL_RO0_OVRD_REG1__PLL_RO_HSCLK_RIGHT_EN_OVRD_VAL_0_MASK 0x00000800L
+#define PB0_PLL_RO0_OVRD_REG1__PLL_RO_HSCLK_RIGHT_EN_OVRD_VAL_0__SHIFT 0x0000000b
+#define PB0_PLL_RO0_OVRD_REG1__PLL_RO_PWRON_OVRD_EN_0_MASK 0x00004000L
+#define PB0_PLL_RO0_OVRD_REG1__PLL_RO_PWRON_OVRD_EN_0__SHIFT 0x0000000e
+#define PB0_PLL_RO0_OVRD_REG1__PLL_RO_PWRON_OVRD_VAL_0_MASK 0x00002000L
+#define PB0_PLL_RO0_OVRD_REG1__PLL_RO_PWRON_OVRD_VAL_0__SHIFT 0x0000000d
+#define PB0_PLL_RO0_SCI_STAT_OVRD_REG0__PLL_RO0_FREQMODE_MASK 0x00000300L
+#define PB0_PLL_RO0_SCI_STAT_OVRD_REG0__PLL_RO0_FREQMODE__SHIFT 0x00000008
+#define PB0_PLL_RO0_SCI_STAT_OVRD_REG0__PLL_RO0_IGNR_FREQMODE_SCI_UPDT_MASK 0x00000002L
+#define PB0_PLL_RO0_SCI_STAT_OVRD_REG0__PLL_RO0_IGNR_FREQMODE_SCI_UPDT__SHIFT 0x00000001
+#define PB0_PLL_RO0_SCI_STAT_OVRD_REG0__PLL_RO0_IGNR_PLLPWR_SCI_UPDT_MASK 0x00000001L
+#define PB0_PLL_RO0_SCI_STAT_OVRD_REG0__PLL_RO0_IGNR_PLLPWR_SCI_UPDT__SHIFT 0x00000000
+#define PB0_PLL_RO0_SCI_STAT_OVRD_REG0__PLL_RO0_PLLPWR_MASK 0x00000070L
+#define PB0_PLL_RO0_SCI_STAT_OVRD_REG0__PLL_RO0_PLLPWR__SHIFT 0x00000004
+#define PB0_PLL_RO1_SCI_STAT_OVRD_REG0__PLL_RO1_FREQMODE_MASK 0x00000300L
+#define PB0_PLL_RO1_SCI_STAT_OVRD_REG0__PLL_RO1_FREQMODE__SHIFT 0x00000008
+#define PB0_PLL_RO1_SCI_STAT_OVRD_REG0__PLL_RO1_IGNR_FREQMODE_SCI_UPDT_MASK 0x00000002L
+#define PB0_PLL_RO1_SCI_STAT_OVRD_REG0__PLL_RO1_IGNR_FREQMODE_SCI_UPDT__SHIFT 0x00000001
+#define PB0_PLL_RO1_SCI_STAT_OVRD_REG0__PLL_RO1_IGNR_PLLPWR_SCI_UPDT_MASK 0x00000001L
+#define PB0_PLL_RO1_SCI_STAT_OVRD_REG0__PLL_RO1_IGNR_PLLPWR_SCI_UPDT__SHIFT 0x00000000
+#define PB0_PLL_RO1_SCI_STAT_OVRD_REG0__PLL_RO1_PLLPWR_MASK 0x00000070L
+#define PB0_PLL_RO1_SCI_STAT_OVRD_REG0__PLL_RO1_PLLPWR__SHIFT 0x00000004
+#define PB0_PLL_RO2_SCI_STAT_OVRD_REG0__PLL_RO2_FREQMODE_MASK 0x00000300L
+#define PB0_PLL_RO2_SCI_STAT_OVRD_REG0__PLL_RO2_FREQMODE__SHIFT 0x00000008
+#define PB0_PLL_RO2_SCI_STAT_OVRD_REG0__PLL_RO2_IGNR_FREQMODE_SCI_UPDT_MASK 0x00000002L
+#define PB0_PLL_RO2_SCI_STAT_OVRD_REG0__PLL_RO2_IGNR_FREQMODE_SCI_UPDT__SHIFT 0x00000001
+#define PB0_PLL_RO2_SCI_STAT_OVRD_REG0__PLL_RO2_IGNR_PLLPWR_SCI_UPDT_MASK 0x00000001L
+#define PB0_PLL_RO2_SCI_STAT_OVRD_REG0__PLL_RO2_IGNR_PLLPWR_SCI_UPDT__SHIFT 0x00000000
+#define PB0_PLL_RO2_SCI_STAT_OVRD_REG0__PLL_RO2_PLLPWR_MASK 0x00000070L
+#define PB0_PLL_RO2_SCI_STAT_OVRD_REG0__PLL_RO2_PLLPWR__SHIFT 0x00000004
+#define PB0_PLL_RO3_SCI_STAT_OVRD_REG0__PLL_RO3_FREQMODE_MASK 0x00000300L
+#define PB0_PLL_RO3_SCI_STAT_OVRD_REG0__PLL_RO3_FREQMODE__SHIFT 0x00000008
+#define PB0_PLL_RO3_SCI_STAT_OVRD_REG0__PLL_RO3_IGNR_FREQMODE_SCI_UPDT_MASK 0x00000002L
+#define PB0_PLL_RO3_SCI_STAT_OVRD_REG0__PLL_RO3_IGNR_FREQMODE_SCI_UPDT__SHIFT 0x00000001
+#define PB0_PLL_RO3_SCI_STAT_OVRD_REG0__PLL_RO3_IGNR_PLLPWR_SCI_UPDT_MASK 0x00000001L
+#define PB0_PLL_RO3_SCI_STAT_OVRD_REG0__PLL_RO3_IGNR_PLLPWR_SCI_UPDT__SHIFT 0x00000000
+#define PB0_PLL_RO3_SCI_STAT_OVRD_REG0__PLL_RO3_PLLPWR_MASK 0x00000070L
+#define PB0_PLL_RO3_SCI_STAT_OVRD_REG0__PLL_RO3_PLLPWR__SHIFT 0x00000004
+#define PB0_PLL_RO_GLB_CTRL_REG0__PLL_LC_HSCLK_LEFT_EN_LUT_ENTRY_LS0_MASK 0x00000200L
+#define PB0_PLL_RO_GLB_CTRL_REG0__PLL_LC_HSCLK_LEFT_EN_LUT_ENTRY_LS0__SHIFT 0x00000009
+#define PB0_PLL_RO_GLB_CTRL_REG0__PLL_LC_HSCLK_LEFT_EN_LUT_ENTRY_LS1_MASK 0x00000400L
+#define PB0_PLL_RO_GLB_CTRL_REG0__PLL_LC_HSCLK_LEFT_EN_LUT_ENTRY_LS1__SHIFT 0x0000000a
+#define PB0_PLL_RO_GLB_CTRL_REG0__PLL_LC_HSCLK_LEFT_EN_LUT_ENTRY_LS2_MASK 0x00000800L
+#define PB0_PLL_RO_GLB_CTRL_REG0__PLL_LC_HSCLK_LEFT_EN_LUT_ENTRY_LS2__SHIFT 0x0000000b
+#define PB0_PLL_RO_GLB_CTRL_REG0__PLL_LC_HSCLK_LEFT_LEFT_EN_GATING_EN_MASK 0x00100000L
+#define PB0_PLL_RO_GLB_CTRL_REG0__PLL_LC_HSCLK_LEFT_LEFT_EN_GATING_EN__SHIFT 0x00000014
+#define PB0_PLL_RO_GLB_CTRL_REG0__PLL_LC_HSCLK_LEFT_RIGHT_EN_GATING_EN_MASK 0x00200000L
+#define PB0_PLL_RO_GLB_CTRL_REG0__PLL_LC_HSCLK_LEFT_RIGHT_EN_GATING_EN__SHIFT 0x00000015
+#define PB0_PLL_RO_GLB_CTRL_REG0__PLL_LC_HSCLK_RIGHT_EN_LUT_ENTRY_LS0_MASK 0x00001000L
+#define PB0_PLL_RO_GLB_CTRL_REG0__PLL_LC_HSCLK_RIGHT_EN_LUT_ENTRY_LS0__SHIFT 0x0000000c
+#define PB0_PLL_RO_GLB_CTRL_REG0__PLL_LC_HSCLK_RIGHT_EN_LUT_ENTRY_LS1_MASK 0x00002000L
+#define PB0_PLL_RO_GLB_CTRL_REG0__PLL_LC_HSCLK_RIGHT_EN_LUT_ENTRY_LS1__SHIFT 0x0000000d
+#define PB0_PLL_RO_GLB_CTRL_REG0__PLL_LC_HSCLK_RIGHT_EN_LUT_ENTRY_LS2_MASK 0x00004000L
+#define PB0_PLL_RO_GLB_CTRL_REG0__PLL_LC_HSCLK_RIGHT_EN_LUT_ENTRY_LS2__SHIFT 0x0000000e
+#define PB0_PLL_RO_GLB_CTRL_REG0__PLL_LC_HSCLK_RIGHT_LEFT_EN_GATING_EN_MASK 0x00400000L
+#define PB0_PLL_RO_GLB_CTRL_REG0__PLL_LC_HSCLK_RIGHT_LEFT_EN_GATING_EN__SHIFT 0x00000016
+#define PB0_PLL_RO_GLB_CTRL_REG0__PLL_LC_HSCLK_RIGHT_RIGHT_EN_GATING_EN_MASK 0x00800000L
+#define PB0_PLL_RO_GLB_CTRL_REG0__PLL_LC_HSCLK_RIGHT_RIGHT_EN_GATING_EN__SHIFT 0x00000017
+#define PB0_PLL_RO_GLB_CTRL_REG0__PLL_LC_PWRON_LUT_ENTRY_LS2_MASK 0x00000100L
+#define PB0_PLL_RO_GLB_CTRL_REG0__PLL_LC_PWRON_LUT_ENTRY_LS2__SHIFT 0x00000008
+#define PB0_PLL_RO_GLB_CTRL_REG0__PLL_RO_HSCLK_LEFT_EN_LUT_ENTRY_LS0_MASK 0x00000002L
+#define PB0_PLL_RO_GLB_CTRL_REG0__PLL_RO_HSCLK_LEFT_EN_LUT_ENTRY_LS0__SHIFT 0x00000001
+#define PB0_PLL_RO_GLB_CTRL_REG0__PLL_RO_HSCLK_LEFT_EN_LUT_ENTRY_LS1_MASK 0x00000004L
+#define PB0_PLL_RO_GLB_CTRL_REG0__PLL_RO_HSCLK_LEFT_EN_LUT_ENTRY_LS1__SHIFT 0x00000002
+#define PB0_PLL_RO_GLB_CTRL_REG0__PLL_RO_HSCLK_LEFT_EN_LUT_ENTRY_LS2_MASK 0x00000008L
+#define PB0_PLL_RO_GLB_CTRL_REG0__PLL_RO_HSCLK_LEFT_EN_LUT_ENTRY_LS2__SHIFT 0x00000003
+#define PB0_PLL_RO_GLB_CTRL_REG0__PLL_RO_HSCLK_LEFT_LEFT_EN_GATING_EN_MASK 0x00010000L
+#define PB0_PLL_RO_GLB_CTRL_REG0__PLL_RO_HSCLK_LEFT_LEFT_EN_GATING_EN__SHIFT 0x00000010
+#define PB0_PLL_RO_GLB_CTRL_REG0__PLL_RO_HSCLK_LEFT_RIGHT_EN_GATING_EN_MASK 0x00020000L
+#define PB0_PLL_RO_GLB_CTRL_REG0__PLL_RO_HSCLK_LEFT_RIGHT_EN_GATING_EN__SHIFT 0x00000011
+#define PB0_PLL_RO_GLB_CTRL_REG0__PLL_RO_HSCLK_RIGHT_EN_LUT_ENTRY_LS0_MASK 0x00000010L
+#define PB0_PLL_RO_GLB_CTRL_REG0__PLL_RO_HSCLK_RIGHT_EN_LUT_ENTRY_LS0__SHIFT 0x00000004
+#define PB0_PLL_RO_GLB_CTRL_REG0__PLL_RO_HSCLK_RIGHT_EN_LUT_ENTRY_LS1_MASK 0x00000020L
+#define PB0_PLL_RO_GLB_CTRL_REG0__PLL_RO_HSCLK_RIGHT_EN_LUT_ENTRY_LS1__SHIFT 0x00000005
+#define PB0_PLL_RO_GLB_CTRL_REG0__PLL_RO_HSCLK_RIGHT_EN_LUT_ENTRY_LS2_MASK 0x00000040L
+#define PB0_PLL_RO_GLB_CTRL_REG0__PLL_RO_HSCLK_RIGHT_EN_LUT_ENTRY_LS2__SHIFT 0x00000006
+#define PB0_PLL_RO_GLB_CTRL_REG0__PLL_RO_HSCLK_RIGHT_LEFT_EN_GATING_EN_MASK 0x00040000L
+#define PB0_PLL_RO_GLB_CTRL_REG0__PLL_RO_HSCLK_RIGHT_LEFT_EN_GATING_EN__SHIFT 0x00000012
+#define PB0_PLL_RO_GLB_CTRL_REG0__PLL_RO_HSCLK_RIGHT_RIGHT_EN_GATING_EN_MASK 0x00080000L
+#define PB0_PLL_RO_GLB_CTRL_REG0__PLL_RO_HSCLK_RIGHT_RIGHT_EN_GATING_EN__SHIFT 0x00000013
+#define PB0_PLL_RO_GLB_CTRL_REG0__PLL_RO_PWRON_LUT_ENTRY_LS2_MASK 0x00000080L
+#define PB0_PLL_RO_GLB_CTRL_REG0__PLL_RO_PWRON_LUT_ENTRY_LS2__SHIFT 0x00000007
+#define PB0_PLL_RO_GLB_CTRL_REG0__PLL_TST_LOSPDTST_SRC_MASK 0x00000001L
+#define PB0_PLL_RO_GLB_CTRL_REG0__PLL_TST_LOSPDTST_SRC__SHIFT 0x00000000
+#define PB0_RX_GLB_CTRL_REG0__RX_CFG_ADAPT_MODE_GEN1_MASK 0x000003ffL
+#define PB0_RX_GLB_CTRL_REG0__RX_CFG_ADAPT_MODE_GEN1__SHIFT 0x00000000
+#define PB0_RX_GLB_CTRL_REG0__RX_CFG_ADAPT_MODE_GEN2_MASK 0x000ffc00L
+#define PB0_RX_GLB_CTRL_REG0__RX_CFG_ADAPT_MODE_GEN2__SHIFT 0x0000000a
+#define PB0_RX_GLB_CTRL_REG0__RX_CFG_ADAPT_MODE_GEN3_MASK 0x3ff00000L
+#define PB0_RX_GLB_CTRL_REG0__RX_CFG_ADAPT_MODE_GEN3__SHIFT 0x00000014
+#define PB0_RX_GLB_CTRL_REG0__RX_CFG_ADAPT_RST_MODE_MASK 0xc0000000L
+#define PB0_RX_GLB_CTRL_REG0__RX_CFG_ADAPT_RST_MODE__SHIFT 0x0000001e
+#define PB0_RX_GLB_CTRL_REG1__RX_ADAPT_HLD_ASRT_TO_DCLK_EN_MASK 0xc0000000L
+#define PB0_RX_GLB_CTRL_REG1__RX_ADAPT_HLD_ASRT_TO_DCLK_EN__SHIFT 0x0000001e
+#define PB0_RX_GLB_CTRL_REG1__RX_CFG_CDR_FR_GAIN_GEN1_MASK 0x0000000fL
+#define PB0_RX_GLB_CTRL_REG1__RX_CFG_CDR_FR_GAIN_GEN1__SHIFT 0x00000000
+#define PB0_RX_GLB_CTRL_REG1__RX_CFG_CDR_FR_GAIN_GEN2_MASK 0x000000f0L
+#define PB0_RX_GLB_CTRL_REG1__RX_CFG_CDR_FR_GAIN_GEN2__SHIFT 0x00000004
+#define PB0_RX_GLB_CTRL_REG1__RX_CFG_CDR_FR_GAIN_GEN3_MASK 0x00000f00L
+#define PB0_RX_GLB_CTRL_REG1__RX_CFG_CDR_FR_GAIN_GEN3__SHIFT 0x00000008
+#define PB0_RX_GLB_CTRL_REG1__RX_CFG_CDR_PH_GAIN_GEN1_MASK 0x0000f000L
+#define PB0_RX_GLB_CTRL_REG1__RX_CFG_CDR_PH_GAIN_GEN1__SHIFT 0x0000000c
+#define PB0_RX_GLB_CTRL_REG1__RX_CFG_CDR_PH_GAIN_GEN2_MASK 0x000f0000L
+#define PB0_RX_GLB_CTRL_REG1__RX_CFG_CDR_PH_GAIN_GEN2__SHIFT 0x00000010
+#define PB0_RX_GLB_CTRL_REG1__RX_CFG_CDR_PH_GAIN_GEN3_MASK 0x00f00000L
+#define PB0_RX_GLB_CTRL_REG1__RX_CFG_CDR_PH_GAIN_GEN3__SHIFT 0x00000014
+#define PB0_RX_GLB_CTRL_REG1__RX_CFG_CDR_PI_STPSZ_GEN1_MASK 0x01000000L
+#define PB0_RX_GLB_CTRL_REG1__RX_CFG_CDR_PI_STPSZ_GEN1__SHIFT 0x00000018
+#define PB0_RX_GLB_CTRL_REG1__RX_CFG_CDR_PI_STPSZ_GEN2_MASK 0x02000000L
+#define PB0_RX_GLB_CTRL_REG1__RX_CFG_CDR_PI_STPSZ_GEN2__SHIFT 0x00000019
+#define PB0_RX_GLB_CTRL_REG1__RX_CFG_CDR_PI_STPSZ_GEN3_MASK 0x04000000L
+#define PB0_RX_GLB_CTRL_REG1__RX_CFG_CDR_PI_STPSZ_GEN3__SHIFT 0x0000001a
+#define PB0_RX_GLB_CTRL_REG1__RX_CFG_LEQ_DCATTN_BYP_EN_GEN1_MASK 0x08000000L
+#define PB0_RX_GLB_CTRL_REG1__RX_CFG_LEQ_DCATTN_BYP_EN_GEN1__SHIFT 0x0000001b
+#define PB0_RX_GLB_CTRL_REG1__RX_CFG_LEQ_DCATTN_BYP_EN_GEN2_MASK 0x10000000L
+#define PB0_RX_GLB_CTRL_REG1__RX_CFG_LEQ_DCATTN_BYP_EN_GEN2__SHIFT 0x0000001c
+#define PB0_RX_GLB_CTRL_REG1__RX_CFG_LEQ_DCATTN_BYP_EN_GEN3_MASK 0x20000000L
+#define PB0_RX_GLB_CTRL_REG1__RX_CFG_LEQ_DCATTN_BYP_EN_GEN3__SHIFT 0x0000001d
+#define PB0_RX_GLB_CTRL_REG2__RX_CFG_CDR_TIME_GEN1_MASK 0x0000f000L
+#define PB0_RX_GLB_CTRL_REG2__RX_CFG_CDR_TIME_GEN1__SHIFT 0x0000000c
+#define PB0_RX_GLB_CTRL_REG2__RX_CFG_CDR_TIME_GEN2_MASK 0x000f0000L
+#define PB0_RX_GLB_CTRL_REG2__RX_CFG_CDR_TIME_GEN2__SHIFT 0x00000010
+#define PB0_RX_GLB_CTRL_REG2__RX_CFG_CDR_TIME_GEN3_MASK 0x00f00000L
+#define PB0_RX_GLB_CTRL_REG2__RX_CFG_CDR_TIME_GEN3__SHIFT 0x00000014
+#define PB0_RX_GLB_CTRL_REG2__RX_CFG_LEQ_LOOP_GAIN_GEN1_MASK 0x03000000L
+#define PB0_RX_GLB_CTRL_REG2__RX_CFG_LEQ_LOOP_GAIN_GEN1__SHIFT 0x00000018
+#define PB0_RX_GLB_CTRL_REG2__RX_CFG_LEQ_LOOP_GAIN_GEN2_MASK 0x0c000000L
+#define PB0_RX_GLB_CTRL_REG2__RX_CFG_LEQ_LOOP_GAIN_GEN2__SHIFT 0x0000001a
+#define PB0_RX_GLB_CTRL_REG2__RX_CFG_LEQ_LOOP_GAIN_GEN3_MASK 0x30000000L
+#define PB0_RX_GLB_CTRL_REG2__RX_CFG_LEQ_LOOP_GAIN_GEN3__SHIFT 0x0000001c
+#define PB0_RX_GLB_CTRL_REG2__RX_DCLK_EN_ASRT_TO_ADAPT_HLD_MASK 0xc0000000L
+#define PB0_RX_GLB_CTRL_REG2__RX_DCLK_EN_ASRT_TO_ADAPT_HLD__SHIFT 0x0000001e
+#define PB0_RX_GLB_CTRL_REG3__RX_CFG_CDR_FR_EN_GEN1_MASK 0x00000001L
+#define PB0_RX_GLB_CTRL_REG3__RX_CFG_CDR_FR_EN_GEN1__SHIFT 0x00000000
+#define PB0_RX_GLB_CTRL_REG3__RX_CFG_CDR_FR_EN_GEN2_MASK 0x00000002L
+#define PB0_RX_GLB_CTRL_REG3__RX_CFG_CDR_FR_EN_GEN2__SHIFT 0x00000001
+#define PB0_RX_GLB_CTRL_REG3__RX_CFG_CDR_FR_EN_GEN3_MASK 0x00000004L
+#define PB0_RX_GLB_CTRL_REG3__RX_CFG_CDR_FR_EN_GEN3__SHIFT 0x00000002
+#define PB0_RX_GLB_CTRL_REG3__RX_CFG_DFE_TIME_GEN1_MASK 0x00f00000L
+#define PB0_RX_GLB_CTRL_REG3__RX_CFG_DFE_TIME_GEN1__SHIFT 0x00000014
+#define PB0_RX_GLB_CTRL_REG3__RX_CFG_DFE_TIME_GEN2_MASK 0x0f000000L
+#define PB0_RX_GLB_CTRL_REG3__RX_CFG_DFE_TIME_GEN2__SHIFT 0x00000018
+#define PB0_RX_GLB_CTRL_REG3__RX_CFG_DFE_TIME_GEN3_MASK 0xf0000000L
+#define PB0_RX_GLB_CTRL_REG3__RX_CFG_DFE_TIME_GEN3__SHIFT 0x0000001c
+#define PB0_RX_GLB_CTRL_REG4__RX_CFG_FOM_BER_GEN1_MASK 0x00000007L
+#define PB0_RX_GLB_CTRL_REG4__RX_CFG_FOM_BER_GEN1__SHIFT 0x00000000
+#define PB0_RX_GLB_CTRL_REG4__RX_CFG_FOM_BER_GEN2_MASK 0x00000038L
+#define PB0_RX_GLB_CTRL_REG4__RX_CFG_FOM_BER_GEN2__SHIFT 0x00000003
+#define PB0_RX_GLB_CTRL_REG4__RX_CFG_FOM_BER_GEN3_MASK 0x000001c0L
+#define PB0_RX_GLB_CTRL_REG4__RX_CFG_FOM_BER_GEN3__SHIFT 0x00000006
+#define PB0_RX_GLB_CTRL_REG4__RX_CFG_FOM_TIME_GEN1_MASK 0x00f00000L
+#define PB0_RX_GLB_CTRL_REG4__RX_CFG_FOM_TIME_GEN1__SHIFT 0x00000014
+#define PB0_RX_GLB_CTRL_REG4__RX_CFG_FOM_TIME_GEN2_MASK 0x0f000000L
+#define PB0_RX_GLB_CTRL_REG4__RX_CFG_FOM_TIME_GEN2__SHIFT 0x00000018
+#define PB0_RX_GLB_CTRL_REG4__RX_CFG_FOM_TIME_GEN3_MASK 0xf0000000L
+#define PB0_RX_GLB_CTRL_REG4__RX_CFG_FOM_TIME_GEN3__SHIFT 0x0000001c
+#define PB0_RX_GLB_CTRL_REG4__RX_CFG_LEQ_POLE_BYP_VAL_GEN1_MASK 0x00000e00L
+#define PB0_RX_GLB_CTRL_REG4__RX_CFG_LEQ_POLE_BYP_VAL_GEN1__SHIFT 0x00000009
+#define PB0_RX_GLB_CTRL_REG4__RX_CFG_LEQ_POLE_BYP_VAL_GEN2_MASK 0x00007000L
+#define PB0_RX_GLB_CTRL_REG4__RX_CFG_LEQ_POLE_BYP_VAL_GEN2__SHIFT 0x0000000c
+#define PB0_RX_GLB_CTRL_REG4__RX_CFG_LEQ_POLE_BYP_VAL_GEN3_MASK 0x00038000L
+#define PB0_RX_GLB_CTRL_REG4__RX_CFG_LEQ_POLE_BYP_VAL_GEN3__SHIFT 0x0000000f
+#define PB0_RX_GLB_CTRL_REG5__RX_CFG_LEQ_DCATTN_BYP_VAL_GEN1_MASK 0x0000001fL
+#define PB0_RX_GLB_CTRL_REG5__RX_CFG_LEQ_DCATTN_BYP_VAL_GEN1__SHIFT 0x00000000
+#define PB0_RX_GLB_CTRL_REG5__RX_CFG_LEQ_DCATTN_BYP_VAL_GEN2_MASK 0x000003e0L
+#define PB0_RX_GLB_CTRL_REG5__RX_CFG_LEQ_DCATTN_BYP_VAL_GEN2__SHIFT 0x00000005
+#define PB0_RX_GLB_CTRL_REG5__RX_CFG_LEQ_DCATTN_BYP_VAL_GEN3_MASK 0x00007c00L
+#define PB0_RX_GLB_CTRL_REG5__RX_CFG_LEQ_DCATTN_BYP_VAL_GEN3__SHIFT 0x0000000a
+#define PB0_RX_GLB_CTRL_REG5__RX_CFG_LEQ_POLE_BYP_EN_GEN1_MASK 0x00008000L
+#define PB0_RX_GLB_CTRL_REG5__RX_CFG_LEQ_POLE_BYP_EN_GEN1__SHIFT 0x0000000f
+#define PB0_RX_GLB_CTRL_REG5__RX_CFG_LEQ_POLE_BYP_EN_GEN2_MASK 0x00010000L
+#define PB0_RX_GLB_CTRL_REG5__RX_CFG_LEQ_POLE_BYP_EN_GEN2__SHIFT 0x00000010
+#define PB0_RX_GLB_CTRL_REG5__RX_CFG_LEQ_POLE_BYP_EN_GEN3_MASK 0x00020000L
+#define PB0_RX_GLB_CTRL_REG5__RX_CFG_LEQ_POLE_BYP_EN_GEN3__SHIFT 0x00000011
+#define PB0_RX_GLB_CTRL_REG5__RX_CFG_LEQ_SHUNT_EN_GEN1_MASK 0x00040000L
+#define PB0_RX_GLB_CTRL_REG5__RX_CFG_LEQ_SHUNT_EN_GEN1__SHIFT 0x00000012
+#define PB0_RX_GLB_CTRL_REG5__RX_CFG_LEQ_SHUNT_EN_GEN2_MASK 0x00080000L
+#define PB0_RX_GLB_CTRL_REG5__RX_CFG_LEQ_SHUNT_EN_GEN2__SHIFT 0x00000013
+#define PB0_RX_GLB_CTRL_REG5__RX_CFG_LEQ_SHUNT_EN_GEN3_MASK 0x00100000L
+#define PB0_RX_GLB_CTRL_REG5__RX_CFG_LEQ_SHUNT_EN_GEN3__SHIFT 0x00000014
+#define PB0_RX_GLB_CTRL_REG5__RX_CFG_TERM_MODE_GEN1_MASK 0x08000000L
+#define PB0_RX_GLB_CTRL_REG5__RX_CFG_TERM_MODE_GEN1__SHIFT 0x0000001b
+#define PB0_RX_GLB_CTRL_REG5__RX_CFG_TERM_MODE_GEN2_MASK 0x10000000L
+#define PB0_RX_GLB_CTRL_REG5__RX_CFG_TERM_MODE_GEN2__SHIFT 0x0000001c
+#define PB0_RX_GLB_CTRL_REG5__RX_CFG_TERM_MODE_GEN3_MASK 0x20000000L
+#define PB0_RX_GLB_CTRL_REG5__RX_CFG_TERM_MODE_GEN3__SHIFT 0x0000001d
+#define PB0_RX_GLB_CTRL_REG5__RX_FORCE_DLL_RST_RXPWR_LS2OFF_TO_LS0_MASK 0x40000000L
+#define PB0_RX_GLB_CTRL_REG5__RX_FORCE_DLL_RST_RXPWR_LS2OFF_TO_LS0__SHIFT 0x0000001e
+#define PB0_RX_GLB_CTRL_REG6__RX_AUX_PWRON_LUT_ENTRY_LS2_MASK 0x08000000L
+#define PB0_RX_GLB_CTRL_REG6__RX_AUX_PWRON_LUT_ENTRY_LS2__SHIFT 0x0000001b
+#define PB0_RX_GLB_CTRL_REG6__RX_CFG_LEQ_TIME_GEN1_MASK 0x0000000fL
+#define PB0_RX_GLB_CTRL_REG6__RX_CFG_LEQ_TIME_GEN1__SHIFT 0x00000000
+#define PB0_RX_GLB_CTRL_REG6__RX_CFG_LEQ_TIME_GEN2_MASK 0x000000f0L
+#define PB0_RX_GLB_CTRL_REG6__RX_CFG_LEQ_TIME_GEN2__SHIFT 0x00000004
+#define PB0_RX_GLB_CTRL_REG6__RX_CFG_LEQ_TIME_GEN3_MASK 0x00000f00L
+#define PB0_RX_GLB_CTRL_REG6__RX_CFG_LEQ_TIME_GEN3__SHIFT 0x00000008
+#define PB0_RX_GLB_CTRL_REG6__RX_CFG_OC_TIME_GEN1_MASK 0x0000f000L
+#define PB0_RX_GLB_CTRL_REG6__RX_CFG_OC_TIME_GEN1__SHIFT 0x0000000c
+#define PB0_RX_GLB_CTRL_REG6__RX_CFG_OC_TIME_GEN2_MASK 0x000f0000L
+#define PB0_RX_GLB_CTRL_REG6__RX_CFG_OC_TIME_GEN2__SHIFT 0x00000010
+#define PB0_RX_GLB_CTRL_REG6__RX_CFG_OC_TIME_GEN3_MASK 0x00f00000L
+#define PB0_RX_GLB_CTRL_REG6__RX_CFG_OC_TIME_GEN3__SHIFT 0x00000014
+#define PB0_RX_GLB_CTRL_REG6__RX_FRONTEND_PWRON_LUT_ENTRY_LS0_CDR_EN_0_MASK 0x01000000L
+#define PB0_RX_GLB_CTRL_REG6__RX_FRONTEND_PWRON_LUT_ENTRY_LS0_CDR_EN_0__SHIFT 0x00000018
+#define PB0_RX_GLB_CTRL_REG6__RX_FRONTEND_PWRON_LUT_ENTRY_LS2_MASK 0x04000000L
+#define PB0_RX_GLB_CTRL_REG6__RX_FRONTEND_PWRON_LUT_ENTRY_LS2__SHIFT 0x0000001a
+#define PB0_RX_GLB_CTRL_REG7__RX_CFG_DLL_CPI_SEL_GEN1_MASK 0x001c0000L
+#define PB0_RX_GLB_CTRL_REG7__RX_CFG_DLL_CPI_SEL_GEN1__SHIFT 0x00000012
+#define PB0_RX_GLB_CTRL_REG7__RX_CFG_DLL_CPI_SEL_GEN2_MASK 0x00e00000L
+#define PB0_RX_GLB_CTRL_REG7__RX_CFG_DLL_CPI_SEL_GEN2__SHIFT 0x00000015
+#define PB0_RX_GLB_CTRL_REG7__RX_CFG_DLL_CPI_SEL_GEN3_MASK 0x07000000L
+#define PB0_RX_GLB_CTRL_REG7__RX_CFG_DLL_CPI_SEL_GEN3__SHIFT 0x00000018
+#define PB0_RX_GLB_CTRL_REG7__RX_CFG_DLL_FLOCK_DISABLE_GEN1_MASK 0x08000000L
+#define PB0_RX_GLB_CTRL_REG7__RX_CFG_DLL_FLOCK_DISABLE_GEN1__SHIFT 0x0000001b
+#define PB0_RX_GLB_CTRL_REG7__RX_CFG_DLL_FLOCK_DISABLE_GEN2_MASK 0x10000000L
+#define PB0_RX_GLB_CTRL_REG7__RX_CFG_DLL_FLOCK_DISABLE_GEN2__SHIFT 0x0000001c
+#define PB0_RX_GLB_CTRL_REG7__RX_CFG_DLL_FLOCK_DISABLE_GEN3_MASK 0x20000000L
+#define PB0_RX_GLB_CTRL_REG7__RX_CFG_DLL_FLOCK_DISABLE_GEN3__SHIFT 0x0000001d
+#define PB0_RX_GLB_CTRL_REG7__RX_CFG_TH_LOOP_GAIN_GEN1_MASK 0x0000000fL
+#define PB0_RX_GLB_CTRL_REG7__RX_CFG_TH_LOOP_GAIN_GEN1__SHIFT 0x00000000
+#define PB0_RX_GLB_CTRL_REG7__RX_CFG_TH_LOOP_GAIN_GEN2_MASK 0x000000f0L
+#define PB0_RX_GLB_CTRL_REG7__RX_CFG_TH_LOOP_GAIN_GEN2__SHIFT 0x00000004
+#define PB0_RX_GLB_CTRL_REG7__RX_CFG_TH_LOOP_GAIN_GEN3_MASK 0x00000f00L
+#define PB0_RX_GLB_CTRL_REG7__RX_CFG_TH_LOOP_GAIN_GEN3__SHIFT 0x00000008
+#define PB0_RX_GLB_CTRL_REG7__RX_DCLK_EN_LUT_ENTRY_LS0_CDR_EN_0_MASK 0x00001000L
+#define PB0_RX_GLB_CTRL_REG7__RX_DCLK_EN_LUT_ENTRY_LS0_CDR_EN_0__SHIFT 0x0000000c
+#define PB0_RX_GLB_CTRL_REG7__RX_DCLK_EN_LUT_ENTRY_LS2_MASK 0x00002000L
+#define PB0_RX_GLB_CTRL_REG7__RX_DCLK_EN_LUT_ENTRY_LS2__SHIFT 0x0000000d
+#define PB0_RX_GLB_CTRL_REG7__RX_DLL_PWRON_LUT_ENTRY_LS2_MASK 0x00020000L
+#define PB0_RX_GLB_CTRL_REG7__RX_DLL_PWRON_LUT_ENTRY_LS2__SHIFT 0x00000011
+#define PB0_RX_GLB_OVRD_REG0__RX_ADAPT_FOM_OVRD_EN_MASK 0x80000000L
+#define PB0_RX_GLB_OVRD_REG0__RX_ADAPT_FOM_OVRD_EN__SHIFT 0x0000001f
+#define PB0_RX_GLB_OVRD_REG0__RX_ADAPT_FOM_OVRD_VAL_MASK 0x40000000L
+#define PB0_RX_GLB_OVRD_REG0__RX_ADAPT_FOM_OVRD_VAL__SHIFT 0x0000001e
+#define PB0_RX_GLB_OVRD_REG0__RX_ADAPT_HLD_OVRD_EN_MASK 0x00000002L
+#define PB0_RX_GLB_OVRD_REG0__RX_ADAPT_HLD_OVRD_EN__SHIFT 0x00000001
+#define PB0_RX_GLB_OVRD_REG0__RX_ADAPT_HLD_OVRD_VAL_MASK 0x00000001L
+#define PB0_RX_GLB_OVRD_REG0__RX_ADAPT_HLD_OVRD_VAL__SHIFT 0x00000000
+#define PB0_RX_GLB_OVRD_REG0__RX_ADAPT_RST_OVRD_EN_MASK 0x00000008L
+#define PB0_RX_GLB_OVRD_REG0__RX_ADAPT_RST_OVRD_EN__SHIFT 0x00000003
+#define PB0_RX_GLB_OVRD_REG0__RX_ADAPT_RST_OVRD_VAL_MASK 0x00000004L
+#define PB0_RX_GLB_OVRD_REG0__RX_ADAPT_RST_OVRD_VAL__SHIFT 0x00000002
+#define PB0_RX_GLB_OVRD_REG0__RX_AUX_PWRON_OVRD_EN_MASK 0x20000000L
+#define PB0_RX_GLB_OVRD_REG0__RX_AUX_PWRON_OVRD_EN__SHIFT 0x0000001d
+#define PB0_RX_GLB_OVRD_REG0__RX_AUX_PWRON_OVRD_VAL_MASK 0x10000000L
+#define PB0_RX_GLB_OVRD_REG0__RX_AUX_PWRON_OVRD_VAL__SHIFT 0x0000001c
+#define PB0_RX_GLB_OVRD_REG0__RX_CFG_DCLK_DIV_OVRD_EN_MASK 0x00000100L
+#define PB0_RX_GLB_OVRD_REG0__RX_CFG_DCLK_DIV_OVRD_EN__SHIFT 0x00000008
+#define PB0_RX_GLB_OVRD_REG0__RX_CFG_DCLK_DIV_OVRD_VAL_MASK 0x000000c0L
+#define PB0_RX_GLB_OVRD_REG0__RX_CFG_DCLK_DIV_OVRD_VAL__SHIFT 0x00000006
+#define PB0_RX_GLB_OVRD_REG0__RX_CFG_DLL_FREQ_MODE_OVRD_EN_MASK 0x00000400L
+#define PB0_RX_GLB_OVRD_REG0__RX_CFG_DLL_FREQ_MODE_OVRD_EN__SHIFT 0x0000000a
+#define PB0_RX_GLB_OVRD_REG0__RX_CFG_DLL_FREQ_MODE_OVRD_VAL_MASK 0x00000200L
+#define PB0_RX_GLB_OVRD_REG0__RX_CFG_DLL_FREQ_MODE_OVRD_VAL__SHIFT 0x00000009
+#define PB0_RX_GLB_OVRD_REG0__RX_CFG_PLLCLK_SEL_OVRD_EN_MASK 0x00001000L
+#define PB0_RX_GLB_OVRD_REG0__RX_CFG_PLLCLK_SEL_OVRD_EN__SHIFT 0x0000000c
+#define PB0_RX_GLB_OVRD_REG0__RX_CFG_PLLCLK_SEL_OVRD_VAL_MASK 0x00000800L
+#define PB0_RX_GLB_OVRD_REG0__RX_CFG_PLLCLK_SEL_OVRD_VAL__SHIFT 0x0000000b
+#define PB0_RX_GLB_OVRD_REG0__RX_CFG_RCLK_DIV_OVRD_EN_MASK 0x00004000L
+#define PB0_RX_GLB_OVRD_REG0__RX_CFG_RCLK_DIV_OVRD_EN__SHIFT 0x0000000e
+#define PB0_RX_GLB_OVRD_REG0__RX_CFG_RCLK_DIV_OVRD_VAL_MASK 0x00002000L
+#define PB0_RX_GLB_OVRD_REG0__RX_CFG_RCLK_DIV_OVRD_VAL__SHIFT 0x0000000d
+#define PB0_RX_GLB_OVRD_REG0__RX_DCLK_EN_OVRD_EN_MASK 0x00010000L
+#define PB0_RX_GLB_OVRD_REG0__RX_DCLK_EN_OVRD_EN__SHIFT 0x00000010
+#define PB0_RX_GLB_OVRD_REG0__RX_DCLK_EN_OVRD_VAL_MASK 0x00008000L
+#define PB0_RX_GLB_OVRD_REG0__RX_DCLK_EN_OVRD_VAL__SHIFT 0x0000000f
+#define PB0_RX_GLB_OVRD_REG0__RX_DLL_PWRON_OVRD_EN_MASK 0x00040000L
+#define PB0_RX_GLB_OVRD_REG0__RX_DLL_PWRON_OVRD_EN__SHIFT 0x00000012
+#define PB0_RX_GLB_OVRD_REG0__RX_DLL_PWRON_OVRD_VAL_MASK 0x00020000L
+#define PB0_RX_GLB_OVRD_REG0__RX_DLL_PWRON_OVRD_VAL__SHIFT 0x00000011
+#define PB0_RX_GLB_OVRD_REG0__RX_FRONTEND_PWRON_OVRD_EN_MASK 0x00100000L
+#define PB0_RX_GLB_OVRD_REG0__RX_FRONTEND_PWRON_OVRD_EN__SHIFT 0x00000014
+#define PB0_RX_GLB_OVRD_REG0__RX_FRONTEND_PWRON_OVRD_VAL_MASK 0x00080000L
+#define PB0_RX_GLB_OVRD_REG0__RX_FRONTEND_PWRON_OVRD_VAL__SHIFT 0x00000013
+#define PB0_RX_GLB_OVRD_REG0__RX_IDLEDET_PWRON_OVRD_EN_MASK 0x00400000L
+#define PB0_RX_GLB_OVRD_REG0__RX_IDLEDET_PWRON_OVRD_EN__SHIFT 0x00000016
+#define PB0_RX_GLB_OVRD_REG0__RX_IDLEDET_PWRON_OVRD_VAL_MASK 0x00200000L
+#define PB0_RX_GLB_OVRD_REG0__RX_IDLEDET_PWRON_OVRD_VAL__SHIFT 0x00000015
+#define PB0_RX_GLB_OVRD_REG0__RX_TERM_EN_OVRD_EN_MASK 0x01000000L
+#define PB0_RX_GLB_OVRD_REG0__RX_TERM_EN_OVRD_EN__SHIFT 0x00000018
+#define PB0_RX_GLB_OVRD_REG0__RX_TERM_EN_OVRD_VAL_MASK 0x00800000L
+#define PB0_RX_GLB_OVRD_REG0__RX_TERM_EN_OVRD_VAL__SHIFT 0x00000017
+#define PB0_RX_GLB_OVRD_REG1__RX_ADAPT_TRK_OVRD_EN_MASK 0x00000002L
+#define PB0_RX_GLB_OVRD_REG1__RX_ADAPT_TRK_OVRD_EN__SHIFT 0x00000001
+#define PB0_RX_GLB_OVRD_REG1__RX_ADAPT_TRK_OVRD_VAL_MASK 0x00000001L
+#define PB0_RX_GLB_OVRD_REG1__RX_ADAPT_TRK_OVRD_VAL__SHIFT 0x00000000
+#define PB0_RX_GLB_SCI_STAT_OVRD_REG0__IGNR_ELECIDLEDETEN_SCI_UPDT_L0T3_MASK 0x00000010L
+#define PB0_RX_GLB_SCI_STAT_OVRD_REG0__IGNR_ELECIDLEDETEN_SCI_UPDT_L0T3__SHIFT 0x00000004
+#define PB0_RX_GLB_SCI_STAT_OVRD_REG0__IGNR_ELECIDLEDETEN_SCI_UPDT_L12T15_MASK 0x00000080L
+#define PB0_RX_GLB_SCI_STAT_OVRD_REG0__IGNR_ELECIDLEDETEN_SCI_UPDT_L12T15__SHIFT 0x00000007
+#define PB0_RX_GLB_SCI_STAT_OVRD_REG0__IGNR_ELECIDLEDETEN_SCI_UPDT_L4T7_MASK 0x00000020L
+#define PB0_RX_GLB_SCI_STAT_OVRD_REG0__IGNR_ELECIDLEDETEN_SCI_UPDT_L4T7__SHIFT 0x00000005
+#define PB0_RX_GLB_SCI_STAT_OVRD_REG0__IGNR_ELECIDLEDETEN_SCI_UPDT_L8T11_MASK 0x00000040L
+#define PB0_RX_GLB_SCI_STAT_OVRD_REG0__IGNR_ELECIDLEDETEN_SCI_UPDT_L8T11__SHIFT 0x00000006
+#define PB0_RX_GLB_SCI_STAT_OVRD_REG0__IGNR_ENABLEFOM_SCI_UPDT_L0T3_MASK 0x00001000L
+#define PB0_RX_GLB_SCI_STAT_OVRD_REG0__IGNR_ENABLEFOM_SCI_UPDT_L0T3__SHIFT 0x0000000c
+#define PB0_RX_GLB_SCI_STAT_OVRD_REG0__IGNR_ENABLEFOM_SCI_UPDT_L12T15_MASK 0x00008000L
+#define PB0_RX_GLB_SCI_STAT_OVRD_REG0__IGNR_ENABLEFOM_SCI_UPDT_L12T15__SHIFT 0x0000000f
+#define PB0_RX_GLB_SCI_STAT_OVRD_REG0__IGNR_ENABLEFOM_SCI_UPDT_L4T7_MASK 0x00002000L
+#define PB0_RX_GLB_SCI_STAT_OVRD_REG0__IGNR_ENABLEFOM_SCI_UPDT_L4T7__SHIFT 0x0000000d
+#define PB0_RX_GLB_SCI_STAT_OVRD_REG0__IGNR_ENABLEFOM_SCI_UPDT_L8T11_MASK 0x00004000L
+#define PB0_RX_GLB_SCI_STAT_OVRD_REG0__IGNR_ENABLEFOM_SCI_UPDT_L8T11__SHIFT 0x0000000e
+#define PB0_RX_GLB_SCI_STAT_OVRD_REG0__IGNR_REQUESTFOM_SCI_UPDT_L0T3_MASK 0x00010000L
+#define PB0_RX_GLB_SCI_STAT_OVRD_REG0__IGNR_REQUESTFOM_SCI_UPDT_L0T3__SHIFT 0x00000010
+#define PB0_RX_GLB_SCI_STAT_OVRD_REG0__IGNR_REQUESTFOM_SCI_UPDT_L12T15_MASK 0x00080000L
+#define PB0_RX_GLB_SCI_STAT_OVRD_REG0__IGNR_REQUESTFOM_SCI_UPDT_L12T15__SHIFT 0x00000013
+#define PB0_RX_GLB_SCI_STAT_OVRD_REG0__IGNR_REQUESTFOM_SCI_UPDT_L4T7_MASK 0x00020000L
+#define PB0_RX_GLB_SCI_STAT_OVRD_REG0__IGNR_REQUESTFOM_SCI_UPDT_L4T7__SHIFT 0x00000011
+#define PB0_RX_GLB_SCI_STAT_OVRD_REG0__IGNR_REQUESTFOM_SCI_UPDT_L8T11_MASK 0x00040000L
+#define PB0_RX_GLB_SCI_STAT_OVRD_REG0__IGNR_REQUESTFOM_SCI_UPDT_L8T11__SHIFT 0x00000012
+#define PB0_RX_GLB_SCI_STAT_OVRD_REG0__IGNR_RESPONSEMODE_SCI_UPDT_L0T3_MASK 0x00100000L
+#define PB0_RX_GLB_SCI_STAT_OVRD_REG0__IGNR_RESPONSEMODE_SCI_UPDT_L0T3__SHIFT 0x00000014
+#define PB0_RX_GLB_SCI_STAT_OVRD_REG0__IGNR_RESPONSEMODE_SCI_UPDT_L12T15_MASK 0x00800000L
+#define PB0_RX_GLB_SCI_STAT_OVRD_REG0__IGNR_RESPONSEMODE_SCI_UPDT_L12T15__SHIFT 0x00000017
+#define PB0_RX_GLB_SCI_STAT_OVRD_REG0__IGNR_RESPONSEMODE_SCI_UPDT_L4T7_MASK 0x00200000L
+#define PB0_RX_GLB_SCI_STAT_OVRD_REG0__IGNR_RESPONSEMODE_SCI_UPDT_L4T7__SHIFT 0x00000015
+#define PB0_RX_GLB_SCI_STAT_OVRD_REG0__IGNR_RESPONSEMODE_SCI_UPDT_L8T11_MASK 0x00400000L
+#define PB0_RX_GLB_SCI_STAT_OVRD_REG0__IGNR_RESPONSEMODE_SCI_UPDT_L8T11__SHIFT 0x00000016
+#define PB0_RX_GLB_SCI_STAT_OVRD_REG0__IGNR_RXPRESETHINT_SCI_UPDT_L0T3_MASK 0x00000100L
+#define PB0_RX_GLB_SCI_STAT_OVRD_REG0__IGNR_RXPRESETHINT_SCI_UPDT_L0T3__SHIFT 0x00000008
+#define PB0_RX_GLB_SCI_STAT_OVRD_REG0__IGNR_RXPRESETHINT_SCI_UPDT_L12T15_MASK 0x00000800L
+#define PB0_RX_GLB_SCI_STAT_OVRD_REG0__IGNR_RXPRESETHINT_SCI_UPDT_L12T15__SHIFT 0x0000000b
+#define PB0_RX_GLB_SCI_STAT_OVRD_REG0__IGNR_RXPRESETHINT_SCI_UPDT_L4T7_MASK 0x00000200L
+#define PB0_RX_GLB_SCI_STAT_OVRD_REG0__IGNR_RXPRESETHINT_SCI_UPDT_L4T7__SHIFT 0x00000009
+#define PB0_RX_GLB_SCI_STAT_OVRD_REG0__IGNR_RXPRESETHINT_SCI_UPDT_L8T11_MASK 0x00000400L
+#define PB0_RX_GLB_SCI_STAT_OVRD_REG0__IGNR_RXPRESETHINT_SCI_UPDT_L8T11__SHIFT 0x0000000a
+#define PB0_RX_GLB_SCI_STAT_OVRD_REG0__IGNR_RXPWR_SCI_UPDT_L0T3_MASK 0x00000001L
+#define PB0_RX_GLB_SCI_STAT_OVRD_REG0__IGNR_RXPWR_SCI_UPDT_L0T3__SHIFT 0x00000000
+#define PB0_RX_GLB_SCI_STAT_OVRD_REG0__IGNR_RXPWR_SCI_UPDT_L12T15_MASK 0x00000008L
+#define PB0_RX_GLB_SCI_STAT_OVRD_REG0__IGNR_RXPWR_SCI_UPDT_L12T15__SHIFT 0x00000003
+#define PB0_RX_GLB_SCI_STAT_OVRD_REG0__IGNR_RXPWR_SCI_UPDT_L4T7_MASK 0x00000002L
+#define PB0_RX_GLB_SCI_STAT_OVRD_REG0__IGNR_RXPWR_SCI_UPDT_L4T7__SHIFT 0x00000001
+#define PB0_RX_GLB_SCI_STAT_OVRD_REG0__IGNR_RXPWR_SCI_UPDT_L8T11_MASK 0x00000004L
+#define PB0_RX_GLB_SCI_STAT_OVRD_REG0__IGNR_RXPWR_SCI_UPDT_L8T11__SHIFT 0x00000002
+#define PB0_RX_LANE0_CTRL_REG0__RX_BACKUP_0_MASK 0x000000ffL
+#define PB0_RX_LANE0_CTRL_REG0__RX_BACKUP_0__SHIFT 0x00000000
+#define PB0_RX_LANE0_CTRL_REG0__RX_CFG_OVR_PWRSF_0_MASK 0x00002000L
+#define PB0_RX_LANE0_CTRL_REG0__RX_CFG_OVR_PWRSF_0__SHIFT 0x0000000d
+#define PB0_RX_LANE0_CTRL_REG0__RX_DBG_ANALOG_SEL_0_MASK 0x00000c00L
+#define PB0_RX_LANE0_CTRL_REG0__RX_DBG_ANALOG_SEL_0__SHIFT 0x0000000a
+#define PB0_RX_LANE0_CTRL_REG0__RX_TST_BSCAN_EN_0_MASK 0x00001000L
+#define PB0_RX_LANE0_CTRL_REG0__RX_TST_BSCAN_EN_0__SHIFT 0x0000000c
+#define PB0_RX_LANE0_SCI_STAT_OVRD_REG0__ELECIDLEDETEN_0_MASK 0x00000008L
+#define PB0_RX_LANE0_SCI_STAT_OVRD_REG0__ELECIDLEDETEN_0__SHIFT 0x00000003
+#define PB0_RX_LANE0_SCI_STAT_OVRD_REG0__ENABLEFOM_0_MASK 0x00000080L
+#define PB0_RX_LANE0_SCI_STAT_OVRD_REG0__ENABLEFOM_0__SHIFT 0x00000007
+#define PB0_RX_LANE0_SCI_STAT_OVRD_REG0__REQUESTFOM_0_MASK 0x00000100L
+#define PB0_RX_LANE0_SCI_STAT_OVRD_REG0__REQUESTFOM_0__SHIFT 0x00000008
+#define PB0_RX_LANE0_SCI_STAT_OVRD_REG0__RESPONSEMODE_0_MASK 0x00000200L
+#define PB0_RX_LANE0_SCI_STAT_OVRD_REG0__RESPONSEMODE_0__SHIFT 0x00000009
+#define PB0_RX_LANE0_SCI_STAT_OVRD_REG0__RXPRESETHINT_0_MASK 0x00000070L
+#define PB0_RX_LANE0_SCI_STAT_OVRD_REG0__RXPRESETHINT_0__SHIFT 0x00000004
+#define PB0_RX_LANE0_SCI_STAT_OVRD_REG0__RXPWR_0_MASK 0x00000007L
+#define PB0_RX_LANE0_SCI_STAT_OVRD_REG0__RXPWR_0__SHIFT 0x00000000
+#define PB0_RX_LANE10_CTRL_REG0__RX_BACKUP_10_MASK 0x000000ffL
+#define PB0_RX_LANE10_CTRL_REG0__RX_BACKUP_10__SHIFT 0x00000000
+#define PB0_RX_LANE10_CTRL_REG0__RX_CFG_OVR_PWRSF_10_MASK 0x00002000L
+#define PB0_RX_LANE10_CTRL_REG0__RX_CFG_OVR_PWRSF_10__SHIFT 0x0000000d
+#define PB0_RX_LANE10_CTRL_REG0__RX_DBG_ANALOG_SEL_10_MASK 0x00000c00L
+#define PB0_RX_LANE10_CTRL_REG0__RX_DBG_ANALOG_SEL_10__SHIFT 0x0000000a
+#define PB0_RX_LANE10_CTRL_REG0__RX_TST_BSCAN_EN_10_MASK 0x00001000L
+#define PB0_RX_LANE10_CTRL_REG0__RX_TST_BSCAN_EN_10__SHIFT 0x0000000c
+#define PB0_RX_LANE10_SCI_STAT_OVRD_REG0__ELECIDLEDETEN_10_MASK 0x00000008L
+#define PB0_RX_LANE10_SCI_STAT_OVRD_REG0__ELECIDLEDETEN_10__SHIFT 0x00000003
+#define PB0_RX_LANE10_SCI_STAT_OVRD_REG0__ENABLEFOM_10_MASK 0x00000080L
+#define PB0_RX_LANE10_SCI_STAT_OVRD_REG0__ENABLEFOM_10__SHIFT 0x00000007
+#define PB0_RX_LANE10_SCI_STAT_OVRD_REG0__REQUESTFOM_10_MASK 0x00000100L
+#define PB0_RX_LANE10_SCI_STAT_OVRD_REG0__REQUESTFOM_10__SHIFT 0x00000008
+#define PB0_RX_LANE10_SCI_STAT_OVRD_REG0__RESPONSEMODE_10_MASK 0x00000200L
+#define PB0_RX_LANE10_SCI_STAT_OVRD_REG0__RESPONSEMODE_10__SHIFT 0x00000009
+#define PB0_RX_LANE10_SCI_STAT_OVRD_REG0__RXPRESETHINT_10_MASK 0x00000070L
+#define PB0_RX_LANE10_SCI_STAT_OVRD_REG0__RXPRESETHINT_10__SHIFT 0x00000004
+#define PB0_RX_LANE10_SCI_STAT_OVRD_REG0__RXPWR_10_MASK 0x00000007L
+#define PB0_RX_LANE10_SCI_STAT_OVRD_REG0__RXPWR_10__SHIFT 0x00000000
+#define PB0_RX_LANE11_CTRL_REG0__RX_BACKUP_11_MASK 0x000000ffL
+#define PB0_RX_LANE11_CTRL_REG0__RX_BACKUP_11__SHIFT 0x00000000
+#define PB0_RX_LANE11_CTRL_REG0__RX_CFG_OVR_PWRSF_11_MASK 0x00002000L
+#define PB0_RX_LANE11_CTRL_REG0__RX_CFG_OVR_PWRSF_11__SHIFT 0x0000000d
+#define PB0_RX_LANE11_CTRL_REG0__RX_DBG_ANALOG_SEL_11_MASK 0x00000c00L
+#define PB0_RX_LANE11_CTRL_REG0__RX_DBG_ANALOG_SEL_11__SHIFT 0x0000000a
+#define PB0_RX_LANE11_CTRL_REG0__RX_TST_BSCAN_EN_11_MASK 0x00001000L
+#define PB0_RX_LANE11_CTRL_REG0__RX_TST_BSCAN_EN_11__SHIFT 0x0000000c
+#define PB0_RX_LANE11_SCI_STAT_OVRD_REG0__ELECIDLEDETEN_11_MASK 0x00000008L
+#define PB0_RX_LANE11_SCI_STAT_OVRD_REG0__ELECIDLEDETEN_11__SHIFT 0x00000003
+#define PB0_RX_LANE11_SCI_STAT_OVRD_REG0__ENABLEFOM_11_MASK 0x00000080L
+#define PB0_RX_LANE11_SCI_STAT_OVRD_REG0__ENABLEFOM_11__SHIFT 0x00000007
+#define PB0_RX_LANE11_SCI_STAT_OVRD_REG0__REQUESTFOM_11_MASK 0x00000100L
+#define PB0_RX_LANE11_SCI_STAT_OVRD_REG0__REQUESTFOM_11__SHIFT 0x00000008
+#define PB0_RX_LANE11_SCI_STAT_OVRD_REG0__RESPONSEMODE_11_MASK 0x00000200L
+#define PB0_RX_LANE11_SCI_STAT_OVRD_REG0__RESPONSEMODE_11__SHIFT 0x00000009
+#define PB0_RX_LANE11_SCI_STAT_OVRD_REG0__RXPRESETHINT_11_MASK 0x00000070L
+#define PB0_RX_LANE11_SCI_STAT_OVRD_REG0__RXPRESETHINT_11__SHIFT 0x00000004
+#define PB0_RX_LANE11_SCI_STAT_OVRD_REG0__RXPWR_11_MASK 0x00000007L
+#define PB0_RX_LANE11_SCI_STAT_OVRD_REG0__RXPWR_11__SHIFT 0x00000000
+#define PB0_RX_LANE12_CTRL_REG0__RX_BACKUP_12_MASK 0x000000ffL
+#define PB0_RX_LANE12_CTRL_REG0__RX_BACKUP_12__SHIFT 0x00000000
+#define PB0_RX_LANE12_CTRL_REG0__RX_CFG_OVR_PWRSF_12_MASK 0x00002000L
+#define PB0_RX_LANE12_CTRL_REG0__RX_CFG_OVR_PWRSF_12__SHIFT 0x0000000d
+#define PB0_RX_LANE12_CTRL_REG0__RX_DBG_ANALOG_SEL_12_MASK 0x00000c00L
+#define PB0_RX_LANE12_CTRL_REG0__RX_DBG_ANALOG_SEL_12__SHIFT 0x0000000a
+#define PB0_RX_LANE12_CTRL_REG0__RX_TST_BSCAN_EN_12_MASK 0x00001000L
+#define PB0_RX_LANE12_CTRL_REG0__RX_TST_BSCAN_EN_12__SHIFT 0x0000000c
+#define PB0_RX_LANE12_SCI_STAT_OVRD_REG0__ELECIDLEDETEN_12_MASK 0x00000008L
+#define PB0_RX_LANE12_SCI_STAT_OVRD_REG0__ELECIDLEDETEN_12__SHIFT 0x00000003
+#define PB0_RX_LANE12_SCI_STAT_OVRD_REG0__ENABLEFOM_12_MASK 0x00000080L
+#define PB0_RX_LANE12_SCI_STAT_OVRD_REG0__ENABLEFOM_12__SHIFT 0x00000007
+#define PB0_RX_LANE12_SCI_STAT_OVRD_REG0__REQUESTFOM_12_MASK 0x00000100L
+#define PB0_RX_LANE12_SCI_STAT_OVRD_REG0__REQUESTFOM_12__SHIFT 0x00000008
+#define PB0_RX_LANE12_SCI_STAT_OVRD_REG0__RESPONSEMODE_12_MASK 0x00000200L
+#define PB0_RX_LANE12_SCI_STAT_OVRD_REG0__RESPONSEMODE_12__SHIFT 0x00000009
+#define PB0_RX_LANE12_SCI_STAT_OVRD_REG0__RXPRESETHINT_12_MASK 0x00000070L
+#define PB0_RX_LANE12_SCI_STAT_OVRD_REG0__RXPRESETHINT_12__SHIFT 0x00000004
+#define PB0_RX_LANE12_SCI_STAT_OVRD_REG0__RXPWR_12_MASK 0x00000007L
+#define PB0_RX_LANE12_SCI_STAT_OVRD_REG0__RXPWR_12__SHIFT 0x00000000
+#define PB0_RX_LANE13_CTRL_REG0__RX_BACKUP_13_MASK 0x000000ffL
+#define PB0_RX_LANE13_CTRL_REG0__RX_BACKUP_13__SHIFT 0x00000000
+#define PB0_RX_LANE13_CTRL_REG0__RX_CFG_OVR_PWRSF_13_MASK 0x00002000L
+#define PB0_RX_LANE13_CTRL_REG0__RX_CFG_OVR_PWRSF_13__SHIFT 0x0000000d
+#define PB0_RX_LANE13_CTRL_REG0__RX_DBG_ANALOG_SEL_13_MASK 0x00000c00L
+#define PB0_RX_LANE13_CTRL_REG0__RX_DBG_ANALOG_SEL_13__SHIFT 0x0000000a
+#define PB0_RX_LANE13_CTRL_REG0__RX_TST_BSCAN_EN_13_MASK 0x00001000L
+#define PB0_RX_LANE13_CTRL_REG0__RX_TST_BSCAN_EN_13__SHIFT 0x0000000c
+#define PB0_RX_LANE13_SCI_STAT_OVRD_REG0__ELECIDLEDETEN_13_MASK 0x00000008L
+#define PB0_RX_LANE13_SCI_STAT_OVRD_REG0__ELECIDLEDETEN_13__SHIFT 0x00000003
+#define PB0_RX_LANE13_SCI_STAT_OVRD_REG0__ENABLEFOM_13_MASK 0x00000080L
+#define PB0_RX_LANE13_SCI_STAT_OVRD_REG0__ENABLEFOM_13__SHIFT 0x00000007
+#define PB0_RX_LANE13_SCI_STAT_OVRD_REG0__REQUESTFOM_13_MASK 0x00000100L
+#define PB0_RX_LANE13_SCI_STAT_OVRD_REG0__REQUESTFOM_13__SHIFT 0x00000008
+#define PB0_RX_LANE13_SCI_STAT_OVRD_REG0__RESPONSEMODE_13_MASK 0x00000200L
+#define PB0_RX_LANE13_SCI_STAT_OVRD_REG0__RESPONSEMODE_13__SHIFT 0x00000009
+#define PB0_RX_LANE13_SCI_STAT_OVRD_REG0__RXPRESETHINT_13_MASK 0x00000070L
+#define PB0_RX_LANE13_SCI_STAT_OVRD_REG0__RXPRESETHINT_13__SHIFT 0x00000004
+#define PB0_RX_LANE13_SCI_STAT_OVRD_REG0__RXPWR_13_MASK 0x00000007L
+#define PB0_RX_LANE13_SCI_STAT_OVRD_REG0__RXPWR_13__SHIFT 0x00000000
+#define PB0_RX_LANE14_CTRL_REG0__RX_BACKUP_14_MASK 0x000000ffL
+#define PB0_RX_LANE14_CTRL_REG0__RX_BACKUP_14__SHIFT 0x00000000
+#define PB0_RX_LANE14_CTRL_REG0__RX_CFG_OVR_PWRSF_14_MASK 0x00002000L
+#define PB0_RX_LANE14_CTRL_REG0__RX_CFG_OVR_PWRSF_14__SHIFT 0x0000000d
+#define PB0_RX_LANE14_CTRL_REG0__RX_DBG_ANALOG_SEL_14_MASK 0x00000c00L
+#define PB0_RX_LANE14_CTRL_REG0__RX_DBG_ANALOG_SEL_14__SHIFT 0x0000000a
+#define PB0_RX_LANE14_CTRL_REG0__RX_TST_BSCAN_EN_14_MASK 0x00001000L
+#define PB0_RX_LANE14_CTRL_REG0__RX_TST_BSCAN_EN_14__SHIFT 0x0000000c
+#define PB0_RX_LANE14_SCI_STAT_OVRD_REG0__ELECIDLEDETEN_14_MASK 0x00000008L
+#define PB0_RX_LANE14_SCI_STAT_OVRD_REG0__ELECIDLEDETEN_14__SHIFT 0x00000003
+#define PB0_RX_LANE14_SCI_STAT_OVRD_REG0__ENABLEFOM_14_MASK 0x00000080L
+#define PB0_RX_LANE14_SCI_STAT_OVRD_REG0__ENABLEFOM_14__SHIFT 0x00000007
+#define PB0_RX_LANE14_SCI_STAT_OVRD_REG0__REQUESTFOM_14_MASK 0x00000100L
+#define PB0_RX_LANE14_SCI_STAT_OVRD_REG0__REQUESTFOM_14__SHIFT 0x00000008
+#define PB0_RX_LANE14_SCI_STAT_OVRD_REG0__RESPONSEMODE_14_MASK 0x00000200L
+#define PB0_RX_LANE14_SCI_STAT_OVRD_REG0__RESPONSEMODE_14__SHIFT 0x00000009
+#define PB0_RX_LANE14_SCI_STAT_OVRD_REG0__RXPRESETHINT_14_MASK 0x00000070L
+#define PB0_RX_LANE14_SCI_STAT_OVRD_REG0__RXPRESETHINT_14__SHIFT 0x00000004
+#define PB0_RX_LANE14_SCI_STAT_OVRD_REG0__RXPWR_14_MASK 0x00000007L
+#define PB0_RX_LANE14_SCI_STAT_OVRD_REG0__RXPWR_14__SHIFT 0x00000000
+#define PB0_RX_LANE15_CTRL_REG0__RX_BACKUP_15_MASK 0x000000ffL
+#define PB0_RX_LANE15_CTRL_REG0__RX_BACKUP_15__SHIFT 0x00000000
+#define PB0_RX_LANE15_CTRL_REG0__RX_CFG_OVR_PWRSF_15_MASK 0x00002000L
+#define PB0_RX_LANE15_CTRL_REG0__RX_CFG_OVR_PWRSF_15__SHIFT 0x0000000d
+#define PB0_RX_LANE15_CTRL_REG0__RX_DBG_ANALOG_SEL_15_MASK 0x00000c00L
+#define PB0_RX_LANE15_CTRL_REG0__RX_DBG_ANALOG_SEL_15__SHIFT 0x0000000a
+#define PB0_RX_LANE15_CTRL_REG0__RX_TST_BSCAN_EN_15_MASK 0x00001000L
+#define PB0_RX_LANE15_CTRL_REG0__RX_TST_BSCAN_EN_15__SHIFT 0x0000000c
+#define PB0_RX_LANE15_SCI_STAT_OVRD_REG0__ELECIDLEDETEN_15_MASK 0x00000008L
+#define PB0_RX_LANE15_SCI_STAT_OVRD_REG0__ELECIDLEDETEN_15__SHIFT 0x00000003
+#define PB0_RX_LANE15_SCI_STAT_OVRD_REG0__ENABLEFOM_15_MASK 0x00000080L
+#define PB0_RX_LANE15_SCI_STAT_OVRD_REG0__ENABLEFOM_15__SHIFT 0x00000007
+#define PB0_RX_LANE15_SCI_STAT_OVRD_REG0__REQUESTFOM_15_MASK 0x00000100L
+#define PB0_RX_LANE15_SCI_STAT_OVRD_REG0__REQUESTFOM_15__SHIFT 0x00000008
+#define PB0_RX_LANE15_SCI_STAT_OVRD_REG0__RESPONSEMODE_15_MASK 0x00000200L
+#define PB0_RX_LANE15_SCI_STAT_OVRD_REG0__RESPONSEMODE_15__SHIFT 0x00000009
+#define PB0_RX_LANE15_SCI_STAT_OVRD_REG0__RXPRESETHINT_15_MASK 0x00000070L
+#define PB0_RX_LANE15_SCI_STAT_OVRD_REG0__RXPRESETHINT_15__SHIFT 0x00000004
+#define PB0_RX_LANE15_SCI_STAT_OVRD_REG0__RXPWR_15_MASK 0x00000007L
+#define PB0_RX_LANE15_SCI_STAT_OVRD_REG0__RXPWR_15__SHIFT 0x00000000
+#define PB0_RX_LANE1_CTRL_REG0__RX_BACKUP_1_MASK 0x000000ffL
+#define PB0_RX_LANE1_CTRL_REG0__RX_BACKUP_1__SHIFT 0x00000000
+#define PB0_RX_LANE1_CTRL_REG0__RX_CFG_OVR_PWRSF_1_MASK 0x00002000L
+#define PB0_RX_LANE1_CTRL_REG0__RX_CFG_OVR_PWRSF_1__SHIFT 0x0000000d
+#define PB0_RX_LANE1_CTRL_REG0__RX_DBG_ANALOG_SEL_1_MASK 0x00000c00L
+#define PB0_RX_LANE1_CTRL_REG0__RX_DBG_ANALOG_SEL_1__SHIFT 0x0000000a
+#define PB0_RX_LANE1_CTRL_REG0__RX_TST_BSCAN_EN_1_MASK 0x00001000L
+#define PB0_RX_LANE1_CTRL_REG0__RX_TST_BSCAN_EN_1__SHIFT 0x0000000c
+#define PB0_RX_LANE1_SCI_STAT_OVRD_REG0__ELECIDLEDETEN_1_MASK 0x00000008L
+#define PB0_RX_LANE1_SCI_STAT_OVRD_REG0__ELECIDLEDETEN_1__SHIFT 0x00000003
+#define PB0_RX_LANE1_SCI_STAT_OVRD_REG0__ENABLEFOM_1_MASK 0x00000080L
+#define PB0_RX_LANE1_SCI_STAT_OVRD_REG0__ENABLEFOM_1__SHIFT 0x00000007
+#define PB0_RX_LANE1_SCI_STAT_OVRD_REG0__REQUESTFOM_1_MASK 0x00000100L
+#define PB0_RX_LANE1_SCI_STAT_OVRD_REG0__REQUESTFOM_1__SHIFT 0x00000008
+#define PB0_RX_LANE1_SCI_STAT_OVRD_REG0__RESPONSEMODE_1_MASK 0x00000200L
+#define PB0_RX_LANE1_SCI_STAT_OVRD_REG0__RESPONSEMODE_1__SHIFT 0x00000009
+#define PB0_RX_LANE1_SCI_STAT_OVRD_REG0__RXPRESETHINT_1_MASK 0x00000070L
+#define PB0_RX_LANE1_SCI_STAT_OVRD_REG0__RXPRESETHINT_1__SHIFT 0x00000004
+#define PB0_RX_LANE1_SCI_STAT_OVRD_REG0__RXPWR_1_MASK 0x00000007L
+#define PB0_RX_LANE1_SCI_STAT_OVRD_REG0__RXPWR_1__SHIFT 0x00000000
+#define PB0_RX_LANE2_CTRL_REG0__RX_BACKUP_2_MASK 0x000000ffL
+#define PB0_RX_LANE2_CTRL_REG0__RX_BACKUP_2__SHIFT 0x00000000
+#define PB0_RX_LANE2_CTRL_REG0__RX_CFG_OVR_PWRSF_2_MASK 0x00002000L
+#define PB0_RX_LANE2_CTRL_REG0__RX_CFG_OVR_PWRSF_2__SHIFT 0x0000000d
+#define PB0_RX_LANE2_CTRL_REG0__RX_DBG_ANALOG_SEL_2_MASK 0x00000c00L
+#define PB0_RX_LANE2_CTRL_REG0__RX_DBG_ANALOG_SEL_2__SHIFT 0x0000000a
+#define PB0_RX_LANE2_CTRL_REG0__RX_TST_BSCAN_EN_2_MASK 0x00001000L
+#define PB0_RX_LANE2_CTRL_REG0__RX_TST_BSCAN_EN_2__SHIFT 0x0000000c
+#define PB0_RX_LANE2_SCI_STAT_OVRD_REG0__ELECIDLEDETEN_2_MASK 0x00000008L
+#define PB0_RX_LANE2_SCI_STAT_OVRD_REG0__ELECIDLEDETEN_2__SHIFT 0x00000003
+#define PB0_RX_LANE2_SCI_STAT_OVRD_REG0__ENABLEFOM_2_MASK 0x00000080L
+#define PB0_RX_LANE2_SCI_STAT_OVRD_REG0__ENABLEFOM_2__SHIFT 0x00000007
+#define PB0_RX_LANE2_SCI_STAT_OVRD_REG0__REQUESTFOM_2_MASK 0x00000100L
+#define PB0_RX_LANE2_SCI_STAT_OVRD_REG0__REQUESTFOM_2__SHIFT 0x00000008
+#define PB0_RX_LANE2_SCI_STAT_OVRD_REG0__RESPONSEMODE_2_MASK 0x00000200L
+#define PB0_RX_LANE2_SCI_STAT_OVRD_REG0__RESPONSEMODE_2__SHIFT 0x00000009
+#define PB0_RX_LANE2_SCI_STAT_OVRD_REG0__RXPRESETHINT_2_MASK 0x00000070L
+#define PB0_RX_LANE2_SCI_STAT_OVRD_REG0__RXPRESETHINT_2__SHIFT 0x00000004
+#define PB0_RX_LANE2_SCI_STAT_OVRD_REG0__RXPWR_2_MASK 0x00000007L
+#define PB0_RX_LANE2_SCI_STAT_OVRD_REG0__RXPWR_2__SHIFT 0x00000000
+#define PB0_RX_LANE3_CTRL_REG0__RX_BACKUP_3_MASK 0x000000ffL
+#define PB0_RX_LANE3_CTRL_REG0__RX_BACKUP_3__SHIFT 0x00000000
+#define PB0_RX_LANE3_CTRL_REG0__RX_CFG_OVR_PWRSF_3_MASK 0x00002000L
+#define PB0_RX_LANE3_CTRL_REG0__RX_CFG_OVR_PWRSF_3__SHIFT 0x0000000d
+#define PB0_RX_LANE3_CTRL_REG0__RX_DBG_ANALOG_SEL_3_MASK 0x00000c00L
+#define PB0_RX_LANE3_CTRL_REG0__RX_DBG_ANALOG_SEL_3__SHIFT 0x0000000a
+#define PB0_RX_LANE3_CTRL_REG0__RX_TST_BSCAN_EN_3_MASK 0x00001000L
+#define PB0_RX_LANE3_CTRL_REG0__RX_TST_BSCAN_EN_3__SHIFT 0x0000000c
+#define PB0_RX_LANE3_SCI_STAT_OVRD_REG0__ELECIDLEDETEN_3_MASK 0x00000008L
+#define PB0_RX_LANE3_SCI_STAT_OVRD_REG0__ELECIDLEDETEN_3__SHIFT 0x00000003
+#define PB0_RX_LANE3_SCI_STAT_OVRD_REG0__ENABLEFOM_3_MASK 0x00000080L
+#define PB0_RX_LANE3_SCI_STAT_OVRD_REG0__ENABLEFOM_3__SHIFT 0x00000007
+#define PB0_RX_LANE3_SCI_STAT_OVRD_REG0__REQUESTFOM_3_MASK 0x00000100L
+#define PB0_RX_LANE3_SCI_STAT_OVRD_REG0__REQUESTFOM_3__SHIFT 0x00000008
+#define PB0_RX_LANE3_SCI_STAT_OVRD_REG0__RESPONSEMODE_3_MASK 0x00000200L
+#define PB0_RX_LANE3_SCI_STAT_OVRD_REG0__RESPONSEMODE_3__SHIFT 0x00000009
+#define PB0_RX_LANE3_SCI_STAT_OVRD_REG0__RXPRESETHINT_3_MASK 0x00000070L
+#define PB0_RX_LANE3_SCI_STAT_OVRD_REG0__RXPRESETHINT_3__SHIFT 0x00000004
+#define PB0_RX_LANE3_SCI_STAT_OVRD_REG0__RXPWR_3_MASK 0x00000007L
+#define PB0_RX_LANE3_SCI_STAT_OVRD_REG0__RXPWR_3__SHIFT 0x00000000
+#define PB0_RX_LANE4_CTRL_REG0__RX_BACKUP_4_MASK 0x000000ffL
+#define PB0_RX_LANE4_CTRL_REG0__RX_BACKUP_4__SHIFT 0x00000000
+#define PB0_RX_LANE4_CTRL_REG0__RX_CFG_OVR_PWRSF_4_MASK 0x00002000L
+#define PB0_RX_LANE4_CTRL_REG0__RX_CFG_OVR_PWRSF_4__SHIFT 0x0000000d
+#define PB0_RX_LANE4_CTRL_REG0__RX_DBG_ANALOG_SEL_4_MASK 0x00000c00L
+#define PB0_RX_LANE4_CTRL_REG0__RX_DBG_ANALOG_SEL_4__SHIFT 0x0000000a
+#define PB0_RX_LANE4_CTRL_REG0__RX_TST_BSCAN_EN_4_MASK 0x00001000L
+#define PB0_RX_LANE4_CTRL_REG0__RX_TST_BSCAN_EN_4__SHIFT 0x0000000c
+#define PB0_RX_LANE4_SCI_STAT_OVRD_REG0__ELECIDLEDETEN_4_MASK 0x00000008L
+#define PB0_RX_LANE4_SCI_STAT_OVRD_REG0__ELECIDLEDETEN_4__SHIFT 0x00000003
+#define PB0_RX_LANE4_SCI_STAT_OVRD_REG0__ENABLEFOM_4_MASK 0x00000080L
+#define PB0_RX_LANE4_SCI_STAT_OVRD_REG0__ENABLEFOM_4__SHIFT 0x00000007
+#define PB0_RX_LANE4_SCI_STAT_OVRD_REG0__REQUESTFOM_4_MASK 0x00000100L
+#define PB0_RX_LANE4_SCI_STAT_OVRD_REG0__REQUESTFOM_4__SHIFT 0x00000008
+#define PB0_RX_LANE4_SCI_STAT_OVRD_REG0__RESPONSEMODE_4_MASK 0x00000200L
+#define PB0_RX_LANE4_SCI_STAT_OVRD_REG0__RESPONSEMODE_4__SHIFT 0x00000009
+#define PB0_RX_LANE4_SCI_STAT_OVRD_REG0__RXPRESETHINT_4_MASK 0x00000070L
+#define PB0_RX_LANE4_SCI_STAT_OVRD_REG0__RXPRESETHINT_4__SHIFT 0x00000004
+#define PB0_RX_LANE4_SCI_STAT_OVRD_REG0__RXPWR_4_MASK 0x00000007L
+#define PB0_RX_LANE4_SCI_STAT_OVRD_REG0__RXPWR_4__SHIFT 0x00000000
+#define PB0_RX_LANE5_CTRL_REG0__RX_BACKUP_5_MASK 0x000000ffL
+#define PB0_RX_LANE5_CTRL_REG0__RX_BACKUP_5__SHIFT 0x00000000
+#define PB0_RX_LANE5_CTRL_REG0__RX_CFG_OVR_PWRSF_5_MASK 0x00002000L
+#define PB0_RX_LANE5_CTRL_REG0__RX_CFG_OVR_PWRSF_5__SHIFT 0x0000000d
+#define PB0_RX_LANE5_CTRL_REG0__RX_DBG_ANALOG_SEL_5_MASK 0x00000c00L
+#define PB0_RX_LANE5_CTRL_REG0__RX_DBG_ANALOG_SEL_5__SHIFT 0x0000000a
+#define PB0_RX_LANE5_CTRL_REG0__RX_TST_BSCAN_EN_5_MASK 0x00001000L
+#define PB0_RX_LANE5_CTRL_REG0__RX_TST_BSCAN_EN_5__SHIFT 0x0000000c
+#define PB0_RX_LANE5_SCI_STAT_OVRD_REG0__ELECIDLEDETEN_5_MASK 0x00000008L
+#define PB0_RX_LANE5_SCI_STAT_OVRD_REG0__ELECIDLEDETEN_5__SHIFT 0x00000003
+#define PB0_RX_LANE5_SCI_STAT_OVRD_REG0__ENABLEFOM_5_MASK 0x00000080L
+#define PB0_RX_LANE5_SCI_STAT_OVRD_REG0__ENABLEFOM_5__SHIFT 0x00000007
+#define PB0_RX_LANE5_SCI_STAT_OVRD_REG0__REQUESTFOM_5_MASK 0x00000100L
+#define PB0_RX_LANE5_SCI_STAT_OVRD_REG0__REQUESTFOM_5__SHIFT 0x00000008
+#define PB0_RX_LANE5_SCI_STAT_OVRD_REG0__RESPONSEMODE_5_MASK 0x00000200L
+#define PB0_RX_LANE5_SCI_STAT_OVRD_REG0__RESPONSEMODE_5__SHIFT 0x00000009
+#define PB0_RX_LANE5_SCI_STAT_OVRD_REG0__RXPRESETHINT_5_MASK 0x00000070L
+#define PB0_RX_LANE5_SCI_STAT_OVRD_REG0__RXPRESETHINT_5__SHIFT 0x00000004
+#define PB0_RX_LANE5_SCI_STAT_OVRD_REG0__RXPWR_5_MASK 0x00000007L
+#define PB0_RX_LANE5_SCI_STAT_OVRD_REG0__RXPWR_5__SHIFT 0x00000000
+#define PB0_RX_LANE6_CTRL_REG0__RX_BACKUP_6_MASK 0x000000ffL
+#define PB0_RX_LANE6_CTRL_REG0__RX_BACKUP_6__SHIFT 0x00000000
+#define PB0_RX_LANE6_CTRL_REG0__RX_CFG_OVR_PWRSF_6_MASK 0x00002000L
+#define PB0_RX_LANE6_CTRL_REG0__RX_CFG_OVR_PWRSF_6__SHIFT 0x0000000d
+#define PB0_RX_LANE6_CTRL_REG0__RX_DBG_ANALOG_SEL_6_MASK 0x00000c00L
+#define PB0_RX_LANE6_CTRL_REG0__RX_DBG_ANALOG_SEL_6__SHIFT 0x0000000a
+#define PB0_RX_LANE6_CTRL_REG0__RX_TST_BSCAN_EN_6_MASK 0x00001000L
+#define PB0_RX_LANE6_CTRL_REG0__RX_TST_BSCAN_EN_6__SHIFT 0x0000000c
+#define PB0_RX_LANE6_SCI_STAT_OVRD_REG0__ELECIDLEDETEN_6_MASK 0x00000008L
+#define PB0_RX_LANE6_SCI_STAT_OVRD_REG0__ELECIDLEDETEN_6__SHIFT 0x00000003
+#define PB0_RX_LANE6_SCI_STAT_OVRD_REG0__ENABLEFOM_6_MASK 0x00000080L
+#define PB0_RX_LANE6_SCI_STAT_OVRD_REG0__ENABLEFOM_6__SHIFT 0x00000007
+#define PB0_RX_LANE6_SCI_STAT_OVRD_REG0__REQUESTFOM_6_MASK 0x00000100L
+#define PB0_RX_LANE6_SCI_STAT_OVRD_REG0__REQUESTFOM_6__SHIFT 0x00000008
+#define PB0_RX_LANE6_SCI_STAT_OVRD_REG0__RESPONSEMODE_6_MASK 0x00000200L
+#define PB0_RX_LANE6_SCI_STAT_OVRD_REG0__RESPONSEMODE_6__SHIFT 0x00000009
+#define PB0_RX_LANE6_SCI_STAT_OVRD_REG0__RXPRESETHINT_6_MASK 0x00000070L
+#define PB0_RX_LANE6_SCI_STAT_OVRD_REG0__RXPRESETHINT_6__SHIFT 0x00000004
+#define PB0_RX_LANE6_SCI_STAT_OVRD_REG0__RXPWR_6_MASK 0x00000007L
+#define PB0_RX_LANE6_SCI_STAT_OVRD_REG0__RXPWR_6__SHIFT 0x00000000
+#define PB0_RX_LANE7_CTRL_REG0__RX_BACKUP_7_MASK 0x000000ffL
+#define PB0_RX_LANE7_CTRL_REG0__RX_BACKUP_7__SHIFT 0x00000000
+#define PB0_RX_LANE7_CTRL_REG0__RX_CFG_OVR_PWRSF_7_MASK 0x00002000L
+#define PB0_RX_LANE7_CTRL_REG0__RX_CFG_OVR_PWRSF_7__SHIFT 0x0000000d
+#define PB0_RX_LANE7_CTRL_REG0__RX_DBG_ANALOG_SEL_7_MASK 0x00000c00L
+#define PB0_RX_LANE7_CTRL_REG0__RX_DBG_ANALOG_SEL_7__SHIFT 0x0000000a
+#define PB0_RX_LANE7_CTRL_REG0__RX_TST_BSCAN_EN_7_MASK 0x00001000L
+#define PB0_RX_LANE7_CTRL_REG0__RX_TST_BSCAN_EN_7__SHIFT 0x0000000c
+#define PB0_RX_LANE7_SCI_STAT_OVRD_REG0__ELECIDLEDETEN_7_MASK 0x00000008L
+#define PB0_RX_LANE7_SCI_STAT_OVRD_REG0__ELECIDLEDETEN_7__SHIFT 0x00000003
+#define PB0_RX_LANE7_SCI_STAT_OVRD_REG0__ENABLEFOM_7_MASK 0x00000080L
+#define PB0_RX_LANE7_SCI_STAT_OVRD_REG0__ENABLEFOM_7__SHIFT 0x00000007
+#define PB0_RX_LANE7_SCI_STAT_OVRD_REG0__REQUESTFOM_7_MASK 0x00000100L
+#define PB0_RX_LANE7_SCI_STAT_OVRD_REG0__REQUESTFOM_7__SHIFT 0x00000008
+#define PB0_RX_LANE7_SCI_STAT_OVRD_REG0__RESPONSEMODE_7_MASK 0x00000200L
+#define PB0_RX_LANE7_SCI_STAT_OVRD_REG0__RESPONSEMODE_7__SHIFT 0x00000009
+#define PB0_RX_LANE7_SCI_STAT_OVRD_REG0__RXPRESETHINT_7_MASK 0x00000070L
+#define PB0_RX_LANE7_SCI_STAT_OVRD_REG0__RXPRESETHINT_7__SHIFT 0x00000004
+#define PB0_RX_LANE7_SCI_STAT_OVRD_REG0__RXPWR_7_MASK 0x00000007L
+#define PB0_RX_LANE7_SCI_STAT_OVRD_REG0__RXPWR_7__SHIFT 0x00000000
+#define PB0_RX_LANE8_CTRL_REG0__RX_BACKUP_8_MASK 0x000000ffL
+#define PB0_RX_LANE8_CTRL_REG0__RX_BACKUP_8__SHIFT 0x00000000
+#define PB0_RX_LANE8_CTRL_REG0__RX_CFG_OVR_PWRSF_8_MASK 0x00002000L
+#define PB0_RX_LANE8_CTRL_REG0__RX_CFG_OVR_PWRSF_8__SHIFT 0x0000000d
+#define PB0_RX_LANE8_CTRL_REG0__RX_DBG_ANALOG_SEL_8_MASK 0x00000c00L
+#define PB0_RX_LANE8_CTRL_REG0__RX_DBG_ANALOG_SEL_8__SHIFT 0x0000000a
+#define PB0_RX_LANE8_CTRL_REG0__RX_TST_BSCAN_EN_8_MASK 0x00001000L
+#define PB0_RX_LANE8_CTRL_REG0__RX_TST_BSCAN_EN_8__SHIFT 0x0000000c
+#define PB0_RX_LANE8_SCI_STAT_OVRD_REG0__ELECIDLEDETEN_8_MASK 0x00000008L
+#define PB0_RX_LANE8_SCI_STAT_OVRD_REG0__ELECIDLEDETEN_8__SHIFT 0x00000003
+#define PB0_RX_LANE8_SCI_STAT_OVRD_REG0__ENABLEFOM_8_MASK 0x00000080L
+#define PB0_RX_LANE8_SCI_STAT_OVRD_REG0__ENABLEFOM_8__SHIFT 0x00000007
+#define PB0_RX_LANE8_SCI_STAT_OVRD_REG0__REQUESTFOM_8_MASK 0x00000100L
+#define PB0_RX_LANE8_SCI_STAT_OVRD_REG0__REQUESTFOM_8__SHIFT 0x00000008
+#define PB0_RX_LANE8_SCI_STAT_OVRD_REG0__RESPONSEMODE_8_MASK 0x00000200L
+#define PB0_RX_LANE8_SCI_STAT_OVRD_REG0__RESPONSEMODE_8__SHIFT 0x00000009
+#define PB0_RX_LANE8_SCI_STAT_OVRD_REG0__RXPRESETHINT_8_MASK 0x00000070L
+#define PB0_RX_LANE8_SCI_STAT_OVRD_REG0__RXPRESETHINT_8__SHIFT 0x00000004
+#define PB0_RX_LANE8_SCI_STAT_OVRD_REG0__RXPWR_8_MASK 0x00000007L
+#define PB0_RX_LANE8_SCI_STAT_OVRD_REG0__RXPWR_8__SHIFT 0x00000000
+#define PB0_RX_LANE9_CTRL_REG0__RX_BACKUP_9_MASK 0x000000ffL
+#define PB0_RX_LANE9_CTRL_REG0__RX_BACKUP_9__SHIFT 0x00000000
+#define PB0_RX_LANE9_CTRL_REG0__RX_CFG_OVR_PWRSF_9_MASK 0x00002000L
+#define PB0_RX_LANE9_CTRL_REG0__RX_CFG_OVR_PWRSF_9__SHIFT 0x0000000d
+#define PB0_RX_LANE9_CTRL_REG0__RX_DBG_ANALOG_SEL_9_MASK 0x00000c00L
+#define PB0_RX_LANE9_CTRL_REG0__RX_DBG_ANALOG_SEL_9__SHIFT 0x0000000a
+#define PB0_RX_LANE9_CTRL_REG0__RX_TST_BSCAN_EN_9_MASK 0x00001000L
+#define PB0_RX_LANE9_CTRL_REG0__RX_TST_BSCAN_EN_9__SHIFT 0x0000000c
+#define PB0_RX_LANE9_SCI_STAT_OVRD_REG0__ELECIDLEDETEN_9_MASK 0x00000008L
+#define PB0_RX_LANE9_SCI_STAT_OVRD_REG0__ELECIDLEDETEN_9__SHIFT 0x00000003
+#define PB0_RX_LANE9_SCI_STAT_OVRD_REG0__ENABLEFOM_9_MASK 0x00000080L
+#define PB0_RX_LANE9_SCI_STAT_OVRD_REG0__ENABLEFOM_9__SHIFT 0x00000007
+#define PB0_RX_LANE9_SCI_STAT_OVRD_REG0__REQUESTFOM_9_MASK 0x00000100L
+#define PB0_RX_LANE9_SCI_STAT_OVRD_REG0__REQUESTFOM_9__SHIFT 0x00000008
+#define PB0_RX_LANE9_SCI_STAT_OVRD_REG0__RESPONSEMODE_9_MASK 0x00000200L
+#define PB0_RX_LANE9_SCI_STAT_OVRD_REG0__RESPONSEMODE_9__SHIFT 0x00000009
+#define PB0_RX_LANE9_SCI_STAT_OVRD_REG0__RXPRESETHINT_9_MASK 0x00000070L
+#define PB0_RX_LANE9_SCI_STAT_OVRD_REG0__RXPRESETHINT_9__SHIFT 0x00000004
+#define PB0_RX_LANE9_SCI_STAT_OVRD_REG0__RXPWR_9_MASK 0x00000007L
+#define PB0_RX_LANE9_SCI_STAT_OVRD_REG0__RXPWR_9__SHIFT 0x00000000
+#define PB0_STRAP_GLB_REG0__STRAP_DBG_RXDLL_VREG_REF_SEL_MASK 0x00008000L
+#define PB0_STRAP_GLB_REG0__STRAP_DBG_RXDLL_VREG_REF_SEL__SHIFT 0x0000000f
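[Editorial note, not part of the commit: every field in this header is described by a `<REG>__<FIELD>_MASK` / `<REG>__<FIELD>__SHIFT` pair, which is the convention the driver's generic field accessors rely on. Below is a minimal, self-contained sketch of how such a pair is consumed; the FIELD_GET_X/FIELD_SET_X names and the rx_lane0_pwr() helper are hypothetical illustrations only. The amdgpu driver itself uses its own equivalent REG_GET_FIELD()/REG_SET_FIELD() macros rather than these.]

/*
 * Illustrative sketch (assumes only the defines in this header).
 * Token pasting turns (reg, field) into the matching _MASK and
 * __SHIFT macros, e.g. PB0_RX_LANE0_SCI_STAT_OVRD_REG0__RXPWR_0_MASK.
 */
#include <stdint.h>

#define FIELD_GET_X(val, reg, field) \
	(((val) & reg##__##field##_MASK) >> reg##__##field##__SHIFT)

#define FIELD_SET_X(val, reg, field, fval) \
	(((val) & ~reg##__##field##_MASK) | \
	 (((uint32_t)(fval) << reg##__##field##__SHIFT) & \
	  reg##__##field##_MASK))

/* Example: extract the 3-bit RXPWR_0 field (mask 0x7, shift 0)
 * from a lane-0 SCI status/override register value. */
static inline uint32_t rx_lane0_pwr(uint32_t v)
{
	return FIELD_GET_X(v, PB0_RX_LANE0_SCI_STAT_OVRD_REG0, RXPWR_0);
}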
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG0__ACCEPT_ENTRY_0_MASK 0x00000001L
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG0__ACCEPT_ENTRY_0__SHIFT 0x00000000
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG0__ACCEPT_ENTRY_10_MASK 0x00000400L
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG0__ACCEPT_ENTRY_10__SHIFT 0x0000000a
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG0__ACCEPT_ENTRY_11_MASK 0x00000800L
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG0__ACCEPT_ENTRY_11__SHIFT 0x0000000b
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG0__ACCEPT_ENTRY_12_MASK 0x00001000L
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG0__ACCEPT_ENTRY_12__SHIFT 0x0000000c
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG0__ACCEPT_ENTRY_13_MASK 0x00002000L
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG0__ACCEPT_ENTRY_13__SHIFT 0x0000000d
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG0__ACCEPT_ENTRY_14_MASK 0x00004000L
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG0__ACCEPT_ENTRY_14__SHIFT 0x0000000e
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG0__ACCEPT_ENTRY_15_MASK 0x00008000L
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG0__ACCEPT_ENTRY_15__SHIFT 0x0000000f
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG0__ACCEPT_ENTRY_16_MASK 0x00010000L
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG0__ACCEPT_ENTRY_16__SHIFT 0x00000010
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG0__ACCEPT_ENTRY_17_MASK 0x00020000L
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG0__ACCEPT_ENTRY_17__SHIFT 0x00000011
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG0__ACCEPT_ENTRY_18_MASK 0x00040000L
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG0__ACCEPT_ENTRY_18__SHIFT 0x00000012
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG0__ACCEPT_ENTRY_19_MASK 0x00080000L
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG0__ACCEPT_ENTRY_19__SHIFT 0x00000013
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG0__ACCEPT_ENTRY_1_MASK 0x00000002L
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG0__ACCEPT_ENTRY_1__SHIFT 0x00000001
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG0__ACCEPT_ENTRY_20_MASK 0x00100000L
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG0__ACCEPT_ENTRY_20__SHIFT 0x00000014
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG0__ACCEPT_ENTRY_21_MASK 0x00200000L
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG0__ACCEPT_ENTRY_21__SHIFT 0x00000015
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG0__ACCEPT_ENTRY_22_MASK 0x00400000L
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG0__ACCEPT_ENTRY_22__SHIFT 0x00000016
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG0__ACCEPT_ENTRY_23_MASK 0x00800000L
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG0__ACCEPT_ENTRY_23__SHIFT 0x00000017
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG0__ACCEPT_ENTRY_24_MASK 0x01000000L
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG0__ACCEPT_ENTRY_24__SHIFT 0x00000018
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG0__ACCEPT_ENTRY_25_MASK 0x02000000L
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG0__ACCEPT_ENTRY_25__SHIFT 0x00000019
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG0__ACCEPT_ENTRY_26_MASK 0x04000000L
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG0__ACCEPT_ENTRY_26__SHIFT 0x0000001a
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG0__ACCEPT_ENTRY_27_MASK 0x08000000L
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG0__ACCEPT_ENTRY_27__SHIFT 0x0000001b
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG0__ACCEPT_ENTRY_28_MASK 0x10000000L
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG0__ACCEPT_ENTRY_28__SHIFT 0x0000001c
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG0__ACCEPT_ENTRY_29_MASK 0x20000000L
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG0__ACCEPT_ENTRY_29__SHIFT 0x0000001d
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG0__ACCEPT_ENTRY_2_MASK 0x00000004L
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG0__ACCEPT_ENTRY_2__SHIFT 0x00000002
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG0__ACCEPT_ENTRY_30_MASK 0x40000000L
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG0__ACCEPT_ENTRY_30__SHIFT 0x0000001e
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG0__ACCEPT_ENTRY_31_MASK 0x80000000L
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG0__ACCEPT_ENTRY_31__SHIFT 0x0000001f
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG0__ACCEPT_ENTRY_3_MASK 0x00000008L
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG0__ACCEPT_ENTRY_3__SHIFT 0x00000003
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG0__ACCEPT_ENTRY_4_MASK 0x00000010L
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG0__ACCEPT_ENTRY_4__SHIFT 0x00000004
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG0__ACCEPT_ENTRY_5_MASK 0x00000020L
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG0__ACCEPT_ENTRY_5__SHIFT 0x00000005
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG0__ACCEPT_ENTRY_6_MASK 0x00000040L
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG0__ACCEPT_ENTRY_6__SHIFT 0x00000006
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG0__ACCEPT_ENTRY_7_MASK 0x00000080L
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG0__ACCEPT_ENTRY_7__SHIFT 0x00000007
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG0__ACCEPT_ENTRY_8_MASK 0x00000100L
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG0__ACCEPT_ENTRY_8__SHIFT 0x00000008
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG0__ACCEPT_ENTRY_9_MASK 0x00000200L
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG0__ACCEPT_ENTRY_9__SHIFT 0x00000009
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG1__ACCEPT_ENTRY_32_MASK 0x00000001L
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG1__ACCEPT_ENTRY_32__SHIFT 0x00000000
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG1__ACCEPT_ENTRY_33_MASK 0x00000002L
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG1__ACCEPT_ENTRY_33__SHIFT 0x00000001
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG1__ACCEPT_ENTRY_34_MASK 0x00000004L
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG1__ACCEPT_ENTRY_34__SHIFT 0x00000002
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG1__ACCEPT_ENTRY_35_MASK 0x00000008L
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG1__ACCEPT_ENTRY_35__SHIFT 0x00000003
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG1__ACCEPT_ENTRY_36_MASK 0x00000010L
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG1__ACCEPT_ENTRY_36__SHIFT 0x00000004
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG1__ACCEPT_ENTRY_37_MASK 0x00000020L
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG1__ACCEPT_ENTRY_37__SHIFT 0x00000005
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG1__ACCEPT_ENTRY_38_MASK 0x00000040L
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG1__ACCEPT_ENTRY_38__SHIFT 0x00000006
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG1__ACCEPT_ENTRY_39_MASK 0x00000080L
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG1__ACCEPT_ENTRY_39__SHIFT 0x00000007
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG1__ACCEPT_ENTRY_40_MASK 0x00000100L
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG1__ACCEPT_ENTRY_40__SHIFT 0x00000008
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG1__ACCEPT_ENTRY_41_MASK 0x00000200L
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG1__ACCEPT_ENTRY_41__SHIFT 0x00000009
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG1__ACCEPT_ENTRY_42_MASK 0x00000400L
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG1__ACCEPT_ENTRY_42__SHIFT 0x0000000a
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG1__ACCEPT_ENTRY_43_MASK 0x00000800L
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG1__ACCEPT_ENTRY_43__SHIFT 0x0000000b
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG1__ACCEPT_ENTRY_44_MASK 0x00001000L
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG1__ACCEPT_ENTRY_44__SHIFT 0x0000000c
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG1__ACCEPT_ENTRY_45_MASK 0x00002000L
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG1__ACCEPT_ENTRY_45__SHIFT 0x0000000d
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG1__ACCEPT_ENTRY_46_MASK 0x00004000L
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG1__ACCEPT_ENTRY_46__SHIFT 0x0000000e
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG1__ACCEPT_ENTRY_47_MASK 0x00008000L
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG1__ACCEPT_ENTRY_47__SHIFT 0x0000000f
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG1__ACCEPT_ENTRY_48_MASK 0x00010000L
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG1__ACCEPT_ENTRY_48__SHIFT 0x00000010
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG1__ACCEPT_ENTRY_49_MASK 0x00020000L
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG1__ACCEPT_ENTRY_49__SHIFT 0x00000011
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG1__ACCEPT_ENTRY_50_MASK 0x00040000L
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG1__ACCEPT_ENTRY_50__SHIFT 0x00000012
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG1__ACCEPT_ENTRY_51_MASK 0x00080000L
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG1__ACCEPT_ENTRY_51__SHIFT 0x00000013
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG1__ACCEPT_ENTRY_52_MASK 0x00100000L
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG1__ACCEPT_ENTRY_52__SHIFT 0x00000014
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG1__ACCEPT_ENTRY_53_MASK 0x00200000L
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG1__ACCEPT_ENTRY_53__SHIFT 0x00000015
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG1__ACCEPT_ENTRY_54_MASK 0x00400000L
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG1__ACCEPT_ENTRY_54__SHIFT 0x00000016
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG1__ACCEPT_ENTRY_55_MASK 0x00800000L
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG1__ACCEPT_ENTRY_55__SHIFT 0x00000017
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG1__ACCEPT_ENTRY_56_MASK 0x01000000L
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG1__ACCEPT_ENTRY_56__SHIFT 0x00000018
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG1__ACCEPT_ENTRY_57_MASK 0x02000000L
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG1__ACCEPT_ENTRY_57__SHIFT 0x00000019
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG1__ACCEPT_ENTRY_58_MASK 0x04000000L
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG1__ACCEPT_ENTRY_58__SHIFT 0x0000001a
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG1__ACCEPT_ENTRY_59_MASK 0x08000000L
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG1__ACCEPT_ENTRY_59__SHIFT 0x0000001b
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG1__ACCEPT_ENTRY_60_MASK 0x10000000L
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG1__ACCEPT_ENTRY_60__SHIFT 0x0000001c
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG1__ACCEPT_ENTRY_61_MASK 0x20000000L
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG1__ACCEPT_ENTRY_61__SHIFT 0x0000001d
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG1__ACCEPT_ENTRY_62_MASK 0x40000000L
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG1__ACCEPT_ENTRY_62__SHIFT 0x0000001e
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG1__ACCEPT_ENTRY_63_MASK 0x80000000L
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG1__ACCEPT_ENTRY_63__SHIFT 0x0000001f
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG2__ACCEPT_ENTRY_64_MASK 0x00000001L
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG2__ACCEPT_ENTRY_64__SHIFT 0x00000000
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG2__ACCEPT_ENTRY_65_MASK 0x00000002L
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG2__ACCEPT_ENTRY_65__SHIFT 0x00000001
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG2__ACCEPT_ENTRY_66_MASK 0x00000004L
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG2__ACCEPT_ENTRY_66__SHIFT 0x00000002
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG2__ACCEPT_ENTRY_67_MASK 0x00000008L
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG2__ACCEPT_ENTRY_67__SHIFT 0x00000003
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG2__ACCEPT_ENTRY_68_MASK 0x00000010L
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG2__ACCEPT_ENTRY_68__SHIFT 0x00000004
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG2__ACCEPT_ENTRY_69_MASK 0x00000020L
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG2__ACCEPT_ENTRY_69__SHIFT 0x00000005
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG2__ACCEPT_ENTRY_70_MASK 0x00000040L
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG2__ACCEPT_ENTRY_70__SHIFT 0x00000006
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG2__ACCEPT_ENTRY_71_MASK 0x00000080L
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG2__ACCEPT_ENTRY_71__SHIFT 0x00000007
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG2__ACCEPT_ENTRY_72_MASK 0x00000100L
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG2__ACCEPT_ENTRY_72__SHIFT 0x00000008
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG2__ACCEPT_ENTRY_73_MASK 0x00000200L
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG2__ACCEPT_ENTRY_73__SHIFT 0x00000009
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG2__ACCEPT_ENTRY_74_MASK 0x00000400L
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG2__ACCEPT_ENTRY_74__SHIFT 0x0000000a
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG2__ACCEPT_ENTRY_75_MASK 0x00000800L
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG2__ACCEPT_ENTRY_75__SHIFT 0x0000000b
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG2__ACCEPT_ENTRY_76_MASK 0x00001000L
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG2__ACCEPT_ENTRY_76__SHIFT 0x0000000c
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG2__ACCEPT_ENTRY_77_MASK 0x00002000L
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG2__ACCEPT_ENTRY_77__SHIFT 0x0000000d
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG2__ACCEPT_ENTRY_78_MASK 0x00004000L
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG2__ACCEPT_ENTRY_78__SHIFT 0x0000000e
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG2__ACCEPT_ENTRY_79_MASK 0x00008000L
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG2__ACCEPT_ENTRY_79__SHIFT 0x0000000f
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG2__ACCEPT_ENTRY_80_MASK 0x00010000L
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG2__ACCEPT_ENTRY_80__SHIFT 0x00000010
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG2__ACCEPT_ENTRY_81_MASK 0x00020000L
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG2__ACCEPT_ENTRY_81__SHIFT 0x00000011
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG2__ACCEPT_ENTRY_82_MASK 0x00040000L
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG2__ACCEPT_ENTRY_82__SHIFT 0x00000012
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG2__ACCEPT_ENTRY_83_MASK 0x00080000L
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG2__ACCEPT_ENTRY_83__SHIFT 0x00000013
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG2__ACCEPT_ENTRY_84_MASK 0x00100000L
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG2__ACCEPT_ENTRY_84__SHIFT 0x00000014
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG2__ACCEPT_ENTRY_85_MASK 0x00200000L
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG2__ACCEPT_ENTRY_85__SHIFT 0x00000015
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG2__ACCEPT_ENTRY_86_MASK 0x00400000L
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG2__ACCEPT_ENTRY_86__SHIFT 0x00000016
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG2__ACCEPT_ENTRY_87_MASK 0x00800000L
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG2__ACCEPT_ENTRY_87__SHIFT 0x00000017
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG2__ACCEPT_ENTRY_88_MASK 0x01000000L
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG2__ACCEPT_ENTRY_88__SHIFT 0x00000018
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG2__ACCEPT_ENTRY_89_MASK 0x02000000L
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG2__ACCEPT_ENTRY_89__SHIFT 0x00000019
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG2__ACCEPT_ENTRY_90_MASK 0x04000000L
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG2__ACCEPT_ENTRY_90__SHIFT 0x0000001a
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG2__ACCEPT_ENTRY_91_MASK 0x08000000L
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG2__ACCEPT_ENTRY_91__SHIFT 0x0000001b
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG2__ACCEPT_ENTRY_92_MASK 0x10000000L
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG2__ACCEPT_ENTRY_92__SHIFT 0x0000001c
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG2__ACCEPT_ENTRY_93_MASK 0x20000000L
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG2__ACCEPT_ENTRY_93__SHIFT 0x0000001d
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG2__ACCEPT_ENTRY_94_MASK 0x40000000L
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG2__ACCEPT_ENTRY_94__SHIFT 0x0000001e
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG2__ACCEPT_ENTRY_95_MASK 0x80000000L
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG2__ACCEPT_ENTRY_95__SHIFT 0x0000001f
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG3__ACCEPT_ENTRY_100_MASK 0x00000010L
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG3__ACCEPT_ENTRY_100__SHIFT 0x00000004
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG3__ACCEPT_ENTRY_101_MASK 0x00000020L
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG3__ACCEPT_ENTRY_101__SHIFT 0x00000005
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG3__ACCEPT_ENTRY_102_MASK 0x00000040L
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG3__ACCEPT_ENTRY_102__SHIFT 0x00000006
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG3__ACCEPT_ENTRY_103_MASK 0x00000080L
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG3__ACCEPT_ENTRY_103__SHIFT 0x00000007
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG3__ACCEPT_ENTRY_104_MASK 0x00000100L
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG3__ACCEPT_ENTRY_104__SHIFT 0x00000008
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG3__ACCEPT_ENTRY_105_MASK 0x00000200L
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG3__ACCEPT_ENTRY_105__SHIFT 0x00000009
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG3__ACCEPT_ENTRY_106_MASK 0x00000400L
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG3__ACCEPT_ENTRY_106__SHIFT 0x0000000a
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG3__ACCEPT_ENTRY_107_MASK 0x00000800L
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG3__ACCEPT_ENTRY_107__SHIFT 0x0000000b
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG3__ACCEPT_ENTRY_108_MASK 0x00001000L
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG3__ACCEPT_ENTRY_108__SHIFT 0x0000000c
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG3__ACCEPT_ENTRY_109_MASK 0x00002000L
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG3__ACCEPT_ENTRY_109__SHIFT 0x0000000d
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG3__ACCEPT_ENTRY_96_MASK 0x00000001L
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG3__ACCEPT_ENTRY_96__SHIFT 0x00000000
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG3__ACCEPT_ENTRY_97_MASK 0x00000002L
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG3__ACCEPT_ENTRY_97__SHIFT 0x00000001
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG3__ACCEPT_ENTRY_98_MASK 0x00000004L
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG3__ACCEPT_ENTRY_98__SHIFT 0x00000002
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG3__ACCEPT_ENTRY_99_MASK 0x00000008L
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG3__ACCEPT_ENTRY_99__SHIFT 0x00000003
+#define PB0_TX_GLB_CTRL_REG0__TX_CFG_RPTR_RST_VAL_GEN1_MASK 0x00000700L
+#define PB0_TX_GLB_CTRL_REG0__TX_CFG_RPTR_RST_VAL_GEN1__SHIFT 0x00000008
+#define PB0_TX_GLB_CTRL_REG0__TX_CFG_RPTR_RST_VAL_GEN2_MASK 0x00003800L
+#define PB0_TX_GLB_CTRL_REG0__TX_CFG_RPTR_RST_VAL_GEN2__SHIFT 0x0000000b
+#define PB0_TX_GLB_CTRL_REG0__TX_CFG_RPTR_RST_VAL_GEN3_MASK 0x0001c000L
+#define PB0_TX_GLB_CTRL_REG0__TX_CFG_RPTR_RST_VAL_GEN3__SHIFT 0x0000000e
+#define PB0_TX_GLB_CTRL_REG0__TX_COEFF_ROUND_DIR_VER_MASK 0x00400000L
+#define PB0_TX_GLB_CTRL_REG0__TX_COEFF_ROUND_DIR_VER__SHIFT 0x00000016
+#define PB0_TX_GLB_CTRL_REG0__TX_COEFF_ROUND_EN_MASK 0x00200000L
+#define PB0_TX_GLB_CTRL_REG0__TX_COEFF_ROUND_EN__SHIFT 0x00000015
+#define PB0_TX_GLB_CTRL_REG0__TX_DATA_CLK_GATING_MASK 0x00080000L
+#define PB0_TX_GLB_CTRL_REG0__TX_DATA_CLK_GATING__SHIFT 0x00000013
+#define PB0_TX_GLB_CTRL_REG0__TX_DCLK_EN_LSX_ALWAYS_ON_MASK 0x00800000L
+#define PB0_TX_GLB_CTRL_REG0__TX_DCLK_EN_LSX_ALWAYS_ON__SHIFT 0x00000017
+#define PB0_TX_GLB_CTRL_REG0__TX_DRV_DATA_ASRT_DLY_VAL_MASK 0x00000007L
+#define PB0_TX_GLB_CTRL_REG0__TX_DRV_DATA_ASRT_DLY_VAL__SHIFT 0x00000000
+#define PB0_TX_GLB_CTRL_REG0__TX_DRV_DATA_DSRT_DLY_VAL_MASK 0x00000038L
+#define PB0_TX_GLB_CTRL_REG0__TX_DRV_DATA_DSRT_DLY_VAL__SHIFT 0x00000003
+#define PB0_TX_GLB_CTRL_REG0__TX_FRONTEND_PWRON_IN_OFF_MASK 0x01000000L
+#define PB0_TX_GLB_CTRL_REG0__TX_FRONTEND_PWRON_IN_OFF__SHIFT 0x00000018
+#define PB0_TX_GLB_CTRL_REG0__TX_PRESET_TABLE_BYPASS_MASK 0x00100000L
+#define PB0_TX_GLB_CTRL_REG0__TX_PRESET_TABLE_BYPASS__SHIFT 0x00000014
+#define PB0_TX_GLB_CTRL_REG0__TX_STAGGER_CTRL_MASK 0x00060000L
+#define PB0_TX_GLB_CTRL_REG0__TX_STAGGER_CTRL__SHIFT 0x00000011
+#define PB0_TX_GLB_LANE_SKEW_CTRL__TX_CFG_GROUPX16_EN_L0T15_MASK 0x40000000L
+#define PB0_TX_GLB_LANE_SKEW_CTRL__TX_CFG_GROUPX16_EN_L0T15__SHIFT 0x0000001e
+#define PB0_TX_GLB_LANE_SKEW_CTRL__TX_CFG_GROUPX1_EN_0_MASK 0x00000001L
+#define PB0_TX_GLB_LANE_SKEW_CTRL__TX_CFG_GROUPX1_EN_0__SHIFT 0x00000000
+#define PB0_TX_GLB_LANE_SKEW_CTRL__TX_CFG_GROUPX1_EN_10_MASK 0x00000400L
+#define PB0_TX_GLB_LANE_SKEW_CTRL__TX_CFG_GROUPX1_EN_10__SHIFT 0x0000000a
+#define PB0_TX_GLB_LANE_SKEW_CTRL__TX_CFG_GROUPX1_EN_11_MASK 0x00000800L
+#define PB0_TX_GLB_LANE_SKEW_CTRL__TX_CFG_GROUPX1_EN_11__SHIFT 0x0000000b
+#define PB0_TX_GLB_LANE_SKEW_CTRL__TX_CFG_GROUPX1_EN_12_MASK 0x00001000L
+#define PB0_TX_GLB_LANE_SKEW_CTRL__TX_CFG_GROUPX1_EN_12__SHIFT 0x0000000c
+#define PB0_TX_GLB_LANE_SKEW_CTRL__TX_CFG_GROUPX1_EN_13_MASK 0x00002000L
+#define PB0_TX_GLB_LANE_SKEW_CTRL__TX_CFG_GROUPX1_EN_13__SHIFT 0x0000000d
+#define PB0_TX_GLB_LANE_SKEW_CTRL__TX_CFG_GROUPX1_EN_14_MASK 0x00004000L
+#define PB0_TX_GLB_LANE_SKEW_CTRL__TX_CFG_GROUPX1_EN_14__SHIFT 0x0000000e
+#define PB0_TX_GLB_LANE_SKEW_CTRL__TX_CFG_GROUPX1_EN_15_MASK 0x00008000L
+#define PB0_TX_GLB_LANE_SKEW_CTRL__TX_CFG_GROUPX1_EN_15__SHIFT 0x0000000f
+#define PB0_TX_GLB_LANE_SKEW_CTRL__TX_CFG_GROUPX1_EN_1_MASK 0x00000002L
+#define PB0_TX_GLB_LANE_SKEW_CTRL__TX_CFG_GROUPX1_EN_1__SHIFT 0x00000001
+#define PB0_TX_GLB_LANE_SKEW_CTRL__TX_CFG_GROUPX1_EN_2_MASK 0x00000004L
+#define PB0_TX_GLB_LANE_SKEW_CTRL__TX_CFG_GROUPX1_EN_2__SHIFT 0x00000002
+#define PB0_TX_GLB_LANE_SKEW_CTRL__TX_CFG_GROUPX1_EN_3_MASK 0x00000008L
+#define PB0_TX_GLB_LANE_SKEW_CTRL__TX_CFG_GROUPX1_EN_3__SHIFT 0x00000003
+#define PB0_TX_GLB_LANE_SKEW_CTRL__TX_CFG_GROUPX1_EN_4_MASK 0x00000010L
+#define PB0_TX_GLB_LANE_SKEW_CTRL__TX_CFG_GROUPX1_EN_4__SHIFT 0x00000004
+#define PB0_TX_GLB_LANE_SKEW_CTRL__TX_CFG_GROUPX1_EN_5_MASK 0x00000020L
+#define PB0_TX_GLB_LANE_SKEW_CTRL__TX_CFG_GROUPX1_EN_5__SHIFT 0x00000005
+#define PB0_TX_GLB_LANE_SKEW_CTRL__TX_CFG_GROUPX1_EN_6_MASK 0x00000040L
+#define PB0_TX_GLB_LANE_SKEW_CTRL__TX_CFG_GROUPX1_EN_6__SHIFT 0x00000006
+#define PB0_TX_GLB_LANE_SKEW_CTRL__TX_CFG_GROUPX1_EN_7_MASK 0x00000080L
+#define PB0_TX_GLB_LANE_SKEW_CTRL__TX_CFG_GROUPX1_EN_7__SHIFT 0x00000007
+#define PB0_TX_GLB_LANE_SKEW_CTRL__TX_CFG_GROUPX1_EN_8_MASK 0x00000100L
+#define PB0_TX_GLB_LANE_SKEW_CTRL__TX_CFG_GROUPX1_EN_8__SHIFT 0x00000008
+#define PB0_TX_GLB_LANE_SKEW_CTRL__TX_CFG_GROUPX1_EN_9_MASK 0x00000200L
+#define PB0_TX_GLB_LANE_SKEW_CTRL__TX_CFG_GROUPX1_EN_9__SHIFT 0x00000009
+#define PB0_TX_GLB_LANE_SKEW_CTRL__TX_CFG_GROUPX2_EN_L0T1_MASK 0x00010000L
+#define PB0_TX_GLB_LANE_SKEW_CTRL__TX_CFG_GROUPX2_EN_L0T1__SHIFT 0x00000010
+#define PB0_TX_GLB_LANE_SKEW_CTRL__TX_CFG_GROUPX2_EN_L10T11_MASK 0x00200000L
+#define PB0_TX_GLB_LANE_SKEW_CTRL__TX_CFG_GROUPX2_EN_L10T11__SHIFT 0x00000015
+#define PB0_TX_GLB_LANE_SKEW_CTRL__TX_CFG_GROUPX2_EN_L12T13_MASK 0x00400000L
+#define PB0_TX_GLB_LANE_SKEW_CTRL__TX_CFG_GROUPX2_EN_L12T13__SHIFT 0x00000016
+#define PB0_TX_GLB_LANE_SKEW_CTRL__TX_CFG_GROUPX2_EN_L14T15_MASK 0x00800000L
+#define PB0_TX_GLB_LANE_SKEW_CTRL__TX_CFG_GROUPX2_EN_L14T15__SHIFT 0x00000017
+#define PB0_TX_GLB_LANE_SKEW_CTRL__TX_CFG_GROUPX2_EN_L2T3_MASK 0x00020000L
+#define PB0_TX_GLB_LANE_SKEW_CTRL__TX_CFG_GROUPX2_EN_L2T3__SHIFT 0x00000011
+#define PB0_TX_GLB_LANE_SKEW_CTRL__TX_CFG_GROUPX2_EN_L4T5_MASK 0x00040000L
+#define PB0_TX_GLB_LANE_SKEW_CTRL__TX_CFG_GROUPX2_EN_L4T5__SHIFT 0x00000012
+#define PB0_TX_GLB_LANE_SKEW_CTRL__TX_CFG_GROUPX2_EN_L6T7_MASK 0x00080000L
+#define PB0_TX_GLB_LANE_SKEW_CTRL__TX_CFG_GROUPX2_EN_L6T7__SHIFT 0x00000013
+#define PB0_TX_GLB_LANE_SKEW_CTRL__TX_CFG_GROUPX2_EN_L8T9_MASK 0x00100000L
+#define PB0_TX_GLB_LANE_SKEW_CTRL__TX_CFG_GROUPX2_EN_L8T9__SHIFT 0x00000014
+#define PB0_TX_GLB_LANE_SKEW_CTRL__TX_CFG_GROUPX4_EN_L0T3_MASK 0x01000000L
+#define PB0_TX_GLB_LANE_SKEW_CTRL__TX_CFG_GROUPX4_EN_L0T3__SHIFT 0x00000018
+#define PB0_TX_GLB_LANE_SKEW_CTRL__TX_CFG_GROUPX4_EN_L12T15_MASK 0x08000000L
+#define PB0_TX_GLB_LANE_SKEW_CTRL__TX_CFG_GROUPX4_EN_L12T15__SHIFT 0x0000001b
+#define PB0_TX_GLB_LANE_SKEW_CTRL__TX_CFG_GROUPX4_EN_L4T7_MASK 0x02000000L
+#define PB0_TX_GLB_LANE_SKEW_CTRL__TX_CFG_GROUPX4_EN_L4T7__SHIFT 0x00000019
+#define PB0_TX_GLB_LANE_SKEW_CTRL__TX_CFG_GROUPX4_EN_L8T11_MASK 0x04000000L
+#define PB0_TX_GLB_LANE_SKEW_CTRL__TX_CFG_GROUPX4_EN_L8T11__SHIFT 0x0000001a
+#define PB0_TX_GLB_LANE_SKEW_CTRL__TX_CFG_GROUPX8_EN_L0T7_MASK 0x10000000L
+#define PB0_TX_GLB_LANE_SKEW_CTRL__TX_CFG_GROUPX8_EN_L0T7__SHIFT 0x0000001c
+#define PB0_TX_GLB_LANE_SKEW_CTRL__TX_CFG_GROUPX8_EN_L8T15_MASK 0x20000000L
+#define PB0_TX_GLB_LANE_SKEW_CTRL__TX_CFG_GROUPX8_EN_L8T15__SHIFT 0x0000001d
+#define PB0_TX_GLB_OVRD_REG0__TX_CFG_DCLK_DIV_OVRD_EN_MASK 0x00000008L
+#define PB0_TX_GLB_OVRD_REG0__TX_CFG_DCLK_DIV_OVRD_EN__SHIFT 0x00000003
+#define PB0_TX_GLB_OVRD_REG0__TX_CFG_DCLK_DIV_OVRD_VAL_MASK 0x00000007L
+#define PB0_TX_GLB_OVRD_REG0__TX_CFG_DCLK_DIV_OVRD_VAL__SHIFT 0x00000000
+#define PB0_TX_GLB_OVRD_REG0__TX_CFG_DRV0_EN_GEN1_OVRD_VAL_MASK 0x000000f0L
+#define PB0_TX_GLB_OVRD_REG0__TX_CFG_DRV0_EN_GEN1_OVRD_VAL__SHIFT 0x00000004
+#define PB0_TX_GLB_OVRD_REG0__TX_CFG_DRV0_EN_OVRD_EN_MASK 0x00000100L
+#define PB0_TX_GLB_OVRD_REG0__TX_CFG_DRV0_EN_OVRD_EN__SHIFT 0x00000008
+#define PB0_TX_GLB_OVRD_REG0__TX_CFG_DRV0_TAP_SEL_GEN1_OVRD_VAL_MASK 0x00001e00L
+#define PB0_TX_GLB_OVRD_REG0__TX_CFG_DRV0_TAP_SEL_GEN1_OVRD_VAL__SHIFT 0x00000009
+#define PB0_TX_GLB_OVRD_REG0__TX_CFG_DRV0_TAP_SEL_OVRD_EN_MASK 0x00002000L
+#define PB0_TX_GLB_OVRD_REG0__TX_CFG_DRV0_TAP_SEL_OVRD_EN__SHIFT 0x0000000d
+#define PB0_TX_GLB_OVRD_REG0__TX_CFG_DRV1_EN_GEN1_OVRD_VAL_MASK 0x0007c000L
+#define PB0_TX_GLB_OVRD_REG0__TX_CFG_DRV1_EN_GEN1_OVRD_VAL__SHIFT 0x0000000e
+#define PB0_TX_GLB_OVRD_REG0__TX_CFG_DRV1_EN_OVRD_EN_MASK 0x00080000L
+#define PB0_TX_GLB_OVRD_REG0__TX_CFG_DRV1_EN_OVRD_EN__SHIFT 0x00000013
+#define PB0_TX_GLB_OVRD_REG0__TX_CFG_DRV1_TAP_SEL_GEN1_OVRD_VAL_MASK 0x01f00000L
+#define PB0_TX_GLB_OVRD_REG0__TX_CFG_DRV1_TAP_SEL_GEN1_OVRD_VAL__SHIFT 0x00000014
+#define PB0_TX_GLB_OVRD_REG0__TX_CFG_DRV1_TAP_SEL_OVRD_EN_MASK 0x02000000L
+#define PB0_TX_GLB_OVRD_REG0__TX_CFG_DRV1_TAP_SEL_OVRD_EN__SHIFT 0x00000019
+#define PB0_TX_GLB_OVRD_REG0__TX_CFG_DRV2_EN_GEN1_OVRD_VAL_MASK 0x3c000000L
+#define PB0_TX_GLB_OVRD_REG0__TX_CFG_DRV2_EN_GEN1_OVRD_VAL__SHIFT 0x0000001a
+#define PB0_TX_GLB_OVRD_REG0__TX_CFG_DRV2_EN_OVRD_EN_MASK 0x40000000L
+#define PB0_TX_GLB_OVRD_REG0__TX_CFG_DRV2_EN_OVRD_EN__SHIFT 0x0000001e
+#define PB0_TX_GLB_OVRD_REG1__TX_CFG_DRV2_TAP_SEL_GEN1_OVRD_VAL_MASK 0x0000000fL
+#define PB0_TX_GLB_OVRD_REG1__TX_CFG_DRV2_TAP_SEL_GEN1_OVRD_VAL__SHIFT 0x00000000
+#define PB0_TX_GLB_OVRD_REG1__TX_CFG_DRV2_TAP_SEL_OVRD_EN_MASK 0x00000010L
+#define PB0_TX_GLB_OVRD_REG1__TX_CFG_DRV2_TAP_SEL_OVRD_EN__SHIFT 0x00000004
+#define PB0_TX_GLB_OVRD_REG1__TX_CFG_DRVX_EN_GEN1_OVRD_VAL_MASK 0x00000020L
+#define PB0_TX_GLB_OVRD_REG1__TX_CFG_DRVX_EN_GEN1_OVRD_VAL__SHIFT 0x00000005
+#define PB0_TX_GLB_OVRD_REG1__TX_CFG_DRVX_EN_OVRD_EN_MASK 0x00000040L
+#define PB0_TX_GLB_OVRD_REG1__TX_CFG_DRVX_EN_OVRD_EN__SHIFT 0x00000006
+#define PB0_TX_GLB_OVRD_REG1__TX_CFG_DRVX_TAP_SEL_GEN1_OVRD_VAL_MASK 0x00000080L
+#define PB0_TX_GLB_OVRD_REG1__TX_CFG_DRVX_TAP_SEL_GEN1_OVRD_VAL__SHIFT 0x00000007
+#define PB0_TX_GLB_OVRD_REG1__TX_CFG_DRVX_TAP_SEL_OVRD_EN_MASK 0x00000100L
+#define PB0_TX_GLB_OVRD_REG1__TX_CFG_DRVX_TAP_SEL_OVRD_EN__SHIFT 0x00000008
+#define PB0_TX_GLB_OVRD_REG1__TX_CFG_PLLCLK_SEL_OVRD_EN_MASK 0x00000400L
+#define PB0_TX_GLB_OVRD_REG1__TX_CFG_PLLCLK_SEL_OVRD_EN__SHIFT 0x0000000a
+#define PB0_TX_GLB_OVRD_REG1__TX_CFG_PLLCLK_SEL_OVRD_VAL_MASK 0x00000200L
+#define PB0_TX_GLB_OVRD_REG1__TX_CFG_PLLCLK_SEL_OVRD_VAL__SHIFT 0x00000009
+#define PB0_TX_GLB_OVRD_REG1__TX_CFG_TCLK_DIV_OVRD_EN_MASK 0x00001000L
+#define PB0_TX_GLB_OVRD_REG1__TX_CFG_TCLK_DIV_OVRD_EN__SHIFT 0x0000000c
+#define PB0_TX_GLB_OVRD_REG1__TX_CFG_TCLK_DIV_OVRD_VAL_MASK 0x00000800L
+#define PB0_TX_GLB_OVRD_REG1__TX_CFG_TCLK_DIV_OVRD_VAL__SHIFT 0x0000000b
+#define PB0_TX_GLB_OVRD_REG1__TX_CMDET_EN_OVRD_EN_MASK 0x00004000L
+#define PB0_TX_GLB_OVRD_REG1__TX_CMDET_EN_OVRD_EN__SHIFT 0x0000000e
+#define PB0_TX_GLB_OVRD_REG1__TX_CMDET_EN_OVRD_VAL_MASK 0x00002000L
+#define PB0_TX_GLB_OVRD_REG1__TX_CMDET_EN_OVRD_VAL__SHIFT 0x0000000d
+#define PB0_TX_GLB_OVRD_REG1__TX_DATA_IN_OVRD_EN_MASK 0x02000000L
+#define PB0_TX_GLB_OVRD_REG1__TX_DATA_IN_OVRD_EN__SHIFT 0x00000019
+#define PB0_TX_GLB_OVRD_REG1__TX_DATA_IN_OVRD_VAL_MASK 0x01ff8000L
+#define PB0_TX_GLB_OVRD_REG1__TX_DATA_IN_OVRD_VAL__SHIFT 0x0000000f
+#define PB0_TX_GLB_OVRD_REG1__TX_RPTR_RSTN_OVRD_EN_MASK 0x08000000L
+#define PB0_TX_GLB_OVRD_REG1__TX_RPTR_RSTN_OVRD_EN__SHIFT 0x0000001b
+#define PB0_TX_GLB_OVRD_REG1__TX_RPTR_RSTN_OVRD_VAL_MASK 0x04000000L
+#define PB0_TX_GLB_OVRD_REG1__TX_RPTR_RSTN_OVRD_VAL__SHIFT 0x0000001a
+#define PB0_TX_GLB_OVRD_REG1__TX_RXDET_EN_OVRD_EN_MASK 0x20000000L
+#define PB0_TX_GLB_OVRD_REG1__TX_RXDET_EN_OVRD_EN__SHIFT 0x0000001d
+#define PB0_TX_GLB_OVRD_REG1__TX_RXDET_EN_OVRD_VAL_MASK 0x10000000L
+#define PB0_TX_GLB_OVRD_REG1__TX_RXDET_EN_OVRD_VAL__SHIFT 0x0000001c
+#define PB0_TX_GLB_OVRD_REG1__TX_WPTR_RSTN_OVRD_EN_MASK 0x80000000L
+#define PB0_TX_GLB_OVRD_REG1__TX_WPTR_RSTN_OVRD_EN__SHIFT 0x0000001f
+#define PB0_TX_GLB_OVRD_REG1__TX_WPTR_RSTN_OVRD_VAL_MASK 0x40000000L
+#define PB0_TX_GLB_OVRD_REG1__TX_WPTR_RSTN_OVRD_VAL__SHIFT 0x0000001e
+#define PB0_TX_GLB_OVRD_REG2__TX_CFG_DRV0_EN_GEN2_OVRD_VAL_MASK 0x0000f000L
+#define PB0_TX_GLB_OVRD_REG2__TX_CFG_DRV0_EN_GEN2_OVRD_VAL__SHIFT 0x0000000c
+#define PB0_TX_GLB_OVRD_REG2__TX_CFG_DRV0_TAP_SEL_GEN2_OVRD_VAL_MASK 0x000f0000L
+#define PB0_TX_GLB_OVRD_REG2__TX_CFG_DRV0_TAP_SEL_GEN2_OVRD_VAL__SHIFT 0x00000010
+#define PB0_TX_GLB_OVRD_REG2__TX_CFG_DRV1_EN_GEN2_OVRD_VAL_MASK 0x01f00000L
+#define PB0_TX_GLB_OVRD_REG2__TX_CFG_DRV1_EN_GEN2_OVRD_VAL__SHIFT 0x00000014
+#define PB0_TX_GLB_OVRD_REG2__TX_CFG_DRV1_TAP_SEL_GEN2_OVRD_VAL_MASK 0x3e000000L
+#define PB0_TX_GLB_OVRD_REG2__TX_CFG_DRV1_TAP_SEL_GEN2_OVRD_VAL__SHIFT 0x00000019
+#define PB0_TX_GLB_OVRD_REG2__TX_CFG_GROUPX16_EN_OVRD_EN_MASK 0x00000800L
+#define PB0_TX_GLB_OVRD_REG2__TX_CFG_GROUPX16_EN_OVRD_EN__SHIFT 0x0000000b
+#define PB0_TX_GLB_OVRD_REG2__TX_CFG_GROUPX16_EN_OVRD_VAL_MASK 0x00000400L
+#define PB0_TX_GLB_OVRD_REG2__TX_CFG_GROUPX16_EN_OVRD_VAL__SHIFT 0x0000000a
+#define PB0_TX_GLB_OVRD_REG2__TX_CFG_GROUPX1_EN_OVRD_EN_MASK 0x00000008L
+#define PB0_TX_GLB_OVRD_REG2__TX_CFG_GROUPX1_EN_OVRD_EN__SHIFT 0x00000003
+#define PB0_TX_GLB_OVRD_REG2__TX_CFG_GROUPX1_EN_OVRD_VAL_MASK 0x00000004L
+#define PB0_TX_GLB_OVRD_REG2__TX_CFG_GROUPX1_EN_OVRD_VAL__SHIFT 0x00000002
+#define PB0_TX_GLB_OVRD_REG2__TX_CFG_GROUPX2_EN_OVRD_EN_MASK 0x00000020L
+#define PB0_TX_GLB_OVRD_REG2__TX_CFG_GROUPX2_EN_OVRD_EN__SHIFT 0x00000005
+#define PB0_TX_GLB_OVRD_REG2__TX_CFG_GROUPX2_EN_OVRD_VAL_MASK 0x00000010L
+#define PB0_TX_GLB_OVRD_REG2__TX_CFG_GROUPX2_EN_OVRD_VAL__SHIFT 0x00000004
+#define PB0_TX_GLB_OVRD_REG2__TX_CFG_GROUPX4_EN_OVRD_EN_MASK 0x00000080L
+#define PB0_TX_GLB_OVRD_REG2__TX_CFG_GROUPX4_EN_OVRD_EN__SHIFT 0x00000007
+#define PB0_TX_GLB_OVRD_REG2__TX_CFG_GROUPX4_EN_OVRD_VAL_MASK 0x00000040L
+#define PB0_TX_GLB_OVRD_REG2__TX_CFG_GROUPX4_EN_OVRD_VAL__SHIFT 0x00000006
+#define PB0_TX_GLB_OVRD_REG2__TX_CFG_GROUPX8_EN_OVRD_EN_MASK 0x00000200L
+#define PB0_TX_GLB_OVRD_REG2__TX_CFG_GROUPX8_EN_OVRD_EN__SHIFT 0x00000009
+#define PB0_TX_GLB_OVRD_REG2__TX_CFG_GROUPX8_EN_OVRD_VAL_MASK 0x00000100L
+#define PB0_TX_GLB_OVRD_REG2__TX_CFG_GROUPX8_EN_OVRD_VAL__SHIFT 0x00000008
+#define PB0_TX_GLB_OVRD_REG2__TX_WRITE_EN_OVRD_EN_MASK 0x00000002L
+#define PB0_TX_GLB_OVRD_REG2__TX_WRITE_EN_OVRD_EN__SHIFT 0x00000001
+#define PB0_TX_GLB_OVRD_REG2__TX_WRITE_EN_OVRD_VAL_MASK 0x00000001L
+#define PB0_TX_GLB_OVRD_REG2__TX_WRITE_EN_OVRD_VAL__SHIFT 0x00000000
+#define PB0_TX_GLB_OVRD_REG3__TX_CFG_DRV0_EN_GEN3_OVRD_VAL_MASK 0x00003c00L
+#define PB0_TX_GLB_OVRD_REG3__TX_CFG_DRV0_EN_GEN3_OVRD_VAL__SHIFT 0x0000000a
+#define PB0_TX_GLB_OVRD_REG3__TX_CFG_DRV0_TAP_SEL_GEN3_OVRD_VAL_MASK 0x0003c000L
+#define PB0_TX_GLB_OVRD_REG3__TX_CFG_DRV0_TAP_SEL_GEN3_OVRD_VAL__SHIFT 0x0000000e
+#define PB0_TX_GLB_OVRD_REG3__TX_CFG_DRV1_EN_GEN3_OVRD_VAL_MASK 0x007c0000L
+#define PB0_TX_GLB_OVRD_REG3__TX_CFG_DRV1_EN_GEN3_OVRD_VAL__SHIFT 0x00000012
+#define PB0_TX_GLB_OVRD_REG3__TX_CFG_DRV1_TAP_SEL_GEN3_OVRD_VAL_MASK 0x0f800000L
+#define PB0_TX_GLB_OVRD_REG3__TX_CFG_DRV1_TAP_SEL_GEN3_OVRD_VAL__SHIFT 0x00000017
+#define PB0_TX_GLB_OVRD_REG3__TX_CFG_DRV2_EN_GEN2_OVRD_VAL_MASK 0x0000000fL
+#define PB0_TX_GLB_OVRD_REG3__TX_CFG_DRV2_EN_GEN2_OVRD_VAL__SHIFT 0x00000000
+#define PB0_TX_GLB_OVRD_REG3__TX_CFG_DRV2_EN_GEN3_OVRD_VAL_MASK 0xf0000000L
+#define PB0_TX_GLB_OVRD_REG3__TX_CFG_DRV2_EN_GEN3_OVRD_VAL__SHIFT 0x0000001c
+#define PB0_TX_GLB_OVRD_REG3__TX_CFG_DRV2_TAP_SEL_GEN2_OVRD_VAL_MASK 0x000000f0L
+#define PB0_TX_GLB_OVRD_REG3__TX_CFG_DRV2_TAP_SEL_GEN2_OVRD_VAL__SHIFT 0x00000004
+#define PB0_TX_GLB_OVRD_REG3__TX_CFG_DRVX_EN_GEN2_OVRD_VAL_MASK 0x00000100L
+#define PB0_TX_GLB_OVRD_REG3__TX_CFG_DRVX_EN_GEN2_OVRD_VAL__SHIFT 0x00000008
+#define PB0_TX_GLB_OVRD_REG3__TX_CFG_DRVX_TAP_SEL_GEN2_OVRD_VAL_MASK 0x00000200L
+#define PB0_TX_GLB_OVRD_REG3__TX_CFG_DRVX_TAP_SEL_GEN2_OVRD_VAL__SHIFT 0x00000009
+#define PB0_TX_GLB_OVRD_REG4__TX_CFG_DRV2_TAP_SEL_GEN3_OVRD_VAL_MASK 0x0000000fL
+#define PB0_TX_GLB_OVRD_REG4__TX_CFG_DRV2_TAP_SEL_GEN3_OVRD_VAL__SHIFT 0x00000000
+#define PB0_TX_GLB_OVRD_REG4__TX_CFG_DRVX_EN_GEN3_OVRD_VAL_MASK 0x00000010L
+#define PB0_TX_GLB_OVRD_REG4__TX_CFG_DRVX_EN_GEN3_OVRD_VAL__SHIFT 0x00000004
+#define PB0_TX_GLB_OVRD_REG4__TX_CFG_DRVX_TAP_SEL_GEN3_OVRD_VAL_MASK 0x00000020L
+#define PB0_TX_GLB_OVRD_REG4__TX_CFG_DRVX_TAP_SEL_GEN3_OVRD_VAL__SHIFT 0x00000005
+#define PB0_TX_GLB_SCI_STAT_OVRD_REG0__IGNR_COEFFICIENTID_SCI_UPDT_L0T3_MASK 0x00000100L
+#define PB0_TX_GLB_SCI_STAT_OVRD_REG0__IGNR_COEFFICIENTID_SCI_UPDT_L0T3__SHIFT 0x00000008
+#define PB0_TX_GLB_SCI_STAT_OVRD_REG0__IGNR_COEFFICIENTID_SCI_UPDT_L12T15_MASK 0x00000800L
+#define PB0_TX_GLB_SCI_STAT_OVRD_REG0__IGNR_COEFFICIENTID_SCI_UPDT_L12T15__SHIFT 0x0000000b
+#define PB0_TX_GLB_SCI_STAT_OVRD_REG0__IGNR_COEFFICIENTID_SCI_UPDT_L4T7_MASK 0x00000200L
+#define PB0_TX_GLB_SCI_STAT_OVRD_REG0__IGNR_COEFFICIENTID_SCI_UPDT_L4T7__SHIFT 0x00000009
+#define PB0_TX_GLB_SCI_STAT_OVRD_REG0__IGNR_COEFFICIENTID_SCI_UPDT_L8T11_MASK 0x00000400L
+#define PB0_TX_GLB_SCI_STAT_OVRD_REG0__IGNR_COEFFICIENTID_SCI_UPDT_L8T11__SHIFT 0x0000000a
+#define PB0_TX_GLB_SCI_STAT_OVRD_REG0__IGNR_COEFFICIENT_SCI_UPDT_L0T3_MASK 0x00001000L
+#define PB0_TX_GLB_SCI_STAT_OVRD_REG0__IGNR_COEFFICIENT_SCI_UPDT_L0T3__SHIFT 0x0000000c
+#define PB0_TX_GLB_SCI_STAT_OVRD_REG0__IGNR_COEFFICIENT_SCI_UPDT_L12T15_MASK 0x00008000L
+#define PB0_TX_GLB_SCI_STAT_OVRD_REG0__IGNR_COEFFICIENT_SCI_UPDT_L12T15__SHIFT 0x0000000f
+#define PB0_TX_GLB_SCI_STAT_OVRD_REG0__IGNR_COEFFICIENT_SCI_UPDT_L4T7_MASK 0x00002000L
+#define PB0_TX_GLB_SCI_STAT_OVRD_REG0__IGNR_COEFFICIENT_SCI_UPDT_L4T7__SHIFT 0x0000000d
+#define PB0_TX_GLB_SCI_STAT_OVRD_REG0__IGNR_COEFFICIENT_SCI_UPDT_L8T11_MASK 0x00004000L
+#define PB0_TX_GLB_SCI_STAT_OVRD_REG0__IGNR_COEFFICIENT_SCI_UPDT_L8T11__SHIFT 0x0000000e
+#define PB0_TX_GLB_SCI_STAT_OVRD_REG0__IGNR_INCOHERENTCK_SCI_UPDT_L0T3_MASK 0x00000010L
+#define PB0_TX_GLB_SCI_STAT_OVRD_REG0__IGNR_INCOHERENTCK_SCI_UPDT_L0T3__SHIFT 0x00000004
+#define PB0_TX_GLB_SCI_STAT_OVRD_REG0__IGNR_INCOHERENTCK_SCI_UPDT_L12T15_MASK 0x00000080L
+#define PB0_TX_GLB_SCI_STAT_OVRD_REG0__IGNR_INCOHERENTCK_SCI_UPDT_L12T15__SHIFT 0x00000007
+#define PB0_TX_GLB_SCI_STAT_OVRD_REG0__IGNR_INCOHERENTCK_SCI_UPDT_L4T7_MASK 0x00000020L
+#define PB0_TX_GLB_SCI_STAT_OVRD_REG0__IGNR_INCOHERENTCK_SCI_UPDT_L4T7__SHIFT 0x00000005
+#define PB0_TX_GLB_SCI_STAT_OVRD_REG0__IGNR_INCOHERENTCK_SCI_UPDT_L8T11_MASK 0x00000040L
+#define PB0_TX_GLB_SCI_STAT_OVRD_REG0__IGNR_INCOHERENTCK_SCI_UPDT_L8T11__SHIFT 0x00000006
+#define PB0_TX_GLB_SCI_STAT_OVRD_REG0__IGNR_TXPWR_SCI_UPDT_L0T3_MASK 0x00000001L
+#define PB0_TX_GLB_SCI_STAT_OVRD_REG0__IGNR_TXPWR_SCI_UPDT_L0T3__SHIFT 0x00000000
+#define PB0_TX_GLB_SCI_STAT_OVRD_REG0__IGNR_TXPWR_SCI_UPDT_L12T15_MASK 0x00000008L
+#define PB0_TX_GLB_SCI_STAT_OVRD_REG0__IGNR_TXPWR_SCI_UPDT_L12T15__SHIFT 0x00000003
+#define PB0_TX_GLB_SCI_STAT_OVRD_REG0__IGNR_TXPWR_SCI_UPDT_L4T7_MASK 0x00000002L
+#define PB0_TX_GLB_SCI_STAT_OVRD_REG0__IGNR_TXPWR_SCI_UPDT_L4T7__SHIFT 0x00000001
+#define PB0_TX_GLB_SCI_STAT_OVRD_REG0__IGNR_TXPWR_SCI_UPDT_L8T11_MASK 0x00000004L
+#define PB0_TX_GLB_SCI_STAT_OVRD_REG0__IGNR_TXPWR_SCI_UPDT_L8T11__SHIFT 0x00000002
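(Editor's note, not part of the patch: each generated _MASK/__SHIFT pair above follows the usual bit-field convention — the mask isolates the field within the 32-bit register word and the shift right-aligns it. A minimal sketch of how such pairs are typically consumed; get_field/set_field are illustrative helper names and do not appear in this header:

	/* Extract a field from a 32-bit register value. */
	static inline unsigned int get_field(unsigned int reg_val,
					     unsigned int mask,
					     unsigned int shift)
	{
		return (reg_val & mask) >> shift;
	}

	/* Replace a field in a 32-bit register value, clamping to the mask. */
	static inline unsigned int set_field(unsigned int reg_val,
					     unsigned int mask,
					     unsigned int shift,
					     unsigned int field_val)
	{
		return (reg_val & ~mask) | ((field_val << shift) & mask);
	}

For example, get_field(val, PB0_TX_GLB_CTRL_REG0__TX_STAGGER_CTRL_MASK, PB0_TX_GLB_CTRL_REG0__TX_STAGGER_CTRL__SHIFT) would yield the two-bit TX_STAGGER_CTRL value from a PB0_TX_GLB_CTRL_REG0 read. The per-lane PB0_TX_LANE<n>_* blocks that follow repeat the same field layout for each of the 16 lanes.)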
+#define PB0_TX_LANE0_CTRL_REG0__TX_CFG_DISPCLK_MODE_0_MASK 0x00000001L
+#define PB0_TX_LANE0_CTRL_REG0__TX_CFG_DISPCLK_MODE_0__SHIFT 0x00000000
+#define PB0_TX_LANE0_CTRL_REG0__TX_CFG_INV_DATA_0_MASK 0x00000002L
+#define PB0_TX_LANE0_CTRL_REG0__TX_CFG_INV_DATA_0__SHIFT 0x00000001
+#define PB0_TX_LANE0_CTRL_REG0__TX_CFG_SWING_BOOST_EN_0_MASK 0x00000004L
+#define PB0_TX_LANE0_CTRL_REG0__TX_CFG_SWING_BOOST_EN_0__SHIFT 0x00000002
+#define PB0_TX_LANE0_CTRL_REG0__TX_DBG_PRBS_EN_0_MASK 0x00000008L
+#define PB0_TX_LANE0_CTRL_REG0__TX_DBG_PRBS_EN_0__SHIFT 0x00000003
+#define PB0_TX_LANE0_OVRD_REG0__TX_DCLK_EN_OVRD_EN_0_MASK 0x00000002L
+#define PB0_TX_LANE0_OVRD_REG0__TX_DCLK_EN_OVRD_EN_0__SHIFT 0x00000001
+#define PB0_TX_LANE0_OVRD_REG0__TX_DCLK_EN_OVRD_VAL_0_MASK 0x00000001L
+#define PB0_TX_LANE0_OVRD_REG0__TX_DCLK_EN_OVRD_VAL_0__SHIFT 0x00000000
+#define PB0_TX_LANE0_OVRD_REG0__TX_DRV_DATA_EN_OVRD_EN_0_MASK 0x00000008L
+#define PB0_TX_LANE0_OVRD_REG0__TX_DRV_DATA_EN_OVRD_EN_0__SHIFT 0x00000003
+#define PB0_TX_LANE0_OVRD_REG0__TX_DRV_DATA_EN_OVRD_VAL_0_MASK 0x00000004L
+#define PB0_TX_LANE0_OVRD_REG0__TX_DRV_DATA_EN_OVRD_VAL_0__SHIFT 0x00000002
+#define PB0_TX_LANE0_OVRD_REG0__TX_DRV_PWRON_OVRD_EN_0_MASK 0x00000020L
+#define PB0_TX_LANE0_OVRD_REG0__TX_DRV_PWRON_OVRD_EN_0__SHIFT 0x00000005
+#define PB0_TX_LANE0_OVRD_REG0__TX_DRV_PWRON_OVRD_VAL_0_MASK 0x00000010L
+#define PB0_TX_LANE0_OVRD_REG0__TX_DRV_PWRON_OVRD_VAL_0__SHIFT 0x00000004
+#define PB0_TX_LANE0_OVRD_REG0__TX_FRONTEND_PWRON_OVRD_EN_0_MASK 0x00000080L
+#define PB0_TX_LANE0_OVRD_REG0__TX_FRONTEND_PWRON_OVRD_EN_0__SHIFT 0x00000007
+#define PB0_TX_LANE0_OVRD_REG0__TX_FRONTEND_PWRON_OVRD_VAL_0_MASK 0x00000040L
+#define PB0_TX_LANE0_OVRD_REG0__TX_FRONTEND_PWRON_OVRD_VAL_0__SHIFT 0x00000006
+#define PB0_TX_LANE0_SCI_STAT_OVRD_REG0__COEFFICIENT_0_MASK 0x0000fc00L
+#define PB0_TX_LANE0_SCI_STAT_OVRD_REG0__COEFFICIENT_0__SHIFT 0x0000000a
+#define PB0_TX_LANE0_SCI_STAT_OVRD_REG0__COEFFICIENTID_0_MASK 0x00000300L
+#define PB0_TX_LANE0_SCI_STAT_OVRD_REG0__COEFFICIENTID_0__SHIFT 0x00000008
+#define PB0_TX_LANE0_SCI_STAT_OVRD_REG0__DEEMPH_0_MASK 0x00000080L
+#define PB0_TX_LANE0_SCI_STAT_OVRD_REG0__DEEMPH_0__SHIFT 0x00000007
+#define PB0_TX_LANE0_SCI_STAT_OVRD_REG0__INCOHERENTCK_0_MASK 0x00000008L
+#define PB0_TX_LANE0_SCI_STAT_OVRD_REG0__INCOHERENTCK_0__SHIFT 0x00000003
+#define PB0_TX_LANE0_SCI_STAT_OVRD_REG0__TXMARG_0_MASK 0x00000070L
+#define PB0_TX_LANE0_SCI_STAT_OVRD_REG0__TXMARG_0__SHIFT 0x00000004
+#define PB0_TX_LANE0_SCI_STAT_OVRD_REG0__TXPWR_0_MASK 0x00000007L
+#define PB0_TX_LANE0_SCI_STAT_OVRD_REG0__TXPWR_0__SHIFT 0x00000000
+#define PB0_TX_LANE10_CTRL_REG0__TX_CFG_DISPCLK_MODE_10_MASK 0x00000001L
+#define PB0_TX_LANE10_CTRL_REG0__TX_CFG_DISPCLK_MODE_10__SHIFT 0x00000000
+#define PB0_TX_LANE10_CTRL_REG0__TX_CFG_INV_DATA_10_MASK 0x00000002L
+#define PB0_TX_LANE10_CTRL_REG0__TX_CFG_INV_DATA_10__SHIFT 0x00000001
+#define PB0_TX_LANE10_CTRL_REG0__TX_CFG_SWING_BOOST_EN_10_MASK 0x00000004L
+#define PB0_TX_LANE10_CTRL_REG0__TX_CFG_SWING_BOOST_EN_10__SHIFT 0x00000002
+#define PB0_TX_LANE10_CTRL_REG0__TX_DBG_PRBS_EN_10_MASK 0x00000008L
+#define PB0_TX_LANE10_CTRL_REG0__TX_DBG_PRBS_EN_10__SHIFT 0x00000003
+#define PB0_TX_LANE10_OVRD_REG0__TX_DCLK_EN_OVRD_EN_10_MASK 0x00000002L
+#define PB0_TX_LANE10_OVRD_REG0__TX_DCLK_EN_OVRD_EN_10__SHIFT 0x00000001
+#define PB0_TX_LANE10_OVRD_REG0__TX_DCLK_EN_OVRD_VAL_10_MASK 0x00000001L
+#define PB0_TX_LANE10_OVRD_REG0__TX_DCLK_EN_OVRD_VAL_10__SHIFT 0x00000000
+#define PB0_TX_LANE10_OVRD_REG0__TX_DRV_DATA_EN_OVRD_EN_10_MASK 0x00000008L
+#define PB0_TX_LANE10_OVRD_REG0__TX_DRV_DATA_EN_OVRD_EN_10__SHIFT 0x00000003
+#define PB0_TX_LANE10_OVRD_REG0__TX_DRV_DATA_EN_OVRD_VAL_10_MASK 0x00000004L
+#define PB0_TX_LANE10_OVRD_REG0__TX_DRV_DATA_EN_OVRD_VAL_10__SHIFT 0x00000002
+#define PB0_TX_LANE10_OVRD_REG0__TX_DRV_PWRON_OVRD_EN_10_MASK 0x00000020L
+#define PB0_TX_LANE10_OVRD_REG0__TX_DRV_PWRON_OVRD_EN_10__SHIFT 0x00000005
+#define PB0_TX_LANE10_OVRD_REG0__TX_DRV_PWRON_OVRD_VAL_10_MASK 0x00000010L
+#define PB0_TX_LANE10_OVRD_REG0__TX_DRV_PWRON_OVRD_VAL_10__SHIFT 0x00000004
+#define PB0_TX_LANE10_OVRD_REG0__TX_FRONTEND_PWRON_OVRD_EN_10_MASK 0x00000080L
+#define PB0_TX_LANE10_OVRD_REG0__TX_FRONTEND_PWRON_OVRD_EN_10__SHIFT 0x00000007
+#define PB0_TX_LANE10_OVRD_REG0__TX_FRONTEND_PWRON_OVRD_VAL_10_MASK 0x00000040L
+#define PB0_TX_LANE10_OVRD_REG0__TX_FRONTEND_PWRON_OVRD_VAL_10__SHIFT 0x00000006
+#define PB0_TX_LANE10_SCI_STAT_OVRD_REG0__COEFFICIENT_10_MASK 0x0000fc00L
+#define PB0_TX_LANE10_SCI_STAT_OVRD_REG0__COEFFICIENT_10__SHIFT 0x0000000a
+#define PB0_TX_LANE10_SCI_STAT_OVRD_REG0__COEFFICIENTID_10_MASK 0x00000300L
+#define PB0_TX_LANE10_SCI_STAT_OVRD_REG0__COEFFICIENTID_10__SHIFT 0x00000008
+#define PB0_TX_LANE10_SCI_STAT_OVRD_REG0__DEEMPH_10_MASK 0x00000080L
+#define PB0_TX_LANE10_SCI_STAT_OVRD_REG0__DEEMPH_10__SHIFT 0x00000007
+#define PB0_TX_LANE10_SCI_STAT_OVRD_REG0__INCOHERENTCK_10_MASK 0x00000008L
+#define PB0_TX_LANE10_SCI_STAT_OVRD_REG0__INCOHERENTCK_10__SHIFT 0x00000003
+#define PB0_TX_LANE10_SCI_STAT_OVRD_REG0__TXMARG_10_MASK 0x00000070L
+#define PB0_TX_LANE10_SCI_STAT_OVRD_REG0__TXMARG_10__SHIFT 0x00000004
+#define PB0_TX_LANE10_SCI_STAT_OVRD_REG0__TXPWR_10_MASK 0x00000007L
+#define PB0_TX_LANE10_SCI_STAT_OVRD_REG0__TXPWR_10__SHIFT 0x00000000
+#define PB0_TX_LANE11_CTRL_REG0__TX_CFG_DISPCLK_MODE_11_MASK 0x00000001L
+#define PB0_TX_LANE11_CTRL_REG0__TX_CFG_DISPCLK_MODE_11__SHIFT 0x00000000
+#define PB0_TX_LANE11_CTRL_REG0__TX_CFG_INV_DATA_11_MASK 0x00000002L
+#define PB0_TX_LANE11_CTRL_REG0__TX_CFG_INV_DATA_11__SHIFT 0x00000001
+#define PB0_TX_LANE11_CTRL_REG0__TX_CFG_SWING_BOOST_EN_11_MASK 0x00000004L
+#define PB0_TX_LANE11_CTRL_REG0__TX_CFG_SWING_BOOST_EN_11__SHIFT 0x00000002
+#define PB0_TX_LANE11_CTRL_REG0__TX_DBG_PRBS_EN_11_MASK 0x00000008L
+#define PB0_TX_LANE11_CTRL_REG0__TX_DBG_PRBS_EN_11__SHIFT 0x00000003
+#define PB0_TX_LANE11_OVRD_REG0__TX_DCLK_EN_OVRD_EN_11_MASK 0x00000002L
+#define PB0_TX_LANE11_OVRD_REG0__TX_DCLK_EN_OVRD_EN_11__SHIFT 0x00000001
+#define PB0_TX_LANE11_OVRD_REG0__TX_DCLK_EN_OVRD_VAL_11_MASK 0x00000001L
+#define PB0_TX_LANE11_OVRD_REG0__TX_DCLK_EN_OVRD_VAL_11__SHIFT 0x00000000
+#define PB0_TX_LANE11_OVRD_REG0__TX_DRV_DATA_EN_OVRD_EN_11_MASK 0x00000008L
+#define PB0_TX_LANE11_OVRD_REG0__TX_DRV_DATA_EN_OVRD_EN_11__SHIFT 0x00000003
+#define PB0_TX_LANE11_OVRD_REG0__TX_DRV_DATA_EN_OVRD_VAL_11_MASK 0x00000004L
+#define PB0_TX_LANE11_OVRD_REG0__TX_DRV_DATA_EN_OVRD_VAL_11__SHIFT 0x00000002
+#define PB0_TX_LANE11_OVRD_REG0__TX_DRV_PWRON_OVRD_EN_11_MASK 0x00000020L
+#define PB0_TX_LANE11_OVRD_REG0__TX_DRV_PWRON_OVRD_EN_11__SHIFT 0x00000005
+#define PB0_TX_LANE11_OVRD_REG0__TX_DRV_PWRON_OVRD_VAL_11_MASK 0x00000010L
+#define PB0_TX_LANE11_OVRD_REG0__TX_DRV_PWRON_OVRD_VAL_11__SHIFT 0x00000004
+#define PB0_TX_LANE11_OVRD_REG0__TX_FRONTEND_PWRON_OVRD_EN_11_MASK 0x00000080L
+#define PB0_TX_LANE11_OVRD_REG0__TX_FRONTEND_PWRON_OVRD_EN_11__SHIFT 0x00000007
+#define PB0_TX_LANE11_OVRD_REG0__TX_FRONTEND_PWRON_OVRD_VAL_11_MASK 0x00000040L
+#define PB0_TX_LANE11_OVRD_REG0__TX_FRONTEND_PWRON_OVRD_VAL_11__SHIFT 0x00000006
+#define PB0_TX_LANE11_SCI_STAT_OVRD_REG0__COEFFICIENT_11_MASK 0x0000fc00L
+#define PB0_TX_LANE11_SCI_STAT_OVRD_REG0__COEFFICIENT_11__SHIFT 0x0000000a
+#define PB0_TX_LANE11_SCI_STAT_OVRD_REG0__COEFFICIENTID_11_MASK 0x00000300L
+#define PB0_TX_LANE11_SCI_STAT_OVRD_REG0__COEFFICIENTID_11__SHIFT 0x00000008
+#define PB0_TX_LANE11_SCI_STAT_OVRD_REG0__DEEMPH_11_MASK 0x00000080L
+#define PB0_TX_LANE11_SCI_STAT_OVRD_REG0__DEEMPH_11__SHIFT 0x00000007
+#define PB0_TX_LANE11_SCI_STAT_OVRD_REG0__INCOHERENTCK_11_MASK 0x00000008L
+#define PB0_TX_LANE11_SCI_STAT_OVRD_REG0__INCOHERENTCK_11__SHIFT 0x00000003
+#define PB0_TX_LANE11_SCI_STAT_OVRD_REG0__TXMARG_11_MASK 0x00000070L
+#define PB0_TX_LANE11_SCI_STAT_OVRD_REG0__TXMARG_11__SHIFT 0x00000004
+#define PB0_TX_LANE11_SCI_STAT_OVRD_REG0__TXPWR_11_MASK 0x00000007L
+#define PB0_TX_LANE11_SCI_STAT_OVRD_REG0__TXPWR_11__SHIFT 0x00000000
+#define PB0_TX_LANE12_CTRL_REG0__TX_CFG_DISPCLK_MODE_12_MASK 0x00000001L
+#define PB0_TX_LANE12_CTRL_REG0__TX_CFG_DISPCLK_MODE_12__SHIFT 0x00000000
+#define PB0_TX_LANE12_CTRL_REG0__TX_CFG_INV_DATA_12_MASK 0x00000002L
+#define PB0_TX_LANE12_CTRL_REG0__TX_CFG_INV_DATA_12__SHIFT 0x00000001
+#define PB0_TX_LANE12_CTRL_REG0__TX_CFG_SWING_BOOST_EN_12_MASK 0x00000004L
+#define PB0_TX_LANE12_CTRL_REG0__TX_CFG_SWING_BOOST_EN_12__SHIFT 0x00000002
+#define PB0_TX_LANE12_CTRL_REG0__TX_DBG_PRBS_EN_12_MASK 0x00000008L
+#define PB0_TX_LANE12_CTRL_REG0__TX_DBG_PRBS_EN_12__SHIFT 0x00000003
+#define PB0_TX_LANE12_OVRD_REG0__TX_DCLK_EN_OVRD_EN_12_MASK 0x00000002L
+#define PB0_TX_LANE12_OVRD_REG0__TX_DCLK_EN_OVRD_EN_12__SHIFT 0x00000001
+#define PB0_TX_LANE12_OVRD_REG0__TX_DCLK_EN_OVRD_VAL_12_MASK 0x00000001L
+#define PB0_TX_LANE12_OVRD_REG0__TX_DCLK_EN_OVRD_VAL_12__SHIFT 0x00000000
+#define PB0_TX_LANE12_OVRD_REG0__TX_DRV_DATA_EN_OVRD_EN_12_MASK 0x00000008L
+#define PB0_TX_LANE12_OVRD_REG0__TX_DRV_DATA_EN_OVRD_EN_12__SHIFT 0x00000003
+#define PB0_TX_LANE12_OVRD_REG0__TX_DRV_DATA_EN_OVRD_VAL_12_MASK 0x00000004L
+#define PB0_TX_LANE12_OVRD_REG0__TX_DRV_DATA_EN_OVRD_VAL_12__SHIFT 0x00000002
+#define PB0_TX_LANE12_OVRD_REG0__TX_DRV_PWRON_OVRD_EN_12_MASK 0x00000020L
+#define PB0_TX_LANE12_OVRD_REG0__TX_DRV_PWRON_OVRD_EN_12__SHIFT 0x00000005
+#define PB0_TX_LANE12_OVRD_REG0__TX_DRV_PWRON_OVRD_VAL_12_MASK 0x00000010L
+#define PB0_TX_LANE12_OVRD_REG0__TX_DRV_PWRON_OVRD_VAL_12__SHIFT 0x00000004
+#define PB0_TX_LANE12_OVRD_REG0__TX_FRONTEND_PWRON_OVRD_EN_12_MASK 0x00000080L
+#define PB0_TX_LANE12_OVRD_REG0__TX_FRONTEND_PWRON_OVRD_EN_12__SHIFT 0x00000007
+#define PB0_TX_LANE12_OVRD_REG0__TX_FRONTEND_PWRON_OVRD_VAL_12_MASK 0x00000040L
+#define PB0_TX_LANE12_OVRD_REG0__TX_FRONTEND_PWRON_OVRD_VAL_12__SHIFT 0x00000006
+#define PB0_TX_LANE12_SCI_STAT_OVRD_REG0__COEFFICIENT_12_MASK 0x0000fc00L
+#define PB0_TX_LANE12_SCI_STAT_OVRD_REG0__COEFFICIENT_12__SHIFT 0x0000000a
+#define PB0_TX_LANE12_SCI_STAT_OVRD_REG0__COEFFICIENTID_12_MASK 0x00000300L
+#define PB0_TX_LANE12_SCI_STAT_OVRD_REG0__COEFFICIENTID_12__SHIFT 0x00000008
+#define PB0_TX_LANE12_SCI_STAT_OVRD_REG0__DEEMPH_12_MASK 0x00000080L
+#define PB0_TX_LANE12_SCI_STAT_OVRD_REG0__DEEMPH_12__SHIFT 0x00000007
+#define PB0_TX_LANE12_SCI_STAT_OVRD_REG0__INCOHERENTCK_12_MASK 0x00000008L
+#define PB0_TX_LANE12_SCI_STAT_OVRD_REG0__INCOHERENTCK_12__SHIFT 0x00000003
+#define PB0_TX_LANE12_SCI_STAT_OVRD_REG0__TXMARG_12_MASK 0x00000070L
+#define PB0_TX_LANE12_SCI_STAT_OVRD_REG0__TXMARG_12__SHIFT 0x00000004
+#define PB0_TX_LANE12_SCI_STAT_OVRD_REG0__TXPWR_12_MASK 0x00000007L
+#define PB0_TX_LANE12_SCI_STAT_OVRD_REG0__TXPWR_12__SHIFT 0x00000000
+#define PB0_TX_LANE13_CTRL_REG0__TX_CFG_DISPCLK_MODE_13_MASK 0x00000001L
+#define PB0_TX_LANE13_CTRL_REG0__TX_CFG_DISPCLK_MODE_13__SHIFT 0x00000000
+#define PB0_TX_LANE13_CTRL_REG0__TX_CFG_INV_DATA_13_MASK 0x00000002L
+#define PB0_TX_LANE13_CTRL_REG0__TX_CFG_INV_DATA_13__SHIFT 0x00000001
+#define PB0_TX_LANE13_CTRL_REG0__TX_CFG_SWING_BOOST_EN_13_MASK 0x00000004L
+#define PB0_TX_LANE13_CTRL_REG0__TX_CFG_SWING_BOOST_EN_13__SHIFT 0x00000002
+#define PB0_TX_LANE13_CTRL_REG0__TX_DBG_PRBS_EN_13_MASK 0x00000008L
+#define PB0_TX_LANE13_CTRL_REG0__TX_DBG_PRBS_EN_13__SHIFT 0x00000003
+#define PB0_TX_LANE13_OVRD_REG0__TX_DCLK_EN_OVRD_EN_13_MASK 0x00000002L
+#define PB0_TX_LANE13_OVRD_REG0__TX_DCLK_EN_OVRD_EN_13__SHIFT 0x00000001
+#define PB0_TX_LANE13_OVRD_REG0__TX_DCLK_EN_OVRD_VAL_13_MASK 0x00000001L
+#define PB0_TX_LANE13_OVRD_REG0__TX_DCLK_EN_OVRD_VAL_13__SHIFT 0x00000000
+#define PB0_TX_LANE13_OVRD_REG0__TX_DRV_DATA_EN_OVRD_EN_13_MASK 0x00000008L
+#define PB0_TX_LANE13_OVRD_REG0__TX_DRV_DATA_EN_OVRD_EN_13__SHIFT 0x00000003
+#define PB0_TX_LANE13_OVRD_REG0__TX_DRV_DATA_EN_OVRD_VAL_13_MASK 0x00000004L
+#define PB0_TX_LANE13_OVRD_REG0__TX_DRV_DATA_EN_OVRD_VAL_13__SHIFT 0x00000002
+#define PB0_TX_LANE13_OVRD_REG0__TX_DRV_PWRON_OVRD_EN_13_MASK 0x00000020L
+#define PB0_TX_LANE13_OVRD_REG0__TX_DRV_PWRON_OVRD_EN_13__SHIFT 0x00000005
+#define PB0_TX_LANE13_OVRD_REG0__TX_DRV_PWRON_OVRD_VAL_13_MASK 0x00000010L
+#define PB0_TX_LANE13_OVRD_REG0__TX_DRV_PWRON_OVRD_VAL_13__SHIFT 0x00000004
+#define PB0_TX_LANE13_OVRD_REG0__TX_FRONTEND_PWRON_OVRD_EN_13_MASK 0x00000080L
+#define PB0_TX_LANE13_OVRD_REG0__TX_FRONTEND_PWRON_OVRD_EN_13__SHIFT 0x00000007
+#define PB0_TX_LANE13_OVRD_REG0__TX_FRONTEND_PWRON_OVRD_VAL_13_MASK 0x00000040L
+#define PB0_TX_LANE13_OVRD_REG0__TX_FRONTEND_PWRON_OVRD_VAL_13__SHIFT 0x00000006
+#define PB0_TX_LANE13_SCI_STAT_OVRD_REG0__COEFFICIENT_13_MASK 0x0000fc00L
+#define PB0_TX_LANE13_SCI_STAT_OVRD_REG0__COEFFICIENT_13__SHIFT 0x0000000a
+#define PB0_TX_LANE13_SCI_STAT_OVRD_REG0__COEFFICIENTID_13_MASK 0x00000300L
+#define PB0_TX_LANE13_SCI_STAT_OVRD_REG0__COEFFICIENTID_13__SHIFT 0x00000008
+#define PB0_TX_LANE13_SCI_STAT_OVRD_REG0__DEEMPH_13_MASK 0x00000080L
+#define PB0_TX_LANE13_SCI_STAT_OVRD_REG0__DEEMPH_13__SHIFT 0x00000007
+#define PB0_TX_LANE13_SCI_STAT_OVRD_REG0__INCOHERENTCK_13_MASK 0x00000008L
+#define PB0_TX_LANE13_SCI_STAT_OVRD_REG0__INCOHERENTCK_13__SHIFT 0x00000003
+#define PB0_TX_LANE13_SCI_STAT_OVRD_REG0__TXMARG_13_MASK 0x00000070L
+#define PB0_TX_LANE13_SCI_STAT_OVRD_REG0__TXMARG_13__SHIFT 0x00000004
+#define PB0_TX_LANE13_SCI_STAT_OVRD_REG0__TXPWR_13_MASK 0x00000007L
+#define PB0_TX_LANE13_SCI_STAT_OVRD_REG0__TXPWR_13__SHIFT 0x00000000
+#define PB0_TX_LANE14_CTRL_REG0__TX_CFG_DISPCLK_MODE_14_MASK 0x00000001L
+#define PB0_TX_LANE14_CTRL_REG0__TX_CFG_DISPCLK_MODE_14__SHIFT 0x00000000
+#define PB0_TX_LANE14_CTRL_REG0__TX_CFG_INV_DATA_14_MASK 0x00000002L
+#define PB0_TX_LANE14_CTRL_REG0__TX_CFG_INV_DATA_14__SHIFT 0x00000001
+#define PB0_TX_LANE14_CTRL_REG0__TX_CFG_SWING_BOOST_EN_14_MASK 0x00000004L
+#define PB0_TX_LANE14_CTRL_REG0__TX_CFG_SWING_BOOST_EN_14__SHIFT 0x00000002
+#define PB0_TX_LANE14_CTRL_REG0__TX_DBG_PRBS_EN_14_MASK 0x00000008L
+#define PB0_TX_LANE14_CTRL_REG0__TX_DBG_PRBS_EN_14__SHIFT 0x00000003
+#define PB0_TX_LANE14_OVRD_REG0__TX_DCLK_EN_OVRD_EN_14_MASK 0x00000002L
+#define PB0_TX_LANE14_OVRD_REG0__TX_DCLK_EN_OVRD_EN_14__SHIFT 0x00000001
+#define PB0_TX_LANE14_OVRD_REG0__TX_DCLK_EN_OVRD_VAL_14_MASK 0x00000001L
+#define PB0_TX_LANE14_OVRD_REG0__TX_DCLK_EN_OVRD_VAL_14__SHIFT 0x00000000
+#define PB0_TX_LANE14_OVRD_REG0__TX_DRV_DATA_EN_OVRD_EN_14_MASK 0x00000008L
+#define PB0_TX_LANE14_OVRD_REG0__TX_DRV_DATA_EN_OVRD_EN_14__SHIFT 0x00000003
+#define PB0_TX_LANE14_OVRD_REG0__TX_DRV_DATA_EN_OVRD_VAL_14_MASK 0x00000004L
+#define PB0_TX_LANE14_OVRD_REG0__TX_DRV_DATA_EN_OVRD_VAL_14__SHIFT 0x00000002
+#define PB0_TX_LANE14_OVRD_REG0__TX_DRV_PWRON_OVRD_EN_14_MASK 0x00000020L
+#define PB0_TX_LANE14_OVRD_REG0__TX_DRV_PWRON_OVRD_EN_14__SHIFT 0x00000005
+#define PB0_TX_LANE14_OVRD_REG0__TX_DRV_PWRON_OVRD_VAL_14_MASK 0x00000010L
+#define PB0_TX_LANE14_OVRD_REG0__TX_DRV_PWRON_OVRD_VAL_14__SHIFT 0x00000004
+#define PB0_TX_LANE14_OVRD_REG0__TX_FRONTEND_PWRON_OVRD_EN_14_MASK 0x00000080L
+#define PB0_TX_LANE14_OVRD_REG0__TX_FRONTEND_PWRON_OVRD_EN_14__SHIFT 0x00000007
+#define PB0_TX_LANE14_OVRD_REG0__TX_FRONTEND_PWRON_OVRD_VAL_14_MASK 0x00000040L
+#define PB0_TX_LANE14_OVRD_REG0__TX_FRONTEND_PWRON_OVRD_VAL_14__SHIFT 0x00000006
+#define PB0_TX_LANE14_SCI_STAT_OVRD_REG0__COEFFICIENT_14_MASK 0x0000fc00L
+#define PB0_TX_LANE14_SCI_STAT_OVRD_REG0__COEFFICIENT_14__SHIFT 0x0000000a
+#define PB0_TX_LANE14_SCI_STAT_OVRD_REG0__COEFFICIENTID_14_MASK 0x00000300L
+#define PB0_TX_LANE14_SCI_STAT_OVRD_REG0__COEFFICIENTID_14__SHIFT 0x00000008
+#define PB0_TX_LANE14_SCI_STAT_OVRD_REG0__DEEMPH_14_MASK 0x00000080L
+#define PB0_TX_LANE14_SCI_STAT_OVRD_REG0__DEEMPH_14__SHIFT 0x00000007
+#define PB0_TX_LANE14_SCI_STAT_OVRD_REG0__INCOHERENTCK_14_MASK 0x00000008L
+#define PB0_TX_LANE14_SCI_STAT_OVRD_REG0__INCOHERENTCK_14__SHIFT 0x00000003
+#define PB0_TX_LANE14_SCI_STAT_OVRD_REG0__TXMARG_14_MASK 0x00000070L
+#define PB0_TX_LANE14_SCI_STAT_OVRD_REG0__TXMARG_14__SHIFT 0x00000004
+#define PB0_TX_LANE14_SCI_STAT_OVRD_REG0__TXPWR_14_MASK 0x00000007L
+#define PB0_TX_LANE14_SCI_STAT_OVRD_REG0__TXPWR_14__SHIFT 0x00000000
+#define PB0_TX_LANE15_CTRL_REG0__TX_CFG_DISPCLK_MODE_15_MASK 0x00000001L
+#define PB0_TX_LANE15_CTRL_REG0__TX_CFG_DISPCLK_MODE_15__SHIFT 0x00000000
+#define PB0_TX_LANE15_CTRL_REG0__TX_CFG_INV_DATA_15_MASK 0x00000002L
+#define PB0_TX_LANE15_CTRL_REG0__TX_CFG_INV_DATA_15__SHIFT 0x00000001
+#define PB0_TX_LANE15_CTRL_REG0__TX_CFG_SWING_BOOST_EN_15_MASK 0x00000004L
+#define PB0_TX_LANE15_CTRL_REG0__TX_CFG_SWING_BOOST_EN_15__SHIFT 0x00000002
+#define PB0_TX_LANE15_CTRL_REG0__TX_DBG_PRBS_EN_15_MASK 0x00000008L
+#define PB0_TX_LANE15_CTRL_REG0__TX_DBG_PRBS_EN_15__SHIFT 0x00000003
+#define PB0_TX_LANE15_OVRD_REG0__TX_DCLK_EN_OVRD_EN_15_MASK 0x00000002L
+#define PB0_TX_LANE15_OVRD_REG0__TX_DCLK_EN_OVRD_EN_15__SHIFT 0x00000001
+#define PB0_TX_LANE15_OVRD_REG0__TX_DCLK_EN_OVRD_VAL_15_MASK 0x00000001L
+#define PB0_TX_LANE15_OVRD_REG0__TX_DCLK_EN_OVRD_VAL_15__SHIFT 0x00000000
+#define PB0_TX_LANE15_OVRD_REG0__TX_DRV_DATA_EN_OVRD_EN_15_MASK 0x00000008L
+#define PB0_TX_LANE15_OVRD_REG0__TX_DRV_DATA_EN_OVRD_EN_15__SHIFT 0x00000003
+#define PB0_TX_LANE15_OVRD_REG0__TX_DRV_DATA_EN_OVRD_VAL_15_MASK 0x00000004L
+#define PB0_TX_LANE15_OVRD_REG0__TX_DRV_DATA_EN_OVRD_VAL_15__SHIFT 0x00000002
+#define PB0_TX_LANE15_OVRD_REG0__TX_DRV_PWRON_OVRD_EN_15_MASK 0x00000020L
+#define PB0_TX_LANE15_OVRD_REG0__TX_DRV_PWRON_OVRD_EN_15__SHIFT 0x00000005
+#define PB0_TX_LANE15_OVRD_REG0__TX_DRV_PWRON_OVRD_VAL_15_MASK 0x00000010L
+#define PB0_TX_LANE15_OVRD_REG0__TX_DRV_PWRON_OVRD_VAL_15__SHIFT 0x00000004
+#define PB0_TX_LANE15_OVRD_REG0__TX_FRONTEND_PWRON_OVRD_EN_15_MASK 0x00000080L
+#define PB0_TX_LANE15_OVRD_REG0__TX_FRONTEND_PWRON_OVRD_EN_15__SHIFT 0x00000007
+#define PB0_TX_LANE15_OVRD_REG0__TX_FRONTEND_PWRON_OVRD_VAL_15_MASK 0x00000040L
+#define PB0_TX_LANE15_OVRD_REG0__TX_FRONTEND_PWRON_OVRD_VAL_15__SHIFT 0x00000006
+#define PB0_TX_LANE15_SCI_STAT_OVRD_REG0__COEFFICIENT_15_MASK 0x0000fc00L
+#define PB0_TX_LANE15_SCI_STAT_OVRD_REG0__COEFFICIENT_15__SHIFT 0x0000000a
+#define PB0_TX_LANE15_SCI_STAT_OVRD_REG0__COEFFICIENTID_15_MASK 0x00000300L
+#define PB0_TX_LANE15_SCI_STAT_OVRD_REG0__COEFFICIENTID_15__SHIFT 0x00000008
+#define PB0_TX_LANE15_SCI_STAT_OVRD_REG0__DEEMPH_15_MASK 0x00000080L
+#define PB0_TX_LANE15_SCI_STAT_OVRD_REG0__DEEMPH_15__SHIFT 0x00000007
+#define PB0_TX_LANE15_SCI_STAT_OVRD_REG0__INCOHERENTCK_15_MASK 0x00000008L
+#define PB0_TX_LANE15_SCI_STAT_OVRD_REG0__INCOHERENTCK_15__SHIFT 0x00000003
+#define PB0_TX_LANE15_SCI_STAT_OVRD_REG0__TXMARG_15_MASK 0x00000070L
+#define PB0_TX_LANE15_SCI_STAT_OVRD_REG0__TXMARG_15__SHIFT 0x00000004
+#define PB0_TX_LANE15_SCI_STAT_OVRD_REG0__TXPWR_15_MASK 0x00000007L
+#define PB0_TX_LANE15_SCI_STAT_OVRD_REG0__TXPWR_15__SHIFT 0x00000000
+#define PB0_TX_LANE1_CTRL_REG0__TX_CFG_DISPCLK_MODE_1_MASK 0x00000001L
+#define PB0_TX_LANE1_CTRL_REG0__TX_CFG_DISPCLK_MODE_1__SHIFT 0x00000000
+#define PB0_TX_LANE1_CTRL_REG0__TX_CFG_INV_DATA_1_MASK 0x00000002L
+#define PB0_TX_LANE1_CTRL_REG0__TX_CFG_INV_DATA_1__SHIFT 0x00000001
+#define PB0_TX_LANE1_CTRL_REG0__TX_CFG_SWING_BOOST_EN_1_MASK 0x00000004L
+#define PB0_TX_LANE1_CTRL_REG0__TX_CFG_SWING_BOOST_EN_1__SHIFT 0x00000002
+#define PB0_TX_LANE1_CTRL_REG0__TX_DBG_PRBS_EN_1_MASK 0x00000008L
+#define PB0_TX_LANE1_CTRL_REG0__TX_DBG_PRBS_EN_1__SHIFT 0x00000003
+#define PB0_TX_LANE1_OVRD_REG0__TX_DCLK_EN_OVRD_EN_1_MASK 0x00000002L
+#define PB0_TX_LANE1_OVRD_REG0__TX_DCLK_EN_OVRD_EN_1__SHIFT 0x00000001
+#define PB0_TX_LANE1_OVRD_REG0__TX_DCLK_EN_OVRD_VAL_1_MASK 0x00000001L
+#define PB0_TX_LANE1_OVRD_REG0__TX_DCLK_EN_OVRD_VAL_1__SHIFT 0x00000000
+#define PB0_TX_LANE1_OVRD_REG0__TX_DRV_DATA_EN_OVRD_EN_1_MASK 0x00000008L
+#define PB0_TX_LANE1_OVRD_REG0__TX_DRV_DATA_EN_OVRD_EN_1__SHIFT 0x00000003
+#define PB0_TX_LANE1_OVRD_REG0__TX_DRV_DATA_EN_OVRD_VAL_1_MASK 0x00000004L
+#define PB0_TX_LANE1_OVRD_REG0__TX_DRV_DATA_EN_OVRD_VAL_1__SHIFT 0x00000002
+#define PB0_TX_LANE1_OVRD_REG0__TX_DRV_PWRON_OVRD_EN_1_MASK 0x00000020L
+#define PB0_TX_LANE1_OVRD_REG0__TX_DRV_PWRON_OVRD_EN_1__SHIFT 0x00000005
+#define PB0_TX_LANE1_OVRD_REG0__TX_DRV_PWRON_OVRD_VAL_1_MASK 0x00000010L
+#define PB0_TX_LANE1_OVRD_REG0__TX_DRV_PWRON_OVRD_VAL_1__SHIFT 0x00000004
+#define PB0_TX_LANE1_OVRD_REG0__TX_FRONTEND_PWRON_OVRD_EN_1_MASK 0x00000080L
+#define PB0_TX_LANE1_OVRD_REG0__TX_FRONTEND_PWRON_OVRD_EN_1__SHIFT 0x00000007
+#define PB0_TX_LANE1_OVRD_REG0__TX_FRONTEND_PWRON_OVRD_VAL_1_MASK 0x00000040L
+#define PB0_TX_LANE1_OVRD_REG0__TX_FRONTEND_PWRON_OVRD_VAL_1__SHIFT 0x00000006
+#define PB0_TX_LANE1_SCI_STAT_OVRD_REG0__COEFFICIENT_1_MASK 0x0000fc00L
+#define PB0_TX_LANE1_SCI_STAT_OVRD_REG0__COEFFICIENT_1__SHIFT 0x0000000a
+#define PB0_TX_LANE1_SCI_STAT_OVRD_REG0__COEFFICIENTID_1_MASK 0x00000300L
+#define PB0_TX_LANE1_SCI_STAT_OVRD_REG0__COEFFICIENTID_1__SHIFT 0x00000008
+#define PB0_TX_LANE1_SCI_STAT_OVRD_REG0__DEEMPH_1_MASK 0x00000080L
+#define PB0_TX_LANE1_SCI_STAT_OVRD_REG0__DEEMPH_1__SHIFT 0x00000007
+#define PB0_TX_LANE1_SCI_STAT_OVRD_REG0__INCOHERENTCK_1_MASK 0x00000008L
+#define PB0_TX_LANE1_SCI_STAT_OVRD_REG0__INCOHERENTCK_1__SHIFT 0x00000003
+#define PB0_TX_LANE1_SCI_STAT_OVRD_REG0__TXMARG_1_MASK 0x00000070L
+#define PB0_TX_LANE1_SCI_STAT_OVRD_REG0__TXMARG_1__SHIFT 0x00000004
+#define PB0_TX_LANE1_SCI_STAT_OVRD_REG0__TXPWR_1_MASK 0x00000007L
+#define PB0_TX_LANE1_SCI_STAT_OVRD_REG0__TXPWR_1__SHIFT 0x00000000
+#define PB0_TX_LANE2_CTRL_REG0__TX_CFG_DISPCLK_MODE_2_MASK 0x00000001L
+#define PB0_TX_LANE2_CTRL_REG0__TX_CFG_DISPCLK_MODE_2__SHIFT 0x00000000
+#define PB0_TX_LANE2_CTRL_REG0__TX_CFG_INV_DATA_2_MASK 0x00000002L
+#define PB0_TX_LANE2_CTRL_REG0__TX_CFG_INV_DATA_2__SHIFT 0x00000001
+#define PB0_TX_LANE2_CTRL_REG0__TX_CFG_SWING_BOOST_EN_2_MASK 0x00000004L
+#define PB0_TX_LANE2_CTRL_REG0__TX_CFG_SWING_BOOST_EN_2__SHIFT 0x00000002
+#define PB0_TX_LANE2_CTRL_REG0__TX_DBG_PRBS_EN_2_MASK 0x00000008L
+#define PB0_TX_LANE2_CTRL_REG0__TX_DBG_PRBS_EN_2__SHIFT 0x00000003
+#define PB0_TX_LANE2_OVRD_REG0__TX_DCLK_EN_OVRD_EN_2_MASK 0x00000002L
+#define PB0_TX_LANE2_OVRD_REG0__TX_DCLK_EN_OVRD_EN_2__SHIFT 0x00000001
+#define PB0_TX_LANE2_OVRD_REG0__TX_DCLK_EN_OVRD_VAL_2_MASK 0x00000001L
+#define PB0_TX_LANE2_OVRD_REG0__TX_DCLK_EN_OVRD_VAL_2__SHIFT 0x00000000
+#define PB0_TX_LANE2_OVRD_REG0__TX_DRV_DATA_EN_OVRD_EN_2_MASK 0x00000008L
+#define PB0_TX_LANE2_OVRD_REG0__TX_DRV_DATA_EN_OVRD_EN_2__SHIFT 0x00000003
+#define PB0_TX_LANE2_OVRD_REG0__TX_DRV_DATA_EN_OVRD_VAL_2_MASK 0x00000004L
+#define PB0_TX_LANE2_OVRD_REG0__TX_DRV_DATA_EN_OVRD_VAL_2__SHIFT 0x00000002
+#define PB0_TX_LANE2_OVRD_REG0__TX_DRV_PWRON_OVRD_EN_2_MASK 0x00000020L
+#define PB0_TX_LANE2_OVRD_REG0__TX_DRV_PWRON_OVRD_EN_2__SHIFT 0x00000005
+#define PB0_TX_LANE2_OVRD_REG0__TX_DRV_PWRON_OVRD_VAL_2_MASK 0x00000010L
+#define PB0_TX_LANE2_OVRD_REG0__TX_DRV_PWRON_OVRD_VAL_2__SHIFT 0x00000004
+#define PB0_TX_LANE2_OVRD_REG0__TX_FRONTEND_PWRON_OVRD_EN_2_MASK 0x00000080L
+#define PB0_TX_LANE2_OVRD_REG0__TX_FRONTEND_PWRON_OVRD_EN_2__SHIFT 0x00000007
+#define PB0_TX_LANE2_OVRD_REG0__TX_FRONTEND_PWRON_OVRD_VAL_2_MASK 0x00000040L
+#define PB0_TX_LANE2_OVRD_REG0__TX_FRONTEND_PWRON_OVRD_VAL_2__SHIFT 0x00000006
+#define PB0_TX_LANE2_SCI_STAT_OVRD_REG0__COEFFICIENT_2_MASK 0x0000fc00L
+#define PB0_TX_LANE2_SCI_STAT_OVRD_REG0__COEFFICIENT_2__SHIFT 0x0000000a
+#define PB0_TX_LANE2_SCI_STAT_OVRD_REG0__COEFFICIENTID_2_MASK 0x00000300L
+#define PB0_TX_LANE2_SCI_STAT_OVRD_REG0__COEFFICIENTID_2__SHIFT 0x00000008
+#define PB0_TX_LANE2_SCI_STAT_OVRD_REG0__DEEMPH_2_MASK 0x00000080L
+#define PB0_TX_LANE2_SCI_STAT_OVRD_REG0__DEEMPH_2__SHIFT 0x00000007
+#define PB0_TX_LANE2_SCI_STAT_OVRD_REG0__INCOHERENTCK_2_MASK 0x00000008L
+#define PB0_TX_LANE2_SCI_STAT_OVRD_REG0__INCOHERENTCK_2__SHIFT 0x00000003
+#define PB0_TX_LANE2_SCI_STAT_OVRD_REG0__TXMARG_2_MASK 0x00000070L
+#define PB0_TX_LANE2_SCI_STAT_OVRD_REG0__TXMARG_2__SHIFT 0x00000004
+#define PB0_TX_LANE2_SCI_STAT_OVRD_REG0__TXPWR_2_MASK 0x00000007L
+#define PB0_TX_LANE2_SCI_STAT_OVRD_REG0__TXPWR_2__SHIFT 0x00000000
+#define PB0_TX_LANE3_CTRL_REG0__TX_CFG_DISPCLK_MODE_3_MASK 0x00000001L
+#define PB0_TX_LANE3_CTRL_REG0__TX_CFG_DISPCLK_MODE_3__SHIFT 0x00000000
+#define PB0_TX_LANE3_CTRL_REG0__TX_CFG_INV_DATA_3_MASK 0x00000002L
+#define PB0_TX_LANE3_CTRL_REG0__TX_CFG_INV_DATA_3__SHIFT 0x00000001
+#define PB0_TX_LANE3_CTRL_REG0__TX_CFG_SWING_BOOST_EN_3_MASK 0x00000004L
+#define PB0_TX_LANE3_CTRL_REG0__TX_CFG_SWING_BOOST_EN_3__SHIFT 0x00000002
+#define PB0_TX_LANE3_CTRL_REG0__TX_DBG_PRBS_EN_3_MASK 0x00000008L
+#define PB0_TX_LANE3_CTRL_REG0__TX_DBG_PRBS_EN_3__SHIFT 0x00000003
+#define PB0_TX_LANE3_OVRD_REG0__TX_DCLK_EN_OVRD_EN_3_MASK 0x00000002L
+#define PB0_TX_LANE3_OVRD_REG0__TX_DCLK_EN_OVRD_EN_3__SHIFT 0x00000001
+#define PB0_TX_LANE3_OVRD_REG0__TX_DCLK_EN_OVRD_VAL_3_MASK 0x00000001L
+#define PB0_TX_LANE3_OVRD_REG0__TX_DCLK_EN_OVRD_VAL_3__SHIFT 0x00000000
+#define PB0_TX_LANE3_OVRD_REG0__TX_DRV_DATA_EN_OVRD_EN_3_MASK 0x00000008L
+#define PB0_TX_LANE3_OVRD_REG0__TX_DRV_DATA_EN_OVRD_EN_3__SHIFT 0x00000003
+#define PB0_TX_LANE3_OVRD_REG0__TX_DRV_DATA_EN_OVRD_VAL_3_MASK 0x00000004L
+#define PB0_TX_LANE3_OVRD_REG0__TX_DRV_DATA_EN_OVRD_VAL_3__SHIFT 0x00000002
+#define PB0_TX_LANE3_OVRD_REG0__TX_DRV_PWRON_OVRD_EN_3_MASK 0x00000020L
+#define PB0_TX_LANE3_OVRD_REG0__TX_DRV_PWRON_OVRD_EN_3__SHIFT 0x00000005
+#define PB0_TX_LANE3_OVRD_REG0__TX_DRV_PWRON_OVRD_VAL_3_MASK 0x00000010L
+#define PB0_TX_LANE3_OVRD_REG0__TX_DRV_PWRON_OVRD_VAL_3__SHIFT 0x00000004
+#define PB0_TX_LANE3_OVRD_REG0__TX_FRONTEND_PWRON_OVRD_EN_3_MASK 0x00000080L
+#define PB0_TX_LANE3_OVRD_REG0__TX_FRONTEND_PWRON_OVRD_EN_3__SHIFT 0x00000007
+#define PB0_TX_LANE3_OVRD_REG0__TX_FRONTEND_PWRON_OVRD_VAL_3_MASK 0x00000040L
+#define PB0_TX_LANE3_OVRD_REG0__TX_FRONTEND_PWRON_OVRD_VAL_3__SHIFT 0x00000006
+#define PB0_TX_LANE3_SCI_STAT_OVRD_REG0__COEFFICIENT_3_MASK 0x0000fc00L
+#define PB0_TX_LANE3_SCI_STAT_OVRD_REG0__COEFFICIENT_3__SHIFT 0x0000000a
+#define PB0_TX_LANE3_SCI_STAT_OVRD_REG0__COEFFICIENTID_3_MASK 0x00000300L
+#define PB0_TX_LANE3_SCI_STAT_OVRD_REG0__COEFFICIENTID_3__SHIFT 0x00000008
+#define PB0_TX_LANE3_SCI_STAT_OVRD_REG0__DEEMPH_3_MASK 0x00000080L
+#define PB0_TX_LANE3_SCI_STAT_OVRD_REG0__DEEMPH_3__SHIFT 0x00000007
+#define PB0_TX_LANE3_SCI_STAT_OVRD_REG0__INCOHERENTCK_3_MASK 0x00000008L
+#define PB0_TX_LANE3_SCI_STAT_OVRD_REG0__INCOHERENTCK_3__SHIFT 0x00000003
+#define PB0_TX_LANE3_SCI_STAT_OVRD_REG0__TXMARG_3_MASK 0x00000070L
+#define PB0_TX_LANE3_SCI_STAT_OVRD_REG0__TXMARG_3__SHIFT 0x00000004
+#define PB0_TX_LANE3_SCI_STAT_OVRD_REG0__TXPWR_3_MASK 0x00000007L
+#define PB0_TX_LANE3_SCI_STAT_OVRD_REG0__TXPWR_3__SHIFT 0x00000000
+#define PB0_TX_LANE4_CTRL_REG0__TX_CFG_DISPCLK_MODE_4_MASK 0x00000001L
+#define PB0_TX_LANE4_CTRL_REG0__TX_CFG_DISPCLK_MODE_4__SHIFT 0x00000000
+#define PB0_TX_LANE4_CTRL_REG0__TX_CFG_INV_DATA_4_MASK 0x00000002L
+#define PB0_TX_LANE4_CTRL_REG0__TX_CFG_INV_DATA_4__SHIFT 0x00000001
+#define PB0_TX_LANE4_CTRL_REG0__TX_CFG_SWING_BOOST_EN_4_MASK 0x00000004L
+#define PB0_TX_LANE4_CTRL_REG0__TX_CFG_SWING_BOOST_EN_4__SHIFT 0x00000002
+#define PB0_TX_LANE4_CTRL_REG0__TX_DBG_PRBS_EN_4_MASK 0x00000008L
+#define PB0_TX_LANE4_CTRL_REG0__TX_DBG_PRBS_EN_4__SHIFT 0x00000003
+#define PB0_TX_LANE4_OVRD_REG0__TX_DCLK_EN_OVRD_EN_4_MASK 0x00000002L
+#define PB0_TX_LANE4_OVRD_REG0__TX_DCLK_EN_OVRD_EN_4__SHIFT 0x00000001
+#define PB0_TX_LANE4_OVRD_REG0__TX_DCLK_EN_OVRD_VAL_4_MASK 0x00000001L
+#define PB0_TX_LANE4_OVRD_REG0__TX_DCLK_EN_OVRD_VAL_4__SHIFT 0x00000000
+#define PB0_TX_LANE4_OVRD_REG0__TX_DRV_DATA_EN_OVRD_EN_4_MASK 0x00000008L
+#define PB0_TX_LANE4_OVRD_REG0__TX_DRV_DATA_EN_OVRD_EN_4__SHIFT 0x00000003
+#define PB0_TX_LANE4_OVRD_REG0__TX_DRV_DATA_EN_OVRD_VAL_4_MASK 0x00000004L
+#define PB0_TX_LANE4_OVRD_REG0__TX_DRV_DATA_EN_OVRD_VAL_4__SHIFT 0x00000002
+#define PB0_TX_LANE4_OVRD_REG0__TX_DRV_PWRON_OVRD_EN_4_MASK 0x00000020L
+#define PB0_TX_LANE4_OVRD_REG0__TX_DRV_PWRON_OVRD_EN_4__SHIFT 0x00000005
+#define PB0_TX_LANE4_OVRD_REG0__TX_DRV_PWRON_OVRD_VAL_4_MASK 0x00000010L
+#define PB0_TX_LANE4_OVRD_REG0__TX_DRV_PWRON_OVRD_VAL_4__SHIFT 0x00000004
+#define PB0_TX_LANE4_OVRD_REG0__TX_FRONTEND_PWRON_OVRD_EN_4_MASK 0x00000080L
+#define PB0_TX_LANE4_OVRD_REG0__TX_FRONTEND_PWRON_OVRD_EN_4__SHIFT 0x00000007
+#define PB0_TX_LANE4_OVRD_REG0__TX_FRONTEND_PWRON_OVRD_VAL_4_MASK 0x00000040L
+#define PB0_TX_LANE4_OVRD_REG0__TX_FRONTEND_PWRON_OVRD_VAL_4__SHIFT 0x00000006
+#define PB0_TX_LANE4_SCI_STAT_OVRD_REG0__COEFFICIENT_4_MASK 0x0000fc00L
+#define PB0_TX_LANE4_SCI_STAT_OVRD_REG0__COEFFICIENT_4__SHIFT 0x0000000a
+#define PB0_TX_LANE4_SCI_STAT_OVRD_REG0__COEFFICIENTID_4_MASK 0x00000300L
+#define PB0_TX_LANE4_SCI_STAT_OVRD_REG0__COEFFICIENTID_4__SHIFT 0x00000008
+#define PB0_TX_LANE4_SCI_STAT_OVRD_REG0__DEEMPH_4_MASK 0x00000080L
+#define PB0_TX_LANE4_SCI_STAT_OVRD_REG0__DEEMPH_4__SHIFT 0x00000007
+#define PB0_TX_LANE4_SCI_STAT_OVRD_REG0__INCOHERENTCK_4_MASK 0x00000008L
+#define PB0_TX_LANE4_SCI_STAT_OVRD_REG0__INCOHERENTCK_4__SHIFT 0x00000003
+#define PB0_TX_LANE4_SCI_STAT_OVRD_REG0__TXMARG_4_MASK 0x00000070L
+#define PB0_TX_LANE4_SCI_STAT_OVRD_REG0__TXMARG_4__SHIFT 0x00000004
+#define PB0_TX_LANE4_SCI_STAT_OVRD_REG0__TXPWR_4_MASK 0x00000007L
+#define PB0_TX_LANE4_SCI_STAT_OVRD_REG0__TXPWR_4__SHIFT 0x00000000
+#define PB0_TX_LANE5_CTRL_REG0__TX_CFG_DISPCLK_MODE_5_MASK 0x00000001L
+#define PB0_TX_LANE5_CTRL_REG0__TX_CFG_DISPCLK_MODE_5__SHIFT 0x00000000
+#define PB0_TX_LANE5_CTRL_REG0__TX_CFG_INV_DATA_5_MASK 0x00000002L
+#define PB0_TX_LANE5_CTRL_REG0__TX_CFG_INV_DATA_5__SHIFT 0x00000001
+#define PB0_TX_LANE5_CTRL_REG0__TX_CFG_SWING_BOOST_EN_5_MASK 0x00000004L
+#define PB0_TX_LANE5_CTRL_REG0__TX_CFG_SWING_BOOST_EN_5__SHIFT 0x00000002
+#define PB0_TX_LANE5_CTRL_REG0__TX_DBG_PRBS_EN_5_MASK 0x00000008L
+#define PB0_TX_LANE5_CTRL_REG0__TX_DBG_PRBS_EN_5__SHIFT 0x00000003
+#define PB0_TX_LANE5_OVRD_REG0__TX_DCLK_EN_OVRD_EN_5_MASK 0x00000002L
+#define PB0_TX_LANE5_OVRD_REG0__TX_DCLK_EN_OVRD_EN_5__SHIFT 0x00000001
+#define PB0_TX_LANE5_OVRD_REG0__TX_DCLK_EN_OVRD_VAL_5_MASK 0x00000001L
+#define PB0_TX_LANE5_OVRD_REG0__TX_DCLK_EN_OVRD_VAL_5__SHIFT 0x00000000
+#define PB0_TX_LANE5_OVRD_REG0__TX_DRV_DATA_EN_OVRD_EN_5_MASK 0x00000008L
+#define PB0_TX_LANE5_OVRD_REG0__TX_DRV_DATA_EN_OVRD_EN_5__SHIFT 0x00000003
+#define PB0_TX_LANE5_OVRD_REG0__TX_DRV_DATA_EN_OVRD_VAL_5_MASK 0x00000004L
+#define PB0_TX_LANE5_OVRD_REG0__TX_DRV_DATA_EN_OVRD_VAL_5__SHIFT 0x00000002
+#define PB0_TX_LANE5_OVRD_REG0__TX_DRV_PWRON_OVRD_EN_5_MASK 0x00000020L
+#define PB0_TX_LANE5_OVRD_REG0__TX_DRV_PWRON_OVRD_EN_5__SHIFT 0x00000005
+#define PB0_TX_LANE5_OVRD_REG0__TX_DRV_PWRON_OVRD_VAL_5_MASK 0x00000010L
+#define PB0_TX_LANE5_OVRD_REG0__TX_DRV_PWRON_OVRD_VAL_5__SHIFT 0x00000004
+#define PB0_TX_LANE5_OVRD_REG0__TX_FRONTEND_PWRON_OVRD_EN_5_MASK 0x00000080L
+#define PB0_TX_LANE5_OVRD_REG0__TX_FRONTEND_PWRON_OVRD_EN_5__SHIFT 0x00000007
+#define PB0_TX_LANE5_OVRD_REG0__TX_FRONTEND_PWRON_OVRD_VAL_5_MASK 0x00000040L
+#define PB0_TX_LANE5_OVRD_REG0__TX_FRONTEND_PWRON_OVRD_VAL_5__SHIFT 0x00000006
+#define PB0_TX_LANE5_SCI_STAT_OVRD_REG0__COEFFICIENT_5_MASK 0x0000fc00L
+#define PB0_TX_LANE5_SCI_STAT_OVRD_REG0__COEFFICIENT_5__SHIFT 0x0000000a
+#define PB0_TX_LANE5_SCI_STAT_OVRD_REG0__COEFFICIENTID_5_MASK 0x00000300L
+#define PB0_TX_LANE5_SCI_STAT_OVRD_REG0__COEFFICIENTID_5__SHIFT 0x00000008
+#define PB0_TX_LANE5_SCI_STAT_OVRD_REG0__DEEMPH_5_MASK 0x00000080L
+#define PB0_TX_LANE5_SCI_STAT_OVRD_REG0__DEEMPH_5__SHIFT 0x00000007
+#define PB0_TX_LANE5_SCI_STAT_OVRD_REG0__INCOHERENTCK_5_MASK 0x00000008L
+#define PB0_TX_LANE5_SCI_STAT_OVRD_REG0__INCOHERENTCK_5__SHIFT 0x00000003
+#define PB0_TX_LANE5_SCI_STAT_OVRD_REG0__TXMARG_5_MASK 0x00000070L
+#define PB0_TX_LANE5_SCI_STAT_OVRD_REG0__TXMARG_5__SHIFT 0x00000004
+#define PB0_TX_LANE5_SCI_STAT_OVRD_REG0__TXPWR_5_MASK 0x00000007L
+#define PB0_TX_LANE5_SCI_STAT_OVRD_REG0__TXPWR_5__SHIFT 0x00000000
+#define PB0_TX_LANE6_CTRL_REG0__TX_CFG_DISPCLK_MODE_6_MASK 0x00000001L
+#define PB0_TX_LANE6_CTRL_REG0__TX_CFG_DISPCLK_MODE_6__SHIFT 0x00000000
+#define PB0_TX_LANE6_CTRL_REG0__TX_CFG_INV_DATA_6_MASK 0x00000002L
+#define PB0_TX_LANE6_CTRL_REG0__TX_CFG_INV_DATA_6__SHIFT 0x00000001
+#define PB0_TX_LANE6_CTRL_REG0__TX_CFG_SWING_BOOST_EN_6_MASK 0x00000004L
+#define PB0_TX_LANE6_CTRL_REG0__TX_CFG_SWING_BOOST_EN_6__SHIFT 0x00000002
+#define PB0_TX_LANE6_CTRL_REG0__TX_DBG_PRBS_EN_6_MASK 0x00000008L
+#define PB0_TX_LANE6_CTRL_REG0__TX_DBG_PRBS_EN_6__SHIFT 0x00000003
+#define PB0_TX_LANE6_OVRD_REG0__TX_DCLK_EN_OVRD_EN_6_MASK 0x00000002L
+#define PB0_TX_LANE6_OVRD_REG0__TX_DCLK_EN_OVRD_EN_6__SHIFT 0x00000001
+#define PB0_TX_LANE6_OVRD_REG0__TX_DCLK_EN_OVRD_VAL_6_MASK 0x00000001L
+#define PB0_TX_LANE6_OVRD_REG0__TX_DCLK_EN_OVRD_VAL_6__SHIFT 0x00000000
+#define PB0_TX_LANE6_OVRD_REG0__TX_DRV_DATA_EN_OVRD_EN_6_MASK 0x00000008L
+#define PB0_TX_LANE6_OVRD_REG0__TX_DRV_DATA_EN_OVRD_EN_6__SHIFT 0x00000003
+#define PB0_TX_LANE6_OVRD_REG0__TX_DRV_DATA_EN_OVRD_VAL_6_MASK 0x00000004L
+#define PB0_TX_LANE6_OVRD_REG0__TX_DRV_DATA_EN_OVRD_VAL_6__SHIFT 0x00000002
+#define PB0_TX_LANE6_OVRD_REG0__TX_DRV_PWRON_OVRD_EN_6_MASK 0x00000020L
+#define PB0_TX_LANE6_OVRD_REG0__TX_DRV_PWRON_OVRD_EN_6__SHIFT 0x00000005
+#define PB0_TX_LANE6_OVRD_REG0__TX_DRV_PWRON_OVRD_VAL_6_MASK 0x00000010L
+#define PB0_TX_LANE6_OVRD_REG0__TX_DRV_PWRON_OVRD_VAL_6__SHIFT 0x00000004
+#define PB0_TX_LANE6_OVRD_REG0__TX_FRONTEND_PWRON_OVRD_EN_6_MASK 0x00000080L
+#define PB0_TX_LANE6_OVRD_REG0__TX_FRONTEND_PWRON_OVRD_EN_6__SHIFT 0x00000007
+#define PB0_TX_LANE6_OVRD_REG0__TX_FRONTEND_PWRON_OVRD_VAL_6_MASK 0x00000040L
+#define PB0_TX_LANE6_OVRD_REG0__TX_FRONTEND_PWRON_OVRD_VAL_6__SHIFT 0x00000006
+#define PB0_TX_LANE6_SCI_STAT_OVRD_REG0__COEFFICIENT_6_MASK 0x0000fc00L
+#define PB0_TX_LANE6_SCI_STAT_OVRD_REG0__COEFFICIENT_6__SHIFT 0x0000000a
+#define PB0_TX_LANE6_SCI_STAT_OVRD_REG0__COEFFICIENTID_6_MASK 0x00000300L
+#define PB0_TX_LANE6_SCI_STAT_OVRD_REG0__COEFFICIENTID_6__SHIFT 0x00000008
+#define PB0_TX_LANE6_SCI_STAT_OVRD_REG0__DEEMPH_6_MASK 0x00000080L
+#define PB0_TX_LANE6_SCI_STAT_OVRD_REG0__DEEMPH_6__SHIFT 0x00000007
+#define PB0_TX_LANE6_SCI_STAT_OVRD_REG0__INCOHERENTCK_6_MASK 0x00000008L
+#define PB0_TX_LANE6_SCI_STAT_OVRD_REG0__INCOHERENTCK_6__SHIFT 0x00000003
+#define PB0_TX_LANE6_SCI_STAT_OVRD_REG0__TXMARG_6_MASK 0x00000070L
+#define PB0_TX_LANE6_SCI_STAT_OVRD_REG0__TXMARG_6__SHIFT 0x00000004
+#define PB0_TX_LANE6_SCI_STAT_OVRD_REG0__TXPWR_6_MASK 0x00000007L
+#define PB0_TX_LANE6_SCI_STAT_OVRD_REG0__TXPWR_6__SHIFT 0x00000000
+#define PB0_TX_LANE7_CTRL_REG0__TX_CFG_DISPCLK_MODE_7_MASK 0x00000001L
+#define PB0_TX_LANE7_CTRL_REG0__TX_CFG_DISPCLK_MODE_7__SHIFT 0x00000000
+#define PB0_TX_LANE7_CTRL_REG0__TX_CFG_INV_DATA_7_MASK 0x00000002L
+#define PB0_TX_LANE7_CTRL_REG0__TX_CFG_INV_DATA_7__SHIFT 0x00000001
+#define PB0_TX_LANE7_CTRL_REG0__TX_CFG_SWING_BOOST_EN_7_MASK 0x00000004L
+#define PB0_TX_LANE7_CTRL_REG0__TX_CFG_SWING_BOOST_EN_7__SHIFT 0x00000002
+#define PB0_TX_LANE7_CTRL_REG0__TX_DBG_PRBS_EN_7_MASK 0x00000008L
+#define PB0_TX_LANE7_CTRL_REG0__TX_DBG_PRBS_EN_7__SHIFT 0x00000003
+#define PB0_TX_LANE7_OVRD_REG0__TX_DCLK_EN_OVRD_EN_7_MASK 0x00000002L
+#define PB0_TX_LANE7_OVRD_REG0__TX_DCLK_EN_OVRD_EN_7__SHIFT 0x00000001
+#define PB0_TX_LANE7_OVRD_REG0__TX_DCLK_EN_OVRD_VAL_7_MASK 0x00000001L
+#define PB0_TX_LANE7_OVRD_REG0__TX_DCLK_EN_OVRD_VAL_7__SHIFT 0x00000000
+#define PB0_TX_LANE7_OVRD_REG0__TX_DRV_DATA_EN_OVRD_EN_7_MASK 0x00000008L
+#define PB0_TX_LANE7_OVRD_REG0__TX_DRV_DATA_EN_OVRD_EN_7__SHIFT 0x00000003
+#define PB0_TX_LANE7_OVRD_REG0__TX_DRV_DATA_EN_OVRD_VAL_7_MASK 0x00000004L
+#define PB0_TX_LANE7_OVRD_REG0__TX_DRV_DATA_EN_OVRD_VAL_7__SHIFT 0x00000002
+#define PB0_TX_LANE7_OVRD_REG0__TX_DRV_PWRON_OVRD_EN_7_MASK 0x00000020L
+#define PB0_TX_LANE7_OVRD_REG0__TX_DRV_PWRON_OVRD_EN_7__SHIFT 0x00000005
+#define PB0_TX_LANE7_OVRD_REG0__TX_DRV_PWRON_OVRD_VAL_7_MASK 0x00000010L
+#define PB0_TX_LANE7_OVRD_REG0__TX_DRV_PWRON_OVRD_VAL_7__SHIFT 0x00000004
+#define PB0_TX_LANE7_OVRD_REG0__TX_FRONTEND_PWRON_OVRD_EN_7_MASK 0x00000080L
+#define PB0_TX_LANE7_OVRD_REG0__TX_FRONTEND_PWRON_OVRD_EN_7__SHIFT 0x00000007
+#define PB0_TX_LANE7_OVRD_REG0__TX_FRONTEND_PWRON_OVRD_VAL_7_MASK 0x00000040L
+#define PB0_TX_LANE7_OVRD_REG0__TX_FRONTEND_PWRON_OVRD_VAL_7__SHIFT 0x00000006
+#define PB0_TX_LANE7_SCI_STAT_OVRD_REG0__COEFFICIENT_7_MASK 0x0000fc00L
+#define PB0_TX_LANE7_SCI_STAT_OVRD_REG0__COEFFICIENT_7__SHIFT 0x0000000a
+#define PB0_TX_LANE7_SCI_STAT_OVRD_REG0__COEFFICIENTID_7_MASK 0x00000300L
+#define PB0_TX_LANE7_SCI_STAT_OVRD_REG0__COEFFICIENTID_7__SHIFT 0x00000008
+#define PB0_TX_LANE7_SCI_STAT_OVRD_REG0__DEEMPH_7_MASK 0x00000080L
+#define PB0_TX_LANE7_SCI_STAT_OVRD_REG0__DEEMPH_7__SHIFT 0x00000007
+#define PB0_TX_LANE7_SCI_STAT_OVRD_REG0__INCOHERENTCK_7_MASK 0x00000008L
+#define PB0_TX_LANE7_SCI_STAT_OVRD_REG0__INCOHERENTCK_7__SHIFT 0x00000003
+#define PB0_TX_LANE7_SCI_STAT_OVRD_REG0__TXMARG_7_MASK 0x00000070L
+#define PB0_TX_LANE7_SCI_STAT_OVRD_REG0__TXMARG_7__SHIFT 0x00000004
+#define PB0_TX_LANE7_SCI_STAT_OVRD_REG0__TXPWR_7_MASK 0x00000007L
+#define PB0_TX_LANE7_SCI_STAT_OVRD_REG0__TXPWR_7__SHIFT 0x00000000
+#define PB0_TX_LANE8_CTRL_REG0__TX_CFG_DISPCLK_MODE_8_MASK 0x00000001L
+#define PB0_TX_LANE8_CTRL_REG0__TX_CFG_DISPCLK_MODE_8__SHIFT 0x00000000
+#define PB0_TX_LANE8_CTRL_REG0__TX_CFG_INV_DATA_8_MASK 0x00000002L
+#define PB0_TX_LANE8_CTRL_REG0__TX_CFG_INV_DATA_8__SHIFT 0x00000001
+#define PB0_TX_LANE8_CTRL_REG0__TX_CFG_SWING_BOOST_EN_8_MASK 0x00000004L
+#define PB0_TX_LANE8_CTRL_REG0__TX_CFG_SWING_BOOST_EN_8__SHIFT 0x00000002
+#define PB0_TX_LANE8_CTRL_REG0__TX_DBG_PRBS_EN_8_MASK 0x00000008L
+#define PB0_TX_LANE8_CTRL_REG0__TX_DBG_PRBS_EN_8__SHIFT 0x00000003
+#define PB0_TX_LANE8_OVRD_REG0__TX_DCLK_EN_OVRD_EN_8_MASK 0x00000002L
+#define PB0_TX_LANE8_OVRD_REG0__TX_DCLK_EN_OVRD_EN_8__SHIFT 0x00000001
+#define PB0_TX_LANE8_OVRD_REG0__TX_DCLK_EN_OVRD_VAL_8_MASK 0x00000001L
+#define PB0_TX_LANE8_OVRD_REG0__TX_DCLK_EN_OVRD_VAL_8__SHIFT 0x00000000
+#define PB0_TX_LANE8_OVRD_REG0__TX_DRV_DATA_EN_OVRD_EN_8_MASK 0x00000008L
+#define PB0_TX_LANE8_OVRD_REG0__TX_DRV_DATA_EN_OVRD_EN_8__SHIFT 0x00000003
+#define PB0_TX_LANE8_OVRD_REG0__TX_DRV_DATA_EN_OVRD_VAL_8_MASK 0x00000004L
+#define PB0_TX_LANE8_OVRD_REG0__TX_DRV_DATA_EN_OVRD_VAL_8__SHIFT 0x00000002
+#define PB0_TX_LANE8_OVRD_REG0__TX_DRV_PWRON_OVRD_EN_8_MASK 0x00000020L
+#define PB0_TX_LANE8_OVRD_REG0__TX_DRV_PWRON_OVRD_EN_8__SHIFT 0x00000005
+#define PB0_TX_LANE8_OVRD_REG0__TX_DRV_PWRON_OVRD_VAL_8_MASK 0x00000010L
+#define PB0_TX_LANE8_OVRD_REG0__TX_DRV_PWRON_OVRD_VAL_8__SHIFT 0x00000004
+#define PB0_TX_LANE8_OVRD_REG0__TX_FRONTEND_PWRON_OVRD_EN_8_MASK 0x00000080L
+#define PB0_TX_LANE8_OVRD_REG0__TX_FRONTEND_PWRON_OVRD_EN_8__SHIFT 0x00000007
+#define PB0_TX_LANE8_OVRD_REG0__TX_FRONTEND_PWRON_OVRD_VAL_8_MASK 0x00000040L
+#define PB0_TX_LANE8_OVRD_REG0__TX_FRONTEND_PWRON_OVRD_VAL_8__SHIFT 0x00000006
+#define PB0_TX_LANE8_SCI_STAT_OVRD_REG0__COEFFICIENT_8_MASK 0x0000fc00L
+#define PB0_TX_LANE8_SCI_STAT_OVRD_REG0__COEFFICIENT_8__SHIFT 0x0000000a
+#define PB0_TX_LANE8_SCI_STAT_OVRD_REG0__COEFFICIENTID_8_MASK 0x00000300L
+#define PB0_TX_LANE8_SCI_STAT_OVRD_REG0__COEFFICIENTID_8__SHIFT 0x00000008
+#define PB0_TX_LANE8_SCI_STAT_OVRD_REG0__DEEMPH_8_MASK 0x00000080L
+#define PB0_TX_LANE8_SCI_STAT_OVRD_REG0__DEEMPH_8__SHIFT 0x00000007
+#define PB0_TX_LANE8_SCI_STAT_OVRD_REG0__INCOHERENTCK_8_MASK 0x00000008L
+#define PB0_TX_LANE8_SCI_STAT_OVRD_REG0__INCOHERENTCK_8__SHIFT 0x00000003
+#define PB0_TX_LANE8_SCI_STAT_OVRD_REG0__TXMARG_8_MASK 0x00000070L
+#define PB0_TX_LANE8_SCI_STAT_OVRD_REG0__TXMARG_8__SHIFT 0x00000004
+#define PB0_TX_LANE8_SCI_STAT_OVRD_REG0__TXPWR_8_MASK 0x00000007L
+#define PB0_TX_LANE8_SCI_STAT_OVRD_REG0__TXPWR_8__SHIFT 0x00000000
+#define PB0_TX_LANE9_CTRL_REG0__TX_CFG_DISPCLK_MODE_9_MASK 0x00000001L
+#define PB0_TX_LANE9_CTRL_REG0__TX_CFG_DISPCLK_MODE_9__SHIFT 0x00000000
+#define PB0_TX_LANE9_CTRL_REG0__TX_CFG_INV_DATA_9_MASK 0x00000002L
+#define PB0_TX_LANE9_CTRL_REG0__TX_CFG_INV_DATA_9__SHIFT 0x00000001
+#define PB0_TX_LANE9_CTRL_REG0__TX_CFG_SWING_BOOST_EN_9_MASK 0x00000004L
+#define PB0_TX_LANE9_CTRL_REG0__TX_CFG_SWING_BOOST_EN_9__SHIFT 0x00000002
+#define PB0_TX_LANE9_CTRL_REG0__TX_DBG_PRBS_EN_9_MASK 0x00000008L
+#define PB0_TX_LANE9_CTRL_REG0__TX_DBG_PRBS_EN_9__SHIFT 0x00000003
+#define PB0_TX_LANE9_OVRD_REG0__TX_DCLK_EN_OVRD_EN_9_MASK 0x00000002L
+#define PB0_TX_LANE9_OVRD_REG0__TX_DCLK_EN_OVRD_EN_9__SHIFT 0x00000001
+#define PB0_TX_LANE9_OVRD_REG0__TX_DCLK_EN_OVRD_VAL_9_MASK 0x00000001L
+#define PB0_TX_LANE9_OVRD_REG0__TX_DCLK_EN_OVRD_VAL_9__SHIFT 0x00000000
+#define PB0_TX_LANE9_OVRD_REG0__TX_DRV_DATA_EN_OVRD_EN_9_MASK 0x00000008L
+#define PB0_TX_LANE9_OVRD_REG0__TX_DRV_DATA_EN_OVRD_EN_9__SHIFT 0x00000003
+#define PB0_TX_LANE9_OVRD_REG0__TX_DRV_DATA_EN_OVRD_VAL_9_MASK 0x00000004L
+#define PB0_TX_LANE9_OVRD_REG0__TX_DRV_DATA_EN_OVRD_VAL_9__SHIFT 0x00000002
+#define PB0_TX_LANE9_OVRD_REG0__TX_DRV_PWRON_OVRD_EN_9_MASK 0x00000020L
+#define PB0_TX_LANE9_OVRD_REG0__TX_DRV_PWRON_OVRD_EN_9__SHIFT 0x00000005
+#define PB0_TX_LANE9_OVRD_REG0__TX_DRV_PWRON_OVRD_VAL_9_MASK 0x00000010L
+#define PB0_TX_LANE9_OVRD_REG0__TX_DRV_PWRON_OVRD_VAL_9__SHIFT 0x00000004
+#define PB0_TX_LANE9_OVRD_REG0__TX_FRONTEND_PWRON_OVRD_EN_9_MASK 0x00000080L
+#define PB0_TX_LANE9_OVRD_REG0__TX_FRONTEND_PWRON_OVRD_EN_9__SHIFT 0x00000007
+#define PB0_TX_LANE9_OVRD_REG0__TX_FRONTEND_PWRON_OVRD_VAL_9_MASK 0x00000040L
+#define PB0_TX_LANE9_OVRD_REG0__TX_FRONTEND_PWRON_OVRD_VAL_9__SHIFT 0x00000006
+#define PB0_TX_LANE9_SCI_STAT_OVRD_REG0__COEFFICIENT_9_MASK 0x0000fc00L
+#define PB0_TX_LANE9_SCI_STAT_OVRD_REG0__COEFFICIENT_9__SHIFT 0x0000000a
+#define PB0_TX_LANE9_SCI_STAT_OVRD_REG0__COEFFICIENTID_9_MASK 0x00000300L
+#define PB0_TX_LANE9_SCI_STAT_OVRD_REG0__COEFFICIENTID_9__SHIFT 0x00000008
+#define PB0_TX_LANE9_SCI_STAT_OVRD_REG0__DEEMPH_9_MASK 0x00000080L
+#define PB0_TX_LANE9_SCI_STAT_OVRD_REG0__DEEMPH_9__SHIFT 0x00000007
+#define PB0_TX_LANE9_SCI_STAT_OVRD_REG0__INCOHERENTCK_9_MASK 0x00000008L
+#define PB0_TX_LANE9_SCI_STAT_OVRD_REG0__INCOHERENTCK_9__SHIFT 0x00000003
+#define PB0_TX_LANE9_SCI_STAT_OVRD_REG0__TXMARG_9_MASK 0x00000070L
+#define PB0_TX_LANE9_SCI_STAT_OVRD_REG0__TXMARG_9__SHIFT 0x00000004
+#define PB0_TX_LANE9_SCI_STAT_OVRD_REG0__TXPWR_9_MASK 0x00000007L
+#define PB0_TX_LANE9_SCI_STAT_OVRD_REG0__TXPWR_9__SHIFT 0x00000000
+#define PB1_DFT_DEBUG_CTRL_REG0__DFT_PHY_DEBUG_EN_MASK 0x00000001L
+#define PB1_DFT_DEBUG_CTRL_REG0__DFT_PHY_DEBUG_EN__SHIFT 0x00000000
+#define PB1_DFT_DEBUG_CTRL_REG0__DFT_PHY_DEBUG_MODE_MASK 0x0000003eL
+#define PB1_DFT_DEBUG_CTRL_REG0__DFT_PHY_DEBUG_MODE__SHIFT 0x00000001
+#define PB1_DFT_JIT_INJ_REG0__DFT_CLK_PER_STEP_MASK 0x00000f00L
+#define PB1_DFT_JIT_INJ_REG0__DFT_CLK_PER_STEP__SHIFT 0x00000008
+#define PB1_DFT_JIT_INJ_REG0__DFT_DECR_SWP_EN_MASK 0x00800000L
+#define PB1_DFT_JIT_INJ_REG0__DFT_DECR_SWP_EN__SHIFT 0x00000017
+#define PB1_DFT_JIT_INJ_REG0__DFT_INCR_SWP_EN_MASK 0x00400000L
+#define PB1_DFT_JIT_INJ_REG0__DFT_INCR_SWP_EN__SHIFT 0x00000016
+#define PB1_DFT_JIT_INJ_REG0__DFT_NUM_STEPS_MASK 0x0000001fL
+#define PB1_DFT_JIT_INJ_REG0__DFT_NUM_STEPS__SHIFT 0x00000000
+#define PB1_DFT_JIT_INJ_REG0__DFT_RECOVERY_TIME_MASK 0xff000000L
+#define PB1_DFT_JIT_INJ_REG0__DFT_RECOVERY_TIME__SHIFT 0x00000018
+#define PB1_DFT_JIT_INJ_REG1__DFT_BLOCK_EN_MASK 0x00010000L
+#define PB1_DFT_JIT_INJ_REG1__DFT_BLOCK_EN__SHIFT 0x00000010
+#define PB1_DFT_JIT_INJ_REG1__DFT_BYPASS_EN_MASK 0x00000100L
+#define PB1_DFT_JIT_INJ_REG1__DFT_BYPASS_EN__SHIFT 0x00000008
+#define PB1_DFT_JIT_INJ_REG1__DFT_BYPASS_VALUE_MASK 0x000000ffL
+#define PB1_DFT_JIT_INJ_REG1__DFT_BYPASS_VALUE__SHIFT 0x00000000
+#define PB1_DFT_JIT_INJ_REG2__DFT_LANE_EN_MASK 0x0000ffffL
+#define PB1_DFT_JIT_INJ_REG2__DFT_LANE_EN__SHIFT 0x00000000
+#define PB1_GLB_CTRL_REG0__BACKUP_MASK 0x0000ffffL
+#define PB1_GLB_CTRL_REG0__BACKUP__SHIFT 0x00000000
+#define PB1_GLB_CTRL_REG0__CFG_IDLEDET_TH_MASK 0x00030000L
+#define PB1_GLB_CTRL_REG0__CFG_IDLEDET_TH__SHIFT 0x00000010
+#define PB1_GLB_CTRL_REG0__DBG_RX2TXBYP_SEL_MASK 0x00700000L
+#define PB1_GLB_CTRL_REG0__DBG_RX2TXBYP_SEL__SHIFT 0x00000014
+#define PB1_GLB_CTRL_REG0__DBG_RXFEBYP_EN_MASK 0x00800000L
+#define PB1_GLB_CTRL_REG0__DBG_RXFEBYP_EN__SHIFT 0x00000017
+#define PB1_GLB_CTRL_REG0__DBG_RXPRBS_CLR_MASK 0x01000000L
+#define PB1_GLB_CTRL_REG0__DBG_RXPRBS_CLR__SHIFT 0x00000018
+#define PB1_GLB_CTRL_REG0__DBG_RXTOGGLE_EN_MASK 0x02000000L
+#define PB1_GLB_CTRL_REG0__DBG_RXTOGGLE_EN__SHIFT 0x00000019
+#define PB1_GLB_CTRL_REG0__DBG_TX2RXLBACK_EN_MASK 0x04000000L
+#define PB1_GLB_CTRL_REG0__DBG_TX2RXLBACK_EN__SHIFT 0x0000001a
+#define PB1_GLB_CTRL_REG0__TXCFG_CMGOOD_RANGE_MASK 0xc0000000L
+#define PB1_GLB_CTRL_REG0__TXCFG_CMGOOD_RANGE__SHIFT 0x0000001e
+#define PB1_GLB_CTRL_REG1__PLL_CFG_DISPCLK_DIV_MASK 0x80000000L
+#define PB1_GLB_CTRL_REG1__PLL_CFG_DISPCLK_DIV__SHIFT 0x0000001f
+#define PB1_GLB_CTRL_REG1__RXDBG_CDR_FR_BYP_EN_MASK 0x00000001L
+#define PB1_GLB_CTRL_REG1__RXDBG_CDR_FR_BYP_EN__SHIFT 0x00000000
+#define PB1_GLB_CTRL_REG1__RXDBG_CDR_FR_BYP_VAL_MASK 0x0000007eL
+#define PB1_GLB_CTRL_REG1__RXDBG_CDR_FR_BYP_VAL__SHIFT 0x00000001
+#define PB1_GLB_CTRL_REG1__RXDBG_CDR_PH_BYP_EN_MASK 0x00000080L
+#define PB1_GLB_CTRL_REG1__RXDBG_CDR_PH_BYP_EN__SHIFT 0x00000007
+#define PB1_GLB_CTRL_REG1__RXDBG_CDR_PH_BYP_VAL_MASK 0x00003f00L
+#define PB1_GLB_CTRL_REG1__RXDBG_CDR_PH_BYP_VAL__SHIFT 0x00000008
+#define PB1_GLB_CTRL_REG1__RXDBG_D0TH_BYP_EN_MASK 0x00004000L
+#define PB1_GLB_CTRL_REG1__RXDBG_D0TH_BYP_EN__SHIFT 0x0000000e
+#define PB1_GLB_CTRL_REG1__RXDBG_D0TH_BYP_VAL_MASK 0x003f8000L
+#define PB1_GLB_CTRL_REG1__RXDBG_D0TH_BYP_VAL__SHIFT 0x0000000f
+#define PB1_GLB_CTRL_REG1__RXDBG_D1TH_BYP_EN_MASK 0x00400000L
+#define PB1_GLB_CTRL_REG1__RXDBG_D1TH_BYP_EN__SHIFT 0x00000016
+#define PB1_GLB_CTRL_REG1__RXDBG_D1TH_BYP_VAL_MASK 0x3f800000L
+#define PB1_GLB_CTRL_REG1__RXDBG_D1TH_BYP_VAL__SHIFT 0x00000017
+#define PB1_GLB_CTRL_REG1__TST_LOSPDTST_EN_MASK 0x40000000L
+#define PB1_GLB_CTRL_REG1__TST_LOSPDTST_EN__SHIFT 0x0000001e
+#define PB1_GLB_CTRL_REG2__RXDBG_D2TH_BYP_EN_MASK 0x00000001L
+#define PB1_GLB_CTRL_REG2__RXDBG_D2TH_BYP_EN__SHIFT 0x00000000
+#define PB1_GLB_CTRL_REG2__RXDBG_D2TH_BYP_VAL_MASK 0x000000feL
+#define PB1_GLB_CTRL_REG2__RXDBG_D2TH_BYP_VAL__SHIFT 0x00000001
+#define PB1_GLB_CTRL_REG2__RXDBG_D3TH_BYP_EN_MASK 0x00000100L
+#define PB1_GLB_CTRL_REG2__RXDBG_D3TH_BYP_EN__SHIFT 0x00000008
+#define PB1_GLB_CTRL_REG2__RXDBG_D3TH_BYP_VAL_MASK 0x0000fe00L
+#define PB1_GLB_CTRL_REG2__RXDBG_D3TH_BYP_VAL__SHIFT 0x00000009
+#define PB1_GLB_CTRL_REG2__RXDBG_DXTH_BYP_EN_MASK 0x00010000L
+#define PB1_GLB_CTRL_REG2__RXDBG_DXTH_BYP_EN__SHIFT 0x00000010
+#define PB1_GLB_CTRL_REG2__RXDBG_DXTH_BYP_VAL_MASK 0x00fe0000L
+#define PB1_GLB_CTRL_REG2__RXDBG_DXTH_BYP_VAL__SHIFT 0x00000011
+#define PB1_GLB_CTRL_REG2__RXDBG_ETH_BYP_EN_MASK 0x01000000L
+#define PB1_GLB_CTRL_REG2__RXDBG_ETH_BYP_EN__SHIFT 0x00000018
+#define PB1_GLB_CTRL_REG2__RXDBG_ETH_BYP_VAL_MASK 0xfe000000L
+#define PB1_GLB_CTRL_REG2__RXDBG_ETH_BYP_VAL__SHIFT 0x00000019
+#define PB1_GLB_CTRL_REG3__BG_CFG_LC_REG_VREF0_SEL_MASK 0x00000060L
+#define PB1_GLB_CTRL_REG3__BG_CFG_LC_REG_VREF0_SEL__SHIFT 0x00000005
+#define PB1_GLB_CTRL_REG3__BG_CFG_LC_REG_VREF1_SEL_MASK 0x00000180L
+#define PB1_GLB_CTRL_REG3__BG_CFG_LC_REG_VREF1_SEL__SHIFT 0x00000007
+#define PB1_GLB_CTRL_REG3__BG_CFG_RO_REG_VREF_SEL_MASK 0x00000600L
+#define PB1_GLB_CTRL_REG3__BG_CFG_RO_REG_VREF_SEL__SHIFT 0x00000009
+#define PB1_GLB_CTRL_REG3__BG_DBG_ANALOG_SEL_MASK 0x0001c000L
+#define PB1_GLB_CTRL_REG3__BG_DBG_ANALOG_SEL__SHIFT 0x0000000e
+#define PB1_GLB_CTRL_REG3__BG_DBG_IREFBYP_EN_MASK 0x00001000L
+#define PB1_GLB_CTRL_REG3__BG_DBG_IREFBYP_EN__SHIFT 0x0000000c
+#define PB1_GLB_CTRL_REG3__BG_DBG_VREFBYP_EN_MASK 0x00000800L
+#define PB1_GLB_CTRL_REG3__BG_DBG_VREFBYP_EN__SHIFT 0x0000000b
+#define PB1_GLB_CTRL_REG3__DBG_DLL_CLK_SEL_MASK 0x001c0000L
+#define PB1_GLB_CTRL_REG3__DBG_DLL_CLK_SEL__SHIFT 0x00000012
+#define PB1_GLB_CTRL_REG3__DBG_RXLEQ_DCATTN_BYP_OVR_DISABLE_MASK 0x80000000L
+#define PB1_GLB_CTRL_REG3__DBG_RXLEQ_DCATTN_BYP_OVR_DISABLE__SHIFT 0x0000001f
+#define PB1_GLB_CTRL_REG3__DBG_RXPI_OFFSET_BYP_EN_MASK 0x00400000L
+#define PB1_GLB_CTRL_REG3__DBG_RXPI_OFFSET_BYP_EN__SHIFT 0x00000016
+#define PB1_GLB_CTRL_REG3__DBG_RXPI_OFFSET_BYP_VAL_MASK 0x07800000L
+#define PB1_GLB_CTRL_REG3__DBG_RXPI_OFFSET_BYP_VAL__SHIFT 0x00000017
+#define PB1_GLB_CTRL_REG3__DBG_RXSWAPDX_BYP_EN_MASK 0x08000000L
+#define PB1_GLB_CTRL_REG3__DBG_RXSWAPDX_BYP_EN__SHIFT 0x0000001b
+#define PB1_GLB_CTRL_REG3__DBG_RXSWAPDX_BYP_VAL_MASK 0x70000000L
+#define PB1_GLB_CTRL_REG3__DBG_RXSWAPDX_BYP_VAL__SHIFT 0x0000001c
+#define PB1_GLB_CTRL_REG3__PLL_DISPCLK_CMOS_SEL_MASK 0x00200000L
+#define PB1_GLB_CTRL_REG3__PLL_DISPCLK_CMOS_SEL__SHIFT 0x00000015
+#define PB1_GLB_CTRL_REG3__RXDBG_SEL_MASK 0x0000001fL
+#define PB1_GLB_CTRL_REG3__RXDBG_SEL__SHIFT 0x00000000
+#define PB1_GLB_CTRL_REG4__DBG_RXAPU_EXEC_MASK 0x03c00000L
+#define PB1_GLB_CTRL_REG4__DBG_RXAPU_EXEC__SHIFT 0x00000016
+#define PB1_GLB_CTRL_REG4__DBG_RXAPU_INST_MASK 0x0000ffffL
+#define PB1_GLB_CTRL_REG4__DBG_RXAPU_INST__SHIFT 0x00000000
+#define PB1_GLB_CTRL_REG4__DBG_RXDFEMUX_BYP_EN_MASK 0x00040000L
+#define PB1_GLB_CTRL_REG4__DBG_RXDFEMUX_BYP_EN__SHIFT 0x00000012
+#define PB1_GLB_CTRL_REG4__DBG_RXDFEMUX_BYP_VAL_MASK 0x00030000L
+#define PB1_GLB_CTRL_REG4__DBG_RXDFEMUX_BYP_VAL__SHIFT 0x00000010
+#define PB1_GLB_CTRL_REG4__DBG_RXDLL_VREG_REF_SEL_MASK 0x04000000L
+#define PB1_GLB_CTRL_REG4__DBG_RXDLL_VREG_REF_SEL__SHIFT 0x0000001a
+#define PB1_GLB_CTRL_REG4__DBG_RXRDATA_GATING_DISABLE_MASK 0x10000000L
+#define PB1_GLB_CTRL_REG4__DBG_RXRDATA_GATING_DISABLE__SHIFT 0x0000001c
+#define PB1_GLB_CTRL_REG4__PWRGOOD_OVRD_MASK 0x08000000L
+#define PB1_GLB_CTRL_REG4__PWRGOOD_OVRD__SHIFT 0x0000001b
+#define PB1_GLB_CTRL_REG5__DBG_RXAPU_MODE_MASK 0x000000ffL
+#define PB1_GLB_CTRL_REG5__DBG_RXAPU_MODE__SHIFT 0x00000000
+#define PB1_GLB_OVRD_REG0__TXPDTERM_VAL_OVRD_VAL_MASK 0x0000ffffL
+#define PB1_GLB_OVRD_REG0__TXPDTERM_VAL_OVRD_VAL__SHIFT 0x00000000
+#define PB1_GLB_OVRD_REG0__TXPUTERM_VAL_OVRD_VAL_MASK 0xffff0000L
+#define PB1_GLB_OVRD_REG0__TXPUTERM_VAL_OVRD_VAL__SHIFT 0x00000010
+#define PB1_GLB_OVRD_REG1__RXTERM_VAL_OVRD_EN_MASK 0x00008000L
+#define PB1_GLB_OVRD_REG1__RXTERM_VAL_OVRD_EN__SHIFT 0x0000000f
+#define PB1_GLB_OVRD_REG1__RXTERM_VAL_OVRD_VAL_MASK 0xffff0000L
+#define PB1_GLB_OVRD_REG1__RXTERM_VAL_OVRD_VAL__SHIFT 0x00000010
+#define PB1_GLB_OVRD_REG1__TST_LOSPDTST_RST_OVRD_EN_MASK 0x00000004L
+#define PB1_GLB_OVRD_REG1__TST_LOSPDTST_RST_OVRD_EN__SHIFT 0x00000002
+#define PB1_GLB_OVRD_REG1__TST_LOSPDTST_RST_OVRD_VAL_MASK 0x00000008L
+#define PB1_GLB_OVRD_REG1__TST_LOSPDTST_RST_OVRD_VAL__SHIFT 0x00000003
+#define PB1_GLB_OVRD_REG1__TXPDTERM_VAL_OVRD_EN_MASK 0x00000001L
+#define PB1_GLB_OVRD_REG1__TXPDTERM_VAL_OVRD_EN__SHIFT 0x00000000
+#define PB1_GLB_OVRD_REG1__TXPUTERM_VAL_OVRD_EN_MASK 0x00000002L
+#define PB1_GLB_OVRD_REG1__TXPUTERM_VAL_OVRD_EN__SHIFT 0x00000001
+#define PB1_GLB_OVRD_REG2__BG_PWRON_OVRD_EN_MASK 0x00000001L
+#define PB1_GLB_OVRD_REG2__BG_PWRON_OVRD_EN__SHIFT 0x00000000
+#define PB1_GLB_OVRD_REG2__BG_PWRON_OVRD_VAL_MASK 0x00000002L
+#define PB1_GLB_OVRD_REG2__BG_PWRON_OVRD_VAL__SHIFT 0x00000001
+#define PB1_GLB_SCI_STAT_OVRD_REG0__IGNR_ALL_SCI_UPDT_L0T3_MASK 0x00000001L
+#define PB1_GLB_SCI_STAT_OVRD_REG0__IGNR_ALL_SCI_UPDT_L0T3__SHIFT 0x00000000
+#define PB1_GLB_SCI_STAT_OVRD_REG0__IGNR_ALL_SCI_UPDT_L12T15_MASK 0x00000008L
+#define PB1_GLB_SCI_STAT_OVRD_REG0__IGNR_ALL_SCI_UPDT_L12T15__SHIFT 0x00000003
+#define PB1_GLB_SCI_STAT_OVRD_REG0__IGNR_ALL_SCI_UPDT_L4T7_MASK 0x00000002L
+#define PB1_GLB_SCI_STAT_OVRD_REG0__IGNR_ALL_SCI_UPDT_L4T7__SHIFT 0x00000001
+#define PB1_GLB_SCI_STAT_OVRD_REG0__IGNR_ALL_SCI_UPDT_L8T11_MASK 0x00000004L
+#define PB1_GLB_SCI_STAT_OVRD_REG0__IGNR_ALL_SCI_UPDT_L8T11__SHIFT 0x00000002
+#define PB1_GLB_SCI_STAT_OVRD_REG0__IGNR_IMPCAL_ACTIVE_SCI_UPDT_MASK 0x00000010L
+#define PB1_GLB_SCI_STAT_OVRD_REG0__IGNR_IMPCAL_ACTIVE_SCI_UPDT__SHIFT 0x00000004
+#define PB1_GLB_SCI_STAT_OVRD_REG0__IMPCAL_ACTIVE_MASK 0x00100000L
+#define PB1_GLB_SCI_STAT_OVRD_REG0__IMPCAL_ACTIVE__SHIFT 0x00000014
+#define PB1_GLB_SCI_STAT_OVRD_REG0__RXIMP_MASK 0x000f0000L
+#define PB1_GLB_SCI_STAT_OVRD_REG0__RXIMP__SHIFT 0x00000010
+#define PB1_GLB_SCI_STAT_OVRD_REG0__TXNIMP_MASK 0x00000f00L
+#define PB1_GLB_SCI_STAT_OVRD_REG0__TXNIMP__SHIFT 0x00000008
+#define PB1_GLB_SCI_STAT_OVRD_REG0__TXPIMP_MASK 0x0000f000L
+#define PB1_GLB_SCI_STAT_OVRD_REG0__TXPIMP__SHIFT 0x0000000c
+#define PB1_GLB_SCI_STAT_OVRD_REG1__DLL_LOCK_0_MASK 0x00001000L
+#define PB1_GLB_SCI_STAT_OVRD_REG1__DLL_LOCK_0__SHIFT 0x0000000c
+#define PB1_GLB_SCI_STAT_OVRD_REG1__DLL_LOCK_1_MASK 0x00002000L
+#define PB1_GLB_SCI_STAT_OVRD_REG1__DLL_LOCK_1__SHIFT 0x0000000d
+#define PB1_GLB_SCI_STAT_OVRD_REG1__DLL_LOCK_2_MASK 0x00004000L
+#define PB1_GLB_SCI_STAT_OVRD_REG1__DLL_LOCK_2__SHIFT 0x0000000e
+#define PB1_GLB_SCI_STAT_OVRD_REG1__DLL_LOCK_3_MASK 0x00008000L
+#define PB1_GLB_SCI_STAT_OVRD_REG1__DLL_LOCK_3__SHIFT 0x0000000f
+#define PB1_GLB_SCI_STAT_OVRD_REG1__FREQDIV_0_MASK 0x000c0000L
+#define PB1_GLB_SCI_STAT_OVRD_REG1__FREQDIV_0__SHIFT 0x00000012
+#define PB1_GLB_SCI_STAT_OVRD_REG1__FREQDIV_1_MASK 0x00c00000L
+#define PB1_GLB_SCI_STAT_OVRD_REG1__FREQDIV_1__SHIFT 0x00000016
+#define PB1_GLB_SCI_STAT_OVRD_REG1__FREQDIV_2_MASK 0x0c000000L
+#define PB1_GLB_SCI_STAT_OVRD_REG1__FREQDIV_2__SHIFT 0x0000001a
+#define PB1_GLB_SCI_STAT_OVRD_REG1__FREQDIV_3_MASK 0xc0000000L
+#define PB1_GLB_SCI_STAT_OVRD_REG1__FREQDIV_3__SHIFT 0x0000001e
+#define PB1_GLB_SCI_STAT_OVRD_REG1__IGNR_DLL_LOCK_SCI_UPDT_L0T3_MASK 0x00000004L
+#define PB1_GLB_SCI_STAT_OVRD_REG1__IGNR_DLL_LOCK_SCI_UPDT_L0T3__SHIFT 0x00000002
+#define PB1_GLB_SCI_STAT_OVRD_REG1__IGNR_FREQDIV_SCI_UPDT_L0T3_MASK 0x00000002L
+#define PB1_GLB_SCI_STAT_OVRD_REG1__IGNR_FREQDIV_SCI_UPDT_L0T3__SHIFT 0x00000001
+#define PB1_GLB_SCI_STAT_OVRD_REG1__IGNR_MODE_SCI_UPDT_L0T3_MASK 0x00000001L
+#define PB1_GLB_SCI_STAT_OVRD_REG1__IGNR_MODE_SCI_UPDT_L0T3__SHIFT 0x00000000
+#define PB1_GLB_SCI_STAT_OVRD_REG1__MODE_0_MASK 0x00030000L
+#define PB1_GLB_SCI_STAT_OVRD_REG1__MODE_0__SHIFT 0x00000010
+#define PB1_GLB_SCI_STAT_OVRD_REG1__MODE_1_MASK 0x00300000L
+#define PB1_GLB_SCI_STAT_OVRD_REG1__MODE_1__SHIFT 0x00000014
+#define PB1_GLB_SCI_STAT_OVRD_REG1__MODE_2_MASK 0x03000000L
+#define PB1_GLB_SCI_STAT_OVRD_REG1__MODE_2__SHIFT 0x00000018
+#define PB1_GLB_SCI_STAT_OVRD_REG1__MODE_3_MASK 0x30000000L
+#define PB1_GLB_SCI_STAT_OVRD_REG1__MODE_3__SHIFT 0x0000001c
+#define PB1_GLB_SCI_STAT_OVRD_REG2__DLL_LOCK_4_MASK 0x00001000L
+#define PB1_GLB_SCI_STAT_OVRD_REG2__DLL_LOCK_4__SHIFT 0x0000000c
+#define PB1_GLB_SCI_STAT_OVRD_REG2__DLL_LOCK_5_MASK 0x00002000L
+#define PB1_GLB_SCI_STAT_OVRD_REG2__DLL_LOCK_5__SHIFT 0x0000000d
+#define PB1_GLB_SCI_STAT_OVRD_REG2__DLL_LOCK_6_MASK 0x00004000L
+#define PB1_GLB_SCI_STAT_OVRD_REG2__DLL_LOCK_6__SHIFT 0x0000000e
+#define PB1_GLB_SCI_STAT_OVRD_REG2__DLL_LOCK_7_MASK 0x00008000L
+#define PB1_GLB_SCI_STAT_OVRD_REG2__DLL_LOCK_7__SHIFT 0x0000000f
+#define PB1_GLB_SCI_STAT_OVRD_REG2__FREQDIV_4_MASK 0x000c0000L
+#define PB1_GLB_SCI_STAT_OVRD_REG2__FREQDIV_4__SHIFT 0x00000012
+#define PB1_GLB_SCI_STAT_OVRD_REG2__FREQDIV_5_MASK 0x00c00000L
+#define PB1_GLB_SCI_STAT_OVRD_REG2__FREQDIV_5__SHIFT 0x00000016
+#define PB1_GLB_SCI_STAT_OVRD_REG2__FREQDIV_6_MASK 0x0c000000L
+#define PB1_GLB_SCI_STAT_OVRD_REG2__FREQDIV_6__SHIFT 0x0000001a
+#define PB1_GLB_SCI_STAT_OVRD_REG2__FREQDIV_7_MASK 0xc0000000L
+#define PB1_GLB_SCI_STAT_OVRD_REG2__FREQDIV_7__SHIFT 0x0000001e
+#define PB1_GLB_SCI_STAT_OVRD_REG2__IGNR_DLL_LOCK_SCI_UPDT_L4T7_MASK 0x00000004L
+#define PB1_GLB_SCI_STAT_OVRD_REG2__IGNR_DLL_LOCK_SCI_UPDT_L4T7__SHIFT 0x00000002
+#define PB1_GLB_SCI_STAT_OVRD_REG2__IGNR_FREQDIV_SCI_UPDT_L4T7_MASK 0x00000002L
+#define PB1_GLB_SCI_STAT_OVRD_REG2__IGNR_FREQDIV_SCI_UPDT_L4T7__SHIFT 0x00000001
+#define PB1_GLB_SCI_STAT_OVRD_REG2__IGNR_MODE_SCI_UPDT_L4T7_MASK 0x00000001L
+#define PB1_GLB_SCI_STAT_OVRD_REG2__IGNR_MODE_SCI_UPDT_L4T7__SHIFT 0x00000000
+#define PB1_GLB_SCI_STAT_OVRD_REG2__MODE_4_MASK 0x00030000L
+#define PB1_GLB_SCI_STAT_OVRD_REG2__MODE_4__SHIFT 0x00000010
+#define PB1_GLB_SCI_STAT_OVRD_REG2__MODE_5_MASK 0x00300000L
+#define PB1_GLB_SCI_STAT_OVRD_REG2__MODE_5__SHIFT 0x00000014
+#define PB1_GLB_SCI_STAT_OVRD_REG2__MODE_6_MASK 0x03000000L
+#define PB1_GLB_SCI_STAT_OVRD_REG2__MODE_6__SHIFT 0x00000018
+#define PB1_GLB_SCI_STAT_OVRD_REG2__MODE_7_MASK 0x30000000L
+#define PB1_GLB_SCI_STAT_OVRD_REG2__MODE_7__SHIFT 0x0000001c
+#define PB1_GLB_SCI_STAT_OVRD_REG3__DLL_LOCK_10_MASK 0x00004000L
+#define PB1_GLB_SCI_STAT_OVRD_REG3__DLL_LOCK_10__SHIFT 0x0000000e
+#define PB1_GLB_SCI_STAT_OVRD_REG3__DLL_LOCK_11_MASK 0x00008000L
+#define PB1_GLB_SCI_STAT_OVRD_REG3__DLL_LOCK_11__SHIFT 0x0000000f
+#define PB1_GLB_SCI_STAT_OVRD_REG3__DLL_LOCK_8_MASK 0x00001000L
+#define PB1_GLB_SCI_STAT_OVRD_REG3__DLL_LOCK_8__SHIFT 0x0000000c
+#define PB1_GLB_SCI_STAT_OVRD_REG3__DLL_LOCK_9_MASK 0x00002000L
+#define PB1_GLB_SCI_STAT_OVRD_REG3__DLL_LOCK_9__SHIFT 0x0000000d
+#define PB1_GLB_SCI_STAT_OVRD_REG3__FREQDIV_10_MASK 0x0c000000L
+#define PB1_GLB_SCI_STAT_OVRD_REG3__FREQDIV_10__SHIFT 0x0000001a
+#define PB1_GLB_SCI_STAT_OVRD_REG3__FREQDIV_11_MASK 0xc0000000L
+#define PB1_GLB_SCI_STAT_OVRD_REG3__FREQDIV_11__SHIFT 0x0000001e
+#define PB1_GLB_SCI_STAT_OVRD_REG3__FREQDIV_8_MASK 0x000c0000L
+#define PB1_GLB_SCI_STAT_OVRD_REG3__FREQDIV_8__SHIFT 0x00000012
+#define PB1_GLB_SCI_STAT_OVRD_REG3__FREQDIV_9_MASK 0x00c00000L
+#define PB1_GLB_SCI_STAT_OVRD_REG3__FREQDIV_9__SHIFT 0x00000016
+#define PB1_GLB_SCI_STAT_OVRD_REG3__IGNR_DLL_LOCK_SCI_UPDT_L8T11_MASK 0x00000004L
+#define PB1_GLB_SCI_STAT_OVRD_REG3__IGNR_DLL_LOCK_SCI_UPDT_L8T11__SHIFT 0x00000002
+#define PB1_GLB_SCI_STAT_OVRD_REG3__IGNR_FREQDIV_SCI_UPDT_L8T11_MASK 0x00000002L
+#define PB1_GLB_SCI_STAT_OVRD_REG3__IGNR_FREQDIV_SCI_UPDT_L8T11__SHIFT 0x00000001
+#define PB1_GLB_SCI_STAT_OVRD_REG3__IGNR_MODE_SCI_UPDT_L8T11_MASK 0x00000001L
+#define PB1_GLB_SCI_STAT_OVRD_REG3__IGNR_MODE_SCI_UPDT_L8T11__SHIFT 0x00000000
+#define PB1_GLB_SCI_STAT_OVRD_REG3__MODE_10_MASK 0x03000000L
+#define PB1_GLB_SCI_STAT_OVRD_REG3__MODE_10__SHIFT 0x00000018
+#define PB1_GLB_SCI_STAT_OVRD_REG3__MODE_11_MASK 0x30000000L
+#define PB1_GLB_SCI_STAT_OVRD_REG3__MODE_11__SHIFT 0x0000001c
+#define PB1_GLB_SCI_STAT_OVRD_REG3__MODE_8_MASK 0x00030000L
+#define PB1_GLB_SCI_STAT_OVRD_REG3__MODE_8__SHIFT 0x00000010
+#define PB1_GLB_SCI_STAT_OVRD_REG3__MODE_9_MASK 0x00300000L
+#define PB1_GLB_SCI_STAT_OVRD_REG3__MODE_9__SHIFT 0x00000014
+#define PB1_GLB_SCI_STAT_OVRD_REG4__DLL_LOCK_12_MASK 0x00001000L
+#define PB1_GLB_SCI_STAT_OVRD_REG4__DLL_LOCK_12__SHIFT 0x0000000c
+#define PB1_GLB_SCI_STAT_OVRD_REG4__DLL_LOCK_13_MASK 0x00002000L
+#define PB1_GLB_SCI_STAT_OVRD_REG4__DLL_LOCK_13__SHIFT 0x0000000d
+#define PB1_GLB_SCI_STAT_OVRD_REG4__DLL_LOCK_14_MASK 0x00004000L
+#define PB1_GLB_SCI_STAT_OVRD_REG4__DLL_LOCK_14__SHIFT 0x0000000e
+#define PB1_GLB_SCI_STAT_OVRD_REG4__DLL_LOCK_15_MASK 0x00008000L
+#define PB1_GLB_SCI_STAT_OVRD_REG4__DLL_LOCK_15__SHIFT 0x0000000f
+#define PB1_GLB_SCI_STAT_OVRD_REG4__FREQDIV_12_MASK 0x000c0000L
+#define PB1_GLB_SCI_STAT_OVRD_REG4__FREQDIV_12__SHIFT 0x00000012
+#define PB1_GLB_SCI_STAT_OVRD_REG4__FREQDIV_13_MASK 0x00c00000L
+#define PB1_GLB_SCI_STAT_OVRD_REG4__FREQDIV_13__SHIFT 0x00000016
+#define PB1_GLB_SCI_STAT_OVRD_REG4__FREQDIV_14_MASK 0x0c000000L
+#define PB1_GLB_SCI_STAT_OVRD_REG4__FREQDIV_14__SHIFT 0x0000001a
+#define PB1_GLB_SCI_STAT_OVRD_REG4__FREQDIV_15_MASK 0xc0000000L
+#define PB1_GLB_SCI_STAT_OVRD_REG4__FREQDIV_15__SHIFT 0x0000001e
+#define PB1_GLB_SCI_STAT_OVRD_REG4__IGNR_DLL_LOCK_SCI_UPDT_L12T15_MASK 0x00000004L
+#define PB1_GLB_SCI_STAT_OVRD_REG4__IGNR_DLL_LOCK_SCI_UPDT_L12T15__SHIFT 0x00000002
+#define PB1_GLB_SCI_STAT_OVRD_REG4__IGNR_FREQDIV_SCI_UPDT_L12T15_MASK 0x00000002L
+#define PB1_GLB_SCI_STAT_OVRD_REG4__IGNR_FREQDIV_SCI_UPDT_L12T15__SHIFT 0x00000001
+#define PB1_GLB_SCI_STAT_OVRD_REG4__IGNR_MODE_SCI_UPDT_L12T15_MASK 0x00000001L
+#define PB1_GLB_SCI_STAT_OVRD_REG4__IGNR_MODE_SCI_UPDT_L12T15__SHIFT 0x00000000
+#define PB1_GLB_SCI_STAT_OVRD_REG4__MODE_12_MASK 0x00030000L
+#define PB1_GLB_SCI_STAT_OVRD_REG4__MODE_12__SHIFT 0x00000010
+#define PB1_GLB_SCI_STAT_OVRD_REG4__MODE_13_MASK 0x00300000L
+#define PB1_GLB_SCI_STAT_OVRD_REG4__MODE_13__SHIFT 0x00000014
+#define PB1_GLB_SCI_STAT_OVRD_REG4__MODE_14_MASK 0x03000000L
+#define PB1_GLB_SCI_STAT_OVRD_REG4__MODE_14__SHIFT 0x00000018
+#define PB1_GLB_SCI_STAT_OVRD_REG4__MODE_15_MASK 0x30000000L
+#define PB1_GLB_SCI_STAT_OVRD_REG4__MODE_15__SHIFT 0x0000001c
+#define PB1_HW_DEBUG__PB1_HW_00_DEBUG_MASK 0x00000001L
+#define PB1_HW_DEBUG__PB1_HW_00_DEBUG__SHIFT 0x00000000
+#define PB1_HW_DEBUG__PB1_HW_01_DEBUG_MASK 0x00000002L
+#define PB1_HW_DEBUG__PB1_HW_01_DEBUG__SHIFT 0x00000001
+#define PB1_HW_DEBUG__PB1_HW_02_DEBUG_MASK 0x00000004L
+#define PB1_HW_DEBUG__PB1_HW_02_DEBUG__SHIFT 0x00000002
+#define PB1_HW_DEBUG__PB1_HW_03_DEBUG_MASK 0x00000008L
+#define PB1_HW_DEBUG__PB1_HW_03_DEBUG__SHIFT 0x00000003
+#define PB1_HW_DEBUG__PB1_HW_04_DEBUG_MASK 0x00000010L
+#define PB1_HW_DEBUG__PB1_HW_04_DEBUG__SHIFT 0x00000004
+#define PB1_HW_DEBUG__PB1_HW_05_DEBUG_MASK 0x00000020L
+#define PB1_HW_DEBUG__PB1_HW_05_DEBUG__SHIFT 0x00000005
+#define PB1_HW_DEBUG__PB1_HW_06_DEBUG_MASK 0x00000040L
+#define PB1_HW_DEBUG__PB1_HW_06_DEBUG__SHIFT 0x00000006
+#define PB1_HW_DEBUG__PB1_HW_07_DEBUG_MASK 0x00000080L
+#define PB1_HW_DEBUG__PB1_HW_07_DEBUG__SHIFT 0x00000007
+#define PB1_HW_DEBUG__PB1_HW_08_DEBUG_MASK 0x00000100L
+#define PB1_HW_DEBUG__PB1_HW_08_DEBUG__SHIFT 0x00000008
+#define PB1_HW_DEBUG__PB1_HW_09_DEBUG_MASK 0x00000200L
+#define PB1_HW_DEBUG__PB1_HW_09_DEBUG__SHIFT 0x00000009
+#define PB1_HW_DEBUG__PB1_HW_10_DEBUG_MASK 0x00000400L
+#define PB1_HW_DEBUG__PB1_HW_10_DEBUG__SHIFT 0x0000000a
+#define PB1_HW_DEBUG__PB1_HW_11_DEBUG_MASK 0x00000800L
+#define PB1_HW_DEBUG__PB1_HW_11_DEBUG__SHIFT 0x0000000b
+#define PB1_HW_DEBUG__PB1_HW_12_DEBUG_MASK 0x00001000L
+#define PB1_HW_DEBUG__PB1_HW_12_DEBUG__SHIFT 0x0000000c
+#define PB1_HW_DEBUG__PB1_HW_13_DEBUG_MASK 0x00002000L
+#define PB1_HW_DEBUG__PB1_HW_13_DEBUG__SHIFT 0x0000000d
+#define PB1_HW_DEBUG__PB1_HW_14_DEBUG_MASK 0x00004000L
+#define PB1_HW_DEBUG__PB1_HW_14_DEBUG__SHIFT 0x0000000e
+#define PB1_HW_DEBUG__PB1_HW_15_DEBUG_MASK 0x00008000L
+#define PB1_HW_DEBUG__PB1_HW_15_DEBUG__SHIFT 0x0000000f
+#define PB1_HW_DEBUG__PB1_HW_16_DEBUG_MASK 0x00010000L
+#define PB1_HW_DEBUG__PB1_HW_16_DEBUG__SHIFT 0x00000010
+#define PB1_HW_DEBUG__PB1_HW_17_DEBUG_MASK 0x00020000L
+#define PB1_HW_DEBUG__PB1_HW_17_DEBUG__SHIFT 0x00000011
+#define PB1_HW_DEBUG__PB1_HW_18_DEBUG_MASK 0x00040000L
+#define PB1_HW_DEBUG__PB1_HW_18_DEBUG__SHIFT 0x00000012
+#define PB1_HW_DEBUG__PB1_HW_19_DEBUG_MASK 0x00080000L
+#define PB1_HW_DEBUG__PB1_HW_19_DEBUG__SHIFT 0x00000013
+#define PB1_HW_DEBUG__PB1_HW_20_DEBUG_MASK 0x00100000L
+#define PB1_HW_DEBUG__PB1_HW_20_DEBUG__SHIFT 0x00000014
+#define PB1_HW_DEBUG__PB1_HW_21_DEBUG_MASK 0x00200000L
+#define PB1_HW_DEBUG__PB1_HW_21_DEBUG__SHIFT 0x00000015
+#define PB1_HW_DEBUG__PB1_HW_22_DEBUG_MASK 0x00400000L
+#define PB1_HW_DEBUG__PB1_HW_22_DEBUG__SHIFT 0x00000016
+#define PB1_HW_DEBUG__PB1_HW_23_DEBUG_MASK 0x00800000L
+#define PB1_HW_DEBUG__PB1_HW_23_DEBUG__SHIFT 0x00000017
+#define PB1_HW_DEBUG__PB1_HW_24_DEBUG_MASK 0x01000000L
+#define PB1_HW_DEBUG__PB1_HW_24_DEBUG__SHIFT 0x00000018
+#define PB1_HW_DEBUG__PB1_HW_25_DEBUG_MASK 0x02000000L
+#define PB1_HW_DEBUG__PB1_HW_25_DEBUG__SHIFT 0x00000019
+#define PB1_HW_DEBUG__PB1_HW_26_DEBUG_MASK 0x04000000L
+#define PB1_HW_DEBUG__PB1_HW_26_DEBUG__SHIFT 0x0000001a
+#define PB1_HW_DEBUG__PB1_HW_27_DEBUG_MASK 0x08000000L
+#define PB1_HW_DEBUG__PB1_HW_27_DEBUG__SHIFT 0x0000001b
+#define PB1_HW_DEBUG__PB1_HW_28_DEBUG_MASK 0x10000000L
+#define PB1_HW_DEBUG__PB1_HW_28_DEBUG__SHIFT 0x0000001c
+#define PB1_HW_DEBUG__PB1_HW_29_DEBUG_MASK 0x20000000L
+#define PB1_HW_DEBUG__PB1_HW_29_DEBUG__SHIFT 0x0000001d
+#define PB1_HW_DEBUG__PB1_HW_30_DEBUG_MASK 0x40000000L
+#define PB1_HW_DEBUG__PB1_HW_30_DEBUG__SHIFT 0x0000001e
+#define PB1_HW_DEBUG__PB1_HW_31_DEBUG_MASK 0x80000000L
+#define PB1_HW_DEBUG__PB1_HW_31_DEBUG__SHIFT 0x0000001f
+#define PB1_PIF_CNTL2__RXDETECT_OVERRIDE_EN_MASK 0x00000080L
+#define PB1_PIF_CNTL2__RXDETECT_OVERRIDE_EN__SHIFT 0x00000007
+#define PB1_PIF_CNTL2__RXDETECT_OVERRIDE_VAL_0_MASK 0x00000100L
+#define PB1_PIF_CNTL2__RXDETECT_OVERRIDE_VAL_0__SHIFT 0x00000008
+#define PB1_PIF_CNTL2__RXDETECT_OVERRIDE_VAL_10_MASK 0x00040000L
+#define PB1_PIF_CNTL2__RXDETECT_OVERRIDE_VAL_10__SHIFT 0x00000012
+#define PB1_PIF_CNTL2__RXDETECT_OVERRIDE_VAL_11_MASK 0x00080000L
+#define PB1_PIF_CNTL2__RXDETECT_OVERRIDE_VAL_11__SHIFT 0x00000013
+#define PB1_PIF_CNTL2__RXDETECT_OVERRIDE_VAL_12_MASK 0x00100000L
+#define PB1_PIF_CNTL2__RXDETECT_OVERRIDE_VAL_12__SHIFT 0x00000014
+#define PB1_PIF_CNTL2__RXDETECT_OVERRIDE_VAL_13_MASK 0x00200000L
+#define PB1_PIF_CNTL2__RXDETECT_OVERRIDE_VAL_13__SHIFT 0x00000015
+#define PB1_PIF_CNTL2__RXDETECT_OVERRIDE_VAL_14_MASK 0x00400000L
+#define PB1_PIF_CNTL2__RXDETECT_OVERRIDE_VAL_14__SHIFT 0x00000016
+#define PB1_PIF_CNTL2__RXDETECT_OVERRIDE_VAL_15_MASK 0x00800000L
+#define PB1_PIF_CNTL2__RXDETECT_OVERRIDE_VAL_15__SHIFT 0x00000017
+#define PB1_PIF_CNTL2__RXDETECT_OVERRIDE_VAL_1_MASK 0x00000200L
+#define PB1_PIF_CNTL2__RXDETECT_OVERRIDE_VAL_1__SHIFT 0x00000009
+#define PB1_PIF_CNTL2__RXDETECT_OVERRIDE_VAL_2_MASK 0x00000400L
+#define PB1_PIF_CNTL2__RXDETECT_OVERRIDE_VAL_2__SHIFT 0x0000000a
+#define PB1_PIF_CNTL2__RXDETECT_OVERRIDE_VAL_3_MASK 0x00000800L
+#define PB1_PIF_CNTL2__RXDETECT_OVERRIDE_VAL_3__SHIFT 0x0000000b
+#define PB1_PIF_CNTL2__RXDETECT_OVERRIDE_VAL_4_MASK 0x00001000L
+#define PB1_PIF_CNTL2__RXDETECT_OVERRIDE_VAL_4__SHIFT 0x0000000c
+#define PB1_PIF_CNTL2__RXDETECT_OVERRIDE_VAL_5_MASK 0x00002000L
+#define PB1_PIF_CNTL2__RXDETECT_OVERRIDE_VAL_5__SHIFT 0x0000000d
+#define PB1_PIF_CNTL2__RXDETECT_OVERRIDE_VAL_6_MASK 0x00004000L
+#define PB1_PIF_CNTL2__RXDETECT_OVERRIDE_VAL_6__SHIFT 0x0000000e
+#define PB1_PIF_CNTL2__RXDETECT_OVERRIDE_VAL_7_MASK 0x00008000L
+#define PB1_PIF_CNTL2__RXDETECT_OVERRIDE_VAL_7__SHIFT 0x0000000f
+#define PB1_PIF_CNTL2__RXDETECT_OVERRIDE_VAL_8_MASK 0x00010000L
+#define PB1_PIF_CNTL2__RXDETECT_OVERRIDE_VAL_8__SHIFT 0x00000010
+#define PB1_PIF_CNTL2__RXDETECT_OVERRIDE_VAL_9_MASK 0x00020000L
+#define PB1_PIF_CNTL2__RXDETECT_OVERRIDE_VAL_9__SHIFT 0x00000011
+#define PB1_PIF_CNTL2__RXDETECT_SAMPL_TIME_MASK 0x00000006L
+#define PB1_PIF_CNTL2__RXDETECT_SAMPL_TIME__SHIFT 0x00000001
+#define PB1_PIF_CNTL2__RXPHYSTATUS_DELAY_MASK 0x07000000L
+#define PB1_PIF_CNTL2__RXPHYSTATUS_DELAY__SHIFT 0x00000018
+#define PB1_PIF_CNTL__DA_FIFO_RESET_0_MASK 0x00000002L
+#define PB1_PIF_CNTL__DA_FIFO_RESET_0__SHIFT 0x00000001
+#define PB1_PIF_CNTL__DA_FIFO_RESET_1_MASK 0x00000020L
+#define PB1_PIF_CNTL__DA_FIFO_RESET_1__SHIFT 0x00000005
+#define PB1_PIF_CNTL__DA_FIFO_RESET_2_MASK 0x00000200L
+#define PB1_PIF_CNTL__DA_FIFO_RESET_2__SHIFT 0x00000009
+#define PB1_PIF_CNTL__DA_FIFO_RESET_3_MASK 0x00002000L
+#define PB1_PIF_CNTL__DA_FIFO_RESET_3__SHIFT 0x0000000d
+#define PB1_PIF_CNTL__DIVINIT_MODE_MASK 0x00000100L
+#define PB1_PIF_CNTL__DIVINIT_MODE__SHIFT 0x00000008
+#define PB1_PIF_CNTL__EI_CYCLE_OFF_TIME_MASK 0x00700000L
+#define PB1_PIF_CNTL__EI_CYCLE_OFF_TIME__SHIFT 0x00000014
+#define PB1_PIF_CNTL__EI_DET_CYCLE_MODE_MASK 0x00000010L
+#define PB1_PIF_CNTL__EI_DET_CYCLE_MODE__SHIFT 0x00000004
+#define PB1_PIF_CNTL__EXIT_L0S_INIT_DIS_MASK 0x00800000L
+#define PB1_PIF_CNTL__EXIT_L0S_INIT_DIS__SHIFT 0x00000017
+#define PB1_PIF_CNTL__EXTEND_WAIT_FOR_RAMPUP_MASK 0x10000000L
+#define PB1_PIF_CNTL__EXTEND_WAIT_FOR_RAMPUP__SHIFT 0x0000001c
+#define PB1_PIF_CNTL__IGNORE_TxDataValid_EP_DIS_MASK 0x20000000L
+#define PB1_PIF_CNTL__IGNORE_TxDataValid_EP_DIS__SHIFT 0x0000001d
+#define PB1_PIF_CNTL__LS2_EXIT_TIME_MASK 0x000e0000L
+#define PB1_PIF_CNTL__LS2_EXIT_TIME__SHIFT 0x00000011
+#define PB1_PIF_CNTL__PHYCMD_CR_EN_MODE_MASK 0x00000008L
+#define PB1_PIF_CNTL__PHYCMD_CR_EN_MODE__SHIFT 0x00000003
+#define PB1_PIF_CNTL__PHY_CR_EN_MODE_MASK 0x00000004L
+#define PB1_PIF_CNTL__PHY_CR_EN_MODE__SHIFT 0x00000002
+#define PB1_PIF_CNTL__PLL_BINDING_ENABLE_MASK 0x00000400L
+#define PB1_PIF_CNTL__PLL_BINDING_ENABLE__SHIFT 0x0000000a
+#define PB1_PIF_CNTL__RXDETECT_FIFO_RESET_MODE_MASK 0x00000040L
+#define PB1_PIF_CNTL__RXDETECT_FIFO_RESET_MODE__SHIFT 0x00000006
+#define PB1_PIF_CNTL__RXDETECT_TX_PWR_MODE_MASK 0x00000080L
+#define PB1_PIF_CNTL__RXDETECT_TX_PWR_MODE__SHIFT 0x00000007
+#define PB1_PIF_CNTL__RXEN_GATER_MASK 0x0f000000L
+#define PB1_PIF_CNTL__RXEN_GATER__SHIFT 0x00000018
+#define PB1_PIF_CNTL__SC_CALIB_DONE_CNTL_MASK 0x00000800L
+#define PB1_PIF_CNTL__SC_CALIB_DONE_CNTL__SHIFT 0x0000000b
+#define PB1_PIF_CNTL__SERIAL_CFG_ENABLE_MASK 0x00000001L
+#define PB1_PIF_CNTL__SERIAL_CFG_ENABLE__SHIFT 0x00000000
+#define PB1_PIF_CNTL__TXGND_TIME_MASK 0x00010000L
+#define PB1_PIF_CNTL__TXGND_TIME__SHIFT 0x00000010
+#define PB1_PIF_HW_DEBUG__PB1_PIF_HW_00_DEBUG_MASK 0x00000001L
+#define PB1_PIF_HW_DEBUG__PB1_PIF_HW_00_DEBUG__SHIFT 0x00000000
+#define PB1_PIF_HW_DEBUG__PB1_PIF_HW_01_DEBUG_MASK 0x00000002L
+#define PB1_PIF_HW_DEBUG__PB1_PIF_HW_01_DEBUG__SHIFT 0x00000001
+#define PB1_PIF_HW_DEBUG__PB1_PIF_HW_02_DEBUG_MASK 0x00000004L
+#define PB1_PIF_HW_DEBUG__PB1_PIF_HW_02_DEBUG__SHIFT 0x00000002
+#define PB1_PIF_HW_DEBUG__PB1_PIF_HW_03_DEBUG_MASK 0x00000008L
+#define PB1_PIF_HW_DEBUG__PB1_PIF_HW_03_DEBUG__SHIFT 0x00000003
+#define PB1_PIF_HW_DEBUG__PB1_PIF_HW_04_DEBUG_MASK 0x00000010L
+#define PB1_PIF_HW_DEBUG__PB1_PIF_HW_04_DEBUG__SHIFT 0x00000004
+#define PB1_PIF_HW_DEBUG__PB1_PIF_HW_05_DEBUG_MASK 0x00000020L
+#define PB1_PIF_HW_DEBUG__PB1_PIF_HW_05_DEBUG__SHIFT 0x00000005
+#define PB1_PIF_HW_DEBUG__PB1_PIF_HW_06_DEBUG_MASK 0x00000040L
+#define PB1_PIF_HW_DEBUG__PB1_PIF_HW_06_DEBUG__SHIFT 0x00000006
+#define PB1_PIF_HW_DEBUG__PB1_PIF_HW_07_DEBUG_MASK 0x00000080L
+#define PB1_PIF_HW_DEBUG__PB1_PIF_HW_07_DEBUG__SHIFT 0x00000007
+#define PB1_PIF_HW_DEBUG__PB1_PIF_HW_08_DEBUG_MASK 0x00000100L
+#define PB1_PIF_HW_DEBUG__PB1_PIF_HW_08_DEBUG__SHIFT 0x00000008
+#define PB1_PIF_HW_DEBUG__PB1_PIF_HW_09_DEBUG_MASK 0x00000200L
+#define PB1_PIF_HW_DEBUG__PB1_PIF_HW_09_DEBUG__SHIFT 0x00000009
+#define PB1_PIF_HW_DEBUG__PB1_PIF_HW_10_DEBUG_MASK 0x00000400L
+#define PB1_PIF_HW_DEBUG__PB1_PIF_HW_10_DEBUG__SHIFT 0x0000000a
+#define PB1_PIF_HW_DEBUG__PB1_PIF_HW_11_DEBUG_MASK 0x00000800L
+#define PB1_PIF_HW_DEBUG__PB1_PIF_HW_11_DEBUG__SHIFT 0x0000000b
+#define PB1_PIF_HW_DEBUG__PB1_PIF_HW_12_DEBUG_MASK 0x00001000L
+#define PB1_PIF_HW_DEBUG__PB1_PIF_HW_12_DEBUG__SHIFT 0x0000000c
+#define PB1_PIF_HW_DEBUG__PB1_PIF_HW_13_DEBUG_MASK 0x00002000L
+#define PB1_PIF_HW_DEBUG__PB1_PIF_HW_13_DEBUG__SHIFT 0x0000000d
+#define PB1_PIF_HW_DEBUG__PB1_PIF_HW_14_DEBUG_MASK 0x00004000L
+#define PB1_PIF_HW_DEBUG__PB1_PIF_HW_14_DEBUG__SHIFT 0x0000000e
+#define PB1_PIF_HW_DEBUG__PB1_PIF_HW_15_DEBUG_MASK 0x00008000L
+#define PB1_PIF_HW_DEBUG__PB1_PIF_HW_15_DEBUG__SHIFT 0x0000000f
+#define PB1_PIF_PAIRING__MULTI_PIF_MASK 0x02000000L
+#define PB1_PIF_PAIRING__MULTI_PIF__SHIFT 0x00000019
+#define PB1_PIF_PAIRING__X16_LANE_15_0_MASK 0x00100000L
+#define PB1_PIF_PAIRING__X16_LANE_15_0__SHIFT 0x00000014
+#define PB1_PIF_PAIRING__X2_LANE_1_0_MASK 0x00000001L
+#define PB1_PIF_PAIRING__X2_LANE_1_0__SHIFT 0x00000000
+#define PB1_PIF_PAIRING__X2_LANE_11_10_MASK 0x00000020L
+#define PB1_PIF_PAIRING__X2_LANE_11_10__SHIFT 0x00000005
+#define PB1_PIF_PAIRING__X2_LANE_13_12_MASK 0x00000040L
+#define PB1_PIF_PAIRING__X2_LANE_13_12__SHIFT 0x00000006
+#define PB1_PIF_PAIRING__X2_LANE_15_14_MASK 0x00000080L
+#define PB1_PIF_PAIRING__X2_LANE_15_14__SHIFT 0x00000007
+#define PB1_PIF_PAIRING__X2_LANE_3_2_MASK 0x00000002L
+#define PB1_PIF_PAIRING__X2_LANE_3_2__SHIFT 0x00000001
+#define PB1_PIF_PAIRING__X2_LANE_5_4_MASK 0x00000004L
+#define PB1_PIF_PAIRING__X2_LANE_5_4__SHIFT 0x00000002
+#define PB1_PIF_PAIRING__X2_LANE_7_6_MASK 0x00000008L
+#define PB1_PIF_PAIRING__X2_LANE_7_6__SHIFT 0x00000003
+#define PB1_PIF_PAIRING__X2_LANE_9_8_MASK 0x00000010L
+#define PB1_PIF_PAIRING__X2_LANE_9_8__SHIFT 0x00000004
+#define PB1_PIF_PAIRING__X4_LANE_11_8_MASK 0x00000400L
+#define PB1_PIF_PAIRING__X4_LANE_11_8__SHIFT 0x0000000a
+#define PB1_PIF_PAIRING__X4_LANE_15_12_MASK 0x00000800L
+#define PB1_PIF_PAIRING__X4_LANE_15_12__SHIFT 0x0000000b
+#define PB1_PIF_PAIRING__X4_LANE_3_0_MASK 0x00000100L
+#define PB1_PIF_PAIRING__X4_LANE_3_0__SHIFT 0x00000008
+#define PB1_PIF_PAIRING__X4_LANE_7_4_MASK 0x00000200L
+#define PB1_PIF_PAIRING__X4_LANE_7_4__SHIFT 0x00000009
+#define PB1_PIF_PAIRING__X8_LANE_15_8_MASK 0x00020000L
+#define PB1_PIF_PAIRING__X8_LANE_15_8__SHIFT 0x00000011
+#define PB1_PIF_PAIRING__X8_LANE_7_0_MASK 0x00010000L
+#define PB1_PIF_PAIRING__X8_LANE_7_0__SHIFT 0x00000010
+#define PB1_PIF_PDNB_OVERRIDE_0__RXEN_OVERRIDE_EN_0_MASK 0x00000100L
+#define PB1_PIF_PDNB_OVERRIDE_0__RXEN_OVERRIDE_EN_0__SHIFT 0x00000008
+#define PB1_PIF_PDNB_OVERRIDE_0__RXEN_OVERRIDE_VAL_0_MASK 0x00000200L
+#define PB1_PIF_PDNB_OVERRIDE_0__RXEN_OVERRIDE_VAL_0__SHIFT 0x00000009
+#define PB1_PIF_PDNB_OVERRIDE_0__RX_PDNB_OVERRIDE_EN_0_MASK 0x00000010L
+#define PB1_PIF_PDNB_OVERRIDE_0__RX_PDNB_OVERRIDE_EN_0__SHIFT 0x00000004
+#define PB1_PIF_PDNB_OVERRIDE_0__RX_PDNB_OVERRIDE_VAL_0_MASK 0x000000e0L
+#define PB1_PIF_PDNB_OVERRIDE_0__RX_PDNB_OVERRIDE_VAL_0__SHIFT 0x00000005
+#define PB1_PIF_PDNB_OVERRIDE_0__RXPWR_OVERRIDE_EN_0_MASK 0x00004000L
+#define PB1_PIF_PDNB_OVERRIDE_0__RXPWR_OVERRIDE_EN_0__SHIFT 0x0000000e
+#define PB1_PIF_PDNB_OVERRIDE_0__RXPWR_OVERRIDE_VAL_0_MASK 0x00038000L
+#define PB1_PIF_PDNB_OVERRIDE_0__RXPWR_OVERRIDE_VAL_0__SHIFT 0x0000000f
+#define PB1_PIF_PDNB_OVERRIDE_0__TX_PDNB_OVERRIDE_EN_0_MASK 0x00000001L
+#define PB1_PIF_PDNB_OVERRIDE_0__TX_PDNB_OVERRIDE_EN_0__SHIFT 0x00000000
+#define PB1_PIF_PDNB_OVERRIDE_0__TX_PDNB_OVERRIDE_VAL_0_MASK 0x0000000eL
+#define PB1_PIF_PDNB_OVERRIDE_0__TX_PDNB_OVERRIDE_VAL_0__SHIFT 0x00000001
+#define PB1_PIF_PDNB_OVERRIDE_0__TXPWR_OVERRIDE_EN_0_MASK 0x00000400L
+#define PB1_PIF_PDNB_OVERRIDE_0__TXPWR_OVERRIDE_EN_0__SHIFT 0x0000000a
+#define PB1_PIF_PDNB_OVERRIDE_0__TXPWR_OVERRIDE_VAL_0_MASK 0x00003800L
+#define PB1_PIF_PDNB_OVERRIDE_0__TXPWR_OVERRIDE_VAL_0__SHIFT 0x0000000b
+#define PB1_PIF_PDNB_OVERRIDE_10__RXEN_OVERRIDE_EN_10_MASK 0x00000100L
+#define PB1_PIF_PDNB_OVERRIDE_10__RXEN_OVERRIDE_EN_10__SHIFT 0x00000008
+#define PB1_PIF_PDNB_OVERRIDE_10__RXEN_OVERRIDE_VAL_10_MASK 0x00000200L
+#define PB1_PIF_PDNB_OVERRIDE_10__RXEN_OVERRIDE_VAL_10__SHIFT 0x00000009
+#define PB1_PIF_PDNB_OVERRIDE_10__RX_PDNB_OVERRIDE_EN_10_MASK 0x00000010L
+#define PB1_PIF_PDNB_OVERRIDE_10__RX_PDNB_OVERRIDE_EN_10__SHIFT 0x00000004
+#define PB1_PIF_PDNB_OVERRIDE_10__RX_PDNB_OVERRIDE_VAL_10_MASK 0x000000e0L
+#define PB1_PIF_PDNB_OVERRIDE_10__RX_PDNB_OVERRIDE_VAL_10__SHIFT 0x00000005
+#define PB1_PIF_PDNB_OVERRIDE_10__RXPWR_OVERRIDE_EN_10_MASK 0x00004000L
+#define PB1_PIF_PDNB_OVERRIDE_10__RXPWR_OVERRIDE_EN_10__SHIFT 0x0000000e
+#define PB1_PIF_PDNB_OVERRIDE_10__RXPWR_OVERRIDE_VAL_10_MASK 0x00038000L
+#define PB1_PIF_PDNB_OVERRIDE_10__RXPWR_OVERRIDE_VAL_10__SHIFT 0x0000000f
+#define PB1_PIF_PDNB_OVERRIDE_10__TX_PDNB_OVERRIDE_EN_10_MASK 0x00000001L
+#define PB1_PIF_PDNB_OVERRIDE_10__TX_PDNB_OVERRIDE_EN_10__SHIFT 0x00000000
+#define PB1_PIF_PDNB_OVERRIDE_10__TX_PDNB_OVERRIDE_VAL_10_MASK 0x0000000eL
+#define PB1_PIF_PDNB_OVERRIDE_10__TX_PDNB_OVERRIDE_VAL_10__SHIFT 0x00000001
+#define PB1_PIF_PDNB_OVERRIDE_10__TXPWR_OVERRIDE_EN_10_MASK 0x00000400L
+#define PB1_PIF_PDNB_OVERRIDE_10__TXPWR_OVERRIDE_EN_10__SHIFT 0x0000000a
+#define PB1_PIF_PDNB_OVERRIDE_10__TXPWR_OVERRIDE_VAL_10_MASK 0x00003800L
+#define PB1_PIF_PDNB_OVERRIDE_10__TXPWR_OVERRIDE_VAL_10__SHIFT 0x0000000b
+#define PB1_PIF_PDNB_OVERRIDE_11__RXEN_OVERRIDE_EN_11_MASK 0x00000100L
+#define PB1_PIF_PDNB_OVERRIDE_11__RXEN_OVERRIDE_EN_11__SHIFT 0x00000008
+#define PB1_PIF_PDNB_OVERRIDE_11__RXEN_OVERRIDE_VAL_11_MASK 0x00000200L
+#define PB1_PIF_PDNB_OVERRIDE_11__RXEN_OVERRIDE_VAL_11__SHIFT 0x00000009
+#define PB1_PIF_PDNB_OVERRIDE_11__RX_PDNB_OVERRIDE_EN_11_MASK 0x00000010L
+#define PB1_PIF_PDNB_OVERRIDE_11__RX_PDNB_OVERRIDE_EN_11__SHIFT 0x00000004
+#define PB1_PIF_PDNB_OVERRIDE_11__RX_PDNB_OVERRIDE_VAL_11_MASK 0x000000e0L
+#define PB1_PIF_PDNB_OVERRIDE_11__RX_PDNB_OVERRIDE_VAL_11__SHIFT 0x00000005
+#define PB1_PIF_PDNB_OVERRIDE_11__RXPWR_OVERRIDE_EN_11_MASK 0x00004000L
+#define PB1_PIF_PDNB_OVERRIDE_11__RXPWR_OVERRIDE_EN_11__SHIFT 0x0000000e
+#define PB1_PIF_PDNB_OVERRIDE_11__RXPWR_OVERRIDE_VAL_11_MASK 0x00038000L
+#define PB1_PIF_PDNB_OVERRIDE_11__RXPWR_OVERRIDE_VAL_11__SHIFT 0x0000000f
+#define PB1_PIF_PDNB_OVERRIDE_11__TX_PDNB_OVERRIDE_EN_11_MASK 0x00000001L
+#define PB1_PIF_PDNB_OVERRIDE_11__TX_PDNB_OVERRIDE_EN_11__SHIFT 0x00000000
+#define PB1_PIF_PDNB_OVERRIDE_11__TX_PDNB_OVERRIDE_VAL_11_MASK 0x0000000eL
+#define PB1_PIF_PDNB_OVERRIDE_11__TX_PDNB_OVERRIDE_VAL_11__SHIFT 0x00000001
+#define PB1_PIF_PDNB_OVERRIDE_11__TXPWR_OVERRIDE_EN_11_MASK 0x00000400L
+#define PB1_PIF_PDNB_OVERRIDE_11__TXPWR_OVERRIDE_EN_11__SHIFT 0x0000000a
+#define PB1_PIF_PDNB_OVERRIDE_11__TXPWR_OVERRIDE_VAL_11_MASK 0x00003800L
+#define PB1_PIF_PDNB_OVERRIDE_11__TXPWR_OVERRIDE_VAL_11__SHIFT 0x0000000b
+#define PB1_PIF_PDNB_OVERRIDE_12__RXEN_OVERRIDE_EN_12_MASK 0x00000100L
+#define PB1_PIF_PDNB_OVERRIDE_12__RXEN_OVERRIDE_EN_12__SHIFT 0x00000008
+#define PB1_PIF_PDNB_OVERRIDE_12__RXEN_OVERRIDE_VAL_12_MASK 0x00000200L
+#define PB1_PIF_PDNB_OVERRIDE_12__RXEN_OVERRIDE_VAL_12__SHIFT 0x00000009
+#define PB1_PIF_PDNB_OVERRIDE_12__RX_PDNB_OVERRIDE_EN_12_MASK 0x00000010L
+#define PB1_PIF_PDNB_OVERRIDE_12__RX_PDNB_OVERRIDE_EN_12__SHIFT 0x00000004
+#define PB1_PIF_PDNB_OVERRIDE_12__RX_PDNB_OVERRIDE_VAL_12_MASK 0x000000e0L
+#define PB1_PIF_PDNB_OVERRIDE_12__RX_PDNB_OVERRIDE_VAL_12__SHIFT 0x00000005
+#define PB1_PIF_PDNB_OVERRIDE_12__RXPWR_OVERRIDE_EN_12_MASK 0x00004000L
+#define PB1_PIF_PDNB_OVERRIDE_12__RXPWR_OVERRIDE_EN_12__SHIFT 0x0000000e
+#define PB1_PIF_PDNB_OVERRIDE_12__RXPWR_OVERRIDE_VAL_12_MASK 0x00038000L
+#define PB1_PIF_PDNB_OVERRIDE_12__RXPWR_OVERRIDE_VAL_12__SHIFT 0x0000000f
+#define PB1_PIF_PDNB_OVERRIDE_12__TX_PDNB_OVERRIDE_EN_12_MASK 0x00000001L
+#define PB1_PIF_PDNB_OVERRIDE_12__TX_PDNB_OVERRIDE_EN_12__SHIFT 0x00000000
+#define PB1_PIF_PDNB_OVERRIDE_12__TX_PDNB_OVERRIDE_VAL_12_MASK 0x0000000eL
+#define PB1_PIF_PDNB_OVERRIDE_12__TX_PDNB_OVERRIDE_VAL_12__SHIFT 0x00000001
+#define PB1_PIF_PDNB_OVERRIDE_12__TXPWR_OVERRIDE_EN_12_MASK 0x00000400L
+#define PB1_PIF_PDNB_OVERRIDE_12__TXPWR_OVERRIDE_EN_12__SHIFT 0x0000000a
+#define PB1_PIF_PDNB_OVERRIDE_12__TXPWR_OVERRIDE_VAL_12_MASK 0x00003800L
+#define PB1_PIF_PDNB_OVERRIDE_12__TXPWR_OVERRIDE_VAL_12__SHIFT 0x0000000b
+#define PB1_PIF_PDNB_OVERRIDE_13__RXEN_OVERRIDE_EN_13_MASK 0x00000100L
+#define PB1_PIF_PDNB_OVERRIDE_13__RXEN_OVERRIDE_EN_13__SHIFT 0x00000008
+#define PB1_PIF_PDNB_OVERRIDE_13__RXEN_OVERRIDE_VAL_13_MASK 0x00000200L
+#define PB1_PIF_PDNB_OVERRIDE_13__RXEN_OVERRIDE_VAL_13__SHIFT 0x00000009
+#define PB1_PIF_PDNB_OVERRIDE_13__RX_PDNB_OVERRIDE_EN_13_MASK 0x00000010L
+#define PB1_PIF_PDNB_OVERRIDE_13__RX_PDNB_OVERRIDE_EN_13__SHIFT 0x00000004
+#define PB1_PIF_PDNB_OVERRIDE_13__RX_PDNB_OVERRIDE_VAL_13_MASK 0x000000e0L
+#define PB1_PIF_PDNB_OVERRIDE_13__RX_PDNB_OVERRIDE_VAL_13__SHIFT 0x00000005
+#define PB1_PIF_PDNB_OVERRIDE_13__RXPWR_OVERRIDE_EN_13_MASK 0x00004000L
+#define PB1_PIF_PDNB_OVERRIDE_13__RXPWR_OVERRIDE_EN_13__SHIFT 0x0000000e
+#define PB1_PIF_PDNB_OVERRIDE_13__RXPWR_OVERRIDE_VAL_13_MASK 0x00038000L
+#define PB1_PIF_PDNB_OVERRIDE_13__RXPWR_OVERRIDE_VAL_13__SHIFT 0x0000000f
+#define PB1_PIF_PDNB_OVERRIDE_13__TX_PDNB_OVERRIDE_EN_13_MASK 0x00000001L
+#define PB1_PIF_PDNB_OVERRIDE_13__TX_PDNB_OVERRIDE_EN_13__SHIFT 0x00000000
+#define PB1_PIF_PDNB_OVERRIDE_13__TX_PDNB_OVERRIDE_VAL_13_MASK 0x0000000eL
+#define PB1_PIF_PDNB_OVERRIDE_13__TX_PDNB_OVERRIDE_VAL_13__SHIFT 0x00000001
+#define PB1_PIF_PDNB_OVERRIDE_13__TXPWR_OVERRIDE_EN_13_MASK 0x00000400L
+#define PB1_PIF_PDNB_OVERRIDE_13__TXPWR_OVERRIDE_EN_13__SHIFT 0x0000000a
+#define PB1_PIF_PDNB_OVERRIDE_13__TXPWR_OVERRIDE_VAL_13_MASK 0x00003800L
+#define PB1_PIF_PDNB_OVERRIDE_13__TXPWR_OVERRIDE_VAL_13__SHIFT 0x0000000b
+#define PB1_PIF_PDNB_OVERRIDE_14__RXEN_OVERRIDE_EN_14_MASK 0x00000100L
+#define PB1_PIF_PDNB_OVERRIDE_14__RXEN_OVERRIDE_EN_14__SHIFT 0x00000008
+#define PB1_PIF_PDNB_OVERRIDE_14__RXEN_OVERRIDE_VAL_14_MASK 0x00000200L
+#define PB1_PIF_PDNB_OVERRIDE_14__RXEN_OVERRIDE_VAL_14__SHIFT 0x00000009
+#define PB1_PIF_PDNB_OVERRIDE_14__RX_PDNB_OVERRIDE_EN_14_MASK 0x00000010L
+#define PB1_PIF_PDNB_OVERRIDE_14__RX_PDNB_OVERRIDE_EN_14__SHIFT 0x00000004
+#define PB1_PIF_PDNB_OVERRIDE_14__RX_PDNB_OVERRIDE_VAL_14_MASK 0x000000e0L
+#define PB1_PIF_PDNB_OVERRIDE_14__RX_PDNB_OVERRIDE_VAL_14__SHIFT 0x00000005
+#define PB1_PIF_PDNB_OVERRIDE_14__RXPWR_OVERRIDE_EN_14_MASK 0x00004000L
+#define PB1_PIF_PDNB_OVERRIDE_14__RXPWR_OVERRIDE_EN_14__SHIFT 0x0000000e
+#define PB1_PIF_PDNB_OVERRIDE_14__RXPWR_OVERRIDE_VAL_14_MASK 0x00038000L
+#define PB1_PIF_PDNB_OVERRIDE_14__RXPWR_OVERRIDE_VAL_14__SHIFT 0x0000000f
+#define PB1_PIF_PDNB_OVERRIDE_14__TX_PDNB_OVERRIDE_EN_14_MASK 0x00000001L
+#define PB1_PIF_PDNB_OVERRIDE_14__TX_PDNB_OVERRIDE_EN_14__SHIFT 0x00000000
+#define PB1_PIF_PDNB_OVERRIDE_14__TX_PDNB_OVERRIDE_VAL_14_MASK 0x0000000eL
+#define PB1_PIF_PDNB_OVERRIDE_14__TX_PDNB_OVERRIDE_VAL_14__SHIFT 0x00000001
+#define PB1_PIF_PDNB_OVERRIDE_14__TXPWR_OVERRIDE_EN_14_MASK 0x00000400L
+#define PB1_PIF_PDNB_OVERRIDE_14__TXPWR_OVERRIDE_EN_14__SHIFT 0x0000000a
+#define PB1_PIF_PDNB_OVERRIDE_14__TXPWR_OVERRIDE_VAL_14_MASK 0x00003800L
+#define PB1_PIF_PDNB_OVERRIDE_14__TXPWR_OVERRIDE_VAL_14__SHIFT 0x0000000b
+#define PB1_PIF_PDNB_OVERRIDE_15__RXEN_OVERRIDE_EN_15_MASK 0x00000100L
+#define PB1_PIF_PDNB_OVERRIDE_15__RXEN_OVERRIDE_EN_15__SHIFT 0x00000008
+#define PB1_PIF_PDNB_OVERRIDE_15__RXEN_OVERRIDE_VAL_15_MASK 0x00000200L
+#define PB1_PIF_PDNB_OVERRIDE_15__RXEN_OVERRIDE_VAL_15__SHIFT 0x00000009
+#define PB1_PIF_PDNB_OVERRIDE_15__RX_PDNB_OVERRIDE_EN_15_MASK 0x00000010L
+#define PB1_PIF_PDNB_OVERRIDE_15__RX_PDNB_OVERRIDE_EN_15__SHIFT 0x00000004
+#define PB1_PIF_PDNB_OVERRIDE_15__RX_PDNB_OVERRIDE_VAL_15_MASK 0x000000e0L
+#define PB1_PIF_PDNB_OVERRIDE_15__RX_PDNB_OVERRIDE_VAL_15__SHIFT 0x00000005
+#define PB1_PIF_PDNB_OVERRIDE_15__RXPWR_OVERRIDE_EN_15_MASK 0x00004000L
+#define PB1_PIF_PDNB_OVERRIDE_15__RXPWR_OVERRIDE_EN_15__SHIFT 0x0000000e
+#define PB1_PIF_PDNB_OVERRIDE_15__RXPWR_OVERRIDE_VAL_15_MASK 0x00038000L
+#define PB1_PIF_PDNB_OVERRIDE_15__RXPWR_OVERRIDE_VAL_15__SHIFT 0x0000000f
+#define PB1_PIF_PDNB_OVERRIDE_15__TX_PDNB_OVERRIDE_EN_15_MASK 0x00000001L
+#define PB1_PIF_PDNB_OVERRIDE_15__TX_PDNB_OVERRIDE_EN_15__SHIFT 0x00000000
+#define PB1_PIF_PDNB_OVERRIDE_15__TX_PDNB_OVERRIDE_VAL_15_MASK 0x0000000eL
+#define PB1_PIF_PDNB_OVERRIDE_15__TX_PDNB_OVERRIDE_VAL_15__SHIFT 0x00000001
+#define PB1_PIF_PDNB_OVERRIDE_15__TXPWR_OVERRIDE_EN_15_MASK 0x00000400L
+#define PB1_PIF_PDNB_OVERRIDE_15__TXPWR_OVERRIDE_EN_15__SHIFT 0x0000000a
+#define PB1_PIF_PDNB_OVERRIDE_15__TXPWR_OVERRIDE_VAL_15_MASK 0x00003800L
+#define PB1_PIF_PDNB_OVERRIDE_15__TXPWR_OVERRIDE_VAL_15__SHIFT 0x0000000b
+#define PB1_PIF_PDNB_OVERRIDE_1__RXEN_OVERRIDE_EN_1_MASK 0x00000100L
+#define PB1_PIF_PDNB_OVERRIDE_1__RXEN_OVERRIDE_EN_1__SHIFT 0x00000008
+#define PB1_PIF_PDNB_OVERRIDE_1__RXEN_OVERRIDE_VAL_1_MASK 0x00000200L
+#define PB1_PIF_PDNB_OVERRIDE_1__RXEN_OVERRIDE_VAL_1__SHIFT 0x00000009
+#define PB1_PIF_PDNB_OVERRIDE_1__RX_PDNB_OVERRIDE_EN_1_MASK 0x00000010L
+#define PB1_PIF_PDNB_OVERRIDE_1__RX_PDNB_OVERRIDE_EN_1__SHIFT 0x00000004
+#define PB1_PIF_PDNB_OVERRIDE_1__RX_PDNB_OVERRIDE_VAL_1_MASK 0x000000e0L
+#define PB1_PIF_PDNB_OVERRIDE_1__RX_PDNB_OVERRIDE_VAL_1__SHIFT 0x00000005
+#define PB1_PIF_PDNB_OVERRIDE_1__RXPWR_OVERRIDE_EN_1_MASK 0x00004000L
+#define PB1_PIF_PDNB_OVERRIDE_1__RXPWR_OVERRIDE_EN_1__SHIFT 0x0000000e
+#define PB1_PIF_PDNB_OVERRIDE_1__RXPWR_OVERRIDE_VAL_1_MASK 0x00038000L
+#define PB1_PIF_PDNB_OVERRIDE_1__RXPWR_OVERRIDE_VAL_1__SHIFT 0x0000000f
+#define PB1_PIF_PDNB_OVERRIDE_1__TX_PDNB_OVERRIDE_EN_1_MASK 0x00000001L
+#define PB1_PIF_PDNB_OVERRIDE_1__TX_PDNB_OVERRIDE_EN_1__SHIFT 0x00000000
+#define PB1_PIF_PDNB_OVERRIDE_1__TX_PDNB_OVERRIDE_VAL_1_MASK 0x0000000eL
+#define PB1_PIF_PDNB_OVERRIDE_1__TX_PDNB_OVERRIDE_VAL_1__SHIFT 0x00000001
+#define PB1_PIF_PDNB_OVERRIDE_1__TXPWR_OVERRIDE_EN_1_MASK 0x00000400L
+#define PB1_PIF_PDNB_OVERRIDE_1__TXPWR_OVERRIDE_EN_1__SHIFT 0x0000000a
+#define PB1_PIF_PDNB_OVERRIDE_1__TXPWR_OVERRIDE_VAL_1_MASK 0x00003800L
+#define PB1_PIF_PDNB_OVERRIDE_1__TXPWR_OVERRIDE_VAL_1__SHIFT 0x0000000b
+#define PB1_PIF_PDNB_OVERRIDE_2__RXEN_OVERRIDE_EN_2_MASK 0x00000100L
+#define PB1_PIF_PDNB_OVERRIDE_2__RXEN_OVERRIDE_EN_2__SHIFT 0x00000008
+#define PB1_PIF_PDNB_OVERRIDE_2__RXEN_OVERRIDE_VAL_2_MASK 0x00000200L
+#define PB1_PIF_PDNB_OVERRIDE_2__RXEN_OVERRIDE_VAL_2__SHIFT 0x00000009
+#define PB1_PIF_PDNB_OVERRIDE_2__RX_PDNB_OVERRIDE_EN_2_MASK 0x00000010L
+#define PB1_PIF_PDNB_OVERRIDE_2__RX_PDNB_OVERRIDE_EN_2__SHIFT 0x00000004
+#define PB1_PIF_PDNB_OVERRIDE_2__RX_PDNB_OVERRIDE_VAL_2_MASK 0x000000e0L
+#define PB1_PIF_PDNB_OVERRIDE_2__RX_PDNB_OVERRIDE_VAL_2__SHIFT 0x00000005
+#define PB1_PIF_PDNB_OVERRIDE_2__RXPWR_OVERRIDE_EN_2_MASK 0x00004000L
+#define PB1_PIF_PDNB_OVERRIDE_2__RXPWR_OVERRIDE_EN_2__SHIFT 0x0000000e
+#define PB1_PIF_PDNB_OVERRIDE_2__RXPWR_OVERRIDE_VAL_2_MASK 0x00038000L
+#define PB1_PIF_PDNB_OVERRIDE_2__RXPWR_OVERRIDE_VAL_2__SHIFT 0x0000000f
+#define PB1_PIF_PDNB_OVERRIDE_2__TX_PDNB_OVERRIDE_EN_2_MASK 0x00000001L
+#define PB1_PIF_PDNB_OVERRIDE_2__TX_PDNB_OVERRIDE_EN_2__SHIFT 0x00000000
+#define PB1_PIF_PDNB_OVERRIDE_2__TX_PDNB_OVERRIDE_VAL_2_MASK 0x0000000eL
+#define PB1_PIF_PDNB_OVERRIDE_2__TX_PDNB_OVERRIDE_VAL_2__SHIFT 0x00000001
+#define PB1_PIF_PDNB_OVERRIDE_2__TXPWR_OVERRIDE_EN_2_MASK 0x00000400L
+#define PB1_PIF_PDNB_OVERRIDE_2__TXPWR_OVERRIDE_EN_2__SHIFT 0x0000000a
+#define PB1_PIF_PDNB_OVERRIDE_2__TXPWR_OVERRIDE_VAL_2_MASK 0x00003800L
+#define PB1_PIF_PDNB_OVERRIDE_2__TXPWR_OVERRIDE_VAL_2__SHIFT 0x0000000b
+#define PB1_PIF_PDNB_OVERRIDE_3__RXEN_OVERRIDE_EN_3_MASK 0x00000100L
+#define PB1_PIF_PDNB_OVERRIDE_3__RXEN_OVERRIDE_EN_3__SHIFT 0x00000008
+#define PB1_PIF_PDNB_OVERRIDE_3__RXEN_OVERRIDE_VAL_3_MASK 0x00000200L
+#define PB1_PIF_PDNB_OVERRIDE_3__RXEN_OVERRIDE_VAL_3__SHIFT 0x00000009
+#define PB1_PIF_PDNB_OVERRIDE_3__RX_PDNB_OVERRIDE_EN_3_MASK 0x00000010L
+#define PB1_PIF_PDNB_OVERRIDE_3__RX_PDNB_OVERRIDE_EN_3__SHIFT 0x00000004
+#define PB1_PIF_PDNB_OVERRIDE_3__RX_PDNB_OVERRIDE_VAL_3_MASK 0x000000e0L
+#define PB1_PIF_PDNB_OVERRIDE_3__RX_PDNB_OVERRIDE_VAL_3__SHIFT 0x00000005
+#define PB1_PIF_PDNB_OVERRIDE_3__RXPWR_OVERRIDE_EN_3_MASK 0x00004000L
+#define PB1_PIF_PDNB_OVERRIDE_3__RXPWR_OVERRIDE_EN_3__SHIFT 0x0000000e
+#define PB1_PIF_PDNB_OVERRIDE_3__RXPWR_OVERRIDE_VAL_3_MASK 0x00038000L
+#define PB1_PIF_PDNB_OVERRIDE_3__RXPWR_OVERRIDE_VAL_3__SHIFT 0x0000000f
+#define PB1_PIF_PDNB_OVERRIDE_3__TX_PDNB_OVERRIDE_EN_3_MASK 0x00000001L
+#define PB1_PIF_PDNB_OVERRIDE_3__TX_PDNB_OVERRIDE_EN_3__SHIFT 0x00000000
+#define PB1_PIF_PDNB_OVERRIDE_3__TX_PDNB_OVERRIDE_VAL_3_MASK 0x0000000eL
+#define PB1_PIF_PDNB_OVERRIDE_3__TX_PDNB_OVERRIDE_VAL_3__SHIFT 0x00000001
+#define PB1_PIF_PDNB_OVERRIDE_3__TXPWR_OVERRIDE_EN_3_MASK 0x00000400L
+#define PB1_PIF_PDNB_OVERRIDE_3__TXPWR_OVERRIDE_EN_3__SHIFT 0x0000000a
+#define PB1_PIF_PDNB_OVERRIDE_3__TXPWR_OVERRIDE_VAL_3_MASK 0x00003800L
+#define PB1_PIF_PDNB_OVERRIDE_3__TXPWR_OVERRIDE_VAL_3__SHIFT 0x0000000b
+#define PB1_PIF_PDNB_OVERRIDE_4__RXEN_OVERRIDE_EN_4_MASK 0x00000100L
+#define PB1_PIF_PDNB_OVERRIDE_4__RXEN_OVERRIDE_EN_4__SHIFT 0x00000008
+#define PB1_PIF_PDNB_OVERRIDE_4__RXEN_OVERRIDE_VAL_4_MASK 0x00000200L
+#define PB1_PIF_PDNB_OVERRIDE_4__RXEN_OVERRIDE_VAL_4__SHIFT 0x00000009
+#define PB1_PIF_PDNB_OVERRIDE_4__RX_PDNB_OVERRIDE_EN_4_MASK 0x00000010L
+#define PB1_PIF_PDNB_OVERRIDE_4__RX_PDNB_OVERRIDE_EN_4__SHIFT 0x00000004
+#define PB1_PIF_PDNB_OVERRIDE_4__RX_PDNB_OVERRIDE_VAL_4_MASK 0x000000e0L
+#define PB1_PIF_PDNB_OVERRIDE_4__RX_PDNB_OVERRIDE_VAL_4__SHIFT 0x00000005
+#define PB1_PIF_PDNB_OVERRIDE_4__RXPWR_OVERRIDE_EN_4_MASK 0x00004000L
+#define PB1_PIF_PDNB_OVERRIDE_4__RXPWR_OVERRIDE_EN_4__SHIFT 0x0000000e
+#define PB1_PIF_PDNB_OVERRIDE_4__RXPWR_OVERRIDE_VAL_4_MASK 0x00038000L
+#define PB1_PIF_PDNB_OVERRIDE_4__RXPWR_OVERRIDE_VAL_4__SHIFT 0x0000000f
+#define PB1_PIF_PDNB_OVERRIDE_4__TX_PDNB_OVERRIDE_EN_4_MASK 0x00000001L
+#define PB1_PIF_PDNB_OVERRIDE_4__TX_PDNB_OVERRIDE_EN_4__SHIFT 0x00000000
+#define PB1_PIF_PDNB_OVERRIDE_4__TX_PDNB_OVERRIDE_VAL_4_MASK 0x0000000eL
+#define PB1_PIF_PDNB_OVERRIDE_4__TX_PDNB_OVERRIDE_VAL_4__SHIFT 0x00000001
+#define PB1_PIF_PDNB_OVERRIDE_4__TXPWR_OVERRIDE_EN_4_MASK 0x00000400L
+#define PB1_PIF_PDNB_OVERRIDE_4__TXPWR_OVERRIDE_EN_4__SHIFT 0x0000000a
+#define PB1_PIF_PDNB_OVERRIDE_4__TXPWR_OVERRIDE_VAL_4_MASK 0x00003800L
+#define PB1_PIF_PDNB_OVERRIDE_4__TXPWR_OVERRIDE_VAL_4__SHIFT 0x0000000b
+#define PB1_PIF_PDNB_OVERRIDE_5__RXEN_OVERRIDE_EN_5_MASK 0x00000100L
+#define PB1_PIF_PDNB_OVERRIDE_5__RXEN_OVERRIDE_EN_5__SHIFT 0x00000008
+#define PB1_PIF_PDNB_OVERRIDE_5__RXEN_OVERRIDE_VAL_5_MASK 0x00000200L
+#define PB1_PIF_PDNB_OVERRIDE_5__RXEN_OVERRIDE_VAL_5__SHIFT 0x00000009
+#define PB1_PIF_PDNB_OVERRIDE_5__RX_PDNB_OVERRIDE_EN_5_MASK 0x00000010L
+#define PB1_PIF_PDNB_OVERRIDE_5__RX_PDNB_OVERRIDE_EN_5__SHIFT 0x00000004
+#define PB1_PIF_PDNB_OVERRIDE_5__RX_PDNB_OVERRIDE_VAL_5_MASK 0x000000e0L
+#define PB1_PIF_PDNB_OVERRIDE_5__RX_PDNB_OVERRIDE_VAL_5__SHIFT 0x00000005
+#define PB1_PIF_PDNB_OVERRIDE_5__RXPWR_OVERRIDE_EN_5_MASK 0x00004000L
+#define PB1_PIF_PDNB_OVERRIDE_5__RXPWR_OVERRIDE_EN_5__SHIFT 0x0000000e
+#define PB1_PIF_PDNB_OVERRIDE_5__RXPWR_OVERRIDE_VAL_5_MASK 0x00038000L
+#define PB1_PIF_PDNB_OVERRIDE_5__RXPWR_OVERRIDE_VAL_5__SHIFT 0x0000000f
+#define PB1_PIF_PDNB_OVERRIDE_5__TX_PDNB_OVERRIDE_EN_5_MASK 0x00000001L
+#define PB1_PIF_PDNB_OVERRIDE_5__TX_PDNB_OVERRIDE_EN_5__SHIFT 0x00000000
+#define PB1_PIF_PDNB_OVERRIDE_5__TX_PDNB_OVERRIDE_VAL_5_MASK 0x0000000eL
+#define PB1_PIF_PDNB_OVERRIDE_5__TX_PDNB_OVERRIDE_VAL_5__SHIFT 0x00000001
+#define PB1_PIF_PDNB_OVERRIDE_5__TXPWR_OVERRIDE_EN_5_MASK 0x00000400L
+#define PB1_PIF_PDNB_OVERRIDE_5__TXPWR_OVERRIDE_EN_5__SHIFT 0x0000000a
+#define PB1_PIF_PDNB_OVERRIDE_5__TXPWR_OVERRIDE_VAL_5_MASK 0x00003800L
+#define PB1_PIF_PDNB_OVERRIDE_5__TXPWR_OVERRIDE_VAL_5__SHIFT 0x0000000b
+#define PB1_PIF_PDNB_OVERRIDE_6__RXEN_OVERRIDE_EN_6_MASK 0x00000100L
+#define PB1_PIF_PDNB_OVERRIDE_6__RXEN_OVERRIDE_EN_6__SHIFT 0x00000008
+#define PB1_PIF_PDNB_OVERRIDE_6__RXEN_OVERRIDE_VAL_6_MASK 0x00000200L
+#define PB1_PIF_PDNB_OVERRIDE_6__RXEN_OVERRIDE_VAL_6__SHIFT 0x00000009
+#define PB1_PIF_PDNB_OVERRIDE_6__RX_PDNB_OVERRIDE_EN_6_MASK 0x00000010L
+#define PB1_PIF_PDNB_OVERRIDE_6__RX_PDNB_OVERRIDE_EN_6__SHIFT 0x00000004
+#define PB1_PIF_PDNB_OVERRIDE_6__RX_PDNB_OVERRIDE_VAL_6_MASK 0x000000e0L
+#define PB1_PIF_PDNB_OVERRIDE_6__RX_PDNB_OVERRIDE_VAL_6__SHIFT 0x00000005
+#define PB1_PIF_PDNB_OVERRIDE_6__RXPWR_OVERRIDE_EN_6_MASK 0x00004000L
+#define PB1_PIF_PDNB_OVERRIDE_6__RXPWR_OVERRIDE_EN_6__SHIFT 0x0000000e
+#define PB1_PIF_PDNB_OVERRIDE_6__RXPWR_OVERRIDE_VAL_6_MASK 0x00038000L
+#define PB1_PIF_PDNB_OVERRIDE_6__RXPWR_OVERRIDE_VAL_6__SHIFT 0x0000000f
+#define PB1_PIF_PDNB_OVERRIDE_6__TX_PDNB_OVERRIDE_EN_6_MASK 0x00000001L
+#define PB1_PIF_PDNB_OVERRIDE_6__TX_PDNB_OVERRIDE_EN_6__SHIFT 0x00000000
+#define PB1_PIF_PDNB_OVERRIDE_6__TX_PDNB_OVERRIDE_VAL_6_MASK 0x0000000eL
+#define PB1_PIF_PDNB_OVERRIDE_6__TX_PDNB_OVERRIDE_VAL_6__SHIFT 0x00000001
+#define PB1_PIF_PDNB_OVERRIDE_6__TXPWR_OVERRIDE_EN_6_MASK 0x00000400L
+#define PB1_PIF_PDNB_OVERRIDE_6__TXPWR_OVERRIDE_EN_6__SHIFT 0x0000000a
+#define PB1_PIF_PDNB_OVERRIDE_6__TXPWR_OVERRIDE_VAL_6_MASK 0x00003800L
+#define PB1_PIF_PDNB_OVERRIDE_6__TXPWR_OVERRIDE_VAL_6__SHIFT 0x0000000b
+#define PB1_PIF_PDNB_OVERRIDE_7__RXEN_OVERRIDE_EN_7_MASK 0x00000100L
+#define PB1_PIF_PDNB_OVERRIDE_7__RXEN_OVERRIDE_EN_7__SHIFT 0x00000008
+#define PB1_PIF_PDNB_OVERRIDE_7__RXEN_OVERRIDE_VAL_7_MASK 0x00000200L
+#define PB1_PIF_PDNB_OVERRIDE_7__RXEN_OVERRIDE_VAL_7__SHIFT 0x00000009
+#define PB1_PIF_PDNB_OVERRIDE_7__RX_PDNB_OVERRIDE_EN_7_MASK 0x00000010L
+#define PB1_PIF_PDNB_OVERRIDE_7__RX_PDNB_OVERRIDE_EN_7__SHIFT 0x00000004
+#define PB1_PIF_PDNB_OVERRIDE_7__RX_PDNB_OVERRIDE_VAL_7_MASK 0x000000e0L
+#define PB1_PIF_PDNB_OVERRIDE_7__RX_PDNB_OVERRIDE_VAL_7__SHIFT 0x00000005
+#define PB1_PIF_PDNB_OVERRIDE_7__RXPWR_OVERRIDE_EN_7_MASK 0x00004000L
+#define PB1_PIF_PDNB_OVERRIDE_7__RXPWR_OVERRIDE_EN_7__SHIFT 0x0000000e
+#define PB1_PIF_PDNB_OVERRIDE_7__RXPWR_OVERRIDE_VAL_7_MASK 0x00038000L
+#define PB1_PIF_PDNB_OVERRIDE_7__RXPWR_OVERRIDE_VAL_7__SHIFT 0x0000000f
+#define PB1_PIF_PDNB_OVERRIDE_7__TX_PDNB_OVERRIDE_EN_7_MASK 0x00000001L
+#define PB1_PIF_PDNB_OVERRIDE_7__TX_PDNB_OVERRIDE_EN_7__SHIFT 0x00000000
+#define PB1_PIF_PDNB_OVERRIDE_7__TX_PDNB_OVERRIDE_VAL_7_MASK 0x0000000eL
+#define PB1_PIF_PDNB_OVERRIDE_7__TX_PDNB_OVERRIDE_VAL_7__SHIFT 0x00000001
+#define PB1_PIF_PDNB_OVERRIDE_7__TXPWR_OVERRIDE_EN_7_MASK 0x00000400L
+#define PB1_PIF_PDNB_OVERRIDE_7__TXPWR_OVERRIDE_EN_7__SHIFT 0x0000000a
+#define PB1_PIF_PDNB_OVERRIDE_7__TXPWR_OVERRIDE_VAL_7_MASK 0x00003800L
+#define PB1_PIF_PDNB_OVERRIDE_7__TXPWR_OVERRIDE_VAL_7__SHIFT 0x0000000b
+#define PB1_PIF_PDNB_OVERRIDE_8__RXEN_OVERRIDE_EN_8_MASK 0x00000100L
+#define PB1_PIF_PDNB_OVERRIDE_8__RXEN_OVERRIDE_EN_8__SHIFT 0x00000008
+#define PB1_PIF_PDNB_OVERRIDE_8__RXEN_OVERRIDE_VAL_8_MASK 0x00000200L
+#define PB1_PIF_PDNB_OVERRIDE_8__RXEN_OVERRIDE_VAL_8__SHIFT 0x00000009
+#define PB1_PIF_PDNB_OVERRIDE_8__RX_PDNB_OVERRIDE_EN_8_MASK 0x00000010L
+#define PB1_PIF_PDNB_OVERRIDE_8__RX_PDNB_OVERRIDE_EN_8__SHIFT 0x00000004
+#define PB1_PIF_PDNB_OVERRIDE_8__RX_PDNB_OVERRIDE_VAL_8_MASK 0x000000e0L
+#define PB1_PIF_PDNB_OVERRIDE_8__RX_PDNB_OVERRIDE_VAL_8__SHIFT 0x00000005
+#define PB1_PIF_PDNB_OVERRIDE_8__RXPWR_OVERRIDE_EN_8_MASK 0x00004000L
+#define PB1_PIF_PDNB_OVERRIDE_8__RXPWR_OVERRIDE_EN_8__SHIFT 0x0000000e
+#define PB1_PIF_PDNB_OVERRIDE_8__RXPWR_OVERRIDE_VAL_8_MASK 0x00038000L
+#define PB1_PIF_PDNB_OVERRIDE_8__RXPWR_OVERRIDE_VAL_8__SHIFT 0x0000000f
+#define PB1_PIF_PDNB_OVERRIDE_8__TX_PDNB_OVERRIDE_EN_8_MASK 0x00000001L
+#define PB1_PIF_PDNB_OVERRIDE_8__TX_PDNB_OVERRIDE_EN_8__SHIFT 0x00000000
+#define PB1_PIF_PDNB_OVERRIDE_8__TX_PDNB_OVERRIDE_VAL_8_MASK 0x0000000eL
+#define PB1_PIF_PDNB_OVERRIDE_8__TX_PDNB_OVERRIDE_VAL_8__SHIFT 0x00000001
+#define PB1_PIF_PDNB_OVERRIDE_8__TXPWR_OVERRIDE_EN_8_MASK 0x00000400L
+#define PB1_PIF_PDNB_OVERRIDE_8__TXPWR_OVERRIDE_EN_8__SHIFT 0x0000000a
+#define PB1_PIF_PDNB_OVERRIDE_8__TXPWR_OVERRIDE_VAL_8_MASK 0x00003800L
+#define PB1_PIF_PDNB_OVERRIDE_8__TXPWR_OVERRIDE_VAL_8__SHIFT 0x0000000b
+#define PB1_PIF_PDNB_OVERRIDE_9__RXEN_OVERRIDE_EN_9_MASK 0x00000100L
+#define PB1_PIF_PDNB_OVERRIDE_9__RXEN_OVERRIDE_EN_9__SHIFT 0x00000008
+#define PB1_PIF_PDNB_OVERRIDE_9__RXEN_OVERRIDE_VAL_9_MASK 0x00000200L
+#define PB1_PIF_PDNB_OVERRIDE_9__RXEN_OVERRIDE_VAL_9__SHIFT 0x00000009
+#define PB1_PIF_PDNB_OVERRIDE_9__RX_PDNB_OVERRIDE_EN_9_MASK 0x00000010L
+#define PB1_PIF_PDNB_OVERRIDE_9__RX_PDNB_OVERRIDE_EN_9__SHIFT 0x00000004
+#define PB1_PIF_PDNB_OVERRIDE_9__RX_PDNB_OVERRIDE_VAL_9_MASK 0x000000e0L
+#define PB1_PIF_PDNB_OVERRIDE_9__RX_PDNB_OVERRIDE_VAL_9__SHIFT 0x00000005
+#define PB1_PIF_PDNB_OVERRIDE_9__RXPWR_OVERRIDE_EN_9_MASK 0x00004000L
+#define PB1_PIF_PDNB_OVERRIDE_9__RXPWR_OVERRIDE_EN_9__SHIFT 0x0000000e
+#define PB1_PIF_PDNB_OVERRIDE_9__RXPWR_OVERRIDE_VAL_9_MASK 0x00038000L
+#define PB1_PIF_PDNB_OVERRIDE_9__RXPWR_OVERRIDE_VAL_9__SHIFT 0x0000000f
+#define PB1_PIF_PDNB_OVERRIDE_9__TX_PDNB_OVERRIDE_EN_9_MASK 0x00000001L
+#define PB1_PIF_PDNB_OVERRIDE_9__TX_PDNB_OVERRIDE_EN_9__SHIFT 0x00000000
+#define PB1_PIF_PDNB_OVERRIDE_9__TX_PDNB_OVERRIDE_VAL_9_MASK 0x0000000eL
+#define PB1_PIF_PDNB_OVERRIDE_9__TX_PDNB_OVERRIDE_VAL_9__SHIFT 0x00000001
+#define PB1_PIF_PDNB_OVERRIDE_9__TXPWR_OVERRIDE_EN_9_MASK 0x00000400L
+#define PB1_PIF_PDNB_OVERRIDE_9__TXPWR_OVERRIDE_EN_9__SHIFT 0x0000000a
+#define PB1_PIF_PDNB_OVERRIDE_9__TXPWR_OVERRIDE_VAL_9_MASK 0x00003800L
+#define PB1_PIF_PDNB_OVERRIDE_9__TXPWR_OVERRIDE_VAL_9__SHIFT 0x0000000b
+#define PB1_PIF_PWRDOWN_0__FORCE_RXEN_IN_L0s_0_MASK 0x00000008L
+#define PB1_PIF_PWRDOWN_0__FORCE_RXEN_IN_L0s_0__SHIFT 0x00000003
+#define PB1_PIF_PWRDOWN_0__PLL_POWER_STATE_IN_OFF_0_MASK 0x00001c00L
+#define PB1_PIF_PWRDOWN_0__PLL_POWER_STATE_IN_OFF_0__SHIFT 0x0000000a
+#define PB1_PIF_PWRDOWN_0__PLL_POWER_STATE_IN_TXS2_0_MASK 0x00000380L
+#define PB1_PIF_PWRDOWN_0__PLL_POWER_STATE_IN_TXS2_0__SHIFT 0x00000007
+#define PB1_PIF_PWRDOWN_0__PLLPWR_OVERRIDE_EN_0_MASK 0x10000000L
+#define PB1_PIF_PWRDOWN_0__PLLPWR_OVERRIDE_EN_0__SHIFT 0x0000001c
+#define PB1_PIF_PWRDOWN_0__PLLPWR_OVERRIDE_VAL_0_MASK 0xe0000000L
+#define PB1_PIF_PWRDOWN_0__PLLPWR_OVERRIDE_VAL_0__SHIFT 0x0000001d
+#define PB1_PIF_PWRDOWN_0__PLL_RAMP_UP_TIME_0_MASK 0x07000000L
+#define PB1_PIF_PWRDOWN_0__PLL_RAMP_UP_TIME_0__SHIFT 0x00000018
+#define PB1_PIF_PWRDOWN_0__RX_POWER_STATE_IN_RXS2_0_MASK 0x00000070L
+#define PB1_PIF_PWRDOWN_0__RX_POWER_STATE_IN_RXS2_0__SHIFT 0x00000004
+#define PB1_PIF_PWRDOWN_0__TX2P5CLK_CLOCK_GATING_EN_0_MASK 0x00010000L
+#define PB1_PIF_PWRDOWN_0__TX2P5CLK_CLOCK_GATING_EN_0__SHIFT 0x00000010
+#define PB1_PIF_PWRDOWN_0__TX_POWER_STATE_IN_TXS2_0_MASK 0x00000007L
+#define PB1_PIF_PWRDOWN_0__TX_POWER_STATE_IN_TXS2_0__SHIFT 0x00000000
+#define PB1_PIF_PWRDOWN_1__FORCE_RXEN_IN_L0s_1_MASK 0x00000008L
+#define PB1_PIF_PWRDOWN_1__FORCE_RXEN_IN_L0s_1__SHIFT 0x00000003
+#define PB1_PIF_PWRDOWN_1__PLL_POWER_STATE_IN_OFF_1_MASK 0x00001c00L
+#define PB1_PIF_PWRDOWN_1__PLL_POWER_STATE_IN_OFF_1__SHIFT 0x0000000a
+#define PB1_PIF_PWRDOWN_1__PLL_POWER_STATE_IN_TXS2_1_MASK 0x00000380L
+#define PB1_PIF_PWRDOWN_1__PLL_POWER_STATE_IN_TXS2_1__SHIFT 0x00000007
+#define PB1_PIF_PWRDOWN_1__PLLPWR_OVERRIDE_EN_1_MASK 0x10000000L
+#define PB1_PIF_PWRDOWN_1__PLLPWR_OVERRIDE_EN_1__SHIFT 0x0000001c
+#define PB1_PIF_PWRDOWN_1__PLLPWR_OVERRIDE_VAL_1_MASK 0xe0000000L
+#define PB1_PIF_PWRDOWN_1__PLLPWR_OVERRIDE_VAL_1__SHIFT 0x0000001d
+#define PB1_PIF_PWRDOWN_1__PLL_RAMP_UP_TIME_1_MASK 0x07000000L
+#define PB1_PIF_PWRDOWN_1__PLL_RAMP_UP_TIME_1__SHIFT 0x00000018
+#define PB1_PIF_PWRDOWN_1__RX_POWER_STATE_IN_RXS2_1_MASK 0x00000070L
+#define PB1_PIF_PWRDOWN_1__RX_POWER_STATE_IN_RXS2_1__SHIFT 0x00000004
+#define PB1_PIF_PWRDOWN_1__TX2P5CLK_CLOCK_GATING_EN_1_MASK 0x00010000L
+#define PB1_PIF_PWRDOWN_1__TX2P5CLK_CLOCK_GATING_EN_1__SHIFT 0x00000010
+#define PB1_PIF_PWRDOWN_1__TX_POWER_STATE_IN_TXS2_1_MASK 0x00000007L
+#define PB1_PIF_PWRDOWN_1__TX_POWER_STATE_IN_TXS2_1__SHIFT 0x00000000
+#define PB1_PIF_PWRDOWN_2__FORCE_RXEN_IN_L0s_2_MASK 0x00000008L
+#define PB1_PIF_PWRDOWN_2__FORCE_RXEN_IN_L0s_2__SHIFT 0x00000003
+#define PB1_PIF_PWRDOWN_2__PLL_POWER_STATE_IN_OFF_2_MASK 0x00001c00L
+#define PB1_PIF_PWRDOWN_2__PLL_POWER_STATE_IN_OFF_2__SHIFT 0x0000000a
+#define PB1_PIF_PWRDOWN_2__PLL_POWER_STATE_IN_TXS2_2_MASK 0x00000380L
+#define PB1_PIF_PWRDOWN_2__PLL_POWER_STATE_IN_TXS2_2__SHIFT 0x00000007
+#define PB1_PIF_PWRDOWN_2__PLLPWR_OVERRIDE_EN_2_MASK 0x10000000L
+#define PB1_PIF_PWRDOWN_2__PLLPWR_OVERRIDE_EN_2__SHIFT 0x0000001c
+#define PB1_PIF_PWRDOWN_2__PLLPWR_OVERRIDE_VAL_2_MASK 0xe0000000L
+#define PB1_PIF_PWRDOWN_2__PLLPWR_OVERRIDE_VAL_2__SHIFT 0x0000001d
+#define PB1_PIF_PWRDOWN_2__PLL_RAMP_UP_TIME_2_MASK 0x07000000L
+#define PB1_PIF_PWRDOWN_2__PLL_RAMP_UP_TIME_2__SHIFT 0x00000018
+#define PB1_PIF_PWRDOWN_2__RX_POWER_STATE_IN_RXS2_2_MASK 0x00000070L
+#define PB1_PIF_PWRDOWN_2__RX_POWER_STATE_IN_RXS2_2__SHIFT 0x00000004
+#define PB1_PIF_PWRDOWN_2__TX2P5CLK_CLOCK_GATING_EN_2_MASK 0x00010000L
+#define PB1_PIF_PWRDOWN_2__TX2P5CLK_CLOCK_GATING_EN_2__SHIFT 0x00000010
+#define PB1_PIF_PWRDOWN_2__TX_POWER_STATE_IN_TXS2_2_MASK 0x00000007L
+#define PB1_PIF_PWRDOWN_2__TX_POWER_STATE_IN_TXS2_2__SHIFT 0x00000000
+#define PB1_PIF_PWRDOWN_3__FORCE_RXEN_IN_L0s_3_MASK 0x00000008L
+#define PB1_PIF_PWRDOWN_3__FORCE_RXEN_IN_L0s_3__SHIFT 0x00000003
+#define PB1_PIF_PWRDOWN_3__PLL_POWER_STATE_IN_OFF_3_MASK 0x00001c00L
+#define PB1_PIF_PWRDOWN_3__PLL_POWER_STATE_IN_OFF_3__SHIFT 0x0000000a
+#define PB1_PIF_PWRDOWN_3__PLL_POWER_STATE_IN_TXS2_3_MASK 0x00000380L
+#define PB1_PIF_PWRDOWN_3__PLL_POWER_STATE_IN_TXS2_3__SHIFT 0x00000007
+#define PB1_PIF_PWRDOWN_3__PLLPWR_OVERRIDE_EN_3_MASK 0x10000000L
+#define PB1_PIF_PWRDOWN_3__PLLPWR_OVERRIDE_EN_3__SHIFT 0x0000001c
+#define PB1_PIF_PWRDOWN_3__PLLPWR_OVERRIDE_VAL_3_MASK 0xe0000000L
+#define PB1_PIF_PWRDOWN_3__PLLPWR_OVERRIDE_VAL_3__SHIFT 0x0000001d
+#define PB1_PIF_PWRDOWN_3__PLL_RAMP_UP_TIME_3_MASK 0x07000000L
+#define PB1_PIF_PWRDOWN_3__PLL_RAMP_UP_TIME_3__SHIFT 0x00000018
+#define PB1_PIF_PWRDOWN_3__RX_POWER_STATE_IN_RXS2_3_MASK 0x00000070L
+#define PB1_PIF_PWRDOWN_3__RX_POWER_STATE_IN_RXS2_3__SHIFT 0x00000004
+#define PB1_PIF_PWRDOWN_3__TX2P5CLK_CLOCK_GATING_EN_3_MASK 0x00010000L
+#define PB1_PIF_PWRDOWN_3__TX2P5CLK_CLOCK_GATING_EN_3__SHIFT 0x00000010
+#define PB1_PIF_PWRDOWN_3__TX_POWER_STATE_IN_TXS2_3_MASK 0x00000007L
+#define PB1_PIF_PWRDOWN_3__TX_POWER_STATE_IN_TXS2_3__SHIFT 0x00000000
+#define PB1_PIF_SC_CTL__SC_CALIBRATION_MASK 0x00000001L
+#define PB1_PIF_SC_CTL__SC_CALIBRATION__SHIFT 0x00000000
+#define PB1_PIF_SC_CTL__SC_ENTER_L1_FROM_L0_MASK 0x00000020L
+#define PB1_PIF_SC_CTL__SC_ENTER_L1_FROM_L0__SHIFT 0x00000005
+#define PB1_PIF_SC_CTL__SC_ENTER_L1_FROM_L0S_MASK 0x00000010L
+#define PB1_PIF_SC_CTL__SC_ENTER_L1_FROM_L0S__SHIFT 0x00000004
+#define PB1_PIF_SC_CTL__SC_EXIT_L1_TO_L0_MASK 0x00000008L
+#define PB1_PIF_SC_CTL__SC_EXIT_L1_TO_L0__SHIFT 0x00000003
+#define PB1_PIF_SC_CTL__SC_EXIT_L1_TO_L0S_MASK 0x00000004L
+#define PB1_PIF_SC_CTL__SC_EXIT_L1_TO_L0S__SHIFT 0x00000002
+#define PB1_PIF_SC_CTL__SC_LANE_0_RESUME_MASK 0x00010000L
+#define PB1_PIF_SC_CTL__SC_LANE_0_RESUME__SHIFT 0x00000010
+#define PB1_PIF_SC_CTL__SC_LANE_10_RESUME_MASK 0x04000000L
+#define PB1_PIF_SC_CTL__SC_LANE_10_RESUME__SHIFT 0x0000001a
+#define PB1_PIF_SC_CTL__SC_LANE_11_RESUME_MASK 0x08000000L
+#define PB1_PIF_SC_CTL__SC_LANE_11_RESUME__SHIFT 0x0000001b
+#define PB1_PIF_SC_CTL__SC_LANE_12_RESUME_MASK 0x10000000L
+#define PB1_PIF_SC_CTL__SC_LANE_12_RESUME__SHIFT 0x0000001c
+#define PB1_PIF_SC_CTL__SC_LANE_13_RESUME_MASK 0x20000000L
+#define PB1_PIF_SC_CTL__SC_LANE_13_RESUME__SHIFT 0x0000001d
+#define PB1_PIF_SC_CTL__SC_LANE_14_RESUME_MASK 0x40000000L
+#define PB1_PIF_SC_CTL__SC_LANE_14_RESUME__SHIFT 0x0000001e
+#define PB1_PIF_SC_CTL__SC_LANE_15_RESUME_MASK 0x80000000L
+#define PB1_PIF_SC_CTL__SC_LANE_15_RESUME__SHIFT 0x0000001f
+#define PB1_PIF_SC_CTL__SC_LANE_1_RESUME_MASK 0x00020000L
+#define PB1_PIF_SC_CTL__SC_LANE_1_RESUME__SHIFT 0x00000011
+#define PB1_PIF_SC_CTL__SC_LANE_2_RESUME_MASK 0x00040000L
+#define PB1_PIF_SC_CTL__SC_LANE_2_RESUME__SHIFT 0x00000012
+#define PB1_PIF_SC_CTL__SC_LANE_3_RESUME_MASK 0x00080000L
+#define PB1_PIF_SC_CTL__SC_LANE_3_RESUME__SHIFT 0x00000013
+#define PB1_PIF_SC_CTL__SC_LANE_4_RESUME_MASK 0x00100000L
+#define PB1_PIF_SC_CTL__SC_LANE_4_RESUME__SHIFT 0x00000014
+#define PB1_PIF_SC_CTL__SC_LANE_5_RESUME_MASK 0x00200000L
+#define PB1_PIF_SC_CTL__SC_LANE_5_RESUME__SHIFT 0x00000015
+#define PB1_PIF_SC_CTL__SC_LANE_6_RESUME_MASK 0x00400000L
+#define PB1_PIF_SC_CTL__SC_LANE_6_RESUME__SHIFT 0x00000016
+#define PB1_PIF_SC_CTL__SC_LANE_7_RESUME_MASK 0x00800000L
+#define PB1_PIF_SC_CTL__SC_LANE_7_RESUME__SHIFT 0x00000017
+#define PB1_PIF_SC_CTL__SC_LANE_8_RESUME_MASK 0x01000000L
+#define PB1_PIF_SC_CTL__SC_LANE_8_RESUME__SHIFT 0x00000018
+#define PB1_PIF_SC_CTL__SC_LANE_9_RESUME_MASK 0x02000000L
+#define PB1_PIF_SC_CTL__SC_LANE_9_RESUME__SHIFT 0x00000019
+#define PB1_PIF_SC_CTL__SC_PHASE_1_MASK 0x00000100L
+#define PB1_PIF_SC_CTL__SC_PHASE_1__SHIFT 0x00000008
+#define PB1_PIF_SC_CTL__SC_PHASE_2_MASK 0x00000200L
+#define PB1_PIF_SC_CTL__SC_PHASE_2__SHIFT 0x00000009
+#define PB1_PIF_SC_CTL__SC_PHASE_3_MASK 0x00000400L
+#define PB1_PIF_SC_CTL__SC_PHASE_3__SHIFT 0x0000000a
+#define PB1_PIF_SC_CTL__SC_PHASE_4_MASK 0x00000800L
+#define PB1_PIF_SC_CTL__SC_PHASE_4__SHIFT 0x0000000b
+#define PB1_PIF_SC_CTL__SC_PHASE_5_MASK 0x00001000L
+#define PB1_PIF_SC_CTL__SC_PHASE_5__SHIFT 0x0000000c
+#define PB1_PIF_SC_CTL__SC_PHASE_6_MASK 0x00002000L
+#define PB1_PIF_SC_CTL__SC_PHASE_6__SHIFT 0x0000000d
+#define PB1_PIF_SC_CTL__SC_PHASE_7_MASK 0x00004000L
+#define PB1_PIF_SC_CTL__SC_PHASE_7__SHIFT 0x0000000e
+#define PB1_PIF_SC_CTL__SC_PHASE_8_MASK 0x00008000L
+#define PB1_PIF_SC_CTL__SC_PHASE_8__SHIFT 0x0000000f
+#define PB1_PIF_SC_CTL__SC_RXDETECT_MASK 0x00000002L
+#define PB1_PIF_SC_CTL__SC_RXDETECT__SHIFT 0x00000001
+#define PB1_PIF_SC_CTL__SC_SPEED_CHANGE_MASK 0x00000040L
+#define PB1_PIF_SC_CTL__SC_SPEED_CHANGE__SHIFT 0x00000006
+#define PB1_PIF_SCRATCH__PIF_SCRATCH_MASK 0xffffffffL
+#define PB1_PIF_SCRATCH__PIF_SCRATCH__SHIFT 0x00000000
+#define PB1_PIF_SEQ_STATUS_0__SEQ_CALIBRATION_0_MASK 0x00000001L
+#define PB1_PIF_SEQ_STATUS_0__SEQ_CALIBRATION_0__SHIFT 0x00000000
+#define PB1_PIF_SEQ_STATUS_0__SEQ_ENTER_L1_FROM_L0_0_MASK 0x00000020L
+#define PB1_PIF_SEQ_STATUS_0__SEQ_ENTER_L1_FROM_L0_0__SHIFT 0x00000005
+#define PB1_PIF_SEQ_STATUS_0__SEQ_ENTER_L1_FROM_L0S_0_MASK 0x00000010L
+#define PB1_PIF_SEQ_STATUS_0__SEQ_ENTER_L1_FROM_L0S_0__SHIFT 0x00000004
+#define PB1_PIF_SEQ_STATUS_0__SEQ_EXIT_L1_TO_L0_0_MASK 0x00000008L
+#define PB1_PIF_SEQ_STATUS_0__SEQ_EXIT_L1_TO_L0_0__SHIFT 0x00000003
+#define PB1_PIF_SEQ_STATUS_0__SEQ_EXIT_L1_TO_L0S_0_MASK 0x00000004L
+#define PB1_PIF_SEQ_STATUS_0__SEQ_EXIT_L1_TO_L0S_0__SHIFT 0x00000002
+#define PB1_PIF_SEQ_STATUS_0__SEQ_PHASE_0_MASK 0x00000700L
+#define PB1_PIF_SEQ_STATUS_0__SEQ_PHASE_0__SHIFT 0x00000008
+#define PB1_PIF_SEQ_STATUS_0__SEQ_RXDETECT_0_MASK 0x00000002L
+#define PB1_PIF_SEQ_STATUS_0__SEQ_RXDETECT_0__SHIFT 0x00000001
+#define PB1_PIF_SEQ_STATUS_0__SEQ_SPEED_CHANGE_0_MASK 0x00000040L
+#define PB1_PIF_SEQ_STATUS_0__SEQ_SPEED_CHANGE_0__SHIFT 0x00000006
+#define PB1_PIF_SEQ_STATUS_10__SEQ_CALIBRATION_10_MASK 0x00000001L
+#define PB1_PIF_SEQ_STATUS_10__SEQ_CALIBRATION_10__SHIFT 0x00000000
+#define PB1_PIF_SEQ_STATUS_10__SEQ_ENTER_L1_FROM_L0_10_MASK 0x00000020L
+#define PB1_PIF_SEQ_STATUS_10__SEQ_ENTER_L1_FROM_L0_10__SHIFT 0x00000005
+#define PB1_PIF_SEQ_STATUS_10__SEQ_ENTER_L1_FROM_L0S_10_MASK 0x00000010L
+#define PB1_PIF_SEQ_STATUS_10__SEQ_ENTER_L1_FROM_L0S_10__SHIFT 0x00000004
+#define PB1_PIF_SEQ_STATUS_10__SEQ_EXIT_L1_TO_L0_10_MASK 0x00000008L
+#define PB1_PIF_SEQ_STATUS_10__SEQ_EXIT_L1_TO_L0_10__SHIFT 0x00000003
+#define PB1_PIF_SEQ_STATUS_10__SEQ_EXIT_L1_TO_L0S_10_MASK 0x00000004L
+#define PB1_PIF_SEQ_STATUS_10__SEQ_EXIT_L1_TO_L0S_10__SHIFT 0x00000002
+#define PB1_PIF_SEQ_STATUS_10__SEQ_PHASE_10_MASK 0x00000700L
+#define PB1_PIF_SEQ_STATUS_10__SEQ_PHASE_10__SHIFT 0x00000008
+#define PB1_PIF_SEQ_STATUS_10__SEQ_RXDETECT_10_MASK 0x00000002L
+#define PB1_PIF_SEQ_STATUS_10__SEQ_RXDETECT_10__SHIFT 0x00000001
+#define PB1_PIF_SEQ_STATUS_10__SEQ_SPEED_CHANGE_10_MASK 0x00000040L
+#define PB1_PIF_SEQ_STATUS_10__SEQ_SPEED_CHANGE_10__SHIFT 0x00000006
+#define PB1_PIF_SEQ_STATUS_11__SEQ_CALIBRATION_11_MASK 0x00000001L
+#define PB1_PIF_SEQ_STATUS_11__SEQ_CALIBRATION_11__SHIFT 0x00000000
+#define PB1_PIF_SEQ_STATUS_11__SEQ_ENTER_L1_FROM_L0_11_MASK 0x00000020L
+#define PB1_PIF_SEQ_STATUS_11__SEQ_ENTER_L1_FROM_L0_11__SHIFT 0x00000005
+#define PB1_PIF_SEQ_STATUS_11__SEQ_ENTER_L1_FROM_L0S_11_MASK 0x00000010L
+#define PB1_PIF_SEQ_STATUS_11__SEQ_ENTER_L1_FROM_L0S_11__SHIFT 0x00000004
+#define PB1_PIF_SEQ_STATUS_11__SEQ_EXIT_L1_TO_L0_11_MASK 0x00000008L
+#define PB1_PIF_SEQ_STATUS_11__SEQ_EXIT_L1_TO_L0_11__SHIFT 0x00000003
+#define PB1_PIF_SEQ_STATUS_11__SEQ_EXIT_L1_TO_L0S_11_MASK 0x00000004L
+#define PB1_PIF_SEQ_STATUS_11__SEQ_EXIT_L1_TO_L0S_11__SHIFT 0x00000002
+#define PB1_PIF_SEQ_STATUS_11__SEQ_PHASE_11_MASK 0x00000700L
+#define PB1_PIF_SEQ_STATUS_11__SEQ_PHASE_11__SHIFT 0x00000008
+#define PB1_PIF_SEQ_STATUS_11__SEQ_RXDETECT_11_MASK 0x00000002L
+#define PB1_PIF_SEQ_STATUS_11__SEQ_RXDETECT_11__SHIFT 0x00000001
+#define PB1_PIF_SEQ_STATUS_11__SEQ_SPEED_CHANGE_11_MASK 0x00000040L
+#define PB1_PIF_SEQ_STATUS_11__SEQ_SPEED_CHANGE_11__SHIFT 0x00000006
+#define PB1_PIF_SEQ_STATUS_12__SEQ_CALIBRATION_12_MASK 0x00000001L
+#define PB1_PIF_SEQ_STATUS_12__SEQ_CALIBRATION_12__SHIFT 0x00000000
+#define PB1_PIF_SEQ_STATUS_12__SEQ_ENTER_L1_FROM_L0_12_MASK 0x00000020L
+#define PB1_PIF_SEQ_STATUS_12__SEQ_ENTER_L1_FROM_L0_12__SHIFT 0x00000005
+#define PB1_PIF_SEQ_STATUS_12__SEQ_ENTER_L1_FROM_L0S_12_MASK 0x00000010L
+#define PB1_PIF_SEQ_STATUS_12__SEQ_ENTER_L1_FROM_L0S_12__SHIFT 0x00000004
+#define PB1_PIF_SEQ_STATUS_12__SEQ_EXIT_L1_TO_L0_12_MASK 0x00000008L
+#define PB1_PIF_SEQ_STATUS_12__SEQ_EXIT_L1_TO_L0_12__SHIFT 0x00000003
+#define PB1_PIF_SEQ_STATUS_12__SEQ_EXIT_L1_TO_L0S_12_MASK 0x00000004L
+#define PB1_PIF_SEQ_STATUS_12__SEQ_EXIT_L1_TO_L0S_12__SHIFT 0x00000002
+#define PB1_PIF_SEQ_STATUS_12__SEQ_PHASE_12_MASK 0x00000700L
+#define PB1_PIF_SEQ_STATUS_12__SEQ_PHASE_12__SHIFT 0x00000008
+#define PB1_PIF_SEQ_STATUS_12__SEQ_RXDETECT_12_MASK 0x00000002L
+#define PB1_PIF_SEQ_STATUS_12__SEQ_RXDETECT_12__SHIFT 0x00000001
+#define PB1_PIF_SEQ_STATUS_12__SEQ_SPEED_CHANGE_12_MASK 0x00000040L
+#define PB1_PIF_SEQ_STATUS_12__SEQ_SPEED_CHANGE_12__SHIFT 0x00000006
+#define PB1_PIF_SEQ_STATUS_13__SEQ_CALIBRATION_13_MASK 0x00000001L
+#define PB1_PIF_SEQ_STATUS_13__SEQ_CALIBRATION_13__SHIFT 0x00000000
+#define PB1_PIF_SEQ_STATUS_13__SEQ_ENTER_L1_FROM_L0_13_MASK 0x00000020L
+#define PB1_PIF_SEQ_STATUS_13__SEQ_ENTER_L1_FROM_L0_13__SHIFT 0x00000005
+#define PB1_PIF_SEQ_STATUS_13__SEQ_ENTER_L1_FROM_L0S_13_MASK 0x00000010L
+#define PB1_PIF_SEQ_STATUS_13__SEQ_ENTER_L1_FROM_L0S_13__SHIFT 0x00000004
+#define PB1_PIF_SEQ_STATUS_13__SEQ_EXIT_L1_TO_L0_13_MASK 0x00000008L
+#define PB1_PIF_SEQ_STATUS_13__SEQ_EXIT_L1_TO_L0_13__SHIFT 0x00000003
+#define PB1_PIF_SEQ_STATUS_13__SEQ_EXIT_L1_TO_L0S_13_MASK 0x00000004L
+#define PB1_PIF_SEQ_STATUS_13__SEQ_EXIT_L1_TO_L0S_13__SHIFT 0x00000002
+#define PB1_PIF_SEQ_STATUS_13__SEQ_PHASE_13_MASK 0x00000700L
+#define PB1_PIF_SEQ_STATUS_13__SEQ_PHASE_13__SHIFT 0x00000008
+#define PB1_PIF_SEQ_STATUS_13__SEQ_RXDETECT_13_MASK 0x00000002L
+#define PB1_PIF_SEQ_STATUS_13__SEQ_RXDETECT_13__SHIFT 0x00000001
+#define PB1_PIF_SEQ_STATUS_13__SEQ_SPEED_CHANGE_13_MASK 0x00000040L
+#define PB1_PIF_SEQ_STATUS_13__SEQ_SPEED_CHANGE_13__SHIFT 0x00000006
+#define PB1_PIF_SEQ_STATUS_14__SEQ_CALIBRATION_14_MASK 0x00000001L
+#define PB1_PIF_SEQ_STATUS_14__SEQ_CALIBRATION_14__SHIFT 0x00000000
+#define PB1_PIF_SEQ_STATUS_14__SEQ_ENTER_L1_FROM_L0_14_MASK 0x00000020L
+#define PB1_PIF_SEQ_STATUS_14__SEQ_ENTER_L1_FROM_L0_14__SHIFT 0x00000005
+#define PB1_PIF_SEQ_STATUS_14__SEQ_ENTER_L1_FROM_L0S_14_MASK 0x00000010L
+#define PB1_PIF_SEQ_STATUS_14__SEQ_ENTER_L1_FROM_L0S_14__SHIFT 0x00000004
+#define PB1_PIF_SEQ_STATUS_14__SEQ_EXIT_L1_TO_L0_14_MASK 0x00000008L
+#define PB1_PIF_SEQ_STATUS_14__SEQ_EXIT_L1_TO_L0_14__SHIFT 0x00000003
+#define PB1_PIF_SEQ_STATUS_14__SEQ_EXIT_L1_TO_L0S_14_MASK 0x00000004L
+#define PB1_PIF_SEQ_STATUS_14__SEQ_EXIT_L1_TO_L0S_14__SHIFT 0x00000002
+#define PB1_PIF_SEQ_STATUS_14__SEQ_PHASE_14_MASK 0x00000700L
+#define PB1_PIF_SEQ_STATUS_14__SEQ_PHASE_14__SHIFT 0x00000008
+#define PB1_PIF_SEQ_STATUS_14__SEQ_RXDETECT_14_MASK 0x00000002L
+#define PB1_PIF_SEQ_STATUS_14__SEQ_RXDETECT_14__SHIFT 0x00000001
+#define PB1_PIF_SEQ_STATUS_14__SEQ_SPEED_CHANGE_14_MASK 0x00000040L
+#define PB1_PIF_SEQ_STATUS_14__SEQ_SPEED_CHANGE_14__SHIFT 0x00000006
+#define PB1_PIF_SEQ_STATUS_15__SEQ_CALIBRATION_15_MASK 0x00000001L
+#define PB1_PIF_SEQ_STATUS_15__SEQ_CALIBRATION_15__SHIFT 0x00000000
+#define PB1_PIF_SEQ_STATUS_15__SEQ_ENTER_L1_FROM_L0_15_MASK 0x00000020L
+#define PB1_PIF_SEQ_STATUS_15__SEQ_ENTER_L1_FROM_L0_15__SHIFT 0x00000005
+#define PB1_PIF_SEQ_STATUS_15__SEQ_ENTER_L1_FROM_L0S_15_MASK 0x00000010L
+#define PB1_PIF_SEQ_STATUS_15__SEQ_ENTER_L1_FROM_L0S_15__SHIFT 0x00000004
+#define PB1_PIF_SEQ_STATUS_15__SEQ_EXIT_L1_TO_L0_15_MASK 0x00000008L
+#define PB1_PIF_SEQ_STATUS_15__SEQ_EXIT_L1_TO_L0_15__SHIFT 0x00000003
+#define PB1_PIF_SEQ_STATUS_15__SEQ_EXIT_L1_TO_L0S_15_MASK 0x00000004L
+#define PB1_PIF_SEQ_STATUS_15__SEQ_EXIT_L1_TO_L0S_15__SHIFT 0x00000002
+#define PB1_PIF_SEQ_STATUS_15__SEQ_PHASE_15_MASK 0x00000700L
+#define PB1_PIF_SEQ_STATUS_15__SEQ_PHASE_15__SHIFT 0x00000008
+#define PB1_PIF_SEQ_STATUS_15__SEQ_RXDETECT_15_MASK 0x00000002L
+#define PB1_PIF_SEQ_STATUS_15__SEQ_RXDETECT_15__SHIFT 0x00000001
+#define PB1_PIF_SEQ_STATUS_15__SEQ_SPEED_CHANGE_15_MASK 0x00000040L
+#define PB1_PIF_SEQ_STATUS_15__SEQ_SPEED_CHANGE_15__SHIFT 0x00000006
+#define PB1_PIF_SEQ_STATUS_1__SEQ_CALIBRATION_1_MASK 0x00000001L
+#define PB1_PIF_SEQ_STATUS_1__SEQ_CALIBRATION_1__SHIFT 0x00000000
+#define PB1_PIF_SEQ_STATUS_1__SEQ_ENTER_L1_FROM_L0_1_MASK 0x00000020L
+#define PB1_PIF_SEQ_STATUS_1__SEQ_ENTER_L1_FROM_L0_1__SHIFT 0x00000005
+#define PB1_PIF_SEQ_STATUS_1__SEQ_ENTER_L1_FROM_L0S_1_MASK 0x00000010L
+#define PB1_PIF_SEQ_STATUS_1__SEQ_ENTER_L1_FROM_L0S_1__SHIFT 0x00000004
+#define PB1_PIF_SEQ_STATUS_1__SEQ_EXIT_L1_TO_L0_1_MASK 0x00000008L
+#define PB1_PIF_SEQ_STATUS_1__SEQ_EXIT_L1_TO_L0_1__SHIFT 0x00000003
+#define PB1_PIF_SEQ_STATUS_1__SEQ_EXIT_L1_TO_L0S_1_MASK 0x00000004L
+#define PB1_PIF_SEQ_STATUS_1__SEQ_EXIT_L1_TO_L0S_1__SHIFT 0x00000002
+#define PB1_PIF_SEQ_STATUS_1__SEQ_PHASE_1_MASK 0x00000700L
+#define PB1_PIF_SEQ_STATUS_1__SEQ_PHASE_1__SHIFT 0x00000008
+#define PB1_PIF_SEQ_STATUS_1__SEQ_RXDETECT_1_MASK 0x00000002L
+#define PB1_PIF_SEQ_STATUS_1__SEQ_RXDETECT_1__SHIFT 0x00000001
+#define PB1_PIF_SEQ_STATUS_1__SEQ_SPEED_CHANGE_1_MASK 0x00000040L
+#define PB1_PIF_SEQ_STATUS_1__SEQ_SPEED_CHANGE_1__SHIFT 0x00000006
+#define PB1_PIF_SEQ_STATUS_2__SEQ_CALIBRATION_2_MASK 0x00000001L
+#define PB1_PIF_SEQ_STATUS_2__SEQ_CALIBRATION_2__SHIFT 0x00000000
+#define PB1_PIF_SEQ_STATUS_2__SEQ_ENTER_L1_FROM_L0_2_MASK 0x00000020L
+#define PB1_PIF_SEQ_STATUS_2__SEQ_ENTER_L1_FROM_L0_2__SHIFT 0x00000005
+#define PB1_PIF_SEQ_STATUS_2__SEQ_ENTER_L1_FROM_L0S_2_MASK 0x00000010L
+#define PB1_PIF_SEQ_STATUS_2__SEQ_ENTER_L1_FROM_L0S_2__SHIFT 0x00000004
+#define PB1_PIF_SEQ_STATUS_2__SEQ_EXIT_L1_TO_L0_2_MASK 0x00000008L
+#define PB1_PIF_SEQ_STATUS_2__SEQ_EXIT_L1_TO_L0_2__SHIFT 0x00000003
+#define PB1_PIF_SEQ_STATUS_2__SEQ_EXIT_L1_TO_L0S_2_MASK 0x00000004L
+#define PB1_PIF_SEQ_STATUS_2__SEQ_EXIT_L1_TO_L0S_2__SHIFT 0x00000002
+#define PB1_PIF_SEQ_STATUS_2__SEQ_PHASE_2_MASK 0x00000700L
+#define PB1_PIF_SEQ_STATUS_2__SEQ_PHASE_2__SHIFT 0x00000008
+#define PB1_PIF_SEQ_STATUS_2__SEQ_RXDETECT_2_MASK 0x00000002L
+#define PB1_PIF_SEQ_STATUS_2__SEQ_RXDETECT_2__SHIFT 0x00000001
+#define PB1_PIF_SEQ_STATUS_2__SEQ_SPEED_CHANGE_2_MASK 0x00000040L
+#define PB1_PIF_SEQ_STATUS_2__SEQ_SPEED_CHANGE_2__SHIFT 0x00000006
+#define PB1_PIF_SEQ_STATUS_3__SEQ_CALIBRATION_3_MASK 0x00000001L
+#define PB1_PIF_SEQ_STATUS_3__SEQ_CALIBRATION_3__SHIFT 0x00000000
+#define PB1_PIF_SEQ_STATUS_3__SEQ_ENTER_L1_FROM_L0_3_MASK 0x00000020L
+#define PB1_PIF_SEQ_STATUS_3__SEQ_ENTER_L1_FROM_L0_3__SHIFT 0x00000005
+#define PB1_PIF_SEQ_STATUS_3__SEQ_ENTER_L1_FROM_L0S_3_MASK 0x00000010L
+#define PB1_PIF_SEQ_STATUS_3__SEQ_ENTER_L1_FROM_L0S_3__SHIFT 0x00000004
+#define PB1_PIF_SEQ_STATUS_3__SEQ_EXIT_L1_TO_L0_3_MASK 0x00000008L
+#define PB1_PIF_SEQ_STATUS_3__SEQ_EXIT_L1_TO_L0_3__SHIFT 0x00000003
+#define PB1_PIF_SEQ_STATUS_3__SEQ_EXIT_L1_TO_L0S_3_MASK 0x00000004L
+#define PB1_PIF_SEQ_STATUS_3__SEQ_EXIT_L1_TO_L0S_3__SHIFT 0x00000002
+#define PB1_PIF_SEQ_STATUS_3__SEQ_PHASE_3_MASK 0x00000700L
+#define PB1_PIF_SEQ_STATUS_3__SEQ_PHASE_3__SHIFT 0x00000008
+#define PB1_PIF_SEQ_STATUS_3__SEQ_RXDETECT_3_MASK 0x00000002L
+#define PB1_PIF_SEQ_STATUS_3__SEQ_RXDETECT_3__SHIFT 0x00000001
+#define PB1_PIF_SEQ_STATUS_3__SEQ_SPEED_CHANGE_3_MASK 0x00000040L
+#define PB1_PIF_SEQ_STATUS_3__SEQ_SPEED_CHANGE_3__SHIFT 0x00000006
+#define PB1_PIF_SEQ_STATUS_4__SEQ_CALIBRATION_4_MASK 0x00000001L
+#define PB1_PIF_SEQ_STATUS_4__SEQ_CALIBRATION_4__SHIFT 0x00000000
+#define PB1_PIF_SEQ_STATUS_4__SEQ_ENTER_L1_FROM_L0_4_MASK 0x00000020L
+#define PB1_PIF_SEQ_STATUS_4__SEQ_ENTER_L1_FROM_L0_4__SHIFT 0x00000005
+#define PB1_PIF_SEQ_STATUS_4__SEQ_ENTER_L1_FROM_L0S_4_MASK 0x00000010L
+#define PB1_PIF_SEQ_STATUS_4__SEQ_ENTER_L1_FROM_L0S_4__SHIFT 0x00000004
+#define PB1_PIF_SEQ_STATUS_4__SEQ_EXIT_L1_TO_L0_4_MASK 0x00000008L
+#define PB1_PIF_SEQ_STATUS_4__SEQ_EXIT_L1_TO_L0_4__SHIFT 0x00000003
+#define PB1_PIF_SEQ_STATUS_4__SEQ_EXIT_L1_TO_L0S_4_MASK 0x00000004L
+#define PB1_PIF_SEQ_STATUS_4__SEQ_EXIT_L1_TO_L0S_4__SHIFT 0x00000002
+#define PB1_PIF_SEQ_STATUS_4__SEQ_PHASE_4_MASK 0x00000700L
+#define PB1_PIF_SEQ_STATUS_4__SEQ_PHASE_4__SHIFT 0x00000008
+#define PB1_PIF_SEQ_STATUS_4__SEQ_RXDETECT_4_MASK 0x00000002L
+#define PB1_PIF_SEQ_STATUS_4__SEQ_RXDETECT_4__SHIFT 0x00000001
+#define PB1_PIF_SEQ_STATUS_4__SEQ_SPEED_CHANGE_4_MASK 0x00000040L
+#define PB1_PIF_SEQ_STATUS_4__SEQ_SPEED_CHANGE_4__SHIFT 0x00000006
+#define PB1_PIF_SEQ_STATUS_5__SEQ_CALIBRATION_5_MASK 0x00000001L
+#define PB1_PIF_SEQ_STATUS_5__SEQ_CALIBRATION_5__SHIFT 0x00000000
+#define PB1_PIF_SEQ_STATUS_5__SEQ_ENTER_L1_FROM_L0_5_MASK 0x00000020L
+#define PB1_PIF_SEQ_STATUS_5__SEQ_ENTER_L1_FROM_L0_5__SHIFT 0x00000005
+#define PB1_PIF_SEQ_STATUS_5__SEQ_ENTER_L1_FROM_L0S_5_MASK 0x00000010L
+#define PB1_PIF_SEQ_STATUS_5__SEQ_ENTER_L1_FROM_L0S_5__SHIFT 0x00000004
+#define PB1_PIF_SEQ_STATUS_5__SEQ_EXIT_L1_TO_L0_5_MASK 0x00000008L
+#define PB1_PIF_SEQ_STATUS_5__SEQ_EXIT_L1_TO_L0_5__SHIFT 0x00000003
+#define PB1_PIF_SEQ_STATUS_5__SEQ_EXIT_L1_TO_L0S_5_MASK 0x00000004L
+#define PB1_PIF_SEQ_STATUS_5__SEQ_EXIT_L1_TO_L0S_5__SHIFT 0x00000002
+#define PB1_PIF_SEQ_STATUS_5__SEQ_PHASE_5_MASK 0x00000700L
+#define PB1_PIF_SEQ_STATUS_5__SEQ_PHASE_5__SHIFT 0x00000008
+#define PB1_PIF_SEQ_STATUS_5__SEQ_RXDETECT_5_MASK 0x00000002L
+#define PB1_PIF_SEQ_STATUS_5__SEQ_RXDETECT_5__SHIFT 0x00000001
+#define PB1_PIF_SEQ_STATUS_5__SEQ_SPEED_CHANGE_5_MASK 0x00000040L
+#define PB1_PIF_SEQ_STATUS_5__SEQ_SPEED_CHANGE_5__SHIFT 0x00000006
+#define PB1_PIF_SEQ_STATUS_6__SEQ_CALIBRATION_6_MASK 0x00000001L
+#define PB1_PIF_SEQ_STATUS_6__SEQ_CALIBRATION_6__SHIFT 0x00000000
+#define PB1_PIF_SEQ_STATUS_6__SEQ_ENTER_L1_FROM_L0_6_MASK 0x00000020L
+#define PB1_PIF_SEQ_STATUS_6__SEQ_ENTER_L1_FROM_L0_6__SHIFT 0x00000005
+#define PB1_PIF_SEQ_STATUS_6__SEQ_ENTER_L1_FROM_L0S_6_MASK 0x00000010L
+#define PB1_PIF_SEQ_STATUS_6__SEQ_ENTER_L1_FROM_L0S_6__SHIFT 0x00000004
+#define PB1_PIF_SEQ_STATUS_6__SEQ_EXIT_L1_TO_L0_6_MASK 0x00000008L
+#define PB1_PIF_SEQ_STATUS_6__SEQ_EXIT_L1_TO_L0_6__SHIFT 0x00000003
+#define PB1_PIF_SEQ_STATUS_6__SEQ_EXIT_L1_TO_L0S_6_MASK 0x00000004L
+#define PB1_PIF_SEQ_STATUS_6__SEQ_EXIT_L1_TO_L0S_6__SHIFT 0x00000002
+#define PB1_PIF_SEQ_STATUS_6__SEQ_PHASE_6_MASK 0x00000700L
+#define PB1_PIF_SEQ_STATUS_6__SEQ_PHASE_6__SHIFT 0x00000008
+#define PB1_PIF_SEQ_STATUS_6__SEQ_RXDETECT_6_MASK 0x00000002L
+#define PB1_PIF_SEQ_STATUS_6__SEQ_RXDETECT_6__SHIFT 0x00000001
+#define PB1_PIF_SEQ_STATUS_6__SEQ_SPEED_CHANGE_6_MASK 0x00000040L
+#define PB1_PIF_SEQ_STATUS_6__SEQ_SPEED_CHANGE_6__SHIFT 0x00000006
+#define PB1_PIF_SEQ_STATUS_7__SEQ_CALIBRATION_7_MASK 0x00000001L
+#define PB1_PIF_SEQ_STATUS_7__SEQ_CALIBRATION_7__SHIFT 0x00000000
+#define PB1_PIF_SEQ_STATUS_7__SEQ_ENTER_L1_FROM_L0_7_MASK 0x00000020L
+#define PB1_PIF_SEQ_STATUS_7__SEQ_ENTER_L1_FROM_L0_7__SHIFT 0x00000005
+#define PB1_PIF_SEQ_STATUS_7__SEQ_ENTER_L1_FROM_L0S_7_MASK 0x00000010L
+#define PB1_PIF_SEQ_STATUS_7__SEQ_ENTER_L1_FROM_L0S_7__SHIFT 0x00000004
+#define PB1_PIF_SEQ_STATUS_7__SEQ_EXIT_L1_TO_L0_7_MASK 0x00000008L
+#define PB1_PIF_SEQ_STATUS_7__SEQ_EXIT_L1_TO_L0_7__SHIFT 0x00000003
+#define PB1_PIF_SEQ_STATUS_7__SEQ_EXIT_L1_TO_L0S_7_MASK 0x00000004L
+#define PB1_PIF_SEQ_STATUS_7__SEQ_EXIT_L1_TO_L0S_7__SHIFT 0x00000002
+#define PB1_PIF_SEQ_STATUS_7__SEQ_PHASE_7_MASK 0x00000700L
+#define PB1_PIF_SEQ_STATUS_7__SEQ_PHASE_7__SHIFT 0x00000008
+#define PB1_PIF_SEQ_STATUS_7__SEQ_RXDETECT_7_MASK 0x00000002L
+#define PB1_PIF_SEQ_STATUS_7__SEQ_RXDETECT_7__SHIFT 0x00000001
+#define PB1_PIF_SEQ_STATUS_7__SEQ_SPEED_CHANGE_7_MASK 0x00000040L
+#define PB1_PIF_SEQ_STATUS_7__SEQ_SPEED_CHANGE_7__SHIFT 0x00000006
+#define PB1_PIF_SEQ_STATUS_8__SEQ_CALIBRATION_8_MASK 0x00000001L
+#define PB1_PIF_SEQ_STATUS_8__SEQ_CALIBRATION_8__SHIFT 0x00000000
+#define PB1_PIF_SEQ_STATUS_8__SEQ_ENTER_L1_FROM_L0_8_MASK 0x00000020L
+#define PB1_PIF_SEQ_STATUS_8__SEQ_ENTER_L1_FROM_L0_8__SHIFT 0x00000005
+#define PB1_PIF_SEQ_STATUS_8__SEQ_ENTER_L1_FROM_L0S_8_MASK 0x00000010L
+#define PB1_PIF_SEQ_STATUS_8__SEQ_ENTER_L1_FROM_L0S_8__SHIFT 0x00000004
+#define PB1_PIF_SEQ_STATUS_8__SEQ_EXIT_L1_TO_L0_8_MASK 0x00000008L
+#define PB1_PIF_SEQ_STATUS_8__SEQ_EXIT_L1_TO_L0_8__SHIFT 0x00000003
+#define PB1_PIF_SEQ_STATUS_8__SEQ_EXIT_L1_TO_L0S_8_MASK 0x00000004L
+#define PB1_PIF_SEQ_STATUS_8__SEQ_EXIT_L1_TO_L0S_8__SHIFT 0x00000002
+#define PB1_PIF_SEQ_STATUS_8__SEQ_PHASE_8_MASK 0x00000700L
+#define PB1_PIF_SEQ_STATUS_8__SEQ_PHASE_8__SHIFT 0x00000008
+#define PB1_PIF_SEQ_STATUS_8__SEQ_RXDETECT_8_MASK 0x00000002L
+#define PB1_PIF_SEQ_STATUS_8__SEQ_RXDETECT_8__SHIFT 0x00000001
+#define PB1_PIF_SEQ_STATUS_8__SEQ_SPEED_CHANGE_8_MASK 0x00000040L
+#define PB1_PIF_SEQ_STATUS_8__SEQ_SPEED_CHANGE_8__SHIFT 0x00000006
+#define PB1_PIF_SEQ_STATUS_9__SEQ_CALIBRATION_9_MASK 0x00000001L
+#define PB1_PIF_SEQ_STATUS_9__SEQ_CALIBRATION_9__SHIFT 0x00000000
+#define PB1_PIF_SEQ_STATUS_9__SEQ_ENTER_L1_FROM_L0_9_MASK 0x00000020L
+#define PB1_PIF_SEQ_STATUS_9__SEQ_ENTER_L1_FROM_L0_9__SHIFT 0x00000005
+#define PB1_PIF_SEQ_STATUS_9__SEQ_ENTER_L1_FROM_L0S_9_MASK 0x00000010L
+#define PB1_PIF_SEQ_STATUS_9__SEQ_ENTER_L1_FROM_L0S_9__SHIFT 0x00000004
+#define PB1_PIF_SEQ_STATUS_9__SEQ_EXIT_L1_TO_L0_9_MASK 0x00000008L
+#define PB1_PIF_SEQ_STATUS_9__SEQ_EXIT_L1_TO_L0_9__SHIFT 0x00000003
+#define PB1_PIF_SEQ_STATUS_9__SEQ_EXIT_L1_TO_L0S_9_MASK 0x00000004L
+#define PB1_PIF_SEQ_STATUS_9__SEQ_EXIT_L1_TO_L0S_9__SHIFT 0x00000002
+#define PB1_PIF_SEQ_STATUS_9__SEQ_PHASE_9_MASK 0x00000700L
+#define PB1_PIF_SEQ_STATUS_9__SEQ_PHASE_9__SHIFT 0x00000008
+#define PB1_PIF_SEQ_STATUS_9__SEQ_RXDETECT_9_MASK 0x00000002L
+#define PB1_PIF_SEQ_STATUS_9__SEQ_RXDETECT_9__SHIFT 0x00000001
+#define PB1_PIF_SEQ_STATUS_9__SEQ_SPEED_CHANGE_9_MASK 0x00000040L
+#define PB1_PIF_SEQ_STATUS_9__SEQ_SPEED_CHANGE_9__SHIFT 0x00000006
+#define PB1_PIF_TXPHYSTATUS__TXPHYSTATUS_0_MASK 0x00000001L
+#define PB1_PIF_TXPHYSTATUS__TXPHYSTATUS_0__SHIFT 0x00000000
+#define PB1_PIF_TXPHYSTATUS__TXPHYSTATUS_10_MASK 0x00000400L
+#define PB1_PIF_TXPHYSTATUS__TXPHYSTATUS_10__SHIFT 0x0000000a
+#define PB1_PIF_TXPHYSTATUS__TXPHYSTATUS_11_MASK 0x00000800L
+#define PB1_PIF_TXPHYSTATUS__TXPHYSTATUS_11__SHIFT 0x0000000b
+#define PB1_PIF_TXPHYSTATUS__TXPHYSTATUS_12_MASK 0x00001000L
+#define PB1_PIF_TXPHYSTATUS__TXPHYSTATUS_12__SHIFT 0x0000000c
+#define PB1_PIF_TXPHYSTATUS__TXPHYSTATUS_13_MASK 0x00002000L
+#define PB1_PIF_TXPHYSTATUS__TXPHYSTATUS_13__SHIFT 0x0000000d
+#define PB1_PIF_TXPHYSTATUS__TXPHYSTATUS_14_MASK 0x00004000L
+#define PB1_PIF_TXPHYSTATUS__TXPHYSTATUS_14__SHIFT 0x0000000e
+#define PB1_PIF_TXPHYSTATUS__TXPHYSTATUS_15_MASK 0x00008000L
+#define PB1_PIF_TXPHYSTATUS__TXPHYSTATUS_15__SHIFT 0x0000000f
+#define PB1_PIF_TXPHYSTATUS__TXPHYSTATUS_1_MASK 0x00000002L
+#define PB1_PIF_TXPHYSTATUS__TXPHYSTATUS_1__SHIFT 0x00000001
+#define PB1_PIF_TXPHYSTATUS__TXPHYSTATUS_2_MASK 0x00000004L
+#define PB1_PIF_TXPHYSTATUS__TXPHYSTATUS_2__SHIFT 0x00000002
+#define PB1_PIF_TXPHYSTATUS__TXPHYSTATUS_3_MASK 0x00000008L
+#define PB1_PIF_TXPHYSTATUS__TXPHYSTATUS_3__SHIFT 0x00000003
+#define PB1_PIF_TXPHYSTATUS__TXPHYSTATUS_4_MASK 0x00000010L
+#define PB1_PIF_TXPHYSTATUS__TXPHYSTATUS_4__SHIFT 0x00000004
+#define PB1_PIF_TXPHYSTATUS__TXPHYSTATUS_5_MASK 0x00000020L
+#define PB1_PIF_TXPHYSTATUS__TXPHYSTATUS_5__SHIFT 0x00000005
+#define PB1_PIF_TXPHYSTATUS__TXPHYSTATUS_6_MASK 0x00000040L
+#define PB1_PIF_TXPHYSTATUS__TXPHYSTATUS_6__SHIFT 0x00000006
+#define PB1_PIF_TXPHYSTATUS__TXPHYSTATUS_7_MASK 0x00000080L
+#define PB1_PIF_TXPHYSTATUS__TXPHYSTATUS_7__SHIFT 0x00000007
+#define PB1_PIF_TXPHYSTATUS__TXPHYSTATUS_8_MASK 0x00000100L
+#define PB1_PIF_TXPHYSTATUS__TXPHYSTATUS_8__SHIFT 0x00000008
+#define PB1_PIF_TXPHYSTATUS__TXPHYSTATUS_9_MASK 0x00000200L
+#define PB1_PIF_TXPHYSTATUS__TXPHYSTATUS_9__SHIFT 0x00000009
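(Editorial note: the generated definitions above and below follow the usual _MASK/__SHIFT pairing, where the _MASK isolates a field's bits within the 32-bit register and the __SHIFT gives the field's bit offset. A minimal sketch of how such pairs are typically consumed; the helper names here are hypothetical illustrations, not functions defined by this header or this patch.)

/* Generic accessors for MASK/SHIFT register field definitions (sketch). */
#include <stdint.h>

/* Extract a field: isolate its bits with the mask, then right-align them. */
static inline uint32_t reg_get_field(uint32_t reg, uint32_t mask, uint32_t shift)
{
	return (reg & mask) >> shift;
}

/* Update a field: clear its old bits, then OR in the shifted new value,
 * re-masking so an oversized value cannot spill into neighboring fields. */
static inline uint32_t reg_set_field(uint32_t reg, uint32_t mask, uint32_t shift, uint32_t val)
{
	return (reg & ~mask) | ((val << shift) & mask);
}

/* Example using a definition from this header (pif_sc_ctl is a hypothetical
 * register value read elsewhere): test lane 0's resume bit.
 *
 *	uint32_t v = reg_get_field(pif_sc_ctl,
 *				   PB1_PIF_SC_CTL__SC_LANE_0_RESUME_MASK,
 *				   PB1_PIF_SC_CTL__SC_LANE_0_RESUME__SHIFT);
 */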
+#define PB1_PLL_LC0_CTRL_REG0__PLL_DBG_LC_ANALOG_SEL_0_MASK 0x00000003L
+#define PB1_PLL_LC0_CTRL_REG0__PLL_DBG_LC_ANALOG_SEL_0__SHIFT 0x00000000
+#define PB1_PLL_LC0_CTRL_REG0__PLL_DBG_LC_EXT_RESET_EN_0_MASK 0x00000004L
+#define PB1_PLL_LC0_CTRL_REG0__PLL_DBG_LC_EXT_RESET_EN_0__SHIFT 0x00000002
+#define PB1_PLL_LC0_CTRL_REG0__PLL_DBG_LC_VCTL_ADC_EN_0_MASK 0x00000008L
+#define PB1_PLL_LC0_CTRL_REG0__PLL_DBG_LC_VCTL_ADC_EN_0__SHIFT 0x00000003
+#define PB1_PLL_LC0_CTRL_REG0__PLL_TST_LC_USAMPLE_EN_0_MASK 0x00000010L
+#define PB1_PLL_LC0_CTRL_REG0__PLL_TST_LC_USAMPLE_EN_0__SHIFT 0x00000004
+#define PB1_PLL_LC0_OVRD_REG0__PLL_CFG_LC_BW_CNTRL_OVRD_EN_0_MASK 0x00000008L
+#define PB1_PLL_LC0_OVRD_REG0__PLL_CFG_LC_BW_CNTRL_OVRD_EN_0__SHIFT 0x00000003
+#define PB1_PLL_LC0_OVRD_REG0__PLL_CFG_LC_BW_CNTRL_OVRD_VAL_0_MASK 0x00000007L
+#define PB1_PLL_LC0_OVRD_REG0__PLL_CFG_LC_BW_CNTRL_OVRD_VAL_0__SHIFT 0x00000000
+#define PB1_PLL_LC0_OVRD_REG0__PLL_CFG_LC_CORECLK_DIV_OVRD_EN_0_MASK 0x00000080L
+#define PB1_PLL_LC0_OVRD_REG0__PLL_CFG_LC_CORECLK_DIV_OVRD_EN_0__SHIFT 0x00000007
+#define PB1_PLL_LC0_OVRD_REG0__PLL_CFG_LC_CORECLK_DIV_OVRD_VAL_0_MASK 0x00000070L
+#define PB1_PLL_LC0_OVRD_REG0__PLL_CFG_LC_CORECLK_DIV_OVRD_VAL_0__SHIFT 0x00000004
+#define PB1_PLL_LC0_OVRD_REG0__PLL_CFG_LC_CORECLK_EN_OVRD_EN_0_MASK 0x00000200L
+#define PB1_PLL_LC0_OVRD_REG0__PLL_CFG_LC_CORECLK_EN_OVRD_EN_0__SHIFT 0x00000009
+#define PB1_PLL_LC0_OVRD_REG0__PLL_CFG_LC_CORECLK_EN_OVRD_VAL_0_MASK 0x00000100L
+#define PB1_PLL_LC0_OVRD_REG0__PLL_CFG_LC_CORECLK_EN_OVRD_VAL_0__SHIFT 0x00000008
+#define PB1_PLL_LC0_OVRD_REG0__PLL_CFG_LC_FBDIV_OVRD_EN_0_MASK 0x00040000L
+#define PB1_PLL_LC0_OVRD_REG0__PLL_CFG_LC_FBDIV_OVRD_EN_0__SHIFT 0x00000012
+#define PB1_PLL_LC0_OVRD_REG0__PLL_CFG_LC_FBDIV_OVRD_VAL_0_MASK 0x0003fc00L
+#define PB1_PLL_LC0_OVRD_REG0__PLL_CFG_LC_FBDIV_OVRD_VAL_0__SHIFT 0x0000000a
+#define PB1_PLL_LC0_OVRD_REG0__PLL_CFG_LC_LF_CNTRL_OVRD_EN_0_MASK 0x10000000L
+#define PB1_PLL_LC0_OVRD_REG0__PLL_CFG_LC_LF_CNTRL_OVRD_EN_0__SHIFT 0x0000001c
+#define PB1_PLL_LC0_OVRD_REG0__PLL_CFG_LC_LF_CNTRL_OVRD_VAL_0_MASK 0x0ff80000L
+#define PB1_PLL_LC0_OVRD_REG0__PLL_CFG_LC_LF_CNTRL_OVRD_VAL_0__SHIFT 0x00000013
+#define PB1_PLL_LC0_OVRD_REG0__PLL_CFG_LC_REFDIV_OVRD_EN_0_MASK 0x80000000L
+#define PB1_PLL_LC0_OVRD_REG0__PLL_CFG_LC_REFDIV_OVRD_EN_0__SHIFT 0x0000001f
+#define PB1_PLL_LC0_OVRD_REG0__PLL_CFG_LC_REFDIV_OVRD_VAL_0_MASK 0x60000000L
+#define PB1_PLL_LC0_OVRD_REG0__PLL_CFG_LC_REFDIV_OVRD_VAL_0__SHIFT 0x0000001d
+#define PB1_PLL_LC0_OVRD_REG1__PLL_CFG_LC_REFCLK_SRC_OVRD_EN_0_MASK 0x00000008L
+#define PB1_PLL_LC0_OVRD_REG1__PLL_CFG_LC_REFCLK_SRC_OVRD_EN_0__SHIFT 0x00000003
+#define PB1_PLL_LC0_OVRD_REG1__PLL_CFG_LC_REFCLK_SRC_OVRD_VAL_0_MASK 0x00000007L
+#define PB1_PLL_LC0_OVRD_REG1__PLL_CFG_LC_REFCLK_SRC_OVRD_VAL_0__SHIFT 0x00000000
+#define PB1_PLL_LC0_OVRD_REG1__PLL_CFG_LC_VCO_TUNE_OVRD_EN_0_MASK 0x00040000L
+#define PB1_PLL_LC0_OVRD_REG1__PLL_CFG_LC_VCO_TUNE_OVRD_EN_0__SHIFT 0x00000012
+#define PB1_PLL_LC0_OVRD_REG1__PLL_CFG_LC_VCO_TUNE_OVRD_VAL_0_MASK 0x0003c000L
+#define PB1_PLL_LC0_OVRD_REG1__PLL_CFG_LC_VCO_TUNE_OVRD_VAL_0__SHIFT 0x0000000e
+#define PB1_PLL_LC0_OVRD_REG1__PLL_LC_HSCLK_LEFT_EN_OVRD_EN_0_MASK 0x00000020L
+#define PB1_PLL_LC0_OVRD_REG1__PLL_LC_HSCLK_LEFT_EN_OVRD_EN_0__SHIFT 0x00000005
+#define PB1_PLL_LC0_OVRD_REG1__PLL_LC_HSCLK_LEFT_EN_OVRD_VAL_0_MASK 0x00000010L
+#define PB1_PLL_LC0_OVRD_REG1__PLL_LC_HSCLK_LEFT_EN_OVRD_VAL_0__SHIFT 0x00000004
+#define PB1_PLL_LC0_OVRD_REG1__PLL_LC_HSCLK_RIGHT_EN_OVRD_EN_0_MASK 0x00000080L
+#define PB1_PLL_LC0_OVRD_REG1__PLL_LC_HSCLK_RIGHT_EN_OVRD_EN_0__SHIFT 0x00000007
+#define PB1_PLL_LC0_OVRD_REG1__PLL_LC_HSCLK_RIGHT_EN_OVRD_VAL_0_MASK 0x00000040L
+#define PB1_PLL_LC0_OVRD_REG1__PLL_LC_HSCLK_RIGHT_EN_OVRD_VAL_0__SHIFT 0x00000006
+#define PB1_PLL_LC0_OVRD_REG1__PLL_LC_PWRON_OVRD_EN_0_MASK 0x00000200L
+#define PB1_PLL_LC0_OVRD_REG1__PLL_LC_PWRON_OVRD_EN_0__SHIFT 0x00000009
+#define PB1_PLL_LC0_OVRD_REG1__PLL_LC_PWRON_OVRD_VAL_0_MASK 0x00000100L
+#define PB1_PLL_LC0_OVRD_REG1__PLL_LC_PWRON_OVRD_VAL_0__SHIFT 0x00000008
+#define PB1_PLL_LC0_SCI_STAT_OVRD_REG0__PLL_LC0_FREQMODE_MASK 0x00000300L
+#define PB1_PLL_LC0_SCI_STAT_OVRD_REG0__PLL_LC0_FREQMODE__SHIFT 0x00000008
+#define PB1_PLL_LC0_SCI_STAT_OVRD_REG0__PLL_LC0_IGNR_FREQMODE_SCI_UPDT_MASK 0x00000002L
+#define PB1_PLL_LC0_SCI_STAT_OVRD_REG0__PLL_LC0_IGNR_FREQMODE_SCI_UPDT__SHIFT 0x00000001
+#define PB1_PLL_LC0_SCI_STAT_OVRD_REG0__PLL_LC0_IGNR_PLLPWR_SCI_UPDT_MASK 0x00000001L
+#define PB1_PLL_LC0_SCI_STAT_OVRD_REG0__PLL_LC0_IGNR_PLLPWR_SCI_UPDT__SHIFT 0x00000000
+#define PB1_PLL_LC0_SCI_STAT_OVRD_REG0__PLL_LC0_PLLPWR_MASK 0x00000070L
+#define PB1_PLL_LC0_SCI_STAT_OVRD_REG0__PLL_LC0_PLLPWR__SHIFT 0x00000004
+#define PB1_PLL_LC1_SCI_STAT_OVRD_REG0__PLL_LC1_FREQMODE_MASK 0x00000300L
+#define PB1_PLL_LC1_SCI_STAT_OVRD_REG0__PLL_LC1_FREQMODE__SHIFT 0x00000008
+#define PB1_PLL_LC1_SCI_STAT_OVRD_REG0__PLL_LC1_IGNR_FREQMODE_SCI_UPDT_MASK 0x00000002L
+#define PB1_PLL_LC1_SCI_STAT_OVRD_REG0__PLL_LC1_IGNR_FREQMODE_SCI_UPDT__SHIFT 0x00000001
+#define PB1_PLL_LC1_SCI_STAT_OVRD_REG0__PLL_LC1_IGNR_PLLPWR_SCI_UPDT_MASK 0x00000001L
+#define PB1_PLL_LC1_SCI_STAT_OVRD_REG0__PLL_LC1_IGNR_PLLPWR_SCI_UPDT__SHIFT 0x00000000
+#define PB1_PLL_LC1_SCI_STAT_OVRD_REG0__PLL_LC1_PLLPWR_MASK 0x00000070L
+#define PB1_PLL_LC1_SCI_STAT_OVRD_REG0__PLL_LC1_PLLPWR__SHIFT 0x00000004
+#define PB1_PLL_LC2_SCI_STAT_OVRD_REG0__PLL_LC2_FREQMODE_MASK 0x00000300L
+#define PB1_PLL_LC2_SCI_STAT_OVRD_REG0__PLL_LC2_FREQMODE__SHIFT 0x00000008
+#define PB1_PLL_LC2_SCI_STAT_OVRD_REG0__PLL_LC2_IGNR_FREQMODE_SCI_UPDT_MASK 0x00000002L
+#define PB1_PLL_LC2_SCI_STAT_OVRD_REG0__PLL_LC2_IGNR_FREQMODE_SCI_UPDT__SHIFT 0x00000001
+#define PB1_PLL_LC2_SCI_STAT_OVRD_REG0__PLL_LC2_IGNR_PLLPWR_SCI_UPDT_MASK 0x00000001L
+#define PB1_PLL_LC2_SCI_STAT_OVRD_REG0__PLL_LC2_IGNR_PLLPWR_SCI_UPDT__SHIFT 0x00000000
+#define PB1_PLL_LC2_SCI_STAT_OVRD_REG0__PLL_LC2_PLLPWR_MASK 0x00000070L
+#define PB1_PLL_LC2_SCI_STAT_OVRD_REG0__PLL_LC2_PLLPWR__SHIFT 0x00000004
+#define PB1_PLL_LC3_SCI_STAT_OVRD_REG0__PLL_LC3_FREQMODE_MASK 0x00000300L
+#define PB1_PLL_LC3_SCI_STAT_OVRD_REG0__PLL_LC3_FREQMODE__SHIFT 0x00000008
+#define PB1_PLL_LC3_SCI_STAT_OVRD_REG0__PLL_LC3_IGNR_FREQMODE_SCI_UPDT_MASK 0x00000002L
+#define PB1_PLL_LC3_SCI_STAT_OVRD_REG0__PLL_LC3_IGNR_FREQMODE_SCI_UPDT__SHIFT 0x00000001
+#define PB1_PLL_LC3_SCI_STAT_OVRD_REG0__PLL_LC3_IGNR_PLLPWR_SCI_UPDT_MASK 0x00000001L
+#define PB1_PLL_LC3_SCI_STAT_OVRD_REG0__PLL_LC3_IGNR_PLLPWR_SCI_UPDT__SHIFT 0x00000000
+#define PB1_PLL_LC3_SCI_STAT_OVRD_REG0__PLL_LC3_PLLPWR_MASK 0x00000070L
+#define PB1_PLL_LC3_SCI_STAT_OVRD_REG0__PLL_LC3_PLLPWR__SHIFT 0x00000004
+#define PB1_PLL_RO0_CTRL_REG0__PLL_DBG_RO_ANALOG_SEL_0_MASK 0x00000003L
+#define PB1_PLL_RO0_CTRL_REG0__PLL_DBG_RO_ANALOG_SEL_0__SHIFT 0x00000000
+#define PB1_PLL_RO0_CTRL_REG0__PLL_DBG_RO_EXT_RESET_EN_0_MASK 0x00000004L
+#define PB1_PLL_RO0_CTRL_REG0__PLL_DBG_RO_EXT_RESET_EN_0__SHIFT 0x00000002
+#define PB1_PLL_RO0_CTRL_REG0__PLL_DBG_RO_LF_CNTRL_0_MASK 0x000007f0L
+#define PB1_PLL_RO0_CTRL_REG0__PLL_DBG_RO_LF_CNTRL_0__SHIFT 0x00000004
+#define PB1_PLL_RO0_CTRL_REG0__PLL_DBG_RO_VCTL_ADC_EN_0_MASK 0x00000008L
+#define PB1_PLL_RO0_CTRL_REG0__PLL_DBG_RO_VCTL_ADC_EN_0__SHIFT 0x00000003
+#define PB1_PLL_RO0_CTRL_REG0__PLL_TST_RO_USAMPLE_EN_0_MASK 0x00000800L
+#define PB1_PLL_RO0_CTRL_REG0__PLL_TST_RO_USAMPLE_EN_0__SHIFT 0x0000000b
+#define PB1_PLL_RO0_OVRD_REG0__PLL_CFG_RO_BW_CNTRL_OVRD_EN_0_MASK 0x00000100L
+#define PB1_PLL_RO0_OVRD_REG0__PLL_CFG_RO_BW_CNTRL_OVRD_EN_0__SHIFT 0x00000008
+#define PB1_PLL_RO0_OVRD_REG0__PLL_CFG_RO_BW_CNTRL_OVRD_VAL_0_MASK 0x000000ffL
+#define PB1_PLL_RO0_OVRD_REG0__PLL_CFG_RO_BW_CNTRL_OVRD_VAL_0__SHIFT 0x00000000
+#define PB1_PLL_RO0_OVRD_REG0__PLL_CFG_RO_CORECLK_DIV_OVRD_EN_0_MASK 0x00001000L
+#define PB1_PLL_RO0_OVRD_REG0__PLL_CFG_RO_CORECLK_DIV_OVRD_EN_0__SHIFT 0x0000000c
+#define PB1_PLL_RO0_OVRD_REG0__PLL_CFG_RO_CORECLK_DIV_OVRD_VAL_0_MASK 0x00000e00L
+#define PB1_PLL_RO0_OVRD_REG0__PLL_CFG_RO_CORECLK_DIV_OVRD_VAL_0__SHIFT 0x00000009
+#define PB1_PLL_RO0_OVRD_REG0__PLL_CFG_RO_CORECLK_EN_OVRD_EN_0_MASK 0x00004000L
+#define PB1_PLL_RO0_OVRD_REG0__PLL_CFG_RO_CORECLK_EN_OVRD_EN_0__SHIFT 0x0000000e
+#define PB1_PLL_RO0_OVRD_REG0__PLL_CFG_RO_CORECLK_EN_OVRD_VAL_0_MASK 0x00002000L
+#define PB1_PLL_RO0_OVRD_REG0__PLL_CFG_RO_CORECLK_EN_OVRD_VAL_0__SHIFT 0x0000000d
+#define PB1_PLL_RO0_OVRD_REG0__PLL_CFG_RO_FBDIV_OVRD_EN_0_MASK 0x10000000L
+#define PB1_PLL_RO0_OVRD_REG0__PLL_CFG_RO_FBDIV_OVRD_EN_0__SHIFT 0x0000001c
+#define PB1_PLL_RO0_OVRD_REG0__PLL_CFG_RO_FBDIV_OVRD_VAL_0_MASK 0x0fff8000L
+#define PB1_PLL_RO0_OVRD_REG0__PLL_CFG_RO_FBDIV_OVRD_VAL_0__SHIFT 0x0000000f
+#define PB1_PLL_RO0_OVRD_REG0__PLL_CFG_RO_VTOI_BIAS_CNTRL_OVRD_EN_0_MASK 0x80000000L
+#define PB1_PLL_RO0_OVRD_REG0__PLL_CFG_RO_VTOI_BIAS_CNTRL_OVRD_EN_0__SHIFT 0x0000001f
+#define PB1_PLL_RO0_OVRD_REG0__PLL_CFG_RO_VTOI_BIAS_CNTRL_OVRD_VAL_0_MASK 0x40000000L
+#define PB1_PLL_RO0_OVRD_REG0__PLL_CFG_RO_VTOI_BIAS_CNTRL_OVRD_VAL_0__SHIFT 0x0000001e
+#define PB1_PLL_RO0_OVRD_REG1__PLL_CFG_RO_REFCLK_SRC_OVRD_EN_0_MASK 0x00400000L
+#define PB1_PLL_RO0_OVRD_REG1__PLL_CFG_RO_REFCLK_SRC_OVRD_EN_0__SHIFT 0x00000016
+#define PB1_PLL_RO0_OVRD_REG1__PLL_CFG_RO_REFCLK_SRC_OVRD_VAL_0_MASK 0x00380000L
+#define PB1_PLL_RO0_OVRD_REG1__PLL_CFG_RO_REFCLK_SRC_OVRD_VAL_0__SHIFT 0x00000013
+#define PB1_PLL_RO0_OVRD_REG1__PLL_CFG_RO_REFDIV_OVRD_EN_0_MASK 0x00000020L
+#define PB1_PLL_RO0_OVRD_REG1__PLL_CFG_RO_REFDIV_OVRD_EN_0__SHIFT 0x00000005
+#define PB1_PLL_RO0_OVRD_REG1__PLL_CFG_RO_REFDIV_OVRD_VAL_0_MASK 0x0000001fL
+#define PB1_PLL_RO0_OVRD_REG1__PLL_CFG_RO_REFDIV_OVRD_VAL_0__SHIFT 0x00000000
+#define PB1_PLL_RO0_OVRD_REG1__PLL_CFG_RO_VCO_MODE_OVRD_EN_0_MASK 0x00000100L
+#define PB1_PLL_RO0_OVRD_REG1__PLL_CFG_RO_VCO_MODE_OVRD_EN_0__SHIFT 0x00000008
+#define PB1_PLL_RO0_OVRD_REG1__PLL_CFG_RO_VCO_MODE_OVRD_VAL_0_MASK 0x000000c0L
+#define PB1_PLL_RO0_OVRD_REG1__PLL_CFG_RO_VCO_MODE_OVRD_VAL_0__SHIFT 0x00000006
+#define PB1_PLL_RO0_OVRD_REG1__PLL_RO_HSCLK_LEFT_EN_OVRD_EN_0_MASK 0x00000400L
+#define PB1_PLL_RO0_OVRD_REG1__PLL_RO_HSCLK_LEFT_EN_OVRD_EN_0__SHIFT 0x0000000a
+#define PB1_PLL_RO0_OVRD_REG1__PLL_RO_HSCLK_LEFT_EN_OVRD_VAL_0_MASK 0x00000200L
+#define PB1_PLL_RO0_OVRD_REG1__PLL_RO_HSCLK_LEFT_EN_OVRD_VAL_0__SHIFT 0x00000009
+#define PB1_PLL_RO0_OVRD_REG1__PLL_RO_HSCLK_RIGHT_EN_OVRD_EN_0_MASK 0x00001000L
+#define PB1_PLL_RO0_OVRD_REG1__PLL_RO_HSCLK_RIGHT_EN_OVRD_EN_0__SHIFT 0x0000000c
+#define PB1_PLL_RO0_OVRD_REG1__PLL_RO_HSCLK_RIGHT_EN_OVRD_VAL_0_MASK 0x00000800L
+#define PB1_PLL_RO0_OVRD_REG1__PLL_RO_HSCLK_RIGHT_EN_OVRD_VAL_0__SHIFT 0x0000000b
+#define PB1_PLL_RO0_OVRD_REG1__PLL_RO_PWRON_OVRD_EN_0_MASK 0x00004000L
+#define PB1_PLL_RO0_OVRD_REG1__PLL_RO_PWRON_OVRD_EN_0__SHIFT 0x0000000e
+#define PB1_PLL_RO0_OVRD_REG1__PLL_RO_PWRON_OVRD_VAL_0_MASK 0x00002000L
+#define PB1_PLL_RO0_OVRD_REG1__PLL_RO_PWRON_OVRD_VAL_0__SHIFT 0x0000000d
+#define PB1_PLL_RO0_SCI_STAT_OVRD_REG0__PLL_RO0_FREQMODE_MASK 0x00000300L
+#define PB1_PLL_RO0_SCI_STAT_OVRD_REG0__PLL_RO0_FREQMODE__SHIFT 0x00000008
+#define PB1_PLL_RO0_SCI_STAT_OVRD_REG0__PLL_RO0_IGNR_FREQMODE_SCI_UPDT_MASK 0x00000002L
+#define PB1_PLL_RO0_SCI_STAT_OVRD_REG0__PLL_RO0_IGNR_FREQMODE_SCI_UPDT__SHIFT 0x00000001
+#define PB1_PLL_RO0_SCI_STAT_OVRD_REG0__PLL_RO0_IGNR_PLLPWR_SCI_UPDT_MASK 0x00000001L
+#define PB1_PLL_RO0_SCI_STAT_OVRD_REG0__PLL_RO0_IGNR_PLLPWR_SCI_UPDT__SHIFT 0x00000000
+#define PB1_PLL_RO0_SCI_STAT_OVRD_REG0__PLL_RO0_PLLPWR_MASK 0x00000070L
+#define PB1_PLL_RO0_SCI_STAT_OVRD_REG0__PLL_RO0_PLLPWR__SHIFT 0x00000004
+#define PB1_PLL_RO1_SCI_STAT_OVRD_REG0__PLL_RO1_FREQMODE_MASK 0x00000300L
+#define PB1_PLL_RO1_SCI_STAT_OVRD_REG0__PLL_RO1_FREQMODE__SHIFT 0x00000008
+#define PB1_PLL_RO1_SCI_STAT_OVRD_REG0__PLL_RO1_IGNR_FREQMODE_SCI_UPDT_MASK 0x00000002L
+#define PB1_PLL_RO1_SCI_STAT_OVRD_REG0__PLL_RO1_IGNR_FREQMODE_SCI_UPDT__SHIFT 0x00000001
+#define PB1_PLL_RO1_SCI_STAT_OVRD_REG0__PLL_RO1_IGNR_PLLPWR_SCI_UPDT_MASK 0x00000001L
+#define PB1_PLL_RO1_SCI_STAT_OVRD_REG0__PLL_RO1_IGNR_PLLPWR_SCI_UPDT__SHIFT 0x00000000
+#define PB1_PLL_RO1_SCI_STAT_OVRD_REG0__PLL_RO1_PLLPWR_MASK 0x00000070L
+#define PB1_PLL_RO1_SCI_STAT_OVRD_REG0__PLL_RO1_PLLPWR__SHIFT 0x00000004
+#define PB1_PLL_RO2_SCI_STAT_OVRD_REG0__PLL_RO2_FREQMODE_MASK 0x00000300L
+#define PB1_PLL_RO2_SCI_STAT_OVRD_REG0__PLL_RO2_FREQMODE__SHIFT 0x00000008
+#define PB1_PLL_RO2_SCI_STAT_OVRD_REG0__PLL_RO2_IGNR_FREQMODE_SCI_UPDT_MASK 0x00000002L
+#define PB1_PLL_RO2_SCI_STAT_OVRD_REG0__PLL_RO2_IGNR_FREQMODE_SCI_UPDT__SHIFT 0x00000001
+#define PB1_PLL_RO2_SCI_STAT_OVRD_REG0__PLL_RO2_IGNR_PLLPWR_SCI_UPDT_MASK 0x00000001L
+#define PB1_PLL_RO2_SCI_STAT_OVRD_REG0__PLL_RO2_IGNR_PLLPWR_SCI_UPDT__SHIFT 0x00000000
+#define PB1_PLL_RO2_SCI_STAT_OVRD_REG0__PLL_RO2_PLLPWR_MASK 0x00000070L
+#define PB1_PLL_RO2_SCI_STAT_OVRD_REG0__PLL_RO2_PLLPWR__SHIFT 0x00000004
+#define PB1_PLL_RO3_SCI_STAT_OVRD_REG0__PLL_RO3_FREQMODE_MASK 0x00000300L
+#define PB1_PLL_RO3_SCI_STAT_OVRD_REG0__PLL_RO3_FREQMODE__SHIFT 0x00000008
+#define PB1_PLL_RO3_SCI_STAT_OVRD_REG0__PLL_RO3_IGNR_FREQMODE_SCI_UPDT_MASK 0x00000002L
+#define PB1_PLL_RO3_SCI_STAT_OVRD_REG0__PLL_RO3_IGNR_FREQMODE_SCI_UPDT__SHIFT 0x00000001
+#define PB1_PLL_RO3_SCI_STAT_OVRD_REG0__PLL_RO3_IGNR_PLLPWR_SCI_UPDT_MASK 0x00000001L
+#define PB1_PLL_RO3_SCI_STAT_OVRD_REG0__PLL_RO3_IGNR_PLLPWR_SCI_UPDT__SHIFT 0x00000000
+#define PB1_PLL_RO3_SCI_STAT_OVRD_REG0__PLL_RO3_PLLPWR_MASK 0x00000070L
+#define PB1_PLL_RO3_SCI_STAT_OVRD_REG0__PLL_RO3_PLLPWR__SHIFT 0x00000004
+#define PB1_PLL_RO_GLB_CTRL_REG0__PLL_LC_HSCLK_LEFT_EN_LUT_ENTRY_LS0_MASK 0x00000200L
+#define PB1_PLL_RO_GLB_CTRL_REG0__PLL_LC_HSCLK_LEFT_EN_LUT_ENTRY_LS0__SHIFT 0x00000009
+#define PB1_PLL_RO_GLB_CTRL_REG0__PLL_LC_HSCLK_LEFT_EN_LUT_ENTRY_LS1_MASK 0x00000400L
+#define PB1_PLL_RO_GLB_CTRL_REG0__PLL_LC_HSCLK_LEFT_EN_LUT_ENTRY_LS1__SHIFT 0x0000000a
+#define PB1_PLL_RO_GLB_CTRL_REG0__PLL_LC_HSCLK_LEFT_EN_LUT_ENTRY_LS2_MASK 0x00000800L
+#define PB1_PLL_RO_GLB_CTRL_REG0__PLL_LC_HSCLK_LEFT_EN_LUT_ENTRY_LS2__SHIFT 0x0000000b
+#define PB1_PLL_RO_GLB_CTRL_REG0__PLL_LC_HSCLK_LEFT_LEFT_EN_GATING_EN_MASK 0x00100000L
+#define PB1_PLL_RO_GLB_CTRL_REG0__PLL_LC_HSCLK_LEFT_LEFT_EN_GATING_EN__SHIFT 0x00000014
+#define PB1_PLL_RO_GLB_CTRL_REG0__PLL_LC_HSCLK_LEFT_RIGHT_EN_GATING_EN_MASK 0x00200000L
+#define PB1_PLL_RO_GLB_CTRL_REG0__PLL_LC_HSCLK_LEFT_RIGHT_EN_GATING_EN__SHIFT 0x00000015
+#define PB1_PLL_RO_GLB_CTRL_REG0__PLL_LC_HSCLK_RIGHT_EN_LUT_ENTRY_LS0_MASK 0x00001000L
+#define PB1_PLL_RO_GLB_CTRL_REG0__PLL_LC_HSCLK_RIGHT_EN_LUT_ENTRY_LS0__SHIFT 0x0000000c
+#define PB1_PLL_RO_GLB_CTRL_REG0__PLL_LC_HSCLK_RIGHT_EN_LUT_ENTRY_LS1_MASK 0x00002000L
+#define PB1_PLL_RO_GLB_CTRL_REG0__PLL_LC_HSCLK_RIGHT_EN_LUT_ENTRY_LS1__SHIFT 0x0000000d
+#define PB1_PLL_RO_GLB_CTRL_REG0__PLL_LC_HSCLK_RIGHT_EN_LUT_ENTRY_LS2_MASK 0x00004000L
+#define PB1_PLL_RO_GLB_CTRL_REG0__PLL_LC_HSCLK_RIGHT_EN_LUT_ENTRY_LS2__SHIFT 0x0000000e
+#define PB1_PLL_RO_GLB_CTRL_REG0__PLL_LC_HSCLK_RIGHT_LEFT_EN_GATING_EN_MASK 0x00400000L
+#define PB1_PLL_RO_GLB_CTRL_REG0__PLL_LC_HSCLK_RIGHT_LEFT_EN_GATING_EN__SHIFT 0x00000016
+#define PB1_PLL_RO_GLB_CTRL_REG0__PLL_LC_HSCLK_RIGHT_RIGHT_EN_GATING_EN_MASK 0x00800000L
+#define PB1_PLL_RO_GLB_CTRL_REG0__PLL_LC_HSCLK_RIGHT_RIGHT_EN_GATING_EN__SHIFT 0x00000017
+#define PB1_PLL_RO_GLB_CTRL_REG0__PLL_LC_PWRON_LUT_ENTRY_LS2_MASK 0x00000100L
+#define PB1_PLL_RO_GLB_CTRL_REG0__PLL_LC_PWRON_LUT_ENTRY_LS2__SHIFT 0x00000008
+#define PB1_PLL_RO_GLB_CTRL_REG0__PLL_RO_HSCLK_LEFT_EN_LUT_ENTRY_LS0_MASK 0x00000002L
+#define PB1_PLL_RO_GLB_CTRL_REG0__PLL_RO_HSCLK_LEFT_EN_LUT_ENTRY_LS0__SHIFT 0x00000001
+#define PB1_PLL_RO_GLB_CTRL_REG0__PLL_RO_HSCLK_LEFT_EN_LUT_ENTRY_LS1_MASK 0x00000004L
+#define PB1_PLL_RO_GLB_CTRL_REG0__PLL_RO_HSCLK_LEFT_EN_LUT_ENTRY_LS1__SHIFT 0x00000002
+#define PB1_PLL_RO_GLB_CTRL_REG0__PLL_RO_HSCLK_LEFT_EN_LUT_ENTRY_LS2_MASK 0x00000008L
+#define PB1_PLL_RO_GLB_CTRL_REG0__PLL_RO_HSCLK_LEFT_EN_LUT_ENTRY_LS2__SHIFT 0x00000003
+#define PB1_PLL_RO_GLB_CTRL_REG0__PLL_RO_HSCLK_LEFT_LEFT_EN_GATING_EN_MASK 0x00010000L
+#define PB1_PLL_RO_GLB_CTRL_REG0__PLL_RO_HSCLK_LEFT_LEFT_EN_GATING_EN__SHIFT 0x00000010
+#define PB1_PLL_RO_GLB_CTRL_REG0__PLL_RO_HSCLK_LEFT_RIGHT_EN_GATING_EN_MASK 0x00020000L
+#define PB1_PLL_RO_GLB_CTRL_REG0__PLL_RO_HSCLK_LEFT_RIGHT_EN_GATING_EN__SHIFT 0x00000011
+#define PB1_PLL_RO_GLB_CTRL_REG0__PLL_RO_HSCLK_RIGHT_EN_LUT_ENTRY_LS0_MASK 0x00000010L
+#define PB1_PLL_RO_GLB_CTRL_REG0__PLL_RO_HSCLK_RIGHT_EN_LUT_ENTRY_LS0__SHIFT 0x00000004
+#define PB1_PLL_RO_GLB_CTRL_REG0__PLL_RO_HSCLK_RIGHT_EN_LUT_ENTRY_LS1_MASK 0x00000020L
+#define PB1_PLL_RO_GLB_CTRL_REG0__PLL_RO_HSCLK_RIGHT_EN_LUT_ENTRY_LS1__SHIFT 0x00000005
+#define PB1_PLL_RO_GLB_CTRL_REG0__PLL_RO_HSCLK_RIGHT_EN_LUT_ENTRY_LS2_MASK 0x00000040L
+#define PB1_PLL_RO_GLB_CTRL_REG0__PLL_RO_HSCLK_RIGHT_EN_LUT_ENTRY_LS2__SHIFT 0x00000006
+#define PB1_PLL_RO_GLB_CTRL_REG0__PLL_RO_HSCLK_RIGHT_LEFT_EN_GATING_EN_MASK 0x00040000L
+#define PB1_PLL_RO_GLB_CTRL_REG0__PLL_RO_HSCLK_RIGHT_LEFT_EN_GATING_EN__SHIFT 0x00000012
+#define PB1_PLL_RO_GLB_CTRL_REG0__PLL_RO_HSCLK_RIGHT_RIGHT_EN_GATING_EN_MASK 0x00080000L
+#define PB1_PLL_RO_GLB_CTRL_REG0__PLL_RO_HSCLK_RIGHT_RIGHT_EN_GATING_EN__SHIFT 0x00000013
+#define PB1_PLL_RO_GLB_CTRL_REG0__PLL_RO_PWRON_LUT_ENTRY_LS2_MASK 0x00000080L
+#define PB1_PLL_RO_GLB_CTRL_REG0__PLL_RO_PWRON_LUT_ENTRY_LS2__SHIFT 0x00000007
+#define PB1_PLL_RO_GLB_CTRL_REG0__PLL_TST_LOSPDTST_SRC_MASK 0x00000001L
+#define PB1_PLL_RO_GLB_CTRL_REG0__PLL_TST_LOSPDTST_SRC__SHIFT 0x00000000
+#define PB1_RX_GLB_CTRL_REG0__RX_CFG_ADAPT_MODE_GEN1_MASK 0x000003ffL
+#define PB1_RX_GLB_CTRL_REG0__RX_CFG_ADAPT_MODE_GEN1__SHIFT 0x00000000
+#define PB1_RX_GLB_CTRL_REG0__RX_CFG_ADAPT_MODE_GEN2_MASK 0x000ffc00L
+#define PB1_RX_GLB_CTRL_REG0__RX_CFG_ADAPT_MODE_GEN2__SHIFT 0x0000000a
+#define PB1_RX_GLB_CTRL_REG0__RX_CFG_ADAPT_MODE_GEN3_MASK 0x3ff00000L
+#define PB1_RX_GLB_CTRL_REG0__RX_CFG_ADAPT_MODE_GEN3__SHIFT 0x00000014
+#define PB1_RX_GLB_CTRL_REG0__RX_CFG_ADAPT_RST_MODE_MASK 0xc0000000L
+#define PB1_RX_GLB_CTRL_REG0__RX_CFG_ADAPT_RST_MODE__SHIFT 0x0000001e
+#define PB1_RX_GLB_CTRL_REG1__RX_ADAPT_HLD_ASRT_TO_DCLK_EN_MASK 0xc0000000L
+#define PB1_RX_GLB_CTRL_REG1__RX_ADAPT_HLD_ASRT_TO_DCLK_EN__SHIFT 0x0000001e
+#define PB1_RX_GLB_CTRL_REG1__RX_CFG_CDR_FR_GAIN_GEN1_MASK 0x0000000fL
+#define PB1_RX_GLB_CTRL_REG1__RX_CFG_CDR_FR_GAIN_GEN1__SHIFT 0x00000000
+#define PB1_RX_GLB_CTRL_REG1__RX_CFG_CDR_FR_GAIN_GEN2_MASK 0x000000f0L
+#define PB1_RX_GLB_CTRL_REG1__RX_CFG_CDR_FR_GAIN_GEN2__SHIFT 0x00000004
+#define PB1_RX_GLB_CTRL_REG1__RX_CFG_CDR_FR_GAIN_GEN3_MASK 0x00000f00L
+#define PB1_RX_GLB_CTRL_REG1__RX_CFG_CDR_FR_GAIN_GEN3__SHIFT 0x00000008
+#define PB1_RX_GLB_CTRL_REG1__RX_CFG_CDR_PH_GAIN_GEN1_MASK 0x0000f000L
+#define PB1_RX_GLB_CTRL_REG1__RX_CFG_CDR_PH_GAIN_GEN1__SHIFT 0x0000000c
+#define PB1_RX_GLB_CTRL_REG1__RX_CFG_CDR_PH_GAIN_GEN2_MASK 0x000f0000L
+#define PB1_RX_GLB_CTRL_REG1__RX_CFG_CDR_PH_GAIN_GEN2__SHIFT 0x00000010
+#define PB1_RX_GLB_CTRL_REG1__RX_CFG_CDR_PH_GAIN_GEN3_MASK 0x00f00000L
+#define PB1_RX_GLB_CTRL_REG1__RX_CFG_CDR_PH_GAIN_GEN3__SHIFT 0x00000014
+#define PB1_RX_GLB_CTRL_REG1__RX_CFG_CDR_PI_STPSZ_GEN1_MASK 0x01000000L
+#define PB1_RX_GLB_CTRL_REG1__RX_CFG_CDR_PI_STPSZ_GEN1__SHIFT 0x00000018
+#define PB1_RX_GLB_CTRL_REG1__RX_CFG_CDR_PI_STPSZ_GEN2_MASK 0x02000000L
+#define PB1_RX_GLB_CTRL_REG1__RX_CFG_CDR_PI_STPSZ_GEN2__SHIFT 0x00000019
+#define PB1_RX_GLB_CTRL_REG1__RX_CFG_CDR_PI_STPSZ_GEN3_MASK 0x04000000L
+#define PB1_RX_GLB_CTRL_REG1__RX_CFG_CDR_PI_STPSZ_GEN3__SHIFT 0x0000001a
+#define PB1_RX_GLB_CTRL_REG1__RX_CFG_LEQ_DCATTN_BYP_EN_GEN1_MASK 0x08000000L
+#define PB1_RX_GLB_CTRL_REG1__RX_CFG_LEQ_DCATTN_BYP_EN_GEN1__SHIFT 0x0000001b
+#define PB1_RX_GLB_CTRL_REG1__RX_CFG_LEQ_DCATTN_BYP_EN_GEN2_MASK 0x10000000L
+#define PB1_RX_GLB_CTRL_REG1__RX_CFG_LEQ_DCATTN_BYP_EN_GEN2__SHIFT 0x0000001c
+#define PB1_RX_GLB_CTRL_REG1__RX_CFG_LEQ_DCATTN_BYP_EN_GEN3_MASK 0x20000000L
+#define PB1_RX_GLB_CTRL_REG1__RX_CFG_LEQ_DCATTN_BYP_EN_GEN3__SHIFT 0x0000001d
+#define PB1_RX_GLB_CTRL_REG2__RX_CFG_CDR_TIME_GEN1_MASK 0x0000f000L
+#define PB1_RX_GLB_CTRL_REG2__RX_CFG_CDR_TIME_GEN1__SHIFT 0x0000000c
+#define PB1_RX_GLB_CTRL_REG2__RX_CFG_CDR_TIME_GEN2_MASK 0x000f0000L
+#define PB1_RX_GLB_CTRL_REG2__RX_CFG_CDR_TIME_GEN2__SHIFT 0x00000010
+#define PB1_RX_GLB_CTRL_REG2__RX_CFG_CDR_TIME_GEN3_MASK 0x00f00000L
+#define PB1_RX_GLB_CTRL_REG2__RX_CFG_CDR_TIME_GEN3__SHIFT 0x00000014
+#define PB1_RX_GLB_CTRL_REG2__RX_CFG_LEQ_LOOP_GAIN_GEN1_MASK 0x03000000L
+#define PB1_RX_GLB_CTRL_REG2__RX_CFG_LEQ_LOOP_GAIN_GEN1__SHIFT 0x00000018
+#define PB1_RX_GLB_CTRL_REG2__RX_CFG_LEQ_LOOP_GAIN_GEN2_MASK 0x0c000000L
+#define PB1_RX_GLB_CTRL_REG2__RX_CFG_LEQ_LOOP_GAIN_GEN2__SHIFT 0x0000001a
+#define PB1_RX_GLB_CTRL_REG2__RX_CFG_LEQ_LOOP_GAIN_GEN3_MASK 0x30000000L
+#define PB1_RX_GLB_CTRL_REG2__RX_CFG_LEQ_LOOP_GAIN_GEN3__SHIFT 0x0000001c
+#define PB1_RX_GLB_CTRL_REG2__RX_DCLK_EN_ASRT_TO_ADAPT_HLD_MASK 0xc0000000L
+#define PB1_RX_GLB_CTRL_REG2__RX_DCLK_EN_ASRT_TO_ADAPT_HLD__SHIFT 0x0000001e
+#define PB1_RX_GLB_CTRL_REG3__RX_CFG_CDR_FR_EN_GEN1_MASK 0x00000001L
+#define PB1_RX_GLB_CTRL_REG3__RX_CFG_CDR_FR_EN_GEN1__SHIFT 0x00000000
+#define PB1_RX_GLB_CTRL_REG3__RX_CFG_CDR_FR_EN_GEN2_MASK 0x00000002L
+#define PB1_RX_GLB_CTRL_REG3__RX_CFG_CDR_FR_EN_GEN2__SHIFT 0x00000001
+#define PB1_RX_GLB_CTRL_REG3__RX_CFG_CDR_FR_EN_GEN3_MASK 0x00000004L
+#define PB1_RX_GLB_CTRL_REG3__RX_CFG_CDR_FR_EN_GEN3__SHIFT 0x00000002
+#define PB1_RX_GLB_CTRL_REG3__RX_CFG_DFE_TIME_GEN1_MASK 0x00f00000L
+#define PB1_RX_GLB_CTRL_REG3__RX_CFG_DFE_TIME_GEN1__SHIFT 0x00000014
+#define PB1_RX_GLB_CTRL_REG3__RX_CFG_DFE_TIME_GEN2_MASK 0x0f000000L
+#define PB1_RX_GLB_CTRL_REG3__RX_CFG_DFE_TIME_GEN2__SHIFT 0x00000018
+#define PB1_RX_GLB_CTRL_REG3__RX_CFG_DFE_TIME_GEN3_MASK 0xf0000000L
+#define PB1_RX_GLB_CTRL_REG3__RX_CFG_DFE_TIME_GEN3__SHIFT 0x0000001c
+#define PB1_RX_GLB_CTRL_REG4__RX_CFG_FOM_BER_GEN1_MASK 0x00000007L
+#define PB1_RX_GLB_CTRL_REG4__RX_CFG_FOM_BER_GEN1__SHIFT 0x00000000
+#define PB1_RX_GLB_CTRL_REG4__RX_CFG_FOM_BER_GEN2_MASK 0x00000038L
+#define PB1_RX_GLB_CTRL_REG4__RX_CFG_FOM_BER_GEN2__SHIFT 0x00000003
+#define PB1_RX_GLB_CTRL_REG4__RX_CFG_FOM_BER_GEN3_MASK 0x000001c0L
+#define PB1_RX_GLB_CTRL_REG4__RX_CFG_FOM_BER_GEN3__SHIFT 0x00000006
+#define PB1_RX_GLB_CTRL_REG4__RX_CFG_FOM_TIME_GEN1_MASK 0x00f00000L
+#define PB1_RX_GLB_CTRL_REG4__RX_CFG_FOM_TIME_GEN1__SHIFT 0x00000014
+#define PB1_RX_GLB_CTRL_REG4__RX_CFG_FOM_TIME_GEN2_MASK 0x0f000000L
+#define PB1_RX_GLB_CTRL_REG4__RX_CFG_FOM_TIME_GEN2__SHIFT 0x00000018
+#define PB1_RX_GLB_CTRL_REG4__RX_CFG_FOM_TIME_GEN3_MASK 0xf0000000L
+#define PB1_RX_GLB_CTRL_REG4__RX_CFG_FOM_TIME_GEN3__SHIFT 0x0000001c
+#define PB1_RX_GLB_CTRL_REG4__RX_CFG_LEQ_POLE_BYP_VAL_GEN1_MASK 0x00000e00L
+#define PB1_RX_GLB_CTRL_REG4__RX_CFG_LEQ_POLE_BYP_VAL_GEN1__SHIFT 0x00000009
+#define PB1_RX_GLB_CTRL_REG4__RX_CFG_LEQ_POLE_BYP_VAL_GEN2_MASK 0x00007000L
+#define PB1_RX_GLB_CTRL_REG4__RX_CFG_LEQ_POLE_BYP_VAL_GEN2__SHIFT 0x0000000c
+#define PB1_RX_GLB_CTRL_REG4__RX_CFG_LEQ_POLE_BYP_VAL_GEN3_MASK 0x00038000L
+#define PB1_RX_GLB_CTRL_REG4__RX_CFG_LEQ_POLE_BYP_VAL_GEN3__SHIFT 0x0000000f
+#define PB1_RX_GLB_CTRL_REG5__RX_CFG_LEQ_DCATTN_BYP_VAL_GEN1_MASK 0x0000001fL
+#define PB1_RX_GLB_CTRL_REG5__RX_CFG_LEQ_DCATTN_BYP_VAL_GEN1__SHIFT 0x00000000
+#define PB1_RX_GLB_CTRL_REG5__RX_CFG_LEQ_DCATTN_BYP_VAL_GEN2_MASK 0x000003e0L
+#define PB1_RX_GLB_CTRL_REG5__RX_CFG_LEQ_DCATTN_BYP_VAL_GEN2__SHIFT 0x00000005
+#define PB1_RX_GLB_CTRL_REG5__RX_CFG_LEQ_DCATTN_BYP_VAL_GEN3_MASK 0x00007c00L
+#define PB1_RX_GLB_CTRL_REG5__RX_CFG_LEQ_DCATTN_BYP_VAL_GEN3__SHIFT 0x0000000a
+#define PB1_RX_GLB_CTRL_REG5__RX_CFG_LEQ_POLE_BYP_EN_GEN1_MASK 0x00008000L
+#define PB1_RX_GLB_CTRL_REG5__RX_CFG_LEQ_POLE_BYP_EN_GEN1__SHIFT 0x0000000f
+#define PB1_RX_GLB_CTRL_REG5__RX_CFG_LEQ_POLE_BYP_EN_GEN2_MASK 0x00010000L
+#define PB1_RX_GLB_CTRL_REG5__RX_CFG_LEQ_POLE_BYP_EN_GEN2__SHIFT 0x00000010
+#define PB1_RX_GLB_CTRL_REG5__RX_CFG_LEQ_POLE_BYP_EN_GEN3_MASK 0x00020000L
+#define PB1_RX_GLB_CTRL_REG5__RX_CFG_LEQ_POLE_BYP_EN_GEN3__SHIFT 0x00000011
+#define PB1_RX_GLB_CTRL_REG5__RX_CFG_LEQ_SHUNT_EN_GEN1_MASK 0x00040000L
+#define PB1_RX_GLB_CTRL_REG5__RX_CFG_LEQ_SHUNT_EN_GEN1__SHIFT 0x00000012
+#define PB1_RX_GLB_CTRL_REG5__RX_CFG_LEQ_SHUNT_EN_GEN2_MASK 0x00080000L
+#define PB1_RX_GLB_CTRL_REG5__RX_CFG_LEQ_SHUNT_EN_GEN2__SHIFT 0x00000013
+#define PB1_RX_GLB_CTRL_REG5__RX_CFG_LEQ_SHUNT_EN_GEN3_MASK 0x00100000L
+#define PB1_RX_GLB_CTRL_REG5__RX_CFG_LEQ_SHUNT_EN_GEN3__SHIFT 0x00000014
+#define PB1_RX_GLB_CTRL_REG5__RX_CFG_TERM_MODE_GEN1_MASK 0x08000000L
+#define PB1_RX_GLB_CTRL_REG5__RX_CFG_TERM_MODE_GEN1__SHIFT 0x0000001b
+#define PB1_RX_GLB_CTRL_REG5__RX_CFG_TERM_MODE_GEN2_MASK 0x10000000L
+#define PB1_RX_GLB_CTRL_REG5__RX_CFG_TERM_MODE_GEN2__SHIFT 0x0000001c
+#define PB1_RX_GLB_CTRL_REG5__RX_CFG_TERM_MODE_GEN3_MASK 0x20000000L
+#define PB1_RX_GLB_CTRL_REG5__RX_CFG_TERM_MODE_GEN3__SHIFT 0x0000001d
+#define PB1_RX_GLB_CTRL_REG5__RX_FORCE_DLL_RST_RXPWR_LS2OFF_TO_LS0_MASK 0x40000000L
+#define PB1_RX_GLB_CTRL_REG5__RX_FORCE_DLL_RST_RXPWR_LS2OFF_TO_LS0__SHIFT 0x0000001e
+#define PB1_RX_GLB_CTRL_REG6__RX_AUX_PWRON_LUT_ENTRY_LS2_MASK 0x08000000L
+#define PB1_RX_GLB_CTRL_REG6__RX_AUX_PWRON_LUT_ENTRY_LS2__SHIFT 0x0000001b
+#define PB1_RX_GLB_CTRL_REG6__RX_CFG_LEQ_TIME_GEN1_MASK 0x0000000fL
+#define PB1_RX_GLB_CTRL_REG6__RX_CFG_LEQ_TIME_GEN1__SHIFT 0x00000000
+#define PB1_RX_GLB_CTRL_REG6__RX_CFG_LEQ_TIME_GEN2_MASK 0x000000f0L
+#define PB1_RX_GLB_CTRL_REG6__RX_CFG_LEQ_TIME_GEN2__SHIFT 0x00000004
+#define PB1_RX_GLB_CTRL_REG6__RX_CFG_LEQ_TIME_GEN3_MASK 0x00000f00L
+#define PB1_RX_GLB_CTRL_REG6__RX_CFG_LEQ_TIME_GEN3__SHIFT 0x00000008
+#define PB1_RX_GLB_CTRL_REG6__RX_CFG_OC_TIME_GEN1_MASK 0x0000f000L
+#define PB1_RX_GLB_CTRL_REG6__RX_CFG_OC_TIME_GEN1__SHIFT 0x0000000c
+#define PB1_RX_GLB_CTRL_REG6__RX_CFG_OC_TIME_GEN2_MASK 0x000f0000L
+#define PB1_RX_GLB_CTRL_REG6__RX_CFG_OC_TIME_GEN2__SHIFT 0x00000010
+#define PB1_RX_GLB_CTRL_REG6__RX_CFG_OC_TIME_GEN3_MASK 0x00f00000L
+#define PB1_RX_GLB_CTRL_REG6__RX_CFG_OC_TIME_GEN3__SHIFT 0x00000014
+#define PB1_RX_GLB_CTRL_REG6__RX_FRONTEND_PWRON_LUT_ENTRY_LS0_CDR_EN_0_MASK 0x01000000L
+#define PB1_RX_GLB_CTRL_REG6__RX_FRONTEND_PWRON_LUT_ENTRY_LS0_CDR_EN_0__SHIFT 0x00000018
+#define PB1_RX_GLB_CTRL_REG6__RX_FRONTEND_PWRON_LUT_ENTRY_LS2_MASK 0x04000000L
+#define PB1_RX_GLB_CTRL_REG6__RX_FRONTEND_PWRON_LUT_ENTRY_LS2__SHIFT 0x0000001a
+#define PB1_RX_GLB_CTRL_REG7__RX_CFG_DLL_CPI_SEL_GEN1_MASK 0x001c0000L
+#define PB1_RX_GLB_CTRL_REG7__RX_CFG_DLL_CPI_SEL_GEN1__SHIFT 0x00000012
+#define PB1_RX_GLB_CTRL_REG7__RX_CFG_DLL_CPI_SEL_GEN2_MASK 0x00e00000L
+#define PB1_RX_GLB_CTRL_REG7__RX_CFG_DLL_CPI_SEL_GEN2__SHIFT 0x00000015
+#define PB1_RX_GLB_CTRL_REG7__RX_CFG_DLL_CPI_SEL_GEN3_MASK 0x07000000L
+#define PB1_RX_GLB_CTRL_REG7__RX_CFG_DLL_CPI_SEL_GEN3__SHIFT 0x00000018
+#define PB1_RX_GLB_CTRL_REG7__RX_CFG_DLL_FLOCK_DISABLE_GEN1_MASK 0x08000000L
+#define PB1_RX_GLB_CTRL_REG7__RX_CFG_DLL_FLOCK_DISABLE_GEN1__SHIFT 0x0000001b
+#define PB1_RX_GLB_CTRL_REG7__RX_CFG_DLL_FLOCK_DISABLE_GEN2_MASK 0x10000000L
+#define PB1_RX_GLB_CTRL_REG7__RX_CFG_DLL_FLOCK_DISABLE_GEN2__SHIFT 0x0000001c
+#define PB1_RX_GLB_CTRL_REG7__RX_CFG_DLL_FLOCK_DISABLE_GEN3_MASK 0x20000000L
+#define PB1_RX_GLB_CTRL_REG7__RX_CFG_DLL_FLOCK_DISABLE_GEN3__SHIFT 0x0000001d
+#define PB1_RX_GLB_CTRL_REG7__RX_CFG_TH_LOOP_GAIN_GEN1_MASK 0x0000000fL
+#define PB1_RX_GLB_CTRL_REG7__RX_CFG_TH_LOOP_GAIN_GEN1__SHIFT 0x00000000
+#define PB1_RX_GLB_CTRL_REG7__RX_CFG_TH_LOOP_GAIN_GEN2_MASK 0x000000f0L
+#define PB1_RX_GLB_CTRL_REG7__RX_CFG_TH_LOOP_GAIN_GEN2__SHIFT 0x00000004
+#define PB1_RX_GLB_CTRL_REG7__RX_CFG_TH_LOOP_GAIN_GEN3_MASK 0x00000f00L
+#define PB1_RX_GLB_CTRL_REG7__RX_CFG_TH_LOOP_GAIN_GEN3__SHIFT 0x00000008
+#define PB1_RX_GLB_CTRL_REG7__RX_DCLK_EN_LUT_ENTRY_LS0_CDR_EN_0_MASK 0x00001000L
+#define PB1_RX_GLB_CTRL_REG7__RX_DCLK_EN_LUT_ENTRY_LS0_CDR_EN_0__SHIFT 0x0000000c
+#define PB1_RX_GLB_CTRL_REG7__RX_DCLK_EN_LUT_ENTRY_LS2_MASK 0x00002000L
+#define PB1_RX_GLB_CTRL_REG7__RX_DCLK_EN_LUT_ENTRY_LS2__SHIFT 0x0000000d
+#define PB1_RX_GLB_CTRL_REG7__RX_DLL_PWRON_LUT_ENTRY_LS2_MASK 0x00020000L
+#define PB1_RX_GLB_CTRL_REG7__RX_DLL_PWRON_LUT_ENTRY_LS2__SHIFT 0x00000011
+#define PB1_RX_GLB_OVRD_REG0__RX_ADAPT_FOM_OVRD_EN_MASK 0x80000000L
+#define PB1_RX_GLB_OVRD_REG0__RX_ADAPT_FOM_OVRD_EN__SHIFT 0x0000001f
+#define PB1_RX_GLB_OVRD_REG0__RX_ADAPT_FOM_OVRD_VAL_MASK 0x40000000L
+#define PB1_RX_GLB_OVRD_REG0__RX_ADAPT_FOM_OVRD_VAL__SHIFT 0x0000001e
+#define PB1_RX_GLB_OVRD_REG0__RX_ADAPT_HLD_OVRD_EN_MASK 0x00000002L
+#define PB1_RX_GLB_OVRD_REG0__RX_ADAPT_HLD_OVRD_EN__SHIFT 0x00000001
+#define PB1_RX_GLB_OVRD_REG0__RX_ADAPT_HLD_OVRD_VAL_MASK 0x00000001L
+#define PB1_RX_GLB_OVRD_REG0__RX_ADAPT_HLD_OVRD_VAL__SHIFT 0x00000000
+#define PB1_RX_GLB_OVRD_REG0__RX_ADAPT_RST_OVRD_EN_MASK 0x00000008L
+#define PB1_RX_GLB_OVRD_REG0__RX_ADAPT_RST_OVRD_EN__SHIFT 0x00000003
+#define PB1_RX_GLB_OVRD_REG0__RX_ADAPT_RST_OVRD_VAL_MASK 0x00000004L
+#define PB1_RX_GLB_OVRD_REG0__RX_ADAPT_RST_OVRD_VAL__SHIFT 0x00000002
+#define PB1_RX_GLB_OVRD_REG0__RX_AUX_PWRON_OVRD_EN_MASK 0x20000000L
+#define PB1_RX_GLB_OVRD_REG0__RX_AUX_PWRON_OVRD_EN__SHIFT 0x0000001d
+#define PB1_RX_GLB_OVRD_REG0__RX_AUX_PWRON_OVRD_VAL_MASK 0x10000000L
+#define PB1_RX_GLB_OVRD_REG0__RX_AUX_PWRON_OVRD_VAL__SHIFT 0x0000001c
+#define PB1_RX_GLB_OVRD_REG0__RX_CFG_DCLK_DIV_OVRD_EN_MASK 0x00000100L
+#define PB1_RX_GLB_OVRD_REG0__RX_CFG_DCLK_DIV_OVRD_EN__SHIFT 0x00000008
+#define PB1_RX_GLB_OVRD_REG0__RX_CFG_DCLK_DIV_OVRD_VAL_MASK 0x000000c0L
+#define PB1_RX_GLB_OVRD_REG0__RX_CFG_DCLK_DIV_OVRD_VAL__SHIFT 0x00000006
+#define PB1_RX_GLB_OVRD_REG0__RX_CFG_DLL_FREQ_MODE_OVRD_EN_MASK 0x00000400L
+#define PB1_RX_GLB_OVRD_REG0__RX_CFG_DLL_FREQ_MODE_OVRD_EN__SHIFT 0x0000000a
+#define PB1_RX_GLB_OVRD_REG0__RX_CFG_DLL_FREQ_MODE_OVRD_VAL_MASK 0x00000200L
+#define PB1_RX_GLB_OVRD_REG0__RX_CFG_DLL_FREQ_MODE_OVRD_VAL__SHIFT 0x00000009
+#define PB1_RX_GLB_OVRD_REG0__RX_CFG_PLLCLK_SEL_OVRD_EN_MASK 0x00001000L
+#define PB1_RX_GLB_OVRD_REG0__RX_CFG_PLLCLK_SEL_OVRD_EN__SHIFT 0x0000000c
+#define PB1_RX_GLB_OVRD_REG0__RX_CFG_PLLCLK_SEL_OVRD_VAL_MASK 0x00000800L
+#define PB1_RX_GLB_OVRD_REG0__RX_CFG_PLLCLK_SEL_OVRD_VAL__SHIFT 0x0000000b
+#define PB1_RX_GLB_OVRD_REG0__RX_CFG_RCLK_DIV_OVRD_EN_MASK 0x00004000L
+#define PB1_RX_GLB_OVRD_REG0__RX_CFG_RCLK_DIV_OVRD_EN__SHIFT 0x0000000e
+#define PB1_RX_GLB_OVRD_REG0__RX_CFG_RCLK_DIV_OVRD_VAL_MASK 0x00002000L
+#define PB1_RX_GLB_OVRD_REG0__RX_CFG_RCLK_DIV_OVRD_VAL__SHIFT 0x0000000d
+#define PB1_RX_GLB_OVRD_REG0__RX_DCLK_EN_OVRD_EN_MASK 0x00010000L
+#define PB1_RX_GLB_OVRD_REG0__RX_DCLK_EN_OVRD_EN__SHIFT 0x00000010
+#define PB1_RX_GLB_OVRD_REG0__RX_DCLK_EN_OVRD_VAL_MASK 0x00008000L
+#define PB1_RX_GLB_OVRD_REG0__RX_DCLK_EN_OVRD_VAL__SHIFT 0x0000000f
+#define PB1_RX_GLB_OVRD_REG0__RX_DLL_PWRON_OVRD_EN_MASK 0x00040000L
+#define PB1_RX_GLB_OVRD_REG0__RX_DLL_PWRON_OVRD_EN__SHIFT 0x00000012
+#define PB1_RX_GLB_OVRD_REG0__RX_DLL_PWRON_OVRD_VAL_MASK 0x00020000L
+#define PB1_RX_GLB_OVRD_REG0__RX_DLL_PWRON_OVRD_VAL__SHIFT 0x00000011
+#define PB1_RX_GLB_OVRD_REG0__RX_FRONTEND_PWRON_OVRD_EN_MASK 0x00100000L
+#define PB1_RX_GLB_OVRD_REG0__RX_FRONTEND_PWRON_OVRD_EN__SHIFT 0x00000014
+#define PB1_RX_GLB_OVRD_REG0__RX_FRONTEND_PWRON_OVRD_VAL_MASK 0x00080000L
+#define PB1_RX_GLB_OVRD_REG0__RX_FRONTEND_PWRON_OVRD_VAL__SHIFT 0x00000013
+#define PB1_RX_GLB_OVRD_REG0__RX_IDLEDET_PWRON_OVRD_EN_MASK 0x00400000L
+#define PB1_RX_GLB_OVRD_REG0__RX_IDLEDET_PWRON_OVRD_EN__SHIFT 0x00000016
+#define PB1_RX_GLB_OVRD_REG0__RX_IDLEDET_PWRON_OVRD_VAL_MASK 0x00200000L
+#define PB1_RX_GLB_OVRD_REG0__RX_IDLEDET_PWRON_OVRD_VAL__SHIFT 0x00000015
+#define PB1_RX_GLB_OVRD_REG0__RX_TERM_EN_OVRD_EN_MASK 0x01000000L
+#define PB1_RX_GLB_OVRD_REG0__RX_TERM_EN_OVRD_EN__SHIFT 0x00000018
+#define PB1_RX_GLB_OVRD_REG0__RX_TERM_EN_OVRD_VAL_MASK 0x00800000L
+#define PB1_RX_GLB_OVRD_REG0__RX_TERM_EN_OVRD_VAL__SHIFT 0x00000017
+#define PB1_RX_GLB_OVRD_REG1__RX_ADAPT_TRK_OVRD_EN_MASK 0x00000002L
+#define PB1_RX_GLB_OVRD_REG1__RX_ADAPT_TRK_OVRD_EN__SHIFT 0x00000001
+#define PB1_RX_GLB_OVRD_REG1__RX_ADAPT_TRK_OVRD_VAL_MASK 0x00000001L
+#define PB1_RX_GLB_OVRD_REG1__RX_ADAPT_TRK_OVRD_VAL__SHIFT 0x00000000
+#define PB1_RX_GLB_SCI_STAT_OVRD_REG0__IGNR_ELECIDLEDETEN_SCI_UPDT_L0T3_MASK 0x00000010L
+#define PB1_RX_GLB_SCI_STAT_OVRD_REG0__IGNR_ELECIDLEDETEN_SCI_UPDT_L0T3__SHIFT 0x00000004
+#define PB1_RX_GLB_SCI_STAT_OVRD_REG0__IGNR_ELECIDLEDETEN_SCI_UPDT_L12T15_MASK 0x00000080L
+#define PB1_RX_GLB_SCI_STAT_OVRD_REG0__IGNR_ELECIDLEDETEN_SCI_UPDT_L12T15__SHIFT 0x00000007
+#define PB1_RX_GLB_SCI_STAT_OVRD_REG0__IGNR_ELECIDLEDETEN_SCI_UPDT_L4T7_MASK 0x00000020L
+#define PB1_RX_GLB_SCI_STAT_OVRD_REG0__IGNR_ELECIDLEDETEN_SCI_UPDT_L4T7__SHIFT 0x00000005
+#define PB1_RX_GLB_SCI_STAT_OVRD_REG0__IGNR_ELECIDLEDETEN_SCI_UPDT_L8T11_MASK 0x00000040L
+#define PB1_RX_GLB_SCI_STAT_OVRD_REG0__IGNR_ELECIDLEDETEN_SCI_UPDT_L8T11__SHIFT 0x00000006
+#define PB1_RX_GLB_SCI_STAT_OVRD_REG0__IGNR_ENABLEFOM_SCI_UPDT_L0T3_MASK 0x00001000L
+#define PB1_RX_GLB_SCI_STAT_OVRD_REG0__IGNR_ENABLEFOM_SCI_UPDT_L0T3__SHIFT 0x0000000c
+#define PB1_RX_GLB_SCI_STAT_OVRD_REG0__IGNR_ENABLEFOM_SCI_UPDT_L12T15_MASK 0x00008000L
+#define PB1_RX_GLB_SCI_STAT_OVRD_REG0__IGNR_ENABLEFOM_SCI_UPDT_L12T15__SHIFT 0x0000000f
+#define PB1_RX_GLB_SCI_STAT_OVRD_REG0__IGNR_ENABLEFOM_SCI_UPDT_L4T7_MASK 0x00002000L
+#define PB1_RX_GLB_SCI_STAT_OVRD_REG0__IGNR_ENABLEFOM_SCI_UPDT_L4T7__SHIFT 0x0000000d
+#define PB1_RX_GLB_SCI_STAT_OVRD_REG0__IGNR_ENABLEFOM_SCI_UPDT_L8T11_MASK 0x00004000L
+#define PB1_RX_GLB_SCI_STAT_OVRD_REG0__IGNR_ENABLEFOM_SCI_UPDT_L8T11__SHIFT 0x0000000e
+#define PB1_RX_GLB_SCI_STAT_OVRD_REG0__IGNR_REQUESTFOM_SCI_UPDT_L0T3_MASK 0x00010000L
+#define PB1_RX_GLB_SCI_STAT_OVRD_REG0__IGNR_REQUESTFOM_SCI_UPDT_L0T3__SHIFT 0x00000010
+#define PB1_RX_GLB_SCI_STAT_OVRD_REG0__IGNR_REQUESTFOM_SCI_UPDT_L12T15_MASK 0x00080000L
+#define PB1_RX_GLB_SCI_STAT_OVRD_REG0__IGNR_REQUESTFOM_SCI_UPDT_L12T15__SHIFT 0x00000013
+#define PB1_RX_GLB_SCI_STAT_OVRD_REG0__IGNR_REQUESTFOM_SCI_UPDT_L4T7_MASK 0x00020000L
+#define PB1_RX_GLB_SCI_STAT_OVRD_REG0__IGNR_REQUESTFOM_SCI_UPDT_L4T7__SHIFT 0x00000011
+#define PB1_RX_GLB_SCI_STAT_OVRD_REG0__IGNR_REQUESTFOM_SCI_UPDT_L8T11_MASK 0x00040000L
+#define PB1_RX_GLB_SCI_STAT_OVRD_REG0__IGNR_REQUESTFOM_SCI_UPDT_L8T11__SHIFT 0x00000012
+#define PB1_RX_GLB_SCI_STAT_OVRD_REG0__IGNR_RESPONSEMODE_SCI_UPDT_L0T3_MASK 0x00100000L
+#define PB1_RX_GLB_SCI_STAT_OVRD_REG0__IGNR_RESPONSEMODE_SCI_UPDT_L0T3__SHIFT 0x00000014
+#define PB1_RX_GLB_SCI_STAT_OVRD_REG0__IGNR_RESPONSEMODE_SCI_UPDT_L12T15_MASK 0x00800000L
+#define PB1_RX_GLB_SCI_STAT_OVRD_REG0__IGNR_RESPONSEMODE_SCI_UPDT_L12T15__SHIFT 0x00000017
+#define PB1_RX_GLB_SCI_STAT_OVRD_REG0__IGNR_RESPONSEMODE_SCI_UPDT_L4T7_MASK 0x00200000L
+#define PB1_RX_GLB_SCI_STAT_OVRD_REG0__IGNR_RESPONSEMODE_SCI_UPDT_L4T7__SHIFT 0x00000015
+#define PB1_RX_GLB_SCI_STAT_OVRD_REG0__IGNR_RESPONSEMODE_SCI_UPDT_L8T11_MASK 0x00400000L
+#define PB1_RX_GLB_SCI_STAT_OVRD_REG0__IGNR_RESPONSEMODE_SCI_UPDT_L8T11__SHIFT 0x00000016
+#define PB1_RX_GLB_SCI_STAT_OVRD_REG0__IGNR_RXPRESETHINT_SCI_UPDT_L0T3_MASK 0x00000100L
+#define PB1_RX_GLB_SCI_STAT_OVRD_REG0__IGNR_RXPRESETHINT_SCI_UPDT_L0T3__SHIFT 0x00000008
+#define PB1_RX_GLB_SCI_STAT_OVRD_REG0__IGNR_RXPRESETHINT_SCI_UPDT_L12T15_MASK 0x00000800L
+#define PB1_RX_GLB_SCI_STAT_OVRD_REG0__IGNR_RXPRESETHINT_SCI_UPDT_L12T15__SHIFT 0x0000000b
+#define PB1_RX_GLB_SCI_STAT_OVRD_REG0__IGNR_RXPRESETHINT_SCI_UPDT_L4T7_MASK 0x00000200L
+#define PB1_RX_GLB_SCI_STAT_OVRD_REG0__IGNR_RXPRESETHINT_SCI_UPDT_L4T7__SHIFT 0x00000009
+#define PB1_RX_GLB_SCI_STAT_OVRD_REG0__IGNR_RXPRESETHINT_SCI_UPDT_L8T11_MASK 0x00000400L
+#define PB1_RX_GLB_SCI_STAT_OVRD_REG0__IGNR_RXPRESETHINT_SCI_UPDT_L8T11__SHIFT 0x0000000a
+#define PB1_RX_GLB_SCI_STAT_OVRD_REG0__IGNR_RXPWR_SCI_UPDT_L0T3_MASK 0x00000001L
+#define PB1_RX_GLB_SCI_STAT_OVRD_REG0__IGNR_RXPWR_SCI_UPDT_L0T3__SHIFT 0x00000000
+#define PB1_RX_GLB_SCI_STAT_OVRD_REG0__IGNR_RXPWR_SCI_UPDT_L12T15_MASK 0x00000008L
+#define PB1_RX_GLB_SCI_STAT_OVRD_REG0__IGNR_RXPWR_SCI_UPDT_L12T15__SHIFT 0x00000003
+#define PB1_RX_GLB_SCI_STAT_OVRD_REG0__IGNR_RXPWR_SCI_UPDT_L4T7_MASK 0x00000002L
+#define PB1_RX_GLB_SCI_STAT_OVRD_REG0__IGNR_RXPWR_SCI_UPDT_L4T7__SHIFT 0x00000001
+#define PB1_RX_GLB_SCI_STAT_OVRD_REG0__IGNR_RXPWR_SCI_UPDT_L8T11_MASK 0x00000004L
+#define PB1_RX_GLB_SCI_STAT_OVRD_REG0__IGNR_RXPWR_SCI_UPDT_L8T11__SHIFT 0x00000002
+#define PB1_RX_LANE0_CTRL_REG0__RX_BACKUP_0_MASK 0x000000ffL
+#define PB1_RX_LANE0_CTRL_REG0__RX_BACKUP_0__SHIFT 0x00000000
+#define PB1_RX_LANE0_CTRL_REG0__RX_CFG_OVR_PWRSF_0_MASK 0x00002000L
+#define PB1_RX_LANE0_CTRL_REG0__RX_CFG_OVR_PWRSF_0__SHIFT 0x0000000d
+#define PB1_RX_LANE0_CTRL_REG0__RX_DBG_ANALOG_SEL_0_MASK 0x00000c00L
+#define PB1_RX_LANE0_CTRL_REG0__RX_DBG_ANALOG_SEL_0__SHIFT 0x0000000a
+#define PB1_RX_LANE0_CTRL_REG0__RX_TST_BSCAN_EN_0_MASK 0x00001000L
+#define PB1_RX_LANE0_CTRL_REG0__RX_TST_BSCAN_EN_0__SHIFT 0x0000000c
+#define PB1_RX_LANE0_SCI_STAT_OVRD_REG0__ELECIDLEDETEN_0_MASK 0x00000008L
+#define PB1_RX_LANE0_SCI_STAT_OVRD_REG0__ELECIDLEDETEN_0__SHIFT 0x00000003
+#define PB1_RX_LANE0_SCI_STAT_OVRD_REG0__ENABLEFOM_0_MASK 0x00000080L
+#define PB1_RX_LANE0_SCI_STAT_OVRD_REG0__ENABLEFOM_0__SHIFT 0x00000007
+#define PB1_RX_LANE0_SCI_STAT_OVRD_REG0__REQUESTFOM_0_MASK 0x00000100L
+#define PB1_RX_LANE0_SCI_STAT_OVRD_REG0__REQUESTFOM_0__SHIFT 0x00000008
+#define PB1_RX_LANE0_SCI_STAT_OVRD_REG0__RESPONSEMODE_0_MASK 0x00000200L
+#define PB1_RX_LANE0_SCI_STAT_OVRD_REG0__RESPONSEMODE_0__SHIFT 0x00000009
+#define PB1_RX_LANE0_SCI_STAT_OVRD_REG0__RXPRESETHINT_0_MASK 0x00000070L
+#define PB1_RX_LANE0_SCI_STAT_OVRD_REG0__RXPRESETHINT_0__SHIFT 0x00000004
+#define PB1_RX_LANE0_SCI_STAT_OVRD_REG0__RXPWR_0_MASK 0x00000007L
+#define PB1_RX_LANE0_SCI_STAT_OVRD_REG0__RXPWR_0__SHIFT 0x00000000
+#define PB1_RX_LANE10_CTRL_REG0__RX_BACKUP_10_MASK 0x000000ffL
+#define PB1_RX_LANE10_CTRL_REG0__RX_BACKUP_10__SHIFT 0x00000000
+#define PB1_RX_LANE10_CTRL_REG0__RX_CFG_OVR_PWRSF_10_MASK 0x00002000L
+#define PB1_RX_LANE10_CTRL_REG0__RX_CFG_OVR_PWRSF_10__SHIFT 0x0000000d
+#define PB1_RX_LANE10_CTRL_REG0__RX_DBG_ANALOG_SEL_10_MASK 0x00000c00L
+#define PB1_RX_LANE10_CTRL_REG0__RX_DBG_ANALOG_SEL_10__SHIFT 0x0000000a
+#define PB1_RX_LANE10_CTRL_REG0__RX_TST_BSCAN_EN_10_MASK 0x00001000L
+#define PB1_RX_LANE10_CTRL_REG0__RX_TST_BSCAN_EN_10__SHIFT 0x0000000c
+#define PB1_RX_LANE10_SCI_STAT_OVRD_REG0__ELECIDLEDETEN_10_MASK 0x00000008L
+#define PB1_RX_LANE10_SCI_STAT_OVRD_REG0__ELECIDLEDETEN_10__SHIFT 0x00000003
+#define PB1_RX_LANE10_SCI_STAT_OVRD_REG0__ENABLEFOM_10_MASK 0x00000080L
+#define PB1_RX_LANE10_SCI_STAT_OVRD_REG0__ENABLEFOM_10__SHIFT 0x00000007
+#define PB1_RX_LANE10_SCI_STAT_OVRD_REG0__REQUESTFOM_10_MASK 0x00000100L
+#define PB1_RX_LANE10_SCI_STAT_OVRD_REG0__REQUESTFOM_10__SHIFT 0x00000008
+#define PB1_RX_LANE10_SCI_STAT_OVRD_REG0__RESPONSEMODE_10_MASK 0x00000200L
+#define PB1_RX_LANE10_SCI_STAT_OVRD_REG0__RESPONSEMODE_10__SHIFT 0x00000009
+#define PB1_RX_LANE10_SCI_STAT_OVRD_REG0__RXPRESETHINT_10_MASK 0x00000070L
+#define PB1_RX_LANE10_SCI_STAT_OVRD_REG0__RXPRESETHINT_10__SHIFT 0x00000004
+#define PB1_RX_LANE10_SCI_STAT_OVRD_REG0__RXPWR_10_MASK 0x00000007L
+#define PB1_RX_LANE10_SCI_STAT_OVRD_REG0__RXPWR_10__SHIFT 0x00000000
+#define PB1_RX_LANE11_CTRL_REG0__RX_BACKUP_11_MASK 0x000000ffL
+#define PB1_RX_LANE11_CTRL_REG0__RX_BACKUP_11__SHIFT 0x00000000
+#define PB1_RX_LANE11_CTRL_REG0__RX_CFG_OVR_PWRSF_11_MASK 0x00002000L
+#define PB1_RX_LANE11_CTRL_REG0__RX_CFG_OVR_PWRSF_11__SHIFT 0x0000000d
+#define PB1_RX_LANE11_CTRL_REG0__RX_DBG_ANALOG_SEL_11_MASK 0x00000c00L
+#define PB1_RX_LANE11_CTRL_REG0__RX_DBG_ANALOG_SEL_11__SHIFT 0x0000000a
+#define PB1_RX_LANE11_CTRL_REG0__RX_TST_BSCAN_EN_11_MASK 0x00001000L
+#define PB1_RX_LANE11_CTRL_REG0__RX_TST_BSCAN_EN_11__SHIFT 0x0000000c
+#define PB1_RX_LANE11_SCI_STAT_OVRD_REG0__ELECIDLEDETEN_11_MASK 0x00000008L
+#define PB1_RX_LANE11_SCI_STAT_OVRD_REG0__ELECIDLEDETEN_11__SHIFT 0x00000003
+#define PB1_RX_LANE11_SCI_STAT_OVRD_REG0__ENABLEFOM_11_MASK 0x00000080L
+#define PB1_RX_LANE11_SCI_STAT_OVRD_REG0__ENABLEFOM_11__SHIFT 0x00000007
+#define PB1_RX_LANE11_SCI_STAT_OVRD_REG0__REQUESTFOM_11_MASK 0x00000100L
+#define PB1_RX_LANE11_SCI_STAT_OVRD_REG0__REQUESTFOM_11__SHIFT 0x00000008
+#define PB1_RX_LANE11_SCI_STAT_OVRD_REG0__RESPONSEMODE_11_MASK 0x00000200L
+#define PB1_RX_LANE11_SCI_STAT_OVRD_REG0__RESPONSEMODE_11__SHIFT 0x00000009
+#define PB1_RX_LANE11_SCI_STAT_OVRD_REG0__RXPRESETHINT_11_MASK 0x00000070L
+#define PB1_RX_LANE11_SCI_STAT_OVRD_REG0__RXPRESETHINT_11__SHIFT 0x00000004
+#define PB1_RX_LANE11_SCI_STAT_OVRD_REG0__RXPWR_11_MASK 0x00000007L
+#define PB1_RX_LANE11_SCI_STAT_OVRD_REG0__RXPWR_11__SHIFT 0x00000000
+#define PB1_RX_LANE12_CTRL_REG0__RX_BACKUP_12_MASK 0x000000ffL
+#define PB1_RX_LANE12_CTRL_REG0__RX_BACKUP_12__SHIFT 0x00000000
+#define PB1_RX_LANE12_CTRL_REG0__RX_CFG_OVR_PWRSF_12_MASK 0x00002000L
+#define PB1_RX_LANE12_CTRL_REG0__RX_CFG_OVR_PWRSF_12__SHIFT 0x0000000d
+#define PB1_RX_LANE12_CTRL_REG0__RX_DBG_ANALOG_SEL_12_MASK 0x00000c00L
+#define PB1_RX_LANE12_CTRL_REG0__RX_DBG_ANALOG_SEL_12__SHIFT 0x0000000a
+#define PB1_RX_LANE12_CTRL_REG0__RX_TST_BSCAN_EN_12_MASK 0x00001000L
+#define PB1_RX_LANE12_CTRL_REG0__RX_TST_BSCAN_EN_12__SHIFT 0x0000000c
+#define PB1_RX_LANE12_SCI_STAT_OVRD_REG0__ELECIDLEDETEN_12_MASK 0x00000008L
+#define PB1_RX_LANE12_SCI_STAT_OVRD_REG0__ELECIDLEDETEN_12__SHIFT 0x00000003
+#define PB1_RX_LANE12_SCI_STAT_OVRD_REG0__ENABLEFOM_12_MASK 0x00000080L
+#define PB1_RX_LANE12_SCI_STAT_OVRD_REG0__ENABLEFOM_12__SHIFT 0x00000007
+#define PB1_RX_LANE12_SCI_STAT_OVRD_REG0__REQUESTFOM_12_MASK 0x00000100L
+#define PB1_RX_LANE12_SCI_STAT_OVRD_REG0__REQUESTFOM_12__SHIFT 0x00000008
+#define PB1_RX_LANE12_SCI_STAT_OVRD_REG0__RESPONSEMODE_12_MASK 0x00000200L
+#define PB1_RX_LANE12_SCI_STAT_OVRD_REG0__RESPONSEMODE_12__SHIFT 0x00000009
+#define PB1_RX_LANE12_SCI_STAT_OVRD_REG0__RXPRESETHINT_12_MASK 0x00000070L
+#define PB1_RX_LANE12_SCI_STAT_OVRD_REG0__RXPRESETHINT_12__SHIFT 0x00000004
+#define PB1_RX_LANE12_SCI_STAT_OVRD_REG0__RXPWR_12_MASK 0x00000007L
+#define PB1_RX_LANE12_SCI_STAT_OVRD_REG0__RXPWR_12__SHIFT 0x00000000
+#define PB1_RX_LANE13_CTRL_REG0__RX_BACKUP_13_MASK 0x000000ffL
+#define PB1_RX_LANE13_CTRL_REG0__RX_BACKUP_13__SHIFT 0x00000000
+#define PB1_RX_LANE13_CTRL_REG0__RX_CFG_OVR_PWRSF_13_MASK 0x00002000L
+#define PB1_RX_LANE13_CTRL_REG0__RX_CFG_OVR_PWRSF_13__SHIFT 0x0000000d
+#define PB1_RX_LANE13_CTRL_REG0__RX_DBG_ANALOG_SEL_13_MASK 0x00000c00L
+#define PB1_RX_LANE13_CTRL_REG0__RX_DBG_ANALOG_SEL_13__SHIFT 0x0000000a
+#define PB1_RX_LANE13_CTRL_REG0__RX_TST_BSCAN_EN_13_MASK 0x00001000L
+#define PB1_RX_LANE13_CTRL_REG0__RX_TST_BSCAN_EN_13__SHIFT 0x0000000c
+#define PB1_RX_LANE13_SCI_STAT_OVRD_REG0__ELECIDLEDETEN_13_MASK 0x00000008L
+#define PB1_RX_LANE13_SCI_STAT_OVRD_REG0__ELECIDLEDETEN_13__SHIFT 0x00000003
+#define PB1_RX_LANE13_SCI_STAT_OVRD_REG0__ENABLEFOM_13_MASK 0x00000080L
+#define PB1_RX_LANE13_SCI_STAT_OVRD_REG0__ENABLEFOM_13__SHIFT 0x00000007
+#define PB1_RX_LANE13_SCI_STAT_OVRD_REG0__REQUESTFOM_13_MASK 0x00000100L
+#define PB1_RX_LANE13_SCI_STAT_OVRD_REG0__REQUESTFOM_13__SHIFT 0x00000008
+#define PB1_RX_LANE13_SCI_STAT_OVRD_REG0__RESPONSEMODE_13_MASK 0x00000200L
+#define PB1_RX_LANE13_SCI_STAT_OVRD_REG0__RESPONSEMODE_13__SHIFT 0x00000009
+#define PB1_RX_LANE13_SCI_STAT_OVRD_REG0__RXPRESETHINT_13_MASK 0x00000070L
+#define PB1_RX_LANE13_SCI_STAT_OVRD_REG0__RXPRESETHINT_13__SHIFT 0x00000004
+#define PB1_RX_LANE13_SCI_STAT_OVRD_REG0__RXPWR_13_MASK 0x00000007L
+#define PB1_RX_LANE13_SCI_STAT_OVRD_REG0__RXPWR_13__SHIFT 0x00000000
+#define PB1_RX_LANE14_CTRL_REG0__RX_BACKUP_14_MASK 0x000000ffL
+#define PB1_RX_LANE14_CTRL_REG0__RX_BACKUP_14__SHIFT 0x00000000
+#define PB1_RX_LANE14_CTRL_REG0__RX_CFG_OVR_PWRSF_14_MASK 0x00002000L
+#define PB1_RX_LANE14_CTRL_REG0__RX_CFG_OVR_PWRSF_14__SHIFT 0x0000000d
+#define PB1_RX_LANE14_CTRL_REG0__RX_DBG_ANALOG_SEL_14_MASK 0x00000c00L
+#define PB1_RX_LANE14_CTRL_REG0__RX_DBG_ANALOG_SEL_14__SHIFT 0x0000000a
+#define PB1_RX_LANE14_CTRL_REG0__RX_TST_BSCAN_EN_14_MASK 0x00001000L
+#define PB1_RX_LANE14_CTRL_REG0__RX_TST_BSCAN_EN_14__SHIFT 0x0000000c
+#define PB1_RX_LANE14_SCI_STAT_OVRD_REG0__ELECIDLEDETEN_14_MASK 0x00000008L
+#define PB1_RX_LANE14_SCI_STAT_OVRD_REG0__ELECIDLEDETEN_14__SHIFT 0x00000003
+#define PB1_RX_LANE14_SCI_STAT_OVRD_REG0__ENABLEFOM_14_MASK 0x00000080L
+#define PB1_RX_LANE14_SCI_STAT_OVRD_REG0__ENABLEFOM_14__SHIFT 0x00000007
+#define PB1_RX_LANE14_SCI_STAT_OVRD_REG0__REQUESTFOM_14_MASK 0x00000100L
+#define PB1_RX_LANE14_SCI_STAT_OVRD_REG0__REQUESTFOM_14__SHIFT 0x00000008
+#define PB1_RX_LANE14_SCI_STAT_OVRD_REG0__RESPONSEMODE_14_MASK 0x00000200L
+#define PB1_RX_LANE14_SCI_STAT_OVRD_REG0__RESPONSEMODE_14__SHIFT 0x00000009
+#define PB1_RX_LANE14_SCI_STAT_OVRD_REG0__RXPRESETHINT_14_MASK 0x00000070L
+#define PB1_RX_LANE14_SCI_STAT_OVRD_REG0__RXPRESETHINT_14__SHIFT 0x00000004
+#define PB1_RX_LANE14_SCI_STAT_OVRD_REG0__RXPWR_14_MASK 0x00000007L
+#define PB1_RX_LANE14_SCI_STAT_OVRD_REG0__RXPWR_14__SHIFT 0x00000000
+#define PB1_RX_LANE15_CTRL_REG0__RX_BACKUP_15_MASK 0x000000ffL
+#define PB1_RX_LANE15_CTRL_REG0__RX_BACKUP_15__SHIFT 0x00000000
+#define PB1_RX_LANE15_CTRL_REG0__RX_CFG_OVR_PWRSF_15_MASK 0x00002000L
+#define PB1_RX_LANE15_CTRL_REG0__RX_CFG_OVR_PWRSF_15__SHIFT 0x0000000d
+#define PB1_RX_LANE15_CTRL_REG0__RX_DBG_ANALOG_SEL_15_MASK 0x00000c00L
+#define PB1_RX_LANE15_CTRL_REG0__RX_DBG_ANALOG_SEL_15__SHIFT 0x0000000a
+#define PB1_RX_LANE15_CTRL_REG0__RX_TST_BSCAN_EN_15_MASK 0x00001000L
+#define PB1_RX_LANE15_CTRL_REG0__RX_TST_BSCAN_EN_15__SHIFT 0x0000000c
+#define PB1_RX_LANE15_SCI_STAT_OVRD_REG0__ELECIDLEDETEN_15_MASK 0x00000008L
+#define PB1_RX_LANE15_SCI_STAT_OVRD_REG0__ELECIDLEDETEN_15__SHIFT 0x00000003
+#define PB1_RX_LANE15_SCI_STAT_OVRD_REG0__ENABLEFOM_15_MASK 0x00000080L
+#define PB1_RX_LANE15_SCI_STAT_OVRD_REG0__ENABLEFOM_15__SHIFT 0x00000007
+#define PB1_RX_LANE15_SCI_STAT_OVRD_REG0__REQUESTFOM_15_MASK 0x00000100L
+#define PB1_RX_LANE15_SCI_STAT_OVRD_REG0__REQUESTFOM_15__SHIFT 0x00000008
+#define PB1_RX_LANE15_SCI_STAT_OVRD_REG0__RESPONSEMODE_15_MASK 0x00000200L
+#define PB1_RX_LANE15_SCI_STAT_OVRD_REG0__RESPONSEMODE_15__SHIFT 0x00000009
+#define PB1_RX_LANE15_SCI_STAT_OVRD_REG0__RXPRESETHINT_15_MASK 0x00000070L
+#define PB1_RX_LANE15_SCI_STAT_OVRD_REG0__RXPRESETHINT_15__SHIFT 0x00000004
+#define PB1_RX_LANE15_SCI_STAT_OVRD_REG0__RXPWR_15_MASK 0x00000007L
+#define PB1_RX_LANE15_SCI_STAT_OVRD_REG0__RXPWR_15__SHIFT 0x00000000
+#define PB1_RX_LANE1_CTRL_REG0__RX_BACKUP_1_MASK 0x000000ffL
+#define PB1_RX_LANE1_CTRL_REG0__RX_BACKUP_1__SHIFT 0x00000000
+#define PB1_RX_LANE1_CTRL_REG0__RX_CFG_OVR_PWRSF_1_MASK 0x00002000L
+#define PB1_RX_LANE1_CTRL_REG0__RX_CFG_OVR_PWRSF_1__SHIFT 0x0000000d
+#define PB1_RX_LANE1_CTRL_REG0__RX_DBG_ANALOG_SEL_1_MASK 0x00000c00L
+#define PB1_RX_LANE1_CTRL_REG0__RX_DBG_ANALOG_SEL_1__SHIFT 0x0000000a
+#define PB1_RX_LANE1_CTRL_REG0__RX_TST_BSCAN_EN_1_MASK 0x00001000L
+#define PB1_RX_LANE1_CTRL_REG0__RX_TST_BSCAN_EN_1__SHIFT 0x0000000c
+#define PB1_RX_LANE1_SCI_STAT_OVRD_REG0__ELECIDLEDETEN_1_MASK 0x00000008L
+#define PB1_RX_LANE1_SCI_STAT_OVRD_REG0__ELECIDLEDETEN_1__SHIFT 0x00000003
+#define PB1_RX_LANE1_SCI_STAT_OVRD_REG0__ENABLEFOM_1_MASK 0x00000080L
+#define PB1_RX_LANE1_SCI_STAT_OVRD_REG0__ENABLEFOM_1__SHIFT 0x00000007
+#define PB1_RX_LANE1_SCI_STAT_OVRD_REG0__REQUESTFOM_1_MASK 0x00000100L
+#define PB1_RX_LANE1_SCI_STAT_OVRD_REG0__REQUESTFOM_1__SHIFT 0x00000008
+#define PB1_RX_LANE1_SCI_STAT_OVRD_REG0__RESPONSEMODE_1_MASK 0x00000200L
+#define PB1_RX_LANE1_SCI_STAT_OVRD_REG0__RESPONSEMODE_1__SHIFT 0x00000009
+#define PB1_RX_LANE1_SCI_STAT_OVRD_REG0__RXPRESETHINT_1_MASK 0x00000070L
+#define PB1_RX_LANE1_SCI_STAT_OVRD_REG0__RXPRESETHINT_1__SHIFT 0x00000004
+#define PB1_RX_LANE1_SCI_STAT_OVRD_REG0__RXPWR_1_MASK 0x00000007L
+#define PB1_RX_LANE1_SCI_STAT_OVRD_REG0__RXPWR_1__SHIFT 0x00000000
+#define PB1_RX_LANE2_CTRL_REG0__RX_BACKUP_2_MASK 0x000000ffL
+#define PB1_RX_LANE2_CTRL_REG0__RX_BACKUP_2__SHIFT 0x00000000
+#define PB1_RX_LANE2_CTRL_REG0__RX_CFG_OVR_PWRSF_2_MASK 0x00002000L
+#define PB1_RX_LANE2_CTRL_REG0__RX_CFG_OVR_PWRSF_2__SHIFT 0x0000000d
+#define PB1_RX_LANE2_CTRL_REG0__RX_DBG_ANALOG_SEL_2_MASK 0x00000c00L
+#define PB1_RX_LANE2_CTRL_REG0__RX_DBG_ANALOG_SEL_2__SHIFT 0x0000000a
+#define PB1_RX_LANE2_CTRL_REG0__RX_TST_BSCAN_EN_2_MASK 0x00001000L
+#define PB1_RX_LANE2_CTRL_REG0__RX_TST_BSCAN_EN_2__SHIFT 0x0000000c
+#define PB1_RX_LANE2_SCI_STAT_OVRD_REG0__ELECIDLEDETEN_2_MASK 0x00000008L
+#define PB1_RX_LANE2_SCI_STAT_OVRD_REG0__ELECIDLEDETEN_2__SHIFT 0x00000003
+#define PB1_RX_LANE2_SCI_STAT_OVRD_REG0__ENABLEFOM_2_MASK 0x00000080L
+#define PB1_RX_LANE2_SCI_STAT_OVRD_REG0__ENABLEFOM_2__SHIFT 0x00000007
+#define PB1_RX_LANE2_SCI_STAT_OVRD_REG0__REQUESTFOM_2_MASK 0x00000100L
+#define PB1_RX_LANE2_SCI_STAT_OVRD_REG0__REQUESTFOM_2__SHIFT 0x00000008
+#define PB1_RX_LANE2_SCI_STAT_OVRD_REG0__RESPONSEMODE_2_MASK 0x00000200L
+#define PB1_RX_LANE2_SCI_STAT_OVRD_REG0__RESPONSEMODE_2__SHIFT 0x00000009
+#define PB1_RX_LANE2_SCI_STAT_OVRD_REG0__RXPRESETHINT_2_MASK 0x00000070L
+#define PB1_RX_LANE2_SCI_STAT_OVRD_REG0__RXPRESETHINT_2__SHIFT 0x00000004
+#define PB1_RX_LANE2_SCI_STAT_OVRD_REG0__RXPWR_2_MASK 0x00000007L
+#define PB1_RX_LANE2_SCI_STAT_OVRD_REG0__RXPWR_2__SHIFT 0x00000000
+#define PB1_RX_LANE3_CTRL_REG0__RX_BACKUP_3_MASK 0x000000ffL
+#define PB1_RX_LANE3_CTRL_REG0__RX_BACKUP_3__SHIFT 0x00000000
+#define PB1_RX_LANE3_CTRL_REG0__RX_CFG_OVR_PWRSF_3_MASK 0x00002000L
+#define PB1_RX_LANE3_CTRL_REG0__RX_CFG_OVR_PWRSF_3__SHIFT 0x0000000d
+#define PB1_RX_LANE3_CTRL_REG0__RX_DBG_ANALOG_SEL_3_MASK 0x00000c00L
+#define PB1_RX_LANE3_CTRL_REG0__RX_DBG_ANALOG_SEL_3__SHIFT 0x0000000a
+#define PB1_RX_LANE3_CTRL_REG0__RX_TST_BSCAN_EN_3_MASK 0x00001000L
+#define PB1_RX_LANE3_CTRL_REG0__RX_TST_BSCAN_EN_3__SHIFT 0x0000000c
+#define PB1_RX_LANE3_SCI_STAT_OVRD_REG0__ELECIDLEDETEN_3_MASK 0x00000008L
+#define PB1_RX_LANE3_SCI_STAT_OVRD_REG0__ELECIDLEDETEN_3__SHIFT 0x00000003
+#define PB1_RX_LANE3_SCI_STAT_OVRD_REG0__ENABLEFOM_3_MASK 0x00000080L
+#define PB1_RX_LANE3_SCI_STAT_OVRD_REG0__ENABLEFOM_3__SHIFT 0x00000007
+#define PB1_RX_LANE3_SCI_STAT_OVRD_REG0__REQUESTFOM_3_MASK 0x00000100L
+#define PB1_RX_LANE3_SCI_STAT_OVRD_REG0__REQUESTFOM_3__SHIFT 0x00000008
+#define PB1_RX_LANE3_SCI_STAT_OVRD_REG0__RESPONSEMODE_3_MASK 0x00000200L
+#define PB1_RX_LANE3_SCI_STAT_OVRD_REG0__RESPONSEMODE_3__SHIFT 0x00000009
+#define PB1_RX_LANE3_SCI_STAT_OVRD_REG0__RXPRESETHINT_3_MASK 0x00000070L
+#define PB1_RX_LANE3_SCI_STAT_OVRD_REG0__RXPRESETHINT_3__SHIFT 0x00000004
+#define PB1_RX_LANE3_SCI_STAT_OVRD_REG0__RXPWR_3_MASK 0x00000007L
+#define PB1_RX_LANE3_SCI_STAT_OVRD_REG0__RXPWR_3__SHIFT 0x00000000
+#define PB1_RX_LANE4_CTRL_REG0__RX_BACKUP_4_MASK 0x000000ffL
+#define PB1_RX_LANE4_CTRL_REG0__RX_BACKUP_4__SHIFT 0x00000000
+#define PB1_RX_LANE4_CTRL_REG0__RX_CFG_OVR_PWRSF_4_MASK 0x00002000L
+#define PB1_RX_LANE4_CTRL_REG0__RX_CFG_OVR_PWRSF_4__SHIFT 0x0000000d
+#define PB1_RX_LANE4_CTRL_REG0__RX_DBG_ANALOG_SEL_4_MASK 0x00000c00L
+#define PB1_RX_LANE4_CTRL_REG0__RX_DBG_ANALOG_SEL_4__SHIFT 0x0000000a
+#define PB1_RX_LANE4_CTRL_REG0__RX_TST_BSCAN_EN_4_MASK 0x00001000L
+#define PB1_RX_LANE4_CTRL_REG0__RX_TST_BSCAN_EN_4__SHIFT 0x0000000c
+#define PB1_RX_LANE4_SCI_STAT_OVRD_REG0__ELECIDLEDETEN_4_MASK 0x00000008L
+#define PB1_RX_LANE4_SCI_STAT_OVRD_REG0__ELECIDLEDETEN_4__SHIFT 0x00000003
+#define PB1_RX_LANE4_SCI_STAT_OVRD_REG0__ENABLEFOM_4_MASK 0x00000080L
+#define PB1_RX_LANE4_SCI_STAT_OVRD_REG0__ENABLEFOM_4__SHIFT 0x00000007
+#define PB1_RX_LANE4_SCI_STAT_OVRD_REG0__REQUESTFOM_4_MASK 0x00000100L
+#define PB1_RX_LANE4_SCI_STAT_OVRD_REG0__REQUESTFOM_4__SHIFT 0x00000008
+#define PB1_RX_LANE4_SCI_STAT_OVRD_REG0__RESPONSEMODE_4_MASK 0x00000200L
+#define PB1_RX_LANE4_SCI_STAT_OVRD_REG0__RESPONSEMODE_4__SHIFT 0x00000009
+#define PB1_RX_LANE4_SCI_STAT_OVRD_REG0__RXPRESETHINT_4_MASK 0x00000070L
+#define PB1_RX_LANE4_SCI_STAT_OVRD_REG0__RXPRESETHINT_4__SHIFT 0x00000004
+#define PB1_RX_LANE4_SCI_STAT_OVRD_REG0__RXPWR_4_MASK 0x00000007L
+#define PB1_RX_LANE4_SCI_STAT_OVRD_REG0__RXPWR_4__SHIFT 0x00000000
+#define PB1_RX_LANE5_CTRL_REG0__RX_BACKUP_5_MASK 0x000000ffL
+#define PB1_RX_LANE5_CTRL_REG0__RX_BACKUP_5__SHIFT 0x00000000
+#define PB1_RX_LANE5_CTRL_REG0__RX_CFG_OVR_PWRSF_5_MASK 0x00002000L
+#define PB1_RX_LANE5_CTRL_REG0__RX_CFG_OVR_PWRSF_5__SHIFT 0x0000000d
+#define PB1_RX_LANE5_CTRL_REG0__RX_DBG_ANALOG_SEL_5_MASK 0x00000c00L
+#define PB1_RX_LANE5_CTRL_REG0__RX_DBG_ANALOG_SEL_5__SHIFT 0x0000000a
+#define PB1_RX_LANE5_CTRL_REG0__RX_TST_BSCAN_EN_5_MASK 0x00001000L
+#define PB1_RX_LANE5_CTRL_REG0__RX_TST_BSCAN_EN_5__SHIFT 0x0000000c
+#define PB1_RX_LANE5_SCI_STAT_OVRD_REG0__ELECIDLEDETEN_5_MASK 0x00000008L
+#define PB1_RX_LANE5_SCI_STAT_OVRD_REG0__ELECIDLEDETEN_5__SHIFT 0x00000003
+#define PB1_RX_LANE5_SCI_STAT_OVRD_REG0__ENABLEFOM_5_MASK 0x00000080L
+#define PB1_RX_LANE5_SCI_STAT_OVRD_REG0__ENABLEFOM_5__SHIFT 0x00000007
+#define PB1_RX_LANE5_SCI_STAT_OVRD_REG0__REQUESTFOM_5_MASK 0x00000100L
+#define PB1_RX_LANE5_SCI_STAT_OVRD_REG0__REQUESTFOM_5__SHIFT 0x00000008
+#define PB1_RX_LANE5_SCI_STAT_OVRD_REG0__RESPONSEMODE_5_MASK 0x00000200L
+#define PB1_RX_LANE5_SCI_STAT_OVRD_REG0__RESPONSEMODE_5__SHIFT 0x00000009
+#define PB1_RX_LANE5_SCI_STAT_OVRD_REG0__RXPRESETHINT_5_MASK 0x00000070L
+#define PB1_RX_LANE5_SCI_STAT_OVRD_REG0__RXPRESETHINT_5__SHIFT 0x00000004
+#define PB1_RX_LANE5_SCI_STAT_OVRD_REG0__RXPWR_5_MASK 0x00000007L
+#define PB1_RX_LANE5_SCI_STAT_OVRD_REG0__RXPWR_5__SHIFT 0x00000000
+#define PB1_RX_LANE6_CTRL_REG0__RX_BACKUP_6_MASK 0x000000ffL
+#define PB1_RX_LANE6_CTRL_REG0__RX_BACKUP_6__SHIFT 0x00000000
+#define PB1_RX_LANE6_CTRL_REG0__RX_CFG_OVR_PWRSF_6_MASK 0x00002000L
+#define PB1_RX_LANE6_CTRL_REG0__RX_CFG_OVR_PWRSF_6__SHIFT 0x0000000d
+#define PB1_RX_LANE6_CTRL_REG0__RX_DBG_ANALOG_SEL_6_MASK 0x00000c00L
+#define PB1_RX_LANE6_CTRL_REG0__RX_DBG_ANALOG_SEL_6__SHIFT 0x0000000a
+#define PB1_RX_LANE6_CTRL_REG0__RX_TST_BSCAN_EN_6_MASK 0x00001000L
+#define PB1_RX_LANE6_CTRL_REG0__RX_TST_BSCAN_EN_6__SHIFT 0x0000000c
+#define PB1_RX_LANE6_SCI_STAT_OVRD_REG0__ELECIDLEDETEN_6_MASK 0x00000008L
+#define PB1_RX_LANE6_SCI_STAT_OVRD_REG0__ELECIDLEDETEN_6__SHIFT 0x00000003
+#define PB1_RX_LANE6_SCI_STAT_OVRD_REG0__ENABLEFOM_6_MASK 0x00000080L
+#define PB1_RX_LANE6_SCI_STAT_OVRD_REG0__ENABLEFOM_6__SHIFT 0x00000007
+#define PB1_RX_LANE6_SCI_STAT_OVRD_REG0__REQUESTFOM_6_MASK 0x00000100L
+#define PB1_RX_LANE6_SCI_STAT_OVRD_REG0__REQUESTFOM_6__SHIFT 0x00000008
+#define PB1_RX_LANE6_SCI_STAT_OVRD_REG0__RESPONSEMODE_6_MASK 0x00000200L
+#define PB1_RX_LANE6_SCI_STAT_OVRD_REG0__RESPONSEMODE_6__SHIFT 0x00000009
+#define PB1_RX_LANE6_SCI_STAT_OVRD_REG0__RXPRESETHINT_6_MASK 0x00000070L
+#define PB1_RX_LANE6_SCI_STAT_OVRD_REG0__RXPRESETHINT_6__SHIFT 0x00000004
+#define PB1_RX_LANE6_SCI_STAT_OVRD_REG0__RXPWR_6_MASK 0x00000007L
+#define PB1_RX_LANE6_SCI_STAT_OVRD_REG0__RXPWR_6__SHIFT 0x00000000
+#define PB1_RX_LANE7_CTRL_REG0__RX_BACKUP_7_MASK 0x000000ffL
+#define PB1_RX_LANE7_CTRL_REG0__RX_BACKUP_7__SHIFT 0x00000000
+#define PB1_RX_LANE7_CTRL_REG0__RX_CFG_OVR_PWRSF_7_MASK 0x00002000L
+#define PB1_RX_LANE7_CTRL_REG0__RX_CFG_OVR_PWRSF_7__SHIFT 0x0000000d
+#define PB1_RX_LANE7_CTRL_REG0__RX_DBG_ANALOG_SEL_7_MASK 0x00000c00L
+#define PB1_RX_LANE7_CTRL_REG0__RX_DBG_ANALOG_SEL_7__SHIFT 0x0000000a
+#define PB1_RX_LANE7_CTRL_REG0__RX_TST_BSCAN_EN_7_MASK 0x00001000L
+#define PB1_RX_LANE7_CTRL_REG0__RX_TST_BSCAN_EN_7__SHIFT 0x0000000c
+#define PB1_RX_LANE7_SCI_STAT_OVRD_REG0__ELECIDLEDETEN_7_MASK 0x00000008L
+#define PB1_RX_LANE7_SCI_STAT_OVRD_REG0__ELECIDLEDETEN_7__SHIFT 0x00000003
+#define PB1_RX_LANE7_SCI_STAT_OVRD_REG0__ENABLEFOM_7_MASK 0x00000080L
+#define PB1_RX_LANE7_SCI_STAT_OVRD_REG0__ENABLEFOM_7__SHIFT 0x00000007
+#define PB1_RX_LANE7_SCI_STAT_OVRD_REG0__REQUESTFOM_7_MASK 0x00000100L
+#define PB1_RX_LANE7_SCI_STAT_OVRD_REG0__REQUESTFOM_7__SHIFT 0x00000008
+#define PB1_RX_LANE7_SCI_STAT_OVRD_REG0__RESPONSEMODE_7_MASK 0x00000200L
+#define PB1_RX_LANE7_SCI_STAT_OVRD_REG0__RESPONSEMODE_7__SHIFT 0x00000009
+#define PB1_RX_LANE7_SCI_STAT_OVRD_REG0__RXPRESETHINT_7_MASK 0x00000070L
+#define PB1_RX_LANE7_SCI_STAT_OVRD_REG0__RXPRESETHINT_7__SHIFT 0x00000004
+#define PB1_RX_LANE7_SCI_STAT_OVRD_REG0__RXPWR_7_MASK 0x00000007L
+#define PB1_RX_LANE7_SCI_STAT_OVRD_REG0__RXPWR_7__SHIFT 0x00000000
+#define PB1_RX_LANE8_CTRL_REG0__RX_BACKUP_8_MASK 0x000000ffL
+#define PB1_RX_LANE8_CTRL_REG0__RX_BACKUP_8__SHIFT 0x00000000
+#define PB1_RX_LANE8_CTRL_REG0__RX_CFG_OVR_PWRSF_8_MASK 0x00002000L
+#define PB1_RX_LANE8_CTRL_REG0__RX_CFG_OVR_PWRSF_8__SHIFT 0x0000000d
+#define PB1_RX_LANE8_CTRL_REG0__RX_DBG_ANALOG_SEL_8_MASK 0x00000c00L
+#define PB1_RX_LANE8_CTRL_REG0__RX_DBG_ANALOG_SEL_8__SHIFT 0x0000000a
+#define PB1_RX_LANE8_CTRL_REG0__RX_TST_BSCAN_EN_8_MASK 0x00001000L
+#define PB1_RX_LANE8_CTRL_REG0__RX_TST_BSCAN_EN_8__SHIFT 0x0000000c
+#define PB1_RX_LANE8_SCI_STAT_OVRD_REG0__ELECIDLEDETEN_8_MASK 0x00000008L
+#define PB1_RX_LANE8_SCI_STAT_OVRD_REG0__ELECIDLEDETEN_8__SHIFT 0x00000003
+#define PB1_RX_LANE8_SCI_STAT_OVRD_REG0__ENABLEFOM_8_MASK 0x00000080L
+#define PB1_RX_LANE8_SCI_STAT_OVRD_REG0__ENABLEFOM_8__SHIFT 0x00000007
+#define PB1_RX_LANE8_SCI_STAT_OVRD_REG0__REQUESTFOM_8_MASK 0x00000100L
+#define PB1_RX_LANE8_SCI_STAT_OVRD_REG0__REQUESTFOM_8__SHIFT 0x00000008
+#define PB1_RX_LANE8_SCI_STAT_OVRD_REG0__RESPONSEMODE_8_MASK 0x00000200L
+#define PB1_RX_LANE8_SCI_STAT_OVRD_REG0__RESPONSEMODE_8__SHIFT 0x00000009
+#define PB1_RX_LANE8_SCI_STAT_OVRD_REG0__RXPRESETHINT_8_MASK 0x00000070L
+#define PB1_RX_LANE8_SCI_STAT_OVRD_REG0__RXPRESETHINT_8__SHIFT 0x00000004
+#define PB1_RX_LANE8_SCI_STAT_OVRD_REG0__RXPWR_8_MASK 0x00000007L
+#define PB1_RX_LANE8_SCI_STAT_OVRD_REG0__RXPWR_8__SHIFT 0x00000000
+#define PB1_RX_LANE9_CTRL_REG0__RX_BACKUP_9_MASK 0x000000ffL
+#define PB1_RX_LANE9_CTRL_REG0__RX_BACKUP_9__SHIFT 0x00000000
+#define PB1_RX_LANE9_CTRL_REG0__RX_CFG_OVR_PWRSF_9_MASK 0x00002000L
+#define PB1_RX_LANE9_CTRL_REG0__RX_CFG_OVR_PWRSF_9__SHIFT 0x0000000d
+#define PB1_RX_LANE9_CTRL_REG0__RX_DBG_ANALOG_SEL_9_MASK 0x00000c00L
+#define PB1_RX_LANE9_CTRL_REG0__RX_DBG_ANALOG_SEL_9__SHIFT 0x0000000a
+#define PB1_RX_LANE9_CTRL_REG0__RX_TST_BSCAN_EN_9_MASK 0x00001000L
+#define PB1_RX_LANE9_CTRL_REG0__RX_TST_BSCAN_EN_9__SHIFT 0x0000000c
+#define PB1_RX_LANE9_SCI_STAT_OVRD_REG0__ELECIDLEDETEN_9_MASK 0x00000008L
+#define PB1_RX_LANE9_SCI_STAT_OVRD_REG0__ELECIDLEDETEN_9__SHIFT 0x00000003
+#define PB1_RX_LANE9_SCI_STAT_OVRD_REG0__ENABLEFOM_9_MASK 0x00000080L
+#define PB1_RX_LANE9_SCI_STAT_OVRD_REG0__ENABLEFOM_9__SHIFT 0x00000007
+#define PB1_RX_LANE9_SCI_STAT_OVRD_REG0__REQUESTFOM_9_MASK 0x00000100L
+#define PB1_RX_LANE9_SCI_STAT_OVRD_REG0__REQUESTFOM_9__SHIFT 0x00000008
+#define PB1_RX_LANE9_SCI_STAT_OVRD_REG0__RESPONSEMODE_9_MASK 0x00000200L
+#define PB1_RX_LANE9_SCI_STAT_OVRD_REG0__RESPONSEMODE_9__SHIFT 0x00000009
+#define PB1_RX_LANE9_SCI_STAT_OVRD_REG0__RXPRESETHINT_9_MASK 0x00000070L
+#define PB1_RX_LANE9_SCI_STAT_OVRD_REG0__RXPRESETHINT_9__SHIFT 0x00000004
+#define PB1_RX_LANE9_SCI_STAT_OVRD_REG0__RXPWR_9_MASK 0x00000007L
+#define PB1_RX_LANE9_SCI_STAT_OVRD_REG0__RXPWR_9__SHIFT 0x00000000
+#define PB1_STRAP_GLB_REG0__STRAP_DBG_RXDLL_VREG_REF_SEL_MASK 0x00008000L
+#define PB1_STRAP_GLB_REG0__STRAP_DBG_RXDLL_VREG_REF_SEL__SHIFT 0x0000000f
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG0__ACCEPT_ENTRY_0_MASK 0x00000001L
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG0__ACCEPT_ENTRY_0__SHIFT 0x00000000
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG0__ACCEPT_ENTRY_10_MASK 0x00000400L
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG0__ACCEPT_ENTRY_10__SHIFT 0x0000000a
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG0__ACCEPT_ENTRY_11_MASK 0x00000800L
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG0__ACCEPT_ENTRY_11__SHIFT 0x0000000b
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG0__ACCEPT_ENTRY_12_MASK 0x00001000L
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG0__ACCEPT_ENTRY_12__SHIFT 0x0000000c
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG0__ACCEPT_ENTRY_13_MASK 0x00002000L
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG0__ACCEPT_ENTRY_13__SHIFT 0x0000000d
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG0__ACCEPT_ENTRY_14_MASK 0x00004000L
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG0__ACCEPT_ENTRY_14__SHIFT 0x0000000e
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG0__ACCEPT_ENTRY_15_MASK 0x00008000L
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG0__ACCEPT_ENTRY_15__SHIFT 0x0000000f
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG0__ACCEPT_ENTRY_16_MASK 0x00010000L
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG0__ACCEPT_ENTRY_16__SHIFT 0x00000010
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG0__ACCEPT_ENTRY_17_MASK 0x00020000L
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG0__ACCEPT_ENTRY_17__SHIFT 0x00000011
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG0__ACCEPT_ENTRY_18_MASK 0x00040000L
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG0__ACCEPT_ENTRY_18__SHIFT 0x00000012
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG0__ACCEPT_ENTRY_19_MASK 0x00080000L
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG0__ACCEPT_ENTRY_19__SHIFT 0x00000013
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG0__ACCEPT_ENTRY_1_MASK 0x00000002L
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG0__ACCEPT_ENTRY_1__SHIFT 0x00000001
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG0__ACCEPT_ENTRY_20_MASK 0x00100000L
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG0__ACCEPT_ENTRY_20__SHIFT 0x00000014
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG0__ACCEPT_ENTRY_21_MASK 0x00200000L
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG0__ACCEPT_ENTRY_21__SHIFT 0x00000015
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG0__ACCEPT_ENTRY_22_MASK 0x00400000L
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG0__ACCEPT_ENTRY_22__SHIFT 0x00000016
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG0__ACCEPT_ENTRY_23_MASK 0x00800000L
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG0__ACCEPT_ENTRY_23__SHIFT 0x00000017
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG0__ACCEPT_ENTRY_24_MASK 0x01000000L
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG0__ACCEPT_ENTRY_24__SHIFT 0x00000018
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG0__ACCEPT_ENTRY_25_MASK 0x02000000L
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG0__ACCEPT_ENTRY_25__SHIFT 0x00000019
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG0__ACCEPT_ENTRY_26_MASK 0x04000000L
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG0__ACCEPT_ENTRY_26__SHIFT 0x0000001a
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG0__ACCEPT_ENTRY_27_MASK 0x08000000L
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG0__ACCEPT_ENTRY_27__SHIFT 0x0000001b
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG0__ACCEPT_ENTRY_28_MASK 0x10000000L
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG0__ACCEPT_ENTRY_28__SHIFT 0x0000001c
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG0__ACCEPT_ENTRY_29_MASK 0x20000000L
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG0__ACCEPT_ENTRY_29__SHIFT 0x0000001d
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG0__ACCEPT_ENTRY_2_MASK 0x00000004L
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG0__ACCEPT_ENTRY_2__SHIFT 0x00000002
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG0__ACCEPT_ENTRY_30_MASK 0x40000000L
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG0__ACCEPT_ENTRY_30__SHIFT 0x0000001e
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG0__ACCEPT_ENTRY_31_MASK 0x80000000L
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG0__ACCEPT_ENTRY_31__SHIFT 0x0000001f
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG0__ACCEPT_ENTRY_3_MASK 0x00000008L
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG0__ACCEPT_ENTRY_3__SHIFT 0x00000003
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG0__ACCEPT_ENTRY_4_MASK 0x00000010L
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG0__ACCEPT_ENTRY_4__SHIFT 0x00000004
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG0__ACCEPT_ENTRY_5_MASK 0x00000020L
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG0__ACCEPT_ENTRY_5__SHIFT 0x00000005
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG0__ACCEPT_ENTRY_6_MASK 0x00000040L
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG0__ACCEPT_ENTRY_6__SHIFT 0x00000006
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG0__ACCEPT_ENTRY_7_MASK 0x00000080L
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG0__ACCEPT_ENTRY_7__SHIFT 0x00000007
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG0__ACCEPT_ENTRY_8_MASK 0x00000100L
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG0__ACCEPT_ENTRY_8__SHIFT 0x00000008
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG0__ACCEPT_ENTRY_9_MASK 0x00000200L
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG0__ACCEPT_ENTRY_9__SHIFT 0x00000009
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG1__ACCEPT_ENTRY_32_MASK 0x00000001L
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG1__ACCEPT_ENTRY_32__SHIFT 0x00000000
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG1__ACCEPT_ENTRY_33_MASK 0x00000002L
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG1__ACCEPT_ENTRY_33__SHIFT 0x00000001
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG1__ACCEPT_ENTRY_34_MASK 0x00000004L
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG1__ACCEPT_ENTRY_34__SHIFT 0x00000002
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG1__ACCEPT_ENTRY_35_MASK 0x00000008L
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG1__ACCEPT_ENTRY_35__SHIFT 0x00000003
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG1__ACCEPT_ENTRY_36_MASK 0x00000010L
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG1__ACCEPT_ENTRY_36__SHIFT 0x00000004
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG1__ACCEPT_ENTRY_37_MASK 0x00000020L
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG1__ACCEPT_ENTRY_37__SHIFT 0x00000005
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG1__ACCEPT_ENTRY_38_MASK 0x00000040L
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG1__ACCEPT_ENTRY_38__SHIFT 0x00000006
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG1__ACCEPT_ENTRY_39_MASK 0x00000080L
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG1__ACCEPT_ENTRY_39__SHIFT 0x00000007
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG1__ACCEPT_ENTRY_40_MASK 0x00000100L
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG1__ACCEPT_ENTRY_40__SHIFT 0x00000008
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG1__ACCEPT_ENTRY_41_MASK 0x00000200L
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG1__ACCEPT_ENTRY_41__SHIFT 0x00000009
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG1__ACCEPT_ENTRY_42_MASK 0x00000400L
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG1__ACCEPT_ENTRY_42__SHIFT 0x0000000a
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG1__ACCEPT_ENTRY_43_MASK 0x00000800L
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG1__ACCEPT_ENTRY_43__SHIFT 0x0000000b
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG1__ACCEPT_ENTRY_44_MASK 0x00001000L
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG1__ACCEPT_ENTRY_44__SHIFT 0x0000000c
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG1__ACCEPT_ENTRY_45_MASK 0x00002000L
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG1__ACCEPT_ENTRY_45__SHIFT 0x0000000d
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG1__ACCEPT_ENTRY_46_MASK 0x00004000L
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG1__ACCEPT_ENTRY_46__SHIFT 0x0000000e
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG1__ACCEPT_ENTRY_47_MASK 0x00008000L
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG1__ACCEPT_ENTRY_47__SHIFT 0x0000000f
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG1__ACCEPT_ENTRY_48_MASK 0x00010000L
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG1__ACCEPT_ENTRY_48__SHIFT 0x00000010
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG1__ACCEPT_ENTRY_49_MASK 0x00020000L
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG1__ACCEPT_ENTRY_49__SHIFT 0x00000011
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG1__ACCEPT_ENTRY_50_MASK 0x00040000L
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG1__ACCEPT_ENTRY_50__SHIFT 0x00000012
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG1__ACCEPT_ENTRY_51_MASK 0x00080000L
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG1__ACCEPT_ENTRY_51__SHIFT 0x00000013
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG1__ACCEPT_ENTRY_52_MASK 0x00100000L
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG1__ACCEPT_ENTRY_52__SHIFT 0x00000014
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG1__ACCEPT_ENTRY_53_MASK 0x00200000L
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG1__ACCEPT_ENTRY_53__SHIFT 0x00000015
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG1__ACCEPT_ENTRY_54_MASK 0x00400000L
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG1__ACCEPT_ENTRY_54__SHIFT 0x00000016
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG1__ACCEPT_ENTRY_55_MASK 0x00800000L
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG1__ACCEPT_ENTRY_55__SHIFT 0x00000017
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG1__ACCEPT_ENTRY_56_MASK 0x01000000L
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG1__ACCEPT_ENTRY_56__SHIFT 0x00000018
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG1__ACCEPT_ENTRY_57_MASK 0x02000000L
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG1__ACCEPT_ENTRY_57__SHIFT 0x00000019
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG1__ACCEPT_ENTRY_58_MASK 0x04000000L
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG1__ACCEPT_ENTRY_58__SHIFT 0x0000001a
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG1__ACCEPT_ENTRY_59_MASK 0x08000000L
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG1__ACCEPT_ENTRY_59__SHIFT 0x0000001b
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG1__ACCEPT_ENTRY_60_MASK 0x10000000L
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG1__ACCEPT_ENTRY_60__SHIFT 0x0000001c
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG1__ACCEPT_ENTRY_61_MASK 0x20000000L
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG1__ACCEPT_ENTRY_61__SHIFT 0x0000001d
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG1__ACCEPT_ENTRY_62_MASK 0x40000000L
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG1__ACCEPT_ENTRY_62__SHIFT 0x0000001e
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG1__ACCEPT_ENTRY_63_MASK 0x80000000L
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG1__ACCEPT_ENTRY_63__SHIFT 0x0000001f
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG2__ACCEPT_ENTRY_64_MASK 0x00000001L
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG2__ACCEPT_ENTRY_64__SHIFT 0x00000000
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG2__ACCEPT_ENTRY_65_MASK 0x00000002L
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG2__ACCEPT_ENTRY_65__SHIFT 0x00000001
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG2__ACCEPT_ENTRY_66_MASK 0x00000004L
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG2__ACCEPT_ENTRY_66__SHIFT 0x00000002
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG2__ACCEPT_ENTRY_67_MASK 0x00000008L
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG2__ACCEPT_ENTRY_67__SHIFT 0x00000003
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG2__ACCEPT_ENTRY_68_MASK 0x00000010L
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG2__ACCEPT_ENTRY_68__SHIFT 0x00000004
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG2__ACCEPT_ENTRY_69_MASK 0x00000020L
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG2__ACCEPT_ENTRY_69__SHIFT 0x00000005
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG2__ACCEPT_ENTRY_70_MASK 0x00000040L
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG2__ACCEPT_ENTRY_70__SHIFT 0x00000006
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG2__ACCEPT_ENTRY_71_MASK 0x00000080L
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG2__ACCEPT_ENTRY_71__SHIFT 0x00000007
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG2__ACCEPT_ENTRY_72_MASK 0x00000100L
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG2__ACCEPT_ENTRY_72__SHIFT 0x00000008
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG2__ACCEPT_ENTRY_73_MASK 0x00000200L
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG2__ACCEPT_ENTRY_73__SHIFT 0x00000009
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG2__ACCEPT_ENTRY_74_MASK 0x00000400L
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG2__ACCEPT_ENTRY_74__SHIFT 0x0000000a
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG2__ACCEPT_ENTRY_75_MASK 0x00000800L
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG2__ACCEPT_ENTRY_75__SHIFT 0x0000000b
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG2__ACCEPT_ENTRY_76_MASK 0x00001000L
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG2__ACCEPT_ENTRY_76__SHIFT 0x0000000c
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG2__ACCEPT_ENTRY_77_MASK 0x00002000L
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG2__ACCEPT_ENTRY_77__SHIFT 0x0000000d
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG2__ACCEPT_ENTRY_78_MASK 0x00004000L
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG2__ACCEPT_ENTRY_78__SHIFT 0x0000000e
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG2__ACCEPT_ENTRY_79_MASK 0x00008000L
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG2__ACCEPT_ENTRY_79__SHIFT 0x0000000f
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG2__ACCEPT_ENTRY_80_MASK 0x00010000L
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG2__ACCEPT_ENTRY_80__SHIFT 0x00000010
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG2__ACCEPT_ENTRY_81_MASK 0x00020000L
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG2__ACCEPT_ENTRY_81__SHIFT 0x00000011
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG2__ACCEPT_ENTRY_82_MASK 0x00040000L
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG2__ACCEPT_ENTRY_82__SHIFT 0x00000012
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG2__ACCEPT_ENTRY_83_MASK 0x00080000L
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG2__ACCEPT_ENTRY_83__SHIFT 0x00000013
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG2__ACCEPT_ENTRY_84_MASK 0x00100000L
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG2__ACCEPT_ENTRY_84__SHIFT 0x00000014
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG2__ACCEPT_ENTRY_85_MASK 0x00200000L
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG2__ACCEPT_ENTRY_85__SHIFT 0x00000015
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG2__ACCEPT_ENTRY_86_MASK 0x00400000L
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG2__ACCEPT_ENTRY_86__SHIFT 0x00000016
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG2__ACCEPT_ENTRY_87_MASK 0x00800000L
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG2__ACCEPT_ENTRY_87__SHIFT 0x00000017
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG2__ACCEPT_ENTRY_88_MASK 0x01000000L
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG2__ACCEPT_ENTRY_88__SHIFT 0x00000018
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG2__ACCEPT_ENTRY_89_MASK 0x02000000L
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG2__ACCEPT_ENTRY_89__SHIFT 0x00000019
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG2__ACCEPT_ENTRY_90_MASK 0x04000000L
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG2__ACCEPT_ENTRY_90__SHIFT 0x0000001a
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG2__ACCEPT_ENTRY_91_MASK 0x08000000L
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG2__ACCEPT_ENTRY_91__SHIFT 0x0000001b
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG2__ACCEPT_ENTRY_92_MASK 0x10000000L
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG2__ACCEPT_ENTRY_92__SHIFT 0x0000001c
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG2__ACCEPT_ENTRY_93_MASK 0x20000000L
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG2__ACCEPT_ENTRY_93__SHIFT 0x0000001d
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG2__ACCEPT_ENTRY_94_MASK 0x40000000L
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG2__ACCEPT_ENTRY_94__SHIFT 0x0000001e
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG2__ACCEPT_ENTRY_95_MASK 0x80000000L
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG2__ACCEPT_ENTRY_95__SHIFT 0x0000001f
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG3__ACCEPT_ENTRY_100_MASK 0x00000010L
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG3__ACCEPT_ENTRY_100__SHIFT 0x00000004
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG3__ACCEPT_ENTRY_101_MASK 0x00000020L
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG3__ACCEPT_ENTRY_101__SHIFT 0x00000005
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG3__ACCEPT_ENTRY_102_MASK 0x00000040L
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG3__ACCEPT_ENTRY_102__SHIFT 0x00000006
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG3__ACCEPT_ENTRY_103_MASK 0x00000080L
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG3__ACCEPT_ENTRY_103__SHIFT 0x00000007
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG3__ACCEPT_ENTRY_104_MASK 0x00000100L
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG3__ACCEPT_ENTRY_104__SHIFT 0x00000008
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG3__ACCEPT_ENTRY_105_MASK 0x00000200L
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG3__ACCEPT_ENTRY_105__SHIFT 0x00000009
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG3__ACCEPT_ENTRY_106_MASK 0x00000400L
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG3__ACCEPT_ENTRY_106__SHIFT 0x0000000a
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG3__ACCEPT_ENTRY_107_MASK 0x00000800L
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG3__ACCEPT_ENTRY_107__SHIFT 0x0000000b
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG3__ACCEPT_ENTRY_108_MASK 0x00001000L
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG3__ACCEPT_ENTRY_108__SHIFT 0x0000000c
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG3__ACCEPT_ENTRY_109_MASK 0x00002000L
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG3__ACCEPT_ENTRY_109__SHIFT 0x0000000d
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG3__ACCEPT_ENTRY_96_MASK 0x00000001L
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG3__ACCEPT_ENTRY_96__SHIFT 0x00000000
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG3__ACCEPT_ENTRY_97_MASK 0x00000002L
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG3__ACCEPT_ENTRY_97__SHIFT 0x00000001
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG3__ACCEPT_ENTRY_98_MASK 0x00000004L
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG3__ACCEPT_ENTRY_98__SHIFT 0x00000002
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG3__ACCEPT_ENTRY_99_MASK 0x00000008L
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG3__ACCEPT_ENTRY_99__SHIFT 0x00000003
+#define PB1_TX_GLB_CTRL_REG0__TX_CFG_RPTR_RST_VAL_GEN1_MASK 0x00000700L
+#define PB1_TX_GLB_CTRL_REG0__TX_CFG_RPTR_RST_VAL_GEN1__SHIFT 0x00000008
+#define PB1_TX_GLB_CTRL_REG0__TX_CFG_RPTR_RST_VAL_GEN2_MASK 0x00003800L
+#define PB1_TX_GLB_CTRL_REG0__TX_CFG_RPTR_RST_VAL_GEN2__SHIFT 0x0000000b
+#define PB1_TX_GLB_CTRL_REG0__TX_CFG_RPTR_RST_VAL_GEN3_MASK 0x0001c000L
+#define PB1_TX_GLB_CTRL_REG0__TX_CFG_RPTR_RST_VAL_GEN3__SHIFT 0x0000000e
+#define PB1_TX_GLB_CTRL_REG0__TX_COEFF_ROUND_DIR_VER_MASK 0x00400000L
+#define PB1_TX_GLB_CTRL_REG0__TX_COEFF_ROUND_DIR_VER__SHIFT 0x00000016
+#define PB1_TX_GLB_CTRL_REG0__TX_COEFF_ROUND_EN_MASK 0x00200000L
+#define PB1_TX_GLB_CTRL_REG0__TX_COEFF_ROUND_EN__SHIFT 0x00000015
+#define PB1_TX_GLB_CTRL_REG0__TX_DATA_CLK_GATING_MASK 0x00080000L
+#define PB1_TX_GLB_CTRL_REG0__TX_DATA_CLK_GATING__SHIFT 0x00000013
+#define PB1_TX_GLB_CTRL_REG0__TX_DCLK_EN_LSX_ALWAYS_ON_MASK 0x00800000L
+#define PB1_TX_GLB_CTRL_REG0__TX_DCLK_EN_LSX_ALWAYS_ON__SHIFT 0x00000017
+#define PB1_TX_GLB_CTRL_REG0__TX_DRV_DATA_ASRT_DLY_VAL_MASK 0x00000007L
+#define PB1_TX_GLB_CTRL_REG0__TX_DRV_DATA_ASRT_DLY_VAL__SHIFT 0x00000000
+#define PB1_TX_GLB_CTRL_REG0__TX_DRV_DATA_DSRT_DLY_VAL_MASK 0x00000038L
+#define PB1_TX_GLB_CTRL_REG0__TX_DRV_DATA_DSRT_DLY_VAL__SHIFT 0x00000003
+#define PB1_TX_GLB_CTRL_REG0__TX_FRONTEND_PWRON_IN_OFF_MASK 0x01000000L
+#define PB1_TX_GLB_CTRL_REG0__TX_FRONTEND_PWRON_IN_OFF__SHIFT 0x00000018
+#define PB1_TX_GLB_CTRL_REG0__TX_PRESET_TABLE_BYPASS_MASK 0x00100000L
+#define PB1_TX_GLB_CTRL_REG0__TX_PRESET_TABLE_BYPASS__SHIFT 0x00000014
+#define PB1_TX_GLB_CTRL_REG0__TX_STAGGER_CTRL_MASK 0x00060000L
+#define PB1_TX_GLB_CTRL_REG0__TX_STAGGER_CTRL__SHIFT 0x00000011
+#define PB1_TX_GLB_LANE_SKEW_CTRL__TX_CFG_GROUPX16_EN_L0T15_MASK 0x40000000L
+#define PB1_TX_GLB_LANE_SKEW_CTRL__TX_CFG_GROUPX16_EN_L0T15__SHIFT 0x0000001e
+#define PB1_TX_GLB_LANE_SKEW_CTRL__TX_CFG_GROUPX1_EN_0_MASK 0x00000001L
+#define PB1_TX_GLB_LANE_SKEW_CTRL__TX_CFG_GROUPX1_EN_0__SHIFT 0x00000000
+#define PB1_TX_GLB_LANE_SKEW_CTRL__TX_CFG_GROUPX1_EN_10_MASK 0x00000400L
+#define PB1_TX_GLB_LANE_SKEW_CTRL__TX_CFG_GROUPX1_EN_10__SHIFT 0x0000000a
+#define PB1_TX_GLB_LANE_SKEW_CTRL__TX_CFG_GROUPX1_EN_11_MASK 0x00000800L
+#define PB1_TX_GLB_LANE_SKEW_CTRL__TX_CFG_GROUPX1_EN_11__SHIFT 0x0000000b
+#define PB1_TX_GLB_LANE_SKEW_CTRL__TX_CFG_GROUPX1_EN_12_MASK 0x00001000L
+#define PB1_TX_GLB_LANE_SKEW_CTRL__TX_CFG_GROUPX1_EN_12__SHIFT 0x0000000c
+#define PB1_TX_GLB_LANE_SKEW_CTRL__TX_CFG_GROUPX1_EN_13_MASK 0x00002000L
+#define PB1_TX_GLB_LANE_SKEW_CTRL__TX_CFG_GROUPX1_EN_13__SHIFT 0x0000000d
+#define PB1_TX_GLB_LANE_SKEW_CTRL__TX_CFG_GROUPX1_EN_14_MASK 0x00004000L
+#define PB1_TX_GLB_LANE_SKEW_CTRL__TX_CFG_GROUPX1_EN_14__SHIFT 0x0000000e
+#define PB1_TX_GLB_LANE_SKEW_CTRL__TX_CFG_GROUPX1_EN_15_MASK 0x00008000L
+#define PB1_TX_GLB_LANE_SKEW_CTRL__TX_CFG_GROUPX1_EN_15__SHIFT 0x0000000f
+#define PB1_TX_GLB_LANE_SKEW_CTRL__TX_CFG_GROUPX1_EN_1_MASK 0x00000002L
+#define PB1_TX_GLB_LANE_SKEW_CTRL__TX_CFG_GROUPX1_EN_1__SHIFT 0x00000001
+#define PB1_TX_GLB_LANE_SKEW_CTRL__TX_CFG_GROUPX1_EN_2_MASK 0x00000004L
+#define PB1_TX_GLB_LANE_SKEW_CTRL__TX_CFG_GROUPX1_EN_2__SHIFT 0x00000002
+#define PB1_TX_GLB_LANE_SKEW_CTRL__TX_CFG_GROUPX1_EN_3_MASK 0x00000008L
+#define PB1_TX_GLB_LANE_SKEW_CTRL__TX_CFG_GROUPX1_EN_3__SHIFT 0x00000003
+#define PB1_TX_GLB_LANE_SKEW_CTRL__TX_CFG_GROUPX1_EN_4_MASK 0x00000010L
+#define PB1_TX_GLB_LANE_SKEW_CTRL__TX_CFG_GROUPX1_EN_4__SHIFT 0x00000004
+#define PB1_TX_GLB_LANE_SKEW_CTRL__TX_CFG_GROUPX1_EN_5_MASK 0x00000020L
+#define PB1_TX_GLB_LANE_SKEW_CTRL__TX_CFG_GROUPX1_EN_5__SHIFT 0x00000005
+#define PB1_TX_GLB_LANE_SKEW_CTRL__TX_CFG_GROUPX1_EN_6_MASK 0x00000040L
+#define PB1_TX_GLB_LANE_SKEW_CTRL__TX_CFG_GROUPX1_EN_6__SHIFT 0x00000006
+#define PB1_TX_GLB_LANE_SKEW_CTRL__TX_CFG_GROUPX1_EN_7_MASK 0x00000080L
+#define PB1_TX_GLB_LANE_SKEW_CTRL__TX_CFG_GROUPX1_EN_7__SHIFT 0x00000007
+#define PB1_TX_GLB_LANE_SKEW_CTRL__TX_CFG_GROUPX1_EN_8_MASK 0x00000100L
+#define PB1_TX_GLB_LANE_SKEW_CTRL__TX_CFG_GROUPX1_EN_8__SHIFT 0x00000008
+#define PB1_TX_GLB_LANE_SKEW_CTRL__TX_CFG_GROUPX1_EN_9_MASK 0x00000200L
+#define PB1_TX_GLB_LANE_SKEW_CTRL__TX_CFG_GROUPX1_EN_9__SHIFT 0x00000009
+#define PB1_TX_GLB_LANE_SKEW_CTRL__TX_CFG_GROUPX2_EN_L0T1_MASK 0x00010000L
+#define PB1_TX_GLB_LANE_SKEW_CTRL__TX_CFG_GROUPX2_EN_L0T1__SHIFT 0x00000010
+#define PB1_TX_GLB_LANE_SKEW_CTRL__TX_CFG_GROUPX2_EN_L10T11_MASK 0x00200000L
+#define PB1_TX_GLB_LANE_SKEW_CTRL__TX_CFG_GROUPX2_EN_L10T11__SHIFT 0x00000015
+#define PB1_TX_GLB_LANE_SKEW_CTRL__TX_CFG_GROUPX2_EN_L12T13_MASK 0x00400000L
+#define PB1_TX_GLB_LANE_SKEW_CTRL__TX_CFG_GROUPX2_EN_L12T13__SHIFT 0x00000016
+#define PB1_TX_GLB_LANE_SKEW_CTRL__TX_CFG_GROUPX2_EN_L14T15_MASK 0x00800000L
+#define PB1_TX_GLB_LANE_SKEW_CTRL__TX_CFG_GROUPX2_EN_L14T15__SHIFT 0x00000017
+#define PB1_TX_GLB_LANE_SKEW_CTRL__TX_CFG_GROUPX2_EN_L2T3_MASK 0x00020000L
+#define PB1_TX_GLB_LANE_SKEW_CTRL__TX_CFG_GROUPX2_EN_L2T3__SHIFT 0x00000011
+#define PB1_TX_GLB_LANE_SKEW_CTRL__TX_CFG_GROUPX2_EN_L4T5_MASK 0x00040000L
+#define PB1_TX_GLB_LANE_SKEW_CTRL__TX_CFG_GROUPX2_EN_L4T5__SHIFT 0x00000012
+#define PB1_TX_GLB_LANE_SKEW_CTRL__TX_CFG_GROUPX2_EN_L6T7_MASK 0x00080000L
+#define PB1_TX_GLB_LANE_SKEW_CTRL__TX_CFG_GROUPX2_EN_L6T7__SHIFT 0x00000013
+#define PB1_TX_GLB_LANE_SKEW_CTRL__TX_CFG_GROUPX2_EN_L8T9_MASK 0x00100000L
+#define PB1_TX_GLB_LANE_SKEW_CTRL__TX_CFG_GROUPX2_EN_L8T9__SHIFT 0x00000014
+#define PB1_TX_GLB_LANE_SKEW_CTRL__TX_CFG_GROUPX4_EN_L0T3_MASK 0x01000000L
+#define PB1_TX_GLB_LANE_SKEW_CTRL__TX_CFG_GROUPX4_EN_L0T3__SHIFT 0x00000018
+#define PB1_TX_GLB_LANE_SKEW_CTRL__TX_CFG_GROUPX4_EN_L12T15_MASK 0x08000000L
+#define PB1_TX_GLB_LANE_SKEW_CTRL__TX_CFG_GROUPX4_EN_L12T15__SHIFT 0x0000001b
+#define PB1_TX_GLB_LANE_SKEW_CTRL__TX_CFG_GROUPX4_EN_L4T7_MASK 0x02000000L
+#define PB1_TX_GLB_LANE_SKEW_CTRL__TX_CFG_GROUPX4_EN_L4T7__SHIFT 0x00000019
+#define PB1_TX_GLB_LANE_SKEW_CTRL__TX_CFG_GROUPX4_EN_L8T11_MASK 0x04000000L
+#define PB1_TX_GLB_LANE_SKEW_CTRL__TX_CFG_GROUPX4_EN_L8T11__SHIFT 0x0000001a
+#define PB1_TX_GLB_LANE_SKEW_CTRL__TX_CFG_GROUPX8_EN_L0T7_MASK 0x10000000L
+#define PB1_TX_GLB_LANE_SKEW_CTRL__TX_CFG_GROUPX8_EN_L0T7__SHIFT 0x0000001c
+#define PB1_TX_GLB_LANE_SKEW_CTRL__TX_CFG_GROUPX8_EN_L8T15_MASK 0x20000000L
+#define PB1_TX_GLB_LANE_SKEW_CTRL__TX_CFG_GROUPX8_EN_L8T15__SHIFT 0x0000001d
+#define PB1_TX_GLB_OVRD_REG0__TX_CFG_DCLK_DIV_OVRD_EN_MASK 0x00000008L
+#define PB1_TX_GLB_OVRD_REG0__TX_CFG_DCLK_DIV_OVRD_EN__SHIFT 0x00000003
+#define PB1_TX_GLB_OVRD_REG0__TX_CFG_DCLK_DIV_OVRD_VAL_MASK 0x00000007L
+#define PB1_TX_GLB_OVRD_REG0__TX_CFG_DCLK_DIV_OVRD_VAL__SHIFT 0x00000000
+#define PB1_TX_GLB_OVRD_REG0__TX_CFG_DRV0_EN_GEN1_OVRD_VAL_MASK 0x000000f0L
+#define PB1_TX_GLB_OVRD_REG0__TX_CFG_DRV0_EN_GEN1_OVRD_VAL__SHIFT 0x00000004
+#define PB1_TX_GLB_OVRD_REG0__TX_CFG_DRV0_EN_OVRD_EN_MASK 0x00000100L
+#define PB1_TX_GLB_OVRD_REG0__TX_CFG_DRV0_EN_OVRD_EN__SHIFT 0x00000008
+#define PB1_TX_GLB_OVRD_REG0__TX_CFG_DRV0_TAP_SEL_GEN1_OVRD_VAL_MASK 0x00001e00L
+#define PB1_TX_GLB_OVRD_REG0__TX_CFG_DRV0_TAP_SEL_GEN1_OVRD_VAL__SHIFT 0x00000009
+#define PB1_TX_GLB_OVRD_REG0__TX_CFG_DRV0_TAP_SEL_OVRD_EN_MASK 0x00002000L
+#define PB1_TX_GLB_OVRD_REG0__TX_CFG_DRV0_TAP_SEL_OVRD_EN__SHIFT 0x0000000d
+#define PB1_TX_GLB_OVRD_REG0__TX_CFG_DRV1_EN_GEN1_OVRD_VAL_MASK 0x0007c000L
+#define PB1_TX_GLB_OVRD_REG0__TX_CFG_DRV1_EN_GEN1_OVRD_VAL__SHIFT 0x0000000e
+#define PB1_TX_GLB_OVRD_REG0__TX_CFG_DRV1_EN_OVRD_EN_MASK 0x00080000L
+#define PB1_TX_GLB_OVRD_REG0__TX_CFG_DRV1_EN_OVRD_EN__SHIFT 0x00000013
+#define PB1_TX_GLB_OVRD_REG0__TX_CFG_DRV1_TAP_SEL_GEN1_OVRD_VAL_MASK 0x01f00000L
+#define PB1_TX_GLB_OVRD_REG0__TX_CFG_DRV1_TAP_SEL_GEN1_OVRD_VAL__SHIFT 0x00000014
+#define PB1_TX_GLB_OVRD_REG0__TX_CFG_DRV1_TAP_SEL_OVRD_EN_MASK 0x02000000L
+#define PB1_TX_GLB_OVRD_REG0__TX_CFG_DRV1_TAP_SEL_OVRD_EN__SHIFT 0x00000019
+#define PB1_TX_GLB_OVRD_REG0__TX_CFG_DRV2_EN_GEN1_OVRD_VAL_MASK 0x3c000000L
+#define PB1_TX_GLB_OVRD_REG0__TX_CFG_DRV2_EN_GEN1_OVRD_VAL__SHIFT 0x0000001a
+#define PB1_TX_GLB_OVRD_REG0__TX_CFG_DRV2_EN_OVRD_EN_MASK 0x40000000L
+#define PB1_TX_GLB_OVRD_REG0__TX_CFG_DRV2_EN_OVRD_EN__SHIFT 0x0000001e
+#define PB1_TX_GLB_OVRD_REG1__TX_CFG_DRV2_TAP_SEL_GEN1_OVRD_VAL_MASK 0x0000000fL
+#define PB1_TX_GLB_OVRD_REG1__TX_CFG_DRV2_TAP_SEL_GEN1_OVRD_VAL__SHIFT 0x00000000
+#define PB1_TX_GLB_OVRD_REG1__TX_CFG_DRV2_TAP_SEL_OVRD_EN_MASK 0x00000010L
+#define PB1_TX_GLB_OVRD_REG1__TX_CFG_DRV2_TAP_SEL_OVRD_EN__SHIFT 0x00000004
+#define PB1_TX_GLB_OVRD_REG1__TX_CFG_DRVX_EN_GEN1_OVRD_VAL_MASK 0x00000020L
+#define PB1_TX_GLB_OVRD_REG1__TX_CFG_DRVX_EN_GEN1_OVRD_VAL__SHIFT 0x00000005
+#define PB1_TX_GLB_OVRD_REG1__TX_CFG_DRVX_EN_OVRD_EN_MASK 0x00000040L
+#define PB1_TX_GLB_OVRD_REG1__TX_CFG_DRVX_EN_OVRD_EN__SHIFT 0x00000006
+#define PB1_TX_GLB_OVRD_REG1__TX_CFG_DRVX_TAP_SEL_GEN1_OVRD_VAL_MASK 0x00000080L
+#define PB1_TX_GLB_OVRD_REG1__TX_CFG_DRVX_TAP_SEL_GEN1_OVRD_VAL__SHIFT 0x00000007
+#define PB1_TX_GLB_OVRD_REG1__TX_CFG_DRVX_TAP_SEL_OVRD_EN_MASK 0x00000100L
+#define PB1_TX_GLB_OVRD_REG1__TX_CFG_DRVX_TAP_SEL_OVRD_EN__SHIFT 0x00000008
+#define PB1_TX_GLB_OVRD_REG1__TX_CFG_PLLCLK_SEL_OVRD_EN_MASK 0x00000400L
+#define PB1_TX_GLB_OVRD_REG1__TX_CFG_PLLCLK_SEL_OVRD_EN__SHIFT 0x0000000a
+#define PB1_TX_GLB_OVRD_REG1__TX_CFG_PLLCLK_SEL_OVRD_VAL_MASK 0x00000200L
+#define PB1_TX_GLB_OVRD_REG1__TX_CFG_PLLCLK_SEL_OVRD_VAL__SHIFT 0x00000009
+#define PB1_TX_GLB_OVRD_REG1__TX_CFG_TCLK_DIV_OVRD_EN_MASK 0x00001000L
+#define PB1_TX_GLB_OVRD_REG1__TX_CFG_TCLK_DIV_OVRD_EN__SHIFT 0x0000000c
+#define PB1_TX_GLB_OVRD_REG1__TX_CFG_TCLK_DIV_OVRD_VAL_MASK 0x00000800L
+#define PB1_TX_GLB_OVRD_REG1__TX_CFG_TCLK_DIV_OVRD_VAL__SHIFT 0x0000000b
+#define PB1_TX_GLB_OVRD_REG1__TX_CMDET_EN_OVRD_EN_MASK 0x00004000L
+#define PB1_TX_GLB_OVRD_REG1__TX_CMDET_EN_OVRD_EN__SHIFT 0x0000000e
+#define PB1_TX_GLB_OVRD_REG1__TX_CMDET_EN_OVRD_VAL_MASK 0x00002000L
+#define PB1_TX_GLB_OVRD_REG1__TX_CMDET_EN_OVRD_VAL__SHIFT 0x0000000d
+#define PB1_TX_GLB_OVRD_REG1__TX_DATA_IN_OVRD_EN_MASK 0x02000000L
+#define PB1_TX_GLB_OVRD_REG1__TX_DATA_IN_OVRD_EN__SHIFT 0x00000019
+#define PB1_TX_GLB_OVRD_REG1__TX_DATA_IN_OVRD_VAL_MASK 0x01ff8000L
+#define PB1_TX_GLB_OVRD_REG1__TX_DATA_IN_OVRD_VAL__SHIFT 0x0000000f
+#define PB1_TX_GLB_OVRD_REG1__TX_RPTR_RSTN_OVRD_EN_MASK 0x08000000L
+#define PB1_TX_GLB_OVRD_REG1__TX_RPTR_RSTN_OVRD_EN__SHIFT 0x0000001b
+#define PB1_TX_GLB_OVRD_REG1__TX_RPTR_RSTN_OVRD_VAL_MASK 0x04000000L
+#define PB1_TX_GLB_OVRD_REG1__TX_RPTR_RSTN_OVRD_VAL__SHIFT 0x0000001a
+#define PB1_TX_GLB_OVRD_REG1__TX_RXDET_EN_OVRD_EN_MASK 0x20000000L
+#define PB1_TX_GLB_OVRD_REG1__TX_RXDET_EN_OVRD_EN__SHIFT 0x0000001d
+#define PB1_TX_GLB_OVRD_REG1__TX_RXDET_EN_OVRD_VAL_MASK 0x10000000L
+#define PB1_TX_GLB_OVRD_REG1__TX_RXDET_EN_OVRD_VAL__SHIFT 0x0000001c
+#define PB1_TX_GLB_OVRD_REG1__TX_WPTR_RSTN_OVRD_EN_MASK 0x80000000L
+#define PB1_TX_GLB_OVRD_REG1__TX_WPTR_RSTN_OVRD_EN__SHIFT 0x0000001f
+#define PB1_TX_GLB_OVRD_REG1__TX_WPTR_RSTN_OVRD_VAL_MASK 0x40000000L
+#define PB1_TX_GLB_OVRD_REG1__TX_WPTR_RSTN_OVRD_VAL__SHIFT 0x0000001e
+#define PB1_TX_GLB_OVRD_REG2__TX_CFG_DRV0_EN_GEN2_OVRD_VAL_MASK 0x0000f000L
+#define PB1_TX_GLB_OVRD_REG2__TX_CFG_DRV0_EN_GEN2_OVRD_VAL__SHIFT 0x0000000c
+#define PB1_TX_GLB_OVRD_REG2__TX_CFG_DRV0_TAP_SEL_GEN2_OVRD_VAL_MASK 0x000f0000L
+#define PB1_TX_GLB_OVRD_REG2__TX_CFG_DRV0_TAP_SEL_GEN2_OVRD_VAL__SHIFT 0x00000010
+#define PB1_TX_GLB_OVRD_REG2__TX_CFG_DRV1_EN_GEN2_OVRD_VAL_MASK 0x01f00000L
+#define PB1_TX_GLB_OVRD_REG2__TX_CFG_DRV1_EN_GEN2_OVRD_VAL__SHIFT 0x00000014
+#define PB1_TX_GLB_OVRD_REG2__TX_CFG_DRV1_TAP_SEL_GEN2_OVRD_VAL_MASK 0x3e000000L
+#define PB1_TX_GLB_OVRD_REG2__TX_CFG_DRV1_TAP_SEL_GEN2_OVRD_VAL__SHIFT 0x00000019
+#define PB1_TX_GLB_OVRD_REG2__TX_CFG_GROUPX16_EN_OVRD_EN_MASK 0x00000800L
+#define PB1_TX_GLB_OVRD_REG2__TX_CFG_GROUPX16_EN_OVRD_EN__SHIFT 0x0000000b
+#define PB1_TX_GLB_OVRD_REG2__TX_CFG_GROUPX16_EN_OVRD_VAL_MASK 0x00000400L
+#define PB1_TX_GLB_OVRD_REG2__TX_CFG_GROUPX16_EN_OVRD_VAL__SHIFT 0x0000000a
+#define PB1_TX_GLB_OVRD_REG2__TX_CFG_GROUPX1_EN_OVRD_EN_MASK 0x00000008L
+#define PB1_TX_GLB_OVRD_REG2__TX_CFG_GROUPX1_EN_OVRD_EN__SHIFT 0x00000003
+#define PB1_TX_GLB_OVRD_REG2__TX_CFG_GROUPX1_EN_OVRD_VAL_MASK 0x00000004L
+#define PB1_TX_GLB_OVRD_REG2__TX_CFG_GROUPX1_EN_OVRD_VAL__SHIFT 0x00000002
+#define PB1_TX_GLB_OVRD_REG2__TX_CFG_GROUPX2_EN_OVRD_EN_MASK 0x00000020L
+#define PB1_TX_GLB_OVRD_REG2__TX_CFG_GROUPX2_EN_OVRD_EN__SHIFT 0x00000005
+#define PB1_TX_GLB_OVRD_REG2__TX_CFG_GROUPX2_EN_OVRD_VAL_MASK 0x00000010L
+#define PB1_TX_GLB_OVRD_REG2__TX_CFG_GROUPX2_EN_OVRD_VAL__SHIFT 0x00000004
+#define PB1_TX_GLB_OVRD_REG2__TX_CFG_GROUPX4_EN_OVRD_EN_MASK 0x00000080L
+#define PB1_TX_GLB_OVRD_REG2__TX_CFG_GROUPX4_EN_OVRD_EN__SHIFT 0x00000007
+#define PB1_TX_GLB_OVRD_REG2__TX_CFG_GROUPX4_EN_OVRD_VAL_MASK 0x00000040L
+#define PB1_TX_GLB_OVRD_REG2__TX_CFG_GROUPX4_EN_OVRD_VAL__SHIFT 0x00000006
+#define PB1_TX_GLB_OVRD_REG2__TX_CFG_GROUPX8_EN_OVRD_EN_MASK 0x00000200L
+#define PB1_TX_GLB_OVRD_REG2__TX_CFG_GROUPX8_EN_OVRD_EN__SHIFT 0x00000009
+#define PB1_TX_GLB_OVRD_REG2__TX_CFG_GROUPX8_EN_OVRD_VAL_MASK 0x00000100L
+#define PB1_TX_GLB_OVRD_REG2__TX_CFG_GROUPX8_EN_OVRD_VAL__SHIFT 0x00000008
+#define PB1_TX_GLB_OVRD_REG2__TX_WRITE_EN_OVRD_EN_MASK 0x00000002L
+#define PB1_TX_GLB_OVRD_REG2__TX_WRITE_EN_OVRD_EN__SHIFT 0x00000001
+#define PB1_TX_GLB_OVRD_REG2__TX_WRITE_EN_OVRD_VAL_MASK 0x00000001L
+#define PB1_TX_GLB_OVRD_REG2__TX_WRITE_EN_OVRD_VAL__SHIFT 0x00000000
+#define PB1_TX_GLB_OVRD_REG3__TX_CFG_DRV0_EN_GEN3_OVRD_VAL_MASK 0x00003c00L
+#define PB1_TX_GLB_OVRD_REG3__TX_CFG_DRV0_EN_GEN3_OVRD_VAL__SHIFT 0x0000000a
+#define PB1_TX_GLB_OVRD_REG3__TX_CFG_DRV0_TAP_SEL_GEN3_OVRD_VAL_MASK 0x0003c000L
+#define PB1_TX_GLB_OVRD_REG3__TX_CFG_DRV0_TAP_SEL_GEN3_OVRD_VAL__SHIFT 0x0000000e
+#define PB1_TX_GLB_OVRD_REG3__TX_CFG_DRV1_EN_GEN3_OVRD_VAL_MASK 0x007c0000L
+#define PB1_TX_GLB_OVRD_REG3__TX_CFG_DRV1_EN_GEN3_OVRD_VAL__SHIFT 0x00000012
+#define PB1_TX_GLB_OVRD_REG3__TX_CFG_DRV1_TAP_SEL_GEN3_OVRD_VAL_MASK 0x0f800000L
+#define PB1_TX_GLB_OVRD_REG3__TX_CFG_DRV1_TAP_SEL_GEN3_OVRD_VAL__SHIFT 0x00000017
+#define PB1_TX_GLB_OVRD_REG3__TX_CFG_DRV2_EN_GEN2_OVRD_VAL_MASK 0x0000000fL
+#define PB1_TX_GLB_OVRD_REG3__TX_CFG_DRV2_EN_GEN2_OVRD_VAL__SHIFT 0x00000000
+#define PB1_TX_GLB_OVRD_REG3__TX_CFG_DRV2_EN_GEN3_OVRD_VAL_MASK 0xf0000000L
+#define PB1_TX_GLB_OVRD_REG3__TX_CFG_DRV2_EN_GEN3_OVRD_VAL__SHIFT 0x0000001c
+#define PB1_TX_GLB_OVRD_REG3__TX_CFG_DRV2_TAP_SEL_GEN2_OVRD_VAL_MASK 0x000000f0L
+#define PB1_TX_GLB_OVRD_REG3__TX_CFG_DRV2_TAP_SEL_GEN2_OVRD_VAL__SHIFT 0x00000004
+#define PB1_TX_GLB_OVRD_REG3__TX_CFG_DRVX_EN_GEN2_OVRD_VAL_MASK 0x00000100L
+#define PB1_TX_GLB_OVRD_REG3__TX_CFG_DRVX_EN_GEN2_OVRD_VAL__SHIFT 0x00000008
+#define PB1_TX_GLB_OVRD_REG3__TX_CFG_DRVX_TAP_SEL_GEN2_OVRD_VAL_MASK 0x00000200L
+#define PB1_TX_GLB_OVRD_REG3__TX_CFG_DRVX_TAP_SEL_GEN2_OVRD_VAL__SHIFT 0x00000009
+#define PB1_TX_GLB_OVRD_REG4__TX_CFG_DRV2_TAP_SEL_GEN3_OVRD_VAL_MASK 0x0000000fL
+#define PB1_TX_GLB_OVRD_REG4__TX_CFG_DRV2_TAP_SEL_GEN3_OVRD_VAL__SHIFT 0x00000000
+#define PB1_TX_GLB_OVRD_REG4__TX_CFG_DRVX_EN_GEN3_OVRD_VAL_MASK 0x00000010L
+#define PB1_TX_GLB_OVRD_REG4__TX_CFG_DRVX_EN_GEN3_OVRD_VAL__SHIFT 0x00000004
+#define PB1_TX_GLB_OVRD_REG4__TX_CFG_DRVX_TAP_SEL_GEN3_OVRD_VAL_MASK 0x00000020L
+#define PB1_TX_GLB_OVRD_REG4__TX_CFG_DRVX_TAP_SEL_GEN3_OVRD_VAL__SHIFT 0x00000005
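The *_OVRD_VAL/*_OVRD_EN fields above follow the usual override convention: the VAL bits carry the forced value and the EN bit, presumably, tells the hardware to use it in place of the internally generated one. A minimal read-modify-write sketch under that assumption; read_reg()/write_reg() and the PB1_TX_GLB_OVRD_REG1 register offset are hypothetical stand-ins for the driver's MMIO accessors, and only the field masks/shifts come from this header:

#include <linux/types.h>

/* Hypothetical MMIO accessors; not defined in this header. */
extern u32 read_reg(u32 offset);
extern void write_reg(u32 offset, u32 value);

/* Force the TCLK divider override: program VAL first, then set EN
 * (assumption: EN gates whether the hardware honours VAL). */
static void pb1_force_tclk_div(u32 val)
{
	/* PB1_TX_GLB_OVRD_REG1 as a register offset is assumed defined elsewhere. */
	u32 reg = read_reg(PB1_TX_GLB_OVRD_REG1);

	reg &= ~PB1_TX_GLB_OVRD_REG1__TX_CFG_TCLK_DIV_OVRD_VAL_MASK;
	reg |= (val << PB1_TX_GLB_OVRD_REG1__TX_CFG_TCLK_DIV_OVRD_VAL__SHIFT) &
	       PB1_TX_GLB_OVRD_REG1__TX_CFG_TCLK_DIV_OVRD_VAL_MASK;
	reg |= PB1_TX_GLB_OVRD_REG1__TX_CFG_TCLK_DIV_OVRD_EN_MASK;

	write_reg(PB1_TX_GLB_OVRD_REG1, reg);
}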
+#define PB1_TX_GLB_SCI_STAT_OVRD_REG0__IGNR_COEFFICIENTID_SCI_UPDT_L0T3_MASK 0x00000100L
+#define PB1_TX_GLB_SCI_STAT_OVRD_REG0__IGNR_COEFFICIENTID_SCI_UPDT_L0T3__SHIFT 0x00000008
+#define PB1_TX_GLB_SCI_STAT_OVRD_REG0__IGNR_COEFFICIENTID_SCI_UPDT_L12T15_MASK 0x00000800L
+#define PB1_TX_GLB_SCI_STAT_OVRD_REG0__IGNR_COEFFICIENTID_SCI_UPDT_L12T15__SHIFT 0x0000000b
+#define PB1_TX_GLB_SCI_STAT_OVRD_REG0__IGNR_COEFFICIENTID_SCI_UPDT_L4T7_MASK 0x00000200L
+#define PB1_TX_GLB_SCI_STAT_OVRD_REG0__IGNR_COEFFICIENTID_SCI_UPDT_L4T7__SHIFT 0x00000009
+#define PB1_TX_GLB_SCI_STAT_OVRD_REG0__IGNR_COEFFICIENTID_SCI_UPDT_L8T11_MASK 0x00000400L
+#define PB1_TX_GLB_SCI_STAT_OVRD_REG0__IGNR_COEFFICIENTID_SCI_UPDT_L8T11__SHIFT 0x0000000a
+#define PB1_TX_GLB_SCI_STAT_OVRD_REG0__IGNR_COEFFICIENT_SCI_UPDT_L0T3_MASK 0x00001000L
+#define PB1_TX_GLB_SCI_STAT_OVRD_REG0__IGNR_COEFFICIENT_SCI_UPDT_L0T3__SHIFT 0x0000000c
+#define PB1_TX_GLB_SCI_STAT_OVRD_REG0__IGNR_COEFFICIENT_SCI_UPDT_L12T15_MASK 0x00008000L
+#define PB1_TX_GLB_SCI_STAT_OVRD_REG0__IGNR_COEFFICIENT_SCI_UPDT_L12T15__SHIFT 0x0000000f
+#define PB1_TX_GLB_SCI_STAT_OVRD_REG0__IGNR_COEFFICIENT_SCI_UPDT_L4T7_MASK 0x00002000L
+#define PB1_TX_GLB_SCI_STAT_OVRD_REG0__IGNR_COEFFICIENT_SCI_UPDT_L4T7__SHIFT 0x0000000d
+#define PB1_TX_GLB_SCI_STAT_OVRD_REG0__IGNR_COEFFICIENT_SCI_UPDT_L8T11_MASK 0x00004000L
+#define PB1_TX_GLB_SCI_STAT_OVRD_REG0__IGNR_COEFFICIENT_SCI_UPDT_L8T11__SHIFT 0x0000000e
+#define PB1_TX_GLB_SCI_STAT_OVRD_REG0__IGNR_INCOHERENTCK_SCI_UPDT_L0T3_MASK 0x00000010L
+#define PB1_TX_GLB_SCI_STAT_OVRD_REG0__IGNR_INCOHERENTCK_SCI_UPDT_L0T3__SHIFT 0x00000004
+#define PB1_TX_GLB_SCI_STAT_OVRD_REG0__IGNR_INCOHERENTCK_SCI_UPDT_L12T15_MASK 0x00000080L
+#define PB1_TX_GLB_SCI_STAT_OVRD_REG0__IGNR_INCOHERENTCK_SCI_UPDT_L12T15__SHIFT 0x00000007
+#define PB1_TX_GLB_SCI_STAT_OVRD_REG0__IGNR_INCOHERENTCK_SCI_UPDT_L4T7_MASK 0x00000020L
+#define PB1_TX_GLB_SCI_STAT_OVRD_REG0__IGNR_INCOHERENTCK_SCI_UPDT_L4T7__SHIFT 0x00000005
+#define PB1_TX_GLB_SCI_STAT_OVRD_REG0__IGNR_INCOHERENTCK_SCI_UPDT_L8T11_MASK 0x00000040L
+#define PB1_TX_GLB_SCI_STAT_OVRD_REG0__IGNR_INCOHERENTCK_SCI_UPDT_L8T11__SHIFT 0x00000006
+#define PB1_TX_GLB_SCI_STAT_OVRD_REG0__IGNR_TXPWR_SCI_UPDT_L0T3_MASK 0x00000001L
+#define PB1_TX_GLB_SCI_STAT_OVRD_REG0__IGNR_TXPWR_SCI_UPDT_L0T3__SHIFT 0x00000000
+#define PB1_TX_GLB_SCI_STAT_OVRD_REG0__IGNR_TXPWR_SCI_UPDT_L12T15_MASK 0x00000008L
+#define PB1_TX_GLB_SCI_STAT_OVRD_REG0__IGNR_TXPWR_SCI_UPDT_L12T15__SHIFT 0x00000003
+#define PB1_TX_GLB_SCI_STAT_OVRD_REG0__IGNR_TXPWR_SCI_UPDT_L4T7_MASK 0x00000002L
+#define PB1_TX_GLB_SCI_STAT_OVRD_REG0__IGNR_TXPWR_SCI_UPDT_L4T7__SHIFT 0x00000001
+#define PB1_TX_GLB_SCI_STAT_OVRD_REG0__IGNR_TXPWR_SCI_UPDT_L8T11_MASK 0x00000004L
+#define PB1_TX_GLB_SCI_STAT_OVRD_REG0__IGNR_TXPWR_SCI_UPDT_L8T11__SHIFT 0x00000002
+#define PB1_TX_LANE0_CTRL_REG0__TX_CFG_DISPCLK_MODE_0_MASK 0x00000001L
+#define PB1_TX_LANE0_CTRL_REG0__TX_CFG_DISPCLK_MODE_0__SHIFT 0x00000000
+#define PB1_TX_LANE0_CTRL_REG0__TX_CFG_INV_DATA_0_MASK 0x00000002L
+#define PB1_TX_LANE0_CTRL_REG0__TX_CFG_INV_DATA_0__SHIFT 0x00000001
+#define PB1_TX_LANE0_CTRL_REG0__TX_CFG_SWING_BOOST_EN_0_MASK 0x00000004L
+#define PB1_TX_LANE0_CTRL_REG0__TX_CFG_SWING_BOOST_EN_0__SHIFT 0x00000002
+#define PB1_TX_LANE0_CTRL_REG0__TX_DBG_PRBS_EN_0_MASK 0x00000008L
+#define PB1_TX_LANE0_CTRL_REG0__TX_DBG_PRBS_EN_0__SHIFT 0x00000003
+#define PB1_TX_LANE0_OVRD_REG0__TX_DCLK_EN_OVRD_EN_0_MASK 0x00000002L
+#define PB1_TX_LANE0_OVRD_REG0__TX_DCLK_EN_OVRD_EN_0__SHIFT 0x00000001
+#define PB1_TX_LANE0_OVRD_REG0__TX_DCLK_EN_OVRD_VAL_0_MASK 0x00000001L
+#define PB1_TX_LANE0_OVRD_REG0__TX_DCLK_EN_OVRD_VAL_0__SHIFT 0x00000000
+#define PB1_TX_LANE0_OVRD_REG0__TX_DRV_DATA_EN_OVRD_EN_0_MASK 0x00000008L
+#define PB1_TX_LANE0_OVRD_REG0__TX_DRV_DATA_EN_OVRD_EN_0__SHIFT 0x00000003
+#define PB1_TX_LANE0_OVRD_REG0__TX_DRV_DATA_EN_OVRD_VAL_0_MASK 0x00000004L
+#define PB1_TX_LANE0_OVRD_REG0__TX_DRV_DATA_EN_OVRD_VAL_0__SHIFT 0x00000002
+#define PB1_TX_LANE0_OVRD_REG0__TX_DRV_PWRON_OVRD_EN_0_MASK 0x00000020L
+#define PB1_TX_LANE0_OVRD_REG0__TX_DRV_PWRON_OVRD_EN_0__SHIFT 0x00000005
+#define PB1_TX_LANE0_OVRD_REG0__TX_DRV_PWRON_OVRD_VAL_0_MASK 0x00000010L
+#define PB1_TX_LANE0_OVRD_REG0__TX_DRV_PWRON_OVRD_VAL_0__SHIFT 0x00000004
+#define PB1_TX_LANE0_OVRD_REG0__TX_FRONTEND_PWRON_OVRD_EN_0_MASK 0x00000080L
+#define PB1_TX_LANE0_OVRD_REG0__TX_FRONTEND_PWRON_OVRD_EN_0__SHIFT 0x00000007
+#define PB1_TX_LANE0_OVRD_REG0__TX_FRONTEND_PWRON_OVRD_VAL_0_MASK 0x00000040L
+#define PB1_TX_LANE0_OVRD_REG0__TX_FRONTEND_PWRON_OVRD_VAL_0__SHIFT 0x00000006
+#define PB1_TX_LANE0_SCI_STAT_OVRD_REG0__COEFFICIENT_0_MASK 0x0000fc00L
+#define PB1_TX_LANE0_SCI_STAT_OVRD_REG0__COEFFICIENT_0__SHIFT 0x0000000a
+#define PB1_TX_LANE0_SCI_STAT_OVRD_REG0__COEFFICIENTID_0_MASK 0x00000300L
+#define PB1_TX_LANE0_SCI_STAT_OVRD_REG0__COEFFICIENTID_0__SHIFT 0x00000008
+#define PB1_TX_LANE0_SCI_STAT_OVRD_REG0__DEEMPH_0_MASK 0x00000080L
+#define PB1_TX_LANE0_SCI_STAT_OVRD_REG0__DEEMPH_0__SHIFT 0x00000007
+#define PB1_TX_LANE0_SCI_STAT_OVRD_REG0__INCOHERENTCK_0_MASK 0x00000008L
+#define PB1_TX_LANE0_SCI_STAT_OVRD_REG0__INCOHERENTCK_0__SHIFT 0x00000003
+#define PB1_TX_LANE0_SCI_STAT_OVRD_REG0__TXMARG_0_MASK 0x00000070L
+#define PB1_TX_LANE0_SCI_STAT_OVRD_REG0__TXMARG_0__SHIFT 0x00000004
+#define PB1_TX_LANE0_SCI_STAT_OVRD_REG0__TXPWR_0_MASK 0x00000007L
+#define PB1_TX_LANE0_SCI_STAT_OVRD_REG0__TXPWR_0__SHIFT 0x00000000
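Each per-lane field above ships as a _MASK/__SHIFT pair, so decoding a status value is a mask-then-shift. A minimal sketch, assuming a hypothetical read_reg() helper and a register-offset macro defined elsewhere; only the TXPWR_0 mask and shift (a 3-bit field at bit 0) are taken from this header:

#include <linux/types.h>

extern u32 read_reg(u32 offset); /* hypothetical MMIO read helper */

/* Extract the lane-0 TXPWR field: isolate bits 2:0, then shift down. */
static u32 pb1_lane0_txpwr(void)
{
	/* The register offset macro is assumed defined elsewhere in the driver. */
	u32 reg = read_reg(PB1_TX_LANE0_SCI_STAT_OVRD_REG0);

	return (reg & PB1_TX_LANE0_SCI_STAT_OVRD_REG0__TXPWR_0_MASK) >>
	       PB1_TX_LANE0_SCI_STAT_OVRD_REG0__TXPWR_0__SHIFT;
}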
+#define PB1_TX_LANE10_CTRL_REG0__TX_CFG_DISPCLK_MODE_10_MASK 0x00000001L
+#define PB1_TX_LANE10_CTRL_REG0__TX_CFG_DISPCLK_MODE_10__SHIFT 0x00000000
+#define PB1_TX_LANE10_CTRL_REG0__TX_CFG_INV_DATA_10_MASK 0x00000002L
+#define PB1_TX_LANE10_CTRL_REG0__TX_CFG_INV_DATA_10__SHIFT 0x00000001
+#define PB1_TX_LANE10_CTRL_REG0__TX_CFG_SWING_BOOST_EN_10_MASK 0x00000004L
+#define PB1_TX_LANE10_CTRL_REG0__TX_CFG_SWING_BOOST_EN_10__SHIFT 0x00000002
+#define PB1_TX_LANE10_CTRL_REG0__TX_DBG_PRBS_EN_10_MASK 0x00000008L
+#define PB1_TX_LANE10_CTRL_REG0__TX_DBG_PRBS_EN_10__SHIFT 0x00000003
+#define PB1_TX_LANE10_OVRD_REG0__TX_DCLK_EN_OVRD_EN_10_MASK 0x00000002L
+#define PB1_TX_LANE10_OVRD_REG0__TX_DCLK_EN_OVRD_EN_10__SHIFT 0x00000001
+#define PB1_TX_LANE10_OVRD_REG0__TX_DCLK_EN_OVRD_VAL_10_MASK 0x00000001L
+#define PB1_TX_LANE10_OVRD_REG0__TX_DCLK_EN_OVRD_VAL_10__SHIFT 0x00000000
+#define PB1_TX_LANE10_OVRD_REG0__TX_DRV_DATA_EN_OVRD_EN_10_MASK 0x00000008L
+#define PB1_TX_LANE10_OVRD_REG0__TX_DRV_DATA_EN_OVRD_EN_10__SHIFT 0x00000003
+#define PB1_TX_LANE10_OVRD_REG0__TX_DRV_DATA_EN_OVRD_VAL_10_MASK 0x00000004L
+#define PB1_TX_LANE10_OVRD_REG0__TX_DRV_DATA_EN_OVRD_VAL_10__SHIFT 0x00000002
+#define PB1_TX_LANE10_OVRD_REG0__TX_DRV_PWRON_OVRD_EN_10_MASK 0x00000020L
+#define PB1_TX_LANE10_OVRD_REG0__TX_DRV_PWRON_OVRD_EN_10__SHIFT 0x00000005
+#define PB1_TX_LANE10_OVRD_REG0__TX_DRV_PWRON_OVRD_VAL_10_MASK 0x00000010L
+#define PB1_TX_LANE10_OVRD_REG0__TX_DRV_PWRON_OVRD_VAL_10__SHIFT 0x00000004
+#define PB1_TX_LANE10_OVRD_REG0__TX_FRONTEND_PWRON_OVRD_EN_10_MASK 0x00000080L
+#define PB1_TX_LANE10_OVRD_REG0__TX_FRONTEND_PWRON_OVRD_EN_10__SHIFT 0x00000007
+#define PB1_TX_LANE10_OVRD_REG0__TX_FRONTEND_PWRON_OVRD_VAL_10_MASK 0x00000040L
+#define PB1_TX_LANE10_OVRD_REG0__TX_FRONTEND_PWRON_OVRD_VAL_10__SHIFT 0x00000006
+#define PB1_TX_LANE10_SCI_STAT_OVRD_REG0__COEFFICIENT_10_MASK 0x0000fc00L
+#define PB1_TX_LANE10_SCI_STAT_OVRD_REG0__COEFFICIENT_10__SHIFT 0x0000000a
+#define PB1_TX_LANE10_SCI_STAT_OVRD_REG0__COEFFICIENTID_10_MASK 0x00000300L
+#define PB1_TX_LANE10_SCI_STAT_OVRD_REG0__COEFFICIENTID_10__SHIFT 0x00000008
+#define PB1_TX_LANE10_SCI_STAT_OVRD_REG0__DEEMPH_10_MASK 0x00000080L
+#define PB1_TX_LANE10_SCI_STAT_OVRD_REG0__DEEMPH_10__SHIFT 0x00000007
+#define PB1_TX_LANE10_SCI_STAT_OVRD_REG0__INCOHERENTCK_10_MASK 0x00000008L
+#define PB1_TX_LANE10_SCI_STAT_OVRD_REG0__INCOHERENTCK_10__SHIFT 0x00000003
+#define PB1_TX_LANE10_SCI_STAT_OVRD_REG0__TXMARG_10_MASK 0x00000070L
+#define PB1_TX_LANE10_SCI_STAT_OVRD_REG0__TXMARG_10__SHIFT 0x00000004
+#define PB1_TX_LANE10_SCI_STAT_OVRD_REG0__TXPWR_10_MASK 0x00000007L
+#define PB1_TX_LANE10_SCI_STAT_OVRD_REG0__TXPWR_10__SHIFT 0x00000000
+#define PB1_TX_LANE11_CTRL_REG0__TX_CFG_DISPCLK_MODE_11_MASK 0x00000001L
+#define PB1_TX_LANE11_CTRL_REG0__TX_CFG_DISPCLK_MODE_11__SHIFT 0x00000000
+#define PB1_TX_LANE11_CTRL_REG0__TX_CFG_INV_DATA_11_MASK 0x00000002L
+#define PB1_TX_LANE11_CTRL_REG0__TX_CFG_INV_DATA_11__SHIFT 0x00000001
+#define PB1_TX_LANE11_CTRL_REG0__TX_CFG_SWING_BOOST_EN_11_MASK 0x00000004L
+#define PB1_TX_LANE11_CTRL_REG0__TX_CFG_SWING_BOOST_EN_11__SHIFT 0x00000002
+#define PB1_TX_LANE11_CTRL_REG0__TX_DBG_PRBS_EN_11_MASK 0x00000008L
+#define PB1_TX_LANE11_CTRL_REG0__TX_DBG_PRBS_EN_11__SHIFT 0x00000003
+#define PB1_TX_LANE11_OVRD_REG0__TX_DCLK_EN_OVRD_EN_11_MASK 0x00000002L
+#define PB1_TX_LANE11_OVRD_REG0__TX_DCLK_EN_OVRD_EN_11__SHIFT 0x00000001
+#define PB1_TX_LANE11_OVRD_REG0__TX_DCLK_EN_OVRD_VAL_11_MASK 0x00000001L
+#define PB1_TX_LANE11_OVRD_REG0__TX_DCLK_EN_OVRD_VAL_11__SHIFT 0x00000000
+#define PB1_TX_LANE11_OVRD_REG0__TX_DRV_DATA_EN_OVRD_EN_11_MASK 0x00000008L
+#define PB1_TX_LANE11_OVRD_REG0__TX_DRV_DATA_EN_OVRD_EN_11__SHIFT 0x00000003
+#define PB1_TX_LANE11_OVRD_REG0__TX_DRV_DATA_EN_OVRD_VAL_11_MASK 0x00000004L
+#define PB1_TX_LANE11_OVRD_REG0__TX_DRV_DATA_EN_OVRD_VAL_11__SHIFT 0x00000002
+#define PB1_TX_LANE11_OVRD_REG0__TX_DRV_PWRON_OVRD_EN_11_MASK 0x00000020L
+#define PB1_TX_LANE11_OVRD_REG0__TX_DRV_PWRON_OVRD_EN_11__SHIFT 0x00000005
+#define PB1_TX_LANE11_OVRD_REG0__TX_DRV_PWRON_OVRD_VAL_11_MASK 0x00000010L
+#define PB1_TX_LANE11_OVRD_REG0__TX_DRV_PWRON_OVRD_VAL_11__SHIFT 0x00000004
+#define PB1_TX_LANE11_OVRD_REG0__TX_FRONTEND_PWRON_OVRD_EN_11_MASK 0x00000080L
+#define PB1_TX_LANE11_OVRD_REG0__TX_FRONTEND_PWRON_OVRD_EN_11__SHIFT 0x00000007
+#define PB1_TX_LANE11_OVRD_REG0__TX_FRONTEND_PWRON_OVRD_VAL_11_MASK 0x00000040L
+#define PB1_TX_LANE11_OVRD_REG0__TX_FRONTEND_PWRON_OVRD_VAL_11__SHIFT 0x00000006
+#define PB1_TX_LANE11_SCI_STAT_OVRD_REG0__COEFFICIENT_11_MASK 0x0000fc00L
+#define PB1_TX_LANE11_SCI_STAT_OVRD_REG0__COEFFICIENT_11__SHIFT 0x0000000a
+#define PB1_TX_LANE11_SCI_STAT_OVRD_REG0__COEFFICIENTID_11_MASK 0x00000300L
+#define PB1_TX_LANE11_SCI_STAT_OVRD_REG0__COEFFICIENTID_11__SHIFT 0x00000008
+#define PB1_TX_LANE11_SCI_STAT_OVRD_REG0__DEEMPH_11_MASK 0x00000080L
+#define PB1_TX_LANE11_SCI_STAT_OVRD_REG0__DEEMPH_11__SHIFT 0x00000007
+#define PB1_TX_LANE11_SCI_STAT_OVRD_REG0__INCOHERENTCK_11_MASK 0x00000008L
+#define PB1_TX_LANE11_SCI_STAT_OVRD_REG0__INCOHERENTCK_11__SHIFT 0x00000003
+#define PB1_TX_LANE11_SCI_STAT_OVRD_REG0__TXMARG_11_MASK 0x00000070L
+#define PB1_TX_LANE11_SCI_STAT_OVRD_REG0__TXMARG_11__SHIFT 0x00000004
+#define PB1_TX_LANE11_SCI_STAT_OVRD_REG0__TXPWR_11_MASK 0x00000007L
+#define PB1_TX_LANE11_SCI_STAT_OVRD_REG0__TXPWR_11__SHIFT 0x00000000
+#define PB1_TX_LANE12_CTRL_REG0__TX_CFG_DISPCLK_MODE_12_MASK 0x00000001L
+#define PB1_TX_LANE12_CTRL_REG0__TX_CFG_DISPCLK_MODE_12__SHIFT 0x00000000
+#define PB1_TX_LANE12_CTRL_REG0__TX_CFG_INV_DATA_12_MASK 0x00000002L
+#define PB1_TX_LANE12_CTRL_REG0__TX_CFG_INV_DATA_12__SHIFT 0x00000001
+#define PB1_TX_LANE12_CTRL_REG0__TX_CFG_SWING_BOOST_EN_12_MASK 0x00000004L
+#define PB1_TX_LANE12_CTRL_REG0__TX_CFG_SWING_BOOST_EN_12__SHIFT 0x00000002
+#define PB1_TX_LANE12_CTRL_REG0__TX_DBG_PRBS_EN_12_MASK 0x00000008L
+#define PB1_TX_LANE12_CTRL_REG0__TX_DBG_PRBS_EN_12__SHIFT 0x00000003
+#define PB1_TX_LANE12_OVRD_REG0__TX_DCLK_EN_OVRD_EN_12_MASK 0x00000002L
+#define PB1_TX_LANE12_OVRD_REG0__TX_DCLK_EN_OVRD_EN_12__SHIFT 0x00000001
+#define PB1_TX_LANE12_OVRD_REG0__TX_DCLK_EN_OVRD_VAL_12_MASK 0x00000001L
+#define PB1_TX_LANE12_OVRD_REG0__TX_DCLK_EN_OVRD_VAL_12__SHIFT 0x00000000
+#define PB1_TX_LANE12_OVRD_REG0__TX_DRV_DATA_EN_OVRD_EN_12_MASK 0x00000008L
+#define PB1_TX_LANE12_OVRD_REG0__TX_DRV_DATA_EN_OVRD_EN_12__SHIFT 0x00000003
+#define PB1_TX_LANE12_OVRD_REG0__TX_DRV_DATA_EN_OVRD_VAL_12_MASK 0x00000004L
+#define PB1_TX_LANE12_OVRD_REG0__TX_DRV_DATA_EN_OVRD_VAL_12__SHIFT 0x00000002
+#define PB1_TX_LANE12_OVRD_REG0__TX_DRV_PWRON_OVRD_EN_12_MASK 0x00000020L
+#define PB1_TX_LANE12_OVRD_REG0__TX_DRV_PWRON_OVRD_EN_12__SHIFT 0x00000005
+#define PB1_TX_LANE12_OVRD_REG0__TX_DRV_PWRON_OVRD_VAL_12_MASK 0x00000010L
+#define PB1_TX_LANE12_OVRD_REG0__TX_DRV_PWRON_OVRD_VAL_12__SHIFT 0x00000004
+#define PB1_TX_LANE12_OVRD_REG0__TX_FRONTEND_PWRON_OVRD_EN_12_MASK 0x00000080L
+#define PB1_TX_LANE12_OVRD_REG0__TX_FRONTEND_PWRON_OVRD_EN_12__SHIFT 0x00000007
+#define PB1_TX_LANE12_OVRD_REG0__TX_FRONTEND_PWRON_OVRD_VAL_12_MASK 0x00000040L
+#define PB1_TX_LANE12_OVRD_REG0__TX_FRONTEND_PWRON_OVRD_VAL_12__SHIFT 0x00000006
+#define PB1_TX_LANE12_SCI_STAT_OVRD_REG0__COEFFICIENT_12_MASK 0x0000fc00L
+#define PB1_TX_LANE12_SCI_STAT_OVRD_REG0__COEFFICIENT_12__SHIFT 0x0000000a
+#define PB1_TX_LANE12_SCI_STAT_OVRD_REG0__COEFFICIENTID_12_MASK 0x00000300L
+#define PB1_TX_LANE12_SCI_STAT_OVRD_REG0__COEFFICIENTID_12__SHIFT 0x00000008
+#define PB1_TX_LANE12_SCI_STAT_OVRD_REG0__DEEMPH_12_MASK 0x00000080L
+#define PB1_TX_LANE12_SCI_STAT_OVRD_REG0__DEEMPH_12__SHIFT 0x00000007
+#define PB1_TX_LANE12_SCI_STAT_OVRD_REG0__INCOHERENTCK_12_MASK 0x00000008L
+#define PB1_TX_LANE12_SCI_STAT_OVRD_REG0__INCOHERENTCK_12__SHIFT 0x00000003
+#define PB1_TX_LANE12_SCI_STAT_OVRD_REG0__TXMARG_12_MASK 0x00000070L
+#define PB1_TX_LANE12_SCI_STAT_OVRD_REG0__TXMARG_12__SHIFT 0x00000004
+#define PB1_TX_LANE12_SCI_STAT_OVRD_REG0__TXPWR_12_MASK 0x00000007L
+#define PB1_TX_LANE12_SCI_STAT_OVRD_REG0__TXPWR_12__SHIFT 0x00000000
+#define PB1_TX_LANE13_CTRL_REG0__TX_CFG_DISPCLK_MODE_13_MASK 0x00000001L
+#define PB1_TX_LANE13_CTRL_REG0__TX_CFG_DISPCLK_MODE_13__SHIFT 0x00000000
+#define PB1_TX_LANE13_CTRL_REG0__TX_CFG_INV_DATA_13_MASK 0x00000002L
+#define PB1_TX_LANE13_CTRL_REG0__TX_CFG_INV_DATA_13__SHIFT 0x00000001
+#define PB1_TX_LANE13_CTRL_REG0__TX_CFG_SWING_BOOST_EN_13_MASK 0x00000004L
+#define PB1_TX_LANE13_CTRL_REG0__TX_CFG_SWING_BOOST_EN_13__SHIFT 0x00000002
+#define PB1_TX_LANE13_CTRL_REG0__TX_DBG_PRBS_EN_13_MASK 0x00000008L
+#define PB1_TX_LANE13_CTRL_REG0__TX_DBG_PRBS_EN_13__SHIFT 0x00000003
+#define PB1_TX_LANE13_OVRD_REG0__TX_DCLK_EN_OVRD_EN_13_MASK 0x00000002L
+#define PB1_TX_LANE13_OVRD_REG0__TX_DCLK_EN_OVRD_EN_13__SHIFT 0x00000001
+#define PB1_TX_LANE13_OVRD_REG0__TX_DCLK_EN_OVRD_VAL_13_MASK 0x00000001L
+#define PB1_TX_LANE13_OVRD_REG0__TX_DCLK_EN_OVRD_VAL_13__SHIFT 0x00000000
+#define PB1_TX_LANE13_OVRD_REG0__TX_DRV_DATA_EN_OVRD_EN_13_MASK 0x00000008L
+#define PB1_TX_LANE13_OVRD_REG0__TX_DRV_DATA_EN_OVRD_EN_13__SHIFT 0x00000003
+#define PB1_TX_LANE13_OVRD_REG0__TX_DRV_DATA_EN_OVRD_VAL_13_MASK 0x00000004L
+#define PB1_TX_LANE13_OVRD_REG0__TX_DRV_DATA_EN_OVRD_VAL_13__SHIFT 0x00000002
+#define PB1_TX_LANE13_OVRD_REG0__TX_DRV_PWRON_OVRD_EN_13_MASK 0x00000020L
+#define PB1_TX_LANE13_OVRD_REG0__TX_DRV_PWRON_OVRD_EN_13__SHIFT 0x00000005
+#define PB1_TX_LANE13_OVRD_REG0__TX_DRV_PWRON_OVRD_VAL_13_MASK 0x00000010L
+#define PB1_TX_LANE13_OVRD_REG0__TX_DRV_PWRON_OVRD_VAL_13__SHIFT 0x00000004
+#define PB1_TX_LANE13_OVRD_REG0__TX_FRONTEND_PWRON_OVRD_EN_13_MASK 0x00000080L
+#define PB1_TX_LANE13_OVRD_REG0__TX_FRONTEND_PWRON_OVRD_EN_13__SHIFT 0x00000007
+#define PB1_TX_LANE13_OVRD_REG0__TX_FRONTEND_PWRON_OVRD_VAL_13_MASK 0x00000040L
+#define PB1_TX_LANE13_OVRD_REG0__TX_FRONTEND_PWRON_OVRD_VAL_13__SHIFT 0x00000006
+#define PB1_TX_LANE13_SCI_STAT_OVRD_REG0__COEFFICIENT_13_MASK 0x0000fc00L
+#define PB1_TX_LANE13_SCI_STAT_OVRD_REG0__COEFFICIENT_13__SHIFT 0x0000000a
+#define PB1_TX_LANE13_SCI_STAT_OVRD_REG0__COEFFICIENTID_13_MASK 0x00000300L
+#define PB1_TX_LANE13_SCI_STAT_OVRD_REG0__COEFFICIENTID_13__SHIFT 0x00000008
+#define PB1_TX_LANE13_SCI_STAT_OVRD_REG0__DEEMPH_13_MASK 0x00000080L
+#define PB1_TX_LANE13_SCI_STAT_OVRD_REG0__DEEMPH_13__SHIFT 0x00000007
+#define PB1_TX_LANE13_SCI_STAT_OVRD_REG0__INCOHERENTCK_13_MASK 0x00000008L
+#define PB1_TX_LANE13_SCI_STAT_OVRD_REG0__INCOHERENTCK_13__SHIFT 0x00000003
+#define PB1_TX_LANE13_SCI_STAT_OVRD_REG0__TXMARG_13_MASK 0x00000070L
+#define PB1_TX_LANE13_SCI_STAT_OVRD_REG0__TXMARG_13__SHIFT 0x00000004
+#define PB1_TX_LANE13_SCI_STAT_OVRD_REG0__TXPWR_13_MASK 0x00000007L
+#define PB1_TX_LANE13_SCI_STAT_OVRD_REG0__TXPWR_13__SHIFT 0x00000000
+#define PB1_TX_LANE14_CTRL_REG0__TX_CFG_DISPCLK_MODE_14_MASK 0x00000001L
+#define PB1_TX_LANE14_CTRL_REG0__TX_CFG_DISPCLK_MODE_14__SHIFT 0x00000000
+#define PB1_TX_LANE14_CTRL_REG0__TX_CFG_INV_DATA_14_MASK 0x00000002L
+#define PB1_TX_LANE14_CTRL_REG0__TX_CFG_INV_DATA_14__SHIFT 0x00000001
+#define PB1_TX_LANE14_CTRL_REG0__TX_CFG_SWING_BOOST_EN_14_MASK 0x00000004L
+#define PB1_TX_LANE14_CTRL_REG0__TX_CFG_SWING_BOOST_EN_14__SHIFT 0x00000002
+#define PB1_TX_LANE14_CTRL_REG0__TX_DBG_PRBS_EN_14_MASK 0x00000008L
+#define PB1_TX_LANE14_CTRL_REG0__TX_DBG_PRBS_EN_14__SHIFT 0x00000003
+#define PB1_TX_LANE14_OVRD_REG0__TX_DCLK_EN_OVRD_EN_14_MASK 0x00000002L
+#define PB1_TX_LANE14_OVRD_REG0__TX_DCLK_EN_OVRD_EN_14__SHIFT 0x00000001
+#define PB1_TX_LANE14_OVRD_REG0__TX_DCLK_EN_OVRD_VAL_14_MASK 0x00000001L
+#define PB1_TX_LANE14_OVRD_REG0__TX_DCLK_EN_OVRD_VAL_14__SHIFT 0x00000000
+#define PB1_TX_LANE14_OVRD_REG0__TX_DRV_DATA_EN_OVRD_EN_14_MASK 0x00000008L
+#define PB1_TX_LANE14_OVRD_REG0__TX_DRV_DATA_EN_OVRD_EN_14__SHIFT 0x00000003
+#define PB1_TX_LANE14_OVRD_REG0__TX_DRV_DATA_EN_OVRD_VAL_14_MASK 0x00000004L
+#define PB1_TX_LANE14_OVRD_REG0__TX_DRV_DATA_EN_OVRD_VAL_14__SHIFT 0x00000002
+#define PB1_TX_LANE14_OVRD_REG0__TX_DRV_PWRON_OVRD_EN_14_MASK 0x00000020L
+#define PB1_TX_LANE14_OVRD_REG0__TX_DRV_PWRON_OVRD_EN_14__SHIFT 0x00000005
+#define PB1_TX_LANE14_OVRD_REG0__TX_DRV_PWRON_OVRD_VAL_14_MASK 0x00000010L
+#define PB1_TX_LANE14_OVRD_REG0__TX_DRV_PWRON_OVRD_VAL_14__SHIFT 0x00000004
+#define PB1_TX_LANE14_OVRD_REG0__TX_FRONTEND_PWRON_OVRD_EN_14_MASK 0x00000080L
+#define PB1_TX_LANE14_OVRD_REG0__TX_FRONTEND_PWRON_OVRD_EN_14__SHIFT 0x00000007
+#define PB1_TX_LANE14_OVRD_REG0__TX_FRONTEND_PWRON_OVRD_VAL_14_MASK 0x00000040L
+#define PB1_TX_LANE14_OVRD_REG0__TX_FRONTEND_PWRON_OVRD_VAL_14__SHIFT 0x00000006
+#define PB1_TX_LANE14_SCI_STAT_OVRD_REG0__COEFFICIENT_14_MASK 0x0000fc00L
+#define PB1_TX_LANE14_SCI_STAT_OVRD_REG0__COEFFICIENT_14__SHIFT 0x0000000a
+#define PB1_TX_LANE14_SCI_STAT_OVRD_REG0__COEFFICIENTID_14_MASK 0x00000300L
+#define PB1_TX_LANE14_SCI_STAT_OVRD_REG0__COEFFICIENTID_14__SHIFT 0x00000008
+#define PB1_TX_LANE14_SCI_STAT_OVRD_REG0__DEEMPH_14_MASK 0x00000080L
+#define PB1_TX_LANE14_SCI_STAT_OVRD_REG0__DEEMPH_14__SHIFT 0x00000007
+#define PB1_TX_LANE14_SCI_STAT_OVRD_REG0__INCOHERENTCK_14_MASK 0x00000008L
+#define PB1_TX_LANE14_SCI_STAT_OVRD_REG0__INCOHERENTCK_14__SHIFT 0x00000003
+#define PB1_TX_LANE14_SCI_STAT_OVRD_REG0__TXMARG_14_MASK 0x00000070L
+#define PB1_TX_LANE14_SCI_STAT_OVRD_REG0__TXMARG_14__SHIFT 0x00000004
+#define PB1_TX_LANE14_SCI_STAT_OVRD_REG0__TXPWR_14_MASK 0x00000007L
+#define PB1_TX_LANE14_SCI_STAT_OVRD_REG0__TXPWR_14__SHIFT 0x00000000
+#define PB1_TX_LANE15_CTRL_REG0__TX_CFG_DISPCLK_MODE_15_MASK 0x00000001L
+#define PB1_TX_LANE15_CTRL_REG0__TX_CFG_DISPCLK_MODE_15__SHIFT 0x00000000
+#define PB1_TX_LANE15_CTRL_REG0__TX_CFG_INV_DATA_15_MASK 0x00000002L
+#define PB1_TX_LANE15_CTRL_REG0__TX_CFG_INV_DATA_15__SHIFT 0x00000001
+#define PB1_TX_LANE15_CTRL_REG0__TX_CFG_SWING_BOOST_EN_15_MASK 0x00000004L
+#define PB1_TX_LANE15_CTRL_REG0__TX_CFG_SWING_BOOST_EN_15__SHIFT 0x00000002
+#define PB1_TX_LANE15_CTRL_REG0__TX_DBG_PRBS_EN_15_MASK 0x00000008L
+#define PB1_TX_LANE15_CTRL_REG0__TX_DBG_PRBS_EN_15__SHIFT 0x00000003
+#define PB1_TX_LANE15_OVRD_REG0__TX_DCLK_EN_OVRD_EN_15_MASK 0x00000002L
+#define PB1_TX_LANE15_OVRD_REG0__TX_DCLK_EN_OVRD_EN_15__SHIFT 0x00000001
+#define PB1_TX_LANE15_OVRD_REG0__TX_DCLK_EN_OVRD_VAL_15_MASK 0x00000001L
+#define PB1_TX_LANE15_OVRD_REG0__TX_DCLK_EN_OVRD_VAL_15__SHIFT 0x00000000
+#define PB1_TX_LANE15_OVRD_REG0__TX_DRV_DATA_EN_OVRD_EN_15_MASK 0x00000008L
+#define PB1_TX_LANE15_OVRD_REG0__TX_DRV_DATA_EN_OVRD_EN_15__SHIFT 0x00000003
+#define PB1_TX_LANE15_OVRD_REG0__TX_DRV_DATA_EN_OVRD_VAL_15_MASK 0x00000004L
+#define PB1_TX_LANE15_OVRD_REG0__TX_DRV_DATA_EN_OVRD_VAL_15__SHIFT 0x00000002
+#define PB1_TX_LANE15_OVRD_REG0__TX_DRV_PWRON_OVRD_EN_15_MASK 0x00000020L
+#define PB1_TX_LANE15_OVRD_REG0__TX_DRV_PWRON_OVRD_EN_15__SHIFT 0x00000005
+#define PB1_TX_LANE15_OVRD_REG0__TX_DRV_PWRON_OVRD_VAL_15_MASK 0x00000010L
+#define PB1_TX_LANE15_OVRD_REG0__TX_DRV_PWRON_OVRD_VAL_15__SHIFT 0x00000004
+#define PB1_TX_LANE15_OVRD_REG0__TX_FRONTEND_PWRON_OVRD_EN_15_MASK 0x00000080L
+#define PB1_TX_LANE15_OVRD_REG0__TX_FRONTEND_PWRON_OVRD_EN_15__SHIFT 0x00000007
+#define PB1_TX_LANE15_OVRD_REG0__TX_FRONTEND_PWRON_OVRD_VAL_15_MASK 0x00000040L
+#define PB1_TX_LANE15_OVRD_REG0__TX_FRONTEND_PWRON_OVRD_VAL_15__SHIFT 0x00000006
+#define PB1_TX_LANE15_SCI_STAT_OVRD_REG0__COEFFICIENT_15_MASK 0x0000fc00L
+#define PB1_TX_LANE15_SCI_STAT_OVRD_REG0__COEFFICIENT_15__SHIFT 0x0000000a
+#define PB1_TX_LANE15_SCI_STAT_OVRD_REG0__COEFFICIENTID_15_MASK 0x00000300L
+#define PB1_TX_LANE15_SCI_STAT_OVRD_REG0__COEFFICIENTID_15__SHIFT 0x00000008
+#define PB1_TX_LANE15_SCI_STAT_OVRD_REG0__DEEMPH_15_MASK 0x00000080L
+#define PB1_TX_LANE15_SCI_STAT_OVRD_REG0__DEEMPH_15__SHIFT 0x00000007
+#define PB1_TX_LANE15_SCI_STAT_OVRD_REG0__INCOHERENTCK_15_MASK 0x00000008L
+#define PB1_TX_LANE15_SCI_STAT_OVRD_REG0__INCOHERENTCK_15__SHIFT 0x00000003
+#define PB1_TX_LANE15_SCI_STAT_OVRD_REG0__TXMARG_15_MASK 0x00000070L
+#define PB1_TX_LANE15_SCI_STAT_OVRD_REG0__TXMARG_15__SHIFT 0x00000004
+#define PB1_TX_LANE15_SCI_STAT_OVRD_REG0__TXPWR_15_MASK 0x00000007L
+#define PB1_TX_LANE15_SCI_STAT_OVRD_REG0__TXPWR_15__SHIFT 0x00000000
+#define PB1_TX_LANE1_CTRL_REG0__TX_CFG_DISPCLK_MODE_1_MASK 0x00000001L
+#define PB1_TX_LANE1_CTRL_REG0__TX_CFG_DISPCLK_MODE_1__SHIFT 0x00000000
+#define PB1_TX_LANE1_CTRL_REG0__TX_CFG_INV_DATA_1_MASK 0x00000002L
+#define PB1_TX_LANE1_CTRL_REG0__TX_CFG_INV_DATA_1__SHIFT 0x00000001
+#define PB1_TX_LANE1_CTRL_REG0__TX_CFG_SWING_BOOST_EN_1_MASK 0x00000004L
+#define PB1_TX_LANE1_CTRL_REG0__TX_CFG_SWING_BOOST_EN_1__SHIFT 0x00000002
+#define PB1_TX_LANE1_CTRL_REG0__TX_DBG_PRBS_EN_1_MASK 0x00000008L
+#define PB1_TX_LANE1_CTRL_REG0__TX_DBG_PRBS_EN_1__SHIFT 0x00000003
+#define PB1_TX_LANE1_OVRD_REG0__TX_DCLK_EN_OVRD_EN_1_MASK 0x00000002L
+#define PB1_TX_LANE1_OVRD_REG0__TX_DCLK_EN_OVRD_EN_1__SHIFT 0x00000001
+#define PB1_TX_LANE1_OVRD_REG0__TX_DCLK_EN_OVRD_VAL_1_MASK 0x00000001L
+#define PB1_TX_LANE1_OVRD_REG0__TX_DCLK_EN_OVRD_VAL_1__SHIFT 0x00000000
+#define PB1_TX_LANE1_OVRD_REG0__TX_DRV_DATA_EN_OVRD_EN_1_MASK 0x00000008L
+#define PB1_TX_LANE1_OVRD_REG0__TX_DRV_DATA_EN_OVRD_EN_1__SHIFT 0x00000003
+#define PB1_TX_LANE1_OVRD_REG0__TX_DRV_DATA_EN_OVRD_VAL_1_MASK 0x00000004L
+#define PB1_TX_LANE1_OVRD_REG0__TX_DRV_DATA_EN_OVRD_VAL_1__SHIFT 0x00000002
+#define PB1_TX_LANE1_OVRD_REG0__TX_DRV_PWRON_OVRD_EN_1_MASK 0x00000020L
+#define PB1_TX_LANE1_OVRD_REG0__TX_DRV_PWRON_OVRD_EN_1__SHIFT 0x00000005
+#define PB1_TX_LANE1_OVRD_REG0__TX_DRV_PWRON_OVRD_VAL_1_MASK 0x00000010L
+#define PB1_TX_LANE1_OVRD_REG0__TX_DRV_PWRON_OVRD_VAL_1__SHIFT 0x00000004
+#define PB1_TX_LANE1_OVRD_REG0__TX_FRONTEND_PWRON_OVRD_EN_1_MASK 0x00000080L
+#define PB1_TX_LANE1_OVRD_REG0__TX_FRONTEND_PWRON_OVRD_EN_1__SHIFT 0x00000007
+#define PB1_TX_LANE1_OVRD_REG0__TX_FRONTEND_PWRON_OVRD_VAL_1_MASK 0x00000040L
+#define PB1_TX_LANE1_OVRD_REG0__TX_FRONTEND_PWRON_OVRD_VAL_1__SHIFT 0x00000006
+#define PB1_TX_LANE1_SCI_STAT_OVRD_REG0__COEFFICIENT_1_MASK 0x0000fc00L
+#define PB1_TX_LANE1_SCI_STAT_OVRD_REG0__COEFFICIENT_1__SHIFT 0x0000000a
+#define PB1_TX_LANE1_SCI_STAT_OVRD_REG0__COEFFICIENTID_1_MASK 0x00000300L
+#define PB1_TX_LANE1_SCI_STAT_OVRD_REG0__COEFFICIENTID_1__SHIFT 0x00000008
+#define PB1_TX_LANE1_SCI_STAT_OVRD_REG0__DEEMPH_1_MASK 0x00000080L
+#define PB1_TX_LANE1_SCI_STAT_OVRD_REG0__DEEMPH_1__SHIFT 0x00000007
+#define PB1_TX_LANE1_SCI_STAT_OVRD_REG0__INCOHERENTCK_1_MASK 0x00000008L
+#define PB1_TX_LANE1_SCI_STAT_OVRD_REG0__INCOHERENTCK_1__SHIFT 0x00000003
+#define PB1_TX_LANE1_SCI_STAT_OVRD_REG0__TXMARG_1_MASK 0x00000070L
+#define PB1_TX_LANE1_SCI_STAT_OVRD_REG0__TXMARG_1__SHIFT 0x00000004
+#define PB1_TX_LANE1_SCI_STAT_OVRD_REG0__TXPWR_1_MASK 0x00000007L
+#define PB1_TX_LANE1_SCI_STAT_OVRD_REG0__TXPWR_1__SHIFT 0x00000000
+#define PB1_TX_LANE2_CTRL_REG0__TX_CFG_DISPCLK_MODE_2_MASK 0x00000001L
+#define PB1_TX_LANE2_CTRL_REG0__TX_CFG_DISPCLK_MODE_2__SHIFT 0x00000000
+#define PB1_TX_LANE2_CTRL_REG0__TX_CFG_INV_DATA_2_MASK 0x00000002L
+#define PB1_TX_LANE2_CTRL_REG0__TX_CFG_INV_DATA_2__SHIFT 0x00000001
+#define PB1_TX_LANE2_CTRL_REG0__TX_CFG_SWING_BOOST_EN_2_MASK 0x00000004L
+#define PB1_TX_LANE2_CTRL_REG0__TX_CFG_SWING_BOOST_EN_2__SHIFT 0x00000002
+#define PB1_TX_LANE2_CTRL_REG0__TX_DBG_PRBS_EN_2_MASK 0x00000008L
+#define PB1_TX_LANE2_CTRL_REG0__TX_DBG_PRBS_EN_2__SHIFT 0x00000003
+#define PB1_TX_LANE2_OVRD_REG0__TX_DCLK_EN_OVRD_EN_2_MASK 0x00000002L
+#define PB1_TX_LANE2_OVRD_REG0__TX_DCLK_EN_OVRD_EN_2__SHIFT 0x00000001
+#define PB1_TX_LANE2_OVRD_REG0__TX_DCLK_EN_OVRD_VAL_2_MASK 0x00000001L
+#define PB1_TX_LANE2_OVRD_REG0__TX_DCLK_EN_OVRD_VAL_2__SHIFT 0x00000000
+#define PB1_TX_LANE2_OVRD_REG0__TX_DRV_DATA_EN_OVRD_EN_2_MASK 0x00000008L
+#define PB1_TX_LANE2_OVRD_REG0__TX_DRV_DATA_EN_OVRD_EN_2__SHIFT 0x00000003
+#define PB1_TX_LANE2_OVRD_REG0__TX_DRV_DATA_EN_OVRD_VAL_2_MASK 0x00000004L
+#define PB1_TX_LANE2_OVRD_REG0__TX_DRV_DATA_EN_OVRD_VAL_2__SHIFT 0x00000002
+#define PB1_TX_LANE2_OVRD_REG0__TX_DRV_PWRON_OVRD_EN_2_MASK 0x00000020L
+#define PB1_TX_LANE2_OVRD_REG0__TX_DRV_PWRON_OVRD_EN_2__SHIFT 0x00000005
+#define PB1_TX_LANE2_OVRD_REG0__TX_DRV_PWRON_OVRD_VAL_2_MASK 0x00000010L
+#define PB1_TX_LANE2_OVRD_REG0__TX_DRV_PWRON_OVRD_VAL_2__SHIFT 0x00000004
+#define PB1_TX_LANE2_OVRD_REG0__TX_FRONTEND_PWRON_OVRD_EN_2_MASK 0x00000080L
+#define PB1_TX_LANE2_OVRD_REG0__TX_FRONTEND_PWRON_OVRD_EN_2__SHIFT 0x00000007
+#define PB1_TX_LANE2_OVRD_REG0__TX_FRONTEND_PWRON_OVRD_VAL_2_MASK 0x00000040L
+#define PB1_TX_LANE2_OVRD_REG0__TX_FRONTEND_PWRON_OVRD_VAL_2__SHIFT 0x00000006
+#define PB1_TX_LANE2_SCI_STAT_OVRD_REG0__COEFFICIENT_2_MASK 0x0000fc00L
+#define PB1_TX_LANE2_SCI_STAT_OVRD_REG0__COEFFICIENT_2__SHIFT 0x0000000a
+#define PB1_TX_LANE2_SCI_STAT_OVRD_REG0__COEFFICIENTID_2_MASK 0x00000300L
+#define PB1_TX_LANE2_SCI_STAT_OVRD_REG0__COEFFICIENTID_2__SHIFT 0x00000008
+#define PB1_TX_LANE2_SCI_STAT_OVRD_REG0__DEEMPH_2_MASK 0x00000080L
+#define PB1_TX_LANE2_SCI_STAT_OVRD_REG0__DEEMPH_2__SHIFT 0x00000007
+#define PB1_TX_LANE2_SCI_STAT_OVRD_REG0__INCOHERENTCK_2_MASK 0x00000008L
+#define PB1_TX_LANE2_SCI_STAT_OVRD_REG0__INCOHERENTCK_2__SHIFT 0x00000003
+#define PB1_TX_LANE2_SCI_STAT_OVRD_REG0__TXMARG_2_MASK 0x00000070L
+#define PB1_TX_LANE2_SCI_STAT_OVRD_REG0__TXMARG_2__SHIFT 0x00000004
+#define PB1_TX_LANE2_SCI_STAT_OVRD_REG0__TXPWR_2_MASK 0x00000007L
+#define PB1_TX_LANE2_SCI_STAT_OVRD_REG0__TXPWR_2__SHIFT 0x00000000
+#define PB1_TX_LANE3_CTRL_REG0__TX_CFG_DISPCLK_MODE_3_MASK 0x00000001L
+#define PB1_TX_LANE3_CTRL_REG0__TX_CFG_DISPCLK_MODE_3__SHIFT 0x00000000
+#define PB1_TX_LANE3_CTRL_REG0__TX_CFG_INV_DATA_3_MASK 0x00000002L
+#define PB1_TX_LANE3_CTRL_REG0__TX_CFG_INV_DATA_3__SHIFT 0x00000001
+#define PB1_TX_LANE3_CTRL_REG0__TX_CFG_SWING_BOOST_EN_3_MASK 0x00000004L
+#define PB1_TX_LANE3_CTRL_REG0__TX_CFG_SWING_BOOST_EN_3__SHIFT 0x00000002
+#define PB1_TX_LANE3_CTRL_REG0__TX_DBG_PRBS_EN_3_MASK 0x00000008L
+#define PB1_TX_LANE3_CTRL_REG0__TX_DBG_PRBS_EN_3__SHIFT 0x00000003
+#define PB1_TX_LANE3_OVRD_REG0__TX_DCLK_EN_OVRD_EN_3_MASK 0x00000002L
+#define PB1_TX_LANE3_OVRD_REG0__TX_DCLK_EN_OVRD_EN_3__SHIFT 0x00000001
+#define PB1_TX_LANE3_OVRD_REG0__TX_DCLK_EN_OVRD_VAL_3_MASK 0x00000001L
+#define PB1_TX_LANE3_OVRD_REG0__TX_DCLK_EN_OVRD_VAL_3__SHIFT 0x00000000
+#define PB1_TX_LANE3_OVRD_REG0__TX_DRV_DATA_EN_OVRD_EN_3_MASK 0x00000008L
+#define PB1_TX_LANE3_OVRD_REG0__TX_DRV_DATA_EN_OVRD_EN_3__SHIFT 0x00000003
+#define PB1_TX_LANE3_OVRD_REG0__TX_DRV_DATA_EN_OVRD_VAL_3_MASK 0x00000004L
+#define PB1_TX_LANE3_OVRD_REG0__TX_DRV_DATA_EN_OVRD_VAL_3__SHIFT 0x00000002
+#define PB1_TX_LANE3_OVRD_REG0__TX_DRV_PWRON_OVRD_EN_3_MASK 0x00000020L
+#define PB1_TX_LANE3_OVRD_REG0__TX_DRV_PWRON_OVRD_EN_3__SHIFT 0x00000005
+#define PB1_TX_LANE3_OVRD_REG0__TX_DRV_PWRON_OVRD_VAL_3_MASK 0x00000010L
+#define PB1_TX_LANE3_OVRD_REG0__TX_DRV_PWRON_OVRD_VAL_3__SHIFT 0x00000004
+#define PB1_TX_LANE3_OVRD_REG0__TX_FRONTEND_PWRON_OVRD_EN_3_MASK 0x00000080L
+#define PB1_TX_LANE3_OVRD_REG0__TX_FRONTEND_PWRON_OVRD_EN_3__SHIFT 0x00000007
+#define PB1_TX_LANE3_OVRD_REG0__TX_FRONTEND_PWRON_OVRD_VAL_3_MASK 0x00000040L
+#define PB1_TX_LANE3_OVRD_REG0__TX_FRONTEND_PWRON_OVRD_VAL_3__SHIFT 0x00000006
+#define PB1_TX_LANE3_SCI_STAT_OVRD_REG0__COEFFICIENT_3_MASK 0x0000fc00L
+#define PB1_TX_LANE3_SCI_STAT_OVRD_REG0__COEFFICIENT_3__SHIFT 0x0000000a
+#define PB1_TX_LANE3_SCI_STAT_OVRD_REG0__COEFFICIENTID_3_MASK 0x00000300L
+#define PB1_TX_LANE3_SCI_STAT_OVRD_REG0__COEFFICIENTID_3__SHIFT 0x00000008
+#define PB1_TX_LANE3_SCI_STAT_OVRD_REG0__DEEMPH_3_MASK 0x00000080L
+#define PB1_TX_LANE3_SCI_STAT_OVRD_REG0__DEEMPH_3__SHIFT 0x00000007
+#define PB1_TX_LANE3_SCI_STAT_OVRD_REG0__INCOHERENTCK_3_MASK 0x00000008L
+#define PB1_TX_LANE3_SCI_STAT_OVRD_REG0__INCOHERENTCK_3__SHIFT 0x00000003
+#define PB1_TX_LANE3_SCI_STAT_OVRD_REG0__TXMARG_3_MASK 0x00000070L
+#define PB1_TX_LANE3_SCI_STAT_OVRD_REG0__TXMARG_3__SHIFT 0x00000004
+#define PB1_TX_LANE3_SCI_STAT_OVRD_REG0__TXPWR_3_MASK 0x00000007L
+#define PB1_TX_LANE3_SCI_STAT_OVRD_REG0__TXPWR_3__SHIFT 0x00000000
+#define PB1_TX_LANE4_CTRL_REG0__TX_CFG_DISPCLK_MODE_4_MASK 0x00000001L
+#define PB1_TX_LANE4_CTRL_REG0__TX_CFG_DISPCLK_MODE_4__SHIFT 0x00000000
+#define PB1_TX_LANE4_CTRL_REG0__TX_CFG_INV_DATA_4_MASK 0x00000002L
+#define PB1_TX_LANE4_CTRL_REG0__TX_CFG_INV_DATA_4__SHIFT 0x00000001
+#define PB1_TX_LANE4_CTRL_REG0__TX_CFG_SWING_BOOST_EN_4_MASK 0x00000004L
+#define PB1_TX_LANE4_CTRL_REG0__TX_CFG_SWING_BOOST_EN_4__SHIFT 0x00000002
+#define PB1_TX_LANE4_CTRL_REG0__TX_DBG_PRBS_EN_4_MASK 0x00000008L
+#define PB1_TX_LANE4_CTRL_REG0__TX_DBG_PRBS_EN_4__SHIFT 0x00000003
+#define PB1_TX_LANE4_OVRD_REG0__TX_DCLK_EN_OVRD_EN_4_MASK 0x00000002L
+#define PB1_TX_LANE4_OVRD_REG0__TX_DCLK_EN_OVRD_EN_4__SHIFT 0x00000001
+#define PB1_TX_LANE4_OVRD_REG0__TX_DCLK_EN_OVRD_VAL_4_MASK 0x00000001L
+#define PB1_TX_LANE4_OVRD_REG0__TX_DCLK_EN_OVRD_VAL_4__SHIFT 0x00000000
+#define PB1_TX_LANE4_OVRD_REG0__TX_DRV_DATA_EN_OVRD_EN_4_MASK 0x00000008L
+#define PB1_TX_LANE4_OVRD_REG0__TX_DRV_DATA_EN_OVRD_EN_4__SHIFT 0x00000003
+#define PB1_TX_LANE4_OVRD_REG0__TX_DRV_DATA_EN_OVRD_VAL_4_MASK 0x00000004L
+#define PB1_TX_LANE4_OVRD_REG0__TX_DRV_DATA_EN_OVRD_VAL_4__SHIFT 0x00000002
+#define PB1_TX_LANE4_OVRD_REG0__TX_DRV_PWRON_OVRD_EN_4_MASK 0x00000020L
+#define PB1_TX_LANE4_OVRD_REG0__TX_DRV_PWRON_OVRD_EN_4__SHIFT 0x00000005
+#define PB1_TX_LANE4_OVRD_REG0__TX_DRV_PWRON_OVRD_VAL_4_MASK 0x00000010L
+#define PB1_TX_LANE4_OVRD_REG0__TX_DRV_PWRON_OVRD_VAL_4__SHIFT 0x00000004
+#define PB1_TX_LANE4_OVRD_REG0__TX_FRONTEND_PWRON_OVRD_EN_4_MASK 0x00000080L
+#define PB1_TX_LANE4_OVRD_REG0__TX_FRONTEND_PWRON_OVRD_EN_4__SHIFT 0x00000007
+#define PB1_TX_LANE4_OVRD_REG0__TX_FRONTEND_PWRON_OVRD_VAL_4_MASK 0x00000040L
+#define PB1_TX_LANE4_OVRD_REG0__TX_FRONTEND_PWRON_OVRD_VAL_4__SHIFT 0x00000006
+#define PB1_TX_LANE4_SCI_STAT_OVRD_REG0__COEFFICIENT_4_MASK 0x0000fc00L
+#define PB1_TX_LANE4_SCI_STAT_OVRD_REG0__COEFFICIENT_4__SHIFT 0x0000000a
+#define PB1_TX_LANE4_SCI_STAT_OVRD_REG0__COEFFICIENTID_4_MASK 0x00000300L
+#define PB1_TX_LANE4_SCI_STAT_OVRD_REG0__COEFFICIENTID_4__SHIFT 0x00000008
+#define PB1_TX_LANE4_SCI_STAT_OVRD_REG0__DEEMPH_4_MASK 0x00000080L
+#define PB1_TX_LANE4_SCI_STAT_OVRD_REG0__DEEMPH_4__SHIFT 0x00000007
+#define PB1_TX_LANE4_SCI_STAT_OVRD_REG0__INCOHERENTCK_4_MASK 0x00000008L
+#define PB1_TX_LANE4_SCI_STAT_OVRD_REG0__INCOHERENTCK_4__SHIFT 0x00000003
+#define PB1_TX_LANE4_SCI_STAT_OVRD_REG0__TXMARG_4_MASK 0x00000070L
+#define PB1_TX_LANE4_SCI_STAT_OVRD_REG0__TXMARG_4__SHIFT 0x00000004
+#define PB1_TX_LANE4_SCI_STAT_OVRD_REG0__TXPWR_4_MASK 0x00000007L
+#define PB1_TX_LANE4_SCI_STAT_OVRD_REG0__TXPWR_4__SHIFT 0x00000000
+#define PB1_TX_LANE5_CTRL_REG0__TX_CFG_DISPCLK_MODE_5_MASK 0x00000001L
+#define PB1_TX_LANE5_CTRL_REG0__TX_CFG_DISPCLK_MODE_5__SHIFT 0x00000000
+#define PB1_TX_LANE5_CTRL_REG0__TX_CFG_INV_DATA_5_MASK 0x00000002L
+#define PB1_TX_LANE5_CTRL_REG0__TX_CFG_INV_DATA_5__SHIFT 0x00000001
+#define PB1_TX_LANE5_CTRL_REG0__TX_CFG_SWING_BOOST_EN_5_MASK 0x00000004L
+#define PB1_TX_LANE5_CTRL_REG0__TX_CFG_SWING_BOOST_EN_5__SHIFT 0x00000002
+#define PB1_TX_LANE5_CTRL_REG0__TX_DBG_PRBS_EN_5_MASK 0x00000008L
+#define PB1_TX_LANE5_CTRL_REG0__TX_DBG_PRBS_EN_5__SHIFT 0x00000003
+#define PB1_TX_LANE5_OVRD_REG0__TX_DCLK_EN_OVRD_EN_5_MASK 0x00000002L
+#define PB1_TX_LANE5_OVRD_REG0__TX_DCLK_EN_OVRD_EN_5__SHIFT 0x00000001
+#define PB1_TX_LANE5_OVRD_REG0__TX_DCLK_EN_OVRD_VAL_5_MASK 0x00000001L
+#define PB1_TX_LANE5_OVRD_REG0__TX_DCLK_EN_OVRD_VAL_5__SHIFT 0x00000000
+#define PB1_TX_LANE5_OVRD_REG0__TX_DRV_DATA_EN_OVRD_EN_5_MASK 0x00000008L
+#define PB1_TX_LANE5_OVRD_REG0__TX_DRV_DATA_EN_OVRD_EN_5__SHIFT 0x00000003
+#define PB1_TX_LANE5_OVRD_REG0__TX_DRV_DATA_EN_OVRD_VAL_5_MASK 0x00000004L
+#define PB1_TX_LANE5_OVRD_REG0__TX_DRV_DATA_EN_OVRD_VAL_5__SHIFT 0x00000002
+#define PB1_TX_LANE5_OVRD_REG0__TX_DRV_PWRON_OVRD_EN_5_MASK 0x00000020L
+#define PB1_TX_LANE5_OVRD_REG0__TX_DRV_PWRON_OVRD_EN_5__SHIFT 0x00000005
+#define PB1_TX_LANE5_OVRD_REG0__TX_DRV_PWRON_OVRD_VAL_5_MASK 0x00000010L
+#define PB1_TX_LANE5_OVRD_REG0__TX_DRV_PWRON_OVRD_VAL_5__SHIFT 0x00000004
+#define PB1_TX_LANE5_OVRD_REG0__TX_FRONTEND_PWRON_OVRD_EN_5_MASK 0x00000080L
+#define PB1_TX_LANE5_OVRD_REG0__TX_FRONTEND_PWRON_OVRD_EN_5__SHIFT 0x00000007
+#define PB1_TX_LANE5_OVRD_REG0__TX_FRONTEND_PWRON_OVRD_VAL_5_MASK 0x00000040L
+#define PB1_TX_LANE5_OVRD_REG0__TX_FRONTEND_PWRON_OVRD_VAL_5__SHIFT 0x00000006
+#define PB1_TX_LANE5_SCI_STAT_OVRD_REG0__COEFFICIENT_5_MASK 0x0000fc00L
+#define PB1_TX_LANE5_SCI_STAT_OVRD_REG0__COEFFICIENT_5__SHIFT 0x0000000a
+#define PB1_TX_LANE5_SCI_STAT_OVRD_REG0__COEFFICIENTID_5_MASK 0x00000300L
+#define PB1_TX_LANE5_SCI_STAT_OVRD_REG0__COEFFICIENTID_5__SHIFT 0x00000008
+#define PB1_TX_LANE5_SCI_STAT_OVRD_REG0__DEEMPH_5_MASK 0x00000080L
+#define PB1_TX_LANE5_SCI_STAT_OVRD_REG0__DEEMPH_5__SHIFT 0x00000007
+#define PB1_TX_LANE5_SCI_STAT_OVRD_REG0__INCOHERENTCK_5_MASK 0x00000008L
+#define PB1_TX_LANE5_SCI_STAT_OVRD_REG0__INCOHERENTCK_5__SHIFT 0x00000003
+#define PB1_TX_LANE5_SCI_STAT_OVRD_REG0__TXMARG_5_MASK 0x00000070L
+#define PB1_TX_LANE5_SCI_STAT_OVRD_REG0__TXMARG_5__SHIFT 0x00000004
+#define PB1_TX_LANE5_SCI_STAT_OVRD_REG0__TXPWR_5_MASK 0x00000007L
+#define PB1_TX_LANE5_SCI_STAT_OVRD_REG0__TXPWR_5__SHIFT 0x00000000
+#define PB1_TX_LANE6_CTRL_REG0__TX_CFG_DISPCLK_MODE_6_MASK 0x00000001L
+#define PB1_TX_LANE6_CTRL_REG0__TX_CFG_DISPCLK_MODE_6__SHIFT 0x00000000
+#define PB1_TX_LANE6_CTRL_REG0__TX_CFG_INV_DATA_6_MASK 0x00000002L
+#define PB1_TX_LANE6_CTRL_REG0__TX_CFG_INV_DATA_6__SHIFT 0x00000001
+#define PB1_TX_LANE6_CTRL_REG0__TX_CFG_SWING_BOOST_EN_6_MASK 0x00000004L
+#define PB1_TX_LANE6_CTRL_REG0__TX_CFG_SWING_BOOST_EN_6__SHIFT 0x00000002
+#define PB1_TX_LANE6_CTRL_REG0__TX_DBG_PRBS_EN_6_MASK 0x00000008L
+#define PB1_TX_LANE6_CTRL_REG0__TX_DBG_PRBS_EN_6__SHIFT 0x00000003
+#define PB1_TX_LANE6_OVRD_REG0__TX_DCLK_EN_OVRD_EN_6_MASK 0x00000002L
+#define PB1_TX_LANE6_OVRD_REG0__TX_DCLK_EN_OVRD_EN_6__SHIFT 0x00000001
+#define PB1_TX_LANE6_OVRD_REG0__TX_DCLK_EN_OVRD_VAL_6_MASK 0x00000001L
+#define PB1_TX_LANE6_OVRD_REG0__TX_DCLK_EN_OVRD_VAL_6__SHIFT 0x00000000
+#define PB1_TX_LANE6_OVRD_REG0__TX_DRV_DATA_EN_OVRD_EN_6_MASK 0x00000008L
+#define PB1_TX_LANE6_OVRD_REG0__TX_DRV_DATA_EN_OVRD_EN_6__SHIFT 0x00000003
+#define PB1_TX_LANE6_OVRD_REG0__TX_DRV_DATA_EN_OVRD_VAL_6_MASK 0x00000004L
+#define PB1_TX_LANE6_OVRD_REG0__TX_DRV_DATA_EN_OVRD_VAL_6__SHIFT 0x00000002
+#define PB1_TX_LANE6_OVRD_REG0__TX_DRV_PWRON_OVRD_EN_6_MASK 0x00000020L
+#define PB1_TX_LANE6_OVRD_REG0__TX_DRV_PWRON_OVRD_EN_6__SHIFT 0x00000005
+#define PB1_TX_LANE6_OVRD_REG0__TX_DRV_PWRON_OVRD_VAL_6_MASK 0x00000010L
+#define PB1_TX_LANE6_OVRD_REG0__TX_DRV_PWRON_OVRD_VAL_6__SHIFT 0x00000004
+#define PB1_TX_LANE6_OVRD_REG0__TX_FRONTEND_PWRON_OVRD_EN_6_MASK 0x00000080L
+#define PB1_TX_LANE6_OVRD_REG0__TX_FRONTEND_PWRON_OVRD_EN_6__SHIFT 0x00000007
+#define PB1_TX_LANE6_OVRD_REG0__TX_FRONTEND_PWRON_OVRD_VAL_6_MASK 0x00000040L
+#define PB1_TX_LANE6_OVRD_REG0__TX_FRONTEND_PWRON_OVRD_VAL_6__SHIFT 0x00000006
+#define PB1_TX_LANE6_SCI_STAT_OVRD_REG0__COEFFICIENT_6_MASK 0x0000fc00L
+#define PB1_TX_LANE6_SCI_STAT_OVRD_REG0__COEFFICIENT_6__SHIFT 0x0000000a
+#define PB1_TX_LANE6_SCI_STAT_OVRD_REG0__COEFFICIENTID_6_MASK 0x00000300L
+#define PB1_TX_LANE6_SCI_STAT_OVRD_REG0__COEFFICIENTID_6__SHIFT 0x00000008
+#define PB1_TX_LANE6_SCI_STAT_OVRD_REG0__DEEMPH_6_MASK 0x00000080L
+#define PB1_TX_LANE6_SCI_STAT_OVRD_REG0__DEEMPH_6__SHIFT 0x00000007
+#define PB1_TX_LANE6_SCI_STAT_OVRD_REG0__INCOHERENTCK_6_MASK 0x00000008L
+#define PB1_TX_LANE6_SCI_STAT_OVRD_REG0__INCOHERENTCK_6__SHIFT 0x00000003
+#define PB1_TX_LANE6_SCI_STAT_OVRD_REG0__TXMARG_6_MASK 0x00000070L
+#define PB1_TX_LANE6_SCI_STAT_OVRD_REG0__TXMARG_6__SHIFT 0x00000004
+#define PB1_TX_LANE6_SCI_STAT_OVRD_REG0__TXPWR_6_MASK 0x00000007L
+#define PB1_TX_LANE6_SCI_STAT_OVRD_REG0__TXPWR_6__SHIFT 0x00000000
+#define PB1_TX_LANE7_CTRL_REG0__TX_CFG_DISPCLK_MODE_7_MASK 0x00000001L
+#define PB1_TX_LANE7_CTRL_REG0__TX_CFG_DISPCLK_MODE_7__SHIFT 0x00000000
+#define PB1_TX_LANE7_CTRL_REG0__TX_CFG_INV_DATA_7_MASK 0x00000002L
+#define PB1_TX_LANE7_CTRL_REG0__TX_CFG_INV_DATA_7__SHIFT 0x00000001
+#define PB1_TX_LANE7_CTRL_REG0__TX_CFG_SWING_BOOST_EN_7_MASK 0x00000004L
+#define PB1_TX_LANE7_CTRL_REG0__TX_CFG_SWING_BOOST_EN_7__SHIFT 0x00000002
+#define PB1_TX_LANE7_CTRL_REG0__TX_DBG_PRBS_EN_7_MASK 0x00000008L
+#define PB1_TX_LANE7_CTRL_REG0__TX_DBG_PRBS_EN_7__SHIFT 0x00000003
+#define PB1_TX_LANE7_OVRD_REG0__TX_DCLK_EN_OVRD_EN_7_MASK 0x00000002L
+#define PB1_TX_LANE7_OVRD_REG0__TX_DCLK_EN_OVRD_EN_7__SHIFT 0x00000001
+#define PB1_TX_LANE7_OVRD_REG0__TX_DCLK_EN_OVRD_VAL_7_MASK 0x00000001L
+#define PB1_TX_LANE7_OVRD_REG0__TX_DCLK_EN_OVRD_VAL_7__SHIFT 0x00000000
+#define PB1_TX_LANE7_OVRD_REG0__TX_DRV_DATA_EN_OVRD_EN_7_MASK 0x00000008L
+#define PB1_TX_LANE7_OVRD_REG0__TX_DRV_DATA_EN_OVRD_EN_7__SHIFT 0x00000003
+#define PB1_TX_LANE7_OVRD_REG0__TX_DRV_DATA_EN_OVRD_VAL_7_MASK 0x00000004L
+#define PB1_TX_LANE7_OVRD_REG0__TX_DRV_DATA_EN_OVRD_VAL_7__SHIFT 0x00000002
+#define PB1_TX_LANE7_OVRD_REG0__TX_DRV_PWRON_OVRD_EN_7_MASK 0x00000020L
+#define PB1_TX_LANE7_OVRD_REG0__TX_DRV_PWRON_OVRD_EN_7__SHIFT 0x00000005
+#define PB1_TX_LANE7_OVRD_REG0__TX_DRV_PWRON_OVRD_VAL_7_MASK 0x00000010L
+#define PB1_TX_LANE7_OVRD_REG0__TX_DRV_PWRON_OVRD_VAL_7__SHIFT 0x00000004
+#define PB1_TX_LANE7_OVRD_REG0__TX_FRONTEND_PWRON_OVRD_EN_7_MASK 0x00000080L
+#define PB1_TX_LANE7_OVRD_REG0__TX_FRONTEND_PWRON_OVRD_EN_7__SHIFT 0x00000007
+#define PB1_TX_LANE7_OVRD_REG0__TX_FRONTEND_PWRON_OVRD_VAL_7_MASK 0x00000040L
+#define PB1_TX_LANE7_OVRD_REG0__TX_FRONTEND_PWRON_OVRD_VAL_7__SHIFT 0x00000006
+#define PB1_TX_LANE7_SCI_STAT_OVRD_REG0__COEFFICIENT_7_MASK 0x0000fc00L
+#define PB1_TX_LANE7_SCI_STAT_OVRD_REG0__COEFFICIENT_7__SHIFT 0x0000000a
+#define PB1_TX_LANE7_SCI_STAT_OVRD_REG0__COEFFICIENTID_7_MASK 0x00000300L
+#define PB1_TX_LANE7_SCI_STAT_OVRD_REG0__COEFFICIENTID_7__SHIFT 0x00000008
+#define PB1_TX_LANE7_SCI_STAT_OVRD_REG0__DEEMPH_7_MASK 0x00000080L
+#define PB1_TX_LANE7_SCI_STAT_OVRD_REG0__DEEMPH_7__SHIFT 0x00000007
+#define PB1_TX_LANE7_SCI_STAT_OVRD_REG0__INCOHERENTCK_7_MASK 0x00000008L
+#define PB1_TX_LANE7_SCI_STAT_OVRD_REG0__INCOHERENTCK_7__SHIFT 0x00000003
+#define PB1_TX_LANE7_SCI_STAT_OVRD_REG0__TXMARG_7_MASK 0x00000070L
+#define PB1_TX_LANE7_SCI_STAT_OVRD_REG0__TXMARG_7__SHIFT 0x00000004
+#define PB1_TX_LANE7_SCI_STAT_OVRD_REG0__TXPWR_7_MASK 0x00000007L
+#define PB1_TX_LANE7_SCI_STAT_OVRD_REG0__TXPWR_7__SHIFT 0x00000000
+#define PB1_TX_LANE8_CTRL_REG0__TX_CFG_DISPCLK_MODE_8_MASK 0x00000001L
+#define PB1_TX_LANE8_CTRL_REG0__TX_CFG_DISPCLK_MODE_8__SHIFT 0x00000000
+#define PB1_TX_LANE8_CTRL_REG0__TX_CFG_INV_DATA_8_MASK 0x00000002L
+#define PB1_TX_LANE8_CTRL_REG0__TX_CFG_INV_DATA_8__SHIFT 0x00000001
+#define PB1_TX_LANE8_CTRL_REG0__TX_CFG_SWING_BOOST_EN_8_MASK 0x00000004L
+#define PB1_TX_LANE8_CTRL_REG0__TX_CFG_SWING_BOOST_EN_8__SHIFT 0x00000002
+#define PB1_TX_LANE8_CTRL_REG0__TX_DBG_PRBS_EN_8_MASK 0x00000008L
+#define PB1_TX_LANE8_CTRL_REG0__TX_DBG_PRBS_EN_8__SHIFT 0x00000003
+#define PB1_TX_LANE8_OVRD_REG0__TX_DCLK_EN_OVRD_EN_8_MASK 0x00000002L
+#define PB1_TX_LANE8_OVRD_REG0__TX_DCLK_EN_OVRD_EN_8__SHIFT 0x00000001
+#define PB1_TX_LANE8_OVRD_REG0__TX_DCLK_EN_OVRD_VAL_8_MASK 0x00000001L
+#define PB1_TX_LANE8_OVRD_REG0__TX_DCLK_EN_OVRD_VAL_8__SHIFT 0x00000000
+#define PB1_TX_LANE8_OVRD_REG0__TX_DRV_DATA_EN_OVRD_EN_8_MASK 0x00000008L
+#define PB1_TX_LANE8_OVRD_REG0__TX_DRV_DATA_EN_OVRD_EN_8__SHIFT 0x00000003
+#define PB1_TX_LANE8_OVRD_REG0__TX_DRV_DATA_EN_OVRD_VAL_8_MASK 0x00000004L
+#define PB1_TX_LANE8_OVRD_REG0__TX_DRV_DATA_EN_OVRD_VAL_8__SHIFT 0x00000002
+#define PB1_TX_LANE8_OVRD_REG0__TX_DRV_PWRON_OVRD_EN_8_MASK 0x00000020L
+#define PB1_TX_LANE8_OVRD_REG0__TX_DRV_PWRON_OVRD_EN_8__SHIFT 0x00000005
+#define PB1_TX_LANE8_OVRD_REG0__TX_DRV_PWRON_OVRD_VAL_8_MASK 0x00000010L
+#define PB1_TX_LANE8_OVRD_REG0__TX_DRV_PWRON_OVRD_VAL_8__SHIFT 0x00000004
+#define PB1_TX_LANE8_OVRD_REG0__TX_FRONTEND_PWRON_OVRD_EN_8_MASK 0x00000080L
+#define PB1_TX_LANE8_OVRD_REG0__TX_FRONTEND_PWRON_OVRD_EN_8__SHIFT 0x00000007
+#define PB1_TX_LANE8_OVRD_REG0__TX_FRONTEND_PWRON_OVRD_VAL_8_MASK 0x00000040L
+#define PB1_TX_LANE8_OVRD_REG0__TX_FRONTEND_PWRON_OVRD_VAL_8__SHIFT 0x00000006
+#define PB1_TX_LANE8_SCI_STAT_OVRD_REG0__COEFFICIENT_8_MASK 0x0000fc00L
+#define PB1_TX_LANE8_SCI_STAT_OVRD_REG0__COEFFICIENT_8__SHIFT 0x0000000a
+#define PB1_TX_LANE8_SCI_STAT_OVRD_REG0__COEFFICIENTID_8_MASK 0x00000300L
+#define PB1_TX_LANE8_SCI_STAT_OVRD_REG0__COEFFICIENTID_8__SHIFT 0x00000008
+#define PB1_TX_LANE8_SCI_STAT_OVRD_REG0__DEEMPH_8_MASK 0x00000080L
+#define PB1_TX_LANE8_SCI_STAT_OVRD_REG0__DEEMPH_8__SHIFT 0x00000007
+#define PB1_TX_LANE8_SCI_STAT_OVRD_REG0__INCOHERENTCK_8_MASK 0x00000008L
+#define PB1_TX_LANE8_SCI_STAT_OVRD_REG0__INCOHERENTCK_8__SHIFT 0x00000003
+#define PB1_TX_LANE8_SCI_STAT_OVRD_REG0__TXMARG_8_MASK 0x00000070L
+#define PB1_TX_LANE8_SCI_STAT_OVRD_REG0__TXMARG_8__SHIFT 0x00000004
+#define PB1_TX_LANE8_SCI_STAT_OVRD_REG0__TXPWR_8_MASK 0x00000007L
+#define PB1_TX_LANE8_SCI_STAT_OVRD_REG0__TXPWR_8__SHIFT 0x00000000
+#define PB1_TX_LANE9_CTRL_REG0__TX_CFG_DISPCLK_MODE_9_MASK 0x00000001L
+#define PB1_TX_LANE9_CTRL_REG0__TX_CFG_DISPCLK_MODE_9__SHIFT 0x00000000
+#define PB1_TX_LANE9_CTRL_REG0__TX_CFG_INV_DATA_9_MASK 0x00000002L
+#define PB1_TX_LANE9_CTRL_REG0__TX_CFG_INV_DATA_9__SHIFT 0x00000001
+#define PB1_TX_LANE9_CTRL_REG0__TX_CFG_SWING_BOOST_EN_9_MASK 0x00000004L
+#define PB1_TX_LANE9_CTRL_REG0__TX_CFG_SWING_BOOST_EN_9__SHIFT 0x00000002
+#define PB1_TX_LANE9_CTRL_REG0__TX_DBG_PRBS_EN_9_MASK 0x00000008L
+#define PB1_TX_LANE9_CTRL_REG0__TX_DBG_PRBS_EN_9__SHIFT 0x00000003
+#define PB1_TX_LANE9_OVRD_REG0__TX_DCLK_EN_OVRD_EN_9_MASK 0x00000002L
+#define PB1_TX_LANE9_OVRD_REG0__TX_DCLK_EN_OVRD_EN_9__SHIFT 0x00000001
+#define PB1_TX_LANE9_OVRD_REG0__TX_DCLK_EN_OVRD_VAL_9_MASK 0x00000001L
+#define PB1_TX_LANE9_OVRD_REG0__TX_DCLK_EN_OVRD_VAL_9__SHIFT 0x00000000
+#define PB1_TX_LANE9_OVRD_REG0__TX_DRV_DATA_EN_OVRD_EN_9_MASK 0x00000008L
+#define PB1_TX_LANE9_OVRD_REG0__TX_DRV_DATA_EN_OVRD_EN_9__SHIFT 0x00000003
+#define PB1_TX_LANE9_OVRD_REG0__TX_DRV_DATA_EN_OVRD_VAL_9_MASK 0x00000004L
+#define PB1_TX_LANE9_OVRD_REG0__TX_DRV_DATA_EN_OVRD_VAL_9__SHIFT 0x00000002
+#define PB1_TX_LANE9_OVRD_REG0__TX_DRV_PWRON_OVRD_EN_9_MASK 0x00000020L
+#define PB1_TX_LANE9_OVRD_REG0__TX_DRV_PWRON_OVRD_EN_9__SHIFT 0x00000005
+#define PB1_TX_LANE9_OVRD_REG0__TX_DRV_PWRON_OVRD_VAL_9_MASK 0x00000010L
+#define PB1_TX_LANE9_OVRD_REG0__TX_DRV_PWRON_OVRD_VAL_9__SHIFT 0x00000004
+#define PB1_TX_LANE9_OVRD_REG0__TX_FRONTEND_PWRON_OVRD_EN_9_MASK 0x00000080L
+#define PB1_TX_LANE9_OVRD_REG0__TX_FRONTEND_PWRON_OVRD_EN_9__SHIFT 0x00000007
+#define PB1_TX_LANE9_OVRD_REG0__TX_FRONTEND_PWRON_OVRD_VAL_9_MASK 0x00000040L
+#define PB1_TX_LANE9_OVRD_REG0__TX_FRONTEND_PWRON_OVRD_VAL_9__SHIFT 0x00000006
+#define PB1_TX_LANE9_SCI_STAT_OVRD_REG0__COEFFICIENT_9_MASK 0x0000fc00L
+#define PB1_TX_LANE9_SCI_STAT_OVRD_REG0__COEFFICIENT_9__SHIFT 0x0000000a
+#define PB1_TX_LANE9_SCI_STAT_OVRD_REG0__COEFFICIENTID_9_MASK 0x00000300L
+#define PB1_TX_LANE9_SCI_STAT_OVRD_REG0__COEFFICIENTID_9__SHIFT 0x00000008
+#define PB1_TX_LANE9_SCI_STAT_OVRD_REG0__DEEMPH_9_MASK 0x00000080L
+#define PB1_TX_LANE9_SCI_STAT_OVRD_REG0__DEEMPH_9__SHIFT 0x00000007
+#define PB1_TX_LANE9_SCI_STAT_OVRD_REG0__INCOHERENTCK_9_MASK 0x00000008L
+#define PB1_TX_LANE9_SCI_STAT_OVRD_REG0__INCOHERENTCK_9__SHIFT 0x00000003
+#define PB1_TX_LANE9_SCI_STAT_OVRD_REG0__TXMARG_9_MASK 0x00000070L
+#define PB1_TX_LANE9_SCI_STAT_OVRD_REG0__TXMARG_9__SHIFT 0x00000004
+#define PB1_TX_LANE9_SCI_STAT_OVRD_REG0__TXPWR_9_MASK 0x00000007L
+#define PB1_TX_LANE9_SCI_STAT_OVRD_REG0__TXPWR_9__SHIFT 0x00000000
+#define PCIE_BUS_CNTL__IMMEDIATE_PMI_DIS_MASK 0x00000080L
+#define PCIE_BUS_CNTL__IMMEDIATE_PMI_DIS__SHIFT 0x00000007
+#define PCIE_BUS_CNTL__PMI_INT_DIS_MASK 0x00000040L
+#define PCIE_BUS_CNTL__PMI_INT_DIS__SHIFT 0x00000006
+#define PCIE_CFG_CNTL__CFG_EN_DEC_TO_GEN2_HIDDEN_REG_MASK 0x00000001L
+#define PCIE_CFG_CNTL__CFG_EN_DEC_TO_GEN2_HIDDEN_REG__SHIFT 0x00000000
+#define PCIE_CFG_CNTL__CFG_EN_DEC_TO_GEN3_HIDDEN_REG_MASK 0x00000004L
+#define PCIE_CFG_CNTL__CFG_EN_DEC_TO_GEN3_HIDDEN_REG__SHIFT 0x00000002
+#define PCIE_CFG_CNTL__CFG_EN_DEC_TO_HIDDEN_REG_MASK 0x00000002L
+#define PCIE_CFG_CNTL__CFG_EN_DEC_TO_HIDDEN_REG__SHIFT 0x00000001
+#define PCIE_CI_CNTL__CI_MST_CMPL_DUMMY_DATA_MASK 0x00000010L
+#define PCIE_CI_CNTL__CI_MST_CMPL_DUMMY_DATA__SHIFT 0x00000004
+#define PCIE_CI_CNTL__CI_MST_IGNORE_PAGE_ALIGNED_REQUEST_MASK 0x00002000L
+#define PCIE_CI_CNTL__CI_MST_IGNORE_PAGE_ALIGNED_REQUEST__SHIFT 0x0000000d
+#define PCIE_CI_CNTL__CI_RC_ORDERING_DIS_MASK 0x00000200L
+#define PCIE_CI_CNTL__CI_RC_ORDERING_DIS__SHIFT 0x00000009
+#define PCIE_CI_CNTL__CI_SLAVE_GEN_USR_DIS_MASK 0x00000008L
+#define PCIE_CI_CNTL__CI_SLAVE_GEN_USR_DIS__SHIFT 0x00000003
+#define PCIE_CI_CNTL__CI_SLAVE_SPLIT_MODE_MASK 0x00000004L
+#define PCIE_CI_CNTL__CI_SLAVE_SPLIT_MODE__SHIFT 0x00000002
+#define PCIE_CI_CNTL__CI_SLV_CPL_ALLOC_DIS_MASK 0x00000400L
+#define PCIE_CI_CNTL__CI_SLV_CPL_ALLOC_DIS__SHIFT 0x0000000a
+#define PCIE_CI_CNTL__CI_SLV_CPL_ALLOC_MODE_MASK 0x00000800L
+#define PCIE_CI_CNTL__CI_SLV_CPL_ALLOC_MODE__SHIFT 0x0000000b
+#define PCIE_CI_CNTL__CI_SLV_CPL_ALLOC_SOR_MASK 0x00001000L
+#define PCIE_CI_CNTL__CI_SLV_CPL_ALLOC_SOR__SHIFT 0x0000000c
+#define PCIE_CI_CNTL__CI_SLV_ORDERING_DIS_MASK 0x00000100L
+#define PCIE_CI_CNTL__CI_SLV_ORDERING_DIS__SHIFT 0x00000008
+#define PCIE_CI_CNTL__CI_SLV_RC_RD_REQ_SIZE_MASK 0x000000c0L
+#define PCIE_CI_CNTL__CI_SLV_RC_RD_REQ_SIZE__SHIFT 0x00000006
+#define PCIE_CNTL2__MST_MEM_LS_EN_MASK 0x00040000L
+#define PCIE_CNTL2__MST_MEM_LS_EN__SHIFT 0x00000012
+#define PCIE_CNTL2__MST_MEM_SD_EN_MASK 0x00400000L
+#define PCIE_CNTL2__MST_MEM_SD_EN__SHIFT 0x00000016
+#define PCIE_CNTL2__REPLAY_MEM_LS_EN_MASK 0x00080000L
+#define PCIE_CNTL2__REPLAY_MEM_LS_EN__SHIFT 0x00000013
+#define PCIE_CNTL2__REPLAY_MEM_SD_EN_MASK 0x00800000L
+#define PCIE_CNTL2__REPLAY_MEM_SD_EN__SHIFT 0x00000017
+#define PCIE_CNTL2__RX_NP_MEM_WRITE_ENCODING_MASK 0x1f000000L
+#define PCIE_CNTL2__RX_NP_MEM_WRITE_ENCODING__SHIFT 0x00000018
+#define PCIE_CNTL2__SLV_MEM_AGGRESSIVE_LS_EN_MASK 0x00020000L
+#define PCIE_CNTL2__SLV_MEM_AGGRESSIVE_LS_EN__SHIFT 0x00000011
+#define PCIE_CNTL2__SLV_MEM_AGGRESSIVE_SD_EN_MASK 0x00200000L
+#define PCIE_CNTL2__SLV_MEM_AGGRESSIVE_SD_EN__SHIFT 0x00000015
+#define PCIE_CNTL2__SLV_MEM_LS_EN_MASK 0x00010000L
+#define PCIE_CNTL2__SLV_MEM_LS_EN__SHIFT 0x00000010
+#define PCIE_CNTL2__SLV_MEM_SD_EN_MASK 0x00100000L
+#define PCIE_CNTL2__SLV_MEM_SD_EN__SHIFT 0x00000014
+#define PCIE_CNTL2__TX_ARB_MST_LIMIT_MASK 0x000007c0L
+#define PCIE_CNTL2__TX_ARB_MST_LIMIT__SHIFT 0x00000006
+#define PCIE_CNTL2__TX_ARB_ROUND_ROBIN_EN_MASK 0x00000001L
+#define PCIE_CNTL2__TX_ARB_ROUND_ROBIN_EN__SHIFT 0x00000000
+#define PCIE_CNTL2__TX_ARB_SLV_LIMIT_MASK 0x0000003eL
+#define PCIE_CNTL2__TX_ARB_SLV_LIMIT__SHIFT 0x00000001
+#define PCIE_CNTL__HWINIT_WR_LOCK_MASK 0x00000001L
+#define PCIE_CNTL__HWINIT_WR_LOCK__SHIFT 0x00000000
+#define PCIE_CNTL__LC_HOT_PLUG_DELAY_SEL_MASK 0x0000000eL
+#define PCIE_CNTL__LC_HOT_PLUG_DELAY_SEL__SHIFT 0x00000001
+#define PCIE_CNTL__PCIE_HT_NP_MEM_WRITE_MASK 0x00000200L
+#define PCIE_CNTL__PCIE_HT_NP_MEM_WRITE__SHIFT 0x00000009
+#define PCIE_CNTL__PCIE_MALFORM_ATOMIC_OPS_MASK 0x00000100L
+#define PCIE_CNTL__PCIE_MALFORM_ATOMIC_OPS__SHIFT 0x00000008
+#define PCIE_CNTL__RX_ATS_TRAN_CPL_SPLIT_DIS_MASK 0x00800000L
+#define PCIE_CNTL__RX_ATS_TRAN_CPL_SPLIT_DIS__SHIFT 0x00000017
+#define PCIE_CNTL__RX_CPL_POSTED_REQ_ORD_EN_MASK 0x80000000L
+#define PCIE_CNTL__RX_CPL_POSTED_REQ_ORD_EN__SHIFT 0x0000001f
+#define PCIE_CNTL__RX_RCB_ATS_UC_DIS_MASK 0x00008000L
+#define PCIE_CNTL__RX_RCB_ATS_UC_DIS__SHIFT 0x0000000f
+#define PCIE_CNTL__RX_RCB_CHANNEL_ORDERING_MASK 0x00100000L
+#define PCIE_CNTL__RX_RCB_CHANNEL_ORDERING__SHIFT 0x00000014
+#define PCIE_CNTL__RX_RCB_CPL_TIMEOUT_TEST_MODE_MASK 0x00080000L
+#define PCIE_CNTL__RX_RCB_CPL_TIMEOUT_TEST_MODE__SHIFT 0x00000013
+#define PCIE_CNTL__RX_RCB_INVALID_SIZE_DIS_MASK 0x00020000L
+#define PCIE_CNTL__RX_RCB_INVALID_SIZE_DIS__SHIFT 0x00000011
+#define PCIE_CNTL__RX_RCB_REORDER_EN_MASK 0x00010000L
+#define PCIE_CNTL__RX_RCB_REORDER_EN__SHIFT 0x00000010
+#define PCIE_CNTL__RX_RCB_UNEXP_CPL_DIS_MASK 0x00040000L
+#define PCIE_CNTL__RX_RCB_UNEXP_CPL_DIS__SHIFT 0x00000012
+#define PCIE_CNTL__RX_RCB_WRONG_ATTR_DIS_MASK 0x00200000L
+#define PCIE_CNTL__RX_RCB_WRONG_ATTR_DIS__SHIFT 0x00000015
+#define PCIE_CNTL__RX_RCB_WRONG_FUNCNUM_DIS_MASK 0x00400000L
+#define PCIE_CNTL__RX_RCB_WRONG_FUNCNUM_DIS__SHIFT 0x00000016
+#define PCIE_CNTL__RX_SB_ADJ_PAYLOAD_SIZE_MASK 0x00001c00L
+#define PCIE_CNTL__RX_SB_ADJ_PAYLOAD_SIZE__SHIFT 0x0000000a
+#define PCIE_CNTL__TX_CPL_DEBUG_MASK 0x3f000000L
+#define PCIE_CNTL__TX_CPL_DEBUG__SHIFT 0x00000018
+#define PCIE_CNTL__UR_ERR_REPORT_DIS_MASK 0x00000080L
+#define PCIE_CNTL__UR_ERR_REPORT_DIS__SHIFT 0x00000007
+#define PCIE_CONFIG_CNTL__CI_EXTENDED_TAG_EN_OVERRIDE_MASK 0x02000000L
+#define PCIE_CONFIG_CNTL__CI_EXTENDED_TAG_EN_OVERRIDE__SHIFT 0x00000019
+#define PCIE_CONFIG_CNTL__CI_MAX_PAYLOAD_SIZE_MODE_MASK 0x00010000L
+#define PCIE_CONFIG_CNTL__CI_MAX_PAYLOAD_SIZE_MODE__SHIFT 0x00000010
+#define PCIE_CONFIG_CNTL__CI_MAX_READ_REQUEST_SIZE_MODE_MASK 0x00100000L
+#define PCIE_CONFIG_CNTL__CI_MAX_READ_REQUEST_SIZE_MODE__SHIFT 0x00000014
+#define PCIE_CONFIG_CNTL__CI_MAX_READ_SAFE_MODE_MASK 0x01000000L
+#define PCIE_CONFIG_CNTL__CI_MAX_READ_SAFE_MODE__SHIFT 0x00000018
+#define PCIE_CONFIG_CNTL__CI_PRIV_MAX_PAYLOAD_SIZE_MASK 0x000e0000L
+#define PCIE_CONFIG_CNTL__CI_PRIV_MAX_PAYLOAD_SIZE__SHIFT 0x00000011
+#define PCIE_CONFIG_CNTL__CI_PRIV_MAX_READ_REQUEST_SIZE_MASK 0x00e00000L
+#define PCIE_CONFIG_CNTL__CI_PRIV_MAX_READ_REQUEST_SIZE__SHIFT 0x00000015
+#define PCIE_CONFIG_CNTL__DYN_CLK_LATENCY_MASK 0x0000000fL
+#define PCIE_CONFIG_CNTL__DYN_CLK_LATENCY__SHIFT 0x00000000
+#define PCIE_DATA__PCIE_DATA_MASK 0xffffffffL
+#define PCIE_DATA__PCIE_DATA__SHIFT 0x00000000
+#define PCIE_DEBUG_CNTL__DEBUG_LANE_EN_MASK 0xffff0000L
+#define PCIE_DEBUG_CNTL__DEBUG_LANE_EN__SHIFT 0x00000010
+#define PCIE_DEBUG_CNTL__DEBUG_PORT_EN_MASK 0x000000ffL
+#define PCIE_DEBUG_CNTL__DEBUG_PORT_EN__SHIFT 0x00000000
+#define PCIE_DEBUG_CNTL__DEBUG_SELECT_MASK 0x00000100L
+#define PCIE_DEBUG_CNTL__DEBUG_SELECT__SHIFT 0x00000008
+#define PCIE_ERR_CNTL__AER_HDR_LOG_F0_TIMER_EXPIRED_MASK 0x00000800L
+#define PCIE_ERR_CNTL__AER_HDR_LOG_F0_TIMER_EXPIRED__SHIFT 0x0000000b
+#define PCIE_ERR_CNTL__AER_HDR_LOG_F1_TIMER_EXPIRED_MASK 0x00001000L
+#define PCIE_ERR_CNTL__AER_HDR_LOG_F1_TIMER_EXPIRED__SHIFT 0x0000000c
+#define PCIE_ERR_CNTL__AER_HDR_LOG_F2_TIMER_EXPIRED_MASK 0x00002000L
+#define PCIE_ERR_CNTL__AER_HDR_LOG_F2_TIMER_EXPIRED__SHIFT 0x0000000d
+#define PCIE_ERR_CNTL__AER_HDR_LOG_TIMEOUT_MASK 0x00000700L
+#define PCIE_ERR_CNTL__AER_HDR_LOG_TIMEOUT__SHIFT 0x00000008
+#define PCIE_ERR_CNTL__CI_NP_SLV_BUF_RD_HALT_STATUS_MASK 0x00008000L
+#define PCIE_ERR_CNTL__CI_NP_SLV_BUF_RD_HALT_STATUS__SHIFT 0x0000000f
+#define PCIE_ERR_CNTL__CI_P_SLV_BUF_RD_HALT_STATUS_MASK 0x00004000L
+#define PCIE_ERR_CNTL__CI_P_SLV_BUF_RD_HALT_STATUS__SHIFT 0x0000000e
+#define PCIE_ERR_CNTL__CI_SLV_BUF_HALT_RESET_MASK 0x00010000L
+#define PCIE_ERR_CNTL__CI_SLV_BUF_HALT_RESET__SHIFT 0x00000010
+#define PCIE_ERR_CNTL__ERR_REPORTING_DIS_MASK 0x00000001L
+#define PCIE_ERR_CNTL__ERR_REPORTING_DIS__SHIFT 0x00000000
+#define PCIE_ERR_CNTL__RX_GENERATE_ECRC_ERR_MASK 0x00000080L
+#define PCIE_ERR_CNTL__RX_GENERATE_ECRC_ERR__SHIFT 0x00000007
+#define PCIE_ERR_CNTL__RX_GENERATE_LCRC_ERR_MASK 0x00000020L
+#define PCIE_ERR_CNTL__RX_GENERATE_LCRC_ERR__SHIFT 0x00000005
+#define PCIE_ERR_CNTL__STRAP_FIRST_RCVD_ERR_LOG_MASK 0x00000002L
+#define PCIE_ERR_CNTL__STRAP_FIRST_RCVD_ERR_LOG__SHIFT 0x00000001
+#define PCIE_ERR_CNTL__TX_GENERATE_ECRC_ERR_MASK 0x00000040L
+#define PCIE_ERR_CNTL__TX_GENERATE_ECRC_ERR__SHIFT 0x00000006
+#define PCIE_ERR_CNTL__TX_GENERATE_LCRC_ERR_MASK 0x00000010L
+#define PCIE_ERR_CNTL__TX_GENERATE_LCRC_ERR__SHIFT 0x00000004
+#define PCIE_F0_DPA_CAP__PWR_ALLOC_SCALE_MASK 0x00003000L
+#define PCIE_F0_DPA_CAP__PWR_ALLOC_SCALE__SHIFT 0x0000000c
+#define PCIE_F0_DPA_CAP__TRANS_LAT_UNIT_MASK 0x00000300L
+#define PCIE_F0_DPA_CAP__TRANS_LAT_UNIT__SHIFT 0x00000008
+#define PCIE_F0_DPA_CAP__TRANS_LAT_VAL_0_MASK 0x00ff0000L
+#define PCIE_F0_DPA_CAP__TRANS_LAT_VAL_0__SHIFT 0x00000010
+#define PCIE_F0_DPA_CAP__TRANS_LAT_VAL_1_MASK 0xff000000L
+#define PCIE_F0_DPA_CAP__TRANS_LAT_VAL_1__SHIFT 0x00000018
+#define PCIE_F0_DPA_CNTL__SUBSTATE_STATUS_MASK 0x0000001fL
+#define PCIE_F0_DPA_CNTL__SUBSTATE_STATUS__SHIFT 0x00000000
+#define PCIE_F0_DPA_LATENCY_INDICATOR__TRANS_LAT_INDICATOR_BITS_MASK 0x000000ffL
+#define PCIE_F0_DPA_LATENCY_INDICATOR__TRANS_LAT_INDICATOR_BITS__SHIFT 0x00000000
+#define PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_0__SUBSTATE_PWR_ALLOC_MASK 0x000000ffL
+#define PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_0__SUBSTATE_PWR_ALLOC__SHIFT 0x00000000
+#define PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_1__SUBSTATE_PWR_ALLOC_MASK 0x000000ffL
+#define PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_1__SUBSTATE_PWR_ALLOC__SHIFT 0x00000000
+#define PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_2__SUBSTATE_PWR_ALLOC_MASK 0x000000ffL
+#define PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_2__SUBSTATE_PWR_ALLOC__SHIFT 0x00000000
+#define PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_3__SUBSTATE_PWR_ALLOC_MASK 0x000000ffL
+#define PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_3__SUBSTATE_PWR_ALLOC__SHIFT 0x00000000
+#define PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_4__SUBSTATE_PWR_ALLOC_MASK 0x000000ffL
+#define PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_4__SUBSTATE_PWR_ALLOC__SHIFT 0x00000000
+#define PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_5__SUBSTATE_PWR_ALLOC_MASK 0x000000ffL
+#define PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_5__SUBSTATE_PWR_ALLOC__SHIFT 0x00000000
+#define PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_6__SUBSTATE_PWR_ALLOC_MASK 0x000000ffL
+#define PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_6__SUBSTATE_PWR_ALLOC__SHIFT 0x00000000
+#define PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_7__SUBSTATE_PWR_ALLOC_MASK 0x000000ffL
+#define PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_7__SUBSTATE_PWR_ALLOC__SHIFT 0x00000000
+#define PCIE_FC_CPL__CPLD_CREDITS_MASK 0x000000ffL
+#define PCIE_FC_CPL__CPLD_CREDITS__SHIFT 0x00000000
+#define PCIE_FC_CPL__CPLH_CREDITS_MASK 0x0000ff00L
+#define PCIE_FC_CPL__CPLH_CREDITS__SHIFT 0x00000008
+#define PCIE_FC_NP__NPD_CREDITS_MASK 0x000000ffL
+#define PCIE_FC_NP__NPD_CREDITS__SHIFT 0x00000000
+#define PCIE_FC_NP__NPH_CREDITS_MASK 0x0000ff00L
+#define PCIE_FC_NP__NPH_CREDITS__SHIFT 0x00000008
+#define PCIE_FC_P__PD_CREDITS_MASK 0x000000ffL
+#define PCIE_FC_P__PD_CREDITS__SHIFT 0x00000000
+#define PCIE_FC_P__PH_CREDITS_MASK 0x0000ff00L
+#define PCIE_FC_P__PH_CREDITS__SHIFT 0x00000008
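/*
 * A minimal usage sketch: every field in this header is described by a
 * _MASK/__SHIFT pair, so a field is read as (reg & FIELD_MASK) >>
 * FIELD__SHIFT.  pcie_fc_p_ph_credits() below is a hypothetical helper
 * (no such function is defined by this patch), shown only to make the
 * encoding of the PCIE_FC_P credit fields concrete.
 */
static inline unsigned int pcie_fc_p_ph_credits(unsigned int reg)
{
	/* Isolate bits 15:8, then shift them down to get the raw count. */
	return (reg & PCIE_FC_P__PH_CREDITS_MASK) >>
		PCIE_FC_P__PH_CREDITS__SHIFT;
}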
+#define PCIE_HW_DEBUG__HW_00_DEBUG_MASK 0x00000001L
+#define PCIE_HW_DEBUG__HW_00_DEBUG__SHIFT 0x00000000
+#define PCIE_HW_DEBUG__HW_01_DEBUG_MASK 0x00000002L
+#define PCIE_HW_DEBUG__HW_01_DEBUG__SHIFT 0x00000001
+#define PCIE_HW_DEBUG__HW_02_DEBUG_MASK 0x00000004L
+#define PCIE_HW_DEBUG__HW_02_DEBUG__SHIFT 0x00000002
+#define PCIE_HW_DEBUG__HW_03_DEBUG_MASK 0x00000008L
+#define PCIE_HW_DEBUG__HW_03_DEBUG__SHIFT 0x00000003
+#define PCIE_HW_DEBUG__HW_04_DEBUG_MASK 0x00000010L
+#define PCIE_HW_DEBUG__HW_04_DEBUG__SHIFT 0x00000004
+#define PCIE_HW_DEBUG__HW_05_DEBUG_MASK 0x00000020L
+#define PCIE_HW_DEBUG__HW_05_DEBUG__SHIFT 0x00000005
+#define PCIE_HW_DEBUG__HW_06_DEBUG_MASK 0x00000040L
+#define PCIE_HW_DEBUG__HW_06_DEBUG__SHIFT 0x00000006
+#define PCIE_HW_DEBUG__HW_07_DEBUG_MASK 0x00000080L
+#define PCIE_HW_DEBUG__HW_07_DEBUG__SHIFT 0x00000007
+#define PCIE_HW_DEBUG__HW_08_DEBUG_MASK 0x00000100L
+#define PCIE_HW_DEBUG__HW_08_DEBUG__SHIFT 0x00000008
+#define PCIE_HW_DEBUG__HW_09_DEBUG_MASK 0x00000200L
+#define PCIE_HW_DEBUG__HW_09_DEBUG__SHIFT 0x00000009
+#define PCIE_HW_DEBUG__HW_10_DEBUG_MASK 0x00000400L
+#define PCIE_HW_DEBUG__HW_10_DEBUG__SHIFT 0x0000000a
+#define PCIE_HW_DEBUG__HW_11_DEBUG_MASK 0x00000800L
+#define PCIE_HW_DEBUG__HW_11_DEBUG__SHIFT 0x0000000b
+#define PCIE_HW_DEBUG__HW_12_DEBUG_MASK 0x00001000L
+#define PCIE_HW_DEBUG__HW_12_DEBUG__SHIFT 0x0000000c
+#define PCIE_HW_DEBUG__HW_13_DEBUG_MASK 0x00002000L
+#define PCIE_HW_DEBUG__HW_13_DEBUG__SHIFT 0x0000000d
+#define PCIE_HW_DEBUG__HW_14_DEBUG_MASK 0x00004000L
+#define PCIE_HW_DEBUG__HW_14_DEBUG__SHIFT 0x0000000e
+#define PCIE_HW_DEBUG__HW_15_DEBUG_MASK 0x00008000L
+#define PCIE_HW_DEBUG__HW_15_DEBUG__SHIFT 0x0000000f
+#define PCIE_I2C_REG_ADDR_EXPAND__I2C_REG_ADDR_MASK 0x0001ffffL
+#define PCIE_I2C_REG_ADDR_EXPAND__I2C_REG_ADDR__SHIFT 0x00000000
+#define PCIE_I2C_REG_DATA__I2C_REG_DATA_MASK 0xffffffffL
+#define PCIE_I2C_REG_DATA__I2C_REG_DATA__SHIFT 0x00000000
+#define PCIE_INDEX__PCIE_INDEX_MASK 0x000000ffL
+#define PCIE_INDEX__PCIE_INDEX__SHIFT 0x00000000
+#define PCIE_INT_CNTL__CORR_ERR_INT_EN_MASK 0x00000001L
+#define PCIE_INT_CNTL__CORR_ERR_INT_EN__SHIFT 0x00000000
+#define PCIE_INT_CNTL__FATAL_ERR_INT_EN_MASK 0x00000004L
+#define PCIE_INT_CNTL__FATAL_ERR_INT_EN__SHIFT 0x00000002
+#define PCIE_INT_CNTL__LINK_BW_INT_EN_MASK 0x00000080L
+#define PCIE_INT_CNTL__LINK_BW_INT_EN__SHIFT 0x00000007
+#define PCIE_INT_CNTL__MISC_ERR_INT_EN_MASK 0x00000010L
+#define PCIE_INT_CNTL__MISC_ERR_INT_EN__SHIFT 0x00000004
+#define PCIE_INT_CNTL__NON_FATAL_ERR_INT_EN_MASK 0x00000002L
+#define PCIE_INT_CNTL__NON_FATAL_ERR_INT_EN__SHIFT 0x00000001
+#define PCIE_INT_CNTL__POWER_STATE_CHG_INT_EN_MASK 0x00000040L
+#define PCIE_INT_CNTL__POWER_STATE_CHG_INT_EN__SHIFT 0x00000006
+#define PCIE_INT_CNTL__QUIESCE_RCVD_INT_EN_MASK 0x00000100L
+#define PCIE_INT_CNTL__QUIESCE_RCVD_INT_EN__SHIFT 0x00000008
+#define PCIE_INT_CNTL__USR_DETECTED_INT_EN_MASK 0x00000008L
+#define PCIE_INT_CNTL__USR_DETECTED_INT_EN__SHIFT 0x00000003
+#define PCIE_INT_STATUS__CORR_ERR_INT_STATUS_MASK 0x00000001L
+#define PCIE_INT_STATUS__CORR_ERR_INT_STATUS__SHIFT 0x00000000
+#define PCIE_INT_STATUS__FATAL_ERR_INT_STATUS_MASK 0x00000004L
+#define PCIE_INT_STATUS__FATAL_ERR_INT_STATUS__SHIFT 0x00000002
+#define PCIE_INT_STATUS__LINK_BW_INT_STATUS_MASK 0x00000080L
+#define PCIE_INT_STATUS__LINK_BW_INT_STATUS__SHIFT 0x00000007
+#define PCIE_INT_STATUS__MISC_ERR_INT_STATUS_MASK 0x00000010L
+#define PCIE_INT_STATUS__MISC_ERR_INT_STATUS__SHIFT 0x00000004
+#define PCIE_INT_STATUS__NON_FATAL_ERR_INT_STATUS_MASK 0x00000002L
+#define PCIE_INT_STATUS__NON_FATAL_ERR_INT_STATUS__SHIFT 0x00000001
+#define PCIE_INT_STATUS__POWER_STATE_CHG_INT_STATUS_MASK 0x00000040L
+#define PCIE_INT_STATUS__POWER_STATE_CHG_INT_STATUS__SHIFT 0x00000006
+#define PCIE_INT_STATUS__QUIESCE_RCVD_INT_STATUS_MASK 0x00000100L
+#define PCIE_INT_STATUS__QUIESCE_RCVD_INT_STATUS__SHIFT 0x00000008
+#define PCIE_INT_STATUS__USR_DETECTED_INT_STATUS_MASK 0x00000008L
+#define PCIE_INT_STATUS__USR_DETECTED_INT_STATUS__SHIFT 0x00000003
+#define PCIE_LC_BEST_EQ_SETTINGS__LC_BEST_CURSOR_MASK 0x0000fc00L
+#define PCIE_LC_BEST_EQ_SETTINGS__LC_BEST_CURSOR__SHIFT 0x0000000a
+#define PCIE_LC_BEST_EQ_SETTINGS__LC_BEST_FOM_MASK 0x3fc00000L
+#define PCIE_LC_BEST_EQ_SETTINGS__LC_BEST_FOM__SHIFT 0x00000016
+#define PCIE_LC_BEST_EQ_SETTINGS__LC_BEST_POSTCURSOR_MASK 0x003f0000L
+#define PCIE_LC_BEST_EQ_SETTINGS__LC_BEST_POSTCURSOR__SHIFT 0x00000010
+#define PCIE_LC_BEST_EQ_SETTINGS__LC_BEST_PRECURSOR_MASK 0x000003f0L
+#define PCIE_LC_BEST_EQ_SETTINGS__LC_BEST_PRECURSOR__SHIFT 0x00000004
+#define PCIE_LC_BEST_EQ_SETTINGS__LC_BEST_PRESET_MASK 0x0000000fL
+#define PCIE_LC_BEST_EQ_SETTINGS__LC_BEST_PRESET__SHIFT 0x00000000
+#define PCIE_LC_BW_CHANGE_CNTL__LC_BW_CHANGE_INT_EN_MASK 0x00000001L
+#define PCIE_LC_BW_CHANGE_CNTL__LC_BW_CHANGE_INT_EN__SHIFT 0x00000000
+#define PCIE_LC_BW_CHANGE_CNTL__LC_FAILED_SPEED_NEG_MASK 0x00000020L
+#define PCIE_LC_BW_CHANGE_CNTL__LC_FAILED_SPEED_NEG__SHIFT 0x00000005
+#define PCIE_LC_BW_CHANGE_CNTL__LC_HW_INIT_SPEED_CHANGE_MASK 0x00000002L
+#define PCIE_LC_BW_CHANGE_CNTL__LC_HW_INIT_SPEED_CHANGE__SHIFT 0x00000001
+#define PCIE_LC_BW_CHANGE_CNTL__LC_LINK_BW_NOTIFICATION_DETECT_MODE_MASK 0x00000400L
+#define PCIE_LC_BW_CHANGE_CNTL__LC_LINK_BW_NOTIFICATION_DETECT_MODE__SHIFT 0x0000000a
+#define PCIE_LC_BW_CHANGE_CNTL__LC_LONG_LW_CHANGE_MASK 0x00000040L
+#define PCIE_LC_BW_CHANGE_CNTL__LC_LONG_LW_CHANGE__SHIFT 0x00000006
+#define PCIE_LC_BW_CHANGE_CNTL__LC_LW_CHANGE_FAILED_MASK 0x00000200L
+#define PCIE_LC_BW_CHANGE_CNTL__LC_LW_CHANGE_FAILED__SHIFT 0x00000009
+#define PCIE_LC_BW_CHANGE_CNTL__LC_LW_CHANGE_OTHER_MASK 0x00000100L
+#define PCIE_LC_BW_CHANGE_CNTL__LC_LW_CHANGE_OTHER__SHIFT 0x00000008
+#define PCIE_LC_BW_CHANGE_CNTL__LC_OTHER_INIT_SPEED_CHANGE_MASK 0x00000008L
+#define PCIE_LC_BW_CHANGE_CNTL__LC_OTHER_INIT_SPEED_CHANGE__SHIFT 0x00000003
+#define PCIE_LC_BW_CHANGE_CNTL__LC_RELIABILITY_SPEED_CHANGE_MASK 0x00000010L
+#define PCIE_LC_BW_CHANGE_CNTL__LC_RELIABILITY_SPEED_CHANGE__SHIFT 0x00000004
+#define PCIE_LC_BW_CHANGE_CNTL__LC_SHORT_LW_CHANGE_MASK 0x00000080L
+#define PCIE_LC_BW_CHANGE_CNTL__LC_SHORT_LW_CHANGE__SHIFT 0x00000007
+#define PCIE_LC_BW_CHANGE_CNTL__LC_SW_INIT_SPEED_CHANGE_MASK 0x00000004L
+#define PCIE_LC_BW_CHANGE_CNTL__LC_SW_INIT_SPEED_CHANGE__SHIFT 0x00000002
+#define PCIE_LC_CDR_CNTL__LC_CDR_SET_TYPE_MASK 0x03000000L
+#define PCIE_LC_CDR_CNTL__LC_CDR_SET_TYPE__SHIFT 0x00000018
+#define PCIE_LC_CDR_CNTL__LC_CDR_TEST_OFF_MASK 0x00000fffL
+#define PCIE_LC_CDR_CNTL__LC_CDR_TEST_OFF__SHIFT 0x00000000
+#define PCIE_LC_CDR_CNTL__LC_CDR_TEST_SETS_MASK 0x00fff000L
+#define PCIE_LC_CDR_CNTL__LC_CDR_TEST_SETS__SHIFT 0x0000000c
+#define PCIE_LC_CNTL2__LC_ALLOW_PDWN_IN_L1_MASK 0x00020000L
+#define PCIE_LC_CNTL2__LC_ALLOW_PDWN_IN_L1__SHIFT 0x00000011
+#define PCIE_LC_CNTL2__LC_ALLOW_PDWN_IN_L23_MASK 0x00040000L
+#define PCIE_LC_CNTL2__LC_ALLOW_PDWN_IN_L23__SHIFT 0x00000012
+#define PCIE_LC_CNTL2__LC_ASSERT_INACTIVE_DURING_HOLD_MASK 0x00400000L
+#define PCIE_LC_CNTL2__LC_ASSERT_INACTIVE_DURING_HOLD__SHIFT 0x00000016
+#define PCIE_LC_CNTL2__LC_BLOCK_EL_IDLE_IN_L0_MASK 0x00100000L
+#define PCIE_LC_CNTL2__LC_BLOCK_EL_IDLE_IN_L0__SHIFT 0x00000014
+#define PCIE_LC_CNTL2__LC_DEASSERT_RX_EN_IN_L0S_MASK 0x00080000L
+#define PCIE_LC_CNTL2__LC_DEASSERT_RX_EN_IN_L0S__SHIFT 0x00000013
+#define PCIE_LC_CNTL2__LC_DISABLE_INFERRED_ELEC_IDLE_DET_MASK 0x00010000L
+#define PCIE_LC_CNTL2__LC_DISABLE_INFERRED_ELEC_IDLE_DET__SHIFT 0x00000010
+#define PCIE_LC_CNTL2__LC_DISABLE_LOST_SYM_LOCK_ARCS_MASK 0x04000000L
+#define PCIE_LC_CNTL2__LC_DISABLE_LOST_SYM_LOCK_ARCS__SHIFT 0x0000001a
+#define PCIE_LC_CNTL2__LC_ELEC_IDLE_MODE_MASK 0x0000c000L
+#define PCIE_LC_CNTL2__LC_ELEC_IDLE_MODE__SHIFT 0x0000000e
+#define PCIE_LC_CNTL2__LC_ENABLE_INFERRED_ELEC_IDLE_FOR_PI_MASK 0x80000000L
+#define PCIE_LC_CNTL2__LC_ENABLE_INFERRED_ELEC_IDLE_FOR_PI__SHIFT 0x0000001f
+#define PCIE_LC_CNTL2__LC_ILLEGAL_STATE_MASK 0x00000800L
+#define PCIE_LC_CNTL2__LC_ILLEGAL_STATE_RESTART_EN_MASK 0x00001000L
+#define PCIE_LC_CNTL2__LC_ILLEGAL_STATE_RESTART_EN__SHIFT 0x0000000c
+#define PCIE_LC_CNTL2__LC_ILLEGAL_STATE__SHIFT 0x0000000b
+#define PCIE_LC_CNTL2__LC_LINK_BW_NOTIFICATION_DIS_MASK 0x08000000L
+#define PCIE_LC_CNTL2__LC_LINK_BW_NOTIFICATION_DIS__SHIFT 0x0000001b
+#define PCIE_LC_CNTL2__LC_LINK_UP_REVERSAL_EN_MASK 0x00000400L
+#define PCIE_LC_CNTL2__LC_LINK_UP_REVERSAL_EN__SHIFT 0x0000000a
+#define PCIE_LC_CNTL2__LC_LOOK_FOR_BW_REDUCTION_MASK 0x00000080L
+#define PCIE_LC_CNTL2__LC_LOOK_FOR_BW_REDUCTION__SHIFT 0x00000007
+#define PCIE_LC_CNTL2__LC_MORE_TS2_EN_MASK 0x00000100L
+#define PCIE_LC_CNTL2__LC_MORE_TS2_EN__SHIFT 0x00000008
+#define PCIE_LC_CNTL2__LC_PMI_L1_WAIT_FOR_SLV_IDLE_MASK 0x10000000L
+#define PCIE_LC_CNTL2__LC_PMI_L1_WAIT_FOR_SLV_IDLE__SHIFT 0x0000001c
+#define PCIE_LC_CNTL2__LC_PWR_DOWN_NEG_OFF_LANES_MASK 0x02000000L
+#define PCIE_LC_CNTL2__LC_PWR_DOWN_NEG_OFF_LANES__SHIFT 0x00000019
+#define PCIE_LC_CNTL2__LC_RCV_L0_TO_RCV_L0S_DIS_MASK 0x00200000L
+#define PCIE_LC_CNTL2__LC_RCV_L0_TO_RCV_L0S_DIS__SHIFT 0x00000015
+#define PCIE_LC_CNTL2__LC_STATE_TIMED_OUT_MASK 0x00000040L
+#define PCIE_LC_CNTL2__LC_STATE_TIMED_OUT__SHIFT 0x00000006
+#define PCIE_LC_CNTL2__LC_TEST_TIMER_SEL_MASK 0x60000000L
+#define PCIE_LC_CNTL2__LC_TEST_TIMER_SEL__SHIFT 0x0000001d
+#define PCIE_LC_CNTL2__LC_TIMED_OUT_STATE_MASK 0x0000003fL
+#define PCIE_LC_CNTL2__LC_TIMED_OUT_STATE__SHIFT 0x00000000
+#define PCIE_LC_CNTL2__LC_WAIT_FOR_LANES_IN_LW_NEG_MASK 0x01800000L
+#define PCIE_LC_CNTL2__LC_WAIT_FOR_LANES_IN_LW_NEG__SHIFT 0x00000017
+#define PCIE_LC_CNTL2__LC_WAIT_FOR_OTHER_LANES_MODE_MASK 0x00002000L
+#define PCIE_LC_CNTL2__LC_WAIT_FOR_OTHER_LANES_MODE__SHIFT 0x0000000d
+#define PCIE_LC_CNTL2__LC_X12_NEGOTIATION_DIS_MASK 0x00000200L
+#define PCIE_LC_CNTL2__LC_X12_NEGOTIATION_DIS__SHIFT 0x00000009
+#define PCIE_LC_CNTL3__LC_AUTO_DISABLE_SPEED_SUPPORT_EN_MASK 0x00040000L
+#define PCIE_LC_CNTL3__LC_AUTO_DISABLE_SPEED_SUPPORT_EN__SHIFT 0x00000012
+#define PCIE_LC_CNTL3__LC_AUTO_DISABLE_SPEED_SUPPORT_MAX_FAIL_SEL_MASK 0x00180000L
+#define PCIE_LC_CNTL3__LC_AUTO_DISABLE_SPEED_SUPPORT_MAX_FAIL_SEL__SHIFT 0x00000013
+#define PCIE_LC_CNTL3__LC_AUTO_SPEED_CHANGE_ATTEMPT_FAILED_MASK 0x00000100L
+#define PCIE_LC_CNTL3__LC_AUTO_SPEED_CHANGE_ATTEMPT_FAILED__SHIFT 0x00000008
+#define PCIE_LC_CNTL3__LC_AUTO_SPEED_CHANGE_ATTEMPTS_ALLOWED_MASK 0x000000c0L
+#define PCIE_LC_CNTL3__LC_AUTO_SPEED_CHANGE_ATTEMPTS_ALLOWED__SHIFT 0x00000006
+#define PCIE_LC_CNTL3__LC_CHIP_BIF_USB_IDLE_EN_MASK 0x00010000L
+#define PCIE_LC_CNTL3__LC_CHIP_BIF_USB_IDLE_EN__SHIFT 0x00000010
+#define PCIE_LC_CNTL3__LC_CLR_FAILED_AUTO_SPD_CHANGE_CNT_MASK 0x00000200L
+#define PCIE_LC_CNTL3__LC_CLR_FAILED_AUTO_SPD_CHANGE_CNT__SHIFT 0x00000009
+#define PCIE_LC_CNTL3__LC_COMP_TO_DETECT_MASK 0x00000010L
+#define PCIE_LC_CNTL3__LC_COMP_TO_DETECT__SHIFT 0x00000004
+#define PCIE_LC_CNTL3__LC_DSC_DONT_ENTER_L23_AFTER_PME_ACK_MASK 0x00800000L
+#define PCIE_LC_CNTL3__LC_DSC_DONT_ENTER_L23_AFTER_PME_ACK__SHIFT 0x00000017
+#define PCIE_LC_CNTL3__LC_EHP_RX_PHY_CMD_MASK 0x00003000L
+#define PCIE_LC_CNTL3__LC_EHP_RX_PHY_CMD__SHIFT 0x0000000c
+#define PCIE_LC_CNTL3__LC_EHP_TX_PHY_CMD_MASK 0x0000c000L
+#define PCIE_LC_CNTL3__LC_EHP_TX_PHY_CMD__SHIFT 0x0000000e
+#define PCIE_LC_CNTL3__LC_ENHANCED_HOT_PLUG_EN_MASK 0x00000400L
+#define PCIE_LC_CNTL3__LC_ENHANCED_HOT_PLUG_EN__SHIFT 0x0000000a
+#define PCIE_LC_CNTL3__LC_FAST_L1_ENTRY_EXIT_EN_MASK 0x00200000L
+#define PCIE_LC_CNTL3__LC_FAST_L1_ENTRY_EXIT_EN__SHIFT 0x00000015
+#define PCIE_LC_CNTL3__LC_GO_TO_RECOVERY_MASK 0x40000000L
+#define PCIE_LC_CNTL3__LC_GO_TO_RECOVERY__SHIFT 0x0000001e
+#define PCIE_LC_CNTL3__LC_HW_VOLTAGE_IF_CONTROL_MASK 0x03000000L
+#define PCIE_LC_CNTL3__LC_HW_VOLTAGE_IF_CONTROL__SHIFT 0x00000018
+#define PCIE_LC_CNTL3__LC_L1_BLOCK_RECONFIG_EN_MASK 0x00020000L
+#define PCIE_LC_CNTL3__LC_L1_BLOCK_RECONFIG_EN__SHIFT 0x00000011
+#define PCIE_LC_CNTL3__LC_N_EIE_SEL_MASK 0x80000000L
+#define PCIE_LC_CNTL3__LC_N_EIE_SEL__SHIFT 0x0000001f
+#define PCIE_LC_CNTL3__LC_RCVD_DEEMPHASIS_MASK 0x00000008L
+#define PCIE_LC_CNTL3__LC_RCVD_DEEMPHASIS__SHIFT 0x00000003
+#define PCIE_LC_CNTL3__LC_RCVR_DET_EN_OVERRIDE_MASK 0x00000800L
+#define PCIE_LC_CNTL3__LC_RCVR_DET_EN_OVERRIDE__SHIFT 0x0000000b
+#define PCIE_LC_CNTL3__LC_RESET_TSX_CNT_IN_RLOCK_EN_MASK 0x00000020L
+#define PCIE_LC_CNTL3__LC_RESET_TSX_CNT_IN_RLOCK_EN__SHIFT 0x00000005
+#define PCIE_LC_CNTL3__LC_RXPHYCMD_INACTIVE_EN_MODE_MASK 0x00400000L
+#define PCIE_LC_CNTL3__LC_RXPHYCMD_INACTIVE_EN_MODE__SHIFT 0x00000016
+#define PCIE_LC_CNTL3__LC_SELECT_DEEMPHASIS_CNTL_MASK 0x00000006L
+#define PCIE_LC_CNTL3__LC_SELECT_DEEMPHASIS_CNTL__SHIFT 0x00000001
+#define PCIE_LC_CNTL3__LC_SELECT_DEEMPHASIS_MASK 0x00000001L
+#define PCIE_LC_CNTL3__LC_SELECT_DEEMPHASIS__SHIFT 0x00000000
+#define PCIE_LC_CNTL3__LC_VOLTAGE_TIMER_SEL_MASK 0x3c000000L
+#define PCIE_LC_CNTL3__LC_VOLTAGE_TIMER_SEL__SHIFT 0x0000001a
+#define PCIE_LC_CNTL4__LC_8GT_SKIP_ORDER_EN_MASK 0x02000000L
+#define PCIE_LC_CNTL4__LC_8GT_SKIP_ORDER_EN__SHIFT 0x00000019
+#define PCIE_LC_CNTL4__LC_BYPASS_EQ_MASK 0x00000010L
+#define PCIE_LC_CNTL4__LC_BYPASS_EQ_REQ_PHASE_MASK 0x00010000L
+#define PCIE_LC_CNTL4__LC_BYPASS_EQ_REQ_PHASE__SHIFT 0x00000010
+#define PCIE_LC_CNTL4__LC_BYPASS_EQ__SHIFT 0x00000004
+#define PCIE_LC_CNTL4__LC_DSC_CHECK_COEFFS_IN_RLOCK_MASK 0x00000400L
+#define PCIE_LC_CNTL4__LC_DSC_CHECK_COEFFS_IN_RLOCK__SHIFT 0x0000000a
+#define PCIE_LC_CNTL4__LC_EQ_SEARCH_MODE_MASK 0x00000300L
+#define PCIE_LC_CNTL4__LC_EQ_SEARCH_MODE__SHIFT 0x00000008
+#define PCIE_LC_CNTL4__LC_EQ_WAIT_FOR_EVAL_DONE_MASK 0x01000000L
+#define PCIE_LC_CNTL4__LC_EQ_WAIT_FOR_EVAL_DONE__SHIFT 0x00000018
+#define PCIE_LC_CNTL4__LC_EXTEND_EIEOS_MASK 0x00000040L
+#define PCIE_LC_CNTL4__LC_EXTEND_EIEOS__SHIFT 0x00000006
+#define PCIE_LC_CNTL4__LC_FORCE_PRESET_IN_EQ_REQ_PHASE_MASK 0x00020000L
+#define PCIE_LC_CNTL4__LC_FORCE_PRESET_IN_EQ_REQ_PHASE__SHIFT 0x00000011
+#define PCIE_LC_CNTL4__LC_FORCE_PRESET_VALUE_MASK 0x003c0000L
+#define PCIE_LC_CNTL4__LC_FORCE_PRESET_VALUE__SHIFT 0x00000012
+#define PCIE_LC_CNTL4__LC_IGNORE_PARITY_MASK 0x00000080L
+#define PCIE_LC_CNTL4__LC_IGNORE_PARITY__SHIFT 0x00000007
+#define PCIE_LC_CNTL4__LC_PCIE_TX_FULL_SWING_MASK 0x00800000L
+#define PCIE_LC_CNTL4__LC_PCIE_TX_FULL_SWING__SHIFT 0x00000017
+#define PCIE_LC_CNTL4__LC_QUIESCE_RCVD_MASK 0x00004000L
+#define PCIE_LC_CNTL4__LC_QUIESCE_RCVD__SHIFT 0x0000000e
+#define PCIE_LC_CNTL4__LC_REDO_EQ_MASK 0x00000020L
+#define PCIE_LC_CNTL4__LC_REDO_EQ__SHIFT 0x00000005
+#define PCIE_LC_CNTL4__LC_SET_QUIESCE_MASK 0x00002000L
+#define PCIE_LC_CNTL4__LC_SET_QUIESCE__SHIFT 0x0000000d
+#define PCIE_LC_CNTL4__LC_TX_ENABLE_BEHAVIOUR_MASK 0x00000003L
+#define PCIE_LC_CNTL4__LC_TX_ENABLE_BEHAVIOUR__SHIFT 0x00000000
+#define PCIE_LC_CNTL4__LC_UNEXPECTED_COEFFS_RCVD_MASK 0x00008000L
+#define PCIE_LC_CNTL4__LC_UNEXPECTED_COEFFS_RCVD__SHIFT 0x0000000f
+#define PCIE_LC_CNTL4__LC_USC_DELAY_DLLPS_MASK 0x00400000L
+#define PCIE_LC_CNTL4__LC_USC_DELAY_DLLPS__SHIFT 0x00000016
+#define PCIE_LC_CNTL4__LC_USC_EQ_NOT_REQD_MASK 0x00000800L
+#define PCIE_LC_CNTL4__LC_USC_EQ_NOT_REQD__SHIFT 0x0000000b
+#define PCIE_LC_CNTL4__LC_USC_GO_TO_EQ_MASK 0x00001000L
+#define PCIE_LC_CNTL4__LC_USC_GO_TO_EQ__SHIFT 0x0000000c
+#define PCIE_LC_CNTL4__LC_WAIT_FOR_MORE_TS_IN_RLOCK_MASK 0xfc000000L
+#define PCIE_LC_CNTL4__LC_WAIT_FOR_MORE_TS_IN_RLOCK__SHIFT 0x0000001a
+#define PCIE_LC_CNTL5__LC_EQ_FS_0_MASK 0x0000003fL
+#define PCIE_LC_CNTL5__LC_EQ_FS_0__SHIFT 0x00000000
+#define PCIE_LC_CNTL5__LC_EQ_FS_8_MASK 0x00000fc0L
+#define PCIE_LC_CNTL5__LC_EQ_FS_8__SHIFT 0x00000006
+#define PCIE_LC_CNTL5__LC_EQ_LF_0_MASK 0x0003f000L
+#define PCIE_LC_CNTL5__LC_EQ_LF_0__SHIFT 0x0000000c
+#define PCIE_LC_CNTL5__LC_EQ_LF_8_MASK 0x00fc0000L
+#define PCIE_LC_CNTL5__LC_EQ_LF_8__SHIFT 0x00000012
+#define PCIE_LC_CNTL__LC_16X_CLEAR_TX_PIPE_MASK 0x000000f0L
+#define PCIE_LC_CNTL__LC_16X_CLEAR_TX_PIPE__SHIFT 0x00000004
+#define PCIE_LC_CNTL__LC_ASPM_TO_L1_DIS_MASK 0x01000000L
+#define PCIE_LC_CNTL__LC_ASPM_TO_L1_DIS__SHIFT 0x00000018
+#define PCIE_LC_CNTL__LC_DELAY_COUNT_MASK 0x06000000L
+#define PCIE_LC_CNTL__LC_DELAY_COUNT__SHIFT 0x00000019
+#define PCIE_LC_CNTL__LC_DELAY_L0S_EXIT_MASK 0x08000000L
+#define PCIE_LC_CNTL__LC_DELAY_L0S_EXIT__SHIFT 0x0000001b
+#define PCIE_LC_CNTL__LC_DELAY_L1_EXIT_MASK 0x10000000L
+#define PCIE_LC_CNTL__LC_DELAY_L1_EXIT__SHIFT 0x0000001c
+#define PCIE_LC_CNTL__LC_DONT_ENTER_L23_IN_D0_MASK 0x00000002L
+#define PCIE_LC_CNTL__LC_DONT_ENTER_L23_IN_D0__SHIFT 0x00000001
+#define PCIE_LC_CNTL__LC_ESCAPE_L1L23_EN_MASK 0x40000000L
+#define PCIE_LC_CNTL__LC_ESCAPE_L1L23_EN__SHIFT 0x0000001e
+#define PCIE_LC_CNTL__LC_EXTEND_WAIT_FOR_EL_IDLE_MASK 0x20000000L
+#define PCIE_LC_CNTL__LC_EXTEND_WAIT_FOR_EL_IDLE__SHIFT 0x0000001d
+#define PCIE_LC_CNTL__LC_FACTOR_IN_EXT_SYNC_MASK 0x00100000L
+#define PCIE_LC_CNTL__LC_FACTOR_IN_EXT_SYNC__SHIFT 0x00000014
+#define PCIE_LC_CNTL__LC_GATE_RCVR_IDLE_MASK 0x80000000L
+#define PCIE_LC_CNTL__LC_GATE_RCVR_IDLE__SHIFT 0x0000001f
+#define PCIE_LC_CNTL__LC_INC_N_FTS_EN_MASK 0x00020000L
+#define PCIE_LC_CNTL__LC_INC_N_FTS_EN__SHIFT 0x00000011
+#define PCIE_LC_CNTL__LC_L0S_INACTIVITY_MASK 0x00000f00L
+#define PCIE_LC_CNTL__LC_L0S_INACTIVITY__SHIFT 0x00000008
+#define PCIE_LC_CNTL__LC_L1_IMMEDIATE_ACK_MASK 0x00800000L
+#define PCIE_LC_CNTL__LC_L1_IMMEDIATE_ACK__SHIFT 0x00000017
+#define PCIE_LC_CNTL__LC_L1_INACTIVITY_MASK 0x0000f000L
+#define PCIE_LC_CNTL__LC_L1_INACTIVITY__SHIFT 0x0000000c
+#define PCIE_LC_CNTL__LC_LOOK_FOR_IDLE_IN_L1L23_MASK 0x000c0000L
+#define PCIE_LC_CNTL__LC_LOOK_FOR_IDLE_IN_L1L23__SHIFT 0x00000012
+#define PCIE_LC_CNTL__LC_PMI_TO_L1_DIS_MASK 0x00010000L
+#define PCIE_LC_CNTL__LC_PMI_TO_L1_DIS__SHIFT 0x00000010
+#define PCIE_LC_CNTL__LC_RESET_L_IDLE_COUNT_EN_MASK 0x00000004L
+#define PCIE_LC_CNTL__LC_RESET_L_IDLE_COUNT_EN__SHIFT 0x00000002
+#define PCIE_LC_CNTL__LC_RESET_LINK_MASK 0x00000008L
+#define PCIE_LC_CNTL__LC_RESET_LINK__SHIFT 0x00000003
+#define PCIE_LC_CNTL__LC_WAIT_FOR_PM_ACK_DIS_MASK 0x00200000L
+#define PCIE_LC_CNTL__LC_WAIT_FOR_PM_ACK_DIS__SHIFT 0x00000015
+#define PCIE_LC_CNTL__LC_WAKE_FROM_L23_MASK 0x00400000L
+#define PCIE_LC_CNTL__LC_WAKE_FROM_L23__SHIFT 0x00000016
+#define PCIE_LC_FORCE_COEFF__LC_3X3_COEFF_SEARCH_EN_MASK 0x00080000L
+#define PCIE_LC_FORCE_COEFF__LC_3X3_COEFF_SEARCH_EN__SHIFT 0x00000013
+#define PCIE_LC_FORCE_COEFF__LC_FORCE_COEFF_MASK 0x00000001L
+#define PCIE_LC_FORCE_COEFF__LC_FORCE_COEFF__SHIFT 0x00000000
+#define PCIE_LC_FORCE_COEFF__LC_FORCE_CURSOR_MASK 0x00001f80L
+#define PCIE_LC_FORCE_COEFF__LC_FORCE_CURSOR__SHIFT 0x00000007
+#define PCIE_LC_FORCE_COEFF__LC_FORCE_POST_CURSOR_MASK 0x0007e000L
+#define PCIE_LC_FORCE_COEFF__LC_FORCE_POST_CURSOR__SHIFT 0x0000000d
+#define PCIE_LC_FORCE_COEFF__LC_FORCE_PRE_CURSOR_MASK 0x0000007eL
+#define PCIE_LC_FORCE_COEFF__LC_FORCE_PRE_CURSOR__SHIFT 0x00000001
+#define PCIE_LC_FORCE_EQ_REQ_COEFF__LC_FORCE_COEFF_IN_EQ_REQ_PHASE_MASK 0x00000001L
+#define PCIE_LC_FORCE_EQ_REQ_COEFF__LC_FORCE_COEFF_IN_EQ_REQ_PHASE__SHIFT 0x00000000
+#define PCIE_LC_FORCE_EQ_REQ_COEFF__LC_FORCE_CURSOR_REQ_MASK 0x00001f80L
+#define PCIE_LC_FORCE_EQ_REQ_COEFF__LC_FORCE_CURSOR_REQ__SHIFT 0x00000007
+#define PCIE_LC_FORCE_EQ_REQ_COEFF__LC_FORCE_POST_CURSOR_REQ_MASK 0x0007e000L
+#define PCIE_LC_FORCE_EQ_REQ_COEFF__LC_FORCE_POST_CURSOR_REQ__SHIFT 0x0000000d
+#define PCIE_LC_FORCE_EQ_REQ_COEFF__LC_FORCE_PRE_CURSOR_REQ_MASK 0x0000007eL
+#define PCIE_LC_FORCE_EQ_REQ_COEFF__LC_FORCE_PRE_CURSOR_REQ__SHIFT 0x00000001
+#define PCIE_LC_FORCE_EQ_REQ_COEFF__LC_FS_OTHER_END_MASK 0x01f80000L
+#define PCIE_LC_FORCE_EQ_REQ_COEFF__LC_FS_OTHER_END__SHIFT 0x00000013
+#define PCIE_LC_FORCE_EQ_REQ_COEFF__LC_LF_OTHER_END_MASK 0x7e000000L
+#define PCIE_LC_FORCE_EQ_REQ_COEFF__LC_LF_OTHER_END__SHIFT 0x00000019
+#define PCIE_LC_LANE_CNTL__LC_CORRUPTED_LANES_MASK 0x0000ffffL
+#define PCIE_LC_LANE_CNTL__LC_CORRUPTED_LANES__SHIFT 0x00000000
+#define PCIE_LC_LANE_CNTL__LC_LANE_DIS_MASK 0xffff0000L
+#define PCIE_LC_LANE_CNTL__LC_LANE_DIS__SHIFT 0x00000010
+#define PCIE_LC_LINK_WIDTH_CNTL__LC_DEASSERT_TX_PDNB_MASK 0x00010000L
+#define PCIE_LC_LINK_WIDTH_CNTL__LC_DEASSERT_TX_PDNB__SHIFT 0x00000010
+#define PCIE_LC_LINK_WIDTH_CNTL__LC_DUAL_END_RECONFIG_EN_MASK 0x00080000L
+#define PCIE_LC_LINK_WIDTH_CNTL__LC_DUAL_END_RECONFIG_EN__SHIFT 0x00000013
+#define PCIE_LC_LINK_WIDTH_CNTL__LC_DYN_LANES_PWR_STATE_MASK 0x00600000L
+#define PCIE_LC_LINK_WIDTH_CNTL__LC_DYN_LANES_PWR_STATE__SHIFT 0x00000015
+#define PCIE_LC_LINK_WIDTH_CNTL__LC_DYNLINK_MST_EN_MASK 0x00040000L
+#define PCIE_LC_LINK_WIDTH_CNTL__LC_DYNLINK_MST_EN__SHIFT 0x00000012
+#define PCIE_LC_LINK_WIDTH_CNTL__LC_EQ_REVERSAL_LOGIC_EN_MASK 0x00800000L
+#define PCIE_LC_LINK_WIDTH_CNTL__LC_EQ_REVERSAL_LOGIC_EN__SHIFT 0x00000017
+#define PCIE_LC_LINK_WIDTH_CNTL__LC_L1_RECONFIG_EN_MASK 0x00020000L
+#define PCIE_LC_LINK_WIDTH_CNTL__LC_L1_RECONFIG_EN__SHIFT 0x00000011
+#define PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_MASK 0x00000007L
+#define PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD_MASK 0x00000070L
+#define PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD__SHIFT 0x00000004
+#define PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH__SHIFT 0x00000000
+#define PCIE_LC_LINK_WIDTH_CNTL__LC_RECONFIG_ARC_MISSING_ESCAPE_MASK 0x00000080L
+#define PCIE_LC_LINK_WIDTH_CNTL__LC_RECONFIG_ARC_MISSING_ESCAPE__SHIFT 0x00000007
+#define PCIE_LC_LINK_WIDTH_CNTL__LC_RECONFIG_NOW_MASK 0x00000100L
+#define PCIE_LC_LINK_WIDTH_CNTL__LC_RECONFIG_NOW__SHIFT 0x00000008
+#define PCIE_LC_LINK_WIDTH_CNTL__LC_RENEGOTIATE_EN_MASK 0x00000400L
+#define PCIE_LC_LINK_WIDTH_CNTL__LC_RENEGOTIATE_EN__SHIFT 0x0000000a
+#define PCIE_LC_LINK_WIDTH_CNTL__LC_RENEGOTIATION_SUPPORT_MASK 0x00000200L
+#define PCIE_LC_LINK_WIDTH_CNTL__LC_RENEGOTIATION_SUPPORT__SHIFT 0x00000009
+#define PCIE_LC_LINK_WIDTH_CNTL__LC_SHORT_RECONFIG_EN_MASK 0x00000800L
+#define PCIE_LC_LINK_WIDTH_CNTL__LC_SHORT_RECONFIG_EN__SHIFT 0x0000000b
+#define PCIE_LC_LINK_WIDTH_CNTL__LC_UPCFG_TIMER_SEL_MASK 0x00008000L
+#define PCIE_LC_LINK_WIDTH_CNTL__LC_UPCFG_TIMER_SEL__SHIFT 0x0000000f
+#define PCIE_LC_LINK_WIDTH_CNTL__LC_UPCFG_WAIT_FOR_RCVR_DIS_MASK 0x00004000L
+#define PCIE_LC_LINK_WIDTH_CNTL__LC_UPCFG_WAIT_FOR_RCVR_DIS__SHIFT 0x0000000e
+#define PCIE_LC_LINK_WIDTH_CNTL__LC_UPCONFIGURE_CAPABLE_MASK 0x00100000L
+#define PCIE_LC_LINK_WIDTH_CNTL__LC_UPCONFIGURE_CAPABLE__SHIFT 0x00000014
+#define PCIE_LC_LINK_WIDTH_CNTL__LC_UPCONFIGURE_DIS_MASK 0x00002000L
+#define PCIE_LC_LINK_WIDTH_CNTL__LC_UPCONFIGURE_DIS__SHIFT 0x0000000d
+#define PCIE_LC_LINK_WIDTH_CNTL__LC_UPCONFIGURE_SUPPORT_MASK 0x00001000L
+#define PCIE_LC_LINK_WIDTH_CNTL__LC_UPCONFIGURE_SUPPORT__SHIFT 0x0000000c
+#define PCIE_LC_N_FTS_CNTL__LC_N_FTS_MASK 0xff000000L
+#define PCIE_LC_N_FTS_CNTL__LC_N_FTS__SHIFT 0x00000018
+#define PCIE_LC_N_FTS_CNTL__LC_XMIT_FTS_BEFORE_RECOVERY_MASK 0x00000200L
+#define PCIE_LC_N_FTS_CNTL__LC_XMIT_FTS_BEFORE_RECOVERY__SHIFT 0x00000009
+#define PCIE_LC_N_FTS_CNTL__LC_XMIT_N_FTS_LIMIT_MASK 0x00ff0000L
+#define PCIE_LC_N_FTS_CNTL__LC_XMIT_N_FTS_LIMIT__SHIFT 0x00000010
+#define PCIE_LC_N_FTS_CNTL__LC_XMIT_N_FTS_MASK 0x000000ffL
+#define PCIE_LC_N_FTS_CNTL__LC_XMIT_N_FTS_OVERRIDE_EN_MASK 0x00000100L
+#define PCIE_LC_N_FTS_CNTL__LC_XMIT_N_FTS_OVERRIDE_EN__SHIFT 0x00000008
+#define PCIE_LC_N_FTS_CNTL__LC_XMIT_N_FTS__SHIFT 0x00000000
+#define PCIE_LC_SPEED_CNTL__LC_1_OR_MORE_TS2_SPEED_ARC_EN_MASK 0x00020000L
+#define PCIE_LC_SPEED_CNTL__LC_1_OR_MORE_TS2_SPEED_ARC_EN__SHIFT 0x00000011
+#define PCIE_LC_SPEED_CNTL__LC_AUTO_RECOVERY_DIS_MASK 0x00400000L
+#define PCIE_LC_SPEED_CNTL__LC_AUTO_RECOVERY_DIS__SHIFT 0x00000016
+#define PCIE_LC_SPEED_CNTL__LC_CHECK_DATA_RATE_MASK 0x04000000L
+#define PCIE_LC_SPEED_CNTL__LC_CHECK_DATA_RATE__SHIFT 0x0000001a
+#define PCIE_LC_SPEED_CNTL__LC_CLR_FAILED_SPD_CHANGE_CNT_MASK 0x00010000L
+#define PCIE_LC_SPEED_CNTL__LC_CLR_FAILED_SPD_CHANGE_CNT__SHIFT 0x00000010
+#define PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE_MASK 0x00006000L
+#define PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE__SHIFT 0x0000000d
+#define PCIE_LC_SPEED_CNTL__LC_DATA_RATE_ADVERTISED_MASK 0x03000000L
+#define PCIE_LC_SPEED_CNTL__LC_DATA_RATE_ADVERTISED__SHIFT 0x00000018
+#define PCIE_LC_SPEED_CNTL__LC_DELAY_COEFF_UPDATE_DIS_MASK 0x80000000L
+#define PCIE_LC_SPEED_CNTL__LC_DELAY_COEFF_UPDATE_DIS__SHIFT 0x0000001f
+#define PCIE_LC_SPEED_CNTL__LC_DONT_CHECK_EQTS_IN_RCFG_MASK 0x40000000L
+#define PCIE_LC_SPEED_CNTL__LC_DONT_CHECK_EQTS_IN_RCFG__SHIFT 0x0000001e
+#define PCIE_LC_SPEED_CNTL__LC_DONT_CLR_TARGET_SPD_CHANGE_STATUS_MASK 0x00008000L
+#define PCIE_LC_SPEED_CNTL__LC_DONT_CLR_TARGET_SPD_CHANGE_STATUS__SHIFT 0x0000000f
+#define PCIE_LC_SPEED_CNTL__LC_FORCE_DIS_HW_SPEED_CHANGE_MASK 0x00000100L
+#define PCIE_LC_SPEED_CNTL__LC_FORCE_DIS_HW_SPEED_CHANGE__SHIFT 0x00000008
+#define PCIE_LC_SPEED_CNTL__LC_FORCE_DIS_SW_SPEED_CHANGE_MASK 0x00000040L
+#define PCIE_LC_SPEED_CNTL__LC_FORCE_DIS_SW_SPEED_CHANGE__SHIFT 0x00000006
+#define PCIE_LC_SPEED_CNTL__LC_FORCE_EN_HW_SPEED_CHANGE_MASK 0x00000080L
+#define PCIE_LC_SPEED_CNTL__LC_FORCE_EN_HW_SPEED_CHANGE__SHIFT 0x00000007
+#define PCIE_LC_SPEED_CNTL__LC_FORCE_EN_SW_SPEED_CHANGE_MASK 0x00000020L
+#define PCIE_LC_SPEED_CNTL__LC_FORCE_EN_SW_SPEED_CHANGE__SHIFT 0x00000005
+#define PCIE_LC_SPEED_CNTL__LC_GEN2_EN_STRAP_MASK 0x00000001L
+#define PCIE_LC_SPEED_CNTL__LC_GEN2_EN_STRAP__SHIFT 0x00000000
+#define PCIE_LC_SPEED_CNTL__LC_GEN3_EN_STRAP_MASK 0x00000002L
+#define PCIE_LC_SPEED_CNTL__LC_GEN3_EN_STRAP__SHIFT 0x00000001
+#define PCIE_LC_SPEED_CNTL__LC_INITIATE_LINK_SPEED_CHANGE_MASK 0x00000200L
+#define PCIE_LC_SPEED_CNTL__LC_INITIATE_LINK_SPEED_CHANGE__SHIFT 0x00000009
+#define PCIE_LC_SPEED_CNTL__LC_INIT_SPEED_NEG_IN_L0s_EN_MASK 0x10000000L
+#define PCIE_LC_SPEED_CNTL__LC_INIT_SPEED_NEG_IN_L0s_EN__SHIFT 0x0000001c
+#define PCIE_LC_SPEED_CNTL__LC_INIT_SPEED_NEG_IN_L1_EN_MASK 0x20000000L
+#define PCIE_LC_SPEED_CNTL__LC_INIT_SPEED_NEG_IN_L1_EN__SHIFT 0x0000001d
+#define PCIE_LC_SPEED_CNTL__LC_MULT_UPSTREAM_AUTO_SPD_CHNG_EN_MASK 0x08000000L
+#define PCIE_LC_SPEED_CNTL__LC_MULT_UPSTREAM_AUTO_SPD_CHNG_EN__SHIFT 0x0000001b
+#define PCIE_LC_SPEED_CNTL__LC_OTHER_SIDE_EVER_SENT_GEN2_MASK 0x00040000L
+#define PCIE_LC_SPEED_CNTL__LC_OTHER_SIDE_EVER_SENT_GEN2__SHIFT 0x00000012
+#define PCIE_LC_SPEED_CNTL__LC_OTHER_SIDE_EVER_SENT_GEN3_MASK 0x00100000L
+#define PCIE_LC_SPEED_CNTL__LC_OTHER_SIDE_EVER_SENT_GEN3__SHIFT 0x00000014
+#define PCIE_LC_SPEED_CNTL__LC_OTHER_SIDE_SUPPORTS_GEN2_MASK 0x00080000L
+#define PCIE_LC_SPEED_CNTL__LC_OTHER_SIDE_SUPPORTS_GEN2__SHIFT 0x00000013
+#define PCIE_LC_SPEED_CNTL__LC_OTHER_SIDE_SUPPORTS_GEN3_MASK 0x00200000L
+#define PCIE_LC_SPEED_CNTL__LC_OTHER_SIDE_SUPPORTS_GEN3__SHIFT 0x00000015
+#define PCIE_LC_SPEED_CNTL__LC_SPEED_CHANGE_ATTEMPT_FAILED_MASK 0x00001000L
+#define PCIE_LC_SPEED_CNTL__LC_SPEED_CHANGE_ATTEMPT_FAILED__SHIFT 0x0000000c
+#define PCIE_LC_SPEED_CNTL__LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_MASK 0x00000c00L
+#define PCIE_LC_SPEED_CNTL__LC_SPEED_CHANGE_ATTEMPTS_ALLOWED__SHIFT 0x0000000a
+#define PCIE_LC_SPEED_CNTL__LC_SPEED_CHANGE_STATUS_MASK 0x00800000L
+#define PCIE_LC_SPEED_CNTL__LC_SPEED_CHANGE_STATUS__SHIFT 0x00000017
+#define PCIE_LC_SPEED_CNTL__LC_TARGET_LINK_SPEED_OVERRIDE_EN_MASK 0x00000004L
+#define PCIE_LC_SPEED_CNTL__LC_TARGET_LINK_SPEED_OVERRIDE_EN__SHIFT 0x00000002
+#define PCIE_LC_SPEED_CNTL__LC_TARGET_LINK_SPEED_OVERRIDE_MASK 0x00000018L
+#define PCIE_LC_SPEED_CNTL__LC_TARGET_LINK_SPEED_OVERRIDE__SHIFT 0x00000003
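/*
 * A minimal read-modify-write sketch for the same convention: updating
 * a field means clearing its mask and OR-ing in the new value shifted
 * into place.  pcie_lc_set_target_speed() is a hypothetical helper; it
 * assumes the caller has already read the 32-bit PCIE_LC_SPEED_CNTL
 * value and will write the result back through its own accessor.
 */
static inline unsigned int pcie_lc_set_target_speed(unsigned int reg,
						    unsigned int speed)
{
	/* Clear the old field, then insert the new value, masked to fit. */
	reg &= ~PCIE_LC_SPEED_CNTL__LC_TARGET_LINK_SPEED_OVERRIDE_MASK;
	reg |= (speed << PCIE_LC_SPEED_CNTL__LC_TARGET_LINK_SPEED_OVERRIDE__SHIFT)
		& PCIE_LC_SPEED_CNTL__LC_TARGET_LINK_SPEED_OVERRIDE_MASK;
	return reg;
}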
+#define PCIE_LC_STATE0__LC_CURRENT_STATE_MASK 0x0000003fL
+#define PCIE_LC_STATE0__LC_CURRENT_STATE__SHIFT 0x00000000
+#define PCIE_LC_STATE0__LC_PREV_STATE1_MASK 0x00003f00L
+#define PCIE_LC_STATE0__LC_PREV_STATE1__SHIFT 0x00000008
+#define PCIE_LC_STATE0__LC_PREV_STATE2_MASK 0x003f0000L
+#define PCIE_LC_STATE0__LC_PREV_STATE2__SHIFT 0x00000010
+#define PCIE_LC_STATE0__LC_PREV_STATE3_MASK 0x3f000000L
+#define PCIE_LC_STATE0__LC_PREV_STATE3__SHIFT 0x00000018
+#define PCIE_LC_STATE10__LC_PREV_STATE40_MASK 0x0000003fL
+#define PCIE_LC_STATE10__LC_PREV_STATE40__SHIFT 0x00000000
+#define PCIE_LC_STATE10__LC_PREV_STATE41_MASK 0x00003f00L
+#define PCIE_LC_STATE10__LC_PREV_STATE41__SHIFT 0x00000008
+#define PCIE_LC_STATE10__LC_PREV_STATE42_MASK 0x003f0000L
+#define PCIE_LC_STATE10__LC_PREV_STATE42__SHIFT 0x00000010
+#define PCIE_LC_STATE10__LC_PREV_STATE43_MASK 0x3f000000L
+#define PCIE_LC_STATE10__LC_PREV_STATE43__SHIFT 0x00000018
+#define PCIE_LC_STATE11__LC_PREV_STATE44_MASK 0x0000003fL
+#define PCIE_LC_STATE11__LC_PREV_STATE44__SHIFT 0x00000000
+#define PCIE_LC_STATE11__LC_PREV_STATE45_MASK 0x00003f00L
+#define PCIE_LC_STATE11__LC_PREV_STATE45__SHIFT 0x00000008
+#define PCIE_LC_STATE11__LC_PREV_STATE46_MASK 0x003f0000L
+#define PCIE_LC_STATE11__LC_PREV_STATE46__SHIFT 0x00000010
+#define PCIE_LC_STATE11__LC_PREV_STATE47_MASK 0x3f000000L
+#define PCIE_LC_STATE11__LC_PREV_STATE47__SHIFT 0x00000018
+#define PCIE_LC_STATE1__LC_PREV_STATE4_MASK 0x0000003fL
+#define PCIE_LC_STATE1__LC_PREV_STATE4__SHIFT 0x00000000
+#define PCIE_LC_STATE1__LC_PREV_STATE5_MASK 0x00003f00L
+#define PCIE_LC_STATE1__LC_PREV_STATE5__SHIFT 0x00000008
+#define PCIE_LC_STATE1__LC_PREV_STATE6_MASK 0x003f0000L
+#define PCIE_LC_STATE1__LC_PREV_STATE6__SHIFT 0x00000010
+#define PCIE_LC_STATE1__LC_PREV_STATE7_MASK 0x3f000000L
+#define PCIE_LC_STATE1__LC_PREV_STATE7__SHIFT 0x00000018
+#define PCIE_LC_STATE2__LC_PREV_STATE10_MASK 0x003f0000L
+#define PCIE_LC_STATE2__LC_PREV_STATE10__SHIFT 0x00000010
+#define PCIE_LC_STATE2__LC_PREV_STATE11_MASK 0x3f000000L
+#define PCIE_LC_STATE2__LC_PREV_STATE11__SHIFT 0x00000018
+#define PCIE_LC_STATE2__LC_PREV_STATE8_MASK 0x0000003fL
+#define PCIE_LC_STATE2__LC_PREV_STATE8__SHIFT 0x00000000
+#define PCIE_LC_STATE2__LC_PREV_STATE9_MASK 0x00003f00L
+#define PCIE_LC_STATE2__LC_PREV_STATE9__SHIFT 0x00000008
+#define PCIE_LC_STATE3__LC_PREV_STATE12_MASK 0x0000003fL
+#define PCIE_LC_STATE3__LC_PREV_STATE12__SHIFT 0x00000000
+#define PCIE_LC_STATE3__LC_PREV_STATE13_MASK 0x00003f00L
+#define PCIE_LC_STATE3__LC_PREV_STATE13__SHIFT 0x00000008
+#define PCIE_LC_STATE3__LC_PREV_STATE14_MASK 0x003f0000L
+#define PCIE_LC_STATE3__LC_PREV_STATE14__SHIFT 0x00000010
+#define PCIE_LC_STATE3__LC_PREV_STATE15_MASK 0x3f000000L
+#define PCIE_LC_STATE3__LC_PREV_STATE15__SHIFT 0x00000018
+#define PCIE_LC_STATE4__LC_PREV_STATE16_MASK 0x0000003fL
+#define PCIE_LC_STATE4__LC_PREV_STATE16__SHIFT 0x00000000
+#define PCIE_LC_STATE4__LC_PREV_STATE17_MASK 0x00003f00L
+#define PCIE_LC_STATE4__LC_PREV_STATE17__SHIFT 0x00000008
+#define PCIE_LC_STATE4__LC_PREV_STATE18_MASK 0x003f0000L
+#define PCIE_LC_STATE4__LC_PREV_STATE18__SHIFT 0x00000010
+#define PCIE_LC_STATE4__LC_PREV_STATE19_MASK 0x3f000000L
+#define PCIE_LC_STATE4__LC_PREV_STATE19__SHIFT 0x00000018
+#define PCIE_LC_STATE5__LC_PREV_STATE20_MASK 0x0000003fL
+#define PCIE_LC_STATE5__LC_PREV_STATE20__SHIFT 0x00000000
+#define PCIE_LC_STATE5__LC_PREV_STATE21_MASK 0x00003f00L
+#define PCIE_LC_STATE5__LC_PREV_STATE21__SHIFT 0x00000008
+#define PCIE_LC_STATE5__LC_PREV_STATE22_MASK 0x003f0000L
+#define PCIE_LC_STATE5__LC_PREV_STATE22__SHIFT 0x00000010
+#define PCIE_LC_STATE5__LC_PREV_STATE23_MASK 0x3f000000L
+#define PCIE_LC_STATE5__LC_PREV_STATE23__SHIFT 0x00000018
+#define PCIE_LC_STATE6__LC_PREV_STATE24_MASK 0x0000003fL
+#define PCIE_LC_STATE6__LC_PREV_STATE24__SHIFT 0x00000000
+#define PCIE_LC_STATE6__LC_PREV_STATE25_MASK 0x00003f00L
+#define PCIE_LC_STATE6__LC_PREV_STATE25__SHIFT 0x00000008
+#define PCIE_LC_STATE6__LC_PREV_STATE26_MASK 0x003f0000L
+#define PCIE_LC_STATE6__LC_PREV_STATE26__SHIFT 0x00000010
+#define PCIE_LC_STATE6__LC_PREV_STATE27_MASK 0x3f000000L
+#define PCIE_LC_STATE6__LC_PREV_STATE27__SHIFT 0x00000018
+#define PCIE_LC_STATE7__LC_PREV_STATE28_MASK 0x0000003fL
+#define PCIE_LC_STATE7__LC_PREV_STATE28__SHIFT 0x00000000
+#define PCIE_LC_STATE7__LC_PREV_STATE29_MASK 0x00003f00L
+#define PCIE_LC_STATE7__LC_PREV_STATE29__SHIFT 0x00000008
+#define PCIE_LC_STATE7__LC_PREV_STATE30_MASK 0x003f0000L
+#define PCIE_LC_STATE7__LC_PREV_STATE30__SHIFT 0x00000010
+#define PCIE_LC_STATE7__LC_PREV_STATE31_MASK 0x3f000000L
+#define PCIE_LC_STATE7__LC_PREV_STATE31__SHIFT 0x00000018
+#define PCIE_LC_STATE8__LC_PREV_STATE32_MASK 0x0000003fL
+#define PCIE_LC_STATE8__LC_PREV_STATE32__SHIFT 0x00000000
+#define PCIE_LC_STATE8__LC_PREV_STATE33_MASK 0x00003f00L
+#define PCIE_LC_STATE8__LC_PREV_STATE33__SHIFT 0x00000008
+#define PCIE_LC_STATE8__LC_PREV_STATE34_MASK 0x003f0000L
+#define PCIE_LC_STATE8__LC_PREV_STATE34__SHIFT 0x00000010
+#define PCIE_LC_STATE8__LC_PREV_STATE35_MASK 0x3f000000L
+#define PCIE_LC_STATE8__LC_PREV_STATE35__SHIFT 0x00000018
+#define PCIE_LC_STATE9__LC_PREV_STATE36_MASK 0x0000003fL
+#define PCIE_LC_STATE9__LC_PREV_STATE36__SHIFT 0x00000000
+#define PCIE_LC_STATE9__LC_PREV_STATE37_MASK 0x00003f00L
+#define PCIE_LC_STATE9__LC_PREV_STATE37__SHIFT 0x00000008
+#define PCIE_LC_STATE9__LC_PREV_STATE38_MASK 0x003f0000L
+#define PCIE_LC_STATE9__LC_PREV_STATE38__SHIFT 0x00000010
+#define PCIE_LC_STATE9__LC_PREV_STATE39_MASK 0x3f000000L
+#define PCIE_LC_STATE9__LC_PREV_STATE39__SHIFT 0x00000018
+#define PCIE_LC_STATUS1__LC_DETECTED_LINK_WIDTH_MASK 0x000000e0L
+#define PCIE_LC_STATUS1__LC_DETECTED_LINK_WIDTH__SHIFT 0x00000005
+#define PCIE_LC_STATUS1__LC_OPERATING_LINK_WIDTH_MASK 0x0000001cL
+#define PCIE_LC_STATUS1__LC_OPERATING_LINK_WIDTH__SHIFT 0x00000002
+#define PCIE_LC_STATUS1__LC_REVERSE_RCVR_MASK 0x00000001L
+#define PCIE_LC_STATUS1__LC_REVERSE_RCVR__SHIFT 0x00000000
+#define PCIE_LC_STATUS1__LC_REVERSE_XMIT_MASK 0x00000002L
+#define PCIE_LC_STATUS1__LC_REVERSE_XMIT__SHIFT 0x00000001
+#define PCIE_LC_STATUS2__LC_TOTAL_INACTIVE_LANES_MASK 0x0000ffffL
+#define PCIE_LC_STATUS2__LC_TOTAL_INACTIVE_LANES__SHIFT 0x00000000
+#define PCIE_LC_STATUS2__LC_TURN_ON_LANE_MASK 0xffff0000L
+#define PCIE_LC_STATUS2__LC_TURN_ON_LANE__SHIFT 0x00000010
+#define PCIE_LC_TRAINING_CNTL__LC_ALLOW_TX_L1_CONTROL_MASK 0x10000000L
+#define PCIE_LC_TRAINING_CNTL__LC_ALLOW_TX_L1_CONTROL__SHIFT 0x0000001c
+#define PCIE_LC_TRAINING_CNTL__LC_ASPM_L1_NAK_TIMER_SEL_MASK 0x00c00000L
+#define PCIE_LC_TRAINING_CNTL__LC_ASPM_L1_NAK_TIMER_SEL__SHIFT 0x00000016
+#define PCIE_LC_TRAINING_CNTL__LC_AUTONOMOUS_CHANGE_OFF_MASK 0x00020000L
+#define PCIE_LC_TRAINING_CNTL__LC_AUTONOMOUS_CHANGE_OFF__SHIFT 0x00000011
+#define PCIE_LC_TRAINING_CNTL__LC_COMPLIANCE_RECEIVE_MASK 0x00000010L
+#define PCIE_LC_TRAINING_CNTL__LC_COMPLIANCE_RECEIVE__SHIFT 0x00000004
+#define PCIE_LC_TRAINING_CNTL__LC_DISABLE_TRAINING_BIT_ARCH_MASK 0x00002000L
+#define PCIE_LC_TRAINING_CNTL__LC_DISABLE_TRAINING_BIT_ARCH__SHIFT 0x0000000d
+#define PCIE_LC_TRAINING_CNTL__LC_DONT_DEASSERT_RX_EN_IN_R_SPEED_MASK 0x01000000L
+#define PCIE_LC_TRAINING_CNTL__LC_DONT_DEASSERT_RX_EN_IN_R_SPEED__SHIFT 0x00000018
+#define PCIE_LC_TRAINING_CNTL__LC_DONT_DEASSERT_RX_EN_IN_TEST_MASK 0x02000000L
+#define PCIE_LC_TRAINING_CNTL__LC_DONT_DEASSERT_RX_EN_IN_TEST__SHIFT 0x00000019
+#define PCIE_LC_TRAINING_CNTL__LC_DONT_GO_TO_L0S_IF_L1_ARMED_MASK 0x00000800L
+#define PCIE_LC_TRAINING_CNTL__LC_DONT_GO_TO_L0S_IF_L1_ARMED__SHIFT 0x0000000b
+#define PCIE_LC_TRAINING_CNTL__LC_EXTEND_EQ_REQ_TIME_MASK 0xc0000000L
+#define PCIE_LC_TRAINING_CNTL__LC_EXTEND_EQ_REQ_TIME__SHIFT 0x0000001e
+#define PCIE_LC_TRAINING_CNTL__LC_EXTEND_WAIT_FOR_SKP_MASK 0x00010000L
+#define PCIE_LC_TRAINING_CNTL__LC_EXTEND_WAIT_FOR_SKP__SHIFT 0x00000010
+#define PCIE_LC_TRAINING_CNTL__LC_HW_LINK_DIS_EN_MASK 0x00080000L
+#define PCIE_LC_TRAINING_CNTL__LC_HW_LINK_DIS_EN__SHIFT 0x00000013
+#define PCIE_LC_TRAINING_CNTL__LC_INIT_SPD_CHG_WITH_CSR_EN_MASK 0x00001000L
+#define PCIE_LC_TRAINING_CNTL__LC_INIT_SPD_CHG_WITH_CSR_EN__SHIFT 0x0000000c
+#define PCIE_LC_TRAINING_CNTL__LC_L0S_L1_TRAINING_CNTL_EN_MASK 0x00000040L
+#define PCIE_LC_TRAINING_CNTL__LC_L0S_L1_TRAINING_CNTL_EN__SHIFT 0x00000006
+#define PCIE_LC_TRAINING_CNTL__LC_L1_LONG_WAKE_FIX_EN_MASK 0x00000080L
+#define PCIE_LC_TRAINING_CNTL__LC_L1_LONG_WAKE_FIX_EN__SHIFT 0x00000007
+#define PCIE_LC_TRAINING_CNTL__LC_LINK_DIS_BY_HW_MASK 0x00100000L
+#define PCIE_LC_TRAINING_CNTL__LC_LINK_DIS_BY_HW__SHIFT 0x00000014
+#define PCIE_LC_TRAINING_CNTL__LC_LOOK_FOR_MORE_NON_MATCHING_TS1_MASK 0x00000020L
+#define PCIE_LC_TRAINING_CNTL__LC_LOOK_FOR_MORE_NON_MATCHING_TS1__SHIFT 0x00000005
+#define PCIE_LC_TRAINING_CNTL__LC_POWER_STATE_MASK 0x00000700L
+#define PCIE_LC_TRAINING_CNTL__LC_POWER_STATE__SHIFT 0x00000008
+#define PCIE_LC_TRAINING_CNTL__LC_RESET_ASPM_L1_NAK_TIMER_MASK 0x04000000L
+#define PCIE_LC_TRAINING_CNTL__LC_RESET_ASPM_L1_NAK_TIMER__SHIFT 0x0000001a
+#define PCIE_LC_TRAINING_CNTL__LC_SHORT_RCFG_TIMEOUT_MASK 0x08000000L
+#define PCIE_LC_TRAINING_CNTL__LC_SHORT_RCFG_TIMEOUT__SHIFT 0x0000001b
+#define PCIE_LC_TRAINING_CNTL__LC_STATIC_TX_PIPE_COUNT_EN_MASK 0x00200000L
+#define PCIE_LC_TRAINING_CNTL__LC_STATIC_TX_PIPE_COUNT_EN__SHIFT 0x00000015
+#define PCIE_LC_TRAINING_CNTL__LC_TRAINING_CNTL_MASK 0x0000000fL
+#define PCIE_LC_TRAINING_CNTL__LC_TRAINING_CNTL__SHIFT 0x00000000
+#define PCIE_LC_TRAINING_CNTL__LC_UPCONFIGURE_CAP_OFF_MASK 0x00040000L
+#define PCIE_LC_TRAINING_CNTL__LC_UPCONFIGURE_CAP_OFF__SHIFT 0x00000012
+#define PCIE_LC_TRAINING_CNTL__LC_WAIT_FOR_FOM_VALID_AFTER_TRACK_MASK 0x20000000L
+#define PCIE_LC_TRAINING_CNTL__LC_WAIT_FOR_FOM_VALID_AFTER_TRACK__SHIFT 0x0000001d
+#define PCIE_P_BUF_STATUS__P_OVERFLOW_ERR_MASK 0x0000ffffL
+#define PCIE_P_BUF_STATUS__P_OVERFLOW_ERR__SHIFT 0x00000000
+#define PCIE_P_BUF_STATUS__P_UNDERFLOW_ERR_MASK 0xffff0000L
+#define PCIE_P_BUF_STATUS__P_UNDERFLOW_ERR__SHIFT 0x00000010
+#define PCIE_P_CNTL__P_ALWAYS_USE_FAST_TXCLK_MASK 0x00002000L
+#define PCIE_P_CNTL__P_ALWAYS_USE_FAST_TXCLK__SHIFT 0x0000000d
+#define PCIE_P_CNTL__P_BLK_LOCK_MODE_MASK 0x00001000L
+#define PCIE_P_CNTL__P_BLK_LOCK_MODE__SHIFT 0x0000000c
+#define PCIE_P_CNTL__P_ELASTDESKEW_HW_DEBUG_MASK 0x00000008L
+#define PCIE_P_CNTL__P_ELASTDESKEW_HW_DEBUG__SHIFT 0x00000003
+#define PCIE_P_CNTL__P_ELEC_IDLE_MODE_MASK 0x0000c000L
+#define PCIE_P_CNTL__P_ELEC_IDLE_MODE__SHIFT 0x0000000e
+#define PCIE_P_CNTL__P_IGNORE_CRC_ERR_MASK 0x00000010L
+#define PCIE_P_CNTL__P_IGNORE_CRC_ERR__SHIFT 0x00000004
+#define PCIE_P_CNTL__P_IGNORE_EDB_ERR_MASK 0x00000040L
+#define PCIE_P_CNTL__P_IGNORE_EDB_ERR__SHIFT 0x00000006
+#define PCIE_P_CNTL__P_IGNORE_IDL_ERR_MASK 0x00000080L
+#define PCIE_P_CNTL__P_IGNORE_IDL_ERR__SHIFT 0x00000007
+#define PCIE_P_CNTL__P_IGNORE_LEN_ERR_MASK 0x00000020L
+#define PCIE_P_CNTL__P_IGNORE_LEN_ERR__SHIFT 0x00000005
+#define PCIE_P_CNTL__P_IGNORE_TOK_ERR_MASK 0x00000100L
+#define PCIE_P_CNTL__P_IGNORE_TOK_ERR__SHIFT 0x00000008
+#define PCIE_P_CNTL__P_PWRDN_EN_MASK 0x00000001L
+#define PCIE_P_CNTL__P_PWRDN_EN__SHIFT 0x00000000
+#define PCIE_P_CNTL__P_SYMALIGN_HW_DEBUG_MASK 0x00000004L
+#define PCIE_P_CNTL__P_SYMALIGN_HW_DEBUG__SHIFT 0x00000002
+#define PCIE_P_CNTL__P_SYMALIGN_MODE_MASK 0x00000002L
+#define PCIE_P_CNTL__P_SYMALIGN_MODE__SHIFT 0x00000001
+#define PCIE_P_DECODER_STATUS__P_DECODE_ERR_MASK 0x0000ffffL
+#define PCIE_P_DECODER_STATUS__P_DECODE_ERR__SHIFT 0x00000000
+#define PCIE_PERF_CNTL_EVENT0_PORT_SEL__PERF0_PORT_SEL_MST_C_CLK_MASK 0x00000f00L
+#define PCIE_PERF_CNTL_EVENT0_PORT_SEL__PERF0_PORT_SEL_MST_C_CLK__SHIFT 0x00000008
+#define PCIE_PERF_CNTL_EVENT0_PORT_SEL__PERF0_PORT_SEL_MST_R_CLK_MASK 0x000000f0L
+#define PCIE_PERF_CNTL_EVENT0_PORT_SEL__PERF0_PORT_SEL_MST_R_CLK__SHIFT 0x00000004
+#define PCIE_PERF_CNTL_EVENT0_PORT_SEL__PERF0_PORT_SEL_SLV_NS_C_CLK_MASK 0x00f00000L
+#define PCIE_PERF_CNTL_EVENT0_PORT_SEL__PERF0_PORT_SEL_SLV_NS_C_CLK__SHIFT 0x00000014
+#define PCIE_PERF_CNTL_EVENT0_PORT_SEL__PERF0_PORT_SEL_SLV_R_CLK_MASK 0x0000f000L
+#define PCIE_PERF_CNTL_EVENT0_PORT_SEL__PERF0_PORT_SEL_SLV_R_CLK__SHIFT 0x0000000c
+#define PCIE_PERF_CNTL_EVENT0_PORT_SEL__PERF0_PORT_SEL_SLV_S_C_CLK_MASK 0x000f0000L
+#define PCIE_PERF_CNTL_EVENT0_PORT_SEL__PERF0_PORT_SEL_SLV_S_C_CLK__SHIFT 0x00000010
+#define PCIE_PERF_CNTL_EVENT0_PORT_SEL__PERF0_PORT_SEL_TXCLK2_MASK 0x0f000000L
+#define PCIE_PERF_CNTL_EVENT0_PORT_SEL__PERF0_PORT_SEL_TXCLK2__SHIFT 0x00000018
+#define PCIE_PERF_CNTL_EVENT0_PORT_SEL__PERF0_PORT_SEL_TXCLK_MASK 0x0000000fL
+#define PCIE_PERF_CNTL_EVENT0_PORT_SEL__PERF0_PORT_SEL_TXCLK__SHIFT 0x00000000
+#define PCIE_PERF_CNTL_EVENT1_PORT_SEL__PERF1_PORT_SEL_MST_C_CLK_MASK 0x00000f00L
+#define PCIE_PERF_CNTL_EVENT1_PORT_SEL__PERF1_PORT_SEL_MST_C_CLK__SHIFT 0x00000008
+#define PCIE_PERF_CNTL_EVENT1_PORT_SEL__PERF1_PORT_SEL_MST_R_CLK_MASK 0x000000f0L
+#define PCIE_PERF_CNTL_EVENT1_PORT_SEL__PERF1_PORT_SEL_MST_R_CLK__SHIFT 0x00000004
+#define PCIE_PERF_CNTL_EVENT1_PORT_SEL__PERF1_PORT_SEL_SLV_NS_C_CLK_MASK 0x00f00000L
+#define PCIE_PERF_CNTL_EVENT1_PORT_SEL__PERF1_PORT_SEL_SLV_NS_C_CLK__SHIFT 0x00000014
+#define PCIE_PERF_CNTL_EVENT1_PORT_SEL__PERF1_PORT_SEL_SLV_R_CLK_MASK 0x0000f000L
+#define PCIE_PERF_CNTL_EVENT1_PORT_SEL__PERF1_PORT_SEL_SLV_R_CLK__SHIFT 0x0000000c
+#define PCIE_PERF_CNTL_EVENT1_PORT_SEL__PERF1_PORT_SEL_SLV_S_C_CLK_MASK 0x000f0000L
+#define PCIE_PERF_CNTL_EVENT1_PORT_SEL__PERF1_PORT_SEL_SLV_S_C_CLK__SHIFT 0x00000010
+#define PCIE_PERF_CNTL_EVENT1_PORT_SEL__PERF1_PORT_SEL_TXCLK2_MASK 0x0f000000L
+#define PCIE_PERF_CNTL_EVENT1_PORT_SEL__PERF1_PORT_SEL_TXCLK2__SHIFT 0x00000018
+#define PCIE_PERF_CNTL_EVENT1_PORT_SEL__PERF1_PORT_SEL_TXCLK_MASK 0x0000000fL
+#define PCIE_PERF_CNTL_EVENT1_PORT_SEL__PERF1_PORT_SEL_TXCLK__SHIFT 0x00000000
+#define PCIE_PERF_CNTL_MST_C_CLK__COUNTER0_UPPER_MASK 0x00ff0000L
+#define PCIE_PERF_CNTL_MST_C_CLK__COUNTER0_UPPER__SHIFT 0x00000010
+#define PCIE_PERF_CNTL_MST_C_CLK__COUNTER1_UPPER_MASK 0xff000000L
+#define PCIE_PERF_CNTL_MST_C_CLK__COUNTER1_UPPER__SHIFT 0x00000018
+#define PCIE_PERF_CNTL_MST_C_CLK__EVENT0_SEL_MASK 0x000000ffL
+#define PCIE_PERF_CNTL_MST_C_CLK__EVENT0_SEL__SHIFT 0x00000000
+#define PCIE_PERF_CNTL_MST_C_CLK__EVENT1_SEL_MASK 0x0000ff00L
+#define PCIE_PERF_CNTL_MST_C_CLK__EVENT1_SEL__SHIFT 0x00000008
+#define PCIE_PERF_CNTL_MST_R_CLK__COUNTER0_UPPER_MASK 0x00ff0000L
+#define PCIE_PERF_CNTL_MST_R_CLK__COUNTER0_UPPER__SHIFT 0x00000010
+#define PCIE_PERF_CNTL_MST_R_CLK__COUNTER1_UPPER_MASK 0xff000000L
+#define PCIE_PERF_CNTL_MST_R_CLK__COUNTER1_UPPER__SHIFT 0x00000018
+#define PCIE_PERF_CNTL_MST_R_CLK__EVENT0_SEL_MASK 0x000000ffL
+#define PCIE_PERF_CNTL_MST_R_CLK__EVENT0_SEL__SHIFT 0x00000000
+#define PCIE_PERF_CNTL_MST_R_CLK__EVENT1_SEL_MASK 0x0000ff00L
+#define PCIE_PERF_CNTL_MST_R_CLK__EVENT1_SEL__SHIFT 0x00000008
+#define PCIE_PERF_CNTL_SLV_NS_C_CLK__COUNTER0_UPPER_MASK 0x00ff0000L
+#define PCIE_PERF_CNTL_SLV_NS_C_CLK__COUNTER0_UPPER__SHIFT 0x00000010
+#define PCIE_PERF_CNTL_SLV_NS_C_CLK__COUNTER1_UPPER_MASK 0xff000000L
+#define PCIE_PERF_CNTL_SLV_NS_C_CLK__COUNTER1_UPPER__SHIFT 0x00000018
+#define PCIE_PERF_CNTL_SLV_NS_C_CLK__EVENT0_SEL_MASK 0x000000ffL
+#define PCIE_PERF_CNTL_SLV_NS_C_CLK__EVENT0_SEL__SHIFT 0x00000000
+#define PCIE_PERF_CNTL_SLV_NS_C_CLK__EVENT1_SEL_MASK 0x0000ff00L
+#define PCIE_PERF_CNTL_SLV_NS_C_CLK__EVENT1_SEL__SHIFT 0x00000008
+#define PCIE_PERF_CNTL_SLV_R_CLK__COUNTER0_UPPER_MASK 0x00ff0000L
+#define PCIE_PERF_CNTL_SLV_R_CLK__COUNTER0_UPPER__SHIFT 0x00000010
+#define PCIE_PERF_CNTL_SLV_R_CLK__COUNTER1_UPPER_MASK 0xff000000L
+#define PCIE_PERF_CNTL_SLV_R_CLK__COUNTER1_UPPER__SHIFT 0x00000018
+#define PCIE_PERF_CNTL_SLV_R_CLK__EVENT0_SEL_MASK 0x000000ffL
+#define PCIE_PERF_CNTL_SLV_R_CLK__EVENT0_SEL__SHIFT 0x00000000
+#define PCIE_PERF_CNTL_SLV_R_CLK__EVENT1_SEL_MASK 0x0000ff00L
+#define PCIE_PERF_CNTL_SLV_R_CLK__EVENT1_SEL__SHIFT 0x00000008
+#define PCIE_PERF_CNTL_SLV_S_C_CLK__COUNTER0_UPPER_MASK 0x00ff0000L
+#define PCIE_PERF_CNTL_SLV_S_C_CLK__COUNTER0_UPPER__SHIFT 0x00000010
+#define PCIE_PERF_CNTL_SLV_S_C_CLK__COUNTER1_UPPER_MASK 0xff000000L
+#define PCIE_PERF_CNTL_SLV_S_C_CLK__COUNTER1_UPPER__SHIFT 0x00000018
+#define PCIE_PERF_CNTL_SLV_S_C_CLK__EVENT0_SEL_MASK 0x000000ffL
+#define PCIE_PERF_CNTL_SLV_S_C_CLK__EVENT0_SEL__SHIFT 0x00000000
+#define PCIE_PERF_CNTL_SLV_S_C_CLK__EVENT1_SEL_MASK 0x0000ff00L
+#define PCIE_PERF_CNTL_SLV_S_C_CLK__EVENT1_SEL__SHIFT 0x00000008
+#define PCIE_PERF_CNTL_TXCLK2__COUNTER0_UPPER_MASK 0x00ff0000L
+#define PCIE_PERF_CNTL_TXCLK2__COUNTER0_UPPER__SHIFT 0x00000010
+#define PCIE_PERF_CNTL_TXCLK2__COUNTER1_UPPER_MASK 0xff000000L
+#define PCIE_PERF_CNTL_TXCLK2__COUNTER1_UPPER__SHIFT 0x00000018
+#define PCIE_PERF_CNTL_TXCLK2__EVENT0_SEL_MASK 0x000000ffL
+#define PCIE_PERF_CNTL_TXCLK2__EVENT0_SEL__SHIFT 0x00000000
+#define PCIE_PERF_CNTL_TXCLK2__EVENT1_SEL_MASK 0x0000ff00L
+#define PCIE_PERF_CNTL_TXCLK2__EVENT1_SEL__SHIFT 0x00000008
+#define PCIE_PERF_CNTL_TXCLK__COUNTER0_UPPER_MASK 0x00ff0000L
+#define PCIE_PERF_CNTL_TXCLK__COUNTER0_UPPER__SHIFT 0x00000010
+#define PCIE_PERF_CNTL_TXCLK__COUNTER1_UPPER_MASK 0xff000000L
+#define PCIE_PERF_CNTL_TXCLK__COUNTER1_UPPER__SHIFT 0x00000018
+#define PCIE_PERF_CNTL_TXCLK__EVENT0_SEL_MASK 0x000000ffL
+#define PCIE_PERF_CNTL_TXCLK__EVENT0_SEL__SHIFT 0x00000000
+#define PCIE_PERF_CNTL_TXCLK__EVENT1_SEL_MASK 0x0000ff00L
+#define PCIE_PERF_CNTL_TXCLK__EVENT1_SEL__SHIFT 0x00000008
+#define PCIE_PERF_COUNT0_MST_C_CLK__COUNTER0_MASK 0xffffffffL
+#define PCIE_PERF_COUNT0_MST_C_CLK__COUNTER0__SHIFT 0x00000000
+#define PCIE_PERF_COUNT0_MST_R_CLK__COUNTER0_MASK 0xffffffffL
+#define PCIE_PERF_COUNT0_MST_R_CLK__COUNTER0__SHIFT 0x00000000
+#define PCIE_PERF_COUNT0_SLV_NS_C_CLK__COUNTER0_MASK 0xffffffffL
+#define PCIE_PERF_COUNT0_SLV_NS_C_CLK__COUNTER0__SHIFT 0x00000000
+#define PCIE_PERF_COUNT0_SLV_R_CLK__COUNTER0_MASK 0xffffffffL
+#define PCIE_PERF_COUNT0_SLV_R_CLK__COUNTER0__SHIFT 0x00000000
+#define PCIE_PERF_COUNT0_SLV_S_C_CLK__COUNTER0_MASK 0xffffffffL
+#define PCIE_PERF_COUNT0_SLV_S_C_CLK__COUNTER0__SHIFT 0x00000000
+#define PCIE_PERF_COUNT0_TXCLK2__COUNTER0_MASK 0xffffffffL
+#define PCIE_PERF_COUNT0_TXCLK2__COUNTER0__SHIFT 0x00000000
+#define PCIE_PERF_COUNT0_TXCLK__COUNTER0_MASK 0xffffffffL
+#define PCIE_PERF_COUNT0_TXCLK__COUNTER0__SHIFT 0x00000000
+#define PCIE_PERF_COUNT1_MST_C_CLK__COUNTER1_MASK 0xffffffffL
+#define PCIE_PERF_COUNT1_MST_C_CLK__COUNTER1__SHIFT 0x00000000
+#define PCIE_PERF_COUNT1_MST_R_CLK__COUNTER1_MASK 0xffffffffL
+#define PCIE_PERF_COUNT1_MST_R_CLK__COUNTER1__SHIFT 0x00000000
+#define PCIE_PERF_COUNT1_SLV_NS_C_CLK__COUNTER1_MASK 0xffffffffL
+#define PCIE_PERF_COUNT1_SLV_NS_C_CLK__COUNTER1__SHIFT 0x00000000
+#define PCIE_PERF_COUNT1_SLV_R_CLK__COUNTER1_MASK 0xffffffffL
+#define PCIE_PERF_COUNT1_SLV_R_CLK__COUNTER1__SHIFT 0x00000000
+#define PCIE_PERF_COUNT1_SLV_S_C_CLK__COUNTER1_MASK 0xffffffffL
+#define PCIE_PERF_COUNT1_SLV_S_C_CLK__COUNTER1__SHIFT 0x00000000
+#define PCIE_PERF_COUNT1_TXCLK2__COUNTER1_MASK 0xffffffffL
+#define PCIE_PERF_COUNT1_TXCLK2__COUNTER1__SHIFT 0x00000000
+#define PCIE_PERF_COUNT1_TXCLK__COUNTER1_MASK 0xffffffffL
+#define PCIE_PERF_COUNT1_TXCLK__COUNTER1__SHIFT 0x00000000
+#define PCIE_PERF_COUNT_CNTL__GLOBAL_COUNT_EN_MASK 0x00000001L
+#define PCIE_PERF_COUNT_CNTL__GLOBAL_COUNT_EN__SHIFT 0x00000000
+#define PCIE_PERF_COUNT_CNTL__GLOBAL_COUNT_RESET_MASK 0x00000004L
+#define PCIE_PERF_COUNT_CNTL__GLOBAL_COUNT_RESET__SHIFT 0x00000002
+#define PCIE_PERF_COUNT_CNTL__GLOBAL_SHADOW_WR_MASK 0x00000002L
+#define PCIE_PERF_COUNT_CNTL__GLOBAL_SHADOW_WR__SHIFT 0x00000001
+#define PCIEP_HW_DEBUG__HW_00_DEBUG_MASK 0x00000001L
+#define PCIEP_HW_DEBUG__HW_00_DEBUG__SHIFT 0x00000000
+#define PCIEP_HW_DEBUG__HW_01_DEBUG_MASK 0x00000002L
+#define PCIEP_HW_DEBUG__HW_01_DEBUG__SHIFT 0x00000001
+#define PCIEP_HW_DEBUG__HW_02_DEBUG_MASK 0x00000004L
+#define PCIEP_HW_DEBUG__HW_02_DEBUG__SHIFT 0x00000002
+#define PCIEP_HW_DEBUG__HW_03_DEBUG_MASK 0x00000008L
+#define PCIEP_HW_DEBUG__HW_03_DEBUG__SHIFT 0x00000003
+#define PCIEP_HW_DEBUG__HW_04_DEBUG_MASK 0x00000010L
+#define PCIEP_HW_DEBUG__HW_04_DEBUG__SHIFT 0x00000004
+#define PCIEP_HW_DEBUG__HW_05_DEBUG_MASK 0x00000020L
+#define PCIEP_HW_DEBUG__HW_05_DEBUG__SHIFT 0x00000005
+#define PCIEP_HW_DEBUG__HW_06_DEBUG_MASK 0x00000040L
+#define PCIEP_HW_DEBUG__HW_06_DEBUG__SHIFT 0x00000006
+#define PCIEP_HW_DEBUG__HW_07_DEBUG_MASK 0x00000080L
+#define PCIEP_HW_DEBUG__HW_07_DEBUG__SHIFT 0x00000007
+#define PCIEP_HW_DEBUG__HW_08_DEBUG_MASK 0x00000100L
+#define PCIEP_HW_DEBUG__HW_08_DEBUG__SHIFT 0x00000008
+#define PCIEP_HW_DEBUG__HW_09_DEBUG_MASK 0x00000200L
+#define PCIEP_HW_DEBUG__HW_09_DEBUG__SHIFT 0x00000009
+#define PCIEP_HW_DEBUG__HW_10_DEBUG_MASK 0x00000400L
+#define PCIEP_HW_DEBUG__HW_10_DEBUG__SHIFT 0x0000000a
+#define PCIEP_HW_DEBUG__HW_11_DEBUG_MASK 0x00000800L
+#define PCIEP_HW_DEBUG__HW_11_DEBUG__SHIFT 0x0000000b
+#define PCIEP_HW_DEBUG__HW_12_DEBUG_MASK 0x00001000L
+#define PCIEP_HW_DEBUG__HW_12_DEBUG__SHIFT 0x0000000c
+#define PCIEP_HW_DEBUG__HW_13_DEBUG_MASK 0x00002000L
+#define PCIEP_HW_DEBUG__HW_13_DEBUG__SHIFT 0x0000000d
+#define PCIEP_HW_DEBUG__HW_14_DEBUG_MASK 0x00004000L
+#define PCIEP_HW_DEBUG__HW_14_DEBUG__SHIFT 0x0000000e
+#define PCIEP_HW_DEBUG__HW_15_DEBUG_MASK 0x00008000L
+#define PCIEP_HW_DEBUG__HW_15_DEBUG__SHIFT 0x0000000f
+#define PCIE_P_MISC_STATUS__P_DESKEW_ERR_MASK 0x000000ffL
+#define PCIE_P_MISC_STATUS__P_DESKEW_ERR__SHIFT 0x00000000
+#define PCIE_P_MISC_STATUS__P_SYMUNLOCK_ERR_MASK 0xffff0000L
+#define PCIE_P_MISC_STATUS__P_SYMUNLOCK_ERR__SHIFT 0x00000010
+#define PCIE_PORT_DATA__PCIE_DATA_MASK 0xffffffffL
+#define PCIE_PORT_DATA__PCIE_DATA__SHIFT 0x00000000
+#define PCIE_PORT_INDEX__PCIE_INDEX_MASK 0x000000ffL
+#define PCIE_PORT_INDEX__PCIE_INDEX__SHIFT 0x00000000
+#define PCIEP_PORT_CNTL__CI_SLV_CPL_STATIC_ALLOC_LIMIT_S_MASK 0x00007f00L
+#define PCIEP_PORT_CNTL__CI_SLV_CPL_STATIC_ALLOC_LIMIT_S__SHIFT 0x00000008
+#define PCIEP_PORT_CNTL__CI_SNOOP_OVERRIDE_MASK 0x00000002L
+#define PCIEP_PORT_CNTL__CI_SNOOP_OVERRIDE__SHIFT 0x00000001
+#define PCIEP_PORT_CNTL__HOTPLUG_MSG_EN_MASK 0x00000004L
+#define PCIEP_PORT_CNTL__HOTPLUG_MSG_EN__SHIFT 0x00000002
+#define PCIEP_PORT_CNTL__NATIVE_PME_EN_MASK 0x00000008L
+#define PCIEP_PORT_CNTL__NATIVE_PME_EN__SHIFT 0x00000003
+#define PCIEP_PORT_CNTL__PMI_BM_DIS_MASK 0x00000020L
+#define PCIEP_PORT_CNTL__PMI_BM_DIS__SHIFT 0x00000005
+#define PCIEP_PORT_CNTL__PWR_FAULT_EN_MASK 0x00000010L
+#define PCIEP_PORT_CNTL__PWR_FAULT_EN__SHIFT 0x00000004
+#define PCIEP_PORT_CNTL__SEQNUM_DEBUG_MODE_MASK 0x00000040L
+#define PCIEP_PORT_CNTL__SEQNUM_DEBUG_MODE__SHIFT 0x00000006
+#define PCIEP_PORT_CNTL__SLV_PORT_REQ_EN_MASK 0x00000001L
+#define PCIEP_PORT_CNTL__SLV_PORT_REQ_EN__SHIFT 0x00000000
+#define PCIE_P_PORT_LANE_STATUS__PHY_LINK_WIDTH_MASK 0x0000007eL
+#define PCIE_P_PORT_LANE_STATUS__PHY_LINK_WIDTH__SHIFT 0x00000001
+#define PCIE_P_PORT_LANE_STATUS__PORT_LANE_REVERSAL_MASK 0x00000001L
+#define PCIE_P_PORT_LANE_STATUS__PORT_LANE_REVERSAL__SHIFT 0x00000000
+#define PCIE_PRBS_CLR__PRBS_CHECKER_DEBUG_BUS_SELECT_MASK 0x000f0000L
+#define PCIE_PRBS_CLR__PRBS_CHECKER_DEBUG_BUS_SELECT__SHIFT 0x00000010
+#define PCIE_PRBS_CLR__PRBS_CLR_MASK 0x0000ffffL
+#define PCIE_PRBS_CLR__PRBS_CLR__SHIFT 0x00000000
+#define PCIE_PRBS_ERRCNT_0__PRBS_ERRCNT_0_MASK 0xffffffffL
+#define PCIE_PRBS_ERRCNT_0__PRBS_ERRCNT_0__SHIFT 0x00000000
+#define PCIE_PRBS_ERRCNT_10__PRBS_ERRCNT_10_MASK 0xffffffffL
+#define PCIE_PRBS_ERRCNT_10__PRBS_ERRCNT_10__SHIFT 0x00000000
+#define PCIE_PRBS_ERRCNT_11__PRBS_ERRCNT_11_MASK 0xffffffffL
+#define PCIE_PRBS_ERRCNT_11__PRBS_ERRCNT_11__SHIFT 0x00000000
+#define PCIE_PRBS_ERRCNT_12__PRBS_ERRCNT_12_MASK 0xffffffffL
+#define PCIE_PRBS_ERRCNT_12__PRBS_ERRCNT_12__SHIFT 0x00000000
+#define PCIE_PRBS_ERRCNT_13__PRBS_ERRCNT_13_MASK 0xffffffffL
+#define PCIE_PRBS_ERRCNT_13__PRBS_ERRCNT_13__SHIFT 0x00000000
+#define PCIE_PRBS_ERRCNT_14__PRBS_ERRCNT_14_MASK 0xffffffffL
+#define PCIE_PRBS_ERRCNT_14__PRBS_ERRCNT_14__SHIFT 0x00000000
+#define PCIE_PRBS_ERRCNT_15__PRBS_ERRCNT_15_MASK 0xffffffffL
+#define PCIE_PRBS_ERRCNT_15__PRBS_ERRCNT_15__SHIFT 0x00000000
+#define PCIE_PRBS_ERRCNT_1__PRBS_ERRCNT_1_MASK 0xffffffffL
+#define PCIE_PRBS_ERRCNT_1__PRBS_ERRCNT_1__SHIFT 0x00000000
+#define PCIE_PRBS_ERRCNT_2__PRBS_ERRCNT_2_MASK 0xffffffffL
+#define PCIE_PRBS_ERRCNT_2__PRBS_ERRCNT_2__SHIFT 0x00000000
+#define PCIE_PRBS_ERRCNT_3__PRBS_ERRCNT_3_MASK 0xffffffffL
+#define PCIE_PRBS_ERRCNT_3__PRBS_ERRCNT_3__SHIFT 0x00000000
+#define PCIE_PRBS_ERRCNT_4__PRBS_ERRCNT_4_MASK 0xffffffffL
+#define PCIE_PRBS_ERRCNT_4__PRBS_ERRCNT_4__SHIFT 0x00000000
+#define PCIE_PRBS_ERRCNT_5__PRBS_ERRCNT_5_MASK 0xffffffffL
+#define PCIE_PRBS_ERRCNT_5__PRBS_ERRCNT_5__SHIFT 0x00000000
+#define PCIE_PRBS_ERRCNT_6__PRBS_ERRCNT_6_MASK 0xffffffffL
+#define PCIE_PRBS_ERRCNT_6__PRBS_ERRCNT_6__SHIFT 0x00000000
+#define PCIE_PRBS_ERRCNT_7__PRBS_ERRCNT_7_MASK 0xffffffffL
+#define PCIE_PRBS_ERRCNT_7__PRBS_ERRCNT_7__SHIFT 0x00000000
+#define PCIE_PRBS_ERRCNT_8__PRBS_ERRCNT_8_MASK 0xffffffffL
+#define PCIE_PRBS_ERRCNT_8__PRBS_ERRCNT_8__SHIFT 0x00000000
+#define PCIE_PRBS_ERRCNT_9__PRBS_ERRCNT_9_MASK 0xffffffffL
+#define PCIE_PRBS_ERRCNT_9__PRBS_ERRCNT_9__SHIFT 0x00000000
+#define PCIE_PRBS_FREERUN__PRBS_FREERUN_MASK 0x0000ffffL
+#define PCIE_PRBS_FREERUN__PRBS_FREERUN__SHIFT 0x00000000
+#define PCIE_PRBS_HI_BITCNT__PRBS_HI_BITCNT_MASK 0x000000ffL
+#define PCIE_PRBS_HI_BITCNT__PRBS_HI_BITCNT__SHIFT 0x00000000
+#define PCIE_PRBS_LO_BITCNT__PRBS_LO_BITCNT_MASK 0xffffffffL
+#define PCIE_PRBS_LO_BITCNT__PRBS_LO_BITCNT__SHIFT 0x00000000
+#define PCIE_PRBS_MISC__PRBS_8BIT_SEL_MASK 0x00000010L
+#define PCIE_PRBS_MISC__PRBS_8BIT_SEL__SHIFT 0x00000004
+#define PCIE_PRBS_MISC__PRBS_CHK_ERR_MASK_MASK 0xffff0000L
+#define PCIE_PRBS_MISC__PRBS_CHK_ERR_MASK__SHIFT 0x00000010
+#define PCIE_PRBS_MISC__PRBS_COMMA_NUM_MASK 0x00000060L
+#define PCIE_PRBS_MISC__PRBS_COMMA_NUM__SHIFT 0x00000005
+#define PCIE_PRBS_MISC__PRBS_DATA_RATE_MASK 0x0000c000L
+#define PCIE_PRBS_MISC__PRBS_DATA_RATE__SHIFT 0x0000000e
+#define PCIE_PRBS_MISC__PRBS_EN_MASK 0x00000001L
+#define PCIE_PRBS_MISC__PRBS_EN__SHIFT 0x00000000
+#define PCIE_PRBS_MISC__PRBS_LOCK_CNT_MASK 0x00000f80L
+#define PCIE_PRBS_MISC__PRBS_LOCK_CNT__SHIFT 0x00000007
+#define PCIE_PRBS_MISC__PRBS_TEST_MODE_MASK 0x00000006L
+#define PCIE_PRBS_MISC__PRBS_TEST_MODE__SHIFT 0x00000001
+#define PCIE_PRBS_MISC__PRBS_USER_PATTERN_TOGGLE_MASK 0x00000008L
+#define PCIE_PRBS_MISC__PRBS_USER_PATTERN_TOGGLE__SHIFT 0x00000003
+#define PCIE_PRBS_STATUS1__PRBS_ERRSTAT_MASK 0x0000ffffL
+#define PCIE_PRBS_STATUS1__PRBS_ERRSTAT__SHIFT 0x00000000
+#define PCIE_PRBS_STATUS1__PRBS_LOCKED_MASK 0xffff0000L
+#define PCIE_PRBS_STATUS1__PRBS_LOCKED__SHIFT 0x00000010
+#define PCIE_PRBS_STATUS2__PRBS_BITCNT_DONE_MASK 0x0000ffffL
+#define PCIE_PRBS_STATUS2__PRBS_BITCNT_DONE__SHIFT 0x00000000
+#define PCIE_PRBS_USER_PATTERN__PRBS_USER_PATTERN_MASK 0x3fffffffL
+#define PCIE_PRBS_USER_PATTERN__PRBS_USER_PATTERN__SHIFT 0x00000000
+#define PCIE_P_RCV_L0S_FTS_DET__P_RCV_L0S_FTS_DET_MAX_MASK 0x0000ff00L
+#define PCIE_P_RCV_L0S_FTS_DET__P_RCV_L0S_FTS_DET_MAX__SHIFT 0x00000008
+#define PCIE_P_RCV_L0S_FTS_DET__P_RCV_L0S_FTS_DET_MIN_MASK 0x000000ffL
+#define PCIE_P_RCV_L0S_FTS_DET__P_RCV_L0S_FTS_DET_MIN__SHIFT 0x00000000
+#define PCIEP_RESERVED__PCIEP_RESERVED_MASK 0xffffffffL
+#define PCIEP_RESERVED__PCIEP_RESERVED__SHIFT 0x00000000
+#define PCIEP_SCRATCH__PCIEP_SCRATCH_MASK 0xffffffffL
+#define PCIEP_SCRATCH__PCIEP_SCRATCH__SHIFT 0x00000000
+#define PCIEP_STRAP_LC__STRAP_AUTO_RC_SPEED_NEGOTIATION_DIS_MASK 0x00008000L
+#define PCIEP_STRAP_LC__STRAP_AUTO_RC_SPEED_NEGOTIATION_DIS__SHIFT 0x0000000f
+#define PCIEP_STRAP_LC__STRAP_BYPASS_RCVR_DET_MASK 0x00000800L
+#define PCIEP_STRAP_LC__STRAP_BYPASS_RCVR_DET__SHIFT 0x0000000b
+#define PCIEP_STRAP_LC__STRAP_COMPLIANCE_DIS_MASK 0x00001000L
+#define PCIEP_STRAP_LC__STRAP_COMPLIANCE_DIS__SHIFT 0x0000000c
+#define PCIEP_STRAP_LC__STRAP_FORCE_COMPLIANCE_MASK 0x00002000L
+#define PCIEP_STRAP_LC__STRAP_FORCE_COMPLIANCE__SHIFT 0x0000000d
+#define PCIEP_STRAP_LC__STRAP_FTS_yTSx_COUNT_MASK 0x00000003L
+#define PCIEP_STRAP_LC__STRAP_FTS_yTSx_COUNT__SHIFT 0x00000000
+#define PCIEP_STRAP_LC__STRAP_LANE_NEGOTIATION_MASK 0x00070000L
+#define PCIEP_STRAP_LC__STRAP_LANE_NEGOTIATION__SHIFT 0x00000010
+#define PCIEP_STRAP_LC__STRAP_LONG_yTSx_COUNT_MASK 0x0000000cL
+#define PCIEP_STRAP_LC__STRAP_LONG_yTSx_COUNT__SHIFT 0x00000002
+#define PCIEP_STRAP_LC__STRAP_MED_yTSx_COUNT_MASK 0x00000030L
+#define PCIEP_STRAP_LC__STRAP_MED_yTSx_COUNT__SHIFT 0x00000004
+#define PCIEP_STRAP_LC__STRAP_REVERSE_LC_LANES_MASK 0x00004000L
+#define PCIEP_STRAP_LC__STRAP_REVERSE_LC_LANES__SHIFT 0x0000000e
+#define PCIEP_STRAP_LC__STRAP_SHORT_yTSx_COUNT_MASK 0x000000c0L
+#define PCIEP_STRAP_LC__STRAP_SHORT_yTSx_COUNT__SHIFT 0x00000006
+#define PCIEP_STRAP_LC__STRAP_SKIP_INTERVAL_MASK 0x00000700L
+#define PCIEP_STRAP_LC__STRAP_SKIP_INTERVAL__SHIFT 0x00000008
+#define PCIEP_STRAP_MISC__STRAP_E2E_PREFIX_EN_MASK 0x00000002L
+#define PCIEP_STRAP_MISC__STRAP_E2E_PREFIX_EN__SHIFT 0x00000001
+#define PCIEP_STRAP_MISC__STRAP_EXTENDED_FMT_SUPPORTED_MASK 0x00000004L
+#define PCIEP_STRAP_MISC__STRAP_EXTENDED_FMT_SUPPORTED__SHIFT 0x00000002
+#define PCIEP_STRAP_MISC__STRAP_OBFF_SUPPORTED_MASK 0x00000018L
+#define PCIEP_STRAP_MISC__STRAP_OBFF_SUPPORTED__SHIFT 0x00000003
+#define PCIEP_STRAP_MISC__STRAP_REVERSE_LANES_MASK 0x00000001L
+#define PCIEP_STRAP_MISC__STRAP_REVERSE_LANES__SHIFT 0x00000000
+#define PCIE_RESERVED__PCIE_RESERVED_MASK 0xffffffffL
+#define PCIE_RESERVED__PCIE_RESERVED__SHIFT 0x00000000
+#define PCIE_RX_CNTL2__RX_IGNORE_EP_ATSTRANSREQ_UR_MASK 0x00000008L
+#define PCIE_RX_CNTL2__RX_IGNORE_EP_ATSTRANSREQ_UR__SHIFT 0x00000003
+#define PCIE_RX_CNTL2__RX_IGNORE_EP_INVALIDPASID_UR_MASK 0x00000001L
+#define PCIE_RX_CNTL2__RX_IGNORE_EP_INVALIDPASID_UR__SHIFT 0x00000000
+#define PCIE_RX_CNTL2__RX_IGNORE_EP_INVCPL_UR_MASK 0x00000020L
+#define PCIE_RX_CNTL2__RX_IGNORE_EP_INVCPL_UR__SHIFT 0x00000005
+#define PCIE_RX_CNTL2__RX_IGNORE_EP_PAGEREQMSG_UR_MASK 0x00000010L
+#define PCIE_RX_CNTL2__RX_IGNORE_EP_PAGEREQMSG_UR__SHIFT 0x00000004
+#define PCIE_RX_CNTL2__RX_IGNORE_EP_TRANSMRD_UR_MASK 0x00000002L
+#define PCIE_RX_CNTL2__RX_IGNORE_EP_TRANSMRD_UR__SHIFT 0x00000001
+#define PCIE_RX_CNTL2__RX_IGNORE_EP_TRANSMWR_UR_MASK 0x00000004L
+#define PCIE_RX_CNTL2__RX_IGNORE_EP_TRANSMWR_UR__SHIFT 0x00000002
+#define PCIE_RX_CNTL3__RX_IGNORE_RC_INVCPLPASID_UR_MASK 0x00000010L
+#define PCIE_RX_CNTL3__RX_IGNORE_RC_INVCPLPASID_UR__SHIFT 0x00000004
+#define PCIE_RX_CNTL3__RX_IGNORE_RC_INVREQ_UR_MASK 0x00000008L
+#define PCIE_RX_CNTL3__RX_IGNORE_RC_INVREQ_UR__SHIFT 0x00000003
+#define PCIE_RX_CNTL3__RX_IGNORE_RC_PRGRESPMSG_UR_MASK 0x00000004L
+#define PCIE_RX_CNTL3__RX_IGNORE_RC_PRGRESPMSG_UR__SHIFT 0x00000002
+#define PCIE_RX_CNTL3__RX_IGNORE_RC_TRANSMRDPASID_UR_MASK 0x00000001L
+#define PCIE_RX_CNTL3__RX_IGNORE_RC_TRANSMRDPASID_UR__SHIFT 0x00000000
+#define PCIE_RX_CNTL3__RX_IGNORE_RC_TRANSMWRPASID_UR_MASK 0x00000002L
+#define PCIE_RX_CNTL3__RX_IGNORE_RC_TRANSMWRPASID_UR__SHIFT 0x00000001
+#define PCIE_RX_CNTL__RX_FC_INIT_FROM_REG_MASK 0x00008000L
+#define PCIE_RX_CNTL__RX_FC_INIT_FROM_REG__SHIFT 0x0000000f
+#define PCIE_RX_CNTL__RX_GEN_ONE_NAK_MASK 0x00004000L
+#define PCIE_RX_CNTL__RX_GEN_ONE_NAK__SHIFT 0x0000000e
+#define PCIE_RX_CNTL__RX_IGNORE_AT_ERR_MASK 0x00001000L
+#define PCIE_RX_CNTL__RX_IGNORE_AT_ERR__SHIFT 0x0000000c
+#define PCIE_RX_CNTL__RX_IGNORE_BE_ERR_MASK 0x00000002L
+#define PCIE_RX_CNTL__RX_IGNORE_BE_ERR__SHIFT 0x00000001
+#define PCIE_RX_CNTL__RX_IGNORE_CFG_ERR_MASK 0x00000010L
+#define PCIE_RX_CNTL__RX_IGNORE_CFG_ERR__SHIFT 0x00000004
+#define PCIE_RX_CNTL__RX_IGNORE_CFG_UR_MASK 0x00000400L
+#define PCIE_RX_CNTL__RX_IGNORE_CFG_UR__SHIFT 0x0000000a
+#define PCIE_RX_CNTL__RX_IGNORE_CPL_ERR_MASK 0x00000020L
+#define PCIE_RX_CNTL__RX_IGNORE_CPL_ERR__SHIFT 0x00000005
+#define PCIE_RX_CNTL__RX_IGNORE_CPLPREFIX_ERR_MASK 0x00800000L
+#define PCIE_RX_CNTL__RX_IGNORE_CPLPREFIX_ERR__SHIFT 0x00000017
+#define PCIE_RX_CNTL__RX_IGNORE_CRC_ERR_MASK 0x00000008L
+#define PCIE_RX_CNTL__RX_IGNORE_CRC_ERR__SHIFT 0x00000003
+#define PCIE_RX_CNTL__RX_IGNORE_EP_ERR_MASK 0x00000040L
+#define PCIE_RX_CNTL__RX_IGNORE_EP_ERR__SHIFT 0x00000006
+#define PCIE_RX_CNTL__RX_IGNORE_INVALIDPASID_ERR_MASK 0x01000000L
+#define PCIE_RX_CNTL__RX_IGNORE_INVALIDPASID_ERR__SHIFT 0x00000018
+#define PCIE_RX_CNTL__RX_IGNORE_IO_ERR_MASK 0x00000001L
+#define PCIE_RX_CNTL__RX_IGNORE_IO_ERR__SHIFT 0x00000000
+#define PCIE_RX_CNTL__RX_IGNORE_IO_UR_MASK 0x00000800L
+#define PCIE_RX_CNTL__RX_IGNORE_IO_UR__SHIFT 0x0000000b
+#define PCIE_RX_CNTL__RX_IGNORE_LEN_MISMATCH_ERR_MASK 0x00000080L
+#define PCIE_RX_CNTL__RX_IGNORE_LEN_MISMATCH_ERR__SHIFT 0x00000007
+#define PCIE_RX_CNTL__RX_IGNORE_MAX_PAYLOAD_ERR_MASK 0x00000100L
+#define PCIE_RX_CNTL__RX_IGNORE_MAX_PAYLOAD_ERR__SHIFT 0x00000008
+#define PCIE_RX_CNTL__RX_IGNORE_MAXPREFIX_ERR_MASK 0x00400000L
+#define PCIE_RX_CNTL__RX_IGNORE_MAXPREFIX_ERR__SHIFT 0x00000016
+#define PCIE_RX_CNTL__RX_IGNORE_MSG_ERR_MASK 0x00000004L
+#define PCIE_RX_CNTL__RX_IGNORE_MSG_ERR__SHIFT 0x00000002
+#define PCIE_RX_CNTL__RX_IGNORE_NOT_PASID_UR_MASK 0x02000000L
+#define PCIE_RX_CNTL__RX_IGNORE_NOT_PASID_UR__SHIFT 0x00000019
+#define PCIE_RX_CNTL__RX_IGNORE_SHORTPREFIX_ERR_MASK 0x00200000L
+#define PCIE_RX_CNTL__RX_IGNORE_SHORTPREFIX_ERR__SHIFT 0x00000015
+#define PCIE_RX_CNTL__RX_IGNORE_TC_ERR_MASK 0x00000200L
+#define PCIE_RX_CNTL__RX_IGNORE_TC_ERR__SHIFT 0x00000009
+#define PCIE_RX_CNTL__RX_NAK_IF_FIFO_FULL_MASK 0x00002000L
+#define PCIE_RX_CNTL__RX_NAK_IF_FIFO_FULL__SHIFT 0x0000000d
+#define PCIE_RX_CNTL__RX_PCIE_CPL_TIMEOUT_DIS_MASK 0x00100000L
+#define PCIE_RX_CNTL__RX_PCIE_CPL_TIMEOUT_DIS__SHIFT 0x00000014
+#define PCIE_RX_CNTL__RX_RCB_CPL_TIMEOUT_MASK 0x00070000L
+#define PCIE_RX_CNTL__RX_RCB_CPL_TIMEOUT_MODE_MASK 0x00080000L
+#define PCIE_RX_CNTL__RX_RCB_CPL_TIMEOUT_MODE__SHIFT 0x00000013
+#define PCIE_RX_CNTL__RX_RCB_CPL_TIMEOUT__SHIFT 0x00000010
+#define PCIE_RX_CREDITS_ALLOCATED_CPL__RX_CREDITS_ALLOCATED_CPLD_MASK 0x00000fffL
+#define PCIE_RX_CREDITS_ALLOCATED_CPL__RX_CREDITS_ALLOCATED_CPLD__SHIFT 0x00000000
+#define PCIE_RX_CREDITS_ALLOCATED_CPL__RX_CREDITS_ALLOCATED_CPLH_MASK 0x00ff0000L
+#define PCIE_RX_CREDITS_ALLOCATED_CPL__RX_CREDITS_ALLOCATED_CPLH__SHIFT 0x00000010
+#define PCIE_RX_CREDITS_ALLOCATED_NP__RX_CREDITS_ALLOCATED_NPD_MASK 0x00000fffL
+#define PCIE_RX_CREDITS_ALLOCATED_NP__RX_CREDITS_ALLOCATED_NPD__SHIFT 0x00000000
+#define PCIE_RX_CREDITS_ALLOCATED_NP__RX_CREDITS_ALLOCATED_NPH_MASK 0x00ff0000L
+#define PCIE_RX_CREDITS_ALLOCATED_NP__RX_CREDITS_ALLOCATED_NPH__SHIFT 0x00000010
+#define PCIE_RX_CREDITS_ALLOCATED_P__RX_CREDITS_ALLOCATED_PD_MASK 0x00000fffL
+#define PCIE_RX_CREDITS_ALLOCATED_P__RX_CREDITS_ALLOCATED_PD__SHIFT 0x00000000
+#define PCIE_RX_CREDITS_ALLOCATED_P__RX_CREDITS_ALLOCATED_PH_MASK 0x00ff0000L
+#define PCIE_RX_CREDITS_ALLOCATED_P__RX_CREDITS_ALLOCATED_PH__SHIFT 0x00000010
+#define PCIE_RX_EXPECTED_SEQNUM__RX_EXPECTED_SEQNUM_MASK 0x00000fffL
+#define PCIE_RX_EXPECTED_SEQNUM__RX_EXPECTED_SEQNUM__SHIFT 0x00000000
+#define PCIE_RX_LAST_TLP0__RX_LAST_TLP0_MASK 0xffffffffL
+#define PCIE_RX_LAST_TLP0__RX_LAST_TLP0__SHIFT 0x00000000
+#define PCIE_RX_LAST_TLP1__RX_LAST_TLP1_MASK 0xffffffffL
+#define PCIE_RX_LAST_TLP1__RX_LAST_TLP1__SHIFT 0x00000000
+#define PCIE_RX_LAST_TLP2__RX_LAST_TLP2_MASK 0xffffffffL
+#define PCIE_RX_LAST_TLP2__RX_LAST_TLP2__SHIFT 0x00000000
+#define PCIE_RX_LAST_TLP3__RX_LAST_TLP3_MASK 0xffffffffL
+#define PCIE_RX_LAST_TLP3__RX_LAST_TLP3__SHIFT 0x00000000
+#define PCIE_RX_NUM_NAK_GENERATED__RX_NUM_NAK_GENERATED_MASK 0xffffffffL
+#define PCIE_RX_NUM_NAK_GENERATED__RX_NUM_NAK_GENERATED__SHIFT 0x00000000
+#define PCIE_RX_NUM_NAK__RX_NUM_NAK_MASK 0xffffffffL
+#define PCIE_RX_NUM_NAK__RX_NUM_NAK__SHIFT 0x00000000
+#define PCIE_RX_VENDOR_SPECIFIC__RX_VENDOR_DATA_MASK 0x00ffffffL
+#define PCIE_RX_VENDOR_SPECIFIC__RX_VENDOR_DATA__SHIFT 0x00000000
+#define PCIE_RX_VENDOR_SPECIFIC__RX_VENDOR_STATUS_MASK 0x01000000L
+#define PCIE_RX_VENDOR_SPECIFIC__RX_VENDOR_STATUS__SHIFT 0x00000018
+#define PCIE_SCRATCH__PCIE_SCRATCH_MASK 0xffffffffL
+#define PCIE_SCRATCH__PCIE_SCRATCH__SHIFT 0x00000000
+#define PCIE_STRAP_F0__STRAP_F0_ACS_EN_MASK 0x00000040L
+#define PCIE_STRAP_F0__STRAP_F0_ACS_EN__SHIFT 0x00000006
+#define PCIE_STRAP_F0__STRAP_F0_AER_EN_MASK 0x00000020L
+#define PCIE_STRAP_F0__STRAP_F0_AER_EN__SHIFT 0x00000005
+#define PCIE_STRAP_F0__STRAP_F0_ATS_EN_MASK 0x00000400L
+#define PCIE_STRAP_F0__STRAP_F0_ATS_EN__SHIFT 0x0000000a
+#define PCIE_STRAP_F0__STRAP_F0_BAR_EN_MASK 0x00000080L
+#define PCIE_STRAP_F0__STRAP_F0_BAR_EN__SHIFT 0x00000007
+#define PCIE_STRAP_F0__STRAP_F0_DPA_EN_MASK 0x00000200L
+#define PCIE_STRAP_F0__STRAP_F0_DPA_EN__SHIFT 0x00000009
+#define PCIE_STRAP_F0__STRAP_F0_DSN_EN_MASK 0x00000010L
+#define PCIE_STRAP_F0__STRAP_F0_DSN_EN__SHIFT 0x00000004
+#define PCIE_STRAP_F0__STRAP_F0_EN_MASK 0x00000001L
+#define PCIE_STRAP_F0__STRAP_F0_EN__SHIFT 0x00000000
+#define PCIE_STRAP_F0__STRAP_F0_LEGACY_DEVICE_TYPE_EN_MASK 0x00000002L
+#define PCIE_STRAP_F0__STRAP_F0_LEGACY_DEVICE_TYPE_EN__SHIFT 0x00000001
+#define PCIE_STRAP_F0__STRAP_F0_MSI_EN_MASK 0x00000004L
+#define PCIE_STRAP_F0__STRAP_F0_MSI_EN__SHIFT 0x00000002
+#define PCIE_STRAP_F0__STRAP_F0_PAGE_REQ_EN_MASK 0x00000800L
+#define PCIE_STRAP_F0__STRAP_F0_PAGE_REQ_EN__SHIFT 0x0000000b
+#define PCIE_STRAP_F0__STRAP_F0_PASID_EN_MASK 0x00001000L
+#define PCIE_STRAP_F0__STRAP_F0_PASID_EN__SHIFT 0x0000000c
+#define PCIE_STRAP_F0__STRAP_F0_PWR_EN_MASK 0x00000100L
+#define PCIE_STRAP_F0__STRAP_F0_PWR_EN__SHIFT 0x00000008
+#define PCIE_STRAP_F0__STRAP_F0_VC_EN_MASK 0x00000008L
+#define PCIE_STRAP_F0__STRAP_F0_VC_EN__SHIFT 0x00000003
+#define PCIE_STRAP_F1__STRAP_F1_ACS_EN_MASK 0x00000040L
+#define PCIE_STRAP_F1__STRAP_F1_ACS_EN__SHIFT 0x00000006
+#define PCIE_STRAP_F1__STRAP_F1_AER_EN_MASK 0x00000020L
+#define PCIE_STRAP_F1__STRAP_F1_AER_EN__SHIFT 0x00000005
+#define PCIE_STRAP_F1__STRAP_F1_ATS_EN_MASK 0x00000400L
+#define PCIE_STRAP_F1__STRAP_F1_ATS_EN__SHIFT 0x0000000a
+#define PCIE_STRAP_F1__STRAP_F1_BAR_EN_MASK 0x00000080L
+#define PCIE_STRAP_F1__STRAP_F1_BAR_EN__SHIFT 0x00000007
+#define PCIE_STRAP_F1__STRAP_F1_DPA_EN_MASK 0x00000200L
+#define PCIE_STRAP_F1__STRAP_F1_DPA_EN__SHIFT 0x00000009
+#define PCIE_STRAP_F1__STRAP_F1_DSN_EN_MASK 0x00000010L
+#define PCIE_STRAP_F1__STRAP_F1_DSN_EN__SHIFT 0x00000004
+#define PCIE_STRAP_F1__STRAP_F1_EN_MASK 0x00000001L
+#define PCIE_STRAP_F1__STRAP_F1_EN__SHIFT 0x00000000
+#define PCIE_STRAP_F1__STRAP_F1_LEGACY_DEVICE_TYPE_EN_MASK 0x00000002L
+#define PCIE_STRAP_F1__STRAP_F1_LEGACY_DEVICE_TYPE_EN__SHIFT 0x00000001
+#define PCIE_STRAP_F1__STRAP_F1_MSI_EN_MASK 0x00000004L
+#define PCIE_STRAP_F1__STRAP_F1_MSI_EN__SHIFT 0x00000002
+#define PCIE_STRAP_F1__STRAP_F1_PAGE_REQ_EN_MASK 0x00000800L
+#define PCIE_STRAP_F1__STRAP_F1_PAGE_REQ_EN__SHIFT 0x0000000b
+#define PCIE_STRAP_F1__STRAP_F1_PASID_EN_MASK 0x00001000L
+#define PCIE_STRAP_F1__STRAP_F1_PASID_EN__SHIFT 0x0000000c
+#define PCIE_STRAP_F1__STRAP_F1_PWR_EN_MASK 0x00000100L
+#define PCIE_STRAP_F1__STRAP_F1_PWR_EN__SHIFT 0x00000008
+#define PCIE_STRAP_F1__STRAP_F1_VC_EN_MASK 0x00000008L
+#define PCIE_STRAP_F1__STRAP_F1_VC_EN__SHIFT 0x00000003
+#define PCIE_STRAP_F2__STRAP_F2_ACS_EN_MASK 0x00000040L
+#define PCIE_STRAP_F2__STRAP_F2_ACS_EN__SHIFT 0x00000006
+#define PCIE_STRAP_F2__STRAP_F2_AER_EN_MASK 0x00000020L
+#define PCIE_STRAP_F2__STRAP_F2_AER_EN__SHIFT 0x00000005
+#define PCIE_STRAP_F2__STRAP_F2_ATS_EN_MASK 0x00000400L
+#define PCIE_STRAP_F2__STRAP_F2_ATS_EN__SHIFT 0x0000000a
+#define PCIE_STRAP_F2__STRAP_F2_BAR_EN_MASK 0x00000080L
+#define PCIE_STRAP_F2__STRAP_F2_BAR_EN__SHIFT 0x00000007
+#define PCIE_STRAP_F2__STRAP_F2_DPA_EN_MASK 0x00000200L
+#define PCIE_STRAP_F2__STRAP_F2_DPA_EN__SHIFT 0x00000009
+#define PCIE_STRAP_F2__STRAP_F2_DSN_EN_MASK 0x00000010L
+#define PCIE_STRAP_F2__STRAP_F2_DSN_EN__SHIFT 0x00000004
+#define PCIE_STRAP_F2__STRAP_F2_EN_MASK 0x00000001L
+#define PCIE_STRAP_F2__STRAP_F2_EN__SHIFT 0x00000000
+#define PCIE_STRAP_F2__STRAP_F2_LEGACY_DEVICE_TYPE_EN_MASK 0x00000002L
+#define PCIE_STRAP_F2__STRAP_F2_LEGACY_DEVICE_TYPE_EN__SHIFT 0x00000001
+#define PCIE_STRAP_F2__STRAP_F2_MSI_EN_MASK 0x00000004L
+#define PCIE_STRAP_F2__STRAP_F2_MSI_EN__SHIFT 0x00000002
+#define PCIE_STRAP_F2__STRAP_F2_PAGE_REQ_EN_MASK 0x00000800L
+#define PCIE_STRAP_F2__STRAP_F2_PAGE_REQ_EN__SHIFT 0x0000000b
+#define PCIE_STRAP_F2__STRAP_F2_PASID_EN_MASK 0x00001000L
+#define PCIE_STRAP_F2__STRAP_F2_PASID_EN__SHIFT 0x0000000c
+#define PCIE_STRAP_F2__STRAP_F2_PWR_EN_MASK 0x00000100L
+#define PCIE_STRAP_F2__STRAP_F2_PWR_EN__SHIFT 0x00000008
+#define PCIE_STRAP_F2__STRAP_F2_VC_EN_MASK 0x00000008L
+#define PCIE_STRAP_F2__STRAP_F2_VC_EN__SHIFT 0x00000003
+#define PCIE_STRAP_F3__RESERVED_MASK 0xffffffffL
+#define PCIE_STRAP_F3__RESERVED__SHIFT 0x00000000
+#define PCIE_STRAP_F4__RESERVED_MASK 0xffffffffL
+#define PCIE_STRAP_F4__RESERVED__SHIFT 0x00000000
+#define PCIE_STRAP_F5__RESERVED_MASK 0xffffffffL
+#define PCIE_STRAP_F5__RESERVED__SHIFT 0x00000000
+#define PCIE_STRAP_F6__RESERVED_MASK 0xffffffffL
+#define PCIE_STRAP_F6__RESERVED__SHIFT 0x00000000
+#define PCIE_STRAP_F7__RESERVED_MASK 0xffffffffL
+#define PCIE_STRAP_F7__RESERVED__SHIFT 0x00000000
+#define PCIE_STRAP_I2C_BD__STRAP_BIF_DBG_I2C_EN_MASK 0x00000080L
+#define PCIE_STRAP_I2C_BD__STRAP_BIF_DBG_I2C_EN__SHIFT 0x00000007
+#define PCIE_STRAP_I2C_BD__STRAP_BIF_I2C_SLV_ADR_MASK 0x0000007fL
+#define PCIE_STRAP_I2C_BD__STRAP_BIF_I2C_SLV_ADR__SHIFT 0x00000000
+#define PCIE_STRAP_MISC2__STRAP_GEN2_COMPLIANCE_MASK 0x00000002L
+#define PCIE_STRAP_MISC2__STRAP_GEN2_COMPLIANCE__SHIFT 0x00000001
+#define PCIE_STRAP_MISC2__STRAP_GEN3_COMPLIANCE_MASK 0x00000008L
+#define PCIE_STRAP_MISC2__STRAP_GEN3_COMPLIANCE__SHIFT 0x00000003
+#define PCIE_STRAP_MISC2__STRAP_MSTCPL_TIMEOUT_EN_MASK 0x00000004L
+#define PCIE_STRAP_MISC2__STRAP_MSTCPL_TIMEOUT_EN__SHIFT 0x00000002
+#define PCIE_STRAP_MISC__STRAP_CLK_PM_EN_MASK 0x01000000L
+#define PCIE_STRAP_MISC__STRAP_CLK_PM_EN__SHIFT 0x00000018
+#define PCIE_STRAP_MISC__STRAP_ECN1P1_EN_MASK 0x02000000L
+#define PCIE_STRAP_MISC__STRAP_ECN1P1_EN__SHIFT 0x00000019
+#define PCIE_STRAP_MISC__STRAP_EXT_VC_COUNT_MASK 0x04000000L
+#define PCIE_STRAP_MISC__STRAP_EXT_VC_COUNT__SHIFT 0x0000001a
+#define PCIE_STRAP_MISC__STRAP_FLR_EN_MASK 0x40000000L
+#define PCIE_STRAP_MISC__STRAP_FLR_EN__SHIFT 0x0000001e
+#define PCIE_STRAP_MISC__STRAP_LINK_CONFIG_MASK 0x0000000fL
+#define PCIE_STRAP_MISC__STRAP_LINK_CONFIG__SHIFT 0x00000000
+#define PCIE_STRAP_MISC__STRAP_MAX_PASID_WIDTH_MASK 0x00001f00L
+#define PCIE_STRAP_MISC__STRAP_MAX_PASID_WIDTH__SHIFT 0x00000008
+#define PCIE_STRAP_MISC__STRAP_MST_ADR64_EN_MASK 0x20000000L
+#define PCIE_STRAP_MISC__STRAP_MST_ADR64_EN__SHIFT 0x0000001d
+#define PCIE_STRAP_MISC__STRAP_PASID_EXE_PERMISSION_SUPPORTED_MASK 0x00002000L
+#define PCIE_STRAP_MISC__STRAP_PASID_EXE_PERMISSION_SUPPORTED__SHIFT 0x0000000d
+#define PCIE_STRAP_MISC__STRAP_PASID_GLOBAL_INVALIDATE_SUPPORTED_MASK 0x00008000L
+#define PCIE_STRAP_MISC__STRAP_PASID_GLOBAL_INVALIDATE_SUPPORTED__SHIFT 0x0000000f
+#define PCIE_STRAP_MISC__STRAP_PASID_PRIV_MODE_SUPPORTED_MASK 0x00004000L
+#define PCIE_STRAP_MISC__STRAP_PASID_PRIV_MODE_SUPPORTED__SHIFT 0x0000000e
+#define PCIE_STRAP_MISC__STRAP_REVERSE_ALL_MASK 0x10000000L
+#define PCIE_STRAP_MISC__STRAP_REVERSE_ALL__SHIFT 0x0000001c
+#define PCIE_STRAP_PI__STRAP_QUICKSIM_START_MASK 0x00000001L
+#define PCIE_STRAP_PI__STRAP_QUICKSIM_START__SHIFT 0x00000000
+#define PCIE_STRAP_PI__STRAP_TEST_TOGGLE_MODE_MASK 0x20000000L
+#define PCIE_STRAP_PI__STRAP_TEST_TOGGLE_MODE__SHIFT 0x0000001d
+#define PCIE_STRAP_PI__STRAP_TEST_TOGGLE_PATTERN_MASK 0x10000000L
+#define PCIE_STRAP_PI__STRAP_TEST_TOGGLE_PATTERN__SHIFT 0x0000001c
+#define PCIE_TX_ACK_LATENCY_LIMIT__TX_ACK_LATENCY_LIMIT_MASK 0x00000fffL
+#define PCIE_TX_ACK_LATENCY_LIMIT__TX_ACK_LATENCY_LIMIT_OVERWRITE_MASK 0x00001000L
+#define PCIE_TX_ACK_LATENCY_LIMIT__TX_ACK_LATENCY_LIMIT_OVERWRITE__SHIFT 0x0000000c
+#define PCIE_TX_ACK_LATENCY_LIMIT__TX_ACK_LATENCY_LIMIT__SHIFT 0x00000000
+#define PCIE_TX_CNTL__TX_CLEAR_EXTRA_PM_REQS_MASK 0x00400000L
+#define PCIE_TX_CNTL__TX_CLEAR_EXTRA_PM_REQS__SHIFT 0x00000016
+#define PCIE_TX_CNTL__TX_CPL_PASS_P_MASK 0x00100000L
+#define PCIE_TX_CNTL__TX_CPL_PASS_P__SHIFT 0x00000014
+#define PCIE_TX_CNTL__TX_FC_UPDATE_TIMEOUT_DIS_MASK 0x00800000L
+#define PCIE_TX_CNTL__TX_FC_UPDATE_TIMEOUT_DIS__SHIFT 0x00000017
+#define PCIE_TX_CNTL__TX_FLUSH_TLP_DIS_MASK 0x00008000L
+#define PCIE_TX_CNTL__TX_FLUSH_TLP_DIS__SHIFT 0x0000000f
+#define PCIE_TX_CNTL__TX_NP_PASS_P_MASK 0x00200000L
+#define PCIE_TX_CNTL__TX_NP_PASS_P__SHIFT 0x00000015
+#define PCIE_TX_CNTL__TX_PACK_PACKET_DIS_MASK 0x00004000L
+#define PCIE_TX_CNTL__TX_PACK_PACKET_DIS__SHIFT 0x0000000e
+#define PCIE_TX_CNTL__TX_RO_OVERRIDE_MASK 0x00003000L
+#define PCIE_TX_CNTL__TX_RO_OVERRIDE__SHIFT 0x0000000c
+#define PCIE_TX_CNTL__TX_SNR_OVERRIDE_MASK 0x00000c00L
+#define PCIE_TX_CNTL__TX_SNR_OVERRIDE__SHIFT 0x0000000a
+#define PCIE_TX_CREDITS_ADVT_CPL__TX_CREDITS_ADVT_CPLD_MASK 0x00000fffL
+#define PCIE_TX_CREDITS_ADVT_CPL__TX_CREDITS_ADVT_CPLD__SHIFT 0x00000000
+#define PCIE_TX_CREDITS_ADVT_CPL__TX_CREDITS_ADVT_CPLH_MASK 0x00ff0000L
+#define PCIE_TX_CREDITS_ADVT_CPL__TX_CREDITS_ADVT_CPLH__SHIFT 0x00000010
+#define PCIE_TX_CREDITS_ADVT_NP__TX_CREDITS_ADVT_NPD_MASK 0x00000fffL
+#define PCIE_TX_CREDITS_ADVT_NP__TX_CREDITS_ADVT_NPD__SHIFT 0x00000000
+#define PCIE_TX_CREDITS_ADVT_NP__TX_CREDITS_ADVT_NPH_MASK 0x00ff0000L
+#define PCIE_TX_CREDITS_ADVT_NP__TX_CREDITS_ADVT_NPH__SHIFT 0x00000010
+#define PCIE_TX_CREDITS_ADVT_P__TX_CREDITS_ADVT_PD_MASK 0x00000fffL
+#define PCIE_TX_CREDITS_ADVT_P__TX_CREDITS_ADVT_PD__SHIFT 0x00000000
+#define PCIE_TX_CREDITS_ADVT_P__TX_CREDITS_ADVT_PH_MASK 0x00ff0000L
+#define PCIE_TX_CREDITS_ADVT_P__TX_CREDITS_ADVT_PH__SHIFT 0x00000010
+#define PCIE_TX_CREDITS_FCU_THRESHOLD__TX_FCU_THRESHOLD_CPL_VC0_MASK 0x00000700L
+#define PCIE_TX_CREDITS_FCU_THRESHOLD__TX_FCU_THRESHOLD_CPL_VC0__SHIFT 0x00000008
+#define PCIE_TX_CREDITS_FCU_THRESHOLD__TX_FCU_THRESHOLD_CPL_VC1_MASK 0x07000000L
+#define PCIE_TX_CREDITS_FCU_THRESHOLD__TX_FCU_THRESHOLD_CPL_VC1__SHIFT 0x00000018
+#define PCIE_TX_CREDITS_FCU_THRESHOLD__TX_FCU_THRESHOLD_NP_VC0_MASK 0x00000070L
+#define PCIE_TX_CREDITS_FCU_THRESHOLD__TX_FCU_THRESHOLD_NP_VC0__SHIFT 0x00000004
+#define PCIE_TX_CREDITS_FCU_THRESHOLD__TX_FCU_THRESHOLD_NP_VC1_MASK 0x00700000L
+#define PCIE_TX_CREDITS_FCU_THRESHOLD__TX_FCU_THRESHOLD_NP_VC1__SHIFT 0x00000014
+#define PCIE_TX_CREDITS_FCU_THRESHOLD__TX_FCU_THRESHOLD_P_VC0_MASK 0x00000007L
+#define PCIE_TX_CREDITS_FCU_THRESHOLD__TX_FCU_THRESHOLD_P_VC0__SHIFT 0x00000000
+#define PCIE_TX_CREDITS_FCU_THRESHOLD__TX_FCU_THRESHOLD_P_VC1_MASK 0x00070000L
+#define PCIE_TX_CREDITS_FCU_THRESHOLD__TX_FCU_THRESHOLD_P_VC1__SHIFT 0x00000010
+#define PCIE_TX_CREDITS_INIT_CPL__TX_CREDITS_INIT_CPLD_MASK 0x00000fffL
+#define PCIE_TX_CREDITS_INIT_CPL__TX_CREDITS_INIT_CPLD__SHIFT 0x00000000
+#define PCIE_TX_CREDITS_INIT_CPL__TX_CREDITS_INIT_CPLH_MASK 0x00ff0000L
+#define PCIE_TX_CREDITS_INIT_CPL__TX_CREDITS_INIT_CPLH__SHIFT 0x00000010
+#define PCIE_TX_CREDITS_INIT_NP__TX_CREDITS_INIT_NPD_MASK 0x00000fffL
+#define PCIE_TX_CREDITS_INIT_NP__TX_CREDITS_INIT_NPD__SHIFT 0x00000000
+#define PCIE_TX_CREDITS_INIT_NP__TX_CREDITS_INIT_NPH_MASK 0x00ff0000L
+#define PCIE_TX_CREDITS_INIT_NP__TX_CREDITS_INIT_NPH__SHIFT 0x00000010
+#define PCIE_TX_CREDITS_INIT_P__TX_CREDITS_INIT_PD_MASK 0x00000fffL
+#define PCIE_TX_CREDITS_INIT_P__TX_CREDITS_INIT_PD__SHIFT 0x00000000
+#define PCIE_TX_CREDITS_INIT_P__TX_CREDITS_INIT_PH_MASK 0x00ff0000L
+#define PCIE_TX_CREDITS_INIT_P__TX_CREDITS_INIT_PH__SHIFT 0x00000010
+#define PCIE_TX_CREDITS_STATUS__TX_CREDITS_CUR_STATUS_CPLD_MASK 0x00100000L
+#define PCIE_TX_CREDITS_STATUS__TX_CREDITS_CUR_STATUS_CPLD__SHIFT 0x00000014
+#define PCIE_TX_CREDITS_STATUS__TX_CREDITS_CUR_STATUS_CPLH_MASK 0x00200000L
+#define PCIE_TX_CREDITS_STATUS__TX_CREDITS_CUR_STATUS_CPLH__SHIFT 0x00000015
+#define PCIE_TX_CREDITS_STATUS__TX_CREDITS_CUR_STATUS_NPD_MASK 0x00040000L
+#define PCIE_TX_CREDITS_STATUS__TX_CREDITS_CUR_STATUS_NPD__SHIFT 0x00000012
+#define PCIE_TX_CREDITS_STATUS__TX_CREDITS_CUR_STATUS_NPH_MASK 0x00080000L
+#define PCIE_TX_CREDITS_STATUS__TX_CREDITS_CUR_STATUS_NPH__SHIFT 0x00000013
+#define PCIE_TX_CREDITS_STATUS__TX_CREDITS_CUR_STATUS_PD_MASK 0x00010000L
+#define PCIE_TX_CREDITS_STATUS__TX_CREDITS_CUR_STATUS_PD__SHIFT 0x00000010
+#define PCIE_TX_CREDITS_STATUS__TX_CREDITS_CUR_STATUS_PH_MASK 0x00020000L
+#define PCIE_TX_CREDITS_STATUS__TX_CREDITS_CUR_STATUS_PH__SHIFT 0x00000011
+#define PCIE_TX_CREDITS_STATUS__TX_CREDITS_ERR_CPLD_MASK 0x00000010L
+#define PCIE_TX_CREDITS_STATUS__TX_CREDITS_ERR_CPLD__SHIFT 0x00000004
+#define PCIE_TX_CREDITS_STATUS__TX_CREDITS_ERR_CPLH_MASK 0x00000020L
+#define PCIE_TX_CREDITS_STATUS__TX_CREDITS_ERR_CPLH__SHIFT 0x00000005
+#define PCIE_TX_CREDITS_STATUS__TX_CREDITS_ERR_NPD_MASK 0x00000004L
+#define PCIE_TX_CREDITS_STATUS__TX_CREDITS_ERR_NPD__SHIFT 0x00000002
+#define PCIE_TX_CREDITS_STATUS__TX_CREDITS_ERR_NPH_MASK 0x00000008L
+#define PCIE_TX_CREDITS_STATUS__TX_CREDITS_ERR_NPH__SHIFT 0x00000003
+#define PCIE_TX_CREDITS_STATUS__TX_CREDITS_ERR_PD_MASK 0x00000001L
+#define PCIE_TX_CREDITS_STATUS__TX_CREDITS_ERR_PD__SHIFT 0x00000000
+#define PCIE_TX_CREDITS_STATUS__TX_CREDITS_ERR_PH_MASK 0x00000002L
+#define PCIE_TX_CREDITS_STATUS__TX_CREDITS_ERR_PH__SHIFT 0x00000001
+#define PCIE_TX_LAST_TLP0__TX_LAST_TLP0_MASK 0xffffffffL
+#define PCIE_TX_LAST_TLP0__TX_LAST_TLP0__SHIFT 0x00000000
+#define PCIE_TX_LAST_TLP1__TX_LAST_TLP1_MASK 0xffffffffL
+#define PCIE_TX_LAST_TLP1__TX_LAST_TLP1__SHIFT 0x00000000
+#define PCIE_TX_LAST_TLP2__TX_LAST_TLP2_MASK 0xffffffffL
+#define PCIE_TX_LAST_TLP2__TX_LAST_TLP2__SHIFT 0x00000000
+#define PCIE_TX_LAST_TLP3__TX_LAST_TLP3_MASK 0xffffffffL
+#define PCIE_TX_LAST_TLP3__TX_LAST_TLP3__SHIFT 0x00000000
+#define PCIE_TX_REPLAY__TX_REPLAY_NUM_MASK 0x00000007L
+#define PCIE_TX_REPLAY__TX_REPLAY_NUM__SHIFT 0x00000000
+#define PCIE_TX_REPLAY__TX_REPLAY_TIMER_MASK 0xffff0000L
+#define PCIE_TX_REPLAY__TX_REPLAY_TIMER_OVERWRITE_MASK 0x00008000L
+#define PCIE_TX_REPLAY__TX_REPLAY_TIMER_OVERWRITE__SHIFT 0x0000000f
+#define PCIE_TX_REPLAY__TX_REPLAY_TIMER__SHIFT 0x00000010
+#define PCIE_TX_REQUESTER_ID__TX_REQUESTER_ID_BUS_MASK 0x0000ff00L
+#define PCIE_TX_REQUESTER_ID__TX_REQUESTER_ID_BUS__SHIFT 0x00000008
+#define PCIE_TX_REQUESTER_ID__TX_REQUESTER_ID_DEVICE_MASK 0x000000f8L
+#define PCIE_TX_REQUESTER_ID__TX_REQUESTER_ID_DEVICE__SHIFT 0x00000003
+#define PCIE_TX_REQUESTER_ID__TX_REQUESTER_ID_FUNCTION_MASK 0x00000007L
+#define PCIE_TX_REQUESTER_ID__TX_REQUESTER_ID_FUNCTION__SHIFT 0x00000000
+#define PCIE_TX_REQUEST_NUM_CNTL__TX_NUM_OUTSTANDING_NP_EN_MASK 0x80000000L
+#define PCIE_TX_REQUEST_NUM_CNTL__TX_NUM_OUTSTANDING_NP_EN__SHIFT 0x0000001f
+#define PCIE_TX_REQUEST_NUM_CNTL__TX_NUM_OUTSTANDING_NP_MASK 0x3f000000L
+#define PCIE_TX_REQUEST_NUM_CNTL__TX_NUM_OUTSTANDING_NP__SHIFT 0x00000018
+#define PCIE_TX_REQUEST_NUM_CNTL__TX_NUM_OUTSTANDING_NP_VC1_EN_MASK 0x40000000L
+#define PCIE_TX_REQUEST_NUM_CNTL__TX_NUM_OUTSTANDING_NP_VC1_EN__SHIFT 0x0000001e
+#define PCIE_TX_SEQ__TX_ACKD_SEQ_MASK 0x0fff0000L
+#define PCIE_TX_SEQ__TX_ACKD_SEQ__SHIFT 0x00000010
+#define PCIE_TX_SEQ__TX_NEXT_TRANSMIT_SEQ_MASK 0x00000fffL
+#define PCIE_TX_SEQ__TX_NEXT_TRANSMIT_SEQ__SHIFT 0x00000000
+#define PCIE_TX_VENDOR_SPECIFIC__TX_VENDOR_DATA_MASK 0x00ffffffL
+#define PCIE_TX_VENDOR_SPECIFIC__TX_VENDOR_DATA__SHIFT 0x00000000
+#define PCIE_WPR_CNTL__WPR_RESET_COR_EN_MASK 0x00000008L
+#define PCIE_WPR_CNTL__WPR_RESET_COR_EN__SHIFT 0x00000003
+#define PCIE_WPR_CNTL__WPR_RESET_HOT_RST_EN_MASK 0x00000001L
+#define PCIE_WPR_CNTL__WPR_RESET_HOT_RST_EN__SHIFT 0x00000000
+#define PCIE_WPR_CNTL__WPR_RESET_LNK_DIS_EN_MASK 0x00000004L
+#define PCIE_WPR_CNTL__WPR_RESET_LNK_DIS_EN__SHIFT 0x00000002
+#define PCIE_WPR_CNTL__WPR_RESET_LNK_DWN_EN_MASK 0x00000002L
+#define PCIE_WPR_CNTL__WPR_RESET_LNK_DWN_EN__SHIFT 0x00000001
+#define PCIE_WPR_CNTL__WPR_RESET_PHY_EN_MASK 0x00000040L
+#define PCIE_WPR_CNTL__WPR_RESET_PHY_EN__SHIFT 0x00000006
+#define PCIE_WPR_CNTL__WPR_RESET_REG_EN_MASK 0x00000010L
+#define PCIE_WPR_CNTL__WPR_RESET_REG_EN__SHIFT 0x00000004
+#define PCIE_WPR_CNTL__WPR_RESET_STY_EN_MASK 0x00000020L
+#define PCIE_WPR_CNTL__WPR_RESET_STY_EN__SHIFT 0x00000005
+#define PEER0_FB_OFFSET_HI__PEER0_FB_OFFSET_HI_MASK 0x000fffffL
+#define PEER0_FB_OFFSET_HI__PEER0_FB_OFFSET_HI__SHIFT 0x00000000
+#define PEER0_FB_OFFSET_LO__PEER0_FB_EN_MASK 0x80000000L
+#define PEER0_FB_OFFSET_LO__PEER0_FB_EN__SHIFT 0x0000001f
+#define PEER0_FB_OFFSET_LO__PEER0_FB_OFFSET_LO_MASK 0x000fffffL
+#define PEER0_FB_OFFSET_LO__PEER0_FB_OFFSET_LO__SHIFT 0x00000000
+#define PEER1_FB_OFFSET_HI__PEER1_FB_OFFSET_HI_MASK 0x000fffffL
+#define PEER1_FB_OFFSET_HI__PEER1_FB_OFFSET_HI__SHIFT 0x00000000
+#define PEER1_FB_OFFSET_LO__PEER1_FB_EN_MASK 0x80000000L
+#define PEER1_FB_OFFSET_LO__PEER1_FB_EN__SHIFT 0x0000001f
+#define PEER1_FB_OFFSET_LO__PEER1_FB_OFFSET_LO_MASK 0x000fffffL
+#define PEER1_FB_OFFSET_LO__PEER1_FB_OFFSET_LO__SHIFT 0x00000000
+#define PEER2_FB_OFFSET_HI__PEER2_FB_OFFSET_HI_MASK 0x000fffffL
+#define PEER2_FB_OFFSET_HI__PEER2_FB_OFFSET_HI__SHIFT 0x00000000
+#define PEER2_FB_OFFSET_LO__PEER2_FB_EN_MASK 0x80000000L
+#define PEER2_FB_OFFSET_LO__PEER2_FB_EN__SHIFT 0x0000001f
+#define PEER2_FB_OFFSET_LO__PEER2_FB_OFFSET_LO_MASK 0x000fffffL
+#define PEER2_FB_OFFSET_LO__PEER2_FB_OFFSET_LO__SHIFT 0x00000000
+#define PEER3_FB_OFFSET_HI__PEER3_FB_OFFSET_HI_MASK 0x000fffffL
+#define PEER3_FB_OFFSET_HI__PEER3_FB_OFFSET_HI__SHIFT 0x00000000
+#define PEER3_FB_OFFSET_LO__PEER3_FB_EN_MASK 0x80000000L
+#define PEER3_FB_OFFSET_LO__PEER3_FB_EN__SHIFT 0x0000001f
+#define PEER3_FB_OFFSET_LO__PEER3_FB_OFFSET_LO_MASK 0x000fffffL
+#define PEER3_FB_OFFSET_LO__PEER3_FB_OFFSET_LO__SHIFT 0x00000000
+#define PEER_REG_RANGE0__END_ADDR_MASK 0xffff0000L
+#define PEER_REG_RANGE0__END_ADDR__SHIFT 0x00000010
+#define PEER_REG_RANGE0__START_ADDR_MASK 0x0000ffffL
+#define PEER_REG_RANGE0__START_ADDR__SHIFT 0x00000000
+#define PEER_REG_RANGE1__END_ADDR_MASK 0xffff0000L
+#define PEER_REG_RANGE1__END_ADDR__SHIFT 0x00000010
+#define PEER_REG_RANGE1__START_ADDR_MASK 0x0000ffffL
+#define PEER_REG_RANGE1__START_ADDR__SHIFT 0x00000000
+#define SLAVE_HANG_ERROR__AUDIO_HANG_ERROR_MASK 0x00000010L
+#define SLAVE_HANG_ERROR__AUDIO_HANG_ERROR__SHIFT 0x00000004
+#define SLAVE_HANG_ERROR__CEC_HANG_ERROR_MASK 0x00000020L
+#define SLAVE_HANG_ERROR__CEC_HANG_ERROR__SHIFT 0x00000005
+#define SLAVE_HANG_ERROR__HDP_HANG_ERROR_MASK 0x00000002L
+#define SLAVE_HANG_ERROR__HDP_HANG_ERROR__SHIFT 0x00000001
+#define SLAVE_HANG_ERROR__ROM_HANG_ERROR_MASK 0x00000008L
+#define SLAVE_HANG_ERROR__ROM_HANG_ERROR__SHIFT 0x00000003
+#define SLAVE_HANG_ERROR__SRBM_HANG_ERROR_MASK 0x00000001L
+#define SLAVE_HANG_ERROR__SRBM_HANG_ERROR__SHIFT 0x00000000
+#define SLAVE_HANG_ERROR__VGA_HANG_ERROR_MASK 0x00000004L
+#define SLAVE_HANG_ERROR__VGA_HANG_ERROR__SHIFT 0x00000002
+#define SLAVE_HANG_PROTECTION_CNTL__HANG_PROTECTION_TIMER_SEL_MASK 0x0000000eL
+#define SLAVE_HANG_PROTECTION_CNTL__HANG_PROTECTION_TIMER_SEL__SHIFT 0x00000001
+#define SLAVE_REQ_CREDIT_CNTL__BIF_AZ_REQ_CREDIT_MASK 0x00100000L
+#define SLAVE_REQ_CREDIT_CNTL__BIF_AZ_REQ_CREDIT__SHIFT 0x00000014
+#define SLAVE_REQ_CREDIT_CNTL__BIF_HDP_REQ_CREDIT_MASK 0x00007c00L
+#define SLAVE_REQ_CREDIT_CNTL__BIF_HDP_REQ_CREDIT__SHIFT 0x0000000a
+#define SLAVE_REQ_CREDIT_CNTL__BIF_ROM_REQ_CREDIT_MASK 0x00008000L
+#define SLAVE_REQ_CREDIT_CNTL__BIF_ROM_REQ_CREDIT__SHIFT 0x0000000f
+#define SLAVE_REQ_CREDIT_CNTL__BIF_SRBM_REQ_CREDIT_MASK 0x0000001fL
+#define SLAVE_REQ_CREDIT_CNTL__BIF_SRBM_REQ_CREDIT__SHIFT 0x00000000
+#define SLAVE_REQ_CREDIT_CNTL__BIF_VGA_REQ_CREDIT_MASK 0x000001e0L
+#define SLAVE_REQ_CREDIT_CNTL__BIF_VGA_REQ_CREDIT__SHIFT 0x00000005
+#define SLAVE_REQ_CREDIT_CNTL__BIF_XDMA_REQ_CREDIT_MASK 0x7e000000L
+#define SLAVE_REQ_CREDIT_CNTL__BIF_XDMA_REQ_CREDIT__SHIFT 0x00000019
+#define SMBCLK_PAD_CNTL__SMBCLK_PAD_A_MASK 0x00000001L
+#define SMBCLK_PAD_CNTL__SMBCLK_PAD_A__SHIFT 0x00000000
+#define SMBCLK_PAD_CNTL__SMBCLK_PAD_CNTL_EN_MASK 0x00001000L
+#define SMBCLK_PAD_CNTL__SMBCLK_PAD_CNTL_EN__SHIFT 0x0000000c
+#define SMBCLK_PAD_CNTL__SMBCLK_PAD_MODE_MASK 0x00000004L
+#define SMBCLK_PAD_CNTL__SMBCLK_PAD_MODE__SHIFT 0x00000002
+#define SMBCLK_PAD_CNTL__SMBCLK_PAD_SCHMEN_MASK 0x00000800L
+#define SMBCLK_PAD_CNTL__SMBCLK_PAD_SCHMEN__SHIFT 0x0000000b
+#define SMBCLK_PAD_CNTL__SMBCLK_PAD_SEL_MASK 0x00000002L
+#define SMBCLK_PAD_CNTL__SMBCLK_PAD_SEL__SHIFT 0x00000001
+#define SMBCLK_PAD_CNTL__SMBCLK_PAD_SLEWN_MASK 0x00000200L
+#define SMBCLK_PAD_CNTL__SMBCLK_PAD_SLEWN__SHIFT 0x00000009
+#define SMBCLK_PAD_CNTL__SMBCLK_PAD_SN0_MASK 0x00000020L
+#define SMBCLK_PAD_CNTL__SMBCLK_PAD_SN0__SHIFT 0x00000005
+#define SMBCLK_PAD_CNTL__SMBCLK_PAD_SN1_MASK 0x00000040L
+#define SMBCLK_PAD_CNTL__SMBCLK_PAD_SN1__SHIFT 0x00000006
+#define SMBCLK_PAD_CNTL__SMBCLK_PAD_SN2_MASK 0x00000080L
+#define SMBCLK_PAD_CNTL__SMBCLK_PAD_SN2__SHIFT 0x00000007
+#define SMBCLK_PAD_CNTL__SMBCLK_PAD_SN3_MASK 0x00000100L
+#define SMBCLK_PAD_CNTL__SMBCLK_PAD_SN3__SHIFT 0x00000008
+#define SMBCLK_PAD_CNTL__SMBCLK_PAD_SPARE_MASK 0x00000018L
+#define SMBCLK_PAD_CNTL__SMBCLK_PAD_SPARE__SHIFT 0x00000003
+#define SMBCLK_PAD_CNTL__SMBCLK_PAD_WAKE_MASK 0x00000400L
+#define SMBCLK_PAD_CNTL__SMBCLK_PAD_WAKE__SHIFT 0x0000000a
+#define SMBDAT_PAD_CNTL__SMBDAT_PAD_A_MASK 0x00000001L
+#define SMBDAT_PAD_CNTL__SMBDAT_PAD_A__SHIFT 0x00000000
+#define SMBDAT_PAD_CNTL__SMBDAT_PAD_CNTL_EN_MASK 0x00001000L
+#define SMBDAT_PAD_CNTL__SMBDAT_PAD_CNTL_EN__SHIFT 0x0000000c
+#define SMBDAT_PAD_CNTL__SMBDAT_PAD_MODE_MASK 0x00000004L
+#define SMBDAT_PAD_CNTL__SMBDAT_PAD_MODE__SHIFT 0x00000002
+#define SMBDAT_PAD_CNTL__SMBDAT_PAD_SCHMEN_MASK 0x00000800L
+#define SMBDAT_PAD_CNTL__SMBDAT_PAD_SCHMEN__SHIFT 0x0000000b
+#define SMBDAT_PAD_CNTL__SMBDAT_PAD_SEL_MASK 0x00000002L
+#define SMBDAT_PAD_CNTL__SMBDAT_PAD_SEL__SHIFT 0x00000001
+#define SMBDAT_PAD_CNTL__SMBDAT_PAD_SLEWN_MASK 0x00000200L
+#define SMBDAT_PAD_CNTL__SMBDAT_PAD_SLEWN__SHIFT 0x00000009
+#define SMBDAT_PAD_CNTL__SMBDAT_PAD_SN0_MASK 0x00000020L
+#define SMBDAT_PAD_CNTL__SMBDAT_PAD_SN0__SHIFT 0x00000005
+#define SMBDAT_PAD_CNTL__SMBDAT_PAD_SN1_MASK 0x00000040L
+#define SMBDAT_PAD_CNTL__SMBDAT_PAD_SN1__SHIFT 0x00000006
+#define SMBDAT_PAD_CNTL__SMBDAT_PAD_SN2_MASK 0x00000080L
+#define SMBDAT_PAD_CNTL__SMBDAT_PAD_SN2__SHIFT 0x00000007
+#define SMBDAT_PAD_CNTL__SMBDAT_PAD_SN3_MASK 0x00000100L
+#define SMBDAT_PAD_CNTL__SMBDAT_PAD_SN3__SHIFT 0x00000008
+#define SMBDAT_PAD_CNTL__SMBDAT_PAD_SPARE_MASK 0x00000018L
+#define SMBDAT_PAD_CNTL__SMBDAT_PAD_SPARE__SHIFT 0x00000003
+#define SMBDAT_PAD_CNTL__SMBDAT_PAD_WAKE_MASK 0x00000400L
+#define SMBDAT_PAD_CNTL__SMBDAT_PAD_WAKE__SHIFT 0x0000000a
+#define SMBUS_BACO_DUMMY__SMBUS_BACO_DUMMY_DATA_MASK 0xffffffffL
+#define SMBUS_BACO_DUMMY__SMBUS_BACO_DUMMY_DATA__SHIFT 0x00000000
+
+#endif
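
[Editor's aside, not part of the patch: the header above is machine-generated bitfield plumbing — every register field gets a paired _MASK and __SHIFT macro. As a minimal sketch of how such pairs are typically consumed, the helpers below do a read-modify-write of one field; field_get()/field_set() are illustrative names, and rreg()/wreg() are hypothetical stand-ins for the driver's MMIO accessors, not functions defined in this patch.]

#include <stdint.h>

/* Extract a field: mask off the bits, then shift them down to bit 0. */
static inline uint32_t field_get(uint32_t reg, uint32_t mask, uint32_t shift)
{
	return (reg & mask) >> shift;
}

/* Update a field: clear it with the mask, then OR in the shifted value,
 * re-masking so an oversized value cannot spill into neighboring fields. */
static inline uint32_t field_set(uint32_t reg, uint32_t mask, uint32_t shift,
				 uint32_t val)
{
	return (reg & ~mask) | ((val << shift) & mask);
}

/* Example with a field defined above (RX_RCB_CPL_TIMEOUT occupies
 * PCIE_RX_CNTL bits 18:16, mask 0x00070000, shift 0x10):
 *
 *	uint32_t v = rreg(PCIE_RX_CNTL);             // hypothetical MMIO read
 *	v = field_set(v, PCIE_RX_CNTL__RX_RCB_CPL_TIMEOUT_MASK,
 *		      PCIE_RX_CNTL__RX_RCB_CPL_TIMEOUT__SHIFT, 0x5);
 *	wreg(PCIE_RX_CNTL, v);                       // hypothetical MMIO write
 */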
diff --git a/drivers/gpu/drm/amd/include/asic_reg/dce/dce_11_2_d.h b/drivers/gpu/drm/amd/include/asic_reg/dce/dce_11_2_d.h
old mode 100755
new mode 100644
index 09a7df17570d..09a7df17570d
--- a/drivers/gpu/drm/amd/include/asic_reg/dce/dce_11_2_d.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/dce/dce_11_2_d.h
diff --git a/drivers/gpu/drm/amd/include/asic_reg/dce/dce_11_2_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/dce/dce_11_2_sh_mask.h
old mode 100755
new mode 100644
index 1ddc4183a1c9..1ddc4183a1c9
--- a/drivers/gpu/drm/amd/include/asic_reg/dce/dce_11_2_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/dce/dce_11_2_sh_mask.h
diff --git a/drivers/gpu/drm/amd/include/asic_reg/dce/dce_6_0_d.h b/drivers/gpu/drm/amd/include/asic_reg/dce/dce_6_0_d.h
new file mode 100644
index 000000000000..ae798f768853
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/dce/dce_6_0_d.h
@@ -0,0 +1,4457 @@
+/*
+ *
+ * Copyright (C) 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef DCE_6_0_D_H
+#define DCE_6_0_D_H
+
+#define ixATTR00 0x0000
+#define ixATTR01 0x0001
+#define ixATTR02 0x0002
+#define ixATTR03 0x0003
+#define ixATTR04 0x0004
+#define ixATTR05 0x0005
+#define ixATTR06 0x0006
+#define ixATTR07 0x0007
+#define ixATTR08 0x0008
+#define ixATTR09 0x0009
+#define ixATTR0A 0x000A
+#define ixATTR0B 0x000B
+#define ixATTR0C 0x000C
+#define ixATTR0D 0x000D
+#define ixATTR0E 0x000E
+#define ixATTR0F 0x000F
+#define ixATTR10 0x0010
+#define ixATTR11 0x0011
+#define ixATTR12 0x0012
+#define ixATTR13 0x0013
+#define ixATTR14 0x0014
+#define ixAUDIO_DESCRIPTOR0 0x0001
+#define ixAUDIO_DESCRIPTOR10 0x000B
+#define ixAUDIO_DESCRIPTOR1 0x0002
+#define ixAUDIO_DESCRIPTOR11 0x000C
+#define ixAUDIO_DESCRIPTOR12 0x000D
+#define ixAUDIO_DESCRIPTOR13 0x000E
+#define ixAUDIO_DESCRIPTOR2 0x0003
+#define ixAUDIO_DESCRIPTOR3 0x0004
+#define ixAUDIO_DESCRIPTOR4 0x0005
+#define ixAUDIO_DESCRIPTOR5 0x0006
+#define ixAUDIO_DESCRIPTOR6 0x0007
+#define ixAUDIO_DESCRIPTOR7 0x0008
+#define ixAUDIO_DESCRIPTOR8 0x0009
+#define ixAUDIO_DESCRIPTOR9 0x000A
+#define ixAZALIA_CUMULATIVE_LATENCY_COUNT 0x0003
+#define ixAZALIA_CUMULATIVE_REQUEST_COUNT 0x0004
+#define ixAZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID 0x0003
+#define ixAZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT 0x0002
+#define ixAZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER 0x0004
+#define ixAZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING 0x0009
+#define ixAZALIA_F0_CODEC_CONVERTER_CONTROL_RAMP_RATE 0x0008
+#define ixAZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES 0x0001
+#define ixAZALIA_F0_CODEC_CONVERTER_PARAMETER_STREAM_FORMATS 0x0005
+#define ixAZALIA_F0_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES 0x0006
+#define ixAZALIA_F0_CODEC_CONVERTER_PIN_DEBUG 0x0000
+#define ixAZALIA_F0_CODEC_CONVERTER_STRIPE_CONTROL 0x0007
+#define ixAZALIA_F0_CODEC_PIN_ASSOCIATION_INFO 0x0062
+#define ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0 0x0028
+#define ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10 0x0032
+#define ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1 0x0029
+#define ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11 0x0033
+#define ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12 0x0034
+#define ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13 0x0035
+#define ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2 0x002A
+#define ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3 0x002B
+#define ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4 0x002C
+#define ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5 0x002D
+#define ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6 0x002E
+#define ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7 0x002F
+#define ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8 0x0030
+#define ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9 0x0031
+#define ixAZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER 0x0025
+#define ixAZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL 0x0054
+#define ixAZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE 0x0036
+#define ixAZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2 0x0057
+#define ixAZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_MODE 0x0058
+#define ixAZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT 0x0056
+#define ixAZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR 0x0038
+#define ixAZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC 0x0037
+#define ixAZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_PIN_SENSE 0x0023
+#define ixAZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0 0x003A
+#define ixAZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO1 0x003B
+#define ixAZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO2 0x003C
+#define ixAZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO3 0x003D
+#define ixAZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4 0x003E
+#define ixAZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5 0x003F
+#define ixAZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6 0x0040
+#define ixAZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7 0x0041
+#define ixAZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8 0x0042
+#define ixAZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE 0x0022
+#define ixAZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE 0x0055
+#define ixAZALIA_F0_CODEC_PIN_CONTROL_WIDGET_CONTROL 0x0024
+#define ixAZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES 0x0020
+#define ixAZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES 0x0021
+#define ixAZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_0 0x0059
+#define ixAZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1 0x005A
+#define ixAZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_2 0x005B
+#define ixAZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_3 0x005C
+#define ixAZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4 0x005D
+#define ixAZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_5 0x005E
+#define ixAZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_6 0x005F
+#define ixAZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_7 0x0060
+#define ixAZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_8 0x0061
+#define ixAZALIA_F2_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID 0x2706
+#define ixAZALIA_F2_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT 0x2200
+#define ixAZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER 0x270D
+#define ixAZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER_2 0x270E
+#define ixAZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER_3 0x273E
+#define ixAZALIA_F2_CODEC_CONVERTER_CONTROL_RAMP_RATE 0x2770
+#define ixAZALIA_F2_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES 0x2F09
+#define ixAZALIA_F2_CODEC_CONVERTER_PARAMETER_STREAM_FORMATS 0x2F0B
+#define ixAZALIA_F2_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES 0x2F0A
+#define ixAZALIA_F2_CODEC_CONVERTER_STRIPE_CONTROL 0x2724
+#define ixAZALIA_F2_CODEC_FUNCTION_CONTROL_CONVERTER_SYNCHRONIZATION 0x1770
+#define ixAZALIA_F2_CODEC_FUNCTION_CONTROL_POWER_STATE 0x1705
+#define ixAZALIA_F2_CODEC_FUNCTION_CONTROL_RESET 0x17FF
+#define ixAZALIA_F2_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID 0x1720
+#define ixAZALIA_F2_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID_2 0x1721
+#define ixAZALIA_F2_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID_3 0x1722
+#define ixAZALIA_F2_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID_4 0x1723
+#define ixAZALIA_F2_CODEC_FUNCTION_PARAMETER_GROUP_TYPE 0x1F05
+#define ixAZALIA_F2_CODEC_FUNCTION_PARAMETER_POWER_STATES 0x1F0F
+#define ixAZALIA_F2_CODEC_FUNCTION_PARAMETER_STREAM_FORMATS 0x1F0B
+#define ixAZALIA_F2_CODEC_FUNCTION_PARAMETER_SUBORDINATE_NODE_COUNT 0x1F04
+#define ixAZALIA_F2_CODEC_FUNCTION_PARAMETER_SUPPORTED_SIZE_RATES 0x1F0A
+#define ixAZALIA_F2_CODEC_PIN_ASSOCIATION_INFO 0x3793
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR 0x3776
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR_DATA 0x3776
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_AUDIO_SINK_INFO_DATA 0x3781
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_AUDIO_SINK_INFO_INDEX 0x3780
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_CHANNEL_ALLOCATION 0x3771
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_DOWN_MIX_INFO 0x3772
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_HBR 0x377C
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_LIPSYNC 0x377B
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_MANUFACTURER_ID 0x0000
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL01_ENABLE 0x3777
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL1_ENABLE 0x3785
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL23_ENABLE 0x3778
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL3_ENABLE 0x3786
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL45_ENABLE 0x3779
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL5_ENABLE 0x3787
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL67_ENABLE 0x377A
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL7_ENABLE 0x3788
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL_MODE 0x3789
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_PORTID0 0x0003
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_PORTID1 0x0004
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_PRODUCT_ID 0x0001
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT 0x371C
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_2 0x371D
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_3 0x371E
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_4 0x371F
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONNECTION_LIST_ENTRY 0x3702
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_PIN_SENSE 0x3709
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_SPEAKER_ALLOCATION 0x3770
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_SINK_DESCRIPTION_LEN 0x0002
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE 0x3708
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_WIDGET_CONTROL 0x3707
+#define ixAZALIA_F2_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES 0x3F09
+#define ixAZALIA_F2_CODEC_PIN_PARAMETER_CAPABILITIES 0x3F0C
+#define ixAZALIA_F2_CODEC_PIN_PARAMETER_CONNECTION_LIST_LENGTH 0x3F0E
+#define ixAZALIA_F2_CODEC_ROOT_PARAMETER_REVISION_ID 0x0F02
+#define ixAZALIA_F2_CODEC_ROOT_PARAMETER_SUBORDINATE_NODE_COUNT 0x0F04
+#define ixAZALIA_F2_CODEC_ROOT_PARAMETER_VENDOR_AND_DEVICE_ID 0x0F00
+#define ixAZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_0 0x378A
+#define ixAZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_1 0x378B
+#define ixAZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_2 0x378C
+#define ixAZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_3 0x378D
+#define ixAZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_4 0x378E
+#define ixAZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_5 0x378F
+#define ixAZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_6 0x3790
+#define ixAZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_7 0x3791
+#define ixAZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_8 0x3792
+#define ixAZALIA_FIFO_SIZE_CONTROL 0x0000
+#define ixAZALIA_LATENCY_COUNTER_CONTROL 0x0001
+#define ixAZALIA_STREAM_DEBUG 0x0005
+#define ixAZALIA_WORSTCASE_LATENCY_COUNT 0x0002
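+/* ixCRT00-ixCRT22 below appear to mirror the legacy VGA CRT controller
+ * register indices (0x00-0x18, 0x1E, 0x1F, 0x22). */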
+#define ixCRT00 0x0000
+#define ixCRT01 0x0001
+#define ixCRT02 0x0002
+#define ixCRT03 0x0003
+#define ixCRT04 0x0004
+#define ixCRT05 0x0005
+#define ixCRT06 0x0006
+#define ixCRT07 0x0007
+#define ixCRT08 0x0008
+#define ixCRT09 0x0009
+#define ixCRT0A 0x000A
+#define ixCRT0B 0x000B
+#define ixCRT0C 0x000C
+#define ixCRT0D 0x000D
+#define ixCRT0E 0x000E
+#define ixCRT0F 0x000F
+#define ixCRT10 0x0010
+#define ixCRT11 0x0011
+#define ixCRT12 0x0012
+#define ixCRT13 0x0013
+#define ixCRT14 0x0014
+#define ixCRT15 0x0015
+#define ixCRT16 0x0016
+#define ixCRT17 0x0017
+#define ixCRT18 0x0018
+#define ixCRT1E 0x001E
+#define ixCRT1F 0x001F
+#define ixCRT22 0x0022
+#define ixDCIO_DEBUG10 0x0010
+#define ixDCIO_DEBUG1 0x0001
+#define ixDCIO_DEBUG11 0x0011
+#define ixDCIO_DEBUG12 0x0012
+#define ixDCIO_DEBUG13 0x0013
+#define ixDCIO_DEBUG2 0x0002
+#define ixDCIO_DEBUG3 0x0003
+#define ixDCIO_DEBUG4 0x0004
+#define ixDCIO_DEBUG5 0x0005
+#define ixDCIO_DEBUG6 0x0006
+#define ixDCIO_DEBUG7 0x0007
+#define ixDCIO_DEBUG8 0x0008
+#define ixDCIO_DEBUG9 0x0009
+#define ixDCIO_DEBUGA 0x000A
+#define ixDCIO_DEBUGB 0x000B
+#define ixDCIO_DEBUGC 0x000C
+#define ixDCIO_DEBUGD 0x000D
+#define ixDCIO_DEBUGE 0x000E
+#define ixDCIO_DEBUGF 0x000F
+#define ixDCIO_DEBUG_ID 0x0000
+#define ixDMIF_DEBUG02_CORE0 0x0002
+#define ixDMIF_DEBUG02_CORE1 0x000A
+#define ixDP_AUX1_DEBUG_A 0x0010
+#define ixDP_AUX1_DEBUG_B 0x0011
+#define ixDP_AUX1_DEBUG_C 0x0012
+#define ixDP_AUX1_DEBUG_D 0x0013
+#define ixDP_AUX1_DEBUG_E 0x0014
+#define ixDP_AUX1_DEBUG_F 0x0015
+#define ixDP_AUX1_DEBUG_G 0x0016
+#define ixDP_AUX1_DEBUG_H 0x0017
+#define ixDP_AUX1_DEBUG_I 0x0018
+#define ixDP_AUX2_DEBUG_A 0x0020
+#define ixDP_AUX2_DEBUG_B 0x0021
+#define ixDP_AUX2_DEBUG_C 0x0022
+#define ixDP_AUX2_DEBUG_D 0x0023
+#define ixDP_AUX2_DEBUG_E 0x0024
+#define ixDP_AUX2_DEBUG_F 0x0025
+#define ixDP_AUX2_DEBUG_G 0x0026
+#define ixDP_AUX2_DEBUG_H 0x0027
+#define ixDP_AUX2_DEBUG_I 0x0028
+#define ixDP_AUX3_DEBUG_A 0x0030
+#define ixDP_AUX3_DEBUG_B 0x0031
+#define ixDP_AUX3_DEBUG_C 0x0032
+#define ixDP_AUX3_DEBUG_D 0x0033
+#define ixDP_AUX3_DEBUG_E 0x0034
+#define ixDP_AUX3_DEBUG_F 0x0035
+#define ixDP_AUX3_DEBUG_G 0x0036
+#define ixDP_AUX3_DEBUG_H 0x0037
+#define ixDP_AUX3_DEBUG_I 0x0038
+#define ixDP_AUX4_DEBUG_A 0x0040
+#define ixDP_AUX4_DEBUG_B 0x0041
+#define ixDP_AUX4_DEBUG_C 0x0042
+#define ixDP_AUX4_DEBUG_D 0x0043
+#define ixDP_AUX4_DEBUG_E 0x0044
+#define ixDP_AUX4_DEBUG_F 0x0045
+#define ixDP_AUX4_DEBUG_G 0x0046
+#define ixDP_AUX4_DEBUG_H 0x0047
+#define ixDP_AUX4_DEBUG_I 0x0048
+#define ixDP_AUX5_DEBUG_A 0x0070
+#define ixDP_AUX5_DEBUG_B 0x0071
+#define ixDP_AUX5_DEBUG_C 0x0072
+#define ixDP_AUX5_DEBUG_D 0x0073
+#define ixDP_AUX5_DEBUG_E 0x0074
+#define ixDP_AUX5_DEBUG_F 0x0075
+#define ixDP_AUX5_DEBUG_G 0x0076
+#define ixDP_AUX5_DEBUG_H 0x0077
+#define ixDP_AUX5_DEBUG_I 0x0078
+#define ixDP_AUX6_DEBUG_A 0x0080
+#define ixDP_AUX6_DEBUG_B 0x0081
+#define ixDP_AUX6_DEBUG_C 0x0082
+#define ixDP_AUX6_DEBUG_D 0x0083
+#define ixDP_AUX6_DEBUG_E 0x0084
+#define ixDP_AUX6_DEBUG_F 0x0085
+#define ixDP_AUX6_DEBUG_G 0x0086
+#define ixDP_AUX6_DEBUG_H 0x0087
+#define ixDP_AUX6_DEBUG_I 0x0088
+#define ixFMT_DEBUG0 0x0001
+#define ixFMT_DEBUG1 0x0002
+#define ixFMT_DEBUG2 0x0003
+#define ixFMT_DEBUG_ID 0x0000
+#define ixGRA00 0x0000
+#define ixGRA01 0x0001
+#define ixGRA02 0x0002
+#define ixGRA03 0x0003
+#define ixGRA04 0x0004
+#define ixGRA05 0x0005
+#define ixGRA06 0x0006
+#define ixGRA07 0x0007
+#define ixGRA08 0x0008
+#define ixIDDCCIF02_DBG_DCCIF_C 0x0009
+#define ixIDDCCIF04_DBG_DCCIF_E 0x000B
+#define ixIDDCCIF05_DBG_DCCIF_F 0x000C
+#define ixMVP_DEBUG_12 0x000C
+#define ixMVP_DEBUG_13 0x000D
+#define ixMVP_DEBUG_14 0x000E
+#define ixMVP_DEBUG_15 0x000F
+#define ixMVP_DEBUG_16 0x0010
+#define ixMVP_DEBUG_17 0x0011
+#define ixSEQ00 0x0000
+#define ixSEQ01 0x0001
+#define ixSEQ02 0x0002
+#define ixSEQ03 0x0003
+#define ixSEQ04 0x0004
+#define ixSINK_DESCRIPTION0 0x0005
+#define ixSINK_DESCRIPTION10 0x000F
+#define ixSINK_DESCRIPTION1 0x0006
+#define ixSINK_DESCRIPTION11 0x0010
+#define ixSINK_DESCRIPTION12 0x0011
+#define ixSINK_DESCRIPTION13 0x0012
+#define ixSINK_DESCRIPTION14 0x0013
+#define ixSINK_DESCRIPTION15 0x0014
+#define ixSINK_DESCRIPTION16 0x0015
+#define ixSINK_DESCRIPTION17 0x0016
+#define ixSINK_DESCRIPTION2 0x0007
+#define ixSINK_DESCRIPTION3 0x0008
+#define ixSINK_DESCRIPTION4 0x0009
+#define ixSINK_DESCRIPTION5 0x000A
+#define ixSINK_DESCRIPTION6 0x000B
+#define ixSINK_DESCRIPTION7 0x000C
+#define ixSINK_DESCRIPTION8 0x000D
+#define ixSINK_DESCRIPTION9 0x000E
+#define ixVGADCC_DBG_DCCIF_C 0x007E
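+/*
+ * Naming note (assumed convention, consistent with other AMD display
+ * register headers): the ix* symbols above are indexed-register offsets,
+ * reached indirectly through an INDEX/DATA register pair, while the mm*
+ * symbols below are memory-mapped register offsets in dword units.
+ * Illustrative sketch only; rreg32()/wreg32() stand in for the driver's
+ * MMIO accessors:
+ *
+ *	wreg32(mmAZALIA_F0_CODEC_ENDPOINT_INDEX,
+ *	       ixAZALIA_F0_CODEC_PIN_CONTROL_WIDGET_CONTROL);
+ *	val = rreg32(mmAZALIA_F0_CODEC_ENDPOINT_DATA);
+ */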
+#define mmABM_TEST_DEBUG_DATA 0x169F
+#define mmABM_TEST_DEBUG_INDEX 0x169E
+#define mmAFMT_60958_0 0x1C41
+#define mmAFMT_60958_1 0x1C42
+#define mmAFMT_60958_2 0x1C48
+#define mmAFMT_AUDIO_CRC_CONTROL 0x1C43
+#define mmAFMT_AUDIO_CRC_RESULT 0x1C49
+#define mmAFMT_AUDIO_DBG_DTO_CNTL 0x1C52
+#define mmAFMT_AUDIO_INFO0 0x1C3F
+#define mmAFMT_AUDIO_INFO1 0x1C40
+#define mmAFMT_AUDIO_PACKET_CONTROL 0x1C4B
+#define mmAFMT_AUDIO_PACKET_CONTROL2 0x1C17
+#define mmAFMT_AUDIO_SRC_CONTROL 0x1C4F
+#define mmAFMT_AVI_INFO0 0x1C21
+#define mmAFMT_AVI_INFO1 0x1C22
+#define mmAFMT_AVI_INFO2 0x1C23
+#define mmAFMT_AVI_INFO3 0x1C24
+#define mmAFMT_GENERIC_0 0x1C28
+#define mmAFMT_GENERIC_1 0x1C29
+#define mmAFMT_GENERIC_2 0x1C2A
+#define mmAFMT_GENERIC_3 0x1C2B
+#define mmAFMT_GENERIC_4 0x1C2C
+#define mmAFMT_GENERIC_5 0x1C2D
+#define mmAFMT_GENERIC_6 0x1C2E
+#define mmAFMT_GENERIC_7 0x1C2F
+#define mmAFMT_GENERIC_HDR 0x1C27
+#define mmAFMT_INFOFRAME_CONTROL0 0x1C4D
+#define mmAFMT_INTERRUPT_STATUS 0x1C14
+#define mmAFMT_ISRC1_0 0x1C18
+#define mmAFMT_ISRC1_1 0x1C19
+#define mmAFMT_ISRC1_2 0x1C1A
+#define mmAFMT_ISRC1_3 0x1C1B
+#define mmAFMT_ISRC1_4 0x1C1C
+#define mmAFMT_ISRC2_0 0x1C1D
+#define mmAFMT_ISRC2_1 0x1C1E
+#define mmAFMT_ISRC2_2 0x1C1F
+#define mmAFMT_ISRC2_3 0x1C20
+#define mmAFMT_MPEG_INFO0 0x1C25
+#define mmAFMT_MPEG_INFO1 0x1C26
+#define mmAFMT_RAMP_CONTROL0 0x1C44
+#define mmAFMT_RAMP_CONTROL1 0x1C45
+#define mmAFMT_RAMP_CONTROL2 0x1C46
+#define mmAFMT_RAMP_CONTROL3 0x1C47
+#define mmAFMT_STATUS 0x1C4A
+#define mmAFMT_VBI_PACKET_CONTROL 0x1C4C
+#define mmATTRDR 0x00F0
+#define mmATTRDW 0x00F0
+#define mmATTRX 0x00F0
+#define mmAUX_ARB_CONTROL 0x1882
+#define mmAUX_CONTROL 0x1880
+#define mmAUX_DPHY_RX_CONTROL0 0x188A
+#define mmAUX_DPHY_RX_CONTROL1 0x188B
+#define mmAUX_DPHY_RX_STATUS 0x188D
+#define mmAUX_DPHY_TX_CONTROL 0x1889
+#define mmAUX_DPHY_TX_REF_CONTROL 0x1888
+#define mmAUX_DPHY_TX_STATUS 0x188C
+#define mmAUX_GTC_SYNC_CONTROL 0x188E
+#define mmAUX_GTC_SYNC_DATA 0x1890
+#define mmAUX_INTERRUPT_CONTROL 0x1883
+#define mmAUX_LS_DATA 0x1887
+#define mmAUX_LS_STATUS 0x1885
+#define mmAUXN_IMPCAL 0x190C
+#define mmAUXP_IMPCAL 0x190B
+#define mmAUX_SW_CONTROL 0x1881
+#define mmAUX_SW_DATA 0x1886
+#define mmAUX_SW_STATUS 0x1884
+#define mmAZALIA_APPLICATION_POSITION_IN_CYCLIC_BUFFER 0x17C9
+#define mmAZALIA_AUDIO_DTO 0x17BA
+#define mmAZALIA_AUDIO_DTO_CONTROL 0x17BB
+#define mmAZALIA_BDL_DMA_CONTROL 0x17BF
+#define mmAZALIA_CONTROLLER_DEBUG 0x17CF
+#define mmAZALIA_CORB_DMA_CONTROL 0x17C1
+#define mmAZALIA_CYCLIC_BUFFER_SYNC 0x17CA
+#define mmAZALIA_DATA_DMA_CONTROL 0x17BE
+#define mmAZALIA_F0_CODEC_CHANNEL_COUNT_CONTROL 0x17D5
+#define mmAZALIA_F0_CODEC_DEBUG 0x17DF
+#define mmAZALIA_F0_CODEC_ENDPOINT_DATA 0x1781
+#define mmAZALIA_F0_CODEC_ENDPOINT_INDEX 0x1780
+#define mmAZALIA_F0_CODEC_FUNCTION_CONTROL_CONVERTER_SYNCHRONIZATION 0x17DE
+#define mmAZALIA_F0_CODEC_FUNCTION_CONTROL_POWER_STATE 0x17DB
+#define mmAZALIA_F0_CODEC_FUNCTION_CONTROL_RESET 0x17DC
+#define mmAZALIA_F0_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID 0x17DD
+#define mmAZALIA_F0_CODEC_FUNCTION_PARAMETER_GROUP_TYPE 0x17D7
+#define mmAZALIA_F0_CODEC_FUNCTION_PARAMETER_POWER_STATES 0x17DA
+#define mmAZALIA_F0_CODEC_FUNCTION_PARAMETER_STREAM_FORMATS 0x17D9
+#define mmAZALIA_F0_CODEC_FUNCTION_PARAMETER_SUPPORTED_SIZE_RATES 0x17D8
+#define mmAZALIA_F0_CODEC_RESYNC_FIFO_CONTROL 0x17D6
+#define mmAZALIA_F0_CODEC_ROOT_PARAMETER_REVISION_ID 0x17D3
+#define mmAZALIA_F0_CODEC_ROOT_PARAMETER_VENDOR_AND_DEVICE_ID 0x17D2
+#define mmAZALIA_GLOBAL_CAPABILITIES 0x17CB
+#define mmAZALIA_OUTPUT_PAYLOAD_CAPABILITY 0x17CC
+#define mmAZALIA_OUTPUT_STREAM_ARBITER_CONTROL 0x17CD
+#define mmAZALIA_RIRB_AND_DP_CONTROL 0x17C0
+#define mmAZALIA_SCLK_CONTROL 0x17BC
+#define mmAZALIA_STREAM_DATA 0x17E9
+#define mmAZALIA_STREAM_INDEX 0x17E8
+#define mmAZALIA_UNDERFLOW_FILLER_SAMPLE 0x17BD
+#define mmAZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_DATA 0x1781
+#define mmAZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_INDEX 0x1780
+#define mmAZF0ENDPOINT1_AZALIA_F0_CODEC_ENDPOINT_DATA 0x1787
+#define mmAZF0ENDPOINT1_AZALIA_F0_CODEC_ENDPOINT_INDEX 0x1786
+#define mmAZF0ENDPOINT2_AZALIA_F0_CODEC_ENDPOINT_DATA 0x178D
+#define mmAZF0ENDPOINT2_AZALIA_F0_CODEC_ENDPOINT_INDEX 0x178C
+#define mmAZF0ENDPOINT3_AZALIA_F0_CODEC_ENDPOINT_DATA 0x1793
+#define mmAZF0ENDPOINT3_AZALIA_F0_CODEC_ENDPOINT_INDEX 0x1792
+#define mmAZF0ENDPOINT4_AZALIA_F0_CODEC_ENDPOINT_DATA 0x1799
+#define mmAZF0ENDPOINT4_AZALIA_F0_CODEC_ENDPOINT_INDEX 0x1798
+#define mmAZF0ENDPOINT5_AZALIA_F0_CODEC_ENDPOINT_DATA 0x179F
+#define mmAZF0ENDPOINT5_AZALIA_F0_CODEC_ENDPOINT_INDEX 0x179E
+#define mmAZF0STREAM0_AZALIA_STREAM_DATA 0x17E9
+#define mmAZF0STREAM0_AZALIA_STREAM_INDEX 0x17E8
+#define mmAZF0STREAM1_AZALIA_STREAM_DATA 0x17ED
+#define mmAZF0STREAM1_AZALIA_STREAM_INDEX 0x17EC
+#define mmAZF0STREAM2_AZALIA_STREAM_DATA 0x17F1
+#define mmAZF0STREAM2_AZALIA_STREAM_INDEX 0x17F0
+#define mmAZF0STREAM3_AZALIA_STREAM_DATA 0x17F5
+#define mmAZF0STREAM3_AZALIA_STREAM_INDEX 0x17F4
+#define mmAZF0STREAM4_AZALIA_STREAM_DATA 0x17F9
+#define mmAZF0STREAM4_AZALIA_STREAM_INDEX 0x17F8
+#define mmAZF0STREAM5_AZALIA_STREAM_DATA 0x17FD
+#define mmAZF0STREAM5_AZALIA_STREAM_INDEX 0x17FC
+#define mmAZ_TEST_DEBUG_DATA 0x17D1
+#define mmAZ_TEST_DEBUG_INDEX 0x17D0
+#define mmBL1_PWM_ABM_CNTL 0x162E
+#define mmBL1_PWM_AMBIENT_LIGHT_LEVEL 0x1628
+#define mmBL1_PWM_BL_UPDATE_SAMPLE_RATE 0x162F
+#define mmBL1_PWM_CURRENT_ABM_LEVEL 0x162B
+#define mmBL1_PWM_FINAL_DUTY_CYCLE 0x162C
+#define mmBL1_PWM_GRP2_REG_LOCK 0x1630
+#define mmBL1_PWM_MINIMUM_DUTY_CYCLE 0x162D
+#define mmBL1_PWM_TARGET_ABM_LEVEL 0x162A
+#define mmBL1_PWM_USER_LEVEL 0x1629
+#define mmBL_PWM_CNTL 0x191E
+#define mmBL_PWM_CNTL2 0x191F
+#define mmBL_PWM_GRP1_REG_LOCK 0x1921
+#define mmBL_PWM_PERIOD_CNTL 0x1920
+#define mmBPHYC_DAC_AUTO_CALIB_CONTROL 0x19FE
+#define mmBPHYC_DAC_MACRO_CNTL 0x19FD
+#define mmCC_DC_PIPE_DIS 0x177F
+#define mmCC_RCU_DC_AUDIO_PORT_CONNECTIVITY 0x17D4
+#define mmCOMM_MATRIXA_TRANS_C11_C12 0x1A43
+#define mmCOMM_MATRIXA_TRANS_C13_C14 0x1A44
+#define mmCOMM_MATRIXA_TRANS_C21_C22 0x1A45
+#define mmCOMM_MATRIXA_TRANS_C23_C24 0x1A46
+#define mmCOMM_MATRIXA_TRANS_C31_C32 0x1A47
+#define mmCOMM_MATRIXA_TRANS_C33_C34 0x1A48
+#define mmCOMM_MATRIXB_TRANS_C11_C12 0x1A49
+#define mmCOMM_MATRIXB_TRANS_C13_C14 0x1A4A
+#define mmCOMM_MATRIXB_TRANS_C21_C22 0x1A4B
+#define mmCOMM_MATRIXB_TRANS_C23_C24 0x1A4C
+#define mmCOMM_MATRIXB_TRANS_C31_C32 0x1A4D
+#define mmCOMM_MATRIXB_TRANS_C33_C34 0x1A4E
+#define mmCRTC0_CRTC_3D_STRUCTURE_CONTROL 0x1B78
+#define mmCRTC0_CRTC_ALLOW_STOP_OFF_V_CNT 0x1BC3
+#define mmCRTC0_CRTC_BLACK_COLOR 0x1BA2
+#define mmCRTC0_CRTC_BLANK_CONTROL 0x1B9D
+#define mmCRTC0_CRTC_BLANK_DATA_COLOR 0x1BA1
+#define mmCRTC0_CRTC_CONTROL 0x1B9C
+#define mmCRTC0_CRTC_COUNT_CONTROL 0x1BA9
+#define mmCRTC0_CRTC_COUNT_RESET 0x1BAA
+#define mmCRTC0_CRTC_DCFE_CLOCK_CONTROL 0x1B7C
+#define mmCRTC0_CRTC_DOUBLE_BUFFER_CONTROL 0x1BB6
+#define mmCRTC0_CRTC_DTMTEST_CNTL 0x1B92
+#define mmCRTC0_CRTC_DTMTEST_STATUS_POSITION 0x1B93
+#define mmCRTC0_CRTC_FLOW_CONTROL 0x1B99
+#define mmCRTC0_CRTC_FORCE_COUNT_NOW_CNTL 0x1B98
+#define mmCRTC0_CRTC_GSL_CONTROL 0x1B7B
+#define mmCRTC0_CRTC_GSL_VSYNC_GAP 0x1B79
+#define mmCRTC0_CRTC_GSL_WINDOW 0x1B7A
+#define mmCRTC0_CRTC_H_BLANK_EARLY_NUM 0x1B7D
+#define mmCRTC0_CRTC_H_BLANK_START_END 0x1B81
+#define mmCRTC0_CRTC_H_SYNC_A 0x1B82
+#define mmCRTC0_CRTC_H_SYNC_A_CNTL 0x1B83
+#define mmCRTC0_CRTC_H_SYNC_B 0x1B84
+#define mmCRTC0_CRTC_H_SYNC_B_CNTL 0x1B85
+#define mmCRTC0_CRTC_H_TOTAL 0x1B80
+#define mmCRTC0_CRTC_INTERLACE_CONTROL 0x1B9E
+#define mmCRTC0_CRTC_INTERLACE_STATUS 0x1B9F
+#define mmCRTC0_CRTC_INTERRUPT_CONTROL 0x1BB4
+#define mmCRTC0_CRTC_MANUAL_FORCE_VSYNC_NEXT_LINE 0x1BAB
+#define mmCRTC0_CRTC_MASTER_EN 0x1BC2
+#define mmCRTC0_CRTC_MVP_INBAND_CNTL_INSERT 0x1BBF
+#define mmCRTC0_CRTC_MVP_INBAND_CNTL_INSERT_TIMER 0x1BC0
+#define mmCRTC0_CRTC_MVP_STATUS 0x1BC1
+#define mmCRTC0_CRTC_NOM_VERT_POSITION 0x1BA5
+#define mmCRTC0_CRTC_OVERSCAN_COLOR 0x1BA0
+#define mmCRTC0_CRTC_SNAPSHOT_CONTROL 0x1BB0
+#define mmCRTC0_CRTC_SNAPSHOT_FRAME 0x1BB2
+#define mmCRTC0_CRTC_SNAPSHOT_POSITION 0x1BB1
+#define mmCRTC0_CRTC_SNAPSHOT_STATUS 0x1BAF
+#define mmCRTC0_CRTC_START_LINE_CONTROL 0x1BB3
+#define mmCRTC0_CRTC_STATUS 0x1BA3
+#define mmCRTC0_CRTC_STATUS_FRAME_COUNT 0x1BA6
+#define mmCRTC0_CRTC_STATUS_HV_COUNT 0x1BA8
+#define mmCRTC0_CRTC_STATUS_POSITION 0x1BA4
+#define mmCRTC0_CRTC_STATUS_VF_COUNT 0x1BA7
+#define mmCRTC0_CRTC_STEREO_CONTROL 0x1BAE
+#define mmCRTC0_CRTC_STEREO_FORCE_NEXT_EYE 0x1B9B
+#define mmCRTC0_CRTC_STEREO_STATUS 0x1BAD
+#define mmCRTC0_CRTC_TEST_DEBUG_DATA 0x1BC7
+#define mmCRTC0_CRTC_TEST_DEBUG_INDEX 0x1BC6
+#define mmCRTC0_CRTC_TEST_PATTERN_COLOR 0x1BBC
+#define mmCRTC0_CRTC_TEST_PATTERN_CONTROL 0x1BBA
+#define mmCRTC0_CRTC_TEST_PATTERN_PARAMETERS 0x1BBB
+#define mmCRTC0_CRTC_TRIGA_CNTL 0x1B94
+#define mmCRTC0_CRTC_TRIGA_MANUAL_TRIG 0x1B95
+#define mmCRTC0_CRTC_TRIGB_CNTL 0x1B96
+#define mmCRTC0_CRTC_TRIGB_MANUAL_TRIG 0x1B97
+#define mmCRTC0_CRTC_UPDATE_LOCK 0x1BB5
+#define mmCRTC0_CRTC_VBI_END 0x1B86
+#define mmCRTC0_CRTC_V_BLANK_START_END 0x1B8D
+#define mmCRTC0_CRTC_VERT_SYNC_CONTROL 0x1BAC
+#define mmCRTC0_CRTC_VGA_PARAMETER_CAPTURE_MODE 0x1BB7
+#define mmCRTC0_CRTC_V_SYNC_A 0x1B8E
+#define mmCRTC0_CRTC_V_SYNC_A_CNTL 0x1B8F
+#define mmCRTC0_CRTC_V_SYNC_B 0x1B90
+#define mmCRTC0_CRTC_V_SYNC_B_CNTL 0x1B91
+#define mmCRTC0_CRTC_VSYNC_NOM_INT_STATUS 0x1B8C
+#define mmCRTC0_CRTC_V_TOTAL 0x1B87
+#define mmCRTC0_CRTC_V_TOTAL_CONTROL 0x1B8A
+#define mmCRTC0_CRTC_V_TOTAL_INT_STATUS 0x1B8B
+#define mmCRTC0_CRTC_V_TOTAL_MAX 0x1B89
+#define mmCRTC0_CRTC_V_TOTAL_MIN 0x1B88
+#define mmCRTC0_CRTC_V_UPDATE_INT_STATUS 0x1BC4
+#define mmCRTC0_DCFE_DBG_SEL 0x1B7E
+#define mmCRTC0_DCFE_MEM_LIGHT_SLEEP_CNTL 0x1B7F
+#define mmCRTC0_MASTER_UPDATE_LOCK 0x1BBD
+#define mmCRTC0_MASTER_UPDATE_MODE 0x1BBE
+#define mmCRTC0_PIXEL_RATE_CNTL 0x0140
+#define mmCRTC1_CRTC_3D_STRUCTURE_CONTROL 0x1E78
+#define mmCRTC1_CRTC_ALLOW_STOP_OFF_V_CNT 0x1EC3
+#define mmCRTC1_CRTC_BLACK_COLOR 0x1EA2
+#define mmCRTC1_CRTC_BLANK_CONTROL 0x1E9D
+#define mmCRTC1_CRTC_BLANK_DATA_COLOR 0x1EA1
+#define mmCRTC1_CRTC_CONTROL 0x1E9C
+#define mmCRTC1_CRTC_COUNT_CONTROL 0x1EA9
+#define mmCRTC1_CRTC_COUNT_RESET 0x1EAA
+#define mmCRTC1_CRTC_DCFE_CLOCK_CONTROL 0x1E7C
+#define mmCRTC1_CRTC_DOUBLE_BUFFER_CONTROL 0x1EB6
+#define mmCRTC1_CRTC_DTMTEST_CNTL 0x1E92
+#define mmCRTC1_CRTC_DTMTEST_STATUS_POSITION 0x1E93
+#define mmCRTC1_CRTC_FLOW_CONTROL 0x1E99
+#define mmCRTC1_CRTC_FORCE_COUNT_NOW_CNTL 0x1E98
+#define mmCRTC1_CRTC_GSL_CONTROL 0x1E7B
+#define mmCRTC1_CRTC_GSL_VSYNC_GAP 0x1E79
+#define mmCRTC1_CRTC_GSL_WINDOW 0x1E7A
+#define mmCRTC1_CRTC_H_BLANK_EARLY_NUM 0x1E7D
+#define mmCRTC1_CRTC_H_BLANK_START_END 0x1E81
+#define mmCRTC1_CRTC_H_SYNC_A 0x1E82
+#define mmCRTC1_CRTC_H_SYNC_A_CNTL 0x1E83
+#define mmCRTC1_CRTC_H_SYNC_B 0x1E84
+#define mmCRTC1_CRTC_H_SYNC_B_CNTL 0x1E85
+#define mmCRTC1_CRTC_H_TOTAL 0x1E80
+#define mmCRTC1_CRTC_INTERLACE_CONTROL 0x1E9E
+#define mmCRTC1_CRTC_INTERLACE_STATUS 0x1E9F
+#define mmCRTC1_CRTC_INTERRUPT_CONTROL 0x1EB4
+#define mmCRTC1_CRTC_MANUAL_FORCE_VSYNC_NEXT_LINE 0x1EAB
+#define mmCRTC1_CRTC_MASTER_EN 0x1EC2
+#define mmCRTC1_CRTC_MVP_INBAND_CNTL_INSERT 0x1EBF
+#define mmCRTC1_CRTC_MVP_INBAND_CNTL_INSERT_TIMER 0x1EC0
+#define mmCRTC1_CRTC_MVP_STATUS 0x1EC1
+#define mmCRTC1_CRTC_NOM_VERT_POSITION 0x1EA5
+#define mmCRTC1_CRTC_OVERSCAN_COLOR 0x1EA0
+#define mmCRTC1_CRTC_SNAPSHOT_CONTROL 0x1EB0
+#define mmCRTC1_CRTC_SNAPSHOT_FRAME 0x1EB2
+#define mmCRTC1_CRTC_SNAPSHOT_POSITION 0x1EB1
+#define mmCRTC1_CRTC_SNAPSHOT_STATUS 0x1EAF
+#define mmCRTC1_CRTC_START_LINE_CONTROL 0x1EB3
+#define mmCRTC1_CRTC_STATUS 0x1EA3
+#define mmCRTC1_CRTC_STATUS_FRAME_COUNT 0x1EA6
+#define mmCRTC1_CRTC_STATUS_HV_COUNT 0x1EA8
+#define mmCRTC1_CRTC_STATUS_POSITION 0x1EA4
+#define mmCRTC1_CRTC_STATUS_VF_COUNT 0x1EA7
+#define mmCRTC1_CRTC_STEREO_CONTROL 0x1EAE
+#define mmCRTC1_CRTC_STEREO_FORCE_NEXT_EYE 0x1E9B
+#define mmCRTC1_CRTC_STEREO_STATUS 0x1EAD
+#define mmCRTC1_CRTC_TEST_DEBUG_DATA 0x1EC7
+#define mmCRTC1_CRTC_TEST_DEBUG_INDEX 0x1EC6
+#define mmCRTC1_CRTC_TEST_PATTERN_COLOR 0x1EBC
+#define mmCRTC1_CRTC_TEST_PATTERN_CONTROL 0x1EBA
+#define mmCRTC1_CRTC_TEST_PATTERN_PARAMETERS 0x1EBB
+#define mmCRTC1_CRTC_TRIGA_CNTL 0x1E94
+#define mmCRTC1_CRTC_TRIGA_MANUAL_TRIG 0x1E95
+#define mmCRTC1_CRTC_TRIGB_CNTL 0x1E96
+#define mmCRTC1_CRTC_TRIGB_MANUAL_TRIG 0x1E97
+#define mmCRTC1_CRTC_UPDATE_LOCK 0x1EB5
+#define mmCRTC1_CRTC_VBI_END 0x1E86
+#define mmCRTC1_CRTC_V_BLANK_START_END 0x1E8D
+#define mmCRTC1_CRTC_VERT_SYNC_CONTROL 0x1EAC
+#define mmCRTC1_CRTC_VGA_PARAMETER_CAPTURE_MODE 0x1EB7
+#define mmCRTC1_CRTC_V_SYNC_A 0x1E8E
+#define mmCRTC1_CRTC_V_SYNC_A_CNTL 0x1E8F
+#define mmCRTC1_CRTC_V_SYNC_B 0x1E90
+#define mmCRTC1_CRTC_V_SYNC_B_CNTL 0x1E91
+#define mmCRTC1_CRTC_VSYNC_NOM_INT_STATUS 0x1E8C
+#define mmCRTC1_CRTC_V_TOTAL 0x1E87
+#define mmCRTC1_CRTC_V_TOTAL_CONTROL 0x1E8A
+#define mmCRTC1_CRTC_V_TOTAL_INT_STATUS 0x1E8B
+#define mmCRTC1_CRTC_V_TOTAL_MAX 0x1E89
+#define mmCRTC1_CRTC_V_TOTAL_MIN 0x1E88
+#define mmCRTC1_CRTC_V_UPDATE_INT_STATUS 0x1EC4
+#define mmCRTC1_DCFE_DBG_SEL 0x1E7E
+#define mmCRTC1_DCFE_MEM_LIGHT_SLEEP_CNTL 0x1E7F
+#define mmCRTC1_MASTER_UPDATE_LOCK 0x1EBD
+#define mmCRTC1_MASTER_UPDATE_MODE 0x1EBE
+#define mmCRTC1_PIXEL_RATE_CNTL 0x0144
+#define mmCRTC2_CRTC_3D_STRUCTURE_CONTROL 0x4178
+#define mmCRTC2_CRTC_ALLOW_STOP_OFF_V_CNT 0x41C3
+#define mmCRTC2_CRTC_BLACK_COLOR 0x41A2
+#define mmCRTC2_CRTC_BLANK_CONTROL 0x419D
+#define mmCRTC2_CRTC_BLANK_DATA_COLOR 0x41A1
+#define mmCRTC2_CRTC_CONTROL 0x419C
+#define mmCRTC2_CRTC_COUNT_CONTROL 0x41A9
+#define mmCRTC2_CRTC_COUNT_RESET 0x41AA
+#define mmCRTC2_CRTC_DCFE_CLOCK_CONTROL 0x417C
+#define mmCRTC2_CRTC_DOUBLE_BUFFER_CONTROL 0x41B6
+#define mmCRTC2_CRTC_DTMTEST_CNTL 0x4192
+#define mmCRTC2_CRTC_DTMTEST_STATUS_POSITION 0x4193
+#define mmCRTC2_CRTC_FLOW_CONTROL 0x4199
+#define mmCRTC2_CRTC_FORCE_COUNT_NOW_CNTL 0x4198
+#define mmCRTC2_CRTC_GSL_CONTROL 0x417B
+#define mmCRTC2_CRTC_GSL_VSYNC_GAP 0x4179
+#define mmCRTC2_CRTC_GSL_WINDOW 0x417A
+#define mmCRTC2_CRTC_H_BLANK_EARLY_NUM 0x417D
+#define mmCRTC2_CRTC_H_BLANK_START_END 0x4181
+#define mmCRTC2_CRTC_H_SYNC_A 0x4182
+#define mmCRTC2_CRTC_H_SYNC_A_CNTL 0x4183
+#define mmCRTC2_CRTC_H_SYNC_B 0x4184
+#define mmCRTC2_CRTC_H_SYNC_B_CNTL 0x4185
+#define mmCRTC2_CRTC_H_TOTAL 0x4180
+#define mmCRTC2_CRTC_INTERLACE_CONTROL 0x419E
+#define mmCRTC2_CRTC_INTERLACE_STATUS 0x419F
+#define mmCRTC2_CRTC_INTERRUPT_CONTROL 0x41B4
+#define mmCRTC2_CRTC_MANUAL_FORCE_VSYNC_NEXT_LINE 0x41AB
+#define mmCRTC2_CRTC_MASTER_EN 0x41C2
+#define mmCRTC2_CRTC_MVP_INBAND_CNTL_INSERT 0x41BF
+#define mmCRTC2_CRTC_MVP_INBAND_CNTL_INSERT_TIMER 0x41C0
+#define mmCRTC2_CRTC_MVP_STATUS 0x41C1
+#define mmCRTC2_CRTC_NOM_VERT_POSITION 0x41A5
+#define mmCRTC2_CRTC_OVERSCAN_COLOR 0x41A0
+#define mmCRTC2_CRTC_SNAPSHOT_CONTROL 0x41B0
+#define mmCRTC2_CRTC_SNAPSHOT_FRAME 0x41B2
+#define mmCRTC2_CRTC_SNAPSHOT_POSITION 0x41B1
+#define mmCRTC2_CRTC_SNAPSHOT_STATUS 0x41AF
+#define mmCRTC2_CRTC_START_LINE_CONTROL 0x41B3
+#define mmCRTC2_CRTC_STATUS 0x41A3
+#define mmCRTC2_CRTC_STATUS_FRAME_COUNT 0x41A6
+#define mmCRTC2_CRTC_STATUS_HV_COUNT 0x41A8
+#define mmCRTC2_CRTC_STATUS_POSITION 0x41A4
+#define mmCRTC2_CRTC_STATUS_VF_COUNT 0x41A7
+#define mmCRTC2_CRTC_STEREO_CONTROL 0x41AE
+#define mmCRTC2_CRTC_STEREO_FORCE_NEXT_EYE 0x419B
+#define mmCRTC2_CRTC_STEREO_STATUS 0x41AD
+#define mmCRTC2_CRTC_TEST_DEBUG_DATA 0x41C7
+#define mmCRTC2_CRTC_TEST_DEBUG_INDEX 0x41C6
+#define mmCRTC2_CRTC_TEST_PATTERN_COLOR 0x41BC
+#define mmCRTC2_CRTC_TEST_PATTERN_CONTROL 0x41BA
+#define mmCRTC2_CRTC_TEST_PATTERN_PARAMETERS 0x41BB
+#define mmCRTC2_CRTC_TRIGA_CNTL 0x4194
+#define mmCRTC2_CRTC_TRIGA_MANUAL_TRIG 0x4195
+#define mmCRTC2_CRTC_TRIGB_CNTL 0x4196
+#define mmCRTC2_CRTC_TRIGB_MANUAL_TRIG 0x4197
+#define mmCRTC2_CRTC_UPDATE_LOCK 0x41B5
+#define mmCRTC2_CRTC_VBI_END 0x4186
+#define mmCRTC2_CRTC_V_BLANK_START_END 0x418D
+#define mmCRTC2_CRTC_VERT_SYNC_CONTROL 0x41AC
+#define mmCRTC2_CRTC_VGA_PARAMETER_CAPTURE_MODE 0x41B7
+#define mmCRTC2_CRTC_V_SYNC_A 0x418E
+#define mmCRTC2_CRTC_V_SYNC_A_CNTL 0x418F
+#define mmCRTC2_CRTC_V_SYNC_B 0x4190
+#define mmCRTC2_CRTC_V_SYNC_B_CNTL 0x4191
+#define mmCRTC2_CRTC_VSYNC_NOM_INT_STATUS 0x418C
+#define mmCRTC2_CRTC_V_TOTAL 0x4187
+#define mmCRTC2_CRTC_V_TOTAL_CONTROL 0x418A
+#define mmCRTC2_CRTC_V_TOTAL_INT_STATUS 0x418B
+#define mmCRTC2_CRTC_V_TOTAL_MAX 0x4189
+#define mmCRTC2_CRTC_V_TOTAL_MIN 0x4188
+#define mmCRTC2_CRTC_V_UPDATE_INT_STATUS 0x41C4
+#define mmCRTC2_DCFE_DBG_SEL 0x417E
+#define mmCRTC2_DCFE_MEM_LIGHT_SLEEP_CNTL 0x417F
+#define mmCRTC2_MASTER_UPDATE_LOCK 0x41BD
+#define mmCRTC2_MASTER_UPDATE_MODE 0x41BE
+#define mmCRTC2_PIXEL_RATE_CNTL 0x0148
+#define mmCRTC3_CRTC_3D_STRUCTURE_CONTROL 0x4478
+#define mmCRTC3_CRTC_ALLOW_STOP_OFF_V_CNT 0x44C3
+#define mmCRTC3_CRTC_BLACK_COLOR 0x44A2
+#define mmCRTC3_CRTC_BLANK_CONTROL 0x449D
+#define mmCRTC3_CRTC_BLANK_DATA_COLOR 0x44A1
+#define mmCRTC3_CRTC_CONTROL 0x449C
+#define mmCRTC3_CRTC_COUNT_CONTROL 0x44A9
+#define mmCRTC3_CRTC_COUNT_RESET 0x44AA
+#define mmCRTC3_CRTC_DCFE_CLOCK_CONTROL 0x447C
+#define mmCRTC3_CRTC_DOUBLE_BUFFER_CONTROL 0x44B6
+#define mmCRTC3_CRTC_DTMTEST_CNTL 0x4492
+#define mmCRTC3_CRTC_DTMTEST_STATUS_POSITION 0x4493
+#define mmCRTC3_CRTC_FLOW_CONTROL 0x4499
+#define mmCRTC3_CRTC_FORCE_COUNT_NOW_CNTL 0x4498
+#define mmCRTC3_CRTC_GSL_CONTROL 0x447B
+#define mmCRTC3_CRTC_GSL_VSYNC_GAP 0x4479
+#define mmCRTC3_CRTC_GSL_WINDOW 0x447A
+#define mmCRTC3_CRTC_H_BLANK_EARLY_NUM 0x447D
+#define mmCRTC3_CRTC_H_BLANK_START_END 0x4481
+#define mmCRTC3_CRTC_H_SYNC_A 0x4482
+#define mmCRTC3_CRTC_H_SYNC_A_CNTL 0x4483
+#define mmCRTC3_CRTC_H_SYNC_B 0x4484
+#define mmCRTC3_CRTC_H_SYNC_B_CNTL 0x4485
+#define mmCRTC3_CRTC_H_TOTAL 0x4480
+#define mmCRTC3_CRTC_INTERLACE_CONTROL 0x449E
+#define mmCRTC3_CRTC_INTERLACE_STATUS 0x449F
+#define mmCRTC3_CRTC_INTERRUPT_CONTROL 0x44B4
+#define mmCRTC3_CRTC_MANUAL_FORCE_VSYNC_NEXT_LINE 0x44AB
+#define mmCRTC3_CRTC_MASTER_EN 0x44C2
+#define mmCRTC3_CRTC_MVP_INBAND_CNTL_INSERT 0x44BF
+#define mmCRTC3_CRTC_MVP_INBAND_CNTL_INSERT_TIMER 0x44C0
+#define mmCRTC3_CRTC_MVP_STATUS 0x44C1
+#define mmCRTC3_CRTC_NOM_VERT_POSITION 0x44A5
+#define mmCRTC3_CRTC_OVERSCAN_COLOR 0x44A0
+#define mmCRTC3_CRTC_SNAPSHOT_CONTROL 0x44B0
+#define mmCRTC3_CRTC_SNAPSHOT_FRAME 0x44B2
+#define mmCRTC3_CRTC_SNAPSHOT_POSITION 0x44B1
+#define mmCRTC3_CRTC_SNAPSHOT_STATUS 0x44AF
+#define mmCRTC3_CRTC_START_LINE_CONTROL 0x44B3
+#define mmCRTC3_CRTC_STATUS 0x44A3
+#define mmCRTC3_CRTC_STATUS_FRAME_COUNT 0x44A6
+#define mmCRTC3_CRTC_STATUS_HV_COUNT 0x44A8
+#define mmCRTC3_CRTC_STATUS_POSITION 0x44A4
+#define mmCRTC3_CRTC_STATUS_VF_COUNT 0x44A7
+#define mmCRTC3_CRTC_STEREO_CONTROL 0x44AE
+#define mmCRTC3_CRTC_STEREO_FORCE_NEXT_EYE 0x449B
+#define mmCRTC3_CRTC_STEREO_STATUS 0x44AD
+#define mmCRTC3_CRTC_TEST_DEBUG_DATA 0x44C7
+#define mmCRTC3_CRTC_TEST_DEBUG_INDEX 0x44C6
+#define mmCRTC3_CRTC_TEST_PATTERN_COLOR 0x44BC
+#define mmCRTC3_CRTC_TEST_PATTERN_CONTROL 0x44BA
+#define mmCRTC3_CRTC_TEST_PATTERN_PARAMETERS 0x44BB
+#define mmCRTC3_CRTC_TRIGA_CNTL 0x4494
+#define mmCRTC3_CRTC_TRIGA_MANUAL_TRIG 0x4495
+#define mmCRTC3_CRTC_TRIGB_CNTL 0x4496
+#define mmCRTC3_CRTC_TRIGB_MANUAL_TRIG 0x4497
+#define mmCRTC3_CRTC_UPDATE_LOCK 0x44B5
+#define mmCRTC3_CRTC_VBI_END 0x4486
+#define mmCRTC3_CRTC_V_BLANK_START_END 0x448D
+#define mmCRTC3_CRTC_VERT_SYNC_CONTROL 0x44AC
+#define mmCRTC3_CRTC_VGA_PARAMETER_CAPTURE_MODE 0x44B7
+#define mmCRTC3_CRTC_V_SYNC_A 0x448E
+#define mmCRTC3_CRTC_V_SYNC_A_CNTL 0x448F
+#define mmCRTC3_CRTC_V_SYNC_B 0x4490
+#define mmCRTC3_CRTC_V_SYNC_B_CNTL 0x4491
+#define mmCRTC3_CRTC_VSYNC_NOM_INT_STATUS 0x448C
+#define mmCRTC3_CRTC_V_TOTAL 0x4487
+#define mmCRTC3_CRTC_V_TOTAL_CONTROL 0x448A
+#define mmCRTC3_CRTC_V_TOTAL_INT_STATUS 0x448B
+#define mmCRTC3_CRTC_V_TOTAL_MAX 0x4489
+#define mmCRTC3_CRTC_V_TOTAL_MIN 0x4488
+#define mmCRTC3_CRTC_V_UPDATE_INT_STATUS 0x44C4
+#define mmCRTC3_DCFE_DBG_SEL 0x447E
+#define mmCRTC3_DCFE_MEM_LIGHT_SLEEP_CNTL 0x447F
+#define mmCRTC_3D_STRUCTURE_CONTROL 0x1B78
+#define mmCRTC3_MASTER_UPDATE_LOCK 0x44BD
+#define mmCRTC3_MASTER_UPDATE_MODE 0x44BE
+#define mmCRTC3_PIXEL_RATE_CNTL 0x014C
+#define mmCRTC4_CRTC_3D_STRUCTURE_CONTROL 0x4778
+#define mmCRTC4_CRTC_ALLOW_STOP_OFF_V_CNT 0x47C3
+#define mmCRTC4_CRTC_BLACK_COLOR 0x47A2
+#define mmCRTC4_CRTC_BLANK_CONTROL 0x479D
+#define mmCRTC4_CRTC_BLANK_DATA_COLOR 0x47A1
+#define mmCRTC4_CRTC_CONTROL 0x479C
+#define mmCRTC4_CRTC_COUNT_CONTROL 0x47A9
+#define mmCRTC4_CRTC_COUNT_RESET 0x47AA
+#define mmCRTC4_CRTC_DCFE_CLOCK_CONTROL 0x477C
+#define mmCRTC4_CRTC_DOUBLE_BUFFER_CONTROL 0x47B6
+#define mmCRTC4_CRTC_DTMTEST_CNTL 0x4792
+#define mmCRTC4_CRTC_DTMTEST_STATUS_POSITION 0x4793
+#define mmCRTC4_CRTC_FLOW_CONTROL 0x4799
+#define mmCRTC4_CRTC_FORCE_COUNT_NOW_CNTL 0x4798
+#define mmCRTC4_CRTC_GSL_CONTROL 0x477B
+#define mmCRTC4_CRTC_GSL_VSYNC_GAP 0x4779
+#define mmCRTC4_CRTC_GSL_WINDOW 0x477A
+#define mmCRTC4_CRTC_H_BLANK_EARLY_NUM 0x477D
+#define mmCRTC4_CRTC_H_BLANK_START_END 0x4781
+#define mmCRTC4_CRTC_H_SYNC_A 0x4782
+#define mmCRTC4_CRTC_H_SYNC_A_CNTL 0x4783
+#define mmCRTC4_CRTC_H_SYNC_B 0x4784
+#define mmCRTC4_CRTC_H_SYNC_B_CNTL 0x4785
+#define mmCRTC4_CRTC_H_TOTAL 0x4780
+#define mmCRTC4_CRTC_INTERLACE_CONTROL 0x479E
+#define mmCRTC4_CRTC_INTERLACE_STATUS 0x479F
+#define mmCRTC4_CRTC_INTERRUPT_CONTROL 0x47B4
+#define mmCRTC4_CRTC_MANUAL_FORCE_VSYNC_NEXT_LINE 0x47AB
+#define mmCRTC4_CRTC_MASTER_EN 0x47C2
+#define mmCRTC4_CRTC_MVP_INBAND_CNTL_INSERT 0x47BF
+#define mmCRTC4_CRTC_MVP_INBAND_CNTL_INSERT_TIMER 0x47C0
+#define mmCRTC4_CRTC_MVP_STATUS 0x47C1
+#define mmCRTC4_CRTC_NOM_VERT_POSITION 0x47A5
+#define mmCRTC4_CRTC_OVERSCAN_COLOR 0x47A0
+#define mmCRTC4_CRTC_SNAPSHOT_CONTROL 0x47B0
+#define mmCRTC4_CRTC_SNAPSHOT_FRAME 0x47B2
+#define mmCRTC4_CRTC_SNAPSHOT_POSITION 0x47B1
+#define mmCRTC4_CRTC_SNAPSHOT_STATUS 0x47AF
+#define mmCRTC4_CRTC_START_LINE_CONTROL 0x47B3
+#define mmCRTC4_CRTC_STATUS 0x47A3
+#define mmCRTC4_CRTC_STATUS_FRAME_COUNT 0x47A6
+#define mmCRTC4_CRTC_STATUS_HV_COUNT 0x47A8
+#define mmCRTC4_CRTC_STATUS_POSITION 0x47A4
+#define mmCRTC4_CRTC_STATUS_VF_COUNT 0x47A7
+#define mmCRTC4_CRTC_STEREO_CONTROL 0x47AE
+#define mmCRTC4_CRTC_STEREO_FORCE_NEXT_EYE 0x479B
+#define mmCRTC4_CRTC_STEREO_STATUS 0x47AD
+#define mmCRTC4_CRTC_TEST_DEBUG_DATA 0x47C7
+#define mmCRTC4_CRTC_TEST_DEBUG_INDEX 0x47C6
+#define mmCRTC4_CRTC_TEST_PATTERN_COLOR 0x47BC
+#define mmCRTC4_CRTC_TEST_PATTERN_CONTROL 0x47BA
+#define mmCRTC4_CRTC_TEST_PATTERN_PARAMETERS 0x47BB
+#define mmCRTC4_CRTC_TRIGA_CNTL 0x4794
+#define mmCRTC4_CRTC_TRIGA_MANUAL_TRIG 0x4795
+#define mmCRTC4_CRTC_TRIGB_CNTL 0x4796
+#define mmCRTC4_CRTC_TRIGB_MANUAL_TRIG 0x4797
+#define mmCRTC4_CRTC_UPDATE_LOCK 0x47B5
+#define mmCRTC4_CRTC_VBI_END 0x4786
+#define mmCRTC4_CRTC_V_BLANK_START_END 0x478D
+#define mmCRTC4_CRTC_VERT_SYNC_CONTROL 0x47AC
+#define mmCRTC4_CRTC_VGA_PARAMETER_CAPTURE_MODE 0x47B7
+#define mmCRTC4_CRTC_V_SYNC_A 0x478E
+#define mmCRTC4_CRTC_V_SYNC_A_CNTL 0x478F
+#define mmCRTC4_CRTC_V_SYNC_B 0x4790
+#define mmCRTC4_CRTC_V_SYNC_B_CNTL 0x4791
+#define mmCRTC4_CRTC_VSYNC_NOM_INT_STATUS 0x478C
+#define mmCRTC4_CRTC_V_TOTAL 0x4787
+#define mmCRTC4_CRTC_V_TOTAL_CONTROL 0x478A
+#define mmCRTC4_CRTC_V_TOTAL_INT_STATUS 0x478B
+#define mmCRTC4_CRTC_V_TOTAL_MAX 0x4789
+#define mmCRTC4_CRTC_V_TOTAL_MIN 0x4788
+#define mmCRTC4_CRTC_V_UPDATE_INT_STATUS 0x47C4
+#define mmCRTC4_DCFE_DBG_SEL 0x477E
+#define mmCRTC4_DCFE_MEM_LIGHT_SLEEP_CNTL 0x477F
+#define mmCRTC4_MASTER_UPDATE_LOCK 0x47BD
+#define mmCRTC4_MASTER_UPDATE_MODE 0x47BE
+#define mmCRTC4_PIXEL_RATE_CNTL 0x0150
+#define mmCRTC5_CRTC_3D_STRUCTURE_CONTROL 0x4A78
+#define mmCRTC5_CRTC_ALLOW_STOP_OFF_V_CNT 0x4AC3
+#define mmCRTC5_CRTC_BLACK_COLOR 0x4AA2
+#define mmCRTC5_CRTC_BLANK_CONTROL 0x4A9D
+#define mmCRTC5_CRTC_BLANK_DATA_COLOR 0x4AA1
+#define mmCRTC5_CRTC_CONTROL 0x4A9C
+#define mmCRTC5_CRTC_COUNT_CONTROL 0x4AA9
+#define mmCRTC5_CRTC_COUNT_RESET 0x4AAA
+#define mmCRTC5_CRTC_DCFE_CLOCK_CONTROL 0x4A7C
+#define mmCRTC5_CRTC_DOUBLE_BUFFER_CONTROL 0x4AB6
+#define mmCRTC5_CRTC_DTMTEST_CNTL 0x4A92
+#define mmCRTC5_CRTC_DTMTEST_STATUS_POSITION 0x4A93
+#define mmCRTC5_CRTC_FLOW_CONTROL 0x4A99
+#define mmCRTC5_CRTC_FORCE_COUNT_NOW_CNTL 0x4A98
+#define mmCRTC5_CRTC_GSL_CONTROL 0x4A7B
+#define mmCRTC5_CRTC_GSL_VSYNC_GAP 0x4A79
+#define mmCRTC5_CRTC_GSL_WINDOW 0x4A7A
+#define mmCRTC5_CRTC_H_BLANK_EARLY_NUM 0x4A7D
+#define mmCRTC5_CRTC_H_BLANK_START_END 0x4A81
+#define mmCRTC5_CRTC_H_SYNC_A 0x4A82
+#define mmCRTC5_CRTC_H_SYNC_A_CNTL 0x4A83
+#define mmCRTC5_CRTC_H_SYNC_B 0x4A84
+#define mmCRTC5_CRTC_H_SYNC_B_CNTL 0x4A85
+#define mmCRTC5_CRTC_H_TOTAL 0x4A80
+#define mmCRTC5_CRTC_INTERLACE_CONTROL 0x4A9E
+#define mmCRTC5_CRTC_INTERLACE_STATUS 0x4A9F
+#define mmCRTC5_CRTC_INTERRUPT_CONTROL 0x4AB4
+#define mmCRTC5_CRTC_MANUAL_FORCE_VSYNC_NEXT_LINE 0x4AAB
+#define mmCRTC5_CRTC_MASTER_EN 0x4AC2
+#define mmCRTC5_CRTC_MVP_INBAND_CNTL_INSERT 0x4ABF
+#define mmCRTC5_CRTC_MVP_INBAND_CNTL_INSERT_TIMER 0x4AC0
+#define mmCRTC5_CRTC_MVP_STATUS 0x4AC1
+#define mmCRTC5_CRTC_NOM_VERT_POSITION 0x4AA5
+#define mmCRTC5_CRTC_OVERSCAN_COLOR 0x4AA0
+#define mmCRTC5_CRTC_SNAPSHOT_CONTROL 0x4AB0
+#define mmCRTC5_CRTC_SNAPSHOT_FRAME 0x4AB2
+#define mmCRTC5_CRTC_SNAPSHOT_POSITION 0x4AB1
+#define mmCRTC5_CRTC_SNAPSHOT_STATUS 0x4AAF
+#define mmCRTC5_CRTC_START_LINE_CONTROL 0x4AB3
+#define mmCRTC5_CRTC_STATUS 0x4AA3
+#define mmCRTC5_CRTC_STATUS_FRAME_COUNT 0x4AA6
+#define mmCRTC5_CRTC_STATUS_HV_COUNT 0x4AA8
+#define mmCRTC5_CRTC_STATUS_POSITION 0x4AA4
+#define mmCRTC5_CRTC_STATUS_VF_COUNT 0x4AA7
+#define mmCRTC5_CRTC_STEREO_CONTROL 0x4AAE
+#define mmCRTC5_CRTC_STEREO_FORCE_NEXT_EYE 0x4A9B
+#define mmCRTC5_CRTC_STEREO_STATUS 0x4AAD
+#define mmCRTC5_CRTC_TEST_DEBUG_DATA 0x4AC7
+#define mmCRTC5_CRTC_TEST_DEBUG_INDEX 0x4AC6
+#define mmCRTC5_CRTC_TEST_PATTERN_COLOR 0x4ABC
+#define mmCRTC5_CRTC_TEST_PATTERN_CONTROL 0x4ABA
+#define mmCRTC5_CRTC_TEST_PATTERN_PARAMETERS 0x4ABB
+#define mmCRTC5_CRTC_TRIGA_CNTL 0x4A94
+#define mmCRTC5_CRTC_TRIGA_MANUAL_TRIG 0x4A95
+#define mmCRTC5_CRTC_TRIGB_CNTL 0x4A96
+#define mmCRTC5_CRTC_TRIGB_MANUAL_TRIG 0x4A97
+#define mmCRTC5_CRTC_UPDATE_LOCK 0x4AB5
+#define mmCRTC5_CRTC_VBI_END 0x4A86
+#define mmCRTC5_CRTC_V_BLANK_START_END 0x4A8D
+#define mmCRTC5_CRTC_VERT_SYNC_CONTROL 0x4AAC
+#define mmCRTC5_CRTC_VGA_PARAMETER_CAPTURE_MODE 0x4AB7
+#define mmCRTC5_CRTC_V_SYNC_A 0x4A8E
+#define mmCRTC5_CRTC_V_SYNC_A_CNTL 0x4A8F
+#define mmCRTC5_CRTC_V_SYNC_B 0x4A90
+#define mmCRTC5_CRTC_V_SYNC_B_CNTL 0x4A91
+#define mmCRTC5_CRTC_VSYNC_NOM_INT_STATUS 0x4A8C
+#define mmCRTC5_CRTC_V_TOTAL 0x4A87
+#define mmCRTC5_CRTC_V_TOTAL_CONTROL 0x4A8A
+#define mmCRTC5_CRTC_V_TOTAL_INT_STATUS 0x4A8B
+#define mmCRTC5_CRTC_V_TOTAL_MAX 0x4A89
+#define mmCRTC5_CRTC_V_TOTAL_MIN 0x4A88
+#define mmCRTC5_CRTC_V_UPDATE_INT_STATUS 0x4AC4
+#define mmCRTC5_DCFE_DBG_SEL 0x4A7E
+#define mmCRTC5_DCFE_MEM_LIGHT_SLEEP_CNTL 0x4A7F
+#define mmCRTC5_MASTER_UPDATE_LOCK 0x4ABD
+#define mmCRTC5_MASTER_UPDATE_MODE 0x4ABE
+#define mmCRTC5_PIXEL_RATE_CNTL 0x0154
+#define mmCRTC8_DATA 0x00ED
+#define mmCRTC8_IDX 0x00ED
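+/*
+ * The unprefixed mmCRTC_* defines below carry the same offsets as the
+ * CRTC0 instance above (e.g. mmCRTC_CONTROL 0x1B9C matches
+ * mmCRTC0_CRTC_CONTROL 0x1B9C), i.e. they address controller instance 0.
+ */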
+#define mmCRTC_ALLOW_STOP_OFF_V_CNT 0x1BC3
+#define mmCRTC_BLACK_COLOR 0x1BA2
+#define mmCRTC_BLANK_CONTROL 0x1B9D
+#define mmCRTC_BLANK_DATA_COLOR 0x1BA1
+#define mmCRTC_CONTROL 0x1B9C
+#define mmCRTC_COUNT_CONTROL 0x1BA9
+#define mmCRTC_COUNT_RESET 0x1BAA
+#define mmCRTC_DCFE_CLOCK_CONTROL 0x1B7C
+#define mmCRTC_DOUBLE_BUFFER_CONTROL 0x1BB6
+#define mmCRTC_DTMTEST_CNTL 0x1B92
+#define mmCRTC_DTMTEST_STATUS_POSITION 0x1B93
+#define mmCRTC_FLOW_CONTROL 0x1B99
+#define mmCRTC_FORCE_COUNT_NOW_CNTL 0x1B98
+#define mmCRTC_GSL_CONTROL 0x1B7B
+#define mmCRTC_GSL_VSYNC_GAP 0x1B79
+#define mmCRTC_GSL_WINDOW 0x1B7A
+#define mmCRTC_H_BLANK_EARLY_NUM 0x1B7D
+#define mmCRTC_H_BLANK_START_END 0x1B81
+#define mmCRTC_H_SYNC_A 0x1B82
+#define mmCRTC_H_SYNC_A_CNTL 0x1B83
+#define mmCRTC_H_SYNC_B 0x1B84
+#define mmCRTC_H_SYNC_B_CNTL 0x1B85
+#define mmCRTC_H_TOTAL 0x1B80
+#define mmCRTC_INTERLACE_CONTROL 0x1B9E
+#define mmCRTC_INTERLACE_STATUS 0x1B9F
+#define mmCRTC_INTERRUPT_CONTROL 0x1BB4
+#define mmCRTC_MANUAL_FORCE_VSYNC_NEXT_LINE 0x1BAB
+#define mmCRTC_MASTER_EN 0x1BC2
+#define mmCRTC_MVP_INBAND_CNTL_INSERT 0x1BBF
+#define mmCRTC_MVP_INBAND_CNTL_INSERT_TIMER 0x1BC0
+#define mmCRTC_MVP_STATUS 0x1BC1
+#define mmCRTC_NOM_VERT_POSITION 0x1BA5
+#define mmCRTC_OVERSCAN_COLOR 0x1BA0
+#define mmCRTC_SNAPSHOT_CONTROL 0x1BB0
+#define mmCRTC_SNAPSHOT_FRAME 0x1BB2
+#define mmCRTC_SNAPSHOT_POSITION 0x1BB1
+#define mmCRTC_SNAPSHOT_STATUS 0x1BAF
+#define mmCRTC_START_LINE_CONTROL 0x1BB3
+#define mmCRTC_STATUS 0x1BA3
+#define mmCRTC_STATUS_FRAME_COUNT 0x1BA6
+#define mmCRTC_STATUS_HV_COUNT 0x1BA8
+#define mmCRTC_STATUS_POSITION 0x1BA4
+#define mmCRTC_STATUS_VF_COUNT 0x1BA7
+#define mmCRTC_STEREO_CONTROL 0x1BAE
+#define mmCRTC_STEREO_FORCE_NEXT_EYE 0x1B9B
+#define mmCRTC_STEREO_STATUS 0x1BAD
+#define mmCRTC_TEST_DEBUG_DATA 0x1BC7
+#define mmCRTC_TEST_DEBUG_INDEX 0x1BC6
+#define mmCRTC_TEST_PATTERN_COLOR 0x1BBC
+#define mmCRTC_TEST_PATTERN_CONTROL 0x1BBA
+#define mmCRTC_TEST_PATTERN_PARAMETERS 0x1BBB
+#define mmCRTC_TRIGA_CNTL 0x1B94
+#define mmCRTC_TRIGA_MANUAL_TRIG 0x1B95
+#define mmCRTC_TRIGB_CNTL 0x1B96
+#define mmCRTC_TRIGB_MANUAL_TRIG 0x1B97
+#define mmCRTC_UPDATE_LOCK 0x1BB5
+#define mmCRTC_VBI_END 0x1B86
+#define mmCRTC_V_BLANK_START_END 0x1B8D
+#define mmCRTC_VERT_SYNC_CONTROL 0x1BAC
+#define mmCRTC_VGA_PARAMETER_CAPTURE_MODE 0x1BB7
+#define mmCRTC_V_SYNC_A 0x1B8E
+#define mmCRTC_V_SYNC_A_CNTL 0x1B8F
+#define mmCRTC_V_SYNC_B 0x1B90
+#define mmCRTC_V_SYNC_B_CNTL 0x1B91
+#define mmCRTC_VSYNC_NOM_INT_STATUS 0x1B8C
+#define mmCRTC_V_TOTAL 0x1B87
+#define mmCRTC_V_TOTAL_CONTROL 0x1B8A
+#define mmCRTC_V_TOTAL_INT_STATUS 0x1B8B
+#define mmCRTC_V_TOTAL_MAX 0x1B89
+#define mmCRTC_V_TOTAL_MIN 0x1B88
+#define mmCRTC_V_UPDATE_INT_STATUS 0x1BC4
+#define mmCUR_COLOR1 0x1A6C
+#define mmCUR_COLOR2 0x1A6D
+#define mmCUR_CONTROL 0x1A66
+#define mmCUR_HOT_SPOT 0x1A6B
+#define mmCUR_POSITION 0x1A6A
+#define mmCUR_REQUEST_FILTER_CNTL 0x1A99
+#define mmCUR_SIZE 0x1A68
+#define mmCUR_SURFACE_ADDRESS 0x1A67
+#define mmCUR_SURFACE_ADDRESS_HIGH 0x1A69
+#define mmCUR_UPDATE 0x1A6E
+#define mmD1VGA_CONTROL 0x00CC
+#define mmD2VGA_CONTROL 0x00CE
+#define mmD3VGA_CONTROL 0x00F8
+#define mmD4VGA_CONTROL 0x00F9
+#define mmD5VGA_CONTROL 0x00FA
+#define mmD6VGA_CONTROL 0x00FB
+#define mmDAC_AUTODETECT_CONTROL 0x19EE
+#define mmDAC_AUTODETECT_CONTROL2 0x19EF
+#define mmDAC_AUTODETECT_CONTROL3 0x19F0
+#define mmDAC_AUTODETECT_INT_CONTROL 0x19F2
+#define mmDAC_AUTODETECT_STATUS 0x19F1
+#define mmDAC_CLK_ENABLE 0x0128
+#define mmDAC_COMPARATOR_ENABLE 0x19F7
+#define mmDAC_COMPARATOR_OUTPUT 0x19F8
+#define mmDAC_CONTROL 0x19F6
+#define mmDAC_CRC_CONTROL 0x19E7
+#define mmDAC_CRC_EN 0x19E6
+#define mmDAC_CRC_SIG_CONTROL 0x19EB
+#define mmDAC_CRC_SIG_CONTROL_MASK 0x19E9
+#define mmDAC_CRC_SIG_RGB 0x19EA
+#define mmDAC_CRC_SIG_RGB_MASK 0x19E8
+#define mmDAC_DATA 0x00F2
+#define mmDAC_DFT_CONFIG 0x19FA
+#define mmDAC_ENABLE 0x19E4
+#define mmDAC_FIFO_STATUS 0x19FB
+#define mmDAC_FORCE_DATA 0x19F4
+#define mmDAC_FORCE_OUTPUT_CNTL 0x19F3
+#define mmDAC_MACRO_CNTL_RESERVED0 0x19FC
+#define mmDAC_MACRO_CNTL_RESERVED1 0x19FD
+#define mmDAC_MACRO_CNTL_RESERVED2 0x19FE
+#define mmDAC_MACRO_CNTL_RESERVED3 0x19FF
+#define mmDAC_MASK 0x00F1
+#define mmDAC_POWERDOWN 0x19F5
+#define mmDAC_PWR_CNTL 0x19F9
+#define mmDAC_R_INDEX 0x00F1
+#define mmDAC_SOURCE_SELECT 0x19E5
+#define mmDAC_STEREOSYNC_SELECT 0x19ED
+#define mmDAC_SYNC_TRISTATE_CONTROL 0x19EC
+#define mmDAC_W_INDEX 0x00F2
+#define mmDC_ABM1_ACE_CNTL_MISC 0x1641
+#define mmDC_ABM1_ACE_OFFSET_SLOPE_0 0x163A
+#define mmDC_ABM1_ACE_OFFSET_SLOPE_1 0x163B
+#define mmDC_ABM1_ACE_OFFSET_SLOPE_2 0x163C
+#define mmDC_ABM1_ACE_OFFSET_SLOPE_3 0x163D
+#define mmDC_ABM1_ACE_OFFSET_SLOPE_4 0x163E
+#define mmDC_ABM1_ACE_THRES_12 0x163F
+#define mmDC_ABM1_ACE_THRES_34 0x1640
+#define mmDC_ABM1_BL_MASTER_LOCK 0x169C
+#define mmDC_ABM1_CNTL 0x1638
+#define mmDC_ABM1_DEBUG_MISC 0x1649
+#define mmDC_ABM1_HG_BIN_1_32_SHIFT_FLAG 0x1656
+#define mmDC_ABM1_HG_BIN_17_24_SHIFT_INDEX 0x1659
+#define mmDC_ABM1_HG_BIN_1_8_SHIFT_INDEX 0x1657
+#define mmDC_ABM1_HG_BIN_25_32_SHIFT_INDEX 0x165A
+#define mmDC_ABM1_HG_BIN_9_16_SHIFT_INDEX 0x1658
+#define mmDC_ABM1_HGLS_REG_READ_PROGRESS 0x164A
+#define mmDC_ABM1_HG_MISC_CTRL 0x164B
+#define mmDC_ABM1_HG_RESULT_10 0x1664
+#define mmDC_ABM1_HG_RESULT_1 0x165B
+#define mmDC_ABM1_HG_RESULT_11 0x1665
+#define mmDC_ABM1_HG_RESULT_12 0x1666
+#define mmDC_ABM1_HG_RESULT_13 0x1667
+#define mmDC_ABM1_HG_RESULT_14 0x1668
+#define mmDC_ABM1_HG_RESULT_15 0x1669
+#define mmDC_ABM1_HG_RESULT_16 0x166A
+#define mmDC_ABM1_HG_RESULT_17 0x166B
+#define mmDC_ABM1_HG_RESULT_18 0x166C
+#define mmDC_ABM1_HG_RESULT_19 0x166D
+#define mmDC_ABM1_HG_RESULT_20 0x166E
+#define mmDC_ABM1_HG_RESULT_2 0x165C
+#define mmDC_ABM1_HG_RESULT_21 0x166F
+#define mmDC_ABM1_HG_RESULT_22 0x1670
+#define mmDC_ABM1_HG_RESULT_23 0x1671
+#define mmDC_ABM1_HG_RESULT_24 0x1672
+#define mmDC_ABM1_HG_RESULT_3 0x165D
+#define mmDC_ABM1_HG_RESULT_4 0x165E
+#define mmDC_ABM1_HG_RESULT_5 0x165F
+#define mmDC_ABM1_HG_RESULT_6 0x1660
+#define mmDC_ABM1_HG_RESULT_7 0x1661
+#define mmDC_ABM1_HG_RESULT_8 0x1662
+#define mmDC_ABM1_HG_RESULT_9 0x1663
+#define mmDC_ABM1_HG_SAMPLE_RATE 0x1654
+#define mmDC_ABM1_IPCSC_COEFF_SEL 0x1639
+#define mmDC_ABM1_LS_FILTERED_MIN_MAX_LUMA 0x164E
+#define mmDC_ABM1_LS_MAX_PIXEL_VALUE_COUNT 0x1653
+#define mmDC_ABM1_LS_MIN_MAX_LUMA 0x164D
+#define mmDC_ABM1_LS_MIN_MAX_PIXEL_VALUE_THRES 0x1651
+#define mmDC_ABM1_LS_MIN_PIXEL_VALUE_COUNT 0x1652
+#define mmDC_ABM1_LS_OVR_SCAN_BIN 0x1650
+#define mmDC_ABM1_LS_PIXEL_COUNT 0x164F
+#define mmDC_ABM1_LS_SAMPLE_RATE 0x1655
+#define mmDC_ABM1_LS_SUM_OF_LUMA 0x164C
+#define mmDC_ABM1_OVERSCAN_PIXEL_VALUE 0x169B
+#define mmDCCG_AUDIO_DTO0_MODULE 0x016D
+#define mmDCCG_AUDIO_DTO0_PHASE 0x016C
+#define mmDCCG_AUDIO_DTO1_MODULE 0x0171
+#define mmDCCG_AUDIO_DTO1_PHASE 0x0170
+#define mmDCCG_AUDIO_DTO_SOURCE 0x016B
+#define mmDCCG_CAC_STATUS 0x0137
+#define mmDCCG_GATE_DISABLE_CNTL 0x0134
+#define mmDCCG_GTC_CNTL 0x0120
+#define mmDCCG_GTC_CURRENT 0x0123
+#define mmDCCG_GTC_DTO_MODULO 0x0122
+#define mmDCCG_PERFMON_CNTL 0x0133
+#define mmDCCG_PLL0_PLL_ANALOG 0x1708
+#define mmDCCG_PLL0_PLL_CNTL 0x1707
+#define mmDCCG_PLL0_PLL_DEBUG_CNTL 0x170B
+#define mmDCCG_PLL0_PLL_DISPCLK_CURRENT_DTO_PHASE 0x170F
+#define mmDCCG_PLL0_PLL_DISPCLK_DTO_CNTL 0x170E
+#define mmDCCG_PLL0_PLL_DS_CNTL 0x1705
+#define mmDCCG_PLL0_PLL_FB_DIV 0x1701
+#define mmDCCG_PLL0_PLL_IDCLK_CNTL 0x1706
+#define mmDCCG_PLL0_PLL_POST_DIV 0x1702
+#define mmDCCG_PLL0_PLL_REF_DIV 0x1700
+#define mmDCCG_PLL0_PLL_SS_AMOUNT_DSFRAC 0x1703
+#define mmDCCG_PLL0_PLL_SS_CNTL 0x1704
+#define mmDCCG_PLL0_PLL_UNLOCK_DETECT_CNTL 0x170A
+#define mmDCCG_PLL0_PLL_UPDATE_CNTL 0x170D
+#define mmDCCG_PLL0_PLL_UPDATE_LOCK 0x170C
+#define mmDCCG_PLL0_PLL_VREG_CNTL 0x1709
+#define mmDCCG_PLL1_PLL_ANALOG 0x1718
+#define mmDCCG_PLL1_PLL_CNTL 0x1717
+#define mmDCCG_PLL1_PLL_DEBUG_CNTL 0x171B
+#define mmDCCG_PLL1_PLL_DISPCLK_CURRENT_DTO_PHASE 0x171F
+#define mmDCCG_PLL1_PLL_DISPCLK_DTO_CNTL 0x171E
+#define mmDCCG_PLL1_PLL_DS_CNTL 0x1715
+#define mmDCCG_PLL1_PLL_FB_DIV 0x1711
+#define mmDCCG_PLL1_PLL_IDCLK_CNTL 0x1716
+#define mmDCCG_PLL1_PLL_POST_DIV 0x1712
+#define mmDCCG_PLL1_PLL_REF_DIV 0x1710
+#define mmDCCG_PLL1_PLL_SS_AMOUNT_DSFRAC 0x1713
+#define mmDCCG_PLL1_PLL_SS_CNTL 0x1714
+#define mmDCCG_PLL1_PLL_UNLOCK_DETECT_CNTL 0x171A
+#define mmDCCG_PLL1_PLL_UPDATE_CNTL 0x171D
+#define mmDCCG_PLL1_PLL_UPDATE_LOCK 0x171C
+#define mmDCCG_PLL1_PLL_VREG_CNTL 0x1719
+#define mmDCCG_PLL2_PLL_ANALOG 0x1728
+#define mmDCCG_PLL2_PLL_CNTL 0x1727
+#define mmDCCG_PLL2_PLL_DEBUG_CNTL 0x172B
+#define mmDCCG_PLL2_PLL_DISPCLK_CURRENT_DTO_PHASE 0x172F
+#define mmDCCG_PLL2_PLL_DISPCLK_DTO_CNTL 0x172E
+#define mmDCCG_PLL2_PLL_DS_CNTL 0x1725
+#define mmDCCG_PLL2_PLL_FB_DIV 0x1721
+#define mmDCCG_PLL2_PLL_IDCLK_CNTL 0x1726
+#define mmDCCG_PLL2_PLL_POST_DIV 0x1722
+#define mmDCCG_PLL2_PLL_REF_DIV 0x1720
+#define mmDCCG_PLL2_PLL_SS_AMOUNT_DSFRAC 0x1723
+#define mmDCCG_PLL2_PLL_SS_CNTL 0x1724
+#define mmDCCG_PLL2_PLL_UNLOCK_DETECT_CNTL 0x172A
+#define mmDCCG_PLL2_PLL_UPDATE_CNTL 0x172D
+#define mmDCCG_PLL2_PLL_UPDATE_LOCK 0x172C
+#define mmDCCG_PLL2_PLL_VREG_CNTL 0x1729
+#define mmDCCG_SOFT_RESET 0x015F
+#define mmDCCG_TEST_CLK_SEL 0x017E
+#define mmDCCG_TEST_DEBUG_DATA 0x017D
+#define mmDCCG_TEST_DEBUG_INDEX 0x017C
+#define mmDCCG_VPCLK_CNTL 0x031F
+#define mmDCDEBUG_BUS_CLK1_SEL 0x1860
+#define mmDCDEBUG_BUS_CLK2_SEL 0x1861
+#define mmDCDEBUG_BUS_CLK3_SEL 0x1862
+#define mmDCDEBUG_BUS_CLK4_SEL 0x1863
+#define mmDCDEBUG_OUT_CNTL 0x186B
+#define mmDCDEBUG_OUT_DATA 0x186E
+#define mmDCDEBUG_OUT_PIN_OVERRIDE 0x186A
+#define mmDC_DMCU_SCRATCH 0x1618
+#define mmDC_DVODATA_CONFIG 0x1905
+#define mmDCFE0_SOFT_RESET 0x0158
+#define mmDCFE1_SOFT_RESET 0x0159
+#define mmDCFE2_SOFT_RESET 0x015A
+#define mmDCFE3_SOFT_RESET 0x015B
+#define mmDCFE4_SOFT_RESET 0x015C
+#define mmDCFE5_SOFT_RESET 0x015D
+#define mmDCFE_DBG_SEL 0x1B7E
+#define mmDCFE_MEM_LIGHT_SLEEP_CNTL 0x1B7F
+#define mmDC_GENERICA 0x1900
+#define mmDC_GENERICB 0x1901
+#define mmDC_GPIO_DDC1_A 0x194D
+#define mmDC_GPIO_DDC1_EN 0x194E
+#define mmDC_GPIO_DDC1_MASK 0x194C
+#define mmDC_GPIO_DDC1_Y 0x194F
+#define mmDC_GPIO_DDC2_A 0x1951
+#define mmDC_GPIO_DDC2_EN 0x1952
+#define mmDC_GPIO_DDC2_MASK 0x1950
+#define mmDC_GPIO_DDC2_Y 0x1953
+#define mmDC_GPIO_DDC3_A 0x1955
+#define mmDC_GPIO_DDC3_EN 0x1956
+#define mmDC_GPIO_DDC3_MASK 0x1954
+#define mmDC_GPIO_DDC3_Y 0x1957
+#define mmDC_GPIO_DDC4_A 0x1959
+#define mmDC_GPIO_DDC4_EN 0x195A
+#define mmDC_GPIO_DDC4_MASK 0x1958
+#define mmDC_GPIO_DDC4_Y 0x195B
+#define mmDC_GPIO_DDC5_A 0x195D
+#define mmDC_GPIO_DDC5_EN 0x195E
+#define mmDC_GPIO_DDC5_MASK 0x195C
+#define mmDC_GPIO_DDC5_Y 0x195F
+#define mmDC_GPIO_DDC6_A 0x1961
+#define mmDC_GPIO_DDC6_EN 0x1962
+#define mmDC_GPIO_DDC6_MASK 0x1960
+#define mmDC_GPIO_DDC6_Y 0x1963
+#define mmDC_GPIO_DDCVGA_A 0x1971
+#define mmDC_GPIO_DDCVGA_EN 0x1972
+#define mmDC_GPIO_DDCVGA_MASK 0x1970
+#define mmDC_GPIO_DDCVGA_Y 0x1973
+#define mmDC_GPIO_DEBUG 0x1904
+#define mmDC_GPIO_DVODATA_A 0x1949
+#define mmDC_GPIO_DVODATA_EN 0x194A
+#define mmDC_GPIO_DVODATA_MASK 0x1948
+#define mmDC_GPIO_DVODATA_Y 0x194B
+#define mmDC_GPIO_GENERIC_A 0x1945
+#define mmDC_GPIO_GENERIC_EN 0x1946
+#define mmDC_GPIO_GENERIC_MASK 0x1944
+#define mmDC_GPIO_GENERIC_Y 0x1947
+#define mmDC_GPIO_GENLK_A 0x1969
+#define mmDC_GPIO_GENLK_EN 0x196A
+#define mmDC_GPIO_GENLK_MASK 0x1968
+#define mmDC_GPIO_GENLK_Y 0x196B
+#define mmDC_GPIO_HPD_A 0x196D
+#define mmDC_GPIO_HPD_EN 0x196E
+#define mmDC_GPIO_HPD_MASK 0x196C
+#define mmDC_GPIO_HPD_Y 0x196F
+#define mmDC_GPIO_I2CPAD_A 0x1975
+#define mmDC_GPIO_I2CPAD_EN 0x1976
+#define mmDC_GPIO_I2CPAD_MASK 0x1974
+#define mmDC_GPIO_I2CPAD_STRENGTH 0x197A
+#define mmDC_GPIO_I2CPAD_Y 0x1977
+#define mmDC_GPIO_PAD_STRENGTH_1 0x1978
+#define mmDC_GPIO_PAD_STRENGTH_2 0x1979
+#define mmDC_GPIO_PWRSEQ_A 0x1941
+#define mmDC_GPIO_PWRSEQ_EN 0x1942
+#define mmDC_GPIO_PWRSEQ_MASK 0x1940
+#define mmDC_GPIO_PWRSEQ_Y 0x1943
+#define mmDC_GPIO_SYNCA_A 0x1965
+#define mmDC_GPIO_SYNCA_EN 0x1966
+#define mmDC_GPIO_SYNCA_MASK 0x1964
+#define mmDC_GPIO_SYNCA_Y 0x1967
+#define mmDC_GPU_TIMER_READ 0x1929
+#define mmDC_GPU_TIMER_READ_CNTL 0x192A
+#define mmDC_GPU_TIMER_START_POSITION_P_FLIP 0x1928
+#define mmDC_GPU_TIMER_START_POSITION_V_UPDATE 0x1927
+#define mmDC_HPD1_CONTROL 0x1809
+#define mmDC_HPD1_FAST_TRAIN_CNTL 0x1864
+#define mmDC_HPD1_INT_CONTROL 0x1808
+#define mmDC_HPD1_INT_STATUS 0x1807
+#define mmDC_HPD1_TOGGLE_FILT_CNTL 0x18BC
+#define mmDC_HPD2_CONTROL 0x180C
+#define mmDC_HPD2_FAST_TRAIN_CNTL 0x1865
+#define mmDC_HPD2_INT_CONTROL 0x180B
+#define mmDC_HPD2_INT_STATUS 0x180A
+#define mmDC_HPD2_TOGGLE_FILT_CNTL 0x18BD
+#define mmDC_HPD3_CONTROL 0x180F
+#define mmDC_HPD3_FAST_TRAIN_CNTL 0x1866
+#define mmDC_HPD3_INT_CONTROL 0x180E
+#define mmDC_HPD3_INT_STATUS 0x180D
+#define mmDC_HPD3_TOGGLE_FILT_CNTL 0x18BE
+#define mmDC_HPD4_CONTROL 0x1812
+#define mmDC_HPD4_FAST_TRAIN_CNTL 0x1867
+#define mmDC_HPD4_INT_CONTROL 0x1811
+#define mmDC_HPD4_INT_STATUS 0x1810
+#define mmDC_HPD4_TOGGLE_FILT_CNTL 0x18FC
+#define mmDC_HPD5_CONTROL 0x1815
+#define mmDC_HPD5_FAST_TRAIN_CNTL 0x1868
+#define mmDC_HPD5_INT_CONTROL 0x1814
+#define mmDC_HPD5_INT_STATUS 0x1813
+#define mmDC_HPD5_TOGGLE_FILT_CNTL 0x18FD
+#define mmDC_HPD6_CONTROL 0x1818
+#define mmDC_HPD6_FAST_TRAIN_CNTL 0x1869
+#define mmDC_HPD6_INT_CONTROL 0x1817
+#define mmDC_HPD6_INT_STATUS 0x1816
+#define mmDC_HPD6_TOGGLE_FILT_CNTL 0x18FE
+#define mmDC_I2C_ARBITRATION 0x181A
+#define mmDC_I2C_CONTROL 0x1819
+#define mmDC_I2C_DATA 0x1833
+#define mmDC_I2C_DDC1_HW_STATUS 0x181D
+#define mmDC_I2C_DDC1_SETUP 0x1824
+#define mmDC_I2C_DDC1_SPEED 0x1823
+#define mmDC_I2C_DDC2_HW_STATUS 0x181E
+#define mmDC_I2C_DDC2_SETUP 0x1826
+#define mmDC_I2C_DDC2_SPEED 0x1825
+#define mmDC_I2C_DDC3_HW_STATUS 0x181F
+#define mmDC_I2C_DDC3_SETUP 0x1828
+#define mmDC_I2C_DDC3_SPEED 0x1827
+#define mmDC_I2C_DDC4_HW_STATUS 0x1820
+#define mmDC_I2C_DDC4_SETUP 0x182A
+#define mmDC_I2C_DDC4_SPEED 0x1829
+#define mmDC_I2C_DDC5_HW_STATUS 0x1821
+#define mmDC_I2C_DDC5_SETUP 0x182C
+#define mmDC_I2C_DDC5_SPEED 0x182B
+#define mmDC_I2C_DDC6_HW_STATUS 0x1822
+#define mmDC_I2C_DDC6_SETUP 0x182E
+#define mmDC_I2C_DDC6_SPEED 0x182D
+#define mmDC_I2C_DDCVGA_HW_STATUS 0x1855
+#define mmDC_I2C_DDCVGA_SETUP 0x1857
+#define mmDC_I2C_DDCVGA_SPEED 0x1856
+#define mmDC_I2C_EDID_DETECT_CTRL 0x186F
+#define mmDC_I2C_INTERRUPT_CONTROL 0x181B
+#define mmDC_I2C_SW_STATUS 0x181C
+#define mmDC_I2C_TRANSACTION0 0x182F
+#define mmDC_I2C_TRANSACTION1 0x1830
+#define mmDC_I2C_TRANSACTION2 0x1831
+#define mmDC_I2C_TRANSACTION3 0x1832
+#define mmDCI_CLK_CNTL 0x031E
+#define mmDCI_CLK_RAMP_CNTL 0x0324
+#define mmDCI_DEBUG_CONFIG 0x0323
+#define mmDCI_MEM_PWR_CNTL 0x0326
+#define mmDCI_MEM_PWR_STATE 0x031B
+#define mmDCI_MEM_PWR_STATE2 0x0322
+#define mmDCIO_DEBUG 0x192E
+#define mmDCIO_GSL0_CNTL 0x1924
+#define mmDCIO_GSL1_CNTL 0x1925
+#define mmDCIO_GSL2_CNTL 0x1926
+#define mmDCIO_GSL_GENLK_PAD_CNTL 0x1922
+#define mmDCIO_GSL_SWAPLOCK_PAD_CNTL 0x1923
+#define mmDCIO_IMPCAL_CNTL_AB 0x190D
+#define mmDCIO_IMPCAL_CNTL_CD 0x1911
+#define mmDCIO_IMPCAL_CNTL_EF 0x1915
+#define mmDCIO_TEST_DEBUG_DATA 0x1930
+#define mmDCIO_TEST_DEBUG_INDEX 0x192F
+#define mmDCIO_UNIPHY0_UNIPHY_ANG_BIST_CNTL 0x198C
+#define mmDCIO_UNIPHY0_UNIPHY_CHANNEL_XBAR_CNTL 0x198E
+#define mmDCIO_UNIPHY0_UNIPHY_DATA_SYNCHRONIZATION 0x198A
+#define mmDCIO_UNIPHY0_UNIPHY_LINK_CNTL 0x198D
+#define mmDCIO_UNIPHY0_UNIPHY_PLL_CONTROL1 0x1986
+#define mmDCIO_UNIPHY0_UNIPHY_PLL_CONTROL2 0x1987
+#define mmDCIO_UNIPHY0_UNIPHY_PLL_FBDIV 0x1985
+#define mmDCIO_UNIPHY0_UNIPHY_PLL_SS_CNTL 0x1989
+#define mmDCIO_UNIPHY0_UNIPHY_PLL_SS_STEP_SIZE 0x1988
+#define mmDCIO_UNIPHY0_UNIPHY_POWER_CONTROL 0x1984
+#define mmDCIO_UNIPHY0_UNIPHY_REG_TEST_OUTPUT 0x198B
+#define mmDCIO_UNIPHY0_UNIPHY_TX_CONTROL1 0x1980
+#define mmDCIO_UNIPHY0_UNIPHY_TX_CONTROL2 0x1981
+#define mmDCIO_UNIPHY0_UNIPHY_TX_CONTROL3 0x1982
+#define mmDCIO_UNIPHY0_UNIPHY_TX_CONTROL4 0x1983
+#define mmDCIO_UNIPHY1_UNIPHY_ANG_BIST_CNTL 0x199C
+#define mmDCIO_UNIPHY1_UNIPHY_CHANNEL_XBAR_CNTL 0x199E
+#define mmDCIO_UNIPHY1_UNIPHY_DATA_SYNCHRONIZATION 0x199A
+#define mmDCIO_UNIPHY1_UNIPHY_LINK_CNTL 0x199D
+#define mmDCIO_UNIPHY1_UNIPHY_PLL_CONTROL1 0x1996
+#define mmDCIO_UNIPHY1_UNIPHY_PLL_CONTROL2 0x1997
+#define mmDCIO_UNIPHY1_UNIPHY_PLL_FBDIV 0x1995
+#define mmDCIO_UNIPHY1_UNIPHY_PLL_SS_CNTL 0x1999
+#define mmDCIO_UNIPHY1_UNIPHY_PLL_SS_STEP_SIZE 0x1998
+#define mmDCIO_UNIPHY1_UNIPHY_POWER_CONTROL 0x1994
+#define mmDCIO_UNIPHY1_UNIPHY_REG_TEST_OUTPUT 0x199B
+#define mmDCIO_UNIPHY1_UNIPHY_TX_CONTROL1 0x1990
+#define mmDCIO_UNIPHY1_UNIPHY_TX_CONTROL2 0x1991
+#define mmDCIO_UNIPHY1_UNIPHY_TX_CONTROL3 0x1992
+#define mmDCIO_UNIPHY1_UNIPHY_TX_CONTROL4 0x1993
+#define mmDCIO_UNIPHY2_UNIPHY_ANG_BIST_CNTL 0x19AC
+#define mmDCIO_UNIPHY2_UNIPHY_CHANNEL_XBAR_CNTL 0x19AE
+#define mmDCIO_UNIPHY2_UNIPHY_DATA_SYNCHRONIZATION 0x19AA
+#define mmDCIO_UNIPHY2_UNIPHY_LINK_CNTL 0x19AD
+#define mmDCIO_UNIPHY2_UNIPHY_PLL_CONTROL1 0x19A6
+#define mmDCIO_UNIPHY2_UNIPHY_PLL_CONTROL2 0x19A7
+#define mmDCIO_UNIPHY2_UNIPHY_PLL_FBDIV 0x19A5
+#define mmDCIO_UNIPHY2_UNIPHY_PLL_SS_CNTL 0x19A9
+#define mmDCIO_UNIPHY2_UNIPHY_PLL_SS_STEP_SIZE 0x19A8
+#define mmDCIO_UNIPHY2_UNIPHY_POWER_CONTROL 0x19A4
+#define mmDCIO_UNIPHY2_UNIPHY_REG_TEST_OUTPUT 0x19AB
+#define mmDCIO_UNIPHY2_UNIPHY_TX_CONTROL1 0x19A0
+#define mmDCIO_UNIPHY2_UNIPHY_TX_CONTROL2 0x19A1
+#define mmDCIO_UNIPHY2_UNIPHY_TX_CONTROL3 0x19A2
+#define mmDCIO_UNIPHY2_UNIPHY_TX_CONTROL4 0x19A3
+#define mmDCIO_UNIPHY3_UNIPHY_ANG_BIST_CNTL 0x19BC
+#define mmDCIO_UNIPHY3_UNIPHY_CHANNEL_XBAR_CNTL 0x19BE
+#define mmDCIO_UNIPHY3_UNIPHY_DATA_SYNCHRONIZATION 0x19BA
+#define mmDCIO_UNIPHY3_UNIPHY_LINK_CNTL 0x19BD
+#define mmDCIO_UNIPHY3_UNIPHY_PLL_CONTROL1 0x19B6
+#define mmDCIO_UNIPHY3_UNIPHY_PLL_CONTROL2 0x19B7
+#define mmDCIO_UNIPHY3_UNIPHY_PLL_FBDIV 0x19B5
+#define mmDCIO_UNIPHY3_UNIPHY_PLL_SS_CNTL 0x19B9
+#define mmDCIO_UNIPHY3_UNIPHY_PLL_SS_STEP_SIZE 0x19B8
+#define mmDCIO_UNIPHY3_UNIPHY_POWER_CONTROL 0x19B4
+#define mmDCIO_UNIPHY3_UNIPHY_REG_TEST_OUTPUT 0x19BB
+#define mmDCIO_UNIPHY3_UNIPHY_TX_CONTROL1 0x19B0
+#define mmDCIO_UNIPHY3_UNIPHY_TX_CONTROL2 0x19B1
+#define mmDCIO_UNIPHY3_UNIPHY_TX_CONTROL3 0x19B2
+#define mmDCIO_UNIPHY3_UNIPHY_TX_CONTROL4 0x19B3
+#define mmDCIO_UNIPHY4_UNIPHY_ANG_BIST_CNTL 0x19CC
+#define mmDCIO_UNIPHY4_UNIPHY_CHANNEL_XBAR_CNTL 0x19CE
+#define mmDCIO_UNIPHY4_UNIPHY_DATA_SYNCHRONIZATION 0x19CA
+#define mmDCIO_UNIPHY4_UNIPHY_LINK_CNTL 0x19CD
+#define mmDCIO_UNIPHY4_UNIPHY_PLL_CONTROL1 0x19C6
+#define mmDCIO_UNIPHY4_UNIPHY_PLL_CONTROL2 0x19C7
+#define mmDCIO_UNIPHY4_UNIPHY_PLL_FBDIV 0x19C5
+#define mmDCIO_UNIPHY4_UNIPHY_PLL_SS_CNTL 0x19C9
+#define mmDCIO_UNIPHY4_UNIPHY_PLL_SS_STEP_SIZE 0x19C8
+#define mmDCIO_UNIPHY4_UNIPHY_POWER_CONTROL 0x19C4
+#define mmDCIO_UNIPHY4_UNIPHY_REG_TEST_OUTPUT 0x19CB
+#define mmDCIO_UNIPHY4_UNIPHY_TX_CONTROL1 0x19C0
+#define mmDCIO_UNIPHY4_UNIPHY_TX_CONTROL2 0x19C1
+#define mmDCIO_UNIPHY4_UNIPHY_TX_CONTROL3 0x19C2
+#define mmDCIO_UNIPHY4_UNIPHY_TX_CONTROL4 0x19C3
+#define mmDCIO_UNIPHY5_UNIPHY_ANG_BIST_CNTL 0x19DC
+#define mmDCIO_UNIPHY5_UNIPHY_CHANNEL_XBAR_CNTL 0x19DE
+#define mmDCIO_UNIPHY5_UNIPHY_DATA_SYNCHRONIZATION 0x19DA
+#define mmDCIO_UNIPHY5_UNIPHY_LINK_CNTL 0x19DD
+#define mmDCIO_UNIPHY5_UNIPHY_PLL_CONTROL1 0x19D6
+#define mmDCIO_UNIPHY5_UNIPHY_PLL_CONTROL2 0x19D7
+#define mmDCIO_UNIPHY5_UNIPHY_PLL_FBDIV 0x19D5
+#define mmDCIO_UNIPHY5_UNIPHY_PLL_SS_CNTL 0x19D9
+#define mmDCIO_UNIPHY5_UNIPHY_PLL_SS_STEP_SIZE 0x19D8
+#define mmDCIO_UNIPHY5_UNIPHY_POWER_CONTROL 0x19D4
+#define mmDCIO_UNIPHY5_UNIPHY_REG_TEST_OUTPUT 0x19DB
+#define mmDCIO_UNIPHY5_UNIPHY_TX_CONTROL1 0x19D0
+#define mmDCIO_UNIPHY5_UNIPHY_TX_CONTROL2 0x19D1
+#define mmDCIO_UNIPHY5_UNIPHY_TX_CONTROL3 0x19D2
+#define mmDCIO_UNIPHY5_UNIPHY_TX_CONTROL4 0x19D3
+#define mmDCI_SOFT_RESET 0x015E
+#define mmDCI_TEST_DEBUG_DATA 0x0321
+#define mmDCI_TEST_DEBUG_INDEX 0x0320
+#define mmDC_LUT_30_COLOR 0x1A7C
+#define mmDC_LUT_AUTOFILL 0x1A7F
+#define mmDC_LUT_BLACK_OFFSET_BLUE 0x1A81
+#define mmDC_LUT_BLACK_OFFSET_GREEN 0x1A82
+#define mmDC_LUT_BLACK_OFFSET_RED 0x1A83
+#define mmDC_LUT_CONTROL 0x1A80
+#define mmDC_LUT_PWL_DATA 0x1A7B
+#define mmDC_LUT_RW_INDEX 0x1A79
+#define mmDC_LUT_RW_MODE 0x1A78
+#define mmDC_LUT_SEQ_COLOR 0x1A7A
+#define mmDC_LUT_VGA_ACCESS_ENABLE 0x1A7D
+#define mmDC_LUT_WHITE_OFFSET_BLUE 0x1A84
+#define mmDC_LUT_WHITE_OFFSET_GREEN 0x1A85
+#define mmDC_LUT_WHITE_OFFSET_RED 0x1A86
+#define mmDC_LUT_WRITE_EN_MASK 0x1A7E
+#define mmDC_MVP_LB_CONTROL 0x1ADB
+#define mmDCO_CLK_CNTL 0x192B
+#define mmDCO_CLK_RAMP_CNTL 0x192C
+#define mmDCO_LIGHT_SLEEP_DIS 0x1907
+#define mmDCO_MEM_POWER_STATE 0x1906
+#define mmDCO_SOFT_RESET 0x0167
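+/*
+ * The mmDCP0_* block below repeats the unprefixed per-pipe defines
+ * (COMM_MATRIX*, CUR_*, DC_LUT_*) at identical offsets, naming display
+ * controller pipe 0 explicitly.
+ */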
+#define mmDCP0_COMM_MATRIXA_TRANS_C11_C12 0x1A43
+#define mmDCP0_COMM_MATRIXA_TRANS_C13_C14 0x1A44
+#define mmDCP0_COMM_MATRIXA_TRANS_C21_C22 0x1A45
+#define mmDCP0_COMM_MATRIXA_TRANS_C23_C24 0x1A46
+#define mmDCP0_COMM_MATRIXA_TRANS_C31_C32 0x1A47
+#define mmDCP0_COMM_MATRIXA_TRANS_C33_C34 0x1A48
+#define mmDCP0_COMM_MATRIXB_TRANS_C11_C12 0x1A49
+#define mmDCP0_COMM_MATRIXB_TRANS_C13_C14 0x1A4A
+#define mmDCP0_COMM_MATRIXB_TRANS_C21_C22 0x1A4B
+#define mmDCP0_COMM_MATRIXB_TRANS_C23_C24 0x1A4C
+#define mmDCP0_COMM_MATRIXB_TRANS_C31_C32 0x1A4D
+#define mmDCP0_COMM_MATRIXB_TRANS_C33_C34 0x1A4E
+#define mmDCP0_CUR_COLOR1 0x1A6C
+#define mmDCP0_CUR_COLOR2 0x1A6D
+#define mmDCP0_CUR_CONTROL 0x1A66
+#define mmDCP0_CUR_HOT_SPOT 0x1A6B
+#define mmDCP0_CUR_POSITION 0x1A6A
+#define mmDCP0_CUR_REQUEST_FILTER_CNTL 0x1A99
+#define mmDCP0_CUR_SIZE 0x1A68
+#define mmDCP0_CUR_SURFACE_ADDRESS 0x1A67
+#define mmDCP0_CUR_SURFACE_ADDRESS_HIGH 0x1A69
+#define mmDCP0_CUR_UPDATE 0x1A6E
+#define mmDCP0_DC_LUT_30_COLOR 0x1A7C
+#define mmDCP0_DC_LUT_AUTOFILL 0x1A7F
+#define mmDCP0_DC_LUT_BLACK_OFFSET_BLUE 0x1A81
+#define mmDCP0_DC_LUT_BLACK_OFFSET_GREEN 0x1A82
+#define mmDCP0_DC_LUT_BLACK_OFFSET_RED 0x1A83
+#define mmDCP0_DC_LUT_CONTROL 0x1A80
+#define mmDCP0_DC_LUT_PWL_DATA 0x1A7B
+#define mmDCP0_DC_LUT_RW_INDEX 0x1A79
+#define mmDCP0_DC_LUT_RW_MODE 0x1A78
+#define mmDCP0_DC_LUT_SEQ_COLOR 0x1A7A
+#define mmDCP0_DC_LUT_VGA_ACCESS_ENABLE 0x1A7D
+#define mmDCP0_DC_LUT_WHITE_OFFSET_BLUE 0x1A84
+#define mmDCP0_DC_LUT_WHITE_OFFSET_GREEN 0x1A85
+#define mmDCP0_DC_LUT_WHITE_OFFSET_RED 0x1A86
+#define mmDCP0_DC_LUT_WRITE_EN_MASK 0x1A7E
+#define mmDCP0_DCP_CRC_CONTROL 0x1A87
+#define mmDCP0_DCP_CRC_CURRENT 0x1A89
+#define mmDCP0_DCP_CRC_LAST 0x1A8B
+#define mmDCP0_DCP_CRC_MASK 0x1A88
+#define mmDCP0_DCP_DEBUG 0x1A8D
+#define mmDCP0_DCP_DEBUG2 0x1A98
+#define mmDCP0_DCP_FP_CONVERTED_FIELD 0x1A65
+#define mmDCP0_DCP_GSL_CONTROL 0x1A90
+#define mmDCP0_DCP_LB_DATA_GAP_BETWEEN_CHUNK 0x1A91
+#define mmDCP0_DCP_RANDOM_SEEDS 0x1A61
+#define mmDCP0_DCP_SPATIAL_DITHER_CNTL 0x1A60
+#define mmDCP0_DCP_TEST_DEBUG_DATA 0x1A96
+#define mmDCP0_DCP_TEST_DEBUG_INDEX 0x1A95
+#define mmDCP0_DEGAMMA_CONTROL 0x1A58
+#define mmDCP0_DENORM_CONTROL 0x1A50
+#define mmDCP0_GAMUT_REMAP_C11_C12 0x1A5A
+#define mmDCP0_GAMUT_REMAP_C13_C14 0x1A5B
+#define mmDCP0_GAMUT_REMAP_C21_C22 0x1A5C
+#define mmDCP0_GAMUT_REMAP_C23_C24 0x1A5D
+#define mmDCP0_GAMUT_REMAP_C31_C32 0x1A5E
+#define mmDCP0_GAMUT_REMAP_C33_C34 0x1A5F
+#define mmDCP0_GAMUT_REMAP_CONTROL 0x1A59
+#define mmDCP0_GRPH_COMPRESS_PITCH 0x1A1A
+#define mmDCP0_GRPH_COMPRESS_SURFACE_ADDRESS 0x1A19
+#define mmDCP0_GRPH_COMPRESS_SURFACE_ADDRESS_HIGH 0x1A1B
+#define mmDCP0_GRPH_CONTROL 0x1A01
+#define mmDCP0_GRPH_DFQ_CONTROL 0x1A14
+#define mmDCP0_GRPH_DFQ_STATUS 0x1A15
+#define mmDCP0_GRPH_ENABLE 0x1A00
+#define mmDCP0_GRPH_FLIP_CONTROL 0x1A12
+#define mmDCP0_GRPH_INTERRUPT_CONTROL 0x1A17
+#define mmDCP0_GRPH_INTERRUPT_STATUS 0x1A16
+#define mmDCP0_GRPH_LUT_10BIT_BYPASS 0x1A02
+#define mmDCP0_GRPH_PITCH 0x1A06
+#define mmDCP0_GRPH_PRIMARY_SURFACE_ADDRESS 0x1A04
+#define mmDCP0_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH 0x1A07
+#define mmDCP0_GRPH_SECONDARY_SURFACE_ADDRESS 0x1A05
+#define mmDCP0_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH 0x1A08
+#define mmDCP0_GRPH_STEREOSYNC_FLIP 0x1A97
+#define mmDCP0_GRPH_SURFACE_ADDRESS_HIGH_INUSE 0x1A18
+#define mmDCP0_GRPH_SURFACE_ADDRESS_INUSE 0x1A13
+#define mmDCP0_GRPH_SURFACE_OFFSET_X 0x1A09
+#define mmDCP0_GRPH_SURFACE_OFFSET_Y 0x1A0A
+#define mmDCP0_GRPH_SWAP_CNTL 0x1A03
+#define mmDCP0_GRPH_UPDATE 0x1A11
+#define mmDCP0_GRPH_X_END 0x1A0D
+#define mmDCP0_GRPH_X_START 0x1A0B
+#define mmDCP0_GRPH_Y_END 0x1A0E
+#define mmDCP0_GRPH_Y_START 0x1A0C
+#define mmDCP0_INPUT_CSC_C11_C12 0x1A36
+#define mmDCP0_INPUT_CSC_C13_C14 0x1A37
+#define mmDCP0_INPUT_CSC_C21_C22 0x1A38
+#define mmDCP0_INPUT_CSC_C23_C24 0x1A39
+#define mmDCP0_INPUT_CSC_C31_C32 0x1A3A
+#define mmDCP0_INPUT_CSC_C33_C34 0x1A3B
+#define mmDCP0_INPUT_CSC_CONTROL 0x1A35
+#define mmDCP0_INPUT_GAMMA_CONTROL 0x1A10
+#define mmDCP0_KEY_CONTROL 0x1A53
+#define mmDCP0_KEY_RANGE_ALPHA 0x1A54
+#define mmDCP0_KEY_RANGE_BLUE 0x1A57
+#define mmDCP0_KEY_RANGE_GREEN 0x1A56
+#define mmDCP0_KEY_RANGE_RED 0x1A55
+#define mmDCP0_OUTPUT_CSC_C11_C12 0x1A3D
+#define mmDCP0_OUTPUT_CSC_C13_C14 0x1A3E
+#define mmDCP0_OUTPUT_CSC_C21_C22 0x1A3F
+#define mmDCP0_OUTPUT_CSC_C23_C24 0x1A40
+#define mmDCP0_OUTPUT_CSC_C31_C32 0x1A41
+#define mmDCP0_OUTPUT_CSC_C33_C34 0x1A42
+#define mmDCP0_OUTPUT_CSC_CONTROL 0x1A3C
+#define mmDCP0_OUT_ROUND_CONTROL 0x1A51
+#define mmDCP0_OVL_CONTROL1 0x1A1D
+#define mmDCP0_OVL_CONTROL2 0x1A1E
+#define mmDCP0_OVL_DFQ_CONTROL 0x1A29
+#define mmDCP0_OVL_DFQ_STATUS 0x1A2A
+#define mmDCP0_OVL_ENABLE 0x1A1C
+#define mmDCP0_OVL_END 0x1A26
+#define mmDCP0_OVL_PITCH 0x1A21
+#define mmDCP0_OVLSCL_EDGE_PIXEL_CNTL 0x1A2C
+#define mmDCP0_OVL_SECONDARY_SURFACE_ADDRESS 0x1A92
+#define mmDCP0_OVL_SECONDARY_SURFACE_ADDRESS_HIGH 0x1A94
+#define mmDCP0_OVL_START 0x1A25
+#define mmDCP0_OVL_STEREOSYNC_FLIP 0x1A93
+#define mmDCP0_OVL_SURFACE_ADDRESS 0x1A20
+#define mmDCP0_OVL_SURFACE_ADDRESS_HIGH 0x1A22
+#define mmDCP0_OVL_SURFACE_ADDRESS_HIGH_INUSE 0x1A2B
+#define mmDCP0_OVL_SURFACE_ADDRESS_INUSE 0x1A28
+#define mmDCP0_OVL_SURFACE_OFFSET_X 0x1A23
+#define mmDCP0_OVL_SURFACE_OFFSET_Y 0x1A24
+#define mmDCP0_OVL_SWAP_CNTL 0x1A1F
+#define mmDCP0_OVL_UPDATE 0x1A27
+#define mmDCP0_PRESCALE_GRPH_CONTROL 0x1A2D
+#define mmDCP0_PRESCALE_OVL_CONTROL 0x1A31
+#define mmDCP0_PRESCALE_VALUES_GRPH_B 0x1A30
+#define mmDCP0_PRESCALE_VALUES_GRPH_G 0x1A2F
+#define mmDCP0_PRESCALE_VALUES_GRPH_R 0x1A2E
+#define mmDCP0_PRESCALE_VALUES_OVL_CB 0x1A32
+#define mmDCP0_PRESCALE_VALUES_OVL_CR 0x1A34
+#define mmDCP0_PRESCALE_VALUES_OVL_Y 0x1A33
+#define mmDCP0_REGAMMA_CNTLA_END_CNTL1 0x1AA6
+#define mmDCP0_REGAMMA_CNTLA_END_CNTL2 0x1AA7
+#define mmDCP0_REGAMMA_CNTLA_REGION_0_1 0x1AA8
+#define mmDCP0_REGAMMA_CNTLA_REGION_10_11 0x1AAD
+#define mmDCP0_REGAMMA_CNTLA_REGION_12_13 0x1AAE
+#define mmDCP0_REGAMMA_CNTLA_REGION_14_15 0x1AAF
+#define mmDCP0_REGAMMA_CNTLA_REGION_2_3 0x1AA9
+#define mmDCP0_REGAMMA_CNTLA_REGION_4_5 0x1AAA
+#define mmDCP0_REGAMMA_CNTLA_REGION_6_7 0x1AAB
+#define mmDCP0_REGAMMA_CNTLA_REGION_8_9 0x1AAC
+#define mmDCP0_REGAMMA_CNTLA_SLOPE_CNTL 0x1AA5
+#define mmDCP0_REGAMMA_CNTLA_START_CNTL 0x1AA4
+#define mmDCP0_REGAMMA_CNTLB_END_CNTL1 0x1AB2
+#define mmDCP0_REGAMMA_CNTLB_END_CNTL2 0x1AB3
+#define mmDCP0_REGAMMA_CNTLB_REGION_0_1 0x1AB4
+#define mmDCP0_REGAMMA_CNTLB_REGION_10_11 0x1AB9
+#define mmDCP0_REGAMMA_CNTLB_REGION_12_13 0x1ABA
+#define mmDCP0_REGAMMA_CNTLB_REGION_14_15 0x1ABB
+#define mmDCP0_REGAMMA_CNTLB_REGION_2_3 0x1AB5
+#define mmDCP0_REGAMMA_CNTLB_REGION_4_5 0x1AB6
+#define mmDCP0_REGAMMA_CNTLB_REGION_6_7 0x1AB7
+#define mmDCP0_REGAMMA_CNTLB_REGION_8_9 0x1AB8
+#define mmDCP0_REGAMMA_CNTLB_SLOPE_CNTL 0x1AB1
+#define mmDCP0_REGAMMA_CNTLB_START_CNTL 0x1AB0
+#define mmDCP0_REGAMMA_CONTROL 0x1AA0
+#define mmDCP0_REGAMMA_LUT_DATA 0x1AA2
+#define mmDCP0_REGAMMA_LUT_INDEX 0x1AA1
+#define mmDCP0_REGAMMA_LUT_WRITE_EN_MASK 0x1AA3
+#define mmDCP1_COMM_MATRIXA_TRANS_C11_C12 0x1D43
+#define mmDCP1_COMM_MATRIXA_TRANS_C13_C14 0x1D44
+#define mmDCP1_COMM_MATRIXA_TRANS_C21_C22 0x1D45
+#define mmDCP1_COMM_MATRIXA_TRANS_C23_C24 0x1D46
+#define mmDCP1_COMM_MATRIXA_TRANS_C31_C32 0x1D47
+#define mmDCP1_COMM_MATRIXA_TRANS_C33_C34 0x1D48
+#define mmDCP1_COMM_MATRIXB_TRANS_C11_C12 0x1D49
+#define mmDCP1_COMM_MATRIXB_TRANS_C13_C14 0x1D4A
+#define mmDCP1_COMM_MATRIXB_TRANS_C21_C22 0x1D4B
+#define mmDCP1_COMM_MATRIXB_TRANS_C23_C24 0x1D4C
+#define mmDCP1_COMM_MATRIXB_TRANS_C31_C32 0x1D4D
+#define mmDCP1_COMM_MATRIXB_TRANS_C33_C34 0x1D4E
+#define mmDCP1_CUR_COLOR1 0x1D6C
+#define mmDCP1_CUR_COLOR2 0x1D6D
+#define mmDCP1_CUR_CONTROL 0x1D66
+#define mmDCP1_CUR_HOT_SPOT 0x1D6B
+#define mmDCP1_CUR_POSITION 0x1D6A
+#define mmDCP1_CUR_REQUEST_FILTER_CNTL 0x1D99
+#define mmDCP1_CUR_SIZE 0x1D68
+#define mmDCP1_CUR_SURFACE_ADDRESS 0x1D67
+#define mmDCP1_CUR_SURFACE_ADDRESS_HIGH 0x1D69
+#define mmDCP1_CUR_UPDATE 0x1D6E
+#define mmDCP1_DC_LUT_30_COLOR 0x1D7C
+#define mmDCP1_DC_LUT_AUTOFILL 0x1D7F
+#define mmDCP1_DC_LUT_BLACK_OFFSET_BLUE 0x1D81
+#define mmDCP1_DC_LUT_BLACK_OFFSET_GREEN 0x1D82
+#define mmDCP1_DC_LUT_BLACK_OFFSET_RED 0x1D83
+#define mmDCP1_DC_LUT_CONTROL 0x1D80
+#define mmDCP1_DC_LUT_PWL_DATA 0x1D7B
+#define mmDCP1_DC_LUT_RW_INDEX 0x1D79
+#define mmDCP1_DC_LUT_RW_MODE 0x1D78
+#define mmDCP1_DC_LUT_SEQ_COLOR 0x1D7A
+#define mmDCP1_DC_LUT_VGA_ACCESS_ENABLE 0x1D7D
+#define mmDCP1_DC_LUT_WHITE_OFFSET_BLUE 0x1D84
+#define mmDCP1_DC_LUT_WHITE_OFFSET_GREEN 0x1D85
+#define mmDCP1_DC_LUT_WHITE_OFFSET_RED 0x1D86
+#define mmDCP1_DC_LUT_WRITE_EN_MASK 0x1D7E
+#define mmDCP1_DCP_CRC_CONTROL 0x1D87
+#define mmDCP1_DCP_CRC_CURRENT 0x1D89
+#define mmDCP1_DCP_CRC_LAST 0x1D8B
+#define mmDCP1_DCP_CRC_MASK 0x1D88
+#define mmDCP1_DCP_DEBUG 0x1D8D
+#define mmDCP1_DCP_DEBUG2 0x1D98
+#define mmDCP1_DCP_FP_CONVERTED_FIELD 0x1D65
+#define mmDCP1_DCP_GSL_CONTROL 0x1D90
+#define mmDCP1_DCP_LB_DATA_GAP_BETWEEN_CHUNK 0x1D91
+#define mmDCP1_DCP_RANDOM_SEEDS 0x1D61
+#define mmDCP1_DCP_SPATIAL_DITHER_CNTL 0x1D60
+#define mmDCP1_DCP_TEST_DEBUG_DATA 0x1D96
+#define mmDCP1_DCP_TEST_DEBUG_INDEX 0x1D95
+#define mmDCP1_DEGAMMA_CONTROL 0x1D58
+#define mmDCP1_DENORM_CONTROL 0x1D50
+#define mmDCP1_GAMUT_REMAP_C11_C12 0x1D5A
+#define mmDCP1_GAMUT_REMAP_C13_C14 0x1D5B
+#define mmDCP1_GAMUT_REMAP_C21_C22 0x1D5C
+#define mmDCP1_GAMUT_REMAP_C23_C24 0x1D5D
+#define mmDCP1_GAMUT_REMAP_C31_C32 0x1D5E
+#define mmDCP1_GAMUT_REMAP_C33_C34 0x1D5F
+#define mmDCP1_GAMUT_REMAP_CONTROL 0x1D59
+#define mmDCP1_GRPH_COMPRESS_PITCH 0x1D1A
+#define mmDCP1_GRPH_COMPRESS_SURFACE_ADDRESS 0x1D19
+#define mmDCP1_GRPH_COMPRESS_SURFACE_ADDRESS_HIGH 0x1D1B
+#define mmDCP1_GRPH_CONTROL 0x1D01
+#define mmDCP1_GRPH_DFQ_CONTROL 0x1D14
+#define mmDCP1_GRPH_DFQ_STATUS 0x1D15
+#define mmDCP1_GRPH_ENABLE 0x1D00
+#define mmDCP1_GRPH_FLIP_CONTROL 0x1D12
+#define mmDCP1_GRPH_INTERRUPT_CONTROL 0x1D17
+#define mmDCP1_GRPH_INTERRUPT_STATUS 0x1D16
+#define mmDCP1_GRPH_LUT_10BIT_BYPASS 0x1D02
+#define mmDCP1_GRPH_PITCH 0x1D06
+#define mmDCP1_GRPH_PRIMARY_SURFACE_ADDRESS 0x1D04
+#define mmDCP1_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH 0x1D07
+#define mmDCP1_GRPH_SECONDARY_SURFACE_ADDRESS 0x1D05
+#define mmDCP1_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH 0x1D08
+#define mmDCP1_GRPH_STEREOSYNC_FLIP 0x1D97
+#define mmDCP1_GRPH_SURFACE_ADDRESS_HIGH_INUSE 0x1D18
+#define mmDCP1_GRPH_SURFACE_ADDRESS_INUSE 0x1D13
+#define mmDCP1_GRPH_SURFACE_OFFSET_X 0x1D09
+#define mmDCP1_GRPH_SURFACE_OFFSET_Y 0x1D0A
+#define mmDCP1_GRPH_SWAP_CNTL 0x1D03
+#define mmDCP1_GRPH_UPDATE 0x1D11
+#define mmDCP1_GRPH_X_END 0x1D0D
+#define mmDCP1_GRPH_X_START 0x1D0B
+#define mmDCP1_GRPH_Y_END 0x1D0E
+#define mmDCP1_GRPH_Y_START 0x1D0C
+#define mmDCP1_INPUT_CSC_C11_C12 0x1D36
+#define mmDCP1_INPUT_CSC_C13_C14 0x1D37
+#define mmDCP1_INPUT_CSC_C21_C22 0x1D38
+#define mmDCP1_INPUT_CSC_C23_C24 0x1D39
+#define mmDCP1_INPUT_CSC_C31_C32 0x1D3A
+#define mmDCP1_INPUT_CSC_C33_C34 0x1D3B
+#define mmDCP1_INPUT_CSC_CONTROL 0x1D35
+#define mmDCP1_INPUT_GAMMA_CONTROL 0x1D10
+#define mmDCP1_KEY_CONTROL 0x1D53
+#define mmDCP1_KEY_RANGE_ALPHA 0x1D54
+#define mmDCP1_KEY_RANGE_BLUE 0x1D57
+#define mmDCP1_KEY_RANGE_GREEN 0x1D56
+#define mmDCP1_KEY_RANGE_RED 0x1D55
+#define mmDCP1_OUTPUT_CSC_C11_C12 0x1D3D
+#define mmDCP1_OUTPUT_CSC_C13_C14 0x1D3E
+#define mmDCP1_OUTPUT_CSC_C21_C22 0x1D3F
+#define mmDCP1_OUTPUT_CSC_C23_C24 0x1D40
+#define mmDCP1_OUTPUT_CSC_C31_C32 0x1D41
+#define mmDCP1_OUTPUT_CSC_C33_C34 0x1D42
+#define mmDCP1_OUTPUT_CSC_CONTROL 0x1D3C
+#define mmDCP1_OUT_ROUND_CONTROL 0x1D51
+#define mmDCP1_OVL_CONTROL1 0x1D1D
+#define mmDCP1_OVL_CONTROL2 0x1D1E
+#define mmDCP1_OVL_DFQ_CONTROL 0x1D29
+#define mmDCP1_OVL_DFQ_STATUS 0x1D2A
+#define mmDCP1_OVL_ENABLE 0x1D1C
+#define mmDCP1_OVL_END 0x1D26
+#define mmDCP1_OVL_PITCH 0x1D21
+#define mmDCP1_OVLSCL_EDGE_PIXEL_CNTL 0x1D2C
+#define mmDCP1_OVL_SECONDARY_SURFACE_ADDRESS 0x1D92
+#define mmDCP1_OVL_SECONDARY_SURFACE_ADDRESS_HIGH 0x1D94
+#define mmDCP1_OVL_START 0x1D25
+#define mmDCP1_OVL_STEREOSYNC_FLIP 0x1D93
+#define mmDCP1_OVL_SURFACE_ADDRESS 0x1D20
+#define mmDCP1_OVL_SURFACE_ADDRESS_HIGH 0x1D22
+#define mmDCP1_OVL_SURFACE_ADDRESS_HIGH_INUSE 0x1D2B
+#define mmDCP1_OVL_SURFACE_ADDRESS_INUSE 0x1D28
+#define mmDCP1_OVL_SURFACE_OFFSET_X 0x1D23
+#define mmDCP1_OVL_SURFACE_OFFSET_Y 0x1D24
+#define mmDCP1_OVL_SWAP_CNTL 0x1D1F
+#define mmDCP1_OVL_UPDATE 0x1D27
+#define mmDCP1_PRESCALE_GRPH_CONTROL 0x1D2D
+#define mmDCP1_PRESCALE_OVL_CONTROL 0x1D31
+#define mmDCP1_PRESCALE_VALUES_GRPH_B 0x1D30
+#define mmDCP1_PRESCALE_VALUES_GRPH_G 0x1D2F
+#define mmDCP1_PRESCALE_VALUES_GRPH_R 0x1D2E
+#define mmDCP1_PRESCALE_VALUES_OVL_CB 0x1D32
+#define mmDCP1_PRESCALE_VALUES_OVL_CR 0x1D34
+#define mmDCP1_PRESCALE_VALUES_OVL_Y 0x1D33
+#define mmDCP1_REGAMMA_CNTLA_END_CNTL1 0x1DA6
+#define mmDCP1_REGAMMA_CNTLA_END_CNTL2 0x1DA7
+#define mmDCP1_REGAMMA_CNTLA_REGION_0_1 0x1DA8
+#define mmDCP1_REGAMMA_CNTLA_REGION_10_11 0x1DAD
+#define mmDCP1_REGAMMA_CNTLA_REGION_12_13 0x1DAE
+#define mmDCP1_REGAMMA_CNTLA_REGION_14_15 0x1DAF
+#define mmDCP1_REGAMMA_CNTLA_REGION_2_3 0x1DA9
+#define mmDCP1_REGAMMA_CNTLA_REGION_4_5 0x1DAA
+#define mmDCP1_REGAMMA_CNTLA_REGION_6_7 0x1DAB
+#define mmDCP1_REGAMMA_CNTLA_REGION_8_9 0x1DAC
+#define mmDCP1_REGAMMA_CNTLA_SLOPE_CNTL 0x1DA5
+#define mmDCP1_REGAMMA_CNTLA_START_CNTL 0x1DA4
+#define mmDCP1_REGAMMA_CNTLB_END_CNTL1 0x1DB2
+#define mmDCP1_REGAMMA_CNTLB_END_CNTL2 0x1DB3
+#define mmDCP1_REGAMMA_CNTLB_REGION_0_1 0x1DB4
+#define mmDCP1_REGAMMA_CNTLB_REGION_10_11 0x1DB9
+#define mmDCP1_REGAMMA_CNTLB_REGION_12_13 0x1DBA
+#define mmDCP1_REGAMMA_CNTLB_REGION_14_15 0x1DBB
+#define mmDCP1_REGAMMA_CNTLB_REGION_2_3 0x1DB5
+#define mmDCP1_REGAMMA_CNTLB_REGION_4_5 0x1DB6
+#define mmDCP1_REGAMMA_CNTLB_REGION_6_7 0x1DB7
+#define mmDCP1_REGAMMA_CNTLB_REGION_8_9 0x1DB8
+#define mmDCP1_REGAMMA_CNTLB_SLOPE_CNTL 0x1DB1
+#define mmDCP1_REGAMMA_CNTLB_START_CNTL 0x1DB0
+#define mmDCP1_REGAMMA_CONTROL 0x1DA0
+#define mmDCP1_REGAMMA_LUT_DATA 0x1DA2
+#define mmDCP1_REGAMMA_LUT_INDEX 0x1DA1
+#define mmDCP1_REGAMMA_LUT_WRITE_EN_MASK 0x1DA3
+#define mmDCP2_COMM_MATRIXA_TRANS_C11_C12 0x4043
+#define mmDCP2_COMM_MATRIXA_TRANS_C13_C14 0x4044
+#define mmDCP2_COMM_MATRIXA_TRANS_C21_C22 0x4045
+#define mmDCP2_COMM_MATRIXA_TRANS_C23_C24 0x4046
+#define mmDCP2_COMM_MATRIXA_TRANS_C31_C32 0x4047
+#define mmDCP2_COMM_MATRIXA_TRANS_C33_C34 0x4048
+#define mmDCP2_COMM_MATRIXB_TRANS_C11_C12 0x4049
+#define mmDCP2_COMM_MATRIXB_TRANS_C13_C14 0x404A
+#define mmDCP2_COMM_MATRIXB_TRANS_C21_C22 0x404B
+#define mmDCP2_COMM_MATRIXB_TRANS_C23_C24 0x404C
+#define mmDCP2_COMM_MATRIXB_TRANS_C31_C32 0x404D
+#define mmDCP2_COMM_MATRIXB_TRANS_C33_C34 0x404E
+#define mmDCP2_CUR_COLOR1 0x406C
+#define mmDCP2_CUR_COLOR2 0x406D
+#define mmDCP2_CUR_CONTROL 0x4066
+#define mmDCP2_CUR_HOT_SPOT 0x406B
+#define mmDCP2_CUR_POSITION 0x406A
+#define mmDCP2_CUR_REQUEST_FILTER_CNTL 0x4099
+#define mmDCP2_CUR_SIZE 0x4068
+#define mmDCP2_CUR_SURFACE_ADDRESS 0x4067
+#define mmDCP2_CUR_SURFACE_ADDRESS_HIGH 0x4069
+#define mmDCP2_CUR_UPDATE 0x406E
+#define mmDCP2_DC_LUT_30_COLOR 0x407C
+#define mmDCP2_DC_LUT_AUTOFILL 0x407F
+#define mmDCP2_DC_LUT_BLACK_OFFSET_BLUE 0x4081
+#define mmDCP2_DC_LUT_BLACK_OFFSET_GREEN 0x4082
+#define mmDCP2_DC_LUT_BLACK_OFFSET_RED 0x4083
+#define mmDCP2_DC_LUT_CONTROL 0x4080
+#define mmDCP2_DC_LUT_PWL_DATA 0x407B
+#define mmDCP2_DC_LUT_RW_INDEX 0x4079
+#define mmDCP2_DC_LUT_RW_MODE 0x4078
+#define mmDCP2_DC_LUT_SEQ_COLOR 0x407A
+#define mmDCP2_DC_LUT_VGA_ACCESS_ENABLE 0x407D
+#define mmDCP2_DC_LUT_WHITE_OFFSET_BLUE 0x4084
+#define mmDCP2_DC_LUT_WHITE_OFFSET_GREEN 0x4085
+#define mmDCP2_DC_LUT_WHITE_OFFSET_RED 0x4086
+#define mmDCP2_DC_LUT_WRITE_EN_MASK 0x407E
+#define mmDCP2_DCP_CRC_CONTROL 0x4087
+#define mmDCP2_DCP_CRC_CURRENT 0x4089
+#define mmDCP2_DCP_CRC_LAST 0x408B
+#define mmDCP2_DCP_CRC_MASK 0x4088
+#define mmDCP2_DCP_DEBUG 0x408D
+#define mmDCP2_DCP_DEBUG2 0x4098
+#define mmDCP2_DCP_FP_CONVERTED_FIELD 0x4065
+#define mmDCP2_DCP_GSL_CONTROL 0x4090
+#define mmDCP2_DCP_LB_DATA_GAP_BETWEEN_CHUNK 0x4091
+#define mmDCP2_DCP_RANDOM_SEEDS 0x4061
+#define mmDCP2_DCP_SPATIAL_DITHER_CNTL 0x4060
+#define mmDCP2_DCP_TEST_DEBUG_DATA 0x4096
+#define mmDCP2_DCP_TEST_DEBUG_INDEX 0x4095
+#define mmDCP2_DEGAMMA_CONTROL 0x4058
+#define mmDCP2_DENORM_CONTROL 0x4050
+#define mmDCP2_GAMUT_REMAP_C11_C12 0x405A
+#define mmDCP2_GAMUT_REMAP_C13_C14 0x405B
+#define mmDCP2_GAMUT_REMAP_C21_C22 0x405C
+#define mmDCP2_GAMUT_REMAP_C23_C24 0x405D
+#define mmDCP2_GAMUT_REMAP_C31_C32 0x405E
+#define mmDCP2_GAMUT_REMAP_C33_C34 0x405F
+#define mmDCP2_GAMUT_REMAP_CONTROL 0x4059
+#define mmDCP2_GRPH_COMPRESS_PITCH 0x401A
+#define mmDCP2_GRPH_COMPRESS_SURFACE_ADDRESS 0x4019
+#define mmDCP2_GRPH_COMPRESS_SURFACE_ADDRESS_HIGH 0x401B
+#define mmDCP2_GRPH_CONTROL 0x4001
+#define mmDCP2_GRPH_DFQ_CONTROL 0x4014
+#define mmDCP2_GRPH_DFQ_STATUS 0x4015
+#define mmDCP2_GRPH_ENABLE 0x4000
+#define mmDCP2_GRPH_FLIP_CONTROL 0x4012
+#define mmDCP2_GRPH_INTERRUPT_CONTROL 0x4017
+#define mmDCP2_GRPH_INTERRUPT_STATUS 0x4016
+#define mmDCP2_GRPH_LUT_10BIT_BYPASS 0x4002
+#define mmDCP2_GRPH_PITCH 0x4006
+#define mmDCP2_GRPH_PRIMARY_SURFACE_ADDRESS 0x4004
+#define mmDCP2_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH 0x4007
+#define mmDCP2_GRPH_SECONDARY_SURFACE_ADDRESS 0x4005
+#define mmDCP2_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH 0x4008
+#define mmDCP2_GRPH_STEREOSYNC_FLIP 0x4097
+#define mmDCP2_GRPH_SURFACE_ADDRESS_HIGH_INUSE 0x4018
+#define mmDCP2_GRPH_SURFACE_ADDRESS_INUSE 0x4013
+#define mmDCP2_GRPH_SURFACE_OFFSET_X 0x4009
+#define mmDCP2_GRPH_SURFACE_OFFSET_Y 0x400A
+#define mmDCP2_GRPH_SWAP_CNTL 0x4003
+#define mmDCP2_GRPH_UPDATE 0x4011
+#define mmDCP2_GRPH_X_END 0x400D
+#define mmDCP2_GRPH_X_START 0x400B
+#define mmDCP2_GRPH_Y_END 0x400E
+#define mmDCP2_GRPH_Y_START 0x400C
+#define mmDCP2_INPUT_CSC_C11_C12 0x4036
+#define mmDCP2_INPUT_CSC_C13_C14 0x4037
+#define mmDCP2_INPUT_CSC_C21_C22 0x4038
+#define mmDCP2_INPUT_CSC_C23_C24 0x4039
+#define mmDCP2_INPUT_CSC_C31_C32 0x403A
+#define mmDCP2_INPUT_CSC_C33_C34 0x403B
+#define mmDCP2_INPUT_CSC_CONTROL 0x4035
+#define mmDCP2_INPUT_GAMMA_CONTROL 0x4010
+#define mmDCP2_KEY_CONTROL 0x4053
+#define mmDCP2_KEY_RANGE_ALPHA 0x4054
+#define mmDCP2_KEY_RANGE_BLUE 0x4057
+#define mmDCP2_KEY_RANGE_GREEN 0x4056
+#define mmDCP2_KEY_RANGE_RED 0x4055
+#define mmDCP2_OUTPUT_CSC_C11_C12 0x403D
+#define mmDCP2_OUTPUT_CSC_C13_C14 0x403E
+#define mmDCP2_OUTPUT_CSC_C21_C22 0x403F
+#define mmDCP2_OUTPUT_CSC_C23_C24 0x4040
+#define mmDCP2_OUTPUT_CSC_C31_C32 0x4041
+#define mmDCP2_OUTPUT_CSC_C33_C34 0x4042
+#define mmDCP2_OUTPUT_CSC_CONTROL 0x403C
+#define mmDCP2_OUT_ROUND_CONTROL 0x4051
+#define mmDCP2_OVL_CONTROL1 0x401D
+#define mmDCP2_OVL_CONTROL2 0x401E
+#define mmDCP2_OVL_DFQ_CONTROL 0x4029
+#define mmDCP2_OVL_DFQ_STATUS 0x402A
+#define mmDCP2_OVL_ENABLE 0x401C
+#define mmDCP2_OVL_END 0x4026
+#define mmDCP2_OVL_PITCH 0x4021
+#define mmDCP2_OVLSCL_EDGE_PIXEL_CNTL 0x402C
+#define mmDCP2_OVL_SECONDARY_SURFACE_ADDRESS 0x4092
+#define mmDCP2_OVL_SECONDARY_SURFACE_ADDRESS_HIGH 0x4094
+#define mmDCP2_OVL_START 0x4025
+#define mmDCP2_OVL_STEREOSYNC_FLIP 0x4093
+#define mmDCP2_OVL_SURFACE_ADDRESS 0x4020
+#define mmDCP2_OVL_SURFACE_ADDRESS_HIGH 0x4022
+#define mmDCP2_OVL_SURFACE_ADDRESS_HIGH_INUSE 0x402B
+#define mmDCP2_OVL_SURFACE_ADDRESS_INUSE 0x4028
+#define mmDCP2_OVL_SURFACE_OFFSET_X 0x4023
+#define mmDCP2_OVL_SURFACE_OFFSET_Y 0x4024
+#define mmDCP2_OVL_SWAP_CNTL 0x401F
+#define mmDCP2_OVL_UPDATE 0x4027
+#define mmDCP2_PRESCALE_GRPH_CONTROL 0x402D
+#define mmDCP2_PRESCALE_OVL_CONTROL 0x4031
+#define mmDCP2_PRESCALE_VALUES_GRPH_B 0x4030
+#define mmDCP2_PRESCALE_VALUES_GRPH_G 0x402F
+#define mmDCP2_PRESCALE_VALUES_GRPH_R 0x402E
+#define mmDCP2_PRESCALE_VALUES_OVL_CB 0x4032
+#define mmDCP2_PRESCALE_VALUES_OVL_CR 0x4034
+#define mmDCP2_PRESCALE_VALUES_OVL_Y 0x4033
+#define mmDCP2_REGAMMA_CNTLA_END_CNTL1 0x40A6
+#define mmDCP2_REGAMMA_CNTLA_END_CNTL2 0x40A7
+#define mmDCP2_REGAMMA_CNTLA_REGION_0_1 0x40A8
+#define mmDCP2_REGAMMA_CNTLA_REGION_10_11 0x40AD
+#define mmDCP2_REGAMMA_CNTLA_REGION_12_13 0x40AE
+#define mmDCP2_REGAMMA_CNTLA_REGION_14_15 0x40AF
+#define mmDCP2_REGAMMA_CNTLA_REGION_2_3 0x40A9
+#define mmDCP2_REGAMMA_CNTLA_REGION_4_5 0x40AA
+#define mmDCP2_REGAMMA_CNTLA_REGION_6_7 0x40AB
+#define mmDCP2_REGAMMA_CNTLA_REGION_8_9 0x40AC
+#define mmDCP2_REGAMMA_CNTLA_SLOPE_CNTL 0x40A5
+#define mmDCP2_REGAMMA_CNTLA_START_CNTL 0x40A4
+#define mmDCP2_REGAMMA_CNTLB_END_CNTL1 0x40B2
+#define mmDCP2_REGAMMA_CNTLB_END_CNTL2 0x40B3
+#define mmDCP2_REGAMMA_CNTLB_REGION_0_1 0x40B4
+#define mmDCP2_REGAMMA_CNTLB_REGION_10_11 0x40B9
+#define mmDCP2_REGAMMA_CNTLB_REGION_12_13 0x40BA
+#define mmDCP2_REGAMMA_CNTLB_REGION_14_15 0x40BB
+#define mmDCP2_REGAMMA_CNTLB_REGION_2_3 0x40B5
+#define mmDCP2_REGAMMA_CNTLB_REGION_4_5 0x40B6
+#define mmDCP2_REGAMMA_CNTLB_REGION_6_7 0x40B7
+#define mmDCP2_REGAMMA_CNTLB_REGION_8_9 0x40B8
+#define mmDCP2_REGAMMA_CNTLB_SLOPE_CNTL 0x40B1
+#define mmDCP2_REGAMMA_CNTLB_START_CNTL 0x40B0
+#define mmDCP2_REGAMMA_CONTROL 0x40A0
+#define mmDCP2_REGAMMA_LUT_DATA 0x40A2
+#define mmDCP2_REGAMMA_LUT_INDEX 0x40A1
+#define mmDCP2_REGAMMA_LUT_WRITE_EN_MASK 0x40A3
+#define mmDCP3_COMM_MATRIXA_TRANS_C11_C12 0x4343
+#define mmDCP3_COMM_MATRIXA_TRANS_C13_C14 0x4344
+#define mmDCP3_COMM_MATRIXA_TRANS_C21_C22 0x4345
+#define mmDCP3_COMM_MATRIXA_TRANS_C23_C24 0x4346
+#define mmDCP3_COMM_MATRIXA_TRANS_C31_C32 0x4347
+#define mmDCP3_COMM_MATRIXA_TRANS_C33_C34 0x4348
+#define mmDCP3_COMM_MATRIXB_TRANS_C11_C12 0x4349
+#define mmDCP3_COMM_MATRIXB_TRANS_C13_C14 0x434A
+#define mmDCP3_COMM_MATRIXB_TRANS_C21_C22 0x434B
+#define mmDCP3_COMM_MATRIXB_TRANS_C23_C24 0x434C
+#define mmDCP3_COMM_MATRIXB_TRANS_C31_C32 0x434D
+#define mmDCP3_COMM_MATRIXB_TRANS_C33_C34 0x434E
+#define mmDCP3_CUR_COLOR1 0x436C
+#define mmDCP3_CUR_COLOR2 0x436D
+#define mmDCP3_CUR_CONTROL 0x4366
+#define mmDCP3_CUR_HOT_SPOT 0x436B
+#define mmDCP3_CUR_POSITION 0x436A
+#define mmDCP3_CUR_REQUEST_FILTER_CNTL 0x4399
+#define mmDCP3_CUR_SIZE 0x4368
+#define mmDCP3_CUR_SURFACE_ADDRESS 0x4367
+#define mmDCP3_CUR_SURFACE_ADDRESS_HIGH 0x4369
+#define mmDCP3_CUR_UPDATE 0x436E
+#define mmDCP3_DC_LUT_30_COLOR 0x437C
+#define mmDCP3_DC_LUT_AUTOFILL 0x437F
+#define mmDCP3_DC_LUT_BLACK_OFFSET_BLUE 0x4381
+#define mmDCP3_DC_LUT_BLACK_OFFSET_GREEN 0x4382
+#define mmDCP3_DC_LUT_BLACK_OFFSET_RED 0x4383
+#define mmDCP3_DC_LUT_CONTROL 0x4380
+#define mmDCP3_DC_LUT_PWL_DATA 0x437B
+#define mmDCP3_DC_LUT_RW_INDEX 0x4379
+#define mmDCP3_DC_LUT_RW_MODE 0x4378
+#define mmDCP3_DC_LUT_SEQ_COLOR 0x437A
+#define mmDCP3_DC_LUT_VGA_ACCESS_ENABLE 0x437D
+#define mmDCP3_DC_LUT_WHITE_OFFSET_BLUE 0x4384
+#define mmDCP3_DC_LUT_WHITE_OFFSET_GREEN 0x4385
+#define mmDCP3_DC_LUT_WHITE_OFFSET_RED 0x4386
+#define mmDCP3_DC_LUT_WRITE_EN_MASK 0x437E
+#define mmDCP3_DCP_CRC_CONTROL 0x4387
+#define mmDCP3_DCP_CRC_CURRENT 0x4389
+#define mmDCP3_DCP_CRC_LAST 0x438B
+#define mmDCP3_DCP_CRC_MASK 0x4388
+#define mmDCP3_DCP_DEBUG 0x438D
+#define mmDCP3_DCP_DEBUG2 0x4398
+#define mmDCP3_DCP_FP_CONVERTED_FIELD 0x4365
+#define mmDCP3_DCP_GSL_CONTROL 0x4390
+#define mmDCP3_DCP_LB_DATA_GAP_BETWEEN_CHUNK 0x4391
+#define mmDCP3_DCP_RANDOM_SEEDS 0x4361
+#define mmDCP3_DCP_SPATIAL_DITHER_CNTL 0x4360
+#define mmDCP3_DCP_TEST_DEBUG_DATA 0x4396
+#define mmDCP3_DCP_TEST_DEBUG_INDEX 0x4395
+#define mmDCP3_DEGAMMA_CONTROL 0x4358
+#define mmDCP3_DENORM_CONTROL 0x4350
+#define mmDCP3_GAMUT_REMAP_C11_C12 0x435A
+#define mmDCP3_GAMUT_REMAP_C13_C14 0x435B
+#define mmDCP3_GAMUT_REMAP_C21_C22 0x435C
+#define mmDCP3_GAMUT_REMAP_C23_C24 0x435D
+#define mmDCP3_GAMUT_REMAP_C31_C32 0x435E
+#define mmDCP3_GAMUT_REMAP_C33_C34 0x435F
+#define mmDCP3_GAMUT_REMAP_CONTROL 0x4359
+#define mmDCP3_GRPH_COMPRESS_PITCH 0x431A
+#define mmDCP3_GRPH_COMPRESS_SURFACE_ADDRESS 0x4319
+#define mmDCP3_GRPH_COMPRESS_SURFACE_ADDRESS_HIGH 0x431B
+#define mmDCP3_GRPH_CONTROL 0x4301
+#define mmDCP3_GRPH_DFQ_CONTROL 0x4314
+#define mmDCP3_GRPH_DFQ_STATUS 0x4315
+#define mmDCP3_GRPH_ENABLE 0x4300
+#define mmDCP3_GRPH_FLIP_CONTROL 0x4312
+#define mmDCP3_GRPH_INTERRUPT_CONTROL 0x4317
+#define mmDCP3_GRPH_INTERRUPT_STATUS 0x4316
+#define mmDCP3_GRPH_LUT_10BIT_BYPASS 0x4302
+#define mmDCP3_GRPH_PITCH 0x4306
+#define mmDCP3_GRPH_PRIMARY_SURFACE_ADDRESS 0x4304
+#define mmDCP3_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH 0x4307
+#define mmDCP3_GRPH_SECONDARY_SURFACE_ADDRESS 0x4305
+#define mmDCP3_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH 0x4308
+#define mmDCP3_GRPH_STEREOSYNC_FLIP 0x4397
+#define mmDCP3_GRPH_SURFACE_ADDRESS_HIGH_INUSE 0x4318
+#define mmDCP3_GRPH_SURFACE_ADDRESS_INUSE 0x4313
+#define mmDCP3_GRPH_SURFACE_OFFSET_X 0x4309
+#define mmDCP3_GRPH_SURFACE_OFFSET_Y 0x430A
+#define mmDCP3_GRPH_SWAP_CNTL 0x4303
+#define mmDCP3_GRPH_UPDATE 0x4311
+#define mmDCP3_GRPH_X_END 0x430D
+#define mmDCP3_GRPH_X_START 0x430B
+#define mmDCP3_GRPH_Y_END 0x430E
+#define mmDCP3_GRPH_Y_START 0x430C
+#define mmDCP3_INPUT_CSC_C11_C12 0x4336
+#define mmDCP3_INPUT_CSC_C13_C14 0x4337
+#define mmDCP3_INPUT_CSC_C21_C22 0x4338
+#define mmDCP3_INPUT_CSC_C23_C24 0x4339
+#define mmDCP3_INPUT_CSC_C31_C32 0x433A
+#define mmDCP3_INPUT_CSC_C33_C34 0x433B
+#define mmDCP3_INPUT_CSC_CONTROL 0x4335
+#define mmDCP3_INPUT_GAMMA_CONTROL 0x4310
+#define mmDCP3_KEY_CONTROL 0x4353
+#define mmDCP3_KEY_RANGE_ALPHA 0x4354
+#define mmDCP3_KEY_RANGE_BLUE 0x4357
+#define mmDCP3_KEY_RANGE_GREEN 0x4356
+#define mmDCP3_KEY_RANGE_RED 0x4355
+#define mmDCP3_OUTPUT_CSC_C11_C12 0x433D
+#define mmDCP3_OUTPUT_CSC_C13_C14 0x433E
+#define mmDCP3_OUTPUT_CSC_C21_C22 0x433F
+#define mmDCP3_OUTPUT_CSC_C23_C24 0x4340
+#define mmDCP3_OUTPUT_CSC_C31_C32 0x4341
+#define mmDCP3_OUTPUT_CSC_C33_C34 0x4342
+#define mmDCP3_OUTPUT_CSC_CONTROL 0x433C
+#define mmDCP3_OUT_ROUND_CONTROL 0x4351
+#define mmDCP3_OVL_CONTROL1 0x431D
+#define mmDCP3_OVL_CONTROL2 0x431E
+#define mmDCP3_OVL_DFQ_CONTROL 0x4329
+#define mmDCP3_OVL_DFQ_STATUS 0x432A
+#define mmDCP3_OVL_ENABLE 0x431C
+#define mmDCP3_OVL_END 0x4326
+#define mmDCP3_OVL_PITCH 0x4321
+#define mmDCP3_OVLSCL_EDGE_PIXEL_CNTL 0x432C
+#define mmDCP3_OVL_SECONDARY_SURFACE_ADDRESS 0x4392
+#define mmDCP3_OVL_SECONDARY_SURFACE_ADDRESS_HIGH 0x4394
+#define mmDCP3_OVL_START 0x4325
+#define mmDCP3_OVL_STEREOSYNC_FLIP 0x4393
+#define mmDCP3_OVL_SURFACE_ADDRESS 0x4320
+#define mmDCP3_OVL_SURFACE_ADDRESS_HIGH 0x4322
+#define mmDCP3_OVL_SURFACE_ADDRESS_HIGH_INUSE 0x432B
+#define mmDCP3_OVL_SURFACE_ADDRESS_INUSE 0x4328
+#define mmDCP3_OVL_SURFACE_OFFSET_X 0x4323
+#define mmDCP3_OVL_SURFACE_OFFSET_Y 0x4324
+#define mmDCP3_OVL_SWAP_CNTL 0x431F
+#define mmDCP3_OVL_UPDATE 0x4327
+#define mmDCP3_PRESCALE_GRPH_CONTROL 0x432D
+#define mmDCP3_PRESCALE_OVL_CONTROL 0x4331
+#define mmDCP3_PRESCALE_VALUES_GRPH_B 0x4330
+#define mmDCP3_PRESCALE_VALUES_GRPH_G 0x432F
+#define mmDCP3_PRESCALE_VALUES_GRPH_R 0x432E
+#define mmDCP3_PRESCALE_VALUES_OVL_CB 0x4332
+#define mmDCP3_PRESCALE_VALUES_OVL_CR 0x4334
+#define mmDCP3_PRESCALE_VALUES_OVL_Y 0x4333
+#define mmDCP3_REGAMMA_CNTLA_END_CNTL1 0x43A6
+#define mmDCP3_REGAMMA_CNTLA_END_CNTL2 0x43A7
+#define mmDCP3_REGAMMA_CNTLA_REGION_0_1 0x43A8
+#define mmDCP3_REGAMMA_CNTLA_REGION_10_11 0x43AD
+#define mmDCP3_REGAMMA_CNTLA_REGION_12_13 0x43AE
+#define mmDCP3_REGAMMA_CNTLA_REGION_14_15 0x43AF
+#define mmDCP3_REGAMMA_CNTLA_REGION_2_3 0x43A9
+#define mmDCP3_REGAMMA_CNTLA_REGION_4_5 0x43AA
+#define mmDCP3_REGAMMA_CNTLA_REGION_6_7 0x43AB
+#define mmDCP3_REGAMMA_CNTLA_REGION_8_9 0x43AC
+#define mmDCP3_REGAMMA_CNTLA_SLOPE_CNTL 0x43A5
+#define mmDCP3_REGAMMA_CNTLA_START_CNTL 0x43A4
+#define mmDCP3_REGAMMA_CNTLB_END_CNTL1 0x43B2
+#define mmDCP3_REGAMMA_CNTLB_END_CNTL2 0x43B3
+#define mmDCP3_REGAMMA_CNTLB_REGION_0_1 0x43B4
+#define mmDCP3_REGAMMA_CNTLB_REGION_10_11 0x43B9
+#define mmDCP3_REGAMMA_CNTLB_REGION_12_13 0x43BA
+#define mmDCP3_REGAMMA_CNTLB_REGION_14_15 0x43BB
+#define mmDCP3_REGAMMA_CNTLB_REGION_2_3 0x43B5
+#define mmDCP3_REGAMMA_CNTLB_REGION_4_5 0x43B6
+#define mmDCP3_REGAMMA_CNTLB_REGION_6_7 0x43B7
+#define mmDCP3_REGAMMA_CNTLB_REGION_8_9 0x43B8
+#define mmDCP3_REGAMMA_CNTLB_SLOPE_CNTL 0x43B1
+#define mmDCP3_REGAMMA_CNTLB_START_CNTL 0x43B0
+#define mmDCP3_REGAMMA_CONTROL 0x43A0
+#define mmDCP3_REGAMMA_LUT_DATA 0x43A2
+#define mmDCP3_REGAMMA_LUT_INDEX 0x43A1
+#define mmDCP3_REGAMMA_LUT_WRITE_EN_MASK 0x43A3
+#define mmDCP4_COMM_MATRIXA_TRANS_C11_C12 0x4643
+#define mmDCP4_COMM_MATRIXA_TRANS_C13_C14 0x4644
+#define mmDCP4_COMM_MATRIXA_TRANS_C21_C22 0x4645
+#define mmDCP4_COMM_MATRIXA_TRANS_C23_C24 0x4646
+#define mmDCP4_COMM_MATRIXA_TRANS_C31_C32 0x4647
+#define mmDCP4_COMM_MATRIXA_TRANS_C33_C34 0x4648
+#define mmDCP4_COMM_MATRIXB_TRANS_C11_C12 0x4649
+#define mmDCP4_COMM_MATRIXB_TRANS_C13_C14 0x464A
+#define mmDCP4_COMM_MATRIXB_TRANS_C21_C22 0x464B
+#define mmDCP4_COMM_MATRIXB_TRANS_C23_C24 0x464C
+#define mmDCP4_COMM_MATRIXB_TRANS_C31_C32 0x464D
+#define mmDCP4_COMM_MATRIXB_TRANS_C33_C34 0x464E
+#define mmDCP4_CUR_COLOR1 0x466C
+#define mmDCP4_CUR_COLOR2 0x466D
+#define mmDCP4_CUR_CONTROL 0x4666
+#define mmDCP4_CUR_HOT_SPOT 0x466B
+#define mmDCP4_CUR_POSITION 0x466A
+#define mmDCP4_CUR_REQUEST_FILTER_CNTL 0x4699
+#define mmDCP4_CUR_SIZE 0x4668
+#define mmDCP4_CUR_SURFACE_ADDRESS 0x4667
+#define mmDCP4_CUR_SURFACE_ADDRESS_HIGH 0x4669
+#define mmDCP4_CUR_UPDATE 0x466E
+#define mmDCP4_DC_LUT_30_COLOR 0x467C
+#define mmDCP4_DC_LUT_AUTOFILL 0x467F
+#define mmDCP4_DC_LUT_BLACK_OFFSET_BLUE 0x4681
+#define mmDCP4_DC_LUT_BLACK_OFFSET_GREEN 0x4682
+#define mmDCP4_DC_LUT_BLACK_OFFSET_RED 0x4683
+#define mmDCP4_DC_LUT_CONTROL 0x4680
+#define mmDCP4_DC_LUT_PWL_DATA 0x467B
+#define mmDCP4_DC_LUT_RW_INDEX 0x4679
+#define mmDCP4_DC_LUT_RW_MODE 0x4678
+#define mmDCP4_DC_LUT_SEQ_COLOR 0x467A
+#define mmDCP4_DC_LUT_VGA_ACCESS_ENABLE 0x467D
+#define mmDCP4_DC_LUT_WHITE_OFFSET_BLUE 0x4684
+#define mmDCP4_DC_LUT_WHITE_OFFSET_GREEN 0x4685
+#define mmDCP4_DC_LUT_WHITE_OFFSET_RED 0x4686
+#define mmDCP4_DC_LUT_WRITE_EN_MASK 0x467E
+#define mmDCP4_DCP_CRC_CONTROL 0x4687
+#define mmDCP4_DCP_CRC_CURRENT 0x4689
+#define mmDCP4_DCP_CRC_LAST 0x468B
+#define mmDCP4_DCP_CRC_MASK 0x4688
+#define mmDCP4_DCP_DEBUG 0x468D
+#define mmDCP4_DCP_DEBUG2 0x4698
+#define mmDCP4_DCP_FP_CONVERTED_FIELD 0x4665
+#define mmDCP4_DCP_GSL_CONTROL 0x4690
+#define mmDCP4_DCP_LB_DATA_GAP_BETWEEN_CHUNK 0x4691
+#define mmDCP4_DCP_RANDOM_SEEDS 0x4661
+#define mmDCP4_DCP_SPATIAL_DITHER_CNTL 0x4660
+#define mmDCP4_DCP_TEST_DEBUG_DATA 0x4696
+#define mmDCP4_DCP_TEST_DEBUG_INDEX 0x4695
+#define mmDCP4_DEGAMMA_CONTROL 0x4658
+#define mmDCP4_DENORM_CONTROL 0x4650
+#define mmDCP4_GAMUT_REMAP_C11_C12 0x465A
+#define mmDCP4_GAMUT_REMAP_C13_C14 0x465B
+#define mmDCP4_GAMUT_REMAP_C21_C22 0x465C
+#define mmDCP4_GAMUT_REMAP_C23_C24 0x465D
+#define mmDCP4_GAMUT_REMAP_C31_C32 0x465E
+#define mmDCP4_GAMUT_REMAP_C33_C34 0x465F
+#define mmDCP4_GAMUT_REMAP_CONTROL 0x4659
+#define mmDCP4_GRPH_COMPRESS_PITCH 0x461A
+#define mmDCP4_GRPH_COMPRESS_SURFACE_ADDRESS 0x4619
+#define mmDCP4_GRPH_COMPRESS_SURFACE_ADDRESS_HIGH 0x461B
+#define mmDCP4_GRPH_CONTROL 0x4601
+#define mmDCP4_GRPH_DFQ_CONTROL 0x4614
+#define mmDCP4_GRPH_DFQ_STATUS 0x4615
+#define mmDCP4_GRPH_ENABLE 0x4600
+#define mmDCP4_GRPH_FLIP_CONTROL 0x4612
+#define mmDCP4_GRPH_INTERRUPT_CONTROL 0x4617
+#define mmDCP4_GRPH_INTERRUPT_STATUS 0x4616
+#define mmDCP4_GRPH_LUT_10BIT_BYPASS 0x4602
+#define mmDCP4_GRPH_PITCH 0x4606
+#define mmDCP4_GRPH_PRIMARY_SURFACE_ADDRESS 0x4604
+#define mmDCP4_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH 0x4607
+#define mmDCP4_GRPH_SECONDARY_SURFACE_ADDRESS 0x4605
+#define mmDCP4_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH 0x4608
+#define mmDCP4_GRPH_STEREOSYNC_FLIP 0x4697
+#define mmDCP4_GRPH_SURFACE_ADDRESS_HIGH_INUSE 0x4618
+#define mmDCP4_GRPH_SURFACE_ADDRESS_INUSE 0x4613
+#define mmDCP4_GRPH_SURFACE_OFFSET_X 0x4609
+#define mmDCP4_GRPH_SURFACE_OFFSET_Y 0x460A
+#define mmDCP4_GRPH_SWAP_CNTL 0x4603
+#define mmDCP4_GRPH_UPDATE 0x4611
+#define mmDCP4_GRPH_X_END 0x460D
+#define mmDCP4_GRPH_X_START 0x460B
+#define mmDCP4_GRPH_Y_END 0x460E
+#define mmDCP4_GRPH_Y_START 0x460C
+#define mmDCP4_INPUT_CSC_C11_C12 0x4636
+#define mmDCP4_INPUT_CSC_C13_C14 0x4637
+#define mmDCP4_INPUT_CSC_C21_C22 0x4638
+#define mmDCP4_INPUT_CSC_C23_C24 0x4639
+#define mmDCP4_INPUT_CSC_C31_C32 0x463A
+#define mmDCP4_INPUT_CSC_C33_C34 0x463B
+#define mmDCP4_INPUT_CSC_CONTROL 0x4635
+#define mmDCP4_INPUT_GAMMA_CONTROL 0x4610
+#define mmDCP4_KEY_CONTROL 0x4653
+#define mmDCP4_KEY_RANGE_ALPHA 0x4654
+#define mmDCP4_KEY_RANGE_BLUE 0x4657
+#define mmDCP4_KEY_RANGE_GREEN 0x4656
+#define mmDCP4_KEY_RANGE_RED 0x4655
+#define mmDCP4_OUTPUT_CSC_C11_C12 0x463D
+#define mmDCP4_OUTPUT_CSC_C13_C14 0x463E
+#define mmDCP4_OUTPUT_CSC_C21_C22 0x463F
+#define mmDCP4_OUTPUT_CSC_C23_C24 0x4640
+#define mmDCP4_OUTPUT_CSC_C31_C32 0x4641
+#define mmDCP4_OUTPUT_CSC_C33_C34 0x4642
+#define mmDCP4_OUTPUT_CSC_CONTROL 0x463C
+#define mmDCP4_OUT_ROUND_CONTROL 0x4651
+#define mmDCP4_OVL_CONTROL1 0x461D
+#define mmDCP4_OVL_CONTROL2 0x461E
+#define mmDCP4_OVL_DFQ_CONTROL 0x4629
+#define mmDCP4_OVL_DFQ_STATUS 0x462A
+#define mmDCP4_OVL_ENABLE 0x461C
+#define mmDCP4_OVL_END 0x4626
+#define mmDCP4_OVL_PITCH 0x4621
+#define mmDCP4_OVLSCL_EDGE_PIXEL_CNTL 0x462C
+#define mmDCP4_OVL_SECONDARY_SURFACE_ADDRESS 0x4692
+#define mmDCP4_OVL_SECONDARY_SURFACE_ADDRESS_HIGH 0x4694
+#define mmDCP4_OVL_START 0x4625
+#define mmDCP4_OVL_STEREOSYNC_FLIP 0x4693
+#define mmDCP4_OVL_SURFACE_ADDRESS 0x4620
+#define mmDCP4_OVL_SURFACE_ADDRESS_HIGH 0x4622
+#define mmDCP4_OVL_SURFACE_ADDRESS_HIGH_INUSE 0x462B
+#define mmDCP4_OVL_SURFACE_ADDRESS_INUSE 0x4628
+#define mmDCP4_OVL_SURFACE_OFFSET_X 0x4623
+#define mmDCP4_OVL_SURFACE_OFFSET_Y 0x4624
+#define mmDCP4_OVL_SWAP_CNTL 0x461F
+#define mmDCP4_OVL_UPDATE 0x4627
+#define mmDCP4_PRESCALE_GRPH_CONTROL 0x462D
+#define mmDCP4_PRESCALE_OVL_CONTROL 0x4631
+#define mmDCP4_PRESCALE_VALUES_GRPH_B 0x4630
+#define mmDCP4_PRESCALE_VALUES_GRPH_G 0x462F
+#define mmDCP4_PRESCALE_VALUES_GRPH_R 0x462E
+#define mmDCP4_PRESCALE_VALUES_OVL_CB 0x4632
+#define mmDCP4_PRESCALE_VALUES_OVL_CR 0x4634
+#define mmDCP4_PRESCALE_VALUES_OVL_Y 0x4633
+#define mmDCP4_REGAMMA_CNTLA_END_CNTL1 0x46A6
+#define mmDCP4_REGAMMA_CNTLA_END_CNTL2 0x46A7
+#define mmDCP4_REGAMMA_CNTLA_REGION_0_1 0x46A8
+#define mmDCP4_REGAMMA_CNTLA_REGION_10_11 0x46AD
+#define mmDCP4_REGAMMA_CNTLA_REGION_12_13 0x46AE
+#define mmDCP4_REGAMMA_CNTLA_REGION_14_15 0x46AF
+#define mmDCP4_REGAMMA_CNTLA_REGION_2_3 0x46A9
+#define mmDCP4_REGAMMA_CNTLA_REGION_4_5 0x46AA
+#define mmDCP4_REGAMMA_CNTLA_REGION_6_7 0x46AB
+#define mmDCP4_REGAMMA_CNTLA_REGION_8_9 0x46AC
+#define mmDCP4_REGAMMA_CNTLA_SLOPE_CNTL 0x46A5
+#define mmDCP4_REGAMMA_CNTLA_START_CNTL 0x46A4
+#define mmDCP4_REGAMMA_CNTLB_END_CNTL1 0x46B2
+#define mmDCP4_REGAMMA_CNTLB_END_CNTL2 0x46B3
+#define mmDCP4_REGAMMA_CNTLB_REGION_0_1 0x46B4
+#define mmDCP4_REGAMMA_CNTLB_REGION_10_11 0x46B9
+#define mmDCP4_REGAMMA_CNTLB_REGION_12_13 0x46BA
+#define mmDCP4_REGAMMA_CNTLB_REGION_14_15 0x46BB
+#define mmDCP4_REGAMMA_CNTLB_REGION_2_3 0x46B5
+#define mmDCP4_REGAMMA_CNTLB_REGION_4_5 0x46B6
+#define mmDCP4_REGAMMA_CNTLB_REGION_6_7 0x46B7
+#define mmDCP4_REGAMMA_CNTLB_REGION_8_9 0x46B8
+#define mmDCP4_REGAMMA_CNTLB_SLOPE_CNTL 0x46B1
+#define mmDCP4_REGAMMA_CNTLB_START_CNTL 0x46B0
+#define mmDCP4_REGAMMA_CONTROL 0x46A0
+#define mmDCP4_REGAMMA_LUT_DATA 0x46A2
+#define mmDCP4_REGAMMA_LUT_INDEX 0x46A1
+#define mmDCP4_REGAMMA_LUT_WRITE_EN_MASK 0x46A3
+#define mmDCP5_COMM_MATRIXA_TRANS_C11_C12 0x4943
+#define mmDCP5_COMM_MATRIXA_TRANS_C13_C14 0x4944
+#define mmDCP5_COMM_MATRIXA_TRANS_C21_C22 0x4945
+#define mmDCP5_COMM_MATRIXA_TRANS_C23_C24 0x4946
+#define mmDCP5_COMM_MATRIXA_TRANS_C31_C32 0x4947
+#define mmDCP5_COMM_MATRIXA_TRANS_C33_C34 0x4948
+#define mmDCP5_COMM_MATRIXB_TRANS_C11_C12 0x4949
+#define mmDCP5_COMM_MATRIXB_TRANS_C13_C14 0x494A
+#define mmDCP5_COMM_MATRIXB_TRANS_C21_C22 0x494B
+#define mmDCP5_COMM_MATRIXB_TRANS_C23_C24 0x494C
+#define mmDCP5_COMM_MATRIXB_TRANS_C31_C32 0x494D
+#define mmDCP5_COMM_MATRIXB_TRANS_C33_C34 0x494E
+#define mmDCP5_CUR_COLOR1 0x496C
+#define mmDCP5_CUR_COLOR2 0x496D
+#define mmDCP5_CUR_CONTROL 0x4966
+#define mmDCP5_CUR_HOT_SPOT 0x496B
+#define mmDCP5_CUR_POSITION 0x496A
+#define mmDCP5_CUR_REQUEST_FILTER_CNTL 0x4999
+#define mmDCP5_CUR_SIZE 0x4968
+#define mmDCP5_CUR_SURFACE_ADDRESS 0x4967
+#define mmDCP5_CUR_SURFACE_ADDRESS_HIGH 0x4969
+#define mmDCP5_CUR_UPDATE 0x496E
+#define mmDCP5_DC_LUT_30_COLOR 0x497C
+#define mmDCP5_DC_LUT_AUTOFILL 0x497F
+#define mmDCP5_DC_LUT_BLACK_OFFSET_BLUE 0x4981
+#define mmDCP5_DC_LUT_BLACK_OFFSET_GREEN 0x4982
+#define mmDCP5_DC_LUT_BLACK_OFFSET_RED 0x4983
+#define mmDCP5_DC_LUT_CONTROL 0x4980
+#define mmDCP5_DC_LUT_PWL_DATA 0x497B
+#define mmDCP5_DC_LUT_RW_INDEX 0x4979
+#define mmDCP5_DC_LUT_RW_MODE 0x4978
+#define mmDCP5_DC_LUT_SEQ_COLOR 0x497A
+#define mmDCP5_DC_LUT_VGA_ACCESS_ENABLE 0x497D
+#define mmDCP5_DC_LUT_WHITE_OFFSET_BLUE 0x4984
+#define mmDCP5_DC_LUT_WHITE_OFFSET_GREEN 0x4985
+#define mmDCP5_DC_LUT_WHITE_OFFSET_RED 0x4986
+#define mmDCP5_DC_LUT_WRITE_EN_MASK 0x497E
+#define mmDCP5_DCP_CRC_CONTROL 0x4987
+#define mmDCP5_DCP_CRC_CURRENT 0x4989
+#define mmDCP5_DCP_CRC_LAST 0x498B
+#define mmDCP5_DCP_CRC_MASK 0x4988
+#define mmDCP5_DCP_DEBUG 0x498D
+#define mmDCP5_DCP_DEBUG2 0x4998
+#define mmDCP5_DCP_FP_CONVERTED_FIELD 0x4965
+#define mmDCP5_DCP_GSL_CONTROL 0x4990
+#define mmDCP5_DCP_LB_DATA_GAP_BETWEEN_CHUNK 0x4991
+#define mmDCP5_DCP_RANDOM_SEEDS 0x4961
+#define mmDCP5_DCP_SPATIAL_DITHER_CNTL 0x4960
+#define mmDCP5_DCP_TEST_DEBUG_DATA 0x4996
+#define mmDCP5_DCP_TEST_DEBUG_INDEX 0x4995
+#define mmDCP5_DEGAMMA_CONTROL 0x4958
+#define mmDCP5_DENORM_CONTROL 0x4950
+#define mmDCP5_GAMUT_REMAP_C11_C12 0x495A
+#define mmDCP5_GAMUT_REMAP_C13_C14 0x495B
+#define mmDCP5_GAMUT_REMAP_C21_C22 0x495C
+#define mmDCP5_GAMUT_REMAP_C23_C24 0x495D
+#define mmDCP5_GAMUT_REMAP_C31_C32 0x495E
+#define mmDCP5_GAMUT_REMAP_C33_C34 0x495F
+#define mmDCP5_GAMUT_REMAP_CONTROL 0x4959
+#define mmDCP5_GRPH_COMPRESS_PITCH 0x491A
+#define mmDCP5_GRPH_COMPRESS_SURFACE_ADDRESS 0x4919
+#define mmDCP5_GRPH_COMPRESS_SURFACE_ADDRESS_HIGH 0x491B
+#define mmDCP5_GRPH_CONTROL 0x4901
+#define mmDCP5_GRPH_DFQ_CONTROL 0x4914
+#define mmDCP5_GRPH_DFQ_STATUS 0x4915
+#define mmDCP5_GRPH_ENABLE 0x4900
+#define mmDCP5_GRPH_FLIP_CONTROL 0x4912
+#define mmDCP5_GRPH_INTERRUPT_CONTROL 0x4917
+#define mmDCP5_GRPH_INTERRUPT_STATUS 0x4916
+#define mmDCP5_GRPH_LUT_10BIT_BYPASS 0x4902
+#define mmDCP5_GRPH_PITCH 0x4906
+#define mmDCP5_GRPH_PRIMARY_SURFACE_ADDRESS 0x4904
+#define mmDCP5_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH 0x4907
+#define mmDCP5_GRPH_SECONDARY_SURFACE_ADDRESS 0x4905
+#define mmDCP5_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH 0x4908
+#define mmDCP5_GRPH_STEREOSYNC_FLIP 0x4997
+#define mmDCP5_GRPH_SURFACE_ADDRESS_HIGH_INUSE 0x4918
+#define mmDCP5_GRPH_SURFACE_ADDRESS_INUSE 0x4913
+#define mmDCP5_GRPH_SURFACE_OFFSET_X 0x4909
+#define mmDCP5_GRPH_SURFACE_OFFSET_Y 0x490A
+#define mmDCP5_GRPH_SWAP_CNTL 0x4903
+#define mmDCP5_GRPH_UPDATE 0x4911
+#define mmDCP5_GRPH_X_END 0x490D
+#define mmDCP5_GRPH_X_START 0x490B
+#define mmDCP5_GRPH_Y_END 0x490E
+#define mmDCP5_GRPH_Y_START 0x490C
+#define mmDCP5_INPUT_CSC_C11_C12 0x4936
+#define mmDCP5_INPUT_CSC_C13_C14 0x4937
+#define mmDCP5_INPUT_CSC_C21_C22 0x4938
+#define mmDCP5_INPUT_CSC_C23_C24 0x4939
+#define mmDCP5_INPUT_CSC_C31_C32 0x493A
+#define mmDCP5_INPUT_CSC_C33_C34 0x493B
+#define mmDCP5_INPUT_CSC_CONTROL 0x4935
+#define mmDCP5_INPUT_GAMMA_CONTROL 0x4910
+#define mmDCP5_KEY_CONTROL 0x4953
+#define mmDCP5_KEY_RANGE_ALPHA 0x4954
+#define mmDCP5_KEY_RANGE_BLUE 0x4957
+#define mmDCP5_KEY_RANGE_GREEN 0x4956
+#define mmDCP5_KEY_RANGE_RED 0x4955
+#define mmDCP5_OUTPUT_CSC_C11_C12 0x493D
+#define mmDCP5_OUTPUT_CSC_C13_C14 0x493E
+#define mmDCP5_OUTPUT_CSC_C21_C22 0x493F
+#define mmDCP5_OUTPUT_CSC_C23_C24 0x4940
+#define mmDCP5_OUTPUT_CSC_C31_C32 0x4941
+#define mmDCP5_OUTPUT_CSC_C33_C34 0x4942
+#define mmDCP5_OUTPUT_CSC_CONTROL 0x493C
+#define mmDCP5_OUT_ROUND_CONTROL 0x4951
+#define mmDCP5_OVL_CONTROL1 0x491D
+#define mmDCP5_OVL_CONTROL2 0x491E
+#define mmDCP5_OVL_DFQ_CONTROL 0x4929
+#define mmDCP5_OVL_DFQ_STATUS 0x492A
+#define mmDCP5_OVL_ENABLE 0x491C
+#define mmDCP5_OVL_END 0x4926
+#define mmDCP5_OVL_PITCH 0x4921
+#define mmDCP5_OVLSCL_EDGE_PIXEL_CNTL 0x492C
+#define mmDCP5_OVL_SECONDARY_SURFACE_ADDRESS 0x4992
+#define mmDCP5_OVL_SECONDARY_SURFACE_ADDRESS_HIGH 0x4994
+#define mmDCP5_OVL_START 0x4925
+#define mmDCP5_OVL_STEREOSYNC_FLIP 0x4993
+#define mmDCP5_OVL_SURFACE_ADDRESS 0x4920
+#define mmDCP5_OVL_SURFACE_ADDRESS_HIGH 0x4922
+#define mmDCP5_OVL_SURFACE_ADDRESS_HIGH_INUSE 0x492B
+#define mmDCP5_OVL_SURFACE_ADDRESS_INUSE 0x4928
+#define mmDCP5_OVL_SURFACE_OFFSET_X 0x4923
+#define mmDCP5_OVL_SURFACE_OFFSET_Y 0x4924
+#define mmDCP5_OVL_SWAP_CNTL 0x491F
+#define mmDCP5_OVL_UPDATE 0x4927
+#define mmDCP5_PRESCALE_GRPH_CONTROL 0x492D
+#define mmDCP5_PRESCALE_OVL_CONTROL 0x4931
+#define mmDCP5_PRESCALE_VALUES_GRPH_B 0x4930
+#define mmDCP5_PRESCALE_VALUES_GRPH_G 0x492F
+#define mmDCP5_PRESCALE_VALUES_GRPH_R 0x492E
+#define mmDCP5_PRESCALE_VALUES_OVL_CB 0x4932
+#define mmDCP5_PRESCALE_VALUES_OVL_CR 0x4934
+#define mmDCP5_PRESCALE_VALUES_OVL_Y 0x4933
+#define mmDCP5_REGAMMA_CNTLA_END_CNTL1 0x49A6
+#define mmDCP5_REGAMMA_CNTLA_END_CNTL2 0x49A7
+#define mmDCP5_REGAMMA_CNTLA_REGION_0_1 0x49A8
+#define mmDCP5_REGAMMA_CNTLA_REGION_10_11 0x49AD
+#define mmDCP5_REGAMMA_CNTLA_REGION_12_13 0x49AE
+#define mmDCP5_REGAMMA_CNTLA_REGION_14_15 0x49AF
+#define mmDCP5_REGAMMA_CNTLA_REGION_2_3 0x49A9
+#define mmDCP5_REGAMMA_CNTLA_REGION_4_5 0x49AA
+#define mmDCP5_REGAMMA_CNTLA_REGION_6_7 0x49AB
+#define mmDCP5_REGAMMA_CNTLA_REGION_8_9 0x49AC
+#define mmDCP5_REGAMMA_CNTLA_SLOPE_CNTL 0x49A5
+#define mmDCP5_REGAMMA_CNTLA_START_CNTL 0x49A4
+#define mmDCP5_REGAMMA_CNTLB_END_CNTL1 0x49B2
+#define mmDCP5_REGAMMA_CNTLB_END_CNTL2 0x49B3
+#define mmDCP5_REGAMMA_CNTLB_REGION_0_1 0x49B4
+#define mmDCP5_REGAMMA_CNTLB_REGION_10_11 0x49B9
+#define mmDCP5_REGAMMA_CNTLB_REGION_12_13 0x49BA
+#define mmDCP5_REGAMMA_CNTLB_REGION_14_15 0x49BB
+#define mmDCP5_REGAMMA_CNTLB_REGION_2_3 0x49B5
+#define mmDCP5_REGAMMA_CNTLB_REGION_4_5 0x49B6
+#define mmDCP5_REGAMMA_CNTLB_REGION_6_7 0x49B7
+#define mmDCP5_REGAMMA_CNTLB_REGION_8_9 0x49B8
+#define mmDCP5_REGAMMA_CNTLB_SLOPE_CNTL 0x49B1
+#define mmDCP5_REGAMMA_CNTLB_START_CNTL 0x49B0
+#define mmDCP5_REGAMMA_CONTROL 0x49A0
+#define mmDCP5_REGAMMA_LUT_DATA 0x49A2
+#define mmDCP5_REGAMMA_LUT_INDEX 0x49A1
+#define mmDCP5_REGAMMA_LUT_WRITE_EN_MASK 0x49A3
+#define mmDC_PAD_EXTERN_SIG 0x1902
+#define mmDCP_CRC_CONTROL 0x1A87
+#define mmDCP_CRC_CURRENT 0x1A89
+#define mmDCP_CRC_LAST 0x1A8B
+#define mmDCP_CRC_MASK 0x1A88
+#define mmDCP_DEBUG 0x1A8D
+#define mmDCP_DEBUG2 0x1A98
+#define mmDCP_FP_CONVERTED_FIELD 0x1A65
+#define mmDC_PGCNTL_STATUS_REG 0x177E
+#define mmDC_PGFSM_CONFIG_REG 0x177C
+#define mmDC_PGFSM_WRITE_REG 0x177D
+#define mmDCP_GSL_CONTROL 0x1A90
+#define mmDCPG_TEST_DEBUG_DATA 0x177B
+#define mmDCPG_TEST_DEBUG_INDEX 0x1779
+#define mmDC_PINSTRAPS 0x1917
+#define mmDCP_LB_DATA_GAP_BETWEEN_CHUNK 0x1A91
+#define mmDCP_RANDOM_SEEDS 0x1A61
+#define mmDCP_SPATIAL_DITHER_CNTL 0x1A60
+#define mmDCP_TEST_DEBUG_DATA 0x1A96
+#define mmDCP_TEST_DEBUG_INDEX 0x1A95
+#define mmDC_RBBMIF_RDWR_CNTL1 0x031A
+#define mmDC_RBBMIF_RDWR_CNTL2 0x031D
+#define mmDC_REF_CLK_CNTL 0x1903
+#define mmDC_XDMA_INTERFACE_CNTL 0x0327
+#define mmDEGAMMA_CONTROL 0x1A58
+#define mmDENORM_CONTROL 0x1A50
+#define mmDENTIST_DISPCLK_CNTL 0x0124
+#define mmDIG0_AFMT_60958_0 0x1C41
+#define mmDIG0_AFMT_60958_1 0x1C42
+#define mmDIG0_AFMT_60958_2 0x1C48
+#define mmDIG0_AFMT_AUDIO_CRC_CONTROL 0x1C43
+#define mmDIG0_AFMT_AUDIO_CRC_RESULT 0x1C49
+#define mmDIG0_AFMT_AUDIO_DBG_DTO_CNTL 0x1C52
+#define mmDIG0_AFMT_AUDIO_INFO0 0x1C3F
+#define mmDIG0_AFMT_AUDIO_INFO1 0x1C40
+#define mmDIG0_AFMT_AUDIO_PACKET_CONTROL 0x1C4B
+#define mmDIG0_AFMT_AUDIO_PACKET_CONTROL2 0x1C17
+#define mmDIG0_AFMT_AUDIO_SRC_CONTROL 0x1C4F
+#define mmDIG0_AFMT_AVI_INFO0 0x1C21
+#define mmDIG0_AFMT_AVI_INFO1 0x1C22
+#define mmDIG0_AFMT_AVI_INFO2 0x1C23
+#define mmDIG0_AFMT_AVI_INFO3 0x1C24
+#define mmDIG0_AFMT_GENERIC_0 0x1C28
+#define mmDIG0_AFMT_GENERIC_1 0x1C29
+#define mmDIG0_AFMT_GENERIC_2 0x1C2A
+#define mmDIG0_AFMT_GENERIC_3 0x1C2B
+#define mmDIG0_AFMT_GENERIC_4 0x1C2C
+#define mmDIG0_AFMT_GENERIC_5 0x1C2D
+#define mmDIG0_AFMT_GENERIC_6 0x1C2E
+#define mmDIG0_AFMT_GENERIC_7 0x1C2F
+#define mmDIG0_AFMT_GENERIC_HDR 0x1C27
+#define mmDIG0_AFMT_INFOFRAME_CONTROL0 0x1C4D
+#define mmDIG0_AFMT_INTERRUPT_STATUS 0x1C14
+#define mmDIG0_AFMT_ISRC1_0 0x1C18
+#define mmDIG0_AFMT_ISRC1_1 0x1C19
+#define mmDIG0_AFMT_ISRC1_2 0x1C1A
+#define mmDIG0_AFMT_ISRC1_3 0x1C1B
+#define mmDIG0_AFMT_ISRC1_4 0x1C1C
+#define mmDIG0_AFMT_ISRC2_0 0x1C1D
+#define mmDIG0_AFMT_ISRC2_1 0x1C1E
+#define mmDIG0_AFMT_ISRC2_2 0x1C1F
+#define mmDIG0_AFMT_ISRC2_3 0x1C20
+#define mmDIG0_AFMT_MPEG_INFO0 0x1C25
+#define mmDIG0_AFMT_MPEG_INFO1 0x1C26
+#define mmDIG0_AFMT_RAMP_CONTROL0 0x1C44
+#define mmDIG0_AFMT_RAMP_CONTROL1 0x1C45
+#define mmDIG0_AFMT_RAMP_CONTROL2 0x1C46
+#define mmDIG0_AFMT_RAMP_CONTROL3 0x1C47
+#define mmDIG0_AFMT_STATUS 0x1C4A
+#define mmDIG0_AFMT_VBI_PACKET_CONTROL 0x1C4C
+#define mmDIG0_DIG_BE_CNTL 0x1C50
+#define mmDIG0_DIG_BE_EN_CNTL 0x1C51
+#define mmDIG0_DIG_CLOCK_PATTERN 0x1C03
+#define mmDIG0_DIG_DISPCLK_SWITCH_CNTL 0x1C08
+#define mmDIG0_DIG_DISPCLK_SWITCH_STATUS 0x1C09
+#define mmDIG0_DIG_FE_CNTL 0x1C00
+#define mmDIG0_DIG_FIFO_STATUS 0x1C0A
+#define mmDIG0_DIG_LANE_ENABLE 0x1C8D
+#define mmDIG0_DIG_OUTPUT_CRC_CNTL 0x1C01
+#define mmDIG0_DIG_OUTPUT_CRC_RESULT 0x1C02
+#define mmDIG0_DIG_RANDOM_PATTERN_SEED 0x1C05
+#define mmDIG0_DIG_TEST_PATTERN 0x1C04
+#define mmDIG0_HDMI_ACR_32_0 0x1C37
+#define mmDIG0_HDMI_ACR_32_1 0x1C38
+#define mmDIG0_HDMI_ACR_44_0 0x1C39
+#define mmDIG0_HDMI_ACR_44_1 0x1C3A
+#define mmDIG0_HDMI_ACR_48_0 0x1C3B
+#define mmDIG0_HDMI_ACR_48_1 0x1C3C
+#define mmDIG0_HDMI_ACR_PACKET_CONTROL 0x1C0F
+#define mmDIG0_HDMI_ACR_STATUS_0 0x1C3D
+#define mmDIG0_HDMI_ACR_STATUS_1 0x1C3E
+#define mmDIG0_HDMI_AUDIO_PACKET_CONTROL 0x1C0E
+#define mmDIG0_HDMI_CONTROL 0x1C0C
+#define mmDIG0_HDMI_GC 0x1C16
+#define mmDIG0_HDMI_GENERIC_PACKET_CONTROL0 0x1C13
+#define mmDIG0_HDMI_GENERIC_PACKET_CONTROL1 0x1C30
+#define mmDIG0_HDMI_INFOFRAME_CONTROL0 0x1C11
+#define mmDIG0_HDMI_INFOFRAME_CONTROL1 0x1C12
+#define mmDIG0_HDMI_STATUS 0x1C0D
+#define mmDIG0_HDMI_VBI_PACKET_CONTROL 0x1C10
+#define mmDIG0_LVDS_DATA_CNTL 0x1C8C
+#define mmDIG0_TMDS_CNTL 0x1C7C
+#define mmDIG0_TMDS_CONTROL0_FEEDBACK 0x1C7E
+#define mmDIG0_TMDS_CONTROL_CHAR 0x1C7D
+#define mmDIG0_TMDS_CTL0_1_GEN_CNTL 0x1C86
+#define mmDIG0_TMDS_CTL2_3_GEN_CNTL 0x1C87
+#define mmDIG0_TMDS_CTL_BITS 0x1C83
+#define mmDIG0_TMDS_DCBALANCER_CONTROL 0x1C84
+#define mmDIG0_TMDS_DEBUG 0x1C82
+#define mmDIG0_TMDS_STEREOSYNC_CTL_SEL 0x1C7F
+#define mmDIG0_TMDS_SYNC_CHAR_PATTERN_0_1 0x1C80
+#define mmDIG0_TMDS_SYNC_CHAR_PATTERN_2_3 0x1C81
+#define mmDIG1_AFMT_60958_0 0x1F41
+#define mmDIG1_AFMT_60958_1 0x1F42
+#define mmDIG1_AFMT_60958_2 0x1F48
+#define mmDIG1_AFMT_AUDIO_CRC_CONTROL 0x1F43
+#define mmDIG1_AFMT_AUDIO_CRC_RESULT 0x1F49
+#define mmDIG1_AFMT_AUDIO_DBG_DTO_CNTL 0x1F52
+#define mmDIG1_AFMT_AUDIO_INFO0 0x1F3F
+#define mmDIG1_AFMT_AUDIO_INFO1 0x1F40
+#define mmDIG1_AFMT_AUDIO_PACKET_CONTROL 0x1F4B
+#define mmDIG1_AFMT_AUDIO_PACKET_CONTROL2 0x1F17
+#define mmDIG1_AFMT_AUDIO_SRC_CONTROL 0x1F4F
+#define mmDIG1_AFMT_AVI_INFO0 0x1F21
+#define mmDIG1_AFMT_AVI_INFO1 0x1F22
+#define mmDIG1_AFMT_AVI_INFO2 0x1F23
+#define mmDIG1_AFMT_AVI_INFO3 0x1F24
+#define mmDIG1_AFMT_GENERIC_0 0x1F28
+#define mmDIG1_AFMT_GENERIC_1 0x1F29
+#define mmDIG1_AFMT_GENERIC_2 0x1F2A
+#define mmDIG1_AFMT_GENERIC_3 0x1F2B
+#define mmDIG1_AFMT_GENERIC_4 0x1F2C
+#define mmDIG1_AFMT_GENERIC_5 0x1F2D
+#define mmDIG1_AFMT_GENERIC_6 0x1F2E
+#define mmDIG1_AFMT_GENERIC_7 0x1F2F
+#define mmDIG1_AFMT_GENERIC_HDR 0x1F27
+#define mmDIG1_AFMT_INFOFRAME_CONTROL0 0x1F4D
+#define mmDIG1_AFMT_INTERRUPT_STATUS 0x1F14
+#define mmDIG1_AFMT_ISRC1_0 0x1F18
+#define mmDIG1_AFMT_ISRC1_1 0x1F19
+#define mmDIG1_AFMT_ISRC1_2 0x1F1A
+#define mmDIG1_AFMT_ISRC1_3 0x1F1B
+#define mmDIG1_AFMT_ISRC1_4 0x1F1C
+#define mmDIG1_AFMT_ISRC2_0 0x1F1D
+#define mmDIG1_AFMT_ISRC2_1 0x1F1E
+#define mmDIG1_AFMT_ISRC2_2 0x1F1F
+#define mmDIG1_AFMT_ISRC2_3 0x1F20
+#define mmDIG1_AFMT_MPEG_INFO0 0x1F25
+#define mmDIG1_AFMT_MPEG_INFO1 0x1F26
+#define mmDIG1_AFMT_RAMP_CONTROL0 0x1F44
+#define mmDIG1_AFMT_RAMP_CONTROL1 0x1F45
+#define mmDIG1_AFMT_RAMP_CONTROL2 0x1F46
+#define mmDIG1_AFMT_RAMP_CONTROL3 0x1F47
+#define mmDIG1_AFMT_STATUS 0x1F4A
+#define mmDIG1_AFMT_VBI_PACKET_CONTROL 0x1F4C
+#define mmDIG1_DIG_BE_CNTL 0x1F50
+#define mmDIG1_DIG_BE_EN_CNTL 0x1F51
+#define mmDIG1_DIG_CLOCK_PATTERN 0x1F03
+#define mmDIG1_DIG_DISPCLK_SWITCH_CNTL 0x1F08
+#define mmDIG1_DIG_DISPCLK_SWITCH_STATUS 0x1F09
+#define mmDIG1_DIG_FE_CNTL 0x1F00
+#define mmDIG1_DIG_FIFO_STATUS 0x1F0A
+#define mmDIG1_DIG_LANE_ENABLE 0x1F8D
+#define mmDIG1_DIG_OUTPUT_CRC_CNTL 0x1F01
+#define mmDIG1_DIG_OUTPUT_CRC_RESULT 0x1F02
+#define mmDIG1_DIG_RANDOM_PATTERN_SEED 0x1F05
+#define mmDIG1_DIG_TEST_PATTERN 0x1F04
+#define mmDIG1_HDMI_ACR_32_0 0x1F37
+#define mmDIG1_HDMI_ACR_32_1 0x1F38
+#define mmDIG1_HDMI_ACR_44_0 0x1F39
+#define mmDIG1_HDMI_ACR_44_1 0x1F3A
+#define mmDIG1_HDMI_ACR_48_0 0x1F3B
+#define mmDIG1_HDMI_ACR_48_1 0x1F3C
+#define mmDIG1_HDMI_ACR_PACKET_CONTROL 0x1F0F
+#define mmDIG1_HDMI_ACR_STATUS_0 0x1F3D
+#define mmDIG1_HDMI_ACR_STATUS_1 0x1F3E
+#define mmDIG1_HDMI_AUDIO_PACKET_CONTROL 0x1F0E
+#define mmDIG1_HDMI_CONTROL 0x1F0C
+#define mmDIG1_HDMI_GC 0x1F16
+#define mmDIG1_HDMI_GENERIC_PACKET_CONTROL0 0x1F13
+#define mmDIG1_HDMI_GENERIC_PACKET_CONTROL1 0x1F30
+#define mmDIG1_HDMI_INFOFRAME_CONTROL0 0x1F11
+#define mmDIG1_HDMI_INFOFRAME_CONTROL1 0x1F12
+#define mmDIG1_HDMI_STATUS 0x1F0D
+#define mmDIG1_HDMI_VBI_PACKET_CONTROL 0x1F10
+#define mmDIG1_LVDS_DATA_CNTL 0x1F8C
+#define mmDIG1_TMDS_CNTL 0x1F7C
+#define mmDIG1_TMDS_CONTROL0_FEEDBACK 0x1F7E
+#define mmDIG1_TMDS_CONTROL_CHAR 0x1F7D
+#define mmDIG1_TMDS_CTL0_1_GEN_CNTL 0x1F86
+#define mmDIG1_TMDS_CTL2_3_GEN_CNTL 0x1F87
+#define mmDIG1_TMDS_CTL_BITS 0x1F83
+#define mmDIG1_TMDS_DCBALANCER_CONTROL 0x1F84
+#define mmDIG1_TMDS_DEBUG 0x1F82
+#define mmDIG1_TMDS_STEREOSYNC_CTL_SEL 0x1F7F
+#define mmDIG1_TMDS_SYNC_CHAR_PATTERN_0_1 0x1F80
+#define mmDIG1_TMDS_SYNC_CHAR_PATTERN_2_3 0x1F81
+#define mmDIG2_AFMT_60958_0 0x4241
+#define mmDIG2_AFMT_60958_1 0x4242
+#define mmDIG2_AFMT_60958_2 0x4248
+#define mmDIG2_AFMT_AUDIO_CRC_CONTROL 0x4243
+#define mmDIG2_AFMT_AUDIO_CRC_RESULT 0x4249
+#define mmDIG2_AFMT_AUDIO_DBG_DTO_CNTL 0x4252
+#define mmDIG2_AFMT_AUDIO_INFO0 0x423F
+#define mmDIG2_AFMT_AUDIO_INFO1 0x4240
+#define mmDIG2_AFMT_AUDIO_PACKET_CONTROL 0x424B
+#define mmDIG2_AFMT_AUDIO_PACKET_CONTROL2 0x4217
+#define mmDIG2_AFMT_AUDIO_SRC_CONTROL 0x424F
+#define mmDIG2_AFMT_AVI_INFO0 0x4221
+#define mmDIG2_AFMT_AVI_INFO1 0x4222
+#define mmDIG2_AFMT_AVI_INFO2 0x4223
+#define mmDIG2_AFMT_AVI_INFO3 0x4224
+#define mmDIG2_AFMT_GENERIC_0 0x4228
+#define mmDIG2_AFMT_GENERIC_1 0x4229
+#define mmDIG2_AFMT_GENERIC_2 0x422A
+#define mmDIG2_AFMT_GENERIC_3 0x422B
+#define mmDIG2_AFMT_GENERIC_4 0x422C
+#define mmDIG2_AFMT_GENERIC_5 0x422D
+#define mmDIG2_AFMT_GENERIC_6 0x422E
+#define mmDIG2_AFMT_GENERIC_7 0x422F
+#define mmDIG2_AFMT_GENERIC_HDR 0x4227
+#define mmDIG2_AFMT_INFOFRAME_CONTROL0 0x424D
+#define mmDIG2_AFMT_INTERRUPT_STATUS 0x4214
+#define mmDIG2_AFMT_ISRC1_0 0x4218
+#define mmDIG2_AFMT_ISRC1_1 0x4219
+#define mmDIG2_AFMT_ISRC1_2 0x421A
+#define mmDIG2_AFMT_ISRC1_3 0x421B
+#define mmDIG2_AFMT_ISRC1_4 0x421C
+#define mmDIG2_AFMT_ISRC2_0 0x421D
+#define mmDIG2_AFMT_ISRC2_1 0x421E
+#define mmDIG2_AFMT_ISRC2_2 0x421F
+#define mmDIG2_AFMT_ISRC2_3 0x4220
+#define mmDIG2_AFMT_MPEG_INFO0 0x4225
+#define mmDIG2_AFMT_MPEG_INFO1 0x4226
+#define mmDIG2_AFMT_RAMP_CONTROL0 0x4244
+#define mmDIG2_AFMT_RAMP_CONTROL1 0x4245
+#define mmDIG2_AFMT_RAMP_CONTROL2 0x4246
+#define mmDIG2_AFMT_RAMP_CONTROL3 0x4247
+#define mmDIG2_AFMT_STATUS 0x424A
+#define mmDIG2_AFMT_VBI_PACKET_CONTROL 0x424C
+#define mmDIG2_DIG_BE_CNTL 0x4250
+#define mmDIG2_DIG_BE_EN_CNTL 0x4251
+#define mmDIG2_DIG_CLOCK_PATTERN 0x4203
+#define mmDIG2_DIG_DISPCLK_SWITCH_CNTL 0x4208
+#define mmDIG2_DIG_DISPCLK_SWITCH_STATUS 0x4209
+#define mmDIG2_DIG_FE_CNTL 0x4200
+#define mmDIG2_DIG_FIFO_STATUS 0x420A
+#define mmDIG2_DIG_LANE_ENABLE 0x428D
+#define mmDIG2_DIG_OUTPUT_CRC_CNTL 0x4201
+#define mmDIG2_DIG_OUTPUT_CRC_RESULT 0x4202
+#define mmDIG2_DIG_RANDOM_PATTERN_SEED 0x4205
+#define mmDIG2_DIG_TEST_PATTERN 0x4204
+#define mmDIG2_HDMI_ACR_32_0 0x4237
+#define mmDIG2_HDMI_ACR_32_1 0x4238
+#define mmDIG2_HDMI_ACR_44_0 0x4239
+#define mmDIG2_HDMI_ACR_44_1 0x423A
+#define mmDIG2_HDMI_ACR_48_0 0x423B
+#define mmDIG2_HDMI_ACR_48_1 0x423C
+#define mmDIG2_HDMI_ACR_PACKET_CONTROL 0x420F
+#define mmDIG2_HDMI_ACR_STATUS_0 0x423D
+#define mmDIG2_HDMI_ACR_STATUS_1 0x423E
+#define mmDIG2_HDMI_AUDIO_PACKET_CONTROL 0x420E
+#define mmDIG2_HDMI_CONTROL 0x420C
+#define mmDIG2_HDMI_GC 0x4216
+#define mmDIG2_HDMI_GENERIC_PACKET_CONTROL0 0x4213
+#define mmDIG2_HDMI_GENERIC_PACKET_CONTROL1 0x4230
+#define mmDIG2_HDMI_INFOFRAME_CONTROL0 0x4211
+#define mmDIG2_HDMI_INFOFRAME_CONTROL1 0x4212
+#define mmDIG2_HDMI_STATUS 0x420D
+#define mmDIG2_HDMI_VBI_PACKET_CONTROL 0x4210
+#define mmDIG2_LVDS_DATA_CNTL 0x428C
+#define mmDIG2_TMDS_CNTL 0x427C
+#define mmDIG2_TMDS_CONTROL0_FEEDBACK 0x427E
+#define mmDIG2_TMDS_CONTROL_CHAR 0x427D
+#define mmDIG2_TMDS_CTL0_1_GEN_CNTL 0x4286
+#define mmDIG2_TMDS_CTL2_3_GEN_CNTL 0x4287
+#define mmDIG2_TMDS_CTL_BITS 0x4283
+#define mmDIG2_TMDS_DCBALANCER_CONTROL 0x4284
+#define mmDIG2_TMDS_DEBUG 0x4282
+#define mmDIG2_TMDS_STEREOSYNC_CTL_SEL 0x427F
+#define mmDIG2_TMDS_SYNC_CHAR_PATTERN_0_1 0x4280
+#define mmDIG2_TMDS_SYNC_CHAR_PATTERN_2_3 0x4281
+#define mmDIG3_AFMT_60958_0 0x4541
+#define mmDIG3_AFMT_60958_1 0x4542
+#define mmDIG3_AFMT_60958_2 0x4548
+#define mmDIG3_AFMT_AUDIO_CRC_CONTROL 0x4543
+#define mmDIG3_AFMT_AUDIO_CRC_RESULT 0x4549
+#define mmDIG3_AFMT_AUDIO_DBG_DTO_CNTL 0x4552
+#define mmDIG3_AFMT_AUDIO_INFO0 0x453F
+#define mmDIG3_AFMT_AUDIO_INFO1 0x4540
+#define mmDIG3_AFMT_AUDIO_PACKET_CONTROL 0x454B
+#define mmDIG3_AFMT_AUDIO_PACKET_CONTROL2 0x4517
+#define mmDIG3_AFMT_AUDIO_SRC_CONTROL 0x454F
+#define mmDIG3_AFMT_AVI_INFO0 0x4521
+#define mmDIG3_AFMT_AVI_INFO1 0x4522
+#define mmDIG3_AFMT_AVI_INFO2 0x4523
+#define mmDIG3_AFMT_AVI_INFO3 0x4524
+#define mmDIG3_AFMT_GENERIC_0 0x4528
+#define mmDIG3_AFMT_GENERIC_1 0x4529
+#define mmDIG3_AFMT_GENERIC_2 0x452A
+#define mmDIG3_AFMT_GENERIC_3 0x452B
+#define mmDIG3_AFMT_GENERIC_4 0x452C
+#define mmDIG3_AFMT_GENERIC_5 0x452D
+#define mmDIG3_AFMT_GENERIC_6 0x452E
+#define mmDIG3_AFMT_GENERIC_7 0x452F
+#define mmDIG3_AFMT_GENERIC_HDR 0x4527
+#define mmDIG3_AFMT_INFOFRAME_CONTROL0 0x454D
+#define mmDIG3_AFMT_INTERRUPT_STATUS 0x4514
+#define mmDIG3_AFMT_ISRC1_0 0x4518
+#define mmDIG3_AFMT_ISRC1_1 0x4519
+#define mmDIG3_AFMT_ISRC1_2 0x451A
+#define mmDIG3_AFMT_ISRC1_3 0x451B
+#define mmDIG3_AFMT_ISRC1_4 0x451C
+#define mmDIG3_AFMT_ISRC2_0 0x451D
+#define mmDIG3_AFMT_ISRC2_1 0x451E
+#define mmDIG3_AFMT_ISRC2_2 0x451F
+#define mmDIG3_AFMT_ISRC2_3 0x4520
+#define mmDIG3_AFMT_MPEG_INFO0 0x4525
+#define mmDIG3_AFMT_MPEG_INFO1 0x4526
+#define mmDIG3_AFMT_RAMP_CONTROL0 0x4544
+#define mmDIG3_AFMT_RAMP_CONTROL1 0x4545
+#define mmDIG3_AFMT_RAMP_CONTROL2 0x4546
+#define mmDIG3_AFMT_RAMP_CONTROL3 0x4547
+#define mmDIG3_AFMT_STATUS 0x454A
+#define mmDIG3_AFMT_VBI_PACKET_CONTROL 0x454C
+#define mmDIG3_DIG_BE_CNTL 0x4550
+#define mmDIG3_DIG_BE_EN_CNTL 0x4551
+#define mmDIG3_DIG_CLOCK_PATTERN 0x4503
+#define mmDIG3_DIG_DISPCLK_SWITCH_CNTL 0x4508
+#define mmDIG3_DIG_DISPCLK_SWITCH_STATUS 0x4509
+#define mmDIG3_DIG_FE_CNTL 0x4500
+#define mmDIG3_DIG_FIFO_STATUS 0x450A
+#define mmDIG3_DIG_LANE_ENABLE 0x458D
+#define mmDIG3_DIG_OUTPUT_CRC_CNTL 0x4501
+#define mmDIG3_DIG_OUTPUT_CRC_RESULT 0x4502
+#define mmDIG3_DIG_RANDOM_PATTERN_SEED 0x4505
+#define mmDIG3_DIG_TEST_PATTERN 0x4504
+#define mmDIG3_HDMI_ACR_32_0 0x4537
+#define mmDIG3_HDMI_ACR_32_1 0x4538
+#define mmDIG3_HDMI_ACR_44_0 0x4539
+#define mmDIG3_HDMI_ACR_44_1 0x453A
+#define mmDIG3_HDMI_ACR_48_0 0x453B
+#define mmDIG3_HDMI_ACR_48_1 0x453C
+#define mmDIG3_HDMI_ACR_PACKET_CONTROL 0x450F
+#define mmDIG3_HDMI_ACR_STATUS_0 0x453D
+#define mmDIG3_HDMI_ACR_STATUS_1 0x453E
+#define mmDIG3_HDMI_AUDIO_PACKET_CONTROL 0x450E
+#define mmDIG3_HDMI_CONTROL 0x450C
+#define mmDIG3_HDMI_GC 0x4516
+#define mmDIG3_HDMI_GENERIC_PACKET_CONTROL0 0x4513
+#define mmDIG3_HDMI_GENERIC_PACKET_CONTROL1 0x4530
+#define mmDIG3_HDMI_INFOFRAME_CONTROL0 0x4511
+#define mmDIG3_HDMI_INFOFRAME_CONTROL1 0x4512
+#define mmDIG3_HDMI_STATUS 0x450D
+#define mmDIG3_HDMI_VBI_PACKET_CONTROL 0x4510
+#define mmDIG3_LVDS_DATA_CNTL 0x458C
+#define mmDIG3_TMDS_CNTL 0x457C
+#define mmDIG3_TMDS_CONTROL0_FEEDBACK 0x457E
+#define mmDIG3_TMDS_CONTROL_CHAR 0x457D
+#define mmDIG3_TMDS_CTL0_1_GEN_CNTL 0x4586
+#define mmDIG3_TMDS_CTL2_3_GEN_CNTL 0x4587
+#define mmDIG3_TMDS_CTL_BITS 0x4583
+#define mmDIG3_TMDS_DCBALANCER_CONTROL 0x4584
+#define mmDIG3_TMDS_DEBUG 0x4582
+#define mmDIG3_TMDS_STEREOSYNC_CTL_SEL 0x457F
+#define mmDIG3_TMDS_SYNC_CHAR_PATTERN_0_1 0x4580
+#define mmDIG3_TMDS_SYNC_CHAR_PATTERN_2_3 0x4581
+#define mmDIG4_AFMT_60958_0 0x4841
+#define mmDIG4_AFMT_60958_1 0x4842
+#define mmDIG4_AFMT_60958_2 0x4848
+#define mmDIG4_AFMT_AUDIO_CRC_CONTROL 0x4843
+#define mmDIG4_AFMT_AUDIO_CRC_RESULT 0x4849
+#define mmDIG4_AFMT_AUDIO_DBG_DTO_CNTL 0x4852
+#define mmDIG4_AFMT_AUDIO_INFO0 0x483F
+#define mmDIG4_AFMT_AUDIO_INFO1 0x4840
+#define mmDIG4_AFMT_AUDIO_PACKET_CONTROL 0x484B
+#define mmDIG4_AFMT_AUDIO_PACKET_CONTROL2 0x4817
+#define mmDIG4_AFMT_AUDIO_SRC_CONTROL 0x484F
+#define mmDIG4_AFMT_AVI_INFO0 0x4821
+#define mmDIG4_AFMT_AVI_INFO1 0x4822
+#define mmDIG4_AFMT_AVI_INFO2 0x4823
+#define mmDIG4_AFMT_AVI_INFO3 0x4824
+#define mmDIG4_AFMT_GENERIC_0 0x4828
+#define mmDIG4_AFMT_GENERIC_1 0x4829
+#define mmDIG4_AFMT_GENERIC_2 0x482A
+#define mmDIG4_AFMT_GENERIC_3 0x482B
+#define mmDIG4_AFMT_GENERIC_4 0x482C
+#define mmDIG4_AFMT_GENERIC_5 0x482D
+#define mmDIG4_AFMT_GENERIC_6 0x482E
+#define mmDIG4_AFMT_GENERIC_7 0x482F
+#define mmDIG4_AFMT_GENERIC_HDR 0x4827
+#define mmDIG4_AFMT_INFOFRAME_CONTROL0 0x484D
+#define mmDIG4_AFMT_INTERRUPT_STATUS 0x4814
+#define mmDIG4_AFMT_ISRC1_0 0x4818
+#define mmDIG4_AFMT_ISRC1_1 0x4819
+#define mmDIG4_AFMT_ISRC1_2 0x481A
+#define mmDIG4_AFMT_ISRC1_3 0x481B
+#define mmDIG4_AFMT_ISRC1_4 0x481C
+#define mmDIG4_AFMT_ISRC2_0 0x481D
+#define mmDIG4_AFMT_ISRC2_1 0x481E
+#define mmDIG4_AFMT_ISRC2_2 0x481F
+#define mmDIG4_AFMT_ISRC2_3 0x4820
+#define mmDIG4_AFMT_MPEG_INFO0 0x4825
+#define mmDIG4_AFMT_MPEG_INFO1 0x4826
+#define mmDIG4_AFMT_RAMP_CONTROL0 0x4844
+#define mmDIG4_AFMT_RAMP_CONTROL1 0x4845
+#define mmDIG4_AFMT_RAMP_CONTROL2 0x4846
+#define mmDIG4_AFMT_RAMP_CONTROL3 0x4847
+#define mmDIG4_AFMT_STATUS 0x484A
+#define mmDIG4_AFMT_VBI_PACKET_CONTROL 0x484C
+#define mmDIG4_DIG_BE_CNTL 0x4850
+#define mmDIG4_DIG_BE_EN_CNTL 0x4851
+#define mmDIG4_DIG_CLOCK_PATTERN 0x4803
+#define mmDIG4_DIG_DISPCLK_SWITCH_CNTL 0x4808
+#define mmDIG4_DIG_DISPCLK_SWITCH_STATUS 0x4809
+#define mmDIG4_DIG_FE_CNTL 0x4800
+#define mmDIG4_DIG_FIFO_STATUS 0x480A
+#define mmDIG4_DIG_LANE_ENABLE 0x488D
+#define mmDIG4_DIG_OUTPUT_CRC_CNTL 0x4801
+#define mmDIG4_DIG_OUTPUT_CRC_RESULT 0x4802
+#define mmDIG4_DIG_RANDOM_PATTERN_SEED 0x4805
+#define mmDIG4_DIG_TEST_PATTERN 0x4804
+#define mmDIG4_HDMI_ACR_32_0 0x4837
+#define mmDIG4_HDMI_ACR_32_1 0x4838
+#define mmDIG4_HDMI_ACR_44_0 0x4839
+#define mmDIG4_HDMI_ACR_44_1 0x483A
+#define mmDIG4_HDMI_ACR_48_0 0x483B
+#define mmDIG4_HDMI_ACR_48_1 0x483C
+#define mmDIG4_HDMI_ACR_PACKET_CONTROL 0x480F
+#define mmDIG4_HDMI_ACR_STATUS_0 0x483D
+#define mmDIG4_HDMI_ACR_STATUS_1 0x483E
+#define mmDIG4_HDMI_AUDIO_PACKET_CONTROL 0x480E
+#define mmDIG4_HDMI_CONTROL 0x480C
+#define mmDIG4_HDMI_GC 0x4816
+#define mmDIG4_HDMI_GENERIC_PACKET_CONTROL0 0x4813
+#define mmDIG4_HDMI_GENERIC_PACKET_CONTROL1 0x4830
+#define mmDIG4_HDMI_INFOFRAME_CONTROL0 0x4811
+#define mmDIG4_HDMI_INFOFRAME_CONTROL1 0x4812
+#define mmDIG4_HDMI_STATUS 0x480D
+#define mmDIG4_HDMI_VBI_PACKET_CONTROL 0x4810
+#define mmDIG4_LVDS_DATA_CNTL 0x488C
+#define mmDIG4_TMDS_CNTL 0x487C
+#define mmDIG4_TMDS_CONTROL0_FEEDBACK 0x487E
+#define mmDIG4_TMDS_CONTROL_CHAR 0x487D
+#define mmDIG4_TMDS_CTL0_1_GEN_CNTL 0x4886
+#define mmDIG4_TMDS_CTL2_3_GEN_CNTL 0x4887
+#define mmDIG4_TMDS_CTL_BITS 0x4883
+#define mmDIG4_TMDS_DCBALANCER_CONTROL 0x4884
+#define mmDIG4_TMDS_DEBUG 0x4882
+#define mmDIG4_TMDS_STEREOSYNC_CTL_SEL 0x487F
+#define mmDIG4_TMDS_SYNC_CHAR_PATTERN_0_1 0x4880
+#define mmDIG4_TMDS_SYNC_CHAR_PATTERN_2_3 0x4881
+#define mmDIG5_AFMT_60958_0 0x4B41
+#define mmDIG5_AFMT_60958_1 0x4B42
+#define mmDIG5_AFMT_60958_2 0x4B48
+#define mmDIG5_AFMT_AUDIO_CRC_CONTROL 0x4B43
+#define mmDIG5_AFMT_AUDIO_CRC_RESULT 0x4B49
+#define mmDIG5_AFMT_AUDIO_DBG_DTO_CNTL 0x4B52
+#define mmDIG5_AFMT_AUDIO_INFO0 0x4B3F
+#define mmDIG5_AFMT_AUDIO_INFO1 0x4B40
+#define mmDIG5_AFMT_AUDIO_PACKET_CONTROL 0x4B4B
+#define mmDIG5_AFMT_AUDIO_PACKET_CONTROL2 0x4B17
+#define mmDIG5_AFMT_AUDIO_SRC_CONTROL 0x4B4F
+#define mmDIG5_AFMT_AVI_INFO0 0x4B21
+#define mmDIG5_AFMT_AVI_INFO1 0x4B22
+#define mmDIG5_AFMT_AVI_INFO2 0x4B23
+#define mmDIG5_AFMT_AVI_INFO3 0x4B24
+#define mmDIG5_AFMT_GENERIC_0 0x4B28
+#define mmDIG5_AFMT_GENERIC_1 0x4B29
+#define mmDIG5_AFMT_GENERIC_2 0x4B2A
+#define mmDIG5_AFMT_GENERIC_3 0x4B2B
+#define mmDIG5_AFMT_GENERIC_4 0x4B2C
+#define mmDIG5_AFMT_GENERIC_5 0x4B2D
+#define mmDIG5_AFMT_GENERIC_6 0x4B2E
+#define mmDIG5_AFMT_GENERIC_7 0x4B2F
+#define mmDIG5_AFMT_GENERIC_HDR 0x4B27
+#define mmDIG5_AFMT_INFOFRAME_CONTROL0 0x4B4D
+#define mmDIG5_AFMT_INTERRUPT_STATUS 0x4B14
+#define mmDIG5_AFMT_ISRC1_0 0x4B18
+#define mmDIG5_AFMT_ISRC1_1 0x4B19
+#define mmDIG5_AFMT_ISRC1_2 0x4B1A
+#define mmDIG5_AFMT_ISRC1_3 0x4B1B
+#define mmDIG5_AFMT_ISRC1_4 0x4B1C
+#define mmDIG5_AFMT_ISRC2_0 0x4B1D
+#define mmDIG5_AFMT_ISRC2_1 0x4B1E
+#define mmDIG5_AFMT_ISRC2_2 0x4B1F
+#define mmDIG5_AFMT_ISRC2_3 0x4B20
+#define mmDIG5_AFMT_MPEG_INFO0 0x4B25
+#define mmDIG5_AFMT_MPEG_INFO1 0x4B26
+#define mmDIG5_AFMT_RAMP_CONTROL0 0x4B44
+#define mmDIG5_AFMT_RAMP_CONTROL1 0x4B45
+#define mmDIG5_AFMT_RAMP_CONTROL2 0x4B46
+#define mmDIG5_AFMT_RAMP_CONTROL3 0x4B47
+#define mmDIG5_AFMT_STATUS 0x4B4A
+#define mmDIG5_AFMT_VBI_PACKET_CONTROL 0x4B4C
+#define mmDIG5_DIG_BE_CNTL 0x4B50
+#define mmDIG5_DIG_BE_EN_CNTL 0x4B51
+#define mmDIG5_DIG_CLOCK_PATTERN 0x4B03
+#define mmDIG5_DIG_DISPCLK_SWITCH_CNTL 0x4B08
+#define mmDIG5_DIG_DISPCLK_SWITCH_STATUS 0x4B09
+#define mmDIG5_DIG_FE_CNTL 0x4B00
+#define mmDIG5_DIG_FIFO_STATUS 0x4B0A
+#define mmDIG5_DIG_LANE_ENABLE 0x4B8D
+#define mmDIG5_DIG_OUTPUT_CRC_CNTL 0x4B01
+#define mmDIG5_DIG_OUTPUT_CRC_RESULT 0x4B02
+#define mmDIG5_DIG_RANDOM_PATTERN_SEED 0x4B05
+#define mmDIG5_DIG_TEST_PATTERN 0x4B04
+#define mmDIG5_HDMI_ACR_32_0 0x4B37
+#define mmDIG5_HDMI_ACR_32_1 0x4B38
+#define mmDIG5_HDMI_ACR_44_0 0x4B39
+#define mmDIG5_HDMI_ACR_44_1 0x4B3A
+#define mmDIG5_HDMI_ACR_48_0 0x4B3B
+#define mmDIG5_HDMI_ACR_48_1 0x4B3C
+#define mmDIG5_HDMI_ACR_PACKET_CONTROL 0x4B0F
+#define mmDIG5_HDMI_ACR_STATUS_0 0x4B3D
+#define mmDIG5_HDMI_ACR_STATUS_1 0x4B3E
+#define mmDIG5_HDMI_AUDIO_PACKET_CONTROL 0x4B0E
+#define mmDIG5_HDMI_CONTROL 0x4B0C
+#define mmDIG5_HDMI_GC 0x4B16
+#define mmDIG5_HDMI_GENERIC_PACKET_CONTROL0 0x4B13
+#define mmDIG5_HDMI_GENERIC_PACKET_CONTROL1 0x4B30
+#define mmDIG5_HDMI_INFOFRAME_CONTROL0 0x4B11
+#define mmDIG5_HDMI_INFOFRAME_CONTROL1 0x4B12
+#define mmDIG5_HDMI_STATUS 0x4B0D
+#define mmDIG5_HDMI_VBI_PACKET_CONTROL 0x4B10
+#define mmDIG5_LVDS_DATA_CNTL 0x4B8C
+#define mmDIG5_TMDS_CNTL 0x4B7C
+#define mmDIG5_TMDS_CONTROL0_FEEDBACK 0x4B7E
+#define mmDIG5_TMDS_CONTROL_CHAR 0x4B7D
+#define mmDIG5_TMDS_CTL0_1_GEN_CNTL 0x4B86
+#define mmDIG5_TMDS_CTL2_3_GEN_CNTL 0x4B87
+#define mmDIG5_TMDS_CTL_BITS 0x4B83
+#define mmDIG5_TMDS_DCBALANCER_CONTROL 0x4B84
+#define mmDIG5_TMDS_DEBUG 0x4B82
+#define mmDIG5_TMDS_STEREOSYNC_CTL_SEL 0x4B7F
+#define mmDIG5_TMDS_SYNC_CHAR_PATTERN_0_1 0x4B80
+#define mmDIG5_TMDS_SYNC_CHAR_PATTERN_2_3 0x4B81
+#define mmDIG_BE_CNTL 0x1C50
+#define mmDIG_BE_EN_CNTL 0x1C51
+#define mmDIG_CLOCK_PATTERN 0x1C03
+#define mmDIG_DISPCLK_SWITCH_CNTL 0x1C08
+#define mmDIG_DISPCLK_SWITCH_STATUS 0x1C09
+#define mmDIG_FE_CNTL 0x1C00
+#define mmDIG_FIFO_STATUS 0x1C0A
+#define mmDIG_LANE_ENABLE 0x1C8D
+#define mmDIG_OUTPUT_CRC_CNTL 0x1C01
+#define mmDIG_OUTPUT_CRC_RESULT 0x1C02
+#define mmDIG_RANDOM_PATTERN_SEED 0x1C05
+#define mmDIG_SOFT_RESET 0x013D
+#define mmDIG_TEST_PATTERN 0x1C04
+#define mmDISPCLK_CGTT_BLK_CTRL_REG 0x0135
+#define mmDISPCLK_FREQ_CHANGE_CNTL 0x0131
+#define mmDISP_INTERRUPT_STATUS 0x183D
+#define mmDISP_INTERRUPT_STATUS_CONTINUE 0x183E
+#define mmDISP_INTERRUPT_STATUS_CONTINUE2 0x183F
+#define mmDISP_INTERRUPT_STATUS_CONTINUE3 0x1840
+#define mmDISP_INTERRUPT_STATUS_CONTINUE4 0x1853
+#define mmDISP_INTERRUPT_STATUS_CONTINUE5 0x1854
+#define mmDISPOUT_STEREOSYNC_SEL 0x18BF
+#define mmDISPPLL_BG_CNTL 0x013C
+#define mmDISP_TIMER_CONTROL 0x1842
+#define mmDMCU_CTRL 0x1600
+#define mmDMCU_ERAM_RD_CTRL 0x160B
+#define mmDMCU_ERAM_RD_DATA 0x160C
+#define mmDMCU_ERAM_WR_CTRL 0x1609
+#define mmDMCU_ERAM_WR_DATA 0x160A
+#define mmDMCU_EVENT_TRIGGER 0x1611
+#define mmDMCU_FW_CHECKSUM_SMPL_BYTE_POS 0x161A
+#define mmDMCU_FW_CS_HI 0x1606
+#define mmDMCU_FW_CS_LO 0x1607
+#define mmDMCU_FW_END_ADDR 0x1604
+#define mmDMCU_FW_ISR_START_ADDR 0x1605
+#define mmDMCU_FW_START_ADDR 0x1603
+#define mmDMCU_INT_CNT 0x1619
+#define mmDMCU_INTERRUPT_STATUS 0x1614
+#define mmDMCU_INTERRUPT_TO_HOST_EN_MASK 0x1615
+#define mmDMCU_INTERRUPT_TO_UC_EN_MASK 0x1616
+#define mmDMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL 0x1617
+#define mmDMCU_IRAM_RD_CTRL 0x160F
+#define mmDMCU_IRAM_RD_DATA 0x1610
+#define mmDMCU_IRAM_WR_CTRL 0x160D
+#define mmDMCU_IRAM_WR_DATA 0x160E
+#define mmDMCU_PC_START_ADDR 0x1602
+#define mmDMCU_RAM_ACCESS_CTRL 0x1608
+#define mmDMCU_STATUS 0x1601
+#define mmDMCU_TEST_DEBUG_DATA 0x1627
+#define mmDMCU_TEST_DEBUG_INDEX 0x1626
+#define mmDMCU_UC_CLK_GATING_CNTL 0x161B
+#define mmDMCU_UC_INTERNAL_INT_STATUS 0x1612
+#define mmDMIF_ADDR_CALC 0x0300
+#define mmDMIF_ADDR_CONFIG 0x02F5
+#define mmDMIF_ARBITRATION_CONTROL 0x02F9
+#define mmDMIF_CONTROL 0x02F6
+#define mmDMIF_HW_DEBUG 0x02F8
+#define mmDMIF_PG0_DPG_PIPE_ARBITRATION_CONTROL1 0x1B30
+#define mmDMIF_PG0_DPG_PIPE_ARBITRATION_CONTROL2 0x1B31
+#define mmDMIF_PG0_DPG_PIPE_DPM_CONTROL 0x1B34
+#define mmDMIF_PG0_DPG_PIPE_NB_PSTATE_CHANGE_CONTROL 0x1B36
+#define mmDMIF_PG0_DPG_PIPE_STUTTER_CONTROL 0x1B35
+#define mmDMIF_PG0_DPG_PIPE_STUTTER_CONTROL_NONLPTCH 0x1B37
+#define mmDMIF_PG0_DPG_PIPE_URGENCY_CONTROL 0x1B33
+#define mmDMIF_PG0_DPG_TEST_DEBUG_DATA 0x1B39
+#define mmDMIF_PG0_DPG_TEST_DEBUG_INDEX 0x1B38
+#define mmDMIF_PG1_DPG_PIPE_ARBITRATION_CONTROL1 0x1E30
+#define mmDMIF_PG1_DPG_PIPE_ARBITRATION_CONTROL2 0x1E31
+#define mmDMIF_PG1_DPG_PIPE_DPM_CONTROL 0x1E34
+#define mmDMIF_PG1_DPG_PIPE_NB_PSTATE_CHANGE_CONTROL 0x1E36
+#define mmDMIF_PG1_DPG_PIPE_STUTTER_CONTROL 0x1E35
+#define mmDMIF_PG1_DPG_PIPE_STUTTER_CONTROL_NONLPTCH 0x1E37
+#define mmDMIF_PG1_DPG_PIPE_URGENCY_CONTROL 0x1E33
+#define mmDMIF_PG1_DPG_TEST_DEBUG_DATA 0x1E39
+#define mmDMIF_PG1_DPG_TEST_DEBUG_INDEX 0x1E38
+#define mmDMIF_PG2_DPG_PIPE_ARBITRATION_CONTROL1 0x4130
+#define mmDMIF_PG2_DPG_PIPE_ARBITRATION_CONTROL2 0x4131
+#define mmDMIF_PG2_DPG_PIPE_DPM_CONTROL 0x4134
+#define mmDMIF_PG2_DPG_PIPE_NB_PSTATE_CHANGE_CONTROL 0x4136
+#define mmDMIF_PG2_DPG_PIPE_STUTTER_CONTROL 0x4135
+#define mmDMIF_PG2_DPG_PIPE_STUTTER_CONTROL_NONLPTCH 0x4137
+#define mmDMIF_PG2_DPG_PIPE_URGENCY_CONTROL 0x4133
+#define mmDMIF_PG2_DPG_TEST_DEBUG_DATA 0x4139
+#define mmDMIF_PG2_DPG_TEST_DEBUG_INDEX 0x4138
+#define mmDMIF_PG3_DPG_PIPE_ARBITRATION_CONTROL1 0x4430
+#define mmDMIF_PG3_DPG_PIPE_ARBITRATION_CONTROL2 0x4431
+#define mmDMIF_PG3_DPG_PIPE_DPM_CONTROL 0x4434
+#define mmDMIF_PG3_DPG_PIPE_NB_PSTATE_CHANGE_CONTROL 0x4436
+#define mmDMIF_PG3_DPG_PIPE_STUTTER_CONTROL 0x4435
+#define mmDMIF_PG3_DPG_PIPE_STUTTER_CONTROL_NONLPTCH 0x4437
+#define mmDMIF_PG3_DPG_PIPE_URGENCY_CONTROL 0x4433
+#define mmDMIF_PG3_DPG_TEST_DEBUG_DATA 0x4439
+#define mmDMIF_PG3_DPG_TEST_DEBUG_INDEX 0x4438
+#define mmDMIF_PG4_DPG_PIPE_ARBITRATION_CONTROL1 0x4730
+#define mmDMIF_PG4_DPG_PIPE_ARBITRATION_CONTROL2 0x4731
+#define mmDMIF_PG4_DPG_PIPE_DPM_CONTROL 0x4734
+#define mmDMIF_PG4_DPG_PIPE_NB_PSTATE_CHANGE_CONTROL 0x4736
+#define mmDMIF_PG4_DPG_PIPE_STUTTER_CONTROL 0x4735
+#define mmDMIF_PG4_DPG_PIPE_STUTTER_CONTROL_NONLPTCH 0x4737
+#define mmDMIF_PG4_DPG_PIPE_URGENCY_CONTROL 0x4733
+#define mmDMIF_PG4_DPG_TEST_DEBUG_DATA 0x4739
+#define mmDMIF_PG4_DPG_TEST_DEBUG_INDEX 0x4738
+#define mmDMIF_PG5_DPG_PIPE_ARBITRATION_CONTROL1 0x4A30
+#define mmDMIF_PG5_DPG_PIPE_ARBITRATION_CONTROL2 0x4A31
+#define mmDMIF_PG5_DPG_PIPE_DPM_CONTROL 0x4A34
+#define mmDMIF_PG5_DPG_PIPE_NB_PSTATE_CHANGE_CONTROL 0x4A36
+#define mmDMIF_PG5_DPG_PIPE_STUTTER_CONTROL 0x4A35
+#define mmDMIF_PG5_DPG_PIPE_STUTTER_CONTROL_NONLPTCH 0x4A37
+#define mmDMIF_PG5_DPG_PIPE_URGENCY_CONTROL 0x4A33
+#define mmDMIF_PG5_DPG_TEST_DEBUG_DATA 0x4A39
+#define mmDMIF_PG5_DPG_TEST_DEBUG_INDEX 0x4A38
+#define mmDMIF_STATUS 0x02F7
+#define mmDMIF_STATUS2 0x0301
+#define mmDMIF_TEST_DEBUG_DATA 0x0313
+#define mmDMIF_TEST_DEBUG_INDEX 0x0312
+#define mmDOUT_DCE_VCE_CONTROL 0x18FF
+#define mmDOUT_POWER_MANAGEMENT_CNTL 0x1841
+#define mmDOUT_SCRATCH0 0x1844
+#define mmDOUT_SCRATCH1 0x1845
+#define mmDOUT_SCRATCH2 0x1846
+#define mmDOUT_SCRATCH3 0x1847
+#define mmDOUT_SCRATCH4 0x1848
+#define mmDOUT_SCRATCH5 0x1849
+#define mmDOUT_SCRATCH6 0x184A
+#define mmDOUT_SCRATCH7 0x184B
+#define mmDOUT_TEST_DEBUG_DATA 0x184E
+#define mmDOUT_TEST_DEBUG_INDEX 0x184D
+#define mmDP0_DP_CONFIG 0x1CC2
+#define mmDP0_DP_DPHY_8B10B_CNTL 0x1CD3
+#define mmDP0_DP_DPHY_CNTL 0x1CD0
+#define mmDP0_DP_DPHY_CRC_CNTL 0x1CD7
+#define mmDP0_DP_DPHY_CRC_EN 0x1CD6
+#define mmDP0_DP_DPHY_CRC_MST_CNTL 0x1CC6
+#define mmDP0_DP_DPHY_CRC_MST_STATUS 0x1CC7
+#define mmDP0_DP_DPHY_CRC_RESULT 0x1CD8
+#define mmDP0_DP_DPHY_FAST_TRAINING 0x1CCE
+#define mmDP0_DP_DPHY_FAST_TRAINING_STATUS 0x1CE9
+#define mmDP0_DP_DPHY_PRBS_CNTL 0x1CD4
+#define mmDP0_DP_DPHY_SYM0 0x1CD2
+#define mmDP0_DP_DPHY_SYM1 0x1CE0
+#define mmDP0_DP_DPHY_SYM2 0x1CDF
+#define mmDP0_DP_DPHY_TRAINING_PATTERN_SEL 0x1CD1
+#define mmDP0_DP_HBR2_EYE_PATTERN 0x1CC8
+#define mmDP0_DP_LINK_CNTL 0x1CC0
+#define mmDP0_DP_LINK_FRAMING_CNTL 0x1CCC
+#define mmDP0_DP_MSA_COLORIMETRY 0x1CDA
+#define mmDP0_DP_MSA_MISC 0x1CC5
+#define mmDP0_DP_MSA_V_TIMING_OVERRIDE1 0x1CEA
+#define mmDP0_DP_MSA_V_TIMING_OVERRIDE2 0x1CEB
+#define mmDP0_DP_MSE_LINK_TIMING 0x1CE8
+#define mmDP0_DP_MSE_MISC_CNTL 0x1CDB
+#define mmDP0_DP_MSE_RATE_CNTL 0x1CE1
+#define mmDP0_DP_MSE_RATE_UPDATE 0x1CE3
+#define mmDP0_DP_MSE_SAT0 0x1CE4
+#define mmDP0_DP_MSE_SAT1 0x1CE5
+#define mmDP0_DP_MSE_SAT2 0x1CE6
+#define mmDP0_DP_MSE_SAT_UPDATE 0x1CE7
+#define mmDP0_DP_PIXEL_FORMAT 0x1CC1
+#define mmDP0_DP_SEC_AUD_M 0x1CA7
+#define mmDP0_DP_SEC_AUD_M_READBACK 0x1CA8
+#define mmDP0_DP_SEC_AUD_N 0x1CA5
+#define mmDP0_DP_SEC_AUD_N_READBACK 0x1CA6
+#define mmDP0_DP_SEC_CNTL 0x1CA0
+#define mmDP0_DP_SEC_CNTL1 0x1CAB
+#define mmDP0_DP_SEC_FRAMING1 0x1CA1
+#define mmDP0_DP_SEC_FRAMING2 0x1CA2
+#define mmDP0_DP_SEC_FRAMING3 0x1CA3
+#define mmDP0_DP_SEC_FRAMING4 0x1CA4
+#define mmDP0_DP_SEC_PACKET_CNTL 0x1CAA
+#define mmDP0_DP_SEC_TIMESTAMP 0x1CA9
+#define mmDP0_DP_STEER_FIFO 0x1CC4
+#define mmDP0_DP_TEST_DEBUG_DATA 0x1CFD
+#define mmDP0_DP_TEST_DEBUG_INDEX 0x1CFC
+#define mmDP0_DP_VID_INTERRUPT_CNTL 0x1CCF
+#define mmDP0_DP_VID_M 0x1CCB
+#define mmDP0_DP_VID_MSA_VBID 0x1CCD
+#define mmDP0_DP_VID_N 0x1CCA
+#define mmDP0_DP_VID_STREAM_CNTL 0x1CC3
+#define mmDP0_DP_VID_TIMING 0x1CC9
+#define mmDP1_DP_CONFIG 0x1FC2
+#define mmDP1_DP_DPHY_8B10B_CNTL 0x1FD3
+#define mmDP1_DP_DPHY_CNTL 0x1FD0
+#define mmDP1_DP_DPHY_CRC_CNTL 0x1FD7
+#define mmDP1_DP_DPHY_CRC_EN 0x1FD6
+#define mmDP1_DP_DPHY_CRC_MST_CNTL 0x1FC6
+#define mmDP1_DP_DPHY_CRC_MST_STATUS 0x1FC7
+#define mmDP1_DP_DPHY_CRC_RESULT 0x1FD8
+#define mmDP1_DP_DPHY_FAST_TRAINING 0x1FCE
+#define mmDP1_DP_DPHY_FAST_TRAINING_STATUS 0x1FE9
+#define mmDP1_DP_DPHY_PRBS_CNTL 0x1FD4
+#define mmDP1_DP_DPHY_SYM0 0x1FD2
+#define mmDP1_DP_DPHY_SYM1 0x1FE0
+#define mmDP1_DP_DPHY_SYM2 0x1FDF
+#define mmDP1_DP_DPHY_TRAINING_PATTERN_SEL 0x1FD1
+#define mmDP1_DP_HBR2_EYE_PATTERN 0x1FC8
+#define mmDP1_DP_LINK_CNTL 0x1FC0
+#define mmDP1_DP_LINK_FRAMING_CNTL 0x1FCC
+#define mmDP1_DP_MSA_COLORIMETRY 0x1FDA
+#define mmDP1_DP_MSA_MISC 0x1FC5
+#define mmDP1_DP_MSA_V_TIMING_OVERRIDE1 0x1FEA
+#define mmDP1_DP_MSA_V_TIMING_OVERRIDE2 0x1FEB
+#define mmDP1_DP_MSE_LINK_TIMING 0x1FE8
+#define mmDP1_DP_MSE_MISC_CNTL 0x1FDB
+#define mmDP1_DP_MSE_RATE_CNTL 0x1FE1
+#define mmDP1_DP_MSE_RATE_UPDATE 0x1FE3
+#define mmDP1_DP_MSE_SAT0 0x1FE4
+#define mmDP1_DP_MSE_SAT1 0x1FE5
+#define mmDP1_DP_MSE_SAT2 0x1FE6
+#define mmDP1_DP_MSE_SAT_UPDATE 0x1FE7
+#define mmDP1_DP_PIXEL_FORMAT 0x1FC1
+#define mmDP1_DP_SEC_AUD_M 0x1FA7
+#define mmDP1_DP_SEC_AUD_M_READBACK 0x1FA8
+#define mmDP1_DP_SEC_AUD_N 0x1FA5
+#define mmDP1_DP_SEC_AUD_N_READBACK 0x1FA6
+#define mmDP1_DP_SEC_CNTL 0x1FA0
+#define mmDP1_DP_SEC_CNTL1 0x1FAB
+#define mmDP1_DP_SEC_FRAMING1 0x1FA1
+#define mmDP1_DP_SEC_FRAMING2 0x1FA2
+#define mmDP1_DP_SEC_FRAMING3 0x1FA3
+#define mmDP1_DP_SEC_FRAMING4 0x1FA4
+#define mmDP1_DP_SEC_PACKET_CNTL 0x1FAA
+#define mmDP1_DP_SEC_TIMESTAMP 0x1FA9
+#define mmDP1_DP_STEER_FIFO 0x1FC4
+#define mmDP1_DP_TEST_DEBUG_DATA 0x1FFD
+#define mmDP1_DP_TEST_DEBUG_INDEX 0x1FFC
+#define mmDP1_DP_VID_INTERRUPT_CNTL 0x1FCF
+#define mmDP1_DP_VID_M 0x1FCB
+#define mmDP1_DP_VID_MSA_VBID 0x1FCD
+#define mmDP1_DP_VID_N 0x1FCA
+#define mmDP1_DP_VID_STREAM_CNTL 0x1FC3
+#define mmDP1_DP_VID_TIMING 0x1FC9
+#define mmDP2_DP_CONFIG 0x42C2
+#define mmDP2_DP_DPHY_8B10B_CNTL 0x42D3
+#define mmDP2_DP_DPHY_CNTL 0x42D0
+#define mmDP2_DP_DPHY_CRC_CNTL 0x42D7
+#define mmDP2_DP_DPHY_CRC_EN 0x42D6
+#define mmDP2_DP_DPHY_CRC_MST_CNTL 0x42C6
+#define mmDP2_DP_DPHY_CRC_MST_STATUS 0x42C7
+#define mmDP2_DP_DPHY_CRC_RESULT 0x42D8
+#define mmDP2_DP_DPHY_FAST_TRAINING 0x42CE
+#define mmDP2_DP_DPHY_FAST_TRAINING_STATUS 0x42E9
+#define mmDP2_DP_DPHY_PRBS_CNTL 0x42D4
+#define mmDP2_DP_DPHY_SYM0 0x42D2
+#define mmDP2_DP_DPHY_SYM1 0x42E0
+#define mmDP2_DP_DPHY_SYM2 0x42DF
+#define mmDP2_DP_DPHY_TRAINING_PATTERN_SEL 0x42D1
+#define mmDP2_DP_HBR2_EYE_PATTERN 0x42C8
+#define mmDP2_DP_LINK_CNTL 0x42C0
+#define mmDP2_DP_LINK_FRAMING_CNTL 0x42CC
+#define mmDP2_DP_MSA_COLORIMETRY 0x42DA
+#define mmDP2_DP_MSA_MISC 0x42C5
+#define mmDP2_DP_MSA_V_TIMING_OVERRIDE1 0x42EA
+#define mmDP2_DP_MSA_V_TIMING_OVERRIDE2 0x42EB
+#define mmDP2_DP_MSE_LINK_TIMING 0x42E8
+#define mmDP2_DP_MSE_MISC_CNTL 0x42DB
+#define mmDP2_DP_MSE_RATE_CNTL 0x42E1
+#define mmDP2_DP_MSE_RATE_UPDATE 0x42E3
+#define mmDP2_DP_MSE_SAT0 0x42E4
+#define mmDP2_DP_MSE_SAT1 0x42E5
+#define mmDP2_DP_MSE_SAT2 0x42E6
+#define mmDP2_DP_MSE_SAT_UPDATE 0x42E7
+#define mmDP2_DP_PIXEL_FORMAT 0x42C1
+#define mmDP2_DP_SEC_AUD_M 0x42A7
+#define mmDP2_DP_SEC_AUD_M_READBACK 0x42A8
+#define mmDP2_DP_SEC_AUD_N 0x42A5
+#define mmDP2_DP_SEC_AUD_N_READBACK 0x42A6
+#define mmDP2_DP_SEC_CNTL 0x42A0
+#define mmDP2_DP_SEC_CNTL1 0x42AB
+#define mmDP2_DP_SEC_FRAMING1 0x42A1
+#define mmDP2_DP_SEC_FRAMING2 0x42A2
+#define mmDP2_DP_SEC_FRAMING3 0x42A3
+#define mmDP2_DP_SEC_FRAMING4 0x42A4
+#define mmDP2_DP_SEC_PACKET_CNTL 0x42AA
+#define mmDP2_DP_SEC_TIMESTAMP 0x42A9
+#define mmDP2_DP_STEER_FIFO 0x42C4
+#define mmDP2_DP_TEST_DEBUG_DATA 0x42FD
+#define mmDP2_DP_TEST_DEBUG_INDEX 0x42FC
+#define mmDP2_DP_VID_INTERRUPT_CNTL 0x42CF
+#define mmDP2_DP_VID_M 0x42CB
+#define mmDP2_DP_VID_MSA_VBID 0x42CD
+#define mmDP2_DP_VID_N 0x42CA
+#define mmDP2_DP_VID_STREAM_CNTL 0x42C3
+#define mmDP2_DP_VID_TIMING 0x42C9
+#define mmDP3_DP_CONFIG 0x45C2
+#define mmDP3_DP_DPHY_8B10B_CNTL 0x45D3
+#define mmDP3_DP_DPHY_CNTL 0x45D0
+#define mmDP3_DP_DPHY_CRC_CNTL 0x45D7
+#define mmDP3_DP_DPHY_CRC_EN 0x45D6
+#define mmDP3_DP_DPHY_CRC_MST_CNTL 0x45C6
+#define mmDP3_DP_DPHY_CRC_MST_STATUS 0x45C7
+#define mmDP3_DP_DPHY_CRC_RESULT 0x45D8
+#define mmDP3_DP_DPHY_FAST_TRAINING 0x45CE
+#define mmDP3_DP_DPHY_FAST_TRAINING_STATUS 0x45E9
+#define mmDP3_DP_DPHY_PRBS_CNTL 0x45D4
+#define mmDP3_DP_DPHY_SYM0 0x45D2
+#define mmDP3_DP_DPHY_SYM1 0x45E0
+#define mmDP3_DP_DPHY_SYM2 0x45DF
+#define mmDP3_DP_DPHY_TRAINING_PATTERN_SEL 0x45D1
+#define mmDP3_DP_HBR2_EYE_PATTERN 0x45C8
+#define mmDP3_DP_LINK_CNTL 0x45C0
+#define mmDP3_DP_LINK_FRAMING_CNTL 0x45CC
+#define mmDP3_DP_MSA_COLORIMETRY 0x45DA
+#define mmDP3_DP_MSA_MISC 0x45C5
+#define mmDP3_DP_MSA_V_TIMING_OVERRIDE1 0x45EA
+#define mmDP3_DP_MSA_V_TIMING_OVERRIDE2 0x45EB
+#define mmDP3_DP_MSE_LINK_TIMING 0x45E8
+#define mmDP3_DP_MSE_MISC_CNTL 0x45DB
+#define mmDP3_DP_MSE_RATE_CNTL 0x45E1
+#define mmDP3_DP_MSE_RATE_UPDATE 0x45E3
+#define mmDP3_DP_MSE_SAT0 0x45E4
+#define mmDP3_DP_MSE_SAT1 0x45E5
+#define mmDP3_DP_MSE_SAT2 0x45E6
+#define mmDP3_DP_MSE_SAT_UPDATE 0x45E7
+#define mmDP3_DP_PIXEL_FORMAT 0x45C1
+#define mmDP3_DP_SEC_AUD_M 0x45A7
+#define mmDP3_DP_SEC_AUD_M_READBACK 0x45A8
+#define mmDP3_DP_SEC_AUD_N 0x45A5
+#define mmDP3_DP_SEC_AUD_N_READBACK 0x45A6
+#define mmDP3_DP_SEC_CNTL 0x45A0
+#define mmDP3_DP_SEC_CNTL1 0x45AB
+#define mmDP3_DP_SEC_FRAMING1 0x45A1
+#define mmDP3_DP_SEC_FRAMING2 0x45A2
+#define mmDP3_DP_SEC_FRAMING3 0x45A3
+#define mmDP3_DP_SEC_FRAMING4 0x45A4
+#define mmDP3_DP_SEC_PACKET_CNTL 0x45AA
+#define mmDP3_DP_SEC_TIMESTAMP 0x45A9
+#define mmDP3_DP_STEER_FIFO 0x45C4
+#define mmDP3_DP_TEST_DEBUG_DATA 0x45FD
+#define mmDP3_DP_TEST_DEBUG_INDEX 0x45FC
+#define mmDP3_DP_VID_INTERRUPT_CNTL 0x45CF
+#define mmDP3_DP_VID_M 0x45CB
+#define mmDP3_DP_VID_MSA_VBID 0x45CD
+#define mmDP3_DP_VID_N 0x45CA
+#define mmDP3_DP_VID_STREAM_CNTL 0x45C3
+#define mmDP3_DP_VID_TIMING 0x45C9
+#define mmDP4_DP_CONFIG 0x48C2
+#define mmDP4_DP_DPHY_8B10B_CNTL 0x48D3
+#define mmDP4_DP_DPHY_CNTL 0x48D0
+#define mmDP4_DP_DPHY_CRC_CNTL 0x48D7
+#define mmDP4_DP_DPHY_CRC_EN 0x48D6
+#define mmDP4_DP_DPHY_CRC_MST_CNTL 0x48C6
+#define mmDP4_DP_DPHY_CRC_MST_STATUS 0x48C7
+#define mmDP4_DP_DPHY_CRC_RESULT 0x48D8
+#define mmDP4_DP_DPHY_FAST_TRAINING 0x48CE
+#define mmDP4_DP_DPHY_FAST_TRAINING_STATUS 0x48E9
+#define mmDP4_DP_DPHY_PRBS_CNTL 0x48D4
+#define mmDP4_DP_DPHY_SYM0 0x48D2
+#define mmDP4_DP_DPHY_SYM1 0x48E0
+#define mmDP4_DP_DPHY_SYM2 0x48DF
+#define mmDP4_DP_DPHY_TRAINING_PATTERN_SEL 0x48D1
+#define mmDP4_DP_HBR2_EYE_PATTERN 0x48C8
+#define mmDP4_DP_LINK_CNTL 0x48C0
+#define mmDP4_DP_LINK_FRAMING_CNTL 0x48CC
+#define mmDP4_DP_MSA_COLORIMETRY 0x48DA
+#define mmDP4_DP_MSA_MISC 0x48C5
+#define mmDP4_DP_MSA_V_TIMING_OVERRIDE1 0x48EA
+#define mmDP4_DP_MSA_V_TIMING_OVERRIDE2 0x48EB
+#define mmDP4_DP_MSE_LINK_TIMING 0x48E8
+#define mmDP4_DP_MSE_MISC_CNTL 0x48DB
+#define mmDP4_DP_MSE_RATE_CNTL 0x48E1
+#define mmDP4_DP_MSE_RATE_UPDATE 0x48E3
+#define mmDP4_DP_MSE_SAT0 0x48E4
+#define mmDP4_DP_MSE_SAT1 0x48E5
+#define mmDP4_DP_MSE_SAT2 0x48E6
+#define mmDP4_DP_MSE_SAT_UPDATE 0x48E7
+#define mmDP4_DP_PIXEL_FORMAT 0x48C1
+#define mmDP4_DP_SEC_AUD_M 0x48A7
+#define mmDP4_DP_SEC_AUD_M_READBACK 0x48A8
+#define mmDP4_DP_SEC_AUD_N 0x48A5
+#define mmDP4_DP_SEC_AUD_N_READBACK 0x48A6
+#define mmDP4_DP_SEC_CNTL 0x48A0
+#define mmDP4_DP_SEC_CNTL1 0x48AB
+#define mmDP4_DP_SEC_FRAMING1 0x48A1
+#define mmDP4_DP_SEC_FRAMING2 0x48A2
+#define mmDP4_DP_SEC_FRAMING3 0x48A3
+#define mmDP4_DP_SEC_FRAMING4 0x48A4
+#define mmDP4_DP_SEC_PACKET_CNTL 0x48AA
+#define mmDP4_DP_SEC_TIMESTAMP 0x48A9
+#define mmDP4_DP_STEER_FIFO 0x48C4
+#define mmDP4_DP_TEST_DEBUG_DATA 0x48FD
+#define mmDP4_DP_TEST_DEBUG_INDEX 0x48FC
+#define mmDP4_DP_VID_INTERRUPT_CNTL 0x48CF
+#define mmDP4_DP_VID_M 0x48CB
+#define mmDP4_DP_VID_MSA_VBID 0x48CD
+#define mmDP4_DP_VID_N 0x48CA
+#define mmDP4_DP_VID_STREAM_CNTL 0x48C3
+#define mmDP4_DP_VID_TIMING 0x48C9
+#define mmDP5_DP_CONFIG 0x4BC2
+#define mmDP5_DP_DPHY_8B10B_CNTL 0x4BD3
+#define mmDP5_DP_DPHY_CNTL 0x4BD0
+#define mmDP5_DP_DPHY_CRC_CNTL 0x4BD7
+#define mmDP5_DP_DPHY_CRC_EN 0x4BD6
+#define mmDP5_DP_DPHY_CRC_MST_CNTL 0x4BC6
+#define mmDP5_DP_DPHY_CRC_MST_STATUS 0x4BC7
+#define mmDP5_DP_DPHY_CRC_RESULT 0x4BD8
+#define mmDP5_DP_DPHY_FAST_TRAINING 0x4BCE
+#define mmDP5_DP_DPHY_FAST_TRAINING_STATUS 0x4BE9
+#define mmDP5_DP_DPHY_PRBS_CNTL 0x4BD4
+#define mmDP5_DP_DPHY_SYM0 0x4BD2
+#define mmDP5_DP_DPHY_SYM1 0x4BE0
+#define mmDP5_DP_DPHY_SYM2 0x4BDF
+#define mmDP5_DP_DPHY_TRAINING_PATTERN_SEL 0x4BD1
+#define mmDP5_DP_HBR2_EYE_PATTERN 0x4BC8
+#define mmDP5_DP_LINK_CNTL 0x4BC0
+#define mmDP5_DP_LINK_FRAMING_CNTL 0x4BCC
+#define mmDP5_DP_MSA_COLORIMETRY 0x4BDA
+#define mmDP5_DP_MSA_MISC 0x4BC5
+#define mmDP5_DP_MSA_V_TIMING_OVERRIDE1 0x4BEA
+#define mmDP5_DP_MSA_V_TIMING_OVERRIDE2 0x4BEB
+#define mmDP5_DP_MSE_LINK_TIMING 0x4BE8
+#define mmDP5_DP_MSE_MISC_CNTL 0x4BDB
+#define mmDP5_DP_MSE_RATE_CNTL 0x4BE1
+#define mmDP5_DP_MSE_RATE_UPDATE 0x4BE3
+#define mmDP5_DP_MSE_SAT0 0x4BE4
+#define mmDP5_DP_MSE_SAT1 0x4BE5
+#define mmDP5_DP_MSE_SAT2 0x4BE6
+#define mmDP5_DP_MSE_SAT_UPDATE 0x4BE7
+#define mmDP5_DP_PIXEL_FORMAT 0x4BC1
+#define mmDP5_DP_SEC_AUD_M 0x4BA7
+#define mmDP5_DP_SEC_AUD_M_READBACK 0x4BA8
+#define mmDP5_DP_SEC_AUD_N 0x4BA5
+#define mmDP5_DP_SEC_AUD_N_READBACK 0x4BA6
+#define mmDP5_DP_SEC_CNTL 0x4BA0
+#define mmDP5_DP_SEC_CNTL1 0x4BAB
+#define mmDP5_DP_SEC_FRAMING1 0x4BA1
+#define mmDP5_DP_SEC_FRAMING2 0x4BA2
+#define mmDP5_DP_SEC_FRAMING3 0x4BA3
+#define mmDP5_DP_SEC_FRAMING4 0x4BA4
+#define mmDP5_DP_SEC_PACKET_CNTL 0x4BAA
+#define mmDP5_DP_SEC_TIMESTAMP 0x4BA9
+#define mmDP5_DP_STEER_FIFO 0x4BC4
+#define mmDP5_DP_TEST_DEBUG_DATA 0x4BFD
+#define mmDP5_DP_TEST_DEBUG_INDEX 0x4BFC
+#define mmDP5_DP_VID_INTERRUPT_CNTL 0x4BCF
+#define mmDP5_DP_VID_M 0x4BCB
+#define mmDP5_DP_VID_MSA_VBID 0x4BCD
+#define mmDP5_DP_VID_N 0x4BCA
+#define mmDP5_DP_VID_STREAM_CNTL 0x4BC3
+#define mmDP5_DP_VID_TIMING 0x4BC9
+#define mmDP_AUX0_AUX_ARB_CONTROL 0x1882
+#define mmDP_AUX0_AUX_CONTROL 0x1880
+#define mmDP_AUX0_AUX_DPHY_RX_CONTROL0 0x188A
+#define mmDP_AUX0_AUX_DPHY_RX_CONTROL1 0x188B
+#define mmDP_AUX0_AUX_DPHY_RX_STATUS 0x188D
+#define mmDP_AUX0_AUX_DPHY_TX_CONTROL 0x1889
+#define mmDP_AUX0_AUX_DPHY_TX_REF_CONTROL 0x1888
+#define mmDP_AUX0_AUX_DPHY_TX_STATUS 0x188C
+#define mmDP_AUX0_AUX_GTC_SYNC_CONTROL 0x188E
+#define mmDP_AUX0_AUX_GTC_SYNC_DATA 0x1890
+#define mmDP_AUX0_AUX_INTERRUPT_CONTROL 0x1883
+#define mmDP_AUX0_AUX_LS_DATA 0x1887
+#define mmDP_AUX0_AUX_LS_STATUS 0x1885
+#define mmDP_AUX0_AUX_SW_CONTROL 0x1881
+#define mmDP_AUX0_AUX_SW_DATA 0x1886
+#define mmDP_AUX0_AUX_SW_STATUS 0x1884
+#define mmDP_AUX1_AUX_ARB_CONTROL 0x1896
+#define mmDP_AUX1_AUX_CONTROL 0x1894
+#define mmDP_AUX1_AUX_DPHY_RX_CONTROL0 0x189E
+#define mmDP_AUX1_AUX_DPHY_RX_CONTROL1 0x189F
+#define mmDP_AUX1_AUX_DPHY_RX_STATUS 0x18A1
+#define mmDP_AUX1_AUX_DPHY_TX_CONTROL 0x189D
+#define mmDP_AUX1_AUX_DPHY_TX_REF_CONTROL 0x189C
+#define mmDP_AUX1_AUX_DPHY_TX_STATUS 0x18A0
+#define mmDP_AUX1_AUX_GTC_SYNC_CONTROL 0x18A2
+#define mmDP_AUX1_AUX_GTC_SYNC_DATA 0x18A4
+#define mmDP_AUX1_AUX_INTERRUPT_CONTROL 0x1897
+#define mmDP_AUX1_AUX_LS_DATA 0x189B
+#define mmDP_AUX1_AUX_LS_STATUS 0x1899
+#define mmDP_AUX1_AUX_SW_CONTROL 0x1895
+#define mmDP_AUX1_AUX_SW_DATA 0x189A
+#define mmDP_AUX1_AUX_SW_STATUS 0x1898
+#define mmDP_AUX2_AUX_ARB_CONTROL 0x18AA
+#define mmDP_AUX2_AUX_CONTROL 0x18A8
+#define mmDP_AUX2_AUX_DPHY_RX_CONTROL0 0x18B2
+#define mmDP_AUX2_AUX_DPHY_RX_CONTROL1 0x18B3
+#define mmDP_AUX2_AUX_DPHY_RX_STATUS 0x18B5
+#define mmDP_AUX2_AUX_DPHY_TX_CONTROL 0x18B1
+#define mmDP_AUX2_AUX_DPHY_TX_REF_CONTROL 0x18B0
+#define mmDP_AUX2_AUX_DPHY_TX_STATUS 0x18B4
+#define mmDP_AUX2_AUX_GTC_SYNC_CONTROL 0x18B6
+#define mmDP_AUX2_AUX_GTC_SYNC_DATA 0x18B8
+#define mmDP_AUX2_AUX_INTERRUPT_CONTROL 0x18AB
+#define mmDP_AUX2_AUX_LS_DATA 0x18AF
+#define mmDP_AUX2_AUX_LS_STATUS 0x18AD
+#define mmDP_AUX2_AUX_SW_CONTROL 0x18A9
+#define mmDP_AUX2_AUX_SW_DATA 0x18AE
+#define mmDP_AUX2_AUX_SW_STATUS 0x18AC
+#define mmDP_AUX3_AUX_ARB_CONTROL 0x18C2
+#define mmDP_AUX3_AUX_CONTROL 0x18C0
+#define mmDP_AUX3_AUX_DPHY_RX_CONTROL0 0x18CA
+#define mmDP_AUX3_AUX_DPHY_RX_CONTROL1 0x18CB
+#define mmDP_AUX3_AUX_DPHY_RX_STATUS 0x18CD
+#define mmDP_AUX3_AUX_DPHY_TX_CONTROL 0x18C9
+#define mmDP_AUX3_AUX_DPHY_TX_REF_CONTROL 0x18C8
+#define mmDP_AUX3_AUX_DPHY_TX_STATUS 0x18CC
+#define mmDP_AUX3_AUX_GTC_SYNC_CONTROL 0x18CE
+#define mmDP_AUX3_AUX_GTC_SYNC_DATA 0x18D0
+#define mmDP_AUX3_AUX_INTERRUPT_CONTROL 0x18C3
+#define mmDP_AUX3_AUX_LS_DATA 0x18C7
+#define mmDP_AUX3_AUX_LS_STATUS 0x18C5
+#define mmDP_AUX3_AUX_SW_CONTROL 0x18C1
+#define mmDP_AUX3_AUX_SW_DATA 0x18C6
+#define mmDP_AUX3_AUX_SW_STATUS 0x18C4
+#define mmDP_AUX4_AUX_ARB_CONTROL 0x18D6
+#define mmDP_AUX4_AUX_CONTROL 0x18D4
+#define mmDP_AUX4_AUX_DPHY_RX_CONTROL0 0x18DE
+#define mmDP_AUX4_AUX_DPHY_RX_CONTROL1 0x18DF
+#define mmDP_AUX4_AUX_DPHY_RX_STATUS 0x18E1
+#define mmDP_AUX4_AUX_DPHY_TX_CONTROL 0x18DD
+#define mmDP_AUX4_AUX_DPHY_TX_REF_CONTROL 0x18DC
+#define mmDP_AUX4_AUX_DPHY_TX_STATUS 0x18E0
+#define mmDP_AUX4_AUX_GTC_SYNC_CONTROL 0x18E2
+#define mmDP_AUX4_AUX_GTC_SYNC_DATA 0x18E4
+#define mmDP_AUX4_AUX_INTERRUPT_CONTROL 0x18D7
+#define mmDP_AUX4_AUX_LS_DATA 0x18DB
+#define mmDP_AUX4_AUX_LS_STATUS 0x18D9
+#define mmDP_AUX4_AUX_SW_CONTROL 0x18D5
+#define mmDP_AUX4_AUX_SW_DATA 0x18DA
+#define mmDP_AUX4_AUX_SW_STATUS 0x18D8
+#define mmDP_AUX5_AUX_ARB_CONTROL 0x18EA
+#define mmDP_AUX5_AUX_CONTROL 0x18E8
+#define mmDP_AUX5_AUX_DPHY_RX_CONTROL0 0x18F2
+#define mmDP_AUX5_AUX_DPHY_RX_CONTROL1 0x18F3
+#define mmDP_AUX5_AUX_DPHY_RX_STATUS 0x18F5
+#define mmDP_AUX5_AUX_DPHY_TX_CONTROL 0x18F1
+#define mmDP_AUX5_AUX_DPHY_TX_REF_CONTROL 0x18F0
+#define mmDP_AUX5_AUX_DPHY_TX_STATUS 0x18F4
+#define mmDP_AUX5_AUX_GTC_SYNC_CONTROL 0x18F6
+#define mmDP_AUX5_AUX_GTC_SYNC_DATA 0x18F8
+#define mmDP_AUX5_AUX_INTERRUPT_CONTROL 0x18EB
+#define mmDP_AUX5_AUX_LS_DATA 0x18EF
+#define mmDP_AUX5_AUX_LS_STATUS 0x18ED
+#define mmDP_AUX5_AUX_SW_CONTROL 0x18E9
+#define mmDP_AUX5_AUX_SW_DATA 0x18EE
+#define mmDP_AUX5_AUX_SW_STATUS 0x18EC
+#define mmDP_CONFIG 0x1CC2
+#define mmDP_DPHY_8B10B_CNTL 0x1CD3
+#define mmDP_DPHY_CNTL 0x1CD0
+#define mmDP_DPHY_CRC_CNTL 0x1CD7
+#define mmDP_DPHY_CRC_EN 0x1CD6
+#define mmDP_DPHY_CRC_MST_CNTL 0x1CC6
+#define mmDP_DPHY_CRC_MST_STATUS 0x1CC7
+#define mmDP_DPHY_CRC_RESULT 0x1CD8
+#define mmDP_DPHY_FAST_TRAINING 0x1CCE
+#define mmDP_DPHY_FAST_TRAINING_STATUS 0x1CE9
+#define mmDP_DPHY_PRBS_CNTL 0x1CD4
+#define mmDP_DPHY_SYM0 0x1CD2
+#define mmDP_DPHY_SYM1 0x1CE0
+#define mmDP_DPHY_SYM2 0x1CDF
+#define mmDP_DPHY_TRAINING_PATTERN_SEL 0x1CD1
+#define mmDP_DTO0_MODULO 0x0142
+#define mmDP_DTO0_PHASE 0x0141
+#define mmDP_DTO1_MODULO 0x0146
+#define mmDP_DTO1_PHASE 0x0145
+#define mmDP_DTO2_MODULO 0x014A
+#define mmDP_DTO2_PHASE 0x0149
+#define mmDP_DTO3_MODULO 0x014E
+#define mmDP_DTO3_PHASE 0x014D
+#define mmDP_DTO4_MODULO 0x0152
+#define mmDP_DTO4_PHASE 0x0151
+#define mmDP_DTO5_MODULO 0x0156
+#define mmDP_DTO5_PHASE 0x0155
+#define mmDPG_PIPE_ARBITRATION_CONTROL1 0x1B30
+#define mmDPG_PIPE_ARBITRATION_CONTROL2 0x1B31
+#define mmDPG_PIPE_DPM_CONTROL 0x1B34
+#define mmDPG_PIPE_NB_PSTATE_CHANGE_CONTROL 0x1B36
+#define mmDPG_PIPE_STUTTER_CONTROL 0x1B35
+#define mmDPG_PIPE_STUTTER_CONTROL_NONLPTCH 0x1B37
+#define mmDPG_PIPE_URGENCY_CONTROL 0x1B33
+#define mmDPG_TEST_DEBUG_DATA 0x1B39
+#define mmDPG_TEST_DEBUG_INDEX 0x1B38
+#define mmDP_HBR2_EYE_PATTERN 0x1CC8
+#define mmDP_LINK_CNTL 0x1CC0
+#define mmDP_LINK_FRAMING_CNTL 0x1CCC
+#define mmDP_MSA_COLORIMETRY 0x1CDA
+#define mmDP_MSA_MISC 0x1CC5
+#define mmDP_MSA_V_TIMING_OVERRIDE1 0x1CEA
+#define mmDP_MSA_V_TIMING_OVERRIDE2 0x1CEB
+#define mmDP_MSE_LINK_TIMING 0x1CE8
+#define mmDP_MSE_MISC_CNTL 0x1CDB
+#define mmDP_MSE_RATE_CNTL 0x1CE1
+#define mmDP_MSE_RATE_UPDATE 0x1CE3
+#define mmDP_MSE_SAT0 0x1CE4
+#define mmDP_MSE_SAT1 0x1CE5
+#define mmDP_MSE_SAT2 0x1CE6
+#define mmDP_MSE_SAT_UPDATE 0x1CE7
+#define mmDP_PIXEL_FORMAT 0x1CC1
+#define mmDP_SEC_AUD_M 0x1CA7
+#define mmDP_SEC_AUD_M_READBACK 0x1CA8
+#define mmDP_SEC_AUD_N 0x1CA5
+#define mmDP_SEC_AUD_N_READBACK 0x1CA6
+#define mmDP_SEC_CNTL 0x1CA0
+#define mmDP_SEC_CNTL1 0x1CAB
+#define mmDP_SEC_FRAMING1 0x1CA1
+#define mmDP_SEC_FRAMING2 0x1CA2
+#define mmDP_SEC_FRAMING3 0x1CA3
+#define mmDP_SEC_FRAMING4 0x1CA4
+#define mmDP_SEC_PACKET_CNTL 0x1CAA
+#define mmDP_SEC_TIMESTAMP 0x1CA9
+#define mmDP_STEER_FIFO 0x1CC4
+#define mmDP_TEST_DEBUG_DATA 0x1CFD
+#define mmDP_TEST_DEBUG_INDEX 0x1CFC
+#define mmDP_VID_INTERRUPT_CNTL 0x1CCF
+#define mmDP_VID_M 0x1CCB
+#define mmDP_VID_MSA_VBID 0x1CCD
+#define mmDP_VID_N 0x1CCA
+#define mmDP_VID_STREAM_CNTL 0x1CC3
+#define mmDP_VID_TIMING 0x1CC9
+#define mmDVOACLKC_CNTL 0x016A
+#define mmDVOACLKC_MVP_CNTL 0x0169
+#define mmDVOACLKD_CNTL 0x0168
+#define mmDVO_CLK_ENABLE 0x0129
+#define mmDVO_CONTROL 0x185B
+#define mmDVO_CRC2_SIG_MASK 0x185D
+#define mmDVO_CRC2_SIG_RESULT 0x185E
+#define mmDVO_CRC_EN 0x185C
+#define mmDVO_ENABLE 0x1858
+#define mmDVO_FIFO_ERROR_STATUS 0x185F
+#define mmDVO_OUTPUT 0x185A
+#define mmDVO_SKEW_ADJUST 0x197D
+#define mmDVO_SOURCE_SELECT 0x1859
+#define mmDVO_STRENGTH_CONTROL 0x197B
+#define mmDVO_VREF_CONTROL 0x197C
+#define mmEXT_OVERSCAN_LEFT_RIGHT 0x1B5E
+#define mmEXT_OVERSCAN_TOP_BOTTOM 0x1B5F
+#define mmFBC_CLIENT_REGION_MASK 0x16EB
+#define mmFBC_CNTL 0x16D0
+#define mmFBC_COMP_CNTL 0x16D4
+#define mmFBC_COMP_MODE 0x16D5
+#define mmFBC_CSM_REGION_OFFSET_01 0x16E9
+#define mmFBC_CSM_REGION_OFFSET_23 0x16EA
+#define mmFBC_DEBUG0 0x16D6
+#define mmFBC_DEBUG1 0x16D7
+#define mmFBC_DEBUG2 0x16D8
+#define mmFBC_DEBUG_COMP 0x16EC
+#define mmFBC_DEBUG_CSR 0x16ED
+#define mmFBC_DEBUG_CSR_RDATA 0x16EE
+#define mmFBC_DEBUG_CSR_RDATA_HI 0x16F6
+#define mmFBC_DEBUG_CSR_WDATA 0x16EF
+#define mmFBC_DEBUG_CSR_WDATA_HI 0x16F7
+#define mmFBC_IDLE_FORCE_CLEAR_MASK 0x16D2
+#define mmFBC_IDLE_MASK 0x16D1
+#define mmFBC_IND_LUT0 0x16D9
+#define mmFBC_IND_LUT10 0x16E3
+#define mmFBC_IND_LUT1 0x16DA
+#define mmFBC_IND_LUT11 0x16E4
+#define mmFBC_IND_LUT12 0x16E5
+#define mmFBC_IND_LUT13 0x16E6
+#define mmFBC_IND_LUT14 0x16E7
+#define mmFBC_IND_LUT15 0x16E8
+#define mmFBC_IND_LUT2 0x16DB
+#define mmFBC_IND_LUT3 0x16DC
+#define mmFBC_IND_LUT4 0x16DD
+#define mmFBC_IND_LUT5 0x16DE
+#define mmFBC_IND_LUT6 0x16DF
+#define mmFBC_IND_LUT7 0x16E0
+#define mmFBC_IND_LUT8 0x16E1
+#define mmFBC_IND_LUT9 0x16E2
+#define mmFBC_MISC 0x16F0
+#define mmFBC_START_STOP_DELAY 0x16D3
+#define mmFBC_STATUS 0x16F1
+#define mmFBC_TEST_DEBUG_DATA 0x16F5
+#define mmFBC_TEST_DEBUG_INDEX 0x16F4
+#define mmFMT0_FMT_BIT_DEPTH_CONTROL 0x1BF2
+#define mmFMT0_FMT_CLAMP_CNTL 0x1BF9
+#define mmFMT0_FMT_CONTROL 0x1BEE
+#define mmFMT0_FMT_CRC_CNTL 0x1BFA
+#define mmFMT0_FMT_CRC_SIG_BLUE_CONTROL 0x1BFE
+#define mmFMT0_FMT_CRC_SIG_BLUE_CONTROL_MASK 0x1BFC
+#define mmFMT0_FMT_CRC_SIG_RED_GREEN 0x1BFD
+#define mmFMT0_FMT_CRC_SIG_RED_GREEN_MASK 0x1BFB
+#define mmFMT0_FMT_DEBUG_CNTL 0x1BFF
+#define mmFMT0_FMT_DITHER_RAND_B_SEED 0x1BF5
+#define mmFMT0_FMT_DITHER_RAND_G_SEED 0x1BF4
+#define mmFMT0_FMT_DITHER_RAND_R_SEED 0x1BF3
+#define mmFMT0_FMT_DYNAMIC_EXP_CNTL 0x1BED
+#define mmFMT0_FMT_FORCE_DATA_0_1 0x1BF0
+#define mmFMT0_FMT_FORCE_DATA_2_3 0x1BF1
+#define mmFMT0_FMT_FORCE_OUTPUT_CNTL 0x1BEF
+#define mmFMT0_FMT_TEMPORAL_DITHER_PATTERN_CONTROL 0x1BF6
+#define mmFMT0_FMT_TEMPORAL_DITHER_PROGRAMMABLE_PATTERN_S_MATRIX 0x1BF7
+#define mmFMT0_FMT_TEMPORAL_DITHER_PROGRAMMABLE_PATTERN_T_MATRIX 0x1BF8
+#define mmFMT0_FMT_TEST_DEBUG_DATA 0x1BEC
+#define mmFMT0_FMT_TEST_DEBUG_INDEX 0x1BEB
+#define mmFMT1_FMT_BIT_DEPTH_CONTROL 0x1EF2
+#define mmFMT1_FMT_CLAMP_CNTL 0x1EF9
+#define mmFMT1_FMT_CONTROL 0x1EEE
+#define mmFMT1_FMT_CRC_CNTL 0x1EFA
+#define mmFMT1_FMT_CRC_SIG_BLUE_CONTROL 0x1EFE
+#define mmFMT1_FMT_CRC_SIG_BLUE_CONTROL_MASK 0x1EFC
+#define mmFMT1_FMT_CRC_SIG_RED_GREEN 0x1EFD
+#define mmFMT1_FMT_CRC_SIG_RED_GREEN_MASK 0x1EFB
+#define mmFMT1_FMT_DEBUG_CNTL 0x1EFF
+#define mmFMT1_FMT_DITHER_RAND_B_SEED 0x1EF5
+#define mmFMT1_FMT_DITHER_RAND_G_SEED 0x1EF4
+#define mmFMT1_FMT_DITHER_RAND_R_SEED 0x1EF3
+#define mmFMT1_FMT_DYNAMIC_EXP_CNTL 0x1EED
+#define mmFMT1_FMT_FORCE_DATA_0_1 0x1EF0
+#define mmFMT1_FMT_FORCE_DATA_2_3 0x1EF1
+#define mmFMT1_FMT_FORCE_OUTPUT_CNTL 0x1EEF
+#define mmFMT1_FMT_TEMPORAL_DITHER_PATTERN_CONTROL 0x1EF6
+#define mmFMT1_FMT_TEMPORAL_DITHER_PROGRAMMABLE_PATTERN_S_MATRIX 0x1EF7
+#define mmFMT1_FMT_TEMPORAL_DITHER_PROGRAMMABLE_PATTERN_T_MATRIX 0x1EF8
+#define mmFMT1_FMT_TEST_DEBUG_DATA 0x1EEC
+#define mmFMT1_FMT_TEST_DEBUG_INDEX 0x1EEB
+#define mmFMT2_FMT_BIT_DEPTH_CONTROL 0x41F2
+#define mmFMT2_FMT_CLAMP_CNTL 0x41F9
+#define mmFMT2_FMT_CONTROL 0x41EE
+#define mmFMT2_FMT_CRC_CNTL 0x41FA
+#define mmFMT2_FMT_CRC_SIG_BLUE_CONTROL 0x41FE
+#define mmFMT2_FMT_CRC_SIG_BLUE_CONTROL_MASK 0x41FC
+#define mmFMT2_FMT_CRC_SIG_RED_GREEN 0x41FD
+#define mmFMT2_FMT_CRC_SIG_RED_GREEN_MASK 0x41FB
+#define mmFMT2_FMT_DEBUG_CNTL 0x41FF
+#define mmFMT2_FMT_DITHER_RAND_B_SEED 0x41F5
+#define mmFMT2_FMT_DITHER_RAND_G_SEED 0x41F4
+#define mmFMT2_FMT_DITHER_RAND_R_SEED 0x41F3
+#define mmFMT2_FMT_DYNAMIC_EXP_CNTL 0x41ED
+#define mmFMT2_FMT_FORCE_DATA_0_1 0x41F0
+#define mmFMT2_FMT_FORCE_DATA_2_3 0x41F1
+#define mmFMT2_FMT_FORCE_OUTPUT_CNTL 0x41EF
+#define mmFMT2_FMT_TEMPORAL_DITHER_PATTERN_CONTROL 0x41F6
+#define mmFMT2_FMT_TEMPORAL_DITHER_PROGRAMMABLE_PATTERN_S_MATRIX 0x41F7
+#define mmFMT2_FMT_TEMPORAL_DITHER_PROGRAMMABLE_PATTERN_T_MATRIX 0x41F8
+#define mmFMT2_FMT_TEST_DEBUG_DATA 0x41EC
+#define mmFMT2_FMT_TEST_DEBUG_INDEX 0x41EB
+#define mmFMT3_FMT_BIT_DEPTH_CONTROL 0x44F2
+#define mmFMT3_FMT_CLAMP_CNTL 0x44F9
+#define mmFMT3_FMT_CONTROL 0x44EE
+#define mmFMT3_FMT_CRC_CNTL 0x44FA
+#define mmFMT3_FMT_CRC_SIG_BLUE_CONTROL 0x44FE
+#define mmFMT3_FMT_CRC_SIG_BLUE_CONTROL_MASK 0x44FC
+#define mmFMT3_FMT_CRC_SIG_RED_GREEN 0x44FD
+#define mmFMT3_FMT_CRC_SIG_RED_GREEN_MASK 0x44FB
+#define mmFMT3_FMT_DEBUG_CNTL 0x44FF
+#define mmFMT3_FMT_DITHER_RAND_B_SEED 0x44F5
+#define mmFMT3_FMT_DITHER_RAND_G_SEED 0x44F4
+#define mmFMT3_FMT_DITHER_RAND_R_SEED 0x44F3
+#define mmFMT3_FMT_DYNAMIC_EXP_CNTL 0x44ED
+#define mmFMT3_FMT_FORCE_DATA_0_1 0x44F0
+#define mmFMT3_FMT_FORCE_DATA_2_3 0x44F1
+#define mmFMT3_FMT_FORCE_OUTPUT_CNTL 0x44EF
+#define mmFMT3_FMT_TEMPORAL_DITHER_PATTERN_CONTROL 0x44F6
+#define mmFMT3_FMT_TEMPORAL_DITHER_PROGRAMMABLE_PATTERN_S_MATRIX 0x44F7
+#define mmFMT3_FMT_TEMPORAL_DITHER_PROGRAMMABLE_PATTERN_T_MATRIX 0x44F8
+#define mmFMT3_FMT_TEST_DEBUG_DATA 0x44EC
+#define mmFMT3_FMT_TEST_DEBUG_INDEX 0x44EB
+#define mmFMT4_FMT_BIT_DEPTH_CONTROL 0x47F2
+#define mmFMT4_FMT_CLAMP_CNTL 0x47F9
+#define mmFMT4_FMT_CONTROL 0x47EE
+#define mmFMT4_FMT_CRC_CNTL 0x47FA
+#define mmFMT4_FMT_CRC_SIG_BLUE_CONTROL 0x47FE
+#define mmFMT4_FMT_CRC_SIG_BLUE_CONTROL_MASK 0x47FC
+#define mmFMT4_FMT_CRC_SIG_RED_GREEN 0x47FD
+#define mmFMT4_FMT_CRC_SIG_RED_GREEN_MASK 0x47FB
+#define mmFMT4_FMT_DEBUG_CNTL 0x47FF
+#define mmFMT4_FMT_DITHER_RAND_B_SEED 0x47F5
+#define mmFMT4_FMT_DITHER_RAND_G_SEED 0x47F4
+#define mmFMT4_FMT_DITHER_RAND_R_SEED 0x47F3
+#define mmFMT4_FMT_DYNAMIC_EXP_CNTL 0x47ED
+#define mmFMT4_FMT_FORCE_DATA_0_1 0x47F0
+#define mmFMT4_FMT_FORCE_DATA_2_3 0x47F1
+#define mmFMT4_FMT_FORCE_OUTPUT_CNTL 0x47EF
+#define mmFMT4_FMT_TEMPORAL_DITHER_PATTERN_CONTROL 0x47F6
+#define mmFMT4_FMT_TEMPORAL_DITHER_PROGRAMMABLE_PATTERN_S_MATRIX 0x47F7
+#define mmFMT4_FMT_TEMPORAL_DITHER_PROGRAMMABLE_PATTERN_T_MATRIX 0x47F8
+#define mmFMT4_FMT_TEST_DEBUG_DATA 0x47EC
+#define mmFMT4_FMT_TEST_DEBUG_INDEX 0x47EB
+#define mmFMT5_FMT_BIT_DEPTH_CONTROL 0x4AF2
+#define mmFMT5_FMT_CLAMP_CNTL 0x4AF9
+#define mmFMT5_FMT_CONTROL 0x4AEE
+#define mmFMT5_FMT_CRC_CNTL 0x4AFA
+#define mmFMT5_FMT_CRC_SIG_BLUE_CONTROL 0x4AFE
+#define mmFMT5_FMT_CRC_SIG_BLUE_CONTROL_MASK 0x4AFC
+#define mmFMT5_FMT_CRC_SIG_RED_GREEN 0x4AFD
+#define mmFMT5_FMT_CRC_SIG_RED_GREEN_MASK 0x4AFB
+#define mmFMT5_FMT_DEBUG_CNTL 0x4AFF
+#define mmFMT5_FMT_DITHER_RAND_B_SEED 0x4AF5
+#define mmFMT5_FMT_DITHER_RAND_G_SEED 0x4AF4
+#define mmFMT5_FMT_DITHER_RAND_R_SEED 0x4AF3
+#define mmFMT5_FMT_DYNAMIC_EXP_CNTL 0x4AED
+#define mmFMT5_FMT_FORCE_DATA_0_1 0x4AF0
+#define mmFMT5_FMT_FORCE_DATA_2_3 0x4AF1
+#define mmFMT5_FMT_FORCE_OUTPUT_CNTL 0x4AEF
+#define mmFMT5_FMT_TEMPORAL_DITHER_PATTERN_CONTROL 0x4AF6
+#define mmFMT5_FMT_TEMPORAL_DITHER_PROGRAMMABLE_PATTERN_S_MATRIX 0x4AF7
+#define mmFMT5_FMT_TEMPORAL_DITHER_PROGRAMMABLE_PATTERN_T_MATRIX 0x4AF8
+#define mmFMT5_FMT_TEST_DEBUG_DATA 0x4AEC
+#define mmFMT5_FMT_TEST_DEBUG_INDEX 0x4AEB
+#define mmFMT_BIT_DEPTH_CONTROL 0x1BF2
+#define mmFMT_CLAMP_CNTL 0x1BF9
+#define mmFMT_CONTROL 0x1BEE
+#define mmFMT_CRC_CNTL 0x1BFA
+#define mmFMT_CRC_SIG_BLUE_CONTROL 0x1BFE
+#define mmFMT_CRC_SIG_BLUE_CONTROL_MASK 0x1BFC
+#define mmFMT_CRC_SIG_RED_GREEN 0x1BFD
+#define mmFMT_CRC_SIG_RED_GREEN_MASK 0x1BFB
+#define mmFMT_DEBUG_CNTL 0x1BFF
+#define mmFMT_DITHER_RAND_B_SEED 0x1BF5
+#define mmFMT_DITHER_RAND_G_SEED 0x1BF4
+#define mmFMT_DITHER_RAND_R_SEED 0x1BF3
+#define mmFMT_DYNAMIC_EXP_CNTL 0x1BED
+#define mmFMT_FORCE_DATA_0_1 0x1BF0
+#define mmFMT_FORCE_DATA_2_3 0x1BF1
+#define mmFMT_FORCE_OUTPUT_CNTL 0x1BEF
+#define mmFMT_TEMPORAL_DITHER_PATTERN_CONTROL 0x1BF6
+#define mmFMT_TEMPORAL_DITHER_PROGRAMMABLE_PATTERN_S_MATRIX 0x1BF7
+#define mmFMT_TEMPORAL_DITHER_PROGRAMMABLE_PATTERN_T_MATRIX 0x1BF8
+#define mmFMT_TEST_DEBUG_DATA 0x1BEC
+#define mmFMT_TEST_DEBUG_INDEX 0x1BEB
+#define mmGAMUT_REMAP_C11_C12 0x1A5A
+#define mmGAMUT_REMAP_C13_C14 0x1A5B
+#define mmGAMUT_REMAP_C21_C22 0x1A5C
+#define mmGAMUT_REMAP_C23_C24 0x1A5D
+#define mmGAMUT_REMAP_C31_C32 0x1A5E
+#define mmGAMUT_REMAP_C33_C34 0x1A5F
+#define mmGAMUT_REMAP_CONTROL 0x1A59
+#define mmGENENB 0x00F0
+#define mmGENERIC_I2C_CONTROL 0x1834
+#define mmGENERIC_I2C_DATA 0x183A
+#define mmGENERIC_I2C_INTERRUPT_CONTROL 0x1835
+#define mmGENERIC_I2C_PIN_DEBUG 0x183C
+#define mmGENERIC_I2C_PIN_SELECTION 0x183B
+#define mmGENERIC_I2C_SETUP 0x1838
+#define mmGENERIC_I2C_SPEED 0x1837
+#define mmGENERIC_I2C_STATUS 0x1836
+#define mmGENERIC_I2C_TRANSACTION 0x1839
+#define mmGENFC_RD 0x00F2
+#define mmGENFC_WT 0x00EE
+#define mmGENMO_RD 0x00F3
+#define mmGENMO_WT 0x00F0
+#define mmGENS0 0x00F0
+#define mmGENS1 0x00EE
+#define mmGRPH8_DATA 0x00F3
+#define mmGRPH8_IDX 0x00F3
+#define mmGRPH_COMPRESS_PITCH 0x1A1A
+#define mmGRPH_COMPRESS_SURFACE_ADDRESS 0x1A19
+#define mmGRPH_COMPRESS_SURFACE_ADDRESS_HIGH 0x1A1B
+#define mmGRPH_CONTROL 0x1A01
+#define mmGRPH_DFQ_CONTROL 0x1A14
+#define mmGRPH_DFQ_STATUS 0x1A15
+#define mmGRPH_ENABLE 0x1A00
+#define mmGRPH_FLIP_CONTROL 0x1A12
+#define mmGRPH_INTERRUPT_CONTROL 0x1A17
+#define mmGRPH_INTERRUPT_STATUS 0x1A16
+#define mmGRPH_LUT_10BIT_BYPASS 0x1A02
+#define mmGRPH_PITCH 0x1A06
+#define mmGRPH_PRIMARY_SURFACE_ADDRESS 0x1A04
+#define mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH 0x1A07
+#define mmGRPH_SECONDARY_SURFACE_ADDRESS 0x1A05
+#define mmGRPH_SECONDARY_SURFACE_ADDRESS_HIGH 0x1A08
+#define mmGRPH_STEREOSYNC_FLIP 0x1A97
+#define mmGRPH_SURFACE_ADDRESS_HIGH_INUSE 0x1A18
+#define mmGRPH_SURFACE_ADDRESS_INUSE 0x1A13
+#define mmGRPH_SURFACE_OFFSET_X 0x1A09
+#define mmGRPH_SURFACE_OFFSET_Y 0x1A0A
+#define mmGRPH_SWAP_CNTL 0x1A03
+#define mmGRPH_UPDATE 0x1A11
+#define mmGRPH_X_END 0x1A0D
+#define mmGRPH_X_START 0x1A0B
+#define mmGRPH_Y_END 0x1A0E
+#define mmGRPH_Y_START 0x1A0C
+#define mmHDMI_ACR_32_0 0x1C37
+#define mmHDMI_ACR_32_1 0x1C38
+#define mmHDMI_ACR_44_0 0x1C39
+#define mmHDMI_ACR_44_1 0x1C3A
+#define mmHDMI_ACR_48_0 0x1C3B
+#define mmHDMI_ACR_48_1 0x1C3C
+#define mmHDMI_ACR_PACKET_CONTROL 0x1C0F
+#define mmHDMI_ACR_STATUS_0 0x1C3D
+#define mmHDMI_ACR_STATUS_1 0x1C3E
+#define mmHDMI_AUDIO_PACKET_CONTROL 0x1C0E
+#define mmHDMI_CONTROL 0x1C0C
+#define mmHDMI_GC 0x1C16
+#define mmHDMI_GENERIC_PACKET_CONTROL0 0x1C13
+#define mmHDMI_GENERIC_PACKET_CONTROL1 0x1C30
+#define mmHDMI_INFOFRAME_CONTROL0 0x1C11
+#define mmHDMI_INFOFRAME_CONTROL1 0x1C12
+#define mmHDMI_STATUS 0x1C0D
+#define mmHDMI_VBI_PACKET_CONTROL 0x1C10
+#define mmINPUT_CSC_C11_C12 0x1A36
+#define mmINPUT_CSC_C13_C14 0x1A37
+#define mmINPUT_CSC_C21_C22 0x1A38
+#define mmINPUT_CSC_C23_C24 0x1A39
+#define mmINPUT_CSC_C31_C32 0x1A3A
+#define mmINPUT_CSC_C33_C34 0x1A3B
+#define mmINPUT_CSC_CONTROL 0x1A35
+#define mmINPUT_GAMMA_CONTROL 0x1A10
+#define mmKEY_CONTROL 0x1A53
+#define mmKEY_RANGE_ALPHA 0x1A54
+#define mmKEY_RANGE_BLUE 0x1A57
+#define mmKEY_RANGE_GREEN 0x1A56
+#define mmKEY_RANGE_RED 0x1A55
+#define mmLB0_DC_MVP_LB_CONTROL 0x1ADB
+#define mmLB0_LB_DEBUG 0x1AFC
+#define mmLB0_LB_DEBUG2 0x1AC9
+#define mmLB0_LB_NO_OUTSTANDING_REQ_STATUS 0x1AC8
+#define mmLB0_LB_SYNC_RESET_SEL 0x1ACA
+#define mmLB0_LB_TEST_DEBUG_DATA 0x1AFF
+#define mmLB0_LB_TEST_DEBUG_INDEX 0x1AFE
+#define mmLB0_MVP_AFR_FLIP_FIFO_CNTL 0x1AD9
+#define mmLB0_MVP_AFR_FLIP_MODE 0x1AD8
+#define mmLB0_MVP_FLIP_LINE_NUM_INSERT 0x1ADA
+#define mmLB1_DC_MVP_LB_CONTROL 0x1DDB
+#define mmLB1_LB_DEBUG 0x1DFC
+#define mmLB1_LB_DEBUG2 0x1DC9
+#define mmLB1_LB_NO_OUTSTANDING_REQ_STATUS 0x1DC8
+#define mmLB1_LB_SYNC_RESET_SEL 0x1DCA
+#define mmLB1_LB_TEST_DEBUG_DATA 0x1DFF
+#define mmLB1_LB_TEST_DEBUG_INDEX 0x1DFE
+#define mmLB1_MVP_AFR_FLIP_FIFO_CNTL 0x1DD9
+#define mmLB1_MVP_AFR_FLIP_MODE 0x1DD8
+#define mmLB1_MVP_FLIP_LINE_NUM_INSERT 0x1DDA
+#define mmLB2_DC_MVP_LB_CONTROL 0x40DB
+#define mmLB2_LB_DEBUG 0x40FC
+#define mmLB2_LB_DEBUG2 0x40C9
+#define mmLB2_LB_NO_OUTSTANDING_REQ_STATUS 0x40C8
+#define mmLB2_LB_SYNC_RESET_SEL 0x40CA
+#define mmLB2_LB_TEST_DEBUG_DATA 0x40FF
+#define mmLB2_LB_TEST_DEBUG_INDEX 0x40FE
+#define mmLB2_MVP_AFR_FLIP_FIFO_CNTL 0x40D9
+#define mmLB2_MVP_AFR_FLIP_MODE 0x40D8
+#define mmLB2_MVP_FLIP_LINE_NUM_INSERT 0x40DA
+#define mmLB3_DC_MVP_LB_CONTROL 0x43DB
+#define mmLB3_LB_DEBUG 0x43FC
+#define mmLB3_LB_DEBUG2 0x43C9
+#define mmLB3_LB_NO_OUTSTANDING_REQ_STATUS 0x43C8
+#define mmLB3_LB_SYNC_RESET_SEL 0x43CA
+#define mmLB3_LB_TEST_DEBUG_DATA 0x43FF
+#define mmLB3_LB_TEST_DEBUG_INDEX 0x43FE
+#define mmLB3_MVP_AFR_FLIP_FIFO_CNTL 0x43D9
+#define mmLB3_MVP_AFR_FLIP_MODE 0x43D8
+#define mmLB3_MVP_FLIP_LINE_NUM_INSERT 0x43DA
+#define mmLB4_DC_MVP_LB_CONTROL 0x46DB
+#define mmLB4_LB_DEBUG 0x46FC
+#define mmLB4_LB_DEBUG2 0x46C9
+#define mmLB4_LB_NO_OUTSTANDING_REQ_STATUS 0x46C8
+#define mmLB4_LB_SYNC_RESET_SEL 0x46CA
+#define mmLB4_LB_TEST_DEBUG_DATA 0x46FF
+#define mmLB4_LB_TEST_DEBUG_INDEX 0x46FE
+#define mmLB4_MVP_AFR_FLIP_FIFO_CNTL 0x46D9
+#define mmLB4_MVP_AFR_FLIP_MODE 0x46D8
+#define mmLB4_MVP_FLIP_LINE_NUM_INSERT 0x46DA
+#define mmLB5_DC_MVP_LB_CONTROL 0x49DB
+#define mmLB5_LB_DEBUG 0x49FC
+#define mmLB5_LB_DEBUG2 0x49C9
+#define mmLB5_LB_NO_OUTSTANDING_REQ_STATUS 0x49C8
+#define mmLB5_LB_SYNC_RESET_SEL 0x49CA
+#define mmLB5_LB_TEST_DEBUG_DATA 0x49FF
+#define mmLB5_LB_TEST_DEBUG_INDEX 0x49FE
+#define mmLB5_MVP_AFR_FLIP_FIFO_CNTL 0x49D9
+#define mmLB5_MVP_AFR_FLIP_MODE 0x49D8
+#define mmLB5_MVP_FLIP_LINE_NUM_INSERT 0x49DA
+#define mmLB_DEBUG 0x1AFC
+#define mmLB_DEBUG2 0x1AC9
+#define mmLB_NO_OUTSTANDING_REQ_STATUS 0x1AC8
+#define mmLB_SYNC_RESET_SEL 0x1ACA
+#define mmLB_TEST_DEBUG_DATA 0x1AFF
+#define mmLB_TEST_DEBUG_INDEX 0x1AFE
+#define mmLIGHT_SLEEP_CNTL 0x0132
+#define mmLOW_POWER_TILING_CONTROL 0x0325
+#define mmLVDS_DATA_CNTL 0x1C8C
+#define mmLVTMA_PWRSEQ_CNTL 0x1919
+#define mmLVTMA_PWRSEQ_DELAY1 0x191C
+#define mmLVTMA_PWRSEQ_DELAY2 0x191D
+#define mmLVTMA_PWRSEQ_REF_DIV 0x191B
+#define mmLVTMA_PWRSEQ_STATE 0x191A
+#define mmMASTER_COMM_CMD_REG 0x161F
+#define mmMASTER_COMM_CNTL_REG 0x1620
+#define mmMASTER_COMM_DATA_REG1 0x161C
+#define mmMASTER_COMM_DATA_REG2 0x161D
+#define mmMASTER_COMM_DATA_REG3 0x161E
+#define mmMASTER_UPDATE_LOCK 0x1BBD
+#define mmMASTER_UPDATE_MODE 0x1BBE
+#define mmMC_DC_INTERFACE_NACK_STATUS 0x031C
+#define mmMCIF_CONTROL 0x0314
+#define mmMCIF_MEM_CONTROL 0x0319
+#define mmMCIF_TEST_DEBUG_DATA 0x0317
+#define mmMCIF_TEST_DEBUG_INDEX 0x0316
+#define mmMCIF_VMID 0x0318
+#define mmMCIF_WRITE_COMBINE_CONTROL 0x0315
+#define mmMICROSECOND_TIME_BASE_DIV 0x013B
+#define mmMILLISECOND_TIME_BASE_DIV 0x0130
+#define mmMVP_AFR_FLIP_FIFO_CNTL 0x1AD9
+#define mmMVP_AFR_FLIP_MODE 0x1AD8
+#define mmMVP_BLACK_KEYER 0x1686
+#define mmMVP_CONTROL1 0x1680
+#define mmMVP_CONTROL2 0x1681
+#define mmMVP_CONTROL3 0x168A
+#define mmMVP_CRC_CNTL 0x1687
+#define mmMVP_CRC_RESULT_BLUE_GREEN 0x1688
+#define mmMVP_CRC_RESULT_RED 0x1689
+#define mmMVP_DEBUG 0x168F
+#define mmMVP_FIFO_CONTROL 0x1682
+#define mmMVP_FIFO_STATUS 0x1683
+#define mmMVP_FLIP_LINE_NUM_INSERT 0x1ADA
+#define mmMVP_INBAND_CNTL_CAP 0x1685
+#define mmMVP_RECEIVE_CNT_CNTL1 0x168B
+#define mmMVP_RECEIVE_CNT_CNTL2 0x168C
+#define mmMVP_SLAVE_STATUS 0x1684
+#define mmMVP_TEST_DEBUG_DATA 0x168E
+#define mmMVP_TEST_DEBUG_INDEX 0x168D
+#define mmOUTPUT_CSC_C11_C12 0x1A3D
+#define mmOUTPUT_CSC_C13_C14 0x1A3E
+#define mmOUTPUT_CSC_C21_C22 0x1A3F
+#define mmOUTPUT_CSC_C23_C24 0x1A40
+#define mmOUTPUT_CSC_C31_C32 0x1A41
+#define mmOUTPUT_CSC_C33_C34 0x1A42
+#define mmOUTPUT_CSC_CONTROL 0x1A3C
+#define mmOUT_ROUND_CONTROL 0x1A51
+#define mmOVL_CONTROL1 0x1A1D
+#define mmOVL_CONTROL2 0x1A1E
+#define mmOVL_DFQ_CONTROL 0x1A29
+#define mmOVL_DFQ_STATUS 0x1A2A
+#define mmOVL_ENABLE 0x1A1C
+#define mmOVL_END 0x1A26
+#define mmOVL_PITCH 0x1A21
+#define mmOVLSCL_EDGE_PIXEL_CNTL 0x1A2C
+#define mmOVL_SECONDARY_SURFACE_ADDRESS 0x1A92
+#define mmOVL_SECONDARY_SURFACE_ADDRESS_HIGH 0x1A94
+#define mmOVL_START 0x1A25
+#define mmOVL_STEREOSYNC_FLIP 0x1A93
+#define mmOVL_SURFACE_ADDRESS 0x1A20
+#define mmOVL_SURFACE_ADDRESS_HIGH 0x1A22
+#define mmOVL_SURFACE_ADDRESS_HIGH_INUSE 0x1A2B
+#define mmOVL_SURFACE_ADDRESS_INUSE 0x1A28
+#define mmOVL_SURFACE_OFFSET_X 0x1A23
+#define mmOVL_SURFACE_OFFSET_Y 0x1A24
+#define mmOVL_SWAP_CNTL 0x1A1F
+#define mmOVL_UPDATE 0x1A27
+#define mmPHY_AUX_CNTL 0x197F
+#define mmPIPE0_ARBITRATION_CONTROL3 0x02FA
+#define mmPIPE0_DMIF_BUFFER_CONTROL 0x0328
+#define mmPIPE0_MAX_REQUESTS 0x0302
+#define mmPIPE0_PG_CONFIG 0x1760
+#define mmPIPE0_PG_ENABLE 0x1761
+#define mmPIPE0_PG_STATUS 0x1762
+#define mmPIPE1_ARBITRATION_CONTROL3 0x02FB
+#define mmPIPE1_DMIF_BUFFER_CONTROL 0x0330
+#define mmPIPE1_MAX_REQUESTS 0x0303
+#define mmPIPE1_PG_CONFIG 0x1764
+#define mmPIPE1_PG_ENABLE 0x1765
+#define mmPIPE1_PG_STATUS 0x1766
+#define mmPIPE2_ARBITRATION_CONTROL3 0x02FC
+#define mmPIPE2_DMIF_BUFFER_CONTROL 0x0338
+#define mmPIPE2_MAX_REQUESTS 0x0304
+#define mmPIPE2_PG_CONFIG 0x1768
+#define mmPIPE2_PG_ENABLE 0x1769
+#define mmPIPE2_PG_STATUS 0x176A
+#define mmPIPE3_ARBITRATION_CONTROL3 0x02FD
+#define mmPIPE3_DMIF_BUFFER_CONTROL 0x0340
+#define mmPIPE3_MAX_REQUESTS 0x0305
+#define mmPIPE3_PG_CONFIG 0x176C
+#define mmPIPE3_PG_ENABLE 0x176D
+#define mmPIPE3_PG_STATUS 0x176E
+#define mmPIPE4_ARBITRATION_CONTROL3 0x02FE
+#define mmPIPE4_DMIF_BUFFER_CONTROL 0x0348
+#define mmPIPE4_MAX_REQUESTS 0x0306
+#define mmPIPE4_PG_CONFIG 0x1770
+#define mmPIPE4_PG_ENABLE 0x1771
+#define mmPIPE4_PG_STATUS 0x1772
+#define mmPIPE5_ARBITRATION_CONTROL3 0x02FF
+#define mmPIPE5_DMIF_BUFFER_CONTROL 0x0350
+#define mmPIPE5_MAX_REQUESTS 0x0307
+#define mmPIPE5_PG_CONFIG 0x1774
+#define mmPIPE5_PG_ENABLE 0x1775
+#define mmPIPE5_PG_STATUS 0x1776
+#define mmPIXCLK0_RESYNC_CNTL 0x013A
+#define mmPIXCLK1_RESYNC_CNTL 0x0138
+#define mmPIXCLK2_RESYNC_CNTL 0x0139
+#define mmPLL_ANALOG 0x1708
+#define mmPLL_CNTL 0x1707
+#define mmPLL_DEBUG_CNTL 0x170B
+#define mmPLL_DISPCLK_CURRENT_DTO_PHASE 0x170F
+#define mmPLL_DISPCLK_DTO_CNTL 0x170E
+#define mmPLL_DS_CNTL 0x1705
+#define mmPLL_FB_DIV 0x1701
+#define mmPLL_IDCLK_CNTL 0x1706
+#define mmPLL_POST_DIV 0x1702
+#define mmPLL_REF_DIV 0x1700
+#define mmPLL_SS_AMOUNT_DSFRAC 0x1703
+#define mmPLL_SS_CNTL 0x1704
+#define mmPLL_UNLOCK_DETECT_CNTL 0x170A
+#define mmPLL_UPDATE_CNTL 0x170D
+#define mmPLL_UPDATE_LOCK 0x170C
+#define mmPLL_VREG_CNTL 0x1709
+#define mmPRESCALE_GRPH_CONTROL 0x1A2D
+#define mmPRESCALE_OVL_CONTROL 0x1A31
+#define mmPRESCALE_VALUES_GRPH_B 0x1A30
+#define mmPRESCALE_VALUES_GRPH_G 0x1A2F
+#define mmPRESCALE_VALUES_GRPH_R 0x1A2E
+#define mmPRESCALE_VALUES_OVL_CB 0x1A32
+#define mmPRESCALE_VALUES_OVL_CR 0x1A34
+#define mmPRESCALE_VALUES_OVL_Y 0x1A33
+#define mmREGAMMA_CNTLA_END_CNTL1 0x1AA6
+#define mmREGAMMA_CNTLA_END_CNTL2 0x1AA7
+#define mmREGAMMA_CNTLA_REGION_0_1 0x1AA8
+#define mmREGAMMA_CNTLA_REGION_10_11 0x1AAD
+#define mmREGAMMA_CNTLA_REGION_12_13 0x1AAE
+#define mmREGAMMA_CNTLA_REGION_14_15 0x1AAF
+#define mmREGAMMA_CNTLA_REGION_2_3 0x1AA9
+#define mmREGAMMA_CNTLA_REGION_4_5 0x1AAA
+#define mmREGAMMA_CNTLA_REGION_6_7 0x1AAB
+#define mmREGAMMA_CNTLA_REGION_8_9 0x1AAC
+#define mmREGAMMA_CNTLA_SLOPE_CNTL 0x1AA5
+#define mmREGAMMA_CNTLA_START_CNTL 0x1AA4
+#define mmREGAMMA_CNTLB_END_CNTL1 0x1AB2
+#define mmREGAMMA_CNTLB_END_CNTL2 0x1AB3
+#define mmREGAMMA_CNTLB_REGION_0_1 0x1AB4
+#define mmREGAMMA_CNTLB_REGION_10_11 0x1AB9
+#define mmREGAMMA_CNTLB_REGION_12_13 0x1ABA
+#define mmREGAMMA_CNTLB_REGION_14_15 0x1ABB
+#define mmREGAMMA_CNTLB_REGION_2_3 0x1AB5
+#define mmREGAMMA_CNTLB_REGION_4_5 0x1AB6
+#define mmREGAMMA_CNTLB_REGION_6_7 0x1AB7
+#define mmREGAMMA_CNTLB_REGION_8_9 0x1AB8
+#define mmREGAMMA_CNTLB_SLOPE_CNTL 0x1AB1
+#define mmREGAMMA_CNTLB_START_CNTL 0x1AB0
+#define mmREGAMMA_CONTROL 0x1AA0
+#define mmREGAMMA_LUT_DATA 0x1AA2
+#define mmREGAMMA_LUT_INDEX 0x1AA1
+#define mmREGAMMA_LUT_WRITE_EN_MASK 0x1AA3
+#define mmSCL0_EXT_OVERSCAN_LEFT_RIGHT 0x1B5E
+#define mmSCL0_EXT_OVERSCAN_TOP_BOTTOM 0x1B5F
+#define mmSCL0_SCL_ALU_CONTROL 0x1B54
+#define mmSCL0_SCL_AUTOMATIC_MODE_CONTROL 0x1B47
+#define mmSCL0_SCL_BYPASS_CONTROL 0x1B45
+#define mmSCL0_SCL_COEF_RAM_CONFLICT_STATUS 0x1B55
+#define mmSCL0_SCL_COEF_RAM_SELECT 0x1B40
+#define mmSCL0_SCL_COEF_RAM_TAP_DATA 0x1B41
+#define mmSCL0_SCL_CONTROL 0x1B44
+#define mmSCL0_SCL_DEBUG 0x1B6A
+#define mmSCL0_SCL_DEBUG2 0x1B69
+#define mmSCL0_SCL_F_SHARP_CONTROL 0x1B53
+#define mmSCL0_SCL_HORZ_FILTER_CONTROL 0x1B4A
+#define mmSCL0_SCL_HORZ_FILTER_SCALE_RATIO 0x1B4B
+#define mmSCL0_SCL_MANUAL_REPLICATE_CONTROL 0x1B46
+#define mmSCL0_SCL_MODE_CHANGE_DET1 0x1B60
+#define mmSCL0_SCL_MODE_CHANGE_DET2 0x1B61
+#define mmSCL0_SCL_MODE_CHANGE_DET3 0x1B62
+#define mmSCL0_SCL_MODE_CHANGE_MASK 0x1B63
+#define mmSCL0_SCL_TAP_CONTROL 0x1B43
+#define mmSCL0_SCL_TEST_DEBUG_DATA 0x1B6C
+#define mmSCL0_SCL_TEST_DEBUG_INDEX 0x1B6B
+#define mmSCL0_SCL_UPDATE 0x1B51
+#define mmSCL0_SCL_VERT_FILTER_CONTROL 0x1B4E
+#define mmSCL0_SCL_VERT_FILTER_INIT 0x1B50
+#define mmSCL0_SCL_VERT_FILTER_INIT_BOT 0x1B57
+#define mmSCL0_SCL_VERT_FILTER_SCALE_RATIO 0x1B4F
+#define mmSCL0_VIEWPORT_SIZE 0x1B5D
+#define mmSCL0_VIEWPORT_START 0x1B5C
+#define mmSCL1_EXT_OVERSCAN_LEFT_RIGHT 0x1E5E
+#define mmSCL1_EXT_OVERSCAN_TOP_BOTTOM 0x1E5F
+#define mmSCL1_SCL_ALU_CONTROL 0x1E54
+#define mmSCL1_SCL_AUTOMATIC_MODE_CONTROL 0x1E47
+#define mmSCL1_SCL_BYPASS_CONTROL 0x1E45
+#define mmSCL1_SCL_COEF_RAM_CONFLICT_STATUS 0x1E55
+#define mmSCL1_SCL_COEF_RAM_SELECT 0x1E40
+#define mmSCL1_SCL_COEF_RAM_TAP_DATA 0x1E41
+#define mmSCL1_SCL_CONTROL 0x1E44
+#define mmSCL1_SCL_DEBUG 0x1E6A
+#define mmSCL1_SCL_DEBUG2 0x1E69
+#define mmSCL1_SCL_F_SHARP_CONTROL 0x1E53
+#define mmSCL1_SCL_HORZ_FILTER_CONTROL 0x1E4A
+#define mmSCL1_SCL_HORZ_FILTER_SCALE_RATIO 0x1E4B
+#define mmSCL1_SCL_MANUAL_REPLICATE_CONTROL 0x1E46
+#define mmSCL1_SCL_MODE_CHANGE_DET1 0x1E60
+#define mmSCL1_SCL_MODE_CHANGE_DET2 0x1E61
+#define mmSCL1_SCL_MODE_CHANGE_DET3 0x1E62
+#define mmSCL1_SCL_MODE_CHANGE_MASK 0x1E63
+#define mmSCL1_SCL_TAP_CONTROL 0x1E43
+#define mmSCL1_SCL_TEST_DEBUG_DATA 0x1E6C
+#define mmSCL1_SCL_TEST_DEBUG_INDEX 0x1E6B
+#define mmSCL1_SCL_UPDATE 0x1E51
+#define mmSCL1_SCL_VERT_FILTER_CONTROL 0x1E4E
+#define mmSCL1_SCL_VERT_FILTER_INIT 0x1E50
+#define mmSCL1_SCL_VERT_FILTER_INIT_BOT 0x1E57
+#define mmSCL1_SCL_VERT_FILTER_SCALE_RATIO 0x1E4F
+#define mmSCL1_VIEWPORT_SIZE 0x1E5D
+#define mmSCL1_VIEWPORT_START 0x1E5C
+#define mmSCL2_EXT_OVERSCAN_LEFT_RIGHT 0x415E
+#define mmSCL2_EXT_OVERSCAN_TOP_BOTTOM 0x415F
+#define mmSCL2_SCL_ALU_CONTROL 0x4154
+#define mmSCL2_SCL_AUTOMATIC_MODE_CONTROL 0x4147
+#define mmSCL2_SCL_BYPASS_CONTROL 0x4145
+#define mmSCL2_SCL_COEF_RAM_CONFLICT_STATUS 0x4155
+#define mmSCL2_SCL_COEF_RAM_SELECT 0x4140
+#define mmSCL2_SCL_COEF_RAM_TAP_DATA 0x4141
+#define mmSCL2_SCL_CONTROL 0x4144
+#define mmSCL2_SCL_DEBUG 0x416A
+#define mmSCL2_SCL_DEBUG2 0x4169
+#define mmSCL2_SCL_F_SHARP_CONTROL 0x4153
+#define mmSCL2_SCL_HORZ_FILTER_CONTROL 0x414A
+#define mmSCL2_SCL_HORZ_FILTER_SCALE_RATIO 0x414B
+#define mmSCL2_SCL_MANUAL_REPLICATE_CONTROL 0x4146
+#define mmSCL2_SCL_MODE_CHANGE_DET1 0x4160
+#define mmSCL2_SCL_MODE_CHANGE_DET2 0x4161
+#define mmSCL2_SCL_MODE_CHANGE_DET3 0x4162
+#define mmSCL2_SCL_MODE_CHANGE_MASK 0x4163
+#define mmSCL2_SCL_TAP_CONTROL 0x4143
+#define mmSCL2_SCL_TEST_DEBUG_DATA 0x416C
+#define mmSCL2_SCL_TEST_DEBUG_INDEX 0x416B
+#define mmSCL2_SCL_UPDATE 0x4151
+#define mmSCL2_SCL_VERT_FILTER_CONTROL 0x414E
+#define mmSCL2_SCL_VERT_FILTER_INIT 0x4150
+#define mmSCL2_SCL_VERT_FILTER_INIT_BOT 0x4157
+#define mmSCL2_SCL_VERT_FILTER_SCALE_RATIO 0x414F
+#define mmSCL2_VIEWPORT_SIZE 0x415D
+#define mmSCL2_VIEWPORT_START 0x415C
+#define mmSCL3_EXT_OVERSCAN_LEFT_RIGHT 0x445E
+#define mmSCL3_EXT_OVERSCAN_TOP_BOTTOM 0x445F
+#define mmSCL3_SCL_ALU_CONTROL 0x4454
+#define mmSCL3_SCL_AUTOMATIC_MODE_CONTROL 0x4447
+#define mmSCL3_SCL_BYPASS_CONTROL 0x4445
+#define mmSCL3_SCL_COEF_RAM_CONFLICT_STATUS 0x4455
+#define mmSCL3_SCL_COEF_RAM_SELECT 0x4440
+#define mmSCL3_SCL_COEF_RAM_TAP_DATA 0x4441
+#define mmSCL3_SCL_CONTROL 0x4444
+#define mmSCL3_SCL_DEBUG 0x446A
+#define mmSCL3_SCL_DEBUG2 0x4469
+#define mmSCL3_SCL_F_SHARP_CONTROL 0x4453
+#define mmSCL3_SCL_HORZ_FILTER_CONTROL 0x444A
+#define mmSCL3_SCL_HORZ_FILTER_SCALE_RATIO 0x444B
+#define mmSCL3_SCL_MANUAL_REPLICATE_CONTROL 0x4446
+#define mmSCL3_SCL_MODE_CHANGE_DET1 0x4460
+#define mmSCL3_SCL_MODE_CHANGE_DET2 0x4461
+#define mmSCL3_SCL_MODE_CHANGE_DET3 0x4462
+#define mmSCL3_SCL_MODE_CHANGE_MASK 0x4463
+#define mmSCL3_SCL_TAP_CONTROL 0x4443
+#define mmSCL3_SCL_TEST_DEBUG_DATA 0x446C
+#define mmSCL3_SCL_TEST_DEBUG_INDEX 0x446B
+#define mmSCL3_SCL_UPDATE 0x4451
+#define mmSCL3_SCL_VERT_FILTER_CONTROL 0x444E
+#define mmSCL3_SCL_VERT_FILTER_INIT 0x4450
+#define mmSCL3_SCL_VERT_FILTER_INIT_BOT 0x4457
+#define mmSCL3_SCL_VERT_FILTER_SCALE_RATIO 0x444F
+#define mmSCL3_VIEWPORT_SIZE 0x445D
+#define mmSCL3_VIEWPORT_START 0x445C
+#define mmSCL4_EXT_OVERSCAN_LEFT_RIGHT 0x475E
+#define mmSCL4_EXT_OVERSCAN_TOP_BOTTOM 0x475F
+#define mmSCL4_SCL_ALU_CONTROL 0x4754
+#define mmSCL4_SCL_AUTOMATIC_MODE_CONTROL 0x4747
+#define mmSCL4_SCL_BYPASS_CONTROL 0x4745
+#define mmSCL4_SCL_COEF_RAM_CONFLICT_STATUS 0x4755
+#define mmSCL4_SCL_COEF_RAM_SELECT 0x4740
+#define mmSCL4_SCL_COEF_RAM_TAP_DATA 0x4741
+#define mmSCL4_SCL_CONTROL 0x4744
+#define mmSCL4_SCL_DEBUG 0x476A
+#define mmSCL4_SCL_DEBUG2 0x4769
+#define mmSCL4_SCL_F_SHARP_CONTROL 0x4753
+#define mmSCL4_SCL_HORZ_FILTER_CONTROL 0x474A
+#define mmSCL4_SCL_HORZ_FILTER_SCALE_RATIO 0x474B
+#define mmSCL4_SCL_MANUAL_REPLICATE_CONTROL 0x4746
+#define mmSCL4_SCL_MODE_CHANGE_DET1 0x4760
+#define mmSCL4_SCL_MODE_CHANGE_DET2 0x4761
+#define mmSCL4_SCL_MODE_CHANGE_DET3 0x4762
+#define mmSCL4_SCL_MODE_CHANGE_MASK 0x4763
+#define mmSCL4_SCL_TAP_CONTROL 0x4743
+#define mmSCL4_SCL_TEST_DEBUG_DATA 0x476C
+#define mmSCL4_SCL_TEST_DEBUG_INDEX 0x476B
+#define mmSCL4_SCL_UPDATE 0x4751
+#define mmSCL4_SCL_VERT_FILTER_CONTROL 0x474E
+#define mmSCL4_SCL_VERT_FILTER_INIT 0x4750
+#define mmSCL4_SCL_VERT_FILTER_INIT_BOT 0x4757
+#define mmSCL4_SCL_VERT_FILTER_SCALE_RATIO 0x474F
+#define mmSCL4_VIEWPORT_SIZE 0x475D
+#define mmSCL4_VIEWPORT_START 0x475C
+#define mmSCL5_EXT_OVERSCAN_LEFT_RIGHT 0x4A5E
+#define mmSCL5_EXT_OVERSCAN_TOP_BOTTOM 0x4A5F
+#define mmSCL5_SCL_ALU_CONTROL 0x4A54
+#define mmSCL5_SCL_AUTOMATIC_MODE_CONTROL 0x4A47
+#define mmSCL5_SCL_BYPASS_CONTROL 0x4A45
+#define mmSCL5_SCL_COEF_RAM_CONFLICT_STATUS 0x4A55
+#define mmSCL5_SCL_COEF_RAM_SELECT 0x4A40
+#define mmSCL5_SCL_COEF_RAM_TAP_DATA 0x4A41
+#define mmSCL5_SCL_CONTROL 0x4A44
+#define mmSCL5_SCL_DEBUG 0x4A6A
+#define mmSCL5_SCL_DEBUG2 0x4A69
+#define mmSCL5_SCL_F_SHARP_CONTROL 0x4A53
+#define mmSCL5_SCL_HORZ_FILTER_CONTROL 0x4A4A
+#define mmSCL5_SCL_HORZ_FILTER_SCALE_RATIO 0x4A4B
+#define mmSCL5_SCL_MANUAL_REPLICATE_CONTROL 0x4A46
+#define mmSCL5_SCL_MODE_CHANGE_DET1 0x4A60
+#define mmSCL5_SCL_MODE_CHANGE_DET2 0x4A61
+#define mmSCL5_SCL_MODE_CHANGE_DET3 0x4A62
+#define mmSCL5_SCL_MODE_CHANGE_MASK 0x4A63
+#define mmSCL5_SCL_TAP_CONTROL 0x4A43
+#define mmSCL5_SCL_TEST_DEBUG_DATA 0x4A6C
+#define mmSCL5_SCL_TEST_DEBUG_INDEX 0x4A6B
+#define mmSCL5_SCL_UPDATE 0x4A51
+#define mmSCL5_SCL_VERT_FILTER_CONTROL 0x4A4E
+#define mmSCL5_SCL_VERT_FILTER_INIT 0x4A50
+#define mmSCL5_SCL_VERT_FILTER_INIT_BOT 0x4A57
+#define mmSCL5_SCL_VERT_FILTER_SCALE_RATIO 0x4A4F
+#define mmSCL5_VIEWPORT_SIZE 0x4A5D
+#define mmSCL5_VIEWPORT_START 0x4A5C
+#define mmSCL_ALU_CONTROL 0x1B54
+#define mmSCL_AUTOMATIC_MODE_CONTROL 0x1B47
+#define mmSCL_BYPASS_CONTROL 0x1B45
+#define mmSCL_COEF_RAM_CONFLICT_STATUS 0x1B55
+#define mmSCL_COEF_RAM_SELECT 0x1B40
+#define mmSCL_COEF_RAM_TAP_DATA 0x1B41
+#define mmSCL_CONTROL 0x1B44
+#define mmSCL_DEBUG 0x1B6A
+#define mmSCL_DEBUG2 0x1B69
+#define mmSCL_F_SHARP_CONTROL 0x1B53
+#define mmSCL_HORZ_FILTER_CONTROL 0x1B4A
+#define mmSCL_HORZ_FILTER_SCALE_RATIO 0x1B4B
+#define mmSCLK_CGTT_BLK_CTRL_REG 0x0136
+#define mmSCL_MANUAL_REPLICATE_CONTROL 0x1B46
+#define mmSCL_MODE_CHANGE_DET1 0x1B60
+#define mmSCL_MODE_CHANGE_DET2 0x1B61
+#define mmSCL_MODE_CHANGE_DET3 0x1B62
+#define mmSCL_MODE_CHANGE_MASK 0x1B63
+#define mmSCL_TAP_CONTROL 0x1B43
+#define mmSCL_TEST_DEBUG_DATA 0x1B6C
+#define mmSCL_TEST_DEBUG_INDEX 0x1B6B
+#define mmSCL_UPDATE 0x1B51
+#define mmSCL_VERT_FILTER_CONTROL 0x1B4E
+#define mmSCL_VERT_FILTER_INIT 0x1B50
+#define mmSCL_VERT_FILTER_INIT_BOT 0x1B57
+#define mmSCL_VERT_FILTER_SCALE_RATIO 0x1B4F
+#define mmSEQ8_DATA 0x00F1
+#define mmSEQ8_IDX 0x00F1
+#define mmSLAVE_COMM_CMD_REG 0x1624
+#define mmSLAVE_COMM_CNTL_REG 0x1625
+#define mmSLAVE_COMM_DATA_REG1 0x1621
+#define mmSLAVE_COMM_DATA_REG2 0x1622
+#define mmSLAVE_COMM_DATA_REG3 0x1623
+#define mmSYMCLKA_CLOCK_ENABLE 0x0160
+#define mmSYMCLKB_CLOCK_ENABLE 0x0161
+#define mmSYMCLKC_CLOCK_ENABLE 0x0162
+#define mmSYMCLKD_CLOCK_ENABLE 0x0163
+#define mmSYMCLKE_CLOCK_ENABLE 0x0164
+#define mmSYMCLKF_CLOCK_ENABLE 0x0165
+#define mmTMDS_CNTL 0x1C7C
+#define mmTMDS_CONTROL0_FEEDBACK 0x1C7E
+#define mmTMDS_CONTROL_CHAR 0x1C7D
+#define mmTMDS_CTL0_1_GEN_CNTL 0x1C86
+#define mmTMDS_CTL2_3_GEN_CNTL 0x1C87
+#define mmTMDS_CTL_BITS 0x1C83
+#define mmTMDS_DCBALANCER_CONTROL 0x1C84
+#define mmTMDS_DEBUG 0x1C82
+#define mmTMDS_STEREOSYNC_CTL_SEL 0x1C7F
+#define mmTMDS_SYNC_CHAR_PATTERN_0_1 0x1C80
+#define mmTMDS_SYNC_CHAR_PATTERN_2_3 0x1C81
+#define mmUNIPHYAB_TPG_CONTROL 0x1931
+#define mmUNIPHYAB_TPG_SEED 0x1932
+#define mmUNIPHY_ANG_BIST_CNTL 0x198C
+#define mmUNIPHYCD_TPG_CONTROL 0x1933
+#define mmUNIPHYCD_TPG_SEED 0x1934
+#define mmUNIPHY_CHANNEL_XBAR_CNTL 0x198E
+#define mmUNIPHY_DATA_SYNCHRONIZATION 0x198A
+#define mmUNIPHYEF_TPG_CONTROL 0x1935
+#define mmUNIPHYEF_TPG_SEED 0x1936
+#define mmUNIPHY_IMPCAL_LINKA 0x1908
+#define mmUNIPHY_IMPCAL_LINKB 0x1909
+#define mmUNIPHY_IMPCAL_LINKC 0x190F
+#define mmUNIPHY_IMPCAL_LINKD 0x1910
+#define mmUNIPHY_IMPCAL_LINKE 0x1913
+#define mmUNIPHY_IMPCAL_LINKF 0x1914
+#define mmUNIPHY_IMPCAL_PERIOD 0x190A
+#define mmUNIPHY_IMPCAL_PSW_AB 0x190E
+#define mmUNIPHY_IMPCAL_PSW_CD 0x1912
+#define mmUNIPHY_IMPCAL_PSW_EF 0x1916
+#define mmUNIPHY_LINK_CNTL 0x198D
+#define mmUNIPHY_PLL_CONTROL1 0x1986
+#define mmUNIPHY_PLL_CONTROL2 0x1987
+#define mmUNIPHY_PLL_FBDIV 0x1985
+#define mmUNIPHY_PLL_SS_CNTL 0x1989
+#define mmUNIPHY_PLL_SS_STEP_SIZE 0x1988
+#define mmUNIPHY_POWER_CONTROL 0x1984
+#define mmUNIPHY_REG_TEST_OUTPUT 0x198B
+#define mmUNIPHY_SOFT_RESET 0x0166
+#define mmUNIPHY_TX_CONTROL1 0x1980
+#define mmUNIPHY_TX_CONTROL2 0x1981
+#define mmUNIPHY_TX_CONTROL3 0x1982
+#define mmUNIPHY_TX_CONTROL4 0x1983
+#define mmVGA25_PPLL_ANALOG 0x00E4
+#define mmVGA25_PPLL_FB_DIV 0x00DC
+#define mmVGA25_PPLL_POST_DIV 0x00E0
+#define mmVGA25_PPLL_REF_DIV 0x00D8
+#define mmVGA28_PPLL_ANALOG 0x00E5
+#define mmVGA28_PPLL_FB_DIV 0x00DD
+#define mmVGA28_PPLL_POST_DIV 0x00E1
+#define mmVGA28_PPLL_REF_DIV 0x00D9
+#define mmVGA41_PPLL_ANALOG 0x00E6
+#define mmVGA41_PPLL_FB_DIV 0x00DE
+#define mmVGA41_PPLL_POST_DIV 0x00E2
+#define mmVGA41_PPLL_REF_DIV 0x00DA
+#define mmVGA_CACHE_CONTROL 0x00CB
+#define mmVGA_DEBUG_READBACK_DATA 0x00D7
+#define mmVGA_DEBUG_READBACK_INDEX 0x00D6
+#define mmVGA_DISPBUF1_SURFACE_ADDR 0x00C6
+#define mmVGA_DISPBUF2_SURFACE_ADDR 0x00C8
+#define mmVGA_HDP_CONTROL 0x00CA
+#define mmVGA_HW_DEBUG 0x00CF
+#define mmVGA_INTERRUPT_CONTROL 0x00D1
+#define mmVGA_INTERRUPT_STATUS 0x00D3
+#define mmVGA_MAIN_CONTROL 0x00D4
+#define mmVGA_MEMORY_BASE_ADDRESS 0x00C4
+#define mmVGA_MEMORY_BASE_ADDRESS_HIGH 0x00C9
+#define mmVGA_MEM_READ_PAGE_ADDR 0x0013
+#define mmVGA_MEM_WRITE_PAGE_ADDR 0x0012
+#define mmVGA_MODE_CONTROL 0x00C2
+#define mmVGA_RENDER_CONTROL 0x00C0
+#define mmVGA_SEQUENCER_RESET_CONTROL 0x00C1
+#define mmVGA_SOURCE_SELECT 0x00FC
+#define mmVGA_STATUS 0x00D0
+#define mmVGA_STATUS_CLEAR 0x00D2
+#define mmVGA_SURFACE_PITCH_SELECT 0x00C3
+#define mmVGA_TEST_CONTROL 0x00D5
+#define mmVGA_TEST_DEBUG_DATA 0x00C7
+#define mmVGA_TEST_DEBUG_INDEX 0x00C5
+#define mmVIEWPORT_SIZE 0x1B5D
+#define mmVIEWPORT_START 0x1B5C
+#define mmXDMA_CLOCK_GATING_CNTL 0x0409
+#define mmXDMA_IF_BIF_STATUS 0x0418
+#define mmXDMA_INTERRUPT 0x0406
+#define mmXDMA_LOCAL_SURFACE_TILING1 0x03F4
+#define mmXDMA_LOCAL_SURFACE_TILING2 0x03F5
+#define mmXDMA_MC_PCIE_CLIENT_CONFIG 0x03E9
+#define mmXDMA_MEM_POWER_CNTL 0x040B
+#define mmXDMA_MSTR_CMD_URGENT_CNTL 0x03F6
+#define mmXDMA_MSTR_CNTL 0x03E0
+#define mmXDMA_MSTR_HEIGHT 0x03E3
+#define mmXDMA_MSTR_LOCAL_SURFACE_BASE_ADDR 0x03F1
+#define mmXDMA_MSTR_LOCAL_SURFACE_BASE_ADDR_HIGH 0x03F2
+#define mmXDMA_MSTR_LOCAL_SURFACE_PITCH 0x03F3
+#define mmXDMA_MSTR_MEM_CLIENT_CONFIG 0x03EA
+#define mmXDMA_MSTR_MEM_NACK_STATUS 0x040D
+#define mmXDMA_MSTR_MEM_URGENT_CNTL 0x03F7
+#define mmXDMA_MSTR_PCIE_NACK_STATUS 0x040C
+#define mmXDMA_MSTR_READ_COMMAND 0x03E1
+#define mmXDMA_MSTR_REMOTE_GPU_ADDRESS 0x03E6
+#define mmXDMA_MSTR_REMOTE_GPU_ADDRESS_HIGH 0x03E7
+#define mmXDMA_MSTR_REMOTE_SURFACE_BASE 0x03E4
+#define mmXDMA_MSTR_REMOTE_SURFACE_BASE_HIGH 0x03E5
+#define mmXDMA_MSTR_STATUS 0x03E8
+#define mmXDMA_RBBMIF_RDWR_CNTL 0x040A
+#define mmXDMA_SLV_CNTL 0x03FB
+#define mmXDMA_SLV_FLIP_PENDING 0x0407
+#define mmXDMA_SLV_MEM_CLIENT_CONFIG 0x03FD
+#define mmXDMA_SLV_MEM_NACK_STATUS 0x040F
+#define mmXDMA_SLV_PCIE_NACK_STATUS 0x040E
+#define mmXDMA_SLV_READ_LATENCY_AVE 0x0405
+#define mmXDMA_SLV_READ_LATENCY_MINMAX 0x0404
+#define mmXDMA_SLV_READ_LATENCY_TIMER 0x0412
+#define mmXDMA_SLV_READ_URGENT_CNTL 0x03FF
+#define mmXDMA_SLV_REMOTE_GPU_ADDRESS 0x0402
+#define mmXDMA_SLV_REMOTE_GPU_ADDRESS_HIGH 0x0403
+#define mmXDMA_SLV_SLS_PITCH 0x03FE
+#define mmXDMA_SLV_WB_RATE_CNTL 0x0401
+#define mmXDMA_SLV_WRITE_URGENT_CNTL 0x0400
+#define mmXDMA_TEST_DEBUG_DATA 0x041D
+#define mmXDMA_TEST_DEBUG_INDEX 0x041C
+
+/* Registers that spilled out of sid.h */
+#define mmDATA_FORMAT 0x1AC0
+#define mmDESKTOP_HEIGHT 0x1AC1
+#define mmDC_LB_MEMORY_SPLIT 0x1AC3
+#define mmPRIORITY_A_CNT 0x1AC6
+#define mmPRIORITY_B_CNT 0x1AC7
+#define mmDPG_PIPE_ARBITRATION_CONTROL3 0x1B32
+#define mmINT_MASK 0x1AD0
+#define mmVLINE_STATUS 0x1AEE
+#define mmVBLANK_STATUS 0x1AEF
+
+#endif
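
(Usage sketch, illustrative only and not part of this patch: the mm* values in the dce_6_0_d.h hunk above are dword offsets into the DCE register aperture, and they pair with the *_MASK/*__SHIFT values in the dce_6_0_sh_mask.h hunk that follows. A field is read by masking the 32-bit register value and shifting it down. The snippet below assumes the usual amdgpu RREG32() MMIO read accessor and struct amdgpu_device, and assumes the matching mmAUX_SW_STATUS offset is defined earlier in dce_6_0_d.h, outside the excerpt shown here.)

/*
 * Illustrative helper: poll the AUX_SW_DONE bit using the mask/shift
 * pattern these headers are built around.  RREG32() is assumed to be
 * the standard amdgpu register read macro, which expects an
 * amdgpu_device named 'adev' in scope.
 */
static bool dce6_aux_sw_done(struct amdgpu_device *adev)
{
	u32 status = RREG32(mmAUX_SW_STATUS);	/* 32-bit MMIO read */

	/* extract the single-bit AUX_SW_DONE field (bit 0) */
	return (status & AUX_SW_STATUS__AUX_SW_DONE_MASK) >>
		AUX_SW_STATUS__AUX_SW_DONE__SHIFT;
}
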
diff --git a/drivers/gpu/drm/amd/include/asic_reg/dce/dce_6_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/dce/dce_6_0_sh_mask.h
new file mode 100644
index 000000000000..9a4d4c299d5b
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/dce/dce_6_0_sh_mask.h
@@ -0,0 +1,9836 @@
+/*
+ *
+ * Copyright (C) 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef DCE_6_0_SH_MASK_H
+#define DCE_6_0_SH_MASK_H
+
+#define ABM_TEST_DEBUG_DATA__ABM_TEST_DEBUG_DATA_MASK 0xffffffffL
+#define ABM_TEST_DEBUG_DATA__ABM_TEST_DEBUG_DATA__SHIFT 0x00000000
+#define ABM_TEST_DEBUG_INDEX__ABM_TEST_DEBUG_INDEX_MASK 0x000000ffL
+#define ABM_TEST_DEBUG_INDEX__ABM_TEST_DEBUG_INDEX__SHIFT 0x00000000
+#define ABM_TEST_DEBUG_INDEX__ABM_TEST_DEBUG_WRITE_EN_MASK 0x00000100L
+#define ABM_TEST_DEBUG_INDEX__ABM_TEST_DEBUG_WRITE_EN__SHIFT 0x00000008
+#define AFMT_60958_0__AFMT_60958_CS_A_MASK 0x00000001L
+#define AFMT_60958_0__AFMT_60958_CS_A__SHIFT 0x00000000
+#define AFMT_60958_0__AFMT_60958_CS_B_MASK 0x00000002L
+#define AFMT_60958_0__AFMT_60958_CS_B__SHIFT 0x00000001
+#define AFMT_60958_0__AFMT_60958_CS_CATEGORY_CODE_MASK 0x0000ff00L
+#define AFMT_60958_0__AFMT_60958_CS_CATEGORY_CODE__SHIFT 0x00000008
+#define AFMT_60958_0__AFMT_60958_CS_CHANNEL_NUMBER_L_MASK 0x00f00000L
+#define AFMT_60958_0__AFMT_60958_CS_CHANNEL_NUMBER_L__SHIFT 0x00000014
+#define AFMT_60958_0__AFMT_60958_CS_CLOCK_ACCURACY_MASK 0x30000000L
+#define AFMT_60958_0__AFMT_60958_CS_CLOCK_ACCURACY__SHIFT 0x0000001c
+#define AFMT_60958_0__AFMT_60958_CS_C_MASK 0x00000004L
+#define AFMT_60958_0__AFMT_60958_CS_C__SHIFT 0x00000002
+#define AFMT_60958_0__AFMT_60958_CS_D_MASK 0x00000038L
+#define AFMT_60958_0__AFMT_60958_CS_D__SHIFT 0x00000003
+#define AFMT_60958_0__AFMT_60958_CS_MODE_MASK 0x000000c0L
+#define AFMT_60958_0__AFMT_60958_CS_MODE__SHIFT 0x00000006
+#define AFMT_60958_0__AFMT_60958_CS_SAMPLING_FREQUENCY_MASK 0x0f000000L
+#define AFMT_60958_0__AFMT_60958_CS_SAMPLING_FREQUENCY__SHIFT 0x00000018
+#define AFMT_60958_0__AFMT_60958_CS_SOURCE_NUMBER_MASK 0x000f0000L
+#define AFMT_60958_0__AFMT_60958_CS_SOURCE_NUMBER__SHIFT 0x00000010
+#define AFMT_60958_1__AFMT_60958_CS_CHANNEL_NUMBER_R_MASK 0x00f00000L
+#define AFMT_60958_1__AFMT_60958_CS_CHANNEL_NUMBER_R__SHIFT 0x00000014
+#define AFMT_60958_1__AFMT_60958_CS_ORIGINAL_SAMPLING_FREQUENCY_MASK 0x000000f0L
+#define AFMT_60958_1__AFMT_60958_CS_ORIGINAL_SAMPLING_FREQUENCY__SHIFT 0x00000004
+#define AFMT_60958_1__AFMT_60958_CS_WORD_LENGTH_MASK 0x0000000fL
+#define AFMT_60958_1__AFMT_60958_CS_WORD_LENGTH__SHIFT 0x00000000
+#define AFMT_60958_1__AFMT_60958_VALID_L_MASK 0x00010000L
+#define AFMT_60958_1__AFMT_60958_VALID_L__SHIFT 0x00000010
+#define AFMT_60958_1__AFMT_60958_VALID_R_MASK 0x00040000L
+#define AFMT_60958_1__AFMT_60958_VALID_R__SHIFT 0x00000012
+#define AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_2_MASK 0x0000000fL
+#define AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_2__SHIFT 0x00000000
+#define AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_3_MASK 0x000000f0L
+#define AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_3__SHIFT 0x00000004
+#define AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_4_MASK 0x00000f00L
+#define AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_4__SHIFT 0x00000008
+#define AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_5_MASK 0x0000f000L
+#define AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_5__SHIFT 0x0000000c
+#define AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_6_MASK 0x000f0000L
+#define AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_6__SHIFT 0x00000010
+#define AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_7_MASK 0x00f00000L
+#define AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_7__SHIFT 0x00000014
+#define AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_CH_SEL_MASK 0x0000f000L
+#define AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_CH_SEL__SHIFT 0x0000000c
+#define AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_CONT_MASK 0x00000010L
+#define AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_CONT__SHIFT 0x00000004
+#define AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_COUNT_MASK 0xffff0000L
+#define AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_COUNT__SHIFT 0x00000010
+#define AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_EN_MASK 0x00000001L
+#define AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_EN__SHIFT 0x00000000
+#define AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_SOURCE_MASK 0x00000100L
+#define AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_SOURCE__SHIFT 0x00000008
+#define AFMT_AUDIO_CRC_RESULT__AFMT_AUDIO_CRC_DONE_MASK 0x00000001L
+#define AFMT_AUDIO_CRC_RESULT__AFMT_AUDIO_CRC_DONE__SHIFT 0x00000000
+#define AFMT_AUDIO_CRC_RESULT__AFMT_AUDIO_CRC_MASK 0xffffff00L
+#define AFMT_AUDIO_CRC_RESULT__AFMT_AUDIO_CRC__SHIFT 0x00000008
+#define AFMT_AUDIO_DBG_DTO_CNTL__AFMT_AUDIO_DTO_DBG_BASE_MASK 0x00000100L
+#define AFMT_AUDIO_DBG_DTO_CNTL__AFMT_AUDIO_DTO_DBG_BASE__SHIFT 0x00000008
+#define AFMT_AUDIO_DBG_DTO_CNTL__AFMT_AUDIO_DTO_DBG_DIV_MASK 0x00070000L
+#define AFMT_AUDIO_DBG_DTO_CNTL__AFMT_AUDIO_DTO_DBG_DIV__SHIFT 0x00000010
+#define AFMT_AUDIO_DBG_DTO_CNTL__AFMT_AUDIO_DTO_DBG_MULTI_MASK 0x00007000L
+#define AFMT_AUDIO_DBG_DTO_CNTL__AFMT_AUDIO_DTO_DBG_MULTI__SHIFT 0x0000000c
+#define AFMT_AUDIO_DBG_DTO_CNTL__AFMT_AUDIO_DTO_FS_DIV_SEL_MASK 0x00000007L
+#define AFMT_AUDIO_DBG_DTO_CNTL__AFMT_AUDIO_DTO_FS_DIV_SEL__SHIFT 0x00000000
+#define AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CC_MASK 0x00000700L
+#define AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CC__SHIFT 0x00000008
+#define AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CHECKSUM_MASK 0x000000ffL
+#define AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CHECKSUM_OFFSET_MASK 0x00ff0000L
+#define AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CHECKSUM_OFFSET__SHIFT 0x00000010
+#define AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CHECKSUM__SHIFT 0x00000000
+#define AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CT_MASK 0x00007800L
+#define AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CT__SHIFT 0x0000000b
+#define AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CXT_MASK 0x1f000000L
+#define AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CXT__SHIFT 0x00000018
+#define AFMT_AUDIO_INFO1__AFMT_AUDIO_INFO_CA_MASK 0x000000ffL
+#define AFMT_AUDIO_INFO1__AFMT_AUDIO_INFO_CA__SHIFT 0x00000000
+#define AFMT_AUDIO_INFO1__AFMT_AUDIO_INFO_DM_INH_MASK 0x00008000L
+#define AFMT_AUDIO_INFO1__AFMT_AUDIO_INFO_DM_INH__SHIFT 0x0000000f
+#define AFMT_AUDIO_INFO1__AFMT_AUDIO_INFO_LFEPBL_MASK 0x00030000L
+#define AFMT_AUDIO_INFO1__AFMT_AUDIO_INFO_LFEPBL__SHIFT 0x00000010
+#define AFMT_AUDIO_INFO1__AFMT_AUDIO_INFO_LSV_MASK 0x00007800L
+#define AFMT_AUDIO_INFO1__AFMT_AUDIO_INFO_LSV__SHIFT 0x0000000b
+#define AFMT_AUDIO_PACKET_CONTROL2__AFMT_60958_OSF_OVRD_MASK 0x10000000L
+#define AFMT_AUDIO_PACKET_CONTROL2__AFMT_60958_OSF_OVRD__SHIFT 0x0000001c
+#define AFMT_AUDIO_PACKET_CONTROL2__AFMT_AUDIO_CHANNEL_ENABLE_MASK 0x0000ff00L
+#define AFMT_AUDIO_PACKET_CONTROL2__AFMT_AUDIO_CHANNEL_ENABLE__SHIFT 0x00000008
+#define AFMT_AUDIO_PACKET_CONTROL2__AFMT_AUDIO_LAYOUT_OVRD_MASK 0x00000001L
+#define AFMT_AUDIO_PACKET_CONTROL2__AFMT_AUDIO_LAYOUT_OVRD__SHIFT 0x00000000
+#define AFMT_AUDIO_PACKET_CONTROL2__AFMT_AUDIO_LAYOUT_SELECT_MASK 0x00000002L
+#define AFMT_AUDIO_PACKET_CONTROL2__AFMT_AUDIO_LAYOUT_SELECT__SHIFT 0x00000001
+#define AFMT_AUDIO_PACKET_CONTROL2__AFMT_DP_AUDIO_STREAM_ID_MASK 0x00ff0000L
+#define AFMT_AUDIO_PACKET_CONTROL2__AFMT_DP_AUDIO_STREAM_ID__SHIFT 0x00000010
+#define AFMT_AUDIO_PACKET_CONTROL2__AFMT_HBR_ENABLE_OVRD_MASK 0x01000000L
+#define AFMT_AUDIO_PACKET_CONTROL2__AFMT_HBR_ENABLE_OVRD__SHIFT 0x00000018
+#define AFMT_AUDIO_PACKET_CONTROL__AFMT_60958_CS_UPDATE_MASK 0x04000000L
+#define AFMT_AUDIO_PACKET_CONTROL__AFMT_60958_CS_UPDATE__SHIFT 0x0000001a
+#define AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_CHANNEL_SWAP_MASK 0x01000000L
+#define AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_CHANNEL_SWAP__SHIFT 0x00000018
+#define AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_FIFO_OVERFLOW_ACK_MASK 0x00800000L
+#define AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_FIFO_OVERFLOW_ACK__SHIFT 0x00000017
+#define AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_SAMPLE_SEND_MASK 0x00000001L
+#define AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_SAMPLE_SEND__SHIFT 0x00000000
+#define AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_TEST_EN_MASK 0x00001000L
+#define AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_TEST_EN__SHIFT 0x0000000c
+#define AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_TEST_MODE_MASK 0x00004000L
+#define AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_TEST_MODE__SHIFT 0x0000000e
+#define AFMT_AUDIO_PACKET_CONTROL__AFMT_AZ_AUDIO_ENABLE_CHG_ACK_MASK 0x40000000L
+#define AFMT_AUDIO_PACKET_CONTROL__AFMT_AZ_AUDIO_ENABLE_CHG_ACK__SHIFT 0x0000001e
+#define AFMT_AUDIO_PACKET_CONTROL__AFMT_BLANK_TEST_DATA_ON_ENC_ENB_MASK 0x80000000L
+#define AFMT_AUDIO_PACKET_CONTROL__AFMT_BLANK_TEST_DATA_ON_ENC_ENB__SHIFT 0x0000001f
+#define AFMT_AUDIO_PACKET_CONTROL__AFMT_RESET_FIFO_WHEN_AUDIO_DIS_MASK 0x00000800L
+#define AFMT_AUDIO_PACKET_CONTROL__AFMT_RESET_FIFO_WHEN_AUDIO_DIS__SHIFT 0x0000000b
+#define AFMT_AUDIO_SRC_CONTROL__AFMT_AUDIO_SRC_SELECT_MASK 0x00000007L
+#define AFMT_AUDIO_SRC_CONTROL__AFMT_AUDIO_SRC_SELECT__SHIFT 0x00000000
+#define AFMT_AVI_INFO0__AFMT_AVI_INFO_A_MASK 0x00001000L
+#define AFMT_AVI_INFO0__AFMT_AVI_INFO_A__SHIFT 0x0000000c
+#define AFMT_AVI_INFO0__AFMT_AVI_INFO_B_MASK 0x00000c00L
+#define AFMT_AVI_INFO0__AFMT_AVI_INFO_B__SHIFT 0x0000000a
+#define AFMT_AVI_INFO0__AFMT_AVI_INFO_CHECKSUM_MASK 0x000000ffL
+#define AFMT_AVI_INFO0__AFMT_AVI_INFO_CHECKSUM__SHIFT 0x00000000
+#define AFMT_AVI_INFO0__AFMT_AVI_INFO_C_MASK 0x00c00000L
+#define AFMT_AVI_INFO0__AFMT_AVI_INFO_C__SHIFT 0x00000016
+#define AFMT_AVI_INFO0__AFMT_AVI_INFO_EC_MASK 0x70000000L
+#define AFMT_AVI_INFO0__AFMT_AVI_INFO_EC__SHIFT 0x0000001c
+#define AFMT_AVI_INFO0__AFMT_AVI_INFO_ITC_MASK 0x80000000L
+#define AFMT_AVI_INFO0__AFMT_AVI_INFO_ITC__SHIFT 0x0000001f
+#define AFMT_AVI_INFO0__AFMT_AVI_INFO_M_MASK 0x00300000L
+#define AFMT_AVI_INFO0__AFMT_AVI_INFO_M__SHIFT 0x00000014
+#define AFMT_AVI_INFO0__AFMT_AVI_INFO_PB1_RSVD_MASK 0x00008000L
+#define AFMT_AVI_INFO0__AFMT_AVI_INFO_PB1_RSVD__SHIFT 0x0000000f
+#define AFMT_AVI_INFO0__AFMT_AVI_INFO_Q_MASK 0x0c000000L
+#define AFMT_AVI_INFO0__AFMT_AVI_INFO_Q__SHIFT 0x0000001a
+#define AFMT_AVI_INFO0__AFMT_AVI_INFO_R_MASK 0x000f0000L
+#define AFMT_AVI_INFO0__AFMT_AVI_INFO_R__SHIFT 0x00000010
+#define AFMT_AVI_INFO0__AFMT_AVI_INFO_SC_MASK 0x03000000L
+#define AFMT_AVI_INFO0__AFMT_AVI_INFO_SC__SHIFT 0x00000018
+#define AFMT_AVI_INFO0__AFMT_AVI_INFO_S_MASK 0x00000300L
+#define AFMT_AVI_INFO0__AFMT_AVI_INFO_S__SHIFT 0x00000008
+#define AFMT_AVI_INFO0__AFMT_AVI_INFO_Y_MASK 0x00006000L
+#define AFMT_AVI_INFO0__AFMT_AVI_INFO_Y__SHIFT 0x0000000d
+#define AFMT_AVI_INFO1__AFMT_AVI_INFO_CN_MASK 0x00003000L
+#define AFMT_AVI_INFO1__AFMT_AVI_INFO_CN__SHIFT 0x0000000c
+#define AFMT_AVI_INFO1__AFMT_AVI_INFO_PB4_RSVD_MASK 0x00000080L
+#define AFMT_AVI_INFO1__AFMT_AVI_INFO_PB4_RSVD__SHIFT 0x00000007
+#define AFMT_AVI_INFO1__AFMT_AVI_INFO_PR_MASK 0x00000f00L
+#define AFMT_AVI_INFO1__AFMT_AVI_INFO_PR__SHIFT 0x00000008
+#define AFMT_AVI_INFO1__AFMT_AVI_INFO_TOP_MASK 0xffff0000L
+#define AFMT_AVI_INFO1__AFMT_AVI_INFO_TOP__SHIFT 0x00000010
+#define AFMT_AVI_INFO1__AFMT_AVI_INFO_VIC_MASK 0x0000007fL
+#define AFMT_AVI_INFO1__AFMT_AVI_INFO_VIC__SHIFT 0x00000000
+#define AFMT_AVI_INFO1__AFMT_AVI_INFO_YQ_MASK 0x0000c000L
+#define AFMT_AVI_INFO1__AFMT_AVI_INFO_YQ__SHIFT 0x0000000e
+#define AFMT_AVI_INFO2__AFMT_AVI_INFO_BOTTOM_MASK 0x0000ffffL
+#define AFMT_AVI_INFO2__AFMT_AVI_INFO_BOTTOM__SHIFT 0x00000000
+#define AFMT_AVI_INFO2__AFMT_AVI_INFO_LEFT_MASK 0xffff0000L
+#define AFMT_AVI_INFO2__AFMT_AVI_INFO_LEFT__SHIFT 0x00000010
+#define AFMT_AVI_INFO3__AFMT_AVI_INFO_RIGHT_MASK 0x0000ffffL
+#define AFMT_AVI_INFO3__AFMT_AVI_INFO_RIGHT__SHIFT 0x00000000
+#define AFMT_AVI_INFO3__AFMT_AVI_INFO_VERSION_MASK 0xff000000L
+#define AFMT_AVI_INFO3__AFMT_AVI_INFO_VERSION__SHIFT 0x00000018
+#define AFMT_GENERIC_0__AFMT_GENERIC_BYTE0_MASK 0x000000ffL
+#define AFMT_GENERIC_0__AFMT_GENERIC_BYTE0__SHIFT 0x00000000
+#define AFMT_GENERIC_0__AFMT_GENERIC_BYTE1_MASK 0x0000ff00L
+#define AFMT_GENERIC_0__AFMT_GENERIC_BYTE1__SHIFT 0x00000008
+#define AFMT_GENERIC_0__AFMT_GENERIC_BYTE2_MASK 0x00ff0000L
+#define AFMT_GENERIC_0__AFMT_GENERIC_BYTE2__SHIFT 0x00000010
+#define AFMT_GENERIC_0__AFMT_GENERIC_BYTE3_MASK 0xff000000L
+#define AFMT_GENERIC_0__AFMT_GENERIC_BYTE3__SHIFT 0x00000018
+#define AFMT_GENERIC_1__AFMT_GENERIC_BYTE4_MASK 0x000000ffL
+#define AFMT_GENERIC_1__AFMT_GENERIC_BYTE4__SHIFT 0x00000000
+#define AFMT_GENERIC_1__AFMT_GENERIC_BYTE5_MASK 0x0000ff00L
+#define AFMT_GENERIC_1__AFMT_GENERIC_BYTE5__SHIFT 0x00000008
+#define AFMT_GENERIC_1__AFMT_GENERIC_BYTE6_MASK 0x00ff0000L
+#define AFMT_GENERIC_1__AFMT_GENERIC_BYTE6__SHIFT 0x00000010
+#define AFMT_GENERIC_1__AFMT_GENERIC_BYTE7_MASK 0xff000000L
+#define AFMT_GENERIC_1__AFMT_GENERIC_BYTE7__SHIFT 0x00000018
+#define AFMT_GENERIC_2__AFMT_GENERIC_BYTE10_MASK 0x00ff0000L
+#define AFMT_GENERIC_2__AFMT_GENERIC_BYTE10__SHIFT 0x00000010
+#define AFMT_GENERIC_2__AFMT_GENERIC_BYTE11_MASK 0xff000000L
+#define AFMT_GENERIC_2__AFMT_GENERIC_BYTE11__SHIFT 0x00000018
+#define AFMT_GENERIC_2__AFMT_GENERIC_BYTE8_MASK 0x000000ffL
+#define AFMT_GENERIC_2__AFMT_GENERIC_BYTE8__SHIFT 0x00000000
+#define AFMT_GENERIC_2__AFMT_GENERIC_BYTE9_MASK 0x0000ff00L
+#define AFMT_GENERIC_2__AFMT_GENERIC_BYTE9__SHIFT 0x00000008
+#define AFMT_GENERIC_3__AFMT_GENERIC_BYTE12_MASK 0x000000ffL
+#define AFMT_GENERIC_3__AFMT_GENERIC_BYTE12__SHIFT 0x00000000
+#define AFMT_GENERIC_3__AFMT_GENERIC_BYTE13_MASK 0x0000ff00L
+#define AFMT_GENERIC_3__AFMT_GENERIC_BYTE13__SHIFT 0x00000008
+#define AFMT_GENERIC_3__AFMT_GENERIC_BYTE14_MASK 0x00ff0000L
+#define AFMT_GENERIC_3__AFMT_GENERIC_BYTE14__SHIFT 0x00000010
+#define AFMT_GENERIC_3__AFMT_GENERIC_BYTE15_MASK 0xff000000L
+#define AFMT_GENERIC_3__AFMT_GENERIC_BYTE15__SHIFT 0x00000018
+#define AFMT_GENERIC_4__AFMT_GENERIC_BYTE16_MASK 0x000000ffL
+#define AFMT_GENERIC_4__AFMT_GENERIC_BYTE16__SHIFT 0x00000000
+#define AFMT_GENERIC_4__AFMT_GENERIC_BYTE17_MASK 0x0000ff00L
+#define AFMT_GENERIC_4__AFMT_GENERIC_BYTE17__SHIFT 0x00000008
+#define AFMT_GENERIC_4__AFMT_GENERIC_BYTE18_MASK 0x00ff0000L
+#define AFMT_GENERIC_4__AFMT_GENERIC_BYTE18__SHIFT 0x00000010
+#define AFMT_GENERIC_4__AFMT_GENERIC_BYTE19_MASK 0xff000000L
+#define AFMT_GENERIC_4__AFMT_GENERIC_BYTE19__SHIFT 0x00000018
+#define AFMT_GENERIC_5__AFMT_GENERIC_BYTE20_MASK 0x000000ffL
+#define AFMT_GENERIC_5__AFMT_GENERIC_BYTE20__SHIFT 0x00000000
+#define AFMT_GENERIC_5__AFMT_GENERIC_BYTE21_MASK 0x0000ff00L
+#define AFMT_GENERIC_5__AFMT_GENERIC_BYTE21__SHIFT 0x00000008
+#define AFMT_GENERIC_5__AFMT_GENERIC_BYTE22_MASK 0x00ff0000L
+#define AFMT_GENERIC_5__AFMT_GENERIC_BYTE22__SHIFT 0x00000010
+#define AFMT_GENERIC_5__AFMT_GENERIC_BYTE23_MASK 0xff000000L
+#define AFMT_GENERIC_5__AFMT_GENERIC_BYTE23__SHIFT 0x00000018
+#define AFMT_GENERIC_6__AFMT_GENERIC_BYTE24_MASK 0x000000ffL
+#define AFMT_GENERIC_6__AFMT_GENERIC_BYTE24__SHIFT 0x00000000
+#define AFMT_GENERIC_6__AFMT_GENERIC_BYTE25_MASK 0x0000ff00L
+#define AFMT_GENERIC_6__AFMT_GENERIC_BYTE25__SHIFT 0x00000008
+#define AFMT_GENERIC_6__AFMT_GENERIC_BYTE26_MASK 0x00ff0000L
+#define AFMT_GENERIC_6__AFMT_GENERIC_BYTE26__SHIFT 0x00000010
+#define AFMT_GENERIC_6__AFMT_GENERIC_BYTE27_MASK 0xff000000L
+#define AFMT_GENERIC_6__AFMT_GENERIC_BYTE27__SHIFT 0x00000018
+#define AFMT_GENERIC_7__AFMT_GENERIC_BYTE28_MASK 0x000000ffL
+#define AFMT_GENERIC_7__AFMT_GENERIC_BYTE28__SHIFT 0x00000000
+#define AFMT_GENERIC_7__AFMT_GENERIC_BYTE29_MASK 0x0000ff00L
+#define AFMT_GENERIC_7__AFMT_GENERIC_BYTE29__SHIFT 0x00000008
+#define AFMT_GENERIC_7__AFMT_GENERIC_BYTE30_MASK 0x00ff0000L
+#define AFMT_GENERIC_7__AFMT_GENERIC_BYTE30__SHIFT 0x00000010
+#define AFMT_GENERIC_7__AFMT_GENERIC_BYTE31_MASK 0xff000000L
+#define AFMT_GENERIC_7__AFMT_GENERIC_BYTE31__SHIFT 0x00000018
+#define AFMT_GENERIC_HDR__AFMT_GENERIC_HB0_MASK 0x000000ffL
+#define AFMT_GENERIC_HDR__AFMT_GENERIC_HB0__SHIFT 0x00000000
+#define AFMT_GENERIC_HDR__AFMT_GENERIC_HB1_MASK 0x0000ff00L
+#define AFMT_GENERIC_HDR__AFMT_GENERIC_HB1__SHIFT 0x00000008
+#define AFMT_GENERIC_HDR__AFMT_GENERIC_HB2_MASK 0x00ff0000L
+#define AFMT_GENERIC_HDR__AFMT_GENERIC_HB2__SHIFT 0x00000010
+#define AFMT_GENERIC_HDR__AFMT_GENERIC_HB3_MASK 0xff000000L
+#define AFMT_GENERIC_HDR__AFMT_GENERIC_HB3__SHIFT 0x00000018
+#define AFMT_INFOFRAME_CONTROL0__AFMT_AUDIO_INFO_SOURCE_MASK 0x00000040L
+#define AFMT_INFOFRAME_CONTROL0__AFMT_AUDIO_INFO_SOURCE__SHIFT 0x00000006
+#define AFMT_INFOFRAME_CONTROL0__AFMT_AUDIO_INFO_UPDATE_MASK 0x00000080L
+#define AFMT_INFOFRAME_CONTROL0__AFMT_AUDIO_INFO_UPDATE__SHIFT 0x00000007
+#define AFMT_INFOFRAME_CONTROL0__AFMT_MPEG_INFO_UPDATE_MASK 0x00000400L
+#define AFMT_INFOFRAME_CONTROL0__AFMT_MPEG_INFO_UPDATE__SHIFT 0x0000000a
+#define AFMT_ISRC1_0__AFMT_ISRC_CONTINUE_MASK 0x00000040L
+#define AFMT_ISRC1_0__AFMT_ISRC_CONTINUE__SHIFT 0x00000006
+#define AFMT_ISRC1_0__AFMT_ISRC_STATUS_MASK 0x00000007L
+#define AFMT_ISRC1_0__AFMT_ISRC_STATUS__SHIFT 0x00000000
+#define AFMT_ISRC1_0__AFMT_ISRC_VALID_MASK 0x00000080L
+#define AFMT_ISRC1_0__AFMT_ISRC_VALID__SHIFT 0x00000007
+#define AFMT_ISRC1_1__AFMT_UPC_EAN_ISRC0_MASK 0x000000ffL
+#define AFMT_ISRC1_1__AFMT_UPC_EAN_ISRC0__SHIFT 0x00000000
+#define AFMT_ISRC1_1__AFMT_UPC_EAN_ISRC1_MASK 0x0000ff00L
+#define AFMT_ISRC1_1__AFMT_UPC_EAN_ISRC1__SHIFT 0x00000008
+#define AFMT_ISRC1_1__AFMT_UPC_EAN_ISRC2_MASK 0x00ff0000L
+#define AFMT_ISRC1_1__AFMT_UPC_EAN_ISRC2__SHIFT 0x00000010
+#define AFMT_ISRC1_1__AFMT_UPC_EAN_ISRC3_MASK 0xff000000L
+#define AFMT_ISRC1_1__AFMT_UPC_EAN_ISRC3__SHIFT 0x00000018
+#define AFMT_ISRC1_2__AFMT_UPC_EAN_ISRC4_MASK 0x000000ffL
+#define AFMT_ISRC1_2__AFMT_UPC_EAN_ISRC4__SHIFT 0x00000000
+#define AFMT_ISRC1_2__AFMT_UPC_EAN_ISRC5_MASK 0x0000ff00L
+#define AFMT_ISRC1_2__AFMT_UPC_EAN_ISRC5__SHIFT 0x00000008
+#define AFMT_ISRC1_2__AFMT_UPC_EAN_ISRC6_MASK 0x00ff0000L
+#define AFMT_ISRC1_2__AFMT_UPC_EAN_ISRC6__SHIFT 0x00000010
+#define AFMT_ISRC1_2__AFMT_UPC_EAN_ISRC7_MASK 0xff000000L
+#define AFMT_ISRC1_2__AFMT_UPC_EAN_ISRC7__SHIFT 0x00000018
+#define AFMT_ISRC1_3__AFMT_UPC_EAN_ISRC10_MASK 0x00ff0000L
+#define AFMT_ISRC1_3__AFMT_UPC_EAN_ISRC10__SHIFT 0x00000010
+#define AFMT_ISRC1_3__AFMT_UPC_EAN_ISRC11_MASK 0xff000000L
+#define AFMT_ISRC1_3__AFMT_UPC_EAN_ISRC11__SHIFT 0x00000018
+#define AFMT_ISRC1_3__AFMT_UPC_EAN_ISRC8_MASK 0x000000ffL
+#define AFMT_ISRC1_3__AFMT_UPC_EAN_ISRC8__SHIFT 0x00000000
+#define AFMT_ISRC1_3__AFMT_UPC_EAN_ISRC9_MASK 0x0000ff00L
+#define AFMT_ISRC1_3__AFMT_UPC_EAN_ISRC9__SHIFT 0x00000008
+#define AFMT_ISRC1_4__AFMT_UPC_EAN_ISRC12_MASK 0x000000ffL
+#define AFMT_ISRC1_4__AFMT_UPC_EAN_ISRC12__SHIFT 0x00000000
+#define AFMT_ISRC1_4__AFMT_UPC_EAN_ISRC13_MASK 0x0000ff00L
+#define AFMT_ISRC1_4__AFMT_UPC_EAN_ISRC13__SHIFT 0x00000008
+#define AFMT_ISRC1_4__AFMT_UPC_EAN_ISRC14_MASK 0x00ff0000L
+#define AFMT_ISRC1_4__AFMT_UPC_EAN_ISRC14__SHIFT 0x00000010
+#define AFMT_ISRC1_4__AFMT_UPC_EAN_ISRC15_MASK 0xff000000L
+#define AFMT_ISRC1_4__AFMT_UPC_EAN_ISRC15__SHIFT 0x00000018
+#define AFMT_ISRC2_0__AFMT_UPC_EAN_ISRC16_MASK 0x000000ffL
+#define AFMT_ISRC2_0__AFMT_UPC_EAN_ISRC16__SHIFT 0x00000000
+#define AFMT_ISRC2_0__AFMT_UPC_EAN_ISRC17_MASK 0x0000ff00L
+#define AFMT_ISRC2_0__AFMT_UPC_EAN_ISRC17__SHIFT 0x00000008
+#define AFMT_ISRC2_0__AFMT_UPC_EAN_ISRC18_MASK 0x00ff0000L
+#define AFMT_ISRC2_0__AFMT_UPC_EAN_ISRC18__SHIFT 0x00000010
+#define AFMT_ISRC2_0__AFMT_UPC_EAN_ISRC19_MASK 0xff000000L
+#define AFMT_ISRC2_0__AFMT_UPC_EAN_ISRC19__SHIFT 0x00000018
+#define AFMT_ISRC2_1__AFMT_UPC_EAN_ISRC20_MASK 0x000000ffL
+#define AFMT_ISRC2_1__AFMT_UPC_EAN_ISRC20__SHIFT 0x00000000
+#define AFMT_ISRC2_1__AFMT_UPC_EAN_ISRC21_MASK 0x0000ff00L
+#define AFMT_ISRC2_1__AFMT_UPC_EAN_ISRC21__SHIFT 0x00000008
+#define AFMT_ISRC2_1__AFMT_UPC_EAN_ISRC22_MASK 0x00ff0000L
+#define AFMT_ISRC2_1__AFMT_UPC_EAN_ISRC22__SHIFT 0x00000010
+#define AFMT_ISRC2_1__AFMT_UPC_EAN_ISRC23_MASK 0xff000000L
+#define AFMT_ISRC2_1__AFMT_UPC_EAN_ISRC23__SHIFT 0x00000018
+#define AFMT_ISRC2_2__AFMT_UPC_EAN_ISRC24_MASK 0x000000ffL
+#define AFMT_ISRC2_2__AFMT_UPC_EAN_ISRC24__SHIFT 0x00000000
+#define AFMT_ISRC2_2__AFMT_UPC_EAN_ISRC25_MASK 0x0000ff00L
+#define AFMT_ISRC2_2__AFMT_UPC_EAN_ISRC25__SHIFT 0x00000008
+#define AFMT_ISRC2_2__AFMT_UPC_EAN_ISRC26_MASK 0x00ff0000L
+#define AFMT_ISRC2_2__AFMT_UPC_EAN_ISRC26__SHIFT 0x00000010
+#define AFMT_ISRC2_2__AFMT_UPC_EAN_ISRC27_MASK 0xff000000L
+#define AFMT_ISRC2_2__AFMT_UPC_EAN_ISRC27__SHIFT 0x00000018
+#define AFMT_ISRC2_3__AFMT_UPC_EAN_ISRC28_MASK 0x000000ffL
+#define AFMT_ISRC2_3__AFMT_UPC_EAN_ISRC28__SHIFT 0x00000000
+#define AFMT_ISRC2_3__AFMT_UPC_EAN_ISRC29_MASK 0x0000ff00L
+#define AFMT_ISRC2_3__AFMT_UPC_EAN_ISRC29__SHIFT 0x00000008
+#define AFMT_ISRC2_3__AFMT_UPC_EAN_ISRC30_MASK 0x00ff0000L
+#define AFMT_ISRC2_3__AFMT_UPC_EAN_ISRC30__SHIFT 0x00000010
+#define AFMT_ISRC2_3__AFMT_UPC_EAN_ISRC31_MASK 0xff000000L
+#define AFMT_ISRC2_3__AFMT_UPC_EAN_ISRC31__SHIFT 0x00000018
+#define AFMT_MPEG_INFO0__AFMT_MPEG_INFO_CHECKSUM_MASK 0x000000ffL
+#define AFMT_MPEG_INFO0__AFMT_MPEG_INFO_CHECKSUM__SHIFT 0x00000000
+#define AFMT_MPEG_INFO0__AFMT_MPEG_INFO_MB0_MASK 0x0000ff00L
+#define AFMT_MPEG_INFO0__AFMT_MPEG_INFO_MB0__SHIFT 0x00000008
+#define AFMT_MPEG_INFO0__AFMT_MPEG_INFO_MB1_MASK 0x00ff0000L
+#define AFMT_MPEG_INFO0__AFMT_MPEG_INFO_MB1__SHIFT 0x00000010
+#define AFMT_MPEG_INFO0__AFMT_MPEG_INFO_MB2_MASK 0xff000000L
+#define AFMT_MPEG_INFO0__AFMT_MPEG_INFO_MB2__SHIFT 0x00000018
+#define AFMT_MPEG_INFO1__AFMT_MPEG_INFO_FR_MASK 0x00001000L
+#define AFMT_MPEG_INFO1__AFMT_MPEG_INFO_FR__SHIFT 0x0000000c
+#define AFMT_MPEG_INFO1__AFMT_MPEG_INFO_MB3_MASK 0x000000ffL
+#define AFMT_MPEG_INFO1__AFMT_MPEG_INFO_MB3__SHIFT 0x00000000
+#define AFMT_MPEG_INFO1__AFMT_MPEG_INFO_MF_MASK 0x00000300L
+#define AFMT_MPEG_INFO1__AFMT_MPEG_INFO_MF__SHIFT 0x00000008
+#define AFMT_RAMP_CONTROL0__AFMT_RAMP_DATA_SIGN_MASK 0x80000000L
+#define AFMT_RAMP_CONTROL0__AFMT_RAMP_DATA_SIGN__SHIFT 0x0000001f
+#define AFMT_RAMP_CONTROL0__AFMT_RAMP_MAX_COUNT_MASK 0x00ffffffL
+#define AFMT_RAMP_CONTROL0__AFMT_RAMP_MAX_COUNT__SHIFT 0x00000000
+#define AFMT_RAMP_CONTROL1__AFMT_AUDIO_TEST_CH_DISABLE_MASK 0xff000000L
+#define AFMT_RAMP_CONTROL1__AFMT_AUDIO_TEST_CH_DISABLE__SHIFT 0x00000018
+#define AFMT_RAMP_CONTROL1__AFMT_RAMP_MIN_COUNT_MASK 0x00ffffffL
+#define AFMT_RAMP_CONTROL1__AFMT_RAMP_MIN_COUNT__SHIFT 0x00000000
+#define AFMT_RAMP_CONTROL2__AFMT_RAMP_INC_COUNT_MASK 0x00ffffffL
+#define AFMT_RAMP_CONTROL2__AFMT_RAMP_INC_COUNT__SHIFT 0x00000000
+#define AFMT_RAMP_CONTROL3__AFMT_RAMP_DEC_COUNT_MASK 0x00ffffffL
+#define AFMT_RAMP_CONTROL3__AFMT_RAMP_DEC_COUNT__SHIFT 0x00000000
+#define AFMT_STATUS__AFMT_AUDIO_ENABLE_MASK 0x00000010L
+#define AFMT_STATUS__AFMT_AUDIO_ENABLE__SHIFT 0x00000004
+#define AFMT_STATUS__AFMT_AUDIO_FIFO_OVERFLOW_MASK 0x01000000L
+#define AFMT_STATUS__AFMT_AUDIO_FIFO_OVERFLOW__SHIFT 0x00000018
+#define AFMT_STATUS__AFMT_AZ_AUDIO_ENABLE_CHG_MASK 0x40000000L
+#define AFMT_STATUS__AFMT_AZ_AUDIO_ENABLE_CHG__SHIFT 0x0000001e
+#define AFMT_STATUS__AFMT_AZ_HBR_ENABLE_MASK 0x00000100L
+#define AFMT_STATUS__AFMT_AZ_HBR_ENABLE__SHIFT 0x00000008
+#define AFMT_VBI_PACKET_CONTROL__AFMT_GENERIC0_UPDATE_MASK 0x00000004L
+#define AFMT_VBI_PACKET_CONTROL__AFMT_GENERIC0_UPDATE__SHIFT 0x00000002
+#define AFMT_VBI_PACKET_CONTROL__AFMT_GENERIC2_UPDATE_MASK 0x00000008L
+#define AFMT_VBI_PACKET_CONTROL__AFMT_GENERIC2_UPDATE__SHIFT 0x00000003
+#define AFMT_VBI_PACKET_CONTROL__AFMT_GENERIC_INDEX_MASK 0xc0000000L
+#define AFMT_VBI_PACKET_CONTROL__AFMT_GENERIC_INDEX__SHIFT 0x0000001e
+#define ATTR00__ATTR_PAL_MASK 0x0000003fL
+#define ATTR00__ATTR_PAL__SHIFT 0x00000000
+#define ATTR01__ATTR_PAL_MASK 0x0000003fL
+#define ATTR01__ATTR_PAL__SHIFT 0x00000000
+#define ATTR02__ATTR_PAL_MASK 0x0000003fL
+#define ATTR02__ATTR_PAL__SHIFT 0x00000000
+#define ATTR03__ATTR_PAL_MASK 0x0000003fL
+#define ATTR03__ATTR_PAL__SHIFT 0x00000000
+#define ATTR04__ATTR_PAL_MASK 0x0000003fL
+#define ATTR04__ATTR_PAL__SHIFT 0x00000000
+#define ATTR05__ATTR_PAL_MASK 0x0000003fL
+#define ATTR05__ATTR_PAL__SHIFT 0x00000000
+#define ATTR06__ATTR_PAL_MASK 0x0000003fL
+#define ATTR06__ATTR_PAL__SHIFT 0x00000000
+#define ATTR07__ATTR_PAL_MASK 0x0000003fL
+#define ATTR07__ATTR_PAL__SHIFT 0x00000000
+#define ATTR08__ATTR_PAL_MASK 0x0000003fL
+#define ATTR08__ATTR_PAL__SHIFT 0x00000000
+#define ATTR09__ATTR_PAL_MASK 0x0000003fL
+#define ATTR09__ATTR_PAL__SHIFT 0x00000000
+#define ATTR0A__ATTR_PAL_MASK 0x0000003fL
+#define ATTR0A__ATTR_PAL__SHIFT 0x00000000
+#define ATTR0B__ATTR_PAL_MASK 0x0000003fL
+#define ATTR0B__ATTR_PAL__SHIFT 0x00000000
+#define ATTR0C__ATTR_PAL_MASK 0x0000003fL
+#define ATTR0C__ATTR_PAL__SHIFT 0x00000000
+#define ATTR0D__ATTR_PAL_MASK 0x0000003fL
+#define ATTR0D__ATTR_PAL__SHIFT 0x00000000
+#define ATTR0E__ATTR_PAL_MASK 0x0000003fL
+#define ATTR0E__ATTR_PAL__SHIFT 0x00000000
+#define ATTR0F__ATTR_PAL_MASK 0x0000003fL
+#define ATTR0F__ATTR_PAL__SHIFT 0x00000000
+#define ATTR10__ATTR_BLINK_EN_MASK 0x00000008L
+#define ATTR10__ATTR_BLINK_EN__SHIFT 0x00000003
+#define ATTR10__ATTR_CSEL_EN_MASK 0x00000080L
+#define ATTR10__ATTR_CSEL_EN__SHIFT 0x00000007
+#define ATTR10__ATTR_GRPH_MODE_MASK 0x00000001L
+#define ATTR10__ATTR_GRPH_MODE__SHIFT 0x00000000
+#define ATTR10__ATTR_LGRPH_EN_MASK 0x00000004L
+#define ATTR10__ATTR_LGRPH_EN__SHIFT 0x00000002
+#define ATTR10__ATTR_MONO_EN_MASK 0x00000002L
+#define ATTR10__ATTR_MONO_EN__SHIFT 0x00000001
+#define ATTR10__ATTR_PANTOPONLY_MASK 0x00000020L
+#define ATTR10__ATTR_PANTOPONLY__SHIFT 0x00000005
+#define ATTR10__ATTR_PCLKBY2_MASK 0x00000040L
+#define ATTR10__ATTR_PCLKBY2__SHIFT 0x00000006
+#define ATTR11__ATTR_OVSC_MASK 0x000000ffL
+#define ATTR11__ATTR_OVSC__SHIFT 0x00000000
+#define ATTR12__ATTR_MAP_EN_MASK 0x0000000fL
+#define ATTR12__ATTR_MAP_EN__SHIFT 0x00000000
+#define ATTR12__ATTR_VSMUX_MASK 0x00000030L
+#define ATTR12__ATTR_VSMUX__SHIFT 0x00000004
+#define ATTR13__ATTR_PPAN_MASK 0x0000000fL
+#define ATTR13__ATTR_PPAN__SHIFT 0x00000000
+#define ATTR14__ATTR_CSEL1_MASK 0x00000003L
+#define ATTR14__ATTR_CSEL1__SHIFT 0x00000000
+#define ATTR14__ATTR_CSEL2_MASK 0x0000000cL
+#define ATTR14__ATTR_CSEL2__SHIFT 0x00000002
+#define ATTRDR__ATTR_DATA_MASK 0x000000ffL
+#define ATTRDR__ATTR_DATA__SHIFT 0x00000000
+#define ATTRDW__ATTR_DATA_MASK 0x000000ffL
+#define ATTRDW__ATTR_DATA__SHIFT 0x00000000
+#define ATTRX__ATTR_IDX_MASK 0x0000001fL
+#define ATTRX__ATTR_IDX__SHIFT 0x00000000
+#define ATTRX__ATTR_PAL_RW_ENB_MASK 0x00000020L
+#define ATTRX__ATTR_PAL_RW_ENB__SHIFT 0x00000005
+#define AUDIO_DESCRIPTOR0__DESCRIPTOR_BYTE_2_MASK 0x00ff0000L
+#define AUDIO_DESCRIPTOR0__DESCRIPTOR_BYTE_2__SHIFT 0x00000010
+#define AUDIO_DESCRIPTOR0__MAX_CHANNELS_MASK 0x00000007L
+#define AUDIO_DESCRIPTOR0__MAX_CHANNELS__SHIFT 0x00000000
+#define AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES_MASK 0x0000ff00L
+#define AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES__SHIFT 0x00000008
+#define AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES_STEREO_MASK 0xff000000L
+#define AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES_STEREO__SHIFT 0x00000018
+#define AUDIO_DESCRIPTOR10__DESCRIPTOR_BYTE_2_MASK 0x00ff0000L
+#define AUDIO_DESCRIPTOR10__DESCRIPTOR_BYTE_2__SHIFT 0x00000010
+#define AUDIO_DESCRIPTOR10__MAX_CHANNELS_MASK 0x00000007L
+#define AUDIO_DESCRIPTOR10__MAX_CHANNELS__SHIFT 0x00000000
+#define AUDIO_DESCRIPTOR10__SUPPORTED_FREQUENCIES_MASK 0x0000ff00L
+#define AUDIO_DESCRIPTOR10__SUPPORTED_FREQUENCIES__SHIFT 0x00000008
+#define AUDIO_DESCRIPTOR10__SUPPORTED_FREQUENCIES_STEREO_MASK 0xff000000L
+#define AUDIO_DESCRIPTOR10__SUPPORTED_FREQUENCIES_STEREO__SHIFT 0x00000018
+#define AUDIO_DESCRIPTOR11__DESCRIPTOR_BYTE_2_MASK 0x00ff0000L
+#define AUDIO_DESCRIPTOR11__DESCRIPTOR_BYTE_2__SHIFT 0x00000010
+#define AUDIO_DESCRIPTOR11__MAX_CHANNELS_MASK 0x00000007L
+#define AUDIO_DESCRIPTOR11__MAX_CHANNELS__SHIFT 0x00000000
+#define AUDIO_DESCRIPTOR11__SUPPORTED_FREQUENCIES_MASK 0x0000ff00L
+#define AUDIO_DESCRIPTOR11__SUPPORTED_FREQUENCIES__SHIFT 0x00000008
+#define AUDIO_DESCRIPTOR11__SUPPORTED_FREQUENCIES_STEREO_MASK 0xff000000L
+#define AUDIO_DESCRIPTOR11__SUPPORTED_FREQUENCIES_STEREO__SHIFT 0x00000018
+#define AUDIO_DESCRIPTOR12__DESCRIPTOR_BYTE_2_MASK 0x00ff0000L
+#define AUDIO_DESCRIPTOR12__DESCRIPTOR_BYTE_2__SHIFT 0x00000010
+#define AUDIO_DESCRIPTOR12__MAX_CHANNELS_MASK 0x00000007L
+#define AUDIO_DESCRIPTOR12__MAX_CHANNELS__SHIFT 0x00000000
+#define AUDIO_DESCRIPTOR12__SUPPORTED_FREQUENCIES_MASK 0x0000ff00L
+#define AUDIO_DESCRIPTOR12__SUPPORTED_FREQUENCIES__SHIFT 0x00000008
+#define AUDIO_DESCRIPTOR12__SUPPORTED_FREQUENCIES_STEREO_MASK 0xff000000L
+#define AUDIO_DESCRIPTOR12__SUPPORTED_FREQUENCIES_STEREO__SHIFT 0x00000018
+#define AUDIO_DESCRIPTOR13__DESCRIPTOR_BYTE_2_MASK 0x00ff0000L
+#define AUDIO_DESCRIPTOR13__DESCRIPTOR_BYTE_2__SHIFT 0x00000010
+#define AUDIO_DESCRIPTOR13__MAX_CHANNELS_MASK 0x00000007L
+#define AUDIO_DESCRIPTOR13__MAX_CHANNELS__SHIFT 0x00000000
+#define AUDIO_DESCRIPTOR13__SUPPORTED_FREQUENCIES_MASK 0x0000ff00L
+#define AUDIO_DESCRIPTOR13__SUPPORTED_FREQUENCIES__SHIFT 0x00000008
+#define AUDIO_DESCRIPTOR13__SUPPORTED_FREQUENCIES_STEREO_MASK 0xff000000L
+#define AUDIO_DESCRIPTOR13__SUPPORTED_FREQUENCIES_STEREO__SHIFT 0x00000018
+#define AUDIO_DESCRIPTOR1__DESCRIPTOR_BYTE_2_MASK 0x00ff0000L
+#define AUDIO_DESCRIPTOR1__DESCRIPTOR_BYTE_2__SHIFT 0x00000010
+#define AUDIO_DESCRIPTOR1__MAX_CHANNELS_MASK 0x00000007L
+#define AUDIO_DESCRIPTOR1__MAX_CHANNELS__SHIFT 0x00000000
+#define AUDIO_DESCRIPTOR1__SUPPORTED_FREQUENCIES_MASK 0x0000ff00L
+#define AUDIO_DESCRIPTOR1__SUPPORTED_FREQUENCIES__SHIFT 0x00000008
+#define AUDIO_DESCRIPTOR1__SUPPORTED_FREQUENCIES_STEREO_MASK 0xff000000L
+#define AUDIO_DESCRIPTOR1__SUPPORTED_FREQUENCIES_STEREO__SHIFT 0x00000018
+#define AUDIO_DESCRIPTOR2__DESCRIPTOR_BYTE_2_MASK 0x00ff0000L
+#define AUDIO_DESCRIPTOR2__DESCRIPTOR_BYTE_2__SHIFT 0x00000010
+#define AUDIO_DESCRIPTOR2__MAX_CHANNELS_MASK 0x00000007L
+#define AUDIO_DESCRIPTOR2__MAX_CHANNELS__SHIFT 0x00000000
+#define AUDIO_DESCRIPTOR2__SUPPORTED_FREQUENCIES_MASK 0x0000ff00L
+#define AUDIO_DESCRIPTOR2__SUPPORTED_FREQUENCIES__SHIFT 0x00000008
+#define AUDIO_DESCRIPTOR2__SUPPORTED_FREQUENCIES_STEREO_MASK 0xff000000L
+#define AUDIO_DESCRIPTOR2__SUPPORTED_FREQUENCIES_STEREO__SHIFT 0x00000018
+#define AUDIO_DESCRIPTOR3__DESCRIPTOR_BYTE_2_MASK 0x00ff0000L
+#define AUDIO_DESCRIPTOR3__DESCRIPTOR_BYTE_2__SHIFT 0x00000010
+#define AUDIO_DESCRIPTOR3__MAX_CHANNELS_MASK 0x00000007L
+#define AUDIO_DESCRIPTOR3__MAX_CHANNELS__SHIFT 0x00000000
+#define AUDIO_DESCRIPTOR3__SUPPORTED_FREQUENCIES_MASK 0x0000ff00L
+#define AUDIO_DESCRIPTOR3__SUPPORTED_FREQUENCIES__SHIFT 0x00000008
+#define AUDIO_DESCRIPTOR3__SUPPORTED_FREQUENCIES_STEREO_MASK 0xff000000L
+#define AUDIO_DESCRIPTOR3__SUPPORTED_FREQUENCIES_STEREO__SHIFT 0x00000018
+#define AUDIO_DESCRIPTOR4__DESCRIPTOR_BYTE_2_MASK 0x00ff0000L
+#define AUDIO_DESCRIPTOR4__DESCRIPTOR_BYTE_2__SHIFT 0x00000010
+#define AUDIO_DESCRIPTOR4__MAX_CHANNELS_MASK 0x00000007L
+#define AUDIO_DESCRIPTOR4__MAX_CHANNELS__SHIFT 0x00000000
+#define AUDIO_DESCRIPTOR4__SUPPORTED_FREQUENCIES_MASK 0x0000ff00L
+#define AUDIO_DESCRIPTOR4__SUPPORTED_FREQUENCIES__SHIFT 0x00000008
+#define AUDIO_DESCRIPTOR4__SUPPORTED_FREQUENCIES_STEREO_MASK 0xff000000L
+#define AUDIO_DESCRIPTOR4__SUPPORTED_FREQUENCIES_STEREO__SHIFT 0x00000018
+#define AUDIO_DESCRIPTOR5__DESCRIPTOR_BYTE_2_MASK 0x00ff0000L
+#define AUDIO_DESCRIPTOR5__DESCRIPTOR_BYTE_2__SHIFT 0x00000010
+#define AUDIO_DESCRIPTOR5__MAX_CHANNELS_MASK 0x00000007L
+#define AUDIO_DESCRIPTOR5__MAX_CHANNELS__SHIFT 0x00000000
+#define AUDIO_DESCRIPTOR5__SUPPORTED_FREQUENCIES_MASK 0x0000ff00L
+#define AUDIO_DESCRIPTOR5__SUPPORTED_FREQUENCIES__SHIFT 0x00000008
+#define AUDIO_DESCRIPTOR5__SUPPORTED_FREQUENCIES_STEREO_MASK 0xff000000L
+#define AUDIO_DESCRIPTOR5__SUPPORTED_FREQUENCIES_STEREO__SHIFT 0x00000018
+#define AUDIO_DESCRIPTOR6__DESCRIPTOR_BYTE_2_MASK 0x00ff0000L
+#define AUDIO_DESCRIPTOR6__DESCRIPTOR_BYTE_2__SHIFT 0x00000010
+#define AUDIO_DESCRIPTOR6__MAX_CHANNELS_MASK 0x00000007L
+#define AUDIO_DESCRIPTOR6__MAX_CHANNELS__SHIFT 0x00000000
+#define AUDIO_DESCRIPTOR6__SUPPORTED_FREQUENCIES_MASK 0x0000ff00L
+#define AUDIO_DESCRIPTOR6__SUPPORTED_FREQUENCIES__SHIFT 0x00000008
+#define AUDIO_DESCRIPTOR6__SUPPORTED_FREQUENCIES_STEREO_MASK 0xff000000L
+#define AUDIO_DESCRIPTOR6__SUPPORTED_FREQUENCIES_STEREO__SHIFT 0x00000018
+#define AUDIO_DESCRIPTOR7__DESCRIPTOR_BYTE_2_MASK 0x00ff0000L
+#define AUDIO_DESCRIPTOR7__DESCRIPTOR_BYTE_2__SHIFT 0x00000010
+#define AUDIO_DESCRIPTOR7__MAX_CHANNELS_MASK 0x00000007L
+#define AUDIO_DESCRIPTOR7__MAX_CHANNELS__SHIFT 0x00000000
+#define AUDIO_DESCRIPTOR7__SUPPORTED_FREQUENCIES_MASK 0x0000ff00L
+#define AUDIO_DESCRIPTOR7__SUPPORTED_FREQUENCIES__SHIFT 0x00000008
+#define AUDIO_DESCRIPTOR7__SUPPORTED_FREQUENCIES_STEREO_MASK 0xff000000L
+#define AUDIO_DESCRIPTOR7__SUPPORTED_FREQUENCIES_STEREO__SHIFT 0x00000018
+#define AUDIO_DESCRIPTOR8__DESCRIPTOR_BYTE_2_MASK 0x00ff0000L
+#define AUDIO_DESCRIPTOR8__DESCRIPTOR_BYTE_2__SHIFT 0x00000010
+#define AUDIO_DESCRIPTOR8__MAX_CHANNELS_MASK 0x00000007L
+#define AUDIO_DESCRIPTOR8__MAX_CHANNELS__SHIFT 0x00000000
+#define AUDIO_DESCRIPTOR8__SUPPORTED_FREQUENCIES_MASK 0x0000ff00L
+#define AUDIO_DESCRIPTOR8__SUPPORTED_FREQUENCIES__SHIFT 0x00000008
+#define AUDIO_DESCRIPTOR8__SUPPORTED_FREQUENCIES_STEREO_MASK 0xff000000L
+#define AUDIO_DESCRIPTOR8__SUPPORTED_FREQUENCIES_STEREO__SHIFT 0x00000018
+#define AUDIO_DESCRIPTOR9__DESCRIPTOR_BYTE_2_MASK 0x00ff0000L
+#define AUDIO_DESCRIPTOR9__DESCRIPTOR_BYTE_2__SHIFT 0x00000010
+#define AUDIO_DESCRIPTOR9__MAX_CHANNELS_MASK 0x00000007L
+#define AUDIO_DESCRIPTOR9__MAX_CHANNELS__SHIFT 0x00000000
+#define AUDIO_DESCRIPTOR9__SUPPORTED_FREQUENCIES_MASK 0x0000ff00L
+#define AUDIO_DESCRIPTOR9__SUPPORTED_FREQUENCIES__SHIFT 0x00000008
+#define AUDIO_DESCRIPTOR9__SUPPORTED_FREQUENCIES_STEREO_MASK 0xff000000L
+#define AUDIO_DESCRIPTOR9__SUPPORTED_FREQUENCIES_STEREO__SHIFT 0x00000018
+#define AUX_ARB_CONTROL__AUX_ARB_PRIORITY_MASK 0x00000003L
+#define AUX_ARB_CONTROL__AUX_ARB_PRIORITY__SHIFT 0x00000000
+#define AUX_ARB_CONTROL__AUX_DMCU_DONE_USING_AUX_REG_MASK 0x02000000L
+#define AUX_ARB_CONTROL__AUX_DMCU_DONE_USING_AUX_REG__SHIFT 0x00000019
+#define AUX_ARB_CONTROL__AUX_DMCU_PENDING_USE_AUX_REG_REQ_MASK 0x01000000L
+#define AUX_ARB_CONTROL__AUX_DMCU_PENDING_USE_AUX_REG_REQ__SHIFT 0x00000018
+#define AUX_ARB_CONTROL__AUX_DMCU_USE_AUX_REG_REQ_MASK 0x01000000L
+#define AUX_ARB_CONTROL__AUX_DMCU_USE_AUX_REG_REQ__SHIFT 0x00000018
+#define AUX_ARB_CONTROL__AUX_NO_QUEUED_LS_GO_MASK 0x00000400L
+#define AUX_ARB_CONTROL__AUX_NO_QUEUED_LS_GO__SHIFT 0x0000000a
+#define AUX_ARB_CONTROL__AUX_NO_QUEUED_SW_GO_MASK 0x00000100L
+#define AUX_ARB_CONTROL__AUX_NO_QUEUED_SW_GO__SHIFT 0x00000008
+#define AUX_ARB_CONTROL__AUX_REG_RW_CNTL_STATUS_MASK 0x0000000cL
+#define AUX_ARB_CONTROL__AUX_REG_RW_CNTL_STATUS__SHIFT 0x00000002
+#define AUX_ARB_CONTROL__AUX_SW_DONE_USING_AUX_REG_MASK 0x00020000L
+#define AUX_ARB_CONTROL__AUX_SW_DONE_USING_AUX_REG__SHIFT 0x00000011
+#define AUX_ARB_CONTROL__AUX_SW_PENDING_USE_AUX_REG_REQ_MASK 0x00010000L
+#define AUX_ARB_CONTROL__AUX_SW_PENDING_USE_AUX_REG_REQ__SHIFT 0x00000010
+#define AUX_ARB_CONTROL__AUX_SW_USE_AUX_REG_REQ_MASK 0x00010000L
+#define AUX_ARB_CONTROL__AUX_SW_USE_AUX_REG_REQ__SHIFT 0x00000010
+#define AUX_CONTROL__AUX_DEGLITCH_EN_MASK 0x20000000L
+#define AUX_CONTROL__AUX_DEGLITCH_EN__SHIFT 0x0000001d
+#define AUX_CONTROL__AUX_EN_MASK 0x00000001L
+#define AUX_CONTROL__AUX_EN__SHIFT 0x00000000
+#define AUX_CONTROL__AUX_HPD_SEL_MASK 0x00700000L
+#define AUX_CONTROL__AUX_HPD_SEL__SHIFT 0x00000014
+#define AUX_CONTROL__AUX_IGNORE_HPD_DISCON_MASK 0x00010000L
+#define AUX_CONTROL__AUX_IGNORE_HPD_DISCON__SHIFT 0x00000010
+#define AUX_CONTROL__AUX_IMPCAL_REQ_EN_MASK 0x01000000L
+#define AUX_CONTROL__AUX_IMPCAL_REQ_EN__SHIFT 0x00000018
+#define AUX_CONTROL__AUX_LS_READ_EN_MASK 0x00000100L
+#define AUX_CONTROL__AUX_LS_READ_EN__SHIFT 0x00000008
+#define AUX_CONTROL__AUX_LS_UPDATE_DISABLE_MASK 0x00001000L
+#define AUX_CONTROL__AUX_LS_UPDATE_DISABLE__SHIFT 0x0000000c
+#define AUX_CONTROL__AUX_MODE_DET_EN_MASK 0x00040000L
+#define AUX_CONTROL__AUX_MODE_DET_EN__SHIFT 0x00000012
+#define AUX_CONTROL__AUX_TEST_MODE_MASK 0x10000000L
+#define AUX_CONTROL__AUX_TEST_MODE__SHIFT 0x0000001c
+#define AUX_CONTROL__SPARE_0_MASK 0x40000000L
+#define AUX_CONTROL__SPARE_0__SHIFT 0x0000001e
+#define AUX_CONTROL__SPARE_1_MASK 0x80000000L
+#define AUX_CONTROL__SPARE_1__SHIFT 0x0000001f
+#define AUX_DPHY_RX_CONTROL0__AUX_RX_ALLOW_BELOW_THRESHOLD_PHASE_DETECT_MASK 0x00020000L
+#define AUX_DPHY_RX_CONTROL0__AUX_RX_ALLOW_BELOW_THRESHOLD_PHASE_DETECT__SHIFT 0x00000011
+#define AUX_DPHY_RX_CONTROL0__AUX_RX_ALLOW_BELOW_THRESHOLD_START_MASK 0x00040000L
+#define AUX_DPHY_RX_CONTROL0__AUX_RX_ALLOW_BELOW_THRESHOLD_START__SHIFT 0x00000012
+#define AUX_DPHY_RX_CONTROL0__AUX_RX_ALLOW_BELOW_THRESHOLD_STOP_MASK 0x00080000L
+#define AUX_DPHY_RX_CONTROL0__AUX_RX_ALLOW_BELOW_THRESHOLD_STOP__SHIFT 0x00000013
+#define AUX_DPHY_RX_CONTROL0__AUX_RX_DETECTION_THRESHOLD_MASK 0x70000000L
+#define AUX_DPHY_RX_CONTROL0__AUX_RX_DETECTION_THRESHOLD__SHIFT 0x0000001c
+#define AUX_DPHY_RX_CONTROL0__AUX_RX_HALF_SYM_DETECT_LEN_MASK 0x00003000L
+#define AUX_DPHY_RX_CONTROL0__AUX_RX_HALF_SYM_DETECT_LEN__SHIFT 0x0000000c
+#define AUX_DPHY_RX_CONTROL0__AUX_RX_PHASE_DETECT_LEN_MASK 0x00300000L
+#define AUX_DPHY_RX_CONTROL0__AUX_RX_PHASE_DETECT_LEN__SHIFT 0x00000014
+#define AUX_DPHY_RX_CONTROL0__AUX_RX_RECEIVE_WINDOW_MASK 0x00000700L
+#define AUX_DPHY_RX_CONTROL0__AUX_RX_RECEIVE_WINDOW__SHIFT 0x00000008
+#define AUX_DPHY_RX_CONTROL0__AUX_RX_START_WINDOW_MASK 0x00000070L
+#define AUX_DPHY_RX_CONTROL0__AUX_RX_START_WINDOW__SHIFT 0x00000004
+#define AUX_DPHY_RX_CONTROL0__AUX_RX_TIMEOUT_LEN_MASK 0x07000000L
+#define AUX_DPHY_RX_CONTROL0__AUX_RX_TIMEOUT_LEN__SHIFT 0x00000018
+#define AUX_DPHY_RX_CONTROL0__AUX_RX_TRANSITION_FILTER_EN_MASK 0x00010000L
+#define AUX_DPHY_RX_CONTROL0__AUX_RX_TRANSITION_FILTER_EN__SHIFT 0x00000010
+#define AUX_DPHY_RX_CONTROL1__AUX_RX_PRECHARGE_SKIP_MASK 0x000000ffL
+#define AUX_DPHY_RX_CONTROL1__AUX_RX_PRECHARGE_SKIP__SHIFT 0x00000000
+#define AUX_DPHY_RX_STATUS__AUX_RX_HALF_SYM_PERIOD_FRACT_MASK 0x001f0000L
+#define AUX_DPHY_RX_STATUS__AUX_RX_HALF_SYM_PERIOD_FRACT__SHIFT 0x00000010
+#define AUX_DPHY_RX_STATUS__AUX_RX_HALF_SYM_PERIOD_MASK 0x3fe00000L
+#define AUX_DPHY_RX_STATUS__AUX_RX_HALF_SYM_PERIOD__SHIFT 0x00000015
+#define AUX_DPHY_RX_STATUS__AUX_RX_STATE_MASK 0x00000007L
+#define AUX_DPHY_RX_STATUS__AUX_RX_STATE__SHIFT 0x00000000
+#define AUX_DPHY_RX_STATUS__AUX_RX_SYNC_VALID_COUNT_MASK 0x00001f00L
+#define AUX_DPHY_RX_STATUS__AUX_RX_SYNC_VALID_COUNT__SHIFT 0x00000008
+#define AUX_DPHY_TX_CONTROL__AUX_TX_PRECHARGE_LEN_MASK 0x00000007L
+#define AUX_DPHY_TX_CONTROL__AUX_TX_PRECHARGE_LEN__SHIFT 0x00000000
+#define AUX_DPHY_TX_CONTROL__AUX_TX_PRECHARGE_SYMBOLS_MASK 0x00003f00L
+#define AUX_DPHY_TX_CONTROL__AUX_TX_PRECHARGE_SYMBOLS__SHIFT 0x00000008
+#define AUX_DPHY_TX_REF_CONTROL__AUX_TX_RATE_MASK 0x00000030L
+#define AUX_DPHY_TX_REF_CONTROL__AUX_TX_RATE__SHIFT 0x00000004
+#define AUX_DPHY_TX_REF_CONTROL__AUX_TX_REF_DIV_MASK 0x01ff0000L
+#define AUX_DPHY_TX_REF_CONTROL__AUX_TX_REF_DIV__SHIFT 0x00000010
+#define AUX_DPHY_TX_REF_CONTROL__AUX_TX_REF_SEL_MASK 0x00000001L
+#define AUX_DPHY_TX_REF_CONTROL__AUX_TX_REF_SEL__SHIFT 0x00000000
+#define AUX_DPHY_TX_STATUS__AUX_TX_ACTIVE_MASK 0x00000001L
+#define AUX_DPHY_TX_STATUS__AUX_TX_ACTIVE__SHIFT 0x00000000
+#define AUX_DPHY_TX_STATUS__AUX_TX_HALF_SYM_PERIOD_MASK 0x01ff0000L
+#define AUX_DPHY_TX_STATUS__AUX_TX_HALF_SYM_PERIOD__SHIFT 0x00000010
+#define AUX_DPHY_TX_STATUS__AUX_TX_STATE_MASK 0x00000070L
+#define AUX_DPHY_TX_STATUS__AUX_TX_STATE__SHIFT 0x00000004
+#define AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_EN_MASK 0x00000001L
+#define AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_EN__SHIFT 0x00000000
+#define AUX_INTERRUPT_CONTROL__AUX_LS_DONE_ACK_MASK 0x00000020L
+#define AUX_INTERRUPT_CONTROL__AUX_LS_DONE_ACK__SHIFT 0x00000005
+#define AUX_INTERRUPT_CONTROL__AUX_LS_DONE_INT_MASK 0x00000010L
+#define AUX_INTERRUPT_CONTROL__AUX_LS_DONE_INT__SHIFT 0x00000004
+#define AUX_INTERRUPT_CONTROL__AUX_LS_DONE_MASK_MASK 0x00000040L
+#define AUX_INTERRUPT_CONTROL__AUX_LS_DONE_MASK__SHIFT 0x00000006
+#define AUX_INTERRUPT_CONTROL__AUX_SW_DONE_ACK_MASK 0x00000002L
+#define AUX_INTERRUPT_CONTROL__AUX_SW_DONE_ACK__SHIFT 0x00000001
+#define AUX_INTERRUPT_CONTROL__AUX_SW_DONE_INT_MASK 0x00000001L
+#define AUX_INTERRUPT_CONTROL__AUX_SW_DONE_INT__SHIFT 0x00000000
+#define AUX_INTERRUPT_CONTROL__AUX_SW_DONE_MASK_MASK 0x00000004L
+#define AUX_INTERRUPT_CONTROL__AUX_SW_DONE_MASK__SHIFT 0x00000002
+#define AUX_LS_DATA__AUX_LS_DATA_MASK 0x0000ff00L
+#define AUX_LS_DATA__AUX_LS_DATA__SHIFT 0x00000008
+#define AUX_LS_DATA__AUX_LS_INDEX_MASK 0x001f0000L
+#define AUX_LS_DATA__AUX_LS_INDEX__SHIFT 0x00000010
+#define AUX_LS_STATUS__AUX_LS_CP_IRQ_MASK 0x20000000L
+#define AUX_LS_STATUS__AUX_LS_CP_IRQ__SHIFT 0x0000001d
+#define AUX_LS_STATUS__AUX_LS_DONE_MASK 0x00000001L
+#define AUX_LS_STATUS__AUX_LS_DONE__SHIFT 0x00000000
+#define AUX_LS_STATUS__AUX_LS_HPD_DISCON_MASK 0x00000200L
+#define AUX_LS_STATUS__AUX_LS_HPD_DISCON__SHIFT 0x00000009
+#define AUX_LS_STATUS__AUX_LS_NON_AUX_MODE_MASK 0x00000800L
+#define AUX_LS_STATUS__AUX_LS_NON_AUX_MODE__SHIFT 0x0000000b
+#define AUX_LS_STATUS__AUX_LS_REPLY_BYTE_COUNT_MASK 0x1f000000L
+#define AUX_LS_STATUS__AUX_LS_REPLY_BYTE_COUNT__SHIFT 0x00000018
+#define AUX_LS_STATUS__AUX_LS_REQ_MASK 0x00000002L
+#define AUX_LS_STATUS__AUX_LS_REQ__SHIFT 0x00000001
+#define AUX_LS_STATUS__AUX_LS_RX_INVALID_START_MASK 0x00080000L
+#define AUX_LS_STATUS__AUX_LS_RX_INVALID_START__SHIFT 0x00000013
+#define AUX_LS_STATUS__AUX_LS_RX_INVALID_STOP_MASK 0x00004000L
+#define AUX_LS_STATUS__AUX_LS_RX_INVALID_STOP__SHIFT 0x0000000e
+#define AUX_LS_STATUS__AUX_LS_RX_MIN_COUNT_VIOL_MASK 0x00001000L
+#define AUX_LS_STATUS__AUX_LS_RX_MIN_COUNT_VIOL__SHIFT 0x0000000c
+#define AUX_LS_STATUS__AUX_LS_RX_OVERFLOW_MASK 0x00000100L
+#define AUX_LS_STATUS__AUX_LS_RX_OVERFLOW__SHIFT 0x00000008
+#define AUX_LS_STATUS__AUX_LS_RX_PARTIAL_BYTE_MASK 0x00000400L
+#define AUX_LS_STATUS__AUX_LS_RX_PARTIAL_BYTE__SHIFT 0x0000000a
+#define AUX_LS_STATUS__AUX_LS_RX_RECV_INVALID_H_MASK 0x00400000L
+#define AUX_LS_STATUS__AUX_LS_RX_RECV_INVALID_H__SHIFT 0x00000016
+#define AUX_LS_STATUS__AUX_LS_RX_RECV_INVALID_L_MASK 0x00800000L
+#define AUX_LS_STATUS__AUX_LS_RX_RECV_INVALID_L__SHIFT 0x00000017
+#define AUX_LS_STATUS__AUX_LS_RX_RECV_NO_DET_MASK 0x00100000L
+#define AUX_LS_STATUS__AUX_LS_RX_RECV_NO_DET__SHIFT 0x00000014
+#define AUX_LS_STATUS__AUX_LS_RX_SYNC_INVALID_H_MASK 0x00040000L
+#define AUX_LS_STATUS__AUX_LS_RX_SYNC_INVALID_H__SHIFT 0x00000012
+#define AUX_LS_STATUS__AUX_LS_RX_SYNC_INVALID_L_MASK 0x00020000L
+#define AUX_LS_STATUS__AUX_LS_RX_SYNC_INVALID_L__SHIFT 0x00000011
+#define AUX_LS_STATUS__AUX_LS_RX_TIMEOUT_MASK 0x00000080L
+#define AUX_LS_STATUS__AUX_LS_RX_TIMEOUT__SHIFT 0x00000007
+#define AUX_LS_STATUS__AUX_LS_RX_TIMEOUT_STATE_MASK 0x00000070L
+#define AUX_LS_STATUS__AUX_LS_RX_TIMEOUT_STATE__SHIFT 0x00000004
+#define AUX_LS_STATUS__AUX_LS_UPDATED_ACK_MASK 0x80000000L
+#define AUX_LS_STATUS__AUX_LS_UPDATED_ACK__SHIFT 0x0000001f
+#define AUX_LS_STATUS__AUX_LS_UPDATED_MASK 0x40000000L
+#define AUX_LS_STATUS__AUX_LS_UPDATED__SHIFT 0x0000001e
+#define AUXN_IMPCAL__AUXN_CALOUT_ERROR_AK_MASK 0x00000400L
+#define AUXN_IMPCAL__AUXN_CALOUT_ERROR_AK__SHIFT 0x0000000a
+#define AUXN_IMPCAL__AUXN_CALOUT_ERROR_MASK 0x00000200L
+#define AUXN_IMPCAL__AUXN_CALOUT_ERROR__SHIFT 0x00000009
+#define AUXN_IMPCAL__AUXN_IMPCAL_CALOUT_MASK 0x00000100L
+#define AUXN_IMPCAL__AUXN_IMPCAL_CALOUT__SHIFT 0x00000008
+#define AUXN_IMPCAL__AUXN_IMPCAL_ENABLE_MASK 0x00000001L
+#define AUXN_IMPCAL__AUXN_IMPCAL_ENABLE__SHIFT 0x00000000
+#define AUXN_IMPCAL__AUXN_IMPCAL_OVERRIDE_ENABLE_MASK 0x10000000L
+#define AUXN_IMPCAL__AUXN_IMPCAL_OVERRIDE_ENABLE__SHIFT 0x0000001c
+#define AUXN_IMPCAL__AUXN_IMPCAL_OVERRIDE_MASK 0x0f000000L
+#define AUXN_IMPCAL__AUXN_IMPCAL_OVERRIDE__SHIFT 0x00000018
+#define AUXN_IMPCAL__AUXN_IMPCAL_STEP_DELAY_MASK 0x00f00000L
+#define AUXN_IMPCAL__AUXN_IMPCAL_STEP_DELAY__SHIFT 0x00000014
+#define AUXN_IMPCAL__AUXN_IMPCAL_VALUE_MASK 0x000f0000L
+#define AUXN_IMPCAL__AUXN_IMPCAL_VALUE__SHIFT 0x00000010
+#define AUXP_IMPCAL__AUXP_CALOUT_ERROR_AK_MASK 0x00000400L
+#define AUXP_IMPCAL__AUXP_CALOUT_ERROR_AK__SHIFT 0x0000000a
+#define AUXP_IMPCAL__AUXP_CALOUT_ERROR_MASK 0x00000200L
+#define AUXP_IMPCAL__AUXP_CALOUT_ERROR__SHIFT 0x00000009
+#define AUXP_IMPCAL__AUXP_IMPCAL_CALOUT_MASK 0x00000100L
+#define AUXP_IMPCAL__AUXP_IMPCAL_CALOUT__SHIFT 0x00000008
+#define AUXP_IMPCAL__AUXP_IMPCAL_ENABLE_MASK 0x00000001L
+#define AUXP_IMPCAL__AUXP_IMPCAL_ENABLE__SHIFT 0x00000000
+#define AUXP_IMPCAL__AUXP_IMPCAL_OVERRIDE_ENABLE_MASK 0x10000000L
+#define AUXP_IMPCAL__AUXP_IMPCAL_OVERRIDE_ENABLE__SHIFT 0x0000001c
+#define AUXP_IMPCAL__AUXP_IMPCAL_OVERRIDE_MASK 0x0f000000L
+#define AUXP_IMPCAL__AUXP_IMPCAL_OVERRIDE__SHIFT 0x00000018
+#define AUXP_IMPCAL__AUXP_IMPCAL_STEP_DELAY_MASK 0x00f00000L
+#define AUXP_IMPCAL__AUXP_IMPCAL_STEP_DELAY__SHIFT 0x00000014
+#define AUXP_IMPCAL__AUXP_IMPCAL_VALUE_MASK 0x000f0000L
+#define AUXP_IMPCAL__AUXP_IMPCAL_VALUE__SHIFT 0x00000010
+#define AUX_SW_CONTROL__AUX_LS_READ_TRIG_MASK 0x00000004L
+#define AUX_SW_CONTROL__AUX_LS_READ_TRIG__SHIFT 0x00000002
+#define AUX_SW_CONTROL__AUX_SW_GO_MASK 0x00000001L
+#define AUX_SW_CONTROL__AUX_SW_GO__SHIFT 0x00000000
+#define AUX_SW_CONTROL__AUX_SW_START_DELAY_MASK 0x000000f0L
+#define AUX_SW_CONTROL__AUX_SW_START_DELAY__SHIFT 0x00000004
+#define AUX_SW_CONTROL__AUX_SW_WR_BYTES_MASK 0x001f0000L
+#define AUX_SW_CONTROL__AUX_SW_WR_BYTES__SHIFT 0x00000010
+#define AUX_SW_DATA__AUX_SW_AUTOINCREMENT_DISABLE_MASK 0x80000000L
+#define AUX_SW_DATA__AUX_SW_AUTOINCREMENT_DISABLE__SHIFT 0x0000001f
+#define AUX_SW_DATA__AUX_SW_DATA_MASK 0x0000ff00L
+#define AUX_SW_DATA__AUX_SW_DATA_RW_MASK 0x00000001L
+#define AUX_SW_DATA__AUX_SW_DATA_RW__SHIFT 0x00000000
+#define AUX_SW_DATA__AUX_SW_DATA__SHIFT 0x00000008
+#define AUX_SW_DATA__AUX_SW_INDEX_MASK 0x001f0000L
+#define AUX_SW_DATA__AUX_SW_INDEX__SHIFT 0x00000010
+#define AUX_SW_STATUS__AUX_ARB_STATUS_MASK 0xc0000000L
+#define AUX_SW_STATUS__AUX_ARB_STATUS__SHIFT 0x0000001e
+#define AUX_SW_STATUS__AUX_SW_DONE_MASK 0x00000001L
+#define AUX_SW_STATUS__AUX_SW_DONE__SHIFT 0x00000000
+#define AUX_SW_STATUS__AUX_SW_HPD_DISCON_MASK 0x00000200L
+#define AUX_SW_STATUS__AUX_SW_HPD_DISCON__SHIFT 0x00000009
+#define AUX_SW_STATUS__AUX_SW_NON_AUX_MODE_MASK 0x00000800L
+#define AUX_SW_STATUS__AUX_SW_NON_AUX_MODE__SHIFT 0x0000000b
+#define AUX_SW_STATUS__AUX_SW_REPLY_BYTE_COUNT_MASK 0x1f000000L
+#define AUX_SW_STATUS__AUX_SW_REPLY_BYTE_COUNT__SHIFT 0x00000018
+#define AUX_SW_STATUS__AUX_SW_REQ_MASK 0x00000002L
+#define AUX_SW_STATUS__AUX_SW_REQ__SHIFT 0x00000001
+#define AUX_SW_STATUS__AUX_SW_RX_INVALID_START_MASK 0x00080000L
+#define AUX_SW_STATUS__AUX_SW_RX_INVALID_START__SHIFT 0x00000013
+#define AUX_SW_STATUS__AUX_SW_RX_INVALID_STOP_MASK 0x00004000L
+#define AUX_SW_STATUS__AUX_SW_RX_INVALID_STOP__SHIFT 0x0000000e
+#define AUX_SW_STATUS__AUX_SW_RX_MIN_COUNT_VIOL_MASK 0x00001000L
+#define AUX_SW_STATUS__AUX_SW_RX_MIN_COUNT_VIOL__SHIFT 0x0000000c
+#define AUX_SW_STATUS__AUX_SW_RX_OVERFLOW_MASK 0x00000100L
+#define AUX_SW_STATUS__AUX_SW_RX_OVERFLOW__SHIFT 0x00000008
+#define AUX_SW_STATUS__AUX_SW_RX_PARTIAL_BYTE_MASK 0x00000400L
+#define AUX_SW_STATUS__AUX_SW_RX_PARTIAL_BYTE__SHIFT 0x0000000a
+#define AUX_SW_STATUS__AUX_SW_RX_RECV_INVALID_H_MASK 0x00400000L
+#define AUX_SW_STATUS__AUX_SW_RX_RECV_INVALID_H__SHIFT 0x00000016
+#define AUX_SW_STATUS__AUX_SW_RX_RECV_INVALID_L_MASK 0x00800000L
+#define AUX_SW_STATUS__AUX_SW_RX_RECV_INVALID_L__SHIFT 0x00000017
+#define AUX_SW_STATUS__AUX_SW_RX_RECV_NO_DET_MASK 0x00100000L
+#define AUX_SW_STATUS__AUX_SW_RX_RECV_NO_DET__SHIFT 0x00000014
+#define AUX_SW_STATUS__AUX_SW_RX_SYNC_INVALID_H_MASK 0x00040000L
+#define AUX_SW_STATUS__AUX_SW_RX_SYNC_INVALID_H__SHIFT 0x00000012
+#define AUX_SW_STATUS__AUX_SW_RX_SYNC_INVALID_L_MASK 0x00020000L
+#define AUX_SW_STATUS__AUX_SW_RX_SYNC_INVALID_L__SHIFT 0x00000011
+#define AUX_SW_STATUS__AUX_SW_RX_TIMEOUT_MASK 0x00000080L
+#define AUX_SW_STATUS__AUX_SW_RX_TIMEOUT__SHIFT 0x00000007
+#define AUX_SW_STATUS__AUX_SW_RX_TIMEOUT_STATE_MASK 0x00000070L
+#define AUX_SW_STATUS__AUX_SW_RX_TIMEOUT_STATE__SHIFT 0x00000004
+#define AZALIA_APPLICATION_POSITION_IN_CYCLIC_BUFFER__APPLICATION_POSITION_IN_CYCLIC_BUFFER_MASK 0xffffffffL
+#define AZALIA_APPLICATION_POSITION_IN_CYCLIC_BUFFER__APPLICATION_POSITION_IN_CYCLIC_BUFFER__SHIFT 0x00000000
+#define AZALIA_AUDIO_DTO__AZALIA_AUDIO_DTO_MODULE_MASK 0xffff0000L
+#define AZALIA_AUDIO_DTO__AZALIA_AUDIO_DTO_MODULE__SHIFT 0x00000010
+#define AZALIA_AUDIO_DTO__AZALIA_AUDIO_DTO_PHASE_MASK 0x0000ffffL
+#define AZALIA_AUDIO_DTO__AZALIA_AUDIO_DTO_PHASE__SHIFT 0x00000000
+#define AZALIA_AUDIO_DTO_CONTROL__AZALIA_AUDIO_FORCE_DTO_MASK 0x00000300L
+#define AZALIA_AUDIO_DTO_CONTROL__AZALIA_AUDIO_FORCE_DTO__SHIFT 0x00000008
+#define AZALIA_BDL_DMA_CONTROL__BDL_DMA_ISOCHRONOUS_MASK 0x00000030L
+#define AZALIA_BDL_DMA_CONTROL__BDL_DMA_ISOCHRONOUS__SHIFT 0x00000004
+#define AZALIA_BDL_DMA_CONTROL__BDL_DMA_NON_SNOOP_MASK 0x00000003L
+#define AZALIA_BDL_DMA_CONTROL__BDL_DMA_NON_SNOOP__SHIFT 0x00000000
+#define AZALIA_CONTROLLER_DEBUG__CONTROLLER_DEBUG_MASK 0xffffffffL
+#define AZALIA_CONTROLLER_DEBUG__CONTROLLER_DEBUG__SHIFT 0x00000000
+#define AZALIA_CORB_DMA_CONTROL__CORB_DMA_ISOCHRONOUS_MASK 0x00000010L
+#define AZALIA_CORB_DMA_CONTROL__CORB_DMA_ISOCHRONOUS__SHIFT 0x00000004
+#define AZALIA_CORB_DMA_CONTROL__CORB_DMA_NON_SNOOP_MASK 0x00000001L
+#define AZALIA_CORB_DMA_CONTROL__CORB_DMA_NON_SNOOP__SHIFT 0x00000000
+#define AZALIA_CUMULATIVE_LATENCY_COUNT__AZALIA_CUMULATIVE_LATENCY_COUNT_MASK 0xffffffffL
+#define AZALIA_CUMULATIVE_LATENCY_COUNT__AZALIA_CUMULATIVE_LATENCY_COUNT__SHIFT 0x00000000
+#define AZALIA_CUMULATIVE_REQUEST_COUNT__AZALIA_CUMULATIVE_REQUEST_COUNT_MASK 0xffffffffL
+#define AZALIA_CUMULATIVE_REQUEST_COUNT__AZALIA_CUMULATIVE_REQUEST_COUNT__SHIFT 0x00000000
+#define AZALIA_CYCLIC_BUFFER_SYNC__CYCLIC_BUFFER_SYNC_ENABLE_MASK 0x00000001L
+#define AZALIA_CYCLIC_BUFFER_SYNC__CYCLIC_BUFFER_SYNC_ENABLE__SHIFT 0x00000000
+#define AZALIA_DATA_DMA_CONTROL__AZALIA_IOC_GENERATION_METHOD_MASK 0x00010000L
+#define AZALIA_DATA_DMA_CONTROL__AZALIA_IOC_GENERATION_METHOD__SHIFT 0x00000010
+#define AZALIA_DATA_DMA_CONTROL__AZALIA_UNDERFLOW_CONTROL_MASK 0x00020000L
+#define AZALIA_DATA_DMA_CONTROL__AZALIA_UNDERFLOW_CONTROL__SHIFT 0x00000011
+#define AZALIA_DATA_DMA_CONTROL__DATA_DMA_ISOCHRONOUS_MASK 0x00000030L
+#define AZALIA_DATA_DMA_CONTROL__DATA_DMA_ISOCHRONOUS__SHIFT 0x00000004
+#define AZALIA_DATA_DMA_CONTROL__DATA_DMA_NON_SNOOP_MASK 0x00000003L
+#define AZALIA_DATA_DMA_CONTROL__DATA_DMA_NON_SNOOP__SHIFT 0x00000000
+#define AZALIA_F0_CODEC_CHANNEL_COUNT_CONTROL__COMPRESSED_CHANNEL_COUNT_MASK 0x00000070L
+#define AZALIA_F0_CODEC_CHANNEL_COUNT_CONTROL__COMPRESSED_CHANNEL_COUNT__SHIFT 0x00000004
+#define AZALIA_F0_CODEC_CHANNEL_COUNT_CONTROL__HBR_CHANNEL_COUNT_MASK 0x00000007L
+#define AZALIA_F0_CODEC_CHANNEL_COUNT_CONTROL__HBR_CHANNEL_COUNT__SHIFT 0x00000000
+#define AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID__CHANNEL_ID_MASK 0x0000000fL
+#define AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID__CHANNEL_ID__SHIFT 0x00000000
+#define AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID__STREAM_ID_MASK 0x000000f0L
+#define AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID__STREAM_ID__SHIFT 0x00000004
+#define AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__BITS_PER_SAMPLE_MASK 0x00000070L
+#define AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__BITS_PER_SAMPLE__SHIFT 0x00000004
+#define AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__NUMBER_OF_CHANNELS_MASK 0x0000000fL
+#define AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__NUMBER_OF_CHANNELS__SHIFT 0x00000000
+#define AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_DIVISOR_MASK 0x00000700L
+#define AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_DIVISOR__SHIFT 0x00000008
+#define AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_MULTIPLE_MASK 0x00003800L
+#define AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_MULTIPLE__SHIFT 0x0000000b
+#define AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_RATE_MASK 0x00004000L
+#define AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_RATE__SHIFT 0x0000000e
+#define AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__STREAM_TYPE_MASK 0x00008000L
+#define AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__STREAM_TYPE__SHIFT 0x0000000f
+#define AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__CC_MASK 0x00007f00L
+#define AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__CC__SHIFT 0x00000008
+#define AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__COPY_MASK 0x00000010L
+#define AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__COPY__SHIFT 0x00000004
+#define AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__DIGEN_MASK 0x00000001L
+#define AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__DIGEN__SHIFT 0x00000000
+#define AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__KEEPALIVE_MASK 0x00800000L
+#define AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__KEEPALIVE__SHIFT 0x00000017
+#define AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__L_MASK 0x00000080L
+#define AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__L__SHIFT 0x00000007
+#define AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__NON_AUDIO_MASK 0x00000020L
+#define AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__NON_AUDIO__SHIFT 0x00000005
+#define AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRE_MASK 0x00000008L
+#define AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRE__SHIFT 0x00000003
+#define AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRO_MASK 0x00000040L
+#define AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRO__SHIFT 0x00000006
+#define AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__VCFG_MASK 0x00000004L
+#define AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__VCFG__SHIFT 0x00000002
+#define AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__V_MASK 0x00000002L
+#define AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__V__SHIFT 0x00000001
+#define AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_EMBEDDING_ENABLE_MASK 0x00000001L
+#define AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_EMBEDDING_ENABLE__SHIFT 0x00000000
+#define AZALIA_F0_CODEC_CONVERTER_CONTROL_RAMP_RATE__RAMP_RATE_MASK 0x000000ffL
+#define AZALIA_F0_CODEC_CONVERTER_CONTROL_RAMP_RATE__RAMP_RATE__SHIFT 0x00000000
+#define AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE_MASK 0x00000008L
+#define AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE__SHIFT 0x00000003
+#define AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES_MASK 0x00000001L
+#define AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES__SHIFT 0x00000000
+#define AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY_MASK 0x000f0000L
+#define AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY__SHIFT 0x00000010
+#define AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST_MASK 0x00000100L
+#define AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST__SHIFT 0x00000008
+#define AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL_MASK 0x00000200L
+#define AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL__SHIFT 0x00000009
+#define AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__FORMAT_OVERRIDE_MASK 0x00000010L
+#define AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__FORMAT_OVERRIDE__SHIFT 0x00000004
+#define AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT_MASK 0x00000002L
+#define AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT__SHIFT 0x00000001
+#define AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP_MASK 0x00000800L
+#define AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP__SHIFT 0x0000000b
+#define AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT_MASK 0x00000004L
+#define AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT__SHIFT 0x00000002
+#define AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL_MASK 0x00000400L
+#define AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL__SHIFT 0x0000000a
+#define AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET_MASK 0x00000040L
+#define AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET__SHIFT 0x00000006
+#define AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE_MASK 0x00000020L
+#define AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE__SHIFT 0x00000005
+#define AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE_MASK 0x00f00000L
+#define AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE__SHIFT 0x00000014
+#define AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY_MASK 0x00000080L
+#define AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY__SHIFT 0x00000007
+#define AZALIA_F0_CODEC_CONVERTER_PARAMETER_STREAM_FORMATS__STREAM_FORMATS_MASK 0xffffffffL
+#define AZALIA_F0_CODEC_CONVERTER_PARAMETER_STREAM_FORMATS__STREAM_FORMATS__SHIFT 0x00000000
+#define AZALIA_F0_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_BIT_CAPABILITIES_MASK 0x001f0000L
+#define AZALIA_F0_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_BIT_CAPABILITIES__SHIFT 0x00000010
+#define AZALIA_F0_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_RATE_CAPABILITIES_MASK 0x00000fffL
+#define AZALIA_F0_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_RATE_CAPABILITIES__SHIFT 0x00000000
+#define AZALIA_F0_CODEC_CONVERTER_PIN_DEBUG__AZALIA_DEBUG__SHIFT 0x00000000
+#define AZALIA_F0_CODEC_CONVERTER_STRIPE_CONTROL__STRIPE_CAPABILITY_MASK 0x00700000L
+#define AZALIA_F0_CODEC_CONVERTER_STRIPE_CONTROL__STRIPE_CAPABILITY__SHIFT 0x00000014
+#define AZALIA_F0_CODEC_CONVERTER_STRIPE_CONTROL__STRIPE_CONTROL_MASK 0x00000003L
+#define AZALIA_F0_CODEC_CONVERTER_STRIPE_CONTROL__STRIPE_CONTROL__SHIFT 0x00000000
+#define AZALIA_F0_CODEC_DEBUG__CODEC_DEBUG_MASK 0xffffffffL
+#define AZALIA_F0_CODEC_DEBUG__CODEC_DEBUG__SHIFT 0x00000000
+#define AZALIA_F0_CODEC_ENDPOINT_DATA__AZALIA_ENDPOINT_REG_DATA_MASK 0xffffffffL
+#define AZALIA_F0_CODEC_ENDPOINT_DATA__AZALIA_ENDPOINT_REG_DATA__SHIFT 0x00000000
+#define AZALIA_F0_CODEC_ENDPOINT_INDEX__AZALIA_ENDPOINT_REG_INDEX_MASK 0x000000ffL
+#define AZALIA_F0_CODEC_ENDPOINT_INDEX__AZALIA_ENDPOINT_REG_INDEX__SHIFT 0x00000000
+#define AZALIA_F0_CODEC_FUNCTION_CONTROL_CONVERTER_SYNCHRONIZATION__CONVERTER_SYNCHRONIZATION_MASK 0x0000003fL
+#define AZALIA_F0_CODEC_FUNCTION_CONTROL_CONVERTER_SYNCHRONIZATION__CONVERTER_SYNCHRONIZATION__SHIFT 0x00000000
+#define AZALIA_F0_CODEC_FUNCTION_CONTROL_POWER_STATE__CLKSTOPOK_MASK 0x00000200L
+#define AZALIA_F0_CODEC_FUNCTION_CONTROL_POWER_STATE__CLKSTOPOK__SHIFT 0x00000009
+#define AZALIA_F0_CODEC_FUNCTION_CONTROL_POWER_STATE__POWER_STATE_ACT_MASK 0x000000f0L
+#define AZALIA_F0_CODEC_FUNCTION_CONTROL_POWER_STATE__POWER_STATE_ACT__SHIFT 0x00000004
+#define AZALIA_F0_CODEC_FUNCTION_CONTROL_POWER_STATE__POWER_STATE_SET_MASK 0x0000000fL
+#define AZALIA_F0_CODEC_FUNCTION_CONTROL_POWER_STATE__POWER_STATE_SET__SHIFT 0x00000000
+#define AZALIA_F0_CODEC_FUNCTION_CONTROL_POWER_STATE__POWER_STATE_SETTINGS_RESET_MASK 0x00000400L
+#define AZALIA_F0_CODEC_FUNCTION_CONTROL_POWER_STATE__POWER_STATE_SETTINGS_RESET__SHIFT 0x0000000a
+#define AZALIA_F0_CODEC_FUNCTION_CONTROL_RESET__CODEC_RESET_MASK 0x00000001L
+#define AZALIA_F0_CODEC_FUNCTION_CONTROL_RESET__CODEC_RESET__SHIFT 0x00000000
+#define AZALIA_F0_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID__SUBSYSTEM_ID_BYTE0_MASK 0x000000ffL
+#define AZALIA_F0_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID__SUBSYSTEM_ID_BYTE0__SHIFT 0x00000000
+#define AZALIA_F0_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID__SUBSYSTEM_ID_BYTE1_MASK 0x0000ff00L
+#define AZALIA_F0_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID__SUBSYSTEM_ID_BYTE1__SHIFT 0x00000008
+#define AZALIA_F0_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID__SUBSYSTEM_ID_BYTE2_MASK 0x00ff0000L
+#define AZALIA_F0_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID__SUBSYSTEM_ID_BYTE2__SHIFT 0x00000010
+#define AZALIA_F0_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID__SUBSYSTEM_ID_BYTE3_MASK 0xff000000L
+#define AZALIA_F0_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID__SUBSYSTEM_ID_BYTE3__SHIFT 0x00000018
+#define AZALIA_F0_CODEC_FUNCTION_PARAMETER_GROUP_TYPE__AZALIA_CODEC_FUNCTION_PARAMETER_GROUP_TYPE_MASK 0xffffffffL
+#define AZALIA_F0_CODEC_FUNCTION_PARAMETER_GROUP_TYPE__AZALIA_CODEC_FUNCTION_PARAMETER_GROUP_TYPE__SHIFT 0x00000000
+#define AZALIA_F0_CODEC_FUNCTION_PARAMETER_POWER_STATES__AZALIA_CODEC_FUNCTION_PARAMETER_POWER_STATES_MASK 0x3fffffffL
+#define AZALIA_F0_CODEC_FUNCTION_PARAMETER_POWER_STATES__AZALIA_CODEC_FUNCTION_PARAMETER_POWER_STATES__SHIFT 0x00000000
+#define AZALIA_F0_CODEC_FUNCTION_PARAMETER_POWER_STATES__CLKSTOP_MASK 0x40000000L
+#define AZALIA_F0_CODEC_FUNCTION_PARAMETER_POWER_STATES__CLKSTOP__SHIFT 0x0000001e
+#define AZALIA_F0_CODEC_FUNCTION_PARAMETER_POWER_STATES__EPSS_MASK 0x80000000L
+#define AZALIA_F0_CODEC_FUNCTION_PARAMETER_POWER_STATES__EPSS__SHIFT 0x0000001f
+#define AZALIA_F0_CODEC_FUNCTION_PARAMETER_STREAM_FORMATS__AZALIA_CODEC_FUNCTION_PARAMETER_STREAM_FORMATS_MASK 0xffffffffL
+#define AZALIA_F0_CODEC_FUNCTION_PARAMETER_STREAM_FORMATS__AZALIA_CODEC_FUNCTION_PARAMETER_STREAM_FORMATS__SHIFT 0x00000000
+#define AZALIA_F0_CODEC_FUNCTION_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_BIT_CAPABILITIES_MASK 0x001f0000L
+#define AZALIA_F0_CODEC_FUNCTION_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_BIT_CAPABILITIES__SHIFT 0x00000010
+#define AZALIA_F0_CODEC_FUNCTION_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_RATE_CAPABILITIES_MASK 0x00000fffL
+#define AZALIA_F0_CODEC_FUNCTION_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_RATE_CAPABILITIES__SHIFT 0x00000000
+#define AZALIA_F0_CODEC_PIN_ASSOCIATION_INFO__ASSOCIATION_INFO_MASK 0xffffffffL
+#define AZALIA_F0_CODEC_PIN_ASSOCIATION_INFO__ASSOCIATION_INFO__SHIFT 0x00000000
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__DESCRIPTOR_BYTE_2_MASK 0x00ff0000L
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__DESCRIPTOR_BYTE_2__SHIFT 0x00000010
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__MAX_CHANNELS_MASK 0x00000007L
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__MAX_CHANNELS__SHIFT 0x00000000
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES_MASK 0x0000ff00L
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES__SHIFT 0x00000008
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES_STEREO_MASK 0xff000000L
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES_STEREO__SHIFT 0x00000018
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__DESCRIPTOR_BYTE_2_MASK 0x00ff0000L
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__DESCRIPTOR_BYTE_2__SHIFT 0x00000010
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__MAX_CHANNELS_MASK 0x00000007L
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__MAX_CHANNELS__SHIFT 0x00000000
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__SUPPORTED_FREQUENCIES_MASK 0x0000ff00L
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__SUPPORTED_FREQUENCIES__SHIFT 0x00000008
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__DESCRIPTOR_BYTE_2_MASK 0x00ff0000L
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__DESCRIPTOR_BYTE_2__SHIFT 0x00000010
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__MAX_CHANNELS_MASK 0x00000007L
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__MAX_CHANNELS__SHIFT 0x00000000
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__SUPPORTED_FREQUENCIES_MASK 0x0000ff00L
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__SUPPORTED_FREQUENCIES__SHIFT 0x00000008
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__DESCRIPTOR_BYTE_2_MASK 0x00ff0000L
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__DESCRIPTOR_BYTE_2__SHIFT 0x00000010
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__MAX_CHANNELS_MASK 0x00000007L
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__MAX_CHANNELS__SHIFT 0x00000000
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__SUPPORTED_FREQUENCIES_MASK 0x0000ff00L
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__SUPPORTED_FREQUENCIES__SHIFT 0x00000008
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__DESCRIPTOR_BYTE_2_MASK 0x00ff0000L
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__DESCRIPTOR_BYTE_2__SHIFT 0x00000010
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__MAX_CHANNELS_MASK 0x00000007L
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__MAX_CHANNELS__SHIFT 0x00000000
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__SUPPORTED_FREQUENCIES_MASK 0x0000ff00L
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__SUPPORTED_FREQUENCIES__SHIFT 0x00000008
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__DESCRIPTOR_BYTE_2_MASK 0x00ff0000L
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__DESCRIPTOR_BYTE_2__SHIFT 0x00000010
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__MAX_CHANNELS_MASK 0x00000007L
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__MAX_CHANNELS__SHIFT 0x00000000
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__SUPPORTED_FREQUENCIES_MASK 0x0000ff00L
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__SUPPORTED_FREQUENCIES__SHIFT 0x00000008
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__DESCRIPTOR_BYTE_2_MASK 0x00ff0000L
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__DESCRIPTOR_BYTE_2__SHIFT 0x00000010
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__MAX_CHANNELS_MASK 0x00000007L
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__MAX_CHANNELS__SHIFT 0x00000000
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__SUPPORTED_FREQUENCIES_MASK 0x0000ff00L
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__SUPPORTED_FREQUENCIES__SHIFT 0x00000008
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__DESCRIPTOR_BYTE_2_MASK 0x00ff0000L
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__DESCRIPTOR_BYTE_2__SHIFT 0x00000010
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__MAX_CHANNELS_MASK 0x00000007L
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__MAX_CHANNELS__SHIFT 0x00000000
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__SUPPORTED_FREQUENCIES_MASK 0x0000ff00L
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__SUPPORTED_FREQUENCIES__SHIFT 0x00000008
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__DESCRIPTOR_BYTE_2_MASK 0x00ff0000L
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__DESCRIPTOR_BYTE_2__SHIFT 0x00000010
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__MAX_CHANNELS_MASK 0x00000007L
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__MAX_CHANNELS__SHIFT 0x00000000
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__SUPPORTED_FREQUENCIES_MASK 0x0000ff00L
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__SUPPORTED_FREQUENCIES__SHIFT 0x00000008
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__DESCRIPTOR_BYTE_2_MASK 0x00ff0000L
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__DESCRIPTOR_BYTE_2__SHIFT 0x00000010
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__MAX_CHANNELS_MASK 0x00000007L
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__MAX_CHANNELS__SHIFT 0x00000000
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__SUPPORTED_FREQUENCIES_MASK 0x0000ff00L
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__SUPPORTED_FREQUENCIES__SHIFT 0x00000008
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__DESCRIPTOR_BYTE_2_MASK 0x00ff0000L
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__DESCRIPTOR_BYTE_2__SHIFT 0x00000010
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__MAX_CHANNELS_MASK 0x00000007L
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__MAX_CHANNELS__SHIFT 0x00000000
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__SUPPORTED_FREQUENCIES_MASK 0x0000ff00L
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__SUPPORTED_FREQUENCIES__SHIFT 0x00000008
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__DESCRIPTOR_BYTE_2_MASK 0x00ff0000L
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__DESCRIPTOR_BYTE_2__SHIFT 0x00000010
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__MAX_CHANNELS_MASK 0x00000007L
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__MAX_CHANNELS__SHIFT 0x00000000
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__SUPPORTED_FREQUENCIES_MASK 0x0000ff00L
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__SUPPORTED_FREQUENCIES__SHIFT 0x00000008
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__DESCRIPTOR_BYTE_2_MASK 0x00ff0000L
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__DESCRIPTOR_BYTE_2__SHIFT 0x00000010
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__MAX_CHANNELS_MASK 0x00000007L
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__MAX_CHANNELS__SHIFT 0x00000000
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__SUPPORTED_FREQUENCIES_MASK 0x0000ff00L
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__SUPPORTED_FREQUENCIES__SHIFT 0x00000008
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__DESCRIPTOR_BYTE_2_MASK 0x00ff0000L
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__DESCRIPTOR_BYTE_2__SHIFT 0x00000010
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__MAX_CHANNELS_MASK 0x00000007L
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__MAX_CHANNELS__SHIFT 0x00000000
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__SUPPORTED_FREQUENCIES_MASK 0x0000ff00L
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__SUPPORTED_FREQUENCIES__SHIFT 0x00000008
+#define AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__CHANNEL_ALLOCATION_MASK 0x0000ff00L
+#define AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__CHANNEL_ALLOCATION__SHIFT 0x00000008
+#define AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__DOWN_MIX_INHIBIT_MASK 0x80000000L
+#define AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__DOWN_MIX_INHIBIT__SHIFT 0x0000001f
+#define AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__DP_CONNECTION_MASK 0x00020000L
+#define AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__DP_CONNECTION__SHIFT 0x00000011
+#define AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__EXTRA_CONNECTION_INFO_MASK 0x00fc0000L
+#define AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__EXTRA_CONNECTION_INFO__SHIFT 0x00000012
+#define AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__HDMI_CONNECTION_MASK 0x00010000L
+#define AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__HDMI_CONNECTION__SHIFT 0x00000010
+#define AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__LEVEL_SHIFT_MASK 0x78000000L
+#define AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__LEVEL_SHIFT__SHIFT 0x0000001b
+#define AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__SPEAKER_ALLOCATION_MASK 0x0000007fL
+#define AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__SPEAKER_ALLOCATION__SHIFT 0x00000000
+#define AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__AUDIO_ENABLED_MASK 0x80000000L
+#define AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__AUDIO_ENABLED__SHIFT 0x0000001f
+#define AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL1_CHANNEL_ID_MASK 0x000000f0L
+#define AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL1_CHANNEL_ID__SHIFT 0x00000004
+#define AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL1_ENABLE_MASK 0x00000001L
+#define AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL1_ENABLE__SHIFT 0x00000000
+#define AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL1_MUTE_MASK 0x00000002L
+#define AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL1_MUTE__SHIFT 0x00000001
+#define AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL3_CHANNEL_ID_MASK 0x0000f000L
+#define AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL3_CHANNEL_ID__SHIFT 0x0000000c
+#define AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL3_ENABLE_MASK 0x00000100L
+#define AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL3_ENABLE__SHIFT 0x00000008
+#define AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL3_MUTE_MASK 0x00000200L
+#define AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL3_MUTE__SHIFT 0x00000009
+#define AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_CHANNEL_ID_MASK 0x00f00000L
+#define AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_CHANNEL_ID__SHIFT 0x00000014
+#define AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_ENABLE_MASK 0x00010000L
+#define AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_ENABLE__SHIFT 0x00000010
+#define AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_MUTE_MASK 0x00020000L
+#define AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_MUTE__SHIFT 0x00000011
+#define AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_CHANNEL_ID_MASK 0xf0000000L
+#define AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_CHANNEL_ID__SHIFT 0x0000001c
+#define AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_ENABLE_MASK 0x01000000L
+#define AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_ENABLE__SHIFT 0x00000018
+#define AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_MUTE_MASK 0x02000000L
+#define AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_MUTE__SHIFT 0x00000019
+#define AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL01_CHANNEL_ID_MASK 0x000000f0L
+#define AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL01_CHANNEL_ID__SHIFT 0x00000004
+#define AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL01_ENABLE_MASK 0x00000001L
+#define AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL01_ENABLE__SHIFT 0x00000000
+#define AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL01_MUTE_MASK 0x00000002L
+#define AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL01_MUTE__SHIFT 0x00000001
+#define AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL23_CHANNEL_ID_MASK 0x0000f000L
+#define AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL23_CHANNEL_ID__SHIFT 0x0000000c
+#define AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL23_ENABLE_MASK 0x00000100L
+#define AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL23_ENABLE__SHIFT 0x00000008
+#define AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL23_MUTE_MASK 0x00000200L
+#define AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL23_MUTE__SHIFT 0x00000009
+#define AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL45_CHANNEL_ID_MASK 0x00f00000L
+#define AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL45_CHANNEL_ID__SHIFT 0x00000014
+#define AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL45_ENABLE_MASK 0x00010000L
+#define AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL45_ENABLE__SHIFT 0x00000010
+#define AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL45_MUTE_MASK 0x00020000L
+#define AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL45_MUTE__SHIFT 0x00000011
+#define AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL67_CHANNEL_ID_MASK 0xf0000000L
+#define AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL67_CHANNEL_ID__SHIFT 0x0000001c
+#define AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL67_ENABLE_MASK 0x01000000L
+#define AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL67_ENABLE__SHIFT 0x00000018
+#define AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL67_MUTE_MASK 0x02000000L
+#define AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL67_MUTE__SHIFT 0x00000019
+#define AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_MODE__MULTICHANNEL_MODE_MASK 0x00000001L
+#define AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_MODE__MULTICHANNEL_MODE__SHIFT 0x00000000
+#define AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__COLOR_MASK 0x0000f000L
+#define AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__COLOR__SHIFT 0x0000000c
+#define AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__CONNECTION_TYPE_MASK 0x000f0000L
+#define AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__CONNECTION_TYPE__SHIFT 0x00000010
+#define AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_ASSOCIATION_MASK 0x000000f0L
+#define AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_ASSOCIATION__SHIFT 0x00000004
+#define AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_DEVICE_MASK 0x00f00000L
+#define AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_DEVICE__SHIFT 0x00000014
+#define AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__LOCATION_MASK 0x3f000000L
+#define AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__LOCATION__SHIFT 0x00000018
+#define AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__MISC_MASK 0x00000f00L
+#define AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__MISC__SHIFT 0x00000008
+#define AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY_MASK 0xc0000000L
+#define AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY__SHIFT 0x0000001e
+#define AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__SEQUENCE_MASK 0x0000000fL
+#define AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__SEQUENCE__SHIFT 0x00000000
+#define AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR__HBR_CAPABLE_MASK 0x00000001L
+#define AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR__HBR_CAPABLE__SHIFT 0x00000000
+#define AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR__HBR_ENABLE_MASK 0x00000010L
+#define AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR__HBR_ENABLE__SHIFT 0x00000004
+#define AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__AUDIO_LIPSYNC_MASK 0x0000ff00L
+#define AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__AUDIO_LIPSYNC__SHIFT 0x00000008
+#define AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__VIDEO_LIPSYNC_MASK 0x000000ffL
+#define AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__VIDEO_LIPSYNC__SHIFT 0x00000000
+#define AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_PIN_SENSE__IMPEDANCE_SENSE_MASK 0x7fffffffL
+#define AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_PIN_SENSE__IMPEDANCE_SENSE__SHIFT 0x00000000
+#define AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0__MANUFACTURER_ID_MASK 0x0000ffffL
+#define AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0__MANUFACTURER_ID__SHIFT 0x00000000
+#define AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0__PRODUCT_ID_MASK 0xffff0000L
+#define AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0__PRODUCT_ID__SHIFT 0x00000010
+#define AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO1__SINK_DESCRIPTION_LEN_MASK 0x000000ffL
+#define AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO1__SINK_DESCRIPTION_LEN__SHIFT 0x00000000
+#define AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO2__PORT_ID0_MASK 0xffffffffL
+#define AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO2__PORT_ID0__SHIFT 0x00000000
+#define AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO3__PORT_ID1_MASK 0xffffffffL
+#define AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO3__PORT_ID1__SHIFT 0x00000000
+#define AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION0_MASK 0x000000ffL
+#define AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION0__SHIFT 0x00000000
+#define AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION1_MASK 0x0000ff00L
+#define AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION1__SHIFT 0x00000008
+#define AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION2_MASK 0x00ff0000L
+#define AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION2__SHIFT 0x00000010
+#define AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION3_MASK 0xff000000L
+#define AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION3__SHIFT 0x00000018
+#define AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION4_MASK 0x000000ffL
+#define AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION4__SHIFT 0x00000000
+#define AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION5_MASK 0x0000ff00L
+#define AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION5__SHIFT 0x00000008
+#define AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION6_MASK 0x00ff0000L
+#define AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION6__SHIFT 0x00000010
+#define AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION7_MASK 0xff000000L
+#define AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION7__SHIFT 0x00000018
+#define AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION10_MASK 0x00ff0000L
+#define AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION10__SHIFT 0x00000010
+#define AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION11_MASK 0xff000000L
+#define AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION11__SHIFT 0x00000018
+#define AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION8_MASK 0x000000ffL
+#define AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION8__SHIFT 0x00000000
+#define AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION9_MASK 0x0000ff00L
+#define AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION9__SHIFT 0x00000008
+#define AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION12_MASK 0x000000ffL
+#define AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION12__SHIFT 0x00000000
+#define AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION13_MASK 0x0000ff00L
+#define AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION13__SHIFT 0x00000008
+#define AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION14_MASK 0x00ff0000L
+#define AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION14__SHIFT 0x00000010
+#define AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION15_MASK 0xff000000L
+#define AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION15__SHIFT 0x00000018
+#define AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8__DESCRIPTION16_MASK 0x000000ffL
+#define AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8__DESCRIPTION16__SHIFT 0x00000000
+#define AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8__DESCRIPTION17_MASK 0x0000ff00L
+#define AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8__DESCRIPTION17__SHIFT 0x00000008
+#define AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE__ENABLE_MASK 0x00000080L
+#define AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE__ENABLE__SHIFT 0x00000007
+#define AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_FORCE_MASK 0x10000000L
+#define AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_FORCE__SHIFT 0x0000001c
+#define AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_PAYLOAD_MASK 0x03ffffffL
+#define AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_PAYLOAD__SHIFT 0x00000000
+#define AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE__TAG_MASK 0x0000003fL
+#define AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE__TAG__SHIFT 0x00000000
+#define AZALIA_F0_CODEC_PIN_CONTROL_WIDGET_CONTROL__OUT_ENABLE_MASK 0x00000040L
+#define AZALIA_F0_CODEC_PIN_CONTROL_WIDGET_CONTROL__OUT_ENABLE__SHIFT 0x00000006
+#define AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE_MASK 0x00000008L
+#define AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE__SHIFT 0x00000003
+#define AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES_MASK 0x00000001L
+#define AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES__SHIFT 0x00000000
+#define AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY_MASK 0x000f0000L
+#define AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY__SHIFT 0x00000010
+#define AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST_MASK 0x00000100L
+#define AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST__SHIFT 0x00000008
+#define AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL_MASK 0x00000200L
+#define AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL__SHIFT 0x00000009
+#define AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT_MASK 0x00000002L
+#define AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT__SHIFT 0x00000001
+#define AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP_MASK 0x00000800L
+#define AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP__SHIFT 0x0000000b
+#define AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT_MASK 0x00000004L
+#define AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT__SHIFT 0x00000002
+#define AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL_MASK 0x00000400L
+#define AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL__SHIFT 0x0000000a
+#define AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET_MASK 0x00000040L
+#define AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET__SHIFT 0x00000006
+#define AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE_MASK 0x00000020L
+#define AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE__SHIFT 0x00000005
+#define AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE_MASK 0x00f00000L
+#define AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE__SHIFT 0x00000014
+#define AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY_MASK 0x00000080L
+#define AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY__SHIFT 0x00000007
+#define AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__BALANCED_I_O_PINS_MASK 0x00000040L
+#define AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__BALANCED_I_O_PINS__SHIFT 0x00000006
+#define AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__DP_MASK 0x01000000L
+#define AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__DP__SHIFT 0x00000018
+#define AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__EAPD_CAPABLE_MASK 0x00010000L
+#define AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__EAPD_CAPABLE__SHIFT 0x00000010
+#define AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__HDMI_MASK 0x00000080L
+#define AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__HDMI__SHIFT 0x00000007
+#define AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__HEADPHONE_DRIVE_CAPABLE_MASK 0x00000008L
+#define AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__HEADPHONE_DRIVE_CAPABLE__SHIFT 0x00000003
+#define AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__IMPEDANCE_SENSE_CAPABLE_MASK 0x00000001L
+#define AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__IMPEDANCE_SENSE_CAPABLE__SHIFT 0x00000000
+#define AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__INPUT_CAPABLE_MASK 0x00000020L
+#define AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__INPUT_CAPABLE__SHIFT 0x00000005
+#define AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__JACK_DETECTION_CAPABILITY_MASK 0x00000004L
+#define AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__JACK_DETECTION_CAPABILITY__SHIFT 0x00000002
+#define AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__OUTPUT_CAPABLE_MASK 0x00000010L
+#define AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__OUTPUT_CAPABLE__SHIFT 0x00000004
+#define AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__TRIGGER_REQUIRED_MASK 0x00000002L
+#define AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__TRIGGER_REQUIRED__SHIFT 0x00000001
+#define AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__VREF_CONTROL_MASK 0x0000ff00L
+#define AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__VREF_CONTROL__SHIFT 0x00000008
+#define AZALIA_F0_CODEC_RESYNC_FIFO_CONTROL__RESYNC_FIFO_STARTUP_KEEPOUT_WINDOW_MASK 0x0000003fL
+#define AZALIA_F0_CODEC_RESYNC_FIFO_CONTROL__RESYNC_FIFO_STARTUP_KEEPOUT_WINDOW__SHIFT 0x00000000
+#define AZALIA_F0_CODEC_ROOT_PARAMETER_REVISION_ID__AZALIA_CODEC_ROOT_PARAMETER_REVISION_ID_MASK 0xffffffffL
+#define AZALIA_F0_CODEC_ROOT_PARAMETER_REVISION_ID__AZALIA_CODEC_ROOT_PARAMETER_REVISION_ID__SHIFT 0x00000000
+#define AZALIA_F0_CODEC_ROOT_PARAMETER_VENDOR_AND_DEVICE_ID__AZALIA_CODEC_ROOT_PARAMETER_VENDOR_AND_DEVICE_ID_MASK 0xffffffffL
+#define AZALIA_F0_CODEC_ROOT_PARAMETER_VENDOR_AND_DEVICE_ID__AZALIA_CODEC_ROOT_PARAMETER_VENDOR_AND_DEVICE_ID__SHIFT 0x00000000
+#define AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_0__IEC_60958_CS_MODE_MASK 0x00000003L
+#define AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_0__IEC_60958_CS_MODE__SHIFT 0x00000000
+#define AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_0__IEC_60958_CS_SOURCE_NUMBER_MASK 0x0000003cL
+#define AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_0__IEC_60958_CS_SOURCE_NUMBER__SHIFT 0x00000002
+#define AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_CLOCK_ACCURACY_MASK 0x00000003L
+#define AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_CLOCK_ACCURACY_OVRRD_EN_MASK 0x00000004L
+#define AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_CLOCK_ACCURACY_OVRRD_EN__SHIFT 0x00000002
+#define AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_CLOCK_ACCURACY__SHIFT 0x00000000
+#define AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_WORD_LENGTH_MASK 0x00000078L
+#define AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_WORD_LENGTH_OVRRD_EN_MASK 0x00000080L
+#define AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_WORD_LENGTH_OVRRD_EN__SHIFT 0x00000007
+#define AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_WORD_LENGTH__SHIFT 0x00000003
+#define AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_2__IEC_60958_CS_SAMPLING_FREQUENCY_MASK 0x0000003fL
+#define AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_2__IEC_60958_CS_SAMPLING_FREQUENCY_OVRRD_EN_MASK 0x00000040L
+#define AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_2__IEC_60958_CS_SAMPLING_FREQUENCY_OVRRD_EN__SHIFT 0x00000006
+#define AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_2__IEC_60958_CS_SAMPLING_FREQUENCY__SHIFT 0x00000000
+#define AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_3__IEC_60958_CS_ORIGINAL_SAMPLING_FREQUENCY_MASK 0x0000000fL
+#define AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_3__IEC_60958_CS_ORIGINAL_SAMPLING_FREQUENCY_OVRRD_EN_MASK 0x00000010L
+#define AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_3__IEC_60958_CS_ORIGINAL_SAMPLING_FREQUENCY_OVRRD_EN__SHIFT 0x00000004
+#define AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_3__IEC_60958_CS_ORIGINAL_SAMPLING_FREQUENCY__SHIFT 0x00000000
+#define AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_CGMS_A_MASK 0x00000060L
+#define AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_CGMS_A__SHIFT 0x00000005
+#define AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_CGMS_A_VALID_MASK 0x00000080L
+#define AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_CGMS_A_VALID__SHIFT 0x00000007
+#define AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_MPEG_SURROUND_INFO_MASK 0x00000010L
+#define AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_MPEG_SURROUND_INFO__SHIFT 0x00000004
+#define AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_SAMPLING_FREQUENCY_COEFF_MASK 0x0000000fL
+#define AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_SAMPLING_FREQUENCY_COEFF__SHIFT 0x00000000
+#define AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_5__IEC_60958_CS_CHANNEL_NUMBER_L_MASK 0x0000000fL
+#define AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_5__IEC_60958_CS_CHANNEL_NUMBER_L__SHIFT 0x00000000
+#define AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_5__IEC_60958_CS_CHANNEL_NUMBER_R_MASK 0x000000f0L
+#define AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_5__IEC_60958_CS_CHANNEL_NUMBER_R__SHIFT 0x00000004
+#define AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_6__IEC_60958_CS_CHANNEL_NUMBER_2_MASK 0x0000000fL
+#define AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_6__IEC_60958_CS_CHANNEL_NUMBER_2__SHIFT 0x00000000
+#define AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_6__IEC_60958_CS_CHANNEL_NUMBER_3_MASK 0x000000f0L
+#define AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_6__IEC_60958_CS_CHANNEL_NUMBER_3__SHIFT 0x00000004
+#define AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_7__IEC_60958_CS_CHANNEL_NUMBER_4_MASK 0x0000000fL
+#define AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_7__IEC_60958_CS_CHANNEL_NUMBER_4__SHIFT 0x00000000
+#define AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_7__IEC_60958_CS_CHANNEL_NUMBER_5_MASK 0x000000f0L
+#define AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_7__IEC_60958_CS_CHANNEL_NUMBER_5__SHIFT 0x00000004
+#define AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_8__IEC_60958_CS_CHANNEL_NUMBER_6_MASK 0x0000000fL
+#define AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_8__IEC_60958_CS_CHANNEL_NUMBER_6__SHIFT 0x00000000
+#define AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_8__IEC_60958_CS_CHANNEL_NUMBER_7_MASK 0x000000f0L
+#define AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_8__IEC_60958_CS_CHANNEL_NUMBER_7__SHIFT 0x00000004
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID__CHANNEL_ID_MASK 0x0000000fL
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID__CHANNEL_ID__SHIFT 0x00000000
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID__STREAM_ID_MASK 0x000000f0L
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID__STREAM_ID__SHIFT 0x00000004
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__BITS_PER_SAMPLE_MASK 0x00000070L
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__BITS_PER_SAMPLE__SHIFT 0x00000004
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__NUMBER_OF_CHANNELS_MASK 0x0000000fL
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__NUMBER_OF_CHANNELS__SHIFT 0x00000000
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_DIVISOR_MASK 0x00000700L
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_DIVISOR__SHIFT 0x00000008
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_MULTIPLE_MASK 0x00003800L
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_MULTIPLE__SHIFT 0x0000000b
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_RATE_MASK 0x00004000L
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_RATE__SHIFT 0x0000000e
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__STREAM_TYPE_MASK 0x00008000L
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__STREAM_TYPE_R_MASK 0x00008000L
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__STREAM_TYPE_R__SHIFT 0x0000000f
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__STREAM_TYPE__SHIFT 0x0000000f
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER_2__CC_MASK 0x0000007fL
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER_2__CC__SHIFT 0x00000000
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER_3__KEEPALIVE_MASK 0x00000080L
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER_3__KEEPALIVE__SHIFT 0x00000007
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__CC_MASK 0x00007f00L
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__CC__SHIFT 0x00000008
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__COPY_MASK 0x00000010L
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__COPY__SHIFT 0x00000004
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__DIGEN_MASK 0x00000001L
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__DIGEN__SHIFT 0x00000000
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__KEEPALIVE_MASK 0x00800000L
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__KEEPALIVE__SHIFT 0x00000017
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__L_MASK 0x00000080L
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__L__SHIFT 0x00000007
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__NON_AUDIO_MASK 0x00000020L
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__NON_AUDIO__SHIFT 0x00000005
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRE_MASK 0x00000008L
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRE__SHIFT 0x00000003
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRO_MASK 0x00000040L
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRO__SHIFT 0x00000006
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__VCFG_MASK 0x00000004L
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__VCFG__SHIFT 0x00000002
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__V_MASK 0x00000002L
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__V__SHIFT 0x00000001
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_RAMP_RATE__RAMP_RATE_MASK 0x000000ffL
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_RAMP_RATE__RAMP_RATE__SHIFT 0x00000000
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE_MASK 0x00000008L
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE__SHIFT 0x00000003
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES_MASK 0x00000001L
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES__SHIFT 0x00000000
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY_MASK 0x000f0000L
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY__SHIFT 0x00000010
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST_MASK 0x00000100L
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST__SHIFT 0x00000008
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL_MASK 0x00000200L
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL__SHIFT 0x00000009
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__FORMAT_OVERRIDE_MASK 0x00000010L
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__FORMAT_OVERRIDE__SHIFT 0x00000004
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT_MASK 0x00000002L
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT__SHIFT 0x00000001
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP_MASK 0x00000800L
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP__SHIFT 0x0000000b
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT_MASK 0x00000004L
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT__SHIFT 0x00000002
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL_MASK 0x00000400L
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL__SHIFT 0x0000000a
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET_MASK 0x00000040L
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET__SHIFT 0x00000006
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE_MASK 0x00000020L
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE__SHIFT 0x00000005
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE_MASK 0x00f00000L
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE__SHIFT 0x00000014
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY_MASK 0x00000080L
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY__SHIFT 0x00000007
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_STREAM_FORMATS__STREAM_FORMATS_MASK 0xffffffffL
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_STREAM_FORMATS__STREAM_FORMATS__SHIFT 0x00000000
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_BIT_CAPABILITIES_MASK 0x001f0000L
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_BIT_CAPABILITIES__SHIFT 0x00000010
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_RATE_CAPABILITIES_MASK 0x00000fffL
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_RATE_CAPABILITIES__SHIFT 0x00000000
+#define AZALIA_F2_CODEC_CONVERTER_STRIPE_CONTROL__STRIPE_CAPABILITY_MASK 0x00700000L
+#define AZALIA_F2_CODEC_CONVERTER_STRIPE_CONTROL__STRIPE_CAPABILITY__SHIFT 0x00000014
+#define AZALIA_F2_CODEC_CONVERTER_STRIPE_CONTROL__STRIPE_CONTROL_MASK 0x00000003L
+#define AZALIA_F2_CODEC_CONVERTER_STRIPE_CONTROL__STRIPE_CONTROL__SHIFT 0x00000000
+#define AZALIA_F2_CODEC_FUNCTION_CONTROL_CONVERTER_SYNCHRONIZATION__CONVERTER_SYNCHRONIZATION_MASK 0x0000003fL
+#define AZALIA_F2_CODEC_FUNCTION_CONTROL_CONVERTER_SYNCHRONIZATION__CONVERTER_SYNCHRONIZATION__SHIFT 0x00000000
+#define AZALIA_F2_CODEC_FUNCTION_CONTROL_POWER_STATE__CLKSTOPOK_MASK 0x00000200L
+#define AZALIA_F2_CODEC_FUNCTION_CONTROL_POWER_STATE__CLKSTOPOK__SHIFT 0x00000009
+#define AZALIA_F2_CODEC_FUNCTION_CONTROL_POWER_STATE__POWER_STATE_ACT_MASK 0x000000f0L
+#define AZALIA_F2_CODEC_FUNCTION_CONTROL_POWER_STATE__POWER_STATE_ACT__SHIFT 0x00000004
+#define AZALIA_F2_CODEC_FUNCTION_CONTROL_POWER_STATE__POWER_STATE_SET_MASK 0x0000000fL
+#define AZALIA_F2_CODEC_FUNCTION_CONTROL_POWER_STATE__POWER_STATE_SET__SHIFT 0x00000000
+#define AZALIA_F2_CODEC_FUNCTION_CONTROL_POWER_STATE__POWER_STATE_SETTINGS_RESET_MASK 0x00000400L
+#define AZALIA_F2_CODEC_FUNCTION_CONTROL_POWER_STATE__POWER_STATE_SETTINGS_RESET__SHIFT 0x0000000a
+#define AZALIA_F2_CODEC_FUNCTION_CONTROL_RESET__CODEC_RESET_MASK 0x00000001L
+#define AZALIA_F2_CODEC_FUNCTION_CONTROL_RESET__CODEC_RESET__SHIFT 0x00000000
+#define AZALIA_F2_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID_2__SUBSYSTEM_ID_BYTE1_MASK 0x000000ffL
+#define AZALIA_F2_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID_2__SUBSYSTEM_ID_BYTE1__SHIFT 0x00000000
+#define AZALIA_F2_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID_3__SUBSYSTEM_ID_BYTE2_MASK 0x000000ffL
+#define AZALIA_F2_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID_3__SUBSYSTEM_ID_BYTE2__SHIFT 0x00000000
+#define AZALIA_F2_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID_4__SUBSYSTEM_ID_BYTE3_MASK 0x000000ffL
+#define AZALIA_F2_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID_4__SUBSYSTEM_ID_BYTE3__SHIFT 0x00000000
+#define AZALIA_F2_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID__SUBSYSTEM_ID_BYTE0_MASK 0x000000ffL
+#define AZALIA_F2_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID__SUBSYSTEM_ID_BYTE0__SHIFT 0x00000000
+#define AZALIA_F2_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID__SUBSYSTEM_ID_BYTE1_MASK 0x0000ff00L
+#define AZALIA_F2_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID__SUBSYSTEM_ID_BYTE1__SHIFT 0x00000008
+#define AZALIA_F2_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID__SUBSYSTEM_ID_BYTE2_MASK 0x00ff0000L
+#define AZALIA_F2_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID__SUBSYSTEM_ID_BYTE2__SHIFT 0x00000010
+#define AZALIA_F2_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID__SUBSYSTEM_ID_BYTE3_MASK 0xff000000L
+#define AZALIA_F2_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID__SUBSYSTEM_ID_BYTE3__SHIFT 0x00000018
+#define AZALIA_F2_CODEC_FUNCTION_PARAMETER_GROUP_TYPE__AZALIA_CODEC_FUNCTION_PARAMETER_GROUP_TYPE_MASK 0xffffffffL
+#define AZALIA_F2_CODEC_FUNCTION_PARAMETER_GROUP_TYPE__AZALIA_CODEC_FUNCTION_PARAMETER_GROUP_TYPE__SHIFT 0x00000000
+#define AZALIA_F2_CODEC_FUNCTION_PARAMETER_POWER_STATES__AZALIA_CODEC_FUNCTION_PARAMETER_POWER_STATES_MASK 0x3fffffffL
+#define AZALIA_F2_CODEC_FUNCTION_PARAMETER_POWER_STATES__AZALIA_CODEC_FUNCTION_PARAMETER_POWER_STATES__SHIFT 0x00000000
+#define AZALIA_F2_CODEC_FUNCTION_PARAMETER_POWER_STATES__CLKSTOP_MASK 0x40000000L
+#define AZALIA_F2_CODEC_FUNCTION_PARAMETER_POWER_STATES__CLKSTOP__SHIFT 0x0000001e
+#define AZALIA_F2_CODEC_FUNCTION_PARAMETER_POWER_STATES__EPSS_MASK 0x80000000L
+#define AZALIA_F2_CODEC_FUNCTION_PARAMETER_POWER_STATES__EPSS__SHIFT 0x0000001f
+#define AZALIA_F2_CODEC_FUNCTION_PARAMETER_STREAM_FORMATS__AZALIA_CODEC_FUNCTION_PARAMETER_STREAM_FORMATS_MASK 0xffffffffL
+#define AZALIA_F2_CODEC_FUNCTION_PARAMETER_STREAM_FORMATS__AZALIA_CODEC_FUNCTION_PARAMETER_STREAM_FORMATS__SHIFT 0x00000000
+#define AZALIA_F2_CODEC_FUNCTION_PARAMETER_SUBORDINATE_NODE_COUNT__AZALIA_CODEC_FUNCTION_PARAMETER_SUBORDINATE_NODE_COUNT_MASK 0xffffffffL
+#define AZALIA_F2_CODEC_FUNCTION_PARAMETER_SUBORDINATE_NODE_COUNT__AZALIA_CODEC_FUNCTION_PARAMETER_SUBORDINATE_NODE_COUNT__SHIFT 0x00000000
+#define AZALIA_F2_CODEC_FUNCTION_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_BIT_CAPABILITIES_MASK 0x001f0000L
+#define AZALIA_F2_CODEC_FUNCTION_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_BIT_CAPABILITIES__SHIFT 0x00000010
+#define AZALIA_F2_CODEC_FUNCTION_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_RATE_CAPABILITIES_MASK 0x00000fffL
+#define AZALIA_F2_CODEC_FUNCTION_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_RATE_CAPABILITIES__SHIFT 0x00000000
+#define AZALIA_F2_CODEC_PIN_ASSOCIATION_INFO__ASSOCIATION_INFO_MASK 0xffffffffL
+#define AZALIA_F2_CODEC_PIN_ASSOCIATION_INFO__ASSOCIATION_INFO__SHIFT 0x00000000
+#define AZALIA_F2_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR_DATA__DESCRIPTOR_MASK 0xffffffffL
+#define AZALIA_F2_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR_DATA__DESCRIPTOR__SHIFT 0x00000000
+#define AZALIA_F2_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR__DESCRIPTOR_BYTE_2_MASK 0x00ff0000L
+#define AZALIA_F2_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR__DESCRIPTOR_BYTE_2__SHIFT 0x00000010
+#define AZALIA_F2_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR__FORMAT_CODE_MASK 0x00000078L
+#define AZALIA_F2_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR__FORMAT_CODE__SHIFT 0x00000003
+#define AZALIA_F2_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR__MAX_CHANNELS_MASK 0x00000007L
+#define AZALIA_F2_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR__MAX_CHANNELS__SHIFT 0x00000000
+#define AZALIA_F2_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR__SUPPORTED_FREQUENCIES_MASK 0x0000ff00L
+#define AZALIA_F2_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR__SUPPORTED_FREQUENCIES__SHIFT 0x00000008
+#define AZALIA_F2_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR__SUPPORTED_FREQUENCIES_STEREO_MASK 0xff000000L
+#define AZALIA_F2_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR__SUPPORTED_FREQUENCIES_STEREO__SHIFT 0x00000018
+#define AZALIA_F2_CODEC_PIN_CONTROL_AUDIO_SINK_INFO_DATA__SINK_DATA_MASK 0xffffffffL
+#define AZALIA_F2_CODEC_PIN_CONTROL_AUDIO_SINK_INFO_DATA__SINK_DATA__SHIFT 0x00000000
+#define AZALIA_F2_CODEC_PIN_CONTROL_AUDIO_SINK_INFO_INDEX__SINK_INFO_INDEX_MASK 0x000000ffL
+#define AZALIA_F2_CODEC_PIN_CONTROL_AUDIO_SINK_INFO_INDEX__SINK_INFO_INDEX__SHIFT 0x00000000
+#define AZALIA_F2_CODEC_PIN_CONTROL_CHANNEL_ALLOCATION__CHANNEL_ALLOCATION_MASK 0x000000ffL
+#define AZALIA_F2_CODEC_PIN_CONTROL_CHANNEL_ALLOCATION__CHANNEL_ALLOCATION__SHIFT 0x00000000
+#define AZALIA_F2_CODEC_PIN_CONTROL_DOWN_MIX_INFO__DOWN_MIX_INHIBIT_MASK 0x00000080L
+#define AZALIA_F2_CODEC_PIN_CONTROL_DOWN_MIX_INFO__DOWN_MIX_INHIBIT__SHIFT 0x00000007
+#define AZALIA_F2_CODEC_PIN_CONTROL_DOWN_MIX_INFO__LEVEL_SHIFT_MASK 0x00000078L
+#define AZALIA_F2_CODEC_PIN_CONTROL_DOWN_MIX_INFO__LEVEL_SHIFT__SHIFT 0x00000003
+#define AZALIA_F2_CODEC_PIN_CONTROL_HBR__HBR_CAPABLE_MASK 0x00000001L
+#define AZALIA_F2_CODEC_PIN_CONTROL_HBR__HBR_CAPABLE__SHIFT 0x00000000
+#define AZALIA_F2_CODEC_PIN_CONTROL_HBR__HBR_ENABLE_MASK 0x00000010L
+#define AZALIA_F2_CODEC_PIN_CONTROL_HBR__HBR_ENABLE__SHIFT 0x00000004
+#define AZALIA_F2_CODEC_PIN_CONTROL_LIPSYNC__AUDIO_LIPSYNC_MASK 0x0000ff00L
+#define AZALIA_F2_CODEC_PIN_CONTROL_LIPSYNC__AUDIO_LIPSYNC__SHIFT 0x00000008
+#define AZALIA_F2_CODEC_PIN_CONTROL_LIPSYNC__VIDEO_LIPSYNC_MASK 0x000000ffL
+#define AZALIA_F2_CODEC_PIN_CONTROL_LIPSYNC__VIDEO_LIPSYNC__SHIFT 0x00000000
+#define AZALIA_F2_CODEC_PIN_CONTROL_MANUFACTURER_ID__MANUFACTURER_ID_MASK 0x0000ffffL
+#define AZALIA_F2_CODEC_PIN_CONTROL_MANUFACTURER_ID__MANUFACTURER_ID__SHIFT 0x00000000
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL01_ENABLE__MULTICHANNEL01_CHANNEL_ID_MASK 0x000000f0L
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL01_ENABLE__MULTICHANNEL01_CHANNEL_ID__SHIFT 0x00000004
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL01_ENABLE__MULTICHANNEL01_ENABLE_MASK 0x00000001L
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL01_ENABLE__MULTICHANNEL01_ENABLE__SHIFT 0x00000000
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL01_ENABLE__MULTICHANNEL01_MUTE_MASK 0x00000002L
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL01_ENABLE__MULTICHANNEL01_MUTE__SHIFT 0x00000001
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL1_ENABLE__MULTICHANNEL1_CHANNEL_ID_MASK 0x000000f0L
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL1_ENABLE__MULTICHANNEL1_CHANNEL_ID__SHIFT 0x00000004
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL1_ENABLE__MULTICHANNEL1_ENABLE_MASK 0x00000001L
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL1_ENABLE__MULTICHANNEL1_ENABLE__SHIFT 0x00000000
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL1_ENABLE__MULTICHANNEL1_MUTE_MASK 0x00000002L
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL1_ENABLE__MULTICHANNEL1_MUTE__SHIFT 0x00000001
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL23_ENABLE__MULTICHANNEL23_CHANNEL_ID_MASK 0x000000f0L
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL23_ENABLE__MULTICHANNEL23_CHANNEL_ID__SHIFT 0x00000004
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL23_ENABLE__MULTICHANNEL23_ENABLE_MASK 0x00000001L
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL23_ENABLE__MULTICHANNEL23_ENABLE__SHIFT 0x00000000
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL23_ENABLE__MULTICHANNEL23_MUTE_MASK 0x00000002L
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL23_ENABLE__MULTICHANNEL23_MUTE__SHIFT 0x00000001
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL3_ENABLE__MULTICHANNEL3_CHANNEL_ID_MASK 0x000000f0L
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL3_ENABLE__MULTICHANNEL3_CHANNEL_ID__SHIFT 0x00000004
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL3_ENABLE__MULTICHANNEL3_ENABLE_MASK 0x00000001L
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL3_ENABLE__MULTICHANNEL3_ENABLE__SHIFT 0x00000000
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL3_ENABLE__MULTICHANNEL3_MUTE_MASK 0x00000002L
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL3_ENABLE__MULTICHANNEL3_MUTE__SHIFT 0x00000001
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL45_ENABLE__MULTICHANNEL45_CHANNEL_ID_MASK 0x000000f0L
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL45_ENABLE__MULTICHANNEL45_CHANNEL_ID__SHIFT 0x00000004
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL45_ENABLE__MULTICHANNEL45_ENABLE_MASK 0x00000001L
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL45_ENABLE__MULTICHANNEL45_ENABLE__SHIFT 0x00000000
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL45_ENABLE__MULTICHANNEL45_MUTE_MASK 0x00000002L
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL45_ENABLE__MULTICHANNEL45_MUTE__SHIFT 0x00000001
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL5_ENABLE__MULTICHANNEL5_CHANNEL_ID_MASK 0x000000f0L
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL5_ENABLE__MULTICHANNEL5_CHANNEL_ID__SHIFT 0x00000004
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL5_ENABLE__MULTICHANNEL5_ENABLE_MASK 0x00000001L
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL5_ENABLE__MULTICHANNEL5_ENABLE__SHIFT 0x00000000
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL5_ENABLE__MULTICHANNEL5_MUTE_MASK 0x00000002L
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL5_ENABLE__MULTICHANNEL5_MUTE__SHIFT 0x00000001
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL67_ENABLE__MULTICHANNEL67_CHANNEL_ID_MASK 0x000000f0L
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL67_ENABLE__MULTICHANNEL67_CHANNEL_ID__SHIFT 0x00000004
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL67_ENABLE__MULTICHANNEL67_ENABLE_MASK 0x00000001L
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL67_ENABLE__MULTICHANNEL67_ENABLE__SHIFT 0x00000000
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL67_ENABLE__MULTICHANNEL67_MUTE_MASK 0x00000002L
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL67_ENABLE__MULTICHANNEL67_MUTE__SHIFT 0x00000001
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL7_ENABLE__MULTICHANNEL7_CHANNEL_ID_MASK 0x000000f0L
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL7_ENABLE__MULTICHANNEL7_CHANNEL_ID__SHIFT 0x00000004
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL7_ENABLE__MULTICHANNEL7_ENABLE_MASK 0x00000001L
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL7_ENABLE__MULTICHANNEL7_ENABLE__SHIFT 0x00000000
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL7_ENABLE__MULTICHANNEL7_MUTE_MASK 0x00000002L
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL7_ENABLE__MULTICHANNEL7_MUTE__SHIFT 0x00000001
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL_MODE__MULTICHANNEL_MODE_MASK 0x00000001L
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL_MODE__MULTICHANNEL_MODE__SHIFT 0x00000000
+#define AZALIA_F2_CODEC_PIN_CONTROL_PORTID0__PORTID_MASK 0xffffffffL
+#define AZALIA_F2_CODEC_PIN_CONTROL_PORTID0__PORTID__SHIFT 0x00000000
+#define AZALIA_F2_CODEC_PIN_CONTROL_PORTID1__PORTID_MASK 0xffffffffL
+#define AZALIA_F2_CODEC_PIN_CONTROL_PORTID1__PORTID__SHIFT 0x00000000
+#define AZALIA_F2_CODEC_PIN_CONTROL_PRODUCT_ID__PRODUCT_ID_MASK 0x0000ffffL
+#define AZALIA_F2_CODEC_PIN_CONTROL_PRODUCT_ID__PRODUCT_ID__SHIFT 0x00000000
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_2__COLOR_MASK 0x000000f0L
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_2__COLOR__SHIFT 0x00000004
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_2__MISC_MASK 0x0000000fL
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_2__MISC__SHIFT 0x00000000
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_3__CONNECTION_TYPE_MASK 0x0000000fL
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_3__CONNECTION_TYPE__SHIFT 0x00000000
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_3__DEFAULT_DEVICE_MASK 0x000000f0L
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_3__DEFAULT_DEVICE__SHIFT 0x00000004
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_4__LOCATION_MASK 0x0000003fL
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_4__LOCATION__SHIFT 0x00000000
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_4__PORT_CONNECTIVITY_MASK 0x000000c0L
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_4__PORT_CONNECTIVITY__SHIFT 0x00000006
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__COLOR_MASK 0x0000f000L
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__COLOR__SHIFT 0x0000000c
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__CONNECTION_TYPE_MASK 0x000f0000L
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__CONNECTION_TYPE__SHIFT 0x00000010
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_ASSOCIATION_MASK 0x000000f0L
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_ASSOCIATION__SHIFT 0x00000004
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_DEVICE_MASK 0x00f00000L
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_DEVICE__SHIFT 0x00000014
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__LOCATION_MASK 0x3f000000L
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__LOCATION__SHIFT 0x00000018
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__MISC_MASK 0x00000f00L
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__MISC__SHIFT 0x00000008
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY_MASK 0xc0000000L
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY__SHIFT 0x0000001e
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__SEQUENCE_MASK 0x0000000fL
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__SEQUENCE__SHIFT 0x00000000
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONNECTION_LIST_ENTRY__CONNECTION_LIST_ENTRY_MASK 0xffffffffL
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONNECTION_LIST_ENTRY__CONNECTION_LIST_ENTRY__SHIFT 0x00000000
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_PIN_SENSE__IMPEDANCE_SENSE_MASK 0x7fffffffL
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_PIN_SENSE__IMPEDANCE_SENSE__SHIFT 0x00000000
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_PIN_SENSE__PRESENCE_DETECT_MASK 0x80000000L
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_PIN_SENSE__PRESENCE_DETECT__SHIFT 0x0000001f
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_SPEAKER_ALLOCATION__DP_CONNECTION_MASK 0x00000200L
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_SPEAKER_ALLOCATION__DP_CONNECTION__SHIFT 0x00000009
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_SPEAKER_ALLOCATION__EXTRA_CONNECTION_INFO_MASK 0x0000fc00L
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_SPEAKER_ALLOCATION__EXTRA_CONNECTION_INFO__SHIFT 0x0000000a
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_SPEAKER_ALLOCATION__HDMI_CONNECTION_MASK 0x00000100L
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_SPEAKER_ALLOCATION__HDMI_CONNECTION__SHIFT 0x00000008
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_SPEAKER_ALLOCATION__SPEAKER_ALLOCATION_MASK 0x0000007fL
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_SPEAKER_ALLOCATION__SPEAKER_ALLOCATION__SHIFT 0x00000000
+#define AZALIA_F2_CODEC_PIN_CONTROL_SINK_DESCRIPTION_LEN__SINK_DESCRIPTION_LEN_MASK 0x000000ffL
+#define AZALIA_F2_CODEC_PIN_CONTROL_SINK_DESCRIPTION_LEN__SINK_DESCRIPTION_LEN__SHIFT 0x00000000
+#define AZALIA_F2_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE__ENABLE_MASK 0x00000080L
+#define AZALIA_F2_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE__ENABLE__SHIFT 0x00000007
+#define AZALIA_F2_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE__TAG_MASK 0x0000003fL
+#define AZALIA_F2_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE__TAG__SHIFT 0x00000000
+#define AZALIA_F2_CODEC_PIN_CONTROL_WIDGET_CONTROL__OUT_ENABLE_MASK 0x00000040L
+#define AZALIA_F2_CODEC_PIN_CONTROL_WIDGET_CONTROL__OUT_ENABLE__SHIFT 0x00000006
+#define AZALIA_F2_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE_MASK 0x00000008L
+#define AZALIA_F2_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE__SHIFT 0x00000003
+#define AZALIA_F2_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES_MASK 0x00000001L
+#define AZALIA_F2_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES__SHIFT 0x00000000
+#define AZALIA_F2_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY_MASK 0x000f0000L
+#define AZALIA_F2_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY__SHIFT 0x00000010
+#define AZALIA_F2_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST_MASK 0x00000100L
+#define AZALIA_F2_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST__SHIFT 0x00000008
+#define AZALIA_F2_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL_MASK 0x00000200L
+#define AZALIA_F2_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL__SHIFT 0x00000009
+#define AZALIA_F2_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT_MASK 0x00000002L
+#define AZALIA_F2_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT__SHIFT 0x00000001
+#define AZALIA_F2_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP_MASK 0x00000800L
+#define AZALIA_F2_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP__SHIFT 0x0000000b
+#define AZALIA_F2_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT_MASK 0x00000004L
+#define AZALIA_F2_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT__SHIFT 0x00000002
+#define AZALIA_F2_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL_MASK 0x00000400L
+#define AZALIA_F2_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL__SHIFT 0x0000000a
+#define AZALIA_F2_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET_MASK 0x00000040L
+#define AZALIA_F2_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET__SHIFT 0x00000006
+#define AZALIA_F2_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE_MASK 0x00000020L
+#define AZALIA_F2_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE__SHIFT 0x00000005
+#define AZALIA_F2_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE_MASK 0x00f00000L
+#define AZALIA_F2_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE__SHIFT 0x00000014
+#define AZALIA_F2_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY_MASK 0x00000080L
+#define AZALIA_F2_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY__SHIFT 0x00000007
+#define AZALIA_F2_CODEC_PIN_PARAMETER_CAPABILITIES__BALANCED_I_O_PINS_MASK 0x00000040L
+#define AZALIA_F2_CODEC_PIN_PARAMETER_CAPABILITIES__BALANCED_I_O_PINS__SHIFT 0x00000006
+#define AZALIA_F2_CODEC_PIN_PARAMETER_CAPABILITIES__DP_MASK 0x01000000L
+#define AZALIA_F2_CODEC_PIN_PARAMETER_CAPABILITIES__DP__SHIFT 0x00000018
+#define AZALIA_F2_CODEC_PIN_PARAMETER_CAPABILITIES__EAPD_CAPABLE_MASK 0x00010000L
+#define AZALIA_F2_CODEC_PIN_PARAMETER_CAPABILITIES__EAPD_CAPABLE__SHIFT 0x00000010
+#define AZALIA_F2_CODEC_PIN_PARAMETER_CAPABILITIES__HDMI_MASK 0x00000080L
+#define AZALIA_F2_CODEC_PIN_PARAMETER_CAPABILITIES__HDMI__SHIFT 0x00000007
+#define AZALIA_F2_CODEC_PIN_PARAMETER_CAPABILITIES__HEADPHONE_DRIVE_CAPABLE_MASK 0x00000008L
+#define AZALIA_F2_CODEC_PIN_PARAMETER_CAPABILITIES__HEADPHONE_DRIVE_CAPABLE__SHIFT 0x00000003
+#define AZALIA_F2_CODEC_PIN_PARAMETER_CAPABILITIES__IMPEDANCE_SENSE_CAPABLE_MASK 0x00000001L
+#define AZALIA_F2_CODEC_PIN_PARAMETER_CAPABILITIES__IMPEDANCE_SENSE_CAPABLE__SHIFT 0x00000000
+#define AZALIA_F2_CODEC_PIN_PARAMETER_CAPABILITIES__INPUT_CAPABLE_MASK 0x00000020L
+#define AZALIA_F2_CODEC_PIN_PARAMETER_CAPABILITIES__INPUT_CAPABLE__SHIFT 0x00000005
+#define AZALIA_F2_CODEC_PIN_PARAMETER_CAPABILITIES__JACK_DETECTION_CAPABILITY_MASK 0x00000004L
+#define AZALIA_F2_CODEC_PIN_PARAMETER_CAPABILITIES__JACK_DETECTION_CAPABILITY__SHIFT 0x00000002
+#define AZALIA_F2_CODEC_PIN_PARAMETER_CAPABILITIES__OUTPUT_CAPABLE_MASK 0x00000010L
+#define AZALIA_F2_CODEC_PIN_PARAMETER_CAPABILITIES__OUTPUT_CAPABLE__SHIFT 0x00000004
+#define AZALIA_F2_CODEC_PIN_PARAMETER_CAPABILITIES__TRIGGER_REQUIRED_MASK 0x00000002L
+#define AZALIA_F2_CODEC_PIN_PARAMETER_CAPABILITIES__TRIGGER_REQUIRED__SHIFT 0x00000001
+#define AZALIA_F2_CODEC_PIN_PARAMETER_CAPABILITIES__VREF_CONTROL_MASK 0x0000ff00L
+#define AZALIA_F2_CODEC_PIN_PARAMETER_CAPABILITIES__VREF_CONTROL__SHIFT 0x00000008
+#define AZALIA_F2_CODEC_PIN_PARAMETER_CONNECTION_LIST_LENGTH__CONNECTION_LIST_LENGTH_MASK 0xffffffffL
+#define AZALIA_F2_CODEC_PIN_PARAMETER_CONNECTION_LIST_LENGTH__CONNECTION_LIST_LENGTH__SHIFT 0x00000000
+#define AZALIA_F2_CODEC_ROOT_PARAMETER_REVISION_ID__AZALIA_CODEC_ROOT_PARAMETER_REVISION_ID_MASK 0xffffffffL
+#define AZALIA_F2_CODEC_ROOT_PARAMETER_REVISION_ID__AZALIA_CODEC_ROOT_PARAMETER_REVISION_ID__SHIFT 0x00000000
+#define AZALIA_F2_CODEC_ROOT_PARAMETER_SUBORDINATE_NODE_COUNT__AZALIA_CODEC_ROOT_PARAMETER_SUBORDINATE_NODE_COUNT_MASK 0xffffffffL
+#define AZALIA_F2_CODEC_ROOT_PARAMETER_SUBORDINATE_NODE_COUNT__AZALIA_CODEC_ROOT_PARAMETER_SUBORDINATE_NODE_COUNT__SHIFT 0x00000000
+#define AZALIA_F2_CODEC_ROOT_PARAMETER_VENDOR_AND_DEVICE_ID__AZALIA_CODEC_ROOT_PARAMETER_VENDOR_AND_DEVICE_ID_MASK 0xffffffffL
+#define AZALIA_F2_CODEC_ROOT_PARAMETER_VENDOR_AND_DEVICE_ID__AZALIA_CODEC_ROOT_PARAMETER_VENDOR_AND_DEVICE_ID__SHIFT 0x00000000
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_0__IEC_60958_CS_MODE_MASK 0x00000003L
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_0__IEC_60958_CS_MODE__SHIFT 0x00000000
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_0__IEC_60958_CS_SOURCE_NUMBER_MASK 0x0000003cL
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_0__IEC_60958_CS_SOURCE_NUMBER__SHIFT 0x00000002
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_CLOCK_ACCURACY_MASK 0x00000003L
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_CLOCK_ACCURACY_OVRRD_EN_MASK 0x00000004L
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_CLOCK_ACCURACY_OVRRD_EN__SHIFT 0x00000002
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_CLOCK_ACCURACY__SHIFT 0x00000000
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_WORD_LENGTH_MASK 0x00000078L
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_WORD_LENGTH_OVRRD_EN_MASK 0x00000080L
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_WORD_LENGTH_OVRRD_EN__SHIFT 0x00000007
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_WORD_LENGTH__SHIFT 0x00000003
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_2__IEC_60958_CS_SAMPLING_FREQUENCY_MASK 0x0000003fL
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_2__IEC_60958_CS_SAMPLING_FREQUENCY_OVRRD_EN_MASK 0x00000040L
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_2__IEC_60958_CS_SAMPLING_FREQUENCY_OVRRD_EN__SHIFT 0x00000006
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_2__IEC_60958_CS_SAMPLING_FREQUENCY__SHIFT 0x00000000
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_3__IEC_60958_CS_ORIGINAL_SAMPLING_FREQUENCY_MASK 0x0000000fL
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_3__IEC_60958_CS_ORIGINAL_SAMPLING_FREQUENCY_OVRRD_EN_MASK 0x00000010L
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_3__IEC_60958_CS_ORIGINAL_SAMPLING_FREQUENCY_OVRRD_EN__SHIFT 0x00000004
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_3__IEC_60958_CS_ORIGINAL_SAMPLING_FREQUENCY__SHIFT 0x00000000
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_CGMS_A_MASK 0x00000060L
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_CGMS_A__SHIFT 0x00000005
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_CGMS_A_VALID_MASK 0x00000080L
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_CGMS_A_VALID__SHIFT 0x00000007
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_MPEG_SURROUND_INFO_MASK 0x00000010L
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_MPEG_SURROUND_INFO__SHIFT 0x00000004
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_SAMPLING_FREQUENCY_COEFF_MASK 0x0000000fL
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_SAMPLING_FREQUENCY_COEFF__SHIFT 0x00000000
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_5__IEC_60958_CS_CHANNEL_NUMBER_L_MASK 0x0000000fL
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_5__IEC_60958_CS_CHANNEL_NUMBER_L__SHIFT 0x00000000
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_5__IEC_60958_CS_CHANNEL_NUMBER_R_MASK 0x000000f0L
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_5__IEC_60958_CS_CHANNEL_NUMBER_R__SHIFT 0x00000004
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_6__IEC_60958_CS_CHANNEL_NUMBER_2_MASK 0x0000000fL
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_6__IEC_60958_CS_CHANNEL_NUMBER_2__SHIFT 0x00000000
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_6__IEC_60958_CS_CHANNEL_NUMBER_3_MASK 0x000000f0L
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_6__IEC_60958_CS_CHANNEL_NUMBER_3__SHIFT 0x00000004
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_7__IEC_60958_CS_CHANNEL_NUMBER_4_MASK 0x0000000fL
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_7__IEC_60958_CS_CHANNEL_NUMBER_4__SHIFT 0x00000000
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_7__IEC_60958_CS_CHANNEL_NUMBER_5_MASK 0x000000f0L
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_7__IEC_60958_CS_CHANNEL_NUMBER_5__SHIFT 0x00000004
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_8__IEC_60958_CS_CHANNEL_NUMBER_6_MASK 0x0000000fL
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_8__IEC_60958_CS_CHANNEL_NUMBER_6__SHIFT 0x00000000
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_8__IEC_60958_CS_CHANNEL_NUMBER_7_MASK 0x000000f0L
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_8__IEC_60958_CS_CHANNEL_NUMBER_7__SHIFT 0x00000004
+#define AZALIA_FIFO_SIZE_CONTROL__MAX_FIFO_SIZE_MASK 0x00007f00L
+#define AZALIA_FIFO_SIZE_CONTROL__MAX_FIFO_SIZE__SHIFT 0x00000008
+#define AZALIA_FIFO_SIZE_CONTROL__MAX_LATENCY_SUPPORT_MASK 0x00ff0000L
+#define AZALIA_FIFO_SIZE_CONTROL__MAX_LATENCY_SUPPORT__SHIFT 0x00000010
+#define AZALIA_FIFO_SIZE_CONTROL__MIN_FIFO_SIZE_MASK 0x0000007fL
+#define AZALIA_FIFO_SIZE_CONTROL__MIN_FIFO_SIZE__SHIFT 0x00000000
+#define AZALIA_GLOBAL_CAPABILITIES__NUMBER_OF_SERIAL_DATA_OUTPUT_SIGNALS_MASK 0x00000006L
+#define AZALIA_GLOBAL_CAPABILITIES__NUMBER_OF_SERIAL_DATA_OUTPUT_SIGNALS__SHIFT 0x00000001
+#define AZALIA_LATENCY_COUNTER_CONTROL__AZALIA_LATENCY_COUNTER_RESET_MASK 0x00000001L
+#define AZALIA_LATENCY_COUNTER_CONTROL__AZALIA_LATENCY_COUNTER_RESET__SHIFT 0x00000000
+#define AZALIA_OUTPUT_PAYLOAD_CAPABILITY__OUTPUT_PAYLOAD_CAPABILITY_MASK 0x0000ffffL
+#define AZALIA_OUTPUT_PAYLOAD_CAPABILITY__OUTPUT_PAYLOAD_CAPABILITY__SHIFT 0x00000000
+#define AZALIA_OUTPUT_PAYLOAD_CAPABILITY__OUTSTRMPAY_MASK 0xffff0000L
+#define AZALIA_OUTPUT_PAYLOAD_CAPABILITY__OUTSTRMPAY__SHIFT 0x00000010
+#define AZALIA_OUTPUT_STREAM_ARBITER_CONTROL__LATENCY_HIDING_LEVEL_MASK 0x000000ffL
+#define AZALIA_OUTPUT_STREAM_ARBITER_CONTROL__LATENCY_HIDING_LEVEL__SHIFT 0x00000000
+#define AZALIA_OUTPUT_STREAM_ARBITER_CONTROL__SYS_MEM_ACTIVE_ENABLE_MASK 0x00000100L
+#define AZALIA_OUTPUT_STREAM_ARBITER_CONTROL__SYS_MEM_ACTIVE_ENABLE__SHIFT 0x00000008
+#define AZALIA_RIRB_AND_DP_CONTROL__DP_DMA_NON_SNOOP_MASK 0x00000010L
+#define AZALIA_RIRB_AND_DP_CONTROL__DP_DMA_NON_SNOOP__SHIFT 0x00000004
+#define AZALIA_RIRB_AND_DP_CONTROL__RIRB_NON_SNOOP_MASK 0x00000001L
+#define AZALIA_RIRB_AND_DP_CONTROL__RIRB_NON_SNOOP__SHIFT 0x00000000
+#define AZALIA_SCLK_CONTROL__AUDIO_SCLK_CONTROL_MASK 0x00000030L
+#define AZALIA_SCLK_CONTROL__AUDIO_SCLK_CONTROL__SHIFT 0x00000004
+#define AZALIA_STREAM_DATA__AZALIA_STREAM_REG_DATA_MASK 0xffffffffL
+#define AZALIA_STREAM_DATA__AZALIA_STREAM_REG_DATA__SHIFT 0x00000000
+#define AZALIA_STREAM_DEBUG__STREAM_DEBUG_DATA_MASK 0xffffffffL
+#define AZALIA_STREAM_DEBUG__STREAM_DEBUG_DATA__SHIFT 0x00000000
+#define AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_INDEX_MASK 0x000000ffL
+#define AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_INDEX__SHIFT 0x00000000
+#define AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_WRITE_EN_MASK 0x00000100L
+#define AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_WRITE_EN__SHIFT 0x00000008
+#define AZALIA_UNDERFLOW_FILLER_SAMPLE__AZALIA_UNDERFLOW_FILLER_SAMPLE_MASK 0xffffffffL
+#define AZALIA_UNDERFLOW_FILLER_SAMPLE__AZALIA_UNDERFLOW_FILLER_SAMPLE__SHIFT 0x00000000
+#define AZALIA_WORSTCASE_LATENCY_COUNT__AZALIA_WORSTCASE_LATENCY_COUNT_MASK 0xffffffffL
+#define AZALIA_WORSTCASE_LATENCY_COUNT__AZALIA_WORSTCASE_LATENCY_COUNT__SHIFT 0x00000000
+#define AZ_TEST_DEBUG_DATA__AZ_TEST_DEBUG_DATA_MASK 0xffffffffL
+#define AZ_TEST_DEBUG_DATA__AZ_TEST_DEBUG_DATA__SHIFT 0x00000000
+#define AZ_TEST_DEBUG_INDEX__AZ_TEST_DEBUG_INDEX_MASK 0x000000ffL
+#define AZ_TEST_DEBUG_INDEX__AZ_TEST_DEBUG_INDEX__SHIFT 0x00000000
+#define AZ_TEST_DEBUG_INDEX__AZ_TEST_DEBUG_WRITE_EN_MASK 0x00000100L
+#define AZ_TEST_DEBUG_INDEX__AZ_TEST_DEBUG_WRITE_EN__SHIFT 0x00000008
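+/*
+ * Every field in this header is described by a <REG>__<FIELD>_MASK /
+ * <REG>__<FIELD>__SHIFT pair.  A minimal sketch of how such a pair is
+ * typically consumed; the get_field()/set_field() helpers below are
+ * illustrative only and are not part of this header:
+ *
+ *	static inline u32 get_field(u32 val, u32 mask, u32 shift)
+ *	{
+ *		return (val & mask) >> shift;
+ *	}
+ *
+ *	static inline u32 set_field(u32 val, u32 mask, u32 shift, u32 f)
+ *	{
+ *		return (val & ~mask) | ((f << shift) & mask);
+ *	}
+ *
+ * For example, asserting the write-enable bit of the AZ test debug index
+ * register defined above:
+ *
+ *	val = set_field(val, AZ_TEST_DEBUG_INDEX__AZ_TEST_DEBUG_WRITE_EN_MASK,
+ *			AZ_TEST_DEBUG_INDEX__AZ_TEST_DEBUG_WRITE_EN__SHIFT, 1);
+ */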
+#define BL1_PWM_ABM_CNTL__BL1_PWM_AUTO_CALC_FINAL_DUTY_CYCLE_EN_MASK 0x00000008L
+#define BL1_PWM_ABM_CNTL__BL1_PWM_AUTO_CALC_FINAL_DUTY_CYCLE_EN__SHIFT 0x00000003
+#define BL1_PWM_ABM_CNTL__BL1_PWM_AUTO_UPDATE_CURRENT_ABM_LEVEL_EN_MASK 0x00000004L
+#define BL1_PWM_ABM_CNTL__BL1_PWM_AUTO_UPDATE_CURRENT_ABM_LEVEL_EN__SHIFT 0x00000002
+#define BL1_PWM_ABM_CNTL__BL1_PWM_AUTO_UPDATE_CURRENT_ABM_STEP_SIZE_MASK 0xffff0000L
+#define BL1_PWM_ABM_CNTL__BL1_PWM_AUTO_UPDATE_CURRENT_ABM_STEP_SIZE__SHIFT 0x00000010
+#define BL1_PWM_ABM_CNTL__BL1_PWM_USE_ABM_EN_MASK 0x00000001L
+#define BL1_PWM_ABM_CNTL__BL1_PWM_USE_ABM_EN__SHIFT 0x00000000
+#define BL1_PWM_ABM_CNTL__BL1_PWM_USE_AMBIENT_LEVEL_EN_MASK 0x00000002L
+#define BL1_PWM_ABM_CNTL__BL1_PWM_USE_AMBIENT_LEVEL_EN__SHIFT 0x00000001
+#define BL1_PWM_AMBIENT_LIGHT_LEVEL__BL1_PWM_AMBIENT_LIGHT_LEVEL_MASK 0x0001ffffL
+#define BL1_PWM_AMBIENT_LIGHT_LEVEL__BL1_PWM_AMBIENT_LIGHT_LEVEL__SHIFT 0x00000000
+#define BL1_PWM_BL_UPDATE_SAMPLE_RATE__ABM1_HGLS_REG_LOCK_MASK 0x80000000L
+#define BL1_PWM_BL_UPDATE_SAMPLE_RATE__ABM1_HGLS_REG_LOCK__SHIFT 0x0000001f
+#define BL1_PWM_BL_UPDATE_SAMPLE_RATE__BL1_PWM_BL_UPDATE_INITIAL_SAMPLE_RATE_COUNT_VALUE_WHEN_RESET_MASK 0x00ff0000L
+#define BL1_PWM_BL_UPDATE_SAMPLE_RATE__BL1_PWM_BL_UPDATE_INITIAL_SAMPLE_RATE_COUNT_VALUE_WHEN_RESET__SHIFT 0x00000010
+#define BL1_PWM_BL_UPDATE_SAMPLE_RATE__BL1_PWM_BL_UPDATE_RESET_SAMPLE_RATE_FRAME_COUNTER_MASK 0x00000002L
+#define BL1_PWM_BL_UPDATE_SAMPLE_RATE__BL1_PWM_BL_UPDATE_RESET_SAMPLE_RATE_FRAME_COUNTER__SHIFT 0x00000001
+#define BL1_PWM_BL_UPDATE_SAMPLE_RATE__BL1_PWM_BL_UPDATE_SAMPLE_RATE_COUNT_EN_MASK 0x00000001L
+#define BL1_PWM_BL_UPDATE_SAMPLE_RATE__BL1_PWM_BL_UPDATE_SAMPLE_RATE_COUNT_EN__SHIFT 0x00000000
+#define BL1_PWM_BL_UPDATE_SAMPLE_RATE__BL1_PWM_BL_UPDATE_SAMPLE_RATE_FRAME_COUNT_MASK 0x0000ff00L
+#define BL1_PWM_BL_UPDATE_SAMPLE_RATE__BL1_PWM_BL_UPDATE_SAMPLE_RATE_FRAME_COUNT__SHIFT 0x00000008
+#define BL1_PWM_CURRENT_ABM_LEVEL__BL1_PWM_CURRENT_ABM_LEVEL_MASK 0x0001ffffL
+#define BL1_PWM_CURRENT_ABM_LEVEL__BL1_PWM_CURRENT_ABM_LEVEL__SHIFT 0x00000000
+#define BL1_PWM_FINAL_DUTY_CYCLE__BL1_PWM_FINAL_DUTY_CYCLE_MASK 0x0001ffffL
+#define BL1_PWM_FINAL_DUTY_CYCLE__BL1_PWM_FINAL_DUTY_CYCLE__SHIFT 0x00000000
+#define BL1_PWM_GRP2_REG_LOCK__BL1_PWM_GRP2_FRAME_START_DISP_SEL_MASK 0x000e0000L
+#define BL1_PWM_GRP2_REG_LOCK__BL1_PWM_GRP2_FRAME_START_DISP_SEL__SHIFT 0x00000011
+#define BL1_PWM_GRP2_REG_LOCK__BL1_PWM_GRP2_IGNORE_MASTER_LOCK_EN_MASK 0x80000000L
+#define BL1_PWM_GRP2_REG_LOCK__BL1_PWM_GRP2_IGNORE_MASTER_LOCK_EN__SHIFT 0x0000001f
+#define BL1_PWM_GRP2_REG_LOCK__BL1_PWM_GRP2_READBACK_DB_REG_VALUE_EN_MASK 0x01000000L
+#define BL1_PWM_GRP2_REG_LOCK__BL1_PWM_GRP2_READBACK_DB_REG_VALUE_EN__SHIFT 0x00000018
+#define BL1_PWM_GRP2_REG_LOCK__BL1_PWM_GRP2_REG_LOCK_MASK 0x00000001L
+#define BL1_PWM_GRP2_REG_LOCK__BL1_PWM_GRP2_REG_LOCK__SHIFT 0x00000000
+#define BL1_PWM_GRP2_REG_LOCK__BL1_PWM_GRP2_REG_UPDATE_PENDING_MASK 0x00000100L
+#define BL1_PWM_GRP2_REG_LOCK__BL1_PWM_GRP2_REG_UPDATE_PENDING__SHIFT 0x00000008
+#define BL1_PWM_GRP2_REG_LOCK__BL1_PWM_GRP2_UPDATE_AT_FRAME_START_MASK 0x00010000L
+#define BL1_PWM_GRP2_REG_LOCK__BL1_PWM_GRP2_UPDATE_AT_FRAME_START__SHIFT 0x00000010
+#define BL1_PWM_MINIMUM_DUTY_CYCLE__BL1_PWM_MINIMUM_DUTY_CYCLE_MASK 0x0001ffffL
+#define BL1_PWM_MINIMUM_DUTY_CYCLE__BL1_PWM_MINIMUM_DUTY_CYCLE__SHIFT 0x00000000
+#define BL1_PWM_TARGET_ABM_LEVEL__BL1_PWM_TARGET_ABM_LEVEL_MASK 0x0001ffffL
+#define BL1_PWM_TARGET_ABM_LEVEL__BL1_PWM_TARGET_ABM_LEVEL__SHIFT 0x00000000
+#define BL1_PWM_USER_LEVEL__BL1_PWM_USER_LEVEL_MASK 0x0001ffffL
+#define BL1_PWM_USER_LEVEL__BL1_PWM_USER_LEVEL__SHIFT 0x00000000
+#define BL_PWM_CNTL2__BL_PWM_OVERRIDE_BL_OUT_ENABLE_MASK 0x40000000L
+#define BL_PWM_CNTL2__BL_PWM_OVERRIDE_BL_OUT_ENABLE__SHIFT 0x0000001e
+#define BL_PWM_CNTL2__BL_PWM_OVERRIDE_LVTMA_PWRSEQ_EN_MASK 0x80000000L
+#define BL_PWM_CNTL2__BL_PWM_OVERRIDE_LVTMA_PWRSEQ_EN__SHIFT 0x0000001f
+#define BL_PWM_CNTL2__BL_PWM_POST_FRAME_START_DELAY_BEFORE_UPDATE_MASK 0x0000ffffL
+#define BL_PWM_CNTL2__BL_PWM_POST_FRAME_START_DELAY_BEFORE_UPDATE__SHIFT 0x00000000
+#define BL_PWM_CNTL2__DBG_BL_PWM_INPUT_REFCLK_SELECT_MASK 0x30000000L
+#define BL_PWM_CNTL2__DBG_BL_PWM_INPUT_REFCLK_SELECT__SHIFT 0x0000001c
+#define BL_PWM_CNTL__BL_ACTIVE_INT_FRAC_CNT_MASK 0x0000ffffL
+#define BL_PWM_CNTL__BL_ACTIVE_INT_FRAC_CNT__SHIFT 0x00000000
+#define BL_PWM_CNTL__BL_PWM_EN_MASK 0x80000000L
+#define BL_PWM_CNTL__BL_PWM_EN__SHIFT 0x0000001f
+#define BL_PWM_CNTL__BL_PWM_FRACTIONAL_EN_MASK 0x40000000L
+#define BL_PWM_CNTL__BL_PWM_FRACTIONAL_EN__SHIFT 0x0000001e
+#define BL_PWM_GRP1_REG_LOCK__BL_PWM_GRP1_FRAME_START_DISP_SEL_MASK 0x000e0000L
+#define BL_PWM_GRP1_REG_LOCK__BL_PWM_GRP1_FRAME_START_DISP_SEL__SHIFT 0x00000011
+#define BL_PWM_GRP1_REG_LOCK__BL_PWM_GRP1_IGNORE_MASTER_LOCK_EN_MASK 0x80000000L
+#define BL_PWM_GRP1_REG_LOCK__BL_PWM_GRP1_IGNORE_MASTER_LOCK_EN__SHIFT 0x0000001f
+#define BL_PWM_GRP1_REG_LOCK__BL_PWM_GRP1_READBACK_DB_REG_VALUE_EN_MASK 0x01000000L
+#define BL_PWM_GRP1_REG_LOCK__BL_PWM_GRP1_READBACK_DB_REG_VALUE_EN__SHIFT 0x00000018
+#define BL_PWM_GRP1_REG_LOCK__BL_PWM_GRP1_REG_LOCK_MASK 0x00000001L
+#define BL_PWM_GRP1_REG_LOCK__BL_PWM_GRP1_REG_LOCK__SHIFT 0x00000000
+#define BL_PWM_GRP1_REG_LOCK__BL_PWM_GRP1_REG_UPDATE_PENDING_MASK 0x00000100L
+#define BL_PWM_GRP1_REG_LOCK__BL_PWM_GRP1_REG_UPDATE_PENDING__SHIFT 0x00000008
+#define BL_PWM_GRP1_REG_LOCK__BL_PWM_GRP1_UPDATE_AT_FRAME_START_MASK 0x00010000L
+#define BL_PWM_GRP1_REG_LOCK__BL_PWM_GRP1_UPDATE_AT_FRAME_START__SHIFT 0x00000010
+#define BL_PWM_PERIOD_CNTL__BL_PWM_PERIOD_BITCNT_MASK 0x000f0000L
+#define BL_PWM_PERIOD_CNTL__BL_PWM_PERIOD_BITCNT__SHIFT 0x00000010
+#define BL_PWM_PERIOD_CNTL__BL_PWM_PERIOD_MASK 0x0000ffffL
+#define BL_PWM_PERIOD_CNTL__BL_PWM_PERIOD__SHIFT 0x00000000
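+/*
+ * Illustrative sketch (not part of this header) of programming the
+ * backlight PWM fields above with the set_field() helper sketched earlier,
+ * assuming BL_ACTIVE_INT_FRAC_CNT is the active (duty) count measured
+ * against BL_PWM_PERIOD:
+ *
+ *	u32 period = 0, cntl = 0;
+ *
+ *	period = set_field(period, BL_PWM_PERIOD_CNTL__BL_PWM_PERIOD_MASK,
+ *			   BL_PWM_PERIOD_CNTL__BL_PWM_PERIOD__SHIFT, 0x1000);
+ *	cntl = set_field(cntl, BL_PWM_CNTL__BL_ACTIVE_INT_FRAC_CNT_MASK,
+ *			 BL_PWM_CNTL__BL_ACTIVE_INT_FRAC_CNT__SHIFT, 0x0800);
+ *	cntl = set_field(cntl, BL_PWM_CNTL__BL_PWM_EN_MASK,
+ *			 BL_PWM_CNTL__BL_PWM_EN__SHIFT, 1);
+ */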
+#define BPHYC_DAC_AUTO_CALIB_CONTROL__BPHYC_DAC_CAL_COMPLETE_MASK 0x10000000L
+#define BPHYC_DAC_AUTO_CALIB_CONTROL__BPHYC_DAC_CAL_COMPLETE__SHIFT 0x0000001c
+#define BPHYC_DAC_AUTO_CALIB_CONTROL__BPHYC_DAC_CAL_DACADJ_EN_MASK 0x00000004L
+#define BPHYC_DAC_AUTO_CALIB_CONTROL__BPHYC_DAC_CAL_DACADJ_EN__SHIFT 0x00000002
+#define BPHYC_DAC_AUTO_CALIB_CONTROL__BPHYC_DAC_CAL_EN_MASK 0x00000002L
+#define BPHYC_DAC_AUTO_CALIB_CONTROL__BPHYC_DAC_CAL_EN__SHIFT 0x00000001
+#define BPHYC_DAC_AUTO_CALIB_CONTROL__BPHYC_DAC_CAL_INITB_MASK 0x00000001L
+#define BPHYC_DAC_AUTO_CALIB_CONTROL__BPHYC_DAC_CAL_INITB__SHIFT 0x00000000
+#define BPHYC_DAC_AUTO_CALIB_CONTROL__BPHYC_DAC_CAL_MASK_MASK 0x00700000L
+#define BPHYC_DAC_AUTO_CALIB_CONTROL__BPHYC_DAC_CAL_MASK__SHIFT 0x00000014
+#define BPHYC_DAC_AUTO_CALIB_CONTROL__BPHYC_DAC_CAL_WAIT_ADJUST_MASK 0x00003ff0L
+#define BPHYC_DAC_AUTO_CALIB_CONTROL__BPHYC_DAC_CAL_WAIT_ADJUST__SHIFT 0x00000004
+#define BPHYC_DAC_MACRO_CNTL__BPHYC_DAC_ANALOG_MONITOR_MASK 0x0f000000L
+#define BPHYC_DAC_MACRO_CNTL__BPHYC_DAC_ANALOG_MONITOR__SHIFT 0x00000018
+#define BPHYC_DAC_MACRO_CNTL__BPHYC_DAC_BANDGAP_ADJUSTMENT_MASK 0x003f0000L
+#define BPHYC_DAC_MACRO_CNTL__BPHYC_DAC_BANDGAP_ADJUSTMENT__SHIFT 0x00000010
+#define BPHYC_DAC_MACRO_CNTL__BPHYC_DAC_COREMON_MASK 0x10000000L
+#define BPHYC_DAC_MACRO_CNTL__BPHYC_DAC_COREMON__SHIFT 0x0000001c
+#define BPHYC_DAC_MACRO_CNTL__BPHYC_DAC_WHITE_FINE_CONTROL_MASK 0x00003f00L
+#define BPHYC_DAC_MACRO_CNTL__BPHYC_DAC_WHITE_FINE_CONTROL__SHIFT 0x00000008
+#define BPHYC_DAC_MACRO_CNTL__BPHYC_DAC_WHITE_LEVEL_MASK 0x00000003L
+#define BPHYC_DAC_MACRO_CNTL__BPHYC_DAC_WHITE_LEVEL__SHIFT 0x00000000
+#define CC_DC_PIPE_DIS__DC_PIPE_DIS_MASK 0x0000007eL
+#define CC_DC_PIPE_DIS__DC_PIPE_DIS__SHIFT 0x00000001
+#define CC_RCU_DC_AUDIO_PORT_CONNECTIVITY__PORT_CONNECTIVITY_MASK 0x00000007L
+#define CC_RCU_DC_AUDIO_PORT_CONNECTIVITY__PORT_CONNECTIVITY_OVERRIDE_ENABLE_MASK 0x00000010L
+#define CC_RCU_DC_AUDIO_PORT_CONNECTIVITY__PORT_CONNECTIVITY_OVERRIDE_ENABLE__SHIFT 0x00000004
+#define CC_RCU_DC_AUDIO_PORT_CONNECTIVITY__PORT_CONNECTIVITY__SHIFT 0x00000000
+#define COMM_MATRIXA_TRANS_C11_C12__COMM_MATRIXA_TRANS_C11_MASK 0x0000ffffL
+#define COMM_MATRIXA_TRANS_C11_C12__COMM_MATRIXA_TRANS_C11__SHIFT 0x00000000
+#define COMM_MATRIXA_TRANS_C11_C12__COMM_MATRIXA_TRANS_C12_MASK 0xffff0000L
+#define COMM_MATRIXA_TRANS_C11_C12__COMM_MATRIXA_TRANS_C12__SHIFT 0x00000010
+#define COMM_MATRIXA_TRANS_C13_C14__COMM_MATRIXA_TRANS_C13_MASK 0x0000ffffL
+#define COMM_MATRIXA_TRANS_C13_C14__COMM_MATRIXA_TRANS_C13__SHIFT 0x00000000
+#define COMM_MATRIXA_TRANS_C13_C14__COMM_MATRIXA_TRANS_C14_MASK 0xffff0000L
+#define COMM_MATRIXA_TRANS_C13_C14__COMM_MATRIXA_TRANS_C14__SHIFT 0x00000010
+#define COMM_MATRIXA_TRANS_C21_C22__COMM_MATRIXA_TRANS_C21_MASK 0x0000ffffL
+#define COMM_MATRIXA_TRANS_C21_C22__COMM_MATRIXA_TRANS_C21__SHIFT 0x00000000
+#define COMM_MATRIXA_TRANS_C21_C22__COMM_MATRIXA_TRANS_C22_MASK 0xffff0000L
+#define COMM_MATRIXA_TRANS_C21_C22__COMM_MATRIXA_TRANS_C22__SHIFT 0x00000010
+#define COMM_MATRIXA_TRANS_C23_C24__COMM_MATRIXA_TRANS_C23_MASK 0x0000ffffL
+#define COMM_MATRIXA_TRANS_C23_C24__COMM_MATRIXA_TRANS_C23__SHIFT 0x00000000
+#define COMM_MATRIXA_TRANS_C23_C24__COMM_MATRIXA_TRANS_C24_MASK 0xffff0000L
+#define COMM_MATRIXA_TRANS_C23_C24__COMM_MATRIXA_TRANS_C24__SHIFT 0x00000010
+#define COMM_MATRIXA_TRANS_C31_C32__COMM_MATRIXA_TRANS_C31_MASK 0x0000ffffL
+#define COMM_MATRIXA_TRANS_C31_C32__COMM_MATRIXA_TRANS_C31__SHIFT 0x00000000
+#define COMM_MATRIXA_TRANS_C31_C32__COMM_MATRIXA_TRANS_C32_MASK 0xffff0000L
+#define COMM_MATRIXA_TRANS_C31_C32__COMM_MATRIXA_TRANS_C32__SHIFT 0x00000010
+#define COMM_MATRIXA_TRANS_C33_C34__COMM_MATRIXA_TRANS_C33_MASK 0x0000ffffL
+#define COMM_MATRIXA_TRANS_C33_C34__COMM_MATRIXA_TRANS_C33__SHIFT 0x00000000
+#define COMM_MATRIXA_TRANS_C33_C34__COMM_MATRIXA_TRANS_C34_MASK 0xffff0000L
+#define COMM_MATRIXA_TRANS_C33_C34__COMM_MATRIXA_TRANS_C34__SHIFT 0x00000010
+#define COMM_MATRIXB_TRANS_C11_C12__COMM_MATRIXB_TRANS_C11_MASK 0x0000ffffL
+#define COMM_MATRIXB_TRANS_C11_C12__COMM_MATRIXB_TRANS_C11__SHIFT 0x00000000
+#define COMM_MATRIXB_TRANS_C11_C12__COMM_MATRIXB_TRANS_C12_MASK 0xffff0000L
+#define COMM_MATRIXB_TRANS_C11_C12__COMM_MATRIXB_TRANS_C12__SHIFT 0x00000010
+#define COMM_MATRIXB_TRANS_C13_C14__COMM_MATRIXB_TRANS_C13_MASK 0x0000ffffL
+#define COMM_MATRIXB_TRANS_C13_C14__COMM_MATRIXB_TRANS_C13__SHIFT 0x00000000
+#define COMM_MATRIXB_TRANS_C13_C14__COMM_MATRIXB_TRANS_C14_MASK 0xffff0000L
+#define COMM_MATRIXB_TRANS_C13_C14__COMM_MATRIXB_TRANS_C14__SHIFT 0x00000010
+#define COMM_MATRIXB_TRANS_C21_C22__COMM_MATRIXB_TRANS_C21_MASK 0x0000ffffL
+#define COMM_MATRIXB_TRANS_C21_C22__COMM_MATRIXB_TRANS_C21__SHIFT 0x00000000
+#define COMM_MATRIXB_TRANS_C21_C22__COMM_MATRIXB_TRANS_C22_MASK 0xffff0000L
+#define COMM_MATRIXB_TRANS_C21_C22__COMM_MATRIXB_TRANS_C22__SHIFT 0x00000010
+#define COMM_MATRIXB_TRANS_C23_C24__COMM_MATRIXB_TRANS_C23_MASK 0x0000ffffL
+#define COMM_MATRIXB_TRANS_C23_C24__COMM_MATRIXB_TRANS_C23__SHIFT 0x00000000
+#define COMM_MATRIXB_TRANS_C23_C24__COMM_MATRIXB_TRANS_C24_MASK 0xffff0000L
+#define COMM_MATRIXB_TRANS_C23_C24__COMM_MATRIXB_TRANS_C24__SHIFT 0x00000010
+#define COMM_MATRIXB_TRANS_C31_C32__COMM_MATRIXB_TRANS_C31_MASK 0x0000ffffL
+#define COMM_MATRIXB_TRANS_C31_C32__COMM_MATRIXB_TRANS_C31__SHIFT 0x00000000
+#define COMM_MATRIXB_TRANS_C31_C32__COMM_MATRIXB_TRANS_C32_MASK 0xffff0000L
+#define COMM_MATRIXB_TRANS_C31_C32__COMM_MATRIXB_TRANS_C32__SHIFT 0x00000010
+#define COMM_MATRIXB_TRANS_C33_C34__COMM_MATRIXB_TRANS_C33_MASK 0x0000ffffL
+#define COMM_MATRIXB_TRANS_C33_C34__COMM_MATRIXB_TRANS_C33__SHIFT 0x00000000
+#define COMM_MATRIXB_TRANS_C33_C34__COMM_MATRIXB_TRANS_C34_MASK 0xffff0000L
+#define COMM_MATRIXB_TRANS_C33_C34__COMM_MATRIXB_TRANS_C34__SHIFT 0x00000010
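+/*
+ * Each COMM_MATRIX[A|B]_TRANS register packs two 16-bit transform
+ * coefficients, the first in bits 15:0 and the second in bits 31:16.
+ * A hypothetical packing sketch using the set_field() helper above:
+ *
+ *	u32 v = 0;
+ *
+ *	v = set_field(v, COMM_MATRIXA_TRANS_C11_C12__COMM_MATRIXA_TRANS_C11_MASK,
+ *		      COMM_MATRIXA_TRANS_C11_C12__COMM_MATRIXA_TRANS_C11__SHIFT, c11);
+ *	v = set_field(v, COMM_MATRIXA_TRANS_C11_C12__COMM_MATRIXA_TRANS_C12_MASK,
+ *		      COMM_MATRIXA_TRANS_C11_C12__COMM_MATRIXA_TRANS_C12__SHIFT, c12);
+ */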
+#define CRT00__H_TOTAL_MASK 0x000000ffL
+#define CRT00__H_TOTAL__SHIFT 0x00000000
+#define CRT01__H_DISP_END_MASK 0x000000ffL
+#define CRT01__H_DISP_END__SHIFT 0x00000000
+#define CRT02__H_BLANK_START_MASK 0x000000ffL
+#define CRT02__H_BLANK_START__SHIFT 0x00000000
+#define CRT03__CR10CR11_R_DIS_B_MASK 0x00000080L
+#define CRT03__CR10CR11_R_DIS_B__SHIFT 0x00000007
+#define CRT03__H_BLANK_END_MASK 0x0000001fL
+#define CRT03__H_BLANK_END__SHIFT 0x00000000
+#define CRT03__H_DE_SKEW_MASK 0x00000060L
+#define CRT03__H_DE_SKEW__SHIFT 0x00000005
+#define CRT04__H_SYNC_START_MASK 0x000000ffL
+#define CRT04__H_SYNC_START__SHIFT 0x00000000
+#define CRT05__H_BLANK_END_B5_MASK 0x00000080L
+#define CRT05__H_BLANK_END_B5__SHIFT 0x00000007
+#define CRT05__H_SYNC_END_MASK 0x0000001fL
+#define CRT05__H_SYNC_END__SHIFT 0x00000000
+#define CRT05__H_SYNC_SKEW_MASK 0x00000060L
+#define CRT05__H_SYNC_SKEW__SHIFT 0x00000005
+#define CRT06__V_TOTAL_MASK 0x000000ffL
+#define CRT06__V_TOTAL__SHIFT 0x00000000
+#define CRT07__LINE_CMP_B8_MASK 0x00000010L
+#define CRT07__LINE_CMP_B8__SHIFT 0x00000004
+#define CRT07__V_BLANK_START_B8_MASK 0x00000008L
+#define CRT07__V_BLANK_START_B8__SHIFT 0x00000003
+#define CRT07__V_DISP_END_B8_MASK 0x00000002L
+#define CRT07__V_DISP_END_B8__SHIFT 0x00000001
+#define CRT07__V_DISP_END_B9_MASK 0x00000040L
+#define CRT07__V_DISP_END_B9__SHIFT 0x00000006
+#define CRT07__V_SYNC_START_B8_MASK 0x00000004L
+#define CRT07__V_SYNC_START_B8__SHIFT 0x00000002
+#define CRT07__V_SYNC_START_B9_MASK 0x00000080L
+#define CRT07__V_SYNC_START_B9__SHIFT 0x00000007
+#define CRT07__V_TOTAL_B8_MASK 0x00000001L
+#define CRT07__V_TOTAL_B8__SHIFT 0x00000000
+#define CRT07__V_TOTAL_B9_MASK 0x00000020L
+#define CRT07__V_TOTAL_B9__SHIFT 0x00000005
+#define CRT08__BYTE_PAN_MASK 0x00000060L
+#define CRT08__BYTE_PAN__SHIFT 0x00000005
+#define CRT08__ROW_SCAN_START_MASK 0x0000001fL
+#define CRT08__ROW_SCAN_START__SHIFT 0x00000000
+#define CRT09__DOUBLE_CHAR_HEIGHT_MASK 0x00000080L
+#define CRT09__DOUBLE_CHAR_HEIGHT__SHIFT 0x00000007
+#define CRT09__LINE_CMP_B9_MASK 0x00000040L
+#define CRT09__LINE_CMP_B9__SHIFT 0x00000006
+#define CRT09__MAX_ROW_SCAN_MASK 0x0000001fL
+#define CRT09__MAX_ROW_SCAN__SHIFT 0x00000000
+#define CRT09__V_BLANK_START_B9_MASK 0x00000020L
+#define CRT09__V_BLANK_START_B9__SHIFT 0x00000005
+#define CRT0A__CURSOR_DISABLE_MASK 0x00000020L
+#define CRT0A__CURSOR_DISABLE__SHIFT 0x00000005
+#define CRT0A__CURSOR_START_MASK 0x0000001fL
+#define CRT0A__CURSOR_START__SHIFT 0x00000000
+#define CRT0B__CURSOR_END_MASK 0x0000001fL
+#define CRT0B__CURSOR_END__SHIFT 0x00000000
+#define CRT0B__CURSOR_SKEW_MASK 0x00000060L
+#define CRT0B__CURSOR_SKEW__SHIFT 0x00000005
+#define CRT0C__DISP_START_MASK 0x000000ffL
+#define CRT0C__DISP_START__SHIFT 0x00000000
+#define CRT0D__DISP_START_MASK 0x000000ffL
+#define CRT0D__DISP_START__SHIFT 0x00000000
+#define CRT0E__CURSOR_LOC_HI_MASK 0x000000ffL
+#define CRT0E__CURSOR_LOC_HI__SHIFT 0x00000000
+#define CRT0F__CURSOR_LOC_LO_MASK 0x000000ffL
+#define CRT0F__CURSOR_LOC_LO__SHIFT 0x00000000
+#define CRT10__V_SYNC_START_MASK 0x000000ffL
+#define CRT10__V_SYNC_START__SHIFT 0x00000000
+#define CRT11__C0T7_WR_ONLY_MASK 0x00000080L
+#define CRT11__C0T7_WR_ONLY__SHIFT 0x00000007
+#define CRT11__SEL5_REFRESH_CYC_MASK 0x00000040L
+#define CRT11__SEL5_REFRESH_CYC__SHIFT 0x00000006
+#define CRT11__V_INTR_CLR_MASK 0x00000010L
+#define CRT11__V_INTR_CLR__SHIFT 0x00000004
+#define CRT11__V_INTR_EN_MASK 0x00000020L
+#define CRT11__V_INTR_EN__SHIFT 0x00000005
+#define CRT11__V_SYNC_END_MASK 0x0000000fL
+#define CRT11__V_SYNC_END__SHIFT 0x00000000
+#define CRT12__V_DISP_END_MASK 0x000000ffL
+#define CRT12__V_DISP_END__SHIFT 0x00000000
+#define CRT13__DISP_PITCH_MASK 0x000000ffL
+#define CRT13__DISP_PITCH__SHIFT 0x00000000
+#define CRT14__ADDR_CNT_BY4_MASK 0x00000020L
+#define CRT14__ADDR_CNT_BY4__SHIFT 0x00000005
+#define CRT14__DOUBLE_WORD_MASK 0x00000040L
+#define CRT14__DOUBLE_WORD__SHIFT 0x00000006
+#define CRT14__UNDRLN_LOC_MASK 0x0000001fL
+#define CRT14__UNDRLN_LOC__SHIFT 0x00000000
+#define CRT15__V_BLANK_START_MASK 0x000000ffL
+#define CRT15__V_BLANK_START__SHIFT 0x00000000
+#define CRT16__V_BLANK_END_MASK 0x000000ffL
+#define CRT16__V_BLANK_END__SHIFT 0x00000000
+#define CRT17__ADDR_CNT_BY2_MASK 0x00000008L
+#define CRT17__ADDR_CNT_BY2__SHIFT 0x00000003
+#define CRT17__BYTE_MODE_MASK 0x00000040L
+#define CRT17__BYTE_MODE__SHIFT 0x00000006
+#define CRT17__CRTC_SYNC_EN_MASK 0x00000080L
+#define CRT17__CRTC_SYNC_EN__SHIFT 0x00000007
+#define CRT17__RA0_AS_A13B_MASK 0x00000001L
+#define CRT17__RA0_AS_A13B__SHIFT 0x00000000
+#define CRT17__RA1_AS_A14B_MASK 0x00000002L
+#define CRT17__RA1_AS_A14B__SHIFT 0x00000001
+#define CRT17__VCOUNT_BY2_MASK 0x00000004L
+#define CRT17__VCOUNT_BY2__SHIFT 0x00000002
+#define CRT17__WRAP_A15TOA0_MASK 0x00000020L
+#define CRT17__WRAP_A15TOA0__SHIFT 0x00000005
+#define CRT18__LINE_CMP_MASK 0x000000ffL
+#define CRT18__LINE_CMP__SHIFT 0x00000000
+#define CRT1E__GRPH_DEC_RD1_MASK 0x00000002L
+#define CRT1E__GRPH_DEC_RD1__SHIFT 0x00000001
+#define CRT1F__GRPH_DEC_RD0_MASK 0x000000ffL
+#define CRT1F__GRPH_DEC_RD0__SHIFT 0x00000000
+#define CRT22__GRPH_LATCH_DATA_MASK 0x000000ffL
+#define CRT22__GRPH_LATCH_DATA__SHIFT 0x00000000
+#define CRTC0_PIXEL_RATE_CNTL__CRTC0_ADD_PIXEL_MASK 0x00000100L
+#define CRTC0_PIXEL_RATE_CNTL__CRTC0_ADD_PIXEL__SHIFT 0x00000008
+#define CRTC0_PIXEL_RATE_CNTL__CRTC0_DISPOUT_ERROR_COUNT_MASK 0x0fff0000L
+#define CRTC0_PIXEL_RATE_CNTL__CRTC0_DISPOUT_ERROR_COUNT__SHIFT 0x00000010
+#define CRTC0_PIXEL_RATE_CNTL__CRTC0_DISPOUT_FIFO_ERROR_MASK 0x0000c000L
+#define CRTC0_PIXEL_RATE_CNTL__CRTC0_DISPOUT_FIFO_ERROR__SHIFT 0x0000000e
+#define CRTC0_PIXEL_RATE_CNTL__CRTC0_DROP_PIXEL_MASK 0x00000200L
+#define CRTC0_PIXEL_RATE_CNTL__CRTC0_DROP_PIXEL__SHIFT 0x00000009
+#define CRTC0_PIXEL_RATE_CNTL__CRTC0_PIXEL_RATE_SOURCE_MASK 0x00000003L
+#define CRTC0_PIXEL_RATE_CNTL__CRTC0_PIXEL_RATE_SOURCE__SHIFT 0x00000000
+#define CRTC0_PIXEL_RATE_CNTL__DP_DTO0_ENABLE_MASK 0x00000010L
+#define CRTC0_PIXEL_RATE_CNTL__DP_DTO0_ENABLE__SHIFT 0x00000004
+#define CRTC1_PIXEL_RATE_CNTL__CRTC1_ADD_PIXEL_MASK 0x00000100L
+#define CRTC1_PIXEL_RATE_CNTL__CRTC1_ADD_PIXEL__SHIFT 0x00000008
+#define CRTC1_PIXEL_RATE_CNTL__CRTC1_DISPOUT_ERROR_COUNT_MASK 0x0fff0000L
+#define CRTC1_PIXEL_RATE_CNTL__CRTC1_DISPOUT_ERROR_COUNT__SHIFT 0x00000010
+#define CRTC1_PIXEL_RATE_CNTL__CRTC1_DISPOUT_FIFO_ERROR_MASK 0x0000c000L
+#define CRTC1_PIXEL_RATE_CNTL__CRTC1_DISPOUT_FIFO_ERROR__SHIFT 0x0000000e
+#define CRTC1_PIXEL_RATE_CNTL__CRTC1_DROP_PIXEL_MASK 0x00000200L
+#define CRTC1_PIXEL_RATE_CNTL__CRTC1_DROP_PIXEL__SHIFT 0x00000009
+#define CRTC1_PIXEL_RATE_CNTL__CRTC1_PIXEL_RATE_SOURCE_MASK 0x00000003L
+#define CRTC1_PIXEL_RATE_CNTL__CRTC1_PIXEL_RATE_SOURCE__SHIFT 0x00000000
+#define CRTC1_PIXEL_RATE_CNTL__DP_DTO1_ENABLE_MASK 0x00000010L
+#define CRTC1_PIXEL_RATE_CNTL__DP_DTO1_ENABLE__SHIFT 0x00000004
+#define CRTC2_PIXEL_RATE_CNTL__CRTC2_ADD_PIXEL_MASK 0x00000100L
+#define CRTC2_PIXEL_RATE_CNTL__CRTC2_ADD_PIXEL__SHIFT 0x00000008
+#define CRTC2_PIXEL_RATE_CNTL__CRTC2_DISPOUT_ERROR_COUNT_MASK 0x0fff0000L
+#define CRTC2_PIXEL_RATE_CNTL__CRTC2_DISPOUT_ERROR_COUNT__SHIFT 0x00000010
+#define CRTC2_PIXEL_RATE_CNTL__CRTC2_DISPOUT_FIFO_ERROR_MASK 0x0000c000L
+#define CRTC2_PIXEL_RATE_CNTL__CRTC2_DISPOUT_FIFO_ERROR__SHIFT 0x0000000e
+#define CRTC2_PIXEL_RATE_CNTL__CRTC2_DROP_PIXEL_MASK 0x00000200L
+#define CRTC2_PIXEL_RATE_CNTL__CRTC2_DROP_PIXEL__SHIFT 0x00000009
+#define CRTC2_PIXEL_RATE_CNTL__CRTC2_PIXEL_RATE_SOURCE_MASK 0x00000003L
+#define CRTC2_PIXEL_RATE_CNTL__CRTC2_PIXEL_RATE_SOURCE__SHIFT 0x00000000
+#define CRTC2_PIXEL_RATE_CNTL__DP_DTO2_ENABLE_MASK 0x00000010L
+#define CRTC2_PIXEL_RATE_CNTL__DP_DTO2_ENABLE__SHIFT 0x00000004
+#define CRTC_3D_STRUCTURE_CONTROL__CRTC_3D_STRUCTURE_EN_DB_MASK 0x00000010L
+#define CRTC_3D_STRUCTURE_CONTROL__CRTC_3D_STRUCTURE_EN_DB__SHIFT 0x00000004
+#define CRTC_3D_STRUCTURE_CONTROL__CRTC_3D_STRUCTURE_EN_MASK 0x00000001L
+#define CRTC_3D_STRUCTURE_CONTROL__CRTC_3D_STRUCTURE_EN__SHIFT 0x00000000
+#define CRTC_3D_STRUCTURE_CONTROL__CRTC_3D_STRUCTURE_F_COUNT_MASK 0x000c0000L
+#define CRTC_3D_STRUCTURE_CONTROL__CRTC_3D_STRUCTURE_F_COUNT_RESET_MASK 0x00010000L
+#define CRTC_3D_STRUCTURE_CONTROL__CRTC_3D_STRUCTURE_F_COUNT_RESET_PENDING_MASK 0x00020000L
+#define CRTC_3D_STRUCTURE_CONTROL__CRTC_3D_STRUCTURE_F_COUNT_RESET_PENDING__SHIFT 0x00000011
+#define CRTC_3D_STRUCTURE_CONTROL__CRTC_3D_STRUCTURE_F_COUNT_RESET__SHIFT 0x00000010
+#define CRTC_3D_STRUCTURE_CONTROL__CRTC_3D_STRUCTURE_F_COUNT__SHIFT 0x00000012
+#define CRTC_3D_STRUCTURE_CONTROL__CRTC_3D_STRUCTURE_STEREO_SEL_OVR_MASK 0x00001000L
+#define CRTC_3D_STRUCTURE_CONTROL__CRTC_3D_STRUCTURE_STEREO_SEL_OVR__SHIFT 0x0000000c
+#define CRTC_3D_STRUCTURE_CONTROL__CRTC_3D_STRUCTURE_V_UPDATE_MODE_MASK 0x00000300L
+#define CRTC_3D_STRUCTURE_CONTROL__CRTC_3D_STRUCTURE_V_UPDATE_MODE__SHIFT 0x00000008
+#define CRTC3_PIXEL_RATE_CNTL__CRTC3_ADD_PIXEL_MASK 0x00000100L
+#define CRTC3_PIXEL_RATE_CNTL__CRTC3_ADD_PIXEL__SHIFT 0x00000008
+#define CRTC3_PIXEL_RATE_CNTL__CRTC3_DISPOUT_ERROR_COUNT_MASK 0x0fff0000L
+#define CRTC3_PIXEL_RATE_CNTL__CRTC3_DISPOUT_ERROR_COUNT__SHIFT 0x00000010
+#define CRTC3_PIXEL_RATE_CNTL__CRTC3_DISPOUT_FIFO_ERROR_MASK 0x0000c000L
+#define CRTC3_PIXEL_RATE_CNTL__CRTC3_DISPOUT_FIFO_ERROR__SHIFT 0x0000000e
+#define CRTC3_PIXEL_RATE_CNTL__CRTC3_DROP_PIXEL_MASK 0x00000200L
+#define CRTC3_PIXEL_RATE_CNTL__CRTC3_DROP_PIXEL__SHIFT 0x00000009
+#define CRTC3_PIXEL_RATE_CNTL__CRTC3_PIXEL_RATE_SOURCE_MASK 0x00000003L
+#define CRTC3_PIXEL_RATE_CNTL__CRTC3_PIXEL_RATE_SOURCE__SHIFT 0x00000000
+#define CRTC3_PIXEL_RATE_CNTL__DP_DTO3_ENABLE_MASK 0x00000010L
+#define CRTC3_PIXEL_RATE_CNTL__DP_DTO3_ENABLE__SHIFT 0x00000004
+#define CRTC4_PIXEL_RATE_CNTL__CRTC4_ADD_PIXEL_MASK 0x00000100L
+#define CRTC4_PIXEL_RATE_CNTL__CRTC4_ADD_PIXEL__SHIFT 0x00000008
+#define CRTC4_PIXEL_RATE_CNTL__CRTC4_DISPOUT_ERROR_COUNT_MASK 0x0fff0000L
+#define CRTC4_PIXEL_RATE_CNTL__CRTC4_DISPOUT_ERROR_COUNT__SHIFT 0x00000010
+#define CRTC4_PIXEL_RATE_CNTL__CRTC4_DISPOUT_FIFO_ERROR_MASK 0x0000c000L
+#define CRTC4_PIXEL_RATE_CNTL__CRTC4_DISPOUT_FIFO_ERROR__SHIFT 0x0000000e
+#define CRTC4_PIXEL_RATE_CNTL__CRTC4_DROP_PIXEL_MASK 0x00000200L
+#define CRTC4_PIXEL_RATE_CNTL__CRTC4_DROP_PIXEL__SHIFT 0x00000009
+#define CRTC4_PIXEL_RATE_CNTL__CRTC4_PIXEL_RATE_SOURCE_MASK 0x00000003L
+#define CRTC4_PIXEL_RATE_CNTL__CRTC4_PIXEL_RATE_SOURCE__SHIFT 0x00000000
+#define CRTC4_PIXEL_RATE_CNTL__DP_DTO4_ENABLE_MASK 0x00000010L
+#define CRTC4_PIXEL_RATE_CNTL__DP_DTO4_ENABLE__SHIFT 0x00000004
+#define CRTC5_PIXEL_RATE_CNTL__CRTC5_ADD_PIXEL_MASK 0x00000100L
+#define CRTC5_PIXEL_RATE_CNTL__CRTC5_ADD_PIXEL__SHIFT 0x00000008
+#define CRTC5_PIXEL_RATE_CNTL__CRTC5_DISPOUT_ERROR_COUNT_MASK 0x0fff0000L
+#define CRTC5_PIXEL_RATE_CNTL__CRTC5_DISPOUT_ERROR_COUNT__SHIFT 0x00000010
+#define CRTC5_PIXEL_RATE_CNTL__CRTC5_DISPOUT_FIFO_ERROR_MASK 0x0000c000L
+#define CRTC5_PIXEL_RATE_CNTL__CRTC5_DISPOUT_FIFO_ERROR__SHIFT 0x0000000e
+#define CRTC5_PIXEL_RATE_CNTL__CRTC5_DROP_PIXEL_MASK 0x00000200L
+#define CRTC5_PIXEL_RATE_CNTL__CRTC5_DROP_PIXEL__SHIFT 0x00000009
+#define CRTC5_PIXEL_RATE_CNTL__CRTC5_PIXEL_RATE_SOURCE_MASK 0x00000003L
+#define CRTC5_PIXEL_RATE_CNTL__CRTC5_PIXEL_RATE_SOURCE__SHIFT 0x00000000
+#define CRTC5_PIXEL_RATE_CNTL__DP_DTO5_ENABLE_MASK 0x00000010L
+#define CRTC5_PIXEL_RATE_CNTL__DP_DTO5_ENABLE__SHIFT 0x00000004
+#define CRTC8_DATA__VCRTC_DATA_MASK 0x000000ffL
+#define CRTC8_DATA__VCRTC_DATA__SHIFT 0x00000000
+#define CRTC8_IDX__VCRTC_IDX_MASK 0x0000003fL
+#define CRTC8_IDX__VCRTC_IDX__SHIFT 0x00000000
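+/*
+ * CRT00..CRT22 above mirror the legacy VGA CRT controller registers
+ * (H_TOTAL, H_DISP_END, cursor start/end, line compare, ...).  Judging by
+ * the field names, CRTC8_IDX/CRTC8_DATA form an index/data pair for
+ * reaching them.  A hypothetical read of CRT00 (H_TOTAL), reusing the
+ * illustrative helpers above:
+ *
+ *	u32 idx = set_field(0, CRTC8_IDX__VCRTC_IDX_MASK,
+ *			    CRTC8_IDX__VCRTC_IDX__SHIFT, 0x00);
+ *	// write idx to CRTC8_IDX, read back CRTC8_DATA as 'data', then:
+ *	h_total = get_field(data, CRT00__H_TOTAL_MASK, CRT00__H_TOTAL__SHIFT);
+ */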
+#define CRTC_ALLOW_STOP_OFF_V_CNT__CRTC_ALLOW_STOP_OFF_V_CNT_MASK 0x000000ffL
+#define CRTC_ALLOW_STOP_OFF_V_CNT__CRTC_ALLOW_STOP_OFF_V_CNT__SHIFT 0x00000000
+#define CRTC_ALLOW_STOP_OFF_V_CNT__CRTC_DISABLE_ALLOW_STOP_OFF_V_CNT_MASK 0x00010000L
+#define CRTC_ALLOW_STOP_OFF_V_CNT__CRTC_DISABLE_ALLOW_STOP_OFF_V_CNT__SHIFT 0x00000010
+#define CRTC_BLACK_COLOR__CRTC_BLACK_COLOR_B_CB_MASK 0x000003ffL
+#define CRTC_BLACK_COLOR__CRTC_BLACK_COLOR_B_CB__SHIFT 0x00000000
+#define CRTC_BLACK_COLOR__CRTC_BLACK_COLOR_G_Y_MASK 0x000ffc00L
+#define CRTC_BLACK_COLOR__CRTC_BLACK_COLOR_G_Y__SHIFT 0x0000000a
+#define CRTC_BLACK_COLOR__CRTC_BLACK_COLOR_R_CR_MASK 0x3ff00000L
+#define CRTC_BLACK_COLOR__CRTC_BLACK_COLOR_R_CR__SHIFT 0x00000014
+#define CRTC_BLANK_CONTROL__CRTC_BLANK_DATA_EN_MASK 0x00000100L
+#define CRTC_BLANK_CONTROL__CRTC_BLANK_DATA_EN__SHIFT 0x00000008
+#define CRTC_BLANK_CONTROL__CRTC_BLANK_DE_MODE_MASK 0x00010000L
+#define CRTC_BLANK_CONTROL__CRTC_BLANK_DE_MODE__SHIFT 0x00000010
+#define CRTC_BLANK_CONTROL__CRTC_CURRENT_BLANK_STATE_MASK 0x00000001L
+#define CRTC_BLANK_CONTROL__CRTC_CURRENT_BLANK_STATE__SHIFT 0x00000000
+#define CRTC_BLANK_DATA_COLOR__CRTC_BLANK_DATA_COLOR_BLUE_CB_MASK 0x000003ffL
+#define CRTC_BLANK_DATA_COLOR__CRTC_BLANK_DATA_COLOR_BLUE_CB__SHIFT 0x00000000
+#define CRTC_BLANK_DATA_COLOR__CRTC_BLANK_DATA_COLOR_GREEN_Y_MASK 0x000ffc00L
+#define CRTC_BLANK_DATA_COLOR__CRTC_BLANK_DATA_COLOR_GREEN_Y__SHIFT 0x0000000a
+#define CRTC_BLANK_DATA_COLOR__CRTC_BLANK_DATA_COLOR_RED_CR_MASK 0x3ff00000L
+#define CRTC_BLANK_DATA_COLOR__CRTC_BLANK_DATA_COLOR_RED_CR__SHIFT 0x00000014
+#define CRTC_CONTROL__CRTC_CURRENT_MASTER_EN_STATE_MASK 0x00010000L
+#define CRTC_CONTROL__CRTC_CURRENT_MASTER_EN_STATE__SHIFT 0x00000010
+#define CRTC_CONTROL__CRTC_DISABLE_POINT_CNTL_MASK 0x00000300L
+#define CRTC_CONTROL__CRTC_DISABLE_POINT_CNTL__SHIFT 0x00000008
+#define CRTC_CONTROL__CRTC_DISP_READ_REQUEST_DISABLE_MASK 0x01000000L
+#define CRTC_CONTROL__CRTC_DISP_READ_REQUEST_DISABLE__SHIFT 0x00000018
+#define CRTC_CONTROL__CRTC_FIELD_NUMBER_CNTL_MASK 0x00002000L
+#define CRTC_CONTROL__CRTC_FIELD_NUMBER_CNTL__SHIFT 0x0000000d
+#define CRTC_CONTROL__CRTC_HBLANK_EARLY_CONTROL_MASK 0x00700000L
+#define CRTC_CONTROL__CRTC_HBLANK_EARLY_CONTROL__SHIFT 0x00000014
+#define CRTC_CONTROL__CRTC_MASTER_EN_MASK 0x00000001L
+#define CRTC_CONTROL__CRTC_MASTER_EN__SHIFT 0x00000000
+#define CRTC_CONTROL__CRTC_SOF_PULL_EN_MASK 0x20000000L
+#define CRTC_CONTROL__CRTC_SOF_PULL_EN__SHIFT 0x0000001d
+#define CRTC_CONTROL__CRTC_START_POINT_CNTL_MASK 0x00001000L
+#define CRTC_CONTROL__CRTC_START_POINT_CNTL__SHIFT 0x0000000c
+#define CRTC_CONTROL__CRTC_SYNC_RESET_SEL_MASK 0x00000010L
+#define CRTC_CONTROL__CRTC_SYNC_RESET_SEL__SHIFT 0x00000004
+#define CRTC_COUNT_CONTROL__CRTC_HORZ_COUNT_BY2_EN_MASK 0x00000001L
+#define CRTC_COUNT_CONTROL__CRTC_HORZ_COUNT_BY2_EN__SHIFT 0x00000000
+#define CRTC_COUNT_CONTROL__CRTC_HORZ_REPETITION_COUNT_MASK 0x0000001eL
+#define CRTC_COUNT_CONTROL__CRTC_HORZ_REPETITION_COUNT__SHIFT 0x00000001
+#define CRTC_COUNT_RESET__CRTC_RESET_FRAME_COUNT_MASK 0x00000001L
+#define CRTC_COUNT_RESET__CRTC_RESET_FRAME_COUNT__SHIFT 0x00000000
+#define CRTC_DCFE_CLOCK_CONTROL__CRTC_DCFE_CLOCK_ENABLE_MASK 0x80000000L
+#define CRTC_DCFE_CLOCK_CONTROL__CRTC_DCFE_CLOCK_ENABLE__SHIFT 0x0000001f
+#define CRTC_DCFE_CLOCK_CONTROL__CRTC_DCFE_TEST_CLK_SEL_MASK 0x1f000000L
+#define CRTC_DCFE_CLOCK_CONTROL__CRTC_DCFE_TEST_CLK_SEL__SHIFT 0x00000018
+#define CRTC_DCFE_CLOCK_CONTROL__CRTC_DISPCLK_G_DCP_GATE_DISABLE_MASK 0x00000100L
+#define CRTC_DCFE_CLOCK_CONTROL__CRTC_DISPCLK_G_DCP_GATE_DISABLE__SHIFT 0x00000008
+#define CRTC_DCFE_CLOCK_CONTROL__CRTC_DISPCLK_G_SCL_GATE_DISABLE_MASK 0x00001000L
+#define CRTC_DCFE_CLOCK_CONTROL__CRTC_DISPCLK_G_SCL_GATE_DISABLE__SHIFT 0x0000000c
+#define CRTC_DCFE_CLOCK_CONTROL__CRTC_DISPCLK_R_DCFE_GATE_DISABLE_MASK 0x00000010L
+#define CRTC_DCFE_CLOCK_CONTROL__CRTC_DISPCLK_R_DCFE_GATE_DISABLE__SHIFT 0x00000004
+#define CRTC_DOUBLE_BUFFER_CONTROL__CRTC_BLANK_DATA_DOUBLE_BUFFER_EN_MASK 0x00010000L
+#define CRTC_DOUBLE_BUFFER_CONTROL__CRTC_BLANK_DATA_DOUBLE_BUFFER_EN__SHIFT 0x00000010
+#define CRTC_DOUBLE_BUFFER_CONTROL__CRTC_UPDATE_INSTANTLY_MASK 0x00000100L
+#define CRTC_DOUBLE_BUFFER_CONTROL__CRTC_UPDATE_INSTANTLY__SHIFT 0x00000008
+#define CRTC_DOUBLE_BUFFER_CONTROL__CRTC_UPDATE_PENDING_MASK 0x00000001L
+#define CRTC_DOUBLE_BUFFER_CONTROL__CRTC_UPDATE_PENDING__SHIFT 0x00000000
+#define CRTC_DTMTEST_CNTL__CRTC_DTMTEST_CLK_DIV_MASK 0x0000001eL
+#define CRTC_DTMTEST_CNTL__CRTC_DTMTEST_CLK_DIV__SHIFT 0x00000001
+#define CRTC_DTMTEST_CNTL__CRTC_DTMTEST_CRTC_EN_MASK 0x00000001L
+#define CRTC_DTMTEST_CNTL__CRTC_DTMTEST_CRTC_EN__SHIFT 0x00000000
+#define CRTC_DTMTEST_STATUS_POSITION__CRTC_DTMTEST_HORZ_COUNT_MASK 0x1fff0000L
+#define CRTC_DTMTEST_STATUS_POSITION__CRTC_DTMTEST_HORZ_COUNT__SHIFT 0x00000010
+#define CRTC_DTMTEST_STATUS_POSITION__CRTC_DTMTEST_VERT_COUNT_MASK 0x00001fffL
+#define CRTC_DTMTEST_STATUS_POSITION__CRTC_DTMTEST_VERT_COUNT__SHIFT 0x00000000
+#define CRTC_FLOW_CONTROL__CRTC_FLOW_CONTROL_GRANULARITY_MASK 0x00010000L
+#define CRTC_FLOW_CONTROL__CRTC_FLOW_CONTROL_GRANULARITY__SHIFT 0x00000010
+#define CRTC_FLOW_CONTROL__CRTC_FLOW_CONTROL_INPUT_STATUS_MASK 0x01000000L
+#define CRTC_FLOW_CONTROL__CRTC_FLOW_CONTROL_INPUT_STATUS__SHIFT 0x00000018
+#define CRTC_FLOW_CONTROL__CRTC_FLOW_CONTROL_POLARITY_MASK 0x00000100L
+#define CRTC_FLOW_CONTROL__CRTC_FLOW_CONTROL_POLARITY__SHIFT 0x00000008
+#define CRTC_FLOW_CONTROL__CRTC_FLOW_CONTROL_SOURCE_SELECT_MASK 0x0000001fL
+#define CRTC_FLOW_CONTROL__CRTC_FLOW_CONTROL_SOURCE_SELECT__SHIFT 0x00000000
+#define CRTC_FORCE_COUNT_NOW_CNTL__CRTC_FORCE_COUNT_NOW_CHECK_MASK 0x00000010L
+#define CRTC_FORCE_COUNT_NOW_CNTL__CRTC_FORCE_COUNT_NOW_CHECK__SHIFT 0x00000004
+#define CRTC_FORCE_COUNT_NOW_CNTL__CRTC_FORCE_COUNT_NOW_CLEAR_MASK 0x01000000L
+#define CRTC_FORCE_COUNT_NOW_CNTL__CRTC_FORCE_COUNT_NOW_CLEAR__SHIFT 0x00000018
+#define CRTC_FORCE_COUNT_NOW_CNTL__CRTC_FORCE_COUNT_NOW_MODE_MASK 0x00000003L
+#define CRTC_FORCE_COUNT_NOW_CNTL__CRTC_FORCE_COUNT_NOW_MODE__SHIFT 0x00000000
+#define CRTC_FORCE_COUNT_NOW_CNTL__CRTC_FORCE_COUNT_NOW_OCCURRED_MASK 0x00010000L
+#define CRTC_FORCE_COUNT_NOW_CNTL__CRTC_FORCE_COUNT_NOW_OCCURRED__SHIFT 0x00000010
+#define CRTC_FORCE_COUNT_NOW_CNTL__CRTC_FORCE_COUNT_NOW_TRIG_SEL_MASK 0x00000100L
+#define CRTC_FORCE_COUNT_NOW_CNTL__CRTC_FORCE_COUNT_NOW_TRIG_SEL__SHIFT 0x00000008
+#define CRTC_GSL_CONTROL__CRTC_GSL_CHECK_ALL_FIELDS_MASK 0x10000000L
+#define CRTC_GSL_CONTROL__CRTC_GSL_CHECK_ALL_FIELDS__SHIFT 0x0000001c
+#define CRTC_GSL_CONTROL__CRTC_GSL_CHECK_LINE_NUM_MASK 0x00001fffL
+#define CRTC_GSL_CONTROL__CRTC_GSL_CHECK_LINE_NUM__SHIFT 0x00000000
+#define CRTC_GSL_CONTROL__CRTC_GSL_FORCE_DELAY_MASK 0x001f0000L
+#define CRTC_GSL_CONTROL__CRTC_GSL_FORCE_DELAY__SHIFT 0x00000010
+#define CRTC_GSL_VSYNC_GAP__CRTC_GSL_VSYNC_GAP_CLEAR_MASK 0x00080000L
+#define CRTC_GSL_VSYNC_GAP__CRTC_GSL_VSYNC_GAP_CLEAR__SHIFT 0x00000013
+#define CRTC_GSL_VSYNC_GAP__CRTC_GSL_VSYNC_GAP_DELAY_MASK 0x0000ff00L
+#define CRTC_GSL_VSYNC_GAP__CRTC_GSL_VSYNC_GAP_DELAY__SHIFT 0x00000008
+#define CRTC_GSL_VSYNC_GAP__CRTC_GSL_VSYNC_GAP_LIMIT_MASK 0x000000ffL
+#define CRTC_GSL_VSYNC_GAP__CRTC_GSL_VSYNC_GAP_LIMIT__SHIFT 0x00000000
+#define CRTC_GSL_VSYNC_GAP__CRTC_GSL_VSYNC_GAP_MASK 0xff000000L
+#define CRTC_GSL_VSYNC_GAP__CRTC_GSL_VSYNC_GAP_MASTER_FASTER_MASK 0x00800000L
+#define CRTC_GSL_VSYNC_GAP__CRTC_GSL_VSYNC_GAP_MASTER_FASTER__SHIFT 0x00000017
+#define CRTC_GSL_VSYNC_GAP__CRTC_GSL_VSYNC_GAP_MODE_MASK 0x00060000L
+#define CRTC_GSL_VSYNC_GAP__CRTC_GSL_VSYNC_GAP_MODE__SHIFT 0x00000011
+#define CRTC_GSL_VSYNC_GAP__CRTC_GSL_VSYNC_GAP_OCCURRED_MASK 0x00100000L
+#define CRTC_GSL_VSYNC_GAP__CRTC_GSL_VSYNC_GAP_OCCURRED__SHIFT 0x00000014
+#define CRTC_GSL_VSYNC_GAP__CRTC_GSL_VSYNC_GAP__SHIFT 0x00000018
+#define CRTC_GSL_VSYNC_GAP__CRTC_GSL_VSYNC_GAP_SOURCE_SEL_MASK 0x00010000L
+#define CRTC_GSL_VSYNC_GAP__CRTC_GSL_VSYNC_GAP_SOURCE_SEL__SHIFT 0x00000010
+#define CRTC_GSL_WINDOW__CRTC_GSL_WINDOW_END_MASK 0x1fff0000L
+#define CRTC_GSL_WINDOW__CRTC_GSL_WINDOW_END__SHIFT 0x00000010
+#define CRTC_GSL_WINDOW__CRTC_GSL_WINDOW_START_MASK 0x00001fffL
+#define CRTC_GSL_WINDOW__CRTC_GSL_WINDOW_START__SHIFT 0x00000000
+#define CRTC_H_BLANK_EARLY_NUM__CRTC_H_BLANK_EARLY_NUM_DIS_MASK 0x00010000L
+#define CRTC_H_BLANK_EARLY_NUM__CRTC_H_BLANK_EARLY_NUM_DIS__SHIFT 0x00000010
+#define CRTC_H_BLANK_EARLY_NUM__CRTC_H_BLANK_EARLY_NUM_MASK 0x000003ffL
+#define CRTC_H_BLANK_EARLY_NUM__CRTC_H_BLANK_EARLY_NUM__SHIFT 0x00000000
+#define CRTC_H_BLANK_START_END__CRTC_H_BLANK_END_MASK 0x1fff0000L
+#define CRTC_H_BLANK_START_END__CRTC_H_BLANK_END__SHIFT 0x00000010
+#define CRTC_H_BLANK_START_END__CRTC_H_BLANK_START_MASK 0x00001fffL
+#define CRTC_H_BLANK_START_END__CRTC_H_BLANK_START__SHIFT 0x00000000
+#define CRTC_H_SYNC_A_CNTL__CRTC_COMP_SYNC_A_EN_MASK 0x00010000L
+#define CRTC_H_SYNC_A_CNTL__CRTC_COMP_SYNC_A_EN__SHIFT 0x00000010
+#define CRTC_H_SYNC_A_CNTL__CRTC_H_SYNC_A_CUTOFF_MASK 0x00020000L
+#define CRTC_H_SYNC_A_CNTL__CRTC_H_SYNC_A_CUTOFF__SHIFT 0x00000011
+#define CRTC_H_SYNC_A_CNTL__CRTC_H_SYNC_A_POL_MASK 0x00000001L
+#define CRTC_H_SYNC_A_CNTL__CRTC_H_SYNC_A_POL__SHIFT 0x00000000
+#define CRTC_H_SYNC_A__CRTC_H_SYNC_A_END_MASK 0x1fff0000L
+#define CRTC_H_SYNC_A__CRTC_H_SYNC_A_END__SHIFT 0x00000010
+#define CRTC_H_SYNC_A__CRTC_H_SYNC_A_START_MASK 0x00001fffL
+#define CRTC_H_SYNC_A__CRTC_H_SYNC_A_START__SHIFT 0x00000000
+#define CRTC_H_SYNC_B_CNTL__CRTC_COMP_SYNC_B_EN_MASK 0x00010000L
+#define CRTC_H_SYNC_B_CNTL__CRTC_COMP_SYNC_B_EN__SHIFT 0x00000010
+#define CRTC_H_SYNC_B_CNTL__CRTC_H_SYNC_B_CUTOFF_MASK 0x00020000L
+#define CRTC_H_SYNC_B_CNTL__CRTC_H_SYNC_B_CUTOFF__SHIFT 0x00000011
+#define CRTC_H_SYNC_B_CNTL__CRTC_H_SYNC_B_POL_MASK 0x00000001L
+#define CRTC_H_SYNC_B_CNTL__CRTC_H_SYNC_B_POL__SHIFT 0x00000000
+#define CRTC_H_SYNC_B__CRTC_H_SYNC_B_END_MASK 0x1fff0000L
+#define CRTC_H_SYNC_B__CRTC_H_SYNC_B_END__SHIFT 0x00000010
+#define CRTC_H_SYNC_B__CRTC_H_SYNC_B_START_MASK 0x00001fffL
+#define CRTC_H_SYNC_B__CRTC_H_SYNC_B_START__SHIFT 0x00000000
+#define CRTC_H_TOTAL__CRTC_H_TOTAL_MASK 0x00001fffL
+#define CRTC_H_TOTAL__CRTC_H_TOTAL__SHIFT 0x00000000
+#define CRTC_INTERLACE_CONTROL__CRTC_INTERLACE_ENABLE_MASK 0x00000001L
+#define CRTC_INTERLACE_CONTROL__CRTC_INTERLACE_ENABLE__SHIFT 0x00000000
+#define CRTC_INTERLACE_CONTROL__CRTC_INTERLACE_FORCE_NEXT_FIELD_MASK 0x00030000L
+#define CRTC_INTERLACE_CONTROL__CRTC_INTERLACE_FORCE_NEXT_FIELD__SHIFT 0x00000010
+#define CRTC_INTERLACE_STATUS__CRTC_INTERLACE_CURRENT_FIELD_MASK 0x00000001L
+#define CRTC_INTERLACE_STATUS__CRTC_INTERLACE_CURRENT_FIELD__SHIFT 0x00000000
+#define CRTC_INTERLACE_STATUS__CRTC_INTERLACE_NEXT_FIELD_MASK 0x00000002L
+#define CRTC_INTERLACE_STATUS__CRTC_INTERLACE_NEXT_FIELD__SHIFT 0x00000001
+#define CRTC_INTERRUPT_CONTROL__CRTC_FORCE_COUNT_NOW_INT_MSK_MASK 0x00000100L
+#define CRTC_INTERRUPT_CONTROL__CRTC_FORCE_COUNT_NOW_INT_MSK__SHIFT 0x00000008
+#define CRTC_INTERRUPT_CONTROL__CRTC_FORCE_COUNT_NOW_INT_TYPE_MASK 0x00000200L
+#define CRTC_INTERRUPT_CONTROL__CRTC_FORCE_COUNT_NOW_INT_TYPE__SHIFT 0x00000009
+#define CRTC_INTERRUPT_CONTROL__CRTC_FORCE_VSYNC_NEXT_LINE_INT_MSK_MASK 0x00010000L
+#define CRTC_INTERRUPT_CONTROL__CRTC_FORCE_VSYNC_NEXT_LINE_INT_MSK__SHIFT 0x00000010
+#define CRTC_INTERRUPT_CONTROL__CRTC_FORCE_VSYNC_NEXT_LINE_INT_TYPE_MASK 0x00020000L
+#define CRTC_INTERRUPT_CONTROL__CRTC_FORCE_VSYNC_NEXT_LINE_INT_TYPE__SHIFT 0x00000011
+#define CRTC_INTERRUPT_CONTROL__CRTC_GSL_VSYNC_GAP_INT_MSK_MASK 0x40000000L
+#define CRTC_INTERRUPT_CONTROL__CRTC_GSL_VSYNC_GAP_INT_MSK__SHIFT 0x0000001e
+#define CRTC_INTERRUPT_CONTROL__CRTC_GSL_VSYNC_GAP_INT_TYPE_MASK 0x80000000L
+#define CRTC_INTERRUPT_CONTROL__CRTC_GSL_VSYNC_GAP_INT_TYPE__SHIFT 0x0000001f
+#define CRTC_INTERRUPT_CONTROL__CRTC_SNAPSHOT_INT_MSK_MASK 0x00000001L
+#define CRTC_INTERRUPT_CONTROL__CRTC_SNAPSHOT_INT_MSK__SHIFT 0x00000000
+#define CRTC_INTERRUPT_CONTROL__CRTC_SNAPSHOT_INT_TYPE_MASK 0x00000002L
+#define CRTC_INTERRUPT_CONTROL__CRTC_SNAPSHOT_INT_TYPE__SHIFT 0x00000001
+#define CRTC_INTERRUPT_CONTROL__CRTC_TRIGA_INT_MSK_MASK 0x01000000L
+#define CRTC_INTERRUPT_CONTROL__CRTC_TRIGA_INT_MSK__SHIFT 0x00000018
+#define CRTC_INTERRUPT_CONTROL__CRTC_TRIGA_INT_TYPE_MASK 0x04000000L
+#define CRTC_INTERRUPT_CONTROL__CRTC_TRIGA_INT_TYPE__SHIFT 0x0000001a
+#define CRTC_INTERRUPT_CONTROL__CRTC_TRIGB_INT_MSK_MASK 0x02000000L
+#define CRTC_INTERRUPT_CONTROL__CRTC_TRIGB_INT_MSK__SHIFT 0x00000019
+#define CRTC_INTERRUPT_CONTROL__CRTC_TRIGB_INT_TYPE_MASK 0x08000000L
+#define CRTC_INTERRUPT_CONTROL__CRTC_TRIGB_INT_TYPE__SHIFT 0x0000001b
+#define CRTC_INTERRUPT_CONTROL__CRTC_VSYNC_NOM_INT_MSK_MASK 0x10000000L
+#define CRTC_INTERRUPT_CONTROL__CRTC_VSYNC_NOM_INT_MSK__SHIFT 0x0000001c
+#define CRTC_INTERRUPT_CONTROL__CRTC_VSYNC_NOM_INT_TYPE_MASK 0x20000000L
+#define CRTC_INTERRUPT_CONTROL__CRTC_VSYNC_NOM_INT_TYPE__SHIFT 0x0000001d
+#define CRTC_INTERRUPT_CONTROL__CRTC_V_UPDATE_INT_MSK_MASK 0x00000010L
+#define CRTC_INTERRUPT_CONTROL__CRTC_V_UPDATE_INT_MSK__SHIFT 0x00000004
+#define CRTC_INTERRUPT_CONTROL__CRTC_V_UPDATE_INT_TYPE_MASK 0x00000020L
+#define CRTC_INTERRUPT_CONTROL__CRTC_V_UPDATE_INT_TYPE__SHIFT 0x00000005
+#define CRTC_MANUAL_FORCE_VSYNC_NEXT_LINE__CRTC_MANUAL_FORCE_VSYNC_NEXT_LINE_MASK 0x00000001L
+#define CRTC_MANUAL_FORCE_VSYNC_NEXT_LINE__CRTC_MANUAL_FORCE_VSYNC_NEXT_LINE__SHIFT 0x00000000
+#define CRTC_MASTER_EN__CRTC_MASTER_EN_MASK 0x00000001L
+#define CRTC_MASTER_EN__CRTC_MASTER_EN__SHIFT 0x00000000
+#define CRTC_MVP_INBAND_CNTL_INSERT__CRTC_MVP_INBAND_CNTL_CHAR_INSERT_MASK 0xffffff00L
+#define CRTC_MVP_INBAND_CNTL_INSERT__CRTC_MVP_INBAND_CNTL_CHAR_INSERT__SHIFT 0x00000008
+#define CRTC_MVP_INBAND_CNTL_INSERT__CRTC_MVP_INBAND_OUT_MODE_MASK 0x00000003L
+#define CRTC_MVP_INBAND_CNTL_INSERT__CRTC_MVP_INBAND_OUT_MODE__SHIFT 0x00000000
+#define CRTC_MVP_INBAND_CNTL_INSERT_TIMER__CRTC_MVP_INBAND_CNTL_CHAR_INSERT_TIMER_MASK 0x000000ffL
+#define CRTC_MVP_INBAND_CNTL_INSERT_TIMER__CRTC_MVP_INBAND_CNTL_CHAR_INSERT_TIMER__SHIFT 0x00000000
+#define CRTC_MVP_STATUS__CRTC_AFR_HSYNC_SWITCH_DONE_CLEAR_MASK 0x00100000L
+#define CRTC_MVP_STATUS__CRTC_AFR_HSYNC_SWITCH_DONE_CLEAR__SHIFT 0x00000014
+#define CRTC_MVP_STATUS__CRTC_AFR_HSYNC_SWITCH_DONE_OCCURRED_MASK 0x00000010L
+#define CRTC_MVP_STATUS__CRTC_AFR_HSYNC_SWITCH_DONE_OCCURRED__SHIFT 0x00000004
+#define CRTC_MVP_STATUS__CRTC_FLIP_NOW_CLEAR_MASK 0x00010000L
+#define CRTC_MVP_STATUS__CRTC_FLIP_NOW_CLEAR__SHIFT 0x00000010
+#define CRTC_MVP_STATUS__CRTC_FLIP_NOW_OCCURRED_MASK 0x00000001L
+#define CRTC_MVP_STATUS__CRTC_FLIP_NOW_OCCURRED__SHIFT 0x00000000
+#define CRTC_NOM_VERT_POSITION__CRTC_VERT_COUNT_NOM_MASK 0x00001fffL
+#define CRTC_NOM_VERT_POSITION__CRTC_VERT_COUNT_NOM__SHIFT 0x00000000
+#define CRTC_OVERSCAN_COLOR__CRTC_OVERSCAN_COLOR_BLUE_MASK 0x000003ffL
+#define CRTC_OVERSCAN_COLOR__CRTC_OVERSCAN_COLOR_BLUE__SHIFT 0x00000000
+#define CRTC_OVERSCAN_COLOR__CRTC_OVERSCAN_COLOR_GREEN_MASK 0x000ffc00L
+#define CRTC_OVERSCAN_COLOR__CRTC_OVERSCAN_COLOR_GREEN__SHIFT 0x0000000a
+#define CRTC_OVERSCAN_COLOR__CRTC_OVERSCAN_COLOR_RED_MASK 0x3ff00000L
+#define CRTC_OVERSCAN_COLOR__CRTC_OVERSCAN_COLOR_RED__SHIFT 0x00000014
+#define CRTC_SNAPSHOT_CONTROL__CRTC_AUTO_SNAPSHOT_TRIG_SEL_MASK 0x00000003L
+#define CRTC_SNAPSHOT_CONTROL__CRTC_AUTO_SNAPSHOT_TRIG_SEL__SHIFT 0x00000000
+#define CRTC_SNAPSHOT_FRAME__CRTC_SNAPSHOT_FRAME_COUNT_MASK 0x00ffffffL
+#define CRTC_SNAPSHOT_FRAME__CRTC_SNAPSHOT_FRAME_COUNT__SHIFT 0x00000000
+#define CRTC_SNAPSHOT_POSITION__CRTC_SNAPSHOT_HORZ_COUNT_MASK 0x1fff0000L
+#define CRTC_SNAPSHOT_POSITION__CRTC_SNAPSHOT_HORZ_COUNT__SHIFT 0x00000010
+#define CRTC_SNAPSHOT_POSITION__CRTC_SNAPSHOT_VERT_COUNT_MASK 0x00001fffL
+#define CRTC_SNAPSHOT_POSITION__CRTC_SNAPSHOT_VERT_COUNT__SHIFT 0x00000000
+#define CRTC_SNAPSHOT_STATUS__CRTC_SNAPSHOT_CLEAR_MASK 0x00000002L
+#define CRTC_SNAPSHOT_STATUS__CRTC_SNAPSHOT_CLEAR__SHIFT 0x00000001
+#define CRTC_SNAPSHOT_STATUS__CRTC_SNAPSHOT_MANUAL_TRIGGER_MASK 0x00000004L
+#define CRTC_SNAPSHOT_STATUS__CRTC_SNAPSHOT_MANUAL_TRIGGER__SHIFT 0x00000002
+#define CRTC_SNAPSHOT_STATUS__CRTC_SNAPSHOT_OCCURRED_MASK 0x00000001L
+#define CRTC_SNAPSHOT_STATUS__CRTC_SNAPSHOT_OCCURRED__SHIFT 0x00000000
+#define CRTC_START_LINE_CONTROL__CRTC_ADVANCED_START_LINE_POSITION_MASK 0x000f0000L
+#define CRTC_START_LINE_CONTROL__CRTC_ADVANCED_START_LINE_POSITION__SHIFT 0x00000010
+#define CRTC_START_LINE_CONTROL__CRTC_INTERLACE_START_LINE_EARLY_MASK 0x00000100L
+#define CRTC_START_LINE_CONTROL__CRTC_INTERLACE_START_LINE_EARLY__SHIFT 0x00000008
+#define CRTC_START_LINE_CONTROL__CRTC_PROGRESSIVE_START_LINE_EARLY_MASK 0x00000001L
+#define CRTC_START_LINE_CONTROL__CRTC_PROGRESSIVE_START_LINE_EARLY__SHIFT 0x00000000
+#define CRTC_STATUS__CRTC_H_ACTIVE_DISP_MASK 0x00020000L
+#define CRTC_STATUS__CRTC_H_ACTIVE_DISP__SHIFT 0x00000011
+#define CRTC_STATUS__CRTC_H_BLANK_MASK 0x00010000L
+#define CRTC_STATUS__CRTC_H_BLANK__SHIFT 0x00000010
+#define CRTC_STATUS__CRTC_H_SYNC_A_MASK 0x00040000L
+#define CRTC_STATUS__CRTC_H_SYNC_A__SHIFT 0x00000012
+#define CRTC_STATUS__CRTC_V_ACTIVE_DISP_MASK 0x00000002L
+#define CRTC_STATUS__CRTC_V_ACTIVE_DISP__SHIFT 0x00000001
+#define CRTC_STATUS__CRTC_V_BLANK_3D_STRUCTURE_MASK 0x00000020L
+#define CRTC_STATUS__CRTC_V_BLANK_3D_STRUCTURE__SHIFT 0x00000005
+#define CRTC_STATUS__CRTC_V_BLANK_MASK 0x00000001L
+#define CRTC_STATUS__CRTC_V_BLANK__SHIFT 0x00000000
+#define CRTC_STATUS__CRTC_V_START_LINE_MASK 0x00000010L
+#define CRTC_STATUS__CRTC_V_START_LINE__SHIFT 0x00000004
+#define CRTC_STATUS__CRTC_V_SYNC_A_MASK 0x00000004L
+#define CRTC_STATUS__CRTC_V_SYNC_A__SHIFT 0x00000002
+#define CRTC_STATUS__CRTC_V_UPDATE_MASK 0x00000008L
+#define CRTC_STATUS__CRTC_V_UPDATE__SHIFT 0x00000003
+#define CRTC_STATUS_FRAME_COUNT__CRTC_FRAME_COUNT_MASK 0x00ffffffL
+#define CRTC_STATUS_FRAME_COUNT__CRTC_FRAME_COUNT__SHIFT 0x00000000
+#define CRTC_STATUS_HV_COUNT__CRTC_HV_COUNT_MASK 0x1fffffffL
+#define CRTC_STATUS_HV_COUNT__CRTC_HV_COUNT__SHIFT 0x00000000
+#define CRTC_STATUS_POSITION__CRTC_HORZ_COUNT_MASK 0x1fff0000L
+#define CRTC_STATUS_POSITION__CRTC_HORZ_COUNT__SHIFT 0x00000010
+#define CRTC_STATUS_POSITION__CRTC_VERT_COUNT_MASK 0x00001fffL
+#define CRTC_STATUS_POSITION__CRTC_VERT_COUNT__SHIFT 0x00000000
+#define CRTC_STATUS_VF_COUNT__CRTC_VF_COUNT_MASK 0x1fffffffL
+#define CRTC_STATUS_VF_COUNT__CRTC_VF_COUNT__SHIFT 0x00000000
+#define CRTC_STEREO_CONTROL__CRTC_STEREO_EN_MASK 0x01000000L
+#define CRTC_STEREO_CONTROL__CRTC_STEREO_EN__SHIFT 0x00000018
+#define CRTC_STEREO_CONTROL__CRTC_STEREO_SYNC_OUTPUT_LINE_NUM_MASK 0x00001fffL
+#define CRTC_STEREO_CONTROL__CRTC_STEREO_SYNC_OUTPUT_LINE_NUM__SHIFT 0x00000000
+#define CRTC_STEREO_CONTROL__CRTC_STEREO_SYNC_OUTPUT_POLARITY_MASK 0x00008000L
+#define CRTC_STEREO_CONTROL__CRTC_STEREO_SYNC_OUTPUT_POLARITY__SHIFT 0x0000000f
+#define CRTC_STEREO_CONTROL__CRTC_STEREO_SYNC_SELECT_POLARITY_MASK 0x00010000L
+#define CRTC_STEREO_CONTROL__CRTC_STEREO_SYNC_SELECT_POLARITY__SHIFT 0x00000010
+#define CRTC_STEREO_FORCE_NEXT_EYE__CRTC_STEREO_FORCE_NEXT_EYE_MASK 0x00000003L
+#define CRTC_STEREO_FORCE_NEXT_EYE__CRTC_STEREO_FORCE_NEXT_EYE__SHIFT 0x00000000
+#define CRTC_STEREO_STATUS__CRTC_STEREO_CURRENT_EYE_MASK 0x00000001L
+#define CRTC_STEREO_STATUS__CRTC_STEREO_CURRENT_EYE__SHIFT 0x00000000
+#define CRTC_STEREO_STATUS__CRTC_STEREO_FORCE_NEXT_EYE_PENDING_MASK 0x03000000L
+#define CRTC_STEREO_STATUS__CRTC_STEREO_FORCE_NEXT_EYE_PENDING__SHIFT 0x00000018
+#define CRTC_STEREO_STATUS__CRTC_STEREO_SYNC_OUTPUT_MASK 0x00000100L
+#define CRTC_STEREO_STATUS__CRTC_STEREO_SYNC_OUTPUT__SHIFT 0x00000008
+#define CRTC_STEREO_STATUS__CRTC_STEREO_SYNC_SELECT_MASK 0x00010000L
+#define CRTC_STEREO_STATUS__CRTC_STEREO_SYNC_SELECT__SHIFT 0x00000010
+#define CRTC_TEST_DEBUG_DATA__CRTC_TEST_DEBUG_DATA_MASK 0xffffffffL
+#define CRTC_TEST_DEBUG_DATA__CRTC_TEST_DEBUG_DATA__SHIFT 0x00000000
+#define CRTC_TEST_DEBUG_INDEX__CRTC_TEST_DEBUG_INDEX_MASK 0x000000ffL
+#define CRTC_TEST_DEBUG_INDEX__CRTC_TEST_DEBUG_INDEX__SHIFT 0x00000000
+#define CRTC_TEST_DEBUG_INDEX__CRTC_TEST_DEBUG_WRITE_EN_MASK 0x00000100L
+#define CRTC_TEST_DEBUG_INDEX__CRTC_TEST_DEBUG_WRITE_EN__SHIFT 0x00000008
+#define CRTC_TEST_PATTERN_COLOR__CRTC_TEST_PATTERN_DATA_MASK 0x0000ffffL
+#define CRTC_TEST_PATTERN_COLOR__CRTC_TEST_PATTERN_DATA__SHIFT 0x00000000
+#define CRTC_TEST_PATTERN_COLOR__CRTC_TEST_PATTERN_MASK_MASK 0x003f0000L
+#define CRTC_TEST_PATTERN_COLOR__CRTC_TEST_PATTERN_MASK__SHIFT 0x00000010
+#define CRTC_TEST_PATTERN_CONTROL__CRTC_TEST_PATTERN_COLOR_FORMAT_MASK 0xff000000L
+#define CRTC_TEST_PATTERN_CONTROL__CRTC_TEST_PATTERN_COLOR_FORMAT__SHIFT 0x00000018
+#define CRTC_TEST_PATTERN_CONTROL__CRTC_TEST_PATTERN_DYNAMIC_RANGE_MASK 0x00010000L
+#define CRTC_TEST_PATTERN_CONTROL__CRTC_TEST_PATTERN_DYNAMIC_RANGE__SHIFT 0x00000010
+#define CRTC_TEST_PATTERN_CONTROL__CRTC_TEST_PATTERN_EN_MASK 0x00000001L
+#define CRTC_TEST_PATTERN_CONTROL__CRTC_TEST_PATTERN_EN__SHIFT 0x00000000
+#define CRTC_TEST_PATTERN_CONTROL__CRTC_TEST_PATTERN_MODE_MASK 0x00000700L
+#define CRTC_TEST_PATTERN_CONTROL__CRTC_TEST_PATTERN_MODE__SHIFT 0x00000008
+#define CRTC_TEST_PATTERN_PARAMETERS__CRTC_TEST_PATTERN_HRES_MASK 0x0000f000L
+#define CRTC_TEST_PATTERN_PARAMETERS__CRTC_TEST_PATTERN_HRES__SHIFT 0x0000000c
+#define CRTC_TEST_PATTERN_PARAMETERS__CRTC_TEST_PATTERN_INC0_MASK 0x0000000fL
+#define CRTC_TEST_PATTERN_PARAMETERS__CRTC_TEST_PATTERN_INC0__SHIFT 0x00000000
+#define CRTC_TEST_PATTERN_PARAMETERS__CRTC_TEST_PATTERN_INC1_MASK 0x000000f0L
+#define CRTC_TEST_PATTERN_PARAMETERS__CRTC_TEST_PATTERN_INC1__SHIFT 0x00000004
+#define CRTC_TEST_PATTERN_PARAMETERS__CRTC_TEST_PATTERN_RAMP0_OFFSET_MASK 0xffff0000L
+#define CRTC_TEST_PATTERN_PARAMETERS__CRTC_TEST_PATTERN_RAMP0_OFFSET__SHIFT 0x00000010
+#define CRTC_TEST_PATTERN_PARAMETERS__CRTC_TEST_PATTERN_VRES_MASK 0x00000f00L
+#define CRTC_TEST_PATTERN_PARAMETERS__CRTC_TEST_PATTERN_VRES__SHIFT 0x00000008
+#define CRTC_TRIGA_CNTL__CRTC_TRIGA_CLEAR_MASK 0x80000000L
+#define CRTC_TRIGA_CNTL__CRTC_TRIGA_CLEAR__SHIFT 0x0000001f
+#define CRTC_TRIGA_CNTL__CRTC_TRIGA_DELAY_MASK 0x1f000000L
+#define CRTC_TRIGA_CNTL__CRTC_TRIGA_DELAY__SHIFT 0x00000018
+#define CRTC_TRIGA_CNTL__CRTC_TRIGA_FALLING_EDGE_DETECT_CNTL_MASK 0x00030000L
+#define CRTC_TRIGA_CNTL__CRTC_TRIGA_FALLING_EDGE_DETECT_CNTL__SHIFT 0x00000010
+#define CRTC_TRIGA_CNTL__CRTC_TRIGA_FREQUENCY_SELECT_MASK 0x00300000L
+#define CRTC_TRIGA_CNTL__CRTC_TRIGA_FREQUENCY_SELECT__SHIFT 0x00000014
+#define CRTC_TRIGA_CNTL__CRTC_TRIGA_INPUT_STATUS_MASK 0x00000200L
+#define CRTC_TRIGA_CNTL__CRTC_TRIGA_INPUT_STATUS__SHIFT 0x00000009
+#define CRTC_TRIGA_CNTL__CRTC_TRIGA_OCCURRED_MASK 0x00000800L
+#define CRTC_TRIGA_CNTL__CRTC_TRIGA_OCCURRED__SHIFT 0x0000000b
+#define CRTC_TRIGA_CNTL__CRTC_TRIGA_POLARITY_SELECT_MASK 0x000000e0L
+#define CRTC_TRIGA_CNTL__CRTC_TRIGA_POLARITY_SELECT__SHIFT 0x00000005
+#define CRTC_TRIGA_CNTL__CRTC_TRIGA_POLARITY_STATUS_MASK 0x00000400L
+#define CRTC_TRIGA_CNTL__CRTC_TRIGA_POLARITY_STATUS__SHIFT 0x0000000a
+#define CRTC_TRIGA_CNTL__CRTC_TRIGA_RESYNC_BYPASS_EN_MASK 0x00000100L
+#define CRTC_TRIGA_CNTL__CRTC_TRIGA_RESYNC_BYPASS_EN__SHIFT 0x00000008
+#define CRTC_TRIGA_CNTL__CRTC_TRIGA_RISING_EDGE_DETECT_CNTL_MASK 0x00003000L
+#define CRTC_TRIGA_CNTL__CRTC_TRIGA_RISING_EDGE_DETECT_CNTL__SHIFT 0x0000000c
+#define CRTC_TRIGA_CNTL__CRTC_TRIGA_SOURCE_SELECT_MASK 0x0000001fL
+#define CRTC_TRIGA_CNTL__CRTC_TRIGA_SOURCE_SELECT__SHIFT 0x00000000
+#define CRTC_TRIGA_MANUAL_TRIG__CRTC_TRIGA_MANUAL_TRIG_MASK 0x00000001L
+#define CRTC_TRIGA_MANUAL_TRIG__CRTC_TRIGA_MANUAL_TRIG__SHIFT 0x00000000
+#define CRTC_TRIGB_CNTL__CRTC_TRIGB_CLEAR_MASK 0x80000000L
+#define CRTC_TRIGB_CNTL__CRTC_TRIGB_CLEAR__SHIFT 0x0000001f
+#define CRTC_TRIGB_CNTL__CRTC_TRIGB_DELAY_MASK 0x1f000000L
+#define CRTC_TRIGB_CNTL__CRTC_TRIGB_DELAY__SHIFT 0x00000018
+#define CRTC_TRIGB_CNTL__CRTC_TRIGB_FALLING_EDGE_DETECT_CNTL_MASK 0x00030000L
+#define CRTC_TRIGB_CNTL__CRTC_TRIGB_FALLING_EDGE_DETECT_CNTL__SHIFT 0x00000010
+#define CRTC_TRIGB_CNTL__CRTC_TRIGB_FREQUENCY_SELECT_MASK 0x00300000L
+#define CRTC_TRIGB_CNTL__CRTC_TRIGB_FREQUENCY_SELECT__SHIFT 0x00000014
+#define CRTC_TRIGB_CNTL__CRTC_TRIGB_INPUT_STATUS_MASK 0x00000200L
+#define CRTC_TRIGB_CNTL__CRTC_TRIGB_INPUT_STATUS__SHIFT 0x00000009
+#define CRTC_TRIGB_CNTL__CRTC_TRIGB_OCCURRED_MASK 0x00000800L
+#define CRTC_TRIGB_CNTL__CRTC_TRIGB_OCCURRED__SHIFT 0x0000000b
+#define CRTC_TRIGB_CNTL__CRTC_TRIGB_POLARITY_SELECT_MASK 0x000000e0L
+#define CRTC_TRIGB_CNTL__CRTC_TRIGB_POLARITY_SELECT__SHIFT 0x00000005
+#define CRTC_TRIGB_CNTL__CRTC_TRIGB_POLARITY_STATUS_MASK 0x00000400L
+#define CRTC_TRIGB_CNTL__CRTC_TRIGB_POLARITY_STATUS__SHIFT 0x0000000a
+#define CRTC_TRIGB_CNTL__CRTC_TRIGB_RESYNC_BYPASS_EN_MASK 0x00000100L
+#define CRTC_TRIGB_CNTL__CRTC_TRIGB_RESYNC_BYPASS_EN__SHIFT 0x00000008
+#define CRTC_TRIGB_CNTL__CRTC_TRIGB_RISING_EDGE_DETECT_CNTL_MASK 0x00003000L
+#define CRTC_TRIGB_CNTL__CRTC_TRIGB_RISING_EDGE_DETECT_CNTL__SHIFT 0x0000000c
+#define CRTC_TRIGB_CNTL__CRTC_TRIGB_SOURCE_SELECT_MASK 0x0000001fL
+#define CRTC_TRIGB_CNTL__CRTC_TRIGB_SOURCE_SELECT__SHIFT 0x00000000
+#define CRTC_TRIGB_MANUAL_TRIG__CRTC_TRIGB_MANUAL_TRIG_MASK 0x00000001L
+#define CRTC_TRIGB_MANUAL_TRIG__CRTC_TRIGB_MANUAL_TRIG__SHIFT 0x00000000
+#define CRTC_UPDATE_LOCK__CRTC_UPDATE_LOCK_MASK 0x00000001L
+#define CRTC_UPDATE_LOCK__CRTC_UPDATE_LOCK__SHIFT 0x00000000
+#define CRTC_VBI_END__CRTC_VBI_H_END_MASK 0x1fff0000L
+#define CRTC_VBI_END__CRTC_VBI_H_END__SHIFT 0x00000010
+#define CRTC_VBI_END__CRTC_VBI_V_END_MASK 0x00001fffL
+#define CRTC_VBI_END__CRTC_VBI_V_END__SHIFT 0x00000000
+#define CRTC_V_BLANK_START_END__CRTC_V_BLANK_END_MASK 0x1fff0000L
+#define CRTC_V_BLANK_START_END__CRTC_V_BLANK_END__SHIFT 0x00000010
+#define CRTC_V_BLANK_START_END__CRTC_V_BLANK_START_MASK 0x00001fffL
+#define CRTC_V_BLANK_START_END__CRTC_V_BLANK_START__SHIFT 0x00000000
+#define CRTC_VERT_SYNC_CONTROL__CRTC_AUTO_FORCE_VSYNC_MODE_MASK 0x00030000L
+#define CRTC_VERT_SYNC_CONTROL__CRTC_AUTO_FORCE_VSYNC_MODE__SHIFT 0x00000010
+#define CRTC_VERT_SYNC_CONTROL__CRTC_FORCE_VSYNC_NEXT_LINE_CLEAR_MASK 0x00000100L
+#define CRTC_VERT_SYNC_CONTROL__CRTC_FORCE_VSYNC_NEXT_LINE_CLEAR__SHIFT 0x00000008
+#define CRTC_VERT_SYNC_CONTROL__CRTC_FORCE_VSYNC_NEXT_LINE_OCCURRED_MASK 0x00000001L
+#define CRTC_VERT_SYNC_CONTROL__CRTC_FORCE_VSYNC_NEXT_LINE_OCCURRED__SHIFT 0x00000000
+#define CRTC_VGA_PARAMETER_CAPTURE_MODE__CRTC_VGA_PARAMETER_CAPTURE_MODE_MASK 0x00000001L
+#define CRTC_VGA_PARAMETER_CAPTURE_MODE__CRTC_VGA_PARAMETER_CAPTURE_MODE__SHIFT 0x00000000
+#define CRTC_V_SYNC_A_CNTL__CRTC_V_SYNC_A_POL_MASK 0x00000001L
+#define CRTC_V_SYNC_A_CNTL__CRTC_V_SYNC_A_POL__SHIFT 0x00000000
+#define CRTC_V_SYNC_A__CRTC_V_SYNC_A_END_MASK 0x1fff0000L
+#define CRTC_V_SYNC_A__CRTC_V_SYNC_A_END__SHIFT 0x00000010
+#define CRTC_V_SYNC_A__CRTC_V_SYNC_A_START_MASK 0x00001fffL
+#define CRTC_V_SYNC_A__CRTC_V_SYNC_A_START__SHIFT 0x00000000
+#define CRTC_V_SYNC_B_CNTL__CRTC_V_SYNC_B_POL_MASK 0x00000001L
+#define CRTC_V_SYNC_B_CNTL__CRTC_V_SYNC_B_POL__SHIFT 0x00000000
+#define CRTC_V_SYNC_B__CRTC_V_SYNC_B_END_MASK 0x1fff0000L
+#define CRTC_V_SYNC_B__CRTC_V_SYNC_B_END__SHIFT 0x00000010
+#define CRTC_V_SYNC_B__CRTC_V_SYNC_B_START_MASK 0x00001fffL
+#define CRTC_V_SYNC_B__CRTC_V_SYNC_B_START__SHIFT 0x00000000
+#define CRTC_VSYNC_NOM_INT_STATUS__CRTC_VSYNC_NOM_INT_CLEAR_MASK 0x00000010L
+#define CRTC_VSYNC_NOM_INT_STATUS__CRTC_VSYNC_NOM_INT_CLEAR__SHIFT 0x00000004
+#define CRTC_VSYNC_NOM_INT_STATUS__CRTC_VSYNC_NOM_MASK 0x00000001L
+#define CRTC_VSYNC_NOM_INT_STATUS__CRTC_VSYNC_NOM__SHIFT 0x00000000
+#define CRTC_V_TOTAL_CONTROL__CRTC_FORCE_LOCK_ON_EVENT_MASK 0x00000100L
+#define CRTC_V_TOTAL_CONTROL__CRTC_FORCE_LOCK_ON_EVENT__SHIFT 0x00000008
+#define CRTC_V_TOTAL_CONTROL__CRTC_FORCE_LOCK_TO_MASTER_VSYNC_MASK 0x00001000L
+#define CRTC_V_TOTAL_CONTROL__CRTC_FORCE_LOCK_TO_MASTER_VSYNC__SHIFT 0x0000000c
+#define CRTC_V_TOTAL_CONTROL__CRTC_SET_V_TOTAL_MIN_MASK_MASK 0xffff0000L
+#define CRTC_V_TOTAL_CONTROL__CRTC_SET_V_TOTAL_MIN_MASK__SHIFT 0x00000010
+#define CRTC_V_TOTAL_CONTROL__CRTC_V_TOTAL_MAX_SEL_MASK 0x00000010L
+#define CRTC_V_TOTAL_CONTROL__CRTC_V_TOTAL_MAX_SEL__SHIFT 0x00000004
+#define CRTC_V_TOTAL_CONTROL__CRTC_V_TOTAL_MIN_SEL_MASK 0x00000001L
+#define CRTC_V_TOTAL_CONTROL__CRTC_V_TOTAL_MIN_SEL__SHIFT 0x00000000
+#define CRTC_V_TOTAL__CRTC_V_TOTAL_MASK 0x00001fffL
+#define CRTC_V_TOTAL__CRTC_V_TOTAL__SHIFT 0x00000000
+#define CRTC_V_TOTAL_INT_STATUS__CRTC_SET_V_TOTAL_MIN_EVENT_OCCURED_ACK_MASK 0x00000100L
+#define CRTC_V_TOTAL_INT_STATUS__CRTC_SET_V_TOTAL_MIN_EVENT_OCCURED_ACK__SHIFT 0x00000008
+#define CRTC_V_TOTAL_INT_STATUS__CRTC_SET_V_TOTAL_MIN_EVENT_OCCURED_INT_MASK 0x00000010L
+#define CRTC_V_TOTAL_INT_STATUS__CRTC_SET_V_TOTAL_MIN_EVENT_OCCURED_INT__SHIFT 0x00000004
+#define CRTC_V_TOTAL_INT_STATUS__CRTC_SET_V_TOTAL_MIN_EVENT_OCCURED_MASK 0x00000001L
+#define CRTC_V_TOTAL_INT_STATUS__CRTC_SET_V_TOTAL_MIN_EVENT_OCCURED_MSK_MASK 0x00001000L
+#define CRTC_V_TOTAL_INT_STATUS__CRTC_SET_V_TOTAL_MIN_EVENT_OCCURED_MSK__SHIFT 0x0000000c
+#define CRTC_V_TOTAL_INT_STATUS__CRTC_SET_V_TOTAL_MIN_EVENT_OCCURED__SHIFT 0x00000000
+#define CRTC_V_TOTAL_MAX__CRTC_ALLOW_VBLANK_EXTENSION_FOR_MC_TRAINING_MASK 0x00010000L
+#define CRTC_V_TOTAL_MAX__CRTC_ALLOW_VBLANK_EXTENSION_FOR_MC_TRAINING__SHIFT 0x00000010
+#define CRTC_V_TOTAL_MAX__CRTC_V_TOTAL_MAX_MASK 0x00001fffL
+#define CRTC_V_TOTAL_MAX__CRTC_V_TOTAL_MAX__SHIFT 0x00000000
+#define CRTC_V_TOTAL_MIN__CRTC_V_TOTAL_MIN_MASK 0x00001fffL
+#define CRTC_V_TOTAL_MIN__CRTC_V_TOTAL_MIN__SHIFT 0x00000000
+#define CRTC_V_UPDATE_INT_STATUS__CRTC_V_UPDATE_INT_CLEAR_MASK 0x00000100L
+#define CRTC_V_UPDATE_INT_STATUS__CRTC_V_UPDATE_INT_CLEAR__SHIFT 0x00000008
+#define CRTC_V_UPDATE_INT_STATUS__CRTC_V_UPDATE_INT_OCCURRED_MASK 0x00000001L
+#define CRTC_V_UPDATE_INT_STATUS__CRTC_V_UPDATE_INT_OCCURRED__SHIFT 0x00000000
+#define CUR_COLOR1__CUR_COLOR1_BLUE_MASK 0x000000ffL
+#define CUR_COLOR1__CUR_COLOR1_BLUE__SHIFT 0x00000000
+#define CUR_COLOR1__CUR_COLOR1_GREEN_MASK 0x0000ff00L
+#define CUR_COLOR1__CUR_COLOR1_GREEN__SHIFT 0x00000008
+#define CUR_COLOR1__CUR_COLOR1_RED_MASK 0x00ff0000L
+#define CUR_COLOR1__CUR_COLOR1_RED__SHIFT 0x00000010
+#define CUR_COLOR2__CUR_COLOR2_BLUE_MASK 0x000000ffL
+#define CUR_COLOR2__CUR_COLOR2_BLUE__SHIFT 0x00000000
+#define CUR_COLOR2__CUR_COLOR2_GREEN_MASK 0x0000ff00L
+#define CUR_COLOR2__CUR_COLOR2_GREEN__SHIFT 0x00000008
+#define CUR_COLOR2__CUR_COLOR2_RED_MASK 0x00ff0000L
+#define CUR_COLOR2__CUR_COLOR2_RED__SHIFT 0x00000010
+#define CUR_CONTROL__CUR_INV_TRANS_CLAMP_MASK 0x00000010L
+#define CUR_CONTROL__CUR_INV_TRANS_CLAMP__SHIFT 0x00000004
+#define CUR_CONTROL__CURSOR_2X_MAGNIFY_MASK 0x00010000L
+#define CUR_CONTROL__CURSOR_2X_MAGNIFY__SHIFT 0x00000010
+#define CUR_CONTROL__CURSOR_EN_MASK 0x00000001L
+#define CUR_CONTROL__CURSOR_EN__SHIFT 0x00000000
+#define CUR_CONTROL__CURSOR_FORCE_MC_ON_MASK 0x00100000L
+#define CUR_CONTROL__CURSOR_FORCE_MC_ON__SHIFT 0x00000014
+#define CUR_CONTROL__CURSOR_MODE_MASK 0x00000300L
+#define CUR_CONTROL__CURSOR_MODE__SHIFT 0x00000008
+#define CUR_CONTROL__CURSOR_URGENT_CONTROL_MASK 0x07000000L
+#define CUR_CONTROL__CURSOR_URGENT_CONTROL__SHIFT 0x00000018
+#define CUR_HOT_SPOT__CURSOR_HOT_SPOT_X_MASK 0x003f0000L
+#define CUR_HOT_SPOT__CURSOR_HOT_SPOT_X__SHIFT 0x00000010
+#define CUR_HOT_SPOT__CURSOR_HOT_SPOT_Y_MASK 0x0000003fL
+#define CUR_HOT_SPOT__CURSOR_HOT_SPOT_Y__SHIFT 0x00000000
+#define CUR_POSITION__CURSOR_X_POSITION_MASK 0x3fff0000L
+#define CUR_POSITION__CURSOR_X_POSITION__SHIFT 0x00000010
+#define CUR_POSITION__CURSOR_Y_POSITION_MASK 0x00003fffL
+#define CUR_POSITION__CURSOR_Y_POSITION__SHIFT 0x00000000
+#define CUR_REQUEST_FILTER_CNTL__CUR_REQUEST_FILTER_DIS_MASK 0x00000001L
+#define CUR_REQUEST_FILTER_CNTL__CUR_REQUEST_FILTER_DIS__SHIFT 0x00000000
+#define CUR_SIZE__CURSOR_HEIGHT_MASK 0x0000003fL
+#define CUR_SIZE__CURSOR_HEIGHT__SHIFT 0x00000000
+#define CUR_SIZE__CURSOR_WIDTH_MASK 0x003f0000L
+#define CUR_SIZE__CURSOR_WIDTH__SHIFT 0x00000010
+#define CUR_SURFACE_ADDRESS__CURSOR_SURFACE_ADDRESS_MASK 0xffffffffL
+#define CUR_SURFACE_ADDRESS__CURSOR_SURFACE_ADDRESS__SHIFT 0x00000000
+#define CUR_SURFACE_ADDRESS_HIGH__CURSOR_SURFACE_ADDRESS_HIGH_MASK 0x000000ffL
+#define CUR_SURFACE_ADDRESS_HIGH__CURSOR_SURFACE_ADDRESS_HIGH__SHIFT 0x00000000
+#define CUR_UPDATE__CURSOR_DISABLE_MULTIPLE_UPDATE_MASK 0x01000000L
+#define CUR_UPDATE__CURSOR_DISABLE_MULTIPLE_UPDATE__SHIFT 0x00000018
+#define CUR_UPDATE__CURSOR_UPDATE_LOCK_MASK 0x00010000L
+#define CUR_UPDATE__CURSOR_UPDATE_LOCK__SHIFT 0x00000010
+#define CUR_UPDATE__CURSOR_UPDATE_PENDING_MASK 0x00000001L
+#define CUR_UPDATE__CURSOR_UPDATE_PENDING__SHIFT 0x00000000
+#define CUR_UPDATE__CURSOR_UPDATE_TAKEN_MASK 0x00000002L
+#define CUR_UPDATE__CURSOR_UPDATE_TAKEN__SHIFT 0x00000001
+#define D1VGA_CONTROL__D1VGA_MODE_ENABLE_MASK 0x00000001L
+#define D1VGA_CONTROL__D1VGA_MODE_ENABLE__SHIFT 0x00000000
+#define D1VGA_CONTROL__D1VGA_OVERSCAN_COLOR_EN_MASK 0x00010000L
+#define D1VGA_CONTROL__D1VGA_OVERSCAN_COLOR_EN__SHIFT 0x00000010
+#define D1VGA_CONTROL__D1VGA_ROTATE_MASK 0x03000000L
+#define D1VGA_CONTROL__D1VGA_ROTATE__SHIFT 0x00000018
+#define D1VGA_CONTROL__D1VGA_SYNC_POLARITY_SELECT_MASK 0x00000200L
+#define D1VGA_CONTROL__D1VGA_SYNC_POLARITY_SELECT__SHIFT 0x00000009
+#define D1VGA_CONTROL__D1VGA_TIMING_SELECT_MASK 0x00000100L
+#define D1VGA_CONTROL__D1VGA_TIMING_SELECT__SHIFT 0x00000008
+#define D2VGA_CONTROL__D2VGA_MODE_ENABLE_MASK 0x00000001L
+#define D2VGA_CONTROL__D2VGA_MODE_ENABLE__SHIFT 0x00000000
+#define D2VGA_CONTROL__D2VGA_OVERSCAN_COLOR_EN_MASK 0x00010000L
+#define D2VGA_CONTROL__D2VGA_OVERSCAN_COLOR_EN__SHIFT 0x00000010
+#define D2VGA_CONTROL__D2VGA_ROTATE_MASK 0x03000000L
+#define D2VGA_CONTROL__D2VGA_ROTATE__SHIFT 0x00000018
+#define D2VGA_CONTROL__D2VGA_SYNC_POLARITY_SELECT_MASK 0x00000200L
+#define D2VGA_CONTROL__D2VGA_SYNC_POLARITY_SELECT__SHIFT 0x00000009
+#define D2VGA_CONTROL__D2VGA_TIMING_SELECT_MASK 0x00000100L
+#define D2VGA_CONTROL__D2VGA_TIMING_SELECT__SHIFT 0x00000008
+#define D3VGA_CONTROL__D3VGA_MODE_ENABLE_MASK 0x00000001L
+#define D3VGA_CONTROL__D3VGA_MODE_ENABLE__SHIFT 0x00000000
+#define D3VGA_CONTROL__D3VGA_OVERSCAN_COLOR_EN_MASK 0x00010000L
+#define D3VGA_CONTROL__D3VGA_OVERSCAN_COLOR_EN__SHIFT 0x00000010
+#define D3VGA_CONTROL__D3VGA_ROTATE_MASK 0x03000000L
+#define D3VGA_CONTROL__D3VGA_ROTATE__SHIFT 0x00000018
+#define D3VGA_CONTROL__D3VGA_SYNC_POLARITY_SELECT_MASK 0x00000200L
+#define D3VGA_CONTROL__D3VGA_SYNC_POLARITY_SELECT__SHIFT 0x00000009
+#define D3VGA_CONTROL__D3VGA_TIMING_SELECT_MASK 0x00000100L
+#define D3VGA_CONTROL__D3VGA_TIMING_SELECT__SHIFT 0x00000008
+#define D4VGA_CONTROL__D4VGA_MODE_ENABLE_MASK 0x00000001L
+#define D4VGA_CONTROL__D4VGA_MODE_ENABLE__SHIFT 0x00000000
+#define D4VGA_CONTROL__D4VGA_OVERSCAN_COLOR_EN_MASK 0x00010000L
+#define D4VGA_CONTROL__D4VGA_OVERSCAN_COLOR_EN__SHIFT 0x00000010
+#define D4VGA_CONTROL__D4VGA_ROTATE_MASK 0x03000000L
+#define D4VGA_CONTROL__D4VGA_ROTATE__SHIFT 0x00000018
+#define D4VGA_CONTROL__D4VGA_SYNC_POLARITY_SELECT_MASK 0x00000200L
+#define D4VGA_CONTROL__D4VGA_SYNC_POLARITY_SELECT__SHIFT 0x00000009
+#define D4VGA_CONTROL__D4VGA_TIMING_SELECT_MASK 0x00000100L
+#define D4VGA_CONTROL__D4VGA_TIMING_SELECT__SHIFT 0x00000008
+#define D5VGA_CONTROL__D5VGA_MODE_ENABLE_MASK 0x00000001L
+#define D5VGA_CONTROL__D5VGA_MODE_ENABLE__SHIFT 0x00000000
+#define D5VGA_CONTROL__D5VGA_OVERSCAN_COLOR_EN_MASK 0x00010000L
+#define D5VGA_CONTROL__D5VGA_OVERSCAN_COLOR_EN__SHIFT 0x00000010
+#define D5VGA_CONTROL__D5VGA_ROTATE_MASK 0x03000000L
+#define D5VGA_CONTROL__D5VGA_ROTATE__SHIFT 0x00000018
+#define D5VGA_CONTROL__D5VGA_SYNC_POLARITY_SELECT_MASK 0x00000200L
+#define D5VGA_CONTROL__D5VGA_SYNC_POLARITY_SELECT__SHIFT 0x00000009
+#define D5VGA_CONTROL__D5VGA_TIMING_SELECT_MASK 0x00000100L
+#define D5VGA_CONTROL__D5VGA_TIMING_SELECT__SHIFT 0x00000008
+#define D6VGA_CONTROL__D6VGA_MODE_ENABLE_MASK 0x00000001L
+#define D6VGA_CONTROL__D6VGA_MODE_ENABLE__SHIFT 0x00000000
+#define D6VGA_CONTROL__D6VGA_OVERSCAN_COLOR_EN_MASK 0x00010000L
+#define D6VGA_CONTROL__D6VGA_OVERSCAN_COLOR_EN__SHIFT 0x00000010
+#define D6VGA_CONTROL__D6VGA_ROTATE_MASK 0x03000000L
+#define D6VGA_CONTROL__D6VGA_ROTATE__SHIFT 0x00000018
+#define D6VGA_CONTROL__D6VGA_SYNC_POLARITY_SELECT_MASK 0x00000200L
+#define D6VGA_CONTROL__D6VGA_SYNC_POLARITY_SELECT__SHIFT 0x00000009
+#define D6VGA_CONTROL__D6VGA_TIMING_SELECT_MASK 0x00000100L
+#define D6VGA_CONTROL__D6VGA_TIMING_SELECT__SHIFT 0x00000008
+#define DAC_AUTODETECT_CONTROL2__DAC_AUTODETECT_POWERUP_COUNTER_MASK 0x000000ffL
+#define DAC_AUTODETECT_CONTROL2__DAC_AUTODETECT_POWERUP_COUNTER__SHIFT 0x00000000
+#define DAC_AUTODETECT_CONTROL2__DAC_AUTODETECT_TESTMODE_MASK 0x00000100L
+#define DAC_AUTODETECT_CONTROL2__DAC_AUTODETECT_TESTMODE__SHIFT 0x00000008
+#define DAC_AUTODETECT_CONTROL3__DAC_AUTODET_COMPARATOR_IN_DELAY_MASK 0x000000ffL
+#define DAC_AUTODETECT_CONTROL3__DAC_AUTODET_COMPARATOR_IN_DELAY__SHIFT 0x00000000
+#define DAC_AUTODETECT_CONTROL3__DAC_AUTODET_COMPARATOR_OUT_DELAY_MASK 0x0000ff00L
+#define DAC_AUTODETECT_CONTROL3__DAC_AUTODET_COMPARATOR_OUT_DELAY__SHIFT 0x00000008
+#define DAC_AUTODETECT_CONTROL__DAC_AUTODETECT_CHECK_MASK_MASK 0x00070000L
+#define DAC_AUTODETECT_CONTROL__DAC_AUTODETECT_CHECK_MASK__SHIFT 0x00000010
+#define DAC_AUTODETECT_CONTROL__DAC_AUTODETECT_FRAME_TIME_COUNTER_MASK 0x0000ff00L
+#define DAC_AUTODETECT_CONTROL__DAC_AUTODETECT_FRAME_TIME_COUNTER__SHIFT 0x00000008
+#define DAC_AUTODETECT_CONTROL__DAC_AUTODETECT_MODE_MASK 0x00000003L
+#define DAC_AUTODETECT_CONTROL__DAC_AUTODETECT_MODE__SHIFT 0x00000000
+#define DAC_AUTODETECT_INT_CONTROL__DAC_AUTODETECT_ACK_MASK 0x00000001L
+#define DAC_AUTODETECT_INT_CONTROL__DAC_AUTODETECT_ACK__SHIFT 0x00000000
+#define DAC_AUTODETECT_INT_CONTROL__DAC_AUTODETECT_INT_ENABLE_MASK 0x00010000L
+#define DAC_AUTODETECT_INT_CONTROL__DAC_AUTODETECT_INT_ENABLE__SHIFT 0x00000010
+#define DAC_AUTODETECT_STATUS__DAC_AUTODETECT_BLUE_SENSE_MASK 0x03000000L
+#define DAC_AUTODETECT_STATUS__DAC_AUTODETECT_BLUE_SENSE__SHIFT 0x00000018
+#define DAC_AUTODETECT_STATUS__DAC_AUTODETECT_CONNECT_MASK 0x00000010L
+#define DAC_AUTODETECT_STATUS__DAC_AUTODETECT_CONNECT__SHIFT 0x00000004
+#define DAC_AUTODETECT_STATUS__DAC_AUTODETECT_GREEN_SENSE_MASK 0x00030000L
+#define DAC_AUTODETECT_STATUS__DAC_AUTODETECT_GREEN_SENSE__SHIFT 0x00000010
+#define DAC_AUTODETECT_STATUS__DAC_AUTODETECT_RED_SENSE_MASK 0x00000300L
+#define DAC_AUTODETECT_STATUS__DAC_AUTODETECT_RED_SENSE__SHIFT 0x00000008
+#define DAC_AUTODETECT_STATUS__DAC_AUTODETECT_STATUS_MASK 0x00000001L
+#define DAC_AUTODETECT_STATUS__DAC_AUTODETECT_STATUS__SHIFT 0x00000000
+#define DAC_CLK_ENABLE__DACA_CLK_ENABLE_MASK 0x00000001L
+#define DAC_CLK_ENABLE__DACA_CLK_ENABLE__SHIFT 0x00000000
+#define DAC_CLK_ENABLE__DACB_CLK_ENABLE_MASK 0x00000010L
+#define DAC_CLK_ENABLE__DACB_CLK_ENABLE__SHIFT 0x00000004
+#define DAC_COMPARATOR_ENABLE__DAC_B_ASYNC_ENABLE_MASK 0x00040000L
+#define DAC_COMPARATOR_ENABLE__DAC_B_ASYNC_ENABLE__SHIFT 0x00000012
+#define DAC_COMPARATOR_ENABLE__DAC_COMP_DDET_REF_EN_MASK 0x00000001L
+#define DAC_COMPARATOR_ENABLE__DAC_COMP_DDET_REF_EN__SHIFT 0x00000000
+#define DAC_COMPARATOR_ENABLE__DAC_COMP_SDET_REF_EN_MASK 0x00000100L
+#define DAC_COMPARATOR_ENABLE__DAC_COMP_SDET_REF_EN__SHIFT 0x00000008
+#define DAC_COMPARATOR_ENABLE__DAC_G_ASYNC_ENABLE_MASK 0x00020000L
+#define DAC_COMPARATOR_ENABLE__DAC_G_ASYNC_ENABLE__SHIFT 0x00000011
+#define DAC_COMPARATOR_ENABLE__DAC_R_ASYNC_ENABLE_MASK 0x00010000L
+#define DAC_COMPARATOR_ENABLE__DAC_R_ASYNC_ENABLE__SHIFT 0x00000010
+#define DAC_COMPARATOR_OUTPUT__DAC_COMPARATOR_OUTPUT_BLUE_MASK 0x00000002L
+#define DAC_COMPARATOR_OUTPUT__DAC_COMPARATOR_OUTPUT_BLUE__SHIFT 0x00000001
+#define DAC_COMPARATOR_OUTPUT__DAC_COMPARATOR_OUTPUT_GREEN_MASK 0x00000004L
+#define DAC_COMPARATOR_OUTPUT__DAC_COMPARATOR_OUTPUT_GREEN__SHIFT 0x00000002
+#define DAC_COMPARATOR_OUTPUT__DAC_COMPARATOR_OUTPUT_MASK 0x00000001L
+#define DAC_COMPARATOR_OUTPUT__DAC_COMPARATOR_OUTPUT_RED_MASK 0x00000008L
+#define DAC_COMPARATOR_OUTPUT__DAC_COMPARATOR_OUTPUT_RED__SHIFT 0x00000003
+#define DAC_COMPARATOR_OUTPUT__DAC_COMPARATOR_OUTPUT__SHIFT 0x00000000
+#define DAC_CONTROL__DAC_DFORCE_EN_MASK 0x00000001L
+#define DAC_CONTROL__DAC_DFORCE_EN__SHIFT 0x00000000
+#define DAC_CONTROL__DAC_TV_ENABLE_MASK 0x00000100L
+#define DAC_CONTROL__DAC_TV_ENABLE__SHIFT 0x00000008
+#define DAC_CONTROL__DAC_ZSCALE_SHIFT_MASK 0x00010000L
+#define DAC_CONTROL__DAC_ZSCALE_SHIFT__SHIFT 0x00000010
+#define DAC_CRC_CONTROL__DAC_CRC_FIELD_MASK 0x00000001L
+#define DAC_CRC_CONTROL__DAC_CRC_FIELD__SHIFT 0x00000000
+#define DAC_CRC_CONTROL__DAC_CRC_ONLY_BLANKb_MASK 0x00000100L
+#define DAC_CRC_CONTROL__DAC_CRC_ONLY_BLANKb__SHIFT 0x00000008
+#define DAC_CRC_EN__DAC_CRC_CONT_EN_MASK 0x00010000L
+#define DAC_CRC_EN__DAC_CRC_CONT_EN__SHIFT 0x00000010
+#define DAC_CRC_EN__DAC_CRC_EN_MASK 0x00000001L
+#define DAC_CRC_EN__DAC_CRC_EN__SHIFT 0x00000000
+#define DAC_CRC_SIG_CONTROL__DAC_CRC_SIG_CONTROL_MASK 0x0000003fL
+#define DAC_CRC_SIG_CONTROL__DAC_CRC_SIG_CONTROL__SHIFT 0x00000000
+#define DAC_CRC_SIG_CONTROL_MASK__DAC_CRC_SIG_CONTROL_MASK_MASK 0x0000003fL
+#define DAC_CRC_SIG_CONTROL_MASK__DAC_CRC_SIG_CONTROL_MASK__SHIFT 0x00000000
+#define DAC_CRC_SIG_RGB__DAC_CRC_SIG_BLUE_MASK 0x000003ffL
+#define DAC_CRC_SIG_RGB__DAC_CRC_SIG_BLUE__SHIFT 0x00000000
+#define DAC_CRC_SIG_RGB__DAC_CRC_SIG_GREEN_MASK 0x000ffc00L
+#define DAC_CRC_SIG_RGB__DAC_CRC_SIG_GREEN__SHIFT 0x0000000a
+#define DAC_CRC_SIG_RGB__DAC_CRC_SIG_RED_MASK 0x3ff00000L
+#define DAC_CRC_SIG_RGB__DAC_CRC_SIG_RED__SHIFT 0x00000014
+#define DAC_CRC_SIG_RGB_MASK__DAC_CRC_SIG_BLUE_MASK_MASK 0x000003ffL
+#define DAC_CRC_SIG_RGB_MASK__DAC_CRC_SIG_BLUE_MASK__SHIFT 0x00000000
+#define DAC_CRC_SIG_RGB_MASK__DAC_CRC_SIG_GREEN_MASK_MASK 0x000ffc00L
+#define DAC_CRC_SIG_RGB_MASK__DAC_CRC_SIG_GREEN_MASK__SHIFT 0x0000000a
+#define DAC_CRC_SIG_RGB_MASK__DAC_CRC_SIG_RED_MASK_MASK 0x3ff00000L
+#define DAC_CRC_SIG_RGB_MASK__DAC_CRC_SIG_RED_MASK__SHIFT 0x00000014
+#define DAC_DATA__DAC_DATA_MASK 0x0000003fL
+#define DAC_DATA__DAC_DATA__SHIFT 0x00000000
+#define DAC_DFT_CONFIG__DAC_DFT_CONFIG_MASK 0xffffffffL
+#define DAC_DFT_CONFIG__DAC_DFT_CONFIG__SHIFT 0x00000000
+#define DAC_ENABLE__DAC_ENABLE_MASK 0x00000001L
+#define DAC_ENABLE__DAC_ENABLE__SHIFT 0x00000000
+#define DAC_ENABLE__DAC_RESYNC_FIFO_ENABLE_MASK 0x00000002L
+#define DAC_ENABLE__DAC_RESYNC_FIFO_ENABLE__SHIFT 0x00000001
+#define DAC_ENABLE__DAC_RESYNC_FIFO_ERROR_ACK_MASK 0x00000020L
+#define DAC_ENABLE__DAC_RESYNC_FIFO_ERROR_ACK__SHIFT 0x00000005
+#define DAC_ENABLE__DAC_RESYNC_FIFO_ERROR_MASK 0x00000010L
+#define DAC_ENABLE__DAC_RESYNC_FIFO_ERROR__SHIFT 0x00000004
+#define DAC_ENABLE__DAC_RESYNC_FIFO_POINTER_SKEW_MASK 0x0000000cL
+#define DAC_ENABLE__DAC_RESYNC_FIFO_POINTER_SKEW__SHIFT 0x00000002
+#define DAC_ENABLE__DAC_RESYNC_FIFO_TVOUT_SIM_MASK 0x00000100L
+#define DAC_ENABLE__DAC_RESYNC_FIFO_TVOUT_SIM__SHIFT 0x00000008
+#define DAC_FIFO_STATUS__DAC_FIFO_CAL_AVERAGE_LEVEL_MASK 0x0000fc00L
+#define DAC_FIFO_STATUS__DAC_FIFO_CAL_AVERAGE_LEVEL__SHIFT 0x0000000a
+#define DAC_FIFO_STATUS__DAC_FIFO_CALIBRATED_MASK 0x20000000L
+#define DAC_FIFO_STATUS__DAC_FIFO_CALIBRATED__SHIFT 0x0000001d
+#define DAC_FIFO_STATUS__DAC_FIFO_FORCE_RECAL_AVERAGE_MASK 0x40000000L
+#define DAC_FIFO_STATUS__DAC_FIFO_FORCE_RECAL_AVERAGE__SHIFT 0x0000001e
+#define DAC_FIFO_STATUS__DAC_FIFO_FORCE_RECOMP_MINMAX_MASK 0x80000000L
+#define DAC_FIFO_STATUS__DAC_FIFO_FORCE_RECOMP_MINMAX__SHIFT 0x0000001f
+#define DAC_FIFO_STATUS__DAC_FIFO_MAXIMUM_LEVEL_MASK 0x000f0000L
+#define DAC_FIFO_STATUS__DAC_FIFO_MAXIMUM_LEVEL__SHIFT 0x00000010
+#define DAC_FIFO_STATUS__DAC_FIFO_MINIMUM_LEVEL_MASK 0x03c00000L
+#define DAC_FIFO_STATUS__DAC_FIFO_MINIMUM_LEVEL__SHIFT 0x00000016
+#define DAC_FIFO_STATUS__DAC_FIFO_OVERWRITE_LEVEL_MASK 0x000000fcL
+#define DAC_FIFO_STATUS__DAC_FIFO_OVERWRITE_LEVEL__SHIFT 0x00000002
+#define DAC_FIFO_STATUS__DAC_FIFO_USE_OVERWRITE_LEVEL_MASK 0x00000002L
+#define DAC_FIFO_STATUS__DAC_FIFO_USE_OVERWRITE_LEVEL__SHIFT 0x00000001
+#define DAC_FORCE_DATA__DAC_FORCE_DATA_MASK 0x000003ffL
+#define DAC_FORCE_DATA__DAC_FORCE_DATA__SHIFT 0x00000000
+#define DAC_FORCE_OUTPUT_CNTL__DAC_FORCE_DATA_EN_MASK 0x00000001L
+#define DAC_FORCE_OUTPUT_CNTL__DAC_FORCE_DATA_EN__SHIFT 0x00000000
+#define DAC_FORCE_OUTPUT_CNTL__DAC_FORCE_DATA_ON_BLANKb_ONLY_MASK 0x01000000L
+#define DAC_FORCE_OUTPUT_CNTL__DAC_FORCE_DATA_ON_BLANKb_ONLY__SHIFT 0x00000018
+#define DAC_FORCE_OUTPUT_CNTL__DAC_FORCE_DATA_SEL_MASK 0x00000700L
+#define DAC_FORCE_OUTPUT_CNTL__DAC_FORCE_DATA_SEL__SHIFT 0x00000008
+#define DAC_MACRO_CNTL_RESERVED0__DAC_MACRO_CNTL_RESERVED_MASK 0xffffffffL
+#define DAC_MACRO_CNTL_RESERVED0__DAC_MACRO_CNTL_RESERVED__SHIFT 0x00000000
+#define DAC_MACRO_CNTL_RESERVED1__DAC_MACRO_CNTL_RESERVED_MASK 0xffffffffL
+#define DAC_MACRO_CNTL_RESERVED1__DAC_MACRO_CNTL_RESERVED__SHIFT 0x00000000
+#define DAC_MACRO_CNTL_RESERVED2__DAC_MACRO_CNTL_RESERVED_MASK 0xffffffffL
+#define DAC_MACRO_CNTL_RESERVED2__DAC_MACRO_CNTL_RESERVED__SHIFT 0x00000000
+#define DAC_MACRO_CNTL_RESERVED3__DAC_MACRO_CNTL_RESERVED_MASK 0xffffffffL
+#define DAC_MACRO_CNTL_RESERVED3__DAC_MACRO_CNTL_RESERVED__SHIFT 0x00000000
+#define DAC_MASK__DAC_MASK_MASK 0x000000ffL
+#define DAC_MASK__DAC_MASK__SHIFT 0x00000000
+#define DAC_POWERDOWN__DAC_POWERDOWN_BLUE_MASK 0x00000100L
+#define DAC_POWERDOWN__DAC_POWERDOWN_BLUE__SHIFT 0x00000008
+#define DAC_POWERDOWN__DAC_POWERDOWN_GREEN_MASK 0x00010000L
+#define DAC_POWERDOWN__DAC_POWERDOWN_GREEN__SHIFT 0x00000010
+#define DAC_POWERDOWN__DAC_POWERDOWN_MASK 0x00000001L
+#define DAC_POWERDOWN__DAC_POWERDOWN_RED_MASK 0x01000000L
+#define DAC_POWERDOWN__DAC_POWERDOWN_RED__SHIFT 0x00000018
+#define DAC_POWERDOWN__DAC_POWERDOWN__SHIFT 0x00000000
+#define DAC_PWR_CNTL__DAC_BG_MODE_MASK 0x00000003L
+#define DAC_PWR_CNTL__DAC_BG_MODE__SHIFT 0x00000000
+#define DAC_PWR_CNTL__DAC_PWRCNTL_MASK 0x00030000L
+#define DAC_PWR_CNTL__DAC_PWRCNTL__SHIFT 0x00000010
+#define DAC_R_INDEX__DAC_R_INDEX_MASK 0x000000ffL
+#define DAC_R_INDEX__DAC_R_INDEX__SHIFT 0x00000000
+#define DAC_SOURCE_SELECT__DAC_SOURCE_SELECT_MASK 0x00000007L
+#define DAC_SOURCE_SELECT__DAC_SOURCE_SELECT__SHIFT 0x00000000
+#define DAC_SOURCE_SELECT__DAC_TV_SELECT_MASK 0x00000008L
+#define DAC_SOURCE_SELECT__DAC_TV_SELECT__SHIFT 0x00000003
+#define DAC_STEREOSYNC_SELECT__DAC_STEREOSYNC_SELECT_MASK 0x00000007L
+#define DAC_STEREOSYNC_SELECT__DAC_STEREOSYNC_SELECT__SHIFT 0x00000000
+#define DAC_SYNC_TRISTATE_CONTROL__DAC_HSYNCA_TRISTATE_MASK 0x00000001L
+#define DAC_SYNC_TRISTATE_CONTROL__DAC_HSYNCA_TRISTATE__SHIFT 0x00000000
+#define DAC_SYNC_TRISTATE_CONTROL__DAC_SYNCA_TRISTATE_MASK 0x00010000L
+#define DAC_SYNC_TRISTATE_CONTROL__DAC_SYNCA_TRISTATE__SHIFT 0x00000010
+#define DAC_SYNC_TRISTATE_CONTROL__DAC_VSYNCA_TRISTATE_MASK 0x00000100L
+#define DAC_SYNC_TRISTATE_CONTROL__DAC_VSYNCA_TRISTATE__SHIFT 0x00000008
+#define DAC_W_INDEX__DAC_W_INDEX_MASK 0x000000ffL
+#define DAC_W_INDEX__DAC_W_INDEX__SHIFT 0x00000000
+#define DC_ABM1_ACE_CNTL_MISC__ABM1_ACE_REG_WR_MISSED_FRAME_CLEAR_MASK 0x00000100L
+#define DC_ABM1_ACE_CNTL_MISC__ABM1_ACE_REG_WR_MISSED_FRAME_CLEAR__SHIFT 0x00000008
+#define DC_ABM1_ACE_CNTL_MISC__ABM1_ACE_REG_WR_MISSED_FRAME_MASK 0x00000001L
+#define DC_ABM1_ACE_CNTL_MISC__ABM1_ACE_REG_WR_MISSED_FRAME__SHIFT 0x00000000
+#define DC_ABM1_ACE_OFFSET_SLOPE_0__ABM1_ACE_LOCK_MASK 0x80000000L
+#define DC_ABM1_ACE_OFFSET_SLOPE_0__ABM1_ACE_LOCK__SHIFT 0x0000001f
+#define DC_ABM1_ACE_OFFSET_SLOPE_0__ABM1_ACE_OFFSET_0_MASK 0x07ff0000L
+#define DC_ABM1_ACE_OFFSET_SLOPE_0__ABM1_ACE_OFFSET_0__SHIFT 0x00000010
+#define DC_ABM1_ACE_OFFSET_SLOPE_0__ABM1_ACE_SLOPE_0_MASK 0x00007fffL
+#define DC_ABM1_ACE_OFFSET_SLOPE_0__ABM1_ACE_SLOPE_0__SHIFT 0x00000000
+#define DC_ABM1_ACE_OFFSET_SLOPE_1__ABM1_ACE_LOCK_MASK 0x80000000L
+#define DC_ABM1_ACE_OFFSET_SLOPE_1__ABM1_ACE_LOCK__SHIFT 0x0000001f
+#define DC_ABM1_ACE_OFFSET_SLOPE_1__ABM1_ACE_OFFSET_1_MASK 0x07ff0000L
+#define DC_ABM1_ACE_OFFSET_SLOPE_1__ABM1_ACE_OFFSET_1__SHIFT 0x00000010
+#define DC_ABM1_ACE_OFFSET_SLOPE_1__ABM1_ACE_SLOPE_1_MASK 0x00007fffL
+#define DC_ABM1_ACE_OFFSET_SLOPE_1__ABM1_ACE_SLOPE_1__SHIFT 0x00000000
+#define DC_ABM1_ACE_OFFSET_SLOPE_2__ABM1_ACE_LOCK_MASK 0x80000000L
+#define DC_ABM1_ACE_OFFSET_SLOPE_2__ABM1_ACE_LOCK__SHIFT 0x0000001f
+#define DC_ABM1_ACE_OFFSET_SLOPE_2__ABM1_ACE_OFFSET_2_MASK 0x07ff0000L
+#define DC_ABM1_ACE_OFFSET_SLOPE_2__ABM1_ACE_OFFSET_2__SHIFT 0x00000010
+#define DC_ABM1_ACE_OFFSET_SLOPE_2__ABM1_ACE_SLOPE_2_MASK 0x00007fffL
+#define DC_ABM1_ACE_OFFSET_SLOPE_2__ABM1_ACE_SLOPE_2__SHIFT 0x00000000
+#define DC_ABM1_ACE_OFFSET_SLOPE_3__ABM1_ACE_LOCK_MASK 0x80000000L
+#define DC_ABM1_ACE_OFFSET_SLOPE_3__ABM1_ACE_LOCK__SHIFT 0x0000001f
+#define DC_ABM1_ACE_OFFSET_SLOPE_3__ABM1_ACE_OFFSET_3_MASK 0x07ff0000L
+#define DC_ABM1_ACE_OFFSET_SLOPE_3__ABM1_ACE_OFFSET_3__SHIFT 0x00000010
+#define DC_ABM1_ACE_OFFSET_SLOPE_3__ABM1_ACE_SLOPE_3_MASK 0x00007fffL
+#define DC_ABM1_ACE_OFFSET_SLOPE_3__ABM1_ACE_SLOPE_3__SHIFT 0x00000000
+#define DC_ABM1_ACE_OFFSET_SLOPE_4__ABM1_ACE_LOCK_MASK 0x80000000L
+#define DC_ABM1_ACE_OFFSET_SLOPE_4__ABM1_ACE_LOCK__SHIFT 0x0000001f
+#define DC_ABM1_ACE_OFFSET_SLOPE_4__ABM1_ACE_OFFSET_4_MASK 0x07ff0000L
+#define DC_ABM1_ACE_OFFSET_SLOPE_4__ABM1_ACE_OFFSET_4__SHIFT 0x00000010
+#define DC_ABM1_ACE_OFFSET_SLOPE_4__ABM1_ACE_SLOPE_4_MASK 0x00007fffL
+#define DC_ABM1_ACE_OFFSET_SLOPE_4__ABM1_ACE_SLOPE_4__SHIFT 0x00000000
+#define DC_ABM1_ACE_THRES_12__ABM1_ACE_LOCK_MASK 0x80000000L
+#define DC_ABM1_ACE_THRES_12__ABM1_ACE_LOCK__SHIFT 0x0000001f
+#define DC_ABM1_ACE_THRES_12__ABM1_ACE_THRES_1_MASK 0x000003ffL
+#define DC_ABM1_ACE_THRES_12__ABM1_ACE_THRES_1__SHIFT 0x00000000
+#define DC_ABM1_ACE_THRES_12__ABM1_ACE_THRES_2_MASK 0x03ff0000L
+#define DC_ABM1_ACE_THRES_12__ABM1_ACE_THRES_2__SHIFT 0x00000010
+#define DC_ABM1_ACE_THRES_34__ABM1_ACE_DBUF_REG_UPDATE_PENDING_MASK 0x40000000L
+#define DC_ABM1_ACE_THRES_34__ABM1_ACE_DBUF_REG_UPDATE_PENDING__SHIFT 0x0000001e
+#define DC_ABM1_ACE_THRES_34__ABM1_ACE_IGNORE_MASTER_LOCK_EN_MASK 0x10000000L
+#define DC_ABM1_ACE_THRES_34__ABM1_ACE_IGNORE_MASTER_LOCK_EN__SHIFT 0x0000001c
+#define DC_ABM1_ACE_THRES_34__ABM1_ACE_LOCK_MASK 0x80000000L
+#define DC_ABM1_ACE_THRES_34__ABM1_ACE_LOCK__SHIFT 0x0000001f
+#define DC_ABM1_ACE_THRES_34__ABM1_ACE_READBACK_DB_REG_VALUE_EN_MASK 0x20000000L
+#define DC_ABM1_ACE_THRES_34__ABM1_ACE_READBACK_DB_REG_VALUE_EN__SHIFT 0x0000001d
+#define DC_ABM1_ACE_THRES_34__ABM1_ACE_THRES_3_MASK 0x000003ffL
+#define DC_ABM1_ACE_THRES_34__ABM1_ACE_THRES_3__SHIFT 0x00000000
+#define DC_ABM1_ACE_THRES_34__ABM1_ACE_THRES_4_MASK 0x03ff0000L
+#define DC_ABM1_ACE_THRES_34__ABM1_ACE_THRES_4__SHIFT 0x00000010
+#define DC_ABM1_BL_MASTER_LOCK__ABM1_BL_MASTER_LOCK_MASK 0x80000000L
+#define DC_ABM1_BL_MASTER_LOCK__ABM1_BL_MASTER_LOCK__SHIFT 0x0000001f
+#define DC_ABM1_CNTL__ABM1_BLANK_MODE_SUPPORT_ENABLE_MASK 0x80000000L
+#define DC_ABM1_CNTL__ABM1_BLANK_MODE_SUPPORT_ENABLE__SHIFT 0x0000001f
+#define DC_ABM1_CNTL__ABM1_EN_MASK 0x00000001L
+#define DC_ABM1_CNTL__ABM1_EN__SHIFT 0x00000000
+#define DC_ABM1_CNTL__ABM1_SOURCE_SELECT_MASK 0x00000700L
+#define DC_ABM1_CNTL__ABM1_SOURCE_SELECT__SHIFT 0x00000008
+#define DC_ABM1_DEBUG_MISC__ABM1_BL_FORCE_INTERRUPT_MASK 0x00010000L
+#define DC_ABM1_DEBUG_MISC__ABM1_BL_FORCE_INTERRUPT__SHIFT 0x00000010
+#define DC_ABM1_DEBUG_MISC__ABM1_HG_FORCE_INTERRUPT_MASK 0x00000001L
+#define DC_ABM1_DEBUG_MISC__ABM1_HG_FORCE_INTERRUPT__SHIFT 0x00000000
+#define DC_ABM1_DEBUG_MISC__ABM1_LS_FORCE_INTERRUPT_MASK 0x00000100L
+#define DC_ABM1_DEBUG_MISC__ABM1_LS_FORCE_INTERRUPT__SHIFT 0x00000008
+#define DC_ABM1_HG_BIN_1_32_SHIFT_FLAG__ABM1_HG_BIN_1_32_SHIFT_FLAG_MASK 0xffffffffL
+#define DC_ABM1_HG_BIN_1_32_SHIFT_FLAG__ABM1_HG_BIN_1_32_SHIFT_FLAG__SHIFT 0x00000000
+#define DC_ABM1_HG_BIN_17_24_SHIFT_INDEX__ABM1_HG_BIN_17_24_SHIFT_INDEX_MASK 0xffffffffL
+#define DC_ABM1_HG_BIN_17_24_SHIFT_INDEX__ABM1_HG_BIN_17_24_SHIFT_INDEX__SHIFT 0x00000000
+#define DC_ABM1_HG_BIN_1_8_SHIFT_INDEX__ABM1_HG_BIN_1_8_SHIFT_INDEX_MASK 0xffffffffL
+#define DC_ABM1_HG_BIN_1_8_SHIFT_INDEX__ABM1_HG_BIN_1_8_SHIFT_INDEX__SHIFT 0x00000000
+#define DC_ABM1_HG_BIN_25_32_SHIFT_INDEX__ABM1_HG_BIN_25_32_SHIFT_INDEX_MASK 0xffffffffL
+#define DC_ABM1_HG_BIN_25_32_SHIFT_INDEX__ABM1_HG_BIN_25_32_SHIFT_INDEX__SHIFT 0x00000000
+#define DC_ABM1_HG_BIN_9_16_SHIFT_INDEX__ABM1_HG_BIN_9_16_SHIFT_INDEX_MASK 0xffffffffL
+#define DC_ABM1_HG_BIN_9_16_SHIFT_INDEX__ABM1_HG_BIN_9_16_SHIFT_INDEX__SHIFT 0x00000000
+#define DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_BL_REG_READ_IN_PROGRESS_MASK 0x00000004L
+#define DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_BL_REG_READ_IN_PROGRESS__SHIFT 0x00000002
+#define DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_BL_REG_READ_MISSED_FRAME_CLEAR_MASK 0x80000000L
+#define DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_BL_REG_READ_MISSED_FRAME_CLEAR__SHIFT 0x0000001f
+#define DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_BL_REG_READ_MISSED_FRAME_MASK 0x00000400L
+#define DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_BL_REG_READ_MISSED_FRAME__SHIFT 0x0000000a
+#define DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_HG_REG_READ_IN_PROGRESS_MASK 0x00000001L
+#define DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_HG_REG_READ_IN_PROGRESS__SHIFT 0x00000000
+#define DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_HG_REG_READ_MISSED_FRAME_CLEAR_MASK 0x00010000L
+#define DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_HG_REG_READ_MISSED_FRAME_CLEAR__SHIFT 0x00000010
+#define DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_HG_REG_READ_MISSED_FRAME_MASK 0x00000100L
+#define DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_HG_REG_READ_MISSED_FRAME__SHIFT 0x00000008
+#define DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_LS_REG_READ_IN_PROGRESS_MASK 0x00000002L
+#define DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_LS_REG_READ_IN_PROGRESS__SHIFT 0x00000001
+#define DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_LS_REG_READ_MISSED_FRAME_CLEAR_MASK 0x01000000L
+#define DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_LS_REG_READ_MISSED_FRAME_CLEAR__SHIFT 0x00000018
+#define DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_LS_REG_READ_MISSED_FRAME_MASK 0x00000200L
+#define DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_LS_REG_READ_MISSED_FRAME__SHIFT 0x00000009
+#define DC_ABM1_HG_MISC_CTRL__ABM1_DBUF_HGLS_READBACK_DB_REG_VALUE_EN_MASK 0x00800000L
+#define DC_ABM1_HG_MISC_CTRL__ABM1_DBUF_HGLS_READBACK_DB_REG_VALUE_EN__SHIFT 0x00000017
+#define DC_ABM1_HG_MISC_CTRL__ABM1_DBUF_HGLS_REG_FRAME_START_DISP_SEL_MASK 0x07000000L
+#define DC_ABM1_HG_MISC_CTRL__ABM1_DBUF_HGLS_REG_FRAME_START_DISP_SEL__SHIFT 0x00000018
+#define DC_ABM1_HG_MISC_CTRL__ABM1_DBUF_HGLS_REG_UPDATE_AT_FRAME_START_MASK 0x10000000L
+#define DC_ABM1_HG_MISC_CTRL__ABM1_DBUF_HGLS_REG_UPDATE_AT_FRAME_START__SHIFT 0x0000001c
+#define DC_ABM1_HG_MISC_CTRL__ABM1_DBUF_HGLS_REG_UPDATE_PENDING_MASK 0x40000000L
+#define DC_ABM1_HG_MISC_CTRL__ABM1_DBUF_HGLS_REG_UPDATE_PENDING__SHIFT 0x0000001e
+#define DC_ABM1_HG_MISC_CTRL__ABM1_HG_BIN_BITWIDTH_SIZE_SEL_MASK 0x00030000L
+#define DC_ABM1_HG_MISC_CTRL__ABM1_HG_BIN_BITWIDTH_SIZE_SEL__SHIFT 0x00000010
+#define DC_ABM1_HG_MISC_CTRL__ABM1_HG_FINE_MODE_BIN_SEL_MASK 0x00001000L
+#define DC_ABM1_HG_MISC_CTRL__ABM1_HG_FINE_MODE_BIN_SEL__SHIFT 0x0000000c
+#define DC_ABM1_HG_MISC_CTRL__ABM1_HGLS_IGNORE_MASTER_LOCK_EN_MASK 0x20000000L
+#define DC_ABM1_HG_MISC_CTRL__ABM1_HGLS_IGNORE_MASTER_LOCK_EN__SHIFT 0x0000001d
+#define DC_ABM1_HG_MISC_CTRL__ABM1_HGLS_REG_LOCK_MASK 0x80000000L
+#define DC_ABM1_HG_MISC_CTRL__ABM1_HGLS_REG_LOCK__SHIFT 0x0000001f
+#define DC_ABM1_HG_MISC_CTRL__ABM1_HG_NUM_OF_BINS_SEL_MASK 0x00000003L
+#define DC_ABM1_HG_MISC_CTRL__ABM1_HG_NUM_OF_BINS_SEL__SHIFT 0x00000000
+#define DC_ABM1_HG_MISC_CTRL__ABM1_HG_VMAX_SEL_MASK 0x00000100L
+#define DC_ABM1_HG_MISC_CTRL__ABM1_HG_VMAX_SEL__SHIFT 0x00000008
+#define DC_ABM1_HG_MISC_CTRL__ABM1_OVR_SCAN_PIXEL_PROCESS_EN_MASK 0x00100000L
+#define DC_ABM1_HG_MISC_CTRL__ABM1_OVR_SCAN_PIXEL_PROCESS_EN__SHIFT 0x00000014
+#define DC_ABM1_HG_RESULT_10__ABM1_HG_RESULT_10_MASK 0xffffffffL
+#define DC_ABM1_HG_RESULT_10__ABM1_HG_RESULT_10__SHIFT 0x00000000
+#define DC_ABM1_HG_RESULT_11__ABM1_HG_RESULT_11_MASK 0xffffffffL
+#define DC_ABM1_HG_RESULT_11__ABM1_HG_RESULT_11__SHIFT 0x00000000
+#define DC_ABM1_HG_RESULT_12__ABM1_HG_RESULT_12_MASK 0xffffffffL
+#define DC_ABM1_HG_RESULT_12__ABM1_HG_RESULT_12__SHIFT 0x00000000
+#define DC_ABM1_HG_RESULT_13__ABM1_HG_RESULT_13_MASK 0xffffffffL
+#define DC_ABM1_HG_RESULT_13__ABM1_HG_RESULT_13__SHIFT 0x00000000
+#define DC_ABM1_HG_RESULT_14__ABM1_HG_RESULT_14_MASK 0xffffffffL
+#define DC_ABM1_HG_RESULT_14__ABM1_HG_RESULT_14__SHIFT 0x00000000
+#define DC_ABM1_HG_RESULT_15__ABM1_HG_RESULT_15_MASK 0xffffffffL
+#define DC_ABM1_HG_RESULT_15__ABM1_HG_RESULT_15__SHIFT 0x00000000
+#define DC_ABM1_HG_RESULT_16__ABM1_HG_RESULT_16_MASK 0xffffffffL
+#define DC_ABM1_HG_RESULT_16__ABM1_HG_RESULT_16__SHIFT 0x00000000
+#define DC_ABM1_HG_RESULT_17__ABM1_HG_RESULT_17_MASK 0xffffffffL
+#define DC_ABM1_HG_RESULT_17__ABM1_HG_RESULT_17__SHIFT 0x00000000
+#define DC_ABM1_HG_RESULT_18__ABM1_HG_RESULT_18_MASK 0xffffffffL
+#define DC_ABM1_HG_RESULT_18__ABM1_HG_RESULT_18__SHIFT 0x00000000
+#define DC_ABM1_HG_RESULT_19__ABM1_HG_RESULT_19_MASK 0xffffffffL
+#define DC_ABM1_HG_RESULT_19__ABM1_HG_RESULT_19__SHIFT 0x00000000
+#define DC_ABM1_HG_RESULT_1__ABM1_HG_RESULT_1_MASK 0xffffffffL
+#define DC_ABM1_HG_RESULT_1__ABM1_HG_RESULT_1__SHIFT 0x00000000
+#define DC_ABM1_HG_RESULT_20__ABM1_HG_RESULT_20_MASK 0xffffffffL
+#define DC_ABM1_HG_RESULT_20__ABM1_HG_RESULT_20__SHIFT 0x00000000
+#define DC_ABM1_HG_RESULT_21__ABM1_HG_RESULT_21_MASK 0xffffffffL
+#define DC_ABM1_HG_RESULT_21__ABM1_HG_RESULT_21__SHIFT 0x00000000
+#define DC_ABM1_HG_RESULT_22__ABM1_HG_RESULT_22_MASK 0xffffffffL
+#define DC_ABM1_HG_RESULT_22__ABM1_HG_RESULT_22__SHIFT 0x00000000
+#define DC_ABM1_HG_RESULT_23__ABM1_HG_RESULT_23_MASK 0xffffffffL
+#define DC_ABM1_HG_RESULT_23__ABM1_HG_RESULT_23__SHIFT 0x00000000
+#define DC_ABM1_HG_RESULT_24__ABM1_HG_RESULT_24_MASK 0xffffffffL
+#define DC_ABM1_HG_RESULT_24__ABM1_HG_RESULT_24__SHIFT 0x00000000
+#define DC_ABM1_HG_RESULT_2__ABM1_HG_RESULT_2_MASK 0xffffffffL
+#define DC_ABM1_HG_RESULT_2__ABM1_HG_RESULT_2__SHIFT 0x00000000
+#define DC_ABM1_HG_RESULT_3__ABM1_HG_RESULT_3_MASK 0xffffffffL
+#define DC_ABM1_HG_RESULT_3__ABM1_HG_RESULT_3__SHIFT 0x00000000
+#define DC_ABM1_HG_RESULT_4__ABM1_HG_RESULT_4_MASK 0xffffffffL
+#define DC_ABM1_HG_RESULT_4__ABM1_HG_RESULT_4__SHIFT 0x00000000
+#define DC_ABM1_HG_RESULT_5__ABM1_HG_RESULT_5_MASK 0xffffffffL
+#define DC_ABM1_HG_RESULT_5__ABM1_HG_RESULT_5__SHIFT 0x00000000
+#define DC_ABM1_HG_RESULT_6__ABM1_HG_RESULT_6_MASK 0xffffffffL
+#define DC_ABM1_HG_RESULT_6__ABM1_HG_RESULT_6__SHIFT 0x00000000
+#define DC_ABM1_HG_RESULT_7__ABM1_HG_RESULT_7_MASK 0xffffffffL
+#define DC_ABM1_HG_RESULT_7__ABM1_HG_RESULT_7__SHIFT 0x00000000
+#define DC_ABM1_HG_RESULT_8__ABM1_HG_RESULT_8_MASK 0xffffffffL
+#define DC_ABM1_HG_RESULT_8__ABM1_HG_RESULT_8__SHIFT 0x00000000
+#define DC_ABM1_HG_RESULT_9__ABM1_HG_RESULT_9_MASK 0xffffffffL
+#define DC_ABM1_HG_RESULT_9__ABM1_HG_RESULT_9__SHIFT 0x00000000
+#define DC_ABM1_HG_SAMPLE_RATE__ABM1_HG_INITIAL_SAMPLE_RATE_COUNT_VALUE_WHEN_RESET_MASK 0x00ff0000L
+#define DC_ABM1_HG_SAMPLE_RATE__ABM1_HG_INITIAL_SAMPLE_RATE_COUNT_VALUE_WHEN_RESET__SHIFT 0x00000010
+#define DC_ABM1_HG_SAMPLE_RATE__ABM1_HGLS_REG_LOCK_MASK 0x80000000L
+#define DC_ABM1_HG_SAMPLE_RATE__ABM1_HGLS_REG_LOCK__SHIFT 0x0000001f
+#define DC_ABM1_HG_SAMPLE_RATE__ABM1_HG_RESET_SAMPLE_RATE_FRAME_COUNTER_MASK 0x00000002L
+#define DC_ABM1_HG_SAMPLE_RATE__ABM1_HG_RESET_SAMPLE_RATE_FRAME_COUNTER__SHIFT 0x00000001
+#define DC_ABM1_HG_SAMPLE_RATE__ABM1_HG_SAMPLE_RATE_COUNT_EN_MASK 0x00000001L
+#define DC_ABM1_HG_SAMPLE_RATE__ABM1_HG_SAMPLE_RATE_COUNT_EN__SHIFT 0x00000000
+#define DC_ABM1_HG_SAMPLE_RATE__ABM1_HG_SAMPLE_RATE_FRAME_COUNT_MASK 0x0000ff00L
+#define DC_ABM1_HG_SAMPLE_RATE__ABM1_HG_SAMPLE_RATE_FRAME_COUNT__SHIFT 0x00000008
+#define DC_ABM1_IPCSC_COEFF_SEL__ABM1_HGLS_REG_LOCK_MASK 0x80000000L
+#define DC_ABM1_IPCSC_COEFF_SEL__ABM1_HGLS_REG_LOCK__SHIFT 0x0000001f
+#define DC_ABM1_IPCSC_COEFF_SEL__ABM1_IPCSC_COEFF_SEL_B_MASK 0x0000000fL
+#define DC_ABM1_IPCSC_COEFF_SEL__ABM1_IPCSC_COEFF_SEL_B__SHIFT 0x00000000
+#define DC_ABM1_IPCSC_COEFF_SEL__ABM1_IPCSC_COEFF_SEL_G_MASK 0x00000f00L
+#define DC_ABM1_IPCSC_COEFF_SEL__ABM1_IPCSC_COEFF_SEL_G__SHIFT 0x00000008
+#define DC_ABM1_IPCSC_COEFF_SEL__ABM1_IPCSC_COEFF_SEL_R_MASK 0x000f0000L
+#define DC_ABM1_IPCSC_COEFF_SEL__ABM1_IPCSC_COEFF_SEL_R__SHIFT 0x00000010
+#define DC_ABM1_LS_FILTERED_MIN_MAX_LUMA__ABM1_LS_FILTERED_MAX_LUMA_MASK 0x03ff0000L
+#define DC_ABM1_LS_FILTERED_MIN_MAX_LUMA__ABM1_LS_FILTERED_MAX_LUMA__SHIFT 0x00000010
+#define DC_ABM1_LS_FILTERED_MIN_MAX_LUMA__ABM1_LS_FILTERED_MIN_LUMA_MASK 0x000003ffL
+#define DC_ABM1_LS_FILTERED_MIN_MAX_LUMA__ABM1_LS_FILTERED_MIN_LUMA__SHIFT 0x00000000
+#define DC_ABM1_LS_MAX_PIXEL_VALUE_COUNT__ABM1_LS_MAX_PIXEL_VALUE_COUNT_MASK 0x00ffffffL
+#define DC_ABM1_LS_MAX_PIXEL_VALUE_COUNT__ABM1_LS_MAX_PIXEL_VALUE_COUNT__SHIFT 0x00000000
+#define DC_ABM1_LS_MIN_MAX_LUMA__ABM1_LS_MAX_LUMA_MASK 0x03ff0000L
+#define DC_ABM1_LS_MIN_MAX_LUMA__ABM1_LS_MAX_LUMA__SHIFT 0x00000010
+#define DC_ABM1_LS_MIN_MAX_LUMA__ABM1_LS_MIN_LUMA_MASK 0x000003ffL
+#define DC_ABM1_LS_MIN_MAX_LUMA__ABM1_LS_MIN_LUMA__SHIFT 0x00000000
+#define DC_ABM1_LS_MIN_MAX_PIXEL_VALUE_THRES__ABM1_HGLS_REG_LOCK_MASK 0x80000000L
+#define DC_ABM1_LS_MIN_MAX_PIXEL_VALUE_THRES__ABM1_HGLS_REG_LOCK__SHIFT 0x0000001f
+#define DC_ABM1_LS_MIN_MAX_PIXEL_VALUE_THRES__ABM1_LS_MAX_PIXEL_VALUE_THRES_MASK 0x03ff0000L
+#define DC_ABM1_LS_MIN_MAX_PIXEL_VALUE_THRES__ABM1_LS_MAX_PIXEL_VALUE_THRES__SHIFT 0x00000010
+#define DC_ABM1_LS_MIN_MAX_PIXEL_VALUE_THRES__ABM1_LS_MIN_PIXEL_VALUE_THRES_MASK 0x000003ffL
+#define DC_ABM1_LS_MIN_MAX_PIXEL_VALUE_THRES__ABM1_LS_MIN_PIXEL_VALUE_THRES__SHIFT 0x00000000
+#define DC_ABM1_LS_MIN_PIXEL_VALUE_COUNT__ABM1_LS_MIN_PIXEL_VALUE_COUNT_MASK 0x00ffffffL
+#define DC_ABM1_LS_MIN_PIXEL_VALUE_COUNT__ABM1_LS_MIN_PIXEL_VALUE_COUNT__SHIFT 0x00000000
+#define DC_ABM1_LS_OVR_SCAN_BIN__ABM1_LS_OVR_SCAN_BIN_MASK 0x00ffffffL
+#define DC_ABM1_LS_OVR_SCAN_BIN__ABM1_LS_OVR_SCAN_BIN__SHIFT 0x00000000
+#define DC_ABM1_LS_PIXEL_COUNT__ABM1_LS_PIXEL_COUNT_MASK 0x00ffffffL
+#define DC_ABM1_LS_PIXEL_COUNT__ABM1_LS_PIXEL_COUNT__SHIFT 0x00000000
+#define DC_ABM1_LS_SAMPLE_RATE__ABM1_HGLS_REG_LOCK_MASK 0x80000000L
+#define DC_ABM1_LS_SAMPLE_RATE__ABM1_HGLS_REG_LOCK__SHIFT 0x0000001f
+#define DC_ABM1_LS_SAMPLE_RATE__ABM1_LS_INITIAL_SAMPLE_RATE_COUNT_VALUE_WHEN_RESET_MASK 0x00ff0000L
+#define DC_ABM1_LS_SAMPLE_RATE__ABM1_LS_INITIAL_SAMPLE_RATE_COUNT_VALUE_WHEN_RESET__SHIFT 0x00000010
+#define DC_ABM1_LS_SAMPLE_RATE__ABM1_LS_RESET_SAMPLE_RATE_FRAME_COUNTER_MASK 0x00000002L
+#define DC_ABM1_LS_SAMPLE_RATE__ABM1_LS_RESET_SAMPLE_RATE_FRAME_COUNTER__SHIFT 0x00000001
+#define DC_ABM1_LS_SAMPLE_RATE__ABM1_LS_SAMPLE_RATE_COUNT_EN_MASK 0x00000001L
+#define DC_ABM1_LS_SAMPLE_RATE__ABM1_LS_SAMPLE_RATE_COUNT_EN__SHIFT 0x00000000
+#define DC_ABM1_LS_SAMPLE_RATE__ABM1_LS_SAMPLE_RATE_FRAME_COUNT_MASK 0x0000ff00L
+#define DC_ABM1_LS_SAMPLE_RATE__ABM1_LS_SAMPLE_RATE_FRAME_COUNT__SHIFT 0x00000008
+#define DC_ABM1_LS_SUM_OF_LUMA__ABM1_LS_SUM_OF_LUMA_MASK 0xffffffffL
+#define DC_ABM1_LS_SUM_OF_LUMA__ABM1_LS_SUM_OF_LUMA__SHIFT 0x00000000
+#define DC_ABM1_OVERSCAN_PIXEL_VALUE__ABM1_OVERSCAN_B_PIXEL_VALUE_MASK 0x3ff00000L
+#define DC_ABM1_OVERSCAN_PIXEL_VALUE__ABM1_OVERSCAN_B_PIXEL_VALUE__SHIFT 0x00000014
+#define DC_ABM1_OVERSCAN_PIXEL_VALUE__ABM1_OVERSCAN_G_PIXEL_VALUE_MASK 0x000ffc00L
+#define DC_ABM1_OVERSCAN_PIXEL_VALUE__ABM1_OVERSCAN_G_PIXEL_VALUE__SHIFT 0x0000000a
+#define DC_ABM1_OVERSCAN_PIXEL_VALUE__ABM1_OVERSCAN_R_PIXEL_VALUE_MASK 0x000003ffL
+#define DC_ABM1_OVERSCAN_PIXEL_VALUE__ABM1_OVERSCAN_R_PIXEL_VALUE__SHIFT 0x00000000
+#define DCCG_AUDIO_DTO0_MODULE__DCCG_AUDIO_DTO0_MODULE_MASK 0xffffffffL
+#define DCCG_AUDIO_DTO0_MODULE__DCCG_AUDIO_DTO0_MODULE__SHIFT 0x00000000
+#define DCCG_AUDIO_DTO0_PHASE__DCCG_AUDIO_DTO0_PHASE_MASK 0xffffffffL
+#define DCCG_AUDIO_DTO0_PHASE__DCCG_AUDIO_DTO0_PHASE__SHIFT 0x00000000
+#define DCCG_AUDIO_DTO1_MODULE__DCCG_AUDIO_DTO1_MODULE_MASK 0xffffffffL
+#define DCCG_AUDIO_DTO1_MODULE__DCCG_AUDIO_DTO1_MODULE__SHIFT 0x00000000
+#define DCCG_AUDIO_DTO1_PHASE__DCCG_AUDIO_DTO1_PHASE_MASK 0xffffffffL
+#define DCCG_AUDIO_DTO1_PHASE__DCCG_AUDIO_DTO1_PHASE__SHIFT 0x00000000
+#define DCCG_AUDIO_DTO_SOURCE__DCCG_AUDIO_DTO0_SOURCE_SEL_MASK 0x00000007L
+#define DCCG_AUDIO_DTO_SOURCE__DCCG_AUDIO_DTO0_SOURCE_SEL__SHIFT 0x00000000
+#define DCCG_AUDIO_DTO_SOURCE__DCCG_AUDIO_DTO_SEL_MASK 0x00000010L
+#define DCCG_AUDIO_DTO_SOURCE__DCCG_AUDIO_DTO_SEL__SHIFT 0x00000004
+#define DCCG_CAC_STATUS__CAC_STATUS_RDDATA_MASK 0xffffffffL
+#define DCCG_CAC_STATUS__CAC_STATUS_RDDATA__SHIFT 0x00000000
+#define DCCG_GATE_DISABLE_CNTL__DACACLK_GATE_DISABLE_MASK 0x00000010L
+#define DCCG_GATE_DISABLE_CNTL__DACACLK_GATE_DISABLE__SHIFT 0x00000004
+#define DCCG_GATE_DISABLE_CNTL__DACBCLK_GATE_DISABLE_MASK 0x00000020L
+#define DCCG_GATE_DISABLE_CNTL__DACBCLK_GATE_DISABLE__SHIFT 0x00000005
+#define DCCG_GATE_DISABLE_CNTL__DISPCLK_DCCG_GATE_DISABLE_MASK 0x00000001L
+#define DCCG_GATE_DISABLE_CNTL__DISPCLK_DCCG_GATE_DISABLE__SHIFT 0x00000000
+#define DCCG_GATE_DISABLE_CNTL__DISPCLK_RAMP_DIV_ID_MASK 0x07000000L
+#define DCCG_GATE_DISABLE_CNTL__DISPCLK_RAMP_DIV_ID__SHIFT 0x00000018
+#define DCCG_GATE_DISABLE_CNTL__DISPCLK_R_DCCG_GATE_DISABLE_MASK 0x00000002L
+#define DCCG_GATE_DISABLE_CNTL__DISPCLK_R_DCCG_GATE_DISABLE__SHIFT 0x00000001
+#define DCCG_GATE_DISABLE_CNTL__DISPCLK_R_DCCG_RAMP_DISABLE_MASK 0x00100000L
+#define DCCG_GATE_DISABLE_CNTL__DISPCLK_R_DCCG_RAMP_DISABLE__SHIFT 0x00000014
+#define DCCG_GATE_DISABLE_CNTL__DVOACLK_GATE_DISABLE_MASK 0x00000040L
+#define DCCG_GATE_DISABLE_CNTL__DVOACLK_GATE_DISABLE__SHIFT 0x00000006
+#define DCCG_GATE_DISABLE_CNTL__PCLK_TV_GATE_DISABLE_MASK 0x00010000L
+#define DCCG_GATE_DISABLE_CNTL__PCLK_TV_GATE_DISABLE__SHIFT 0x00000010
+#define DCCG_GATE_DISABLE_CNTL__SCLK_GATE_DISABLE_MASK 0x00000004L
+#define DCCG_GATE_DISABLE_CNTL__SCLK_GATE_DISABLE__SHIFT 0x00000002
+#define DCCG_GATE_DISABLE_CNTL__SCLK_RAMP_DIV_ID_MASK 0x70000000L
+#define DCCG_GATE_DISABLE_CNTL__SCLK_RAMP_DIV_ID__SHIFT 0x0000001c
+#define DCCG_GATE_DISABLE_CNTL__SYMCLKA_GATE_DISABLE_MASK 0x00000100L
+#define DCCG_GATE_DISABLE_CNTL__SYMCLKA_GATE_DISABLE__SHIFT 0x00000008
+#define DCCG_GATE_DISABLE_CNTL__SYMCLKB_GATE_DISABLE_MASK 0x00000200L
+#define DCCG_GATE_DISABLE_CNTL__SYMCLKB_GATE_DISABLE__SHIFT 0x00000009
+#define DCCG_GATE_DISABLE_CNTL__SYMCLKC_GATE_DISABLE_MASK 0x00000400L
+#define DCCG_GATE_DISABLE_CNTL__SYMCLKC_GATE_DISABLE__SHIFT 0x0000000a
+#define DCCG_GATE_DISABLE_CNTL__SYMCLKD_GATE_DISABLE_MASK 0x00000800L
+#define DCCG_GATE_DISABLE_CNTL__SYMCLKD_GATE_DISABLE__SHIFT 0x0000000b
+#define DCCG_GATE_DISABLE_CNTL__SYMCLKE_GATE_DISABLE_MASK 0x00001000L
+#define DCCG_GATE_DISABLE_CNTL__SYMCLKE_GATE_DISABLE__SHIFT 0x0000000c
+#define DCCG_GATE_DISABLE_CNTL__SYMCLKF_GATE_DISABLE_MASK 0x00002000L
+#define DCCG_GATE_DISABLE_CNTL__SYMCLKF_GATE_DISABLE__SHIFT 0x0000000d
+#define DCCG_GTC_CNTL__DCCG_GTC_ENABLE_MASK 0x00000001L
+#define DCCG_GTC_CNTL__DCCG_GTC_ENABLE__SHIFT 0x00000000
+#define DCCG_GTC_CURRENT__DCCG_GTC_CURRENT_MASK 0xffffffffL
+#define DCCG_GTC_CURRENT__DCCG_GTC_CURRENT__SHIFT 0x00000000
+#define DCCG_GTC_DTO_MODULO__DCCG_GTC_DTO_MODULO_MASK 0xffffffffL
+#define DCCG_GTC_DTO_MODULO__DCCG_GTC_DTO_MODULO__SHIFT 0x00000000
+#define DCCG_PERFMON_CNTL__DCCG_PERF_CRTC_SEL_MASK 0x00000700L
+#define DCCG_PERFMON_CNTL__DCCG_PERF_CRTC_SEL__SHIFT 0x00000008
+#define DCCG_PERFMON_CNTL__DCCG_PERF_DISPCLK_ENABLE_MASK 0x00000001L
+#define DCCG_PERFMON_CNTL__DCCG_PERF_DISPCLK_ENABLE__SHIFT 0x00000000
+#define DCCG_PERFMON_CNTL__DCCG_PERF_DPREFCLK_ENABLE_MASK 0x00000002L
+#define DCCG_PERFMON_CNTL__DCCG_PERF_DPREFCLK_ENABLE__SHIFT 0x00000001
+#define DCCG_PERFMON_CNTL__DCCG_PERF_MODE_HSYNC_MASK 0x00000080L
+#define DCCG_PERFMON_CNTL__DCCG_PERF_MODE_HSYNC__SHIFT 0x00000007
+#define DCCG_PERFMON_CNTL__DCCG_PERF_MODE_VSYNC_MASK 0x00000040L
+#define DCCG_PERFMON_CNTL__DCCG_PERF_MODE_VSYNC__SHIFT 0x00000006
+#define DCCG_PERFMON_CNTL__DCCG_PERF_PIXCLK0_ENABLE_MASK 0x00000010L
+#define DCCG_PERFMON_CNTL__DCCG_PERF_PIXCLK0_ENABLE__SHIFT 0x00000004
+#define DCCG_PERFMON_CNTL__DCCG_PERF_PIXCLK1_ENABLE_MASK 0x00000004L
+#define DCCG_PERFMON_CNTL__DCCG_PERF_PIXCLK1_ENABLE__SHIFT 0x00000002
+#define DCCG_PERFMON_CNTL__DCCG_PERF_PIXCLK2_ENABLE_MASK 0x00000008L
+#define DCCG_PERFMON_CNTL__DCCG_PERF_PIXCLK2_ENABLE__SHIFT 0x00000003
+#define DCCG_PERFMON_CNTL__DCCG_PERF_RUN_MASK 0x00000020L
+#define DCCG_PERFMON_CNTL__DCCG_PERF_RUN__SHIFT 0x00000005
+#define DCCG_SOFT_RESET__REFCLK_SOFT_RESET_MASK 0x00000001L
+#define DCCG_SOFT_RESET__REFCLK_SOFT_RESET__SHIFT 0x00000000
+#define DCCG_TEST_CLK_SEL__DCCG_TEST_CLK_GENERICA_INV_MASK 0x00010000L
+#define DCCG_TEST_CLK_SEL__DCCG_TEST_CLK_GENERICA_INV__SHIFT 0x00000010
+#define DCCG_TEST_CLK_SEL__DCCG_TEST_CLK_GENERICA_SEL_MASK 0x000000ffL
+#define DCCG_TEST_CLK_SEL__DCCG_TEST_CLK_GENERICA_SEL__SHIFT 0x00000000
+#define DCCG_TEST_CLK_SEL__DCCG_TEST_CLK_GENERICB_INV_MASK 0x01000000L
+#define DCCG_TEST_CLK_SEL__DCCG_TEST_CLK_GENERICB_INV__SHIFT 0x00000018
+#define DCCG_TEST_CLK_SEL__DCCG_TEST_CLK_GENERICB_SEL_MASK 0x0000ff00L
+#define DCCG_TEST_CLK_SEL__DCCG_TEST_CLK_GENERICB_SEL__SHIFT 0x00000008
+#define DCCG_TEST_DEBUG_DATA__DCCG_TEST_DEBUG_DATA_MASK 0xffffffffL
+#define DCCG_TEST_DEBUG_DATA__DCCG_TEST_DEBUG_DATA__SHIFT 0x00000000
+#define DCCG_TEST_DEBUG_INDEX__DCCG_DBG_SEL_MASK 0x00001000L
+#define DCCG_TEST_DEBUG_INDEX__DCCG_DBG_SEL__SHIFT 0x0000000c
+#define DCCG_TEST_DEBUG_INDEX__DCCG_TEST_DEBUG_INDEX_MASK 0x000000ffL
+#define DCCG_TEST_DEBUG_INDEX__DCCG_TEST_DEBUG_INDEX__SHIFT 0x00000000
+#define DCCG_TEST_DEBUG_INDEX__DCCG_TEST_DEBUG_WRITE_EN_MASK 0x00000100L
+#define DCCG_TEST_DEBUG_INDEX__DCCG_TEST_DEBUG_WRITE_EN__SHIFT 0x00000008
+#define DCCG_VPCLK_CNTL__AZ_LIGHT_SLEEP_DIS_MASK 0x00000004L
+#define DCCG_VPCLK_CNTL__AZ_LIGHT_SLEEP_DIS__SHIFT 0x00000002
+#define DCCG_VPCLK_CNTL__AZ_MEM_SHUTDOWN_DIS_MASK 0x04000000L
+#define DCCG_VPCLK_CNTL__AZ_MEM_SHUTDOWN_DIS__SHIFT 0x0000001a
+#define DCCG_VPCLK_CNTL__DCCG_VPCLK_POL_MASK 0x00000001L
+#define DCCG_VPCLK_CNTL__DCCG_VPCLK_POL__SHIFT 0x00000000
+#define DCCG_VPCLK_CNTL__DMCU_LIGHT_SLEEP_DIS_MASK 0x00000008L
+#define DCCG_VPCLK_CNTL__DMCU_LIGHT_SLEEP_DIS__SHIFT 0x00000003
+#define DCCG_VPCLK_CNTL__DMCU_MEM_SHUTDOWN_DIS_MASK 0x00010000L
+#define DCCG_VPCLK_CNTL__DMCU_MEM_SHUTDOWN_DIS__SHIFT 0x00000010
+#define DCCG_VPCLK_CNTL__DMIF0_LIGHT_SLEEP_DIS_MASK 0x00000100L
+#define DCCG_VPCLK_CNTL__DMIF0_LIGHT_SLEEP_DIS__SHIFT 0x00000008
+#define DCCG_VPCLK_CNTL__DMIF0_MEM_SHUTDOWN_DIS_MASK 0x00100000L
+#define DCCG_VPCLK_CNTL__DMIF0_MEM_SHUTDOWN_DIS__SHIFT 0x00000014
+#define DCCG_VPCLK_CNTL__DMIF1_LIGHT_SLEEP_DIS_MASK 0x00000200L
+#define DCCG_VPCLK_CNTL__DMIF1_LIGHT_SLEEP_DIS__SHIFT 0x00000009
+#define DCCG_VPCLK_CNTL__DMIF1_MEM_SHUTDOWN_DIS_MASK 0x00200000L
+#define DCCG_VPCLK_CNTL__DMIF1_MEM_SHUTDOWN_DIS__SHIFT 0x00000015
+#define DCCG_VPCLK_CNTL__DMIF2_LIGHT_SLEEP_DIS_MASK 0x00000400L
+#define DCCG_VPCLK_CNTL__DMIF2_LIGHT_SLEEP_DIS__SHIFT 0x0000000a
+#define DCCG_VPCLK_CNTL__DMIF2_MEM_SHUTDOWN_DIS_MASK 0x00400000L
+#define DCCG_VPCLK_CNTL__DMIF2_MEM_SHUTDOWN_DIS__SHIFT 0x00000016
+#define DCCG_VPCLK_CNTL__DMIF3_LIGHT_SLEEP_DIS_MASK 0x00000800L
+#define DCCG_VPCLK_CNTL__DMIF3_LIGHT_SLEEP_DIS__SHIFT 0x0000000b
+#define DCCG_VPCLK_CNTL__DMIF3_MEM_SHUTDOWN_DIS_MASK 0x00800000L
+#define DCCG_VPCLK_CNTL__DMIF3_MEM_SHUTDOWN_DIS__SHIFT 0x00000017
+#define DCCG_VPCLK_CNTL__DMIF4_LIGHT_SLEEP_DIS_MASK 0x00001000L
+#define DCCG_VPCLK_CNTL__DMIF4_LIGHT_SLEEP_DIS__SHIFT 0x0000000c
+#define DCCG_VPCLK_CNTL__DMIF4_MEM_SHUTDOWN_DIS_MASK 0x01000000L
+#define DCCG_VPCLK_CNTL__DMIF4_MEM_SHUTDOWN_DIS__SHIFT 0x00000018
+#define DCCG_VPCLK_CNTL__DMIF5_LIGHT_SLEEP_DIS_MASK 0x00002000L
+#define DCCG_VPCLK_CNTL__DMIF5_LIGHT_SLEEP_DIS__SHIFT 0x0000000d
+#define DCCG_VPCLK_CNTL__DMIF5_MEM_SHUTDOWN_DIS_MASK 0x02000000L
+#define DCCG_VPCLK_CNTL__DMIF5_MEM_SHUTDOWN_DIS__SHIFT 0x00000019
+#define DCCG_VPCLK_CNTL__DMIF_XLR_LIGHT_SLEEP_MODE_FORCE_MASK 0x00000020L
+#define DCCG_VPCLK_CNTL__DMIF_XLR_LIGHT_SLEEP_MODE_FORCE__SHIFT 0x00000005
+#define DCCG_VPCLK_CNTL__DMIF_XLR_MEM_SHUTDOWN_MODE_FORCE_MASK 0x00040000L
+#define DCCG_VPCLK_CNTL__DMIF_XLR_MEM_SHUTDOWN_MODE_FORCE__SHIFT 0x00000012
+#define DCCG_VPCLK_CNTL__FBC_LIGHT_SLEEP_DIS_MASK 0x00004000L
+#define DCCG_VPCLK_CNTL__FBC_LIGHT_SLEEP_DIS__SHIFT 0x0000000e
+#define DCCG_VPCLK_CNTL__FBC_MEM_SHUTDOWN_DIS_MASK 0x00080000L
+#define DCCG_VPCLK_CNTL__FBC_MEM_SHUTDOWN_DIS__SHIFT 0x00000013
+#define DCCG_VPCLK_CNTL__MCIF_LIGHT_SLEEP_MODE_FORCE_MASK 0x00000010L
+#define DCCG_VPCLK_CNTL__MCIF_LIGHT_SLEEP_MODE_FORCE__SHIFT 0x00000004
+#define DCCG_VPCLK_CNTL__MCIF_MEM_SHUTDOWN_MODE_FORCE_MASK 0x00020000L
+#define DCCG_VPCLK_CNTL__MCIF_MEM_SHUTDOWN_MODE_FORCE__SHIFT 0x00000011
+#define DCCG_VPCLK_CNTL__VGA_LIGHT_SLEEP_MODE_FORCE_MASK 0x00000002L
+#define DCCG_VPCLK_CNTL__VGA_LIGHT_SLEEP_MODE_FORCE__SHIFT 0x00000001
+#define DCCG_VPCLK_CNTL__VIP_LIGHT_SLEEP_DIS_MASK 0x00008000L
+#define DCCG_VPCLK_CNTL__VIP_LIGHT_SLEEP_DIS__SHIFT 0x0000000f
+#define DCDEBUG_BUS_CLK1_SEL__DCDEBUG_BUS_CLK1_SEL_MASK 0xffffffffL
+#define DCDEBUG_BUS_CLK1_SEL__DCDEBUG_BUS_CLK1_SEL__SHIFT 0x00000000
+#define DCDEBUG_BUS_CLK2_SEL__DCDEBUG_BUS_CLK2_SEL_MASK 0xffffffffL
+#define DCDEBUG_BUS_CLK2_SEL__DCDEBUG_BUS_CLK2_SEL__SHIFT 0x00000000
+#define DCDEBUG_BUS_CLK3_SEL__DCDEBUG_BUS_CLK3_SEL_MASK 0xffffffffL
+#define DCDEBUG_BUS_CLK3_SEL__DCDEBUG_BUS_CLK3_SEL__SHIFT 0x00000000
+#define DCDEBUG_BUS_CLK4_SEL__DCDEBUG_BUS_CLK4_SEL_MASK 0xffffffffL
+#define DCDEBUG_BUS_CLK4_SEL__DCDEBUG_BUS_CLK4_SEL__SHIFT 0x00000000
+#define DCDEBUG_OUT_CNTL__DCDEBUG_BLOCK_SEL_MASK 0x0000001fL
+#define DCDEBUG_OUT_CNTL__DCDEBUG_BLOCK_SEL__SHIFT 0x00000000
+#define DCDEBUG_OUT_CNTL__DCDEBUG_OUT_EN_MASK 0x00000020L
+#define DCDEBUG_OUT_CNTL__DCDEBUG_OUT_EN__SHIFT 0x00000005
+#define DCDEBUG_OUT_CNTL__DCDEBUG_OUT_PIN_SEL_MASK 0x00000040L
+#define DCDEBUG_OUT_CNTL__DCDEBUG_OUT_PIN_SEL__SHIFT 0x00000006
+#define DCDEBUG_OUT_CNTL__DCDEBUG_OUT_SEL_MASK 0x00300000L
+#define DCDEBUG_OUT_CNTL__DCDEBUG_OUT_SEL__SHIFT 0x00000014
+#define DCDEBUG_OUT_CNTL__DCDEBUG_OUT_TEST_DATA_EN_MASK 0x00000080L
+#define DCDEBUG_OUT_CNTL__DCDEBUG_OUT_TEST_DATA_EN__SHIFT 0x00000007
+#define DCDEBUG_OUT_CNTL__DCDEBUG_OUT_TEST_DATA_MASK 0x000fff00L
+#define DCDEBUG_OUT_CNTL__DCDEBUG_OUT_TEST_DATA__SHIFT 0x00000008
+#define DCDEBUG_OUT_DATA__DCDEBUG_OUT_DATA_MASK 0xffffffffL
+#define DCDEBUG_OUT_DATA__DCDEBUG_OUT_DATA__SHIFT 0x00000000
+#define DCDEBUG_OUT_PIN_OVERRIDE__DCDEBUG_OUT_OVERRIDE1_EN_MASK 0x00001000L
+#define DCDEBUG_OUT_PIN_OVERRIDE__DCDEBUG_OUT_OVERRIDE1_EN__SHIFT 0x0000000c
+#define DCDEBUG_OUT_PIN_OVERRIDE__DCDEBUG_OUT_OVERRIDE1_PIN_SEL_MASK 0x0000000fL
+#define DCDEBUG_OUT_PIN_OVERRIDE__DCDEBUG_OUT_OVERRIDE1_PIN_SEL__SHIFT 0x00000000
+#define DCDEBUG_OUT_PIN_OVERRIDE__DCDEBUG_OUT_OVERRIDE1_REGBIT_SEL_MASK 0x000001f0L
+#define DCDEBUG_OUT_PIN_OVERRIDE__DCDEBUG_OUT_OVERRIDE1_REGBIT_SEL__SHIFT 0x00000004
+#define DCDEBUG_OUT_PIN_OVERRIDE__DCDEBUG_OUT_OVERRIDE2_EN_MASK 0x10000000L
+#define DCDEBUG_OUT_PIN_OVERRIDE__DCDEBUG_OUT_OVERRIDE2_EN__SHIFT 0x0000001c
+#define DCDEBUG_OUT_PIN_OVERRIDE__DCDEBUG_OUT_OVERRIDE2_PIN_SEL_MASK 0x000f0000L
+#define DCDEBUG_OUT_PIN_OVERRIDE__DCDEBUG_OUT_OVERRIDE2_PIN_SEL__SHIFT 0x00000010
+#define DCDEBUG_OUT_PIN_OVERRIDE__DCDEBUG_OUT_OVERRIDE2_REGBIT_SEL_MASK 0x01f00000L
+#define DCDEBUG_OUT_PIN_OVERRIDE__DCDEBUG_OUT_OVERRIDE2_REGBIT_SEL__SHIFT 0x00000014
+#define DC_DMCU_SCRATCH__DMCU_SCRATCH_MASK 0xffffffffL
+#define DC_DMCU_SCRATCH__DMCU_SCRATCH__SHIFT 0x00000000
+#define DC_DVODATA_CONFIG__DVO_ALTER_MAPPING_EN_MASK 0x00200000L
+#define DC_DVODATA_CONFIG__DVO_ALTER_MAPPING_EN__SHIFT 0x00000015
+#define DC_DVODATA_CONFIG__VIP_ALTER_MAPPING_EN_MASK 0x00100000L
+#define DC_DVODATA_CONFIG__VIP_ALTER_MAPPING_EN__SHIFT 0x00000014
+#define DC_DVODATA_CONFIG__VIP_MUX_EN_MASK 0x00080000L
+#define DC_DVODATA_CONFIG__VIP_MUX_EN__SHIFT 0x00000013
+#define DCFE0_SOFT_RESET__CRTC0_SOFT_RESET_MASK 0x00000010L
+#define DCFE0_SOFT_RESET__CRTC0_SOFT_RESET__SHIFT 0x00000004
+#define DCFE0_SOFT_RESET__DCP0_PIXPIPE_SOFT_RESET_MASK 0x00000001L
+#define DCFE0_SOFT_RESET__DCP0_PIXPIPE_SOFT_RESET__SHIFT 0x00000000
+#define DCFE0_SOFT_RESET__DCP0_REQ_SOFT_RESET_MASK 0x00000002L
+#define DCFE0_SOFT_RESET__DCP0_REQ_SOFT_RESET__SHIFT 0x00000001
+#define DCFE0_SOFT_RESET__SCL0_ALU_SOFT_RESET_MASK 0x00000004L
+#define DCFE0_SOFT_RESET__SCL0_ALU_SOFT_RESET__SHIFT 0x00000002
+#define DCFE0_SOFT_RESET__SCL0_SOFT_RESET_MASK 0x00000008L
+#define DCFE0_SOFT_RESET__SCL0_SOFT_RESET__SHIFT 0x00000003
+#define DCFE1_SOFT_RESET__CRTC1_SOFT_RESET_MASK 0x00000010L
+#define DCFE1_SOFT_RESET__CRTC1_SOFT_RESET__SHIFT 0x00000004
+#define DCFE1_SOFT_RESET__DCP1_PIXPIPE_SOFT_RESET_MASK 0x00000001L
+#define DCFE1_SOFT_RESET__DCP1_PIXPIPE_SOFT_RESET__SHIFT 0x00000000
+#define DCFE1_SOFT_RESET__DCP1_REQ_SOFT_RESET_MASK 0x00000002L
+#define DCFE1_SOFT_RESET__DCP1_REQ_SOFT_RESET__SHIFT 0x00000001
+#define DCFE1_SOFT_RESET__SCL1_ALU_SOFT_RESET_MASK 0x00000004L
+#define DCFE1_SOFT_RESET__SCL1_ALU_SOFT_RESET__SHIFT 0x00000002
+#define DCFE1_SOFT_RESET__SCL1_SOFT_RESET_MASK 0x00000008L
+#define DCFE1_SOFT_RESET__SCL1_SOFT_RESET__SHIFT 0x00000003
+#define DCFE2_SOFT_RESET__CRTC2_SOFT_RESET_MASK 0x00000010L
+#define DCFE2_SOFT_RESET__CRTC2_SOFT_RESET__SHIFT 0x00000004
+#define DCFE2_SOFT_RESET__DCP2_PIXPIPE_SOFT_RESET_MASK 0x00000001L
+#define DCFE2_SOFT_RESET__DCP2_PIXPIPE_SOFT_RESET__SHIFT 0x00000000
+#define DCFE2_SOFT_RESET__DCP2_REQ_SOFT_RESET_MASK 0x00000002L
+#define DCFE2_SOFT_RESET__DCP2_REQ_SOFT_RESET__SHIFT 0x00000001
+#define DCFE2_SOFT_RESET__SCL2_ALU_SOFT_RESET_MASK 0x00000004L
+#define DCFE2_SOFT_RESET__SCL2_ALU_SOFT_RESET__SHIFT 0x00000002
+#define DCFE2_SOFT_RESET__SCL2_SOFT_RESET_MASK 0x00000008L
+#define DCFE2_SOFT_RESET__SCL2_SOFT_RESET__SHIFT 0x00000003
+#define DCFE3_SOFT_RESET__CRTC3_SOFT_RESET_MASK 0x00000010L
+#define DCFE3_SOFT_RESET__CRTC3_SOFT_RESET__SHIFT 0x00000004
+#define DCFE3_SOFT_RESET__DCP3_PIXPIPE_SOFT_RESET_MASK 0x00000001L
+#define DCFE3_SOFT_RESET__DCP3_PIXPIPE_SOFT_RESET__SHIFT 0x00000000
+#define DCFE3_SOFT_RESET__DCP3_REQ_SOFT_RESET_MASK 0x00000002L
+#define DCFE3_SOFT_RESET__DCP3_REQ_SOFT_RESET__SHIFT 0x00000001
+#define DCFE3_SOFT_RESET__SCL3_ALU_SOFT_RESET_MASK 0x00000004L
+#define DCFE3_SOFT_RESET__SCL3_ALU_SOFT_RESET__SHIFT 0x00000002
+#define DCFE3_SOFT_RESET__SCL3_SOFT_RESET_MASK 0x00000008L
+#define DCFE3_SOFT_RESET__SCL3_SOFT_RESET__SHIFT 0x00000003
+#define DCFE4_SOFT_RESET__CRTC4_SOFT_RESET_MASK 0x00000010L
+#define DCFE4_SOFT_RESET__CRTC4_SOFT_RESET__SHIFT 0x00000004
+#define DCFE4_SOFT_RESET__DCP4_PIXPIPE_SOFT_RESET_MASK 0x00000001L
+#define DCFE4_SOFT_RESET__DCP4_PIXPIPE_SOFT_RESET__SHIFT 0x00000000
+#define DCFE4_SOFT_RESET__DCP4_REQ_SOFT_RESET_MASK 0x00000002L
+#define DCFE4_SOFT_RESET__DCP4_REQ_SOFT_RESET__SHIFT 0x00000001
+#define DCFE4_SOFT_RESET__SCL4_ALU_SOFT_RESET_MASK 0x00000004L
+#define DCFE4_SOFT_RESET__SCL4_ALU_SOFT_RESET__SHIFT 0x00000002
+#define DCFE4_SOFT_RESET__SCL4_SOFT_RESET_MASK 0x00000008L
+#define DCFE4_SOFT_RESET__SCL4_SOFT_RESET__SHIFT 0x00000003
+#define DCFE5_SOFT_RESET__CRTC5_SOFT_RESET_MASK 0x00000010L
+#define DCFE5_SOFT_RESET__CRTC5_SOFT_RESET__SHIFT 0x00000004
+#define DCFE5_SOFT_RESET__DCP5_PIXPIPE_SOFT_RESET_MASK 0x00000001L
+#define DCFE5_SOFT_RESET__DCP5_PIXPIPE_SOFT_RESET__SHIFT 0x00000000
+#define DCFE5_SOFT_RESET__DCP5_REQ_SOFT_RESET_MASK 0x00000002L
+#define DCFE5_SOFT_RESET__DCP5_REQ_SOFT_RESET__SHIFT 0x00000001
+#define DCFE5_SOFT_RESET__SCL5_ALU_SOFT_RESET_MASK 0x00000004L
+#define DCFE5_SOFT_RESET__SCL5_ALU_SOFT_RESET__SHIFT 0x00000002
+#define DCFE5_SOFT_RESET__SCL5_SOFT_RESET_MASK 0x00000008L
+#define DCFE5_SOFT_RESET__SCL5_SOFT_RESET__SHIFT 0x00000003
+#define DCFE_DBG_SEL__DCFE_DBG_SEL_MASK 0x0000000fL
+#define DCFE_DBG_SEL__DCFE_DBG_SEL__SHIFT 0x00000000
+#define DCFE_MEM_LIGHT_SLEEP_CNTL__DCP_CURSOR_LIGHT_SLEEP_DIS_MASK 0x00000001L
+#define DCFE_MEM_LIGHT_SLEEP_CNTL__DCP_CURSOR_LIGHT_SLEEP_DIS__SHIFT 0x00000000
+#define DCFE_MEM_LIGHT_SLEEP_CNTL__DCP_CURSOR_MEM_PWR_STATE_MASK 0x00000300L
+#define DCFE_MEM_LIGHT_SLEEP_CNTL__DCP_CURSOR_MEM_PWR_STATE__SHIFT 0x00000008
+#define DCFE_MEM_LIGHT_SLEEP_CNTL__DCP_LUT_LIGHT_SLEEP_DIS_MASK 0x00000004L
+#define DCFE_MEM_LIGHT_SLEEP_CNTL__DCP_LUT_LIGHT_SLEEP_DIS__SHIFT 0x00000002
+#define DCFE_MEM_LIGHT_SLEEP_CNTL__DCP_LUT_MEM_PWR_STATE_MASK 0x00003000L
+#define DCFE_MEM_LIGHT_SLEEP_CNTL__DCP_LUT_MEM_PWR_STATE__SHIFT 0x0000000c
+#define DCFE_MEM_LIGHT_SLEEP_CNTL__LB1_MEM_SHUTDOWN_DIS_MASK 0x20000000L
+#define DCFE_MEM_LIGHT_SLEEP_CNTL__LB1_MEM_SHUTDOWN_DIS__SHIFT 0x0000001d
+#define DCFE_MEM_LIGHT_SLEEP_CNTL__LB2_MEM_SHUTDOWN_DIS_MASK 0x40000000L
+#define DCFE_MEM_LIGHT_SLEEP_CNTL__LB2_MEM_SHUTDOWN_DIS__SHIFT 0x0000001e
+#define DCFE_MEM_LIGHT_SLEEP_CNTL__LB_LIGHT_SLEEP_DIS_MASK 0x00000010L
+#define DCFE_MEM_LIGHT_SLEEP_CNTL__LB_LIGHT_SLEEP_DIS__SHIFT 0x00000004
+#define DCFE_MEM_LIGHT_SLEEP_CNTL__LB_MEM_PWR_STATE_0_MASK 0x00030000L
+#define DCFE_MEM_LIGHT_SLEEP_CNTL__LB_MEM_PWR_STATE_0__SHIFT 0x00000010
+#define DCFE_MEM_LIGHT_SLEEP_CNTL__LB_MEM_PWR_STATE_1_MASK 0x00c00000L
+#define DCFE_MEM_LIGHT_SLEEP_CNTL__LB_MEM_PWR_STATE_1__SHIFT 0x00000016
+#define DCFE_MEM_LIGHT_SLEEP_CNTL__LB_MEM_PWR_STATE_2_MASK 0x03000000L
+#define DCFE_MEM_LIGHT_SLEEP_CNTL__LB_MEM_PWR_STATE_2__SHIFT 0x00000018
+#define DCFE_MEM_LIGHT_SLEEP_CNTL__OVLSCL_LIGHT_SLEEP_DIS_MASK 0x00000008L
+#define DCFE_MEM_LIGHT_SLEEP_CNTL__OVLSCL_LIGHT_SLEEP_DIS__SHIFT 0x00000003
+#define DCFE_MEM_LIGHT_SLEEP_CNTL__OVLSCL_MEM_PWR_STATE_MASK 0x0000c000L
+#define DCFE_MEM_LIGHT_SLEEP_CNTL__OVLSCL_MEM_PWR_STATE__SHIFT 0x0000000e
+#define DCFE_MEM_LIGHT_SLEEP_CNTL__PIPE_MEM_SHUTDOWN_DIS_MASK 0x10000000L
+#define DCFE_MEM_LIGHT_SLEEP_CNTL__PIPE_MEM_SHUTDOWN_DIS__SHIFT 0x0000001c
+#define DCFE_MEM_LIGHT_SLEEP_CNTL__REGAMMA_LUT_LIGHT_SLEEP_DIS_MASK 0x00000040L
+#define DCFE_MEM_LIGHT_SLEEP_CNTL__REGAMMA_LUT_LIGHT_SLEEP_DIS__SHIFT 0x00000006
+#define DCFE_MEM_LIGHT_SLEEP_CNTL__REGAMMA_LUT_MEM_PWR_STATE_MASK 0x00300000L
+#define DCFE_MEM_LIGHT_SLEEP_CNTL__REGAMMA_LUT_MEM_PWR_STATE__SHIFT 0x00000014
+#define DCFE_MEM_LIGHT_SLEEP_CNTL__SCL_LIGHT_SLEEP_DIS_MASK 0x00000020L
+#define DCFE_MEM_LIGHT_SLEEP_CNTL__SCL_LIGHT_SLEEP_DIS__SHIFT 0x00000005
+#define DCFE_MEM_LIGHT_SLEEP_CNTL__SCL_MEM_PWR_STATE_MASK 0x000c0000L
+#define DCFE_MEM_LIGHT_SLEEP_CNTL__SCL_MEM_PWR_STATE__SHIFT 0x00000012
+#define DC_GENERICA__GENERICA_EN_MASK 0x00000001L
+#define DC_GENERICA__GENERICA_EN__SHIFT 0x00000000
+#define DC_GENERICA__GENERICA_SEL_MASK 0x00000f00L
+#define DC_GENERICA__GENERICA_SEL__SHIFT 0x00000008
+#define DC_GENERICA__GENERICA_UNIPHY_FBDIV_CLK_DIV2_SEL_MASK 0x07000000L
+#define DC_GENERICA__GENERICA_UNIPHY_FBDIV_CLK_DIV2_SEL__SHIFT 0x00000018
+#define DC_GENERICA__GENERICA_UNIPHY_FBDIV_CLK_SEL_MASK 0x00070000L
+#define DC_GENERICA__GENERICA_UNIPHY_FBDIV_CLK_SEL__SHIFT 0x00000010
+#define DC_GENERICA__GENERICA_UNIPHY_FBDIV_SSC_CLK_SEL_MASK 0x00700000L
+#define DC_GENERICA__GENERICA_UNIPHY_FBDIV_SSC_CLK_SEL__SHIFT 0x00000014
+#define DC_GENERICA__GENERICA_UNIPHY_REFDIV_CLK_SEL_MASK 0x00007000L
+#define DC_GENERICA__GENERICA_UNIPHY_REFDIV_CLK_SEL__SHIFT 0x0000000c
+#define DC_GENERICB__GENERICB_EN_MASK 0x00000001L
+#define DC_GENERICB__GENERICB_EN__SHIFT 0x00000000
+#define DC_GENERICB__GENERICB_SEL_MASK 0x00000f00L
+#define DC_GENERICB__GENERICB_SEL__SHIFT 0x00000008
+#define DC_GENERICB__GENERICB_UNIPHY_FBDIV_CLK_DIV2_SEL_MASK 0x07000000L
+#define DC_GENERICB__GENERICB_UNIPHY_FBDIV_CLK_DIV2_SEL__SHIFT 0x00000018
+#define DC_GENERICB__GENERICB_UNIPHY_FBDIV_CLK_SEL_MASK 0x00070000L
+#define DC_GENERICB__GENERICB_UNIPHY_FBDIV_CLK_SEL__SHIFT 0x00000010
+#define DC_GENERICB__GENERICB_UNIPHY_FBDIV_SSC_CLK_SEL_MASK 0x00700000L
+#define DC_GENERICB__GENERICB_UNIPHY_FBDIV_SSC_CLK_SEL__SHIFT 0x00000014
+#define DC_GENERICB__GENERICB_UNIPHY_REFDIV_CLK_SEL_MASK 0x00007000L
+#define DC_GENERICB__GENERICB_UNIPHY_REFDIV_CLK_SEL__SHIFT 0x0000000c
+#define DC_GPIO_DDC1_A__DC_GPIO_DDC1CLK_A_MASK 0x00000001L
+#define DC_GPIO_DDC1_A__DC_GPIO_DDC1CLK_A__SHIFT 0x00000000
+#define DC_GPIO_DDC1_A__DC_GPIO_DDC1DATA_A_MASK 0x00000100L
+#define DC_GPIO_DDC1_A__DC_GPIO_DDC1DATA_A__SHIFT 0x00000008
+#define DC_GPIO_DDC1_EN__DC_GPIO_DDC1CLK_EN_MASK 0x00000001L
+#define DC_GPIO_DDC1_EN__DC_GPIO_DDC1CLK_EN__SHIFT 0x00000000
+#define DC_GPIO_DDC1_EN__DC_GPIO_DDC1DATA_EN_MASK 0x00000100L
+#define DC_GPIO_DDC1_EN__DC_GPIO_DDC1DATA_EN__SHIFT 0x00000008
+#define DC_GPIO_DDC1_MASK__ALLOW_HW_DDC1_PD_EN_MASK 0x00400000L
+#define DC_GPIO_DDC1_MASK__ALLOW_HW_DDC1_PD_EN__SHIFT 0x00000016
+#define DC_GPIO_DDC1_MASK__AUX1_POL_MASK 0x00100000L
+#define DC_GPIO_DDC1_MASK__AUX1_POL__SHIFT 0x00000014
+#define DC_GPIO_DDC1_MASK__AUX_PAD1_MODE_MASK 0x00010000L
+#define DC_GPIO_DDC1_MASK__AUX_PAD1_MODE__SHIFT 0x00000010
+#define DC_GPIO_DDC1_MASK__DC_GPIO_DDC1CLK_MASK_MASK 0x00000001L
+#define DC_GPIO_DDC1_MASK__DC_GPIO_DDC1CLK_MASK__SHIFT 0x00000000
+#define DC_GPIO_DDC1_MASK__DC_GPIO_DDC1CLK_PD_EN_MASK 0x00000010L
+#define DC_GPIO_DDC1_MASK__DC_GPIO_DDC1CLK_PD_EN__SHIFT 0x00000004
+#define DC_GPIO_DDC1_MASK__DC_GPIO_DDC1CLK_RECV_MASK 0x00000040L
+#define DC_GPIO_DDC1_MASK__DC_GPIO_DDC1CLK_RECV__SHIFT 0x00000006
+#define DC_GPIO_DDC1_MASK__DC_GPIO_DDC1CLK_STR_MASK 0x0f000000L
+#define DC_GPIO_DDC1_MASK__DC_GPIO_DDC1CLK_STR__SHIFT 0x00000018
+#define DC_GPIO_DDC1_MASK__DC_GPIO_DDC1DATA_MASK_MASK 0x00000100L
+#define DC_GPIO_DDC1_MASK__DC_GPIO_DDC1DATA_MASK__SHIFT 0x00000008
+#define DC_GPIO_DDC1_MASK__DC_GPIO_DDC1DATA_PD_EN_MASK 0x00001000L
+#define DC_GPIO_DDC1_MASK__DC_GPIO_DDC1DATA_PD_EN__SHIFT 0x0000000c
+#define DC_GPIO_DDC1_MASK__DC_GPIO_DDC1DATA_RECV_MASK 0x00004000L
+#define DC_GPIO_DDC1_MASK__DC_GPIO_DDC1DATA_RECV__SHIFT 0x0000000e
+#define DC_GPIO_DDC1_MASK__DC_GPIO_DDC1DATA_STR_MASK 0xf0000000L
+#define DC_GPIO_DDC1_MASK__DC_GPIO_DDC1DATA_STR__SHIFT 0x0000001c
+#define DC_GPIO_DDC1_Y__DC_GPIO_DDC1CLK_Y_MASK 0x00000001L
+#define DC_GPIO_DDC1_Y__DC_GPIO_DDC1CLK_Y__SHIFT 0x00000000
+#define DC_GPIO_DDC1_Y__DC_GPIO_DDC1DATA_Y_MASK 0x00000100L
+#define DC_GPIO_DDC1_Y__DC_GPIO_DDC1DATA_Y__SHIFT 0x00000008
+#define DC_GPIO_DDC2_A__DC_GPIO_DDC2CLK_A_MASK 0x00000001L
+#define DC_GPIO_DDC2_A__DC_GPIO_DDC2CLK_A__SHIFT 0x00000000
+#define DC_GPIO_DDC2_A__DC_GPIO_DDC2DATA_A_MASK 0x00000100L
+#define DC_GPIO_DDC2_A__DC_GPIO_DDC2DATA_A__SHIFT 0x00000008
+#define DC_GPIO_DDC2_EN__DC_GPIO_DDC2CLK_EN_MASK 0x00000001L
+#define DC_GPIO_DDC2_EN__DC_GPIO_DDC2CLK_EN__SHIFT 0x00000000
+#define DC_GPIO_DDC2_EN__DC_GPIO_DDC2DATA_EN_MASK 0x00000100L
+#define DC_GPIO_DDC2_EN__DC_GPIO_DDC2DATA_EN__SHIFT 0x00000008
+#define DC_GPIO_DDC2_MASK__ALLOW_HW_DDC2_PD_EN_MASK 0x00400000L
+#define DC_GPIO_DDC2_MASK__ALLOW_HW_DDC2_PD_EN__SHIFT 0x00000016
+#define DC_GPIO_DDC2_MASK__AUX2_POL_MASK 0x00100000L
+#define DC_GPIO_DDC2_MASK__AUX2_POL__SHIFT 0x00000014
+#define DC_GPIO_DDC2_MASK__AUX_PAD2_MODE_MASK 0x00010000L
+#define DC_GPIO_DDC2_MASK__AUX_PAD2_MODE__SHIFT 0x00000010
+#define DC_GPIO_DDC2_MASK__DC_GPIO_DDC2CLK_MASK_MASK 0x00000001L
+#define DC_GPIO_DDC2_MASK__DC_GPIO_DDC2CLK_MASK__SHIFT 0x00000000
+#define DC_GPIO_DDC2_MASK__DC_GPIO_DDC2CLK_PD_EN_MASK 0x00000010L
+#define DC_GPIO_DDC2_MASK__DC_GPIO_DDC2CLK_PD_EN__SHIFT 0x00000004
+#define DC_GPIO_DDC2_MASK__DC_GPIO_DDC2CLK_RECV_MASK 0x00000040L
+#define DC_GPIO_DDC2_MASK__DC_GPIO_DDC2CLK_RECV__SHIFT 0x00000006
+#define DC_GPIO_DDC2_MASK__DC_GPIO_DDC2CLK_STR_MASK 0x0f000000L
+#define DC_GPIO_DDC2_MASK__DC_GPIO_DDC2CLK_STR__SHIFT 0x00000018
+#define DC_GPIO_DDC2_MASK__DC_GPIO_DDC2DATA_MASK_MASK 0x00000100L
+#define DC_GPIO_DDC2_MASK__DC_GPIO_DDC2DATA_MASK__SHIFT 0x00000008
+#define DC_GPIO_DDC2_MASK__DC_GPIO_DDC2DATA_PD_EN_MASK 0x00001000L
+#define DC_GPIO_DDC2_MASK__DC_GPIO_DDC2DATA_PD_EN__SHIFT 0x0000000c
+#define DC_GPIO_DDC2_MASK__DC_GPIO_DDC2DATA_RECV_MASK 0x00004000L
+#define DC_GPIO_DDC2_MASK__DC_GPIO_DDC2DATA_RECV__SHIFT 0x0000000e
+#define DC_GPIO_DDC2_MASK__DC_GPIO_DDC2DATA_STR_MASK 0xf0000000L
+#define DC_GPIO_DDC2_MASK__DC_GPIO_DDC2DATA_STR__SHIFT 0x0000001c
+#define DC_GPIO_DDC2_Y__DC_GPIO_DDC2CLK_Y_MASK 0x00000001L
+#define DC_GPIO_DDC2_Y__DC_GPIO_DDC2CLK_Y__SHIFT 0x00000000
+#define DC_GPIO_DDC2_Y__DC_GPIO_DDC2DATA_Y_MASK 0x00000100L
+#define DC_GPIO_DDC2_Y__DC_GPIO_DDC2DATA_Y__SHIFT 0x00000008
+#define DC_GPIO_DDC3_A__DC_GPIO_DDC3CLK_A_MASK 0x00000001L
+#define DC_GPIO_DDC3_A__DC_GPIO_DDC3CLK_A__SHIFT 0x00000000
+#define DC_GPIO_DDC3_A__DC_GPIO_DDC3DATA_A_MASK 0x00000100L
+#define DC_GPIO_DDC3_A__DC_GPIO_DDC3DATA_A__SHIFT 0x00000008
+#define DC_GPIO_DDC3_EN__DC_GPIO_DDC3CLK_EN_MASK 0x00000001L
+#define DC_GPIO_DDC3_EN__DC_GPIO_DDC3CLK_EN__SHIFT 0x00000000
+#define DC_GPIO_DDC3_EN__DC_GPIO_DDC3DATA_EN_MASK 0x00000100L
+#define DC_GPIO_DDC3_EN__DC_GPIO_DDC3DATA_EN__SHIFT 0x00000008
+#define DC_GPIO_DDC3_MASK__ALLOW_HW_DDC3_PD_EN_MASK 0x00400000L
+#define DC_GPIO_DDC3_MASK__ALLOW_HW_DDC3_PD_EN__SHIFT 0x00000016
+#define DC_GPIO_DDC3_MASK__AUX3_POL_MASK 0x00100000L
+#define DC_GPIO_DDC3_MASK__AUX3_POL__SHIFT 0x00000014
+#define DC_GPIO_DDC3_MASK__AUX_PAD3_MODE_MASK 0x00010000L
+#define DC_GPIO_DDC3_MASK__AUX_PAD3_MODE__SHIFT 0x00000010
+#define DC_GPIO_DDC3_MASK__DC_GPIO_DDC3CLK_MASK_MASK 0x00000001L
+#define DC_GPIO_DDC3_MASK__DC_GPIO_DDC3CLK_MASK__SHIFT 0x00000000
+#define DC_GPIO_DDC3_MASK__DC_GPIO_DDC3CLK_PD_EN_MASK 0x00000010L
+#define DC_GPIO_DDC3_MASK__DC_GPIO_DDC3CLK_PD_EN__SHIFT 0x00000004
+#define DC_GPIO_DDC3_MASK__DC_GPIO_DDC3CLK_RECV_MASK 0x00000040L
+#define DC_GPIO_DDC3_MASK__DC_GPIO_DDC3CLK_RECV__SHIFT 0x00000006
+#define DC_GPIO_DDC3_MASK__DC_GPIO_DDC3CLK_STR_MASK 0x0f000000L
+#define DC_GPIO_DDC3_MASK__DC_GPIO_DDC3CLK_STR__SHIFT 0x00000018
+#define DC_GPIO_DDC3_MASK__DC_GPIO_DDC3DATA_MASK_MASK 0x00000100L
+#define DC_GPIO_DDC3_MASK__DC_GPIO_DDC3DATA_MASK__SHIFT 0x00000008
+#define DC_GPIO_DDC3_MASK__DC_GPIO_DDC3DATA_PD_EN_MASK 0x00001000L
+#define DC_GPIO_DDC3_MASK__DC_GPIO_DDC3DATA_PD_EN__SHIFT 0x0000000c
+#define DC_GPIO_DDC3_MASK__DC_GPIO_DDC3DATA_RECV_MASK 0x00004000L
+#define DC_GPIO_DDC3_MASK__DC_GPIO_DDC3DATA_RECV__SHIFT 0x0000000e
+#define DC_GPIO_DDC3_MASK__DC_GPIO_DDC3DATA_STR_MASK 0xf0000000L
+#define DC_GPIO_DDC3_MASK__DC_GPIO_DDC3DATA_STR__SHIFT 0x0000001c
+#define DC_GPIO_DDC3_Y__DC_GPIO_DDC3CLK_Y_MASK 0x00000001L
+#define DC_GPIO_DDC3_Y__DC_GPIO_DDC3CLK_Y__SHIFT 0x00000000
+#define DC_GPIO_DDC3_Y__DC_GPIO_DDC3DATA_Y_MASK 0x00000100L
+#define DC_GPIO_DDC3_Y__DC_GPIO_DDC3DATA_Y__SHIFT 0x00000008
+#define DC_GPIO_DDC4_A__DC_GPIO_DDC4CLK_A_MASK 0x00000001L
+#define DC_GPIO_DDC4_A__DC_GPIO_DDC4CLK_A__SHIFT 0x00000000
+#define DC_GPIO_DDC4_A__DC_GPIO_DDC4DATA_A_MASK 0x00000100L
+#define DC_GPIO_DDC4_A__DC_GPIO_DDC4DATA_A__SHIFT 0x00000008
+#define DC_GPIO_DDC4_EN__DC_GPIO_DDC4CLK_EN_MASK 0x00000001L
+#define DC_GPIO_DDC4_EN__DC_GPIO_DDC4CLK_EN__SHIFT 0x00000000
+#define DC_GPIO_DDC4_EN__DC_GPIO_DDC4DATA_EN_MASK 0x00000100L
+#define DC_GPIO_DDC4_EN__DC_GPIO_DDC4DATA_EN__SHIFT 0x00000008
+#define DC_GPIO_DDC4_MASK__ALLOW_HW_DDC4_PD_EN_MASK 0x00400000L
+#define DC_GPIO_DDC4_MASK__ALLOW_HW_DDC4_PD_EN__SHIFT 0x00000016
+#define DC_GPIO_DDC4_MASK__AUX4_POL_MASK 0x00100000L
+#define DC_GPIO_DDC4_MASK__AUX4_POL__SHIFT 0x00000014
+#define DC_GPIO_DDC4_MASK__AUX_PAD4_MODE_MASK 0x00010000L
+#define DC_GPIO_DDC4_MASK__AUX_PAD4_MODE__SHIFT 0x00000010
+#define DC_GPIO_DDC4_MASK__DC_GPIO_DDC4CLK_MASK_MASK 0x00000001L
+#define DC_GPIO_DDC4_MASK__DC_GPIO_DDC4CLK_MASK__SHIFT 0x00000000
+#define DC_GPIO_DDC4_MASK__DC_GPIO_DDC4CLK_PD_EN_MASK 0x00000010L
+#define DC_GPIO_DDC4_MASK__DC_GPIO_DDC4CLK_PD_EN__SHIFT 0x00000004
+#define DC_GPIO_DDC4_MASK__DC_GPIO_DDC4CLK_RECV_MASK 0x00000040L
+#define DC_GPIO_DDC4_MASK__DC_GPIO_DDC4CLK_RECV__SHIFT 0x00000006
+#define DC_GPIO_DDC4_MASK__DC_GPIO_DDC4CLK_STR_MASK 0x0f000000L
+#define DC_GPIO_DDC4_MASK__DC_GPIO_DDC4CLK_STR__SHIFT 0x00000018
+#define DC_GPIO_DDC4_MASK__DC_GPIO_DDC4DATA_MASK_MASK 0x00000100L
+#define DC_GPIO_DDC4_MASK__DC_GPIO_DDC4DATA_MASK__SHIFT 0x00000008
+#define DC_GPIO_DDC4_MASK__DC_GPIO_DDC4DATA_PD_EN_MASK 0x00001000L
+#define DC_GPIO_DDC4_MASK__DC_GPIO_DDC4DATA_PD_EN__SHIFT 0x0000000c
+#define DC_GPIO_DDC4_MASK__DC_GPIO_DDC4DATA_RECV_MASK 0x00004000L
+#define DC_GPIO_DDC4_MASK__DC_GPIO_DDC4DATA_RECV__SHIFT 0x0000000e
+#define DC_GPIO_DDC4_MASK__DC_GPIO_DDC4DATA_STR_MASK 0xf0000000L
+#define DC_GPIO_DDC4_MASK__DC_GPIO_DDC4DATA_STR__SHIFT 0x0000001c
+#define DC_GPIO_DDC4_Y__DC_GPIO_DDC4CLK_Y_MASK 0x00000001L
+#define DC_GPIO_DDC4_Y__DC_GPIO_DDC4CLK_Y__SHIFT 0x00000000
+#define DC_GPIO_DDC4_Y__DC_GPIO_DDC4DATA_Y_MASK 0x00000100L
+#define DC_GPIO_DDC4_Y__DC_GPIO_DDC4DATA_Y__SHIFT 0x00000008
+#define DC_GPIO_DDC5_A__DC_GPIO_DDC5CLK_A_MASK 0x00000001L
+#define DC_GPIO_DDC5_A__DC_GPIO_DDC5CLK_A__SHIFT 0x00000000
+#define DC_GPIO_DDC5_A__DC_GPIO_DDC5DATA_A_MASK 0x00000100L
+#define DC_GPIO_DDC5_A__DC_GPIO_DDC5DATA_A__SHIFT 0x00000008
+#define DC_GPIO_DDC5_EN__DC_GPIO_DDC5CLK_EN_MASK 0x00000001L
+#define DC_GPIO_DDC5_EN__DC_GPIO_DDC5CLK_EN__SHIFT 0x00000000
+#define DC_GPIO_DDC5_EN__DC_GPIO_DDC5DATA_EN_MASK 0x00000100L
+#define DC_GPIO_DDC5_EN__DC_GPIO_DDC5DATA_EN__SHIFT 0x00000008
+#define DC_GPIO_DDC5_MASK__ALLOW_HW_DDC5_PD_EN_MASK 0x00400000L
+#define DC_GPIO_DDC5_MASK__ALLOW_HW_DDC5_PD_EN__SHIFT 0x00000016
+#define DC_GPIO_DDC5_MASK__AUX5_POL_MASK 0x00100000L
+#define DC_GPIO_DDC5_MASK__AUX5_POL__SHIFT 0x00000014
+#define DC_GPIO_DDC5_MASK__AUX_PAD5_MODE_MASK 0x00010000L
+#define DC_GPIO_DDC5_MASK__AUX_PAD5_MODE__SHIFT 0x00000010
+#define DC_GPIO_DDC5_MASK__DC_GPIO_DDC5CLK_MASK_MASK 0x00000001L
+#define DC_GPIO_DDC5_MASK__DC_GPIO_DDC5CLK_MASK__SHIFT 0x00000000
+#define DC_GPIO_DDC5_MASK__DC_GPIO_DDC5CLK_PD_EN_MASK 0x00000010L
+#define DC_GPIO_DDC5_MASK__DC_GPIO_DDC5CLK_PD_EN__SHIFT 0x00000004
+#define DC_GPIO_DDC5_MASK__DC_GPIO_DDC5CLK_RECV_MASK 0x00000040L
+#define DC_GPIO_DDC5_MASK__DC_GPIO_DDC5CLK_RECV__SHIFT 0x00000006
+#define DC_GPIO_DDC5_MASK__DC_GPIO_DDC5CLK_STR_MASK 0x0f000000L
+#define DC_GPIO_DDC5_MASK__DC_GPIO_DDC5CLK_STR__SHIFT 0x00000018
+#define DC_GPIO_DDC5_MASK__DC_GPIO_DDC5DATA_MASK_MASK 0x00000100L
+#define DC_GPIO_DDC5_MASK__DC_GPIO_DDC5DATA_MASK__SHIFT 0x00000008
+#define DC_GPIO_DDC5_MASK__DC_GPIO_DDC5DATA_PD_EN_MASK 0x00001000L
+#define DC_GPIO_DDC5_MASK__DC_GPIO_DDC5DATA_PD_EN__SHIFT 0x0000000c
+#define DC_GPIO_DDC5_MASK__DC_GPIO_DDC5DATA_RECV_MASK 0x00004000L
+#define DC_GPIO_DDC5_MASK__DC_GPIO_DDC5DATA_RECV__SHIFT 0x0000000e
+#define DC_GPIO_DDC5_MASK__DC_GPIO_DDC5DATA_STR_MASK 0xf0000000L
+#define DC_GPIO_DDC5_MASK__DC_GPIO_DDC5DATA_STR__SHIFT 0x0000001c
+#define DC_GPIO_DDC5_Y__DC_GPIO_DDC5CLK_Y_MASK 0x00000001L
+#define DC_GPIO_DDC5_Y__DC_GPIO_DDC5CLK_Y__SHIFT 0x00000000
+#define DC_GPIO_DDC5_Y__DC_GPIO_DDC5DATA_Y_MASK 0x00000100L
+#define DC_GPIO_DDC5_Y__DC_GPIO_DDC5DATA_Y__SHIFT 0x00000008
+#define DC_GPIO_DDC6_A__DC_GPIO_DDC6CLK_A_MASK 0x00000001L
+#define DC_GPIO_DDC6_A__DC_GPIO_DDC6CLK_A__SHIFT 0x00000000
+#define DC_GPIO_DDC6_A__DC_GPIO_DDC6DATA_A_MASK 0x00000100L
+#define DC_GPIO_DDC6_A__DC_GPIO_DDC6DATA_A__SHIFT 0x00000008
+#define DC_GPIO_DDC6_EN__DC_GPIO_DDC6CLK_EN_MASK 0x00000001L
+#define DC_GPIO_DDC6_EN__DC_GPIO_DDC6CLK_EN__SHIFT 0x00000000
+#define DC_GPIO_DDC6_EN__DC_GPIO_DDC6DATA_EN_MASK 0x00000100L
+#define DC_GPIO_DDC6_EN__DC_GPIO_DDC6DATA_EN__SHIFT 0x00000008
+#define DC_GPIO_DDC6_MASK__ALLOW_HW_DDC6_PD_EN_MASK 0x00400000L
+#define DC_GPIO_DDC6_MASK__ALLOW_HW_DDC6_PD_EN__SHIFT 0x00000016
+#define DC_GPIO_DDC6_MASK__AUX6_POL_MASK 0x00100000L
+#define DC_GPIO_DDC6_MASK__AUX6_POL__SHIFT 0x00000014
+#define DC_GPIO_DDC6_MASK__AUX_PAD6_MODE_MASK 0x00010000L
+#define DC_GPIO_DDC6_MASK__AUX_PAD6_MODE__SHIFT 0x00000010
+#define DC_GPIO_DDC6_MASK__DC_GPIO_DDC6CLK_MASK_MASK 0x00000001L
+#define DC_GPIO_DDC6_MASK__DC_GPIO_DDC6CLK_MASK__SHIFT 0x00000000
+#define DC_GPIO_DDC6_MASK__DC_GPIO_DDC6CLK_PD_EN_MASK 0x00000010L
+#define DC_GPIO_DDC6_MASK__DC_GPIO_DDC6CLK_PD_EN__SHIFT 0x00000004
+#define DC_GPIO_DDC6_MASK__DC_GPIO_DDC6CLK_RECV_MASK 0x00000040L
+#define DC_GPIO_DDC6_MASK__DC_GPIO_DDC6CLK_RECV__SHIFT 0x00000006
+#define DC_GPIO_DDC6_MASK__DC_GPIO_DDC6CLK_STR_MASK 0x0f000000L
+#define DC_GPIO_DDC6_MASK__DC_GPIO_DDC6CLK_STR__SHIFT 0x00000018
+#define DC_GPIO_DDC6_MASK__DC_GPIO_DDC6DATA_MASK_MASK 0x00000100L
+#define DC_GPIO_DDC6_MASK__DC_GPIO_DDC6DATA_MASK__SHIFT 0x00000008
+#define DC_GPIO_DDC6_MASK__DC_GPIO_DDC6DATA_PD_EN_MASK 0x00001000L
+#define DC_GPIO_DDC6_MASK__DC_GPIO_DDC6DATA_PD_EN__SHIFT 0x0000000c
+#define DC_GPIO_DDC6_MASK__DC_GPIO_DDC6DATA_RECV_MASK 0x00004000L
+#define DC_GPIO_DDC6_MASK__DC_GPIO_DDC6DATA_RECV__SHIFT 0x0000000e
+#define DC_GPIO_DDC6_MASK__DC_GPIO_DDC6DATA_STR_MASK 0xf0000000L
+#define DC_GPIO_DDC6_MASK__DC_GPIO_DDC6DATA_STR__SHIFT 0x0000001c
+#define DC_GPIO_DDC6_Y__DC_GPIO_DDC6CLK_Y_MASK 0x00000001L
+#define DC_GPIO_DDC6_Y__DC_GPIO_DDC6CLK_Y__SHIFT 0x00000000
+#define DC_GPIO_DDC6_Y__DC_GPIO_DDC6DATA_Y_MASK 0x00000100L
+#define DC_GPIO_DDC6_Y__DC_GPIO_DDC6DATA_Y__SHIFT 0x00000008
+#define DC_GPIO_DDCVGA_A__DC_GPIO_DDCVGACLK_A_MASK 0x00000001L
+#define DC_GPIO_DDCVGA_A__DC_GPIO_DDCVGACLK_A__SHIFT 0x00000000
+#define DC_GPIO_DDCVGA_A__DC_GPIO_DDCVGADATA_A_MASK 0x00000100L
+#define DC_GPIO_DDCVGA_A__DC_GPIO_DDCVGADATA_A__SHIFT 0x00000008
+#define DC_GPIO_DDCVGA_EN__DC_GPIO_DDCVGACLK_EN_MASK 0x00000001L
+#define DC_GPIO_DDCVGA_EN__DC_GPIO_DDCVGACLK_EN__SHIFT 0x00000000
+#define DC_GPIO_DDCVGA_EN__DC_GPIO_DDCVGADATA_EN_MASK 0x00000100L
+#define DC_GPIO_DDCVGA_EN__DC_GPIO_DDCVGADATA_EN__SHIFT 0x00000008
+#define DC_GPIO_DDCVGA_MASK__ALLOW_HW_DDCVGA_PD_EN_MASK 0x00400000L
+#define DC_GPIO_DDCVGA_MASK__ALLOW_HW_DDCVGA_PD_EN__SHIFT 0x00000016
+#define DC_GPIO_DDCVGA_MASK__AUX_PADVGA_MODE_MASK 0x00010000L
+#define DC_GPIO_DDCVGA_MASK__AUX_PADVGA_MODE__SHIFT 0x00000010
+#define DC_GPIO_DDCVGA_MASK__AUXVGA_POL_MASK 0x00100000L
+#define DC_GPIO_DDCVGA_MASK__AUXVGA_POL__SHIFT 0x00000014
+#define DC_GPIO_DDCVGA_MASK__DC_GPIO_DDCVGACLK_MASK_MASK 0x00000001L
+#define DC_GPIO_DDCVGA_MASK__DC_GPIO_DDCVGACLK_MASK__SHIFT 0x00000000
+#define DC_GPIO_DDCVGA_MASK__DC_GPIO_DDCVGACLK_RECV_MASK 0x00000040L
+#define DC_GPIO_DDCVGA_MASK__DC_GPIO_DDCVGACLK_RECV__SHIFT 0x00000006
+#define DC_GPIO_DDCVGA_MASK__DC_GPIO_DDCVGACLK_STR_MASK 0x0f000000L
+#define DC_GPIO_DDCVGA_MASK__DC_GPIO_DDCVGACLK_STR__SHIFT 0x00000018
+#define DC_GPIO_DDCVGA_MASK__DC_GPIO_DDCVGADATA_MASK_MASK 0x00000100L
+#define DC_GPIO_DDCVGA_MASK__DC_GPIO_DDCVGADATA_MASK__SHIFT 0x00000008
+#define DC_GPIO_DDCVGA_MASK__DC_GPIO_DDCVGADATA_PD_EN_MASK 0x00001000L
+#define DC_GPIO_DDCVGA_MASK__DC_GPIO_DDCVGADATA_PD_EN__SHIFT 0x0000000c
+#define DC_GPIO_DDCVGA_MASK__DC_GPIO_DDCVGADATA_RECV_MASK 0x00004000L
+#define DC_GPIO_DDCVGA_MASK__DC_GPIO_DDCVGADATA_RECV__SHIFT 0x0000000e
+#define DC_GPIO_DDCVGA_MASK__DC_GPIO_DDCVGADATA_STR_MASK 0xf0000000L
+#define DC_GPIO_DDCVGA_MASK__DC_GPIO_DDCVGADATA_STR__SHIFT 0x0000001c
+#define DC_GPIO_DDCVGA_Y__DC_GPIO_DDCVGACLK_Y_MASK 0x00000001L
+#define DC_GPIO_DDCVGA_Y__DC_GPIO_DDCVGACLK_Y__SHIFT 0x00000000
+#define DC_GPIO_DDCVGA_Y__DC_GPIO_DDCVGADATA_Y_MASK 0x00000100L
+#define DC_GPIO_DDCVGA_Y__DC_GPIO_DDCVGADATA_Y__SHIFT 0x00000008
+#define DC_GPIO_DEBUG__DC_GPIO_CHIP_DEBUG_OUT_PIN_SEL_MASK 0x00010000L
+#define DC_GPIO_DEBUG__DC_GPIO_CHIP_DEBUG_OUT_PIN_SEL__SHIFT 0x00000010
+#define DC_GPIO_DEBUG__DC_GPIO_MACRO_DEBUG_MASK 0x00000300L
+#define DC_GPIO_DEBUG__DC_GPIO_MACRO_DEBUG__SHIFT 0x00000008
+#define DC_GPIO_DEBUG__DC_GPIO_VIP_DEBUG_MASK 0x00000001L
+#define DC_GPIO_DEBUG__DC_GPIO_VIP_DEBUG__SHIFT 0x00000000
+#define DC_GPIO_DVODATA_A__DC_GPIO_DVOCLK_A_MASK 0x10000000L
+#define DC_GPIO_DVODATA_A__DC_GPIO_DVOCLK_A__SHIFT 0x0000001c
+#define DC_GPIO_DVODATA_A__DC_GPIO_DVOCNTL_A_MASK 0x07000000L
+#define DC_GPIO_DVODATA_A__DC_GPIO_DVOCNTL_A__SHIFT 0x00000018
+#define DC_GPIO_DVODATA_A__DC_GPIO_DVODATA_A_MASK 0x00ffffffL
+#define DC_GPIO_DVODATA_A__DC_GPIO_DVODATA_A__SHIFT 0x00000000
+#define DC_GPIO_DVODATA_A__DC_GPIO_MVP_DVOCNTL_A_MASK 0xc0000000L
+#define DC_GPIO_DVODATA_A__DC_GPIO_MVP_DVOCNTL_A__SHIFT 0x0000001e
+#define DC_GPIO_DVODATA_EN__DC_GPIO_DVOCLK_EN_MASK 0x10000000L
+#define DC_GPIO_DVODATA_EN__DC_GPIO_DVOCLK_EN__SHIFT 0x0000001c
+#define DC_GPIO_DVODATA_EN__DC_GPIO_DVOCNTL_EN_MASK 0x07000000L
+#define DC_GPIO_DVODATA_EN__DC_GPIO_DVOCNTL_EN__SHIFT 0x00000018
+#define DC_GPIO_DVODATA_EN__DC_GPIO_DVODATA_EN_MASK 0x00ffffffL
+#define DC_GPIO_DVODATA_EN__DC_GPIO_DVODATA_EN__SHIFT 0x00000000
+#define DC_GPIO_DVODATA_EN__DC_GPIO_MVP_DVOCNTL_EN_MASK 0xc0000000L
+#define DC_GPIO_DVODATA_EN__DC_GPIO_MVP_DVOCNTL_EN__SHIFT 0x0000001e
+#define DC_GPIO_DVODATA_MASK__DC_GPIO_DVOCLK_MASK_MASK 0x10000000L
+#define DC_GPIO_DVODATA_MASK__DC_GPIO_DVOCLK_MASK__SHIFT 0x0000001c
+#define DC_GPIO_DVODATA_MASK__DC_GPIO_DVOCNTL_MASK_MASK 0x07000000L
+#define DC_GPIO_DVODATA_MASK__DC_GPIO_DVOCNTL_MASK__SHIFT 0x00000018
+#define DC_GPIO_DVODATA_MASK__DC_GPIO_DVODATA_MASK_MASK 0x00ffffffL
+#define DC_GPIO_DVODATA_MASK__DC_GPIO_DVODATA_MASK__SHIFT 0x00000000
+#define DC_GPIO_DVODATA_MASK__DC_GPIO_MVP_DVOCNTL_MASK_MASK 0xc0000000L
+#define DC_GPIO_DVODATA_MASK__DC_GPIO_MVP_DVOCNTL_MASK__SHIFT 0x0000001e
+#define DC_GPIO_DVODATA_Y__DC_GPIO_DVOCLK_Y_MASK 0x10000000L
+#define DC_GPIO_DVODATA_Y__DC_GPIO_DVOCLK_Y__SHIFT 0x0000001c
+#define DC_GPIO_DVODATA_Y__DC_GPIO_DVOCNTL_Y_MASK 0x07000000L
+#define DC_GPIO_DVODATA_Y__DC_GPIO_DVOCNTL_Y__SHIFT 0x00000018
+#define DC_GPIO_DVODATA_Y__DC_GPIO_DVODATA_Y_MASK 0x00ffffffL
+#define DC_GPIO_DVODATA_Y__DC_GPIO_DVODATA_Y__SHIFT 0x00000000
+#define DC_GPIO_DVODATA_Y__DC_GPIO_MVP_DVOCNTL_Y_MASK 0xc0000000L
+#define DC_GPIO_DVODATA_Y__DC_GPIO_MVP_DVOCNTL_Y__SHIFT 0x0000001e
+#define DC_GPIO_GENERIC_A__DC_GPIO_GENERICA_A_MASK 0x00000001L
+#define DC_GPIO_GENERIC_A__DC_GPIO_GENERICA_A__SHIFT 0x00000000
+#define DC_GPIO_GENERIC_A__DC_GPIO_GENERICB_A_MASK 0x00000100L
+#define DC_GPIO_GENERIC_A__DC_GPIO_GENERICB_A__SHIFT 0x00000008
+#define DC_GPIO_GENERIC_A__DC_GPIO_GENERICC_A_MASK 0x00010000L
+#define DC_GPIO_GENERIC_A__DC_GPIO_GENERICC_A__SHIFT 0x00000010
+#define DC_GPIO_GENERIC_A__DC_GPIO_GENERICD_A_MASK 0x00100000L
+#define DC_GPIO_GENERIC_A__DC_GPIO_GENERICD_A__SHIFT 0x00000014
+#define DC_GPIO_GENERIC_A__DC_GPIO_GENERICE_A_MASK 0x00200000L
+#define DC_GPIO_GENERIC_A__DC_GPIO_GENERICE_A__SHIFT 0x00000015
+#define DC_GPIO_GENERIC_A__DC_GPIO_GENERICF_A_MASK 0x00400000L
+#define DC_GPIO_GENERIC_A__DC_GPIO_GENERICF_A__SHIFT 0x00000016
+#define DC_GPIO_GENERIC_A__DC_GPIO_GENERICG_A_MASK 0x00800000L
+#define DC_GPIO_GENERIC_A__DC_GPIO_GENERICG_A__SHIFT 0x00000017
+#define DC_GPIO_GENERIC_EN__DC_GPIO_GENERICA_EN_MASK 0x00000001L
+#define DC_GPIO_GENERIC_EN__DC_GPIO_GENERICA_EN__SHIFT 0x00000000
+#define DC_GPIO_GENERIC_EN__DC_GPIO_GENERICB_EN_MASK 0x00000100L
+#define DC_GPIO_GENERIC_EN__DC_GPIO_GENERICB_EN__SHIFT 0x00000008
+#define DC_GPIO_GENERIC_EN__DC_GPIO_GENERICC_EN_MASK 0x00010000L
+#define DC_GPIO_GENERIC_EN__DC_GPIO_GENERICC_EN__SHIFT 0x00000010
+#define DC_GPIO_GENERIC_EN__DC_GPIO_GENERICD_EN_MASK 0x00100000L
+#define DC_GPIO_GENERIC_EN__DC_GPIO_GENERICD_EN__SHIFT 0x00000014
+#define DC_GPIO_GENERIC_EN__DC_GPIO_GENERICE_EN_MASK 0x00200000L
+#define DC_GPIO_GENERIC_EN__DC_GPIO_GENERICE_EN__SHIFT 0x00000015
+#define DC_GPIO_GENERIC_EN__DC_GPIO_GENERICF_EN_MASK 0x00400000L
+#define DC_GPIO_GENERIC_EN__DC_GPIO_GENERICF_EN__SHIFT 0x00000016
+#define DC_GPIO_GENERIC_EN__DC_GPIO_GENERICG_EN_MASK 0x00800000L
+#define DC_GPIO_GENERIC_EN__DC_GPIO_GENERICG_EN__SHIFT 0x00000017
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICA_MASK_MASK 0x00000001L
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICA_MASK__SHIFT 0x00000000
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICA_PD_DIS_MASK 0x00000002L
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICA_PD_DIS__SHIFT 0x00000001
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICA_RECV_MASK 0x00000004L
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICA_RECV__SHIFT 0x00000002
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICB_MASK_MASK 0x00000010L
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICB_MASK__SHIFT 0x00000004
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICB_PD_DIS_MASK 0x00000020L
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICB_PD_DIS__SHIFT 0x00000005
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICB_RECV_MASK 0x00000040L
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICB_RECV__SHIFT 0x00000006
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICC_MASK_MASK 0x00000100L
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICC_MASK__SHIFT 0x00000008
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICC_PD_DIS_MASK 0x00000200L
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICC_PD_DIS__SHIFT 0x00000009
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICC_RECV_MASK 0x00000400L
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICC_RECV__SHIFT 0x0000000a
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICD_MASK_MASK 0x00001000L
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICD_MASK__SHIFT 0x0000000c
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICD_PD_DIS_MASK 0x00002000L
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICD_PD_DIS__SHIFT 0x0000000d
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICD_RECV_MASK 0x00004000L
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICD_RECV__SHIFT 0x0000000e
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICE_MASK_MASK 0x00010000L
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICE_MASK__SHIFT 0x00000010
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICE_PD_DIS_MASK 0x00020000L
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICE_PD_DIS__SHIFT 0x00000011
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICE_RECV_MASK 0x00040000L
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICE_RECV__SHIFT 0x00000012
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICF_MASK_MASK 0x00100000L
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICF_MASK__SHIFT 0x00000014
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICF_PD_DIS_MASK 0x00200000L
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICF_PD_DIS__SHIFT 0x00000015
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICF_RECV_MASK 0x00400000L
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICF_RECV__SHIFT 0x00000016
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICG_MASK_MASK 0x01000000L
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICG_MASK__SHIFT 0x00000018
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICG_PD_DIS_MASK 0x02000000L
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICG_PD_DIS__SHIFT 0x00000019
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICG_RECV_MASK 0x04000000L
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICG_RECV__SHIFT 0x0000001a
+#define DC_GPIO_GENERIC_Y__DC_GPIO_GENERICA_Y_MASK 0x00000001L
+#define DC_GPIO_GENERIC_Y__DC_GPIO_GENERICA_Y__SHIFT 0x00000000
+#define DC_GPIO_GENERIC_Y__DC_GPIO_GENERICB_Y_MASK 0x00000100L
+#define DC_GPIO_GENERIC_Y__DC_GPIO_GENERICB_Y__SHIFT 0x00000008
+#define DC_GPIO_GENERIC_Y__DC_GPIO_GENERICC_Y_MASK 0x00010000L
+#define DC_GPIO_GENERIC_Y__DC_GPIO_GENERICC_Y__SHIFT 0x00000010
+#define DC_GPIO_GENERIC_Y__DC_GPIO_GENERICD_Y_MASK 0x00100000L
+#define DC_GPIO_GENERIC_Y__DC_GPIO_GENERICD_Y__SHIFT 0x00000014
+#define DC_GPIO_GENERIC_Y__DC_GPIO_GENERICE_Y_MASK 0x00200000L
+#define DC_GPIO_GENERIC_Y__DC_GPIO_GENERICE_Y__SHIFT 0x00000015
+#define DC_GPIO_GENERIC_Y__DC_GPIO_GENERICF_Y_MASK 0x00400000L
+#define DC_GPIO_GENERIC_Y__DC_GPIO_GENERICF_Y__SHIFT 0x00000016
+#define DC_GPIO_GENERIC_Y__DC_GPIO_GENERICG_Y_MASK 0x00800000L
+#define DC_GPIO_GENERIC_Y__DC_GPIO_GENERICG_Y__SHIFT 0x00000017
+#define DC_GPIO_GENLK_A__DC_GPIO_GENLK_CLK_A_MASK 0x00000001L
+#define DC_GPIO_GENLK_A__DC_GPIO_GENLK_CLK_A__SHIFT 0x00000000
+#define DC_GPIO_GENLK_A__DC_GPIO_GENLK_VSYNC_A_MASK 0x00000100L
+#define DC_GPIO_GENLK_A__DC_GPIO_GENLK_VSYNC_A__SHIFT 0x00000008
+#define DC_GPIO_GENLK_A__DC_GPIO_SWAPLOCK_A_A_MASK 0x00010000L
+#define DC_GPIO_GENLK_A__DC_GPIO_SWAPLOCK_A_A__SHIFT 0x00000010
+#define DC_GPIO_GENLK_A__DC_GPIO_SWAPLOCK_B_A_MASK 0x01000000L
+#define DC_GPIO_GENLK_A__DC_GPIO_SWAPLOCK_B_A__SHIFT 0x00000018
+#define DC_GPIO_GENLK_EN__DC_GPIO_GENLK_CLK_EN_MASK 0x00000001L
+#define DC_GPIO_GENLK_EN__DC_GPIO_GENLK_CLK_EN__SHIFT 0x00000000
+#define DC_GPIO_GENLK_EN__DC_GPIO_GENLK_VSYNC_EN_MASK 0x00000100L
+#define DC_GPIO_GENLK_EN__DC_GPIO_GENLK_VSYNC_EN__SHIFT 0x00000008
+#define DC_GPIO_GENLK_EN__DC_GPIO_SWAPLOCK_A_EN_MASK 0x00010000L
+#define DC_GPIO_GENLK_EN__DC_GPIO_SWAPLOCK_A_EN__SHIFT 0x00000010
+#define DC_GPIO_GENLK_EN__DC_GPIO_SWAPLOCK_B_EN_MASK 0x01000000L
+#define DC_GPIO_GENLK_EN__DC_GPIO_SWAPLOCK_B_EN__SHIFT 0x00000018
+#define DC_GPIO_GENLK_MASK__DC_GPIO_GENLK_CLK_MASK_MASK 0x00000001L
+#define DC_GPIO_GENLK_MASK__DC_GPIO_GENLK_CLK_MASK__SHIFT 0x00000000
+#define DC_GPIO_GENLK_MASK__DC_GPIO_GENLK_CLK_PD_DIS_MASK 0x00000002L
+#define DC_GPIO_GENLK_MASK__DC_GPIO_GENLK_CLK_PD_DIS__SHIFT 0x00000001
+#define DC_GPIO_GENLK_MASK__DC_GPIO_GENLK_CLK_PU_EN_MASK 0x00000008L
+#define DC_GPIO_GENLK_MASK__DC_GPIO_GENLK_CLK_PU_EN__SHIFT 0x00000003
+#define DC_GPIO_GENLK_MASK__DC_GPIO_GENLK_CLK_RECV_MASK 0x00000004L
+#define DC_GPIO_GENLK_MASK__DC_GPIO_GENLK_CLK_RECV__SHIFT 0x00000002
+#define DC_GPIO_GENLK_MASK__DC_GPIO_GENLK_VSYNC_MASK_MASK 0x00000100L
+#define DC_GPIO_GENLK_MASK__DC_GPIO_GENLK_VSYNC_MASK__SHIFT 0x00000008
+#define DC_GPIO_GENLK_MASK__DC_GPIO_GENLK_VSYNC_PD_DIS_MASK 0x00000200L
+#define DC_GPIO_GENLK_MASK__DC_GPIO_GENLK_VSYNC_PD_DIS__SHIFT 0x00000009
+#define DC_GPIO_GENLK_MASK__DC_GPIO_GENLK_VSYNC_PU_EN_MASK 0x00000800L
+#define DC_GPIO_GENLK_MASK__DC_GPIO_GENLK_VSYNC_PU_EN__SHIFT 0x0000000b
+#define DC_GPIO_GENLK_MASK__DC_GPIO_GENLK_VSYNC_RECV_MASK 0x00000400L
+#define DC_GPIO_GENLK_MASK__DC_GPIO_GENLK_VSYNC_RECV__SHIFT 0x0000000a
+#define DC_GPIO_GENLK_MASK__DC_GPIO_SWAPLOCK_A_MASK_MASK 0x00010000L
+#define DC_GPIO_GENLK_MASK__DC_GPIO_SWAPLOCK_A_MASK__SHIFT 0x00000010
+#define DC_GPIO_GENLK_MASK__DC_GPIO_SWAPLOCK_A_PD_DIS_MASK 0x00020000L
+#define DC_GPIO_GENLK_MASK__DC_GPIO_SWAPLOCK_A_PD_DIS__SHIFT 0x00000011
+#define DC_GPIO_GENLK_MASK__DC_GPIO_SWAPLOCK_A_PU_EN_MASK 0x00080000L
+#define DC_GPIO_GENLK_MASK__DC_GPIO_SWAPLOCK_A_PU_EN__SHIFT 0x00000013
+#define DC_GPIO_GENLK_MASK__DC_GPIO_SWAPLOCK_A_RECV_MASK 0x00040000L
+#define DC_GPIO_GENLK_MASK__DC_GPIO_SWAPLOCK_A_RECV__SHIFT 0x00000012
+#define DC_GPIO_GENLK_MASK__DC_GPIO_SWAPLOCK_B_MASK_MASK 0x01000000L
+#define DC_GPIO_GENLK_MASK__DC_GPIO_SWAPLOCK_B_MASK__SHIFT 0x00000018
+#define DC_GPIO_GENLK_MASK__DC_GPIO_SWAPLOCK_B_PD_DIS_MASK 0x02000000L
+#define DC_GPIO_GENLK_MASK__DC_GPIO_SWAPLOCK_B_PD_DIS__SHIFT 0x00000019
+#define DC_GPIO_GENLK_MASK__DC_GPIO_SWAPLOCK_B_PU_EN_MASK 0x08000000L
+#define DC_GPIO_GENLK_MASK__DC_GPIO_SWAPLOCK_B_PU_EN__SHIFT 0x0000001b
+#define DC_GPIO_GENLK_MASK__DC_GPIO_SWAPLOCK_B_RECV_MASK 0x04000000L
+#define DC_GPIO_GENLK_MASK__DC_GPIO_SWAPLOCK_B_RECV__SHIFT 0x0000001a
+#define DC_GPIO_GENLK_Y__DC_GPIO_GENLK_CLK_Y_MASK 0x00000001L
+#define DC_GPIO_GENLK_Y__DC_GPIO_GENLK_CLK_Y__SHIFT 0x00000000
+#define DC_GPIO_GENLK_Y__DC_GPIO_GENLK_VSYNC_Y_MASK 0x00000100L
+#define DC_GPIO_GENLK_Y__DC_GPIO_GENLK_VSYNC_Y__SHIFT 0x00000008
+#define DC_GPIO_GENLK_Y__DC_GPIO_SWAPLOCK_A_Y_MASK 0x00010000L
+#define DC_GPIO_GENLK_Y__DC_GPIO_SWAPLOCK_A_Y__SHIFT 0x00000010
+#define DC_GPIO_GENLK_Y__DC_GPIO_SWAPLOCK_B_Y_MASK 0x01000000L
+#define DC_GPIO_GENLK_Y__DC_GPIO_SWAPLOCK_B_Y__SHIFT 0x00000018
+#define DC_GPIO_HPD_A__DC_GPIO_HPD1_A_MASK 0x00000001L
+#define DC_GPIO_HPD_A__DC_GPIO_HPD1_A__SHIFT 0x00000000
+#define DC_GPIO_HPD_A__DC_GPIO_HPD2_A_MASK 0x00000100L
+#define DC_GPIO_HPD_A__DC_GPIO_HPD2_A__SHIFT 0x00000008
+#define DC_GPIO_HPD_A__DC_GPIO_HPD3_A_MASK 0x00010000L
+#define DC_GPIO_HPD_A__DC_GPIO_HPD3_A__SHIFT 0x00000010
+#define DC_GPIO_HPD_A__DC_GPIO_HPD4_A_MASK 0x01000000L
+#define DC_GPIO_HPD_A__DC_GPIO_HPD4_A__SHIFT 0x00000018
+#define DC_GPIO_HPD_A__DC_GPIO_HPD5_A_MASK 0x04000000L
+#define DC_GPIO_HPD_A__DC_GPIO_HPD5_A__SHIFT 0x0000001a
+#define DC_GPIO_HPD_A__DC_GPIO_HPD6_A_MASK 0x10000000L
+#define DC_GPIO_HPD_A__DC_GPIO_HPD6_A__SHIFT 0x0000001c
+#define DC_GPIO_HPD_EN__DC_GPIO_HPD1_EN_MASK 0x00000001L
+#define DC_GPIO_HPD_EN__DC_GPIO_HPD1_EN__SHIFT 0x00000000
+#define DC_GPIO_HPD_EN__DC_GPIO_HPD2_EN_MASK 0x00000100L
+#define DC_GPIO_HPD_EN__DC_GPIO_HPD2_EN__SHIFT 0x00000008
+#define DC_GPIO_HPD_EN__DC_GPIO_HPD3_EN_MASK 0x00010000L
+#define DC_GPIO_HPD_EN__DC_GPIO_HPD3_EN__SHIFT 0x00000010
+#define DC_GPIO_HPD_EN__DC_GPIO_HPD4_EN_MASK 0x01000000L
+#define DC_GPIO_HPD_EN__DC_GPIO_HPD4_EN__SHIFT 0x00000018
+#define DC_GPIO_HPD_EN__DC_GPIO_HPD5_EN_MASK 0x04000000L
+#define DC_GPIO_HPD_EN__DC_GPIO_HPD5_EN__SHIFT 0x0000001a
+#define DC_GPIO_HPD_EN__DC_GPIO_HPD6_EN_MASK 0x10000000L
+#define DC_GPIO_HPD_EN__DC_GPIO_HPD6_EN__SHIFT 0x0000001c
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD1_MASK_MASK 0x00000001L
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD1_MASK__SHIFT 0x00000000
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD1_PD_DIS_MASK 0x00000010L
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD1_PD_DIS__SHIFT 0x00000004
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD1_RECV_MASK 0x00000040L
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD1_RECV__SHIFT 0x00000006
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD2_MASK_MASK 0x00000100L
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD2_MASK__SHIFT 0x00000008
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD2_PD_DIS_MASK 0x00000200L
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD2_PD_DIS__SHIFT 0x00000009
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD2_RECV_MASK 0x00000400L
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD2_RECV__SHIFT 0x0000000a
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD3_MASK_MASK 0x00010000L
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD3_MASK__SHIFT 0x00000010
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD3_PD_DIS_MASK 0x00020000L
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD3_PD_DIS__SHIFT 0x00000011
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD3_RECV_MASK 0x00040000L
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD3_RECV__SHIFT 0x00000012
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD4_MASK_MASK 0x00100000L
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD4_MASK__SHIFT 0x00000014
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD4_PD_DIS_MASK 0x00200000L
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD4_PD_DIS__SHIFT 0x00000015
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD4_RECV_MASK 0x00400000L
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD4_RECV__SHIFT 0x00000016
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD5_MASK_MASK 0x01000000L
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD5_MASK__SHIFT 0x00000018
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD5_PD_DIS_MASK 0x02000000L
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD5_PD_DIS__SHIFT 0x00000019
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD5_RECV_MASK 0x04000000L
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD5_RECV__SHIFT 0x0000001a
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD6_MASK_MASK 0x10000000L
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD6_MASK__SHIFT 0x0000001c
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD6_PD_DIS_MASK 0x20000000L
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD6_PD_DIS__SHIFT 0x0000001d
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD6_RECV_MASK 0x40000000L
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD6_RECV__SHIFT 0x0000001e
+#define DC_GPIO_HPD_Y__DC_GPIO_HPD1_Y_MASK 0x00000001L
+#define DC_GPIO_HPD_Y__DC_GPIO_HPD1_Y__SHIFT 0x00000000
+#define DC_GPIO_HPD_Y__DC_GPIO_HPD2_Y_MASK 0x00000100L
+#define DC_GPIO_HPD_Y__DC_GPIO_HPD2_Y__SHIFT 0x00000008
+#define DC_GPIO_HPD_Y__DC_GPIO_HPD3_Y_MASK 0x00010000L
+#define DC_GPIO_HPD_Y__DC_GPIO_HPD3_Y__SHIFT 0x00000010
+#define DC_GPIO_HPD_Y__DC_GPIO_HPD4_Y_MASK 0x01000000L
+#define DC_GPIO_HPD_Y__DC_GPIO_HPD4_Y__SHIFT 0x00000018
+#define DC_GPIO_HPD_Y__DC_GPIO_HPD5_Y_MASK 0x04000000L
+#define DC_GPIO_HPD_Y__DC_GPIO_HPD5_Y__SHIFT 0x0000001a
+#define DC_GPIO_HPD_Y__DC_GPIO_HPD6_Y_MASK 0x10000000L
+#define DC_GPIO_HPD_Y__DC_GPIO_HPD6_Y__SHIFT 0x0000001c
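+/*
+ * Usage sketch (illustrative only; mmDC_GPIO_HPD_Y and RREG32() are assumed
+ * from the companion offset header and the amdgpu register accessors): a
+ * field is read by masking and shifting with its _MASK/__SHIFT pair, e.g.
+ * sampling the HPD1 pad level:
+ *
+ *   u32 v = RREG32(mmDC_GPIO_HPD_Y);
+ *   u32 hpd1 = (v & DC_GPIO_HPD_Y__DC_GPIO_HPD1_Y_MASK) >>
+ *              DC_GPIO_HPD_Y__DC_GPIO_HPD1_Y__SHIFT;
+ */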
+#define DC_GPIO_I2CPAD_A__DC_GPIO_SCL_A_MASK 0x00000001L
+#define DC_GPIO_I2CPAD_A__DC_GPIO_SCL_A__SHIFT 0x00000000
+#define DC_GPIO_I2CPAD_A__DC_GPIO_SDA_A_MASK 0x00000002L
+#define DC_GPIO_I2CPAD_A__DC_GPIO_SDA_A__SHIFT 0x00000001
+#define DC_GPIO_I2CPAD_EN__DC_GPIO_SCL_EN_MASK 0x00000001L
+#define DC_GPIO_I2CPAD_EN__DC_GPIO_SCL_EN__SHIFT 0x00000000
+#define DC_GPIO_I2CPAD_EN__DC_GPIO_SDA_EN_MASK 0x00000002L
+#define DC_GPIO_I2CPAD_EN__DC_GPIO_SDA_EN__SHIFT 0x00000001
+#define DC_GPIO_I2CPAD_MASK__DC_GPIO_SCL_MASK_MASK 0x00000001L
+#define DC_GPIO_I2CPAD_MASK__DC_GPIO_SCL_MASK__SHIFT 0x00000000
+#define DC_GPIO_I2CPAD_MASK__DC_GPIO_SCL_PD_DIS_MASK 0x00000002L
+#define DC_GPIO_I2CPAD_MASK__DC_GPIO_SCL_PD_DIS__SHIFT 0x00000001
+#define DC_GPIO_I2CPAD_MASK__DC_GPIO_SCL_RECV_MASK 0x00000004L
+#define DC_GPIO_I2CPAD_MASK__DC_GPIO_SCL_RECV__SHIFT 0x00000002
+#define DC_GPIO_I2CPAD_MASK__DC_GPIO_SDA_MASK_MASK 0x00000010L
+#define DC_GPIO_I2CPAD_MASK__DC_GPIO_SDA_MASK__SHIFT 0x00000004
+#define DC_GPIO_I2CPAD_MASK__DC_GPIO_SDA_PD_DIS_MASK 0x00000020L
+#define DC_GPIO_I2CPAD_MASK__DC_GPIO_SDA_PD_DIS__SHIFT 0x00000005
+#define DC_GPIO_I2CPAD_MASK__DC_GPIO_SDA_RECV_MASK 0x00000040L
+#define DC_GPIO_I2CPAD_MASK__DC_GPIO_SDA_RECV__SHIFT 0x00000006
+#define DC_GPIO_I2CPAD_STRENGTH__I2C_STRENGTH_SN_MASK 0x0000000fL
+#define DC_GPIO_I2CPAD_STRENGTH__I2C_STRENGTH_SN__SHIFT 0x00000000
+#define DC_GPIO_I2CPAD_STRENGTH__I2C_STRENGTH_SP_MASK 0x000000f0L
+#define DC_GPIO_I2CPAD_STRENGTH__I2C_STRENGTH_SP__SHIFT 0x00000004
+#define DC_GPIO_I2CPAD_Y__DC_GPIO_SCL_Y_MASK 0x00000001L
+#define DC_GPIO_I2CPAD_Y__DC_GPIO_SCL_Y__SHIFT 0x00000000
+#define DC_GPIO_I2CPAD_Y__DC_GPIO_SDA_Y_MASK 0x00000002L
+#define DC_GPIO_I2CPAD_Y__DC_GPIO_SDA_Y__SHIFT 0x00000001
+#define DC_GPIO_PAD_STRENGTH_1__GENLK_STRENGTH_SN_MASK 0x0000000fL
+#define DC_GPIO_PAD_STRENGTH_1__GENLK_STRENGTH_SN__SHIFT 0x00000000
+#define DC_GPIO_PAD_STRENGTH_1__GENLK_STRENGTH_SP_MASK 0x000000f0L
+#define DC_GPIO_PAD_STRENGTH_1__GENLK_STRENGTH_SP__SHIFT 0x00000004
+#define DC_GPIO_PAD_STRENGTH_1__SYNC_STRENGTH_SN_MASK 0x0f000000L
+#define DC_GPIO_PAD_STRENGTH_1__SYNC_STRENGTH_SN__SHIFT 0x00000018
+#define DC_GPIO_PAD_STRENGTH_1__SYNC_STRENGTH_SP_MASK 0xf0000000L
+#define DC_GPIO_PAD_STRENGTH_1__SYNC_STRENGTH_SP__SHIFT 0x0000001c
+#define DC_GPIO_PAD_STRENGTH_2__PWRSEQ_STRENGTH_SN_MASK 0x000f0000L
+#define DC_GPIO_PAD_STRENGTH_2__PWRSEQ_STRENGTH_SN__SHIFT 0x00000010
+#define DC_GPIO_PAD_STRENGTH_2__PWRSEQ_STRENGTH_SP_MASK 0x00f00000L
+#define DC_GPIO_PAD_STRENGTH_2__PWRSEQ_STRENGTH_SP__SHIFT 0x00000014
+#define DC_GPIO_PAD_STRENGTH_2__STRENGTH_SN_MASK 0x0000000fL
+#define DC_GPIO_PAD_STRENGTH_2__STRENGTH_SN__SHIFT 0x00000000
+#define DC_GPIO_PAD_STRENGTH_2__STRENGTH_SP_MASK 0x000000f0L
+#define DC_GPIO_PAD_STRENGTH_2__STRENGTH_SP__SHIFT 0x00000004
+#define DC_GPIO_PWRSEQ_A__DC_GPIO_BLON_A_MASK 0x00000001L
+#define DC_GPIO_PWRSEQ_A__DC_GPIO_BLON_A__SHIFT 0x00000000
+#define DC_GPIO_PWRSEQ_A__DC_GPIO_DIGON_A_MASK 0x00000100L
+#define DC_GPIO_PWRSEQ_A__DC_GPIO_DIGON_A__SHIFT 0x00000008
+#define DC_GPIO_PWRSEQ_A__DC_GPIO_ENA_BL_A_MASK 0x00010000L
+#define DC_GPIO_PWRSEQ_A__DC_GPIO_ENA_BL_A__SHIFT 0x00000010
+#define DC_GPIO_PWRSEQ_EN__DC_GPIO_BLON_EN_MASK 0x00000001L
+#define DC_GPIO_PWRSEQ_EN__DC_GPIO_BLON_EN__SHIFT 0x00000000
+#define DC_GPIO_PWRSEQ_EN__DC_GPIO_DIGON_EN_MASK 0x00000100L
+#define DC_GPIO_PWRSEQ_EN__DC_GPIO_DIGON_EN__SHIFT 0x00000008
+#define DC_GPIO_PWRSEQ_EN__DC_GPIO_ENA_BL_EN_MASK 0x00010000L
+#define DC_GPIO_PWRSEQ_EN__DC_GPIO_ENA_BL_EN__SHIFT 0x00000010
+#define DC_GPIO_PWRSEQ_EN__DC_GPIO_VARY_BL_GENERICA_EN_MASK 0x00000002L
+#define DC_GPIO_PWRSEQ_EN__DC_GPIO_VARY_BL_GENERICA_EN__SHIFT 0x00000001
+#define DC_GPIO_PWRSEQ_MASK__DC_GPIO_BLON_MASK_MASK 0x00000001L
+#define DC_GPIO_PWRSEQ_MASK__DC_GPIO_BLON_MASK__SHIFT 0x00000000
+#define DC_GPIO_PWRSEQ_MASK__DC_GPIO_BLON_PD_DIS_MASK 0x00000010L
+#define DC_GPIO_PWRSEQ_MASK__DC_GPIO_BLON_PD_DIS__SHIFT 0x00000004
+#define DC_GPIO_PWRSEQ_MASK__DC_GPIO_BLON_RECV_MASK 0x00000040L
+#define DC_GPIO_PWRSEQ_MASK__DC_GPIO_BLON_RECV__SHIFT 0x00000006
+#define DC_GPIO_PWRSEQ_MASK__DC_GPIO_DIGON_MASK_MASK 0x00000100L
+#define DC_GPIO_PWRSEQ_MASK__DC_GPIO_DIGON_MASK__SHIFT 0x00000008
+#define DC_GPIO_PWRSEQ_MASK__DC_GPIO_DIGON_PD_DIS_MASK 0x00001000L
+#define DC_GPIO_PWRSEQ_MASK__DC_GPIO_DIGON_PD_DIS__SHIFT 0x0000000c
+#define DC_GPIO_PWRSEQ_MASK__DC_GPIO_DIGON_RECV_MASK 0x00004000L
+#define DC_GPIO_PWRSEQ_MASK__DC_GPIO_DIGON_RECV__SHIFT 0x0000000e
+#define DC_GPIO_PWRSEQ_MASK__DC_GPIO_ENA_BL_MASK_MASK 0x00010000L
+#define DC_GPIO_PWRSEQ_MASK__DC_GPIO_ENA_BL_MASK__SHIFT 0x00000010
+#define DC_GPIO_PWRSEQ_MASK__DC_GPIO_ENA_BL_PD_DIS_MASK 0x00100000L
+#define DC_GPIO_PWRSEQ_MASK__DC_GPIO_ENA_BL_PD_DIS__SHIFT 0x00000014
+#define DC_GPIO_PWRSEQ_MASK__DC_GPIO_ENA_BL_RECV_MASK 0x00400000L
+#define DC_GPIO_PWRSEQ_MASK__DC_GPIO_ENA_BL_RECV__SHIFT 0x00000016
+#define DC_GPIO_PWRSEQ_Y__DC_GPIO_BLON_Y_MASK 0x00000001L
+#define DC_GPIO_PWRSEQ_Y__DC_GPIO_BLON_Y__SHIFT 0x00000000
+#define DC_GPIO_PWRSEQ_Y__DC_GPIO_DIGON_Y_MASK 0x00000100L
+#define DC_GPIO_PWRSEQ_Y__DC_GPIO_DIGON_Y__SHIFT 0x00000008
+#define DC_GPIO_PWRSEQ_Y__DC_GPIO_ENA_BL_Y_MASK 0x00010000L
+#define DC_GPIO_PWRSEQ_Y__DC_GPIO_ENA_BL_Y__SHIFT 0x00000010
+#define DC_GPIO_SYNCA_A__DC_GPIO_HSYNCA_A_MASK 0x00000001L
+#define DC_GPIO_SYNCA_A__DC_GPIO_HSYNCA_A__SHIFT 0x00000000
+#define DC_GPIO_SYNCA_A__DC_GPIO_VSYNCA_A_MASK 0x00000100L
+#define DC_GPIO_SYNCA_A__DC_GPIO_VSYNCA_A__SHIFT 0x00000008
+#define DC_GPIO_SYNCA_EN__DC_GPIO_HSYNCA_EN_MASK 0x00000001L
+#define DC_GPIO_SYNCA_EN__DC_GPIO_HSYNCA_EN__SHIFT 0x00000000
+#define DC_GPIO_SYNCA_EN__DC_GPIO_VSYNCA_EN_MASK 0x00000100L
+#define DC_GPIO_SYNCA_EN__DC_GPIO_VSYNCA_EN__SHIFT 0x00000008
+#define DC_GPIO_SYNCA_MASK__DC_GPIO_HSYNCA_CRTC_HSYNC_MASK_MASK 0x07000000L
+#define DC_GPIO_SYNCA_MASK__DC_GPIO_HSYNCA_CRTC_HSYNC_MASK__SHIFT 0x00000018
+#define DC_GPIO_SYNCA_MASK__DC_GPIO_HSYNCA_MASK_MASK 0x00000001L
+#define DC_GPIO_SYNCA_MASK__DC_GPIO_HSYNCA_MASK__SHIFT 0x00000000
+#define DC_GPIO_SYNCA_MASK__DC_GPIO_HSYNCA_PD_DIS_MASK 0x00000010L
+#define DC_GPIO_SYNCA_MASK__DC_GPIO_HSYNCA_PD_DIS__SHIFT 0x00000004
+#define DC_GPIO_SYNCA_MASK__DC_GPIO_HSYNCA_RECV_MASK 0x00000040L
+#define DC_GPIO_SYNCA_MASK__DC_GPIO_HSYNCA_RECV__SHIFT 0x00000006
+#define DC_GPIO_SYNCA_MASK__DC_GPIO_VSYNCA_CRTC_VSYNC_MASK_MASK 0x70000000L
+#define DC_GPIO_SYNCA_MASK__DC_GPIO_VSYNCA_CRTC_VSYNC_MASK__SHIFT 0x0000001c
+#define DC_GPIO_SYNCA_MASK__DC_GPIO_VSYNCA_MASK_MASK 0x00000100L
+#define DC_GPIO_SYNCA_MASK__DC_GPIO_VSYNCA_MASK__SHIFT 0x00000008
+#define DC_GPIO_SYNCA_MASK__DC_GPIO_VSYNCA_PD_DIS_MASK 0x00001000L
+#define DC_GPIO_SYNCA_MASK__DC_GPIO_VSYNCA_PD_DIS__SHIFT 0x0000000c
+#define DC_GPIO_SYNCA_MASK__DC_GPIO_VSYNCA_RECV_MASK 0x00004000L
+#define DC_GPIO_SYNCA_MASK__DC_GPIO_VSYNCA_RECV__SHIFT 0x0000000e
+#define DC_GPIO_SYNCA_Y__DC_GPIO_HSYNCA_Y_MASK 0x00000001L
+#define DC_GPIO_SYNCA_Y__DC_GPIO_HSYNCA_Y__SHIFT 0x00000000
+#define DC_GPIO_SYNCA_Y__DC_GPIO_VSYNCA_Y_MASK 0x00000100L
+#define DC_GPIO_SYNCA_Y__DC_GPIO_VSYNCA_Y__SHIFT 0x00000008
+#define DC_GPU_TIMER_READ_CNTL__DC_GPU_TIMER_READ_SELECT_MASK 0x0000003fL
+#define DC_GPU_TIMER_READ_CNTL__DC_GPU_TIMER_READ_SELECT__SHIFT 0x00000000
+#define DC_GPU_TIMER_READ_CNTL__DC_GPU_TIMER_START_POSITION_D1_VSYNC_NOM_MASK 0x00000700L
+#define DC_GPU_TIMER_READ_CNTL__DC_GPU_TIMER_START_POSITION_D1_VSYNC_NOM__SHIFT 0x00000008
+#define DC_GPU_TIMER_READ_CNTL__DC_GPU_TIMER_START_POSITION_D2_VSYNC_NOM_MASK 0x00003800L
+#define DC_GPU_TIMER_READ_CNTL__DC_GPU_TIMER_START_POSITION_D2_VSYNC_NOM__SHIFT 0x0000000b
+#define DC_GPU_TIMER_READ_CNTL__DC_GPU_TIMER_START_POSITION_D3_VSYNC_NOM_MASK 0x0001c000L
+#define DC_GPU_TIMER_READ_CNTL__DC_GPU_TIMER_START_POSITION_D3_VSYNC_NOM__SHIFT 0x0000000e
+#define DC_GPU_TIMER_READ_CNTL__DC_GPU_TIMER_START_POSITION_D4_VSYNC_NOM_MASK 0x000e0000L
+#define DC_GPU_TIMER_READ_CNTL__DC_GPU_TIMER_START_POSITION_D4_VSYNC_NOM__SHIFT 0x00000011
+#define DC_GPU_TIMER_READ_CNTL__DC_GPU_TIMER_START_POSITION_D5_VSYNC_NOM_MASK 0x00700000L
+#define DC_GPU_TIMER_READ_CNTL__DC_GPU_TIMER_START_POSITION_D5_VSYNC_NOM__SHIFT 0x00000014
+#define DC_GPU_TIMER_READ_CNTL__DC_GPU_TIMER_START_POSITION_D6_VSYNC_NOM_MASK 0x03800000L
+#define DC_GPU_TIMER_READ_CNTL__DC_GPU_TIMER_START_POSITION_D6_VSYNC_NOM__SHIFT 0x00000017
+#define DC_GPU_TIMER_READ__DC_GPU_TIMER_READ_MASK 0xffffffffL
+#define DC_GPU_TIMER_READ__DC_GPU_TIMER_READ__SHIFT 0x00000000
+#define DC_GPU_TIMER_START_POSITION_P_FLIP__DC_GPU_TIMER_START_POSITION_D1_P_FLIP_MASK 0x00000007L
+#define DC_GPU_TIMER_START_POSITION_P_FLIP__DC_GPU_TIMER_START_POSITION_D1_P_FLIP__SHIFT 0x00000000
+#define DC_GPU_TIMER_START_POSITION_P_FLIP__DC_GPU_TIMER_START_POSITION_D2_P_FLIP_MASK 0x00000070L
+#define DC_GPU_TIMER_START_POSITION_P_FLIP__DC_GPU_TIMER_START_POSITION_D2_P_FLIP__SHIFT 0x00000004
+#define DC_GPU_TIMER_START_POSITION_P_FLIP__DC_GPU_TIMER_START_POSITION_D3_P_FLIP_MASK 0x00000700L
+#define DC_GPU_TIMER_START_POSITION_P_FLIP__DC_GPU_TIMER_START_POSITION_D3_P_FLIP__SHIFT 0x00000008
+#define DC_GPU_TIMER_START_POSITION_P_FLIP__DC_GPU_TIMER_START_POSITION_D4_P_FLIP_MASK 0x00007000L
+#define DC_GPU_TIMER_START_POSITION_P_FLIP__DC_GPU_TIMER_START_POSITION_D4_P_FLIP__SHIFT 0x0000000c
+#define DC_GPU_TIMER_START_POSITION_P_FLIP__DC_GPU_TIMER_START_POSITION_D5_P_FLIP_MASK 0x00070000L
+#define DC_GPU_TIMER_START_POSITION_P_FLIP__DC_GPU_TIMER_START_POSITION_D5_P_FLIP__SHIFT 0x00000010
+#define DC_GPU_TIMER_START_POSITION_P_FLIP__DC_GPU_TIMER_START_POSITION_D6_P_FLIP_MASK 0x00700000L
+#define DC_GPU_TIMER_START_POSITION_P_FLIP__DC_GPU_TIMER_START_POSITION_D6_P_FLIP__SHIFT 0x00000014
+#define DC_GPU_TIMER_START_POSITION_V_UPDATE__DC_GPU_TIMER_START_POSITION_D1_V_UPDATE_MASK 0x00000007L
+#define DC_GPU_TIMER_START_POSITION_V_UPDATE__DC_GPU_TIMER_START_POSITION_D1_V_UPDATE__SHIFT 0x00000000
+#define DC_GPU_TIMER_START_POSITION_V_UPDATE__DC_GPU_TIMER_START_POSITION_D2_V_UPDATE_MASK 0x00000070L
+#define DC_GPU_TIMER_START_POSITION_V_UPDATE__DC_GPU_TIMER_START_POSITION_D2_V_UPDATE__SHIFT 0x00000004
+#define DC_GPU_TIMER_START_POSITION_V_UPDATE__DC_GPU_TIMER_START_POSITION_D3_V_UPDATE_MASK 0x00000700L
+#define DC_GPU_TIMER_START_POSITION_V_UPDATE__DC_GPU_TIMER_START_POSITION_D3_V_UPDATE__SHIFT 0x00000008
+#define DC_GPU_TIMER_START_POSITION_V_UPDATE__DC_GPU_TIMER_START_POSITION_D4_V_UPDATE_MASK 0x00007000L
+#define DC_GPU_TIMER_START_POSITION_V_UPDATE__DC_GPU_TIMER_START_POSITION_D4_V_UPDATE__SHIFT 0x0000000c
+#define DC_GPU_TIMER_START_POSITION_V_UPDATE__DC_GPU_TIMER_START_POSITION_D5_V_UPDATE_MASK 0x00070000L
+#define DC_GPU_TIMER_START_POSITION_V_UPDATE__DC_GPU_TIMER_START_POSITION_D5_V_UPDATE__SHIFT 0x00000010
+#define DC_GPU_TIMER_START_POSITION_V_UPDATE__DC_GPU_TIMER_START_POSITION_D6_V_UPDATE_MASK 0x00700000L
+#define DC_GPU_TIMER_START_POSITION_V_UPDATE__DC_GPU_TIMER_START_POSITION_D6_V_UPDATE__SHIFT 0x00000014
+#define DC_HPD1_CONTROL__DC_HPD1_CONNECTION_TIMER_MASK 0x00001fffL
+#define DC_HPD1_CONTROL__DC_HPD1_CONNECTION_TIMER__SHIFT 0x00000000
+#define DC_HPD1_CONTROL__DC_HPD1_EN_MASK 0x10000000L
+#define DC_HPD1_CONTROL__DC_HPD1_EN__SHIFT 0x0000001c
+#define DC_HPD1_CONTROL__DC_HPD1_RX_INT_TIMER_MASK 0x03ff0000L
+#define DC_HPD1_CONTROL__DC_HPD1_RX_INT_TIMER__SHIFT 0x00000010
+#define DC_HPD1_FAST_TRAIN_CNTL__DC_HPD1_CONNECT_AUX_TX_DELAY_MASK 0x000000ffL
+#define DC_HPD1_FAST_TRAIN_CNTL__DC_HPD1_CONNECT_AUX_TX_DELAY__SHIFT 0x00000000
+#define DC_HPD1_FAST_TRAIN_CNTL__DC_HPD1_CONNECT_AUX_TX_EN_MASK 0x01000000L
+#define DC_HPD1_FAST_TRAIN_CNTL__DC_HPD1_CONNECT_AUX_TX_EN__SHIFT 0x00000018
+#define DC_HPD1_FAST_TRAIN_CNTL__DC_HPD1_CONNECT_FAST_TRAIN_DELAY_MASK 0x000ff000L
+#define DC_HPD1_FAST_TRAIN_CNTL__DC_HPD1_CONNECT_FAST_TRAIN_DELAY__SHIFT 0x0000000c
+#define DC_HPD1_FAST_TRAIN_CNTL__DC_HPD1_CONNECT_FAST_TRAIN_EN_MASK 0x10000000L
+#define DC_HPD1_FAST_TRAIN_CNTL__DC_HPD1_CONNECT_FAST_TRAIN_EN__SHIFT 0x0000001c
+#define DC_HPD1_INT_CONTROL__DC_HPD1_INT_ACK_MASK 0x00000001L
+#define DC_HPD1_INT_CONTROL__DC_HPD1_INT_ACK__SHIFT 0x00000000
+#define DC_HPD1_INT_CONTROL__DC_HPD1_INT_EN_MASK 0x00010000L
+#define DC_HPD1_INT_CONTROL__DC_HPD1_INT_EN__SHIFT 0x00000010
+#define DC_HPD1_INT_CONTROL__DC_HPD1_INT_POLARITY_MASK 0x00000100L
+#define DC_HPD1_INT_CONTROL__DC_HPD1_INT_POLARITY__SHIFT 0x00000008
+#define DC_HPD1_INT_CONTROL__DC_HPD1_RX_INT_ACK_MASK 0x00100000L
+#define DC_HPD1_INT_CONTROL__DC_HPD1_RX_INT_ACK__SHIFT 0x00000014
+#define DC_HPD1_INT_CONTROL__DC_HPD1_RX_INT_EN_MASK 0x01000000L
+#define DC_HPD1_INT_CONTROL__DC_HPD1_RX_INT_EN__SHIFT 0x00000018
+#define DC_HPD1_INT_STATUS__DC_HPD1_INT_STATUS_MASK 0x00000001L
+#define DC_HPD1_INT_STATUS__DC_HPD1_INT_STATUS__SHIFT 0x00000000
+#define DC_HPD1_INT_STATUS__DC_HPD1_RX_INT_STATUS_MASK 0x00000100L
+#define DC_HPD1_INT_STATUS__DC_HPD1_RX_INT_STATUS__SHIFT 0x00000008
+#define DC_HPD1_INT_STATUS__DC_HPD1_SENSE_DELAYED_MASK 0x00000010L
+#define DC_HPD1_INT_STATUS__DC_HPD1_SENSE_DELAYED__SHIFT 0x00000004
+#define DC_HPD1_INT_STATUS__DC_HPD1_SENSE_MASK 0x00000002L
+#define DC_HPD1_INT_STATUS__DC_HPD1_SENSE__SHIFT 0x00000001
+#define DC_HPD1_INT_STATUS__DC_HPD1_TOGGLE_FILT_CON_TIMER_VAL_MASK 0x000ff000L
+#define DC_HPD1_INT_STATUS__DC_HPD1_TOGGLE_FILT_CON_TIMER_VAL__SHIFT 0x0000000c
+#define DC_HPD1_INT_STATUS__DC_HPD1_TOGGLE_FILT_DISCON_TIMER_VAL_MASK 0xff000000L
+#define DC_HPD1_INT_STATUS__DC_HPD1_TOGGLE_FILT_DISCON_TIMER_VAL__SHIFT 0x00000018
+#define DC_HPD1_TOGGLE_FILT_CNTL__DC_HPD1_CONNECT_INT_DELAY_MASK 0x000000ffL
+#define DC_HPD1_TOGGLE_FILT_CNTL__DC_HPD1_CONNECT_INT_DELAY__SHIFT 0x00000000
+#define DC_HPD1_TOGGLE_FILT_CNTL__DC_HPD1_DISCONNECT_INT_DELAY_MASK 0x0ff00000L
+#define DC_HPD1_TOGGLE_FILT_CNTL__DC_HPD1_DISCONNECT_INT_DELAY__SHIFT 0x00000014
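Each field in these headers is described by a *_MASK/*__SHIFT pair: AND a 32-bit register read with the mask, then shift right to normalize the field to bit 0. A minimal sketch of that convention, not part of the patch itself (read_reg() and the mmDC_HPD1_INT_STATUS offset are illustrative assumptions; the real driver goes through its own register-access helpers):

#include <stdint.h>
#include <stdbool.h>

extern uint32_t read_reg(uint32_t offset);      /* hypothetical MMIO read */
extern const uint32_t mmDC_HPD1_INT_STATUS;     /* register offset, assumed */

/* Returns true when the HPD1 pin senses a connected display:
 * isolate the field with its _MASK, normalize it with its __SHIFT. */
static bool hpd1_sense(void)
{
	uint32_t v = read_reg(mmDC_HPD1_INT_STATUS);

	return (v & DC_HPD1_INT_STATUS__DC_HPD1_SENSE_MASK) >>
	       DC_HPD1_INT_STATUS__DC_HPD1_SENSE__SHIFT;
}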
+#define DC_HPD2_CONTROL__DC_HPD2_CONNECTION_TIMER_MASK 0x00001fffL
+#define DC_HPD2_CONTROL__DC_HPD2_CONNECTION_TIMER__SHIFT 0x00000000
+#define DC_HPD2_CONTROL__DC_HPD2_EN_MASK 0x10000000L
+#define DC_HPD2_CONTROL__DC_HPD2_EN__SHIFT 0x0000001c
+#define DC_HPD2_CONTROL__DC_HPD2_RX_INT_TIMER_MASK 0x03ff0000L
+#define DC_HPD2_CONTROL__DC_HPD2_RX_INT_TIMER__SHIFT 0x00000010
+#define DC_HPD2_FAST_TRAIN_CNTL__DC_HPD2_CONNECT_AUX_TX_DELAY_MASK 0x000000ffL
+#define DC_HPD2_FAST_TRAIN_CNTL__DC_HPD2_CONNECT_AUX_TX_DELAY__SHIFT 0x00000000
+#define DC_HPD2_FAST_TRAIN_CNTL__DC_HPD2_CONNECT_AUX_TX_EN_MASK 0x01000000L
+#define DC_HPD2_FAST_TRAIN_CNTL__DC_HPD2_CONNECT_AUX_TX_EN__SHIFT 0x00000018
+#define DC_HPD2_FAST_TRAIN_CNTL__DC_HPD2_CONNECT_FAST_TRAIN_DELAY_MASK 0x000ff000L
+#define DC_HPD2_FAST_TRAIN_CNTL__DC_HPD2_CONNECT_FAST_TRAIN_DELAY__SHIFT 0x0000000c
+#define DC_HPD2_FAST_TRAIN_CNTL__DC_HPD2_CONNECT_FAST_TRAIN_EN_MASK 0x10000000L
+#define DC_HPD2_FAST_TRAIN_CNTL__DC_HPD2_CONNECT_FAST_TRAIN_EN__SHIFT 0x0000001c
+#define DC_HPD2_INT_CONTROL__DC_HPD2_INT_ACK_MASK 0x00000001L
+#define DC_HPD2_INT_CONTROL__DC_HPD2_INT_ACK__SHIFT 0x00000000
+#define DC_HPD2_INT_CONTROL__DC_HPD2_INT_EN_MASK 0x00010000L
+#define DC_HPD2_INT_CONTROL__DC_HPD2_INT_EN__SHIFT 0x00000010
+#define DC_HPD2_INT_CONTROL__DC_HPD2_INT_POLARITY_MASK 0x00000100L
+#define DC_HPD2_INT_CONTROL__DC_HPD2_INT_POLARITY__SHIFT 0x00000008
+#define DC_HPD2_INT_CONTROL__DC_HPD2_RX_INT_ACK_MASK 0x00100000L
+#define DC_HPD2_INT_CONTROL__DC_HPD2_RX_INT_ACK__SHIFT 0x00000014
+#define DC_HPD2_INT_CONTROL__DC_HPD2_RX_INT_EN_MASK 0x01000000L
+#define DC_HPD2_INT_CONTROL__DC_HPD2_RX_INT_EN__SHIFT 0x00000018
+#define DC_HPD2_INT_STATUS__DC_HPD2_INT_STATUS_MASK 0x00000001L
+#define DC_HPD2_INT_STATUS__DC_HPD2_INT_STATUS__SHIFT 0x00000000
+#define DC_HPD2_INT_STATUS__DC_HPD2_RX_INT_STATUS_MASK 0x00000100L
+#define DC_HPD2_INT_STATUS__DC_HPD2_RX_INT_STATUS__SHIFT 0x00000008
+#define DC_HPD2_INT_STATUS__DC_HPD2_SENSE_DELAYED_MASK 0x00000010L
+#define DC_HPD2_INT_STATUS__DC_HPD2_SENSE_DELAYED__SHIFT 0x00000004
+#define DC_HPD2_INT_STATUS__DC_HPD2_SENSE_MASK 0x00000002L
+#define DC_HPD2_INT_STATUS__DC_HPD2_SENSE__SHIFT 0x00000001
+#define DC_HPD2_INT_STATUS__DC_HPD2_TOGGLE_FILT_CON_TIMER_VAL_MASK 0x000ff000L
+#define DC_HPD2_INT_STATUS__DC_HPD2_TOGGLE_FILT_CON_TIMER_VAL__SHIFT 0x0000000c
+#define DC_HPD2_INT_STATUS__DC_HPD2_TOGGLE_FILT_DISCON_TIMER_VAL_MASK 0xff000000L
+#define DC_HPD2_INT_STATUS__DC_HPD2_TOGGLE_FILT_DISCON_TIMER_VAL__SHIFT 0x00000018
+#define DC_HPD2_TOGGLE_FILT_CNTL__DC_HPD2_CONNECT_INT_DELAY_MASK 0x000000ffL
+#define DC_HPD2_TOGGLE_FILT_CNTL__DC_HPD2_CONNECT_INT_DELAY__SHIFT 0x00000000
+#define DC_HPD2_TOGGLE_FILT_CNTL__DC_HPD2_DISCONNECT_INT_DELAY_MASK 0x0ff00000L
+#define DC_HPD2_TOGGLE_FILT_CNTL__DC_HPD2_DISCONNECT_INT_DELAY__SHIFT 0x00000014
+#define DC_HPD3_CONTROL__DC_HPD3_CONNECTION_TIMER_MASK 0x00001fffL
+#define DC_HPD3_CONTROL__DC_HPD3_CONNECTION_TIMER__SHIFT 0x00000000
+#define DC_HPD3_CONTROL__DC_HPD3_EN_MASK 0x10000000L
+#define DC_HPD3_CONTROL__DC_HPD3_EN__SHIFT 0x0000001c
+#define DC_HPD3_CONTROL__DC_HPD3_RX_INT_TIMER_MASK 0x03ff0000L
+#define DC_HPD3_CONTROL__DC_HPD3_RX_INT_TIMER__SHIFT 0x00000010
+#define DC_HPD3_FAST_TRAIN_CNTL__DC_HPD3_CONNECT_AUX_TX_DELAY_MASK 0x000000ffL
+#define DC_HPD3_FAST_TRAIN_CNTL__DC_HPD3_CONNECT_AUX_TX_DELAY__SHIFT 0x00000000
+#define DC_HPD3_FAST_TRAIN_CNTL__DC_HPD3_CONNECT_AUX_TX_EN_MASK 0x01000000L
+#define DC_HPD3_FAST_TRAIN_CNTL__DC_HPD3_CONNECT_AUX_TX_EN__SHIFT 0x00000018
+#define DC_HPD3_FAST_TRAIN_CNTL__DC_HPD3_CONNECT_FAST_TRAIN_DELAY_MASK 0x000ff000L
+#define DC_HPD3_FAST_TRAIN_CNTL__DC_HPD3_CONNECT_FAST_TRAIN_DELAY__SHIFT 0x0000000c
+#define DC_HPD3_FAST_TRAIN_CNTL__DC_HPD3_CONNECT_FAST_TRAIN_EN_MASK 0x10000000L
+#define DC_HPD3_FAST_TRAIN_CNTL__DC_HPD3_CONNECT_FAST_TRAIN_EN__SHIFT 0x0000001c
+#define DC_HPD3_INT_CONTROL__DC_HPD3_INT_ACK_MASK 0x00000001L
+#define DC_HPD3_INT_CONTROL__DC_HPD3_INT_ACK__SHIFT 0x00000000
+#define DC_HPD3_INT_CONTROL__DC_HPD3_INT_EN_MASK 0x00010000L
+#define DC_HPD3_INT_CONTROL__DC_HPD3_INT_EN__SHIFT 0x00000010
+#define DC_HPD3_INT_CONTROL__DC_HPD3_INT_POLARITY_MASK 0x00000100L
+#define DC_HPD3_INT_CONTROL__DC_HPD3_INT_POLARITY__SHIFT 0x00000008
+#define DC_HPD3_INT_CONTROL__DC_HPD3_RX_INT_ACK_MASK 0x00100000L
+#define DC_HPD3_INT_CONTROL__DC_HPD3_RX_INT_ACK__SHIFT 0x00000014
+#define DC_HPD3_INT_CONTROL__DC_HPD3_RX_INT_EN_MASK 0x01000000L
+#define DC_HPD3_INT_CONTROL__DC_HPD3_RX_INT_EN__SHIFT 0x00000018
+#define DC_HPD3_INT_STATUS__DC_HPD3_INT_STATUS_MASK 0x00000001L
+#define DC_HPD3_INT_STATUS__DC_HPD3_INT_STATUS__SHIFT 0x00000000
+#define DC_HPD3_INT_STATUS__DC_HPD3_RX_INT_STATUS_MASK 0x00000100L
+#define DC_HPD3_INT_STATUS__DC_HPD3_RX_INT_STATUS__SHIFT 0x00000008
+#define DC_HPD3_INT_STATUS__DC_HPD3_SENSE_DELAYED_MASK 0x00000010L
+#define DC_HPD3_INT_STATUS__DC_HPD3_SENSE_DELAYED__SHIFT 0x00000004
+#define DC_HPD3_INT_STATUS__DC_HPD3_SENSE_MASK 0x00000002L
+#define DC_HPD3_INT_STATUS__DC_HPD3_SENSE__SHIFT 0x00000001
+#define DC_HPD3_INT_STATUS__DC_HPD3_TOGGLE_FILT_CON_TIMER_VAL_MASK 0x000ff000L
+#define DC_HPD3_INT_STATUS__DC_HPD3_TOGGLE_FILT_CON_TIMER_VAL__SHIFT 0x0000000c
+#define DC_HPD3_INT_STATUS__DC_HPD3_TOGGLE_FILT_DISCON_TIMER_VAL_MASK 0xff000000L
+#define DC_HPD3_INT_STATUS__DC_HPD3_TOGGLE_FILT_DISCON_TIMER_VAL__SHIFT 0x00000018
+#define DC_HPD3_TOGGLE_FILT_CNTL__DC_HPD3_CONNECT_INT_DELAY_MASK 0x000000ffL
+#define DC_HPD3_TOGGLE_FILT_CNTL__DC_HPD3_CONNECT_INT_DELAY__SHIFT 0x00000000
+#define DC_HPD3_TOGGLE_FILT_CNTL__DC_HPD3_DISCONNECT_INT_DELAY_MASK 0x0ff00000L
+#define DC_HPD3_TOGGLE_FILT_CNTL__DC_HPD3_DISCONNECT_INT_DELAY__SHIFT 0x00000014
+#define DC_HPD4_CONTROL__DC_HPD4_CONNECTION_TIMER_MASK 0x00001fffL
+#define DC_HPD4_CONTROL__DC_HPD4_CONNECTION_TIMER__SHIFT 0x00000000
+#define DC_HPD4_CONTROL__DC_HPD4_EN_MASK 0x10000000L
+#define DC_HPD4_CONTROL__DC_HPD4_EN__SHIFT 0x0000001c
+#define DC_HPD4_CONTROL__DC_HPD4_RX_INT_TIMER_MASK 0x03ff0000L
+#define DC_HPD4_CONTROL__DC_HPD4_RX_INT_TIMER__SHIFT 0x00000010
+#define DC_HPD4_FAST_TRAIN_CNTL__DC_HPD4_CONNECT_AUX_TX_DELAY_MASK 0x000000ffL
+#define DC_HPD4_FAST_TRAIN_CNTL__DC_HPD4_CONNECT_AUX_TX_DELAY__SHIFT 0x00000000
+#define DC_HPD4_FAST_TRAIN_CNTL__DC_HPD4_CONNECT_AUX_TX_EN_MASK 0x01000000L
+#define DC_HPD4_FAST_TRAIN_CNTL__DC_HPD4_CONNECT_AUX_TX_EN__SHIFT 0x00000018
+#define DC_HPD4_FAST_TRAIN_CNTL__DC_HPD4_CONNECT_FAST_TRAIN_DELAY_MASK 0x000ff000L
+#define DC_HPD4_FAST_TRAIN_CNTL__DC_HPD4_CONNECT_FAST_TRAIN_DELAY__SHIFT 0x0000000c
+#define DC_HPD4_FAST_TRAIN_CNTL__DC_HPD4_CONNECT_FAST_TRAIN_EN_MASK 0x10000000L
+#define DC_HPD4_FAST_TRAIN_CNTL__DC_HPD4_CONNECT_FAST_TRAIN_EN__SHIFT 0x0000001c
+#define DC_HPD4_INT_CONTROL__DC_HPD4_INT_ACK_MASK 0x00000001L
+#define DC_HPD4_INT_CONTROL__DC_HPD4_INT_ACK__SHIFT 0x00000000
+#define DC_HPD4_INT_CONTROL__DC_HPD4_INT_EN_MASK 0x00010000L
+#define DC_HPD4_INT_CONTROL__DC_HPD4_INT_EN__SHIFT 0x00000010
+#define DC_HPD4_INT_CONTROL__DC_HPD4_INT_POLARITY_MASK 0x00000100L
+#define DC_HPD4_INT_CONTROL__DC_HPD4_INT_POLARITY__SHIFT 0x00000008
+#define DC_HPD4_INT_CONTROL__DC_HPD4_RX_INT_ACK_MASK 0x00100000L
+#define DC_HPD4_INT_CONTROL__DC_HPD4_RX_INT_ACK__SHIFT 0x00000014
+#define DC_HPD4_INT_CONTROL__DC_HPD4_RX_INT_EN_MASK 0x01000000L
+#define DC_HPD4_INT_CONTROL__DC_HPD4_RX_INT_EN__SHIFT 0x00000018
+#define DC_HPD4_INT_STATUS__DC_HPD4_INT_STATUS_MASK 0x00000001L
+#define DC_HPD4_INT_STATUS__DC_HPD4_INT_STATUS__SHIFT 0x00000000
+#define DC_HPD4_INT_STATUS__DC_HPD4_RX_INT_STATUS_MASK 0x00000100L
+#define DC_HPD4_INT_STATUS__DC_HPD4_RX_INT_STATUS__SHIFT 0x00000008
+#define DC_HPD4_INT_STATUS__DC_HPD4_SENSE_DELAYED_MASK 0x00000010L
+#define DC_HPD4_INT_STATUS__DC_HPD4_SENSE_DELAYED__SHIFT 0x00000004
+#define DC_HPD4_INT_STATUS__DC_HPD4_SENSE_MASK 0x00000002L
+#define DC_HPD4_INT_STATUS__DC_HPD4_SENSE__SHIFT 0x00000001
+#define DC_HPD4_INT_STATUS__DC_HPD4_TOGGLE_FILT_CON_TIMER_VAL_MASK 0x000ff000L
+#define DC_HPD4_INT_STATUS__DC_HPD4_TOGGLE_FILT_CON_TIMER_VAL__SHIFT 0x0000000c
+#define DC_HPD4_INT_STATUS__DC_HPD4_TOGGLE_FILT_DISCON_TIMER_VAL_MASK 0xff000000L
+#define DC_HPD4_INT_STATUS__DC_HPD4_TOGGLE_FILT_DISCON_TIMER_VAL__SHIFT 0x00000018
+#define DC_HPD4_TOGGLE_FILT_CNTL__DC_HPD4_CONNECT_INT_DELAY_MASK 0x000000ffL
+#define DC_HPD4_TOGGLE_FILT_CNTL__DC_HPD4_CONNECT_INT_DELAY__SHIFT 0x00000000
+#define DC_HPD4_TOGGLE_FILT_CNTL__DC_HPD4_DISCONNECT_INT_DELAY_MASK 0x0ff00000L
+#define DC_HPD4_TOGGLE_FILT_CNTL__DC_HPD4_DISCONNECT_INT_DELAY__SHIFT 0x00000014
+#define DC_HPD5_CONTROL__DC_HPD5_CONNECTION_TIMER_MASK 0x00001fffL
+#define DC_HPD5_CONTROL__DC_HPD5_CONNECTION_TIMER__SHIFT 0x00000000
+#define DC_HPD5_CONTROL__DC_HPD5_EN_MASK 0x10000000L
+#define DC_HPD5_CONTROL__DC_HPD5_EN__SHIFT 0x0000001c
+#define DC_HPD5_CONTROL__DC_HPD5_RX_INT_TIMER_MASK 0x03ff0000L
+#define DC_HPD5_CONTROL__DC_HPD5_RX_INT_TIMER__SHIFT 0x00000010
+#define DC_HPD5_FAST_TRAIN_CNTL__DC_HPD5_CONNECT_AUX_TX_DELAY_MASK 0x000000ffL
+#define DC_HPD5_FAST_TRAIN_CNTL__DC_HPD5_CONNECT_AUX_TX_DELAY__SHIFT 0x00000000
+#define DC_HPD5_FAST_TRAIN_CNTL__DC_HPD5_CONNECT_AUX_TX_EN_MASK 0x01000000L
+#define DC_HPD5_FAST_TRAIN_CNTL__DC_HPD5_CONNECT_AUX_TX_EN__SHIFT 0x00000018
+#define DC_HPD5_FAST_TRAIN_CNTL__DC_HPD5_CONNECT_FAST_TRAIN_DELAY_MASK 0x000ff000L
+#define DC_HPD5_FAST_TRAIN_CNTL__DC_HPD5_CONNECT_FAST_TRAIN_DELAY__SHIFT 0x0000000c
+#define DC_HPD5_FAST_TRAIN_CNTL__DC_HPD5_CONNECT_FAST_TRAIN_EN_MASK 0x10000000L
+#define DC_HPD5_FAST_TRAIN_CNTL__DC_HPD5_CONNECT_FAST_TRAIN_EN__SHIFT 0x0000001c
+#define DC_HPD5_INT_CONTROL__DC_HPD5_INT_ACK_MASK 0x00000001L
+#define DC_HPD5_INT_CONTROL__DC_HPD5_INT_ACK__SHIFT 0x00000000
+#define DC_HPD5_INT_CONTROL__DC_HPD5_INT_EN_MASK 0x00010000L
+#define DC_HPD5_INT_CONTROL__DC_HPD5_INT_EN__SHIFT 0x00000010
+#define DC_HPD5_INT_CONTROL__DC_HPD5_INT_POLARITY_MASK 0x00000100L
+#define DC_HPD5_INT_CONTROL__DC_HPD5_INT_POLARITY__SHIFT 0x00000008
+#define DC_HPD5_INT_CONTROL__DC_HPD5_RX_INT_ACK_MASK 0x00100000L
+#define DC_HPD5_INT_CONTROL__DC_HPD5_RX_INT_ACK__SHIFT 0x00000014
+#define DC_HPD5_INT_CONTROL__DC_HPD5_RX_INT_EN_MASK 0x01000000L
+#define DC_HPD5_INT_CONTROL__DC_HPD5_RX_INT_EN__SHIFT 0x00000018
+#define DC_HPD5_INT_STATUS__DC_HPD5_INT_STATUS_MASK 0x00000001L
+#define DC_HPD5_INT_STATUS__DC_HPD5_INT_STATUS__SHIFT 0x00000000
+#define DC_HPD5_INT_STATUS__DC_HPD5_RX_INT_STATUS_MASK 0x00000100L
+#define DC_HPD5_INT_STATUS__DC_HPD5_RX_INT_STATUS__SHIFT 0x00000008
+#define DC_HPD5_INT_STATUS__DC_HPD5_SENSE_DELAYED_MASK 0x00000010L
+#define DC_HPD5_INT_STATUS__DC_HPD5_SENSE_DELAYED__SHIFT 0x00000004
+#define DC_HPD5_INT_STATUS__DC_HPD5_SENSE_MASK 0x00000002L
+#define DC_HPD5_INT_STATUS__DC_HPD5_SENSE__SHIFT 0x00000001
+#define DC_HPD5_INT_STATUS__DC_HPD5_TOGGLE_FILT_CON_TIMER_VAL_MASK 0x000ff000L
+#define DC_HPD5_INT_STATUS__DC_HPD5_TOGGLE_FILT_CON_TIMER_VAL__SHIFT 0x0000000c
+#define DC_HPD5_INT_STATUS__DC_HPD5_TOGGLE_FILT_DISCON_TIMER_VAL_MASK 0xff000000L
+#define DC_HPD5_INT_STATUS__DC_HPD5_TOGGLE_FILT_DISCON_TIMER_VAL__SHIFT 0x00000018
+#define DC_HPD5_TOGGLE_FILT_CNTL__DC_HPD5_CONNECT_INT_DELAY_MASK 0x000000ffL
+#define DC_HPD5_TOGGLE_FILT_CNTL__DC_HPD5_CONNECT_INT_DELAY__SHIFT 0x00000000
+#define DC_HPD5_TOGGLE_FILT_CNTL__DC_HPD5_DISCONNECT_INT_DELAY_MASK 0x0ff00000L
+#define DC_HPD5_TOGGLE_FILT_CNTL__DC_HPD5_DISCONNECT_INT_DELAY__SHIFT 0x00000014
+#define DC_HPD6_CONTROL__DC_HPD6_CONNECTION_TIMER_MASK 0x00001fffL
+#define DC_HPD6_CONTROL__DC_HPD6_CONNECTION_TIMER__SHIFT 0x00000000
+#define DC_HPD6_CONTROL__DC_HPD6_EN_MASK 0x10000000L
+#define DC_HPD6_CONTROL__DC_HPD6_EN__SHIFT 0x0000001c
+#define DC_HPD6_CONTROL__DC_HPD6_RX_INT_TIMER_MASK 0x03ff0000L
+#define DC_HPD6_CONTROL__DC_HPD6_RX_INT_TIMER__SHIFT 0x00000010
+#define DC_HPD6_FAST_TRAIN_CNTL__DC_HPD6_CONNECT_AUX_TX_DELAY_MASK 0x000000ffL
+#define DC_HPD6_FAST_TRAIN_CNTL__DC_HPD6_CONNECT_AUX_TX_DELAY__SHIFT 0x00000000
+#define DC_HPD6_FAST_TRAIN_CNTL__DC_HPD6_CONNECT_AUX_TX_EN_MASK 0x01000000L
+#define DC_HPD6_FAST_TRAIN_CNTL__DC_HPD6_CONNECT_AUX_TX_EN__SHIFT 0x00000018
+#define DC_HPD6_FAST_TRAIN_CNTL__DC_HPD6_CONNECT_FAST_TRAIN_DELAY_MASK 0x000ff000L
+#define DC_HPD6_FAST_TRAIN_CNTL__DC_HPD6_CONNECT_FAST_TRAIN_DELAY__SHIFT 0x0000000c
+#define DC_HPD6_FAST_TRAIN_CNTL__DC_HPD6_CONNECT_FAST_TRAIN_EN_MASK 0x10000000L
+#define DC_HPD6_FAST_TRAIN_CNTL__DC_HPD6_CONNECT_FAST_TRAIN_EN__SHIFT 0x0000001c
+#define DC_HPD6_INT_CONTROL__DC_HPD6_INT_ACK_MASK 0x00000001L
+#define DC_HPD6_INT_CONTROL__DC_HPD6_INT_ACK__SHIFT 0x00000000
+#define DC_HPD6_INT_CONTROL__DC_HPD6_INT_EN_MASK 0x00010000L
+#define DC_HPD6_INT_CONTROL__DC_HPD6_INT_EN__SHIFT 0x00000010
+#define DC_HPD6_INT_CONTROL__DC_HPD6_INT_POLARITY_MASK 0x00000100L
+#define DC_HPD6_INT_CONTROL__DC_HPD6_INT_POLARITY__SHIFT 0x00000008
+#define DC_HPD6_INT_CONTROL__DC_HPD6_RX_INT_ACK_MASK 0x00100000L
+#define DC_HPD6_INT_CONTROL__DC_HPD6_RX_INT_ACK__SHIFT 0x00000014
+#define DC_HPD6_INT_CONTROL__DC_HPD6_RX_INT_EN_MASK 0x01000000L
+#define DC_HPD6_INT_CONTROL__DC_HPD6_RX_INT_EN__SHIFT 0x00000018
+#define DC_HPD6_INT_STATUS__DC_HPD6_INT_STATUS_MASK 0x00000001L
+#define DC_HPD6_INT_STATUS__DC_HPD6_INT_STATUS__SHIFT 0x00000000
+#define DC_HPD6_INT_STATUS__DC_HPD6_RX_INT_STATUS_MASK 0x00000100L
+#define DC_HPD6_INT_STATUS__DC_HPD6_RX_INT_STATUS__SHIFT 0x00000008
+#define DC_HPD6_INT_STATUS__DC_HPD6_SENSE_DELAYED_MASK 0x00000010L
+#define DC_HPD6_INT_STATUS__DC_HPD6_SENSE_DELAYED__SHIFT 0x00000004
+#define DC_HPD6_INT_STATUS__DC_HPD6_SENSE_MASK 0x00000002L
+#define DC_HPD6_INT_STATUS__DC_HPD6_SENSE__SHIFT 0x00000001
+#define DC_HPD6_INT_STATUS__DC_HPD6_TOGGLE_FILT_CON_TIMER_VAL_MASK 0x000ff000L
+#define DC_HPD6_INT_STATUS__DC_HPD6_TOGGLE_FILT_CON_TIMER_VAL__SHIFT 0x0000000c
+#define DC_HPD6_INT_STATUS__DC_HPD6_TOGGLE_FILT_DISCON_TIMER_VAL_MASK 0xff000000L
+#define DC_HPD6_INT_STATUS__DC_HPD6_TOGGLE_FILT_DISCON_TIMER_VAL__SHIFT 0x00000018
+#define DC_HPD6_TOGGLE_FILT_CNTL__DC_HPD6_CONNECT_INT_DELAY_MASK 0x000000ffL
+#define DC_HPD6_TOGGLE_FILT_CNTL__DC_HPD6_CONNECT_INT_DELAY__SHIFT 0x00000000
+#define DC_HPD6_TOGGLE_FILT_CNTL__DC_HPD6_DISCONNECT_INT_DELAY_MASK 0x0ff00000L
+#define DC_HPD6_TOGGLE_FILT_CNTL__DC_HPD6_DISCONNECT_INT_DELAY__SHIFT 0x00000014
+#define DC_I2C_ARBITRATION__DC_I2C_ABORT_HW_XFER_MASK 0x00000100L
+#define DC_I2C_ARBITRATION__DC_I2C_ABORT_HW_XFER__SHIFT 0x00000008
+#define DC_I2C_ARBITRATION__DC_I2C_ABORT_SW_XFER_MASK 0x00001000L
+#define DC_I2C_ARBITRATION__DC_I2C_ABORT_SW_XFER__SHIFT 0x0000000c
+#define DC_I2C_ARBITRATION__DC_I2C_DMCU_DONE_USING_I2C_REG_MASK 0x02000000L
+#define DC_I2C_ARBITRATION__DC_I2C_DMCU_DONE_USING_I2C_REG__SHIFT 0x00000019
+#define DC_I2C_ARBITRATION__DC_I2C_DMCU_USE_I2C_REG_REQ_MASK 0x01000000L
+#define DC_I2C_ARBITRATION__DC_I2C_DMCU_USE_I2C_REG_REQ__SHIFT 0x00000018
+#define DC_I2C_ARBITRATION__DC_I2C_NO_QUEUED_SW_GO_MASK 0x00000010L
+#define DC_I2C_ARBITRATION__DC_I2C_NO_QUEUED_SW_GO__SHIFT 0x00000004
+#define DC_I2C_ARBITRATION__DC_I2C_REG_RW_CNTL_STATUS_MASK 0x0000000cL
+#define DC_I2C_ARBITRATION__DC_I2C_REG_RW_CNTL_STATUS__SHIFT 0x00000002
+#define DC_I2C_ARBITRATION__DC_I2C_SW_DONE_USING_I2C_REG_MASK 0x00200000L
+#define DC_I2C_ARBITRATION__DC_I2C_SW_DONE_USING_I2C_REG__SHIFT 0x00000015
+#define DC_I2C_ARBITRATION__DC_I2C_SW_PRIORITY_MASK 0x00000003L
+#define DC_I2C_ARBITRATION__DC_I2C_SW_PRIORITY__SHIFT 0x00000000
+#define DC_I2C_ARBITRATION__DC_I2C_SW_USE_I2C_REG_REQ_MASK 0x00100000L
+#define DC_I2C_ARBITRATION__DC_I2C_SW_USE_I2C_REG_REQ__SHIFT 0x00000014
+#define DC_I2C_CONTROL__DC_I2C_DBG_REF_SEL_MASK 0x80000000L
+#define DC_I2C_CONTROL__DC_I2C_DBG_REF_SEL__SHIFT 0x0000001f
+#define DC_I2C_CONTROL__DC_I2C_DDC_SELECT_MASK 0x00000700L
+#define DC_I2C_CONTROL__DC_I2C_DDC_SELECT__SHIFT 0x00000008
+#define DC_I2C_CONTROL__DC_I2C_GO_MASK 0x00000001L
+#define DC_I2C_CONTROL__DC_I2C_GO__SHIFT 0x00000000
+#define DC_I2C_CONTROL__DC_I2C_SEND_RESET_MASK 0x00000004L
+#define DC_I2C_CONTROL__DC_I2C_SEND_RESET__SHIFT 0x00000002
+#define DC_I2C_CONTROL__DC_I2C_SOFT_RESET_MASK 0x00000002L
+#define DC_I2C_CONTROL__DC_I2C_SOFT_RESET__SHIFT 0x00000001
+#define DC_I2C_CONTROL__DC_I2C_SW_STATUS_RESET_MASK 0x00000008L
+#define DC_I2C_CONTROL__DC_I2C_SW_STATUS_RESET__SHIFT 0x00000003
+#define DC_I2C_CONTROL__DC_I2C_TRANSACTION_COUNT_MASK 0x00300000L
+#define DC_I2C_CONTROL__DC_I2C_TRANSACTION_COUNT__SHIFT 0x00000014
+#define DC_I2C_DATA__DC_I2C_DATA_MASK 0x0000ff00L
+#define DC_I2C_DATA__DC_I2C_DATA_RW_MASK 0x00000001L
+#define DC_I2C_DATA__DC_I2C_DATA_RW__SHIFT 0x00000000
+#define DC_I2C_DATA__DC_I2C_DATA__SHIFT 0x00000008
+#define DC_I2C_DATA__DC_I2C_INDEX_MASK 0x00ff0000L
+#define DC_I2C_DATA__DC_I2C_INDEX__SHIFT 0x00000010
+#define DC_I2C_DATA__DC_I2C_INDEX_WRITE_MASK 0x80000000L
+#define DC_I2C_DATA__DC_I2C_INDEX_WRITE__SHIFT 0x0000001f
+#define DC_I2C_DDC1_HW_STATUS__DC_I2C_DDC1_EDID_DETECT_NUM_VALID_TRIES_MASK 0x0f000000L
+#define DC_I2C_DDC1_HW_STATUS__DC_I2C_DDC1_EDID_DETECT_NUM_VALID_TRIES__SHIFT 0x00000018
+#define DC_I2C_DDC1_HW_STATUS__DC_I2C_DDC1_EDID_DETECT_STATE_MASK 0x70000000L
+#define DC_I2C_DDC1_HW_STATUS__DC_I2C_DDC1_EDID_DETECT_STATE__SHIFT 0x0000001c
+#define DC_I2C_DDC1_HW_STATUS__DC_I2C_DDC1_EDID_DETECT_STATUS_MASK 0x00100000L
+#define DC_I2C_DDC1_HW_STATUS__DC_I2C_DDC1_EDID_DETECT_STATUS__SHIFT 0x00000014
+#define DC_I2C_DDC1_HW_STATUS__DC_I2C_DDC1_HW_DONE_MASK 0x00000008L
+#define DC_I2C_DDC1_HW_STATUS__DC_I2C_DDC1_HW_DONE__SHIFT 0x00000003
+#define DC_I2C_DDC1_HW_STATUS__DC_I2C_DDC1_HW_REQ_MASK 0x00010000L
+#define DC_I2C_DDC1_HW_STATUS__DC_I2C_DDC1_HW_REQ__SHIFT 0x00000010
+#define DC_I2C_DDC1_HW_STATUS__DC_I2C_DDC1_HW_STATUS_MASK 0x00000003L
+#define DC_I2C_DDC1_HW_STATUS__DC_I2C_DDC1_HW_STATUS__SHIFT 0x00000000
+#define DC_I2C_DDC1_HW_STATUS__DC_I2C_DDC1_HW_URG_MASK 0x00020000L
+#define DC_I2C_DDC1_HW_STATUS__DC_I2C_DDC1_HW_URG__SHIFT 0x00000011
+#define DC_I2C_DDC1_SETUP__DC_I2C_DDC1_CLK_DRIVE_EN_MASK 0x00000080L
+#define DC_I2C_DDC1_SETUP__DC_I2C_DDC1_CLK_DRIVE_EN__SHIFT 0x00000007
+#define DC_I2C_DDC1_SETUP__DC_I2C_DDC1_DATA_DRIVE_EN_MASK 0x00000001L
+#define DC_I2C_DDC1_SETUP__DC_I2C_DDC1_DATA_DRIVE_EN__SHIFT 0x00000000
+#define DC_I2C_DDC1_SETUP__DC_I2C_DDC1_DATA_DRIVE_SEL_MASK 0x00000002L
+#define DC_I2C_DDC1_SETUP__DC_I2C_DDC1_DATA_DRIVE_SEL__SHIFT 0x00000001
+#define DC_I2C_DDC1_SETUP__DC_I2C_DDC1_EDID_DETECT_ENABLE_MASK 0x00000010L
+#define DC_I2C_DDC1_SETUP__DC_I2C_DDC1_EDID_DETECT_ENABLE__SHIFT 0x00000004
+#define DC_I2C_DDC1_SETUP__DC_I2C_DDC1_EDID_DETECT_MODE_MASK 0x00000020L
+#define DC_I2C_DDC1_SETUP__DC_I2C_DDC1_EDID_DETECT_MODE__SHIFT 0x00000005
+#define DC_I2C_DDC1_SETUP__DC_I2C_DDC1_ENABLE_MASK 0x00000040L
+#define DC_I2C_DDC1_SETUP__DC_I2C_DDC1_ENABLE__SHIFT 0x00000006
+#define DC_I2C_DDC1_SETUP__DC_I2C_DDC1_INTRA_BYTE_DELAY_MASK 0x0000ff00L
+#define DC_I2C_DDC1_SETUP__DC_I2C_DDC1_INTRA_BYTE_DELAY__SHIFT 0x00000008
+#define DC_I2C_DDC1_SETUP__DC_I2C_DDC1_INTRA_TRANSACTION_DELAY_MASK 0x00ff0000L
+#define DC_I2C_DDC1_SETUP__DC_I2C_DDC1_INTRA_TRANSACTION_DELAY__SHIFT 0x00000010
+#define DC_I2C_DDC1_SETUP__DC_I2C_DDC1_TIME_LIMIT_MASK 0xff000000L
+#define DC_I2C_DDC1_SETUP__DC_I2C_DDC1_TIME_LIMIT__SHIFT 0x00000018
+#define DC_I2C_DDC1_SPEED__DC_I2C_DDC1_DISABLE_FILTER_DURING_STALL_MASK 0x00000010L
+#define DC_I2C_DDC1_SPEED__DC_I2C_DDC1_DISABLE_FILTER_DURING_STALL__SHIFT 0x00000004
+#define DC_I2C_DDC1_SPEED__DC_I2C_DDC1_PRESCALE_MASK 0xffff0000L
+#define DC_I2C_DDC1_SPEED__DC_I2C_DDC1_PRESCALE__SHIFT 0x00000010
+#define DC_I2C_DDC1_SPEED__DC_I2C_DDC1_THRESHOLD_MASK 0x00000003L
+#define DC_I2C_DDC1_SPEED__DC_I2C_DDC1_THRESHOLD__SHIFT 0x00000000
+#define DC_I2C_DDC2_HW_STATUS__DC_I2C_DDC2_EDID_DETECT_NUM_VALID_TRIES_MASK 0x0f000000L
+#define DC_I2C_DDC2_HW_STATUS__DC_I2C_DDC2_EDID_DETECT_NUM_VALID_TRIES__SHIFT 0x00000018
+#define DC_I2C_DDC2_HW_STATUS__DC_I2C_DDC2_EDID_DETECT_STATE_MASK 0x70000000L
+#define DC_I2C_DDC2_HW_STATUS__DC_I2C_DDC2_EDID_DETECT_STATE__SHIFT 0x0000001c
+#define DC_I2C_DDC2_HW_STATUS__DC_I2C_DDC2_EDID_DETECT_STATUS_MASK 0x00100000L
+#define DC_I2C_DDC2_HW_STATUS__DC_I2C_DDC2_EDID_DETECT_STATUS__SHIFT 0x00000014
+#define DC_I2C_DDC2_HW_STATUS__DC_I2C_DDC2_HW_DONE_MASK 0x00000008L
+#define DC_I2C_DDC2_HW_STATUS__DC_I2C_DDC2_HW_DONE__SHIFT 0x00000003
+#define DC_I2C_DDC2_HW_STATUS__DC_I2C_DDC2_HW_REQ_MASK 0x00010000L
+#define DC_I2C_DDC2_HW_STATUS__DC_I2C_DDC2_HW_REQ__SHIFT 0x00000010
+#define DC_I2C_DDC2_HW_STATUS__DC_I2C_DDC2_HW_STATUS_MASK 0x00000003L
+#define DC_I2C_DDC2_HW_STATUS__DC_I2C_DDC2_HW_STATUS__SHIFT 0x00000000
+#define DC_I2C_DDC2_HW_STATUS__DC_I2C_DDC2_HW_URG_MASK 0x00020000L
+#define DC_I2C_DDC2_HW_STATUS__DC_I2C_DDC2_HW_URG__SHIFT 0x00000011
+#define DC_I2C_DDC2_SETUP__DC_I2C_DDC2_CLK_DRIVE_EN_MASK 0x00000080L
+#define DC_I2C_DDC2_SETUP__DC_I2C_DDC2_CLK_DRIVE_EN__SHIFT 0x00000007
+#define DC_I2C_DDC2_SETUP__DC_I2C_DDC2_DATA_DRIVE_EN_MASK 0x00000001L
+#define DC_I2C_DDC2_SETUP__DC_I2C_DDC2_DATA_DRIVE_EN__SHIFT 0x00000000
+#define DC_I2C_DDC2_SETUP__DC_I2C_DDC2_DATA_DRIVE_SEL_MASK 0x00000002L
+#define DC_I2C_DDC2_SETUP__DC_I2C_DDC2_DATA_DRIVE_SEL__SHIFT 0x00000001
+#define DC_I2C_DDC2_SETUP__DC_I2C_DDC2_EDID_DETECT_ENABLE_MASK 0x00000010L
+#define DC_I2C_DDC2_SETUP__DC_I2C_DDC2_EDID_DETECT_ENABLE__SHIFT 0x00000004
+#define DC_I2C_DDC2_SETUP__DC_I2C_DDC2_EDID_DETECT_MODE_MASK 0x00000020L
+#define DC_I2C_DDC2_SETUP__DC_I2C_DDC2_EDID_DETECT_MODE__SHIFT 0x00000005
+#define DC_I2C_DDC2_SETUP__DC_I2C_DDC2_ENABLE_MASK 0x00000040L
+#define DC_I2C_DDC2_SETUP__DC_I2C_DDC2_ENABLE__SHIFT 0x00000006
+#define DC_I2C_DDC2_SETUP__DC_I2C_DDC2_INTRA_BYTE_DELAY_MASK 0x0000ff00L
+#define DC_I2C_DDC2_SETUP__DC_I2C_DDC2_INTRA_BYTE_DELAY__SHIFT 0x00000008
+#define DC_I2C_DDC2_SETUP__DC_I2C_DDC2_INTRA_TRANSACTION_DELAY_MASK 0x00ff0000L
+#define DC_I2C_DDC2_SETUP__DC_I2C_DDC2_INTRA_TRANSACTION_DELAY__SHIFT 0x00000010
+#define DC_I2C_DDC2_SETUP__DC_I2C_DDC2_TIME_LIMIT_MASK 0xff000000L
+#define DC_I2C_DDC2_SETUP__DC_I2C_DDC2_TIME_LIMIT__SHIFT 0x00000018
+#define DC_I2C_DDC2_SPEED__DC_I2C_DDC2_DISABLE_FILTER_DURING_STALL_MASK 0x00000010L
+#define DC_I2C_DDC2_SPEED__DC_I2C_DDC2_DISABLE_FILTER_DURING_STALL__SHIFT 0x00000004
+#define DC_I2C_DDC2_SPEED__DC_I2C_DDC2_PRESCALE_MASK 0xffff0000L
+#define DC_I2C_DDC2_SPEED__DC_I2C_DDC2_PRESCALE__SHIFT 0x00000010
+#define DC_I2C_DDC2_SPEED__DC_I2C_DDC2_THRESHOLD_MASK 0x00000003L
+#define DC_I2C_DDC2_SPEED__DC_I2C_DDC2_THRESHOLD__SHIFT 0x00000000
+#define DC_I2C_DDC3_HW_STATUS__DC_I2C_DDC3_EDID_DETECT_NUM_VALID_TRIES_MASK 0x0f000000L
+#define DC_I2C_DDC3_HW_STATUS__DC_I2C_DDC3_EDID_DETECT_NUM_VALID_TRIES__SHIFT 0x00000018
+#define DC_I2C_DDC3_HW_STATUS__DC_I2C_DDC3_EDID_DETECT_STATE_MASK 0x70000000L
+#define DC_I2C_DDC3_HW_STATUS__DC_I2C_DDC3_EDID_DETECT_STATE__SHIFT 0x0000001c
+#define DC_I2C_DDC3_HW_STATUS__DC_I2C_DDC3_EDID_DETECT_STATUS_MASK 0x00100000L
+#define DC_I2C_DDC3_HW_STATUS__DC_I2C_DDC3_EDID_DETECT_STATUS__SHIFT 0x00000014
+#define DC_I2C_DDC3_HW_STATUS__DC_I2C_DDC3_HW_DONE_MASK 0x00000008L
+#define DC_I2C_DDC3_HW_STATUS__DC_I2C_DDC3_HW_DONE__SHIFT 0x00000003
+#define DC_I2C_DDC3_HW_STATUS__DC_I2C_DDC3_HW_REQ_MASK 0x00010000L
+#define DC_I2C_DDC3_HW_STATUS__DC_I2C_DDC3_HW_REQ__SHIFT 0x00000010
+#define DC_I2C_DDC3_HW_STATUS__DC_I2C_DDC3_HW_STATUS_MASK 0x00000003L
+#define DC_I2C_DDC3_HW_STATUS__DC_I2C_DDC3_HW_STATUS__SHIFT 0x00000000
+#define DC_I2C_DDC3_HW_STATUS__DC_I2C_DDC3_HW_URG_MASK 0x00020000L
+#define DC_I2C_DDC3_HW_STATUS__DC_I2C_DDC3_HW_URG__SHIFT 0x00000011
+#define DC_I2C_DDC3_SETUP__DC_I2C_DDC3_CLK_DRIVE_EN_MASK 0x00000080L
+#define DC_I2C_DDC3_SETUP__DC_I2C_DDC3_CLK_DRIVE_EN__SHIFT 0x00000007
+#define DC_I2C_DDC3_SETUP__DC_I2C_DDC3_DATA_DRIVE_EN_MASK 0x00000001L
+#define DC_I2C_DDC3_SETUP__DC_I2C_DDC3_DATA_DRIVE_EN__SHIFT 0x00000000
+#define DC_I2C_DDC3_SETUP__DC_I2C_DDC3_DATA_DRIVE_SEL_MASK 0x00000002L
+#define DC_I2C_DDC3_SETUP__DC_I2C_DDC3_DATA_DRIVE_SEL__SHIFT 0x00000001
+#define DC_I2C_DDC3_SETUP__DC_I2C_DDC3_EDID_DETECT_ENABLE_MASK 0x00000010L
+#define DC_I2C_DDC3_SETUP__DC_I2C_DDC3_EDID_DETECT_ENABLE__SHIFT 0x00000004
+#define DC_I2C_DDC3_SETUP__DC_I2C_DDC3_EDID_DETECT_MODE_MASK 0x00000020L
+#define DC_I2C_DDC3_SETUP__DC_I2C_DDC3_EDID_DETECT_MODE__SHIFT 0x00000005
+#define DC_I2C_DDC3_SETUP__DC_I2C_DDC3_ENABLE_MASK 0x00000040L
+#define DC_I2C_DDC3_SETUP__DC_I2C_DDC3_ENABLE__SHIFT 0x00000006
+#define DC_I2C_DDC3_SETUP__DC_I2C_DDC3_INTRA_BYTE_DELAY_MASK 0x0000ff00L
+#define DC_I2C_DDC3_SETUP__DC_I2C_DDC3_INTRA_BYTE_DELAY__SHIFT 0x00000008
+#define DC_I2C_DDC3_SETUP__DC_I2C_DDC3_INTRA_TRANSACTION_DELAY_MASK 0x00ff0000L
+#define DC_I2C_DDC3_SETUP__DC_I2C_DDC3_INTRA_TRANSACTION_DELAY__SHIFT 0x00000010
+#define DC_I2C_DDC3_SETUP__DC_I2C_DDC3_TIME_LIMIT_MASK 0xff000000L
+#define DC_I2C_DDC3_SETUP__DC_I2C_DDC3_TIME_LIMIT__SHIFT 0x00000018
+#define DC_I2C_DDC3_SPEED__DC_I2C_DDC3_DISABLE_FILTER_DURING_STALL_MASK 0x00000010L
+#define DC_I2C_DDC3_SPEED__DC_I2C_DDC3_DISABLE_FILTER_DURING_STALL__SHIFT 0x00000004
+#define DC_I2C_DDC3_SPEED__DC_I2C_DDC3_PRESCALE_MASK 0xffff0000L
+#define DC_I2C_DDC3_SPEED__DC_I2C_DDC3_PRESCALE__SHIFT 0x00000010
+#define DC_I2C_DDC3_SPEED__DC_I2C_DDC3_THRESHOLD_MASK 0x00000003L
+#define DC_I2C_DDC3_SPEED__DC_I2C_DDC3_THRESHOLD__SHIFT 0x00000000
+#define DC_I2C_DDC4_HW_STATUS__DC_I2C_DDC4_EDID_DETECT_NUM_VALID_TRIES_MASK 0x0f000000L
+#define DC_I2C_DDC4_HW_STATUS__DC_I2C_DDC4_EDID_DETECT_NUM_VALID_TRIES__SHIFT 0x00000018
+#define DC_I2C_DDC4_HW_STATUS__DC_I2C_DDC4_EDID_DETECT_STATE_MASK 0x70000000L
+#define DC_I2C_DDC4_HW_STATUS__DC_I2C_DDC4_EDID_DETECT_STATE__SHIFT 0x0000001c
+#define DC_I2C_DDC4_HW_STATUS__DC_I2C_DDC4_EDID_DETECT_STATUS_MASK 0x00100000L
+#define DC_I2C_DDC4_HW_STATUS__DC_I2C_DDC4_EDID_DETECT_STATUS__SHIFT 0x00000014
+#define DC_I2C_DDC4_HW_STATUS__DC_I2C_DDC4_HW_DONE_MASK 0x00000008L
+#define DC_I2C_DDC4_HW_STATUS__DC_I2C_DDC4_HW_DONE__SHIFT 0x00000003
+#define DC_I2C_DDC4_HW_STATUS__DC_I2C_DDC4_HW_REQ_MASK 0x00010000L
+#define DC_I2C_DDC4_HW_STATUS__DC_I2C_DDC4_HW_REQ__SHIFT 0x00000010
+#define DC_I2C_DDC4_HW_STATUS__DC_I2C_DDC4_HW_STATUS_MASK 0x00000003L
+#define DC_I2C_DDC4_HW_STATUS__DC_I2C_DDC4_HW_STATUS__SHIFT 0x00000000
+#define DC_I2C_DDC4_HW_STATUS__DC_I2C_DDC4_HW_URG_MASK 0x00020000L
+#define DC_I2C_DDC4_HW_STATUS__DC_I2C_DDC4_HW_URG__SHIFT 0x00000011
+#define DC_I2C_DDC4_SETUP__DC_I2C_DDC4_CLK_DRIVE_EN_MASK 0x00000080L
+#define DC_I2C_DDC4_SETUP__DC_I2C_DDC4_CLK_DRIVE_EN__SHIFT 0x00000007
+#define DC_I2C_DDC4_SETUP__DC_I2C_DDC4_DATA_DRIVE_EN_MASK 0x00000001L
+#define DC_I2C_DDC4_SETUP__DC_I2C_DDC4_DATA_DRIVE_EN__SHIFT 0x00000000
+#define DC_I2C_DDC4_SETUP__DC_I2C_DDC4_DATA_DRIVE_SEL_MASK 0x00000002L
+#define DC_I2C_DDC4_SETUP__DC_I2C_DDC4_DATA_DRIVE_SEL__SHIFT 0x00000001
+#define DC_I2C_DDC4_SETUP__DC_I2C_DDC4_EDID_DETECT_ENABLE_MASK 0x00000010L
+#define DC_I2C_DDC4_SETUP__DC_I2C_DDC4_EDID_DETECT_ENABLE__SHIFT 0x00000004
+#define DC_I2C_DDC4_SETUP__DC_I2C_DDC4_EDID_DETECT_MODE_MASK 0x00000020L
+#define DC_I2C_DDC4_SETUP__DC_I2C_DDC4_EDID_DETECT_MODE__SHIFT 0x00000005
+#define DC_I2C_DDC4_SETUP__DC_I2C_DDC4_ENABLE_MASK 0x00000040L
+#define DC_I2C_DDC4_SETUP__DC_I2C_DDC4_ENABLE__SHIFT 0x00000006
+#define DC_I2C_DDC4_SETUP__DC_I2C_DDC4_INTRA_BYTE_DELAY_MASK 0x0000ff00L
+#define DC_I2C_DDC4_SETUP__DC_I2C_DDC4_INTRA_BYTE_DELAY__SHIFT 0x00000008
+#define DC_I2C_DDC4_SETUP__DC_I2C_DDC4_INTRA_TRANSACTION_DELAY_MASK 0x00ff0000L
+#define DC_I2C_DDC4_SETUP__DC_I2C_DDC4_INTRA_TRANSACTION_DELAY__SHIFT 0x00000010
+#define DC_I2C_DDC4_SETUP__DC_I2C_DDC4_TIME_LIMIT_MASK 0xff000000L
+#define DC_I2C_DDC4_SETUP__DC_I2C_DDC4_TIME_LIMIT__SHIFT 0x00000018
+#define DC_I2C_DDC4_SPEED__DC_I2C_DDC4_DISABLE_FILTER_DURING_STALL_MASK 0x00000010L
+#define DC_I2C_DDC4_SPEED__DC_I2C_DDC4_DISABLE_FILTER_DURING_STALL__SHIFT 0x00000004
+#define DC_I2C_DDC4_SPEED__DC_I2C_DDC4_PRESCALE_MASK 0xffff0000L
+#define DC_I2C_DDC4_SPEED__DC_I2C_DDC4_PRESCALE__SHIFT 0x00000010
+#define DC_I2C_DDC4_SPEED__DC_I2C_DDC4_THRESHOLD_MASK 0x00000003L
+#define DC_I2C_DDC4_SPEED__DC_I2C_DDC4_THRESHOLD__SHIFT 0x00000000
+#define DC_I2C_DDC5_HW_STATUS__DC_I2C_DDC5_EDID_DETECT_NUM_VALID_TRIES_MASK 0x0f000000L
+#define DC_I2C_DDC5_HW_STATUS__DC_I2C_DDC5_EDID_DETECT_NUM_VALID_TRIES__SHIFT 0x00000018
+#define DC_I2C_DDC5_HW_STATUS__DC_I2C_DDC5_EDID_DETECT_STATE_MASK 0x70000000L
+#define DC_I2C_DDC5_HW_STATUS__DC_I2C_DDC5_EDID_DETECT_STATE__SHIFT 0x0000001c
+#define DC_I2C_DDC5_HW_STATUS__DC_I2C_DDC5_EDID_DETECT_STATUS_MASK 0x00100000L
+#define DC_I2C_DDC5_HW_STATUS__DC_I2C_DDC5_EDID_DETECT_STATUS__SHIFT 0x00000014
+#define DC_I2C_DDC5_HW_STATUS__DC_I2C_DDC5_HW_DONE_MASK 0x00000008L
+#define DC_I2C_DDC5_HW_STATUS__DC_I2C_DDC5_HW_DONE__SHIFT 0x00000003
+#define DC_I2C_DDC5_HW_STATUS__DC_I2C_DDC5_HW_REQ_MASK 0x00010000L
+#define DC_I2C_DDC5_HW_STATUS__DC_I2C_DDC5_HW_REQ__SHIFT 0x00000010
+#define DC_I2C_DDC5_HW_STATUS__DC_I2C_DDC5_HW_STATUS_MASK 0x00000003L
+#define DC_I2C_DDC5_HW_STATUS__DC_I2C_DDC5_HW_STATUS__SHIFT 0x00000000
+#define DC_I2C_DDC5_HW_STATUS__DC_I2C_DDC5_HW_URG_MASK 0x00020000L
+#define DC_I2C_DDC5_HW_STATUS__DC_I2C_DDC5_HW_URG__SHIFT 0x00000011
+#define DC_I2C_DDC5_SETUP__DC_I2C_DDC5_CLK_DRIVE_EN_MASK 0x00000080L
+#define DC_I2C_DDC5_SETUP__DC_I2C_DDC5_CLK_DRIVE_EN__SHIFT 0x00000007
+#define DC_I2C_DDC5_SETUP__DC_I2C_DDC5_DATA_DRIVE_EN_MASK 0x00000001L
+#define DC_I2C_DDC5_SETUP__DC_I2C_DDC5_DATA_DRIVE_EN__SHIFT 0x00000000
+#define DC_I2C_DDC5_SETUP__DC_I2C_DDC5_DATA_DRIVE_SEL_MASK 0x00000002L
+#define DC_I2C_DDC5_SETUP__DC_I2C_DDC5_DATA_DRIVE_SEL__SHIFT 0x00000001
+#define DC_I2C_DDC5_SETUP__DC_I2C_DDC5_EDID_DETECT_ENABLE_MASK 0x00000010L
+#define DC_I2C_DDC5_SETUP__DC_I2C_DDC5_EDID_DETECT_ENABLE__SHIFT 0x00000004
+#define DC_I2C_DDC5_SETUP__DC_I2C_DDC5_EDID_DETECT_MODE_MASK 0x00000020L
+#define DC_I2C_DDC5_SETUP__DC_I2C_DDC5_EDID_DETECT_MODE__SHIFT 0x00000005
+#define DC_I2C_DDC5_SETUP__DC_I2C_DDC5_ENABLE_MASK 0x00000040L
+#define DC_I2C_DDC5_SETUP__DC_I2C_DDC5_ENABLE__SHIFT 0x00000006
+#define DC_I2C_DDC5_SETUP__DC_I2C_DDC5_INTRA_BYTE_DELAY_MASK 0x0000ff00L
+#define DC_I2C_DDC5_SETUP__DC_I2C_DDC5_INTRA_BYTE_DELAY__SHIFT 0x00000008
+#define DC_I2C_DDC5_SETUP__DC_I2C_DDC5_INTRA_TRANSACTION_DELAY_MASK 0x00ff0000L
+#define DC_I2C_DDC5_SETUP__DC_I2C_DDC5_INTRA_TRANSACTION_DELAY__SHIFT 0x00000010
+#define DC_I2C_DDC5_SETUP__DC_I2C_DDC5_TIME_LIMIT_MASK 0xff000000L
+#define DC_I2C_DDC5_SETUP__DC_I2C_DDC5_TIME_LIMIT__SHIFT 0x00000018
+#define DC_I2C_DDC5_SPEED__DC_I2C_DDC5_DISABLE_FILTER_DURING_STALL_MASK 0x00000010L
+#define DC_I2C_DDC5_SPEED__DC_I2C_DDC5_DISABLE_FILTER_DURING_STALL__SHIFT 0x00000004
+#define DC_I2C_DDC5_SPEED__DC_I2C_DDC5_PRESCALE_MASK 0xffff0000L
+#define DC_I2C_DDC5_SPEED__DC_I2C_DDC5_PRESCALE__SHIFT 0x00000010
+#define DC_I2C_DDC5_SPEED__DC_I2C_DDC5_THRESHOLD_MASK 0x00000003L
+#define DC_I2C_DDC5_SPEED__DC_I2C_DDC5_THRESHOLD__SHIFT 0x00000000
+#define DC_I2C_DDC6_HW_STATUS__DC_I2C_DDC6_EDID_DETECT_NUM_VALID_TRIES_MASK 0x0f000000L
+#define DC_I2C_DDC6_HW_STATUS__DC_I2C_DDC6_EDID_DETECT_NUM_VALID_TRIES__SHIFT 0x00000018
+#define DC_I2C_DDC6_HW_STATUS__DC_I2C_DDC6_EDID_DETECT_STATE_MASK 0x70000000L
+#define DC_I2C_DDC6_HW_STATUS__DC_I2C_DDC6_EDID_DETECT_STATE__SHIFT 0x0000001c
+#define DC_I2C_DDC6_HW_STATUS__DC_I2C_DDC6_EDID_DETECT_STATUS_MASK 0x00100000L
+#define DC_I2C_DDC6_HW_STATUS__DC_I2C_DDC6_EDID_DETECT_STATUS__SHIFT 0x00000014
+#define DC_I2C_DDC6_HW_STATUS__DC_I2C_DDC6_HW_DONE_MASK 0x00000008L
+#define DC_I2C_DDC6_HW_STATUS__DC_I2C_DDC6_HW_DONE__SHIFT 0x00000003
+#define DC_I2C_DDC6_HW_STATUS__DC_I2C_DDC6_HW_REQ_MASK 0x00010000L
+#define DC_I2C_DDC6_HW_STATUS__DC_I2C_DDC6_HW_REQ__SHIFT 0x00000010
+#define DC_I2C_DDC6_HW_STATUS__DC_I2C_DDC6_HW_STATUS_MASK 0x00000003L
+#define DC_I2C_DDC6_HW_STATUS__DC_I2C_DDC6_HW_STATUS__SHIFT 0x00000000
+#define DC_I2C_DDC6_HW_STATUS__DC_I2C_DDC6_HW_URG_MASK 0x00020000L
+#define DC_I2C_DDC6_HW_STATUS__DC_I2C_DDC6_HW_URG__SHIFT 0x00000011
+#define DC_I2C_DDC6_SETUP__DC_I2C_DDC6_CLK_DRIVE_EN_MASK 0x00000080L
+#define DC_I2C_DDC6_SETUP__DC_I2C_DDC6_CLK_DRIVE_EN__SHIFT 0x00000007
+#define DC_I2C_DDC6_SETUP__DC_I2C_DDC6_DATA_DRIVE_EN_MASK 0x00000001L
+#define DC_I2C_DDC6_SETUP__DC_I2C_DDC6_DATA_DRIVE_EN__SHIFT 0x00000000
+#define DC_I2C_DDC6_SETUP__DC_I2C_DDC6_DATA_DRIVE_SEL_MASK 0x00000002L
+#define DC_I2C_DDC6_SETUP__DC_I2C_DDC6_DATA_DRIVE_SEL__SHIFT 0x00000001
+#define DC_I2C_DDC6_SETUP__DC_I2C_DDC6_EDID_DETECT_ENABLE_MASK 0x00000010L
+#define DC_I2C_DDC6_SETUP__DC_I2C_DDC6_EDID_DETECT_ENABLE__SHIFT 0x00000004
+#define DC_I2C_DDC6_SETUP__DC_I2C_DDC6_EDID_DETECT_MODE_MASK 0x00000020L
+#define DC_I2C_DDC6_SETUP__DC_I2C_DDC6_EDID_DETECT_MODE__SHIFT 0x00000005
+#define DC_I2C_DDC6_SETUP__DC_I2C_DDC6_ENABLE_MASK 0x00000040L
+#define DC_I2C_DDC6_SETUP__DC_I2C_DDC6_ENABLE__SHIFT 0x00000006
+#define DC_I2C_DDC6_SETUP__DC_I2C_DDC6_INTRA_BYTE_DELAY_MASK 0x0000ff00L
+#define DC_I2C_DDC6_SETUP__DC_I2C_DDC6_INTRA_BYTE_DELAY__SHIFT 0x00000008
+#define DC_I2C_DDC6_SETUP__DC_I2C_DDC6_INTRA_TRANSACTION_DELAY_MASK 0x00ff0000L
+#define DC_I2C_DDC6_SETUP__DC_I2C_DDC6_INTRA_TRANSACTION_DELAY__SHIFT 0x00000010
+#define DC_I2C_DDC6_SETUP__DC_I2C_DDC6_TIME_LIMIT_MASK 0xff000000L
+#define DC_I2C_DDC6_SETUP__DC_I2C_DDC6_TIME_LIMIT__SHIFT 0x00000018
+#define DC_I2C_DDC6_SPEED__DC_I2C_DDC6_DISABLE_FILTER_DURING_STALL_MASK 0x00000010L
+#define DC_I2C_DDC6_SPEED__DC_I2C_DDC6_DISABLE_FILTER_DURING_STALL__SHIFT 0x00000004
+#define DC_I2C_DDC6_SPEED__DC_I2C_DDC6_PRESCALE_MASK 0xffff0000L
+#define DC_I2C_DDC6_SPEED__DC_I2C_DDC6_PRESCALE__SHIFT 0x00000010
+#define DC_I2C_DDC6_SPEED__DC_I2C_DDC6_THRESHOLD_MASK 0x00000003L
+#define DC_I2C_DDC6_SPEED__DC_I2C_DDC6_THRESHOLD__SHIFT 0x00000000
+#define DC_I2C_DDCVGA_HW_STATUS__DC_I2C_DDCVGA_EDID_DETECT_NUM_VALID_TRIES_MASK 0x0f000000L
+#define DC_I2C_DDCVGA_HW_STATUS__DC_I2C_DDCVGA_EDID_DETECT_NUM_VALID_TRIES__SHIFT 0x00000018
+#define DC_I2C_DDCVGA_HW_STATUS__DC_I2C_DDCVGA_EDID_DETECT_STATE_MASK 0x70000000L
+#define DC_I2C_DDCVGA_HW_STATUS__DC_I2C_DDCVGA_EDID_DETECT_STATE__SHIFT 0x0000001c
+#define DC_I2C_DDCVGA_HW_STATUS__DC_I2C_DDCVGA_EDID_DETECT_STATUS_MASK 0x00100000L
+#define DC_I2C_DDCVGA_HW_STATUS__DC_I2C_DDCVGA_EDID_DETECT_STATUS__SHIFT 0x00000014
+#define DC_I2C_DDCVGA_HW_STATUS__DC_I2C_DDCVGA_HW_DONE_MASK 0x00000008L
+#define DC_I2C_DDCVGA_HW_STATUS__DC_I2C_DDCVGA_HW_DONE__SHIFT 0x00000003
+#define DC_I2C_DDCVGA_HW_STATUS__DC_I2C_DDCVGA_HW_REQ_MASK 0x00010000L
+#define DC_I2C_DDCVGA_HW_STATUS__DC_I2C_DDCVGA_HW_REQ__SHIFT 0x00000010
+#define DC_I2C_DDCVGA_HW_STATUS__DC_I2C_DDCVGA_HW_STATUS_MASK 0x00000003L
+#define DC_I2C_DDCVGA_HW_STATUS__DC_I2C_DDCVGA_HW_STATUS__SHIFT 0x00000000
+#define DC_I2C_DDCVGA_HW_STATUS__DC_I2C_DDCVGA_HW_URG_MASK 0x00020000L
+#define DC_I2C_DDCVGA_HW_STATUS__DC_I2C_DDCVGA_HW_URG__SHIFT 0x00000011
+#define DC_I2C_DDCVGA_SETUP__DC_I2C_DDCVGA_CLK_DRIVE_EN_MASK 0x00000080L
+#define DC_I2C_DDCVGA_SETUP__DC_I2C_DDCVGA_CLK_DRIVE_EN__SHIFT 0x00000007
+#define DC_I2C_DDCVGA_SETUP__DC_I2C_DDCVGA_DATA_DRIVE_EN_MASK 0x00000001L
+#define DC_I2C_DDCVGA_SETUP__DC_I2C_DDCVGA_DATA_DRIVE_EN__SHIFT 0x00000000
+#define DC_I2C_DDCVGA_SETUP__DC_I2C_DDCVGA_DATA_DRIVE_SEL_MASK 0x00000002L
+#define DC_I2C_DDCVGA_SETUP__DC_I2C_DDCVGA_DATA_DRIVE_SEL__SHIFT 0x00000001
+#define DC_I2C_DDCVGA_SETUP__DC_I2C_DDCVGA_EDID_DETECT_ENABLE_MASK 0x00000010L
+#define DC_I2C_DDCVGA_SETUP__DC_I2C_DDCVGA_EDID_DETECT_ENABLE__SHIFT 0x00000004
+#define DC_I2C_DDCVGA_SETUP__DC_I2C_DDCVGA_EDID_DETECT_MODE_MASK 0x00000020L
+#define DC_I2C_DDCVGA_SETUP__DC_I2C_DDCVGA_EDID_DETECT_MODE__SHIFT 0x00000005
+#define DC_I2C_DDCVGA_SETUP__DC_I2C_DDCVGA_ENABLE_MASK 0x00000040L
+#define DC_I2C_DDCVGA_SETUP__DC_I2C_DDCVGA_ENABLE__SHIFT 0x00000006
+#define DC_I2C_DDCVGA_SETUP__DC_I2C_DDCVGA_INTRA_BYTE_DELAY_MASK 0x0000ff00L
+#define DC_I2C_DDCVGA_SETUP__DC_I2C_DDCVGA_INTRA_BYTE_DELAY__SHIFT 0x00000008
+#define DC_I2C_DDCVGA_SETUP__DC_I2C_DDCVGA_INTRA_TRANSACTION_DELAY_MASK 0x00ff0000L
+#define DC_I2C_DDCVGA_SETUP__DC_I2C_DDCVGA_INTRA_TRANSACTION_DELAY__SHIFT 0x00000010
+#define DC_I2C_DDCVGA_SETUP__DC_I2C_DDCVGA_TIME_LIMIT_MASK 0xff000000L
+#define DC_I2C_DDCVGA_SETUP__DC_I2C_DDCVGA_TIME_LIMIT__SHIFT 0x00000018
+#define DC_I2C_DDCVGA_SPEED__DC_I2C_DDCVGA_DISABLE_FILTER_DURING_STALL_MASK 0x00000010L
+#define DC_I2C_DDCVGA_SPEED__DC_I2C_DDCVGA_DISABLE_FILTER_DURING_STALL__SHIFT 0x00000004
+#define DC_I2C_DDCVGA_SPEED__DC_I2C_DDCVGA_PRESCALE_MASK 0xffff0000L
+#define DC_I2C_DDCVGA_SPEED__DC_I2C_DDCVGA_PRESCALE__SHIFT 0x00000010
+#define DC_I2C_DDCVGA_SPEED__DC_I2C_DDCVGA_THRESHOLD_MASK 0x00000003L
+#define DC_I2C_DDCVGA_SPEED__DC_I2C_DDCVGA_THRESHOLD__SHIFT 0x00000000
+#define DC_I2C_EDID_DETECT_CTRL__DC_I2C_EDID_DETECT_NUM_TRIES_UNTIL_VALID_MASK 0x00f00000L
+#define DC_I2C_EDID_DETECT_CTRL__DC_I2C_EDID_DETECT_NUM_TRIES_UNTIL_VALID__SHIFT 0x00000014
+#define DC_I2C_EDID_DETECT_CTRL__DC_I2C_EDID_DETECT_SEND_RESET_MASK 0x10000000L
+#define DC_I2C_EDID_DETECT_CTRL__DC_I2C_EDID_DETECT_SEND_RESET__SHIFT 0x0000001c
+#define DC_I2C_EDID_DETECT_CTRL__DC_I2C_EDID_DETECT_WAIT_TIME_MASK 0x0000ffffL
+#define DC_I2C_EDID_DETECT_CTRL__DC_I2C_EDID_DETECT_WAIT_TIME__SHIFT 0x00000000
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC1_HW_DONE_ACK_MASK 0x00000020L
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC1_HW_DONE_ACK__SHIFT 0x00000005
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC1_HW_DONE_INT_MASK 0x00000010L
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC1_HW_DONE_INT__SHIFT 0x00000004
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC1_HW_DONE_MASK_MASK 0x00000040L
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC1_HW_DONE_MASK__SHIFT 0x00000006
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC2_HW_DONE_ACK_MASK 0x00000200L
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC2_HW_DONE_ACK__SHIFT 0x00000009
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC2_HW_DONE_INT_MASK 0x00000100L
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC2_HW_DONE_INT__SHIFT 0x00000008
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC2_HW_DONE_MASK_MASK 0x00000400L
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC2_HW_DONE_MASK__SHIFT 0x0000000a
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC3_HW_DONE_ACK_MASK 0x00002000L
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC3_HW_DONE_ACK__SHIFT 0x0000000d
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC3_HW_DONE_INT_MASK 0x00001000L
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC3_HW_DONE_INT__SHIFT 0x0000000c
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC3_HW_DONE_MASK_MASK 0x00004000L
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC3_HW_DONE_MASK__SHIFT 0x0000000e
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC4_HW_DONE_ACK_MASK 0x00020000L
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC4_HW_DONE_ACK__SHIFT 0x00000011
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC4_HW_DONE_INT_MASK 0x00010000L
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC4_HW_DONE_INT__SHIFT 0x00000010
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC4_HW_DONE_MASK_MASK 0x00040000L
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC4_HW_DONE_MASK__SHIFT 0x00000012
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC5_HW_DONE_ACK_MASK 0x00200000L
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC5_HW_DONE_ACK__SHIFT 0x00000015
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC5_HW_DONE_INT_MASK 0x00100000L
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC5_HW_DONE_INT__SHIFT 0x00000014
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC5_HW_DONE_MASK_MASK 0x00400000L
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC5_HW_DONE_MASK__SHIFT 0x00000016
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC6_HW_DONE_ACK_MASK 0x02000000L
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC6_HW_DONE_ACK__SHIFT 0x00000019
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC6_HW_DONE_INT_MASK 0x01000000L
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC6_HW_DONE_INT__SHIFT 0x00000018
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC6_HW_DONE_MASK_MASK 0x04000000L
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC6_HW_DONE_MASK__SHIFT 0x0000001a
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDCVGA_HW_DONE_ACK_MASK 0x10000000L
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDCVGA_HW_DONE_ACK__SHIFT 0x0000001c
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDCVGA_HW_DONE_INT_MASK 0x08000000L
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDCVGA_HW_DONE_INT__SHIFT 0x0000001b
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDCVGA_HW_DONE_MASK_MASK 0x20000000L
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDCVGA_HW_DONE_MASK__SHIFT 0x0000001d
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_SW_DONE_ACK_MASK 0x00000002L
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_SW_DONE_ACK__SHIFT 0x00000001
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_SW_DONE_INT_MASK 0x00000001L
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_SW_DONE_INT__SHIFT 0x00000000
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_SW_DONE_MASK_MASK 0x00000004L
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_SW_DONE_MASK__SHIFT 0x00000002
+#define DC_I2C_SW_STATUS__DC_I2C_SW_ABORTED_MASK 0x00000010L
+#define DC_I2C_SW_STATUS__DC_I2C_SW_ABORTED__SHIFT 0x00000004
+#define DC_I2C_SW_STATUS__DC_I2C_SW_BUFFER_OVERFLOW_MASK 0x00000080L
+#define DC_I2C_SW_STATUS__DC_I2C_SW_BUFFER_OVERFLOW__SHIFT 0x00000007
+#define DC_I2C_SW_STATUS__DC_I2C_SW_DONE_MASK 0x00000004L
+#define DC_I2C_SW_STATUS__DC_I2C_SW_DONE__SHIFT 0x00000002
+#define DC_I2C_SW_STATUS__DC_I2C_SW_INTERRUPTED_MASK 0x00000040L
+#define DC_I2C_SW_STATUS__DC_I2C_SW_INTERRUPTED__SHIFT 0x00000006
+#define DC_I2C_SW_STATUS__DC_I2C_SW_NACK0_MASK 0x00001000L
+#define DC_I2C_SW_STATUS__DC_I2C_SW_NACK0__SHIFT 0x0000000c
+#define DC_I2C_SW_STATUS__DC_I2C_SW_NACK1_MASK 0x00002000L
+#define DC_I2C_SW_STATUS__DC_I2C_SW_NACK1__SHIFT 0x0000000d
+#define DC_I2C_SW_STATUS__DC_I2C_SW_NACK2_MASK 0x00004000L
+#define DC_I2C_SW_STATUS__DC_I2C_SW_NACK2__SHIFT 0x0000000e
+#define DC_I2C_SW_STATUS__DC_I2C_SW_NACK3_MASK 0x00008000L
+#define DC_I2C_SW_STATUS__DC_I2C_SW_NACK3__SHIFT 0x0000000f
+#define DC_I2C_SW_STATUS__DC_I2C_SW_REQ_MASK 0x00040000L
+#define DC_I2C_SW_STATUS__DC_I2C_SW_REQ__SHIFT 0x00000012
+#define DC_I2C_SW_STATUS__DC_I2C_SW_STATUS_MASK 0x00000003L
+#define DC_I2C_SW_STATUS__DC_I2C_SW_STATUS__SHIFT 0x00000000
+#define DC_I2C_SW_STATUS__DC_I2C_SW_STOPPED_ON_NACK_MASK 0x00000100L
+#define DC_I2C_SW_STATUS__DC_I2C_SW_STOPPED_ON_NACK__SHIFT 0x00000008
+#define DC_I2C_SW_STATUS__DC_I2C_SW_TIMEOUT_MASK 0x00000020L
+#define DC_I2C_SW_STATUS__DC_I2C_SW_TIMEOUT__SHIFT 0x00000005
+#define DC_I2C_TRANSACTION0__DC_I2C_COUNT0_MASK 0x00ff0000L
+#define DC_I2C_TRANSACTION0__DC_I2C_COUNT0__SHIFT 0x00000010
+#define DC_I2C_TRANSACTION0__DC_I2C_RW0_MASK 0x00000001L
+#define DC_I2C_TRANSACTION0__DC_I2C_RW0__SHIFT 0x00000000
+#define DC_I2C_TRANSACTION0__DC_I2C_START0_MASK 0x00001000L
+#define DC_I2C_TRANSACTION0__DC_I2C_START0__SHIFT 0x0000000c
+#define DC_I2C_TRANSACTION0__DC_I2C_STOP0_MASK 0x00002000L
+#define DC_I2C_TRANSACTION0__DC_I2C_STOP0__SHIFT 0x0000000d
+#define DC_I2C_TRANSACTION0__DC_I2C_STOP_ON_NACK0_MASK 0x00000100L
+#define DC_I2C_TRANSACTION0__DC_I2C_STOP_ON_NACK0__SHIFT 0x00000008
+#define DC_I2C_TRANSACTION1__DC_I2C_COUNT1_MASK 0x00ff0000L
+#define DC_I2C_TRANSACTION1__DC_I2C_COUNT1__SHIFT 0x00000010
+#define DC_I2C_TRANSACTION1__DC_I2C_RW1_MASK 0x00000001L
+#define DC_I2C_TRANSACTION1__DC_I2C_RW1__SHIFT 0x00000000
+#define DC_I2C_TRANSACTION1__DC_I2C_START1_MASK 0x00001000L
+#define DC_I2C_TRANSACTION1__DC_I2C_START1__SHIFT 0x0000000c
+#define DC_I2C_TRANSACTION1__DC_I2C_STOP1_MASK 0x00002000L
+#define DC_I2C_TRANSACTION1__DC_I2C_STOP1__SHIFT 0x0000000d
+#define DC_I2C_TRANSACTION1__DC_I2C_STOP_ON_NACK1_MASK 0x00000100L
+#define DC_I2C_TRANSACTION1__DC_I2C_STOP_ON_NACK1__SHIFT 0x00000008
+#define DC_I2C_TRANSACTION2__DC_I2C_COUNT2_MASK 0x00ff0000L
+#define DC_I2C_TRANSACTION2__DC_I2C_COUNT2__SHIFT 0x00000010
+#define DC_I2C_TRANSACTION2__DC_I2C_RW2_MASK 0x00000001L
+#define DC_I2C_TRANSACTION2__DC_I2C_RW2__SHIFT 0x00000000
+#define DC_I2C_TRANSACTION2__DC_I2C_START2_MASK 0x00001000L
+#define DC_I2C_TRANSACTION2__DC_I2C_START2__SHIFT 0x0000000c
+#define DC_I2C_TRANSACTION2__DC_I2C_STOP2_MASK 0x00002000L
+#define DC_I2C_TRANSACTION2__DC_I2C_STOP2__SHIFT 0x0000000d
+#define DC_I2C_TRANSACTION2__DC_I2C_STOP_ON_NACK2_MASK 0x00000100L
+#define DC_I2C_TRANSACTION2__DC_I2C_STOP_ON_NACK2__SHIFT 0x00000008
+#define DC_I2C_TRANSACTION3__DC_I2C_COUNT3_MASK 0x00ff0000L
+#define DC_I2C_TRANSACTION3__DC_I2C_COUNT3__SHIFT 0x00000010
+#define DC_I2C_TRANSACTION3__DC_I2C_RW3_MASK 0x00000001L
+#define DC_I2C_TRANSACTION3__DC_I2C_RW3__SHIFT 0x00000000
+#define DC_I2C_TRANSACTION3__DC_I2C_START3_MASK 0x00001000L
+#define DC_I2C_TRANSACTION3__DC_I2C_START3__SHIFT 0x0000000c
+#define DC_I2C_TRANSACTION3__DC_I2C_STOP3_MASK 0x00002000L
+#define DC_I2C_TRANSACTION3__DC_I2C_STOP3__SHIFT 0x0000000d
+#define DC_I2C_TRANSACTION3__DC_I2C_STOP_ON_NACK3_MASK 0x00000100L
+#define DC_I2C_TRANSACTION3__DC_I2C_STOP_ON_NACK3__SHIFT 0x00000008
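Writes go the other way: clear the field with the inverted mask, then OR in the new value shifted left into position. A generic token-pasting sketch under the same naming convention (FIELD_GET/FIELD_SET here are illustrative names, not the kernel's bitfield.h macros or the driver's own helpers; the DC_I2C_TRANSACTION0 #defines above are assumed to be in scope):

#include <stdint.h>

/* Extract: mask off the field, shift it down to bit 0. */
#define FIELD_GET(reg, F)  (((reg) & F##_MASK) >> F##__SHIFT)

/* Insert: clear the old field, shift the new value into place,
 * and re-mask so an oversized value cannot spill into neighbors. */
#define FIELD_SET(reg, F, v) \
	(((reg) & ~F##_MASK) | (((uint32_t)(v) << F##__SHIFT) & F##_MASK))

/* Example: describe one 16-byte I2C transfer ending with a STOP. */
static uint32_t build_transaction0(void)
{
	uint32_t t = 0;

	t = FIELD_SET(t, DC_I2C_TRANSACTION0__DC_I2C_COUNT0, 16);
	t = FIELD_SET(t, DC_I2C_TRANSACTION0__DC_I2C_STOP0, 1);
	return t;	/* 0x00102000: COUNT0 at bit 16, STOP0 at bit 13 */
}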
+#define DCI_CLK_CNTL__DCI_PG_TEST_CLK_SEL_MASK 0xf8000000L
+#define DCI_CLK_CNTL__DCI_PG_TEST_CLK_SEL__SHIFT 0x0000001b
+#define DCI_CLK_CNTL__DCI_TEST_CLK_SEL_MASK 0x0000001fL
+#define DCI_CLK_CNTL__DCI_TEST_CLK_SEL__SHIFT 0x00000000
+#define DCI_CLK_CNTL__DISPCLK_G_DMCU_GATE_DIS_MASK 0x00008000L
+#define DCI_CLK_CNTL__DISPCLK_G_DMCU_GATE_DIS__SHIFT 0x0000000f
+#define DCI_CLK_CNTL__DISPCLK_G_DMIF0_GATE_DIS_MASK 0x00010000L
+#define DCI_CLK_CNTL__DISPCLK_G_DMIF0_GATE_DIS__SHIFT 0x00000010
+#define DCI_CLK_CNTL__DISPCLK_G_DMIF1_GATE_DIS_MASK 0x00020000L
+#define DCI_CLK_CNTL__DISPCLK_G_DMIF1_GATE_DIS__SHIFT 0x00000011
+#define DCI_CLK_CNTL__DISPCLK_G_DMIF2_GATE_DIS_MASK 0x00040000L
+#define DCI_CLK_CNTL__DISPCLK_G_DMIF2_GATE_DIS__SHIFT 0x00000012
+#define DCI_CLK_CNTL__DISPCLK_G_DMIF3_GATE_DIS_MASK 0x00080000L
+#define DCI_CLK_CNTL__DISPCLK_G_DMIF3_GATE_DIS__SHIFT 0x00000013
+#define DCI_CLK_CNTL__DISPCLK_G_DMIF4_GATE_DIS_MASK 0x00100000L
+#define DCI_CLK_CNTL__DISPCLK_G_DMIF4_GATE_DIS__SHIFT 0x00000014
+#define DCI_CLK_CNTL__DISPCLK_G_DMIF5_GATE_DIS_MASK 0x00200000L
+#define DCI_CLK_CNTL__DISPCLK_G_DMIF5_GATE_DIS__SHIFT 0x00000015
+#define DCI_CLK_CNTL__DISPCLK_G_FBC_GATE_DIS_MASK 0x00000200L
+#define DCI_CLK_CNTL__DISPCLK_G_FBC_GATE_DIS__SHIFT 0x00000009
+#define DCI_CLK_CNTL__DISPCLK_G_VGA_GATE_DIS_MASK 0x00000800L
+#define DCI_CLK_CNTL__DISPCLK_G_VGA_GATE_DIS__SHIFT 0x0000000b
+#define DCI_CLK_CNTL__DISPCLK_G_VIP_GATE_DIS_MASK 0x00002000L
+#define DCI_CLK_CNTL__DISPCLK_G_VIP_GATE_DIS__SHIFT 0x0000000d
+#define DCI_CLK_CNTL__DISPCLK_M_GATE_DIS_MASK 0x00000040L
+#define DCI_CLK_CNTL__DISPCLK_M_GATE_DIS__SHIFT 0x00000006
+#define DCI_CLK_CNTL__DISPCLK_R_DCI_GATE_DIS_MASK 0x00000020L
+#define DCI_CLK_CNTL__DISPCLK_R_DCI_GATE_DIS__SHIFT 0x00000005
+#define DCI_CLK_CNTL__DISPCLK_R_DMCU_GATE_DIS_MASK 0x00004000L
+#define DCI_CLK_CNTL__DISPCLK_R_DMCU_GATE_DIS__SHIFT 0x0000000e
+#define DCI_CLK_CNTL__DISPCLK_R_VGA_GATE_DIS_MASK 0x00000400L
+#define DCI_CLK_CNTL__DISPCLK_R_VGA_GATE_DIS__SHIFT 0x0000000a
+#define DCI_CLK_CNTL__DISPCLK_R_VIP_GATE_DIS_MASK 0x00001000L
+#define DCI_CLK_CNTL__DISPCLK_R_VIP_GATE_DIS__SHIFT 0x0000000c
+#define DCI_CLK_CNTL__SCLK_G_DMIF_GATE_DIS_MASK 0x00400000L
+#define DCI_CLK_CNTL__SCLK_G_DMIF_GATE_DIS__SHIFT 0x00000016
+#define DCI_CLK_CNTL__SCLK_G_DMIFTRK_GATE_DIS_MASK 0x00800000L
+#define DCI_CLK_CNTL__SCLK_G_DMIFTRK_GATE_DIS__SHIFT 0x00000017
+#define DCI_CLK_CNTL__SCLK_R_AZ_GATE_DIS_MASK 0x00000100L
+#define DCI_CLK_CNTL__SCLK_R_AZ_GATE_DIS__SHIFT 0x00000008
+#define DCI_DEBUG_CONFIG__DCI_DBG_SEL_MASK 0x0000000fL
+#define DCI_DEBUG_CONFIG__DCI_DBG_SEL__SHIFT 0x00000000
+#define DCI_MEM_PWR_CNTL__DMIF0_ASYNC_LIGHT_SLEEP_DIS_MASK 0x00000001L
+#define DCI_MEM_PWR_CNTL__DMIF0_ASYNC_LIGHT_SLEEP_DIS__SHIFT 0x00000000
+#define DCI_MEM_PWR_CNTL__DMIF0_ASYNC_MEM_PWR_STATE_MASK 0x00003000L
+#define DCI_MEM_PWR_CNTL__DMIF0_ASYNC_MEM_PWR_STATE__SHIFT 0x0000000c
+#define DCI_MEM_PWR_CNTL__DMIF0_ASYNC_MEM_SHUTDOWN_DIS_MASK 0x00000040L
+#define DCI_MEM_PWR_CNTL__DMIF0_ASYNC_MEM_SHUTDOWN_DIS__SHIFT 0x00000006
+#define DCI_MEM_PWR_CNTL__DMIF1_ASYNC_LIGHT_SLEEP_DIS_MASK 0x00000002L
+#define DCI_MEM_PWR_CNTL__DMIF1_ASYNC_LIGHT_SLEEP_DIS__SHIFT 0x00000001
+#define DCI_MEM_PWR_CNTL__DMIF1_ASYNC_MEM_PWR_STATE_MASK 0x0000c000L
+#define DCI_MEM_PWR_CNTL__DMIF1_ASYNC_MEM_PWR_STATE__SHIFT 0x0000000e
+#define DCI_MEM_PWR_CNTL__DMIF1_ASYNC_MEM_SHUTDOWN_DIS_MASK 0x00000080L
+#define DCI_MEM_PWR_CNTL__DMIF1_ASYNC_MEM_SHUTDOWN_DIS__SHIFT 0x00000007
+#define DCI_MEM_PWR_CNTL__DMIF2_ASYNC_LIGHT_SLEEP_DIS_MASK 0x00000004L
+#define DCI_MEM_PWR_CNTL__DMIF2_ASYNC_LIGHT_SLEEP_DIS__SHIFT 0x00000002
+#define DCI_MEM_PWR_CNTL__DMIF2_ASYNC_MEM_PWR_STATE_MASK 0x00030000L
+#define DCI_MEM_PWR_CNTL__DMIF2_ASYNC_MEM_PWR_STATE__SHIFT 0x00000010
+#define DCI_MEM_PWR_CNTL__DMIF2_ASYNC_MEM_SHUTDOWN_DIS_MASK 0x00000100L
+#define DCI_MEM_PWR_CNTL__DMIF2_ASYNC_MEM_SHUTDOWN_DIS__SHIFT 0x00000008
+#define DCI_MEM_PWR_CNTL__DMIF3_ASYNC_LIGHT_SLEEP_DIS_MASK 0x00000008L
+#define DCI_MEM_PWR_CNTL__DMIF3_ASYNC_LIGHT_SLEEP_DIS__SHIFT 0x00000003
+#define DCI_MEM_PWR_CNTL__DMIF3_ASYNC_MEM_PWR_STATE_MASK 0x000c0000L
+#define DCI_MEM_PWR_CNTL__DMIF3_ASYNC_MEM_PWR_STATE__SHIFT 0x00000012
+#define DCI_MEM_PWR_CNTL__DMIF3_ASYNC_MEM_SHUTDOWN_DIS_MASK 0x00000200L
+#define DCI_MEM_PWR_CNTL__DMIF3_ASYNC_MEM_SHUTDOWN_DIS__SHIFT 0x00000009
+#define DCI_MEM_PWR_CNTL__DMIF4_ASYNC_LIGHT_SLEEP_DIS_MASK 0x00000010L
+#define DCI_MEM_PWR_CNTL__DMIF4_ASYNC_LIGHT_SLEEP_DIS__SHIFT 0x00000004
+#define DCI_MEM_PWR_CNTL__DMIF4_ASYNC_MEM_PWR_STATE_MASK 0x00300000L
+#define DCI_MEM_PWR_CNTL__DMIF4_ASYNC_MEM_PWR_STATE__SHIFT 0x00000014
+#define DCI_MEM_PWR_CNTL__DMIF4_ASYNC_MEM_SHUTDOWN_DIS_MASK 0x00000400L
+#define DCI_MEM_PWR_CNTL__DMIF4_ASYNC_MEM_SHUTDOWN_DIS__SHIFT 0x0000000a
+#define DCI_MEM_PWR_CNTL__DMIF5_ASYNC_LIGHT_SLEEP_DIS_MASK 0x00000020L
+#define DCI_MEM_PWR_CNTL__DMIF5_ASYNC_LIGHT_SLEEP_DIS__SHIFT 0x00000005
+#define DCI_MEM_PWR_CNTL__DMIF5_ASYNC_MEM_PWR_STATE_MASK 0x00c00000L
+#define DCI_MEM_PWR_CNTL__DMIF5_ASYNC_MEM_PWR_STATE__SHIFT 0x00000016
+#define DCI_MEM_PWR_CNTL__DMIF5_ASYNC_MEM_SHUTDOWN_DIS_MASK 0x00000800L
+#define DCI_MEM_PWR_CNTL__DMIF5_ASYNC_MEM_SHUTDOWN_DIS__SHIFT 0x0000000b
+#define DCI_MEM_PWR_STATE2__DMCU_ERAM1_PWR_STATE_MASK 0x00000003L
+#define DCI_MEM_PWR_STATE2__DMCU_ERAM1_PWR_STATE__SHIFT 0x00000000
+#define DCI_MEM_PWR_STATE2__DMCU_ERAM2_PWR_STATE_MASK 0x0000000cL
+#define DCI_MEM_PWR_STATE2__DMCU_ERAM2_PWR_STATE__SHIFT 0x00000002
+#define DCI_MEM_PWR_STATE2__DMCU_ERAM3_PWR_STATE_MASK 0x00000030L
+#define DCI_MEM_PWR_STATE2__DMCU_ERAM3_PWR_STATE__SHIFT 0x00000004
+#define DCI_MEM_PWR_STATE__AZ_MEM_PWR_STATE_MASK 0x00c00000L
+#define DCI_MEM_PWR_STATE__AZ_MEM_PWR_STATE__SHIFT 0x00000016
+#define DCI_MEM_PWR_STATE__DMCU_IRAM_PWR_STATE_MASK 0x30000000L
+#define DCI_MEM_PWR_STATE__DMCU_IRAM_PWR_STATE__SHIFT 0x0000001c
+#define DCI_MEM_PWR_STATE__DMCU_MEM_PWR_STATE_MASK 0x00000003L
+#define DCI_MEM_PWR_STATE__DMCU_MEM_PWR_STATE__SHIFT 0x00000000
+#define DCI_MEM_PWR_STATE__DMIF0_MEM_PWR_STATE_MASK 0x0000000cL
+#define DCI_MEM_PWR_STATE__DMIF0_MEM_PWR_STATE__SHIFT 0x00000002
+#define DCI_MEM_PWR_STATE__DMIF1_MEM_PWR_STATE_MASK 0x00000030L
+#define DCI_MEM_PWR_STATE__DMIF1_MEM_PWR_STATE__SHIFT 0x00000004
+#define DCI_MEM_PWR_STATE__DMIF2_MEM_PWR_STATE_MASK 0x000000c0L
+#define DCI_MEM_PWR_STATE__DMIF2_MEM_PWR_STATE__SHIFT 0x00000006
+#define DCI_MEM_PWR_STATE__DMIF3_MEM_PWR_STATE_MASK 0x00000300L
+#define DCI_MEM_PWR_STATE__DMIF3_MEM_PWR_STATE__SHIFT 0x00000008
+#define DCI_MEM_PWR_STATE__DMIF4_MEM_PWR_STATE_MASK 0x00000c00L
+#define DCI_MEM_PWR_STATE__DMIF4_MEM_PWR_STATE__SHIFT 0x0000000a
+#define DCI_MEM_PWR_STATE__DMIF5_MEM_PWR_STATE_MASK 0x00003000L
+#define DCI_MEM_PWR_STATE__DMIF5_MEM_PWR_STATE__SHIFT 0x0000000c
+#define DCI_MEM_PWR_STATE__DMIF_XLR_MEM1_PWR_STATE_MASK 0x0c000000L
+#define DCI_MEM_PWR_STATE__DMIF_XLR_MEM1_PWR_STATE__SHIFT 0x0000001a
+#define DCI_MEM_PWR_STATE__DMIF_XLR_MEM_PWR_STATE_MASK 0x03000000L
+#define DCI_MEM_PWR_STATE__DMIF_XLR_MEM_PWR_STATE__SHIFT 0x00000018
+#define DCI_MEM_PWR_STATE__FBC_MEM_PWR_STATE_MASK 0x00030000L
+#define DCI_MEM_PWR_STATE__FBC_MEM_PWR_STATE__SHIFT 0x00000010
+#define DCI_MEM_PWR_STATE__MCIF_MEM_PWR_STATE_MASK 0x000c0000L
+#define DCI_MEM_PWR_STATE__MCIF_MEM_PWR_STATE__SHIFT 0x00000012
+#define DCI_MEM_PWR_STATE__VGA_MEM_PWR_STATE_MASK 0x0000c000L
+#define DCI_MEM_PWR_STATE__VGA_MEM_PWR_STATE__SHIFT 0x0000000e
+#define DCI_MEM_PWR_STATE__VIP_MEM_PWR_STATE_MASK 0x00300000L
+#define DCI_MEM_PWR_STATE__VIP_MEM_PWR_STATE__SHIFT 0x00000014
+#define DCIO_DEBUG10__DCIO_DIGC_DEBUG_MASK 0xffffffffL
+#define DCIO_DEBUG10__DCIO_DIGC_DEBUG__SHIFT 0x00000000
+#define DCIO_DEBUG11__DCIO_DIGD_DEBUG_MASK 0xffffffffL
+#define DCIO_DEBUG11__DCIO_DIGD_DEBUG__SHIFT 0x00000000
+#define DCIO_DEBUG12__DCIO_DIGE_DEBUG_MASK 0xffffffffL
+#define DCIO_DEBUG12__DCIO_DIGE_DEBUG__SHIFT 0x00000000
+#define DCIO_DEBUG13__DCIO_DIGF_DEBUG_MASK 0xffffffffL
+#define DCIO_DEBUG13__DCIO_DIGF_DEBUG__SHIFT 0x00000000
+#define DCIO_DEBUG1__DOUT_DCIO_DVO_CLK_TRISTATE_MASK 0x00040000L
+#define DCIO_DEBUG1__DOUT_DCIO_DVO_CLK_TRISTATE__SHIFT 0x00000012
+#define DCIO_DEBUG1__DOUT_DCIO_DVOCNTL1_A0_MASK 0x00008000L
+#define DCIO_DEBUG1__DOUT_DCIO_DVOCNTL1_A0_PREMUX_MASK 0x00004000L
+#define DCIO_DEBUG1__DOUT_DCIO_DVOCNTL1_A0_PREMUX__SHIFT 0x0000000e
+#define DCIO_DEBUG1__DOUT_DCIO_DVOCNTL1_A0_REG_MASK 0x00002000L
+#define DCIO_DEBUG1__DOUT_DCIO_DVOCNTL1_A0_REG__SHIFT 0x0000000d
+#define DCIO_DEBUG1__DOUT_DCIO_DVOCNTL1_A0__SHIFT 0x0000000f
+#define DCIO_DEBUG1__DOUT_DCIO_DVOCNTL1_EN_MASK 0x00100000L
+#define DCIO_DEBUG1__DOUT_DCIO_DVOCNTL1_EN_PREMUX_MASK 0x00080000L
+#define DCIO_DEBUG1__DOUT_DCIO_DVOCNTL1_EN_PREMUX__SHIFT 0x00000013
+#define DCIO_DEBUG1__DOUT_DCIO_DVOCNTL1_EN_REG_MASK 0x00010000L
+#define DCIO_DEBUG1__DOUT_DCIO_DVOCNTL1_EN_REG__SHIFT 0x00000010
+#define DCIO_DEBUG1__DOUT_DCIO_DVOCNTL1_EN__SHIFT 0x00000014
+#define DCIO_DEBUG1__DOUT_DCIO_DVOCNTL1_MASK_REG_MASK 0x00400000L
+#define DCIO_DEBUG1__DOUT_DCIO_DVOCNTL1_MASK_REG__SHIFT 0x00000016
+#define DCIO_DEBUG1__DOUT_DCIO_DVOCNTL1_MUX_MASK 0x00200000L
+#define DCIO_DEBUG1__DOUT_DCIO_DVOCNTL1_MUX__SHIFT 0x00000015
+#define DCIO_DEBUG1__DOUT_DCIO_DVOCNTL1_SEL0_MASK 0x08000000L
+#define DCIO_DEBUG1__DOUT_DCIO_DVOCNTL1_SEL0_PREMUX_MASK 0x04000000L
+#define DCIO_DEBUG1__DOUT_DCIO_DVOCNTL1_SEL0_PREMUX__SHIFT 0x0000001a
+#define DCIO_DEBUG1__DOUT_DCIO_DVOCNTL1_SEL0__SHIFT 0x0000001b
+#define DCIO_DEBUG1__DOUT_DCIO_DVO_ENABLE_MASK 0x00800000L
+#define DCIO_DEBUG1__DOUT_DCIO_DVO_ENABLE__SHIFT 0x00000017
+#define DCIO_DEBUG1__DOUT_DCIO_DVO_HSYNC_TRISTATE_MASK 0x00020000L
+#define DCIO_DEBUG1__DOUT_DCIO_DVO_HSYNC_TRISTATE__SHIFT 0x00000011
+#define DCIO_DEBUG1__DOUT_DCIO_DVO_RATE_SEL_MASK 0x02000000L
+#define DCIO_DEBUG1__DOUT_DCIO_DVO_RATE_SEL__SHIFT 0x00000019
+#define DCIO_DEBUG1__DOUT_DCIO_DVO_VSYNC_TRISTATE_MASK 0x01000000L
+#define DCIO_DEBUG1__DOUT_DCIO_DVO_VSYNC_TRISTATE__SHIFT 0x00000018
+#define DCIO_DEBUG1__DOUT_DCIO_MVP_DVOCLK_C_MASK 0x00001000L
+#define DCIO_DEBUG1__DOUT_DCIO_MVP_DVOCLK_C__SHIFT 0x0000000c
+#define DCIO_DEBUG1__DOUT_DCIO_MVP_DVOCNTL_A0_MASK 0x000000c0L
+#define DCIO_DEBUG1__DOUT_DCIO_MVP_DVOCNTL_A0_REG_MASK 0x00000003L
+#define DCIO_DEBUG1__DOUT_DCIO_MVP_DVOCNTL_A0_REG__SHIFT 0x00000000
+#define DCIO_DEBUG1__DOUT_DCIO_MVP_DVOCNTL_A0__SHIFT 0x00000006
+#define DCIO_DEBUG1__DOUT_DCIO_MVP_DVOCNTL_EN_MASK 0x00000c00L
+#define DCIO_DEBUG1__DOUT_DCIO_MVP_DVOCNTL_EN_REG_MASK 0x00000030L
+#define DCIO_DEBUG1__DOUT_DCIO_MVP_DVOCNTL_EN_REG__SHIFT 0x00000004
+#define DCIO_DEBUG1__DOUT_DCIO_MVP_DVOCNTL_EN__SHIFT 0x0000000a
+#define DCIO_DEBUG1__DOUT_DCIO_MVP_DVOCNTL_MASK_REG_MASK 0x0000000cL
+#define DCIO_DEBUG1__DOUT_DCIO_MVP_DVOCNTL_MASK_REG__SHIFT 0x00000002
+#define DCIO_DEBUG1__DOUT_DCIO_MVP_DVOCNTL_SEL0_MASK 0x00000300L
+#define DCIO_DEBUG1__DOUT_DCIO_MVP_DVOCNTL_SEL0__SHIFT 0x00000008
+#define DCIO_DEBUG2__DCIO_DEBUG2_MASK 0xffffffffL
+#define DCIO_DEBUG2__DCIO_DEBUG2__SHIFT 0x00000000
+#define DCIO_DEBUG3__DCIO_DEBUG3_MASK 0xffffffffL
+#define DCIO_DEBUG3__DCIO_DEBUG3__SHIFT 0x00000000
+#define DCIO_DEBUG4__DCIO_DEBUG4_MASK 0xffffffffL
+#define DCIO_DEBUG4__DCIO_DEBUG4__SHIFT 0x00000000
+#define DCIO_DEBUG5__DCIO_DEBUG5_MASK 0xffffffffL
+#define DCIO_DEBUG5__DCIO_DEBUG5__SHIFT 0x00000000
+#define DCIO_DEBUG6__DCIO_DEBUG6_MASK 0xffffffffL
+#define DCIO_DEBUG6__DCIO_DEBUG6__SHIFT 0x00000000
+#define DCIO_DEBUG7__DCIO_DEBUG7_MASK 0xffffffffL
+#define DCIO_DEBUG7__DCIO_DEBUG7__SHIFT 0x00000000
+#define DCIO_DEBUG8__DCIO_DEBUG8_MASK 0xffffffffL
+#define DCIO_DEBUG8__DCIO_DEBUG8__SHIFT 0x00000000
+#define DCIO_DEBUG9__DCIO_DEBUG9_MASK 0xffffffffL
+#define DCIO_DEBUG9__DCIO_DEBUG9__SHIFT 0x00000000
+#define DCIO_DEBUGA__DCIO_DEBUGA_MASK 0xffffffffL
+#define DCIO_DEBUGA__DCIO_DEBUGA__SHIFT 0x00000000
+#define DCIO_DEBUGB__DCIO_DEBUGB_MASK 0xffffffffL
+#define DCIO_DEBUGB__DCIO_DEBUGB__SHIFT 0x00000000
+#define DCIO_DEBUGC__DCIO_DEBUGC_MASK 0xffffffffL
+#define DCIO_DEBUGC__DCIO_DEBUGC__SHIFT 0x00000000
+#define DCIO_DEBUG__DCIO_DEBUG_MASK 0xffffffffL
+#define DCIO_DEBUG__DCIO_DEBUG__SHIFT 0x00000000
+#define DCIO_DEBUGD__DCIO_DEBUGD_MASK 0xffffffffL
+#define DCIO_DEBUGD__DCIO_DEBUGD__SHIFT 0x00000000
+#define DCIO_DEBUGE__DCIO_DIGA_DEBUG_MASK 0xffffffffL
+#define DCIO_DEBUGE__DCIO_DIGA_DEBUG__SHIFT 0x00000000
+#define DCIO_DEBUGF__DCIO_DIGB_DEBUG_MASK 0xffffffffL
+#define DCIO_DEBUGF__DCIO_DIGB_DEBUG__SHIFT 0x00000000
+#define DCIO_DEBUG_ID__DCIO_DEBUG_ID_MASK 0xffffffffL
+#define DCIO_DEBUG_ID__DCIO_DEBUG_ID__SHIFT 0x00000000
+#define DCIO_GSL0_CNTL__DCIO_GSL0_GLOBAL_UNLOCK_SEL_MASK 0x00070000L
+#define DCIO_GSL0_CNTL__DCIO_GSL0_GLOBAL_UNLOCK_SEL__SHIFT 0x00000010
+#define DCIO_GSL0_CNTL__DCIO_GSL0_TIMING_SYNC_SEL_MASK 0x00000700L
+#define DCIO_GSL0_CNTL__DCIO_GSL0_TIMING_SYNC_SEL__SHIFT 0x00000008
+#define DCIO_GSL0_CNTL__DCIO_GSL0_VSYNC_SEL_MASK 0x00000007L
+#define DCIO_GSL0_CNTL__DCIO_GSL0_VSYNC_SEL__SHIFT 0x00000000
+#define DCIO_GSL1_CNTL__DCIO_GSL1_GLOBAL_UNLOCK_SEL_MASK 0x00070000L
+#define DCIO_GSL1_CNTL__DCIO_GSL1_GLOBAL_UNLOCK_SEL__SHIFT 0x00000010
+#define DCIO_GSL1_CNTL__DCIO_GSL1_TIMING_SYNC_SEL_MASK 0x00000700L
+#define DCIO_GSL1_CNTL__DCIO_GSL1_TIMING_SYNC_SEL__SHIFT 0x00000008
+#define DCIO_GSL1_CNTL__DCIO_GSL1_VSYNC_SEL_MASK 0x00000007L
+#define DCIO_GSL1_CNTL__DCIO_GSL1_VSYNC_SEL__SHIFT 0x00000000
+#define DCIO_GSL2_CNTL__DCIO_GSL2_GLOBAL_UNLOCK_SEL_MASK 0x00070000L
+#define DCIO_GSL2_CNTL__DCIO_GSL2_GLOBAL_UNLOCK_SEL__SHIFT 0x00000010
+#define DCIO_GSL2_CNTL__DCIO_GSL2_TIMING_SYNC_SEL_MASK 0x00000700L
+#define DCIO_GSL2_CNTL__DCIO_GSL2_TIMING_SYNC_SEL__SHIFT 0x00000008
+#define DCIO_GSL2_CNTL__DCIO_GSL2_VSYNC_SEL_MASK 0x00000007L
+#define DCIO_GSL2_CNTL__DCIO_GSL2_VSYNC_SEL__SHIFT 0x00000000
+#define DCIO_GSL_GENLK_PAD_CNTL__DCIO_GENLK_CLK_GSL_FLIP_LOCK_SEL_MASK 0x00000030L
+#define DCIO_GSL_GENLK_PAD_CNTL__DCIO_GENLK_CLK_GSL_FLIP_LOCK_SEL__SHIFT 0x00000004
+#define DCIO_GSL_GENLK_PAD_CNTL__DCIO_GENLK_CLK_GSL_MASK_MASK 0x00000300L
+#define DCIO_GSL_GENLK_PAD_CNTL__DCIO_GENLK_CLK_GSL_MASK__SHIFT 0x00000008
+#define DCIO_GSL_GENLK_PAD_CNTL__DCIO_GENLK_CLK_GSL_TIMING_SYNC_SEL_MASK 0x00000003L
+#define DCIO_GSL_GENLK_PAD_CNTL__DCIO_GENLK_CLK_GSL_TIMING_SYNC_SEL__SHIFT 0x00000000
+#define DCIO_GSL_GENLK_PAD_CNTL__DCIO_GENLK_VSYNC_GSL_FLIP_LOCK_SEL_MASK 0x00300000L
+#define DCIO_GSL_GENLK_PAD_CNTL__DCIO_GENLK_VSYNC_GSL_FLIP_LOCK_SEL__SHIFT 0x00000014
+#define DCIO_GSL_GENLK_PAD_CNTL__DCIO_GENLK_VSYNC_GSL_MASK_MASK 0x03000000L
+#define DCIO_GSL_GENLK_PAD_CNTL__DCIO_GENLK_VSYNC_GSL_MASK__SHIFT 0x00000018
+#define DCIO_GSL_GENLK_PAD_CNTL__DCIO_GENLK_VSYNC_GSL_TIMING_SYNC_SEL_MASK 0x00030000L
+#define DCIO_GSL_GENLK_PAD_CNTL__DCIO_GENLK_VSYNC_GSL_TIMING_SYNC_SEL__SHIFT 0x00000010
+#define DCIO_GSL_SWAPLOCK_PAD_CNTL__DCIO_SWAPLOCK_A_GSL_FLIP_LOCK_SEL_MASK 0x00000030L
+#define DCIO_GSL_SWAPLOCK_PAD_CNTL__DCIO_SWAPLOCK_A_GSL_FLIP_LOCK_SEL__SHIFT 0x00000004
+#define DCIO_GSL_SWAPLOCK_PAD_CNTL__DCIO_SWAPLOCK_A_GSL_MASK_MASK 0x00000300L
+#define DCIO_GSL_SWAPLOCK_PAD_CNTL__DCIO_SWAPLOCK_A_GSL_MASK__SHIFT 0x00000008
+#define DCIO_GSL_SWAPLOCK_PAD_CNTL__DCIO_SWAPLOCK_A_GSL_TIMING_SYNC_SEL_MASK 0x00000003L
+#define DCIO_GSL_SWAPLOCK_PAD_CNTL__DCIO_SWAPLOCK_A_GSL_TIMING_SYNC_SEL__SHIFT 0x00000000
+#define DCIO_GSL_SWAPLOCK_PAD_CNTL__DCIO_SWAPLOCK_B_GSL_FLIP_LOCK_SEL_MASK 0x00300000L
+#define DCIO_GSL_SWAPLOCK_PAD_CNTL__DCIO_SWAPLOCK_B_GSL_FLIP_LOCK_SEL__SHIFT 0x00000014
+#define DCIO_GSL_SWAPLOCK_PAD_CNTL__DCIO_SWAPLOCK_B_GSL_MASK_MASK 0x03000000L
+#define DCIO_GSL_SWAPLOCK_PAD_CNTL__DCIO_SWAPLOCK_B_GSL_MASK__SHIFT 0x00000018
+#define DCIO_GSL_SWAPLOCK_PAD_CNTL__DCIO_SWAPLOCK_B_GSL_TIMING_SYNC_SEL_MASK 0x00030000L
+#define DCIO_GSL_SWAPLOCK_PAD_CNTL__DCIO_SWAPLOCK_B_GSL_TIMING_SYNC_SEL__SHIFT 0x00000010
+#define DCIO_IMPCAL_CNTL_AB__CALR_CNTL_OVERRIDE_MASK 0x0000000fL
+#define DCIO_IMPCAL_CNTL_AB__CALR_CNTL_OVERRIDE__SHIFT 0x00000000
+#define DCIO_IMPCAL_CNTL_AB__IMPCAL_ARB_STATE_MASK 0x00007000L
+#define DCIO_IMPCAL_CNTL_AB__IMPCAL_ARB_STATE__SHIFT 0x0000000c
+#define DCIO_IMPCAL_CNTL_AB__IMPCAL_SOFT_RESET_MASK 0x00000020L
+#define DCIO_IMPCAL_CNTL_AB__IMPCAL_SOFT_RESET__SHIFT 0x00000005
+#define DCIO_IMPCAL_CNTL_AB__IMPCAL_STATUS_MASK 0x00000300L
+#define DCIO_IMPCAL_CNTL_AB__IMPCAL_STATUS__SHIFT 0x00000008
+#define DCIO_IMPCAL_CNTL_CD__CALR_CNTL_OVERRIDE_MASK 0x0000000fL
+#define DCIO_IMPCAL_CNTL_CD__CALR_CNTL_OVERRIDE__SHIFT 0x00000000
+#define DCIO_IMPCAL_CNTL_CD__IMPCAL_ARB_STATE_MASK 0x00007000L
+#define DCIO_IMPCAL_CNTL_CD__IMPCAL_ARB_STATE__SHIFT 0x0000000c
+#define DCIO_IMPCAL_CNTL_CD__IMPCAL_SOFT_RESET_MASK 0x00000020L
+#define DCIO_IMPCAL_CNTL_CD__IMPCAL_SOFT_RESET__SHIFT 0x00000005
+#define DCIO_IMPCAL_CNTL_CD__IMPCAL_STATUS_MASK 0x00000300L
+#define DCIO_IMPCAL_CNTL_CD__IMPCAL_STATUS__SHIFT 0x00000008
+#define DCIO_IMPCAL_CNTL_EF__CALR_CNTL_OVERRIDE_MASK 0x0000000fL
+#define DCIO_IMPCAL_CNTL_EF__CALR_CNTL_OVERRIDE__SHIFT 0x00000000
+#define DCIO_IMPCAL_CNTL_EF__IMPCAL_ARB_STATE_MASK 0x00007000L
+#define DCIO_IMPCAL_CNTL_EF__IMPCAL_ARB_STATE__SHIFT 0x0000000c
+#define DCIO_IMPCAL_CNTL_EF__IMPCAL_SOFT_RESET_MASK 0x00000020L
+#define DCIO_IMPCAL_CNTL_EF__IMPCAL_SOFT_RESET__SHIFT 0x00000005
+#define DCIO_IMPCAL_CNTL_EF__IMPCAL_STATUS_MASK 0x00000300L
+#define DCIO_IMPCAL_CNTL_EF__IMPCAL_STATUS__SHIFT 0x00000008
+#define DCIO_TEST_DEBUG_DATA__DCIO_TEST_DEBUG_DATA_MASK 0xffffffffL
+#define DCIO_TEST_DEBUG_DATA__DCIO_TEST_DEBUG_DATA__SHIFT 0x00000000
+#define DCIO_TEST_DEBUG_INDEX__DCIO_TEST_DEBUG_INDEX_MASK 0x000000ffL
+#define DCIO_TEST_DEBUG_INDEX__DCIO_TEST_DEBUG_INDEX__SHIFT 0x00000000
+#define DCIO_TEST_DEBUG_INDEX__DCIO_TEST_DEBUG_WRITE_EN_MASK 0x00000100L
+#define DCIO_TEST_DEBUG_INDEX__DCIO_TEST_DEBUG_WRITE_EN__SHIFT 0x00000008
+#define DCI_SOFT_RESET__DMIF0_SOFT_RESET_MASK 0x00000010L
+#define DCI_SOFT_RESET__DMIF0_SOFT_RESET__SHIFT 0x00000004
+#define DCI_SOFT_RESET__DMIF1_SOFT_RESET_MASK 0x00000020L
+#define DCI_SOFT_RESET__DMIF1_SOFT_RESET__SHIFT 0x00000005
+#define DCI_SOFT_RESET__DMIF2_SOFT_RESET_MASK 0x00000040L
+#define DCI_SOFT_RESET__DMIF2_SOFT_RESET__SHIFT 0x00000006
+#define DCI_SOFT_RESET__DMIF3_SOFT_RESET_MASK 0x00000080L
+#define DCI_SOFT_RESET__DMIF3_SOFT_RESET__SHIFT 0x00000007
+#define DCI_SOFT_RESET__DMIF4_SOFT_RESET_MASK 0x00000100L
+#define DCI_SOFT_RESET__DMIF4_SOFT_RESET__SHIFT 0x00000008
+#define DCI_SOFT_RESET__DMIF5_SOFT_RESET_MASK 0x00000200L
+#define DCI_SOFT_RESET__DMIF5_SOFT_RESET__SHIFT 0x00000009
+#define DCI_SOFT_RESET__DMIFARB_SOFT_RESET_MASK 0x00001000L
+#define DCI_SOFT_RESET__DMIFARB_SOFT_RESET__SHIFT 0x0000000c
+#define DCI_SOFT_RESET__FBC_SOFT_RESET_MASK 0x00000008L
+#define DCI_SOFT_RESET__FBC_SOFT_RESET__SHIFT 0x00000003
+#define DCI_SOFT_RESET__MCIF_SOFT_RESET_MASK 0x00000004L
+#define DCI_SOFT_RESET__MCIF_SOFT_RESET__SHIFT 0x00000002
+#define DCI_SOFT_RESET__VGA_SOFT_RESET_MASK 0x00000001L
+#define DCI_SOFT_RESET__VGA_SOFT_RESET__SHIFT 0x00000000
+#define DCI_SOFT_RESET__VIP_SOFT_RESET_MASK 0x00000002L
+#define DCI_SOFT_RESET__VIP_SOFT_RESET__SHIFT 0x00000001
+#define DCI_TEST_DEBUG_DATA__DCI_TEST_DEBUG_DATA_MASK 0xffffffffL
+#define DCI_TEST_DEBUG_DATA__DCI_TEST_DEBUG_DATA__SHIFT 0x00000000
+#define DCI_TEST_DEBUG_INDEX__DCI_TEST_DEBUG_INDEX_MASK 0x000000ffL
+#define DCI_TEST_DEBUG_INDEX__DCI_TEST_DEBUG_INDEX__SHIFT 0x00000000
+#define DCI_TEST_DEBUG_INDEX__DCI_TEST_DEBUG_WRITE_EN_MASK 0x00000100L
+#define DCI_TEST_DEBUG_INDEX__DCI_TEST_DEBUG_WRITE_EN__SHIFT 0x00000008
+#define DC_LUT_30_COLOR__DC_LUT_COLOR_10_BLUE_MASK 0x000003ffL
+#define DC_LUT_30_COLOR__DC_LUT_COLOR_10_BLUE__SHIFT 0x00000000
+#define DC_LUT_30_COLOR__DC_LUT_COLOR_10_GREEN_MASK 0x000ffc00L
+#define DC_LUT_30_COLOR__DC_LUT_COLOR_10_GREEN__SHIFT 0x0000000a
+#define DC_LUT_30_COLOR__DC_LUT_COLOR_10_RED_MASK 0x3ff00000L
+#define DC_LUT_30_COLOR__DC_LUT_COLOR_10_RED__SHIFT 0x00000014
+#define DC_LUT_AUTOFILL__DC_LUT_AUTOFILL_DONE_MASK 0x00000002L
+#define DC_LUT_AUTOFILL__DC_LUT_AUTOFILL_DONE__SHIFT 0x00000001
+#define DC_LUT_AUTOFILL__DC_LUT_AUTOFILL_MASK 0x00000001L
+#define DC_LUT_AUTOFILL__DC_LUT_AUTOFILL__SHIFT 0x00000000
+#define DC_LUT_BLACK_OFFSET_BLUE__DC_LUT_BLACK_OFFSET_BLUE_MASK 0x0000ffffL
+#define DC_LUT_BLACK_OFFSET_BLUE__DC_LUT_BLACK_OFFSET_BLUE__SHIFT 0x00000000
+#define DC_LUT_BLACK_OFFSET_GREEN__DC_LUT_BLACK_OFFSET_GREEN_MASK 0x0000ffffL
+#define DC_LUT_BLACK_OFFSET_GREEN__DC_LUT_BLACK_OFFSET_GREEN__SHIFT 0x00000000
+#define DC_LUT_BLACK_OFFSET_RED__DC_LUT_BLACK_OFFSET_RED_MASK 0x0000ffffL
+#define DC_LUT_BLACK_OFFSET_RED__DC_LUT_BLACK_OFFSET_RED__SHIFT 0x00000000
+#define DC_LUT_CONTROL__DC_LUT_DATA_B_FLOAT_POINT_EN_MASK 0x00000020L
+#define DC_LUT_CONTROL__DC_LUT_DATA_B_FLOAT_POINT_EN__SHIFT 0x00000005
+#define DC_LUT_CONTROL__DC_LUT_DATA_B_FORMAT_MASK 0x000000c0L
+#define DC_LUT_CONTROL__DC_LUT_DATA_B_FORMAT__SHIFT 0x00000006
+#define DC_LUT_CONTROL__DC_LUT_DATA_B_SIGNED_EN_MASK 0x00000010L
+#define DC_LUT_CONTROL__DC_LUT_DATA_B_SIGNED_EN__SHIFT 0x00000004
+#define DC_LUT_CONTROL__DC_LUT_DATA_G_FLOAT_POINT_EN_MASK 0x00002000L
+#define DC_LUT_CONTROL__DC_LUT_DATA_G_FLOAT_POINT_EN__SHIFT 0x0000000d
+#define DC_LUT_CONTROL__DC_LUT_DATA_G_FORMAT_MASK 0x0000c000L
+#define DC_LUT_CONTROL__DC_LUT_DATA_G_FORMAT__SHIFT 0x0000000e
+#define DC_LUT_CONTROL__DC_LUT_DATA_G_SIGNED_EN_MASK 0x00001000L
+#define DC_LUT_CONTROL__DC_LUT_DATA_G_SIGNED_EN__SHIFT 0x0000000c
+#define DC_LUT_CONTROL__DC_LUT_DATA_R_FLOAT_POINT_EN_MASK 0x00200000L
+#define DC_LUT_CONTROL__DC_LUT_DATA_R_FLOAT_POINT_EN__SHIFT 0x00000015
+#define DC_LUT_CONTROL__DC_LUT_DATA_R_FORMAT_MASK 0x00c00000L
+#define DC_LUT_CONTROL__DC_LUT_DATA_R_FORMAT__SHIFT 0x00000016
+#define DC_LUT_CONTROL__DC_LUT_DATA_R_SIGNED_EN_MASK 0x00100000L
+#define DC_LUT_CONTROL__DC_LUT_DATA_R_SIGNED_EN__SHIFT 0x00000014
+#define DC_LUT_CONTROL__DC_LUT_INC_B_MASK 0x0000000fL
+#define DC_LUT_CONTROL__DC_LUT_INC_B__SHIFT 0x00000000
+#define DC_LUT_CONTROL__DC_LUT_INC_G_MASK 0x00000f00L
+#define DC_LUT_CONTROL__DC_LUT_INC_G__SHIFT 0x00000008
+#define DC_LUT_CONTROL__DC_LUT_INC_R_MASK 0x000f0000L
+#define DC_LUT_CONTROL__DC_LUT_INC_R__SHIFT 0x00000010
+#define DC_LUT_PWL_DATA__DC_LUT_BASE_MASK 0x0000ffffL
+#define DC_LUT_PWL_DATA__DC_LUT_BASE__SHIFT 0x00000000
+#define DC_LUT_PWL_DATA__DC_LUT_DELTA_MASK 0xffff0000L
+#define DC_LUT_PWL_DATA__DC_LUT_DELTA__SHIFT 0x00000010
+#define DC_LUT_RW_INDEX__DC_LUT_RW_INDEX_MASK 0x000000ffL
+#define DC_LUT_RW_INDEX__DC_LUT_RW_INDEX__SHIFT 0x00000000
+#define DC_LUT_RW_MODE__DC_LUT_RW_MODE_MASK 0x00000001L
+#define DC_LUT_RW_MODE__DC_LUT_RW_MODE__SHIFT 0x00000000
+#define DC_LUT_SEQ_COLOR__DC_LUT_SEQ_COLOR_MASK 0x0000ffffL
+#define DC_LUT_SEQ_COLOR__DC_LUT_SEQ_COLOR__SHIFT 0x00000000
+#define DC_LUT_VGA_ACCESS_ENABLE__DC_LUT_VGA_ACCESS_ENABLE_MASK 0x00000001L
+#define DC_LUT_VGA_ACCESS_ENABLE__DC_LUT_VGA_ACCESS_ENABLE__SHIFT 0x00000000
+#define DC_LUT_WHITE_OFFSET_BLUE__DC_LUT_WHITE_OFFSET_BLUE_MASK 0x0000ffffL
+#define DC_LUT_WHITE_OFFSET_BLUE__DC_LUT_WHITE_OFFSET_BLUE__SHIFT 0x00000000
+#define DC_LUT_WHITE_OFFSET_GREEN__DC_LUT_WHITE_OFFSET_GREEN_MASK 0x0000ffffL
+#define DC_LUT_WHITE_OFFSET_GREEN__DC_LUT_WHITE_OFFSET_GREEN__SHIFT 0x00000000
+#define DC_LUT_WHITE_OFFSET_RED__DC_LUT_WHITE_OFFSET_RED_MASK 0x0000ffffL
+#define DC_LUT_WHITE_OFFSET_RED__DC_LUT_WHITE_OFFSET_RED__SHIFT 0x00000000
+#define DC_LUT_WRITE_EN_MASK__DC_LUT_WRITE_EN_MASK_MASK 0x00000007L
+#define DC_LUT_WRITE_EN_MASK__DC_LUT_WRITE_EN_MASK__SHIFT 0x00000000
+#define DC_MVP_LB_CONTROL__DC_MVP_SPARE_FLOPS_MASK 0x80000000L
+#define DC_MVP_LB_CONTROL__DC_MVP_SPARE_FLOPS__SHIFT 0x0000001f
+#define DC_MVP_LB_CONTROL__DC_MVP_SWAP_LOCK_IN_CAP_MASK 0x10000000L
+#define DC_MVP_LB_CONTROL__DC_MVP_SWAP_LOCK_IN_CAP__SHIFT 0x0000001c
+#define DC_MVP_LB_CONTROL__DC_MVP_SWAP_LOCK_OUT_FORCE_ONE_MASK 0x00001000L
+#define DC_MVP_LB_CONTROL__DC_MVP_SWAP_LOCK_OUT_FORCE_ONE__SHIFT 0x0000000c
+#define DC_MVP_LB_CONTROL__DC_MVP_SWAP_LOCK_OUT_FORCE_ZERO_MASK 0x00010000L
+#define DC_MVP_LB_CONTROL__DC_MVP_SWAP_LOCK_OUT_FORCE_ZERO__SHIFT 0x00000010
+#define DC_MVP_LB_CONTROL__DC_MVP_SWAP_LOCK_OUT_SEL_MASK 0x00000100L
+#define DC_MVP_LB_CONTROL__DC_MVP_SWAP_LOCK_OUT_SEL__SHIFT 0x00000008
+#define DC_MVP_LB_CONTROL__DC_MVP_SWAP_LOCK_STATUS_MASK 0x00100000L
+#define DC_MVP_LB_CONTROL__DC_MVP_SWAP_LOCK_STATUS__SHIFT 0x00000014
+#define DC_MVP_LB_CONTROL__MVP_SWAP_LOCK_IN_MODE_MASK 0x00000003L
+#define DC_MVP_LB_CONTROL__MVP_SWAP_LOCK_IN_MODE__SHIFT 0x00000000
+#define DCO_CLK_CNTL__DCO_TEST_CLK_SEL_MASK 0x0000001fL
+#define DCO_CLK_CNTL__DCO_TEST_CLK_SEL__SHIFT 0x00000000
+#define DCO_CLK_CNTL__DISPCLK_G_ABM_GATE_DIS_MASK 0x00000040L
+#define DCO_CLK_CNTL__DISPCLK_G_ABM_GATE_DIS__SHIFT 0x00000006
+#define DCO_CLK_CNTL__DISPCLK_G_DACA_GATE_DIS_MASK 0x00000100L
+#define DCO_CLK_CNTL__DISPCLK_G_DACA_GATE_DIS__SHIFT 0x00000008
+#define DCO_CLK_CNTL__DISPCLK_G_DACB_GATE_DIS_MASK 0x00000200L
+#define DCO_CLK_CNTL__DISPCLK_G_DACB_GATE_DIS__SHIFT 0x00000009
+#define DCO_CLK_CNTL__DISPCLK_G_DIGA_GATE_DIS_MASK 0x01000000L
+#define DCO_CLK_CNTL__DISPCLK_G_DIGA_GATE_DIS__SHIFT 0x00000018
+#define DCO_CLK_CNTL__DISPCLK_G_DIGB_GATE_DIS_MASK 0x02000000L
+#define DCO_CLK_CNTL__DISPCLK_G_DIGB_GATE_DIS__SHIFT 0x00000019
+#define DCO_CLK_CNTL__DISPCLK_G_DIGC_GATE_DIS_MASK 0x04000000L
+#define DCO_CLK_CNTL__DISPCLK_G_DIGC_GATE_DIS__SHIFT 0x0000001a
+#define DCO_CLK_CNTL__DISPCLK_G_DIGD_GATE_DIS_MASK 0x08000000L
+#define DCO_CLK_CNTL__DISPCLK_G_DIGD_GATE_DIS__SHIFT 0x0000001b
+#define DCO_CLK_CNTL__DISPCLK_G_DIGE_GATE_DIS_MASK 0x10000000L
+#define DCO_CLK_CNTL__DISPCLK_G_DIGE_GATE_DIS__SHIFT 0x0000001c
+#define DCO_CLK_CNTL__DISPCLK_G_DIGF_GATE_DIS_MASK 0x20000000L
+#define DCO_CLK_CNTL__DISPCLK_G_DIGF_GATE_DIS__SHIFT 0x0000001d
+#define DCO_CLK_CNTL__DISPCLK_G_DVO_GATE_DIS_MASK 0x00000080L
+#define DCO_CLK_CNTL__DISPCLK_G_DVO_GATE_DIS__SHIFT 0x00000007
+#define DCO_CLK_CNTL__DISPCLK_G_FMT0_GATE_DIS_MASK 0x00010000L
+#define DCO_CLK_CNTL__DISPCLK_G_FMT0_GATE_DIS__SHIFT 0x00000010
+#define DCO_CLK_CNTL__DISPCLK_G_FMT1_GATE_DIS_MASK 0x00020000L
+#define DCO_CLK_CNTL__DISPCLK_G_FMT1_GATE_DIS__SHIFT 0x00000011
+#define DCO_CLK_CNTL__DISPCLK_G_FMT2_GATE_DIS_MASK 0x00040000L
+#define DCO_CLK_CNTL__DISPCLK_G_FMT2_GATE_DIS__SHIFT 0x00000012
+#define DCO_CLK_CNTL__DISPCLK_G_FMT3_GATE_DIS_MASK 0x00080000L
+#define DCO_CLK_CNTL__DISPCLK_G_FMT3_GATE_DIS__SHIFT 0x00000013
+#define DCO_CLK_CNTL__DISPCLK_G_FMT4_GATE_DIS_MASK 0x00100000L
+#define DCO_CLK_CNTL__DISPCLK_G_FMT4_GATE_DIS__SHIFT 0x00000014
+#define DCO_CLK_CNTL__DISPCLK_G_FMT5_GATE_DIS_MASK 0x00200000L
+#define DCO_CLK_CNTL__DISPCLK_G_FMT5_GATE_DIS__SHIFT 0x00000015
+#define DCO_CLK_CNTL__DISPCLK_R_ABM_GATE_DIS_MASK 0x00001000L
+#define DCO_CLK_CNTL__DISPCLK_R_ABM_GATE_DIS__SHIFT 0x0000000c
+#define DCO_CLK_CNTL__DISPCLK_R_DCO_GATE_DIS_MASK 0x00000020L
+#define DCO_CLK_CNTL__DISPCLK_R_DCO_GATE_DIS__SHIFT 0x00000005
+#define DCO_CLK_RAMP_CNTL__DISPCLK_G_ABM_RAMP_DIS_MASK 0x00000040L
+#define DCO_CLK_RAMP_CNTL__DISPCLK_G_ABM_RAMP_DIS__SHIFT 0x00000006
+#define DCO_CLK_RAMP_CNTL__DISPCLK_G_DACA_RAMP_DIS_MASK 0x00000100L
+#define DCO_CLK_RAMP_CNTL__DISPCLK_G_DACA_RAMP_DIS__SHIFT 0x00000008
+#define DCO_CLK_RAMP_CNTL__DISPCLK_G_DACB_RAMP_DIS_MASK 0x00000200L
+#define DCO_CLK_RAMP_CNTL__DISPCLK_G_DACB_RAMP_DIS__SHIFT 0x00000009
+#define DCO_CLK_RAMP_CNTL__DISPCLK_G_DIGA_RAMP_DIS_MASK 0x01000000L
+#define DCO_CLK_RAMP_CNTL__DISPCLK_G_DIGA_RAMP_DIS__SHIFT 0x00000018
+#define DCO_CLK_RAMP_CNTL__DISPCLK_G_DIGB_RAMP_DIS_MASK 0x02000000L
+#define DCO_CLK_RAMP_CNTL__DISPCLK_G_DIGB_RAMP_DIS__SHIFT 0x00000019
+#define DCO_CLK_RAMP_CNTL__DISPCLK_G_DIGC_RAMP_DIS_MASK 0x04000000L
+#define DCO_CLK_RAMP_CNTL__DISPCLK_G_DIGC_RAMP_DIS__SHIFT 0x0000001a
+#define DCO_CLK_RAMP_CNTL__DISPCLK_G_DIGD_RAMP_DIS_MASK 0x08000000L
+#define DCO_CLK_RAMP_CNTL__DISPCLK_G_DIGD_RAMP_DIS__SHIFT 0x0000001b
+#define DCO_CLK_RAMP_CNTL__DISPCLK_G_DIGE_RAMP_DIS_MASK 0x10000000L
+#define DCO_CLK_RAMP_CNTL__DISPCLK_G_DIGE_RAMP_DIS__SHIFT 0x0000001c
+#define DCO_CLK_RAMP_CNTL__DISPCLK_G_DIGF_RAMP_DIS_MASK 0x20000000L
+#define DCO_CLK_RAMP_CNTL__DISPCLK_G_DIGF_RAMP_DIS__SHIFT 0x0000001d
+#define DCO_CLK_RAMP_CNTL__DISPCLK_G_DVO_RAMP_DIS_MASK 0x00000080L
+#define DCO_CLK_RAMP_CNTL__DISPCLK_G_DVO_RAMP_DIS__SHIFT 0x00000007
+#define DCO_CLK_RAMP_CNTL__DISPCLK_G_FMT0_RAMP_DIS_MASK 0x00010000L
+#define DCO_CLK_RAMP_CNTL__DISPCLK_G_FMT0_RAMP_DIS__SHIFT 0x00000010
+#define DCO_CLK_RAMP_CNTL__DISPCLK_G_FMT1_RAMP_DIS_MASK 0x00020000L
+#define DCO_CLK_RAMP_CNTL__DISPCLK_G_FMT1_RAMP_DIS__SHIFT 0x00000011
+#define DCO_CLK_RAMP_CNTL__DISPCLK_G_FMT2_RAMP_DIS_MASK 0x00040000L
+#define DCO_CLK_RAMP_CNTL__DISPCLK_G_FMT2_RAMP_DIS__SHIFT 0x00000012
+#define DCO_CLK_RAMP_CNTL__DISPCLK_G_FMT3_RAMP_DIS_MASK 0x00080000L
+#define DCO_CLK_RAMP_CNTL__DISPCLK_G_FMT3_RAMP_DIS__SHIFT 0x00000013
+#define DCO_CLK_RAMP_CNTL__DISPCLK_G_FMT4_RAMP_DIS_MASK 0x00100000L
+#define DCO_CLK_RAMP_CNTL__DISPCLK_G_FMT4_RAMP_DIS__SHIFT 0x00000014
+#define DCO_CLK_RAMP_CNTL__DISPCLK_G_FMT5_RAMP_DIS_MASK 0x00200000L
+#define DCO_CLK_RAMP_CNTL__DISPCLK_G_FMT5_RAMP_DIS__SHIFT 0x00000015
+#define DCO_CLK_RAMP_CNTL__DISPCLK_R_ABM_RAMP_DIS_MASK 0x00001000L
+#define DCO_CLK_RAMP_CNTL__DISPCLK_R_ABM_RAMP_DIS__SHIFT 0x0000000c
+#define DCO_CLK_RAMP_CNTL__DISPCLK_R_DCO_RAMP_DIS_MASK 0x00000020L
+#define DCO_CLK_RAMP_CNTL__DISPCLK_R_DCO_RAMP_DIS__SHIFT 0x00000005
+#define DCO_LIGHT_SLEEP_DIS__DPA_LIGHT_SLEEP_DIS_MASK 0x00000008L
+#define DCO_LIGHT_SLEEP_DIS__DPA_LIGHT_SLEEP_DIS__SHIFT 0x00000003
+#define DCO_LIGHT_SLEEP_DIS__DPA_MEM_SHUTDOWN_DIS_MASK 0x00020000L
+#define DCO_LIGHT_SLEEP_DIS__DPA_MEM_SHUTDOWN_DIS__SHIFT 0x00000011
+#define DCO_LIGHT_SLEEP_DIS__DPB_LIGHT_SLEEP_DIS_MASK 0x00000010L
+#define DCO_LIGHT_SLEEP_DIS__DPB_LIGHT_SLEEP_DIS__SHIFT 0x00000004
+#define DCO_LIGHT_SLEEP_DIS__DPB_MEM_SHUTDOWN_DIS_MASK 0x00040000L
+#define DCO_LIGHT_SLEEP_DIS__DPB_MEM_SHUTDOWN_DIS__SHIFT 0x00000012
+#define DCO_LIGHT_SLEEP_DIS__DPC_LIGHT_SLEEP_DIS_MASK 0x00000020L
+#define DCO_LIGHT_SLEEP_DIS__DPC_LIGHT_SLEEP_DIS__SHIFT 0x00000005
+#define DCO_LIGHT_SLEEP_DIS__DPC_MEM_SHUTDOWN_DIS_MASK 0x00080000L
+#define DCO_LIGHT_SLEEP_DIS__DPC_MEM_SHUTDOWN_DIS__SHIFT 0x00000013
+#define DCO_LIGHT_SLEEP_DIS__DPD_LIGHT_SLEEP_DIS_MASK 0x00000040L
+#define DCO_LIGHT_SLEEP_DIS__DPD_LIGHT_SLEEP_DIS__SHIFT 0x00000006
+#define DCO_LIGHT_SLEEP_DIS__DPD_MEM_SHUTDOWN_DIS_MASK 0x00100000L
+#define DCO_LIGHT_SLEEP_DIS__DPD_MEM_SHUTDOWN_DIS__SHIFT 0x00000014
+#define DCO_LIGHT_SLEEP_DIS__DPE_LIGHT_SLEEP_DIS_MASK 0x00000080L
+#define DCO_LIGHT_SLEEP_DIS__DPE_LIGHT_SLEEP_DIS__SHIFT 0x00000007
+#define DCO_LIGHT_SLEEP_DIS__DPE_MEM_SHUTDOWN_DIS_MASK 0x00200000L
+#define DCO_LIGHT_SLEEP_DIS__DPE_MEM_SHUTDOWN_DIS__SHIFT 0x00000015
+#define DCO_LIGHT_SLEEP_DIS__DPF_LIGHT_SLEEP_DIS_MASK 0x00000100L
+#define DCO_LIGHT_SLEEP_DIS__DPF_LIGHT_SLEEP_DIS__SHIFT 0x00000008
+#define DCO_LIGHT_SLEEP_DIS__DPF_MEM_SHUTDOWN_DIS_MASK 0x00400000L
+#define DCO_LIGHT_SLEEP_DIS__DPF_MEM_SHUTDOWN_DIS__SHIFT 0x00000016
+#define DCO_LIGHT_SLEEP_DIS__HDMI0_LIGHT_SLEEP_DIS_MASK 0x00000200L
+#define DCO_LIGHT_SLEEP_DIS__HDMI0_LIGHT_SLEEP_DIS__SHIFT 0x00000009
+#define DCO_LIGHT_SLEEP_DIS__HDMI1_LIGHT_SLEEP_DIS_MASK 0x00000400L
+#define DCO_LIGHT_SLEEP_DIS__HDMI1_LIGHT_SLEEP_DIS__SHIFT 0x0000000a
+#define DCO_LIGHT_SLEEP_DIS__HDMI2_LIGHT_SLEEP_DIS_MASK 0x00000800L
+#define DCO_LIGHT_SLEEP_DIS__HDMI2_LIGHT_SLEEP_DIS__SHIFT 0x0000000b
+#define DCO_LIGHT_SLEEP_DIS__HDMI3_LIGHT_SLEEP_DIS_MASK 0x00001000L
+#define DCO_LIGHT_SLEEP_DIS__HDMI3_LIGHT_SLEEP_DIS__SHIFT 0x0000000c
+#define DCO_LIGHT_SLEEP_DIS__HDMI4_LIGHT_SLEEP_DIS_MASK 0x00002000L
+#define DCO_LIGHT_SLEEP_DIS__HDMI4_LIGHT_SLEEP_DIS__SHIFT 0x0000000d
+#define DCO_LIGHT_SLEEP_DIS__HDMI5_LIGHT_SLEEP_DIS_MASK 0x00004000L
+#define DCO_LIGHT_SLEEP_DIS__HDMI5_LIGHT_SLEEP_DIS__SHIFT 0x0000000e
+#define DCO_LIGHT_SLEEP_DIS__I2C_LIGHT_SLEEP_FORCE_MASK 0x00000002L
+#define DCO_LIGHT_SLEEP_DIS__I2C_LIGHT_SLEEP_FORCE__SHIFT 0x00000001
+#define DCO_LIGHT_SLEEP_DIS__MVP_LIGHT_SLEEP_DIS_MASK 0x00000004L
+#define DCO_LIGHT_SLEEP_DIS__MVP_LIGHT_SLEEP_DIS__SHIFT 0x00000002
+#define DCO_LIGHT_SLEEP_DIS__MVP_MEM_SHUTDOWN_DIS_MASK 0x00010000L
+#define DCO_LIGHT_SLEEP_DIS__MVP_MEM_SHUTDOWN_DIS__SHIFT 0x00000010
+#define DCO_LIGHT_SLEEP_DIS__TVOUT_LIGHT_SLEEP_DIS_MASK 0x00000001L
+#define DCO_LIGHT_SLEEP_DIS__TVOUT_LIGHT_SLEEP_DIS__SHIFT 0x00000000
+#define DCO_MEM_POWER_STATE__DPA_MEM_PWR_STATE_MASK 0x000000c0L
+#define DCO_MEM_POWER_STATE__DPA_MEM_PWR_STATE__SHIFT 0x00000006
+#define DCO_MEM_POWER_STATE__DPB_MEM_PWR_STATE_MASK 0x00000300L
+#define DCO_MEM_POWER_STATE__DPB_MEM_PWR_STATE__SHIFT 0x00000008
+#define DCO_MEM_POWER_STATE__DPC_MEM_PWR_STATE_MASK 0x00000c00L
+#define DCO_MEM_POWER_STATE__DPC_MEM_PWR_STATE__SHIFT 0x0000000a
+#define DCO_MEM_POWER_STATE__DPD_MEM_PWR_STATE_MASK 0x00003000L
+#define DCO_MEM_POWER_STATE__DPD_MEM_PWR_STATE__SHIFT 0x0000000c
+#define DCO_MEM_POWER_STATE__DPE_MEM_PWR_STATE_MASK 0x0000c000L
+#define DCO_MEM_POWER_STATE__DPE_MEM_PWR_STATE__SHIFT 0x0000000e
+#define DCO_MEM_POWER_STATE__DPF_MEM_PWR_STATE_MASK 0x00030000L
+#define DCO_MEM_POWER_STATE__DPF_MEM_PWR_STATE__SHIFT 0x00000010
+#define DCO_MEM_POWER_STATE__HDMI0_MEM_PWR_STATE_MASK 0x000c0000L
+#define DCO_MEM_POWER_STATE__HDMI0_MEM_PWR_STATE__SHIFT 0x00000012
+#define DCO_MEM_POWER_STATE__HDMI1_MEM_PWR_STATE_MASK 0x00300000L
+#define DCO_MEM_POWER_STATE__HDMI1_MEM_PWR_STATE__SHIFT 0x00000014
+#define DCO_MEM_POWER_STATE__HDMI2_MEM_PWR_STATE_MASK 0x00c00000L
+#define DCO_MEM_POWER_STATE__HDMI2_MEM_PWR_STATE__SHIFT 0x00000016
+#define DCO_MEM_POWER_STATE__HDMI3_MEM_PWR_STATE_MASK 0x03000000L
+#define DCO_MEM_POWER_STATE__HDMI3_MEM_PWR_STATE__SHIFT 0x00000018
+#define DCO_MEM_POWER_STATE__HDMI4_MEM_PWR_STATE_MASK 0x0c000000L
+#define DCO_MEM_POWER_STATE__HDMI4_MEM_PWR_STATE__SHIFT 0x0000001a
+#define DCO_MEM_POWER_STATE__HDMI5_MEM_PWR_STATE_MASK 0x30000000L
+#define DCO_MEM_POWER_STATE__HDMI5_MEM_PWR_STATE__SHIFT 0x0000001c
+#define DCO_MEM_POWER_STATE__I2C_MEM_PWR_STATE_MASK 0x0000000cL
+#define DCO_MEM_POWER_STATE__I2C_MEM_PWR_STATE__SHIFT 0x00000002
+#define DCO_MEM_POWER_STATE__MVP_MEM_PWR_STATE_MASK 0x00000030L
+#define DCO_MEM_POWER_STATE__MVP_MEM_PWR_STATE__SHIFT 0x00000004
+#define DCO_MEM_POWER_STATE__TVOUT_MEM_PWR_STATE_MASK 0x00000003L
+#define DCO_MEM_POWER_STATE__TVOUT_MEM_PWR_STATE__SHIFT 0x00000000
+#define DCO_SOFT_RESET__ABM_SOFT_RESET_MASK 0x02000000L
+#define DCO_SOFT_RESET__ABM_SOFT_RESET__SHIFT 0x00000019
+#define DCO_SOFT_RESET__DACA_CFG_IF_SOFT_RESET_MASK 0x20000000L
+#define DCO_SOFT_RESET__DACA_CFG_IF_SOFT_RESET__SHIFT 0x0000001d
+#define DCO_SOFT_RESET__DACA_SOFT_RESET_MASK 0x00000001L
+#define DCO_SOFT_RESET__DACA_SOFT_RESET__SHIFT 0x00000000
+#define DCO_SOFT_RESET__DACB_SOFT_RESET_MASK 0x00000002L
+#define DCO_SOFT_RESET__DACB_SOFT_RESET__SHIFT 0x00000001
+#define DCO_SOFT_RESET__DVO_ENABLE_RST_MASK 0x00000008L
+#define DCO_SOFT_RESET__DVO_ENABLE_RST__SHIFT 0x00000003
+#define DCO_SOFT_RESET__DVO_SOFT_RESET_MASK 0x08000000L
+#define DCO_SOFT_RESET__DVO_SOFT_RESET__SHIFT 0x0000001b
+#define DCO_SOFT_RESET__FMT0_SOFT_RESET_MASK 0x00010000L
+#define DCO_SOFT_RESET__FMT0_SOFT_RESET__SHIFT 0x00000010
+#define DCO_SOFT_RESET__FMT1_SOFT_RESET_MASK 0x00020000L
+#define DCO_SOFT_RESET__FMT1_SOFT_RESET__SHIFT 0x00000011
+#define DCO_SOFT_RESET__FMT2_SOFT_RESET_MASK 0x00040000L
+#define DCO_SOFT_RESET__FMT2_SOFT_RESET__SHIFT 0x00000012
+#define DCO_SOFT_RESET__FMT3_SOFT_RESET_MASK 0x00080000L
+#define DCO_SOFT_RESET__FMT3_SOFT_RESET__SHIFT 0x00000013
+#define DCO_SOFT_RESET__FMT4_SOFT_RESET_MASK 0x00100000L
+#define DCO_SOFT_RESET__FMT4_SOFT_RESET__SHIFT 0x00000014
+#define DCO_SOFT_RESET__FMT5_SOFT_RESET_MASK 0x00200000L
+#define DCO_SOFT_RESET__FMT5_SOFT_RESET__SHIFT 0x00000015
+#define DCO_SOFT_RESET__MVP_SOFT_RESET_MASK 0x01000000L
+#define DCO_SOFT_RESET__MVP_SOFT_RESET__SHIFT 0x00000018
+#define DCO_SOFT_RESET__SOFT_RESET_DVO_MASK 0x00000004L
+#define DCO_SOFT_RESET__SOFT_RESET_DVO__SHIFT 0x00000002
+#define DCO_SOFT_RESET__SRBM_SOFT_RESET_ENABLE_MASK 0x10000000L
+#define DCO_SOFT_RESET__SRBM_SOFT_RESET_ENABLE__SHIFT 0x0000001c
+#define DCO_SOFT_RESET__TVOUT_SOFT_RESET_MASK 0x04000000L
+#define DCO_SOFT_RESET__TVOUT_SOFT_RESET__SHIFT 0x0000001a
+#define DC_PAD_EXTERN_SIG__DC_PAD_EXTERN_SIG_SEL_MASK 0x0000000fL
+#define DC_PAD_EXTERN_SIG__DC_PAD_EXTERN_SIG_SEL__SHIFT 0x00000000
+#define DC_PAD_EXTERN_SIG__MVP_PIXEL_SRC_STATUS_MASK 0x00000030L
+#define DC_PAD_EXTERN_SIG__MVP_PIXEL_SRC_STATUS__SHIFT 0x00000004
+#define DCP_CRC_CONTROL__DCP_CRC_ENABLE_MASK 0x00000001L
+#define DCP_CRC_CONTROL__DCP_CRC_ENABLE__SHIFT 0x00000000
+#define DCP_CRC_CONTROL__DCP_CRC_LINE_SEL_MASK 0x00000300L
+#define DCP_CRC_CONTROL__DCP_CRC_LINE_SEL__SHIFT 0x00000008
+#define DCP_CRC_CONTROL__DCP_CRC_SOURCE_SEL_MASK 0x0000001cL
+#define DCP_CRC_CONTROL__DCP_CRC_SOURCE_SEL__SHIFT 0x00000002
+#define DCP_CRC_CURRENT__DCP_CRC_CURRENT_MASK 0xffffffffL
+#define DCP_CRC_CURRENT__DCP_CRC_CURRENT__SHIFT 0x00000000
+#define DCP_CRC_LAST__DCP_CRC_LAST_MASK 0xffffffffL
+#define DCP_CRC_LAST__DCP_CRC_LAST__SHIFT 0x00000000
+#define DCP_CRC_MASK__DCP_CRC_MASK_MASK 0xffffffffL
+#define DCP_CRC_MASK__DCP_CRC_MASK__SHIFT 0x00000000
+#define DCP_DEBUG2__DCP_DEBUG2_MASK 0xffffffffL
+#define DCP_DEBUG2__DCP_DEBUG2__SHIFT 0x00000000
+#define DCP_DEBUG__DCP_DEBUG_MASK 0xffffffffL
+#define DCP_DEBUG__DCP_DEBUG__SHIFT 0x00000000
+#define DCP_FP_CONVERTED_FIELD__DCP_FP_CONVERTED_FIELD_DATA_MASK 0x0003ffffL
+#define DCP_FP_CONVERTED_FIELD__DCP_FP_CONVERTED_FIELD_DATA__SHIFT 0x00000000
+#define DCP_FP_CONVERTED_FIELD__DCP_FP_CONVERTED_FIELD_INDEX_MASK 0x07f00000L
+#define DCP_FP_CONVERTED_FIELD__DCP_FP_CONVERTED_FIELD_INDEX__SHIFT 0x00000014
+#define DC_PGCNTL_STATUS_REG__DCPG_ECO_DEBUG_MASK 0xffff0000L
+#define DC_PGCNTL_STATUS_REG__DCPG_ECO_DEBUG__SHIFT 0x00000010
+#define DC_PGCNTL_STATUS_REG__IPREQ_IGNORE_STATUS_MASK 0x00000004L
+#define DC_PGCNTL_STATUS_REG__IPREQ_IGNORE_STATUS__SHIFT 0x00000002
+#define DC_PGCNTL_STATUS_REG__SWREQ_RWOP_BUSY_MASK 0x00000001L
+#define DC_PGCNTL_STATUS_REG__SWREQ_RWOP_BUSY__SHIFT 0x00000000
+#define DC_PGCNTL_STATUS_REG__SWREQ_RWOP_FORCE_MASK 0x00000002L
+#define DC_PGCNTL_STATUS_REG__SWREQ_RWOP_FORCE__SHIFT 0x00000001
+#define DC_PGFSM_CONFIG_REG__PGFSM_CONFIG_REG_MASK 0xffffffffL
+#define DC_PGFSM_CONFIG_REG__PGFSM_CONFIG_REG__SHIFT 0x00000000
+#define DC_PGFSM_WRITE_REG__PGFSM_WRITE_REG_MASK 0xffffffffL
+#define DC_PGFSM_WRITE_REG__PGFSM_WRITE_REG__SHIFT 0x00000000
+#define DCP_GSL_CONTROL__DCP_GSL0_EN_MASK 0x00000001L
+#define DCP_GSL_CONTROL__DCP_GSL0_EN__SHIFT 0x00000000
+#define DCP_GSL_CONTROL__DCP_GSL1_EN_MASK 0x00000002L
+#define DCP_GSL_CONTROL__DCP_GSL1_EN__SHIFT 0x00000001
+#define DCP_GSL_CONTROL__DCP_GSL2_EN_MASK 0x00000004L
+#define DCP_GSL_CONTROL__DCP_GSL2_EN__SHIFT 0x00000002
+#define DCP_GSL_CONTROL__DCP_GSL_DELAY_SURFACE_UPDATE_PENDING_MASK 0x08000000L
+#define DCP_GSL_CONTROL__DCP_GSL_DELAY_SURFACE_UPDATE_PENDING__SHIFT 0x0000001b
+#define DCP_GSL_CONTROL__DCP_GSL_HSYNC_FLIP_CHECK_DELAY_MASK 0xf0000000L
+#define DCP_GSL_CONTROL__DCP_GSL_HSYNC_FLIP_CHECK_DELAY__SHIFT 0x0000001c
+#define DCP_GSL_CONTROL__DCP_GSL_HSYNC_FLIP_FORCE_DELAY_MASK 0x0000f000L
+#define DCP_GSL_CONTROL__DCP_GSL_HSYNC_FLIP_FORCE_DELAY__SHIFT 0x0000000c
+#define DCP_GSL_CONTROL__DCP_GSL_MASTER_EN_MASK 0x00010000L
+#define DCP_GSL_CONTROL__DCP_GSL_MASTER_EN__SHIFT 0x00000010
+#define DCP_GSL_CONTROL__DCP_GSL_MODE_MASK 0x00000300L
+#define DCP_GSL_CONTROL__DCP_GSL_MODE__SHIFT 0x00000008
+#define DCP_GSL_CONTROL__DCP_GSL_SYNC_SOURCE_MASK 0x03000000L
+#define DCP_GSL_CONTROL__DCP_GSL_SYNC_SOURCE__SHIFT 0x00000018
+#define DCPG_TEST_DEBUG_DATA__DCPG_TEST_DEBUG_DATA_MASK 0xffffffffL
+#define DCPG_TEST_DEBUG_DATA__DCPG_TEST_DEBUG_DATA__SHIFT 0x00000000
+#define DCPG_TEST_DEBUG_INDEX__DCPG_TEST_DEBUG_INDEX_MASK 0x000000ffL
+#define DCPG_TEST_DEBUG_INDEX__DCPG_TEST_DEBUG_INDEX__SHIFT 0x00000000
+#define DCPG_TEST_DEBUG_INDEX__DCPG_TEST_DEBUG_WRITE_EN_MASK 0x00000100L
+#define DCPG_TEST_DEBUG_INDEX__DCPG_TEST_DEBUG_WRITE_EN__SHIFT 0x00000008
+#define DC_PINSTRAPS__DC_PINSTRAPS_AUDIO_MASK 0x0000c000L
+#define DC_PINSTRAPS__DC_PINSTRAPS_AUDIO__SHIFT 0x0000000e
+#define DC_PINSTRAPS__DC_PINSTRAPS_BIF_CEC_DIS_MASK 0x00000400L
+#define DC_PINSTRAPS__DC_PINSTRAPS_BIF_CEC_DIS__SHIFT 0x0000000a
+#define DC_PINSTRAPS__DC_PINSTRAPS_CCBYPASS_MASK 0x00010000L
+#define DC_PINSTRAPS__DC_PINSTRAPS_CCBYPASS__SHIFT 0x00000010
+#define DC_PINSTRAPS__DC_PINSTRAPS_SMS_EN_HARD_MASK 0x00002000L
+#define DC_PINSTRAPS__DC_PINSTRAPS_SMS_EN_HARD__SHIFT 0x0000000d
+#define DC_PINSTRAPS__DC_PINSTRAPS_VIP_DEVICE_MASK 0x00000800L
+#define DC_PINSTRAPS__DC_PINSTRAPS_VIP_DEVICE__SHIFT 0x0000000b
+#define DCP_LB_DATA_GAP_BETWEEN_CHUNK__DCP_LB_GAP_BETWEEN_CHUNK_20BPP_MASK 0x0000000fL
+#define DCP_LB_DATA_GAP_BETWEEN_CHUNK__DCP_LB_GAP_BETWEEN_CHUNK_20BPP__SHIFT 0x00000000
+#define DCP_LB_DATA_GAP_BETWEEN_CHUNK__DCP_LB_GAP_BETWEEN_CHUNK_30BPP_MASK 0x000000f0L
+#define DCP_LB_DATA_GAP_BETWEEN_CHUNK__DCP_LB_GAP_BETWEEN_CHUNK_30BPP__SHIFT 0x00000004
+#define DCP_RANDOM_SEEDS__DCP_RAND_B_SEED_MASK 0x00ff0000L
+#define DCP_RANDOM_SEEDS__DCP_RAND_B_SEED__SHIFT 0x00000010
+#define DCP_RANDOM_SEEDS__DCP_RAND_G_SEED_MASK 0x0000ff00L
+#define DCP_RANDOM_SEEDS__DCP_RAND_G_SEED__SHIFT 0x00000008
+#define DCP_RANDOM_SEEDS__DCP_RAND_R_SEED_MASK 0x000000ffL
+#define DCP_RANDOM_SEEDS__DCP_RAND_R_SEED__SHIFT 0x00000000
+#define DCP_SPATIAL_DITHER_CNTL__DCP_FRAME_RANDOM_ENABLE_MASK 0x00000100L
+#define DCP_SPATIAL_DITHER_CNTL__DCP_FRAME_RANDOM_ENABLE__SHIFT 0x00000008
+#define DCP_SPATIAL_DITHER_CNTL__DCP_HIGHPASS_RANDOM_ENABLE_MASK 0x00000400L
+#define DCP_SPATIAL_DITHER_CNTL__DCP_HIGHPASS_RANDOM_ENABLE__SHIFT 0x0000000a
+#define DCP_SPATIAL_DITHER_CNTL__DCP_RGB_RANDOM_ENABLE_MASK 0x00000200L
+#define DCP_SPATIAL_DITHER_CNTL__DCP_RGB_RANDOM_ENABLE__SHIFT 0x00000009
+#define DCP_SPATIAL_DITHER_CNTL__DCP_SPATIAL_DITHER_DEPTH_MASK 0x00000040L
+#define DCP_SPATIAL_DITHER_CNTL__DCP_SPATIAL_DITHER_DEPTH__SHIFT 0x00000006
+#define DCP_SPATIAL_DITHER_CNTL__DCP_SPATIAL_DITHER_EN_MASK 0x00000001L
+#define DCP_SPATIAL_DITHER_CNTL__DCP_SPATIAL_DITHER_EN__SHIFT 0x00000000
+#define DCP_SPATIAL_DITHER_CNTL__DCP_SPATIAL_DITHER_MODE_MASK 0x00000030L
+#define DCP_SPATIAL_DITHER_CNTL__DCP_SPATIAL_DITHER_MODE__SHIFT 0x00000004
+#define DCP_TEST_DEBUG_DATA__DCP_TEST_DEBUG_DATA_MASK 0xffffffffL
+#define DCP_TEST_DEBUG_DATA__DCP_TEST_DEBUG_DATA__SHIFT 0x00000000
+#define DCP_TEST_DEBUG_INDEX__DCP_TEST_DEBUG_INDEX_MASK 0x000000ffL
+#define DCP_TEST_DEBUG_INDEX__DCP_TEST_DEBUG_INDEX__SHIFT 0x00000000
+#define DCP_TEST_DEBUG_INDEX__DCP_TEST_DEBUG_WRITE_EN_MASK 0x00000100L
+#define DCP_TEST_DEBUG_INDEX__DCP_TEST_DEBUG_WRITE_EN__SHIFT 0x00000008
+#define DC_RBBMIF_RDWR_CNTL1__DC_RBBMIF_CLIENT0_RDWR_DELAY_MASK 0x00000007L
+#define DC_RBBMIF_RDWR_CNTL1__DC_RBBMIF_CLIENT0_RDWR_DELAY__SHIFT 0x00000000
+#define DC_RBBMIF_RDWR_CNTL1__DC_RBBMIF_CLIENT0_RDWR_TIMEOUT_DIS_MASK 0x00000008L
+#define DC_RBBMIF_RDWR_CNTL1__DC_RBBMIF_CLIENT0_RDWR_TIMEOUT_DIS__SHIFT 0x00000003
+#define DC_RBBMIF_RDWR_CNTL1__DC_RBBMIF_CLIENT1_RDWR_DELAY_MASK 0x00000070L
+#define DC_RBBMIF_RDWR_CNTL1__DC_RBBMIF_CLIENT1_RDWR_DELAY__SHIFT 0x00000004
+#define DC_RBBMIF_RDWR_CNTL1__DC_RBBMIF_CLIENT1_RDWR_TIMEOUT_DIS_MASK 0x00000080L
+#define DC_RBBMIF_RDWR_CNTL1__DC_RBBMIF_CLIENT1_RDWR_TIMEOUT_DIS__SHIFT 0x00000007
+#define DC_RBBMIF_RDWR_CNTL1__DC_RBBMIF_CLIENT2_RDWR_DELAY_MASK 0x00000700L
+#define DC_RBBMIF_RDWR_CNTL1__DC_RBBMIF_CLIENT2_RDWR_DELAY__SHIFT 0x00000008
+#define DC_RBBMIF_RDWR_CNTL1__DC_RBBMIF_CLIENT2_RDWR_TIMEOUT_DIS_MASK 0x00000800L
+#define DC_RBBMIF_RDWR_CNTL1__DC_RBBMIF_CLIENT2_RDWR_TIMEOUT_DIS__SHIFT 0x0000000b
+#define DC_RBBMIF_RDWR_CNTL1__DC_RBBMIF_CLIENT3_RDWR_DELAY_MASK 0x00007000L
+#define DC_RBBMIF_RDWR_CNTL1__DC_RBBMIF_CLIENT3_RDWR_DELAY__SHIFT 0x0000000c
+#define DC_RBBMIF_RDWR_CNTL1__DC_RBBMIF_CLIENT3_RDWR_TIMEOUT_DIS_MASK 0x00008000L
+#define DC_RBBMIF_RDWR_CNTL1__DC_RBBMIF_CLIENT3_RDWR_TIMEOUT_DIS__SHIFT 0x0000000f
+#define DC_RBBMIF_RDWR_CNTL1__DC_RBBMIF_CLIENT4_RDWR_DELAY_MASK 0x00070000L
+#define DC_RBBMIF_RDWR_CNTL1__DC_RBBMIF_CLIENT4_RDWR_DELAY__SHIFT 0x00000010
+#define DC_RBBMIF_RDWR_CNTL1__DC_RBBMIF_CLIENT4_RDWR_TIMEOUT_DIS_MASK 0x00080000L
+#define DC_RBBMIF_RDWR_CNTL1__DC_RBBMIF_CLIENT4_RDWR_TIMEOUT_DIS__SHIFT 0x00000013
+#define DC_RBBMIF_RDWR_CNTL2__DC_RBBMIF_CLIENT8_RDWR_DELAY_MASK 0x00000007L
+#define DC_RBBMIF_RDWR_CNTL2__DC_RBBMIF_CLIENT8_RDWR_DELAY__SHIFT 0x00000000
+#define DC_RBBMIF_RDWR_CNTL2__DC_RBBMIF_CLIENT8_RDWR_TIMEOUT_DIS_MASK 0x00000008L
+#define DC_RBBMIF_RDWR_CNTL2__DC_RBBMIF_CLIENT8_RDWR_TIMEOUT_DIS__SHIFT 0x00000003
+#define DC_RBBMIF_RDWR_CNTL2__DC_RBBMIF_CLIENT9_RDWR_DELAY_MASK 0x00000070L
+#define DC_RBBMIF_RDWR_CNTL2__DC_RBBMIF_CLIENT9_RDWR_DELAY__SHIFT 0x00000004
+#define DC_RBBMIF_RDWR_CNTL2__DC_RBBMIF_CLIENT9_RDWR_TIMEOUT_DIS_MASK 0x00000080L
+#define DC_RBBMIF_RDWR_CNTL2__DC_RBBMIF_CLIENT9_RDWR_TIMEOUT_DIS__SHIFT 0x00000007
+#define DC_REF_CLK_CNTL__GENLK_CLK_OUTPUT_SEL_MASK 0x00000300L
+#define DC_REF_CLK_CNTL__GENLK_CLK_OUTPUT_SEL__SHIFT 0x00000008
+#define DC_REF_CLK_CNTL__HSYNCA_OUTPUT_SEL_MASK 0x00000003L
+#define DC_REF_CLK_CNTL__HSYNCA_OUTPUT_SEL__SHIFT 0x00000000
+#define DC_XDMA_INTERFACE_CNTL__DC_FLIP_PENDING_TO_DCP_MASK 0x00400000L
+#define DC_XDMA_INTERFACE_CNTL__DC_FLIP_PENDING_TO_DCP__SHIFT 0x00000016
+#define DC_XDMA_INTERFACE_CNTL__DC_XDMA_FLIP_PENDING_MASK 0x00010000L
+#define DC_XDMA_INTERFACE_CNTL__DC_XDMA_FLIP_PENDING__SHIFT 0x00000010
+#define DC_XDMA_INTERFACE_CNTL__XDMA_M_FLIP_PENDING_TO_DCP_MASK 0x00100000L
+#define DC_XDMA_INTERFACE_CNTL__XDMA_M_FLIP_PENDING_TO_DCP__SHIFT 0x00000014
+#define DC_XDMA_INTERFACE_CNTL__XDMA_PIPE_ENABLE_MASK 0x0000003fL
+#define DC_XDMA_INTERFACE_CNTL__XDMA_PIPE_ENABLE__SHIFT 0x00000000
+#define DC_XDMA_INTERFACE_CNTL__XDMA_PIPE_SEL_MASK 0x00000700L
+#define DC_XDMA_INTERFACE_CNTL__XDMA_PIPE_SEL__SHIFT 0x00000008
+#define DC_XDMA_INTERFACE_CNTL__XDMA_S_FLIP_PENDING_TO_DCP_MASK 0x00200000L
+#define DC_XDMA_INTERFACE_CNTL__XDMA_S_FLIP_PENDING_TO_DCP__SHIFT 0x00000015
+#define DEGAMMA_CONTROL__CURSOR_DEGAMMA_MODE_MASK 0x00003000L
+#define DEGAMMA_CONTROL__CURSOR_DEGAMMA_MODE__SHIFT 0x0000000c
+#define DEGAMMA_CONTROL__GRPH_DEGAMMA_MODE_MASK 0x00000003L
+#define DEGAMMA_CONTROL__GRPH_DEGAMMA_MODE__SHIFT 0x00000000
+#define DEGAMMA_CONTROL__OVL_DEGAMMA_MODE_MASK 0x00000030L
+#define DEGAMMA_CONTROL__OVL_DEGAMMA_MODE__SHIFT 0x00000004
+#define DENORM_CONTROL__DENORM_MODE_MASK 0x00000007L
+#define DENORM_CONTROL__DENORM_MODE__SHIFT 0x00000000
+#define DENTIST_DISPCLK_CNTL__DENTIST_DISPCLK_CHG_DONE_MASK 0x00080000L
+#define DENTIST_DISPCLK_CNTL__DENTIST_DISPCLK_CHG_DONE__SHIFT 0x00000013
+#define DENTIST_DISPCLK_CNTL__DENTIST_DISPCLK_CHG_MODE_MASK 0x00018000L
+#define DENTIST_DISPCLK_CNTL__DENTIST_DISPCLK_CHG_MODE__SHIFT 0x0000000f
+#define DENTIST_DISPCLK_CNTL__DENTIST_DISPCLK_CHGTOG_MASK 0x00020000L
+#define DENTIST_DISPCLK_CNTL__DENTIST_DISPCLK_CHGTOG__SHIFT 0x00000011
+#define DENTIST_DISPCLK_CNTL__DENTIST_DISPCLK_DONETOG_MASK 0x00040000L
+#define DENTIST_DISPCLK_CNTL__DENTIST_DISPCLK_DONETOG__SHIFT 0x00000012
+#define DENTIST_DISPCLK_CNTL__DENTIST_DISPCLK_RDIVIDER_MASK 0x00007f00L
+#define DENTIST_DISPCLK_CNTL__DENTIST_DISPCLK_RDIVIDER__SHIFT 0x00000008
+#define DENTIST_DISPCLK_CNTL__DENTIST_DISPCLK_WDIVIDER_MASK 0x0000007fL
+#define DENTIST_DISPCLK_CNTL__DENTIST_DISPCLK_WDIVIDER__SHIFT 0x00000000
+#define DENTIST_DISPCLK_CNTL__DENTIST_DPREFCLK_CHG_DONE_MASK 0x00100000L
+#define DENTIST_DISPCLK_CNTL__DENTIST_DPREFCLK_CHG_DONE__SHIFT 0x00000014
+#define DENTIST_DISPCLK_CNTL__DENTIST_DPREFCLK_CHGTOG_MASK 0x00200000L
+#define DENTIST_DISPCLK_CNTL__DENTIST_DPREFCLK_CHGTOG__SHIFT 0x00000015
+#define DENTIST_DISPCLK_CNTL__DENTIST_DPREFCLK_DONETOG_MASK 0x00400000L
+#define DENTIST_DISPCLK_CNTL__DENTIST_DPREFCLK_DONETOG__SHIFT 0x00000016
+#define DENTIST_DISPCLK_CNTL__DENTIST_DPREFCLK_WDIVIDER_MASK 0x7f000000L
+#define DENTIST_DISPCLK_CNTL__DENTIST_DPREFCLK_WDIVIDER__SHIFT 0x00000018
+#define DIG_BE_CNTL__DIG_FE_SOURCE_SELECT_MASK 0x00003f00L
+#define DIG_BE_CNTL__DIG_FE_SOURCE_SELECT__SHIFT 0x00000008
+#define DIG_BE_CNTL__DIG_HPD_SELECT_MASK 0x70000000L
+#define DIG_BE_CNTL__DIG_HPD_SELECT__SHIFT 0x0000001c
+#define DIG_BE_CNTL__DIG_MODE_MASK 0x00070000L
+#define DIG_BE_CNTL__DIG_MODE__SHIFT 0x00000010
+#define DIG_BE_EN_CNTL__DIG_ENABLE_MASK 0x00000001L
+#define DIG_BE_EN_CNTL__DIG_ENABLE__SHIFT 0x00000000
+#define DIG_BE_EN_CNTL__DIG_SYMCLK_BE_ON_MASK 0x00000100L
+#define DIG_BE_EN_CNTL__DIG_SYMCLK_BE_ON__SHIFT 0x00000008
+#define DIG_CLOCK_PATTERN__DIG_CLOCK_PATTERN_MASK 0x000003ffL
+#define DIG_CLOCK_PATTERN__DIG_CLOCK_PATTERN__SHIFT 0x00000000
+#define DIG_DISPCLK_SWITCH_CNTL__DIG_DISPCLK_SWITCH_POINT_MASK 0x00000001L
+#define DIG_DISPCLK_SWITCH_CNTL__DIG_DISPCLK_SWITCH_POINT__SHIFT 0x00000000
+#define DIG_DISPCLK_SWITCH_STATUS__DIG_DISPCLK_SWITCH_ALLOWED_INT_ACK_MASK 0x00000100L
+#define DIG_DISPCLK_SWITCH_STATUS__DIG_DISPCLK_SWITCH_ALLOWED_INT_ACK__SHIFT 0x00000008
+#define DIG_DISPCLK_SWITCH_STATUS__DIG_DISPCLK_SWITCH_ALLOWED_INT_MASK 0x00000010L
+#define DIG_DISPCLK_SWITCH_STATUS__DIG_DISPCLK_SWITCH_ALLOWED_INT_MASK_MASK 0x00001000L
+#define DIG_DISPCLK_SWITCH_STATUS__DIG_DISPCLK_SWITCH_ALLOWED_INT_MASK__SHIFT 0x0000000c
+#define DIG_DISPCLK_SWITCH_STATUS__DIG_DISPCLK_SWITCH_ALLOWED_INT__SHIFT 0x00000004
+#define DIG_DISPCLK_SWITCH_STATUS__DIG_DISPCLK_SWITCH_ALLOWED_MASK 0x00000001L
+#define DIG_DISPCLK_SWITCH_STATUS__DIG_DISPCLK_SWITCH_ALLOWED__SHIFT 0x00000000
+#define DIG_FE_CNTL__DIG_DUAL_LINK_ENABLE_MASK 0x00010000L
+#define DIG_FE_CNTL__DIG_DUAL_LINK_ENABLE__SHIFT 0x00000010
+#define DIG_FE_CNTL__DIG_RB_SWITCH_EN_MASK 0x00100000L
+#define DIG_FE_CNTL__DIG_RB_SWITCH_EN__SHIFT 0x00000014
+#define DIG_FE_CNTL__DIG_SOURCE_SELECT_MASK 0x00000007L
+#define DIG_FE_CNTL__DIG_SOURCE_SELECT__SHIFT 0x00000000
+#define DIG_FE_CNTL__DIG_START_MASK 0x00000400L
+#define DIG_FE_CNTL__DIG_START__SHIFT 0x0000000a
+#define DIG_FE_CNTL__DIG_STEREOSYNC_GATE_EN_MASK 0x00000100L
+#define DIG_FE_CNTL__DIG_STEREOSYNC_GATE_EN__SHIFT 0x00000008
+#define DIG_FE_CNTL__DIG_STEREOSYNC_SELECT_MASK 0x00000070L
+#define DIG_FE_CNTL__DIG_STEREOSYNC_SELECT__SHIFT 0x00000004
+#define DIG_FE_CNTL__DIG_SWAP_MASK 0x00040000L
+#define DIG_FE_CNTL__DIG_SWAP__SHIFT 0x00000012
+#define DIG_FE_CNTL__DIG_SYMCLK_FE_ON_MASK 0x01000000L
+#define DIG_FE_CNTL__DIG_SYMCLK_FE_ON__SHIFT 0x00000018
+#define DIG_FIFO_STATUS__DIG_FIFO_CAL_AVERAGE_LEVEL_MASK 0x0000fc00L
+#define DIG_FIFO_STATUS__DIG_FIFO_CAL_AVERAGE_LEVEL__SHIFT 0x0000000a
+#define DIG_FIFO_STATUS__DIG_FIFO_CALIBRATED_MASK 0x20000000L
+#define DIG_FIFO_STATUS__DIG_FIFO_CALIBRATED__SHIFT 0x0000001d
+#define DIG_FIFO_STATUS__DIG_FIFO_ERROR_ACK_MASK 0x00000100L
+#define DIG_FIFO_STATUS__DIG_FIFO_ERROR_ACK__SHIFT 0x00000008
+#define DIG_FIFO_STATUS__DIG_FIFO_FORCE_RECAL_AVERAGE_MASK 0x40000000L
+#define DIG_FIFO_STATUS__DIG_FIFO_FORCE_RECAL_AVERAGE__SHIFT 0x0000001e
+#define DIG_FIFO_STATUS__DIG_FIFO_FORCE_RECOMP_MINMAX_MASK 0x80000000L
+#define DIG_FIFO_STATUS__DIG_FIFO_FORCE_RECOMP_MINMAX__SHIFT 0x0000001f
+#define DIG_FIFO_STATUS__DIG_FIFO_LEVEL_ERROR_MASK 0x00000001L
+#define DIG_FIFO_STATUS__DIG_FIFO_LEVEL_ERROR__SHIFT 0x00000000
+#define DIG_FIFO_STATUS__DIG_FIFO_MAXIMUM_LEVEL_MASK 0x001f0000L
+#define DIG_FIFO_STATUS__DIG_FIFO_MAXIMUM_LEVEL__SHIFT 0x00000010
+#define DIG_FIFO_STATUS__DIG_FIFO_MINIMUM_LEVEL_MASK 0x03c00000L
+#define DIG_FIFO_STATUS__DIG_FIFO_MINIMUM_LEVEL__SHIFT 0x00000016
+#define DIG_FIFO_STATUS__DIG_FIFO_OVERWRITE_LEVEL_MASK 0x000000fcL
+#define DIG_FIFO_STATUS__DIG_FIFO_OVERWRITE_LEVEL__SHIFT 0x00000002
+#define DIG_FIFO_STATUS__DIG_FIFO_USE_OVERWRITE_LEVEL_MASK 0x00000002L
+#define DIG_FIFO_STATUS__DIG_FIFO_USE_OVERWRITE_LEVEL__SHIFT 0x00000001
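(For reference only, not part of this patch: a minimal sketch of how MASK/__SHIFT pairs such as the DIG_FIFO_STATUS fields above are typically consumed. The get_field()/set_field() helpers and dig_fifo_avg_level() are hypothetical names for illustration; real drivers use their own register accessors.)

/*
 * Sketch: extracting and inserting a bitfield using a generated
 * <REG>__<FIELD>_MASK / <REG>__<FIELD>__SHIFT pair. Assumes the
 * defines above are in scope.
 */
#include <stdint.h>

static inline uint32_t get_field(uint32_t reg, uint32_t mask, uint32_t shift)
{
	/* Isolate the field bits, then right-align them. */
	return (reg & mask) >> shift;
}

static inline uint32_t set_field(uint32_t reg, uint32_t mask, uint32_t shift,
				 uint32_t val)
{
	/* Clear the field, then write the new value within the mask. */
	return (reg & ~mask) | ((val << shift) & mask);
}

/* Example: read the calibrated average FIFO level from a DIG_FIFO_STATUS value. */
static inline uint32_t dig_fifo_avg_level(uint32_t dig_fifo_status)
{
	return get_field(dig_fifo_status,
			 DIG_FIFO_STATUS__DIG_FIFO_CAL_AVERAGE_LEVEL_MASK,
			 DIG_FIFO_STATUS__DIG_FIFO_CAL_AVERAGE_LEVEL__SHIFT);
}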
+#define DIG_LANE_ENABLE__DIG_CLK_EN_MASK 0x00000100L
+#define DIG_LANE_ENABLE__DIG_CLK_EN__SHIFT 0x00000008
+#define DIG_LANE_ENABLE__DIG_LANE0EN_MASK 0x00000001L
+#define DIG_LANE_ENABLE__DIG_LANE0EN__SHIFT 0x00000000
+#define DIG_LANE_ENABLE__DIG_LANE1EN_MASK 0x00000002L
+#define DIG_LANE_ENABLE__DIG_LANE1EN__SHIFT 0x00000001
+#define DIG_LANE_ENABLE__DIG_LANE2EN_MASK 0x00000004L
+#define DIG_LANE_ENABLE__DIG_LANE2EN__SHIFT 0x00000002
+#define DIG_LANE_ENABLE__DIG_LANE3EN_MASK 0x00000008L
+#define DIG_LANE_ENABLE__DIG_LANE3EN__SHIFT 0x00000003
+#define DIG_OUTPUT_CRC_CNTL__DIG_OUTPUT_CRC_DATA_SEL_MASK 0x00000300L
+#define DIG_OUTPUT_CRC_CNTL__DIG_OUTPUT_CRC_DATA_SEL__SHIFT 0x00000008
+#define DIG_OUTPUT_CRC_CNTL__DIG_OUTPUT_CRC_EN_MASK 0x00000001L
+#define DIG_OUTPUT_CRC_CNTL__DIG_OUTPUT_CRC_EN__SHIFT 0x00000000
+#define DIG_OUTPUT_CRC_CNTL__DIG_OUTPUT_CRC_LINK_SEL_MASK 0x00000010L
+#define DIG_OUTPUT_CRC_CNTL__DIG_OUTPUT_CRC_LINK_SEL__SHIFT 0x00000004
+#define DIG_OUTPUT_CRC_RESULT__DIG_OUTPUT_CRC_RESULT_MASK 0x3fffffffL
+#define DIG_OUTPUT_CRC_RESULT__DIG_OUTPUT_CRC_RESULT__SHIFT 0x00000000
+#define DIG_RANDOM_PATTERN_SEED__DIG_RANDOM_PATTERN_SEED_MASK 0x00ffffffL
+#define DIG_RANDOM_PATTERN_SEED__DIG_RANDOM_PATTERN_SEED__SHIFT 0x00000000
+#define DIG_RANDOM_PATTERN_SEED__DIG_RAN_PAT_DURING_DE_ONLY_MASK 0x01000000L
+#define DIG_RANDOM_PATTERN_SEED__DIG_RAN_PAT_DURING_DE_ONLY__SHIFT 0x00000018
+#define DIG_SOFT_RESET__DIGA_BE_SOFT_RESET_MASK 0x00000002L
+#define DIG_SOFT_RESET__DIGA_BE_SOFT_RESET__SHIFT 0x00000001
+#define DIG_SOFT_RESET__DIGA_FE_SOFT_RESET_MASK 0x00000001L
+#define DIG_SOFT_RESET__DIGA_FE_SOFT_RESET__SHIFT 0x00000000
+#define DIG_SOFT_RESET__DIGB_BE_SOFT_RESET_MASK 0x00000020L
+#define DIG_SOFT_RESET__DIGB_BE_SOFT_RESET__SHIFT 0x00000005
+#define DIG_SOFT_RESET__DIGB_FE_SOFT_RESET_MASK 0x00000010L
+#define DIG_SOFT_RESET__DIGB_FE_SOFT_RESET__SHIFT 0x00000004
+#define DIG_SOFT_RESET__DIGC_BE_SOFT_RESET_MASK 0x00000200L
+#define DIG_SOFT_RESET__DIGC_BE_SOFT_RESET__SHIFT 0x00000009
+#define DIG_SOFT_RESET__DIGC_FE_SOFT_RESET_MASK 0x00000100L
+#define DIG_SOFT_RESET__DIGC_FE_SOFT_RESET__SHIFT 0x00000008
+#define DIG_SOFT_RESET__DIGD_BE_SOFT_RESET_MASK 0x00002000L
+#define DIG_SOFT_RESET__DIGD_BE_SOFT_RESET__SHIFT 0x0000000d
+#define DIG_SOFT_RESET__DIGD_FE_SOFT_RESET_MASK 0x00001000L
+#define DIG_SOFT_RESET__DIGD_FE_SOFT_RESET__SHIFT 0x0000000c
+#define DIG_SOFT_RESET__DIGE_BE_SOFT_RESET_MASK 0x00020000L
+#define DIG_SOFT_RESET__DIGE_BE_SOFT_RESET__SHIFT 0x00000011
+#define DIG_SOFT_RESET__DIGE_FE_SOFT_RESET_MASK 0x00010000L
+#define DIG_SOFT_RESET__DIGE_FE_SOFT_RESET__SHIFT 0x00000010
+#define DIG_SOFT_RESET__DIGF_BE_SOFT_RESET_MASK 0x00200000L
+#define DIG_SOFT_RESET__DIGF_BE_SOFT_RESET__SHIFT 0x00000015
+#define DIG_SOFT_RESET__DIGF_FE_SOFT_RESET_MASK 0x00100000L
+#define DIG_SOFT_RESET__DIGF_FE_SOFT_RESET__SHIFT 0x00000014
+#define DIG_TEST_PATTERN__DIG_HALF_CLOCK_PATTERN_SEL_MASK 0x00000002L
+#define DIG_TEST_PATTERN__DIG_HALF_CLOCK_PATTERN_SEL__SHIFT 0x00000001
+#define DIG_TEST_PATTERN__DIG_RANDOM_PATTERN_OUT_EN_MASK 0x00000010L
+#define DIG_TEST_PATTERN__DIG_RANDOM_PATTERN_OUT_EN__SHIFT 0x00000004
+#define DIG_TEST_PATTERN__DIG_RANDOM_PATTERN_RESET_MASK 0x00000020L
+#define DIG_TEST_PATTERN__DIG_RANDOM_PATTERN_RESET__SHIFT 0x00000005
+#define DIG_TEST_PATTERN__DIG_STATIC_TEST_PATTERN_MASK 0x03ff0000L
+#define DIG_TEST_PATTERN__DIG_STATIC_TEST_PATTERN__SHIFT 0x00000010
+#define DIG_TEST_PATTERN__DIG_TEST_PATTERN_EXTERNAL_RESET_EN_MASK 0x00000040L
+#define DIG_TEST_PATTERN__DIG_TEST_PATTERN_EXTERNAL_RESET_EN__SHIFT 0x00000006
+#define DIG_TEST_PATTERN__DIG_TEST_PATTERN_OUT_EN_MASK 0x00000001L
+#define DIG_TEST_PATTERN__DIG_TEST_PATTERN_OUT_EN__SHIFT 0x00000000
+#define DIG_TEST_PATTERN__LVDS_EYE_PATTERN_MASK 0x00000100L
+#define DIG_TEST_PATTERN__LVDS_EYE_PATTERN__SHIFT 0x00000008
+#define DIG_TEST_PATTERN__LVDS_TEST_CLOCK_DATA_MASK 0x00000004L
+#define DIG_TEST_PATTERN__LVDS_TEST_CLOCK_DATA__SHIFT 0x00000002
+#define DISPCLK_CGTT_BLK_CTRL_REG__DISPCLK_TURN_OFF_DELAY_MASK 0x00000ff0L
+#define DISPCLK_CGTT_BLK_CTRL_REG__DISPCLK_TURN_OFF_DELAY__SHIFT 0x00000004
+#define DISPCLK_CGTT_BLK_CTRL_REG__DISPCLK_TURN_ON_DELAY_MASK 0x0000000fL
+#define DISPCLK_CGTT_BLK_CTRL_REG__DISPCLK_TURN_ON_DELAY__SHIFT 0x00000000
+#define DISPCLK_FREQ_CHANGE_CNTL__DCCG_FIFO_ERRDET_OVR_EN_MASK 0x40000000L
+#define DISPCLK_FREQ_CHANGE_CNTL__DCCG_FIFO_ERRDET_OVR_EN__SHIFT 0x0000001e
+#define DISPCLK_FREQ_CHANGE_CNTL__DCCG_FIFO_ERRDET_RESET_MASK 0x10000000L
+#define DISPCLK_FREQ_CHANGE_CNTL__DCCG_FIFO_ERRDET_RESET__SHIFT 0x0000001c
+#define DISPCLK_FREQ_CHANGE_CNTL__DCCG_FIFO_ERRDET_STATE_MASK 0x20000000L
+#define DISPCLK_FREQ_CHANGE_CNTL__DCCG_FIFO_ERRDET_STATE__SHIFT 0x0000001d
+#define DISPCLK_FREQ_CHANGE_CNTL__DISPCLK_CHG_FWD_CORR_DISABLE_MASK 0x80000000L
+#define DISPCLK_FREQ_CHANGE_CNTL__DISPCLK_CHG_FWD_CORR_DISABLE__SHIFT 0x0000001f
+#define DISPCLK_FREQ_CHANGE_CNTL__DISPCLK_FREQ_RAMP_DONE_MASK 0x00100000L
+#define DISPCLK_FREQ_CHANGE_CNTL__DISPCLK_FREQ_RAMP_DONE__SHIFT 0x00000014
+#define DISPCLK_FREQ_CHANGE_CNTL__DISPCLK_MAX_ERRDET_CYCLES_MASK 0x0e000000L
+#define DISPCLK_FREQ_CHANGE_CNTL__DISPCLK_MAX_ERRDET_CYCLES__SHIFT 0x00000019
+#define DISPCLK_FREQ_CHANGE_CNTL__DISPCLK_STEP_DELAY_MASK 0x00003fffL
+#define DISPCLK_FREQ_CHANGE_CNTL__DISPCLK_STEP_DELAY__SHIFT 0x00000000
+#define DISPCLK_FREQ_CHANGE_CNTL__DISPCLK_STEP_SIZE_MASK 0x000f0000L
+#define DISPCLK_FREQ_CHANGE_CNTL__DISPCLK_STEP_SIZE__SHIFT 0x00000010
+#define DISP_INTERRUPT_STATUS__ABM1_BL_UPDATE_INT_MASK 0x40000000L
+#define DISP_INTERRUPT_STATUS__ABM1_BL_UPDATE_INT__SHIFT 0x0000001e
+#define DISP_INTERRUPT_STATUS__ABM1_HG_READY_INT_MASK 0x10000000L
+#define DISP_INTERRUPT_STATUS__ABM1_HG_READY_INT__SHIFT 0x0000001c
+#define DISP_INTERRUPT_STATUS__ABM1_LS_READY_INT_MASK 0x20000000L
+#define DISP_INTERRUPT_STATUS__ABM1_LS_READY_INT__SHIFT 0x0000001d
+#define DISP_INTERRUPT_STATUS__AUX1_LS_DONE_INTERRUPT_MASK 0x00100000L
+#define DISP_INTERRUPT_STATUS__AUX1_LS_DONE_INTERRUPT__SHIFT 0x00000014
+#define DISP_INTERRUPT_STATUS__AUX1_SW_DONE_INTERRUPT_MASK 0x00080000L
+#define DISP_INTERRUPT_STATUS__AUX1_SW_DONE_INTERRUPT__SHIFT 0x00000013
+#define DISP_INTERRUPT_STATUS_CONTINUE2__AUX3_LS_DONE_INTERRUPT_MASK 0x00100000L
+#define DISP_INTERRUPT_STATUS_CONTINUE2__AUX3_LS_DONE_INTERRUPT__SHIFT 0x00000014
+#define DISP_INTERRUPT_STATUS_CONTINUE2__AUX3_SW_DONE_INTERRUPT_MASK 0x00080000L
+#define DISP_INTERRUPT_STATUS_CONTINUE2__AUX3_SW_DONE_INTERRUPT__SHIFT 0x00000013
+#define DISP_INTERRUPT_STATUS_CONTINUE2__CRTC3_FORCE_COUNT_NOW_INTERRUPT_MASK 0x00000040L
+#define DISP_INTERRUPT_STATUS_CONTINUE2__CRTC3_FORCE_COUNT_NOW_INTERRUPT__SHIFT 0x00000006
+#define DISP_INTERRUPT_STATUS_CONTINUE2__CRTC3_FORCE_VSYNC_NEXT_LINE_INTERRUPT_MASK 0x00000020L
+#define DISP_INTERRUPT_STATUS_CONTINUE2__CRTC3_FORCE_VSYNC_NEXT_LINE_INTERRUPT__SHIFT 0x00000005
+#define DISP_INTERRUPT_STATUS_CONTINUE2__CRTC3_SET_V_TOTAL_MIN_EVENT_OCCURED_INT_MASK 0x00000400L
+#define DISP_INTERRUPT_STATUS_CONTINUE2__CRTC3_SET_V_TOTAL_MIN_EVENT_OCCURED_INT__SHIFT 0x0000000a
+#define DISP_INTERRUPT_STATUS_CONTINUE2__CRTC3_SNAPSHOT_INTERRUPT_MASK 0x00000010L
+#define DISP_INTERRUPT_STATUS_CONTINUE2__CRTC3_SNAPSHOT_INTERRUPT__SHIFT 0x00000004
+#define DISP_INTERRUPT_STATUS_CONTINUE2__CRTC3_TRIGA_INTERRUPT_MASK 0x00000080L
+#define DISP_INTERRUPT_STATUS_CONTINUE2__CRTC3_TRIGA_INTERRUPT__SHIFT 0x00000007
+#define DISP_INTERRUPT_STATUS_CONTINUE2__CRTC3_TRIGB_INTERRUPT_MASK 0x00000100L
+#define DISP_INTERRUPT_STATUS_CONTINUE2__CRTC3_TRIGB_INTERRUPT__SHIFT 0x00000008
+#define DISP_INTERRUPT_STATUS_CONTINUE2__CRTC3_VSYNC_NOM_INTERRUPT_MASK 0x00000200L
+#define DISP_INTERRUPT_STATUS_CONTINUE2__CRTC3_VSYNC_NOM_INTERRUPT__SHIFT 0x00000009
+#define DISP_INTERRUPT_STATUS_CONTINUE2__DC_HPD3_INTERRUPT_MASK 0x00020000L
+#define DISP_INTERRUPT_STATUS_CONTINUE2__DC_HPD3_INTERRUPT__SHIFT 0x00000011
+#define DISP_INTERRUPT_STATUS_CONTINUE2__DC_HPD3_RX_INTERRUPT_MASK 0x00040000L
+#define DISP_INTERRUPT_STATUS_CONTINUE2__DC_HPD3_RX_INTERRUPT__SHIFT 0x00000012
+#define DISP_INTERRUPT_STATUS_CONTINUE2__DIGC_DP_FAST_TRAINING_COMPLETE_INTERRUPT_MASK 0x00008000L
+#define DISP_INTERRUPT_STATUS_CONTINUE2__DIGC_DP_FAST_TRAINING_COMPLETE_INTERRUPT__SHIFT 0x0000000f
+#define DISP_INTERRUPT_STATUS_CONTINUE2__DIGC_DP_VID_STREAM_DISABLE_INTERRUPT_MASK 0x00010000L
+#define DISP_INTERRUPT_STATUS_CONTINUE2__DIGC_DP_VID_STREAM_DISABLE_INTERRUPT__SHIFT 0x00000010
+#define DISP_INTERRUPT_STATUS_CONTINUE2__DISP_INTERRUPT_STATUS_CONTINUE3_MASK 0x80000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE2__DISP_INTERRUPT_STATUS_CONTINUE3__SHIFT 0x0000001f
+#define DISP_INTERRUPT_STATUS_CONTINUE2__LB_D3_VBLANK_INTERRUPT_MASK 0x00000008L
+#define DISP_INTERRUPT_STATUS_CONTINUE2__LB_D3_VBLANK_INTERRUPT__SHIFT 0x00000003
+#define DISP_INTERRUPT_STATUS_CONTINUE2__LB_D3_VLINE_INTERRUPT_MASK 0x00000004L
+#define DISP_INTERRUPT_STATUS_CONTINUE2__LB_D3_VLINE_INTERRUPT__SHIFT 0x00000002
+#define DISP_INTERRUPT_STATUS_CONTINUE2__SCL_DISP3_MODE_CHANGE_INTERRUPT_MASK 0x00000001L
+#define DISP_INTERRUPT_STATUS_CONTINUE2__SCL_DISP3_MODE_CHANGE_INTERRUPT__SHIFT 0x00000000
+#define DISP_INTERRUPT_STATUS_CONTINUE3__AUX4_LS_DONE_INTERRUPT_MASK 0x00100000L
+#define DISP_INTERRUPT_STATUS_CONTINUE3__AUX4_LS_DONE_INTERRUPT__SHIFT 0x00000014
+#define DISP_INTERRUPT_STATUS_CONTINUE3__AUX4_SW_DONE_INTERRUPT_MASK 0x00080000L
+#define DISP_INTERRUPT_STATUS_CONTINUE3__AUX4_SW_DONE_INTERRUPT__SHIFT 0x00000013
+#define DISP_INTERRUPT_STATUS_CONTINUE3__CRTC4_FORCE_COUNT_NOW_INTERRUPT_MASK 0x00000040L
+#define DISP_INTERRUPT_STATUS_CONTINUE3__CRTC4_FORCE_COUNT_NOW_INTERRUPT__SHIFT 0x00000006
+#define DISP_INTERRUPT_STATUS_CONTINUE3__CRTC4_FORCE_VSYNC_NEXT_LINE_INTERRUPT_MASK 0x00000020L
+#define DISP_INTERRUPT_STATUS_CONTINUE3__CRTC4_FORCE_VSYNC_NEXT_LINE_INTERRUPT__SHIFT 0x00000005
+#define DISP_INTERRUPT_STATUS_CONTINUE3__CRTC4_SET_V_TOTAL_MIN_EVENT_OCCURED_INT_MASK 0x00000400L
+#define DISP_INTERRUPT_STATUS_CONTINUE3__CRTC4_SET_V_TOTAL_MIN_EVENT_OCCURED_INT__SHIFT 0x0000000a
+#define DISP_INTERRUPT_STATUS_CONTINUE3__CRTC4_SNAPSHOT_INTERRUPT_MASK 0x00000010L
+#define DISP_INTERRUPT_STATUS_CONTINUE3__CRTC4_SNAPSHOT_INTERRUPT__SHIFT 0x00000004
+#define DISP_INTERRUPT_STATUS_CONTINUE3__CRTC4_TRIGA_INTERRUPT_MASK 0x00000080L
+#define DISP_INTERRUPT_STATUS_CONTINUE3__CRTC4_TRIGA_INTERRUPT__SHIFT 0x00000007
+#define DISP_INTERRUPT_STATUS_CONTINUE3__CRTC4_TRIGB_INTERRUPT_MASK 0x00000100L
+#define DISP_INTERRUPT_STATUS_CONTINUE3__CRTC4_TRIGB_INTERRUPT__SHIFT 0x00000008
+#define DISP_INTERRUPT_STATUS_CONTINUE3__CRTC4_VSYNC_NOM_INTERRUPT_MASK 0x00000200L
+#define DISP_INTERRUPT_STATUS_CONTINUE3__CRTC4_VSYNC_NOM_INTERRUPT__SHIFT 0x00000009
+#define DISP_INTERRUPT_STATUS_CONTINUE3__DC_HPD4_INTERRUPT_MASK 0x00020000L
+#define DISP_INTERRUPT_STATUS_CONTINUE3__DC_HPD4_INTERRUPT__SHIFT 0x00000011
+#define DISP_INTERRUPT_STATUS_CONTINUE3__DC_HPD4_RX_INTERRUPT_MASK 0x00040000L
+#define DISP_INTERRUPT_STATUS_CONTINUE3__DC_HPD4_RX_INTERRUPT__SHIFT 0x00000012
+#define DISP_INTERRUPT_STATUS_CONTINUE3__DIGD_DP_FAST_TRAINING_COMPLETE_INTERRUPT_MASK 0x00008000L
+#define DISP_INTERRUPT_STATUS_CONTINUE3__DIGD_DP_FAST_TRAINING_COMPLETE_INTERRUPT__SHIFT 0x0000000f
+#define DISP_INTERRUPT_STATUS_CONTINUE3__DIGD_DP_VID_STREAM_DISABLE_INTERRUPT_MASK 0x00010000L
+#define DISP_INTERRUPT_STATUS_CONTINUE3__DIGD_DP_VID_STREAM_DISABLE_INTERRUPT__SHIFT 0x00000010
+#define DISP_INTERRUPT_STATUS_CONTINUE3__DISP_INTERRUPT_STATUS_CONTINUE4_MASK 0x80000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE3__DISP_INTERRUPT_STATUS_CONTINUE4__SHIFT 0x0000001f
+#define DISP_INTERRUPT_STATUS_CONTINUE3__LB_D4_VBLANK_INTERRUPT_MASK 0x00000008L
+#define DISP_INTERRUPT_STATUS_CONTINUE3__LB_D4_VBLANK_INTERRUPT__SHIFT 0x00000003
+#define DISP_INTERRUPT_STATUS_CONTINUE3__LB_D4_VLINE_INTERRUPT_MASK 0x00000004L
+#define DISP_INTERRUPT_STATUS_CONTINUE3__LB_D4_VLINE_INTERRUPT__SHIFT 0x00000002
+#define DISP_INTERRUPT_STATUS_CONTINUE3__SCL_DISP4_MODE_CHANGE_INTERRUPT_MASK 0x00000001L
+#define DISP_INTERRUPT_STATUS_CONTINUE3__SCL_DISP4_MODE_CHANGE_INTERRUPT__SHIFT 0x00000000
+#define DISP_INTERRUPT_STATUS_CONTINUE4__AUX5_LS_DONE_INTERRUPT_MASK 0x00100000L
+#define DISP_INTERRUPT_STATUS_CONTINUE4__AUX5_LS_DONE_INTERRUPT__SHIFT 0x00000014
+#define DISP_INTERRUPT_STATUS_CONTINUE4__AUX5_SW_DONE_INTERRUPT_MASK 0x00080000L
+#define DISP_INTERRUPT_STATUS_CONTINUE4__AUX5_SW_DONE_INTERRUPT__SHIFT 0x00000013
+#define DISP_INTERRUPT_STATUS_CONTINUE4__CRTC5_FORCE_COUNT_NOW_INTERRUPT_MASK 0x00000040L
+#define DISP_INTERRUPT_STATUS_CONTINUE4__CRTC5_FORCE_COUNT_NOW_INTERRUPT__SHIFT 0x00000006
+#define DISP_INTERRUPT_STATUS_CONTINUE4__CRTC5_FORCE_VSYNC_NEXT_LINE_INTERRUPT_MASK 0x00000020L
+#define DISP_INTERRUPT_STATUS_CONTINUE4__CRTC5_FORCE_VSYNC_NEXT_LINE_INTERRUPT__SHIFT 0x00000005
+#define DISP_INTERRUPT_STATUS_CONTINUE4__CRTC5_SET_V_TOTAL_MIN_EVENT_OCCURED_INT_MASK 0x00000400L
+#define DISP_INTERRUPT_STATUS_CONTINUE4__CRTC5_SET_V_TOTAL_MIN_EVENT_OCCURED_INT__SHIFT 0x0000000a
+#define DISP_INTERRUPT_STATUS_CONTINUE4__CRTC5_SNAPSHOT_INTERRUPT_MASK 0x00000010L
+#define DISP_INTERRUPT_STATUS_CONTINUE4__CRTC5_SNAPSHOT_INTERRUPT__SHIFT 0x00000004
+#define DISP_INTERRUPT_STATUS_CONTINUE4__CRTC5_TRIGA_INTERRUPT_MASK 0x00000080L
+#define DISP_INTERRUPT_STATUS_CONTINUE4__CRTC5_TRIGA_INTERRUPT__SHIFT 0x00000007
+#define DISP_INTERRUPT_STATUS_CONTINUE4__CRTC5_TRIGB_INTERRUPT_MASK 0x00000100L
+#define DISP_INTERRUPT_STATUS_CONTINUE4__CRTC5_TRIGB_INTERRUPT__SHIFT 0x00000008
+#define DISP_INTERRUPT_STATUS_CONTINUE4__CRTC5_VSYNC_NOM_INTERRUPT_MASK 0x00000200L
+#define DISP_INTERRUPT_STATUS_CONTINUE4__CRTC5_VSYNC_NOM_INTERRUPT__SHIFT 0x00000009
+#define DISP_INTERRUPT_STATUS_CONTINUE4__DC_HPD5_INTERRUPT_MASK 0x00020000L
+#define DISP_INTERRUPT_STATUS_CONTINUE4__DC_HPD5_INTERRUPT__SHIFT 0x00000011
+#define DISP_INTERRUPT_STATUS_CONTINUE4__DC_HPD5_RX_INTERRUPT_MASK 0x00040000L
+#define DISP_INTERRUPT_STATUS_CONTINUE4__DC_HPD5_RX_INTERRUPT__SHIFT 0x00000012
+#define DISP_INTERRUPT_STATUS_CONTINUE4__DIGE_DP_FAST_TRAINING_COMPLETE_INTERRUPT_MASK 0x00008000L
+#define DISP_INTERRUPT_STATUS_CONTINUE4__DIGE_DP_FAST_TRAINING_COMPLETE_INTERRUPT__SHIFT 0x0000000f
+#define DISP_INTERRUPT_STATUS_CONTINUE4__DIGE_DP_VID_STREAM_DISABLE_INTERRUPT_MASK 0x00010000L
+#define DISP_INTERRUPT_STATUS_CONTINUE4__DIGE_DP_VID_STREAM_DISABLE_INTERRUPT__SHIFT 0x00000010
+#define DISP_INTERRUPT_STATUS_CONTINUE4__DISP_INTERRUPT_STATUS_CONTINUE5_MASK 0x80000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE4__DISP_INTERRUPT_STATUS_CONTINUE5__SHIFT 0x0000001f
+#define DISP_INTERRUPT_STATUS_CONTINUE4__LB_D5_VBLANK_INTERRUPT_MASK 0x00000008L
+#define DISP_INTERRUPT_STATUS_CONTINUE4__LB_D5_VBLANK_INTERRUPT__SHIFT 0x00000003
+#define DISP_INTERRUPT_STATUS_CONTINUE4__LB_D5_VLINE_INTERRUPT_MASK 0x00000004L
+#define DISP_INTERRUPT_STATUS_CONTINUE4__LB_D5_VLINE_INTERRUPT__SHIFT 0x00000002
+#define DISP_INTERRUPT_STATUS_CONTINUE4__SCL_DISP5_MODE_CHANGE_INTERRUPT_MASK 0x00000001L
+#define DISP_INTERRUPT_STATUS_CONTINUE4__SCL_DISP5_MODE_CHANGE_INTERRUPT__SHIFT 0x00000000
+#define DISP_INTERRUPT_STATUS_CONTINUE5__AUX6_LS_DONE_INTERRUPT_MASK 0x00100000L
+#define DISP_INTERRUPT_STATUS_CONTINUE5__AUX6_LS_DONE_INTERRUPT__SHIFT 0x00000014
+#define DISP_INTERRUPT_STATUS_CONTINUE5__AUX6_SW_DONE_INTERRUPT_MASK 0x00080000L
+#define DISP_INTERRUPT_STATUS_CONTINUE5__AUX6_SW_DONE_INTERRUPT__SHIFT 0x00000013
+#define DISP_INTERRUPT_STATUS_CONTINUE5__CRTC6_FORCE_COUNT_NOW_INTERRUPT_MASK 0x00000040L
+#define DISP_INTERRUPT_STATUS_CONTINUE5__CRTC6_FORCE_COUNT_NOW_INTERRUPT__SHIFT 0x00000006
+#define DISP_INTERRUPT_STATUS_CONTINUE5__CRTC6_FORCE_VSYNC_NEXT_LINE_INTERRUPT_MASK 0x00000020L
+#define DISP_INTERRUPT_STATUS_CONTINUE5__CRTC6_FORCE_VSYNC_NEXT_LINE_INTERRUPT__SHIFT 0x00000005
+#define DISP_INTERRUPT_STATUS_CONTINUE5__CRTC6_SET_V_TOTAL_MIN_EVENT_OCCURED_INT_MASK 0x00000400L
+#define DISP_INTERRUPT_STATUS_CONTINUE5__CRTC6_SET_V_TOTAL_MIN_EVENT_OCCURED_INT__SHIFT 0x0000000a
+#define DISP_INTERRUPT_STATUS_CONTINUE5__CRTC6_SNAPSHOT_INTERRUPT_MASK 0x00000010L
+#define DISP_INTERRUPT_STATUS_CONTINUE5__CRTC6_SNAPSHOT_INTERRUPT__SHIFT 0x00000004
+#define DISP_INTERRUPT_STATUS_CONTINUE5__CRTC6_TRIGA_INTERRUPT_MASK 0x00000080L
+#define DISP_INTERRUPT_STATUS_CONTINUE5__CRTC6_TRIGA_INTERRUPT__SHIFT 0x00000007
+#define DISP_INTERRUPT_STATUS_CONTINUE5__CRTC6_TRIGB_INTERRUPT_MASK 0x00000100L
+#define DISP_INTERRUPT_STATUS_CONTINUE5__CRTC6_TRIGB_INTERRUPT__SHIFT 0x00000008
+#define DISP_INTERRUPT_STATUS_CONTINUE5__CRTC6_VSYNC_NOM_INTERRUPT_MASK 0x00000200L
+#define DISP_INTERRUPT_STATUS_CONTINUE5__CRTC6_VSYNC_NOM_INTERRUPT__SHIFT 0x00000009
+#define DISP_INTERRUPT_STATUS_CONTINUE5__DC_HPD6_INTERRUPT_MASK 0x00020000L
+#define DISP_INTERRUPT_STATUS_CONTINUE5__DC_HPD6_INTERRUPT__SHIFT 0x00000011
+#define DISP_INTERRUPT_STATUS_CONTINUE5__DC_HPD6_RX_INTERRUPT_MASK 0x00040000L
+#define DISP_INTERRUPT_STATUS_CONTINUE5__DC_HPD6_RX_INTERRUPT__SHIFT 0x00000012
+#define DISP_INTERRUPT_STATUS_CONTINUE5__DIGF_DP_FAST_TRAINING_COMPLETE_INTERRUPT_MASK 0x00008000L
+#define DISP_INTERRUPT_STATUS_CONTINUE5__DIGF_DP_FAST_TRAINING_COMPLETE_INTERRUPT__SHIFT 0x0000000f
+#define DISP_INTERRUPT_STATUS_CONTINUE5__DIGF_DP_VID_STREAM_DISABLE_INTERRUPT_MASK 0x00010000L
+#define DISP_INTERRUPT_STATUS_CONTINUE5__DIGF_DP_VID_STREAM_DISABLE_INTERRUPT__SHIFT 0x00000010
+#define DISP_INTERRUPT_STATUS_CONTINUE5__LB_D6_VBLANK_INTERRUPT_MASK 0x00000008L
+#define DISP_INTERRUPT_STATUS_CONTINUE5__LB_D6_VBLANK_INTERRUPT__SHIFT 0x00000003
+#define DISP_INTERRUPT_STATUS_CONTINUE5__LB_D6_VLINE_INTERRUPT_MASK 0x00000004L
+#define DISP_INTERRUPT_STATUS_CONTINUE5__LB_D6_VLINE_INTERRUPT__SHIFT 0x00000002
+#define DISP_INTERRUPT_STATUS_CONTINUE5__SCL_DISP6_MODE_CHANGE_INTERRUPT_MASK 0x00000001L
+#define DISP_INTERRUPT_STATUS_CONTINUE5__SCL_DISP6_MODE_CHANGE_INTERRUPT__SHIFT 0x00000000
+#define DISP_INTERRUPT_STATUS_CONTINUE__AUX2_LS_DONE_INTERRUPT_MASK 0x00100000L
+#define DISP_INTERRUPT_STATUS_CONTINUE__AUX2_LS_DONE_INTERRUPT__SHIFT 0x00000014
+#define DISP_INTERRUPT_STATUS_CONTINUE__AUX2_SW_DONE_INTERRUPT_MASK 0x00080000L
+#define DISP_INTERRUPT_STATUS_CONTINUE__AUX2_SW_DONE_INTERRUPT__SHIFT 0x00000013
+#define DISP_INTERRUPT_STATUS_CONTINUE__CRTC2_FORCE_COUNT_NOW_INTERRUPT_MASK 0x00000040L
+#define DISP_INTERRUPT_STATUS_CONTINUE__CRTC2_FORCE_COUNT_NOW_INTERRUPT__SHIFT 0x00000006
+#define DISP_INTERRUPT_STATUS_CONTINUE__CRTC2_FORCE_VSYNC_NEXT_LINE_INTERRUPT_MASK 0x00000020L
+#define DISP_INTERRUPT_STATUS_CONTINUE__CRTC2_FORCE_VSYNC_NEXT_LINE_INTERRUPT__SHIFT 0x00000005
+#define DISP_INTERRUPT_STATUS_CONTINUE__CRTC2_SET_V_TOTAL_MIN_EVENT_OCCURED_INT_MASK 0x00000400L
+#define DISP_INTERRUPT_STATUS_CONTINUE__CRTC2_SET_V_TOTAL_MIN_EVENT_OCCURED_INT__SHIFT 0x0000000a
+#define DISP_INTERRUPT_STATUS_CONTINUE__CRTC2_SNAPSHOT_INTERRUPT_MASK 0x00000010L
+#define DISP_INTERRUPT_STATUS_CONTINUE__CRTC2_SNAPSHOT_INTERRUPT__SHIFT 0x00000004
+#define DISP_INTERRUPT_STATUS_CONTINUE__CRTC2_TRIGA_INTERRUPT_MASK 0x00000080L
+#define DISP_INTERRUPT_STATUS_CONTINUE__CRTC2_TRIGA_INTERRUPT__SHIFT 0x00000007
+#define DISP_INTERRUPT_STATUS_CONTINUE__CRTC2_TRIGB_INTERRUPT_MASK 0x00000100L
+#define DISP_INTERRUPT_STATUS_CONTINUE__CRTC2_TRIGB_INTERRUPT__SHIFT 0x00000008
+#define DISP_INTERRUPT_STATUS_CONTINUE__CRTC2_VSYNC_NOM_INTERRUPT_MASK 0x00000200L
+#define DISP_INTERRUPT_STATUS_CONTINUE__CRTC2_VSYNC_NOM_INTERRUPT__SHIFT 0x00000009
+#define DISP_INTERRUPT_STATUS_CONTINUE__DC_HPD2_INTERRUPT_MASK 0x00020000L
+#define DISP_INTERRUPT_STATUS_CONTINUE__DC_HPD2_INTERRUPT__SHIFT 0x00000011
+#define DISP_INTERRUPT_STATUS_CONTINUE__DC_HPD2_RX_INTERRUPT_MASK 0x00040000L
+#define DISP_INTERRUPT_STATUS_CONTINUE__DC_HPD2_RX_INTERRUPT__SHIFT 0x00000012
+#define DISP_INTERRUPT_STATUS_CONTINUE__DIGB_DP_FAST_TRAINING_COMPLETE_INTERRUPT_MASK 0x00008000L
+#define DISP_INTERRUPT_STATUS_CONTINUE__DIGB_DP_FAST_TRAINING_COMPLETE_INTERRUPT__SHIFT 0x0000000f
+#define DISP_INTERRUPT_STATUS_CONTINUE__DIGB_DP_VID_STREAM_DISABLE_INTERRUPT_MASK 0x00010000L
+#define DISP_INTERRUPT_STATUS_CONTINUE__DIGB_DP_VID_STREAM_DISABLE_INTERRUPT__SHIFT 0x00000010
+#define DISP_INTERRUPT_STATUS_CONTINUE__DISP_INTERRUPT_STATUS_CONTINUE2_MASK 0x80000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE__DISP_INTERRUPT_STATUS_CONTINUE2__SHIFT 0x0000001f
+#define DISP_INTERRUPT_STATUS_CONTINUE__DISP_TIMER_INTERRUPT_MASK 0x01000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE__DISP_TIMER_INTERRUPT__SHIFT 0x00000018
+#define DISP_INTERRUPT_STATUS_CONTINUE__LB_D2_VBLANK_INTERRUPT_MASK 0x00000008L
+#define DISP_INTERRUPT_STATUS_CONTINUE__LB_D2_VBLANK_INTERRUPT__SHIFT 0x00000003
+#define DISP_INTERRUPT_STATUS_CONTINUE__LB_D2_VLINE_INTERRUPT_MASK 0x00000004L
+#define DISP_INTERRUPT_STATUS_CONTINUE__LB_D2_VLINE_INTERRUPT__SHIFT 0x00000002
+#define DISP_INTERRUPT_STATUS_CONTINUE__SCL_DISP2_MODE_CHANGE_INTERRUPT_MASK 0x00000001L
+#define DISP_INTERRUPT_STATUS_CONTINUE__SCL_DISP2_MODE_CHANGE_INTERRUPT__SHIFT 0x00000000
+#define DISP_INTERRUPT_STATUS__CRTC1_FORCE_COUNT_NOW_INTERRUPT_MASK 0x00000040L
+#define DISP_INTERRUPT_STATUS__CRTC1_FORCE_COUNT_NOW_INTERRUPT__SHIFT 0x00000006
+#define DISP_INTERRUPT_STATUS__CRTC1_FORCE_VSYNC_NEXT_LINE_INTERRUPT_MASK 0x00000020L
+#define DISP_INTERRUPT_STATUS__CRTC1_FORCE_VSYNC_NEXT_LINE_INTERRUPT__SHIFT 0x00000005
+#define DISP_INTERRUPT_STATUS__CRTC1_SET_V_TOTAL_MIN_EVENT_OCCURED_INT_MASK 0x00000400L
+#define DISP_INTERRUPT_STATUS__CRTC1_SET_V_TOTAL_MIN_EVENT_OCCURED_INT__SHIFT 0x0000000a
+#define DISP_INTERRUPT_STATUS__CRTC1_SNAPSHOT_INTERRUPT_MASK 0x00000010L
+#define DISP_INTERRUPT_STATUS__CRTC1_SNAPSHOT_INTERRUPT__SHIFT 0x00000004
+#define DISP_INTERRUPT_STATUS__CRTC1_TRIGA_INTERRUPT_MASK 0x00000080L
+#define DISP_INTERRUPT_STATUS__CRTC1_TRIGA_INTERRUPT__SHIFT 0x00000007
+#define DISP_INTERRUPT_STATUS__CRTC1_TRIGB_INTERRUPT_MASK 0x00000100L
+#define DISP_INTERRUPT_STATUS__CRTC1_TRIGB_INTERRUPT__SHIFT 0x00000008
+#define DISP_INTERRUPT_STATUS__CRTC1_VSYNC_NOM_INTERRUPT_MASK 0x00000200L
+#define DISP_INTERRUPT_STATUS__CRTC1_VSYNC_NOM_INTERRUPT__SHIFT 0x00000009
+#define DISP_INTERRUPT_STATUS__DACA_AUTODETECT_INTERRUPT_MASK 0x00400000L
+#define DISP_INTERRUPT_STATUS__DACA_AUTODETECT_INTERRUPT__SHIFT 0x00000016
+#define DISP_INTERRUPT_STATUS__DACB_AUTODETECT_INTERRUPT_MASK 0x00800000L
+#define DISP_INTERRUPT_STATUS__DACB_AUTODETECT_INTERRUPT__SHIFT 0x00000017
+#define DISP_INTERRUPT_STATUS__DC_HPD1_INTERRUPT_MASK 0x00020000L
+#define DISP_INTERRUPT_STATUS__DC_HPD1_INTERRUPT__SHIFT 0x00000011
+#define DISP_INTERRUPT_STATUS__DC_HPD1_RX_INTERRUPT_MASK 0x00040000L
+#define DISP_INTERRUPT_STATUS__DC_HPD1_RX_INTERRUPT__SHIFT 0x00000012
+#define DISP_INTERRUPT_STATUS__DC_I2C_HW_DONE_INTERRUPT_MASK 0x02000000L
+#define DISP_INTERRUPT_STATUS__DC_I2C_HW_DONE_INTERRUPT__SHIFT 0x00000019
+#define DISP_INTERRUPT_STATUS__DC_I2C_SW_DONE_INTERRUPT_MASK 0x01000000L
+#define DISP_INTERRUPT_STATUS__DC_I2C_SW_DONE_INTERRUPT__SHIFT 0x00000018
+#define DISP_INTERRUPT_STATUS__DIGA_DISPCLK_SWITCH_ALLOWED_INTERRUPT_MASK 0x00200000L
+#define DISP_INTERRUPT_STATUS__DIGA_DISPCLK_SWITCH_ALLOWED_INTERRUPT__SHIFT 0x00000015
+#define DISP_INTERRUPT_STATUS__DIGA_DP_FAST_TRAINING_COMPLETE_INTERRUPT_MASK 0x00008000L
+#define DISP_INTERRUPT_STATUS__DIGA_DP_FAST_TRAINING_COMPLETE_INTERRUPT__SHIFT 0x0000000f
+#define DISP_INTERRUPT_STATUS__DIGA_DP_VID_STREAM_DISABLE_INTERRUPT_MASK 0x00010000L
+#define DISP_INTERRUPT_STATUS__DIGA_DP_VID_STREAM_DISABLE_INTERRUPT__SHIFT 0x00000010
+#define DISP_INTERRUPT_STATUS__DISP_INTERRUPT_STATUS_CONTINUE_MASK 0x80000000L
+#define DISP_INTERRUPT_STATUS__DISP_INTERRUPT_STATUS_CONTINUE__SHIFT 0x0000001f
+#define DISP_INTERRUPT_STATUS__DMCU_SCP_INT_MASK 0x08000000L
+#define DISP_INTERRUPT_STATUS__DMCU_SCP_INT__SHIFT 0x0000001b
+#define DISP_INTERRUPT_STATUS__DMCU_UC_INTERNAL_INT_MASK 0x04000000L
+#define DISP_INTERRUPT_STATUS__DMCU_UC_INTERNAL_INT__SHIFT 0x0000001a
+#define DISP_INTERRUPT_STATUS__LB_D1_VBLANK_INTERRUPT_MASK 0x00000008L
+#define DISP_INTERRUPT_STATUS__LB_D1_VBLANK_INTERRUPT__SHIFT 0x00000003
+#define DISP_INTERRUPT_STATUS__LB_D1_VLINE_INTERRUPT_MASK 0x00000004L
+#define DISP_INTERRUPT_STATUS__LB_D1_VLINE_INTERRUPT__SHIFT 0x00000002
+#define DISP_INTERRUPT_STATUS__SCL_DISP1_MODE_CHANGE_INTERRUPT_MASK 0x00000001L
+#define DISP_INTERRUPT_STATUS__SCL_DISP1_MODE_CHANGE_INTERRUPT__SHIFT 0x00000000
+#define DISPOUT_STEREOSYNC_SEL__GENERICA_STEREOSYNC_SEL_MASK 0x00000007L
+#define DISPOUT_STEREOSYNC_SEL__GENERICA_STEREOSYNC_SEL__SHIFT 0x00000000
+#define DISPOUT_STEREOSYNC_SEL__GENERICB_STEREOSYNC_SEL_MASK 0x00070000L
+#define DISPOUT_STEREOSYNC_SEL__GENERICB_STEREOSYNC_SEL__SHIFT 0x00000010
+#define DISPPLL_BG_CNTL__DISPPLL_BG_ADJ_MASK 0x000000f0L
+#define DISPPLL_BG_CNTL__DISPPLL_BG_ADJ__SHIFT 0x00000004
+#define DISPPLL_BG_CNTL__DISPPLL_BG_PDN_MASK 0x00000001L
+#define DISPPLL_BG_CNTL__DISPPLL_BG_PDN__SHIFT 0x00000000
+#define DISP_TIMER_CONTROL__DISP_TIMER_INT_COUNT_MASK 0x01ffffffL
+#define DISP_TIMER_CONTROL__DISP_TIMER_INT_COUNT__SHIFT 0x00000000
+#define DISP_TIMER_CONTROL__DISP_TIMER_INT_ENABLE_MASK 0x02000000L
+#define DISP_TIMER_CONTROL__DISP_TIMER_INT_ENABLE__SHIFT 0x00000019
+#define DISP_TIMER_CONTROL__DISP_TIMER_INT_MASK 0x40000000L
+#define DISP_TIMER_CONTROL__DISP_TIMER_INT_MSK_MASK 0x08000000L
+#define DISP_TIMER_CONTROL__DISP_TIMER_INT_MSK__SHIFT 0x0000001b
+#define DISP_TIMER_CONTROL__DISP_TIMER_INT_RUNNING_MASK 0x04000000L
+#define DISP_TIMER_CONTROL__DISP_TIMER_INT_RUNNING__SHIFT 0x0000001a
+#define DISP_TIMER_CONTROL__DISP_TIMER_INT__SHIFT 0x0000001e
+#define DISP_TIMER_CONTROL__DISP_TIMER_INT_STAT_AK_MASK 0x20000000L
+#define DISP_TIMER_CONTROL__DISP_TIMER_INT_STAT_AK__SHIFT 0x0000001d
+#define DISP_TIMER_CONTROL__DISP_TIMER_INT_STAT_MASK 0x10000000L
+#define DISP_TIMER_CONTROL__DISP_TIMER_INT_STAT__SHIFT 0x0000001c
+#define DMCU_CTRL__DISABLE_IRQ_TO_UC_MASK 0x00000004L
+#define DMCU_CTRL__DISABLE_IRQ_TO_UC__SHIFT 0x00000002
+#define DMCU_CTRL__DISABLE_XIRQ_TO_UC_MASK 0x00000008L
+#define DMCU_CTRL__DISABLE_XIRQ_TO_UC__SHIFT 0x00000003
+#define DMCU_CTRL__DMCU_ENABLE_MASK 0x00000010L
+#define DMCU_CTRL__DMCU_ENABLE__SHIFT 0x00000004
+#define DMCU_CTRL__IGNORE_PWRMGT_MASK 0x00000002L
+#define DMCU_CTRL__IGNORE_PWRMGT__SHIFT 0x00000001
+#define DMCU_CTRL__RESET_UC_MASK 0x00000001L
+#define DMCU_CTRL__RESET_UC__SHIFT 0x00000000
+#define DMCU_CTRL__UC_REG_RD_TIMEOUT_MASK 0xffc00000L
+#define DMCU_CTRL__UC_REG_RD_TIMEOUT__SHIFT 0x00000016
+#define DMCU_ERAM_RD_CTRL__ERAM_RD_ADDR_MASK 0x0000ffffL
+#define DMCU_ERAM_RD_CTRL__ERAM_RD_ADDR__SHIFT 0x00000000
+#define DMCU_ERAM_RD_CTRL__ERAM_RD_BE_MASK 0x000f0000L
+#define DMCU_ERAM_RD_CTRL__ERAM_RD_BE__SHIFT 0x00000010
+#define DMCU_ERAM_RD_CTRL__ERAM_RD_BYTE_MODE_MASK 0x00100000L
+#define DMCU_ERAM_RD_CTRL__ERAM_RD_BYTE_MODE__SHIFT 0x00000014
+#define DMCU_ERAM_RD_DATA__ERAM_RD_DATA_MASK 0xffffffffL
+#define DMCU_ERAM_RD_DATA__ERAM_RD_DATA__SHIFT 0x00000000
+#define DMCU_ERAM_WR_CTRL__ERAM_WR_ADDR_MASK 0x0000ffffL
+#define DMCU_ERAM_WR_CTRL__ERAM_WR_ADDR__SHIFT 0x00000000
+#define DMCU_ERAM_WR_CTRL__ERAM_WR_BE_MASK 0x000f0000L
+#define DMCU_ERAM_WR_CTRL__ERAM_WR_BE__SHIFT 0x00000010
+#define DMCU_ERAM_WR_CTRL__ERAM_WR_BYTE_MODE_MASK 0x00100000L
+#define DMCU_ERAM_WR_CTRL__ERAM_WR_BYTE_MODE__SHIFT 0x00000014
+#define DMCU_ERAM_WR_DATA__ERAM_WR_DATA_MASK 0xffffffffL
+#define DMCU_ERAM_WR_DATA__ERAM_WR_DATA__SHIFT 0x00000000
+#define DMCU_EVENT_TRIGGER__GEN_SW_INT_TO_UC_MASK 0x00000001L
+#define DMCU_EVENT_TRIGGER__GEN_SW_INT_TO_UC__SHIFT 0x00000000
+#define DMCU_EVENT_TRIGGER__GEN_UC_INTERNAL_INT_TO_HOST_MASK 0x00800000L
+#define DMCU_EVENT_TRIGGER__GEN_UC_INTERNAL_INT_TO_HOST__SHIFT 0x00000017
+#define DMCU_EVENT_TRIGGER__UC_INTERNAL_INT_CODE_MASK 0x007f0000L
+#define DMCU_EVENT_TRIGGER__UC_INTERNAL_INT_CODE__SHIFT 0x00000010
+#define DMCU_FW_CHECKSUM_SMPL_BYTE_POS__DMCU_FW_CHECKSUM_HI_SMPL_BYTE_POS_MASK 0x0000000cL
+#define DMCU_FW_CHECKSUM_SMPL_BYTE_POS__DMCU_FW_CHECKSUM_HI_SMPL_BYTE_POS__SHIFT 0x00000002
+#define DMCU_FW_CHECKSUM_SMPL_BYTE_POS__DMCU_FW_CHECKSUM_LO_SMPL_BYTE_POS_MASK 0x00000003L
+#define DMCU_FW_CHECKSUM_SMPL_BYTE_POS__DMCU_FW_CHECKSUM_LO_SMPL_BYTE_POS__SHIFT 0x00000000
+#define DMCU_FW_CS_HI__FW_CHECKSUM_HI_MASK 0xffffffffL
+#define DMCU_FW_CS_HI__FW_CHECKSUM_HI__SHIFT 0x00000000
+#define DMCU_FW_CS_LO__FW_CHECKSUM_LO_MASK 0xffffffffL
+#define DMCU_FW_CS_LO__FW_CHECKSUM_LO__SHIFT 0x00000000
+#define DMCU_FW_END_ADDR__FW_END_ADDR_LSB_MASK 0x000000ffL
+#define DMCU_FW_END_ADDR__FW_END_ADDR_LSB__SHIFT 0x00000000
+#define DMCU_FW_END_ADDR__FW_END_ADDR_MSB_MASK 0x0000ff00L
+#define DMCU_FW_END_ADDR__FW_END_ADDR_MSB__SHIFT 0x00000008
+#define DMCU_FW_ISR_START_ADDR__FW_ISR_START_ADDR_LSB_MASK 0x000000ffL
+#define DMCU_FW_ISR_START_ADDR__FW_ISR_START_ADDR_LSB__SHIFT 0x00000000
+#define DMCU_FW_ISR_START_ADDR__FW_ISR_START_ADDR_MSB_MASK 0x0000ff00L
+#define DMCU_FW_ISR_START_ADDR__FW_ISR_START_ADDR_MSB__SHIFT 0x00000008
+#define DMCU_FW_START_ADDR__FW_START_ADDR_LSB_MASK 0x000000ffL
+#define DMCU_FW_START_ADDR__FW_START_ADDR_LSB__SHIFT 0x00000000
+#define DMCU_FW_START_ADDR__FW_START_ADDR_MSB_MASK 0x0000ff00L
+#define DMCU_FW_START_ADDR__FW_START_ADDR_MSB__SHIFT 0x00000008
+#define DMCU_INT_CNT__DMCU_ABM1_BL_UPDATE_INT_CNT_MASK 0x00ff0000L
+#define DMCU_INT_CNT__DMCU_ABM1_BL_UPDATE_INT_CNT__SHIFT 0x00000010
+#define DMCU_INT_CNT__DMCU_ABM1_HG_READY_INT_CNT_MASK 0x000000ffL
+#define DMCU_INT_CNT__DMCU_ABM1_HG_READY_INT_CNT__SHIFT 0x00000000
+#define DMCU_INT_CNT__DMCU_ABM1_LS_READY_INT_CNT_MASK 0x0000ff00L
+#define DMCU_INT_CNT__DMCU_ABM1_LS_READY_INT_CNT__SHIFT 0x00000008
+#define DMCU_INTERRUPT_STATUS__ABM1_BL_UPDATE_INT_CLEAR_MASK 0x00000004L
+#define DMCU_INTERRUPT_STATUS__ABM1_BL_UPDATE_INT_CLEAR__SHIFT 0x00000002
+#define DMCU_INTERRUPT_STATUS__ABM1_BL_UPDATE_INT_OCCURRED_MASK 0x00000004L
+#define DMCU_INTERRUPT_STATUS__ABM1_BL_UPDATE_INT_OCCURRED__SHIFT 0x00000002
+#define DMCU_INTERRUPT_STATUS__ABM1_HG_READY_INT_CLEAR_MASK 0x00000001L
+#define DMCU_INTERRUPT_STATUS__ABM1_HG_READY_INT_CLEAR__SHIFT 0x00000000
+#define DMCU_INTERRUPT_STATUS__ABM1_HG_READY_INT_OCCURRED_MASK 0x00000001L
+#define DMCU_INTERRUPT_STATUS__ABM1_HG_READY_INT_OCCURRED__SHIFT 0x00000000
+#define DMCU_INTERRUPT_STATUS__ABM1_LS_READY_INT_CLEAR_MASK 0x00000002L
+#define DMCU_INTERRUPT_STATUS__ABM1_LS_READY_INT_CLEAR__SHIFT 0x00000001
+#define DMCU_INTERRUPT_STATUS__ABM1_LS_READY_INT_OCCURRED_MASK 0x00000002L
+#define DMCU_INTERRUPT_STATUS__ABM1_LS_READY_INT_OCCURRED__SHIFT 0x00000001
+#define DMCU_INTERRUPT_STATUS__DCPG_IHC_DCFE0_POWER_DOWN_INT_CLEAR_MASK 0x00040000L
+#define DMCU_INTERRUPT_STATUS__DCPG_IHC_DCFE0_POWER_DOWN_INT_CLEAR__SHIFT 0x00000012
+#define DMCU_INTERRUPT_STATUS__DCPG_IHC_DCFE0_POWER_DOWN_INT_OCCURRED_MASK 0x00040000L
+#define DMCU_INTERRUPT_STATUS__DCPG_IHC_DCFE0_POWER_DOWN_INT_OCCURRED__SHIFT 0x00000012
+#define DMCU_INTERRUPT_STATUS__DCPG_IHC_DCFE0_POWER_UP_INT_CLEAR_MASK 0x00001000L
+#define DMCU_INTERRUPT_STATUS__DCPG_IHC_DCFE0_POWER_UP_INT_CLEAR__SHIFT 0x0000000c
+#define DMCU_INTERRUPT_STATUS__DCPG_IHC_DCFE0_POWER_UP_INT_OCCURRED_MASK 0x00001000L
+#define DMCU_INTERRUPT_STATUS__DCPG_IHC_DCFE0_POWER_UP_INT_OCCURRED__SHIFT 0x0000000c
+#define DMCU_INTERRUPT_STATUS__DCPG_IHC_DCFE1_POWER_DOWN_INT_CLEAR_MASK 0x00080000L
+#define DMCU_INTERRUPT_STATUS__DCPG_IHC_DCFE1_POWER_DOWN_INT_CLEAR__SHIFT 0x00000013
+#define DMCU_INTERRUPT_STATUS__DCPG_IHC_DCFE1_POWER_DOWN_INT_OCCURRED_MASK 0x00080000L
+#define DMCU_INTERRUPT_STATUS__DCPG_IHC_DCFE1_POWER_DOWN_INT_OCCURRED__SHIFT 0x00000013
+#define DMCU_INTERRUPT_STATUS__DCPG_IHC_DCFE1_POWER_UP_INT_CLEAR_MASK 0x00002000L
+#define DMCU_INTERRUPT_STATUS__DCPG_IHC_DCFE1_POWER_UP_INT_CLEAR__SHIFT 0x0000000d
+#define DMCU_INTERRUPT_STATUS__DCPG_IHC_DCFE1_POWER_UP_INT_OCCURRED_MASK 0x00002000L
+#define DMCU_INTERRUPT_STATUS__DCPG_IHC_DCFE1_POWER_UP_INT_OCCURRED__SHIFT 0x0000000d
+#define DMCU_INTERRUPT_STATUS__DCPG_IHC_DCFE2_POWER_DOWN_INT_CLEAR_MASK 0x00100000L
+#define DMCU_INTERRUPT_STATUS__DCPG_IHC_DCFE2_POWER_DOWN_INT_CLEAR__SHIFT 0x00000014
+#define DMCU_INTERRUPT_STATUS__DCPG_IHC_DCFE2_POWER_DOWN_INT_OCCURRED_MASK 0x00100000L
+#define DMCU_INTERRUPT_STATUS__DCPG_IHC_DCFE2_POWER_DOWN_INT_OCCURRED__SHIFT 0x00000014
+#define DMCU_INTERRUPT_STATUS__DCPG_IHC_DCFE2_POWER_UP_INT_CLEAR_MASK 0x00004000L
+#define DMCU_INTERRUPT_STATUS__DCPG_IHC_DCFE2_POWER_UP_INT_CLEAR__SHIFT 0x0000000e
+#define DMCU_INTERRUPT_STATUS__DCPG_IHC_DCFE2_POWER_UP_INT_OCCURRED_MASK 0x00004000L
+#define DMCU_INTERRUPT_STATUS__DCPG_IHC_DCFE2_POWER_UP_INT_OCCURRED__SHIFT 0x0000000e
+#define DMCU_INTERRUPT_STATUS__DCPG_IHC_DCFE3_POWER_DOWN_INT_CLEAR_MASK 0x00200000L
+#define DMCU_INTERRUPT_STATUS__DCPG_IHC_DCFE3_POWER_DOWN_INT_CLEAR__SHIFT 0x00000015
+#define DMCU_INTERRUPT_STATUS__DCPG_IHC_DCFE3_POWER_DOWN_INT_OCCURRED_MASK 0x00200000L
+#define DMCU_INTERRUPT_STATUS__DCPG_IHC_DCFE3_POWER_DOWN_INT_OCCURRED__SHIFT 0x00000015
+#define DMCU_INTERRUPT_STATUS__DCPG_IHC_DCFE3_POWER_UP_INT_CLEAR_MASK 0x00008000L
+#define DMCU_INTERRUPT_STATUS__DCPG_IHC_DCFE3_POWER_UP_INT_CLEAR__SHIFT 0x0000000f
+#define DMCU_INTERRUPT_STATUS__DCPG_IHC_DCFE3_POWER_UP_INT_OCCURRED_MASK 0x00008000L
+#define DMCU_INTERRUPT_STATUS__DCPG_IHC_DCFE3_POWER_UP_INT_OCCURRED__SHIFT 0x0000000f
+#define DMCU_INTERRUPT_STATUS__DCPG_IHC_DCFE4_POWER_DOWN_INT_CLEAR_MASK 0x00400000L
+#define DMCU_INTERRUPT_STATUS__DCPG_IHC_DCFE4_POWER_DOWN_INT_CLEAR__SHIFT 0x00000016
+#define DMCU_INTERRUPT_STATUS__DCPG_IHC_DCFE4_POWER_DOWN_INT_OCCURRED_MASK 0x00400000L
+#define DMCU_INTERRUPT_STATUS__DCPG_IHC_DCFE4_POWER_DOWN_INT_OCCURRED__SHIFT 0x00000016
+#define DMCU_INTERRUPT_STATUS__DCPG_IHC_DCFE4_POWER_UP_INT_CLEAR_MASK 0x00010000L
+#define DMCU_INTERRUPT_STATUS__DCPG_IHC_DCFE4_POWER_UP_INT_CLEAR__SHIFT 0x00000010
+#define DMCU_INTERRUPT_STATUS__DCPG_IHC_DCFE4_POWER_UP_INT_OCCURRED_MASK 0x00010000L
+#define DMCU_INTERRUPT_STATUS__DCPG_IHC_DCFE4_POWER_UP_INT_OCCURRED__SHIFT 0x00000010
+#define DMCU_INTERRUPT_STATUS__DCPG_IHC_DCFE5_POWER_DOWN_INT_CLEAR_MASK 0x00800000L
+#define DMCU_INTERRUPT_STATUS__DCPG_IHC_DCFE5_POWER_DOWN_INT_CLEAR__SHIFT 0x00000017
+#define DMCU_INTERRUPT_STATUS__DCPG_IHC_DCFE5_POWER_DOWN_INT_OCCURRED_MASK 0x00800000L
+#define DMCU_INTERRUPT_STATUS__DCPG_IHC_DCFE5_POWER_DOWN_INT_OCCURRED__SHIFT 0x00000017
+#define DMCU_INTERRUPT_STATUS__DCPG_IHC_DCFE5_POWER_UP_INT_CLEAR_MASK 0x00020000L
+#define DMCU_INTERRUPT_STATUS__DCPG_IHC_DCFE5_POWER_UP_INT_CLEAR__SHIFT 0x00000011
+#define DMCU_INTERRUPT_STATUS__DCPG_IHC_DCFE5_POWER_UP_INT_OCCURRED_MASK 0x00020000L
+#define DMCU_INTERRUPT_STATUS__DCPG_IHC_DCFE5_POWER_UP_INT_OCCURRED__SHIFT 0x00000011
+#define DMCU_INTERRUPT_STATUS__EXTERNAL_SW_INT_CLEAR_MASK 0x00000100L
+#define DMCU_INTERRUPT_STATUS__EXTERNAL_SW_INT_CLEAR__SHIFT 0x00000008
+#define DMCU_INTERRUPT_STATUS__EXTERNAL_SW_INT_OCCURRED_MASK 0x00000100L
+#define DMCU_INTERRUPT_STATUS__EXTERNAL_SW_INT_OCCURRED__SHIFT 0x00000008
+#define DMCU_INTERRUPT_STATUS__MCP_INT_OCCURRED_MASK 0x00000008L
+#define DMCU_INTERRUPT_STATUS__MCP_INT_OCCURRED__SHIFT 0x00000003
+#define DMCU_INTERRUPT_STATUS__SCP_INT_OCCURRED_MASK 0x00000200L
+#define DMCU_INTERRUPT_STATUS__SCP_INT_OCCURRED__SHIFT 0x00000009
+#define DMCU_INTERRUPT_STATUS__UC_INTERNAL_INT_CLEAR_MASK 0x00000400L
+#define DMCU_INTERRUPT_STATUS__UC_INTERNAL_INT_CLEAR__SHIFT 0x0000000a
+#define DMCU_INTERRUPT_STATUS__UC_INTERNAL_INT_OCCURRED_MASK 0x00000400L
+#define DMCU_INTERRUPT_STATUS__UC_INTERNAL_INT_OCCURRED__SHIFT 0x0000000a
+#define DMCU_INTERRUPT_STATUS__UC_REG_RD_TIMEOUT_INT_CLEAR_MASK 0x00000800L
+#define DMCU_INTERRUPT_STATUS__UC_REG_RD_TIMEOUT_INT_CLEAR__SHIFT 0x0000000b
+#define DMCU_INTERRUPT_STATUS__UC_REG_RD_TIMEOUT_INT_OCCURRED_MASK 0x00000800L
+#define DMCU_INTERRUPT_STATUS__UC_REG_RD_TIMEOUT_INT_OCCURRED__SHIFT 0x0000000b
+#define DMCU_INTERRUPT_STATUS__VBLANK1_INT_CLEAR_MASK 0x01000000L
+#define DMCU_INTERRUPT_STATUS__VBLANK1_INT_CLEAR__SHIFT 0x00000018
+#define DMCU_INTERRUPT_STATUS__VBLANK1_INT_OCCURRED_MASK 0x01000000L
+#define DMCU_INTERRUPT_STATUS__VBLANK1_INT_OCCURRED__SHIFT 0x00000018
+#define DMCU_INTERRUPT_STATUS__VBLANK2_INT_CLEAR_MASK 0x02000000L
+#define DMCU_INTERRUPT_STATUS__VBLANK2_INT_CLEAR__SHIFT 0x00000019
+#define DMCU_INTERRUPT_STATUS__VBLANK2_INT_OCCURRED_MASK 0x02000000L
+#define DMCU_INTERRUPT_STATUS__VBLANK2_INT_OCCURRED__SHIFT 0x00000019
+#define DMCU_INTERRUPT_STATUS__VBLANK3_INT_CLEAR_MASK 0x04000000L
+#define DMCU_INTERRUPT_STATUS__VBLANK3_INT_CLEAR__SHIFT 0x0000001a
+#define DMCU_INTERRUPT_STATUS__VBLANK3_INT_OCCURRED_MASK 0x04000000L
+#define DMCU_INTERRUPT_STATUS__VBLANK3_INT_OCCURRED__SHIFT 0x0000001a
+#define DMCU_INTERRUPT_STATUS__VBLANK4_INT_CLEAR_MASK 0x08000000L
+#define DMCU_INTERRUPT_STATUS__VBLANK4_INT_CLEAR__SHIFT 0x0000001b
+#define DMCU_INTERRUPT_STATUS__VBLANK4_INT_OCCURRED_MASK 0x08000000L
+#define DMCU_INTERRUPT_STATUS__VBLANK4_INT_OCCURRED__SHIFT 0x0000001b
+#define DMCU_INTERRUPT_STATUS__VBLANK5_INT_CLEAR_MASK 0x10000000L
+#define DMCU_INTERRUPT_STATUS__VBLANK5_INT_CLEAR__SHIFT 0x0000001c
+#define DMCU_INTERRUPT_STATUS__VBLANK5_INT_OCCURRED_MASK 0x10000000L
+#define DMCU_INTERRUPT_STATUS__VBLANK5_INT_OCCURRED__SHIFT 0x0000001c
+#define DMCU_INTERRUPT_STATUS__VBLANK6_INT_CLEAR_MASK 0x20000000L
+#define DMCU_INTERRUPT_STATUS__VBLANK6_INT_CLEAR__SHIFT 0x0000001d
+#define DMCU_INTERRUPT_STATUS__VBLANK6_INT_OCCURRED_MASK 0x20000000L
+#define DMCU_INTERRUPT_STATUS__VBLANK6_INT_OCCURRED__SHIFT 0x0000001d
+#define DMCU_INTERRUPT_TO_HOST_EN_MASK__ABM1_BL_UPDATE_INT_MASK_MASK 0x00000004L
+#define DMCU_INTERRUPT_TO_HOST_EN_MASK__ABM1_BL_UPDATE_INT_MASK__SHIFT 0x00000002
+#define DMCU_INTERRUPT_TO_HOST_EN_MASK__ABM1_HG_READY_INT_MASK_MASK 0x00000001L
+#define DMCU_INTERRUPT_TO_HOST_EN_MASK__ABM1_HG_READY_INT_MASK__SHIFT 0x00000000
+#define DMCU_INTERRUPT_TO_HOST_EN_MASK__ABM1_LS_READY_INT_MASK_MASK 0x00000002L
+#define DMCU_INTERRUPT_TO_HOST_EN_MASK__ABM1_LS_READY_INT_MASK__SHIFT 0x00000001
+#define DMCU_INTERRUPT_TO_HOST_EN_MASK__DCPG_IHC_DCFE0_POWER_DOWN_INT_MASK_MASK 0x00040000L
+#define DMCU_INTERRUPT_TO_HOST_EN_MASK__DCPG_IHC_DCFE0_POWER_DOWN_INT_MASK__SHIFT 0x00000012
+#define DMCU_INTERRUPT_TO_HOST_EN_MASK__DCPG_IHC_DCFE0_POWER_UP_INT_MASK_MASK 0x00001000L
+#define DMCU_INTERRUPT_TO_HOST_EN_MASK__DCPG_IHC_DCFE0_POWER_UP_INT_MASK__SHIFT 0x0000000c
+#define DMCU_INTERRUPT_TO_HOST_EN_MASK__DCPG_IHC_DCFE1_POWER_DOWN_INT_MASK_MASK 0x00080000L
+#define DMCU_INTERRUPT_TO_HOST_EN_MASK__DCPG_IHC_DCFE1_POWER_DOWN_INT_MASK__SHIFT 0x00000013
+#define DMCU_INTERRUPT_TO_HOST_EN_MASK__DCPG_IHC_DCFE1_POWER_UP_INT_MASK_MASK 0x00002000L
+#define DMCU_INTERRUPT_TO_HOST_EN_MASK__DCPG_IHC_DCFE1_POWER_UP_INT_MASK__SHIFT 0x0000000d
+#define DMCU_INTERRUPT_TO_HOST_EN_MASK__DCPG_IHC_DCFE2_POWER_DOWN_INT_MASK_MASK 0x00100000L
+#define DMCU_INTERRUPT_TO_HOST_EN_MASK__DCPG_IHC_DCFE2_POWER_DOWN_INT_MASK__SHIFT 0x00000014
+#define DMCU_INTERRUPT_TO_HOST_EN_MASK__DCPG_IHC_DCFE2_POWER_UP_INT_MASK_MASK 0x00004000L
+#define DMCU_INTERRUPT_TO_HOST_EN_MASK__DCPG_IHC_DCFE2_POWER_UP_INT_MASK__SHIFT 0x0000000e
+#define DMCU_INTERRUPT_TO_HOST_EN_MASK__DCPG_IHC_DCFE3_POWER_DOWN_INT_MASK_MASK 0x00200000L
+#define DMCU_INTERRUPT_TO_HOST_EN_MASK__DCPG_IHC_DCFE3_POWER_DOWN_INT_MASK__SHIFT 0x00000015
+#define DMCU_INTERRUPT_TO_HOST_EN_MASK__DCPG_IHC_DCFE3_POWER_UP_INT_MASK_MASK 0x00008000L
+#define DMCU_INTERRUPT_TO_HOST_EN_MASK__DCPG_IHC_DCFE3_POWER_UP_INT_MASK__SHIFT 0x0000000f
+#define DMCU_INTERRUPT_TO_HOST_EN_MASK__DCPG_IHC_DCFE4_POWER_DOWN_INT_MASK_MASK 0x00400000L
+#define DMCU_INTERRUPT_TO_HOST_EN_MASK__DCPG_IHC_DCFE4_POWER_DOWN_INT_MASK__SHIFT 0x00000016
+#define DMCU_INTERRUPT_TO_HOST_EN_MASK__DCPG_IHC_DCFE4_POWER_UP_INT_MASK_MASK 0x00010000L
+#define DMCU_INTERRUPT_TO_HOST_EN_MASK__DCPG_IHC_DCFE4_POWER_UP_INT_MASK__SHIFT 0x00000010
+#define DMCU_INTERRUPT_TO_HOST_EN_MASK__DCPG_IHC_DCFE5_POWER_DOWN_INT_MASK_MASK 0x00800000L
+#define DMCU_INTERRUPT_TO_HOST_EN_MASK__DCPG_IHC_DCFE5_POWER_DOWN_INT_MASK__SHIFT 0x00000017
+#define DMCU_INTERRUPT_TO_HOST_EN_MASK__DCPG_IHC_DCFE5_POWER_UP_INT_MASK_MASK 0x00020000L
+#define DMCU_INTERRUPT_TO_HOST_EN_MASK__DCPG_IHC_DCFE5_POWER_UP_INT_MASK__SHIFT 0x00000011
+#define DMCU_INTERRUPT_TO_HOST_EN_MASK__SCP_INT_MASK_MASK 0x00000200L
+#define DMCU_INTERRUPT_TO_HOST_EN_MASK__SCP_INT_MASK__SHIFT 0x00000009
+#define DMCU_INTERRUPT_TO_HOST_EN_MASK__UC_INTERNAL_INT_MASK_MASK 0x00000400L
+#define DMCU_INTERRUPT_TO_HOST_EN_MASK__UC_INTERNAL_INT_MASK__SHIFT 0x0000000a
+#define DMCU_INTERRUPT_TO_HOST_EN_MASK__UC_REG_RD_TIMEOUT_INT_MASK_MASK 0x00000800L
+#define DMCU_INTERRUPT_TO_HOST_EN_MASK__UC_REG_RD_TIMEOUT_INT_MASK__SHIFT 0x0000000b
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__ABM1_BL_UPDATE_INT_TO_UC_EN_MASK 0x00000004L
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__ABM1_BL_UPDATE_INT_TO_UC_EN__SHIFT 0x00000002
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__ABM1_HG_READY_INT_TO_UC_EN_MASK 0x00000001L
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__ABM1_HG_READY_INT_TO_UC_EN__SHIFT 0x00000000
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__ABM1_LS_READY_INT_TO_UC_EN_MASK 0x00000002L
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__ABM1_LS_READY_INT_TO_UC_EN__SHIFT 0x00000001
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__DCPG_IHC_DCFE0_POWER_DOWN_INT_TO_UC_EN_MASK 0x00040000L
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__DCPG_IHC_DCFE0_POWER_DOWN_INT_TO_UC_EN__SHIFT 0x00000012
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__DCPG_IHC_DCFE0_POWER_UP_INT_TO_UC_EN_MASK 0x00001000L
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__DCPG_IHC_DCFE0_POWER_UP_INT_TO_UC_EN__SHIFT 0x0000000c
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__DCPG_IHC_DCFE1_POWER_DOWN_INT_TO_UC_EN_MASK 0x00080000L
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__DCPG_IHC_DCFE1_POWER_DOWN_INT_TO_UC_EN__SHIFT 0x00000013
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__DCPG_IHC_DCFE1_POWER_UP_INT_TO_UC_EN_MASK 0x00002000L
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__DCPG_IHC_DCFE1_POWER_UP_INT_TO_UC_EN__SHIFT 0x0000000d
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__DCPG_IHC_DCFE2_POWER_DOWN_INT_TO_UC_EN_MASK 0x00100000L
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__DCPG_IHC_DCFE2_POWER_DOWN_INT_TO_UC_EN__SHIFT 0x00000014
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__DCPG_IHC_DCFE2_POWER_UP_INT_TO_UC_EN_MASK 0x00004000L
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__DCPG_IHC_DCFE2_POWER_UP_INT_TO_UC_EN__SHIFT 0x0000000e
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__DCPG_IHC_DCFE3_POWER_DOWN_INT_TO_UC_EN_MASK 0x00200000L
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__DCPG_IHC_DCFE3_POWER_DOWN_INT_TO_UC_EN__SHIFT 0x00000015
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__DCPG_IHC_DCFE3_POWER_UP_INT_TO_UC_EN_MASK 0x00008000L
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__DCPG_IHC_DCFE3_POWER_UP_INT_TO_UC_EN__SHIFT 0x0000000f
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__DCPG_IHC_DCFE4_POWER_DOWN_INT_TO_UC_EN_MASK 0x00400000L
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__DCPG_IHC_DCFE4_POWER_DOWN_INT_TO_UC_EN__SHIFT 0x00000016
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__DCPG_IHC_DCFE4_POWER_UP_INT_TO_UC_EN_MASK 0x00010000L
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__DCPG_IHC_DCFE4_POWER_UP_INT_TO_UC_EN__SHIFT 0x00000010
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__DCPG_IHC_DCFE5_POWER_DOWN_INT_TO_UC_EN_MASK 0x00800000L
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__DCPG_IHC_DCFE5_POWER_DOWN_INT_TO_UC_EN__SHIFT 0x00000017
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__DCPG_IHC_DCFE5_POWER_UP_INT_TO_UC_EN_MASK 0x00020000L
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__DCPG_IHC_DCFE5_POWER_UP_INT_TO_UC_EN__SHIFT 0x00000011
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__EXTERNAL_SW_INT_TO_UC_EN_MASK 0x00000100L
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__EXTERNAL_SW_INT_TO_UC_EN__SHIFT 0x00000008
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__MCP_INT_TO_UC_EN_MASK 0x00000008L
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__MCP_INT_TO_UC_EN__SHIFT 0x00000003
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__VBLANK1_INT_TO_UC_EN_MASK 0x01000000L
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__VBLANK1_INT_TO_UC_EN__SHIFT 0x00000018
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__VBLANK2_INT_TO_UC_EN_MASK 0x02000000L
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__VBLANK2_INT_TO_UC_EN__SHIFT 0x00000019
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__VBLANK3_INT_TO_UC_EN_MASK 0x04000000L
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__VBLANK3_INT_TO_UC_EN__SHIFT 0x0000001a
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__VBLANK4_INT_TO_UC_EN_MASK 0x08000000L
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__VBLANK4_INT_TO_UC_EN__SHIFT 0x0000001b
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__VBLANK5_INT_TO_UC_EN_MASK 0x10000000L
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__VBLANK5_INT_TO_UC_EN__SHIFT 0x0000001c
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__VBLANK6_INT_TO_UC_EN_MASK 0x20000000L
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__VBLANK6_INT_TO_UC_EN__SHIFT 0x0000001d
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__ABM1_BL_UPDATE_INT_XIRQ_IRQ_SEL_MASK 0x00000004L
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__ABM1_BL_UPDATE_INT_XIRQ_IRQ_SEL__SHIFT 0x00000002
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__ABM1_HG_READY_INT_XIRQ_IRQ_SEL_MASK 0x00000001L
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__ABM1_HG_READY_INT_XIRQ_IRQ_SEL__SHIFT 0x00000000
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__ABM1_LS_READY_INT_XIRQ_IRQ_SEL_MASK 0x00000002L
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__ABM1_LS_READY_INT_XIRQ_IRQ_SEL__SHIFT 0x00000001
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__DCPG_IHC_DCFE0_POWER_DOWN_INT_XIRQ_IRQ_SEL_MASK 0x00040000L
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__DCPG_IHC_DCFE0_POWER_DOWN_INT_XIRQ_IRQ_SEL__SHIFT 0x00000012
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__DCPG_IHC_DCFE0_POWER_UP_INT_XIRQ_IRQ_SEL_MASK 0x00001000L
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__DCPG_IHC_DCFE0_POWER_UP_INT_XIRQ_IRQ_SEL__SHIFT 0x0000000c
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__DCPG_IHC_DCFE1_POWER_DOWN_INT_XIRQ_IRQ_SEL_MASK 0x00080000L
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__DCPG_IHC_DCFE1_POWER_DOWN_INT_XIRQ_IRQ_SEL__SHIFT 0x00000013
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__DCPG_IHC_DCFE1_POWER_UP_INT_XIRQ_IRQ_SEL_MASK 0x00002000L
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__DCPG_IHC_DCFE1_POWER_UP_INT_XIRQ_IRQ_SEL__SHIFT 0x0000000d
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__DCPG_IHC_DCFE2_POWER_DOWN_INT_XIRQ_IRQ_SEL_MASK 0x00100000L
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__DCPG_IHC_DCFE2_POWER_DOWN_INT_XIRQ_IRQ_SEL__SHIFT 0x00000014
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__DCPG_IHC_DCFE2_POWER_UP_INT_XIRQ_IRQ_SEL_MASK 0x00004000L
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__DCPG_IHC_DCFE2_POWER_UP_INT_XIRQ_IRQ_SEL__SHIFT 0x0000000e
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__DCPG_IHC_DCFE3_POWER_DOWN_INT_XIRQ_IRQ_SEL_MASK 0x00200000L
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__DCPG_IHC_DCFE3_POWER_DOWN_INT_XIRQ_IRQ_SEL__SHIFT 0x00000015
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__DCPG_IHC_DCFE3_POWER_UP_INT_XIRQ_IRQ_SEL_MASK 0x00008000L
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__DCPG_IHC_DCFE3_POWER_UP_INT_XIRQ_IRQ_SEL__SHIFT 0x0000000f
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__DCPG_IHC_DCFE4_POWER_DOWN_INT_XIRQ_IRQ_SEL_MASK 0x00400000L
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__DCPG_IHC_DCFE4_POWER_DOWN_INT_XIRQ_IRQ_SEL__SHIFT 0x00000016
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__DCPG_IHC_DCFE4_POWER_UP_INT_XIRQ_IRQ_SEL_MASK 0x00010000L
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__DCPG_IHC_DCFE4_POWER_UP_INT_XIRQ_IRQ_SEL__SHIFT 0x00000010
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__DCPG_IHC_DCFE5_POWER_DOWN_INT_XIRQ_IRQ_SEL_MASK 0x00800000L
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__DCPG_IHC_DCFE5_POWER_DOWN_INT_XIRQ_IRQ_SEL__SHIFT 0x00000017
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__DCPG_IHC_DCFE5_POWER_UP_INT_XIRQ_IRQ_SEL_MASK 0x00020000L
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__DCPG_IHC_DCFE5_POWER_UP_INT_XIRQ_IRQ_SEL__SHIFT 0x00000011
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__EXTERNAL_SW_INT_XIRQ_IRQ_SEL_MASK 0x00000100L
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__EXTERNAL_SW_INT_XIRQ_IRQ_SEL__SHIFT 0x00000008
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__MCP_INT_XIRQ_IRQ_SEL_MASK 0x00000008L
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__MCP_INT_XIRQ_IRQ_SEL__SHIFT 0x00000003
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__VBLANK1_INT_XIRQ_IRQ_SEL_MASK 0x01000000L
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__VBLANK1_INT_XIRQ_IRQ_SEL__SHIFT 0x00000018
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__VBLANK2_INT_XIRQ_IRQ_SEL_MASK 0x02000000L
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__VBLANK2_INT_XIRQ_IRQ_SEL__SHIFT 0x00000019
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__VBLANK3_INT_XIRQ_IRQ_SEL_MASK 0x04000000L
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__VBLANK3_INT_XIRQ_IRQ_SEL__SHIFT 0x0000001a
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__VBLANK4_INT_XIRQ_IRQ_SEL_MASK 0x08000000L
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__VBLANK4_INT_XIRQ_IRQ_SEL__SHIFT 0x0000001b
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__VBLANK5_INT_XIRQ_IRQ_SEL_MASK 0x10000000L
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__VBLANK5_INT_XIRQ_IRQ_SEL__SHIFT 0x0000001c
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__VBLANK6_INT_XIRQ_IRQ_SEL_MASK 0x20000000L
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__VBLANK6_INT_XIRQ_IRQ_SEL__SHIFT 0x0000001d
+#define DMCU_IRAM_RD_CTRL__IRAM_RD_ADDR_MASK 0x000003ffL
+#define DMCU_IRAM_RD_CTRL__IRAM_RD_ADDR__SHIFT 0x00000000
+#define DMCU_IRAM_RD_DATA__IRAM_RD_DATA_MASK 0x000000ffL
+#define DMCU_IRAM_RD_DATA__IRAM_RD_DATA__SHIFT 0x00000000
+#define DMCU_IRAM_WR_CTRL__IRAM_WR_ADDR_MASK 0x000003ffL
+#define DMCU_IRAM_WR_CTRL__IRAM_WR_ADDR__SHIFT 0x00000000
+#define DMCU_IRAM_WR_DATA__IRAM_WR_DATA_MASK 0x000000ffL
+#define DMCU_IRAM_WR_DATA__IRAM_WR_DATA__SHIFT 0x00000000
+#define DMCU_PC_START_ADDR__PC_START_ADDR_LSB_MASK 0x000000ffL
+#define DMCU_PC_START_ADDR__PC_START_ADDR_LSB__SHIFT 0x00000000
+#define DMCU_PC_START_ADDR__PC_START_ADDR_MSB_MASK 0x0000ff00L
+#define DMCU_PC_START_ADDR__PC_START_ADDR_MSB__SHIFT 0x00000008
+#define DMCU_RAM_ACCESS_CTRL__ERAM_HOST_ACCESS_EN_MASK 0x00000010L
+#define DMCU_RAM_ACCESS_CTRL__ERAM_HOST_ACCESS_EN__SHIFT 0x00000004
+#define DMCU_RAM_ACCESS_CTRL__ERAM_RD_ADDR_AUTO_INC_MASK 0x00000002L
+#define DMCU_RAM_ACCESS_CTRL__ERAM_RD_ADDR_AUTO_INC__SHIFT 0x00000001
+#define DMCU_RAM_ACCESS_CTRL__ERAM_WR_ADDR_AUTO_INC_MASK 0x00000001L
+#define DMCU_RAM_ACCESS_CTRL__ERAM_WR_ADDR_AUTO_INC__SHIFT 0x00000000
+#define DMCU_RAM_ACCESS_CTRL__IRAM_HOST_ACCESS_EN_MASK 0x00000020L
+#define DMCU_RAM_ACCESS_CTRL__IRAM_HOST_ACCESS_EN__SHIFT 0x00000005
+#define DMCU_RAM_ACCESS_CTRL__IRAM_RD_ADDR_AUTO_INC_MASK 0x00000008L
+#define DMCU_RAM_ACCESS_CTRL__IRAM_RD_ADDR_AUTO_INC__SHIFT 0x00000003
+#define DMCU_RAM_ACCESS_CTRL__IRAM_WR_ADDR_AUTO_INC_MASK 0x00000004L
+#define DMCU_RAM_ACCESS_CTRL__IRAM_WR_ADDR_AUTO_INC__SHIFT 0x00000002
+#define DMCU_RAM_ACCESS_CTRL__UC_RST_RELEASE_DELAY_CNT_MASK 0x0000ff00L
+#define DMCU_RAM_ACCESS_CTRL__UC_RST_RELEASE_DELAY_CNT__SHIFT 0x00000008
+#define DMCU_STATUS__UC_IN_RESET_MASK 0x00000001L
+#define DMCU_STATUS__UC_IN_RESET__SHIFT 0x00000000
+#define DMCU_STATUS__UC_IN_STOP_MODE_MASK 0x00000004L
+#define DMCU_STATUS__UC_IN_STOP_MODE__SHIFT 0x00000002
+#define DMCU_STATUS__UC_IN_WAIT_MODE_MASK 0x00000002L
+#define DMCU_STATUS__UC_IN_WAIT_MODE__SHIFT 0x00000001
+#define DMCU_TEST_DEBUG_DATA__DMCU_TEST_DEBUG_DATA_MASK 0xffffffffL
+#define DMCU_TEST_DEBUG_DATA__DMCU_TEST_DEBUG_DATA__SHIFT 0x00000000
+#define DMCU_TEST_DEBUG_INDEX__DMCU_TEST_DEBUG_INDEX_MASK 0x000000ffL
+#define DMCU_TEST_DEBUG_INDEX__DMCU_TEST_DEBUG_INDEX__SHIFT 0x00000000
+#define DMCU_TEST_DEBUG_INDEX__DMCU_TEST_DEBUG_WRITE_EN_MASK 0x00000100L
+#define DMCU_TEST_DEBUG_INDEX__DMCU_TEST_DEBUG_WRITE_EN__SHIFT 0x00000008
+#define DMCU_UC_CLK_GATING_CNTL__UC_ERAM_RD_DELAY_MASK 0x00000700L
+#define DMCU_UC_CLK_GATING_CNTL__UC_ERAM_RD_DELAY__SHIFT 0x00000008
+#define DMCU_UC_CLK_GATING_CNTL__UC_IRAM_RD_DELAY_MASK 0x00000007L
+#define DMCU_UC_CLK_GATING_CNTL__UC_IRAM_RD_DELAY__SHIFT 0x00000000
+#define DMCU_UC_CLK_GATING_CNTL__UC_RBBM_RD_CLK_GATING_EN_MASK 0x00010000L
+#define DMCU_UC_CLK_GATING_CNTL__UC_RBBM_RD_CLK_GATING_EN__SHIFT 0x00000010
+#define DMCU_UC_INTERNAL_INT_STATUS__UC_INT_ILLEGAL_OPCODE_TRAP_MASK 0x00000008L
+#define DMCU_UC_INTERNAL_INT_STATUS__UC_INT_ILLEGAL_OPCODE_TRAP__SHIFT 0x00000003
+#define DMCU_UC_INTERNAL_INT_STATUS__UC_INT_IRQ_N_PIN_MASK 0x00000001L
+#define DMCU_UC_INTERNAL_INT_STATUS__UC_INT_IRQ_N_PIN__SHIFT 0x00000000
+#define DMCU_UC_INTERNAL_INT_STATUS__UC_INT_PULSE_ACCUMULATOR_INPUT_EDGE_MASK 0x00004000L
+#define DMCU_UC_INTERNAL_INT_STATUS__UC_INT_PULSE_ACCUMULATOR_INPUT_EDGE__SHIFT 0x0000000e
+#define DMCU_UC_INTERNAL_INT_STATUS__UC_INT_PULSE_ACCUMULATOR_OVERFLOW_MASK 0x00008000L
+#define DMCU_UC_INTERNAL_INT_STATUS__UC_INT_PULSE_ACCUMULATOR_OVERFLOW__SHIFT 0x0000000f
+#define DMCU_UC_INTERNAL_INT_STATUS__UC_INT_REAL_TIME_INTERRUPT_MASK 0x00000200L
+#define DMCU_UC_INTERNAL_INT_STATUS__UC_INT_REAL_TIME_INTERRUPT__SHIFT 0x00000009
+#define DMCU_UC_INTERNAL_INT_STATUS__UC_INT_SOFTWARE_INTERRUPT_MASK 0x00000004L
+#define DMCU_UC_INTERNAL_INT_STATUS__UC_INT_SOFTWARE_INTERRUPT__SHIFT 0x00000002
+#define DMCU_UC_INTERNAL_INT_STATUS__UC_INT_TIMER_INPUT_CAPTURE_1_MASK 0x00002000L
+#define DMCU_UC_INTERNAL_INT_STATUS__UC_INT_TIMER_INPUT_CAPTURE_1__SHIFT 0x0000000d
+#define DMCU_UC_INTERNAL_INT_STATUS__UC_INT_TIMER_INPUT_CAPTURE_2_MASK 0x00001000L
+#define DMCU_UC_INTERNAL_INT_STATUS__UC_INT_TIMER_INPUT_CAPTURE_2__SHIFT 0x0000000c
+#define DMCU_UC_INTERNAL_INT_STATUS__UC_INT_TIMER_INPUT_CAPTURE_3_MASK 0x00000800L
+#define DMCU_UC_INTERNAL_INT_STATUS__UC_INT_TIMER_INPUT_CAPTURE_3__SHIFT 0x0000000b
+#define DMCU_UC_INTERNAL_INT_STATUS__UC_INT_TIMER_INPUT_CAPTURE_4_OUTPUT_COMPARE_5_MASK 0x00000400L
+#define DMCU_UC_INTERNAL_INT_STATUS__UC_INT_TIMER_INPUT_CAPTURE_4_OUTPUT_COMPARE_5__SHIFT 0x0000000a
+#define DMCU_UC_INTERNAL_INT_STATUS__UC_INT_TIMER_OUTPUT_COMPARE_1_MASK 0x00000080L
+#define DMCU_UC_INTERNAL_INT_STATUS__UC_INT_TIMER_OUTPUT_COMPARE_1__SHIFT 0x00000007
+#define DMCU_UC_INTERNAL_INT_STATUS__UC_INT_TIMER_OUTPUT_COMPARE_2_MASK 0x00000040L
+#define DMCU_UC_INTERNAL_INT_STATUS__UC_INT_TIMER_OUTPUT_COMPARE_2__SHIFT 0x00000006
+#define DMCU_UC_INTERNAL_INT_STATUS__UC_INT_TIMER_OUTPUT_COMPARE_3_MASK 0x00000020L
+#define DMCU_UC_INTERNAL_INT_STATUS__UC_INT_TIMER_OUTPUT_COMPARE_3__SHIFT 0x00000005
+#define DMCU_UC_INTERNAL_INT_STATUS__UC_INT_TIMER_OUTPUT_COMPARE_4_MASK 0x00000010L
+#define DMCU_UC_INTERNAL_INT_STATUS__UC_INT_TIMER_OUTPUT_COMPARE_4__SHIFT 0x00000004
+#define DMCU_UC_INTERNAL_INT_STATUS__UC_INT_TIMER_OVERFLOW_MASK 0x00000100L
+#define DMCU_UC_INTERNAL_INT_STATUS__UC_INT_TIMER_OVERFLOW__SHIFT 0x00000008
+#define DMCU_UC_INTERNAL_INT_STATUS__UC_INT_XIRQ_N_PIN_MASK 0x00000002L
+#define DMCU_UC_INTERNAL_INT_STATUS__UC_INT_XIRQ_N_PIN__SHIFT 0x00000001
+#define DMIF_ADDR_CALC__ADDR_CONFIG_PIPE_INTERLEAVE_SIZE_MASK 0x00000070L
+#define DMIF_ADDR_CALC__ADDR_CONFIG_PIPE_INTERLEAVE_SIZE__SHIFT 0x00000004
+#define DMIF_ADDR_CALC__ADDR_CONFIG_ROW_SIZE_MASK 0x30000000L
+#define DMIF_ADDR_CALC__ADDR_CONFIG_ROW_SIZE__SHIFT 0x0000001c
+#define DMIF_ADDR_CONFIG__BANK_INTERLEAVE_SIZE_MASK 0x00000700L
+#define DMIF_ADDR_CONFIG__BANK_INTERLEAVE_SIZE__SHIFT 0x00000008
+#define DMIF_ADDR_CONFIG__NUM_LOWER_PIPES_MASK 0x40000000L
+#define DMIF_ADDR_CONFIG__NUM_LOWER_PIPES__SHIFT 0x0000001e
+#define DMIF_ADDR_CONFIG__NUM_PIPES_MASK 0x00000007L
+#define DMIF_ADDR_CONFIG__NUM_PIPES__SHIFT 0x00000000
+#define DMIF_ADDR_CONFIG__NUM_SHADER_ENGINES_MASK 0x00003000L
+#define DMIF_ADDR_CONFIG__NUM_SHADER_ENGINES__SHIFT 0x0000000c
+#define DMIF_ADDR_CONFIG__PIPE_INTERLEAVE_SIZE_MASK 0x00000070L
+#define DMIF_ADDR_CONFIG__PIPE_INTERLEAVE_SIZE__SHIFT 0x00000004
+#define DMIF_ADDR_CONFIG__ROW_SIZE_MASK 0x30000000L
+#define DMIF_ADDR_CONFIG__ROW_SIZE__SHIFT 0x0000001c
+#define DMIF_ADDR_CONFIG__SHADER_ENGINE_TILE_SIZE_MASK 0x00070000L
+#define DMIF_ADDR_CONFIG__SHADER_ENGINE_TILE_SIZE__SHIFT 0x00000010
+#define DMIF_ARBITRATION_CONTROL__DMIF_ARBITRATION_REFERENCE_CLOCK_PERIOD_MASK 0x0000ffffL
+#define DMIF_ARBITRATION_CONTROL__DMIF_ARBITRATION_REFERENCE_CLOCK_PERIOD__SHIFT 0x00000000
+#define DMIF_ARBITRATION_CONTROL__PIPE_SWITCH_EFFICIENCY_WEIGHT_MASK 0xffff0000L
+#define DMIF_ARBITRATION_CONTROL__PIPE_SWITCH_EFFICIENCY_WEIGHT__SHIFT 0x00000010
+#define DMIF_CONTROL__DMIF_BUFF_SIZE_MASK 0x00000003L
+#define DMIF_CONTROL__DMIF_BUFF_SIZE__SHIFT 0x00000000
+#define DMIF_CONTROL__DMIF_CHUNK_BUFF_MARGIN_MASK 0x60000000L
+#define DMIF_CONTROL__DMIF_CHUNK_BUFF_MARGIN__SHIFT 0x0000001d
+#define DMIF_CONTROL__DMIF_DELAY_ARBITRATION_MASK 0x1f000000L
+#define DMIF_CONTROL__DMIF_DELAY_ARBITRATION__SHIFT 0x00000018
+#define DMIF_CONTROL__DMIF_DISABLE_EARLY_RECEIVED_LEVEL_COUNT_MASK 0x00000010L
+#define DMIF_CONTROL__DMIF_DISABLE_EARLY_RECEIVED_LEVEL_COUNT__SHIFT 0x00000004
+#define DMIF_CONTROL__DMIF_FORCE_TOTAL_REQ_BURST_SIZE_MASK 0x0000f000L
+#define DMIF_CONTROL__DMIF_FORCE_TOTAL_REQ_BURST_SIZE__SHIFT 0x0000000c
+#define DMIF_CONTROL__DMIF_GROUP_REQUESTS_IN_CHUNK_MASK 0x00000004L
+#define DMIF_CONTROL__DMIF_GROUP_REQUESTS_IN_CHUNK__SHIFT 0x00000002
+#define DMIF_CONTROL__DMIF_MAX_TOTAL_OUTSTANDING_CHUNK_REQUESTS_MASK 0x003f0000L
+#define DMIF_CONTROL__DMIF_MAX_TOTAL_OUTSTANDING_CHUNK_REQUESTS__SHIFT 0x00000010
+#define DMIF_CONTROL__DMIF_REQ_BURST_SIZE_MASK 0x00000700L
+#define DMIF_CONTROL__DMIF_REQ_BURST_SIZE__SHIFT 0x00000008
+#define DMIF_DEBUG02_CORE0__DB_DATA_MASK 0x0000ffffL
+#define DMIF_DEBUG02_CORE0__DB_DATA__SHIFT 0x00000000
+#define DMIF_DEBUG02_CORE0__MC_RDRET_COUNT_EN_MASK 0x00010000L
+#define DMIF_DEBUG02_CORE0__MC_RDRET_COUNT_EN__SHIFT 0x00000010
+#define DMIF_DEBUG02_CORE0__MC_RDRET_COUNTER_MASK 0x0ffe0000L
+#define DMIF_DEBUG02_CORE0__MC_RDRET_COUNTER__SHIFT 0x00000011
+#define DMIF_DEBUG02_CORE1__DB_DATA_MASK 0x0000ffffL
+#define DMIF_DEBUG02_CORE1__DB_DATA__SHIFT 0x00000000
+#define DMIF_DEBUG02_CORE1__MC_RDRET_COUNT_EN_MASK 0x00010000L
+#define DMIF_DEBUG02_CORE1__MC_RDRET_COUNT_EN__SHIFT 0x00000010
+#define DMIF_DEBUG02_CORE1__MC_RDRET_COUNTER_MASK 0x0ffe0000L
+#define DMIF_DEBUG02_CORE1__MC_RDRET_COUNTER__SHIFT 0x00000011
+#define DMIF_HW_DEBUG__DMIF_HW_DEBUG_MASK 0xffffffffL
+#define DMIF_HW_DEBUG__DMIF_HW_DEBUG__SHIFT 0x00000000
+#define DMIF_STATUS2__DMIF_CHUNK_TRACKER_SCLK_STATUS_MASK 0x00000100L
+#define DMIF_STATUS2__DMIF_CHUNK_TRACKER_SCLK_STATUS__SHIFT 0x00000008
+#define DMIF_STATUS2__DMIF_FBC_TRACKER_SCLK_STATUS_MASK 0x00000200L
+#define DMIF_STATUS2__DMIF_FBC_TRACKER_SCLK_STATUS__SHIFT 0x00000009
+#define DMIF_STATUS2__DMIF_PIPE0_DISPCLK_STATUS_MASK 0x00000001L
+#define DMIF_STATUS2__DMIF_PIPE0_DISPCLK_STATUS__SHIFT 0x00000000
+#define DMIF_STATUS2__DMIF_PIPE1_DISPCLK_STATUS_MASK 0x00000002L
+#define DMIF_STATUS2__DMIF_PIPE1_DISPCLK_STATUS__SHIFT 0x00000001
+#define DMIF_STATUS2__DMIF_PIPE2_DISPCLK_STATUS_MASK 0x00000004L
+#define DMIF_STATUS2__DMIF_PIPE2_DISPCLK_STATUS__SHIFT 0x00000002
+#define DMIF_STATUS2__DMIF_PIPE3_DISPCLK_STATUS_MASK 0x00000008L
+#define DMIF_STATUS2__DMIF_PIPE3_DISPCLK_STATUS__SHIFT 0x00000003
+#define DMIF_STATUS2__DMIF_PIPE4_DISPCLK_STATUS_MASK 0x00000010L
+#define DMIF_STATUS2__DMIF_PIPE4_DISPCLK_STATUS__SHIFT 0x00000004
+#define DMIF_STATUS2__DMIF_PIPE5_DISPCLK_STATUS_MASK 0x00000020L
+#define DMIF_STATUS2__DMIF_PIPE5_DISPCLK_STATUS__SHIFT 0x00000005
+#define DMIF_STATUS__DMIF_CLEAR_MC_SEND_ON_IDLE_MASK 0x00003f00L
+#define DMIF_STATUS__DMIF_CLEAR_MC_SEND_ON_IDLE__SHIFT 0x00000008
+#define DMIF_STATUS__DMIF_MC_LATENCY_COUNTER_ENABLE_MASK 0x00010000L
+#define DMIF_STATUS__DMIF_MC_LATENCY_COUNTER_ENABLE__SHIFT 0x00000010
+#define DMIF_STATUS__DMIF_MC_LATENCY_COUNTER_SOURCE_SELECT_MASK 0x00700000L
+#define DMIF_STATUS__DMIF_MC_LATENCY_COUNTER_SOURCE_SELECT__SHIFT 0x00000014
+#define DMIF_STATUS__DMIF_MC_LATENCY_COUNTER_URGENT_ONLY_MASK 0x00020000L
+#define DMIF_STATUS__DMIF_MC_LATENCY_COUNTER_URGENT_ONLY__SHIFT 0x00000011
+#define DMIF_STATUS__DMIF_MC_SEND_ON_IDLE_MASK 0x0000003fL
+#define DMIF_STATUS__DMIF_MC_SEND_ON_IDLE__SHIFT 0x00000000
+#define DMIF_STATUS__DMIF_PERFORMANCE_COUNTER_SOURCE_SELECT_MASK 0x07000000L
+#define DMIF_STATUS__DMIF_PERFORMANCE_COUNTER_SOURCE_SELECT__SHIFT 0x00000018
+#define DMIF_STATUS__DMIF_UNDERFLOW_MASK 0x10000000L
+#define DMIF_STATUS__DMIF_UNDERFLOW__SHIFT 0x0000001c
+#define DMIF_TEST_DEBUG_DATA__DMIF_TEST_DEBUG_DATA_MASK 0xffffffffL
+#define DMIF_TEST_DEBUG_DATA__DMIF_TEST_DEBUG_DATA__SHIFT 0x00000000
+#define DMIF_TEST_DEBUG_INDEX__DMIF_TEST_DEBUG_INDEX_MASK 0x000000ffL
+#define DMIF_TEST_DEBUG_INDEX__DMIF_TEST_DEBUG_INDEX__SHIFT 0x00000000
+#define DMIF_TEST_DEBUG_INDEX__DMIF_TEST_DEBUG_WRITE_EN_MASK 0x00000100L
+#define DMIF_TEST_DEBUG_INDEX__DMIF_TEST_DEBUG_WRITE_EN__SHIFT 0x00000008
+#define DOUT_DCE_VCE_CONTROL__DC_VCE_AUDIO_STREAM_SELECT_MASK 0x00000070L
+#define DOUT_DCE_VCE_CONTROL__DC_VCE_AUDIO_STREAM_SELECT__SHIFT 0x00000004
+#define DOUT_DCE_VCE_CONTROL__DC_VCE_VIDEO_PIPE_SELECT_MASK 0x00000007L
+#define DOUT_DCE_VCE_CONTROL__DC_VCE_VIDEO_PIPE_SELECT__SHIFT 0x00000000
+#define DOUT_POWER_MANAGEMENT_CNTL__PM_ALL_BUSY_OFF_MASK 0x00000100L
+#define DOUT_POWER_MANAGEMENT_CNTL__PM_ALL_BUSY_OFF__SHIFT 0x00000008
+#define DOUT_POWER_MANAGEMENT_CNTL__PM_ASSERT_RESET_MASK 0x00000001L
+#define DOUT_POWER_MANAGEMENT_CNTL__PM_ASSERT_RESET__SHIFT 0x00000000
+#define DOUT_SCRATCH0__DOUT_SCRATCH0_MASK 0xffffffffL
+#define DOUT_SCRATCH0__DOUT_SCRATCH0__SHIFT 0x00000000
+#define DOUT_SCRATCH1__DOUT_SCRATCH1_MASK 0xffffffffL
+#define DOUT_SCRATCH1__DOUT_SCRATCH1__SHIFT 0x00000000
+#define DOUT_SCRATCH2__DOUT_SCRATCH2_MASK 0xffffffffL
+#define DOUT_SCRATCH2__DOUT_SCRATCH2__SHIFT 0x00000000
+#define DOUT_SCRATCH3__DOUT_SCRATCH3_MASK 0xffffffffL
+#define DOUT_SCRATCH3__DOUT_SCRATCH3__SHIFT 0x00000000
+#define DOUT_SCRATCH4__DOUT_SCRATCH4_MASK 0xffffffffL
+#define DOUT_SCRATCH4__DOUT_SCRATCH4__SHIFT 0x00000000
+#define DOUT_SCRATCH5__DOUT_SCRATCH5_MASK 0xffffffffL
+#define DOUT_SCRATCH5__DOUT_SCRATCH5__SHIFT 0x00000000
+#define DOUT_SCRATCH6__DOUT_SCRATCH6_MASK 0xffffffffL
+#define DOUT_SCRATCH6__DOUT_SCRATCH6__SHIFT 0x00000000
+#define DOUT_SCRATCH7__DOUT_SCRATCH7_MASK 0xffffffffL
+#define DOUT_SCRATCH7__DOUT_SCRATCH7__SHIFT 0x00000000
+#define DOUT_TEST_DEBUG_DATA__DOUT_TEST_DEBUG_DATA_MASK 0xffffffffL
+#define DOUT_TEST_DEBUG_DATA__DOUT_TEST_DEBUG_DATA__SHIFT 0x00000000
+#define DOUT_TEST_DEBUG_INDEX__DOUT_TEST_DEBUG_INDEX_MASK 0x000000ffL
+#define DOUT_TEST_DEBUG_INDEX__DOUT_TEST_DEBUG_INDEX__SHIFT 0x00000000
+#define DOUT_TEST_DEBUG_INDEX__DOUT_TEST_DEBUG_WRITE_EN_MASK 0x00000100L
+#define DOUT_TEST_DEBUG_INDEX__DOUT_TEST_DEBUG_WRITE_EN__SHIFT 0x00000008
+#define DP_AUX1_DEBUG_A__DP_AUX1_DEBUG_A_MASK 0xffffffffL
+#define DP_AUX1_DEBUG_A__DP_AUX1_DEBUG_A__SHIFT 0x00000000
+#define DP_AUX1_DEBUG_B__DP_AUX1_DEBUG_B_MASK 0xffffffffL
+#define DP_AUX1_DEBUG_B__DP_AUX1_DEBUG_B__SHIFT 0x00000000
+#define DP_AUX1_DEBUG_C__DP_AUX1_DEBUG_C_MASK 0xffffffffL
+#define DP_AUX1_DEBUG_C__DP_AUX1_DEBUG_C__SHIFT 0x00000000
+#define DP_AUX1_DEBUG_D__DP_AUX1_DEBUG_D_MASK 0xffffffffL
+#define DP_AUX1_DEBUG_D__DP_AUX1_DEBUG_D__SHIFT 0x00000000
+#define DP_AUX1_DEBUG_E__DP_AUX1_DEBUG_E_MASK 0xffffffffL
+#define DP_AUX1_DEBUG_E__DP_AUX1_DEBUG_E__SHIFT 0x00000000
+#define DP_AUX1_DEBUG_F__DP_AUX1_DEBUG_F_MASK 0xffffffffL
+#define DP_AUX1_DEBUG_F__DP_AUX1_DEBUG_F__SHIFT 0x00000000
+#define DP_AUX1_DEBUG_G__DP_AUX1_DEBUG_G_MASK 0xffffffffL
+#define DP_AUX1_DEBUG_G__DP_AUX1_DEBUG_G__SHIFT 0x00000000
+#define DP_AUX1_DEBUG_H__DP_AUX1_DEBUG_H_MASK 0xffffffffL
+#define DP_AUX1_DEBUG_H__DP_AUX1_DEBUG_H__SHIFT 0x00000000
+#define DP_AUX1_DEBUG_I__DP_AUX1_DEBUG_I_MASK 0xffffffffL
+#define DP_AUX1_DEBUG_I__DP_AUX1_DEBUG_I__SHIFT 0x00000000
+#define DP_AUX2_DEBUG_A__DP_AUX2_DEBUG_A_MASK 0xffffffffL
+#define DP_AUX2_DEBUG_A__DP_AUX2_DEBUG_A__SHIFT 0x00000000
+#define DP_AUX2_DEBUG_B__DP_AUX2_DEBUG_B_MASK 0xffffffffL
+#define DP_AUX2_DEBUG_B__DP_AUX2_DEBUG_B__SHIFT 0x00000000
+#define DP_AUX2_DEBUG_C__DP_AUX2_DEBUG_C_MASK 0xffffffffL
+#define DP_AUX2_DEBUG_C__DP_AUX2_DEBUG_C__SHIFT 0x00000000
+#define DP_AUX2_DEBUG_D__DP_AUX2_DEBUG_D_MASK 0xffffffffL
+#define DP_AUX2_DEBUG_D__DP_AUX2_DEBUG_D__SHIFT 0x00000000
+#define DP_AUX2_DEBUG_E__DP_AUX2_DEBUG_E_MASK 0xffffffffL
+#define DP_AUX2_DEBUG_E__DP_AUX2_DEBUG_E__SHIFT 0x00000000
+#define DP_AUX2_DEBUG_F__DP_AUX2_DEBUG_F_MASK 0xffffffffL
+#define DP_AUX2_DEBUG_F__DP_AUX2_DEBUG_F__SHIFT 0x00000000
+#define DP_AUX2_DEBUG_G__DP_AUX2_DEBUG_G_MASK 0xffffffffL
+#define DP_AUX2_DEBUG_G__DP_AUX2_DEBUG_G__SHIFT 0x00000000
+#define DP_AUX2_DEBUG_H__DP_AUX2_DEBUG_H_MASK 0xffffffffL
+#define DP_AUX2_DEBUG_H__DP_AUX2_DEBUG_H__SHIFT 0x00000000
+#define DP_AUX2_DEBUG_I__DP_AUX2_DEBUG_I_MASK 0xffffffffL
+#define DP_AUX2_DEBUG_I__DP_AUX2_DEBUG_I__SHIFT 0x00000000
+#define DP_AUX3_DEBUG_A__DP_AUX3_DEBUG_A_MASK 0xffffffffL
+#define DP_AUX3_DEBUG_A__DP_AUX3_DEBUG_A__SHIFT 0x00000000
+#define DP_AUX3_DEBUG_B__DP_AUX3_DEBUG_B_MASK 0xffffffffL
+#define DP_AUX3_DEBUG_B__DP_AUX3_DEBUG_B__SHIFT 0x00000000
+#define DP_AUX3_DEBUG_C__DP_AUX3_DEBUG_C_MASK 0xffffffffL
+#define DP_AUX3_DEBUG_C__DP_AUX3_DEBUG_C__SHIFT 0x00000000
+#define DP_AUX3_DEBUG_D__DP_AUX3_DEBUG_D_MASK 0xffffffffL
+#define DP_AUX3_DEBUG_D__DP_AUX3_DEBUG_D__SHIFT 0x00000000
+#define DP_AUX3_DEBUG_E__DP_AUX3_DEBUG_E_MASK 0xffffffffL
+#define DP_AUX3_DEBUG_E__DP_AUX3_DEBUG_E__SHIFT 0x00000000
+#define DP_AUX3_DEBUG_F__DP_AUX3_DEBUG_F_MASK 0xffffffffL
+#define DP_AUX3_DEBUG_F__DP_AUX3_DEBUG_F__SHIFT 0x00000000
+#define DP_AUX3_DEBUG_G__DP_AUX3_DEBUG_G_MASK 0xffffffffL
+#define DP_AUX3_DEBUG_G__DP_AUX3_DEBUG_G__SHIFT 0x00000000
+#define DP_AUX3_DEBUG_H__DP_AUX3_DEBUG_H_MASK 0xffffffffL
+#define DP_AUX3_DEBUG_H__DP_AUX3_DEBUG_H__SHIFT 0x00000000
+#define DP_AUX3_DEBUG_I__DP_AUX3_DEBUG_I_MASK 0xffffffffL
+#define DP_AUX3_DEBUG_I__DP_AUX3_DEBUG_I__SHIFT 0x00000000
+#define DP_AUX4_DEBUG_A__DP_AUX4_DEBUG_A_MASK 0xffffffffL
+#define DP_AUX4_DEBUG_A__DP_AUX4_DEBUG_A__SHIFT 0x00000000
+#define DP_AUX4_DEBUG_B__DP_AUX4_DEBUG_B_MASK 0xffffffffL
+#define DP_AUX4_DEBUG_B__DP_AUX4_DEBUG_B__SHIFT 0x00000000
+#define DP_AUX4_DEBUG_C__DP_AUX4_DEBUG_C_MASK 0xffffffffL
+#define DP_AUX4_DEBUG_C__DP_AUX4_DEBUG_C__SHIFT 0x00000000
+#define DP_AUX4_DEBUG_D__DP_AUX4_DEBUG_D_MASK 0xffffffffL
+#define DP_AUX4_DEBUG_D__DP_AUX4_DEBUG_D__SHIFT 0x00000000
+#define DP_AUX4_DEBUG_E__DP_AUX4_DEBUG_E_MASK 0xffffffffL
+#define DP_AUX4_DEBUG_E__DP_AUX4_DEBUG_E__SHIFT 0x00000000
+#define DP_AUX4_DEBUG_F__DP_AUX4_DEBUG_F_MASK 0xffffffffL
+#define DP_AUX4_DEBUG_F__DP_AUX4_DEBUG_F__SHIFT 0x00000000
+#define DP_AUX4_DEBUG_G__DP_AUX4_DEBUG_G_MASK 0xffffffffL
+#define DP_AUX4_DEBUG_G__DP_AUX4_DEBUG_G__SHIFT 0x00000000
+#define DP_AUX4_DEBUG_H__DP_AUX4_DEBUG_H_MASK 0xffffffffL
+#define DP_AUX4_DEBUG_H__DP_AUX4_DEBUG_H__SHIFT 0x00000000
+#define DP_AUX4_DEBUG_I__DP_AUX4_DEBUG_I_MASK 0xffffffffL
+#define DP_AUX4_DEBUG_I__DP_AUX4_DEBUG_I__SHIFT 0x00000000
+#define DP_AUX5_DEBUG_A__DP_AUX5_DEBUG_A_MASK 0xffffffffL
+#define DP_AUX5_DEBUG_A__DP_AUX5_DEBUG_A__SHIFT 0x00000000
+#define DP_AUX5_DEBUG_B__DP_AUX5_DEBUG_B_MASK 0xffffffffL
+#define DP_AUX5_DEBUG_B__DP_AUX5_DEBUG_B__SHIFT 0x00000000
+#define DP_AUX5_DEBUG_C__DP_AUX5_DEBUG_C_MASK 0xffffffffL
+#define DP_AUX5_DEBUG_C__DP_AUX5_DEBUG_C__SHIFT 0x00000000
+#define DP_AUX5_DEBUG_D__DP_AUX5_DEBUG_D_MASK 0xffffffffL
+#define DP_AUX5_DEBUG_D__DP_AUX5_DEBUG_D__SHIFT 0x00000000
+#define DP_AUX5_DEBUG_E__DP_AUX5_DEBUG_E_MASK 0xffffffffL
+#define DP_AUX5_DEBUG_E__DP_AUX5_DEBUG_E__SHIFT 0x00000000
+#define DP_AUX5_DEBUG_F__DP_AUX5_DEBUG_F_MASK 0xffffffffL
+#define DP_AUX5_DEBUG_F__DP_AUX5_DEBUG_F__SHIFT 0x00000000
+#define DP_AUX5_DEBUG_G__DP_AUX5_DEBUG_G_MASK 0xffffffffL
+#define DP_AUX5_DEBUG_G__DP_AUX5_DEBUG_G__SHIFT 0x00000000
+#define DP_AUX5_DEBUG_H__DP_AUX5_DEBUG_H_MASK 0xffffffffL
+#define DP_AUX5_DEBUG_H__DP_AUX5_DEBUG_H__SHIFT 0x00000000
+#define DP_AUX5_DEBUG_I__DP_AUX5_DEBUG_I_MASK 0xffffffffL
+#define DP_AUX5_DEBUG_I__DP_AUX5_DEBUG_I__SHIFT 0x00000000
+#define DP_AUX6_DEBUG_A__DP_AUX6_DEBUG_A_MASK 0xffffffffL
+#define DP_AUX6_DEBUG_A__DP_AUX6_DEBUG_A__SHIFT 0x00000000
+#define DP_AUX6_DEBUG_B__DP_AUX6_DEBUG_B_MASK 0xffffffffL
+#define DP_AUX6_DEBUG_B__DP_AUX6_DEBUG_B__SHIFT 0x00000000
+#define DP_AUX6_DEBUG_C__DP_AUX6_DEBUG_C_MASK 0xffffffffL
+#define DP_AUX6_DEBUG_C__DP_AUX6_DEBUG_C__SHIFT 0x00000000
+#define DP_AUX6_DEBUG_D__DP_AUX6_DEBUG_D_MASK 0xffffffffL
+#define DP_AUX6_DEBUG_D__DP_AUX6_DEBUG_D__SHIFT 0x00000000
+#define DP_AUX6_DEBUG_E__DP_AUX6_DEBUG_E_MASK 0xffffffffL
+#define DP_AUX6_DEBUG_E__DP_AUX6_DEBUG_E__SHIFT 0x00000000
+#define DP_AUX6_DEBUG_F__DP_AUX6_DEBUG_F_MASK 0xffffffffL
+#define DP_AUX6_DEBUG_F__DP_AUX6_DEBUG_F__SHIFT 0x00000000
+#define DP_AUX6_DEBUG_G__DP_AUX6_DEBUG_G_MASK 0xffffffffL
+#define DP_AUX6_DEBUG_G__DP_AUX6_DEBUG_G__SHIFT 0x00000000
+#define DP_AUX6_DEBUG_H__DP_AUX6_DEBUG_H_MASK 0xffffffffL
+#define DP_AUX6_DEBUG_H__DP_AUX6_DEBUG_H__SHIFT 0x00000000
+#define DP_AUX6_DEBUG_I__DP_AUX6_DEBUG_I_MASK 0xffffffffL
+#define DP_AUX6_DEBUG_I__DP_AUX6_DEBUG_I__SHIFT 0x00000000
+#define DP_CONFIG__DP_UDI_LANES_MASK 0x00000003L
+#define DP_CONFIG__DP_UDI_LANES__SHIFT 0x00000000
+#define DP_DPHY_8B10B_CNTL__DPHY_8B10B_CUR_DISP_MASK 0x01000000L
+#define DP_DPHY_8B10B_CNTL__DPHY_8B10B_CUR_DISP__SHIFT 0x00000018
+#define DP_DPHY_8B10B_CNTL__DPHY_8B10B_EXT_DISP_MASK 0x00010000L
+#define DP_DPHY_8B10B_CNTL__DPHY_8B10B_EXT_DISP__SHIFT 0x00000010
+#define DP_DPHY_8B10B_CNTL__DPHY_8B10B_RESET_MASK 0x00000100L
+#define DP_DPHY_8B10B_CNTL__DPHY_8B10B_RESET__SHIFT 0x00000008
+#define DP_DPHY_CNTL__DPHY_ATEST_SEL_LANE0_MASK 0x00000001L
+#define DP_DPHY_CNTL__DPHY_ATEST_SEL_LANE0__SHIFT 0x00000000
+#define DP_DPHY_CNTL__DPHY_ATEST_SEL_LANE1_MASK 0x00000002L
+#define DP_DPHY_CNTL__DPHY_ATEST_SEL_LANE1__SHIFT 0x00000001
+#define DP_DPHY_CNTL__DPHY_ATEST_SEL_LANE2_MASK 0x00000004L
+#define DP_DPHY_CNTL__DPHY_ATEST_SEL_LANE2__SHIFT 0x00000002
+#define DP_DPHY_CNTL__DPHY_ATEST_SEL_LANE3_MASK 0x00000008L
+#define DP_DPHY_CNTL__DPHY_ATEST_SEL_LANE3__SHIFT 0x00000003
+#define DP_DPHY_CNTL__DPHY_BYPASS_MASK 0x00010000L
+#define DP_DPHY_CNTL__DPHY_BYPASS__SHIFT 0x00000010
+#define DP_DPHY_CNTL__DPHY_SKEW_BYPASS_MASK 0x01000000L
+#define DP_DPHY_CNTL__DPHY_SKEW_BYPASS__SHIFT 0x00000018
+#define DP_DPHY_CRC_CNTL__DPHY_CRC_FIELD_MASK 0x00000001L
+#define DP_DPHY_CRC_CNTL__DPHY_CRC_FIELD__SHIFT 0x00000000
+#define DP_DPHY_CRC_CNTL__DPHY_CRC_MASK_MASK 0x00ff0000L
+#define DP_DPHY_CRC_CNTL__DPHY_CRC_MASK__SHIFT 0x00000010
+#define DP_DPHY_CRC_CNTL__DPHY_CRC_SEL_MASK 0x00000030L
+#define DP_DPHY_CRC_CNTL__DPHY_CRC_SEL__SHIFT 0x00000004
+#define DP_DPHY_CRC_EN__DPHY_CRC_CONT_EN_MASK 0x00000010L
+#define DP_DPHY_CRC_EN__DPHY_CRC_CONT_EN__SHIFT 0x00000004
+#define DP_DPHY_CRC_EN__DPHY_CRC_EN_MASK 0x00000001L
+#define DP_DPHY_CRC_EN__DPHY_CRC_EN__SHIFT 0x00000000
+#define DP_DPHY_CRC_EN__DPHY_CRC_RESULT_VALID_MASK 0x00000100L
+#define DP_DPHY_CRC_EN__DPHY_CRC_RESULT_VALID__SHIFT 0x00000008
+#define DP_DPHY_CRC_MST_CNTL__DPHY_CRC_MST_FIRST_SLOT_MASK 0x0000003fL
+#define DP_DPHY_CRC_MST_CNTL__DPHY_CRC_MST_FIRST_SLOT__SHIFT 0x00000000
+#define DP_DPHY_CRC_MST_CNTL__DPHY_CRC_MST_LAST_SLOT_MASK 0x00003f00L
+#define DP_DPHY_CRC_MST_CNTL__DPHY_CRC_MST_LAST_SLOT__SHIFT 0x00000008
+#define DP_DPHY_CRC_MST_STATUS__DPHY_CRC_MST_PHASE_ERROR_ACK_MASK 0x00010000L
+#define DP_DPHY_CRC_MST_STATUS__DPHY_CRC_MST_PHASE_ERROR_ACK__SHIFT 0x00000010
+#define DP_DPHY_CRC_MST_STATUS__DPHY_CRC_MST_PHASE_ERROR_MASK 0x00000100L
+#define DP_DPHY_CRC_MST_STATUS__DPHY_CRC_MST_PHASE_ERROR__SHIFT 0x00000008
+#define DP_DPHY_CRC_MST_STATUS__DPHY_CRC_MST_PHASE_LOCK_MASK 0x00000001L
+#define DP_DPHY_CRC_MST_STATUS__DPHY_CRC_MST_PHASE_LOCK__SHIFT 0x00000000
+#define DP_DPHY_CRC_RESULT__DPHY_CRC_RESULT1_MASK 0x0000ff00L
+#define DP_DPHY_CRC_RESULT__DPHY_CRC_RESULT1__SHIFT 0x00000008
+#define DP_DPHY_CRC_RESULT__DPHY_CRC_RESULT2_MASK 0x00ff0000L
+#define DP_DPHY_CRC_RESULT__DPHY_CRC_RESULT2__SHIFT 0x00000010
+#define DP_DPHY_CRC_RESULT__DPHY_CRC_RESULT3_MASK 0xff000000L
+#define DP_DPHY_CRC_RESULT__DPHY_CRC_RESULT3__SHIFT 0x00000018
+#define DP_DPHY_CRC_RESULT__DPHY_CRC_RESULT_MASK 0x000000ffL
+#define DP_DPHY_CRC_RESULT__DPHY_CRC_RESULT__SHIFT 0x00000000
+#define DP_DPHY_FAST_TRAINING__DPHY_FAST_TRAINING_TP1_TIME_MASK 0x000fff00L
+#define DP_DPHY_FAST_TRAINING__DPHY_FAST_TRAINING_TP1_TIME__SHIFT 0x00000008
+#define DP_DPHY_FAST_TRAINING__DPHY_FAST_TRAINING_TP2_TIME_MASK 0xfff00000L
+#define DP_DPHY_FAST_TRAINING__DPHY_FAST_TRAINING_TP2_TIME__SHIFT 0x00000014
+#define DP_DPHY_FAST_TRAINING__DPHY_FAST_TRAINING_VBLANK_EDGE_DETECT_EN_MASK 0x00000004L
+#define DP_DPHY_FAST_TRAINING__DPHY_FAST_TRAINING_VBLANK_EDGE_DETECT_EN__SHIFT 0x00000002
+#define DP_DPHY_FAST_TRAINING__DPHY_RX_FAST_TRAINING_CAPABLE_MASK 0x00000001L
+#define DP_DPHY_FAST_TRAINING__DPHY_RX_FAST_TRAINING_CAPABLE__SHIFT 0x00000000
+#define DP_DPHY_FAST_TRAINING__DPHY_SW_FAST_TRAINING_START_MASK 0x00000002L
+#define DP_DPHY_FAST_TRAINING__DPHY_SW_FAST_TRAINING_START__SHIFT 0x00000001
+#define DP_DPHY_FAST_TRAINING_STATUS__DPHY_FAST_TRAINING_COMPLETE_ACK_MASK 0x00001000L
+#define DP_DPHY_FAST_TRAINING_STATUS__DPHY_FAST_TRAINING_COMPLETE_ACK__SHIFT 0x0000000c
+#define DP_DPHY_FAST_TRAINING_STATUS__DPHY_FAST_TRAINING_COMPLETE_MASK_MASK 0x00000100L
+#define DP_DPHY_FAST_TRAINING_STATUS__DPHY_FAST_TRAINING_COMPLETE_MASK__SHIFT 0x00000008
+#define DP_DPHY_FAST_TRAINING_STATUS__DPHY_FAST_TRAINING_COMPLETE_OCCURRED_MASK 0x00000010L
+#define DP_DPHY_FAST_TRAINING_STATUS__DPHY_FAST_TRAINING_COMPLETE_OCCURRED__SHIFT 0x00000004
+#define DP_DPHY_FAST_TRAINING_STATUS__DPHY_FAST_TRAINING_STATE_MASK 0x00000007L
+#define DP_DPHY_FAST_TRAINING_STATUS__DPHY_FAST_TRAINING_STATE__SHIFT 0x00000000
+#define DP_DPHY_PRBS_CNTL__DPHY_PRBS_EN_MASK 0x00000001L
+#define DP_DPHY_PRBS_CNTL__DPHY_PRBS_EN__SHIFT 0x00000000
+#define DP_DPHY_PRBS_CNTL__DPHY_PRBS_SEED_MASK 0x7fffff00L
+#define DP_DPHY_PRBS_CNTL__DPHY_PRBS_SEED__SHIFT 0x00000008
+#define DP_DPHY_PRBS_CNTL__DPHY_PRBS_SEL_MASK 0x00000030L
+#define DP_DPHY_PRBS_CNTL__DPHY_PRBS_SEL__SHIFT 0x00000004
+#define DP_DPHY_SYM0__DPHY_SYM1_MASK 0x000003ffL
+#define DP_DPHY_SYM0__DPHY_SYM1__SHIFT 0x00000000
+#define DP_DPHY_SYM0__DPHY_SYM2_MASK 0x000ffc00L
+#define DP_DPHY_SYM0__DPHY_SYM2__SHIFT 0x0000000a
+#define DP_DPHY_SYM0__DPHY_SYM3_MASK 0x3ff00000L
+#define DP_DPHY_SYM0__DPHY_SYM3__SHIFT 0x00000014
+#define DP_DPHY_SYM1__DPHY_SYM4_MASK 0x000003ffL
+#define DP_DPHY_SYM1__DPHY_SYM4__SHIFT 0x00000000
+#define DP_DPHY_SYM1__DPHY_SYM5_MASK 0x000ffc00L
+#define DP_DPHY_SYM1__DPHY_SYM5__SHIFT 0x0000000a
+#define DP_DPHY_SYM1__DPHY_SYM6_MASK 0x3ff00000L
+#define DP_DPHY_SYM1__DPHY_SYM6__SHIFT 0x00000014
+#define DP_DPHY_SYM2__DPHY_SYM7_MASK 0x000003ffL
+#define DP_DPHY_SYM2__DPHY_SYM7__SHIFT 0x00000000
+#define DP_DPHY_SYM2__DPHY_SYM8_MASK 0x000ffc00L
+#define DP_DPHY_SYM2__DPHY_SYM8__SHIFT 0x0000000a
+#define DP_DPHY_TRAINING_PATTERN_SEL__DPHY_TRAINING_PATTERN_SEL_MASK 0x00000003L
+#define DP_DPHY_TRAINING_PATTERN_SEL__DPHY_TRAINING_PATTERN_SEL__SHIFT 0x00000000
+#define DP_DTO0_MODULO__DP_DTO0_MODULO_MASK 0xffffffffL
+#define DP_DTO0_MODULO__DP_DTO0_MODULO__SHIFT 0x00000000
+#define DP_DTO0_PHASE__DP_DTO0_PHASE_MASK 0xffffffffL
+#define DP_DTO0_PHASE__DP_DTO0_PHASE__SHIFT 0x00000000
+#define DP_DTO1_MODULO__DP_DTO1_MODULO_MASK 0xffffffffL
+#define DP_DTO1_MODULO__DP_DTO1_MODULO__SHIFT 0x00000000
+#define DP_DTO1_PHASE__DP_DTO1_PHASE_MASK 0xffffffffL
+#define DP_DTO1_PHASE__DP_DTO1_PHASE__SHIFT 0x00000000
+#define DP_DTO2_MODULO__DP_DTO2_MODULO_MASK 0xffffffffL
+#define DP_DTO2_MODULO__DP_DTO2_MODULO__SHIFT 0x00000000
+#define DP_DTO2_PHASE__DP_DTO2_PHASE_MASK 0xffffffffL
+#define DP_DTO2_PHASE__DP_DTO2_PHASE__SHIFT 0x00000000
+#define DP_DTO3_MODULO__DP_DTO3_MODULO_MASK 0xffffffffL
+#define DP_DTO3_MODULO__DP_DTO3_MODULO__SHIFT 0x00000000
+#define DP_DTO3_PHASE__DP_DTO3_PHASE_MASK 0xffffffffL
+#define DP_DTO3_PHASE__DP_DTO3_PHASE__SHIFT 0x00000000
+#define DP_DTO4_MODULO__DP_DTO4_MODULO_MASK 0xffffffffL
+#define DP_DTO4_MODULO__DP_DTO4_MODULO__SHIFT 0x00000000
+#define DP_DTO4_PHASE__DP_DTO4_PHASE_MASK 0xffffffffL
+#define DP_DTO4_PHASE__DP_DTO4_PHASE__SHIFT 0x00000000
+#define DP_DTO5_MODULO__DP_DTO5_MODULO_MASK 0xffffffffL
+#define DP_DTO5_MODULO__DP_DTO5_MODULO__SHIFT 0x00000000
+#define DP_DTO5_PHASE__DP_DTO5_PHASE_MASK 0xffffffffL
+#define DP_DTO5_PHASE__DP_DTO5_PHASE__SHIFT 0x00000000
+#define DPG_PIPE_ARBITRATION_CONTROL1__BASE_WEIGHT_MASK 0xffff0000L
+#define DPG_PIPE_ARBITRATION_CONTROL1__BASE_WEIGHT__SHIFT 0x00000010
+#define DPG_PIPE_ARBITRATION_CONTROL1__PIXEL_DURATION_MASK 0x0000ffffL
+#define DPG_PIPE_ARBITRATION_CONTROL1__PIXEL_DURATION__SHIFT 0x00000000
+#define DPG_PIPE_ARBITRATION_CONTROL2__TIME_WEIGHT_MASK 0x0000ffffL
+#define DPG_PIPE_ARBITRATION_CONTROL2__TIME_WEIGHT__SHIFT 0x00000000
+#define DPG_PIPE_ARBITRATION_CONTROL2__URGENCY_WEIGHT_MASK 0xffff0000L
+#define DPG_PIPE_ARBITRATION_CONTROL2__URGENCY_WEIGHT__SHIFT 0x00000010
+#define DPG_PIPE_DPM_CONTROL__DPM_ENABLE_MASK 0x00000001L
+#define DPG_PIPE_DPM_CONTROL__DPM_ENABLE__SHIFT 0x00000000
+#define DPG_PIPE_DPM_CONTROL__MCLK_CHANGE_ENABLE_MASK 0x00000010L
+#define DPG_PIPE_DPM_CONTROL__MCLK_CHANGE_ENABLE__SHIFT 0x00000004
+#define DPG_PIPE_DPM_CONTROL__MCLK_CHANGE_FORCE_ON_MASK 0x00000100L
+#define DPG_PIPE_DPM_CONTROL__MCLK_CHANGE_FORCE_ON__SHIFT 0x00000008
+#define DPG_PIPE_DPM_CONTROL__MCLK_CHANGE_WATERMARK_MASK 0xffff0000L
+#define DPG_PIPE_DPM_CONTROL__MCLK_CHANGE_WATERMARK_MASK_MASK 0x00003000L
+#define DPG_PIPE_DPM_CONTROL__MCLK_CHANGE_WATERMARK_MASK__SHIFT 0x0000000c
+#define DPG_PIPE_DPM_CONTROL__MCLK_CHANGE_WATERMARK__SHIFT 0x00000010
+#define DPG_PIPE_NB_PSTATE_CHANGE_CONTROL__NB_PSTATE_ALLOW_FOR_URGENT_MASK 0x00000400L
+#define DPG_PIPE_NB_PSTATE_CHANGE_CONTROL__NB_PSTATE_ALLOW_FOR_URGENT__SHIFT 0x0000000a
+#define DPG_PIPE_NB_PSTATE_CHANGE_CONTROL__NB_PSTATE_CHANGE_ENABLE_MASK 0x00000001L
+#define DPG_PIPE_NB_PSTATE_CHANGE_CONTROL__NB_PSTATE_CHANGE_ENABLE__SHIFT 0x00000000
+#define DPG_PIPE_NB_PSTATE_CHANGE_CONTROL__NB_PSTATE_CHANGE_FORCE_ON_MASK 0x00000200L
+#define DPG_PIPE_NB_PSTATE_CHANGE_CONTROL__NB_PSTATE_CHANGE_FORCE_ON__SHIFT 0x00000009
+#define DPG_PIPE_NB_PSTATE_CHANGE_CONTROL__NB_PSTATE_CHANGE_NOT_SELF_REFRESH_DURING_REQUEST_MASK 0x00000100L
+#define DPG_PIPE_NB_PSTATE_CHANGE_CONTROL__NB_PSTATE_CHANGE_NOT_SELF_REFRESH_DURING_REQUEST__SHIFT 0x00000008
+#define DPG_PIPE_NB_PSTATE_CHANGE_CONTROL__NB_PSTATE_CHANGE_URGENT_DURING_REQUEST_MASK 0x00000010L
+#define DPG_PIPE_NB_PSTATE_CHANGE_CONTROL__NB_PSTATE_CHANGE_URGENT_DURING_REQUEST__SHIFT 0x00000004
+#define DPG_PIPE_NB_PSTATE_CHANGE_CONTROL__NB_PSTATE_CHANGE_WATERMARK_MASK 0xffff0000L
+#define DPG_PIPE_NB_PSTATE_CHANGE_CONTROL__NB_PSTATE_CHANGE_WATERMARK__SHIFT 0x00000010
+#define DPG_PIPE_STUTTER_CONTROL_NONLPTCH__STUTTER_ENABLE_NONLPTCH_MASK 0x00000001L
+#define DPG_PIPE_STUTTER_CONTROL_NONLPTCH__STUTTER_ENABLE_NONLPTCH__SHIFT 0x00000000
+#define DPG_PIPE_STUTTER_CONTROL_NONLPTCH__STUTTER_IGNORE_CURSOR_NONLPTCH_MASK 0x00000010L
+#define DPG_PIPE_STUTTER_CONTROL_NONLPTCH__STUTTER_IGNORE_CURSOR_NONLPTCH__SHIFT 0x00000004
+#define DPG_PIPE_STUTTER_CONTROL_NONLPTCH__STUTTER_IGNORE_FBC_NONLPTCH_MASK 0x00000080L
+#define DPG_PIPE_STUTTER_CONTROL_NONLPTCH__STUTTER_IGNORE_FBC_NONLPTCH__SHIFT 0x00000007
+#define DPG_PIPE_STUTTER_CONTROL_NONLPTCH__STUTTER_IGNORE_ICON_NONLPTCH_MASK 0x00000020L
+#define DPG_PIPE_STUTTER_CONTROL_NONLPTCH__STUTTER_IGNORE_ICON_NONLPTCH__SHIFT 0x00000005
+#define DPG_PIPE_STUTTER_CONTROL_NONLPTCH__STUTTER_IGNORE_VGA_NONLPTCH_MASK 0x00000040L
+#define DPG_PIPE_STUTTER_CONTROL_NONLPTCH__STUTTER_IGNORE_VGA_NONLPTCH__SHIFT 0x00000006
+#define DPG_PIPE_STUTTER_CONTROL_NONLPTCH__STUTTER_SELF_REFRESH_FORCE_ON_NONLPTCH_MASK 0x00000800L
+#define DPG_PIPE_STUTTER_CONTROL_NONLPTCH__STUTTER_SELF_REFRESH_FORCE_ON_NONLPTCH__SHIFT 0x0000000b
+#define DPG_PIPE_STUTTER_CONTROL_NONLPTCH__STUTTER_URGENT_IN_NOT_SELF_REFRESH_NONLPTCH_MASK 0x00000400L
+#define DPG_PIPE_STUTTER_CONTROL_NONLPTCH__STUTTER_URGENT_IN_NOT_SELF_REFRESH_NONLPTCH__SHIFT 0x0000000a
+#define DPG_PIPE_STUTTER_CONTROL_NONLPTCH__STUTTER_WM_HIGH_EXCLUDES_VBLANK_NONLPTCH_MASK 0x00000200L
+#define DPG_PIPE_STUTTER_CONTROL_NONLPTCH__STUTTER_WM_HIGH_EXCLUDES_VBLANK_NONLPTCH__SHIFT 0x00000009
+#define DPG_PIPE_STUTTER_CONTROL_NONLPTCH__STUTTER_WM_HIGH_FORCE_ON_NONLPTCH_MASK 0x00000100L
+#define DPG_PIPE_STUTTER_CONTROL_NONLPTCH__STUTTER_WM_HIGH_FORCE_ON_NONLPTCH__SHIFT 0x00000008
+#define DPG_PIPE_STUTTER_CONTROL__STUTTER_ENABLE_MASK 0x00000001L
+#define DPG_PIPE_STUTTER_CONTROL__STUTTER_ENABLE__SHIFT 0x00000000
+#define DPG_PIPE_STUTTER_CONTROL__STUTTER_EXIT_SELF_REFRESH_WATERMARK_MASK 0xffff0000L
+#define DPG_PIPE_STUTTER_CONTROL__STUTTER_EXIT_SELF_REFRESH_WATERMARK__SHIFT 0x00000010
+#define DPG_PIPE_STUTTER_CONTROL__STUTTER_IGNORE_CURSOR_MASK 0x00000010L
+#define DPG_PIPE_STUTTER_CONTROL__STUTTER_IGNORE_CURSOR__SHIFT 0x00000004
+#define DPG_PIPE_STUTTER_CONTROL__STUTTER_IGNORE_FBC_MASK 0x00000080L
+#define DPG_PIPE_STUTTER_CONTROL__STUTTER_IGNORE_FBC__SHIFT 0x00000007
+#define DPG_PIPE_STUTTER_CONTROL__STUTTER_IGNORE_ICON_MASK 0x00000020L
+#define DPG_PIPE_STUTTER_CONTROL__STUTTER_IGNORE_ICON__SHIFT 0x00000005
+#define DPG_PIPE_STUTTER_CONTROL__STUTTER_IGNORE_VGA_MASK 0x00000040L
+#define DPG_PIPE_STUTTER_CONTROL__STUTTER_IGNORE_VGA__SHIFT 0x00000006
+#define DPG_PIPE_STUTTER_CONTROL__STUTTER_SELF_REFRESH_FORCE_ON_MASK 0x00000800L
+#define DPG_PIPE_STUTTER_CONTROL__STUTTER_SELF_REFRESH_FORCE_ON__SHIFT 0x0000000b
+#define DPG_PIPE_STUTTER_CONTROL__STUTTER_URGENT_IN_NOT_SELF_REFRESH_MASK 0x00000400L
+#define DPG_PIPE_STUTTER_CONTROL__STUTTER_URGENT_IN_NOT_SELF_REFRESH__SHIFT 0x0000000a
+#define DPG_PIPE_STUTTER_CONTROL__STUTTER_WM_HIGH_EXCLUDES_VBLANK_MASK 0x00000200L
+#define DPG_PIPE_STUTTER_CONTROL__STUTTER_WM_HIGH_EXCLUDES_VBLANK__SHIFT 0x00000009
+#define DPG_PIPE_STUTTER_CONTROL__STUTTER_WM_HIGH_FORCE_ON_MASK 0x00000100L
+#define DPG_PIPE_STUTTER_CONTROL__STUTTER_WM_HIGH_FORCE_ON__SHIFT 0x00000008
+#define DPG_PIPE_URGENCY_CONTROL__URGENCY_HIGH_WATERMARK_MASK 0xffff0000L
+#define DPG_PIPE_URGENCY_CONTROL__URGENCY_HIGH_WATERMARK__SHIFT 0x00000010
+#define DPG_PIPE_URGENCY_CONTROL__URGENCY_LOW_WATERMARK_MASK 0x0000ffffL
+#define DPG_PIPE_URGENCY_CONTROL__URGENCY_LOW_WATERMARK__SHIFT 0x00000000
+#define DPG_TEST_DEBUG_DATA__DPG_TEST_DEBUG_DATA_MASK 0xffffffffL
+#define DPG_TEST_DEBUG_DATA__DPG_TEST_DEBUG_DATA__SHIFT 0x00000000
+#define DPG_TEST_DEBUG_INDEX__DPG_TEST_DEBUG_INDEX_MASK 0x000000ffL
+#define DPG_TEST_DEBUG_INDEX__DPG_TEST_DEBUG_INDEX__SHIFT 0x00000000
+#define DPG_TEST_DEBUG_INDEX__DPG_TEST_DEBUG_WRITE_EN_MASK 0x00000100L
+#define DPG_TEST_DEBUG_INDEX__DPG_TEST_DEBUG_WRITE_EN__SHIFT 0x00000008
+#define DP_HBR2_EYE_PATTERN__DP_HBR2_EYE_PATTERN_ENABLE_MASK 0x00000001L
+#define DP_HBR2_EYE_PATTERN__DP_HBR2_EYE_PATTERN_ENABLE__SHIFT 0x00000000
+#define DP_LINK_CNTL__DP_EMBEDDED_PANEL_MODE_MASK 0x00020000L
+#define DP_LINK_CNTL__DP_EMBEDDED_PANEL_MODE__SHIFT 0x00000011
+#define DP_LINK_CNTL__DP_LINK_STATUS_MASK 0x00000100L
+#define DP_LINK_CNTL__DP_LINK_STATUS__SHIFT 0x00000008
+#define DP_LINK_CNTL__DP_LINK_TRAINING_COMPLETE_MASK 0x00000010L
+#define DP_LINK_CNTL__DP_LINK_TRAINING_COMPLETE__SHIFT 0x00000004
+#define DP_LINK_FRAMING_CNTL__DP_IDLE_BS_INTERVAL_MASK 0x0003ffffL
+#define DP_LINK_FRAMING_CNTL__DP_IDLE_BS_INTERVAL__SHIFT 0x00000000
+#define DP_LINK_FRAMING_CNTL__DP_VBID_DISABLE_MASK 0x01000000L
+#define DP_LINK_FRAMING_CNTL__DP_VBID_DISABLE__SHIFT 0x00000018
+#define DP_LINK_FRAMING_CNTL__DP_VID_ENHANCED_FRAME_MODE_MASK 0x10000000L
+#define DP_LINK_FRAMING_CNTL__DP_VID_ENHANCED_FRAME_MODE__SHIFT 0x0000001c
+#define DP_MSA_COLORIMETRY__DP_MSA_MISC0_OVERRIDE_ENABLE_MASK 0x00000100L
+#define DP_MSA_COLORIMETRY__DP_MSA_MISC0_OVERRIDE_ENABLE__SHIFT 0x00000008
+#define DP_MSA_COLORIMETRY__DP_MSA_MISC0_OVERRIDE_MASK 0x000000ffL
+#define DP_MSA_COLORIMETRY__DP_MSA_MISC0_OVERRIDE__SHIFT 0x00000000
+#define DP_MSA_MISC__DP_MSA_MISC1_MASK 0x000000f8L
+#define DP_MSA_MISC__DP_MSA_MISC1__SHIFT 0x00000003
+#define DP_MSA_MISC__DP_MSA_MISC2_MASK 0x0000ff00L
+#define DP_MSA_MISC__DP_MSA_MISC2__SHIFT 0x00000008
+#define DP_MSA_MISC__DP_MSA_MISC3_MASK 0x00ff0000L
+#define DP_MSA_MISC__DP_MSA_MISC3__SHIFT 0x00000010
+#define DP_MSA_MISC__DP_MSA_MISC4_MASK 0xff000000L
+#define DP_MSA_MISC__DP_MSA_MISC4__SHIFT 0x00000018
+#define DP_MSA_V_TIMING_OVERRIDE1__DP_MSA_V_TIMING_OVERRIDE_EN_MASK 0x00000001L
+#define DP_MSA_V_TIMING_OVERRIDE1__DP_MSA_V_TIMING_OVERRIDE_EN__SHIFT 0x00000000
+#define DP_MSA_V_TIMING_OVERRIDE1__DP_MSA_V_TOTAL_OVERRIDE_MASK 0x0001fff0L
+#define DP_MSA_V_TIMING_OVERRIDE1__DP_MSA_V_TOTAL_OVERRIDE__SHIFT 0x00000004
+#define DP_MSA_V_TIMING_OVERRIDE2__DP_MSA_V_BLANK_END_OVERRIDE_MASK 0x1fff0000L
+#define DP_MSA_V_TIMING_OVERRIDE2__DP_MSA_V_BLANK_END_OVERRIDE__SHIFT 0x00000010
+#define DP_MSA_V_TIMING_OVERRIDE2__DP_MSA_V_BLANK_START_OVERRIDE_MASK 0x00001fffL
+#define DP_MSA_V_TIMING_OVERRIDE2__DP_MSA_V_BLANK_START_OVERRIDE__SHIFT 0x00000000
+#define DP_MSE_LINK_TIMING__DP_MSE_LINK_FRAME_MASK 0x000003ffL
+#define DP_MSE_LINK_TIMING__DP_MSE_LINK_FRAME__SHIFT 0x00000000
+#define DP_MSE_LINK_TIMING__DP_MSE_LINK_LINE_MASK 0x00030000L
+#define DP_MSE_LINK_TIMING__DP_MSE_LINK_LINE__SHIFT 0x00000010
+#define DP_MSE_MISC_CNTL__DP_MSE_BLANK_CODE_MASK 0x00000001L
+#define DP_MSE_MISC_CNTL__DP_MSE_BLANK_CODE__SHIFT 0x00000000
+#define DP_MSE_MISC_CNTL__DP_MSE_TIMESTAMP_MODE_MASK 0x00000010L
+#define DP_MSE_MISC_CNTL__DP_MSE_TIMESTAMP_MODE__SHIFT 0x00000004
+#define DP_MSE_MISC_CNTL__DP_MSE_ZERO_ENCODER_MASK 0x00000100L
+#define DP_MSE_MISC_CNTL__DP_MSE_ZERO_ENCODER__SHIFT 0x00000008
+#define DP_MSE_RATE_CNTL__DP_MSE_RATE_X_MASK 0xfc000000L
+#define DP_MSE_RATE_CNTL__DP_MSE_RATE_X__SHIFT 0x0000001a
+#define DP_MSE_RATE_CNTL__DP_MSE_RATE_Y_MASK 0x03ffffffL
+#define DP_MSE_RATE_CNTL__DP_MSE_RATE_Y__SHIFT 0x00000000
+#define DP_MSE_RATE_UPDATE__DP_MSE_RATE_UPDATE_PENDING_MASK 0x00000001L
+#define DP_MSE_RATE_UPDATE__DP_MSE_RATE_UPDATE_PENDING__SHIFT 0x00000000
+#define DP_MSE_SAT0__DP_MSE_SAT_SLOT_COUNT0_MASK 0x00003f00L
+#define DP_MSE_SAT0__DP_MSE_SAT_SLOT_COUNT0__SHIFT 0x00000008
+#define DP_MSE_SAT0__DP_MSE_SAT_SLOT_COUNT1_MASK 0x3f000000L
+#define DP_MSE_SAT0__DP_MSE_SAT_SLOT_COUNT1__SHIFT 0x00000018
+#define DP_MSE_SAT0__DP_MSE_SAT_SRC0_MASK 0x00000007L
+#define DP_MSE_SAT0__DP_MSE_SAT_SRC0__SHIFT 0x00000000
+#define DP_MSE_SAT0__DP_MSE_SAT_SRC1_MASK 0x00070000L
+#define DP_MSE_SAT0__DP_MSE_SAT_SRC1__SHIFT 0x00000010
+#define DP_MSE_SAT1__DP_MSE_SAT_SLOT_COUNT2_MASK 0x00003f00L
+#define DP_MSE_SAT1__DP_MSE_SAT_SLOT_COUNT2__SHIFT 0x00000008
+#define DP_MSE_SAT1__DP_MSE_SAT_SLOT_COUNT3_MASK 0x3f000000L
+#define DP_MSE_SAT1__DP_MSE_SAT_SLOT_COUNT3__SHIFT 0x00000018
+#define DP_MSE_SAT1__DP_MSE_SAT_SRC2_MASK 0x00000007L
+#define DP_MSE_SAT1__DP_MSE_SAT_SRC2__SHIFT 0x00000000
+#define DP_MSE_SAT1__DP_MSE_SAT_SRC3_MASK 0x00070000L
+#define DP_MSE_SAT1__DP_MSE_SAT_SRC3__SHIFT 0x00000010
+#define DP_MSE_SAT2__DP_MSE_SAT_SLOT_COUNT4_MASK 0x00003f00L
+#define DP_MSE_SAT2__DP_MSE_SAT_SLOT_COUNT4__SHIFT 0x00000008
+#define DP_MSE_SAT2__DP_MSE_SAT_SLOT_COUNT5_MASK 0x3f000000L
+#define DP_MSE_SAT2__DP_MSE_SAT_SLOT_COUNT5__SHIFT 0x00000018
+#define DP_MSE_SAT2__DP_MSE_SAT_SRC4_MASK 0x00000007L
+#define DP_MSE_SAT2__DP_MSE_SAT_SRC4__SHIFT 0x00000000
+#define DP_MSE_SAT2__DP_MSE_SAT_SRC5_MASK 0x00070000L
+#define DP_MSE_SAT2__DP_MSE_SAT_SRC5__SHIFT 0x00000010
+#define DP_MSE_SAT_UPDATE__DP_MSE_16_MTP_KEEPOUT_MASK 0x00000100L
+#define DP_MSE_SAT_UPDATE__DP_MSE_16_MTP_KEEPOUT__SHIFT 0x00000008
+#define DP_MSE_SAT_UPDATE__DP_MSE_SAT_UPDATE_MASK 0x00000003L
+#define DP_MSE_SAT_UPDATE__DP_MSE_SAT_UPDATE__SHIFT 0x00000000
+#define DP_PIXEL_FORMAT__DP_COMPONENT_DEPTH_MASK 0x07000000L
+#define DP_PIXEL_FORMAT__DP_COMPONENT_DEPTH__SHIFT 0x00000018
+#define DP_PIXEL_FORMAT__DP_DYN_RANGE_MASK 0x00000100L
+#define DP_PIXEL_FORMAT__DP_DYN_RANGE__SHIFT 0x00000008
+#define DP_PIXEL_FORMAT__DP_PIXEL_ENCODING_MASK 0x00000003L
+#define DP_PIXEL_FORMAT__DP_PIXEL_ENCODING__SHIFT 0x00000000
+#define DP_PIXEL_FORMAT__DP_YCBCR_RANGE_MASK 0x00010000L
+#define DP_PIXEL_FORMAT__DP_YCBCR_RANGE__SHIFT 0x00000010
+#define DP_SEC_AUD_M__DP_SEC_AUD_M_MASK 0x00ffffffL
+#define DP_SEC_AUD_M__DP_SEC_AUD_M__SHIFT 0x00000000
+#define DP_SEC_AUD_M_READBACK__DP_SEC_AUD_M_READBACK_MASK 0x00ffffffL
+#define DP_SEC_AUD_M_READBACK__DP_SEC_AUD_M_READBACK__SHIFT 0x00000000
+#define DP_SEC_AUD_N__DP_SEC_AUD_N_MASK 0x00ffffffL
+#define DP_SEC_AUD_N__DP_SEC_AUD_N__SHIFT 0x00000000
+#define DP_SEC_AUD_N_READBACK__DP_SEC_AUD_N_READBACK_MASK 0x00ffffffL
+#define DP_SEC_AUD_N_READBACK__DP_SEC_AUD_N_READBACK__SHIFT 0x00000000
+#define DP_SEC_CNTL1__DP_SEC_ISRC_ENABLE_MASK 0x00000001L
+#define DP_SEC_CNTL1__DP_SEC_ISRC_ENABLE__SHIFT 0x00000000
+#define DP_SEC_CNTL__DP_SEC_ACM_ENABLE_MASK 0x00010000L
+#define DP_SEC_CNTL__DP_SEC_ACM_ENABLE__SHIFT 0x00000010
+#define DP_SEC_CNTL__DP_SEC_AIP_ENABLE_MASK 0x00001000L
+#define DP_SEC_CNTL__DP_SEC_AIP_ENABLE__SHIFT 0x0000000c
+#define DP_SEC_CNTL__DP_SEC_ASP_ENABLE_MASK 0x00000010L
+#define DP_SEC_CNTL__DP_SEC_ASP_ENABLE__SHIFT 0x00000004
+#define DP_SEC_CNTL__DP_SEC_ATP_ENABLE_MASK 0x00000100L
+#define DP_SEC_CNTL__DP_SEC_ATP_ENABLE__SHIFT 0x00000008
+#define DP_SEC_CNTL__DP_SEC_AVI_ENABLE_MASK 0x01000000L
+#define DP_SEC_CNTL__DP_SEC_AVI_ENABLE__SHIFT 0x00000018
+#define DP_SEC_CNTL__DP_SEC_GSP0_ENABLE_MASK 0x00100000L
+#define DP_SEC_CNTL__DP_SEC_GSP0_ENABLE__SHIFT 0x00000014
+#define DP_SEC_CNTL__DP_SEC_GSP1_ENABLE_MASK 0x00200000L
+#define DP_SEC_CNTL__DP_SEC_GSP1_ENABLE__SHIFT 0x00000015
+#define DP_SEC_CNTL__DP_SEC_GSP2_ENABLE_MASK 0x00400000L
+#define DP_SEC_CNTL__DP_SEC_GSP2_ENABLE__SHIFT 0x00000016
+#define DP_SEC_CNTL__DP_SEC_GSP3_ENABLE_MASK 0x00800000L
+#define DP_SEC_CNTL__DP_SEC_GSP3_ENABLE__SHIFT 0x00000017
+#define DP_SEC_CNTL__DP_SEC_MPG_ENABLE_MASK 0x10000000L
+#define DP_SEC_CNTL__DP_SEC_MPG_ENABLE__SHIFT 0x0000001c
+#define DP_SEC_CNTL__DP_SEC_STREAM_ENABLE_MASK 0x00000001L
+#define DP_SEC_CNTL__DP_SEC_STREAM_ENABLE__SHIFT 0x00000000
+#define DP_SEC_FRAMING1__DP_SEC_FRAME_START_LOCATION_MASK 0x00000fffL
+#define DP_SEC_FRAMING1__DP_SEC_FRAME_START_LOCATION__SHIFT 0x00000000
+#define DP_SEC_FRAMING1__DP_SEC_VBLANK_TRANSMIT_WIDTH_MASK 0xffff0000L
+#define DP_SEC_FRAMING1__DP_SEC_VBLANK_TRANSMIT_WIDTH__SHIFT 0x00000010
+#define DP_SEC_FRAMING2__DP_SEC_HBLANK_TRANSMIT_WIDTH_MASK 0xffff0000L
+#define DP_SEC_FRAMING2__DP_SEC_HBLANK_TRANSMIT_WIDTH__SHIFT 0x00000010
+#define DP_SEC_FRAMING2__DP_SEC_START_POSITION_MASK 0x0000ffffL
+#define DP_SEC_FRAMING2__DP_SEC_START_POSITION__SHIFT 0x00000000
+#define DP_SEC_FRAMING3__DP_SEC_IDLE_FRAME_SIZE_MASK 0x00003fffL
+#define DP_SEC_FRAMING3__DP_SEC_IDLE_FRAME_SIZE__SHIFT 0x00000000
+#define DP_SEC_FRAMING3__DP_SEC_IDLE_TRANSMIT_WIDTH_MASK 0xffff0000L
+#define DP_SEC_FRAMING3__DP_SEC_IDLE_TRANSMIT_WIDTH__SHIFT 0x00000010
+#define DP_SEC_FRAMING4__DP_SEC_AUDIO_MUTE_MASK 0x10000000L
+#define DP_SEC_FRAMING4__DP_SEC_AUDIO_MUTE__SHIFT 0x0000001c
+#define DP_SEC_FRAMING4__DP_SEC_AUDIO_MUTE_STATUS_MASK 0x20000000L
+#define DP_SEC_FRAMING4__DP_SEC_AUDIO_MUTE_STATUS__SHIFT 0x0000001d
+#define DP_SEC_FRAMING4__DP_SEC_COLLISION_ACK_MASK 0x01000000L
+#define DP_SEC_FRAMING4__DP_SEC_COLLISION_ACK__SHIFT 0x00000018
+#define DP_SEC_FRAMING4__DP_SEC_COLLISION_STATUS_MASK 0x00100000L
+#define DP_SEC_FRAMING4__DP_SEC_COLLISION_STATUS__SHIFT 0x00000014
+#define DP_SEC_PACKET_CNTL__DP_SEC_ASP_CHANNEL_COUNT_OVERRIDE_MASK 0x00010000L
+#define DP_SEC_PACKET_CNTL__DP_SEC_ASP_CHANNEL_COUNT_OVERRIDE__SHIFT 0x00000010
+#define DP_SEC_PACKET_CNTL__DP_SEC_ASP_CODING_TYPE_MASK 0x0000000eL
+#define DP_SEC_PACKET_CNTL__DP_SEC_ASP_CODING_TYPE__SHIFT 0x00000001
+#define DP_SEC_PACKET_CNTL__DP_SEC_ASP_PRIORITY_MASK 0x00000010L
+#define DP_SEC_PACKET_CNTL__DP_SEC_ASP_PRIORITY__SHIFT 0x00000004
+#define DP_SEC_PACKET_CNTL__DP_SEC_VERSION_MASK 0x00003f00L
+#define DP_SEC_PACKET_CNTL__DP_SEC_VERSION__SHIFT 0x00000008
+#define DP_SEC_TIMESTAMP__DP_SEC_TIMESTAMP_MODE_MASK 0x00000001L
+#define DP_SEC_TIMESTAMP__DP_SEC_TIMESTAMP_MODE__SHIFT 0x00000000
+#define DP_STEER_FIFO__DP_STEER_FIFO_RESET_MASK 0x00000001L
+#define DP_STEER_FIFO__DP_STEER_FIFO_RESET__SHIFT 0x00000000
+#define DP_STEER_FIFO__DP_STEER_OVERFLOW_ACK_MASK 0x00000040L
+#define DP_STEER_FIFO__DP_STEER_OVERFLOW_ACK__SHIFT 0x00000006
+#define DP_STEER_FIFO__DP_STEER_OVERFLOW_FLAG_MASK 0x00000010L
+#define DP_STEER_FIFO__DP_STEER_OVERFLOW_FLAG__SHIFT 0x00000004
+#define DP_STEER_FIFO__DP_STEER_OVERFLOW_INT_MASK 0x00000020L
+#define DP_STEER_FIFO__DP_STEER_OVERFLOW_INT__SHIFT 0x00000005
+#define DP_STEER_FIFO__DP_STEER_OVERFLOW_MASK_MASK 0x00000080L
+#define DP_STEER_FIFO__DP_STEER_OVERFLOW_MASK__SHIFT 0x00000007
+#define DP_STEER_FIFO__DP_TU_OVERFLOW_ACK_MASK 0x00001000L
+#define DP_STEER_FIFO__DP_TU_OVERFLOW_ACK__SHIFT 0x0000000c
+#define DP_STEER_FIFO__DP_TU_OVERFLOW_FLAG_MASK 0x00000100L
+#define DP_STEER_FIFO__DP_TU_OVERFLOW_FLAG__SHIFT 0x00000008
+#define DP_TEST_DEBUG_DATA__DP_TEST_DEBUG_DATA_MASK 0xffffffffL
+#define DP_TEST_DEBUG_DATA__DP_TEST_DEBUG_DATA__SHIFT 0x00000000
+#define DP_TEST_DEBUG_INDEX__DP_TEST_DEBUG_INDEX_MASK 0x000000ffL
+#define DP_TEST_DEBUG_INDEX__DP_TEST_DEBUG_INDEX__SHIFT 0x00000000
+#define DP_TEST_DEBUG_INDEX__DP_TEST_DEBUG_WRITE_EN_MASK 0x00000100L
+#define DP_TEST_DEBUG_INDEX__DP_TEST_DEBUG_WRITE_EN__SHIFT 0x00000008
+#define DP_VID_INTERRUPT_CNTL__DP_VID_STREAM_DISABLE_ACK_MASK 0x00000002L
+#define DP_VID_INTERRUPT_CNTL__DP_VID_STREAM_DISABLE_ACK__SHIFT 0x00000001
+#define DP_VID_INTERRUPT_CNTL__DP_VID_STREAM_DISABLE_INT_MASK 0x00000001L
+#define DP_VID_INTERRUPT_CNTL__DP_VID_STREAM_DISABLE_INT__SHIFT 0x00000000
+#define DP_VID_INTERRUPT_CNTL__DP_VID_STREAM_DISABLE_MASK_MASK 0x00000004L
+#define DP_VID_INTERRUPT_CNTL__DP_VID_STREAM_DISABLE_MASK__SHIFT 0x00000002
+#define DP_VID_M__DP_VID_M_MASK 0x00ffffffL
+#define DP_VID_M__DP_VID_M__SHIFT 0x00000000
+#define DP_VID_MSA_VBID__DP_VID_MSA_LOCATION_MASK 0x00000fffL
+#define DP_VID_MSA_VBID__DP_VID_MSA_LOCATION__SHIFT 0x00000000
+#define DP_VID_MSA_VBID__DP_VID_MSA_TOP_FIELD_MODE_MASK 0x00010000L
+#define DP_VID_MSA_VBID__DP_VID_MSA_TOP_FIELD_MODE__SHIFT 0x00000010
+#define DP_VID_MSA_VBID__DP_VID_VBID_FIELD_POL_MASK 0x01000000L
+#define DP_VID_MSA_VBID__DP_VID_VBID_FIELD_POL__SHIFT 0x00000018
+#define DP_VID_N__DP_VID_N_MASK 0x00ffffffL
+#define DP_VID_N__DP_VID_N__SHIFT 0x00000000
+#define DP_VID_STREAM_CNTL__DP_VID_STREAM_CHANGE_KEEPOUT_MASK 0x00100000L
+#define DP_VID_STREAM_CNTL__DP_VID_STREAM_CHANGE_KEEPOUT__SHIFT 0x00000014
+#define DP_VID_STREAM_CNTL__DP_VID_STREAM_DIS_DEFER_MASK 0x00000300L
+#define DP_VID_STREAM_CNTL__DP_VID_STREAM_DIS_DEFER__SHIFT 0x00000008
+#define DP_VID_STREAM_CNTL__DP_VID_STREAM_ENABLE_MASK 0x00000001L
+#define DP_VID_STREAM_CNTL__DP_VID_STREAM_ENABLE__SHIFT 0x00000000
+#define DP_VID_STREAM_CNTL__DP_VID_STREAM_STATUS_MASK 0x00010000L
+#define DP_VID_STREAM_CNTL__DP_VID_STREAM_STATUS__SHIFT 0x00000010
+#define DP_VID_TIMING__DP_VID_M_N_GEN_EN_MASK 0x00000100L
+#define DP_VID_TIMING__DP_VID_M_N_GEN_EN__SHIFT 0x00000008
+#define DP_VID_TIMING__DP_VID_N_DIV_MASK 0xff000000L
+#define DP_VID_TIMING__DP_VID_N_DIV__SHIFT 0x00000018
+#define DP_VID_TIMING__DP_VID_TIMING_MODE_MASK 0x00000001L
+#define DP_VID_TIMING__DP_VID_TIMING_MODE__SHIFT 0x00000000
+#define DVOACLKC_CNTL__DVOACLKC_COARSE_ADJUST_EN_MASK 0x00020000L
+#define DVOACLKC_CNTL__DVOACLKC_COARSE_ADJUST_EN__SHIFT 0x00000011
+#define DVOACLKC_CNTL__DVOACLKC_COARSE_SKEW_CNTL_MASK 0x00001f00L
+#define DVOACLKC_CNTL__DVOACLKC_COARSE_SKEW_CNTL__SHIFT 0x00000008
+#define DVOACLKC_CNTL__DVOACLKC_FINE_ADJUST_EN_MASK 0x00010000L
+#define DVOACLKC_CNTL__DVOACLKC_FINE_ADJUST_EN__SHIFT 0x00000010
+#define DVOACLKC_CNTL__DVOACLKC_FINE_SKEW_CNTL_MASK 0x00000007L
+#define DVOACLKC_CNTL__DVOACLKC_FINE_SKEW_CNTL__SHIFT 0x00000000
+#define DVOACLKC_CNTL__DVOACLKC_IN_PHASE_MASK 0x00040000L
+#define DVOACLKC_CNTL__DVOACLKC_IN_PHASE__SHIFT 0x00000012
+#define DVOACLKC_MVP_CNTL__DVOACLKC_MVP_COARSE_ADJUST_EN_MASK 0x00020000L
+#define DVOACLKC_MVP_CNTL__DVOACLKC_MVP_COARSE_ADJUST_EN__SHIFT 0x00000011
+#define DVOACLKC_MVP_CNTL__DVOACLKC_MVP_COARSE_SKEW_CNTL_MASK 0x00001f00L
+#define DVOACLKC_MVP_CNTL__DVOACLKC_MVP_COARSE_SKEW_CNTL__SHIFT 0x00000008
+#define DVOACLKC_MVP_CNTL__DVOACLKC_MVP_FINE_ADJUST_EN_MASK 0x00010000L
+#define DVOACLKC_MVP_CNTL__DVOACLKC_MVP_FINE_ADJUST_EN__SHIFT 0x00000010
+#define DVOACLKC_MVP_CNTL__DVOACLKC_MVP_FINE_SKEW_CNTL_MASK 0x00000007L
+#define DVOACLKC_MVP_CNTL__DVOACLKC_MVP_FINE_SKEW_CNTL__SHIFT 0x00000000
+#define DVOACLKC_MVP_CNTL__DVOACLKC_MVP_IN_PHASE_MASK 0x00040000L
+#define DVOACLKC_MVP_CNTL__DVOACLKC_MVP_IN_PHASE__SHIFT 0x00000012
+#define DVOACLKC_MVP_CNTL__DVOACLKC_MVP_SKEW_PHASE_OVERRIDE_MASK 0x00100000L
+#define DVOACLKC_MVP_CNTL__DVOACLKC_MVP_SKEW_PHASE_OVERRIDE__SHIFT 0x00000014
+#define DVOACLKC_MVP_CNTL__MVP_CLK_A_SRC_SEL_MASK 0x03000000L
+#define DVOACLKC_MVP_CNTL__MVP_CLK_A_SRC_SEL__SHIFT 0x00000018
+#define DVOACLKC_MVP_CNTL__MVP_CLK_B_SRC_SEL_MASK 0x30000000L
+#define DVOACLKC_MVP_CNTL__MVP_CLK_B_SRC_SEL__SHIFT 0x0000001c
+#define DVOACLKD_CNTL__DVOACLKD_COARSE_ADJUST_EN_MASK 0x00020000L
+#define DVOACLKD_CNTL__DVOACLKD_COARSE_ADJUST_EN__SHIFT 0x00000011
+#define DVOACLKD_CNTL__DVOACLKD_COARSE_SKEW_CNTL_MASK 0x00001f00L
+#define DVOACLKD_CNTL__DVOACLKD_COARSE_SKEW_CNTL__SHIFT 0x00000008
+#define DVOACLKD_CNTL__DVOACLKD_FINE_ADJUST_EN_MASK 0x00010000L
+#define DVOACLKD_CNTL__DVOACLKD_FINE_ADJUST_EN__SHIFT 0x00000010
+#define DVOACLKD_CNTL__DVOACLKD_FINE_SKEW_CNTL_MASK 0x00000007L
+#define DVOACLKD_CNTL__DVOACLKD_FINE_SKEW_CNTL__SHIFT 0x00000000
+#define DVOACLKD_CNTL__DVOACLKD_IN_PHASE_MASK 0x00040000L
+#define DVOACLKD_CNTL__DVOACLKD_IN_PHASE__SHIFT 0x00000012
+#define DVO_CLK_ENABLE__DVO_CLK_ENABLE_MASK 0x00000001L
+#define DVO_CLK_ENABLE__DVO_CLK_ENABLE__SHIFT 0x00000000
+#define DVO_CONTROL__DVO_COLOR_FORMAT_MASK 0x03000000L
+#define DVO_CONTROL__DVO_COLOR_FORMAT__SHIFT 0x00000018
+#define DVO_CONTROL__DVO_CTL3_MASK 0x80000000L
+#define DVO_CONTROL__DVO_CTL3__SHIFT 0x0000001f
+#define DVO_CONTROL__DVO_DUAL_CHANNEL_EN_MASK 0x00000100L
+#define DVO_CONTROL__DVO_DUAL_CHANNEL_EN__SHIFT 0x00000008
+#define DVO_CONTROL__DVO_INVERT_DVOCLK_MASK 0x00040000L
+#define DVO_CONTROL__DVO_INVERT_DVOCLK__SHIFT 0x00000012
+#define DVO_CONTROL__DVO_RATE_SELECT_MASK 0x00000001L
+#define DVO_CONTROL__DVO_RATE_SELECT__SHIFT 0x00000000
+#define DVO_CONTROL__DVO_RESET_FIFO_MASK 0x00010000L
+#define DVO_CONTROL__DVO_RESET_FIFO__SHIFT 0x00000010
+#define DVO_CONTROL__DVO_SDRCLK_SEL_MASK 0x00000002L
+#define DVO_CONTROL__DVO_SDRCLK_SEL__SHIFT 0x00000001
+#define DVO_CONTROL__DVO_SYNC_PHASE_MASK 0x00020000L
+#define DVO_CONTROL__DVO_SYNC_PHASE__SHIFT 0x00000011
+#define DVO_CRC2_SIG_MASK__DVO_CRC2_SIG_MASK_MASK 0x07ffffffL
+#define DVO_CRC2_SIG_MASK__DVO_CRC2_SIG_MASK__SHIFT 0x00000000
+#define DVO_CRC2_SIG_RESULT__DVO_CRC2_SIG_RESULT_MASK 0x07ffffffL
+#define DVO_CRC2_SIG_RESULT__DVO_CRC2_SIG_RESULT__SHIFT 0x00000000
+#define DVO_CRC_EN__DVO_CRC2_EN_MASK 0x00010000L
+#define DVO_CRC_EN__DVO_CRC2_EN__SHIFT 0x00000010
+#define DVO_ENABLE__DVO_ENABLE_MASK 0x00000001L
+#define DVO_ENABLE__DVO_ENABLE__SHIFT 0x00000000
+#define DVO_FIFO_ERROR_STATUS__DVO_FIFO_CAL_AVERAGE_LEVEL_MASK 0x0000fc00L
+#define DVO_FIFO_ERROR_STATUS__DVO_FIFO_CAL_AVERAGE_LEVEL__SHIFT 0x0000000a
+#define DVO_FIFO_ERROR_STATUS__DVO_FIFO_CALIBRATED_MASK 0x20000000L
+#define DVO_FIFO_ERROR_STATUS__DVO_FIFO_CALIBRATED__SHIFT 0x0000001d
+#define DVO_FIFO_ERROR_STATUS__DVO_FIFO_ERROR_ACK_MASK 0x00000100L
+#define DVO_FIFO_ERROR_STATUS__DVO_FIFO_ERROR_ACK__SHIFT 0x00000008
+#define DVO_FIFO_ERROR_STATUS__DVO_FIFO_FORCE_RECAL_AVERAGE_MASK 0x40000000L
+#define DVO_FIFO_ERROR_STATUS__DVO_FIFO_FORCE_RECAL_AVERAGE__SHIFT 0x0000001e
+#define DVO_FIFO_ERROR_STATUS__DVO_FIFO_FORCE_RECOMP_MINMAX_MASK 0x80000000L
+#define DVO_FIFO_ERROR_STATUS__DVO_FIFO_FORCE_RECOMP_MINMAX__SHIFT 0x0000001f
+#define DVO_FIFO_ERROR_STATUS__DVO_FIFO_LEVEL_ERROR_MASK 0x00000001L
+#define DVO_FIFO_ERROR_STATUS__DVO_FIFO_LEVEL_ERROR__SHIFT 0x00000000
+#define DVO_FIFO_ERROR_STATUS__DVO_FIFO_MAXIMUM_LEVEL_MASK 0x000f0000L
+#define DVO_FIFO_ERROR_STATUS__DVO_FIFO_MAXIMUM_LEVEL__SHIFT 0x00000010
+#define DVO_FIFO_ERROR_STATUS__DVO_FIFO_MINIMUM_LEVEL_MASK 0x03c00000L
+#define DVO_FIFO_ERROR_STATUS__DVO_FIFO_MINIMUM_LEVEL__SHIFT 0x00000016
+#define DVO_FIFO_ERROR_STATUS__DVO_FIFO_OVERWRITE_LEVEL_MASK 0x000000fcL
+#define DVO_FIFO_ERROR_STATUS__DVO_FIFO_OVERWRITE_LEVEL__SHIFT 0x00000002
+#define DVO_FIFO_ERROR_STATUS__DVO_FIFO_USE_OVERWRITE_LEVEL_MASK 0x00000002L
+#define DVO_FIFO_ERROR_STATUS__DVO_FIFO_USE_OVERWRITE_LEVEL__SHIFT 0x00000001
+#define DVO_OUTPUT__DVO_CLOCK_MODE_MASK 0x00000100L
+#define DVO_OUTPUT__DVO_CLOCK_MODE__SHIFT 0x00000008
+#define DVO_OUTPUT__DVO_OUTPUT_ENABLE_MODE_MASK 0x00000003L
+#define DVO_OUTPUT__DVO_OUTPUT_ENABLE_MODE__SHIFT 0x00000000
+#define DVO_SKEW_ADJUST__DVO_SKEW_ADJUST_MASK 0xffffffffL
+#define DVO_SKEW_ADJUST__DVO_SKEW_ADJUST__SHIFT 0x00000000
+#define DVO_SOURCE_SELECT__DVO_SOURCE_SELECT_MASK 0x00000007L
+#define DVO_SOURCE_SELECT__DVO_SOURCE_SELECT__SHIFT 0x00000000
+#define DVO_SOURCE_SELECT__DVO_STEREOSYNC_SELECT_MASK 0x00070000L
+#define DVO_SOURCE_SELECT__DVO_STEREOSYNC_SELECT__SHIFT 0x00000010
+#define DVO_STRENGTH_CONTROL__DVOCLK_SN_MASK 0x0000f000L
+#define DVO_STRENGTH_CONTROL__DVOCLK_SN__SHIFT 0x0000000c
+#define DVO_STRENGTH_CONTROL__DVOCLK_SP_MASK 0x00000f00L
+#define DVO_STRENGTH_CONTROL__DVOCLK_SP__SHIFT 0x00000008
+#define DVO_STRENGTH_CONTROL__DVO_LSB_VMODE_MASK 0x10000000L
+#define DVO_STRENGTH_CONTROL__DVO_LSB_VMODE__SHIFT 0x0000001c
+#define DVO_STRENGTH_CONTROL__DVO_MSB_VMODE_MASK 0x20000000L
+#define DVO_STRENGTH_CONTROL__DVO_MSB_VMODE__SHIFT 0x0000001d
+#define DVO_STRENGTH_CONTROL__DVO_SN_MASK 0x000000f0L
+#define DVO_STRENGTH_CONTROL__DVO_SN__SHIFT 0x00000004
+#define DVO_STRENGTH_CONTROL__DVO_SP_MASK 0x0000000fL
+#define DVO_STRENGTH_CONTROL__DVO_SP__SHIFT 0x00000000
+#define DVO_VREF_CONTROL__DVO_VREFCAL_MASK 0x000000f0L
+#define DVO_VREF_CONTROL__DVO_VREFCAL__SHIFT 0x00000004
+#define DVO_VREF_CONTROL__DVO_VREFPON_MASK 0x00000001L
+#define DVO_VREF_CONTROL__DVO_VREFPON__SHIFT 0x00000000
+#define DVO_VREF_CONTROL__DVO_VREFSEL_MASK 0x00000002L
+#define DVO_VREF_CONTROL__DVO_VREFSEL__SHIFT 0x00000001
+#define EXT_OVERSCAN_LEFT_RIGHT__EXT_OVERSCAN_LEFT_MASK 0x0fff0000L
+#define EXT_OVERSCAN_LEFT_RIGHT__EXT_OVERSCAN_LEFT__SHIFT 0x00000010
+#define EXT_OVERSCAN_LEFT_RIGHT__EXT_OVERSCAN_RIGHT_MASK 0x00000fffL
+#define EXT_OVERSCAN_LEFT_RIGHT__EXT_OVERSCAN_RIGHT__SHIFT 0x00000000
+#define EXT_OVERSCAN_TOP_BOTTOM__EXT_OVERSCAN_BOTTOM_MASK 0x00000fffL
+#define EXT_OVERSCAN_TOP_BOTTOM__EXT_OVERSCAN_BOTTOM__SHIFT 0x00000000
+#define EXT_OVERSCAN_TOP_BOTTOM__EXT_OVERSCAN_TOP_MASK 0x0fff0000L
+#define EXT_OVERSCAN_TOP_BOTTOM__EXT_OVERSCAN_TOP__SHIFT 0x00000010
+#define FBC_CLIENT_REGION_MASK__FBC_MEMORY_REGION_MASK_MASK 0x000f0000L
+#define FBC_CLIENT_REGION_MASK__FBC_MEMORY_REGION_MASK__SHIFT 0x00000010
+#define FBC_CNTL__FBC_COHERENCY_MODE_MASK 0x00030000L
+#define FBC_CNTL__FBC_COHERENCY_MODE__SHIFT 0x00000010
+#define FBC_CNTL__FBC_EN_MASK 0x80000000L
+#define FBC_CNTL__FBC_EN__SHIFT 0x0000001f
+#define FBC_CNTL__FBC_GRPH_COMP_EN_MASK 0x00000001L
+#define FBC_CNTL__FBC_GRPH_COMP_EN__SHIFT 0x00000000
+#define FBC_CNTL__FBC_SOFT_COMPRESS_EN_MASK 0x02000000L
+#define FBC_CNTL__FBC_SOFT_COMPRESS_EN__SHIFT 0x00000019
+#define FBC_CNTL__FBC_SRC_SEL_MASK 0x0000000eL
+#define FBC_CNTL__FBC_SRC_SEL__SHIFT 0x00000001
+#define FBC_COMP_CNTL__FBC_DEPTH_MONO08_EN_MASK 0x00010000L
+#define FBC_COMP_CNTL__FBC_DEPTH_MONO08_EN__SHIFT 0x00000010
+#define FBC_COMP_CNTL__FBC_DEPTH_MONO16_EN_MASK 0x00020000L
+#define FBC_COMP_CNTL__FBC_DEPTH_MONO16_EN__SHIFT 0x00000011
+#define FBC_COMP_CNTL__FBC_DEPTH_RGB04_EN_MASK 0x00040000L
+#define FBC_COMP_CNTL__FBC_DEPTH_RGB04_EN__SHIFT 0x00000012
+#define FBC_COMP_CNTL__FBC_DEPTH_RGB08_EN_MASK 0x00080000L
+#define FBC_COMP_CNTL__FBC_DEPTH_RGB08_EN__SHIFT 0x00000013
+#define FBC_COMP_CNTL__FBC_DEPTH_RGB16_EN_MASK 0x00100000L
+#define FBC_COMP_CNTL__FBC_DEPTH_RGB16_EN__SHIFT 0x00000014
+#define FBC_COMP_CNTL__FBC_MIN_COMPRESSION_MASK 0x0000000fL
+#define FBC_COMP_CNTL__FBC_MIN_COMPRESSION__SHIFT 0x00000000
+#define FBC_COMP_MODE__FBC_DPCM4_RGB_EN_MASK 0x00000100L
+#define FBC_COMP_MODE__FBC_DPCM4_RGB_EN__SHIFT 0x00000008
+#define FBC_COMP_MODE__FBC_DPCM4_YUV_EN_MASK 0x00000400L
+#define FBC_COMP_MODE__FBC_DPCM4_YUV_EN__SHIFT 0x0000000a
+#define FBC_COMP_MODE__FBC_DPCM8_RGB_EN_MASK 0x00000200L
+#define FBC_COMP_MODE__FBC_DPCM8_RGB_EN__SHIFT 0x00000009
+#define FBC_COMP_MODE__FBC_DPCM8_YUV_EN_MASK 0x00000800L
+#define FBC_COMP_MODE__FBC_DPCM8_YUV_EN__SHIFT 0x0000000b
+#define FBC_COMP_MODE__FBC_IND_EN_MASK 0x00010000L
+#define FBC_COMP_MODE__FBC_IND_EN__SHIFT 0x00000010
+#define FBC_COMP_MODE__FBC_RLE_EN_MASK 0x00000001L
+#define FBC_COMP_MODE__FBC_RLE_EN__SHIFT 0x00000000
+#define FBC_CSM_REGION_OFFSET_01__FBC_CSM_REGION_OFFSET_0_MASK 0x000003ffL
+#define FBC_CSM_REGION_OFFSET_01__FBC_CSM_REGION_OFFSET_0__SHIFT 0x00000000
+#define FBC_CSM_REGION_OFFSET_01__FBC_CSM_REGION_OFFSET_1_MASK 0x03ff0000L
+#define FBC_CSM_REGION_OFFSET_01__FBC_CSM_REGION_OFFSET_1__SHIFT 0x00000010
+#define FBC_CSM_REGION_OFFSET_23__FBC_CSM_REGION_OFFSET_2_MASK 0x000003ffL
+#define FBC_CSM_REGION_OFFSET_23__FBC_CSM_REGION_OFFSET_2__SHIFT 0x00000000
+#define FBC_CSM_REGION_OFFSET_23__FBC_CSM_REGION_OFFSET_3_MASK 0x03ff0000L
+#define FBC_CSM_REGION_OFFSET_23__FBC_CSM_REGION_OFFSET_3__SHIFT 0x00000010
+#define FBC_DEBUG0__FBC_COMP_WAKE_DIS_MASK 0x00010000L
+#define FBC_DEBUG0__FBC_COMP_WAKE_DIS__SHIFT 0x00000010
+#define FBC_DEBUG0__FBC_DEBUG0_MASK 0x00fe0000L
+#define FBC_DEBUG0__FBC_DEBUG0__SHIFT 0x00000011
+#define FBC_DEBUG0__FBC_DEBUG_MUX_MASK 0xff000000L
+#define FBC_DEBUG0__FBC_DEBUG_MUX__SHIFT 0x00000018
+#define FBC_DEBUG0__FBC_PERF_MUX0_MASK 0x000000ffL
+#define FBC_DEBUG0__FBC_PERF_MUX0__SHIFT 0x00000000
+#define FBC_DEBUG0__FBC_PERF_MUX1_MASK 0x0000ff00L
+#define FBC_DEBUG0__FBC_PERF_MUX1__SHIFT 0x00000008
+#define FBC_DEBUG1__FBC_DEBUG1_MASK 0xffffffffL
+#define FBC_DEBUG1__FBC_DEBUG1__SHIFT 0x00000000
+#define FBC_DEBUG2__FBC_DEBUG2_MASK 0xffffffffL
+#define FBC_DEBUG2__FBC_DEBUG2__SHIFT 0x00000000
+#define FBC_DEBUG_COMP__FBC_COMP_ADDRESS_TRANSLATION_ENABLE_MASK 0x00000800L
+#define FBC_DEBUG_COMP__FBC_COMP_ADDRESS_TRANSLATION_ENABLE__SHIFT 0x0000000b
+#define FBC_DEBUG_COMP__FBC_COMP_BUSY_HYSTERESIS_MASK 0x000000f0L
+#define FBC_DEBUG_COMP__FBC_COMP_BUSY_HYSTERESIS__SHIFT 0x00000004
+#define FBC_DEBUG_COMP__FBC_COMP_CLK_CNTL_MASK 0x00000300L
+#define FBC_DEBUG_COMP__FBC_COMP_CLK_CNTL__SHIFT 0x00000008
+#define FBC_DEBUG_COMP__FBC_COMP_PRIVILEGED_ACCESS_ENABLE_MASK 0x00000400L
+#define FBC_DEBUG_COMP__FBC_COMP_PRIVILEGED_ACCESS_ENABLE__SHIFT 0x0000000a
+#define FBC_DEBUG_COMP__FBC_COMP_RSIZE_MASK 0x00000008L
+#define FBC_DEBUG_COMP__FBC_COMP_RSIZE__SHIFT 0x00000003
+#define FBC_DEBUG_COMP__FBC_COMP_SWAP_MASK 0x00000003L
+#define FBC_DEBUG_COMP__FBC_COMP_SWAP__SHIFT 0x00000000
+#define FBC_DEBUG_CSR__FBC_DEBUG_CSR_ADDR_MASK 0x000003ffL
+#define FBC_DEBUG_CSR__FBC_DEBUG_CSR_ADDR__SHIFT 0x00000000
+#define FBC_DEBUG_CSR__FBC_DEBUG_CSR_EN_MASK 0x80000000L
+#define FBC_DEBUG_CSR__FBC_DEBUG_CSR_EN__SHIFT 0x0000001f
+#define FBC_DEBUG_CSR__FBC_DEBUG_CSR_RD_DATA_MASK 0x00020000L
+#define FBC_DEBUG_CSR__FBC_DEBUG_CSR_RD_DATA__SHIFT 0x00000011
+#define FBC_DEBUG_CSR__FBC_DEBUG_CSR_WR_DATA_MASK 0x00010000L
+#define FBC_DEBUG_CSR__FBC_DEBUG_CSR_WR_DATA__SHIFT 0x00000010
+#define FBC_DEBUG_CSR_RDATA__FBC_DEBUG_CSR_RDATA_MASK 0xffffffffL
+#define FBC_DEBUG_CSR_RDATA__FBC_DEBUG_CSR_RDATA__SHIFT 0x00000000
+#define FBC_DEBUG_CSR_RDATA_HI__FBC_DEBUG_CSR_RDATA_HI_MASK 0x000000ffL
+#define FBC_DEBUG_CSR_RDATA_HI__FBC_DEBUG_CSR_RDATA_HI__SHIFT 0x00000000
+#define FBC_DEBUG_CSR_WDATA__FBC_DEBUG_CSR_WDATA_MASK 0xffffffffL
+#define FBC_DEBUG_CSR_WDATA__FBC_DEBUG_CSR_WDATA__SHIFT 0x00000000
+#define FBC_DEBUG_CSR_WDATA_HI__FBC_DEBUG_CSR_WDATA_HI_MASK 0x000000ffL
+#define FBC_DEBUG_CSR_WDATA_HI__FBC_DEBUG_CSR_WDATA_HI__SHIFT 0x00000000
+#define FBC_IDLE_FORCE_CLEAR_MASK__FBC_IDLE_FORCE_CLEAR_MASK_MASK 0xffffffffL
+#define FBC_IDLE_FORCE_CLEAR_MASK__FBC_IDLE_FORCE_CLEAR_MASK__SHIFT 0x00000000
+#define FBC_IDLE_MASK__FBC_IDLE_MASK_MASK 0xffffffffL
+#define FBC_IDLE_MASK__FBC_IDLE_MASK__SHIFT 0x00000000
+#define FBC_IND_LUT0__FBC_IND_LUT0_MASK 0x00ffffffL
+#define FBC_IND_LUT0__FBC_IND_LUT0__SHIFT 0x00000000
+#define FBC_IND_LUT10__FBC_IND_LUT10_MASK 0x00ffffffL
+#define FBC_IND_LUT10__FBC_IND_LUT10__SHIFT 0x00000000
+#define FBC_IND_LUT11__FBC_IND_LUT11_MASK 0x00ffffffL
+#define FBC_IND_LUT11__FBC_IND_LUT11__SHIFT 0x00000000
+#define FBC_IND_LUT12__FBC_IND_LUT12_MASK 0x00ffffffL
+#define FBC_IND_LUT12__FBC_IND_LUT12__SHIFT 0x00000000
+#define FBC_IND_LUT13__FBC_IND_LUT13_MASK 0x00ffffffL
+#define FBC_IND_LUT13__FBC_IND_LUT13__SHIFT 0x00000000
+#define FBC_IND_LUT14__FBC_IND_LUT14_MASK 0x00ffffffL
+#define FBC_IND_LUT14__FBC_IND_LUT14__SHIFT 0x00000000
+#define FBC_IND_LUT15__FBC_IND_LUT15_MASK 0x00ffffffL
+#define FBC_IND_LUT15__FBC_IND_LUT15__SHIFT 0x00000000
+#define FBC_IND_LUT1__FBC_IND_LUT1_MASK 0x00ffffffL
+#define FBC_IND_LUT1__FBC_IND_LUT1__SHIFT 0x00000000
+#define FBC_IND_LUT2__FBC_IND_LUT2_MASK 0x00ffffffL
+#define FBC_IND_LUT2__FBC_IND_LUT2__SHIFT 0x00000000
+#define FBC_IND_LUT3__FBC_IND_LUT3_MASK 0x00ffffffL
+#define FBC_IND_LUT3__FBC_IND_LUT3__SHIFT 0x00000000
+#define FBC_IND_LUT4__FBC_IND_LUT4_MASK 0x00ffffffL
+#define FBC_IND_LUT4__FBC_IND_LUT4__SHIFT 0x00000000
+#define FBC_IND_LUT5__FBC_IND_LUT5_MASK 0x00ffffffL
+#define FBC_IND_LUT5__FBC_IND_LUT5__SHIFT 0x00000000
+#define FBC_IND_LUT6__FBC_IND_LUT6_MASK 0x00ffffffL
+#define FBC_IND_LUT6__FBC_IND_LUT6__SHIFT 0x00000000
+#define FBC_IND_LUT7__FBC_IND_LUT7_MASK 0x00ffffffL
+#define FBC_IND_LUT7__FBC_IND_LUT7__SHIFT 0x00000000
+#define FBC_IND_LUT8__FBC_IND_LUT8_MASK 0x00ffffffL
+#define FBC_IND_LUT8__FBC_IND_LUT8__SHIFT 0x00000000
+#define FBC_IND_LUT9__FBC_IND_LUT9_MASK 0x00ffffffL
+#define FBC_IND_LUT9__FBC_IND_LUT9__SHIFT 0x00000000
+#define FBC_MISC__FBC_DECOMPRESS_ERROR_CLEAR_MASK 0x00010000L
+#define FBC_MISC__FBC_DECOMPRESS_ERROR_CLEAR__SHIFT 0x00000010
+#define FBC_MISC__FBC_DECOMPRESS_ERROR_MASK 0x00000003L
+#define FBC_MISC__FBC_DECOMPRESS_ERROR__SHIFT 0x00000000
+#define FBC_MISC__FBC_DIVIDE_X_MASK 0x00000300L
+#define FBC_MISC__FBC_DIVIDE_X__SHIFT 0x00000008
+#define FBC_MISC__FBC_DIVIDE_Y_MASK 0x00000400L
+#define FBC_MISC__FBC_DIVIDE_Y__SHIFT 0x0000000a
+#define FBC_MISC__FBC_ERROR_PIXEL_MASK 0x000000f0L
+#define FBC_MISC__FBC_ERROR_PIXEL__SHIFT 0x00000004
+#define FBC_MISC__FBC_INVALIDATE_ON_ERROR_MASK 0x00000008L
+#define FBC_MISC__FBC_INVALIDATE_ON_ERROR__SHIFT 0x00000003
+#define FBC_MISC__FBC_RESET_AT_DISABLE_MASK 0x00200000L
+#define FBC_MISC__FBC_RESET_AT_DISABLE__SHIFT 0x00000015
+#define FBC_MISC__FBC_RESET_AT_ENABLE_MASK 0x00100000L
+#define FBC_MISC__FBC_RESET_AT_ENABLE__SHIFT 0x00000014
+#define FBC_MISC__FBC_RSM_UNCOMP_DATA_IMMEDIATELY_MASK 0x00001000L
+#define FBC_MISC__FBC_RSM_UNCOMP_DATA_IMMEDIATELY__SHIFT 0x0000000c
+#define FBC_MISC__FBC_RSM_WRITE_VALUE_MASK 0x00000800L
+#define FBC_MISC__FBC_RSM_WRITE_VALUE__SHIFT 0x0000000b
+#define FBC_MISC__FBC_SLOW_REQ_INTERVAL_MASK 0xf0000000L
+#define FBC_MISC__FBC_SLOW_REQ_INTERVAL__SHIFT 0x0000001c
+#define FBC_MISC__FBC_STOP_ON_ERROR_MASK 0x00000004L
+#define FBC_MISC__FBC_STOP_ON_ERROR__SHIFT 0x00000002
+#define FBC_START_STOP_DELAY__FBC_COMP_START_DELAY_MASK 0x00001f00L
+#define FBC_START_STOP_DELAY__FBC_COMP_START_DELAY__SHIFT 0x00000008
+#define FBC_START_STOP_DELAY__FBC_DECOMP_START_DELAY_MASK 0x0000001fL
+#define FBC_START_STOP_DELAY__FBC_DECOMP_START_DELAY__SHIFT 0x00000000
+#define FBC_START_STOP_DELAY__FBC_DECOMP_STOP_DELAY_MASK 0x00000080L
+#define FBC_START_STOP_DELAY__FBC_DECOMP_STOP_DELAY__SHIFT 0x00000007
+#define FBC_STATUS__FBC_ENABLE_STATUS_MASK 0x00000001L
+#define FBC_STATUS__FBC_ENABLE_STATUS__SHIFT 0x00000000
+#define FBC_TEST_DEBUG_DATA__FBC_TEST_DEBUG_DATA_MASK 0xffffffffL
+#define FBC_TEST_DEBUG_DATA__FBC_TEST_DEBUG_DATA__SHIFT 0x00000000
+#define FBC_TEST_DEBUG_INDEX__FBC_TEST_DEBUG_INDEX_MASK 0x000000ffL
+#define FBC_TEST_DEBUG_INDEX__FBC_TEST_DEBUG_INDEX__SHIFT 0x00000000
+#define FBC_TEST_DEBUG_INDEX__FBC_TEST_DEBUG_WRITE_EN_MASK 0x00000100L
+#define FBC_TEST_DEBUG_INDEX__FBC_TEST_DEBUG_WRITE_EN__SHIFT 0x00000008
+#define FMT_BIT_DEPTH_CONTROL__FMT_25FRC_SEL_MASK 0x0c000000L
+#define FMT_BIT_DEPTH_CONTROL__FMT_25FRC_SEL__SHIFT 0x0000001a
+#define FMT_BIT_DEPTH_CONTROL__FMT_50FRC_SEL_MASK 0x30000000L
+#define FMT_BIT_DEPTH_CONTROL__FMT_50FRC_SEL__SHIFT 0x0000001c
+#define FMT_BIT_DEPTH_CONTROL__FMT_75FRC_SEL_MASK 0xc0000000L
+#define FMT_BIT_DEPTH_CONTROL__FMT_75FRC_SEL__SHIFT 0x0000001e
+#define FMT_BIT_DEPTH_CONTROL__FMT_FRAME_RANDOM_ENABLE_MASK 0x00002000L
+#define FMT_BIT_DEPTH_CONTROL__FMT_FRAME_RANDOM_ENABLE__SHIFT 0x0000000d
+#define FMT_BIT_DEPTH_CONTROL__FMT_HIGHPASS_RANDOM_ENABLE_MASK 0x00008000L
+#define FMT_BIT_DEPTH_CONTROL__FMT_HIGHPASS_RANDOM_ENABLE__SHIFT 0x0000000f
+#define FMT_BIT_DEPTH_CONTROL__FMT_RGB_RANDOM_ENABLE_MASK 0x00004000L
+#define FMT_BIT_DEPTH_CONTROL__FMT_RGB_RANDOM_ENABLE__SHIFT 0x0000000e
+#define FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_DEPTH_MASK 0x00001000L
+#define FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_DEPTH__SHIFT 0x0000000c
+#define FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_EN_MASK 0x00000100L
+#define FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_EN__SHIFT 0x00000008
+#define FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_MODE_MASK 0x00000600L
+#define FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_MODE__SHIFT 0x00000009
+#define FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_DITHER_DEPTH_MASK 0x00100000L
+#define FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_DITHER_DEPTH__SHIFT 0x00000014
+#define FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_DITHER_EN_MASK 0x00010000L
+#define FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_DITHER_EN__SHIFT 0x00000010
+#define FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_DITHER_OFFSET_MASK 0x00600000L
+#define FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_DITHER_OFFSET__SHIFT 0x00000015
+#define FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_DITHER_RESET_MASK 0x02000000L
+#define FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_DITHER_RESET__SHIFT 0x00000019
+#define FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_LEVEL_MASK 0x01000000L
+#define FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_LEVEL__SHIFT 0x00000018
+#define FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_DEPTH_MASK 0x00000010L
+#define FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_DEPTH__SHIFT 0x00000004
+#define FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_EN_MASK 0x00000001L
+#define FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_EN__SHIFT 0x00000000
+#define FMT_CLAMP_CNTL__FMT_CLAMP_COLOR_FORMAT_MASK 0x00070000L
+#define FMT_CLAMP_CNTL__FMT_CLAMP_COLOR_FORMAT__SHIFT 0x00000010
+#define FMT_CLAMP_CNTL__FMT_CLAMP_DATA_EN_MASK 0x00000001L
+#define FMT_CLAMP_CNTL__FMT_CLAMP_DATA_EN__SHIFT 0x00000000
+#define FMT_CONTROL__FMT_PIXEL_ENCODING_MASK 0x00010000L
+#define FMT_CONTROL__FMT_PIXEL_ENCODING__SHIFT 0x00000010
+#define FMT_CONTROL__FMT_STEREOSYNC_OVERRIDE_MASK 0x00000001L
+#define FMT_CONTROL__FMT_STEREOSYNC_OVERRIDE__SHIFT 0x00000000
+#define FMT_CONTROL__FMT_STEREOSYNC_OVR_POL_MASK 0x00000010L
+#define FMT_CONTROL__FMT_STEREOSYNC_OVR_POL__SHIFT 0x00000004
+#define FMT_CRC_CNTL__FMT_CRC_CONT_EN_MASK 0x00000010L
+#define FMT_CRC_CNTL__FMT_CRC_CONT_EN__SHIFT 0x00000004
+#define FMT_CRC_CNTL__FMT_CRC_EN_MASK 0x00000001L
+#define FMT_CRC_CNTL__FMT_CRC_EN__SHIFT 0x00000000
+#define FMT_CRC_CNTL__FMT_CRC_EVEN_ODD_PIX_ENABLE_MASK 0x00100000L
+#define FMT_CRC_CNTL__FMT_CRC_EVEN_ODD_PIX_ENABLE__SHIFT 0x00000014
+#define FMT_CRC_CNTL__FMT_CRC_EVEN_ODD_PIX_SELECT_MASK 0x01000000L
+#define FMT_CRC_CNTL__FMT_CRC_EVEN_ODD_PIX_SELECT__SHIFT 0x00000018
+#define FMT_CRC_CNTL__FMT_CRC_INTERLACE_MODE_MASK 0x00003000L
+#define FMT_CRC_CNTL__FMT_CRC_INTERLACE_MODE__SHIFT 0x0000000c
+#define FMT_CRC_CNTL__FMT_CRC_ONLY_BLANKb_MASK 0x00000100L
+#define FMT_CRC_CNTL__FMT_CRC_ONLY_BLANKb__SHIFT 0x00000008
+#define FMT_CRC_CNTL__FMT_CRC_USE_NEW_AND_REPEATED_PIXELS_MASK 0x00010000L
+#define FMT_CRC_CNTL__FMT_CRC_USE_NEW_AND_REPEATED_PIXELS__SHIFT 0x00000010
+#define FMT_CRC_SIG_BLUE_CONTROL__FMT_CRC_SIG_BLUE_MASK 0x0000ffffL
+#define FMT_CRC_SIG_BLUE_CONTROL__FMT_CRC_SIG_BLUE__SHIFT 0x00000000
+#define FMT_CRC_SIG_BLUE_CONTROL__FMT_CRC_SIG_CONTROL_MASK 0xffff0000L
+#define FMT_CRC_SIG_BLUE_CONTROL__FMT_CRC_SIG_CONTROL__SHIFT 0x00000010
+#define FMT_CRC_SIG_BLUE_CONTROL_MASK__FMT_CRC_SIG_BLUE_MASK_MASK 0x0000ffffL
+#define FMT_CRC_SIG_BLUE_CONTROL_MASK__FMT_CRC_SIG_BLUE_MASK__SHIFT 0x00000000
+#define FMT_CRC_SIG_BLUE_CONTROL_MASK__FMT_CRC_SIG_CONTROL_MASK_MASK 0xffff0000L
+#define FMT_CRC_SIG_BLUE_CONTROL_MASK__FMT_CRC_SIG_CONTROL_MASK__SHIFT 0x00000010
+#define FMT_CRC_SIG_RED_GREEN__FMT_CRC_SIG_GREEN_MASK 0xffff0000L
+#define FMT_CRC_SIG_RED_GREEN__FMT_CRC_SIG_GREEN__SHIFT 0x00000010
+#define FMT_CRC_SIG_RED_GREEN__FMT_CRC_SIG_RED_MASK 0x0000ffffL
+#define FMT_CRC_SIG_RED_GREEN__FMT_CRC_SIG_RED__SHIFT 0x00000000
+#define FMT_CRC_SIG_RED_GREEN_MASK__FMT_CRC_SIG_GREEN_MASK_MASK 0xffff0000L
+#define FMT_CRC_SIG_RED_GREEN_MASK__FMT_CRC_SIG_GREEN_MASK__SHIFT 0x00000010
+#define FMT_CRC_SIG_RED_GREEN_MASK__FMT_CRC_SIG_RED_MASK_MASK 0x0000ffffL
+#define FMT_CRC_SIG_RED_GREEN_MASK__FMT_CRC_SIG_RED_MASK__SHIFT 0x00000000
+#define FMT_DEBUG0__FMT_DEBUG0_MASK 0xffffffffL
+#define FMT_DEBUG0__FMT_DEBUG0__SHIFT 0x00000000
+#define FMT_DEBUG1__FMT_DEBUG1_MASK 0xffffffffL
+#define FMT_DEBUG1__FMT_DEBUG1__SHIFT 0x00000000
+#define FMT_DEBUG2__FMT_DEBUG2_MASK 0xffffffffL
+#define FMT_DEBUG2__FMT_DEBUG2__SHIFT 0x00000000
+#define FMT_DEBUG_CNTL__FMT_DEBUG_COLOR_SELECT_MASK 0x00000003L
+#define FMT_DEBUG_CNTL__FMT_DEBUG_COLOR_SELECT__SHIFT 0x00000000
+#define FMT_DEBUG_ID__FMT_DEBUG_ID_MASK 0xffffffffL
+#define FMT_DEBUG_ID__FMT_DEBUG_ID__SHIFT 0x00000000
+#define FMT_DITHER_RAND_B_SEED__FMT_RAND_B_SEED_MASK 0x000000ffL
+#define FMT_DITHER_RAND_B_SEED__FMT_RAND_B_SEED__SHIFT 0x00000000
+#define FMT_DITHER_RAND_G_SEED__FMT_RAND_G_SEED_MASK 0x000000ffL
+#define FMT_DITHER_RAND_G_SEED__FMT_RAND_G_SEED__SHIFT 0x00000000
+#define FMT_DITHER_RAND_R_SEED__FMT_RAND_R_SEED_MASK 0x000000ffL
+#define FMT_DITHER_RAND_R_SEED__FMT_RAND_R_SEED__SHIFT 0x00000000
+#define FMT_DYNAMIC_EXP_CNTL__FMT_DYNAMIC_EXP_EN_MASK 0x00000001L
+#define FMT_DYNAMIC_EXP_CNTL__FMT_DYNAMIC_EXP_EN__SHIFT 0x00000000
+#define FMT_DYNAMIC_EXP_CNTL__FMT_DYNAMIC_EXP_MODE_MASK 0x00000010L
+#define FMT_DYNAMIC_EXP_CNTL__FMT_DYNAMIC_EXP_MODE__SHIFT 0x00000004
+#define FMT_FORCE_DATA_0_1__FMT_FORCE_DATA0_MASK 0x0000ffffL
+#define FMT_FORCE_DATA_0_1__FMT_FORCE_DATA0__SHIFT 0x00000000
+#define FMT_FORCE_DATA_0_1__FMT_FORCE_DATA1_MASK 0xffff0000L
+#define FMT_FORCE_DATA_0_1__FMT_FORCE_DATA1__SHIFT 0x00000010
+#define FMT_FORCE_DATA_2_3__FMT_FORCE_DATA2_MASK 0x0000ffffL
+#define FMT_FORCE_DATA_2_3__FMT_FORCE_DATA2__SHIFT 0x00000000
+#define FMT_FORCE_DATA_2_3__FMT_FORCE_DATA3_MASK 0xffff0000L
+#define FMT_FORCE_DATA_2_3__FMT_FORCE_DATA3__SHIFT 0x00000010
+#define FMT_FORCE_OUTPUT_CNTL__FMT_FORCE_DATA_EN_MASK 0x00000001L
+#define FMT_FORCE_OUTPUT_CNTL__FMT_FORCE_DATA_EN__SHIFT 0x00000000
+#define FMT_FORCE_OUTPUT_CNTL__FMT_FORCE_DATA_ON_BLANKb_ONLY_MASK 0x00010000L
+#define FMT_FORCE_OUTPUT_CNTL__FMT_FORCE_DATA_ON_BLANKb_ONLY__SHIFT 0x00000010
+#define FMT_FORCE_OUTPUT_CNTL__FMT_FORCE_DATA_SEL_COLOR_MASK 0x00000700L
+#define FMT_FORCE_OUTPUT_CNTL__FMT_FORCE_DATA_SEL_COLOR__SHIFT 0x00000008
+#define FMT_FORCE_OUTPUT_CNTL__FMT_FORCE_DATA_SEL_SLOT_MASK 0x0000f000L
+#define FMT_FORCE_OUTPUT_CNTL__FMT_FORCE_DATA_SEL_SLOT__SHIFT 0x0000000c
+#define FMT_TEMPORAL_DITHER_PATTERN_CONTROL__FMT_TEMPORAL_DITHER_PROGRAMMABLE_PATTERN_RGB1_BGR0_MASK 0x00000010L
+#define FMT_TEMPORAL_DITHER_PATTERN_CONTROL__FMT_TEMPORAL_DITHER_PROGRAMMABLE_PATTERN_RGB1_BGR0__SHIFT 0x00000004
+#define FMT_TEMPORAL_DITHER_PATTERN_CONTROL__FMT_TEMPORAL_DITHER_PROGRAMMABLE_PATTERN_SELECT_MASK 0x00000001L
+#define FMT_TEMPORAL_DITHER_PATTERN_CONTROL__FMT_TEMPORAL_DITHER_PROGRAMMABLE_PATTERN_SELECT__SHIFT 0x00000000
+#define FMT_TEMPORAL_DITHER_PROGRAMMABLE_PATTERN_S_MATRIX__FMT_TEMPORAL_DITHER_PROGRAMMABLE_PATTERN_S_MATRIX_MASK 0xffffffffL
+#define FMT_TEMPORAL_DITHER_PROGRAMMABLE_PATTERN_S_MATRIX__FMT_TEMPORAL_DITHER_PROGRAMMABLE_PATTERN_S_MATRIX__SHIFT 0x00000000
+#define FMT_TEMPORAL_DITHER_PROGRAMMABLE_PATTERN_T_MATRIX__FMT_TEMPORAL_DITHER_PROGRAMMABLE_PATTERN_T_MATRIX_MASK 0xffffffffL
+#define FMT_TEMPORAL_DITHER_PROGRAMMABLE_PATTERN_T_MATRIX__FMT_TEMPORAL_DITHER_PROGRAMMABLE_PATTERN_T_MATRIX__SHIFT 0x00000000
+#define FMT_TEST_DEBUG_DATA__FMT_TEST_DEBUG_DATA_MASK 0xffffffffL
+#define FMT_TEST_DEBUG_DATA__FMT_TEST_DEBUG_DATA__SHIFT 0x00000000
+#define FMT_TEST_DEBUG_INDEX__FMT_TEST_DEBUG_INDEX_MASK 0x000000ffL
+#define FMT_TEST_DEBUG_INDEX__FMT_TEST_DEBUG_INDEX__SHIFT 0x00000000
+#define FMT_TEST_DEBUG_INDEX__FMT_TEST_DEBUG_WRITE_EN_MASK 0x00000100L
+#define FMT_TEST_DEBUG_INDEX__FMT_TEST_DEBUG_WRITE_EN__SHIFT 0x00000008
+#define GAMUT_REMAP_C11_C12__GAMUT_REMAP_C11_MASK 0x0000ffffL
+#define GAMUT_REMAP_C11_C12__GAMUT_REMAP_C11__SHIFT 0x00000000
+#define GAMUT_REMAP_C11_C12__GAMUT_REMAP_C12_MASK 0xffff0000L
+#define GAMUT_REMAP_C11_C12__GAMUT_REMAP_C12__SHIFT 0x00000010
+#define GAMUT_REMAP_C13_C14__GAMUT_REMAP_C13_MASK 0x0000ffffL
+#define GAMUT_REMAP_C13_C14__GAMUT_REMAP_C13__SHIFT 0x00000000
+#define GAMUT_REMAP_C13_C14__GAMUT_REMAP_C14_MASK 0xffff0000L
+#define GAMUT_REMAP_C13_C14__GAMUT_REMAP_C14__SHIFT 0x00000010
+#define GAMUT_REMAP_C21_C22__GAMUT_REMAP_C21_MASK 0x0000ffffL
+#define GAMUT_REMAP_C21_C22__GAMUT_REMAP_C21__SHIFT 0x00000000
+#define GAMUT_REMAP_C21_C22__GAMUT_REMAP_C22_MASK 0xffff0000L
+#define GAMUT_REMAP_C21_C22__GAMUT_REMAP_C22__SHIFT 0x00000010
+#define GAMUT_REMAP_C23_C24__GAMUT_REMAP_C23_MASK 0x0000ffffL
+#define GAMUT_REMAP_C23_C24__GAMUT_REMAP_C23__SHIFT 0x00000000
+#define GAMUT_REMAP_C23_C24__GAMUT_REMAP_C24_MASK 0xffff0000L
+#define GAMUT_REMAP_C23_C24__GAMUT_REMAP_C24__SHIFT 0x00000010
+#define GAMUT_REMAP_C31_C32__GAMUT_REMAP_C31_MASK 0x0000ffffL
+#define GAMUT_REMAP_C31_C32__GAMUT_REMAP_C31__SHIFT 0x00000000
+#define GAMUT_REMAP_C31_C32__GAMUT_REMAP_C32_MASK 0xffff0000L
+#define GAMUT_REMAP_C31_C32__GAMUT_REMAP_C32__SHIFT 0x00000010
+#define GAMUT_REMAP_C33_C34__GAMUT_REMAP_C33_MASK 0x0000ffffL
+#define GAMUT_REMAP_C33_C34__GAMUT_REMAP_C33__SHIFT 0x00000000
+#define GAMUT_REMAP_C33_C34__GAMUT_REMAP_C34_MASK 0xffff0000L
+#define GAMUT_REMAP_C33_C34__GAMUT_REMAP_C34__SHIFT 0x00000010
+#define GAMUT_REMAP_CONTROL__GRPH_GAMUT_REMAP_MODE_MASK 0x00000003L
+#define GAMUT_REMAP_CONTROL__GRPH_GAMUT_REMAP_MODE__SHIFT 0x00000000
+#define GAMUT_REMAP_CONTROL__OVL_GAMUT_REMAP_MODE_MASK 0x00000030L
+#define GAMUT_REMAP_CONTROL__OVL_GAMUT_REMAP_MODE__SHIFT 0x00000004
+#define GENENB__BLK_IO_BASE_MASK 0x000000ffL
+#define GENENB__BLK_IO_BASE__SHIFT 0x00000000
+#define GENERIC_I2C_CONTROL__GENERIC_I2C_DBG_REF_SEL_MASK 0x80000000L
+#define GENERIC_I2C_CONTROL__GENERIC_I2C_DBG_REF_SEL__SHIFT 0x0000001f
+#define GENERIC_I2C_CONTROL__GENERIC_I2C_ENABLE_MASK 0x00000008L
+#define GENERIC_I2C_CONTROL__GENERIC_I2C_ENABLE__SHIFT 0x00000003
+#define GENERIC_I2C_CONTROL__GENERIC_I2C_GO_MASK 0x00000001L
+#define GENERIC_I2C_CONTROL__GENERIC_I2C_GO__SHIFT 0x00000000
+#define GENERIC_I2C_CONTROL__GENERIC_I2C_SEND_RESET_MASK 0x00000004L
+#define GENERIC_I2C_CONTROL__GENERIC_I2C_SEND_RESET__SHIFT 0x00000002
+#define GENERIC_I2C_CONTROL__GENERIC_I2C_SOFT_RESET_MASK 0x00000002L
+#define GENERIC_I2C_CONTROL__GENERIC_I2C_SOFT_RESET__SHIFT 0x00000001
+#define GENERIC_I2C_DATA__GENERIC_I2C_DATA_MASK 0x0000ff00L
+#define GENERIC_I2C_DATA__GENERIC_I2C_DATA_RW_MASK 0x00000001L
+#define GENERIC_I2C_DATA__GENERIC_I2C_DATA_RW__SHIFT 0x00000000
+#define GENERIC_I2C_DATA__GENERIC_I2C_DATA__SHIFT 0x00000008
+#define GENERIC_I2C_DATA__GENERIC_I2C_INDEX_MASK 0x000f0000L
+#define GENERIC_I2C_DATA__GENERIC_I2C_INDEX__SHIFT 0x00000010
+#define GENERIC_I2C_DATA__GENERIC_I2C_INDEX_WRITE_MASK 0x80000000L
+#define GENERIC_I2C_DATA__GENERIC_I2C_INDEX_WRITE__SHIFT 0x0000001f
+#define GENERIC_I2C_INTERRUPT_CONTROL__GENERIC_I2C_DONE_ACK_MASK 0x00000002L
+#define GENERIC_I2C_INTERRUPT_CONTROL__GENERIC_I2C_DONE_ACK__SHIFT 0x00000001
+#define GENERIC_I2C_INTERRUPT_CONTROL__GENERIC_I2C_DONE_INT_MASK 0x00000001L
+#define GENERIC_I2C_INTERRUPT_CONTROL__GENERIC_I2C_DONE_INT__SHIFT 0x00000000
+#define GENERIC_I2C_INTERRUPT_CONTROL__GENERIC_I2C_DONE_MASK_MASK 0x00000004L
+#define GENERIC_I2C_INTERRUPT_CONTROL__GENERIC_I2C_DONE_MASK__SHIFT 0x00000002
+#define GENERIC_I2C_PIN_DEBUG__GENERIC_I2C_SCL_EN_MASK 0x00000004L
+#define GENERIC_I2C_PIN_DEBUG__GENERIC_I2C_SCL_EN__SHIFT 0x00000002
+#define GENERIC_I2C_PIN_DEBUG__GENERIC_I2C_SCL_INPUT_MASK 0x00000002L
+#define GENERIC_I2C_PIN_DEBUG__GENERIC_I2C_SCL_INPUT__SHIFT 0x00000001
+#define GENERIC_I2C_PIN_DEBUG__GENERIC_I2C_SCL_OUTPUT_MASK 0x00000001L
+#define GENERIC_I2C_PIN_DEBUG__GENERIC_I2C_SCL_OUTPUT__SHIFT 0x00000000
+#define GENERIC_I2C_PIN_DEBUG__GENERIC_I2C_SDA_EN_MASK 0x00000040L
+#define GENERIC_I2C_PIN_DEBUG__GENERIC_I2C_SDA_EN__SHIFT 0x00000006
+#define GENERIC_I2C_PIN_DEBUG__GENERIC_I2C_SDA_INPUT_MASK 0x00000020L
+#define GENERIC_I2C_PIN_DEBUG__GENERIC_I2C_SDA_INPUT__SHIFT 0x00000005
+#define GENERIC_I2C_PIN_DEBUG__GENERIC_I2C_SDA_OUTPUT_MASK 0x00000010L
+#define GENERIC_I2C_PIN_DEBUG__GENERIC_I2C_SDA_OUTPUT__SHIFT 0x00000004
+#define GENERIC_I2C_PIN_SELECTION__GENERIC_I2C_SCL_PIN_SEL_MASK 0x0000007fL
+#define GENERIC_I2C_PIN_SELECTION__GENERIC_I2C_SCL_PIN_SEL__SHIFT 0x00000000
+#define GENERIC_I2C_PIN_SELECTION__GENERIC_I2C_SDA_PIN_SEL_MASK 0x00007f00L
+#define GENERIC_I2C_PIN_SELECTION__GENERIC_I2C_SDA_PIN_SEL__SHIFT 0x00000008
+#define GENERIC_I2C_SETUP__GENERIC_I2C_CLK_DRIVE_EN_MASK 0x00000080L
+#define GENERIC_I2C_SETUP__GENERIC_I2C_CLK_DRIVE_EN__SHIFT 0x00000007
+#define GENERIC_I2C_SETUP__GENERIC_I2C_DATA_DRIVE_EN_MASK 0x00000001L
+#define GENERIC_I2C_SETUP__GENERIC_I2C_DATA_DRIVE_EN__SHIFT 0x00000000
+#define GENERIC_I2C_SETUP__GENERIC_I2C_DATA_DRIVE_SEL_MASK 0x00000002L
+#define GENERIC_I2C_SETUP__GENERIC_I2C_DATA_DRIVE_SEL__SHIFT 0x00000001
+#define GENERIC_I2C_SETUP__GENERIC_I2C_INTRA_BYTE_DELAY_MASK 0x0000ff00L
+#define GENERIC_I2C_SETUP__GENERIC_I2C_INTRA_BYTE_DELAY__SHIFT 0x00000008
+#define GENERIC_I2C_SETUP__GENERIC_I2C_TIME_LIMIT_MASK 0xff000000L
+#define GENERIC_I2C_SETUP__GENERIC_I2C_TIME_LIMIT__SHIFT 0x00000018
+#define GENERIC_I2C_SPEED__GENERIC_I2C_DISABLE_FILTER_DURING_STALL_MASK 0x00000010L
+#define GENERIC_I2C_SPEED__GENERIC_I2C_DISABLE_FILTER_DURING_STALL__SHIFT 0x00000004
+#define GENERIC_I2C_SPEED__GENERIC_I2C_PRESCALE_MASK 0xffff0000L
+#define GENERIC_I2C_SPEED__GENERIC_I2C_PRESCALE__SHIFT 0x00000010
+#define GENERIC_I2C_SPEED__GENERIC_I2C_THRESHOLD_MASK 0x00000003L
+#define GENERIC_I2C_SPEED__GENERIC_I2C_THRESHOLD__SHIFT 0x00000000
+#define GENERIC_I2C_STATUS__GENERIC_I2C_ABORTED_MASK 0x00000020L
+#define GENERIC_I2C_STATUS__GENERIC_I2C_ABORTED__SHIFT 0x00000005
+#define GENERIC_I2C_STATUS__GENERIC_I2C_DONE_MASK 0x00000010L
+#define GENERIC_I2C_STATUS__GENERIC_I2C_DONE__SHIFT 0x00000004
+#define GENERIC_I2C_STATUS__GENERIC_I2C_NACK_MASK 0x00000400L
+#define GENERIC_I2C_STATUS__GENERIC_I2C_NACK__SHIFT 0x0000000a
+#define GENERIC_I2C_STATUS__GENERIC_I2C_STATUS_MASK 0x0000000fL
+#define GENERIC_I2C_STATUS__GENERIC_I2C_STATUS__SHIFT 0x00000000
+#define GENERIC_I2C_STATUS__GENERIC_I2C_STOPPED_ON_NACK_MASK 0x00000200L
+#define GENERIC_I2C_STATUS__GENERIC_I2C_STOPPED_ON_NACK__SHIFT 0x00000009
+#define GENERIC_I2C_STATUS__GENERIC_I2C_TIMEOUT_MASK 0x00000040L
+#define GENERIC_I2C_STATUS__GENERIC_I2C_TIMEOUT__SHIFT 0x00000006
+#define GENERIC_I2C_TRANSACTION__GENERIC_I2C_ACK_ON_READ_MASK 0x00000200L
+#define GENERIC_I2C_TRANSACTION__GENERIC_I2C_ACK_ON_READ__SHIFT 0x00000009
+#define GENERIC_I2C_TRANSACTION__GENERIC_I2C_COUNT_MASK 0x000f0000L
+#define GENERIC_I2C_TRANSACTION__GENERIC_I2C_COUNT__SHIFT 0x00000010
+#define GENERIC_I2C_TRANSACTION__GENERIC_I2C_RW_MASK 0x00000001L
+#define GENERIC_I2C_TRANSACTION__GENERIC_I2C_RW__SHIFT 0x00000000
+#define GENERIC_I2C_TRANSACTION__GENERIC_I2C_START_MASK 0x00001000L
+#define GENERIC_I2C_TRANSACTION__GENERIC_I2C_START__SHIFT 0x0000000c
+#define GENERIC_I2C_TRANSACTION__GENERIC_I2C_STOP_MASK 0x00002000L
+#define GENERIC_I2C_TRANSACTION__GENERIC_I2C_STOP_ON_NACK_MASK 0x00000100L
+#define GENERIC_I2C_TRANSACTION__GENERIC_I2C_STOP_ON_NACK__SHIFT 0x00000008
+#define GENERIC_I2C_TRANSACTION__GENERIC_I2C_STOP__SHIFT 0x0000000d
+#define GENFC_RD__VSYNC_SEL_R_MASK 0x00000008L
+#define GENFC_RD__VSYNC_SEL_R__SHIFT 0x00000003
+#define GENFC_WT__VSYNC_SEL_W_MASK 0x00000008L
+#define GENFC_WT__VSYNC_SEL_W__SHIFT 0x00000003
+#define GENMO_RD__GENMO_MONO_ADDRESS_B_MASK 0x00000001L
+#define GENMO_RD__GENMO_MONO_ADDRESS_B__SHIFT 0x00000000
+#define GENMO_RD__ODD_EVEN_MD_PGSEL_MASK 0x00000020L
+#define GENMO_RD__ODD_EVEN_MD_PGSEL__SHIFT 0x00000005
+#define GENMO_RD__VGA_CKSEL_MASK 0x0000000cL
+#define GENMO_RD__VGA_CKSEL__SHIFT 0x00000002
+#define GENMO_RD__VGA_HSYNC_POL_MASK 0x00000040L
+#define GENMO_RD__VGA_HSYNC_POL__SHIFT 0x00000006
+#define GENMO_RD__VGA_RAM_EN_MASK 0x00000002L
+#define GENMO_RD__VGA_RAM_EN__SHIFT 0x00000001
+#define GENMO_RD__VGA_VSYNC_POL_MASK 0x00000080L
+#define GENMO_RD__VGA_VSYNC_POL__SHIFT 0x00000007
+#define GENMO_WT__GENMO_MONO_ADDRESS_B_MASK 0x00000001L
+#define GENMO_WT__GENMO_MONO_ADDRESS_B__SHIFT 0x00000000
+#define GENMO_WT__ODD_EVEN_MD_PGSEL_MASK 0x00000020L
+#define GENMO_WT__ODD_EVEN_MD_PGSEL__SHIFT 0x00000005
+#define GENMO_WT__VGA_CKSEL_MASK 0x0000000cL
+#define GENMO_WT__VGA_CKSEL__SHIFT 0x00000002
+#define GENMO_WT__VGA_HSYNC_POL_MASK 0x00000040L
+#define GENMO_WT__VGA_HSYNC_POL__SHIFT 0x00000006
+#define GENMO_WT__VGA_RAM_EN_MASK 0x00000002L
+#define GENMO_WT__VGA_RAM_EN__SHIFT 0x00000001
+#define GENMO_WT__VGA_VSYNC_POL_MASK 0x00000080L
+#define GENMO_WT__VGA_VSYNC_POL__SHIFT 0x00000007
+#define GENS0__CRT_INTR_MASK 0x00000080L
+#define GENS0__CRT_INTR__SHIFT 0x00000007
+#define GENS0__SENSE_SWITCH_MASK 0x00000010L
+#define GENS0__SENSE_SWITCH__SHIFT 0x00000004
+#define GENS1__NO_DISPLAY_MASK 0x00000001L
+#define GENS1__NO_DISPLAY__SHIFT 0x00000000
+#define GENS1__PIXEL_READ_BACK_MASK 0x00000030L
+#define GENS1__PIXEL_READ_BACK__SHIFT 0x00000004
+#define GENS1__VGA_VSTATUS_MASK 0x00000008L
+#define GENS1__VGA_VSTATUS__SHIFT 0x00000003
+#define GRA00__GRPH_SET_RESET0_MASK 0x00000001L
+#define GRA00__GRPH_SET_RESET0__SHIFT 0x00000000
+#define GRA00__GRPH_SET_RESET1_MASK 0x00000002L
+#define GRA00__GRPH_SET_RESET1__SHIFT 0x00000001
+#define GRA00__GRPH_SET_RESET2_MASK 0x00000004L
+#define GRA00__GRPH_SET_RESET2__SHIFT 0x00000002
+#define GRA00__GRPH_SET_RESET3_MASK 0x00000008L
+#define GRA00__GRPH_SET_RESET3__SHIFT 0x00000003
+#define GRA01__GRPH_SET_RESET_ENA0_MASK 0x00000001L
+#define GRA01__GRPH_SET_RESET_ENA0__SHIFT 0x00000000
+#define GRA01__GRPH_SET_RESET_ENA1_MASK 0x00000002L
+#define GRA01__GRPH_SET_RESET_ENA1__SHIFT 0x00000001
+#define GRA01__GRPH_SET_RESET_ENA2_MASK 0x00000004L
+#define GRA01__GRPH_SET_RESET_ENA2__SHIFT 0x00000002
+#define GRA01__GRPH_SET_RESET_ENA3_MASK 0x00000008L
+#define GRA01__GRPH_SET_RESET_ENA3__SHIFT 0x00000003
+#define GRA02__GRPH_CCOMP_MASK 0x0000000fL
+#define GRA02__GRPH_CCOMP__SHIFT 0x00000000
+#define GRA03__GRPH_FN_SEL_MASK 0x00000018L
+#define GRA03__GRPH_FN_SEL__SHIFT 0x00000003
+#define GRA03__GRPH_ROTATE_MASK 0x00000007L
+#define GRA03__GRPH_ROTATE__SHIFT 0x00000000
+#define GRA04__GRPH_RMAP_MASK 0x00000003L
+#define GRA04__GRPH_RMAP__SHIFT 0x00000000
+#define GRA05__CGA_ODDEVEN_MASK 0x00000010L
+#define GRA05__CGA_ODDEVEN__SHIFT 0x00000004
+#define GRA05__GRPH_OES_MASK 0x00000020L
+#define GRA05__GRPH_OES__SHIFT 0x00000005
+#define GRA05__GRPH_PACK_MASK 0x00000040L
+#define GRA05__GRPH_PACK__SHIFT 0x00000006
+#define GRA05__GRPH_READ1_MASK 0x00000008L
+#define GRA05__GRPH_READ1__SHIFT 0x00000003
+#define GRA05__GRPH_WRITE_MODE_MASK 0x00000003L
+#define GRA05__GRPH_WRITE_MODE__SHIFT 0x00000000
+#define GRA06__GRPH_ADRSEL_MASK 0x0000000cL
+#define GRA06__GRPH_ADRSEL__SHIFT 0x00000002
+#define GRA06__GRPH_GRAPHICS_MASK 0x00000001L
+#define GRA06__GRPH_GRAPHICS__SHIFT 0x00000000
+#define GRA06__GRPH_ODDEVEN_MASK 0x00000002L
+#define GRA06__GRPH_ODDEVEN__SHIFT 0x00000001
+#define GRA07__GRPH_XCARE0_MASK 0x00000001L
+#define GRA07__GRPH_XCARE0__SHIFT 0x00000000
+#define GRA07__GRPH_XCARE1_MASK 0x00000002L
+#define GRA07__GRPH_XCARE1__SHIFT 0x00000001
+#define GRA07__GRPH_XCARE2_MASK 0x00000004L
+#define GRA07__GRPH_XCARE2__SHIFT 0x00000002
+#define GRA07__GRPH_XCARE3_MASK 0x00000008L
+#define GRA07__GRPH_XCARE3__SHIFT 0x00000003
+#define GRA08__GRPH_BMSK_MASK 0x000000ffL
+#define GRA08__GRPH_BMSK__SHIFT 0x00000000
+#define GRPH8_DATA__GRPH_DATA_MASK 0x000000ffL
+#define GRPH8_DATA__GRPH_DATA__SHIFT 0x00000000
+#define GRPH8_IDX__GRPH_IDX_MASK 0x0000000fL
+#define GRPH8_IDX__GRPH_IDX__SHIFT 0x00000000
+#define GRPH_COMPRESS_PITCH__GRPH_COMPRESS_PITCH_MASK 0x0001ffc0L
+#define GRPH_COMPRESS_PITCH__GRPH_COMPRESS_PITCH__SHIFT 0x00000006
+#define GRPH_COMPRESS_SURFACE_ADDRESS__GRPH_COMPRESS_SURFACE_ADDRESS_MASK 0xffffff00L
+#define GRPH_COMPRESS_SURFACE_ADDRESS__GRPH_COMPRESS_SURFACE_ADDRESS__SHIFT 0x00000008
+#define GRPH_COMPRESS_SURFACE_ADDRESS_HIGH__GRPH_COMPRESS_SURFACE_ADDRESS_HIGH_MASK 0x000000ffL
+#define GRPH_COMPRESS_SURFACE_ADDRESS_HIGH__GRPH_COMPRESS_SURFACE_ADDRESS_HIGH__SHIFT 0x00000000
+#define GRPH_CONTROL__GRPH_ADDRESS_TRANSLATION_ENABLE_MASK 0x00010000L
+#define GRPH_CONTROL__GRPH_ADDRESS_TRANSLATION_ENABLE__SHIFT 0x00000010
+#define GRPH_CONTROL__GRPH_ARRAY_MODE_MASK 0x00f00000L
+#define GRPH_CONTROL__GRPH_ARRAY_MODE__SHIFT 0x00000014
+#define GRPH_CONTROL__GRPH_BANK_HEIGHT_MASK 0x00001800L
+#define GRPH_CONTROL__GRPH_BANK_HEIGHT__SHIFT 0x0000000b
+#define GRPH_CONTROL__GRPH_BANK_WIDTH_MASK 0x000000c0L
+#define GRPH_CONTROL__GRPH_BANK_WIDTH__SHIFT 0x00000006
+#define GRPH_CONTROL__GRPH_COLOR_EXPANSION_MODE_MASK 0x80000000L
+#define GRPH_CONTROL__GRPH_COLOR_EXPANSION_MODE__SHIFT 0x0000001f
+#define GRPH_CONTROL__GRPH_DEPTH_MASK 0x00000003L
+#define GRPH_CONTROL__GRPH_DEPTH__SHIFT 0x00000000
+#define GRPH_CONTROL__GRPH_FORMAT_MASK 0x00000700L
+#define GRPH_CONTROL__GRPH_FORMAT__SHIFT 0x00000008
+#define GRPH_CONTROL__GRPH_MACRO_TILE_ASPECT_MASK 0x000c0000L
+#define GRPH_CONTROL__GRPH_MACRO_TILE_ASPECT__SHIFT 0x00000012
+#define GRPH_CONTROL__GRPH_NUM_BANKS_MASK 0x0000000cL
+#define GRPH_CONTROL__GRPH_NUM_BANKS__SHIFT 0x00000002
+#define GRPH_CONTROL__GRPH_PIPE_CONFIG_MASK 0x1f000000L
+#define GRPH_CONTROL__GRPH_PIPE_CONFIG__SHIFT 0x00000018
+#define GRPH_CONTROL__GRPH_PRIVILEGED_ACCESS_ENABLE_MASK 0x00020000L
+#define GRPH_CONTROL__GRPH_PRIVILEGED_ACCESS_ENABLE__SHIFT 0x00000011
+#define GRPH_CONTROL__GRPH_TILE_SPLIT_MASK 0x0000e000L
+#define GRPH_CONTROL__GRPH_TILE_SPLIT__SHIFT 0x0000000d
+#define GRPH_CONTROL__GRPH_Z_MASK 0x00000030L
+#define GRPH_CONTROL__GRPH_Z__SHIFT 0x00000004
+#define GRPH_DFQ_CONTROL__GRPH_DFQ_MIN_FREE_ENTRIES_MASK 0x00000700L
+#define GRPH_DFQ_CONTROL__GRPH_DFQ_MIN_FREE_ENTRIES__SHIFT 0x00000008
+#define GRPH_DFQ_CONTROL__GRPH_DFQ_RESET_MASK 0x00000001L
+#define GRPH_DFQ_CONTROL__GRPH_DFQ_RESET__SHIFT 0x00000000
+#define GRPH_DFQ_CONTROL__GRPH_DFQ_SIZE_MASK 0x00000070L
+#define GRPH_DFQ_CONTROL__GRPH_DFQ_SIZE__SHIFT 0x00000004
+#define GRPH_DFQ_STATUS__GRPH_DFQ_RESET_ACK_MASK 0x00000200L
+#define GRPH_DFQ_STATUS__GRPH_DFQ_RESET_ACK__SHIFT 0x00000009
+#define GRPH_DFQ_STATUS__GRPH_DFQ_RESET_FLAG_MASK 0x00000100L
+#define GRPH_DFQ_STATUS__GRPH_DFQ_RESET_FLAG__SHIFT 0x00000008
+#define GRPH_DFQ_STATUS__GRPH_PRIMARY_DFQ_NUM_ENTRIES_MASK 0x0000000fL
+#define GRPH_DFQ_STATUS__GRPH_PRIMARY_DFQ_NUM_ENTRIES__SHIFT 0x00000000
+#define GRPH_DFQ_STATUS__GRPH_SECONDARY_DFQ_NUM_ENTRIES_MASK 0x000000f0L
+#define GRPH_DFQ_STATUS__GRPH_SECONDARY_DFQ_NUM_ENTRIES__SHIFT 0x00000004
+#define GRPH_ENABLE__GRPH_ENABLE_MASK 0x00000001L
+#define GRPH_ENABLE__GRPH_ENABLE__SHIFT 0x00000000
+#define GRPH_FLIP_CONTROL__GRPH_SURFACE_UPDATE_H_RETRACE_EN_MASK 0x00000001L
+#define GRPH_FLIP_CONTROL__GRPH_SURFACE_UPDATE_H_RETRACE_EN__SHIFT 0x00000000
+#define GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK 0x00000001L
+#define GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK__SHIFT 0x00000000
+#define GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_TYPE_MASK 0x00000100L
+#define GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_TYPE__SHIFT 0x00000008
+#define GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_CLEAR_MASK 0x00000100L
+#define GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_CLEAR__SHIFT 0x00000008
+#define GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_OCCURRED_MASK 0x00000001L
+#define GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_OCCURRED__SHIFT 0x00000000
+#define GRPH_LUT_10BIT_BYPASS__GRPH_LUT_10BIT_BYPASS_DBL_BUF_EN_MASK 0x00010000L
+#define GRPH_LUT_10BIT_BYPASS__GRPH_LUT_10BIT_BYPASS_DBL_BUF_EN__SHIFT 0x00000010
+#define GRPH_LUT_10BIT_BYPASS__GRPH_LUT_10BIT_BYPASS_EN_MASK 0x00000100L
+#define GRPH_LUT_10BIT_BYPASS__GRPH_LUT_10BIT_BYPASS_EN__SHIFT 0x00000008
+#define GRPH_PITCH__GRPH_PITCH_MASK 0x00007fffL
+#define GRPH_PITCH__GRPH_PITCH__SHIFT 0x00000000
+#define GRPH_PRIMARY_SURFACE_ADDRESS__GRPH_PRIMARY_DFQ_ENABLE_MASK 0x00000001L
+#define GRPH_PRIMARY_SURFACE_ADDRESS__GRPH_PRIMARY_DFQ_ENABLE__SHIFT 0x00000000
+#define GRPH_PRIMARY_SURFACE_ADDRESS__GRPH_PRIMARY_SURFACE_ADDRESS_MASK 0xffffff00L
+#define GRPH_PRIMARY_SURFACE_ADDRESS__GRPH_PRIMARY_SURFACE_ADDRESS__SHIFT 0x00000008
+#define GRPH_PRIMARY_SURFACE_ADDRESS_HIGH__GRPH_PRIMARY_SURFACE_ADDRESS_HIGH_MASK 0x000000ffL
+#define GRPH_PRIMARY_SURFACE_ADDRESS_HIGH__GRPH_PRIMARY_SURFACE_ADDRESS_HIGH__SHIFT 0x00000000
+#define GRPH_SECONDARY_SURFACE_ADDRESS__GRPH_SECONDARY_DFQ_ENABLE_MASK 0x00000001L
+#define GRPH_SECONDARY_SURFACE_ADDRESS__GRPH_SECONDARY_DFQ_ENABLE__SHIFT 0x00000000
+#define GRPH_SECONDARY_SURFACE_ADDRESS__GRPH_SECONDARY_SURFACE_ADDRESS_MASK 0xffffff00L
+#define GRPH_SECONDARY_SURFACE_ADDRESS__GRPH_SECONDARY_SURFACE_ADDRESS__SHIFT 0x00000008
+#define GRPH_SECONDARY_SURFACE_ADDRESS_HIGH__GRPH_SECONDARY_SURFACE_ADDRESS_HIGH_MASK 0x000000ffL
+#define GRPH_SECONDARY_SURFACE_ADDRESS_HIGH__GRPH_SECONDARY_SURFACE_ADDRESS_HIGH__SHIFT 0x00000000
+#define GRPH_STEREOSYNC_FLIP__GRPH_PRIMARY_SURFACE_PENDING_MASK 0x00010000L
+#define GRPH_STEREOSYNC_FLIP__GRPH_PRIMARY_SURFACE_PENDING__SHIFT 0x00000010
+#define GRPH_STEREOSYNC_FLIP__GRPH_SECONDARY_SURFACE_PENDING_MASK 0x00020000L
+#define GRPH_STEREOSYNC_FLIP__GRPH_SECONDARY_SURFACE_PENDING__SHIFT 0x00000011
+#define GRPH_STEREOSYNC_FLIP__GRPH_STEREOSYNC_FLIP_EN_MASK 0x00000001L
+#define GRPH_STEREOSYNC_FLIP__GRPH_STEREOSYNC_FLIP_EN__SHIFT 0x00000000
+#define GRPH_STEREOSYNC_FLIP__GRPH_STEREOSYNC_FLIP_MODE_MASK 0x00000300L
+#define GRPH_STEREOSYNC_FLIP__GRPH_STEREOSYNC_FLIP_MODE__SHIFT 0x00000008
+#define GRPH_STEREOSYNC_FLIP__GRPH_STEREOSYNC_SELECT_DISABLE_MASK 0x10000000L
+#define GRPH_STEREOSYNC_FLIP__GRPH_STEREOSYNC_SELECT_DISABLE__SHIFT 0x0000001c
+#define GRPH_SURFACE_ADDRESS_HIGH_INUSE__GRPH_SURFACE_ADDRESS_HIGH_INUSE_MASK 0x000000ffL
+#define GRPH_SURFACE_ADDRESS_HIGH_INUSE__GRPH_SURFACE_ADDRESS_HIGH_INUSE__SHIFT 0x00000000
+#define GRPH_SURFACE_ADDRESS_INUSE__GRPH_SURFACE_ADDRESS_INUSE_MASK 0xffffff00L
+#define GRPH_SURFACE_ADDRESS_INUSE__GRPH_SURFACE_ADDRESS_INUSE__SHIFT 0x00000008
+#define GRPH_SURFACE_OFFSET_X__GRPH_SURFACE_OFFSET_X_MASK 0x00003fffL
+#define GRPH_SURFACE_OFFSET_X__GRPH_SURFACE_OFFSET_X__SHIFT 0x00000000
+#define GRPH_SURFACE_OFFSET_Y__GRPH_SURFACE_OFFSET_Y_MASK 0x00003fffL
+#define GRPH_SURFACE_OFFSET_Y__GRPH_SURFACE_OFFSET_Y__SHIFT 0x00000000
+#define GRPH_SWAP_CNTL__GRPH_ALPHA_CROSSBAR_MASK 0x00000c00L
+#define GRPH_SWAP_CNTL__GRPH_ALPHA_CROSSBAR__SHIFT 0x0000000a
+#define GRPH_SWAP_CNTL__GRPH_BLUE_CROSSBAR_MASK 0x00000300L
+#define GRPH_SWAP_CNTL__GRPH_BLUE_CROSSBAR__SHIFT 0x00000008
+#define GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP_MASK 0x00000003L
+#define GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT 0x00000000
+#define GRPH_SWAP_CNTL__GRPH_GREEN_CROSSBAR_MASK 0x000000c0L
+#define GRPH_SWAP_CNTL__GRPH_GREEN_CROSSBAR__SHIFT 0x00000006
+#define GRPH_SWAP_CNTL__GRPH_RED_CROSSBAR_MASK 0x00000030L
+#define GRPH_SWAP_CNTL__GRPH_RED_CROSSBAR__SHIFT 0x00000004
+#define GRPH_UPDATE__GRPH_MODE_DISABLE_MULTIPLE_UPDATE_MASK 0x01000000L
+#define GRPH_UPDATE__GRPH_MODE_DISABLE_MULTIPLE_UPDATE__SHIFT 0x00000018
+#define GRPH_UPDATE__GRPH_MODE_UPDATE_PENDING_MASK 0x00000001L
+#define GRPH_UPDATE__GRPH_MODE_UPDATE_PENDING__SHIFT 0x00000000
+#define GRPH_UPDATE__GRPH_MODE_UPDATE_TAKEN_MASK 0x00000002L
+#define GRPH_UPDATE__GRPH_MODE_UPDATE_TAKEN__SHIFT 0x00000001
+#define GRPH_UPDATE__GRPH_SURFACE_DISABLE_MULTIPLE_UPDATE_MASK 0x10000000L
+#define GRPH_UPDATE__GRPH_SURFACE_DISABLE_MULTIPLE_UPDATE__SHIFT 0x0000001c
+#define GRPH_UPDATE__GRPH_SURFACE_UPDATE_PENDING_MASK 0x00000004L
+#define GRPH_UPDATE__GRPH_SURFACE_UPDATE_PENDING__SHIFT 0x00000002
+#define GRPH_UPDATE__GRPH_SURFACE_UPDATE_TAKEN_MASK 0x00000008L
+#define GRPH_UPDATE__GRPH_SURFACE_UPDATE_TAKEN__SHIFT 0x00000003
+#define GRPH_UPDATE__GRPH_SURFACE_XDMA_PENDING_ENABLE_MASK 0x00000100L
+#define GRPH_UPDATE__GRPH_SURFACE_XDMA_PENDING_ENABLE__SHIFT 0x00000008
+#define GRPH_UPDATE__GRPH_UPDATE_LOCK_MASK 0x00010000L
+#define GRPH_UPDATE__GRPH_UPDATE_LOCK__SHIFT 0x00000010
+#define GRPH_X_END__GRPH_X_END_MASK 0x00007fffL
+#define GRPH_X_END__GRPH_X_END__SHIFT 0x00000000
+#define GRPH_X_START__GRPH_X_START_MASK 0x00003fffL
+#define GRPH_X_START__GRPH_X_START__SHIFT 0x00000000
+#define GRPH_Y_END__GRPH_Y_END_MASK 0x00007fffL
+#define GRPH_Y_END__GRPH_Y_END__SHIFT 0x00000000
+#define GRPH_Y_START__GRPH_Y_START_MASK 0x00003fffL
+#define GRPH_Y_START__GRPH_Y_START__SHIFT 0x00000000
+#define HDMI_ACR_32_0__HDMI_ACR_CTS_32_MASK 0xfffff000L
+#define HDMI_ACR_32_0__HDMI_ACR_CTS_32__SHIFT 0x0000000c
+#define HDMI_ACR_32_1__HDMI_ACR_N_32_MASK 0x000fffffL
+#define HDMI_ACR_32_1__HDMI_ACR_N_32__SHIFT 0x00000000
+#define HDMI_ACR_44_0__HDMI_ACR_CTS_44_MASK 0xfffff000L
+#define HDMI_ACR_44_0__HDMI_ACR_CTS_44__SHIFT 0x0000000c
+#define HDMI_ACR_44_1__HDMI_ACR_N_44_MASK 0x000fffffL
+#define HDMI_ACR_44_1__HDMI_ACR_N_44__SHIFT 0x00000000
+#define HDMI_ACR_48_0__HDMI_ACR_CTS_48_MASK 0xfffff000L
+#define HDMI_ACR_48_0__HDMI_ACR_CTS_48__SHIFT 0x0000000c
+#define HDMI_ACR_48_1__HDMI_ACR_N_48_MASK 0x000fffffL
+#define HDMI_ACR_48_1__HDMI_ACR_N_48__SHIFT 0x00000000
+#define HDMI_ACR_PACKET_CONTROL__HDMI_ACR_AUDIO_PRIORITY_MASK 0x80000000L
+#define HDMI_ACR_PACKET_CONTROL__HDMI_ACR_AUDIO_PRIORITY__SHIFT 0x0000001f
+#define HDMI_ACR_PACKET_CONTROL__HDMI_ACR_AUTO_SEND_MASK 0x00001000L
+#define HDMI_ACR_PACKET_CONTROL__HDMI_ACR_AUTO_SEND__SHIFT 0x0000000c
+#define HDMI_ACR_PACKET_CONTROL__HDMI_ACR_CONT_MASK 0x00000002L
+#define HDMI_ACR_PACKET_CONTROL__HDMI_ACR_CONT__SHIFT 0x00000001
+#define HDMI_ACR_PACKET_CONTROL__HDMI_ACR_N_MULTIPLE_MASK 0x00070000L
+#define HDMI_ACR_PACKET_CONTROL__HDMI_ACR_N_MULTIPLE__SHIFT 0x00000010
+#define HDMI_ACR_PACKET_CONTROL__HDMI_ACR_SELECT_MASK 0x00000030L
+#define HDMI_ACR_PACKET_CONTROL__HDMI_ACR_SELECT__SHIFT 0x00000004
+#define HDMI_ACR_PACKET_CONTROL__HDMI_ACR_SEND_MASK 0x00000001L
+#define HDMI_ACR_PACKET_CONTROL__HDMI_ACR_SEND__SHIFT 0x00000000
+#define HDMI_ACR_PACKET_CONTROL__HDMI_ACR_SOURCE_MASK 0x00000100L
+#define HDMI_ACR_PACKET_CONTROL__HDMI_ACR_SOURCE__SHIFT 0x00000008
+#define HDMI_ACR_STATUS_0__HDMI_ACR_CTS_MASK 0xfffff000L
+#define HDMI_ACR_STATUS_0__HDMI_ACR_CTS__SHIFT 0x0000000c
+#define HDMI_ACR_STATUS_1__HDMI_ACR_N_MASK 0x000fffffL
+#define HDMI_ACR_STATUS_1__HDMI_ACR_N__SHIFT 0x00000000
+#define HDMI_AUDIO_PACKET_CONTROL__HDMI_AUDIO_DELAY_EN_MASK 0x00000030L
+#define HDMI_AUDIO_PACKET_CONTROL__HDMI_AUDIO_DELAY_EN__SHIFT 0x00000004
+#define HDMI_AUDIO_PACKET_CONTROL__HDMI_AUDIO_PACKETS_PER_LINE_MASK 0x001f0000L
+#define HDMI_AUDIO_PACKET_CONTROL__HDMI_AUDIO_PACKETS_PER_LINE__SHIFT 0x00000010
+#define HDMI_AUDIO_PACKET_CONTROL__HDMI_AUDIO_SEND_MAX_PACKETS_MASK 0x00000100L
+#define HDMI_AUDIO_PACKET_CONTROL__HDMI_AUDIO_SEND_MAX_PACKETS__SHIFT 0x00000008
+#define HDMI_CONTROL__HDMI_DEEP_COLOR_DEPTH_MASK 0x30000000L
+#define HDMI_CONTROL__HDMI_DEEP_COLOR_DEPTH__SHIFT 0x0000001c
+#define HDMI_CONTROL__HDMI_DEEP_COLOR_ENABLE_MASK 0x01000000L
+#define HDMI_CONTROL__HDMI_DEEP_COLOR_ENABLE__SHIFT 0x00000018
+#define HDMI_CONTROL__HDMI_ERROR_ACK_MASK 0x00000100L
+#define HDMI_CONTROL__HDMI_ERROR_ACK__SHIFT 0x00000008
+#define HDMI_CONTROL__HDMI_ERROR_MASK_MASK 0x00000200L
+#define HDMI_CONTROL__HDMI_ERROR_MASK__SHIFT 0x00000009
+#define HDMI_CONTROL__HDMI_KEEPOUT_MODE_MASK 0x00000001L
+#define HDMI_CONTROL__HDMI_KEEPOUT_MODE__SHIFT 0x00000000
+#define HDMI_CONTROL__HDMI_PACKET_GEN_VERSION_MASK 0x00000010L
+#define HDMI_CONTROL__HDMI_PACKET_GEN_VERSION__SHIFT 0x00000004
+#define HDMI_GC__HDMI_DEFAULT_PHASE_MASK 0x00000010L
+#define HDMI_GC__HDMI_DEFAULT_PHASE__SHIFT 0x00000004
+#define HDMI_GC__HDMI_GC_AVMUTE_CONT_MASK 0x00000004L
+#define HDMI_GC__HDMI_GC_AVMUTE_CONT__SHIFT 0x00000002
+#define HDMI_GC__HDMI_GC_AVMUTE_MASK 0x00000001L
+#define HDMI_GC__HDMI_GC_AVMUTE__SHIFT 0x00000000
+#define HDMI_GC__HDMI_PACKING_PHASE_MASK 0x00000f00L
+#define HDMI_GC__HDMI_PACKING_PHASE_OVERRIDE_MASK 0x00001000L
+#define HDMI_GC__HDMI_PACKING_PHASE_OVERRIDE__SHIFT 0x0000000c
+#define HDMI_GC__HDMI_PACKING_PHASE__SHIFT 0x00000008
+#define HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC0_CONT_MASK 0x00000002L
+#define HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC0_CONT__SHIFT 0x00000001
+#define HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC0_LINE_MASK 0x003f0000L
+#define HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC0_LINE__SHIFT 0x00000010
+#define HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC0_SEND_MASK 0x00000001L
+#define HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC0_SEND__SHIFT 0x00000000
+#define HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC1_CONT_MASK 0x00000020L
+#define HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC1_CONT__SHIFT 0x00000005
+#define HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC1_LINE_MASK 0x3f000000L
+#define HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC1_LINE__SHIFT 0x00000018
+#define HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC1_SEND_MASK 0x00000010L
+#define HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC1_SEND__SHIFT 0x00000004
+#define HDMI_GENERIC_PACKET_CONTROL1__HDMI_GENERIC2_CONT_MASK 0x00000002L
+#define HDMI_GENERIC_PACKET_CONTROL1__HDMI_GENERIC2_CONT__SHIFT 0x00000001
+#define HDMI_GENERIC_PACKET_CONTROL1__HDMI_GENERIC2_LINE_MASK 0x003f0000L
+#define HDMI_GENERIC_PACKET_CONTROL1__HDMI_GENERIC2_LINE__SHIFT 0x00000010
+#define HDMI_GENERIC_PACKET_CONTROL1__HDMI_GENERIC2_SEND_MASK 0x00000001L
+#define HDMI_GENERIC_PACKET_CONTROL1__HDMI_GENERIC2_SEND__SHIFT 0x00000000
+#define HDMI_GENERIC_PACKET_CONTROL1__HDMI_GENERIC3_CONT_MASK 0x00000020L
+#define HDMI_GENERIC_PACKET_CONTROL1__HDMI_GENERIC3_CONT__SHIFT 0x00000005
+#define HDMI_GENERIC_PACKET_CONTROL1__HDMI_GENERIC3_LINE_MASK 0x3f000000L
+#define HDMI_GENERIC_PACKET_CONTROL1__HDMI_GENERIC3_LINE__SHIFT 0x00000018
+#define HDMI_GENERIC_PACKET_CONTROL1__HDMI_GENERIC3_SEND_MASK 0x00000010L
+#define HDMI_GENERIC_PACKET_CONTROL1__HDMI_GENERIC3_SEND__SHIFT 0x00000004
+#define HDMI_INFOFRAME_CONTROL0__HDMI_AUDIO_INFO_CONT_MASK 0x00000020L
+#define HDMI_INFOFRAME_CONTROL0__HDMI_AUDIO_INFO_CONT__SHIFT 0x00000005
+#define HDMI_INFOFRAME_CONTROL0__HDMI_AUDIO_INFO_SEND_MASK 0x00000010L
+#define HDMI_INFOFRAME_CONTROL0__HDMI_AUDIO_INFO_SEND__SHIFT 0x00000004
+#define HDMI_INFOFRAME_CONTROL0__HDMI_AVI_INFO_CONT_MASK 0x00000002L
+#define HDMI_INFOFRAME_CONTROL0__HDMI_AVI_INFO_CONT__SHIFT 0x00000001
+#define HDMI_INFOFRAME_CONTROL0__HDMI_AVI_INFO_SEND_MASK 0x00000001L
+#define HDMI_INFOFRAME_CONTROL0__HDMI_AVI_INFO_SEND__SHIFT 0x00000000
+#define HDMI_INFOFRAME_CONTROL0__HDMI_MPEG_INFO_CONT_MASK 0x00000200L
+#define HDMI_INFOFRAME_CONTROL0__HDMI_MPEG_INFO_CONT__SHIFT 0x00000009
+#define HDMI_INFOFRAME_CONTROL0__HDMI_MPEG_INFO_SEND_MASK 0x00000100L
+#define HDMI_INFOFRAME_CONTROL0__HDMI_MPEG_INFO_SEND__SHIFT 0x00000008
+#define HDMI_INFOFRAME_CONTROL1__HDMI_AUDIO_INFO_LINE_MASK 0x00003f00L
+#define HDMI_INFOFRAME_CONTROL1__HDMI_AUDIO_INFO_LINE__SHIFT 0x00000008
+#define HDMI_INFOFRAME_CONTROL1__HDMI_AVI_INFO_LINE_MASK 0x0000003fL
+#define HDMI_INFOFRAME_CONTROL1__HDMI_AVI_INFO_LINE__SHIFT 0x00000000
+#define HDMI_INFOFRAME_CONTROL1__HDMI_MPEG_INFO_LINE_MASK 0x003f0000L
+#define HDMI_INFOFRAME_CONTROL1__HDMI_MPEG_INFO_LINE__SHIFT 0x00000010
+#define HDMI_STATUS__HDMI_ACTIVE_AVMUTE_MASK 0x00000001L
+#define HDMI_STATUS__HDMI_ACTIVE_AVMUTE__SHIFT 0x00000000
+#define HDMI_STATUS__HDMI_AUDIO_PACKET_ERROR_MASK 0x00010000L
+#define HDMI_STATUS__HDMI_AUDIO_PACKET_ERROR__SHIFT 0x00000010
+#define HDMI_STATUS__HDMI_ERROR_INT_MASK 0x08000000L
+#define HDMI_STATUS__HDMI_ERROR_INT__SHIFT 0x0000001b
+#define HDMI_STATUS__HDMI_VBI_PACKET_ERROR_MASK 0x00100000L
+#define HDMI_STATUS__HDMI_VBI_PACKET_ERROR__SHIFT 0x00000014
+#define HDMI_VBI_PACKET_CONTROL__HDMI_GC_CONT_MASK 0x00000020L
+#define HDMI_VBI_PACKET_CONTROL__HDMI_GC_CONT__SHIFT 0x00000005
+#define HDMI_VBI_PACKET_CONTROL__HDMI_GC_SEND_MASK 0x00000010L
+#define HDMI_VBI_PACKET_CONTROL__HDMI_GC_SEND__SHIFT 0x00000004
+#define HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_CONT_MASK 0x00000200L
+#define HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_CONT__SHIFT 0x00000009
+#define HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_LINE_MASK 0x003f0000L
+#define HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_LINE__SHIFT 0x00000010
+#define HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_SEND_MASK 0x00000100L
+#define HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_SEND__SHIFT 0x00000008
+#define HDMI_VBI_PACKET_CONTROL__HDMI_NULL_SEND_MASK 0x00000001L
+#define HDMI_VBI_PACKET_CONTROL__HDMI_NULL_SEND__SHIFT 0x00000000
+#define IDDCCIF02_DBG_DCCIF_C__DBG_DCCIF_C_MASK 0xffffffffL
+#define IDDCCIF02_DBG_DCCIF_C__DBG_DCCIF_C__SHIFT 0x00000000
+#define IDDCCIF04_DBG_DCCIF_E__DBG_DCCIF_E_MASK 0xffffffffL
+#define IDDCCIF04_DBG_DCCIF_E__DBG_DCCIF_E__SHIFT 0x00000000
+#define IDDCCIF05_DBG_DCCIF_F__DBG_DCCIF_F_MASK 0xffffffffL
+#define IDDCCIF05_DBG_DCCIF_F__DBG_DCCIF_F__SHIFT 0x00000000
+#define INPUT_CSC_C11_C12__INPUT_CSC_C11_MASK 0x0000ffffL
+#define INPUT_CSC_C11_C12__INPUT_CSC_C11__SHIFT 0x00000000
+#define INPUT_CSC_C11_C12__INPUT_CSC_C12_MASK 0xffff0000L
+#define INPUT_CSC_C11_C12__INPUT_CSC_C12__SHIFT 0x00000010
+#define INPUT_CSC_C13_C14__INPUT_CSC_C13_MASK 0x0000ffffL
+#define INPUT_CSC_C13_C14__INPUT_CSC_C13__SHIFT 0x00000000
+#define INPUT_CSC_C13_C14__INPUT_CSC_C14_MASK 0xffff0000L
+#define INPUT_CSC_C13_C14__INPUT_CSC_C14__SHIFT 0x00000010
+#define INPUT_CSC_C21_C22__INPUT_CSC_C21_MASK 0x0000ffffL
+#define INPUT_CSC_C21_C22__INPUT_CSC_C21__SHIFT 0x00000000
+#define INPUT_CSC_C21_C22__INPUT_CSC_C22_MASK 0xffff0000L
+#define INPUT_CSC_C21_C22__INPUT_CSC_C22__SHIFT 0x00000010
+#define INPUT_CSC_C23_C24__INPUT_CSC_C23_MASK 0x0000ffffL
+#define INPUT_CSC_C23_C24__INPUT_CSC_C23__SHIFT 0x00000000
+#define INPUT_CSC_C23_C24__INPUT_CSC_C24_MASK 0xffff0000L
+#define INPUT_CSC_C23_C24__INPUT_CSC_C24__SHIFT 0x00000010
+#define INPUT_CSC_C31_C32__INPUT_CSC_C31_MASK 0x0000ffffL
+#define INPUT_CSC_C31_C32__INPUT_CSC_C31__SHIFT 0x00000000
+#define INPUT_CSC_C31_C32__INPUT_CSC_C32_MASK 0xffff0000L
+#define INPUT_CSC_C31_C32__INPUT_CSC_C32__SHIFT 0x00000010
+#define INPUT_CSC_C33_C34__INPUT_CSC_C33_MASK 0x0000ffffL
+#define INPUT_CSC_C33_C34__INPUT_CSC_C33__SHIFT 0x00000000
+#define INPUT_CSC_C33_C34__INPUT_CSC_C34_MASK 0xffff0000L
+#define INPUT_CSC_C33_C34__INPUT_CSC_C34__SHIFT 0x00000010
+#define INPUT_CSC_CONTROL__INPUT_CSC_GRPH_MODE_MASK 0x00000003L
+#define INPUT_CSC_CONTROL__INPUT_CSC_GRPH_MODE__SHIFT 0x00000000
+#define INPUT_CSC_CONTROL__INPUT_CSC_OVL_MODE_MASK 0x00000030L
+#define INPUT_CSC_CONTROL__INPUT_CSC_OVL_MODE__SHIFT 0x00000004
+#define INPUT_GAMMA_CONTROL__GRPH_INPUT_GAMMA_MODE_MASK 0x00000003L
+#define INPUT_GAMMA_CONTROL__GRPH_INPUT_GAMMA_MODE__SHIFT 0x00000000
+#define INPUT_GAMMA_CONTROL__OVL_INPUT_GAMMA_MODE_MASK 0x00000030L
+#define INPUT_GAMMA_CONTROL__OVL_INPUT_GAMMA_MODE__SHIFT 0x00000004
+#define KEY_CONTROL__GRPH_OVL_HALF_BLEND_MASK 0x10000000L
+#define KEY_CONTROL__GRPH_OVL_HALF_BLEND__SHIFT 0x0000001c
+#define KEY_CONTROL__KEY_MODE_MASK 0x00000006L
+#define KEY_CONTROL__KEY_MODE__SHIFT 0x00000001
+#define KEY_CONTROL__KEY_SELECT_MASK 0x00000001L
+#define KEY_CONTROL__KEY_SELECT__SHIFT 0x00000000
+#define KEY_RANGE_ALPHA__KEY_ALPHA_HIGH_MASK 0xffff0000L
+#define KEY_RANGE_ALPHA__KEY_ALPHA_HIGH__SHIFT 0x00000010
+#define KEY_RANGE_ALPHA__KEY_ALPHA_LOW_MASK 0x0000ffffL
+#define KEY_RANGE_ALPHA__KEY_ALPHA_LOW__SHIFT 0x00000000
+#define KEY_RANGE_BLUE__KEY_BLUE_HIGH_MASK 0xffff0000L
+#define KEY_RANGE_BLUE__KEY_BLUE_HIGH__SHIFT 0x00000010
+#define KEY_RANGE_BLUE__KEY_BLUE_LOW_MASK 0x0000ffffL
+#define KEY_RANGE_BLUE__KEY_BLUE_LOW__SHIFT 0x00000000
+#define KEY_RANGE_GREEN__KEY_GREEN_HIGH_MASK 0xffff0000L
+#define KEY_RANGE_GREEN__KEY_GREEN_HIGH__SHIFT 0x00000010
+#define KEY_RANGE_GREEN__KEY_GREEN_LOW_MASK 0x0000ffffL
+#define KEY_RANGE_GREEN__KEY_GREEN_LOW__SHIFT 0x00000000
+#define KEY_RANGE_RED__KEY_RED_HIGH_MASK 0xffff0000L
+#define KEY_RANGE_RED__KEY_RED_HIGH__SHIFT 0x00000010
+#define KEY_RANGE_RED__KEY_RED_LOW_MASK 0x0000ffffL
+#define KEY_RANGE_RED__KEY_RED_LOW__SHIFT 0x00000000
+#define LB_DEBUG2__LB_DEBUG2_MASK 0xffffffffL
+#define LB_DEBUG2__LB_DEBUG2__SHIFT 0x00000000
+#define LB_DEBUG__LB_DEBUG_MASK 0xffffffffL
+#define LB_DEBUG__LB_DEBUG__SHIFT 0x00000000
+#define LB_NO_OUTSTANDING_REQ_STATUS__LB_NO_OUTSTANDING_REQ_STAT_MASK 0x00000001L
+#define LB_NO_OUTSTANDING_REQ_STATUS__LB_NO_OUTSTANDING_REQ_STAT__SHIFT 0x00000000
+#define LB_SYNC_RESET_SEL__LB_SYNC_RESET_SEL2_MASK 0x00000010L
+#define LB_SYNC_RESET_SEL__LB_SYNC_RESET_SEL2__SHIFT 0x00000004
+#define LB_SYNC_RESET_SEL__LB_SYNC_RESET_SEL_MASK 0x00000003L
+#define LB_SYNC_RESET_SEL__LB_SYNC_RESET_SEL__SHIFT 0x00000000
+#define LB_TEST_DEBUG_DATA__LB_TEST_DEBUG_DATA_MASK 0xffffffffL
+#define LB_TEST_DEBUG_DATA__LB_TEST_DEBUG_DATA__SHIFT 0x00000000
+#define LB_TEST_DEBUG_INDEX__LB_TEST_DEBUG_INDEX_MASK 0x000000ffL
+#define LB_TEST_DEBUG_INDEX__LB_TEST_DEBUG_INDEX__SHIFT 0x00000000
+#define LB_TEST_DEBUG_INDEX__LB_TEST_DEBUG_WRITE_EN_MASK 0x00000100L
+#define LB_TEST_DEBUG_INDEX__LB_TEST_DEBUG_WRITE_EN__SHIFT 0x00000008
+#define LIGHT_SLEEP_CNTL__LIGHT_SLEEP_DIS_MASK 0x00000001L
+#define LIGHT_SLEEP_CNTL__LIGHT_SLEEP_DIS__SHIFT 0x00000000
+#define LIGHT_SLEEP_CNTL__MEM_SHUTDOWN_DIS_MASK 0x00000100L
+#define LIGHT_SLEEP_CNTL__MEM_SHUTDOWN_DIS__SHIFT 0x00000008
+#define LOW_POWER_TILING_CONTROL__LOW_POWER_TILING_ENABLE_MASK 0x00000001L
+#define LOW_POWER_TILING_CONTROL__LOW_POWER_TILING_ENABLE__SHIFT 0x00000000
+#define LOW_POWER_TILING_CONTROL__LOW_POWER_TILING_MODE_MASK 0x00000018L
+#define LOW_POWER_TILING_CONTROL__LOW_POWER_TILING_MODE__SHIFT 0x00000003
+#define LOW_POWER_TILING_CONTROL__LOW_POWER_TILING_NUM_BANKS_MASK 0x00000700L
+#define LOW_POWER_TILING_CONTROL__LOW_POWER_TILING_NUM_BANKS__SHIFT 0x00000008
+#define LOW_POWER_TILING_CONTROL__LOW_POWER_TILING_NUM_PIPES_MASK 0x000000e0L
+#define LOW_POWER_TILING_CONTROL__LOW_POWER_TILING_NUM_PIPES__SHIFT 0x00000005
+#define LOW_POWER_TILING_CONTROL__LOW_POWER_TILING_PIPE_INTERLEAVE_SIZE_MASK 0x00000800L
+#define LOW_POWER_TILING_CONTROL__LOW_POWER_TILING_PIPE_INTERLEAVE_SIZE__SHIFT 0x0000000b
+#define LOW_POWER_TILING_CONTROL__LOW_POWER_TILING_ROW_SIZE_MASK 0x00007000L
+#define LOW_POWER_TILING_CONTROL__LOW_POWER_TILING_ROW_SIZE__SHIFT 0x0000000c
+#define LOW_POWER_TILING_CONTROL__LOW_POWER_TILING_ROWS_PER_CHAN_MASK 0x0fff0000L
+#define LOW_POWER_TILING_CONTROL__LOW_POWER_TILING_ROWS_PER_CHAN__SHIFT 0x00000010
+#define LVDS_DATA_CNTL__LVDS_24BIT_ENABLE_MASK 0x00000001L
+#define LVDS_DATA_CNTL__LVDS_24BIT_ENABLE__SHIFT 0x00000000
+#define LVDS_DATA_CNTL__LVDS_24BIT_FORMAT_MASK 0x00000010L
+#define LVDS_DATA_CNTL__LVDS_24BIT_FORMAT__SHIFT 0x00000004
+#define LVDS_DATA_CNTL__LVDS_2ND_CHAN_DE_MASK 0x00000100L
+#define LVDS_DATA_CNTL__LVDS_2ND_CHAN_DE__SHIFT 0x00000008
+#define LVDS_DATA_CNTL__LVDS_2ND_CHAN_HS_MASK 0x00000400L
+#define LVDS_DATA_CNTL__LVDS_2ND_CHAN_HS__SHIFT 0x0000000a
+#define LVDS_DATA_CNTL__LVDS_2ND_CHAN_VS_MASK 0x00000200L
+#define LVDS_DATA_CNTL__LVDS_2ND_CHAN_VS__SHIFT 0x00000009
+#define LVDS_DATA_CNTL__LVDS_2ND_LINK_CNTL_BITS_MASK 0x00007000L
+#define LVDS_DATA_CNTL__LVDS_2ND_LINK_CNTL_BITS__SHIFT 0x0000000c
+#define LVDS_DATA_CNTL__LVDS_DTMG_POL_MASK 0x00040000L
+#define LVDS_DATA_CNTL__LVDS_DTMG_POL__SHIFT 0x00000012
+#define LVDS_DATA_CNTL__LVDS_FP_POL_MASK 0x00010000L
+#define LVDS_DATA_CNTL__LVDS_FP_POL__SHIFT 0x00000010
+#define LVDS_DATA_CNTL__LVDS_LP_POL_MASK 0x00020000L
+#define LVDS_DATA_CNTL__LVDS_LP_POL__SHIFT 0x00000011
+#define LVTMA_PWRSEQ_CNTL__LVTMA_BLON_MASK 0x01000000L
+#define LVTMA_PWRSEQ_CNTL__LVTMA_BLON_OVRD_MASK 0x02000000L
+#define LVTMA_PWRSEQ_CNTL__LVTMA_BLON_OVRD__SHIFT 0x00000019
+#define LVTMA_PWRSEQ_CNTL__LVTMA_BLON_POL_MASK 0x04000000L
+#define LVTMA_PWRSEQ_CNTL__LVTMA_BLON_POL__SHIFT 0x0000001a
+#define LVTMA_PWRSEQ_CNTL__LVTMA_BLON__SHIFT 0x00000018
+#define LVTMA_PWRSEQ_CNTL__LVTMA_DIGON_MASK 0x00010000L
+#define LVTMA_PWRSEQ_CNTL__LVTMA_DIGON_OVRD_MASK 0x00020000L
+#define LVTMA_PWRSEQ_CNTL__LVTMA_DIGON_OVRD__SHIFT 0x00000011
+#define LVTMA_PWRSEQ_CNTL__LVTMA_DIGON_POL_MASK 0x00040000L
+#define LVTMA_PWRSEQ_CNTL__LVTMA_DIGON_POL__SHIFT 0x00000012
+#define LVTMA_PWRSEQ_CNTL__LVTMA_DIGON__SHIFT 0x00000010
+#define LVTMA_PWRSEQ_CNTL__LVTMA_PWRSEQ_DISABLE_SYNCEN_CONTROL_OF_TX_EN_MASK 0x00000002L
+#define LVTMA_PWRSEQ_CNTL__LVTMA_PWRSEQ_DISABLE_SYNCEN_CONTROL_OF_TX_EN__SHIFT 0x00000001
+#define LVTMA_PWRSEQ_CNTL__LVTMA_PWRSEQ_EN_MASK 0x00000001L
+#define LVTMA_PWRSEQ_CNTL__LVTMA_PWRSEQ_EN__SHIFT 0x00000000
+#define LVTMA_PWRSEQ_CNTL__LVTMA_PWRSEQ_TARGET_STATE_MASK 0x00000010L
+#define LVTMA_PWRSEQ_CNTL__LVTMA_PWRSEQ_TARGET_STATE__SHIFT 0x00000004
+#define LVTMA_PWRSEQ_CNTL__LVTMA_SYNCEN_MASK 0x00000100L
+#define LVTMA_PWRSEQ_CNTL__LVTMA_SYNCEN_OVRD_MASK 0x00000200L
+#define LVTMA_PWRSEQ_CNTL__LVTMA_SYNCEN_OVRD__SHIFT 0x00000009
+#define LVTMA_PWRSEQ_CNTL__LVTMA_SYNCEN_POL_MASK 0x00000400L
+#define LVTMA_PWRSEQ_CNTL__LVTMA_SYNCEN_POL__SHIFT 0x0000000a
+#define LVTMA_PWRSEQ_CNTL__LVTMA_SYNCEN__SHIFT 0x00000008
+#define LVTMA_PWRSEQ_DELAY1__LVTMA_PWRDN_DELAY1_MASK 0x00ff0000L
+#define LVTMA_PWRSEQ_DELAY1__LVTMA_PWRDN_DELAY1__SHIFT 0x00000010
+#define LVTMA_PWRSEQ_DELAY1__LVTMA_PWRDN_DELAY2_MASK 0xff000000L
+#define LVTMA_PWRSEQ_DELAY1__LVTMA_PWRDN_DELAY2__SHIFT 0x00000018
+#define LVTMA_PWRSEQ_DELAY1__LVTMA_PWRUP_DELAY1_MASK 0x000000ffL
+#define LVTMA_PWRSEQ_DELAY1__LVTMA_PWRUP_DELAY1__SHIFT 0x00000000
+#define LVTMA_PWRSEQ_DELAY1__LVTMA_PWRUP_DELAY2_MASK 0x0000ff00L
+#define LVTMA_PWRSEQ_DELAY1__LVTMA_PWRUP_DELAY2__SHIFT 0x00000008
+#define LVTMA_PWRSEQ_DELAY2__LVTMA_PWRDN_DELAY3_MASK 0x00ff0000L
+#define LVTMA_PWRSEQ_DELAY2__LVTMA_PWRDN_DELAY3__SHIFT 0x00000010
+#define LVTMA_PWRSEQ_DELAY2__LVTMA_PWRDN_MIN_LENGTH_MASK 0x000000ffL
+#define LVTMA_PWRSEQ_DELAY2__LVTMA_PWRDN_MIN_LENGTH__SHIFT 0x00000000
+#define LVTMA_PWRSEQ_DELAY2__LVTMA_PWRUP_DELAY3_MASK 0x0000ff00L
+#define LVTMA_PWRSEQ_DELAY2__LVTMA_PWRUP_DELAY3__SHIFT 0x00000008
+#define LVTMA_PWRSEQ_DELAY2__LVTMA_VARY_BL_OVERRIDE_EN_MASK 0x01000000L
+#define LVTMA_PWRSEQ_DELAY2__LVTMA_VARY_BL_OVERRIDE_EN__SHIFT 0x00000018
+#define LVTMA_PWRSEQ_REF_DIV__BL_PWM_REF_DIV_MASK 0xffff0000L
+#define LVTMA_PWRSEQ_REF_DIV__BL_PWM_REF_DIV__SHIFT 0x00000010
+#define LVTMA_PWRSEQ_REF_DIV__LVTMA_PWRSEQ_REF_DIV_MASK 0x00000fffL
+#define LVTMA_PWRSEQ_REF_DIV__LVTMA_PWRSEQ_REF_DIV__SHIFT 0x00000000
+#define LVTMA_PWRSEQ_STATE__LVTMA_PWRSEQ_BLON_MASK 0x00000008L
+#define LVTMA_PWRSEQ_STATE__LVTMA_PWRSEQ_BLON__SHIFT 0x00000003
+#define LVTMA_PWRSEQ_STATE__LVTMA_PWRSEQ_DIGON_MASK 0x00000002L
+#define LVTMA_PWRSEQ_STATE__LVTMA_PWRSEQ_DIGON__SHIFT 0x00000001
+#define LVTMA_PWRSEQ_STATE__LVTMA_PWRSEQ_DONE_MASK 0x00000010L
+#define LVTMA_PWRSEQ_STATE__LVTMA_PWRSEQ_DONE__SHIFT 0x00000004
+#define LVTMA_PWRSEQ_STATE__LVTMA_PWRSEQ_STATE_MASK 0x00000f00L
+#define LVTMA_PWRSEQ_STATE__LVTMA_PWRSEQ_STATE__SHIFT 0x00000008
+#define LVTMA_PWRSEQ_STATE__LVTMA_PWRSEQ_SYNCEN_MASK 0x00000004L
+#define LVTMA_PWRSEQ_STATE__LVTMA_PWRSEQ_SYNCEN__SHIFT 0x00000002
+#define LVTMA_PWRSEQ_STATE__LVTMA_PWRSEQ_TARGET_STATE_R_MASK 0x00000001L
+#define LVTMA_PWRSEQ_STATE__LVTMA_PWRSEQ_TARGET_STATE_R__SHIFT 0x00000000
+#define MASTER_COMM_CMD_REG__MASTER_COMM_CMD_REG_BYTE0_MASK 0x000000ffL
+#define MASTER_COMM_CMD_REG__MASTER_COMM_CMD_REG_BYTE0__SHIFT 0x00000000
+#define MASTER_COMM_CMD_REG__MASTER_COMM_CMD_REG_BYTE1_MASK 0x0000ff00L
+#define MASTER_COMM_CMD_REG__MASTER_COMM_CMD_REG_BYTE1__SHIFT 0x00000008
+#define MASTER_COMM_CMD_REG__MASTER_COMM_CMD_REG_BYTE2_MASK 0x00ff0000L
+#define MASTER_COMM_CMD_REG__MASTER_COMM_CMD_REG_BYTE2__SHIFT 0x00000010
+#define MASTER_COMM_CMD_REG__MASTER_COMM_CMD_REG_BYTE3_MASK 0xff000000L
+#define MASTER_COMM_CMD_REG__MASTER_COMM_CMD_REG_BYTE3__SHIFT 0x00000018
+#define MASTER_COMM_CNTL_REG__MASTER_COMM_INTERRUPT_MASK 0x00000001L
+#define MASTER_COMM_CNTL_REG__MASTER_COMM_INTERRUPT__SHIFT 0x00000000
+#define MASTER_COMM_DATA_REG1__MASTER_COMM_DATA_REG1_BYTE0_MASK 0x000000ffL
+#define MASTER_COMM_DATA_REG1__MASTER_COMM_DATA_REG1_BYTE0__SHIFT 0x00000000
+#define MASTER_COMM_DATA_REG1__MASTER_COMM_DATA_REG1_BYTE1_MASK 0x0000ff00L
+#define MASTER_COMM_DATA_REG1__MASTER_COMM_DATA_REG1_BYTE1__SHIFT 0x00000008
+#define MASTER_COMM_DATA_REG1__MASTER_COMM_DATA_REG1_BYTE2_MASK 0x00ff0000L
+#define MASTER_COMM_DATA_REG1__MASTER_COMM_DATA_REG1_BYTE2__SHIFT 0x00000010
+#define MASTER_COMM_DATA_REG1__MASTER_COMM_DATA_REG1_BYTE3_MASK 0xff000000L
+#define MASTER_COMM_DATA_REG1__MASTER_COMM_DATA_REG1_BYTE3__SHIFT 0x00000018
+#define MASTER_COMM_DATA_REG2__MASTER_COMM_DATA_REG2_BYTE0_MASK 0x000000ffL
+#define MASTER_COMM_DATA_REG2__MASTER_COMM_DATA_REG2_BYTE0__SHIFT 0x00000000
+#define MASTER_COMM_DATA_REG2__MASTER_COMM_DATA_REG2_BYTE1_MASK 0x0000ff00L
+#define MASTER_COMM_DATA_REG2__MASTER_COMM_DATA_REG2_BYTE1__SHIFT 0x00000008
+#define MASTER_COMM_DATA_REG2__MASTER_COMM_DATA_REG2_BYTE2_MASK 0x00ff0000L
+#define MASTER_COMM_DATA_REG2__MASTER_COMM_DATA_REG2_BYTE2__SHIFT 0x00000010
+#define MASTER_COMM_DATA_REG2__MASTER_COMM_DATA_REG2_BYTE3_MASK 0xff000000L
+#define MASTER_COMM_DATA_REG2__MASTER_COMM_DATA_REG2_BYTE3__SHIFT 0x00000018
+#define MASTER_COMM_DATA_REG3__MASTER_COMM_DATA_REG3_BYTE0_MASK 0x000000ffL
+#define MASTER_COMM_DATA_REG3__MASTER_COMM_DATA_REG3_BYTE0__SHIFT 0x00000000
+#define MASTER_COMM_DATA_REG3__MASTER_COMM_DATA_REG3_BYTE1_MASK 0x0000ff00L
+#define MASTER_COMM_DATA_REG3__MASTER_COMM_DATA_REG3_BYTE1__SHIFT 0x00000008
+#define MASTER_COMM_DATA_REG3__MASTER_COMM_DATA_REG3_BYTE2_MASK 0x00ff0000L
+#define MASTER_COMM_DATA_REG3__MASTER_COMM_DATA_REG3_BYTE2__SHIFT 0x00000010
+#define MASTER_COMM_DATA_REG3__MASTER_COMM_DATA_REG3_BYTE3_MASK 0xff000000L
+#define MASTER_COMM_DATA_REG3__MASTER_COMM_DATA_REG3_BYTE3__SHIFT 0x00000018
+#define MASTER_UPDATE_LOCK__GSL_CONTROL_MASTER_UPDATE_LOCK_MASK 0x00000100L
+#define MASTER_UPDATE_LOCK__GSL_CONTROL_MASTER_UPDATE_LOCK__SHIFT 0x00000008
+#define MASTER_UPDATE_LOCK__MASTER_UPDATE_LOCK_MASK 0x00000001L
+#define MASTER_UPDATE_LOCK__MASTER_UPDATE_LOCK__SHIFT 0x00000000
+#define MASTER_UPDATE_MODE__MASTER_UPDATE_INTERLACED_MODE_MASK 0x00030000L
+#define MASTER_UPDATE_MODE__MASTER_UPDATE_INTERLACED_MODE__SHIFT 0x00000010
+#define MASTER_UPDATE_MODE__MASTER_UPDATE_MODE_MASK 0x00000007L
+#define MASTER_UPDATE_MODE__MASTER_UPDATE_MODE__SHIFT 0x00000000
+#define MC_DC_INTERFACE_NACK_STATUS__DMIF_RDRET_NACK_CLEAR_MASK 0x00000010L
+#define MC_DC_INTERFACE_NACK_STATUS__DMIF_RDRET_NACK_CLEAR__SHIFT 0x00000004
+#define MC_DC_INTERFACE_NACK_STATUS__DMIF_RDRET_NACK_OCCURRED_MASK 0x00000001L
+#define MC_DC_INTERFACE_NACK_STATUS__DMIF_RDRET_NACK_OCCURRED__SHIFT 0x00000000
+#define MC_DC_INTERFACE_NACK_STATUS__MCIF_RDRET_NACK_CLEAR_MASK 0x00100000L
+#define MC_DC_INTERFACE_NACK_STATUS__MCIF_RDRET_NACK_CLEAR__SHIFT 0x00000014
+#define MC_DC_INTERFACE_NACK_STATUS__MCIF_RDRET_NACK_OCCURRED_MASK 0x00010000L
+#define MC_DC_INTERFACE_NACK_STATUS__MCIF_RDRET_NACK_OCCURRED__SHIFT 0x00000010
+#define MC_DC_INTERFACE_NACK_STATUS__MCIF_WRRET_NACK_CLEAR_MASK 0x10000000L
+#define MC_DC_INTERFACE_NACK_STATUS__MCIF_WRRET_NACK_CLEAR__SHIFT 0x0000001c
+#define MC_DC_INTERFACE_NACK_STATUS__MCIF_WRRET_NACK_OCCURRED_MASK 0x01000000L
+#define MC_DC_INTERFACE_NACK_STATUS__MCIF_WRRET_NACK_OCCURRED__SHIFT 0x00000018
+#define MC_DC_INTERFACE_NACK_STATUS__VIP_WRRET_NACK_CLEAR_MASK 0x00001000L
+#define MC_DC_INTERFACE_NACK_STATUS__VIP_WRRET_NACK_CLEAR__SHIFT 0x0000000c
+#define MC_DC_INTERFACE_NACK_STATUS__VIP_WRRET_NACK_OCCURRED_MASK 0x00000100L
+#define MC_DC_INTERFACE_NACK_STATUS__VIP_WRRET_NACK_OCCURRED__SHIFT 0x00000008
+#define MCIF_CONTROL__ADDRESS_TRANSLATION_ENABLE_MASK 0x00000010L
+#define MCIF_CONTROL__ADDRESS_TRANSLATION_ENABLE__SHIFT 0x00000004
+#define MCIF_CONTROL__LOW_READ_URG_LEVEL_MASK 0x00ff0000L
+#define MCIF_CONTROL__LOW_READ_URG_LEVEL__SHIFT 0x00000010
+#define MCIF_CONTROL__MC_CLEAN_DEASSERT_LATENCY_MASK 0x3f000000L
+#define MCIF_CONTROL__MC_CLEAN_DEASSERT_LATENCY__SHIFT 0x00000018
+#define MCIF_CONTROL__MCIF_BUFF_SIZE_MASK 0x00000003L
+#define MCIF_CONTROL__MCIF_BUFF_SIZE__SHIFT 0x00000000
+#define MCIF_CONTROL__MCIF_MC_LATENCY_COUNTER_ENABLE_MASK 0x40000000L
+#define MCIF_CONTROL__MCIF_MC_LATENCY_COUNTER_ENABLE__SHIFT 0x0000001e
+#define MCIF_CONTROL__MCIF_MC_LATENCY_COUNTER_URGENT_ONLY_MASK 0x80000000L
+#define MCIF_CONTROL__MCIF_MC_LATENCY_COUNTER_URGENT_ONLY__SHIFT 0x0000001f
+#define MCIF_CONTROL__MCIF_SLOW_REQ_INTERVAL_MASK 0x0000f000L
+#define MCIF_CONTROL__MCIF_SLOW_REQ_INTERVAL__SHIFT 0x0000000c
+#define MCIF_CONTROL__PRIVILEGED_ACCESS_ENABLE_MASK 0x00000100L
+#define MCIF_CONTROL__PRIVILEGED_ACCESS_ENABLE__SHIFT 0x00000008
+#define MCIF_MEM_CONTROL__MCIFMEM_CACHE_MODE_DIS_MASK 0x00000001L
+#define MCIF_MEM_CONTROL__MCIFMEM_CACHE_MODE_DIS__SHIFT 0x00000000
+#define MCIF_MEM_CONTROL__MCIFMEM_CACHE_MODE_MASK 0x00000030L
+#define MCIF_MEM_CONTROL__MCIFMEM_CACHE_MODE__SHIFT 0x00000004
+#define MCIF_MEM_CONTROL__MCIFMEM_CACHE_PIPE_MASK 0x00070000L
+#define MCIF_MEM_CONTROL__MCIFMEM_CACHE_PIPE__SHIFT 0x00000010
+#define MCIF_MEM_CONTROL__MCIFMEM_CACHE_SIZE_MASK 0x00007f00L
+#define MCIF_MEM_CONTROL__MCIFMEM_CACHE_SIZE__SHIFT 0x00000008
+#define MCIF_MEM_CONTROL__MCIFMEM_CACHE_TYPE_MASK 0x00180000L
+#define MCIF_MEM_CONTROL__MCIFMEM_CACHE_TYPE__SHIFT 0x00000013
+#define MCIF_TEST_DEBUG_DATA__MCIF_TEST_DEBUG_DATA_MASK 0xffffffffL
+#define MCIF_TEST_DEBUG_DATA__MCIF_TEST_DEBUG_DATA__SHIFT 0x00000000
+#define MCIF_TEST_DEBUG_INDEX__MCIF_TEST_DEBUG_INDEX_MASK 0x000000ffL
+#define MCIF_TEST_DEBUG_INDEX__MCIF_TEST_DEBUG_INDEX__SHIFT 0x00000000
+#define MCIF_TEST_DEBUG_INDEX__MCIF_TEST_DEBUG_WRITE_EN_MASK 0x00000100L
+#define MCIF_TEST_DEBUG_INDEX__MCIF_TEST_DEBUG_WRITE_EN__SHIFT 0x00000008
+#define MCIF_VMID__MCIF_WR_VMID_MASK 0x0000000fL
+#define MCIF_VMID__MCIF_WR_VMID__SHIFT 0x00000000
+#define MCIF_VMID__VIP_WR_VMID_MASK 0x000000f0L
+#define MCIF_VMID__VIP_WR_VMID__SHIFT 0x00000004
+#define MCIF_WRITE_COMBINE_CONTROL__MCIF_WRITE_COMBINE_TIMEOUT_MASK 0x000000ffL
+#define MCIF_WRITE_COMBINE_CONTROL__MCIF_WRITE_COMBINE_TIMEOUT__SHIFT 0x00000000
+#define MCIF_WRITE_COMBINE_CONTROL__VIP_WRITE_COMBINE_TIMEOUT_MASK 0x0000ff00L
+#define MCIF_WRITE_COMBINE_CONTROL__VIP_WRITE_COMBINE_TIMEOUT__SHIFT 0x00000008
+#define MICROSECOND_TIME_BASE_DIV__MICROSECOND_TIME_BASE_CLOCK_SOURCE_SEL_MASK 0x00100000L
+#define MICROSECOND_TIME_BASE_DIV__MICROSECOND_TIME_BASE_CLOCK_SOURCE_SEL__SHIFT 0x00000014
+#define MICROSECOND_TIME_BASE_DIV__MICROSECOND_TIME_BASE_DIV_MASK 0x0000007fL
+#define MICROSECOND_TIME_BASE_DIV__MICROSECOND_TIME_BASE_DIV__SHIFT 0x00000000
+#define MICROSECOND_TIME_BASE_DIV__XTAL_REF_CLOCK_SOURCE_SEL_MASK 0x00020000L
+#define MICROSECOND_TIME_BASE_DIV__XTAL_REF_CLOCK_SOURCE_SEL__SHIFT 0x00000011
+#define MICROSECOND_TIME_BASE_DIV__XTAL_REF_DIV_MASK 0x00007f00L
+#define MICROSECOND_TIME_BASE_DIV__XTAL_REF_DIV__SHIFT 0x00000008
+#define MICROSECOND_TIME_BASE_DIV__XTAL_REF_SEL_MASK 0x00010000L
+#define MICROSECOND_TIME_BASE_DIV__XTAL_REF_SEL__SHIFT 0x00000010
+#define MILLISECOND_TIME_BASE_DIV__MILLISECOND_TIME_BASE_CLOCK_SOURCE_SEL_MASK 0x00100000L
+#define MILLISECOND_TIME_BASE_DIV__MILLISECOND_TIME_BASE_CLOCK_SOURCE_SEL__SHIFT 0x00000014
+#define MILLISECOND_TIME_BASE_DIV__MILLISECOND_TIME_BASE_DIV_MASK 0x0001ffffL
+#define MILLISECOND_TIME_BASE_DIV__MILLISECOND_TIME_BASE_DIV__SHIFT 0x00000000
+#define MVP_AFR_FLIP_FIFO_CNTL__MVP_AFR_FLIP_FIFO_NUM_ENTRIES_MASK 0x0000000fL
+#define MVP_AFR_FLIP_FIFO_CNTL__MVP_AFR_FLIP_FIFO_NUM_ENTRIES__SHIFT 0x00000000
+#define MVP_AFR_FLIP_FIFO_CNTL__MVP_AFR_FLIP_FIFO_RESET_ACK_MASK 0x00001000L
+#define MVP_AFR_FLIP_FIFO_CNTL__MVP_AFR_FLIP_FIFO_RESET_ACK__SHIFT 0x0000000c
+#define MVP_AFR_FLIP_FIFO_CNTL__MVP_AFR_FLIP_FIFO_RESET_FLAG_MASK 0x00000100L
+#define MVP_AFR_FLIP_FIFO_CNTL__MVP_AFR_FLIP_FIFO_RESET_FLAG__SHIFT 0x00000008
+#define MVP_AFR_FLIP_FIFO_CNTL__MVP_AFR_FLIP_FIFO_RESET_MASK 0x00000010L
+#define MVP_AFR_FLIP_FIFO_CNTL__MVP_AFR_FLIP_FIFO_RESET__SHIFT 0x00000004
+#define MVP_AFR_FLIP_MODE__MVP_AFR_FLIP_MODE_MASK 0x00000003L
+#define MVP_AFR_FLIP_MODE__MVP_AFR_FLIP_MODE__SHIFT 0x00000000
+#define MVP_BLACK_KEYER__MVP_BLACK_KEYER_B_MASK 0x3ff00000L
+#define MVP_BLACK_KEYER__MVP_BLACK_KEYER_B__SHIFT 0x00000014
+#define MVP_BLACK_KEYER__MVP_BLACK_KEYER_G_MASK 0x000ffc00L
+#define MVP_BLACK_KEYER__MVP_BLACK_KEYER_G__SHIFT 0x0000000a
+#define MVP_BLACK_KEYER__MVP_BLACK_KEYER_R_MASK 0x000003ffL
+#define MVP_BLACK_KEYER__MVP_BLACK_KEYER_R__SHIFT 0x00000000
+#define MVP_CONTROL1__MVP_30BPP_EN_MASK 0x10000000L
+#define MVP_CONTROL1__MVP_30BPP_EN__SHIFT 0x0000001c
+#define MVP_CONTROL1__MVP_ARBITRATION_MODE_FOR_AFR_MANUAL_SWITCH_MODE_MASK 0x00000400L
+#define MVP_CONTROL1__MVP_ARBITRATION_MODE_FOR_AFR_MANUAL_SWITCH_MODE__SHIFT 0x0000000a
+#define MVP_CONTROL1__MVP_CHANNEL_CONTROL_MASK 0x00010000L
+#define MVP_CONTROL1__MVP_CHANNEL_CONTROL__SHIFT 0x00000010
+#define MVP_CONTROL1__MVP_DISABLE_MSB_EXPAND_MASK 0x01000000L
+#define MVP_CONTROL1__MVP_DISABLE_MSB_EXPAND__SHIFT 0x00000018
+#define MVP_CONTROL1__MVP_EN_MASK 0x00000001L
+#define MVP_CONTROL1__MVP_EN__SHIFT 0x00000000
+#define MVP_CONTROL1__MVP_GPU_CHAIN_LOCATION_MASK 0x00300000L
+#define MVP_CONTROL1__MVP_GPU_CHAIN_LOCATION__SHIFT 0x00000014
+#define MVP_CONTROL1__MVP_MIXER_MODE_MASK 0x00000070L
+#define MVP_CONTROL1__MVP_MIXER_MODE__SHIFT 0x00000004
+#define MVP_CONTROL1__MVP_MIXER_SLAVE_SEL_DELAY_UNTIL_END_OF_BLANK_MASK 0x00000200L
+#define MVP_CONTROL1__MVP_MIXER_SLAVE_SEL_DELAY_UNTIL_END_OF_BLANK__SHIFT 0x00000009
+#define MVP_CONTROL1__MVP_MIXER_SLAVE_SEL_MASK 0x00000100L
+#define MVP_CONTROL1__MVP_MIXER_SLAVE_SEL__SHIFT 0x00000008
+#define MVP_CONTROL1__MVP_RATE_CONTROL_MASK 0x00001000L
+#define MVP_CONTROL1__MVP_RATE_CONTROL__SHIFT 0x0000000c
+#define MVP_CONTROL1__MVP_TERMINATION_CNTL_A_MASK 0x40000000L
+#define MVP_CONTROL1__MVP_TERMINATION_CNTL_A__SHIFT 0x0000001e
+#define MVP_CONTROL1__MVP_TERMINATION_CNTL_B_MASK 0x80000000L
+#define MVP_CONTROL1__MVP_TERMINATION_CNTL_B__SHIFT 0x0000001f
+#define MVP_CONTROL2__MVP_DVOCNTL_MUX_MASK 0x00010000L
+#define MVP_CONTROL2__MVP_DVOCNTL_MUX__SHIFT 0x00000010
+#define MVP_CONTROL2__MVP_FLOW_CONTROL_OUT_EN_MASK 0x00100000L
+#define MVP_CONTROL2__MVP_FLOW_CONTROL_OUT_EN__SHIFT 0x00000014
+#define MVP_CONTROL2__MVP_MUXA_CLK_SEL_MASK 0x00000100L
+#define MVP_CONTROL2__MVP_MUXA_CLK_SEL__SHIFT 0x00000008
+#define MVP_CONTROL2__MVP_MUXB_CLK_SEL_MASK 0x00001000L
+#define MVP_CONTROL2__MVP_MUXB_CLK_SEL__SHIFT 0x0000000c
+#define MVP_CONTROL2__MVP_MUX_DE_DVOCNTL0_SEL_MASK 0x00000001L
+#define MVP_CONTROL2__MVP_MUX_DE_DVOCNTL0_SEL__SHIFT 0x00000000
+#define MVP_CONTROL2__MVP_MUX_DE_DVOCNTL2_SEL_MASK 0x00000010L
+#define MVP_CONTROL2__MVP_MUX_DE_DVOCNTL2_SEL__SHIFT 0x00000004
+#define MVP_CONTROL2__MVP_SWAP_AB_IN_DC_DDR_MASK 0x10000000L
+#define MVP_CONTROL2__MVP_SWAP_AB_IN_DC_DDR__SHIFT 0x0000001c
+#define MVP_CONTROL2__MVP_SWAP_LOCK_OUT_EN_MASK 0x01000000L
+#define MVP_CONTROL2__MVP_SWAP_LOCK_OUT_EN__SHIFT 0x00000018
+#define MVP_CONTROL3__MVP_DDR_SC_AB_SEL_MASK 0x00000010L
+#define MVP_CONTROL3__MVP_DDR_SC_AB_SEL__SHIFT 0x00000004
+#define MVP_CONTROL3__MVP_DDR_SC_B_START_MODE_MASK 0x00000100L
+#define MVP_CONTROL3__MVP_DDR_SC_B_START_MODE__SHIFT 0x00000008
+#define MVP_CONTROL3__MVP_FLOW_CONTROL_CASCADE_EN_MASK 0x00100000L
+#define MVP_CONTROL3__MVP_FLOW_CONTROL_CASCADE_EN__SHIFT 0x00000014
+#define MVP_CONTROL3__MVP_FLOW_CONTROL_IN_CAP_MASK 0x10000000L
+#define MVP_CONTROL3__MVP_FLOW_CONTROL_IN_CAP__SHIFT 0x0000001c
+#define MVP_CONTROL3__MVP_FLOW_CONTROL_OUT_FORCE_ONE_MASK 0x00001000L
+#define MVP_CONTROL3__MVP_FLOW_CONTROL_OUT_FORCE_ONE__SHIFT 0x0000000c
+#define MVP_CONTROL3__MVP_FLOW_CONTROL_OUT_FORCE_ZERO_MASK 0x00010000L
+#define MVP_CONTROL3__MVP_FLOW_CONTROL_OUT_FORCE_ZERO__SHIFT 0x00000010
+#define MVP_CONTROL3__MVP_RESET_IN_BETWEEN_FRAMES_MASK 0x00000001L
+#define MVP_CONTROL3__MVP_RESET_IN_BETWEEN_FRAMES__SHIFT 0x00000000
+#define MVP_CONTROL3__MVP_SWAP_48BIT_EN_MASK 0x01000000L
+#define MVP_CONTROL3__MVP_SWAP_48BIT_EN__SHIFT 0x00000018
+#define MVP_CRC_CNTL__MVP_CRC_BLUE_MASK_MASK 0x000000ffL
+#define MVP_CRC_CNTL__MVP_CRC_BLUE_MASK__SHIFT 0x00000000
+#define MVP_CRC_CNTL__MVP_CRC_CONT_EN_MASK 0x20000000L
+#define MVP_CRC_CNTL__MVP_CRC_CONT_EN__SHIFT 0x0000001d
+#define MVP_CRC_CNTL__MVP_CRC_EN_MASK 0x10000000L
+#define MVP_CRC_CNTL__MVP_CRC_EN__SHIFT 0x0000001c
+#define MVP_CRC_CNTL__MVP_CRC_GREEN_MASK_MASK 0x0000ff00L
+#define MVP_CRC_CNTL__MVP_CRC_GREEN_MASK__SHIFT 0x00000008
+#define MVP_CRC_CNTL__MVP_CRC_RED_MASK_MASK 0x00ff0000L
+#define MVP_CRC_CNTL__MVP_CRC_RED_MASK__SHIFT 0x00000010
+#define MVP_CRC_CNTL__MVP_DC_DDR_CRC_EVEN_ODD_PIX_SEL_MASK 0x40000000L
+#define MVP_CRC_CNTL__MVP_DC_DDR_CRC_EVEN_ODD_PIX_SEL__SHIFT 0x0000001e
+#define MVP_CRC_RESULT_BLUE_GREEN__MVP_CRC_BLUE_RESULT_MASK 0x0000ffffL
+#define MVP_CRC_RESULT_BLUE_GREEN__MVP_CRC_BLUE_RESULT__SHIFT 0x00000000
+#define MVP_CRC_RESULT_BLUE_GREEN__MVP_CRC_GREEN_RESULT_MASK 0xffff0000L
+#define MVP_CRC_RESULT_BLUE_GREEN__MVP_CRC_GREEN_RESULT__SHIFT 0x00000010
+#define MVP_CRC_RESULT_RED__MVP_CRC_RED_RESULT_MASK 0x0000ffffL
+#define MVP_CRC_RESULT_RED__MVP_CRC_RED_RESULT__SHIFT 0x00000000
+#define MVP_DEBUG_05__IDE0_MVP_GPU_CHAIN_LOCATION_MASK 0x00000006L
+#define MVP_DEBUG_05__IDE0_MVP_GPU_CHAIN_LOCATION__SHIFT 0x00000001
+#define MVP_DEBUG_09__IDE4_CRTC2_MVP_GPU_CHAIN_LOCATION_MASK 0x00000006L
+#define MVP_DEBUG_09__IDE4_CRTC2_MVP_GPU_CHAIN_LOCATION__SHIFT 0x00000001
+#define MVP_DEBUG_12__IDEC_MVP_DATA_A_H_MASK 0x00000001L
+#define MVP_DEBUG_12__IDEC_MVP_DATA_A_H__SHIFT 0x00000000
+#define MVP_DEBUG_12__IDEC_MVP_DATA_A_MASK 0x01fffffeL
+#define MVP_DEBUG_12__IDEC_MVP_DATA_A__SHIFT 0x00000001
+#define MVP_DEBUG_13__IDED_MVP_DATA_B_H_MASK 0x00000001L
+#define MVP_DEBUG_13__IDED_MVP_DATA_B_H__SHIFT 0x00000000
+#define MVP_DEBUG_13__IDED_MVP_DATA_B_MASK 0x01fffffeL
+#define MVP_DEBUG_13__IDED_MVP_DATA_B__SHIFT 0x00000001
+#define MVP_DEBUG_13__IDED_READ_FIFO_ENTRY_DE_B_MASK 0x04000000L
+#define MVP_DEBUG_13__IDED_READ_FIFO_ENTRY_DE_B__SHIFT 0x0000001a
+#define MVP_DEBUG_13__IDED_START_READ_B_MASK 0x02000000L
+#define MVP_DEBUG_13__IDED_START_READ_B__SHIFT 0x00000019
+#define MVP_DEBUG_13__IDED_WRITE_ADD_B_MASK 0x38000000L
+#define MVP_DEBUG_13__IDED_WRITE_ADD_B__SHIFT 0x0000001b
+#define MVP_DEBUG_14__IDEE_CRC_PHASE_MASK 0x00100000L
+#define MVP_DEBUG_14__IDEE_CRC_PHASE__SHIFT 0x00000014
+#define MVP_DEBUG_14__IDEE_CRTC1_CNTL_CAPTURE_START_A_MASK 0x00080000L
+#define MVP_DEBUG_14__IDEE_CRTC1_CNTL_CAPTURE_START_A__SHIFT 0x00000013
+#define MVP_DEBUG_14__IDEE_READ_ADD_MASK 0x00000007L
+#define MVP_DEBUG_14__IDEE_READ_ADD__SHIFT 0x00000000
+#define MVP_DEBUG_14__IDEE_READ_FIFO_DE_B_MASK 0x00020000L
+#define MVP_DEBUG_14__IDEE_READ_FIFO_DE_B__SHIFT 0x00000011
+#define MVP_DEBUG_14__IDEE_READ_FIFO_DE_MASK 0x00010000L
+#define MVP_DEBUG_14__IDEE_READ_FIFO_DE__SHIFT 0x00000010
+#define MVP_DEBUG_14__IDEE_READ_FIFO_ENABLE_MASK 0x00040000L
+#define MVP_DEBUG_14__IDEE_READ_FIFO_ENABLE__SHIFT 0x00000012
+#define MVP_DEBUG_14__IDEE_READ_FIFO_ENTRY_DE_B_MASK 0x00008000L
+#define MVP_DEBUG_14__IDEE_READ_FIFO_ENTRY_DE_B__SHIFT 0x0000000f
+#define MVP_DEBUG_14__IDEE_READ_FIFO_ENTRY_DE_MASK 0x00004000L
+#define MVP_DEBUG_14__IDEE_READ_FIFO_ENTRY_DE__SHIFT 0x0000000e
+#define MVP_DEBUG_14__IDEE_START_INCR_WR_A_MASK 0x00000800L
+#define MVP_DEBUG_14__IDEE_START_INCR_WR_A__SHIFT 0x0000000b
+#define MVP_DEBUG_14__IDEE_START_INCR_WR_B_MASK 0x00001000L
+#define MVP_DEBUG_14__IDEE_START_INCR_WR_B__SHIFT 0x0000000c
+#define MVP_DEBUG_14__IDEE_START_READ_B_MASK 0x00000400L
+#define MVP_DEBUG_14__IDEE_START_READ_B__SHIFT 0x0000000a
+#define MVP_DEBUG_14__IDEE_START_READ_MASK 0x00000200L
+#define MVP_DEBUG_14__IDEE_START_READ__SHIFT 0x00000009
+#define MVP_DEBUG_14__IDEE_WRITE2FIFO_MASK 0x00002000L
+#define MVP_DEBUG_14__IDEE_WRITE2FIFO__SHIFT 0x0000000d
+#define MVP_DEBUG_14__IDEE_WRITE_ADD_A_MASK 0x00000038L
+#define MVP_DEBUG_14__IDEE_WRITE_ADD_A__SHIFT 0x00000003
+#define MVP_DEBUG_14__IDEE_WRITE_ADD_B_MASK 0x000001c0L
+#define MVP_DEBUG_14__IDEE_WRITE_ADD_B__SHIFT 0x00000006
+#define MVP_DEBUG_15__IDEF_MVP_ASYNC_FIFO_WDATA_MASK 0xfffffff0L
+#define MVP_DEBUG_15__IDEF_MVP_ASYNC_FIFO_WDATA__SHIFT 0x00000004
+#define MVP_DEBUG_15__IDEF_MVP_ASYNC_FIFO_WEN_MASK 0x00000001L
+#define MVP_DEBUG_15__IDEF_MVP_ASYNC_FIFO_WEN__SHIFT 0x00000000
+#define MVP_DEBUG_16__IDCC_FLOW_CONTROL_OUT_MASK 0x00000008L
+#define MVP_DEBUG_16__IDCC_FLOW_CONTROL_OUT__SHIFT 0x00000003
+#define MVP_DEBUG_16__IDCC_MVP_ASYNC_FIFO_EXCEED_PAUSE_LEVEL_MASK 0x00000004L
+#define MVP_DEBUG_16__IDCC_MVP_ASYNC_FIFO_EXCEED_PAUSE_LEVEL__SHIFT 0x00000002
+#define MVP_DEBUG_16__IDCC_MVP_ASYNC_FIFO_EXCEED_STOP_LEVEL_MASK 0x00000002L
+#define MVP_DEBUG_16__IDCC_MVP_ASYNC_FIFO_EXCEED_STOP_LEVEL__SHIFT 0x00000001
+#define MVP_DEBUG_16__IDCC_MVP_ASYNC_FIFO_NUM_ENTRIES_MASK 0x00000ff0L
+#define MVP_DEBUG_16__IDCC_MVP_ASYNC_FIFO_NUM_ENTRIES__SHIFT 0x00000004
+#define MVP_DEBUG_16__IDCC_MVP_ASYNC_FIFO_OVERFLOW_MASK 0x00001000L
+#define MVP_DEBUG_16__IDCC_MVP_ASYNC_FIFO_OVERFLOW__SHIFT 0x0000000c
+#define MVP_DEBUG_16__IDCC_MVP_ASYNC_FIFO_READ_MASK 0x00000001L
+#define MVP_DEBUG_16__IDCC_MVP_ASYNC_FIFO_READ__SHIFT 0x00000000
+#define MVP_DEBUG_16__IDCC_MVP_ASYNC_FIFO_UNDERFLOW_MASK 0x00002000L
+#define MVP_DEBUG_16__IDCC_MVP_ASYNC_FIFO_UNDERFLOW__SHIFT 0x0000000d
+#define MVP_DEBUG_16__IDCC_MVP_ASYNC_READ_ADDR_MASK 0x00ff0000L
+#define MVP_DEBUG_16__IDCC_MVP_ASYNC_READ_ADDR__SHIFT 0x00000010
+#define MVP_DEBUG_16__IDCC_MVP_ASYNC_WRITE_ADDR_MASK 0xff000000L
+#define MVP_DEBUG_16__IDCC_MVP_ASYNC_WRITE_ADDR__SHIFT 0x00000018
+#define MVP_DEBUG_17__IDCD_MVP_ASYNC_FIFO_PHASE_MASK 0x00000002L
+#define MVP_DEBUG_17__IDCD_MVP_ASYNC_FIFO_PHASE__SHIFT 0x00000001
+#define MVP_DEBUG_17__IDCD_MVP_ASYNC_FIFO_READ_DATA_MASK 0xfffffffcL
+#define MVP_DEBUG_17__IDCD_MVP_ASYNC_FIFO_READ_DATA__SHIFT 0x00000002
+#define MVP_DEBUG_17__IDCD_MVP_ASYNC_FIFO_READ_MASK 0x00000001L
+#define MVP_DEBUG_17__IDCD_MVP_ASYNC_FIFO_READ__SHIFT 0x00000000
+#define MVP_DEBUG__MVP_DEBUG_BITS_MASK 0xffffff00L
+#define MVP_DEBUG__MVP_DEBUG_BITS__SHIFT 0x00000008
+#define MVP_DEBUG__MVP_DIS_FIX_AFR_AUTO_VSYNC_FLIP_MASK 0x00000020L
+#define MVP_DEBUG__MVP_DIS_FIX_AFR_AUTO_VSYNC_FLIP__SHIFT 0x00000005
+#define MVP_DEBUG__MVP_DIS_FIX_AFR_MANUAL_HSYNC_FLIP_MASK 0x00000010L
+#define MVP_DEBUG__MVP_DIS_FIX_AFR_MANUAL_HSYNC_FLIP__SHIFT 0x00000004
+#define MVP_DEBUG__MVP_DIS_READ_POINTER_RESET_DELAY_MASK 0x00000080L
+#define MVP_DEBUG__MVP_DIS_READ_POINTER_RESET_DELAY__SHIFT 0x00000007
+#define MVP_DEBUG__MVP_EN_FIX_AFR_MANUAL_SWITCH_IN_SFR_MASK 0x00000040L
+#define MVP_DEBUG__MVP_EN_FIX_AFR_MANUAL_SWITCH_IN_SFR__SHIFT 0x00000006
+#define MVP_DEBUG__MVP_FLOW_CONTROL_IN_EN_MASK 0x00000002L
+#define MVP_DEBUG__MVP_FLOW_CONTROL_IN_EN__SHIFT 0x00000001
+#define MVP_DEBUG__MVP_FLOW_CONTROL_IN_SEL_MASK 0x00000008L
+#define MVP_DEBUG__MVP_FLOW_CONTROL_IN_SEL__SHIFT 0x00000003
+#define MVP_DEBUG__MVP_SWAP_LOCK_IN_EN_MASK 0x00000001L
+#define MVP_DEBUG__MVP_SWAP_LOCK_IN_EN__SHIFT 0x00000000
+#define MVP_DEBUG__MVP_SWAP_LOCK_IN_SEL_MASK 0x00000004L
+#define MVP_DEBUG__MVP_SWAP_LOCK_IN_SEL__SHIFT 0x00000002
+#define MVP_FIFO_CONTROL__MVP_PAUSE_SLAVE_CNT_MASK 0x00ff0000L
+#define MVP_FIFO_CONTROL__MVP_PAUSE_SLAVE_CNT__SHIFT 0x00000010
+#define MVP_FIFO_CONTROL__MVP_PAUSE_SLAVE_WM_MASK 0x0000ff00L
+#define MVP_FIFO_CONTROL__MVP_PAUSE_SLAVE_WM__SHIFT 0x00000008
+#define MVP_FIFO_CONTROL__MVP_STOP_SLAVE_WM_MASK 0x000000ffL
+#define MVP_FIFO_CONTROL__MVP_STOP_SLAVE_WM__SHIFT 0x00000000
+#define MVP_FIFO_STATUS__MVP_FIFO_ERROR_INT_STATUS_MASK 0x80000000L
+#define MVP_FIFO_STATUS__MVP_FIFO_ERROR_INT_STATUS__SHIFT 0x0000001f
+#define MVP_FIFO_STATUS__MVP_FIFO_ERROR_MASK_MASK 0x40000000L
+#define MVP_FIFO_STATUS__MVP_FIFO_ERROR_MASK__SHIFT 0x0000001e
+#define MVP_FIFO_STATUS__MVP_FIFO_LEVEL_MASK 0x000000ffL
+#define MVP_FIFO_STATUS__MVP_FIFO_LEVEL__SHIFT 0x00000000
+#define MVP_FIFO_STATUS__MVP_FIFO_OVERFLOW_ACK_MASK 0x00010000L
+#define MVP_FIFO_STATUS__MVP_FIFO_OVERFLOW_ACK__SHIFT 0x00000010
+#define MVP_FIFO_STATUS__MVP_FIFO_OVERFLOW_MASK 0x00000100L
+#define MVP_FIFO_STATUS__MVP_FIFO_OVERFLOW_OCCURRED_MASK 0x00001000L
+#define MVP_FIFO_STATUS__MVP_FIFO_OVERFLOW_OCCURRED__SHIFT 0x0000000c
+#define MVP_FIFO_STATUS__MVP_FIFO_OVERFLOW__SHIFT 0x00000008
+#define MVP_FIFO_STATUS__MVP_FIFO_UNDERFLOW_ACK_MASK 0x10000000L
+#define MVP_FIFO_STATUS__MVP_FIFO_UNDERFLOW_ACK__SHIFT 0x0000001c
+#define MVP_FIFO_STATUS__MVP_FIFO_UNDERFLOW_MASK 0x00100000L
+#define MVP_FIFO_STATUS__MVP_FIFO_UNDERFLOW_OCCURRED_MASK 0x01000000L
+#define MVP_FIFO_STATUS__MVP_FIFO_UNDERFLOW_OCCURRED__SHIFT 0x00000018
+#define MVP_FIFO_STATUS__MVP_FIFO_UNDERFLOW__SHIFT 0x00000014
+#define MVP_FLIP_LINE_NUM_INSERT__MVP_FLIP_AUTO_ENABLE_MASK 0x40000000L
+#define MVP_FLIP_LINE_NUM_INSERT__MVP_FLIP_AUTO_ENABLE__SHIFT 0x0000001e
+#define MVP_FLIP_LINE_NUM_INSERT__MVP_FLIP_LINE_NUM_INSERT_MASK 0x007fff00L
+#define MVP_FLIP_LINE_NUM_INSERT__MVP_FLIP_LINE_NUM_INSERT_MODE_MASK 0x00000003L
+#define MVP_FLIP_LINE_NUM_INSERT__MVP_FLIP_LINE_NUM_INSERT_MODE__SHIFT 0x00000000
+#define MVP_FLIP_LINE_NUM_INSERT__MVP_FLIP_LINE_NUM_INSERT__SHIFT 0x00000008
+#define MVP_FLIP_LINE_NUM_INSERT__MVP_FLIP_LINE_NUM_OFFSET_MASK 0x3f000000L
+#define MVP_FLIP_LINE_NUM_INSERT__MVP_FLIP_LINE_NUM_OFFSET__SHIFT 0x00000018
+#define MVP_INBAND_CNTL_CAP__MVP_IGNOR_INBAND_CNTL_MASK 0x00000001L
+#define MVP_INBAND_CNTL_CAP__MVP_IGNOR_INBAND_CNTL__SHIFT 0x00000000
+#define MVP_INBAND_CNTL_CAP__MVP_INBAND_CNTL_CHAR_CAP_MASK 0xffffff00L
+#define MVP_INBAND_CNTL_CAP__MVP_INBAND_CNTL_CHAR_CAP__SHIFT 0x00000008
+#define MVP_INBAND_CNTL_CAP__MVP_PASSING_INBAND_CNTL_EN_MASK 0x00000010L
+#define MVP_INBAND_CNTL_CAP__MVP_PASSING_INBAND_CNTL_EN__SHIFT 0x00000004
+#define MVP_RECEIVE_CNT_CNTL1__MVP_SLAVE_DATA_CHK_EN_MASK 0x80000000L
+#define MVP_RECEIVE_CNT_CNTL1__MVP_SLAVE_DATA_CHK_EN__SHIFT 0x0000001f
+#define MVP_RECEIVE_CNT_CNTL1__MVP_SLAVE_LINE_ERROR_CNT_MASK 0x1fff0000L
+#define MVP_RECEIVE_CNT_CNTL1__MVP_SLAVE_LINE_ERROR_CNT__SHIFT 0x00000010
+#define MVP_RECEIVE_CNT_CNTL1__MVP_SLAVE_PIXEL_ERROR_CNT_MASK 0x00001fffL
+#define MVP_RECEIVE_CNT_CNTL1__MVP_SLAVE_PIXEL_ERROR_CNT__SHIFT 0x00000000
+#define MVP_RECEIVE_CNT_CNTL2__MVP_SLAVE_FRAME_ERROR_CNT_MASK 0x00001fffL
+#define MVP_RECEIVE_CNT_CNTL2__MVP_SLAVE_FRAME_ERROR_CNT_RESET_MASK 0x80000000L
+#define MVP_RECEIVE_CNT_CNTL2__MVP_SLAVE_FRAME_ERROR_CNT_RESET__SHIFT 0x0000001f
+#define MVP_RECEIVE_CNT_CNTL2__MVP_SLAVE_FRAME_ERROR_CNT__SHIFT 0x00000000
+#define MVP_SLAVE_STATUS__MVP_SLAVE_LINES_PER_FRAME_RCVED_MASK 0x1fff0000L
+#define MVP_SLAVE_STATUS__MVP_SLAVE_LINES_PER_FRAME_RCVED__SHIFT 0x00000010
+#define MVP_SLAVE_STATUS__MVP_SLAVE_PIXELS_PER_LINE_RCVED_MASK 0x00001fffL
+#define MVP_SLAVE_STATUS__MVP_SLAVE_PIXELS_PER_LINE_RCVED__SHIFT 0x00000000
+#define MVP_TEST_DEBUG_DATA__MVP_TEST_DEBUG_DATA_MASK 0xffffffffL
+#define MVP_TEST_DEBUG_DATA__MVP_TEST_DEBUG_DATA__SHIFT 0x00000000
+#define MVP_TEST_DEBUG_INDEX__MVP_TEST_DEBUG_INDEX_MASK 0x000000ffL
+#define MVP_TEST_DEBUG_INDEX__MVP_TEST_DEBUG_INDEX__SHIFT 0x00000000
+#define MVP_TEST_DEBUG_INDEX__MVP_TEST_DEBUG_WRITE_EN_MASK 0x00000100L
+#define MVP_TEST_DEBUG_INDEX__MVP_TEST_DEBUG_WRITE_EN__SHIFT 0x00000008
+#define OUTPUT_CSC_C11_C12__OUTPUT_CSC_C11_MASK 0x0000ffffL
+#define OUTPUT_CSC_C11_C12__OUTPUT_CSC_C11__SHIFT 0x00000000
+#define OUTPUT_CSC_C11_C12__OUTPUT_CSC_C12_MASK 0xffff0000L
+#define OUTPUT_CSC_C11_C12__OUTPUT_CSC_C12__SHIFT 0x00000010
+#define OUTPUT_CSC_C13_C14__OUTPUT_CSC_C13_MASK 0x0000ffffL
+#define OUTPUT_CSC_C13_C14__OUTPUT_CSC_C13__SHIFT 0x00000000
+#define OUTPUT_CSC_C13_C14__OUTPUT_CSC_C14_MASK 0xffff0000L
+#define OUTPUT_CSC_C13_C14__OUTPUT_CSC_C14__SHIFT 0x00000010
+#define OUTPUT_CSC_C21_C22__OUTPUT_CSC_C21_MASK 0x0000ffffL
+#define OUTPUT_CSC_C21_C22__OUTPUT_CSC_C21__SHIFT 0x00000000
+#define OUTPUT_CSC_C21_C22__OUTPUT_CSC_C22_MASK 0xffff0000L
+#define OUTPUT_CSC_C21_C22__OUTPUT_CSC_C22__SHIFT 0x00000010
+#define OUTPUT_CSC_C23_C24__OUTPUT_CSC_C23_MASK 0x0000ffffL
+#define OUTPUT_CSC_C23_C24__OUTPUT_CSC_C23__SHIFT 0x00000000
+#define OUTPUT_CSC_C23_C24__OUTPUT_CSC_C24_MASK 0xffff0000L
+#define OUTPUT_CSC_C23_C24__OUTPUT_CSC_C24__SHIFT 0x00000010
+#define OUTPUT_CSC_C31_C32__OUTPUT_CSC_C31_MASK 0x0000ffffL
+#define OUTPUT_CSC_C31_C32__OUTPUT_CSC_C31__SHIFT 0x00000000
+#define OUTPUT_CSC_C31_C32__OUTPUT_CSC_C32_MASK 0xffff0000L
+#define OUTPUT_CSC_C31_C32__OUTPUT_CSC_C32__SHIFT 0x00000010
+#define OUTPUT_CSC_C33_C34__OUTPUT_CSC_C33_MASK 0x0000ffffL
+#define OUTPUT_CSC_C33_C34__OUTPUT_CSC_C33__SHIFT 0x00000000
+#define OUTPUT_CSC_C33_C34__OUTPUT_CSC_C34_MASK 0xffff0000L
+#define OUTPUT_CSC_C33_C34__OUTPUT_CSC_C34__SHIFT 0x00000010
+#define OUTPUT_CSC_CONTROL__OUTPUT_CSC_GRPH_MODE_MASK 0x00000007L
+#define OUTPUT_CSC_CONTROL__OUTPUT_CSC_GRPH_MODE__SHIFT 0x00000000
+#define OUTPUT_CSC_CONTROL__OUTPUT_CSC_OVL_MODE_MASK 0x00000070L
+#define OUTPUT_CSC_CONTROL__OUTPUT_CSC_OVL_MODE__SHIFT 0x00000004
+#define OUT_ROUND_CONTROL__OUT_ROUND_TRUNC_MODE_MASK 0x0000000fL
+#define OUT_ROUND_CONTROL__OUT_ROUND_TRUNC_MODE__SHIFT 0x00000000
+#define OVL_CONTROL1__OVL_ADDRESS_TRANSLATION_ENABLE_MASK 0x00010000L
+#define OVL_CONTROL1__OVL_ADDRESS_TRANSLATION_ENABLE__SHIFT 0x00000010
+#define OVL_CONTROL1__OVL_ARRAY_MODE_MASK 0x00f00000L
+#define OVL_CONTROL1__OVL_ARRAY_MODE__SHIFT 0x00000014
+#define OVL_CONTROL1__OVL_BANK_HEIGHT_MASK 0x00001800L
+#define OVL_CONTROL1__OVL_BANK_HEIGHT__SHIFT 0x0000000b
+#define OVL_CONTROL1__OVL_BANK_WIDTH_MASK 0x000000c0L
+#define OVL_CONTROL1__OVL_BANK_WIDTH__SHIFT 0x00000006
+#define OVL_CONTROL1__OVL_COLOR_EXPANSION_MODE_MASK 0x01000000L
+#define OVL_CONTROL1__OVL_COLOR_EXPANSION_MODE__SHIFT 0x00000018
+#define OVL_CONTROL1__OVL_DEPTH_MASK 0x00000003L
+#define OVL_CONTROL1__OVL_DEPTH__SHIFT 0x00000000
+#define OVL_CONTROL1__OVL_FORMAT_MASK 0x00000700L
+#define OVL_CONTROL1__OVL_FORMAT__SHIFT 0x00000008
+#define OVL_CONTROL1__OVL_MACRO_TILE_ASPECT_MASK 0x000c0000L
+#define OVL_CONTROL1__OVL_MACRO_TILE_ASPECT__SHIFT 0x00000012
+#define OVL_CONTROL1__OVL_NUM_BANKS_MASK 0x0000000cL
+#define OVL_CONTROL1__OVL_NUM_BANKS__SHIFT 0x00000002
+#define OVL_CONTROL1__OVL_PIPE_CONFIG_MASK 0x3e000000L
+#define OVL_CONTROL1__OVL_PIPE_CONFIG__SHIFT 0x00000019
+#define OVL_CONTROL1__OVL_PRIVILEGED_ACCESS_ENABLE_MASK 0x00020000L
+#define OVL_CONTROL1__OVL_PRIVILEGED_ACCESS_ENABLE__SHIFT 0x00000011
+#define OVL_CONTROL1__OVL_TILE_SPLIT_MASK 0x0000e000L
+#define OVL_CONTROL1__OVL_TILE_SPLIT__SHIFT 0x0000000d
+#define OVL_CONTROL1__OVL_Z_MASK 0x00000030L
+#define OVL_CONTROL1__OVL_Z__SHIFT 0x00000004
+#define OVL_CONTROL2__OVL_HALF_RESOLUTION_ENABLE_MASK 0x00000001L
+#define OVL_CONTROL2__OVL_HALF_RESOLUTION_ENABLE__SHIFT 0x00000000
+#define OVL_DFQ_CONTROL__OVL_DFQ_MIN_FREE_ENTRIES_MASK 0x00000700L
+#define OVL_DFQ_CONTROL__OVL_DFQ_MIN_FREE_ENTRIES__SHIFT 0x00000008
+#define OVL_DFQ_CONTROL__OVL_DFQ_RESET_MASK 0x00000001L
+#define OVL_DFQ_CONTROL__OVL_DFQ_RESET__SHIFT 0x00000000
+#define OVL_DFQ_CONTROL__OVL_DFQ_SIZE_MASK 0x00000070L
+#define OVL_DFQ_CONTROL__OVL_DFQ_SIZE__SHIFT 0x00000004
+#define OVL_DFQ_STATUS__OVL_DFQ_NUM_ENTRIES_MASK 0x0000000fL
+#define OVL_DFQ_STATUS__OVL_DFQ_NUM_ENTRIES__SHIFT 0x00000000
+#define OVL_DFQ_STATUS__OVL_DFQ_RESET_ACK_MASK 0x00000200L
+#define OVL_DFQ_STATUS__OVL_DFQ_RESET_ACK__SHIFT 0x00000009
+#define OVL_DFQ_STATUS__OVL_DFQ_RESET_FLAG_MASK 0x00000100L
+#define OVL_DFQ_STATUS__OVL_DFQ_RESET_FLAG__SHIFT 0x00000008
+#define OVL_DFQ_STATUS__OVL_SECONDARY_DFQ_NUM_ENTRIES_MASK 0x000000f0L
+#define OVL_DFQ_STATUS__OVL_SECONDARY_DFQ_NUM_ENTRIES__SHIFT 0x00000004
+#define OVL_ENABLE__OVL_ENABLE_MASK 0x00000001L
+#define OVL_ENABLE__OVL_ENABLE__SHIFT 0x00000000
+#define OVL_ENABLE__OVLSCL_EN_MASK 0x00000100L
+#define OVL_ENABLE__OVLSCL_EN__SHIFT 0x00000008
+#define OVL_END__OVL_X_END_MASK 0x7fff0000L
+#define OVL_END__OVL_X_END__SHIFT 0x00000010
+#define OVL_END__OVL_Y_END_MASK 0x00007fffL
+#define OVL_END__OVL_Y_END__SHIFT 0x00000000
+#define OVL_PITCH__OVL_PITCH_MASK 0x00007fffL
+#define OVL_PITCH__OVL_PITCH__SHIFT 0x00000000
+#define OVLSCL_EDGE_PIXEL_CNTL__OVLSCL_BLACK_COLOR_BCB_MASK 0x000003ffL
+#define OVLSCL_EDGE_PIXEL_CNTL__OVLSCL_BLACK_COLOR_BCB__SHIFT 0x00000000
+#define OVLSCL_EDGE_PIXEL_CNTL__OVLSCL_BLACK_COLOR_GY_MASK 0x000ffc00L
+#define OVLSCL_EDGE_PIXEL_CNTL__OVLSCL_BLACK_COLOR_GY__SHIFT 0x0000000a
+#define OVLSCL_EDGE_PIXEL_CNTL__OVLSCL_BLACK_COLOR_RCR_MASK 0x3ff00000L
+#define OVLSCL_EDGE_PIXEL_CNTL__OVLSCL_BLACK_COLOR_RCR__SHIFT 0x00000014
+#define OVLSCL_EDGE_PIXEL_CNTL__OVLSCL_EDGE_PIXEL_SEL_MASK 0x80000000L
+#define OVLSCL_EDGE_PIXEL_CNTL__OVLSCL_EDGE_PIXEL_SEL__SHIFT 0x0000001f
+#define OVL_SECONDARY_SURFACE_ADDRESS_HIGH__OVL_SECONDARY_SURFACE_ADDRESS_HIGH_MASK 0x000000ffL
+#define OVL_SECONDARY_SURFACE_ADDRESS_HIGH__OVL_SECONDARY_SURFACE_ADDRESS_HIGH__SHIFT 0x00000000
+#define OVL_SECONDARY_SURFACE_ADDRESS__OVL_SECONDARY_DFQ_ENABLE_MASK 0x00000001L
+#define OVL_SECONDARY_SURFACE_ADDRESS__OVL_SECONDARY_DFQ_ENABLE__SHIFT 0x00000000
+#define OVL_SECONDARY_SURFACE_ADDRESS__OVL_SECONDARY_SURFACE_ADDRESS_MASK 0xffffff00L
+#define OVL_SECONDARY_SURFACE_ADDRESS__OVL_SECONDARY_SURFACE_ADDRESS__SHIFT 0x00000008
+#define OVL_START__OVL_X_START_MASK 0x3fff0000L
+#define OVL_START__OVL_X_START__SHIFT 0x00000010
+#define OVL_START__OVL_Y_START_MASK 0x00003fffL
+#define OVL_START__OVL_Y_START__SHIFT 0x00000000
+#define OVL_STEREOSYNC_FLIP__OVL_PRIMARY_SURFACE_PENDING_MASK 0x00010000L
+#define OVL_STEREOSYNC_FLIP__OVL_PRIMARY_SURFACE_PENDING__SHIFT 0x00000010
+#define OVL_STEREOSYNC_FLIP__OVL_SECONDARY_SURFACE_PENDING_MASK 0x00020000L
+#define OVL_STEREOSYNC_FLIP__OVL_SECONDARY_SURFACE_PENDING__SHIFT 0x00000011
+#define OVL_STEREOSYNC_FLIP__OVL_STEREOSYNC_FLIP_EN_MASK 0x00000001L
+#define OVL_STEREOSYNC_FLIP__OVL_STEREOSYNC_FLIP_EN__SHIFT 0x00000000
+#define OVL_STEREOSYNC_FLIP__OVL_STEREOSYNC_FLIP_MODE_MASK 0x00000300L
+#define OVL_STEREOSYNC_FLIP__OVL_STEREOSYNC_FLIP_MODE__SHIFT 0x00000008
+#define OVL_STEREOSYNC_FLIP__OVL_STEREOSYNC_SELECT_DISABLE_MASK 0x10000000L
+#define OVL_STEREOSYNC_FLIP__OVL_STEREOSYNC_SELECT_DISABLE__SHIFT 0x0000001c
+#define OVL_SURFACE_ADDRESS_HIGH_INUSE__OVL_SURFACE_ADDRESS_HIGH_INUSE_MASK 0x000000ffL
+#define OVL_SURFACE_ADDRESS_HIGH_INUSE__OVL_SURFACE_ADDRESS_HIGH_INUSE__SHIFT 0x00000000
+#define OVL_SURFACE_ADDRESS_HIGH__OVL_SURFACE_ADDRESS_HIGH_MASK 0x000000ffL
+#define OVL_SURFACE_ADDRESS_HIGH__OVL_SURFACE_ADDRESS_HIGH__SHIFT 0x00000000
+#define OVL_SURFACE_ADDRESS_INUSE__OVL_SURFACE_ADDRESS_INUSE_MASK 0xffffff00L
+#define OVL_SURFACE_ADDRESS_INUSE__OVL_SURFACE_ADDRESS_INUSE__SHIFT 0x00000008
+#define OVL_SURFACE_ADDRESS__OVL_DFQ_ENABLE_MASK 0x00000001L
+#define OVL_SURFACE_ADDRESS__OVL_DFQ_ENABLE__SHIFT 0x00000000
+#define OVL_SURFACE_ADDRESS__OVL_SURFACE_ADDRESS_MASK 0xffffff00L
+#define OVL_SURFACE_ADDRESS__OVL_SURFACE_ADDRESS__SHIFT 0x00000008
+#define OVL_SURFACE_OFFSET_X__OVL_SURFACE_OFFSET_X_MASK 0x00003fffL
+#define OVL_SURFACE_OFFSET_X__OVL_SURFACE_OFFSET_X__SHIFT 0x00000000
+#define OVL_SURFACE_OFFSET_Y__OVL_SURFACE_OFFSET_Y_MASK 0x00003fffL
+#define OVL_SURFACE_OFFSET_Y__OVL_SURFACE_OFFSET_Y__SHIFT 0x00000000
+#define OVL_SWAP_CNTL__OVL_ALPHA_CROSSBAR_MASK 0x00000c00L
+#define OVL_SWAP_CNTL__OVL_ALPHA_CROSSBAR__SHIFT 0x0000000a
+#define OVL_SWAP_CNTL__OVL_BLUE_CROSSBAR_MASK 0x00000300L
+#define OVL_SWAP_CNTL__OVL_BLUE_CROSSBAR__SHIFT 0x00000008
+#define OVL_SWAP_CNTL__OVL_ENDIAN_SWAP_MASK 0x00000003L
+#define OVL_SWAP_CNTL__OVL_ENDIAN_SWAP__SHIFT 0x00000000
+#define OVL_SWAP_CNTL__OVL_GREEN_CROSSBAR_MASK 0x000000c0L
+#define OVL_SWAP_CNTL__OVL_GREEN_CROSSBAR__SHIFT 0x00000006
+#define OVL_SWAP_CNTL__OVL_RED_CROSSBAR_MASK 0x00000030L
+#define OVL_SWAP_CNTL__OVL_RED_CROSSBAR__SHIFT 0x00000004
+#define OVL_UPDATE__OVL_DISABLE_MULTIPLE_UPDATE_MASK 0x01000000L
+#define OVL_UPDATE__OVL_DISABLE_MULTIPLE_UPDATE__SHIFT 0x00000018
+#define OVL_UPDATE__OVL_UPDATE_LOCK_MASK 0x00010000L
+#define OVL_UPDATE__OVL_UPDATE_LOCK__SHIFT 0x00000010
+#define OVL_UPDATE__OVL_UPDATE_PENDING_MASK 0x00000001L
+#define OVL_UPDATE__OVL_UPDATE_PENDING__SHIFT 0x00000000
+#define OVL_UPDATE__OVL_UPDATE_TAKEN_MASK 0x00000002L
+#define OVL_UPDATE__OVL_UPDATE_TAKEN__SHIFT 0x00000001
+#define PHY_AUX_CNTL__AUX_PAD_RXSEL_MASK 0x00010000L
+#define PHY_AUX_CNTL__AUX_PAD_RXSEL__SHIFT 0x00000010
+#define PHY_AUX_CNTL__AUX_PAD_SLEWN_MASK 0x00001000L
+#define PHY_AUX_CNTL__AUX_PAD_SLEWN__SHIFT 0x0000000c
+#define PHY_AUX_CNTL__AUX_PAD_WAKE_MASK 0x00004000L
+#define PHY_AUX_CNTL__AUX_PAD_WAKE__SHIFT 0x0000000e
+#define PIPE0_ARBITRATION_CONTROL3__EFFICIENCY_WEIGHT_MASK 0x0000ffffL
+#define PIPE0_ARBITRATION_CONTROL3__EFFICIENCY_WEIGHT__SHIFT 0x00000000
+#define PIPE0_DMIF_BUFFER_CONTROL__DMIF_BUFFERS_ALLOCATED_MASK 0x00000007L
+#define PIPE0_DMIF_BUFFER_CONTROL__DMIF_BUFFERS_ALLOCATED__SHIFT 0x00000000
+#define PIPE0_DMIF_BUFFER_CONTROL__DMIF_BUFFERS_ALLOCATION_COMPLETED_MASK 0x00000010L
+#define PIPE0_DMIF_BUFFER_CONTROL__DMIF_BUFFERS_ALLOCATION_COMPLETED__SHIFT 0x00000004
+#define PIPE0_MAX_REQUESTS__MAX_REQUESTS_MASK 0x000003ffL
+#define PIPE0_MAX_REQUESTS__MAX_REQUESTS__SHIFT 0x00000000
+#define PIPE0_PG_CONFIG__PIPE0_POWER_FORCEON_MASK 0x00000001L
+#define PIPE0_PG_CONFIG__PIPE0_POWER_FORCEON__SHIFT 0x00000000
+#define PIPE0_PG_ENABLE__PIPE0_POWER_GATE_MASK 0x00000001L
+#define PIPE0_PG_ENABLE__PIPE0_POWER_GATE__SHIFT 0x00000000
+#define PIPE0_PG_STATUS__PIPE0_DESIRED_PWR_STATE_MASK 0x10000000L
+#define PIPE0_PG_STATUS__PIPE0_DESIRED_PWR_STATE__SHIFT 0x0000001c
+#define PIPE0_PG_STATUS__PIPE0_PGFSM_PWR_STATUS_MASK 0xc0000000L
+#define PIPE0_PG_STATUS__PIPE0_PGFSM_PWR_STATUS__SHIFT 0x0000001e
+#define PIPE0_PG_STATUS__PIPE0_PGFSM_READ_DATA_MASK 0x00ffffffL
+#define PIPE0_PG_STATUS__PIPE0_PGFSM_READ_DATA__SHIFT 0x00000000
+#define PIPE0_PG_STATUS__PIPE0_REQUESTED_PWR_STATE_MASK 0x20000000L
+#define PIPE0_PG_STATUS__PIPE0_REQUESTED_PWR_STATE__SHIFT 0x0000001d
+#define PIPE1_ARBITRATION_CONTROL3__EFFICIENCY_WEIGHT_MASK 0x0000ffffL
+#define PIPE1_ARBITRATION_CONTROL3__EFFICIENCY_WEIGHT__SHIFT 0x00000000
+#define PIPE1_DMIF_BUFFER_CONTROL__DMIF_BUFFERS_ALLOCATED_MASK 0x00000007L
+#define PIPE1_DMIF_BUFFER_CONTROL__DMIF_BUFFERS_ALLOCATED__SHIFT 0x00000000
+#define PIPE1_DMIF_BUFFER_CONTROL__DMIF_BUFFERS_ALLOCATION_COMPLETED_MASK 0x00000010L
+#define PIPE1_DMIF_BUFFER_CONTROL__DMIF_BUFFERS_ALLOCATION_COMPLETED__SHIFT 0x00000004
+#define PIPE1_MAX_REQUESTS__MAX_REQUESTS_MASK 0x000003ffL
+#define PIPE1_MAX_REQUESTS__MAX_REQUESTS__SHIFT 0x00000000
+#define PIPE1_PG_CONFIG__PIPE1_POWER_FORCEON_MASK 0x00000001L
+#define PIPE1_PG_CONFIG__PIPE1_POWER_FORCEON__SHIFT 0x00000000
+#define PIPE1_PG_ENABLE__PIPE1_POWER_GATE_MASK 0x00000001L
+#define PIPE1_PG_ENABLE__PIPE1_POWER_GATE__SHIFT 0x00000000
+#define PIPE1_PG_STATUS__PIPE1_DESIRED_PWR_STATE_MASK 0x10000000L
+#define PIPE1_PG_STATUS__PIPE1_DESIRED_PWR_STATE__SHIFT 0x0000001c
+#define PIPE1_PG_STATUS__PIPE1_PGFSM_PWR_STATUS_MASK 0xc0000000L
+#define PIPE1_PG_STATUS__PIPE1_PGFSM_PWR_STATUS__SHIFT 0x0000001e
+#define PIPE1_PG_STATUS__PIPE1_PGFSM_READ_DATA_MASK 0x00ffffffL
+#define PIPE1_PG_STATUS__PIPE1_PGFSM_READ_DATA__SHIFT 0x00000000
+#define PIPE1_PG_STATUS__PIPE1_REQUESTED_PWR_STATE_MASK 0x20000000L
+#define PIPE1_PG_STATUS__PIPE1_REQUESTED_PWR_STATE__SHIFT 0x0000001d
+#define PIPE2_ARBITRATION_CONTROL3__EFFICIENCY_WEIGHT_MASK 0x0000ffffL
+#define PIPE2_ARBITRATION_CONTROL3__EFFICIENCY_WEIGHT__SHIFT 0x00000000
+#define PIPE2_DMIF_BUFFER_CONTROL__DMIF_BUFFERS_ALLOCATED_MASK 0x00000007L
+#define PIPE2_DMIF_BUFFER_CONTROL__DMIF_BUFFERS_ALLOCATED__SHIFT 0x00000000
+#define PIPE2_DMIF_BUFFER_CONTROL__DMIF_BUFFERS_ALLOCATION_COMPLETED_MASK 0x00000010L
+#define PIPE2_DMIF_BUFFER_CONTROL__DMIF_BUFFERS_ALLOCATION_COMPLETED__SHIFT 0x00000004
+#define PIPE2_MAX_REQUESTS__MAX_REQUESTS_MASK 0x000003ffL
+#define PIPE2_MAX_REQUESTS__MAX_REQUESTS__SHIFT 0x00000000
+#define PIPE2_PG_CONFIG__PIPE2_POWER_FORCEON_MASK 0x00000001L
+#define PIPE2_PG_CONFIG__PIPE2_POWER_FORCEON__SHIFT 0x00000000
+#define PIPE2_PG_ENABLE__PIPE2_POWER_GATE_MASK 0x00000001L
+#define PIPE2_PG_ENABLE__PIPE2_POWER_GATE__SHIFT 0x00000000
+#define PIPE2_PG_STATUS__PIPE2_DESIRED_PWR_STATE_MASK 0x10000000L
+#define PIPE2_PG_STATUS__PIPE2_DESIRED_PWR_STATE__SHIFT 0x0000001c
+#define PIPE2_PG_STATUS__PIPE2_PGFSM_PWR_STATUS_MASK 0xc0000000L
+#define PIPE2_PG_STATUS__PIPE2_PGFSM_PWR_STATUS__SHIFT 0x0000001e
+#define PIPE2_PG_STATUS__PIPE2_PGFSM_READ_DATA_MASK 0x00ffffffL
+#define PIPE2_PG_STATUS__PIPE2_PGFSM_READ_DATA__SHIFT 0x00000000
+#define PIPE2_PG_STATUS__PIPE2_REQUESTED_PWR_STATE_MASK 0x20000000L
+#define PIPE2_PG_STATUS__PIPE2_REQUESTED_PWR_STATE__SHIFT 0x0000001d
+#define PIPE3_ARBITRATION_CONTROL3__EFFICIENCY_WEIGHT_MASK 0x0000ffffL
+#define PIPE3_ARBITRATION_CONTROL3__EFFICIENCY_WEIGHT__SHIFT 0x00000000
+#define PIPE3_DMIF_BUFFER_CONTROL__DMIF_BUFFERS_ALLOCATED_MASK 0x00000007L
+#define PIPE3_DMIF_BUFFER_CONTROL__DMIF_BUFFERS_ALLOCATED__SHIFT 0x00000000
+#define PIPE3_DMIF_BUFFER_CONTROL__DMIF_BUFFERS_ALLOCATION_COMPLETED_MASK 0x00000010L
+#define PIPE3_DMIF_BUFFER_CONTROL__DMIF_BUFFERS_ALLOCATION_COMPLETED__SHIFT 0x00000004
+#define PIPE3_MAX_REQUESTS__MAX_REQUESTS_MASK 0x000003ffL
+#define PIPE3_MAX_REQUESTS__MAX_REQUESTS__SHIFT 0x00000000
+#define PIPE3_PG_CONFIG__PIPE3_POWER_FORCEON_MASK 0x00000001L
+#define PIPE3_PG_CONFIG__PIPE3_POWER_FORCEON__SHIFT 0x00000000
+#define PIPE3_PG_ENABLE__PIPE3_POWER_GATE_MASK 0x00000001L
+#define PIPE3_PG_ENABLE__PIPE3_POWER_GATE__SHIFT 0x00000000
+#define PIPE3_PG_STATUS__PIPE3_DESIRED_PWR_STATE_MASK 0x10000000L
+#define PIPE3_PG_STATUS__PIPE3_DESIRED_PWR_STATE__SHIFT 0x0000001c
+#define PIPE3_PG_STATUS__PIPE3_PGFSM_PWR_STATUS_MASK 0xc0000000L
+#define PIPE3_PG_STATUS__PIPE3_PGFSM_PWR_STATUS__SHIFT 0x0000001e
+#define PIPE3_PG_STATUS__PIPE3_PGFSM_READ_DATA_MASK 0x00ffffffL
+#define PIPE3_PG_STATUS__PIPE3_PGFSM_READ_DATA__SHIFT 0x00000000
+#define PIPE3_PG_STATUS__PIPE3_REQUESTED_PWR_STATE_MASK 0x20000000L
+#define PIPE3_PG_STATUS__PIPE3_REQUESTED_PWR_STATE__SHIFT 0x0000001d
+#define PIPE4_ARBITRATION_CONTROL3__EFFICIENCY_WEIGHT_MASK 0x0000ffffL
+#define PIPE4_ARBITRATION_CONTROL3__EFFICIENCY_WEIGHT__SHIFT 0x00000000
+#define PIPE4_DMIF_BUFFER_CONTROL__DMIF_BUFFERS_ALLOCATED_MASK 0x00000007L
+#define PIPE4_DMIF_BUFFER_CONTROL__DMIF_BUFFERS_ALLOCATED__SHIFT 0x00000000
+#define PIPE4_DMIF_BUFFER_CONTROL__DMIF_BUFFERS_ALLOCATION_COMPLETED_MASK 0x00000010L
+#define PIPE4_DMIF_BUFFER_CONTROL__DMIF_BUFFERS_ALLOCATION_COMPLETED__SHIFT 0x00000004
+#define PIPE4_MAX_REQUESTS__MAX_REQUESTS_MASK 0x000003ffL
+#define PIPE4_MAX_REQUESTS__MAX_REQUESTS__SHIFT 0x00000000
+#define PIPE4_PG_CONFIG__PIPE4_POWER_FORCEON_MASK 0x00000001L
+#define PIPE4_PG_CONFIG__PIPE4_POWER_FORCEON__SHIFT 0x00000000
+#define PIPE4_PG_ENABLE__PIPE4_POWER_GATE_MASK 0x00000001L
+#define PIPE4_PG_ENABLE__PIPE4_POWER_GATE__SHIFT 0x00000000
+#define PIPE4_PG_STATUS__PIPE4_DESIRED_PWR_STATE_MASK 0x10000000L
+#define PIPE4_PG_STATUS__PIPE4_DESIRED_PWR_STATE__SHIFT 0x0000001c
+#define PIPE4_PG_STATUS__PIPE4_PGFSM_PWR_STATUS_MASK 0xc0000000L
+#define PIPE4_PG_STATUS__PIPE4_PGFSM_PWR_STATUS__SHIFT 0x0000001e
+#define PIPE4_PG_STATUS__PIPE4_PGFSM_READ_DATA_MASK 0x00ffffffL
+#define PIPE4_PG_STATUS__PIPE4_PGFSM_READ_DATA__SHIFT 0x00000000
+#define PIPE4_PG_STATUS__PIPE4_REQUESTED_PWR_STATE_MASK 0x20000000L
+#define PIPE4_PG_STATUS__PIPE4_REQUESTED_PWR_STATE__SHIFT 0x0000001d
+#define PIPE5_ARBITRATION_CONTROL3__EFFICIENCY_WEIGHT_MASK 0x0000ffffL
+#define PIPE5_ARBITRATION_CONTROL3__EFFICIENCY_WEIGHT__SHIFT 0x00000000
+#define PIPE5_DMIF_BUFFER_CONTROL__DMIF_BUFFERS_ALLOCATED_MASK 0x00000007L
+#define PIPE5_DMIF_BUFFER_CONTROL__DMIF_BUFFERS_ALLOCATED__SHIFT 0x00000000
+#define PIPE5_DMIF_BUFFER_CONTROL__DMIF_BUFFERS_ALLOCATION_COMPLETED_MASK 0x00000010L
+#define PIPE5_DMIF_BUFFER_CONTROL__DMIF_BUFFERS_ALLOCATION_COMPLETED__SHIFT 0x00000004
+#define PIPE5_MAX_REQUESTS__MAX_REQUESTS_MASK 0x000003ffL
+#define PIPE5_MAX_REQUESTS__MAX_REQUESTS__SHIFT 0x00000000
+#define PIPE5_PG_CONFIG__PIPE5_POWER_FORCEON_MASK 0x00000001L
+#define PIPE5_PG_CONFIG__PIPE5_POWER_FORCEON__SHIFT 0x00000000
+#define PIPE5_PG_ENABLE__PIPE5_POWER_GATE_MASK 0x00000001L
+#define PIPE5_PG_ENABLE__PIPE5_POWER_GATE__SHIFT 0x00000000
+#define PIPE5_PG_STATUS__PIPE5_DESIRED_PWR_STATE_MASK 0x10000000L
+#define PIPE5_PG_STATUS__PIPE5_DESIRED_PWR_STATE__SHIFT 0x0000001c
+#define PIPE5_PG_STATUS__PIPE5_PGFSM_PWR_STATUS_MASK 0xc0000000L
+#define PIPE5_PG_STATUS__PIPE5_PGFSM_PWR_STATUS__SHIFT 0x0000001e
+#define PIPE5_PG_STATUS__PIPE5_PGFSM_READ_DATA_MASK 0x00ffffffL
+#define PIPE5_PG_STATUS__PIPE5_PGFSM_READ_DATA__SHIFT 0x00000000
+#define PIPE5_PG_STATUS__PIPE5_REQUESTED_PWR_STATE_MASK 0x20000000L
+#define PIPE5_PG_STATUS__PIPE5_REQUESTED_PWR_STATE__SHIFT 0x0000001d
+#define PIXCLK0_RESYNC_CNTL__DCCG_DEEP_COLOR_CNTL0_MASK 0x00000030L
+#define PIXCLK0_RESYNC_CNTL__DCCG_DEEP_COLOR_CNTL0__SHIFT 0x00000004
+#define PIXCLK0_RESYNC_CNTL__PIXCLK0_RESYNC_ENABLE_MASK 0x00000001L
+#define PIXCLK0_RESYNC_CNTL__PIXCLK0_RESYNC_ENABLE__SHIFT 0x00000000
+#define PIXCLK1_RESYNC_CNTL__DCCG_DEEP_COLOR_CNTL1_MASK 0x00000030L
+#define PIXCLK1_RESYNC_CNTL__DCCG_DEEP_COLOR_CNTL1__SHIFT 0x00000004
+#define PIXCLK1_RESYNC_CNTL__PIXCLK1_RESYNC_ENABLE_MASK 0x00000001L
+#define PIXCLK1_RESYNC_CNTL__PIXCLK1_RESYNC_ENABLE__SHIFT 0x00000000
+#define PIXCLK2_RESYNC_CNTL__DCCG_DEEP_COLOR_CNTL2_MASK 0x00000030L
+#define PIXCLK2_RESYNC_CNTL__DCCG_DEEP_COLOR_CNTL2__SHIFT 0x00000004
+#define PIXCLK2_RESYNC_CNTL__PIXCLK2_RESYNC_ENABLE_MASK 0x00000001L
+#define PIXCLK2_RESYNC_CNTL__PIXCLK2_RESYNC_ENABLE__SHIFT 0x00000000
+#define PLL_ANALOG__PLL_CAL_MODE_MASK 0x0000001fL
+#define PLL_ANALOG__PLL_CAL_MODE__SHIFT 0x00000000
+#define PLL_ANALOG__PLL_CP_MASK 0x00000f00L
+#define PLL_ANALOG__PLL_CP__SHIFT 0x00000008
+#define PLL_ANALOG__PLL_IBIAS_MASK 0xff000000L
+#define PLL_ANALOG__PLL_IBIAS__SHIFT 0x00000018
+#define PLL_ANALOG__PLL_LF_MODE_MASK 0x001ff000L
+#define PLL_ANALOG__PLL_LF_MODE__SHIFT 0x0000000c
+#define PLL_ANALOG__PLL_PFD_PULSE_SEL_MASK 0x00000060L
+#define PLL_ANALOG__PLL_PFD_PULSE_SEL__SHIFT 0x00000005
+#define PLL_CNTL__PLL_ANTIGLITCH_RESETB_MASK 0x00000080L
+#define PLL_CNTL__PLL_ANTIGLITCH_RESETB__SHIFT 0x00000007
+#define PLL_CNTL__PLL_ANTI_GLITCH_RESET_MASK 0x00002000L
+#define PLL_CNTL__PLL_ANTI_GLITCH_RESET__SHIFT 0x0000000d
+#define PLL_CNTL__PLL_BYPASS_CAL_MASK 0x00000004L
+#define PLL_CNTL__PLL_BYPASS_CAL__SHIFT 0x00000002
+#define PLL_CNTL__PLL_CAL_BYPASS_REFDIV_MASK 0x00000400L
+#define PLL_CNTL__PLL_CAL_BYPASS_REFDIV__SHIFT 0x0000000a
+#define PLL_CNTL__PLL_CALIB_DONE_MASK 0x00100000L
+#define PLL_CNTL__PLL_CALIB_DONE__SHIFT 0x00000014
+#define PLL_CNTL__PLL_CALREF_MASK 0x00000300L
+#define PLL_CNTL__PLL_CALREF__SHIFT 0x00000008
+#define PLL_CNTL__PLL_DIG_SPARE_MASK 0xfc000000L
+#define PLL_CNTL__PLL_DIG_SPARE__SHIFT 0x0000001a
+#define PLL_CNTL__PLL_LOCKED_MASK 0x00200000L
+#define PLL_CNTL__PLL_LOCKED__SHIFT 0x00000015
+#define PLL_CNTL__PLL_LOCK_FREQ_SEL_MASK 0x00080000L
+#define PLL_CNTL__PLL_LOCK_FREQ_SEL__SHIFT 0x00000013
+#define PLL_CNTL__PLL_PCIE_REFCLK_SEL_MASK 0x00000040L
+#define PLL_CNTL__PLL_PCIE_REFCLK_SEL__SHIFT 0x00000006
+#define PLL_CNTL__PLL_POST_DIV_SRC_MASK 0x00000008L
+#define PLL_CNTL__PLL_POST_DIV_SRC__SHIFT 0x00000003
+#define PLL_CNTL__PLL_POWER_DOWN_MASK 0x00000002L
+#define PLL_CNTL__PLL_POWER_DOWN__SHIFT 0x00000001
+#define PLL_CNTL__PLL_REFCLK_SEL_MASK 0x00001800L
+#define PLL_CNTL__PLL_REFCLK_SEL__SHIFT 0x0000000b
+#define PLL_CNTL__PLL_REF_DIV_SRC_MASK 0x00070000L
+#define PLL_CNTL__PLL_REF_DIV_SRC__SHIFT 0x00000010
+#define PLL_CNTL__PLL_RESET_MASK 0x00000001L
+#define PLL_CNTL__PLL_RESET__SHIFT 0x00000000
+#define PLL_CNTL__PLL_TIMING_MODE_STATUS_MASK 0x03000000L
+#define PLL_CNTL__PLL_TIMING_MODE_STATUS__SHIFT 0x00000018
+#define PLL_CNTL__PLL_VCOREF_MASK 0x00000030L
+#define PLL_CNTL__PLL_VCOREF__SHIFT 0x00000004
+#define PLL_DEBUG_CNTL__PLL_DEBUG_CLK_SEL_MASK 0x00000f00L
+#define PLL_DEBUG_CNTL__PLL_DEBUG_CLK_SEL__SHIFT 0x00000008
+#define PLL_DEBUG_CNTL__PLL_DEBUG_MUXOUT_SEL_MASK 0x000000f0L
+#define PLL_DEBUG_CNTL__PLL_DEBUG_MUXOUT_SEL__SHIFT 0x00000004
+#define PLL_DEBUG_CNTL__PLL_DEBUG_SIGNALS_ENABLE_MASK 0x00000001L
+#define PLL_DEBUG_CNTL__PLL_DEBUG_SIGNALS_ENABLE__SHIFT 0x00000000
+#define PLL_DISPCLK_CURRENT_DTO_PHASE__PLL_DISPCLK_CURRENT_DTO_PHASE_MASK 0x000001ffL
+#define PLL_DISPCLK_CURRENT_DTO_PHASE__PLL_DISPCLK_CURRENT_DTO_PHASE__SHIFT 0x00000000
+#define PLL_DISPCLK_DTO_CNTL__PLL_DISPCLK_DTO_COMPL_DELAY_MASK 0xff000000L
+#define PLL_DISPCLK_DTO_CNTL__PLL_DISPCLK_DTO_COMPL_DELAY__SHIFT 0x00000018
+#define PLL_DISPCLK_DTO_CNTL__PLL_DISPCLK_DTO_DIS_MASK 0x00010000L
+#define PLL_DISPCLK_DTO_CNTL__PLL_DISPCLK_DTO_DIS__SHIFT 0x00000010
+#define PLL_DISPCLK_DTO_CNTL__PLL_DISPCLK_DTO_PHASE_MASK 0x000001ffL
+#define PLL_DISPCLK_DTO_CNTL__PLL_DISPCLK_DTO_PHASE__SHIFT 0x00000000
+#define PLL_DISPCLK_DTO_CNTL__PLL_DISPCLK_DTO_UPDATE_ACK_MASK 0x00400000L
+#define PLL_DISPCLK_DTO_CNTL__PLL_DISPCLK_DTO_UPDATE_ACK__SHIFT 0x00000016
+#define PLL_DISPCLK_DTO_CNTL__PLL_DISPCLK_DTO_UPDATE_MODE_MASK 0x00060000L
+#define PLL_DISPCLK_DTO_CNTL__PLL_DISPCLK_DTO_UPDATE_MODE__SHIFT 0x00000011
+#define PLL_DISPCLK_DTO_CNTL__PLL_DISPCLK_DTO_UPDATE_PENDING_MASK 0x00100000L
+#define PLL_DISPCLK_DTO_CNTL__PLL_DISPCLK_DTO_UPDATE_PENDING__SHIFT 0x00000014
+#define PLL_DISPCLK_DTO_CNTL__PLL_DISPCLK_DTO_UPDATE_REQ_MASK 0x00200000L
+#define PLL_DISPCLK_DTO_CNTL__PLL_DISPCLK_DTO_UPDATE_REQ__SHIFT 0x00000015
+#define PLL_DS_CNTL__PLL_DS_FRAC_MASK 0x0000ffffL
+#define PLL_DS_CNTL__PLL_DS_FRAC__SHIFT 0x00000000
+#define PLL_DS_CNTL__PLL_DS_MODE_MASK 0x00040000L
+#define PLL_DS_CNTL__PLL_DS_MODE__SHIFT 0x00000012
+#define PLL_DS_CNTL__PLL_DS_ORDER_MASK 0x00030000L
+#define PLL_DS_CNTL__PLL_DS_ORDER__SHIFT 0x00000010
+#define PLL_DS_CNTL__PLL_DS_PRBS_EN_MASK 0x00080000L
+#define PLL_DS_CNTL__PLL_DS_PRBS_EN__SHIFT 0x00000013
+#define PLL_FB_DIV__PLL_FB_DIV_FRACTION_CNTL_MASK 0x00000030L
+#define PLL_FB_DIV__PLL_FB_DIV_FRACTION_CNTL__SHIFT 0x00000004
+#define PLL_FB_DIV__PLL_FB_DIV_FRACTION_MASK 0x0000000fL
+#define PLL_FB_DIV__PLL_FB_DIV_FRACTION__SHIFT 0x00000000
+#define PLL_FB_DIV__PLL_FB_DIV_MASK 0x0fff0000L
+#define PLL_FB_DIV__PLL_FB_DIV__SHIFT 0x00000010
+#define PLL_IDCLK_CNTL__PLL_DIFF_POST_DIV_MASK 0x000f0000L
+#define PLL_IDCLK_CNTL__PLL_DIFF_POST_DIV_RESET_MASK 0x00000100L
+#define PLL_IDCLK_CNTL__PLL_DIFF_POST_DIV_RESET__SHIFT 0x00000008
+#define PLL_IDCLK_CNTL__PLL_DIFF_POST_DIV_SELECT_MASK 0x00001000L
+#define PLL_IDCLK_CNTL__PLL_DIFF_POST_DIV_SELECT__SHIFT 0x0000000c
+#define PLL_IDCLK_CNTL__PLL_DIFF_POST_DIV__SHIFT 0x00000010
+#define PLL_IDCLK_CNTL__PLL_LTDP_IDCLK_DIFF_EN_MASK 0x00000002L
+#define PLL_IDCLK_CNTL__PLL_LTDP_IDCLK_DIFF_EN__SHIFT 0x00000001
+#define PLL_IDCLK_CNTL__PLL_LTDP_IDCLK_EN_MASK 0x00000001L
+#define PLL_IDCLK_CNTL__PLL_LTDP_IDCLK_EN__SHIFT 0x00000000
+#define PLL_IDCLK_CNTL__PLL_TMDP_IDCLK_DIFF_EN_MASK 0x00000008L
+#define PLL_IDCLK_CNTL__PLL_TMDP_IDCLK_DIFF_EN__SHIFT 0x00000003
+#define PLL_IDCLK_CNTL__PLL_TMDP_IDCLK_EN_MASK 0x00000004L
+#define PLL_IDCLK_CNTL__PLL_TMDP_IDCLK_EN__SHIFT 0x00000002
+#define PLL_IDCLK_CNTL__PLL_UNIPHY_IDCLK_DIFF_EN_MASK 0x00000010L
+#define PLL_IDCLK_CNTL__PLL_UNIPHY_IDCLK_DIFF_EN__SHIFT 0x00000004
+#define PLL_POST_DIV__PLL_POST_DIV1P5_DISPCLK_MASK 0x00000080L
+#define PLL_POST_DIV__PLL_POST_DIV1P5_DISPCLK__SHIFT 0x00000007
+#define PLL_POST_DIV__PLL_POST_DIV1P5_DPREFCLK_MASK 0x00008000L
+#define PLL_POST_DIV__PLL_POST_DIV1P5_DPREFCLK__SHIFT 0x0000000f
+#define PLL_POST_DIV__PLL_POST_DIV_DVOCLK_MASK 0x00007f00L
+#define PLL_POST_DIV__PLL_POST_DIV_DVOCLK__SHIFT 0x00000008
+#define PLL_POST_DIV__PLL_POST_DIV_IDCLK_MASK 0x007f0000L
+#define PLL_POST_DIV__PLL_POST_DIV_IDCLK__SHIFT 0x00000010
+#define PLL_POST_DIV__PLL_POST_DIV_PIXCLK_MASK 0x0000007fL
+#define PLL_POST_DIV__PLL_POST_DIV_PIXCLK__SHIFT 0x00000000
+#define PLL_REF_DIV__PLL_CALIBRATION_REF_DIV_MASK 0x0000f000L
+#define PLL_REF_DIV__PLL_CALIBRATION_REF_DIV__SHIFT 0x0000000c
+#define PLL_REF_DIV__PLL_REF_DIV_MASK 0x000003ffL
+#define PLL_REF_DIV__PLL_REF_DIV__SHIFT 0x00000000
+#define PLL_SS_AMOUNT_DSFRAC__PLL_SS_AMOUNT_DSFRAC_MASK 0x0000ffffL
+#define PLL_SS_AMOUNT_DSFRAC__PLL_SS_AMOUNT_DSFRAC__SHIFT 0x00000000
+#define PLL_SS_CNTL__PLL_SS_AMOUNT_FBDIV_MASK 0x000000ffL
+#define PLL_SS_CNTL__PLL_SS_AMOUNT_FBDIV__SHIFT 0x00000000
+#define PLL_SS_CNTL__PLL_SS_AMOUNT_NFRAC_SLIP_MASK 0x00000f00L
+#define PLL_SS_CNTL__PLL_SS_AMOUNT_NFRAC_SLIP__SHIFT 0x00000008
+#define PLL_SS_CNTL__PLL_SS_EN_MASK 0x00001000L
+#define PLL_SS_CNTL__PLL_SS_EN__SHIFT 0x0000000c
+#define PLL_SS_CNTL__PLL_SS_MODE_MASK 0x00002000L
+#define PLL_SS_CNTL__PLL_SS_MODE__SHIFT 0x0000000d
+#define PLL_SS_CNTL__PLL_SS_STEP_SIZE_DSFRAC_MASK 0xffff0000L
+#define PLL_SS_CNTL__PLL_SS_STEP_SIZE_DSFRAC__SHIFT 0x00000010
+#define PLL_UNLOCK_DETECT_CNTL__PLL_UNLOCK_DET_COUNT_MASK 0x00000070L
+#define PLL_UNLOCK_DETECT_CNTL__PLL_UNLOCK_DET_COUNT__SHIFT 0x00000004
+#define PLL_UNLOCK_DETECT_CNTL__PLL_UNLOCK_DETECT_ENABLE_MASK 0x00000001L
+#define PLL_UNLOCK_DETECT_CNTL__PLL_UNLOCK_DETECT_ENABLE__SHIFT 0x00000000
+#define PLL_UNLOCK_DETECT_CNTL__PLL_UNLOCK_DET_RES100_SELECT_MASK 0x00000002L
+#define PLL_UNLOCK_DETECT_CNTL__PLL_UNLOCK_DET_RES100_SELECT__SHIFT 0x00000001
+#define PLL_UNLOCK_DETECT_CNTL__PLL_UNLOCK_STICKY_CLEAR_MASK 0x00000008L
+#define PLL_UNLOCK_DETECT_CNTL__PLL_UNLOCK_STICKY_CLEAR__SHIFT 0x00000003
+#define PLL_UNLOCK_DETECT_CNTL__PLL_UNLOCK_STICKY_STATUS_MASK 0x00000004L
+#define PLL_UNLOCK_DETECT_CNTL__PLL_UNLOCK_STICKY_STATUS__SHIFT 0x00000002
+#define PLL_UPDATE_CNTL__PLL_AUTO_RESET_DISABLE_MASK 0x00010000L
+#define PLL_UPDATE_CNTL__PLL_AUTO_RESET_DISABLE__SHIFT 0x00000010
+#define PLL_UPDATE_CNTL__PLL_UPDATE_PENDING_MASK 0x00000001L
+#define PLL_UPDATE_CNTL__PLL_UPDATE_PENDING__SHIFT 0x00000000
+#define PLL_UPDATE_CNTL__PLL_UPDATE_POINT_MASK 0x00000100L
+#define PLL_UPDATE_CNTL__PLL_UPDATE_POINT__SHIFT 0x00000008
+#define PLL_UPDATE_LOCK__PLL_UPDATE_LOCK_MASK 0x00000001L
+#define PLL_UPDATE_LOCK__PLL_UPDATE_LOCK__SHIFT 0x00000000
+#define PLL_VREG_CNTL__PLL_VREF_SEL_MASK 0x04000000L
+#define PLL_VREG_CNTL__PLL_VREF_SEL__SHIFT 0x0000001a
+#define PLL_VREG_CNTL__PLL_VREG_BIAS_MASK 0xf0000000L
+#define PLL_VREG_CNTL__PLL_VREG_BIAS__SHIFT 0x0000001c
+#define PLL_VREG_CNTL__PLL_VREG_CNTL_MASK 0x000fffffL
+#define PLL_VREG_CNTL__PLL_VREG_CNTL__SHIFT 0x00000000
+#define PRESCALE_GRPH_CONTROL__GRPH_PRESCALE_B_SIGN_MASK 0x00000008L
+#define PRESCALE_GRPH_CONTROL__GRPH_PRESCALE_B_SIGN__SHIFT 0x00000003
+#define PRESCALE_GRPH_CONTROL__GRPH_PRESCALE_BYPASS_MASK 0x00000010L
+#define PRESCALE_GRPH_CONTROL__GRPH_PRESCALE_BYPASS__SHIFT 0x00000004
+#define PRESCALE_GRPH_CONTROL__GRPH_PRESCALE_G_SIGN_MASK 0x00000004L
+#define PRESCALE_GRPH_CONTROL__GRPH_PRESCALE_G_SIGN__SHIFT 0x00000002
+#define PRESCALE_GRPH_CONTROL__GRPH_PRESCALE_R_SIGN_MASK 0x00000002L
+#define PRESCALE_GRPH_CONTROL__GRPH_PRESCALE_R_SIGN__SHIFT 0x00000001
+#define PRESCALE_GRPH_CONTROL__GRPH_PRESCALE_SELECT_MASK 0x00000001L
+#define PRESCALE_GRPH_CONTROL__GRPH_PRESCALE_SELECT__SHIFT 0x00000000
+#define PRESCALE_OVL_CONTROL__OVL_PRESCALE_BYPASS_MASK 0x00000010L
+#define PRESCALE_OVL_CONTROL__OVL_PRESCALE_BYPASS__SHIFT 0x00000004
+#define PRESCALE_OVL_CONTROL__OVL_PRESCALE_CB_SIGN_MASK 0x00000002L
+#define PRESCALE_OVL_CONTROL__OVL_PRESCALE_CB_SIGN__SHIFT 0x00000001
+#define PRESCALE_OVL_CONTROL__OVL_PRESCALE_CR_SIGN_MASK 0x00000008L
+#define PRESCALE_OVL_CONTROL__OVL_PRESCALE_CR_SIGN__SHIFT 0x00000003
+#define PRESCALE_OVL_CONTROL__OVL_PRESCALE_SELECT_MASK 0x00000001L
+#define PRESCALE_OVL_CONTROL__OVL_PRESCALE_SELECT__SHIFT 0x00000000
+#define PRESCALE_OVL_CONTROL__OVL_PRESCALE_Y_SIGN_MASK 0x00000004L
+#define PRESCALE_OVL_CONTROL__OVL_PRESCALE_Y_SIGN__SHIFT 0x00000002
+#define PRESCALE_VALUES_GRPH_B__GRPH_PRESCALE_BIAS_B_MASK 0x0000ffffL
+#define PRESCALE_VALUES_GRPH_B__GRPH_PRESCALE_BIAS_B__SHIFT 0x00000000
+#define PRESCALE_VALUES_GRPH_B__GRPH_PRESCALE_SCALE_B_MASK 0xffff0000L
+#define PRESCALE_VALUES_GRPH_B__GRPH_PRESCALE_SCALE_B__SHIFT 0x00000010
+#define PRESCALE_VALUES_GRPH_G__GRPH_PRESCALE_BIAS_G_MASK 0x0000ffffL
+#define PRESCALE_VALUES_GRPH_G__GRPH_PRESCALE_BIAS_G__SHIFT 0x00000000
+#define PRESCALE_VALUES_GRPH_G__GRPH_PRESCALE_SCALE_G_MASK 0xffff0000L
+#define PRESCALE_VALUES_GRPH_G__GRPH_PRESCALE_SCALE_G__SHIFT 0x00000010
+#define PRESCALE_VALUES_GRPH_R__GRPH_PRESCALE_BIAS_R_MASK 0x0000ffffL
+#define PRESCALE_VALUES_GRPH_R__GRPH_PRESCALE_BIAS_R__SHIFT 0x00000000
+#define PRESCALE_VALUES_GRPH_R__GRPH_PRESCALE_SCALE_R_MASK 0xffff0000L
+#define PRESCALE_VALUES_GRPH_R__GRPH_PRESCALE_SCALE_R__SHIFT 0x00000010
+#define PRESCALE_VALUES_OVL_CB__OVL_PRESCALE_BIAS_CB_MASK 0x0000ffffL
+#define PRESCALE_VALUES_OVL_CB__OVL_PRESCALE_BIAS_CB__SHIFT 0x00000000
+#define PRESCALE_VALUES_OVL_CB__OVL_PRESCALE_SCALE_CB_MASK 0xffff0000L
+#define PRESCALE_VALUES_OVL_CB__OVL_PRESCALE_SCALE_CB__SHIFT 0x00000010
+#define PRESCALE_VALUES_OVL_CR__OVL_PRESCALE_BIAS_CR_MASK 0x0000ffffL
+#define PRESCALE_VALUES_OVL_CR__OVL_PRESCALE_BIAS_CR__SHIFT 0x00000000
+#define PRESCALE_VALUES_OVL_CR__OVL_PRESCALE_SCALE_CR_MASK 0xffff0000L
+#define PRESCALE_VALUES_OVL_CR__OVL_PRESCALE_SCALE_CR__SHIFT 0x00000010
+#define PRESCALE_VALUES_OVL_Y__OVL_PRESCALE_BIAS_Y_MASK 0x0000ffffL
+#define PRESCALE_VALUES_OVL_Y__OVL_PRESCALE_BIAS_Y__SHIFT 0x00000000
+#define PRESCALE_VALUES_OVL_Y__OVL_PRESCALE_SCALE_Y_MASK 0xffff0000L
+#define PRESCALE_VALUES_OVL_Y__OVL_PRESCALE_SCALE_Y__SHIFT 0x00000010
+#define REGAMMA_CNTLA_END_CNTL1__REGAMMA_CNTLA_EXP_REGION_END_MASK 0x0000ffffL
+#define REGAMMA_CNTLA_END_CNTL1__REGAMMA_CNTLA_EXP_REGION_END__SHIFT 0x00000000
+#define REGAMMA_CNTLA_END_CNTL2__REGAMMA_CNTLA_EXP_REGION_END_BASE_MASK 0xffff0000L
+#define REGAMMA_CNTLA_END_CNTL2__REGAMMA_CNTLA_EXP_REGION_END_BASE__SHIFT 0x00000010
+#define REGAMMA_CNTLA_END_CNTL2__REGAMMA_CNTLA_EXP_REGION_END_SLOPE_MASK 0x0000ffffL
+#define REGAMMA_CNTLA_END_CNTL2__REGAMMA_CNTLA_EXP_REGION_END_SLOPE__SHIFT 0x00000000
+#define REGAMMA_CNTLA_REGION_0_1__REGAMMA_CNTLA_EXP_REGION0_LUT_OFFSET_MASK 0x000001ffL
+#define REGAMMA_CNTLA_REGION_0_1__REGAMMA_CNTLA_EXP_REGION0_LUT_OFFSET__SHIFT 0x00000000
+#define REGAMMA_CNTLA_REGION_0_1__REGAMMA_CNTLA_EXP_REGION0_NUM_SEGMENTS_MASK 0x00007000L
+#define REGAMMA_CNTLA_REGION_0_1__REGAMMA_CNTLA_EXP_REGION0_NUM_SEGMENTS__SHIFT 0x0000000c
+#define REGAMMA_CNTLA_REGION_0_1__REGAMMA_CNTLA_EXP_REGION1_LUT_OFFSET_MASK 0x01ff0000L
+#define REGAMMA_CNTLA_REGION_0_1__REGAMMA_CNTLA_EXP_REGION1_LUT_OFFSET__SHIFT 0x00000010
+#define REGAMMA_CNTLA_REGION_0_1__REGAMMA_CNTLA_EXP_REGION1_NUM_SEGMENTS_MASK 0x70000000L
+#define REGAMMA_CNTLA_REGION_0_1__REGAMMA_CNTLA_EXP_REGION1_NUM_SEGMENTS__SHIFT 0x0000001c
+#define REGAMMA_CNTLA_REGION_10_11__REGAMMA_CNTLA_EXP_REGION10_LUT_OFFSET_MASK 0x000001ffL
+#define REGAMMA_CNTLA_REGION_10_11__REGAMMA_CNTLA_EXP_REGION10_LUT_OFFSET__SHIFT 0x00000000
+#define REGAMMA_CNTLA_REGION_10_11__REGAMMA_CNTLA_EXP_REGION10_NUM_SEGMENTS_MASK 0x00007000L
+#define REGAMMA_CNTLA_REGION_10_11__REGAMMA_CNTLA_EXP_REGION10_NUM_SEGMENTS__SHIFT 0x0000000c
+#define REGAMMA_CNTLA_REGION_10_11__REGAMMA_CNTLA_EXP_REGION11_LUT_OFFSET_MASK 0x01ff0000L
+#define REGAMMA_CNTLA_REGION_10_11__REGAMMA_CNTLA_EXP_REGION11_LUT_OFFSET__SHIFT 0x00000010
+#define REGAMMA_CNTLA_REGION_10_11__REGAMMA_CNTLA_EXP_REGION11_NUM_SEGMENTS_MASK 0x70000000L
+#define REGAMMA_CNTLA_REGION_10_11__REGAMMA_CNTLA_EXP_REGION11_NUM_SEGMENTS__SHIFT 0x0000001c
+#define REGAMMA_CNTLA_REGION_12_13__REGAMMA_CNTLA_EXP_REGION12_LUT_OFFSET_MASK 0x000001ffL
+#define REGAMMA_CNTLA_REGION_12_13__REGAMMA_CNTLA_EXP_REGION12_LUT_OFFSET__SHIFT 0x00000000
+#define REGAMMA_CNTLA_REGION_12_13__REGAMMA_CNTLA_EXP_REGION12_NUM_SEGMENTS_MASK 0x00007000L
+#define REGAMMA_CNTLA_REGION_12_13__REGAMMA_CNTLA_EXP_REGION12_NUM_SEGMENTS__SHIFT 0x0000000c
+#define REGAMMA_CNTLA_REGION_12_13__REGAMMA_CNTLA_EXP_REGION13_LUT_OFFSET_MASK 0x01ff0000L
+#define REGAMMA_CNTLA_REGION_12_13__REGAMMA_CNTLA_EXP_REGION13_LUT_OFFSET__SHIFT 0x00000010
+#define REGAMMA_CNTLA_REGION_12_13__REGAMMA_CNTLA_EXP_REGION13_NUM_SEGMENTS_MASK 0x70000000L
+#define REGAMMA_CNTLA_REGION_12_13__REGAMMA_CNTLA_EXP_REGION13_NUM_SEGMENTS__SHIFT 0x0000001c
+#define REGAMMA_CNTLA_REGION_14_15__REGAMMA_CNTLA_EXP_REGION14_LUT_OFFSET_MASK 0x000001ffL
+#define REGAMMA_CNTLA_REGION_14_15__REGAMMA_CNTLA_EXP_REGION14_LUT_OFFSET__SHIFT 0x00000000
+#define REGAMMA_CNTLA_REGION_14_15__REGAMMA_CNTLA_EXP_REGION14_NUM_SEGMENTS_MASK 0x00007000L
+#define REGAMMA_CNTLA_REGION_14_15__REGAMMA_CNTLA_EXP_REGION14_NUM_SEGMENTS__SHIFT 0x0000000c
+#define REGAMMA_CNTLA_REGION_14_15__REGAMMA_CNTLA_EXP_REGION15_LUT_OFFSET_MASK 0x01ff0000L
+#define REGAMMA_CNTLA_REGION_14_15__REGAMMA_CNTLA_EXP_REGION15_LUT_OFFSET__SHIFT 0x00000010
+#define REGAMMA_CNTLA_REGION_14_15__REGAMMA_CNTLA_EXP_REGION15_NUM_SEGMENTS_MASK 0x70000000L
+#define REGAMMA_CNTLA_REGION_14_15__REGAMMA_CNTLA_EXP_REGION15_NUM_SEGMENTS__SHIFT 0x0000001c
+#define REGAMMA_CNTLA_REGION_2_3__REGAMMA_CNTLA_EXP_REGION2_LUT_OFFSET_MASK 0x000001ffL
+#define REGAMMA_CNTLA_REGION_2_3__REGAMMA_CNTLA_EXP_REGION2_LUT_OFFSET__SHIFT 0x00000000
+#define REGAMMA_CNTLA_REGION_2_3__REGAMMA_CNTLA_EXP_REGION2_NUM_SEGMENTS_MASK 0x00007000L
+#define REGAMMA_CNTLA_REGION_2_3__REGAMMA_CNTLA_EXP_REGION2_NUM_SEGMENTS__SHIFT 0x0000000c
+#define REGAMMA_CNTLA_REGION_2_3__REGAMMA_CNTLA_EXP_REGION3_LUT_OFFSET_MASK 0x01ff0000L
+#define REGAMMA_CNTLA_REGION_2_3__REGAMMA_CNTLA_EXP_REGION3_LUT_OFFSET__SHIFT 0x00000010
+#define REGAMMA_CNTLA_REGION_2_3__REGAMMA_CNTLA_EXP_REGION3_NUM_SEGMENTS_MASK 0x70000000L
+#define REGAMMA_CNTLA_REGION_2_3__REGAMMA_CNTLA_EXP_REGION3_NUM_SEGMENTS__SHIFT 0x0000001c
+#define REGAMMA_CNTLA_REGION_4_5__REGAMMA_CNTLA_EXP_REGION4_LUT_OFFSET_MASK 0x000001ffL
+#define REGAMMA_CNTLA_REGION_4_5__REGAMMA_CNTLA_EXP_REGION4_LUT_OFFSET__SHIFT 0x00000000
+#define REGAMMA_CNTLA_REGION_4_5__REGAMMA_CNTLA_EXP_REGION4_NUM_SEGMENTS_MASK 0x00007000L
+#define REGAMMA_CNTLA_REGION_4_5__REGAMMA_CNTLA_EXP_REGION4_NUM_SEGMENTS__SHIFT 0x0000000c
+#define REGAMMA_CNTLA_REGION_4_5__REGAMMA_CNTLA_EXP_REGION5_LUT_OFFSET_MASK 0x01ff0000L
+#define REGAMMA_CNTLA_REGION_4_5__REGAMMA_CNTLA_EXP_REGION5_LUT_OFFSET__SHIFT 0x00000010
+#define REGAMMA_CNTLA_REGION_4_5__REGAMMA_CNTLA_EXP_REGION5_NUM_SEGMENTS_MASK 0x70000000L
+#define REGAMMA_CNTLA_REGION_4_5__REGAMMA_CNTLA_EXP_REGION5_NUM_SEGMENTS__SHIFT 0x0000001c
+#define REGAMMA_CNTLA_REGION_6_7__REGAMMA_CNTLA_EXP_REGION6_LUT_OFFSET_MASK 0x000001ffL
+#define REGAMMA_CNTLA_REGION_6_7__REGAMMA_CNTLA_EXP_REGION6_LUT_OFFSET__SHIFT 0x00000000
+#define REGAMMA_CNTLA_REGION_6_7__REGAMMA_CNTLA_EXP_REGION6_NUM_SEGMENTS_MASK 0x00007000L
+#define REGAMMA_CNTLA_REGION_6_7__REGAMMA_CNTLA_EXP_REGION6_NUM_SEGMENTS__SHIFT 0x0000000c
+#define REGAMMA_CNTLA_REGION_6_7__REGAMMA_CNTLA_EXP_REGION7_LUT_OFFSET_MASK 0x01ff0000L
+#define REGAMMA_CNTLA_REGION_6_7__REGAMMA_CNTLA_EXP_REGION7_LUT_OFFSET__SHIFT 0x00000010
+#define REGAMMA_CNTLA_REGION_6_7__REGAMMA_CNTLA_EXP_REGION7_NUM_SEGMENTS_MASK 0x70000000L
+#define REGAMMA_CNTLA_REGION_6_7__REGAMMA_CNTLA_EXP_REGION7_NUM_SEGMENTS__SHIFT 0x0000001c
+#define REGAMMA_CNTLA_REGION_8_9__REGAMMA_CNTLA_EXP_REGION8_LUT_OFFSET_MASK 0x000001ffL
+#define REGAMMA_CNTLA_REGION_8_9__REGAMMA_CNTLA_EXP_REGION8_LUT_OFFSET__SHIFT 0x00000000
+#define REGAMMA_CNTLA_REGION_8_9__REGAMMA_CNTLA_EXP_REGION8_NUM_SEGMENTS_MASK 0x00007000L
+#define REGAMMA_CNTLA_REGION_8_9__REGAMMA_CNTLA_EXP_REGION8_NUM_SEGMENTS__SHIFT 0x0000000c
+#define REGAMMA_CNTLA_REGION_8_9__REGAMMA_CNTLA_EXP_REGION9_LUT_OFFSET_MASK 0x01ff0000L
+#define REGAMMA_CNTLA_REGION_8_9__REGAMMA_CNTLA_EXP_REGION9_LUT_OFFSET__SHIFT 0x00000010
+#define REGAMMA_CNTLA_REGION_8_9__REGAMMA_CNTLA_EXP_REGION9_NUM_SEGMENTS_MASK 0x70000000L
+#define REGAMMA_CNTLA_REGION_8_9__REGAMMA_CNTLA_EXP_REGION9_NUM_SEGMENTS__SHIFT 0x0000001c
+#define REGAMMA_CNTLA_SLOPE_CNTL__REGAMMA_CNTLA_EXP_REGION_LINEAR_SLOPE_MASK 0x0003ffffL
+#define REGAMMA_CNTLA_SLOPE_CNTL__REGAMMA_CNTLA_EXP_REGION_LINEAR_SLOPE__SHIFT 0x00000000
+#define REGAMMA_CNTLA_START_CNTL__REGAMMA_CNTLA_EXP_REGION_START_MASK 0x0003ffffL
+#define REGAMMA_CNTLA_START_CNTL__REGAMMA_CNTLA_EXP_REGION_START_SEGMENT_MASK 0x07f00000L
+#define REGAMMA_CNTLA_START_CNTL__REGAMMA_CNTLA_EXP_REGION_START_SEGMENT__SHIFT 0x00000014
+#define REGAMMA_CNTLA_START_CNTL__REGAMMA_CNTLA_EXP_REGION_START__SHIFT 0x00000000
+#define REGAMMA_CNTLB_END_CNTL1__REGAMMA_CNTLB_EXP_REGION_END_MASK 0x0000ffffL
+#define REGAMMA_CNTLB_END_CNTL1__REGAMMA_CNTLB_EXP_REGION_END__SHIFT 0x00000000
+#define REGAMMA_CNTLB_END_CNTL2__REGAMMA_CNTLB_EXP_REGION_END_BASE_MASK 0xffff0000L
+#define REGAMMA_CNTLB_END_CNTL2__REGAMMA_CNTLB_EXP_REGION_END_BASE__SHIFT 0x00000010
+#define REGAMMA_CNTLB_END_CNTL2__REGAMMA_CNTLB_EXP_REGION_END_SLOPE_MASK 0x0000ffffL
+#define REGAMMA_CNTLB_END_CNTL2__REGAMMA_CNTLB_EXP_REGION_END_SLOPE__SHIFT 0x00000000
+#define REGAMMA_CNTLB_REGION_0_1__REGAMMA_CNTLB_EXP_REGION0_LUT_OFFSET_MASK 0x000001ffL
+#define REGAMMA_CNTLB_REGION_0_1__REGAMMA_CNTLB_EXP_REGION0_LUT_OFFSET__SHIFT 0x00000000
+#define REGAMMA_CNTLB_REGION_0_1__REGAMMA_CNTLB_EXP_REGION0_NUM_SEGMENTS_MASK 0x00007000L
+#define REGAMMA_CNTLB_REGION_0_1__REGAMMA_CNTLB_EXP_REGION0_NUM_SEGMENTS__SHIFT 0x0000000c
+#define REGAMMA_CNTLB_REGION_0_1__REGAMMA_CNTLB_EXP_REGION1_LUT_OFFSET_MASK 0x01ff0000L
+#define REGAMMA_CNTLB_REGION_0_1__REGAMMA_CNTLB_EXP_REGION1_LUT_OFFSET__SHIFT 0x00000010
+#define REGAMMA_CNTLB_REGION_0_1__REGAMMA_CNTLB_EXP_REGION1_NUM_SEGMENTS_MASK 0x70000000L
+#define REGAMMA_CNTLB_REGION_0_1__REGAMMA_CNTLB_EXP_REGION1_NUM_SEGMENTS__SHIFT 0x0000001c
+#define REGAMMA_CNTLB_REGION_10_11__REGAMMA_CNTLB_EXP_REGION10_LUT_OFFSET_MASK 0x000001ffL
+#define REGAMMA_CNTLB_REGION_10_11__REGAMMA_CNTLB_EXP_REGION10_LUT_OFFSET__SHIFT 0x00000000
+#define REGAMMA_CNTLB_REGION_10_11__REGAMMA_CNTLB_EXP_REGION10_NUM_SEGMENTS_MASK 0x00007000L
+#define REGAMMA_CNTLB_REGION_10_11__REGAMMA_CNTLB_EXP_REGION10_NUM_SEGMENTS__SHIFT 0x0000000c
+#define REGAMMA_CNTLB_REGION_10_11__REGAMMA_CNTLB_EXP_REGION11_LUT_OFFSET_MASK 0x01ff0000L
+#define REGAMMA_CNTLB_REGION_10_11__REGAMMA_CNTLB_EXP_REGION11_LUT_OFFSET__SHIFT 0x00000010
+#define REGAMMA_CNTLB_REGION_10_11__REGAMMA_CNTLB_EXP_REGION11_NUM_SEGMENTS_MASK 0x70000000L
+#define REGAMMA_CNTLB_REGION_10_11__REGAMMA_CNTLB_EXP_REGION11_NUM_SEGMENTS__SHIFT 0x0000001c
+#define REGAMMA_CNTLB_REGION_12_13__REGAMMA_CNTLB_EXP_REGION12_LUT_OFFSET_MASK 0x000001ffL
+#define REGAMMA_CNTLB_REGION_12_13__REGAMMA_CNTLB_EXP_REGION12_LUT_OFFSET__SHIFT 0x00000000
+#define REGAMMA_CNTLB_REGION_12_13__REGAMMA_CNTLB_EXP_REGION12_NUM_SEGMENTS_MASK 0x00007000L
+#define REGAMMA_CNTLB_REGION_12_13__REGAMMA_CNTLB_EXP_REGION12_NUM_SEGMENTS__SHIFT 0x0000000c
+#define REGAMMA_CNTLB_REGION_12_13__REGAMMA_CNTLB_EXP_REGION13_LUT_OFFSET_MASK 0x01ff0000L
+#define REGAMMA_CNTLB_REGION_12_13__REGAMMA_CNTLB_EXP_REGION13_LUT_OFFSET__SHIFT 0x00000010
+#define REGAMMA_CNTLB_REGION_12_13__REGAMMA_CNTLB_EXP_REGION13_NUM_SEGMENTS_MASK 0x70000000L
+#define REGAMMA_CNTLB_REGION_12_13__REGAMMA_CNTLB_EXP_REGION13_NUM_SEGMENTS__SHIFT 0x0000001c
+#define REGAMMA_CNTLB_REGION_14_15__REGAMMA_CNTLB_EXP_REGION14_LUT_OFFSET_MASK 0x000001ffL
+#define REGAMMA_CNTLB_REGION_14_15__REGAMMA_CNTLB_EXP_REGION14_LUT_OFFSET__SHIFT 0x00000000
+#define REGAMMA_CNTLB_REGION_14_15__REGAMMA_CNTLB_EXP_REGION14_NUM_SEGMENTS_MASK 0x00007000L
+#define REGAMMA_CNTLB_REGION_14_15__REGAMMA_CNTLB_EXP_REGION14_NUM_SEGMENTS__SHIFT 0x0000000c
+#define REGAMMA_CNTLB_REGION_14_15__REGAMMA_CNTLB_EXP_REGION15_LUT_OFFSET_MASK 0x01ff0000L
+#define REGAMMA_CNTLB_REGION_14_15__REGAMMA_CNTLB_EXP_REGION15_LUT_OFFSET__SHIFT 0x00000010
+#define REGAMMA_CNTLB_REGION_14_15__REGAMMA_CNTLB_EXP_REGION15_NUM_SEGMENTS_MASK 0x70000000L
+#define REGAMMA_CNTLB_REGION_14_15__REGAMMA_CNTLB_EXP_REGION15_NUM_SEGMENTS__SHIFT 0x0000001c
+#define REGAMMA_CNTLB_REGION_2_3__REGAMMA_CNTLB_EXP_REGION2_LUT_OFFSET_MASK 0x000001ffL
+#define REGAMMA_CNTLB_REGION_2_3__REGAMMA_CNTLB_EXP_REGION2_LUT_OFFSET__SHIFT 0x00000000
+#define REGAMMA_CNTLB_REGION_2_3__REGAMMA_CNTLB_EXP_REGION2_NUM_SEGMENTS_MASK 0x00007000L
+#define REGAMMA_CNTLB_REGION_2_3__REGAMMA_CNTLB_EXP_REGION2_NUM_SEGMENTS__SHIFT 0x0000000c
+#define REGAMMA_CNTLB_REGION_2_3__REGAMMA_CNTLB_EXP_REGION3_LUT_OFFSET_MASK 0x01ff0000L
+#define REGAMMA_CNTLB_REGION_2_3__REGAMMA_CNTLB_EXP_REGION3_LUT_OFFSET__SHIFT 0x00000010
+#define REGAMMA_CNTLB_REGION_2_3__REGAMMA_CNTLB_EXP_REGION3_NUM_SEGMENTS_MASK 0x70000000L
+#define REGAMMA_CNTLB_REGION_2_3__REGAMMA_CNTLB_EXP_REGION3_NUM_SEGMENTS__SHIFT 0x0000001c
+#define REGAMMA_CNTLB_REGION_4_5__REGAMMA_CNTLB_EXP_REGION4_LUT_OFFSET_MASK 0x000001ffL
+#define REGAMMA_CNTLB_REGION_4_5__REGAMMA_CNTLB_EXP_REGION4_LUT_OFFSET__SHIFT 0x00000000
+#define REGAMMA_CNTLB_REGION_4_5__REGAMMA_CNTLB_EXP_REGION4_NUM_SEGMENTS_MASK 0x00007000L
+#define REGAMMA_CNTLB_REGION_4_5__REGAMMA_CNTLB_EXP_REGION4_NUM_SEGMENTS__SHIFT 0x0000000c
+#define REGAMMA_CNTLB_REGION_4_5__REGAMMA_CNTLB_EXP_REGION5_LUT_OFFSET_MASK 0x01ff0000L
+#define REGAMMA_CNTLB_REGION_4_5__REGAMMA_CNTLB_EXP_REGION5_LUT_OFFSET__SHIFT 0x00000010
+#define REGAMMA_CNTLB_REGION_4_5__REGAMMA_CNTLB_EXP_REGION5_NUM_SEGMENTS_MASK 0x70000000L
+#define REGAMMA_CNTLB_REGION_4_5__REGAMMA_CNTLB_EXP_REGION5_NUM_SEGMENTS__SHIFT 0x0000001c
+#define REGAMMA_CNTLB_REGION_6_7__REGAMMA_CNTLB_EXP_REGION6_LUT_OFFSET_MASK 0x000001ffL
+#define REGAMMA_CNTLB_REGION_6_7__REGAMMA_CNTLB_EXP_REGION6_LUT_OFFSET__SHIFT 0x00000000
+#define REGAMMA_CNTLB_REGION_6_7__REGAMMA_CNTLB_EXP_REGION6_NUM_SEGMENTS_MASK 0x00007000L
+#define REGAMMA_CNTLB_REGION_6_7__REGAMMA_CNTLB_EXP_REGION6_NUM_SEGMENTS__SHIFT 0x0000000c
+#define REGAMMA_CNTLB_REGION_6_7__REGAMMA_CNTLB_EXP_REGION7_LUT_OFFSET_MASK 0x01ff0000L
+#define REGAMMA_CNTLB_REGION_6_7__REGAMMA_CNTLB_EXP_REGION7_LUT_OFFSET__SHIFT 0x00000010
+#define REGAMMA_CNTLB_REGION_6_7__REGAMMA_CNTLB_EXP_REGION7_NUM_SEGMENTS_MASK 0x70000000L
+#define REGAMMA_CNTLB_REGION_6_7__REGAMMA_CNTLB_EXP_REGION7_NUM_SEGMENTS__SHIFT 0x0000001c
+#define REGAMMA_CNTLB_REGION_8_9__REGAMMA_CNTLB_EXP_REGION8_LUT_OFFSET_MASK 0x000001ffL
+#define REGAMMA_CNTLB_REGION_8_9__REGAMMA_CNTLB_EXP_REGION8_LUT_OFFSET__SHIFT 0x00000000
+#define REGAMMA_CNTLB_REGION_8_9__REGAMMA_CNTLB_EXP_REGION8_NUM_SEGMENTS_MASK 0x00007000L
+#define REGAMMA_CNTLB_REGION_8_9__REGAMMA_CNTLB_EXP_REGION8_NUM_SEGMENTS__SHIFT 0x0000000c
+#define REGAMMA_CNTLB_REGION_8_9__REGAMMA_CNTLB_EXP_REGION9_LUT_OFFSET_MASK 0x01ff0000L
+#define REGAMMA_CNTLB_REGION_8_9__REGAMMA_CNTLB_EXP_REGION9_LUT_OFFSET__SHIFT 0x00000010
+#define REGAMMA_CNTLB_REGION_8_9__REGAMMA_CNTLB_EXP_REGION9_NUM_SEGMENTS_MASK 0x70000000L
+#define REGAMMA_CNTLB_REGION_8_9__REGAMMA_CNTLB_EXP_REGION9_NUM_SEGMENTS__SHIFT 0x0000001c
+#define REGAMMA_CNTLB_SLOPE_CNTL__REGAMMA_CNTLB_EXP_REGION_LINEAR_SLOPE_MASK 0x0003ffffL
+#define REGAMMA_CNTLB_SLOPE_CNTL__REGAMMA_CNTLB_EXP_REGION_LINEAR_SLOPE__SHIFT 0x00000000
+#define REGAMMA_CNTLB_START_CNTL__REGAMMA_CNTLB_EXP_REGION_START_MASK 0x0003ffffL
+#define REGAMMA_CNTLB_START_CNTL__REGAMMA_CNTLB_EXP_REGION_START_SEGMENT_MASK 0x07f00000L
+#define REGAMMA_CNTLB_START_CNTL__REGAMMA_CNTLB_EXP_REGION_START_SEGMENT__SHIFT 0x00000014
+#define REGAMMA_CNTLB_START_CNTL__REGAMMA_CNTLB_EXP_REGION_START__SHIFT 0x00000000
+#define REGAMMA_CONTROL__GRPH_REGAMMA_MODE_MASK 0x00000007L
+#define REGAMMA_CONTROL__GRPH_REGAMMA_MODE__SHIFT 0x00000000
+#define REGAMMA_CONTROL__OVL_REGAMMA_MODE_MASK 0x00000070L
+#define REGAMMA_CONTROL__OVL_REGAMMA_MODE__SHIFT 0x00000004
+#define REGAMMA_LUT_DATA__REGAMMA_LUT_DATA_MASK 0x0007ffffL
+#define REGAMMA_LUT_DATA__REGAMMA_LUT_DATA__SHIFT 0x00000000
+#define REGAMMA_LUT_INDEX__REGAMMA_LUT_INDEX_MASK 0x000001ffL
+#define REGAMMA_LUT_INDEX__REGAMMA_LUT_INDEX__SHIFT 0x00000000
+#define REGAMMA_LUT_WRITE_EN_MASK__REGAMMA_LUT_WRITE_EN_MASK_MASK 0x00000007L
+#define REGAMMA_LUT_WRITE_EN_MASK__REGAMMA_LUT_WRITE_EN_MASK__SHIFT 0x00000000
+#define SCL_ALU_CONTROL__SCL_ALU_DISABLE_MASK 0x00000001L
+#define SCL_ALU_CONTROL__SCL_ALU_DISABLE__SHIFT 0x00000000
+#define SCL_BYPASS_CONTROL__SCL_BYPASS_MODE_MASK 0x00000003L
+#define SCL_BYPASS_CONTROL__SCL_BYPASS_MODE__SHIFT 0x00000000
+#define SCL_COEF_RAM_CONFLICT_STATUS__SCL_HOST_CONFLICT_ACK_MASK 0x00000100L
+#define SCL_COEF_RAM_CONFLICT_STATUS__SCL_HOST_CONFLICT_ACK__SHIFT 0x00000008
+#define SCL_COEF_RAM_CONFLICT_STATUS__SCL_HOST_CONFLICT_FLAG_MASK 0x00000001L
+#define SCL_COEF_RAM_CONFLICT_STATUS__SCL_HOST_CONFLICT_FLAG__SHIFT 0x00000000
+#define SCL_COEF_RAM_CONFLICT_STATUS__SCL_HOST_CONFLICT_INT_STATUS_MASK 0x00010000L
+#define SCL_COEF_RAM_CONFLICT_STATUS__SCL_HOST_CONFLICT_INT_STATUS__SHIFT 0x00000010
+#define SCL_COEF_RAM_CONFLICT_STATUS__SCL_HOST_CONFLICT_MASK_MASK 0x00001000L
+#define SCL_COEF_RAM_CONFLICT_STATUS__SCL_HOST_CONFLICT_MASK__SHIFT 0x0000000c
+#define SCL_COEF_RAM_SELECT__SCL_C_RAM_FILTER_TYPE_MASK 0x00030000L
+#define SCL_COEF_RAM_SELECT__SCL_C_RAM_FILTER_TYPE__SHIFT 0x00000010
+#define SCL_COEF_RAM_SELECT__SCL_C_RAM_PHASE_MASK 0x00000f00L
+#define SCL_COEF_RAM_SELECT__SCL_C_RAM_PHASE__SHIFT 0x00000008
+#define SCL_COEF_RAM_SELECT__SCL_C_RAM_TAP_PAIR_IDX_MASK 0x0000000fL
+#define SCL_COEF_RAM_SELECT__SCL_C_RAM_TAP_PAIR_IDX__SHIFT 0x00000000
+#define SCL_COEF_RAM_TAP_DATA__SCL_C_RAM_EVEN_TAP_COEF_EN_MASK 0x00008000L
+#define SCL_COEF_RAM_TAP_DATA__SCL_C_RAM_EVEN_TAP_COEF_EN__SHIFT 0x0000000f
+#define SCL_COEF_RAM_TAP_DATA__SCL_C_RAM_EVEN_TAP_COEF_MASK 0x00003fffL
+#define SCL_COEF_RAM_TAP_DATA__SCL_C_RAM_EVEN_TAP_COEF__SHIFT 0x00000000
+#define SCL_COEF_RAM_TAP_DATA__SCL_C_RAM_ODD_TAP_COEF_EN_MASK 0x80000000L
+#define SCL_COEF_RAM_TAP_DATA__SCL_C_RAM_ODD_TAP_COEF_EN__SHIFT 0x0000001f
+#define SCL_COEF_RAM_TAP_DATA__SCL_C_RAM_ODD_TAP_COEF_MASK 0x3fff0000L
+#define SCL_COEF_RAM_TAP_DATA__SCL_C_RAM_ODD_TAP_COEF__SHIFT 0x00000010
+#define SCL_DEBUG2__SCL_DEBUG2_MASK 0xffffffffL
+#define SCL_DEBUG2__SCL_DEBUG2__SHIFT 0x00000000
+#define SCL_DEBUG__SCL_DEBUG_MASK 0xffffffffL
+#define SCL_DEBUG__SCL_DEBUG__SHIFT 0x00000000
+#define SCL_F_SHARP_CONTROL__SCL_HF_SHARP_EN_MASK 0x00000010L
+#define SCL_F_SHARP_CONTROL__SCL_HF_SHARP_EN__SHIFT 0x00000004
+#define SCL_F_SHARP_CONTROL__SCL_HF_SHARP_SCALE_FACTOR_MASK 0x00000007L
+#define SCL_F_SHARP_CONTROL__SCL_HF_SHARP_SCALE_FACTOR__SHIFT 0x00000000
+#define SCL_F_SHARP_CONTROL__SCL_VF_SHARP_EN_MASK 0x00001000L
+#define SCL_F_SHARP_CONTROL__SCL_VF_SHARP_EN__SHIFT 0x0000000c
+#define SCL_F_SHARP_CONTROL__SCL_VF_SHARP_SCALE_FACTOR_MASK 0x00000700L
+#define SCL_F_SHARP_CONTROL__SCL_VF_SHARP_SCALE_FACTOR__SHIFT 0x00000008
+#define SCL_HORZ_FILTER_CONTROL__SCL_H_FILTER_PICK_NEAREST_MASK 0x00000001L
+#define SCL_HORZ_FILTER_CONTROL__SCL_H_FILTER_PICK_NEAREST__SHIFT 0x00000000
+#define SCL_HORZ_FILTER_SCALE_RATIO__SCL_H_SCALE_RATIO_MASK 0x03ffffffL
+#define SCL_HORZ_FILTER_SCALE_RATIO__SCL_H_SCALE_RATIO__SHIFT 0x00000000
+#define SCLK_CGTT_BLK_CTRL_REG__SCLK_TURN_OFF_DELAY_MASK 0x00000ff0L
+#define SCLK_CGTT_BLK_CTRL_REG__SCLK_TURN_OFF_DELAY__SHIFT 0x00000004
+#define SCLK_CGTT_BLK_CTRL_REG__SCLK_TURN_ON_DELAY_MASK 0x0000000fL
+#define SCLK_CGTT_BLK_CTRL_REG__SCLK_TURN_ON_DELAY__SHIFT 0x00000000
+#define SCL_MANUAL_REPLICATE_CONTROL__SCL_H_MANUAL_REPLICATE_FACTOR_MASK 0x00000f00L
+#define SCL_MANUAL_REPLICATE_CONTROL__SCL_H_MANUAL_REPLICATE_FACTOR__SHIFT 0x00000008
+#define SCL_MANUAL_REPLICATE_CONTROL__SCL_V_MANUAL_REPLICATE_FACTOR_MASK 0x0000000fL
+#define SCL_MANUAL_REPLICATE_CONTROL__SCL_V_MANUAL_REPLICATE_FACTOR__SHIFT 0x00000000
+#define SCL_MODE_CHANGE_DET1__SCL_ALU_H_SCALE_RATIO_MASK 0x0fffff80L
+#define SCL_MODE_CHANGE_DET1__SCL_ALU_H_SCALE_RATIO__SHIFT 0x00000007
+#define SCL_MODE_CHANGE_DET1__SCL_MODE_CHANGE_ACK_MASK 0x00000010L
+#define SCL_MODE_CHANGE_DET1__SCL_MODE_CHANGE_ACK__SHIFT 0x00000004
+#define SCL_MODE_CHANGE_DET1__SCL_MODE_CHANGE_MASK 0x00000001L
+#define SCL_MODE_CHANGE_DET1__SCL_MODE_CHANGE__SHIFT 0x00000000
+#define SCL_MODE_CHANGE_DET2__SCL_ALU_V_SCALE_RATIO_MASK 0x001fffffL
+#define SCL_MODE_CHANGE_DET2__SCL_ALU_V_SCALE_RATIO__SHIFT 0x00000000
+#define SCL_MODE_CHANGE_DET3__SCL_ALU_SOURCE_HEIGHT_MASK 0x00003fffL
+#define SCL_MODE_CHANGE_DET3__SCL_ALU_SOURCE_HEIGHT__SHIFT 0x00000000
+#define SCL_MODE_CHANGE_DET3__SCL_ALU_SOURCE_WIDTH_MASK 0x3fff0000L
+#define SCL_MODE_CHANGE_DET3__SCL_ALU_SOURCE_WIDTH__SHIFT 0x00000010
+#define SCL_MODE_CHANGE_MASK__SCL_MODE_CHANGE_MASK_MASK 0x00000001L
+#define SCL_MODE_CHANGE_MASK__SCL_MODE_CHANGE_MASK__SHIFT 0x00000000
+#define SCL_TEST_DEBUG_DATA__SCL_TEST_DEBUG_DATA_MASK 0xffffffffL
+#define SCL_TEST_DEBUG_DATA__SCL_TEST_DEBUG_DATA__SHIFT 0x00000000
+#define SCL_TEST_DEBUG_INDEX__SCL_TEST_DEBUG_INDEX_MASK 0x000000ffL
+#define SCL_TEST_DEBUG_INDEX__SCL_TEST_DEBUG_INDEX__SHIFT 0x00000000
+#define SCL_TEST_DEBUG_INDEX__SCL_TEST_DEBUG_WRITE_EN_MASK 0x00000100L
+#define SCL_TEST_DEBUG_INDEX__SCL_TEST_DEBUG_WRITE_EN__SHIFT 0x00000008
+#define SCL_UPDATE__SCL_UPDATE_LOCK_MASK 0x00010000L
+#define SCL_UPDATE__SCL_UPDATE_LOCK__SHIFT 0x00000010
+#define SCL_UPDATE__SCL_UPDATE_PENDING_MASK 0x00000001L
+#define SCL_UPDATE__SCL_UPDATE_PENDING__SHIFT 0x00000000
+#define SCL_UPDATE__SCL_UPDATE_TAKEN_MASK 0x00000100L
+#define SCL_UPDATE__SCL_UPDATE_TAKEN__SHIFT 0x00000008
+#define SCL_VERT_FILTER_CONTROL__SCL_V_FILTER_PICK_NEAREST_MASK 0x00000001L
+#define SCL_VERT_FILTER_CONTROL__SCL_V_FILTER_PICK_NEAREST__SHIFT 0x00000000
+#define SCL_VERT_FILTER_INIT_BOT__SCL_V_INIT_FRAC_BOT_MASK 0x0000ffffL
+#define SCL_VERT_FILTER_INIT_BOT__SCL_V_INIT_FRAC_BOT__SHIFT 0x00000000
+#define SCL_VERT_FILTER_INIT_BOT__SCL_V_INIT_INT_BOT_MASK 0x00070000L
+#define SCL_VERT_FILTER_INIT_BOT__SCL_V_INIT_INT_BOT__SHIFT 0x00000010
+#define SCL_VERT_FILTER_INIT__SCL_V_INIT_FRAC_MASK 0x0000ffffL
+#define SCL_VERT_FILTER_INIT__SCL_V_INIT_FRAC__SHIFT 0x00000000
+#define SCL_VERT_FILTER_INIT__SCL_V_INIT_INT_MASK 0x00070000L
+#define SCL_VERT_FILTER_INIT__SCL_V_INIT_INT__SHIFT 0x00000010
+#define SCL_VERT_FILTER_SCALE_RATIO__SCL_V_SCALE_RATIO_MASK 0x03ffffffL
+#define SCL_VERT_FILTER_SCALE_RATIO__SCL_V_SCALE_RATIO__SHIFT 0x00000000
+#define SEQ00__SEQ_RST0B_MASK 0x00000001L
+#define SEQ00__SEQ_RST0B__SHIFT 0x00000000
+#define SEQ00__SEQ_RST1B_MASK 0x00000002L
+#define SEQ00__SEQ_RST1B__SHIFT 0x00000001
+#define SEQ01__SEQ_DOT8_MASK 0x00000001L
+#define SEQ01__SEQ_DOT8__SHIFT 0x00000000
+#define SEQ01__SEQ_MAXBW_MASK 0x00000020L
+#define SEQ01__SEQ_MAXBW__SHIFT 0x00000005
+#define SEQ01__SEQ_PCLKBY2_MASK 0x00000008L
+#define SEQ01__SEQ_PCLKBY2__SHIFT 0x00000003
+#define SEQ01__SEQ_SHIFT2_MASK 0x00000004L
+#define SEQ01__SEQ_SHIFT2__SHIFT 0x00000002
+#define SEQ01__SEQ_SHIFT4_MASK 0x00000010L
+#define SEQ01__SEQ_SHIFT4__SHIFT 0x00000004
+#define SEQ02__SEQ_MAP0_EN_MASK 0x00000001L
+#define SEQ02__SEQ_MAP0_EN__SHIFT 0x00000000
+#define SEQ02__SEQ_MAP1_EN_MASK 0x00000002L
+#define SEQ02__SEQ_MAP1_EN__SHIFT 0x00000001
+#define SEQ02__SEQ_MAP2_EN_MASK 0x00000004L
+#define SEQ02__SEQ_MAP2_EN__SHIFT 0x00000002
+#define SEQ02__SEQ_MAP3_EN_MASK 0x00000008L
+#define SEQ02__SEQ_MAP3_EN__SHIFT 0x00000003
+#define SEQ03__SEQ_FONT_A0_MASK 0x00000020L
+#define SEQ03__SEQ_FONT_A0__SHIFT 0x00000005
+#define SEQ03__SEQ_FONT_A1_MASK 0x00000004L
+#define SEQ03__SEQ_FONT_A1__SHIFT 0x00000002
+#define SEQ03__SEQ_FONT_A2_MASK 0x00000008L
+#define SEQ03__SEQ_FONT_A2__SHIFT 0x00000003
+#define SEQ03__SEQ_FONT_B0_MASK 0x00000010L
+#define SEQ03__SEQ_FONT_B0__SHIFT 0x00000004
+#define SEQ03__SEQ_FONT_B1_MASK 0x00000001L
+#define SEQ03__SEQ_FONT_B1__SHIFT 0x00000000
+#define SEQ03__SEQ_FONT_B2_MASK 0x00000002L
+#define SEQ03__SEQ_FONT_B2__SHIFT 0x00000001
+#define SEQ04__SEQ_256K_MASK 0x00000002L
+#define SEQ04__SEQ_256K__SHIFT 0x00000001
+#define SEQ04__SEQ_CHAIN_MASK 0x00000008L
+#define SEQ04__SEQ_CHAIN__SHIFT 0x00000003
+#define SEQ04__SEQ_ODDEVEN_MASK 0x00000004L
+#define SEQ04__SEQ_ODDEVEN__SHIFT 0x00000002
+#define SEQ8_DATA__SEQ_DATA_MASK 0x000000ffL
+#define SEQ8_DATA__SEQ_DATA__SHIFT 0x00000000
+#define SEQ8_IDX__SEQ_IDX_MASK 0x00000007L
+#define SEQ8_IDX__SEQ_IDX__SHIFT 0x00000000
+#define SINK_DESCRIPTION0__DESCRIPTION_MASK 0x000000ffL
+#define SINK_DESCRIPTION0__DESCRIPTION__SHIFT 0x00000000
+#define SINK_DESCRIPTION10__DESCRIPTION_MASK 0x000000ffL
+#define SINK_DESCRIPTION10__DESCRIPTION__SHIFT 0x00000000
+#define SINK_DESCRIPTION11__DESCRIPTION_MASK 0x000000ffL
+#define SINK_DESCRIPTION11__DESCRIPTION__SHIFT 0x00000000
+#define SINK_DESCRIPTION12__DESCRIPTION_MASK 0x000000ffL
+#define SINK_DESCRIPTION12__DESCRIPTION__SHIFT 0x00000000
+#define SINK_DESCRIPTION13__DESCRIPTION_MASK 0x000000ffL
+#define SINK_DESCRIPTION13__DESCRIPTION__SHIFT 0x00000000
+#define SINK_DESCRIPTION14__DESCRIPTION_MASK 0x000000ffL
+#define SINK_DESCRIPTION14__DESCRIPTION__SHIFT 0x00000000
+#define SINK_DESCRIPTION15__DESCRIPTION_MASK 0x000000ffL
+#define SINK_DESCRIPTION15__DESCRIPTION__SHIFT 0x00000000
+#define SINK_DESCRIPTION16__DESCRIPTION_MASK 0x000000ffL
+#define SINK_DESCRIPTION16__DESCRIPTION__SHIFT 0x00000000
+#define SINK_DESCRIPTION17__DESCRIPTION_MASK 0x000000ffL
+#define SINK_DESCRIPTION17__DESCRIPTION__SHIFT 0x00000000
+#define SINK_DESCRIPTION1__DESCRIPTION_MASK 0x000000ffL
+#define SINK_DESCRIPTION1__DESCRIPTION__SHIFT 0x00000000
+#define SINK_DESCRIPTION2__DESCRIPTION_MASK 0x000000ffL
+#define SINK_DESCRIPTION2__DESCRIPTION__SHIFT 0x00000000
+#define SINK_DESCRIPTION3__DESCRIPTION_MASK 0x000000ffL
+#define SINK_DESCRIPTION3__DESCRIPTION__SHIFT 0x00000000
+#define SINK_DESCRIPTION4__DESCRIPTION_MASK 0x000000ffL
+#define SINK_DESCRIPTION4__DESCRIPTION__SHIFT 0x00000000
+#define SINK_DESCRIPTION5__DESCRIPTION_MASK 0x000000ffL
+#define SINK_DESCRIPTION5__DESCRIPTION__SHIFT 0x00000000
+#define SINK_DESCRIPTION6__DESCRIPTION_MASK 0x000000ffL
+#define SINK_DESCRIPTION6__DESCRIPTION__SHIFT 0x00000000
+#define SINK_DESCRIPTION7__DESCRIPTION_MASK 0x000000ffL
+#define SINK_DESCRIPTION7__DESCRIPTION__SHIFT 0x00000000
+#define SINK_DESCRIPTION8__DESCRIPTION_MASK 0x000000ffL
+#define SINK_DESCRIPTION8__DESCRIPTION__SHIFT 0x00000000
+#define SINK_DESCRIPTION9__DESCRIPTION_MASK 0x000000ffL
+#define SINK_DESCRIPTION9__DESCRIPTION__SHIFT 0x00000000
+#define SLAVE_COMM_CMD_REG__SLAVE_COMM_CMD_REG_BYTE0_MASK 0x000000ffL
+#define SLAVE_COMM_CMD_REG__SLAVE_COMM_CMD_REG_BYTE0__SHIFT 0x00000000
+#define SLAVE_COMM_CMD_REG__SLAVE_COMM_CMD_REG_BYTE1_MASK 0x0000ff00L
+#define SLAVE_COMM_CMD_REG__SLAVE_COMM_CMD_REG_BYTE1__SHIFT 0x00000008
+#define SLAVE_COMM_CMD_REG__SLAVE_COMM_CMD_REG_BYTE2_MASK 0x00ff0000L
+#define SLAVE_COMM_CMD_REG__SLAVE_COMM_CMD_REG_BYTE2__SHIFT 0x00000010
+#define SLAVE_COMM_CMD_REG__SLAVE_COMM_CMD_REG_BYTE3_MASK 0xff000000L
+#define SLAVE_COMM_CMD_REG__SLAVE_COMM_CMD_REG_BYTE3__SHIFT 0x00000018
+#define SLAVE_COMM_CNTL_REG__COMM_PORT_MSG_TO_HOST_IN_PROGRESS_MASK 0x00000100L
+#define SLAVE_COMM_CNTL_REG__COMM_PORT_MSG_TO_HOST_IN_PROGRESS__SHIFT 0x00000008
+#define SLAVE_COMM_CNTL_REG__SLAVE_COMM_INTERRUPT_MASK 0x00000001L
+#define SLAVE_COMM_CNTL_REG__SLAVE_COMM_INTERRUPT__SHIFT 0x00000000
+#define SLAVE_COMM_DATA_REG1__SLAVE_COMM_DATA_REG1_BYTE0_MASK 0x000000ffL
+#define SLAVE_COMM_DATA_REG1__SLAVE_COMM_DATA_REG1_BYTE0__SHIFT 0x00000000
+#define SLAVE_COMM_DATA_REG1__SLAVE_COMM_DATA_REG1_BYTE1_MASK 0x0000ff00L
+#define SLAVE_COMM_DATA_REG1__SLAVE_COMM_DATA_REG1_BYTE1__SHIFT 0x00000008
+#define SLAVE_COMM_DATA_REG1__SLAVE_COMM_DATA_REG1_BYTE2_MASK 0x00ff0000L
+#define SLAVE_COMM_DATA_REG1__SLAVE_COMM_DATA_REG1_BYTE2__SHIFT 0x00000010
+#define SLAVE_COMM_DATA_REG1__SLAVE_COMM_DATA_REG1_BYTE3_MASK 0xff000000L
+#define SLAVE_COMM_DATA_REG1__SLAVE_COMM_DATA_REG1_BYTE3__SHIFT 0x00000018
+#define SLAVE_COMM_DATA_REG2__SLAVE_COMM_DATA_REG2_BYTE0_MASK 0x000000ffL
+#define SLAVE_COMM_DATA_REG2__SLAVE_COMM_DATA_REG2_BYTE0__SHIFT 0x00000000
+#define SLAVE_COMM_DATA_REG2__SLAVE_COMM_DATA_REG2_BYTE1_MASK 0x0000ff00L
+#define SLAVE_COMM_DATA_REG2__SLAVE_COMM_DATA_REG2_BYTE1__SHIFT 0x00000008
+#define SLAVE_COMM_DATA_REG2__SLAVE_COMM_DATA_REG2_BYTE2_MASK 0x00ff0000L
+#define SLAVE_COMM_DATA_REG2__SLAVE_COMM_DATA_REG2_BYTE2__SHIFT 0x00000010
+#define SLAVE_COMM_DATA_REG2__SLAVE_COMM_DATA_REG2_BYTE3_MASK 0xff000000L
+#define SLAVE_COMM_DATA_REG2__SLAVE_COMM_DATA_REG2_BYTE3__SHIFT 0x00000018
+#define SLAVE_COMM_DATA_REG3__SLAVE_COMM_DATA_REG3_BYTE0_MASK 0x000000ffL
+#define SLAVE_COMM_DATA_REG3__SLAVE_COMM_DATA_REG3_BYTE0__SHIFT 0x00000000
+#define SLAVE_COMM_DATA_REG3__SLAVE_COMM_DATA_REG3_BYTE1_MASK 0x0000ff00L
+#define SLAVE_COMM_DATA_REG3__SLAVE_COMM_DATA_REG3_BYTE1__SHIFT 0x00000008
+#define SLAVE_COMM_DATA_REG3__SLAVE_COMM_DATA_REG3_BYTE2_MASK 0x00ff0000L
+#define SLAVE_COMM_DATA_REG3__SLAVE_COMM_DATA_REG3_BYTE2__SHIFT 0x00000010
+#define SLAVE_COMM_DATA_REG3__SLAVE_COMM_DATA_REG3_BYTE3_MASK 0xff000000L
+#define SLAVE_COMM_DATA_REG3__SLAVE_COMM_DATA_REG3_BYTE3__SHIFT 0x00000018
+#define SYMCLKA_CLOCK_ENABLE__SYMCLKA_CLOCK_ENABLE_MASK 0x00000001L
+#define SYMCLKA_CLOCK_ENABLE__SYMCLKA_CLOCK_ENABLE__SHIFT 0x00000000
+#define SYMCLKA_CLOCK_ENABLE__SYMCLKA_FE_FORCE_EN_MASK 0x00000010L
+#define SYMCLKA_CLOCK_ENABLE__SYMCLKA_FE_FORCE_EN__SHIFT 0x00000004
+#define SYMCLKA_CLOCK_ENABLE__SYMCLKA_FE_FORCE_SRC_MASK 0x00000700L
+#define SYMCLKA_CLOCK_ENABLE__SYMCLKA_FE_FORCE_SRC__SHIFT 0x00000008
+#define SYMCLKB_CLOCK_ENABLE__SYMCLKB_CLOCK_ENABLE_MASK 0x00000001L
+#define SYMCLKB_CLOCK_ENABLE__SYMCLKB_CLOCK_ENABLE__SHIFT 0x00000000
+#define SYMCLKB_CLOCK_ENABLE__SYMCLKB_FE_FORCE_EN_MASK 0x00000010L
+#define SYMCLKB_CLOCK_ENABLE__SYMCLKB_FE_FORCE_EN__SHIFT 0x00000004
+#define SYMCLKB_CLOCK_ENABLE__SYMCLKB_FE_FORCE_SRC_MASK 0x00000700L
+#define SYMCLKB_CLOCK_ENABLE__SYMCLKB_FE_FORCE_SRC__SHIFT 0x00000008
+#define SYMCLKC_CLOCK_ENABLE__SYMCLKC_CLOCK_ENABLE_MASK 0x00000001L
+#define SYMCLKC_CLOCK_ENABLE__SYMCLKC_CLOCK_ENABLE__SHIFT 0x00000000
+#define SYMCLKC_CLOCK_ENABLE__SYMCLKC_FE_FORCE_EN_MASK 0x00000010L
+#define SYMCLKC_CLOCK_ENABLE__SYMCLKC_FE_FORCE_EN__SHIFT 0x00000004
+#define SYMCLKC_CLOCK_ENABLE__SYMCLKC_FE_FORCE_SRC_MASK 0x00000700L
+#define SYMCLKC_CLOCK_ENABLE__SYMCLKC_FE_FORCE_SRC__SHIFT 0x00000008
+#define SYMCLKD_CLOCK_ENABLE__SYMCLKD_CLOCK_ENABLE_MASK 0x00000001L
+#define SYMCLKD_CLOCK_ENABLE__SYMCLKD_CLOCK_ENABLE__SHIFT 0x00000000
+#define SYMCLKD_CLOCK_ENABLE__SYMCLKD_FE_FORCE_EN_MASK 0x00000010L
+#define SYMCLKD_CLOCK_ENABLE__SYMCLKD_FE_FORCE_EN__SHIFT 0x00000004
+#define SYMCLKD_CLOCK_ENABLE__SYMCLKD_FE_FORCE_SRC_MASK 0x00000700L
+#define SYMCLKD_CLOCK_ENABLE__SYMCLKD_FE_FORCE_SRC__SHIFT 0x00000008
+#define SYMCLKE_CLOCK_ENABLE__SYMCLKE_CLOCK_ENABLE_MASK 0x00000001L
+#define SYMCLKE_CLOCK_ENABLE__SYMCLKE_CLOCK_ENABLE__SHIFT 0x00000000
+#define SYMCLKE_CLOCK_ENABLE__SYMCLKE_FE_FORCE_EN_MASK 0x00000010L
+#define SYMCLKE_CLOCK_ENABLE__SYMCLKE_FE_FORCE_EN__SHIFT 0x00000004
+#define SYMCLKE_CLOCK_ENABLE__SYMCLKE_FE_FORCE_SRC_MASK 0x00000700L
+#define SYMCLKE_CLOCK_ENABLE__SYMCLKE_FE_FORCE_SRC__SHIFT 0x00000008
+#define SYMCLKF_CLOCK_ENABLE__SYMCLKF_CLOCK_ENABLE_MASK 0x00000001L
+#define SYMCLKF_CLOCK_ENABLE__SYMCLKF_CLOCK_ENABLE__SHIFT 0x00000000
+#define SYMCLKF_CLOCK_ENABLE__SYMCLKF_FE_FORCE_EN_MASK 0x00000010L
+#define SYMCLKF_CLOCK_ENABLE__SYMCLKF_FE_FORCE_EN__SHIFT 0x00000004
+#define SYMCLKF_CLOCK_ENABLE__SYMCLKF_FE_FORCE_SRC_MASK 0x00000700L
+#define SYMCLKF_CLOCK_ENABLE__SYMCLKF_FE_FORCE_SRC__SHIFT 0x00000008
+#define TMDS_CNTL__TMDS_COLOR_FORMAT_MASK 0x00000300L
+#define TMDS_CNTL__TMDS_COLOR_FORMAT__SHIFT 0x00000008
+#define TMDS_CNTL__TMDS_PIXEL_ENCODING_MASK 0x00000010L
+#define TMDS_CNTL__TMDS_PIXEL_ENCODING__SHIFT 0x00000004
+#define TMDS_CNTL__TMDS_SYNC_PHASE_MASK 0x00000001L
+#define TMDS_CNTL__TMDS_SYNC_PHASE__SHIFT 0x00000000
+#define TMDS_CONTROL0_FEEDBACK__TMDS_CONTROL0_FEEDBACK_DELAY_MASK 0x00000300L
+#define TMDS_CONTROL0_FEEDBACK__TMDS_CONTROL0_FEEDBACK_DELAY__SHIFT 0x00000008
+#define TMDS_CONTROL0_FEEDBACK__TMDS_CONTROL0_FEEDBACK_SELECT_MASK 0x00000003L
+#define TMDS_CONTROL0_FEEDBACK__TMDS_CONTROL0_FEEDBACK_SELECT__SHIFT 0x00000000
+#define TMDS_CONTROL_CHAR__TMDS_CONTROL_CHAR0_OUT_EN_MASK 0x00000001L
+#define TMDS_CONTROL_CHAR__TMDS_CONTROL_CHAR0_OUT_EN__SHIFT 0x00000000
+#define TMDS_CONTROL_CHAR__TMDS_CONTROL_CHAR1_OUT_EN_MASK 0x00000002L
+#define TMDS_CONTROL_CHAR__TMDS_CONTROL_CHAR1_OUT_EN__SHIFT 0x00000001
+#define TMDS_CONTROL_CHAR__TMDS_CONTROL_CHAR2_OUT_EN_MASK 0x00000004L
+#define TMDS_CONTROL_CHAR__TMDS_CONTROL_CHAR2_OUT_EN__SHIFT 0x00000002
+#define TMDS_CONTROL_CHAR__TMDS_CONTROL_CHAR3_OUT_EN_MASK 0x00000008L
+#define TMDS_CONTROL_CHAR__TMDS_CONTROL_CHAR3_OUT_EN__SHIFT 0x00000003
+#define TMDS_CTL0_1_GEN_CNTL__TMDS_2BIT_COUNTER_EN_MASK 0x80000000L
+#define TMDS_CTL0_1_GEN_CNTL__TMDS_2BIT_COUNTER_EN__SHIFT 0x0000001f
+#define TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_DATA_DELAY_MASK 0x00000070L
+#define TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_DATA_DELAY__SHIFT 0x00000004
+#define TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_DATA_INVERT_MASK 0x00000080L
+#define TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_DATA_INVERT__SHIFT 0x00000007
+#define TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_DATA_MODULATION_MASK 0x00000300L
+#define TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_DATA_MODULATION__SHIFT 0x00000008
+#define TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_DATA_SEL_MASK 0x0000000fL
+#define TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_DATA_SEL__SHIFT 0x00000000
+#define TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_FB_SYNC_CONT_MASK 0x00000800L
+#define TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_FB_SYNC_CONT__SHIFT 0x0000000b
+#define TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_PATTERN_OUT_EN_MASK 0x00001000L
+#define TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_PATTERN_OUT_EN__SHIFT 0x0000000c
+#define TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_USE_FEEDBACK_PATH_MASK 0x00000400L
+#define TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_USE_FEEDBACK_PATH__SHIFT 0x0000000a
+#define TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_DATA_DELAY_MASK 0x00700000L
+#define TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_DATA_DELAY__SHIFT 0x00000014
+#define TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_DATA_INVERT_MASK 0x00800000L
+#define TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_DATA_INVERT__SHIFT 0x00000017
+#define TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_DATA_MODULATION_MASK 0x03000000L
+#define TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_DATA_MODULATION__SHIFT 0x00000018
+#define TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_DATA_SEL_MASK 0x000f0000L
+#define TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_DATA_SEL__SHIFT 0x00000010
+#define TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_FB_SYNC_CONT_MASK 0x08000000L
+#define TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_FB_SYNC_CONT__SHIFT 0x0000001b
+#define TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_PATTERN_OUT_EN_MASK 0x10000000L
+#define TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_PATTERN_OUT_EN__SHIFT 0x0000001c
+#define TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_USE_FEEDBACK_PATH_MASK 0x04000000L
+#define TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_USE_FEEDBACK_PATH__SHIFT 0x0000001a
+#define TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_DATA_DELAY_MASK 0x00000070L
+#define TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_DATA_DELAY__SHIFT 0x00000004
+#define TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_DATA_INVERT_MASK 0x00000080L
+#define TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_DATA_INVERT__SHIFT 0x00000007
+#define TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_DATA_MODULATION_MASK 0x00000300L
+#define TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_DATA_MODULATION__SHIFT 0x00000008
+#define TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_DATA_SEL_MASK 0x0000000fL
+#define TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_DATA_SEL__SHIFT 0x00000000
+#define TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_FB_SYNC_CONT_MASK 0x00000800L
+#define TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_FB_SYNC_CONT__SHIFT 0x0000000b
+#define TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_PATTERN_OUT_EN_MASK 0x00001000L
+#define TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_PATTERN_OUT_EN__SHIFT 0x0000000c
+#define TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_USE_FEEDBACK_PATH_MASK 0x00000400L
+#define TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_USE_FEEDBACK_PATH__SHIFT 0x0000000a
+#define TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_DATA_DELAY_MASK 0x00700000L
+#define TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_DATA_DELAY__SHIFT 0x00000014
+#define TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_DATA_INVERT_MASK 0x00800000L
+#define TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_DATA_INVERT__SHIFT 0x00000017
+#define TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_DATA_MODULATION_MASK 0x03000000L
+#define TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_DATA_MODULATION__SHIFT 0x00000018
+#define TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_DATA_SEL_MASK 0x000f0000L
+#define TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_DATA_SEL__SHIFT 0x00000010
+#define TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_FB_SYNC_CONT_MASK 0x08000000L
+#define TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_FB_SYNC_CONT__SHIFT 0x0000001b
+#define TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_PATTERN_OUT_EN_MASK 0x10000000L
+#define TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_PATTERN_OUT_EN__SHIFT 0x0000001c
+#define TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_USE_FEEDBACK_PATH_MASK 0x04000000L
+#define TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_USE_FEEDBACK_PATH__SHIFT 0x0000001a
+#define TMDS_CTL_BITS__TMDS_CTL0_MASK 0x00000001L
+#define TMDS_CTL_BITS__TMDS_CTL0__SHIFT 0x00000000
+#define TMDS_CTL_BITS__TMDS_CTL1_MASK 0x00000100L
+#define TMDS_CTL_BITS__TMDS_CTL1__SHIFT 0x00000008
+#define TMDS_CTL_BITS__TMDS_CTL2_MASK 0x00010000L
+#define TMDS_CTL_BITS__TMDS_CTL2__SHIFT 0x00000010
+#define TMDS_CTL_BITS__TMDS_CTL3_MASK 0x01000000L
+#define TMDS_CTL_BITS__TMDS_CTL3__SHIFT 0x00000018
+#define TMDS_DCBALANCER_CONTROL__TMDS_DCBALANCER_EN_MASK 0x00000001L
+#define TMDS_DCBALANCER_CONTROL__TMDS_DCBALANCER_EN__SHIFT 0x00000000
+#define TMDS_DCBALANCER_CONTROL__TMDS_DCBALANCER_FORCE_MASK 0x01000000L
+#define TMDS_DCBALANCER_CONTROL__TMDS_DCBALANCER_FORCE__SHIFT 0x00000018
+#define TMDS_DCBALANCER_CONTROL__TMDS_DCBALANCER_TEST_EN_MASK 0x00000100L
+#define TMDS_DCBALANCER_CONTROL__TMDS_DCBALANCER_TEST_EN__SHIFT 0x00000008
+#define TMDS_DCBALANCER_CONTROL__TMDS_DCBALANCER_TEST_IN_MASK 0x000f0000L
+#define TMDS_DCBALANCER_CONTROL__TMDS_DCBALANCER_TEST_IN__SHIFT 0x00000010
+#define TMDS_DCBALANCER_CONTROL__TMDS_SYNC_DCBAL_EN_MASK 0x00000070L
+#define TMDS_DCBALANCER_CONTROL__TMDS_SYNC_DCBAL_EN__SHIFT 0x00000004
+#define TMDS_DEBUG__TMDS_DEBUG_DE_EN_MASK 0x02000000L
+#define TMDS_DEBUG__TMDS_DEBUG_DE_EN__SHIFT 0x00000019
+#define TMDS_DEBUG__TMDS_DEBUG_DE_MASK 0x01000000L
+#define TMDS_DEBUG__TMDS_DEBUG_DE__SHIFT 0x00000018
+#define TMDS_DEBUG__TMDS_DEBUG_EN_MASK 0x00000001L
+#define TMDS_DEBUG__TMDS_DEBUG_EN__SHIFT 0x00000000
+#define TMDS_DEBUG__TMDS_DEBUG_HSYNC_EN_MASK 0x00000200L
+#define TMDS_DEBUG__TMDS_DEBUG_HSYNC_EN__SHIFT 0x00000009
+#define TMDS_DEBUG__TMDS_DEBUG_HSYNC_MASK 0x00000100L
+#define TMDS_DEBUG__TMDS_DEBUG_HSYNC__SHIFT 0x00000008
+#define TMDS_DEBUG__TMDS_DEBUG_VSYNC_EN_MASK 0x00020000L
+#define TMDS_DEBUG__TMDS_DEBUG_VSYNC_EN__SHIFT 0x00000011
+#define TMDS_DEBUG__TMDS_DEBUG_VSYNC_MASK 0x00010000L
+#define TMDS_DEBUG__TMDS_DEBUG_VSYNC__SHIFT 0x00000010
+#define TMDS_STEREOSYNC_CTL_SEL__TMDS_STEREOSYNC_CTL_SEL_MASK 0x00000003L
+#define TMDS_STEREOSYNC_CTL_SEL__TMDS_STEREOSYNC_CTL_SEL__SHIFT 0x00000000
+#define TMDS_SYNC_CHAR_PATTERN_0_1__TMDS_SYNC_CHAR_PATTERN0_MASK 0x000003ffL
+#define TMDS_SYNC_CHAR_PATTERN_0_1__TMDS_SYNC_CHAR_PATTERN0__SHIFT 0x00000000
+#define TMDS_SYNC_CHAR_PATTERN_0_1__TMDS_SYNC_CHAR_PATTERN1_MASK 0x03ff0000L
+#define TMDS_SYNC_CHAR_PATTERN_0_1__TMDS_SYNC_CHAR_PATTERN1__SHIFT 0x00000010
+#define TMDS_SYNC_CHAR_PATTERN_2_3__TMDS_SYNC_CHAR_PATTERN2_MASK 0x000003ffL
+#define TMDS_SYNC_CHAR_PATTERN_2_3__TMDS_SYNC_CHAR_PATTERN2__SHIFT 0x00000000
+#define TMDS_SYNC_CHAR_PATTERN_2_3__TMDS_SYNC_CHAR_PATTERN3_MASK 0x03ff0000L
+#define TMDS_SYNC_CHAR_PATTERN_2_3__TMDS_SYNC_CHAR_PATTERN3__SHIFT 0x00000010
+#define UNIPHYAB_TPG_CONTROL__UNIPHYAB_STATIC_TEST_PATTERN_MASK 0x000003ffL
+#define UNIPHYAB_TPG_CONTROL__UNIPHYAB_STATIC_TEST_PATTERN__SHIFT 0x00000000
+#define UNIPHYAB_TPG_CONTROL__UNIPHYAB_TPG_EN_MASK 0x00010000L
+#define UNIPHYAB_TPG_CONTROL__UNIPHYAB_TPG_EN__SHIFT 0x00000010
+#define UNIPHYAB_TPG_CONTROL__UNIPHYAB_TPG_SEL_MASK 0x000e0000L
+#define UNIPHYAB_TPG_CONTROL__UNIPHYAB_TPG_SEL__SHIFT 0x00000011
+#define UNIPHYAB_TPG_SEED__UNIPHYAB_TPG_SEED_MASK 0x007fffffL
+#define UNIPHYAB_TPG_SEED__UNIPHYAB_TPG_SEED__SHIFT 0x00000000
+#define UNIPHY_ANG_BIST_CNTL__UNIPHY_ANG_BIST_ERROR_MASK 0x001f0000L
+#define UNIPHY_ANG_BIST_CNTL__UNIPHY_ANG_BIST_ERROR__SHIFT 0x00000010
+#define UNIPHY_ANG_BIST_CNTL__UNIPHY_ANG_BIST_RESET_MASK 0x00000002L
+#define UNIPHY_ANG_BIST_CNTL__UNIPHY_ANG_BIST_RESET__SHIFT 0x00000001
+#define UNIPHY_ANG_BIST_CNTL__UNIPHY_PRESETB_MASK 0x01000000L
+#define UNIPHY_ANG_BIST_CNTL__UNIPHY_PRESETB__SHIFT 0x00000018
+#define UNIPHY_ANG_BIST_CNTL__UNIPHY_RX_BIAS_MASK 0x00000f00L
+#define UNIPHY_ANG_BIST_CNTL__UNIPHY_RX_BIAS__SHIFT 0x00000008
+#define UNIPHY_ANG_BIST_CNTL__UNIPHY_TEST_RX_EN_MASK 0x00000001L
+#define UNIPHY_ANG_BIST_CNTL__UNIPHY_TEST_RX_EN__SHIFT 0x00000000
+#define UNIPHYCD_TPG_CONTROL__UNIPHYCD_STATIC_TEST_PATTERN_MASK 0x000003ffL
+#define UNIPHYCD_TPG_CONTROL__UNIPHYCD_STATIC_TEST_PATTERN__SHIFT 0x00000000
+#define UNIPHYCD_TPG_CONTROL__UNIPHYCD_TPG_EN_MASK 0x00010000L
+#define UNIPHYCD_TPG_CONTROL__UNIPHYCD_TPG_EN__SHIFT 0x00000010
+#define UNIPHYCD_TPG_CONTROL__UNIPHYCD_TPG_SEL_MASK 0x000e0000L
+#define UNIPHYCD_TPG_CONTROL__UNIPHYCD_TPG_SEL__SHIFT 0x00000011
+#define UNIPHYCD_TPG_SEED__UNIPHYCD_TPG_SEED_MASK 0x007fffffL
+#define UNIPHYCD_TPG_SEED__UNIPHYCD_TPG_SEED__SHIFT 0x00000000
+#define UNIPHY_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL0_XBAR_SOURCE_MASK 0x00000003L
+#define UNIPHY_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL0_XBAR_SOURCE__SHIFT 0x00000000
+#define UNIPHY_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL1_XBAR_SOURCE_MASK 0x00000300L
+#define UNIPHY_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL1_XBAR_SOURCE__SHIFT 0x00000008
+#define UNIPHY_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL2_XBAR_SOURCE_MASK 0x00030000L
+#define UNIPHY_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL2_XBAR_SOURCE__SHIFT 0x00000010
+#define UNIPHY_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL3_XBAR_SOURCE_MASK 0x03000000L
+#define UNIPHY_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL3_XBAR_SOURCE__SHIFT 0x00000018
+#define UNIPHY_DATA_SYNCHRONIZATION__UNIPHY_DSYN_ERROR_MASK 0x00000040L
+#define UNIPHY_DATA_SYNCHRONIZATION__UNIPHY_DSYN_ERROR__SHIFT 0x00000006
+#define UNIPHY_DATA_SYNCHRONIZATION__UNIPHY_DSYN_LEVEL_MASK 0x00000030L
+#define UNIPHY_DATA_SYNCHRONIZATION__UNIPHY_DSYN_LEVEL__SHIFT 0x00000004
+#define UNIPHY_DATA_SYNCHRONIZATION__UNIPHY_DSYNSEL_MASK 0x00000001L
+#define UNIPHY_DATA_SYNCHRONIZATION__UNIPHY_DSYNSEL__SHIFT 0x00000000
+#define UNIPHY_DATA_SYNCHRONIZATION__UNIPHY_DUAL_LINK_PHASE_MASK 0x00010000L
+#define UNIPHY_DATA_SYNCHRONIZATION__UNIPHY_DUAL_LINK_PHASE__SHIFT 0x00000010
+#define UNIPHY_DATA_SYNCHRONIZATION__UNIPHY_LINK_ENABLE_MASK 0x00001000L
+#define UNIPHY_DATA_SYNCHRONIZATION__UNIPHY_LINK_ENABLE__SHIFT 0x0000000c
+#define UNIPHY_DATA_SYNCHRONIZATION__UNIPHY_SOURCE_SELECT_MASK 0x00000100L
+#define UNIPHY_DATA_SYNCHRONIZATION__UNIPHY_SOURCE_SELECT__SHIFT 0x00000008
+#define UNIPHYEF_TPG_CONTROL__UNIPHYEF_STATIC_TEST_PATTERN_MASK 0x000003ffL
+#define UNIPHYEF_TPG_CONTROL__UNIPHYEF_STATIC_TEST_PATTERN__SHIFT 0x00000000
+#define UNIPHYEF_TPG_CONTROL__UNIPHYEF_TPG_EN_MASK 0x00010000L
+#define UNIPHYEF_TPG_CONTROL__UNIPHYEF_TPG_EN__SHIFT 0x00000010
+#define UNIPHYEF_TPG_CONTROL__UNIPHYEF_TPG_SEL_MASK 0x000e0000L
+#define UNIPHYEF_TPG_CONTROL__UNIPHYEF_TPG_SEL__SHIFT 0x00000011
+#define UNIPHYEF_TPG_SEED__UNIPHYEF_TPG_SEED_MASK 0x007fffffL
+#define UNIPHYEF_TPG_SEED__UNIPHYEF_TPG_SEED__SHIFT 0x00000000
+#define UNIPHY_IMPCAL_LINKA__UNIPHY_CALOUT_ERROR_LINKA_AK_MASK 0x00000400L
+#define UNIPHY_IMPCAL_LINKA__UNIPHY_CALOUT_ERROR_LINKA_AK__SHIFT 0x0000000a
+#define UNIPHY_IMPCAL_LINKA__UNIPHY_CALOUT_ERROR_LINKA_MASK 0x00000200L
+#define UNIPHY_IMPCAL_LINKA__UNIPHY_CALOUT_ERROR_LINKA__SHIFT 0x00000009
+#define UNIPHY_IMPCAL_LINKA__UNIPHY_IMPCAL_CALOUT_LINKA_MASK 0x00000100L
+#define UNIPHY_IMPCAL_LINKA__UNIPHY_IMPCAL_CALOUT_LINKA__SHIFT 0x00000008
+#define UNIPHY_IMPCAL_LINKA__UNIPHY_IMPCAL_ENABLE_LINKA_MASK 0x00000001L
+#define UNIPHY_IMPCAL_LINKA__UNIPHY_IMPCAL_ENABLE_LINKA__SHIFT 0x00000000
+#define UNIPHY_IMPCAL_LINKA__UNIPHY_IMPCAL_OVERRIDE_ENABLE_LINKA_MASK 0x10000000L
+#define UNIPHY_IMPCAL_LINKA__UNIPHY_IMPCAL_OVERRIDE_ENABLE_LINKA__SHIFT 0x0000001c
+#define UNIPHY_IMPCAL_LINKA__UNIPHY_IMPCAL_OVERRIDE_LINKA_MASK 0x0f000000L
+#define UNIPHY_IMPCAL_LINKA__UNIPHY_IMPCAL_OVERRIDE_LINKA__SHIFT 0x00000018
+#define UNIPHY_IMPCAL_LINKA__UNIPHY_IMPCAL_SEL_LINKA_MASK 0x40000000L
+#define UNIPHY_IMPCAL_LINKA__UNIPHY_IMPCAL_SEL_LINKA__SHIFT 0x0000001e
+#define UNIPHY_IMPCAL_LINKA__UNIPHY_IMPCAL_STEP_DELAY_LINKA_MASK 0x00f00000L
+#define UNIPHY_IMPCAL_LINKA__UNIPHY_IMPCAL_STEP_DELAY_LINKA__SHIFT 0x00000014
+#define UNIPHY_IMPCAL_LINKA__UNIPHY_IMPCAL_VALUE_LINKA_MASK 0x000f0000L
+#define UNIPHY_IMPCAL_LINKA__UNIPHY_IMPCAL_VALUE_LINKA__SHIFT 0x00000010
+#define UNIPHY_IMPCAL_LINKB__UNIPHY_CALOUT_ERROR_LINKB_AK_MASK 0x00000400L
+#define UNIPHY_IMPCAL_LINKB__UNIPHY_CALOUT_ERROR_LINKB_AK__SHIFT 0x0000000a
+#define UNIPHY_IMPCAL_LINKB__UNIPHY_CALOUT_ERROR_LINKB_MASK 0x00000200L
+#define UNIPHY_IMPCAL_LINKB__UNIPHY_CALOUT_ERROR_LINKB__SHIFT 0x00000009
+#define UNIPHY_IMPCAL_LINKB__UNIPHY_IMPCAL_CALOUT_LINKB_MASK 0x00000100L
+#define UNIPHY_IMPCAL_LINKB__UNIPHY_IMPCAL_CALOUT_LINKB__SHIFT 0x00000008
+#define UNIPHY_IMPCAL_LINKB__UNIPHY_IMPCAL_ENABLE_LINKB_MASK 0x00000001L
+#define UNIPHY_IMPCAL_LINKB__UNIPHY_IMPCAL_ENABLE_LINKB__SHIFT 0x00000000
+#define UNIPHY_IMPCAL_LINKB__UNIPHY_IMPCAL_OVERRIDE_ENABLE_LINKB_MASK 0x10000000L
+#define UNIPHY_IMPCAL_LINKB__UNIPHY_IMPCAL_OVERRIDE_ENABLE_LINKB__SHIFT 0x0000001c
+#define UNIPHY_IMPCAL_LINKB__UNIPHY_IMPCAL_OVERRIDE_LINKB_MASK 0x0f000000L
+#define UNIPHY_IMPCAL_LINKB__UNIPHY_IMPCAL_OVERRIDE_LINKB__SHIFT 0x00000018
+#define UNIPHY_IMPCAL_LINKB__UNIPHY_IMPCAL_SEL_LINKB_MASK 0x40000000L
+#define UNIPHY_IMPCAL_LINKB__UNIPHY_IMPCAL_SEL_LINKB__SHIFT 0x0000001e
+#define UNIPHY_IMPCAL_LINKB__UNIPHY_IMPCAL_STEP_DELAY_LINKB_MASK 0x00f00000L
+#define UNIPHY_IMPCAL_LINKB__UNIPHY_IMPCAL_STEP_DELAY_LINKB__SHIFT 0x00000014
+#define UNIPHY_IMPCAL_LINKB__UNIPHY_IMPCAL_VALUE_LINKB_MASK 0x000f0000L
+#define UNIPHY_IMPCAL_LINKB__UNIPHY_IMPCAL_VALUE_LINKB__SHIFT 0x00000010
+#define UNIPHY_IMPCAL_LINKC__UNIPHY_CALOUT_ERROR_LINKC_AK_MASK 0x00000400L
+#define UNIPHY_IMPCAL_LINKC__UNIPHY_CALOUT_ERROR_LINKC_AK__SHIFT 0x0000000a
+#define UNIPHY_IMPCAL_LINKC__UNIPHY_CALOUT_ERROR_LINKC_MASK 0x00000200L
+#define UNIPHY_IMPCAL_LINKC__UNIPHY_CALOUT_ERROR_LINKC__SHIFT 0x00000009
+#define UNIPHY_IMPCAL_LINKC__UNIPHY_IMPCAL_CALOUT_LINKC_MASK 0x00000100L
+#define UNIPHY_IMPCAL_LINKC__UNIPHY_IMPCAL_CALOUT_LINKC__SHIFT 0x00000008
+#define UNIPHY_IMPCAL_LINKC__UNIPHY_IMPCAL_ENABLE_LINKC_MASK 0x00000001L
+#define UNIPHY_IMPCAL_LINKC__UNIPHY_IMPCAL_ENABLE_LINKC__SHIFT 0x00000000
+#define UNIPHY_IMPCAL_LINKC__UNIPHY_IMPCAL_OVERRIDE_ENABLE_LINKC_MASK 0x10000000L
+#define UNIPHY_IMPCAL_LINKC__UNIPHY_IMPCAL_OVERRIDE_ENABLE_LINKC__SHIFT 0x0000001c
+#define UNIPHY_IMPCAL_LINKC__UNIPHY_IMPCAL_OVERRIDE_LINKC_MASK 0x0f000000L
+#define UNIPHY_IMPCAL_LINKC__UNIPHY_IMPCAL_OVERRIDE_LINKC__SHIFT 0x00000018
+#define UNIPHY_IMPCAL_LINKC__UNIPHY_IMPCAL_SEL_LINKC_MASK 0x40000000L
+#define UNIPHY_IMPCAL_LINKC__UNIPHY_IMPCAL_SEL_LINKC__SHIFT 0x0000001e
+#define UNIPHY_IMPCAL_LINKC__UNIPHY_IMPCAL_STEP_DELAY_LINKC_MASK 0x00f00000L
+#define UNIPHY_IMPCAL_LINKC__UNIPHY_IMPCAL_STEP_DELAY_LINKC__SHIFT 0x00000014
+#define UNIPHY_IMPCAL_LINKC__UNIPHY_IMPCAL_VALUE_LINKC_MASK 0x000f0000L
+#define UNIPHY_IMPCAL_LINKC__UNIPHY_IMPCAL_VALUE_LINKC__SHIFT 0x00000010
+#define UNIPHY_IMPCAL_LINKD__UNIPHY_CALOUT_ERROR_LINKD_AK_MASK 0x00000400L
+#define UNIPHY_IMPCAL_LINKD__UNIPHY_CALOUT_ERROR_LINKD_AK__SHIFT 0x0000000a
+#define UNIPHY_IMPCAL_LINKD__UNIPHY_CALOUT_ERROR_LINKD_MASK 0x00000200L
+#define UNIPHY_IMPCAL_LINKD__UNIPHY_CALOUT_ERROR_LINKD__SHIFT 0x00000009
+#define UNIPHY_IMPCAL_LINKD__UNIPHY_IMPCAL_CALOUT_LINKD_MASK 0x00000100L
+#define UNIPHY_IMPCAL_LINKD__UNIPHY_IMPCAL_CALOUT_LINKD__SHIFT 0x00000008
+#define UNIPHY_IMPCAL_LINKD__UNIPHY_IMPCAL_ENABLE_LINKD_MASK 0x00000001L
+#define UNIPHY_IMPCAL_LINKD__UNIPHY_IMPCAL_ENABLE_LINKD__SHIFT 0x00000000
+#define UNIPHY_IMPCAL_LINKD__UNIPHY_IMPCAL_OVERRIDE_ENABLE_LINKD_MASK 0x10000000L
+#define UNIPHY_IMPCAL_LINKD__UNIPHY_IMPCAL_OVERRIDE_ENABLE_LINKD__SHIFT 0x0000001c
+#define UNIPHY_IMPCAL_LINKD__UNIPHY_IMPCAL_OVERRIDE_LINKD_MASK 0x0f000000L
+#define UNIPHY_IMPCAL_LINKD__UNIPHY_IMPCAL_OVERRIDE_LINKD__SHIFT 0x00000018
+#define UNIPHY_IMPCAL_LINKD__UNIPHY_IMPCAL_SEL_LINKD_MASK 0x40000000L
+#define UNIPHY_IMPCAL_LINKD__UNIPHY_IMPCAL_SEL_LINKD__SHIFT 0x0000001e
+#define UNIPHY_IMPCAL_LINKD__UNIPHY_IMPCAL_STEP_DELAY_LINKD_MASK 0x00f00000L
+#define UNIPHY_IMPCAL_LINKD__UNIPHY_IMPCAL_STEP_DELAY_LINKD__SHIFT 0x00000014
+#define UNIPHY_IMPCAL_LINKD__UNIPHY_IMPCAL_VALUE_LINKD_MASK 0x000f0000L
+#define UNIPHY_IMPCAL_LINKD__UNIPHY_IMPCAL_VALUE_LINKD__SHIFT 0x00000010
+#define UNIPHY_IMPCAL_LINKE__UNIPHY_CALOUT_ERROR_LINKE_AK_MASK 0x00000400L
+#define UNIPHY_IMPCAL_LINKE__UNIPHY_CALOUT_ERROR_LINKE_AK__SHIFT 0x0000000a
+#define UNIPHY_IMPCAL_LINKE__UNIPHY_CALOUT_ERROR_LINKE_MASK 0x00000200L
+#define UNIPHY_IMPCAL_LINKE__UNIPHY_CALOUT_ERROR_LINKE__SHIFT 0x00000009
+#define UNIPHY_IMPCAL_LINKE__UNIPHY_IMPCAL_CALOUT_LINKE_MASK 0x00000100L
+#define UNIPHY_IMPCAL_LINKE__UNIPHY_IMPCAL_CALOUT_LINKE__SHIFT 0x00000008
+#define UNIPHY_IMPCAL_LINKE__UNIPHY_IMPCAL_ENABLE_LINKE_MASK 0x00000001L
+#define UNIPHY_IMPCAL_LINKE__UNIPHY_IMPCAL_ENABLE_LINKE__SHIFT 0x00000000
+#define UNIPHY_IMPCAL_LINKE__UNIPHY_IMPCAL_OVERRIDE_ENABLE_LINKE_MASK 0x10000000L
+#define UNIPHY_IMPCAL_LINKE__UNIPHY_IMPCAL_OVERRIDE_ENABLE_LINKE__SHIFT 0x0000001c
+#define UNIPHY_IMPCAL_LINKE__UNIPHY_IMPCAL_OVERRIDE_LINKE_MASK 0x0f000000L
+#define UNIPHY_IMPCAL_LINKE__UNIPHY_IMPCAL_OVERRIDE_LINKE__SHIFT 0x00000018
+#define UNIPHY_IMPCAL_LINKE__UNIPHY_IMPCAL_SEL_LINKE_MASK 0x40000000L
+#define UNIPHY_IMPCAL_LINKE__UNIPHY_IMPCAL_SEL_LINKE__SHIFT 0x0000001e
+#define UNIPHY_IMPCAL_LINKE__UNIPHY_IMPCAL_STEP_DELAY_LINKE_MASK 0x00f00000L
+#define UNIPHY_IMPCAL_LINKE__UNIPHY_IMPCAL_STEP_DELAY_LINKE__SHIFT 0x00000014
+#define UNIPHY_IMPCAL_LINKE__UNIPHY_IMPCAL_VALUE_LINKE_MASK 0x000f0000L
+#define UNIPHY_IMPCAL_LINKE__UNIPHY_IMPCAL_VALUE_LINKE__SHIFT 0x00000010
+#define UNIPHY_IMPCAL_LINKF__UNIPHY_CALOUT_ERROR_LINKF_AK_MASK 0x00000400L
+#define UNIPHY_IMPCAL_LINKF__UNIPHY_CALOUT_ERROR_LINKF_AK__SHIFT 0x0000000a
+#define UNIPHY_IMPCAL_LINKF__UNIPHY_CALOUT_ERROR_LINKF_MASK 0x00000200L
+#define UNIPHY_IMPCAL_LINKF__UNIPHY_CALOUT_ERROR_LINKF__SHIFT 0x00000009
+#define UNIPHY_IMPCAL_LINKF__UNIPHY_IMPCAL_CALOUT_LINKF_MASK 0x00000100L
+#define UNIPHY_IMPCAL_LINKF__UNIPHY_IMPCAL_CALOUT_LINKF__SHIFT 0x00000008
+#define UNIPHY_IMPCAL_LINKF__UNIPHY_IMPCAL_ENABLE_LINKF_MASK 0x00000001L
+#define UNIPHY_IMPCAL_LINKF__UNIPHY_IMPCAL_ENABLE_LINKF__SHIFT 0x00000000
+#define UNIPHY_IMPCAL_LINKF__UNIPHY_IMPCAL_OVERRIDE_ENABLE_LINKF_MASK 0x10000000L
+#define UNIPHY_IMPCAL_LINKF__UNIPHY_IMPCAL_OVERRIDE_ENABLE_LINKF__SHIFT 0x0000001c
+#define UNIPHY_IMPCAL_LINKF__UNIPHY_IMPCAL_OVERRIDE_LINKF_MASK 0x0f000000L
+#define UNIPHY_IMPCAL_LINKF__UNIPHY_IMPCAL_OVERRIDE_LINKF__SHIFT 0x00000018
+#define UNIPHY_IMPCAL_LINKF__UNIPHY_IMPCAL_SEL_LINKF_MASK 0x40000000L
+#define UNIPHY_IMPCAL_LINKF__UNIPHY_IMPCAL_SEL_LINKF__SHIFT 0x0000001e
+#define UNIPHY_IMPCAL_LINKF__UNIPHY_IMPCAL_STEP_DELAY_LINKF_MASK 0x00f00000L
+#define UNIPHY_IMPCAL_LINKF__UNIPHY_IMPCAL_STEP_DELAY_LINKF__SHIFT 0x00000014
+#define UNIPHY_IMPCAL_LINKF__UNIPHY_IMPCAL_VALUE_LINKF_MASK 0x000f0000L
+#define UNIPHY_IMPCAL_LINKF__UNIPHY_IMPCAL_VALUE_LINKF__SHIFT 0x00000010
+#define UNIPHY_IMPCAL_PERIOD__UNIPHY_IMPCAL_PERIOD_MASK 0xffffffffL
+#define UNIPHY_IMPCAL_PERIOD__UNIPHY_IMPCAL_PERIOD__SHIFT 0x00000000
+#define UNIPHY_IMPCAL_PSW_AB__UNIPHY_IMPCAL_PSW_LINKA_MASK 0x00007fffL
+#define UNIPHY_IMPCAL_PSW_AB__UNIPHY_IMPCAL_PSW_LINKA__SHIFT 0x00000000
+#define UNIPHY_IMPCAL_PSW_AB__UNIPHY_IMPCAL_PSW_LINKB_MASK 0x7fff0000L
+#define UNIPHY_IMPCAL_PSW_AB__UNIPHY_IMPCAL_PSW_LINKB__SHIFT 0x00000010
+#define UNIPHY_IMPCAL_PSW_CD__UNIPHY_IMPCAL_PSW_LINKC_MASK 0x00007fffL
+#define UNIPHY_IMPCAL_PSW_CD__UNIPHY_IMPCAL_PSW_LINKC__SHIFT 0x00000000
+#define UNIPHY_IMPCAL_PSW_CD__UNIPHY_IMPCAL_PSW_LINKD_MASK 0x7fff0000L
+#define UNIPHY_IMPCAL_PSW_CD__UNIPHY_IMPCAL_PSW_LINKD__SHIFT 0x00000010
+#define UNIPHY_IMPCAL_PSW_EF__UNIPHY_IMPCAL_PSW_LINKE_MASK 0x00007fffL
+#define UNIPHY_IMPCAL_PSW_EF__UNIPHY_IMPCAL_PSW_LINKE__SHIFT 0x00000000
+#define UNIPHY_IMPCAL_PSW_EF__UNIPHY_IMPCAL_PSW_LINKF_MASK 0x7fff0000L
+#define UNIPHY_IMPCAL_PSW_EF__UNIPHY_IMPCAL_PSW_LINKF__SHIFT 0x00000010
+#define UNIPHY_LINK_CNTL__UNIPHY_CHANNEL0_INVERT_MASK 0x00001000L
+#define UNIPHY_LINK_CNTL__UNIPHY_CHANNEL0_INVERT__SHIFT 0x0000000c
+#define UNIPHY_LINK_CNTL__UNIPHY_CHANNEL1_INVERT_MASK 0x00002000L
+#define UNIPHY_LINK_CNTL__UNIPHY_CHANNEL1_INVERT__SHIFT 0x0000000d
+#define UNIPHY_LINK_CNTL__UNIPHY_CHANNEL2_INVERT_MASK 0x00004000L
+#define UNIPHY_LINK_CNTL__UNIPHY_CHANNEL2_INVERT__SHIFT 0x0000000e
+#define UNIPHY_LINK_CNTL__UNIPHY_CHANNEL3_INVERT_MASK 0x00008000L
+#define UNIPHY_LINK_CNTL__UNIPHY_CHANNEL3_INVERT__SHIFT 0x0000000f
+#define UNIPHY_LINK_CNTL__UNIPHY_LANE_STAGGER_DELAY_MASK 0x00700000L
+#define UNIPHY_LINK_CNTL__UNIPHY_LANE_STAGGER_DELAY__SHIFT 0x00000014
+#define UNIPHY_LINK_CNTL__UNIPHY_LINK_ENABLE_HPD_MASK_MASK 0x00030000L
+#define UNIPHY_LINK_CNTL__UNIPHY_LINK_ENABLE_HPD_MASK__SHIFT 0x00000010
+#define UNIPHY_LINK_CNTL__UNIPHY_MINIMUM_PIXVLD_LOW_DURATION_MASK 0x00000700L
+#define UNIPHY_LINK_CNTL__UNIPHY_MINIMUM_PIXVLD_LOW_DURATION__SHIFT 0x00000008
+#define UNIPHY_LINK_CNTL__UNIPHY_PFREQCHG_MASK 0x00000001L
+#define UNIPHY_LINK_CNTL__UNIPHY_PFREQCHG__SHIFT 0x00000000
+#define UNIPHY_LINK_CNTL__UNIPHY_PIXVLD_RESET_MASK 0x00000010L
+#define UNIPHY_LINK_CNTL__UNIPHY_PIXVLD_RESET__SHIFT 0x00000004
+#define UNIPHY_PLL_CONTROL1__UNIPHY_PLL_BW_CNTL_MASK 0x00ff0000L
+#define UNIPHY_PLL_CONTROL1__UNIPHY_PLL_BW_CNTL__SHIFT 0x00000010
+#define UNIPHY_PLL_CONTROL1__UNIPHY_PLL_CLK_EN_MASK 0x00000008L
+#define UNIPHY_PLL_CONTROL1__UNIPHY_PLL_CLK_EN__SHIFT 0x00000003
+#define UNIPHY_PLL_CONTROL1__UNIPHY_PLL_CLKPH_EN_MASK 0x000000f0L
+#define UNIPHY_PLL_CONTROL1__UNIPHY_PLL_CLKPH_EN__SHIFT 0x00000004
+#define UNIPHY_PLL_CONTROL1__UNIPHY_PLL_ENABLE_MASK 0x00000001L
+#define UNIPHY_PLL_CONTROL1__UNIPHY_PLL_ENABLE__SHIFT 0x00000000
+#define UNIPHY_PLL_CONTROL1__UNIPHY_PLL_EXT_RESET_EN_MASK 0x00000004L
+#define UNIPHY_PLL_CONTROL1__UNIPHY_PLL_EXT_RESET_EN__SHIFT 0x00000002
+#define UNIPHY_PLL_CONTROL1__UNIPHY_PLL_LF_CNTL_MASK 0x00007f00L
+#define UNIPHY_PLL_CONTROL1__UNIPHY_PLL_LF_CNTL__SHIFT 0x00000008
+#define UNIPHY_PLL_CONTROL1__UNIPHY_PLL_RESET_MASK 0x00000002L
+#define UNIPHY_PLL_CONTROL1__UNIPHY_PLL_RESET__SHIFT 0x00000001
+#define UNIPHY_PLL_CONTROL1__UNIPHY_PLL_TEST_BYPCLK_EN_MASK 0x02000000L
+#define UNIPHY_PLL_CONTROL1__UNIPHY_PLL_TEST_BYPCLK_EN__SHIFT 0x00000019
+#define UNIPHY_PLL_CONTROL1__UNIPHY_PLL_TEST_BYPCLK_SRC_MASK 0x01000000L
+#define UNIPHY_PLL_CONTROL1__UNIPHY_PLL_TEST_BYPCLK_SRC__SHIFT 0x00000018
+#define UNIPHY_PLL_CONTROL1__UNIPHY_PLL_TEST_VCTL_ADC_EN_MASK 0x04000000L
+#define UNIPHY_PLL_CONTROL1__UNIPHY_PLL_TEST_VCTL_ADC_EN__SHIFT 0x0000001a
+#define UNIPHY_PLL_CONTROL1__UNIPHY_VCO_MODE_MASK 0x30000000L
+#define UNIPHY_PLL_CONTROL1__UNIPHY_VCO_MODE__SHIFT 0x0000001c
+#define UNIPHY_PLL_CONTROL2__UNIPHY_CLKINV_MASK 0x00002000L
+#define UNIPHY_PLL_CONTROL2__UNIPHY_CLKINV__SHIFT 0x0000000d
+#define UNIPHY_PLL_CONTROL2__UNIPHY_DPLLSEL_MASK 0x0000000cL
+#define UNIPHY_PLL_CONTROL2__UNIPHY_DPLLSEL__SHIFT 0x00000002
+#define UNIPHY_PLL_CONTROL2__UNIPHY_IDCLK_EN_MASK 0x00001000L
+#define UNIPHY_PLL_CONTROL2__UNIPHY_IDCLK_EN__SHIFT 0x0000000c
+#define UNIPHY_PLL_CONTROL2__UNIPHY_IDCLK_SEL_MASK 0x00000010L
+#define UNIPHY_PLL_CONTROL2__UNIPHY_IDCLK_SEL__SHIFT 0x00000004
+#define UNIPHY_PLL_CONTROL2__UNIPHY_IPCIE_REFCLK_SEL_MASK 0x00000020L
+#define UNIPHY_PLL_CONTROL2__UNIPHY_IPCIE_REFCLK_SEL__SHIFT 0x00000005
+#define UNIPHY_PLL_CONTROL2__UNIPHY_IXTALIN_SEL_MASK 0x00000040L
+#define UNIPHY_PLL_CONTROL2__UNIPHY_IXTALIN_SEL__SHIFT 0x00000006
+#define UNIPHY_PLL_CONTROL2__UNIPHY_PCIEREF_CLK_EN_MASK 0x00000800L
+#define UNIPHY_PLL_CONTROL2__UNIPHY_PCIEREF_CLK_EN__SHIFT 0x0000000b
+#define UNIPHY_PLL_CONTROL2__UNIPHY_PDIVFRAC_SEL_MASK 0x00100000L
+#define UNIPHY_PLL_CONTROL2__UNIPHY_PDIVFRAC_SEL__SHIFT 0x00000014
+#define UNIPHY_PLL_CONTROL2__UNIPHY_PDIV_SEL_MASK 0xe0000000L
+#define UNIPHY_PLL_CONTROL2__UNIPHY_PDIV_SEL__SHIFT 0x0000001d
+#define UNIPHY_PLL_CONTROL2__UNIPHY_PLL_DISPCLK_MODE_MASK 0x00000003L
+#define UNIPHY_PLL_CONTROL2__UNIPHY_PLL_DISPCLK_MODE__SHIFT 0x00000000
+#define UNIPHY_PLL_CONTROL2__UNIPHY_PLL_REFCLK_SRC_MASK 0x00000700L
+#define UNIPHY_PLL_CONTROL2__UNIPHY_PLL_REFCLK_SRC__SHIFT 0x00000008
+#define UNIPHY_PLL_CONTROL2__UNIPHY_PLL_REFDIV_MASK 0x1f000000L
+#define UNIPHY_PLL_CONTROL2__UNIPHY_PLL_REFDIV__SHIFT 0x00000018
+#define UNIPHY_PLL_CONTROL2__UNIPHY_PLL_TEST_FBDIV_FRAC_BYPASS_MASK 0x00080000L
+#define UNIPHY_PLL_CONTROL2__UNIPHY_PLL_TEST_FBDIV_FRAC_BYPASS__SHIFT 0x00000013
+#define UNIPHY_PLL_CONTROL2__UNIPHY_PLL_VTOI_BIAS_CNTL_MASK 0x00010000L
+#define UNIPHY_PLL_CONTROL2__UNIPHY_PLL_VTOI_BIAS_CNTL__SHIFT 0x00000010
+#define UNIPHY_PLL_FBDIV__UNIPHY_PLL_FBDIV_FRACTION_MASK 0x0000fffcL
+#define UNIPHY_PLL_FBDIV__UNIPHY_PLL_FBDIV_FRACTION__SHIFT 0x00000002
+#define UNIPHY_PLL_FBDIV__UNIPHY_PLL_FBDIV_MASK 0x0fff0000L
+#define UNIPHY_PLL_FBDIV__UNIPHY_PLL_FBDIV__SHIFT 0x00000010
+#define UNIPHY_PLL_SS_CNTL__UNIPHY_PLL_DSMOD_EN_MASK 0x00001000L
+#define UNIPHY_PLL_SS_CNTL__UNIPHY_PLL_DSMOD_EN__SHIFT 0x0000000c
+#define UNIPHY_PLL_SS_CNTL__UNIPHY_PLL_SS_EN_MASK 0x00002000L
+#define UNIPHY_PLL_SS_CNTL__UNIPHY_PLL_SS_EN__SHIFT 0x0000000d
+#define UNIPHY_PLL_SS_CNTL__UNIPHY_PLL_SS_STEP_NUM_MASK 0x00000fffL
+#define UNIPHY_PLL_SS_CNTL__UNIPHY_PLL_SS_STEP_NUM__SHIFT 0x00000000
+#define UNIPHY_PLL_SS_STEP_SIZE__UNIPHY_PLL_SS_STEP_SIZE_MASK 0x03ffffffL
+#define UNIPHY_PLL_SS_STEP_SIZE__UNIPHY_PLL_SS_STEP_SIZE__SHIFT 0x00000000
+#define UNIPHY_POWER_CONTROL__UNIPHY_BGADJ0P45_MASK 0x000f0000L
+#define UNIPHY_POWER_CONTROL__UNIPHY_BGADJ0P45__SHIFT 0x00000010
+#define UNIPHY_POWER_CONTROL__UNIPHY_BGADJ1P00_MASK 0x00000f00L
+#define UNIPHY_POWER_CONTROL__UNIPHY_BGADJ1P00__SHIFT 0x00000008
+#define UNIPHY_POWER_CONTROL__UNIPHY_BGADJ1P25_MASK 0x0000f000L
+#define UNIPHY_POWER_CONTROL__UNIPHY_BGADJ1P25__SHIFT 0x0000000c
+#define UNIPHY_POWER_CONTROL__UNIPHY_BGPDN_MASK 0x00000001L
+#define UNIPHY_POWER_CONTROL__UNIPHY_BGPDN__SHIFT 0x00000000
+#define UNIPHY_POWER_CONTROL__UNIPHY_BIASREF_SEL_MASK 0x00000004L
+#define UNIPHY_POWER_CONTROL__UNIPHY_BIASREF_SEL__SHIFT 0x00000002
+#define UNIPHY_POWER_CONTROL__UNIPHY_RST_LOGIC_MASK 0x00000002L
+#define UNIPHY_POWER_CONTROL__UNIPHY_RST_LOGIC__SHIFT 0x00000001
+#define UNIPHY_REG_TEST_OUTPUT__UNIPHY_DIG_BIST_ERROR_MASK 0x01f00000L
+#define UNIPHY_REG_TEST_OUTPUT__UNIPHY_DIG_BIST_ERROR__SHIFT 0x00000014
+#define UNIPHY_REG_TEST_OUTPUT__UNIPHY_DIG_BIST_RESET_MASK 0x00008000L
+#define UNIPHY_REG_TEST_OUTPUT__UNIPHY_DIG_BIST_RESET__SHIFT 0x0000000f
+#define UNIPHY_REG_TEST_OUTPUT__UNIPHY_DIG_BIST_SEL_MASK 0x00010000L
+#define UNIPHY_REG_TEST_OUTPUT__UNIPHY_DIG_BIST_SEL__SHIFT 0x00000010
+#define UNIPHY_REG_TEST_OUTPUT__UNIPHY_PLL_INTRESET_MASK 0x20000000L
+#define UNIPHY_REG_TEST_OUTPUT__UNIPHY_PLL_INTRESET__SHIFT 0x0000001d
+#define UNIPHY_REG_TEST_OUTPUT__UNIPHY_PLL_TEST_FREQ_LOCK_MASK 0x10000000L
+#define UNIPHY_REG_TEST_OUTPUT__UNIPHY_PLL_TEST_FREQ_LOCK__SHIFT 0x0000001c
+#define UNIPHY_REG_TEST_OUTPUT__UNIPHY_PLL_TEST_VCTL_ADC_MASK 0x0e000000L
+#define UNIPHY_REG_TEST_OUTPUT__UNIPHY_PLL_TEST_VCTL_ADC__SHIFT 0x00000019
+#define UNIPHY_REG_TEST_OUTPUT__UNIPHY_TEST_CNTL_MASK 0x0000001fL
+#define UNIPHY_REG_TEST_OUTPUT__UNIPHY_TEST_CNTL__SHIFT 0x00000000
+#define UNIPHY_REG_TEST_OUTPUT__UNIPHY_TEST_VCTL_EN_MASK 0x00020000L
+#define UNIPHY_REG_TEST_OUTPUT__UNIPHY_TEST_VCTL_EN__SHIFT 0x00000011
+#define UNIPHY_SOFT_RESET__DSYNCA_SOFT_RESET_MASK 0x00000001L
+#define UNIPHY_SOFT_RESET__DSYNCA_SOFT_RESET__SHIFT 0x00000000
+#define UNIPHY_SOFT_RESET__DSYNCB_SOFT_RESET_MASK 0x00000002L
+#define UNIPHY_SOFT_RESET__DSYNCB_SOFT_RESET__SHIFT 0x00000001
+#define UNIPHY_SOFT_RESET__DSYNCC_SOFT_RESET_MASK 0x00000004L
+#define UNIPHY_SOFT_RESET__DSYNCC_SOFT_RESET__SHIFT 0x00000002
+#define UNIPHY_SOFT_RESET__DSYNCD_SOFT_RESET_MASK 0x00000008L
+#define UNIPHY_SOFT_RESET__DSYNCD_SOFT_RESET__SHIFT 0x00000003
+#define UNIPHY_SOFT_RESET__DSYNCE_SOFT_RESET_MASK 0x00000010L
+#define UNIPHY_SOFT_RESET__DSYNCE_SOFT_RESET__SHIFT 0x00000004
+#define UNIPHY_SOFT_RESET__DSYNCF_SOFT_RESET_MASK 0x00000020L
+#define UNIPHY_SOFT_RESET__DSYNCF_SOFT_RESET__SHIFT 0x00000005
+#define UNIPHY_TX_CONTROL1__UNIPHY_PREMPH_STR0_MASK 0x00000007L
+#define UNIPHY_TX_CONTROL1__UNIPHY_PREMPH_STR0__SHIFT 0x00000000
+#define UNIPHY_TX_CONTROL1__UNIPHY_PREMPH_STR1_MASK 0x00000070L
+#define UNIPHY_TX_CONTROL1__UNIPHY_PREMPH_STR1__SHIFT 0x00000004
+#define UNIPHY_TX_CONTROL1__UNIPHY_PREMPH_STR2_MASK 0x00000700L
+#define UNIPHY_TX_CONTROL1__UNIPHY_PREMPH_STR2__SHIFT 0x00000008
+#define UNIPHY_TX_CONTROL1__UNIPHY_PREMPH_STR3_MASK 0x00007000L
+#define UNIPHY_TX_CONTROL1__UNIPHY_PREMPH_STR3__SHIFT 0x0000000c
+#define UNIPHY_TX_CONTROL1__UNIPHY_PREMPH_STR4_MASK 0x00070000L
+#define UNIPHY_TX_CONTROL1__UNIPHY_PREMPH_STR4__SHIFT 0x00000010
+#define UNIPHY_TX_CONTROL1__UNIPHY_TX_VS0_MASK 0x00300000L
+#define UNIPHY_TX_CONTROL1__UNIPHY_TX_VS0__SHIFT 0x00000014
+#define UNIPHY_TX_CONTROL1__UNIPHY_TX_VS1_MASK 0x00c00000L
+#define UNIPHY_TX_CONTROL1__UNIPHY_TX_VS1__SHIFT 0x00000016
+#define UNIPHY_TX_CONTROL1__UNIPHY_TX_VS2_MASK 0x03000000L
+#define UNIPHY_TX_CONTROL1__UNIPHY_TX_VS2__SHIFT 0x00000018
+#define UNIPHY_TX_CONTROL1__UNIPHY_TX_VS3_MASK 0x0c000000L
+#define UNIPHY_TX_CONTROL1__UNIPHY_TX_VS3__SHIFT 0x0000001a
+#define UNIPHY_TX_CONTROL1__UNIPHY_TX_VS4_MASK 0x30000000L
+#define UNIPHY_TX_CONTROL1__UNIPHY_TX_VS4__SHIFT 0x0000001c
+#define UNIPHY_TX_CONTROL2__UNIPHY_PREMPH0_PC_MASK 0x00000003L
+#define UNIPHY_TX_CONTROL2__UNIPHY_PREMPH0_PC__SHIFT 0x00000000
+#define UNIPHY_TX_CONTROL2__UNIPHY_PREMPH1_PC_MASK 0x00000030L
+#define UNIPHY_TX_CONTROL2__UNIPHY_PREMPH1_PC__SHIFT 0x00000004
+#define UNIPHY_TX_CONTROL2__UNIPHY_PREMPH2_PC_MASK 0x00000300L
+#define UNIPHY_TX_CONTROL2__UNIPHY_PREMPH2_PC__SHIFT 0x00000008
+#define UNIPHY_TX_CONTROL2__UNIPHY_PREMPH3_PC_MASK 0x00003000L
+#define UNIPHY_TX_CONTROL2__UNIPHY_PREMPH3_PC__SHIFT 0x0000000c
+#define UNIPHY_TX_CONTROL2__UNIPHY_PREMPH4_PC_MASK 0x00030000L
+#define UNIPHY_TX_CONTROL2__UNIPHY_PREMPH4_PC__SHIFT 0x00000010
+#define UNIPHY_TX_CONTROL2__UNIPHY_PREMPH_SEL_MASK 0x00100000L
+#define UNIPHY_TX_CONTROL2__UNIPHY_PREMPH_SEL__SHIFT 0x00000014
+#define UNIPHY_TX_CONTROL2__UNIPHY_RT0_CPSEL_MASK 0x00600000L
+#define UNIPHY_TX_CONTROL2__UNIPHY_RT0_CPSEL__SHIFT 0x00000015
+#define UNIPHY_TX_CONTROL2__UNIPHY_RT1_CPSEL_MASK 0x01800000L
+#define UNIPHY_TX_CONTROL2__UNIPHY_RT1_CPSEL__SHIFT 0x00000017
+#define UNIPHY_TX_CONTROL2__UNIPHY_RT2_CPSEL_MASK 0x06000000L
+#define UNIPHY_TX_CONTROL2__UNIPHY_RT2_CPSEL__SHIFT 0x00000019
+#define UNIPHY_TX_CONTROL2__UNIPHY_RT3_CPSEL_MASK 0x18000000L
+#define UNIPHY_TX_CONTROL2__UNIPHY_RT3_CPSEL__SHIFT 0x0000001b
+#define UNIPHY_TX_CONTROL2__UNIPHY_RT4_CPSEL_MASK 0x60000000L
+#define UNIPHY_TX_CONTROL2__UNIPHY_RT4_CPSEL__SHIFT 0x0000001d
+#define UNIPHY_TX_CONTROL3__UNIPHY_LVDS_PULLDWN_MASK 0x80000000L
+#define UNIPHY_TX_CONTROL3__UNIPHY_LVDS_PULLDWN__SHIFT 0x0000001f
+#define UNIPHY_TX_CONTROL3__UNIPHY_PESEL0_MASK 0x00100000L
+#define UNIPHY_TX_CONTROL3__UNIPHY_PESEL0__SHIFT 0x00000014
+#define UNIPHY_TX_CONTROL3__UNIPHY_PESEL1_MASK 0x00200000L
+#define UNIPHY_TX_CONTROL3__UNIPHY_PESEL1__SHIFT 0x00000015
+#define UNIPHY_TX_CONTROL3__UNIPHY_PESEL2_MASK 0x00400000L
+#define UNIPHY_TX_CONTROL3__UNIPHY_PESEL2__SHIFT 0x00000016
+#define UNIPHY_TX_CONTROL3__UNIPHY_PESEL3_MASK 0x00800000L
+#define UNIPHY_TX_CONTROL3__UNIPHY_PESEL3__SHIFT 0x00000017
+#define UNIPHY_TX_CONTROL3__UNIPHY_PREMPH_CS_CLK_MASK 0x000000f0L
+#define UNIPHY_TX_CONTROL3__UNIPHY_PREMPH_CS_CLK__SHIFT 0x00000004
+#define UNIPHY_TX_CONTROL3__UNIPHY_PREMPH_CS_DAT_MASK 0x00000f00L
+#define UNIPHY_TX_CONTROL3__UNIPHY_PREMPH_CS_DAT__SHIFT 0x00000008
+#define UNIPHY_TX_CONTROL3__UNIPHY_PREMPH_PW_CLK_MASK 0x00000003L
+#define UNIPHY_TX_CONTROL3__UNIPHY_PREMPH_PW_CLK__SHIFT 0x00000000
+#define UNIPHY_TX_CONTROL3__UNIPHY_PREMPH_PW_DAT_MASK 0x0000000cL
+#define UNIPHY_TX_CONTROL3__UNIPHY_PREMPH_PW_DAT__SHIFT 0x00000002
+#define UNIPHY_TX_CONTROL3__UNIPHY_PREMPH_STR_CLK_MASK 0x00007000L
+#define UNIPHY_TX_CONTROL3__UNIPHY_PREMPH_STR_CLK__SHIFT 0x0000000c
+#define UNIPHY_TX_CONTROL3__UNIPHY_PREMPH_STR_DAT_MASK 0x00070000L
+#define UNIPHY_TX_CONTROL3__UNIPHY_PREMPH_STR_DAT__SHIFT 0x00000010
+#define UNIPHY_TX_CONTROL3__UNIPHY_TX_VS_ADJ_MASK 0x1f000000L
+#define UNIPHY_TX_CONTROL3__UNIPHY_TX_VS_ADJ__SHIFT 0x00000018
+#define UNIPHY_TX_CONTROL4__UNIPHY_TX_NVS_CLK_MASK 0x0000001fL
+#define UNIPHY_TX_CONTROL4__UNIPHY_TX_NVS_CLK__SHIFT 0x00000000
+#define UNIPHY_TX_CONTROL4__UNIPHY_TX_NVS_DAT_MASK 0x000003e0L
+#define UNIPHY_TX_CONTROL4__UNIPHY_TX_NVS_DAT__SHIFT 0x00000005
+#define UNIPHY_TX_CONTROL4__UNIPHY_TX_OP_CLK_MASK 0x07000000L
+#define UNIPHY_TX_CONTROL4__UNIPHY_TX_OP_CLK__SHIFT 0x00000018
+#define UNIPHY_TX_CONTROL4__UNIPHY_TX_OP_DAT_MASK 0x70000000L
+#define UNIPHY_TX_CONTROL4__UNIPHY_TX_OP_DAT__SHIFT 0x0000001c
+#define UNIPHY_TX_CONTROL4__UNIPHY_TX_PVS_CLK_MASK 0x0001f000L
+#define UNIPHY_TX_CONTROL4__UNIPHY_TX_PVS_CLK__SHIFT 0x0000000c
+#define UNIPHY_TX_CONTROL4__UNIPHY_TX_PVS_DAT_MASK 0x003e0000L
+#define UNIPHY_TX_CONTROL4__UNIPHY_TX_PVS_DAT__SHIFT 0x00000011
+#define VGA25_PPLL_ANALOG__VGA25_CAL_MODE_MASK 0x0000001fL
+#define VGA25_PPLL_ANALOG__VGA25_CAL_MODE__SHIFT 0x00000000
+#define VGA25_PPLL_ANALOG__VGA25_PPLL_CP_MASK 0x00000f00L
+#define VGA25_PPLL_ANALOG__VGA25_PPLL_CP__SHIFT 0x00000008
+#define VGA25_PPLL_ANALOG__VGA25_PPLL_IBIAS_MASK 0xff000000L
+#define VGA25_PPLL_ANALOG__VGA25_PPLL_IBIAS__SHIFT 0x00000018
+#define VGA25_PPLL_ANALOG__VGA25_PPLL_LF_MODE_MASK 0x001ff000L
+#define VGA25_PPLL_ANALOG__VGA25_PPLL_LF_MODE__SHIFT 0x0000000c
+#define VGA25_PPLL_ANALOG__VGA25_PPLL_PFD_PULSE_SEL_MASK 0x00000060L
+#define VGA25_PPLL_ANALOG__VGA25_PPLL_PFD_PULSE_SEL__SHIFT 0x00000005
+#define VGA25_PPLL_FB_DIV__VGA25_PPLL_FB_DIV_FRACTION_CNTL_MASK 0x00000030L
+#define VGA25_PPLL_FB_DIV__VGA25_PPLL_FB_DIV_FRACTION_CNTL__SHIFT 0x00000004
+#define VGA25_PPLL_FB_DIV__VGA25_PPLL_FB_DIV_FRACTION_MASK 0x0000000fL
+#define VGA25_PPLL_FB_DIV__VGA25_PPLL_FB_DIV_FRACTION__SHIFT 0x00000000
+#define VGA25_PPLL_FB_DIV__VGA25_PPLL_FB_DIV_MASK 0x07ff0000L
+#define VGA25_PPLL_FB_DIV__VGA25_PPLL_FB_DIV__SHIFT 0x00000010
+#define VGA25_PPLL_POST_DIV__VGA25_PPLL_POST_DIV_DVOCLK_MASK 0x00007f00L
+#define VGA25_PPLL_POST_DIV__VGA25_PPLL_POST_DIV_DVOCLK__SHIFT 0x00000008
+#define VGA25_PPLL_POST_DIV__VGA25_PPLL_POST_DIV_IDCLK_MASK 0x007f0000L
+#define VGA25_PPLL_POST_DIV__VGA25_PPLL_POST_DIV_IDCLK__SHIFT 0x00000010
+#define VGA25_PPLL_POST_DIV__VGA25_PPLL_POST_DIV_PIXCLK_MASK 0x0000007fL
+#define VGA25_PPLL_POST_DIV__VGA25_PPLL_POST_DIV_PIXCLK__SHIFT 0x00000000
+#define VGA25_PPLL_REF_DIV__VGA25_PPLL_REF_DIV_MASK 0x000003ffL
+#define VGA25_PPLL_REF_DIV__VGA25_PPLL_REF_DIV__SHIFT 0x00000000
+#define VGA28_PPLL_ANALOG__VGA28_CAL_MODE_MASK 0x0000001fL
+#define VGA28_PPLL_ANALOG__VGA28_CAL_MODE__SHIFT 0x00000000
+#define VGA28_PPLL_ANALOG__VGA28_PPLL_CP_MASK 0x00000f00L
+#define VGA28_PPLL_ANALOG__VGA28_PPLL_CP__SHIFT 0x00000008
+#define VGA28_PPLL_ANALOG__VGA28_PPLL_IBIAS_MASK 0xff000000L
+#define VGA28_PPLL_ANALOG__VGA28_PPLL_IBIAS__SHIFT 0x00000018
+#define VGA28_PPLL_ANALOG__VGA28_PPLL_LF_MODE_MASK 0x001ff000L
+#define VGA28_PPLL_ANALOG__VGA28_PPLL_LF_MODE__SHIFT 0x0000000c
+#define VGA28_PPLL_ANALOG__VGA28_PPLL_PFD_PULSE_SEL_MASK 0x00000060L
+#define VGA28_PPLL_ANALOG__VGA28_PPLL_PFD_PULSE_SEL__SHIFT 0x00000005
+#define VGA28_PPLL_FB_DIV__VGA28_PPLL_FB_DIV_FRACTION_CNTL_MASK 0x00000030L
+#define VGA28_PPLL_FB_DIV__VGA28_PPLL_FB_DIV_FRACTION_CNTL__SHIFT 0x00000004
+#define VGA28_PPLL_FB_DIV__VGA28_PPLL_FB_DIV_FRACTION_MASK 0x0000000fL
+#define VGA28_PPLL_FB_DIV__VGA28_PPLL_FB_DIV_FRACTION__SHIFT 0x00000000
+#define VGA28_PPLL_FB_DIV__VGA28_PPLL_FB_DIV_MASK 0x07ff0000L
+#define VGA28_PPLL_FB_DIV__VGA28_PPLL_FB_DIV__SHIFT 0x00000010
+#define VGA28_PPLL_POST_DIV__VGA28_PPLL_POST_DIV_DVOCLK_MASK 0x00007f00L
+#define VGA28_PPLL_POST_DIV__VGA28_PPLL_POST_DIV_DVOCLK__SHIFT 0x00000008
+#define VGA28_PPLL_POST_DIV__VGA28_PPLL_POST_DIV_IDCLK_MASK 0x007f0000L
+#define VGA28_PPLL_POST_DIV__VGA28_PPLL_POST_DIV_IDCLK__SHIFT 0x00000010
+#define VGA28_PPLL_POST_DIV__VGA28_PPLL_POST_DIV_PIXCLK_MASK 0x0000007fL
+#define VGA28_PPLL_POST_DIV__VGA28_PPLL_POST_DIV_PIXCLK__SHIFT 0x00000000
+#define VGA28_PPLL_REF_DIV__VGA28_PPLL_REF_DIV_MASK 0x000003ffL
+#define VGA28_PPLL_REF_DIV__VGA28_PPLL_REF_DIV__SHIFT 0x00000000
+#define VGA41_PPLL_ANALOG__VGA41_CAL_MODE_MASK 0x0000001fL
+#define VGA41_PPLL_ANALOG__VGA41_CAL_MODE__SHIFT 0x00000000
+#define VGA41_PPLL_ANALOG__VGA41_PPLL_CP_MASK 0x00000f00L
+#define VGA41_PPLL_ANALOG__VGA41_PPLL_CP__SHIFT 0x00000008
+#define VGA41_PPLL_ANALOG__VGA41_PPLL_IBIAS_MASK 0xff000000L
+#define VGA41_PPLL_ANALOG__VGA41_PPLL_IBIAS__SHIFT 0x00000018
+#define VGA41_PPLL_ANALOG__VGA41_PPLL_LF_MODE_MASK 0x001ff000L
+#define VGA41_PPLL_ANALOG__VGA41_PPLL_LF_MODE__SHIFT 0x0000000c
+#define VGA41_PPLL_ANALOG__VGA41_PPLL_PFD_PULSE_SEL_MASK 0x00000060L
+#define VGA41_PPLL_ANALOG__VGA41_PPLL_PFD_PULSE_SEL__SHIFT 0x00000005
+#define VGA41_PPLL_FB_DIV__VGA41_PPLL_FB_DIV_FRACTION_CNTL_MASK 0x00000030L
+#define VGA41_PPLL_FB_DIV__VGA41_PPLL_FB_DIV_FRACTION_CNTL__SHIFT 0x00000004
+#define VGA41_PPLL_FB_DIV__VGA41_PPLL_FB_DIV_FRACTION_MASK 0x0000000fL
+#define VGA41_PPLL_FB_DIV__VGA41_PPLL_FB_DIV_FRACTION__SHIFT 0x00000000
+#define VGA41_PPLL_FB_DIV__VGA41_PPLL_FB_DIV_MASK 0x07ff0000L
+#define VGA41_PPLL_FB_DIV__VGA41_PPLL_FB_DIV__SHIFT 0x00000010
+#define VGA41_PPLL_POST_DIV__VGA41_PPLL_POST_DIV_DVOCLK_MASK 0x00007f00L
+#define VGA41_PPLL_POST_DIV__VGA41_PPLL_POST_DIV_DVOCLK__SHIFT 0x00000008
+#define VGA41_PPLL_POST_DIV__VGA41_PPLL_POST_DIV_IDCLK_MASK 0x007f0000L
+#define VGA41_PPLL_POST_DIV__VGA41_PPLL_POST_DIV_IDCLK__SHIFT 0x00000010
+#define VGA41_PPLL_POST_DIV__VGA41_PPLL_POST_DIV_PIXCLK_MASK 0x0000007fL
+#define VGA41_PPLL_POST_DIV__VGA41_PPLL_POST_DIV_PIXCLK__SHIFT 0x00000000
+#define VGA41_PPLL_REF_DIV__VGA41_PPLL_REF_DIV_MASK 0x000003ffL
+#define VGA41_PPLL_REF_DIV__VGA41_PPLL_REF_DIV__SHIFT 0x00000000
+#define VGA_CACHE_CONTROL__VGA_DCCIF_W256ONLY_MASK 0x00100000L
+#define VGA_CACHE_CONTROL__VGA_DCCIF_W256ONLY__SHIFT 0x00000014
+#define VGA_CACHE_CONTROL__VGA_DCCIF_WC_TIMEOUT_MASK 0x3f000000L
+#define VGA_CACHE_CONTROL__VGA_DCCIF_WC_TIMEOUT__SHIFT 0x00000018
+#define VGA_CACHE_CONTROL__VGA_READ_BUFFER_INVALIDATE_MASK 0x00010000L
+#define VGA_CACHE_CONTROL__VGA_READ_BUFFER_INVALIDATE__SHIFT 0x00000010
+#define VGA_CACHE_CONTROL__VGA_READ_CACHE_DISABLE_MASK 0x00000100L
+#define VGA_CACHE_CONTROL__VGA_READ_CACHE_DISABLE__SHIFT 0x00000008
+#define VGA_CACHE_CONTROL__VGA_WRITE_THROUGH_CACHE_DIS_MASK 0x00000001L
+#define VGA_CACHE_CONTROL__VGA_WRITE_THROUGH_CACHE_DIS__SHIFT 0x00000000
+#define VGADCC_DBG_DCCIF_C__DBG_DCCIF_C_MASK 0xffffffffL
+#define VGADCC_DBG_DCCIF_C__DBG_DCCIF_C__SHIFT 0x00000000
+#define VGA_DEBUG_READBACK_DATA__VGA_DEBUG_READBACK_DATA_MASK 0xffffffffL
+#define VGA_DEBUG_READBACK_DATA__VGA_DEBUG_READBACK_DATA__SHIFT 0x00000000
+#define VGA_DEBUG_READBACK_INDEX__VGA_DEBUG_READBACK_INDEX_MASK 0x000000ffL
+#define VGA_DEBUG_READBACK_INDEX__VGA_DEBUG_READBACK_INDEX__SHIFT 0x00000000
+#define VGA_DISPBUF1_SURFACE_ADDR__VGA_DISPBUF1_SURFACE_ADDR_MASK 0x01ffffffL
+#define VGA_DISPBUF1_SURFACE_ADDR__VGA_DISPBUF1_SURFACE_ADDR__SHIFT 0x00000000
+#define VGA_DISPBUF2_SURFACE_ADDR__VGA_DISPBUF2_SURFACE_ADDR_MASK 0x01ffffffL
+#define VGA_DISPBUF2_SURFACE_ADDR__VGA_DISPBUF2_SURFACE_ADDR__SHIFT 0x00000000
+#define VGA_HDP_CONTROL__VGA_MEMORY_DISABLE_MASK 0x00000010L
+#define VGA_HDP_CONTROL__VGA_MEMORY_DISABLE__SHIFT 0x00000004
+#define VGA_HDP_CONTROL__VGA_MEM_PAGE_SELECT_EN_MASK 0x00000001L
+#define VGA_HDP_CONTROL__VGA_MEM_PAGE_SELECT_EN__SHIFT 0x00000000
+#define VGA_HDP_CONTROL__VGA_RBBM_LOCK_DISABLE_MASK 0x00000100L
+#define VGA_HDP_CONTROL__VGA_RBBM_LOCK_DISABLE__SHIFT 0x00000008
+#define VGA_HDP_CONTROL__VGA_SOFT_RESET_MASK 0x00010000L
+#define VGA_HDP_CONTROL__VGA_SOFT_RESET__SHIFT 0x00000010
+#define VGA_HDP_CONTROL__VGA_TEST_RESET_CONTROL_MASK 0x01000000L
+#define VGA_HDP_CONTROL__VGA_TEST_RESET_CONTROL__SHIFT 0x00000018
+#define VGA_HW_DEBUG__VGA_HW_DEBUG_MASK 0xffffffffL
+#define VGA_HW_DEBUG__VGA_HW_DEBUG__SHIFT 0x00000000
+#define VGA_INTERRUPT_CONTROL__VGA_DISPLAY_SWITCH_INT_MASK_MASK 0x00010000L
+#define VGA_INTERRUPT_CONTROL__VGA_DISPLAY_SWITCH_INT_MASK__SHIFT 0x00000010
+#define VGA_INTERRUPT_CONTROL__VGA_MEM_ACCESS_INT_MASK_MASK 0x00000001L
+#define VGA_INTERRUPT_CONTROL__VGA_MEM_ACCESS_INT_MASK__SHIFT 0x00000000
+#define VGA_INTERRUPT_CONTROL__VGA_MODE_AUTO_TRIGGER_INT_MASK_MASK 0x01000000L
+#define VGA_INTERRUPT_CONTROL__VGA_MODE_AUTO_TRIGGER_INT_MASK__SHIFT 0x00000018
+#define VGA_INTERRUPT_CONTROL__VGA_REG_ACCESS_INT_MASK_MASK 0x00000100L
+#define VGA_INTERRUPT_CONTROL__VGA_REG_ACCESS_INT_MASK__SHIFT 0x00000008
+#define VGA_INTERRUPT_STATUS__VGA_DISPLAY_SWITCH_INT_STATUS_MASK 0x00000004L
+#define VGA_INTERRUPT_STATUS__VGA_DISPLAY_SWITCH_INT_STATUS__SHIFT 0x00000002
+#define VGA_INTERRUPT_STATUS__VGA_MEM_ACCESS_INT_STATUS_MASK 0x00000001L
+#define VGA_INTERRUPT_STATUS__VGA_MEM_ACCESS_INT_STATUS__SHIFT 0x00000000
+#define VGA_INTERRUPT_STATUS__VGA_MODE_AUTO_TRIGGER_INT_STATUS_MASK 0x00000008L
+#define VGA_INTERRUPT_STATUS__VGA_MODE_AUTO_TRIGGER_INT_STATUS__SHIFT 0x00000003
+#define VGA_INTERRUPT_STATUS__VGA_REG_ACCESS_INT_STATUS_MASK 0x00000002L
+#define VGA_INTERRUPT_STATUS__VGA_REG_ACCESS_INT_STATUS__SHIFT 0x00000001
+#define VGA_MAIN_CONTROL__VGA_CRTC_TIMEOUT_MASK 0x00000003L
+#define VGA_MAIN_CONTROL__VGA_CRTC_TIMEOUT__SHIFT 0x00000000
+#define VGA_MAIN_CONTROL__VGA_EXTERNAL_DAC_SENSE_MASK 0x20000000L
+#define VGA_MAIN_CONTROL__VGA_EXTERNAL_DAC_SENSE__SHIFT 0x0000001d
+#define VGA_MAIN_CONTROL__VGA_MAIN_TEST_VSTATUS_NO_DISPLAY_CRTC_TIMEOUT_MASK 0x80000000L
+#define VGA_MAIN_CONTROL__VGA_MAIN_TEST_VSTATUS_NO_DISPLAY_CRTC_TIMEOUT__SHIFT 0x0000001f
+#define VGA_MAIN_CONTROL__VGA_READBACK_CRT_INTR_SOURCE_SELECT_MASK 0x03000000L
+#define VGA_MAIN_CONTROL__VGA_READBACK_CRT_INTR_SOURCE_SELECT__SHIFT 0x00000018
+#define VGA_MAIN_CONTROL__VGA_READBACK_NO_DISPLAY_SOURCE_SELECT_MASK 0x00030000L
+#define VGA_MAIN_CONTROL__VGA_READBACK_NO_DISPLAY_SOURCE_SELECT__SHIFT 0x00000010
+#define VGA_MAIN_CONTROL__VGA_READBACK_SENSE_SWITCH_SELECT_MASK 0x04000000L
+#define VGA_MAIN_CONTROL__VGA_READBACK_SENSE_SWITCH_SELECT__SHIFT 0x0000001a
+#define VGA_MAIN_CONTROL__VGA_READBACK_VGA_VSTATUS_SOURCE_SELECT_MASK 0x00000300L
+#define VGA_MAIN_CONTROL__VGA_READBACK_VGA_VSTATUS_SOURCE_SELECT__SHIFT 0x00000008
+#define VGA_MAIN_CONTROL__VGA_READ_URGENT_ENABLE_MASK 0x08000000L
+#define VGA_MAIN_CONTROL__VGA_READ_URGENT_ENABLE__SHIFT 0x0000001b
+#define VGA_MAIN_CONTROL__VGA_RENDER_TIMEOUT_COUNT_MASK 0x00000018L
+#define VGA_MAIN_CONTROL__VGA_RENDER_TIMEOUT_COUNT__SHIFT 0x00000003
+#define VGA_MAIN_CONTROL__VGA_VIRTUAL_VERTICAL_RETRACE_DURATION_MASK 0x000000e0L
+#define VGA_MAIN_CONTROL__VGA_VIRTUAL_VERTICAL_RETRACE_DURATION__SHIFT 0x00000005
+#define VGA_MAIN_CONTROL__VGA_WRITES_URGENT_ENABLE_MASK 0x10000000L
+#define VGA_MAIN_CONTROL__VGA_WRITES_URGENT_ENABLE__SHIFT 0x0000001c
+#define VGA_MEMORY_BASE_ADDRESS_HIGH__VGA_MEMORY_BASE_ADDRESS_HIGH_MASK 0x000000ffL
+#define VGA_MEMORY_BASE_ADDRESS_HIGH__VGA_MEMORY_BASE_ADDRESS_HIGH__SHIFT 0x00000000
+#define VGA_MEMORY_BASE_ADDRESS__VGA_MEMORY_BASE_ADDRESS_MASK 0xffffffffL
+#define VGA_MEMORY_BASE_ADDRESS__VGA_MEMORY_BASE_ADDRESS__SHIFT 0x00000000
+#define VGA_MEM_READ_PAGE_ADDR__VGA_MEM_READ_PAGE0_ADDR_MASK 0x000003ffL
+#define VGA_MEM_READ_PAGE_ADDR__VGA_MEM_READ_PAGE0_ADDR__SHIFT 0x00000000
+#define VGA_MEM_READ_PAGE_ADDR__VGA_MEM_READ_PAGE1_ADDR_MASK 0x03ff0000L
+#define VGA_MEM_READ_PAGE_ADDR__VGA_MEM_READ_PAGE1_ADDR__SHIFT 0x00000010
+#define VGA_MEM_WRITE_PAGE_ADDR__VGA_MEM_WRITE_PAGE0_ADDR_MASK 0x000003ffL
+#define VGA_MEM_WRITE_PAGE_ADDR__VGA_MEM_WRITE_PAGE0_ADDR__SHIFT 0x00000000
+#define VGA_MEM_WRITE_PAGE_ADDR__VGA_MEM_WRITE_PAGE1_ADDR_MASK 0x03ff0000L
+#define VGA_MEM_WRITE_PAGE_ADDR__VGA_MEM_WRITE_PAGE1_ADDR__SHIFT 0x00000010
+#define VGA_MODE_CONTROL__VGA_128K_APERTURE_PAGING_MASK 0x00000100L
+#define VGA_MODE_CONTROL__VGA_128K_APERTURE_PAGING__SHIFT 0x00000008
+#define VGA_MODE_CONTROL__VGA_ATI_LINEAR_MASK 0x00000001L
+#define VGA_MODE_CONTROL__VGA_ATI_LINEAR__SHIFT 0x00000000
+#define VGA_MODE_CONTROL__VGA_LUT_PALETTE_UPDATE_MODE_MASK 0x00000030L
+#define VGA_MODE_CONTROL__VGA_LUT_PALETTE_UPDATE_MODE__SHIFT 0x00000004
+#define VGA_MODE_CONTROL__VGA_TEXT_132_COLUMNS_EN_MASK 0x00010000L
+#define VGA_MODE_CONTROL__VGA_TEXT_132_COLUMNS_EN__SHIFT 0x00000010
+#define VGA_RENDER_CONTROL__VGA_BLINK_MODE_MASK 0x00000060L
+#define VGA_RENDER_CONTROL__VGA_BLINK_MODE__SHIFT 0x00000005
+#define VGA_RENDER_CONTROL__VGA_BLINK_RATE_MASK 0x0000001fL
+#define VGA_RENDER_CONTROL__VGA_BLINK_RATE__SHIFT 0x00000000
+#define VGA_RENDER_CONTROL__VGA_CURSOR_BLINK_INVERT_MASK 0x00000080L
+#define VGA_RENDER_CONTROL__VGA_CURSOR_BLINK_INVERT__SHIFT 0x00000007
+#define VGA_RENDER_CONTROL__VGA_EXTD_ADDR_COUNT_ENABLE_MASK 0x00000100L
+#define VGA_RENDER_CONTROL__VGA_EXTD_ADDR_COUNT_ENABLE__SHIFT 0x00000008
+#define VGA_RENDER_CONTROL__VGA_LOCK_8DOT_MASK 0x01000000L
+#define VGA_RENDER_CONTROL__VGA_LOCK_8DOT__SHIFT 0x00000018
+#define VGA_RENDER_CONTROL__VGAREG_LINECMP_COMPATIBILITY_SEL_MASK 0x02000000L
+#define VGA_RENDER_CONTROL__VGAREG_LINECMP_COMPATIBILITY_SEL__SHIFT 0x00000019
+#define VGA_RENDER_CONTROL__VGA_VSTATUS_CNTL_MASK 0x00030000L
+#define VGA_RENDER_CONTROL__VGA_VSTATUS_CNTL__SHIFT 0x00000010
+#define VGA_SEQUENCER_RESET_CONTROL__D1_BLANK_DISPLAY_WHEN_SEQUENCER_RESET_MASK 0x00000001L
+#define VGA_SEQUENCER_RESET_CONTROL__D1_BLANK_DISPLAY_WHEN_SEQUENCER_RESET__SHIFT 0x00000000
+#define VGA_SEQUENCER_RESET_CONTROL__D1_DISABLE_SYNCS_AND_DE_WHEN_SEQUENCER_RESET_MASK 0x00000100L
+#define VGA_SEQUENCER_RESET_CONTROL__D1_DISABLE_SYNCS_AND_DE_WHEN_SEQUENCER_RESET__SHIFT 0x00000008
+#define VGA_SEQUENCER_RESET_CONTROL__D2_BLANK_DISPLAY_WHEN_SEQUENCER_RESET_MASK 0x00000002L
+#define VGA_SEQUENCER_RESET_CONTROL__D2_BLANK_DISPLAY_WHEN_SEQUENCER_RESET__SHIFT 0x00000001
+#define VGA_SEQUENCER_RESET_CONTROL__D2_DISABLE_SYNCS_AND_DE_WHEN_SEQUENCER_RESET_MASK 0x00000200L
+#define VGA_SEQUENCER_RESET_CONTROL__D2_DISABLE_SYNCS_AND_DE_WHEN_SEQUENCER_RESET__SHIFT 0x00000009
+#define VGA_SEQUENCER_RESET_CONTROL__D3_BLANK_DISPLAY_WHEN_SEQUENCER_RESET_MASK 0x00000004L
+#define VGA_SEQUENCER_RESET_CONTROL__D3_BLANK_DISPLAY_WHEN_SEQUENCER_RESET__SHIFT 0x00000002
+#define VGA_SEQUENCER_RESET_CONTROL__D3_DISABLE_SYNCS_AND_DE_WHEN_SEQUENCER_RESET_MASK 0x00000400L
+#define VGA_SEQUENCER_RESET_CONTROL__D3_DISABLE_SYNCS_AND_DE_WHEN_SEQUENCER_RESET__SHIFT 0x0000000a
+#define VGA_SEQUENCER_RESET_CONTROL__D4_BLANK_DISPLAY_WHEN_SEQUENCER_RESET_MASK 0x00000008L
+#define VGA_SEQUENCER_RESET_CONTROL__D4_BLANK_DISPLAY_WHEN_SEQUENCER_RESET__SHIFT 0x00000003
+#define VGA_SEQUENCER_RESET_CONTROL__D4_DISABLE_SYNCS_AND_DE_WHEN_SEQUENCER_RESET_MASK 0x00000800L
+#define VGA_SEQUENCER_RESET_CONTROL__D4_DISABLE_SYNCS_AND_DE_WHEN_SEQUENCER_RESET__SHIFT 0x0000000b
+#define VGA_SEQUENCER_RESET_CONTROL__D5_BLANK_DISPLAY_WHEN_SEQUENCER_RESET_MASK 0x00000010L
+#define VGA_SEQUENCER_RESET_CONTROL__D5_BLANK_DISPLAY_WHEN_SEQUENCER_RESET__SHIFT 0x00000004
+#define VGA_SEQUENCER_RESET_CONTROL__D5_DISABLE_SYNCS_AND_DE_WHEN_SEQUENCER_RESET_MASK 0x00001000L
+#define VGA_SEQUENCER_RESET_CONTROL__D5_DISABLE_SYNCS_AND_DE_WHEN_SEQUENCER_RESET__SHIFT 0x0000000c
+#define VGA_SEQUENCER_RESET_CONTROL__D6_BLANK_DISPLAY_WHEN_SEQUENCER_RESET_MASK 0x00000020L
+#define VGA_SEQUENCER_RESET_CONTROL__D6_BLANK_DISPLAY_WHEN_SEQUENCER_RESET__SHIFT 0x00000005
+#define VGA_SEQUENCER_RESET_CONTROL__D6_DISABLE_SYNCS_AND_DE_WHEN_SEQUENCER_RESET_MASK 0x00002000L
+#define VGA_SEQUENCER_RESET_CONTROL__D6_DISABLE_SYNCS_AND_DE_WHEN_SEQUENCER_RESET__SHIFT 0x0000000d
+#define VGA_SEQUENCER_RESET_CONTROL__VGA_MODE_AUTO_TRIGGER_ENABLE_MASK 0x00010000L
+#define VGA_SEQUENCER_RESET_CONTROL__VGA_MODE_AUTO_TRIGGER_ENABLE__SHIFT 0x00000010
+#define VGA_SEQUENCER_RESET_CONTROL__VGA_MODE_AUTO_TRIGGER_INDEX_SELECT_MASK 0x00fc0000L
+#define VGA_SEQUENCER_RESET_CONTROL__VGA_MODE_AUTO_TRIGGER_INDEX_SELECT__SHIFT 0x00000012
+#define VGA_SEQUENCER_RESET_CONTROL__VGA_MODE_AUTO_TRIGGER_REGISTER_SELECT_MASK 0x00020000L
+#define VGA_SEQUENCER_RESET_CONTROL__VGA_MODE_AUTO_TRIGGER_REGISTER_SELECT__SHIFT 0x00000011
+#define VGA_SOURCE_SELECT__VGA_SOURCE_SEL_A_MASK 0x00000007L
+#define VGA_SOURCE_SELECT__VGA_SOURCE_SEL_A__SHIFT 0x00000000
+#define VGA_SOURCE_SELECT__VGA_SOURCE_SEL_B_MASK 0x00000700L
+#define VGA_SOURCE_SELECT__VGA_SOURCE_SEL_B__SHIFT 0x00000008
+#define VGA_STATUS_CLEAR__VGA_DISPLAY_SWITCH_INT_CLEAR_MASK 0x00010000L
+#define VGA_STATUS_CLEAR__VGA_DISPLAY_SWITCH_INT_CLEAR__SHIFT 0x00000010
+#define VGA_STATUS_CLEAR__VGA_MEM_ACCESS_INT_CLEAR_MASK 0x00000001L
+#define VGA_STATUS_CLEAR__VGA_MEM_ACCESS_INT_CLEAR__SHIFT 0x00000000
+#define VGA_STATUS_CLEAR__VGA_MODE_AUTO_TRIGGER_INT_CLEAR_MASK 0x01000000L
+#define VGA_STATUS_CLEAR__VGA_MODE_AUTO_TRIGGER_INT_CLEAR__SHIFT 0x00000018
+#define VGA_STATUS_CLEAR__VGA_REG_ACCESS_INT_CLEAR_MASK 0x00000100L
+#define VGA_STATUS_CLEAR__VGA_REG_ACCESS_INT_CLEAR__SHIFT 0x00000008
+#define VGA_STATUS__VGA_DISPLAY_SWITCH_STATUS_MASK 0x00000004L
+#define VGA_STATUS__VGA_DISPLAY_SWITCH_STATUS__SHIFT 0x00000002
+#define VGA_STATUS__VGA_MEM_ACCESS_STATUS_MASK 0x00000001L
+#define VGA_STATUS__VGA_MEM_ACCESS_STATUS__SHIFT 0x00000000
+#define VGA_STATUS__VGA_MODE_AUTO_TRIGGER_STATUS_MASK 0x00000008L
+#define VGA_STATUS__VGA_MODE_AUTO_TRIGGER_STATUS__SHIFT 0x00000003
+#define VGA_STATUS__VGA_REG_ACCESS_STATUS_MASK 0x00000002L
+#define VGA_STATUS__VGA_REG_ACCESS_STATUS__SHIFT 0x00000001
+#define VGA_SURFACE_PITCH_SELECT__VGA_SURFACE_HEIGHT_SELECT_MASK 0x00000300L
+#define VGA_SURFACE_PITCH_SELECT__VGA_SURFACE_HEIGHT_SELECT__SHIFT 0x00000008
+#define VGA_SURFACE_PITCH_SELECT__VGA_SURFACE_PITCH_SELECT_MASK 0x00000003L
+#define VGA_SURFACE_PITCH_SELECT__VGA_SURFACE_PITCH_SELECT__SHIFT 0x00000000
+#define VGA_TEST_CONTROL__VGA_TEST_ENABLE_MASK 0x00000001L
+#define VGA_TEST_CONTROL__VGA_TEST_ENABLE__SHIFT 0x00000000
+#define VGA_TEST_CONTROL__VGA_TEST_RENDER_DISPBUF_SELECT_MASK 0x01000000L
+#define VGA_TEST_CONTROL__VGA_TEST_RENDER_DISPBUF_SELECT__SHIFT 0x00000018
+#define VGA_TEST_CONTROL__VGA_TEST_RENDER_DONE_MASK 0x00010000L
+#define VGA_TEST_CONTROL__VGA_TEST_RENDER_DONE__SHIFT 0x00000010
+#define VGA_TEST_CONTROL__VGA_TEST_RENDER_START_MASK 0x00000100L
+#define VGA_TEST_CONTROL__VGA_TEST_RENDER_START__SHIFT 0x00000008
+#define VGA_TEST_DEBUG_DATA__VGA_TEST_DEBUG_DATA_MASK 0xffffffffL
+#define VGA_TEST_DEBUG_DATA__VGA_TEST_DEBUG_DATA__SHIFT 0x00000000
+#define VGA_TEST_DEBUG_INDEX__VGA_TEST_DEBUG_INDEX_MASK 0x000000ffL
+#define VGA_TEST_DEBUG_INDEX__VGA_TEST_DEBUG_INDEX__SHIFT 0x00000000
+#define VGA_TEST_DEBUG_INDEX__VGA_TEST_DEBUG_WRITE_EN_MASK 0x00000100L
+#define VGA_TEST_DEBUG_INDEX__VGA_TEST_DEBUG_WRITE_EN__SHIFT 0x00000008
+#define VIEWPORT_SIZE__VIEWPORT_HEIGHT_MASK 0x00003fffL
+#define VIEWPORT_SIZE__VIEWPORT_HEIGHT__SHIFT 0x00000000
+#define VIEWPORT_SIZE__VIEWPORT_WIDTH_MASK 0x3fff0000L
+#define VIEWPORT_SIZE__VIEWPORT_WIDTH__SHIFT 0x00000010
+#define VIEWPORT_START__VIEWPORT_X_START_MASK 0x3fff0000L
+#define VIEWPORT_START__VIEWPORT_X_START__SHIFT 0x00000010
+#define VIEWPORT_START__VIEWPORT_Y_START_MASK 0x00003fffL
+#define VIEWPORT_START__VIEWPORT_Y_START__SHIFT 0x00000000
+#define XDMA_CLOCK_GATING_CNTL__XDMA_SCLK_GATE_DIS_MASK 0x00008000L
+#define XDMA_CLOCK_GATING_CNTL__XDMA_SCLK_GATE_DIS__SHIFT 0x0000000f
+#define XDMA_CLOCK_GATING_CNTL__XDMA_SCLK_G_MSTAT_GATE_DIS_MASK 0x00080000L
+#define XDMA_CLOCK_GATING_CNTL__XDMA_SCLK_G_MSTAT_GATE_DIS__SHIFT 0x00000013
+#define XDMA_CLOCK_GATING_CNTL__XDMA_SCLK_G_SDYN_GATE_DIS_MASK 0x00040000L
+#define XDMA_CLOCK_GATING_CNTL__XDMA_SCLK_G_SDYN_GATE_DIS__SHIFT 0x00000012
+#define XDMA_CLOCK_GATING_CNTL__XDMA_SCLK_G_SSTAT_GATE_DIS_MASK 0x00100000L
+#define XDMA_CLOCK_GATING_CNTL__XDMA_SCLK_G_SSTAT_GATE_DIS__SHIFT 0x00000014
+#define XDMA_CLOCK_GATING_CNTL__XDMA_SCLK_REG_GATE_DIS_MASK 0x00010000L
+#define XDMA_CLOCK_GATING_CNTL__XDMA_SCLK_REG_GATE_DIS__SHIFT 0x00000010
+#define XDMA_CLOCK_GATING_CNTL__XDMA_SCLK_TURN_OFF_DELAY_MASK 0x00000ff0L
+#define XDMA_CLOCK_GATING_CNTL__XDMA_SCLK_TURN_OFF_DELAY__SHIFT 0x00000004
+#define XDMA_CLOCK_GATING_CNTL__XDMA_SCLK_TURN_ON_DELAY_MASK 0x0000000fL
+#define XDMA_CLOCK_GATING_CNTL__XDMA_SCLK_TURN_ON_DELAY__SHIFT 0x00000000
+#define XDMA_IF_BIF_STATUS__XDMA_IF_BIF_ERROR_CLEAR_MASK 0x00000100L
+#define XDMA_IF_BIF_STATUS__XDMA_IF_BIF_ERROR_CLEAR__SHIFT 0x00000008
+#define XDMA_IF_BIF_STATUS__XDMA_IF_BIF_ERROR_STATUS_MASK 0x0000000fL
+#define XDMA_IF_BIF_STATUS__XDMA_IF_BIF_ERROR_STATUS__SHIFT 0x00000000
+#define XDMA_INTERRUPT__XDMA_MSTR_MEM_URGENT_ACK_MASK 0x00000400L
+#define XDMA_INTERRUPT__XDMA_MSTR_MEM_URGENT_ACK__SHIFT 0x0000000a
+#define XDMA_INTERRUPT__XDMA_MSTR_MEM_URGENT_MASK_MASK 0x00000200L
+#define XDMA_INTERRUPT__XDMA_MSTR_MEM_URGENT_MASK__SHIFT 0x00000009
+#define XDMA_INTERRUPT__XDMA_MSTR_MEM_URGENT_STAT_MASK 0x00000100L
+#define XDMA_INTERRUPT__XDMA_MSTR_MEM_URGENT_STAT__SHIFT 0x00000008
+#define XDMA_INTERRUPT__XDMA_MSTR_UNDERFLOW_ACK_MASK 0x00004000L
+#define XDMA_INTERRUPT__XDMA_MSTR_UNDERFLOW_ACK__SHIFT 0x0000000e
+#define XDMA_INTERRUPT__XDMA_MSTR_UNDERFLOW_MASK_MASK 0x00002000L
+#define XDMA_INTERRUPT__XDMA_MSTR_UNDERFLOW_MASK__SHIFT 0x0000000d
+#define XDMA_INTERRUPT__XDMA_MSTR_UNDERFLOW_STAT_MASK 0x00001000L
+#define XDMA_INTERRUPT__XDMA_MSTR_UNDERFLOW_STAT__SHIFT 0x0000000c
+#define XDMA_INTERRUPT__XDMA_SLV_READ_URGENT_ACK_MASK 0x00040000L
+#define XDMA_INTERRUPT__XDMA_SLV_READ_URGENT_ACK__SHIFT 0x00000012
+#define XDMA_INTERRUPT__XDMA_SLV_READ_URGENT_MASK_MASK 0x00020000L
+#define XDMA_INTERRUPT__XDMA_SLV_READ_URGENT_MASK__SHIFT 0x00000011
+#define XDMA_INTERRUPT__XDMA_SLV_READ_URGENT_STAT_MASK 0x00010000L
+#define XDMA_INTERRUPT__XDMA_SLV_READ_URGENT_STAT__SHIFT 0x00000010
+#define XDMA_LOCAL_SURFACE_TILING1__XDMA_LOCAL_ARRAY_MODE_MASK 0x0000000fL
+#define XDMA_LOCAL_SURFACE_TILING1__XDMA_LOCAL_ARRAY_MODE__SHIFT 0x00000000
+#define XDMA_LOCAL_SURFACE_TILING1__XDMA_LOCAL_BANK_HEIGHT_MASK 0x00000c00L
+#define XDMA_LOCAL_SURFACE_TILING1__XDMA_LOCAL_BANK_HEIGHT__SHIFT 0x0000000a
+#define XDMA_LOCAL_SURFACE_TILING1__XDMA_LOCAL_BANK_WIDTH_MASK 0x00000300L
+#define XDMA_LOCAL_SURFACE_TILING1__XDMA_LOCAL_BANK_WIDTH__SHIFT 0x00000008
+#define XDMA_LOCAL_SURFACE_TILING1__XDMA_LOCAL_MACRO_TILE_ASPECT_MASK 0x00003000L
+#define XDMA_LOCAL_SURFACE_TILING1__XDMA_LOCAL_MACRO_TILE_ASPECT__SHIFT 0x0000000c
+#define XDMA_LOCAL_SURFACE_TILING1__XDMA_LOCAL_NUM_BANKS_MASK 0x00300000L
+#define XDMA_LOCAL_SURFACE_TILING1__XDMA_LOCAL_NUM_BANKS__SHIFT 0x00000014
+#define XDMA_LOCAL_SURFACE_TILING1__XDMA_LOCAL_TILE_SPLIT_MASK 0x00000070L
+#define XDMA_LOCAL_SURFACE_TILING1__XDMA_LOCAL_TILE_SPLIT__SHIFT 0x00000004
+#define XDMA_LOCAL_SURFACE_TILING2__XDMA_LOCAL_MICRO_TILE_MODE_MASK 0x00c00000L
+#define XDMA_LOCAL_SURFACE_TILING2__XDMA_LOCAL_MICRO_TILE_MODE__SHIFT 0x00000016
+#define XDMA_LOCAL_SURFACE_TILING2__XDMA_LOCAL_PIPE_CONFIG_MASK 0xf8000000L
+#define XDMA_LOCAL_SURFACE_TILING2__XDMA_LOCAL_PIPE_CONFIG__SHIFT 0x0000001b
+#define XDMA_LOCAL_SURFACE_TILING2__XDMA_LOCAL_PIPE_INTERLEAVE_SIZE_MASK 0x00000007L
+#define XDMA_LOCAL_SURFACE_TILING2__XDMA_LOCAL_PIPE_INTERLEAVE_SIZE__SHIFT 0x00000000
+#define XDMA_MC_PCIE_CLIENT_CONFIG__XDMA_MC_PCIE_PRIV_MASK 0x00010000L
+#define XDMA_MC_PCIE_CLIENT_CONFIG__XDMA_MC_PCIE_PRIV__SHIFT 0x00000010
+#define XDMA_MC_PCIE_CLIENT_CONFIG__XDMA_MC_PCIE_SWAP_MASK 0x00000300L
+#define XDMA_MC_PCIE_CLIENT_CONFIG__XDMA_MC_PCIE_SWAP__SHIFT 0x00000008
+#define XDMA_MC_PCIE_CLIENT_CONFIG__XDMA_MC_PCIE_VMID_MASK 0x0000f000L
+#define XDMA_MC_PCIE_CLIENT_CONFIG__XDMA_MC_PCIE_VMID__SHIFT 0x0000000c
+#define XDMA_MEM_POWER_CNTL__XDMA_MEM_LIGHT_SLEEP_DIS_MASK 0x00000001L
+#define XDMA_MEM_POWER_CNTL__XDMA_MEM_LIGHT_SLEEP_DIS__SHIFT 0x00000000
+#define XDMA_MEM_POWER_CNTL__XDMA_MEM_LIGHT_SLEEP_MODE_FORCE_MASK 0x00010000L
+#define XDMA_MEM_POWER_CNTL__XDMA_MEM_LIGHT_SLEEP_MODE_FORCE__SHIFT 0x00000010
+#define XDMA_MEM_POWER_CNTL__XDMA_MEM_POWER_STATE_MASK 0xc0000000L
+#define XDMA_MEM_POWER_CNTL__XDMA_MEM_POWER_STATE__SHIFT 0x0000001e
+#define XDMA_MEM_POWER_CNTL__XDMA_MEM_SHUTDOWN_DIS_MASK 0x00000100L
+#define XDMA_MEM_POWER_CNTL__XDMA_MEM_SHUTDOWN_DIS__SHIFT 0x00000008
+#define XDMA_MEM_POWER_CNTL__XDMA_MEM_SHUTDOWN_MODE_FORCE_MASK 0x01000000L
+#define XDMA_MEM_POWER_CNTL__XDMA_MEM_SHUTDOWN_MODE_FORCE__SHIFT 0x00000018
+#define XDMA_MSTR_CMD_URGENT_CNTL__XDMA_MSTR_CMD_URGENT_LEVEL_MASK 0x00000f00L
+#define XDMA_MSTR_CMD_URGENT_CNTL__XDMA_MSTR_CMD_URGENT_LEVEL__SHIFT 0x00000008
+#define XDMA_MSTR_CNTL__XDMA_MSTR_DEBUG_MODE_MASK 0x00040000L
+#define XDMA_MSTR_CNTL__XDMA_MSTR_DEBUG_MODE__SHIFT 0x00000012
+#define XDMA_MSTR_CNTL__XDMA_MSTR_ENABLE_MASK 0x00010000L
+#define XDMA_MSTR_CNTL__XDMA_MSTR_ENABLE__SHIFT 0x00000010
+#define XDMA_MSTR_CNTL__XDMA_MSTR_MEM_READY_MASK 0x00000200L
+#define XDMA_MSTR_CNTL__XDMA_MSTR_MEM_READY__SHIFT 0x00000009
+#define XDMA_MSTR_CNTL__XDMA_MSTR_SOFT_RESET_MASK 0x00100000L
+#define XDMA_MSTR_CNTL__XDMA_MSTR_SOFT_RESET__SHIFT 0x00000014
+#define XDMA_MSTR_HEIGHT__XDMA_MSTR_ACTIVE_HEIGHT_MASK 0x00003fffL
+#define XDMA_MSTR_HEIGHT__XDMA_MSTR_ACTIVE_HEIGHT__SHIFT 0x00000000
+#define XDMA_MSTR_HEIGHT__XDMA_MSTR_FRAME_HEIGHT_MASK 0x3fff0000L
+#define XDMA_MSTR_HEIGHT__XDMA_MSTR_FRAME_HEIGHT__SHIFT 0x00000010
+#define XDMA_MSTR_LOCAL_SURFACE_BASE_ADDR_HIGH__XDMA_MSTR_LOCAL_SURFACE_BASE_ADDR_HIGH_MASK 0x000000ffL
+#define XDMA_MSTR_LOCAL_SURFACE_BASE_ADDR_HIGH__XDMA_MSTR_LOCAL_SURFACE_BASE_ADDR_HIGH__SHIFT 0x00000000
+#define XDMA_MSTR_LOCAL_SURFACE_BASE_ADDR__XDMA_MSTR_LOCAL_SURFACE_BASE_ADDR_MASK 0xffffffffL
+#define XDMA_MSTR_LOCAL_SURFACE_BASE_ADDR__XDMA_MSTR_LOCAL_SURFACE_BASE_ADDR__SHIFT 0x00000000
+#define XDMA_MSTR_LOCAL_SURFACE_PITCH__XDMA_MSTR_LOCAL_SURFACE_PITCH_MASK 0x00003fffL
+#define XDMA_MSTR_LOCAL_SURFACE_PITCH__XDMA_MSTR_LOCAL_SURFACE_PITCH__SHIFT 0x00000000
+#define XDMA_MSTR_MEM_CLIENT_CONFIG__XDMA_MSTR_MEM_CLIENT_PRIV_MASK 0x00010000L
+#define XDMA_MSTR_MEM_CLIENT_CONFIG__XDMA_MSTR_MEM_CLIENT_PRIV__SHIFT 0x00000010
+#define XDMA_MSTR_MEM_CLIENT_CONFIG__XDMA_MSTR_MEM_CLIENT_SWAP_MASK 0x00000300L
+#define XDMA_MSTR_MEM_CLIENT_CONFIG__XDMA_MSTR_MEM_CLIENT_SWAP__SHIFT 0x00000008
+#define XDMA_MSTR_MEM_CLIENT_CONFIG__XDMA_MSTR_MEM_CLIENT_VMID_MASK 0x0000f000L
+#define XDMA_MSTR_MEM_CLIENT_CONFIG__XDMA_MSTR_MEM_CLIENT_VMID__SHIFT 0x0000000c
+#define XDMA_MSTR_MEM_NACK_STATUS__XDMA_MSTR_MEM_NACK_CLR_MASK 0x00010000L
+#define XDMA_MSTR_MEM_NACK_STATUS__XDMA_MSTR_MEM_NACK_CLR__SHIFT 0x00000010
+#define XDMA_MSTR_MEM_NACK_STATUS__XDMA_MSTR_MEM_NACK_MASK 0x00003000L
+#define XDMA_MSTR_MEM_NACK_STATUS__XDMA_MSTR_MEM_NACK__SHIFT 0x0000000c
+#define XDMA_MSTR_MEM_NACK_STATUS__XDMA_MSTR_MEM_NACK_TAG_MASK 0x000003ffL
+#define XDMA_MSTR_MEM_NACK_STATUS__XDMA_MSTR_MEM_NACK_TAG__SHIFT 0x00000000
+#define XDMA_MSTR_MEM_URGENT_CNTL__XDMA_MSTR_MEM_CLIENT_STALL_MASK 0x00000001L
+#define XDMA_MSTR_MEM_URGENT_CNTL__XDMA_MSTR_MEM_CLIENT_STALL__SHIFT 0x00000000
+#define XDMA_MSTR_MEM_URGENT_CNTL__XDMA_MSTR_MEM_STALL_DELAY_MASK 0x0000f000L
+#define XDMA_MSTR_MEM_URGENT_CNTL__XDMA_MSTR_MEM_STALL_DELAY__SHIFT 0x0000000c
+#define XDMA_MSTR_MEM_URGENT_CNTL__XDMA_MSTR_MEM_URGENT_LEVEL_MASK 0x00000f00L
+#define XDMA_MSTR_MEM_URGENT_CNTL__XDMA_MSTR_MEM_URGENT_LEVEL__SHIFT 0x00000008
+#define XDMA_MSTR_MEM_URGENT_CNTL__XDMA_MSTR_MEM_URGENT_LIMIT_MASK 0x000000f0L
+#define XDMA_MSTR_MEM_URGENT_CNTL__XDMA_MSTR_MEM_URGENT_LIMIT__SHIFT 0x00000004
+#define XDMA_MSTR_MEM_URGENT_CNTL__XDMA_MSTR_MEM_URGENT_TIMER_MASK 0xffff0000L
+#define XDMA_MSTR_MEM_URGENT_CNTL__XDMA_MSTR_MEM_URGENT_TIMER__SHIFT 0x00000010
+#define XDMA_MSTR_PCIE_NACK_STATUS__XDMA_MSTR_PCIE_NACK_CLR_MASK 0x00010000L
+#define XDMA_MSTR_PCIE_NACK_STATUS__XDMA_MSTR_PCIE_NACK_CLR__SHIFT 0x00000010
+#define XDMA_MSTR_PCIE_NACK_STATUS__XDMA_MSTR_PCIE_NACK_MASK 0x00003000L
+#define XDMA_MSTR_PCIE_NACK_STATUS__XDMA_MSTR_PCIE_NACK__SHIFT 0x0000000c
+#define XDMA_MSTR_PCIE_NACK_STATUS__XDMA_MSTR_PCIE_NACK_TAG_MASK 0x000003ffL
+#define XDMA_MSTR_PCIE_NACK_STATUS__XDMA_MSTR_PCIE_NACK_TAG__SHIFT 0x00000000
+#define XDMA_MSTR_READ_COMMAND__XDMA_MSTR_REQUEST_PREFETCH_MASK 0x3fff0000L
+#define XDMA_MSTR_READ_COMMAND__XDMA_MSTR_REQUEST_PREFETCH__SHIFT 0x00000010
+#define XDMA_MSTR_READ_COMMAND__XDMA_MSTR_REQUEST_SIZE_MASK 0x00003fffL
+#define XDMA_MSTR_READ_COMMAND__XDMA_MSTR_REQUEST_SIZE__SHIFT 0x00000000
+#define XDMA_MSTR_REMOTE_GPU_ADDRESS_HIGH__XDMA_MSTR_REMOTE_GPU_ADDRESS_HIGH_MASK 0x000000ffL
+#define XDMA_MSTR_REMOTE_GPU_ADDRESS_HIGH__XDMA_MSTR_REMOTE_GPU_ADDRESS_HIGH__SHIFT 0x00000000
+#define XDMA_MSTR_REMOTE_GPU_ADDRESS__XDMA_MSTR_REMOTE_GPU_ADDRESS_MASK 0xffffffffL
+#define XDMA_MSTR_REMOTE_GPU_ADDRESS__XDMA_MSTR_REMOTE_GPU_ADDRESS__SHIFT 0x00000000
+#define XDMA_MSTR_REMOTE_SURFACE_BASE_HIGH__XDMA_MSTR_REMOTE_SURFACE_BASE_HIGH_MASK 0x000000ffL
+#define XDMA_MSTR_REMOTE_SURFACE_BASE_HIGH__XDMA_MSTR_REMOTE_SURFACE_BASE_HIGH__SHIFT 0x00000000
+#define XDMA_MSTR_REMOTE_SURFACE_BASE__XDMA_MSTR_REMOTE_SURFACE_BASE_MASK 0xffffffffL
+#define XDMA_MSTR_REMOTE_SURFACE_BASE__XDMA_MSTR_REMOTE_SURFACE_BASE__SHIFT 0x00000000
+#define XDMA_MSTR_STATUS__XDMA_MSTR_VCOUNT_CURRENT_MASK 0x00003fffL
+#define XDMA_MSTR_STATUS__XDMA_MSTR_VCOUNT_CURRENT__SHIFT 0x00000000
+#define XDMA_MSTR_STATUS__XDMA_MSTR_WRITE_LINE_CURRENT_MASK 0x3fff0000L
+#define XDMA_MSTR_STATUS__XDMA_MSTR_WRITE_LINE_CURRENT__SHIFT 0x00000010
+#define XDMA_RBBMIF_RDWR_CNTL__XDMA_RBBMIF_RDWR_DELAY_MASK 0x00000007L
+#define XDMA_RBBMIF_RDWR_CNTL__XDMA_RBBMIF_RDWR_DELAY__SHIFT 0x00000000
+#define XDMA_RBBMIF_RDWR_CNTL__XDMA_RBBMIF_RDWR_TIMEOUT_DIS_MASK 0x00000008L
+#define XDMA_RBBMIF_RDWR_CNTL__XDMA_RBBMIF_RDWR_TIMEOUT_DIS__SHIFT 0x00000003
+#define XDMA_RBBMIF_RDWR_CNTL__XDMA_RBBMIF_TIMEOUT_DELAY_MASK 0xffff8000L
+#define XDMA_RBBMIF_RDWR_CNTL__XDMA_RBBMIF_TIMEOUT_DELAY__SHIFT 0x0000000f
+#define XDMA_SLV_CNTL__XDMA_SLV_ACTIVE_MASK 0x00000100L
+#define XDMA_SLV_CNTL__XDMA_SLV_ACTIVE__SHIFT 0x00000008
+#define XDMA_SLV_CNTL__XDMA_SLV_ENABLE_MASK 0x00010000L
+#define XDMA_SLV_CNTL__XDMA_SLV_ENABLE__SHIFT 0x00000010
+#define XDMA_SLV_CNTL__XDMA_SLV_MEM_READY_MASK 0x00000200L
+#define XDMA_SLV_CNTL__XDMA_SLV_MEM_READY__SHIFT 0x00000009
+#define XDMA_SLV_CNTL__XDMA_SLV_READ_LAT_TEST_EN_MASK 0x00080000L
+#define XDMA_SLV_CNTL__XDMA_SLV_READ_LAT_TEST_EN__SHIFT 0x00000013
+#define XDMA_SLV_CNTL__XDMA_SLV_SOFT_RESET_MASK 0x00100000L
+#define XDMA_SLV_CNTL__XDMA_SLV_SOFT_RESET__SHIFT 0x00000014
+#define XDMA_SLV_FLIP_PENDING__XDMA_SLV_FLIP_PENDING_MASK 0x00000001L
+#define XDMA_SLV_FLIP_PENDING__XDMA_SLV_FLIP_PENDING__SHIFT 0x00000000
+#define XDMA_SLV_MEM_CLIENT_CONFIG__XDMA_SLV_MEM_CLIENT_PRIV_MASK 0x00010000L
+#define XDMA_SLV_MEM_CLIENT_CONFIG__XDMA_SLV_MEM_CLIENT_PRIV__SHIFT 0x00000010
+#define XDMA_SLV_MEM_CLIENT_CONFIG__XDMA_SLV_MEM_CLIENT_SWAP_MASK 0x00000300L
+#define XDMA_SLV_MEM_CLIENT_CONFIG__XDMA_SLV_MEM_CLIENT_SWAP__SHIFT 0x00000008
+#define XDMA_SLV_MEM_CLIENT_CONFIG__XDMA_SLV_MEM_CLIENT_VMID_MASK 0x0000f000L
+#define XDMA_SLV_MEM_CLIENT_CONFIG__XDMA_SLV_MEM_CLIENT_VMID__SHIFT 0x0000000c
+#define XDMA_SLV_MEM_NACK_STATUS__XDMA_SLV_MEM_NACK_CLR_MASK 0x80000000L
+#define XDMA_SLV_MEM_NACK_STATUS__XDMA_SLV_MEM_NACK_CLR__SHIFT 0x0000001f
+#define XDMA_SLV_MEM_NACK_STATUS__XDMA_SLV_MEM_NACK_MASK 0x00030000L
+#define XDMA_SLV_MEM_NACK_STATUS__XDMA_SLV_MEM_NACK__SHIFT 0x00000010
+#define XDMA_SLV_MEM_NACK_STATUS__XDMA_SLV_MEM_NACK_TAG_MASK 0x0000ffffL
+#define XDMA_SLV_MEM_NACK_STATUS__XDMA_SLV_MEM_NACK_TAG__SHIFT 0x00000000
+#define XDMA_SLV_PCIE_NACK_STATUS__XDMA_SLV_PCIE_NACK_CLR_MASK 0x00010000L
+#define XDMA_SLV_PCIE_NACK_STATUS__XDMA_SLV_PCIE_NACK_CLR__SHIFT 0x00000010
+#define XDMA_SLV_PCIE_NACK_STATUS__XDMA_SLV_PCIE_NACK_MASK 0x00003000L
+#define XDMA_SLV_PCIE_NACK_STATUS__XDMA_SLV_PCIE_NACK__SHIFT 0x0000000c
+#define XDMA_SLV_PCIE_NACK_STATUS__XDMA_SLV_PCIE_NACK_TAG_MASK 0x000003ffL
+#define XDMA_SLV_PCIE_NACK_STATUS__XDMA_SLV_PCIE_NACK_TAG__SHIFT 0x00000000
+#define XDMA_SLV_READ_LATENCY_AVE__XDMA_SLV_READ_LATENCY_ACC_MASK 0x000fffffL
+#define XDMA_SLV_READ_LATENCY_AVE__XDMA_SLV_READ_LATENCY_ACC__SHIFT 0x00000000
+#define XDMA_SLV_READ_LATENCY_AVE__XDMA_SLV_READ_LATENCY_COUNT_MASK 0xfff00000L
+#define XDMA_SLV_READ_LATENCY_AVE__XDMA_SLV_READ_LATENCY_COUNT__SHIFT 0x00000014
+#define XDMA_SLV_READ_LATENCY_MINMAX__XDMA_SLV_READ_LATENCY_MAX_MASK 0xffff0000L
+#define XDMA_SLV_READ_LATENCY_MINMAX__XDMA_SLV_READ_LATENCY_MAX__SHIFT 0x00000010
+#define XDMA_SLV_READ_LATENCY_MINMAX__XDMA_SLV_READ_LATENCY_MIN_MASK 0x0000ffffL
+#define XDMA_SLV_READ_LATENCY_MINMAX__XDMA_SLV_READ_LATENCY_MIN__SHIFT 0x00000000
+#define XDMA_SLV_READ_LATENCY_TIMER__XDMA_SLV_READ_LATENCY_TIMER_MASK 0x0000ffffL
+#define XDMA_SLV_READ_LATENCY_TIMER__XDMA_SLV_READ_LATENCY_TIMER__SHIFT 0x00000000
+#define XDMA_SLV_READ_URGENT_CNTL__XDMA_SLV_READ_CLIENT_STALL_MASK 0x00000001L
+#define XDMA_SLV_READ_URGENT_CNTL__XDMA_SLV_READ_CLIENT_STALL__SHIFT 0x00000000
+#define XDMA_SLV_READ_URGENT_CNTL__XDMA_SLV_READ_STALL_DELAY_MASK 0x0000f000L
+#define XDMA_SLV_READ_URGENT_CNTL__XDMA_SLV_READ_STALL_DELAY__SHIFT 0x0000000c
+#define XDMA_SLV_READ_URGENT_CNTL__XDMA_SLV_READ_URGENT_LEVEL_MASK 0x00000f00L
+#define XDMA_SLV_READ_URGENT_CNTL__XDMA_SLV_READ_URGENT_LEVEL__SHIFT 0x00000008
+#define XDMA_SLV_READ_URGENT_CNTL__XDMA_SLV_READ_URGENT_LIMIT_MASK 0x000000f0L
+#define XDMA_SLV_READ_URGENT_CNTL__XDMA_SLV_READ_URGENT_LIMIT__SHIFT 0x00000004
+#define XDMA_SLV_READ_URGENT_CNTL__XDMA_SLV_READ_URGENT_TIMER_MASK 0xffff0000L
+#define XDMA_SLV_READ_URGENT_CNTL__XDMA_SLV_READ_URGENT_TIMER__SHIFT 0x00000010
+#define XDMA_SLV_REMOTE_GPU_ADDRESS_HIGH__XDMA_SLV_REMOTE_GPU_ADDRESS_HIGH_MASK 0x000000ffL
+#define XDMA_SLV_REMOTE_GPU_ADDRESS_HIGH__XDMA_SLV_REMOTE_GPU_ADDRESS_HIGH__SHIFT 0x00000000
+#define XDMA_SLV_REMOTE_GPU_ADDRESS__XDMA_SLV_REMOTE_GPU_ADDRESS_MASK 0xffffffffL
+#define XDMA_SLV_REMOTE_GPU_ADDRESS__XDMA_SLV_REMOTE_GPU_ADDRESS__SHIFT 0x00000000
+#define XDMA_SLV_SLS_PITCH__XDMA_SLV_SLS_PITCH_MASK 0x00003fffL
+#define XDMA_SLV_SLS_PITCH__XDMA_SLV_SLS_PITCH__SHIFT 0x00000000
+#define XDMA_SLV_SLS_PITCH__XDMA_SLV_SLS_WIDTH_MASK 0x3fff0000L
+#define XDMA_SLV_SLS_PITCH__XDMA_SLV_SLS_WIDTH__SHIFT 0x00000010
+#define XDMA_SLV_WB_RATE_CNTL__XDMA_SLV_WB_BURST_PERIOD_MASK 0xffff0000L
+#define XDMA_SLV_WB_RATE_CNTL__XDMA_SLV_WB_BURST_PERIOD__SHIFT 0x00000010
+#define XDMA_SLV_WB_RATE_CNTL__XDMA_SLV_WB_BURST_SIZE_MASK 0x000001ffL
+#define XDMA_SLV_WB_RATE_CNTL__XDMA_SLV_WB_BURST_SIZE__SHIFT 0x00000000
+#define XDMA_SLV_WRITE_URGENT_CNTL__XDMA_SLV_WRITE_STALL_DELAY_MASK 0x0000f000L
+#define XDMA_SLV_WRITE_URGENT_CNTL__XDMA_SLV_WRITE_STALL_DELAY__SHIFT 0x0000000c
+#define XDMA_SLV_WRITE_URGENT_CNTL__XDMA_SLV_WRITE_STALL_MASK 0x00000001L
+#define XDMA_SLV_WRITE_URGENT_CNTL__XDMA_SLV_WRITE_STALL__SHIFT 0x00000000
+#define XDMA_SLV_WRITE_URGENT_CNTL__XDMA_SLV_WRITE_URGENT_LEVEL_MASK 0x00000f00L
+#define XDMA_SLV_WRITE_URGENT_CNTL__XDMA_SLV_WRITE_URGENT_LEVEL__SHIFT 0x00000008
+#define XDMA_TEST_DEBUG_DATA__XDMA_TEST_DEBUG_DATA_MASK 0xffffffffL
+#define XDMA_TEST_DEBUG_DATA__XDMA_TEST_DEBUG_DATA__SHIFT 0x00000000
+#define XDMA_TEST_DEBUG_INDEX__XDMA_TEST_DEBUG_INDEX_MASK 0x000000ffL
+#define XDMA_TEST_DEBUG_INDEX__XDMA_TEST_DEBUG_INDEX__SHIFT 0x00000000
+#define XDMA_TEST_DEBUG_INDEX__XDMA_TEST_DEBUG_WRITE_EN_MASK 0x00000100L
+#define XDMA_TEST_DEBUG_INDEX__XDMA_TEST_DEBUG_WRITE_EN__SHIFT 0x00000008
+
+#endif
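
Every field in the generated headers above comes as a paired <REG>__<FIELD>_MASK / <REG>__<FIELD>__SHIFT macro, which composes into read-modify-write field accessors via token pasting. The sketch below is illustrative only and not part of the patch: GET_FIELD/SET_FIELD are hypothetical helper names, though the in-tree amdgpu driver provides REG_GET_FIELD()/REG_SET_FIELD() macros of the same shape.

    /*
     * Minimal sketch: combine the generated _MASK/__SHIFT pairs into
     * field extract/insert helpers for a 32-bit register value.
     * Helper names are hypothetical; two defines from the header above
     * are repeated here so the example is self-contained.
     */
    #include <stdint.h>
    #include <stdio.h>

    #define VGA_RENDER_CONTROL__VGA_BLINK_RATE_MASK   0x0000001fL
    #define VGA_RENDER_CONTROL__VGA_BLINK_RATE__SHIFT 0x00000000
    #define VGA_RENDER_CONTROL__VGA_BLINK_MODE_MASK   0x00000060L
    #define VGA_RENDER_CONTROL__VGA_BLINK_MODE__SHIFT 0x00000005

    /* Extract one field from a register value read from hardware. */
    #define GET_FIELD(val, reg, field) \
    	(((val) & reg##__##field##_MASK) >> reg##__##field##__SHIFT)

    /* Replace one field in a register value before writing it back. */
    #define SET_FIELD(val, reg, field, fv) \
    	(((val) & ~reg##__##field##_MASK) | \
    	 (((uint32_t)(fv) << reg##__##field##__SHIFT) & \
    	  reg##__##field##_MASK))

    int main(void)
    {
    	uint32_t reg = 0;

    	reg = SET_FIELD(reg, VGA_RENDER_CONTROL, VGA_BLINK_RATE, 0x0a);
    	reg = SET_FIELD(reg, VGA_RENDER_CONTROL, VGA_BLINK_MODE, 0x2);

    	/* Prints: reg = 0x0000004a, rate = 10 */
    	printf("reg = 0x%08x, rate = %u\n", reg,
    	       (unsigned)GET_FIELD(reg, VGA_RENDER_CONTROL,
    				   VGA_BLINK_RATE));
    	return 0;
    }

The masked OR in SET_FIELD keeps an out-of-range field value from corrupting neighboring bits, which is why the generated headers emit the mask alongside the shift rather than the shift alone.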
diff --git a/drivers/gpu/drm/amd/include/asic_reg/gca/gfx_6_0_d.h b/drivers/gpu/drm/amd/include/asic_reg/gca/gfx_6_0_d.h
new file mode 100644
index 000000000000..c75aee25619e
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/gca/gfx_6_0_d.h
@@ -0,0 +1,1784 @@
+/*
+ *
+ * Copyright (C) 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef GFX_6_0_D_H
+#define GFX_6_0_D_H
+
+#define ixCLIPPER_DEBUG_REG00 0x0000
+#define ixCLIPPER_DEBUG_REG01 0x0001
+#define ixCLIPPER_DEBUG_REG02 0x0002
+#define ixCLIPPER_DEBUG_REG03 0x0003
+#define ixCLIPPER_DEBUG_REG04 0x0004
+#define ixCLIPPER_DEBUG_REG05 0x0005
+#define ixCLIPPER_DEBUG_REG06 0x0006
+#define ixCLIPPER_DEBUG_REG07 0x0007
+#define ixCLIPPER_DEBUG_REG08 0x0008
+#define ixCLIPPER_DEBUG_REG09 0x0009
+#define ixCLIPPER_DEBUG_REG10 0x000A
+#define ixCLIPPER_DEBUG_REG11 0x000B
+#define ixCLIPPER_DEBUG_REG12 0x000C
+#define ixCLIPPER_DEBUG_REG13 0x000D
+#define ixCLIPPER_DEBUG_REG14 0x000E
+#define ixCLIPPER_DEBUG_REG15 0x000F
+#define ixCLIPPER_DEBUG_REG16 0x0010
+#define ixCLIPPER_DEBUG_REG17 0x0011
+#define ixCLIPPER_DEBUG_REG18 0x0012
+#define ixCLIPPER_DEBUG_REG19 0x0013
+#define ixGDS_DEBUG_REG0 0x0000
+#define ixGDS_DEBUG_REG1 0x0001
+#define ixGDS_DEBUG_REG2 0x0002
+#define ixGDS_DEBUG_REG3 0x0003
+#define ixGDS_DEBUG_REG4 0x0004
+#define ixGDS_DEBUG_REG5 0x0005
+#define ixGDS_DEBUG_REG6 0x0006
+#define ixIA_DEBUG_REG0 0x0000
+#define ixIA_DEBUG_REG1 0x0001
+#define ixIA_DEBUG_REG2 0x0002
+#define ixIA_DEBUG_REG3 0x0003
+#define ixIA_DEBUG_REG4 0x0004
+#define ixIA_DEBUG_REG5 0x0005
+#define ixIA_DEBUG_REG6 0x0006
+#define ixIA_DEBUG_REG7 0x0007
+#define ixIA_DEBUG_REG8 0x0008
+#define ixIA_DEBUG_REG9 0x0009
+#define ixPA_SC_DEBUG_REG0 0x0000
+#define ixPA_SC_DEBUG_REG1 0x0001
+#define ixSETUP_DEBUG_REG0 0x0018
+#define ixSETUP_DEBUG_REG1 0x0019
+#define ixSETUP_DEBUG_REG2 0x001A
+#define ixSETUP_DEBUG_REG3 0x001B
+#define ixSETUP_DEBUG_REG4 0x001C
+#define ixSETUP_DEBUG_REG5 0x001D
+#define ixSQ_DEBUG_CTRL_LOCAL 0x0009
+#define ixSQ_DEBUG_STS_LOCAL 0x0008
+#define ixSQ_INTERRUPT_WORD_AUTO 0x20C0
+#define ixSQ_INTERRUPT_WORD_CMN 0x20C0
+#define ixSQ_INTERRUPT_WORD_WAVE 0x20C0
+#define ixSQ_WAVE_EXEC_HI 0x027F
+#define ixSQ_WAVE_EXEC_LO 0x027E
+#define ixSQ_WAVE_GPR_ALLOC 0x0015
+#define ixSQ_WAVE_HW_ID 0x0014
+#define ixSQ_WAVE_IB_DBG0 0x001C
+#define ixSQ_WAVE_IB_STS 0x0017
+#define ixSQ_WAVE_INST_DW0 0x001A
+#define ixSQ_WAVE_INST_DW1 0x001B
+#define ixSQ_WAVE_LDS_ALLOC 0x0016
+#define ixSQ_WAVE_M0 0x027C
+#define ixSQ_WAVE_MODE 0x0011
+#define ixSQ_WAVE_PC_HI 0x0019
+#define ixSQ_WAVE_PC_LO 0x0018
+#define ixSQ_WAVE_STATUS 0x0012
+#define ixSQ_WAVE_TBA_HI 0x026D
+#define ixSQ_WAVE_TBA_LO 0x026C
+#define ixSQ_WAVE_TMA_HI 0x026F
+#define ixSQ_WAVE_TMA_LO 0x026E
+#define ixSQ_WAVE_TRAPSTS 0x0013
+#define ixSQ_WAVE_TTMP0 0x0270
+#define ixSQ_WAVE_TTMP10 0x027A
+#define ixSQ_WAVE_TTMP1 0x0271
+#define ixSQ_WAVE_TTMP11 0x027B
+#define ixSQ_WAVE_TTMP2 0x0272
+#define ixSQ_WAVE_TTMP3 0x0273
+#define ixSQ_WAVE_TTMP4 0x0274
+#define ixSQ_WAVE_TTMP5 0x0275
+#define ixSQ_WAVE_TTMP6 0x0276
+#define ixSQ_WAVE_TTMP7 0x0277
+#define ixSQ_WAVE_TTMP8 0x0278
+#define ixSQ_WAVE_TTMP9 0x0279
+#define ixSXIFCCG_DEBUG_REG0 0x0014
+#define ixSXIFCCG_DEBUG_REG1 0x0015
+#define ixSXIFCCG_DEBUG_REG2 0x0016
+#define ixSXIFCCG_DEBUG_REG3 0x0017
+#define ixVGT_DEBUG_REG0 0x0000
+#define ixVGT_DEBUG_REG10 0x000A
+#define ixVGT_DEBUG_REG1 0x0001
+#define ixVGT_DEBUG_REG11 0x000B
+#define ixVGT_DEBUG_REG12 0x000C
+#define ixVGT_DEBUG_REG13 0x000D
+#define ixVGT_DEBUG_REG14 0x000E
+#define ixVGT_DEBUG_REG15 0x000F
+#define ixVGT_DEBUG_REG16 0x0010
+#define ixVGT_DEBUG_REG17 0x0011
+#define ixVGT_DEBUG_REG18 0x0012
+#define ixVGT_DEBUG_REG19 0x0013
+#define ixVGT_DEBUG_REG20 0x0014
+#define ixVGT_DEBUG_REG2 0x0002
+#define ixVGT_DEBUG_REG21 0x0015
+#define ixVGT_DEBUG_REG22 0x0016
+#define ixVGT_DEBUG_REG23 0x0017
+#define ixVGT_DEBUG_REG24 0x0018
+#define ixVGT_DEBUG_REG25 0x0019
+#define ixVGT_DEBUG_REG26 0x001A
+#define ixVGT_DEBUG_REG27 0x001B
+#define ixVGT_DEBUG_REG28 0x001C
+#define ixVGT_DEBUG_REG29 0x001D
+#define ixVGT_DEBUG_REG30 0x001E
+#define ixVGT_DEBUG_REG3 0x0003
+#define ixVGT_DEBUG_REG31 0x001F
+#define ixVGT_DEBUG_REG32 0x0020
+#define ixVGT_DEBUG_REG33 0x0021
+#define ixVGT_DEBUG_REG34 0x0022
+#define ixVGT_DEBUG_REG35 0x0023
+#define ixVGT_DEBUG_REG36 0x0024
+#define ixVGT_DEBUG_REG4 0x0004
+#define ixVGT_DEBUG_REG5 0x0005
+#define ixVGT_DEBUG_REG6 0x0006
+#define ixVGT_DEBUG_REG7 0x0007
+#define ixVGT_DEBUG_REG8 0x0008
+#define ixVGT_DEBUG_REG9 0x0009
+#define mmBCI_DEBUG_READ 0x24E3
+#define mmCB_BLEND0_CONTROL 0xA1E0
+#define mmCB_BLEND1_CONTROL 0xA1E1
+#define mmCB_BLEND2_CONTROL 0xA1E2
+#define mmCB_BLEND3_CONTROL 0xA1E3
+#define mmCB_BLEND4_CONTROL 0xA1E4
+#define mmCB_BLEND5_CONTROL 0xA1E5
+#define mmCB_BLEND6_CONTROL 0xA1E6
+#define mmCB_BLEND7_CONTROL 0xA1E7
+#define mmCB_BLEND_ALPHA 0xA108
+#define mmCB_BLEND_BLUE 0xA107
+#define mmCB_BLEND_GREEN 0xA106
+#define mmCB_BLEND_RED 0xA105
+#define mmCB_CGTT_SCLK_CTRL 0x2698
+#define mmCB_COLOR0_ATTRIB 0xA31D
+#define mmCB_COLOR0_BASE 0xA318
+#define mmCB_COLOR0_CLEAR_WORD0 0xA323
+#define mmCB_COLOR0_CLEAR_WORD1 0xA324
+#define mmCB_COLOR0_CMASK 0xA31F
+#define mmCB_COLOR0_CMASK_SLICE 0xA320
+#define mmCB_COLOR0_FMASK 0xA321
+#define mmCB_COLOR0_FMASK_SLICE 0xA322
+#define mmCB_COLOR0_INFO 0xA31C
+#define mmCB_COLOR0_PITCH 0xA319
+#define mmCB_COLOR0_SLICE 0xA31A
+#define mmCB_COLOR0_VIEW 0xA31B
+#define mmCB_COLOR1_ATTRIB 0xA32C
+#define mmCB_COLOR1_BASE 0xA327
+#define mmCB_COLOR1_CLEAR_WORD0 0xA332
+#define mmCB_COLOR1_CLEAR_WORD1 0xA333
+#define mmCB_COLOR1_CMASK 0xA32E
+#define mmCB_COLOR1_CMASK_SLICE 0xA32F
+#define mmCB_COLOR1_FMASK 0xA330
+#define mmCB_COLOR1_FMASK_SLICE 0xA331
+#define mmCB_COLOR1_INFO 0xA32B
+#define mmCB_COLOR1_PITCH 0xA328
+#define mmCB_COLOR1_SLICE 0xA329
+#define mmCB_COLOR1_VIEW 0xA32A
+#define mmCB_COLOR2_ATTRIB 0xA33B
+#define mmCB_COLOR2_BASE 0xA336
+#define mmCB_COLOR2_CLEAR_WORD0 0xA341
+#define mmCB_COLOR2_CLEAR_WORD1 0xA342
+#define mmCB_COLOR2_CMASK 0xA33D
+#define mmCB_COLOR2_CMASK_SLICE 0xA33E
+#define mmCB_COLOR2_FMASK 0xA33F
+#define mmCB_COLOR2_FMASK_SLICE 0xA340
+#define mmCB_COLOR2_INFO 0xA33A
+#define mmCB_COLOR2_PITCH 0xA337
+#define mmCB_COLOR2_SLICE 0xA338
+#define mmCB_COLOR2_VIEW 0xA339
+#define mmCB_COLOR3_ATTRIB 0xA34A
+#define mmCB_COLOR3_BASE 0xA345
+#define mmCB_COLOR3_CLEAR_WORD0 0xA350
+#define mmCB_COLOR3_CLEAR_WORD1 0xA351
+#define mmCB_COLOR3_CMASK 0xA34C
+#define mmCB_COLOR3_CMASK_SLICE 0xA34D
+#define mmCB_COLOR3_FMASK 0xA34E
+#define mmCB_COLOR3_FMASK_SLICE 0xA34F
+#define mmCB_COLOR3_INFO 0xA349
+#define mmCB_COLOR3_PITCH 0xA346
+#define mmCB_COLOR3_SLICE 0xA347
+#define mmCB_COLOR3_VIEW 0xA348
+#define mmCB_COLOR4_ATTRIB 0xA359
+#define mmCB_COLOR4_BASE 0xA354
+#define mmCB_COLOR4_CLEAR_WORD0 0xA35F
+#define mmCB_COLOR4_CLEAR_WORD1 0xA360
+#define mmCB_COLOR4_CMASK 0xA35B
+#define mmCB_COLOR4_CMASK_SLICE 0xA35C
+#define mmCB_COLOR4_FMASK 0xA35D
+#define mmCB_COLOR4_FMASK_SLICE 0xA35E
+#define mmCB_COLOR4_INFO 0xA358
+#define mmCB_COLOR4_PITCH 0xA355
+#define mmCB_COLOR4_SLICE 0xA356
+#define mmCB_COLOR4_VIEW 0xA357
+#define mmCB_COLOR5_ATTRIB 0xA368
+#define mmCB_COLOR5_BASE 0xA363
+#define mmCB_COLOR5_CLEAR_WORD0 0xA36E
+#define mmCB_COLOR5_CLEAR_WORD1 0xA36F
+#define mmCB_COLOR5_CMASK 0xA36A
+#define mmCB_COLOR5_CMASK_SLICE 0xA36B
+#define mmCB_COLOR5_FMASK 0xA36C
+#define mmCB_COLOR5_FMASK_SLICE 0xA36D
+#define mmCB_COLOR5_INFO 0xA367
+#define mmCB_COLOR5_PITCH 0xA364
+#define mmCB_COLOR5_SLICE 0xA365
+#define mmCB_COLOR5_VIEW 0xA366
+#define mmCB_COLOR6_ATTRIB 0xA377
+#define mmCB_COLOR6_BASE 0xA372
+#define mmCB_COLOR6_CLEAR_WORD0 0xA37D
+#define mmCB_COLOR6_CLEAR_WORD1 0xA37E
+#define mmCB_COLOR6_CMASK 0xA379
+#define mmCB_COLOR6_CMASK_SLICE 0xA37A
+#define mmCB_COLOR6_FMASK 0xA37B
+#define mmCB_COLOR6_FMASK_SLICE 0xA37C
+#define mmCB_COLOR6_INFO 0xA376
+#define mmCB_COLOR6_PITCH 0xA373
+#define mmCB_COLOR6_SLICE 0xA374
+#define mmCB_COLOR6_VIEW 0xA375
+#define mmCB_COLOR7_ATTRIB 0xA386
+#define mmCB_COLOR7_BASE 0xA381
+#define mmCB_COLOR7_CLEAR_WORD0 0xA38C
+#define mmCB_COLOR7_CLEAR_WORD1 0xA38D
+#define mmCB_COLOR7_CMASK 0xA388
+#define mmCB_COLOR7_CMASK_SLICE 0xA389
+#define mmCB_COLOR7_FMASK 0xA38A
+#define mmCB_COLOR7_FMASK_SLICE 0xA38B
+#define mmCB_COLOR7_INFO 0xA385
+#define mmCB_COLOR7_PITCH 0xA382
+#define mmCB_COLOR7_SLICE 0xA383
+#define mmCB_COLOR7_VIEW 0xA384
+#define mmCB_COLOR_CONTROL 0xA202
+#define mmCB_DEBUG_BUS_10 0x26A2
+#define mmCB_DEBUG_BUS_1 0x2699
+#define mmCB_DEBUG_BUS_11 0x26A3
+#define mmCB_DEBUG_BUS_12 0x26A4
+#define mmCB_DEBUG_BUS_13 0x26A5
+#define mmCB_DEBUG_BUS_14 0x26A6
+#define mmCB_DEBUG_BUS_15 0x26A7
+#define mmCB_DEBUG_BUS_16 0x26A8
+#define mmCB_DEBUG_BUS_17 0x26A9
+#define mmCB_DEBUG_BUS_18 0x26AA
+#define mmCB_DEBUG_BUS_2 0x269A
+#define mmCB_DEBUG_BUS_3 0x269B
+#define mmCB_DEBUG_BUS_4 0x269C
+#define mmCB_DEBUG_BUS_5 0x269D
+#define mmCB_DEBUG_BUS_6 0x269E
+#define mmCB_DEBUG_BUS_7 0x269F
+#define mmCB_DEBUG_BUS_8 0x26A0
+#define mmCB_DEBUG_BUS_9 0x26A1
+#define mmCB_HW_CONTROL 0x2684
+#define mmCB_HW_CONTROL_1 0x2685
+#define mmCB_HW_CONTROL_2 0x2686
+#define mmCB_PERFCOUNTER0_HI 0x2691
+#define mmCB_PERFCOUNTER0_LO 0x2690
+#define mmCB_PERFCOUNTER0_SELECT1 0x2689
+#define mmCB_PERFCOUNTER1_HI 0x2693
+#define mmCB_PERFCOUNTER1_LO 0x2692
+#define mmCB_PERFCOUNTER2_HI 0x2695
+#define mmCB_PERFCOUNTER2_LO 0x2694
+#define mmCB_PERFCOUNTER3_HI 0x2697
+#define mmCB_PERFCOUNTER3_LO 0x2696
+#define mmCB_SHADER_MASK 0xA08F
+#define mmCB_TARGET_MASK 0xA08E
+#define mmCC_GC_SHADER_ARRAY_CONFIG 0x226F
+#define mmCC_RB_BACKEND_DISABLE 0x263D
+#define mmCC_RB_DAISY_CHAIN 0x2641
+#define mmCC_RB_REDUNDANCY 0x263C
+#define mmCC_SQC_BANK_DISABLE 0x2307
+#define mmCGTS_RD_CTRL_REG 0x2455
+#define mmCGTS_RD_REG 0x2456
+#define mmCGTS_SM_CTRL_REG 0x2454
+#define mmCGTS_TCC_DISABLE 0x2452
+#define mmCGTS_USER_TCC_DISABLE 0x2453
+#define mmCGTT_BCI_CLK_CTRL 0x24A9
+#define mmCGTT_CP_CLK_CTRL 0x3059
+#define mmCGTT_GDS_CLK_CTRL 0x25DD
+#define mmCGTT_IA_CLK_CTRL 0x2261
+#define mmCGTT_PA_CLK_CTRL 0x2286
+#define mmCGTT_PC_CLK_CTRL 0x24A8
+#define mmCGTT_RLC_CLK_CTRL 0x30E0
+#define mmCGTT_SC_CLK_CTRL 0x22CA
+#define mmCGTT_SPI_CLK_CTRL 0x2451
+#define mmCGTT_SQ_CLK_CTRL 0x2362
+#define mmCGTT_SQG_CLK_CTRL 0x2363
+#define mmCGTT_SX_CLK_CTRL0 0x240C
+#define mmCGTT_SX_CLK_CTRL1 0x240D
+#define mmCGTT_SX_CLK_CTRL2 0x240E
+#define mmCGTT_SX_CLK_CTRL3 0x240F
+#define mmCGTT_SX_CLK_CTRL4 0x2410
+#define mmCGTT_TCI_CLK_CTRL 0x2B60
+#define mmCGTT_TCP_CLK_CTRL 0x2B15
+#define mmCGTT_VGT_CLK_CTRL 0x225F
+#define mmCOHER_DEST_BASE_0 0xA092
+#define mmCOHER_DEST_BASE_1 0xA093
+#define mmCOHER_DEST_BASE_2 0xA07E
+#define mmCOHER_DEST_BASE_3 0xA07F
+#define mmCOMPUTE_DIM_X 0x2E01
+#define mmCOMPUTE_DIM_Y 0x2E02
+#define mmCOMPUTE_DIM_Z 0x2E03
+#define mmCOMPUTE_DISPATCH_INITIATOR 0x2E00
+#define mmCOMPUTE_NUM_THREAD_X 0x2E07
+#define mmCOMPUTE_NUM_THREAD_Y 0x2E08
+#define mmCOMPUTE_NUM_THREAD_Z 0x2E09
+#define mmCOMPUTE_PGM_HI 0x2E0D
+#define mmCOMPUTE_PGM_LO 0x2E0C
+#define mmCOMPUTE_PGM_RSRC1 0x2E12
+#define mmCOMPUTE_PGM_RSRC2 0x2E13
+#define mmCOMPUTE_RESOURCE_LIMITS 0x2E15
+#define mmCOMPUTE_START_X 0x2E04
+#define mmCOMPUTE_START_Y 0x2E05
+#define mmCOMPUTE_START_Z 0x2E06
+#define mmCOMPUTE_STATIC_THREAD_MGMT_SE0 0x2E16
+#define mmCOMPUTE_STATIC_THREAD_MGMT_SE1 0x2E17
+#define mmCOMPUTE_TBA_HI 0x2E0F
+#define mmCOMPUTE_TBA_LO 0x2E0E
+#define mmCOMPUTE_TMA_HI 0x2E11
+#define mmCOMPUTE_TMA_LO 0x2E10
+#define mmCOMPUTE_TMPRING_SIZE 0x2E18
+#define mmCOMPUTE_USER_DATA_0 0x2E40
+#define mmCOMPUTE_USER_DATA_10 0x2E4A
+#define mmCOMPUTE_USER_DATA_1 0x2E41
+#define mmCOMPUTE_USER_DATA_11 0x2E4B
+#define mmCOMPUTE_USER_DATA_12 0x2E4C
+#define mmCOMPUTE_USER_DATA_13 0x2E4D
+#define mmCOMPUTE_USER_DATA_14 0x2E4E
+#define mmCOMPUTE_USER_DATA_15 0x2E4F
+#define mmCOMPUTE_USER_DATA_2 0x2E42
+#define mmCOMPUTE_USER_DATA_3 0x2E43
+#define mmCOMPUTE_USER_DATA_4 0x2E44
+#define mmCOMPUTE_USER_DATA_5 0x2E45
+#define mmCOMPUTE_USER_DATA_6 0x2E46
+#define mmCOMPUTE_USER_DATA_7 0x2E47
+#define mmCOMPUTE_USER_DATA_8 0x2E48
+#define mmCOMPUTE_USER_DATA_9 0x2E49
+#define mmCOMPUTE_VMID 0x2E14
+#define mmCP_APPEND_ADDR_HI 0x2159
+#define mmCP_APPEND_ADDR_LO 0x2158
+#define mmCP_APPEND_DATA 0x215A
+#define mmCP_APPEND_LAST_CS_FENCE 0x215B
+#define mmCP_APPEND_LAST_PS_FENCE 0x215C
+#define mmCP_ATOMIC_PREOP_HI 0x215E
+#define mmCP_ATOMIC_PREOP_LO 0x215D
+#define mmCP_BUSY_STAT 0x219F
+#define mmCP_CE_HEADER_DUMP 0x21A4
+#define mmCP_CE_IB1_BASE_HI 0x21C7
+#define mmCP_CE_IB1_BASE_LO 0x21C6
+#define mmCP_CE_IB1_BUFSZ 0x21C8
+#define mmCP_CE_IB2_BASE_HI 0x21CA
+#define mmCP_CE_IB2_BASE_LO 0x21C9
+#define mmCP_CE_IB2_BUFSZ 0x21CB
+#define mmCP_CE_INIT_BASE_HI 0x21C4
+#define mmCP_CE_INIT_BASE_LO 0x21C3
+#define mmCP_CE_INIT_BUFSZ 0x21C5
+#define mmCP_CEQ1_AVAIL 0x21E6
+#define mmCP_CEQ2_AVAIL 0x21E7
+#define mmCP_CE_ROQ_IB1_STAT 0x21E9
+#define mmCP_CE_ROQ_IB2_STAT 0x21EA
+#define mmCP_CE_ROQ_RB_STAT 0x21E8
+#define mmCP_CE_UCODE_ADDR 0x305A
+#define mmCP_CE_UCODE_DATA 0x305B
+#define mmCP_CMD_DATA 0x21DF
+#define mmCP_CMD_INDEX 0x21DE
+#define mmCP_CNTX_STAT 0x21B8
+#define mmCP_COHER_BASE 0x217E
+#define mmCP_COHER_CNTL 0x217C
+#define mmCP_COHER_SIZE 0x217D
+#define mmCP_COHER_START_DELAY 0x217B
+#define mmCP_COHER_STATUS 0x217F
+#define mmCP_CSF_CNTL 0x21B5
+#define mmCP_CSF_STAT 0x21B4
+#define mmCP_DMA_CNTL 0x218A
+#define mmCP_DMA_ME_COMMAND 0x2184
+#define mmCP_DMA_ME_DST_ADDR 0x2182
+#define mmCP_DMA_ME_DST_ADDR_HI 0x2183
+#define mmCP_DMA_ME_SRC_ADDR 0x2180
+#define mmCP_DMA_ME_SRC_ADDR_HI 0x2181
+#define mmCP_DMA_PFP_COMMAND 0x2189
+#define mmCP_DMA_PFP_DST_ADDR 0x2187
+#define mmCP_DMA_PFP_DST_ADDR_HI 0x2188
+#define mmCP_DMA_PFP_SRC_ADDR 0x2185
+#define mmCP_DMA_PFP_SRC_ADDR_HI 0x2186
+#define mmCP_DMA_READ_TAGS 0x218B
+#define mmCP_ECC_FIRSTOCCURRENCE 0x307A
+#define mmCP_ECC_FIRSTOCCURRENCE_RING0 0x307B
+#define mmCP_ECC_FIRSTOCCURRENCE_RING1 0x307C
+#define mmCP_ECC_FIRSTOCCURRENCE_RING2 0x307D
+#define mmCP_EOP_DONE_ADDR_HI 0x2101
+#define mmCP_EOP_DONE_ADDR_LO 0x2100
+#define mmCP_EOP_DONE_DATA_HI 0x2103
+#define mmCP_EOP_DONE_DATA_LO 0x2102
+#define mmCP_EOP_LAST_FENCE_HI 0x2105
+#define mmCP_EOP_LAST_FENCE_LO 0x2104
+#define mmCP_GDS_ATOMIC0_PREOP_HI 0x2160
+#define mmCP_GDS_ATOMIC0_PREOP_LO 0x215F
+#define mmCP_GDS_ATOMIC1_PREOP_HI 0x2162
+#define mmCP_GDS_ATOMIC1_PREOP_LO 0x2161
+#define mmCP_GRBM_FREE_COUNT 0x21A3
+#define mmCP_IB1_BASE_HI 0x21CD
+#define mmCP_IB1_BASE_LO 0x21CC
+#define mmCP_IB1_BUFSZ 0x21CE
+#define mmCP_IB1_OFFSET 0x2192
+#define mmCP_IB1_PREAMBLE_BEGIN 0x2194
+#define mmCP_IB1_PREAMBLE_END 0x2195
+#define mmCP_IB2_BASE_HI 0x21D0
+#define mmCP_IB2_BASE_LO 0x21CF
+#define mmCP_IB2_BUFSZ 0x21D1
+#define mmCP_IB2_OFFSET 0x2193
+#define mmCP_IB2_PREAMBLE_BEGIN 0x2196
+#define mmCP_IB2_PREAMBLE_END 0x2197
+#define mmCP_INT_CNTL 0x3049
+#define mmCP_INT_CNTL_RING0 0x306A
+#define mmCP_INT_CNTL_RING1 0x306B
+#define mmCP_INT_CNTL_RING2 0x306C
+#define mmCP_INT_STAT_DEBUG 0x21F7
+#define mmCP_INT_STATUS 0x304A
+#define mmCP_INT_STATUS_RING0 0x306D
+#define mmCP_INT_STATUS_RING1 0x306E
+#define mmCP_INT_STATUS_RING2 0x306F
+#define mmCP_MC_PACK_DELAY_CNT 0x21A7
+#define mmCP_ME_CNTL 0x21B6
+#define mmCP_ME_HEADER_DUMP 0x21A1
+#define mmCP_ME_MC_RADDR_HI 0x216E
+#define mmCP_ME_MC_RADDR_LO 0x216D
+#define mmCP_ME_MC_WADDR_HI 0x216A
+#define mmCP_ME_MC_WADDR_LO 0x2169
+#define mmCP_ME_MC_WDATA_HI 0x216C
+#define mmCP_ME_MC_WDATA_LO 0x216B
+#define mmCP_MEM_SLP_CNTL 0x3079
+#define mmCP_ME_PREEMPTION 0x21B9
+#define mmCP_MEQ_AVAIL 0x21DD
+#define mmCP_MEQ_STAT 0x21E5
+#define mmCP_MEQ_THRESHOLDS 0x21D9
+#define mmCP_ME_RAM_DATA 0x3058
+#define mmCP_ME_RAM_RADDR 0x3056
+#define mmCP_ME_RAM_WADDR 0x3057
+#define mmCP_NUM_PRIM_NEEDED_COUNT0_HI 0x210B
+#define mmCP_NUM_PRIM_NEEDED_COUNT0_LO 0x210A
+#define mmCP_NUM_PRIM_NEEDED_COUNT1_HI 0x210F
+#define mmCP_NUM_PRIM_NEEDED_COUNT1_LO 0x210E
+#define mmCP_NUM_PRIM_NEEDED_COUNT2_HI 0x2113
+#define mmCP_NUM_PRIM_NEEDED_COUNT2_LO 0x2112
+#define mmCP_NUM_PRIM_NEEDED_COUNT3_HI 0x2117
+#define mmCP_NUM_PRIM_NEEDED_COUNT3_LO 0x2116
+#define mmCP_NUM_PRIM_WRITTEN_COUNT0_HI 0x2109
+#define mmCP_NUM_PRIM_WRITTEN_COUNT0_LO 0x2108
+#define mmCP_NUM_PRIM_WRITTEN_COUNT1_HI 0x210D
+#define mmCP_NUM_PRIM_WRITTEN_COUNT1_LO 0x210C
+#define mmCP_NUM_PRIM_WRITTEN_COUNT2_HI 0x2111
+#define mmCP_NUM_PRIM_WRITTEN_COUNT2_LO 0x2110
+#define mmCP_NUM_PRIM_WRITTEN_COUNT3_HI 0x2115
+#define mmCP_NUM_PRIM_WRITTEN_COUNT3_LO 0x2114
+#define mmCP_PA_CINVOC_COUNT_HI 0x2129
+#define mmCP_PA_CINVOC_COUNT_LO 0x2128
+#define mmCP_PA_CPRIM_COUNT_HI 0x212B
+#define mmCP_PA_CPRIM_COUNT_LO 0x212A
+#define mmCP_PERFMON_CNTL 0x21FF
+#define mmCP_PERFMON_CNTX_CNTL 0xA0D8
+#define mmCP_PFP_HEADER_DUMP 0x21A2
+#define mmCP_PFP_IB_CONTROL 0x218D
+#define mmCP_PFP_LOAD_CONTROL 0x218E
+#define mmCP_PFP_UCODE_ADDR 0x3054
+#define mmCP_PFP_UCODE_DATA 0x3055
+#define mmCP_PIPE_STATS_ADDR_HI 0x2119
+#define mmCP_PIPE_STATS_ADDR_LO 0x2118
+#define mmCP_PWR_CNTL 0x3078
+#define mmCP_QUEUE_THRESHOLDS 0x21D8
+#define mmCP_RB0_BASE 0x3040
+#define mmCP_RB0_CNTL 0x3041
+#define mmCP_RB0_RPTR 0x21C0
+#define mmCP_RB0_RPTR_ADDR 0x3043
+#define mmCP_RB0_RPTR_ADDR_HI 0x3044
+#define mmCP_RB0_WPTR 0x3045
+#define mmCP_RB1_BASE 0x3060
+#define mmCP_RB1_CNTL 0x3061
+#define mmCP_RB1_RPTR 0x21BF
+#define mmCP_RB1_RPTR_ADDR 0x3062
+#define mmCP_RB1_RPTR_ADDR_HI 0x3063
+#define mmCP_RB1_WPTR 0x3064
+#define mmCP_RB2_BASE 0x3065
+#define mmCP_RB2_CNTL 0x3066
+#define mmCP_RB2_RPTR 0x21BE
+#define mmCP_RB2_RPTR_ADDR 0x3067
+#define mmCP_RB2_RPTR_ADDR_HI 0x3068
+#define mmCP_RB2_WPTR 0x3069
+#define mmCP_RB_BASE 0x3040
+#define mmCP_RB_CNTL 0x3041
+#define mmCP_RB_OFFSET 0x2191
+#define mmCP_RB_RPTR 0x21C0
+#define mmCP_RB_RPTR_ADDR 0x3043
+#define mmCP_RB_RPTR_ADDR_HI 0x3044
+#define mmCP_RB_RPTR_WR 0x3042
+#define mmCP_RB_VMID 0x3051
+#define mmCP_RB_WPTR 0x3045
+#define mmCP_RB_WPTR_DELAY 0x21C1
+#define mmCP_RB_WPTR_POLL_ADDR_HI 0x3047
+#define mmCP_RB_WPTR_POLL_ADDR_LO 0x3046
+#define mmCP_RB_WPTR_POLL_CNTL 0x21C2
+#define mmCP_RING0_PRIORITY 0x304D
+#define mmCP_RING1_PRIORITY 0x304E
+#define mmCP_RING2_PRIORITY 0x304F
+#define mmCP_RINGID 0xA0D9
+#define mmCP_RING_PRIORITY_CNTS 0x304C
+#define mmCP_ROQ1_THRESHOLDS 0x21D5
+#define mmCP_ROQ2_AVAIL 0x21DC
+#define mmCP_ROQ2_THRESHOLDS 0x21D6
+#define mmCP_ROQ_AVAIL 0x21DA
+#define mmCP_ROQ_IB1_STAT 0x21E1
+#define mmCP_ROQ_IB2_STAT 0x21E2
+#define mmCP_ROQ_RB_STAT 0x21E0
+#define mmCP_SC_PSINVOC_COUNT0_HI 0x212D
+#define mmCP_SC_PSINVOC_COUNT0_LO 0x212C
+#define mmCP_SC_PSINVOC_COUNT1_HI 0x212F
+#define mmCP_SC_PSINVOC_COUNT1_LO 0x212E
+#define mmCP_SCRATCH_DATA 0x2190
+#define mmCP_SCRATCH_INDEX 0x218F
+#define mmCP_SEM_INCOMPLETE_TIMER_CNTL 0x2172
+#define mmCP_SEM_WAIT_TIMER 0x216F
+#define mmCP_SIG_SEM_ADDR_HI 0x2171
+#define mmCP_SIG_SEM_ADDR_LO 0x2170
+#define mmCP_STALLED_STAT1 0x219D
+#define mmCP_STALLED_STAT2 0x219E
+#define mmCP_STALLED_STAT3 0x219C
+#define mmCP_STAT 0x21A0
+#define mmCP_ST_BASE_HI 0x21D3
+#define mmCP_ST_BASE_LO 0x21D2
+#define mmCP_ST_BUFSZ 0x21D4
+#define mmCP_STQ_AVAIL 0x21DB
+#define mmCP_STQ_STAT 0x21E3
+#define mmCP_STQ_THRESHOLDS 0x21D7
+#define mmCP_STREAM_OUT_ADDR_HI 0x2107
+#define mmCP_STREAM_OUT_ADDR_LO 0x2106
+#define mmCP_STRMOUT_CNTL 0x213F
+#define mmCP_VGT_CSINVOC_COUNT_HI 0x2131
+#define mmCP_VGT_CSINVOC_COUNT_LO 0x2130
+#define mmCP_VGT_DSINVOC_COUNT_HI 0x2127
+#define mmCP_VGT_DSINVOC_COUNT_LO 0x2126
+#define mmCP_VGT_GSINVOC_COUNT_HI 0x2123
+#define mmCP_VGT_GSINVOC_COUNT_LO 0x2122
+#define mmCP_VGT_GSPRIM_COUNT_HI 0x211F
+#define mmCP_VGT_GSPRIM_COUNT_LO 0x211E
+#define mmCP_VGT_HSINVOC_COUNT_HI 0x2125
+#define mmCP_VGT_HSINVOC_COUNT_LO 0x2124
+#define mmCP_VGT_IAPRIM_COUNT_HI 0x211D
+#define mmCP_VGT_IAPRIM_COUNT_LO 0x211C
+#define mmCP_VGT_IAVERT_COUNT_HI 0x211B
+#define mmCP_VGT_IAVERT_COUNT_LO 0x211A
+#define mmCP_VGT_VSINVOC_COUNT_HI 0x2121
+#define mmCP_VGT_VSINVOC_COUNT_LO 0x2120
+#define mmCP_VMID 0xA0DA
+#define mmCP_WAIT_REG_MEM_TIMEOUT 0x2174
+#define mmCP_WAIT_SEM_ADDR_HI 0x2176
+#define mmCP_WAIT_SEM_ADDR_LO 0x2175
+#define mmCS_COPY_STATE 0xA1F3
+#define mmDB_ALPHA_TO_MASK 0xA2DC
+#define mmDB_CGTT_CLK_CTRL_0 0x261A
+#define mmDB_COUNT_CONTROL 0xA001
+#define mmDB_CREDIT_LIMIT 0x2614
+#define mmDB_DEBUG 0x260C
+#define mmDB_DEBUG2 0x260D
+#define mmDB_DEBUG3 0x260E
+#define mmDB_DEBUG4 0x260F
+#define mmDB_DEPTH_BOUNDS_MAX 0xA009
+#define mmDB_DEPTH_BOUNDS_MIN 0xA008
+#define mmDB_DEPTH_CLEAR 0xA00B
+#define mmDB_DEPTH_CONTROL 0xA200
+#define mmDB_DEPTH_INFO 0xA00F
+#define mmDB_DEPTH_SIZE 0xA016
+#define mmDB_DEPTH_SLICE 0xA017
+#define mmDB_DEPTH_VIEW 0xA002
+#define mmDB_EQAA 0xA201
+#define mmDB_FIFO_DEPTH1 0x2618
+#define mmDB_FIFO_DEPTH2 0x2619
+#define mmDB_FREE_CACHELINES 0x2617
+#define mmDB_HTILE_DATA_BASE 0xA005
+#define mmDB_HTILE_SURFACE 0xA2AF
+#define mmDB_PERFCOUNTER0_HI 0x2602
+#define mmDB_PERFCOUNTER0_LO 0x2601
+#define mmDB_PERFCOUNTER0_SELECT 0x2600
+#define mmDB_PERFCOUNTER1_HI 0x2605
+#define mmDB_PERFCOUNTER1_LO 0x2604
+#define mmDB_PERFCOUNTER1_SELECT 0x2603
+#define mmDB_PERFCOUNTER2_HI 0x2608
+#define mmDB_PERFCOUNTER2_LO 0x2607
+#define mmDB_PERFCOUNTER2_SELECT 0x2606
+#define mmDB_PERFCOUNTER3_HI 0x260B
+#define mmDB_PERFCOUNTER3_LO 0x260A
+#define mmDB_PERFCOUNTER3_SELECT 0x2609
+#define mmDB_PRELOAD_CONTROL 0xA2B2
+#define mmDB_READ_DEBUG_0 0x2620
+#define mmDB_READ_DEBUG_1 0x2621
+#define mmDB_READ_DEBUG_2 0x2622
+#define mmDB_READ_DEBUG_3 0x2623
+#define mmDB_READ_DEBUG_4 0x2624
+#define mmDB_READ_DEBUG_5 0x2625
+#define mmDB_READ_DEBUG_6 0x2626
+#define mmDB_READ_DEBUG_7 0x2627
+#define mmDB_READ_DEBUG_8 0x2628
+#define mmDB_READ_DEBUG_9 0x2629
+#define mmDB_READ_DEBUG_A 0x262A
+#define mmDB_READ_DEBUG_B 0x262B
+#define mmDB_READ_DEBUG_C 0x262C
+#define mmDB_READ_DEBUG_D 0x262D
+#define mmDB_READ_DEBUG_E 0x262E
+#define mmDB_READ_DEBUG_F 0x262F
+#define mmDB_RENDER_CONTROL 0xA000
+#define mmDB_RENDER_OVERRIDE 0xA003
+#define mmDB_RENDER_OVERRIDE2 0xA004
+#define mmDB_SHADER_CONTROL 0xA203
+#define mmDB_SRESULTS_COMPARE_STATE0 0xA2B0
+#define mmDB_SRESULTS_COMPARE_STATE1 0xA2B1
+#define mmDB_STENCIL_CLEAR 0xA00A
+#define mmDB_STENCIL_CONTROL 0xA10B
+#define mmDB_STENCIL_INFO 0xA011
+#define mmDB_STENCIL_READ_BASE 0xA013
+#define mmDB_STENCILREFMASK 0xA10C
+#define mmDB_STENCILREFMASK_BF 0xA10D
+#define mmDB_STENCIL_WRITE_BASE 0xA015
+#define mmDB_SUBTILE_CONTROL 0x2616
+#define mmDB_WATERMARKS 0x2615
+#define mmDB_Z_INFO 0xA010
+#define mmDB_ZPASS_COUNT_HI 0x261D
+#define mmDB_ZPASS_COUNT_LOW 0x261C
+#define mmDB_Z_READ_BASE 0xA012
+#define mmDB_Z_WRITE_BASE 0xA014
+#define mmDEBUG_DATA 0x203D
+#define mmDEBUG_INDEX 0x203C
+#define mmGB_ADDR_CONFIG 0x263E
+#define mmGB_BACKEND_MAP 0x263F
+#define mmGB_EDC_MODE 0x307E
+#define mmGB_GPU_ID 0x2640
+#define mmGB_TILE_MODE0 0x2644
+#define mmGB_TILE_MODE10 0x264E
+#define mmGB_TILE_MODE1 0x2645
+#define mmGB_TILE_MODE11 0x264F
+#define mmGB_TILE_MODE12 0x2650
+#define mmGB_TILE_MODE13 0x2651
+#define mmGB_TILE_MODE14 0x2652
+#define mmGB_TILE_MODE15 0x2653
+#define mmGB_TILE_MODE16 0x2654
+#define mmGB_TILE_MODE17 0x2655
+#define mmGB_TILE_MODE18 0x2656
+#define mmGB_TILE_MODE19 0x2657
+#define mmGB_TILE_MODE20 0x2658
+#define mmGB_TILE_MODE2 0x2646
+#define mmGB_TILE_MODE21 0x2659
+#define mmGB_TILE_MODE22 0x265A
+#define mmGB_TILE_MODE23 0x265B
+#define mmGB_TILE_MODE24 0x265C
+#define mmGB_TILE_MODE25 0x265D
+#define mmGB_TILE_MODE26 0x265E
+#define mmGB_TILE_MODE27 0x265F
+#define mmGB_TILE_MODE28 0x2660
+#define mmGB_TILE_MODE29 0x2661
+#define mmGB_TILE_MODE30 0x2662
+#define mmGB_TILE_MODE3 0x2647
+#define mmGB_TILE_MODE31 0x2663
+#define mmGB_TILE_MODE4 0x2648
+#define mmGB_TILE_MODE5 0x2649
+#define mmGB_TILE_MODE6 0x264A
+#define mmGB_TILE_MODE7 0x264B
+#define mmGB_TILE_MODE8 0x264C
+#define mmGB_TILE_MODE9 0x264D
+#define mmGC_PRIV_MODE 0x3048
+#define mmGC_USER_RB_BACKEND_DISABLE 0x26DF
+#define mmGC_USER_SHADER_ARRAY_CONFIG 0x2270
+#define mmGDS_ATOM_BASE 0x25CE
+#define mmGDS_ATOM_CNTL 0x25CC
+#define mmGDS_ATOM_COMPLETE 0x25CD
+#define mmGDS_ATOM_DST 0x25D2
+#define mmGDS_ATOM_OFFSET0 0x25D0
+#define mmGDS_ATOM_OFFSET1 0x25D1
+#define mmGDS_ATOM_OP 0x25D3
+#define mmGDS_ATOM_READ0 0x25D8
+#define mmGDS_ATOM_READ0_U 0x25D9
+#define mmGDS_ATOM_READ1 0x25DA
+#define mmGDS_ATOM_READ1_U 0x25DB
+#define mmGDS_ATOM_SIZE 0x25CF
+#define mmGDS_ATOM_SRC0 0x25D4
+#define mmGDS_ATOM_SRC0_U 0x25D5
+#define mmGDS_ATOM_SRC1 0x25D6
+#define mmGDS_ATOM_SRC1_U 0x25D7
+#define mmGDS_CNTL_STATUS 0x25C1
+#define mmGDS_CONFIG 0x25C0
+#define mmGDS_DEBUG_CNTL 0x25DE
+#define mmGDS_DEBUG_DATA 0x25DF
+#define mmGDS_ENHANCE 0x25DC
+#define mmGDS_GRBM_SECDED_CNT 0x25E3
+#define mmGDS_GWS_RESOURCE 0x25E1
+#define mmGDS_GWS_RESOURCE_CNTL 0x25E0
+#define mmGDS_OA_DED 0x25E4
+#define mmGDS_PERFCOUNTER0_HI 0x25E7
+#define mmGDS_PERFCOUNTER0_LO 0x25E6
+#define mmGDS_PERFCOUNTER0_SELECT 0x25E5
+#define mmGDS_PERFCOUNTER1_HI 0x25EA
+#define mmGDS_PERFCOUNTER1_LO 0x25E9
+#define mmGDS_PERFCOUNTER1_SELECT 0x25E8
+#define mmGDS_PERFCOUNTER2_HI 0x25ED
+#define mmGDS_PERFCOUNTER2_LO 0x25EC
+#define mmGDS_PERFCOUNTER2_SELECT 0x25EB
+#define mmGDS_PERFCOUNTER3_HI 0x25F0
+#define mmGDS_PERFCOUNTER3_LO 0x25EF
+#define mmGDS_PERFCOUNTER3_SELECT 0x25EE
+#define mmGDS_RD_ADDR 0x25C2
+#define mmGDS_RD_BURST_ADDR 0x25C4
+#define mmGDS_RD_BURST_COUNT 0x25C5
+#define mmGDS_RD_BURST_DATA 0x25C6
+#define mmGDS_RD_DATA 0x25C3
+#define mmGDS_SECDED_CNT 0x25E2
+#define mmGDS_WR_ADDR 0x25C7
+#define mmGDS_WR_BURST_ADDR 0x25C9
+#define mmGDS_WR_BURST_DATA 0x25CA
+#define mmGDS_WR_DATA 0x25C8
+#define mmGDS_WRITE_COMPLETE 0x25CB
+#define mmGFX_COPY_STATE 0xA1F4
+#define mmGRBM_CAM_DATA 0x3001
+#define mmGRBM_CAM_INDEX 0x3000
+#define mmGRBM_CNTL 0x2000
+#define mmGRBM_DEBUG 0x2014
+#define mmGRBM_DEBUG_CNTL 0x2009
+#define mmGRBM_DEBUG_DATA 0x200A
+#define mmGRBM_DEBUG_SNAPSHOT 0x2015
+#define mmGRBM_GFX_CLKEN_CNTL 0x200C
+#define mmGRBM_GFX_INDEX 0x200B
+#define mmGRBM_INT_CNTL 0x2018
+#define mmGRBM_NOWHERE 0x203F
+#define mmGRBM_PERFCOUNTER0_HI 0x201F
+#define mmGRBM_PERFCOUNTER0_LO 0x201E
+#define mmGRBM_PERFCOUNTER0_SELECT 0x201C
+#define mmGRBM_PERFCOUNTER1_HI 0x2021
+#define mmGRBM_PERFCOUNTER1_LO 0x2020
+#define mmGRBM_PERFCOUNTER1_SELECT 0x201D
+#define mmGRBM_PWR_CNTL 0x2003
+#define mmGRBM_READ_ERROR 0x2016
+#define mmGRBM_SCRATCH_REG0 0x2040
+#define mmGRBM_SCRATCH_REG1 0x2041
+#define mmGRBM_SCRATCH_REG2 0x2042
+#define mmGRBM_SCRATCH_REG3 0x2043
+#define mmGRBM_SCRATCH_REG4 0x2044
+#define mmGRBM_SCRATCH_REG5 0x2045
+#define mmGRBM_SCRATCH_REG6 0x2046
+#define mmGRBM_SCRATCH_REG7 0x2047
+#define mmGRBM_SE0_PERFCOUNTER_HI 0x202B
+#define mmGRBM_SE0_PERFCOUNTER_LO 0x202A
+#define mmGRBM_SE0_PERFCOUNTER_SELECT 0x2026
+#define mmGRBM_SE1_PERFCOUNTER_HI 0x202D
+#define mmGRBM_SE1_PERFCOUNTER_LO 0x202C
+#define mmGRBM_SE1_PERFCOUNTER_SELECT 0x2027
+#define mmGRBM_SKEW_CNTL 0x2001
+#define mmGRBM_SOFT_RESET 0x2008
+#define mmGRBM_STATUS 0x2004
+#define mmGRBM_STATUS2 0x2002
+#define mmGRBM_STATUS_SE0 0x2005
+#define mmGRBM_STATUS_SE1 0x2006
+#define mmGRBM_WAIT_IDLE_CLOCKS 0x200D
+#define mmIA_CNTL_STATUS 0x2237
+#define mmIA_DEBUG_CNTL 0x223A
+#define mmIA_DEBUG_DATA 0x223B
+#define mmIA_ENHANCE 0xA29C
+#define mmIA_MULTI_VGT_PARAM 0xA2AA
+#define mmIA_PERFCOUNTER0_HI 0x2225
+#define mmIA_PERFCOUNTER0_LO 0x2224
+#define mmIA_PERFCOUNTER0_SELECT 0x2220
+#define mmIA_PERFCOUNTER1_HI 0x2227
+#define mmIA_PERFCOUNTER1_LO 0x2226
+#define mmIA_PERFCOUNTER1_SELECT 0x2221
+#define mmIA_PERFCOUNTER2_HI 0x2229
+#define mmIA_PERFCOUNTER2_LO 0x2228
+#define mmIA_PERFCOUNTER2_SELECT 0x2222
+#define mmIA_PERFCOUNTER3_HI 0x222B
+#define mmIA_PERFCOUNTER3_LO 0x222A
+#define mmIA_PERFCOUNTER3_SELECT 0x2223
+#define mmIA_VMID_OVERRIDE 0x2260
+#define mmPA_CL_CLIP_CNTL 0xA204
+#define mmPA_CL_CNTL_STATUS 0x2284
+#define mmPA_CL_ENHANCE 0x2285
+#define mmPA_CL_GB_HORZ_CLIP_ADJ 0xA2FC
+#define mmPA_CL_GB_HORZ_DISC_ADJ 0xA2FD
+#define mmPA_CL_GB_VERT_CLIP_ADJ 0xA2FA
+#define mmPA_CL_GB_VERT_DISC_ADJ 0xA2FB
+#define mmPA_CL_NANINF_CNTL 0xA208
+#define mmPA_CL_POINT_CULL_RAD 0xA1F8
+#define mmPA_CL_POINT_SIZE 0xA1F7
+#define mmPA_CL_POINT_X_RAD 0xA1F5
+#define mmPA_CL_POINT_Y_RAD 0xA1F6
+#define mmPA_CL_UCP_0_W 0xA172
+#define mmPA_CL_UCP_0_X 0xA16F
+#define mmPA_CL_UCP_0_Y 0xA170
+#define mmPA_CL_UCP_0_Z 0xA171
+#define mmPA_CL_UCP_1_W 0xA176
+#define mmPA_CL_UCP_1_X 0xA173
+#define mmPA_CL_UCP_1_Y 0xA174
+#define mmPA_CL_UCP_1_Z 0xA175
+#define mmPA_CL_UCP_2_W 0xA17A
+#define mmPA_CL_UCP_2_X 0xA177
+#define mmPA_CL_UCP_2_Y 0xA178
+#define mmPA_CL_UCP_2_Z 0xA179
+#define mmPA_CL_UCP_3_W 0xA17E
+#define mmPA_CL_UCP_3_X 0xA17B
+#define mmPA_CL_UCP_3_Y 0xA17C
+#define mmPA_CL_UCP_3_Z 0xA17D
+#define mmPA_CL_UCP_4_W 0xA182
+#define mmPA_CL_UCP_4_X 0xA17F
+#define mmPA_CL_UCP_4_Y 0xA180
+#define mmPA_CL_UCP_4_Z 0xA181
+#define mmPA_CL_UCP_5_W 0xA186
+#define mmPA_CL_UCP_5_X 0xA183
+#define mmPA_CL_UCP_5_Y 0xA184
+#define mmPA_CL_UCP_5_Z 0xA185
+#define mmPA_CL_VPORT_XOFFSET 0xA110
+#define mmPA_CL_VPORT_XOFFSET_10 0xA14C
+#define mmPA_CL_VPORT_XOFFSET_1 0xA116
+#define mmPA_CL_VPORT_XOFFSET_11 0xA152
+#define mmPA_CL_VPORT_XOFFSET_12 0xA158
+#define mmPA_CL_VPORT_XOFFSET_13 0xA15E
+#define mmPA_CL_VPORT_XOFFSET_14 0xA164
+#define mmPA_CL_VPORT_XOFFSET_15 0xA16A
+#define mmPA_CL_VPORT_XOFFSET_2 0xA11C
+#define mmPA_CL_VPORT_XOFFSET_3 0xA122
+#define mmPA_CL_VPORT_XOFFSET_4 0xA128
+#define mmPA_CL_VPORT_XOFFSET_5 0xA12E
+#define mmPA_CL_VPORT_XOFFSET_6 0xA134
+#define mmPA_CL_VPORT_XOFFSET_7 0xA13A
+#define mmPA_CL_VPORT_XOFFSET_8 0xA140
+#define mmPA_CL_VPORT_XOFFSET_9 0xA146
+#define mmPA_CL_VPORT_XSCALE 0xA10F
+#define mmPA_CL_VPORT_XSCALE_10 0xA14B
+#define mmPA_CL_VPORT_XSCALE_1 0xA115
+#define mmPA_CL_VPORT_XSCALE_11 0xA151
+#define mmPA_CL_VPORT_XSCALE_12 0xA157
+#define mmPA_CL_VPORT_XSCALE_13 0xA15D
+#define mmPA_CL_VPORT_XSCALE_14 0xA163
+#define mmPA_CL_VPORT_XSCALE_15 0xA169
+#define mmPA_CL_VPORT_XSCALE_2 0xA11B
+#define mmPA_CL_VPORT_XSCALE_3 0xA121
+#define mmPA_CL_VPORT_XSCALE_4 0xA127
+#define mmPA_CL_VPORT_XSCALE_5 0xA12D
+#define mmPA_CL_VPORT_XSCALE_6 0xA133
+#define mmPA_CL_VPORT_XSCALE_7 0xA139
+#define mmPA_CL_VPORT_XSCALE_8 0xA13F
+#define mmPA_CL_VPORT_XSCALE_9 0xA145
+#define mmPA_CL_VPORT_YOFFSET 0xA112
+#define mmPA_CL_VPORT_YOFFSET_10 0xA14E
+#define mmPA_CL_VPORT_YOFFSET_1 0xA118
+#define mmPA_CL_VPORT_YOFFSET_11 0xA154
+#define mmPA_CL_VPORT_YOFFSET_12 0xA15A
+#define mmPA_CL_VPORT_YOFFSET_13 0xA160
+#define mmPA_CL_VPORT_YOFFSET_14 0xA166
+#define mmPA_CL_VPORT_YOFFSET_15 0xA16C
+#define mmPA_CL_VPORT_YOFFSET_2 0xA11E
+#define mmPA_CL_VPORT_YOFFSET_3 0xA124
+#define mmPA_CL_VPORT_YOFFSET_4 0xA12A
+#define mmPA_CL_VPORT_YOFFSET_5 0xA130
+#define mmPA_CL_VPORT_YOFFSET_6 0xA136
+#define mmPA_CL_VPORT_YOFFSET_7 0xA13C
+#define mmPA_CL_VPORT_YOFFSET_8 0xA142
+#define mmPA_CL_VPORT_YOFFSET_9 0xA148
+#define mmPA_CL_VPORT_YSCALE 0xA111
+#define mmPA_CL_VPORT_YSCALE_10 0xA14D
+#define mmPA_CL_VPORT_YSCALE_1 0xA117
+#define mmPA_CL_VPORT_YSCALE_11 0xA153
+#define mmPA_CL_VPORT_YSCALE_12 0xA159
+#define mmPA_CL_VPORT_YSCALE_13 0xA15F
+#define mmPA_CL_VPORT_YSCALE_14 0xA165
+#define mmPA_CL_VPORT_YSCALE_15 0xA16B
+#define mmPA_CL_VPORT_YSCALE_2 0xA11D
+#define mmPA_CL_VPORT_YSCALE_3 0xA123
+#define mmPA_CL_VPORT_YSCALE_4 0xA129
+#define mmPA_CL_VPORT_YSCALE_5 0xA12F
+#define mmPA_CL_VPORT_YSCALE_6 0xA135
+#define mmPA_CL_VPORT_YSCALE_7 0xA13B
+#define mmPA_CL_VPORT_YSCALE_8 0xA141
+#define mmPA_CL_VPORT_YSCALE_9 0xA147
+#define mmPA_CL_VPORT_ZOFFSET 0xA114
+#define mmPA_CL_VPORT_ZOFFSET_10 0xA150
+#define mmPA_CL_VPORT_ZOFFSET_1 0xA11A
+#define mmPA_CL_VPORT_ZOFFSET_11 0xA156
+#define mmPA_CL_VPORT_ZOFFSET_12 0xA15C
+#define mmPA_CL_VPORT_ZOFFSET_13 0xA162
+#define mmPA_CL_VPORT_ZOFFSET_14 0xA168
+#define mmPA_CL_VPORT_ZOFFSET_15 0xA16E
+#define mmPA_CL_VPORT_ZOFFSET_2 0xA120
+#define mmPA_CL_VPORT_ZOFFSET_3 0xA126
+#define mmPA_CL_VPORT_ZOFFSET_4 0xA12C
+#define mmPA_CL_VPORT_ZOFFSET_5 0xA132
+#define mmPA_CL_VPORT_ZOFFSET_6 0xA138
+#define mmPA_CL_VPORT_ZOFFSET_7 0xA13E
+#define mmPA_CL_VPORT_ZOFFSET_8 0xA144
+#define mmPA_CL_VPORT_ZOFFSET_9 0xA14A
+#define mmPA_CL_VPORT_ZSCALE 0xA113
+#define mmPA_CL_VPORT_ZSCALE_10 0xA14F
+#define mmPA_CL_VPORT_ZSCALE_1 0xA119
+#define mmPA_CL_VPORT_ZSCALE_11 0xA155
+#define mmPA_CL_VPORT_ZSCALE_12 0xA15B
+#define mmPA_CL_VPORT_ZSCALE_13 0xA161
+#define mmPA_CL_VPORT_ZSCALE_14 0xA167
+#define mmPA_CL_VPORT_ZSCALE_15 0xA16D
+#define mmPA_CL_VPORT_ZSCALE_2 0xA11F
+#define mmPA_CL_VPORT_ZSCALE_3 0xA125
+#define mmPA_CL_VPORT_ZSCALE_4 0xA12B
+#define mmPA_CL_VPORT_ZSCALE_5 0xA131
+#define mmPA_CL_VPORT_ZSCALE_6 0xA137
+#define mmPA_CL_VPORT_ZSCALE_7 0xA13D
+#define mmPA_CL_VPORT_ZSCALE_8 0xA143
+#define mmPA_CL_VPORT_ZSCALE_9 0xA149
+#define mmPA_CL_VS_OUT_CNTL 0xA207
+#define mmPA_CL_VTE_CNTL 0xA206
+#define mmPA_SC_AA_CONFIG 0xA2F8
+#define mmPA_SC_AA_MASK_X0Y0_X1Y0 0xA30E
+#define mmPA_SC_AA_MASK_X0Y1_X1Y1 0xA30F
+#define mmPA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0 0xA2FE
+#define mmPA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_1 0xA2FF
+#define mmPA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_2 0xA300
+#define mmPA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_3 0xA301
+#define mmPA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0 0xA306
+#define mmPA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_1 0xA307
+#define mmPA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_2 0xA308
+#define mmPA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_3 0xA309
+#define mmPA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0 0xA302
+#define mmPA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_1 0xA303
+#define mmPA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_2 0xA304
+#define mmPA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_3 0xA305
+#define mmPA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0 0xA30A
+#define mmPA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_1 0xA30B
+#define mmPA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_2 0xA30C
+#define mmPA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_3 0xA30D
+#define mmPA_SC_CENTROID_PRIORITY_0 0xA2F5
+#define mmPA_SC_CENTROID_PRIORITY_1 0xA2F6
+#define mmPA_SC_CLIPRECT_0_BR 0xA085
+#define mmPA_SC_CLIPRECT_0_TL 0xA084
+#define mmPA_SC_CLIPRECT_1_BR 0xA087
+#define mmPA_SC_CLIPRECT_1_TL 0xA086
+#define mmPA_SC_CLIPRECT_2_BR 0xA089
+#define mmPA_SC_CLIPRECT_2_TL 0xA088
+#define mmPA_SC_CLIPRECT_3_BR 0xA08B
+#define mmPA_SC_CLIPRECT_3_TL 0xA08A
+#define mmPA_SC_CLIPRECT_RULE 0xA083
+#define mmPA_SC_DEBUG_CNTL 0x22F6
+#define mmPA_SC_DEBUG_DATA 0x22F7
+#define mmPA_SC_EDGERULE 0xA08C
+#define mmPA_SC_ENHANCE 0x22FC
+#define mmPA_SC_FIFO_DEPTH_CNTL 0x2295
+#define mmPA_SC_FIFO_SIZE 0x22F3
+#define mmPA_SC_FORCE_EOV_MAX_CNTS 0x22C9
+#define mmPA_SC_GENERIC_SCISSOR_BR 0xA091
+#define mmPA_SC_GENERIC_SCISSOR_TL 0xA090
+#define mmPA_SC_IF_FIFO_SIZE 0x22F5
+#define mmPA_SC_LINE_CNTL 0xA2F7
+#define mmPA_SC_LINE_STIPPLE 0xA283
+#define mmPA_SC_LINE_STIPPLE_STATE 0x22C4
+#define mmPA_SC_MODE_CNTL_0 0xA292
+#define mmPA_SC_MODE_CNTL_1 0xA293
+#define mmPA_SC_PERFCOUNTER0_HI 0x22A9
+#define mmPA_SC_PERFCOUNTER0_LO 0x22A8
+#define mmPA_SC_PERFCOUNTER0_SELECT 0x22A0
+#define mmPA_SC_PERFCOUNTER1_HI 0x22AB
+#define mmPA_SC_PERFCOUNTER1_LO 0x22AA
+#define mmPA_SC_PERFCOUNTER1_SELECT 0x22A1
+#define mmPA_SC_PERFCOUNTER2_HI 0x22AD
+#define mmPA_SC_PERFCOUNTER2_LO 0x22AC
+#define mmPA_SC_PERFCOUNTER2_SELECT 0x22A2
+#define mmPA_SC_PERFCOUNTER3_HI 0x22AF
+#define mmPA_SC_PERFCOUNTER3_LO 0x22AE
+#define mmPA_SC_PERFCOUNTER3_SELECT 0x22A3
+#define mmPA_SC_PERFCOUNTER4_HI 0x22B1
+#define mmPA_SC_PERFCOUNTER4_LO 0x22B0
+#define mmPA_SC_PERFCOUNTER4_SELECT 0x22A4
+#define mmPA_SC_PERFCOUNTER5_HI 0x22B3
+#define mmPA_SC_PERFCOUNTER5_LO 0x22B2
+#define mmPA_SC_PERFCOUNTER5_SELECT 0x22A5
+#define mmPA_SC_PERFCOUNTER6_HI 0x22B5
+#define mmPA_SC_PERFCOUNTER6_LO 0x22B4
+#define mmPA_SC_PERFCOUNTER6_SELECT 0x22A6
+#define mmPA_SC_PERFCOUNTER7_HI 0x22B7
+#define mmPA_SC_PERFCOUNTER7_LO 0x22B6
+#define mmPA_SC_PERFCOUNTER7_SELECT 0x22A7
+#define mmPA_SC_RASTER_CONFIG 0xA0D4
+#define mmPA_SC_SCREEN_SCISSOR_BR 0xA00D
+#define mmPA_SC_SCREEN_SCISSOR_TL 0xA00C
+#define mmPA_SC_VPORT_SCISSOR_0_BR 0xA095
+#define mmPA_SC_VPORT_SCISSOR_0_TL 0xA094
+#define mmPA_SC_VPORT_SCISSOR_10_BR 0xA0A9
+#define mmPA_SC_VPORT_SCISSOR_10_TL 0xA0A8
+#define mmPA_SC_VPORT_SCISSOR_11_BR 0xA0AB
+#define mmPA_SC_VPORT_SCISSOR_11_TL 0xA0AA
+#define mmPA_SC_VPORT_SCISSOR_12_BR 0xA0AD
+#define mmPA_SC_VPORT_SCISSOR_12_TL 0xA0AC
+#define mmPA_SC_VPORT_SCISSOR_13_BR 0xA0AF
+#define mmPA_SC_VPORT_SCISSOR_13_TL 0xA0AE
+#define mmPA_SC_VPORT_SCISSOR_14_BR 0xA0B1
+#define mmPA_SC_VPORT_SCISSOR_14_TL 0xA0B0
+#define mmPA_SC_VPORT_SCISSOR_15_BR 0xA0B3
+#define mmPA_SC_VPORT_SCISSOR_15_TL 0xA0B2
+#define mmPA_SC_VPORT_SCISSOR_1_BR 0xA097
+#define mmPA_SC_VPORT_SCISSOR_1_TL 0xA096
+#define mmPA_SC_VPORT_SCISSOR_2_BR 0xA099
+#define mmPA_SC_VPORT_SCISSOR_2_TL 0xA098
+#define mmPA_SC_VPORT_SCISSOR_3_BR 0xA09B
+#define mmPA_SC_VPORT_SCISSOR_3_TL 0xA09A
+#define mmPA_SC_VPORT_SCISSOR_4_BR 0xA09D
+#define mmPA_SC_VPORT_SCISSOR_4_TL 0xA09C
+#define mmPA_SC_VPORT_SCISSOR_5_BR 0xA09F
+#define mmPA_SC_VPORT_SCISSOR_5_TL 0xA09E
+#define mmPA_SC_VPORT_SCISSOR_6_BR 0xA0A1
+#define mmPA_SC_VPORT_SCISSOR_6_TL 0xA0A0
+#define mmPA_SC_VPORT_SCISSOR_7_BR 0xA0A3
+#define mmPA_SC_VPORT_SCISSOR_7_TL 0xA0A2
+#define mmPA_SC_VPORT_SCISSOR_8_BR 0xA0A5
+#define mmPA_SC_VPORT_SCISSOR_8_TL 0xA0A4
+#define mmPA_SC_VPORT_SCISSOR_9_BR 0xA0A7
+#define mmPA_SC_VPORT_SCISSOR_9_TL 0xA0A6
+#define mmPA_SC_VPORT_ZMAX_0 0xA0B5
+#define mmPA_SC_VPORT_ZMAX_10 0xA0C9
+#define mmPA_SC_VPORT_ZMAX_1 0xA0B7
+#define mmPA_SC_VPORT_ZMAX_11 0xA0CB
+#define mmPA_SC_VPORT_ZMAX_12 0xA0CD
+#define mmPA_SC_VPORT_ZMAX_13 0xA0CF
+#define mmPA_SC_VPORT_ZMAX_14 0xA0D1
+#define mmPA_SC_VPORT_ZMAX_15 0xA0D3
+#define mmPA_SC_VPORT_ZMAX_2 0xA0B9
+#define mmPA_SC_VPORT_ZMAX_3 0xA0BB
+#define mmPA_SC_VPORT_ZMAX_4 0xA0BD
+#define mmPA_SC_VPORT_ZMAX_5 0xA0BF
+#define mmPA_SC_VPORT_ZMAX_6 0xA0C1
+#define mmPA_SC_VPORT_ZMAX_7 0xA0C3
+#define mmPA_SC_VPORT_ZMAX_8 0xA0C5
+#define mmPA_SC_VPORT_ZMAX_9 0xA0C7
+#define mmPA_SC_VPORT_ZMIN_0 0xA0B4
+#define mmPA_SC_VPORT_ZMIN_10 0xA0C8
+#define mmPA_SC_VPORT_ZMIN_1 0xA0B6
+#define mmPA_SC_VPORT_ZMIN_11 0xA0CA
+#define mmPA_SC_VPORT_ZMIN_12 0xA0CC
+#define mmPA_SC_VPORT_ZMIN_13 0xA0CE
+#define mmPA_SC_VPORT_ZMIN_14 0xA0D0
+#define mmPA_SC_VPORT_ZMIN_15 0xA0D2
+#define mmPA_SC_VPORT_ZMIN_2 0xA0B8
+#define mmPA_SC_VPORT_ZMIN_3 0xA0BA
+#define mmPA_SC_VPORT_ZMIN_4 0xA0BC
+#define mmPA_SC_VPORT_ZMIN_5 0xA0BE
+#define mmPA_SC_VPORT_ZMIN_6 0xA0C0
+#define mmPA_SC_VPORT_ZMIN_7 0xA0C2
+#define mmPA_SC_VPORT_ZMIN_8 0xA0C4
+#define mmPA_SC_VPORT_ZMIN_9 0xA0C6
+#define mmPA_SC_WINDOW_OFFSET 0xA080
+#define mmPA_SC_WINDOW_SCISSOR_BR 0xA082
+#define mmPA_SC_WINDOW_SCISSOR_TL 0xA081
+#define mmPA_SU_CNTL_STATUS 0x2294
+#define mmPA_SU_DEBUG_CNTL 0x2280
+#define mmPA_SU_DEBUG_DATA 0x2281
+#define mmPA_SU_HARDWARE_SCREEN_OFFSET 0xA08D
+#define mmPA_SU_LINE_CNTL 0xA282
+#define mmPA_SU_LINE_STIPPLE_CNTL 0xA209
+#define mmPA_SU_LINE_STIPPLE_SCALE 0xA20A
+#define mmPA_SU_LINE_STIPPLE_VALUE 0x2298
+#define mmPA_SU_PERFCOUNTER0_HI 0x228D
+#define mmPA_SU_PERFCOUNTER0_LO 0x228C
+#define mmPA_SU_PERFCOUNTER0_SELECT 0x2288
+#define mmPA_SU_PERFCOUNTER1_HI 0x228F
+#define mmPA_SU_PERFCOUNTER1_LO 0x228E
+#define mmPA_SU_PERFCOUNTER1_SELECT 0x2289
+#define mmPA_SU_PERFCOUNTER2_HI 0x2291
+#define mmPA_SU_PERFCOUNTER2_LO 0x2290
+#define mmPA_SU_PERFCOUNTER2_SELECT 0x228A
+#define mmPA_SU_PERFCOUNTER3_HI 0x2293
+#define mmPA_SU_PERFCOUNTER3_LO 0x2292
+#define mmPA_SU_PERFCOUNTER3_SELECT 0x228B
+#define mmPA_SU_POINT_MINMAX 0xA281
+#define mmPA_SU_POINT_SIZE 0xA280
+#define mmPA_SU_POLY_OFFSET_BACK_OFFSET 0xA2E3
+#define mmPA_SU_POLY_OFFSET_BACK_SCALE 0xA2E2
+#define mmPA_SU_POLY_OFFSET_CLAMP 0xA2DF
+#define mmPA_SU_POLY_OFFSET_DB_FMT_CNTL 0xA2DE
+#define mmPA_SU_POLY_OFFSET_FRONT_OFFSET 0xA2E1
+#define mmPA_SU_POLY_OFFSET_FRONT_SCALE 0xA2E0
+#define mmPA_SU_PRIM_FILTER_CNTL 0xA20B
+#define mmPA_SU_SC_MODE_CNTL 0xA205
+#define mmPA_SU_VTX_CNTL 0xA2F9
+#define mmRAS_BCI_SIGNATURE0 0x339E
+#define mmRAS_BCI_SIGNATURE1 0x339F
+#define mmRAS_CB_SIGNATURE0 0x339D
+#define mmRAS_DB_SIGNATURE0 0x338B
+#define mmRAS_IA_SIGNATURE0 0x3397
+#define mmRAS_IA_SIGNATURE1 0x3398
+#define mmRAS_PA_SIGNATURE0 0x338C
+#define mmRAS_SC_SIGNATURE0 0x338F
+#define mmRAS_SC_SIGNATURE1 0x3390
+#define mmRAS_SC_SIGNATURE2 0x3391
+#define mmRAS_SC_SIGNATURE3 0x3392
+#define mmRAS_SC_SIGNATURE4 0x3393
+#define mmRAS_SC_SIGNATURE5 0x3394
+#define mmRAS_SC_SIGNATURE6 0x3395
+#define mmRAS_SC_SIGNATURE7 0x3396
+#define mmRAS_SIGNATURE_CONTROL 0x3380
+#define mmRAS_SIGNATURE_MASK 0x3381
+#define mmRAS_SPI_SIGNATURE0 0x3399
+#define mmRAS_SPI_SIGNATURE1 0x339A
+#define mmRAS_SQ_SIGNATURE0 0x338E
+#define mmRAS_SX_SIGNATURE0 0x3382
+#define mmRAS_SX_SIGNATURE1 0x3383
+#define mmRAS_SX_SIGNATURE2 0x3384
+#define mmRAS_SX_SIGNATURE3 0x3385
+#define mmRAS_TA_SIGNATURE0 0x339B
+#define mmRAS_TD_SIGNATURE0 0x339C
+#define mmRAS_VGT_SIGNATURE0 0x338D
+#define mmRLC_AUTO_PG_CTRL 0x310D
+#define mmRLC_CAPTURE_GPU_CLOCK_COUNT 0x30D0
+#define mmRLC_CGCG_CGLS_CTRL 0x3101
+#define mmRLC_CGCG_RAMP_CTRL 0x3102
+#define mmRLC_CGTT_MGCG_OVERRIDE 0x3100
+#define mmRLC_CNTL 0x30C0
+#define mmRLC_CU_STATUS 0x3106
+#define mmRLC_DEBUG 0x30CA
+#define mmRLC_DEBUG_SELECT 0x30C9
+#define mmRLC_DRIVER_CPDMA_STATUS 0x30C7
+#define mmRLC_DYN_PG_REQUEST 0x3104
+#define mmRLC_DYN_PG_STATUS 0x3103
+#define mmRLC_GPU_CLOCK_32 0x30D5
+#define mmRLC_GPU_CLOCK_32_RES_SEL 0x30D4
+#define mmRLC_GPU_CLOCK_COUNT_LSB 0x30CE
+#define mmRLC_GPU_CLOCK_COUNT_MSB 0x30CF
+#define mmRLC_LB_ALWAYS_ACTIVE_CU_MASK 0x3108
+#define mmRLC_LB_CNTL 0x30C3
+#define mmRLC_LB_CNTR_INIT 0x30C6
+#define mmRLC_LB_CNTR_MAX 0x30C5
+#define mmRLC_LB_INIT_CU_MASK 0x3107
+#define mmRLC_LB_PARAMS 0x3109
+#define mmRLC_LOAD_BALANCE_CNTR 0x30F6
+#define mmRLC_MAX_PG_CU 0x310C
+#define mmRLC_MC_CNTL 0x30D1
+#define mmRLC_MEM_SLP_CNTL 0x30D8
+#define mmRLC_PERFCOUNTER0_HI 0x30DC
+#define mmRLC_PERFCOUNTER0_LO 0x30DB
+#define mmRLC_PERFCOUNTER0_SELECT 0x30DA
+#define mmRLC_PERFCOUNTER1_HI 0x30DF
+#define mmRLC_PERFCOUNTER1_LO 0x30DE
+#define mmRLC_PERFCOUNTER1_SELECT 0x30DD
+#define mmRLC_PERFMON_CNTL 0x30D9
+#define mmRLC_PG_ALWAYS_ON_CU_MASK 0x310B
+#define mmRLC_PG_CNTL 0x30D7
+#define mmRLC_SAVE_AND_RESTORE_BASE 0x30C4
+#define mmRLC_SERDES_RD_DATA_0 0x3112
+#define mmRLC_SERDES_RD_DATA_1 0x3113
+#define mmRLC_SERDES_RD_DATA_2 0x3114
+#define mmRLC_SERDES_RD_MASTER_INDEX 0x3111
+#define mmRLC_SERDES_WR_CTRL 0x3117
+#define mmRLC_SERDES_WR_DATA 0x3118
+#define mmRLC_SMU_GRBM_REG_SAVE_CTRL 0x310E
+#define mmRLC_SMU_PG_CTRL 0x310F
+#define mmRLC_SMU_PG_WAKE_UP_CTRL 0x3110
+#define mmRLC_SOFT_RESET_GPU 0x30D6
+#define mmRLC_STAT 0x30D3
+#define mmRLC_THREAD1_DELAY 0x310A
+#define mmRLC_UCODE_CNTL 0x30D2
+#define mmSCRATCH_ADDR 0x2151
+#define mmSCRATCH_REG0 0x2140
+#define mmSCRATCH_REG1 0x2141
+#define mmSCRATCH_REG2 0x2142
+#define mmSCRATCH_REG3 0x2143
+#define mmSCRATCH_REG4 0x2144
+#define mmSCRATCH_REG5 0x2145
+#define mmSCRATCH_REG6 0x2146
+#define mmSCRATCH_REG7 0x2147
+#define mmSCRATCH_UMSK 0x2150
+#define mmSPI_ARB_CYCLES_0 0x243D
+#define mmSPI_ARB_CYCLES_1 0x243E
+#define mmSPI_ARB_PRIORITY 0x243C
+#define mmSPI_BARYC_CNTL 0xA1B8
+#define mmSPI_CONFIG_CNTL 0x2440
+#define mmSPI_CONFIG_CNTL_1 0x244F
+#define mmSPI_DEBUG_BUSY 0x2450
+#define mmSPI_DEBUG_CNTL 0x2441
+#define mmSPI_DEBUG_READ 0x2442
+#define mmSPI_GDS_CREDITS 0x24D8
+#define mmSPI_INTERP_CONTROL_0 0xA1B5
+#define mmSPI_LB_CTR_CTRL 0x24D4
+#define mmSPI_LB_CU_MASK 0x24D5
+#define mmSPI_LB_DATA_REG 0x24D6
+#define mmSPI_PERFCOUNTER0_HI 0x2447
+#define mmSPI_PERFCOUNTER0_LO 0x2448
+#define mmSPI_PERFCOUNTER0_SELECT 0x2443
+#define mmSPI_PERFCOUNTER1_HI 0x2449
+#define mmSPI_PERFCOUNTER1_LO 0x244A
+#define mmSPI_PERFCOUNTER1_SELECT 0x2444
+#define mmSPI_PERFCOUNTER2_HI 0x244B
+#define mmSPI_PERFCOUNTER2_LO 0x244C
+#define mmSPI_PERFCOUNTER2_SELECT 0x2445
+#define mmSPI_PERFCOUNTER3_HI 0x244D
+#define mmSPI_PERFCOUNTER3_LO 0x244E
+#define mmSPI_PERFCOUNTER3_SELECT 0x2446
+#define mmSPI_PERFCOUNTER_BINS 0x243F
+#define mmSPI_PG_ENABLE_STATIC_CU_MASK 0x24D7
+#define mmSPI_PS_IN_CONTROL 0xA1B6
+#define mmSPI_PS_INPUT_ADDR 0xA1B4
+#define mmSPI_PS_INPUT_CNTL_0 0xA191
+#define mmSPI_PS_INPUT_CNTL_10 0xA19B
+#define mmSPI_PS_INPUT_CNTL_1 0xA192
+#define mmSPI_PS_INPUT_CNTL_11 0xA19C
+#define mmSPI_PS_INPUT_CNTL_12 0xA19D
+#define mmSPI_PS_INPUT_CNTL_13 0xA19E
+#define mmSPI_PS_INPUT_CNTL_14 0xA19F
+#define mmSPI_PS_INPUT_CNTL_15 0xA1A0
+#define mmSPI_PS_INPUT_CNTL_16 0xA1A1
+#define mmSPI_PS_INPUT_CNTL_17 0xA1A2
+#define mmSPI_PS_INPUT_CNTL_18 0xA1A3
+#define mmSPI_PS_INPUT_CNTL_19 0xA1A4
+#define mmSPI_PS_INPUT_CNTL_20 0xA1A5
+#define mmSPI_PS_INPUT_CNTL_2 0xA193
+#define mmSPI_PS_INPUT_CNTL_21 0xA1A6
+#define mmSPI_PS_INPUT_CNTL_22 0xA1A7
+#define mmSPI_PS_INPUT_CNTL_23 0xA1A8
+#define mmSPI_PS_INPUT_CNTL_24 0xA1A9
+#define mmSPI_PS_INPUT_CNTL_25 0xA1AA
+#define mmSPI_PS_INPUT_CNTL_26 0xA1AB
+#define mmSPI_PS_INPUT_CNTL_27 0xA1AC
+#define mmSPI_PS_INPUT_CNTL_28 0xA1AD
+#define mmSPI_PS_INPUT_CNTL_29 0xA1AE
+#define mmSPI_PS_INPUT_CNTL_30 0xA1AF
+#define mmSPI_PS_INPUT_CNTL_3 0xA194
+#define mmSPI_PS_INPUT_CNTL_31 0xA1B0
+#define mmSPI_PS_INPUT_CNTL_4 0xA195
+#define mmSPI_PS_INPUT_CNTL_5 0xA196
+#define mmSPI_PS_INPUT_CNTL_6 0xA197
+#define mmSPI_PS_INPUT_CNTL_7 0xA198
+#define mmSPI_PS_INPUT_CNTL_8 0xA199
+#define mmSPI_PS_INPUT_CNTL_9 0xA19A
+#define mmSPI_PS_INPUT_ENA 0xA1B3
+#define mmSPI_PS_MAX_WAVE_ID 0x243B
+#define mmSPI_SHADER_COL_FORMAT 0xA1C5
+#define mmSPI_SHADER_PGM_HI_ES 0x2CC9
+#define mmSPI_SHADER_PGM_HI_GS 0x2C89
+#define mmSPI_SHADER_PGM_HI_HS 0x2D09
+#define mmSPI_SHADER_PGM_HI_LS 0x2D49
+#define mmSPI_SHADER_PGM_HI_PS 0x2C09
+#define mmSPI_SHADER_PGM_HI_VS 0x2C49
+#define mmSPI_SHADER_PGM_LO_ES 0x2CC8
+#define mmSPI_SHADER_PGM_LO_GS 0x2C88
+#define mmSPI_SHADER_PGM_LO_HS 0x2D08
+#define mmSPI_SHADER_PGM_LO_LS 0x2D48
+#define mmSPI_SHADER_PGM_LO_PS 0x2C08
+#define mmSPI_SHADER_PGM_LO_VS 0x2C48
+#define mmSPI_SHADER_PGM_RSRC1_ES 0x2CCA
+#define mmSPI_SHADER_PGM_RSRC1_GS 0x2C8A
+#define mmSPI_SHADER_PGM_RSRC1_HS 0x2D0A
+#define mmSPI_SHADER_PGM_RSRC1_LS 0x2D4A
+#define mmSPI_SHADER_PGM_RSRC1_PS 0x2C0A
+#define mmSPI_SHADER_PGM_RSRC1_VS 0x2C4A
+#define mmSPI_SHADER_PGM_RSRC2_ES 0x2CCB
+#define mmSPI_SHADER_PGM_RSRC2_GS 0x2C8B
+#define mmSPI_SHADER_PGM_RSRC2_HS 0x2D0B
+#define mmSPI_SHADER_PGM_RSRC2_LS 0x2D4B
+#define mmSPI_SHADER_PGM_RSRC2_PS 0x2C0B
+#define mmSPI_SHADER_PGM_RSRC2_VS 0x2C4B
+#define mmSPI_SHADER_POS_FORMAT 0xA1C3
+#define mmSPI_SHADER_TBA_HI_ES 0x2CC1
+#define mmSPI_SHADER_TBA_HI_GS 0x2C81
+#define mmSPI_SHADER_TBA_HI_HS 0x2D01
+#define mmSPI_SHADER_TBA_HI_LS 0x2D41
+#define mmSPI_SHADER_TBA_HI_PS 0x2C01
+#define mmSPI_SHADER_TBA_HI_VS 0x2C41
+#define mmSPI_SHADER_TBA_LO_ES 0x2CC0
+#define mmSPI_SHADER_TBA_LO_GS 0x2C80
+#define mmSPI_SHADER_TBA_LO_HS 0x2D00
+#define mmSPI_SHADER_TBA_LO_LS 0x2D40
+#define mmSPI_SHADER_TBA_LO_PS 0x2C00
+#define mmSPI_SHADER_TBA_LO_VS 0x2C40
+#define mmSPI_SHADER_TMA_HI_ES 0x2CC3
+#define mmSPI_SHADER_TMA_HI_GS 0x2C83
+#define mmSPI_SHADER_TMA_HI_HS 0x2D03
+#define mmSPI_SHADER_TMA_HI_LS 0x2D43
+#define mmSPI_SHADER_TMA_HI_PS 0x2C03
+#define mmSPI_SHADER_TMA_HI_VS 0x2C43
+#define mmSPI_SHADER_TMA_LO_ES 0x2CC2
+#define mmSPI_SHADER_TMA_LO_GS 0x2C82
+#define mmSPI_SHADER_TMA_LO_HS 0x2D02
+#define mmSPI_SHADER_TMA_LO_LS 0x2D42
+#define mmSPI_SHADER_TMA_LO_PS 0x2C02
+#define mmSPI_SHADER_TMA_LO_VS 0x2C42
+#define mmSPI_SHADER_USER_DATA_ES_0 0x2CCC
+#define mmSPI_SHADER_USER_DATA_ES_10 0x2CD6
+#define mmSPI_SHADER_USER_DATA_ES_1 0x2CCD
+#define mmSPI_SHADER_USER_DATA_ES_11 0x2CD7
+#define mmSPI_SHADER_USER_DATA_ES_12 0x2CD8
+#define mmSPI_SHADER_USER_DATA_ES_13 0x2CD9
+#define mmSPI_SHADER_USER_DATA_ES_14 0x2CDA
+#define mmSPI_SHADER_USER_DATA_ES_15 0x2CDB
+#define mmSPI_SHADER_USER_DATA_ES_2 0x2CCE
+#define mmSPI_SHADER_USER_DATA_ES_3 0x2CCF
+#define mmSPI_SHADER_USER_DATA_ES_4 0x2CD0
+#define mmSPI_SHADER_USER_DATA_ES_5 0x2CD1
+#define mmSPI_SHADER_USER_DATA_ES_6 0x2CD2
+#define mmSPI_SHADER_USER_DATA_ES_7 0x2CD3
+#define mmSPI_SHADER_USER_DATA_ES_8 0x2CD4
+#define mmSPI_SHADER_USER_DATA_ES_9 0x2CD5
+#define mmSPI_SHADER_USER_DATA_GS_0 0x2C8C
+#define mmSPI_SHADER_USER_DATA_GS_10 0x2C96
+#define mmSPI_SHADER_USER_DATA_GS_1 0x2C8D
+#define mmSPI_SHADER_USER_DATA_GS_11 0x2C97
+#define mmSPI_SHADER_USER_DATA_GS_12 0x2C98
+#define mmSPI_SHADER_USER_DATA_GS_13 0x2C99
+#define mmSPI_SHADER_USER_DATA_GS_14 0x2C9A
+#define mmSPI_SHADER_USER_DATA_GS_15 0x2C9B
+#define mmSPI_SHADER_USER_DATA_GS_2 0x2C8E
+#define mmSPI_SHADER_USER_DATA_GS_3 0x2C8F
+#define mmSPI_SHADER_USER_DATA_GS_4 0x2C90
+#define mmSPI_SHADER_USER_DATA_GS_5 0x2C91
+#define mmSPI_SHADER_USER_DATA_GS_6 0x2C92
+#define mmSPI_SHADER_USER_DATA_GS_7 0x2C93
+#define mmSPI_SHADER_USER_DATA_GS_8 0x2C94
+#define mmSPI_SHADER_USER_DATA_GS_9 0x2C95
+#define mmSPI_SHADER_USER_DATA_HS_0 0x2D0C
+#define mmSPI_SHADER_USER_DATA_HS_10 0x2D16
+#define mmSPI_SHADER_USER_DATA_HS_1 0x2D0D
+#define mmSPI_SHADER_USER_DATA_HS_11 0x2D17
+#define mmSPI_SHADER_USER_DATA_HS_12 0x2D18
+#define mmSPI_SHADER_USER_DATA_HS_13 0x2D19
+#define mmSPI_SHADER_USER_DATA_HS_14 0x2D1A
+#define mmSPI_SHADER_USER_DATA_HS_15 0x2D1B
+#define mmSPI_SHADER_USER_DATA_HS_2 0x2D0E
+#define mmSPI_SHADER_USER_DATA_HS_3 0x2D0F
+#define mmSPI_SHADER_USER_DATA_HS_4 0x2D10
+#define mmSPI_SHADER_USER_DATA_HS_5 0x2D11
+#define mmSPI_SHADER_USER_DATA_HS_6 0x2D12
+#define mmSPI_SHADER_USER_DATA_HS_7 0x2D13
+#define mmSPI_SHADER_USER_DATA_HS_8 0x2D14
+#define mmSPI_SHADER_USER_DATA_HS_9 0x2D15
+#define mmSPI_SHADER_USER_DATA_LS_0 0x2D4C
+#define mmSPI_SHADER_USER_DATA_LS_10 0x2D56
+#define mmSPI_SHADER_USER_DATA_LS_1 0x2D4D
+#define mmSPI_SHADER_USER_DATA_LS_11 0x2D57
+#define mmSPI_SHADER_USER_DATA_LS_12 0x2D58
+#define mmSPI_SHADER_USER_DATA_LS_13 0x2D59
+#define mmSPI_SHADER_USER_DATA_LS_14 0x2D5A
+#define mmSPI_SHADER_USER_DATA_LS_15 0x2D5B
+#define mmSPI_SHADER_USER_DATA_LS_2 0x2D4E
+#define mmSPI_SHADER_USER_DATA_LS_3 0x2D4F
+#define mmSPI_SHADER_USER_DATA_LS_4 0x2D50
+#define mmSPI_SHADER_USER_DATA_LS_5 0x2D51
+#define mmSPI_SHADER_USER_DATA_LS_6 0x2D52
+#define mmSPI_SHADER_USER_DATA_LS_7 0x2D53
+#define mmSPI_SHADER_USER_DATA_LS_8 0x2D54
+#define mmSPI_SHADER_USER_DATA_LS_9 0x2D55
+#define mmSPI_SHADER_USER_DATA_PS_0 0x2C0C
+#define mmSPI_SHADER_USER_DATA_PS_10 0x2C16
+#define mmSPI_SHADER_USER_DATA_PS_1 0x2C0D
+#define mmSPI_SHADER_USER_DATA_PS_11 0x2C17
+#define mmSPI_SHADER_USER_DATA_PS_12 0x2C18
+#define mmSPI_SHADER_USER_DATA_PS_13 0x2C19
+#define mmSPI_SHADER_USER_DATA_PS_14 0x2C1A
+#define mmSPI_SHADER_USER_DATA_PS_15 0x2C1B
+#define mmSPI_SHADER_USER_DATA_PS_2 0x2C0E
+#define mmSPI_SHADER_USER_DATA_PS_3 0x2C0F
+#define mmSPI_SHADER_USER_DATA_PS_4 0x2C10
+#define mmSPI_SHADER_USER_DATA_PS_5 0x2C11
+#define mmSPI_SHADER_USER_DATA_PS_6 0x2C12
+#define mmSPI_SHADER_USER_DATA_PS_7 0x2C13
+#define mmSPI_SHADER_USER_DATA_PS_8 0x2C14
+#define mmSPI_SHADER_USER_DATA_PS_9 0x2C15
+#define mmSPI_SHADER_USER_DATA_VS_0 0x2C4C
+#define mmSPI_SHADER_USER_DATA_VS_10 0x2C56
+#define mmSPI_SHADER_USER_DATA_VS_1 0x2C4D
+#define mmSPI_SHADER_USER_DATA_VS_11 0x2C57
+#define mmSPI_SHADER_USER_DATA_VS_12 0x2C58
+#define mmSPI_SHADER_USER_DATA_VS_13 0x2C59
+#define mmSPI_SHADER_USER_DATA_VS_14 0x2C5A
+#define mmSPI_SHADER_USER_DATA_VS_15 0x2C5B
+#define mmSPI_SHADER_USER_DATA_VS_2 0x2C4E
+#define mmSPI_SHADER_USER_DATA_VS_3 0x2C4F
+#define mmSPI_SHADER_USER_DATA_VS_4 0x2C50
+#define mmSPI_SHADER_USER_DATA_VS_5 0x2C51
+#define mmSPI_SHADER_USER_DATA_VS_6 0x2C52
+#define mmSPI_SHADER_USER_DATA_VS_7 0x2C53
+#define mmSPI_SHADER_USER_DATA_VS_8 0x2C54
+#define mmSPI_SHADER_USER_DATA_VS_9 0x2C55
+#define mmSPI_SHADER_Z_FORMAT 0xA1C4
+#define mmSPI_SLAVE_DEBUG_BUSY 0x24D3
+#define mmSPI_SX_EXPORT_BUFFER_SIZES 0x24D9
+#define mmSPI_SX_SCOREBOARD_BUFFER_SIZES 0x24DA
+#define mmSPI_TMPRING_SIZE 0xA1BA
+#define mmSPI_VS_OUT_CONFIG 0xA1B1
+#define mmSQ_ALU_CLK_CTRL 0x2360
+#define mmSQ_BUF_RSRC_WORD0 0x23C0
+#define mmSQ_BUF_RSRC_WORD1 0x23C1
+#define mmSQ_BUF_RSRC_WORD2 0x23C2
+#define mmSQ_BUF_RSRC_WORD3 0x23C3
+#define mmSQC_CACHES 0x2302
+#define mmSQC_CONFIG 0x2301
+#define mmSQ_CONFIG 0x2300
+#define mmSQC_SECDED_CNT 0x23A0
+#define mmSQ_DEBUG_STS_GLOBAL 0x2309
+#define mmSQ_DED_CNT 0x23A2
+#define mmSQ_DED_INFO 0x23A3
+#define mmSQ_DS_0 0x237F
+#define mmSQ_DS_1 0x237F
+#define mmSQ_EXP_0 0x237F
+#define mmSQ_EXP_1 0x237F
+#define mmSQ_FIFO_SIZES 0x2305
+#define mmSQ_IMG_RSRC_WORD0 0x23C4
+#define mmSQ_IMG_RSRC_WORD1 0x23C5
+#define mmSQ_IMG_RSRC_WORD2 0x23C6
+#define mmSQ_IMG_RSRC_WORD3 0x23C7
+#define mmSQ_IMG_RSRC_WORD4 0x23C8
+#define mmSQ_IMG_RSRC_WORD5 0x23C9
+#define mmSQ_IMG_RSRC_WORD6 0x23CA
+#define mmSQ_IMG_RSRC_WORD7 0x23CB
+#define mmSQ_IMG_SAMP_WORD0 0x23CC
+#define mmSQ_IMG_SAMP_WORD1 0x23CD
+#define mmSQ_IMG_SAMP_WORD2 0x23CE
+#define mmSQ_IMG_SAMP_WORD3 0x23CF
+#define mmSQ_IND_CMD 0x237A
+#define mmSQ_IND_DATA 0x2379
+#define mmSQ_IND_INDEX 0x2378
+#define mmSQ_INST 0x237F
+#define mmSQ_LB_CTR_CTRL 0x2398
+#define mmSQ_LB_DATA_ALU_CYCLES 0x2399
+#define mmSQ_LB_DATA_ALU_STALLS 0x239B
+#define mmSQ_LB_DATA_TEX_CYCLES 0x239A
+#define mmSQ_LB_DATA_TEX_STALLS 0x239C
+#define mmSQ_MIMG_0 0x237F
+#define mmSQ_MIMG_1 0x237F
+#define mmSQ_MTBUF_0 0x237F
+#define mmSQ_MTBUF_1 0x237F
+#define mmSQ_MUBUF_0 0x237F
+#define mmSQ_MUBUF_1 0x237F
+#define mmSQ_PERFCOUNTER0_HI 0x2321
+#define mmSQ_PERFCOUNTER0_LO 0x2320
+#define mmSQ_PERFCOUNTER0_SELECT 0x2340
+#define mmSQ_PERFCOUNTER10_HI 0x2335
+#define mmSQ_PERFCOUNTER10_LO 0x2334
+#define mmSQ_PERFCOUNTER10_SELECT 0x234A
+#define mmSQ_PERFCOUNTER11_HI 0x2337
+#define mmSQ_PERFCOUNTER11_LO 0x2336
+#define mmSQ_PERFCOUNTER11_SELECT 0x234B
+#define mmSQ_PERFCOUNTER12_HI 0x2339
+#define mmSQ_PERFCOUNTER12_LO 0x2338
+#define mmSQ_PERFCOUNTER12_SELECT 0x234C
+#define mmSQ_PERFCOUNTER13_HI 0x233B
+#define mmSQ_PERFCOUNTER13_LO 0x233A
+#define mmSQ_PERFCOUNTER13_SELECT 0x234D
+#define mmSQ_PERFCOUNTER14_HI 0x233D
+#define mmSQ_PERFCOUNTER14_LO 0x233C
+#define mmSQ_PERFCOUNTER14_SELECT 0x234E
+#define mmSQ_PERFCOUNTER15_HI 0x233F
+#define mmSQ_PERFCOUNTER15_LO 0x233E
+#define mmSQ_PERFCOUNTER15_SELECT 0x234F
+#define mmSQ_PERFCOUNTER1_HI 0x2323
+#define mmSQ_PERFCOUNTER1_LO 0x2322
+#define mmSQ_PERFCOUNTER1_SELECT 0x2341
+#define mmSQ_PERFCOUNTER2_HI 0x2325
+#define mmSQ_PERFCOUNTER2_LO 0x2324
+#define mmSQ_PERFCOUNTER2_SELECT 0x2342
+#define mmSQ_PERFCOUNTER3_HI 0x2327
+#define mmSQ_PERFCOUNTER3_LO 0x2326
+#define mmSQ_PERFCOUNTER3_SELECT 0x2343
+#define mmSQ_PERFCOUNTER4_HI 0x2329
+#define mmSQ_PERFCOUNTER4_LO 0x2328
+#define mmSQ_PERFCOUNTER4_SELECT 0x2344
+#define mmSQ_PERFCOUNTER5_HI 0x232B
+#define mmSQ_PERFCOUNTER5_LO 0x232A
+#define mmSQ_PERFCOUNTER5_SELECT 0x2345
+#define mmSQ_PERFCOUNTER6_HI 0x232D
+#define mmSQ_PERFCOUNTER6_LO 0x232C
+#define mmSQ_PERFCOUNTER6_SELECT 0x2346
+#define mmSQ_PERFCOUNTER7_HI 0x232F
+#define mmSQ_PERFCOUNTER7_LO 0x232E
+#define mmSQ_PERFCOUNTER7_SELECT 0x2347
+#define mmSQ_PERFCOUNTER8_HI 0x2331
+#define mmSQ_PERFCOUNTER8_LO 0x2330
+#define mmSQ_PERFCOUNTER8_SELECT 0x2348
+#define mmSQ_PERFCOUNTER9_HI 0x2333
+#define mmSQ_PERFCOUNTER9_LO 0x2332
+#define mmSQ_PERFCOUNTER9_SELECT 0x2349
+#define mmSQ_PERFCOUNTER_CTRL 0x2306
+#define mmSQ_POWER_THROTTLE 0x2396
+#define mmSQ_POWER_THROTTLE2 0x2397
+#define mmSQ_RANDOM_WAVE_PRI 0x2303
+#define mmSQ_REG_CREDITS 0x2304
+#define mmSQ_SEC_CNT 0x23A1
+#define mmSQ_SMRD 0x237F
+#define mmSQ_SOP1 0x237F
+#define mmSQ_SOP2 0x237F
+#define mmSQ_SOPC 0x237F
+#define mmSQ_SOPK 0x237F
+#define mmSQ_SOPP 0x237F
+#define mmSQ_TEX_CLK_CTRL 0x2361
+#define mmSQ_THREAD_TRACE_BASE 0x2380
+#define mmSQ_THREAD_TRACE_CNTR 0x2390
+#define mmSQ_THREAD_TRACE_CTRL 0x238F
+#define mmSQ_THREAD_TRACE_HIWATER 0x2392
+#define mmSQ_THREAD_TRACE_MASK 0x2382
+#define mmSQ_THREAD_TRACE_MODE 0x238E
+#define mmSQ_THREAD_TRACE_PERF_MASK 0x2384
+#define mmSQ_THREAD_TRACE_SIZE 0x2381
+#define mmSQ_THREAD_TRACE_STATUS 0x238D
+#define mmSQ_THREAD_TRACE_TOKEN_MASK 0x2383
+#define mmSQ_THREAD_TRACE_USERDATA_0 0x2388
+#define mmSQ_THREAD_TRACE_USERDATA_1 0x2389
+#define mmSQ_THREAD_TRACE_USERDATA_2 0x238A
+#define mmSQ_THREAD_TRACE_USERDATA_3 0x238B
+#define mmSQ_THREAD_TRACE_WORD_CMN 0x23B0
+#define mmSQ_THREAD_TRACE_WORD_EVENT 0x23B0
+#define mmSQ_THREAD_TRACE_WORD_INST 0x23B0
+#define mmSQ_THREAD_TRACE_WORD_INST_PC_1_OF_2 0x23B0
+#define mmSQ_THREAD_TRACE_WORD_INST_PC_2_OF_2 0x23B1
+#define mmSQ_THREAD_TRACE_WORD_INST_USERDATA_1_OF_2 0x23B0
+#define mmSQ_THREAD_TRACE_WORD_INST_USERDATA_2_OF_2 0x23B1
+#define mmSQ_THREAD_TRACE_WORD_ISSUE 0x23B0
+#define mmSQ_THREAD_TRACE_WORD_MISC 0x23B0
+#define mmSQ_THREAD_TRACE_WORD_PERF_1_OF_2 0x23B0
+#define mmSQ_THREAD_TRACE_WORD_PERF_2_OF_2 0x23B1
+#define mmSQ_THREAD_TRACE_WORD_REG_1_OF_2 0x23B0
+#define mmSQ_THREAD_TRACE_WORD_REG_2_OF_2 0x23B0
+#define mmSQ_THREAD_TRACE_WORD_TIME 0x23B0
+#define mmSQ_THREAD_TRACE_WORD_TIMESTAMP_1_OF_2 0x23B0
+#define mmSQ_THREAD_TRACE_WORD_TIMESTAMP_2_OF_2 0x23B1
+#define mmSQ_THREAD_TRACE_WORD_WAVE 0x23B0
+#define mmSQ_THREAD_TRACE_WORD_WAVE_START 0x23B0
+#define mmSQ_THREAD_TRACE_WPTR 0x238C
+#define mmSQ_TIME_HI 0x237C
+#define mmSQ_TIME_LO 0x237D
+#define mmSQ_VINTRP 0x237F
+#define mmSQ_VOP1 0x237F
+#define mmSQ_VOP2 0x237F
+#define mmSQ_VOP3_0 0x237F
+#define mmSQ_VOP3_0_SDST_ENC 0x237F
+#define mmSQ_VOP3_1 0x237F
+#define mmSQ_VOPC 0x237F
+#define mmSX_DEBUG_1 0x2418
+#define mmSX_DEBUG_BUSY 0x2414
+#define mmSX_DEBUG_BUSY_2 0x2415
+#define mmSX_DEBUG_BUSY_3 0x2416
+#define mmSX_DEBUG_BUSY_4 0x2417
+#define mmSX_PERFCOUNTER0_HI 0x2421
+#define mmSX_PERFCOUNTER0_LO 0x2420
+#define mmSX_PERFCOUNTER0_SELECT 0x241C
+#define mmSX_PERFCOUNTER1_HI 0x2423
+#define mmSX_PERFCOUNTER1_LO 0x2422
+#define mmSX_PERFCOUNTER1_SELECT 0x241D
+#define mmSX_PERFCOUNTER2_HI 0x2425
+#define mmSX_PERFCOUNTER2_LO 0x2424
+#define mmSX_PERFCOUNTER2_SELECT 0x241E
+#define mmSX_PERFCOUNTER3_HI 0x2427
+#define mmSX_PERFCOUNTER3_LO 0x2426
+#define mmSX_PERFCOUNTER3_SELECT 0x241F
+#define mmTA_BC_BASE_ADDR 0xA020
+#define mmTA_CGTT_CTRL 0x2544
+#define mmTA_CNTL 0x2541
+#define mmTA_CNTL_AUX 0x2542
+#define mmTA_CS_BC_BASE_ADDR 0x2543
+#define mmTA_DEBUG_DATA 0x254D
+#define mmTA_DEBUG_INDEX 0x254C
+#define mmTA_PERFCOUNTER0_HI 0x2556
+#define mmTA_PERFCOUNTER0_LO 0x2555
+#define mmTA_PERFCOUNTER0_SELECT 0x2554
+#define mmTA_PERFCOUNTER1_HI 0x2562
+#define mmTA_PERFCOUNTER1_LO 0x2561
+#define mmTA_PERFCOUNTER1_SELECT 0x2560
+#define mmTA_SCRATCH 0x2564
+#define mmTA_STATUS 0x2548
+#define mmTCA_CGTT_SCLK_CTRL 0x2BC1
+#define mmTCA_CTRL 0x2BC0
+#define mmTCA_PERFCOUNTER0_HI 0x2BD2
+#define mmTCA_PERFCOUNTER0_LO 0x2BD1
+#define mmTCA_PERFCOUNTER0_SELECT 0x2BD0
+#define mmTCA_PERFCOUNTER1_HI 0x2BD5
+#define mmTCA_PERFCOUNTER1_LO 0x2BD4
+#define mmTCA_PERFCOUNTER1_SELECT 0x2BD3
+#define mmTCA_PERFCOUNTER2_HI 0x2BD8
+#define mmTCA_PERFCOUNTER2_LO 0x2BD7
+#define mmTCA_PERFCOUNTER2_SELECT 0x2BD6
+#define mmTCA_PERFCOUNTER3_HI 0x2BDB
+#define mmTCA_PERFCOUNTER3_LO 0x2BDA
+#define mmTCA_PERFCOUNTER3_SELECT 0x2BD9
+#define mmTCC_CGTT_SCLK_CTRL 0x2B81
+#define mmTCC_CTRL 0x2B80
+#define mmTCC_EDC_COUNTER 0x2B82
+#define mmTCC_PERFCOUNTER0_HI 0x2B92
+#define mmTCC_PERFCOUNTER0_LO 0x2B91
+#define mmTCC_PERFCOUNTER0_SELECT 0x2B90
+#define mmTCC_PERFCOUNTER1_HI 0x2B95
+#define mmTCC_PERFCOUNTER1_LO 0x2B94
+#define mmTCC_PERFCOUNTER1_SELECT 0x2B93
+#define mmTCC_PERFCOUNTER2_HI 0x2B98
+#define mmTCC_PERFCOUNTER2_LO 0x2B97
+#define mmTCC_PERFCOUNTER2_SELECT 0x2B96
+#define mmTCC_PERFCOUNTER3_HI 0x2B9B
+#define mmTCC_PERFCOUNTER3_LO 0x2B9A
+#define mmTCC_PERFCOUNTER3_SELECT 0x2B99
+#define mmTCI_CNTL_1 0x2B62
+#define mmTCI_CNTL_2 0x2B63
+#define mmTCI_STATUS 0x2B61
+#define mmTCP_ADDR_CONFIG 0x2B05
+#define mmTCP_BUFFER_ADDR_HASH_CNTL 0x2B16
+#define mmTCP_CHAN_STEER_HI 0x2B04
+#define mmTCP_CHAN_STEER_LO 0x2B03
+#define mmTCP_CNTL 0x2B02
+#define mmTCP_CREDIT 0x2B06
+#define mmTCP_EDC_COUNTER 0x2B17
+#define mmTCP_INVALIDATE 0x2B00
+#define mmTCP_PERFCOUNTER0_HI 0x2B0A
+#define mmTCP_PERFCOUNTER0_LO 0x2B0B
+#define mmTCP_PERFCOUNTER0_SELECT 0x2B09
+#define mmTCP_PERFCOUNTER1_HI 0x2B0D
+#define mmTCP_PERFCOUNTER1_LO 0x2B0E
+#define mmTCP_PERFCOUNTER1_SELECT 0x2B0C
+#define mmTCP_PERFCOUNTER2_HI 0x2B10
+#define mmTCP_PERFCOUNTER2_LO 0x2B11
+#define mmTCP_PERFCOUNTER2_SELECT 0x2B0F
+#define mmTCP_PERFCOUNTER3_HI 0x2B13
+#define mmTCP_PERFCOUNTER3_LO 0x2B14
+#define mmTCP_PERFCOUNTER3_SELECT 0x2B12
+#define mmTCP_STATUS 0x2B01
+#define mmTD_CGTT_CTRL 0x2527
+#define mmTD_CNTL 0x2525
+#define mmTD_DEBUG_DATA 0x2529
+#define mmTD_DEBUG_INDEX 0x2528
+#define mmTD_PERFCOUNTER0_HI 0x252E
+#define mmTD_PERFCOUNTER0_LO 0x252D
+#define mmTD_PERFCOUNTER0_SELECT 0x252C
+#define mmTD_SCRATCH 0x2530
+#define mmTD_STATUS 0x2526
+#define mmUSER_SQC_BANK_DISABLE 0x2308
+#define mmVGT_CACHE_INVALIDATION 0x2231
+#define mmVGT_CNTL_STATUS 0x223C
+#define mmVGT_DEBUG_CNTL 0x2238
+#define mmVGT_DEBUG_DATA 0x2239
+#define mmVGT_DMA_BASE 0xA1FA
+#define mmVGT_DMA_BASE_HI 0xA1F9
+#define mmVGT_DMA_DATA_FIFO_DEPTH 0x222D
+#define mmVGT_DMA_INDEX_TYPE 0xA29F
+#define mmVGT_DMA_MAX_SIZE 0xA29E
+#define mmVGT_DMA_NUM_INSTANCES 0xA2A2
+#define mmVGT_DMA_REQ_FIFO_DEPTH 0x222E
+#define mmVGT_DMA_SIZE 0xA29D
+#define mmVGT_DRAW_INIT_FIFO_DEPTH 0x222F
+#define mmVGT_DRAW_INITIATOR 0xA1FC
+#define mmVGT_ENHANCE 0xA294
+#define mmVGT_ESGS_RING_ITEMSIZE 0xA2AB
+#define mmVGT_ESGS_RING_SIZE 0x2232
+#define mmVGT_ES_PER_GS 0xA296
+#define mmVGT_EVENT_ADDRESS_REG 0xA1FE
+#define mmVGT_EVENT_INITIATOR 0xA2A4
+#define mmVGT_FIFO_DEPTHS 0x2234
+#define mmVGT_GROUP_DECR 0xA28B
+#define mmVGT_GROUP_FIRST_DECR 0xA28A
+#define mmVGT_GROUP_PRIM_TYPE 0xA289
+#define mmVGT_GROUP_VECT_0_CNTL 0xA28C
+#define mmVGT_GROUP_VECT_0_FMT_CNTL 0xA28E
+#define mmVGT_GROUP_VECT_1_CNTL 0xA28D
+#define mmVGT_GROUP_VECT_1_FMT_CNTL 0xA28F
+#define mmVGT_GS_INSTANCE_CNT 0xA2E4
+#define mmVGT_GS_MAX_VERT_OUT 0xA2CE
+#define mmVGT_GS_MODE 0xA290
+#define mmVGT_GS_OUT_PRIM_TYPE 0xA29B
+#define mmVGT_GS_PER_ES 0xA295
+#define mmVGT_GS_PER_VS 0xA297
+#define mmVGT_GS_VERTEX_REUSE 0x2235
+#define mmVGT_GS_VERT_ITEMSIZE 0xA2D7
+#define mmVGT_GS_VERT_ITEMSIZE_1 0xA2D8
+#define mmVGT_GS_VERT_ITEMSIZE_2 0xA2D9
+#define mmVGT_GS_VERT_ITEMSIZE_3 0xA2DA
+#define mmVGT_GSVS_RING_ITEMSIZE 0xA2AC
+#define mmVGT_GSVS_RING_OFFSET_1 0xA298
+#define mmVGT_GSVS_RING_OFFSET_2 0xA299
+#define mmVGT_GSVS_RING_OFFSET_3 0xA29A
+#define mmVGT_GSVS_RING_SIZE 0x2233
+#define mmVGT_HOS_CNTL 0xA285
+#define mmVGT_HOS_MAX_TESS_LEVEL 0xA286
+#define mmVGT_HOS_MIN_TESS_LEVEL 0xA287
+#define mmVGT_HOS_REUSE_DEPTH 0xA288
+#define mmVGT_HS_OFFCHIP_PARAM 0x226C
+#define mmVGT_IMMED_DATA 0xA1FD
+#define mmVGT_INDEX_TYPE 0x2257
+#define mmVGT_INDX_OFFSET 0xA102
+#define mmVGT_INSTANCE_STEP_RATE_0 0xA2A8
+#define mmVGT_INSTANCE_STEP_RATE_1 0xA2A9
+#define mmVGT_LAST_COPY_STATE 0x2230
+#define mmVGT_LS_HS_CONFIG 0xA2D6
+#define mmVGT_MAX_VTX_INDX 0xA100
+#define mmVGT_MC_LAT_CNTL 0x2236
+#define mmVGT_MIN_VTX_INDX 0xA101
+#define mmVGT_MULTI_PRIM_IB_RESET_EN 0xA2A5
+#define mmVGT_MULTI_PRIM_IB_RESET_INDX 0xA103
+#define mmVGT_NUM_INDICES 0x225C
+#define mmVGT_NUM_INSTANCES 0x225D
+#define mmVGT_OUT_DEALLOC_CNTL 0xA317
+#define mmVGT_OUTPUT_PATH_CNTL 0xA284
+#define mmVGT_PERFCOUNTER0_HI 0x224D
+#define mmVGT_PERFCOUNTER0_LO 0x224C
+#define mmVGT_PERFCOUNTER0_SELECT 0x2248
+#define mmVGT_PERFCOUNTER1_HI 0x224F
+#define mmVGT_PERFCOUNTER1_LO 0x224E
+#define mmVGT_PERFCOUNTER1_SELECT 0x2249
+#define mmVGT_PERFCOUNTER2_HI 0x2251
+#define mmVGT_PERFCOUNTER2_LO 0x2250
+#define mmVGT_PERFCOUNTER2_SELECT 0x224A
+#define mmVGT_PERFCOUNTER3_HI 0x2253
+#define mmVGT_PERFCOUNTER3_LO 0x2252
+#define mmVGT_PERFCOUNTER3_SELECT 0x224B
+#define mmVGT_PERFCOUNTER_SEID_MASK 0x2247
+#define mmVGT_PRIMITIVEID_EN 0xA2A1
+#define mmVGT_PRIMITIVEID_RESET 0xA2A3
+#define mmVGT_PRIMITIVE_TYPE 0x2256
+#define mmVGT_REUSE_OFF 0xA2AD
+#define mmVGT_SHADER_STAGES_EN 0xA2D5
+#define mmVGT_STRMOUT_BUFFER_CONFIG 0xA2E6
+#define mmVGT_STRMOUT_BUFFER_FILLED_SIZE_0 0x2258
+#define mmVGT_STRMOUT_BUFFER_FILLED_SIZE_1 0x2259
+#define mmVGT_STRMOUT_BUFFER_FILLED_SIZE_2 0x225A
+#define mmVGT_STRMOUT_BUFFER_FILLED_SIZE_3 0x225B
+#define mmVGT_STRMOUT_BUFFER_OFFSET_0 0xA2B7
+#define mmVGT_STRMOUT_BUFFER_OFFSET_1 0xA2BB
+#define mmVGT_STRMOUT_BUFFER_OFFSET_2 0xA2BF
+#define mmVGT_STRMOUT_BUFFER_OFFSET_3 0xA2C3
+#define mmVGT_STRMOUT_BUFFER_SIZE_0 0xA2B4
+#define mmVGT_STRMOUT_BUFFER_SIZE_1 0xA2B8
+#define mmVGT_STRMOUT_BUFFER_SIZE_2 0xA2BC
+#define mmVGT_STRMOUT_BUFFER_SIZE_3 0xA2C0
+#define mmVGT_STRMOUT_CONFIG 0xA2E5
+#define mmVGT_STRMOUT_DRAW_OPAQUE_BUFFER_FILLED_SIZE 0xA2CB
+#define mmVGT_STRMOUT_DRAW_OPAQUE_OFFSET 0xA2CA
+#define mmVGT_STRMOUT_DRAW_OPAQUE_VERTEX_STRIDE 0xA2CC
+#define mmVGT_STRMOUT_VTX_STRIDE_0 0xA2B5
+#define mmVGT_STRMOUT_VTX_STRIDE_1 0xA2B9
+#define mmVGT_STRMOUT_VTX_STRIDE_2 0xA2BD
+#define mmVGT_STRMOUT_VTX_STRIDE_3 0xA2C1
+#define mmVGT_SYS_CONFIG 0x2263
+#define mmVGT_TF_MEMORY_BASE 0x226E
+#define mmVGT_TF_PARAM 0xA2DB
+#define mmVGT_TF_RING_SIZE 0x2262
+#define mmVGT_VERTEX_REUSE_BLOCK_CNTL 0xA316
+#define mmVGT_VTX_CNT_EN 0xA2AE
+#define mmVGT_VTX_VECT_EJECT_REG 0x222C
+
+/* manually added from old sid.h */
+#define mmCB_PERFCOUNTER0_SELECT0 0x2688
+#define mmCB_PERFCOUNTER1_SELECT0 0x268A
+#define mmCB_PERFCOUNTER1_SELECT1 0x268B
+#define mmCB_PERFCOUNTER2_SELECT0 0x268C
+#define mmCB_PERFCOUNTER2_SELECT1 0x268D
+#define mmCB_PERFCOUNTER3_SELECT0 0x268E
+#define mmCB_PERFCOUNTER3_SELECT1 0x268F
+#define mmCP_COHER_CNTL2 0x217A
+#define mmCP_DEBUG 0x307F
+#define mmRLC_SERDES_MASTER_BUSY_0 0x3119
+#define mmRLC_SERDES_MASTER_BUSY_1 0x311A
+#define mmRLC_RL_BASE 0x30C1
+#define mmRLC_RL_SIZE 0x30C2
+#define mmRLC_UCODE_ADDR 0x30CB
+#define mmRLC_UCODE_DATA 0x30CC
+#define mmRLC_GCPM_GENERAL_3 0x311E
+#define mmRLC_SERDES_WR_MASTER_MASK_0 0x3115
+#define mmRLC_SERDES_WR_MASTER_MASK_1 0x3116
+#define mmRLC_TTOP_D 0x3105
+#define mmRLC_CLEAR_STATE_RESTORE_BASE 0x30C8
+#define mmRLC_PG_AO_CU_MASK 0x310B
+#define mmSPI_STATIC_THREAD_MGMT_3 0x243A
+
+#endif
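
[Editor's note, not part of the patch: the generated headers pair each register's dword offset (the mm* defines above) with per-field masks and shifts (the REG__FIELD_MASK / REG__FIELD__SHIFT defines in the gfx_6_0_sh_mask.h file added below). A minimal sketch of how a driver typically decodes a field with them; read32() is a hypothetical stand-in for the driver's real MMIO read helper, and mmCB_BLEND0_CONTROL is assumed to be defined earlier in gfx_6_0_d.h.]

    #include <stdint.h>
    #include "gfx_6_0_d.h"        /* register dword offsets (mm*)            */
    #include "gfx_6_0_sh_mask.h"  /* field masks and shifts (REG__FIELD_*)   */

    /* Hypothetical MMIO accessor standing in for the driver's register
     * read helper; takes a dword offset such as mmCB_BLEND0_CONTROL.
     */
    extern uint32_t read32(uint32_t dword_offset);

    /* Decode one field: mask off the field's bits in the register value,
     * then shift them down so the field starts at bit 0.
     */
    static inline uint32_t cb_blend0_color_srcblend(void)
    {
            uint32_t v = read32(mmCB_BLEND0_CONTROL);

            return (v & CB_BLEND0_CONTROL__COLOR_SRCBLEND_MASK) >>
                    CB_BLEND0_CONTROL__COLOR_SRCBLEND__SHIFT;
    }
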
diff --git a/drivers/gpu/drm/amd/include/asic_reg/gca/gfx_6_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/gca/gfx_6_0_sh_mask.h
new file mode 100644
index 000000000000..b5e634749665
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/gca/gfx_6_0_sh_mask.h
@@ -0,0 +1,12821 @@
+/*
+ *
+ * Copyright (C) 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef GFX_6_0_SH_MASK_H
+#define GFX_6_0_SH_MASK_H
+
+#define BCI_DEBUG_READ__DATA_MASK 0x00ffffffL
+#define BCI_DEBUG_READ__DATA__SHIFT 0x00000000
+#define CB_BLEND0_CONTROL__ALPHA_COMB_FCN_MASK 0x00e00000L
+#define CB_BLEND0_CONTROL__ALPHA_COMB_FCN__SHIFT 0x00000015
+#define CB_BLEND0_CONTROL__ALPHA_DESTBLEND_MASK 0x1f000000L
+#define CB_BLEND0_CONTROL__ALPHA_DESTBLEND__SHIFT 0x00000018
+#define CB_BLEND0_CONTROL__ALPHA_SRCBLEND_MASK 0x001f0000L
+#define CB_BLEND0_CONTROL__ALPHA_SRCBLEND__SHIFT 0x00000010
+#define CB_BLEND0_CONTROL__COLOR_COMB_FCN_MASK 0x000000e0L
+#define CB_BLEND0_CONTROL__COLOR_COMB_FCN__SHIFT 0x00000005
+#define CB_BLEND0_CONTROL__COLOR_DESTBLEND_MASK 0x00001f00L
+#define CB_BLEND0_CONTROL__COLOR_DESTBLEND__SHIFT 0x00000008
+#define CB_BLEND0_CONTROL__COLOR_SRCBLEND_MASK 0x0000001fL
+#define CB_BLEND0_CONTROL__COLOR_SRCBLEND__SHIFT 0x00000000
+#define CB_BLEND0_CONTROL__DISABLE_ROP3_MASK 0x80000000L
+#define CB_BLEND0_CONTROL__DISABLE_ROP3__SHIFT 0x0000001f
+#define CB_BLEND0_CONTROL__ENABLE_MASK 0x40000000L
+#define CB_BLEND0_CONTROL__ENABLE__SHIFT 0x0000001e
+#define CB_BLEND0_CONTROL__SEPARATE_ALPHA_BLEND_MASK 0x20000000L
+#define CB_BLEND0_CONTROL__SEPARATE_ALPHA_BLEND__SHIFT 0x0000001d
+#define CB_BLEND1_CONTROL__ALPHA_COMB_FCN_MASK 0x00e00000L
+#define CB_BLEND1_CONTROL__ALPHA_COMB_FCN__SHIFT 0x00000015
+#define CB_BLEND1_CONTROL__ALPHA_DESTBLEND_MASK 0x1f000000L
+#define CB_BLEND1_CONTROL__ALPHA_DESTBLEND__SHIFT 0x00000018
+#define CB_BLEND1_CONTROL__ALPHA_SRCBLEND_MASK 0x001f0000L
+#define CB_BLEND1_CONTROL__ALPHA_SRCBLEND__SHIFT 0x00000010
+#define CB_BLEND1_CONTROL__COLOR_COMB_FCN_MASK 0x000000e0L
+#define CB_BLEND1_CONTROL__COLOR_COMB_FCN__SHIFT 0x00000005
+#define CB_BLEND1_CONTROL__COLOR_DESTBLEND_MASK 0x00001f00L
+#define CB_BLEND1_CONTROL__COLOR_DESTBLEND__SHIFT 0x00000008
+#define CB_BLEND1_CONTROL__COLOR_SRCBLEND_MASK 0x0000001fL
+#define CB_BLEND1_CONTROL__COLOR_SRCBLEND__SHIFT 0x00000000
+#define CB_BLEND1_CONTROL__DISABLE_ROP3_MASK 0x80000000L
+#define CB_BLEND1_CONTROL__DISABLE_ROP3__SHIFT 0x0000001f
+#define CB_BLEND1_CONTROL__ENABLE_MASK 0x40000000L
+#define CB_BLEND1_CONTROL__ENABLE__SHIFT 0x0000001e
+#define CB_BLEND1_CONTROL__SEPARATE_ALPHA_BLEND_MASK 0x20000000L
+#define CB_BLEND1_CONTROL__SEPARATE_ALPHA_BLEND__SHIFT 0x0000001d
+#define CB_BLEND2_CONTROL__ALPHA_COMB_FCN_MASK 0x00e00000L
+#define CB_BLEND2_CONTROL__ALPHA_COMB_FCN__SHIFT 0x00000015
+#define CB_BLEND2_CONTROL__ALPHA_DESTBLEND_MASK 0x1f000000L
+#define CB_BLEND2_CONTROL__ALPHA_DESTBLEND__SHIFT 0x00000018
+#define CB_BLEND2_CONTROL__ALPHA_SRCBLEND_MASK 0x001f0000L
+#define CB_BLEND2_CONTROL__ALPHA_SRCBLEND__SHIFT 0x00000010
+#define CB_BLEND2_CONTROL__COLOR_COMB_FCN_MASK 0x000000e0L
+#define CB_BLEND2_CONTROL__COLOR_COMB_FCN__SHIFT 0x00000005
+#define CB_BLEND2_CONTROL__COLOR_DESTBLEND_MASK 0x00001f00L
+#define CB_BLEND2_CONTROL__COLOR_DESTBLEND__SHIFT 0x00000008
+#define CB_BLEND2_CONTROL__COLOR_SRCBLEND_MASK 0x0000001fL
+#define CB_BLEND2_CONTROL__COLOR_SRCBLEND__SHIFT 0x00000000
+#define CB_BLEND2_CONTROL__DISABLE_ROP3_MASK 0x80000000L
+#define CB_BLEND2_CONTROL__DISABLE_ROP3__SHIFT 0x0000001f
+#define CB_BLEND2_CONTROL__ENABLE_MASK 0x40000000L
+#define CB_BLEND2_CONTROL__ENABLE__SHIFT 0x0000001e
+#define CB_BLEND2_CONTROL__SEPARATE_ALPHA_BLEND_MASK 0x20000000L
+#define CB_BLEND2_CONTROL__SEPARATE_ALPHA_BLEND__SHIFT 0x0000001d
+#define CB_BLEND3_CONTROL__ALPHA_COMB_FCN_MASK 0x00e00000L
+#define CB_BLEND3_CONTROL__ALPHA_COMB_FCN__SHIFT 0x00000015
+#define CB_BLEND3_CONTROL__ALPHA_DESTBLEND_MASK 0x1f000000L
+#define CB_BLEND3_CONTROL__ALPHA_DESTBLEND__SHIFT 0x00000018
+#define CB_BLEND3_CONTROL__ALPHA_SRCBLEND_MASK 0x001f0000L
+#define CB_BLEND3_CONTROL__ALPHA_SRCBLEND__SHIFT 0x00000010
+#define CB_BLEND3_CONTROL__COLOR_COMB_FCN_MASK 0x000000e0L
+#define CB_BLEND3_CONTROL__COLOR_COMB_FCN__SHIFT 0x00000005
+#define CB_BLEND3_CONTROL__COLOR_DESTBLEND_MASK 0x00001f00L
+#define CB_BLEND3_CONTROL__COLOR_DESTBLEND__SHIFT 0x00000008
+#define CB_BLEND3_CONTROL__COLOR_SRCBLEND_MASK 0x0000001fL
+#define CB_BLEND3_CONTROL__COLOR_SRCBLEND__SHIFT 0x00000000
+#define CB_BLEND3_CONTROL__DISABLE_ROP3_MASK 0x80000000L
+#define CB_BLEND3_CONTROL__DISABLE_ROP3__SHIFT 0x0000001f
+#define CB_BLEND3_CONTROL__ENABLE_MASK 0x40000000L
+#define CB_BLEND3_CONTROL__ENABLE__SHIFT 0x0000001e
+#define CB_BLEND3_CONTROL__SEPARATE_ALPHA_BLEND_MASK 0x20000000L
+#define CB_BLEND3_CONTROL__SEPARATE_ALPHA_BLEND__SHIFT 0x0000001d
+#define CB_BLEND4_CONTROL__ALPHA_COMB_FCN_MASK 0x00e00000L
+#define CB_BLEND4_CONTROL__ALPHA_COMB_FCN__SHIFT 0x00000015
+#define CB_BLEND4_CONTROL__ALPHA_DESTBLEND_MASK 0x1f000000L
+#define CB_BLEND4_CONTROL__ALPHA_DESTBLEND__SHIFT 0x00000018
+#define CB_BLEND4_CONTROL__ALPHA_SRCBLEND_MASK 0x001f0000L
+#define CB_BLEND4_CONTROL__ALPHA_SRCBLEND__SHIFT 0x00000010
+#define CB_BLEND4_CONTROL__COLOR_COMB_FCN_MASK 0x000000e0L
+#define CB_BLEND4_CONTROL__COLOR_COMB_FCN__SHIFT 0x00000005
+#define CB_BLEND4_CONTROL__COLOR_DESTBLEND_MASK 0x00001f00L
+#define CB_BLEND4_CONTROL__COLOR_DESTBLEND__SHIFT 0x00000008
+#define CB_BLEND4_CONTROL__COLOR_SRCBLEND_MASK 0x0000001fL
+#define CB_BLEND4_CONTROL__COLOR_SRCBLEND__SHIFT 0x00000000
+#define CB_BLEND4_CONTROL__DISABLE_ROP3_MASK 0x80000000L
+#define CB_BLEND4_CONTROL__DISABLE_ROP3__SHIFT 0x0000001f
+#define CB_BLEND4_CONTROL__ENABLE_MASK 0x40000000L
+#define CB_BLEND4_CONTROL__ENABLE__SHIFT 0x0000001e
+#define CB_BLEND4_CONTROL__SEPARATE_ALPHA_BLEND_MASK 0x20000000L
+#define CB_BLEND4_CONTROL__SEPARATE_ALPHA_BLEND__SHIFT 0x0000001d
+#define CB_BLEND5_CONTROL__ALPHA_COMB_FCN_MASK 0x00e00000L
+#define CB_BLEND5_CONTROL__ALPHA_COMB_FCN__SHIFT 0x00000015
+#define CB_BLEND5_CONTROL__ALPHA_DESTBLEND_MASK 0x1f000000L
+#define CB_BLEND5_CONTROL__ALPHA_DESTBLEND__SHIFT 0x00000018
+#define CB_BLEND5_CONTROL__ALPHA_SRCBLEND_MASK 0x001f0000L
+#define CB_BLEND5_CONTROL__ALPHA_SRCBLEND__SHIFT 0x00000010
+#define CB_BLEND5_CONTROL__COLOR_COMB_FCN_MASK 0x000000e0L
+#define CB_BLEND5_CONTROL__COLOR_COMB_FCN__SHIFT 0x00000005
+#define CB_BLEND5_CONTROL__COLOR_DESTBLEND_MASK 0x00001f00L
+#define CB_BLEND5_CONTROL__COLOR_DESTBLEND__SHIFT 0x00000008
+#define CB_BLEND5_CONTROL__COLOR_SRCBLEND_MASK 0x0000001fL
+#define CB_BLEND5_CONTROL__COLOR_SRCBLEND__SHIFT 0x00000000
+#define CB_BLEND5_CONTROL__DISABLE_ROP3_MASK 0x80000000L
+#define CB_BLEND5_CONTROL__DISABLE_ROP3__SHIFT 0x0000001f
+#define CB_BLEND5_CONTROL__ENABLE_MASK 0x40000000L
+#define CB_BLEND5_CONTROL__ENABLE__SHIFT 0x0000001e
+#define CB_BLEND5_CONTROL__SEPARATE_ALPHA_BLEND_MASK 0x20000000L
+#define CB_BLEND5_CONTROL__SEPARATE_ALPHA_BLEND__SHIFT 0x0000001d
+#define CB_BLEND6_CONTROL__ALPHA_COMB_FCN_MASK 0x00e00000L
+#define CB_BLEND6_CONTROL__ALPHA_COMB_FCN__SHIFT 0x00000015
+#define CB_BLEND6_CONTROL__ALPHA_DESTBLEND_MASK 0x1f000000L
+#define CB_BLEND6_CONTROL__ALPHA_DESTBLEND__SHIFT 0x00000018
+#define CB_BLEND6_CONTROL__ALPHA_SRCBLEND_MASK 0x001f0000L
+#define CB_BLEND6_CONTROL__ALPHA_SRCBLEND__SHIFT 0x00000010
+#define CB_BLEND6_CONTROL__COLOR_COMB_FCN_MASK 0x000000e0L
+#define CB_BLEND6_CONTROL__COLOR_COMB_FCN__SHIFT 0x00000005
+#define CB_BLEND6_CONTROL__COLOR_DESTBLEND_MASK 0x00001f00L
+#define CB_BLEND6_CONTROL__COLOR_DESTBLEND__SHIFT 0x00000008
+#define CB_BLEND6_CONTROL__COLOR_SRCBLEND_MASK 0x0000001fL
+#define CB_BLEND6_CONTROL__COLOR_SRCBLEND__SHIFT 0x00000000
+#define CB_BLEND6_CONTROL__DISABLE_ROP3_MASK 0x80000000L
+#define CB_BLEND6_CONTROL__DISABLE_ROP3__SHIFT 0x0000001f
+#define CB_BLEND6_CONTROL__ENABLE_MASK 0x40000000L
+#define CB_BLEND6_CONTROL__ENABLE__SHIFT 0x0000001e
+#define CB_BLEND6_CONTROL__SEPARATE_ALPHA_BLEND_MASK 0x20000000L
+#define CB_BLEND6_CONTROL__SEPARATE_ALPHA_BLEND__SHIFT 0x0000001d
+#define CB_BLEND7_CONTROL__ALPHA_COMB_FCN_MASK 0x00e00000L
+#define CB_BLEND7_CONTROL__ALPHA_COMB_FCN__SHIFT 0x00000015
+#define CB_BLEND7_CONTROL__ALPHA_DESTBLEND_MASK 0x1f000000L
+#define CB_BLEND7_CONTROL__ALPHA_DESTBLEND__SHIFT 0x00000018
+#define CB_BLEND7_CONTROL__ALPHA_SRCBLEND_MASK 0x001f0000L
+#define CB_BLEND7_CONTROL__ALPHA_SRCBLEND__SHIFT 0x00000010
+#define CB_BLEND7_CONTROL__COLOR_COMB_FCN_MASK 0x000000e0L
+#define CB_BLEND7_CONTROL__COLOR_COMB_FCN__SHIFT 0x00000005
+#define CB_BLEND7_CONTROL__COLOR_DESTBLEND_MASK 0x00001f00L
+#define CB_BLEND7_CONTROL__COLOR_DESTBLEND__SHIFT 0x00000008
+#define CB_BLEND7_CONTROL__COLOR_SRCBLEND_MASK 0x0000001fL
+#define CB_BLEND7_CONTROL__COLOR_SRCBLEND__SHIFT 0x00000000
+#define CB_BLEND7_CONTROL__DISABLE_ROP3_MASK 0x80000000L
+#define CB_BLEND7_CONTROL__DISABLE_ROP3__SHIFT 0x0000001f
+#define CB_BLEND7_CONTROL__ENABLE_MASK 0x40000000L
+#define CB_BLEND7_CONTROL__ENABLE__SHIFT 0x0000001e
+#define CB_BLEND7_CONTROL__SEPARATE_ALPHA_BLEND_MASK 0x20000000L
+#define CB_BLEND7_CONTROL__SEPARATE_ALPHA_BLEND__SHIFT 0x0000001d
+#define CB_BLEND_ALPHA__BLEND_ALPHA_MASK 0xffffffffL
+#define CB_BLEND_ALPHA__BLEND_ALPHA__SHIFT 0x00000000
+#define CB_BLEND_BLUE__BLEND_BLUE_MASK 0xffffffffL
+#define CB_BLEND_BLUE__BLEND_BLUE__SHIFT 0x00000000
+#define CB_BLEND_GREEN__BLEND_GREEN_MASK 0xffffffffL
+#define CB_BLEND_GREEN__BLEND_GREEN__SHIFT 0x00000000
+#define CB_BLEND_RED__BLEND_RED_MASK 0xffffffffL
+#define CB_BLEND_RED__BLEND_RED__SHIFT 0x00000000
+#define CB_CGTT_SCLK_CTRL__OFF_HYSTERESIS_MASK 0x00000ff0L
+#define CB_CGTT_SCLK_CTRL__OFF_HYSTERESIS__SHIFT 0x00000004
+#define CB_CGTT_SCLK_CTRL__ON_DELAY_MASK 0x0000000fL
+#define CB_CGTT_SCLK_CTRL__ON_DELAY__SHIFT 0x00000000
+#define CB_CGTT_SCLK_CTRL__SOFT_OVERRIDE0_MASK 0x80000000L
+#define CB_CGTT_SCLK_CTRL__SOFT_OVERRIDE0__SHIFT 0x0000001f
+#define CB_CGTT_SCLK_CTRL__SOFT_OVERRIDE1_MASK 0x40000000L
+#define CB_CGTT_SCLK_CTRL__SOFT_OVERRIDE1__SHIFT 0x0000001e
+#define CB_CGTT_SCLK_CTRL__SOFT_OVERRIDE2_MASK 0x20000000L
+#define CB_CGTT_SCLK_CTRL__SOFT_OVERRIDE2__SHIFT 0x0000001d
+#define CB_CGTT_SCLK_CTRL__SOFT_OVERRIDE3_MASK 0x10000000L
+#define CB_CGTT_SCLK_CTRL__SOFT_OVERRIDE3__SHIFT 0x0000001c
+#define CB_CGTT_SCLK_CTRL__SOFT_OVERRIDE4_MASK 0x08000000L
+#define CB_CGTT_SCLK_CTRL__SOFT_OVERRIDE4__SHIFT 0x0000001b
+#define CB_CGTT_SCLK_CTRL__SOFT_OVERRIDE5_MASK 0x04000000L
+#define CB_CGTT_SCLK_CTRL__SOFT_OVERRIDE5__SHIFT 0x0000001a
+#define CB_CGTT_SCLK_CTRL__SOFT_OVERRIDE6_MASK 0x02000000L
+#define CB_CGTT_SCLK_CTRL__SOFT_OVERRIDE6__SHIFT 0x00000019
+#define CB_CGTT_SCLK_CTRL__SOFT_OVERRIDE7_MASK 0x01000000L
+#define CB_CGTT_SCLK_CTRL__SOFT_OVERRIDE7__SHIFT 0x00000018
+#define CB_COLOR0_ATTRIB__FMASK_BANK_HEIGHT_MASK 0x00000c00L
+#define CB_COLOR0_ATTRIB__FMASK_BANK_HEIGHT__SHIFT 0x0000000a
+#define CB_COLOR0_ATTRIB__FMASK_TILE_MODE_INDEX_MASK 0x000003e0L
+#define CB_COLOR0_ATTRIB__FMASK_TILE_MODE_INDEX__SHIFT 0x00000005
+#define CB_COLOR0_ATTRIB__FORCE_DST_ALPHA_1_MASK 0x00020000L
+#define CB_COLOR0_ATTRIB__FORCE_DST_ALPHA_1__SHIFT 0x00000011
+#define CB_COLOR0_ATTRIB__NUM_FRAGMENTS_MASK 0x00018000L
+#define CB_COLOR0_ATTRIB__NUM_FRAGMENTS__SHIFT 0x0000000f
+#define CB_COLOR0_ATTRIB__NUM_SAMPLES_MASK 0x00007000L
+#define CB_COLOR0_ATTRIB__NUM_SAMPLES__SHIFT 0x0000000c
+#define CB_COLOR0_ATTRIB__TILE_MODE_INDEX_MASK 0x0000001fL
+#define CB_COLOR0_ATTRIB__TILE_MODE_INDEX__SHIFT 0x00000000
+#define CB_COLOR0_BASE__BASE_256B_MASK 0xffffffffL
+#define CB_COLOR0_BASE__BASE_256B__SHIFT 0x00000000
+#define CB_COLOR0_CLEAR_WORD0__CLEAR_WORD0_MASK 0xffffffffL
+#define CB_COLOR0_CLEAR_WORD0__CLEAR_WORD0__SHIFT 0x00000000
+#define CB_COLOR0_CLEAR_WORD1__CLEAR_WORD1_MASK 0xffffffffL
+#define CB_COLOR0_CLEAR_WORD1__CLEAR_WORD1__SHIFT 0x00000000
+#define CB_COLOR0_CMASK__BASE_256B_MASK 0xffffffffL
+#define CB_COLOR0_CMASK__BASE_256B__SHIFT 0x00000000
+#define CB_COLOR0_CMASK_SLICE__TILE_MAX_MASK 0x00003fffL
+#define CB_COLOR0_CMASK_SLICE__TILE_MAX__SHIFT 0x00000000
+#define CB_COLOR0_FMASK__BASE_256B_MASK 0xffffffffL
+#define CB_COLOR0_FMASK__BASE_256B__SHIFT 0x00000000
+#define CB_COLOR0_FMASK_SLICE__TILE_MAX_MASK 0x003fffffL
+#define CB_COLOR0_FMASK_SLICE__TILE_MAX__SHIFT 0x00000000
+#define CB_COLOR0_INFO__BLEND_BYPASS_MASK 0x00010000L
+#define CB_COLOR0_INFO__BLEND_BYPASS__SHIFT 0x00000010
+#define CB_COLOR0_INFO__BLEND_CLAMP_MASK 0x00008000L
+#define CB_COLOR0_INFO__BLEND_CLAMP__SHIFT 0x0000000f
+#define CB_COLOR0_INFO__BLEND_OPT_DISCARD_PIXEL_MASK 0x03800000L
+#define CB_COLOR0_INFO__BLEND_OPT_DISCARD_PIXEL__SHIFT 0x00000017
+#define CB_COLOR0_INFO__BLEND_OPT_DONT_RD_DST_MASK 0x00700000L
+#define CB_COLOR0_INFO__BLEND_OPT_DONT_RD_DST__SHIFT 0x00000014
+#define CB_COLOR0_INFO__CMASK_IS_LINEAR_MASK 0x00080000L
+#define CB_COLOR0_INFO__CMASK_IS_LINEAR__SHIFT 0x00000013
+#define CB_COLOR0_INFO__COMPRESSION_MASK 0x00004000L
+#define CB_COLOR0_INFO__COMPRESSION__SHIFT 0x0000000e
+#define CB_COLOR0_INFO__COMP_SWAP_MASK 0x00001800L
+#define CB_COLOR0_INFO__COMP_SWAP__SHIFT 0x0000000b
+#define CB_COLOR0_INFO__ENDIAN_MASK 0x00000003L
+#define CB_COLOR0_INFO__ENDIAN__SHIFT 0x00000000
+#define CB_COLOR0_INFO__FAST_CLEAR_MASK 0x00002000L
+#define CB_COLOR0_INFO__FAST_CLEAR__SHIFT 0x0000000d
+#define CB_COLOR0_INFO__FMASK_COMPRESSION_DISABLE_MASK 0x04000000L
+#define CB_COLOR0_INFO__FMASK_COMPRESSION_DISABLE__SHIFT 0x0000001a
+#define CB_COLOR0_INFO__FORMAT_MASK 0x0000007cL
+#define CB_COLOR0_INFO__FORMAT__SHIFT 0x00000002
+#define CB_COLOR0_INFO__LINEAR_GENERAL_MASK 0x00000080L
+#define CB_COLOR0_INFO__LINEAR_GENERAL__SHIFT 0x00000007
+#define CB_COLOR0_INFO__NUMBER_TYPE_MASK 0x00000700L
+#define CB_COLOR0_INFO__NUMBER_TYPE__SHIFT 0x00000008
+#define CB_COLOR0_INFO__ROUND_MODE_MASK 0x00040000L
+#define CB_COLOR0_INFO__ROUND_MODE__SHIFT 0x00000012
+#define CB_COLOR0_INFO__SIMPLE_FLOAT_MASK 0x00020000L
+#define CB_COLOR0_INFO__SIMPLE_FLOAT__SHIFT 0x00000011
+#define CB_COLOR0_PITCH__FMASK_TILE_MAX_MASK 0x7ff00000L
+#define CB_COLOR0_PITCH__FMASK_TILE_MAX__SHIFT 0x00000014
+#define CB_COLOR0_PITCH__TILE_MAX_MASK 0x000007ffL
+#define CB_COLOR0_PITCH__TILE_MAX__SHIFT 0x00000000
+#define CB_COLOR0_SLICE__TILE_MAX_MASK 0x003fffffL
+#define CB_COLOR0_SLICE__TILE_MAX__SHIFT 0x00000000
+#define CB_COLOR0_VIEW__SLICE_MAX_MASK 0x00ffe000L
+#define CB_COLOR0_VIEW__SLICE_MAX__SHIFT 0x0000000d
+#define CB_COLOR0_VIEW__SLICE_START_MASK 0x000007ffL
+#define CB_COLOR0_VIEW__SLICE_START__SHIFT 0x00000000
+#define CB_COLOR1_ATTRIB__FMASK_BANK_HEIGHT_MASK 0x00000c00L
+#define CB_COLOR1_ATTRIB__FMASK_BANK_HEIGHT__SHIFT 0x0000000a
+#define CB_COLOR1_ATTRIB__FMASK_TILE_MODE_INDEX_MASK 0x000003e0L
+#define CB_COLOR1_ATTRIB__FMASK_TILE_MODE_INDEX__SHIFT 0x00000005
+#define CB_COLOR1_ATTRIB__FORCE_DST_ALPHA_1_MASK 0x00020000L
+#define CB_COLOR1_ATTRIB__FORCE_DST_ALPHA_1__SHIFT 0x00000011
+#define CB_COLOR1_ATTRIB__NUM_FRAGMENTS_MASK 0x00018000L
+#define CB_COLOR1_ATTRIB__NUM_FRAGMENTS__SHIFT 0x0000000f
+#define CB_COLOR1_ATTRIB__NUM_SAMPLES_MASK 0x00007000L
+#define CB_COLOR1_ATTRIB__NUM_SAMPLES__SHIFT 0x0000000c
+#define CB_COLOR1_ATTRIB__TILE_MODE_INDEX_MASK 0x0000001fL
+#define CB_COLOR1_ATTRIB__TILE_MODE_INDEX__SHIFT 0x00000000
+#define CB_COLOR1_BASE__BASE_256B_MASK 0xffffffffL
+#define CB_COLOR1_BASE__BASE_256B__SHIFT 0x00000000
+#define CB_COLOR1_CLEAR_WORD0__CLEAR_WORD0_MASK 0xffffffffL
+#define CB_COLOR1_CLEAR_WORD0__CLEAR_WORD0__SHIFT 0x00000000
+#define CB_COLOR1_CLEAR_WORD1__CLEAR_WORD1_MASK 0xffffffffL
+#define CB_COLOR1_CLEAR_WORD1__CLEAR_WORD1__SHIFT 0x00000000
+#define CB_COLOR1_CMASK__BASE_256B_MASK 0xffffffffL
+#define CB_COLOR1_CMASK__BASE_256B__SHIFT 0x00000000
+#define CB_COLOR1_CMASK_SLICE__TILE_MAX_MASK 0x00003fffL
+#define CB_COLOR1_CMASK_SLICE__TILE_MAX__SHIFT 0x00000000
+#define CB_COLOR1_FMASK__BASE_256B_MASK 0xffffffffL
+#define CB_COLOR1_FMASK__BASE_256B__SHIFT 0x00000000
+#define CB_COLOR1_FMASK_SLICE__TILE_MAX_MASK 0x003fffffL
+#define CB_COLOR1_FMASK_SLICE__TILE_MAX__SHIFT 0x00000000
+#define CB_COLOR1_INFO__BLEND_BYPASS_MASK 0x00010000L
+#define CB_COLOR1_INFO__BLEND_BYPASS__SHIFT 0x00000010
+#define CB_COLOR1_INFO__BLEND_CLAMP_MASK 0x00008000L
+#define CB_COLOR1_INFO__BLEND_CLAMP__SHIFT 0x0000000f
+#define CB_COLOR1_INFO__BLEND_OPT_DISCARD_PIXEL_MASK 0x03800000L
+#define CB_COLOR1_INFO__BLEND_OPT_DISCARD_PIXEL__SHIFT 0x00000017
+#define CB_COLOR1_INFO__BLEND_OPT_DONT_RD_DST_MASK 0x00700000L
+#define CB_COLOR1_INFO__BLEND_OPT_DONT_RD_DST__SHIFT 0x00000014
+#define CB_COLOR1_INFO__CMASK_IS_LINEAR_MASK 0x00080000L
+#define CB_COLOR1_INFO__CMASK_IS_LINEAR__SHIFT 0x00000013
+#define CB_COLOR1_INFO__COMPRESSION_MASK 0x00004000L
+#define CB_COLOR1_INFO__COMPRESSION__SHIFT 0x0000000e
+#define CB_COLOR1_INFO__COMP_SWAP_MASK 0x00001800L
+#define CB_COLOR1_INFO__COMP_SWAP__SHIFT 0x0000000b
+#define CB_COLOR1_INFO__ENDIAN_MASK 0x00000003L
+#define CB_COLOR1_INFO__ENDIAN__SHIFT 0x00000000
+#define CB_COLOR1_INFO__FAST_CLEAR_MASK 0x00002000L
+#define CB_COLOR1_INFO__FAST_CLEAR__SHIFT 0x0000000d
+#define CB_COLOR1_INFO__FMASK_COMPRESSION_DISABLE_MASK 0x04000000L
+#define CB_COLOR1_INFO__FMASK_COMPRESSION_DISABLE__SHIFT 0x0000001a
+#define CB_COLOR1_INFO__FORMAT_MASK 0x0000007cL
+#define CB_COLOR1_INFO__FORMAT__SHIFT 0x00000002
+#define CB_COLOR1_INFO__LINEAR_GENERAL_MASK 0x00000080L
+#define CB_COLOR1_INFO__LINEAR_GENERAL__SHIFT 0x00000007
+#define CB_COLOR1_INFO__NUMBER_TYPE_MASK 0x00000700L
+#define CB_COLOR1_INFO__NUMBER_TYPE__SHIFT 0x00000008
+#define CB_COLOR1_INFO__ROUND_MODE_MASK 0x00040000L
+#define CB_COLOR1_INFO__ROUND_MODE__SHIFT 0x00000012
+#define CB_COLOR1_INFO__SIMPLE_FLOAT_MASK 0x00020000L
+#define CB_COLOR1_INFO__SIMPLE_FLOAT__SHIFT 0x00000011
+#define CB_COLOR1_PITCH__FMASK_TILE_MAX_MASK 0x7ff00000L
+#define CB_COLOR1_PITCH__FMASK_TILE_MAX__SHIFT 0x00000014
+#define CB_COLOR1_PITCH__TILE_MAX_MASK 0x000007ffL
+#define CB_COLOR1_PITCH__TILE_MAX__SHIFT 0x00000000
+#define CB_COLOR1_SLICE__TILE_MAX_MASK 0x003fffffL
+#define CB_COLOR1_SLICE__TILE_MAX__SHIFT 0x00000000
+#define CB_COLOR1_VIEW__SLICE_MAX_MASK 0x00ffe000L
+#define CB_COLOR1_VIEW__SLICE_MAX__SHIFT 0x0000000d
+#define CB_COLOR1_VIEW__SLICE_START_MASK 0x000007ffL
+#define CB_COLOR1_VIEW__SLICE_START__SHIFT 0x00000000
+#define CB_COLOR2_ATTRIB__FMASK_BANK_HEIGHT_MASK 0x00000c00L
+#define CB_COLOR2_ATTRIB__FMASK_BANK_HEIGHT__SHIFT 0x0000000a
+#define CB_COLOR2_ATTRIB__FMASK_TILE_MODE_INDEX_MASK 0x000003e0L
+#define CB_COLOR2_ATTRIB__FMASK_TILE_MODE_INDEX__SHIFT 0x00000005
+#define CB_COLOR2_ATTRIB__FORCE_DST_ALPHA_1_MASK 0x00020000L
+#define CB_COLOR2_ATTRIB__FORCE_DST_ALPHA_1__SHIFT 0x00000011
+#define CB_COLOR2_ATTRIB__NUM_FRAGMENTS_MASK 0x00018000L
+#define CB_COLOR2_ATTRIB__NUM_FRAGMENTS__SHIFT 0x0000000f
+#define CB_COLOR2_ATTRIB__NUM_SAMPLES_MASK 0x00007000L
+#define CB_COLOR2_ATTRIB__NUM_SAMPLES__SHIFT 0x0000000c
+#define CB_COLOR2_ATTRIB__TILE_MODE_INDEX_MASK 0x0000001fL
+#define CB_COLOR2_ATTRIB__TILE_MODE_INDEX__SHIFT 0x00000000
+#define CB_COLOR2_BASE__BASE_256B_MASK 0xffffffffL
+#define CB_COLOR2_BASE__BASE_256B__SHIFT 0x00000000
+#define CB_COLOR2_CLEAR_WORD0__CLEAR_WORD0_MASK 0xffffffffL
+#define CB_COLOR2_CLEAR_WORD0__CLEAR_WORD0__SHIFT 0x00000000
+#define CB_COLOR2_CLEAR_WORD1__CLEAR_WORD1_MASK 0xffffffffL
+#define CB_COLOR2_CLEAR_WORD1__CLEAR_WORD1__SHIFT 0x00000000
+#define CB_COLOR2_CMASK__BASE_256B_MASK 0xffffffffL
+#define CB_COLOR2_CMASK__BASE_256B__SHIFT 0x00000000
+#define CB_COLOR2_CMASK_SLICE__TILE_MAX_MASK 0x00003fffL
+#define CB_COLOR2_CMASK_SLICE__TILE_MAX__SHIFT 0x00000000
+#define CB_COLOR2_FMASK__BASE_256B_MASK 0xffffffffL
+#define CB_COLOR2_FMASK__BASE_256B__SHIFT 0x00000000
+#define CB_COLOR2_FMASK_SLICE__TILE_MAX_MASK 0x003fffffL
+#define CB_COLOR2_FMASK_SLICE__TILE_MAX__SHIFT 0x00000000
+#define CB_COLOR2_INFO__BLEND_BYPASS_MASK 0x00010000L
+#define CB_COLOR2_INFO__BLEND_BYPASS__SHIFT 0x00000010
+#define CB_COLOR2_INFO__BLEND_CLAMP_MASK 0x00008000L
+#define CB_COLOR2_INFO__BLEND_CLAMP__SHIFT 0x0000000f
+#define CB_COLOR2_INFO__BLEND_OPT_DISCARD_PIXEL_MASK 0x03800000L
+#define CB_COLOR2_INFO__BLEND_OPT_DISCARD_PIXEL__SHIFT 0x00000017
+#define CB_COLOR2_INFO__BLEND_OPT_DONT_RD_DST_MASK 0x00700000L
+#define CB_COLOR2_INFO__BLEND_OPT_DONT_RD_DST__SHIFT 0x00000014
+#define CB_COLOR2_INFO__CMASK_IS_LINEAR_MASK 0x00080000L
+#define CB_COLOR2_INFO__CMASK_IS_LINEAR__SHIFT 0x00000013
+#define CB_COLOR2_INFO__COMPRESSION_MASK 0x00004000L
+#define CB_COLOR2_INFO__COMPRESSION__SHIFT 0x0000000e
+#define CB_COLOR2_INFO__COMP_SWAP_MASK 0x00001800L
+#define CB_COLOR2_INFO__COMP_SWAP__SHIFT 0x0000000b
+#define CB_COLOR2_INFO__ENDIAN_MASK 0x00000003L
+#define CB_COLOR2_INFO__ENDIAN__SHIFT 0x00000000
+#define CB_COLOR2_INFO__FAST_CLEAR_MASK 0x00002000L
+#define CB_COLOR2_INFO__FAST_CLEAR__SHIFT 0x0000000d
+#define CB_COLOR2_INFO__FMASK_COMPRESSION_DISABLE_MASK 0x04000000L
+#define CB_COLOR2_INFO__FMASK_COMPRESSION_DISABLE__SHIFT 0x0000001a
+#define CB_COLOR2_INFO__FORMAT_MASK 0x0000007cL
+#define CB_COLOR2_INFO__FORMAT__SHIFT 0x00000002
+#define CB_COLOR2_INFO__LINEAR_GENERAL_MASK 0x00000080L
+#define CB_COLOR2_INFO__LINEAR_GENERAL__SHIFT 0x00000007
+#define CB_COLOR2_INFO__NUMBER_TYPE_MASK 0x00000700L
+#define CB_COLOR2_INFO__NUMBER_TYPE__SHIFT 0x00000008
+#define CB_COLOR2_INFO__ROUND_MODE_MASK 0x00040000L
+#define CB_COLOR2_INFO__ROUND_MODE__SHIFT 0x00000012
+#define CB_COLOR2_INFO__SIMPLE_FLOAT_MASK 0x00020000L
+#define CB_COLOR2_INFO__SIMPLE_FLOAT__SHIFT 0x00000011
+#define CB_COLOR2_PITCH__FMASK_TILE_MAX_MASK 0x7ff00000L
+#define CB_COLOR2_PITCH__FMASK_TILE_MAX__SHIFT 0x00000014
+#define CB_COLOR2_PITCH__TILE_MAX_MASK 0x000007ffL
+#define CB_COLOR2_PITCH__TILE_MAX__SHIFT 0x00000000
+#define CB_COLOR2_SLICE__TILE_MAX_MASK 0x003fffffL
+#define CB_COLOR2_SLICE__TILE_MAX__SHIFT 0x00000000
+#define CB_COLOR2_VIEW__SLICE_MAX_MASK 0x00ffe000L
+#define CB_COLOR2_VIEW__SLICE_MAX__SHIFT 0x0000000d
+#define CB_COLOR2_VIEW__SLICE_START_MASK 0x000007ffL
+#define CB_COLOR2_VIEW__SLICE_START__SHIFT 0x00000000
+#define CB_COLOR3_ATTRIB__FMASK_BANK_HEIGHT_MASK 0x00000c00L
+#define CB_COLOR3_ATTRIB__FMASK_BANK_HEIGHT__SHIFT 0x0000000a
+#define CB_COLOR3_ATTRIB__FMASK_TILE_MODE_INDEX_MASK 0x000003e0L
+#define CB_COLOR3_ATTRIB__FMASK_TILE_MODE_INDEX__SHIFT 0x00000005
+#define CB_COLOR3_ATTRIB__FORCE_DST_ALPHA_1_MASK 0x00020000L
+#define CB_COLOR3_ATTRIB__FORCE_DST_ALPHA_1__SHIFT 0x00000011
+#define CB_COLOR3_ATTRIB__NUM_FRAGMENTS_MASK 0x00018000L
+#define CB_COLOR3_ATTRIB__NUM_FRAGMENTS__SHIFT 0x0000000f
+#define CB_COLOR3_ATTRIB__NUM_SAMPLES_MASK 0x00007000L
+#define CB_COLOR3_ATTRIB__NUM_SAMPLES__SHIFT 0x0000000c
+#define CB_COLOR3_ATTRIB__TILE_MODE_INDEX_MASK 0x0000001fL
+#define CB_COLOR3_ATTRIB__TILE_MODE_INDEX__SHIFT 0x00000000
+#define CB_COLOR3_BASE__BASE_256B_MASK 0xffffffffL
+#define CB_COLOR3_BASE__BASE_256B__SHIFT 0x00000000
+#define CB_COLOR3_CLEAR_WORD0__CLEAR_WORD0_MASK 0xffffffffL
+#define CB_COLOR3_CLEAR_WORD0__CLEAR_WORD0__SHIFT 0x00000000
+#define CB_COLOR3_CLEAR_WORD1__CLEAR_WORD1_MASK 0xffffffffL
+#define CB_COLOR3_CLEAR_WORD1__CLEAR_WORD1__SHIFT 0x00000000
+#define CB_COLOR3_CMASK__BASE_256B_MASK 0xffffffffL
+#define CB_COLOR3_CMASK__BASE_256B__SHIFT 0x00000000
+#define CB_COLOR3_CMASK_SLICE__TILE_MAX_MASK 0x00003fffL
+#define CB_COLOR3_CMASK_SLICE__TILE_MAX__SHIFT 0x00000000
+#define CB_COLOR3_FMASK__BASE_256B_MASK 0xffffffffL
+#define CB_COLOR3_FMASK__BASE_256B__SHIFT 0x00000000
+#define CB_COLOR3_FMASK_SLICE__TILE_MAX_MASK 0x003fffffL
+#define CB_COLOR3_FMASK_SLICE__TILE_MAX__SHIFT 0x00000000
+#define CB_COLOR3_INFO__BLEND_BYPASS_MASK 0x00010000L
+#define CB_COLOR3_INFO__BLEND_BYPASS__SHIFT 0x00000010
+#define CB_COLOR3_INFO__BLEND_CLAMP_MASK 0x00008000L
+#define CB_COLOR3_INFO__BLEND_CLAMP__SHIFT 0x0000000f
+#define CB_COLOR3_INFO__BLEND_OPT_DISCARD_PIXEL_MASK 0x03800000L
+#define CB_COLOR3_INFO__BLEND_OPT_DISCARD_PIXEL__SHIFT 0x00000017
+#define CB_COLOR3_INFO__BLEND_OPT_DONT_RD_DST_MASK 0x00700000L
+#define CB_COLOR3_INFO__BLEND_OPT_DONT_RD_DST__SHIFT 0x00000014
+#define CB_COLOR3_INFO__CMASK_IS_LINEAR_MASK 0x00080000L
+#define CB_COLOR3_INFO__CMASK_IS_LINEAR__SHIFT 0x00000013
+#define CB_COLOR3_INFO__COMPRESSION_MASK 0x00004000L
+#define CB_COLOR3_INFO__COMPRESSION__SHIFT 0x0000000e
+#define CB_COLOR3_INFO__COMP_SWAP_MASK 0x00001800L
+#define CB_COLOR3_INFO__COMP_SWAP__SHIFT 0x0000000b
+#define CB_COLOR3_INFO__ENDIAN_MASK 0x00000003L
+#define CB_COLOR3_INFO__ENDIAN__SHIFT 0x00000000
+#define CB_COLOR3_INFO__FAST_CLEAR_MASK 0x00002000L
+#define CB_COLOR3_INFO__FAST_CLEAR__SHIFT 0x0000000d
+#define CB_COLOR3_INFO__FMASK_COMPRESSION_DISABLE_MASK 0x04000000L
+#define CB_COLOR3_INFO__FMASK_COMPRESSION_DISABLE__SHIFT 0x0000001a
+#define CB_COLOR3_INFO__FORMAT_MASK 0x0000007cL
+#define CB_COLOR3_INFO__FORMAT__SHIFT 0x00000002
+#define CB_COLOR3_INFO__LINEAR_GENERAL_MASK 0x00000080L
+#define CB_COLOR3_INFO__LINEAR_GENERAL__SHIFT 0x00000007
+#define CB_COLOR3_INFO__NUMBER_TYPE_MASK 0x00000700L
+#define CB_COLOR3_INFO__NUMBER_TYPE__SHIFT 0x00000008
+#define CB_COLOR3_INFO__ROUND_MODE_MASK 0x00040000L
+#define CB_COLOR3_INFO__ROUND_MODE__SHIFT 0x00000012
+#define CB_COLOR3_INFO__SIMPLE_FLOAT_MASK 0x00020000L
+#define CB_COLOR3_INFO__SIMPLE_FLOAT__SHIFT 0x00000011
+#define CB_COLOR3_PITCH__FMASK_TILE_MAX_MASK 0x7ff00000L
+#define CB_COLOR3_PITCH__FMASK_TILE_MAX__SHIFT 0x00000014
+#define CB_COLOR3_PITCH__TILE_MAX_MASK 0x000007ffL
+#define CB_COLOR3_PITCH__TILE_MAX__SHIFT 0x00000000
+#define CB_COLOR3_SLICE__TILE_MAX_MASK 0x003fffffL
+#define CB_COLOR3_SLICE__TILE_MAX__SHIFT 0x00000000
+#define CB_COLOR3_VIEW__SLICE_MAX_MASK 0x00ffe000L
+#define CB_COLOR3_VIEW__SLICE_MAX__SHIFT 0x0000000d
+#define CB_COLOR3_VIEW__SLICE_START_MASK 0x000007ffL
+#define CB_COLOR3_VIEW__SLICE_START__SHIFT 0x00000000
+#define CB_COLOR4_ATTRIB__FMASK_BANK_HEIGHT_MASK 0x00000c00L
+#define CB_COLOR4_ATTRIB__FMASK_BANK_HEIGHT__SHIFT 0x0000000a
+#define CB_COLOR4_ATTRIB__FMASK_TILE_MODE_INDEX_MASK 0x000003e0L
+#define CB_COLOR4_ATTRIB__FMASK_TILE_MODE_INDEX__SHIFT 0x00000005
+#define CB_COLOR4_ATTRIB__FORCE_DST_ALPHA_1_MASK 0x00020000L
+#define CB_COLOR4_ATTRIB__FORCE_DST_ALPHA_1__SHIFT 0x00000011
+#define CB_COLOR4_ATTRIB__NUM_FRAGMENTS_MASK 0x00018000L
+#define CB_COLOR4_ATTRIB__NUM_FRAGMENTS__SHIFT 0x0000000f
+#define CB_COLOR4_ATTRIB__NUM_SAMPLES_MASK 0x00007000L
+#define CB_COLOR4_ATTRIB__NUM_SAMPLES__SHIFT 0x0000000c
+#define CB_COLOR4_ATTRIB__TILE_MODE_INDEX_MASK 0x0000001fL
+#define CB_COLOR4_ATTRIB__TILE_MODE_INDEX__SHIFT 0x00000000
+#define CB_COLOR4_BASE__BASE_256B_MASK 0xffffffffL
+#define CB_COLOR4_BASE__BASE_256B__SHIFT 0x00000000
+#define CB_COLOR4_CLEAR_WORD0__CLEAR_WORD0_MASK 0xffffffffL
+#define CB_COLOR4_CLEAR_WORD0__CLEAR_WORD0__SHIFT 0x00000000
+#define CB_COLOR4_CLEAR_WORD1__CLEAR_WORD1_MASK 0xffffffffL
+#define CB_COLOR4_CLEAR_WORD1__CLEAR_WORD1__SHIFT 0x00000000
+#define CB_COLOR4_CMASK__BASE_256B_MASK 0xffffffffL
+#define CB_COLOR4_CMASK__BASE_256B__SHIFT 0x00000000
+#define CB_COLOR4_CMASK_SLICE__TILE_MAX_MASK 0x00003fffL
+#define CB_COLOR4_CMASK_SLICE__TILE_MAX__SHIFT 0x00000000
+#define CB_COLOR4_FMASK__BASE_256B_MASK 0xffffffffL
+#define CB_COLOR4_FMASK__BASE_256B__SHIFT 0x00000000
+#define CB_COLOR4_FMASK_SLICE__TILE_MAX_MASK 0x003fffffL
+#define CB_COLOR4_FMASK_SLICE__TILE_MAX__SHIFT 0x00000000
+#define CB_COLOR4_INFO__BLEND_BYPASS_MASK 0x00010000L
+#define CB_COLOR4_INFO__BLEND_BYPASS__SHIFT 0x00000010
+#define CB_COLOR4_INFO__BLEND_CLAMP_MASK 0x00008000L
+#define CB_COLOR4_INFO__BLEND_CLAMP__SHIFT 0x0000000f
+#define CB_COLOR4_INFO__BLEND_OPT_DISCARD_PIXEL_MASK 0x03800000L
+#define CB_COLOR4_INFO__BLEND_OPT_DISCARD_PIXEL__SHIFT 0x00000017
+#define CB_COLOR4_INFO__BLEND_OPT_DONT_RD_DST_MASK 0x00700000L
+#define CB_COLOR4_INFO__BLEND_OPT_DONT_RD_DST__SHIFT 0x00000014
+#define CB_COLOR4_INFO__CMASK_IS_LINEAR_MASK 0x00080000L
+#define CB_COLOR4_INFO__CMASK_IS_LINEAR__SHIFT 0x00000013
+#define CB_COLOR4_INFO__COMPRESSION_MASK 0x00004000L
+#define CB_COLOR4_INFO__COMPRESSION__SHIFT 0x0000000e
+#define CB_COLOR4_INFO__COMP_SWAP_MASK 0x00001800L
+#define CB_COLOR4_INFO__COMP_SWAP__SHIFT 0x0000000b
+#define CB_COLOR4_INFO__ENDIAN_MASK 0x00000003L
+#define CB_COLOR4_INFO__ENDIAN__SHIFT 0x00000000
+#define CB_COLOR4_INFO__FAST_CLEAR_MASK 0x00002000L
+#define CB_COLOR4_INFO__FAST_CLEAR__SHIFT 0x0000000d
+#define CB_COLOR4_INFO__FMASK_COMPRESSION_DISABLE_MASK 0x04000000L
+#define CB_COLOR4_INFO__FMASK_COMPRESSION_DISABLE__SHIFT 0x0000001a
+#define CB_COLOR4_INFO__FORMAT_MASK 0x0000007cL
+#define CB_COLOR4_INFO__FORMAT__SHIFT 0x00000002
+#define CB_COLOR4_INFO__LINEAR_GENERAL_MASK 0x00000080L
+#define CB_COLOR4_INFO__LINEAR_GENERAL__SHIFT 0x00000007
+#define CB_COLOR4_INFO__NUMBER_TYPE_MASK 0x00000700L
+#define CB_COLOR4_INFO__NUMBER_TYPE__SHIFT 0x00000008
+#define CB_COLOR4_INFO__ROUND_MODE_MASK 0x00040000L
+#define CB_COLOR4_INFO__ROUND_MODE__SHIFT 0x00000012
+#define CB_COLOR4_INFO__SIMPLE_FLOAT_MASK 0x00020000L
+#define CB_COLOR4_INFO__SIMPLE_FLOAT__SHIFT 0x00000011
+#define CB_COLOR4_PITCH__FMASK_TILE_MAX_MASK 0x7ff00000L
+#define CB_COLOR4_PITCH__FMASK_TILE_MAX__SHIFT 0x00000014
+#define CB_COLOR4_PITCH__TILE_MAX_MASK 0x000007ffL
+#define CB_COLOR4_PITCH__TILE_MAX__SHIFT 0x00000000
+#define CB_COLOR4_SLICE__TILE_MAX_MASK 0x003fffffL
+#define CB_COLOR4_SLICE__TILE_MAX__SHIFT 0x00000000
+#define CB_COLOR4_VIEW__SLICE_MAX_MASK 0x00ffe000L
+#define CB_COLOR4_VIEW__SLICE_MAX__SHIFT 0x0000000d
+#define CB_COLOR4_VIEW__SLICE_START_MASK 0x000007ffL
+#define CB_COLOR4_VIEW__SLICE_START__SHIFT 0x00000000
+#define CB_COLOR5_ATTRIB__FMASK_BANK_HEIGHT_MASK 0x00000c00L
+#define CB_COLOR5_ATTRIB__FMASK_BANK_HEIGHT__SHIFT 0x0000000a
+#define CB_COLOR5_ATTRIB__FMASK_TILE_MODE_INDEX_MASK 0x000003e0L
+#define CB_COLOR5_ATTRIB__FMASK_TILE_MODE_INDEX__SHIFT 0x00000005
+#define CB_COLOR5_ATTRIB__FORCE_DST_ALPHA_1_MASK 0x00020000L
+#define CB_COLOR5_ATTRIB__FORCE_DST_ALPHA_1__SHIFT 0x00000011
+#define CB_COLOR5_ATTRIB__NUM_FRAGMENTS_MASK 0x00018000L
+#define CB_COLOR5_ATTRIB__NUM_FRAGMENTS__SHIFT 0x0000000f
+#define CB_COLOR5_ATTRIB__NUM_SAMPLES_MASK 0x00007000L
+#define CB_COLOR5_ATTRIB__NUM_SAMPLES__SHIFT 0x0000000c
+#define CB_COLOR5_ATTRIB__TILE_MODE_INDEX_MASK 0x0000001fL
+#define CB_COLOR5_ATTRIB__TILE_MODE_INDEX__SHIFT 0x00000000
+#define CB_COLOR5_BASE__BASE_256B_MASK 0xffffffffL
+#define CB_COLOR5_BASE__BASE_256B__SHIFT 0x00000000
+#define CB_COLOR5_CLEAR_WORD0__CLEAR_WORD0_MASK 0xffffffffL
+#define CB_COLOR5_CLEAR_WORD0__CLEAR_WORD0__SHIFT 0x00000000
+#define CB_COLOR5_CLEAR_WORD1__CLEAR_WORD1_MASK 0xffffffffL
+#define CB_COLOR5_CLEAR_WORD1__CLEAR_WORD1__SHIFT 0x00000000
+#define CB_COLOR5_CMASK__BASE_256B_MASK 0xffffffffL
+#define CB_COLOR5_CMASK__BASE_256B__SHIFT 0x00000000
+#define CB_COLOR5_CMASK_SLICE__TILE_MAX_MASK 0x00003fffL
+#define CB_COLOR5_CMASK_SLICE__TILE_MAX__SHIFT 0x00000000
+#define CB_COLOR5_FMASK__BASE_256B_MASK 0xffffffffL
+#define CB_COLOR5_FMASK__BASE_256B__SHIFT 0x00000000
+#define CB_COLOR5_FMASK_SLICE__TILE_MAX_MASK 0x003fffffL
+#define CB_COLOR5_FMASK_SLICE__TILE_MAX__SHIFT 0x00000000
+#define CB_COLOR5_INFO__BLEND_BYPASS_MASK 0x00010000L
+#define CB_COLOR5_INFO__BLEND_BYPASS__SHIFT 0x00000010
+#define CB_COLOR5_INFO__BLEND_CLAMP_MASK 0x00008000L
+#define CB_COLOR5_INFO__BLEND_CLAMP__SHIFT 0x0000000f
+#define CB_COLOR5_INFO__BLEND_OPT_DISCARD_PIXEL_MASK 0x03800000L
+#define CB_COLOR5_INFO__BLEND_OPT_DISCARD_PIXEL__SHIFT 0x00000017
+#define CB_COLOR5_INFO__BLEND_OPT_DONT_RD_DST_MASK 0x00700000L
+#define CB_COLOR5_INFO__BLEND_OPT_DONT_RD_DST__SHIFT 0x00000014
+#define CB_COLOR5_INFO__CMASK_IS_LINEAR_MASK 0x00080000L
+#define CB_COLOR5_INFO__CMASK_IS_LINEAR__SHIFT 0x00000013
+#define CB_COLOR5_INFO__COMPRESSION_MASK 0x00004000L
+#define CB_COLOR5_INFO__COMPRESSION__SHIFT 0x0000000e
+#define CB_COLOR5_INFO__COMP_SWAP_MASK 0x00001800L
+#define CB_COLOR5_INFO__COMP_SWAP__SHIFT 0x0000000b
+#define CB_COLOR5_INFO__ENDIAN_MASK 0x00000003L
+#define CB_COLOR5_INFO__ENDIAN__SHIFT 0x00000000
+#define CB_COLOR5_INFO__FAST_CLEAR_MASK 0x00002000L
+#define CB_COLOR5_INFO__FAST_CLEAR__SHIFT 0x0000000d
+#define CB_COLOR5_INFO__FMASK_COMPRESSION_DISABLE_MASK 0x04000000L
+#define CB_COLOR5_INFO__FMASK_COMPRESSION_DISABLE__SHIFT 0x0000001a
+#define CB_COLOR5_INFO__FORMAT_MASK 0x0000007cL
+#define CB_COLOR5_INFO__FORMAT__SHIFT 0x00000002
+#define CB_COLOR5_INFO__LINEAR_GENERAL_MASK 0x00000080L
+#define CB_COLOR5_INFO__LINEAR_GENERAL__SHIFT 0x00000007
+#define CB_COLOR5_INFO__NUMBER_TYPE_MASK 0x00000700L
+#define CB_COLOR5_INFO__NUMBER_TYPE__SHIFT 0x00000008
+#define CB_COLOR5_INFO__ROUND_MODE_MASK 0x00040000L
+#define CB_COLOR5_INFO__ROUND_MODE__SHIFT 0x00000012
+#define CB_COLOR5_INFO__SIMPLE_FLOAT_MASK 0x00020000L
+#define CB_COLOR5_INFO__SIMPLE_FLOAT__SHIFT 0x00000011
+#define CB_COLOR5_PITCH__FMASK_TILE_MAX_MASK 0x7ff00000L
+#define CB_COLOR5_PITCH__FMASK_TILE_MAX__SHIFT 0x00000014
+#define CB_COLOR5_PITCH__TILE_MAX_MASK 0x000007ffL
+#define CB_COLOR5_PITCH__TILE_MAX__SHIFT 0x00000000
+#define CB_COLOR5_SLICE__TILE_MAX_MASK 0x003fffffL
+#define CB_COLOR5_SLICE__TILE_MAX__SHIFT 0x00000000
+#define CB_COLOR5_VIEW__SLICE_MAX_MASK 0x00ffe000L
+#define CB_COLOR5_VIEW__SLICE_MAX__SHIFT 0x0000000d
+#define CB_COLOR5_VIEW__SLICE_START_MASK 0x000007ffL
+#define CB_COLOR5_VIEW__SLICE_START__SHIFT 0x00000000
+#define CB_COLOR6_ATTRIB__FMASK_BANK_HEIGHT_MASK 0x00000c00L
+#define CB_COLOR6_ATTRIB__FMASK_BANK_HEIGHT__SHIFT 0x0000000a
+#define CB_COLOR6_ATTRIB__FMASK_TILE_MODE_INDEX_MASK 0x000003e0L
+#define CB_COLOR6_ATTRIB__FMASK_TILE_MODE_INDEX__SHIFT 0x00000005
+#define CB_COLOR6_ATTRIB__FORCE_DST_ALPHA_1_MASK 0x00020000L
+#define CB_COLOR6_ATTRIB__FORCE_DST_ALPHA_1__SHIFT 0x00000011
+#define CB_COLOR6_ATTRIB__NUM_FRAGMENTS_MASK 0x00018000L
+#define CB_COLOR6_ATTRIB__NUM_FRAGMENTS__SHIFT 0x0000000f
+#define CB_COLOR6_ATTRIB__NUM_SAMPLES_MASK 0x00007000L
+#define CB_COLOR6_ATTRIB__NUM_SAMPLES__SHIFT 0x0000000c
+#define CB_COLOR6_ATTRIB__TILE_MODE_INDEX_MASK 0x0000001fL
+#define CB_COLOR6_ATTRIB__TILE_MODE_INDEX__SHIFT 0x00000000
+#define CB_COLOR6_BASE__BASE_256B_MASK 0xffffffffL
+#define CB_COLOR6_BASE__BASE_256B__SHIFT 0x00000000
+#define CB_COLOR6_CLEAR_WORD0__CLEAR_WORD0_MASK 0xffffffffL
+#define CB_COLOR6_CLEAR_WORD0__CLEAR_WORD0__SHIFT 0x00000000
+#define CB_COLOR6_CLEAR_WORD1__CLEAR_WORD1_MASK 0xffffffffL
+#define CB_COLOR6_CLEAR_WORD1__CLEAR_WORD1__SHIFT 0x00000000
+#define CB_COLOR6_CMASK__BASE_256B_MASK 0xffffffffL
+#define CB_COLOR6_CMASK__BASE_256B__SHIFT 0x00000000
+#define CB_COLOR6_CMASK_SLICE__TILE_MAX_MASK 0x00003fffL
+#define CB_COLOR6_CMASK_SLICE__TILE_MAX__SHIFT 0x00000000
+#define CB_COLOR6_FMASK__BASE_256B_MASK 0xffffffffL
+#define CB_COLOR6_FMASK__BASE_256B__SHIFT 0x00000000
+#define CB_COLOR6_FMASK_SLICE__TILE_MAX_MASK 0x003fffffL
+#define CB_COLOR6_FMASK_SLICE__TILE_MAX__SHIFT 0x00000000
+#define CB_COLOR6_INFO__BLEND_BYPASS_MASK 0x00010000L
+#define CB_COLOR6_INFO__BLEND_BYPASS__SHIFT 0x00000010
+#define CB_COLOR6_INFO__BLEND_CLAMP_MASK 0x00008000L
+#define CB_COLOR6_INFO__BLEND_CLAMP__SHIFT 0x0000000f
+#define CB_COLOR6_INFO__BLEND_OPT_DISCARD_PIXEL_MASK 0x03800000L
+#define CB_COLOR6_INFO__BLEND_OPT_DISCARD_PIXEL__SHIFT 0x00000017
+#define CB_COLOR6_INFO__BLEND_OPT_DONT_RD_DST_MASK 0x00700000L
+#define CB_COLOR6_INFO__BLEND_OPT_DONT_RD_DST__SHIFT 0x00000014
+#define CB_COLOR6_INFO__CMASK_IS_LINEAR_MASK 0x00080000L
+#define CB_COLOR6_INFO__CMASK_IS_LINEAR__SHIFT 0x00000013
+#define CB_COLOR6_INFO__COMPRESSION_MASK 0x00004000L
+#define CB_COLOR6_INFO__COMPRESSION__SHIFT 0x0000000e
+#define CB_COLOR6_INFO__COMP_SWAP_MASK 0x00001800L
+#define CB_COLOR6_INFO__COMP_SWAP__SHIFT 0x0000000b
+#define CB_COLOR6_INFO__ENDIAN_MASK 0x00000003L
+#define CB_COLOR6_INFO__ENDIAN__SHIFT 0x00000000
+#define CB_COLOR6_INFO__FAST_CLEAR_MASK 0x00002000L
+#define CB_COLOR6_INFO__FAST_CLEAR__SHIFT 0x0000000d
+#define CB_COLOR6_INFO__FMASK_COMPRESSION_DISABLE_MASK 0x04000000L
+#define CB_COLOR6_INFO__FMASK_COMPRESSION_DISABLE__SHIFT 0x0000001a
+#define CB_COLOR6_INFO__FORMAT_MASK 0x0000007cL
+#define CB_COLOR6_INFO__FORMAT__SHIFT 0x00000002
+#define CB_COLOR6_INFO__LINEAR_GENERAL_MASK 0x00000080L
+#define CB_COLOR6_INFO__LINEAR_GENERAL__SHIFT 0x00000007
+#define CB_COLOR6_INFO__NUMBER_TYPE_MASK 0x00000700L
+#define CB_COLOR6_INFO__NUMBER_TYPE__SHIFT 0x00000008
+#define CB_COLOR6_INFO__ROUND_MODE_MASK 0x00040000L
+#define CB_COLOR6_INFO__ROUND_MODE__SHIFT 0x00000012
+#define CB_COLOR6_INFO__SIMPLE_FLOAT_MASK 0x00020000L
+#define CB_COLOR6_INFO__SIMPLE_FLOAT__SHIFT 0x00000011
+#define CB_COLOR6_PITCH__FMASK_TILE_MAX_MASK 0x7ff00000L
+#define CB_COLOR6_PITCH__FMASK_TILE_MAX__SHIFT 0x00000014
+#define CB_COLOR6_PITCH__TILE_MAX_MASK 0x000007ffL
+#define CB_COLOR6_PITCH__TILE_MAX__SHIFT 0x00000000
+#define CB_COLOR6_SLICE__TILE_MAX_MASK 0x003fffffL
+#define CB_COLOR6_SLICE__TILE_MAX__SHIFT 0x00000000
+#define CB_COLOR6_VIEW__SLICE_MAX_MASK 0x00ffe000L
+#define CB_COLOR6_VIEW__SLICE_MAX__SHIFT 0x0000000d
+#define CB_COLOR6_VIEW__SLICE_START_MASK 0x000007ffL
+#define CB_COLOR6_VIEW__SLICE_START__SHIFT 0x00000000
+#define CB_COLOR7_ATTRIB__FMASK_BANK_HEIGHT_MASK 0x00000c00L
+#define CB_COLOR7_ATTRIB__FMASK_BANK_HEIGHT__SHIFT 0x0000000a
+#define CB_COLOR7_ATTRIB__FMASK_TILE_MODE_INDEX_MASK 0x000003e0L
+#define CB_COLOR7_ATTRIB__FMASK_TILE_MODE_INDEX__SHIFT 0x00000005
+#define CB_COLOR7_ATTRIB__FORCE_DST_ALPHA_1_MASK 0x00020000L
+#define CB_COLOR7_ATTRIB__FORCE_DST_ALPHA_1__SHIFT 0x00000011
+#define CB_COLOR7_ATTRIB__NUM_FRAGMENTS_MASK 0x00018000L
+#define CB_COLOR7_ATTRIB__NUM_FRAGMENTS__SHIFT 0x0000000f
+#define CB_COLOR7_ATTRIB__NUM_SAMPLES_MASK 0x00007000L
+#define CB_COLOR7_ATTRIB__NUM_SAMPLES__SHIFT 0x0000000c
+#define CB_COLOR7_ATTRIB__TILE_MODE_INDEX_MASK 0x0000001fL
+#define CB_COLOR7_ATTRIB__TILE_MODE_INDEX__SHIFT 0x00000000
+#define CB_COLOR7_BASE__BASE_256B_MASK 0xffffffffL
+#define CB_COLOR7_BASE__BASE_256B__SHIFT 0x00000000
+#define CB_COLOR7_CLEAR_WORD0__CLEAR_WORD0_MASK 0xffffffffL
+#define CB_COLOR7_CLEAR_WORD0__CLEAR_WORD0__SHIFT 0x00000000
+#define CB_COLOR7_CLEAR_WORD1__CLEAR_WORD1_MASK 0xffffffffL
+#define CB_COLOR7_CLEAR_WORD1__CLEAR_WORD1__SHIFT 0x00000000
+#define CB_COLOR7_CMASK__BASE_256B_MASK 0xffffffffL
+#define CB_COLOR7_CMASK__BASE_256B__SHIFT 0x00000000
+#define CB_COLOR7_CMASK_SLICE__TILE_MAX_MASK 0x00003fffL
+#define CB_COLOR7_CMASK_SLICE__TILE_MAX__SHIFT 0x00000000
+#define CB_COLOR7_FMASK__BASE_256B_MASK 0xffffffffL
+#define CB_COLOR7_FMASK__BASE_256B__SHIFT 0x00000000
+#define CB_COLOR7_FMASK_SLICE__TILE_MAX_MASK 0x003fffffL
+#define CB_COLOR7_FMASK_SLICE__TILE_MAX__SHIFT 0x00000000
+#define CB_COLOR7_INFO__BLEND_BYPASS_MASK 0x00010000L
+#define CB_COLOR7_INFO__BLEND_BYPASS__SHIFT 0x00000010
+#define CB_COLOR7_INFO__BLEND_CLAMP_MASK 0x00008000L
+#define CB_COLOR7_INFO__BLEND_CLAMP__SHIFT 0x0000000f
+#define CB_COLOR7_INFO__BLEND_OPT_DISCARD_PIXEL_MASK 0x03800000L
+#define CB_COLOR7_INFO__BLEND_OPT_DISCARD_PIXEL__SHIFT 0x00000017
+#define CB_COLOR7_INFO__BLEND_OPT_DONT_RD_DST_MASK 0x00700000L
+#define CB_COLOR7_INFO__BLEND_OPT_DONT_RD_DST__SHIFT 0x00000014
+#define CB_COLOR7_INFO__CMASK_IS_LINEAR_MASK 0x00080000L
+#define CB_COLOR7_INFO__CMASK_IS_LINEAR__SHIFT 0x00000013
+#define CB_COLOR7_INFO__COMPRESSION_MASK 0x00004000L
+#define CB_COLOR7_INFO__COMPRESSION__SHIFT 0x0000000e
+#define CB_COLOR7_INFO__COMP_SWAP_MASK 0x00001800L
+#define CB_COLOR7_INFO__COMP_SWAP__SHIFT 0x0000000b
+#define CB_COLOR7_INFO__ENDIAN_MASK 0x00000003L
+#define CB_COLOR7_INFO__ENDIAN__SHIFT 0x00000000
+#define CB_COLOR7_INFO__FAST_CLEAR_MASK 0x00002000L
+#define CB_COLOR7_INFO__FAST_CLEAR__SHIFT 0x0000000d
+#define CB_COLOR7_INFO__FMASK_COMPRESSION_DISABLE_MASK 0x04000000L
+#define CB_COLOR7_INFO__FMASK_COMPRESSION_DISABLE__SHIFT 0x0000001a
+#define CB_COLOR7_INFO__FORMAT_MASK 0x0000007cL
+#define CB_COLOR7_INFO__FORMAT__SHIFT 0x00000002
+#define CB_COLOR7_INFO__LINEAR_GENERAL_MASK 0x00000080L
+#define CB_COLOR7_INFO__LINEAR_GENERAL__SHIFT 0x00000007
+#define CB_COLOR7_INFO__NUMBER_TYPE_MASK 0x00000700L
+#define CB_COLOR7_INFO__NUMBER_TYPE__SHIFT 0x00000008
+#define CB_COLOR7_INFO__ROUND_MODE_MASK 0x00040000L
+#define CB_COLOR7_INFO__ROUND_MODE__SHIFT 0x00000012
+#define CB_COLOR7_INFO__SIMPLE_FLOAT_MASK 0x00020000L
+#define CB_COLOR7_INFO__SIMPLE_FLOAT__SHIFT 0x00000011
+#define CB_COLOR7_PITCH__FMASK_TILE_MAX_MASK 0x7ff00000L
+#define CB_COLOR7_PITCH__FMASK_TILE_MAX__SHIFT 0x00000014
+#define CB_COLOR7_PITCH__TILE_MAX_MASK 0x000007ffL
+#define CB_COLOR7_PITCH__TILE_MAX__SHIFT 0x00000000
+#define CB_COLOR7_SLICE__TILE_MAX_MASK 0x003fffffL
+#define CB_COLOR7_SLICE__TILE_MAX__SHIFT 0x00000000
+#define CB_COLOR7_VIEW__SLICE_MAX_MASK 0x00ffe000L
+#define CB_COLOR7_VIEW__SLICE_MAX__SHIFT 0x0000000d
+#define CB_COLOR7_VIEW__SLICE_START_MASK 0x000007ffL
+#define CB_COLOR7_VIEW__SLICE_START__SHIFT 0x00000000
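(Editor's note: the CB_COLOR0..CB_COLOR7 blocks above repeat one field layout per render target, with a paired _MASK/__SHIFT define for every field. A minimal sketch of how such pairs are consumed, assuming nothing beyond the defines themselves; the helper names here are hypothetical illustrations of the pattern, which the amdgpu driver covers with its own REG_GET_FIELD()/REG_SET_FIELD() macros.)

#include <stdint.h>

/* Isolate a field and right-align it, e.g. for a *_MASK/__SHIFT pair. */
static inline uint32_t get_field(uint32_t reg, uint32_t mask, uint32_t shift)
{
	return (reg & mask) >> shift;
}

/* Read-modify-write: clear the field, then place the new value. */
static inline uint32_t set_field(uint32_t reg, uint32_t mask, uint32_t shift,
				 uint32_t val)
{
	return (reg & ~mask) | ((val << shift) & mask);
}

/* Example: read the sample-count field of a CB_COLOR2_ATTRIB value:
 *   n = get_field(attrib, CB_COLOR2_ATTRIB__NUM_SAMPLES_MASK,
 *                 CB_COLOR2_ATTRIB__NUM_SAMPLES__SHIFT);
 */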
+#define CB_COLOR_CONTROL__DEGAMMA_ENABLE_MASK 0x00000008L
+#define CB_COLOR_CONTROL__DEGAMMA_ENABLE__SHIFT 0x00000003
+#define CB_COLOR_CONTROL__MODE_MASK 0x00000070L
+#define CB_COLOR_CONTROL__MODE__SHIFT 0x00000004
+#define CB_COLOR_CONTROL__ROP3_MASK 0x00ff0000L
+#define CB_COLOR_CONTROL__ROP3__SHIFT 0x00000010
+#define CB_DEBUG_BUS_13__AC_BUSY_MASK 0x00000008L
+#define CB_DEBUG_BUS_13__AC_BUSY__SHIFT 0x00000003
+#define CB_DEBUG_BUS_13__CACHE_CTRL_BUSY_MASK 0x00000020L
+#define CB_DEBUG_BUS_13__CACHE_CTRL_BUSY__SHIFT 0x00000005
+#define CB_DEBUG_BUS_13__CRW_BUSY_MASK 0x00000010L
+#define CB_DEBUG_BUS_13__CRW_BUSY__SHIFT 0x00000004
+#define CB_DEBUG_BUS_13__EVICT_PENDING_MASK 0x00000200L
+#define CB_DEBUG_BUS_13__EVICT_PENDING__SHIFT 0x00000009
+#define CB_DEBUG_BUS_13__FC_RD_PENDING_MASK 0x00000100L
+#define CB_DEBUG_BUS_13__FC_RD_PENDING__SHIFT 0x00000008
+#define CB_DEBUG_BUS_13__FC_WR_PENDING_MASK 0x00000080L
+#define CB_DEBUG_BUS_13__FC_WR_PENDING__SHIFT 0x00000007
+#define CB_DEBUG_BUS_13__LAST_RD_ARB_WINNER_MASK 0x00000400L
+#define CB_DEBUG_BUS_13__LAST_RD_ARB_WINNER__SHIFT 0x0000000a
+#define CB_DEBUG_BUS_13__MC_WR_PENDING_MASK 0x00000040L
+#define CB_DEBUG_BUS_13__MC_WR_PENDING__SHIFT 0x00000006
+#define CB_DEBUG_BUS_13__MU_BUSY_MASK 0x00000002L
+#define CB_DEBUG_BUS_13__MU_BUSY__SHIFT 0x00000001
+#define CB_DEBUG_BUS_13__MU_STATE_MASK 0x0007f800L
+#define CB_DEBUG_BUS_13__MU_STATE__SHIFT 0x0000000b
+#define CB_DEBUG_BUS_13__TILE_INTFC_BUSY_MASK 0x00000001L
+#define CB_DEBUG_BUS_13__TILE_INTFC_BUSY__SHIFT 0x00000000
+#define CB_DEBUG_BUS_13__TQ_BUSY_MASK 0x00000004L
+#define CB_DEBUG_BUS_13__TQ_BUSY__SHIFT 0x00000002
+#define CB_DEBUG_BUS_14__ADDR_BUSY_MASK 0x00000010L
+#define CB_DEBUG_BUS_14__ADDR_BUSY__SHIFT 0x00000004
+#define CB_DEBUG_BUS_14__CACHE_CTL_BUSY_MASK 0x00000008L
+#define CB_DEBUG_BUS_14__CACHE_CTL_BUSY__SHIFT 0x00000003
+#define CB_DEBUG_BUS_14__CLEAR_BUSY_MASK 0x00000100L
+#define CB_DEBUG_BUS_14__CLEAR_BUSY__SHIFT 0x00000008
+#define CB_DEBUG_BUS_14__FOP_BUSY_MASK 0x00000002L
+#define CB_DEBUG_BUS_14__FOP_BUSY__SHIFT 0x00000001
+#define CB_DEBUG_BUS_14__LAT_BUSY_MASK 0x00000004L
+#define CB_DEBUG_BUS_14__LAT_BUSY__SHIFT 0x00000002
+#define CB_DEBUG_BUS_14__MERGE_BUSY_MASK 0x00000020L
+#define CB_DEBUG_BUS_14__MERGE_BUSY__SHIFT 0x00000005
+#define CB_DEBUG_BUS_14__QUAD_BUSY_MASK 0x00000040L
+#define CB_DEBUG_BUS_14__QUAD_BUSY__SHIFT 0x00000006
+#define CB_DEBUG_BUS_14__TILE_BUSY_MASK 0x00000080L
+#define CB_DEBUG_BUS_14__TILE_BUSY__SHIFT 0x00000007
+#define CB_DEBUG_BUS_14__TILE_RETIREMENT_BUSY_MASK 0x00000001L
+#define CB_DEBUG_BUS_14__TILE_RETIREMENT_BUSY__SHIFT 0x00000000
+#define CB_DEBUG_BUS_15__CS_BUSY_MASK 0x00000010L
+#define CB_DEBUG_BUS_15__CS_BUSY__SHIFT 0x00000004
+#define CB_DEBUG_BUS_15__DS_BUSY_MASK 0x00000040L
+#define CB_DEBUG_BUS_15__DS_BUSY__SHIFT 0x00000006
+#define CB_DEBUG_BUS_15__IB_BUSY_MASK 0x00000100L
+#define CB_DEBUG_BUS_15__IB_BUSY__SHIFT 0x00000008
+#define CB_DEBUG_BUS_15__RB_BUSY_MASK 0x00000020L
+#define CB_DEBUG_BUS_15__RB_BUSY__SHIFT 0x00000005
+#define CB_DEBUG_BUS_15__SF_BUSY_MASK 0x00000008L
+#define CB_DEBUG_BUS_15__SF_BUSY__SHIFT 0x00000003
+#define CB_DEBUG_BUS_15__SURF_SYNC_START_MASK 0x00000004L
+#define CB_DEBUG_BUS_15__SURF_SYNC_START__SHIFT 0x00000002
+#define CB_DEBUG_BUS_15__SURF_SYNC_STATE_MASK 0x00000003L
+#define CB_DEBUG_BUS_15__SURF_SYNC_STATE__SHIFT 0x00000000
+#define CB_DEBUG_BUS_15__TB_BUSY_MASK 0x00000080L
+#define CB_DEBUG_BUS_15__TB_BUSY__SHIFT 0x00000007
+#define CB_DEBUG_BUS_16__CC_WRREQ_FIFO_EMPTY_MASK 0x00100000L
+#define CB_DEBUG_BUS_16__CC_WRREQ_FIFO_EMPTY__SHIFT 0x00000014
+#define CB_DEBUG_BUS_16__CM_WRREQ_FIFO_EMPTY_MASK 0x00400000L
+#define CB_DEBUG_BUS_16__CM_WRREQ_FIFO_EMPTY__SHIFT 0x00000016
+#define CB_DEBUG_BUS_16__FC_WRREQ_FIFO_EMPTY_MASK 0x00200000L
+#define CB_DEBUG_BUS_16__FC_WRREQ_FIFO_EMPTY__SHIFT 0x00000015
+#define CB_DEBUG_BUS_16__LAST_RD_GRANT_VEC_MASK 0x000003c0L
+#define CB_DEBUG_BUS_16__LAST_RD_GRANT_VEC__SHIFT 0x00000006
+#define CB_DEBUG_BUS_16__LAST_WR_GRANT_VEC_MASK 0x000f0000L
+#define CB_DEBUG_BUS_16__LAST_WR_GRANT_VEC__SHIFT 0x00000010
+#define CB_DEBUG_BUS_16__MC_RDREQ_CREDITS_MASK 0x0000003fL
+#define CB_DEBUG_BUS_16__MC_RDREQ_CREDITS__SHIFT 0x00000000
+#define CB_DEBUG_BUS_16__MC_WRREQ_CREDITS_MASK 0x0000fc00L
+#define CB_DEBUG_BUS_16__MC_WRREQ_CREDITS__SHIFT 0x0000000a
+#define CB_DEBUG_BUS_17__BB_BUSY_MASK 0x00000008L
+#define CB_DEBUG_BUS_17__BB_BUSY__SHIFT 0x00000003
+#define CB_DEBUG_BUS_17__CC_BUSY_MASK 0x00000004L
+#define CB_DEBUG_BUS_17__CC_BUSY__SHIFT 0x00000002
+#define CB_DEBUG_BUS_17__CM_BUSY_MASK 0x00000001L
+#define CB_DEBUG_BUS_17__CM_BUSY__SHIFT 0x00000000
+#define CB_DEBUG_BUS_17__CORE_SCLK_VLD_MASK 0x00000020L
+#define CB_DEBUG_BUS_17__CORE_SCLK_VLD__SHIFT 0x00000005
+#define CB_DEBUG_BUS_17__FC_BUSY_MASK 0x00000002L
+#define CB_DEBUG_BUS_17__FC_BUSY__SHIFT 0x00000001
+#define CB_DEBUG_BUS_17__MA_BUSY_MASK 0x00000010L
+#define CB_DEBUG_BUS_17__MA_BUSY__SHIFT 0x00000004
+#define CB_DEBUG_BUS_17__REG_SCLK0_VLD_MASK 0x00000080L
+#define CB_DEBUG_BUS_17__REG_SCLK0_VLD__SHIFT 0x00000007
+#define CB_DEBUG_BUS_17__REG_SCLK1_VLD_MASK 0x00000040L
+#define CB_DEBUG_BUS_17__REG_SCLK1_VLD__SHIFT 0x00000006
+#define CB_DEBUG_BUS_18__NOT_USED_MASK 0x00ffffffL
+#define CB_DEBUG_BUS_18__NOT_USED__SHIFT 0x00000000
+#define CB_HW_CONTROL_1__CC_CACHE_NUM_TAGS_MASK 0x0001f800L
+#define CB_HW_CONTROL_1__CC_CACHE_NUM_TAGS__SHIFT 0x0000000b
+#define CB_HW_CONTROL_1__CHICKEN_BITS_MASK 0xfc000000L
+#define CB_HW_CONTROL_1__CHICKEN_BITS__SHIFT 0x0000001a
+#define CB_HW_CONTROL_1__CM_CACHE_NUM_TAGS_MASK 0x0000001fL
+#define CB_HW_CONTROL_1__CM_CACHE_NUM_TAGS__SHIFT 0x00000000
+#define CB_HW_CONTROL_1__CM_TILE_FIFO_DEPTH_MASK 0x03fe0000L
+#define CB_HW_CONTROL_1__CM_TILE_FIFO_DEPTH__SHIFT 0x00000011
+#define CB_HW_CONTROL_1__FC_CACHE_NUM_TAGS_MASK 0x000007e0L
+#define CB_HW_CONTROL_1__FC_CACHE_NUM_TAGS__SHIFT 0x00000005
+#define CB_HW_CONTROL_2__CC_EVEN_ODD_FIFO_DEPTH_MASK 0x000000ffL
+#define CB_HW_CONTROL_2__CC_EVEN_ODD_FIFO_DEPTH__SHIFT 0x00000000
+#define CB_HW_CONTROL_2__CHICKEN_BITS_MASK 0xff800000L
+#define CB_HW_CONTROL_2__CHICKEN_BITS__SHIFT 0x00000017
+#define CB_HW_CONTROL_2__FC_RDLAT_QUAD_FIFO_DEPTH_MASK 0x007f8000L
+#define CB_HW_CONTROL_2__FC_RDLAT_QUAD_FIFO_DEPTH__SHIFT 0x0000000f
+#define CB_HW_CONTROL_2__FC_RDLAT_TILE_FIFO_DEPTH_MASK 0x00007f00L
+#define CB_HW_CONTROL_2__FC_RDLAT_TILE_FIFO_DEPTH__SHIFT 0x00000008
+#define CB_HW_CONTROL_3__DISABLE_SLOW_MODE_EMPTY_HALF_QUAD_KILL_MASK 0x00000001L
+#define CB_HW_CONTROL_3__DISABLE_SLOW_MODE_EMPTY_HALF_QUAD_KILL__SHIFT 0x00000000
+#define CB_HW_CONTROL__ALLOW_MRT_WITH_DUAL_SOURCE_MASK 0x00010000L
+#define CB_HW_CONTROL__ALLOW_MRT_WITH_DUAL_SOURCE__SHIFT 0x00000010
+#define CB_HW_CONTROL__CC_CACHE_EVICT_POINT_MASK 0x0000f000L
+#define CB_HW_CONTROL__CC_CACHE_EVICT_POINT__SHIFT 0x0000000c
+#define CB_HW_CONTROL__CM_CACHE_EVICT_POINT_MASK 0x0000000fL
+#define CB_HW_CONTROL__CM_CACHE_EVICT_POINT__SHIFT 0x00000000
+#define CB_HW_CONTROL__DISABLE_BLEND_OPT_BYPASS_MASK 0x02000000L
+#define CB_HW_CONTROL__DISABLE_BLEND_OPT_BYPASS__SHIFT 0x00000019
+#define CB_HW_CONTROL__DISABLE_BLEND_OPT_DISCARD_PIXEL_MASK 0x04000000L
+#define CB_HW_CONTROL__DISABLE_BLEND_OPT_DISCARD_PIXEL__SHIFT 0x0000001a
+#define CB_HW_CONTROL__DISABLE_BLEND_OPT_DONT_RD_DST_MASK 0x01000000L
+#define CB_HW_CONTROL__DISABLE_BLEND_OPT_DONT_RD_DST__SHIFT 0x00000018
+#define CB_HW_CONTROL__DISABLE_BLEND_OPT_RESULT_EQ_DEST_MASK 0x00200000L
+#define CB_HW_CONTROL__DISABLE_BLEND_OPT_RESULT_EQ_DEST__SHIFT 0x00000015
+#define CB_HW_CONTROL__DISABLE_BLEND_OPT_WHEN_DISABLED_SRCALPHA_IS_USED_MASK 0x08000000L
+#define CB_HW_CONTROL__DISABLE_BLEND_OPT_WHEN_DISABLED_SRCALPHA_IS_USED__SHIFT 0x0000001b
+#define CB_HW_CONTROL__DISABLE_CC_IB_SERIALIZER_STATE_OPT_MASK 0x40000000L
+#define CB_HW_CONTROL__DISABLE_CC_IB_SERIALIZER_STATE_OPT__SHIFT 0x0000001e
+#define CB_HW_CONTROL__DISABLE_FULL_WRITE_MASK_MASK 0x00400000L
+#define CB_HW_CONTROL__DISABLE_FULL_WRITE_MASK__SHIFT 0x00000016
+#define CB_HW_CONTROL__DISABLE_INTNORM_LE11BPC_CLAMPING_MASK 0x00040000L
+#define CB_HW_CONTROL__DISABLE_INTNORM_LE11BPC_CLAMPING__SHIFT 0x00000012
+#define CB_HW_CONTROL__DISABLE_PIXEL_IN_QUAD_FIX_FOR_LINEAR_SURFACE_MASK 0x80000000L
+#define CB_HW_CONTROL__DISABLE_PIXEL_IN_QUAD_FIX_FOR_LINEAR_SURFACE__SHIFT 0x0000001f
+#define CB_HW_CONTROL__DISABLE_RESOLVE_OPT_FOR_SINGLE_FRAG_MASK 0x00800000L
+#define CB_HW_CONTROL__DISABLE_RESOLVE_OPT_FOR_SINGLE_FRAG__SHIFT 0x00000017
+#define CB_HW_CONTROL__FC_CACHE_EVICT_POINT_MASK 0x000003c0L
+#define CB_HW_CONTROL__FC_CACHE_EVICT_POINT__SHIFT 0x00000006
+#define CB_HW_CONTROL__FORCE_ALWAYS_TOGGLE_MASK 0x00100000L
+#define CB_HW_CONTROL__FORCE_ALWAYS_TOGGLE__SHIFT 0x00000014
+#define CB_HW_CONTROL__FORCE_NEEDS_DST_MASK 0x00080000L
+#define CB_HW_CONTROL__FORCE_NEEDS_DST__SHIFT 0x00000013
+#define CB_HW_CONTROL__PRIORITIZE_FC_EVICT_OVER_FOP_RD_ON_BANK_CONFLICT_MASK 0x20000000L
+#define CB_HW_CONTROL__PRIORITIZE_FC_EVICT_OVER_FOP_RD_ON_BANK_CONFLICT__SHIFT 0x0000001d
+#define CB_HW_CONTROL__PRIORITIZE_FC_WR_OVER_FC_RD_ON_CMASK_CONFLICT_MASK 0x10000000L
+#define CB_HW_CONTROL__PRIORITIZE_FC_WR_OVER_FC_RD_ON_CMASK_CONFLICT__SHIFT 0x0000001c
+#define CB_PERFCOUNTER0_HI__PERFCOUNTER_HI_MASK 0xffffffffL
+#define CB_PERFCOUNTER0_HI__PERFCOUNTER_HI__SHIFT 0x00000000
+#define CB_PERFCOUNTER0_LO__PERFCOUNTER_LO_MASK 0xffffffffL
+#define CB_PERFCOUNTER0_LO__PERFCOUNTER_LO__SHIFT 0x00000000
+#define CB_PERFCOUNTER0_SELECT1__PERF_MODE2_MASK 0xf0000000L
+#define CB_PERFCOUNTER0_SELECT1__PERF_MODE2__SHIFT 0x0000001c
+#define CB_PERFCOUNTER0_SELECT1__PERF_MODE3_MASK 0x0f000000L
+#define CB_PERFCOUNTER0_SELECT1__PERF_MODE3__SHIFT 0x00000018
+#define CB_PERFCOUNTER0_SELECT1__PERF_SEL2_MASK 0x000001ffL
+#define CB_PERFCOUNTER0_SELECT1__PERF_SEL2__SHIFT 0x00000000
+#define CB_PERFCOUNTER0_SELECT1__PERF_SEL3_MASK 0x0007fc00L
+#define CB_PERFCOUNTER0_SELECT1__PERF_SEL3__SHIFT 0x0000000a
+#define CB_PERFCOUNTER1_HI__PERFCOUNTER_HI_MASK 0xffffffffL
+#define CB_PERFCOUNTER1_HI__PERFCOUNTER_HI__SHIFT 0x00000000
+#define CB_PERFCOUNTER1_LO__PERFCOUNTER_LO_MASK 0xffffffffL
+#define CB_PERFCOUNTER1_LO__PERFCOUNTER_LO__SHIFT 0x00000000
+#define CB_PERFCOUNTER2_HI__PERFCOUNTER_HI_MASK 0xffffffffL
+#define CB_PERFCOUNTER2_HI__PERFCOUNTER_HI__SHIFT 0x00000000
+#define CB_PERFCOUNTER2_LO__PERFCOUNTER_LO_MASK 0xffffffffL
+#define CB_PERFCOUNTER2_LO__PERFCOUNTER_LO__SHIFT 0x00000000
+#define CB_PERFCOUNTER3_HI__PERFCOUNTER_HI_MASK 0xffffffffL
+#define CB_PERFCOUNTER3_HI__PERFCOUNTER_HI__SHIFT 0x00000000
+#define CB_PERFCOUNTER3_LO__PERFCOUNTER_LO_MASK 0xffffffffL
+#define CB_PERFCOUNTER3_LO__PERFCOUNTER_LO__SHIFT 0x00000000
+#define CB_SHADER_MASK__OUTPUT0_ENABLE_MASK 0x0000000fL
+#define CB_SHADER_MASK__OUTPUT0_ENABLE__SHIFT 0x00000000
+#define CB_SHADER_MASK__OUTPUT1_ENABLE_MASK 0x000000f0L
+#define CB_SHADER_MASK__OUTPUT1_ENABLE__SHIFT 0x00000004
+#define CB_SHADER_MASK__OUTPUT2_ENABLE_MASK 0x00000f00L
+#define CB_SHADER_MASK__OUTPUT2_ENABLE__SHIFT 0x00000008
+#define CB_SHADER_MASK__OUTPUT3_ENABLE_MASK 0x0000f000L
+#define CB_SHADER_MASK__OUTPUT3_ENABLE__SHIFT 0x0000000c
+#define CB_SHADER_MASK__OUTPUT4_ENABLE_MASK 0x000f0000L
+#define CB_SHADER_MASK__OUTPUT4_ENABLE__SHIFT 0x00000010
+#define CB_SHADER_MASK__OUTPUT5_ENABLE_MASK 0x00f00000L
+#define CB_SHADER_MASK__OUTPUT5_ENABLE__SHIFT 0x00000014
+#define CB_SHADER_MASK__OUTPUT6_ENABLE_MASK 0x0f000000L
+#define CB_SHADER_MASK__OUTPUT6_ENABLE__SHIFT 0x00000018
+#define CB_SHADER_MASK__OUTPUT7_ENABLE_MASK 0xf0000000L
+#define CB_SHADER_MASK__OUTPUT7_ENABLE__SHIFT 0x0000001c
+#define CB_TARGET_MASK__TARGET0_ENABLE_MASK 0x0000000fL
+#define CB_TARGET_MASK__TARGET0_ENABLE__SHIFT 0x00000000
+#define CB_TARGET_MASK__TARGET1_ENABLE_MASK 0x000000f0L
+#define CB_TARGET_MASK__TARGET1_ENABLE__SHIFT 0x00000004
+#define CB_TARGET_MASK__TARGET2_ENABLE_MASK 0x00000f00L
+#define CB_TARGET_MASK__TARGET2_ENABLE__SHIFT 0x00000008
+#define CB_TARGET_MASK__TARGET3_ENABLE_MASK 0x0000f000L
+#define CB_TARGET_MASK__TARGET3_ENABLE__SHIFT 0x0000000c
+#define CB_TARGET_MASK__TARGET4_ENABLE_MASK 0x000f0000L
+#define CB_TARGET_MASK__TARGET4_ENABLE__SHIFT 0x00000010
+#define CB_TARGET_MASK__TARGET5_ENABLE_MASK 0x00f00000L
+#define CB_TARGET_MASK__TARGET5_ENABLE__SHIFT 0x00000014
+#define CB_TARGET_MASK__TARGET6_ENABLE_MASK 0x0f000000L
+#define CB_TARGET_MASK__TARGET6_ENABLE__SHIFT 0x00000018
+#define CB_TARGET_MASK__TARGET7_ENABLE_MASK 0xf0000000L
+#define CB_TARGET_MASK__TARGET7_ENABLE__SHIFT 0x0000001c
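(Editor's note: CB_SHADER_MASK and CB_TARGET_MASK above give each of the eight targets a 4-bit enable nibble at bit 4*n, presumably one bit per color channel. A small sketch under that assumption; the helper is hypothetical, shown only to make the nibble layout concrete.)

/* Build a CB_TARGET_MASK value enabling all four channels of render
 * target 'rt' (rt = 0..7), matching the TARGETn_ENABLE masks above:
 * the nibble sits at bits 4*rt .. 4*rt+3. */
static inline uint32_t target_mask_all_channels(unsigned int rt)
{
	return 0xfu << (4 * rt);
}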
+#define CC_GC_SHADER_ARRAY_CONFIG__DPFP_RATE_MASK 0x00000006L
+#define CC_GC_SHADER_ARRAY_CONFIG__DPFP_RATE__SHIFT 0x00000001
+#define CC_GC_SHADER_ARRAY_CONFIG__HALF_LDS_MASK 0x00000010L
+#define CC_GC_SHADER_ARRAY_CONFIG__HALF_LDS__SHIFT 0x00000004
+#define CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_CUS_MASK 0xffff0000L
+#define CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_CUS__SHIFT 0x00000010
+#define CC_GC_SHADER_ARRAY_CONFIG__SQC_BALANCE_DISABLE_MASK 0x00000008L
+#define CC_GC_SHADER_ARRAY_CONFIG__SQC_BALANCE_DISABLE__SHIFT 0x00000003
+#define CC_RB_BACKEND_DISABLE__BACKEND_DISABLE_MASK 0x00ff0000L
+#define CC_RB_BACKEND_DISABLE__BACKEND_DISABLE__SHIFT 0x00000010
+#define CC_RB_DAISY_CHAIN__RB_0_MASK 0x0000000fL
+#define CC_RB_DAISY_CHAIN__RB_0__SHIFT 0x00000000
+#define CC_RB_DAISY_CHAIN__RB_1_MASK 0x000000f0L
+#define CC_RB_DAISY_CHAIN__RB_1__SHIFT 0x00000004
+#define CC_RB_DAISY_CHAIN__RB_2_MASK 0x00000f00L
+#define CC_RB_DAISY_CHAIN__RB_2__SHIFT 0x00000008
+#define CC_RB_DAISY_CHAIN__RB_3_MASK 0x0000f000L
+#define CC_RB_DAISY_CHAIN__RB_3__SHIFT 0x0000000c
+#define CC_RB_DAISY_CHAIN__RB_4_MASK 0x000f0000L
+#define CC_RB_DAISY_CHAIN__RB_4__SHIFT 0x00000010
+#define CC_RB_DAISY_CHAIN__RB_5_MASK 0x00f00000L
+#define CC_RB_DAISY_CHAIN__RB_5__SHIFT 0x00000014
+#define CC_RB_DAISY_CHAIN__RB_6_MASK 0x0f000000L
+#define CC_RB_DAISY_CHAIN__RB_6__SHIFT 0x00000018
+#define CC_RB_DAISY_CHAIN__RB_7_MASK 0xf0000000L
+#define CC_RB_DAISY_CHAIN__RB_7__SHIFT 0x0000001c
+#define CC_RB_REDUNDANCY__EN_REDUNDANCY0_MASK 0x00001000L
+#define CC_RB_REDUNDANCY__EN_REDUNDANCY0__SHIFT 0x0000000c
+#define CC_RB_REDUNDANCY__EN_REDUNDANCY1_MASK 0x00100000L
+#define CC_RB_REDUNDANCY__EN_REDUNDANCY1__SHIFT 0x00000014
+#define CC_RB_REDUNDANCY__FAILED_RB0_MASK 0x00000f00L
+#define CC_RB_REDUNDANCY__FAILED_RB0__SHIFT 0x00000008
+#define CC_RB_REDUNDANCY__FAILED_RB1_MASK 0x000f0000L
+#define CC_RB_REDUNDANCY__FAILED_RB1__SHIFT 0x00000010
+#define CC_SQC_BANK_DISABLE__SQC0_BANK_DISABLE_MASK 0x000f0000L
+#define CC_SQC_BANK_DISABLE__SQC0_BANK_DISABLE__SHIFT 0x00000010
+#define CC_SQC_BANK_DISABLE__SQC1_BANK_DISABLE_MASK 0x00f00000L
+#define CC_SQC_BANK_DISABLE__SQC1_BANK_DISABLE__SHIFT 0x00000014
+#define CC_SQC_BANK_DISABLE__SQC2_BANK_DISABLE_MASK 0x0f000000L
+#define CC_SQC_BANK_DISABLE__SQC2_BANK_DISABLE__SHIFT 0x00000018
+#define CC_SQC_BANK_DISABLE__SQC3_BANK_DISABLE_MASK 0xf0000000L
+#define CC_SQC_BANK_DISABLE__SQC3_BANK_DISABLE__SHIFT 0x0000001c
+#define CGTS_RD_CTRL_REG__REG_MUX_SEL_MASK 0x00001f00L
+#define CGTS_RD_CTRL_REG__REG_MUX_SEL__SHIFT 0x00000008
+#define CGTS_RD_CTRL_REG__ROW_MUX_SEL_MASK 0x0000001fL
+#define CGTS_RD_CTRL_REG__ROW_MUX_SEL__SHIFT 0x00000000
+#define CGTS_RD_REG__READ_DATA_MASK 0x00003fffL
+#define CGTS_RD_REG__READ_DATA__SHIFT 0x00000000
+#define CGTS_SM_CTRL_REG__BASE_MODE_MASK 0x00010000L
+#define CGTS_SM_CTRL_REG__BASE_MODE__SHIFT 0x00000010
+#define CGTS_SM_CTRL_REG__LS_OVERRIDE_MASK 0x00400000L
+#define CGTS_SM_CTRL_REG__LS_OVERRIDE__SHIFT 0x00000016
+#define CGTS_SM_CTRL_REG__MGCG_ENABLED_MASK 0x00001000L
+#define CGTS_SM_CTRL_REG__MGCG_ENABLED__SHIFT 0x0000000c
+#define CGTS_SM_CTRL_REG__OFF_SEQ_DELAY_MASK 0x00000ff0L
+#define CGTS_SM_CTRL_REG__OFF_SEQ_DELAY__SHIFT 0x00000004
+#define CGTS_SM_CTRL_REG__ON_MONITOR_ADD_EN_MASK 0x00800000L
+#define CGTS_SM_CTRL_REG__ON_MONITOR_ADD_EN__SHIFT 0x00000017
+#define CGTS_SM_CTRL_REG__ON_MONITOR_ADD_MASK 0xff000000L
+#define CGTS_SM_CTRL_REG__ON_MONITOR_ADD__SHIFT 0x00000018
+#define CGTS_SM_CTRL_REG__ON_SEQ_DELAY_MASK 0x0000000fL
+#define CGTS_SM_CTRL_REG__ON_SEQ_DELAY__SHIFT 0x00000000
+#define CGTS_SM_CTRL_REG__OVERRIDE_MASK 0x00200000L
+#define CGTS_SM_CTRL_REG__OVERRIDE__SHIFT 0x00000015
+#define CGTS_SM_CTRL_REG__SM_MODE_ENABLE_MASK 0x00100000L
+#define CGTS_SM_CTRL_REG__SM_MODE_ENABLE__SHIFT 0x00000014
+#define CGTS_SM_CTRL_REG__SM_MODE_MASK 0x000e0000L
+#define CGTS_SM_CTRL_REG__SM_MODE__SHIFT 0x00000011
+#define CGTS_TCC_DISABLE__TCC_DISABLE_MASK 0xffff0000L
+#define CGTS_TCC_DISABLE__TCC_DISABLE__SHIFT 0x00000010
+#define CGTS_USER_TCC_DISABLE__TCC_DISABLE_MASK 0xffff0000L
+#define CGTS_USER_TCC_DISABLE__TCC_DISABLE__SHIFT 0x00000010
+#define CGTT_BCI_CLK_CTRL__CORE0_OVERRIDE_MASK 0x40000000L
+#define CGTT_BCI_CLK_CTRL__CORE0_OVERRIDE__SHIFT 0x0000001e
+#define CGTT_BCI_CLK_CTRL__CORE1_OVERRIDE_MASK 0x20000000L
+#define CGTT_BCI_CLK_CTRL__CORE1_OVERRIDE__SHIFT 0x0000001d
+#define CGTT_BCI_CLK_CTRL__CORE2_OVERRIDE_MASK 0x10000000L
+#define CGTT_BCI_CLK_CTRL__CORE2_OVERRIDE__SHIFT 0x0000001c
+#define CGTT_BCI_CLK_CTRL__CORE3_OVERRIDE_MASK 0x08000000L
+#define CGTT_BCI_CLK_CTRL__CORE3_OVERRIDE__SHIFT 0x0000001b
+#define CGTT_BCI_CLK_CTRL__CORE4_OVERRIDE_MASK 0x04000000L
+#define CGTT_BCI_CLK_CTRL__CORE4_OVERRIDE__SHIFT 0x0000001a
+#define CGTT_BCI_CLK_CTRL__CORE5_OVERRIDE_MASK 0x02000000L
+#define CGTT_BCI_CLK_CTRL__CORE5_OVERRIDE__SHIFT 0x00000019
+#define CGTT_BCI_CLK_CTRL__CORE6_OVERRIDE_MASK 0x01000000L
+#define CGTT_BCI_CLK_CTRL__CORE6_OVERRIDE__SHIFT 0x00000018
+#define CGTT_BCI_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000ff0L
+#define CGTT_BCI_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x00000004
+#define CGTT_BCI_CLK_CTRL__ON_DELAY_MASK 0x0000000fL
+#define CGTT_BCI_CLK_CTRL__ON_DELAY__SHIFT 0x00000000
+#define CGTT_BCI_CLK_CTRL__REG_OVERRIDE_MASK 0x80000000L
+#define CGTT_BCI_CLK_CTRL__REG_OVERRIDE__SHIFT 0x0000001f
+#define CGTT_BCI_CLK_CTRL__RESERVED_MASK 0x00fff000L
+#define CGTT_BCI_CLK_CTRL__RESERVED__SHIFT 0x0000000c
+#define CGTT_CP_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000ff0L
+#define CGTT_CP_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x00000004
+#define CGTT_CP_CLK_CTRL__ON_DELAY_MASK 0x0000000fL
+#define CGTT_CP_CLK_CTRL__ON_DELAY__SHIFT 0x00000000
+#define CGTT_CP_CLK_CTRL__SOFT_OVERRIDE_DYN_MASK 0x40000000L
+#define CGTT_CP_CLK_CTRL__SOFT_OVERRIDE_DYN__SHIFT 0x0000001e
+#define CGTT_CP_CLK_CTRL__SOFT_OVERRIDE_REG_MASK 0x80000000L
+#define CGTT_CP_CLK_CTRL__SOFT_OVERRIDE_REG__SHIFT 0x0000001f
+#define CGTT_GDS_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000ff0L
+#define CGTT_GDS_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x00000004
+#define CGTT_GDS_CLK_CTRL__ON_DELAY_MASK 0x0000000fL
+#define CGTT_GDS_CLK_CTRL__ON_DELAY__SHIFT 0x00000000
+#define CGTT_GDS_CLK_CTRL__SOFT_OVERRIDE0_MASK 0x80000000L
+#define CGTT_GDS_CLK_CTRL__SOFT_OVERRIDE0__SHIFT 0x0000001f
+#define CGTT_GDS_CLK_CTRL__SOFT_OVERRIDE1_MASK 0x40000000L
+#define CGTT_GDS_CLK_CTRL__SOFT_OVERRIDE1__SHIFT 0x0000001e
+#define CGTT_GDS_CLK_CTRL__SOFT_OVERRIDE2_MASK 0x20000000L
+#define CGTT_GDS_CLK_CTRL__SOFT_OVERRIDE2__SHIFT 0x0000001d
+#define CGTT_GDS_CLK_CTRL__SOFT_OVERRIDE3_MASK 0x10000000L
+#define CGTT_GDS_CLK_CTRL__SOFT_OVERRIDE3__SHIFT 0x0000001c
+#define CGTT_GDS_CLK_CTRL__SOFT_OVERRIDE4_MASK 0x08000000L
+#define CGTT_GDS_CLK_CTRL__SOFT_OVERRIDE4__SHIFT 0x0000001b
+#define CGTT_GDS_CLK_CTRL__SOFT_OVERRIDE5_MASK 0x04000000L
+#define CGTT_GDS_CLK_CTRL__SOFT_OVERRIDE5__SHIFT 0x0000001a
+#define CGTT_GDS_CLK_CTRL__SOFT_OVERRIDE6_MASK 0x02000000L
+#define CGTT_GDS_CLK_CTRL__SOFT_OVERRIDE6__SHIFT 0x00000019
+#define CGTT_GDS_CLK_CTRL__SOFT_OVERRIDE7_MASK 0x01000000L
+#define CGTT_GDS_CLK_CTRL__SOFT_OVERRIDE7__SHIFT 0x00000018
+#define CGTT_IA_CLK_CTRL__CORE_OVERRIDE_MASK 0x20000000L
+#define CGTT_IA_CLK_CTRL__CORE_OVERRIDE__SHIFT 0x0000001d
+#define CGTT_IA_CLK_CTRL__DBG_ENABLE_MASK 0x04000000L
+#define CGTT_IA_CLK_CTRL__DBG_ENABLE__SHIFT 0x0000001a
+#define CGTT_IA_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000ff0L
+#define CGTT_IA_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x00000004
+#define CGTT_IA_CLK_CTRL__ON_DELAY_MASK 0x0000000fL
+#define CGTT_IA_CLK_CTRL__ON_DELAY__SHIFT 0x00000000
+#define CGTT_IA_CLK_CTRL__PERF_ENABLE_MASK 0x02000000L
+#define CGTT_IA_CLK_CTRL__PERF_ENABLE__SHIFT 0x00000019
+#define CGTT_IA_CLK_CTRL__REG_OVERRIDE_MASK 0x80000000L
+#define CGTT_IA_CLK_CTRL__REG_OVERRIDE__SHIFT 0x0000001f
+#define CGTT_IA_CLK_CTRL__SOFT_OVERRIDE2_MASK 0x20000000L
+#define CGTT_IA_CLK_CTRL__SOFT_OVERRIDE2__SHIFT 0x0000001d
+#define CGTT_IA_CLK_CTRL__SOFT_OVERRIDE3_MASK 0x10000000L
+#define CGTT_IA_CLK_CTRL__SOFT_OVERRIDE3__SHIFT 0x0000001c
+#define CGTT_IA_CLK_CTRL__SOFT_OVERRIDE4_MASK 0x08000000L
+#define CGTT_IA_CLK_CTRL__SOFT_OVERRIDE4__SHIFT 0x0000001b
+#define CGTT_IA_CLK_CTRL__SOFT_OVERRIDE7_MASK 0x01000000L
+#define CGTT_IA_CLK_CTRL__SOFT_OVERRIDE7__SHIFT 0x00000018
+#define CGTT_PA_CLK_CTRL__CL_CLK_OVERRIDE_MASK 0x40000000L
+#define CGTT_PA_CLK_CTRL__CL_CLK_OVERRIDE__SHIFT 0x0000001e
+#define CGTT_PA_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000ff0L
+#define CGTT_PA_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x00000004
+#define CGTT_PA_CLK_CTRL__ON_DELAY_MASK 0x0000000fL
+#define CGTT_PA_CLK_CTRL__ON_DELAY__SHIFT 0x00000000
+#define CGTT_PA_CLK_CTRL__REG_CLK_OVERRIDE_MASK 0x80000000L
+#define CGTT_PA_CLK_CTRL__REG_CLK_OVERRIDE__SHIFT 0x0000001f
+#define CGTT_PA_CLK_CTRL__SOFT_OVERRIDE3_MASK 0x10000000L
+#define CGTT_PA_CLK_CTRL__SOFT_OVERRIDE3__SHIFT 0x0000001c
+#define CGTT_PA_CLK_CTRL__SOFT_OVERRIDE4_MASK 0x08000000L
+#define CGTT_PA_CLK_CTRL__SOFT_OVERRIDE4__SHIFT 0x0000001b
+#define CGTT_PA_CLK_CTRL__SOFT_OVERRIDE5_MASK 0x04000000L
+#define CGTT_PA_CLK_CTRL__SOFT_OVERRIDE5__SHIFT 0x0000001a
+#define CGTT_PA_CLK_CTRL__SOFT_OVERRIDE6_MASK 0x02000000L
+#define CGTT_PA_CLK_CTRL__SOFT_OVERRIDE6__SHIFT 0x00000019
+#define CGTT_PA_CLK_CTRL__SOFT_OVERRIDE7_MASK 0x01000000L
+#define CGTT_PA_CLK_CTRL__SOFT_OVERRIDE7__SHIFT 0x00000018
+#define CGTT_PA_CLK_CTRL__SU_CLK_OVERRIDE_MASK 0x20000000L
+#define CGTT_PA_CLK_CTRL__SU_CLK_OVERRIDE__SHIFT 0x0000001d
+#define CGTT_PC_CLK_CTRL__BACK_CLK_ON_OVERRIDE_MASK 0x02000000L
+#define CGTT_PC_CLK_CTRL__BACK_CLK_ON_OVERRIDE__SHIFT 0x00000019
+#define CGTT_PC_CLK_CTRL__CORE0_OVERRIDE_MASK 0x40000000L
+#define CGTT_PC_CLK_CTRL__CORE0_OVERRIDE__SHIFT 0x0000001e
+#define CGTT_PC_CLK_CTRL__CORE1_OVERRIDE_MASK 0x20000000L
+#define CGTT_PC_CLK_CTRL__CORE1_OVERRIDE__SHIFT 0x0000001d
+#define CGTT_PC_CLK_CTRL__CORE2_OVERRIDE_MASK 0x10000000L
+#define CGTT_PC_CLK_CTRL__CORE2_OVERRIDE__SHIFT 0x0000001c
+#define CGTT_PC_CLK_CTRL__CORE3_OVERRIDE_MASK 0x08000000L
+#define CGTT_PC_CLK_CTRL__CORE3_OVERRIDE__SHIFT 0x0000001b
+#define CGTT_PC_CLK_CTRL__FRONT_CLK_ON_OVERRIDE_MASK 0x04000000L
+#define CGTT_PC_CLK_CTRL__FRONT_CLK_ON_OVERRIDE__SHIFT 0x0000001a
+#define CGTT_PC_CLK_CTRL__GRP5_CG_OFF_HYST_MASK 0x00fc0000L
+#define CGTT_PC_CLK_CTRL__GRP5_CG_OFF_HYST__SHIFT 0x00000012
+#define CGTT_PC_CLK_CTRL__GRP5_CG_OVERRIDE_MASK 0x01000000L
+#define CGTT_PC_CLK_CTRL__GRP5_CG_OVERRIDE__SHIFT 0x00000018
+#define CGTT_PC_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000ff0L
+#define CGTT_PC_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x00000004
+#define CGTT_PC_CLK_CTRL__ON_DELAY_MASK 0x0000000fL
+#define CGTT_PC_CLK_CTRL__ON_DELAY__SHIFT 0x00000000
+#define CGTT_PC_CLK_CTRL__REG_OVERRIDE_MASK 0x80000000L
+#define CGTT_PC_CLK_CTRL__REG_OVERRIDE__SHIFT 0x0000001f
+#define CGTT_RLC_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000ff0L
+#define CGTT_RLC_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x00000004
+#define CGTT_RLC_CLK_CTRL__ON_DELAY_MASK 0x0000000fL
+#define CGTT_RLC_CLK_CTRL__ON_DELAY__SHIFT 0x00000000
+#define CGTT_RLC_CLK_CTRL__SOFT_OVERRIDE_DYN_MASK 0x40000000L
+#define CGTT_RLC_CLK_CTRL__SOFT_OVERRIDE_DYN__SHIFT 0x0000001e
+#define CGTT_RLC_CLK_CTRL__SOFT_OVERRIDE_REG_MASK 0x80000000L
+#define CGTT_RLC_CLK_CTRL__SOFT_OVERRIDE_REG__SHIFT 0x0000001f
+#define CGTT_SC_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000ff0L
+#define CGTT_SC_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x00000004
+#define CGTT_SC_CLK_CTRL__ON_DELAY_MASK 0x0000000fL
+#define CGTT_SC_CLK_CTRL__ON_DELAY__SHIFT 0x00000000
+#define CGTT_SC_CLK_CTRL__SOFT_OVERRIDE0_MASK 0x80000000L
+#define CGTT_SC_CLK_CTRL__SOFT_OVERRIDE0__SHIFT 0x0000001f
+#define CGTT_SC_CLK_CTRL__SOFT_OVERRIDE1_MASK 0x40000000L
+#define CGTT_SC_CLK_CTRL__SOFT_OVERRIDE1__SHIFT 0x0000001e
+#define CGTT_SC_CLK_CTRL__SOFT_OVERRIDE2_MASK 0x20000000L
+#define CGTT_SC_CLK_CTRL__SOFT_OVERRIDE2__SHIFT 0x0000001d
+#define CGTT_SC_CLK_CTRL__SOFT_OVERRIDE3_MASK 0x10000000L
+#define CGTT_SC_CLK_CTRL__SOFT_OVERRIDE3__SHIFT 0x0000001c
+#define CGTT_SC_CLK_CTRL__SOFT_OVERRIDE4_MASK 0x08000000L
+#define CGTT_SC_CLK_CTRL__SOFT_OVERRIDE4__SHIFT 0x0000001b
+#define CGTT_SC_CLK_CTRL__SOFT_OVERRIDE5_MASK 0x04000000L
+#define CGTT_SC_CLK_CTRL__SOFT_OVERRIDE5__SHIFT 0x0000001a
+#define CGTT_SC_CLK_CTRL__SOFT_OVERRIDE6_MASK 0x02000000L
+#define CGTT_SC_CLK_CTRL__SOFT_OVERRIDE6__SHIFT 0x00000019
+#define CGTT_SC_CLK_CTRL__SOFT_OVERRIDE7_MASK 0x01000000L
+#define CGTT_SC_CLK_CTRL__SOFT_OVERRIDE7__SHIFT 0x00000018
+#define CGTT_SPI_CLK_CTRL__ALL_CLK_ON_OVERRIDE_MASK 0x04000000L
+#define CGTT_SPI_CLK_CTRL__ALL_CLK_ON_OVERRIDE__SHIFT 0x0000001a
+#define CGTT_SPI_CLK_CTRL__GRP0_OVERRIDE_MASK 0x40000000L
+#define CGTT_SPI_CLK_CTRL__GRP0_OVERRIDE__SHIFT 0x0000001e
+#define CGTT_SPI_CLK_CTRL__GRP1_OVERRIDE_MASK 0x20000000L
+#define CGTT_SPI_CLK_CTRL__GRP1_OVERRIDE__SHIFT 0x0000001d
+#define CGTT_SPI_CLK_CTRL__GRP2_OVERRIDE_MASK 0x10000000L
+#define CGTT_SPI_CLK_CTRL__GRP2_OVERRIDE__SHIFT 0x0000001c
+#define CGTT_SPI_CLK_CTRL__GRP3_OVERRIDE_MASK 0x08000000L
+#define CGTT_SPI_CLK_CTRL__GRP3_OVERRIDE__SHIFT 0x0000001b
+#define CGTT_SPI_CLK_CTRL__GRP5_CG_OFF_HYST_MASK 0x00fc0000L
+#define CGTT_SPI_CLK_CTRL__GRP5_CG_OFF_HYST__SHIFT 0x00000012
+#define CGTT_SPI_CLK_CTRL__GRP5_CG_OVERRIDE_MASK 0x01000000L
+#define CGTT_SPI_CLK_CTRL__GRP5_CG_OVERRIDE__SHIFT 0x00000018
+#define CGTT_SPI_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000ff0L
+#define CGTT_SPI_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x00000004
+#define CGTT_SPI_CLK_CTRL__ON_DELAY_MASK 0x0000000fL
+#define CGTT_SPI_CLK_CTRL__ON_DELAY__SHIFT 0x00000000
+#define CGTT_SPI_CLK_CTRL__REG_OVERRIDE_MASK 0x80000000L
+#define CGTT_SPI_CLK_CTRL__REG_OVERRIDE__SHIFT 0x0000001f
+#define CGTT_SQ_CLK_CTRL__CORE_OVERRIDE_MASK 0x40000000L
+#define CGTT_SQ_CLK_CTRL__CORE_OVERRIDE__SHIFT 0x0000001e
+#define CGTT_SQ_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000ff0L
+#define CGTT_SQ_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x00000004
+#define CGTT_SQ_CLK_CTRL__ON_DELAY_MASK 0x0000000fL
+#define CGTT_SQ_CLK_CTRL__ON_DELAY__SHIFT 0x00000000
+#define CGTT_SQ_CLK_CTRL__REG_OVERRIDE_MASK 0x80000000L
+#define CGTT_SQ_CLK_CTRL__REG_OVERRIDE__SHIFT 0x0000001f
+#define CGTT_SQG_CLK_CTRL__CORE_OVERRIDE_MASK 0x40000000L
+#define CGTT_SQG_CLK_CTRL__CORE_OVERRIDE__SHIFT 0x0000001e
+#define CGTT_SQG_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000ff0L
+#define CGTT_SQG_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x00000004
+#define CGTT_SQG_CLK_CTRL__ON_DELAY_MASK 0x0000000fL
+#define CGTT_SQG_CLK_CTRL__ON_DELAY__SHIFT 0x00000000
+#define CGTT_SQG_CLK_CTRL__REG_OVERRIDE_MASK 0x80000000L
+#define CGTT_SQG_CLK_CTRL__REG_OVERRIDE__SHIFT 0x0000001f
+#define CGTT_SX_CLK_CTRL0__OFF_HYSTERESIS_MASK 0x00000ff0L
+#define CGTT_SX_CLK_CTRL0__OFF_HYSTERESIS__SHIFT 0x00000004
+#define CGTT_SX_CLK_CTRL0__ON_DELAY_MASK 0x0000000fL
+#define CGTT_SX_CLK_CTRL0__ON_DELAY__SHIFT 0x00000000
+#define CGTT_SX_CLK_CTRL0__RESERVED_MASK 0x00fff000L
+#define CGTT_SX_CLK_CTRL0__RESERVED__SHIFT 0x0000000c
+#define CGTT_SX_CLK_CTRL0__SOFT_OVERRIDE0_MASK 0x80000000L
+#define CGTT_SX_CLK_CTRL0__SOFT_OVERRIDE0__SHIFT 0x0000001f
+#define CGTT_SX_CLK_CTRL0__SOFT_OVERRIDE1_MASK 0x40000000L
+#define CGTT_SX_CLK_CTRL0__SOFT_OVERRIDE1__SHIFT 0x0000001e
+#define CGTT_SX_CLK_CTRL0__SOFT_OVERRIDE2_MASK 0x20000000L
+#define CGTT_SX_CLK_CTRL0__SOFT_OVERRIDE2__SHIFT 0x0000001d
+#define CGTT_SX_CLK_CTRL0__SOFT_OVERRIDE3_MASK 0x10000000L
+#define CGTT_SX_CLK_CTRL0__SOFT_OVERRIDE3__SHIFT 0x0000001c
+#define CGTT_SX_CLK_CTRL0__SOFT_OVERRIDE4_MASK 0x08000000L
+#define CGTT_SX_CLK_CTRL0__SOFT_OVERRIDE4__SHIFT 0x0000001b
+#define CGTT_SX_CLK_CTRL0__SOFT_OVERRIDE5_MASK 0x04000000L
+#define CGTT_SX_CLK_CTRL0__SOFT_OVERRIDE5__SHIFT 0x0000001a
+#define CGTT_SX_CLK_CTRL0__SOFT_OVERRIDE6_MASK 0x02000000L
+#define CGTT_SX_CLK_CTRL0__SOFT_OVERRIDE6__SHIFT 0x00000019
+#define CGTT_SX_CLK_CTRL0__SOFT_OVERRIDE7_MASK 0x01000000L
+#define CGTT_SX_CLK_CTRL0__SOFT_OVERRIDE7__SHIFT 0x00000018
+#define CGTT_SX_CLK_CTRL1__OFF_HYSTERESIS_MASK 0x00000ff0L
+#define CGTT_SX_CLK_CTRL1__OFF_HYSTERESIS__SHIFT 0x00000004
+#define CGTT_SX_CLK_CTRL1__ON_DELAY_MASK 0x0000000fL
+#define CGTT_SX_CLK_CTRL1__ON_DELAY__SHIFT 0x00000000
+#define CGTT_SX_CLK_CTRL1__RESERVED_MASK 0x00fff000L
+#define CGTT_SX_CLK_CTRL1__RESERVED__SHIFT 0x0000000c
+#define CGTT_SX_CLK_CTRL1__SOFT_OVERRIDE0_MASK 0x80000000L
+#define CGTT_SX_CLK_CTRL1__SOFT_OVERRIDE0__SHIFT 0x0000001f
+#define CGTT_SX_CLK_CTRL1__SOFT_OVERRIDE1_MASK 0x40000000L
+#define CGTT_SX_CLK_CTRL1__SOFT_OVERRIDE1__SHIFT 0x0000001e
+#define CGTT_SX_CLK_CTRL1__SOFT_OVERRIDE2_MASK 0x20000000L
+#define CGTT_SX_CLK_CTRL1__SOFT_OVERRIDE2__SHIFT 0x0000001d
+#define CGTT_SX_CLK_CTRL1__SOFT_OVERRIDE3_MASK 0x10000000L
+#define CGTT_SX_CLK_CTRL1__SOFT_OVERRIDE3__SHIFT 0x0000001c
+#define CGTT_SX_CLK_CTRL1__SOFT_OVERRIDE4_MASK 0x08000000L
+#define CGTT_SX_CLK_CTRL1__SOFT_OVERRIDE4__SHIFT 0x0000001b
+#define CGTT_SX_CLK_CTRL1__SOFT_OVERRIDE5_MASK 0x04000000L
+#define CGTT_SX_CLK_CTRL1__SOFT_OVERRIDE5__SHIFT 0x0000001a
+#define CGTT_SX_CLK_CTRL1__SOFT_OVERRIDE6_MASK 0x02000000L
+#define CGTT_SX_CLK_CTRL1__SOFT_OVERRIDE6__SHIFT 0x00000019
+#define CGTT_SX_CLK_CTRL1__SOFT_OVERRIDE7_MASK 0x01000000L
+#define CGTT_SX_CLK_CTRL1__SOFT_OVERRIDE7__SHIFT 0x00000018
+#define CGTT_SX_CLK_CTRL2__OFF_HYSTERESIS_MASK 0x00000ff0L
+#define CGTT_SX_CLK_CTRL2__OFF_HYSTERESIS__SHIFT 0x00000004
+#define CGTT_SX_CLK_CTRL2__ON_DELAY_MASK 0x0000000fL
+#define CGTT_SX_CLK_CTRL2__ON_DELAY__SHIFT 0x00000000
+#define CGTT_SX_CLK_CTRL2__RESERVED_MASK 0x00fff000L
+#define CGTT_SX_CLK_CTRL2__RESERVED__SHIFT 0x0000000c
+#define CGTT_SX_CLK_CTRL2__SOFT_OVERRIDE0_MASK 0x80000000L
+#define CGTT_SX_CLK_CTRL2__SOFT_OVERRIDE0__SHIFT 0x0000001f
+#define CGTT_SX_CLK_CTRL2__SOFT_OVERRIDE1_MASK 0x40000000L
+#define CGTT_SX_CLK_CTRL2__SOFT_OVERRIDE1__SHIFT 0x0000001e
+#define CGTT_SX_CLK_CTRL2__SOFT_OVERRIDE2_MASK 0x20000000L
+#define CGTT_SX_CLK_CTRL2__SOFT_OVERRIDE2__SHIFT 0x0000001d
+#define CGTT_SX_CLK_CTRL2__SOFT_OVERRIDE3_MASK 0x10000000L
+#define CGTT_SX_CLK_CTRL2__SOFT_OVERRIDE3__SHIFT 0x0000001c
+#define CGTT_SX_CLK_CTRL2__SOFT_OVERRIDE4_MASK 0x08000000L
+#define CGTT_SX_CLK_CTRL2__SOFT_OVERRIDE4__SHIFT 0x0000001b
+#define CGTT_SX_CLK_CTRL2__SOFT_OVERRIDE5_MASK 0x04000000L
+#define CGTT_SX_CLK_CTRL2__SOFT_OVERRIDE5__SHIFT 0x0000001a
+#define CGTT_SX_CLK_CTRL2__SOFT_OVERRIDE6_MASK 0x02000000L
+#define CGTT_SX_CLK_CTRL2__SOFT_OVERRIDE6__SHIFT 0x00000019
+#define CGTT_SX_CLK_CTRL2__SOFT_OVERRIDE7_MASK 0x01000000L
+#define CGTT_SX_CLK_CTRL2__SOFT_OVERRIDE7__SHIFT 0x00000018
+#define CGTT_SX_CLK_CTRL3__OFF_HYSTERESIS_MASK 0x00000ff0L
+#define CGTT_SX_CLK_CTRL3__OFF_HYSTERESIS__SHIFT 0x00000004
+#define CGTT_SX_CLK_CTRL3__ON_DELAY_MASK 0x0000000fL
+#define CGTT_SX_CLK_CTRL3__ON_DELAY__SHIFT 0x00000000
+#define CGTT_SX_CLK_CTRL3__RESERVED_MASK 0x00fff000L
+#define CGTT_SX_CLK_CTRL3__RESERVED__SHIFT 0x0000000c
+#define CGTT_SX_CLK_CTRL3__SOFT_OVERRIDE0_MASK 0x80000000L
+#define CGTT_SX_CLK_CTRL3__SOFT_OVERRIDE0__SHIFT 0x0000001f
+#define CGTT_SX_CLK_CTRL3__SOFT_OVERRIDE1_MASK 0x40000000L
+#define CGTT_SX_CLK_CTRL3__SOFT_OVERRIDE1__SHIFT 0x0000001e
+#define CGTT_SX_CLK_CTRL3__SOFT_OVERRIDE2_MASK 0x20000000L
+#define CGTT_SX_CLK_CTRL3__SOFT_OVERRIDE2__SHIFT 0x0000001d
+#define CGTT_SX_CLK_CTRL3__SOFT_OVERRIDE3_MASK 0x10000000L
+#define CGTT_SX_CLK_CTRL3__SOFT_OVERRIDE3__SHIFT 0x0000001c
+#define CGTT_SX_CLK_CTRL3__SOFT_OVERRIDE4_MASK 0x08000000L
+#define CGTT_SX_CLK_CTRL3__SOFT_OVERRIDE4__SHIFT 0x0000001b
+#define CGTT_SX_CLK_CTRL3__SOFT_OVERRIDE5_MASK 0x04000000L
+#define CGTT_SX_CLK_CTRL3__SOFT_OVERRIDE5__SHIFT 0x0000001a
+#define CGTT_SX_CLK_CTRL3__SOFT_OVERRIDE6_MASK 0x02000000L
+#define CGTT_SX_CLK_CTRL3__SOFT_OVERRIDE6__SHIFT 0x00000019
+#define CGTT_SX_CLK_CTRL3__SOFT_OVERRIDE7_MASK 0x01000000L
+#define CGTT_SX_CLK_CTRL3__SOFT_OVERRIDE7__SHIFT 0x00000018
+#define CGTT_SX_CLK_CTRL4__OFF_HYSTERESIS_MASK 0x00000ff0L
+#define CGTT_SX_CLK_CTRL4__OFF_HYSTERESIS__SHIFT 0x00000004
+#define CGTT_SX_CLK_CTRL4__ON_DELAY_MASK 0x0000000fL
+#define CGTT_SX_CLK_CTRL4__ON_DELAY__SHIFT 0x00000000
+#define CGTT_SX_CLK_CTRL4__RESERVED_MASK 0x00fff000L
+#define CGTT_SX_CLK_CTRL4__RESERVED__SHIFT 0x0000000c
+#define CGTT_SX_CLK_CTRL4__SOFT_OVERRIDE0_MASK 0x80000000L
+#define CGTT_SX_CLK_CTRL4__SOFT_OVERRIDE0__SHIFT 0x0000001f
+#define CGTT_SX_CLK_CTRL4__SOFT_OVERRIDE1_MASK 0x40000000L
+#define CGTT_SX_CLK_CTRL4__SOFT_OVERRIDE1__SHIFT 0x0000001e
+#define CGTT_SX_CLK_CTRL4__SOFT_OVERRIDE2_MASK 0x20000000L
+#define CGTT_SX_CLK_CTRL4__SOFT_OVERRIDE2__SHIFT 0x0000001d
+#define CGTT_SX_CLK_CTRL4__SOFT_OVERRIDE3_MASK 0x10000000L
+#define CGTT_SX_CLK_CTRL4__SOFT_OVERRIDE3__SHIFT 0x0000001c
+#define CGTT_SX_CLK_CTRL4__SOFT_OVERRIDE4_MASK 0x08000000L
+#define CGTT_SX_CLK_CTRL4__SOFT_OVERRIDE4__SHIFT 0x0000001b
+#define CGTT_SX_CLK_CTRL4__SOFT_OVERRIDE5_MASK 0x04000000L
+#define CGTT_SX_CLK_CTRL4__SOFT_OVERRIDE5__SHIFT 0x0000001a
+#define CGTT_SX_CLK_CTRL4__SOFT_OVERRIDE6_MASK 0x02000000L
+#define CGTT_SX_CLK_CTRL4__SOFT_OVERRIDE6__SHIFT 0x00000019
+#define CGTT_SX_CLK_CTRL4__SOFT_OVERRIDE7_MASK 0x01000000L
+#define CGTT_SX_CLK_CTRL4__SOFT_OVERRIDE7__SHIFT 0x00000018
+#define CGTT_TCI_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000ff0L
+#define CGTT_TCI_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x00000004
+#define CGTT_TCI_CLK_CTRL__ON_DELAY_MASK 0x0000000fL
+#define CGTT_TCI_CLK_CTRL__ON_DELAY__SHIFT 0x00000000
+#define CGTT_TCI_CLK_CTRL__SOFT_OVERRIDE0_MASK 0x80000000L
+#define CGTT_TCI_CLK_CTRL__SOFT_OVERRIDE0__SHIFT 0x0000001f
+#define CGTT_TCI_CLK_CTRL__SOFT_OVERRIDE1_MASK 0x40000000L
+#define CGTT_TCI_CLK_CTRL__SOFT_OVERRIDE1__SHIFT 0x0000001e
+#define CGTT_TCI_CLK_CTRL__SOFT_OVERRIDE2_MASK 0x20000000L
+#define CGTT_TCI_CLK_CTRL__SOFT_OVERRIDE2__SHIFT 0x0000001d
+#define CGTT_TCI_CLK_CTRL__SOFT_OVERRIDE3_MASK 0x10000000L
+#define CGTT_TCI_CLK_CTRL__SOFT_OVERRIDE3__SHIFT 0x0000001c
+#define CGTT_TCI_CLK_CTRL__SOFT_OVERRIDE4_MASK 0x08000000L
+#define CGTT_TCI_CLK_CTRL__SOFT_OVERRIDE4__SHIFT 0x0000001b
+#define CGTT_TCI_CLK_CTRL__SOFT_OVERRIDE5_MASK 0x04000000L
+#define CGTT_TCI_CLK_CTRL__SOFT_OVERRIDE5__SHIFT 0x0000001a
+#define CGTT_TCI_CLK_CTRL__SOFT_OVERRIDE6_MASK 0x02000000L
+#define CGTT_TCI_CLK_CTRL__SOFT_OVERRIDE6__SHIFT 0x00000019
+#define CGTT_TCI_CLK_CTRL__SOFT_OVERRIDE7_MASK 0x01000000L
+#define CGTT_TCI_CLK_CTRL__SOFT_OVERRIDE7__SHIFT 0x00000018
+#define CGTT_TCP_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000ff0L
+#define CGTT_TCP_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x00000004
+#define CGTT_TCP_CLK_CTRL__ON_DELAY_MASK 0x0000000fL
+#define CGTT_TCP_CLK_CTRL__ON_DELAY__SHIFT 0x00000000
+#define CGTT_TCP_CLK_CTRL__SOFT_OVERRIDE0_MASK 0x80000000L
+#define CGTT_TCP_CLK_CTRL__SOFT_OVERRIDE0__SHIFT 0x0000001f
+#define CGTT_TCP_CLK_CTRL__SOFT_OVERRIDE1_MASK 0x40000000L
+#define CGTT_TCP_CLK_CTRL__SOFT_OVERRIDE1__SHIFT 0x0000001e
+#define CGTT_TCP_CLK_CTRL__SOFT_OVERRIDE2_MASK 0x20000000L
+#define CGTT_TCP_CLK_CTRL__SOFT_OVERRIDE2__SHIFT 0x0000001d
+#define CGTT_TCP_CLK_CTRL__SOFT_OVERRIDE3_MASK 0x10000000L
+#define CGTT_TCP_CLK_CTRL__SOFT_OVERRIDE3__SHIFT 0x0000001c
+#define CGTT_TCP_CLK_CTRL__SOFT_OVERRIDE4_MASK 0x08000000L
+#define CGTT_TCP_CLK_CTRL__SOFT_OVERRIDE4__SHIFT 0x0000001b
+#define CGTT_TCP_CLK_CTRL__SOFT_OVERRIDE5_MASK 0x04000000L
+#define CGTT_TCP_CLK_CTRL__SOFT_OVERRIDE5__SHIFT 0x0000001a
+#define CGTT_TCP_CLK_CTRL__SOFT_OVERRIDE6_MASK 0x02000000L
+#define CGTT_TCP_CLK_CTRL__SOFT_OVERRIDE6__SHIFT 0x00000019
+#define CGTT_TCP_CLK_CTRL__SOFT_OVERRIDE7_MASK 0x01000000L
+#define CGTT_TCP_CLK_CTRL__SOFT_OVERRIDE7__SHIFT 0x00000018
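(Editor's note: the CGTT_*_CLK_CTRL registers above share one layout: ON_DELAY in bits 3:0, OFF_HYSTERESIS in bits 11:4, and per-block override bits in the top byte. A hypothetical composer for such a value, sketched purely from the masks listed here.)

/* Compose the common low fields of a CGTT_*_CLK_CTRL value; override
 * bits in the top byte would be OR'd in separately per block. */
static inline uint32_t cgtt_clk_ctrl(uint32_t on_delay, uint32_t off_hyst)
{
	return ((on_delay << 0) & 0x0000000fu) |   /* ON_DELAY, bits 3:0 */
	       ((off_hyst << 4) & 0x00000ff0u);    /* OFF_HYSTERESIS, bits 11:4 */
}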
+#define CGTT_VGT_CLK_CTRL__CORE_OVERRIDE_MASK 0x40000000L
+#define CGTT_VGT_CLK_CTRL__CORE_OVERRIDE__SHIFT 0x0000001e
+#define CGTT_VGT_CLK_CTRL__DBG_ENABLE_MASK 0x04000000L
+#define CGTT_VGT_CLK_CTRL__DBG_ENABLE__SHIFT 0x0000001a
+#define CGTT_VGT_CLK_CTRL__GS_OVERRIDE_MASK 0x20000000L
+#define CGTT_VGT_CLK_CTRL__GS_OVERRIDE__SHIFT 0x0000001d
+#define CGTT_VGT_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000ff0L
+#define CGTT_VGT_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x00000004
+#define CGTT_VGT_CLK_CTRL__ON_DELAY_MASK 0x0000000fL
+#define CGTT_VGT_CLK_CTRL__ON_DELAY__SHIFT 0x00000000
+#define CGTT_VGT_CLK_CTRL__PERF_ENABLE_MASK 0x02000000L
+#define CGTT_VGT_CLK_CTRL__PERF_ENABLE__SHIFT 0x00000019
+#define CGTT_VGT_CLK_CTRL__REG_OVERRIDE_MASK 0x80000000L
+#define CGTT_VGT_CLK_CTRL__REG_OVERRIDE__SHIFT 0x0000001f
+#define CGTT_VGT_CLK_CTRL__SOFT_OVERRIDE3_MASK 0x10000000L
+#define CGTT_VGT_CLK_CTRL__SOFT_OVERRIDE3__SHIFT 0x0000001c
+#define CGTT_VGT_CLK_CTRL__SOFT_OVERRIDE4_MASK 0x08000000L
+#define CGTT_VGT_CLK_CTRL__SOFT_OVERRIDE4__SHIFT 0x0000001b
+#define CGTT_VGT_CLK_CTRL__SOFT_OVERRIDE7_MASK 0x01000000L
+#define CGTT_VGT_CLK_CTRL__SOFT_OVERRIDE7__SHIFT 0x00000018
+#define CLIPPER_DEBUG_REG00__ALWAYS_ZERO_MASK 0x000000ffL
+#define CLIPPER_DEBUG_REG00__ALWAYS_ZERO__SHIFT 0x00000000
+#define CLIPPER_DEBUG_REG00__ccgen_to_clipcc_fifo_empty_MASK 0x08000000L
+#define CLIPPER_DEBUG_REG00__ccgen_to_clipcc_fifo_empty__SHIFT 0x0000001b
+#define CLIPPER_DEBUG_REG00__ccgen_to_clipcc_fifo_full_MASK 0x10000000L
+#define CLIPPER_DEBUG_REG00__ccgen_to_clipcc_fifo_full__SHIFT 0x0000001c
+#define CLIPPER_DEBUG_REG00__clipcode_fifo_fifo_empty_MASK 0x00200000L
+#define CLIPPER_DEBUG_REG00__clipcode_fifo_fifo_empty__SHIFT 0x00000015
+#define CLIPPER_DEBUG_REG00__clipcode_fifo_full_MASK 0x00400000L
+#define CLIPPER_DEBUG_REG00__clipcode_fifo_full__SHIFT 0x00000016
+#define CLIPPER_DEBUG_REG00__clip_ga_bc_fifo_write_MASK 0x00000100L
+#define CLIPPER_DEBUG_REG00__clip_ga_bc_fifo_write__SHIFT 0x00000008
+#define CLIPPER_DEBUG_REG00__clip_to_ga_fifo_full_MASK 0x00001000L
+#define CLIPPER_DEBUG_REG00__clip_to_ga_fifo_full__SHIFT 0x0000000c
+#define CLIPPER_DEBUG_REG00__clip_to_ga_fifo_write_MASK 0x00000800L
+#define CLIPPER_DEBUG_REG00__clip_to_ga_fifo_write__SHIFT 0x0000000b
+#define CLIPPER_DEBUG_REG00__clip_to_outsm_fifo_empty_MASK 0x00008000L
+#define CLIPPER_DEBUG_REG00__clip_to_outsm_fifo_empty__SHIFT 0x0000000f
+#define CLIPPER_DEBUG_REG00__clip_to_outsm_fifo_full_MASK 0x00010000L
+#define CLIPPER_DEBUG_REG00__clip_to_outsm_fifo_full__SHIFT 0x00000010
+#define CLIPPER_DEBUG_REG00__clip_to_outsm_fifo_write_MASK 0x20000000L
+#define CLIPPER_DEBUG_REG00__clip_to_outsm_fifo_write__SHIFT 0x0000001d
+#define CLIPPER_DEBUG_REG00__primic_to_clprim_fifo_empty_MASK 0x00002000L
+#define CLIPPER_DEBUG_REG00__primic_to_clprim_fifo_empty__SHIFT 0x0000000d
+#define CLIPPER_DEBUG_REG00__primic_to_clprim_fifo_full_MASK 0x00004000L
+#define CLIPPER_DEBUG_REG00__primic_to_clprim_fifo_full__SHIFT 0x0000000e
+#define CLIPPER_DEBUG_REG00__su_clip_baryc_free_MASK 0x00000600L
+#define CLIPPER_DEBUG_REG00__su_clip_baryc_free__SHIFT 0x00000009
+#define CLIPPER_DEBUG_REG00__vgt_to_clipp_fifo_empty_MASK 0x00020000L
+#define CLIPPER_DEBUG_REG00__vgt_to_clipp_fifo_empty__SHIFT 0x00000011
+#define CLIPPER_DEBUG_REG00__vgt_to_clipp_fifo_full_MASK 0x00040000L
+#define CLIPPER_DEBUG_REG00__vgt_to_clipp_fifo_full__SHIFT 0x00000012
+#define CLIPPER_DEBUG_REG00__vgt_to_clipp_fifo_write_MASK 0x80000000L
+#define CLIPPER_DEBUG_REG00__vgt_to_clipp_fifo_write__SHIFT 0x0000001f
+#define CLIPPER_DEBUG_REG00__vgt_to_clips_fifo_empty_MASK 0x00080000L
+#define CLIPPER_DEBUG_REG00__vgt_to_clips_fifo_empty__SHIFT 0x00000013
+#define CLIPPER_DEBUG_REG00__vgt_to_clips_fifo_full_MASK 0x00100000L
+#define CLIPPER_DEBUG_REG00__vgt_to_clips_fifo_full__SHIFT 0x00000014
+#define CLIPPER_DEBUG_REG00__vte_out_clip_fifo_fifo_empty_MASK 0x00800000L
+#define CLIPPER_DEBUG_REG00__vte_out_clip_fifo_fifo_empty__SHIFT 0x00000017
+#define CLIPPER_DEBUG_REG00__vte_out_clip_fifo_fifo_full_MASK 0x01000000L
+#define CLIPPER_DEBUG_REG00__vte_out_clip_fifo_fifo_full__SHIFT 0x00000018
+#define CLIPPER_DEBUG_REG00__vte_out_orig_fifo_fifo_empty_MASK 0x02000000L
+#define CLIPPER_DEBUG_REG00__vte_out_orig_fifo_fifo_empty__SHIFT 0x00000019
+#define CLIPPER_DEBUG_REG00__vte_out_orig_fifo_fifo_full_MASK 0x04000000L
+#define CLIPPER_DEBUG_REG00__vte_out_orig_fifo_fifo_full__SHIFT 0x0000001a
+#define CLIPPER_DEBUG_REG00__vte_out_orig_fifo_fifo_write_MASK 0x40000000L
+#define CLIPPER_DEBUG_REG00__vte_out_orig_fifo_fifo_write__SHIFT 0x0000001e
+#define CLIPPER_DEBUG_REG01__ALWAYS_ZERO_MASK 0x000000ffL
+#define CLIPPER_DEBUG_REG01__ALWAYS_ZERO__SHIFT 0x00000000
+#define CLIPPER_DEBUG_REG01__clip_extra_bc_valid_MASK 0x00000700L
+#define CLIPPER_DEBUG_REG01__clip_extra_bc_valid__SHIFT 0x00000008
+#define CLIPPER_DEBUG_REG01__clip_ga_bc_fifo_write_MASK 0x10000000L
+#define CLIPPER_DEBUG_REG01__clip_ga_bc_fifo_write__SHIFT 0x0000001c
+#define CLIPPER_DEBUG_REG01__clip_to_ga_fifo_write_MASK 0x20000000L
+#define CLIPPER_DEBUG_REG01__clip_to_ga_fifo_write__SHIFT 0x0000001d
+#define CLIPPER_DEBUG_REG01__clip_to_outsm_deallocate_slot_MASK 0x000e0000L
+#define CLIPPER_DEBUG_REG01__clip_to_outsm_deallocate_slot__SHIFT 0x00000011
+#define CLIPPER_DEBUG_REG01__clip_to_outsm_null_primitive_MASK 0x00100000L
+#define CLIPPER_DEBUG_REG01__clip_to_outsm_null_primitive__SHIFT 0x00000014
+#define CLIPPER_DEBUG_REG01__clip_to_outsm_vertex_deallocate_MASK 0x0001c000L
+#define CLIPPER_DEBUG_REG01__clip_to_outsm_vertex_deallocate__SHIFT 0x0000000e
+#define CLIPPER_DEBUG_REG01__clip_vert_vte_valid_MASK 0x00003800L
+#define CLIPPER_DEBUG_REG01__clip_vert_vte_valid__SHIFT 0x0000000b
+#define CLIPPER_DEBUG_REG01__vte_out_clip_fifo_fifo_advanceread_MASK 0x40000000L
+#define CLIPPER_DEBUG_REG01__vte_out_clip_fifo_fifo_advanceread__SHIFT 0x0000001e
+#define CLIPPER_DEBUG_REG01__vte_out_clip_fifo_fifo_empty_MASK 0x80000000L
+#define CLIPPER_DEBUG_REG01__vte_out_clip_fifo_fifo_empty__SHIFT 0x0000001f
+#define CLIPPER_DEBUG_REG01__vte_out_clip_rd_extra_bc_valid_MASK 0x01000000L
+#define CLIPPER_DEBUG_REG01__vte_out_clip_rd_extra_bc_valid__SHIFT 0x00000018
+#define CLIPPER_DEBUG_REG01__vte_out_clip_rd_vertex_store_indx_MASK 0x0c000000L
+#define CLIPPER_DEBUG_REG01__vte_out_clip_rd_vertex_store_indx__SHIFT 0x0000001a
+#define CLIPPER_DEBUG_REG01__vte_out_clip_rd_vte_naninf_kill_MASK 0x02000000L
+#define CLIPPER_DEBUG_REG01__vte_out_clip_rd_vte_naninf_kill__SHIFT 0x00000019
+#define CLIPPER_DEBUG_REG01__vte_positions_vte_clip_vte_naninf_kill_0_MASK 0x00800000L
+#define CLIPPER_DEBUG_REG01__vte_positions_vte_clip_vte_naninf_kill_0__SHIFT 0x00000017
+#define CLIPPER_DEBUG_REG01__vte_positions_vte_clip_vte_naninf_kill_1_MASK 0x00400000L
+#define CLIPPER_DEBUG_REG01__vte_positions_vte_clip_vte_naninf_kill_1__SHIFT 0x00000016
+#define CLIPPER_DEBUG_REG01__vte_positions_vte_clip_vte_naninf_kill_2_MASK 0x00200000L
+#define CLIPPER_DEBUG_REG01__vte_positions_vte_clip_vte_naninf_kill_2__SHIFT 0x00000015
+#define CLIPPER_DEBUG_REG02__clip_extra_bc_valid_MASK 0x00000007L
+#define CLIPPER_DEBUG_REG02__clip_extra_bc_valid__SHIFT 0x00000000
+#define CLIPPER_DEBUG_REG02__clip_ga_bc_fifo_full_MASK 0x04000000L
+#define CLIPPER_DEBUG_REG02__clip_ga_bc_fifo_full__SHIFT 0x0000001a
+#define CLIPPER_DEBUG_REG02__clip_ga_bc_fifo_write_MASK 0x10000000L
+#define CLIPPER_DEBUG_REG02__clip_ga_bc_fifo_write__SHIFT 0x0000001c
+#define CLIPPER_DEBUG_REG02__clip_to_clipga_extra_bc_coords_MASK 0x00100000L
+#define CLIPPER_DEBUG_REG02__clip_to_clipga_extra_bc_coords__SHIFT 0x00000014
+#define CLIPPER_DEBUG_REG02__clip_to_clipga_vte_naninf_kill_MASK 0x00200000L
+#define CLIPPER_DEBUG_REG02__clip_to_clipga_vte_naninf_kill__SHIFT 0x00000015
+#define CLIPPER_DEBUG_REG02__clip_to_ga_fifo_full_MASK 0x08000000L
+#define CLIPPER_DEBUG_REG02__clip_to_ga_fifo_full__SHIFT 0x0000001b
+#define CLIPPER_DEBUG_REG02__clip_to_ga_fifo_write_MASK 0x20000000L
+#define CLIPPER_DEBUG_REG02__clip_to_ga_fifo_write__SHIFT 0x0000001d
+#define CLIPPER_DEBUG_REG02__clip_to_outsm_clipped_prim_MASK 0x01000000L
+#define CLIPPER_DEBUG_REG02__clip_to_outsm_clipped_prim__SHIFT 0x00000018
+#define CLIPPER_DEBUG_REG02__clip_to_outsm_clip_seq_indx_MASK 0x000000c0L
+#define CLIPPER_DEBUG_REG02__clip_to_outsm_clip_seq_indx__SHIFT 0x00000006
+#define CLIPPER_DEBUG_REG02__clip_to_outsm_end_of_packet_MASK 0x00400000L
+#define CLIPPER_DEBUG_REG02__clip_to_outsm_end_of_packet__SHIFT 0x00000016
+#define CLIPPER_DEBUG_REG02__clip_to_outsm_fifo_advanceread_MASK 0x40000000L
+#define CLIPPER_DEBUG_REG02__clip_to_outsm_fifo_advanceread__SHIFT 0x0000001e
+#define CLIPPER_DEBUG_REG02__clip_to_outsm_fifo_empty_MASK 0x80000000L
+#define CLIPPER_DEBUG_REG02__clip_to_outsm_fifo_empty__SHIFT 0x0000001f
+#define CLIPPER_DEBUG_REG02__clip_to_outsm_first_prim_of_slot_MASK 0x00800000L
+#define CLIPPER_DEBUG_REG02__clip_to_outsm_first_prim_of_slot__SHIFT 0x00000017
+#define CLIPPER_DEBUG_REG02__clip_to_outsm_null_primitive_MASK 0x02000000L
+#define CLIPPER_DEBUG_REG02__clip_to_outsm_null_primitive__SHIFT 0x00000019
+#define CLIPPER_DEBUG_REG02__clip_to_outsm_vertex_store_indx_0_MASK 0x000f0000L
+#define CLIPPER_DEBUG_REG02__clip_to_outsm_vertex_store_indx_0__SHIFT 0x00000010
+#define CLIPPER_DEBUG_REG02__clip_to_outsm_vertex_store_indx_1_MASK 0x0000f000L
+#define CLIPPER_DEBUG_REG02__clip_to_outsm_vertex_store_indx_1__SHIFT 0x0000000c
+#define CLIPPER_DEBUG_REG02__clip_to_outsm_vertex_store_indx_2_MASK 0x00000f00L
+#define CLIPPER_DEBUG_REG02__clip_to_outsm_vertex_store_indx_2__SHIFT 0x00000008
+#define CLIPPER_DEBUG_REG02__clip_vert_vte_valid_MASK 0x00000038L
+#define CLIPPER_DEBUG_REG02__clip_vert_vte_valid__SHIFT 0x00000003
+#define CLIPPER_DEBUG_REG03__clipsm0_clprim_to_clip_clip_code_or_MASK 0x00003fffL
+#define CLIPPER_DEBUG_REG03__clipsm0_clprim_to_clip_clip_code_or__SHIFT 0x00000000
+#define CLIPPER_DEBUG_REG03__clipsm0_clprim_to_clip_clip_primitive_MASK 0x00800000L
+#define CLIPPER_DEBUG_REG03__clipsm0_clprim_to_clip_clip_primitive__SHIFT 0x00000017
+#define CLIPPER_DEBUG_REG03__clipsm0_clprim_to_clip_deallocate_slot_MASK 0x07000000L
+#define CLIPPER_DEBUG_REG03__clipsm0_clprim_to_clip_deallocate_slot__SHIFT 0x00000018
+#define CLIPPER_DEBUG_REG03__clipsm0_clprim_to_clip_end_of_packet_MASK 0x10000000L
+#define CLIPPER_DEBUG_REG03__clipsm0_clprim_to_clip_end_of_packet__SHIFT 0x0000001c
+#define CLIPPER_DEBUG_REG03__clipsm0_clprim_to_clip_event_id_MASK 0x000fc000L
+#define CLIPPER_DEBUG_REG03__clipsm0_clprim_to_clip_event_id__SHIFT 0x0000000e
+#define CLIPPER_DEBUG_REG03__clipsm0_clprim_to_clip_event_MASK 0x20000000L
+#define CLIPPER_DEBUG_REG03__clipsm0_clprim_to_clip_event__SHIFT 0x0000001d
+#define CLIPPER_DEBUG_REG03__clipsm0_clprim_to_clip_first_prim_of_slot_MASK 0x08000000L
+#define CLIPPER_DEBUG_REG03__clipsm0_clprim_to_clip_first_prim_of_slot__SHIFT 0x0000001b
+#define CLIPPER_DEBUG_REG03__clipsm0_clprim_to_clip_null_primitive_MASK 0x40000000L
+#define CLIPPER_DEBUG_REG03__clipsm0_clprim_to_clip_null_primitive__SHIFT 0x0000001e
+#define CLIPPER_DEBUG_REG03__clipsm0_clprim_to_clip_prim_valid_MASK 0x80000000L
+#define CLIPPER_DEBUG_REG03__clipsm0_clprim_to_clip_prim_valid__SHIFT 0x0000001f
+#define CLIPPER_DEBUG_REG03__clipsm0_clprim_to_clip_state_var_indx_MASK 0x00700000L
+#define CLIPPER_DEBUG_REG03__clipsm0_clprim_to_clip_state_var_indx__SHIFT 0x00000014
+#define CLIPPER_DEBUG_REG04__clipsm0_clprim_to_clip_event_MASK 0x20000000L
+#define CLIPPER_DEBUG_REG04__clipsm0_clprim_to_clip_event__SHIFT 0x0000001d
+#define CLIPPER_DEBUG_REG04__clipsm0_clprim_to_clip_null_primitive_MASK 0x40000000L
+#define CLIPPER_DEBUG_REG04__clipsm0_clprim_to_clip_null_primitive__SHIFT 0x0000001e
+#define CLIPPER_DEBUG_REG04__clipsm0_clprim_to_clip_param_cache_indx_0_MASK 0x000007feL
+#define CLIPPER_DEBUG_REG04__clipsm0_clprim_to_clip_param_cache_indx_0__SHIFT 0x00000001
+#define CLIPPER_DEBUG_REG04__clipsm0_clprim_to_clip_prim_valid_MASK 0x80000000L
+#define CLIPPER_DEBUG_REG04__clipsm0_clprim_to_clip_prim_valid__SHIFT 0x0000001f
+#define CLIPPER_DEBUG_REG04__clipsm0_clprim_to_clip_vertex_store_indx_0_MASK 0x1f800000L
+#define CLIPPER_DEBUG_REG04__clipsm0_clprim_to_clip_vertex_store_indx_0__SHIFT 0x00000017
+#define CLIPPER_DEBUG_REG04__clipsm0_clprim_to_clip_vertex_store_indx_1_MASK 0x007e0000L
+#define CLIPPER_DEBUG_REG04__clipsm0_clprim_to_clip_vertex_store_indx_1__SHIFT 0x00000011
+#define CLIPPER_DEBUG_REG04__clipsm0_clprim_to_clip_vertex_store_indx_2_MASK 0x0001f800L
+#define CLIPPER_DEBUG_REG04__clipsm0_clprim_to_clip_vertex_store_indx_2__SHIFT 0x0000000b
+#define CLIPPER_DEBUG_REG05__clipsm1_clprim_to_clip_clip_code_or_MASK 0x00003fffL
+#define CLIPPER_DEBUG_REG05__clipsm1_clprim_to_clip_clip_code_or__SHIFT 0x00000000
+#define CLIPPER_DEBUG_REG05__clipsm1_clprim_to_clip_clip_primitive_MASK 0x00800000L
+#define CLIPPER_DEBUG_REG05__clipsm1_clprim_to_clip_clip_primitive__SHIFT 0x00000017
+#define CLIPPER_DEBUG_REG05__clipsm1_clprim_to_clip_deallocate_slot_MASK 0x07000000L
+#define CLIPPER_DEBUG_REG05__clipsm1_clprim_to_clip_deallocate_slot__SHIFT 0x00000018
+#define CLIPPER_DEBUG_REG05__clipsm1_clprim_to_clip_end_of_packet_MASK 0x10000000L
+#define CLIPPER_DEBUG_REG05__clipsm1_clprim_to_clip_end_of_packet__SHIFT 0x0000001c
+#define CLIPPER_DEBUG_REG05__clipsm1_clprim_to_clip_event_id_MASK 0x000fc000L
+#define CLIPPER_DEBUG_REG05__clipsm1_clprim_to_clip_event_id__SHIFT 0x0000000e
+#define CLIPPER_DEBUG_REG05__clipsm1_clprim_to_clip_event_MASK 0x20000000L
+#define CLIPPER_DEBUG_REG05__clipsm1_clprim_to_clip_event__SHIFT 0x0000001d
+#define CLIPPER_DEBUG_REG05__clipsm1_clprim_to_clip_first_prim_of_slot_MASK 0x08000000L
+#define CLIPPER_DEBUG_REG05__clipsm1_clprim_to_clip_first_prim_of_slot__SHIFT 0x0000001b
+#define CLIPPER_DEBUG_REG05__clipsm1_clprim_to_clip_null_primitive_MASK 0x40000000L
+#define CLIPPER_DEBUG_REG05__clipsm1_clprim_to_clip_null_primitive__SHIFT 0x0000001e
+#define CLIPPER_DEBUG_REG05__clipsm1_clprim_to_clip_prim_valid_MASK 0x80000000L
+#define CLIPPER_DEBUG_REG05__clipsm1_clprim_to_clip_prim_valid__SHIFT 0x0000001f
+#define CLIPPER_DEBUG_REG05__clipsm1_clprim_to_clip_state_var_indx_MASK 0x00700000L
+#define CLIPPER_DEBUG_REG05__clipsm1_clprim_to_clip_state_var_indx__SHIFT 0x00000014
+#define CLIPPER_DEBUG_REG06__clipsm1_clprim_to_clip_event_MASK 0x20000000L
+#define CLIPPER_DEBUG_REG06__clipsm1_clprim_to_clip_event__SHIFT 0x0000001d
+#define CLIPPER_DEBUG_REG06__clipsm1_clprim_to_clip_null_primitive_MASK 0x40000000L
+#define CLIPPER_DEBUG_REG06__clipsm1_clprim_to_clip_null_primitive__SHIFT 0x0000001e
+#define CLIPPER_DEBUG_REG06__clipsm1_clprim_to_clip_param_cache_indx_0_MASK 0x000007feL
+#define CLIPPER_DEBUG_REG06__clipsm1_clprim_to_clip_param_cache_indx_0__SHIFT 0x00000001
+#define CLIPPER_DEBUG_REG06__clipsm1_clprim_to_clip_prim_valid_MASK 0x80000000L
+#define CLIPPER_DEBUG_REG06__clipsm1_clprim_to_clip_prim_valid__SHIFT 0x0000001f
+#define CLIPPER_DEBUG_REG06__clipsm1_clprim_to_clip_vertex_store_indx_0_MASK 0x1f800000L
+#define CLIPPER_DEBUG_REG06__clipsm1_clprim_to_clip_vertex_store_indx_0__SHIFT 0x00000017
+#define CLIPPER_DEBUG_REG06__clipsm1_clprim_to_clip_vertex_store_indx_1_MASK 0x007e0000L
+#define CLIPPER_DEBUG_REG06__clipsm1_clprim_to_clip_vertex_store_indx_1__SHIFT 0x00000011
+#define CLIPPER_DEBUG_REG06__clipsm1_clprim_to_clip_vertex_store_indx_2_MASK 0x0001f800L
+#define CLIPPER_DEBUG_REG06__clipsm1_clprim_to_clip_vertex_store_indx_2__SHIFT 0x0000000b
+#define CLIPPER_DEBUG_REG07__clipsm2_clprim_to_clip_clip_code_or_MASK 0x00003fffL
+#define CLIPPER_DEBUG_REG07__clipsm2_clprim_to_clip_clip_code_or__SHIFT 0x00000000
+#define CLIPPER_DEBUG_REG07__clipsm2_clprim_to_clip_clip_primitive_MASK 0x00800000L
+#define CLIPPER_DEBUG_REG07__clipsm2_clprim_to_clip_clip_primitive__SHIFT 0x00000017
+#define CLIPPER_DEBUG_REG07__clipsm2_clprim_to_clip_deallocate_slot_MASK 0x07000000L
+#define CLIPPER_DEBUG_REG07__clipsm2_clprim_to_clip_deallocate_slot__SHIFT 0x00000018
+#define CLIPPER_DEBUG_REG07__clipsm2_clprim_to_clip_end_of_packet_MASK 0x10000000L
+#define CLIPPER_DEBUG_REG07__clipsm2_clprim_to_clip_end_of_packet__SHIFT 0x0000001c
+#define CLIPPER_DEBUG_REG07__clipsm2_clprim_to_clip_event_id_MASK 0x000fc000L
+#define CLIPPER_DEBUG_REG07__clipsm2_clprim_to_clip_event_id__SHIFT 0x0000000e
+#define CLIPPER_DEBUG_REG07__clipsm2_clprim_to_clip_event_MASK 0x20000000L
+#define CLIPPER_DEBUG_REG07__clipsm2_clprim_to_clip_event__SHIFT 0x0000001d
+#define CLIPPER_DEBUG_REG07__clipsm2_clprim_to_clip_first_prim_of_slot_MASK 0x08000000L
+#define CLIPPER_DEBUG_REG07__clipsm2_clprim_to_clip_first_prim_of_slot__SHIFT 0x0000001b
+#define CLIPPER_DEBUG_REG07__clipsm2_clprim_to_clip_null_primitive_MASK 0x40000000L
+#define CLIPPER_DEBUG_REG07__clipsm2_clprim_to_clip_null_primitive__SHIFT 0x0000001e
+#define CLIPPER_DEBUG_REG07__clipsm2_clprim_to_clip_prim_valid_MASK 0x80000000L
+#define CLIPPER_DEBUG_REG07__clipsm2_clprim_to_clip_prim_valid__SHIFT 0x0000001f
+#define CLIPPER_DEBUG_REG07__clipsm2_clprim_to_clip_state_var_indx_MASK 0x00700000L
+#define CLIPPER_DEBUG_REG07__clipsm2_clprim_to_clip_state_var_indx__SHIFT 0x00000014
+#define CLIPPER_DEBUG_REG08__clipsm2_clprim_to_clip_event_MASK 0x20000000L
+#define CLIPPER_DEBUG_REG08__clipsm2_clprim_to_clip_event__SHIFT 0x0000001d
+#define CLIPPER_DEBUG_REG08__clipsm2_clprim_to_clip_null_primitive_MASK 0x40000000L
+#define CLIPPER_DEBUG_REG08__clipsm2_clprim_to_clip_null_primitive__SHIFT 0x0000001e
+#define CLIPPER_DEBUG_REG08__clipsm2_clprim_to_clip_param_cache_indx_0_MASK 0x000007feL
+#define CLIPPER_DEBUG_REG08__clipsm2_clprim_to_clip_param_cache_indx_0__SHIFT 0x00000001
+#define CLIPPER_DEBUG_REG08__clipsm2_clprim_to_clip_prim_valid_MASK 0x80000000L
+#define CLIPPER_DEBUG_REG08__clipsm2_clprim_to_clip_prim_valid__SHIFT 0x0000001f
+#define CLIPPER_DEBUG_REG08__clipsm2_clprim_to_clip_vertex_store_indx_0_MASK 0x1f800000L
+#define CLIPPER_DEBUG_REG08__clipsm2_clprim_to_clip_vertex_store_indx_0__SHIFT 0x00000017
+#define CLIPPER_DEBUG_REG08__clipsm2_clprim_to_clip_vertex_store_indx_1_MASK 0x007e0000L
+#define CLIPPER_DEBUG_REG08__clipsm2_clprim_to_clip_vertex_store_indx_1__SHIFT 0x00000011
+#define CLIPPER_DEBUG_REG08__clipsm2_clprim_to_clip_vertex_store_indx_2_MASK 0x0001f800L
+#define CLIPPER_DEBUG_REG08__clipsm2_clprim_to_clip_vertex_store_indx_2__SHIFT 0x0000000b
+#define CLIPPER_DEBUG_REG09__clipsm3_clprim_to_clip_clip_code_or_MASK 0x00003fffL
+#define CLIPPER_DEBUG_REG09__clipsm3_clprim_to_clip_clip_code_or__SHIFT 0x00000000
+#define CLIPPER_DEBUG_REG09__clipsm3_clprim_to_clip_clip_primitive_MASK 0x00800000L
+#define CLIPPER_DEBUG_REG09__clipsm3_clprim_to_clip_clip_primitive__SHIFT 0x00000017
+#define CLIPPER_DEBUG_REG09__clipsm3_clprim_to_clip_deallocate_slot_MASK 0x07000000L
+#define CLIPPER_DEBUG_REG09__clipsm3_clprim_to_clip_deallocate_slot__SHIFT 0x00000018
+#define CLIPPER_DEBUG_REG09__clipsm3_clprim_to_clip_end_of_packet_MASK 0x10000000L
+#define CLIPPER_DEBUG_REG09__clipsm3_clprim_to_clip_end_of_packet__SHIFT 0x0000001c
+#define CLIPPER_DEBUG_REG09__clipsm3_clprim_to_clip_event_id_MASK 0x000fc000L
+#define CLIPPER_DEBUG_REG09__clipsm3_clprim_to_clip_event_id__SHIFT 0x0000000e
+#define CLIPPER_DEBUG_REG09__clipsm3_clprim_to_clip_event_MASK 0x20000000L
+#define CLIPPER_DEBUG_REG09__clipsm3_clprim_to_clip_event__SHIFT 0x0000001d
+#define CLIPPER_DEBUG_REG09__clipsm3_clprim_to_clip_first_prim_of_slot_MASK 0x08000000L
+#define CLIPPER_DEBUG_REG09__clipsm3_clprim_to_clip_first_prim_of_slot__SHIFT 0x0000001b
+#define CLIPPER_DEBUG_REG09__clipsm3_clprim_to_clip_null_primitive_MASK 0x40000000L
+#define CLIPPER_DEBUG_REG09__clipsm3_clprim_to_clip_null_primitive__SHIFT 0x0000001e
+#define CLIPPER_DEBUG_REG09__clipsm3_clprim_to_clip_prim_valid_MASK 0x80000000L
+#define CLIPPER_DEBUG_REG09__clipsm3_clprim_to_clip_prim_valid__SHIFT 0x0000001f
+#define CLIPPER_DEBUG_REG09__clipsm3_clprim_to_clip_state_var_indx_MASK 0x00700000L
+#define CLIPPER_DEBUG_REG09__clipsm3_clprim_to_clip_state_var_indx__SHIFT 0x00000014
+#define CLIPPER_DEBUG_REG10__clipsm3_clprim_to_clip_event_MASK 0x20000000L
+#define CLIPPER_DEBUG_REG10__clipsm3_clprim_to_clip_event__SHIFT 0x0000001d
+#define CLIPPER_DEBUG_REG10__clipsm3_clprim_to_clip_null_primitive_MASK 0x40000000L
+#define CLIPPER_DEBUG_REG10__clipsm3_clprim_to_clip_null_primitive__SHIFT 0x0000001e
+#define CLIPPER_DEBUG_REG10__clipsm3_clprim_to_clip_param_cache_indx_0_MASK 0x000007feL
+#define CLIPPER_DEBUG_REG10__clipsm3_clprim_to_clip_param_cache_indx_0__SHIFT 0x00000001
+#define CLIPPER_DEBUG_REG10__clipsm3_clprim_to_clip_prim_valid_MASK 0x80000000L
+#define CLIPPER_DEBUG_REG10__clipsm3_clprim_to_clip_prim_valid__SHIFT 0x0000001f
+#define CLIPPER_DEBUG_REG10__clipsm3_clprim_to_clip_vertex_store_indx_0_MASK 0x1f800000L
+#define CLIPPER_DEBUG_REG10__clipsm3_clprim_to_clip_vertex_store_indx_0__SHIFT 0x00000017
+#define CLIPPER_DEBUG_REG10__clipsm3_clprim_to_clip_vertex_store_indx_1_MASK 0x007e0000L
+#define CLIPPER_DEBUG_REG10__clipsm3_clprim_to_clip_vertex_store_indx_1__SHIFT 0x00000011
+#define CLIPPER_DEBUG_REG10__clipsm3_clprim_to_clip_vertex_store_indx_2_MASK 0x0001f800L
+#define CLIPPER_DEBUG_REG10__clipsm3_clprim_to_clip_vertex_store_indx_2__SHIFT 0x0000000b
+#define CLIPPER_DEBUG_REG11__clipsm0_clip_to_clipga_clip_primitive_MASK 0x00000080L
+#define CLIPPER_DEBUG_REG11__clipsm0_clip_to_clipga_clip_primitive__SHIFT 0x00000007
+#define CLIPPER_DEBUG_REG11__clipsm0_clip_to_clipga_clip_to_outsm_cnt_MASK 0x00f00000L
+#define CLIPPER_DEBUG_REG11__clipsm0_clip_to_clipga_clip_to_outsm_cnt__SHIFT 0x00000014
+#define CLIPPER_DEBUG_REG11__clipsm0_clip_to_clipga_event_MASK 0x00000008L
+#define CLIPPER_DEBUG_REG11__clipsm0_clip_to_clipga_event__SHIFT 0x00000003
+#define CLIPPER_DEBUG_REG11__clipsm0_clip_to_clipga_prim_valid_MASK 0x08000000L
+#define CLIPPER_DEBUG_REG11__clipsm0_clip_to_clipga_prim_valid__SHIFT 0x0000001b
+#define CLIPPER_DEBUG_REG11__clipsm0_inc_clip_to_clipga_clip_to_outsm_cnt_MASK 0x80000000L
+#define CLIPPER_DEBUG_REG11__clipsm0_inc_clip_to_clipga_clip_to_outsm_cnt__SHIFT 0x0000001f
+#define CLIPPER_DEBUG_REG11__clipsm1_clip_to_clipga_clip_primitive_MASK 0x00000040L
+#define CLIPPER_DEBUG_REG11__clipsm1_clip_to_clipga_clip_primitive__SHIFT 0x00000006
+#define CLIPPER_DEBUG_REG11__clipsm1_clip_to_clipga_clip_to_outsm_cnt_MASK 0x000f0000L
+#define CLIPPER_DEBUG_REG11__clipsm1_clip_to_clipga_clip_to_outsm_cnt__SHIFT 0x00000010
+#define CLIPPER_DEBUG_REG11__clipsm1_clip_to_clipga_event_MASK 0x00000004L
+#define CLIPPER_DEBUG_REG11__clipsm1_clip_to_clipga_event__SHIFT 0x00000002
+#define CLIPPER_DEBUG_REG11__clipsm1_clip_to_clipga_prim_valid_MASK 0x04000000L
+#define CLIPPER_DEBUG_REG11__clipsm1_clip_to_clipga_prim_valid__SHIFT 0x0000001a
+#define CLIPPER_DEBUG_REG11__clipsm1_inc_clip_to_clipga_clip_to_outsm_cnt_MASK 0x40000000L
+#define CLIPPER_DEBUG_REG11__clipsm1_inc_clip_to_clipga_clip_to_outsm_cnt__SHIFT 0x0000001e
+#define CLIPPER_DEBUG_REG11__clipsm2_clip_to_clipga_clip_primitive_MASK 0x00000020L
+#define CLIPPER_DEBUG_REG11__clipsm2_clip_to_clipga_clip_primitive__SHIFT 0x00000005
+#define CLIPPER_DEBUG_REG11__clipsm2_clip_to_clipga_clip_to_outsm_cnt_MASK 0x0000f000L
+#define CLIPPER_DEBUG_REG11__clipsm2_clip_to_clipga_clip_to_outsm_cnt__SHIFT 0x0000000c
+#define CLIPPER_DEBUG_REG11__clipsm2_clip_to_clipga_event_MASK 0x00000002L
+#define CLIPPER_DEBUG_REG11__clipsm2_clip_to_clipga_event__SHIFT 0x00000001
+#define CLIPPER_DEBUG_REG11__clipsm2_clip_to_clipga_prim_valid_MASK 0x02000000L
+#define CLIPPER_DEBUG_REG11__clipsm2_clip_to_clipga_prim_valid__SHIFT 0x00000019
+#define CLIPPER_DEBUG_REG11__clipsm2_inc_clip_to_clipga_clip_to_outsm_cnt_MASK 0x20000000L
+#define CLIPPER_DEBUG_REG11__clipsm2_inc_clip_to_clipga_clip_to_outsm_cnt__SHIFT 0x0000001d
+#define CLIPPER_DEBUG_REG11__clipsm3_clip_to_clipga_clip_primitive_MASK 0x00000010L
+#define CLIPPER_DEBUG_REG11__clipsm3_clip_to_clipga_clip_primitive__SHIFT 0x00000004
+#define CLIPPER_DEBUG_REG11__clipsm3_clip_to_clipga_clip_to_outsm_cnt_MASK 0x00000f00L
+#define CLIPPER_DEBUG_REG11__clipsm3_clip_to_clipga_clip_to_outsm_cnt__SHIFT 0x00000008
+#define CLIPPER_DEBUG_REG11__clipsm3_clip_to_clipga_event_MASK 0x00000001L
+#define CLIPPER_DEBUG_REG11__clipsm3_clip_to_clipga_event__SHIFT 0x00000000
+#define CLIPPER_DEBUG_REG11__clipsm3_clip_to_clipga_prim_valid_MASK 0x01000000L
+#define CLIPPER_DEBUG_REG11__clipsm3_clip_to_clipga_prim_valid__SHIFT 0x00000018
+#define CLIPPER_DEBUG_REG11__clipsm3_inc_clip_to_clipga_clip_to_outsm_cnt_MASK 0x10000000L
+#define CLIPPER_DEBUG_REG11__clipsm3_inc_clip_to_clipga_clip_to_outsm_cnt__SHIFT 0x0000001c
+#define CLIPPER_DEBUG_REG12__ALWAYS_ZERO_MASK 0x000000ffL
+#define CLIPPER_DEBUG_REG12__ALWAYS_ZERO__SHIFT 0x00000000
+#define CLIPPER_DEBUG_REG12__clip_priority_available_clip_verts_MASK 0x0003e000L
+#define CLIPPER_DEBUG_REG12__clip_priority_available_clip_verts__SHIFT 0x0000000d
+#define CLIPPER_DEBUG_REG12__clip_priority_available_vte_out_clip_MASK 0x00001f00L
+#define CLIPPER_DEBUG_REG12__clip_priority_available_vte_out_clip__SHIFT 0x00000008
+#define CLIPPER_DEBUG_REG12__clip_priority_seq_indx_load_MASK 0x00c00000L
+#define CLIPPER_DEBUG_REG12__clip_priority_seq_indx_load__SHIFT 0x00000016
+#define CLIPPER_DEBUG_REG12__clip_priority_seq_indx_out_MASK 0x000c0000L
+#define CLIPPER_DEBUG_REG12__clip_priority_seq_indx_out__SHIFT 0x00000012
+#define CLIPPER_DEBUG_REG12__clip_priority_seq_indx_vert_MASK 0x00300000L
+#define CLIPPER_DEBUG_REG12__clip_priority_seq_indx_vert__SHIFT 0x00000014
+#define CLIPPER_DEBUG_REG12__clipsm0_clprim_to_clip_clip_primitive_MASK 0x40000000L
+#define CLIPPER_DEBUG_REG12__clipsm0_clprim_to_clip_clip_primitive__SHIFT 0x0000001e
+#define CLIPPER_DEBUG_REG12__clipsm0_clprim_to_clip_prim_valid_MASK 0x80000000L
+#define CLIPPER_DEBUG_REG12__clipsm0_clprim_to_clip_prim_valid__SHIFT 0x0000001f
+#define CLIPPER_DEBUG_REG12__clipsm1_clprim_to_clip_clip_primitive_MASK 0x10000000L
+#define CLIPPER_DEBUG_REG12__clipsm1_clprim_to_clip_clip_primitive__SHIFT 0x0000001c
+#define CLIPPER_DEBUG_REG12__clipsm1_clprim_to_clip_prim_valid_MASK 0x20000000L
+#define CLIPPER_DEBUG_REG12__clipsm1_clprim_to_clip_prim_valid__SHIFT 0x0000001d
+#define CLIPPER_DEBUG_REG12__clipsm2_clprim_to_clip_clip_primitive_MASK 0x04000000L
+#define CLIPPER_DEBUG_REG12__clipsm2_clprim_to_clip_clip_primitive__SHIFT 0x0000001a
+#define CLIPPER_DEBUG_REG12__clipsm2_clprim_to_clip_prim_valid_MASK 0x08000000L
+#define CLIPPER_DEBUG_REG12__clipsm2_clprim_to_clip_prim_valid__SHIFT 0x0000001b
+#define CLIPPER_DEBUG_REG12__clipsm3_clprim_to_clip_clip_primitive_MASK 0x01000000L
+#define CLIPPER_DEBUG_REG12__clipsm3_clprim_to_clip_clip_primitive__SHIFT 0x00000018
+#define CLIPPER_DEBUG_REG12__clipsm3_clprim_to_clip_prim_valid_MASK 0x02000000L
+#define CLIPPER_DEBUG_REG12__clipsm3_clprim_to_clip_prim_valid__SHIFT 0x00000019
+#define CLIPPER_DEBUG_REG13__ccgen_to_clipcc_fifo_empty_MASK 0x00010000L
+#define CLIPPER_DEBUG_REG13__ccgen_to_clipcc_fifo_empty__SHIFT 0x00000010
+#define CLIPPER_DEBUG_REG13__clipcc_vertex_store_indx_MASK 0x00003000L
+#define CLIPPER_DEBUG_REG13__clipcc_vertex_store_indx__SHIFT 0x0000000c
+#define CLIPPER_DEBUG_REG13__clipcode_fifo_fifo_empty_MASK 0x00008000L
+#define CLIPPER_DEBUG_REG13__clipcode_fifo_fifo_empty__SHIFT 0x0000000f
+#define CLIPPER_DEBUG_REG13__clip_priority_seq_indx_out_cnt_MASK 0x001e0000L
+#define CLIPPER_DEBUG_REG13__clip_priority_seq_indx_out_cnt__SHIFT 0x00000011
+#define CLIPPER_DEBUG_REG13__clprim_clip_primitive_MASK 0x00000020L
+#define CLIPPER_DEBUG_REG13__clprim_clip_primitive__SHIFT 0x00000005
+#define CLIPPER_DEBUG_REG13__clprim_cull_primitive_MASK 0x00000040L
+#define CLIPPER_DEBUG_REG13__clprim_cull_primitive__SHIFT 0x00000006
+#define CLIPPER_DEBUG_REG13__clprim_in_back_state_var_indx_MASK 0x00000007L
+#define CLIPPER_DEBUG_REG13__clprim_in_back_state_var_indx__SHIFT 0x00000000
+#define CLIPPER_DEBUG_REG13__outsm_clr_fifo_advanceread_MASK 0x40000000L
+#define CLIPPER_DEBUG_REG13__outsm_clr_fifo_advanceread__SHIFT 0x0000001e
+#define CLIPPER_DEBUG_REG13__outsm_clr_fifo_contents_MASK 0x1f000000L
+#define CLIPPER_DEBUG_REG13__outsm_clr_fifo_contents__SHIFT 0x00000018
+#define CLIPPER_DEBUG_REG13__outsm_clr_fifo_full_MASK 0x20000000L
+#define CLIPPER_DEBUG_REG13__outsm_clr_fifo_full__SHIFT 0x0000001d
+#define CLIPPER_DEBUG_REG13__outsm_clr_fifo_write_MASK 0x80000000L
+#define CLIPPER_DEBUG_REG13__outsm_clr_fifo_write__SHIFT 0x0000001f
+#define CLIPPER_DEBUG_REG13__outsm_clr_rd_clipsm_wait_MASK 0x00800000L
+#define CLIPPER_DEBUG_REG13__outsm_clr_rd_clipsm_wait__SHIFT 0x00000017
+#define CLIPPER_DEBUG_REG13__outsm_clr_rd_orig_vertices_MASK 0x00600000L
+#define CLIPPER_DEBUG_REG13__outsm_clr_rd_orig_vertices__SHIFT 0x00000015
+#define CLIPPER_DEBUG_REG13__point_clip_candidate_MASK 0x00000008L
+#define CLIPPER_DEBUG_REG13__point_clip_candidate__SHIFT 0x00000003
+#define CLIPPER_DEBUG_REG13__prim_back_valid_MASK 0x00000080L
+#define CLIPPER_DEBUG_REG13__prim_back_valid__SHIFT 0x00000007
+#define CLIPPER_DEBUG_REG13__prim_nan_kill_MASK 0x00000010L
+#define CLIPPER_DEBUG_REG13__prim_nan_kill__SHIFT 0x00000004
+#define CLIPPER_DEBUG_REG13__vertval_bits_vertex_cc_next_valid_MASK 0x00000f00L
+#define CLIPPER_DEBUG_REG13__vertval_bits_vertex_cc_next_valid__SHIFT 0x00000008
+#define CLIPPER_DEBUG_REG13__vte_out_orig_fifo_fifo_empty_MASK 0x00004000L
+#define CLIPPER_DEBUG_REG13__vte_out_orig_fifo_fifo_empty__SHIFT 0x0000000e
+#define CLIPPER_DEBUG_REG14__clprim_in_back_deallocate_slot_MASK 0x00e00000L
+#define CLIPPER_DEBUG_REG14__clprim_in_back_deallocate_slot__SHIFT 0x00000015
+#define CLIPPER_DEBUG_REG14__clprim_in_back_end_of_packet_MASK 0x00080000L
+#define CLIPPER_DEBUG_REG14__clprim_in_back_end_of_packet__SHIFT 0x00000013
+#define CLIPPER_DEBUG_REG14__clprim_in_back_event_id_MASK 0x3f000000L
+#define CLIPPER_DEBUG_REG14__clprim_in_back_event_id__SHIFT 0x00000018
+#define CLIPPER_DEBUG_REG14__clprim_in_back_event_MASK 0x40000000L
+#define CLIPPER_DEBUG_REG14__clprim_in_back_event__SHIFT 0x0000001e
+#define CLIPPER_DEBUG_REG14__clprim_in_back_first_prim_of_slot_MASK 0x00100000L
+#define CLIPPER_DEBUG_REG14__clprim_in_back_first_prim_of_slot__SHIFT 0x00000014
+#define CLIPPER_DEBUG_REG14__clprim_in_back_vertex_store_indx_0_MASK 0x0003f000L
+#define CLIPPER_DEBUG_REG14__clprim_in_back_vertex_store_indx_0__SHIFT 0x0000000c
+#define CLIPPER_DEBUG_REG14__clprim_in_back_vertex_store_indx_1_MASK 0x00000fc0L
+#define CLIPPER_DEBUG_REG14__clprim_in_back_vertex_store_indx_1__SHIFT 0x00000006
+#define CLIPPER_DEBUG_REG14__clprim_in_back_vertex_store_indx_2_MASK 0x0000003fL
+#define CLIPPER_DEBUG_REG14__clprim_in_back_vertex_store_indx_2__SHIFT 0x00000000
+#define CLIPPER_DEBUG_REG14__outputclprimtoclip_null_primitive_MASK 0x00040000L
+#define CLIPPER_DEBUG_REG14__outputclprimtoclip_null_primitive__SHIFT 0x00000012
+#define CLIPPER_DEBUG_REG14__prim_back_valid_MASK 0x80000000L
+#define CLIPPER_DEBUG_REG14__prim_back_valid__SHIFT 0x0000001f
+#define CLIPPER_DEBUG_REG15__primic_to_clprim_fifo_vertex_store_indx_0_MASK 0x7c000000L
+#define CLIPPER_DEBUG_REG15__primic_to_clprim_fifo_vertex_store_indx_0__SHIFT 0x0000001a
+#define CLIPPER_DEBUG_REG15__primic_to_clprim_fifo_vertex_store_indx_1_MASK 0x03e00000L
+#define CLIPPER_DEBUG_REG15__primic_to_clprim_fifo_vertex_store_indx_1__SHIFT 0x00000015
+#define CLIPPER_DEBUG_REG15__primic_to_clprim_fifo_vertex_store_indx_2_MASK 0x001f0000L
+#define CLIPPER_DEBUG_REG15__primic_to_clprim_fifo_vertex_store_indx_2__SHIFT 0x00000010
+#define CLIPPER_DEBUG_REG15__primic_to_clprim_valid_MASK 0x80000000L
+#define CLIPPER_DEBUG_REG15__primic_to_clprim_valid__SHIFT 0x0000001f
+#define CLIPPER_DEBUG_REG15__vertval_bits_vertex_vertex_store_msb_MASK 0x0000ffffL
+#define CLIPPER_DEBUG_REG15__vertval_bits_vertex_vertex_store_msb__SHIFT 0x00000000
+#define CLIPPER_DEBUG_REG16__sm0_clip_to_clipga_clip_to_outsm_cnt_eq0_MASK 0x08000000L
+#define CLIPPER_DEBUG_REG16__sm0_clip_to_clipga_clip_to_outsm_cnt_eq0__SHIFT 0x0000001b
+#define CLIPPER_DEBUG_REG16__sm0_clip_to_outsm_fifo_full_MASK 0x10000000L
+#define CLIPPER_DEBUG_REG16__sm0_clip_to_outsm_fifo_full__SHIFT 0x0000001c
+#define CLIPPER_DEBUG_REG16__sm0_clip_vert_cnt_MASK 0x00001f00L
+#define CLIPPER_DEBUG_REG16__sm0_clip_vert_cnt__SHIFT 0x00000008
+#define CLIPPER_DEBUG_REG16__sm0_clprim_to_clip_prim_valid_MASK 0x80000000L
+#define CLIPPER_DEBUG_REG16__sm0_clprim_to_clip_prim_valid__SHIFT 0x0000001f
+#define CLIPPER_DEBUG_REG16__sm0_current_state_MASK 0x07f00000L
+#define CLIPPER_DEBUG_REG16__sm0_current_state__SHIFT 0x00000014
+#define CLIPPER_DEBUG_REG16__sm0_highest_priority_seq_MASK 0x20000000L
+#define CLIPPER_DEBUG_REG16__sm0_highest_priority_seq__SHIFT 0x0000001d
+#define CLIPPER_DEBUG_REG16__sm0_inv_to_clip_data_valid_0_MASK 0x00080000L
+#define CLIPPER_DEBUG_REG16__sm0_inv_to_clip_data_valid_0__SHIFT 0x00000013
+#define CLIPPER_DEBUG_REG16__sm0_inv_to_clip_data_valid_1_MASK 0x00040000L
+#define CLIPPER_DEBUG_REG16__sm0_inv_to_clip_data_valid_1__SHIFT 0x00000012
+#define CLIPPER_DEBUG_REG16__sm0_outputcliptoclipga_0_MASK 0x40000000L
+#define CLIPPER_DEBUG_REG16__sm0_outputcliptoclipga_0__SHIFT 0x0000001e
+#define CLIPPER_DEBUG_REG16__sm0_prim_end_state_MASK 0x0000007fL
+#define CLIPPER_DEBUG_REG16__sm0_prim_end_state__SHIFT 0x00000000
+#define CLIPPER_DEBUG_REG16__sm0_ps_expand_MASK 0x00000080L
+#define CLIPPER_DEBUG_REG16__sm0_ps_expand__SHIFT 0x00000007
+#define CLIPPER_DEBUG_REG16__sm0_vertex_clip_cnt_MASK 0x0003e000L
+#define CLIPPER_DEBUG_REG16__sm0_vertex_clip_cnt__SHIFT 0x0000000d
+#define CLIPPER_DEBUG_REG17__sm1_clip_to_clipga_clip_to_outsm_cnt_eq0_MASK 0x08000000L
+#define CLIPPER_DEBUG_REG17__sm1_clip_to_clipga_clip_to_outsm_cnt_eq0__SHIFT 0x0000001b
+#define CLIPPER_DEBUG_REG17__sm1_clip_to_outsm_fifo_full_MASK 0x10000000L
+#define CLIPPER_DEBUG_REG17__sm1_clip_to_outsm_fifo_full__SHIFT 0x0000001c
+#define CLIPPER_DEBUG_REG17__sm1_clip_vert_cnt_MASK 0x00001f00L
+#define CLIPPER_DEBUG_REG17__sm1_clip_vert_cnt__SHIFT 0x00000008
+#define CLIPPER_DEBUG_REG17__sm1_clprim_to_clip_prim_valid_MASK 0x80000000L
+#define CLIPPER_DEBUG_REG17__sm1_clprim_to_clip_prim_valid__SHIFT 0x0000001f
+#define CLIPPER_DEBUG_REG17__sm1_current_state_MASK 0x07f00000L
+#define CLIPPER_DEBUG_REG17__sm1_current_state__SHIFT 0x00000014
+#define CLIPPER_DEBUG_REG17__sm1_highest_priority_seq_MASK 0x20000000L
+#define CLIPPER_DEBUG_REG17__sm1_highest_priority_seq__SHIFT 0x0000001d
+#define CLIPPER_DEBUG_REG17__sm1_inv_to_clip_data_valid_0_MASK 0x00080000L
+#define CLIPPER_DEBUG_REG17__sm1_inv_to_clip_data_valid_0__SHIFT 0x00000013
+#define CLIPPER_DEBUG_REG17__sm1_inv_to_clip_data_valid_1_MASK 0x00040000L
+#define CLIPPER_DEBUG_REG17__sm1_inv_to_clip_data_valid_1__SHIFT 0x00000012
+#define CLIPPER_DEBUG_REG17__sm1_outputcliptoclipga_0_MASK 0x40000000L
+#define CLIPPER_DEBUG_REG17__sm1_outputcliptoclipga_0__SHIFT 0x0000001e
+#define CLIPPER_DEBUG_REG17__sm1_prim_end_state_MASK 0x0000007fL
+#define CLIPPER_DEBUG_REG17__sm1_prim_end_state__SHIFT 0x00000000
+#define CLIPPER_DEBUG_REG17__sm1_ps_expand_MASK 0x00000080L
+#define CLIPPER_DEBUG_REG17__sm1_ps_expand__SHIFT 0x00000007
+#define CLIPPER_DEBUG_REG17__sm1_vertex_clip_cnt_MASK 0x0003e000L
+#define CLIPPER_DEBUG_REG17__sm1_vertex_clip_cnt__SHIFT 0x0000000d
+#define CLIPPER_DEBUG_REG18__sm2_clip_to_clipga_clip_to_outsm_cnt_eq0_MASK 0x08000000L
+#define CLIPPER_DEBUG_REG18__sm2_clip_to_clipga_clip_to_outsm_cnt_eq0__SHIFT 0x0000001b
+#define CLIPPER_DEBUG_REG18__sm2_clip_to_outsm_fifo_full_MASK 0x10000000L
+#define CLIPPER_DEBUG_REG18__sm2_clip_to_outsm_fifo_full__SHIFT 0x0000001c
+#define CLIPPER_DEBUG_REG18__sm2_clip_vert_cnt_MASK 0x00001f00L
+#define CLIPPER_DEBUG_REG18__sm2_clip_vert_cnt__SHIFT 0x00000008
+#define CLIPPER_DEBUG_REG18__sm2_clprim_to_clip_prim_valid_MASK 0x80000000L
+#define CLIPPER_DEBUG_REG18__sm2_clprim_to_clip_prim_valid__SHIFT 0x0000001f
+#define CLIPPER_DEBUG_REG18__sm2_current_state_MASK 0x07f00000L
+#define CLIPPER_DEBUG_REG18__sm2_current_state__SHIFT 0x00000014
+#define CLIPPER_DEBUG_REG18__sm2_highest_priority_seq_MASK 0x20000000L
+#define CLIPPER_DEBUG_REG18__sm2_highest_priority_seq__SHIFT 0x0000001d
+#define CLIPPER_DEBUG_REG18__sm2_inv_to_clip_data_valid_0_MASK 0x00080000L
+#define CLIPPER_DEBUG_REG18__sm2_inv_to_clip_data_valid_0__SHIFT 0x00000013
+#define CLIPPER_DEBUG_REG18__sm2_inv_to_clip_data_valid_1_MASK 0x00040000L
+#define CLIPPER_DEBUG_REG18__sm2_inv_to_clip_data_valid_1__SHIFT 0x00000012
+#define CLIPPER_DEBUG_REG18__sm2_outputcliptoclipga_0_MASK 0x40000000L
+#define CLIPPER_DEBUG_REG18__sm2_outputcliptoclipga_0__SHIFT 0x0000001e
+#define CLIPPER_DEBUG_REG18__sm2_prim_end_state_MASK 0x0000007fL
+#define CLIPPER_DEBUG_REG18__sm2_prim_end_state__SHIFT 0x00000000
+#define CLIPPER_DEBUG_REG18__sm2_ps_expand_MASK 0x00000080L
+#define CLIPPER_DEBUG_REG18__sm2_ps_expand__SHIFT 0x00000007
+#define CLIPPER_DEBUG_REG18__sm2_vertex_clip_cnt_MASK 0x0003e000L
+#define CLIPPER_DEBUG_REG18__sm2_vertex_clip_cnt__SHIFT 0x0000000d
+#define CLIPPER_DEBUG_REG19__sm3_clip_to_clipga_clip_to_outsm_cnt_eq0_MASK 0x08000000L
+#define CLIPPER_DEBUG_REG19__sm3_clip_to_clipga_clip_to_outsm_cnt_eq0__SHIFT 0x0000001b
+#define CLIPPER_DEBUG_REG19__sm3_clip_to_outsm_fifo_full_MASK 0x10000000L
+#define CLIPPER_DEBUG_REG19__sm3_clip_to_outsm_fifo_full__SHIFT 0x0000001c
+#define CLIPPER_DEBUG_REG19__sm3_clip_vert_cnt_MASK 0x00001f00L
+#define CLIPPER_DEBUG_REG19__sm3_clip_vert_cnt__SHIFT 0x00000008
+#define CLIPPER_DEBUG_REG19__sm3_clprim_to_clip_prim_valid_MASK 0x80000000L
+#define CLIPPER_DEBUG_REG19__sm3_clprim_to_clip_prim_valid__SHIFT 0x0000001f
+#define CLIPPER_DEBUG_REG19__sm3_current_state_MASK 0x07f00000L
+#define CLIPPER_DEBUG_REG19__sm3_current_state__SHIFT 0x00000014
+#define CLIPPER_DEBUG_REG19__sm3_highest_priority_seq_MASK 0x20000000L
+#define CLIPPER_DEBUG_REG19__sm3_highest_priority_seq__SHIFT 0x0000001d
+#define CLIPPER_DEBUG_REG19__sm3_inv_to_clip_data_valid_0_MASK 0x00080000L
+#define CLIPPER_DEBUG_REG19__sm3_inv_to_clip_data_valid_0__SHIFT 0x00000013
+#define CLIPPER_DEBUG_REG19__sm3_inv_to_clip_data_valid_1_MASK 0x00040000L
+#define CLIPPER_DEBUG_REG19__sm3_inv_to_clip_data_valid_1__SHIFT 0x00000012
+#define CLIPPER_DEBUG_REG19__sm3_outputcliptoclipga_0_MASK 0x40000000L
+#define CLIPPER_DEBUG_REG19__sm3_outputcliptoclipga_0__SHIFT 0x0000001e
+#define CLIPPER_DEBUG_REG19__sm3_prim_end_state_MASK 0x0000007fL
+#define CLIPPER_DEBUG_REG19__sm3_prim_end_state__SHIFT 0x00000000
+#define CLIPPER_DEBUG_REG19__sm3_ps_expand_MASK 0x00000080L
+#define CLIPPER_DEBUG_REG19__sm3_ps_expand__SHIFT 0x00000007
+#define CLIPPER_DEBUG_REG19__sm3_vertex_clip_cnt_MASK 0x0003e000L
+#define CLIPPER_DEBUG_REG19__sm3_vertex_clip_cnt__SHIFT 0x0000000d
+#define COHER_DEST_BASE_0__DEST_BASE_256B_MASK 0xffffffffL
+#define COHER_DEST_BASE_0__DEST_BASE_256B__SHIFT 0x00000000
+#define COHER_DEST_BASE_1__DEST_BASE_256B_MASK 0xffffffffL
+#define COHER_DEST_BASE_1__DEST_BASE_256B__SHIFT 0x00000000
+#define COHER_DEST_BASE_2__DEST_BASE_256B_MASK 0xffffffffL
+#define COHER_DEST_BASE_2__DEST_BASE_256B__SHIFT 0x00000000
+#define COHER_DEST_BASE_3__DEST_BASE_256B_MASK 0xffffffffL
+#define COHER_DEST_BASE_3__DEST_BASE_256B__SHIFT 0x00000000
+#define COMPUTE_DIM_X__SIZE_MASK 0xffffffffL
+#define COMPUTE_DIM_X__SIZE__SHIFT 0x00000000
+#define COMPUTE_DIM_Y__SIZE_MASK 0xffffffffL
+#define COMPUTE_DIM_Y__SIZE__SHIFT 0x00000000
+#define COMPUTE_DIM_Z__SIZE_MASK 0xffffffffL
+#define COMPUTE_DIM_Z__SIZE__SHIFT 0x00000000
+#define COMPUTE_DISPATCH_INITIATOR__COMPUTE_SHADER_EN_MASK 0x00000001L
+#define COMPUTE_DISPATCH_INITIATOR__COMPUTE_SHADER_EN__SHIFT 0x00000000
+#define COMPUTE_DISPATCH_INITIATOR__DATA_ATC_MASK 0x00001000L
+#define COMPUTE_DISPATCH_INITIATOR__DATA_ATC__SHIFT 0x0000000c
+#define COMPUTE_DISPATCH_INITIATOR__DISPATCH_CACHE_CNTL_MASK 0x00000380L
+#define COMPUTE_DISPATCH_INITIATOR__DISPATCH_CACHE_CNTL__SHIFT 0x00000007
+#define COMPUTE_DISPATCH_INITIATOR__FORCE_START_AT_000_MASK 0x00000004L
+#define COMPUTE_DISPATCH_INITIATOR__FORCE_START_AT_000__SHIFT 0x00000002
+#define COMPUTE_DISPATCH_INITIATOR__ORDERED_APPEND_ENBL_MASK 0x00000008L
+#define COMPUTE_DISPATCH_INITIATOR__ORDERED_APPEND_ENBL__SHIFT 0x00000003
+#define COMPUTE_DISPATCH_INITIATOR__ORDERED_APPEND_MODE_MASK 0x00000010L
+#define COMPUTE_DISPATCH_INITIATOR__ORDERED_APPEND_MODE__SHIFT 0x00000004
+#define COMPUTE_DISPATCH_INITIATOR__ORDER_MODE_MASK 0x00000040L
+#define COMPUTE_DISPATCH_INITIATOR__ORDER_MODE__SHIFT 0x00000006
+#define COMPUTE_DISPATCH_INITIATOR__PARTIAL_TG_EN_MASK 0x00000002L
+#define COMPUTE_DISPATCH_INITIATOR__PARTIAL_TG_EN__SHIFT 0x00000001
+#define COMPUTE_DISPATCH_INITIATOR__RESTORE_MASK 0x00004000L
+#define COMPUTE_DISPATCH_INITIATOR__RESTORE__SHIFT 0x0000000e
+#define COMPUTE_DISPATCH_INITIATOR__SCALAR_L1_INV_VOL_MASK 0x00000400L
+#define COMPUTE_DISPATCH_INITIATOR__SCALAR_L1_INV_VOL__SHIFT 0x0000000a
+#define COMPUTE_DISPATCH_INITIATOR__USE_THREAD_DIMENSIONS_MASK 0x00000020L
+#define COMPUTE_DISPATCH_INITIATOR__USE_THREAD_DIMENSIONS__SHIFT 0x00000005
+#define COMPUTE_DISPATCH_INITIATOR__VECTOR_L1_INV_VOL_MASK 0x00000800L
+#define COMPUTE_DISPATCH_INITIATOR__VECTOR_L1_INV_VOL__SHIFT 0x0000000b
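+/*
+ * Illustrative sketch only, not part of the generated register list: each
+ * field above is described by a _MASK/_SHIFT pair, and a field is normally
+ * updated read-modify-write style, e.g. for COMPUTE_DISPATCH_INITIATOR:
+ *
+ *	u32 v = RREG32(mmCOMPUTE_DISPATCH_INITIATOR);
+ *	v &= ~COMPUTE_DISPATCH_INITIATOR__ORDER_MODE_MASK;
+ *	v |= (1 << COMPUTE_DISPATCH_INITIATOR__ORDER_MODE__SHIFT) &
+ *	     COMPUTE_DISPATCH_INITIATOR__ORDER_MODE_MASK;
+ *	WREG32(mmCOMPUTE_DISPATCH_INITIATOR, v);
+ *
+ * RREG32()/WREG32() and the mmCOMPUTE_DISPATCH_INITIATOR offset are assumed
+ * to come from the driver's register-access helpers and register-offset
+ * header; they are not defined here.
+ */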
+#define COMPUTE_NUM_THREAD_X__NUM_THREAD_FULL_MASK 0x0000ffffL
+#define COMPUTE_NUM_THREAD_X__NUM_THREAD_FULL__SHIFT 0x00000000
+#define COMPUTE_NUM_THREAD_X__NUM_THREAD_PARTIAL_MASK 0xffff0000L
+#define COMPUTE_NUM_THREAD_X__NUM_THREAD_PARTIAL__SHIFT 0x00000010
+#define COMPUTE_NUM_THREAD_Y__NUM_THREAD_FULL_MASK 0x0000ffffL
+#define COMPUTE_NUM_THREAD_Y__NUM_THREAD_FULL__SHIFT 0x00000000
+#define COMPUTE_NUM_THREAD_Y__NUM_THREAD_PARTIAL_MASK 0xffff0000L
+#define COMPUTE_NUM_THREAD_Y__NUM_THREAD_PARTIAL__SHIFT 0x00000010
+#define COMPUTE_NUM_THREAD_Z__NUM_THREAD_FULL_MASK 0x0000ffffL
+#define COMPUTE_NUM_THREAD_Z__NUM_THREAD_FULL__SHIFT 0x00000000
+#define COMPUTE_NUM_THREAD_Z__NUM_THREAD_PARTIAL_MASK 0xffff0000L
+#define COMPUTE_NUM_THREAD_Z__NUM_THREAD_PARTIAL__SHIFT 0x00000010
+#define COMPUTE_PGM_HI__DATA_MASK 0x000000ffL
+#define COMPUTE_PGM_HI__DATA__SHIFT 0x00000000
+#define COMPUTE_PGM_HI__INST_ATC_MASK 0x00000100L
+#define COMPUTE_PGM_HI__INST_ATC__SHIFT 0x00000008
+#define COMPUTE_PGM_LO__DATA_MASK 0xffffffffL
+#define COMPUTE_PGM_LO__DATA__SHIFT 0x00000000
+#define COMPUTE_PGM_RSRC1__BULKY_MASK 0x01000000L
+#define COMPUTE_PGM_RSRC1__BULKY__SHIFT 0x00000018
+#define COMPUTE_PGM_RSRC1__CDBG_USER_MASK 0x02000000L
+#define COMPUTE_PGM_RSRC1__CDBG_USER__SHIFT 0x00000019
+#define COMPUTE_PGM_RSRC1__DEBUG_MODE_MASK 0x00400000L
+#define COMPUTE_PGM_RSRC1__DEBUG_MODE__SHIFT 0x00000016
+#define COMPUTE_PGM_RSRC1__DX10_CLAMP_MASK 0x00200000L
+#define COMPUTE_PGM_RSRC1__DX10_CLAMP__SHIFT 0x00000015
+#define COMPUTE_PGM_RSRC1__FLOAT_MODE_MASK 0x000ff000L
+#define COMPUTE_PGM_RSRC1__FLOAT_MODE__SHIFT 0x0000000c
+#define COMPUTE_PGM_RSRC1__IEEE_MODE_MASK 0x00800000L
+#define COMPUTE_PGM_RSRC1__IEEE_MODE__SHIFT 0x00000017
+#define COMPUTE_PGM_RSRC1__PRIORITY_MASK 0x00000c00L
+#define COMPUTE_PGM_RSRC1__PRIORITY__SHIFT 0x0000000a
+#define COMPUTE_PGM_RSRC1__PRIV_MASK 0x00100000L
+#define COMPUTE_PGM_RSRC1__PRIV__SHIFT 0x00000014
+#define COMPUTE_PGM_RSRC1__SGPRS_MASK 0x000003c0L
+#define COMPUTE_PGM_RSRC1__SGPRS__SHIFT 0x00000006
+#define COMPUTE_PGM_RSRC1__VGPRS_MASK 0x0000003fL
+#define COMPUTE_PGM_RSRC1__VGPRS__SHIFT 0x00000000
+#define COMPUTE_PGM_RSRC2__EXCP_EN_MASK 0x7f000000L
+#define COMPUTE_PGM_RSRC2__EXCP_EN_MSB_MASK 0x00006000L
+#define COMPUTE_PGM_RSRC2__EXCP_EN_MSB__SHIFT 0x0000000d
+#define COMPUTE_PGM_RSRC2__EXCP_EN__SHIFT 0x00000018
+#define COMPUTE_PGM_RSRC2__LDS_SIZE_MASK 0x00ff8000L
+#define COMPUTE_PGM_RSRC2__LDS_SIZE__SHIFT 0x0000000f
+#define COMPUTE_PGM_RSRC2__SCRATCH_EN_MASK 0x00000001L
+#define COMPUTE_PGM_RSRC2__SCRATCH_EN__SHIFT 0x00000000
+#define COMPUTE_PGM_RSRC2__TGID_X_EN_MASK 0x00000080L
+#define COMPUTE_PGM_RSRC2__TGID_X_EN__SHIFT 0x00000007
+#define COMPUTE_PGM_RSRC2__TGID_Y_EN_MASK 0x00000100L
+#define COMPUTE_PGM_RSRC2__TGID_Y_EN__SHIFT 0x00000008
+#define COMPUTE_PGM_RSRC2__TGID_Z_EN_MASK 0x00000200L
+#define COMPUTE_PGM_RSRC2__TGID_Z_EN__SHIFT 0x00000009
+#define COMPUTE_PGM_RSRC2__TG_SIZE_EN_MASK 0x00000400L
+#define COMPUTE_PGM_RSRC2__TG_SIZE_EN__SHIFT 0x0000000a
+#define COMPUTE_PGM_RSRC2__TIDIG_COMP_CNT_MASK 0x00001800L
+#define COMPUTE_PGM_RSRC2__TIDIG_COMP_CNT__SHIFT 0x0000000b
+#define COMPUTE_PGM_RSRC2__TRAP_PRESENT_MASK 0x00000040L
+#define COMPUTE_PGM_RSRC2__TRAP_PRESENT__SHIFT 0x00000006
+#define COMPUTE_PGM_RSRC2__USER_SGPR_MASK 0x0000003eL
+#define COMPUTE_PGM_RSRC2__USER_SGPR__SHIFT 0x00000001
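+/*
+ * Likewise (illustrative only), a multi-bit field is decoded by masking and
+ * shifting; e.g. the USER_SGPR field of COMPUTE_PGM_RSRC2, with rsrc2 being
+ * a hypothetical local holding a previously read register value:
+ *
+ *	u32 user_sgpr = (rsrc2 & COMPUTE_PGM_RSRC2__USER_SGPR_MASK) >>
+ *			COMPUTE_PGM_RSRC2__USER_SGPR__SHIFT;
+ */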
+#define COMPUTE_RESOURCE_LIMITS__CU_GROUP_COUNT_MASK 0x07000000L
+#define COMPUTE_RESOURCE_LIMITS__CU_GROUP_COUNT__SHIFT 0x00000018
+#define COMPUTE_RESOURCE_LIMITS__FORCE_SIMD_DIST_MASK 0x00800000L
+#define COMPUTE_RESOURCE_LIMITS__FORCE_SIMD_DIST__SHIFT 0x00000017
+#define COMPUTE_RESOURCE_LIMITS__LOCK_THRESHOLD_MASK 0x003f0000L
+#define COMPUTE_RESOURCE_LIMITS__LOCK_THRESHOLD__SHIFT 0x00000010
+#define COMPUTE_RESOURCE_LIMITS__SIMD_DEST_CNTL_MASK 0x00400000L
+#define COMPUTE_RESOURCE_LIMITS__SIMD_DEST_CNTL__SHIFT 0x00000016
+#define COMPUTE_RESOURCE_LIMITS__TG_PER_CU_MASK 0x0000f000L
+#define COMPUTE_RESOURCE_LIMITS__TG_PER_CU__SHIFT 0x0000000c
+#define COMPUTE_RESOURCE_LIMITS__WAVES_PER_SH_MASK 0x0000003fL
+#define COMPUTE_RESOURCE_LIMITS__WAVES_PER_SH__SHIFT 0x00000000
+#define COMPUTE_START_X__START_MASK 0xffffffffL
+#define COMPUTE_START_X__START__SHIFT 0x00000000
+#define COMPUTE_START_Y__START_MASK 0xffffffffL
+#define COMPUTE_START_Y__START__SHIFT 0x00000000
+#define COMPUTE_START_Z__START_MASK 0xffffffffL
+#define COMPUTE_START_Z__START__SHIFT 0x00000000
+#define COMPUTE_STATIC_THREAD_MGMT_SE0__SH0_CU_EN_MASK 0x0000ffffL
+#define COMPUTE_STATIC_THREAD_MGMT_SE0__SH0_CU_EN__SHIFT 0x00000000
+#define COMPUTE_STATIC_THREAD_MGMT_SE0__SH1_CU_EN_MASK 0xffff0000L
+#define COMPUTE_STATIC_THREAD_MGMT_SE0__SH1_CU_EN__SHIFT 0x00000010
+#define COMPUTE_STATIC_THREAD_MGMT_SE1__SH0_CU_EN_MASK 0x0000ffffL
+#define COMPUTE_STATIC_THREAD_MGMT_SE1__SH0_CU_EN__SHIFT 0x00000000
+#define COMPUTE_STATIC_THREAD_MGMT_SE1__SH1_CU_EN_MASK 0xffff0000L
+#define COMPUTE_STATIC_THREAD_MGMT_SE1__SH1_CU_EN__SHIFT 0x00000010
+#define COMPUTE_TBA_HI__DATA_MASK 0x000000ffL
+#define COMPUTE_TBA_HI__DATA__SHIFT 0x00000000
+#define COMPUTE_TBA_LO__DATA_MASK 0xffffffffL
+#define COMPUTE_TBA_LO__DATA__SHIFT 0x00000000
+#define COMPUTE_TMA_HI__DATA_MASK 0x000000ffL
+#define COMPUTE_TMA_HI__DATA__SHIFT 0x00000000
+#define COMPUTE_TMA_LO__DATA_MASK 0xffffffffL
+#define COMPUTE_TMA_LO__DATA__SHIFT 0x00000000
+#define COMPUTE_TMPRING_SIZE__WAVESIZE_MASK 0x01fff000L
+#define COMPUTE_TMPRING_SIZE__WAVESIZE__SHIFT 0x0000000c
+#define COMPUTE_TMPRING_SIZE__WAVES_MASK 0x00000fffL
+#define COMPUTE_TMPRING_SIZE__WAVES__SHIFT 0x00000000
+#define COMPUTE_USER_DATA_0__DATA_MASK 0xffffffffL
+#define COMPUTE_USER_DATA_0__DATA__SHIFT 0x00000000
+#define COMPUTE_USER_DATA_10__DATA_MASK 0xffffffffL
+#define COMPUTE_USER_DATA_10__DATA__SHIFT 0x00000000
+#define COMPUTE_USER_DATA_11__DATA_MASK 0xffffffffL
+#define COMPUTE_USER_DATA_11__DATA__SHIFT 0x00000000
+#define COMPUTE_USER_DATA_12__DATA_MASK 0xffffffffL
+#define COMPUTE_USER_DATA_12__DATA__SHIFT 0x00000000
+#define COMPUTE_USER_DATA_13__DATA_MASK 0xffffffffL
+#define COMPUTE_USER_DATA_13__DATA__SHIFT 0x00000000
+#define COMPUTE_USER_DATA_14__DATA_MASK 0xffffffffL
+#define COMPUTE_USER_DATA_14__DATA__SHIFT 0x00000000
+#define COMPUTE_USER_DATA_15__DATA_MASK 0xffffffffL
+#define COMPUTE_USER_DATA_15__DATA__SHIFT 0x00000000
+#define COMPUTE_USER_DATA_1__DATA_MASK 0xffffffffL
+#define COMPUTE_USER_DATA_1__DATA__SHIFT 0x00000000
+#define COMPUTE_USER_DATA_2__DATA_MASK 0xffffffffL
+#define COMPUTE_USER_DATA_2__DATA__SHIFT 0x00000000
+#define COMPUTE_USER_DATA_3__DATA_MASK 0xffffffffL
+#define COMPUTE_USER_DATA_3__DATA__SHIFT 0x00000000
+#define COMPUTE_USER_DATA_4__DATA_MASK 0xffffffffL
+#define COMPUTE_USER_DATA_4__DATA__SHIFT 0x00000000
+#define COMPUTE_USER_DATA_5__DATA_MASK 0xffffffffL
+#define COMPUTE_USER_DATA_5__DATA__SHIFT 0x00000000
+#define COMPUTE_USER_DATA_6__DATA_MASK 0xffffffffL
+#define COMPUTE_USER_DATA_6__DATA__SHIFT 0x00000000
+#define COMPUTE_USER_DATA_7__DATA_MASK 0xffffffffL
+#define COMPUTE_USER_DATA_7__DATA__SHIFT 0x00000000
+#define COMPUTE_USER_DATA_8__DATA_MASK 0xffffffffL
+#define COMPUTE_USER_DATA_8__DATA__SHIFT 0x00000000
+#define COMPUTE_USER_DATA_9__DATA_MASK 0xffffffffL
+#define COMPUTE_USER_DATA_9__DATA__SHIFT 0x00000000
+#define COMPUTE_VMID__DATA_MASK 0x0000000fL
+#define COMPUTE_VMID__DATA__SHIFT 0x00000000
+#define CP_APPEND_ADDR_HI__COMMAND_MASK 0xe0000000L
+#define CP_APPEND_ADDR_HI__COMMAND__SHIFT 0x0000001d
+#define CP_APPEND_ADDR_HI__CS_PS_SEL_MASK 0x00030000L
+#define CP_APPEND_ADDR_HI__CS_PS_SEL__SHIFT 0x00000010
+#define CP_APPEND_ADDR_HI__MEM_ADDR_HI_MASK 0x000000ffL
+#define CP_APPEND_ADDR_HI__MEM_ADDR_HI__SHIFT 0x00000000
+#define CP_APPEND_ADDR_LO__MEM_ADDR_LO_MASK 0xfffffffcL
+#define CP_APPEND_ADDR_LO__MEM_ADDR_LO__SHIFT 0x00000002
+#define CP_APPEND_DATA__DATA_MASK 0xffffffffL
+#define CP_APPEND_DATA__DATA__SHIFT 0x00000000
+#define CP_APPEND_LAST_CS_FENCE__LAST_FENCE_MASK 0xffffffffL
+#define CP_APPEND_LAST_CS_FENCE__LAST_FENCE__SHIFT 0x00000000
+#define CP_APPEND_LAST_PS_FENCE__LAST_FENCE_MASK 0xffffffffL
+#define CP_APPEND_LAST_PS_FENCE__LAST_FENCE__SHIFT 0x00000000
+#define CP_ATOMIC_PREOP_HI__ATOMIC_PREOP_HI_MASK 0xffffffffL
+#define CP_ATOMIC_PREOP_HI__ATOMIC_PREOP_HI__SHIFT 0x00000000
+#define CP_ATOMIC_PREOP_LO__ATOMIC_PREOP_LO_MASK 0xffffffffL
+#define CP_ATOMIC_PREOP_LO__ATOMIC_PREOP_LO__SHIFT 0x00000000
+#define CP_BUSY_STAT__CE_PARSING_PACKETS_MASK 0x00400000L
+#define CP_BUSY_STAT__CE_PARSING_PACKETS__SHIFT 0x00000016
+#define CP_BUSY_STAT__COHER_CNT_NEQ_ZERO_MASK 0x00000040L
+#define CP_BUSY_STAT__COHER_CNT_NEQ_ZERO__SHIFT 0x00000006
+#define CP_BUSY_STAT__EOP_DONE_BUSY_MASK 0x00040000L
+#define CP_BUSY_STAT__EOP_DONE_BUSY__SHIFT 0x00000012
+#define CP_BUSY_STAT__GFX_CONTEXT_BUSY_MASK 0x00008000L
+#define CP_BUSY_STAT__GFX_CONTEXT_BUSY__SHIFT 0x0000000f
+#define CP_BUSY_STAT__ME_PARSER_BUSY_MASK 0x00020000L
+#define CP_BUSY_STAT__ME_PARSER_BUSY__SHIFT 0x00000011
+#define CP_BUSY_STAT__ME_PARSING_PACKETS_MASK 0x00000100L
+#define CP_BUSY_STAT__ME_PARSING_PACKETS__SHIFT 0x00000008
+#define CP_BUSY_STAT__PFP_PARSING_PACKETS_MASK 0x00000080L
+#define CP_BUSY_STAT__PFP_PARSING_PACKETS__SHIFT 0x00000007
+#define CP_BUSY_STAT__PIPE_STATS_BUSY_MASK 0x00100000L
+#define CP_BUSY_STAT__PIPE_STATS_BUSY__SHIFT 0x00000014
+#define CP_BUSY_STAT__RCIU_CE_BUSY_MASK 0x00200000L
+#define CP_BUSY_STAT__RCIU_CE_BUSY__SHIFT 0x00000015
+#define CP_BUSY_STAT__RCIU_ME_BUSY_MASK 0x00000400L
+#define CP_BUSY_STAT__RCIU_ME_BUSY__SHIFT 0x0000000a
+#define CP_BUSY_STAT__RCIU_PFP_BUSY_MASK 0x00000200L
+#define CP_BUSY_STAT__RCIU_PFP_BUSY__SHIFT 0x00000009
+#define CP_BUSY_STAT__REG_BUS_FIFO_BUSY_MASK 0x00000001L
+#define CP_BUSY_STAT__REG_BUS_FIFO_BUSY__SHIFT 0x00000000
+#define CP_BUSY_STAT__SEM_CMDFIFO_NOT_EMPTY_MASK 0x00001000L
+#define CP_BUSY_STAT__SEM_CMDFIFO_NOT_EMPTY__SHIFT 0x0000000c
+#define CP_BUSY_STAT__SEM_FAILED_AND_HOLDING_MASK 0x00002000L
+#define CP_BUSY_STAT__SEM_FAILED_AND_HOLDING__SHIFT 0x0000000d
+#define CP_BUSY_STAT__SEM_POLLING_FOR_PASS_MASK 0x00004000L
+#define CP_BUSY_STAT__SEM_POLLING_FOR_PASS__SHIFT 0x0000000e
+#define CP_BUSY_STAT__STRM_OUT_BUSY_MASK 0x00080000L
+#define CP_BUSY_STAT__STRM_OUT_BUSY__SHIFT 0x00000013
+#define CP_CE_HEADER_DUMP__CE_HEADER_DUMP_MASK 0xffffffffL
+#define CP_CE_HEADER_DUMP__CE_HEADER_DUMP__SHIFT 0x00000000
+#define CP_CE_IB1_BASE_HI__IB1_BASE_HI_MASK 0x000000ffL
+#define CP_CE_IB1_BASE_HI__IB1_BASE_HI__SHIFT 0x00000000
+#define CP_CE_IB1_BASE_LO__IB1_BASE_LO_MASK 0xfffffffcL
+#define CP_CE_IB1_BASE_LO__IB1_BASE_LO__SHIFT 0x00000002
+#define CP_CE_IB1_BUFSZ__IB1_BUFSZ_MASK 0x000fffffL
+#define CP_CE_IB1_BUFSZ__IB1_BUFSZ__SHIFT 0x00000000
+#define CP_CE_IB2_BASE_HI__IB2_BASE_HI_MASK 0x000000ffL
+#define CP_CE_IB2_BASE_HI__IB2_BASE_HI__SHIFT 0x00000000
+#define CP_CE_IB2_BASE_LO__IB2_BASE_LO_MASK 0xfffffffcL
+#define CP_CE_IB2_BASE_LO__IB2_BASE_LO__SHIFT 0x00000002
+#define CP_CE_IB2_BUFSZ__IB2_BUFSZ_MASK 0x000fffffL
+#define CP_CE_IB2_BUFSZ__IB2_BUFSZ__SHIFT 0x00000000
+#define CP_CE_INIT_BASE_HI__INIT_BASE_HI_MASK 0x000000ffL
+#define CP_CE_INIT_BASE_HI__INIT_BASE_HI__SHIFT 0x00000000
+#define CP_CE_INIT_BASE_LO__INIT_BASE_LO_MASK 0xffffffe0L
+#define CP_CE_INIT_BASE_LO__INIT_BASE_LO__SHIFT 0x00000005
+#define CP_CE_INIT_BUFSZ__INIT_BUFSZ_MASK 0x00000fffL
+#define CP_CE_INIT_BUFSZ__INIT_BUFSZ__SHIFT 0x00000000
+#define CP_CEQ1_AVAIL__CEQ_CNT_IB1_MASK 0x07ff0000L
+#define CP_CEQ1_AVAIL__CEQ_CNT_IB1__SHIFT 0x00000010
+#define CP_CEQ1_AVAIL__CEQ_CNT_RING_MASK 0x000007ffL
+#define CP_CEQ1_AVAIL__CEQ_CNT_RING__SHIFT 0x00000000
+#define CP_CEQ2_AVAIL__CEQ_CNT_IB2_MASK 0x000007ffL
+#define CP_CEQ2_AVAIL__CEQ_CNT_IB2__SHIFT 0x00000000
+#define CP_CE_ROQ_IB1_STAT__CEQ_RPTR_INDIRECT1_MASK 0x000003ffL
+#define CP_CE_ROQ_IB1_STAT__CEQ_RPTR_INDIRECT1__SHIFT 0x00000000
+#define CP_CE_ROQ_IB1_STAT__CEQ_WPTR_INDIRECT1_MASK 0x03ff0000L
+#define CP_CE_ROQ_IB1_STAT__CEQ_WPTR_INDIRECT1__SHIFT 0x00000010
+#define CP_CE_ROQ_IB2_STAT__CEQ_RPTR_INDIRECT2_MASK 0x000003ffL
+#define CP_CE_ROQ_IB2_STAT__CEQ_RPTR_INDIRECT2__SHIFT 0x00000000
+#define CP_CE_ROQ_IB2_STAT__CEQ_WPTR_INDIRECT2_MASK 0x03ff0000L
+#define CP_CE_ROQ_IB2_STAT__CEQ_WPTR_INDIRECT2__SHIFT 0x00000010
+#define CP_CE_ROQ_RB_STAT__CEQ_RPTR_PRIMARY_MASK 0x000003ffL
+#define CP_CE_ROQ_RB_STAT__CEQ_RPTR_PRIMARY__SHIFT 0x00000000
+#define CP_CE_ROQ_RB_STAT__CEQ_WPTR_PRIMARY_MASK 0x03ff0000L
+#define CP_CE_ROQ_RB_STAT__CEQ_WPTR_PRIMARY__SHIFT 0x00000010
+#define CP_CE_UCODE_ADDR__UCODE_ADDR_MASK 0x00000fffL
+#define CP_CE_UCODE_ADDR__UCODE_ADDR__SHIFT 0x00000000
+#define CP_CE_UCODE_DATA__UCODE_DATA_MASK 0xffffffffL
+#define CP_CE_UCODE_DATA__UCODE_DATA__SHIFT 0x00000000
+#define CP_CMD_DATA__CMD_DATA_MASK 0xffffffffL
+#define CP_CMD_DATA__CMD_DATA__SHIFT 0x00000000
+#define CP_CMD_INDEX__CMD_INDEX_MASK 0x000007ffL
+#define CP_CMD_INDEX__CMD_INDEX__SHIFT 0x00000000
+#define CP_CMD_INDEX__CMD_ME_SEL_MASK 0x00003000L
+#define CP_CMD_INDEX__CMD_ME_SEL__SHIFT 0x0000000c
+#define CP_CMD_INDEX__CMD_QUEUE_SEL_MASK 0x00030000L
+#define CP_CMD_INDEX__CMD_QUEUE_SEL__SHIFT 0x00000010
+#define CP_CNTX_STAT__ACTIVE_GFX_CONTEXTS_MASK 0x0ff00000L
+#define CP_CNTX_STAT__ACTIVE_GFX_CONTEXTS__SHIFT 0x00000014
+#define CP_CNTX_STAT__ACTIVE_HP3D_CONTEXTS_MASK 0x000000ffL
+#define CP_CNTX_STAT__ACTIVE_HP3D_CONTEXTS__SHIFT 0x00000000
+#define CP_CNTX_STAT__CURRENT_GFX_CONTEXT_MASK 0x70000000L
+#define CP_CNTX_STAT__CURRENT_GFX_CONTEXT__SHIFT 0x0000001c
+#define CP_CNTX_STAT__CURRENT_HP3D_CONTEXT_MASK 0x00000700L
+#define CP_CNTX_STAT__CURRENT_HP3D_CONTEXT__SHIFT 0x00000008
+#define CP_COHER_BASE__COHER_BASE_256B_MASK 0xffffffffL
+#define CP_COHER_BASE__COHER_BASE_256B__SHIFT 0x00000000
+#define CP_COHER_BASE_HI__COHER_BASE_HI_256B_MASK 0x000000ffL
+#define CP_COHER_BASE_HI__COHER_BASE_HI_256B__SHIFT 0x00000000
+#define CP_COHER_CNTL__CB0_DEST_BASE_ENA_MASK 0x00000040L
+#define CP_COHER_CNTL__CB0_DEST_BASE_ENA__SHIFT 0x00000006
+#define CP_COHER_CNTL__CB1_DEST_BASE_ENA_MASK 0x00000080L
+#define CP_COHER_CNTL__CB1_DEST_BASE_ENA__SHIFT 0x00000007
+#define CP_COHER_CNTL__CB2_DEST_BASE_ENA_MASK 0x00000100L
+#define CP_COHER_CNTL__CB2_DEST_BASE_ENA__SHIFT 0x00000008
+#define CP_COHER_CNTL__CB3_DEST_BASE_ENA_MASK 0x00000200L
+#define CP_COHER_CNTL__CB3_DEST_BASE_ENA__SHIFT 0x00000009
+#define CP_COHER_CNTL__CB4_DEST_BASE_ENA_MASK 0x00000400L
+#define CP_COHER_CNTL__CB4_DEST_BASE_ENA__SHIFT 0x0000000a
+#define CP_COHER_CNTL__CB5_DEST_BASE_ENA_MASK 0x00000800L
+#define CP_COHER_CNTL__CB5_DEST_BASE_ENA__SHIFT 0x0000000b
+#define CP_COHER_CNTL__CB6_DEST_BASE_ENA_MASK 0x00001000L
+#define CP_COHER_CNTL__CB6_DEST_BASE_ENA__SHIFT 0x0000000c
+#define CP_COHER_CNTL__CB7_DEST_BASE_ENA_MASK 0x00002000L
+#define CP_COHER_CNTL__CB7_DEST_BASE_ENA__SHIFT 0x0000000d
+#define CP_COHER_CNTL__CB_ACTION_ENA_MASK 0x02000000L
+#define CP_COHER_CNTL__CB_ACTION_ENA__SHIFT 0x00000019
+#define CP_COHER_CNTL__DB_ACTION_ENA_MASK 0x04000000L
+#define CP_COHER_CNTL__DB_ACTION_ENA__SHIFT 0x0000001a
+#define CP_COHER_CNTL__DB_DEST_BASE_ENA_MASK 0x00004000L
+#define CP_COHER_CNTL__DB_DEST_BASE_ENA__SHIFT 0x0000000e
+#define CP_COHER_CNTL__DEST_BASE_0_ENA_MASK 0x00000001L
+#define CP_COHER_CNTL__DEST_BASE_0_ENA__SHIFT 0x00000000
+#define CP_COHER_CNTL__DEST_BASE_1_ENA_MASK 0x00000002L
+#define CP_COHER_CNTL__DEST_BASE_1_ENA__SHIFT 0x00000001
+#define CP_COHER_CNTL__DEST_BASE_2_ENA_MASK 0x00080000L
+#define CP_COHER_CNTL__DEST_BASE_2_ENA__SHIFT 0x00000013
+#define CP_COHER_CNTL__DEST_BASE_3_ENA_MASK 0x00200000L
+#define CP_COHER_CNTL__DEST_BASE_3_ENA__SHIFT 0x00000015
+#define CP_COHER_CNTL__SH_ICACHE_ACTION_ENA_MASK 0x20000000L
+#define CP_COHER_CNTL__SH_ICACHE_ACTION_ENA__SHIFT 0x0000001d
+#define CP_COHER_CNTL__SH_KCACHE_ACTION_ENA_MASK 0x08000000L
+#define CP_COHER_CNTL__SH_KCACHE_ACTION_ENA__SHIFT 0x0000001b
+#define CP_COHER_CNTL__SH_KCACHE_VOL_ACTION_ENA_MASK 0x10000000L
+#define CP_COHER_CNTL__SH_KCACHE_VOL_ACTION_ENA__SHIFT 0x0000001c
+#define CP_COHER_CNTL__TC_ACTION_ENA_MASK 0x00800000L
+#define CP_COHER_CNTL__TC_ACTION_ENA__SHIFT 0x00000017
+#define CP_COHER_CNTL__TCL1_ACTION_ENA_MASK 0x00400000L
+#define CP_COHER_CNTL__TCL1_ACTION_ENA__SHIFT 0x00000016
+#define CP_COHER_CNTL__TCL1_VOL_ACTION_ENA_MASK 0x00008000L
+#define CP_COHER_CNTL__TCL1_VOL_ACTION_ENA__SHIFT 0x0000000f
+#define CP_COHER_CNTL__TC_VOL_ACTION_ENA_MASK 0x00010000L
+#define CP_COHER_CNTL__TC_VOL_ACTION_ENA__SHIFT 0x00000010
+#define CP_COHER_CNTL__TC_WB_ACTION_ENA_MASK 0x00040000L
+#define CP_COHER_CNTL__TC_WB_ACTION_ENA__SHIFT 0x00000012
+#define CP_COHER_SIZE__COHER_SIZE_256B_MASK 0xffffffffL
+#define CP_COHER_SIZE__COHER_SIZE_256B__SHIFT 0x00000000
+#define CP_COHER_SIZE_HI__COHER_SIZE_HI_256B_MASK 0x000000ffL
+#define CP_COHER_SIZE_HI__COHER_SIZE_HI_256B__SHIFT 0x00000000
+#define CP_COHER_START_DELAY__START_DELAY_COUNT_MASK 0x0000003fL
+#define CP_COHER_START_DELAY__START_DELAY_COUNT__SHIFT 0x00000000
+#define CP_COHER_STATUS__MATCHING_GFX_CNTX_MASK 0x000000ffL
+#define CP_COHER_STATUS__MATCHING_GFX_CNTX__SHIFT 0x00000000
+#define CP_COHER_STATUS__MEID_MASK 0x03000000L
+#define CP_COHER_STATUS__MEID__SHIFT 0x00000018
+#define CP_COHER_STATUS__PHASE1_STATUS_MASK 0x40000000L
+#define CP_COHER_STATUS__PHASE1_STATUS__SHIFT 0x0000001e
+#define CP_COHER_STATUS__STATUS_MASK 0x80000000L
+#define CP_COHER_STATUS__STATUS__SHIFT 0x0000001f
+#define CP_CSF_CNTL__FETCH_BUFFER_DEPTH_MASK 0x0000000fL
+#define CP_CSF_CNTL__FETCH_BUFFER_DEPTH__SHIFT 0x00000000
+#define CP_CSF_STAT__BUFFER_REQUEST_COUNT_MASK 0x00003f00L
+#define CP_CSF_STAT__BUFFER_REQUEST_COUNT__SHIFT 0x00000008
+#define CP_CSF_STAT__BUFFER_SLOTS_ALLOCATED_MASK 0x0000000fL
+#define CP_CSF_STAT__BUFFER_SLOTS_ALLOCATED__SHIFT 0x00000000
+#define CP_DMA_CNTL__BUFFER_DEPTH_MASK 0x000f0000L
+#define CP_DMA_CNTL__BUFFER_DEPTH__SHIFT 0x00000010
+#define CP_DMA_CNTL__MIN_AVAILSZ_MASK 0x00000030L
+#define CP_DMA_CNTL__MIN_AVAILSZ__SHIFT 0x00000004
+#define CP_DMA_CNTL__PIO_COUNT_MASK 0xc0000000L
+#define CP_DMA_CNTL__PIO_COUNT__SHIFT 0x0000001e
+#define CP_DMA_CNTL__PIO_FIFO_EMPTY_MASK 0x10000000L
+#define CP_DMA_CNTL__PIO_FIFO_EMPTY__SHIFT 0x0000001c
+#define CP_DMA_CNTL__PIO_FIFO_FULL_MASK 0x20000000L
+#define CP_DMA_CNTL__PIO_FIFO_FULL__SHIFT 0x0000001d
+#define CP_DMA_ME_COMMAND__BYTE_COUNT_MASK 0x001fffffL
+#define CP_DMA_ME_COMMAND__BYTE_COUNT__SHIFT 0x00000000
+#define CP_DMA_ME_COMMAND__DAIC_MASK 0x20000000L
+#define CP_DMA_ME_COMMAND__DAIC__SHIFT 0x0000001d
+#define CP_DMA_ME_COMMAND__DAS_MASK 0x08000000L
+#define CP_DMA_ME_COMMAND__DAS__SHIFT 0x0000001b
+#define CP_DMA_ME_COMMAND__DIS_WC_MASK 0x00200000L
+#define CP_DMA_ME_COMMAND__DIS_WC__SHIFT 0x00000015
+#define CP_DMA_ME_COMMAND__DST_SWAP_MASK 0x03000000L
+#define CP_DMA_ME_COMMAND__DST_SWAP__SHIFT 0x00000018
+#define CP_DMA_ME_COMMAND__RAW_WAIT_MASK 0x40000000L
+#define CP_DMA_ME_COMMAND__RAW_WAIT__SHIFT 0x0000001e
+#define CP_DMA_ME_COMMAND__SAIC_MASK 0x10000000L
+#define CP_DMA_ME_COMMAND__SAIC__SHIFT 0x0000001c
+#define CP_DMA_ME_COMMAND__SAS_MASK 0x04000000L
+#define CP_DMA_ME_COMMAND__SAS__SHIFT 0x0000001a
+#define CP_DMA_ME_COMMAND__SRC_SWAP_MASK 0x00c00000L
+#define CP_DMA_ME_COMMAND__SRC_SWAP__SHIFT 0x00000016
+#define CP_DMA_ME_DST_ADDR__DST_ADDR_MASK 0xffffffffL
+#define CP_DMA_ME_DST_ADDR__DST_ADDR__SHIFT 0x00000000
+#define CP_DMA_ME_DST_ADDR_HI__DST_ADDR_HI_MASK 0x000000ffL
+#define CP_DMA_ME_DST_ADDR_HI__DST_ADDR_HI__SHIFT 0x00000000
+#define CP_DMA_ME_SRC_ADDR_HI__SRC_ADDR_HI_MASK 0x000000ffL
+#define CP_DMA_ME_SRC_ADDR_HI__SRC_ADDR_HI__SHIFT 0x00000000
+#define CP_DMA_ME_SRC_ADDR__SRC_ADDR_MASK 0xffffffffL
+#define CP_DMA_ME_SRC_ADDR__SRC_ADDR__SHIFT 0x00000000
+#define CP_DMA_PFP_COMMAND__BYTE_COUNT_MASK 0x001fffffL
+#define CP_DMA_PFP_COMMAND__BYTE_COUNT__SHIFT 0x00000000
+#define CP_DMA_PFP_COMMAND__DAIC_MASK 0x20000000L
+#define CP_DMA_PFP_COMMAND__DAIC__SHIFT 0x0000001d
+#define CP_DMA_PFP_COMMAND__DAS_MASK 0x08000000L
+#define CP_DMA_PFP_COMMAND__DAS__SHIFT 0x0000001b
+#define CP_DMA_PFP_COMMAND__DIS_WC_MASK 0x00200000L
+#define CP_DMA_PFP_COMMAND__DIS_WC__SHIFT 0x00000015
+#define CP_DMA_PFP_COMMAND__DST_SWAP_MASK 0x03000000L
+#define CP_DMA_PFP_COMMAND__DST_SWAP__SHIFT 0x00000018
+#define CP_DMA_PFP_COMMAND__RAW_WAIT_MASK 0x40000000L
+#define CP_DMA_PFP_COMMAND__RAW_WAIT__SHIFT 0x0000001e
+#define CP_DMA_PFP_COMMAND__SAIC_MASK 0x10000000L
+#define CP_DMA_PFP_COMMAND__SAIC__SHIFT 0x0000001c
+#define CP_DMA_PFP_COMMAND__SAS_MASK 0x04000000L
+#define CP_DMA_PFP_COMMAND__SAS__SHIFT 0x0000001a
+#define CP_DMA_PFP_COMMAND__SRC_SWAP_MASK 0x00c00000L
+#define CP_DMA_PFP_COMMAND__SRC_SWAP__SHIFT 0x00000016
+#define CP_DMA_PFP_DST_ADDR__DST_ADDR_MASK 0xffffffffL
+#define CP_DMA_PFP_DST_ADDR__DST_ADDR__SHIFT 0x00000000
+#define CP_DMA_PFP_DST_ADDR_HI__DST_ADDR_HI_MASK 0x000000ffL
+#define CP_DMA_PFP_DST_ADDR_HI__DST_ADDR_HI__SHIFT 0x00000000
+#define CP_DMA_PFP_SRC_ADDR_HI__SRC_ADDR_HI_MASK 0x000000ffL
+#define CP_DMA_PFP_SRC_ADDR_HI__SRC_ADDR_HI__SHIFT 0x00000000
+#define CP_DMA_PFP_SRC_ADDR__SRC_ADDR_MASK 0xffffffffL
+#define CP_DMA_PFP_SRC_ADDR__SRC_ADDR__SHIFT 0x00000000
+#define CP_DMA_READ_TAGS__DMA_READ_TAG_MASK 0x03ffffffL
+#define CP_DMA_READ_TAGS__DMA_READ_TAG__SHIFT 0x00000000
+#define CP_DMA_READ_TAGS__DMA_READ_TAG_VALID_MASK 0x10000000L
+#define CP_DMA_READ_TAGS__DMA_READ_TAG_VALID__SHIFT 0x0000001c
+#define CP_ECC_FIRSTOCCURRENCE__INTERFACE_MASK 0x00000003L
+#define CP_ECC_FIRSTOCCURRENCE__INTERFACE__SHIFT 0x00000000
+#define CP_ECC_FIRSTOCCURRENCE__REQUEST_CLIENT_MASK 0x000000f0L
+#define CP_ECC_FIRSTOCCURRENCE__REQUEST_CLIENT__SHIFT 0x00000004
+#define CP_ECC_FIRSTOCCURRENCE_RING0__INTERFACE_MASK 0x00000003L
+#define CP_ECC_FIRSTOCCURRENCE_RING0__INTERFACE__SHIFT 0x00000000
+#define CP_ECC_FIRSTOCCURRENCE_RING0__REQUEST_CLIENT_MASK 0x000000f0L
+#define CP_ECC_FIRSTOCCURRENCE_RING0__REQUEST_CLIENT__SHIFT 0x00000004
+#define CP_ECC_FIRSTOCCURRENCE_RING0__RING_ID_MASK 0x00003c00L
+#define CP_ECC_FIRSTOCCURRENCE_RING0__RING_ID__SHIFT 0x0000000a
+#define CP_ECC_FIRSTOCCURRENCE_RING0__VMID_MASK 0x000f0000L
+#define CP_ECC_FIRSTOCCURRENCE_RING0__VMID__SHIFT 0x00000010
+#define CP_ECC_FIRSTOCCURRENCE_RING1__INTERFACE_MASK 0x00000003L
+#define CP_ECC_FIRSTOCCURRENCE_RING1__INTERFACE__SHIFT 0x00000000
+#define CP_ECC_FIRSTOCCURRENCE_RING1__REQUEST_CLIENT_MASK 0x000000f0L
+#define CP_ECC_FIRSTOCCURRENCE_RING1__REQUEST_CLIENT__SHIFT 0x00000004
+#define CP_ECC_FIRSTOCCURRENCE_RING1__RING_ID_MASK 0x00003c00L
+#define CP_ECC_FIRSTOCCURRENCE_RING1__RING_ID__SHIFT 0x0000000a
+#define CP_ECC_FIRSTOCCURRENCE_RING1__VMID_MASK 0x000f0000L
+#define CP_ECC_FIRSTOCCURRENCE_RING1__VMID__SHIFT 0x00000010
+#define CP_ECC_FIRSTOCCURRENCE_RING2__INTERFACE_MASK 0x00000003L
+#define CP_ECC_FIRSTOCCURRENCE_RING2__INTERFACE__SHIFT 0x00000000
+#define CP_ECC_FIRSTOCCURRENCE_RING2__REQUEST_CLIENT_MASK 0x000000f0L
+#define CP_ECC_FIRSTOCCURRENCE_RING2__REQUEST_CLIENT__SHIFT 0x00000004
+#define CP_ECC_FIRSTOCCURRENCE_RING2__RING_ID_MASK 0x00003c00L
+#define CP_ECC_FIRSTOCCURRENCE_RING2__RING_ID__SHIFT 0x0000000a
+#define CP_ECC_FIRSTOCCURRENCE_RING2__VMID_MASK 0x000f0000L
+#define CP_ECC_FIRSTOCCURRENCE_RING2__VMID__SHIFT 0x00000010
+#define CP_ECC_FIRSTOCCURRENCE__RING_ID_MASK 0x00003c00L
+#define CP_ECC_FIRSTOCCURRENCE__RING_ID__SHIFT 0x0000000a
+#define CP_ECC_FIRSTOCCURRENCE__VMID_MASK 0x000f0000L
+#define CP_ECC_FIRSTOCCURRENCE__VMID__SHIFT 0x00000010
+#define CP_EOP_DONE_ADDR_HI__ADDR_HI_MASK 0x0000ffffL
+#define CP_EOP_DONE_ADDR_HI__ADDR_HI__SHIFT 0x00000000
+#define CP_EOP_DONE_ADDR_LO__ADDR_LO_MASK 0xfffffffcL
+#define CP_EOP_DONE_ADDR_LO__ADDR_LO__SHIFT 0x00000002
+#define CP_EOP_DONE_ADDR_LO__ADDR_SWAP_MASK 0x00000003L
+#define CP_EOP_DONE_ADDR_LO__ADDR_SWAP__SHIFT 0x00000000
+#define CP_EOP_DONE_DATA_CNTL__CNTX_ID_MASK 0x0000ffffL
+#define CP_EOP_DONE_DATA_CNTL__CNTX_ID__SHIFT 0x00000000
+#define CP_EOP_DONE_DATA_CNTL__DATA_SEL_MASK 0xe0000000L
+#define CP_EOP_DONE_DATA_CNTL__DATA_SEL__SHIFT 0x0000001d
+#define CP_EOP_DONE_DATA_CNTL__DST_SEL_MASK 0x00030000L
+#define CP_EOP_DONE_DATA_CNTL__DST_SEL__SHIFT 0x00000010
+#define CP_EOP_DONE_DATA_CNTL__INT_SEL_MASK 0x07000000L
+#define CP_EOP_DONE_DATA_CNTL__INT_SEL__SHIFT 0x00000018
+#define CP_EOP_DONE_DATA_HI__DATA_HI_MASK 0xffffffffL
+#define CP_EOP_DONE_DATA_HI__DATA_HI__SHIFT 0x00000000
+#define CP_EOP_DONE_DATA_LO__DATA_LO_MASK 0xffffffffL
+#define CP_EOP_DONE_DATA_LO__DATA_LO__SHIFT 0x00000000
+#define CP_EOP_LAST_FENCE_HI__LAST_FENCE_HI_MASK 0xffffffffL
+#define CP_EOP_LAST_FENCE_HI__LAST_FENCE_HI__SHIFT 0x00000000
+#define CP_EOP_LAST_FENCE_LO__LAST_FENCE_LO_MASK 0xffffffffL
+#define CP_EOP_LAST_FENCE_LO__LAST_FENCE_LO__SHIFT 0x00000000
+#define CP_GDS_ATOMIC0_PREOP_HI__GDS_ATOMIC0_PREOP_HI_MASK 0xffffffffL
+#define CP_GDS_ATOMIC0_PREOP_HI__GDS_ATOMIC0_PREOP_HI__SHIFT 0x00000000
+#define CP_GDS_ATOMIC0_PREOP_LO__GDS_ATOMIC0_PREOP_LO_MASK 0xffffffffL
+#define CP_GDS_ATOMIC0_PREOP_LO__GDS_ATOMIC0_PREOP_LO__SHIFT 0x00000000
+#define CP_GDS_ATOMIC1_PREOP_HI__GDS_ATOMIC1_PREOP_HI_MASK 0xffffffffL
+#define CP_GDS_ATOMIC1_PREOP_HI__GDS_ATOMIC1_PREOP_HI__SHIFT 0x00000000
+#define CP_GDS_ATOMIC1_PREOP_LO__GDS_ATOMIC1_PREOP_LO_MASK 0xffffffffL
+#define CP_GDS_ATOMIC1_PREOP_LO__GDS_ATOMIC1_PREOP_LO__SHIFT 0x00000000
+#define CP_GRBM_FREE_COUNT__FREE_COUNT_GDS_MASK 0x00003f00L
+#define CP_GRBM_FREE_COUNT__FREE_COUNT_GDS__SHIFT 0x00000008
+#define CP_GRBM_FREE_COUNT__FREE_COUNT_MASK 0x0000003fL
+#define CP_GRBM_FREE_COUNT__FREE_COUNT_PFP_MASK 0x003f0000L
+#define CP_GRBM_FREE_COUNT__FREE_COUNT_PFP__SHIFT 0x00000010
+#define CP_GRBM_FREE_COUNT__FREE_COUNT__SHIFT 0x00000000
+#define CP_IB1_BASE_HI__IB1_BASE_HI_MASK 0x000000ffL
+#define CP_IB1_BASE_HI__IB1_BASE_HI__SHIFT 0x00000000
+#define CP_IB1_BASE_LO__IB1_BASE_LO_MASK 0xfffffffcL
+#define CP_IB1_BASE_LO__IB1_BASE_LO__SHIFT 0x00000002
+#define CP_IB1_BUFSZ__IB1_BUFSZ_MASK 0x000fffffL
+#define CP_IB1_BUFSZ__IB1_BUFSZ__SHIFT 0x00000000
+#define CP_IB1_OFFSET__IB1_OFFSET_MASK 0x000fffffL
+#define CP_IB1_OFFSET__IB1_OFFSET__SHIFT 0x00000000
+#define CP_IB1_PREAMBLE_BEGIN__IB1_PREAMBLE_BEGIN_MASK 0x000fffffL
+#define CP_IB1_PREAMBLE_BEGIN__IB1_PREAMBLE_BEGIN__SHIFT 0x00000000
+#define CP_IB1_PREAMBLE_END__IB1_PREAMBLE_END_MASK 0x000fffffL
+#define CP_IB1_PREAMBLE_END__IB1_PREAMBLE_END__SHIFT 0x00000000
+#define CP_IB2_BASE_HI__IB2_BASE_HI_MASK 0x000000ffL
+#define CP_IB2_BASE_HI__IB2_BASE_HI__SHIFT 0x00000000
+#define CP_IB2_BASE_LO__IB2_BASE_LO_MASK 0xfffffffcL
+#define CP_IB2_BASE_LO__IB2_BASE_LO__SHIFT 0x00000002
+#define CP_IB2_BUFSZ__IB2_BUFSZ_MASK 0x000fffffL
+#define CP_IB2_BUFSZ__IB2_BUFSZ__SHIFT 0x00000000
+#define CP_IB2_OFFSET__IB2_OFFSET_MASK 0x000fffffL
+#define CP_IB2_OFFSET__IB2_OFFSET__SHIFT 0x00000000
+#define CP_IB2_PREAMBLE_BEGIN__IB2_PREAMBLE_BEGIN_MASK 0x000fffffL
+#define CP_IB2_PREAMBLE_BEGIN__IB2_PREAMBLE_BEGIN__SHIFT 0x00000000
+#define CP_IB2_PREAMBLE_END__IB2_PREAMBLE_END_MASK 0x000fffffL
+#define CP_IB2_PREAMBLE_END__IB2_PREAMBLE_END__SHIFT 0x00000000
+#define CP_INT_CNTL__CNTX_BUSY_INT_ENABLE_MASK 0x00080000L
+#define CP_INT_CNTL__CNTX_BUSY_INT_ENABLE__SHIFT 0x00000013
+#define CP_INT_CNTL__CNTX_EMPTY_INT_ENABLE_MASK 0x00100000L
+#define CP_INT_CNTL__CNTX_EMPTY_INT_ENABLE__SHIFT 0x00000014
+#define CP_INT_CNTL__CP_ECC_ERROR_INT_ENABLE_MASK 0x00004000L
+#define CP_INT_CNTL__CP_ECC_ERROR_INT_ENABLE__SHIFT 0x0000000e
+#define CP_INT_CNTL__GENERIC0_INT_ENABLE_MASK 0x80000000L
+#define CP_INT_CNTL__GENERIC0_INT_ENABLE__SHIFT 0x0000001f
+#define CP_INT_CNTL__GENERIC1_INT_ENABLE_MASK 0x40000000L
+#define CP_INT_CNTL__GENERIC1_INT_ENABLE__SHIFT 0x0000001e
+#define CP_INT_CNTL__GENERIC2_INT_ENABLE_MASK 0x20000000L
+#define CP_INT_CNTL__GENERIC2_INT_ENABLE__SHIFT 0x0000001d
+#define CP_INT_CNTL__OPCODE_ERROR_INT_ENABLE_MASK 0x01000000L
+#define CP_INT_CNTL__OPCODE_ERROR_INT_ENABLE__SHIFT 0x00000018
+#define CP_INT_CNTL__PRIV_INSTR_INT_ENABLE_MASK 0x00400000L
+#define CP_INT_CNTL__PRIV_INSTR_INT_ENABLE__SHIFT 0x00000016
+#define CP_INT_CNTL__PRIV_REG_INT_ENABLE_MASK 0x00800000L
+#define CP_INT_CNTL__PRIV_REG_INT_ENABLE__SHIFT 0x00000017
+#define CP_INT_CNTL__RESERVED_BIT_ERROR_INT_ENABLE_MASK 0x08000000L
+#define CP_INT_CNTL__RESERVED_BIT_ERROR_INT_ENABLE__SHIFT 0x0000001b
+#define CP_INT_CNTL_RING0__CNTX_BUSY_INT_ENABLE_MASK 0x00080000L
+#define CP_INT_CNTL_RING0__CNTX_BUSY_INT_ENABLE__SHIFT 0x00000013
+#define CP_INT_CNTL_RING0__CNTX_EMPTY_INT_ENABLE_MASK 0x00100000L
+#define CP_INT_CNTL_RING0__CNTX_EMPTY_INT_ENABLE__SHIFT 0x00000014
+#define CP_INT_CNTL_RING0__CP_ECC_ERROR_INT_ENABLE_MASK 0x00004000L
+#define CP_INT_CNTL_RING0__CP_ECC_ERROR_INT_ENABLE__SHIFT 0x0000000e
+#define CP_INT_CNTL_RING0__GENERIC0_INT_ENABLE_MASK 0x80000000L
+#define CP_INT_CNTL_RING0__GENERIC0_INT_ENABLE__SHIFT 0x0000001f
+#define CP_INT_CNTL_RING0__GENERIC1_INT_ENABLE_MASK 0x40000000L
+#define CP_INT_CNTL_RING0__GENERIC1_INT_ENABLE__SHIFT 0x0000001e
+#define CP_INT_CNTL_RING0__GENERIC2_INT_ENABLE_MASK 0x20000000L
+#define CP_INT_CNTL_RING0__GENERIC2_INT_ENABLE__SHIFT 0x0000001d
+#define CP_INT_CNTL_RING0__OPCODE_ERROR_INT_ENABLE_MASK 0x01000000L
+#define CP_INT_CNTL_RING0__OPCODE_ERROR_INT_ENABLE__SHIFT 0x00000018
+#define CP_INT_CNTL_RING0__PRIV_INSTR_INT_ENABLE_MASK 0x00400000L
+#define CP_INT_CNTL_RING0__PRIV_INSTR_INT_ENABLE__SHIFT 0x00000016
+#define CP_INT_CNTL_RING0__PRIV_REG_INT_ENABLE_MASK 0x00800000L
+#define CP_INT_CNTL_RING0__PRIV_REG_INT_ENABLE__SHIFT 0x00000017
+#define CP_INT_CNTL_RING0__RESERVED_BIT_ERROR_INT_ENABLE_MASK 0x08000000L
+#define CP_INT_CNTL_RING0__RESERVED_BIT_ERROR_INT_ENABLE__SHIFT 0x0000001b
+#define CP_INT_CNTL_RING0__TIME_STAMP_INT_ENABLE_MASK 0x04000000L
+#define CP_INT_CNTL_RING0__TIME_STAMP_INT_ENABLE__SHIFT 0x0000001a
+#define CP_INT_CNTL_RING0__WRM_POLL_TIMEOUT_INT_ENABLE_MASK 0x00020000L
+#define CP_INT_CNTL_RING0__WRM_POLL_TIMEOUT_INT_ENABLE__SHIFT 0x00000011
+#define CP_INT_CNTL_RING1__CNTX_BUSY_INT_ENABLE_MASK 0x00080000L
+#define CP_INT_CNTL_RING1__CNTX_BUSY_INT_ENABLE__SHIFT 0x00000013
+#define CP_INT_CNTL_RING1__CNTX_EMPTY_INT_ENABLE_MASK 0x00100000L
+#define CP_INT_CNTL_RING1__CNTX_EMPTY_INT_ENABLE__SHIFT 0x00000014
+#define CP_INT_CNTL_RING1__CP_ECC_ERROR_INT_ENABLE_MASK 0x00004000L
+#define CP_INT_CNTL_RING1__CP_ECC_ERROR_INT_ENABLE__SHIFT 0x0000000e
+#define CP_INT_CNTL_RING1__GENERIC0_INT_ENABLE_MASK 0x80000000L
+#define CP_INT_CNTL_RING1__GENERIC0_INT_ENABLE__SHIFT 0x0000001f
+#define CP_INT_CNTL_RING1__GENERIC1_INT_ENABLE_MASK 0x40000000L
+#define CP_INT_CNTL_RING1__GENERIC1_INT_ENABLE__SHIFT 0x0000001e
+#define CP_INT_CNTL_RING1__GENERIC2_INT_ENABLE_MASK 0x20000000L
+#define CP_INT_CNTL_RING1__GENERIC2_INT_ENABLE__SHIFT 0x0000001d
+#define CP_INT_CNTL_RING1__OPCODE_ERROR_INT_ENABLE_MASK 0x01000000L
+#define CP_INT_CNTL_RING1__OPCODE_ERROR_INT_ENABLE__SHIFT 0x00000018
+#define CP_INT_CNTL_RING1__PRIV_INSTR_INT_ENABLE_MASK 0x00400000L
+#define CP_INT_CNTL_RING1__PRIV_INSTR_INT_ENABLE__SHIFT 0x00000016
+#define CP_INT_CNTL_RING1__PRIV_REG_INT_ENABLE_MASK 0x00800000L
+#define CP_INT_CNTL_RING1__PRIV_REG_INT_ENABLE__SHIFT 0x00000017
+#define CP_INT_CNTL_RING1__RESERVED_BIT_ERROR_INT_ENABLE_MASK 0x08000000L
+#define CP_INT_CNTL_RING1__RESERVED_BIT_ERROR_INT_ENABLE__SHIFT 0x0000001b
+#define CP_INT_CNTL_RING1__TIME_STAMP_INT_ENABLE_MASK 0x04000000L
+#define CP_INT_CNTL_RING1__TIME_STAMP_INT_ENABLE__SHIFT 0x0000001a
+#define CP_INT_CNTL_RING1__WRM_POLL_TIMEOUT_INT_ENABLE_MASK 0x00020000L
+#define CP_INT_CNTL_RING1__WRM_POLL_TIMEOUT_INT_ENABLE__SHIFT 0x00000011
+#define CP_INT_CNTL_RING2__CNTX_BUSY_INT_ENABLE_MASK 0x00080000L
+#define CP_INT_CNTL_RING2__CNTX_BUSY_INT_ENABLE__SHIFT 0x00000013
+#define CP_INT_CNTL_RING2__CNTX_EMPTY_INT_ENABLE_MASK 0x00100000L
+#define CP_INT_CNTL_RING2__CNTX_EMPTY_INT_ENABLE__SHIFT 0x00000014
+#define CP_INT_CNTL_RING2__CP_ECC_ERROR_INT_ENABLE_MASK 0x00004000L
+#define CP_INT_CNTL_RING2__CP_ECC_ERROR_INT_ENABLE__SHIFT 0x0000000e
+#define CP_INT_CNTL_RING2__GENERIC0_INT_ENABLE_MASK 0x80000000L
+#define CP_INT_CNTL_RING2__GENERIC0_INT_ENABLE__SHIFT 0x0000001f
+#define CP_INT_CNTL_RING2__GENERIC1_INT_ENABLE_MASK 0x40000000L
+#define CP_INT_CNTL_RING2__GENERIC1_INT_ENABLE__SHIFT 0x0000001e
+#define CP_INT_CNTL_RING2__GENERIC2_INT_ENABLE_MASK 0x20000000L
+#define CP_INT_CNTL_RING2__GENERIC2_INT_ENABLE__SHIFT 0x0000001d
+#define CP_INT_CNTL_RING2__OPCODE_ERROR_INT_ENABLE_MASK 0x01000000L
+#define CP_INT_CNTL_RING2__OPCODE_ERROR_INT_ENABLE__SHIFT 0x00000018
+#define CP_INT_CNTL_RING2__PRIV_INSTR_INT_ENABLE_MASK 0x00400000L
+#define CP_INT_CNTL_RING2__PRIV_INSTR_INT_ENABLE__SHIFT 0x00000016
+#define CP_INT_CNTL_RING2__PRIV_REG_INT_ENABLE_MASK 0x00800000L
+#define CP_INT_CNTL_RING2__PRIV_REG_INT_ENABLE__SHIFT 0x00000017
+#define CP_INT_CNTL_RING2__RESERVED_BIT_ERROR_INT_ENABLE_MASK 0x08000000L
+#define CP_INT_CNTL_RING2__RESERVED_BIT_ERROR_INT_ENABLE__SHIFT 0x0000001b
+#define CP_INT_CNTL_RING2__TIME_STAMP_INT_ENABLE_MASK 0x04000000L
+#define CP_INT_CNTL_RING2__TIME_STAMP_INT_ENABLE__SHIFT 0x0000001a
+#define CP_INT_CNTL_RING2__WRM_POLL_TIMEOUT_INT_ENABLE_MASK 0x00020000L
+#define CP_INT_CNTL_RING2__WRM_POLL_TIMEOUT_INT_ENABLE__SHIFT 0x00000011
+#define CP_INT_CNTL__TIME_STAMP_INT_ENABLE_MASK 0x04000000L
+#define CP_INT_CNTL__TIME_STAMP_INT_ENABLE__SHIFT 0x0000001a
+#define CP_INT_CNTL__WRM_POLL_TIMEOUT_INT_ENABLE_MASK 0x00020000L
+#define CP_INT_CNTL__WRM_POLL_TIMEOUT_INT_ENABLE__SHIFT 0x00000011
+#define CP_INT_STAT_DEBUG__CNTX_BUSY_INT_ASSERTED_MASK 0x00080000L
+#define CP_INT_STAT_DEBUG__CNTX_BUSY_INT_ASSERTED__SHIFT 0x00000013
+#define CP_INT_STAT_DEBUG__CNTX_EMPTY_INT_ASSERTED_MASK 0x00100000L
+#define CP_INT_STAT_DEBUG__CNTX_EMPTY_INT_ASSERTED__SHIFT 0x00000014
+#define CP_INT_STAT_DEBUG__CP_ECC_ERROR_INT_ASSERTED_MASK 0x00004000L
+#define CP_INT_STAT_DEBUG__CP_ECC_ERROR_INT_ASSERTED__SHIFT 0x0000000e
+#define CP_INT_STAT_DEBUG__GENERIC0_INT_ASSERTED_MASK 0x80000000L
+#define CP_INT_STAT_DEBUG__GENERIC0_INT_ASSERTED__SHIFT 0x0000001f
+#define CP_INT_STAT_DEBUG__GENERIC1_INT_ASSERTED_MASK 0x40000000L
+#define CP_INT_STAT_DEBUG__GENERIC1_INT_ASSERTED__SHIFT 0x0000001e
+#define CP_INT_STAT_DEBUG__GENERIC2_INT_ASSERTED_MASK 0x20000000L
+#define CP_INT_STAT_DEBUG__GENERIC2_INT_ASSERTED__SHIFT 0x0000001d
+#define CP_INT_STAT_DEBUG__OPCODE_ERROR_INT_ASSERTED_MASK 0x01000000L
+#define CP_INT_STAT_DEBUG__OPCODE_ERROR_INT_ASSERTED__SHIFT 0x00000018
+#define CP_INT_STAT_DEBUG__PRIV_INSTR_INT_ASSERTED_MASK 0x00400000L
+#define CP_INT_STAT_DEBUG__PRIV_INSTR_INT_ASSERTED__SHIFT 0x00000016
+#define CP_INT_STAT_DEBUG__PRIV_REG_INT_ASSERTED_MASK 0x00800000L
+#define CP_INT_STAT_DEBUG__PRIV_REG_INT_ASSERTED__SHIFT 0x00000017
+#define CP_INT_STAT_DEBUG__RESERVED_BIT_ERROR_INT_ASSERTED_MASK 0x08000000L
+#define CP_INT_STAT_DEBUG__RESERVED_BIT_ERROR_INT_ASSERTED__SHIFT 0x0000001b
+#define CP_INT_STAT_DEBUG__TIME_STAMP_INT_ASSERTED_MASK 0x04000000L
+#define CP_INT_STAT_DEBUG__TIME_STAMP_INT_ASSERTED__SHIFT 0x0000001a
+#define CP_INT_STAT_DEBUG__WRM_POLL_TIMEOUT_INT_ASSERTED_MASK 0x00020000L
+#define CP_INT_STAT_DEBUG__WRM_POLL_TIMEOUT_INT_ASSERTED__SHIFT 0x00000011
+#define CP_INT_STATUS__CNTX_BUSY_INT_STAT_MASK 0x00080000L
+#define CP_INT_STATUS__CNTX_BUSY_INT_STAT__SHIFT 0x00000013
+#define CP_INT_STATUS__CNTX_EMPTY_INT_STAT_MASK 0x00100000L
+#define CP_INT_STATUS__CNTX_EMPTY_INT_STAT__SHIFT 0x00000014
+#define CP_INT_STATUS__CP_ECC_ERROR_INT_STAT_MASK 0x00004000L
+#define CP_INT_STATUS__CP_ECC_ERROR_INT_STAT__SHIFT 0x0000000e
+#define CP_INT_STATUS__GENERIC0_INT_STAT_MASK 0x80000000L
+#define CP_INT_STATUS__GENERIC0_INT_STAT__SHIFT 0x0000001f
+#define CP_INT_STATUS__GENERIC1_INT_STAT_MASK 0x40000000L
+#define CP_INT_STATUS__GENERIC1_INT_STAT__SHIFT 0x0000001e
+#define CP_INT_STATUS__GENERIC2_INT_STAT_MASK 0x20000000L
+#define CP_INT_STATUS__GENERIC2_INT_STAT__SHIFT 0x0000001d
+#define CP_INT_STATUS__OPCODE_ERROR_INT_STAT_MASK 0x01000000L
+#define CP_INT_STATUS__OPCODE_ERROR_INT_STAT__SHIFT 0x00000018
+#define CP_INT_STATUS__PRIV_INSTR_INT_STAT_MASK 0x00400000L
+#define CP_INT_STATUS__PRIV_INSTR_INT_STAT__SHIFT 0x00000016
+#define CP_INT_STATUS__PRIV_REG_INT_STAT_MASK 0x00800000L
+#define CP_INT_STATUS__PRIV_REG_INT_STAT__SHIFT 0x00000017
+#define CP_INT_STATUS__RESERVED_BIT_ERROR_INT_STAT_MASK 0x08000000L
+#define CP_INT_STATUS__RESERVED_BIT_ERROR_INT_STAT__SHIFT 0x0000001b
+#define CP_INT_STATUS_RING0__CNTX_BUSY_INT_STAT_MASK 0x00080000L
+#define CP_INT_STATUS_RING0__CNTX_BUSY_INT_STAT__SHIFT 0x00000013
+#define CP_INT_STATUS_RING0__CNTX_EMPTY_INT_STAT_MASK 0x00100000L
+#define CP_INT_STATUS_RING0__CNTX_EMPTY_INT_STAT__SHIFT 0x00000014
+#define CP_INT_STATUS_RING0__CP_ECC_ERROR_INT_STAT_MASK 0x00004000L
+#define CP_INT_STATUS_RING0__CP_ECC_ERROR_INT_STAT__SHIFT 0x0000000e
+#define CP_INT_STATUS_RING0__GENERIC0_INT_STAT_MASK 0x80000000L
+#define CP_INT_STATUS_RING0__GENERIC0_INT_STAT__SHIFT 0x0000001f
+#define CP_INT_STATUS_RING0__GENERIC1_INT_STAT_MASK 0x40000000L
+#define CP_INT_STATUS_RING0__GENERIC1_INT_STAT__SHIFT 0x0000001e
+#define CP_INT_STATUS_RING0__GENERIC2_INT_STAT_MASK 0x20000000L
+#define CP_INT_STATUS_RING0__GENERIC2_INT_STAT__SHIFT 0x0000001d
+#define CP_INT_STATUS_RING0__OPCODE_ERROR_INT_STAT_MASK 0x01000000L
+#define CP_INT_STATUS_RING0__OPCODE_ERROR_INT_STAT__SHIFT 0x00000018
+#define CP_INT_STATUS_RING0__PRIV_INSTR_INT_STAT_MASK 0x00400000L
+#define CP_INT_STATUS_RING0__PRIV_INSTR_INT_STAT__SHIFT 0x00000016
+#define CP_INT_STATUS_RING0__PRIV_REG_INT_STAT_MASK 0x00800000L
+#define CP_INT_STATUS_RING0__PRIV_REG_INT_STAT__SHIFT 0x00000017
+#define CP_INT_STATUS_RING0__RESERVED_BIT_ERROR_INT_STAT_MASK 0x08000000L
+#define CP_INT_STATUS_RING0__RESERVED_BIT_ERROR_INT_STAT__SHIFT 0x0000001b
+#define CP_INT_STATUS_RING0__TIME_STAMP_INT_STAT_MASK 0x04000000L
+#define CP_INT_STATUS_RING0__TIME_STAMP_INT_STAT__SHIFT 0x0000001a
+#define CP_INT_STATUS_RING0__WRM_POLL_TIMEOUT_INT_STAT_MASK 0x00020000L
+#define CP_INT_STATUS_RING0__WRM_POLL_TIMEOUT_INT_STAT__SHIFT 0x00000011
+#define CP_INT_STATUS_RING1__CNTX_BUSY_INT_STAT_MASK 0x00080000L
+#define CP_INT_STATUS_RING1__CNTX_BUSY_INT_STAT__SHIFT 0x00000013
+#define CP_INT_STATUS_RING1__CNTX_EMPTY_INT_STAT_MASK 0x00100000L
+#define CP_INT_STATUS_RING1__CNTX_EMPTY_INT_STAT__SHIFT 0x00000014
+#define CP_INT_STATUS_RING1__CP_ECC_ERROR_INT_STAT_MASK 0x00004000L
+#define CP_INT_STATUS_RING1__CP_ECC_ERROR_INT_STAT__SHIFT 0x0000000e
+#define CP_INT_STATUS_RING1__GENERIC0_INT_STAT_MASK 0x80000000L
+#define CP_INT_STATUS_RING1__GENERIC0_INT_STAT__SHIFT 0x0000001f
+#define CP_INT_STATUS_RING1__GENERIC1_INT_STAT_MASK 0x40000000L
+#define CP_INT_STATUS_RING1__GENERIC1_INT_STAT__SHIFT 0x0000001e
+#define CP_INT_STATUS_RING1__GENERIC2_INT_STAT_MASK 0x20000000L
+#define CP_INT_STATUS_RING1__GENERIC2_INT_STAT__SHIFT 0x0000001d
+#define CP_INT_STATUS_RING1__OPCODE_ERROR_INT_STAT_MASK 0x01000000L
+#define CP_INT_STATUS_RING1__OPCODE_ERROR_INT_STAT__SHIFT 0x00000018
+#define CP_INT_STATUS_RING1__PRIV_INSTR_INT_STAT_MASK 0x00400000L
+#define CP_INT_STATUS_RING1__PRIV_INSTR_INT_STAT__SHIFT 0x00000016
+#define CP_INT_STATUS_RING1__PRIV_REG_INT_STAT_MASK 0x00800000L
+#define CP_INT_STATUS_RING1__PRIV_REG_INT_STAT__SHIFT 0x00000017
+#define CP_INT_STATUS_RING1__RESERVED_BIT_ERROR_INT_STAT_MASK 0x08000000L
+#define CP_INT_STATUS_RING1__RESERVED_BIT_ERROR_INT_STAT__SHIFT 0x0000001b
+#define CP_INT_STATUS_RING1__TIME_STAMP_INT_STAT_MASK 0x04000000L
+#define CP_INT_STATUS_RING1__TIME_STAMP_INT_STAT__SHIFT 0x0000001a
+#define CP_INT_STATUS_RING1__WRM_POLL_TIMEOUT_INT_STAT_MASK 0x00020000L
+#define CP_INT_STATUS_RING1__WRM_POLL_TIMEOUT_INT_STAT__SHIFT 0x00000011
+#define CP_INT_STATUS_RING2__CNTX_BUSY_INT_STAT_MASK 0x00080000L
+#define CP_INT_STATUS_RING2__CNTX_BUSY_INT_STAT__SHIFT 0x00000013
+#define CP_INT_STATUS_RING2__CNTX_EMPTY_INT_STAT_MASK 0x00100000L
+#define CP_INT_STATUS_RING2__CNTX_EMPTY_INT_STAT__SHIFT 0x00000014
+#define CP_INT_STATUS_RING2__CP_ECC_ERROR_INT_STAT_MASK 0x00004000L
+#define CP_INT_STATUS_RING2__CP_ECC_ERROR_INT_STAT__SHIFT 0x0000000e
+#define CP_INT_STATUS_RING2__GENERIC0_INT_STAT_MASK 0x80000000L
+#define CP_INT_STATUS_RING2__GENERIC0_INT_STAT__SHIFT 0x0000001f
+#define CP_INT_STATUS_RING2__GENERIC1_INT_STAT_MASK 0x40000000L
+#define CP_INT_STATUS_RING2__GENERIC1_INT_STAT__SHIFT 0x0000001e
+#define CP_INT_STATUS_RING2__GENERIC2_INT_STAT_MASK 0x20000000L
+#define CP_INT_STATUS_RING2__GENERIC2_INT_STAT__SHIFT 0x0000001d
+#define CP_INT_STATUS_RING2__OPCODE_ERROR_INT_STAT_MASK 0x01000000L
+#define CP_INT_STATUS_RING2__OPCODE_ERROR_INT_STAT__SHIFT 0x00000018
+#define CP_INT_STATUS_RING2__PRIV_INSTR_INT_STAT_MASK 0x00400000L
+#define CP_INT_STATUS_RING2__PRIV_INSTR_INT_STAT__SHIFT 0x00000016
+#define CP_INT_STATUS_RING2__PRIV_REG_INT_STAT_MASK 0x00800000L
+#define CP_INT_STATUS_RING2__PRIV_REG_INT_STAT__SHIFT 0x00000017
+#define CP_INT_STATUS_RING2__RESERVED_BIT_ERROR_INT_STAT_MASK 0x08000000L
+#define CP_INT_STATUS_RING2__RESERVED_BIT_ERROR_INT_STAT__SHIFT 0x0000001b
+#define CP_INT_STATUS_RING2__TIME_STAMP_INT_STAT_MASK 0x04000000L
+#define CP_INT_STATUS_RING2__TIME_STAMP_INT_STAT__SHIFT 0x0000001a
+#define CP_INT_STATUS_RING2__WRM_POLL_TIMEOUT_INT_STAT_MASK 0x00020000L
+#define CP_INT_STATUS_RING2__WRM_POLL_TIMEOUT_INT_STAT__SHIFT 0x00000011
+#define CP_INT_STATUS__TIME_STAMP_INT_STAT_MASK 0x04000000L
+#define CP_INT_STATUS__TIME_STAMP_INT_STAT__SHIFT 0x0000001a
+#define CP_INT_STATUS__WRM_POLL_TIMEOUT_INT_STAT_MASK 0x00020000L
+#define CP_INT_STATUS__WRM_POLL_TIMEOUT_INT_STAT__SHIFT 0x00000011
+#define CP_MC_PACK_DELAY_CNT__PACK_DELAY_CNT_MASK 0x0000001fL
+#define CP_MC_PACK_DELAY_CNT__PACK_DELAY_CNT__SHIFT 0x00000000
+#define CP_ME_CNTL__CE_HALT_MASK 0x01000000L
+#define CP_ME_CNTL__CE_HALT__SHIFT 0x00000018
+#define CP_ME_CNTL__CE_INVALIDATE_ICACHE_MASK 0x00000010L
+#define CP_ME_CNTL__CE_INVALIDATE_ICACHE__SHIFT 0x00000004
+#define CP_ME_CNTL__CE_STEP_MASK 0x02000000L
+#define CP_ME_CNTL__CE_STEP__SHIFT 0x00000019
+#define CP_ME_CNTL__ME_HALT_MASK 0x10000000L
+#define CP_ME_CNTL__ME_HALT__SHIFT 0x0000001c
+#define CP_ME_CNTL__ME_INVALIDATE_ICACHE_MASK 0x00000100L
+#define CP_ME_CNTL__ME_INVALIDATE_ICACHE__SHIFT 0x00000008
+#define CP_ME_CNTL__ME_STEP_MASK 0x20000000L
+#define CP_ME_CNTL__ME_STEP__SHIFT 0x0000001d
+#define CP_ME_CNTL__PFP_HALT_MASK 0x04000000L
+#define CP_ME_CNTL__PFP_HALT__SHIFT 0x0000001a
+#define CP_ME_CNTL__PFP_INVALIDATE_ICACHE_MASK 0x00000040L
+#define CP_ME_CNTL__PFP_INVALIDATE_ICACHE__SHIFT 0x00000006
+#define CP_ME_CNTL__PFP_STEP_MASK 0x08000000L
+#define CP_ME_CNTL__PFP_STEP__SHIFT 0x0000001b
+#define CP_ME_HEADER_DUMP__ME_HEADER_DUMP_MASK 0xffffffffL
+#define CP_ME_HEADER_DUMP__ME_HEADER_DUMP__SHIFT 0x00000000
+#define CP_ME_MC_RADDR_HI__ME_MC_RADDR_HI_MASK 0x000000ffL
+#define CP_ME_MC_RADDR_HI__ME_MC_RADDR_HI__SHIFT 0x00000000
+#define CP_ME_MC_RADDR_LO__ME_MC_RADDR_LO_MASK 0xfffffffcL
+#define CP_ME_MC_RADDR_LO__ME_MC_RADDR_LO__SHIFT 0x00000002
+#define CP_ME_MC_RADDR_LO__ME_MC_RADDR_SWAP_MASK 0x00000003L
+#define CP_ME_MC_RADDR_LO__ME_MC_RADDR_SWAP__SHIFT 0x00000000
+#define CP_ME_MC_WADDR_HI__ME_MC_WADDR_HI_MASK 0x000000ffL
+#define CP_ME_MC_WADDR_HI__ME_MC_WADDR_HI__SHIFT 0x00000000
+#define CP_ME_MC_WADDR_LO__ME_MC_WADDR_LO_MASK 0xfffffffcL
+#define CP_ME_MC_WADDR_LO__ME_MC_WADDR_LO__SHIFT 0x00000002
+#define CP_ME_MC_WADDR_LO__ME_MC_WADDR_SWAP_MASK 0x00000003L
+#define CP_ME_MC_WADDR_LO__ME_MC_WADDR_SWAP__SHIFT 0x00000000
+#define CP_ME_MC_WDATA_HI__ME_MC_WDATA_HI_MASK 0xffffffffL
+#define CP_ME_MC_WDATA_HI__ME_MC_WDATA_HI__SHIFT 0x00000000
+#define CP_ME_MC_WDATA_LO__ME_MC_WDATA_LO_MASK 0xffffffffL
+#define CP_ME_MC_WDATA_LO__ME_MC_WDATA_LO__SHIFT 0x00000000
+#define CP_MEM_SLP_CNTL__CP_MEM_DS_EN_MASK 0x00000002L
+#define CP_MEM_SLP_CNTL__CP_MEM_DS_EN__SHIFT 0x00000001
+#define CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK 0x00000001L
+#define CP_MEM_SLP_CNTL__CP_MEM_LS_EN__SHIFT 0x00000000
+#define CP_MEM_SLP_CNTL__CP_MEM_LS_OFF_DELAY_MASK 0x00ff0000L
+#define CP_MEM_SLP_CNTL__CP_MEM_LS_OFF_DELAY__SHIFT 0x00000010
+#define CP_MEM_SLP_CNTL__CP_MEM_LS_ON_DELAY_MASK 0x0000ff00L
+#define CP_MEM_SLP_CNTL__CP_MEM_LS_ON_DELAY__SHIFT 0x00000008
+#define CP_MEM_SLP_CNTL__RESERVED1_MASK 0xff000000L
+#define CP_MEM_SLP_CNTL__RESERVED1__SHIFT 0x00000018
+#define CP_MEM_SLP_CNTL__RESERVED_MASK 0x000000fcL
+#define CP_MEM_SLP_CNTL__RESERVED__SHIFT 0x00000002
+#define CP_ME_PREEMPTION__ME_CNTXSW_PREEMPTION_MASK 0x00000001L
+#define CP_ME_PREEMPTION__ME_CNTXSW_PREEMPTION__SHIFT 0x00000000
+#define CP_MEQ_AVAIL__MEQ_CNT_MASK 0x000003ffL
+#define CP_MEQ_AVAIL__MEQ_CNT__SHIFT 0x00000000
+#define CP_MEQ_STAT__MEQ_RPTR_MASK 0x000003ffL
+#define CP_MEQ_STAT__MEQ_RPTR__SHIFT 0x00000000
+#define CP_MEQ_STAT__MEQ_WPTR_MASK 0x03ff0000L
+#define CP_MEQ_STAT__MEQ_WPTR__SHIFT 0x00000010
+#define CP_MEQ_THRESHOLDS__MEQ1_START_MASK 0x000000ffL
+#define CP_MEQ_THRESHOLDS__MEQ1_START__SHIFT 0x00000000
+#define CP_MEQ_THRESHOLDS__MEQ2_START_MASK 0x0000ff00L
+#define CP_MEQ_THRESHOLDS__MEQ2_START__SHIFT 0x00000008
+#define CP_ME_RAM_DATA__ME_RAM_DATA_MASK 0xffffffffL
+#define CP_ME_RAM_DATA__ME_RAM_DATA__SHIFT 0x00000000
+#define CP_ME_RAM_RADDR__ME_RAM_RADDR_MASK 0x00000fffL
+#define CP_ME_RAM_RADDR__ME_RAM_RADDR__SHIFT 0x00000000
+#define CP_ME_RAM_WADDR__ME_RAM_WADDR_MASK 0x00000fffL
+#define CP_ME_RAM_WADDR__ME_RAM_WADDR__SHIFT 0x00000000
+#define CP_NUM_PRIM_NEEDED_COUNT0_HI__NUM_PRIM_NEEDED_CNT0_HI_MASK 0xffffffffL
+#define CP_NUM_PRIM_NEEDED_COUNT0_HI__NUM_PRIM_NEEDED_CNT0_HI__SHIFT 0x00000000
+#define CP_NUM_PRIM_NEEDED_COUNT0_LO__NUM_PRIM_NEEDED_CNT0_LO_MASK 0xffffffffL
+#define CP_NUM_PRIM_NEEDED_COUNT0_LO__NUM_PRIM_NEEDED_CNT0_LO__SHIFT 0x00000000
+#define CP_NUM_PRIM_NEEDED_COUNT1_HI__NUM_PRIM_NEEDED_CNT1_HI_MASK 0xffffffffL
+#define CP_NUM_PRIM_NEEDED_COUNT1_HI__NUM_PRIM_NEEDED_CNT1_HI__SHIFT 0x00000000
+#define CP_NUM_PRIM_NEEDED_COUNT1_LO__NUM_PRIM_NEEDED_CNT1_LO_MASK 0xffffffffL
+#define CP_NUM_PRIM_NEEDED_COUNT1_LO__NUM_PRIM_NEEDED_CNT1_LO__SHIFT 0x00000000
+#define CP_NUM_PRIM_NEEDED_COUNT2_HI__NUM_PRIM_NEEDED_CNT2_HI_MASK 0xffffffffL
+#define CP_NUM_PRIM_NEEDED_COUNT2_HI__NUM_PRIM_NEEDED_CNT2_HI__SHIFT 0x00000000
+#define CP_NUM_PRIM_NEEDED_COUNT2_LO__NUM_PRIM_NEEDED_CNT2_LO_MASK 0xffffffffL
+#define CP_NUM_PRIM_NEEDED_COUNT2_LO__NUM_PRIM_NEEDED_CNT2_LO__SHIFT 0x00000000
+#define CP_NUM_PRIM_NEEDED_COUNT3_HI__NUM_PRIM_NEEDED_CNT3_HI_MASK 0xffffffffL
+#define CP_NUM_PRIM_NEEDED_COUNT3_HI__NUM_PRIM_NEEDED_CNT3_HI__SHIFT 0x00000000
+#define CP_NUM_PRIM_NEEDED_COUNT3_LO__NUM_PRIM_NEEDED_CNT3_LO_MASK 0xffffffffL
+#define CP_NUM_PRIM_NEEDED_COUNT3_LO__NUM_PRIM_NEEDED_CNT3_LO__SHIFT 0x00000000
+#define CP_NUM_PRIM_WRITTEN_COUNT0_HI__NUM_PRIM_WRITTEN_CNT0_HI_MASK 0xffffffffL
+#define CP_NUM_PRIM_WRITTEN_COUNT0_HI__NUM_PRIM_WRITTEN_CNT0_HI__SHIFT 0x00000000
+#define CP_NUM_PRIM_WRITTEN_COUNT0_LO__NUM_PRIM_WRITTEN_CNT0_LO_MASK 0xffffffffL
+#define CP_NUM_PRIM_WRITTEN_COUNT0_LO__NUM_PRIM_WRITTEN_CNT0_LO__SHIFT 0x00000000
+#define CP_NUM_PRIM_WRITTEN_COUNT1_HI__NUM_PRIM_WRITTEN_CNT1_HI_MASK 0xffffffffL
+#define CP_NUM_PRIM_WRITTEN_COUNT1_HI__NUM_PRIM_WRITTEN_CNT1_HI__SHIFT 0x00000000
+#define CP_NUM_PRIM_WRITTEN_COUNT1_LO__NUM_PRIM_WRITTEN_CNT1_LO_MASK 0xffffffffL
+#define CP_NUM_PRIM_WRITTEN_COUNT1_LO__NUM_PRIM_WRITTEN_CNT1_LO__SHIFT 0x00000000
+#define CP_NUM_PRIM_WRITTEN_COUNT2_HI__NUM_PRIM_WRITTEN_CNT2_HI_MASK 0xffffffffL
+#define CP_NUM_PRIM_WRITTEN_COUNT2_HI__NUM_PRIM_WRITTEN_CNT2_HI__SHIFT 0x00000000
+#define CP_NUM_PRIM_WRITTEN_COUNT2_LO__NUM_PRIM_WRITTEN_CNT2_LO_MASK 0xffffffffL
+#define CP_NUM_PRIM_WRITTEN_COUNT2_LO__NUM_PRIM_WRITTEN_CNT2_LO__SHIFT 0x00000000
+#define CP_NUM_PRIM_WRITTEN_COUNT3_HI__NUM_PRIM_WRITTEN_CNT3_HI_MASK 0xffffffffL
+#define CP_NUM_PRIM_WRITTEN_COUNT3_HI__NUM_PRIM_WRITTEN_CNT3_HI__SHIFT 0x00000000
+#define CP_NUM_PRIM_WRITTEN_COUNT3_LO__NUM_PRIM_WRITTEN_CNT3_LO_MASK 0xffffffffL
+#define CP_NUM_PRIM_WRITTEN_COUNT3_LO__NUM_PRIM_WRITTEN_CNT3_LO__SHIFT 0x00000000
+#define CP_PA_CINVOC_COUNT_HI__CINVOC_COUNT_HI_MASK 0xffffffffL
+#define CP_PA_CINVOC_COUNT_HI__CINVOC_COUNT_HI__SHIFT 0x00000000
+#define CP_PA_CINVOC_COUNT_LO__CINVOC_COUNT_LO_MASK 0xffffffffL
+#define CP_PA_CINVOC_COUNT_LO__CINVOC_COUNT_LO__SHIFT 0x00000000
+#define CP_PA_CPRIM_COUNT_HI__CPRIM_COUNT_HI_MASK 0xffffffffL
+#define CP_PA_CPRIM_COUNT_HI__CPRIM_COUNT_HI__SHIFT 0x00000000
+#define CP_PA_CPRIM_COUNT_LO__CPRIM_COUNT_LO_MASK 0xffffffffL
+#define CP_PA_CPRIM_COUNT_LO__CPRIM_COUNT_LO__SHIFT 0x00000000
+#define CP_PERFMON_CNTL__PERFMON_ENABLE_MODE_MASK 0x00000300L
+#define CP_PERFMON_CNTL__PERFMON_ENABLE_MODE__SHIFT 0x00000008
+#define CP_PERFMON_CNTL__PERFMON_SAMPLE_ENABLE_MASK 0x00000400L
+#define CP_PERFMON_CNTL__PERFMON_SAMPLE_ENABLE__SHIFT 0x0000000a
+#define CP_PERFMON_CNTL__PERFMON_STATE_MASK 0x0000000fL
+#define CP_PERFMON_CNTL__PERFMON_STATE__SHIFT 0x00000000
+#define CP_PERFMON_CNTL__SPM_PERFMON_STATE_MASK 0x000000f0L
+#define CP_PERFMON_CNTL__SPM_PERFMON_STATE__SHIFT 0x00000004
+#define CP_PERFMON_CNTX_CNTL__PERFMON_ENABLE_MASK 0x80000000L
+#define CP_PERFMON_CNTX_CNTL__PERFMON_ENABLE__SHIFT 0x0000001f
+#define CP_PFP_HEADER_DUMP__PFP_HEADER_DUMP_MASK 0xffffffffL
+#define CP_PFP_HEADER_DUMP__PFP_HEADER_DUMP__SHIFT 0x00000000
+#define CP_PFP_IB_CONTROL__IB_EN_MASK 0x00000001L
+#define CP_PFP_IB_CONTROL__IB_EN__SHIFT 0x00000000
+#define CP_PFP_LOAD_CONTROL__CNTX_REG_EN_MASK 0x00000002L
+#define CP_PFP_LOAD_CONTROL__CNTX_REG_EN__SHIFT 0x00000001
+#define CP_PFP_LOAD_CONTROL__CONFIG_REG_EN_MASK 0x00000001L
+#define CP_PFP_LOAD_CONTROL__CONFIG_REG_EN__SHIFT 0x00000000
+#define CP_PFP_LOAD_CONTROL__SH_CS_REG_EN_MASK 0x01000000L
+#define CP_PFP_LOAD_CONTROL__SH_CS_REG_EN__SHIFT 0x00000018
+#define CP_PFP_LOAD_CONTROL__SH_GFX_REG_EN_MASK 0x00010000L
+#define CP_PFP_LOAD_CONTROL__SH_GFX_REG_EN__SHIFT 0x00000010
+#define CP_PFP_LOAD_CONTROL__UCONFIG_REG_EN_MASK 0x00008000L
+#define CP_PFP_LOAD_CONTROL__UCONFIG_REG_EN__SHIFT 0x0000000f
+#define CP_PFP_UCODE_ADDR__UCODE_ADDR_MASK 0x00000fffL
+#define CP_PFP_UCODE_ADDR__UCODE_ADDR__SHIFT 0x00000000
+#define CP_PFP_UCODE_DATA__UCODE_DATA_MASK 0xffffffffL
+#define CP_PFP_UCODE_DATA__UCODE_DATA__SHIFT 0x00000000
+#define CP_PIPE_STATS_ADDR_HI__PIPE_STATS_ADDR_HI_MASK 0xffffffffL
+#define CP_PIPE_STATS_ADDR_HI__PIPE_STATS_ADDR_HI__SHIFT 0x00000000
+#define CP_PIPE_STATS_ADDR_LO__PIPE_STATS_ADDR_LO_MASK 0xfffffffcL
+#define CP_PIPE_STATS_ADDR_LO__PIPE_STATS_ADDR_LO__SHIFT 0x00000002
+#define CP_PIPE_STATS_ADDR_LO__PIPE_STATS_ADDR_SWAP_MASK 0x00000003L
+#define CP_PIPE_STATS_ADDR_LO__PIPE_STATS_ADDR_SWAP__SHIFT 0x00000000
+#define CP_PWR_CNTL__GFX_CLK_HALT_MASK 0x00000001L
+#define CP_PWR_CNTL__GFX_CLK_HALT__SHIFT 0x00000000
+#define CP_QUEUE_THRESHOLDS__ROQ_IB1_START_MASK 0x0000003fL
+#define CP_QUEUE_THRESHOLDS__ROQ_IB1_START__SHIFT 0x00000000
+#define CP_QUEUE_THRESHOLDS__ROQ_IB2_START_MASK 0x00003f00L
+#define CP_QUEUE_THRESHOLDS__ROQ_IB2_START__SHIFT 0x00000008
+#define CP_RB0_BASE_HI__RB_BASE_HI_MASK 0x000000ffL
+#define CP_RB0_BASE_HI__RB_BASE_HI__SHIFT 0x00000000
+#define CP_RB0_BASE__RB_BASE_MASK 0xffffffffL
+#define CP_RB0_BASE__RB_BASE__SHIFT 0x00000000
+#define CP_RB0_CNTL__BUF_SWAP_MASK 0x00030000L
+#define CP_RB0_CNTL__BUF_SWAP__SHIFT 0x00000010
+#define CP_RB0_CNTL__CACHE_POLICY_MASK 0x03000000L
+#define CP_RB0_CNTL__CACHE_POLICY__SHIFT 0x00000018
+#define CP_RB0_CNTL__MIN_AVAILSZ_MASK 0x00300000L
+#define CP_RB0_CNTL__MIN_AVAILSZ__SHIFT 0x00000014
+#define CP_RB0_CNTL__MIN_IB_AVAILSZ_MASK 0x00c00000L
+#define CP_RB0_CNTL__MIN_IB_AVAILSZ__SHIFT 0x00000016
+#define CP_RB0_CNTL__RB_BLKSZ_MASK 0x00003f00L
+#define CP_RB0_CNTL__RB_BLKSZ__SHIFT 0x00000008
+#define CP_RB0_CNTL__RB_BUFSZ_MASK 0x0000003fL
+#define CP_RB0_CNTL__RB_BUFSZ__SHIFT 0x00000000
+#define CP_RB0_CNTL__RB_NO_UPDATE_MASK 0x08000000L
+#define CP_RB0_CNTL__RB_NO_UPDATE__SHIFT 0x0000001b
+#define CP_RB0_CNTL__RB_RPTR_WR_ENA_MASK 0x80000000L
+#define CP_RB0_CNTL__RB_RPTR_WR_ENA__SHIFT 0x0000001f
+#define CP_RB0_CNTL__RB_VOLATILE_MASK 0x04000000L
+#define CP_RB0_CNTL__RB_VOLATILE__SHIFT 0x0000001a
+#define CP_RB0_RPTR_ADDR_HI__RB_RPTR_ADDR_HI_MASK 0x000000ffL
+#define CP_RB0_RPTR_ADDR_HI__RB_RPTR_ADDR_HI__SHIFT 0x00000000
+#define CP_RB0_RPTR_ADDR__RB_RPTR_ADDR_MASK 0xfffffffcL
+#define CP_RB0_RPTR_ADDR__RB_RPTR_ADDR__SHIFT 0x00000002
+#define CP_RB0_RPTR_ADDR__RB_RPTR_SWAP_MASK 0x00000003L
+#define CP_RB0_RPTR_ADDR__RB_RPTR_SWAP__SHIFT 0x00000000
+#define CP_RB0_RPTR__RB_RPTR_MASK 0x000fffffL
+#define CP_RB0_RPTR__RB_RPTR__SHIFT 0x00000000
+#define CP_RB0_WPTR__RB_WPTR_MASK 0x000fffffL
+#define CP_RB0_WPTR__RB_WPTR__SHIFT 0x00000000
+#define CP_RB1_BASE_HI__RB_BASE_HI_MASK 0x000000ffL
+#define CP_RB1_BASE_HI__RB_BASE_HI__SHIFT 0x00000000
+#define CP_RB1_BASE__RB_BASE_MASK 0xffffffffL
+#define CP_RB1_BASE__RB_BASE__SHIFT 0x00000000
+#define CP_RB1_CNTL__CACHE_POLICY_MASK 0x03000000L
+#define CP_RB1_CNTL__CACHE_POLICY__SHIFT 0x00000018
+#define CP_RB1_CNTL__MIN_AVAILSZ_MASK 0x00300000L
+#define CP_RB1_CNTL__MIN_AVAILSZ__SHIFT 0x00000014
+#define CP_RB1_CNTL__MIN_IB_AVAILSZ_MASK 0x00c00000L
+#define CP_RB1_CNTL__MIN_IB_AVAILSZ__SHIFT 0x00000016
+#define CP_RB1_CNTL__RB_BLKSZ_MASK 0x00003f00L
+#define CP_RB1_CNTL__RB_BLKSZ__SHIFT 0x00000008
+#define CP_RB1_CNTL__RB_BUFSZ_MASK 0x0000003fL
+#define CP_RB1_CNTL__RB_BUFSZ__SHIFT 0x00000000
+#define CP_RB1_CNTL__RB_NO_UPDATE_MASK 0x08000000L
+#define CP_RB1_CNTL__RB_NO_UPDATE__SHIFT 0x0000001b
+#define CP_RB1_CNTL__RB_RPTR_WR_ENA_MASK 0x80000000L
+#define CP_RB1_CNTL__RB_RPTR_WR_ENA__SHIFT 0x0000001f
+#define CP_RB1_CNTL__RB_VOLATILE_MASK 0x04000000L
+#define CP_RB1_CNTL__RB_VOLATILE__SHIFT 0x0000001a
+#define CP_RB1_RPTR_ADDR_HI__RB_RPTR_ADDR_HI_MASK 0x000000ffL
+#define CP_RB1_RPTR_ADDR_HI__RB_RPTR_ADDR_HI__SHIFT 0x00000000
+#define CP_RB1_RPTR_ADDR__RB_RPTR_ADDR_MASK 0xfffffffcL
+#define CP_RB1_RPTR_ADDR__RB_RPTR_ADDR__SHIFT 0x00000002
+#define CP_RB1_RPTR_ADDR__RB_RPTR_SWAP_MASK 0x00000003L
+#define CP_RB1_RPTR_ADDR__RB_RPTR_SWAP__SHIFT 0x00000000
+#define CP_RB1_RPTR__RB_RPTR_MASK 0x000fffffL
+#define CP_RB1_RPTR__RB_RPTR__SHIFT 0x00000000
+#define CP_RB1_WPTR__RB_WPTR_MASK 0x000fffffL
+#define CP_RB1_WPTR__RB_WPTR__SHIFT 0x00000000
+#define CP_RB2_BASE__RB_BASE_MASK 0xffffffffL
+#define CP_RB2_BASE__RB_BASE__SHIFT 0x00000000
+#define CP_RB2_CNTL__CACHE_POLICY_MASK 0x03000000L
+#define CP_RB2_CNTL__CACHE_POLICY__SHIFT 0x00000018
+#define CP_RB2_CNTL__MIN_AVAILSZ_MASK 0x00300000L
+#define CP_RB2_CNTL__MIN_AVAILSZ__SHIFT 0x00000014
+#define CP_RB2_CNTL__MIN_IB_AVAILSZ_MASK 0x00c00000L
+#define CP_RB2_CNTL__MIN_IB_AVAILSZ__SHIFT 0x00000016
+#define CP_RB2_CNTL__RB_BLKSZ_MASK 0x00003f00L
+#define CP_RB2_CNTL__RB_BLKSZ__SHIFT 0x00000008
+#define CP_RB2_CNTL__RB_BUFSZ_MASK 0x0000003fL
+#define CP_RB2_CNTL__RB_BUFSZ__SHIFT 0x00000000
+#define CP_RB2_CNTL__RB_NO_UPDATE_MASK 0x08000000L
+#define CP_RB2_CNTL__RB_NO_UPDATE__SHIFT 0x0000001b
+#define CP_RB2_CNTL__RB_RPTR_WR_ENA_MASK 0x80000000L
+#define CP_RB2_CNTL__RB_RPTR_WR_ENA__SHIFT 0x0000001f
+#define CP_RB2_CNTL__RB_VOLATILE_MASK 0x04000000L
+#define CP_RB2_CNTL__RB_VOLATILE__SHIFT 0x0000001a
+#define CP_RB2_RPTR_ADDR_HI__RB_RPTR_ADDR_HI_MASK 0x000000ffL
+#define CP_RB2_RPTR_ADDR_HI__RB_RPTR_ADDR_HI__SHIFT 0x00000000
+#define CP_RB2_RPTR_ADDR__RB_RPTR_ADDR_MASK 0xfffffffcL
+#define CP_RB2_RPTR_ADDR__RB_RPTR_ADDR__SHIFT 0x00000002
+#define CP_RB2_RPTR_ADDR__RB_RPTR_SWAP_MASK 0x00000003L
+#define CP_RB2_RPTR_ADDR__RB_RPTR_SWAP__SHIFT 0x00000000
+#define CP_RB2_RPTR__RB_RPTR_MASK 0x000fffffL
+#define CP_RB2_RPTR__RB_RPTR__SHIFT 0x00000000
+#define CP_RB2_WPTR__RB_WPTR_MASK 0x000fffffL
+#define CP_RB2_WPTR__RB_WPTR__SHIFT 0x00000000
+#define CP_RB_BASE__RB_BASE_MASK 0xffffffffL
+#define CP_RB_BASE__RB_BASE__SHIFT 0x00000000
+#define CP_RB_CNTL__BUF_SWAP_MASK 0x00030000L
+#define CP_RB_CNTL__BUF_SWAP__SHIFT 0x00000010
+#define CP_RB_CNTL__CACHE_POLICY_MASK 0x03000000L
+#define CP_RB_CNTL__CACHE_POLICY__SHIFT 0x00000018
+#define CP_RB_CNTL__MIN_AVAILSZ_MASK 0x00300000L
+#define CP_RB_CNTL__MIN_AVAILSZ__SHIFT 0x00000014
+#define CP_RB_CNTL__MIN_IB_AVAILSZ_MASK 0x00c00000L
+#define CP_RB_CNTL__MIN_IB_AVAILSZ__SHIFT 0x00000016
+#define CP_RB_CNTL__RB_BLKSZ_MASK 0x00003f00L
+#define CP_RB_CNTL__RB_BLKSZ__SHIFT 0x00000008
+#define CP_RB_CNTL__RB_BUFSZ_MASK 0x0000003fL
+#define CP_RB_CNTL__RB_BUFSZ__SHIFT 0x00000000
+#define CP_RB_CNTL__RB_NO_UPDATE_MASK 0x08000000L
+#define CP_RB_CNTL__RB_NO_UPDATE__SHIFT 0x0000001b
+#define CP_RB_CNTL__RB_RPTR_WR_ENA_MASK 0x80000000L
+#define CP_RB_CNTL__RB_RPTR_WR_ENA__SHIFT 0x0000001f
+#define CP_RB_CNTL__RB_VOLATILE_MASK 0x04000000L
+#define CP_RB_CNTL__RB_VOLATILE__SHIFT 0x0000001a
+#define CP_RB_OFFSET__RB_OFFSET_MASK 0x000fffffL
+#define CP_RB_OFFSET__RB_OFFSET__SHIFT 0x00000000
+#define CP_RB_RPTR_ADDR_HI__RB_RPTR_ADDR_HI_MASK 0x000000ffL
+#define CP_RB_RPTR_ADDR_HI__RB_RPTR_ADDR_HI__SHIFT 0x00000000
+#define CP_RB_RPTR_ADDR__RB_RPTR_ADDR_MASK 0xfffffffcL
+#define CP_RB_RPTR_ADDR__RB_RPTR_ADDR__SHIFT 0x00000002
+#define CP_RB_RPTR_ADDR__RB_RPTR_SWAP_MASK 0x00000003L
+#define CP_RB_RPTR_ADDR__RB_RPTR_SWAP__SHIFT 0x00000000
+#define CP_RB_RPTR__RB_RPTR_MASK 0x000fffffL
+#define CP_RB_RPTR__RB_RPTR__SHIFT 0x00000000
+#define CP_RB_RPTR_WR__RB_RPTR_WR_MASK 0x000fffffL
+#define CP_RB_RPTR_WR__RB_RPTR_WR__SHIFT 0x00000000
+#define CP_RB_VMID__RB0_VMID_MASK 0x0000000fL
+#define CP_RB_VMID__RB0_VMID__SHIFT 0x00000000
+#define CP_RB_VMID__RB1_VMID_MASK 0x00000f00L
+#define CP_RB_VMID__RB1_VMID__SHIFT 0x00000008
+#define CP_RB_VMID__RB2_VMID_MASK 0x000f0000L
+#define CP_RB_VMID__RB2_VMID__SHIFT 0x00000010
+#define CP_RB_WPTR_DELAY__PRE_WRITE_LIMIT_MASK 0xf0000000L
+#define CP_RB_WPTR_DELAY__PRE_WRITE_LIMIT__SHIFT 0x0000001c
+#define CP_RB_WPTR_DELAY__PRE_WRITE_TIMER_MASK 0x0fffffffL
+#define CP_RB_WPTR_DELAY__PRE_WRITE_TIMER__SHIFT 0x00000000
+#define CP_RB_WPTR_POLL_ADDR_HI__OBSOLETE_MASK 0x000000ffL
+#define CP_RB_WPTR_POLL_ADDR_HI__OBSOLETE__SHIFT 0x00000000
+#define CP_RB_WPTR_POLL_ADDR_HI__RB_WPTR_POLL_ADDR_HI_MASK 0x000000ffL
+#define CP_RB_WPTR_POLL_ADDR_HI__RB_WPTR_POLL_ADDR_HI__SHIFT 0x00000000
+#define CP_RB_WPTR_POLL_ADDR_LO__OBSOLETE_MASK 0xfffffffcL
+#define CP_RB_WPTR_POLL_ADDR_LO__OBSOLETE__SHIFT 0x00000002
+#define CP_RB_WPTR_POLL_ADDR_LO__RB_WPTR_POLL_ADDR_LO_MASK 0xfffffffcL
+#define CP_RB_WPTR_POLL_ADDR_LO__RB_WPTR_POLL_ADDR_LO__SHIFT 0x00000002
+#define CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xffff0000L
+#define CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x00000010
+#define CP_RB_WPTR_POLL_CNTL__POLL_FREQUENCY_MASK 0x0000ffffL
+#define CP_RB_WPTR_POLL_CNTL__POLL_FREQUENCY__SHIFT 0x00000000
+#define CP_RB_WPTR__RB_WPTR_MASK 0x000fffffL
+#define CP_RB_WPTR__RB_WPTR__SHIFT 0x00000000
+#define CP_RING0_PRIORITY__PRIORITY_MASK 0x00000003L
+#define CP_RING0_PRIORITY__PRIORITY__SHIFT 0x00000000
+#define CP_RING1_PRIORITY__PRIORITY_MASK 0x00000003L
+#define CP_RING1_PRIORITY__PRIORITY__SHIFT 0x00000000
+#define CP_RING2_PRIORITY__PRIORITY_MASK 0x00000003L
+#define CP_RING2_PRIORITY__PRIORITY__SHIFT 0x00000000
+#define CP_RINGID__RINGID_MASK 0x00000003L
+#define CP_RINGID__RINGID__SHIFT 0x00000000
+#define CP_RING_PRIORITY_CNTS__PRIORITY1_CNT_MASK 0x000000ffL
+#define CP_RING_PRIORITY_CNTS__PRIORITY1_CNT__SHIFT 0x00000000
+#define CP_RING_PRIORITY_CNTS__PRIORITY2A_CNT_MASK 0x0000ff00L
+#define CP_RING_PRIORITY_CNTS__PRIORITY2A_CNT__SHIFT 0x00000008
+#define CP_RING_PRIORITY_CNTS__PRIORITY2B_CNT_MASK 0x00ff0000L
+#define CP_RING_PRIORITY_CNTS__PRIORITY2B_CNT__SHIFT 0x00000010
+#define CP_RING_PRIORITY_CNTS__PRIORITY3_CNT_MASK 0xff000000L
+#define CP_RING_PRIORITY_CNTS__PRIORITY3_CNT__SHIFT 0x00000018
+#define CP_ROQ1_THRESHOLDS__R0_IB1_START_MASK 0x00ff0000L
+#define CP_ROQ1_THRESHOLDS__R0_IB1_START__SHIFT 0x00000010
+#define CP_ROQ1_THRESHOLDS__R1_IB1_START_MASK 0xff000000L
+#define CP_ROQ1_THRESHOLDS__R1_IB1_START__SHIFT 0x00000018
+#define CP_ROQ1_THRESHOLDS__RB1_START_MASK 0x000000ffL
+#define CP_ROQ1_THRESHOLDS__RB1_START__SHIFT 0x00000000
+#define CP_ROQ1_THRESHOLDS__RB2_START_MASK 0x0000ff00L
+#define CP_ROQ1_THRESHOLDS__RB2_START__SHIFT 0x00000008
+#define CP_ROQ2_AVAIL__ROQ_CNT_IB2_MASK 0x000007ffL
+#define CP_ROQ2_AVAIL__ROQ_CNT_IB2__SHIFT 0x00000000
+#define CP_ROQ2_THRESHOLDS__R0_IB2_START_MASK 0x0000ff00L
+#define CP_ROQ2_THRESHOLDS__R0_IB2_START__SHIFT 0x00000008
+#define CP_ROQ2_THRESHOLDS__R1_IB2_START_MASK 0x00ff0000L
+#define CP_ROQ2_THRESHOLDS__R1_IB2_START__SHIFT 0x00000010
+#define CP_ROQ2_THRESHOLDS__R2_IB1_START_MASK 0x000000ffL
+#define CP_ROQ2_THRESHOLDS__R2_IB1_START__SHIFT 0x00000000
+#define CP_ROQ2_THRESHOLDS__R2_IB2_START_MASK 0xff000000L
+#define CP_ROQ2_THRESHOLDS__R2_IB2_START__SHIFT 0x00000018
+#define CP_ROQ_AVAIL__ROQ_CNT_IB1_MASK 0x07ff0000L
+#define CP_ROQ_AVAIL__ROQ_CNT_IB1__SHIFT 0x00000010
+#define CP_ROQ_AVAIL__ROQ_CNT_RING_MASK 0x000007ffL
+#define CP_ROQ_AVAIL__ROQ_CNT_RING__SHIFT 0x00000000
+#define CP_ROQ_IB1_STAT__ROQ_RPTR_INDIRECT1_MASK 0x000003ffL
+#define CP_ROQ_IB1_STAT__ROQ_RPTR_INDIRECT1__SHIFT 0x00000000
+#define CP_ROQ_IB1_STAT__ROQ_WPTR_INDIRECT1_MASK 0x03ff0000L
+#define CP_ROQ_IB1_STAT__ROQ_WPTR_INDIRECT1__SHIFT 0x00000010
+#define CP_ROQ_IB2_STAT__ROQ_RPTR_INDIRECT2_MASK 0x000003ffL
+#define CP_ROQ_IB2_STAT__ROQ_RPTR_INDIRECT2__SHIFT 0x00000000
+#define CP_ROQ_IB2_STAT__ROQ_WPTR_INDIRECT2_MASK 0x03ff0000L
+#define CP_ROQ_IB2_STAT__ROQ_WPTR_INDIRECT2__SHIFT 0x00000010
+#define CP_ROQ_RB_STAT__ROQ_RPTR_PRIMARY_MASK 0x000003ffL
+#define CP_ROQ_RB_STAT__ROQ_RPTR_PRIMARY__SHIFT 0x00000000
+#define CP_ROQ_RB_STAT__ROQ_WPTR_PRIMARY_MASK 0x03ff0000L
+#define CP_ROQ_RB_STAT__ROQ_WPTR_PRIMARY__SHIFT 0x00000010
+#define CP_SC_PSINVOC_COUNT0_HI__PSINVOC_COUNT0_HI_MASK 0xffffffffL
+#define CP_SC_PSINVOC_COUNT0_HI__PSINVOC_COUNT0_HI__SHIFT 0x00000000
+#define CP_SC_PSINVOC_COUNT0_LO__PSINVOC_COUNT0_LO_MASK 0xffffffffL
+#define CP_SC_PSINVOC_COUNT0_LO__PSINVOC_COUNT0_LO__SHIFT 0x00000000
+#define CP_SC_PSINVOC_COUNT1_HI__OBSOLETE_MASK 0xffffffffL
+#define CP_SC_PSINVOC_COUNT1_HI__OBSOLETE__SHIFT 0x00000000
+#define CP_SC_PSINVOC_COUNT1_LO__OBSOLETE_MASK 0xffffffffL
+#define CP_SC_PSINVOC_COUNT1_LO__OBSOLETE__SHIFT 0x00000000
+#define CP_SCRATCH_DATA__SCRATCH_DATA_MASK 0xffffffffL
+#define CP_SCRATCH_DATA__SCRATCH_DATA__SHIFT 0x00000000
+#define CP_SCRATCH_INDEX__SCRATCH_INDEX_MASK 0x000000ffL
+#define CP_SCRATCH_INDEX__SCRATCH_INDEX__SHIFT 0x00000000
+#define CP_SEM_WAIT_TIMER__SEM_WAIT_TIMER_MASK 0xffffffffL
+#define CP_SEM_WAIT_TIMER__SEM_WAIT_TIMER__SHIFT 0x00000000
+#define CP_SIG_SEM_ADDR_HI__SEM_ADDR_HI_MASK 0x000000ffL
+#define CP_SIG_SEM_ADDR_HI__SEM_ADDR_HI__SHIFT 0x00000000
+#define CP_SIG_SEM_ADDR_HI__SEM_CLIENT_CODE_MASK 0x03000000L
+#define CP_SIG_SEM_ADDR_HI__SEM_CLIENT_CODE__SHIFT 0x00000018
+#define CP_SIG_SEM_ADDR_HI__SEM_SELECT_MASK 0xe0000000L
+#define CP_SIG_SEM_ADDR_HI__SEM_SELECT__SHIFT 0x0000001d
+#define CP_SIG_SEM_ADDR_HI__SEM_SIGNAL_TYPE_MASK 0x00100000L
+#define CP_SIG_SEM_ADDR_HI__SEM_SIGNAL_TYPE__SHIFT 0x00000014
+#define CP_SIG_SEM_ADDR_HI__SEM_USE_MAILBOX_MASK 0x00010000L
+#define CP_SIG_SEM_ADDR_HI__SEM_USE_MAILBOX__SHIFT 0x00000010
+#define CP_SIG_SEM_ADDR_LO__SEM_ADDR_LO_MASK 0xfffffff8L
+#define CP_SIG_SEM_ADDR_LO__SEM_ADDR_LO__SHIFT 0x00000003
+#define CP_SIG_SEM_ADDR_LO__SEM_ADDR_SWAP_MASK 0x00000003L
+#define CP_SIG_SEM_ADDR_LO__SEM_ADDR_SWAP__SHIFT 0x00000000
+#define CP_STALLED_STAT1__ME_HAS_ACTIVE_CE_BUFFER_FLAG_MASK 0x00000400L
+#define CP_STALLED_STAT1__ME_HAS_ACTIVE_CE_BUFFER_FLAG__SHIFT 0x0000000a
+#define CP_STALLED_STAT1__ME_HAS_ACTIVE_DE_BUFFER_FLAG_MASK 0x00000800L
+#define CP_STALLED_STAT1__ME_HAS_ACTIVE_DE_BUFFER_FLAG__SHIFT 0x0000000b
+#define CP_STALLED_STAT1__ME_STALLED_ON_ATOMIC_RTN_DATA_MASK 0x00002000L
+#define CP_STALLED_STAT1__ME_STALLED_ON_ATOMIC_RTN_DATA__SHIFT 0x0000000d
+#define CP_STALLED_STAT1__ME_STALLED_ON_TC_WR_CONFIRM_MASK 0x00001000L
+#define CP_STALLED_STAT1__ME_STALLED_ON_TC_WR_CONFIRM__SHIFT 0x0000000c
+#define CP_STALLED_STAT1__ME_WAITING_ON_MC_READ_DATA_MASK 0x00004000L
+#define CP_STALLED_STAT1__ME_WAITING_ON_MC_READ_DATA__SHIFT 0x0000000e
+#define CP_STALLED_STAT1__ME_WAITING_ON_REG_READ_DATA_MASK 0x00008000L
+#define CP_STALLED_STAT1__ME_WAITING_ON_REG_READ_DATA__SHIFT 0x0000000f
+#define CP_STALLED_STAT1__MIU_WAITING_ON_RDREQ_FREE_MASK 0x00010000L
+#define CP_STALLED_STAT1__MIU_WAITING_ON_RDREQ_FREE__SHIFT 0x00000010
+#define CP_STALLED_STAT1__MIU_WAITING_ON_WRREQ_FREE_MASK 0x00020000L
+#define CP_STALLED_STAT1__MIU_WAITING_ON_WRREQ_FREE__SHIFT 0x00000011
+#define CP_STALLED_STAT1__RBIU_TO_DMA_NOT_RDY_TO_RCV_MASK 0x00000001L
+#define CP_STALLED_STAT1__RBIU_TO_DMA_NOT_RDY_TO_RCV__SHIFT 0x00000000
+#define CP_STALLED_STAT1__RBIU_TO_MEMWR_NOT_RDY_TO_RCV_MASK 0x00000010L
+#define CP_STALLED_STAT1__RBIU_TO_MEMWR_NOT_RDY_TO_RCV__SHIFT 0x00000004
+#define CP_STALLED_STAT1__RBIU_TO_SEM_NOT_RDY_TO_RCV_MASK 0x00000004L
+#define CP_STALLED_STAT1__RBIU_TO_SEM_NOT_RDY_TO_RCV__SHIFT 0x00000002
+#define CP_STALLED_STAT1__RCIU_HALTED_BY_REG_VIOLATION_MASK 0x10000000L
+#define CP_STALLED_STAT1__RCIU_HALTED_BY_REG_VIOLATION__SHIFT 0x0000001c
+#define CP_STALLED_STAT1__RCIU_STALLED_ON_APPEND_READ_MASK 0x10000000L
+#define CP_STALLED_STAT1__RCIU_STALLED_ON_APPEND_READ__SHIFT 0x0000001c
+#define CP_STALLED_STAT1__RCIU_STALLED_ON_DMA_READ_MASK 0x08000000L
+#define CP_STALLED_STAT1__RCIU_STALLED_ON_DMA_READ__SHIFT 0x0000001b
+#define CP_STALLED_STAT1__RCIU_STALLED_ON_ME_READ_MASK 0x04000000L
+#define CP_STALLED_STAT1__RCIU_STALLED_ON_ME_READ__SHIFT 0x0000001a
+#define CP_STALLED_STAT1__RCIU_WAITING_ON_GDS_FREE_MASK 0x00800000L
+#define CP_STALLED_STAT1__RCIU_WAITING_ON_GDS_FREE__SHIFT 0x00000017
+#define CP_STALLED_STAT1__RCIU_WAITING_ON_GRBM_FREE_MASK 0x01000000L
+#define CP_STALLED_STAT1__RCIU_WAITING_ON_GRBM_FREE__SHIFT 0x00000018
+#define CP_STALLED_STAT1__RCIU_WAITING_ON_VGT_FREE_MASK 0x02000000L
+#define CP_STALLED_STAT1__RCIU_WAITING_ON_VGT_FREE__SHIFT 0x00000019
+#define CP_STALLED_STAT2__APPEND_ACTIVE_PARTITION_MASK 0x10000000L
+#define CP_STALLED_STAT2__APPEND_ACTIVE_PARTITION__SHIFT 0x0000001c
+#define CP_STALLED_STAT2__APPEND_RDY_WAIT_ON_CS_DONE_MASK 0x02000000L
+#define CP_STALLED_STAT2__APPEND_RDY_WAIT_ON_CS_DONE__SHIFT 0x00000019
+#define CP_STALLED_STAT2__APPEND_RDY_WAIT_ON_PS_DONE_MASK 0x04000000L
+#define CP_STALLED_STAT2__APPEND_RDY_WAIT_ON_PS_DONE__SHIFT 0x0000001a
+#define CP_STALLED_STAT2__APPEND_WAITING_TO_SEND_MEMWRITE_MASK 0x20000000L
+#define CP_STALLED_STAT2__APPEND_WAITING_TO_SEND_MEMWRITE__SHIFT 0x0000001d
+#define CP_STALLED_STAT2__APPEND_WAIT_ON_WR_CONFIRM_MASK 0x08000000L
+#define CP_STALLED_STAT2__APPEND_WAIT_ON_WR_CONFIRM__SHIFT 0x0000001b
+#define CP_STALLED_STAT2__EOPD_FIFO_NEEDS_SC_EOP_DONE_MASK 0x00200000L
+#define CP_STALLED_STAT2__EOPD_FIFO_NEEDS_SC_EOP_DONE__SHIFT 0x00000015
+#define CP_STALLED_STAT2__EOPD_FIFO_NEEDS_WR_CONFIRM_MASK 0x00400000L
+#define CP_STALLED_STAT2__EOPD_FIFO_NEEDS_WR_CONFIRM__SHIFT 0x00000016
+#define CP_STALLED_STAT2__GFX_CNTX_NOT_AVAIL_TO_ME_MASK 0x00000800L
+#define CP_STALLED_STAT2__GFX_CNTX_NOT_AVAIL_TO_ME__SHIFT 0x0000000b
+#define CP_STALLED_STAT2__MEQ_TO_ME_NOT_RDY_TO_RCV_MASK 0x00010000L
+#define CP_STALLED_STAT2__MEQ_TO_ME_NOT_RDY_TO_RCV__SHIFT 0x00000010
+#define CP_STALLED_STAT2__ME_RCIU_NOT_RDY_TO_RCV_MASK 0x00001000L
+#define CP_STALLED_STAT2__ME_RCIU_NOT_RDY_TO_RCV__SHIFT 0x0000000c
+#define CP_STALLED_STAT2__ME_TO_CONST_NOT_RDY_TO_RCV_MASK 0x00002000L
+#define CP_STALLED_STAT2__ME_TO_CONST_NOT_RDY_TO_RCV__SHIFT 0x0000000d
+#define CP_STALLED_STAT2__ME_WAITING_DATA_FROM_PFP_MASK 0x00004000L
+#define CP_STALLED_STAT2__ME_WAITING_DATA_FROM_PFP__SHIFT 0x0000000e
+#define CP_STALLED_STAT2__ME_WAITING_DATA_FROM_STQ_MASK 0x00040000L
+#define CP_STALLED_STAT2__ME_WAITING_DATA_FROM_STQ__SHIFT 0x00000012
+#define CP_STALLED_STAT2__ME_WAITING_ON_PARTIAL_FLUSH_MASK 0x00008000L
+#define CP_STALLED_STAT2__ME_WAITING_ON_PARTIAL_FLUSH__SHIFT 0x0000000f
+#define CP_STALLED_STAT2__ME_WAIT_ON_AVAIL_BUFFER_MASK 0x00000400L
+#define CP_STALLED_STAT2__ME_WAIT_ON_AVAIL_BUFFER__SHIFT 0x0000000a
+#define CP_STALLED_STAT2__ME_WAIT_ON_CE_COUNTER_MASK 0x00000200L
+#define CP_STALLED_STAT2__ME_WAIT_ON_CE_COUNTER__SHIFT 0x00000009
+#define CP_STALLED_STAT2__PFP_MIU_READ_PENDING_MASK 0x00000040L
+#define CP_STALLED_STAT2__PFP_MIU_READ_PENDING__SHIFT 0x00000006
+#define CP_STALLED_STAT2__PFP_RCIU_READ_PENDING_MASK 0x00000020L
+#define CP_STALLED_STAT2__PFP_RCIU_READ_PENDING__SHIFT 0x00000005
+#define CP_STALLED_STAT2__PFP_STALLED_ON_ATOMIC_RTN_DATA_MASK 0x00100000L
+#define CP_STALLED_STAT2__PFP_STALLED_ON_ATOMIC_RTN_DATA__SHIFT 0x00000014
+#define CP_STALLED_STAT2__PFP_STALLED_ON_TC_WR_CONFIRM_MASK 0x00080000L
+#define CP_STALLED_STAT2__PFP_STALLED_ON_TC_WR_CONFIRM__SHIFT 0x00000013
+#define CP_STALLED_STAT2__PFP_TO_CSF_NOT_RDY_TO_RCV_MASK 0x00000001L
+#define CP_STALLED_STAT2__PFP_TO_CSF_NOT_RDY_TO_RCV__SHIFT 0x00000000
+#define CP_STALLED_STAT2__PFP_TO_MEQ_NOT_RDY_TO_RCV_MASK 0x00000002L
+#define CP_STALLED_STAT2__PFP_TO_MEQ_NOT_RDY_TO_RCV__SHIFT 0x00000001
+#define CP_STALLED_STAT2__PFP_TO_MIU_WRITE_NOT_RDY_TO_RCV_MASK 0x00000080L
+#define CP_STALLED_STAT2__PFP_TO_MIU_WRITE_NOT_RDY_TO_RCV__SHIFT 0x00000007
+#define CP_STALLED_STAT2__PFP_TO_RCIU_NOT_RDY_TO_RCV_MASK 0x00000004L
+#define CP_STALLED_STAT2__PFP_TO_RCIU_NOT_RDY_TO_RCV__SHIFT 0x00000002
+#define CP_STALLED_STAT2__PFP_TO_VGT_WRITES_PENDING_MASK 0x00000010L
+#define CP_STALLED_STAT2__PFP_TO_VGT_WRITES_PENDING__SHIFT 0x00000004
+#define CP_STALLED_STAT2__PFP_WAITING_ON_BUFFER_DATA_MASK 0x00000100L
+#define CP_STALLED_STAT2__PFP_WAITING_ON_BUFFER_DATA__SHIFT 0x00000008
+#define CP_STALLED_STAT2__PIPE_STATS_WR_DATA_PENDING_MASK 0x01000000L
+#define CP_STALLED_STAT2__PIPE_STATS_WR_DATA_PENDING__SHIFT 0x00000018
+#define CP_STALLED_STAT2__STQ_TO_ME_NOT_RDY_TO_RCV_MASK 0x00020000L
+#define CP_STALLED_STAT2__STQ_TO_ME_NOT_RDY_TO_RCV__SHIFT 0x00000011
+#define CP_STALLED_STAT2__STRMO_WR_OF_PRIM_DATA_PENDING_MASK 0x00800000L
+#define CP_STALLED_STAT2__STRMO_WR_OF_PRIM_DATA_PENDING__SHIFT 0x00000017
+#define CP_STALLED_STAT2__SURF_SYNC_NEEDS_ALL_CLEAN_MASK 0x80000000L
+#define CP_STALLED_STAT2__SURF_SYNC_NEEDS_ALL_CLEAN__SHIFT 0x0000001f
+#define CP_STALLED_STAT2__SURF_SYNC_NEEDS_IDLE_CNTXS_MASK 0x40000000L
+#define CP_STALLED_STAT2__SURF_SYNC_NEEDS_IDLE_CNTXS__SHIFT 0x0000001e
+#define CP_STALLED_STAT3__CE_TO_CSF_NOT_RDY_TO_RCV_MASK 0x00000001L
+#define CP_STALLED_STAT3__CE_TO_CSF_NOT_RDY_TO_RCV__SHIFT 0x00000000
+#define CP_STALLED_STAT3__CE_TO_INC_FIFO_NOT_RDY_TO_RCV_MASK 0x00000040L
+#define CP_STALLED_STAT3__CE_TO_INC_FIFO_NOT_RDY_TO_RCV__SHIFT 0x00000006
+#define CP_STALLED_STAT3__CE_TO_MIU_WRITE_NOT_RDY_TO_RCV_MASK 0x00000100L
+#define CP_STALLED_STAT3__CE_TO_MIU_WRITE_NOT_RDY_TO_RCV__SHIFT 0x00000008
+#define CP_STALLED_STAT3__CE_TO_RAM_DUMP_NOT_RDY_MASK 0x00000010L
+#define CP_STALLED_STAT3__CE_TO_RAM_DUMP_NOT_RDY__SHIFT 0x00000004
+#define CP_STALLED_STAT3__CE_TO_RAM_INIT_FETCHER_NOT_RDY_TO_RCV_MASK 0x00000002L
+#define CP_STALLED_STAT3__CE_TO_RAM_INIT_FETCHER_NOT_RDY_TO_RCV__SHIFT 0x00000001
+#define CP_STALLED_STAT3__CE_TO_RAM_INIT_NOT_RDY_MASK 0x00000008L
+#define CP_STALLED_STAT3__CE_TO_RAM_INIT_NOT_RDY__SHIFT 0x00000003
+#define CP_STALLED_STAT3__CE_TO_RAM_WRITE_NOT_RDY_MASK 0x00000020L
+#define CP_STALLED_STAT3__CE_TO_RAM_WRITE_NOT_RDY__SHIFT 0x00000005
+#define CP_STALLED_STAT3__CE_TO_WR_FIFO_NOT_RDY_TO_RCV_MASK 0x00000080L
+#define CP_STALLED_STAT3__CE_TO_WR_FIFO_NOT_RDY_TO_RCV__SHIFT 0x00000007
+#define CP_STALLED_STAT3__CE_WAITING_ON_BUFFER_DATA_MASK 0x00000400L
+#define CP_STALLED_STAT3__CE_WAITING_ON_BUFFER_DATA__SHIFT 0x0000000a
+#define CP_STALLED_STAT3__CE_WAITING_ON_CE_BUFFER_FLAG_MASK 0x00000800L
+#define CP_STALLED_STAT3__CE_WAITING_ON_CE_BUFFER_FLAG__SHIFT 0x0000000b
+#define CP_STALLED_STAT3__CE_WAITING_ON_DATA_FROM_RAM_INIT_FETCHER_MASK 0x00000004L
+#define CP_STALLED_STAT3__CE_WAITING_ON_DATA_FROM_RAM_INIT_FETCHER__SHIFT 0x00000002
+#define CP_STALLED_STAT3__CE_WAITING_ON_DE_COUNTER_MASK 0x00001000L
+#define CP_STALLED_STAT3__CE_WAITING_ON_DE_COUNTER__SHIFT 0x0000000c
+#define CP_STALLED_STAT3__CE_WAITING_ON_DE_COUNTER_UNDERFLOW_MASK 0x00002000L
+#define CP_STALLED_STAT3__CE_WAITING_ON_DE_COUNTER_UNDERFLOW__SHIFT 0x0000000d
+#define CP_STALLED_STAT3__TCIU_WAITING_ON_FREE_MASK 0x00004000L
+#define CP_STALLED_STAT3__TCIU_WAITING_ON_FREE__SHIFT 0x0000000e
+#define CP_STALLED_STAT3__TCIU_WAITING_ON_TAGS_MASK 0x00008000L
+#define CP_STALLED_STAT3__TCIU_WAITING_ON_TAGS__SHIFT 0x0000000f
+#define CP_STAT__CE_BUSY_MASK 0x04000000L
+#define CP_STAT__CE_BUSY__SHIFT 0x0000001a
+#define CP_STAT__CP_BUSY_MASK 0x80000000L
+#define CP_STAT__CP_BUSY__SHIFT 0x0000001f
+#define CP_STAT__CPC_CPG_BUSY_MASK 0x02000000L
+#define CP_STAT__CPC_CPG_BUSY__SHIFT 0x00000019
+#define CP_STAT__DC_BUSY_MASK 0x00002000L
+#define CP_STAT__DC_BUSY__SHIFT 0x0000000d
+#define CP_STAT__DMA_BUSY_MASK 0x00400000L
+#define CP_STAT__DMA_BUSY__SHIFT 0x00000016
+#define CP_STAT__INTERRUPT_BUSY_MASK 0x00100000L
+#define CP_STAT__INTERRUPT_BUSY__SHIFT 0x00000014
+#define CP_STAT__ME_BUSY_MASK 0x00020000L
+#define CP_STAT__ME_BUSY__SHIFT 0x00000011
+#define CP_STAT__MEQ_BUSY_MASK 0x00010000L
+#define CP_STAT__MEQ_BUSY__SHIFT 0x00000010
+#define CP_STAT__MIU_RDREQ_BUSY_MASK 0x00000080L
+#define CP_STAT__MIU_RDREQ_BUSY__SHIFT 0x00000007
+#define CP_STAT__MIU_WRREQ_BUSY_MASK 0x00000100L
+#define CP_STAT__MIU_WRREQ_BUSY__SHIFT 0x00000008
+#define CP_STAT__PFP_BUSY_MASK 0x00008000L
+#define CP_STAT__PFP_BUSY__SHIFT 0x0000000f
+#define CP_STAT__QUERY_BUSY_MASK 0x00040000L
+#define CP_STAT__QUERY_BUSY__SHIFT 0x00000012
+#define CP_STAT__RCIU_BUSY_MASK 0x00800000L
+#define CP_STAT__RCIU_BUSY__SHIFT 0x00000017
+#define CP_STAT__ROQ_CE_INDIRECT1_BUSY_MASK 0x20000000L
+#define CP_STAT__ROQ_CE_INDIRECT1_BUSY__SHIFT 0x0000001d
+#define CP_STAT__ROQ_CE_INDIRECT2_BUSY_MASK 0x40000000L
+#define CP_STAT__ROQ_CE_INDIRECT2_BUSY__SHIFT 0x0000001e
+#define CP_STAT__ROQ_CE_RING_BUSY_MASK 0x10000000L
+#define CP_STAT__ROQ_CE_RING_BUSY__SHIFT 0x0000001c
+#define CP_STAT__ROQ_INDIRECT1_BUSY_MASK 0x00000400L
+#define CP_STAT__ROQ_INDIRECT1_BUSY__SHIFT 0x0000000a
+#define CP_STAT__ROQ_INDIRECT2_BUSY_MASK 0x00000800L
+#define CP_STAT__ROQ_INDIRECT2_BUSY__SHIFT 0x0000000b
+#define CP_STAT__ROQ_RING_BUSY_MASK 0x00000200L
+#define CP_STAT__ROQ_RING_BUSY__SHIFT 0x00000009
+#define CP_STAT__ROQ_STATE_BUSY_MASK 0x00001000L
+#define CP_STAT__ROQ_STATE_BUSY__SHIFT 0x0000000c
+#define CP_STAT__SCRATCH_RAM_BUSY_MASK 0x01000000L
+#define CP_STAT__SCRATCH_RAM_BUSY__SHIFT 0x00000018
+#define CP_STAT__SEMAPHORE_BUSY_MASK 0x00080000L
+#define CP_STAT__SEMAPHORE_BUSY__SHIFT 0x00000013
+#define CP_STAT__SURFACE_SYNC_BUSY_MASK 0x00200000L
+#define CP_STAT__SURFACE_SYNC_BUSY__SHIFT 0x00000015
+#define CP_STAT__TCIU_BUSY_MASK 0x08000000L
+#define CP_STAT__TCIU_BUSY__SHIFT 0x0000001b
+#define CP_ST_BASE_HI__ST_BASE_HI_MASK 0x000000ffL
+#define CP_ST_BASE_HI__ST_BASE_HI__SHIFT 0x00000000
+#define CP_ST_BASE_LO__ST_BASE_LO_MASK 0xfffffffcL
+#define CP_ST_BASE_LO__ST_BASE_LO__SHIFT 0x00000002
+#define CP_ST_BUFSZ__ST_BUFSZ_MASK 0x000fffffL
+#define CP_ST_BUFSZ__ST_BUFSZ__SHIFT 0x00000000
+#define CP_STQ_AVAIL__STQ_CNT_MASK 0x000001ffL
+#define CP_STQ_AVAIL__STQ_CNT__SHIFT 0x00000000
+#define CP_STQ_STAT__STQ_RPTR_MASK 0x000003ffL
+#define CP_STQ_STAT__STQ_RPTR__SHIFT 0x00000000
+#define CP_STQ_THRESHOLDS__STQ0_START_MASK 0x000000ffL
+#define CP_STQ_THRESHOLDS__STQ0_START__SHIFT 0x00000000
+#define CP_STQ_THRESHOLDS__STQ1_START_MASK 0x0000ff00L
+#define CP_STQ_THRESHOLDS__STQ1_START__SHIFT 0x00000008
+#define CP_STQ_THRESHOLDS__STQ2_START_MASK 0x00ff0000L
+#define CP_STQ_THRESHOLDS__STQ2_START__SHIFT 0x00000010
+#define CP_STREAM_OUT_ADDR_HI__STREAM_OUT_ADDR_HI_MASK 0xffffffffL
+#define CP_STREAM_OUT_ADDR_HI__STREAM_OUT_ADDR_HI__SHIFT 0x00000000
+#define CP_STREAM_OUT_ADDR_LO__STREAM_OUT_ADDR_LO_MASK 0xfffffffcL
+#define CP_STREAM_OUT_ADDR_LO__STREAM_OUT_ADDR_LO__SHIFT 0x00000002
+#define CP_STREAM_OUT_ADDR_LO__STREAM_OUT_ADDR_SWAP_MASK 0x00000003L
+#define CP_STREAM_OUT_ADDR_LO__STREAM_OUT_ADDR_SWAP__SHIFT 0x00000000
+#define CP_STRMOUT_CNTL__OFFSET_UPDATE_DONE_MASK 0x00000001L
+#define CP_STRMOUT_CNTL__OFFSET_UPDATE_DONE__SHIFT 0x00000000
+#define CP_VGT_CSINVOC_COUNT_HI__CSINVOC_COUNT_HI_MASK 0xffffffffL
+#define CP_VGT_CSINVOC_COUNT_HI__CSINVOC_COUNT_HI__SHIFT 0x00000000
+#define CP_VGT_CSINVOC_COUNT_LO__CSINVOC_COUNT_LO_MASK 0xffffffffL
+#define CP_VGT_CSINVOC_COUNT_LO__CSINVOC_COUNT_LO__SHIFT 0x00000000
+#define CP_VGT_DSINVOC_COUNT_HI__DSINVOC_COUNT_HI_MASK 0xffffffffL
+#define CP_VGT_DSINVOC_COUNT_HI__DSINVOC_COUNT_HI__SHIFT 0x00000000
+#define CP_VGT_DSINVOC_COUNT_LO__DSINVOC_COUNT_LO_MASK 0xffffffffL
+#define CP_VGT_DSINVOC_COUNT_LO__DSINVOC_COUNT_LO__SHIFT 0x00000000
+#define CP_VGT_GSINVOC_COUNT_HI__GSINVOC_COUNT_HI_MASK 0xffffffffL
+#define CP_VGT_GSINVOC_COUNT_HI__GSINVOC_COUNT_HI__SHIFT 0x00000000
+#define CP_VGT_GSINVOC_COUNT_LO__GSINVOC_COUNT_LO_MASK 0xffffffffL
+#define CP_VGT_GSINVOC_COUNT_LO__GSINVOC_COUNT_LO__SHIFT 0x00000000
+#define CP_VGT_GSPRIM_COUNT_HI__GSPRIM_COUNT_HI_MASK 0xffffffffL
+#define CP_VGT_GSPRIM_COUNT_HI__GSPRIM_COUNT_HI__SHIFT 0x00000000
+#define CP_VGT_GSPRIM_COUNT_LO__GSPRIM_COUNT_LO_MASK 0xffffffffL
+#define CP_VGT_GSPRIM_COUNT_LO__GSPRIM_COUNT_LO__SHIFT 0x00000000
+#define CP_VGT_HSINVOC_COUNT_HI__HSINVOC_COUNT_HI_MASK 0xffffffffL
+#define CP_VGT_HSINVOC_COUNT_HI__HSINVOC_COUNT_HI__SHIFT 0x00000000
+#define CP_VGT_HSINVOC_COUNT_LO__HSINVOC_COUNT_LO_MASK 0xffffffffL
+#define CP_VGT_HSINVOC_COUNT_LO__HSINVOC_COUNT_LO__SHIFT 0x00000000
+#define CP_VGT_IAPRIM_COUNT_HI__IAPRIM_COUNT_HI_MASK 0xffffffffL
+#define CP_VGT_IAPRIM_COUNT_HI__IAPRIM_COUNT_HI__SHIFT 0x00000000
+#define CP_VGT_IAPRIM_COUNT_LO__IAPRIM_COUNT_LO_MASK 0xffffffffL
+#define CP_VGT_IAPRIM_COUNT_LO__IAPRIM_COUNT_LO__SHIFT 0x00000000
+#define CP_VGT_IAVERT_COUNT_HI__IAVERT_COUNT_HI_MASK 0xffffffffL
+#define CP_VGT_IAVERT_COUNT_HI__IAVERT_COUNT_HI__SHIFT 0x00000000
+#define CP_VGT_IAVERT_COUNT_LO__IAVERT_COUNT_LO_MASK 0xffffffffL
+#define CP_VGT_IAVERT_COUNT_LO__IAVERT_COUNT_LO__SHIFT 0x00000000
+#define CP_VGT_VSINVOC_COUNT_HI__VSINVOC_COUNT_HI_MASK 0xffffffffL
+#define CP_VGT_VSINVOC_COUNT_HI__VSINVOC_COUNT_HI__SHIFT 0x00000000
+#define CP_VGT_VSINVOC_COUNT_LO__VSINVOC_COUNT_LO_MASK 0xffffffffL
+#define CP_VGT_VSINVOC_COUNT_LO__VSINVOC_COUNT_LO__SHIFT 0x00000000
+#define CP_VMID_PREEMPT__PREEMPT_REQUEST_MASK 0x0000ffffL
+#define CP_VMID_PREEMPT__PREEMPT_REQUEST__SHIFT 0x00000000
+#define CP_VMID_PREEMPT__PREEMPT_STATUS_MASK 0xffff0000L
+#define CP_VMID_PREEMPT__PREEMPT_STATUS__SHIFT 0x00000010
+#define CP_VMID_RESET__RESET_REQUEST_MASK 0x0000ffffL
+#define CP_VMID_RESET__RESET_REQUEST__SHIFT 0x00000000
+#define CP_VMID_RESET__RESET_STATUS_MASK 0xffff0000L
+#define CP_VMID_RESET__RESET_STATUS__SHIFT 0x00000010
+#define CP_VMID__VMID_MASK 0x0000000fL
+#define CP_VMID__VMID__SHIFT 0x00000000
+#define CP_WAIT_REG_MEM_TIMEOUT__WAIT_REG_MEM_TIMEOUT_MASK 0xffffffffL
+#define CP_WAIT_REG_MEM_TIMEOUT__WAIT_REG_MEM_TIMEOUT__SHIFT 0x00000000
+#define CP_WAIT_SEM_ADDR_HI__SEM_ADDR_HI_MASK 0x000000ffL
+#define CP_WAIT_SEM_ADDR_HI__SEM_ADDR_HI__SHIFT 0x00000000
+#define CP_WAIT_SEM_ADDR_HI__SEM_CLIENT_CODE_MASK 0x03000000L
+#define CP_WAIT_SEM_ADDR_HI__SEM_CLIENT_CODE__SHIFT 0x00000018
+#define CP_WAIT_SEM_ADDR_HI__SEM_SELECT_MASK 0xe0000000L
+#define CP_WAIT_SEM_ADDR_HI__SEM_SELECT__SHIFT 0x0000001d
+#define CP_WAIT_SEM_ADDR_HI__SEM_SIGNAL_TYPE_MASK 0x00100000L
+#define CP_WAIT_SEM_ADDR_HI__SEM_SIGNAL_TYPE__SHIFT 0x00000014
+#define CP_WAIT_SEM_ADDR_HI__SEM_USE_MAILBOX_MASK 0x00010000L
+#define CP_WAIT_SEM_ADDR_HI__SEM_USE_MAILBOX__SHIFT 0x00000010
+#define CP_WAIT_SEM_ADDR_LO__SEM_ADDR_LO_MASK 0xfffffff8L
+#define CP_WAIT_SEM_ADDR_LO__SEM_ADDR_LO__SHIFT 0x00000003
+#define CP_WAIT_SEM_ADDR_LO__SEM_ADDR_SWAP_MASK 0x00000003L
+#define CP_WAIT_SEM_ADDR_LO__SEM_ADDR_SWAP__SHIFT 0x00000000
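/*
 * A minimal sketch of how these generated MASK/SHIFT pairs are typically
 * consumed: a field is read by masking and shifting down, and written by
 * shifting a right-aligned value up into the masked position. The helpers
 * below are illustrative and not part of this header; CP_STAT__CP_BUSY and
 * CP_VMID__VMID are defined nearby in this file.
 */
static inline unsigned int field_get(unsigned int reg_val,
				     unsigned int mask, unsigned int shift)
{
	/* Isolate the field bits, then right-align them. */
	return (reg_val & mask) >> shift;
}

static inline unsigned int field_set(unsigned int reg_val,
				     unsigned int mask, unsigned int shift,
				     unsigned int field_val)
{
	/* Clear the field, then merge in the new right-aligned value. */
	return (reg_val & ~mask) | ((field_val << shift) & mask);
}

/* Example: test the CP busy bit in a CP_STAT readback value. */
static inline int cp_is_busy(unsigned int cp_stat)
{
	return field_get(cp_stat, CP_STAT__CP_BUSY_MASK,
			 CP_STAT__CP_BUSY__SHIFT) != 0;
}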
+#define CS_COPY_STATE__SRC_STATE_ID_MASK 0x00000007L
+#define CS_COPY_STATE__SRC_STATE_ID__SHIFT 0x00000000
+#define DB_ALPHA_TO_MASK__ALPHA_TO_MASK_ENABLE_MASK 0x00000001L
+#define DB_ALPHA_TO_MASK__ALPHA_TO_MASK_ENABLE__SHIFT 0x00000000
+#define DB_ALPHA_TO_MASK__ALPHA_TO_MASK_OFFSET0_MASK 0x00000300L
+#define DB_ALPHA_TO_MASK__ALPHA_TO_MASK_OFFSET0__SHIFT 0x00000008
+#define DB_ALPHA_TO_MASK__ALPHA_TO_MASK_OFFSET1_MASK 0x00000c00L
+#define DB_ALPHA_TO_MASK__ALPHA_TO_MASK_OFFSET1__SHIFT 0x0000000a
+#define DB_ALPHA_TO_MASK__ALPHA_TO_MASK_OFFSET2_MASK 0x00003000L
+#define DB_ALPHA_TO_MASK__ALPHA_TO_MASK_OFFSET2__SHIFT 0x0000000c
+#define DB_ALPHA_TO_MASK__ALPHA_TO_MASK_OFFSET3_MASK 0x0000c000L
+#define DB_ALPHA_TO_MASK__ALPHA_TO_MASK_OFFSET3__SHIFT 0x0000000e
+#define DB_ALPHA_TO_MASK__OFFSET_ROUND_MASK 0x00010000L
+#define DB_ALPHA_TO_MASK__OFFSET_ROUND__SHIFT 0x00000010
+#define DB_CGTT_CLK_CTRL_0__OFF_HYSTERESIS_MASK 0x00000ff0L
+#define DB_CGTT_CLK_CTRL_0__OFF_HYSTERESIS__SHIFT 0x00000004
+#define DB_CGTT_CLK_CTRL_0__ON_DELAY_MASK 0x0000000fL
+#define DB_CGTT_CLK_CTRL_0__ON_DELAY__SHIFT 0x00000000
+#define DB_CGTT_CLK_CTRL_0__RESERVED_MASK 0x00fff000L
+#define DB_CGTT_CLK_CTRL_0__RESERVED__SHIFT 0x0000000c
+#define DB_CGTT_CLK_CTRL_0__SOFT_OVERRIDE0_MASK 0x80000000L
+#define DB_CGTT_CLK_CTRL_0__SOFT_OVERRIDE0__SHIFT 0x0000001f
+#define DB_CGTT_CLK_CTRL_0__SOFT_OVERRIDE1_MASK 0x40000000L
+#define DB_CGTT_CLK_CTRL_0__SOFT_OVERRIDE1__SHIFT 0x0000001e
+#define DB_CGTT_CLK_CTRL_0__SOFT_OVERRIDE2_MASK 0x20000000L
+#define DB_CGTT_CLK_CTRL_0__SOFT_OVERRIDE2__SHIFT 0x0000001d
+#define DB_CGTT_CLK_CTRL_0__SOFT_OVERRIDE3_MASK 0x10000000L
+#define DB_CGTT_CLK_CTRL_0__SOFT_OVERRIDE3__SHIFT 0x0000001c
+#define DB_CGTT_CLK_CTRL_0__SOFT_OVERRIDE4_MASK 0x08000000L
+#define DB_CGTT_CLK_CTRL_0__SOFT_OVERRIDE4__SHIFT 0x0000001b
+#define DB_CGTT_CLK_CTRL_0__SOFT_OVERRIDE5_MASK 0x04000000L
+#define DB_CGTT_CLK_CTRL_0__SOFT_OVERRIDE5__SHIFT 0x0000001a
+#define DB_CGTT_CLK_CTRL_0__SOFT_OVERRIDE6_MASK 0x02000000L
+#define DB_CGTT_CLK_CTRL_0__SOFT_OVERRIDE6__SHIFT 0x00000019
+#define DB_CGTT_CLK_CTRL_0__SOFT_OVERRIDE7_MASK 0x01000000L
+#define DB_CGTT_CLK_CTRL_0__SOFT_OVERRIDE7__SHIFT 0x00000018
+#define DB_COUNT_CONTROL__DBFAIL_ENABLE_MASK 0x00f00000L
+#define DB_COUNT_CONTROL__DBFAIL_ENABLE__SHIFT 0x00000014
+#define DB_COUNT_CONTROL__PERFECT_ZPASS_COUNTS_MASK 0x00000002L
+#define DB_COUNT_CONTROL__PERFECT_ZPASS_COUNTS__SHIFT 0x00000001
+#define DB_COUNT_CONTROL__SAMPLE_RATE_MASK 0x00000070L
+#define DB_COUNT_CONTROL__SAMPLE_RATE__SHIFT 0x00000004
+#define DB_COUNT_CONTROL__SFAIL_ENABLE_MASK 0x000f0000L
+#define DB_COUNT_CONTROL__SFAIL_ENABLE__SHIFT 0x00000010
+#define DB_COUNT_CONTROL__SLICE_EVEN_ENABLE_MASK 0x0f000000L
+#define DB_COUNT_CONTROL__SLICE_EVEN_ENABLE__SHIFT 0x00000018
+#define DB_COUNT_CONTROL__SLICE_ODD_ENABLE_MASK 0xf0000000L
+#define DB_COUNT_CONTROL__SLICE_ODD_ENABLE__SHIFT 0x0000001c
+#define DB_COUNT_CONTROL__ZFAIL_ENABLE_MASK 0x0000f000L
+#define DB_COUNT_CONTROL__ZFAIL_ENABLE__SHIFT 0x0000000c
+#define DB_COUNT_CONTROL__ZPASS_ENABLE_MASK 0x00000f00L
+#define DB_COUNT_CONTROL__ZPASS_ENABLE__SHIFT 0x00000008
+#define DB_COUNT_CONTROL__ZPASS_INCREMENT_DISABLE_MASK 0x00000001L
+#define DB_COUNT_CONTROL__ZPASS_INCREMENT_DISABLE__SHIFT 0x00000000
+#define DB_CREDIT_LIMIT__DB_CB_LQUAD_CREDITS_MASK 0x00001c00L
+#define DB_CREDIT_LIMIT__DB_CB_LQUAD_CREDITS__SHIFT 0x0000000a
+#define DB_CREDIT_LIMIT__DB_CB_TILE_CREDITS_MASK 0x7f000000L
+#define DB_CREDIT_LIMIT__DB_CB_TILE_CREDITS__SHIFT 0x00000018
+#define DB_CREDIT_LIMIT__DB_SC_QUAD_CREDITS_MASK 0x000003e0L
+#define DB_CREDIT_LIMIT__DB_SC_QUAD_CREDITS__SHIFT 0x00000005
+#define DB_CREDIT_LIMIT__DB_SC_TILE_CREDITS_MASK 0x0000001fL
+#define DB_CREDIT_LIMIT__DB_SC_TILE_CREDITS__SHIFT 0x00000000
+#define DB_DEBUG2__ALLOW_COMPZ_BYTE_MASKING_MASK 0x00000001L
+#define DB_DEBUG2__ALLOW_COMPZ_BYTE_MASKING__SHIFT 0x00000000
+#define DB_DEBUG2__CLK_OFF_DELAY_MASK 0x00003e00L
+#define DB_DEBUG2__CLK_OFF_DELAY__SHIFT 0x00000009
+#define DB_DEBUG2__DISABLE_DTT_DATA_FORWARDING_MASK 0x00040000L
+#define DB_DEBUG2__DISABLE_DTT_DATA_FORWARDING__SHIFT 0x00000012
+#define DB_DEBUG2__DISABLE_HTILE_PAIRED_PIPES_MASK 0x00010000L
+#define DB_DEBUG2__DISABLE_HTILE_PAIRED_PIPES__SHIFT 0x00000010
+#define DB_DEBUG2__DISABLE_NULL_EOT_FORWARDING_MASK 0x00020000L
+#define DB_DEBUG2__DISABLE_NULL_EOT_FORWARDING__SHIFT 0x00000011
+#define DB_DEBUG2__DISABLE_PREZL_CB_STALL_REZ_MASK 0x00000100L
+#define DB_DEBUG2__DISABLE_PREZL_CB_STALL_REZ__SHIFT 0x00000008
+#define DB_DEBUG2__DISABLE_PREZL_LPF_STALL_MASK 0x00000020L
+#define DB_DEBUG2__DISABLE_PREZL_LPF_STALL_REZ_MASK 0x00000080L
+#define DB_DEBUG2__DISABLE_PREZL_LPF_STALL_REZ__SHIFT 0x00000007
+#define DB_DEBUG2__DISABLE_PREZL_LPF_STALL__SHIFT 0x00000005
+#define DB_DEBUG2__DISABLE_PREZL_VIEWPORT_STALL_MASK 0x20000000L
+#define DB_DEBUG2__DISABLE_PREZL_VIEWPORT_STALL__SHIFT 0x0000001d
+#define DB_DEBUG2__DISABLE_QUAD_COHERENCY_STALL_MASK 0x00080000L
+#define DB_DEBUG2__DISABLE_QUAD_COHERENCY_STALL__SHIFT 0x00000013
+#define DB_DEBUG2__DISABLE_SINGLE_STENCIL_QUAD_SUMM_MASK 0x80000000L
+#define DB_DEBUG2__DISABLE_SINGLE_STENCIL_QUAD_SUMM__SHIFT 0x0000001f
+#define DB_DEBUG2__DISABLE_TC_MASK_L0_CACHE_MASK 0x00000004L
+#define DB_DEBUG2__DISABLE_TC_MASK_L0_CACHE__SHIFT 0x00000002
+#define DB_DEBUG2__DISABLE_TC_ZRANGE_L0_CACHE_MASK 0x00000002L
+#define DB_DEBUG2__DISABLE_TC_ZRANGE_L0_CACHE__SHIFT 0x00000001
+#define DB_DEBUG2__DISABLE_TILE_COVERED_FOR_PS_ITER_MASK 0x00004000L
+#define DB_DEBUG2__DISABLE_TILE_COVERED_FOR_PS_ITER__SHIFT 0x0000000e
+#define DB_DEBUG2__DISABLE_WRITE_STALL_ON_RDWR_CONFLICT_MASK 0x80000000L
+#define DB_DEBUG2__DISABLE_WRITE_STALL_ON_RDWR_CONFLICT__SHIFT 0x0000001f
+#define DB_DEBUG2__DTR_PREZ_STALLS_FOR_ETF_ROOM_MASK 0x00000010L
+#define DB_DEBUG2__DTR_PREZ_STALLS_FOR_ETF_ROOM__SHIFT 0x00000004
+#define DB_DEBUG2__DTR_ROUND_ROBIN_ARB_MASK 0x00000008L
+#define DB_DEBUG2__DTR_ROUND_ROBIN_ARB__SHIFT 0x00000003
+#define DB_DEBUG2__ENABLE_PREZL_CB_STALL_MASK 0x00000040L
+#define DB_DEBUG2__ENABLE_PREZL_CB_STALL__SHIFT 0x00000006
+#define DB_DEBUG2__ENABLE_PREZ_OF_REZ_SUMM_MASK 0x10000000L
+#define DB_DEBUG2__ENABLE_PREZ_OF_REZ_SUMM__SHIFT 0x0000001c
+#define DB_DEBUG2__ENABLE_SUBTILE_GROUPING_MASK 0x00008000L
+#define DB_DEBUG2__ENABLE_SUBTILE_GROUPING__SHIFT 0x0000000f
+#define DB_DEBUG3__ALLOW_RF2P_RW_COLLISION_MASK 0x00020000L
+#define DB_DEBUG3__ALLOW_RF2P_RW_COLLISION__SHIFT 0x00000011
+#define DB_DEBUG3__DB_EXTRA_DEBUG3_MASK 0xfc000000L
+#define DB_DEBUG3__DB_EXTRA_DEBUG3__SHIFT 0x0000001a
+#define DB_DEBUG3__DISABLE_DI_DT_STALL_MASK 0x02000000L
+#define DB_DEBUG3__DISABLE_DI_DT_STALL__SHIFT 0x00000019
+#define DB_DEBUG3__DISABLE_EQAA_A2M_PERF_OPT_MASK 0x01000000L
+#define DB_DEBUG3__DISABLE_EQAA_A2M_PERF_OPT__SHIFT 0x00000018
+#define DB_DEBUG3__DISABLE_HIZ_ON_VPORT_CLAMP_MASK 0x00000010L
+#define DB_DEBUG3__DISABLE_HIZ_ON_VPORT_CLAMP__SHIFT 0x00000004
+#define DB_DEBUG3__DISABLE_HZ_TC_WRITE_COMBINE_MASK 0x00200000L
+#define DB_DEBUG3__DISABLE_HZ_TC_WRITE_COMBINE__SHIFT 0x00000015
+#define DB_DEBUG3__DISABLE_OP_DF_BYPASS_MASK 0x00004000L
+#define DB_DEBUG3__DISABLE_OP_DF_BYPASS__SHIFT 0x0000000e
+#define DB_DEBUG3__DISABLE_OP_DF_DIRECT_FEEDBACK_MASK 0x00010000L
+#define DB_DEBUG3__DISABLE_OP_DF_DIRECT_FEEDBACK__SHIFT 0x00000010
+#define DB_DEBUG3__DISABLE_OP_DF_WRITE_COMBINE_MASK 0x00008000L
+#define DB_DEBUG3__DISABLE_OP_DF_WRITE_COMBINE__SHIFT 0x0000000f
+#define DB_DEBUG3__DISABLE_OP_S_DATA_FORWARDING_MASK 0x00080000L
+#define DB_DEBUG3__DISABLE_OP_S_DATA_FORWARDING__SHIFT 0x00000013
+#define DB_DEBUG3__DISABLE_OP_Z_DATA_FORWARDING_MASK 0x00002000L
+#define DB_DEBUG3__DISABLE_OP_Z_DATA_FORWARDING__SHIFT 0x0000000d
+#define DB_DEBUG3__DISABLE_OVERRASTERIZATION_FIX_MASK 0x08000000L
+#define DB_DEBUG3__DISABLE_OVERRASTERIZATION_FIX__SHIFT 0x0000001b
+#define DB_DEBUG3__DISABLE_RAM_READ_SUPPRESION_ON_FWD_MASK 0x00800000L
+#define DB_DEBUG3__DISABLE_RAM_READ_SUPPRESION_ON_FWD__SHIFT 0x00000017
+#define DB_DEBUG3__DISABLE_RECOMP_TO_1ZPLANE_WITHOUT_FASTOP_MASK 0x00000800L
+#define DB_DEBUG3__DISABLE_RECOMP_TO_1ZPLANE_WITHOUT_FASTOP__SHIFT 0x0000000b
+#define DB_DEBUG3__DISABLE_REDUNDANT_PLANE_FLUSHES_OPT_MASK 0x00000400L
+#define DB_DEBUG3__DISABLE_REDUNDANT_PLANE_FLUSHES_OPT__SHIFT 0x0000000a
+#define DB_DEBUG3__DISABLE_TCP_CAM_BYPASS_MASK 0x00000080L
+#define DB_DEBUG3__DISABLE_TCP_CAM_BYPASS__SHIFT 0x00000007
+#define DB_DEBUG3__DISABLE_TC_UPDATE_WRITE_COMBINE_MASK 0x00100000L
+#define DB_DEBUG3__DISABLE_TC_UPDATE_WRITE_COMBINE__SHIFT 0x00000014
+#define DB_DEBUG3__DISABLE_TL_SSO_NULL_SUPPRESSION_MASK 0x00000008L
+#define DB_DEBUG3__DISABLE_TL_SSO_NULL_SUPPRESSION__SHIFT 0x00000003
+#define DB_DEBUG3__DISABLE_ZCMP_DIRTY_SUPPRESSION_MASK 0x00000100L
+#define DB_DEBUG3__DISABLE_ZCMP_DIRTY_SUPPRESSION__SHIFT 0x00000008
+#define DB_DEBUG3__DONT_DELETE_CONTEXT_SUSPEND_MASK 0x20000000L
+#define DB_DEBUG3__DONT_DELETE_CONTEXT_SUSPEND__SHIFT 0x0000001d
+#define DB_DEBUG3__DONT_INSERT_CONTEXT_SUSPEND_MASK 0x10000000L
+#define DB_DEBUG3__DONT_INSERT_CONTEXT_SUSPEND__SHIFT 0x0000001c
+#define DB_DEBUG3__ENABLE_DB_PROCESS_RESET_MASK 0x04000000L
+#define DB_DEBUG3__ENABLE_DB_PROCESS_RESET__SHIFT 0x0000001a
+#define DB_DEBUG3__ENABLE_INCOHERENT_EQAA_READS_MASK 0x00001000L
+#define DB_DEBUG3__ENABLE_INCOHERENT_EQAA_READS__SHIFT 0x0000000c
+#define DB_DEBUG3__ENABLE_RECOMP_ZDIRTY_SUPPRESSION_OPT_MASK 0x00400000L
+#define DB_DEBUG3__ENABLE_RECOMP_ZDIRTY_SUPPRESSION_OPT__SHIFT 0x00000016
+#define DB_DEBUG3__ENABLE_TC_MA_ROUND_ROBIN_ARB_MASK 0x00800000L
+#define DB_DEBUG3__ENABLE_TC_MA_ROUND_ROBIN_ARB__SHIFT 0x00000017
+#define DB_DEBUG3__EQAA_INTERPOLATE_COMP_Z_MASK 0x00000020L
+#define DB_DEBUG3__EQAA_INTERPOLATE_COMP_Z__SHIFT 0x00000005
+#define DB_DEBUG3__EQAA_INTERPOLATE_SRC_Z_MASK 0x00000040L
+#define DB_DEBUG3__EQAA_INTERPOLATE_SRC_Z__SHIFT 0x00000006
+#define DB_DEBUG3__FORCE_DB_IS_GOOD_MASK 0x00000004L
+#define DB_DEBUG3__FORCE_DB_IS_GOOD__SHIFT 0x00000002
+#define DB_DEBUG3__SLOW_PREZ_TO_A2M_OMASK_RATE_MASK 0x00040000L
+#define DB_DEBUG3__SLOW_PREZ_TO_A2M_OMASK_RATE__SHIFT 0x00000012
+#define DB_DEBUG4__DB_EXTRA_DEBUG4_MASK 0xffffffc0L
+#define DB_DEBUG4__DB_EXTRA_DEBUG4__SHIFT 0x00000006
+#define DB_DEBUG4__DISABLE_PREZ_POSTZ_DTILE_CONFLICT_STALL_MASK 0x00000020L
+#define DB_DEBUG4__DISABLE_PREZ_POSTZ_DTILE_CONFLICT_STALL__SHIFT 0x00000005
+#define DB_DEBUG4__DISABLE_QC_STENCIL_MASK_SUMMATION_MASK 0x00000002L
+#define DB_DEBUG4__DISABLE_QC_STENCIL_MASK_SUMMATION__SHIFT 0x00000001
+#define DB_DEBUG4__DISABLE_QC_Z_MASK_SUMMATION_MASK 0x00000001L
+#define DB_DEBUG4__DISABLE_QC_Z_MASK_SUMMATION__SHIFT 0x00000000
+#define DB_DEBUG4__DISABLE_RESUMM_TO_SINGLE_STENCIL_MASK 0x00000004L
+#define DB_DEBUG4__DISABLE_RESUMM_TO_SINGLE_STENCIL__SHIFT 0x00000002
+#define DB_DEBUG__DEBUG_DEPTH_COMPRESS_DISABLE_MASK 0x00000002L
+#define DB_DEBUG__DEBUG_DEPTH_COMPRESS_DISABLE__SHIFT 0x00000001
+#define DB_DEBUG__DEBUG_FAST_STENCIL_DISABLE_MASK 0x00008000L
+#define DB_DEBUG__DEBUG_FAST_STENCIL_DISABLE__SHIFT 0x0000000f
+#define DB_DEBUG__DEBUG_FAST_Z_DISABLE_MASK 0x00004000L
+#define DB_DEBUG__DEBUG_FAST_Z_DISABLE__SHIFT 0x0000000e
+#define DB_DEBUG__DEBUG_FORCE_DEPTH_READ_MASK 0x00000040L
+#define DB_DEBUG__DEBUG_FORCE_DEPTH_READ__SHIFT 0x00000006
+#define DB_DEBUG__DEBUG_FORCE_FULL_Z_RANGE_MASK 0x00180000L
+#define DB_DEBUG__DEBUG_FORCE_FULL_Z_RANGE__SHIFT 0x00000013
+#define DB_DEBUG__DEBUG_FORCE_HIS_ENABLE0_MASK 0x00000c00L
+#define DB_DEBUG__DEBUG_FORCE_HIS_ENABLE0__SHIFT 0x0000000a
+#define DB_DEBUG__DEBUG_FORCE_HIS_ENABLE1_MASK 0x00003000L
+#define DB_DEBUG__DEBUG_FORCE_HIS_ENABLE1__SHIFT 0x0000000c
+#define DB_DEBUG__DEBUG_FORCE_HIZ_ENABLE_MASK 0x00000300L
+#define DB_DEBUG__DEBUG_FORCE_HIZ_ENABLE__SHIFT 0x00000008
+#define DB_DEBUG__DEBUG_FORCE_STENCIL_READ_MASK 0x00000080L
+#define DB_DEBUG__DEBUG_FORCE_STENCIL_READ__SHIFT 0x00000007
+#define DB_DEBUG__DEBUG_NOOP_CULL_DISABLE_MASK 0x00010000L
+#define DB_DEBUG__DEBUG_NOOP_CULL_DISABLE__SHIFT 0x00000010
+#define DB_DEBUG__DEBUG_STENCIL_COMPRESS_DISABLE_MASK 0x00000001L
+#define DB_DEBUG__DEBUG_STENCIL_COMPRESS_DISABLE__SHIFT 0x00000000
+#define DB_DEBUG__DECOMPRESS_AFTER_N_ZPLANES_MASK 0x0f000000L
+#define DB_DEBUG__DECOMPRESS_AFTER_N_ZPLANES__SHIFT 0x00000018
+#define DB_DEBUG__DEPTH_CACHE_FORCE_MISS_MASK 0x00040000L
+#define DB_DEBUG__DEPTH_CACHE_FORCE_MISS__SHIFT 0x00000012
+#define DB_DEBUG__DISABLE_DEPTH_SURFACE_SYNC_MASK 0x40000000L
+#define DB_DEBUG__DISABLE_DEPTH_SURFACE_SYNC__SHIFT 0x0000001e
+#define DB_DEBUG__DISABLE_HTILE_SURFACE_SYNC_MASK 0x80000000L
+#define DB_DEBUG__DISABLE_HTILE_SURFACE_SYNC__SHIFT 0x0000001f
+#define DB_DEBUG__DISABLE_SUMM_SQUADS_MASK 0x00020000L
+#define DB_DEBUG__DISABLE_SUMM_SQUADS__SHIFT 0x00000011
+#define DB_DEBUG__DISABLE_VPORT_ZPLANE_OPTIMIZATION_MASK 0x00800000L
+#define DB_DEBUG__DISABLE_VPORT_ZPLANE_OPTIMIZATION__SHIFT 0x00000017
+#define DB_DEBUG__FETCH_FULL_STENCIL_TILE_MASK 0x00000008L
+#define DB_DEBUG__FETCH_FULL_STENCIL_TILE__SHIFT 0x00000003
+#define DB_DEBUG__FETCH_FULL_Z_TILE_MASK 0x00000004L
+#define DB_DEBUG__FETCH_FULL_Z_TILE__SHIFT 0x00000002
+#define DB_DEBUG__FORCE_MISS_IF_NOT_INFLIGHT_MASK 0x20000000L
+#define DB_DEBUG__FORCE_MISS_IF_NOT_INFLIGHT__SHIFT 0x0000001d
+#define DB_DEBUG__FORCE_Z_MODE_MASK 0x00000030L
+#define DB_DEBUG__FORCE_Z_MODE__SHIFT 0x00000004
+#define DB_DEBUG__NEVER_FREE_Z_ONLY_MASK 0x00200000L
+#define DB_DEBUG__NEVER_FREE_Z_ONLY__SHIFT 0x00000015
+#define DB_DEBUG__ONE_FREE_IN_FLIGHT_MASK 0x10000000L
+#define DB_DEBUG__ONE_FREE_IN_FLIGHT__SHIFT 0x0000001c
+#define DB_DEBUG__ZPASS_COUNTS_LOOK_AT_PIPE_STAT_EVENTS_MASK 0x00400000L
+#define DB_DEBUG__ZPASS_COUNTS_LOOK_AT_PIPE_STAT_EVENTS__SHIFT 0x00000016
+#define DB_DEPTH_BOUNDS_MAX__MAX_MASK 0xffffffffL
+#define DB_DEPTH_BOUNDS_MAX__MAX__SHIFT 0x00000000
+#define DB_DEPTH_BOUNDS_MIN__MIN_MASK 0xffffffffL
+#define DB_DEPTH_BOUNDS_MIN__MIN__SHIFT 0x00000000
+#define DB_DEPTH_CLEAR__DEPTH_CLEAR_MASK 0xffffffffL
+#define DB_DEPTH_CLEAR__DEPTH_CLEAR__SHIFT 0x00000000
+#define DB_DEPTH_CONTROL__BACKFACE_ENABLE_MASK 0x00000080L
+#define DB_DEPTH_CONTROL__BACKFACE_ENABLE__SHIFT 0x00000007
+#define DB_DEPTH_CONTROL__DEPTH_BOUNDS_ENABLE_MASK 0x00000008L
+#define DB_DEPTH_CONTROL__DEPTH_BOUNDS_ENABLE__SHIFT 0x00000003
+#define DB_DEPTH_CONTROL__DISABLE_COLOR_WRITES_ON_DEPTH_PASS_MASK 0x80000000L
+#define DB_DEPTH_CONTROL__DISABLE_COLOR_WRITES_ON_DEPTH_PASS__SHIFT 0x0000001f
+#define DB_DEPTH_CONTROL__ENABLE_COLOR_WRITES_ON_DEPTH_FAIL_MASK 0x40000000L
+#define DB_DEPTH_CONTROL__ENABLE_COLOR_WRITES_ON_DEPTH_FAIL__SHIFT 0x0000001e
+#define DB_DEPTH_CONTROL__STENCIL_ENABLE_MASK 0x00000001L
+#define DB_DEPTH_CONTROL__STENCIL_ENABLE__SHIFT 0x00000000
+#define DB_DEPTH_CONTROL__STENCILFUNC_BF_MASK 0x00700000L
+#define DB_DEPTH_CONTROL__STENCILFUNC_BF__SHIFT 0x00000014
+#define DB_DEPTH_CONTROL__STENCILFUNC_MASK 0x00000700L
+#define DB_DEPTH_CONTROL__STENCILFUNC__SHIFT 0x00000008
+#define DB_DEPTH_CONTROL__Z_ENABLE_MASK 0x00000002L
+#define DB_DEPTH_CONTROL__Z_ENABLE__SHIFT 0x00000001
+#define DB_DEPTH_CONTROL__ZFUNC_MASK 0x00000070L
+#define DB_DEPTH_CONTROL__ZFUNC__SHIFT 0x00000004
+#define DB_DEPTH_CONTROL__Z_WRITE_ENABLE_MASK 0x00000004L
+#define DB_DEPTH_CONTROL__Z_WRITE_ENABLE__SHIFT 0x00000002
+#define DB_DEPTH_INFO__ADDR5_SWIZZLE_MASK_MASK 0x0000000fL
+#define DB_DEPTH_INFO__ADDR5_SWIZZLE_MASK__SHIFT 0x00000000
+#define DB_DEPTH_INFO__ARRAY_MODE_MASK 0x000000f0L
+#define DB_DEPTH_INFO__ARRAY_MODE__SHIFT 0x00000004
+#define DB_DEPTH_INFO__BANK_HEIGHT_MASK 0x00018000L
+#define DB_DEPTH_INFO__BANK_HEIGHT__SHIFT 0x0000000f
+#define DB_DEPTH_INFO__BANK_WIDTH_MASK 0x00006000L
+#define DB_DEPTH_INFO__BANK_WIDTH__SHIFT 0x0000000d
+#define DB_DEPTH_INFO__MACRO_TILE_ASPECT_MASK 0x00060000L
+#define DB_DEPTH_INFO__MACRO_TILE_ASPECT__SHIFT 0x00000011
+#define DB_DEPTH_INFO__NUM_BANKS_MASK 0x00180000L
+#define DB_DEPTH_INFO__NUM_BANKS__SHIFT 0x00000013
+#define DB_DEPTH_INFO__PIPE_CONFIG_MASK 0x00001f00L
+#define DB_DEPTH_INFO__PIPE_CONFIG__SHIFT 0x00000008
+#define DB_DEPTH_SIZE__HEIGHT_TILE_MAX_MASK 0x003ff800L
+#define DB_DEPTH_SIZE__HEIGHT_TILE_MAX__SHIFT 0x0000000b
+#define DB_DEPTH_SIZE__PITCH_TILE_MAX_MASK 0x000007ffL
+#define DB_DEPTH_SIZE__PITCH_TILE_MAX__SHIFT 0x00000000
+#define DB_DEPTH_SLICE__SLICE_TILE_MAX_MASK 0x003fffffL
+#define DB_DEPTH_SLICE__SLICE_TILE_MAX__SHIFT 0x00000000
+#define DB_DEPTH_VIEW__SLICE_MAX_MASK 0x00ffe000L
+#define DB_DEPTH_VIEW__SLICE_MAX__SHIFT 0x0000000d
+#define DB_DEPTH_VIEW__SLICE_START_MASK 0x000007ffL
+#define DB_DEPTH_VIEW__SLICE_START__SHIFT 0x00000000
+#define DB_DEPTH_VIEW__STENCIL_READ_ONLY_MASK 0x02000000L
+#define DB_DEPTH_VIEW__STENCIL_READ_ONLY__SHIFT 0x00000019
+#define DB_DEPTH_VIEW__Z_READ_ONLY_MASK 0x01000000L
+#define DB_DEPTH_VIEW__Z_READ_ONLY__SHIFT 0x00000018
+#define DB_EQAA__ALPHA_TO_MASK_EQAA_DISABLE_MASK 0x00200000L
+#define DB_EQAA__ALPHA_TO_MASK_EQAA_DISABLE__SHIFT 0x00000015
+#define DB_EQAA__ALPHA_TO_MASK_NUM_SAMPLES_MASK 0x00007000L
+#define DB_EQAA__ALPHA_TO_MASK_NUM_SAMPLES__SHIFT 0x0000000c
+#define DB_EQAA__ENABLE_POSTZ_OVERRASTERIZATION_MASK 0x08000000L
+#define DB_EQAA__ENABLE_POSTZ_OVERRASTERIZATION__SHIFT 0x0000001b
+#define DB_EQAA__HIGH_QUALITY_INTERSECTIONS_MASK 0x00010000L
+#define DB_EQAA__HIGH_QUALITY_INTERSECTIONS__SHIFT 0x00000010
+#define DB_EQAA__INCOHERENT_EQAA_READS_MASK 0x00020000L
+#define DB_EQAA__INCOHERENT_EQAA_READS__SHIFT 0x00000011
+#define DB_EQAA__INTERPOLATE_COMP_Z_MASK 0x00040000L
+#define DB_EQAA__INTERPOLATE_COMP_Z__SHIFT 0x00000012
+#define DB_EQAA__INTERPOLATE_SRC_Z_MASK 0x00080000L
+#define DB_EQAA__INTERPOLATE_SRC_Z__SHIFT 0x00000013
+#define DB_EQAA__MASK_EXPORT_NUM_SAMPLES_MASK 0x00000700L
+#define DB_EQAA__MASK_EXPORT_NUM_SAMPLES__SHIFT 0x00000008
+#define DB_EQAA__MAX_ANCHOR_SAMPLES_MASK 0x00000007L
+#define DB_EQAA__MAX_ANCHOR_SAMPLES__SHIFT 0x00000000
+#define DB_EQAA__OVERRASTERIZATION_AMOUNT_MASK 0x07000000L
+#define DB_EQAA__OVERRASTERIZATION_AMOUNT__SHIFT 0x00000018
+#define DB_EQAA__PS_ITER_SAMPLES_MASK 0x00000070L
+#define DB_EQAA__PS_ITER_SAMPLES__SHIFT 0x00000004
+#define DB_EQAA__STATIC_ANCHOR_ASSOCIATIONS_MASK 0x00100000L
+#define DB_EQAA__STATIC_ANCHOR_ASSOCIATIONS__SHIFT 0x00000014
+#define DB_FIFO_DEPTH1__LTILE_PROBE_FIFO_DEPTH_MASK 0x1fe00000L
+#define DB_FIFO_DEPTH1__LTILE_PROBE_FIFO_DEPTH__SHIFT 0x00000015
+#define DB_FIFO_DEPTH1__MCC_DEPTH_MASK 0x0000fc00L
+#define DB_FIFO_DEPTH1__MCC_DEPTH__SHIFT 0x0000000a
+#define DB_FIFO_DEPTH1__MI_RDREQ_FIFO_DEPTH_MASK 0x0000001fL
+#define DB_FIFO_DEPTH1__MI_RDREQ_FIFO_DEPTH__SHIFT 0x00000000
+#define DB_FIFO_DEPTH1__MI_WRREQ_FIFO_DEPTH_MASK 0x000003e0L
+#define DB_FIFO_DEPTH1__MI_WRREQ_FIFO_DEPTH__SHIFT 0x00000005
+#define DB_FIFO_DEPTH1__QC_DEPTH_MASK 0x001f0000L
+#define DB_FIFO_DEPTH1__QC_DEPTH__SHIFT 0x00000010
+#define DB_FIFO_DEPTH2__EQUAD_FIFO_DEPTH_MASK 0x000000ffL
+#define DB_FIFO_DEPTH2__EQUAD_FIFO_DEPTH__SHIFT 0x00000000
+#define DB_FIFO_DEPTH2__ETILE_OP_FIFO_DEPTH_MASK 0x00007f00L
+#define DB_FIFO_DEPTH2__ETILE_OP_FIFO_DEPTH__SHIFT 0x00000008
+#define DB_FIFO_DEPTH2__LQUAD_FIFO_DEPTH_MASK 0x01ff8000L
+#define DB_FIFO_DEPTH2__LQUAD_FIFO_DEPTH__SHIFT 0x0000000f
+#define DB_FIFO_DEPTH2__LTILE_OP_FIFO_DEPTH_MASK 0xfe000000L
+#define DB_FIFO_DEPTH2__LTILE_OP_FIFO_DEPTH__SHIFT 0x00000019
+#define DB_FREE_CACHELINES__FREE_DTILE_DEPTH_MASK 0x0000007fL
+#define DB_FREE_CACHELINES__FREE_DTILE_DEPTH__SHIFT 0x00000000
+#define DB_FREE_CACHELINES__FREE_HTILE_DEPTH_MASK 0x01e00000L
+#define DB_FREE_CACHELINES__FREE_HTILE_DEPTH__SHIFT 0x00000015
+#define DB_FREE_CACHELINES__FREE_PLANE_DEPTH_MASK 0x00003f80L
+#define DB_FREE_CACHELINES__FREE_PLANE_DEPTH__SHIFT 0x00000007
+#define DB_FREE_CACHELINES__FREE_Z_DEPTH_MASK 0x001fc000L
+#define DB_FREE_CACHELINES__FREE_Z_DEPTH__SHIFT 0x0000000e
+#define DB_FREE_CACHELINES__QUAD_READ_REQS_MASK 0xfe000000L
+#define DB_FREE_CACHELINES__QUAD_READ_REQS__SHIFT 0x00000019
+#define DB_HTILE_DATA_BASE__BASE_256B_MASK 0xffffffffL
+#define DB_HTILE_DATA_BASE__BASE_256B__SHIFT 0x00000000
+#define DB_HTILE_SURFACE__DST_OUTSIDE_ZERO_TO_ONE_MASK 0x00010000L
+#define DB_HTILE_SURFACE__DST_OUTSIDE_ZERO_TO_ONE__SHIFT 0x00000010
+#define DB_HTILE_SURFACE__FULL_CACHE_MASK 0x00000002L
+#define DB_HTILE_SURFACE__FULL_CACHE__SHIFT 0x00000001
+#define DB_HTILE_SURFACE__HTILE_USES_PRELOAD_WIN_MASK 0x00000004L
+#define DB_HTILE_SURFACE__HTILE_USES_PRELOAD_WIN__SHIFT 0x00000002
+#define DB_HTILE_SURFACE__LINEAR_MASK 0x00000001L
+#define DB_HTILE_SURFACE__LINEAR__SHIFT 0x00000000
+#define DB_HTILE_SURFACE__PREFETCH_HEIGHT_MASK 0x0000fc00L
+#define DB_HTILE_SURFACE__PREFETCH_HEIGHT__SHIFT 0x0000000a
+#define DB_HTILE_SURFACE__PREFETCH_WIDTH_MASK 0x000003f0L
+#define DB_HTILE_SURFACE__PREFETCH_WIDTH__SHIFT 0x00000004
+#define DB_HTILE_SURFACE__PRELOAD_MASK 0x00000008L
+#define DB_HTILE_SURFACE__PRELOAD__SHIFT 0x00000003
+#define DB_PERFCOUNTER0_HI__PERFCOUNTER_HI_MASK 0xffffffffL
+#define DB_PERFCOUNTER0_HI__PERFCOUNTER_HI__SHIFT 0x00000000
+#define DB_PERFCOUNTER0_LO__PERFCOUNTER_LO_MASK 0xffffffffL
+#define DB_PERFCOUNTER0_LO__PERFCOUNTER_LO__SHIFT 0x00000000
+#define DB_PERFCOUNTER0_SELECT1__PERF_MODE2_MASK 0xf0000000L
+#define DB_PERFCOUNTER0_SELECT1__PERF_MODE2__SHIFT 0x0000001c
+#define DB_PERFCOUNTER0_SELECT1__PERF_MODE3_MASK 0x0f000000L
+#define DB_PERFCOUNTER0_SELECT1__PERF_MODE3__SHIFT 0x00000018
+#define DB_PERFCOUNTER0_SELECT1__PERF_SEL2_MASK 0x000003ffL
+#define DB_PERFCOUNTER0_SELECT1__PERF_SEL2__SHIFT 0x00000000
+#define DB_PERFCOUNTER0_SELECT1__PERF_SEL3_MASK 0x000ffc00L
+#define DB_PERFCOUNTER0_SELECT1__PERF_SEL3__SHIFT 0x0000000a
+#define DB_PERFCOUNTER0_SELECT__CNTR_MODE_MASK 0x00f00000L
+#define DB_PERFCOUNTER0_SELECT__CNTR_MODE__SHIFT 0x00000014
+#define DB_PERFCOUNTER0_SELECT__PERF_MODE1_MASK 0x0f000000L
+#define DB_PERFCOUNTER0_SELECT__PERF_MODE1__SHIFT 0x00000018
+#define DB_PERFCOUNTER0_SELECT__PERF_MODE_MASK 0xf0000000L
+#define DB_PERFCOUNTER0_SELECT__PERF_MODE__SHIFT 0x0000001c
+#define DB_PERFCOUNTER0_SELECT__PERF_SEL1_MASK 0x000ffc00L
+#define DB_PERFCOUNTER0_SELECT__PERF_SEL1__SHIFT 0x0000000a
+#define DB_PERFCOUNTER0_SELECT__PERF_SEL_MASK 0x000000ffL
+#define DB_PERFCOUNTER0_SELECT__PERF_SEL__SHIFT 0x00000000
+#define DB_PERFCOUNTER1_HI__PERFCOUNTER_HI_MASK 0xffffffffL
+#define DB_PERFCOUNTER1_HI__PERFCOUNTER_HI__SHIFT 0x00000000
+#define DB_PERFCOUNTER1_LO__PERFCOUNTER_LO_MASK 0xffffffffL
+#define DB_PERFCOUNTER1_LO__PERFCOUNTER_LO__SHIFT 0x00000000
+#define DB_PERFCOUNTER1_SELECT1__PERF_MODE2_MASK 0xf0000000L
+#define DB_PERFCOUNTER1_SELECT1__PERF_MODE2__SHIFT 0x0000001c
+#define DB_PERFCOUNTER1_SELECT1__PERF_MODE3_MASK 0x0f000000L
+#define DB_PERFCOUNTER1_SELECT1__PERF_MODE3__SHIFT 0x00000018
+#define DB_PERFCOUNTER1_SELECT1__PERF_SEL2_MASK 0x000003ffL
+#define DB_PERFCOUNTER1_SELECT1__PERF_SEL2__SHIFT 0x00000000
+#define DB_PERFCOUNTER1_SELECT1__PERF_SEL3_MASK 0x000ffc00L
+#define DB_PERFCOUNTER1_SELECT1__PERF_SEL3__SHIFT 0x0000000a
+#define DB_PERFCOUNTER1_SELECT__CNTR_MODE_MASK 0x00f00000L
+#define DB_PERFCOUNTER1_SELECT__CNTR_MODE__SHIFT 0x00000014
+#define DB_PERFCOUNTER1_SELECT__PERF_MODE1_MASK 0x0f000000L
+#define DB_PERFCOUNTER1_SELECT__PERF_MODE1__SHIFT 0x00000018
+#define DB_PERFCOUNTER1_SELECT__PERF_MODE_MASK 0xf0000000L
+#define DB_PERFCOUNTER1_SELECT__PERF_MODE__SHIFT 0x0000001c
+#define DB_PERFCOUNTER1_SELECT__PERF_SEL1_MASK 0x000ffc00L
+#define DB_PERFCOUNTER1_SELECT__PERF_SEL1__SHIFT 0x0000000a
+#define DB_PERFCOUNTER1_SELECT__PERF_SEL_MASK 0x000000ffL
+#define DB_PERFCOUNTER1_SELECT__PERF_SEL__SHIFT 0x00000000
+#define DB_PERFCOUNTER2_HI__PERFCOUNTER_HI_MASK 0xffffffffL
+#define DB_PERFCOUNTER2_HI__PERFCOUNTER_HI__SHIFT 0x00000000
+#define DB_PERFCOUNTER2_LO__PERFCOUNTER_LO_MASK 0xffffffffL
+#define DB_PERFCOUNTER2_LO__PERFCOUNTER_LO__SHIFT 0x00000000
+#define DB_PERFCOUNTER2_SELECT__CNTR_MODE_MASK 0x00f00000L
+#define DB_PERFCOUNTER2_SELECT__CNTR_MODE__SHIFT 0x00000014
+#define DB_PERFCOUNTER2_SELECT__PERF_MODE1_MASK 0x0f000000L
+#define DB_PERFCOUNTER2_SELECT__PERF_MODE1__SHIFT 0x00000018
+#define DB_PERFCOUNTER2_SELECT__PERF_MODE_MASK 0xf0000000L
+#define DB_PERFCOUNTER2_SELECT__PERF_MODE__SHIFT 0x0000001c
+#define DB_PERFCOUNTER2_SELECT__PERF_SEL1_MASK 0x000ffc00L
+#define DB_PERFCOUNTER2_SELECT__PERF_SEL1__SHIFT 0x0000000a
+#define DB_PERFCOUNTER2_SELECT__PERF_SEL_MASK 0x000000ffL
+#define DB_PERFCOUNTER2_SELECT__PERF_SEL__SHIFT 0x00000000
+#define DB_PERFCOUNTER3_HI__PERFCOUNTER_HI_MASK 0xffffffffL
+#define DB_PERFCOUNTER3_HI__PERFCOUNTER_HI__SHIFT 0x00000000
+#define DB_PERFCOUNTER3_LO__PERFCOUNTER_LO_MASK 0xffffffffL
+#define DB_PERFCOUNTER3_LO__PERFCOUNTER_LO__SHIFT 0x00000000
+#define DB_PERFCOUNTER3_SELECT__CNTR_MODE_MASK 0x00f00000L
+#define DB_PERFCOUNTER3_SELECT__CNTR_MODE__SHIFT 0x00000014
+#define DB_PERFCOUNTER3_SELECT__PERF_MODE1_MASK 0x0f000000L
+#define DB_PERFCOUNTER3_SELECT__PERF_MODE1__SHIFT 0x00000018
+#define DB_PERFCOUNTER3_SELECT__PERF_MODE_MASK 0xf0000000L
+#define DB_PERFCOUNTER3_SELECT__PERF_MODE__SHIFT 0x0000001c
+#define DB_PERFCOUNTER3_SELECT__PERF_SEL1_MASK 0x000ffc00L
+#define DB_PERFCOUNTER3_SELECT__PERF_SEL1__SHIFT 0x0000000a
+#define DB_PERFCOUNTER3_SELECT__PERF_SEL_MASK 0x000000ffL
+#define DB_PERFCOUNTER3_SELECT__PERF_SEL__SHIFT 0x00000000
+#define DB_PRELOAD_CONTROL__MAX_X_MASK 0x00ff0000L
+#define DB_PRELOAD_CONTROL__MAX_X__SHIFT 0x00000010
+#define DB_PRELOAD_CONTROL__MAX_Y_MASK 0xff000000L
+#define DB_PRELOAD_CONTROL__MAX_Y__SHIFT 0x00000018
+#define DB_PRELOAD_CONTROL__START_X_MASK 0x000000ffL
+#define DB_PRELOAD_CONTROL__START_X__SHIFT 0x00000000
+#define DB_PRELOAD_CONTROL__START_Y_MASK 0x0000ff00L
+#define DB_PRELOAD_CONTROL__START_Y__SHIFT 0x00000008
+#define DB_READ_DEBUG_0__BUSY_DATA0_MASK 0xffffffffL
+#define DB_READ_DEBUG_0__BUSY_DATA0__SHIFT 0x00000000
+#define DB_READ_DEBUG_1__BUSY_DATA1_MASK 0xffffffffL
+#define DB_READ_DEBUG_1__BUSY_DATA1__SHIFT 0x00000000
+#define DB_READ_DEBUG_2__BUSY_DATA2_MASK 0xffffffffL
+#define DB_READ_DEBUG_2__BUSY_DATA2__SHIFT 0x00000000
+#define DB_READ_DEBUG_3__DEBUG_DATA_MASK 0xffffffffL
+#define DB_READ_DEBUG_3__DEBUG_DATA__SHIFT 0x00000000
+#define DB_READ_DEBUG_4__DEBUG_DATA_MASK 0xffffffffL
+#define DB_READ_DEBUG_4__DEBUG_DATA__SHIFT 0x00000000
+#define DB_READ_DEBUG_5__DEBUG_DATA_MASK 0xffffffffL
+#define DB_READ_DEBUG_5__DEBUG_DATA__SHIFT 0x00000000
+#define DB_READ_DEBUG_6__DEBUG_DATA_MASK 0xffffffffL
+#define DB_READ_DEBUG_6__DEBUG_DATA__SHIFT 0x00000000
+#define DB_READ_DEBUG_7__DEBUG_DATA_MASK 0xffffffffL
+#define DB_READ_DEBUG_7__DEBUG_DATA__SHIFT 0x00000000
+#define DB_READ_DEBUG_8__DEBUG_DATA_MASK 0xffffffffL
+#define DB_READ_DEBUG_8__DEBUG_DATA__SHIFT 0x00000000
+#define DB_READ_DEBUG_9__DEBUG_DATA_MASK 0xffffffffL
+#define DB_READ_DEBUG_9__DEBUG_DATA__SHIFT 0x00000000
+#define DB_READ_DEBUG_A__DEBUG_DATA_MASK 0xffffffffL
+#define DB_READ_DEBUG_A__DEBUG_DATA__SHIFT 0x00000000
+#define DB_READ_DEBUG_B__DEBUG_DATA_MASK 0xffffffffL
+#define DB_READ_DEBUG_B__DEBUG_DATA__SHIFT 0x00000000
+#define DB_READ_DEBUG_C__DEBUG_DATA_MASK 0xffffffffL
+#define DB_READ_DEBUG_C__DEBUG_DATA__SHIFT 0x00000000
+#define DB_READ_DEBUG_D__DEBUG_DATA_MASK 0xffffffffL
+#define DB_READ_DEBUG_D__DEBUG_DATA__SHIFT 0x00000000
+#define DB_READ_DEBUG_E__DEBUG_DATA_MASK 0xffffffffL
+#define DB_READ_DEBUG_E__DEBUG_DATA__SHIFT 0x00000000
+#define DB_READ_DEBUG_F__DEBUG_DATA_MASK 0xffffffffL
+#define DB_READ_DEBUG_F__DEBUG_DATA__SHIFT 0x00000000
+#define DB_RENDER_CONTROL__COPY_CENTROID_MASK 0x00000080L
+#define DB_RENDER_CONTROL__COPY_CENTROID__SHIFT 0x00000007
+#define DB_RENDER_CONTROL__COPY_SAMPLE_MASK 0x00000f00L
+#define DB_RENDER_CONTROL__COPY_SAMPLE__SHIFT 0x00000008
+#define DB_RENDER_CONTROL__DEPTH_CLEAR_ENABLE_MASK 0x00000001L
+#define DB_RENDER_CONTROL__DEPTH_CLEAR_ENABLE__SHIFT 0x00000000
+#define DB_RENDER_CONTROL__DEPTH_COMPRESS_DISABLE_MASK 0x00000040L
+#define DB_RENDER_CONTROL__DEPTH_COMPRESS_DISABLE__SHIFT 0x00000006
+#define DB_RENDER_CONTROL__DEPTH_COPY_MASK 0x00000004L
+#define DB_RENDER_CONTROL__DEPTH_COPY__SHIFT 0x00000002
+#define DB_RENDER_CONTROL__RESUMMARIZE_ENABLE_MASK 0x00000010L
+#define DB_RENDER_CONTROL__RESUMMARIZE_ENABLE__SHIFT 0x00000004
+#define DB_RENDER_CONTROL__STENCIL_CLEAR_ENABLE_MASK 0x00000002L
+#define DB_RENDER_CONTROL__STENCIL_CLEAR_ENABLE__SHIFT 0x00000001
+#define DB_RENDER_CONTROL__STENCIL_COMPRESS_DISABLE_MASK 0x00000020L
+#define DB_RENDER_CONTROL__STENCIL_COMPRESS_DISABLE__SHIFT 0x00000005
+#define DB_RENDER_CONTROL__STENCIL_COPY_MASK 0x00000008L
+#define DB_RENDER_CONTROL__STENCIL_COPY__SHIFT 0x00000003
+#define DB_RENDER_OVERRIDE2__DECOMPRESS_Z_ON_FLUSH_MASK 0x00000100L
+#define DB_RENDER_OVERRIDE2__DECOMPRESS_Z_ON_FLUSH__SHIFT 0x00000008
+#define DB_RENDER_OVERRIDE2__DEPTH_BOUNDS_HIER_DEPTH_DISABLE_MASK 0x00000400L
+#define DB_RENDER_OVERRIDE2__DEPTH_BOUNDS_HIER_DEPTH_DISABLE__SHIFT 0x0000000a
+#define DB_RENDER_OVERRIDE2__DISABLE_COLOR_ON_VALIDATION_MASK 0x00000080L
+#define DB_RENDER_OVERRIDE2__DISABLE_COLOR_ON_VALIDATION__SHIFT 0x00000007
+#define DB_RENDER_OVERRIDE2__DISABLE_FAST_PASS_MASK 0x00800000L
+#define DB_RENDER_OVERRIDE2__DISABLE_FAST_PASS__SHIFT 0x00000017
+#define DB_RENDER_OVERRIDE2__DISABLE_REG_SNOOP_MASK 0x00000200L
+#define DB_RENDER_OVERRIDE2__DISABLE_REG_SNOOP__SHIFT 0x00000009
+#define DB_RENDER_OVERRIDE2__DISABLE_SMEM_EXPCLEAR_OPTIMIZATION_MASK 0x00000040L
+#define DB_RENDER_OVERRIDE2__DISABLE_SMEM_EXPCLEAR_OPTIMIZATION__SHIFT 0x00000006
+#define DB_RENDER_OVERRIDE2__DISABLE_ZMASK_EXPCLEAR_OPTIMIZATION_MASK 0x00000020L
+#define DB_RENDER_OVERRIDE2__DISABLE_ZMASK_EXPCLEAR_OPTIMIZATION__SHIFT 0x00000005
+#define DB_RENDER_OVERRIDE2__HIS_SFUNC_BF_MASK 0x001c0000L
+#define DB_RENDER_OVERRIDE2__HIS_SFUNC_BF__SHIFT 0x00000012
+#define DB_RENDER_OVERRIDE2__HIS_SFUNC_FF_MASK 0x00038000L
+#define DB_RENDER_OVERRIDE2__HIS_SFUNC_FF__SHIFT 0x0000000f
+#define DB_RENDER_OVERRIDE2__HIZ_ZFUNC_MASK 0x00007000L
+#define DB_RENDER_OVERRIDE2__HIZ_ZFUNC__SHIFT 0x0000000c
+#define DB_RENDER_OVERRIDE2__PARTIAL_SQUAD_LAUNCH_CONTROL_MASK 0x00000003L
+#define DB_RENDER_OVERRIDE2__PARTIAL_SQUAD_LAUNCH_CONTROL__SHIFT 0x00000000
+#define DB_RENDER_OVERRIDE2__PARTIAL_SQUAD_LAUNCH_COUNTDOWN_MASK 0x0000001cL
+#define DB_RENDER_OVERRIDE2__PARTIAL_SQUAD_LAUNCH_COUNTDOWN__SHIFT 0x00000002
+#define DB_RENDER_OVERRIDE2__PRESERVE_SRESULTS_MASK 0x00400000L
+#define DB_RENDER_OVERRIDE2__PRESERVE_SRESULTS__SHIFT 0x00000016
+#define DB_RENDER_OVERRIDE2__PRESERVE_ZRANGE_MASK 0x00200000L
+#define DB_RENDER_OVERRIDE2__PRESERVE_ZRANGE__SHIFT 0x00000015
+#define DB_RENDER_OVERRIDE2__SEPARATE_HIZS_FUNC_ENABLE_MASK 0x00000800L
+#define DB_RENDER_OVERRIDE2__SEPARATE_HIZS_FUNC_ENABLE__SHIFT 0x0000000b
+#define DB_RENDER_OVERRIDE__DISABLE_FULLY_COVERED_MASK 0x00040000L
+#define DB_RENDER_OVERRIDE__DISABLE_FULLY_COVERED__SHIFT 0x00000012
+#define DB_RENDER_OVERRIDE__DISABLE_TILE_RATE_TILES_MASK 0x04000000L
+#define DB_RENDER_OVERRIDE__DISABLE_TILE_RATE_TILES__SHIFT 0x0000001a
+#define DB_RENDER_OVERRIDE__DISABLE_VIEWPORT_CLAMP_MASK 0x00010000L
+#define DB_RENDER_OVERRIDE__DISABLE_VIEWPORT_CLAMP__SHIFT 0x00000010
+#define DB_RENDER_OVERRIDE__FAST_STENCIL_DISABLE_MASK 0x00000100L
+#define DB_RENDER_OVERRIDE__FAST_STENCIL_DISABLE__SHIFT 0x00000008
+#define DB_RENDER_OVERRIDE__FAST_Z_DISABLE_MASK 0x00000080L
+#define DB_RENDER_OVERRIDE__FAST_Z_DISABLE__SHIFT 0x00000007
+#define DB_RENDER_OVERRIDE__FORCE_COLOR_KILL_MASK 0x00000400L
+#define DB_RENDER_OVERRIDE__FORCE_COLOR_KILL__SHIFT 0x0000000a
+#define DB_RENDER_OVERRIDE__FORCE_FULL_Z_RANGE_MASK 0x00006000L
+#define DB_RENDER_OVERRIDE__FORCE_FULL_Z_RANGE__SHIFT 0x0000000d
+#define DB_RENDER_OVERRIDE__FORCE_HIS_ENABLE0_MASK 0x0000000cL
+#define DB_RENDER_OVERRIDE__FORCE_HIS_ENABLE0__SHIFT 0x00000002
+#define DB_RENDER_OVERRIDE__FORCE_HIS_ENABLE1_MASK 0x00000030L
+#define DB_RENDER_OVERRIDE__FORCE_HIS_ENABLE1__SHIFT 0x00000004
+#define DB_RENDER_OVERRIDE__FORCE_HIZ_ENABLE_MASK 0x00000003L
+#define DB_RENDER_OVERRIDE__FORCE_HIZ_ENABLE__SHIFT 0x00000000
+#define DB_RENDER_OVERRIDE__FORCE_QC_SMASK_CONFLICT_MASK 0x00008000L
+#define DB_RENDER_OVERRIDE__FORCE_QC_SMASK_CONFLICT__SHIFT 0x0000000f
+#define DB_RENDER_OVERRIDE__FORCE_SHADER_Z_ORDER_MASK 0x00000040L
+#define DB_RENDER_OVERRIDE__FORCE_SHADER_Z_ORDER__SHIFT 0x00000006
+#define DB_RENDER_OVERRIDE__FORCE_STENCIL_DIRTY_MASK 0x10000000L
+#define DB_RENDER_OVERRIDE__FORCE_STENCIL_DIRTY__SHIFT 0x0000001c
+#define DB_RENDER_OVERRIDE__FORCE_STENCIL_READ_MASK 0x00001000L
+#define DB_RENDER_OVERRIDE__FORCE_STENCIL_READ__SHIFT 0x0000000c
+#define DB_RENDER_OVERRIDE__FORCE_STENCIL_VALID_MASK 0x40000000L
+#define DB_RENDER_OVERRIDE__FORCE_STENCIL_VALID__SHIFT 0x0000001e
+#define DB_RENDER_OVERRIDE__FORCE_Z_DIRTY_MASK 0x08000000L
+#define DB_RENDER_OVERRIDE__FORCE_Z_DIRTY__SHIFT 0x0000001b
+#define DB_RENDER_OVERRIDE__FORCE_Z_LIMIT_SUMM_MASK 0x00180000L
+#define DB_RENDER_OVERRIDE__FORCE_Z_LIMIT_SUMM__SHIFT 0x00000013
+#define DB_RENDER_OVERRIDE__FORCE_Z_READ_MASK 0x00000800L
+#define DB_RENDER_OVERRIDE__FORCE_Z_READ__SHIFT 0x0000000b
+#define DB_RENDER_OVERRIDE__FORCE_Z_VALID_MASK 0x20000000L
+#define DB_RENDER_OVERRIDE__FORCE_Z_VALID__SHIFT 0x0000001d
+#define DB_RENDER_OVERRIDE__IGNORE_SC_ZRANGE_MASK 0x00020000L
+#define DB_RENDER_OVERRIDE__IGNORE_SC_ZRANGE__SHIFT 0x00000011
+#define DB_RENDER_OVERRIDE__MAX_TILES_IN_DTT_MASK 0x03e00000L
+#define DB_RENDER_OVERRIDE__MAX_TILES_IN_DTT__SHIFT 0x00000015
+#define DB_RENDER_OVERRIDE__NOOP_CULL_DISABLE_MASK 0x00000200L
+#define DB_RENDER_OVERRIDE__NOOP_CULL_DISABLE__SHIFT 0x00000009
+#define DB_RENDER_OVERRIDE__PRESERVE_COMPRESSION_MASK 0x80000000L
+#define DB_RENDER_OVERRIDE__PRESERVE_COMPRESSION__SHIFT 0x0000001f
+#define DB_SHADER_CONTROL__ALPHA_TO_MASK_DISABLE_MASK 0x00000800L
+#define DB_SHADER_CONTROL__ALPHA_TO_MASK_DISABLE__SHIFT 0x0000000b
+#define DB_SHADER_CONTROL__CONSERVATIVE_Z_EXPORT_MASK 0x00006000L
+#define DB_SHADER_CONTROL__CONSERVATIVE_Z_EXPORT__SHIFT 0x0000000d
+#define DB_SHADER_CONTROL__COVERAGE_TO_MASK_ENABLE_MASK 0x00000080L
+#define DB_SHADER_CONTROL__COVERAGE_TO_MASK_ENABLE__SHIFT 0x00000007
+#define DB_SHADER_CONTROL__DEPTH_BEFORE_SHADER_MASK 0x00001000L
+#define DB_SHADER_CONTROL__DEPTH_BEFORE_SHADER__SHIFT 0x0000000c
+#define DB_SHADER_CONTROL__EXEC_ON_HIER_FAIL_MASK 0x00000200L
+#define DB_SHADER_CONTROL__EXEC_ON_HIER_FAIL__SHIFT 0x00000009
+#define DB_SHADER_CONTROL__EXEC_ON_NOOP_MASK 0x00000400L
+#define DB_SHADER_CONTROL__EXEC_ON_NOOP__SHIFT 0x0000000a
+#define DB_SHADER_CONTROL__KILL_ENABLE_MASK 0x00000040L
+#define DB_SHADER_CONTROL__KILL_ENABLE__SHIFT 0x00000006
+#define DB_SHADER_CONTROL__MASK_EXPORT_ENABLE_MASK 0x00000100L
+#define DB_SHADER_CONTROL__MASK_EXPORT_ENABLE__SHIFT 0x00000008
+#define DB_SHADER_CONTROL__STENCIL_OP_VAL_EXPORT_ENABLE_MASK 0x00000004L
+#define DB_SHADER_CONTROL__STENCIL_OP_VAL_EXPORT_ENABLE__SHIFT 0x00000002
+#define DB_SHADER_CONTROL__STENCIL_TEST_VAL_EXPORT_ENABLE_MASK 0x00000002L
+#define DB_SHADER_CONTROL__STENCIL_TEST_VAL_EXPORT_ENABLE__SHIFT 0x00000001
+#define DB_SHADER_CONTROL__Z_EXPORT_ENABLE_MASK 0x00000001L
+#define DB_SHADER_CONTROL__Z_EXPORT_ENABLE__SHIFT 0x00000000
+#define DB_SHADER_CONTROL__Z_ORDER_MASK 0x00000030L
+#define DB_SHADER_CONTROL__Z_ORDER__SHIFT 0x00000004
+#define DB_SRESULTS_COMPARE_STATE0__COMPAREFUNC0_MASK 0x00000007L
+#define DB_SRESULTS_COMPARE_STATE0__COMPAREFUNC0__SHIFT 0x00000000
+#define DB_SRESULTS_COMPARE_STATE0__COMPAREMASK0_MASK 0x000ff000L
+#define DB_SRESULTS_COMPARE_STATE0__COMPAREMASK0__SHIFT 0x0000000c
+#define DB_SRESULTS_COMPARE_STATE0__COMPAREVALUE0_MASK 0x00000ff0L
+#define DB_SRESULTS_COMPARE_STATE0__COMPAREVALUE0__SHIFT 0x00000004
+#define DB_SRESULTS_COMPARE_STATE0__ENABLE0_MASK 0x01000000L
+#define DB_SRESULTS_COMPARE_STATE0__ENABLE0__SHIFT 0x00000018
+#define DB_SRESULTS_COMPARE_STATE1__COMPAREFUNC1_MASK 0x00000007L
+#define DB_SRESULTS_COMPARE_STATE1__COMPAREFUNC1__SHIFT 0x00000000
+#define DB_SRESULTS_COMPARE_STATE1__COMPAREMASK1_MASK 0x000ff000L
+#define DB_SRESULTS_COMPARE_STATE1__COMPAREMASK1__SHIFT 0x0000000c
+#define DB_SRESULTS_COMPARE_STATE1__COMPAREVALUE1_MASK 0x00000ff0L
+#define DB_SRESULTS_COMPARE_STATE1__COMPAREVALUE1__SHIFT 0x00000004
+#define DB_SRESULTS_COMPARE_STATE1__ENABLE1_MASK 0x01000000L
+#define DB_SRESULTS_COMPARE_STATE1__ENABLE1__SHIFT 0x00000018
+#define DB_STENCIL_CLEAR__CLEAR_MASK 0x000000ffL
+#define DB_STENCIL_CLEAR__CLEAR__SHIFT 0x00000000
+#define DB_STENCIL_CONTROL__STENCILFAIL_BF_MASK 0x0000f000L
+#define DB_STENCIL_CONTROL__STENCILFAIL_BF__SHIFT 0x0000000c
+#define DB_STENCIL_CONTROL__STENCILFAIL_MASK 0x0000000fL
+#define DB_STENCIL_CONTROL__STENCILFAIL__SHIFT 0x00000000
+#define DB_STENCIL_CONTROL__STENCILZFAIL_BF_MASK 0x00f00000L
+#define DB_STENCIL_CONTROL__STENCILZFAIL_BF__SHIFT 0x00000014
+#define DB_STENCIL_CONTROL__STENCILZFAIL_MASK 0x00000f00L
+#define DB_STENCIL_CONTROL__STENCILZFAIL__SHIFT 0x00000008
+#define DB_STENCIL_CONTROL__STENCILZPASS_BF_MASK 0x000f0000L
+#define DB_STENCIL_CONTROL__STENCILZPASS_BF__SHIFT 0x00000010
+#define DB_STENCIL_CONTROL__STENCILZPASS_MASK 0x000000f0L
+#define DB_STENCIL_CONTROL__STENCILZPASS__SHIFT 0x00000004
+#define DB_STENCIL_INFO__ALLOW_EXPCLEAR_MASK 0x08000000L
+#define DB_STENCIL_INFO__ALLOW_EXPCLEAR__SHIFT 0x0000001b
+#define DB_STENCIL_INFO__FORMAT_MASK 0x00000001L
+#define DB_STENCIL_INFO__FORMAT__SHIFT 0x00000000
+#define DB_STENCIL_INFO__TILE_MODE_INDEX_MASK 0x00700000L
+#define DB_STENCIL_INFO__TILE_MODE_INDEX__SHIFT 0x00000014
+#define DB_STENCIL_INFO__TILE_SPLIT_MASK 0x0000e000L
+#define DB_STENCIL_INFO__TILE_SPLIT__SHIFT 0x0000000d
+#define DB_STENCIL_INFO__TILE_STENCIL_DISABLE_MASK 0x20000000L
+#define DB_STENCIL_INFO__TILE_STENCIL_DISABLE__SHIFT 0x0000001d
+#define DB_STENCIL_READ_BASE__BASE_256B_MASK 0xffffffffL
+#define DB_STENCIL_READ_BASE__BASE_256B__SHIFT 0x00000000
+#define DB_STENCILREFMASK_BF__STENCILMASK_BF_MASK 0x0000ff00L
+#define DB_STENCILREFMASK_BF__STENCILMASK_BF__SHIFT 0x00000008
+#define DB_STENCILREFMASK_BF__STENCILOPVAL_BF_MASK 0xff000000L
+#define DB_STENCILREFMASK_BF__STENCILOPVAL_BF__SHIFT 0x00000018
+#define DB_STENCILREFMASK_BF__STENCILTESTVAL_BF_MASK 0x000000ffL
+#define DB_STENCILREFMASK_BF__STENCILTESTVAL_BF__SHIFT 0x00000000
+#define DB_STENCILREFMASK_BF__STENCILWRITEMASK_BF_MASK 0x00ff0000L
+#define DB_STENCILREFMASK_BF__STENCILWRITEMASK_BF__SHIFT 0x00000010
+#define DB_STENCILREFMASK__STENCILMASK_MASK 0x0000ff00L
+#define DB_STENCILREFMASK__STENCILMASK__SHIFT 0x00000008
+#define DB_STENCILREFMASK__STENCILOPVAL_MASK 0xff000000L
+#define DB_STENCILREFMASK__STENCILOPVAL__SHIFT 0x00000018
+#define DB_STENCILREFMASK__STENCILTESTVAL_MASK 0x000000ffL
+#define DB_STENCILREFMASK__STENCILTESTVAL__SHIFT 0x00000000
+#define DB_STENCILREFMASK__STENCILWRITEMASK_MASK 0x00ff0000L
+#define DB_STENCILREFMASK__STENCILWRITEMASK__SHIFT 0x00000010
+#define DB_STENCIL_WRITE_BASE__BASE_256B_MASK 0xffffffffL
+#define DB_STENCIL_WRITE_BASE__BASE_256B__SHIFT 0x00000000
+#define DB_SUBTILE_CONTROL__MSAA16_X_MASK 0x00030000L
+#define DB_SUBTILE_CONTROL__MSAA16_X__SHIFT 0x00000010
+#define DB_SUBTILE_CONTROL__MSAA16_Y_MASK 0x000c0000L
+#define DB_SUBTILE_CONTROL__MSAA16_Y__SHIFT 0x00000012
+#define DB_SUBTILE_CONTROL__MSAA1_X_MASK 0x00000003L
+#define DB_SUBTILE_CONTROL__MSAA1_X__SHIFT 0x00000000
+#define DB_SUBTILE_CONTROL__MSAA1_Y_MASK 0x0000000cL
+#define DB_SUBTILE_CONTROL__MSAA1_Y__SHIFT 0x00000002
+#define DB_SUBTILE_CONTROL__MSAA2_X_MASK 0x00000030L
+#define DB_SUBTILE_CONTROL__MSAA2_X__SHIFT 0x00000004
+#define DB_SUBTILE_CONTROL__MSAA2_Y_MASK 0x000000c0L
+#define DB_SUBTILE_CONTROL__MSAA2_Y__SHIFT 0x00000006
+#define DB_SUBTILE_CONTROL__MSAA4_X_MASK 0x00000300L
+#define DB_SUBTILE_CONTROL__MSAA4_X__SHIFT 0x00000008
+#define DB_SUBTILE_CONTROL__MSAA4_Y_MASK 0x00000c00L
+#define DB_SUBTILE_CONTROL__MSAA4_Y__SHIFT 0x0000000a
+#define DB_SUBTILE_CONTROL__MSAA8_X_MASK 0x00003000L
+#define DB_SUBTILE_CONTROL__MSAA8_X__SHIFT 0x0000000c
+#define DB_SUBTILE_CONTROL__MSAA8_Y_MASK 0x0000c000L
+#define DB_SUBTILE_CONTROL__MSAA8_Y__SHIFT 0x0000000e
+#define DB_WATERMARKS__AUTO_FLUSH_HTILE_MASK 0x40000000L
+#define DB_WATERMARKS__AUTO_FLUSH_HTILE__SHIFT 0x0000001e
+#define DB_WATERMARKS__AUTO_FLUSH_QUAD_MASK 0x80000000L
+#define DB_WATERMARKS__AUTO_FLUSH_QUAD__SHIFT 0x0000001f
+#define DB_WATERMARKS__DEPTH_CACHELINE_FREE_MASK 0x07f00000L
+#define DB_WATERMARKS__DEPTH_CACHELINE_FREE__SHIFT 0x00000014
+#define DB_WATERMARKS__DEPTH_FLUSH_MASK 0x000007e0L
+#define DB_WATERMARKS__DEPTH_FLUSH__SHIFT 0x00000005
+#define DB_WATERMARKS__DEPTH_FREE_MASK 0x0000001fL
+#define DB_WATERMARKS__DEPTH_FREE__SHIFT 0x00000000
+#define DB_WATERMARKS__DEPTH_PENDING_FREE_MASK 0x000f8000L
+#define DB_WATERMARKS__DEPTH_PENDING_FREE__SHIFT 0x0000000f
+#define DB_WATERMARKS__EARLY_Z_PANIC_DISABLE_MASK 0x08000000L
+#define DB_WATERMARKS__EARLY_Z_PANIC_DISABLE__SHIFT 0x0000001b
+#define DB_WATERMARKS__FORCE_SUMMARIZE_MASK 0x00007800L
+#define DB_WATERMARKS__FORCE_SUMMARIZE__SHIFT 0x0000000b
+#define DB_WATERMARKS__LATE_Z_PANIC_DISABLE_MASK 0x10000000L
+#define DB_WATERMARKS__LATE_Z_PANIC_DISABLE__SHIFT 0x0000001c
+#define DB_WATERMARKS__RE_Z_PANIC_DISABLE_MASK 0x20000000L
+#define DB_WATERMARKS__RE_Z_PANIC_DISABLE__SHIFT 0x0000001d
+#define DB_Z_INFO__ALLOW_EXPCLEAR_MASK 0x08000000L
+#define DB_Z_INFO__ALLOW_EXPCLEAR__SHIFT 0x0000001b
+#define DB_Z_INFO__FORMAT_MASK 0x00000003L
+#define DB_Z_INFO__FORMAT__SHIFT 0x00000000
+#define DB_Z_INFO__NUM_SAMPLES_MASK 0x0000000cL
+#define DB_Z_INFO__NUM_SAMPLES__SHIFT 0x00000002
+#define DB_Z_INFO__READ_SIZE_MASK 0x10000000L
+#define DB_Z_INFO__READ_SIZE__SHIFT 0x0000001c
+#define DB_Z_INFO__TILE_MODE_INDEX_MASK 0x00700000L
+#define DB_Z_INFO__TILE_MODE_INDEX__SHIFT 0x00000014
+#define DB_Z_INFO__TILE_SPLIT_MASK 0x0000e000L
+#define DB_Z_INFO__TILE_SPLIT__SHIFT 0x0000000d
+#define DB_Z_INFO__TILE_SURFACE_ENABLE_MASK 0x20000000L
+#define DB_Z_INFO__TILE_SURFACE_ENABLE__SHIFT 0x0000001d
+#define DB_Z_INFO__ZRANGE_PRECISION_MASK 0x80000000L
+#define DB_Z_INFO__ZRANGE_PRECISION__SHIFT 0x0000001f
+#define DB_ZPASS_COUNT_HI__COUNT_HI_MASK 0x7fffffffL
+#define DB_ZPASS_COUNT_HI__COUNT_HI__SHIFT 0x00000000
+#define DB_ZPASS_COUNT_LOW__COUNT_LOW_MASK 0xffffffffL
+#define DB_ZPASS_COUNT_LOW__COUNT_LOW__SHIFT 0x00000000
+#define DB_Z_READ_BASE__BASE_256B_MASK 0xffffffffL
+#define DB_Z_READ_BASE__BASE_256B__SHIFT 0x00000000
+#define DB_Z_WRITE_BASE__BASE_256B_MASK 0xffffffffL
+#define DB_Z_WRITE_BASE__BASE_256B__SHIFT 0x00000000
+#define DEBUG_DATA__DEBUG_DATA_MASK 0xffffffffL
+#define DEBUG_DATA__DEBUG_DATA__SHIFT 0x00000000
+#define DEBUG_INDEX__DEBUG_INDEX_MASK 0x0003ffffL
+#define DEBUG_INDEX__DEBUG_INDEX__SHIFT 0x00000000
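/*
 * Because every definition follows the REGISTER__FIELD_MASK /
 * REGISTER__FIELD__SHIFT naming convention, a token-pasting macro can
 * recover both values from the register and field names alone. This is a
 * sketch in the style of the amdgpu driver's REG_GET_FIELD helper; the
 * macro name here is illustrative, not taken from this patch.
 */
#define SH_FIELD_GET(val, reg, field) \
	(((val) & reg##__##field##_MASK) >> reg##__##field##__SHIFT)

/*
 * Example: decode the depth-compare function from a DB_DEPTH_CONTROL
 * value, expanding to DB_DEPTH_CONTROL__ZFUNC_MASK and
 * DB_DEPTH_CONTROL__ZFUNC__SHIFT as defined above:
 *
 *	unsigned int zfunc =
 *		SH_FIELD_GET(db_depth_control, DB_DEPTH_CONTROL, ZFUNC);
 */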
+#define GB_ADDR_CONFIG__BANK_INTERLEAVE_SIZE_MASK 0x00000700L
+#define GB_ADDR_CONFIG__BANK_INTERLEAVE_SIZE__SHIFT 0x00000008
+#define GB_ADDR_CONFIG__MULTI_GPU_TILE_SIZE_MASK 0x03000000L
+#define GB_ADDR_CONFIG__MULTI_GPU_TILE_SIZE__SHIFT 0x00000018
+#define GB_ADDR_CONFIG__NUM_GPUS_MASK 0x00700000L
+#define GB_ADDR_CONFIG__NUM_GPUS__SHIFT 0x00000014
+#define GB_ADDR_CONFIG__NUM_LOWER_PIPES_MASK 0x40000000L
+#define GB_ADDR_CONFIG__NUM_LOWER_PIPES__SHIFT 0x0000001e
+#define GB_ADDR_CONFIG__NUM_PIPES_MASK 0x00000007L
+#define GB_ADDR_CONFIG__NUM_PIPES__SHIFT 0x00000000
+#define GB_ADDR_CONFIG__NUM_SHADER_ENGINES_MASK 0x00003000L
+#define GB_ADDR_CONFIG__NUM_SHADER_ENGINES__SHIFT 0x0000000c
+#define GB_ADDR_CONFIG__PIPE_INTERLEAVE_SIZE_MASK 0x00000070L
+#define GB_ADDR_CONFIG__PIPE_INTERLEAVE_SIZE__SHIFT 0x00000004
+#define GB_ADDR_CONFIG__ROW_SIZE_MASK 0x30000000L
+#define GB_ADDR_CONFIG__ROW_SIZE__SHIFT 0x0000001c
+#define GB_ADDR_CONFIG__SHADER_ENGINE_TILE_SIZE_MASK 0x00070000L
+#define GB_ADDR_CONFIG__SHADER_ENGINE_TILE_SIZE__SHIFT 0x00000010
+#define GB_BACKEND_MAP__BACKEND_MAP_MASK 0xffffffffL
+#define GB_BACKEND_MAP__BACKEND_MAP__SHIFT 0x00000000
+#define GB_EDC_MODE__BYPASS_MASK 0x80000000L
+#define GB_EDC_MODE__BYPASS__SHIFT 0x0000001f
+#define GB_EDC_MODE__DED_MODE_MASK 0x00300000L
+#define GB_EDC_MODE__DED_MODE__SHIFT 0x00000014
+#define GB_EDC_MODE__FORCE_SEC_ON_DED_MASK 0x00010000L
+#define GB_EDC_MODE__FORCE_SEC_ON_DED__SHIFT 0x00000010
+#define GB_EDC_MODE__PROP_FED_MASK 0x20000000L
+#define GB_EDC_MODE__PROP_FED__SHIFT 0x0000001d
+#define GB_GPU_ID__GPU_ID_MASK 0x0000000fL
+#define GB_GPU_ID__GPU_ID__SHIFT 0x00000000
+#define GB_TILE_MODE0__ARRAY_MODE_MASK 0x0000003cL
+#define GB_TILE_MODE0__ARRAY_MODE__SHIFT 0x00000002
+#define GB_TILE_MODE0__MICRO_TILE_MODE_NEW_MASK 0x01c00000L
+#define GB_TILE_MODE0__MICRO_TILE_MODE_NEW__SHIFT 0x00000016
+#define GB_TILE_MODE0__PIPE_CONFIG_MASK 0x000007c0L
+#define GB_TILE_MODE0__PIPE_CONFIG__SHIFT 0x00000006
+#define GB_TILE_MODE0__SAMPLE_SPLIT_MASK 0x06000000L
+#define GB_TILE_MODE0__SAMPLE_SPLIT__SHIFT 0x00000019
+#define GB_TILE_MODE0__TILE_SPLIT_MASK 0x00003800L
+#define GB_TILE_MODE0__TILE_SPLIT__SHIFT 0x0000000b
+#define GB_TILE_MODE10__ARRAY_MODE_MASK 0x0000003cL
+#define GB_TILE_MODE10__ARRAY_MODE__SHIFT 0x00000002
+#define GB_TILE_MODE10__MICRO_TILE_MODE_NEW_MASK 0x01c00000L
+#define GB_TILE_MODE10__MICRO_TILE_MODE_NEW__SHIFT 0x00000016
+#define GB_TILE_MODE10__PIPE_CONFIG_MASK 0x000007c0L
+#define GB_TILE_MODE10__PIPE_CONFIG__SHIFT 0x00000006
+#define GB_TILE_MODE10__SAMPLE_SPLIT_MASK 0x06000000L
+#define GB_TILE_MODE10__SAMPLE_SPLIT__SHIFT 0x00000019
+#define GB_TILE_MODE10__TILE_SPLIT_MASK 0x00003800L
+#define GB_TILE_MODE10__TILE_SPLIT__SHIFT 0x0000000b
+#define GB_TILE_MODE11__ARRAY_MODE_MASK 0x0000003cL
+#define GB_TILE_MODE11__ARRAY_MODE__SHIFT 0x00000002
+#define GB_TILE_MODE11__MICRO_TILE_MODE_NEW_MASK 0x01c00000L
+#define GB_TILE_MODE11__MICRO_TILE_MODE_NEW__SHIFT 0x00000016
+#define GB_TILE_MODE11__PIPE_CONFIG_MASK 0x000007c0L
+#define GB_TILE_MODE11__PIPE_CONFIG__SHIFT 0x00000006
+#define GB_TILE_MODE11__SAMPLE_SPLIT_MASK 0x06000000L
+#define GB_TILE_MODE11__SAMPLE_SPLIT__SHIFT 0x00000019
+#define GB_TILE_MODE11__TILE_SPLIT_MASK 0x00003800L
+#define GB_TILE_MODE11__TILE_SPLIT__SHIFT 0x0000000b
+#define GB_TILE_MODE12__ARRAY_MODE_MASK 0x0000003cL
+#define GB_TILE_MODE12__ARRAY_MODE__SHIFT 0x00000002
+#define GB_TILE_MODE12__MICRO_TILE_MODE_NEW_MASK 0x01c00000L
+#define GB_TILE_MODE12__MICRO_TILE_MODE_NEW__SHIFT 0x00000016
+#define GB_TILE_MODE12__PIPE_CONFIG_MASK 0x000007c0L
+#define GB_TILE_MODE12__PIPE_CONFIG__SHIFT 0x00000006
+#define GB_TILE_MODE12__SAMPLE_SPLIT_MASK 0x06000000L
+#define GB_TILE_MODE12__SAMPLE_SPLIT__SHIFT 0x00000019
+#define GB_TILE_MODE12__TILE_SPLIT_MASK 0x00003800L
+#define GB_TILE_MODE12__TILE_SPLIT__SHIFT 0x0000000b
+#define GB_TILE_MODE13__ARRAY_MODE_MASK 0x0000003cL
+#define GB_TILE_MODE13__ARRAY_MODE__SHIFT 0x00000002
+#define GB_TILE_MODE13__MICRO_TILE_MODE_NEW_MASK 0x01c00000L
+#define GB_TILE_MODE13__MICRO_TILE_MODE_NEW__SHIFT 0x00000016
+#define GB_TILE_MODE13__PIPE_CONFIG_MASK 0x000007c0L
+#define GB_TILE_MODE13__PIPE_CONFIG__SHIFT 0x00000006
+#define GB_TILE_MODE13__SAMPLE_SPLIT_MASK 0x06000000L
+#define GB_TILE_MODE13__SAMPLE_SPLIT__SHIFT 0x00000019
+#define GB_TILE_MODE13__TILE_SPLIT_MASK 0x00003800L
+#define GB_TILE_MODE13__TILE_SPLIT__SHIFT 0x0000000b
+#define GB_TILE_MODE14__ARRAY_MODE_MASK 0x0000003cL
+#define GB_TILE_MODE14__ARRAY_MODE__SHIFT 0x00000002
+#define GB_TILE_MODE14__MICRO_TILE_MODE_NEW_MASK 0x01c00000L
+#define GB_TILE_MODE14__MICRO_TILE_MODE_NEW__SHIFT 0x00000016
+#define GB_TILE_MODE14__PIPE_CONFIG_MASK 0x000007c0L
+#define GB_TILE_MODE14__PIPE_CONFIG__SHIFT 0x00000006
+#define GB_TILE_MODE14__SAMPLE_SPLIT_MASK 0x06000000L
+#define GB_TILE_MODE14__SAMPLE_SPLIT__SHIFT 0x00000019
+#define GB_TILE_MODE14__TILE_SPLIT_MASK 0x00003800L
+#define GB_TILE_MODE14__TILE_SPLIT__SHIFT 0x0000000b
+#define GB_TILE_MODE15__ARRAY_MODE_MASK 0x0000003cL
+#define GB_TILE_MODE15__ARRAY_MODE__SHIFT 0x00000002
+#define GB_TILE_MODE15__MICRO_TILE_MODE_NEW_MASK 0x01c00000L
+#define GB_TILE_MODE15__MICRO_TILE_MODE_NEW__SHIFT 0x00000016
+#define GB_TILE_MODE15__PIPE_CONFIG_MASK 0x000007c0L
+#define GB_TILE_MODE15__PIPE_CONFIG__SHIFT 0x00000006
+#define GB_TILE_MODE15__SAMPLE_SPLIT_MASK 0x06000000L
+#define GB_TILE_MODE15__SAMPLE_SPLIT__SHIFT 0x00000019
+#define GB_TILE_MODE15__TILE_SPLIT_MASK 0x00003800L
+#define GB_TILE_MODE15__TILE_SPLIT__SHIFT 0x0000000b
+#define GB_TILE_MODE16__ARRAY_MODE_MASK 0x0000003cL
+#define GB_TILE_MODE16__ARRAY_MODE__SHIFT 0x00000002
+#define GB_TILE_MODE16__MICRO_TILE_MODE_NEW_MASK 0x01c00000L
+#define GB_TILE_MODE16__MICRO_TILE_MODE_NEW__SHIFT 0x00000016
+#define GB_TILE_MODE16__PIPE_CONFIG_MASK 0x000007c0L
+#define GB_TILE_MODE16__PIPE_CONFIG__SHIFT 0x00000006
+#define GB_TILE_MODE16__SAMPLE_SPLIT_MASK 0x06000000L
+#define GB_TILE_MODE16__SAMPLE_SPLIT__SHIFT 0x00000019
+#define GB_TILE_MODE16__TILE_SPLIT_MASK 0x00003800L
+#define GB_TILE_MODE16__TILE_SPLIT__SHIFT 0x0000000b
+#define GB_TILE_MODE17__ARRAY_MODE_MASK 0x0000003cL
+#define GB_TILE_MODE17__ARRAY_MODE__SHIFT 0x00000002
+#define GB_TILE_MODE17__MICRO_TILE_MODE_NEW_MASK 0x01c00000L
+#define GB_TILE_MODE17__MICRO_TILE_MODE_NEW__SHIFT 0x00000016
+#define GB_TILE_MODE17__PIPE_CONFIG_MASK 0x000007c0L
+#define GB_TILE_MODE17__PIPE_CONFIG__SHIFT 0x00000006
+#define GB_TILE_MODE17__SAMPLE_SPLIT_MASK 0x06000000L
+#define GB_TILE_MODE17__SAMPLE_SPLIT__SHIFT 0x00000019
+#define GB_TILE_MODE17__TILE_SPLIT_MASK 0x00003800L
+#define GB_TILE_MODE17__TILE_SPLIT__SHIFT 0x0000000b
+#define GB_TILE_MODE18__ARRAY_MODE_MASK 0x0000003cL
+#define GB_TILE_MODE18__ARRAY_MODE__SHIFT 0x00000002
+#define GB_TILE_MODE18__MICRO_TILE_MODE_NEW_MASK 0x01c00000L
+#define GB_TILE_MODE18__MICRO_TILE_MODE_NEW__SHIFT 0x00000016
+#define GB_TILE_MODE18__PIPE_CONFIG_MASK 0x000007c0L
+#define GB_TILE_MODE18__PIPE_CONFIG__SHIFT 0x00000006
+#define GB_TILE_MODE18__SAMPLE_SPLIT_MASK 0x06000000L
+#define GB_TILE_MODE18__SAMPLE_SPLIT__SHIFT 0x00000019
+#define GB_TILE_MODE18__TILE_SPLIT_MASK 0x00003800L
+#define GB_TILE_MODE18__TILE_SPLIT__SHIFT 0x0000000b
+#define GB_TILE_MODE19__ARRAY_MODE_MASK 0x0000003cL
+#define GB_TILE_MODE19__ARRAY_MODE__SHIFT 0x00000002
+#define GB_TILE_MODE19__MICRO_TILE_MODE_NEW_MASK 0x01c00000L
+#define GB_TILE_MODE19__MICRO_TILE_MODE_NEW__SHIFT 0x00000016
+#define GB_TILE_MODE19__PIPE_CONFIG_MASK 0x000007c0L
+#define GB_TILE_MODE19__PIPE_CONFIG__SHIFT 0x00000006
+#define GB_TILE_MODE19__SAMPLE_SPLIT_MASK 0x06000000L
+#define GB_TILE_MODE19__SAMPLE_SPLIT__SHIFT 0x00000019
+#define GB_TILE_MODE19__TILE_SPLIT_MASK 0x00003800L
+#define GB_TILE_MODE19__TILE_SPLIT__SHIFT 0x0000000b
+#define GB_TILE_MODE1__ARRAY_MODE_MASK 0x0000003cL
+#define GB_TILE_MODE1__ARRAY_MODE__SHIFT 0x00000002
+#define GB_TILE_MODE1__MICRO_TILE_MODE_NEW_MASK 0x01c00000L
+#define GB_TILE_MODE1__MICRO_TILE_MODE_NEW__SHIFT 0x00000016
+#define GB_TILE_MODE1__PIPE_CONFIG_MASK 0x000007c0L
+#define GB_TILE_MODE1__PIPE_CONFIG__SHIFT 0x00000006
+#define GB_TILE_MODE1__SAMPLE_SPLIT_MASK 0x06000000L
+#define GB_TILE_MODE1__SAMPLE_SPLIT__SHIFT 0x00000019
+#define GB_TILE_MODE1__TILE_SPLIT_MASK 0x00003800L
+#define GB_TILE_MODE1__TILE_SPLIT__SHIFT 0x0000000b
+#define GB_TILE_MODE20__ARRAY_MODE_MASK 0x0000003cL
+#define GB_TILE_MODE20__ARRAY_MODE__SHIFT 0x00000002
+#define GB_TILE_MODE20__MICRO_TILE_MODE_NEW_MASK 0x01c00000L
+#define GB_TILE_MODE20__MICRO_TILE_MODE_NEW__SHIFT 0x00000016
+#define GB_TILE_MODE20__PIPE_CONFIG_MASK 0x000007c0L
+#define GB_TILE_MODE20__PIPE_CONFIG__SHIFT 0x00000006
+#define GB_TILE_MODE20__SAMPLE_SPLIT_MASK 0x06000000L
+#define GB_TILE_MODE20__SAMPLE_SPLIT__SHIFT 0x00000019
+#define GB_TILE_MODE20__TILE_SPLIT_MASK 0x00003800L
+#define GB_TILE_MODE20__TILE_SPLIT__SHIFT 0x0000000b
+#define GB_TILE_MODE21__ARRAY_MODE_MASK 0x0000003cL
+#define GB_TILE_MODE21__ARRAY_MODE__SHIFT 0x00000002
+#define GB_TILE_MODE21__MICRO_TILE_MODE_NEW_MASK 0x01c00000L
+#define GB_TILE_MODE21__MICRO_TILE_MODE_NEW__SHIFT 0x00000016
+#define GB_TILE_MODE21__PIPE_CONFIG_MASK 0x000007c0L
+#define GB_TILE_MODE21__PIPE_CONFIG__SHIFT 0x00000006
+#define GB_TILE_MODE21__SAMPLE_SPLIT_MASK 0x06000000L
+#define GB_TILE_MODE21__SAMPLE_SPLIT__SHIFT 0x00000019
+#define GB_TILE_MODE21__TILE_SPLIT_MASK 0x00003800L
+#define GB_TILE_MODE21__TILE_SPLIT__SHIFT 0x0000000b
+#define GB_TILE_MODE22__ARRAY_MODE_MASK 0x0000003cL
+#define GB_TILE_MODE22__ARRAY_MODE__SHIFT 0x00000002
+#define GB_TILE_MODE22__MICRO_TILE_MODE_NEW_MASK 0x01c00000L
+#define GB_TILE_MODE22__MICRO_TILE_MODE_NEW__SHIFT 0x00000016
+#define GB_TILE_MODE22__PIPE_CONFIG_MASK 0x000007c0L
+#define GB_TILE_MODE22__PIPE_CONFIG__SHIFT 0x00000006
+#define GB_TILE_MODE22__SAMPLE_SPLIT_MASK 0x06000000L
+#define GB_TILE_MODE22__SAMPLE_SPLIT__SHIFT 0x00000019
+#define GB_TILE_MODE22__TILE_SPLIT_MASK 0x00003800L
+#define GB_TILE_MODE22__TILE_SPLIT__SHIFT 0x0000000b
+#define GB_TILE_MODE23__ARRAY_MODE_MASK 0x0000003cL
+#define GB_TILE_MODE23__ARRAY_MODE__SHIFT 0x00000002
+#define GB_TILE_MODE23__MICRO_TILE_MODE_NEW_MASK 0x01c00000L
+#define GB_TILE_MODE23__MICRO_TILE_MODE_NEW__SHIFT 0x00000016
+#define GB_TILE_MODE23__PIPE_CONFIG_MASK 0x000007c0L
+#define GB_TILE_MODE23__PIPE_CONFIG__SHIFT 0x00000006
+#define GB_TILE_MODE23__SAMPLE_SPLIT_MASK 0x06000000L
+#define GB_TILE_MODE23__SAMPLE_SPLIT__SHIFT 0x00000019
+#define GB_TILE_MODE23__TILE_SPLIT_MASK 0x00003800L
+#define GB_TILE_MODE23__TILE_SPLIT__SHIFT 0x0000000b
+#define GB_TILE_MODE24__ARRAY_MODE_MASK 0x0000003cL
+#define GB_TILE_MODE24__ARRAY_MODE__SHIFT 0x00000002
+#define GB_TILE_MODE24__MICRO_TILE_MODE_NEW_MASK 0x01c00000L
+#define GB_TILE_MODE24__MICRO_TILE_MODE_NEW__SHIFT 0x00000016
+#define GB_TILE_MODE24__PIPE_CONFIG_MASK 0x000007c0L
+#define GB_TILE_MODE24__PIPE_CONFIG__SHIFT 0x00000006
+#define GB_TILE_MODE24__SAMPLE_SPLIT_MASK 0x06000000L
+#define GB_TILE_MODE24__SAMPLE_SPLIT__SHIFT 0x00000019
+#define GB_TILE_MODE24__TILE_SPLIT_MASK 0x00003800L
+#define GB_TILE_MODE24__TILE_SPLIT__SHIFT 0x0000000b
+#define GB_TILE_MODE25__ARRAY_MODE_MASK 0x0000003cL
+#define GB_TILE_MODE25__ARRAY_MODE__SHIFT 0x00000002
+#define GB_TILE_MODE25__MICRO_TILE_MODE_NEW_MASK 0x01c00000L
+#define GB_TILE_MODE25__MICRO_TILE_MODE_NEW__SHIFT 0x00000016
+#define GB_TILE_MODE25__PIPE_CONFIG_MASK 0x000007c0L
+#define GB_TILE_MODE25__PIPE_CONFIG__SHIFT 0x00000006
+#define GB_TILE_MODE25__SAMPLE_SPLIT_MASK 0x06000000L
+#define GB_TILE_MODE25__SAMPLE_SPLIT__SHIFT 0x00000019
+#define GB_TILE_MODE25__TILE_SPLIT_MASK 0x00003800L
+#define GB_TILE_MODE25__TILE_SPLIT__SHIFT 0x0000000b
+#define GB_TILE_MODE26__ARRAY_MODE_MASK 0x0000003cL
+#define GB_TILE_MODE26__ARRAY_MODE__SHIFT 0x00000002
+#define GB_TILE_MODE26__MICRO_TILE_MODE_NEW_MASK 0x01c00000L
+#define GB_TILE_MODE26__MICRO_TILE_MODE_NEW__SHIFT 0x00000016
+#define GB_TILE_MODE26__PIPE_CONFIG_MASK 0x000007c0L
+#define GB_TILE_MODE26__PIPE_CONFIG__SHIFT 0x00000006
+#define GB_TILE_MODE26__SAMPLE_SPLIT_MASK 0x06000000L
+#define GB_TILE_MODE26__SAMPLE_SPLIT__SHIFT 0x00000019
+#define GB_TILE_MODE26__TILE_SPLIT_MASK 0x00003800L
+#define GB_TILE_MODE26__TILE_SPLIT__SHIFT 0x0000000b
+#define GB_TILE_MODE27__ARRAY_MODE_MASK 0x0000003cL
+#define GB_TILE_MODE27__ARRAY_MODE__SHIFT 0x00000002
+#define GB_TILE_MODE27__MICRO_TILE_MODE_NEW_MASK 0x01c00000L
+#define GB_TILE_MODE27__MICRO_TILE_MODE_NEW__SHIFT 0x00000016
+#define GB_TILE_MODE27__PIPE_CONFIG_MASK 0x000007c0L
+#define GB_TILE_MODE27__PIPE_CONFIG__SHIFT 0x00000006
+#define GB_TILE_MODE27__SAMPLE_SPLIT_MASK 0x06000000L
+#define GB_TILE_MODE27__SAMPLE_SPLIT__SHIFT 0x00000019
+#define GB_TILE_MODE27__TILE_SPLIT_MASK 0x00003800L
+#define GB_TILE_MODE27__TILE_SPLIT__SHIFT 0x0000000b
+#define GB_TILE_MODE28__ARRAY_MODE_MASK 0x0000003cL
+#define GB_TILE_MODE28__ARRAY_MODE__SHIFT 0x00000002
+#define GB_TILE_MODE28__MICRO_TILE_MODE_NEW_MASK 0x01c00000L
+#define GB_TILE_MODE28__MICRO_TILE_MODE_NEW__SHIFT 0x00000016
+#define GB_TILE_MODE28__PIPE_CONFIG_MASK 0x000007c0L
+#define GB_TILE_MODE28__PIPE_CONFIG__SHIFT 0x00000006
+#define GB_TILE_MODE28__SAMPLE_SPLIT_MASK 0x06000000L
+#define GB_TILE_MODE28__SAMPLE_SPLIT__SHIFT 0x00000019
+#define GB_TILE_MODE28__TILE_SPLIT_MASK 0x00003800L
+#define GB_TILE_MODE28__TILE_SPLIT__SHIFT 0x0000000b
+#define GB_TILE_MODE29__ARRAY_MODE_MASK 0x0000003cL
+#define GB_TILE_MODE29__ARRAY_MODE__SHIFT 0x00000002
+#define GB_TILE_MODE29__MICRO_TILE_MODE_NEW_MASK 0x01c00000L
+#define GB_TILE_MODE29__MICRO_TILE_MODE_NEW__SHIFT 0x00000016
+#define GB_TILE_MODE29__PIPE_CONFIG_MASK 0x000007c0L
+#define GB_TILE_MODE29__PIPE_CONFIG__SHIFT 0x00000006
+#define GB_TILE_MODE29__SAMPLE_SPLIT_MASK 0x06000000L
+#define GB_TILE_MODE29__SAMPLE_SPLIT__SHIFT 0x00000019
+#define GB_TILE_MODE29__TILE_SPLIT_MASK 0x00003800L
+#define GB_TILE_MODE29__TILE_SPLIT__SHIFT 0x0000000b
+#define GB_TILE_MODE2__ARRAY_MODE_MASK 0x0000003cL
+#define GB_TILE_MODE2__ARRAY_MODE__SHIFT 0x00000002
+#define GB_TILE_MODE2__MICRO_TILE_MODE_NEW_MASK 0x01c00000L
+#define GB_TILE_MODE2__MICRO_TILE_MODE_NEW__SHIFT 0x00000016
+#define GB_TILE_MODE2__PIPE_CONFIG_MASK 0x000007c0L
+#define GB_TILE_MODE2__PIPE_CONFIG__SHIFT 0x00000006
+#define GB_TILE_MODE2__SAMPLE_SPLIT_MASK 0x06000000L
+#define GB_TILE_MODE2__SAMPLE_SPLIT__SHIFT 0x00000019
+#define GB_TILE_MODE2__TILE_SPLIT_MASK 0x00003800L
+#define GB_TILE_MODE2__TILE_SPLIT__SHIFT 0x0000000b
+#define GB_TILE_MODE30__ARRAY_MODE_MASK 0x0000003cL
+#define GB_TILE_MODE30__ARRAY_MODE__SHIFT 0x00000002
+#define GB_TILE_MODE30__MICRO_TILE_MODE_NEW_MASK 0x01c00000L
+#define GB_TILE_MODE30__MICRO_TILE_MODE_NEW__SHIFT 0x00000016
+#define GB_TILE_MODE30__PIPE_CONFIG_MASK 0x000007c0L
+#define GB_TILE_MODE30__PIPE_CONFIG__SHIFT 0x00000006
+#define GB_TILE_MODE30__SAMPLE_SPLIT_MASK 0x06000000L
+#define GB_TILE_MODE30__SAMPLE_SPLIT__SHIFT 0x00000019
+#define GB_TILE_MODE30__TILE_SPLIT_MASK 0x00003800L
+#define GB_TILE_MODE30__TILE_SPLIT__SHIFT 0x0000000b
+#define GB_TILE_MODE31__ARRAY_MODE_MASK 0x0000003cL
+#define GB_TILE_MODE31__ARRAY_MODE__SHIFT 0x00000002
+#define GB_TILE_MODE31__MICRO_TILE_MODE_NEW_MASK 0x01c00000L
+#define GB_TILE_MODE31__MICRO_TILE_MODE_NEW__SHIFT 0x00000016
+#define GB_TILE_MODE31__PIPE_CONFIG_MASK 0x000007c0L
+#define GB_TILE_MODE31__PIPE_CONFIG__SHIFT 0x00000006
+#define GB_TILE_MODE31__SAMPLE_SPLIT_MASK 0x06000000L
+#define GB_TILE_MODE31__SAMPLE_SPLIT__SHIFT 0x00000019
+#define GB_TILE_MODE31__TILE_SPLIT_MASK 0x00003800L
+#define GB_TILE_MODE31__TILE_SPLIT__SHIFT 0x0000000b
+#define GB_TILE_MODE3__ARRAY_MODE_MASK 0x0000003cL
+#define GB_TILE_MODE3__ARRAY_MODE__SHIFT 0x00000002
+#define GB_TILE_MODE3__MICRO_TILE_MODE_NEW_MASK 0x01c00000L
+#define GB_TILE_MODE3__MICRO_TILE_MODE_NEW__SHIFT 0x00000016
+#define GB_TILE_MODE3__PIPE_CONFIG_MASK 0x000007c0L
+#define GB_TILE_MODE3__PIPE_CONFIG__SHIFT 0x00000006
+#define GB_TILE_MODE3__SAMPLE_SPLIT_MASK 0x06000000L
+#define GB_TILE_MODE3__SAMPLE_SPLIT__SHIFT 0x00000019
+#define GB_TILE_MODE3__TILE_SPLIT_MASK 0x00003800L
+#define GB_TILE_MODE3__TILE_SPLIT__SHIFT 0x0000000b
+#define GB_TILE_MODE4__ARRAY_MODE_MASK 0x0000003cL
+#define GB_TILE_MODE4__ARRAY_MODE__SHIFT 0x00000002
+#define GB_TILE_MODE4__MICRO_TILE_MODE_NEW_MASK 0x01c00000L
+#define GB_TILE_MODE4__MICRO_TILE_MODE_NEW__SHIFT 0x00000016
+#define GB_TILE_MODE4__PIPE_CONFIG_MASK 0x000007c0L
+#define GB_TILE_MODE4__PIPE_CONFIG__SHIFT 0x00000006
+#define GB_TILE_MODE4__SAMPLE_SPLIT_MASK 0x06000000L
+#define GB_TILE_MODE4__SAMPLE_SPLIT__SHIFT 0x00000019
+#define GB_TILE_MODE4__TILE_SPLIT_MASK 0x00003800L
+#define GB_TILE_MODE4__TILE_SPLIT__SHIFT 0x0000000b
+#define GB_TILE_MODE5__ARRAY_MODE_MASK 0x0000003cL
+#define GB_TILE_MODE5__ARRAY_MODE__SHIFT 0x00000002
+#define GB_TILE_MODE5__MICRO_TILE_MODE_NEW_MASK 0x01c00000L
+#define GB_TILE_MODE5__MICRO_TILE_MODE_NEW__SHIFT 0x00000016
+#define GB_TILE_MODE5__PIPE_CONFIG_MASK 0x000007c0L
+#define GB_TILE_MODE5__PIPE_CONFIG__SHIFT 0x00000006
+#define GB_TILE_MODE5__SAMPLE_SPLIT_MASK 0x06000000L
+#define GB_TILE_MODE5__SAMPLE_SPLIT__SHIFT 0x00000019
+#define GB_TILE_MODE5__TILE_SPLIT_MASK 0x00003800L
+#define GB_TILE_MODE5__TILE_SPLIT__SHIFT 0x0000000b
+#define GB_TILE_MODE6__ARRAY_MODE_MASK 0x0000003cL
+#define GB_TILE_MODE6__ARRAY_MODE__SHIFT 0x00000002
+#define GB_TILE_MODE6__MICRO_TILE_MODE_NEW_MASK 0x01c00000L
+#define GB_TILE_MODE6__MICRO_TILE_MODE_NEW__SHIFT 0x00000016
+#define GB_TILE_MODE6__PIPE_CONFIG_MASK 0x000007c0L
+#define GB_TILE_MODE6__PIPE_CONFIG__SHIFT 0x00000006
+#define GB_TILE_MODE6__SAMPLE_SPLIT_MASK 0x06000000L
+#define GB_TILE_MODE6__SAMPLE_SPLIT__SHIFT 0x00000019
+#define GB_TILE_MODE6__TILE_SPLIT_MASK 0x00003800L
+#define GB_TILE_MODE6__TILE_SPLIT__SHIFT 0x0000000b
+#define GB_TILE_MODE7__ARRAY_MODE_MASK 0x0000003cL
+#define GB_TILE_MODE7__ARRAY_MODE__SHIFT 0x00000002
+#define GB_TILE_MODE7__MICRO_TILE_MODE_NEW_MASK 0x01c00000L
+#define GB_TILE_MODE7__MICRO_TILE_MODE_NEW__SHIFT 0x00000016
+#define GB_TILE_MODE7__PIPE_CONFIG_MASK 0x000007c0L
+#define GB_TILE_MODE7__PIPE_CONFIG__SHIFT 0x00000006
+#define GB_TILE_MODE7__SAMPLE_SPLIT_MASK 0x06000000L
+#define GB_TILE_MODE7__SAMPLE_SPLIT__SHIFT 0x00000019
+#define GB_TILE_MODE7__TILE_SPLIT_MASK 0x00003800L
+#define GB_TILE_MODE7__TILE_SPLIT__SHIFT 0x0000000b
+#define GB_TILE_MODE8__ARRAY_MODE_MASK 0x0000003cL
+#define GB_TILE_MODE8__ARRAY_MODE__SHIFT 0x00000002
+#define GB_TILE_MODE8__MICRO_TILE_MODE_NEW_MASK 0x01c00000L
+#define GB_TILE_MODE8__MICRO_TILE_MODE_NEW__SHIFT 0x00000016
+#define GB_TILE_MODE8__PIPE_CONFIG_MASK 0x000007c0L
+#define GB_TILE_MODE8__PIPE_CONFIG__SHIFT 0x00000006
+#define GB_TILE_MODE8__SAMPLE_SPLIT_MASK 0x06000000L
+#define GB_TILE_MODE8__SAMPLE_SPLIT__SHIFT 0x00000019
+#define GB_TILE_MODE8__TILE_SPLIT_MASK 0x00003800L
+#define GB_TILE_MODE8__TILE_SPLIT__SHIFT 0x0000000b
+#define GB_TILE_MODE9__ARRAY_MODE_MASK 0x0000003cL
+#define GB_TILE_MODE9__ARRAY_MODE__SHIFT 0x00000002
+#define GB_TILE_MODE9__MICRO_TILE_MODE_NEW_MASK 0x01c00000L
+#define GB_TILE_MODE9__MICRO_TILE_MODE_NEW__SHIFT 0x00000016
+#define GB_TILE_MODE9__PIPE_CONFIG_MASK 0x000007c0L
+#define GB_TILE_MODE9__PIPE_CONFIG__SHIFT 0x00000006
+#define GB_TILE_MODE9__SAMPLE_SPLIT_MASK 0x06000000L
+#define GB_TILE_MODE9__SAMPLE_SPLIT__SHIFT 0x00000019
+#define GB_TILE_MODE9__TILE_SPLIT_MASK 0x00003800L
+#define GB_TILE_MODE9__TILE_SPLIT__SHIFT 0x0000000b
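Every field above follows the same convention: a _MASK define covering the field's bits and a __SHIFT define giving its bit offset, so decoding a field is a mask-and-shift. A minimal, self-contained sketch of reading fields out of a raw GB_TILE_MODE1 value under that convention; the REG_FIELD_GET helper and the sample readback value are illustrative, not part of this patch (the mask/shift values are copied verbatim from the defines above):

#include <stdint.h>
#include <stdio.h>

/* Field definitions copied from the header above. */
#define GB_TILE_MODE1__ARRAY_MODE_MASK    0x0000003cL
#define GB_TILE_MODE1__ARRAY_MODE__SHIFT  0x00000002
#define GB_TILE_MODE1__PIPE_CONFIG_MASK   0x000007c0L
#define GB_TILE_MODE1__PIPE_CONFIG__SHIFT 0x00000006

/* Extract a named field from a raw register value: mask, then shift down. */
#define REG_FIELD_GET(reg, field) \
	(((reg) & field##_MASK) >> field##__SHIFT)

int main(void)
{
	uint32_t gb_tile_mode1 = 0x000002c4; /* illustrative raw readback */

	printf("ARRAY_MODE  = %lu\n", (unsigned long)
	       REG_FIELD_GET(gb_tile_mode1, GB_TILE_MODE1__ARRAY_MODE));
	printf("PIPE_CONFIG = %lu\n", (unsigned long)
	       REG_FIELD_GET(gb_tile_mode1, GB_TILE_MODE1__PIPE_CONFIG));
	return 0;
}

The token-pasting in REG_FIELD_GET means the same helper works for any of the _MASK/__SHIFT pairs in this file.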
+#define GC_USER_RB_BACKEND_DISABLE__BACKEND_DISABLE_MASK 0x00ff0000L
+#define GC_USER_RB_BACKEND_DISABLE__BACKEND_DISABLE__SHIFT 0x00000010
+#define GC_USER_SHADER_ARRAY_CONFIG__DPFP_RATE_MASK 0x00000006L
+#define GC_USER_SHADER_ARRAY_CONFIG__DPFP_RATE__SHIFT 0x00000001
+#define GC_USER_SHADER_ARRAY_CONFIG__HALF_LDS_MASK 0x00000010L
+#define GC_USER_SHADER_ARRAY_CONFIG__HALF_LDS__SHIFT 0x00000004
+#define GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_CUS_MASK 0xffff0000L
+#define GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_CUS__SHIFT 0x00000010
+#define GC_USER_SHADER_ARRAY_CONFIG__SQC_BALANCE_DISABLE_MASK 0x00000008L
+#define GC_USER_SHADER_ARRAY_CONFIG__SQC_BALANCE_DISABLE__SHIFT 0x00000003
+#define GDS_ATOM_BASE__BASE_MASK 0x0000ffffL
+#define GDS_ATOM_BASE__BASE__SHIFT 0x00000000
+#define GDS_ATOM_BASE__UNUSED_MASK 0xffff0000L
+#define GDS_ATOM_BASE__UNUSED__SHIFT 0x00000010
+#define GDS_ATOM_CNTL__AINC_MASK 0x0000003fL
+#define GDS_ATOM_CNTL__AINC__SHIFT 0x00000000
+#define GDS_ATOM_CNTL__DMODE_MASK 0x00000100L
+#define GDS_ATOM_CNTL__DMODE__SHIFT 0x00000008
+#define GDS_ATOM_CNTL__UNUSED1_MASK 0x000000c0L
+#define GDS_ATOM_CNTL__UNUSED1__SHIFT 0x00000006
+#define GDS_ATOM_CNTL__UNUSED2_MASK 0xfffffe00L
+#define GDS_ATOM_CNTL__UNUSED2__SHIFT 0x00000009
+#define GDS_ATOM_COMPLETE__COMPLETE_MASK 0x00000001L
+#define GDS_ATOM_COMPLETE__COMPLETE__SHIFT 0x00000000
+#define GDS_ATOM_COMPLETE__UNUSED_MASK 0xfffffffeL
+#define GDS_ATOM_COMPLETE__UNUSED__SHIFT 0x00000001
+#define GDS_ATOM_DST__DST_MASK 0xffffffffL
+#define GDS_ATOM_DST__DST__SHIFT 0x00000000
+#define GDS_ATOM_OFFSET0__OFFSET0_MASK 0x000000ffL
+#define GDS_ATOM_OFFSET0__OFFSET0__SHIFT 0x00000000
+#define GDS_ATOM_OFFSET0__UNUSED_MASK 0xffffff00L
+#define GDS_ATOM_OFFSET0__UNUSED__SHIFT 0x00000008
+#define GDS_ATOM_OFFSET1__OFFSET1_MASK 0x000000ffL
+#define GDS_ATOM_OFFSET1__OFFSET1__SHIFT 0x00000000
+#define GDS_ATOM_OFFSET1__UNUSED_MASK 0xffffff00L
+#define GDS_ATOM_OFFSET1__UNUSED__SHIFT 0x00000008
+#define GDS_ATOM_OP__OP_MASK 0x000000ffL
+#define GDS_ATOM_OP__OP__SHIFT 0x00000000
+#define GDS_ATOM_OP__UNUSED_MASK 0xffffff00L
+#define GDS_ATOM_OP__UNUSED__SHIFT 0x00000008
+#define GDS_ATOM_READ0__DATA_MASK 0xffffffffL
+#define GDS_ATOM_READ0__DATA__SHIFT 0x00000000
+#define GDS_ATOM_READ0_U__DATA_MASK 0xffffffffL
+#define GDS_ATOM_READ0_U__DATA__SHIFT 0x00000000
+#define GDS_ATOM_READ1__DATA_MASK 0xffffffffL
+#define GDS_ATOM_READ1__DATA__SHIFT 0x00000000
+#define GDS_ATOM_READ1_U__DATA_MASK 0xffffffffL
+#define GDS_ATOM_READ1_U__DATA__SHIFT 0x00000000
+#define GDS_ATOM_SIZE__SIZE_MASK 0x0000ffffL
+#define GDS_ATOM_SIZE__SIZE__SHIFT 0x00000000
+#define GDS_ATOM_SIZE__UNUSED_MASK 0xffff0000L
+#define GDS_ATOM_SIZE__UNUSED__SHIFT 0x00000010
+#define GDS_ATOM_SRC0__DATA_MASK 0xffffffffL
+#define GDS_ATOM_SRC0__DATA__SHIFT 0x00000000
+#define GDS_ATOM_SRC0_U__DATA_MASK 0xffffffffL
+#define GDS_ATOM_SRC0_U__DATA__SHIFT 0x00000000
+#define GDS_ATOM_SRC1__DATA_MASK 0xffffffffL
+#define GDS_ATOM_SRC1__DATA__SHIFT 0x00000000
+#define GDS_ATOM_SRC1_U__DATA_MASK 0xffffffffL
+#define GDS_ATOM_SRC1_U__DATA__SHIFT 0x00000000
+#define GDS_CNTL_STATUS__DS_ADDR_CONFLICT_MASK 0x00000010L
+#define GDS_CNTL_STATUS__DS_ADDR_CONFLICT__SHIFT 0x00000004
+#define GDS_CNTL_STATUS__DS_BANK_CONFLICT_MASK 0x00000008L
+#define GDS_CNTL_STATUS__DS_BANK_CONFLICT__SHIFT 0x00000003
+#define GDS_CNTL_STATUS__DS_RD_CLAMP_MASK 0x00000040L
+#define GDS_CNTL_STATUS__DS_RD_CLAMP__SHIFT 0x00000006
+#define GDS_CNTL_STATUS__DS_WR_CLAMP_MASK 0x00000020L
+#define GDS_CNTL_STATUS__DS_WR_CLAMP__SHIFT 0x00000005
+#define GDS_CNTL_STATUS__GDS_BUSY_MASK 0x00000001L
+#define GDS_CNTL_STATUS__GDS_BUSY__SHIFT 0x00000000
+#define GDS_CNTL_STATUS__GRBM_WBUF_BUSY_MASK 0x00000002L
+#define GDS_CNTL_STATUS__GRBM_WBUF_BUSY__SHIFT 0x00000001
+#define GDS_CNTL_STATUS__ORD_APP_BUSY_MASK 0x00000004L
+#define GDS_CNTL_STATUS__ORD_APP_BUSY__SHIFT 0x00000002
+#define GDS_CONFIG__SH0_GPR_PHASE_SEL_MASK 0x00000006L
+#define GDS_CONFIG__SH0_GPR_PHASE_SEL__SHIFT 0x00000001
+#define GDS_CONFIG__SH1_GPR_PHASE_SEL_MASK 0x00000018L
+#define GDS_CONFIG__SH1_GPR_PHASE_SEL__SHIFT 0x00000003
+#define GDS_CONFIG__SH2_GPR_PHASE_SEL_MASK 0x00000060L
+#define GDS_CONFIG__SH2_GPR_PHASE_SEL__SHIFT 0x00000005
+#define GDS_CONFIG__SH3_GPR_PHASE_SEL_MASK 0x00000180L
+#define GDS_CONFIG__SH3_GPR_PHASE_SEL__SHIFT 0x00000007
+#define GDS_DEBUG_CNTL__GDS_DEBUG_INDX_MASK 0x0000001fL
+#define GDS_DEBUG_CNTL__GDS_DEBUG_INDX__SHIFT 0x00000000
+#define GDS_DEBUG_CNTL__UNUSED_MASK 0xffffffe0L
+#define GDS_DEBUG_CNTL__UNUSED__SHIFT 0x00000005
+#define GDS_DEBUG_DATA__DATA_MASK 0xffffffffL
+#define GDS_DEBUG_DATA__DATA__SHIFT 0x00000000
+#define GDS_DEBUG_REG0__buff_write_MASK 0x00020000L
+#define GDS_DEBUG_REG0__buff_write__SHIFT 0x00000011
+#define GDS_DEBUG_REG0__cstate_MASK 0x0001e000L
+#define GDS_DEBUG_REG0__cstate__SHIFT 0x0000000d
+#define GDS_DEBUG_REG0__flush_request_MASK 0x00040000L
+#define GDS_DEBUG_REG0__flush_request__SHIFT 0x00000012
+#define GDS_DEBUG_REG0__last_pixel_ptr_MASK 0x00001000L
+#define GDS_DEBUG_REG0__last_pixel_ptr__SHIFT 0x0000000c
+#define GDS_DEBUG_REG0__spare1_MASK 0x00000001L
+#define GDS_DEBUG_REG0__spare1__SHIFT 0x00000000
+#define GDS_DEBUG_REG0__spare_MASK 0xff000000L
+#define GDS_DEBUG_REG0__spare__SHIFT 0x00000018
+#define GDS_DEBUG_REG0__wbuf_fifo_empty_MASK 0x00100000L
+#define GDS_DEBUG_REG0__wbuf_fifo_empty__SHIFT 0x00000014
+#define GDS_DEBUG_REG0__wbuf_fifo_full_MASK 0x00200000L
+#define GDS_DEBUG_REG0__wbuf_fifo_full__SHIFT 0x00000015
+#define GDS_DEBUG_REG0__wr_buffer_wr_complete_MASK 0x00080000L
+#define GDS_DEBUG_REG0__wr_buffer_wr_complete__SHIFT 0x00000013
+#define GDS_DEBUG_REG0__write_buff_valid_MASK 0x00000040L
+#define GDS_DEBUG_REG0__write_buff_valid__SHIFT 0x00000006
+#define GDS_DEBUG_REG0__wr_pixel_nxt_ptr_MASK 0x00000f80L
+#define GDS_DEBUG_REG0__wr_pixel_nxt_ptr__SHIFT 0x00000007
+#define GDS_DEBUG_REG1__addr_fifo_empty_MASK 0x00200000L
+#define GDS_DEBUG_REG1__addr_fifo_empty__SHIFT 0x00000015
+#define GDS_DEBUG_REG1__addr_fifo_full_MASK 0x00100000L
+#define GDS_DEBUG_REG1__addr_fifo_full__SHIFT 0x00000014
+#define GDS_DEBUG_REG1__awaiting_data_MASK 0x00080000L
+#define GDS_DEBUG_REG1__awaiting_data__SHIFT 0x00000013
+#define GDS_DEBUG_REG1__buffer_invalid_MASK 0x00800000L
+#define GDS_DEBUG_REG1__buffer_invalid__SHIFT 0x00000017
+#define GDS_DEBUG_REG1__buffer_loaded_MASK 0x00400000L
+#define GDS_DEBUG_REG1__buffer_loaded__SHIFT 0x00000016
+#define GDS_DEBUG_REG1__data_ready_MASK 0x00040000L
+#define GDS_DEBUG_REG1__data_ready__SHIFT 0x00000012
+#define GDS_DEBUG_REG1__pixel_addr_MASK 0x0001fffcL
+#define GDS_DEBUG_REG1__pixel_addr__SHIFT 0x00000002
+#define GDS_DEBUG_REG1__pixel_vld_MASK 0x00020000L
+#define GDS_DEBUG_REG1__pixel_vld__SHIFT 0x00000011
+#define GDS_DEBUG_REG1__spare_MASK 0xff000000L
+#define GDS_DEBUG_REG1__spare__SHIFT 0x00000018
+#define GDS_DEBUG_REG1__tag_hit_MASK 0x00000001L
+#define GDS_DEBUG_REG1__tag_hit__SHIFT 0x00000000
+#define GDS_DEBUG_REG1__tag_miss_MASK 0x00000002L
+#define GDS_DEBUG_REG1__tag_miss__SHIFT 0x00000001
+#define GDS_DEBUG_REG2__app_sel_MASK 0x000000f0L
+#define GDS_DEBUG_REG2__app_sel__SHIFT 0x00000004
+#define GDS_DEBUG_REG2__cmd_write_MASK 0x00000008L
+#define GDS_DEBUG_REG2__cmd_write__SHIFT 0x00000003
+#define GDS_DEBUG_REG2__ds_credit_avail_MASK 0x00000002L
+#define GDS_DEBUG_REG2__ds_credit_avail__SHIFT 0x00000001
+#define GDS_DEBUG_REG2__ds_full_MASK 0x00000001L
+#define GDS_DEBUG_REG2__ds_full__SHIFT 0x00000000
+#define GDS_DEBUG_REG2__ord_idx_free_MASK 0x00000004L
+#define GDS_DEBUG_REG2__ord_idx_free__SHIFT 0x00000002
+#define GDS_DEBUG_REG2__req_MASK 0x007fff00L
+#define GDS_DEBUG_REG2__req__SHIFT 0x00000008
+#define GDS_DEBUG_REG2__spare_MASK 0xff000000L
+#define GDS_DEBUG_REG2__spare__SHIFT 0x00000018
+#define GDS_DEBUG_REG3__pipe0_busy_num_MASK 0x00007800L
+#define GDS_DEBUG_REG3__pipe0_busy_num__SHIFT 0x0000000b
+#define GDS_DEBUG_REG3__pipe_num_busy_MASK 0x000007ffL
+#define GDS_DEBUG_REG3__pipe_num_busy__SHIFT 0x00000000
+#define GDS_DEBUG_REG3__spare_MASK 0xff000000L
+#define GDS_DEBUG_REG3__spare__SHIFT 0x00000018
+#define GDS_DEBUG_REG4__cmd_write_MASK 0x00020000L
+#define GDS_DEBUG_REG4__cmd_write__SHIFT 0x00000011
+#define GDS_DEBUG_REG4__credit_cnt_gt0_MASK 0x00010000L
+#define GDS_DEBUG_REG4__credit_cnt_gt0__SHIFT 0x00000010
+#define GDS_DEBUG_REG4__cur_reso_barrier_MASK 0x00002000L
+#define GDS_DEBUG_REG4__cur_reso_barrier__SHIFT 0x0000000d
+#define GDS_DEBUG_REG4__cur_reso_cnt_gt0_MASK 0x00008000L
+#define GDS_DEBUG_REG4__cur_reso_cnt_gt0__SHIFT 0x0000000f
+#define GDS_DEBUG_REG4__cur_reso_fed_MASK 0x00001000L
+#define GDS_DEBUG_REG4__cur_reso_fed__SHIFT 0x0000000c
+#define GDS_DEBUG_REG4__cur_reso_flag_MASK 0x00004000L
+#define GDS_DEBUG_REG4__cur_reso_flag__SHIFT 0x0000000e
+#define GDS_DEBUG_REG4__cur_reso_head_dirty_MASK 0x00000400L
+#define GDS_DEBUG_REG4__cur_reso_head_dirty__SHIFT 0x0000000a
+#define GDS_DEBUG_REG4__cur_reso_head_flag_MASK 0x00000800L
+#define GDS_DEBUG_REG4__cur_reso_head_flag__SHIFT 0x0000000b
+#define GDS_DEBUG_REG4__cur_reso_head_valid_MASK 0x00000200L
+#define GDS_DEBUG_REG4__cur_reso_head_valid__SHIFT 0x00000009
+#define GDS_DEBUG_REG4__cur_reso_MASK 0x000001f8L
+#define GDS_DEBUG_REG4__cur_reso__SHIFT 0x00000003
+#define GDS_DEBUG_REG4__grbm_gws_reso_rd_MASK 0x00080000L
+#define GDS_DEBUG_REG4__grbm_gws_reso_rd__SHIFT 0x00000013
+#define GDS_DEBUG_REG4__grbm_gws_reso_wr_MASK 0x00040000L
+#define GDS_DEBUG_REG4__grbm_gws_reso_wr__SHIFT 0x00000012
+#define GDS_DEBUG_REG4__gws_bulkfree_MASK 0x00200000L
+#define GDS_DEBUG_REG4__gws_bulkfree__SHIFT 0x00000015
+#define GDS_DEBUG_REG4__gws_busy_MASK 0x00000001L
+#define GDS_DEBUG_REG4__gws_busy__SHIFT 0x00000000
+#define GDS_DEBUG_REG4__gws_out_stall_MASK 0x00000004L
+#define GDS_DEBUG_REG4__gws_out_stall__SHIFT 0x00000002
+#define GDS_DEBUG_REG4__gws_req_MASK 0x00000002L
+#define GDS_DEBUG_REG4__gws_req__SHIFT 0x00000001
+#define GDS_DEBUG_REG4__ram_gws_re_MASK 0x00400000L
+#define GDS_DEBUG_REG4__ram_gws_re__SHIFT 0x00000016
+#define GDS_DEBUG_REG4__ram_gws_we_MASK 0x00800000L
+#define GDS_DEBUG_REG4__ram_gws_we__SHIFT 0x00000017
+#define GDS_DEBUG_REG4__ram_read_busy_MASK 0x00100000L
+#define GDS_DEBUG_REG4__ram_read_busy__SHIFT 0x00000014
+#define GDS_DEBUG_REG4__spare_MASK 0xff000000L
+#define GDS_DEBUG_REG4__spare__SHIFT 0x00000018
+#define GDS_DEBUG_REG5__alloc_opco_error_MASK 0x00000004L
+#define GDS_DEBUG_REG5__alloc_opco_error__SHIFT 0x00000002
+#define GDS_DEBUG_REG5__dealloc_opco_error_MASK 0x00000008L
+#define GDS_DEBUG_REG5__dealloc_opco_error__SHIFT 0x00000003
+#define GDS_DEBUG_REG5__dec_error_MASK 0x00000002L
+#define GDS_DEBUG_REG5__dec_error__SHIFT 0x00000001
+#define GDS_DEBUG_REG5__error_ds_address_MASK 0x003fff00L
+#define GDS_DEBUG_REG5__error_ds_address__SHIFT 0x00000008
+#define GDS_DEBUG_REG5__spare1_MASK 0xffc00000L
+#define GDS_DEBUG_REG5__spare1__SHIFT 0x00000016
+#define GDS_DEBUG_REG5__spare_MASK 0xff000000L
+#define GDS_DEBUG_REG5__spare__SHIFT 0x00000018
+#define GDS_DEBUG_REG5__wrap_opco_error_MASK 0x00000010L
+#define GDS_DEBUG_REG5__wrap_opco_error__SHIFT 0x00000004
+#define GDS_DEBUG_REG5__write_dis_MASK 0x00000001L
+#define GDS_DEBUG_REG5__write_dis__SHIFT 0x00000000
+#define GDS_DEBUG_REG6__counters_busy_MASK 0x001fffe0L
+#define GDS_DEBUG_REG6__counters_busy__SHIFT 0x00000005
+#define GDS_DEBUG_REG6__counters_enabled_MASK 0x0000001eL
+#define GDS_DEBUG_REG6__counters_enabled__SHIFT 0x00000001
+#define GDS_DEBUG_REG6__oa_busy_MASK 0x00000001L
+#define GDS_DEBUG_REG6__oa_busy__SHIFT 0x00000000
+#define GDS_DEBUG_REG6__spare_MASK 0xff000000L
+#define GDS_DEBUG_REG6__spare__SHIFT 0x00000018
+#define GDS_ENHANCE__AUTO_INC_INDEX_MASK 0x00010000L
+#define GDS_ENHANCE__AUTO_INC_INDEX__SHIFT 0x00000010
+#define GDS_ENHANCE__MISC_MASK 0x0000ffffL
+#define GDS_ENHANCE__MISC__SHIFT 0x00000000
+#define GDS_ENHANCE__UNUSED_MASK 0xffff0000L
+#define GDS_ENHANCE__UNUSED__SHIFT 0x00000010
+#define GDS_GRBM_SECDED_CNT__DED_MASK 0xffff0000L
+#define GDS_GRBM_SECDED_CNT__DED__SHIFT 0x00000010
+#define GDS_GRBM_SECDED_CNT__SEC_MASK 0x0000ffffL
+#define GDS_GRBM_SECDED_CNT__SEC__SHIFT 0x00000000
+#define GDS_GWS_RESOURCE_CNTL__INDEX_MASK 0x0000003fL
+#define GDS_GWS_RESOURCE_CNTL__INDEX__SHIFT 0x00000000
+#define GDS_GWS_RESOURCE_CNTL__UNUSED_MASK 0xffffffc0L
+#define GDS_GWS_RESOURCE_CNTL__UNUSED__SHIFT 0x00000006
+#define GDS_GWS_RESOURCE_CNT__RESOURCE_CNT_MASK 0x0000ffffL
+#define GDS_GWS_RESOURCE_CNT__RESOURCE_CNT__SHIFT 0x00000000
+#define GDS_GWS_RESOURCE_CNT__UNUSED_MASK 0xffff0000L
+#define GDS_GWS_RESOURCE_CNT__UNUSED__SHIFT 0x00000010
+#define GDS_GWS_RESOURCE__COUNTER_MASK 0x00001ffeL
+#define GDS_GWS_RESOURCE__COUNTER__SHIFT 0x00000001
+#define GDS_GWS_RESOURCE__DED_MASK 0x00004000L
+#define GDS_GWS_RESOURCE__DED__SHIFT 0x0000000e
+#define GDS_GWS_RESOURCE__FLAG_MASK 0x00000001L
+#define GDS_GWS_RESOURCE__FLAG__SHIFT 0x00000000
+#define GDS_GWS_RESOURCE__HEAD_FLAG_MASK 0x10000000L
+#define GDS_GWS_RESOURCE__HEAD_FLAG__SHIFT 0x0000001c
+#define GDS_GWS_RESOURCE__HEAD_QUEUE_MASK 0x07ff0000L
+#define GDS_GWS_RESOURCE__HEAD_QUEUE__SHIFT 0x00000010
+#define GDS_GWS_RESOURCE__HEAD_VALID_MASK 0x08000000L
+#define GDS_GWS_RESOURCE__HEAD_VALID__SHIFT 0x0000001b
+#define GDS_GWS_RESOURCE__RELEASE_ALL_MASK 0x00008000L
+#define GDS_GWS_RESOURCE__RELEASE_ALL__SHIFT 0x0000000f
+#define GDS_GWS_RESOURCE_RESET__RESET_MASK 0x00000001L
+#define GDS_GWS_RESOURCE_RESET__RESET__SHIFT 0x00000000
+#define GDS_GWS_RESOURCE_RESET__RESOURCE_ID_MASK 0x0000ff00L
+#define GDS_GWS_RESOURCE_RESET__RESOURCE_ID__SHIFT 0x00000008
+#define GDS_GWS_RESOURCE__TYPE_MASK 0x00002000L
+#define GDS_GWS_RESOURCE__TYPE__SHIFT 0x0000000d
+#define GDS_GWS_RESOURCE__UNUSED1_MASK 0xe0000000L
+#define GDS_GWS_RESOURCE__UNUSED1__SHIFT 0x0000001d
+#define GDS_OA_DED__ME0_CS_DED_MASK 0x00000004L
+#define GDS_OA_DED__ME0_CS_DED__SHIFT 0x00000002
+#define GDS_OA_DED__ME0_GFXHP3D_PIX_DED_MASK 0x00000001L
+#define GDS_OA_DED__ME0_GFXHP3D_PIX_DED__SHIFT 0x00000000
+#define GDS_OA_DED__ME0_GFXHP3D_VTX_DED_MASK 0x00000002L
+#define GDS_OA_DED__ME0_GFXHP3D_VTX_DED__SHIFT 0x00000001
+#define GDS_OA_DED__ME1_PIPE0_DED_MASK 0x00000010L
+#define GDS_OA_DED__ME1_PIPE0_DED__SHIFT 0x00000004
+#define GDS_OA_DED__ME1_PIPE1_DED_MASK 0x00000020L
+#define GDS_OA_DED__ME1_PIPE1_DED__SHIFT 0x00000005
+#define GDS_OA_DED__ME1_PIPE2_DED_MASK 0x00000040L
+#define GDS_OA_DED__ME1_PIPE2_DED__SHIFT 0x00000006
+#define GDS_OA_DED__ME1_PIPE3_DED_MASK 0x00000080L
+#define GDS_OA_DED__ME1_PIPE3_DED__SHIFT 0x00000007
+#define GDS_OA_DED__ME2_PIPE0_DED_MASK 0x00000100L
+#define GDS_OA_DED__ME2_PIPE0_DED__SHIFT 0x00000008
+#define GDS_OA_DED__ME2_PIPE1_DED_MASK 0x00000200L
+#define GDS_OA_DED__ME2_PIPE1_DED__SHIFT 0x00000009
+#define GDS_OA_DED__ME2_PIPE2_DED_MASK 0x00000400L
+#define GDS_OA_DED__ME2_PIPE2_DED__SHIFT 0x0000000a
+#define GDS_OA_DED__ME2_PIPE3_DED_MASK 0x00000800L
+#define GDS_OA_DED__ME2_PIPE3_DED__SHIFT 0x0000000b
+#define GDS_OA_DED__UNUSED0_MASK 0x00000008L
+#define GDS_OA_DED__UNUSED0__SHIFT 0x00000003
+#define GDS_OA_DED__UNUSED1_MASK 0xfffff000L
+#define GDS_OA_DED__UNUSED1__SHIFT 0x0000000c
+#define GDS_PERFCOUNTER0_HI__PERFCOUNTER_HI_MASK 0xffffffffL
+#define GDS_PERFCOUNTER0_HI__PERFCOUNTER_HI__SHIFT 0x00000000
+#define GDS_PERFCOUNTER0_LO__PERFCOUNTER_LO_MASK 0xffffffffL
+#define GDS_PERFCOUNTER0_LO__PERFCOUNTER_LO__SHIFT 0x00000000
+#define GDS_PERFCOUNTER0_SELECT1__PERFCOUNTER_SELECT2_MASK 0x000003ffL
+#define GDS_PERFCOUNTER0_SELECT1__PERFCOUNTER_SELECT2__SHIFT 0x00000000
+#define GDS_PERFCOUNTER0_SELECT1__PERFCOUNTER_SELECT3_MASK 0x000ffc00L
+#define GDS_PERFCOUNTER0_SELECT1__PERFCOUNTER_SELECT3__SHIFT 0x0000000a
+#define GDS_PERFCOUNTER0_SELECT__CNTR_MODE_MASK 0x00f00000L
+#define GDS_PERFCOUNTER0_SELECT__CNTR_MODE__SHIFT 0x00000014
+#define GDS_PERFCOUNTER0_SELECT__PERFCOUNTER_SELECT1_MASK 0x000ffc00L
+#define GDS_PERFCOUNTER0_SELECT__PERFCOUNTER_SELECT1__SHIFT 0x0000000a
+#define GDS_PERFCOUNTER0_SELECT__PERFCOUNTER_SELECT_MASK 0x000000ffL
+#define GDS_PERFCOUNTER0_SELECT__PERFCOUNTER_SELECT__SHIFT 0x00000000
+#define GDS_PERFCOUNTER1_HI__PERFCOUNTER_HI_MASK 0xffffffffL
+#define GDS_PERFCOUNTER1_HI__PERFCOUNTER_HI__SHIFT 0x00000000
+#define GDS_PERFCOUNTER1_LO__PERFCOUNTER_LO_MASK 0xffffffffL
+#define GDS_PERFCOUNTER1_LO__PERFCOUNTER_LO__SHIFT 0x00000000
+#define GDS_PERFCOUNTER1_SELECT__CNTR_MODE_MASK 0x00f00000L
+#define GDS_PERFCOUNTER1_SELECT__CNTR_MODE__SHIFT 0x00000014
+#define GDS_PERFCOUNTER1_SELECT__PERFCOUNTER_SELECT1_MASK 0x000ffc00L
+#define GDS_PERFCOUNTER1_SELECT__PERFCOUNTER_SELECT1__SHIFT 0x0000000a
+#define GDS_PERFCOUNTER1_SELECT__PERFCOUNTER_SELECT_MASK 0x000000ffL
+#define GDS_PERFCOUNTER1_SELECT__PERFCOUNTER_SELECT__SHIFT 0x00000000
+#define GDS_PERFCOUNTER2_HI__PERFCOUNTER_HI_MASK 0xffffffffL
+#define GDS_PERFCOUNTER2_HI__PERFCOUNTER_HI__SHIFT 0x00000000
+#define GDS_PERFCOUNTER2_LO__PERFCOUNTER_LO_MASK 0xffffffffL
+#define GDS_PERFCOUNTER2_LO__PERFCOUNTER_LO__SHIFT 0x00000000
+#define GDS_PERFCOUNTER2_SELECT__CNTR_MODE_MASK 0x00f00000L
+#define GDS_PERFCOUNTER2_SELECT__CNTR_MODE__SHIFT 0x00000014
+#define GDS_PERFCOUNTER2_SELECT__PERFCOUNTER_SELECT1_MASK 0x000ffc00L
+#define GDS_PERFCOUNTER2_SELECT__PERFCOUNTER_SELECT1__SHIFT 0x0000000a
+#define GDS_PERFCOUNTER2_SELECT__PERFCOUNTER_SELECT_MASK 0x000000ffL
+#define GDS_PERFCOUNTER2_SELECT__PERFCOUNTER_SELECT__SHIFT 0x00000000
+#define GDS_PERFCOUNTER3_HI__PERFCOUNTER_HI_MASK 0xffffffffL
+#define GDS_PERFCOUNTER3_HI__PERFCOUNTER_HI__SHIFT 0x00000000
+#define GDS_PERFCOUNTER3_LO__PERFCOUNTER_LO_MASK 0xffffffffL
+#define GDS_PERFCOUNTER3_LO__PERFCOUNTER_LO__SHIFT 0x00000000
+#define GDS_PERFCOUNTER3_SELECT__CNTR_MODE_MASK 0x00f00000L
+#define GDS_PERFCOUNTER3_SELECT__CNTR_MODE__SHIFT 0x00000014
+#define GDS_PERFCOUNTER3_SELECT__PERFCOUNTER_SELECT1_MASK 0x000ffc00L
+#define GDS_PERFCOUNTER3_SELECT__PERFCOUNTER_SELECT1__SHIFT 0x0000000a
+#define GDS_PERFCOUNTER3_SELECT__PERFCOUNTER_SELECT_MASK 0x000000ffL
+#define GDS_PERFCOUNTER3_SELECT__PERFCOUNTER_SELECT__SHIFT 0x00000000
+#define GDS_RD_ADDR__READ_ADDR_MASK 0xffffffffL
+#define GDS_RD_ADDR__READ_ADDR__SHIFT 0x00000000
+#define GDS_RD_BURST_ADDR__BURST_ADDR_MASK 0xffffffffL
+#define GDS_RD_BURST_ADDR__BURST_ADDR__SHIFT 0x00000000
+#define GDS_RD_BURST_COUNT__BURST_COUNT_MASK 0xffffffffL
+#define GDS_RD_BURST_COUNT__BURST_COUNT__SHIFT 0x00000000
+#define GDS_RD_BURST_DATA__BURST_DATA_MASK 0xffffffffL
+#define GDS_RD_BURST_DATA__BURST_DATA__SHIFT 0x00000000
+#define GDS_RD_DATA__READ_DATA_MASK 0xffffffffL
+#define GDS_RD_DATA__READ_DATA__SHIFT 0x00000000
+#define GDS_SECDED_CNT__DED_MASK 0xffff0000L
+#define GDS_SECDED_CNT__DED__SHIFT 0x00000010
+#define GDS_SECDED_CNT__SEC_MASK 0x0000ffffL
+#define GDS_SECDED_CNT__SEC__SHIFT 0x00000000
+#define GDS_WR_ADDR__WRITE_ADDR_MASK 0xffffffffL
+#define GDS_WR_ADDR__WRITE_ADDR__SHIFT 0x00000000
+#define GDS_WR_BURST_ADDR__WRITE_ADDR_MASK 0xffffffffL
+#define GDS_WR_BURST_ADDR__WRITE_ADDR__SHIFT 0x00000000
+#define GDS_WR_BURST_DATA__WRITE_DATA_MASK 0xffffffffL
+#define GDS_WR_BURST_DATA__WRITE_DATA__SHIFT 0x00000000
+#define GDS_WR_DATA__WRITE_DATA_MASK 0xffffffffL
+#define GDS_WR_DATA__WRITE_DATA__SHIFT 0x00000000
+#define GDS_WRITE_COMPLETE__WRITE_COMPLETE_MASK 0xffffffffL
+#define GDS_WRITE_COMPLETE__WRITE_COMPLETE__SHIFT 0x00000000
+#define GFX_COPY_STATE__SRC_STATE_ID_MASK 0x00000007L
+#define GFX_COPY_STATE__SRC_STATE_ID__SHIFT 0x00000000
+#define GRBM_CAM_DATA__CAM_ADDR_MASK 0x0000ffffL
+#define GRBM_CAM_DATA__CAM_ADDR__SHIFT 0x00000000
+#define GRBM_CAM_DATA__CAM_REMAPADDR_MASK 0xffff0000L
+#define GRBM_CAM_DATA__CAM_REMAPADDR__SHIFT 0x00000010
+#define GRBM_CAM_INDEX__CAM_INDEX_MASK 0x00000007L
+#define GRBM_CAM_INDEX__CAM_INDEX__SHIFT 0x00000000
+#define GRBM_CNTL__READ_TIMEOUT_MASK 0x000000ffL
+#define GRBM_CNTL__READ_TIMEOUT__SHIFT 0x00000000
+#define GRBM_DEBUG_CNTL__GRBM_DEBUG_INDEX_MASK 0x0000003fL
+#define GRBM_DEBUG_CNTL__GRBM_DEBUG_INDEX__SHIFT 0x00000000
+#define GRBM_DEBUG_DATA__DATA_MASK 0xffffffffL
+#define GRBM_DEBUG_DATA__DATA__SHIFT 0x00000000
+#define GRBM_DEBUG__DISABLE_READ_TIMEOUT_MASK 0x00000040L
+#define GRBM_DEBUG__DISABLE_READ_TIMEOUT__SHIFT 0x00000006
+#define GRBM_DEBUG__GFX_CLOCK_DOMAIN_OVERRIDE_MASK 0x00001000L
+#define GRBM_DEBUG__GFX_CLOCK_DOMAIN_OVERRIDE__SHIFT 0x0000000c
+#define GRBM_DEBUG__HYSTERESIS_GUI_ACTIVE_MASK 0x00000f00L
+#define GRBM_DEBUG__HYSTERESIS_GUI_ACTIVE__SHIFT 0x00000008
+#define GRBM_DEBUG__IGNORE_FAO_MASK 0x00000020L
+#define GRBM_DEBUG__IGNORE_FAO__SHIFT 0x00000005
+#define GRBM_DEBUG__IGNORE_RDY_MASK 0x00000002L
+#define GRBM_DEBUG__IGNORE_RDY__SHIFT 0x00000001
+#define GRBM_DEBUG_SNAPSHOT__CPF_RDY_MASK 0x00000001L
+#define GRBM_DEBUG_SNAPSHOT__CPF_RDY__SHIFT 0x00000000
+#define GRBM_DEBUG_SNAPSHOT__CPG_RDY_MASK 0x00000002L
+#define GRBM_DEBUG_SNAPSHOT__CPG_RDY__SHIFT 0x00000001
+#define GRBM_DEBUG__SNAPSHOT_FREE_CNTRS_MASK 0x00000080L
+#define GRBM_DEBUG__SNAPSHOT_FREE_CNTRS__SHIFT 0x00000007
+#define GRBM_DEBUG_SNAPSHOT__GDS_RDY_MASK 0x00000200L
+#define GRBM_DEBUG_SNAPSHOT__GDS_RDY__SHIFT 0x00000009
+#define GRBM_DEBUG_SNAPSHOT__SE0SPI_ME0PIPE0_RDY0_MASK 0x00000040L
+#define GRBM_DEBUG_SNAPSHOT__SE0SPI_ME0PIPE0_RDY0__SHIFT 0x00000006
+#define GRBM_DEBUG_SNAPSHOT__SE0SPI_ME0PIPE0_RDY1_MASK 0x00004000L
+#define GRBM_DEBUG_SNAPSHOT__SE0SPI_ME0PIPE0_RDY1__SHIFT 0x0000000e
+#define GRBM_DEBUG_SNAPSHOT__SE0SPI_ME0PIPE1_RDY0_MASK 0x00000080L
+#define GRBM_DEBUG_SNAPSHOT__SE0SPI_ME0PIPE1_RDY0__SHIFT 0x00000007
+#define GRBM_DEBUG_SNAPSHOT__SE0SPI_ME0PIPE1_RDY1_MASK 0x00008000L
+#define GRBM_DEBUG_SNAPSHOT__SE0SPI_ME0PIPE1_RDY1__SHIFT 0x0000000f
+#define GRBM_DEBUG_SNAPSHOT__SE1SPI_ME0PIPE0_RDY0_MASK 0x00000100L
+#define GRBM_DEBUG_SNAPSHOT__SE1SPI_ME0PIPE0_RDY0__SHIFT 0x00000008
+#define GRBM_DEBUG_SNAPSHOT__SE1SPI_ME0PIPE0_RDY1_MASK 0x00010000L
+#define GRBM_DEBUG_SNAPSHOT__SE1SPI_ME0PIPE0_RDY1__SHIFT 0x00000010
+#define GRBM_DEBUG_SNAPSHOT__SE1SPI_ME0PIPE1_RDY0_MASK 0x00000200L
+#define GRBM_DEBUG_SNAPSHOT__SE1SPI_ME0PIPE1_RDY0__SHIFT 0x00000009
+#define GRBM_DEBUG_SNAPSHOT__SE1SPI_ME0PIPE1_RDY1_MASK 0x00020000L
+#define GRBM_DEBUG_SNAPSHOT__SE1SPI_ME0PIPE1_RDY1__SHIFT 0x00000011
+#define GRBM_DEBUG_SNAPSHOT__SE2SPI_ME0PIPE0_RDY0_MASK 0x00000400L
+#define GRBM_DEBUG_SNAPSHOT__SE2SPI_ME0PIPE0_RDY0__SHIFT 0x0000000a
+#define GRBM_DEBUG_SNAPSHOT__SE2SPI_ME0PIPE0_RDY1_MASK 0x00040000L
+#define GRBM_DEBUG_SNAPSHOT__SE2SPI_ME0PIPE0_RDY1__SHIFT 0x00000012
+#define GRBM_DEBUG_SNAPSHOT__SE2SPI_ME0PIPE1_RDY0_MASK 0x00000800L
+#define GRBM_DEBUG_SNAPSHOT__SE2SPI_ME0PIPE1_RDY0__SHIFT 0x0000000b
+#define GRBM_DEBUG_SNAPSHOT__SE2SPI_ME0PIPE1_RDY1_MASK 0x00080000L
+#define GRBM_DEBUG_SNAPSHOT__SE2SPI_ME0PIPE1_RDY1__SHIFT 0x00000013
+#define GRBM_DEBUG_SNAPSHOT__SE3SPI_ME0PIPE0_RDY0_MASK 0x00001000L
+#define GRBM_DEBUG_SNAPSHOT__SE3SPI_ME0PIPE0_RDY0__SHIFT 0x0000000c
+#define GRBM_DEBUG_SNAPSHOT__SE3SPI_ME0PIPE0_RDY1_MASK 0x00100000L
+#define GRBM_DEBUG_SNAPSHOT__SE3SPI_ME0PIPE0_RDY1__SHIFT 0x00000014
+#define GRBM_DEBUG_SNAPSHOT__SE3SPI_ME0PIPE1_RDY0_MASK 0x00002000L
+#define GRBM_DEBUG_SNAPSHOT__SE3SPI_ME0PIPE1_RDY0__SHIFT 0x0000000d
+#define GRBM_DEBUG_SNAPSHOT__SE3SPI_ME0PIPE1_RDY1_MASK 0x00200000L
+#define GRBM_DEBUG_SNAPSHOT__SE3SPI_ME0PIPE1_RDY1__SHIFT 0x00000015
+#define GRBM_DEBUG_SNAPSHOT__SRBM_RDY_MASK 0x00000002L
+#define GRBM_DEBUG_SNAPSHOT__SRBM_RDY__SHIFT 0x00000001
+#define GRBM_DEBUG_SNAPSHOT__WD_ME0PIPE0_RDY_MASK 0x00000008L
+#define GRBM_DEBUG_SNAPSHOT__WD_ME0PIPE0_RDY__SHIFT 0x00000003
+#define GRBM_DEBUG_SNAPSHOT__WD_ME0PIPE1_RDY_MASK 0x00000010L
+#define GRBM_DEBUG_SNAPSHOT__WD_ME0PIPE1_RDY__SHIFT 0x00000004
+#define GRBM_GFX_CLKEN_CNTL__POST_DELAY_CNT_MASK 0x00001f00L
+#define GRBM_GFX_CLKEN_CNTL__POST_DELAY_CNT__SHIFT 0x00000008
+#define GRBM_GFX_CLKEN_CNTL__PREFIX_DELAY_CNT_MASK 0x0000000fL
+#define GRBM_GFX_CLKEN_CNTL__PREFIX_DELAY_CNT__SHIFT 0x00000000
+#define GRBM_GFX_INDEX__INSTANCE_BROADCAST_WRITES_MASK 0x40000000L
+#define GRBM_GFX_INDEX__INSTANCE_BROADCAST_WRITES__SHIFT 0x0000001e
+#define GRBM_GFX_INDEX__INSTANCE_INDEX_MASK 0x000000ffL
+#define GRBM_GFX_INDEX__INSTANCE_INDEX__SHIFT 0x00000000
+#define GRBM_GFX_INDEX__SE_BROADCAST_WRITES_MASK 0x80000000L
+#define GRBM_GFX_INDEX__SE_BROADCAST_WRITES__SHIFT 0x0000001f
+#define GRBM_GFX_INDEX__SE_INDEX_MASK 0x00ff0000L
+#define GRBM_GFX_INDEX__SE_INDEX__SHIFT 0x00000010
+#define GRBM_GFX_INDEX__SH_BROADCAST_WRITES_MASK 0x20000000L
+#define GRBM_GFX_INDEX__SH_BROADCAST_WRITES__SHIFT 0x0000001d
+#define GRBM_GFX_INDEX__SH_INDEX_MASK 0x0000ff00L
+#define GRBM_GFX_INDEX__SH_INDEX__SHIFT 0x00000008
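Selector registers such as GRBM_GFX_INDEX are composed the other way around: each index is shifted up and clipped to its mask, and the single-bit *_BROADCAST_WRITES fields are OR'd in as plain flags. A hedged sketch under those assumptions; grbm_gfx_index_for and REG_FIELD_SET are illustrative helpers (actually programming the register is the driver's job, not this header's), and the mask/shift values are copied from the defines above:

#include <stdint.h>
#include <stdio.h>

#define GRBM_GFX_INDEX__INSTANCE_BROADCAST_WRITES_MASK 0x40000000L
#define GRBM_GFX_INDEX__SE_INDEX_MASK                  0x00ff0000L
#define GRBM_GFX_INDEX__SE_INDEX__SHIFT                0x00000010
#define GRBM_GFX_INDEX__SH_INDEX_MASK                  0x0000ff00L
#define GRBM_GFX_INDEX__SH_INDEX__SHIFT                0x00000008

/* Place a value into a named field: shift up, then clip to the mask. */
#define REG_FIELD_SET(val, field) \
	(((uint32_t)(val) << field##__SHIFT) & field##_MASK)

/* Select one shader engine/array, broadcasting to all instances. */
static uint32_t grbm_gfx_index_for(uint32_t se, uint32_t sh)
{
	return REG_FIELD_SET(se, GRBM_GFX_INDEX__SE_INDEX) |
	       REG_FIELD_SET(sh, GRBM_GFX_INDEX__SH_INDEX) |
	       GRBM_GFX_INDEX__INSTANCE_BROADCAST_WRITES_MASK;
}

int main(void)
{
	/* se=1, sh=0, all instances -> 0x40010000 */
	printf("GRBM_GFX_INDEX = 0x%08lx\n",
	       (unsigned long)grbm_gfx_index_for(1, 0));
	return 0;
}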
+#define GRBM_INT_CNTL__GUI_IDLE_INT_ENABLE_MASK 0x00080000L
+#define GRBM_INT_CNTL__GUI_IDLE_INT_ENABLE__SHIFT 0x00000013
+#define GRBM_INT_CNTL__RDERR_INT_ENABLE_MASK 0x00000001L
+#define GRBM_INT_CNTL__RDERR_INT_ENABLE__SHIFT 0x00000000
+#define GRBM_NOWHERE__DATA_MASK 0xffffffffL
+#define GRBM_NOWHERE__DATA__SHIFT 0x00000000
+#define GRBM_PERFCOUNTER0_HI__PERFCOUNTER_HI_MASK 0xffffffffL
+#define GRBM_PERFCOUNTER0_HI__PERFCOUNTER_HI__SHIFT 0x00000000
+#define GRBM_PERFCOUNTER0_LO__PERFCOUNTER_LO_MASK 0xffffffffL
+#define GRBM_PERFCOUNTER0_LO__PERFCOUNTER_LO__SHIFT 0x00000000
+#define GRBM_PERFCOUNTER0_SELECT__BCI_BUSY_USER_DEFINED_MASK_MASK 0x02000000L
+#define GRBM_PERFCOUNTER0_SELECT__BCI_BUSY_USER_DEFINED_MASK__SHIFT 0x00000019
+#define GRBM_PERFCOUNTER0_SELECT__CB_BUSY_USER_DEFINED_MASK_MASK 0x00200000L
+#define GRBM_PERFCOUNTER0_SELECT__CB_BUSY_USER_DEFINED_MASK__SHIFT 0x00000015
+#define GRBM_PERFCOUNTER0_SELECT__CB_CLEAN_USER_DEFINED_MASK_MASK 0x00000800L
+#define GRBM_PERFCOUNTER0_SELECT__CB_CLEAN_USER_DEFINED_MASK__SHIFT 0x0000000b
+#define GRBM_PERFCOUNTER0_SELECT__CP_BUSY_USER_DEFINED_MASK_MASK 0x00400000L
+#define GRBM_PERFCOUNTER0_SELECT__CP_BUSY_USER_DEFINED_MASK__SHIFT 0x00000016
+#define GRBM_PERFCOUNTER0_SELECT__DB_BUSY_USER_DEFINED_MASK_MASK 0x00100000L
+#define GRBM_PERFCOUNTER0_SELECT__DB_BUSY_USER_DEFINED_MASK__SHIFT 0x00000014
+#define GRBM_PERFCOUNTER0_SELECT__DB_CLEAN_USER_DEFINED_MASK_MASK 0x00000400L
+#define GRBM_PERFCOUNTER0_SELECT__DB_CLEAN_USER_DEFINED_MASK__SHIFT 0x0000000a
+#define GRBM_PERFCOUNTER0_SELECT__GDS_BUSY_USER_DEFINED_MASK_MASK 0x01000000L
+#define GRBM_PERFCOUNTER0_SELECT__GDS_BUSY_USER_DEFINED_MASK__SHIFT 0x00000018
+#define GRBM_PERFCOUNTER0_SELECT__GRBM_BUSY_USER_DEFINED_MASK_MASK 0x00080000L
+#define GRBM_PERFCOUNTER0_SELECT__GRBM_BUSY_USER_DEFINED_MASK__SHIFT 0x00000013
+#define GRBM_PERFCOUNTER0_SELECT__IA_BUSY_USER_DEFINED_MASK_MASK 0x00800000L
+#define GRBM_PERFCOUNTER0_SELECT__IA_BUSY_USER_DEFINED_MASK__SHIFT 0x00000017
+#define GRBM_PERFCOUNTER0_SELECT__PA_BUSY_USER_DEFINED_MASK_MASK 0x00040000L
+#define GRBM_PERFCOUNTER0_SELECT__PA_BUSY_USER_DEFINED_MASK__SHIFT 0x00000012
+#define GRBM_PERFCOUNTER0_SELECT__PERF_SEL_MASK 0x0000003fL
+#define GRBM_PERFCOUNTER0_SELECT__PERF_SEL__SHIFT 0x00000000
+#define GRBM_PERFCOUNTER0_SELECT__RLC_BUSY_USER_DEFINED_MASK_MASK 0x04000000L
+#define GRBM_PERFCOUNTER0_SELECT__RLC_BUSY_USER_DEFINED_MASK__SHIFT 0x0000001a
+#define GRBM_PERFCOUNTER0_SELECT__SC_BUSY_USER_DEFINED_MASK_MASK 0x00020000L
+#define GRBM_PERFCOUNTER0_SELECT__SC_BUSY_USER_DEFINED_MASK__SHIFT 0x00000011
+#define GRBM_PERFCOUNTER0_SELECT__SPI_BUSY_USER_DEFINED_MASK_MASK 0x00010000L
+#define GRBM_PERFCOUNTER0_SELECT__SPI_BUSY_USER_DEFINED_MASK__SHIFT 0x00000010
+#define GRBM_PERFCOUNTER0_SELECT__SX_BUSY_USER_DEFINED_MASK_MASK 0x00004000L
+#define GRBM_PERFCOUNTER0_SELECT__SX_BUSY_USER_DEFINED_MASK__SHIFT 0x0000000e
+#define GRBM_PERFCOUNTER0_SELECT__TA_BUSY_USER_DEFINED_MASK_MASK 0x00002000L
+#define GRBM_PERFCOUNTER0_SELECT__TA_BUSY_USER_DEFINED_MASK__SHIFT 0x0000000d
+#define GRBM_PERFCOUNTER0_SELECT__TC_BUSY_USER_DEFINED_MASK_MASK 0x08000000L
+#define GRBM_PERFCOUNTER0_SELECT__TC_BUSY_USER_DEFINED_MASK__SHIFT 0x0000001b
+#define GRBM_PERFCOUNTER0_SELECT__VGT_BUSY_USER_DEFINED_MASK_MASK 0x00001000L
+#define GRBM_PERFCOUNTER0_SELECT__VGT_BUSY_USER_DEFINED_MASK__SHIFT 0x0000000c
+#define GRBM_PERFCOUNTER0_SELECT__WD_BUSY_USER_DEFINED_MASK_MASK 0x10000000L
+#define GRBM_PERFCOUNTER0_SELECT__WD_BUSY_USER_DEFINED_MASK__SHIFT 0x0000001c
+#define GRBM_PERFCOUNTER1_HI__PERFCOUNTER_HI_MASK 0xffffffffL
+#define GRBM_PERFCOUNTER1_HI__PERFCOUNTER_HI__SHIFT 0x00000000
+#define GRBM_PERFCOUNTER1_LO__PERFCOUNTER_LO_MASK 0xffffffffL
+#define GRBM_PERFCOUNTER1_LO__PERFCOUNTER_LO__SHIFT 0x00000000
+#define GRBM_PERFCOUNTER1_SELECT__BCI_BUSY_USER_DEFINED_MASK_MASK 0x02000000L
+#define GRBM_PERFCOUNTER1_SELECT__BCI_BUSY_USER_DEFINED_MASK__SHIFT 0x00000019
+#define GRBM_PERFCOUNTER1_SELECT__CB_BUSY_USER_DEFINED_MASK_MASK 0x00200000L
+#define GRBM_PERFCOUNTER1_SELECT__CB_BUSY_USER_DEFINED_MASK__SHIFT 0x00000015
+#define GRBM_PERFCOUNTER1_SELECT__CB_CLEAN_USER_DEFINED_MASK_MASK 0x00000800L
+#define GRBM_PERFCOUNTER1_SELECT__CB_CLEAN_USER_DEFINED_MASK__SHIFT 0x0000000b
+#define GRBM_PERFCOUNTER1_SELECT__CP_BUSY_USER_DEFINED_MASK_MASK 0x00400000L
+#define GRBM_PERFCOUNTER1_SELECT__CP_BUSY_USER_DEFINED_MASK__SHIFT 0x00000016
+#define GRBM_PERFCOUNTER1_SELECT__DB_BUSY_USER_DEFINED_MASK_MASK 0x00100000L
+#define GRBM_PERFCOUNTER1_SELECT__DB_BUSY_USER_DEFINED_MASK__SHIFT 0x00000014
+#define GRBM_PERFCOUNTER1_SELECT__DB_CLEAN_USER_DEFINED_MASK_MASK 0x00000400L
+#define GRBM_PERFCOUNTER1_SELECT__DB_CLEAN_USER_DEFINED_MASK__SHIFT 0x0000000a
+#define GRBM_PERFCOUNTER1_SELECT__GDS_BUSY_USER_DEFINED_MASK_MASK 0x01000000L
+#define GRBM_PERFCOUNTER1_SELECT__GDS_BUSY_USER_DEFINED_MASK__SHIFT 0x00000018
+#define GRBM_PERFCOUNTER1_SELECT__GRBM_BUSY_USER_DEFINED_MASK_MASK 0x00080000L
+#define GRBM_PERFCOUNTER1_SELECT__GRBM_BUSY_USER_DEFINED_MASK__SHIFT 0x00000013
+#define GRBM_PERFCOUNTER1_SELECT__IA_BUSY_USER_DEFINED_MASK_MASK 0x00800000L
+#define GRBM_PERFCOUNTER1_SELECT__IA_BUSY_USER_DEFINED_MASK__SHIFT 0x00000017
+#define GRBM_PERFCOUNTER1_SELECT__PA_BUSY_USER_DEFINED_MASK_MASK 0x00040000L
+#define GRBM_PERFCOUNTER1_SELECT__PA_BUSY_USER_DEFINED_MASK__SHIFT 0x00000012
+#define GRBM_PERFCOUNTER1_SELECT__PERF_SEL_MASK 0x0000003fL
+#define GRBM_PERFCOUNTER1_SELECT__PERF_SEL__SHIFT 0x00000000
+#define GRBM_PERFCOUNTER1_SELECT__RLC_BUSY_USER_DEFINED_MASK_MASK 0x04000000L
+#define GRBM_PERFCOUNTER1_SELECT__RLC_BUSY_USER_DEFINED_MASK__SHIFT 0x0000001a
+#define GRBM_PERFCOUNTER1_SELECT__SC_BUSY_USER_DEFINED_MASK_MASK 0x00020000L
+#define GRBM_PERFCOUNTER1_SELECT__SC_BUSY_USER_DEFINED_MASK__SHIFT 0x00000011
+#define GRBM_PERFCOUNTER1_SELECT__SPI_BUSY_USER_DEFINED_MASK_MASK 0x00010000L
+#define GRBM_PERFCOUNTER1_SELECT__SPI_BUSY_USER_DEFINED_MASK__SHIFT 0x00000010
+#define GRBM_PERFCOUNTER1_SELECT__SX_BUSY_USER_DEFINED_MASK_MASK 0x00004000L
+#define GRBM_PERFCOUNTER1_SELECT__SX_BUSY_USER_DEFINED_MASK__SHIFT 0x0000000e
+#define GRBM_PERFCOUNTER1_SELECT__TA_BUSY_USER_DEFINED_MASK_MASK 0x00002000L
+#define GRBM_PERFCOUNTER1_SELECT__TA_BUSY_USER_DEFINED_MASK__SHIFT 0x0000000d
+#define GRBM_PERFCOUNTER1_SELECT__TC_BUSY_USER_DEFINED_MASK_MASK 0x08000000L
+#define GRBM_PERFCOUNTER1_SELECT__TC_BUSY_USER_DEFINED_MASK__SHIFT 0x0000001b
+#define GRBM_PERFCOUNTER1_SELECT__VGT_BUSY_USER_DEFINED_MASK_MASK 0x00001000L
+#define GRBM_PERFCOUNTER1_SELECT__VGT_BUSY_USER_DEFINED_MASK__SHIFT 0x0000000c
+#define GRBM_PERFCOUNTER1_SELECT__WD_BUSY_USER_DEFINED_MASK_MASK 0x10000000L
+#define GRBM_PERFCOUNTER1_SELECT__WD_BUSY_USER_DEFINED_MASK__SHIFT 0x0000001c
+#define GRBM_PWR_CNTL__REQ_TYPE_MASK 0x0000000fL
+#define GRBM_PWR_CNTL__REQ_TYPE__SHIFT 0x00000000
+#define GRBM_PWR_CNTL__RSP_TYPE_MASK 0x000000f0L
+#define GRBM_PWR_CNTL__RSP_TYPE__SHIFT 0x00000004
+#define GRBM_READ_ERROR2__READ_REQUESTER_GDS_DMA_MASK 0x00080000L
+#define GRBM_READ_ERROR2__READ_REQUESTER_GDS_DMA__SHIFT 0x00000013
+#define GRBM_READ_ERROR2__READ_REQUESTER_ME0PIPE0_CF_MASK 0x00100000L
+#define GRBM_READ_ERROR2__READ_REQUESTER_ME0PIPE0_CF__SHIFT 0x00000014
+#define GRBM_READ_ERROR2__READ_REQUESTER_ME0PIPE0_PF_MASK 0x00200000L
+#define GRBM_READ_ERROR2__READ_REQUESTER_ME0PIPE0_PF__SHIFT 0x00000015
+#define GRBM_READ_ERROR2__READ_REQUESTER_ME0PIPE1_CF_MASK 0x00400000L
+#define GRBM_READ_ERROR2__READ_REQUESTER_ME0PIPE1_CF__SHIFT 0x00000016
+#define GRBM_READ_ERROR2__READ_REQUESTER_ME0PIPE1_PF_MASK 0x00800000L
+#define GRBM_READ_ERROR2__READ_REQUESTER_ME0PIPE1_PF__SHIFT 0x00000017
+#define GRBM_READ_ERROR2__READ_REQUESTER_ME1PIPE0_MASK 0x01000000L
+#define GRBM_READ_ERROR2__READ_REQUESTER_ME1PIPE0__SHIFT 0x00000018
+#define GRBM_READ_ERROR2__READ_REQUESTER_ME1PIPE1_MASK 0x02000000L
+#define GRBM_READ_ERROR2__READ_REQUESTER_ME1PIPE1__SHIFT 0x00000019
+#define GRBM_READ_ERROR2__READ_REQUESTER_ME1PIPE2_MASK 0x04000000L
+#define GRBM_READ_ERROR2__READ_REQUESTER_ME1PIPE2__SHIFT 0x0000001a
+#define GRBM_READ_ERROR2__READ_REQUESTER_ME1PIPE3_MASK 0x08000000L
+#define GRBM_READ_ERROR2__READ_REQUESTER_ME1PIPE3__SHIFT 0x0000001b
+#define GRBM_READ_ERROR2__READ_REQUESTER_ME2PIPE0_MASK 0x10000000L
+#define GRBM_READ_ERROR2__READ_REQUESTER_ME2PIPE0__SHIFT 0x0000001c
+#define GRBM_READ_ERROR2__READ_REQUESTER_ME2PIPE1_MASK 0x20000000L
+#define GRBM_READ_ERROR2__READ_REQUESTER_ME2PIPE1__SHIFT 0x0000001d
+#define GRBM_READ_ERROR2__READ_REQUESTER_ME2PIPE2_MASK 0x40000000L
+#define GRBM_READ_ERROR2__READ_REQUESTER_ME2PIPE2__SHIFT 0x0000001e
+#define GRBM_READ_ERROR2__READ_REQUESTER_ME2PIPE3_MASK 0x80000000L
+#define GRBM_READ_ERROR2__READ_REQUESTER_ME2PIPE3__SHIFT 0x0000001f
+#define GRBM_READ_ERROR2__READ_REQUESTER_RLC_MASK 0x00040000L
+#define GRBM_READ_ERROR2__READ_REQUESTER_RLC__SHIFT 0x00000012
+#define GRBM_READ_ERROR2__READ_REQUESTER_SRBM_MASK 0x00020000L
+#define GRBM_READ_ERROR2__READ_REQUESTER_SRBM__SHIFT 0x00000011
+#define GRBM_READ_ERROR__READ_ADDRESS_MASK 0x0003fffcL
+#define GRBM_READ_ERROR__READ_ADDRESS__SHIFT 0x00000002
+#define GRBM_READ_ERROR__READ_ERROR_MASK 0x80000000L
+#define GRBM_READ_ERROR__READ_ERROR__SHIFT 0x0000001f
+#define GRBM_READ_ERROR__READ_MEID_MASK 0x00c00000L
+#define GRBM_READ_ERROR__READ_MEID__SHIFT 0x00000016
+#define GRBM_READ_ERROR__READ_PIPEID_MASK 0x00300000L
+#define GRBM_READ_ERROR__READ_PIPEID__SHIFT 0x00000014
+#define GRBM_SCRATCH_REG0__SCRATCH_REG0_MASK 0xffffffffL
+#define GRBM_SCRATCH_REG0__SCRATCH_REG0__SHIFT 0x00000000
+#define GRBM_SCRATCH_REG1__SCRATCH_REG1_MASK 0xffffffffL
+#define GRBM_SCRATCH_REG1__SCRATCH_REG1__SHIFT 0x00000000
+#define GRBM_SCRATCH_REG2__SCRATCH_REG2_MASK 0xffffffffL
+#define GRBM_SCRATCH_REG2__SCRATCH_REG2__SHIFT 0x00000000
+#define GRBM_SCRATCH_REG3__SCRATCH_REG3_MASK 0xffffffffL
+#define GRBM_SCRATCH_REG3__SCRATCH_REG3__SHIFT 0x00000000
+#define GRBM_SCRATCH_REG4__SCRATCH_REG4_MASK 0xffffffffL
+#define GRBM_SCRATCH_REG4__SCRATCH_REG4__SHIFT 0x00000000
+#define GRBM_SCRATCH_REG5__SCRATCH_REG5_MASK 0xffffffffL
+#define GRBM_SCRATCH_REG5__SCRATCH_REG5__SHIFT 0x00000000
+#define GRBM_SCRATCH_REG6__SCRATCH_REG6_MASK 0xffffffffL
+#define GRBM_SCRATCH_REG6__SCRATCH_REG6__SHIFT 0x00000000
+#define GRBM_SCRATCH_REG7__SCRATCH_REG7_MASK 0xffffffffL
+#define GRBM_SCRATCH_REG7__SCRATCH_REG7__SHIFT 0x00000000
+#define GRBM_SE0_PERFCOUNTER_HI__PERFCOUNTER_HI_MASK 0xffffffffL
+#define GRBM_SE0_PERFCOUNTER_HI__PERFCOUNTER_HI__SHIFT 0x00000000
+#define GRBM_SE0_PERFCOUNTER_LO__PERFCOUNTER_LO_MASK 0xffffffffL
+#define GRBM_SE0_PERFCOUNTER_LO__PERFCOUNTER_LO__SHIFT 0x00000000
+#define GRBM_SE0_PERFCOUNTER_SELECT__BCI_BUSY_USER_DEFINED_MASK_MASK 0x00200000L
+#define GRBM_SE0_PERFCOUNTER_SELECT__BCI_BUSY_USER_DEFINED_MASK__SHIFT 0x00000015
+#define GRBM_SE0_PERFCOUNTER_SELECT__CB_BUSY_USER_DEFINED_MASK_MASK 0x00040000L
+#define GRBM_SE0_PERFCOUNTER_SELECT__CB_BUSY_USER_DEFINED_MASK__SHIFT 0x00000012
+#define GRBM_SE0_PERFCOUNTER_SELECT__CB_CLEAN_USER_DEFINED_MASK_MASK 0x00000800L
+#define GRBM_SE0_PERFCOUNTER_SELECT__CB_CLEAN_USER_DEFINED_MASK__SHIFT 0x0000000b
+#define GRBM_SE0_PERFCOUNTER_SELECT__DB_BUSY_USER_DEFINED_MASK_MASK 0x00020000L
+#define GRBM_SE0_PERFCOUNTER_SELECT__DB_BUSY_USER_DEFINED_MASK__SHIFT 0x00000011
+#define GRBM_SE0_PERFCOUNTER_SELECT__DB_CLEAN_USER_DEFINED_MASK_MASK 0x00000400L
+#define GRBM_SE0_PERFCOUNTER_SELECT__DB_CLEAN_USER_DEFINED_MASK__SHIFT 0x0000000a
+#define GRBM_SE0_PERFCOUNTER_SELECT__PA_BUSY_USER_DEFINED_MASK_MASK 0x00100000L
+#define GRBM_SE0_PERFCOUNTER_SELECT__PA_BUSY_USER_DEFINED_MASK__SHIFT 0x00000014
+#define GRBM_SE0_PERFCOUNTER_SELECT__PERF_SEL_MASK 0x0000003fL
+#define GRBM_SE0_PERFCOUNTER_SELECT__PERF_SEL__SHIFT 0x00000000
+#define GRBM_SE0_PERFCOUNTER_SELECT__SC_BUSY_USER_DEFINED_MASK_MASK 0x00010000L
+#define GRBM_SE0_PERFCOUNTER_SELECT__SC_BUSY_USER_DEFINED_MASK__SHIFT 0x00000010
+#define GRBM_SE0_PERFCOUNTER_SELECT__SPI_BUSY_USER_DEFINED_MASK_MASK 0x00008000L
+#define GRBM_SE0_PERFCOUNTER_SELECT__SPI_BUSY_USER_DEFINED_MASK__SHIFT 0x0000000f
+#define GRBM_SE0_PERFCOUNTER_SELECT__SX_BUSY_USER_DEFINED_MASK_MASK 0x00002000L
+#define GRBM_SE0_PERFCOUNTER_SELECT__SX_BUSY_USER_DEFINED_MASK__SHIFT 0x0000000d
+#define GRBM_SE0_PERFCOUNTER_SELECT__TA_BUSY_USER_DEFINED_MASK_MASK 0x00001000L
+#define GRBM_SE0_PERFCOUNTER_SELECT__TA_BUSY_USER_DEFINED_MASK__SHIFT 0x0000000c
+#define GRBM_SE0_PERFCOUNTER_SELECT__VGT_BUSY_USER_DEFINED_MASK_MASK 0x00080000L
+#define GRBM_SE0_PERFCOUNTER_SELECT__VGT_BUSY_USER_DEFINED_MASK__SHIFT 0x00000013
+#define GRBM_SE1_PERFCOUNTER_HI__PERFCOUNTER_HI_MASK 0xffffffffL
+#define GRBM_SE1_PERFCOUNTER_HI__PERFCOUNTER_HI__SHIFT 0x00000000
+#define GRBM_SE1_PERFCOUNTER_LO__PERFCOUNTER_LO_MASK 0xffffffffL
+#define GRBM_SE1_PERFCOUNTER_LO__PERFCOUNTER_LO__SHIFT 0x00000000
+#define GRBM_SE1_PERFCOUNTER_SELECT__BCI_BUSY_USER_DEFINED_MASK_MASK 0x00200000L
+#define GRBM_SE1_PERFCOUNTER_SELECT__BCI_BUSY_USER_DEFINED_MASK__SHIFT 0x00000015
+#define GRBM_SE1_PERFCOUNTER_SELECT__CB_BUSY_USER_DEFINED_MASK_MASK 0x00040000L
+#define GRBM_SE1_PERFCOUNTER_SELECT__CB_BUSY_USER_DEFINED_MASK__SHIFT 0x00000012
+#define GRBM_SE1_PERFCOUNTER_SELECT__CB_CLEAN_USER_DEFINED_MASK_MASK 0x00000800L
+#define GRBM_SE1_PERFCOUNTER_SELECT__CB_CLEAN_USER_DEFINED_MASK__SHIFT 0x0000000b
+#define GRBM_SE1_PERFCOUNTER_SELECT__DB_BUSY_USER_DEFINED_MASK_MASK 0x00020000L
+#define GRBM_SE1_PERFCOUNTER_SELECT__DB_BUSY_USER_DEFINED_MASK__SHIFT 0x00000011
+#define GRBM_SE1_PERFCOUNTER_SELECT__DB_CLEAN_USER_DEFINED_MASK_MASK 0x00000400L
+#define GRBM_SE1_PERFCOUNTER_SELECT__DB_CLEAN_USER_DEFINED_MASK__SHIFT 0x0000000a
+#define GRBM_SE1_PERFCOUNTER_SELECT__PA_BUSY_USER_DEFINED_MASK_MASK 0x00100000L
+#define GRBM_SE1_PERFCOUNTER_SELECT__PA_BUSY_USER_DEFINED_MASK__SHIFT 0x00000014
+#define GRBM_SE1_PERFCOUNTER_SELECT__PERF_SEL_MASK 0x0000003fL
+#define GRBM_SE1_PERFCOUNTER_SELECT__PERF_SEL__SHIFT 0x00000000
+#define GRBM_SE1_PERFCOUNTER_SELECT__SC_BUSY_USER_DEFINED_MASK_MASK 0x00010000L
+#define GRBM_SE1_PERFCOUNTER_SELECT__SC_BUSY_USER_DEFINED_MASK__SHIFT 0x00000010
+#define GRBM_SE1_PERFCOUNTER_SELECT__SPI_BUSY_USER_DEFINED_MASK_MASK 0x00008000L
+#define GRBM_SE1_PERFCOUNTER_SELECT__SPI_BUSY_USER_DEFINED_MASK__SHIFT 0x0000000f
+#define GRBM_SE1_PERFCOUNTER_SELECT__SX_BUSY_USER_DEFINED_MASK_MASK 0x00002000L
+#define GRBM_SE1_PERFCOUNTER_SELECT__SX_BUSY_USER_DEFINED_MASK__SHIFT 0x0000000d
+#define GRBM_SE1_PERFCOUNTER_SELECT__TA_BUSY_USER_DEFINED_MASK_MASK 0x00001000L
+#define GRBM_SE1_PERFCOUNTER_SELECT__TA_BUSY_USER_DEFINED_MASK__SHIFT 0x0000000c
+#define GRBM_SE1_PERFCOUNTER_SELECT__VGT_BUSY_USER_DEFINED_MASK_MASK 0x00080000L
+#define GRBM_SE1_PERFCOUNTER_SELECT__VGT_BUSY_USER_DEFINED_MASK__SHIFT 0x00000013
+#define GRBM_SKEW_CNTL__SKEW_COUNT_MASK 0x00000fc0L
+#define GRBM_SKEW_CNTL__SKEW_COUNT__SHIFT 0x00000006
+#define GRBM_SKEW_CNTL__SKEW_TOP_THRESHOLD_MASK 0x0000003fL
+#define GRBM_SKEW_CNTL__SKEW_TOP_THRESHOLD__SHIFT 0x00000000
+#define GRBM_SOFT_RESET__SOFT_RESET_CPC_MASK 0x00040000L
+#define GRBM_SOFT_RESET__SOFT_RESET_CPC__SHIFT 0x00000012
+#define GRBM_SOFT_RESET__SOFT_RESET_CPF_MASK 0x00020000L
+#define GRBM_SOFT_RESET__SOFT_RESET_CPF__SHIFT 0x00000011
+#define GRBM_SOFT_RESET__SOFT_RESET_CPG_MASK 0x00080000L
+#define GRBM_SOFT_RESET__SOFT_RESET_CPG__SHIFT 0x00000013
+#define GRBM_SOFT_RESET__SOFT_RESET_CP_MASK 0x00000001L
+#define GRBM_SOFT_RESET__SOFT_RESET_CP__SHIFT 0x00000000
+#define GRBM_SOFT_RESET__SOFT_RESET_GFX_MASK 0x00010000L
+#define GRBM_SOFT_RESET__SOFT_RESET_GFX__SHIFT 0x00000010
+#define GRBM_SOFT_RESET__SOFT_RESET_RLC_MASK 0x00000004L
+#define GRBM_SOFT_RESET__SOFT_RESET_RLC__SHIFT 0x00000002
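Single-bit fields like the SOFT_RESET_* bits above are typically used directly as flags: OR the masks in to assert a per-block reset, then clear them to deassert. A sketch of that assert/deassert pattern; the local variable stands in for a real MMIO read-modify-write, and any settle delay a given part needs between the two writes is only noted in a comment:

#include <stdint.h>
#include <stdio.h>

#define GRBM_SOFT_RESET__SOFT_RESET_CP_MASK  0x00000001L
#define GRBM_SOFT_RESET__SOFT_RESET_RLC_MASK 0x00000004L

int main(void)
{
	/* Stand-in for the live register; a driver would use MMIO here. */
	uint32_t grbm_soft_reset = 0;

	/* Assert the per-block reset bits... */
	grbm_soft_reset |= GRBM_SOFT_RESET__SOFT_RESET_CP_MASK |
			   GRBM_SOFT_RESET__SOFT_RESET_RLC_MASK;
	printf("assert:   0x%08lx\n", (unsigned long)grbm_soft_reset);

	/* ...then, after whatever settle delay the hardware requires,
	 * clear the same bits to release the blocks from reset. */
	grbm_soft_reset &= ~(GRBM_SOFT_RESET__SOFT_RESET_CP_MASK |
			     GRBM_SOFT_RESET__SOFT_RESET_RLC_MASK);
	printf("deassert: 0x%08lx\n", (unsigned long)grbm_soft_reset);
	return 0;
}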
+#define GRBM_STATUS2__CPC_BUSY_MASK 0x20000000L
+#define GRBM_STATUS2__CPC_BUSY__SHIFT 0x0000001d
+#define GRBM_STATUS2__CPF_BUSY_MASK 0x10000000L
+#define GRBM_STATUS2__CPF_BUSY__SHIFT 0x0000001c
+#define GRBM_STATUS2__CPG_BUSY_MASK 0x40000000L
+#define GRBM_STATUS2__CPG_BUSY__SHIFT 0x0000001e
+#define GRBM_STATUS2__ME0PIPE1_CF_RQ_PENDING_MASK 0x00000010L
+#define GRBM_STATUS2__ME0PIPE1_CF_RQ_PENDING__SHIFT 0x00000004
+#define GRBM_STATUS2__ME0PIPE1_CMDFIFO_AVAIL_MASK 0x0000000fL
+#define GRBM_STATUS2__ME0PIPE1_CMDFIFO_AVAIL__SHIFT 0x00000000
+#define GRBM_STATUS2__ME0PIPE1_PF_RQ_PENDING_MASK 0x00000020L
+#define GRBM_STATUS2__ME0PIPE1_PF_RQ_PENDING__SHIFT 0x00000005
+#define GRBM_STATUS2__ME1PIPE0_RQ_PENDING_MASK 0x00000040L
+#define GRBM_STATUS2__ME1PIPE0_RQ_PENDING__SHIFT 0x00000006
+#define GRBM_STATUS2__ME1PIPE1_RQ_PENDING_MASK 0x00000080L
+#define GRBM_STATUS2__ME1PIPE1_RQ_PENDING__SHIFT 0x00000007
+#define GRBM_STATUS2__ME1PIPE2_RQ_PENDING_MASK 0x00000100L
+#define GRBM_STATUS2__ME1PIPE2_RQ_PENDING__SHIFT 0x00000008
+#define GRBM_STATUS2__ME1PIPE3_RQ_PENDING_MASK 0x00000200L
+#define GRBM_STATUS2__ME1PIPE3_RQ_PENDING__SHIFT 0x00000009
+#define GRBM_STATUS2__ME2PIPE0_RQ_PENDING_MASK 0x00000400L
+#define GRBM_STATUS2__ME2PIPE0_RQ_PENDING__SHIFT 0x0000000a
+#define GRBM_STATUS2__ME2PIPE1_RQ_PENDING_MASK 0x00000800L
+#define GRBM_STATUS2__ME2PIPE1_RQ_PENDING__SHIFT 0x0000000b
+#define GRBM_STATUS2__ME2PIPE2_RQ_PENDING_MASK 0x00001000L
+#define GRBM_STATUS2__ME2PIPE2_RQ_PENDING__SHIFT 0x0000000c
+#define GRBM_STATUS2__ME2PIPE3_RQ_PENDING_MASK 0x00002000L
+#define GRBM_STATUS2__ME2PIPE3_RQ_PENDING__SHIFT 0x0000000d
+#define GRBM_STATUS2__RLC_BUSY_MASK 0x00000100L
+#define GRBM_STATUS2__RLC_BUSY__SHIFT 0x00000008
+#define GRBM_STATUS2__RLC_RQ_PENDING_MASK 0x00000001L
+#define GRBM_STATUS2__RLC_RQ_PENDING__SHIFT 0x00000000
+#define GRBM_STATUS2__TC_BUSY_MASK 0x00000200L
+#define GRBM_STATUS2__TC_BUSY__SHIFT 0x00000009
+#define GRBM_STATUS__BCI_BUSY_MASK 0x00800000L
+#define GRBM_STATUS__BCI_BUSY__SHIFT 0x00000017
+#define GRBM_STATUS__CB_BUSY_MASK 0x40000000L
+#define GRBM_STATUS__CB_BUSY__SHIFT 0x0000001e
+#define GRBM_STATUS__CB_CLEAN_MASK 0x00002000L
+#define GRBM_STATUS__CB_CLEAN__SHIFT 0x0000000d
+#define GRBM_STATUS__CP_BUSY_MASK 0x20000000L
+#define GRBM_STATUS__CP_BUSY__SHIFT 0x0000001d
+#define GRBM_STATUS__CP_COHERENCY_BUSY_MASK 0x10000000L
+#define GRBM_STATUS__CP_COHERENCY_BUSY__SHIFT 0x0000001c
+#define GRBM_STATUS__DB_BUSY_MASK 0x04000000L
+#define GRBM_STATUS__DB_BUSY__SHIFT 0x0000001a
+#define GRBM_STATUS__DB_CLEAN_MASK 0x00001000L
+#define GRBM_STATUS__DB_CLEAN__SHIFT 0x0000000c
+#define GRBM_STATUS__GDS_BUSY_MASK 0x00008000L
+#define GRBM_STATUS__GDS_BUSY__SHIFT 0x0000000f
+#define GRBM_STATUS__GDS_DMA_RQ_PENDING_MASK 0x00000200L
+#define GRBM_STATUS__GDS_DMA_RQ_PENDING__SHIFT 0x00000009
+#define GRBM_STATUS__GUI_ACTIVE_MASK 0x80000000L
+#define GRBM_STATUS__GUI_ACTIVE__SHIFT 0x0000001f
+#define GRBM_STATUS__IA_BUSY_MASK 0x00080000L
+#define GRBM_STATUS__IA_BUSY_NO_DMA_MASK 0x00040000L
+#define GRBM_STATUS__IA_BUSY_NO_DMA__SHIFT 0x00000012
+#define GRBM_STATUS__IA_BUSY__SHIFT 0x00000013
+#define GRBM_STATUS__ME0PIPE0_CF_RQ_PENDING_MASK 0x00000080L
+#define GRBM_STATUS__ME0PIPE0_CF_RQ_PENDING__SHIFT 0x00000007
+#define GRBM_STATUS__ME0PIPE0_CMDFIFO_AVAIL_MASK 0x0000000fL
+#define GRBM_STATUS__ME0PIPE0_CMDFIFO_AVAIL__SHIFT 0x00000000
+#define GRBM_STATUS__ME0PIPE0_PF_RQ_PENDING_MASK 0x00000100L
+#define GRBM_STATUS__ME0PIPE0_PF_RQ_PENDING__SHIFT 0x00000008
+#define GRBM_STATUS__PA_BUSY_MASK 0x02000000L
+#define GRBM_STATUS__PA_BUSY__SHIFT 0x00000019
+#define GRBM_STATUS__SC_BUSY_MASK 0x01000000L
+#define GRBM_STATUS__SC_BUSY__SHIFT 0x00000018
+#define GRBM_STATUS_SE0__BCI_BUSY_MASK 0x00400000L
+#define GRBM_STATUS_SE0__BCI_BUSY__SHIFT 0x00000016
+#define GRBM_STATUS_SE0__CB_BUSY_MASK 0x80000000L
+#define GRBM_STATUS_SE0__CB_BUSY__SHIFT 0x0000001f
+#define GRBM_STATUS_SE0__CB_CLEAN_MASK 0x00000004L
+#define GRBM_STATUS_SE0__CB_CLEAN__SHIFT 0x00000002
+#define GRBM_STATUS_SE0__DB_BUSY_MASK 0x40000000L
+#define GRBM_STATUS_SE0__DB_BUSY__SHIFT 0x0000001e
+#define GRBM_STATUS_SE0__DB_CLEAN_MASK 0x00000002L
+#define GRBM_STATUS_SE0__DB_CLEAN__SHIFT 0x00000001
+#define GRBM_STATUS_SE0__PA_BUSY_MASK 0x01000000L
+#define GRBM_STATUS_SE0__PA_BUSY__SHIFT 0x00000018
+#define GRBM_STATUS_SE0__SC_BUSY_MASK 0x20000000L
+#define GRBM_STATUS_SE0__SC_BUSY__SHIFT 0x0000001d
+#define GRBM_STATUS_SE0__SPI_BUSY_MASK 0x08000000L
+#define GRBM_STATUS_SE0__SPI_BUSY__SHIFT 0x0000001b
+#define GRBM_STATUS_SE0__SX_BUSY_MASK 0x04000000L
+#define GRBM_STATUS_SE0__SX_BUSY__SHIFT 0x0000001a
+#define GRBM_STATUS_SE0__TA_BUSY_MASK 0x02000000L
+#define GRBM_STATUS_SE0__TA_BUSY__SHIFT 0x00000019
+#define GRBM_STATUS_SE0__VGT_BUSY_MASK 0x00800000L
+#define GRBM_STATUS_SE0__VGT_BUSY__SHIFT 0x00000017
+#define GRBM_STATUS_SE1__BCI_BUSY_MASK 0x00400000L
+#define GRBM_STATUS_SE1__BCI_BUSY__SHIFT 0x00000016
+#define GRBM_STATUS_SE1__CB_BUSY_MASK 0x80000000L
+#define GRBM_STATUS_SE1__CB_BUSY__SHIFT 0x0000001f
+#define GRBM_STATUS_SE1__CB_CLEAN_MASK 0x00000004L
+#define GRBM_STATUS_SE1__CB_CLEAN__SHIFT 0x00000002
+#define GRBM_STATUS_SE1__DB_BUSY_MASK 0x40000000L
+#define GRBM_STATUS_SE1__DB_BUSY__SHIFT 0x0000001e
+#define GRBM_STATUS_SE1__DB_CLEAN_MASK 0x00000002L
+#define GRBM_STATUS_SE1__DB_CLEAN__SHIFT 0x00000001
+#define GRBM_STATUS_SE1__PA_BUSY_MASK 0x01000000L
+#define GRBM_STATUS_SE1__PA_BUSY__SHIFT 0x00000018
+#define GRBM_STATUS_SE1__SC_BUSY_MASK 0x20000000L
+#define GRBM_STATUS_SE1__SC_BUSY__SHIFT 0x0000001d
+#define GRBM_STATUS_SE1__SPI_BUSY_MASK 0x08000000L
+#define GRBM_STATUS_SE1__SPI_BUSY__SHIFT 0x0000001b
+#define GRBM_STATUS_SE1__SX_BUSY_MASK 0x04000000L
+#define GRBM_STATUS_SE1__SX_BUSY__SHIFT 0x0000001a
+#define GRBM_STATUS_SE1__TA_BUSY_MASK 0x02000000L
+#define GRBM_STATUS_SE1__TA_BUSY__SHIFT 0x00000019
+#define GRBM_STATUS_SE1__VGT_BUSY_MASK 0x00800000L
+#define GRBM_STATUS_SE1__VGT_BUSY__SHIFT 0x00000017
+#define GRBM_STATUS_SE2__BCI_BUSY_MASK 0x00400000L
+#define GRBM_STATUS_SE2__BCI_BUSY__SHIFT 0x00000016
+#define GRBM_STATUS_SE2__CB_BUSY_MASK 0x80000000L
+#define GRBM_STATUS_SE2__CB_BUSY__SHIFT 0x0000001f
+#define GRBM_STATUS_SE2__CB_CLEAN_MASK 0x00000004L
+#define GRBM_STATUS_SE2__CB_CLEAN__SHIFT 0x00000002
+#define GRBM_STATUS_SE2__DB_BUSY_MASK 0x40000000L
+#define GRBM_STATUS_SE2__DB_BUSY__SHIFT 0x0000001e
+#define GRBM_STATUS_SE2__DB_CLEAN_MASK 0x00000002L
+#define GRBM_STATUS_SE2__DB_CLEAN__SHIFT 0x00000001
+#define GRBM_STATUS_SE2__PA_BUSY_MASK 0x01000000L
+#define GRBM_STATUS_SE2__PA_BUSY__SHIFT 0x00000018
+#define GRBM_STATUS_SE2__SC_BUSY_MASK 0x20000000L
+#define GRBM_STATUS_SE2__SC_BUSY__SHIFT 0x0000001d
+#define GRBM_STATUS_SE2__SPI_BUSY_MASK 0x08000000L
+#define GRBM_STATUS_SE2__SPI_BUSY__SHIFT 0x0000001b
+#define GRBM_STATUS_SE2__SX_BUSY_MASK 0x04000000L
+#define GRBM_STATUS_SE2__SX_BUSY__SHIFT 0x0000001a
+#define GRBM_STATUS_SE2__TA_BUSY_MASK 0x02000000L
+#define GRBM_STATUS_SE2__TA_BUSY__SHIFT 0x00000019
+#define GRBM_STATUS_SE2__VGT_BUSY_MASK 0x00800000L
+#define GRBM_STATUS_SE2__VGT_BUSY__SHIFT 0x00000017
+#define GRBM_STATUS_SE3__BCI_BUSY_MASK 0x00400000L
+#define GRBM_STATUS_SE3__BCI_BUSY__SHIFT 0x00000016
+#define GRBM_STATUS_SE3__CB_BUSY_MASK 0x80000000L
+#define GRBM_STATUS_SE3__CB_BUSY__SHIFT 0x0000001f
+#define GRBM_STATUS_SE3__CB_CLEAN_MASK 0x00000004L
+#define GRBM_STATUS_SE3__CB_CLEAN__SHIFT 0x00000002
+#define GRBM_STATUS_SE3__DB_BUSY_MASK 0x40000000L
+#define GRBM_STATUS_SE3__DB_BUSY__SHIFT 0x0000001e
+#define GRBM_STATUS_SE3__DB_CLEAN_MASK 0x00000002L
+#define GRBM_STATUS_SE3__DB_CLEAN__SHIFT 0x00000001
+#define GRBM_STATUS_SE3__PA_BUSY_MASK 0x01000000L
+#define GRBM_STATUS_SE3__PA_BUSY__SHIFT 0x00000018
+#define GRBM_STATUS_SE3__SC_BUSY_MASK 0x20000000L
+#define GRBM_STATUS_SE3__SC_BUSY__SHIFT 0x0000001d
+#define GRBM_STATUS_SE3__SPI_BUSY_MASK 0x08000000L
+#define GRBM_STATUS_SE3__SPI_BUSY__SHIFT 0x0000001b
+#define GRBM_STATUS_SE3__SX_BUSY_MASK 0x04000000L
+#define GRBM_STATUS_SE3__SX_BUSY__SHIFT 0x0000001a
+#define GRBM_STATUS_SE3__TA_BUSY_MASK 0x02000000L
+#define GRBM_STATUS_SE3__TA_BUSY__SHIFT 0x00000019
+#define GRBM_STATUS_SE3__VGT_BUSY_MASK 0x00800000L
+#define GRBM_STATUS_SE3__VGT_BUSY__SHIFT 0x00000017
+#define GRBM_STATUS__SPI_BUSY_MASK 0x00400000L
+#define GRBM_STATUS__SPI_BUSY__SHIFT 0x00000016
+#define GRBM_STATUS__SRBM_RQ_PENDING_MASK 0x00000020L
+#define GRBM_STATUS__SRBM_RQ_PENDING__SHIFT 0x00000005
+#define GRBM_STATUS__SX_BUSY_MASK 0x00100000L
+#define GRBM_STATUS__SX_BUSY__SHIFT 0x00000014
+#define GRBM_STATUS__TA_BUSY_MASK 0x00004000L
+#define GRBM_STATUS__TA_BUSY__SHIFT 0x0000000e
+#define GRBM_STATUS__VGT_BUSY_MASK 0x00020000L
+#define GRBM_STATUS__VGT_BUSY__SHIFT 0x00000011
+#define GRBM_STATUS__WD_BUSY_MASK 0x00200000L
+#define GRBM_STATUS__WD_BUSY_NO_DMA_MASK 0x00010000L
+#define GRBM_STATUS__WD_BUSY_NO_DMA__SHIFT 0x00000010
+#define GRBM_STATUS__WD_BUSY__SHIFT 0x00000015
+#define GRBM_WAIT_IDLE_CLOCKS__WAIT_IDLE_CLOCKS_MASK 0x000000ffL
+#define GRBM_WAIT_IDLE_CLOCKS__WAIT_IDLE_CLOCKS__SHIFT 0x00000000
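+/*
+ * Editorial usage sketch, not part of the generated header: every field in
+ * this file is described by a _MASK/__SHIFT pair, and a field value is
+ * extracted as (reg & FIELD_MASK) >> FIELD__SHIFT.  For example, testing
+ * whether the graphics pipe is busy from a GRBM_STATUS read (RREG32() and
+ * the mmGRBM_STATUS offset are assumed from the matching offset header):
+ *
+ *	u32 status = RREG32(mmGRBM_STATUS);
+ *	bool gui_active = (status & GRBM_STATUS__GUI_ACTIVE_MASK) >>
+ *			  GRBM_STATUS__GUI_ACTIVE__SHIFT;
+ */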
+#define IA_CNTL_STATUS__IA_ADC_BUSY_MASK 0x00000010L
+#define IA_CNTL_STATUS__IA_ADC_BUSY__SHIFT 0x00000004
+#define IA_CNTL_STATUS__IA_BUSY_MASK 0x00000001L
+#define IA_CNTL_STATUS__IA_BUSY__SHIFT 0x00000000
+#define IA_CNTL_STATUS__IA_DMA_BUSY_MASK 0x00000002L
+#define IA_CNTL_STATUS__IA_DMA_BUSY__SHIFT 0x00000001
+#define IA_CNTL_STATUS__IA_DMA_REQ_BUSY_MASK 0x00000004L
+#define IA_CNTL_STATUS__IA_DMA_REQ_BUSY__SHIFT 0x00000002
+#define IA_CNTL_STATUS__IA_GRP_BUSY_MASK 0x00000008L
+#define IA_CNTL_STATUS__IA_GRP_BUSY__SHIFT 0x00000003
+#define IA_DEBUG_CNTL__IA_DEBUG_INDX_MASK 0x0000003fL
+#define IA_DEBUG_CNTL__IA_DEBUG_INDX__SHIFT 0x00000000
+#define IA_DEBUG_CNTL__IA_DEBUG_SEL_BUS_B_MASK 0x00000040L
+#define IA_DEBUG_CNTL__IA_DEBUG_SEL_BUS_B__SHIFT 0x00000006
+#define IA_DEBUG_DATA__DATA_MASK 0xffffffffL
+#define IA_DEBUG_DATA__DATA__SHIFT 0x00000000
+#define IA_DEBUG_REG0__core_clk_busy_MASK 0x04000000L
+#define IA_DEBUG_REG0__core_clk_busy__SHIFT 0x0000001a
+#define IA_DEBUG_REG0__dma_busy_MASK 0x00000040L
+#define IA_DEBUG_REG0__dma_busy__SHIFT 0x00000006
+#define IA_DEBUG_REG0__dma_grp_hp_valid_MASK 0x00001000L
+#define IA_DEBUG_REG0__dma_grp_hp_valid__SHIFT 0x0000000c
+#define IA_DEBUG_REG0__dma_grp_valid_MASK 0x00000400L
+#define IA_DEBUG_REG0__dma_grp_valid__SHIFT 0x0000000a
+#define IA_DEBUG_REG0__dma_req_busy_MASK 0x00000020L
+#define IA_DEBUG_REG0__dma_req_busy__SHIFT 0x00000005
+#define IA_DEBUG_REG0__grp_busy_MASK 0x00000100L
+#define IA_DEBUG_REG0__grp_busy__SHIFT 0x00000008
+#define IA_DEBUG_REG0__grp_dma_hp_read_MASK 0x00002000L
+#define IA_DEBUG_REG0__grp_dma_hp_read__SHIFT 0x0000000d
+#define IA_DEBUG_REG0__grp_dma_read_MASK 0x00000800L
+#define IA_DEBUG_REG0__grp_dma_read__SHIFT 0x0000000b
+#define IA_DEBUG_REG0__ia_busy_extended_MASK 0x00000001L
+#define IA_DEBUG_REG0__ia_busy_extended__SHIFT 0x00000000
+#define IA_DEBUG_REG0__ia_busy_MASK 0x00000004L
+#define IA_DEBUG_REG0__ia_busy__SHIFT 0x00000002
+#define IA_DEBUG_REG0__ia_nodma_busy_extended_MASK 0x00000002L
+#define IA_DEBUG_REG0__ia_nodma_busy_extended__SHIFT 0x00000001
+#define IA_DEBUG_REG0__ia_nodma_busy_MASK 0x00000008L
+#define IA_DEBUG_REG0__ia_nodma_busy__SHIFT 0x00000003
+#define IA_DEBUG_REG0__mc_xl8r_busy_MASK 0x00000080L
+#define IA_DEBUG_REG0__mc_xl8r_busy__SHIFT 0x00000007
+#define IA_DEBUG_REG0__reg_clk_busy_MASK 0x01000000L
+#define IA_DEBUG_REG0__reg_clk_busy__SHIFT 0x00000018
+#define IA_DEBUG_REG0__sclk_core_vld_MASK 0x20000000L
+#define IA_DEBUG_REG0__sclk_core_vld__SHIFT 0x0000001d
+#define IA_DEBUG_REG0__sclk_reg_vld_MASK 0x10000000L
+#define IA_DEBUG_REG0__sclk_reg_vld__SHIFT 0x0000001c
+#define IA_DEBUG_REG0__SPARE0_MASK 0x00000010L
+#define IA_DEBUG_REG0__SPARE0__SHIFT 0x00000004
+#define IA_DEBUG_REG0__SPARE1_MASK 0x00000200L
+#define IA_DEBUG_REG0__SPARE1__SHIFT 0x00000009
+#define IA_DEBUG_REG0__SPARE2_MASK 0x00ffc000L
+#define IA_DEBUG_REG0__SPARE2__SHIFT 0x0000000e
+#define IA_DEBUG_REG0__SPARE3_MASK 0x00100000L
+#define IA_DEBUG_REG0__SPARE3__SHIFT 0x00000014
+#define IA_DEBUG_REG0__SPARE4_MASK 0x08000000L
+#define IA_DEBUG_REG0__SPARE4__SHIFT 0x0000001b
+#define IA_DEBUG_REG0__SPARE5_MASK 0x40000000L
+#define IA_DEBUG_REG0__SPARE5__SHIFT 0x0000001e
+#define IA_DEBUG_REG0__SPARE6_MASK 0x80000000L
+#define IA_DEBUG_REG0__SPARE6__SHIFT 0x0000001f
+#define IA_DEBUG_REG1__current_data_valid_MASK 0x10000000L
+#define IA_DEBUG_REG1__current_data_valid__SHIFT 0x0000001c
+#define IA_DEBUG_REG1__discard_1st_chunk_MASK 0x00000100L
+#define IA_DEBUG_REG1__discard_1st_chunk__SHIFT 0x00000008
+#define IA_DEBUG_REG1__discard_2nd_chunk_MASK 0x00000200L
+#define IA_DEBUG_REG1__discard_2nd_chunk__SHIFT 0x00000009
+#define IA_DEBUG_REG1__dma_buf_type_q_MASK 0x00000060L
+#define IA_DEBUG_REG1__dma_buf_type_q__SHIFT 0x00000005
+#define IA_DEBUG_REG1__dma_data_fifo_empty_q_MASK 0x00004000L
+#define IA_DEBUG_REG1__dma_data_fifo_empty_q__SHIFT 0x0000000e
+#define IA_DEBUG_REG1__dma_data_fifo_full_MASK 0x00008000L
+#define IA_DEBUG_REG1__dma_data_fifo_full__SHIFT 0x0000000f
+#define IA_DEBUG_REG1__dma_grp_valid_MASK 0x04000000L
+#define IA_DEBUG_REG1__dma_grp_valid__SHIFT 0x0000001a
+#define IA_DEBUG_REG1__dma_input_fifo_empty_MASK 0x00000001L
+#define IA_DEBUG_REG1__dma_input_fifo_empty__SHIFT 0x00000000
+#define IA_DEBUG_REG1__dma_input_fifo_full_MASK 0x00000002L
+#define IA_DEBUG_REG1__dma_input_fifo_full__SHIFT 0x00000001
+#define IA_DEBUG_REG1__dma_mask_fifo_empty_MASK 0x00002000L
+#define IA_DEBUG_REG1__dma_mask_fifo_empty__SHIFT 0x0000000d
+#define IA_DEBUG_REG1__dma_mask_fifo_we_MASK 0x40000000L
+#define IA_DEBUG_REG1__dma_mask_fifo_we__SHIFT 0x0000001e
+#define IA_DEBUG_REG1__dma_rdreq_dr_q_MASK 0x00000008L
+#define IA_DEBUG_REG1__dma_rdreq_dr_q__SHIFT 0x00000003
+#define IA_DEBUG_REG1__dma_req_fifo_empty_MASK 0x00010000L
+#define IA_DEBUG_REG1__dma_req_fifo_empty__SHIFT 0x00000010
+#define IA_DEBUG_REG1__dma_req_fifo_full_MASK 0x00020000L
+#define IA_DEBUG_REG1__dma_req_fifo_full__SHIFT 0x00000011
+#define IA_DEBUG_REG1__dma_req_path_q_MASK 0x00000080L
+#define IA_DEBUG_REG1__dma_req_path_q__SHIFT 0x00000007
+#define IA_DEBUG_REG1__dma_ret_data_we_q_MASK 0x80000000L
+#define IA_DEBUG_REG1__dma_ret_data_we_q__SHIFT 0x0000001f
+#define IA_DEBUG_REG1__dma_skid_fifo_empty_MASK 0x01000000L
+#define IA_DEBUG_REG1__dma_skid_fifo_empty__SHIFT 0x00000018
+#define IA_DEBUG_REG1__dma_skid_fifo_full_MASK 0x02000000L
+#define IA_DEBUG_REG1__dma_skid_fifo_full__SHIFT 0x00000019
+#define IA_DEBUG_REG1__dma_tc_ret_sel_q_MASK 0x00000800L
+#define IA_DEBUG_REG1__dma_tc_ret_sel_q__SHIFT 0x0000000b
+#define IA_DEBUG_REG1__dma_zero_indices_q_MASK 0x00000010L
+#define IA_DEBUG_REG1__dma_zero_indices_q__SHIFT 0x00000004
+#define IA_DEBUG_REG1__grp_dma_read_MASK 0x08000000L
+#define IA_DEBUG_REG1__grp_dma_read__SHIFT 0x0000001b
+#define IA_DEBUG_REG1__last_rdreq_in_dma_op_MASK 0x00001000L
+#define IA_DEBUG_REG1__last_rdreq_in_dma_op__SHIFT 0x0000000c
+#define IA_DEBUG_REG1__out_of_range_r2_q_MASK 0x20000000L
+#define IA_DEBUG_REG1__out_of_range_r2_q__SHIFT 0x0000001d
+#define IA_DEBUG_REG1__second_tc_ret_data_q_MASK 0x00000400L
+#define IA_DEBUG_REG1__second_tc_ret_data_q__SHIFT 0x0000000a
+#define IA_DEBUG_REG1__stage2_dr_MASK 0x00040000L
+#define IA_DEBUG_REG1__stage2_dr__SHIFT 0x00000012
+#define IA_DEBUG_REG1__stage2_rtr_MASK 0x00080000L
+#define IA_DEBUG_REG1__stage2_rtr__SHIFT 0x00000013
+#define IA_DEBUG_REG1__stage3_dr_MASK 0x00100000L
+#define IA_DEBUG_REG1__stage3_dr__SHIFT 0x00000014
+#define IA_DEBUG_REG1__stage3_rtr_MASK 0x00200000L
+#define IA_DEBUG_REG1__stage3_rtr__SHIFT 0x00000015
+#define IA_DEBUG_REG1__stage4_dr_MASK 0x00400000L
+#define IA_DEBUG_REG1__stage4_dr__SHIFT 0x00000016
+#define IA_DEBUG_REG1__stage4_rtr_MASK 0x00800000L
+#define IA_DEBUG_REG1__stage4_rtr__SHIFT 0x00000017
+#define IA_DEBUG_REG1__start_new_packet_MASK 0x00000004L
+#define IA_DEBUG_REG1__start_new_packet__SHIFT 0x00000002
+#define IA_DEBUG_REG2__hp_current_data_valid_MASK 0x10000000L
+#define IA_DEBUG_REG2__hp_current_data_valid__SHIFT 0x0000001c
+#define IA_DEBUG_REG2__hp_discard_1st_chunk_MASK 0x00000100L
+#define IA_DEBUG_REG2__hp_discard_1st_chunk__SHIFT 0x00000008
+#define IA_DEBUG_REG2__hp_discard_2nd_chunk_MASK 0x00000200L
+#define IA_DEBUG_REG2__hp_discard_2nd_chunk__SHIFT 0x00000009
+#define IA_DEBUG_REG2__hp_dma_buf_type_q_MASK 0x00000060L
+#define IA_DEBUG_REG2__hp_dma_buf_type_q__SHIFT 0x00000005
+#define IA_DEBUG_REG2__hp_dma_data_fifo_empty_q_MASK 0x00004000L
+#define IA_DEBUG_REG2__hp_dma_data_fifo_empty_q__SHIFT 0x0000000e
+#define IA_DEBUG_REG2__hp_dma_data_fifo_full_MASK 0x00008000L
+#define IA_DEBUG_REG2__hp_dma_data_fifo_full__SHIFT 0x0000000f
+#define IA_DEBUG_REG2__hp_dma_grp_valid_MASK 0x04000000L
+#define IA_DEBUG_REG2__hp_dma_grp_valid__SHIFT 0x0000001a
+#define IA_DEBUG_REG2__hp_dma_input_fifo_empty_MASK 0x00000001L
+#define IA_DEBUG_REG2__hp_dma_input_fifo_empty__SHIFT 0x00000000
+#define IA_DEBUG_REG2__hp_dma_input_fifo_full_MASK 0x00000002L
+#define IA_DEBUG_REG2__hp_dma_input_fifo_full__SHIFT 0x00000001
+#define IA_DEBUG_REG2__hp_dma_mask_fifo_empty_MASK 0x00002000L
+#define IA_DEBUG_REG2__hp_dma_mask_fifo_empty__SHIFT 0x0000000d
+#define IA_DEBUG_REG2__hp_dma_mask_fifo_we_MASK 0x40000000L
+#define IA_DEBUG_REG2__hp_dma_mask_fifo_we__SHIFT 0x0000001e
+#define IA_DEBUG_REG2__hp_dma_rdreq_dr_q_MASK 0x00000008L
+#define IA_DEBUG_REG2__hp_dma_rdreq_dr_q__SHIFT 0x00000003
+#define IA_DEBUG_REG2__hp_dma_req_fifo_empty_MASK 0x00010000L
+#define IA_DEBUG_REG2__hp_dma_req_fifo_empty__SHIFT 0x00000010
+#define IA_DEBUG_REG2__hp_dma_req_fifo_full_MASK 0x00020000L
+#define IA_DEBUG_REG2__hp_dma_req_fifo_full__SHIFT 0x00000011
+#define IA_DEBUG_REG2__hp_dma_req_path_q_MASK 0x00000080L
+#define IA_DEBUG_REG2__hp_dma_req_path_q__SHIFT 0x00000007
+#define IA_DEBUG_REG2__hp_dma_ret_data_we_q_MASK 0x80000000L
+#define IA_DEBUG_REG2__hp_dma_ret_data_we_q__SHIFT 0x0000001f
+#define IA_DEBUG_REG2__hp_dma_skid_fifo_empty_MASK 0x01000000L
+#define IA_DEBUG_REG2__hp_dma_skid_fifo_empty__SHIFT 0x00000018
+#define IA_DEBUG_REG2__hp_dma_skid_fifo_full_MASK 0x02000000L
+#define IA_DEBUG_REG2__hp_dma_skid_fifo_full__SHIFT 0x00000019
+#define IA_DEBUG_REG2__hp_dma_tc_ret_sel_q_MASK 0x00000800L
+#define IA_DEBUG_REG2__hp_dma_tc_ret_sel_q__SHIFT 0x0000000b
+#define IA_DEBUG_REG2__hp_dma_zero_indices_q_MASK 0x00000010L
+#define IA_DEBUG_REG2__hp_dma_zero_indices_q__SHIFT 0x00000004
+#define IA_DEBUG_REG2__hp_grp_dma_read_MASK 0x08000000L
+#define IA_DEBUG_REG2__hp_grp_dma_read__SHIFT 0x0000001b
+#define IA_DEBUG_REG2__hp_last_rdreq_in_dma_op_MASK 0x00001000L
+#define IA_DEBUG_REG2__hp_last_rdreq_in_dma_op__SHIFT 0x0000000c
+#define IA_DEBUG_REG2__hp_out_of_range_r2_q_MASK 0x20000000L
+#define IA_DEBUG_REG2__hp_out_of_range_r2_q__SHIFT 0x0000001d
+#define IA_DEBUG_REG2__hp_second_tc_ret_data_q_MASK 0x00000400L
+#define IA_DEBUG_REG2__hp_second_tc_ret_data_q__SHIFT 0x0000000a
+#define IA_DEBUG_REG2__hp_stage2_dr_MASK 0x00040000L
+#define IA_DEBUG_REG2__hp_stage2_dr__SHIFT 0x00000012
+#define IA_DEBUG_REG2__hp_stage2_rtr_MASK 0x00080000L
+#define IA_DEBUG_REG2__hp_stage2_rtr__SHIFT 0x00000013
+#define IA_DEBUG_REG2__hp_stage3_dr_MASK 0x00100000L
+#define IA_DEBUG_REG2__hp_stage3_dr__SHIFT 0x00000014
+#define IA_DEBUG_REG2__hp_stage3_rtr_MASK 0x00200000L
+#define IA_DEBUG_REG2__hp_stage3_rtr__SHIFT 0x00000015
+#define IA_DEBUG_REG2__hp_stage4_dr_MASK 0x00400000L
+#define IA_DEBUG_REG2__hp_stage4_dr__SHIFT 0x00000016
+#define IA_DEBUG_REG2__hp_stage4_rtr_MASK 0x00800000L
+#define IA_DEBUG_REG2__hp_stage4_rtr__SHIFT 0x00000017
+#define IA_DEBUG_REG2__hp_start_new_packet_MASK 0x00000004L
+#define IA_DEBUG_REG2__hp_start_new_packet__SHIFT 0x00000002
+#define IA_DEBUG_REG3__discard_1st_chunk_MASK 0x04000000L
+#define IA_DEBUG_REG3__discard_1st_chunk__SHIFT 0x0000001a
+#define IA_DEBUG_REG3__discard_2nd_chunk_MASK 0x08000000L
+#define IA_DEBUG_REG3__discard_2nd_chunk__SHIFT 0x0000001b
+#define IA_DEBUG_REG3__dma_pipe0_rdreq_eop_out_MASK 0x00000008L
+#define IA_DEBUG_REG3__dma_pipe0_rdreq_eop_out__SHIFT 0x00000003
+#define IA_DEBUG_REG3__dma_pipe0_rdreq_null_out_MASK 0x00000004L
+#define IA_DEBUG_REG3__dma_pipe0_rdreq_null_out__SHIFT 0x00000002
+#define IA_DEBUG_REG3__dma_pipe0_rdreq_read_MASK 0x00000002L
+#define IA_DEBUG_REG3__dma_pipe0_rdreq_read__SHIFT 0x00000001
+#define IA_DEBUG_REG3__dma_pipe0_rdreq_use_tc_out_MASK 0x00000010L
+#define IA_DEBUG_REG3__dma_pipe0_rdreq_use_tc_out__SHIFT 0x00000004
+#define IA_DEBUG_REG3__dma_pipe0_rdreq_valid_MASK 0x00000001L
+#define IA_DEBUG_REG3__dma_pipe0_rdreq_valid__SHIFT 0x00000000
+#define IA_DEBUG_REG3__dma_pipe1_rdreq_eop_out_MASK 0x00000800L
+#define IA_DEBUG_REG3__dma_pipe1_rdreq_eop_out__SHIFT 0x0000000b
+#define IA_DEBUG_REG3__dma_pipe1_rdreq_null_out_MASK 0x00000400L
+#define IA_DEBUG_REG3__dma_pipe1_rdreq_null_out__SHIFT 0x0000000a
+#define IA_DEBUG_REG3__dma_pipe1_rdreq_read_MASK 0x00000200L
+#define IA_DEBUG_REG3__dma_pipe1_rdreq_read__SHIFT 0x00000009
+#define IA_DEBUG_REG3__dma_pipe1_rdreq_use_tc_out_MASK 0x00001000L
+#define IA_DEBUG_REG3__dma_pipe1_rdreq_use_tc_out__SHIFT 0x0000000c
+#define IA_DEBUG_REG3__dma_pipe1_rdreq_valid_MASK 0x00000100L
+#define IA_DEBUG_REG3__dma_pipe1_rdreq_valid__SHIFT 0x00000008
+#define IA_DEBUG_REG3__dma_rdreq_send_out_MASK 0x00008000L
+#define IA_DEBUG_REG3__dma_rdreq_send_out__SHIFT 0x0000000f
+#define IA_DEBUG_REG3__grp_dma_draw_is_pipe0_MASK 0x00000020L
+#define IA_DEBUG_REG3__grp_dma_draw_is_pipe0__SHIFT 0x00000005
+#define IA_DEBUG_REG3__ia_mc_rdreq_rtr_q_MASK 0x00002000L
+#define IA_DEBUG_REG3__ia_mc_rdreq_rtr_q__SHIFT 0x0000000d
+#define IA_DEBUG_REG3__ia_tc_rdreq_rtr_q_MASK 0x00040000L
+#define IA_DEBUG_REG3__ia_tc_rdreq_rtr_q__SHIFT 0x00000012
+#define IA_DEBUG_REG3__IA_TC_rdreq_send_out_MASK 0x20000000L
+#define IA_DEBUG_REG3__IA_TC_rdreq_send_out__SHIFT 0x0000001d
+#define IA_DEBUG_REG3__last_tc_req_p1_MASK 0x10000000L
+#define IA_DEBUG_REG3__last_tc_req_p1__SHIFT 0x0000001c
+#define IA_DEBUG_REG3__mc_out_rtr_MASK 0x00004000L
+#define IA_DEBUG_REG3__mc_out_rtr__SHIFT 0x0000000e
+#define IA_DEBUG_REG3__must_service_pipe0_req_MASK 0x00000040L
+#define IA_DEBUG_REG3__must_service_pipe0_req__SHIFT 0x00000006
+#define IA_DEBUG_REG3__pair0_valid_p1_MASK 0x00100000L
+#define IA_DEBUG_REG3__pair0_valid_p1__SHIFT 0x00000014
+#define IA_DEBUG_REG3__pair1_valid_p1_MASK 0x00200000L
+#define IA_DEBUG_REG3__pair1_valid_p1__SHIFT 0x00000015
+#define IA_DEBUG_REG3__pair2_valid_p1_MASK 0x00400000L
+#define IA_DEBUG_REG3__pair2_valid_p1__SHIFT 0x00000016
+#define IA_DEBUG_REG3__pair3_valid_p1_MASK 0x00800000L
+#define IA_DEBUG_REG3__pair3_valid_p1__SHIFT 0x00000017
+#define IA_DEBUG_REG3__pipe0_dr_MASK 0x00010000L
+#define IA_DEBUG_REG3__pipe0_dr__SHIFT 0x00000010
+#define IA_DEBUG_REG3__pipe0_rtr_MASK 0x00020000L
+#define IA_DEBUG_REG3__pipe0_rtr__SHIFT 0x00000011
+#define IA_DEBUG_REG3__send_pipe1_req_MASK 0x00000080L
+#define IA_DEBUG_REG3__send_pipe1_req__SHIFT 0x00000007
+#define IA_DEBUG_REG3__TAP_IA_rdret_vld_in_MASK 0x80000000L
+#define IA_DEBUG_REG3__TAP_IA_rdret_vld_in__SHIFT 0x0000001f
+#define IA_DEBUG_REG3__TC_IA_rdret_valid_in_MASK 0x40000000L
+#define IA_DEBUG_REG3__TC_IA_rdret_valid_in__SHIFT 0x0000001e
+#define IA_DEBUG_REG3__tc_out_rtr_MASK 0x00080000L
+#define IA_DEBUG_REG3__tc_out_rtr__SHIFT 0x00000013
+#define IA_DEBUG_REG3__tc_req_count_q_MASK 0x03000000L
+#define IA_DEBUG_REG3__tc_req_count_q__SHIFT 0x00000018
+#define IA_DEBUG_REG4__current_shift_is_vect1_q_MASK 0x80000000L
+#define IA_DEBUG_REG4__current_shift_is_vect1_q__SHIFT 0x0000001f
+#define IA_DEBUG_REG4__di_event_flag_p1_q_MASK 0x00100000L
+#define IA_DEBUG_REG4__di_event_flag_p1_q__SHIFT 0x00000014
+#define IA_DEBUG_REG4__di_first_group_of_draw_q_MASK 0x20000000L
+#define IA_DEBUG_REG4__di_first_group_of_draw_q__SHIFT 0x0000001d
+#define IA_DEBUG_REG4__di_major_mode_p1_q_MASK 0x00010000L
+#define IA_DEBUG_REG4__di_major_mode_p1_q__SHIFT 0x00000010
+#define IA_DEBUG_REG4__di_source_select_p1_q_MASK 0x0c000000L
+#define IA_DEBUG_REG4__di_source_select_p1_q__SHIFT 0x0000001a
+#define IA_DEBUG_REG4__di_state_sel_p1_q_MASK 0x00e00000L
+#define IA_DEBUG_REG4__di_state_sel_p1_q__SHIFT 0x00000015
+#define IA_DEBUG_REG4__draw_opaq_active_q_MASK 0x02000000L
+#define IA_DEBUG_REG4__draw_opaq_active_q__SHIFT 0x00000019
+#define IA_DEBUG_REG4__draw_opaq_en_p1_q_MASK 0x01000000L
+#define IA_DEBUG_REG4__draw_opaq_en_p1_q__SHIFT 0x00000018
+#define IA_DEBUG_REG4__grp_se0_fifo_empty_MASK 0x00000040L
+#define IA_DEBUG_REG4__grp_se0_fifo_empty__SHIFT 0x00000006
+#define IA_DEBUG_REG4__grp_se0_fifo_full_MASK 0x00000080L
+#define IA_DEBUG_REG4__grp_se0_fifo_full__SHIFT 0x00000007
+#define IA_DEBUG_REG4__gs_mode_p1_q_MASK 0x000e0000L
+#define IA_DEBUG_REG4__gs_mode_p1_q__SHIFT 0x00000011
+#define IA_DEBUG_REG4__ia_se1vgt_prim_rtr_q_MASK 0x00008000L
+#define IA_DEBUG_REG4__ia_se1vgt_prim_rtr_q__SHIFT 0x0000000f
+#define IA_DEBUG_REG4__ia_vgt_prim_rtr_q_MASK 0x00004000L
+#define IA_DEBUG_REG4__ia_vgt_prim_rtr_q__SHIFT 0x0000000e
+#define IA_DEBUG_REG4__last_shift_of_draw_MASK 0x40000000L
+#define IA_DEBUG_REG4__last_shift_of_draw__SHIFT 0x0000001e
+#define IA_DEBUG_REG4__pipe0_dr_MASK 0x00000001L
+#define IA_DEBUG_REG4__pipe0_dr__SHIFT 0x00000000
+#define IA_DEBUG_REG4__pipe0_rtr_MASK 0x00000100L
+#define IA_DEBUG_REG4__pipe0_rtr__SHIFT 0x00000008
+#define IA_DEBUG_REG4__pipe1_dr_MASK 0x00000002L
+#define IA_DEBUG_REG4__pipe1_dr__SHIFT 0x00000001
+#define IA_DEBUG_REG4__pipe1_rtr_MASK 0x00000200L
+#define IA_DEBUG_REG4__pipe1_rtr__SHIFT 0x00000009
+#define IA_DEBUG_REG4__pipe2_dr_MASK 0x00000004L
+#define IA_DEBUG_REG4__pipe2_dr__SHIFT 0x00000002
+#define IA_DEBUG_REG4__pipe2_rtr_MASK 0x00000400L
+#define IA_DEBUG_REG4__pipe2_rtr__SHIFT 0x0000000a
+#define IA_DEBUG_REG4__pipe3_dr_MASK 0x00000008L
+#define IA_DEBUG_REG4__pipe3_dr__SHIFT 0x00000003
+#define IA_DEBUG_REG4__pipe3_rtr_MASK 0x00000800L
+#define IA_DEBUG_REG4__pipe3_rtr__SHIFT 0x0000000b
+#define IA_DEBUG_REG4__pipe4_dr_MASK 0x00000010L
+#define IA_DEBUG_REG4__pipe4_dr__SHIFT 0x00000004
+#define IA_DEBUG_REG4__pipe4_rtr_MASK 0x00001000L
+#define IA_DEBUG_REG4__pipe4_rtr__SHIFT 0x0000000c
+#define IA_DEBUG_REG4__pipe5_dr_MASK 0x00000020L
+#define IA_DEBUG_REG4__pipe5_dr__SHIFT 0x00000005
+#define IA_DEBUG_REG4__pipe5_rtr_MASK 0x00002000L
+#define IA_DEBUG_REG4__pipe5_rtr__SHIFT 0x0000000d
+#define IA_DEBUG_REG4__ready_to_read_di_MASK 0x10000000L
+#define IA_DEBUG_REG4__ready_to_read_di__SHIFT 0x0000001c
+#define IA_DEBUG_REG5__di_index_counter_q_15_0_MASK 0x0000ffffL
+#define IA_DEBUG_REG5__di_index_counter_q_15_0__SHIFT 0x00000000
+#define IA_DEBUG_REG5__draw_input_fifo_empty_MASK 0x80000000L
+#define IA_DEBUG_REG5__draw_input_fifo_empty__SHIFT 0x0000001f
+#define IA_DEBUG_REG5__draw_input_fifo_full_MASK 0x40000000L
+#define IA_DEBUG_REG5__draw_input_fifo_full__SHIFT 0x0000001e
+#define IA_DEBUG_REG5__instanceid_13_0_MASK 0x3fff0000L
+#define IA_DEBUG_REG5__instanceid_13_0__SHIFT 0x00000010
+#define IA_DEBUG_REG6__after_group_partial_MASK 0x00400000L
+#define IA_DEBUG_REG6__after_group_partial__SHIFT 0x00000016
+#define IA_DEBUG_REG6__current_shift_q_MASK 0x0000000fL
+#define IA_DEBUG_REG6__current_shift_q__SHIFT 0x00000000
+#define IA_DEBUG_REG6__current_stride_pre_MASK 0x000000f0L
+#define IA_DEBUG_REG6__current_stride_pre__SHIFT 0x00000004
+#define IA_DEBUG_REG6__current_stride_q_MASK 0x00001f00L
+#define IA_DEBUG_REG6__current_stride_q__SHIFT 0x00000008
+#define IA_DEBUG_REG6__curr_prim_partial_MASK 0x00008000L
+#define IA_DEBUG_REG6__curr_prim_partial__SHIFT 0x0000000f
+#define IA_DEBUG_REG6__extract_group_MASK 0x00800000L
+#define IA_DEBUG_REG6__extract_group__SHIFT 0x00000017
+#define IA_DEBUG_REG6__first_group_partial_MASK 0x00002000L
+#define IA_DEBUG_REG6__first_group_partial__SHIFT 0x0000000d
+#define IA_DEBUG_REG6__grp_shift_debug_data_MASK 0xff000000L
+#define IA_DEBUG_REG6__grp_shift_debug_data__SHIFT 0x00000018
+#define IA_DEBUG_REG6__next_group_partial_MASK 0x00200000L
+#define IA_DEBUG_REG6__next_group_partial__SHIFT 0x00000015
+#define IA_DEBUG_REG6__next_stride_q_MASK 0x001f0000L
+#define IA_DEBUG_REG6__next_stride_q__SHIFT 0x00000010
+#define IA_DEBUG_REG6__second_group_partial_MASK 0x00004000L
+#define IA_DEBUG_REG6__second_group_partial__SHIFT 0x0000000e
+#define IA_DEBUG_REG7__indx_shift_is_one_p2_q_MASK 0x02000000L
+#define IA_DEBUG_REG7__indx_shift_is_one_p2_q__SHIFT 0x00000019
+#define IA_DEBUG_REG7__indx_shift_is_two_p2_q_MASK 0x04000000L
+#define IA_DEBUG_REG7__indx_shift_is_two_p2_q__SHIFT 0x0000001a
+#define IA_DEBUG_REG7__indx_stride_is_four_p2_q_MASK 0x08000000L
+#define IA_DEBUG_REG7__indx_stride_is_four_p2_q__SHIFT 0x0000001b
+#define IA_DEBUG_REG7__last_group_of_draw_p2_q_MASK 0x00800000L
+#define IA_DEBUG_REG7__last_group_of_draw_p2_q__SHIFT 0x00000017
+#define IA_DEBUG_REG7__num_indx_in_group_p2_q_MASK 0x00700000L
+#define IA_DEBUG_REG7__num_indx_in_group_p2_q__SHIFT 0x00000014
+#define IA_DEBUG_REG7__reset_indx_state_q_MASK 0x0000000fL
+#define IA_DEBUG_REG7__reset_indx_state_q__SHIFT 0x00000000
+#define IA_DEBUG_REG7__shift_event_flag_p2_q_MASK 0x01000000L
+#define IA_DEBUG_REG7__shift_event_flag_p2_q__SHIFT 0x00000018
+#define IA_DEBUG_REG7__shift_prim0_partial_p3_q_MASK 0x80000000L
+#define IA_DEBUG_REG7__shift_prim0_partial_p3_q__SHIFT 0x0000001f
+#define IA_DEBUG_REG7__shift_prim0_reset_p3_q_MASK 0x40000000L
+#define IA_DEBUG_REG7__shift_prim0_reset_p3_q__SHIFT 0x0000001e
+#define IA_DEBUG_REG7__shift_prim1_partial_p3_q_MASK 0x20000000L
+#define IA_DEBUG_REG7__shift_prim1_partial_p3_q__SHIFT 0x0000001d
+#define IA_DEBUG_REG7__shift_prim1_reset_p3_q_MASK 0x10000000L
+#define IA_DEBUG_REG7__shift_prim1_reset_p3_q__SHIFT 0x0000001c
+#define IA_DEBUG_REG7__shift_vect0_reset_match_p2_q_MASK 0x0000f000L
+#define IA_DEBUG_REG7__shift_vect0_reset_match_p2_q__SHIFT 0x0000000c
+#define IA_DEBUG_REG7__shift_vect1_reset_match_p2_q_MASK 0x000f0000L
+#define IA_DEBUG_REG7__shift_vect1_reset_match_p2_q__SHIFT 0x00000010
+#define IA_DEBUG_REG7__shift_vect1_valid_p2_q_MASK 0x00000f00L
+#define IA_DEBUG_REG7__shift_vect1_valid_p2_q__SHIFT 0x00000008
+#define IA_DEBUG_REG7__shift_vect_valid_p2_q_MASK 0x000000f0L
+#define IA_DEBUG_REG7__shift_vect_valid_p2_q__SHIFT 0x00000004
+#define IA_DEBUG_REG8__di_prim_type_p1_q_MASK 0x0000001fL
+#define IA_DEBUG_REG8__di_prim_type_p1_q__SHIFT 0x00000000
+#define IA_DEBUG_REG8__grp_components_valid_MASK 0xf0000000L
+#define IA_DEBUG_REG8__grp_components_valid__SHIFT 0x0000001c
+#define IA_DEBUG_REG8__grp_continued_MASK 0x00000800L
+#define IA_DEBUG_REG8__grp_continued__SHIFT 0x0000000b
+#define IA_DEBUG_REG8__grp_eopg_MASK 0x04000000L
+#define IA_DEBUG_REG8__grp_eopg__SHIFT 0x0000001a
+#define IA_DEBUG_REG8__grp_eop_MASK 0x02000000L
+#define IA_DEBUG_REG8__grp_eop__SHIFT 0x00000019
+#define IA_DEBUG_REG8__grp_event_flag_MASK 0x08000000L
+#define IA_DEBUG_REG8__grp_event_flag__SHIFT 0x0000001b
+#define IA_DEBUG_REG8__grp_null_primitive_MASK 0x01000000L
+#define IA_DEBUG_REG8__grp_null_primitive__SHIFT 0x00000018
+#define IA_DEBUG_REG8__grp_output_path_MASK 0x00e00000L
+#define IA_DEBUG_REG8__grp_output_path__SHIFT 0x00000015
+#define IA_DEBUG_REG8__grp_state_sel_MASK 0x00007000L
+#define IA_DEBUG_REG8__grp_state_sel__SHIFT 0x0000000c
+#define IA_DEBUG_REG8__grp_sub_prim_type_MASK 0x001f8000L
+#define IA_DEBUG_REG8__grp_sub_prim_type__SHIFT 0x0000000f
+#define IA_DEBUG_REG8__last_group_of_inst_p5_q_MASK 0x00000100L
+#define IA_DEBUG_REG8__last_group_of_inst_p5_q__SHIFT 0x00000008
+#define IA_DEBUG_REG8__shift_prim0_null_flag_p5_q_MASK 0x00000400L
+#define IA_DEBUG_REG8__shift_prim0_null_flag_p5_q__SHIFT 0x0000000a
+#define IA_DEBUG_REG8__shift_prim1_null_flag_p5_q_MASK 0x00000200L
+#define IA_DEBUG_REG8__shift_prim1_null_flag_p5_q__SHIFT 0x00000009
+#define IA_DEBUG_REG8__shift_vect_end_of_packet_p5_q_MASK 0x00000080L
+#define IA_DEBUG_REG8__shift_vect_end_of_packet_p5_q__SHIFT 0x00000007
+#define IA_DEBUG_REG8__two_cycle_xfer_p1_q_MASK 0x00000020L
+#define IA_DEBUG_REG8__two_cycle_xfer_p1_q__SHIFT 0x00000005
+#define IA_DEBUG_REG8__two_prim_input_p1_q_MASK 0x00000040L
+#define IA_DEBUG_REG8__two_prim_input_p1_q__SHIFT 0x00000006
+#define IA_DEBUG_REG9__eopg_between_prims_p6_MASK 0x00000400L
+#define IA_DEBUG_REG9__eopg_between_prims_p6__SHIFT 0x0000000a
+#define IA_DEBUG_REG9__eopg_on_last_prim_p6_MASK 0x00000200L
+#define IA_DEBUG_REG9__eopg_on_last_prim_p6__SHIFT 0x00000009
+#define IA_DEBUG_REG9__gfx_se_switch_p6_MASK 0x00000002L
+#define IA_DEBUG_REG9__gfx_se_switch_p6__SHIFT 0x00000001
+#define IA_DEBUG_REG9__grp_se1_fifo_empty_MASK 0x00040000L
+#define IA_DEBUG_REG9__grp_se1_fifo_empty__SHIFT 0x00000012
+#define IA_DEBUG_REG9__grp_se1_fifo_full_MASK 0x00080000L
+#define IA_DEBUG_REG9__grp_se1_fifo_full__SHIFT 0x00000013
+#define IA_DEBUG_REG9__null_eoi_xfer_prim0_p6_MASK 0x00000008L
+#define IA_DEBUG_REG9__null_eoi_xfer_prim0_p6__SHIFT 0x00000003
+#define IA_DEBUG_REG9__null_eoi_xfer_prim1_p6_MASK 0x00000004L
+#define IA_DEBUG_REG9__null_eoi_xfer_prim1_p6__SHIFT 0x00000002
+#define IA_DEBUG_REG9__prim0_eoi_p6_MASK 0x00000020L
+#define IA_DEBUG_REG9__prim0_eoi_p6__SHIFT 0x00000005
+#define IA_DEBUG_REG9__prim0_valid_eopg_p6_MASK 0x00000080L
+#define IA_DEBUG_REG9__prim0_valid_eopg_p6__SHIFT 0x00000007
+#define IA_DEBUG_REG9__prim1_eoi_p6_MASK 0x00000010L
+#define IA_DEBUG_REG9__prim1_eoi_p6__SHIFT 0x00000004
+#define IA_DEBUG_REG9__prim1_to_other_se_p6_MASK 0x00000100L
+#define IA_DEBUG_REG9__prim1_to_other_se_p6__SHIFT 0x00000008
+#define IA_DEBUG_REG9__prim1_valid_eopg_p6_MASK 0x00000040L
+#define IA_DEBUG_REG9__prim1_valid_eopg_p6__SHIFT 0x00000006
+#define IA_DEBUG_REG9__prim1_xfer_p6_MASK 0x00020000L
+#define IA_DEBUG_REG9__prim1_xfer_p6__SHIFT 0x00000011
+#define IA_DEBUG_REG9__prim_count_eq_group_size_p6_MASK 0x00000800L
+#define IA_DEBUG_REG9__prim_count_eq_group_size_p6__SHIFT 0x0000000b
+#define IA_DEBUG_REG9__prim_counter_q_MASK 0xfffc0000L
+#define IA_DEBUG_REG9__prim_counter_q__SHIFT 0x00000012
+#define IA_DEBUG_REG9__prim_count_gt_group_size_p6_MASK 0x00001000L
+#define IA_DEBUG_REG9__prim_count_gt_group_size_p6__SHIFT 0x0000000c
+#define IA_DEBUG_REG9__send_to_se1_p6_MASK 0x00000001L
+#define IA_DEBUG_REG9__send_to_se1_p6__SHIFT 0x00000000
+#define IA_DEBUG_REG9__shift_vect_end_of_packet_p5_q_MASK 0x00010000L
+#define IA_DEBUG_REG9__shift_vect_end_of_packet_p5_q__SHIFT 0x00000010
+#define IA_DEBUG_REG9__SPARE0_MASK 0x00004000L
+#define IA_DEBUG_REG9__SPARE0__SHIFT 0x0000000e
+#define IA_DEBUG_REG9__SPARE1_MASK 0x00008000L
+#define IA_DEBUG_REG9__SPARE1__SHIFT 0x0000000f
+#define IA_DEBUG_REG9__two_prim_output_p5_q_MASK 0x00002000L
+#define IA_DEBUG_REG9__two_prim_output_p5_q__SHIFT 0x0000000d
+#define IA_ENHANCE__MISC_MASK 0xffffffffL
+#define IA_ENHANCE__MISC__SHIFT 0x00000000
+#define IA_MULTI_VGT_PARAM__PARTIAL_ES_WAVE_ON_MASK 0x00040000L
+#define IA_MULTI_VGT_PARAM__PARTIAL_ES_WAVE_ON__SHIFT 0x00000012
+#define IA_MULTI_VGT_PARAM__PARTIAL_VS_WAVE_ON_MASK 0x00010000L
+#define IA_MULTI_VGT_PARAM__PARTIAL_VS_WAVE_ON__SHIFT 0x00000010
+#define IA_MULTI_VGT_PARAM__PRIMGROUP_SIZE_MASK 0x0000ffffL
+#define IA_MULTI_VGT_PARAM__PRIMGROUP_SIZE__SHIFT 0x00000000
+#define IA_MULTI_VGT_PARAM__SWITCH_ON_EOI_MASK 0x00080000L
+#define IA_MULTI_VGT_PARAM__SWITCH_ON_EOI__SHIFT 0x00000013
+#define IA_MULTI_VGT_PARAM__SWITCH_ON_EOP_MASK 0x00020000L
+#define IA_MULTI_VGT_PARAM__SWITCH_ON_EOP__SHIFT 0x00000011
+#define IA_MULTI_VGT_PARAM__WD_SWITCH_ON_EOP_MASK 0x00100000L
+#define IA_MULTI_VGT_PARAM__WD_SWITCH_ON_EOP__SHIFT 0x00000014
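+/*
+ * Editorial sketch of the reverse direction: composing a register value
+ * from these fields.  A multi-bit field is shifted into place and masked;
+ * single-bit enables are OR-ed in directly.  The primgroup size of 256
+ * below is a hypothetical value, shown only for the mechanics:
+ *
+ *	u32 v = (256 << IA_MULTI_VGT_PARAM__PRIMGROUP_SIZE__SHIFT) &
+ *		IA_MULTI_VGT_PARAM__PRIMGROUP_SIZE_MASK;
+ *	v |= IA_MULTI_VGT_PARAM__SWITCH_ON_EOP_MASK;
+ */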
+#define IA_PERFCOUNTER0_HI__PERFCOUNTER_HI_MASK 0xffffffffL
+#define IA_PERFCOUNTER0_HI__PERFCOUNTER_HI__SHIFT 0x00000000
+#define IA_PERFCOUNTER0_LO__PERFCOUNTER_LO_MASK 0xffffffffL
+#define IA_PERFCOUNTER0_LO__PERFCOUNTER_LO__SHIFT 0x00000000
+#define IA_PERFCOUNTER0_SELECT1__PERF_MODE2_MASK 0xf0000000L
+#define IA_PERFCOUNTER0_SELECT1__PERF_MODE2__SHIFT 0x0000001c
+#define IA_PERFCOUNTER0_SELECT1__PERF_MODE3_MASK 0x0f000000L
+#define IA_PERFCOUNTER0_SELECT1__PERF_MODE3__SHIFT 0x00000018
+#define IA_PERFCOUNTER0_SELECT1__PERF_SEL2_MASK 0x000003ffL
+#define IA_PERFCOUNTER0_SELECT1__PERF_SEL2__SHIFT 0x00000000
+#define IA_PERFCOUNTER0_SELECT1__PERF_SEL3_MASK 0x000ffc00L
+#define IA_PERFCOUNTER0_SELECT1__PERF_SEL3__SHIFT 0x0000000a
+#define IA_PERFCOUNTER0_SELECT__CNTR_MODE_MASK 0x00f00000L
+#define IA_PERFCOUNTER0_SELECT__CNTR_MODE__SHIFT 0x00000014
+#define IA_PERFCOUNTER0_SELECT__PERF_MODE1_MASK 0x0f000000L
+#define IA_PERFCOUNTER0_SELECT__PERF_MODE1__SHIFT 0x00000018
+#define IA_PERFCOUNTER0_SELECT__PERF_MODE_MASK 0xf0000000L
+#define IA_PERFCOUNTER0_SELECT__PERF_MODE__SHIFT 0x0000001c
+#define IA_PERFCOUNTER0_SELECT__PERF_SEL1_MASK 0x000ffc00L
+#define IA_PERFCOUNTER0_SELECT__PERF_SEL1__SHIFT 0x0000000a
+#define IA_PERFCOUNTER0_SELECT__PERF_SEL_MASK 0x000000ffL
+#define IA_PERFCOUNTER0_SELECT__PERF_SEL__SHIFT 0x00000000
+#define IA_PERFCOUNTER1_HI__PERFCOUNTER_HI_MASK 0xffffffffL
+#define IA_PERFCOUNTER1_HI__PERFCOUNTER_HI__SHIFT 0x00000000
+#define IA_PERFCOUNTER1_LO__PERFCOUNTER_LO_MASK 0xffffffffL
+#define IA_PERFCOUNTER1_LO__PERFCOUNTER_LO__SHIFT 0x00000000
+#define IA_PERFCOUNTER1_SELECT__PERF_MODE_MASK 0xf0000000L
+#define IA_PERFCOUNTER1_SELECT__PERF_MODE__SHIFT 0x0000001c
+#define IA_PERFCOUNTER1_SELECT__PERF_SEL_MASK 0x000000ffL
+#define IA_PERFCOUNTER1_SELECT__PERF_SEL__SHIFT 0x00000000
+#define IA_PERFCOUNTER2_HI__PERFCOUNTER_HI_MASK 0xffffffffL
+#define IA_PERFCOUNTER2_HI__PERFCOUNTER_HI__SHIFT 0x00000000
+#define IA_PERFCOUNTER2_LO__PERFCOUNTER_LO_MASK 0xffffffffL
+#define IA_PERFCOUNTER2_LO__PERFCOUNTER_LO__SHIFT 0x00000000
+#define IA_PERFCOUNTER2_SELECT__PERF_MODE_MASK 0xf0000000L
+#define IA_PERFCOUNTER2_SELECT__PERF_MODE__SHIFT 0x0000001c
+#define IA_PERFCOUNTER2_SELECT__PERF_SEL_MASK 0x000000ffL
+#define IA_PERFCOUNTER2_SELECT__PERF_SEL__SHIFT 0x00000000
+#define IA_PERFCOUNTER3_HI__PERFCOUNTER_HI_MASK 0xffffffffL
+#define IA_PERFCOUNTER3_HI__PERFCOUNTER_HI__SHIFT 0x00000000
+#define IA_PERFCOUNTER3_LO__PERFCOUNTER_LO_MASK 0xffffffffL
+#define IA_PERFCOUNTER3_LO__PERFCOUNTER_LO__SHIFT 0x00000000
+#define IA_PERFCOUNTER3_SELECT__PERF_MODE_MASK 0xf0000000L
+#define IA_PERFCOUNTER3_SELECT__PERF_MODE__SHIFT 0x0000001c
+#define IA_PERFCOUNTER3_SELECT__PERF_SEL_MASK 0x000000ffL
+#define IA_PERFCOUNTER3_SELECT__PERF_SEL__SHIFT 0x00000000
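+/*
+ * Editorial sketch: judging by the full-width _LO/_HI fields above, each IA
+ * perf counter is a 64-bit count split across a register pair, with the
+ * event chosen through the SELECT register's PERF_SEL field.  Event id 0x10
+ * is hypothetical and the mm* offsets are assumed from the offset header:
+ *
+ *	u32 sel = (0x10 << IA_PERFCOUNTER0_SELECT__PERF_SEL__SHIFT) &
+ *		  IA_PERFCOUNTER0_SELECT__PERF_SEL_MASK;
+ *	u64 count = ((u64)RREG32(mmIA_PERFCOUNTER0_HI) << 32) |
+ *		    RREG32(mmIA_PERFCOUNTER0_LO);
+ */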
+#define IA_VMID_OVERRIDE__ENABLE_MASK 0x00000001L
+#define IA_VMID_OVERRIDE__ENABLE__SHIFT 0x00000000
+#define IA_VMID_OVERRIDE__VMID_MASK 0x0000001eL
+#define IA_VMID_OVERRIDE__VMID__SHIFT 0x00000001
+#define PA_CL_CLIP_CNTL__BOUNDARY_EDGE_FLAG_ENA_MASK 0x00040000L
+#define PA_CL_CLIP_CNTL__BOUNDARY_EDGE_FLAG_ENA__SHIFT 0x00000012
+#define PA_CL_CLIP_CNTL__CLIP_DISABLE_MASK 0x00010000L
+#define PA_CL_CLIP_CNTL__CLIP_DISABLE__SHIFT 0x00000010
+#define PA_CL_CLIP_CNTL__DIS_CLIP_ERR_DETECT_MASK 0x00100000L
+#define PA_CL_CLIP_CNTL__DIS_CLIP_ERR_DETECT__SHIFT 0x00000014
+#define PA_CL_CLIP_CNTL__DX_CLIP_SPACE_DEF_MASK 0x00080000L
+#define PA_CL_CLIP_CNTL__DX_CLIP_SPACE_DEF__SHIFT 0x00000013
+#define PA_CL_CLIP_CNTL__DX_LINEAR_ATTR_CLIP_ENA_MASK 0x01000000L
+#define PA_CL_CLIP_CNTL__DX_LINEAR_ATTR_CLIP_ENA__SHIFT 0x00000018
+#define PA_CL_CLIP_CNTL__DX_RASTERIZATION_KILL_MASK 0x00400000L
+#define PA_CL_CLIP_CNTL__DX_RASTERIZATION_KILL__SHIFT 0x00000016
+#define PA_CL_CLIP_CNTL__PS_UCP_MODE_MASK 0x0000c000L
+#define PA_CL_CLIP_CNTL__PS_UCP_MODE__SHIFT 0x0000000e
+#define PA_CL_CLIP_CNTL__PS_UCP_Y_SCALE_NEG_MASK 0x00002000L
+#define PA_CL_CLIP_CNTL__PS_UCP_Y_SCALE_NEG__SHIFT 0x0000000d
+#define PA_CL_CLIP_CNTL__UCP_CULL_ONLY_ENA_MASK 0x00020000L
+#define PA_CL_CLIP_CNTL__UCP_CULL_ONLY_ENA__SHIFT 0x00000011
+#define PA_CL_CLIP_CNTL__UCP_ENA_0_MASK 0x00000001L
+#define PA_CL_CLIP_CNTL__UCP_ENA_0__SHIFT 0x00000000
+#define PA_CL_CLIP_CNTL__UCP_ENA_1_MASK 0x00000002L
+#define PA_CL_CLIP_CNTL__UCP_ENA_1__SHIFT 0x00000001
+#define PA_CL_CLIP_CNTL__UCP_ENA_2_MASK 0x00000004L
+#define PA_CL_CLIP_CNTL__UCP_ENA_2__SHIFT 0x00000002
+#define PA_CL_CLIP_CNTL__UCP_ENA_3_MASK 0x00000008L
+#define PA_CL_CLIP_CNTL__UCP_ENA_3__SHIFT 0x00000003
+#define PA_CL_CLIP_CNTL__UCP_ENA_4_MASK 0x00000010L
+#define PA_CL_CLIP_CNTL__UCP_ENA_4__SHIFT 0x00000004
+#define PA_CL_CLIP_CNTL__UCP_ENA_5_MASK 0x00000020L
+#define PA_CL_CLIP_CNTL__UCP_ENA_5__SHIFT 0x00000005
+#define PA_CL_CLIP_CNTL__VTE_VPORT_PROVOKE_DISABLE_MASK 0x02000000L
+#define PA_CL_CLIP_CNTL__VTE_VPORT_PROVOKE_DISABLE__SHIFT 0x00000019
+#define PA_CL_CLIP_CNTL__VTX_KILL_OR_MASK 0x00200000L
+#define PA_CL_CLIP_CNTL__VTX_KILL_OR__SHIFT 0x00000015
+#define PA_CL_CLIP_CNTL__ZCLIP_FAR_DISABLE_MASK 0x08000000L
+#define PA_CL_CLIP_CNTL__ZCLIP_FAR_DISABLE__SHIFT 0x0000001b
+#define PA_CL_CLIP_CNTL__ZCLIP_NEAR_DISABLE_MASK 0x04000000L
+#define PA_CL_CLIP_CNTL__ZCLIP_NEAR_DISABLE__SHIFT 0x0000001a
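+/*
+ * Editorial sketch: the UCP_ENA_0..UCP_ENA_5 bits above are contiguous in
+ * bits 5:0 of PA_CL_CLIP_CNTL, so enabling the first num_ucp user clip
+ * planes reduces to a single mask:
+ *
+ *	u32 clip_cntl = (1u << num_ucp) - 1;  /- UCP_ENA_0..UCP_ENA_(num_ucp-1) -/
+ */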
+#define PA_CL_CNTL_STATUS__CL_BUSY_MASK 0x80000000L
+#define PA_CL_CNTL_STATUS__CL_BUSY__SHIFT 0x0000001f
+#define PA_CL_ENHANCE__CLIPPED_PRIM_SEQ_STALL_MASK 0x00000008L
+#define PA_CL_ENHANCE__CLIPPED_PRIM_SEQ_STALL__SHIFT 0x00000003
+#define PA_CL_ENHANCE__CLIP_VTX_REORDER_ENA_MASK 0x00000001L
+#define PA_CL_ENHANCE__CLIP_VTX_REORDER_ENA__SHIFT 0x00000000
+#define PA_CL_ENHANCE__ECO_SPARE0_MASK 0x80000000L
+#define PA_CL_ENHANCE__ECO_SPARE0__SHIFT 0x0000001f
+#define PA_CL_ENHANCE__ECO_SPARE1_MASK 0x40000000L
+#define PA_CL_ENHANCE__ECO_SPARE1__SHIFT 0x0000001e
+#define PA_CL_ENHANCE__ECO_SPARE2_MASK 0x20000000L
+#define PA_CL_ENHANCE__ECO_SPARE2__SHIFT 0x0000001d
+#define PA_CL_ENHANCE__ECO_SPARE3_MASK 0x10000000L
+#define PA_CL_ENHANCE__ECO_SPARE3__SHIFT 0x0000001c
+#define PA_CL_ENHANCE__NUM_CLIP_SEQ_MASK 0x00000006L
+#define PA_CL_ENHANCE__NUM_CLIP_SEQ__SHIFT 0x00000001
+#define PA_CL_ENHANCE__VE_NAN_PROC_DISABLE_MASK 0x00000010L
+#define PA_CL_ENHANCE__VE_NAN_PROC_DISABLE__SHIFT 0x00000004
+#define PA_CL_ENHANCE__XTRA_DEBUG_REG_SEL_MASK 0x00000020L
+#define PA_CL_ENHANCE__XTRA_DEBUG_REG_SEL__SHIFT 0x00000005
+#define PA_CL_GB_HORZ_CLIP_ADJ__DATA_REGISTER_MASK 0xffffffffL
+#define PA_CL_GB_HORZ_CLIP_ADJ__DATA_REGISTER__SHIFT 0x00000000
+#define PA_CL_GB_HORZ_DISC_ADJ__DATA_REGISTER_MASK 0xffffffffL
+#define PA_CL_GB_HORZ_DISC_ADJ__DATA_REGISTER__SHIFT 0x00000000
+#define PA_CL_GB_VERT_CLIP_ADJ__DATA_REGISTER_MASK 0xffffffffL
+#define PA_CL_GB_VERT_CLIP_ADJ__DATA_REGISTER__SHIFT 0x00000000
+#define PA_CL_GB_VERT_DISC_ADJ__DATA_REGISTER_MASK 0xffffffffL
+#define PA_CL_GB_VERT_DISC_ADJ__DATA_REGISTER__SHIFT 0x00000000
+#define PA_CL_NANINF_CNTL__VS_CLIP_DIST_INF_DISCARD_MASK 0x00004000L
+#define PA_CL_NANINF_CNTL__VS_CLIP_DIST_INF_DISCARD__SHIFT 0x0000000e
+#define PA_CL_NANINF_CNTL__VS_W_INF_RETAIN_MASK 0x00002000L
+#define PA_CL_NANINF_CNTL__VS_W_INF_RETAIN__SHIFT 0x0000000d
+#define PA_CL_NANINF_CNTL__VS_W_NAN_TO_INF_MASK 0x00001000L
+#define PA_CL_NANINF_CNTL__VS_W_NAN_TO_INF__SHIFT 0x0000000c
+#define PA_CL_NANINF_CNTL__VS_XY_INF_RETAIN_MASK 0x00000200L
+#define PA_CL_NANINF_CNTL__VS_XY_INF_RETAIN__SHIFT 0x00000009
+#define PA_CL_NANINF_CNTL__VS_XY_NAN_TO_INF_MASK 0x00000100L
+#define PA_CL_NANINF_CNTL__VS_XY_NAN_TO_INF__SHIFT 0x00000008
+#define PA_CL_NANINF_CNTL__VS_Z_INF_RETAIN_MASK 0x00000800L
+#define PA_CL_NANINF_CNTL__VS_Z_INF_RETAIN__SHIFT 0x0000000b
+#define PA_CL_NANINF_CNTL__VS_Z_NAN_TO_INF_MASK 0x00000400L
+#define PA_CL_NANINF_CNTL__VS_Z_NAN_TO_INF__SHIFT 0x0000000a
+#define PA_CL_NANINF_CNTL__VTE_0XNANINF_IS_0_MASK 0x00000008L
+#define PA_CL_NANINF_CNTL__VTE_0XNANINF_IS_0__SHIFT 0x00000003
+#define PA_CL_NANINF_CNTL__VTE_NO_OUTPUT_NEG_0_MASK 0x00100000L
+#define PA_CL_NANINF_CNTL__VTE_NO_OUTPUT_NEG_0__SHIFT 0x00000014
+#define PA_CL_NANINF_CNTL__VTE_W_INF_DISCARD_MASK 0x00000004L
+#define PA_CL_NANINF_CNTL__VTE_W_INF_DISCARD__SHIFT 0x00000002
+#define PA_CL_NANINF_CNTL__VTE_W_NAN_RETAIN_MASK 0x00000040L
+#define PA_CL_NANINF_CNTL__VTE_W_NAN_RETAIN__SHIFT 0x00000006
+#define PA_CL_NANINF_CNTL__VTE_W_RECIP_NAN_IS_0_MASK 0x00000080L
+#define PA_CL_NANINF_CNTL__VTE_W_RECIP_NAN_IS_0__SHIFT 0x00000007
+#define PA_CL_NANINF_CNTL__VTE_XY_INF_DISCARD_MASK 0x00000001L
+#define PA_CL_NANINF_CNTL__VTE_XY_INF_DISCARD__SHIFT 0x00000000
+#define PA_CL_NANINF_CNTL__VTE_XY_NAN_RETAIN_MASK 0x00000010L
+#define PA_CL_NANINF_CNTL__VTE_XY_NAN_RETAIN__SHIFT 0x00000004
+#define PA_CL_NANINF_CNTL__VTE_Z_INF_DISCARD_MASK 0x00000002L
+#define PA_CL_NANINF_CNTL__VTE_Z_INF_DISCARD__SHIFT 0x00000001
+#define PA_CL_NANINF_CNTL__VTE_Z_NAN_RETAIN_MASK 0x00000020L
+#define PA_CL_NANINF_CNTL__VTE_Z_NAN_RETAIN__SHIFT 0x00000005
+#define PA_CL_POINT_CULL_RAD__DATA_REGISTER_MASK 0xffffffffL
+#define PA_CL_POINT_CULL_RAD__DATA_REGISTER__SHIFT 0x00000000
+#define PA_CL_POINT_SIZE__DATA_REGISTER_MASK 0xffffffffL
+#define PA_CL_POINT_SIZE__DATA_REGISTER__SHIFT 0x00000000
+#define PA_CL_POINT_X_RAD__DATA_REGISTER_MASK 0xffffffffL
+#define PA_CL_POINT_X_RAD__DATA_REGISTER__SHIFT 0x00000000
+#define PA_CL_POINT_Y_RAD__DATA_REGISTER_MASK 0xffffffffL
+#define PA_CL_POINT_Y_RAD__DATA_REGISTER__SHIFT 0x00000000
+#define PA_CL_UCP_0_W__DATA_REGISTER_MASK 0xffffffffL
+#define PA_CL_UCP_0_W__DATA_REGISTER__SHIFT 0x00000000
+#define PA_CL_UCP_0_X__DATA_REGISTER_MASK 0xffffffffL
+#define PA_CL_UCP_0_X__DATA_REGISTER__SHIFT 0x00000000
+#define PA_CL_UCP_0_Y__DATA_REGISTER_MASK 0xffffffffL
+#define PA_CL_UCP_0_Y__DATA_REGISTER__SHIFT 0x00000000
+#define PA_CL_UCP_0_Z__DATA_REGISTER_MASK 0xffffffffL
+#define PA_CL_UCP_0_Z__DATA_REGISTER__SHIFT 0x00000000
+#define PA_CL_UCP_1_W__DATA_REGISTER_MASK 0xffffffffL
+#define PA_CL_UCP_1_W__DATA_REGISTER__SHIFT 0x00000000
+#define PA_CL_UCP_1_X__DATA_REGISTER_MASK 0xffffffffL
+#define PA_CL_UCP_1_X__DATA_REGISTER__SHIFT 0x00000000
+#define PA_CL_UCP_1_Y__DATA_REGISTER_MASK 0xffffffffL
+#define PA_CL_UCP_1_Y__DATA_REGISTER__SHIFT 0x00000000
+#define PA_CL_UCP_1_Z__DATA_REGISTER_MASK 0xffffffffL
+#define PA_CL_UCP_1_Z__DATA_REGISTER__SHIFT 0x00000000
+#define PA_CL_UCP_2_W__DATA_REGISTER_MASK 0xffffffffL
+#define PA_CL_UCP_2_W__DATA_REGISTER__SHIFT 0x00000000
+#define PA_CL_UCP_2_X__DATA_REGISTER_MASK 0xffffffffL
+#define PA_CL_UCP_2_X__DATA_REGISTER__SHIFT 0x00000000
+#define PA_CL_UCP_2_Y__DATA_REGISTER_MASK 0xffffffffL
+#define PA_CL_UCP_2_Y__DATA_REGISTER__SHIFT 0x00000000
+#define PA_CL_UCP_2_Z__DATA_REGISTER_MASK 0xffffffffL
+#define PA_CL_UCP_2_Z__DATA_REGISTER__SHIFT 0x00000000
+#define PA_CL_UCP_3_W__DATA_REGISTER_MASK 0xffffffffL
+#define PA_CL_UCP_3_W__DATA_REGISTER__SHIFT 0x00000000
+#define PA_CL_UCP_3_X__DATA_REGISTER_MASK 0xffffffffL
+#define PA_CL_UCP_3_X__DATA_REGISTER__SHIFT 0x00000000
+#define PA_CL_UCP_3_Y__DATA_REGISTER_MASK 0xffffffffL
+#define PA_CL_UCP_3_Y__DATA_REGISTER__SHIFT 0x00000000
+#define PA_CL_UCP_3_Z__DATA_REGISTER_MASK 0xffffffffL
+#define PA_CL_UCP_3_Z__DATA_REGISTER__SHIFT 0x00000000
+#define PA_CL_UCP_4_W__DATA_REGISTER_MASK 0xffffffffL
+#define PA_CL_UCP_4_W__DATA_REGISTER__SHIFT 0x00000000
+#define PA_CL_UCP_4_X__DATA_REGISTER_MASK 0xffffffffL
+#define PA_CL_UCP_4_X__DATA_REGISTER__SHIFT 0x00000000
+#define PA_CL_UCP_4_Y__DATA_REGISTER_MASK 0xffffffffL
+#define PA_CL_UCP_4_Y__DATA_REGISTER__SHIFT 0x00000000
+#define PA_CL_UCP_4_Z__DATA_REGISTER_MASK 0xffffffffL
+#define PA_CL_UCP_4_Z__DATA_REGISTER__SHIFT 0x00000000
+#define PA_CL_UCP_5_W__DATA_REGISTER_MASK 0xffffffffL
+#define PA_CL_UCP_5_W__DATA_REGISTER__SHIFT 0x00000000
+#define PA_CL_UCP_5_X__DATA_REGISTER_MASK 0xffffffffL
+#define PA_CL_UCP_5_X__DATA_REGISTER__SHIFT 0x00000000
+#define PA_CL_UCP_5_Y__DATA_REGISTER_MASK 0xffffffffL
+#define PA_CL_UCP_5_Y__DATA_REGISTER__SHIFT 0x00000000
+#define PA_CL_UCP_5_Z__DATA_REGISTER_MASK 0xffffffffL
+#define PA_CL_UCP_5_Z__DATA_REGISTER__SHIFT 0x00000000
+#define PA_CL_VPORT_XOFFSET_10__VPORT_XOFFSET_MASK 0xffffffffL
+#define PA_CL_VPORT_XOFFSET_10__VPORT_XOFFSET__SHIFT 0x00000000
+#define PA_CL_VPORT_XOFFSET_11__VPORT_XOFFSET_MASK 0xffffffffL
+#define PA_CL_VPORT_XOFFSET_11__VPORT_XOFFSET__SHIFT 0x00000000
+#define PA_CL_VPORT_XOFFSET_12__VPORT_XOFFSET_MASK 0xffffffffL
+#define PA_CL_VPORT_XOFFSET_12__VPORT_XOFFSET__SHIFT 0x00000000
+#define PA_CL_VPORT_XOFFSET_13__VPORT_XOFFSET_MASK 0xffffffffL
+#define PA_CL_VPORT_XOFFSET_13__VPORT_XOFFSET__SHIFT 0x00000000
+#define PA_CL_VPORT_XOFFSET_14__VPORT_XOFFSET_MASK 0xffffffffL
+#define PA_CL_VPORT_XOFFSET_14__VPORT_XOFFSET__SHIFT 0x00000000
+#define PA_CL_VPORT_XOFFSET_15__VPORT_XOFFSET_MASK 0xffffffffL
+#define PA_CL_VPORT_XOFFSET_15__VPORT_XOFFSET__SHIFT 0x00000000
+#define PA_CL_VPORT_XOFFSET_1__VPORT_XOFFSET_MASK 0xffffffffL
+#define PA_CL_VPORT_XOFFSET_1__VPORT_XOFFSET__SHIFT 0x00000000
+#define PA_CL_VPORT_XOFFSET_2__VPORT_XOFFSET_MASK 0xffffffffL
+#define PA_CL_VPORT_XOFFSET_2__VPORT_XOFFSET__SHIFT 0x00000000
+#define PA_CL_VPORT_XOFFSET_3__VPORT_XOFFSET_MASK 0xffffffffL
+#define PA_CL_VPORT_XOFFSET_3__VPORT_XOFFSET__SHIFT 0x00000000
+#define PA_CL_VPORT_XOFFSET_4__VPORT_XOFFSET_MASK 0xffffffffL
+#define PA_CL_VPORT_XOFFSET_4__VPORT_XOFFSET__SHIFT 0x00000000
+#define PA_CL_VPORT_XOFFSET_5__VPORT_XOFFSET_MASK 0xffffffffL
+#define PA_CL_VPORT_XOFFSET_5__VPORT_XOFFSET__SHIFT 0x00000000
+#define PA_CL_VPORT_XOFFSET_6__VPORT_XOFFSET_MASK 0xffffffffL
+#define PA_CL_VPORT_XOFFSET_6__VPORT_XOFFSET__SHIFT 0x00000000
+#define PA_CL_VPORT_XOFFSET_7__VPORT_XOFFSET_MASK 0xffffffffL
+#define PA_CL_VPORT_XOFFSET_7__VPORT_XOFFSET__SHIFT 0x00000000
+#define PA_CL_VPORT_XOFFSET_8__VPORT_XOFFSET_MASK 0xffffffffL
+#define PA_CL_VPORT_XOFFSET_8__VPORT_XOFFSET__SHIFT 0x00000000
+#define PA_CL_VPORT_XOFFSET_9__VPORT_XOFFSET_MASK 0xffffffffL
+#define PA_CL_VPORT_XOFFSET_9__VPORT_XOFFSET__SHIFT 0x00000000
+#define PA_CL_VPORT_XOFFSET__VPORT_XOFFSET_MASK 0xffffffffL
+#define PA_CL_VPORT_XOFFSET__VPORT_XOFFSET__SHIFT 0x00000000
+#define PA_CL_VPORT_XSCALE_10__VPORT_XSCALE_MASK 0xffffffffL
+#define PA_CL_VPORT_XSCALE_10__VPORT_XSCALE__SHIFT 0x00000000
+#define PA_CL_VPORT_XSCALE_11__VPORT_XSCALE_MASK 0xffffffffL
+#define PA_CL_VPORT_XSCALE_11__VPORT_XSCALE__SHIFT 0x00000000
+#define PA_CL_VPORT_XSCALE_12__VPORT_XSCALE_MASK 0xffffffffL
+#define PA_CL_VPORT_XSCALE_12__VPORT_XSCALE__SHIFT 0x00000000
+#define PA_CL_VPORT_XSCALE_13__VPORT_XSCALE_MASK 0xffffffffL
+#define PA_CL_VPORT_XSCALE_13__VPORT_XSCALE__SHIFT 0x00000000
+#define PA_CL_VPORT_XSCALE_14__VPORT_XSCALE_MASK 0xffffffffL
+#define PA_CL_VPORT_XSCALE_14__VPORT_XSCALE__SHIFT 0x00000000
+#define PA_CL_VPORT_XSCALE_15__VPORT_XSCALE_MASK 0xffffffffL
+#define PA_CL_VPORT_XSCALE_15__VPORT_XSCALE__SHIFT 0x00000000
+#define PA_CL_VPORT_XSCALE_1__VPORT_XSCALE_MASK 0xffffffffL
+#define PA_CL_VPORT_XSCALE_1__VPORT_XSCALE__SHIFT 0x00000000
+#define PA_CL_VPORT_XSCALE_2__VPORT_XSCALE_MASK 0xffffffffL
+#define PA_CL_VPORT_XSCALE_2__VPORT_XSCALE__SHIFT 0x00000000
+#define PA_CL_VPORT_XSCALE_3__VPORT_XSCALE_MASK 0xffffffffL
+#define PA_CL_VPORT_XSCALE_3__VPORT_XSCALE__SHIFT 0x00000000
+#define PA_CL_VPORT_XSCALE_4__VPORT_XSCALE_MASK 0xffffffffL
+#define PA_CL_VPORT_XSCALE_4__VPORT_XSCALE__SHIFT 0x00000000
+#define PA_CL_VPORT_XSCALE_5__VPORT_XSCALE_MASK 0xffffffffL
+#define PA_CL_VPORT_XSCALE_5__VPORT_XSCALE__SHIFT 0x00000000
+#define PA_CL_VPORT_XSCALE_6__VPORT_XSCALE_MASK 0xffffffffL
+#define PA_CL_VPORT_XSCALE_6__VPORT_XSCALE__SHIFT 0x00000000
+#define PA_CL_VPORT_XSCALE_7__VPORT_XSCALE_MASK 0xffffffffL
+#define PA_CL_VPORT_XSCALE_7__VPORT_XSCALE__SHIFT 0x00000000
+#define PA_CL_VPORT_XSCALE_8__VPORT_XSCALE_MASK 0xffffffffL
+#define PA_CL_VPORT_XSCALE_8__VPORT_XSCALE__SHIFT 0x00000000
+#define PA_CL_VPORT_XSCALE_9__VPORT_XSCALE_MASK 0xffffffffL
+#define PA_CL_VPORT_XSCALE_9__VPORT_XSCALE__SHIFT 0x00000000
+#define PA_CL_VPORT_XSCALE__VPORT_XSCALE_MASK 0xffffffffL
+#define PA_CL_VPORT_XSCALE__VPORT_XSCALE__SHIFT 0x00000000
+#define PA_CL_VPORT_YOFFSET_10__VPORT_YOFFSET_MASK 0xffffffffL
+#define PA_CL_VPORT_YOFFSET_10__VPORT_YOFFSET__SHIFT 0x00000000
+#define PA_CL_VPORT_YOFFSET_11__VPORT_YOFFSET_MASK 0xffffffffL
+#define PA_CL_VPORT_YOFFSET_11__VPORT_YOFFSET__SHIFT 0x00000000
+#define PA_CL_VPORT_YOFFSET_12__VPORT_YOFFSET_MASK 0xffffffffL
+#define PA_CL_VPORT_YOFFSET_12__VPORT_YOFFSET__SHIFT 0x00000000
+#define PA_CL_VPORT_YOFFSET_13__VPORT_YOFFSET_MASK 0xffffffffL
+#define PA_CL_VPORT_YOFFSET_13__VPORT_YOFFSET__SHIFT 0x00000000
+#define PA_CL_VPORT_YOFFSET_14__VPORT_YOFFSET_MASK 0xffffffffL
+#define PA_CL_VPORT_YOFFSET_14__VPORT_YOFFSET__SHIFT 0x00000000
+#define PA_CL_VPORT_YOFFSET_15__VPORT_YOFFSET_MASK 0xffffffffL
+#define PA_CL_VPORT_YOFFSET_15__VPORT_YOFFSET__SHIFT 0x00000000
+#define PA_CL_VPORT_YOFFSET_1__VPORT_YOFFSET_MASK 0xffffffffL
+#define PA_CL_VPORT_YOFFSET_1__VPORT_YOFFSET__SHIFT 0x00000000
+#define PA_CL_VPORT_YOFFSET_2__VPORT_YOFFSET_MASK 0xffffffffL
+#define PA_CL_VPORT_YOFFSET_2__VPORT_YOFFSET__SHIFT 0x00000000
+#define PA_CL_VPORT_YOFFSET_3__VPORT_YOFFSET_MASK 0xffffffffL
+#define PA_CL_VPORT_YOFFSET_3__VPORT_YOFFSET__SHIFT 0x00000000
+#define PA_CL_VPORT_YOFFSET_4__VPORT_YOFFSET_MASK 0xffffffffL
+#define PA_CL_VPORT_YOFFSET_4__VPORT_YOFFSET__SHIFT 0x00000000
+#define PA_CL_VPORT_YOFFSET_5__VPORT_YOFFSET_MASK 0xffffffffL
+#define PA_CL_VPORT_YOFFSET_5__VPORT_YOFFSET__SHIFT 0x00000000
+#define PA_CL_VPORT_YOFFSET_6__VPORT_YOFFSET_MASK 0xffffffffL
+#define PA_CL_VPORT_YOFFSET_6__VPORT_YOFFSET__SHIFT 0x00000000
+#define PA_CL_VPORT_YOFFSET_7__VPORT_YOFFSET_MASK 0xffffffffL
+#define PA_CL_VPORT_YOFFSET_7__VPORT_YOFFSET__SHIFT 0x00000000
+#define PA_CL_VPORT_YOFFSET_8__VPORT_YOFFSET_MASK 0xffffffffL
+#define PA_CL_VPORT_YOFFSET_8__VPORT_YOFFSET__SHIFT 0x00000000
+#define PA_CL_VPORT_YOFFSET_9__VPORT_YOFFSET_MASK 0xffffffffL
+#define PA_CL_VPORT_YOFFSET_9__VPORT_YOFFSET__SHIFT 0x00000000
+#define PA_CL_VPORT_YOFFSET__VPORT_YOFFSET_MASK 0xffffffffL
+#define PA_CL_VPORT_YOFFSET__VPORT_YOFFSET__SHIFT 0x00000000
+#define PA_CL_VPORT_YSCALE_10__VPORT_YSCALE_MASK 0xffffffffL
+#define PA_CL_VPORT_YSCALE_10__VPORT_YSCALE__SHIFT 0x00000000
+#define PA_CL_VPORT_YSCALE_11__VPORT_YSCALE_MASK 0xffffffffL
+#define PA_CL_VPORT_YSCALE_11__VPORT_YSCALE__SHIFT 0x00000000
+#define PA_CL_VPORT_YSCALE_12__VPORT_YSCALE_MASK 0xffffffffL
+#define PA_CL_VPORT_YSCALE_12__VPORT_YSCALE__SHIFT 0x00000000
+#define PA_CL_VPORT_YSCALE_13__VPORT_YSCALE_MASK 0xffffffffL
+#define PA_CL_VPORT_YSCALE_13__VPORT_YSCALE__SHIFT 0x00000000
+#define PA_CL_VPORT_YSCALE_14__VPORT_YSCALE_MASK 0xffffffffL
+#define PA_CL_VPORT_YSCALE_14__VPORT_YSCALE__SHIFT 0x00000000
+#define PA_CL_VPORT_YSCALE_15__VPORT_YSCALE_MASK 0xffffffffL
+#define PA_CL_VPORT_YSCALE_15__VPORT_YSCALE__SHIFT 0x00000000
+#define PA_CL_VPORT_YSCALE_1__VPORT_YSCALE_MASK 0xffffffffL
+#define PA_CL_VPORT_YSCALE_1__VPORT_YSCALE__SHIFT 0x00000000
+#define PA_CL_VPORT_YSCALE_2__VPORT_YSCALE_MASK 0xffffffffL
+#define PA_CL_VPORT_YSCALE_2__VPORT_YSCALE__SHIFT 0x00000000
+#define PA_CL_VPORT_YSCALE_3__VPORT_YSCALE_MASK 0xffffffffL
+#define PA_CL_VPORT_YSCALE_3__VPORT_YSCALE__SHIFT 0x00000000
+#define PA_CL_VPORT_YSCALE_4__VPORT_YSCALE_MASK 0xffffffffL
+#define PA_CL_VPORT_YSCALE_4__VPORT_YSCALE__SHIFT 0x00000000
+#define PA_CL_VPORT_YSCALE_5__VPORT_YSCALE_MASK 0xffffffffL
+#define PA_CL_VPORT_YSCALE_5__VPORT_YSCALE__SHIFT 0x00000000
+#define PA_CL_VPORT_YSCALE_6__VPORT_YSCALE_MASK 0xffffffffL
+#define PA_CL_VPORT_YSCALE_6__VPORT_YSCALE__SHIFT 0x00000000
+#define PA_CL_VPORT_YSCALE_7__VPORT_YSCALE_MASK 0xffffffffL
+#define PA_CL_VPORT_YSCALE_7__VPORT_YSCALE__SHIFT 0x00000000
+#define PA_CL_VPORT_YSCALE_8__VPORT_YSCALE_MASK 0xffffffffL
+#define PA_CL_VPORT_YSCALE_8__VPORT_YSCALE__SHIFT 0x00000000
+#define PA_CL_VPORT_YSCALE_9__VPORT_YSCALE_MASK 0xffffffffL
+#define PA_CL_VPORT_YSCALE_9__VPORT_YSCALE__SHIFT 0x00000000
+#define PA_CL_VPORT_YSCALE__VPORT_YSCALE_MASK 0xffffffffL
+#define PA_CL_VPORT_YSCALE__VPORT_YSCALE__SHIFT 0x00000000
+#define PA_CL_VPORT_ZOFFSET_10__VPORT_ZOFFSET_MASK 0xffffffffL
+#define PA_CL_VPORT_ZOFFSET_10__VPORT_ZOFFSET__SHIFT 0x00000000
+#define PA_CL_VPORT_ZOFFSET_11__VPORT_ZOFFSET_MASK 0xffffffffL
+#define PA_CL_VPORT_ZOFFSET_11__VPORT_ZOFFSET__SHIFT 0x00000000
+#define PA_CL_VPORT_ZOFFSET_12__VPORT_ZOFFSET_MASK 0xffffffffL
+#define PA_CL_VPORT_ZOFFSET_12__VPORT_ZOFFSET__SHIFT 0x00000000
+#define PA_CL_VPORT_ZOFFSET_13__VPORT_ZOFFSET_MASK 0xffffffffL
+#define PA_CL_VPORT_ZOFFSET_13__VPORT_ZOFFSET__SHIFT 0x00000000
+#define PA_CL_VPORT_ZOFFSET_14__VPORT_ZOFFSET_MASK 0xffffffffL
+#define PA_CL_VPORT_ZOFFSET_14__VPORT_ZOFFSET__SHIFT 0x00000000
+#define PA_CL_VPORT_ZOFFSET_15__VPORT_ZOFFSET_MASK 0xffffffffL
+#define PA_CL_VPORT_ZOFFSET_15__VPORT_ZOFFSET__SHIFT 0x00000000
+#define PA_CL_VPORT_ZOFFSET_1__VPORT_ZOFFSET_MASK 0xffffffffL
+#define PA_CL_VPORT_ZOFFSET_1__VPORT_ZOFFSET__SHIFT 0x00000000
+#define PA_CL_VPORT_ZOFFSET_2__VPORT_ZOFFSET_MASK 0xffffffffL
+#define PA_CL_VPORT_ZOFFSET_2__VPORT_ZOFFSET__SHIFT 0x00000000
+#define PA_CL_VPORT_ZOFFSET_3__VPORT_ZOFFSET_MASK 0xffffffffL
+#define PA_CL_VPORT_ZOFFSET_3__VPORT_ZOFFSET__SHIFT 0x00000000
+#define PA_CL_VPORT_ZOFFSET_4__VPORT_ZOFFSET_MASK 0xffffffffL
+#define PA_CL_VPORT_ZOFFSET_4__VPORT_ZOFFSET__SHIFT 0x00000000
+#define PA_CL_VPORT_ZOFFSET_5__VPORT_ZOFFSET_MASK 0xffffffffL
+#define PA_CL_VPORT_ZOFFSET_5__VPORT_ZOFFSET__SHIFT 0x00000000
+#define PA_CL_VPORT_ZOFFSET_6__VPORT_ZOFFSET_MASK 0xffffffffL
+#define PA_CL_VPORT_ZOFFSET_6__VPORT_ZOFFSET__SHIFT 0x00000000
+#define PA_CL_VPORT_ZOFFSET_7__VPORT_ZOFFSET_MASK 0xffffffffL
+#define PA_CL_VPORT_ZOFFSET_7__VPORT_ZOFFSET__SHIFT 0x00000000
+#define PA_CL_VPORT_ZOFFSET_8__VPORT_ZOFFSET_MASK 0xffffffffL
+#define PA_CL_VPORT_ZOFFSET_8__VPORT_ZOFFSET__SHIFT 0x00000000
+#define PA_CL_VPORT_ZOFFSET_9__VPORT_ZOFFSET_MASK 0xffffffffL
+#define PA_CL_VPORT_ZOFFSET_9__VPORT_ZOFFSET__SHIFT 0x00000000
+#define PA_CL_VPORT_ZOFFSET__VPORT_ZOFFSET_MASK 0xffffffffL
+#define PA_CL_VPORT_ZOFFSET__VPORT_ZOFFSET__SHIFT 0x00000000
+#define PA_CL_VPORT_ZSCALE_10__VPORT_ZSCALE_MASK 0xffffffffL
+#define PA_CL_VPORT_ZSCALE_10__VPORT_ZSCALE__SHIFT 0x00000000
+#define PA_CL_VPORT_ZSCALE_11__VPORT_ZSCALE_MASK 0xffffffffL
+#define PA_CL_VPORT_ZSCALE_11__VPORT_ZSCALE__SHIFT 0x00000000
+#define PA_CL_VPORT_ZSCALE_12__VPORT_ZSCALE_MASK 0xffffffffL
+#define PA_CL_VPORT_ZSCALE_12__VPORT_ZSCALE__SHIFT 0x00000000
+#define PA_CL_VPORT_ZSCALE_13__VPORT_ZSCALE_MASK 0xffffffffL
+#define PA_CL_VPORT_ZSCALE_13__VPORT_ZSCALE__SHIFT 0x00000000
+#define PA_CL_VPORT_ZSCALE_14__VPORT_ZSCALE_MASK 0xffffffffL
+#define PA_CL_VPORT_ZSCALE_14__VPORT_ZSCALE__SHIFT 0x00000000
+#define PA_CL_VPORT_ZSCALE_15__VPORT_ZSCALE_MASK 0xffffffffL
+#define PA_CL_VPORT_ZSCALE_15__VPORT_ZSCALE__SHIFT 0x00000000
+#define PA_CL_VPORT_ZSCALE_1__VPORT_ZSCALE_MASK 0xffffffffL
+#define PA_CL_VPORT_ZSCALE_1__VPORT_ZSCALE__SHIFT 0x00000000
+#define PA_CL_VPORT_ZSCALE_2__VPORT_ZSCALE_MASK 0xffffffffL
+#define PA_CL_VPORT_ZSCALE_2__VPORT_ZSCALE__SHIFT 0x00000000
+#define PA_CL_VPORT_ZSCALE_3__VPORT_ZSCALE_MASK 0xffffffffL
+#define PA_CL_VPORT_ZSCALE_3__VPORT_ZSCALE__SHIFT 0x00000000
+#define PA_CL_VPORT_ZSCALE_4__VPORT_ZSCALE_MASK 0xffffffffL
+#define PA_CL_VPORT_ZSCALE_4__VPORT_ZSCALE__SHIFT 0x00000000
+#define PA_CL_VPORT_ZSCALE_5__VPORT_ZSCALE_MASK 0xffffffffL
+#define PA_CL_VPORT_ZSCALE_5__VPORT_ZSCALE__SHIFT 0x00000000
+#define PA_CL_VPORT_ZSCALE_6__VPORT_ZSCALE_MASK 0xffffffffL
+#define PA_CL_VPORT_ZSCALE_6__VPORT_ZSCALE__SHIFT 0x00000000
+#define PA_CL_VPORT_ZSCALE_7__VPORT_ZSCALE_MASK 0xffffffffL
+#define PA_CL_VPORT_ZSCALE_7__VPORT_ZSCALE__SHIFT 0x00000000
+#define PA_CL_VPORT_ZSCALE_8__VPORT_ZSCALE_MASK 0xffffffffL
+#define PA_CL_VPORT_ZSCALE_8__VPORT_ZSCALE__SHIFT 0x00000000
+#define PA_CL_VPORT_ZSCALE_9__VPORT_ZSCALE_MASK 0xffffffffL
+#define PA_CL_VPORT_ZSCALE_9__VPORT_ZSCALE__SHIFT 0x00000000
+#define PA_CL_VPORT_ZSCALE__VPORT_ZSCALE_MASK 0xffffffffL
+#define PA_CL_VPORT_ZSCALE__VPORT_ZSCALE__SHIFT 0x00000000
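+/*
+ * Editorial note: the viewport scale/offset registers above each expose one
+ * full-width DATA_REGISTER-style field; these presumably hold the raw bit
+ * pattern of an IEEE-754 float, written via a type pun (WREG32() and the
+ * mm* offset are assumed helpers, 960.0f a hypothetical scale):
+ *
+ *	union { float f; u32 u; } xscale = { .f = 960.0f };
+ *	WREG32(mmPA_CL_VPORT_XSCALE, xscale.u);
+ */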
+#define PA_CL_VS_OUT_CNTL__CLIP_DIST_ENA_0_MASK 0x00000001L
+#define PA_CL_VS_OUT_CNTL__CLIP_DIST_ENA_0__SHIFT 0x00000000
+#define PA_CL_VS_OUT_CNTL__CLIP_DIST_ENA_1_MASK 0x00000002L
+#define PA_CL_VS_OUT_CNTL__CLIP_DIST_ENA_1__SHIFT 0x00000001
+#define PA_CL_VS_OUT_CNTL__CLIP_DIST_ENA_2_MASK 0x00000004L
+#define PA_CL_VS_OUT_CNTL__CLIP_DIST_ENA_2__SHIFT 0x00000002
+#define PA_CL_VS_OUT_CNTL__CLIP_DIST_ENA_3_MASK 0x00000008L
+#define PA_CL_VS_OUT_CNTL__CLIP_DIST_ENA_3__SHIFT 0x00000003
+#define PA_CL_VS_OUT_CNTL__CLIP_DIST_ENA_4_MASK 0x00000010L
+#define PA_CL_VS_OUT_CNTL__CLIP_DIST_ENA_4__SHIFT 0x00000004
+#define PA_CL_VS_OUT_CNTL__CLIP_DIST_ENA_5_MASK 0x00000020L
+#define PA_CL_VS_OUT_CNTL__CLIP_DIST_ENA_5__SHIFT 0x00000005
+#define PA_CL_VS_OUT_CNTL__CLIP_DIST_ENA_6_MASK 0x00000040L
+#define PA_CL_VS_OUT_CNTL__CLIP_DIST_ENA_6__SHIFT 0x00000006
+#define PA_CL_VS_OUT_CNTL__CLIP_DIST_ENA_7_MASK 0x00000080L
+#define PA_CL_VS_OUT_CNTL__CLIP_DIST_ENA_7__SHIFT 0x00000007
+#define PA_CL_VS_OUT_CNTL__CULL_DIST_ENA_0_MASK 0x00000100L
+#define PA_CL_VS_OUT_CNTL__CULL_DIST_ENA_0__SHIFT 0x00000008
+#define PA_CL_VS_OUT_CNTL__CULL_DIST_ENA_1_MASK 0x00000200L
+#define PA_CL_VS_OUT_CNTL__CULL_DIST_ENA_1__SHIFT 0x00000009
+#define PA_CL_VS_OUT_CNTL__CULL_DIST_ENA_2_MASK 0x00000400L
+#define PA_CL_VS_OUT_CNTL__CULL_DIST_ENA_2__SHIFT 0x0000000a
+#define PA_CL_VS_OUT_CNTL__CULL_DIST_ENA_3_MASK 0x00000800L
+#define PA_CL_VS_OUT_CNTL__CULL_DIST_ENA_3__SHIFT 0x0000000b
+#define PA_CL_VS_OUT_CNTL__CULL_DIST_ENA_4_MASK 0x00001000L
+#define PA_CL_VS_OUT_CNTL__CULL_DIST_ENA_4__SHIFT 0x0000000c
+#define PA_CL_VS_OUT_CNTL__CULL_DIST_ENA_5_MASK 0x00002000L
+#define PA_CL_VS_OUT_CNTL__CULL_DIST_ENA_5__SHIFT 0x0000000d
+#define PA_CL_VS_OUT_CNTL__CULL_DIST_ENA_6_MASK 0x00004000L
+#define PA_CL_VS_OUT_CNTL__CULL_DIST_ENA_6__SHIFT 0x0000000e
+#define PA_CL_VS_OUT_CNTL__CULL_DIST_ENA_7_MASK 0x00008000L
+#define PA_CL_VS_OUT_CNTL__CULL_DIST_ENA_7__SHIFT 0x0000000f
+#define PA_CL_VS_OUT_CNTL__USE_VTX_EDGE_FLAG_MASK 0x00020000L
+#define PA_CL_VS_OUT_CNTL__USE_VTX_EDGE_FLAG__SHIFT 0x00000011
+#define PA_CL_VS_OUT_CNTL__USE_VTX_GS_CUT_FLAG_MASK 0x02000000L
+#define PA_CL_VS_OUT_CNTL__USE_VTX_GS_CUT_FLAG__SHIFT 0x00000019
+#define PA_CL_VS_OUT_CNTL__USE_VTX_KILL_FLAG_MASK 0x00100000L
+#define PA_CL_VS_OUT_CNTL__USE_VTX_KILL_FLAG__SHIFT 0x00000014
+#define PA_CL_VS_OUT_CNTL__USE_VTX_POINT_SIZE_MASK 0x00010000L
+#define PA_CL_VS_OUT_CNTL__USE_VTX_POINT_SIZE__SHIFT 0x00000010
+#define PA_CL_VS_OUT_CNTL__USE_VTX_RENDER_TARGET_INDX_MASK 0x00040000L
+#define PA_CL_VS_OUT_CNTL__USE_VTX_RENDER_TARGET_INDX__SHIFT 0x00000012
+#define PA_CL_VS_OUT_CNTL__USE_VTX_VIEWPORT_INDX_MASK 0x00080000L
+#define PA_CL_VS_OUT_CNTL__USE_VTX_VIEWPORT_INDX__SHIFT 0x00000013
+#define PA_CL_VS_OUT_CNTL__VS_OUT_CCDIST0_VEC_ENA_MASK 0x00400000L
+#define PA_CL_VS_OUT_CNTL__VS_OUT_CCDIST0_VEC_ENA__SHIFT 0x00000016
+#define PA_CL_VS_OUT_CNTL__VS_OUT_CCDIST1_VEC_ENA_MASK 0x00800000L
+#define PA_CL_VS_OUT_CNTL__VS_OUT_CCDIST1_VEC_ENA__SHIFT 0x00000017
+#define PA_CL_VS_OUT_CNTL__VS_OUT_MISC_SIDE_BUS_ENA_MASK 0x01000000L
+#define PA_CL_VS_OUT_CNTL__VS_OUT_MISC_SIDE_BUS_ENA__SHIFT 0x00000018
+#define PA_CL_VS_OUT_CNTL__VS_OUT_MISC_VEC_ENA_MASK 0x00200000L
+#define PA_CL_VS_OUT_CNTL__VS_OUT_MISC_VEC_ENA__SHIFT 0x00000015
+#define PA_CL_VTE_CNTL__PERFCOUNTER_REF_MASK 0x00000800L
+#define PA_CL_VTE_CNTL__PERFCOUNTER_REF__SHIFT 0x0000000b
+#define PA_CL_VTE_CNTL__VPORT_X_OFFSET_ENA_MASK 0x00000002L
+#define PA_CL_VTE_CNTL__VPORT_X_OFFSET_ENA__SHIFT 0x00000001
+#define PA_CL_VTE_CNTL__VPORT_X_SCALE_ENA_MASK 0x00000001L
+#define PA_CL_VTE_CNTL__VPORT_X_SCALE_ENA__SHIFT 0x00000000
+#define PA_CL_VTE_CNTL__VPORT_Y_OFFSET_ENA_MASK 0x00000008L
+#define PA_CL_VTE_CNTL__VPORT_Y_OFFSET_ENA__SHIFT 0x00000003
+#define PA_CL_VTE_CNTL__VPORT_Y_SCALE_ENA_MASK 0x00000004L
+#define PA_CL_VTE_CNTL__VPORT_Y_SCALE_ENA__SHIFT 0x00000002
+#define PA_CL_VTE_CNTL__VPORT_Z_OFFSET_ENA_MASK 0x00000020L
+#define PA_CL_VTE_CNTL__VPORT_Z_OFFSET_ENA__SHIFT 0x00000005
+#define PA_CL_VTE_CNTL__VPORT_Z_SCALE_ENA_MASK 0x00000010L
+#define PA_CL_VTE_CNTL__VPORT_Z_SCALE_ENA__SHIFT 0x00000004
+#define PA_CL_VTE_CNTL__VTX_W0_FMT_MASK 0x00000400L
+#define PA_CL_VTE_CNTL__VTX_W0_FMT__SHIFT 0x0000000a
+#define PA_CL_VTE_CNTL__VTX_XY_FMT_MASK 0x00000100L
+#define PA_CL_VTE_CNTL__VTX_XY_FMT__SHIFT 0x00000008
+#define PA_CL_VTE_CNTL__VTX_Z_FMT_MASK 0x00000200L
+#define PA_CL_VTE_CNTL__VTX_Z_FMT__SHIFT 0x00000009
+#define PA_SC_AA_CONFIG__AA_MASK_CENTROID_DTMN_MASK 0x00000010L
+#define PA_SC_AA_CONFIG__AA_MASK_CENTROID_DTMN__SHIFT 0x00000004
+#define PA_SC_AA_CONFIG__DETAIL_TO_EXPOSED_MODE_MASK 0x03000000L
+#define PA_SC_AA_CONFIG__DETAIL_TO_EXPOSED_MODE__SHIFT 0x00000018
+#define PA_SC_AA_CONFIG__MAX_SAMPLE_DIST_MASK 0x0001e000L
+#define PA_SC_AA_CONFIG__MAX_SAMPLE_DIST__SHIFT 0x0000000d
+#define PA_SC_AA_CONFIG__MSAA_EXPOSED_SAMPLES_MASK 0x00700000L
+#define PA_SC_AA_CONFIG__MSAA_EXPOSED_SAMPLES__SHIFT 0x00000014
+#define PA_SC_AA_CONFIG__MSAA_NUM_SAMPLES_MASK 0x00000007L
+#define PA_SC_AA_CONFIG__MSAA_NUM_SAMPLES__SHIFT 0x00000000
+#define PA_SC_AA_MASK_X0Y0_X1Y0__AA_MASK_X0Y0_MASK 0x0000ffffL
+#define PA_SC_AA_MASK_X0Y0_X1Y0__AA_MASK_X0Y0__SHIFT 0x00000000
+#define PA_SC_AA_MASK_X0Y0_X1Y0__AA_MASK_X1Y0_MASK 0xffff0000L
+#define PA_SC_AA_MASK_X0Y0_X1Y0__AA_MASK_X1Y0__SHIFT 0x00000010
+#define PA_SC_AA_MASK_X0Y1_X1Y1__AA_MASK_X0Y1_MASK 0x0000ffffL
+#define PA_SC_AA_MASK_X0Y1_X1Y1__AA_MASK_X0Y1__SHIFT 0x00000000
+#define PA_SC_AA_MASK_X0Y1_X1Y1__AA_MASK_X1Y1_MASK 0xffff0000L
+#define PA_SC_AA_MASK_X0Y1_X1Y1__AA_MASK_X1Y1__SHIFT 0x00000010
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0__S0_X_MASK 0x0000000fL
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0__S0_X__SHIFT 0x00000000
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0__S0_Y_MASK 0x000000f0L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0__S0_Y__SHIFT 0x00000004
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0__S1_X_MASK 0x00000f00L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0__S1_X__SHIFT 0x00000008
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0__S1_Y_MASK 0x0000f000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0__S1_Y__SHIFT 0x0000000c
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0__S2_X_MASK 0x000f0000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0__S2_X__SHIFT 0x00000010
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0__S2_Y_MASK 0x00f00000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0__S2_Y__SHIFT 0x00000014
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0__S3_X_MASK 0x0f000000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0__S3_X__SHIFT 0x00000018
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0__S3_Y_MASK 0xf0000000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0__S3_Y__SHIFT 0x0000001c
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_1__S4_X_MASK 0x0000000fL
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_1__S4_X__SHIFT 0x00000000
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_1__S4_Y_MASK 0x000000f0L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_1__S4_Y__SHIFT 0x00000004
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_1__S5_X_MASK 0x00000f00L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_1__S5_X__SHIFT 0x00000008
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_1__S5_Y_MASK 0x0000f000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_1__S5_Y__SHIFT 0x0000000c
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_1__S6_X_MASK 0x000f0000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_1__S6_X__SHIFT 0x00000010
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_1__S6_Y_MASK 0x00f00000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_1__S6_Y__SHIFT 0x00000014
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_1__S7_X_MASK 0x0f000000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_1__S7_X__SHIFT 0x00000018
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_1__S7_Y_MASK 0xf0000000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_1__S7_Y__SHIFT 0x0000001c
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_2__S10_X_MASK 0x000f0000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_2__S10_X__SHIFT 0x00000010
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_2__S10_Y_MASK 0x00f00000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_2__S10_Y__SHIFT 0x00000014
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_2__S11_X_MASK 0x0f000000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_2__S11_X__SHIFT 0x00000018
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_2__S11_Y_MASK 0xf0000000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_2__S11_Y__SHIFT 0x0000001c
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_2__S8_X_MASK 0x0000000fL
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_2__S8_X__SHIFT 0x00000000
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_2__S8_Y_MASK 0x000000f0L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_2__S8_Y__SHIFT 0x00000004
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_2__S9_X_MASK 0x00000f00L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_2__S9_X__SHIFT 0x00000008
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_2__S9_Y_MASK 0x0000f000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_2__S9_Y__SHIFT 0x0000000c
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_3__S12_X_MASK 0x0000000fL
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_3__S12_X__SHIFT 0x00000000
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_3__S12_Y_MASK 0x000000f0L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_3__S12_Y__SHIFT 0x00000004
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_3__S13_X_MASK 0x00000f00L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_3__S13_X__SHIFT 0x00000008
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_3__S13_Y_MASK 0x0000f000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_3__S13_Y__SHIFT 0x0000000c
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_3__S14_X_MASK 0x000f0000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_3__S14_X__SHIFT 0x00000010
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_3__S14_Y_MASK 0x00f00000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_3__S14_Y__SHIFT 0x00000014
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_3__S15_X_MASK 0x0f000000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_3__S15_X__SHIFT 0x00000018
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_3__S15_Y_MASK 0xf0000000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_3__S15_Y__SHIFT 0x0000001c
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0__S0_X_MASK 0x0000000fL
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0__S0_X__SHIFT 0x00000000
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0__S0_Y_MASK 0x000000f0L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0__S0_Y__SHIFT 0x00000004
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0__S1_X_MASK 0x00000f00L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0__S1_X__SHIFT 0x00000008
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0__S1_Y_MASK 0x0000f000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0__S1_Y__SHIFT 0x0000000c
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0__S2_X_MASK 0x000f0000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0__S2_X__SHIFT 0x00000010
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0__S2_Y_MASK 0x00f00000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0__S2_Y__SHIFT 0x00000014
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0__S3_X_MASK 0x0f000000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0__S3_X__SHIFT 0x00000018
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0__S3_Y_MASK 0xf0000000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0__S3_Y__SHIFT 0x0000001c
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_1__S4_X_MASK 0x0000000fL
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_1__S4_X__SHIFT 0x00000000
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_1__S4_Y_MASK 0x000000f0L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_1__S4_Y__SHIFT 0x00000004
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_1__S5_X_MASK 0x00000f00L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_1__S5_X__SHIFT 0x00000008
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_1__S5_Y_MASK 0x0000f000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_1__S5_Y__SHIFT 0x0000000c
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_1__S6_X_MASK 0x000f0000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_1__S6_X__SHIFT 0x00000010
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_1__S6_Y_MASK 0x00f00000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_1__S6_Y__SHIFT 0x00000014
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_1__S7_X_MASK 0x0f000000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_1__S7_X__SHIFT 0x00000018
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_1__S7_Y_MASK 0xf0000000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_1__S7_Y__SHIFT 0x0000001c
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_2__S10_X_MASK 0x000f0000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_2__S10_X__SHIFT 0x00000010
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_2__S10_Y_MASK 0x00f00000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_2__S10_Y__SHIFT 0x00000014
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_2__S11_X_MASK 0x0f000000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_2__S11_X__SHIFT 0x00000018
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_2__S11_Y_MASK 0xf0000000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_2__S11_Y__SHIFT 0x0000001c
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_2__S8_X_MASK 0x0000000fL
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_2__S8_X__SHIFT 0x00000000
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_2__S8_Y_MASK 0x000000f0L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_2__S8_Y__SHIFT 0x00000004
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_2__S9_X_MASK 0x00000f00L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_2__S9_X__SHIFT 0x00000008
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_2__S9_Y_MASK 0x0000f000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_2__S9_Y__SHIFT 0x0000000c
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_3__S12_X_MASK 0x0000000fL
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_3__S12_X__SHIFT 0x00000000
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_3__S12_Y_MASK 0x000000f0L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_3__S12_Y__SHIFT 0x00000004
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_3__S13_X_MASK 0x00000f00L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_3__S13_X__SHIFT 0x00000008
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_3__S13_Y_MASK 0x0000f000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_3__S13_Y__SHIFT 0x0000000c
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_3__S14_X_MASK 0x000f0000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_3__S14_X__SHIFT 0x00000010
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_3__S14_Y_MASK 0x00f00000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_3__S14_Y__SHIFT 0x00000014
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_3__S15_X_MASK 0x0f000000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_3__S15_X__SHIFT 0x00000018
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_3__S15_Y_MASK 0xf0000000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_3__S15_Y__SHIFT 0x0000001c
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0__S0_X_MASK 0x0000000fL
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0__S0_X__SHIFT 0x00000000
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0__S0_Y_MASK 0x000000f0L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0__S0_Y__SHIFT 0x00000004
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0__S1_X_MASK 0x00000f00L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0__S1_X__SHIFT 0x00000008
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0__S1_Y_MASK 0x0000f000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0__S1_Y__SHIFT 0x0000000c
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0__S2_X_MASK 0x000f0000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0__S2_X__SHIFT 0x00000010
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0__S2_Y_MASK 0x00f00000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0__S2_Y__SHIFT 0x00000014
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0__S3_X_MASK 0x0f000000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0__S3_X__SHIFT 0x00000018
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0__S3_Y_MASK 0xf0000000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0__S3_Y__SHIFT 0x0000001c
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_1__S4_X_MASK 0x0000000fL
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_1__S4_X__SHIFT 0x00000000
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_1__S4_Y_MASK 0x000000f0L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_1__S4_Y__SHIFT 0x00000004
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_1__S5_X_MASK 0x00000f00L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_1__S5_X__SHIFT 0x00000008
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_1__S5_Y_MASK 0x0000f000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_1__S5_Y__SHIFT 0x0000000c
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_1__S6_X_MASK 0x000f0000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_1__S6_X__SHIFT 0x00000010
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_1__S6_Y_MASK 0x00f00000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_1__S6_Y__SHIFT 0x00000014
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_1__S7_X_MASK 0x0f000000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_1__S7_X__SHIFT 0x00000018
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_1__S7_Y_MASK 0xf0000000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_1__S7_Y__SHIFT 0x0000001c
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_2__S10_X_MASK 0x000f0000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_2__S10_X__SHIFT 0x00000010
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_2__S10_Y_MASK 0x00f00000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_2__S10_Y__SHIFT 0x00000014
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_2__S11_X_MASK 0x0f000000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_2__S11_X__SHIFT 0x00000018
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_2__S11_Y_MASK 0xf0000000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_2__S11_Y__SHIFT 0x0000001c
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_2__S8_X_MASK 0x0000000fL
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_2__S8_X__SHIFT 0x00000000
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_2__S8_Y_MASK 0x000000f0L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_2__S8_Y__SHIFT 0x00000004
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_2__S9_X_MASK 0x00000f00L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_2__S9_X__SHIFT 0x00000008
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_2__S9_Y_MASK 0x0000f000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_2__S9_Y__SHIFT 0x0000000c
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_3__S12_X_MASK 0x0000000fL
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_3__S12_X__SHIFT 0x00000000
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_3__S12_Y_MASK 0x000000f0L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_3__S12_Y__SHIFT 0x00000004
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_3__S13_X_MASK 0x00000f00L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_3__S13_X__SHIFT 0x00000008
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_3__S13_Y_MASK 0x0000f000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_3__S13_Y__SHIFT 0x0000000c
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_3__S14_X_MASK 0x000f0000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_3__S14_X__SHIFT 0x00000010
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_3__S14_Y_MASK 0x00f00000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_3__S14_Y__SHIFT 0x00000014
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_3__S15_X_MASK 0x0f000000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_3__S15_X__SHIFT 0x00000018
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_3__S15_Y_MASK 0xf0000000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_3__S15_Y__SHIFT 0x0000001c
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0__S0_X_MASK 0x0000000fL
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0__S0_X__SHIFT 0x00000000
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0__S0_Y_MASK 0x000000f0L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0__S0_Y__SHIFT 0x00000004
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0__S1_X_MASK 0x00000f00L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0__S1_X__SHIFT 0x00000008
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0__S1_Y_MASK 0x0000f000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0__S1_Y__SHIFT 0x0000000c
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0__S2_X_MASK 0x000f0000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0__S2_X__SHIFT 0x00000010
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0__S2_Y_MASK 0x00f00000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0__S2_Y__SHIFT 0x00000014
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0__S3_X_MASK 0x0f000000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0__S3_X__SHIFT 0x00000018
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0__S3_Y_MASK 0xf0000000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0__S3_Y__SHIFT 0x0000001c
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_1__S4_X_MASK 0x0000000fL
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_1__S4_X__SHIFT 0x00000000
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_1__S4_Y_MASK 0x000000f0L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_1__S4_Y__SHIFT 0x00000004
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_1__S5_X_MASK 0x00000f00L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_1__S5_X__SHIFT 0x00000008
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_1__S5_Y_MASK 0x0000f000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_1__S5_Y__SHIFT 0x0000000c
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_1__S6_X_MASK 0x000f0000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_1__S6_X__SHIFT 0x00000010
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_1__S6_Y_MASK 0x00f00000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_1__S6_Y__SHIFT 0x00000014
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_1__S7_X_MASK 0x0f000000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_1__S7_X__SHIFT 0x00000018
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_1__S7_Y_MASK 0xf0000000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_1__S7_Y__SHIFT 0x0000001c
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_2__S10_X_MASK 0x000f0000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_2__S10_X__SHIFT 0x00000010
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_2__S10_Y_MASK 0x00f00000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_2__S10_Y__SHIFT 0x00000014
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_2__S11_X_MASK 0x0f000000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_2__S11_X__SHIFT 0x00000018
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_2__S11_Y_MASK 0xf0000000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_2__S11_Y__SHIFT 0x0000001c
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_2__S8_X_MASK 0x0000000fL
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_2__S8_X__SHIFT 0x00000000
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_2__S8_Y_MASK 0x000000f0L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_2__S8_Y__SHIFT 0x00000004
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_2__S9_X_MASK 0x00000f00L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_2__S9_X__SHIFT 0x00000008
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_2__S9_Y_MASK 0x0000f000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_2__S9_Y__SHIFT 0x0000000c
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_3__S12_X_MASK 0x0000000fL
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_3__S12_X__SHIFT 0x00000000
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_3__S12_Y_MASK 0x000000f0L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_3__S12_Y__SHIFT 0x00000004
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_3__S13_X_MASK 0x00000f00L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_3__S13_X__SHIFT 0x00000008
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_3__S13_Y_MASK 0x0000f000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_3__S13_Y__SHIFT 0x0000000c
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_3__S14_X_MASK 0x000f0000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_3__S14_X__SHIFT 0x00000010
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_3__S14_Y_MASK 0x00f00000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_3__S14_Y__SHIFT 0x00000014
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_3__S15_X_MASK 0x0f000000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_3__S15_X__SHIFT 0x00000018
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_3__S15_Y_MASK 0xf0000000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_3__S15_Y__SHIFT 0x0000001c
+#define PA_SC_CENTROID_PRIORITY_0__DISTANCE_0_MASK 0x0000000fL
+#define PA_SC_CENTROID_PRIORITY_0__DISTANCE_0__SHIFT 0x00000000
+#define PA_SC_CENTROID_PRIORITY_0__DISTANCE_1_MASK 0x000000f0L
+#define PA_SC_CENTROID_PRIORITY_0__DISTANCE_1__SHIFT 0x00000004
+#define PA_SC_CENTROID_PRIORITY_0__DISTANCE_2_MASK 0x00000f00L
+#define PA_SC_CENTROID_PRIORITY_0__DISTANCE_2__SHIFT 0x00000008
+#define PA_SC_CENTROID_PRIORITY_0__DISTANCE_3_MASK 0x0000f000L
+#define PA_SC_CENTROID_PRIORITY_0__DISTANCE_3__SHIFT 0x0000000c
+#define PA_SC_CENTROID_PRIORITY_0__DISTANCE_4_MASK 0x000f0000L
+#define PA_SC_CENTROID_PRIORITY_0__DISTANCE_4__SHIFT 0x00000010
+#define PA_SC_CENTROID_PRIORITY_0__DISTANCE_5_MASK 0x00f00000L
+#define PA_SC_CENTROID_PRIORITY_0__DISTANCE_5__SHIFT 0x00000014
+#define PA_SC_CENTROID_PRIORITY_0__DISTANCE_6_MASK 0x0f000000L
+#define PA_SC_CENTROID_PRIORITY_0__DISTANCE_6__SHIFT 0x00000018
+#define PA_SC_CENTROID_PRIORITY_0__DISTANCE_7_MASK 0xf0000000L
+#define PA_SC_CENTROID_PRIORITY_0__DISTANCE_7__SHIFT 0x0000001c
+#define PA_SC_CENTROID_PRIORITY_1__DISTANCE_10_MASK 0x00000f00L
+#define PA_SC_CENTROID_PRIORITY_1__DISTANCE_10__SHIFT 0x00000008
+#define PA_SC_CENTROID_PRIORITY_1__DISTANCE_11_MASK 0x0000f000L
+#define PA_SC_CENTROID_PRIORITY_1__DISTANCE_11__SHIFT 0x0000000c
+#define PA_SC_CENTROID_PRIORITY_1__DISTANCE_12_MASK 0x000f0000L
+#define PA_SC_CENTROID_PRIORITY_1__DISTANCE_12__SHIFT 0x00000010
+#define PA_SC_CENTROID_PRIORITY_1__DISTANCE_13_MASK 0x00f00000L
+#define PA_SC_CENTROID_PRIORITY_1__DISTANCE_13__SHIFT 0x00000014
+#define PA_SC_CENTROID_PRIORITY_1__DISTANCE_14_MASK 0x0f000000L
+#define PA_SC_CENTROID_PRIORITY_1__DISTANCE_14__SHIFT 0x00000018
+#define PA_SC_CENTROID_PRIORITY_1__DISTANCE_15_MASK 0xf0000000L
+#define PA_SC_CENTROID_PRIORITY_1__DISTANCE_15__SHIFT 0x0000001c
+#define PA_SC_CENTROID_PRIORITY_1__DISTANCE_8_MASK 0x0000000fL
+#define PA_SC_CENTROID_PRIORITY_1__DISTANCE_8__SHIFT 0x00000000
+#define PA_SC_CENTROID_PRIORITY_1__DISTANCE_9_MASK 0x000000f0L
+#define PA_SC_CENTROID_PRIORITY_1__DISTANCE_9__SHIFT 0x00000004
+#define PA_SC_CLIPRECT_0_BR__BR_X_MASK 0x00007fffL
+#define PA_SC_CLIPRECT_0_BR__BR_X__SHIFT 0x00000000
+#define PA_SC_CLIPRECT_0_BR__BR_Y_MASK 0x7fff0000L
+#define PA_SC_CLIPRECT_0_BR__BR_Y__SHIFT 0x00000010
+#define PA_SC_CLIPRECT_0_TL__TL_X_MASK 0x00007fffL
+#define PA_SC_CLIPRECT_0_TL__TL_X__SHIFT 0x00000000
+#define PA_SC_CLIPRECT_0_TL__TL_Y_MASK 0x7fff0000L
+#define PA_SC_CLIPRECT_0_TL__TL_Y__SHIFT 0x00000010
+#define PA_SC_CLIPRECT_1_BR__BR_X_MASK 0x00007fffL
+#define PA_SC_CLIPRECT_1_BR__BR_X__SHIFT 0x00000000
+#define PA_SC_CLIPRECT_1_BR__BR_Y_MASK 0x7fff0000L
+#define PA_SC_CLIPRECT_1_BR__BR_Y__SHIFT 0x00000010
+#define PA_SC_CLIPRECT_1_TL__TL_X_MASK 0x00007fffL
+#define PA_SC_CLIPRECT_1_TL__TL_X__SHIFT 0x00000000
+#define PA_SC_CLIPRECT_1_TL__TL_Y_MASK 0x7fff0000L
+#define PA_SC_CLIPRECT_1_TL__TL_Y__SHIFT 0x00000010
+#define PA_SC_CLIPRECT_2_BR__BR_X_MASK 0x00007fffL
+#define PA_SC_CLIPRECT_2_BR__BR_X__SHIFT 0x00000000
+#define PA_SC_CLIPRECT_2_BR__BR_Y_MASK 0x7fff0000L
+#define PA_SC_CLIPRECT_2_BR__BR_Y__SHIFT 0x00000010
+#define PA_SC_CLIPRECT_2_TL__TL_X_MASK 0x00007fffL
+#define PA_SC_CLIPRECT_2_TL__TL_X__SHIFT 0x00000000
+#define PA_SC_CLIPRECT_2_TL__TL_Y_MASK 0x7fff0000L
+#define PA_SC_CLIPRECT_2_TL__TL_Y__SHIFT 0x00000010
+#define PA_SC_CLIPRECT_3_BR__BR_X_MASK 0x00007fffL
+#define PA_SC_CLIPRECT_3_BR__BR_X__SHIFT 0x00000000
+#define PA_SC_CLIPRECT_3_BR__BR_Y_MASK 0x7fff0000L
+#define PA_SC_CLIPRECT_3_BR__BR_Y__SHIFT 0x00000010
+#define PA_SC_CLIPRECT_3_TL__TL_X_MASK 0x00007fffL
+#define PA_SC_CLIPRECT_3_TL__TL_X__SHIFT 0x00000000
+#define PA_SC_CLIPRECT_3_TL__TL_Y_MASK 0x7fff0000L
+#define PA_SC_CLIPRECT_3_TL__TL_Y__SHIFT 0x00000010
+#define PA_SC_CLIPRECT_RULE__CLIP_RULE_MASK 0x0000ffffL
+#define PA_SC_CLIPRECT_RULE__CLIP_RULE__SHIFT 0x00000000
+#define PA_SC_DEBUG_CNTL__SC_DEBUG_INDX_MASK 0x0000003fL
+#define PA_SC_DEBUG_CNTL__SC_DEBUG_INDX__SHIFT 0x00000000
+#define PA_SC_DEBUG_DATA__DATA_MASK 0xffffffffL
+#define PA_SC_DEBUG_DATA__DATA__SHIFT 0x00000000
+#define PA_SC_DEBUG_REG0__REG0_FIELD0_MASK 0x00000003L
+#define PA_SC_DEBUG_REG0__REG0_FIELD0__SHIFT 0x00000000
+#define PA_SC_DEBUG_REG0__REG0_FIELD1_MASK 0x0000000cL
+#define PA_SC_DEBUG_REG0__REG0_FIELD1__SHIFT 0x00000002
+#define PA_SC_DEBUG_REG1__REG1_FIELD0_MASK 0x00000003L
+#define PA_SC_DEBUG_REG1__REG1_FIELD0__SHIFT 0x00000000
+#define PA_SC_DEBUG_REG1__REG1_FIELD1_MASK 0x0000000cL
+#define PA_SC_DEBUG_REG1__REG1_FIELD1__SHIFT 0x00000002
+#define PA_SC_EDGERULE__ER_LINE_BT_MASK 0xf0000000L
+#define PA_SC_EDGERULE__ER_LINE_BT__SHIFT 0x0000001c
+#define PA_SC_EDGERULE__ER_LINE_LR_MASK 0x0003f000L
+#define PA_SC_EDGERULE__ER_LINE_LR__SHIFT 0x0000000c
+#define PA_SC_EDGERULE__ER_LINE_RL_MASK 0x00fc0000L
+#define PA_SC_EDGERULE__ER_LINE_RL__SHIFT 0x00000012
+#define PA_SC_EDGERULE__ER_LINE_TB_MASK 0x0f000000L
+#define PA_SC_EDGERULE__ER_LINE_TB__SHIFT 0x00000018
+#define PA_SC_EDGERULE__ER_POINT_MASK 0x000000f0L
+#define PA_SC_EDGERULE__ER_POINT__SHIFT 0x00000004
+#define PA_SC_EDGERULE__ER_RECT_MASK 0x00000f00L
+#define PA_SC_EDGERULE__ER_RECT__SHIFT 0x00000008
+#define PA_SC_EDGERULE__ER_TRI_MASK 0x0000000fL
+#define PA_SC_EDGERULE__ER_TRI__SHIFT 0x00000000
+#define PA_SC_ENHANCE__DISABLE_AA_MASK_FULL_FIX_MASK 0x00000004L
+#define PA_SC_ENHANCE__DISABLE_AA_MASK_FULL_FIX__SHIFT 0x00000002
+#define PA_SC_ENHANCE__DISABLE_DUALGRAD_PERF_OPTIMIZATION_MASK 0x00000200L
+#define PA_SC_ENHANCE__DISABLE_DUALGRAD_PERF_OPTIMIZATION__SHIFT 0x00000009
+#define PA_SC_ENHANCE__DISABLE_EOV_ALL_CTRL_ONLY_COMBINATIONS_MASK 0x00004000L
+#define PA_SC_ENHANCE__DISABLE_EOV_ALL_CTRL_ONLY_COMBINATIONS__SHIFT 0x0000000e
+#define PA_SC_ENHANCE__DISABLE_OUT_OF_ORDER_DESIRED_FIFO_EMPTY_SWITCHING_MASK 0x00200000L
+#define PA_SC_ENHANCE__DISABLE_OUT_OF_ORDER_DESIRED_FIFO_EMPTY_SWITCHING__SHIFT 0x00000015
+#define PA_SC_ENHANCE__DISABLE_OUT_OF_ORDER_EMPTY_SWITCHING_HYSTERYSIS_MASK 0x00800000L
+#define PA_SC_ENHANCE__DISABLE_OUT_OF_ORDER_EMPTY_SWITCHING_HYSTERYSIS__SHIFT 0x00000017
+#define PA_SC_ENHANCE__DISABLE_OUT_OF_ORDER_EOP_SYNC_NULL_PRIMS_LAST_MASK 0x00040000L
+#define PA_SC_ENHANCE__DISABLE_OUT_OF_ORDER_EOP_SYNC_NULL_PRIMS_LAST__SHIFT 0x00000012
+#define PA_SC_ENHANCE__DISABLE_OUT_OF_ORDER_PA_SC_GUIDANCE_MASK 0x00010000L
+#define PA_SC_ENHANCE__DISABLE_OUT_OF_ORDER_PA_SC_GUIDANCE__SHIFT 0x00000010
+#define PA_SC_ENHANCE__DISABLE_OUT_OF_ORDER_SELECTED_FIFO_EMPTY_SWITCHING_MASK 0x00400000L
+#define PA_SC_ENHANCE__DISABLE_OUT_OF_ORDER_SELECTED_FIFO_EMPTY_SWITCHING__SHIFT 0x00000016
+#define PA_SC_ENHANCE__DISABLE_OUT_OF_ORDER_THRESHOLD_SWITCHING_MASK 0x00080000L
+#define PA_SC_ENHANCE__DISABLE_OUT_OF_ORDER_THRESHOLD_SWITCHING__SHIFT 0x00000013
+#define PA_SC_ENHANCE__DISABLE_PA_SC_GUIDANCE_MASK 0x00002000L
+#define PA_SC_ENHANCE__DISABLE_PA_SC_GUIDANCE__SHIFT 0x0000000d
+#define PA_SC_ENHANCE__DISABLE_PW_BUBBLE_COLLAPSE_MASK 0x000000c0L
+#define PA_SC_ENHANCE__DISABLE_PW_BUBBLE_COLLAPSE__SHIFT 0x00000006
+#define PA_SC_ENHANCE__DISABLE_SC_DB_TILE_FIX_MASK 0x00000002L
+#define PA_SC_ENHANCE__DISABLE_SC_DB_TILE_FIX__SHIFT 0x00000001
+#define PA_SC_ENHANCE__DISABLE_SCISSOR_FIX_MASK 0x00000020L
+#define PA_SC_ENHANCE__DISABLE_SCISSOR_FIX__SHIFT 0x00000005
+#define PA_SC_ENHANCE__DISABLE_SC_PROCESS_RESET_PRIM_MASK 0x00000400L
+#define PA_SC_ENHANCE__DISABLE_SC_PROCESS_RESET_PRIM__SHIFT 0x0000000a
+#define PA_SC_ENHANCE__DISABLE_SC_PROCESS_RESET_SUPERTILE_MASK 0x00000800L
+#define PA_SC_ENHANCE__DISABLE_SC_PROCESS_RESET_SUPERTILE__SHIFT 0x0000000b
+#define PA_SC_ENHANCE__DISABLE_SC_PROCESS_RESET_TILE_MASK 0x00001000L
+#define PA_SC_ENHANCE__DISABLE_SC_PROCESS_RESET_TILE__SHIFT 0x0000000c
+#define PA_SC_ENHANCE__ECO_SPARE0_MASK 0x80000000L
+#define PA_SC_ENHANCE__ECO_SPARE0__SHIFT 0x0000001f
+#define PA_SC_ENHANCE__ECO_SPARE1_MASK 0x40000000L
+#define PA_SC_ENHANCE__ECO_SPARE1__SHIFT 0x0000001e
+#define PA_SC_ENHANCE__ENABLE_1XMSAA_SAMPLE_LOCATIONS_MASK 0x00000008L
+#define PA_SC_ENHANCE__ENABLE_1XMSAA_SAMPLE_LOCATIONS__SHIFT 0x00000003
+#define PA_SC_ENHANCE__ENABLE_1XMSAA_SAMPLE_LOC_CENTROID_MASK 0x00000010L
+#define PA_SC_ENHANCE__ENABLE_1XMSAA_SAMPLE_LOC_CENTROID__SHIFT 0x00000004
+#define PA_SC_ENHANCE__ENABLE_MULTICYCLE_BUBBLE_FREEZE_MASK 0x00008000L
+#define PA_SC_ENHANCE__ENABLE_MULTICYCLE_BUBBLE_FREEZE__SHIFT 0x0000000f
+#define PA_SC_ENHANCE__ENABLE_OUT_OF_ORDER_DESIRED_FIFO_IS_NEXT_FEID_MASK 0x01000000L
+#define PA_SC_ENHANCE__ENABLE_OUT_OF_ORDER_DESIRED_FIFO_IS_NEXT_FEID__SHIFT 0x00000018
+#define PA_SC_ENHANCE__ENABLE_OUT_OF_ORDER_POLY_MODE_MASK 0x00020000L
+#define PA_SC_ENHANCE__ENABLE_OUT_OF_ORDER_POLY_MODE__SHIFT 0x00000011
+#define PA_SC_ENHANCE__ENABLE_OUT_OF_ORDER_THRESHOLD_SWITCH_AT_EOPG_ONLY_MASK 0x00100000L
+#define PA_SC_ENHANCE__ENABLE_OUT_OF_ORDER_THRESHOLD_SWITCH_AT_EOPG_ONLY__SHIFT 0x00000014
+#define PA_SC_ENHANCE__ENABLE_PA_SC_OUT_OF_ORDER_MASK 0x00000001L
+#define PA_SC_ENHANCE__ENABLE_PA_SC_OUT_OF_ORDER__SHIFT 0x00000000
+#define PA_SC_ENHANCE__SEND_UNLIT_STILES_TO_PACKER_MASK 0x00000100L
+#define PA_SC_ENHANCE__SEND_UNLIT_STILES_TO_PACKER__SHIFT 0x00000008
+#define PA_SC_FIFO_DEPTH_CNTL__DEPTH_MASK 0x000000ffL
+#define PA_SC_FIFO_DEPTH_CNTL__DEPTH__SHIFT 0x00000000
+#define PA_SC_FIFO_SIZE__SC_BACKEND_PRIM_FIFO_SIZE_MASK 0x00007fc0L
+#define PA_SC_FIFO_SIZE__SC_BACKEND_PRIM_FIFO_SIZE__SHIFT 0x00000006
+#define PA_SC_FIFO_SIZE__SC_EARLYZ_TILE_FIFO_SIZE_MASK 0xff800000L
+#define PA_SC_FIFO_SIZE__SC_EARLYZ_TILE_FIFO_SIZE__SHIFT 0x00000017
+#define PA_SC_FIFO_SIZE__SC_FRONTEND_PRIM_FIFO_SIZE_MASK 0x0000003fL
+#define PA_SC_FIFO_SIZE__SC_FRONTEND_PRIM_FIFO_SIZE__SHIFT 0x00000000
+#define PA_SC_FIFO_SIZE__SC_HIZ_TILE_FIFO_SIZE_MASK 0x001f8000L
+#define PA_SC_FIFO_SIZE__SC_HIZ_TILE_FIFO_SIZE__SHIFT 0x0000000f
+#define PA_SC_FORCE_EOV_MAX_CNTS__FORCE_EOV_MAX_CLK_CNT_MASK 0x0000ffffL
+#define PA_SC_FORCE_EOV_MAX_CNTS__FORCE_EOV_MAX_CLK_CNT__SHIFT 0x00000000
+#define PA_SC_FORCE_EOV_MAX_CNTS__FORCE_EOV_MAX_REZ_CNT_MASK 0xffff0000L
+#define PA_SC_FORCE_EOV_MAX_CNTS__FORCE_EOV_MAX_REZ_CNT__SHIFT 0x00000010
+#define PA_SC_GENERIC_SCISSOR_BR__BR_X_MASK 0x00007fffL
+#define PA_SC_GENERIC_SCISSOR_BR__BR_X__SHIFT 0x00000000
+#define PA_SC_GENERIC_SCISSOR_BR__BR_Y_MASK 0x7fff0000L
+#define PA_SC_GENERIC_SCISSOR_BR__BR_Y__SHIFT 0x00000010
+#define PA_SC_GENERIC_SCISSOR_TL__TL_X_MASK 0x00007fffL
+#define PA_SC_GENERIC_SCISSOR_TL__TL_X__SHIFT 0x00000000
+#define PA_SC_GENERIC_SCISSOR_TL__TL_Y_MASK 0x7fff0000L
+#define PA_SC_GENERIC_SCISSOR_TL__TL_Y__SHIFT 0x00000010
+#define PA_SC_GENERIC_SCISSOR_TL__WINDOW_OFFSET_DISABLE_MASK 0x80000000L
+#define PA_SC_GENERIC_SCISSOR_TL__WINDOW_OFFSET_DISABLE__SHIFT 0x0000001f
+#define PA_SC_IF_FIFO_SIZE__SC_BCI_IF_FIFO_SIZE_MASK 0x00fc0000L
+#define PA_SC_IF_FIFO_SIZE__SC_BCI_IF_FIFO_SIZE__SHIFT 0x00000012
+#define PA_SC_IF_FIFO_SIZE__SC_DB_QUAD_IF_FIFO_SIZE_MASK 0x00000fc0L
+#define PA_SC_IF_FIFO_SIZE__SC_DB_QUAD_IF_FIFO_SIZE__SHIFT 0x00000006
+#define PA_SC_IF_FIFO_SIZE__SC_DB_TILE_IF_FIFO_SIZE_MASK 0x0000003fL
+#define PA_SC_IF_FIFO_SIZE__SC_DB_TILE_IF_FIFO_SIZE__SHIFT 0x00000000
+#define PA_SC_IF_FIFO_SIZE__SC_SPI_IF_FIFO_SIZE_MASK 0x0003f000L
+#define PA_SC_IF_FIFO_SIZE__SC_SPI_IF_FIFO_SIZE__SHIFT 0x0000000c
+#define PA_SC_LINE_CNTL__DX10_DIAMOND_TEST_ENA_MASK 0x00001000L
+#define PA_SC_LINE_CNTL__DX10_DIAMOND_TEST_ENA__SHIFT 0x0000000c
+#define PA_SC_LINE_CNTL__EXPAND_LINE_WIDTH_MASK 0x00000200L
+#define PA_SC_LINE_CNTL__EXPAND_LINE_WIDTH__SHIFT 0x00000009
+#define PA_SC_LINE_CNTL__LAST_PIXEL_MASK 0x00000400L
+#define PA_SC_LINE_CNTL__LAST_PIXEL__SHIFT 0x0000000a
+#define PA_SC_LINE_CNTL__PERPENDICULAR_ENDCAP_ENA_MASK 0x00000800L
+#define PA_SC_LINE_CNTL__PERPENDICULAR_ENDCAP_ENA__SHIFT 0x0000000b
+#define PA_SC_LINE_STIPPLE__AUTO_RESET_CNTL_MASK 0x60000000L
+#define PA_SC_LINE_STIPPLE__AUTO_RESET_CNTL__SHIFT 0x0000001d
+#define PA_SC_LINE_STIPPLE__LINE_PATTERN_MASK 0x0000ffffL
+#define PA_SC_LINE_STIPPLE__LINE_PATTERN__SHIFT 0x00000000
+#define PA_SC_LINE_STIPPLE__PATTERN_BIT_ORDER_MASK 0x10000000L
+#define PA_SC_LINE_STIPPLE__PATTERN_BIT_ORDER__SHIFT 0x0000001c
+#define PA_SC_LINE_STIPPLE__REPEAT_COUNT_MASK 0x00ff0000L
+#define PA_SC_LINE_STIPPLE__REPEAT_COUNT__SHIFT 0x00000010
+#define PA_SC_LINE_STIPPLE_STATE__CURRENT_COUNT_MASK 0x0000ff00L
+#define PA_SC_LINE_STIPPLE_STATE__CURRENT_COUNT__SHIFT 0x00000008
+#define PA_SC_LINE_STIPPLE_STATE__CURRENT_PTR_MASK 0x0000000fL
+#define PA_SC_LINE_STIPPLE_STATE__CURRENT_PTR__SHIFT 0x00000000
+#define PA_SC_MODE_CNTL_0__LINE_STIPPLE_ENABLE_MASK 0x00000004L
+#define PA_SC_MODE_CNTL_0__LINE_STIPPLE_ENABLE__SHIFT 0x00000002
+#define PA_SC_MODE_CNTL_0__MSAA_ENABLE_MASK 0x00000001L
+#define PA_SC_MODE_CNTL_0__MSAA_ENABLE__SHIFT 0x00000000
+#define PA_SC_MODE_CNTL_0__SEND_UNLIT_STILES_TO_PKR_MASK 0x00000008L
+#define PA_SC_MODE_CNTL_0__SEND_UNLIT_STILES_TO_PKR__SHIFT 0x00000003
+#define PA_SC_MODE_CNTL_0__VPORT_SCISSOR_ENABLE_MASK 0x00000002L
+#define PA_SC_MODE_CNTL_0__VPORT_SCISSOR_ENABLE__SHIFT 0x00000001
+#define PA_SC_MODE_CNTL_1__FORCE_EOV_CNTDWN_ENABLE_MASK 0x02000000L
+#define PA_SC_MODE_CNTL_1__FORCE_EOV_CNTDWN_ENABLE__SHIFT 0x00000019
+#define PA_SC_MODE_CNTL_1__FORCE_EOV_REZ_ENABLE_MASK 0x04000000L
+#define PA_SC_MODE_CNTL_1__FORCE_EOV_REZ_ENABLE__SHIFT 0x0000001a
+#define PA_SC_MODE_CNTL_1__GPU_ID_OVERRIDE_ENABLE_MASK 0x00080000L
+#define PA_SC_MODE_CNTL_1__GPU_ID_OVERRIDE_ENABLE__SHIFT 0x00000013
+#define PA_SC_MODE_CNTL_1__GPU_ID_OVERRIDE_MASK 0x00f00000L
+#define PA_SC_MODE_CNTL_1__GPU_ID_OVERRIDE__SHIFT 0x00000014
+#define PA_SC_MODE_CNTL_1__KILL_PIX_POST_DETAIL_MASK_MASK 0x00008000L
+#define PA_SC_MODE_CNTL_1__KILL_PIX_POST_DETAIL_MASK__SHIFT 0x0000000f
+#define PA_SC_MODE_CNTL_1__KILL_PIX_POST_HI_Z_MASK 0x00004000L
+#define PA_SC_MODE_CNTL_1__KILL_PIX_POST_HI_Z__SHIFT 0x0000000e
+#define PA_SC_MODE_CNTL_1__MULTI_GPU_PRIM_DISCARD_ENABLE_MASK 0x01000000L
+#define PA_SC_MODE_CNTL_1__MULTI_GPU_PRIM_DISCARD_ENABLE__SHIFT 0x00000018
+#define PA_SC_MODE_CNTL_1__MULTI_GPU_SUPERTILE_ENABLE_MASK 0x00040000L
+#define PA_SC_MODE_CNTL_1__MULTI_GPU_SUPERTILE_ENABLE__SHIFT 0x00000012
+#define PA_SC_MODE_CNTL_1__MULTI_SHADER_ENGINE_PRIM_DISCARD_ENABLE_MASK 0x00020000L
+#define PA_SC_MODE_CNTL_1__MULTI_SHADER_ENGINE_PRIM_DISCARD_ENABLE__SHIFT 0x00000011
+#define PA_SC_MODE_CNTL_1__OUT_OF_ORDER_PRIMITIVE_ENABLE_MASK 0x08000000L
+#define PA_SC_MODE_CNTL_1__OUT_OF_ORDER_PRIMITIVE_ENABLE__SHIFT 0x0000001b
+#define PA_SC_MODE_CNTL_1__OUT_OF_ORDER_WATER_MARK_MASK 0x70000000L
+#define PA_SC_MODE_CNTL_1__OUT_OF_ORDER_WATER_MARK__SHIFT 0x0000001c
+#define PA_SC_MODE_CNTL_1__PS_ITER_SAMPLE_MASK 0x00010000L
+#define PA_SC_MODE_CNTL_1__PS_ITER_SAMPLE__SHIFT 0x00000010
+#define PA_SC_MODE_CNTL_1__SUPERTILE_WALK_ORDER_ENABLE_MASK 0x00000080L
+#define PA_SC_MODE_CNTL_1__SUPERTILE_WALK_ORDER_ENABLE__SHIFT 0x00000007
+#define PA_SC_MODE_CNTL_1__TILE_COVER_DISABLE_MASK 0x00000200L
+#define PA_SC_MODE_CNTL_1__TILE_COVER_DISABLE__SHIFT 0x00000009
+#define PA_SC_MODE_CNTL_1__TILE_COVER_NO_SCISSOR_MASK 0x00000400L
+#define PA_SC_MODE_CNTL_1__TILE_COVER_NO_SCISSOR__SHIFT 0x0000000a
+#define PA_SC_MODE_CNTL_1__TILE_WALK_ORDER_ENABLE_MASK 0x00000100L
+#define PA_SC_MODE_CNTL_1__TILE_WALK_ORDER_ENABLE__SHIFT 0x00000008
+#define PA_SC_MODE_CNTL_1__WALK_ALIGN8_PRIM_FITS_ST_MASK 0x00000004L
+#define PA_SC_MODE_CNTL_1__WALK_ALIGN8_PRIM_FITS_ST__SHIFT 0x00000002
+#define PA_SC_MODE_CNTL_1__WALK_ALIGNMENT_MASK 0x00000002L
+#define PA_SC_MODE_CNTL_1__WALK_ALIGNMENT__SHIFT 0x00000001
+#define PA_SC_MODE_CNTL_1__WALK_FENCE_ENABLE_MASK 0x00000008L
+#define PA_SC_MODE_CNTL_1__WALK_FENCE_ENABLE__SHIFT 0x00000003
+#define PA_SC_MODE_CNTL_1__WALK_FENCE_SIZE_MASK 0x00000070L
+#define PA_SC_MODE_CNTL_1__WALK_FENCE_SIZE__SHIFT 0x00000004
+#define PA_SC_MODE_CNTL_1__WALK_SIZE_MASK 0x00000001L
+#define PA_SC_MODE_CNTL_1__WALK_SIZE__SHIFT 0x00000000
+#define PA_SC_MODE_CNTL_1__ZMM_LINE_EXTENT_MASK 0x00000800L
+#define PA_SC_MODE_CNTL_1__ZMM_LINE_EXTENT__SHIFT 0x0000000b
+#define PA_SC_MODE_CNTL_1__ZMM_LINE_OFFSET_MASK 0x00001000L
+#define PA_SC_MODE_CNTL_1__ZMM_LINE_OFFSET__SHIFT 0x0000000c
+#define PA_SC_MODE_CNTL_1__ZMM_RECT_EXTENT_MASK 0x00002000L
+#define PA_SC_MODE_CNTL_1__ZMM_RECT_EXTENT__SHIFT 0x0000000d
+#define PA_SC_PERFCOUNTER0_HI__PERFCOUNTER_HI_MASK 0xffffffffL
+#define PA_SC_PERFCOUNTER0_HI__PERFCOUNTER_HI__SHIFT 0x00000000
+#define PA_SC_PERFCOUNTER0_LO__PERFCOUNTER_LO_MASK 0xffffffffL
+#define PA_SC_PERFCOUNTER0_LO__PERFCOUNTER_LO__SHIFT 0x00000000
+#define PA_SC_PERFCOUNTER0_SELECT1__PERF_SEL2_MASK 0x000003ffL
+#define PA_SC_PERFCOUNTER0_SELECT1__PERF_SEL2__SHIFT 0x00000000
+#define PA_SC_PERFCOUNTER0_SELECT1__PERF_SEL3_MASK 0x000ffc00L
+#define PA_SC_PERFCOUNTER0_SELECT1__PERF_SEL3__SHIFT 0x0000000a
+#define PA_SC_PERFCOUNTER0_SELECT__CNTR_MODE_MASK 0x00f00000L
+#define PA_SC_PERFCOUNTER0_SELECT__CNTR_MODE__SHIFT 0x00000014
+#define PA_SC_PERFCOUNTER0_SELECT__PERF_SEL1_MASK 0x000ffc00L
+#define PA_SC_PERFCOUNTER0_SELECT__PERF_SEL1__SHIFT 0x0000000a
+#define PA_SC_PERFCOUNTER0_SELECT__PERF_SEL_MASK 0x000001ffL
+#define PA_SC_PERFCOUNTER0_SELECT__PERF_SEL__SHIFT 0x00000000
+#define PA_SC_PERFCOUNTER1_HI__PERFCOUNTER_HI_MASK 0xffffffffL
+#define PA_SC_PERFCOUNTER1_HI__PERFCOUNTER_HI__SHIFT 0x00000000
+#define PA_SC_PERFCOUNTER1_LO__PERFCOUNTER_LO_MASK 0xffffffffL
+#define PA_SC_PERFCOUNTER1_LO__PERFCOUNTER_LO__SHIFT 0x00000000
+#define PA_SC_PERFCOUNTER1_SELECT__PERF_SEL_MASK 0x000001ffL
+#define PA_SC_PERFCOUNTER1_SELECT__PERF_SEL__SHIFT 0x00000000
+#define PA_SC_PERFCOUNTER2_HI__PERFCOUNTER_HI_MASK 0xffffffffL
+#define PA_SC_PERFCOUNTER2_HI__PERFCOUNTER_HI__SHIFT 0x00000000
+#define PA_SC_PERFCOUNTER2_LO__PERFCOUNTER_LO_MASK 0xffffffffL
+#define PA_SC_PERFCOUNTER2_LO__PERFCOUNTER_LO__SHIFT 0x00000000
+#define PA_SC_PERFCOUNTER2_SELECT__PERF_SEL_MASK 0x000001ffL
+#define PA_SC_PERFCOUNTER2_SELECT__PERF_SEL__SHIFT 0x00000000
+#define PA_SC_PERFCOUNTER3_HI__PERFCOUNTER_HI_MASK 0xffffffffL
+#define PA_SC_PERFCOUNTER3_HI__PERFCOUNTER_HI__SHIFT 0x00000000
+#define PA_SC_PERFCOUNTER3_LO__PERFCOUNTER_LO_MASK 0xffffffffL
+#define PA_SC_PERFCOUNTER3_LO__PERFCOUNTER_LO__SHIFT 0x00000000
+#define PA_SC_PERFCOUNTER3_SELECT__PERF_SEL_MASK 0x000001ffL
+#define PA_SC_PERFCOUNTER3_SELECT__PERF_SEL__SHIFT 0x00000000
+#define PA_SC_PERFCOUNTER4_HI__PERFCOUNTER_HI_MASK 0xffffffffL
+#define PA_SC_PERFCOUNTER4_HI__PERFCOUNTER_HI__SHIFT 0x00000000
+#define PA_SC_PERFCOUNTER4_LO__PERFCOUNTER_LO_MASK 0xffffffffL
+#define PA_SC_PERFCOUNTER4_LO__PERFCOUNTER_LO__SHIFT 0x00000000
+#define PA_SC_PERFCOUNTER4_SELECT__PERF_SEL_MASK 0x000001ffL
+#define PA_SC_PERFCOUNTER4_SELECT__PERF_SEL__SHIFT 0x00000000
+#define PA_SC_PERFCOUNTER5_HI__PERFCOUNTER_HI_MASK 0xffffffffL
+#define PA_SC_PERFCOUNTER5_HI__PERFCOUNTER_HI__SHIFT 0x00000000
+#define PA_SC_PERFCOUNTER5_LO__PERFCOUNTER_LO_MASK 0xffffffffL
+#define PA_SC_PERFCOUNTER5_LO__PERFCOUNTER_LO__SHIFT 0x00000000
+#define PA_SC_PERFCOUNTER5_SELECT__PERF_SEL_MASK 0x000001ffL
+#define PA_SC_PERFCOUNTER5_SELECT__PERF_SEL__SHIFT 0x00000000
+#define PA_SC_PERFCOUNTER6_HI__PERFCOUNTER_HI_MASK 0xffffffffL
+#define PA_SC_PERFCOUNTER6_HI__PERFCOUNTER_HI__SHIFT 0x00000000
+#define PA_SC_PERFCOUNTER6_LO__PERFCOUNTER_LO_MASK 0xffffffffL
+#define PA_SC_PERFCOUNTER6_LO__PERFCOUNTER_LO__SHIFT 0x00000000
+#define PA_SC_PERFCOUNTER6_SELECT__PERF_SEL_MASK 0x000001ffL
+#define PA_SC_PERFCOUNTER6_SELECT__PERF_SEL__SHIFT 0x00000000
+#define PA_SC_PERFCOUNTER7_HI__PERFCOUNTER_HI_MASK 0xffffffffL
+#define PA_SC_PERFCOUNTER7_HI__PERFCOUNTER_HI__SHIFT 0x00000000
+#define PA_SC_PERFCOUNTER7_LO__PERFCOUNTER_LO_MASK 0xffffffffL
+#define PA_SC_PERFCOUNTER7_LO__PERFCOUNTER_LO__SHIFT 0x00000000
+#define PA_SC_PERFCOUNTER7_SELECT__PERF_SEL_MASK 0x000001ffL
+#define PA_SC_PERFCOUNTER7_SELECT__PERF_SEL__SHIFT 0x00000000
+#define PA_SC_RASTER_CONFIG_1__SE_PAIR_MAP_MASK 0x00000003L
+#define PA_SC_RASTER_CONFIG_1__SE_PAIR_MAP__SHIFT 0x00000000
+#define PA_SC_RASTER_CONFIG_1__SE_PAIR_XSEL_MASK 0x0000000cL
+#define PA_SC_RASTER_CONFIG_1__SE_PAIR_XSEL__SHIFT 0x00000002
+#define PA_SC_RASTER_CONFIG_1__SE_PAIR_YSEL_MASK 0x00000030L
+#define PA_SC_RASTER_CONFIG_1__SE_PAIR_YSEL__SHIFT 0x00000004
+#define PA_SC_RASTER_CONFIG__PKR_MAP_MASK 0x00000300L
+#define PA_SC_RASTER_CONFIG__PKR_MAP__SHIFT 0x00000008
+#define PA_SC_RASTER_CONFIG__PKR_XSEL2_MASK 0x0000c000L
+#define PA_SC_RASTER_CONFIG__PKR_XSEL2__SHIFT 0x0000000e
+#define PA_SC_RASTER_CONFIG__PKR_XSEL_MASK 0x00000c00L
+#define PA_SC_RASTER_CONFIG__PKR_XSEL__SHIFT 0x0000000a
+#define PA_SC_RASTER_CONFIG__PKR_YSEL_MASK 0x00003000L
+#define PA_SC_RASTER_CONFIG__PKR_YSEL__SHIFT 0x0000000c
+#define PA_SC_RASTER_CONFIG__RB_MAP_PKR0_MASK 0x00000003L
+#define PA_SC_RASTER_CONFIG__RB_MAP_PKR0__SHIFT 0x00000000
+#define PA_SC_RASTER_CONFIG__RB_MAP_PKR1_MASK 0x0000000cL
+#define PA_SC_RASTER_CONFIG__RB_MAP_PKR1__SHIFT 0x00000002
+#define PA_SC_RASTER_CONFIG__RB_XSEL2_MASK 0x00000030L
+#define PA_SC_RASTER_CONFIG__RB_XSEL2__SHIFT 0x00000004
+#define PA_SC_RASTER_CONFIG__RB_XSEL_MASK 0x00000040L
+#define PA_SC_RASTER_CONFIG__RB_XSEL__SHIFT 0x00000006
+#define PA_SC_RASTER_CONFIG__RB_YSEL_MASK 0x00000080L
+#define PA_SC_RASTER_CONFIG__RB_YSEL__SHIFT 0x00000007
+#define PA_SC_RASTER_CONFIG__SC_MAP_MASK 0x00030000L
+#define PA_SC_RASTER_CONFIG__SC_MAP__SHIFT 0x00000010
+#define PA_SC_RASTER_CONFIG__SC_XSEL_MASK 0x000c0000L
+#define PA_SC_RASTER_CONFIG__SC_XSEL__SHIFT 0x00000012
+#define PA_SC_RASTER_CONFIG__SC_YSEL_MASK 0x00300000L
+#define PA_SC_RASTER_CONFIG__SC_YSEL__SHIFT 0x00000014
+#define PA_SC_RASTER_CONFIG__SE_MAP_MASK 0x03000000L
+#define PA_SC_RASTER_CONFIG__SE_MAP__SHIFT 0x00000018
+#define PA_SC_RASTER_CONFIG__SE_XSEL_MASK 0x0c000000L
+#define PA_SC_RASTER_CONFIG__SE_XSEL__SHIFT 0x0000001a
+#define PA_SC_RASTER_CONFIG__SE_YSEL_MASK 0x30000000L
+#define PA_SC_RASTER_CONFIG__SE_YSEL__SHIFT 0x0000001c
+#define PA_SC_SCREEN_SCISSOR_BR__BR_X_MASK 0x0000ffffL
+#define PA_SC_SCREEN_SCISSOR_BR__BR_X__SHIFT 0x00000000
+#define PA_SC_SCREEN_SCISSOR_BR__BR_Y_MASK 0xffff0000L
+#define PA_SC_SCREEN_SCISSOR_BR__BR_Y__SHIFT 0x00000010
+#define PA_SC_SCREEN_SCISSOR_TL__TL_X_MASK 0x0000ffffL
+#define PA_SC_SCREEN_SCISSOR_TL__TL_X__SHIFT 0x00000000
+#define PA_SC_SCREEN_SCISSOR_TL__TL_Y_MASK 0xffff0000L
+#define PA_SC_SCREEN_SCISSOR_TL__TL_Y__SHIFT 0x00000010
+#define PA_SC_VPORT_SCISSOR_0_BR__BR_X_MASK 0x00007fffL
+#define PA_SC_VPORT_SCISSOR_0_BR__BR_X__SHIFT 0x00000000
+#define PA_SC_VPORT_SCISSOR_0_BR__BR_Y_MASK 0x7fff0000L
+#define PA_SC_VPORT_SCISSOR_0_BR__BR_Y__SHIFT 0x00000010
+#define PA_SC_VPORT_SCISSOR_0_TL__TL_X_MASK 0x00007fffL
+#define PA_SC_VPORT_SCISSOR_0_TL__TL_X__SHIFT 0x00000000
+#define PA_SC_VPORT_SCISSOR_0_TL__TL_Y_MASK 0x7fff0000L
+#define PA_SC_VPORT_SCISSOR_0_TL__TL_Y__SHIFT 0x00000010
+#define PA_SC_VPORT_SCISSOR_0_TL__WINDOW_OFFSET_DISABLE_MASK 0x80000000L
+#define PA_SC_VPORT_SCISSOR_0_TL__WINDOW_OFFSET_DISABLE__SHIFT 0x0000001f
+#define PA_SC_VPORT_SCISSOR_10_BR__BR_X_MASK 0x00007fffL
+#define PA_SC_VPORT_SCISSOR_10_BR__BR_X__SHIFT 0x00000000
+#define PA_SC_VPORT_SCISSOR_10_BR__BR_Y_MASK 0x7fff0000L
+#define PA_SC_VPORT_SCISSOR_10_BR__BR_Y__SHIFT 0x00000010
+#define PA_SC_VPORT_SCISSOR_10_TL__TL_X_MASK 0x00007fffL
+#define PA_SC_VPORT_SCISSOR_10_TL__TL_X__SHIFT 0x00000000
+#define PA_SC_VPORT_SCISSOR_10_TL__TL_Y_MASK 0x7fff0000L
+#define PA_SC_VPORT_SCISSOR_10_TL__TL_Y__SHIFT 0x00000010
+#define PA_SC_VPORT_SCISSOR_10_TL__WINDOW_OFFSET_DISABLE_MASK 0x80000000L
+#define PA_SC_VPORT_SCISSOR_10_TL__WINDOW_OFFSET_DISABLE__SHIFT 0x0000001f
+#define PA_SC_VPORT_SCISSOR_11_BR__BR_X_MASK 0x00007fffL
+#define PA_SC_VPORT_SCISSOR_11_BR__BR_X__SHIFT 0x00000000
+#define PA_SC_VPORT_SCISSOR_11_BR__BR_Y_MASK 0x7fff0000L
+#define PA_SC_VPORT_SCISSOR_11_BR__BR_Y__SHIFT 0x00000010
+#define PA_SC_VPORT_SCISSOR_11_TL__TL_X_MASK 0x00007fffL
+#define PA_SC_VPORT_SCISSOR_11_TL__TL_X__SHIFT 0x00000000
+#define PA_SC_VPORT_SCISSOR_11_TL__TL_Y_MASK 0x7fff0000L
+#define PA_SC_VPORT_SCISSOR_11_TL__TL_Y__SHIFT 0x00000010
+#define PA_SC_VPORT_SCISSOR_11_TL__WINDOW_OFFSET_DISABLE_MASK 0x80000000L
+#define PA_SC_VPORT_SCISSOR_11_TL__WINDOW_OFFSET_DISABLE__SHIFT 0x0000001f
+#define PA_SC_VPORT_SCISSOR_12_BR__BR_X_MASK 0x00007fffL
+#define PA_SC_VPORT_SCISSOR_12_BR__BR_X__SHIFT 0x00000000
+#define PA_SC_VPORT_SCISSOR_12_BR__BR_Y_MASK 0x7fff0000L
+#define PA_SC_VPORT_SCISSOR_12_BR__BR_Y__SHIFT 0x00000010
+#define PA_SC_VPORT_SCISSOR_12_TL__TL_X_MASK 0x00007fffL
+#define PA_SC_VPORT_SCISSOR_12_TL__TL_X__SHIFT 0x00000000
+#define PA_SC_VPORT_SCISSOR_12_TL__TL_Y_MASK 0x7fff0000L
+#define PA_SC_VPORT_SCISSOR_12_TL__TL_Y__SHIFT 0x00000010
+#define PA_SC_VPORT_SCISSOR_12_TL__WINDOW_OFFSET_DISABLE_MASK 0x80000000L
+#define PA_SC_VPORT_SCISSOR_12_TL__WINDOW_OFFSET_DISABLE__SHIFT 0x0000001f
+#define PA_SC_VPORT_SCISSOR_13_BR__BR_X_MASK 0x00007fffL
+#define PA_SC_VPORT_SCISSOR_13_BR__BR_X__SHIFT 0x00000000
+#define PA_SC_VPORT_SCISSOR_13_BR__BR_Y_MASK 0x7fff0000L
+#define PA_SC_VPORT_SCISSOR_13_BR__BR_Y__SHIFT 0x00000010
+#define PA_SC_VPORT_SCISSOR_13_TL__TL_X_MASK 0x00007fffL
+#define PA_SC_VPORT_SCISSOR_13_TL__TL_X__SHIFT 0x00000000
+#define PA_SC_VPORT_SCISSOR_13_TL__TL_Y_MASK 0x7fff0000L
+#define PA_SC_VPORT_SCISSOR_13_TL__TL_Y__SHIFT 0x00000010
+#define PA_SC_VPORT_SCISSOR_13_TL__WINDOW_OFFSET_DISABLE_MASK 0x80000000L
+#define PA_SC_VPORT_SCISSOR_13_TL__WINDOW_OFFSET_DISABLE__SHIFT 0x0000001f
+#define PA_SC_VPORT_SCISSOR_14_BR__BR_X_MASK 0x00007fffL
+#define PA_SC_VPORT_SCISSOR_14_BR__BR_X__SHIFT 0x00000000
+#define PA_SC_VPORT_SCISSOR_14_BR__BR_Y_MASK 0x7fff0000L
+#define PA_SC_VPORT_SCISSOR_14_BR__BR_Y__SHIFT 0x00000010
+#define PA_SC_VPORT_SCISSOR_14_TL__TL_X_MASK 0x00007fffL
+#define PA_SC_VPORT_SCISSOR_14_TL__TL_X__SHIFT 0x00000000
+#define PA_SC_VPORT_SCISSOR_14_TL__TL_Y_MASK 0x7fff0000L
+#define PA_SC_VPORT_SCISSOR_14_TL__TL_Y__SHIFT 0x00000010
+#define PA_SC_VPORT_SCISSOR_14_TL__WINDOW_OFFSET_DISABLE_MASK 0x80000000L
+#define PA_SC_VPORT_SCISSOR_14_TL__WINDOW_OFFSET_DISABLE__SHIFT 0x0000001f
+#define PA_SC_VPORT_SCISSOR_15_BR__BR_X_MASK 0x00007fffL
+#define PA_SC_VPORT_SCISSOR_15_BR__BR_X__SHIFT 0x00000000
+#define PA_SC_VPORT_SCISSOR_15_BR__BR_Y_MASK 0x7fff0000L
+#define PA_SC_VPORT_SCISSOR_15_BR__BR_Y__SHIFT 0x00000010
+#define PA_SC_VPORT_SCISSOR_15_TL__TL_X_MASK 0x00007fffL
+#define PA_SC_VPORT_SCISSOR_15_TL__TL_X__SHIFT 0x00000000
+#define PA_SC_VPORT_SCISSOR_15_TL__TL_Y_MASK 0x7fff0000L
+#define PA_SC_VPORT_SCISSOR_15_TL__TL_Y__SHIFT 0x00000010
+#define PA_SC_VPORT_SCISSOR_15_TL__WINDOW_OFFSET_DISABLE_MASK 0x80000000L
+#define PA_SC_VPORT_SCISSOR_15_TL__WINDOW_OFFSET_DISABLE__SHIFT 0x0000001f
+#define PA_SC_VPORT_SCISSOR_1_BR__BR_X_MASK 0x00007fffL
+#define PA_SC_VPORT_SCISSOR_1_BR__BR_X__SHIFT 0x00000000
+#define PA_SC_VPORT_SCISSOR_1_BR__BR_Y_MASK 0x7fff0000L
+#define PA_SC_VPORT_SCISSOR_1_BR__BR_Y__SHIFT 0x00000010
+#define PA_SC_VPORT_SCISSOR_1_TL__TL_X_MASK 0x00007fffL
+#define PA_SC_VPORT_SCISSOR_1_TL__TL_X__SHIFT 0x00000000
+#define PA_SC_VPORT_SCISSOR_1_TL__TL_Y_MASK 0x7fff0000L
+#define PA_SC_VPORT_SCISSOR_1_TL__TL_Y__SHIFT 0x00000010
+#define PA_SC_VPORT_SCISSOR_1_TL__WINDOW_OFFSET_DISABLE_MASK 0x80000000L
+#define PA_SC_VPORT_SCISSOR_1_TL__WINDOW_OFFSET_DISABLE__SHIFT 0x0000001f
+#define PA_SC_VPORT_SCISSOR_2_BR__BR_X_MASK 0x00007fffL
+#define PA_SC_VPORT_SCISSOR_2_BR__BR_X__SHIFT 0x00000000
+#define PA_SC_VPORT_SCISSOR_2_BR__BR_Y_MASK 0x7fff0000L
+#define PA_SC_VPORT_SCISSOR_2_BR__BR_Y__SHIFT 0x00000010
+#define PA_SC_VPORT_SCISSOR_2_TL__TL_X_MASK 0x00007fffL
+#define PA_SC_VPORT_SCISSOR_2_TL__TL_X__SHIFT 0x00000000
+#define PA_SC_VPORT_SCISSOR_2_TL__TL_Y_MASK 0x7fff0000L
+#define PA_SC_VPORT_SCISSOR_2_TL__TL_Y__SHIFT 0x00000010
+#define PA_SC_VPORT_SCISSOR_2_TL__WINDOW_OFFSET_DISABLE_MASK 0x80000000L
+#define PA_SC_VPORT_SCISSOR_2_TL__WINDOW_OFFSET_DISABLE__SHIFT 0x0000001f
+#define PA_SC_VPORT_SCISSOR_3_BR__BR_X_MASK 0x00007fffL
+#define PA_SC_VPORT_SCISSOR_3_BR__BR_X__SHIFT 0x00000000
+#define PA_SC_VPORT_SCISSOR_3_BR__BR_Y_MASK 0x7fff0000L
+#define PA_SC_VPORT_SCISSOR_3_BR__BR_Y__SHIFT 0x00000010
+#define PA_SC_VPORT_SCISSOR_3_TL__TL_X_MASK 0x00007fffL
+#define PA_SC_VPORT_SCISSOR_3_TL__TL_X__SHIFT 0x00000000
+#define PA_SC_VPORT_SCISSOR_3_TL__TL_Y_MASK 0x7fff0000L
+#define PA_SC_VPORT_SCISSOR_3_TL__TL_Y__SHIFT 0x00000010
+#define PA_SC_VPORT_SCISSOR_3_TL__WINDOW_OFFSET_DISABLE_MASK 0x80000000L
+#define PA_SC_VPORT_SCISSOR_3_TL__WINDOW_OFFSET_DISABLE__SHIFT 0x0000001f
+#define PA_SC_VPORT_SCISSOR_4_BR__BR_X_MASK 0x00007fffL
+#define PA_SC_VPORT_SCISSOR_4_BR__BR_X__SHIFT 0x00000000
+#define PA_SC_VPORT_SCISSOR_4_BR__BR_Y_MASK 0x7fff0000L
+#define PA_SC_VPORT_SCISSOR_4_BR__BR_Y__SHIFT 0x00000010
+#define PA_SC_VPORT_SCISSOR_4_TL__TL_X_MASK 0x00007fffL
+#define PA_SC_VPORT_SCISSOR_4_TL__TL_X__SHIFT 0x00000000
+#define PA_SC_VPORT_SCISSOR_4_TL__TL_Y_MASK 0x7fff0000L
+#define PA_SC_VPORT_SCISSOR_4_TL__TL_Y__SHIFT 0x00000010
+#define PA_SC_VPORT_SCISSOR_4_TL__WINDOW_OFFSET_DISABLE_MASK 0x80000000L
+#define PA_SC_VPORT_SCISSOR_4_TL__WINDOW_OFFSET_DISABLE__SHIFT 0x0000001f
+#define PA_SC_VPORT_SCISSOR_5_BR__BR_X_MASK 0x00007fffL
+#define PA_SC_VPORT_SCISSOR_5_BR__BR_X__SHIFT 0x00000000
+#define PA_SC_VPORT_SCISSOR_5_BR__BR_Y_MASK 0x7fff0000L
+#define PA_SC_VPORT_SCISSOR_5_BR__BR_Y__SHIFT 0x00000010
+#define PA_SC_VPORT_SCISSOR_5_TL__TL_X_MASK 0x00007fffL
+#define PA_SC_VPORT_SCISSOR_5_TL__TL_X__SHIFT 0x00000000
+#define PA_SC_VPORT_SCISSOR_5_TL__TL_Y_MASK 0x7fff0000L
+#define PA_SC_VPORT_SCISSOR_5_TL__TL_Y__SHIFT 0x00000010
+#define PA_SC_VPORT_SCISSOR_5_TL__WINDOW_OFFSET_DISABLE_MASK 0x80000000L
+#define PA_SC_VPORT_SCISSOR_5_TL__WINDOW_OFFSET_DISABLE__SHIFT 0x0000001f
+#define PA_SC_VPORT_SCISSOR_6_BR__BR_X_MASK 0x00007fffL
+#define PA_SC_VPORT_SCISSOR_6_BR__BR_X__SHIFT 0x00000000
+#define PA_SC_VPORT_SCISSOR_6_BR__BR_Y_MASK 0x7fff0000L
+#define PA_SC_VPORT_SCISSOR_6_BR__BR_Y__SHIFT 0x00000010
+#define PA_SC_VPORT_SCISSOR_6_TL__TL_X_MASK 0x00007fffL
+#define PA_SC_VPORT_SCISSOR_6_TL__TL_X__SHIFT 0x00000000
+#define PA_SC_VPORT_SCISSOR_6_TL__TL_Y_MASK 0x7fff0000L
+#define PA_SC_VPORT_SCISSOR_6_TL__TL_Y__SHIFT 0x00000010
+#define PA_SC_VPORT_SCISSOR_6_TL__WINDOW_OFFSET_DISABLE_MASK 0x80000000L
+#define PA_SC_VPORT_SCISSOR_6_TL__WINDOW_OFFSET_DISABLE__SHIFT 0x0000001f
+#define PA_SC_VPORT_SCISSOR_7_BR__BR_X_MASK 0x00007fffL
+#define PA_SC_VPORT_SCISSOR_7_BR__BR_X__SHIFT 0x00000000
+#define PA_SC_VPORT_SCISSOR_7_BR__BR_Y_MASK 0x7fff0000L
+#define PA_SC_VPORT_SCISSOR_7_BR__BR_Y__SHIFT 0x00000010
+#define PA_SC_VPORT_SCISSOR_7_TL__TL_X_MASK 0x00007fffL
+#define PA_SC_VPORT_SCISSOR_7_TL__TL_X__SHIFT 0x00000000
+#define PA_SC_VPORT_SCISSOR_7_TL__TL_Y_MASK 0x7fff0000L
+#define PA_SC_VPORT_SCISSOR_7_TL__TL_Y__SHIFT 0x00000010
+#define PA_SC_VPORT_SCISSOR_7_TL__WINDOW_OFFSET_DISABLE_MASK 0x80000000L
+#define PA_SC_VPORT_SCISSOR_7_TL__WINDOW_OFFSET_DISABLE__SHIFT 0x0000001f
+#define PA_SC_VPORT_SCISSOR_8_BR__BR_X_MASK 0x00007fffL
+#define PA_SC_VPORT_SCISSOR_8_BR__BR_X__SHIFT 0x00000000
+#define PA_SC_VPORT_SCISSOR_8_BR__BR_Y_MASK 0x7fff0000L
+#define PA_SC_VPORT_SCISSOR_8_BR__BR_Y__SHIFT 0x00000010
+#define PA_SC_VPORT_SCISSOR_8_TL__TL_X_MASK 0x00007fffL
+#define PA_SC_VPORT_SCISSOR_8_TL__TL_X__SHIFT 0x00000000
+#define PA_SC_VPORT_SCISSOR_8_TL__TL_Y_MASK 0x7fff0000L
+#define PA_SC_VPORT_SCISSOR_8_TL__TL_Y__SHIFT 0x00000010
+#define PA_SC_VPORT_SCISSOR_8_TL__WINDOW_OFFSET_DISABLE_MASK 0x80000000L
+#define PA_SC_VPORT_SCISSOR_8_TL__WINDOW_OFFSET_DISABLE__SHIFT 0x0000001f
+#define PA_SC_VPORT_SCISSOR_9_BR__BR_X_MASK 0x00007fffL
+#define PA_SC_VPORT_SCISSOR_9_BR__BR_X__SHIFT 0x00000000
+#define PA_SC_VPORT_SCISSOR_9_BR__BR_Y_MASK 0x7fff0000L
+#define PA_SC_VPORT_SCISSOR_9_BR__BR_Y__SHIFT 0x00000010
+#define PA_SC_VPORT_SCISSOR_9_TL__TL_X_MASK 0x00007fffL
+#define PA_SC_VPORT_SCISSOR_9_TL__TL_X__SHIFT 0x00000000
+#define PA_SC_VPORT_SCISSOR_9_TL__TL_Y_MASK 0x7fff0000L
+#define PA_SC_VPORT_SCISSOR_9_TL__TL_Y__SHIFT 0x00000010
+#define PA_SC_VPORT_SCISSOR_9_TL__WINDOW_OFFSET_DISABLE_MASK 0x80000000L
+#define PA_SC_VPORT_SCISSOR_9_TL__WINDOW_OFFSET_DISABLE__SHIFT 0x0000001f
+#define PA_SC_VPORT_ZMAX_0__VPORT_ZMAX_MASK 0xffffffffL
+#define PA_SC_VPORT_ZMAX_0__VPORT_ZMAX__SHIFT 0x00000000
+#define PA_SC_VPORT_ZMAX_10__VPORT_ZMAX_MASK 0xffffffffL
+#define PA_SC_VPORT_ZMAX_10__VPORT_ZMAX__SHIFT 0x00000000
+#define PA_SC_VPORT_ZMAX_11__VPORT_ZMAX_MASK 0xffffffffL
+#define PA_SC_VPORT_ZMAX_11__VPORT_ZMAX__SHIFT 0x00000000
+#define PA_SC_VPORT_ZMAX_12__VPORT_ZMAX_MASK 0xffffffffL
+#define PA_SC_VPORT_ZMAX_12__VPORT_ZMAX__SHIFT 0x00000000
+#define PA_SC_VPORT_ZMAX_13__VPORT_ZMAX_MASK 0xffffffffL
+#define PA_SC_VPORT_ZMAX_13__VPORT_ZMAX__SHIFT 0x00000000
+#define PA_SC_VPORT_ZMAX_14__VPORT_ZMAX_MASK 0xffffffffL
+#define PA_SC_VPORT_ZMAX_14__VPORT_ZMAX__SHIFT 0x00000000
+#define PA_SC_VPORT_ZMAX_15__VPORT_ZMAX_MASK 0xffffffffL
+#define PA_SC_VPORT_ZMAX_15__VPORT_ZMAX__SHIFT 0x00000000
+#define PA_SC_VPORT_ZMAX_1__VPORT_ZMAX_MASK 0xffffffffL
+#define PA_SC_VPORT_ZMAX_1__VPORT_ZMAX__SHIFT 0x00000000
+#define PA_SC_VPORT_ZMAX_2__VPORT_ZMAX_MASK 0xffffffffL
+#define PA_SC_VPORT_ZMAX_2__VPORT_ZMAX__SHIFT 0x00000000
+#define PA_SC_VPORT_ZMAX_3__VPORT_ZMAX_MASK 0xffffffffL
+#define PA_SC_VPORT_ZMAX_3__VPORT_ZMAX__SHIFT 0x00000000
+#define PA_SC_VPORT_ZMAX_4__VPORT_ZMAX_MASK 0xffffffffL
+#define PA_SC_VPORT_ZMAX_4__VPORT_ZMAX__SHIFT 0x00000000
+#define PA_SC_VPORT_ZMAX_5__VPORT_ZMAX_MASK 0xffffffffL
+#define PA_SC_VPORT_ZMAX_5__VPORT_ZMAX__SHIFT 0x00000000
+#define PA_SC_VPORT_ZMAX_6__VPORT_ZMAX_MASK 0xffffffffL
+#define PA_SC_VPORT_ZMAX_6__VPORT_ZMAX__SHIFT 0x00000000
+#define PA_SC_VPORT_ZMAX_7__VPORT_ZMAX_MASK 0xffffffffL
+#define PA_SC_VPORT_ZMAX_7__VPORT_ZMAX__SHIFT 0x00000000
+#define PA_SC_VPORT_ZMAX_8__VPORT_ZMAX_MASK 0xffffffffL
+#define PA_SC_VPORT_ZMAX_8__VPORT_ZMAX__SHIFT 0x00000000
+#define PA_SC_VPORT_ZMAX_9__VPORT_ZMAX_MASK 0xffffffffL
+#define PA_SC_VPORT_ZMAX_9__VPORT_ZMAX__SHIFT 0x00000000
+#define PA_SC_VPORT_ZMIN_0__VPORT_ZMIN_MASK 0xffffffffL
+#define PA_SC_VPORT_ZMIN_0__VPORT_ZMIN__SHIFT 0x00000000
+#define PA_SC_VPORT_ZMIN_10__VPORT_ZMIN_MASK 0xffffffffL
+#define PA_SC_VPORT_ZMIN_10__VPORT_ZMIN__SHIFT 0x00000000
+#define PA_SC_VPORT_ZMIN_11__VPORT_ZMIN_MASK 0xffffffffL
+#define PA_SC_VPORT_ZMIN_11__VPORT_ZMIN__SHIFT 0x00000000
+#define PA_SC_VPORT_ZMIN_12__VPORT_ZMIN_MASK 0xffffffffL
+#define PA_SC_VPORT_ZMIN_12__VPORT_ZMIN__SHIFT 0x00000000
+#define PA_SC_VPORT_ZMIN_13__VPORT_ZMIN_MASK 0xffffffffL
+#define PA_SC_VPORT_ZMIN_13__VPORT_ZMIN__SHIFT 0x00000000
+#define PA_SC_VPORT_ZMIN_14__VPORT_ZMIN_MASK 0xffffffffL
+#define PA_SC_VPORT_ZMIN_14__VPORT_ZMIN__SHIFT 0x00000000
+#define PA_SC_VPORT_ZMIN_15__VPORT_ZMIN_MASK 0xffffffffL
+#define PA_SC_VPORT_ZMIN_15__VPORT_ZMIN__SHIFT 0x00000000
+#define PA_SC_VPORT_ZMIN_1__VPORT_ZMIN_MASK 0xffffffffL
+#define PA_SC_VPORT_ZMIN_1__VPORT_ZMIN__SHIFT 0x00000000
+#define PA_SC_VPORT_ZMIN_2__VPORT_ZMIN_MASK 0xffffffffL
+#define PA_SC_VPORT_ZMIN_2__VPORT_ZMIN__SHIFT 0x00000000
+#define PA_SC_VPORT_ZMIN_3__VPORT_ZMIN_MASK 0xffffffffL
+#define PA_SC_VPORT_ZMIN_3__VPORT_ZMIN__SHIFT 0x00000000
+#define PA_SC_VPORT_ZMIN_4__VPORT_ZMIN_MASK 0xffffffffL
+#define PA_SC_VPORT_ZMIN_4__VPORT_ZMIN__SHIFT 0x00000000
+#define PA_SC_VPORT_ZMIN_5__VPORT_ZMIN_MASK 0xffffffffL
+#define PA_SC_VPORT_ZMIN_5__VPORT_ZMIN__SHIFT 0x00000000
+#define PA_SC_VPORT_ZMIN_6__VPORT_ZMIN_MASK 0xffffffffL
+#define PA_SC_VPORT_ZMIN_6__VPORT_ZMIN__SHIFT 0x00000000
+#define PA_SC_VPORT_ZMIN_7__VPORT_ZMIN_MASK 0xffffffffL
+#define PA_SC_VPORT_ZMIN_7__VPORT_ZMIN__SHIFT 0x00000000
+#define PA_SC_VPORT_ZMIN_8__VPORT_ZMIN_MASK 0xffffffffL
+#define PA_SC_VPORT_ZMIN_8__VPORT_ZMIN__SHIFT 0x00000000
+#define PA_SC_VPORT_ZMIN_9__VPORT_ZMIN_MASK 0xffffffffL
+#define PA_SC_VPORT_ZMIN_9__VPORT_ZMIN__SHIFT 0x00000000
+#define PA_SC_WINDOW_OFFSET__WINDOW_X_OFFSET_MASK 0x0000ffffL
+#define PA_SC_WINDOW_OFFSET__WINDOW_X_OFFSET__SHIFT 0x00000000
+#define PA_SC_WINDOW_OFFSET__WINDOW_Y_OFFSET_MASK 0xffff0000L
+#define PA_SC_WINDOW_OFFSET__WINDOW_Y_OFFSET__SHIFT 0x00000010
+#define PA_SC_WINDOW_SCISSOR_BR__BR_X_MASK 0x00007fffL
+#define PA_SC_WINDOW_SCISSOR_BR__BR_X__SHIFT 0x00000000
+#define PA_SC_WINDOW_SCISSOR_BR__BR_Y_MASK 0x7fff0000L
+#define PA_SC_WINDOW_SCISSOR_BR__BR_Y__SHIFT 0x00000010
+#define PA_SC_WINDOW_SCISSOR_TL__TL_X_MASK 0x00007fffL
+#define PA_SC_WINDOW_SCISSOR_TL__TL_X__SHIFT 0x00000000
+#define PA_SC_WINDOW_SCISSOR_TL__TL_Y_MASK 0x7fff0000L
+#define PA_SC_WINDOW_SCISSOR_TL__TL_Y__SHIFT 0x00000010
+#define PA_SC_WINDOW_SCISSOR_TL__WINDOW_OFFSET_DISABLE_MASK 0x80000000L
+#define PA_SC_WINDOW_SCISSOR_TL__WINDOW_OFFSET_DISABLE__SHIFT 0x0000001f
+#define PA_SU_CNTL_STATUS__SU_BUSY_MASK 0x80000000L
+#define PA_SU_CNTL_STATUS__SU_BUSY__SHIFT 0x0000001f
+#define PA_SU_DEBUG_CNTL__SU_DEBUG_INDX_MASK 0x0000001fL
+#define PA_SU_DEBUG_CNTL__SU_DEBUG_INDX__SHIFT 0x00000000
+#define PA_SU_DEBUG_DATA__DATA_MASK 0xffffffffL
+#define PA_SU_DEBUG_DATA__DATA__SHIFT 0x00000000
+#define PA_SU_HARDWARE_SCREEN_OFFSET__HW_SCREEN_OFFSET_X_MASK 0x000001ffL
+#define PA_SU_HARDWARE_SCREEN_OFFSET__HW_SCREEN_OFFSET_X__SHIFT 0x00000000
+#define PA_SU_HARDWARE_SCREEN_OFFSET__HW_SCREEN_OFFSET_Y_MASK 0x01ff0000L
+#define PA_SU_HARDWARE_SCREEN_OFFSET__HW_SCREEN_OFFSET_Y__SHIFT 0x00000010
+#define PA_SU_LINE_CNTL__WIDTH_MASK 0x0000ffffL
+#define PA_SU_LINE_CNTL__WIDTH__SHIFT 0x00000000
+#define PA_SU_LINE_STIPPLE_CNTL__DIAMOND_ADJUST_MASK 0x00000010L
+#define PA_SU_LINE_STIPPLE_CNTL__DIAMOND_ADJUST__SHIFT 0x00000004
+#define PA_SU_LINE_STIPPLE_CNTL__EXPAND_FULL_LENGTH_MASK 0x00000004L
+#define PA_SU_LINE_STIPPLE_CNTL__EXPAND_FULL_LENGTH__SHIFT 0x00000002
+#define PA_SU_LINE_STIPPLE_CNTL__FRACTIONAL_ACCUM_MASK 0x00000008L
+#define PA_SU_LINE_STIPPLE_CNTL__FRACTIONAL_ACCUM__SHIFT 0x00000003
+#define PA_SU_LINE_STIPPLE_CNTL__LINE_STIPPLE_RESET_MASK 0x00000003L
+#define PA_SU_LINE_STIPPLE_CNTL__LINE_STIPPLE_RESET__SHIFT 0x00000000
+#define PA_SU_LINE_STIPPLE_SCALE__LINE_STIPPLE_SCALE_MASK 0xffffffffL
+#define PA_SU_LINE_STIPPLE_SCALE__LINE_STIPPLE_SCALE__SHIFT 0x00000000
+#define PA_SU_LINE_STIPPLE_VALUE__LINE_STIPPLE_VALUE_MASK 0x00ffffffL
+#define PA_SU_LINE_STIPPLE_VALUE__LINE_STIPPLE_VALUE__SHIFT 0x00000000
+#define PA_SU_PERFCOUNTER0_HI__PERFCOUNTER_HI_MASK 0x0000ffffL
+#define PA_SU_PERFCOUNTER0_HI__PERFCOUNTER_HI__SHIFT 0x00000000
+#define PA_SU_PERFCOUNTER0_LO__PERFCOUNTER_LO_MASK 0xffffffffL
+#define PA_SU_PERFCOUNTER0_LO__PERFCOUNTER_LO__SHIFT 0x00000000
+#define PA_SU_PERFCOUNTER0_SELECT1__PERF_SEL2_MASK 0x000003ffL
+#define PA_SU_PERFCOUNTER0_SELECT1__PERF_SEL2__SHIFT 0x00000000
+#define PA_SU_PERFCOUNTER0_SELECT1__PERF_SEL3_MASK 0x000ffc00L
+#define PA_SU_PERFCOUNTER0_SELECT1__PERF_SEL3__SHIFT 0x0000000a
+#define PA_SU_PERFCOUNTER0_SELECT__CNTR_MODE_MASK 0x00f00000L
+#define PA_SU_PERFCOUNTER0_SELECT__CNTR_MODE__SHIFT 0x00000014
+#define PA_SU_PERFCOUNTER0_SELECT__PERF_SEL1_MASK 0x000ffc00L
+#define PA_SU_PERFCOUNTER0_SELECT__PERF_SEL1__SHIFT 0x0000000a
+#define PA_SU_PERFCOUNTER0_SELECT__PERF_SEL_MASK 0x000000ffL
+#define PA_SU_PERFCOUNTER0_SELECT__PERF_SEL__SHIFT 0x00000000
+#define PA_SU_PERFCOUNTER1_HI__PERFCOUNTER_HI_MASK 0x0000ffffL
+#define PA_SU_PERFCOUNTER1_HI__PERFCOUNTER_HI__SHIFT 0x00000000
+#define PA_SU_PERFCOUNTER1_LO__PERFCOUNTER_LO_MASK 0xffffffffL
+#define PA_SU_PERFCOUNTER1_LO__PERFCOUNTER_LO__SHIFT 0x00000000
+#define PA_SU_PERFCOUNTER1_SELECT1__PERF_SEL2_MASK 0x000003ffL
+#define PA_SU_PERFCOUNTER1_SELECT1__PERF_SEL2__SHIFT 0x00000000
+#define PA_SU_PERFCOUNTER1_SELECT1__PERF_SEL3_MASK 0x000ffc00L
+#define PA_SU_PERFCOUNTER1_SELECT1__PERF_SEL3__SHIFT 0x0000000a
+#define PA_SU_PERFCOUNTER1_SELECT__CNTR_MODE_MASK 0x00f00000L
+#define PA_SU_PERFCOUNTER1_SELECT__CNTR_MODE__SHIFT 0x00000014
+#define PA_SU_PERFCOUNTER1_SELECT__PERF_SEL1_MASK 0x000ffc00L
+#define PA_SU_PERFCOUNTER1_SELECT__PERF_SEL1__SHIFT 0x0000000a
+#define PA_SU_PERFCOUNTER1_SELECT__PERF_SEL_MASK 0x000000ffL
+#define PA_SU_PERFCOUNTER1_SELECT__PERF_SEL__SHIFT 0x00000000
+#define PA_SU_PERFCOUNTER2_HI__PERFCOUNTER_HI_MASK 0x0000ffffL
+#define PA_SU_PERFCOUNTER2_HI__PERFCOUNTER_HI__SHIFT 0x00000000
+#define PA_SU_PERFCOUNTER2_LO__PERFCOUNTER_LO_MASK 0xffffffffL
+#define PA_SU_PERFCOUNTER2_LO__PERFCOUNTER_LO__SHIFT 0x00000000
+#define PA_SU_PERFCOUNTER2_SELECT__CNTR_MODE_MASK 0x00f00000L
+#define PA_SU_PERFCOUNTER2_SELECT__CNTR_MODE__SHIFT 0x00000014
+#define PA_SU_PERFCOUNTER2_SELECT__PERF_SEL_MASK 0x000000ffL
+#define PA_SU_PERFCOUNTER2_SELECT__PERF_SEL__SHIFT 0x00000000
+#define PA_SU_PERFCOUNTER3_HI__PERFCOUNTER_HI_MASK 0x0000ffffL
+#define PA_SU_PERFCOUNTER3_HI__PERFCOUNTER_HI__SHIFT 0x00000000
+#define PA_SU_PERFCOUNTER3_LO__PERFCOUNTER_LO_MASK 0xffffffffL
+#define PA_SU_PERFCOUNTER3_LO__PERFCOUNTER_LO__SHIFT 0x00000000
+#define PA_SU_PERFCOUNTER3_SELECT__CNTR_MODE_MASK 0x00f00000L
+#define PA_SU_PERFCOUNTER3_SELECT__CNTR_MODE__SHIFT 0x00000014
+#define PA_SU_PERFCOUNTER3_SELECT__PERF_SEL_MASK 0x000000ffL
+#define PA_SU_PERFCOUNTER3_SELECT__PERF_SEL__SHIFT 0x00000000
+#define PA_SU_POINT_MINMAX__MAX_SIZE_MASK 0xffff0000L
+#define PA_SU_POINT_MINMAX__MAX_SIZE__SHIFT 0x00000010
+#define PA_SU_POINT_MINMAX__MIN_SIZE_MASK 0x0000ffffL
+#define PA_SU_POINT_MINMAX__MIN_SIZE__SHIFT 0x00000000
+#define PA_SU_POINT_SIZE__HEIGHT_MASK 0x0000ffffL
+#define PA_SU_POINT_SIZE__HEIGHT__SHIFT 0x00000000
+#define PA_SU_POINT_SIZE__WIDTH_MASK 0xffff0000L
+#define PA_SU_POINT_SIZE__WIDTH__SHIFT 0x00000010
+#define PA_SU_POLY_OFFSET_BACK_OFFSET__OFFSET_MASK 0xffffffffL
+#define PA_SU_POLY_OFFSET_BACK_OFFSET__OFFSET__SHIFT 0x00000000
+#define PA_SU_POLY_OFFSET_BACK_SCALE__SCALE_MASK 0xffffffffL
+#define PA_SU_POLY_OFFSET_BACK_SCALE__SCALE__SHIFT 0x00000000
+#define PA_SU_POLY_OFFSET_CLAMP__CLAMP_MASK 0xffffffffL
+#define PA_SU_POLY_OFFSET_CLAMP__CLAMP__SHIFT 0x00000000
+#define PA_SU_POLY_OFFSET_DB_FMT_CNTL__POLY_OFFSET_DB_IS_FLOAT_FMT_MASK 0x00000100L
+#define PA_SU_POLY_OFFSET_DB_FMT_CNTL__POLY_OFFSET_DB_IS_FLOAT_FMT__SHIFT 0x00000008
+#define PA_SU_POLY_OFFSET_DB_FMT_CNTL__POLY_OFFSET_NEG_NUM_DB_BITS_MASK 0x000000ffL
+#define PA_SU_POLY_OFFSET_DB_FMT_CNTL__POLY_OFFSET_NEG_NUM_DB_BITS__SHIFT 0x00000000
+#define PA_SU_POLY_OFFSET_FRONT_OFFSET__OFFSET_MASK 0xffffffffL
+#define PA_SU_POLY_OFFSET_FRONT_OFFSET__OFFSET__SHIFT 0x00000000
+#define PA_SU_POLY_OFFSET_FRONT_SCALE__SCALE_MASK 0xffffffffL
+#define PA_SU_POLY_OFFSET_FRONT_SCALE__SCALE__SHIFT 0x00000000
+#define PA_SU_PRIM_FILTER_CNTL__LINE_EXPAND_ENA_MASK 0x00000020L
+#define PA_SU_PRIM_FILTER_CNTL__LINE_EXPAND_ENA__SHIFT 0x00000005
+#define PA_SU_PRIM_FILTER_CNTL__LINE_FILTER_DISABLE_MASK 0x00000002L
+#define PA_SU_PRIM_FILTER_CNTL__LINE_FILTER_DISABLE__SHIFT 0x00000001
+#define PA_SU_PRIM_FILTER_CNTL__POINT_EXPAND_ENA_MASK 0x00000040L
+#define PA_SU_PRIM_FILTER_CNTL__POINT_EXPAND_ENA__SHIFT 0x00000006
+#define PA_SU_PRIM_FILTER_CNTL__POINT_FILTER_DISABLE_MASK 0x00000004L
+#define PA_SU_PRIM_FILTER_CNTL__POINT_FILTER_DISABLE__SHIFT 0x00000002
+#define PA_SU_PRIM_FILTER_CNTL__PRIM_EXPAND_CONSTANT_MASK 0x0000ff00L
+#define PA_SU_PRIM_FILTER_CNTL__PRIM_EXPAND_CONSTANT__SHIFT 0x00000008
+#define PA_SU_PRIM_FILTER_CNTL__RECTANGLE_EXPAND_ENA_MASK 0x00000080L
+#define PA_SU_PRIM_FILTER_CNTL__RECTANGLE_EXPAND_ENA__SHIFT 0x00000007
+#define PA_SU_PRIM_FILTER_CNTL__RECTANGLE_FILTER_DISABLE_MASK 0x00000008L
+#define PA_SU_PRIM_FILTER_CNTL__RECTANGLE_FILTER_DISABLE__SHIFT 0x00000003
+#define PA_SU_PRIM_FILTER_CNTL__TRIANGLE_EXPAND_ENA_MASK 0x00000010L
+#define PA_SU_PRIM_FILTER_CNTL__TRIANGLE_EXPAND_ENA__SHIFT 0x00000004
+#define PA_SU_PRIM_FILTER_CNTL__TRIANGLE_FILTER_DISABLE_MASK 0x00000001L
+#define PA_SU_PRIM_FILTER_CNTL__TRIANGLE_FILTER_DISABLE__SHIFT 0x00000000
+#define PA_SU_PRIM_FILTER_CNTL__XMAX_RIGHT_EXCLUSION_MASK 0x40000000L
+#define PA_SU_PRIM_FILTER_CNTL__XMAX_RIGHT_EXCLUSION__SHIFT 0x0000001e
+#define PA_SU_PRIM_FILTER_CNTL__YMAX_BOTTOM_EXCLUSION_MASK 0x80000000L
+#define PA_SU_PRIM_FILTER_CNTL__YMAX_BOTTOM_EXCLUSION__SHIFT 0x0000001f
+#define PA_SU_SC_MODE_CNTL__CULL_BACK_MASK 0x00000002L
+#define PA_SU_SC_MODE_CNTL__CULL_BACK__SHIFT 0x00000001
+#define PA_SU_SC_MODE_CNTL__CULL_FRONT_MASK 0x00000001L
+#define PA_SU_SC_MODE_CNTL__CULL_FRONT__SHIFT 0x00000000
+#define PA_SU_SC_MODE_CNTL__FACE_MASK 0x00000004L
+#define PA_SU_SC_MODE_CNTL__FACE__SHIFT 0x00000002
+#define PA_SU_SC_MODE_CNTL__MULTI_PRIM_IB_ENA_MASK 0x00200000L
+#define PA_SU_SC_MODE_CNTL__MULTI_PRIM_IB_ENA__SHIFT 0x00000015
+#define PA_SU_SC_MODE_CNTL__PERSP_CORR_DIS_MASK 0x00100000L
+#define PA_SU_SC_MODE_CNTL__PERSP_CORR_DIS__SHIFT 0x00000014
+#define PA_SU_SC_MODE_CNTL__POLYMODE_BACK_PTYPE_MASK 0x00000700L
+#define PA_SU_SC_MODE_CNTL__POLYMODE_BACK_PTYPE__SHIFT 0x00000008
+#define PA_SU_SC_MODE_CNTL__POLYMODE_FRONT_PTYPE_MASK 0x000000e0L
+#define PA_SU_SC_MODE_CNTL__POLYMODE_FRONT_PTYPE__SHIFT 0x00000005
+#define PA_SU_SC_MODE_CNTL__POLY_MODE_MASK 0x00000018L
+#define PA_SU_SC_MODE_CNTL__POLY_MODE__SHIFT 0x00000003
+#define PA_SU_SC_MODE_CNTL__POLY_OFFSET_BACK_ENABLE_MASK 0x00001000L
+#define PA_SU_SC_MODE_CNTL__POLY_OFFSET_BACK_ENABLE__SHIFT 0x0000000c
+#define PA_SU_SC_MODE_CNTL__POLY_OFFSET_FRONT_ENABLE_MASK 0x00000800L
+#define PA_SU_SC_MODE_CNTL__POLY_OFFSET_FRONT_ENABLE__SHIFT 0x0000000b
+#define PA_SU_SC_MODE_CNTL__POLY_OFFSET_PARA_ENABLE_MASK 0x00002000L
+#define PA_SU_SC_MODE_CNTL__POLY_OFFSET_PARA_ENABLE__SHIFT 0x0000000d
+#define PA_SU_SC_MODE_CNTL__PROVOKING_VTX_LAST_MASK 0x00080000L
+#define PA_SU_SC_MODE_CNTL__PROVOKING_VTX_LAST__SHIFT 0x00000013
+#define PA_SU_SC_MODE_CNTL__VTX_WINDOW_OFFSET_ENABLE_MASK 0x00010000L
+#define PA_SU_SC_MODE_CNTL__VTX_WINDOW_OFFSET_ENABLE__SHIFT 0x00000010
+#define PA_SU_VTX_CNTL__PIX_CENTER_MASK 0x00000001L
+#define PA_SU_VTX_CNTL__PIX_CENTER__SHIFT 0x00000000
+#define PA_SU_VTX_CNTL__QUANT_MODE_MASK 0x00000038L
+#define PA_SU_VTX_CNTL__QUANT_MODE__SHIFT 0x00000003
+#define PA_SU_VTX_CNTL__ROUND_MODE_MASK 0x00000006L
+#define PA_SU_VTX_CNTL__ROUND_MODE__SHIFT 0x00000001
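Every definition in this hunk follows one generated convention: each register field gets a <REG>__<FIELD>_MASK plus a matching <REG>__<FIELD>__SHIFT, so a field is extracted as (reg & MASK) >> SHIFT and inserted the inverse way. A minimal sketch, not part of this diff, using the PA_SU_POINT_MINMAX values defined above; the helper names are hypothetical:

/* Illustration only, not part of this commit: the generic MASK/SHIFT
 * field-access idiom, with values copied verbatim from the defines above. */
#include <stdint.h>
#include <stdio.h>

#define PA_SU_POINT_MINMAX__MAX_SIZE_MASK 0xffff0000L
#define PA_SU_POINT_MINMAX__MAX_SIZE__SHIFT 0x00000010
#define PA_SU_POINT_MINMAX__MIN_SIZE_MASK 0x0000ffffL
#define PA_SU_POINT_MINMAX__MIN_SIZE__SHIFT 0x00000000

/* Hypothetical helpers: pull a field out of a register word, and write a
 * new field value into one without disturbing the other bits. */
static uint32_t get_field(uint32_t reg, uint32_t mask, uint32_t shift)
{
	return (reg & mask) >> shift;
}

static uint32_t set_field(uint32_t reg, uint32_t mask, uint32_t shift,
			  uint32_t val)
{
	return (reg & ~mask) | ((val << shift) & mask);
}

int main(void)
{
	uint32_t reg = 0;

	/* Pack MIN_SIZE=8 and MAX_SIZE=64 into one PA_SU_POINT_MINMAX value. */
	reg = set_field(reg, PA_SU_POINT_MINMAX__MIN_SIZE_MASK,
			PA_SU_POINT_MINMAX__MIN_SIZE__SHIFT, 8);
	reg = set_field(reg, PA_SU_POINT_MINMAX__MAX_SIZE_MASK,
			PA_SU_POINT_MINMAX__MAX_SIZE__SHIFT, 64);

	/* Prints: reg=0x00400008 min=8 max=64 */
	printf("reg=0x%08x min=%u max=%u\n", (unsigned)reg,
	       (unsigned)get_field(reg, PA_SU_POINT_MINMAX__MIN_SIZE_MASK,
				   PA_SU_POINT_MINMAX__MIN_SIZE__SHIFT),
	       (unsigned)get_field(reg, PA_SU_POINT_MINMAX__MAX_SIZE_MASK,
				   PA_SU_POINT_MINMAX__MAX_SIZE__SHIFT));
	return 0;
}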
+#define RAS_BCI_SIGNATURE0__SIGNATURE_MASK 0xffffffffL
+#define RAS_BCI_SIGNATURE0__SIGNATURE__SHIFT 0x00000000
+#define RAS_BCI_SIGNATURE1__SIGNATURE_MASK 0xffffffffL
+#define RAS_BCI_SIGNATURE1__SIGNATURE__SHIFT 0x00000000
+#define RAS_CB_SIGNATURE0__SIGNATURE_MASK 0xffffffffL
+#define RAS_CB_SIGNATURE0__SIGNATURE__SHIFT 0x00000000
+#define RAS_DB_SIGNATURE0__SIGNATURE_MASK 0xffffffffL
+#define RAS_DB_SIGNATURE0__SIGNATURE__SHIFT 0x00000000
+#define RAS_IA_SIGNATURE0__SIGNATURE_MASK 0xffffffffL
+#define RAS_IA_SIGNATURE0__SIGNATURE__SHIFT 0x00000000
+#define RAS_IA_SIGNATURE1__SIGNATURE_MASK 0xffffffffL
+#define RAS_IA_SIGNATURE1__SIGNATURE__SHIFT 0x00000000
+#define RAS_PA_SIGNATURE0__SIGNATURE_MASK 0xffffffffL
+#define RAS_PA_SIGNATURE0__SIGNATURE__SHIFT 0x00000000
+#define RAS_SC_SIGNATURE0__SIGNATURE_MASK 0xffffffffL
+#define RAS_SC_SIGNATURE0__SIGNATURE__SHIFT 0x00000000
+#define RAS_SC_SIGNATURE1__SIGNATURE_MASK 0xffffffffL
+#define RAS_SC_SIGNATURE1__SIGNATURE__SHIFT 0x00000000
+#define RAS_SC_SIGNATURE2__SIGNATURE_MASK 0xffffffffL
+#define RAS_SC_SIGNATURE2__SIGNATURE__SHIFT 0x00000000
+#define RAS_SC_SIGNATURE3__SIGNATURE_MASK 0xffffffffL
+#define RAS_SC_SIGNATURE3__SIGNATURE__SHIFT 0x00000000
+#define RAS_SC_SIGNATURE4__SIGNATURE_MASK 0xffffffffL
+#define RAS_SC_SIGNATURE4__SIGNATURE__SHIFT 0x00000000
+#define RAS_SC_SIGNATURE5__SIGNATURE_MASK 0xffffffffL
+#define RAS_SC_SIGNATURE5__SIGNATURE__SHIFT 0x00000000
+#define RAS_SC_SIGNATURE6__SIGNATURE_MASK 0xffffffffL
+#define RAS_SC_SIGNATURE6__SIGNATURE__SHIFT 0x00000000
+#define RAS_SC_SIGNATURE7__SIGNATURE_MASK 0xffffffffL
+#define RAS_SC_SIGNATURE7__SIGNATURE__SHIFT 0x00000000
+#define RAS_SIGNATURE_CONTROL__ENABLE_MASK 0x00000001L
+#define RAS_SIGNATURE_CONTROL__ENABLE__SHIFT 0x00000000
+#define RAS_SIGNATURE_MASK__INPUT_BUS_MASK_MASK 0xffffffffL
+#define RAS_SIGNATURE_MASK__INPUT_BUS_MASK__SHIFT 0x00000000
+#define RAS_SPI_SIGNATURE0__SIGNATURE_MASK 0xffffffffL
+#define RAS_SPI_SIGNATURE0__SIGNATURE__SHIFT 0x00000000
+#define RAS_SPI_SIGNATURE1__SIGNATURE_MASK 0xffffffffL
+#define RAS_SPI_SIGNATURE1__SIGNATURE__SHIFT 0x00000000
+#define RAS_SQ_SIGNATURE0__SIGNATURE_MASK 0xffffffffL
+#define RAS_SQ_SIGNATURE0__SIGNATURE__SHIFT 0x00000000
+#define RAS_SX_SIGNATURE0__SIGNATURE_MASK 0xffffffffL
+#define RAS_SX_SIGNATURE0__SIGNATURE__SHIFT 0x00000000
+#define RAS_SX_SIGNATURE1__SIGNATURE_MASK 0xffffffffL
+#define RAS_SX_SIGNATURE1__SIGNATURE__SHIFT 0x00000000
+#define RAS_SX_SIGNATURE2__SIGNATURE_MASK 0xffffffffL
+#define RAS_SX_SIGNATURE2__SIGNATURE__SHIFT 0x00000000
+#define RAS_SX_SIGNATURE3__SIGNATURE_MASK 0xffffffffL
+#define RAS_SX_SIGNATURE3__SIGNATURE__SHIFT 0x00000000
+#define RAS_TA_SIGNATURE0__SIGNATURE_MASK 0xffffffffL
+#define RAS_TA_SIGNATURE0__SIGNATURE__SHIFT 0x00000000
+#define RAS_TD_SIGNATURE0__SIGNATURE_MASK 0xffffffffL
+#define RAS_TD_SIGNATURE0__SIGNATURE__SHIFT 0x00000000
+#define RAS_VGT_SIGNATURE0__SIGNATURE_MASK 0xffffffffL
+#define RAS_VGT_SIGNATURE0__SIGNATURE__SHIFT 0x00000000
+#define RLC_AUTO_PG_CTRL__AUTO_GRBM_REG_SAVE_ON_IDLE_EN_MASK 0x00000002L
+#define RLC_AUTO_PG_CTRL__AUTO_GRBM_REG_SAVE_ON_IDLE_EN__SHIFT 0x00000001
+#define RLC_AUTO_PG_CTRL__AUTO_PG_EN_MASK 0x00000001L
+#define RLC_AUTO_PG_CTRL__AUTO_PG_EN__SHIFT 0x00000000
+#define RLC_AUTO_PG_CTRL__AUTO_WAKE_UP_EN_MASK 0x00000004L
+#define RLC_AUTO_PG_CTRL__AUTO_WAKE_UP_EN__SHIFT 0x00000002
+#define RLC_AUTO_PG_CTRL__GRBM_REG_SAVE_GFX_IDLE_THRESHOLD_MASK 0x0007fff8L
+#define RLC_AUTO_PG_CTRL__GRBM_REG_SAVE_GFX_IDLE_THRESHOLD__SHIFT 0x00000003
+#define RLC_AUTO_PG_CTRL__PG_AFTER_GRBM_REG_SAVE_THRESHOLD_MASK 0xfff80000L
+#define RLC_AUTO_PG_CTRL__PG_AFTER_GRBM_REG_SAVE_THRESHOLD__SHIFT 0x00000013
+#define RLC_CAPTURE_GPU_CLOCK_COUNT__CAPTURE_MASK 0x00000001L
+#define RLC_CAPTURE_GPU_CLOCK_COUNT__CAPTURE__SHIFT 0x00000000
+#define RLC_CAPTURE_GPU_CLOCK_COUNT__RESERVED_MASK 0xfffffffeL
+#define RLC_CAPTURE_GPU_CLOCK_COUNT__RESERVED__SHIFT 0x00000001
+#define RLC_CGCG_CGLS_CTRL__CGCG_CONTROLLER_MASK 0x08000000L
+#define RLC_CGCG_CGLS_CTRL__CGCG_CONTROLLER__SHIFT 0x0000001b
+#define RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK 0x00000001L
+#define RLC_CGCG_CGLS_CTRL__CGCG_EN__SHIFT 0x00000000
+#define RLC_CGCG_CGLS_CTRL__CGCG_GFX_IDLE_THRESHOLD_MASK 0x0000ff00L
+#define RLC_CGCG_CGLS_CTRL__CGCG_GFX_IDLE_THRESHOLD__SHIFT 0x00000008
+#define RLC_CGCG_CGLS_CTRL__CGCG_REG_CTRL_MASK 0x10000000L
+#define RLC_CGCG_CGLS_CTRL__CGCG_REG_CTRL__SHIFT 0x0000001c
+#define RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK 0x00000002L
+#define RLC_CGCG_CGLS_CTRL__CGLS_EN__SHIFT 0x00000001
+#define RLC_CGCG_CGLS_CTRL__CGLS_REP_COMPANSAT_DELAY_MASK 0x000000fcL
+#define RLC_CGCG_CGLS_CTRL__CGLS_REP_COMPANSAT_DELAY__SHIFT 0x00000002
+#define RLC_CGCG_CGLS_CTRL__SLEEP_MODE_MASK 0x60000000L
+#define RLC_CGCG_CGLS_CTRL__SLEEP_MODE__SHIFT 0x0000001d
+#define RLC_CGCG_CGLS_CTRL__SPARE_MASK 0x80000000L
+#define RLC_CGCG_CGLS_CTRL__SPARE__SHIFT 0x0000001f
+#define RLC_CGCG_RAMP_CTRL__DOWN_DIV_START_UNIT_MASK 0x0000000fL
+#define RLC_CGCG_RAMP_CTRL__DOWN_DIV_START_UNIT__SHIFT 0x00000000
+#define RLC_CGCG_RAMP_CTRL__DOWN_DIV_STEP_UNIT_MASK 0x000000f0L
+#define RLC_CGCG_RAMP_CTRL__DOWN_DIV_STEP_UNIT__SHIFT 0x00000004
+#define RLC_CGCG_RAMP_CTRL__STEP_DELAY_CNT_MASK 0x0fff0000L
+#define RLC_CGCG_RAMP_CTRL__STEP_DELAY_CNT__SHIFT 0x00000010
+#define RLC_CGCG_RAMP_CTRL__STEP_DELAY_UNIT_MASK 0xf0000000L
+#define RLC_CGCG_RAMP_CTRL__STEP_DELAY_UNIT__SHIFT 0x0000001c
+#define RLC_CGCG_RAMP_CTRL__UP_DIV_START_UNIT_MASK 0x00000f00L
+#define RLC_CGCG_RAMP_CTRL__UP_DIV_START_UNIT__SHIFT 0x00000008
+#define RLC_CGCG_RAMP_CTRL__UP_DIV_STEP_UNIT_MASK 0x0000f000L
+#define RLC_CGCG_RAMP_CTRL__UP_DIV_STEP_UNIT__SHIFT 0x0000000c
+#define RLC_CGTT_MGCG_OVERRIDE__OVERRIDE_MASK 0xffffffffL
+#define RLC_CGTT_MGCG_OVERRIDE__OVERRIDE__SHIFT 0x00000000
+#define RLC_CNTL__FORCE_RETRY_MASK 0x00000002L
+#define RLC_CNTL__FORCE_RETRY__SHIFT 0x00000001
+#define RLC_CNTL__READ_CACHE_DISABLE_MASK 0x00000004L
+#define RLC_CNTL__READ_CACHE_DISABLE__SHIFT 0x00000002
+#define RLC_CNTL__RESERVED_MASK 0xffffff00L
+#define RLC_CNTL__RESERVED__SHIFT 0x00000008
+#define RLC_CNTL__RLC_ENABLE_F32_MASK 0x00000001L
+#define RLC_CNTL__RLC_ENABLE_F32__SHIFT 0x00000000
+#define RLC_CNTL__RLC_STEP_F32_MASK 0x00000008L
+#define RLC_CNTL__RLC_STEP_F32__SHIFT 0x00000003
+#define RLC_CNTL__SOFT_RESET_DEBUG_MODE_MASK 0x00000010L
+#define RLC_CNTL__SOFT_RESET_DEBUG_MODE__SHIFT 0x00000004
+#define RLC_CU_STATUS__WORK_PENDING_MASK 0xffffffffL
+#define RLC_CU_STATUS__WORK_PENDING__SHIFT 0x00000000
+#define RLC_DEBUG__DATA_MASK 0xffffffffL
+#define RLC_DEBUG__DATA__SHIFT 0x00000000
+#define RLC_DEBUG_SELECT__RESERVED_MASK 0xffff8000L
+#define RLC_DEBUG_SELECT__RESERVED__SHIFT 0x0000000f
+#define RLC_DEBUG_SELECT__SELECT_MASK 0x000000ffL
+#define RLC_DEBUG_SELECT__SELECT__SHIFT 0x00000000
+#define RLC_DRIVER_CPDMA_STATUS__DRIVER_ACK_MASK 0x00000010L
+#define RLC_DRIVER_CPDMA_STATUS__DRIVER_ACK__SHIFT 0x00000004
+#define RLC_DRIVER_CPDMA_STATUS__DRIVER_REQUEST_MASK 0x00000001L
+#define RLC_DRIVER_CPDMA_STATUS__DRIVER_REQUEST__SHIFT 0x00000000
+#define RLC_DRIVER_CPDMA_STATUS__RESERVED1_MASK 0x0000000eL
+#define RLC_DRIVER_CPDMA_STATUS__RESERVED1__SHIFT 0x00000001
+#define RLC_DRIVER_CPDMA_STATUS__RESERVED_MASK 0xffffffe0L
+#define RLC_DRIVER_CPDMA_STATUS__RESERVED__SHIFT 0x00000005
+#define RLC_DYN_PG_REQUEST__PG_REQUEST_CU_MASK_MASK 0xffffffffL
+#define RLC_DYN_PG_REQUEST__PG_REQUEST_CU_MASK__SHIFT 0x00000000
+#define RLC_DYN_PG_STATUS__PG_STATUS_CU_MASK_MASK 0xffffffffL
+#define RLC_DYN_PG_STATUS__PG_STATUS_CU_MASK__SHIFT 0x00000000
+#define RLC_GPM_SCRATCH_ADDR__ADDR_MASK 0x000001ffL
+#define RLC_GPM_SCRATCH_ADDR__ADDR__SHIFT 0x00000000
+#define RLC_GPM_SCRATCH_ADDR__RESERVED_MASK 0xfffffe00L
+#define RLC_GPM_SCRATCH_ADDR__RESERVED__SHIFT 0x00000009
+#define RLC_GPU_CLOCK_32__GPU_CLOCK_32_MASK 0xffffffffL
+#define RLC_GPU_CLOCK_32__GPU_CLOCK_32__SHIFT 0x00000000
+#define RLC_GPU_CLOCK_32_RES_SEL__RESERVED_MASK 0xffffffc0L
+#define RLC_GPU_CLOCK_32_RES_SEL__RESERVED__SHIFT 0x00000006
+#define RLC_GPU_CLOCK_32_RES_SEL__RES_SEL_MASK 0x0000003fL
+#define RLC_GPU_CLOCK_32_RES_SEL__RES_SEL__SHIFT 0x00000000
+#define RLC_GPU_CLOCK_COUNT_LSB__GPU_CLOCKS_LSB_MASK 0xffffffffL
+#define RLC_GPU_CLOCK_COUNT_LSB__GPU_CLOCKS_LSB__SHIFT 0x00000000
+#define RLC_GPU_CLOCK_COUNT_MSB__GPU_CLOCKS_MSB_MASK 0xffffffffL
+#define RLC_GPU_CLOCK_COUNT_MSB__GPU_CLOCKS_MSB__SHIFT 0x00000000
+#define RLC_LB_ALWAYS_ACTIVE_CU_MASK__ALWAYS_ACTIVE_CU_MASK_MASK 0xffffffffL
+#define RLC_LB_ALWAYS_ACTIVE_CU_MASK__ALWAYS_ACTIVE_CU_MASK__SHIFT 0x00000000
+#define RLC_LB_CNTL__CU_MASK_USED_OFF_HYST_MASK 0x00000ff0L
+#define RLC_LB_CNTL__CU_MASK_USED_OFF_HYST__SHIFT 0x00000004
+#define RLC_LB_CNTL__LB_CNT_CP_BUSY_MASK 0x00000002L
+#define RLC_LB_CNTL__LB_CNT_CP_BUSY__SHIFT 0x00000001
+#define RLC_LB_CNTL__LB_CNT_REG_INC_MASK 0x00000008L
+#define RLC_LB_CNTL__LB_CNT_REG_INC__SHIFT 0x00000003
+#define RLC_LB_CNTL__LB_CNT_SPIM_ACTIVE_MASK 0x00000004L
+#define RLC_LB_CNTL__LB_CNT_SPIM_ACTIVE__SHIFT 0x00000002
+#define RLC_LB_CNTL__LOAD_BALANCE_ENABLE_MASK 0x00000001L
+#define RLC_LB_CNTL__LOAD_BALANCE_ENABLE__SHIFT 0x00000000
+#define RLC_LB_CNTL__RESERVED_MASK 0xfffffff0L
+#define RLC_LB_CNTL__RESERVED__SHIFT 0x00000004
+#define RLC_LB_CNTR_INIT__LB_CNTR_INIT_MASK 0xffffffffL
+#define RLC_LB_CNTR_INIT__LB_CNTR_INIT__SHIFT 0x00000000
+#define RLC_LB_CNTR_MAX__LB_CNTR_MAX_MASK 0xffffffffL
+#define RLC_LB_CNTR_MAX__LB_CNTR_MAX__SHIFT 0x00000000
+#define RLC_LB_INIT_CU_MASK__INIT_CU_MASK_MASK 0xffffffffL
+#define RLC_LB_INIT_CU_MASK__INIT_CU_MASK__SHIFT 0x00000000
+#define RLC_LB_PARAMS__FIFO_SAMPLES_MASK 0x000000feL
+#define RLC_LB_PARAMS__FIFO_SAMPLES__SHIFT 0x00000001
+#define RLC_LB_PARAMS__PG_IDLE_SAMPLE_INTERVAL_MASK 0xffff0000L
+#define RLC_LB_PARAMS__PG_IDLE_SAMPLE_INTERVAL__SHIFT 0x00000010
+#define RLC_LB_PARAMS__PG_IDLE_SAMPLES_MASK 0x0000ff00L
+#define RLC_LB_PARAMS__PG_IDLE_SAMPLES__SHIFT 0x00000008
+#define RLC_LB_PARAMS__SKIP_L2_CHECK_MASK 0x00000001L
+#define RLC_LB_PARAMS__SKIP_L2_CHECK__SHIFT 0x00000000
+#define RLC_LOAD_BALANCE_CNTR__RLC_LOAD_BALANCE_CNTR_MASK 0xffffffffL
+#define RLC_LOAD_BALANCE_CNTR__RLC_LOAD_BALANCE_CNTR__SHIFT 0x00000000
+#define RLC_MAX_PG_CU__MAX_POWERED_UP_CU_MASK 0x000000ffL
+#define RLC_MAX_PG_CU__MAX_POWERED_UP_CU__SHIFT 0x00000000
+#define RLC_MAX_PG_CU__SPARE_MASK 0xffffff00L
+#define RLC_MAX_PG_CU__SPARE__SHIFT 0x00000008
+#define RLC_MC_CNTL__RDNFO_STALL_MASK 0x10000000L
+#define RLC_MC_CNTL__RDNFO_STALL__SHIFT 0x0000001c
+#define RLC_MC_CNTL__RDNFO_URG_MASK 0x00f00000L
+#define RLC_MC_CNTL__RDNFO_URG__SHIFT 0x00000014
+#define RLC_MC_CNTL__RDREQ_PRIV_MASK 0x08000000L
+#define RLC_MC_CNTL__RDREQ_PRIV__SHIFT 0x0000001b
+#define RLC_MC_CNTL__RDREQ_SWAP_MASK 0x03000000L
+#define RLC_MC_CNTL__RDREQ_SWAP__SHIFT 0x00000018
+#define RLC_MC_CNTL__RDREQ_TRAN_MASK 0x04000000L
+#define RLC_MC_CNTL__RDREQ_TRAN__SHIFT 0x0000001a
+#define RLC_MC_CNTL__RESERVED_B_MASK 0x000fe000L
+#define RLC_MC_CNTL__RESERVED_B__SHIFT 0x0000000d
+#define RLC_MC_CNTL__RESERVED_MASK 0xe0000000L
+#define RLC_MC_CNTL__RESERVED__SHIFT 0x0000001d
+#define RLC_MC_CNTL__WRNFO_STALL_MASK 0x00000010L
+#define RLC_MC_CNTL__WRNFO_STALL__SHIFT 0x00000004
+#define RLC_MC_CNTL__WRNFO_URG_MASK 0x000001e0L
+#define RLC_MC_CNTL__WRNFO_URG__SHIFT 0x00000005
+#define RLC_MC_CNTL__WRREQ_DW_IMASK_MASK 0x00001e00L
+#define RLC_MC_CNTL__WRREQ_DW_IMASK__SHIFT 0x00000009
+#define RLC_MC_CNTL__WRREQ_PRIV_MASK 0x00000008L
+#define RLC_MC_CNTL__WRREQ_PRIV__SHIFT 0x00000003
+#define RLC_MC_CNTL__WRREQ_SWAP_MASK 0x00000003L
+#define RLC_MC_CNTL__WRREQ_SWAP__SHIFT 0x00000000
+#define RLC_MC_CNTL__WRREQ_TRAN_MASK 0x00000004L
+#define RLC_MC_CNTL__WRREQ_TRAN__SHIFT 0x00000002
+#define RLC_MEM_SLP_CNTL__RESERVED1_MASK 0xff000000L
+#define RLC_MEM_SLP_CNTL__RESERVED1__SHIFT 0x00000018
+#define RLC_MEM_SLP_CNTL__RESERVED_MASK 0x000000fcL
+#define RLC_MEM_SLP_CNTL__RESERVED__SHIFT 0x00000002
+#define RLC_MEM_SLP_CNTL__RLC_MEM_DS_EN_MASK 0x00000002L
+#define RLC_MEM_SLP_CNTL__RLC_MEM_DS_EN__SHIFT 0x00000001
+#define RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK 0x00000001L
+#define RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN__SHIFT 0x00000000
+#define RLC_MEM_SLP_CNTL__RLC_MEM_LS_OFF_DELAY_MASK 0x00ff0000L
+#define RLC_MEM_SLP_CNTL__RLC_MEM_LS_OFF_DELAY__SHIFT 0x00000010
+#define RLC_MEM_SLP_CNTL__RLC_MEM_LS_ON_DELAY_MASK 0x0000ff00L
+#define RLC_MEM_SLP_CNTL__RLC_MEM_LS_ON_DELAY__SHIFT 0x00000008
+#define RLC_PERFCOUNTER0_HI__PERFCOUNTER_HI_MASK 0xffffffffL
+#define RLC_PERFCOUNTER0_HI__PERFCOUNTER_HI__SHIFT 0x00000000
+#define RLC_PERFCOUNTER0_LO__PERFCOUNTER_LO_MASK 0xffffffffL
+#define RLC_PERFCOUNTER0_LO__PERFCOUNTER_LO__SHIFT 0x00000000
+#define RLC_PERFCOUNTER0_SELECT__PERFCOUNTER_SELECT_MASK 0x000000ffL
+#define RLC_PERFCOUNTER0_SELECT__PERFCOUNTER_SELECT__SHIFT 0x00000000
+#define RLC_PERFCOUNTER1_HI__PERFCOUNTER_HI_MASK 0xffffffffL
+#define RLC_PERFCOUNTER1_HI__PERFCOUNTER_HI__SHIFT 0x00000000
+#define RLC_PERFCOUNTER1_LO__PERFCOUNTER_LO_MASK 0xffffffffL
+#define RLC_PERFCOUNTER1_LO__PERFCOUNTER_LO__SHIFT 0x00000000
+#define RLC_PERFCOUNTER1_SELECT__PERFCOUNTER_SELECT_MASK 0x000000ffL
+#define RLC_PERFCOUNTER1_SELECT__PERFCOUNTER_SELECT__SHIFT 0x00000000
+#define RLC_PERFMON_CNTL__PERFMON_SAMPLE_ENABLE_MASK 0x00000400L
+#define RLC_PERFMON_CNTL__PERFMON_SAMPLE_ENABLE__SHIFT 0x0000000a
+#define RLC_PERFMON_CNTL__PERFMON_STATE_MASK 0x00000007L
+#define RLC_PERFMON_CNTL__PERFMON_STATE__SHIFT 0x00000000
+#define RLC_PG_ALWAYS_ON_CU_MASK__AON_CU_MASK_MASK 0xffffffffL
+#define RLC_PG_ALWAYS_ON_CU_MASK__AON_CU_MASK__SHIFT 0x00000000
+#define RLC_PG_CNTL__CHUB_HANDSHAKE_ENABLE_MASK 0x00010000L
+#define RLC_PG_CNTL__CHUB_HANDSHAKE_ENABLE__SHIFT 0x00000010
+#define RLC_PG_CNTL__DYN_PER_CU_PG_ENABLE_MASK 0x00000004L
+#define RLC_PG_CNTL__DYN_PER_CU_PG_ENABLE__SHIFT 0x00000002
+#define RLC_PG_CNTL__GFX_POWER_GATING_ENABLE_MASK 0x00000001L
+#define RLC_PG_CNTL__GFX_POWER_GATING_ENABLE__SHIFT 0x00000000
+#define RLC_PG_CNTL__GFX_POWER_GATING_SRC_MASK 0x00000002L
+#define RLC_PG_CNTL__GFX_POWER_GATING_SRC__SHIFT 0x00000001
+#define RLC_PG_CNTL__PG_ERROR_STATUS_MASK 0xff000000L
+#define RLC_PG_CNTL__PG_ERROR_STATUS__SHIFT 0x00000018
+#define RLC_PG_CNTL__RESERVED1_MASK 0x00f80000L
+#define RLC_PG_CNTL__RESERVED1__SHIFT 0x00000013
+#define RLC_PG_CNTL__RESERVED_MASK 0xfffffff0L
+#define RLC_PG_CNTL__RESERVED__SHIFT 0x00000004
+#define RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PD_ENABLE_MASK 0x00040000L
+#define RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PD_ENABLE__SHIFT 0x00000012
+#define RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PU_ENABLE_MASK 0x00020000L
+#define RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PU_ENABLE__SHIFT 0x00000011
+#define RLC_PG_CNTL__STATIC_PER_CU_PG_ENABLE_MASK 0x00000008L
+#define RLC_PG_CNTL__STATIC_PER_CU_PG_ENABLE__SHIFT 0x00000003
+#define RLC_SAVE_AND_RESTORE_BASE__BASE_MASK 0xffffffffL
+#define RLC_SAVE_AND_RESTORE_BASE__BASE__SHIFT 0x00000000
+#define RLC_SERDES_RD_DATA_0__DATA_MASK 0xffffffffL
+#define RLC_SERDES_RD_DATA_0__DATA__SHIFT 0x00000000
+#define RLC_SERDES_RD_DATA_1__DATA_MASK 0xffffffffL
+#define RLC_SERDES_RD_DATA_1__DATA__SHIFT 0x00000000
+#define RLC_SERDES_RD_DATA_2__DATA_MASK 0xffffffffL
+#define RLC_SERDES_RD_DATA_2__DATA__SHIFT 0x00000000
+#define RLC_SERDES_RD_MASTER_INDEX__CU_ID_MASK 0x0000000fL
+#define RLC_SERDES_RD_MASTER_INDEX__CU_ID__SHIFT 0x00000000
+#define RLC_SERDES_RD_MASTER_INDEX__DATA_REG_ID_MASK 0x0000c000L
+#define RLC_SERDES_RD_MASTER_INDEX__DATA_REG_ID__SHIFT 0x0000000e
+#define RLC_SERDES_RD_MASTER_INDEX__NON_SE_MASK 0x00003800L
+#define RLC_SERDES_RD_MASTER_INDEX__NON_SE__SHIFT 0x0000000b
+#define RLC_SERDES_RD_MASTER_INDEX__SE_ID_MASK 0x000001c0L
+#define RLC_SERDES_RD_MASTER_INDEX__SE_ID__SHIFT 0x00000006
+#define RLC_SERDES_RD_MASTER_INDEX__SE_NONCU_ID_MASK 0x00000200L
+#define RLC_SERDES_RD_MASTER_INDEX__SE_NONCU_ID__SHIFT 0x00000009
+#define RLC_SERDES_RD_MASTER_INDEX__SE_NONCU_MASK 0x00000400L
+#define RLC_SERDES_RD_MASTER_INDEX__SE_NONCU__SHIFT 0x0000000a
+#define RLC_SERDES_RD_MASTER_INDEX__SH_ID_MASK 0x00000030L
+#define RLC_SERDES_RD_MASTER_INDEX__SH_ID__SHIFT 0x00000004
+#define RLC_SERDES_RD_MASTER_INDEX__SPARE_MASK 0xffffc000L
+#define RLC_SERDES_RD_MASTER_INDEX__SPARE__SHIFT 0x0000000e
+#define RLC_SERDES_WR_CTRL__BPM_ADDR_MASK 0x000000ffL
+#define RLC_SERDES_WR_CTRL__BPM_ADDR__SHIFT 0x00000000
+#define RLC_SERDES_WR_CTRL__CGCG_OVERRIDE_0_MASK 0x00100000L
+#define RLC_SERDES_WR_CTRL__CGCG_OVERRIDE_0__SHIFT 0x00000014
+#define RLC_SERDES_WR_CTRL__CGCG_OVERRIDE_1_MASK 0x00200000L
+#define RLC_SERDES_WR_CTRL__CGCG_OVERRIDE_1__SHIFT 0x00000015
+#define RLC_SERDES_WR_CTRL__CGLS_DISABLE_MASK 0x00020000L
+#define RLC_SERDES_WR_CTRL__CGLS_DISABLE__SHIFT 0x00000011
+#define RLC_SERDES_WR_CTRL__CGLS_ENABLE_MASK 0x00010000L
+#define RLC_SERDES_WR_CTRL__CGLS_ENABLE__SHIFT 0x00000010
+#define RLC_SERDES_WR_CTRL__CGLS_OFF_MASK 0x00080000L
+#define RLC_SERDES_WR_CTRL__CGLS_OFF__SHIFT 0x00000013
+#define RLC_SERDES_WR_CTRL__CGLS_ON_MASK 0x00040000L
+#define RLC_SERDES_WR_CTRL__CGLS_ON__SHIFT 0x00000012
+#define RLC_SERDES_WR_CTRL__MGCG_OVERRIDE_0_MASK 0x00400000L
+#define RLC_SERDES_WR_CTRL__MGCG_OVERRIDE_0__SHIFT 0x00000016
+#define RLC_SERDES_WR_CTRL__MGCG_OVERRIDE_1_MASK 0x00800000L
+#define RLC_SERDES_WR_CTRL__MGCG_OVERRIDE_1__SHIFT 0x00000017
+#define RLC_SERDES_WR_CTRL__P1_SELECT_MASK 0x00000400L
+#define RLC_SERDES_WR_CTRL__P1_SELECT__SHIFT 0x0000000a
+#define RLC_SERDES_WR_CTRL__P2_SELECT_MASK 0x00000800L
+#define RLC_SERDES_WR_CTRL__P2_SELECT__SHIFT 0x0000000b
+#define RLC_SERDES_WR_CTRL__POWER_DOWN_MASK 0x00000100L
+#define RLC_SERDES_WR_CTRL__POWER_DOWN__SHIFT 0x00000008
+#define RLC_SERDES_WR_CTRL__POWER_UP_MASK 0x00000200L
+#define RLC_SERDES_WR_CTRL__POWER_UP__SHIFT 0x00000009
+#define RLC_SERDES_WR_CTRL__READ_COMMAND_MASK 0x00002000L
+#define RLC_SERDES_WR_CTRL__READ_COMMAND__SHIFT 0x0000000d
+#define RLC_SERDES_WR_CTRL__REG_ADDR_MASK 0xf0000000L
+#define RLC_SERDES_WR_CTRL__REG_ADDR__SHIFT 0x0000001c
+#define RLC_SERDES_WR_CTRL__RESERVED_1_MASK 0x0000c000L
+#define RLC_SERDES_WR_CTRL__RESERVED_1__SHIFT 0x0000000e
+#define RLC_SERDES_WR_CTRL__RESERVED_2_MASK 0x0f000000L
+#define RLC_SERDES_WR_CTRL__RESERVED_2__SHIFT 0x00000018
+#define RLC_SERDES_WR_CTRL__WRITE_COMMAND_MASK 0x00001000L
+#define RLC_SERDES_WR_CTRL__WRITE_COMMAND__SHIFT 0x0000000c
+#define RLC_SERDES_WR_DATA__DATA_MASK 0xffffffffL
+#define RLC_SERDES_WR_DATA__DATA__SHIFT 0x00000000
+#define RLC_SMU_GRBM_REG_SAVE_CTRL__SPARE_MASK 0xfffffffeL
+#define RLC_SMU_GRBM_REG_SAVE_CTRL__SPARE__SHIFT 0x00000001
+#define RLC_SMU_GRBM_REG_SAVE_CTRL__START_GRBM_REG_SAVE_MASK 0x00000001L
+#define RLC_SMU_GRBM_REG_SAVE_CTRL__START_GRBM_REG_SAVE__SHIFT 0x00000000
+#define RLC_SMU_PG_CTRL__SPARE_MASK 0xfffffffeL
+#define RLC_SMU_PG_CTRL__SPARE__SHIFT 0x00000001
+#define RLC_SMU_PG_CTRL__START_PG_MASK 0x00000001L
+#define RLC_SMU_PG_CTRL__START_PG__SHIFT 0x00000000
+#define RLC_SMU_PG_WAKE_UP_CTRL__SPARE_MASK 0xfffffffeL
+#define RLC_SMU_PG_WAKE_UP_CTRL__SPARE__SHIFT 0x00000001
+#define RLC_SMU_PG_WAKE_UP_CTRL__START_PG_WAKE_UP_MASK 0x00000001L
+#define RLC_SMU_PG_WAKE_UP_CTRL__START_PG_WAKE_UP__SHIFT 0x00000000
+#define RLC_SOFT_RESET_GPU__RESERVED_MASK 0xfffffffeL
+#define RLC_SOFT_RESET_GPU__RESERVED__SHIFT 0x00000001
+#define RLC_SOFT_RESET_GPU__SOFT_RESET_GPU_MASK 0x00000001L
+#define RLC_SOFT_RESET_GPU__SOFT_RESET_GPU__SHIFT 0x00000000
+#define RLC_STATIC_PG_STATUS__PG_STATUS_CU_MASK_MASK 0xffffffffL
+#define RLC_STATIC_PG_STATUS__PG_STATUS_CU_MASK__SHIFT 0x00000000
+#define RLC_STAT__RESERVED_MASK 0xfffffff0L
+#define RLC_STAT__RESERVED__SHIFT 0x00000004
+#define RLC_STAT__RLC_BUSY_MASK 0x00000001L
+#define RLC_STAT__RLC_BUSY__SHIFT 0x00000000
+#define RLC_STAT__RLC_GPM_BUSY_MASK 0x00000002L
+#define RLC_STAT__RLC_GPM_BUSY__SHIFT 0x00000001
+#define RLC_STAT__RLC_SPM_BUSY_MASK 0x00000004L
+#define RLC_STAT__RLC_SPM_BUSY__SHIFT 0x00000002
+#define RLC_THREAD1_DELAY__CU_IDEL_DELAY_MASK 0x000000ffL
+#define RLC_THREAD1_DELAY__CU_IDEL_DELAY__SHIFT 0x00000000
+#define RLC_THREAD1_DELAY__LBPW_INNER_LOOP_DELAY_MASK 0x0000ff00L
+#define RLC_THREAD1_DELAY__LBPW_INNER_LOOP_DELAY__SHIFT 0x00000008
+#define RLC_THREAD1_DELAY__LBPW_OUTER_LOOP_DELAY_MASK 0x00ff0000L
+#define RLC_THREAD1_DELAY__LBPW_OUTER_LOOP_DELAY__SHIFT 0x00000010
+#define RLC_THREAD1_DELAY__SPARE_MASK 0xff000000L
+#define RLC_THREAD1_DELAY__SPARE__SHIFT 0x00000018
+#define RLC_UCODE_CNTL__RLC_UCODE_FLAGS_MASK 0xffffffffL
+#define RLC_UCODE_CNTL__RLC_UCODE_FLAGS__SHIFT 0x00000000
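The RLC_GPU_CLOCK_COUNT_LSB/MSB pair above splits a free-running 64-bit counter across two 32-bit registers; in the amdgpu driver the value is typically latched first by writing the CAPTURE bit of RLC_CAPTURE_GPU_CLOCK_COUNT, also defined above. A minimal sketch, not part of this diff, composing the halves once both have been read; the function name is hypothetical:

/* Illustration only, not part of this commit: composing the 64-bit GPU
 * clock count from its two 32-bit halves after both have been read. */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define RLC_GPU_CLOCK_COUNT_LSB__GPU_CLOCKS_LSB_MASK 0xffffffffL
#define RLC_GPU_CLOCK_COUNT_MSB__GPU_CLOCKS_MSB_MASK 0xffffffffL

/* Hypothetical helper; lsb/msb are values previously read back from
 * RLC_GPU_CLOCK_COUNT_LSB and RLC_GPU_CLOCK_COUNT_MSB. */
static uint64_t gpu_clock_count(uint32_t lsb, uint32_t msb)
{
	uint64_t lo = lsb & RLC_GPU_CLOCK_COUNT_LSB__GPU_CLOCKS_LSB_MASK;
	uint64_t hi = msb & RLC_GPU_CLOCK_COUNT_MSB__GPU_CLOCKS_MSB_MASK;

	return (hi << 32) | lo;
}

int main(void)
{
	/* Prints: count=0x0123456789abcdef */
	printf("count=0x%016" PRIx64 "\n",
	       gpu_clock_count(0x89abcdefu, 0x01234567u));
	return 0;
}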
+#define SCRATCH_ADDR__OBSOLETE_ADDR_MASK 0xffffffffL
+#define SCRATCH_ADDR__OBSOLETE_ADDR__SHIFT 0x00000000
+#define SCRATCH_REG0__SCRATCH_REG0_MASK 0xffffffffL
+#define SCRATCH_REG0__SCRATCH_REG0__SHIFT 0x00000000
+#define SCRATCH_REG1__SCRATCH_REG1_MASK 0xffffffffL
+#define SCRATCH_REG1__SCRATCH_REG1__SHIFT 0x00000000
+#define SCRATCH_REG2__SCRATCH_REG2_MASK 0xffffffffL
+#define SCRATCH_REG2__SCRATCH_REG2__SHIFT 0x00000000
+#define SCRATCH_REG3__SCRATCH_REG3_MASK 0xffffffffL
+#define SCRATCH_REG3__SCRATCH_REG3__SHIFT 0x00000000
+#define SCRATCH_REG4__SCRATCH_REG4_MASK 0xffffffffL
+#define SCRATCH_REG4__SCRATCH_REG4__SHIFT 0x00000000
+#define SCRATCH_REG5__SCRATCH_REG5_MASK 0xffffffffL
+#define SCRATCH_REG5__SCRATCH_REG5__SHIFT 0x00000000
+#define SCRATCH_REG6__SCRATCH_REG6_MASK 0xffffffffL
+#define SCRATCH_REG6__SCRATCH_REG6__SHIFT 0x00000000
+#define SCRATCH_REG7__SCRATCH_REG7_MASK 0xffffffffL
+#define SCRATCH_REG7__SCRATCH_REG7__SHIFT 0x00000000
+#define SCRATCH_UMSK__OBSOLETE_SWAP_MASK 0x00030000L
+#define SCRATCH_UMSK__OBSOLETE_SWAP__SHIFT 0x00000010
+#define SCRATCH_UMSK__OBSOLETE_UMSK_MASK 0x000000ffL
+#define SCRATCH_UMSK__OBSOLETE_UMSK__SHIFT 0x00000000
+#define SETUP_DEBUG_REG0__cl_dyn_sclk_vld_MASK 0x80000000L
+#define SETUP_DEBUG_REG0__cl_dyn_sclk_vld__SHIFT 0x0000001f
+#define SETUP_DEBUG_REG0__event_gated_MASK 0x10000000L
+#define SETUP_DEBUG_REG0__event_gated__SHIFT 0x0000001c
+#define SETUP_DEBUG_REG0__event_id_gated_MASK 0x0fc00000L
+#define SETUP_DEBUG_REG0__event_id_gated__SHIFT 0x00000016
+#define SETUP_DEBUG_REG0__geom_busy_MASK 0x00200000L
+#define SETUP_DEBUG_REG0__geom_busy__SHIFT 0x00000015
+#define SETUP_DEBUG_REG0__geom_enable_MASK 0x00008000L
+#define SETUP_DEBUG_REG0__geom_enable__SHIFT 0x0000000f
+#define SETUP_DEBUG_REG0__ge_stallb_MASK 0x00004000L
+#define SETUP_DEBUG_REG0__ge_stallb__SHIFT 0x0000000e
+#define SETUP_DEBUG_REG0__pfifo_busy_MASK 0x00080000L
+#define SETUP_DEBUG_REG0__pfifo_busy__SHIFT 0x00000013
+#define SETUP_DEBUG_REG0__pmode_prim_gated_MASK 0x20000000L
+#define SETUP_DEBUG_REG0__pmode_prim_gated__SHIFT 0x0000001d
+#define SETUP_DEBUG_REG0__pmode_state_MASK 0x00003f00L
+#define SETUP_DEBUG_REG0__pmode_state__SHIFT 0x00000008
+#define SETUP_DEBUG_REG0__su_baryc_cntl_state_MASK 0x00000003L
+#define SETUP_DEBUG_REG0__su_baryc_cntl_state__SHIFT 0x00000000
+#define SETUP_DEBUG_REG0__su_clip_baryc_free_MASK 0x00030000L
+#define SETUP_DEBUG_REG0__su_clip_baryc_free__SHIFT 0x00000010
+#define SETUP_DEBUG_REG0__su_clip_rtr_MASK 0x00040000L
+#define SETUP_DEBUG_REG0__su_clip_rtr__SHIFT 0x00000012
+#define SETUP_DEBUG_REG0__su_cntl_busy_MASK 0x00100000L
+#define SETUP_DEBUG_REG0__su_cntl_busy__SHIFT 0x00000014
+#define SETUP_DEBUG_REG0__su_cntl_state_MASK 0x0000003cL
+#define SETUP_DEBUG_REG0__su_cntl_state__SHIFT 0x00000002
+#define SETUP_DEBUG_REG0__su_dyn_sclk_vld_MASK 0x40000000L
+#define SETUP_DEBUG_REG0__su_dyn_sclk_vld__SHIFT 0x0000001e
+#define SETUP_DEBUG_REG1__x_sort0_gated_23_8_MASK 0xffff0000L
+#define SETUP_DEBUG_REG1__x_sort0_gated_23_8__SHIFT 0x00000010
+#define SETUP_DEBUG_REG1__y_sort0_gated_23_8_MASK 0x0000ffffL
+#define SETUP_DEBUG_REG1__y_sort0_gated_23_8__SHIFT 0x00000000
+#define SETUP_DEBUG_REG2__x_sort1_gated_23_8_MASK 0xffff0000L
+#define SETUP_DEBUG_REG2__x_sort1_gated_23_8__SHIFT 0x00000010
+#define SETUP_DEBUG_REG2__y_sort1_gated_23_8_MASK 0x0000ffffL
+#define SETUP_DEBUG_REG2__y_sort1_gated_23_8__SHIFT 0x00000000
+#define SETUP_DEBUG_REG3__x_sort2_gated_23_8_MASK 0xffff0000L
+#define SETUP_DEBUG_REG3__x_sort2_gated_23_8__SHIFT 0x00000010
+#define SETUP_DEBUG_REG3__y_sort2_gated_23_8_MASK 0x0000ffffL
+#define SETUP_DEBUG_REG3__y_sort2_gated_23_8__SHIFT 0x00000000
+#define SETUP_DEBUG_REG4__attr_indx_sort0_gated_MASK 0x00003fffL
+#define SETUP_DEBUG_REG4__attr_indx_sort0_gated__SHIFT 0x00000000
+#define SETUP_DEBUG_REG4__backfacing_gated_MASK 0x00008000L
+#define SETUP_DEBUG_REG4__backfacing_gated__SHIFT 0x0000000f
+#define SETUP_DEBUG_REG4__clipped_gated_MASK 0x00080000L
+#define SETUP_DEBUG_REG4__clipped_gated__SHIFT 0x00000013
+#define SETUP_DEBUG_REG4__dealloc_slot_gated_MASK 0x00700000L
+#define SETUP_DEBUG_REG4__dealloc_slot_gated__SHIFT 0x00000014
+#define SETUP_DEBUG_REG4__diamond_rule_gated_MASK 0x03000000L
+#define SETUP_DEBUG_REG4__diamond_rule_gated__SHIFT 0x00000018
+#define SETUP_DEBUG_REG4__eop_gated_MASK 0x80000000L
+#define SETUP_DEBUG_REG4__eop_gated__SHIFT 0x0000001f
+#define SETUP_DEBUG_REG4__fpov_gated_MASK 0x60000000L
+#define SETUP_DEBUG_REG4__fpov_gated__SHIFT 0x0000001d
+#define SETUP_DEBUG_REG4__null_prim_gated_MASK 0x00004000L
+#define SETUP_DEBUG_REG4__null_prim_gated__SHIFT 0x0000000e
+#define SETUP_DEBUG_REG4__st_indx_gated_MASK 0x00070000L
+#define SETUP_DEBUG_REG4__st_indx_gated__SHIFT 0x00000010
+#define SETUP_DEBUG_REG4__type_gated_MASK 0x1c000000L
+#define SETUP_DEBUG_REG4__type_gated__SHIFT 0x0000001a
+#define SETUP_DEBUG_REG4__xmajor_gated_MASK 0x00800000L
+#define SETUP_DEBUG_REG4__xmajor_gated__SHIFT 0x00000017
+#define SETUP_DEBUG_REG5__attr_indx_sort1_gated_MASK 0x0fffc000L
+#define SETUP_DEBUG_REG5__attr_indx_sort1_gated__SHIFT 0x0000000e
+#define SETUP_DEBUG_REG5__attr_indx_sort2_gated_MASK 0x00003fffL
+#define SETUP_DEBUG_REG5__attr_indx_sort2_gated__SHIFT 0x00000000
+#define SETUP_DEBUG_REG5__pa_reg_sclk_vld_MASK 0x80000000L
+#define SETUP_DEBUG_REG5__pa_reg_sclk_vld__SHIFT 0x0000001f
+#define SETUP_DEBUG_REG5__provoking_vtx_gated_MASK 0x30000000L
+#define SETUP_DEBUG_REG5__provoking_vtx_gated__SHIFT 0x0000001c
+#define SETUP_DEBUG_REG5__valid_prim_gated_MASK 0x40000000L
+#define SETUP_DEBUG_REG5__valid_prim_gated__SHIFT 0x0000001e
+#define SPI_ARB_CYCLES_0__TS0_DURATION_MASK 0x0000ffffL
+#define SPI_ARB_CYCLES_0__TS0_DURATION__SHIFT 0x00000000
+#define SPI_ARB_CYCLES_0__TS1_DURATION_MASK 0xffff0000L
+#define SPI_ARB_CYCLES_0__TS1_DURATION__SHIFT 0x00000010
+#define SPI_ARB_CYCLES_1__TS2_DURATION_MASK 0x0000ffffL
+#define SPI_ARB_CYCLES_1__TS2_DURATION__SHIFT 0x00000000
+#define SPI_ARB_CYCLES_1__TS3_DURATION_MASK 0xffff0000L
+#define SPI_ARB_CYCLES_1__TS3_DURATION__SHIFT 0x00000010
+#define SPI_ARB_PRIORITY__PIPE_ORDER_TS0_MASK 0x00000007L
+#define SPI_ARB_PRIORITY__PIPE_ORDER_TS0__SHIFT 0x00000000
+#define SPI_ARB_PRIORITY__PIPE_ORDER_TS1_MASK 0x00000038L
+#define SPI_ARB_PRIORITY__PIPE_ORDER_TS1__SHIFT 0x00000003
+#define SPI_ARB_PRIORITY__PIPE_ORDER_TS2_MASK 0x000001c0L
+#define SPI_ARB_PRIORITY__PIPE_ORDER_TS2__SHIFT 0x00000006
+#define SPI_ARB_PRIORITY__PIPE_ORDER_TS3_MASK 0x00000e00L
+#define SPI_ARB_PRIORITY__PIPE_ORDER_TS3__SHIFT 0x00000009
+#define SPI_ARB_PRIORITY__TS0_DUR_MULT_MASK 0x00003000L
+#define SPI_ARB_PRIORITY__TS0_DUR_MULT__SHIFT 0x0000000c
+#define SPI_ARB_PRIORITY__TS1_DUR_MULT_MASK 0x0000c000L
+#define SPI_ARB_PRIORITY__TS1_DUR_MULT__SHIFT 0x0000000e
+#define SPI_ARB_PRIORITY__TS2_DUR_MULT_MASK 0x00030000L
+#define SPI_ARB_PRIORITY__TS2_DUR_MULT__SHIFT 0x00000010
+#define SPI_ARB_PRIORITY__TS3_DUR_MULT_MASK 0x000c0000L
+#define SPI_ARB_PRIORITY__TS3_DUR_MULT__SHIFT 0x00000012
+#define SPI_BARYC_CNTL__FRONT_FACE_ALL_BITS_MASK 0x01000000L
+#define SPI_BARYC_CNTL__FRONT_FACE_ALL_BITS__SHIFT 0x00000018
+#define SPI_BARYC_CNTL__LINEAR_CENTER_CNTL_MASK 0x00000100L
+#define SPI_BARYC_CNTL__LINEAR_CENTER_CNTL__SHIFT 0x00000008
+#define SPI_BARYC_CNTL__LINEAR_CENTROID_CNTL_MASK 0x00001000L
+#define SPI_BARYC_CNTL__LINEAR_CENTROID_CNTL__SHIFT 0x0000000c
+#define SPI_BARYC_CNTL__PERSP_CENTER_CNTL_MASK 0x00000001L
+#define SPI_BARYC_CNTL__PERSP_CENTER_CNTL__SHIFT 0x00000000
+#define SPI_BARYC_CNTL__PERSP_CENTROID_CNTL_MASK 0x00000010L
+#define SPI_BARYC_CNTL__PERSP_CENTROID_CNTL__SHIFT 0x00000004
+#define SPI_BARYC_CNTL__POS_FLOAT_LOCATION_MASK 0x00030000L
+#define SPI_BARYC_CNTL__POS_FLOAT_LOCATION__SHIFT 0x00000010
+#define SPI_BARYC_CNTL__POS_FLOAT_ULC_MASK 0x00100000L
+#define SPI_BARYC_CNTL__POS_FLOAT_ULC__SHIFT 0x00000014
+#define SPI_CONFIG_CNTL_1__CRC_SIMD_ID_WADDR_DISABLE_MASK 0x00000100L
+#define SPI_CONFIG_CNTL_1__CRC_SIMD_ID_WADDR_DISABLE__SHIFT 0x00000008
+#define SPI_CONFIG_CNTL_1__INTERP_ONE_PRIM_PER_ROW_MASK 0x00000010L
+#define SPI_CONFIG_CNTL_1__INTERP_ONE_PRIM_PER_ROW__SHIFT 0x00000004
+#define SPI_CONFIG_CNTL_1__LBPW_CU_CHK_CNT_MASK 0x00003c00L
+#define SPI_CONFIG_CNTL_1__LBPW_CU_CHK_CNT__SHIFT 0x0000000a
+#define SPI_CONFIG_CNTL_1__LBPW_CU_CHK_MODE_MASK 0x00000200L
+#define SPI_CONFIG_CNTL_1__LBPW_CU_CHK_MODE__SHIFT 0x00000009
+#define SPI_CONFIG_CNTL_1__PC_LIMIT_ENABLE_MASK 0x00000040L
+#define SPI_CONFIG_CNTL_1__PC_LIMIT_ENABLE__SHIFT 0x00000006
+#define SPI_CONFIG_CNTL_1__PC_LIMIT_SIZE_MASK 0xffff0000L
+#define SPI_CONFIG_CNTL_1__PC_LIMIT_SIZE__SHIFT 0x00000010
+#define SPI_CONFIG_CNTL_1__PC_LIMIT_STRICT_MASK 0x00000080L
+#define SPI_CONFIG_CNTL_1__PC_LIMIT_STRICT__SHIFT 0x00000007
+#define SPI_CONFIG_CNTL_1__VTX_DONE_DELAY_MASK 0x0000000fL
+#define SPI_CONFIG_CNTL_1__VTX_DONE_DELAY__SHIFT 0x00000000
+#define SPI_CONFIG_CNTL__ENABLE_SQG_BOP_EVENTS_MASK 0x02000000L
+#define SPI_CONFIG_CNTL__ENABLE_SQG_BOP_EVENTS__SHIFT 0x00000019
+#define SPI_CONFIG_CNTL__ENABLE_SQG_TOP_EVENTS_MASK 0x01000000L
+#define SPI_CONFIG_CNTL__ENABLE_SQG_TOP_EVENTS__SHIFT 0x00000018
+#define SPI_CONFIG_CNTL__EXP_PRIORITY_ORDER_MASK 0x00e00000L
+#define SPI_CONFIG_CNTL__EXP_PRIORITY_ORDER__SHIFT 0x00000015
+#define SPI_CONFIG_CNTL__GPR_WRITE_PRIORITY_MASK 0x001fffffL
+#define SPI_CONFIG_CNTL__GPR_WRITE_PRIORITY__SHIFT 0x00000000
+#define SPI_CONFIG_CNTL__RSRC_MGMT_RESET_MASK 0x04000000L
+#define SPI_CONFIG_CNTL__RSRC_MGMT_RESET__SHIFT 0x0000001a
+#define SPI_CONFIG_CNTL__TTRACE_STALL_ALL_MASK 0x08000000L
+#define SPI_CONFIG_CNTL__TTRACE_STALL_ALL__SHIFT 0x0000001b
+#define SPI_DEBUG_BUSY__CS0_BUSY_MASK 0x00000080L
+#define SPI_DEBUG_BUSY__CS0_BUSY__SHIFT 0x00000007
+#define SPI_DEBUG_BUSY__CS1_BUSY_MASK 0x00000100L
+#define SPI_DEBUG_BUSY__CS1_BUSY__SHIFT 0x00000008
+#define SPI_DEBUG_BUSY__CS2_BUSY_MASK 0x00000200L
+#define SPI_DEBUG_BUSY__CS2_BUSY__SHIFT 0x00000009
+#define SPI_DEBUG_BUSY__CS3_BUSY_MASK 0x00000800L
+#define SPI_DEBUG_BUSY__CS3_BUSY__SHIFT 0x0000000b
+#define SPI_DEBUG_BUSY__CS4_BUSY_MASK 0x00001000L
+#define SPI_DEBUG_BUSY__CS4_BUSY__SHIFT 0x0000000c
+#define SPI_DEBUG_BUSY__CS5_BUSY_MASK 0x00002000L
+#define SPI_DEBUG_BUSY__CS5_BUSY__SHIFT 0x0000000d
+#define SPI_DEBUG_BUSY__CS6_BUSY_MASK 0x00004000L
+#define SPI_DEBUG_BUSY__CS6_BUSY__SHIFT 0x0000000e
+#define SPI_DEBUG_BUSY__CS7_BUSY_MASK 0x00008000L
+#define SPI_DEBUG_BUSY__CS7_BUSY__SHIFT 0x0000000f
+#define SPI_DEBUG_BUSY__CSG_BUSY_MASK 0x00000080L
+#define SPI_DEBUG_BUSY__CSG_BUSY__SHIFT 0x00000007
+#define SPI_DEBUG_BUSY__ES_BUSY_MASK 0x00000004L
+#define SPI_DEBUG_BUSY__ES_BUSY__SHIFT 0x00000002
+#define SPI_DEBUG_BUSY__EVENT_CLCTR_BUSY_MASK 0x00008000L
+#define SPI_DEBUG_BUSY__EVENT_CLCTR_BUSY__SHIFT 0x0000000f
+#define SPI_DEBUG_BUSY__GRBM_BUSY_MASK 0x00010000L
+#define SPI_DEBUG_BUSY__GRBM_BUSY__SHIFT 0x00000010
+#define SPI_DEBUG_BUSY__GS_BUSY_MASK 0x00000008L
+#define SPI_DEBUG_BUSY__GS_BUSY__SHIFT 0x00000003
+#define SPI_DEBUG_BUSY__HS_BUSY_MASK 0x00000002L
+#define SPI_DEBUG_BUSY__HS_BUSY__SHIFT 0x00000001
+#define SPI_DEBUG_BUSY__LDS_WR_CTL0_BUSY_MASK 0x00000400L
+#define SPI_DEBUG_BUSY__LDS_WR_CTL0_BUSY__SHIFT 0x0000000a
+#define SPI_DEBUG_BUSY__LDS_WR_CTL1_BUSY_MASK 0x00000800L
+#define SPI_DEBUG_BUSY__LDS_WR_CTL1_BUSY__SHIFT 0x0000000b
+#define SPI_DEBUG_BUSY__LS_BUSY_MASK 0x00000001L
+#define SPI_DEBUG_BUSY__LS_BUSY__SHIFT 0x00000000
+#define SPI_DEBUG_BUSY__PC_DEALLOC_BUSY_MASK 0x00100000L
+#define SPI_DEBUG_BUSY__PC_DEALLOC_BUSY__SHIFT 0x00000014
+#define SPI_DEBUG_BUSY__PS0_BUSY_MASK 0x00000020L
+#define SPI_DEBUG_BUSY__PS0_BUSY__SHIFT 0x00000005
+#define SPI_DEBUG_BUSY__PS1_BUSY_MASK 0x00000040L
+#define SPI_DEBUG_BUSY__PS1_BUSY__SHIFT 0x00000006
+#define SPI_DEBUG_BUSY__RSRC_ALLOC0_BUSY_MASK 0x00001000L
+#define SPI_DEBUG_BUSY__RSRC_ALLOC0_BUSY__SHIFT 0x0000000c
+#define SPI_DEBUG_BUSY__RSRC_ALLOC1_BUSY_MASK 0x00002000L
+#define SPI_DEBUG_BUSY__RSRC_ALLOC1_BUSY__SHIFT 0x0000000d
+#define SPI_DEBUG_BUSY__SPIS_BUSY_MASK 0x00020000L
+#define SPI_DEBUG_BUSY__SPIS_BUSY__SHIFT 0x00000011
+#define SPI_DEBUG_BUSY__VS_BUSY_MASK 0x00000010L
+#define SPI_DEBUG_BUSY__VS_BUSY__SHIFT 0x00000004
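Each SPI_DEBUG_BUSY field above is a single status bit, so decoding reduces to a mask test (note that, as generated, CSG_BUSY aliases bit 7 with CS0_BUSY and EVENT_CLCTR_BUSY aliases bit 15 with CS7_BUSY, presumably because the header covers more than one pipeline variant). A minimal sketch, not part of this diff; helper name and sample value are hypothetical:

/* Illustration only, not part of this commit: single-bit busy flags
 * reduce to simple mask tests; values copied from the defines above. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define SPI_DEBUG_BUSY__GS_BUSY_MASK 0x00000008L
#define SPI_DEBUG_BUSY__VS_BUSY_MASK 0x00000010L
#define SPI_DEBUG_BUSY__PS0_BUSY_MASK 0x00000020L

static bool spi_stage_busy(uint32_t spi_debug_busy, uint32_t stage_mask)
{
	return (spi_debug_busy & stage_mask) != 0;
}

int main(void)
{
	uint32_t reg = 0x28;	/* hypothetical sample: GS and PS0 busy */

	/* Prints: GS:1 VS:0 PS0:1 */
	printf("GS:%d VS:%d PS0:%d\n",
	       spi_stage_busy(reg, SPI_DEBUG_BUSY__GS_BUSY_MASK),
	       spi_stage_busy(reg, SPI_DEBUG_BUSY__VS_BUSY_MASK),
	       spi_stage_busy(reg, SPI_DEBUG_BUSY__PS0_BUSY_MASK));
	return 0;
}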
+#define SPI_DEBUG_CNTL__DEBUG_GRBM_OVERRIDE_MASK 0x00000001L
+#define SPI_DEBUG_CNTL__DEBUG_GRBM_OVERRIDE__SHIFT 0x00000000
+#define SPI_DEBUG_CNTL__DEBUG_GROUP_SEL_MASK 0x000003e0L
+#define SPI_DEBUG_CNTL__DEBUG_GROUP_SEL__SHIFT 0x00000005
+#define SPI_DEBUG_CNTL__DEBUG_PIPE_SEL_MASK 0x0e000000L
+#define SPI_DEBUG_CNTL__DEBUG_PIPE_SEL__SHIFT 0x00000019
+#define SPI_DEBUG_CNTL__DEBUG_REG_EN_MASK 0x80000000L
+#define SPI_DEBUG_CNTL__DEBUG_REG_EN__SHIFT 0x0000001f
+#define SPI_DEBUG_CNTL__DEBUG_SH_SEL_MASK 0x00010000L
+#define SPI_DEBUG_CNTL__DEBUG_SH_SEL__SHIFT 0x00000010
+#define SPI_DEBUG_CNTL__DEBUG_SIMD_SEL_MASK 0x0000fc00L
+#define SPI_DEBUG_CNTL__DEBUG_SIMD_SEL__SHIFT 0x0000000a
+#define SPI_DEBUG_CNTL__DEBUG_THREAD_TYPE_SEL_MASK 0x0000001eL
+#define SPI_DEBUG_CNTL__DEBUG_THREAD_TYPE_SEL__SHIFT 0x00000001
+#define SPI_DEBUG_CNTL__SPI_ECO_SPARE_0_MASK 0x00020000L
+#define SPI_DEBUG_CNTL__SPI_ECO_SPARE_0__SHIFT 0x00000011
+#define SPI_DEBUG_CNTL__SPI_ECO_SPARE_1_MASK 0x00040000L
+#define SPI_DEBUG_CNTL__SPI_ECO_SPARE_1__SHIFT 0x00000012
+#define SPI_DEBUG_CNTL__SPI_ECO_SPARE_2_MASK 0x00080000L
+#define SPI_DEBUG_CNTL__SPI_ECO_SPARE_2__SHIFT 0x00000013
+#define SPI_DEBUG_CNTL__SPI_ECO_SPARE_3_MASK 0x00100000L
+#define SPI_DEBUG_CNTL__SPI_ECO_SPARE_3__SHIFT 0x00000014
+#define SPI_DEBUG_CNTL__SPI_ECO_SPARE_4_MASK 0x00200000L
+#define SPI_DEBUG_CNTL__SPI_ECO_SPARE_4__SHIFT 0x00000015
+#define SPI_DEBUG_CNTL__SPI_ECO_SPARE_5_MASK 0x00400000L
+#define SPI_DEBUG_CNTL__SPI_ECO_SPARE_5__SHIFT 0x00000016
+#define SPI_DEBUG_CNTL__SPI_ECO_SPARE_6_MASK 0x00800000L
+#define SPI_DEBUG_CNTL__SPI_ECO_SPARE_6__SHIFT 0x00000017
+#define SPI_DEBUG_CNTL__SPI_ECO_SPARE_7_MASK 0x01000000L
+#define SPI_DEBUG_CNTL__SPI_ECO_SPARE_7__SHIFT 0x00000018
+#define SPI_DEBUG_READ__DATA_MASK 0x00ffffffL
+#define SPI_DEBUG_READ__DATA__SHIFT 0x00000000
+#define SPI_GDS_CREDITS__DS_CMD_CREDITS_MASK 0x0000ff00L
+#define SPI_GDS_CREDITS__DS_CMD_CREDITS__SHIFT 0x00000008
+#define SPI_GDS_CREDITS__DS_DATA_CREDITS_MASK 0x000000ffL
+#define SPI_GDS_CREDITS__DS_DATA_CREDITS__SHIFT 0x00000000
+#define SPI_GDS_CREDITS__UNUSED_MASK 0xffff0000L
+#define SPI_GDS_CREDITS__UNUSED__SHIFT 0x00000010
+#define SPI_INTERP_CONTROL_0__FLAT_SHADE_ENA_MASK 0x00000001L
+#define SPI_INTERP_CONTROL_0__FLAT_SHADE_ENA__SHIFT 0x00000000
+#define SPI_INTERP_CONTROL_0__PNT_SPRITE_ENA_MASK 0x00000002L
+#define SPI_INTERP_CONTROL_0__PNT_SPRITE_ENA__SHIFT 0x00000001
+#define SPI_INTERP_CONTROL_0__PNT_SPRITE_OVRD_W_MASK 0x00003800L
+#define SPI_INTERP_CONTROL_0__PNT_SPRITE_OVRD_W__SHIFT 0x0000000b
+#define SPI_INTERP_CONTROL_0__PNT_SPRITE_OVRD_X_MASK 0x0000001cL
+#define SPI_INTERP_CONTROL_0__PNT_SPRITE_OVRD_X__SHIFT 0x00000002
+#define SPI_INTERP_CONTROL_0__PNT_SPRITE_OVRD_Y_MASK 0x000000e0L
+#define SPI_INTERP_CONTROL_0__PNT_SPRITE_OVRD_Y__SHIFT 0x00000005
+#define SPI_INTERP_CONTROL_0__PNT_SPRITE_OVRD_Z_MASK 0x00000700L
+#define SPI_INTERP_CONTROL_0__PNT_SPRITE_OVRD_Z__SHIFT 0x00000008
+#define SPI_INTERP_CONTROL_0__PNT_SPRITE_TOP_1_MASK 0x00004000L
+#define SPI_INTERP_CONTROL_0__PNT_SPRITE_TOP_1__SHIFT 0x0000000e
+#define SPI_LB_CTR_CTRL__LOAD_MASK 0x00000001L
+#define SPI_LB_CTR_CTRL__LOAD__SHIFT 0x00000000
+#define SPI_LB_CU_MASK__CU_MASK_MASK 0x0000ffffL
+#define SPI_LB_CU_MASK__CU_MASK__SHIFT 0x00000000
+#define SPI_LB_DATA_REG__CNT_DATA_MASK 0xffffffffL
+#define SPI_LB_DATA_REG__CNT_DATA__SHIFT 0x00000000
+#define SPI_PERFCOUNTER0_HI__PERFCOUNTER_HI_MASK 0xffffffffL
+#define SPI_PERFCOUNTER0_HI__PERFCOUNTER_HI__SHIFT 0x00000000
+#define SPI_PERFCOUNTER0_LO__PERFCOUNTER_LO_MASK 0xffffffffL
+#define SPI_PERFCOUNTER0_LO__PERFCOUNTER_LO__SHIFT 0x00000000
+#define SPI_PERFCOUNTER0_SELECT1__PERF_SEL2_MASK 0x000003ffL
+#define SPI_PERFCOUNTER0_SELECT1__PERF_SEL2__SHIFT 0x00000000
+#define SPI_PERFCOUNTER0_SELECT1__PERF_SEL3_MASK 0x000ffc00L
+#define SPI_PERFCOUNTER0_SELECT1__PERF_SEL3__SHIFT 0x0000000a
+#define SPI_PERFCOUNTER0_SELECT__CNTR_MODE_MASK 0x00f00000L
+#define SPI_PERFCOUNTER0_SELECT__CNTR_MODE__SHIFT 0x00000014
+#define SPI_PERFCOUNTER0_SELECT__PERF_SEL1_MASK 0x000ffc00L
+#define SPI_PERFCOUNTER0_SELECT__PERF_SEL1__SHIFT 0x0000000a
+#define SPI_PERFCOUNTER0_SELECT__PERF_SEL_MASK 0x000000ffL
+#define SPI_PERFCOUNTER0_SELECT__PERF_SEL__SHIFT 0x00000000
+#define SPI_PERFCOUNTER1_HI__PERFCOUNTER_HI_MASK 0xffffffffL
+#define SPI_PERFCOUNTER1_HI__PERFCOUNTER_HI__SHIFT 0x00000000
+#define SPI_PERFCOUNTER1_LO__PERFCOUNTER_LO_MASK 0xffffffffL
+#define SPI_PERFCOUNTER1_LO__PERFCOUNTER_LO__SHIFT 0x00000000
+#define SPI_PERFCOUNTER1_SELECT1__PERF_SEL2_MASK 0x000003ffL
+#define SPI_PERFCOUNTER1_SELECT1__PERF_SEL2__SHIFT 0x00000000
+#define SPI_PERFCOUNTER1_SELECT1__PERF_SEL3_MASK 0x000ffc00L
+#define SPI_PERFCOUNTER1_SELECT1__PERF_SEL3__SHIFT 0x0000000a
+#define SPI_PERFCOUNTER1_SELECT__CNTR_MODE_MASK 0x00f00000L
+#define SPI_PERFCOUNTER1_SELECT__CNTR_MODE__SHIFT 0x00000014
+#define SPI_PERFCOUNTER1_SELECT__PERF_SEL1_MASK 0x000ffc00L
+#define SPI_PERFCOUNTER1_SELECT__PERF_SEL1__SHIFT 0x0000000a
+#define SPI_PERFCOUNTER1_SELECT__PERF_SEL_MASK 0x000000ffL
+#define SPI_PERFCOUNTER1_SELECT__PERF_SEL__SHIFT 0x00000000
+#define SPI_PERFCOUNTER2_HI__PERFCOUNTER_HI_MASK 0xffffffffL
+#define SPI_PERFCOUNTER2_HI__PERFCOUNTER_HI__SHIFT 0x00000000
+#define SPI_PERFCOUNTER2_LO__PERFCOUNTER_LO_MASK 0xffffffffL
+#define SPI_PERFCOUNTER2_LO__PERFCOUNTER_LO__SHIFT 0x00000000
+#define SPI_PERFCOUNTER2_SELECT1__PERF_SEL2_MASK 0x000003ffL
+#define SPI_PERFCOUNTER2_SELECT1__PERF_SEL2__SHIFT 0x00000000
+#define SPI_PERFCOUNTER2_SELECT1__PERF_SEL3_MASK 0x000ffc00L
+#define SPI_PERFCOUNTER2_SELECT1__PERF_SEL3__SHIFT 0x0000000a
+#define SPI_PERFCOUNTER2_SELECT__CNTR_MODE_MASK 0x00f00000L
+#define SPI_PERFCOUNTER2_SELECT__CNTR_MODE__SHIFT 0x00000014
+#define SPI_PERFCOUNTER2_SELECT__PERF_SEL1_MASK 0x000ffc00L
+#define SPI_PERFCOUNTER2_SELECT__PERF_SEL1__SHIFT 0x0000000a
+#define SPI_PERFCOUNTER2_SELECT__PERF_SEL_MASK 0x000000ffL
+#define SPI_PERFCOUNTER2_SELECT__PERF_SEL__SHIFT 0x00000000
+#define SPI_PERFCOUNTER3_HI__PERFCOUNTER_HI_MASK 0xffffffffL
+#define SPI_PERFCOUNTER3_HI__PERFCOUNTER_HI__SHIFT 0x00000000
+#define SPI_PERFCOUNTER3_LO__PERFCOUNTER_LO_MASK 0xffffffffL
+#define SPI_PERFCOUNTER3_LO__PERFCOUNTER_LO__SHIFT 0x00000000
+#define SPI_PERFCOUNTER3_SELECT1__PERF_SEL2_MASK 0x000003ffL
+#define SPI_PERFCOUNTER3_SELECT1__PERF_SEL2__SHIFT 0x00000000
+#define SPI_PERFCOUNTER3_SELECT1__PERF_SEL3_MASK 0x000ffc00L
+#define SPI_PERFCOUNTER3_SELECT1__PERF_SEL3__SHIFT 0x0000000a
+#define SPI_PERFCOUNTER3_SELECT__CNTR_MODE_MASK 0x00f00000L
+#define SPI_PERFCOUNTER3_SELECT__CNTR_MODE__SHIFT 0x00000014
+#define SPI_PERFCOUNTER3_SELECT__PERF_SEL1_MASK 0x000ffc00L
+#define SPI_PERFCOUNTER3_SELECT__PERF_SEL1__SHIFT 0x0000000a
+#define SPI_PERFCOUNTER3_SELECT__PERF_SEL_MASK 0x000000ffL
+#define SPI_PERFCOUNTER3_SELECT__PERF_SEL__SHIFT 0x00000000
+#define SPI_PERFCOUNTER_BINS__BIN0_MAX_MASK 0x000000f0L
+#define SPI_PERFCOUNTER_BINS__BIN0_MAX__SHIFT 0x00000004
+#define SPI_PERFCOUNTER_BINS__BIN0_MIN_MASK 0x0000000fL
+#define SPI_PERFCOUNTER_BINS__BIN0_MIN__SHIFT 0x00000000
+#define SPI_PERFCOUNTER_BINS__BIN1_MAX_MASK 0x0000f000L
+#define SPI_PERFCOUNTER_BINS__BIN1_MAX__SHIFT 0x0000000c
+#define SPI_PERFCOUNTER_BINS__BIN1_MIN_MASK 0x00000f00L
+#define SPI_PERFCOUNTER_BINS__BIN1_MIN__SHIFT 0x00000008
+#define SPI_PERFCOUNTER_BINS__BIN2_MAX_MASK 0x00f00000L
+#define SPI_PERFCOUNTER_BINS__BIN2_MAX__SHIFT 0x00000014
+#define SPI_PERFCOUNTER_BINS__BIN2_MIN_MASK 0x000f0000L
+#define SPI_PERFCOUNTER_BINS__BIN2_MIN__SHIFT 0x00000010
+#define SPI_PERFCOUNTER_BINS__BIN3_MAX_MASK 0xf0000000L
+#define SPI_PERFCOUNTER_BINS__BIN3_MAX__SHIFT 0x0000001c
+#define SPI_PERFCOUNTER_BINS__BIN3_MIN_MASK 0x0f000000L
+#define SPI_PERFCOUNTER_BINS__BIN3_MIN__SHIFT 0x00000018
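The SELECT/SELECT1 perf-counter registers above pack several select fields into one word (for SPI_PERFCOUNTER0_SELECT: PERF_SEL in bits 7:0, PERF_SEL1 in bits 19:10, CNTR_MODE in bits 23:20, with bits 9:8 unused). A minimal sketch of building such a value, not part of this diff; the function name is hypothetical:

/* Illustration only, not part of this commit: packing the three select
 * fields of SPI_PERFCOUNTER0_SELECT into one register value. */
#include <stdint.h>
#include <stdio.h>

#define SPI_PERFCOUNTER0_SELECT__CNTR_MODE_MASK 0x00f00000L
#define SPI_PERFCOUNTER0_SELECT__CNTR_MODE__SHIFT 0x00000014
#define SPI_PERFCOUNTER0_SELECT__PERF_SEL1_MASK 0x000ffc00L
#define SPI_PERFCOUNTER0_SELECT__PERF_SEL1__SHIFT 0x0000000a
#define SPI_PERFCOUNTER0_SELECT__PERF_SEL_MASK 0x000000ffL
#define SPI_PERFCOUNTER0_SELECT__PERF_SEL__SHIFT 0x00000000

/* Hypothetical helper: each field is shifted into place and clipped to
 * its mask so an out-of-range argument cannot corrupt neighbouring fields. */
static uint32_t spi_perfcounter0_select(uint32_t sel, uint32_t sel1,
					uint32_t cntr_mode)
{
	return (uint32_t)(((sel << SPI_PERFCOUNTER0_SELECT__PERF_SEL__SHIFT) &
			   SPI_PERFCOUNTER0_SELECT__PERF_SEL_MASK) |
			  ((sel1 << SPI_PERFCOUNTER0_SELECT__PERF_SEL1__SHIFT) &
			   SPI_PERFCOUNTER0_SELECT__PERF_SEL1_MASK) |
			  ((cntr_mode << SPI_PERFCOUNTER0_SELECT__CNTR_MODE__SHIFT) &
			   SPI_PERFCOUNTER0_SELECT__CNTR_MODE_MASK));
}

int main(void)
{
	/* Prints: select=0x0010d012 */
	printf("select=0x%08x\n",
	       (unsigned)spi_perfcounter0_select(0x12, 0x34, 0x1));
	return 0;
}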
+#define SPI_PG_ENABLE_STATIC_CU_MASK__CU_MASK_MASK 0x0000ffffL
+#define SPI_PG_ENABLE_STATIC_CU_MASK__CU_MASK__SHIFT 0x00000000
+#define SPI_PS_IN_CONTROL__BC_OPTIMIZE_DISABLE_MASK 0x00004000L
+#define SPI_PS_IN_CONTROL__BC_OPTIMIZE_DISABLE__SHIFT 0x0000000e
+#define SPI_PS_IN_CONTROL__NUM_INTERP_MASK 0x0000003fL
+#define SPI_PS_IN_CONTROL__NUM_INTERP__SHIFT 0x00000000
+#define SPI_PS_IN_CONTROL__PARAM_GEN_MASK 0x00000040L
+#define SPI_PS_IN_CONTROL__PARAM_GEN__SHIFT 0x00000006
+#define SPI_PS_INPUT_ADDR__ANCILLARY_ENA_MASK 0x00002000L
+#define SPI_PS_INPUT_ADDR__ANCILLARY_ENA__SHIFT 0x0000000d
+#define SPI_PS_INPUT_ADDR__FRONT_FACE_ENA_MASK 0x00001000L
+#define SPI_PS_INPUT_ADDR__FRONT_FACE_ENA__SHIFT 0x0000000c
+#define SPI_PS_INPUT_ADDR__LINEAR_CENTER_ENA_MASK 0x00000020L
+#define SPI_PS_INPUT_ADDR__LINEAR_CENTER_ENA__SHIFT 0x00000005
+#define SPI_PS_INPUT_ADDR__LINEAR_CENTROID_ENA_MASK 0x00000040L
+#define SPI_PS_INPUT_ADDR__LINEAR_CENTROID_ENA__SHIFT 0x00000006
+#define SPI_PS_INPUT_ADDR__LINEAR_SAMPLE_ENA_MASK 0x00000010L
+#define SPI_PS_INPUT_ADDR__LINEAR_SAMPLE_ENA__SHIFT 0x00000004
+#define SPI_PS_INPUT_ADDR__LINE_STIPPLE_TEX_ENA_MASK 0x00000080L
+#define SPI_PS_INPUT_ADDR__LINE_STIPPLE_TEX_ENA__SHIFT 0x00000007
+#define SPI_PS_INPUT_ADDR__PERSP_CENTER_ENA_MASK 0x00000002L
+#define SPI_PS_INPUT_ADDR__PERSP_CENTER_ENA__SHIFT 0x00000001
+#define SPI_PS_INPUT_ADDR__PERSP_CENTROID_ENA_MASK 0x00000004L
+#define SPI_PS_INPUT_ADDR__PERSP_CENTROID_ENA__SHIFT 0x00000002
+#define SPI_PS_INPUT_ADDR__PERSP_PULL_MODEL_ENA_MASK 0x00000008L
+#define SPI_PS_INPUT_ADDR__PERSP_PULL_MODEL_ENA__SHIFT 0x00000003
+#define SPI_PS_INPUT_ADDR__PERSP_SAMPLE_ENA_MASK 0x00000001L
+#define SPI_PS_INPUT_ADDR__PERSP_SAMPLE_ENA__SHIFT 0x00000000
+#define SPI_PS_INPUT_ADDR__POS_FIXED_PT_ENA_MASK 0x00008000L
+#define SPI_PS_INPUT_ADDR__POS_FIXED_PT_ENA__SHIFT 0x0000000f
+#define SPI_PS_INPUT_ADDR__POS_W_FLOAT_ENA_MASK 0x00000800L
+#define SPI_PS_INPUT_ADDR__POS_W_FLOAT_ENA__SHIFT 0x0000000b
+#define SPI_PS_INPUT_ADDR__POS_X_FLOAT_ENA_MASK 0x00000100L
+#define SPI_PS_INPUT_ADDR__POS_X_FLOAT_ENA__SHIFT 0x00000008
+#define SPI_PS_INPUT_ADDR__POS_Y_FLOAT_ENA_MASK 0x00000200L
+#define SPI_PS_INPUT_ADDR__POS_Y_FLOAT_ENA__SHIFT 0x00000009
+#define SPI_PS_INPUT_ADDR__POS_Z_FLOAT_ENA_MASK 0x00000400L
+#define SPI_PS_INPUT_ADDR__POS_Z_FLOAT_ENA__SHIFT 0x0000000a
+#define SPI_PS_INPUT_ADDR__SAMPLE_COVERAGE_ENA_MASK 0x00004000L
+#define SPI_PS_INPUT_ADDR__SAMPLE_COVERAGE_ENA__SHIFT 0x0000000e
+#define SPI_PS_INPUT_CNTL_0__CYL_WRAP_MASK 0x0001e000L
+#define SPI_PS_INPUT_CNTL_0__CYL_WRAP__SHIFT 0x0000000d
+#define SPI_PS_INPUT_CNTL_0__DEFAULT_VAL_MASK 0x00000300L
+#define SPI_PS_INPUT_CNTL_0__DEFAULT_VAL__SHIFT 0x00000008
+#define SPI_PS_INPUT_CNTL_0__DUP_MASK 0x00040000L
+#define SPI_PS_INPUT_CNTL_0__DUP__SHIFT 0x00000012
+#define SPI_PS_INPUT_CNTL_0__FLAT_SHADE_MASK 0x00000400L
+#define SPI_PS_INPUT_CNTL_0__FLAT_SHADE__SHIFT 0x0000000a
+#define SPI_PS_INPUT_CNTL_0__OFFSET_MASK 0x0000003fL
+#define SPI_PS_INPUT_CNTL_0__OFFSET__SHIFT 0x00000000
+#define SPI_PS_INPUT_CNTL_0__PT_SPRITE_TEX_MASK 0x00020000L
+#define SPI_PS_INPUT_CNTL_0__PT_SPRITE_TEX__SHIFT 0x00000011
+#define SPI_PS_INPUT_CNTL_10__CYL_WRAP_MASK 0x0001e000L
+#define SPI_PS_INPUT_CNTL_10__CYL_WRAP__SHIFT 0x0000000d
+#define SPI_PS_INPUT_CNTL_10__DEFAULT_VAL_MASK 0x00000300L
+#define SPI_PS_INPUT_CNTL_10__DEFAULT_VAL__SHIFT 0x00000008
+#define SPI_PS_INPUT_CNTL_10__DUP_MASK 0x00040000L
+#define SPI_PS_INPUT_CNTL_10__DUP__SHIFT 0x00000012
+#define SPI_PS_INPUT_CNTL_10__FLAT_SHADE_MASK 0x00000400L
+#define SPI_PS_INPUT_CNTL_10__FLAT_SHADE__SHIFT 0x0000000a
+#define SPI_PS_INPUT_CNTL_10__OFFSET_MASK 0x0000003fL
+#define SPI_PS_INPUT_CNTL_10__OFFSET__SHIFT 0x00000000
+#define SPI_PS_INPUT_CNTL_10__PT_SPRITE_TEX_MASK 0x00020000L
+#define SPI_PS_INPUT_CNTL_10__PT_SPRITE_TEX__SHIFT 0x00000011
+#define SPI_PS_INPUT_CNTL_11__CYL_WRAP_MASK 0x0001e000L
+#define SPI_PS_INPUT_CNTL_11__CYL_WRAP__SHIFT 0x0000000d
+#define SPI_PS_INPUT_CNTL_11__DEFAULT_VAL_MASK 0x00000300L
+#define SPI_PS_INPUT_CNTL_11__DEFAULT_VAL__SHIFT 0x00000008
+#define SPI_PS_INPUT_CNTL_11__DUP_MASK 0x00040000L
+#define SPI_PS_INPUT_CNTL_11__DUP__SHIFT 0x00000012
+#define SPI_PS_INPUT_CNTL_11__FLAT_SHADE_MASK 0x00000400L
+#define SPI_PS_INPUT_CNTL_11__FLAT_SHADE__SHIFT 0x0000000a
+#define SPI_PS_INPUT_CNTL_11__OFFSET_MASK 0x0000003fL
+#define SPI_PS_INPUT_CNTL_11__OFFSET__SHIFT 0x00000000
+#define SPI_PS_INPUT_CNTL_11__PT_SPRITE_TEX_MASK 0x00020000L
+#define SPI_PS_INPUT_CNTL_11__PT_SPRITE_TEX__SHIFT 0x00000011
+#define SPI_PS_INPUT_CNTL_12__CYL_WRAP_MASK 0x0001e000L
+#define SPI_PS_INPUT_CNTL_12__CYL_WRAP__SHIFT 0x0000000d
+#define SPI_PS_INPUT_CNTL_12__DEFAULT_VAL_MASK 0x00000300L
+#define SPI_PS_INPUT_CNTL_12__DEFAULT_VAL__SHIFT 0x00000008
+#define SPI_PS_INPUT_CNTL_12__DUP_MASK 0x00040000L
+#define SPI_PS_INPUT_CNTL_12__DUP__SHIFT 0x00000012
+#define SPI_PS_INPUT_CNTL_12__FLAT_SHADE_MASK 0x00000400L
+#define SPI_PS_INPUT_CNTL_12__FLAT_SHADE__SHIFT 0x0000000a
+#define SPI_PS_INPUT_CNTL_12__OFFSET_MASK 0x0000003fL
+#define SPI_PS_INPUT_CNTL_12__OFFSET__SHIFT 0x00000000
+#define SPI_PS_INPUT_CNTL_12__PT_SPRITE_TEX_MASK 0x00020000L
+#define SPI_PS_INPUT_CNTL_12__PT_SPRITE_TEX__SHIFT 0x00000011
+#define SPI_PS_INPUT_CNTL_13__CYL_WRAP_MASK 0x0001e000L
+#define SPI_PS_INPUT_CNTL_13__CYL_WRAP__SHIFT 0x0000000d
+#define SPI_PS_INPUT_CNTL_13__DEFAULT_VAL_MASK 0x00000300L
+#define SPI_PS_INPUT_CNTL_13__DEFAULT_VAL__SHIFT 0x00000008
+#define SPI_PS_INPUT_CNTL_13__DUP_MASK 0x00040000L
+#define SPI_PS_INPUT_CNTL_13__DUP__SHIFT 0x00000012
+#define SPI_PS_INPUT_CNTL_13__FLAT_SHADE_MASK 0x00000400L
+#define SPI_PS_INPUT_CNTL_13__FLAT_SHADE__SHIFT 0x0000000a
+#define SPI_PS_INPUT_CNTL_13__OFFSET_MASK 0x0000003fL
+#define SPI_PS_INPUT_CNTL_13__OFFSET__SHIFT 0x00000000
+#define SPI_PS_INPUT_CNTL_13__PT_SPRITE_TEX_MASK 0x00020000L
+#define SPI_PS_INPUT_CNTL_13__PT_SPRITE_TEX__SHIFT 0x00000011
+#define SPI_PS_INPUT_CNTL_14__CYL_WRAP_MASK 0x0001e000L
+#define SPI_PS_INPUT_CNTL_14__CYL_WRAP__SHIFT 0x0000000d
+#define SPI_PS_INPUT_CNTL_14__DEFAULT_VAL_MASK 0x00000300L
+#define SPI_PS_INPUT_CNTL_14__DEFAULT_VAL__SHIFT 0x00000008
+#define SPI_PS_INPUT_CNTL_14__DUP_MASK 0x00040000L
+#define SPI_PS_INPUT_CNTL_14__DUP__SHIFT 0x00000012
+#define SPI_PS_INPUT_CNTL_14__FLAT_SHADE_MASK 0x00000400L
+#define SPI_PS_INPUT_CNTL_14__FLAT_SHADE__SHIFT 0x0000000a
+#define SPI_PS_INPUT_CNTL_14__OFFSET_MASK 0x0000003fL
+#define SPI_PS_INPUT_CNTL_14__OFFSET__SHIFT 0x00000000
+#define SPI_PS_INPUT_CNTL_14__PT_SPRITE_TEX_MASK 0x00020000L
+#define SPI_PS_INPUT_CNTL_14__PT_SPRITE_TEX__SHIFT 0x00000011
+#define SPI_PS_INPUT_CNTL_15__CYL_WRAP_MASK 0x0001e000L
+#define SPI_PS_INPUT_CNTL_15__CYL_WRAP__SHIFT 0x0000000d
+#define SPI_PS_INPUT_CNTL_15__DEFAULT_VAL_MASK 0x00000300L
+#define SPI_PS_INPUT_CNTL_15__DEFAULT_VAL__SHIFT 0x00000008
+#define SPI_PS_INPUT_CNTL_15__DUP_MASK 0x00040000L
+#define SPI_PS_INPUT_CNTL_15__DUP__SHIFT 0x00000012
+#define SPI_PS_INPUT_CNTL_15__FLAT_SHADE_MASK 0x00000400L
+#define SPI_PS_INPUT_CNTL_15__FLAT_SHADE__SHIFT 0x0000000a
+#define SPI_PS_INPUT_CNTL_15__OFFSET_MASK 0x0000003fL
+#define SPI_PS_INPUT_CNTL_15__OFFSET__SHIFT 0x00000000
+#define SPI_PS_INPUT_CNTL_15__PT_SPRITE_TEX_MASK 0x00020000L
+#define SPI_PS_INPUT_CNTL_15__PT_SPRITE_TEX__SHIFT 0x00000011
+#define SPI_PS_INPUT_CNTL_16__CYL_WRAP_MASK 0x0001e000L
+#define SPI_PS_INPUT_CNTL_16__CYL_WRAP__SHIFT 0x0000000d
+#define SPI_PS_INPUT_CNTL_16__DEFAULT_VAL_MASK 0x00000300L
+#define SPI_PS_INPUT_CNTL_16__DEFAULT_VAL__SHIFT 0x00000008
+#define SPI_PS_INPUT_CNTL_16__DUP_MASK 0x00040000L
+#define SPI_PS_INPUT_CNTL_16__DUP__SHIFT 0x00000012
+#define SPI_PS_INPUT_CNTL_16__FLAT_SHADE_MASK 0x00000400L
+#define SPI_PS_INPUT_CNTL_16__FLAT_SHADE__SHIFT 0x0000000a
+#define SPI_PS_INPUT_CNTL_16__OFFSET_MASK 0x0000003fL
+#define SPI_PS_INPUT_CNTL_16__OFFSET__SHIFT 0x00000000
+#define SPI_PS_INPUT_CNTL_16__PT_SPRITE_TEX_MASK 0x00020000L
+#define SPI_PS_INPUT_CNTL_16__PT_SPRITE_TEX__SHIFT 0x00000011
+#define SPI_PS_INPUT_CNTL_17__CYL_WRAP_MASK 0x0001e000L
+#define SPI_PS_INPUT_CNTL_17__CYL_WRAP__SHIFT 0x0000000d
+#define SPI_PS_INPUT_CNTL_17__DEFAULT_VAL_MASK 0x00000300L
+#define SPI_PS_INPUT_CNTL_17__DEFAULT_VAL__SHIFT 0x00000008
+#define SPI_PS_INPUT_CNTL_17__DUP_MASK 0x00040000L
+#define SPI_PS_INPUT_CNTL_17__DUP__SHIFT 0x00000012
+#define SPI_PS_INPUT_CNTL_17__FLAT_SHADE_MASK 0x00000400L
+#define SPI_PS_INPUT_CNTL_17__FLAT_SHADE__SHIFT 0x0000000a
+#define SPI_PS_INPUT_CNTL_17__OFFSET_MASK 0x0000003fL
+#define SPI_PS_INPUT_CNTL_17__OFFSET__SHIFT 0x00000000
+#define SPI_PS_INPUT_CNTL_17__PT_SPRITE_TEX_MASK 0x00020000L
+#define SPI_PS_INPUT_CNTL_17__PT_SPRITE_TEX__SHIFT 0x00000011
+#define SPI_PS_INPUT_CNTL_18__CYL_WRAP_MASK 0x0001e000L
+#define SPI_PS_INPUT_CNTL_18__CYL_WRAP__SHIFT 0x0000000d
+#define SPI_PS_INPUT_CNTL_18__DEFAULT_VAL_MASK 0x00000300L
+#define SPI_PS_INPUT_CNTL_18__DEFAULT_VAL__SHIFT 0x00000008
+#define SPI_PS_INPUT_CNTL_18__DUP_MASK 0x00040000L
+#define SPI_PS_INPUT_CNTL_18__DUP__SHIFT 0x00000012
+#define SPI_PS_INPUT_CNTL_18__FLAT_SHADE_MASK 0x00000400L
+#define SPI_PS_INPUT_CNTL_18__FLAT_SHADE__SHIFT 0x0000000a
+#define SPI_PS_INPUT_CNTL_18__OFFSET_MASK 0x0000003fL
+#define SPI_PS_INPUT_CNTL_18__OFFSET__SHIFT 0x00000000
+#define SPI_PS_INPUT_CNTL_18__PT_SPRITE_TEX_MASK 0x00020000L
+#define SPI_PS_INPUT_CNTL_18__PT_SPRITE_TEX__SHIFT 0x00000011
+#define SPI_PS_INPUT_CNTL_19__CYL_WRAP_MASK 0x0001e000L
+#define SPI_PS_INPUT_CNTL_19__CYL_WRAP__SHIFT 0x0000000d
+#define SPI_PS_INPUT_CNTL_19__DEFAULT_VAL_MASK 0x00000300L
+#define SPI_PS_INPUT_CNTL_19__DEFAULT_VAL__SHIFT 0x00000008
+#define SPI_PS_INPUT_CNTL_19__DUP_MASK 0x00040000L
+#define SPI_PS_INPUT_CNTL_19__DUP__SHIFT 0x00000012
+#define SPI_PS_INPUT_CNTL_19__FLAT_SHADE_MASK 0x00000400L
+#define SPI_PS_INPUT_CNTL_19__FLAT_SHADE__SHIFT 0x0000000a
+#define SPI_PS_INPUT_CNTL_19__OFFSET_MASK 0x0000003fL
+#define SPI_PS_INPUT_CNTL_19__OFFSET__SHIFT 0x00000000
+#define SPI_PS_INPUT_CNTL_19__PT_SPRITE_TEX_MASK 0x00020000L
+#define SPI_PS_INPUT_CNTL_19__PT_SPRITE_TEX__SHIFT 0x00000011
+#define SPI_PS_INPUT_CNTL_1__CYL_WRAP_MASK 0x0001e000L
+#define SPI_PS_INPUT_CNTL_1__CYL_WRAP__SHIFT 0x0000000d
+#define SPI_PS_INPUT_CNTL_1__DEFAULT_VAL_MASK 0x00000300L
+#define SPI_PS_INPUT_CNTL_1__DEFAULT_VAL__SHIFT 0x00000008
+#define SPI_PS_INPUT_CNTL_1__DUP_MASK 0x00040000L
+#define SPI_PS_INPUT_CNTL_1__DUP__SHIFT 0x00000012
+#define SPI_PS_INPUT_CNTL_1__FLAT_SHADE_MASK 0x00000400L
+#define SPI_PS_INPUT_CNTL_1__FLAT_SHADE__SHIFT 0x0000000a
+#define SPI_PS_INPUT_CNTL_1__OFFSET_MASK 0x0000003fL
+#define SPI_PS_INPUT_CNTL_1__OFFSET__SHIFT 0x00000000
+#define SPI_PS_INPUT_CNTL_1__PT_SPRITE_TEX_MASK 0x00020000L
+#define SPI_PS_INPUT_CNTL_1__PT_SPRITE_TEX__SHIFT 0x00000011
+#define SPI_PS_INPUT_CNTL_20__DEFAULT_VAL_MASK 0x00000300L
+#define SPI_PS_INPUT_CNTL_20__DEFAULT_VAL__SHIFT 0x00000008
+#define SPI_PS_INPUT_CNTL_20__DUP_MASK 0x00040000L
+#define SPI_PS_INPUT_CNTL_20__DUP__SHIFT 0x00000012
+#define SPI_PS_INPUT_CNTL_20__FLAT_SHADE_MASK 0x00000400L
+#define SPI_PS_INPUT_CNTL_20__FLAT_SHADE__SHIFT 0x0000000a
+#define SPI_PS_INPUT_CNTL_20__OFFSET_MASK 0x0000003fL
+#define SPI_PS_INPUT_CNTL_20__OFFSET__SHIFT 0x00000000
+#define SPI_PS_INPUT_CNTL_21__DEFAULT_VAL_MASK 0x00000300L
+#define SPI_PS_INPUT_CNTL_21__DEFAULT_VAL__SHIFT 0x00000008
+#define SPI_PS_INPUT_CNTL_21__DUP_MASK 0x00040000L
+#define SPI_PS_INPUT_CNTL_21__DUP__SHIFT 0x00000012
+#define SPI_PS_INPUT_CNTL_21__FLAT_SHADE_MASK 0x00000400L
+#define SPI_PS_INPUT_CNTL_21__FLAT_SHADE__SHIFT 0x0000000a
+#define SPI_PS_INPUT_CNTL_21__OFFSET_MASK 0x0000003fL
+#define SPI_PS_INPUT_CNTL_21__OFFSET__SHIFT 0x00000000
+#define SPI_PS_INPUT_CNTL_22__DEFAULT_VAL_MASK 0x00000300L
+#define SPI_PS_INPUT_CNTL_22__DEFAULT_VAL__SHIFT 0x00000008
+#define SPI_PS_INPUT_CNTL_22__DUP_MASK 0x00040000L
+#define SPI_PS_INPUT_CNTL_22__DUP__SHIFT 0x00000012
+#define SPI_PS_INPUT_CNTL_22__FLAT_SHADE_MASK 0x00000400L
+#define SPI_PS_INPUT_CNTL_22__FLAT_SHADE__SHIFT 0x0000000a
+#define SPI_PS_INPUT_CNTL_22__OFFSET_MASK 0x0000003fL
+#define SPI_PS_INPUT_CNTL_22__OFFSET__SHIFT 0x00000000
+#define SPI_PS_INPUT_CNTL_23__DEFAULT_VAL_MASK 0x00000300L
+#define SPI_PS_INPUT_CNTL_23__DEFAULT_VAL__SHIFT 0x00000008
+#define SPI_PS_INPUT_CNTL_23__DUP_MASK 0x00040000L
+#define SPI_PS_INPUT_CNTL_23__DUP__SHIFT 0x00000012
+#define SPI_PS_INPUT_CNTL_23__FLAT_SHADE_MASK 0x00000400L
+#define SPI_PS_INPUT_CNTL_23__FLAT_SHADE__SHIFT 0x0000000a
+#define SPI_PS_INPUT_CNTL_23__OFFSET_MASK 0x0000003fL
+#define SPI_PS_INPUT_CNTL_23__OFFSET__SHIFT 0x00000000
+#define SPI_PS_INPUT_CNTL_24__DEFAULT_VAL_MASK 0x00000300L
+#define SPI_PS_INPUT_CNTL_24__DEFAULT_VAL__SHIFT 0x00000008
+#define SPI_PS_INPUT_CNTL_24__DUP_MASK 0x00040000L
+#define SPI_PS_INPUT_CNTL_24__DUP__SHIFT 0x00000012
+#define SPI_PS_INPUT_CNTL_24__FLAT_SHADE_MASK 0x00000400L
+#define SPI_PS_INPUT_CNTL_24__FLAT_SHADE__SHIFT 0x0000000a
+#define SPI_PS_INPUT_CNTL_24__OFFSET_MASK 0x0000003fL
+#define SPI_PS_INPUT_CNTL_24__OFFSET__SHIFT 0x00000000
+#define SPI_PS_INPUT_CNTL_25__DEFAULT_VAL_MASK 0x00000300L
+#define SPI_PS_INPUT_CNTL_25__DEFAULT_VAL__SHIFT 0x00000008
+#define SPI_PS_INPUT_CNTL_25__DUP_MASK 0x00040000L
+#define SPI_PS_INPUT_CNTL_25__DUP__SHIFT 0x00000012
+#define SPI_PS_INPUT_CNTL_25__FLAT_SHADE_MASK 0x00000400L
+#define SPI_PS_INPUT_CNTL_25__FLAT_SHADE__SHIFT 0x0000000a
+#define SPI_PS_INPUT_CNTL_25__OFFSET_MASK 0x0000003fL
+#define SPI_PS_INPUT_CNTL_25__OFFSET__SHIFT 0x00000000
+#define SPI_PS_INPUT_CNTL_26__DEFAULT_VAL_MASK 0x00000300L
+#define SPI_PS_INPUT_CNTL_26__DEFAULT_VAL__SHIFT 0x00000008
+#define SPI_PS_INPUT_CNTL_26__DUP_MASK 0x00040000L
+#define SPI_PS_INPUT_CNTL_26__DUP__SHIFT 0x00000012
+#define SPI_PS_INPUT_CNTL_26__FLAT_SHADE_MASK 0x00000400L
+#define SPI_PS_INPUT_CNTL_26__FLAT_SHADE__SHIFT 0x0000000a
+#define SPI_PS_INPUT_CNTL_26__OFFSET_MASK 0x0000003fL
+#define SPI_PS_INPUT_CNTL_26__OFFSET__SHIFT 0x00000000
+#define SPI_PS_INPUT_CNTL_27__DEFAULT_VAL_MASK 0x00000300L
+#define SPI_PS_INPUT_CNTL_27__DEFAULT_VAL__SHIFT 0x00000008
+#define SPI_PS_INPUT_CNTL_27__DUP_MASK 0x00040000L
+#define SPI_PS_INPUT_CNTL_27__DUP__SHIFT 0x00000012
+#define SPI_PS_INPUT_CNTL_27__FLAT_SHADE_MASK 0x00000400L
+#define SPI_PS_INPUT_CNTL_27__FLAT_SHADE__SHIFT 0x0000000a
+#define SPI_PS_INPUT_CNTL_27__OFFSET_MASK 0x0000003fL
+#define SPI_PS_INPUT_CNTL_27__OFFSET__SHIFT 0x00000000
+#define SPI_PS_INPUT_CNTL_28__DEFAULT_VAL_MASK 0x00000300L
+#define SPI_PS_INPUT_CNTL_28__DEFAULT_VAL__SHIFT 0x00000008
+#define SPI_PS_INPUT_CNTL_28__DUP_MASK 0x00040000L
+#define SPI_PS_INPUT_CNTL_28__DUP__SHIFT 0x00000012
+#define SPI_PS_INPUT_CNTL_28__FLAT_SHADE_MASK 0x00000400L
+#define SPI_PS_INPUT_CNTL_28__FLAT_SHADE__SHIFT 0x0000000a
+#define SPI_PS_INPUT_CNTL_28__OFFSET_MASK 0x0000003fL
+#define SPI_PS_INPUT_CNTL_28__OFFSET__SHIFT 0x00000000
+#define SPI_PS_INPUT_CNTL_29__DEFAULT_VAL_MASK 0x00000300L
+#define SPI_PS_INPUT_CNTL_29__DEFAULT_VAL__SHIFT 0x00000008
+#define SPI_PS_INPUT_CNTL_29__DUP_MASK 0x00040000L
+#define SPI_PS_INPUT_CNTL_29__DUP__SHIFT 0x00000012
+#define SPI_PS_INPUT_CNTL_29__FLAT_SHADE_MASK 0x00000400L
+#define SPI_PS_INPUT_CNTL_29__FLAT_SHADE__SHIFT 0x0000000a
+#define SPI_PS_INPUT_CNTL_29__OFFSET_MASK 0x0000003fL
+#define SPI_PS_INPUT_CNTL_29__OFFSET__SHIFT 0x00000000
+#define SPI_PS_INPUT_CNTL_2__CYL_WRAP_MASK 0x0001e000L
+#define SPI_PS_INPUT_CNTL_2__CYL_WRAP__SHIFT 0x0000000d
+#define SPI_PS_INPUT_CNTL_2__DEFAULT_VAL_MASK 0x00000300L
+#define SPI_PS_INPUT_CNTL_2__DEFAULT_VAL__SHIFT 0x00000008
+#define SPI_PS_INPUT_CNTL_2__DUP_MASK 0x00040000L
+#define SPI_PS_INPUT_CNTL_2__DUP__SHIFT 0x00000012
+#define SPI_PS_INPUT_CNTL_2__FLAT_SHADE_MASK 0x00000400L
+#define SPI_PS_INPUT_CNTL_2__FLAT_SHADE__SHIFT 0x0000000a
+#define SPI_PS_INPUT_CNTL_2__OFFSET_MASK 0x0000003fL
+#define SPI_PS_INPUT_CNTL_2__OFFSET__SHIFT 0x00000000
+#define SPI_PS_INPUT_CNTL_2__PT_SPRITE_TEX_MASK 0x00020000L
+#define SPI_PS_INPUT_CNTL_2__PT_SPRITE_TEX__SHIFT 0x00000011
+#define SPI_PS_INPUT_CNTL_30__DEFAULT_VAL_MASK 0x00000300L
+#define SPI_PS_INPUT_CNTL_30__DEFAULT_VAL__SHIFT 0x00000008
+#define SPI_PS_INPUT_CNTL_30__DUP_MASK 0x00040000L
+#define SPI_PS_INPUT_CNTL_30__DUP__SHIFT 0x00000012
+#define SPI_PS_INPUT_CNTL_30__FLAT_SHADE_MASK 0x00000400L
+#define SPI_PS_INPUT_CNTL_30__FLAT_SHADE__SHIFT 0x0000000a
+#define SPI_PS_INPUT_CNTL_30__OFFSET_MASK 0x0000003fL
+#define SPI_PS_INPUT_CNTL_30__OFFSET__SHIFT 0x00000000
+#define SPI_PS_INPUT_CNTL_31__DEFAULT_VAL_MASK 0x00000300L
+#define SPI_PS_INPUT_CNTL_31__DEFAULT_VAL__SHIFT 0x00000008
+#define SPI_PS_INPUT_CNTL_31__DUP_MASK 0x00040000L
+#define SPI_PS_INPUT_CNTL_31__DUP__SHIFT 0x00000012
+#define SPI_PS_INPUT_CNTL_31__FLAT_SHADE_MASK 0x00000400L
+#define SPI_PS_INPUT_CNTL_31__FLAT_SHADE__SHIFT 0x0000000a
+#define SPI_PS_INPUT_CNTL_31__OFFSET_MASK 0x0000003fL
+#define SPI_PS_INPUT_CNTL_31__OFFSET__SHIFT 0x00000000
+#define SPI_PS_INPUT_CNTL_3__CYL_WRAP_MASK 0x0001e000L
+#define SPI_PS_INPUT_CNTL_3__CYL_WRAP__SHIFT 0x0000000d
+#define SPI_PS_INPUT_CNTL_3__DEFAULT_VAL_MASK 0x00000300L
+#define SPI_PS_INPUT_CNTL_3__DEFAULT_VAL__SHIFT 0x00000008
+#define SPI_PS_INPUT_CNTL_3__DUP_MASK 0x00040000L
+#define SPI_PS_INPUT_CNTL_3__DUP__SHIFT 0x00000012
+#define SPI_PS_INPUT_CNTL_3__FLAT_SHADE_MASK 0x00000400L
+#define SPI_PS_INPUT_CNTL_3__FLAT_SHADE__SHIFT 0x0000000a
+#define SPI_PS_INPUT_CNTL_3__OFFSET_MASK 0x0000003fL
+#define SPI_PS_INPUT_CNTL_3__OFFSET__SHIFT 0x00000000
+#define SPI_PS_INPUT_CNTL_3__PT_SPRITE_TEX_MASK 0x00020000L
+#define SPI_PS_INPUT_CNTL_3__PT_SPRITE_TEX__SHIFT 0x00000011
+#define SPI_PS_INPUT_CNTL_4__CYL_WRAP_MASK 0x0001e000L
+#define SPI_PS_INPUT_CNTL_4__CYL_WRAP__SHIFT 0x0000000d
+#define SPI_PS_INPUT_CNTL_4__DEFAULT_VAL_MASK 0x00000300L
+#define SPI_PS_INPUT_CNTL_4__DEFAULT_VAL__SHIFT 0x00000008
+#define SPI_PS_INPUT_CNTL_4__DUP_MASK 0x00040000L
+#define SPI_PS_INPUT_CNTL_4__DUP__SHIFT 0x00000012
+#define SPI_PS_INPUT_CNTL_4__FLAT_SHADE_MASK 0x00000400L
+#define SPI_PS_INPUT_CNTL_4__FLAT_SHADE__SHIFT 0x0000000a
+#define SPI_PS_INPUT_CNTL_4__OFFSET_MASK 0x0000003fL
+#define SPI_PS_INPUT_CNTL_4__OFFSET__SHIFT 0x00000000
+#define SPI_PS_INPUT_CNTL_4__PT_SPRITE_TEX_MASK 0x00020000L
+#define SPI_PS_INPUT_CNTL_4__PT_SPRITE_TEX__SHIFT 0x00000011
+#define SPI_PS_INPUT_CNTL_5__CYL_WRAP_MASK 0x0001e000L
+#define SPI_PS_INPUT_CNTL_5__CYL_WRAP__SHIFT 0x0000000d
+#define SPI_PS_INPUT_CNTL_5__DEFAULT_VAL_MASK 0x00000300L
+#define SPI_PS_INPUT_CNTL_5__DEFAULT_VAL__SHIFT 0x00000008
+#define SPI_PS_INPUT_CNTL_5__DUP_MASK 0x00040000L
+#define SPI_PS_INPUT_CNTL_5__DUP__SHIFT 0x00000012
+#define SPI_PS_INPUT_CNTL_5__FLAT_SHADE_MASK 0x00000400L
+#define SPI_PS_INPUT_CNTL_5__FLAT_SHADE__SHIFT 0x0000000a
+#define SPI_PS_INPUT_CNTL_5__OFFSET_MASK 0x0000003fL
+#define SPI_PS_INPUT_CNTL_5__OFFSET__SHIFT 0x00000000
+#define SPI_PS_INPUT_CNTL_5__PT_SPRITE_TEX_MASK 0x00020000L
+#define SPI_PS_INPUT_CNTL_5__PT_SPRITE_TEX__SHIFT 0x00000011
+#define SPI_PS_INPUT_CNTL_6__CYL_WRAP_MASK 0x0001e000L
+#define SPI_PS_INPUT_CNTL_6__CYL_WRAP__SHIFT 0x0000000d
+#define SPI_PS_INPUT_CNTL_6__DEFAULT_VAL_MASK 0x00000300L
+#define SPI_PS_INPUT_CNTL_6__DEFAULT_VAL__SHIFT 0x00000008
+#define SPI_PS_INPUT_CNTL_6__DUP_MASK 0x00040000L
+#define SPI_PS_INPUT_CNTL_6__DUP__SHIFT 0x00000012
+#define SPI_PS_INPUT_CNTL_6__FLAT_SHADE_MASK 0x00000400L
+#define SPI_PS_INPUT_CNTL_6__FLAT_SHADE__SHIFT 0x0000000a
+#define SPI_PS_INPUT_CNTL_6__OFFSET_MASK 0x0000003fL
+#define SPI_PS_INPUT_CNTL_6__OFFSET__SHIFT 0x00000000
+#define SPI_PS_INPUT_CNTL_6__PT_SPRITE_TEX_MASK 0x00020000L
+#define SPI_PS_INPUT_CNTL_6__PT_SPRITE_TEX__SHIFT 0x00000011
+#define SPI_PS_INPUT_CNTL_7__CYL_WRAP_MASK 0x0001e000L
+#define SPI_PS_INPUT_CNTL_7__CYL_WRAP__SHIFT 0x0000000d
+#define SPI_PS_INPUT_CNTL_7__DEFAULT_VAL_MASK 0x00000300L
+#define SPI_PS_INPUT_CNTL_7__DEFAULT_VAL__SHIFT 0x00000008
+#define SPI_PS_INPUT_CNTL_7__DUP_MASK 0x00040000L
+#define SPI_PS_INPUT_CNTL_7__DUP__SHIFT 0x00000012
+#define SPI_PS_INPUT_CNTL_7__FLAT_SHADE_MASK 0x00000400L
+#define SPI_PS_INPUT_CNTL_7__FLAT_SHADE__SHIFT 0x0000000a
+#define SPI_PS_INPUT_CNTL_7__OFFSET_MASK 0x0000003fL
+#define SPI_PS_INPUT_CNTL_7__OFFSET__SHIFT 0x00000000
+#define SPI_PS_INPUT_CNTL_7__PT_SPRITE_TEX_MASK 0x00020000L
+#define SPI_PS_INPUT_CNTL_7__PT_SPRITE_TEX__SHIFT 0x00000011
+#define SPI_PS_INPUT_CNTL_8__CYL_WRAP_MASK 0x0001e000L
+#define SPI_PS_INPUT_CNTL_8__CYL_WRAP__SHIFT 0x0000000d
+#define SPI_PS_INPUT_CNTL_8__DEFAULT_VAL_MASK 0x00000300L
+#define SPI_PS_INPUT_CNTL_8__DEFAULT_VAL__SHIFT 0x00000008
+#define SPI_PS_INPUT_CNTL_8__DUP_MASK 0x00040000L
+#define SPI_PS_INPUT_CNTL_8__DUP__SHIFT 0x00000012
+#define SPI_PS_INPUT_CNTL_8__FLAT_SHADE_MASK 0x00000400L
+#define SPI_PS_INPUT_CNTL_8__FLAT_SHADE__SHIFT 0x0000000a
+#define SPI_PS_INPUT_CNTL_8__OFFSET_MASK 0x0000003fL
+#define SPI_PS_INPUT_CNTL_8__OFFSET__SHIFT 0x00000000
+#define SPI_PS_INPUT_CNTL_8__PT_SPRITE_TEX_MASK 0x00020000L
+#define SPI_PS_INPUT_CNTL_8__PT_SPRITE_TEX__SHIFT 0x00000011
+#define SPI_PS_INPUT_CNTL_9__CYL_WRAP_MASK 0x0001e000L
+#define SPI_PS_INPUT_CNTL_9__CYL_WRAP__SHIFT 0x0000000d
+#define SPI_PS_INPUT_CNTL_9__DEFAULT_VAL_MASK 0x00000300L
+#define SPI_PS_INPUT_CNTL_9__DEFAULT_VAL__SHIFT 0x00000008
+#define SPI_PS_INPUT_CNTL_9__DUP_MASK 0x00040000L
+#define SPI_PS_INPUT_CNTL_9__DUP__SHIFT 0x00000012
+#define SPI_PS_INPUT_CNTL_9__FLAT_SHADE_MASK 0x00000400L
+#define SPI_PS_INPUT_CNTL_9__FLAT_SHADE__SHIFT 0x0000000a
+#define SPI_PS_INPUT_CNTL_9__OFFSET_MASK 0x0000003fL
+#define SPI_PS_INPUT_CNTL_9__OFFSET__SHIFT 0x00000000
+#define SPI_PS_INPUT_CNTL_9__PT_SPRITE_TEX_MASK 0x00020000L
+#define SPI_PS_INPUT_CNTL_9__PT_SPRITE_TEX__SHIFT 0x00000011
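+
+/*
+ * Usage sketch (illustrative, not emitted by the register-header generator):
+ * every field above is a mask/shift pair, so a per-slot input-control value
+ * is composed by shifting each field into place and masking.  The helper
+ * name and parameters below are hypothetical.
+ */
+static inline unsigned int
+spi_ps_input_cntl_value(unsigned int attr_offset, unsigned int flat_shade)
+{
+	return ((attr_offset << SPI_PS_INPUT_CNTL_1__OFFSET__SHIFT) &
+		SPI_PS_INPUT_CNTL_1__OFFSET_MASK) |
+	       ((flat_shade << SPI_PS_INPUT_CNTL_1__FLAT_SHADE__SHIFT) &
+		SPI_PS_INPUT_CNTL_1__FLAT_SHADE_MASK);
+}
+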
+#define SPI_PS_INPUT_ENA__ANCILLARY_ENA_MASK 0x00002000L
+#define SPI_PS_INPUT_ENA__ANCILLARY_ENA__SHIFT 0x0000000d
+#define SPI_PS_INPUT_ENA__FRONT_FACE_ENA_MASK 0x00001000L
+#define SPI_PS_INPUT_ENA__FRONT_FACE_ENA__SHIFT 0x0000000c
+#define SPI_PS_INPUT_ENA__LINEAR_CENTER_ENA_MASK 0x00000020L
+#define SPI_PS_INPUT_ENA__LINEAR_CENTER_ENA__SHIFT 0x00000005
+#define SPI_PS_INPUT_ENA__LINEAR_CENTROID_ENA_MASK 0x00000040L
+#define SPI_PS_INPUT_ENA__LINEAR_CENTROID_ENA__SHIFT 0x00000006
+#define SPI_PS_INPUT_ENA__LINEAR_SAMPLE_ENA_MASK 0x00000010L
+#define SPI_PS_INPUT_ENA__LINEAR_SAMPLE_ENA__SHIFT 0x00000004
+#define SPI_PS_INPUT_ENA__LINE_STIPPLE_TEX_ENA_MASK 0x00000080L
+#define SPI_PS_INPUT_ENA__LINE_STIPPLE_TEX_ENA__SHIFT 0x00000007
+#define SPI_PS_INPUT_ENA__PERSP_CENTER_ENA_MASK 0x00000002L
+#define SPI_PS_INPUT_ENA__PERSP_CENTER_ENA__SHIFT 0x00000001
+#define SPI_PS_INPUT_ENA__PERSP_CENTROID_ENA_MASK 0x00000004L
+#define SPI_PS_INPUT_ENA__PERSP_CENTROID_ENA__SHIFT 0x00000002
+#define SPI_PS_INPUT_ENA__PERSP_PULL_MODEL_ENA_MASK 0x00000008L
+#define SPI_PS_INPUT_ENA__PERSP_PULL_MODEL_ENA__SHIFT 0x00000003
+#define SPI_PS_INPUT_ENA__PERSP_SAMPLE_ENA_MASK 0x00000001L
+#define SPI_PS_INPUT_ENA__PERSP_SAMPLE_ENA__SHIFT 0x00000000
+#define SPI_PS_INPUT_ENA__POS_FIXED_PT_ENA_MASK 0x00008000L
+#define SPI_PS_INPUT_ENA__POS_FIXED_PT_ENA__SHIFT 0x0000000f
+#define SPI_PS_INPUT_ENA__POS_W_FLOAT_ENA_MASK 0x00000800L
+#define SPI_PS_INPUT_ENA__POS_W_FLOAT_ENA__SHIFT 0x0000000b
+#define SPI_PS_INPUT_ENA__POS_X_FLOAT_ENA_MASK 0x00000100L
+#define SPI_PS_INPUT_ENA__POS_X_FLOAT_ENA__SHIFT 0x00000008
+#define SPI_PS_INPUT_ENA__POS_Y_FLOAT_ENA_MASK 0x00000200L
+#define SPI_PS_INPUT_ENA__POS_Y_FLOAT_ENA__SHIFT 0x00000009
+#define SPI_PS_INPUT_ENA__POS_Z_FLOAT_ENA_MASK 0x00000400L
+#define SPI_PS_INPUT_ENA__POS_Z_FLOAT_ENA__SHIFT 0x0000000a
+#define SPI_PS_INPUT_ENA__SAMPLE_COVERAGE_ENA_MASK 0x00004000L
+#define SPI_PS_INPUT_ENA__SAMPLE_COVERAGE_ENA__SHIFT 0x0000000e
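+
+/*
+ * Usage sketch (illustrative): the PS input-enable fields are single-bit
+ * flags, so testing one needs only its mask.  A hypothetical check:
+ */
+static inline unsigned int spi_ps_wants_persp_center(unsigned int input_ena)
+{
+	return (input_ena & SPI_PS_INPUT_ENA__PERSP_CENTER_ENA_MASK) != 0;
+}
+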
+#define SPI_PS_MAX_WAVE_ID__MAX_WAVE_ID_MASK 0x00000fffL
+#define SPI_PS_MAX_WAVE_ID__MAX_WAVE_ID__SHIFT 0x00000000
+#define SPI_SHADER_COL_FORMAT__COL0_EXPORT_FORMAT_MASK 0x0000000fL
+#define SPI_SHADER_COL_FORMAT__COL0_EXPORT_FORMAT__SHIFT 0x00000000
+#define SPI_SHADER_COL_FORMAT__COL1_EXPORT_FORMAT_MASK 0x000000f0L
+#define SPI_SHADER_COL_FORMAT__COL1_EXPORT_FORMAT__SHIFT 0x00000004
+#define SPI_SHADER_COL_FORMAT__COL2_EXPORT_FORMAT_MASK 0x00000f00L
+#define SPI_SHADER_COL_FORMAT__COL2_EXPORT_FORMAT__SHIFT 0x00000008
+#define SPI_SHADER_COL_FORMAT__COL3_EXPORT_FORMAT_MASK 0x0000f000L
+#define SPI_SHADER_COL_FORMAT__COL3_EXPORT_FORMAT__SHIFT 0x0000000c
+#define SPI_SHADER_COL_FORMAT__COL4_EXPORT_FORMAT_MASK 0x000f0000L
+#define SPI_SHADER_COL_FORMAT__COL4_EXPORT_FORMAT__SHIFT 0x00000010
+#define SPI_SHADER_COL_FORMAT__COL5_EXPORT_FORMAT_MASK 0x00f00000L
+#define SPI_SHADER_COL_FORMAT__COL5_EXPORT_FORMAT__SHIFT 0x00000014
+#define SPI_SHADER_COL_FORMAT__COL6_EXPORT_FORMAT_MASK 0x0f000000L
+#define SPI_SHADER_COL_FORMAT__COL6_EXPORT_FORMAT__SHIFT 0x00000018
+#define SPI_SHADER_COL_FORMAT__COL7_EXPORT_FORMAT_MASK 0xf0000000L
+#define SPI_SHADER_COL_FORMAT__COL7_EXPORT_FORMAT__SHIFT 0x0000001c
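+
+/*
+ * Usage sketch (illustrative): the eight COLn_EXPORT_FORMAT fields are
+ * consecutive 4-bit nibbles, so a per-render-target format can be packed
+ * with the COL0 shift plus 4 * mrt.  The loop below is hypothetical and
+ * assumes each fmt[i] already fits in 4 bits.
+ */
+static inline unsigned int
+spi_shader_col_format_value(const unsigned int fmt[8])
+{
+	unsigned int v = 0, i;
+
+	for (i = 0; i < 8; i++)
+		v |= fmt[i] << (SPI_SHADER_COL_FORMAT__COL0_EXPORT_FORMAT__SHIFT +
+				4 * i);
+	return v;
+}
+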
+#define SPI_SHADER_PGM_HI_ES__MEM_BASE_MASK 0x000000ffL
+#define SPI_SHADER_PGM_HI_ES__MEM_BASE__SHIFT 0x00000000
+#define SPI_SHADER_PGM_HI_GS__MEM_BASE_MASK 0x000000ffL
+#define SPI_SHADER_PGM_HI_GS__MEM_BASE__SHIFT 0x00000000
+#define SPI_SHADER_PGM_HI_HS__MEM_BASE_MASK 0x000000ffL
+#define SPI_SHADER_PGM_HI_HS__MEM_BASE__SHIFT 0x00000000
+#define SPI_SHADER_PGM_HI_LS__MEM_BASE_MASK 0x000000ffL
+#define SPI_SHADER_PGM_HI_LS__MEM_BASE__SHIFT 0x00000000
+#define SPI_SHADER_PGM_HI_PS__MEM_BASE_MASK 0x000000ffL
+#define SPI_SHADER_PGM_HI_PS__MEM_BASE__SHIFT 0x00000000
+#define SPI_SHADER_PGM_HI_VS__MEM_BASE_MASK 0x000000ffL
+#define SPI_SHADER_PGM_HI_VS__MEM_BASE__SHIFT 0x00000000
+#define SPI_SHADER_PGM_LO_ES__MEM_BASE_MASK 0xffffffffL
+#define SPI_SHADER_PGM_LO_ES__MEM_BASE__SHIFT 0x00000000
+#define SPI_SHADER_PGM_LO_GS__MEM_BASE_MASK 0xffffffffL
+#define SPI_SHADER_PGM_LO_GS__MEM_BASE__SHIFT 0x00000000
+#define SPI_SHADER_PGM_LO_HS__MEM_BASE_MASK 0xffffffffL
+#define SPI_SHADER_PGM_LO_HS__MEM_BASE__SHIFT 0x00000000
+#define SPI_SHADER_PGM_LO_LS__MEM_BASE_MASK 0xffffffffL
+#define SPI_SHADER_PGM_LO_LS__MEM_BASE__SHIFT 0x00000000
+#define SPI_SHADER_PGM_LO_PS__MEM_BASE_MASK 0xffffffffL
+#define SPI_SHADER_PGM_LO_PS__MEM_BASE__SHIFT 0x00000000
+#define SPI_SHADER_PGM_LO_VS__MEM_BASE_MASK 0xffffffffL
+#define SPI_SHADER_PGM_LO_VS__MEM_BASE__SHIFT 0x00000000
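+
+/*
+ * Usage sketch (illustrative): PGM_LO/PGM_HI together hold a shifted shader
+ * program base.  Open-source user-mode drivers (e.g. Mesa's radeonsi)
+ * program a 256-byte-aligned GPU virtual address as (va >> 8) into PGM_LO
+ * and (va >> 40) into PGM_HI; that shift convention is an assumption here,
+ * not something this header encodes.
+ */
+static inline void
+spi_shader_pgm_base(unsigned long long va, unsigned int *lo, unsigned int *hi)
+{
+	*lo = (unsigned int)(va >> 8);
+	*hi = (unsigned int)(va >> 40) & SPI_SHADER_PGM_HI_VS__MEM_BASE_MASK;
+}
+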
+#define SPI_SHADER_PGM_RSRC1_ES__CACHE_CTL_MASK 0x38000000L
+#define SPI_SHADER_PGM_RSRC1_ES__CACHE_CTL__SHIFT 0x0000001b
+#define SPI_SHADER_PGM_RSRC1_ES__CDBG_USER_MASK 0x40000000L
+#define SPI_SHADER_PGM_RSRC1_ES__CDBG_USER__SHIFT 0x0000001e
+#define SPI_SHADER_PGM_RSRC1_ES__CU_GROUP_ENABLE_MASK 0x04000000L
+#define SPI_SHADER_PGM_RSRC1_ES__CU_GROUP_ENABLE__SHIFT 0x0000001a
+#define SPI_SHADER_PGM_RSRC1_ES__DEBUG_MODE_MASK 0x00400000L
+#define SPI_SHADER_PGM_RSRC1_ES__DEBUG_MODE__SHIFT 0x00000016
+#define SPI_SHADER_PGM_RSRC1_ES__DX10_CLAMP_MASK 0x00200000L
+#define SPI_SHADER_PGM_RSRC1_ES__DX10_CLAMP__SHIFT 0x00000015
+#define SPI_SHADER_PGM_RSRC1_ES__FLOAT_MODE_MASK 0x000ff000L
+#define SPI_SHADER_PGM_RSRC1_ES__FLOAT_MODE__SHIFT 0x0000000c
+#define SPI_SHADER_PGM_RSRC1_ES__IEEE_MODE_MASK 0x00800000L
+#define SPI_SHADER_PGM_RSRC1_ES__IEEE_MODE__SHIFT 0x00000017
+#define SPI_SHADER_PGM_RSRC1_ES__PRIORITY_MASK 0x00000c00L
+#define SPI_SHADER_PGM_RSRC1_ES__PRIORITY__SHIFT 0x0000000a
+#define SPI_SHADER_PGM_RSRC1_ES__PRIV_MASK 0x00100000L
+#define SPI_SHADER_PGM_RSRC1_ES__PRIV__SHIFT 0x00000014
+#define SPI_SHADER_PGM_RSRC1_ES__SGPRS_MASK 0x000003c0L
+#define SPI_SHADER_PGM_RSRC1_ES__SGPRS__SHIFT 0x00000006
+#define SPI_SHADER_PGM_RSRC1_ES__VGPR_COMP_CNT_MASK 0x03000000L
+#define SPI_SHADER_PGM_RSRC1_ES__VGPR_COMP_CNT__SHIFT 0x00000018
+#define SPI_SHADER_PGM_RSRC1_ES__VGPRS_MASK 0x0000003fL
+#define SPI_SHADER_PGM_RSRC1_ES__VGPRS__SHIFT 0x00000000
+#define SPI_SHADER_PGM_RSRC1_GS__CACHE_CTL_MASK 0x0e000000L
+#define SPI_SHADER_PGM_RSRC1_GS__CACHE_CTL__SHIFT 0x00000019
+#define SPI_SHADER_PGM_RSRC1_GS__CDBG_USER_MASK 0x10000000L
+#define SPI_SHADER_PGM_RSRC1_GS__CDBG_USER__SHIFT 0x0000001c
+#define SPI_SHADER_PGM_RSRC1_GS__CU_GROUP_ENABLE_MASK 0x01000000L
+#define SPI_SHADER_PGM_RSRC1_GS__CU_GROUP_ENABLE__SHIFT 0x00000018
+#define SPI_SHADER_PGM_RSRC1_GS__DEBUG_MODE_MASK 0x00400000L
+#define SPI_SHADER_PGM_RSRC1_GS__DEBUG_MODE__SHIFT 0x00000016
+#define SPI_SHADER_PGM_RSRC1_GS__DX10_CLAMP_MASK 0x00200000L
+#define SPI_SHADER_PGM_RSRC1_GS__DX10_CLAMP__SHIFT 0x00000015
+#define SPI_SHADER_PGM_RSRC1_GS__FLOAT_MODE_MASK 0x000ff000L
+#define SPI_SHADER_PGM_RSRC1_GS__FLOAT_MODE__SHIFT 0x0000000c
+#define SPI_SHADER_PGM_RSRC1_GS__IEEE_MODE_MASK 0x00800000L
+#define SPI_SHADER_PGM_RSRC1_GS__IEEE_MODE__SHIFT 0x00000017
+#define SPI_SHADER_PGM_RSRC1_GS__PRIORITY_MASK 0x00000c00L
+#define SPI_SHADER_PGM_RSRC1_GS__PRIORITY__SHIFT 0x0000000a
+#define SPI_SHADER_PGM_RSRC1_GS__PRIV_MASK 0x00100000L
+#define SPI_SHADER_PGM_RSRC1_GS__PRIV__SHIFT 0x00000014
+#define SPI_SHADER_PGM_RSRC1_GS__SGPRS_MASK 0x000003c0L
+#define SPI_SHADER_PGM_RSRC1_GS__SGPRS__SHIFT 0x00000006
+#define SPI_SHADER_PGM_RSRC1_GS__VGPRS_MASK 0x0000003fL
+#define SPI_SHADER_PGM_RSRC1_GS__VGPRS__SHIFT 0x00000000
+#define SPI_SHADER_PGM_RSRC1_HS__CACHE_CTL_MASK 0x07000000L
+#define SPI_SHADER_PGM_RSRC1_HS__CACHE_CTL__SHIFT 0x00000018
+#define SPI_SHADER_PGM_RSRC1_HS__CDBG_USER_MASK 0x08000000L
+#define SPI_SHADER_PGM_RSRC1_HS__CDBG_USER__SHIFT 0x0000001b
+#define SPI_SHADER_PGM_RSRC1_HS__DEBUG_MODE_MASK 0x00400000L
+#define SPI_SHADER_PGM_RSRC1_HS__DEBUG_MODE__SHIFT 0x00000016
+#define SPI_SHADER_PGM_RSRC1_HS__DX10_CLAMP_MASK 0x00200000L
+#define SPI_SHADER_PGM_RSRC1_HS__DX10_CLAMP__SHIFT 0x00000015
+#define SPI_SHADER_PGM_RSRC1_HS__FLOAT_MODE_MASK 0x000ff000L
+#define SPI_SHADER_PGM_RSRC1_HS__FLOAT_MODE__SHIFT 0x0000000c
+#define SPI_SHADER_PGM_RSRC1_HS__IEEE_MODE_MASK 0x00800000L
+#define SPI_SHADER_PGM_RSRC1_HS__IEEE_MODE__SHIFT 0x00000017
+#define SPI_SHADER_PGM_RSRC1_HS__PRIORITY_MASK 0x00000c00L
+#define SPI_SHADER_PGM_RSRC1_HS__PRIORITY__SHIFT 0x0000000a
+#define SPI_SHADER_PGM_RSRC1_HS__PRIV_MASK 0x00100000L
+#define SPI_SHADER_PGM_RSRC1_HS__PRIV__SHIFT 0x00000014
+#define SPI_SHADER_PGM_RSRC1_HS__SGPRS_MASK 0x000003c0L
+#define SPI_SHADER_PGM_RSRC1_HS__SGPRS__SHIFT 0x00000006
+#define SPI_SHADER_PGM_RSRC1_HS__VGPRS_MASK 0x0000003fL
+#define SPI_SHADER_PGM_RSRC1_HS__VGPRS__SHIFT 0x00000000
+#define SPI_SHADER_PGM_RSRC1_LS__CACHE_CTL_MASK 0x1c000000L
+#define SPI_SHADER_PGM_RSRC1_LS__CACHE_CTL__SHIFT 0x0000001a
+#define SPI_SHADER_PGM_RSRC1_LS__CDBG_USER_MASK 0x20000000L
+#define SPI_SHADER_PGM_RSRC1_LS__CDBG_USER__SHIFT 0x0000001d
+#define SPI_SHADER_PGM_RSRC1_LS__DEBUG_MODE_MASK 0x00400000L
+#define SPI_SHADER_PGM_RSRC1_LS__DEBUG_MODE__SHIFT 0x00000016
+#define SPI_SHADER_PGM_RSRC1_LS__DX10_CLAMP_MASK 0x00200000L
+#define SPI_SHADER_PGM_RSRC1_LS__DX10_CLAMP__SHIFT 0x00000015
+#define SPI_SHADER_PGM_RSRC1_LS__FLOAT_MODE_MASK 0x000ff000L
+#define SPI_SHADER_PGM_RSRC1_LS__FLOAT_MODE__SHIFT 0x0000000c
+#define SPI_SHADER_PGM_RSRC1_LS__IEEE_MODE_MASK 0x00800000L
+#define SPI_SHADER_PGM_RSRC1_LS__IEEE_MODE__SHIFT 0x00000017
+#define SPI_SHADER_PGM_RSRC1_LS__PRIORITY_MASK 0x00000c00L
+#define SPI_SHADER_PGM_RSRC1_LS__PRIORITY__SHIFT 0x0000000a
+#define SPI_SHADER_PGM_RSRC1_LS__PRIV_MASK 0x00100000L
+#define SPI_SHADER_PGM_RSRC1_LS__PRIV__SHIFT 0x00000014
+#define SPI_SHADER_PGM_RSRC1_LS__SGPRS_MASK 0x000003c0L
+#define SPI_SHADER_PGM_RSRC1_LS__SGPRS__SHIFT 0x00000006
+#define SPI_SHADER_PGM_RSRC1_LS__VGPR_COMP_CNT_MASK 0x03000000L
+#define SPI_SHADER_PGM_RSRC1_LS__VGPR_COMP_CNT__SHIFT 0x00000018
+#define SPI_SHADER_PGM_RSRC1_LS__VGPRS_MASK 0x0000003fL
+#define SPI_SHADER_PGM_RSRC1_LS__VGPRS__SHIFT 0x00000000
+#define SPI_SHADER_PGM_RSRC1_PS__CACHE_CTL_MASK 0x0e000000L
+#define SPI_SHADER_PGM_RSRC1_PS__CACHE_CTL__SHIFT 0x00000019
+#define SPI_SHADER_PGM_RSRC1_PS__CDBG_USER_MASK 0x10000000L
+#define SPI_SHADER_PGM_RSRC1_PS__CDBG_USER__SHIFT 0x0000001c
+#define SPI_SHADER_PGM_RSRC1_PS__CU_GROUP_DISABLE_MASK 0x01000000L
+#define SPI_SHADER_PGM_RSRC1_PS__CU_GROUP_DISABLE__SHIFT 0x00000018
+#define SPI_SHADER_PGM_RSRC1_PS__DEBUG_MODE_MASK 0x00400000L
+#define SPI_SHADER_PGM_RSRC1_PS__DEBUG_MODE__SHIFT 0x00000016
+#define SPI_SHADER_PGM_RSRC1_PS__DX10_CLAMP_MASK 0x00200000L
+#define SPI_SHADER_PGM_RSRC1_PS__DX10_CLAMP__SHIFT 0x00000015
+#define SPI_SHADER_PGM_RSRC1_PS__FLOAT_MODE_MASK 0x000ff000L
+#define SPI_SHADER_PGM_RSRC1_PS__FLOAT_MODE__SHIFT 0x0000000c
+#define SPI_SHADER_PGM_RSRC1_PS__IEEE_MODE_MASK 0x00800000L
+#define SPI_SHADER_PGM_RSRC1_PS__IEEE_MODE__SHIFT 0x00000017
+#define SPI_SHADER_PGM_RSRC1_PS__PRIORITY_MASK 0x00000c00L
+#define SPI_SHADER_PGM_RSRC1_PS__PRIORITY__SHIFT 0x0000000a
+#define SPI_SHADER_PGM_RSRC1_PS__PRIV_MASK 0x00100000L
+#define SPI_SHADER_PGM_RSRC1_PS__PRIV__SHIFT 0x00000014
+#define SPI_SHADER_PGM_RSRC1_PS__SGPRS_MASK 0x000003c0L
+#define SPI_SHADER_PGM_RSRC1_PS__SGPRS__SHIFT 0x00000006
+#define SPI_SHADER_PGM_RSRC1_PS__VGPRS_MASK 0x0000003fL
+#define SPI_SHADER_PGM_RSRC1_PS__VGPRS__SHIFT 0x00000000
+#define SPI_SHADER_PGM_RSRC1_VS__CACHE_CTL_MASK 0x38000000L
+#define SPI_SHADER_PGM_RSRC1_VS__CACHE_CTL__SHIFT 0x0000001b
+#define SPI_SHADER_PGM_RSRC1_VS__CDBG_USER_MASK 0x40000000L
+#define SPI_SHADER_PGM_RSRC1_VS__CDBG_USER__SHIFT 0x0000001e
+#define SPI_SHADER_PGM_RSRC1_VS__CU_GROUP_ENABLE_MASK 0x04000000L
+#define SPI_SHADER_PGM_RSRC1_VS__CU_GROUP_ENABLE__SHIFT 0x0000001a
+#define SPI_SHADER_PGM_RSRC1_VS__DEBUG_MODE_MASK 0x00400000L
+#define SPI_SHADER_PGM_RSRC1_VS__DEBUG_MODE__SHIFT 0x00000016
+#define SPI_SHADER_PGM_RSRC1_VS__DX10_CLAMP_MASK 0x00200000L
+#define SPI_SHADER_PGM_RSRC1_VS__DX10_CLAMP__SHIFT 0x00000015
+#define SPI_SHADER_PGM_RSRC1_VS__FLOAT_MODE_MASK 0x000ff000L
+#define SPI_SHADER_PGM_RSRC1_VS__FLOAT_MODE__SHIFT 0x0000000c
+#define SPI_SHADER_PGM_RSRC1_VS__IEEE_MODE_MASK 0x00800000L
+#define SPI_SHADER_PGM_RSRC1_VS__IEEE_MODE__SHIFT 0x00000017
+#define SPI_SHADER_PGM_RSRC1_VS__PRIORITY_MASK 0x00000c00L
+#define SPI_SHADER_PGM_RSRC1_VS__PRIORITY__SHIFT 0x0000000a
+#define SPI_SHADER_PGM_RSRC1_VS__PRIV_MASK 0x00100000L
+#define SPI_SHADER_PGM_RSRC1_VS__PRIV__SHIFT 0x00000014
+#define SPI_SHADER_PGM_RSRC1_VS__SGPRS_MASK 0x000003c0L
+#define SPI_SHADER_PGM_RSRC1_VS__SGPRS__SHIFT 0x00000006
+#define SPI_SHADER_PGM_RSRC1_VS__VGPR_COMP_CNT_MASK 0x03000000L
+#define SPI_SHADER_PGM_RSRC1_VS__VGPR_COMP_CNT__SHIFT 0x00000018
+#define SPI_SHADER_PGM_RSRC1_VS__VGPRS_MASK 0x0000003fL
+#define SPI_SHADER_PGM_RSRC1_VS__VGPRS__SHIFT 0x00000000
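+
+/*
+ * Usage sketch (illustrative): on GCN parts the RSRC1 VGPRS/SGPRS fields
+ * are commonly programmed as block counts minus one (VGPRs granulated in
+ * units of 4, SGPRs in units of 8), as open-source user-mode drivers do.
+ * Treat those granularities as an assumption, not something this header
+ * encodes.
+ */
+static inline unsigned int
+spi_shader_pgm_rsrc1_gprs(unsigned int num_vgprs, unsigned int num_sgprs)
+{
+	return (((num_vgprs - 1) / 4) << SPI_SHADER_PGM_RSRC1_VS__VGPRS__SHIFT) |
+	       (((num_sgprs - 1) / 8) << SPI_SHADER_PGM_RSRC1_VS__SGPRS__SHIFT);
+}
+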
+#define SPI_SHADER_PGM_RSRC2_ES__EXCP_EN_MASK 0x00007f00L
+#define SPI_SHADER_PGM_RSRC2_ES__EXCP_EN__SHIFT 0x00000008
+#define SPI_SHADER_PGM_RSRC2_ES_GS__EXCP_EN_MASK 0x0001ff00L
+#define SPI_SHADER_PGM_RSRC2_ES_GS__EXCP_EN__SHIFT 0x00000008
+#define SPI_SHADER_PGM_RSRC2_ES_GS__LDS_SIZE_MASK 0x1ff00000L
+#define SPI_SHADER_PGM_RSRC2_ES_GS__LDS_SIZE__SHIFT 0x00000014
+#define SPI_SHADER_PGM_RSRC2_ES_GS__OC_LDS_EN_MASK 0x00000080L
+#define SPI_SHADER_PGM_RSRC2_ES_GS__OC_LDS_EN__SHIFT 0x00000007
+#define SPI_SHADER_PGM_RSRC2_ES_GS__SCRATCH_EN_MASK 0x00000001L
+#define SPI_SHADER_PGM_RSRC2_ES_GS__SCRATCH_EN__SHIFT 0x00000000
+#define SPI_SHADER_PGM_RSRC2_ES_GS__TRAP_PRESENT_MASK 0x00000040L
+#define SPI_SHADER_PGM_RSRC2_ES_GS__TRAP_PRESENT__SHIFT 0x00000006
+#define SPI_SHADER_PGM_RSRC2_ES_GS__USER_SGPR_MASK 0x0000003eL
+#define SPI_SHADER_PGM_RSRC2_ES_GS__USER_SGPR__SHIFT 0x00000001
+#define SPI_SHADER_PGM_RSRC2_ES__LDS_SIZE_MASK 0x1ff00000L
+#define SPI_SHADER_PGM_RSRC2_ES__LDS_SIZE__SHIFT 0x00000014
+#define SPI_SHADER_PGM_RSRC2_ES__OC_LDS_EN_MASK 0x00000080L
+#define SPI_SHADER_PGM_RSRC2_ES__OC_LDS_EN__SHIFT 0x00000007
+#define SPI_SHADER_PGM_RSRC2_ES__SCRATCH_EN_MASK 0x00000001L
+#define SPI_SHADER_PGM_RSRC2_ES__SCRATCH_EN__SHIFT 0x00000000
+#define SPI_SHADER_PGM_RSRC2_ES__TRAP_PRESENT_MASK 0x00000040L
+#define SPI_SHADER_PGM_RSRC2_ES__TRAP_PRESENT__SHIFT 0x00000006
+#define SPI_SHADER_PGM_RSRC2_ES__USER_SGPR_MASK 0x0000003eL
+#define SPI_SHADER_PGM_RSRC2_ES__USER_SGPR__SHIFT 0x00000001
+#define SPI_SHADER_PGM_RSRC2_ES_VS__EXCP_EN_MASK 0x0001ff00L
+#define SPI_SHADER_PGM_RSRC2_ES_VS__EXCP_EN__SHIFT 0x00000008
+#define SPI_SHADER_PGM_RSRC2_ES_VS__LDS_SIZE_MASK 0x1ff00000L
+#define SPI_SHADER_PGM_RSRC2_ES_VS__LDS_SIZE__SHIFT 0x00000014
+#define SPI_SHADER_PGM_RSRC2_ES_VS__OC_LDS_EN_MASK 0x00000080L
+#define SPI_SHADER_PGM_RSRC2_ES_VS__OC_LDS_EN__SHIFT 0x00000007
+#define SPI_SHADER_PGM_RSRC2_ES_VS__SCRATCH_EN_MASK 0x00000001L
+#define SPI_SHADER_PGM_RSRC2_ES_VS__SCRATCH_EN__SHIFT 0x00000000
+#define SPI_SHADER_PGM_RSRC2_ES_VS__TRAP_PRESENT_MASK 0x00000040L
+#define SPI_SHADER_PGM_RSRC2_ES_VS__TRAP_PRESENT__SHIFT 0x00000006
+#define SPI_SHADER_PGM_RSRC2_ES_VS__USER_SGPR_MASK 0x0000003eL
+#define SPI_SHADER_PGM_RSRC2_ES_VS__USER_SGPR__SHIFT 0x00000001
+#define SPI_SHADER_PGM_RSRC2_GS__EXCP_EN_MASK 0x00003f80L
+#define SPI_SHADER_PGM_RSRC2_GS__EXCP_EN__SHIFT 0x00000007
+#define SPI_SHADER_PGM_RSRC2_GS__SCRATCH_EN_MASK 0x00000001L
+#define SPI_SHADER_PGM_RSRC2_GS__SCRATCH_EN__SHIFT 0x00000000
+#define SPI_SHADER_PGM_RSRC2_GS__TRAP_PRESENT_MASK 0x00000040L
+#define SPI_SHADER_PGM_RSRC2_GS__TRAP_PRESENT__SHIFT 0x00000006
+#define SPI_SHADER_PGM_RSRC2_GS__USER_SGPR_MASK 0x0000003eL
+#define SPI_SHADER_PGM_RSRC2_GS__USER_SGPR__SHIFT 0x00000001
+#define SPI_SHADER_PGM_RSRC2_HS__EXCP_EN_MASK 0x0000fe00L
+#define SPI_SHADER_PGM_RSRC2_HS__EXCP_EN__SHIFT 0x00000009
+#define SPI_SHADER_PGM_RSRC2_HS__OC_LDS_EN_MASK 0x00000080L
+#define SPI_SHADER_PGM_RSRC2_HS__OC_LDS_EN__SHIFT 0x00000007
+#define SPI_SHADER_PGM_RSRC2_HS__SCRATCH_EN_MASK 0x00000001L
+#define SPI_SHADER_PGM_RSRC2_HS__SCRATCH_EN__SHIFT 0x00000000
+#define SPI_SHADER_PGM_RSRC2_HS__TG_SIZE_EN_MASK 0x00000100L
+#define SPI_SHADER_PGM_RSRC2_HS__TG_SIZE_EN__SHIFT 0x00000008
+#define SPI_SHADER_PGM_RSRC2_HS__TRAP_PRESENT_MASK 0x00000040L
+#define SPI_SHADER_PGM_RSRC2_HS__TRAP_PRESENT__SHIFT 0x00000006
+#define SPI_SHADER_PGM_RSRC2_HS__USER_SGPR_MASK 0x0000003eL
+#define SPI_SHADER_PGM_RSRC2_HS__USER_SGPR__SHIFT 0x00000001
+#define SPI_SHADER_PGM_RSRC2_LS_ES__EXCP_EN_MASK 0x01ff0000L
+#define SPI_SHADER_PGM_RSRC2_LS_ES__EXCP_EN__SHIFT 0x00000010
+#define SPI_SHADER_PGM_RSRC2_LS_ES__LDS_SIZE_MASK 0x0000ff80L
+#define SPI_SHADER_PGM_RSRC2_LS_ES__LDS_SIZE__SHIFT 0x00000007
+#define SPI_SHADER_PGM_RSRC2_LS_ES__SCRATCH_EN_MASK 0x00000001L
+#define SPI_SHADER_PGM_RSRC2_LS_ES__SCRATCH_EN__SHIFT 0x00000000
+#define SPI_SHADER_PGM_RSRC2_LS_ES__TRAP_PRESENT_MASK 0x00000040L
+#define SPI_SHADER_PGM_RSRC2_LS_ES__TRAP_PRESENT__SHIFT 0x00000006
+#define SPI_SHADER_PGM_RSRC2_LS_ES__USER_SGPR_MASK 0x0000003eL
+#define SPI_SHADER_PGM_RSRC2_LS_ES__USER_SGPR__SHIFT 0x00000001
+#define SPI_SHADER_PGM_RSRC2_LS__EXCP_EN_MASK 0x007f0000L
+#define SPI_SHADER_PGM_RSRC2_LS__EXCP_EN__SHIFT 0x00000010
+#define SPI_SHADER_PGM_RSRC2_LS_HS__EXCP_EN_MASK 0x01ff0000L
+#define SPI_SHADER_PGM_RSRC2_LS_HS__EXCP_EN__SHIFT 0x00000010
+#define SPI_SHADER_PGM_RSRC2_LS_HS__LDS_SIZE_MASK 0x0000ff80L
+#define SPI_SHADER_PGM_RSRC2_LS_HS__LDS_SIZE__SHIFT 0x00000007
+#define SPI_SHADER_PGM_RSRC2_LS_HS__SCRATCH_EN_MASK 0x00000001L
+#define SPI_SHADER_PGM_RSRC2_LS_HS__SCRATCH_EN__SHIFT 0x00000000
+#define SPI_SHADER_PGM_RSRC2_LS_HS__TRAP_PRESENT_MASK 0x00000040L
+#define SPI_SHADER_PGM_RSRC2_LS_HS__TRAP_PRESENT__SHIFT 0x00000006
+#define SPI_SHADER_PGM_RSRC2_LS_HS__USER_SGPR_MASK 0x0000003eL
+#define SPI_SHADER_PGM_RSRC2_LS_HS__USER_SGPR__SHIFT 0x00000001
+#define SPI_SHADER_PGM_RSRC2_LS__LDS_SIZE_MASK 0x0000ff80L
+#define SPI_SHADER_PGM_RSRC2_LS__LDS_SIZE__SHIFT 0x00000007
+#define SPI_SHADER_PGM_RSRC2_LS__SCRATCH_EN_MASK 0x00000001L
+#define SPI_SHADER_PGM_RSRC2_LS__SCRATCH_EN__SHIFT 0x00000000
+#define SPI_SHADER_PGM_RSRC2_LS__TRAP_PRESENT_MASK 0x00000040L
+#define SPI_SHADER_PGM_RSRC2_LS__TRAP_PRESENT__SHIFT 0x00000006
+#define SPI_SHADER_PGM_RSRC2_LS__USER_SGPR_MASK 0x0000003eL
+#define SPI_SHADER_PGM_RSRC2_LS__USER_SGPR__SHIFT 0x00000001
+#define SPI_SHADER_PGM_RSRC2_LS_VS__EXCP_EN_MASK 0x01ff0000L
+#define SPI_SHADER_PGM_RSRC2_LS_VS__EXCP_EN__SHIFT 0x00000010
+#define SPI_SHADER_PGM_RSRC2_LS_VS__LDS_SIZE_MASK 0x0000ff80L
+#define SPI_SHADER_PGM_RSRC2_LS_VS__LDS_SIZE__SHIFT 0x00000007
+#define SPI_SHADER_PGM_RSRC2_LS_VS__SCRATCH_EN_MASK 0x00000001L
+#define SPI_SHADER_PGM_RSRC2_LS_VS__SCRATCH_EN__SHIFT 0x00000000
+#define SPI_SHADER_PGM_RSRC2_LS_VS__TRAP_PRESENT_MASK 0x00000040L
+#define SPI_SHADER_PGM_RSRC2_LS_VS__TRAP_PRESENT__SHIFT 0x00000006
+#define SPI_SHADER_PGM_RSRC2_LS_VS__USER_SGPR_MASK 0x0000003eL
+#define SPI_SHADER_PGM_RSRC2_LS_VS__USER_SGPR__SHIFT 0x00000001
+#define SPI_SHADER_PGM_RSRC2_PS__EXCP_EN_MASK 0x007f0000L
+#define SPI_SHADER_PGM_RSRC2_PS__EXCP_EN__SHIFT 0x00000010
+#define SPI_SHADER_PGM_RSRC2_PS__EXTRA_LDS_SIZE_MASK 0x0000ff00L
+#define SPI_SHADER_PGM_RSRC2_PS__EXTRA_LDS_SIZE__SHIFT 0x00000008
+#define SPI_SHADER_PGM_RSRC2_PS__SCRATCH_EN_MASK 0x00000001L
+#define SPI_SHADER_PGM_RSRC2_PS__SCRATCH_EN__SHIFT 0x00000000
+#define SPI_SHADER_PGM_RSRC2_PS__TRAP_PRESENT_MASK 0x00000040L
+#define SPI_SHADER_PGM_RSRC2_PS__TRAP_PRESENT__SHIFT 0x00000006
+#define SPI_SHADER_PGM_RSRC2_PS__USER_SGPR_MASK 0x0000003eL
+#define SPI_SHADER_PGM_RSRC2_PS__USER_SGPR__SHIFT 0x00000001
+#define SPI_SHADER_PGM_RSRC2_PS__WAVE_CNT_EN_MASK 0x00000080L
+#define SPI_SHADER_PGM_RSRC2_PS__WAVE_CNT_EN__SHIFT 0x00000007
+#define SPI_SHADER_PGM_RSRC2_VS__EXCP_EN_MASK 0x000fe000L
+#define SPI_SHADER_PGM_RSRC2_VS__EXCP_EN__SHIFT 0x0000000d
+#define SPI_SHADER_PGM_RSRC2_VS__OC_LDS_EN_MASK 0x00000080L
+#define SPI_SHADER_PGM_RSRC2_VS__OC_LDS_EN__SHIFT 0x00000007
+#define SPI_SHADER_PGM_RSRC2_VS__SCRATCH_EN_MASK 0x00000001L
+#define SPI_SHADER_PGM_RSRC2_VS__SCRATCH_EN__SHIFT 0x00000000
+#define SPI_SHADER_PGM_RSRC2_VS__SO_BASE0_EN_MASK 0x00000100L
+#define SPI_SHADER_PGM_RSRC2_VS__SO_BASE0_EN__SHIFT 0x00000008
+#define SPI_SHADER_PGM_RSRC2_VS__SO_BASE1_EN_MASK 0x00000200L
+#define SPI_SHADER_PGM_RSRC2_VS__SO_BASE1_EN__SHIFT 0x00000009
+#define SPI_SHADER_PGM_RSRC2_VS__SO_BASE2_EN_MASK 0x00000400L
+#define SPI_SHADER_PGM_RSRC2_VS__SO_BASE2_EN__SHIFT 0x0000000a
+#define SPI_SHADER_PGM_RSRC2_VS__SO_BASE3_EN_MASK 0x00000800L
+#define SPI_SHADER_PGM_RSRC2_VS__SO_BASE3_EN__SHIFT 0x0000000b
+#define SPI_SHADER_PGM_RSRC2_VS__SO_EN_MASK 0x00001000L
+#define SPI_SHADER_PGM_RSRC2_VS__SO_EN__SHIFT 0x0000000c
+#define SPI_SHADER_PGM_RSRC2_VS__TRAP_PRESENT_MASK 0x00000040L
+#define SPI_SHADER_PGM_RSRC2_VS__TRAP_PRESENT__SHIFT 0x00000006
+#define SPI_SHADER_PGM_RSRC2_VS__USER_SGPR_MASK 0x0000003eL
+#define SPI_SHADER_PGM_RSRC2_VS__USER_SGPR__SHIFT 0x00000001
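+
+/*
+ * Usage sketch (illustrative): decoding an RSRC2 field is the usual
+ * mask-then-shift, here extracting the user-SGPR count for the VS stage.
+ */
+static inline unsigned int
+spi_shader_pgm_rsrc2_user_sgpr(unsigned int rsrc2)
+{
+	return (rsrc2 & SPI_SHADER_PGM_RSRC2_VS__USER_SGPR_MASK) >>
+	       SPI_SHADER_PGM_RSRC2_VS__USER_SGPR__SHIFT;
+}
+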
+#define SPI_SHADER_POS_FORMAT__POS0_EXPORT_FORMAT_MASK 0x0000000fL
+#define SPI_SHADER_POS_FORMAT__POS0_EXPORT_FORMAT__SHIFT 0x00000000
+#define SPI_SHADER_POS_FORMAT__POS1_EXPORT_FORMAT_MASK 0x000000f0L
+#define SPI_SHADER_POS_FORMAT__POS1_EXPORT_FORMAT__SHIFT 0x00000004
+#define SPI_SHADER_POS_FORMAT__POS2_EXPORT_FORMAT_MASK 0x00000f00L
+#define SPI_SHADER_POS_FORMAT__POS2_EXPORT_FORMAT__SHIFT 0x00000008
+#define SPI_SHADER_POS_FORMAT__POS3_EXPORT_FORMAT_MASK 0x0000f000L
+#define SPI_SHADER_POS_FORMAT__POS3_EXPORT_FORMAT__SHIFT 0x0000000c
+#define SPI_SHADER_TBA_HI_ES__MEM_BASE_MASK 0x000000ffL
+#define SPI_SHADER_TBA_HI_ES__MEM_BASE__SHIFT 0x00000000
+#define SPI_SHADER_TBA_HI_GS__MEM_BASE_MASK 0x000000ffL
+#define SPI_SHADER_TBA_HI_GS__MEM_BASE__SHIFT 0x00000000
+#define SPI_SHADER_TBA_HI_HS__MEM_BASE_MASK 0x000000ffL
+#define SPI_SHADER_TBA_HI_HS__MEM_BASE__SHIFT 0x00000000
+#define SPI_SHADER_TBA_HI_LS__MEM_BASE_MASK 0x000000ffL
+#define SPI_SHADER_TBA_HI_LS__MEM_BASE__SHIFT 0x00000000
+#define SPI_SHADER_TBA_HI_PS__MEM_BASE_MASK 0x000000ffL
+#define SPI_SHADER_TBA_HI_PS__MEM_BASE__SHIFT 0x00000000
+#define SPI_SHADER_TBA_HI_VS__MEM_BASE_MASK 0x000000ffL
+#define SPI_SHADER_TBA_HI_VS__MEM_BASE__SHIFT 0x00000000
+#define SPI_SHADER_TBA_LO_ES__MEM_BASE_MASK 0xffffffffL
+#define SPI_SHADER_TBA_LO_ES__MEM_BASE__SHIFT 0x00000000
+#define SPI_SHADER_TBA_LO_GS__MEM_BASE_MASK 0xffffffffL
+#define SPI_SHADER_TBA_LO_GS__MEM_BASE__SHIFT 0x00000000
+#define SPI_SHADER_TBA_LO_HS__MEM_BASE_MASK 0xffffffffL
+#define SPI_SHADER_TBA_LO_HS__MEM_BASE__SHIFT 0x00000000
+#define SPI_SHADER_TBA_LO_LS__MEM_BASE_MASK 0xffffffffL
+#define SPI_SHADER_TBA_LO_LS__MEM_BASE__SHIFT 0x00000000
+#define SPI_SHADER_TBA_LO_PS__MEM_BASE_MASK 0xffffffffL
+#define SPI_SHADER_TBA_LO_PS__MEM_BASE__SHIFT 0x00000000
+#define SPI_SHADER_TBA_LO_VS__MEM_BASE_MASK 0xffffffffL
+#define SPI_SHADER_TBA_LO_VS__MEM_BASE__SHIFT 0x00000000
+#define SPI_SHADER_TMA_HI_ES__MEM_BASE_MASK 0x000000ffL
+#define SPI_SHADER_TMA_HI_ES__MEM_BASE__SHIFT 0x00000000
+#define SPI_SHADER_TMA_HI_GS__MEM_BASE_MASK 0x000000ffL
+#define SPI_SHADER_TMA_HI_GS__MEM_BASE__SHIFT 0x00000000
+#define SPI_SHADER_TMA_HI_HS__MEM_BASE_MASK 0x000000ffL
+#define SPI_SHADER_TMA_HI_HS__MEM_BASE__SHIFT 0x00000000
+#define SPI_SHADER_TMA_HI_LS__MEM_BASE_MASK 0x000000ffL
+#define SPI_SHADER_TMA_HI_LS__MEM_BASE__SHIFT 0x00000000
+#define SPI_SHADER_TMA_HI_PS__MEM_BASE_MASK 0x000000ffL
+#define SPI_SHADER_TMA_HI_PS__MEM_BASE__SHIFT 0x00000000
+#define SPI_SHADER_TMA_HI_VS__MEM_BASE_MASK 0x000000ffL
+#define SPI_SHADER_TMA_HI_VS__MEM_BASE__SHIFT 0x00000000
+#define SPI_SHADER_TMA_LO_ES__MEM_BASE_MASK 0xffffffffL
+#define SPI_SHADER_TMA_LO_ES__MEM_BASE__SHIFT 0x00000000
+#define SPI_SHADER_TMA_LO_GS__MEM_BASE_MASK 0xffffffffL
+#define SPI_SHADER_TMA_LO_GS__MEM_BASE__SHIFT 0x00000000
+#define SPI_SHADER_TMA_LO_HS__MEM_BASE_MASK 0xffffffffL
+#define SPI_SHADER_TMA_LO_HS__MEM_BASE__SHIFT 0x00000000
+#define SPI_SHADER_TMA_LO_LS__MEM_BASE_MASK 0xffffffffL
+#define SPI_SHADER_TMA_LO_LS__MEM_BASE__SHIFT 0x00000000
+#define SPI_SHADER_TMA_LO_PS__MEM_BASE_MASK 0xffffffffL
+#define SPI_SHADER_TMA_LO_PS__MEM_BASE__SHIFT 0x00000000
+#define SPI_SHADER_TMA_LO_VS__MEM_BASE_MASK 0xffffffffL
+#define SPI_SHADER_TMA_LO_VS__MEM_BASE__SHIFT 0x00000000
+#define SPI_SHADER_USER_DATA_ES_0__DATA_MASK 0xffffffffL
+#define SPI_SHADER_USER_DATA_ES_0__DATA__SHIFT 0x00000000
+#define SPI_SHADER_USER_DATA_ES_10__DATA_MASK 0xffffffffL
+#define SPI_SHADER_USER_DATA_ES_10__DATA__SHIFT 0x00000000
+#define SPI_SHADER_USER_DATA_ES_11__DATA_MASK 0xffffffffL
+#define SPI_SHADER_USER_DATA_ES_11__DATA__SHIFT 0x00000000
+#define SPI_SHADER_USER_DATA_ES_12__DATA_MASK 0xffffffffL
+#define SPI_SHADER_USER_DATA_ES_12__DATA__SHIFT 0x00000000
+#define SPI_SHADER_USER_DATA_ES_13__DATA_MASK 0xffffffffL
+#define SPI_SHADER_USER_DATA_ES_13__DATA__SHIFT 0x00000000
+#define SPI_SHADER_USER_DATA_ES_14__DATA_MASK 0xffffffffL
+#define SPI_SHADER_USER_DATA_ES_14__DATA__SHIFT 0x00000000
+#define SPI_SHADER_USER_DATA_ES_15__DATA_MASK 0xffffffffL
+#define SPI_SHADER_USER_DATA_ES_15__DATA__SHIFT 0x00000000
+#define SPI_SHADER_USER_DATA_ES_1__DATA_MASK 0xffffffffL
+#define SPI_SHADER_USER_DATA_ES_1__DATA__SHIFT 0x00000000
+#define SPI_SHADER_USER_DATA_ES_2__DATA_MASK 0xffffffffL
+#define SPI_SHADER_USER_DATA_ES_2__DATA__SHIFT 0x00000000
+#define SPI_SHADER_USER_DATA_ES_3__DATA_MASK 0xffffffffL
+#define SPI_SHADER_USER_DATA_ES_3__DATA__SHIFT 0x00000000
+#define SPI_SHADER_USER_DATA_ES_4__DATA_MASK 0xffffffffL
+#define SPI_SHADER_USER_DATA_ES_4__DATA__SHIFT 0x00000000
+#define SPI_SHADER_USER_DATA_ES_5__DATA_MASK 0xffffffffL
+#define SPI_SHADER_USER_DATA_ES_5__DATA__SHIFT 0x00000000
+#define SPI_SHADER_USER_DATA_ES_6__DATA_MASK 0xffffffffL
+#define SPI_SHADER_USER_DATA_ES_6__DATA__SHIFT 0x00000000
+#define SPI_SHADER_USER_DATA_ES_7__DATA_MASK 0xffffffffL
+#define SPI_SHADER_USER_DATA_ES_7__DATA__SHIFT 0x00000000
+#define SPI_SHADER_USER_DATA_ES_8__DATA_MASK 0xffffffffL
+#define SPI_SHADER_USER_DATA_ES_8__DATA__SHIFT 0x00000000
+#define SPI_SHADER_USER_DATA_ES_9__DATA_MASK 0xffffffffL
+#define SPI_SHADER_USER_DATA_ES_9__DATA__SHIFT 0x00000000
+#define SPI_SHADER_USER_DATA_GS_0__DATA_MASK 0xffffffffL
+#define SPI_SHADER_USER_DATA_GS_0__DATA__SHIFT 0x00000000
+#define SPI_SHADER_USER_DATA_GS_10__DATA_MASK 0xffffffffL
+#define SPI_SHADER_USER_DATA_GS_10__DATA__SHIFT 0x00000000
+#define SPI_SHADER_USER_DATA_GS_11__DATA_MASK 0xffffffffL
+#define SPI_SHADER_USER_DATA_GS_11__DATA__SHIFT 0x00000000
+#define SPI_SHADER_USER_DATA_GS_12__DATA_MASK 0xffffffffL
+#define SPI_SHADER_USER_DATA_GS_12__DATA__SHIFT 0x00000000
+#define SPI_SHADER_USER_DATA_GS_13__DATA_MASK 0xffffffffL
+#define SPI_SHADER_USER_DATA_GS_13__DATA__SHIFT 0x00000000
+#define SPI_SHADER_USER_DATA_GS_14__DATA_MASK 0xffffffffL
+#define SPI_SHADER_USER_DATA_GS_14__DATA__SHIFT 0x00000000
+#define SPI_SHADER_USER_DATA_GS_15__DATA_MASK 0xffffffffL
+#define SPI_SHADER_USER_DATA_GS_15__DATA__SHIFT 0x00000000
+#define SPI_SHADER_USER_DATA_GS_1__DATA_MASK 0xffffffffL
+#define SPI_SHADER_USER_DATA_GS_1__DATA__SHIFT 0x00000000
+#define SPI_SHADER_USER_DATA_GS_2__DATA_MASK 0xffffffffL
+#define SPI_SHADER_USER_DATA_GS_2__DATA__SHIFT 0x00000000
+#define SPI_SHADER_USER_DATA_GS_3__DATA_MASK 0xffffffffL
+#define SPI_SHADER_USER_DATA_GS_3__DATA__SHIFT 0x00000000
+#define SPI_SHADER_USER_DATA_GS_4__DATA_MASK 0xffffffffL
+#define SPI_SHADER_USER_DATA_GS_4__DATA__SHIFT 0x00000000
+#define SPI_SHADER_USER_DATA_GS_5__DATA_MASK 0xffffffffL
+#define SPI_SHADER_USER_DATA_GS_5__DATA__SHIFT 0x00000000
+#define SPI_SHADER_USER_DATA_GS_6__DATA_MASK 0xffffffffL
+#define SPI_SHADER_USER_DATA_GS_6__DATA__SHIFT 0x00000000
+#define SPI_SHADER_USER_DATA_GS_7__DATA_MASK 0xffffffffL
+#define SPI_SHADER_USER_DATA_GS_7__DATA__SHIFT 0x00000000
+#define SPI_SHADER_USER_DATA_GS_8__DATA_MASK 0xffffffffL
+#define SPI_SHADER_USER_DATA_GS_8__DATA__SHIFT 0x00000000
+#define SPI_SHADER_USER_DATA_GS_9__DATA_MASK 0xffffffffL
+#define SPI_SHADER_USER_DATA_GS_9__DATA__SHIFT 0x00000000
+#define SPI_SHADER_USER_DATA_HS_0__DATA_MASK 0xffffffffL
+#define SPI_SHADER_USER_DATA_HS_0__DATA__SHIFT 0x00000000
+#define SPI_SHADER_USER_DATA_HS_10__DATA_MASK 0xffffffffL
+#define SPI_SHADER_USER_DATA_HS_10__DATA__SHIFT 0x00000000
+#define SPI_SHADER_USER_DATA_HS_11__DATA_MASK 0xffffffffL
+#define SPI_SHADER_USER_DATA_HS_11__DATA__SHIFT 0x00000000
+#define SPI_SHADER_USER_DATA_HS_12__DATA_MASK 0xffffffffL
+#define SPI_SHADER_USER_DATA_HS_12__DATA__SHIFT 0x00000000
+#define SPI_SHADER_USER_DATA_HS_13__DATA_MASK 0xffffffffL
+#define SPI_SHADER_USER_DATA_HS_13__DATA__SHIFT 0x00000000
+#define SPI_SHADER_USER_DATA_HS_14__DATA_MASK 0xffffffffL
+#define SPI_SHADER_USER_DATA_HS_14__DATA__SHIFT 0x00000000
+#define SPI_SHADER_USER_DATA_HS_15__DATA_MASK 0xffffffffL
+#define SPI_SHADER_USER_DATA_HS_15__DATA__SHIFT 0x00000000
+#define SPI_SHADER_USER_DATA_HS_1__DATA_MASK 0xffffffffL
+#define SPI_SHADER_USER_DATA_HS_1__DATA__SHIFT 0x00000000
+#define SPI_SHADER_USER_DATA_HS_2__DATA_MASK 0xffffffffL
+#define SPI_SHADER_USER_DATA_HS_2__DATA__SHIFT 0x00000000
+#define SPI_SHADER_USER_DATA_HS_3__DATA_MASK 0xffffffffL
+#define SPI_SHADER_USER_DATA_HS_3__DATA__SHIFT 0x00000000
+#define SPI_SHADER_USER_DATA_HS_4__DATA_MASK 0xffffffffL
+#define SPI_SHADER_USER_DATA_HS_4__DATA__SHIFT 0x00000000
+#define SPI_SHADER_USER_DATA_HS_5__DATA_MASK 0xffffffffL
+#define SPI_SHADER_USER_DATA_HS_5__DATA__SHIFT 0x00000000
+#define SPI_SHADER_USER_DATA_HS_6__DATA_MASK 0xffffffffL
+#define SPI_SHADER_USER_DATA_HS_6__DATA__SHIFT 0x00000000
+#define SPI_SHADER_USER_DATA_HS_7__DATA_MASK 0xffffffffL
+#define SPI_SHADER_USER_DATA_HS_7__DATA__SHIFT 0x00000000
+#define SPI_SHADER_USER_DATA_HS_8__DATA_MASK 0xffffffffL
+#define SPI_SHADER_USER_DATA_HS_8__DATA__SHIFT 0x00000000
+#define SPI_SHADER_USER_DATA_HS_9__DATA_MASK 0xffffffffL
+#define SPI_SHADER_USER_DATA_HS_9__DATA__SHIFT 0x00000000
+#define SPI_SHADER_USER_DATA_LS_0__DATA_MASK 0xffffffffL
+#define SPI_SHADER_USER_DATA_LS_0__DATA__SHIFT 0x00000000
+#define SPI_SHADER_USER_DATA_LS_10__DATA_MASK 0xffffffffL
+#define SPI_SHADER_USER_DATA_LS_10__DATA__SHIFT 0x00000000
+#define SPI_SHADER_USER_DATA_LS_11__DATA_MASK 0xffffffffL
+#define SPI_SHADER_USER_DATA_LS_11__DATA__SHIFT 0x00000000
+#define SPI_SHADER_USER_DATA_LS_12__DATA_MASK 0xffffffffL
+#define SPI_SHADER_USER_DATA_LS_12__DATA__SHIFT 0x00000000
+#define SPI_SHADER_USER_DATA_LS_13__DATA_MASK 0xffffffffL
+#define SPI_SHADER_USER_DATA_LS_13__DATA__SHIFT 0x00000000
+#define SPI_SHADER_USER_DATA_LS_14__DATA_MASK 0xffffffffL
+#define SPI_SHADER_USER_DATA_LS_14__DATA__SHIFT 0x00000000
+#define SPI_SHADER_USER_DATA_LS_15__DATA_MASK 0xffffffffL
+#define SPI_SHADER_USER_DATA_LS_15__DATA__SHIFT 0x00000000
+#define SPI_SHADER_USER_DATA_LS_1__DATA_MASK 0xffffffffL
+#define SPI_SHADER_USER_DATA_LS_1__DATA__SHIFT 0x00000000
+#define SPI_SHADER_USER_DATA_LS_2__DATA_MASK 0xffffffffL
+#define SPI_SHADER_USER_DATA_LS_2__DATA__SHIFT 0x00000000
+#define SPI_SHADER_USER_DATA_LS_3__DATA_MASK 0xffffffffL
+#define SPI_SHADER_USER_DATA_LS_3__DATA__SHIFT 0x00000000
+#define SPI_SHADER_USER_DATA_LS_4__DATA_MASK 0xffffffffL
+#define SPI_SHADER_USER_DATA_LS_4__DATA__SHIFT 0x00000000
+#define SPI_SHADER_USER_DATA_LS_5__DATA_MASK 0xffffffffL
+#define SPI_SHADER_USER_DATA_LS_5__DATA__SHIFT 0x00000000
+#define SPI_SHADER_USER_DATA_LS_6__DATA_MASK 0xffffffffL
+#define SPI_SHADER_USER_DATA_LS_6__DATA__SHIFT 0x00000000
+#define SPI_SHADER_USER_DATA_LS_7__DATA_MASK 0xffffffffL
+#define SPI_SHADER_USER_DATA_LS_7__DATA__SHIFT 0x00000000
+#define SPI_SHADER_USER_DATA_LS_8__DATA_MASK 0xffffffffL
+#define SPI_SHADER_USER_DATA_LS_8__DATA__SHIFT 0x00000000
+#define SPI_SHADER_USER_DATA_LS_9__DATA_MASK 0xffffffffL
+#define SPI_SHADER_USER_DATA_LS_9__DATA__SHIFT 0x00000000
+#define SPI_SHADER_USER_DATA_PS_0__DATA_MASK 0xffffffffL
+#define SPI_SHADER_USER_DATA_PS_0__DATA__SHIFT 0x00000000
+#define SPI_SHADER_USER_DATA_PS_10__DATA_MASK 0xffffffffL
+#define SPI_SHADER_USER_DATA_PS_10__DATA__SHIFT 0x00000000
+#define SPI_SHADER_USER_DATA_PS_11__DATA_MASK 0xffffffffL
+#define SPI_SHADER_USER_DATA_PS_11__DATA__SHIFT 0x00000000
+#define SPI_SHADER_USER_DATA_PS_12__DATA_MASK 0xffffffffL
+#define SPI_SHADER_USER_DATA_PS_12__DATA__SHIFT 0x00000000
+#define SPI_SHADER_USER_DATA_PS_13__DATA_MASK 0xffffffffL
+#define SPI_SHADER_USER_DATA_PS_13__DATA__SHIFT 0x00000000
+#define SPI_SHADER_USER_DATA_PS_14__DATA_MASK 0xffffffffL
+#define SPI_SHADER_USER_DATA_PS_14__DATA__SHIFT 0x00000000
+#define SPI_SHADER_USER_DATA_PS_15__DATA_MASK 0xffffffffL
+#define SPI_SHADER_USER_DATA_PS_15__DATA__SHIFT 0x00000000
+#define SPI_SHADER_USER_DATA_PS_1__DATA_MASK 0xffffffffL
+#define SPI_SHADER_USER_DATA_PS_1__DATA__SHIFT 0x00000000
+#define SPI_SHADER_USER_DATA_PS_2__DATA_MASK 0xffffffffL
+#define SPI_SHADER_USER_DATA_PS_2__DATA__SHIFT 0x00000000
+#define SPI_SHADER_USER_DATA_PS_3__DATA_MASK 0xffffffffL
+#define SPI_SHADER_USER_DATA_PS_3__DATA__SHIFT 0x00000000
+#define SPI_SHADER_USER_DATA_PS_4__DATA_MASK 0xffffffffL
+#define SPI_SHADER_USER_DATA_PS_4__DATA__SHIFT 0x00000000
+#define SPI_SHADER_USER_DATA_PS_5__DATA_MASK 0xffffffffL
+#define SPI_SHADER_USER_DATA_PS_5__DATA__SHIFT 0x00000000
+#define SPI_SHADER_USER_DATA_PS_6__DATA_MASK 0xffffffffL
+#define SPI_SHADER_USER_DATA_PS_6__DATA__SHIFT 0x00000000
+#define SPI_SHADER_USER_DATA_PS_7__DATA_MASK 0xffffffffL
+#define SPI_SHADER_USER_DATA_PS_7__DATA__SHIFT 0x00000000
+#define SPI_SHADER_USER_DATA_PS_8__DATA_MASK 0xffffffffL
+#define SPI_SHADER_USER_DATA_PS_8__DATA__SHIFT 0x00000000
+#define SPI_SHADER_USER_DATA_PS_9__DATA_MASK 0xffffffffL
+#define SPI_SHADER_USER_DATA_PS_9__DATA__SHIFT 0x00000000
+#define SPI_SHADER_USER_DATA_VS_0__DATA_MASK 0xffffffffL
+#define SPI_SHADER_USER_DATA_VS_0__DATA__SHIFT 0x00000000
+#define SPI_SHADER_USER_DATA_VS_10__DATA_MASK 0xffffffffL
+#define SPI_SHADER_USER_DATA_VS_10__DATA__SHIFT 0x00000000
+#define SPI_SHADER_USER_DATA_VS_11__DATA_MASK 0xffffffffL
+#define SPI_SHADER_USER_DATA_VS_11__DATA__SHIFT 0x00000000
+#define SPI_SHADER_USER_DATA_VS_12__DATA_MASK 0xffffffffL
+#define SPI_SHADER_USER_DATA_VS_12__DATA__SHIFT 0x00000000
+#define SPI_SHADER_USER_DATA_VS_13__DATA_MASK 0xffffffffL
+#define SPI_SHADER_USER_DATA_VS_13__DATA__SHIFT 0x00000000
+#define SPI_SHADER_USER_DATA_VS_14__DATA_MASK 0xffffffffL
+#define SPI_SHADER_USER_DATA_VS_14__DATA__SHIFT 0x00000000
+#define SPI_SHADER_USER_DATA_VS_15__DATA_MASK 0xffffffffL
+#define SPI_SHADER_USER_DATA_VS_15__DATA__SHIFT 0x00000000
+#define SPI_SHADER_USER_DATA_VS_1__DATA_MASK 0xffffffffL
+#define SPI_SHADER_USER_DATA_VS_1__DATA__SHIFT 0x00000000
+#define SPI_SHADER_USER_DATA_VS_2__DATA_MASK 0xffffffffL
+#define SPI_SHADER_USER_DATA_VS_2__DATA__SHIFT 0x00000000
+#define SPI_SHADER_USER_DATA_VS_3__DATA_MASK 0xffffffffL
+#define SPI_SHADER_USER_DATA_VS_3__DATA__SHIFT 0x00000000
+#define SPI_SHADER_USER_DATA_VS_4__DATA_MASK 0xffffffffL
+#define SPI_SHADER_USER_DATA_VS_4__DATA__SHIFT 0x00000000
+#define SPI_SHADER_USER_DATA_VS_5__DATA_MASK 0xffffffffL
+#define SPI_SHADER_USER_DATA_VS_5__DATA__SHIFT 0x00000000
+#define SPI_SHADER_USER_DATA_VS_6__DATA_MASK 0xffffffffL
+#define SPI_SHADER_USER_DATA_VS_6__DATA__SHIFT 0x00000000
+#define SPI_SHADER_USER_DATA_VS_7__DATA_MASK 0xffffffffL
+#define SPI_SHADER_USER_DATA_VS_7__DATA__SHIFT 0x00000000
+#define SPI_SHADER_USER_DATA_VS_8__DATA_MASK 0xffffffffL
+#define SPI_SHADER_USER_DATA_VS_8__DATA__SHIFT 0x00000000
+#define SPI_SHADER_USER_DATA_VS_9__DATA_MASK 0xffffffffL
+#define SPI_SHADER_USER_DATA_VS_9__DATA__SHIFT 0x00000000
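+
+/*
+ * Usage sketch (illustrative): each USER_DATA register carries a raw
+ * 32-bit payload, so drivers commonly split a 64-bit descriptor address
+ * across two consecutive slots.  The helper below is hypothetical.
+ */
+static inline void
+spi_shader_user_data_ptr(unsigned long long va,
+			 unsigned int *slot_lo, unsigned int *slot_hi)
+{
+	*slot_lo = (unsigned int)va & SPI_SHADER_USER_DATA_VS_0__DATA_MASK;
+	*slot_hi = (unsigned int)(va >> 32) &
+		   SPI_SHADER_USER_DATA_VS_1__DATA_MASK;
+}
+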
+#define SPI_SHADER_Z_FORMAT__Z_EXPORT_FORMAT_MASK 0x0000000fL
+#define SPI_SHADER_Z_FORMAT__Z_EXPORT_FORMAT__SHIFT 0x00000000
+#define SPI_SLAVE_DEBUG_BUSY__ES_VTX_BUSY_MASK 0x00000004L
+#define SPI_SLAVE_DEBUG_BUSY__ES_VTX_BUSY__SHIFT 0x00000002
+#define SPI_SLAVE_DEBUG_BUSY__EVENT_CNTL_BUSY_MASK 0x00200000L
+#define SPI_SLAVE_DEBUG_BUSY__EVENT_CNTL_BUSY__SHIFT 0x00000015
+#define SPI_SLAVE_DEBUG_BUSY__GS_VTX_BUSY_MASK 0x00000008L
+#define SPI_SLAVE_DEBUG_BUSY__GS_VTX_BUSY__SHIFT 0x00000003
+#define SPI_SLAVE_DEBUG_BUSY__HS_VTX_BUSY_MASK 0x00000002L
+#define SPI_SLAVE_DEBUG_BUSY__HS_VTX_BUSY__SHIFT 0x00000001
+#define SPI_SLAVE_DEBUG_BUSY__LS_VTX_BUSY_MASK 0x00000001L
+#define SPI_SLAVE_DEBUG_BUSY__LS_VTX_BUSY__SHIFT 0x00000000
+#define SPI_SLAVE_DEBUG_BUSY__SGPR_WC00_BUSY_MASK 0x00000200L
+#define SPI_SLAVE_DEBUG_BUSY__SGPR_WC00_BUSY__SHIFT 0x00000009
+#define SPI_SLAVE_DEBUG_BUSY__SGPR_WC01_BUSY_MASK 0x00000400L
+#define SPI_SLAVE_DEBUG_BUSY__SGPR_WC01_BUSY__SHIFT 0x0000000a
+#define SPI_SLAVE_DEBUG_BUSY__SGPR_WC02_BUSY_MASK 0x00000800L
+#define SPI_SLAVE_DEBUG_BUSY__SGPR_WC02_BUSY__SHIFT 0x0000000b
+#define SPI_SLAVE_DEBUG_BUSY__SGPR_WC03_BUSY_MASK 0x00001000L
+#define SPI_SLAVE_DEBUG_BUSY__SGPR_WC03_BUSY__SHIFT 0x0000000c
+#define SPI_SLAVE_DEBUG_BUSY__SGPR_WC10_BUSY_MASK 0x00002000L
+#define SPI_SLAVE_DEBUG_BUSY__SGPR_WC10_BUSY__SHIFT 0x0000000d
+#define SPI_SLAVE_DEBUG_BUSY__SGPR_WC11_BUSY_MASK 0x00004000L
+#define SPI_SLAVE_DEBUG_BUSY__SGPR_WC11_BUSY__SHIFT 0x0000000e
+#define SPI_SLAVE_DEBUG_BUSY__SGPR_WC12_BUSY_MASK 0x00008000L
+#define SPI_SLAVE_DEBUG_BUSY__SGPR_WC12_BUSY__SHIFT 0x0000000f
+#define SPI_SLAVE_DEBUG_BUSY__SGPR_WC13_BUSY_MASK 0x00010000L
+#define SPI_SLAVE_DEBUG_BUSY__SGPR_WC13_BUSY__SHIFT 0x00000010
+#define SPI_SLAVE_DEBUG_BUSY__VGPR_WC00_BUSY_MASK 0x00000020L
+#define SPI_SLAVE_DEBUG_BUSY__VGPR_WC00_BUSY__SHIFT 0x00000005
+#define SPI_SLAVE_DEBUG_BUSY__VGPR_WC01_BUSY_MASK 0x00000040L
+#define SPI_SLAVE_DEBUG_BUSY__VGPR_WC01_BUSY__SHIFT 0x00000006
+#define SPI_SLAVE_DEBUG_BUSY__VGPR_WC10_BUSY_MASK 0x00000080L
+#define SPI_SLAVE_DEBUG_BUSY__VGPR_WC10_BUSY__SHIFT 0x00000007
+#define SPI_SLAVE_DEBUG_BUSY__VGPR_WC11_BUSY_MASK 0x00000100L
+#define SPI_SLAVE_DEBUG_BUSY__VGPR_WC11_BUSY__SHIFT 0x00000008
+#define SPI_SLAVE_DEBUG_BUSY__VS_VTX_BUSY_MASK 0x00000010L
+#define SPI_SLAVE_DEBUG_BUSY__VS_VTX_BUSY__SHIFT 0x00000004
+#define SPI_SLAVE_DEBUG_BUSY__WAVEBUFFER0_BUSY_MASK 0x00020000L
+#define SPI_SLAVE_DEBUG_BUSY__WAVEBUFFER0_BUSY__SHIFT 0x00000011
+#define SPI_SLAVE_DEBUG_BUSY__WAVEBUFFER1_BUSY_MASK 0x00040000L
+#define SPI_SLAVE_DEBUG_BUSY__WAVEBUFFER1_BUSY__SHIFT 0x00000012
+#define SPI_SLAVE_DEBUG_BUSY__WAVE_WC0_BUSY_MASK 0x00080000L
+#define SPI_SLAVE_DEBUG_BUSY__WAVE_WC0_BUSY__SHIFT 0x00000013
+#define SPI_SLAVE_DEBUG_BUSY__WAVE_WC1_BUSY_MASK 0x00100000L
+#define SPI_SLAVE_DEBUG_BUSY__WAVE_WC1_BUSY__SHIFT 0x00000014
+#define SPI_SX_EXPORT_BUFFER_SIZES__COLOR_BUFFER_SIZE_MASK 0x0000ffffL
+#define SPI_SX_EXPORT_BUFFER_SIZES__COLOR_BUFFER_SIZE__SHIFT 0x00000000
+#define SPI_SX_EXPORT_BUFFER_SIZES__POSITION_BUFFER_SIZE_MASK 0xffff0000L
+#define SPI_SX_EXPORT_BUFFER_SIZES__POSITION_BUFFER_SIZE__SHIFT 0x00000010
+#define SPI_SX_SCOREBOARD_BUFFER_SIZES__COLOR_SCOREBOARD_SIZE_MASK 0x0000ffffL
+#define SPI_SX_SCOREBOARD_BUFFER_SIZES__COLOR_SCOREBOARD_SIZE__SHIFT 0x00000000
+#define SPI_SX_SCOREBOARD_BUFFER_SIZES__POSITION_SCOREBOARD_SIZE_MASK 0xffff0000L
+#define SPI_SX_SCOREBOARD_BUFFER_SIZES__POSITION_SCOREBOARD_SIZE__SHIFT 0x00000010
+#define SPI_TMPRING_SIZE__WAVESIZE_MASK 0x01fff000L
+#define SPI_TMPRING_SIZE__WAVESIZE__SHIFT 0x0000000c
+#define SPI_TMPRING_SIZE__WAVES_MASK 0x00000fffL
+#define SPI_TMPRING_SIZE__WAVES__SHIFT 0x00000000
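+
+/*
+ * Usage sketch (illustrative): SPI_TMPRING_SIZE pairs a wave count with a
+ * per-wave scratch size.  Open-source drivers program WAVESIZE in 256-dword
+ * (1 KiB) units on this generation; that granularity is an assumption here,
+ * not something this header encodes.
+ */
+static inline unsigned int
+spi_tmpring_size_value(unsigned int waves, unsigned int bytes_per_wave)
+{
+	return ((waves << SPI_TMPRING_SIZE__WAVES__SHIFT) &
+		SPI_TMPRING_SIZE__WAVES_MASK) |
+	       (((bytes_per_wave >> 10) << SPI_TMPRING_SIZE__WAVESIZE__SHIFT) &
+		SPI_TMPRING_SIZE__WAVESIZE_MASK);
+}
+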
+#define SPI_VS_OUT_CONFIG__VS_EXPORT_COUNT_MASK 0x0000003eL
+#define SPI_VS_OUT_CONFIG__VS_EXPORT_COUNT__SHIFT 0x00000001
+#define SPI_VS_OUT_CONFIG__VS_HALF_PACK_MASK 0x00000040L
+#define SPI_VS_OUT_CONFIG__VS_HALF_PACK__SHIFT 0x00000006
+#define SQ_ALU_CLK_CTRL__FORCE_CU_ON_SH0_MASK 0x0000ffffL
+#define SQ_ALU_CLK_CTRL__FORCE_CU_ON_SH0__SHIFT 0x00000000
+#define SQ_ALU_CLK_CTRL__FORCE_CU_ON_SH1_MASK 0xffff0000L
+#define SQ_ALU_CLK_CTRL__FORCE_CU_ON_SH1__SHIFT 0x00000010
+#define SQ_BUF_RSRC_WORD0__BASE_ADDRESS_MASK 0xffffffffL
+#define SQ_BUF_RSRC_WORD0__BASE_ADDRESS__SHIFT 0x00000000
+#define SQ_BUF_RSRC_WORD1__BASE_ADDRESS_HI_MASK 0x0000ffffL
+#define SQ_BUF_RSRC_WORD1__BASE_ADDRESS_HI__SHIFT 0x00000000
+#define SQ_BUF_RSRC_WORD1__CACHE_SWIZZLE_MASK 0x40000000L
+#define SQ_BUF_RSRC_WORD1__CACHE_SWIZZLE__SHIFT 0x0000001e
+#define SQ_BUF_RSRC_WORD1__STRIDE_MASK 0x3fff0000L
+#define SQ_BUF_RSRC_WORD1__STRIDE__SHIFT 0x00000010
+#define SQ_BUF_RSRC_WORD1__SWIZZLE_ENABLE_MASK 0x80000000L
+#define SQ_BUF_RSRC_WORD1__SWIZZLE_ENABLE__SHIFT 0x0000001f
+#define SQ_BUF_RSRC_WORD2__NUM_RECORDS_MASK 0xffffffffL
+#define SQ_BUF_RSRC_WORD2__NUM_RECORDS__SHIFT 0x00000000
+#define SQ_BUF_RSRC_WORD3__ADD_TID_ENABLE_MASK 0x00800000L
+#define SQ_BUF_RSRC_WORD3__ADD_TID_ENABLE__SHIFT 0x00000017
+#define SQ_BUF_RSRC_WORD3__ATC_MASK 0x01000000L
+#define SQ_BUF_RSRC_WORD3__ATC__SHIFT 0x00000018
+#define SQ_BUF_RSRC_WORD3__DATA_FORMAT_MASK 0x00078000L
+#define SQ_BUF_RSRC_WORD3__DATA_FORMAT__SHIFT 0x0000000f
+#define SQ_BUF_RSRC_WORD3__DST_SEL_W_MASK 0x00000e00L
+#define SQ_BUF_RSRC_WORD3__DST_SEL_W__SHIFT 0x00000009
+#define SQ_BUF_RSRC_WORD3__DST_SEL_X_MASK 0x00000007L
+#define SQ_BUF_RSRC_WORD3__DST_SEL_X__SHIFT 0x00000000
+#define SQ_BUF_RSRC_WORD3__DST_SEL_Y_MASK 0x00000038L
+#define SQ_BUF_RSRC_WORD3__DST_SEL_Y__SHIFT 0x00000003
+#define SQ_BUF_RSRC_WORD3__DST_SEL_Z_MASK 0x000001c0L
+#define SQ_BUF_RSRC_WORD3__DST_SEL_Z__SHIFT 0x00000006
+#define SQ_BUF_RSRC_WORD3__ELEMENT_SIZE_MASK 0x00180000L
+#define SQ_BUF_RSRC_WORD3__ELEMENT_SIZE__SHIFT 0x00000013
+#define SQ_BUF_RSRC_WORD3__HASH_ENABLE_MASK 0x02000000L
+#define SQ_BUF_RSRC_WORD3__HASH_ENABLE__SHIFT 0x00000019
+#define SQ_BUF_RSRC_WORD3__HEAP_MASK 0x04000000L
+#define SQ_BUF_RSRC_WORD3__HEAP__SHIFT 0x0000001a
+#define SQ_BUF_RSRC_WORD3__INDEX_STRIDE_MASK 0x00600000L
+#define SQ_BUF_RSRC_WORD3__INDEX_STRIDE__SHIFT 0x00000015
+#define SQ_BUF_RSRC_WORD3__MTYPE_MASK 0x38000000L
+#define SQ_BUF_RSRC_WORD3__MTYPE__SHIFT 0x0000001b
+#define SQ_BUF_RSRC_WORD3__NUM_FORMAT_MASK 0x00007000L
+#define SQ_BUF_RSRC_WORD3__NUM_FORMAT__SHIFT 0x0000000c
+#define SQ_BUF_RSRC_WORD3__TYPE_MASK 0xc0000000L
+#define SQ_BUF_RSRC_WORD3__TYPE__SHIFT 0x0000001e
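+
+/*
+ * Usage sketch (illustrative): SQ_BUF_RSRC_WORD0..3 describe the four
+ * dwords of a buffer resource descriptor.  A minimal raw-buffer descriptor
+ * could be assembled as below; the WORD3 format/dst_sel choices are left
+ * to the caller, and the helper itself is hypothetical.
+ */
+static inline void
+sq_buf_rsrc_build(unsigned long long va, unsigned int stride,
+		  unsigned int num_records, unsigned int desc[4])
+{
+	desc[0] = (unsigned int)va;			/* WORD0: base address */
+	desc[1] = ((unsigned int)(va >> 32) &
+		   SQ_BUF_RSRC_WORD1__BASE_ADDRESS_HI_MASK) |
+		  ((stride << SQ_BUF_RSRC_WORD1__STRIDE__SHIFT) &
+		   SQ_BUF_RSRC_WORD1__STRIDE_MASK);
+	desc[2] = num_records;				/* WORD2: num records */
+	desc[3] = 0;		/* WORD3: dst_sel/format filled in by caller */
+}
+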
+#define SQC_CACHES__DATA_INVALIDATE_MASK 0x00000002L
+#define SQC_CACHES__DATA_INVALIDATE__SHIFT 0x00000001
+#define SQC_CACHES__INST_INVALIDATE_MASK 0x00000001L
+#define SQC_CACHES__INST_INVALIDATE__SHIFT 0x00000000
+#define SQC_CACHES__INVALIDATE_VOLATILE_MASK 0x00000004L
+#define SQC_CACHES__INVALIDATE_VOLATILE__SHIFT 0x00000002
+#define SQC_CONFIG__DATA_CACHE_SIZE_MASK 0x0000000cL
+#define SQC_CONFIG__DATA_CACHE_SIZE__SHIFT 0x00000002
+#define SQC_CONFIG__FORCE_ALWAYS_MISS_MASK 0x00000080L
+#define SQC_CONFIG__FORCE_ALWAYS_MISS__SHIFT 0x00000007
+#define SQC_CONFIG__FORCE_IN_ORDER_MASK 0x00000100L
+#define SQC_CONFIG__FORCE_IN_ORDER__SHIFT 0x00000008
+#define SQC_CONFIG__HIT_FIFO_DEPTH_MASK 0x00000040L
+#define SQC_CONFIG__HIT_FIFO_DEPTH__SHIFT 0x00000006
+#define SQC_CONFIG__IDENTITY_HASH_BANK_MASK 0x00000200L
+#define SQC_CONFIG__IDENTITY_HASH_BANK__SHIFT 0x00000009
+#define SQC_CONFIG__IDENTITY_HASH_SET_MASK 0x00000400L
+#define SQC_CONFIG__IDENTITY_HASH_SET__SHIFT 0x0000000a
+#define SQC_CONFIG__INST_CACHE_SIZE_MASK 0x00000003L
+#define SQC_CONFIG__INST_CACHE_SIZE__SHIFT 0x00000000
+#define SQC_CONFIG__MISS_FIFO_DEPTH_MASK 0x00000030L
+#define SQC_CONFIG__MISS_FIFO_DEPTH__SHIFT 0x00000004
+#define SQC_CONFIG__PER_VMID_INV_DISABLE_MASK 0x00000800L
+#define SQC_CONFIG__PER_VMID_INV_DISABLE__SHIFT 0x0000000b
+#define SQ_CONFIG__DEBUG_EN_MASK 0x00000100L
+#define SQ_CONFIG__DEBUG_EN__SHIFT 0x00000008
+#define SQ_CONFIG__DISABLE_IB_DEP_CHECK_MASK 0x00000400L
+#define SQ_CONFIG__DISABLE_IB_DEP_CHECK__SHIFT 0x0000000a
+#define SQ_CONFIG__DISABLE_SCA_BYPASS_MASK 0x00000200L
+#define SQ_CONFIG__DISABLE_SCA_BYPASS__SHIFT 0x00000009
+#define SQ_CONFIG__DUA_FLAT_LDS_PINGPONG_DISABLE_MASK 0x00008000L
+#define SQ_CONFIG__DUA_FLAT_LDS_PINGPONG_DISABLE__SHIFT 0x0000000f
+#define SQ_CONFIG__DUA_FLAT_LOCK_ENABLE_MASK 0x00002000L
+#define SQ_CONFIG__DUA_FLAT_LOCK_ENABLE__SHIFT 0x0000000d
+#define SQ_CONFIG__DUA_LDS_BYPASS_DISABLE_MASK 0x00004000L
+#define SQ_CONFIG__DUA_LDS_BYPASS_DISABLE__SHIFT 0x0000000e
+#define SQ_CONFIG__EARLY_TA_DONE_DISABLE_MASK 0x00001000L
+#define SQ_CONFIG__EARLY_TA_DONE_DISABLE__SHIFT 0x0000000c
+#define SQ_CONFIG__ENABLE_SOFT_CLAUSE_MASK 0x00000800L
+#define SQ_CONFIG__ENABLE_SOFT_CLAUSE__SHIFT 0x0000000b
+#define SQ_CONFIG__UNUSED_MASK 0x000000ffL
+#define SQ_CONFIG__UNUSED__SHIFT 0x00000000
+#define SQC_SECDED_CNT__DATA_DED_MASK 0xff000000L
+#define SQC_SECDED_CNT__DATA_DED__SHIFT 0x00000018
+#define SQC_SECDED_CNT__DATA_SEC_MASK 0x00ff0000L
+#define SQC_SECDED_CNT__DATA_SEC__SHIFT 0x00000010
+#define SQC_SECDED_CNT__INST_DED_MASK 0x0000ff00L
+#define SQC_SECDED_CNT__INST_DED__SHIFT 0x00000008
+#define SQC_SECDED_CNT__INST_SEC_MASK 0x000000ffL
+#define SQC_SECDED_CNT__INST_SEC__SHIFT 0x00000000
+#define SQ_DEBUG_CTRL_LOCAL__UNUSED_MASK 0x000000ffL
+#define SQ_DEBUG_CTRL_LOCAL__UNUSED__SHIFT 0x00000000
+#define SQ_DEBUG_STS_GLOBAL2__FIFO_LEVEL_GFX0_MASK 0x000000ffL
+#define SQ_DEBUG_STS_GLOBAL2__FIFO_LEVEL_GFX0__SHIFT 0x00000000
+#define SQ_DEBUG_STS_GLOBAL2__FIFO_LEVEL_GFX1_MASK 0x0000ff00L
+#define SQ_DEBUG_STS_GLOBAL2__FIFO_LEVEL_GFX1__SHIFT 0x00000008
+#define SQ_DEBUG_STS_GLOBAL2__FIFO_LEVEL_HOST_MASK 0xff000000L
+#define SQ_DEBUG_STS_GLOBAL2__FIFO_LEVEL_HOST__SHIFT 0x00000018
+#define SQ_DEBUG_STS_GLOBAL2__FIFO_LEVEL_IMMED_MASK 0x00ff0000L
+#define SQ_DEBUG_STS_GLOBAL2__FIFO_LEVEL_IMMED__SHIFT 0x00000010
+#define SQ_DEBUG_STS_GLOBAL3__FIFO_LEVEL_HOST_CMD_MASK 0x0000000fL
+#define SQ_DEBUG_STS_GLOBAL3__FIFO_LEVEL_HOST_CMD__SHIFT 0x00000000
+#define SQ_DEBUG_STS_GLOBAL3__FIFO_LEVEL_HOST_REG_MASK 0x000000f0L
+#define SQ_DEBUG_STS_GLOBAL3__FIFO_LEVEL_HOST_REG__SHIFT 0x00000004
+#define SQ_DEBUG_STS_GLOBAL__BUSY_MASK 0x00000001L
+#define SQ_DEBUG_STS_GLOBAL__BUSY__SHIFT 0x00000000
+#define SQ_DEBUG_STS_GLOBAL__INTERRUPT_MSG_BUSY_MASK 0x00000002L
+#define SQ_DEBUG_STS_GLOBAL__INTERRUPT_MSG_BUSY__SHIFT 0x00000001
+#define SQ_DEBUG_STS_GLOBAL__WAVE_LEVEL_SH0_MASK 0x0000fff0L
+#define SQ_DEBUG_STS_GLOBAL__WAVE_LEVEL_SH0__SHIFT 0x00000004
+#define SQ_DEBUG_STS_GLOBAL__WAVE_LEVEL_SH1_MASK 0x0fff0000L
+#define SQ_DEBUG_STS_GLOBAL__WAVE_LEVEL_SH1__SHIFT 0x00000010
+#define SQ_DEBUG_STS_LOCAL__BUSY_MASK 0x00000001L
+#define SQ_DEBUG_STS_LOCAL__BUSY__SHIFT 0x00000000
+#define SQ_DEBUG_STS_LOCAL__WAVE_LEVEL_MASK 0x000003f0L
+#define SQ_DEBUG_STS_LOCAL__WAVE_LEVEL__SHIFT 0x00000004
+#define SQ_DED_CNT__LDS_DED_MASK 0x0000003fL
+#define SQ_DED_CNT__LDS_DED__SHIFT 0x00000000
+#define SQ_DED_CNT__SGPR_DED_MASK 0x00001f00L
+#define SQ_DED_CNT__SGPR_DED__SHIFT 0x00000008
+#define SQ_DED_CNT__VGPR_DED_MASK 0x01ff0000L
+#define SQ_DED_CNT__VGPR_DED__SHIFT 0x00000010
+#define SQ_DED_INFO__SIMD_ID_MASK 0x00000030L
+#define SQ_DED_INFO__SIMD_ID__SHIFT 0x00000004
+#define SQ_DED_INFO__SOURCE_MASK 0x000001c0L
+#define SQ_DED_INFO__SOURCE__SHIFT 0x00000006
+#define SQ_DED_INFO__VM_ID_MASK 0x00001e00L
+#define SQ_DED_INFO__VM_ID__SHIFT 0x00000009
+#define SQ_DED_INFO__WAVE_ID_MASK 0x0000000fL
+#define SQ_DED_INFO__WAVE_ID__SHIFT 0x00000000
+#define SQ_DS_0__ENCODING_MASK 0xfc000000L
+#define SQ_DS_0__ENCODING__SHIFT 0x0000001a
+#define SQ_DS_0__GDS_MASK 0x00020000L
+#define SQ_DS_0__GDS__SHIFT 0x00000011
+#define SQ_DS_0__OFFSET0_MASK 0x000000ffL
+#define SQ_DS_0__OFFSET0__SHIFT 0x00000000
+#define SQ_DS_0__OFFSET1_MASK 0x0000ff00L
+#define SQ_DS_0__OFFSET1__SHIFT 0x00000008
+#define SQ_DS_0__OP_MASK 0x03fc0000L
+#define SQ_DS_0__OP__SHIFT 0x00000012
+#define SQ_DS_1__ADDR_MASK 0x000000ffL
+#define SQ_DS_1__ADDR__SHIFT 0x00000000
+#define SQ_DS_1__DATA0_MASK 0x0000ff00L
+#define SQ_DS_1__DATA0__SHIFT 0x00000008
+#define SQ_DS_1__DATA1_MASK 0x00ff0000L
+#define SQ_DS_1__DATA1__SHIFT 0x00000010
+#define SQ_DS_1__VDST_MASK 0xff000000L
+#define SQ_DS_1__VDST__SHIFT 0x00000018
+#define SQ_EXP_0__COMPR_MASK 0x00000400L
+#define SQ_EXP_0__COMPR__SHIFT 0x0000000a
+#define SQ_EXP_0__DONE_MASK 0x00000800L
+#define SQ_EXP_0__DONE__SHIFT 0x0000000b
+#define SQ_EXP_0__ENCODING_MASK 0xfc000000L
+#define SQ_EXP_0__ENCODING__SHIFT 0x0000001a
+#define SQ_EXP_0__EN_MASK 0x0000000fL
+#define SQ_EXP_0__EN__SHIFT 0x00000000
+#define SQ_EXP_0__TGT_MASK 0x000003f0L
+#define SQ_EXP_0__TGT__SHIFT 0x00000004
+#define SQ_EXP_0__VM_MASK 0x00001000L
+#define SQ_EXP_0__VM__SHIFT 0x0000000c
+#define SQ_EXP_1__VSRC0_MASK 0x000000ffL
+#define SQ_EXP_1__VSRC0__SHIFT 0x00000000
+#define SQ_EXP_1__VSRC1_MASK 0x0000ff00L
+#define SQ_EXP_1__VSRC1__SHIFT 0x00000008
+#define SQ_EXP_1__VSRC2_MASK 0x00ff0000L
+#define SQ_EXP_1__VSRC2__SHIFT 0x00000010
+#define SQ_EXP_1__VSRC3_MASK 0xff000000L
+#define SQ_EXP_1__VSRC3__SHIFT 0x00000018
+#define SQ_FIFO_SIZES__EXPORT_BUF_SIZE_MASK 0x00030000L
+#define SQ_FIFO_SIZES__EXPORT_BUF_SIZE__SHIFT 0x00000010
+#define SQ_FIFO_SIZES__INTERRUPT_FIFO_SIZE_MASK 0x0000000fL
+#define SQ_FIFO_SIZES__INTERRUPT_FIFO_SIZE__SHIFT 0x00000000
+#define SQ_FIFO_SIZES__TTRACE_FIFO_SIZE_MASK 0x00000f00L
+#define SQ_FIFO_SIZES__TTRACE_FIFO_SIZE__SHIFT 0x00000008
+#define SQ_FIFO_SIZES__VMEM_DATA_FIFO_SIZE_MASK 0x000c0000L
+#define SQ_FIFO_SIZES__VMEM_DATA_FIFO_SIZE__SHIFT 0x00000012
+#define SQ_IMG_RSRC_WORD0__BASE_ADDRESS_MASK 0xffffffffL
+#define SQ_IMG_RSRC_WORD0__BASE_ADDRESS__SHIFT 0x00000000
+#define SQ_IMG_RSRC_WORD1__BASE_ADDRESS_HI_MASK 0x000000ffL
+#define SQ_IMG_RSRC_WORD1__BASE_ADDRESS_HI__SHIFT 0x00000000
+#define SQ_IMG_RSRC_WORD1__DATA_FORMAT_MASK 0x03f00000L
+#define SQ_IMG_RSRC_WORD1__DATA_FORMAT__SHIFT 0x00000014
+#define SQ_IMG_RSRC_WORD1__MIN_LOD_MASK 0x000fff00L
+#define SQ_IMG_RSRC_WORD1__MIN_LOD__SHIFT 0x00000008
+#define SQ_IMG_RSRC_WORD1__MTYPE_MASK 0xc0000000L
+#define SQ_IMG_RSRC_WORD1__MTYPE__SHIFT 0x0000001e
+#define SQ_IMG_RSRC_WORD1__NUM_FORMAT_MASK 0x3c000000L
+#define SQ_IMG_RSRC_WORD1__NUM_FORMAT__SHIFT 0x0000001a
+#define SQ_IMG_RSRC_WORD2__HEIGHT_MASK 0x0fffc000L
+#define SQ_IMG_RSRC_WORD2__HEIGHT__SHIFT 0x0000000e
+#define SQ_IMG_RSRC_WORD2__INTERLACED_MASK 0x80000000L
+#define SQ_IMG_RSRC_WORD2__INTERLACED__SHIFT 0x0000001f
+#define SQ_IMG_RSRC_WORD2__PERF_MOD_MASK 0x70000000L
+#define SQ_IMG_RSRC_WORD2__PERF_MOD__SHIFT 0x0000001c
+#define SQ_IMG_RSRC_WORD2__WIDTH_MASK 0x00003fffL
+#define SQ_IMG_RSRC_WORD2__WIDTH__SHIFT 0x00000000
+#define SQ_IMG_RSRC_WORD3__ATC_MASK 0x08000000L
+#define SQ_IMG_RSRC_WORD3__ATC__SHIFT 0x0000001b
+#define SQ_IMG_RSRC_WORD3__BASE_LEVEL_MASK 0x0000f000L
+#define SQ_IMG_RSRC_WORD3__BASE_LEVEL__SHIFT 0x0000000c
+#define SQ_IMG_RSRC_WORD3__DST_SEL_W_MASK 0x00000e00L
+#define SQ_IMG_RSRC_WORD3__DST_SEL_W__SHIFT 0x00000009
+#define SQ_IMG_RSRC_WORD3__DST_SEL_X_MASK 0x00000007L
+#define SQ_IMG_RSRC_WORD3__DST_SEL_X__SHIFT 0x00000000
+#define SQ_IMG_RSRC_WORD3__DST_SEL_Y_MASK 0x00000038L
+#define SQ_IMG_RSRC_WORD3__DST_SEL_Y__SHIFT 0x00000003
+#define SQ_IMG_RSRC_WORD3__DST_SEL_Z_MASK 0x000001c0L
+#define SQ_IMG_RSRC_WORD3__DST_SEL_Z__SHIFT 0x00000006
+#define SQ_IMG_RSRC_WORD3__LAST_LEVEL_MASK 0x000f0000L
+#define SQ_IMG_RSRC_WORD3__LAST_LEVEL__SHIFT 0x00000010
+#define SQ_IMG_RSRC_WORD3__MTYPE_MASK 0x04000000L
+#define SQ_IMG_RSRC_WORD3__MTYPE__SHIFT 0x0000001a
+#define SQ_IMG_RSRC_WORD3__POW2_PAD_MASK 0x02000000L
+#define SQ_IMG_RSRC_WORD3__POW2_PAD__SHIFT 0x00000019
+#define SQ_IMG_RSRC_WORD3__TILING_INDEX_MASK 0x01f00000L
+#define SQ_IMG_RSRC_WORD3__TILING_INDEX__SHIFT 0x00000014
+#define SQ_IMG_RSRC_WORD3__TYPE_MASK 0xf0000000L
+#define SQ_IMG_RSRC_WORD3__TYPE__SHIFT 0x0000001c
+#define SQ_IMG_RSRC_WORD4__DEPTH_MASK 0x00001fffL
+#define SQ_IMG_RSRC_WORD4__DEPTH__SHIFT 0x00000000
+#define SQ_IMG_RSRC_WORD4__PITCH_MASK 0x07ffe000L
+#define SQ_IMG_RSRC_WORD4__PITCH__SHIFT 0x0000000d
+#define SQ_IMG_RSRC_WORD5__BASE_ARRAY_MASK 0x00001fffL
+#define SQ_IMG_RSRC_WORD5__BASE_ARRAY__SHIFT 0x00000000
+#define SQ_IMG_RSRC_WORD5__LAST_ARRAY_MASK 0x03ffe000L
+#define SQ_IMG_RSRC_WORD5__LAST_ARRAY__SHIFT 0x0000000d
+#define SQ_IMG_RSRC_WORD6__COUNTER_BANK_ID_MASK 0x000ff000L
+#define SQ_IMG_RSRC_WORD6__COUNTER_BANK_ID__SHIFT 0x0000000c
+#define SQ_IMG_RSRC_WORD6__LOD_HDW_CNT_EN_MASK 0x00100000L
+#define SQ_IMG_RSRC_WORD6__LOD_HDW_CNT_EN__SHIFT 0x00000014
+#define SQ_IMG_RSRC_WORD6__MIN_LOD_WARN_MASK 0x00000fffL
+#define SQ_IMG_RSRC_WORD6__MIN_LOD_WARN__SHIFT 0x00000000
+#define SQ_IMG_RSRC_WORD6__UNUSED_MASK 0xffe00000L
+#define SQ_IMG_RSRC_WORD6__UNUSED__SHIFT 0x00000015
+#define SQ_IMG_RSRC_WORD7__UNUSED_MASK 0xffffffffL
+#define SQ_IMG_RSRC_WORD7__UNUSED__SHIFT 0x00000000
+#define SQ_IMG_SAMP_WORD0__ANISO_BIAS_MASK 0x07e00000L
+#define SQ_IMG_SAMP_WORD0__ANISO_BIAS__SHIFT 0x00000015
+#define SQ_IMG_SAMP_WORD0__ANISO_THRESHOLD_MASK 0x00070000L
+#define SQ_IMG_SAMP_WORD0__ANISO_THRESHOLD__SHIFT 0x00000010
+#define SQ_IMG_SAMP_WORD0__CLAMP_X_MASK 0x00000007L
+#define SQ_IMG_SAMP_WORD0__CLAMP_X__SHIFT 0x00000000
+#define SQ_IMG_SAMP_WORD0__CLAMP_Y_MASK 0x00000038L
+#define SQ_IMG_SAMP_WORD0__CLAMP_Y__SHIFT 0x00000003
+#define SQ_IMG_SAMP_WORD0__CLAMP_Z_MASK 0x000001c0L
+#define SQ_IMG_SAMP_WORD0__CLAMP_Z__SHIFT 0x00000006
+#define SQ_IMG_SAMP_WORD0__DEPTH_COMPARE_FUNC_MASK 0x00007000L
+#define SQ_IMG_SAMP_WORD0__DEPTH_COMPARE_FUNC__SHIFT 0x0000000c
+#define SQ_IMG_SAMP_WORD0__DISABLE_CUBE_WRAP_MASK 0x10000000L
+#define SQ_IMG_SAMP_WORD0__DISABLE_CUBE_WRAP__SHIFT 0x0000001c
+#define SQ_IMG_SAMP_WORD0__FILTER_MODE_MASK 0x60000000L
+#define SQ_IMG_SAMP_WORD0__FILTER_MODE__SHIFT 0x0000001d
+#define SQ_IMG_SAMP_WORD0__FORCE_DEGAMMA_MASK 0x00100000L
+#define SQ_IMG_SAMP_WORD0__FORCE_DEGAMMA__SHIFT 0x00000014
+#define SQ_IMG_SAMP_WORD0__FORCE_UNNORMALIZED_MASK 0x00008000L
+#define SQ_IMG_SAMP_WORD0__FORCE_UNNORMALIZED__SHIFT 0x0000000f
+#define SQ_IMG_SAMP_WORD0__MAX_ANISO_RATIO_MASK 0x00000e00L
+#define SQ_IMG_SAMP_WORD0__MAX_ANISO_RATIO__SHIFT 0x00000009
+#define SQ_IMG_SAMP_WORD0__MC_COORD_TRUNC_MASK 0x00080000L
+#define SQ_IMG_SAMP_WORD0__MC_COORD_TRUNC__SHIFT 0x00000013
+#define SQ_IMG_SAMP_WORD0__TRUNC_COORD_MASK 0x08000000L
+#define SQ_IMG_SAMP_WORD0__TRUNC_COORD__SHIFT 0x0000001b
+#define SQ_IMG_SAMP_WORD1__MAX_LOD_MASK 0x00fff000L
+#define SQ_IMG_SAMP_WORD1__MAX_LOD__SHIFT 0x0000000c
+#define SQ_IMG_SAMP_WORD1__MIN_LOD_MASK 0x00000fffL
+#define SQ_IMG_SAMP_WORD1__MIN_LOD__SHIFT 0x00000000
+#define SQ_IMG_SAMP_WORD1__PERF_MIP_MASK 0x0f000000L
+#define SQ_IMG_SAMP_WORD1__PERF_MIP__SHIFT 0x00000018
+#define SQ_IMG_SAMP_WORD1__PERF_Z_MASK 0xf0000000L
+#define SQ_IMG_SAMP_WORD1__PERF_Z__SHIFT 0x0000001c
+#define SQ_IMG_SAMP_WORD2__DISABLE_LSB_CEIL_MASK 0x20000000L
+#define SQ_IMG_SAMP_WORD2__DISABLE_LSB_CEIL__SHIFT 0x0000001d
+#define SQ_IMG_SAMP_WORD2__FILTER_PREC_FIX_MASK 0x40000000L
+#define SQ_IMG_SAMP_WORD2__FILTER_PREC_FIX__SHIFT 0x0000001e
+#define SQ_IMG_SAMP_WORD2__LOD_BIAS_MASK 0x00003fffL
+#define SQ_IMG_SAMP_WORD2__LOD_BIAS_SEC_MASK 0x000fc000L
+#define SQ_IMG_SAMP_WORD2__LOD_BIAS_SEC__SHIFT 0x0000000e
+#define SQ_IMG_SAMP_WORD2__LOD_BIAS__SHIFT 0x00000000
+#define SQ_IMG_SAMP_WORD2__MIP_FILTER_MASK 0x0c000000L
+#define SQ_IMG_SAMP_WORD2__MIP_FILTER__SHIFT 0x0000001a
+#define SQ_IMG_SAMP_WORD2__MIP_POINT_PRECLAMP_MASK 0x10000000L
+#define SQ_IMG_SAMP_WORD2__MIP_POINT_PRECLAMP__SHIFT 0x0000001c
+#define SQ_IMG_SAMP_WORD2__XY_MAG_FILTER_MASK 0x00300000L
+#define SQ_IMG_SAMP_WORD2__XY_MAG_FILTER__SHIFT 0x00000014
+#define SQ_IMG_SAMP_WORD2__XY_MIN_FILTER_MASK 0x00c00000L
+#define SQ_IMG_SAMP_WORD2__XY_MIN_FILTER__SHIFT 0x00000016
+#define SQ_IMG_SAMP_WORD2__Z_FILTER_MASK 0x03000000L
+#define SQ_IMG_SAMP_WORD2__Z_FILTER__SHIFT 0x00000018
+#define SQ_IMG_SAMP_WORD3__BORDER_COLOR_PTR_MASK 0x00000fffL
+#define SQ_IMG_SAMP_WORD3__BORDER_COLOR_PTR__SHIFT 0x00000000
+#define SQ_IMG_SAMP_WORD3__BORDER_COLOR_TYPE_MASK 0xc0000000L
+#define SQ_IMG_SAMP_WORD3__BORDER_COLOR_TYPE__SHIFT 0x0000001e
+#define SQ_IND_DATA__DATA_MASK 0xffffffffL
+#define SQ_IND_DATA__DATA__SHIFT 0x00000000
+#define SQ_IND_INDEX__AUTO_INCR_MASK 0x00001000L
+#define SQ_IND_INDEX__AUTO_INCR__SHIFT 0x0000000c
+#define SQ_IND_INDEX__FORCE_READ_MASK 0x00002000L
+#define SQ_IND_INDEX__FORCE_READ__SHIFT 0x0000000d
+#define SQ_IND_INDEX__INDEX_MASK 0xffff0000L
+#define SQ_IND_INDEX__INDEX__SHIFT 0x00000010
+#define SQ_IND_INDEX__READ_TIMEOUT_MASK 0x00004000L
+#define SQ_IND_INDEX__READ_TIMEOUT__SHIFT 0x0000000e
+#define SQ_IND_INDEX__SIMD_ID_MASK 0x00000030L
+#define SQ_IND_INDEX__SIMD_ID__SHIFT 0x00000004
+#define SQ_IND_INDEX__THREAD_ID_MASK 0x00000fc0L
+#define SQ_IND_INDEX__THREAD_ID__SHIFT 0x00000006
+#define SQ_IND_INDEX__UNINDEXED_MASK 0x00008000L
+#define SQ_IND_INDEX__UNINDEXED__SHIFT 0x0000000f
+#define SQ_IND_INDEX__WAVE_ID_MASK 0x0000000fL
+#define SQ_IND_INDEX__WAVE_ID__SHIFT 0x00000000
+#define SQ_INST__ENCODING_MASK 0xffffffffL
+#define SQ_INST__ENCODING__SHIFT 0x00000000
+#define SQ_INTERRUPT_WORD_AUTO__CMD_TIMESTAMP_MASK 0x00000010L
+#define SQ_INTERRUPT_WORD_AUTO__CMD_TIMESTAMP__SHIFT 0x00000004
+#define SQ_INTERRUPT_WORD_AUTO__ENCODING_MASK 0x0c000000L
+#define SQ_INTERRUPT_WORD_AUTO__ENCODING__SHIFT 0x0000001a
+#define SQ_INTERRUPT_WORD_AUTO__HOST_CMD_OVERFLOW_MASK 0x00000020L
+#define SQ_INTERRUPT_WORD_AUTO__HOST_CMD_OVERFLOW__SHIFT 0x00000005
+#define SQ_INTERRUPT_WORD_AUTO__HOST_REG_OVERFLOW_MASK 0x00000040L
+#define SQ_INTERRUPT_WORD_AUTO__HOST_REG_OVERFLOW__SHIFT 0x00000006
+#define SQ_INTERRUPT_WORD_AUTO__IMMED_OVERFLOW_MASK 0x00000080L
+#define SQ_INTERRUPT_WORD_AUTO__IMMED_OVERFLOW__SHIFT 0x00000007
+#define SQ_INTERRUPT_WORD_AUTO__REG_TIMESTAMP_MASK 0x00000008L
+#define SQ_INTERRUPT_WORD_AUTO__REG_TIMESTAMP__SHIFT 0x00000003
+#define SQ_INTERRUPT_WORD_AUTO__SE_ID_MASK 0x02000000L
+#define SQ_INTERRUPT_WORD_AUTO__SE_ID__SHIFT 0x00000019
+#define SQ_INTERRUPT_WORD_AUTO__THREAD_TRACE_BUF_FULL_MASK 0x00000004L
+#define SQ_INTERRUPT_WORD_AUTO__THREAD_TRACE_BUF_FULL__SHIFT 0x00000002
+#define SQ_INTERRUPT_WORD_AUTO__THREAD_TRACE_MASK 0x00000001L
+#define SQ_INTERRUPT_WORD_AUTO__THREAD_TRACE__SHIFT 0x00000000
+#define SQ_INTERRUPT_WORD_AUTO__WLT_MASK 0x00000002L
+#define SQ_INTERRUPT_WORD_AUTO__WLT__SHIFT 0x00000001
+#define SQ_INTERRUPT_WORD_CMN__ENCODING_MASK 0x0c000000L
+#define SQ_INTERRUPT_WORD_CMN__ENCODING__SHIFT 0x0000001a
+#define SQ_INTERRUPT_WORD_CMN__SE_ID_MASK 0x02000000L
+#define SQ_INTERRUPT_WORD_CMN__SE_ID__SHIFT 0x00000019
+#define SQ_INTERRUPT_WORD_WAVE__CU_ID_MASK 0x00f00000L
+#define SQ_INTERRUPT_WORD_WAVE__CU_ID__SHIFT 0x00000014
+#define SQ_INTERRUPT_WORD_WAVE__DATA_MASK 0x000000ffL
+#define SQ_INTERRUPT_WORD_WAVE__DATA__SHIFT 0x00000000
+#define SQ_INTERRUPT_WORD_WAVE__ENCODING_MASK 0x0c000000L
+#define SQ_INTERRUPT_WORD_WAVE__ENCODING__SHIFT 0x0000001a
+#define SQ_INTERRUPT_WORD_WAVE__PRIV_MASK 0x00000200L
+#define SQ_INTERRUPT_WORD_WAVE__PRIV__SHIFT 0x00000009
+#define SQ_INTERRUPT_WORD_WAVE__SE_ID_MASK 0x02000000L
+#define SQ_INTERRUPT_WORD_WAVE__SE_ID__SHIFT 0x00000019
+#define SQ_INTERRUPT_WORD_WAVE__SH_ID_MASK 0x01000000L
+#define SQ_INTERRUPT_WORD_WAVE__SH_ID__SHIFT 0x00000018
+#define SQ_INTERRUPT_WORD_WAVE__SIMD_ID_MASK 0x000c0000L
+#define SQ_INTERRUPT_WORD_WAVE__SIMD_ID__SHIFT 0x00000012
+#define SQ_INTERRUPT_WORD_WAVE__VM_ID_MASK 0x00003c00L
+#define SQ_INTERRUPT_WORD_WAVE__VM_ID__SHIFT 0x0000000a
+#define SQ_INTERRUPT_WORD_WAVE__WAVE_ID_MASK 0x0003c000L
+#define SQ_INTERRUPT_WORD_WAVE__WAVE_ID__SHIFT 0x0000000e
+#define SQ_LB_CTR_CTRL__CLEAR_MASK 0x00000004L
+#define SQ_LB_CTR_CTRL__CLEAR__SHIFT 0x00000002
+#define SQ_LB_CTR_CTRL__LOAD_MASK 0x00000002L
+#define SQ_LB_CTR_CTRL__LOAD__SHIFT 0x00000001
+#define SQ_LB_CTR_CTRL__START_MASK 0x00000001L
+#define SQ_LB_CTR_CTRL__START__SHIFT 0x00000000
+#define SQ_LB_DATA_ALU_CYCLES__DATA_MASK 0xffffffffL
+#define SQ_LB_DATA_ALU_CYCLES__DATA__SHIFT 0x00000000
+#define SQ_LB_DATA_ALU_STALLS__DATA_MASK 0xffffffffL
+#define SQ_LB_DATA_ALU_STALLS__DATA__SHIFT 0x00000000
+#define SQ_LB_DATA_TEX_CYCLES__DATA_MASK 0xffffffffL
+#define SQ_LB_DATA_TEX_CYCLES__DATA__SHIFT 0x00000000
+#define SQ_LB_DATA_TEX_STALLS__DATA_MASK 0xffffffffL
+#define SQ_LB_DATA_TEX_STALLS__DATA__SHIFT 0x00000000
+#define SQ_MIMG_0__DA_MASK 0x00004000L
+#define SQ_MIMG_0__DA__SHIFT 0x0000000e
+#define SQ_MIMG_0__DMASK_MASK 0x00000f00L
+#define SQ_MIMG_0__DMASK__SHIFT 0x00000008
+#define SQ_MIMG_0__ENCODING_MASK 0xfc000000L
+#define SQ_MIMG_0__ENCODING__SHIFT 0x0000001a
+#define SQ_MIMG_0__GLC_MASK 0x00002000L
+#define SQ_MIMG_0__GLC__SHIFT 0x0000000d
+#define SQ_MIMG_0__LWE_MASK 0x00020000L
+#define SQ_MIMG_0__LWE__SHIFT 0x00000011
+#define SQ_MIMG_0__OP_MASK 0x01fc0000L
+#define SQ_MIMG_0__OP__SHIFT 0x00000012
+#define SQ_MIMG_0__R128_MASK 0x00008000L
+#define SQ_MIMG_0__R128__SHIFT 0x0000000f
+#define SQ_MIMG_0__SLC_MASK 0x02000000L
+#define SQ_MIMG_0__SLC__SHIFT 0x00000019
+#define SQ_MIMG_0__TFE_MASK 0x00010000L
+#define SQ_MIMG_0__TFE__SHIFT 0x00000010
+#define SQ_MIMG_0__UNORM_MASK 0x00001000L
+#define SQ_MIMG_0__UNORM__SHIFT 0x0000000c
+#define SQ_MIMG_1__SRSRC_MASK 0x001f0000L
+#define SQ_MIMG_1__SRSRC__SHIFT 0x00000010
+#define SQ_MIMG_1__SSAMP_MASK 0x03e00000L
+#define SQ_MIMG_1__SSAMP__SHIFT 0x00000015
+#define SQ_MIMG_1__VADDR_MASK 0x000000ffL
+#define SQ_MIMG_1__VADDR__SHIFT 0x00000000
+#define SQ_MIMG_1__VDATA_MASK 0x0000ff00L
+#define SQ_MIMG_1__VDATA__SHIFT 0x00000008
+#define SQ_MTBUF_0__ADDR64_MASK 0x00008000L
+#define SQ_MTBUF_0__ADDR64__SHIFT 0x0000000f
+#define SQ_MTBUF_0__DFMT_MASK 0x00780000L
+#define SQ_MTBUF_0__DFMT__SHIFT 0x00000013
+#define SQ_MTBUF_0__ENCODING_MASK 0xfc000000L
+#define SQ_MTBUF_0__ENCODING__SHIFT 0x0000001a
+#define SQ_MTBUF_0__GLC_MASK 0x00004000L
+#define SQ_MTBUF_0__GLC__SHIFT 0x0000000e
+#define SQ_MTBUF_0__IDXEN_MASK 0x00002000L
+#define SQ_MTBUF_0__IDXEN__SHIFT 0x0000000d
+#define SQ_MTBUF_0__NFMT_MASK 0x03800000L
+#define SQ_MTBUF_0__NFMT__SHIFT 0x00000017
+#define SQ_MTBUF_0__OFFEN_MASK 0x00001000L
+#define SQ_MTBUF_0__OFFEN__SHIFT 0x0000000c
+#define SQ_MTBUF_0__OFFSET_MASK 0x00000fffL
+#define SQ_MTBUF_0__OFFSET__SHIFT 0x00000000
+#define SQ_MTBUF_0__OP_MASK 0x00070000L
+#define SQ_MTBUF_0__OP__SHIFT 0x00000010
+#define SQ_MTBUF_1__SLC_MASK 0x00400000L
+#define SQ_MTBUF_1__SLC__SHIFT 0x00000016
+#define SQ_MTBUF_1__SOFFSET_MASK 0xff000000L
+#define SQ_MTBUF_1__SOFFSET__SHIFT 0x00000018
+#define SQ_MTBUF_1__SRSRC_MASK 0x001f0000L
+#define SQ_MTBUF_1__SRSRC__SHIFT 0x00000010
+#define SQ_MTBUF_1__TFE_MASK 0x00800000L
+#define SQ_MTBUF_1__TFE__SHIFT 0x00000017
+#define SQ_MTBUF_1__VADDR_MASK 0x000000ffL
+#define SQ_MTBUF_1__VADDR__SHIFT 0x00000000
+#define SQ_MTBUF_1__VDATA_MASK 0x0000ff00L
+#define SQ_MTBUF_1__VDATA__SHIFT 0x00000008
+#define SQ_MUBUF_0__ADDR64_MASK 0x00008000L
+#define SQ_MUBUF_0__ADDR64__SHIFT 0x0000000f
+#define SQ_MUBUF_0__ENCODING_MASK 0xfc000000L
+#define SQ_MUBUF_0__ENCODING__SHIFT 0x0000001a
+#define SQ_MUBUF_0__GLC_MASK 0x00004000L
+#define SQ_MUBUF_0__GLC__SHIFT 0x0000000e
+#define SQ_MUBUF_0__IDXEN_MASK 0x00002000L
+#define SQ_MUBUF_0__IDXEN__SHIFT 0x0000000d
+#define SQ_MUBUF_0__LDS_MASK 0x00010000L
+#define SQ_MUBUF_0__LDS__SHIFT 0x00000010
+#define SQ_MUBUF_0__OFFEN_MASK 0x00001000L
+#define SQ_MUBUF_0__OFFEN__SHIFT 0x0000000c
+#define SQ_MUBUF_0__OFFSET_MASK 0x00000fffL
+#define SQ_MUBUF_0__OFFSET__SHIFT 0x00000000
+#define SQ_MUBUF_0__OP_MASK 0x01fc0000L
+#define SQ_MUBUF_0__OP__SHIFT 0x00000012
+#define SQ_MUBUF_1__SLC_MASK 0x00400000L
+#define SQ_MUBUF_1__SLC__SHIFT 0x00000016
+#define SQ_MUBUF_1__SOFFSET_MASK 0xff000000L
+#define SQ_MUBUF_1__SOFFSET__SHIFT 0x00000018
+#define SQ_MUBUF_1__SRSRC_MASK 0x001f0000L
+#define SQ_MUBUF_1__SRSRC__SHIFT 0x00000010
+#define SQ_MUBUF_1__TFE_MASK 0x00800000L
+#define SQ_MUBUF_1__TFE__SHIFT 0x00000017
+#define SQ_MUBUF_1__VADDR_MASK 0x000000ffL
+#define SQ_MUBUF_1__VADDR__SHIFT 0x00000000
+#define SQ_MUBUF_1__VDATA_MASK 0x0000ff00L
+#define SQ_MUBUF_1__VDATA__SHIFT 0x00000008
+#define SQ_PERFCOUNTER0_HI__PERFCOUNTER_HI_MASK 0xffffffffL
+#define SQ_PERFCOUNTER0_HI__PERFCOUNTER_HI__SHIFT 0x00000000
+#define SQ_PERFCOUNTER0_LO__PERFCOUNTER_LO_MASK 0xffffffffL
+#define SQ_PERFCOUNTER0_LO__PERFCOUNTER_LO__SHIFT 0x00000000
+#define SQ_PERFCOUNTER0_SELECT__PERF_MODE_MASK 0xf0000000L
+#define SQ_PERFCOUNTER0_SELECT__PERF_MODE__SHIFT 0x0000001c
+#define SQ_PERFCOUNTER0_SELECT__PERF_SEL_MASK 0x000001ffL
+#define SQ_PERFCOUNTER0_SELECT__PERF_SEL__SHIFT 0x00000000
+#define SQ_PERFCOUNTER0_SELECT__SIMD_MASK_MASK 0x0f000000L
+#define SQ_PERFCOUNTER0_SELECT__SIMD_MASK__SHIFT 0x00000018
+#define SQ_PERFCOUNTER0_SELECT__SPM_MODE_MASK 0x00f00000L
+#define SQ_PERFCOUNTER0_SELECT__SPM_MODE__SHIFT 0x00000014
+#define SQ_PERFCOUNTER0_SELECT__SQC_BANK_MASK_MASK 0x0000f000L
+#define SQ_PERFCOUNTER0_SELECT__SQC_BANK_MASK__SHIFT 0x0000000c
+#define SQ_PERFCOUNTER0_SELECT__SQC_CLIENT_MASK_MASK 0x000f0000L
+#define SQ_PERFCOUNTER0_SELECT__SQC_CLIENT_MASK__SHIFT 0x00000010
+#define SQ_PERFCOUNTER10_HI__PERFCOUNTER_HI_MASK 0xffffffffL
+#define SQ_PERFCOUNTER10_HI__PERFCOUNTER_HI__SHIFT 0x00000000
+#define SQ_PERFCOUNTER10_LO__PERFCOUNTER_LO_MASK 0xffffffffL
+#define SQ_PERFCOUNTER10_LO__PERFCOUNTER_LO__SHIFT 0x00000000
+#define SQ_PERFCOUNTER10_SELECT__PERF_MODE_MASK 0xf0000000L
+#define SQ_PERFCOUNTER10_SELECT__PERF_MODE__SHIFT 0x0000001c
+#define SQ_PERFCOUNTER10_SELECT__PERF_SEL_MASK 0x000001ffL
+#define SQ_PERFCOUNTER10_SELECT__PERF_SEL__SHIFT 0x00000000
+#define SQ_PERFCOUNTER10_SELECT__SIMD_MASK_MASK 0x0f000000L
+#define SQ_PERFCOUNTER10_SELECT__SIMD_MASK__SHIFT 0x00000018
+#define SQ_PERFCOUNTER10_SELECT__SPM_MODE_MASK 0x00f00000L
+#define SQ_PERFCOUNTER10_SELECT__SPM_MODE__SHIFT 0x00000014
+#define SQ_PERFCOUNTER10_SELECT__SQC_BANK_MASK_MASK 0x0000f000L
+#define SQ_PERFCOUNTER10_SELECT__SQC_BANK_MASK__SHIFT 0x0000000c
+#define SQ_PERFCOUNTER10_SELECT__SQC_CLIENT_MASK_MASK 0x000f0000L
+#define SQ_PERFCOUNTER10_SELECT__SQC_CLIENT_MASK__SHIFT 0x00000010
+#define SQ_PERFCOUNTER11_HI__PERFCOUNTER_HI_MASK 0xffffffffL
+#define SQ_PERFCOUNTER11_HI__PERFCOUNTER_HI__SHIFT 0x00000000
+#define SQ_PERFCOUNTER11_LO__PERFCOUNTER_LO_MASK 0xffffffffL
+#define SQ_PERFCOUNTER11_LO__PERFCOUNTER_LO__SHIFT 0x00000000
+#define SQ_PERFCOUNTER11_SELECT__PERF_MODE_MASK 0xf0000000L
+#define SQ_PERFCOUNTER11_SELECT__PERF_MODE__SHIFT 0x0000001c
+#define SQ_PERFCOUNTER11_SELECT__PERF_SEL_MASK 0x000001ffL
+#define SQ_PERFCOUNTER11_SELECT__PERF_SEL__SHIFT 0x00000000
+#define SQ_PERFCOUNTER11_SELECT__SIMD_MASK_MASK 0x0f000000L
+#define SQ_PERFCOUNTER11_SELECT__SIMD_MASK__SHIFT 0x00000018
+#define SQ_PERFCOUNTER11_SELECT__SPM_MODE_MASK 0x00f00000L
+#define SQ_PERFCOUNTER11_SELECT__SPM_MODE__SHIFT 0x00000014
+#define SQ_PERFCOUNTER11_SELECT__SQC_BANK_MASK_MASK 0x0000f000L
+#define SQ_PERFCOUNTER11_SELECT__SQC_BANK_MASK__SHIFT 0x0000000c
+#define SQ_PERFCOUNTER11_SELECT__SQC_CLIENT_MASK_MASK 0x000f0000L
+#define SQ_PERFCOUNTER11_SELECT__SQC_CLIENT_MASK__SHIFT 0x00000010
+#define SQ_PERFCOUNTER12_HI__PERFCOUNTER_HI_MASK 0xffffffffL
+#define SQ_PERFCOUNTER12_HI__PERFCOUNTER_HI__SHIFT 0x00000000
+#define SQ_PERFCOUNTER12_LO__PERFCOUNTER_LO_MASK 0xffffffffL
+#define SQ_PERFCOUNTER12_LO__PERFCOUNTER_LO__SHIFT 0x00000000
+#define SQ_PERFCOUNTER12_SELECT__PERF_MODE_MASK 0xf0000000L
+#define SQ_PERFCOUNTER12_SELECT__PERF_MODE__SHIFT 0x0000001c
+#define SQ_PERFCOUNTER12_SELECT__PERF_SEL_MASK 0x000001ffL
+#define SQ_PERFCOUNTER12_SELECT__PERF_SEL__SHIFT 0x00000000
+#define SQ_PERFCOUNTER12_SELECT__SIMD_MASK_MASK 0x0f000000L
+#define SQ_PERFCOUNTER12_SELECT__SIMD_MASK__SHIFT 0x00000018
+#define SQ_PERFCOUNTER12_SELECT__SPM_MODE_MASK 0x00f00000L
+#define SQ_PERFCOUNTER12_SELECT__SPM_MODE__SHIFT 0x00000014
+#define SQ_PERFCOUNTER12_SELECT__SQC_BANK_MASK_MASK 0x0000f000L
+#define SQ_PERFCOUNTER12_SELECT__SQC_BANK_MASK__SHIFT 0x0000000c
+#define SQ_PERFCOUNTER12_SELECT__SQC_CLIENT_MASK_MASK 0x000f0000L
+#define SQ_PERFCOUNTER12_SELECT__SQC_CLIENT_MASK__SHIFT 0x00000010
+#define SQ_PERFCOUNTER13_HI__PERFCOUNTER_HI_MASK 0xffffffffL
+#define SQ_PERFCOUNTER13_HI__PERFCOUNTER_HI__SHIFT 0x00000000
+#define SQ_PERFCOUNTER13_LO__PERFCOUNTER_LO_MASK 0xffffffffL
+#define SQ_PERFCOUNTER13_LO__PERFCOUNTER_LO__SHIFT 0x00000000
+#define SQ_PERFCOUNTER13_SELECT__PERF_MODE_MASK 0xf0000000L
+#define SQ_PERFCOUNTER13_SELECT__PERF_MODE__SHIFT 0x0000001c
+#define SQ_PERFCOUNTER13_SELECT__PERF_SEL_MASK 0x000001ffL
+#define SQ_PERFCOUNTER13_SELECT__PERF_SEL__SHIFT 0x00000000
+#define SQ_PERFCOUNTER13_SELECT__SIMD_MASK_MASK 0x0f000000L
+#define SQ_PERFCOUNTER13_SELECT__SIMD_MASK__SHIFT 0x00000018
+#define SQ_PERFCOUNTER13_SELECT__SPM_MODE_MASK 0x00f00000L
+#define SQ_PERFCOUNTER13_SELECT__SPM_MODE__SHIFT 0x00000014
+#define SQ_PERFCOUNTER13_SELECT__SQC_BANK_MASK_MASK 0x0000f000L
+#define SQ_PERFCOUNTER13_SELECT__SQC_BANK_MASK__SHIFT 0x0000000c
+#define SQ_PERFCOUNTER13_SELECT__SQC_CLIENT_MASK_MASK 0x000f0000L
+#define SQ_PERFCOUNTER13_SELECT__SQC_CLIENT_MASK__SHIFT 0x00000010
+#define SQ_PERFCOUNTER14_HI__PERFCOUNTER_HI_MASK 0xffffffffL
+#define SQ_PERFCOUNTER14_HI__PERFCOUNTER_HI__SHIFT 0x00000000
+#define SQ_PERFCOUNTER14_LO__PERFCOUNTER_LO_MASK 0xffffffffL
+#define SQ_PERFCOUNTER14_LO__PERFCOUNTER_LO__SHIFT 0x00000000
+#define SQ_PERFCOUNTER14_SELECT__PERF_MODE_MASK 0xf0000000L
+#define SQ_PERFCOUNTER14_SELECT__PERF_MODE__SHIFT 0x0000001c
+#define SQ_PERFCOUNTER14_SELECT__PERF_SEL_MASK 0x000001ffL
+#define SQ_PERFCOUNTER14_SELECT__PERF_SEL__SHIFT 0x00000000
+#define SQ_PERFCOUNTER14_SELECT__SIMD_MASK_MASK 0x0f000000L
+#define SQ_PERFCOUNTER14_SELECT__SIMD_MASK__SHIFT 0x00000018
+#define SQ_PERFCOUNTER14_SELECT__SPM_MODE_MASK 0x00f00000L
+#define SQ_PERFCOUNTER14_SELECT__SPM_MODE__SHIFT 0x00000014
+#define SQ_PERFCOUNTER14_SELECT__SQC_BANK_MASK_MASK 0x0000f000L
+#define SQ_PERFCOUNTER14_SELECT__SQC_BANK_MASK__SHIFT 0x0000000c
+#define SQ_PERFCOUNTER14_SELECT__SQC_CLIENT_MASK_MASK 0x000f0000L
+#define SQ_PERFCOUNTER14_SELECT__SQC_CLIENT_MASK__SHIFT 0x00000010
+#define SQ_PERFCOUNTER15_HI__PERFCOUNTER_HI_MASK 0xffffffffL
+#define SQ_PERFCOUNTER15_HI__PERFCOUNTER_HI__SHIFT 0x00000000
+#define SQ_PERFCOUNTER15_LO__PERFCOUNTER_LO_MASK 0xffffffffL
+#define SQ_PERFCOUNTER15_LO__PERFCOUNTER_LO__SHIFT 0x00000000
+#define SQ_PERFCOUNTER15_SELECT__PERF_MODE_MASK 0xf0000000L
+#define SQ_PERFCOUNTER15_SELECT__PERF_MODE__SHIFT 0x0000001c
+#define SQ_PERFCOUNTER15_SELECT__PERF_SEL_MASK 0x000001ffL
+#define SQ_PERFCOUNTER15_SELECT__PERF_SEL__SHIFT 0x00000000
+#define SQ_PERFCOUNTER15_SELECT__SIMD_MASK_MASK 0x0f000000L
+#define SQ_PERFCOUNTER15_SELECT__SIMD_MASK__SHIFT 0x00000018
+#define SQ_PERFCOUNTER15_SELECT__SPM_MODE_MASK 0x00f00000L
+#define SQ_PERFCOUNTER15_SELECT__SPM_MODE__SHIFT 0x00000014
+#define SQ_PERFCOUNTER15_SELECT__SQC_BANK_MASK_MASK 0x0000f000L
+#define SQ_PERFCOUNTER15_SELECT__SQC_BANK_MASK__SHIFT 0x0000000c
+#define SQ_PERFCOUNTER15_SELECT__SQC_CLIENT_MASK_MASK 0x000f0000L
+#define SQ_PERFCOUNTER15_SELECT__SQC_CLIENT_MASK__SHIFT 0x00000010
+#define SQ_PERFCOUNTER1_HI__PERFCOUNTER_HI_MASK 0xffffffffL
+#define SQ_PERFCOUNTER1_HI__PERFCOUNTER_HI__SHIFT 0x00000000
+#define SQ_PERFCOUNTER1_LO__PERFCOUNTER_LO_MASK 0xffffffffL
+#define SQ_PERFCOUNTER1_LO__PERFCOUNTER_LO__SHIFT 0x00000000
+#define SQ_PERFCOUNTER1_SELECT__PERF_MODE_MASK 0xf0000000L
+#define SQ_PERFCOUNTER1_SELECT__PERF_MODE__SHIFT 0x0000001c
+#define SQ_PERFCOUNTER1_SELECT__PERF_SEL_MASK 0x000001ffL
+#define SQ_PERFCOUNTER1_SELECT__PERF_SEL__SHIFT 0x00000000
+#define SQ_PERFCOUNTER1_SELECT__SIMD_MASK_MASK 0x0f000000L
+#define SQ_PERFCOUNTER1_SELECT__SIMD_MASK__SHIFT 0x00000018
+#define SQ_PERFCOUNTER1_SELECT__SPM_MODE_MASK 0x00f00000L
+#define SQ_PERFCOUNTER1_SELECT__SPM_MODE__SHIFT 0x00000014
+#define SQ_PERFCOUNTER1_SELECT__SQC_BANK_MASK_MASK 0x0000f000L
+#define SQ_PERFCOUNTER1_SELECT__SQC_BANK_MASK__SHIFT 0x0000000c
+#define SQ_PERFCOUNTER1_SELECT__SQC_CLIENT_MASK_MASK 0x000f0000L
+#define SQ_PERFCOUNTER1_SELECT__SQC_CLIENT_MASK__SHIFT 0x00000010
+#define SQ_PERFCOUNTER2_HI__PERFCOUNTER_HI_MASK 0xffffffffL
+#define SQ_PERFCOUNTER2_HI__PERFCOUNTER_HI__SHIFT 0x00000000
+#define SQ_PERFCOUNTER2_LO__PERFCOUNTER_LO_MASK 0xffffffffL
+#define SQ_PERFCOUNTER2_LO__PERFCOUNTER_LO__SHIFT 0x00000000
+#define SQ_PERFCOUNTER2_SELECT__PERF_MODE_MASK 0xf0000000L
+#define SQ_PERFCOUNTER2_SELECT__PERF_MODE__SHIFT 0x0000001c
+#define SQ_PERFCOUNTER2_SELECT__PERF_SEL_MASK 0x000001ffL
+#define SQ_PERFCOUNTER2_SELECT__PERF_SEL__SHIFT 0x00000000
+#define SQ_PERFCOUNTER2_SELECT__SIMD_MASK_MASK 0x0f000000L
+#define SQ_PERFCOUNTER2_SELECT__SIMD_MASK__SHIFT 0x00000018
+#define SQ_PERFCOUNTER2_SELECT__SPM_MODE_MASK 0x00f00000L
+#define SQ_PERFCOUNTER2_SELECT__SPM_MODE__SHIFT 0x00000014
+#define SQ_PERFCOUNTER2_SELECT__SQC_BANK_MASK_MASK 0x0000f000L
+#define SQ_PERFCOUNTER2_SELECT__SQC_BANK_MASK__SHIFT 0x0000000c
+#define SQ_PERFCOUNTER2_SELECT__SQC_CLIENT_MASK_MASK 0x000f0000L
+#define SQ_PERFCOUNTER2_SELECT__SQC_CLIENT_MASK__SHIFT 0x00000010
+#define SQ_PERFCOUNTER3_HI__PERFCOUNTER_HI_MASK 0xffffffffL
+#define SQ_PERFCOUNTER3_HI__PERFCOUNTER_HI__SHIFT 0x00000000
+#define SQ_PERFCOUNTER3_LO__PERFCOUNTER_LO_MASK 0xffffffffL
+#define SQ_PERFCOUNTER3_LO__PERFCOUNTER_LO__SHIFT 0x00000000
+#define SQ_PERFCOUNTER3_SELECT__PERF_MODE_MASK 0xf0000000L
+#define SQ_PERFCOUNTER3_SELECT__PERF_MODE__SHIFT 0x0000001c
+#define SQ_PERFCOUNTER3_SELECT__PERF_SEL_MASK 0x000001ffL
+#define SQ_PERFCOUNTER3_SELECT__PERF_SEL__SHIFT 0x00000000
+#define SQ_PERFCOUNTER3_SELECT__SIMD_MASK_MASK 0x0f000000L
+#define SQ_PERFCOUNTER3_SELECT__SIMD_MASK__SHIFT 0x00000018
+#define SQ_PERFCOUNTER3_SELECT__SPM_MODE_MASK 0x00f00000L
+#define SQ_PERFCOUNTER3_SELECT__SPM_MODE__SHIFT 0x00000014
+#define SQ_PERFCOUNTER3_SELECT__SQC_BANK_MASK_MASK 0x0000f000L
+#define SQ_PERFCOUNTER3_SELECT__SQC_BANK_MASK__SHIFT 0x0000000c
+#define SQ_PERFCOUNTER3_SELECT__SQC_CLIENT_MASK_MASK 0x000f0000L
+#define SQ_PERFCOUNTER3_SELECT__SQC_CLIENT_MASK__SHIFT 0x00000010
+#define SQ_PERFCOUNTER4_HI__PERFCOUNTER_HI_MASK 0xffffffffL
+#define SQ_PERFCOUNTER4_HI__PERFCOUNTER_HI__SHIFT 0x00000000
+#define SQ_PERFCOUNTER4_LO__PERFCOUNTER_LO_MASK 0xffffffffL
+#define SQ_PERFCOUNTER4_LO__PERFCOUNTER_LO__SHIFT 0x00000000
+#define SQ_PERFCOUNTER4_SELECT__PERF_MODE_MASK 0xf0000000L
+#define SQ_PERFCOUNTER4_SELECT__PERF_MODE__SHIFT 0x0000001c
+#define SQ_PERFCOUNTER4_SELECT__PERF_SEL_MASK 0x000001ffL
+#define SQ_PERFCOUNTER4_SELECT__PERF_SEL__SHIFT 0x00000000
+#define SQ_PERFCOUNTER4_SELECT__SIMD_MASK_MASK 0x0f000000L
+#define SQ_PERFCOUNTER4_SELECT__SIMD_MASK__SHIFT 0x00000018
+#define SQ_PERFCOUNTER4_SELECT__SPM_MODE_MASK 0x00f00000L
+#define SQ_PERFCOUNTER4_SELECT__SPM_MODE__SHIFT 0x00000014
+#define SQ_PERFCOUNTER4_SELECT__SQC_BANK_MASK_MASK 0x0000f000L
+#define SQ_PERFCOUNTER4_SELECT__SQC_BANK_MASK__SHIFT 0x0000000c
+#define SQ_PERFCOUNTER4_SELECT__SQC_CLIENT_MASK_MASK 0x000f0000L
+#define SQ_PERFCOUNTER4_SELECT__SQC_CLIENT_MASK__SHIFT 0x00000010
+#define SQ_PERFCOUNTER5_HI__PERFCOUNTER_HI_MASK 0xffffffffL
+#define SQ_PERFCOUNTER5_HI__PERFCOUNTER_HI__SHIFT 0x00000000
+#define SQ_PERFCOUNTER5_LO__PERFCOUNTER_LO_MASK 0xffffffffL
+#define SQ_PERFCOUNTER5_LO__PERFCOUNTER_LO__SHIFT 0x00000000
+#define SQ_PERFCOUNTER5_SELECT__PERF_MODE_MASK 0xf0000000L
+#define SQ_PERFCOUNTER5_SELECT__PERF_MODE__SHIFT 0x0000001c
+#define SQ_PERFCOUNTER5_SELECT__PERF_SEL_MASK 0x000001ffL
+#define SQ_PERFCOUNTER5_SELECT__PERF_SEL__SHIFT 0x00000000
+#define SQ_PERFCOUNTER5_SELECT__SIMD_MASK_MASK 0x0f000000L
+#define SQ_PERFCOUNTER5_SELECT__SIMD_MASK__SHIFT 0x00000018
+#define SQ_PERFCOUNTER5_SELECT__SPM_MODE_MASK 0x00f00000L
+#define SQ_PERFCOUNTER5_SELECT__SPM_MODE__SHIFT 0x00000014
+#define SQ_PERFCOUNTER5_SELECT__SQC_BANK_MASK_MASK 0x0000f000L
+#define SQ_PERFCOUNTER5_SELECT__SQC_BANK_MASK__SHIFT 0x0000000c
+#define SQ_PERFCOUNTER5_SELECT__SQC_CLIENT_MASK_MASK 0x000f0000L
+#define SQ_PERFCOUNTER5_SELECT__SQC_CLIENT_MASK__SHIFT 0x00000010
+#define SQ_PERFCOUNTER6_HI__PERFCOUNTER_HI_MASK 0xffffffffL
+#define SQ_PERFCOUNTER6_HI__PERFCOUNTER_HI__SHIFT 0x00000000
+#define SQ_PERFCOUNTER6_LO__PERFCOUNTER_LO_MASK 0xffffffffL
+#define SQ_PERFCOUNTER6_LO__PERFCOUNTER_LO__SHIFT 0x00000000
+#define SQ_PERFCOUNTER6_SELECT__PERF_MODE_MASK 0xf0000000L
+#define SQ_PERFCOUNTER6_SELECT__PERF_MODE__SHIFT 0x0000001c
+#define SQ_PERFCOUNTER6_SELECT__PERF_SEL_MASK 0x000001ffL
+#define SQ_PERFCOUNTER6_SELECT__PERF_SEL__SHIFT 0x00000000
+#define SQ_PERFCOUNTER6_SELECT__SIMD_MASK_MASK 0x0f000000L
+#define SQ_PERFCOUNTER6_SELECT__SIMD_MASK__SHIFT 0x00000018
+#define SQ_PERFCOUNTER6_SELECT__SPM_MODE_MASK 0x00f00000L
+#define SQ_PERFCOUNTER6_SELECT__SPM_MODE__SHIFT 0x00000014
+#define SQ_PERFCOUNTER6_SELECT__SQC_BANK_MASK_MASK 0x0000f000L
+#define SQ_PERFCOUNTER6_SELECT__SQC_BANK_MASK__SHIFT 0x0000000c
+#define SQ_PERFCOUNTER6_SELECT__SQC_CLIENT_MASK_MASK 0x000f0000L
+#define SQ_PERFCOUNTER6_SELECT__SQC_CLIENT_MASK__SHIFT 0x00000010
+#define SQ_PERFCOUNTER7_HI__PERFCOUNTER_HI_MASK 0xffffffffL
+#define SQ_PERFCOUNTER7_HI__PERFCOUNTER_HI__SHIFT 0x00000000
+#define SQ_PERFCOUNTER7_LO__PERFCOUNTER_LO_MASK 0xffffffffL
+#define SQ_PERFCOUNTER7_LO__PERFCOUNTER_LO__SHIFT 0x00000000
+#define SQ_PERFCOUNTER7_SELECT__PERF_MODE_MASK 0xf0000000L
+#define SQ_PERFCOUNTER7_SELECT__PERF_MODE__SHIFT 0x0000001c
+#define SQ_PERFCOUNTER7_SELECT__PERF_SEL_MASK 0x000001ffL
+#define SQ_PERFCOUNTER7_SELECT__PERF_SEL__SHIFT 0x00000000
+#define SQ_PERFCOUNTER7_SELECT__SIMD_MASK_MASK 0x0f000000L
+#define SQ_PERFCOUNTER7_SELECT__SIMD_MASK__SHIFT 0x00000018
+#define SQ_PERFCOUNTER7_SELECT__SPM_MODE_MASK 0x00f00000L
+#define SQ_PERFCOUNTER7_SELECT__SPM_MODE__SHIFT 0x00000014
+#define SQ_PERFCOUNTER7_SELECT__SQC_BANK_MASK_MASK 0x0000f000L
+#define SQ_PERFCOUNTER7_SELECT__SQC_BANK_MASK__SHIFT 0x0000000c
+#define SQ_PERFCOUNTER7_SELECT__SQC_CLIENT_MASK_MASK 0x000f0000L
+#define SQ_PERFCOUNTER7_SELECT__SQC_CLIENT_MASK__SHIFT 0x00000010
+#define SQ_PERFCOUNTER8_HI__PERFCOUNTER_HI_MASK 0xffffffffL
+#define SQ_PERFCOUNTER8_HI__PERFCOUNTER_HI__SHIFT 0x00000000
+#define SQ_PERFCOUNTER8_LO__PERFCOUNTER_LO_MASK 0xffffffffL
+#define SQ_PERFCOUNTER8_LO__PERFCOUNTER_LO__SHIFT 0x00000000
+#define SQ_PERFCOUNTER8_SELECT__PERF_MODE_MASK 0xf0000000L
+#define SQ_PERFCOUNTER8_SELECT__PERF_MODE__SHIFT 0x0000001c
+#define SQ_PERFCOUNTER8_SELECT__PERF_SEL_MASK 0x000001ffL
+#define SQ_PERFCOUNTER8_SELECT__PERF_SEL__SHIFT 0x00000000
+#define SQ_PERFCOUNTER8_SELECT__SIMD_MASK_MASK 0x0f000000L
+#define SQ_PERFCOUNTER8_SELECT__SIMD_MASK__SHIFT 0x00000018
+#define SQ_PERFCOUNTER8_SELECT__SPM_MODE_MASK 0x00f00000L
+#define SQ_PERFCOUNTER8_SELECT__SPM_MODE__SHIFT 0x00000014
+#define SQ_PERFCOUNTER8_SELECT__SQC_BANK_MASK_MASK 0x0000f000L
+#define SQ_PERFCOUNTER8_SELECT__SQC_BANK_MASK__SHIFT 0x0000000c
+#define SQ_PERFCOUNTER8_SELECT__SQC_CLIENT_MASK_MASK 0x000f0000L
+#define SQ_PERFCOUNTER8_SELECT__SQC_CLIENT_MASK__SHIFT 0x00000010
+#define SQ_PERFCOUNTER9_HI__PERFCOUNTER_HI_MASK 0xffffffffL
+#define SQ_PERFCOUNTER9_HI__PERFCOUNTER_HI__SHIFT 0x00000000
+#define SQ_PERFCOUNTER9_LO__PERFCOUNTER_LO_MASK 0xffffffffL
+#define SQ_PERFCOUNTER9_LO__PERFCOUNTER_LO__SHIFT 0x00000000
+#define SQ_PERFCOUNTER9_SELECT__PERF_MODE_MASK 0xf0000000L
+#define SQ_PERFCOUNTER9_SELECT__PERF_MODE__SHIFT 0x0000001c
+#define SQ_PERFCOUNTER9_SELECT__PERF_SEL_MASK 0x000001ffL
+#define SQ_PERFCOUNTER9_SELECT__PERF_SEL__SHIFT 0x00000000
+#define SQ_PERFCOUNTER9_SELECT__SIMD_MASK_MASK 0x0f000000L
+#define SQ_PERFCOUNTER9_SELECT__SIMD_MASK__SHIFT 0x00000018
+#define SQ_PERFCOUNTER9_SELECT__SPM_MODE_MASK 0x00f00000L
+#define SQ_PERFCOUNTER9_SELECT__SPM_MODE__SHIFT 0x00000014
+#define SQ_PERFCOUNTER9_SELECT__SQC_BANK_MASK_MASK 0x0000f000L
+#define SQ_PERFCOUNTER9_SELECT__SQC_BANK_MASK__SHIFT 0x0000000c
+#define SQ_PERFCOUNTER9_SELECT__SQC_CLIENT_MASK_MASK 0x000f0000L
+#define SQ_PERFCOUNTER9_SELECT__SQC_CLIENT_MASK__SHIFT 0x00000010
+#define SQ_PERFCOUNTER_CTRL2__FORCE_EN_MASK 0x00000001L
+#define SQ_PERFCOUNTER_CTRL2__FORCE_EN__SHIFT 0x00000000
+#define SQ_PERFCOUNTER_CTRL__CNTR_RATE_MASK 0x00001f00L
+#define SQ_PERFCOUNTER_CTRL__CNTR_RATE__SHIFT 0x00000008
+#define SQ_PERFCOUNTER_CTRL__CS_EN_MASK 0x00000040L
+#define SQ_PERFCOUNTER_CTRL__CS_EN__SHIFT 0x00000006
+#define SQ_PERFCOUNTER_CTRL__DISABLE_FLUSH_MASK 0x00002000L
+#define SQ_PERFCOUNTER_CTRL__DISABLE_FLUSH__SHIFT 0x0000000d
+#define SQ_PERFCOUNTER_CTRL__ES_EN_MASK 0x00000008L
+#define SQ_PERFCOUNTER_CTRL__ES_EN__SHIFT 0x00000003
+#define SQ_PERFCOUNTER_CTRL__GS_EN_MASK 0x00000004L
+#define SQ_PERFCOUNTER_CTRL__GS_EN__SHIFT 0x00000002
+#define SQ_PERFCOUNTER_CTRL__HS_EN_MASK 0x00000010L
+#define SQ_PERFCOUNTER_CTRL__HS_EN__SHIFT 0x00000004
+#define SQ_PERFCOUNTER_CTRL__LS_EN_MASK 0x00000020L
+#define SQ_PERFCOUNTER_CTRL__LS_EN__SHIFT 0x00000005
+#define SQ_PERFCOUNTER_CTRL__PS_EN_MASK 0x00000001L
+#define SQ_PERFCOUNTER_CTRL__PS_EN__SHIFT 0x00000000
+#define SQ_PERFCOUNTER_CTRL__VS_EN_MASK 0x00000002L
+#define SQ_PERFCOUNTER_CTRL__VS_EN__SHIFT 0x00000001
+#define SQ_POWER_THROTTLE2__LONG_TERM_INTERVAL_RATIO_MASK 0x78000000L
+#define SQ_POWER_THROTTLE2__LONG_TERM_INTERVAL_RATIO__SHIFT 0x0000001b
+#define SQ_POWER_THROTTLE2__MAX_POWER_DELTA_MASK 0x00003fffL
+#define SQ_POWER_THROTTLE2__MAX_POWER_DELTA__SHIFT 0x00000000
+#define SQ_POWER_THROTTLE2__SHORT_TERM_INTERVAL_SIZE_MASK 0x03ff0000L
+#define SQ_POWER_THROTTLE2__SHORT_TERM_INTERVAL_SIZE__SHIFT 0x00000010
+#define SQ_POWER_THROTTLE2__USE_REF_CLOCK_MASK 0x80000000L
+#define SQ_POWER_THROTTLE2__USE_REF_CLOCK__SHIFT 0x0000001f
+#define SQ_POWER_THROTTLE__MAX_POWER_MASK 0x3fff0000L
+#define SQ_POWER_THROTTLE__MAX_POWER__SHIFT 0x00000010
+#define SQ_POWER_THROTTLE__MIN_POWER_MASK 0x00003fffL
+#define SQ_POWER_THROTTLE__MIN_POWER__SHIFT 0x00000000
+#define SQ_POWER_THROTTLE__PHASE_OFFSET_MASK 0xc0000000L
+#define SQ_POWER_THROTTLE__PHASE_OFFSET__SHIFT 0x0000001e
+#define SQ_RANDOM_WAVE_PRI__RET_MASK 0x0000007fL
+#define SQ_RANDOM_WAVE_PRI__RET__SHIFT 0x00000000
+#define SQ_RANDOM_WAVE_PRI__RNG_MASK 0x001ffc00L
+#define SQ_RANDOM_WAVE_PRI__RNG__SHIFT 0x0000000a
+#define SQ_RANDOM_WAVE_PRI__RUI_MASK 0x00000380L
+#define SQ_RANDOM_WAVE_PRI__RUI__SHIFT 0x00000007
+#define SQ_REG_CREDITS__CMD_CREDITS_MASK 0x00000f00L
+#define SQ_REG_CREDITS__CMD_CREDITS__SHIFT 0x00000008
+#define SQ_REG_CREDITS__CMD_OVERFLOW_MASK 0x80000000L
+#define SQ_REG_CREDITS__CMD_OVERFLOW__SHIFT 0x0000001f
+#define SQ_REG_CREDITS__IMMED_OVERFLOW_MASK 0x40000000L
+#define SQ_REG_CREDITS__IMMED_OVERFLOW__SHIFT 0x0000001e
+#define SQ_REG_CREDITS__REG_BUSY_MASK 0x10000000L
+#define SQ_REG_CREDITS__REG_BUSY__SHIFT 0x0000001c
+#define SQ_REG_CREDITS__SRBM_CREDITS_MASK 0x0000003fL
+#define SQ_REG_CREDITS__SRBM_CREDITS__SHIFT 0x00000000
+#define SQ_REG_CREDITS__SRBM_OVERFLOW_MASK 0x20000000L
+#define SQ_REG_CREDITS__SRBM_OVERFLOW__SHIFT 0x0000001d
+#define SQ_SEC_CNT__LDS_SEC_MASK 0x0000003fL
+#define SQ_SEC_CNT__LDS_SEC__SHIFT 0x00000000
+#define SQ_SEC_CNT__SGPR_SEC_MASK 0x00001f00L
+#define SQ_SEC_CNT__SGPR_SEC__SHIFT 0x00000008
+#define SQ_SEC_CNT__VGPR_SEC_MASK 0x01ff0000L
+#define SQ_SEC_CNT__VGPR_SEC__SHIFT 0x00000010
+#define SQ_SMRD__ENCODING_MASK 0xf8000000L
+#define SQ_SMRD__ENCODING__SHIFT 0x0000001b
+#define SQ_SMRD__IMM_MASK 0x00000100L
+#define SQ_SMRD__IMM__SHIFT 0x00000008
+#define SQ_SMRD__OFFSET_MASK 0x000000ffL
+#define SQ_SMRD__OFFSET__SHIFT 0x00000000
+#define SQ_SMRD__OP_MASK 0x07c00000L
+#define SQ_SMRD__OP__SHIFT 0x00000016
+#define SQ_SMRD__SBASE_MASK 0x00007e00L
+#define SQ_SMRD__SBASE__SHIFT 0x00000009
+#define SQ_SMRD__SDST_MASK 0x003f8000L
+#define SQ_SMRD__SDST__SHIFT 0x0000000f
+#define SQ_SOP1__ENCODING_MASK 0xff800000L
+#define SQ_SOP1__ENCODING__SHIFT 0x00000017
+#define SQ_SOP1__OP_MASK 0x0000ff00L
+#define SQ_SOP1__OP__SHIFT 0x00000008
+#define SQ_SOP1__SDST_MASK 0x007f0000L
+#define SQ_SOP1__SDST__SHIFT 0x00000010
+#define SQ_SOP1__SSRC0_MASK 0x000000ffL
+#define SQ_SOP1__SSRC0__SHIFT 0x00000000
+#define SQ_SOP2__ENCODING_MASK 0xc0000000L
+#define SQ_SOP2__ENCODING__SHIFT 0x0000001e
+#define SQ_SOP2__OP_MASK 0x3f800000L
+#define SQ_SOP2__OP__SHIFT 0x00000017
+#define SQ_SOP2__SDST_MASK 0x007f0000L
+#define SQ_SOP2__SDST__SHIFT 0x00000010
+#define SQ_SOP2__SSRC0_MASK 0x000000ffL
+#define SQ_SOP2__SSRC0__SHIFT 0x00000000
+#define SQ_SOP2__SSRC1_MASK 0x0000ff00L
+#define SQ_SOP2__SSRC1__SHIFT 0x00000008
+#define SQ_SOPC__ENCODING_MASK 0xff800000L
+#define SQ_SOPC__ENCODING__SHIFT 0x00000017
+#define SQ_SOPC__OP_MASK 0x007f0000L
+#define SQ_SOPC__OP__SHIFT 0x00000010
+#define SQ_SOPC__SSRC0_MASK 0x000000ffL
+#define SQ_SOPC__SSRC0__SHIFT 0x00000000
+#define SQ_SOPC__SSRC1_MASK 0x0000ff00L
+#define SQ_SOPC__SSRC1__SHIFT 0x00000008
+#define SQ_SOPK__ENCODING_MASK 0xf0000000L
+#define SQ_SOPK__ENCODING__SHIFT 0x0000001c
+#define SQ_SOPK__OP_MASK 0x0f800000L
+#define SQ_SOPK__OP__SHIFT 0x00000017
+#define SQ_SOPK__SDST_MASK 0x007f0000L
+#define SQ_SOPK__SDST__SHIFT 0x00000010
+#define SQ_SOPK__SIMM16_MASK 0x0000ffffL
+#define SQ_SOPK__SIMM16__SHIFT 0x00000000
+#define SQ_SOPP__ENCODING_MASK 0xff800000L
+#define SQ_SOPP__ENCODING__SHIFT 0x00000017
+#define SQ_SOPP__OP_MASK 0x007f0000L
+#define SQ_SOPP__OP__SHIFT 0x00000010
+#define SQ_SOPP__SIMM16_MASK 0x0000ffffL
+#define SQ_SOPP__SIMM16__SHIFT 0x00000000
+#define SQ_TEX_CLK_CTRL__FORCE_CU_ON_SH0_MASK 0x0000ffffL
+#define SQ_TEX_CLK_CTRL__FORCE_CU_ON_SH0__SHIFT 0x00000000
+#define SQ_TEX_CLK_CTRL__FORCE_CU_ON_SH1_MASK 0xffff0000L
+#define SQ_TEX_CLK_CTRL__FORCE_CU_ON_SH1__SHIFT 0x00000010
+#define SQ_THREAD_TRACE_BASE2__ADDR_HI_MASK 0x0000000fL
+#define SQ_THREAD_TRACE_BASE2__ADDR_HI__SHIFT 0x00000000
+#define SQ_THREAD_TRACE_BASE2__ATC_MASK 0x00000010L
+#define SQ_THREAD_TRACE_BASE2__ATC__SHIFT 0x00000004
+#define SQ_THREAD_TRACE_BASE__ADDR_MASK 0xffffffffL
+#define SQ_THREAD_TRACE_BASE__ADDR__SHIFT 0x00000000
+#define SQ_THREAD_TRACE_CNTR__CNTR_MASK 0xffffffffL
+#define SQ_THREAD_TRACE_CNTR__CNTR__SHIFT 0x00000000
+#define SQ_THREAD_TRACE_CTRL__RESET_BUFFER_MASK 0x80000000L
+#define SQ_THREAD_TRACE_CTRL__RESET_BUFFER__SHIFT 0x0000001f
+#define SQ_THREAD_TRACE_HIWATER__HIWATER_MASK 0x00000007L
+#define SQ_THREAD_TRACE_HIWATER__HIWATER__SHIFT 0x00000000
+#define SQ_THREAD_TRACE_MASK__CU_SEL_MASK 0x0000001fL
+#define SQ_THREAD_TRACE_MASK__CU_SEL__SHIFT 0x00000000
+#define SQ_THREAD_TRACE_MASK__RANDOM_SEED_MASK 0xffff0000L
+#define SQ_THREAD_TRACE_MASK__RANDOM_SEED__SHIFT 0x00000010
+#define SQ_THREAD_TRACE_MASK__REG_STALL_EN_MASK 0x00000080L
+#define SQ_THREAD_TRACE_MASK__REG_STALL_EN__SHIFT 0x00000007
+#define SQ_THREAD_TRACE_MASK__SH_SEL_MASK 0x00000020L
+#define SQ_THREAD_TRACE_MASK__SH_SEL__SHIFT 0x00000005
+#define SQ_THREAD_TRACE_MASK__SPI_STALL_EN_MASK 0x00004000L
+#define SQ_THREAD_TRACE_MASK__SPI_STALL_EN__SHIFT 0x0000000e
+#define SQ_THREAD_TRACE_MASK__SQ_STALL_EN_MASK 0x00008000L
+#define SQ_THREAD_TRACE_MASK__SQ_STALL_EN__SHIFT 0x0000000f
+#define SQ_THREAD_TRACE_MASK__VM_ID_MASK_MASK 0x00003000L
+#define SQ_THREAD_TRACE_MASK__VM_ID_MASK__SHIFT 0x0000000c
+#define SQ_THREAD_TRACE_MODE__AUTOFLUSH_EN_MASK 0x02000000L
+#define SQ_THREAD_TRACE_MODE__AUTOFLUSH_EN__SHIFT 0x00000019
+#define SQ_THREAD_TRACE_MODE__CAPTURE_MODE_MASK 0x01800000L
+#define SQ_THREAD_TRACE_MODE__CAPTURE_MODE__SHIFT 0x00000017
+#define SQ_THREAD_TRACE_MODE__INTERRUPT_EN_MASK 0x40000000L
+#define SQ_THREAD_TRACE_MODE__INTERRUPT_EN__SHIFT 0x0000001e
+#define SQ_THREAD_TRACE_MODE__ISSUE_MASK_MASK 0x18000000L
+#define SQ_THREAD_TRACE_MODE__ISSUE_MASK__SHIFT 0x0000001b
+#define SQ_THREAD_TRACE_MODE__MASK_CS_MASK 0x001c0000L
+#define SQ_THREAD_TRACE_MODE__MASK_CS__SHIFT 0x00000012
+#define SQ_THREAD_TRACE_MODE__MASK_ES_MASK 0x00000e00L
+#define SQ_THREAD_TRACE_MODE__MASK_ES__SHIFT 0x00000009
+#define SQ_THREAD_TRACE_MODE__MASK_GS_MASK 0x000001c0L
+#define SQ_THREAD_TRACE_MODE__MASK_GS__SHIFT 0x00000006
+#define SQ_THREAD_TRACE_MODE__MASK_HS_MASK 0x00007000L
+#define SQ_THREAD_TRACE_MODE__MASK_HS__SHIFT 0x0000000c
+#define SQ_THREAD_TRACE_MODE__MASK_LS_MASK 0x00038000L
+#define SQ_THREAD_TRACE_MODE__MASK_LS__SHIFT 0x0000000f
+#define SQ_THREAD_TRACE_MODE__MASK_PS_MASK 0x00000007L
+#define SQ_THREAD_TRACE_MODE__MASK_PS__SHIFT 0x00000000
+#define SQ_THREAD_TRACE_MODE__MASK_VS_MASK 0x00000038L
+#define SQ_THREAD_TRACE_MODE__MASK_VS__SHIFT 0x00000003
+#define SQ_THREAD_TRACE_MODE__MODE_MASK 0x00600000L
+#define SQ_THREAD_TRACE_MODE__MODE__SHIFT 0x00000015
+#define SQ_THREAD_TRACE_MODE__PRIV_MASK 0x04000000L
+#define SQ_THREAD_TRACE_MODE__PRIV__SHIFT 0x0000001a
+#define SQ_THREAD_TRACE_MODE__TEST_MODE_MASK 0x20000000L
+#define SQ_THREAD_TRACE_MODE__TEST_MODE__SHIFT 0x0000001d
+#define SQ_THREAD_TRACE_MODE__WRAP_MASK 0x80000000L
+#define SQ_THREAD_TRACE_MODE__WRAP__SHIFT 0x0000001f
+#define SQ_THREAD_TRACE_PERF_MASK__SH0_MASK_MASK 0x0000ffffL
+#define SQ_THREAD_TRACE_PERF_MASK__SH0_MASK__SHIFT 0x00000000
+#define SQ_THREAD_TRACE_PERF_MASK__SH1_MASK_MASK 0xffff0000L
+#define SQ_THREAD_TRACE_PERF_MASK__SH1_MASK__SHIFT 0x00000010
+#define SQ_THREAD_TRACE_SIZE__SIZE_MASK 0x003fffffL
+#define SQ_THREAD_TRACE_SIZE__SIZE__SHIFT 0x00000000
+#define SQ_THREAD_TRACE_STATUS__BUSY_MASK 0x40000000L
+#define SQ_THREAD_TRACE_STATUS__BUSY__SHIFT 0x0000001e
+#define SQ_THREAD_TRACE_STATUS__FINISH_DONE_MASK 0x00070000L
+#define SQ_THREAD_TRACE_STATUS__FINISH_DONE__SHIFT 0x00000010
+#define SQ_THREAD_TRACE_STATUS__FINISH_PENDING_MASK 0x00000007L
+#define SQ_THREAD_TRACE_STATUS__FINISH_PENDING__SHIFT 0x00000000
+#define SQ_THREAD_TRACE_STATUS__FULL_MASK 0x80000000L
+#define SQ_THREAD_TRACE_STATUS__FULL__SHIFT 0x0000001f
+#define SQ_THREAD_TRACE_STATUS__NEW_BUF_MASK 0x20000000L
+#define SQ_THREAD_TRACE_STATUS__NEW_BUF__SHIFT 0x0000001d
+#define SQ_THREAD_TRACE_TOKEN_MASK2__INST_MASK_MASK 0x0000ffffL
+#define SQ_THREAD_TRACE_TOKEN_MASK2__INST_MASK__SHIFT 0x00000000
+#define SQ_THREAD_TRACE_TOKEN_MASK__REG_DROP_ON_STALL_MASK 0x01000000L
+#define SQ_THREAD_TRACE_TOKEN_MASK__REG_DROP_ON_STALL__SHIFT 0x00000018
+#define SQ_THREAD_TRACE_TOKEN_MASK__REG_MASK_MASK 0x00ff0000L
+#define SQ_THREAD_TRACE_TOKEN_MASK__REG_MASK__SHIFT 0x00000010
+#define SQ_THREAD_TRACE_TOKEN_MASK__TOKEN_MASK_MASK 0x0000ffffL
+#define SQ_THREAD_TRACE_TOKEN_MASK__TOKEN_MASK__SHIFT 0x00000000
+#define SQ_THREAD_TRACE_USERDATA_0__DATA_MASK 0xffffffffL
+#define SQ_THREAD_TRACE_USERDATA_0__DATA__SHIFT 0x00000000
+#define SQ_THREAD_TRACE_USERDATA_1__DATA_MASK 0xffffffffL
+#define SQ_THREAD_TRACE_USERDATA_1__DATA__SHIFT 0x00000000
+#define SQ_THREAD_TRACE_USERDATA_2__DATA_MASK 0xffffffffL
+#define SQ_THREAD_TRACE_USERDATA_2__DATA__SHIFT 0x00000000
+#define SQ_THREAD_TRACE_USERDATA_3__DATA_MASK 0xffffffffL
+#define SQ_THREAD_TRACE_USERDATA_3__DATA__SHIFT 0x00000000
+#define SQ_THREAD_TRACE_WORD_CMN__TIME_DELTA_MASK 0x00000010L
+#define SQ_THREAD_TRACE_WORD_CMN__TIME_DELTA__SHIFT 0x00000004
+#define SQ_THREAD_TRACE_WORD_CMN__TOKEN_TYPE_MASK 0x0000000fL
+#define SQ_THREAD_TRACE_WORD_CMN__TOKEN_TYPE__SHIFT 0x00000000
+#define SQ_THREAD_TRACE_WORD_EVENT__EVENT_TYPE_MASK 0x0000fc00L
+#define SQ_THREAD_TRACE_WORD_EVENT__EVENT_TYPE__SHIFT 0x0000000a
+#define SQ_THREAD_TRACE_WORD_EVENT__SH_ID_MASK 0x00000020L
+#define SQ_THREAD_TRACE_WORD_EVENT__SH_ID__SHIFT 0x00000005
+#define SQ_THREAD_TRACE_WORD_EVENT__STAGE_MASK 0x000001c0L
+#define SQ_THREAD_TRACE_WORD_EVENT__STAGE__SHIFT 0x00000006
+#define SQ_THREAD_TRACE_WORD_EVENT__TIME_DELTA_MASK 0x00000010L
+#define SQ_THREAD_TRACE_WORD_EVENT__TIME_DELTA__SHIFT 0x00000004
+#define SQ_THREAD_TRACE_WORD_EVENT__TOKEN_TYPE_MASK 0x0000000fL
+#define SQ_THREAD_TRACE_WORD_EVENT__TOKEN_TYPE__SHIFT 0x00000000
+#define SQ_THREAD_TRACE_WORD_INST__INST_TYPE_MASK 0x0000f000L
+#define SQ_THREAD_TRACE_WORD_INST__INST_TYPE__SHIFT 0x0000000c
+#define SQ_THREAD_TRACE_WORD_INST_PC_1_OF_2__PC_LO_MASK 0xffff0000L
+#define SQ_THREAD_TRACE_WORD_INST_PC_1_OF_2__PC_LO__SHIFT 0x00000010
+#define SQ_THREAD_TRACE_WORD_INST_PC_1_OF_2__SIMD_ID_MASK 0x00000600L
+#define SQ_THREAD_TRACE_WORD_INST_PC_1_OF_2__SIMD_ID__SHIFT 0x00000009
+#define SQ_THREAD_TRACE_WORD_INST_PC_1_OF_2__TIME_DELTA_MASK 0x00000010L
+#define SQ_THREAD_TRACE_WORD_INST_PC_1_OF_2__TIME_DELTA__SHIFT 0x00000004
+#define SQ_THREAD_TRACE_WORD_INST_PC_1_OF_2__TOKEN_TYPE_MASK 0x0000000fL
+#define SQ_THREAD_TRACE_WORD_INST_PC_1_OF_2__TOKEN_TYPE__SHIFT 0x00000000
+#define SQ_THREAD_TRACE_WORD_INST_PC_1_OF_2__WAVE_ID_MASK 0x000001e0L
+#define SQ_THREAD_TRACE_WORD_INST_PC_1_OF_2__WAVE_ID__SHIFT 0x00000005
+#define SQ_THREAD_TRACE_WORD_INST_PC_2_OF_2__PC_HI_MASK 0x00ffffffL
+#define SQ_THREAD_TRACE_WORD_INST_PC_2_OF_2__PC_HI__SHIFT 0x00000000
+#define SQ_THREAD_TRACE_WORD_INST__SIMD_ID_MASK 0x00000600L
+#define SQ_THREAD_TRACE_WORD_INST__SIMD_ID__SHIFT 0x00000009
+#define SQ_THREAD_TRACE_WORD_INST__SIZE_MASK 0x00000800L
+#define SQ_THREAD_TRACE_WORD_INST__SIZE__SHIFT 0x0000000b
+#define SQ_THREAD_TRACE_WORD_INST__TIME_DELTA_MASK 0x00000010L
+#define SQ_THREAD_TRACE_WORD_INST__TIME_DELTA__SHIFT 0x00000004
+#define SQ_THREAD_TRACE_WORD_INST__TOKEN_TYPE_MASK 0x0000000fL
+#define SQ_THREAD_TRACE_WORD_INST__TOKEN_TYPE__SHIFT 0x00000000
+#define SQ_THREAD_TRACE_WORD_INST_USERDATA_1_OF_2__CU_ID_MASK 0x000003c0L
+#define SQ_THREAD_TRACE_WORD_INST_USERDATA_1_OF_2__CU_ID__SHIFT 0x00000006
+#define SQ_THREAD_TRACE_WORD_INST_USERDATA_1_OF_2__DATA_LO_MASK 0xffff0000L
+#define SQ_THREAD_TRACE_WORD_INST_USERDATA_1_OF_2__DATA_LO__SHIFT 0x00000010
+#define SQ_THREAD_TRACE_WORD_INST_USERDATA_1_OF_2__SH_ID_MASK 0x00000020L
+#define SQ_THREAD_TRACE_WORD_INST_USERDATA_1_OF_2__SH_ID__SHIFT 0x00000005
+#define SQ_THREAD_TRACE_WORD_INST_USERDATA_1_OF_2__SIMD_ID_MASK 0x0000c000L
+#define SQ_THREAD_TRACE_WORD_INST_USERDATA_1_OF_2__SIMD_ID__SHIFT 0x0000000e
+#define SQ_THREAD_TRACE_WORD_INST_USERDATA_1_OF_2__TIME_DELTA_MASK 0x00000010L
+#define SQ_THREAD_TRACE_WORD_INST_USERDATA_1_OF_2__TIME_DELTA__SHIFT 0x00000004
+#define SQ_THREAD_TRACE_WORD_INST_USERDATA_1_OF_2__TOKEN_TYPE_MASK 0x0000000fL
+#define SQ_THREAD_TRACE_WORD_INST_USERDATA_1_OF_2__TOKEN_TYPE__SHIFT 0x00000000
+#define SQ_THREAD_TRACE_WORD_INST_USERDATA_1_OF_2__WAVE_ID_MASK 0x00003c00L
+#define SQ_THREAD_TRACE_WORD_INST_USERDATA_1_OF_2__WAVE_ID__SHIFT 0x0000000a
+#define SQ_THREAD_TRACE_WORD_INST_USERDATA_2_OF_2__DATA_HI_MASK 0x0000ffffL
+#define SQ_THREAD_TRACE_WORD_INST_USERDATA_2_OF_2__DATA_HI__SHIFT 0x00000000
+#define SQ_THREAD_TRACE_WORD_INST__WAVE_ID_MASK 0x000001e0L
+#define SQ_THREAD_TRACE_WORD_INST__WAVE_ID__SHIFT 0x00000005
+#define SQ_THREAD_TRACE_WORD_ISSUE__INST0_MASK 0x00000300L
+#define SQ_THREAD_TRACE_WORD_ISSUE__INST0__SHIFT 0x00000008
+#define SQ_THREAD_TRACE_WORD_ISSUE__INST1_MASK 0x00000c00L
+#define SQ_THREAD_TRACE_WORD_ISSUE__INST1__SHIFT 0x0000000a
+#define SQ_THREAD_TRACE_WORD_ISSUE__INST2_MASK 0x00003000L
+#define SQ_THREAD_TRACE_WORD_ISSUE__INST2__SHIFT 0x0000000c
+#define SQ_THREAD_TRACE_WORD_ISSUE__INST3_MASK 0x0000c000L
+#define SQ_THREAD_TRACE_WORD_ISSUE__INST3__SHIFT 0x0000000e
+#define SQ_THREAD_TRACE_WORD_ISSUE__INST4_MASK 0x00030000L
+#define SQ_THREAD_TRACE_WORD_ISSUE__INST4__SHIFT 0x00000010
+#define SQ_THREAD_TRACE_WORD_ISSUE__INST5_MASK 0x000c0000L
+#define SQ_THREAD_TRACE_WORD_ISSUE__INST5__SHIFT 0x00000012
+#define SQ_THREAD_TRACE_WORD_ISSUE__INST6_MASK 0x00300000L
+#define SQ_THREAD_TRACE_WORD_ISSUE__INST6__SHIFT 0x00000014
+#define SQ_THREAD_TRACE_WORD_ISSUE__INST7_MASK 0x00c00000L
+#define SQ_THREAD_TRACE_WORD_ISSUE__INST7__SHIFT 0x00000016
+#define SQ_THREAD_TRACE_WORD_ISSUE__INST8_MASK 0x03000000L
+#define SQ_THREAD_TRACE_WORD_ISSUE__INST8__SHIFT 0x00000018
+#define SQ_THREAD_TRACE_WORD_ISSUE__INST9_MASK 0x0c000000L
+#define SQ_THREAD_TRACE_WORD_ISSUE__INST9__SHIFT 0x0000001a
+#define SQ_THREAD_TRACE_WORD_ISSUE__SIMD_ID_MASK 0x00000060L
+#define SQ_THREAD_TRACE_WORD_ISSUE__SIMD_ID__SHIFT 0x00000005
+#define SQ_THREAD_TRACE_WORD_ISSUE__TIME_DELTA_MASK 0x00000010L
+#define SQ_THREAD_TRACE_WORD_ISSUE__TIME_DELTA__SHIFT 0x00000004
+#define SQ_THREAD_TRACE_WORD_ISSUE__TOKEN_TYPE_MASK 0x0000000fL
+#define SQ_THREAD_TRACE_WORD_ISSUE__TOKEN_TYPE__SHIFT 0x00000000
+#define SQ_THREAD_TRACE_WORD_MISC__MISC_TOKEN_TYPE_MASK 0x000000c0L
+#define SQ_THREAD_TRACE_WORD_MISC__MISC_TOKEN_TYPE__SHIFT 0x00000006
+#define SQ_THREAD_TRACE_WORD_MISC__SH_ID_MASK 0x00000020L
+#define SQ_THREAD_TRACE_WORD_MISC__SH_ID__SHIFT 0x00000005
+#define SQ_THREAD_TRACE_WORD_MISC__TIME_DELTA_MASK 0x00000010L
+#define SQ_THREAD_TRACE_WORD_MISC__TIME_DELTA__SHIFT 0x00000004
+#define SQ_THREAD_TRACE_WORD_MISC__TOKEN_TYPE_MASK 0x0000000fL
+#define SQ_THREAD_TRACE_WORD_MISC__TOKEN_TYPE__SHIFT 0x00000000
+#define SQ_THREAD_TRACE_WORD_PERF_1_OF_2__CNTR0_MASK 0x01fff000L
+#define SQ_THREAD_TRACE_WORD_PERF_1_OF_2__CNTR0__SHIFT 0x0000000c
+#define SQ_THREAD_TRACE_WORD_PERF_1_OF_2__CNTR1_LO_MASK 0xfe000000L
+#define SQ_THREAD_TRACE_WORD_PERF_1_OF_2__CNTR1_LO__SHIFT 0x00000019
+#define SQ_THREAD_TRACE_WORD_PERF_1_OF_2__CNTR_BANK_MASK 0x00000c00L
+#define SQ_THREAD_TRACE_WORD_PERF_1_OF_2__CNTR_BANK__SHIFT 0x0000000a
+#define SQ_THREAD_TRACE_WORD_PERF_1_OF_2__CU_ID_MASK 0x000003c0L
+#define SQ_THREAD_TRACE_WORD_PERF_1_OF_2__CU_ID__SHIFT 0x00000006
+#define SQ_THREAD_TRACE_WORD_PERF_1_OF_2__SH_ID_MASK 0x00000020L
+#define SQ_THREAD_TRACE_WORD_PERF_1_OF_2__SH_ID__SHIFT 0x00000005
+#define SQ_THREAD_TRACE_WORD_PERF_1_OF_2__TIME_DELTA_MASK 0x00000010L
+#define SQ_THREAD_TRACE_WORD_PERF_1_OF_2__TIME_DELTA__SHIFT 0x00000004
+#define SQ_THREAD_TRACE_WORD_PERF_1_OF_2__TOKEN_TYPE_MASK 0x0000000fL
+#define SQ_THREAD_TRACE_WORD_PERF_1_OF_2__TOKEN_TYPE__SHIFT 0x00000000
+#define SQ_THREAD_TRACE_WORD_PERF_2_OF_2__CNTR1_HI_MASK 0x0000003fL
+#define SQ_THREAD_TRACE_WORD_PERF_2_OF_2__CNTR1_HI__SHIFT 0x00000000
+#define SQ_THREAD_TRACE_WORD_PERF_2_OF_2__CNTR2_MASK 0x0007ffc0L
+#define SQ_THREAD_TRACE_WORD_PERF_2_OF_2__CNTR2__SHIFT 0x00000006
+#define SQ_THREAD_TRACE_WORD_PERF_2_OF_2__CNTR3_MASK 0xfff80000L
+#define SQ_THREAD_TRACE_WORD_PERF_2_OF_2__CNTR3__SHIFT 0x00000013
+#define SQ_THREAD_TRACE_WORD_REG_1_OF_2__ME_ID_MASK 0x00000180L
+#define SQ_THREAD_TRACE_WORD_REG_1_OF_2__ME_ID__SHIFT 0x00000007
+#define SQ_THREAD_TRACE_WORD_REG_1_OF_2__PIPE_ID_MASK 0x00000060L
+#define SQ_THREAD_TRACE_WORD_REG_1_OF_2__PIPE_ID__SHIFT 0x00000005
+#define SQ_THREAD_TRACE_WORD_REG_1_OF_2__REG_ADDR_MASK 0xffff0000L
+#define SQ_THREAD_TRACE_WORD_REG_1_OF_2__REG_ADDR__SHIFT 0x00000010
+#define SQ_THREAD_TRACE_WORD_REG_1_OF_2__REG_DROPPED_PREV_MASK 0x00000200L
+#define SQ_THREAD_TRACE_WORD_REG_1_OF_2__REG_DROPPED_PREV__SHIFT 0x00000009
+#define SQ_THREAD_TRACE_WORD_REG_1_OF_2__REG_OP_MASK 0x00008000L
+#define SQ_THREAD_TRACE_WORD_REG_1_OF_2__REG_OP__SHIFT 0x0000000f
+#define SQ_THREAD_TRACE_WORD_REG_1_OF_2__REG_PRIV_MASK 0x00004000L
+#define SQ_THREAD_TRACE_WORD_REG_1_OF_2__REG_PRIV__SHIFT 0x0000000e
+#define SQ_THREAD_TRACE_WORD_REG_1_OF_2__REG_TYPE_MASK 0x00001c00L
+#define SQ_THREAD_TRACE_WORD_REG_1_OF_2__REG_TYPE__SHIFT 0x0000000a
+#define SQ_THREAD_TRACE_WORD_REG_1_OF_2__TIME_DELTA_MASK 0x00000010L
+#define SQ_THREAD_TRACE_WORD_REG_1_OF_2__TIME_DELTA__SHIFT 0x00000004
+#define SQ_THREAD_TRACE_WORD_REG_1_OF_2__TOKEN_TYPE_MASK 0x0000000fL
+#define SQ_THREAD_TRACE_WORD_REG_1_OF_2__TOKEN_TYPE__SHIFT 0x00000000
+#define SQ_THREAD_TRACE_WORD_REG_2_OF_2__DATA_MASK 0xffffffffL
+#define SQ_THREAD_TRACE_WORD_REG_2_OF_2__DATA__SHIFT 0x00000000
+#define SQ_THREAD_TRACE_WORD_TIMESTAMP_1_OF_2__TIME_LO_MASK 0xffff0000L
+#define SQ_THREAD_TRACE_WORD_TIMESTAMP_1_OF_2__TIME_LO__SHIFT 0x00000010
+#define SQ_THREAD_TRACE_WORD_TIMESTAMP_1_OF_2__TOKEN_TYPE_MASK 0x0000000fL
+#define SQ_THREAD_TRACE_WORD_TIMESTAMP_1_OF_2__TOKEN_TYPE__SHIFT 0x00000000
+#define SQ_THREAD_TRACE_WORD_TIMESTAMP_2_OF_2__TIME_HI_MASK 0xffffffffL
+#define SQ_THREAD_TRACE_WORD_TIMESTAMP_2_OF_2__TIME_HI__SHIFT 0x00000000
+#define SQ_THREAD_TRACE_WORD_WAVE__CU_ID_MASK 0x000003c0L
+#define SQ_THREAD_TRACE_WORD_WAVE__CU_ID__SHIFT 0x00000006
+#define SQ_THREAD_TRACE_WORD_WAVE__SH_ID_MASK 0x00000020L
+#define SQ_THREAD_TRACE_WORD_WAVE__SH_ID__SHIFT 0x00000005
+#define SQ_THREAD_TRACE_WORD_WAVE__SIMD_ID_MASK 0x0000c000L
+#define SQ_THREAD_TRACE_WORD_WAVE__SIMD_ID__SHIFT 0x0000000e
+#define SQ_THREAD_TRACE_WORD_WAVE_START__COUNT_MASK 0x1fc00000L
+#define SQ_THREAD_TRACE_WORD_WAVE_START__COUNT__SHIFT 0x00000016
+#define SQ_THREAD_TRACE_WORD_WAVE_START__CU_ID_MASK 0x000003c0L
+#define SQ_THREAD_TRACE_WORD_WAVE_START__CU_ID__SHIFT 0x00000006
+#define SQ_THREAD_TRACE_WORD_WAVE_START__DISPATCHER_MASK 0x001f0000L
+#define SQ_THREAD_TRACE_WORD_WAVE_START__DISPATCHER__SHIFT 0x00000010
+#define SQ_THREAD_TRACE_WORD_WAVE_START__SH_ID_MASK 0x00000020L
+#define SQ_THREAD_TRACE_WORD_WAVE_START__SH_ID__SHIFT 0x00000005
+#define SQ_THREAD_TRACE_WORD_WAVE_START__SIMD_ID_MASK 0x0000c000L
+#define SQ_THREAD_TRACE_WORD_WAVE_START__SIMD_ID__SHIFT 0x0000000e
+#define SQ_THREAD_TRACE_WORD_WAVE_START__TG_ID_MASK 0xe0000000L
+#define SQ_THREAD_TRACE_WORD_WAVE_START__TG_ID__SHIFT 0x0000001d
+#define SQ_THREAD_TRACE_WORD_WAVE_START__TIME_DELTA_MASK 0x00000010L
+#define SQ_THREAD_TRACE_WORD_WAVE_START__TIME_DELTA__SHIFT 0x00000004
+#define SQ_THREAD_TRACE_WORD_WAVE_START__TOKEN_TYPE_MASK 0x0000000fL
+#define SQ_THREAD_TRACE_WORD_WAVE_START__TOKEN_TYPE__SHIFT 0x00000000
+#define SQ_THREAD_TRACE_WORD_WAVE_START__VS_NO_ALLOC_OR_GROUPED_MASK 0x00200000L
+#define SQ_THREAD_TRACE_WORD_WAVE_START__VS_NO_ALLOC_OR_GROUPED__SHIFT 0x00000015
+#define SQ_THREAD_TRACE_WORD_WAVE_START__WAVE_ID_MASK 0x00003c00L
+#define SQ_THREAD_TRACE_WORD_WAVE_START__WAVE_ID__SHIFT 0x0000000a
+#define SQ_THREAD_TRACE_WORD_WAVE__TIME_DELTA_MASK 0x00000010L
+#define SQ_THREAD_TRACE_WORD_WAVE__TIME_DELTA__SHIFT 0x00000004
+#define SQ_THREAD_TRACE_WORD_WAVE__TOKEN_TYPE_MASK 0x0000000fL
+#define SQ_THREAD_TRACE_WORD_WAVE__TOKEN_TYPE__SHIFT 0x00000000
+#define SQ_THREAD_TRACE_WORD_WAVE__WAVE_ID_MASK 0x00003c00L
+#define SQ_THREAD_TRACE_WORD_WAVE__WAVE_ID__SHIFT 0x0000000a
+#define SQ_THREAD_TRACE_WPTR__READ_OFFSET_MASK 0xc0000000L
+#define SQ_THREAD_TRACE_WPTR__READ_OFFSET__SHIFT 0x0000001e
+#define SQ_THREAD_TRACE_WPTR__WPTR_MASK 0x3fffffffL
+#define SQ_THREAD_TRACE_WPTR__WPTR__SHIFT 0x00000000
+#define SQ_TIME_HI__TIME_MASK 0xffffffffL
+#define SQ_TIME_HI__TIME__SHIFT 0x00000000
+#define SQ_TIME_LO__TIME_MASK 0xffffffffL
+#define SQ_TIME_LO__TIME__SHIFT 0x00000000
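[Editor's sketch] SQ_TIME_HI and SQ_TIME_LO split a 64-bit time value across two full 32-bit registers, as the all-ones masks above indicate. A minimal sketch of the usual recombination; the `lo` and `hi` raw readbacks are assumed to come from elsewhere, and any read-ordering requirement between the two halves is hardware behavior this header does not specify:

#include <stdint.h>

/* Combine the SQ_TIME_LO / SQ_TIME_HI halves into one 64-bit value. */
static inline uint64_t sq_time_combine(uint32_t lo, uint32_t hi)
{
	return ((uint64_t)hi << 32) | lo;
}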
+#define SQ_VINTRP__ATTRCHAN_MASK 0x00000300L
+#define SQ_VINTRP__ATTRCHAN__SHIFT 0x00000008
+#define SQ_VINTRP__ATTR_MASK 0x0000fc00L
+#define SQ_VINTRP__ATTR__SHIFT 0x0000000a
+#define SQ_VINTRP__ENCODING_MASK 0xfc000000L
+#define SQ_VINTRP__ENCODING__SHIFT 0x0000001a
+#define SQ_VINTRP__OP_MASK 0x00030000L
+#define SQ_VINTRP__OP__SHIFT 0x00000010
+#define SQ_VINTRP__VDST_MASK 0x03fc0000L
+#define SQ_VINTRP__VDST__SHIFT 0x00000012
+#define SQ_VINTRP__VSRC_MASK 0x000000ffL
+#define SQ_VINTRP__VSRC__SHIFT 0x00000000
+#define SQ_VOP1__ENCODING_MASK 0xfe000000L
+#define SQ_VOP1__ENCODING__SHIFT 0x00000019
+#define SQ_VOP1__OP_MASK 0x0001fe00L
+#define SQ_VOP1__OP__SHIFT 0x00000009
+#define SQ_VOP1__SRC0_MASK 0x000001ffL
+#define SQ_VOP1__SRC0__SHIFT 0x00000000
+#define SQ_VOP1__VDST_MASK 0x01fe0000L
+#define SQ_VOP1__VDST__SHIFT 0x00000011
+#define SQ_VOP2__ENCODING_MASK 0x80000000L
+#define SQ_VOP2__ENCODING__SHIFT 0x0000001f
+#define SQ_VOP2__OP_MASK 0x7e000000L
+#define SQ_VOP2__OP__SHIFT 0x00000019
+#define SQ_VOP2__SRC0_MASK 0x000001ffL
+#define SQ_VOP2__SRC0__SHIFT 0x00000000
+#define SQ_VOP2__VDST_MASK 0x01fe0000L
+#define SQ_VOP2__VDST__SHIFT 0x00000011
+#define SQ_VOP2__VSRC1_MASK 0x0001fe00L
+#define SQ_VOP2__VSRC1__SHIFT 0x00000009
+#define SQ_VOP3_0__ABS_MASK 0x00000700L
+#define SQ_VOP3_0__ABS__SHIFT 0x00000008
+#define SQ_VOP3_0__CLAMP_MASK 0x00000800L
+#define SQ_VOP3_0__CLAMP__SHIFT 0x0000000b
+#define SQ_VOP3_0__ENCODING_MASK 0xfc000000L
+#define SQ_VOP3_0__ENCODING__SHIFT 0x0000001a
+#define SQ_VOP3_0__OP_MASK 0x03fe0000L
+#define SQ_VOP3_0__OP__SHIFT 0x00000011
+#define SQ_VOP3_0_SDST_ENC__ENCODING_MASK 0xfc000000L
+#define SQ_VOP3_0_SDST_ENC__ENCODING__SHIFT 0x0000001a
+#define SQ_VOP3_0_SDST_ENC__OP_MASK 0x03fe0000L
+#define SQ_VOP3_0_SDST_ENC__OP__SHIFT 0x00000011
+#define SQ_VOP3_0_SDST_ENC__SDST_MASK 0x00007f00L
+#define SQ_VOP3_0_SDST_ENC__SDST__SHIFT 0x00000008
+#define SQ_VOP3_0_SDST_ENC__VDST_MASK 0x000000ffL
+#define SQ_VOP3_0_SDST_ENC__VDST__SHIFT 0x00000000
+#define SQ_VOP3_0__VDST_MASK 0x000000ffL
+#define SQ_VOP3_0__VDST__SHIFT 0x00000000
+#define SQ_VOP3_1__NEG_MASK 0xe0000000L
+#define SQ_VOP3_1__NEG__SHIFT 0x0000001d
+#define SQ_VOP3_1__OMOD_MASK 0x18000000L
+#define SQ_VOP3_1__OMOD__SHIFT 0x0000001b
+#define SQ_VOP3_1__SRC0_MASK 0x000001ffL
+#define SQ_VOP3_1__SRC0__SHIFT 0x00000000
+#define SQ_VOP3_1__SRC1_MASK 0x0003fe00L
+#define SQ_VOP3_1__SRC1__SHIFT 0x00000009
+#define SQ_VOP3_1__SRC2_MASK 0x07fc0000L
+#define SQ_VOP3_1__SRC2__SHIFT 0x00000012
+#define SQ_VOPC__ENCODING_MASK 0xfe000000L
+#define SQ_VOPC__ENCODING__SHIFT 0x00000019
+#define SQ_VOPC__OP_MASK 0x01fe0000L
+#define SQ_VOPC__OP__SHIFT 0x00000011
+#define SQ_VOPC__SRC0_MASK 0x000001ffL
+#define SQ_VOPC__SRC0__SHIFT 0x00000000
+#define SQ_VOPC__VSRC1_MASK 0x0001fe00L
+#define SQ_VOPC__VSRC1__SHIFT 0x00000009
+#define SQ_WAVE_EXEC_HI__EXEC_HI_MASK 0xffffffffL
+#define SQ_WAVE_EXEC_HI__EXEC_HI__SHIFT 0x00000000
+#define SQ_WAVE_EXEC_LO__EXEC_LO_MASK 0xffffffffL
+#define SQ_WAVE_EXEC_LO__EXEC_LO__SHIFT 0x00000000
+#define SQ_WAVE_GPR_ALLOC__SGPR_BASE_MASK 0x003f0000L
+#define SQ_WAVE_GPR_ALLOC__SGPR_BASE__SHIFT 0x00000010
+#define SQ_WAVE_GPR_ALLOC__SGPR_SIZE_MASK 0x0f000000L
+#define SQ_WAVE_GPR_ALLOC__SGPR_SIZE__SHIFT 0x00000018
+#define SQ_WAVE_GPR_ALLOC__VGPR_BASE_MASK 0x0000003fL
+#define SQ_WAVE_GPR_ALLOC__VGPR_BASE__SHIFT 0x00000000
+#define SQ_WAVE_GPR_ALLOC__VGPR_SIZE_MASK 0x00003f00L
+#define SQ_WAVE_GPR_ALLOC__VGPR_SIZE__SHIFT 0x00000008
+#define SQ_WAVE_HW_ID__CU_ID_MASK 0x00000f00L
+#define SQ_WAVE_HW_ID__CU_ID__SHIFT 0x00000008
+#define SQ_WAVE_HW_ID__ME_ID_MASK 0xc0000000L
+#define SQ_WAVE_HW_ID__ME_ID__SHIFT 0x0000001e
+#define SQ_WAVE_HW_ID__PIPE_ID_MASK 0x000000c0L
+#define SQ_WAVE_HW_ID__PIPE_ID__SHIFT 0x00000006
+#define SQ_WAVE_HW_ID__QUEUE_ID_MASK 0x07000000L
+#define SQ_WAVE_HW_ID__QUEUE_ID__SHIFT 0x00000018
+#define SQ_WAVE_HW_ID__SE_ID_MASK 0x00002000L
+#define SQ_WAVE_HW_ID__SE_ID__SHIFT 0x0000000d
+#define SQ_WAVE_HW_ID__SH_ID_MASK 0x00001000L
+#define SQ_WAVE_HW_ID__SH_ID__SHIFT 0x0000000c
+#define SQ_WAVE_HW_ID__SIMD_ID_MASK 0x00000030L
+#define SQ_WAVE_HW_ID__SIMD_ID__SHIFT 0x00000004
+#define SQ_WAVE_HW_ID__STATE_ID_MASK 0x38000000L
+#define SQ_WAVE_HW_ID__STATE_ID__SHIFT 0x0000001b
+#define SQ_WAVE_HW_ID__TG_ID_MASK 0x000f0000L
+#define SQ_WAVE_HW_ID__TG_ID__SHIFT 0x00000010
+#define SQ_WAVE_HW_ID__VM_ID_MASK 0x00f00000L
+#define SQ_WAVE_HW_ID__VM_ID__SHIFT 0x00000014
+#define SQ_WAVE_HW_ID__WAVE_ID_MASK 0x0000000fL
+#define SQ_WAVE_HW_ID__WAVE_ID__SHIFT 0x00000000
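[Editor's sketch] The SQ_WAVE_HW_ID fields above together locate a wave in the hardware (wave slot, SIMD, CU, shader array/engine, and so on). Every definition in this file follows the same naming convention, so field extraction is uniformly `(value & FIELD_MASK) >> FIELD__SHIFT`. A minimal decode sketch under that convention; `hw_id` is a stand-in for a raw SQ_WAVE_HW_ID value obtained through whatever wave-debug readback path the driver provides, not an API defined here:

#include <stdint.h>
#include <stdio.h>

/* Generic extraction matching this header's _MASK/__SHIFT naming. */
#define GET_FIELD(val, field) \
	(((val) & field##_MASK) >> field##__SHIFT)

static void decode_sq_wave_hw_id(uint32_t hw_id)
{
	printf("wave %lu simd %lu cu %lu sh %lu se %lu\n",
	       (unsigned long)GET_FIELD(hw_id, SQ_WAVE_HW_ID__WAVE_ID),
	       (unsigned long)GET_FIELD(hw_id, SQ_WAVE_HW_ID__SIMD_ID),
	       (unsigned long)GET_FIELD(hw_id, SQ_WAVE_HW_ID__CU_ID),
	       (unsigned long)GET_FIELD(hw_id, SQ_WAVE_HW_ID__SH_ID),
	       (unsigned long)GET_FIELD(hw_id, SQ_WAVE_HW_ID__SE_ID));
}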
+#define SQ_WAVE_IB_DBG0__ECC_ST_MASK 0x00c00000L
+#define SQ_WAVE_IB_DBG0__ECC_ST__SHIFT 0x00000016
+#define SQ_WAVE_IB_DBG0__HYB_CNT_MASK 0x06000000L
+#define SQ_WAVE_IB_DBG0__HYB_CNT__SHIFT 0x00000019
+#define SQ_WAVE_IB_DBG0__IBUF_RPTR_MASK 0x00000300L
+#define SQ_WAVE_IB_DBG0__IBUF_RPTR__SHIFT 0x00000008
+#define SQ_WAVE_IB_DBG0__IBUF_ST_MASK 0x00000007L
+#define SQ_WAVE_IB_DBG0__IBUF_ST__SHIFT 0x00000000
+#define SQ_WAVE_IB_DBG0__IBUF_WPTR_MASK 0x00000c00L
+#define SQ_WAVE_IB_DBG0__IBUF_WPTR__SHIFT 0x0000000a
+#define SQ_WAVE_IB_DBG0__INST_STR_ST_MASK 0x00070000L
+#define SQ_WAVE_IB_DBG0__INST_STR_ST__SHIFT 0x00000010
+#define SQ_WAVE_IB_DBG0__IS_HYB_MASK 0x01000000L
+#define SQ_WAVE_IB_DBG0__IS_HYB__SHIFT 0x00000018
+#define SQ_WAVE_IB_DBG0__KILL_MASK 0x08000000L
+#define SQ_WAVE_IB_DBG0__KILL__SHIFT 0x0000001b
+#define SQ_WAVE_IB_DBG0__MISC_CNT_MASK 0x00380000L
+#define SQ_WAVE_IB_DBG0__MISC_CNT__SHIFT 0x00000013
+#define SQ_WAVE_IB_DBG0__NEED_KILL_IFETCH_MASK 0x10000000L
+#define SQ_WAVE_IB_DBG0__NEED_KILL_IFETCH__SHIFT 0x0000001c
+#define SQ_WAVE_IB_DBG0__NEED_NEXT_DW_MASK 0x00000010L
+#define SQ_WAVE_IB_DBG0__NEED_NEXT_DW__SHIFT 0x00000004
+#define SQ_WAVE_IB_DBG0__NO_PREFETCH_CNT_MASK 0x000000e0L
+#define SQ_WAVE_IB_DBG0__NO_PREFETCH_CNT__SHIFT 0x00000005
+#define SQ_WAVE_IB_DBG0__PC_INVALID_MASK 0x00000008L
+#define SQ_WAVE_IB_DBG0__PC_INVALID__SHIFT 0x00000003
+#define SQ_WAVE_IB_STS__EXP_CNT_MASK 0x00000070L
+#define SQ_WAVE_IB_STS__EXP_CNT__SHIFT 0x00000004
+#define SQ_WAVE_IB_STS__LGKM_CNT_MASK 0x00001f00L
+#define SQ_WAVE_IB_STS__LGKM_CNT__SHIFT 0x00000008
+#define SQ_WAVE_IB_STS__VALU_CNT_MASK 0x0000e000L
+#define SQ_WAVE_IB_STS__VALU_CNT__SHIFT 0x0000000d
+#define SQ_WAVE_IB_STS__VM_CNT_MASK 0x0000000fL
+#define SQ_WAVE_IB_STS__VM_CNT__SHIFT 0x00000000
+#define SQ_WAVE_INST_DW0__INST_DW0_MASK 0xffffffffL
+#define SQ_WAVE_INST_DW0__INST_DW0__SHIFT 0x00000000
+#define SQ_WAVE_INST_DW1__INST_DW1_MASK 0xffffffffL
+#define SQ_WAVE_INST_DW1__INST_DW1__SHIFT 0x00000000
+#define SQ_WAVE_LDS_ALLOC__LDS_BASE_MASK 0x000000ffL
+#define SQ_WAVE_LDS_ALLOC__LDS_BASE__SHIFT 0x00000000
+#define SQ_WAVE_LDS_ALLOC__LDS_SIZE_MASK 0x001ff000L
+#define SQ_WAVE_LDS_ALLOC__LDS_SIZE__SHIFT 0x0000000c
+#define SQ_WAVE_M0__M0_MASK 0xffffffffL
+#define SQ_WAVE_M0__M0__SHIFT 0x00000000
+#define SQ_WAVE_MODE__CSP_MASK 0xe0000000L
+#define SQ_WAVE_MODE__CSP__SHIFT 0x0000001d
+#define SQ_WAVE_MODE__DEBUG_EN_MASK 0x00000800L
+#define SQ_WAVE_MODE__DEBUG_EN__SHIFT 0x0000000b
+#define SQ_WAVE_MODE__DX10_CLAMP_MASK 0x00000100L
+#define SQ_WAVE_MODE__DX10_CLAMP__SHIFT 0x00000008
+#define SQ_WAVE_MODE__EXCP_EN_MASK 0x0007f000L
+#define SQ_WAVE_MODE__EXCP_EN__SHIFT 0x0000000c
+#define SQ_WAVE_MODE__FP_DENORM_MASK 0x000000f0L
+#define SQ_WAVE_MODE__FP_DENORM__SHIFT 0x00000004
+#define SQ_WAVE_MODE__FP_ROUND_MASK 0x0000000fL
+#define SQ_WAVE_MODE__FP_ROUND__SHIFT 0x00000000
+#define SQ_WAVE_MODE__IEEE_MASK 0x00000200L
+#define SQ_WAVE_MODE__IEEE__SHIFT 0x00000009
+#define SQ_WAVE_MODE__LOD_CLAMPED_MASK 0x00000400L
+#define SQ_WAVE_MODE__LOD_CLAMPED__SHIFT 0x0000000a
+#define SQ_WAVE_MODE__VSKIP_MASK 0x10000000L
+#define SQ_WAVE_MODE__VSKIP__SHIFT 0x0000001c
+#define SQ_WAVE_PC_HI__PC_HI_MASK 0x000000ffL
+#define SQ_WAVE_PC_HI__PC_HI__SHIFT 0x00000000
+#define SQ_WAVE_PC_LO__PC_LO_MASK 0xffffffffL
+#define SQ_WAVE_PC_LO__PC_LO__SHIFT 0x00000000
+#define SQ_WAVE_STATUS__COND_DBG_SYS_MASK 0x00200000L
+#define SQ_WAVE_STATUS__COND_DBG_SYS__SHIFT 0x00000015
+#define SQ_WAVE_STATUS__COND_DBG_USER_MASK 0x00100000L
+#define SQ_WAVE_STATUS__COND_DBG_USER__SHIFT 0x00000014
+#define SQ_WAVE_STATUS__DATA_ATC_MASK 0x00400000L
+#define SQ_WAVE_STATUS__DATA_ATC__SHIFT 0x00000016
+#define SQ_WAVE_STATUS__DISPATCH_CACHE_CTRL_MASK 0x07000000L
+#define SQ_WAVE_STATUS__DISPATCH_CACHE_CTRL__SHIFT 0x00000018
+#define SQ_WAVE_STATUS__ECC_ERR_MASK 0x00020000L
+#define SQ_WAVE_STATUS__ECC_ERR__SHIFT 0x00000011
+#define SQ_WAVE_STATUS__EXECZ_MASK 0x00000200L
+#define SQ_WAVE_STATUS__EXECZ__SHIFT 0x00000009
+#define SQ_WAVE_STATUS__EXPORT_RDY_MASK 0x00000100L
+#define SQ_WAVE_STATUS__EXPORT_RDY__SHIFT 0x00000008
+#define SQ_WAVE_STATUS__HALT_MASK 0x00002000L
+#define SQ_WAVE_STATUS__HALT__SHIFT 0x0000000d
+#define SQ_WAVE_STATUS__IN_BARRIER_MASK 0x00001000L
+#define SQ_WAVE_STATUS__IN_BARRIER__SHIFT 0x0000000c
+#define SQ_WAVE_STATUS__INST_ATC_MASK 0x00800000L
+#define SQ_WAVE_STATUS__INST_ATC__SHIFT 0x00000017
+#define SQ_WAVE_STATUS__IN_TG_MASK 0x00000800L
+#define SQ_WAVE_STATUS__IN_TG__SHIFT 0x0000000b
+#define SQ_WAVE_STATUS__MUST_EXPORT_MASK 0x08000000L
+#define SQ_WAVE_STATUS__MUST_EXPORT__SHIFT 0x0000001b
+#define SQ_WAVE_STATUS__PERF_EN_MASK 0x00080000L
+#define SQ_WAVE_STATUS__PERF_EN__SHIFT 0x00000013
+#define SQ_WAVE_STATUS__PRIV_MASK 0x00000020L
+#define SQ_WAVE_STATUS__PRIV__SHIFT 0x00000005
+#define SQ_WAVE_STATUS__SCC_MASK 0x00000001L
+#define SQ_WAVE_STATUS__SCC__SHIFT 0x00000000
+#define SQ_WAVE_STATUS__SKIP_EXPORT_MASK 0x00040000L
+#define SQ_WAVE_STATUS__SKIP_EXPORT__SHIFT 0x00000012
+#define SQ_WAVE_STATUS__SPI_PRIO_MASK 0x00000006L
+#define SQ_WAVE_STATUS__SPI_PRIO__SHIFT 0x00000001
+#define SQ_WAVE_STATUS__TRAP_EN_MASK 0x00000040L
+#define SQ_WAVE_STATUS__TRAP_EN__SHIFT 0x00000006
+#define SQ_WAVE_STATUS__TRAP_MASK 0x00004000L
+#define SQ_WAVE_STATUS__TRAP__SHIFT 0x0000000e
+#define SQ_WAVE_STATUS__TTRACE_CU_EN_MASK 0x00008000L
+#define SQ_WAVE_STATUS__TTRACE_CU_EN__SHIFT 0x0000000f
+#define SQ_WAVE_STATUS__TTRACE_EN_MASK 0x00000080L
+#define SQ_WAVE_STATUS__TTRACE_EN__SHIFT 0x00000007
+#define SQ_WAVE_STATUS__VALID_MASK 0x00010000L
+#define SQ_WAVE_STATUS__VALID__SHIFT 0x00000010
+#define SQ_WAVE_STATUS__VCCZ_MASK 0x00000400L
+#define SQ_WAVE_STATUS__VCCZ__SHIFT 0x0000000a
+#define SQ_WAVE_STATUS__WAVE_PRIO_MASK 0x00000018L
+#define SQ_WAVE_STATUS__WAVE_PRIO__SHIFT 0x00000003
+#define SQ_WAVE_TBA_HI__ADDR_HI_MASK 0x000000ffL
+#define SQ_WAVE_TBA_HI__ADDR_HI__SHIFT 0x00000000
+#define SQ_WAVE_TBA_LO__ADDR_LO_MASK 0xffffffffL
+#define SQ_WAVE_TBA_LO__ADDR_LO__SHIFT 0x00000000
+#define SQ_WAVE_TMA_HI__ADDR_HI_MASK 0x000000ffL
+#define SQ_WAVE_TMA_HI__ADDR_HI__SHIFT 0x00000000
+#define SQ_WAVE_TMA_LO__ADDR_LO_MASK 0xffffffffL
+#define SQ_WAVE_TMA_LO__ADDR_LO__SHIFT 0x00000000
+#define SQ_WAVE_TRAPSTS__DP_RATE_MASK 0xe0000000L
+#define SQ_WAVE_TRAPSTS__DP_RATE__SHIFT 0x0000001d
+#define SQ_WAVE_TRAPSTS__EXCP_CYCLE_MASK 0x003f0000L
+#define SQ_WAVE_TRAPSTS__EXCP_CYCLE__SHIFT 0x00000010
+#define SQ_WAVE_TRAPSTS__EXCP_MASK 0x0000007fL
+#define SQ_WAVE_TRAPSTS__EXCP__SHIFT 0x00000000
+#define SQ_WAVE_TTMP0__DATA_MASK 0xffffffffL
+#define SQ_WAVE_TTMP0__DATA__SHIFT 0x00000000
+#define SQ_WAVE_TTMP10__DATA_MASK 0xffffffffL
+#define SQ_WAVE_TTMP10__DATA__SHIFT 0x00000000
+#define SQ_WAVE_TTMP11__DATA_MASK 0xffffffffL
+#define SQ_WAVE_TTMP11__DATA__SHIFT 0x00000000
+#define SQ_WAVE_TTMP1__DATA_MASK 0xffffffffL
+#define SQ_WAVE_TTMP1__DATA__SHIFT 0x00000000
+#define SQ_WAVE_TTMP2__DATA_MASK 0xffffffffL
+#define SQ_WAVE_TTMP2__DATA__SHIFT 0x00000000
+#define SQ_WAVE_TTMP3__DATA_MASK 0xffffffffL
+#define SQ_WAVE_TTMP3__DATA__SHIFT 0x00000000
+#define SQ_WAVE_TTMP4__DATA_MASK 0xffffffffL
+#define SQ_WAVE_TTMP4__DATA__SHIFT 0x00000000
+#define SQ_WAVE_TTMP5__DATA_MASK 0xffffffffL
+#define SQ_WAVE_TTMP5__DATA__SHIFT 0x00000000
+#define SQ_WAVE_TTMP6__DATA_MASK 0xffffffffL
+#define SQ_WAVE_TTMP6__DATA__SHIFT 0x00000000
+#define SQ_WAVE_TTMP7__DATA_MASK 0xffffffffL
+#define SQ_WAVE_TTMP7__DATA__SHIFT 0x00000000
+#define SQ_WAVE_TTMP8__DATA_MASK 0xffffffffL
+#define SQ_WAVE_TTMP8__DATA__SHIFT 0x00000000
+#define SQ_WAVE_TTMP9__DATA_MASK 0xffffffffL
+#define SQ_WAVE_TTMP9__DATA__SHIFT 0x00000000
+#define SX_DEBUG_1__DEBUG_DATA_MASK 0xffffff80L
+#define SX_DEBUG_1__DEBUG_DATA__SHIFT 0x00000007
+#define SX_DEBUG_1__SX_DB_QUAD_CREDIT_MASK 0x0000007fL
+#define SX_DEBUG_1__SX_DB_QUAD_CREDIT__SHIFT 0x00000000
+#define SX_DEBUG_BUSY_2__COL_BUFF3_BANK2_VAL1_BUSY_MASK 0x80000000L
+#define SX_DEBUG_BUSY_2__COL_BUFF3_BANK2_VAL1_BUSY__SHIFT 0x0000001f
+#define SX_DEBUG_BUSY_2__COL_BUFF3_BANK2_VAL2_BUSY_MASK 0x40000000L
+#define SX_DEBUG_BUSY_2__COL_BUFF3_BANK2_VAL2_BUSY__SHIFT 0x0000001e
+#define SX_DEBUG_BUSY_2__COL_BUFF3_BANK2_VAL3_BUSY_MASK 0x20000000L
+#define SX_DEBUG_BUSY_2__COL_BUFF3_BANK2_VAL3_BUSY__SHIFT 0x0000001d
+#define SX_DEBUG_BUSY_2__COL_BUFF3_BANK3_VAL0_BUSY_MASK 0x10000000L
+#define SX_DEBUG_BUSY_2__COL_BUFF3_BANK3_VAL0_BUSY__SHIFT 0x0000001c
+#define SX_DEBUG_BUSY_2__COL_BUFF3_BANK3_VAL1_BUSY_MASK 0x08000000L
+#define SX_DEBUG_BUSY_2__COL_BUFF3_BANK3_VAL1_BUSY__SHIFT 0x0000001b
+#define SX_DEBUG_BUSY_2__COL_BUFF3_BANK3_VAL2_BUSY_MASK 0x04000000L
+#define SX_DEBUG_BUSY_2__COL_BUFF3_BANK3_VAL2_BUSY__SHIFT 0x0000001a
+#define SX_DEBUG_BUSY_2__COL_BUFF3_BANK3_VAL3_BUSY_MASK 0x02000000L
+#define SX_DEBUG_BUSY_2__COL_BUFF3_BANK3_VAL3_BUSY__SHIFT 0x00000019
+#define SX_DEBUG_BUSY_2__COL_DBIF0_FIFO_BUSY_MASK 0x00800000L
+#define SX_DEBUG_BUSY_2__COL_DBIF0_FIFO_BUSY__SHIFT 0x00000017
+#define SX_DEBUG_BUSY_2__COL_DBIF0_READ_VALID_MASK 0x01000000L
+#define SX_DEBUG_BUSY_2__COL_DBIF0_READ_VALID__SHIFT 0x00000018
+#define SX_DEBUG_BUSY_2__COL_DBIF0_SENDFREE_BUSY_MASK 0x00400000L
+#define SX_DEBUG_BUSY_2__COL_DBIF0_SENDFREE_BUSY__SHIFT 0x00000016
+#define SX_DEBUG_BUSY_2__COL_DBIF1_FIFO_BUSY_MASK 0x00100000L
+#define SX_DEBUG_BUSY_2__COL_DBIF1_FIFO_BUSY__SHIFT 0x00000014
+#define SX_DEBUG_BUSY_2__COL_DBIF1_READ_VALID_MASK 0x00200000L
+#define SX_DEBUG_BUSY_2__COL_DBIF1_READ_VALID__SHIFT 0x00000015
+#define SX_DEBUG_BUSY_2__COL_DBIF1_SENDFREE_BUSY_MASK 0x00080000L
+#define SX_DEBUG_BUSY_2__COL_DBIF1_SENDFREE_BUSY__SHIFT 0x00000013
+#define SX_DEBUG_BUSY_2__COL_DBIF2_FIFO_BUSY_MASK 0x00020000L
+#define SX_DEBUG_BUSY_2__COL_DBIF2_FIFO_BUSY__SHIFT 0x00000011
+#define SX_DEBUG_BUSY_2__COL_DBIF2_READ_VALID_MASK 0x00040000L
+#define SX_DEBUG_BUSY_2__COL_DBIF2_READ_VALID__SHIFT 0x00000012
+#define SX_DEBUG_BUSY_2__COL_DBIF2_SENDFREE_BUSY_MASK 0x00010000L
+#define SX_DEBUG_BUSY_2__COL_DBIF2_SENDFREE_BUSY__SHIFT 0x00000010
+#define SX_DEBUG_BUSY_2__COL_DBIF3_FIFO_BUSY_MASK 0x00004000L
+#define SX_DEBUG_BUSY_2__COL_DBIF3_FIFO_BUSY__SHIFT 0x0000000e
+#define SX_DEBUG_BUSY_2__COL_DBIF3_READ_VALID_MASK 0x00008000L
+#define SX_DEBUG_BUSY_2__COL_DBIF3_READ_VALID__SHIFT 0x0000000f
+#define SX_DEBUG_BUSY_2__COL_DBIF3_SENDFREE_BUSY_MASK 0x00002000L
+#define SX_DEBUG_BUSY_2__COL_DBIF3_SENDFREE_BUSY__SHIFT 0x0000000d
+#define SX_DEBUG_BUSY_2__COL_REQ0_BUSY_MASK 0x00001000L
+#define SX_DEBUG_BUSY_2__COL_REQ0_BUSY__SHIFT 0x0000000c
+#define SX_DEBUG_BUSY_2__COL_REQ0_FREECNT_NE0_MASK 0x00000400L
+#define SX_DEBUG_BUSY_2__COL_REQ0_FREECNT_NE0__SHIFT 0x0000000a
+#define SX_DEBUG_BUSY_2__COL_REQ0_IDLE_MASK 0x00000800L
+#define SX_DEBUG_BUSY_2__COL_REQ0_IDLE__SHIFT 0x0000000b
+#define SX_DEBUG_BUSY_2__COL_REQ1_BUSY_MASK 0x00000200L
+#define SX_DEBUG_BUSY_2__COL_REQ1_BUSY__SHIFT 0x00000009
+#define SX_DEBUG_BUSY_2__COL_REQ1_FREECNT_NE0_MASK 0x00000080L
+#define SX_DEBUG_BUSY_2__COL_REQ1_FREECNT_NE0__SHIFT 0x00000007
+#define SX_DEBUG_BUSY_2__COL_REQ1_IDLE_MASK 0x00000100L
+#define SX_DEBUG_BUSY_2__COL_REQ1_IDLE__SHIFT 0x00000008
+#define SX_DEBUG_BUSY_2__COL_REQ2_BUSY_MASK 0x00000040L
+#define SX_DEBUG_BUSY_2__COL_REQ2_BUSY__SHIFT 0x00000006
+#define SX_DEBUG_BUSY_2__COL_REQ2_FREECNT_NE0_MASK 0x00000010L
+#define SX_DEBUG_BUSY_2__COL_REQ2_FREECNT_NE0__SHIFT 0x00000004
+#define SX_DEBUG_BUSY_2__COL_REQ2_IDLE_MASK 0x00000020L
+#define SX_DEBUG_BUSY_2__COL_REQ2_IDLE__SHIFT 0x00000005
+#define SX_DEBUG_BUSY_2__COL_REQ3_BUSY_MASK 0x00000008L
+#define SX_DEBUG_BUSY_2__COL_REQ3_BUSY__SHIFT 0x00000003
+#define SX_DEBUG_BUSY_2__COL_REQ3_FREECNT_NE0_MASK 0x00000002L
+#define SX_DEBUG_BUSY_2__COL_REQ3_FREECNT_NE0__SHIFT 0x00000001
+#define SX_DEBUG_BUSY_2__COL_REQ3_IDLE_MASK 0x00000004L
+#define SX_DEBUG_BUSY_2__COL_REQ3_IDLE__SHIFT 0x00000002
+#define SX_DEBUG_BUSY_2__COL_SCBD_BUSY_MASK 0x00000001L
+#define SX_DEBUG_BUSY_2__COL_SCBD_BUSY__SHIFT 0x00000000
+#define SX_DEBUG_BUSY_3__COL_BUFF1_BANK2_VAL1_BUSY_MASK 0x80000000L
+#define SX_DEBUG_BUSY_3__COL_BUFF1_BANK2_VAL1_BUSY__SHIFT 0x0000001f
+#define SX_DEBUG_BUSY_3__COL_BUFF1_BANK2_VAL2_BUSY_MASK 0x40000000L
+#define SX_DEBUG_BUSY_3__COL_BUFF1_BANK2_VAL2_BUSY__SHIFT 0x0000001e
+#define SX_DEBUG_BUSY_3__COL_BUFF1_BANK2_VAL3_BUSY_MASK 0x20000000L
+#define SX_DEBUG_BUSY_3__COL_BUFF1_BANK2_VAL3_BUSY__SHIFT 0x0000001d
+#define SX_DEBUG_BUSY_3__COL_BUFF1_BANK3_VAL0_BUSY_MASK 0x10000000L
+#define SX_DEBUG_BUSY_3__COL_BUFF1_BANK3_VAL0_BUSY__SHIFT 0x0000001c
+#define SX_DEBUG_BUSY_3__COL_BUFF1_BANK3_VAL1_BUSY_MASK 0x08000000L
+#define SX_DEBUG_BUSY_3__COL_BUFF1_BANK3_VAL1_BUSY__SHIFT 0x0000001b
+#define SX_DEBUG_BUSY_3__COL_BUFF1_BANK3_VAL2_BUSY_MASK 0x04000000L
+#define SX_DEBUG_BUSY_3__COL_BUFF1_BANK3_VAL2_BUSY__SHIFT 0x0000001a
+#define SX_DEBUG_BUSY_3__COL_BUFF1_BANK3_VAL3_BUSY_MASK 0x02000000L
+#define SX_DEBUG_BUSY_3__COL_BUFF1_BANK3_VAL3_BUSY__SHIFT 0x00000019
+#define SX_DEBUG_BUSY_3__COL_BUFF2_BANK0_VAL0_BUSY_MASK 0x01000000L
+#define SX_DEBUG_BUSY_3__COL_BUFF2_BANK0_VAL0_BUSY__SHIFT 0x00000018
+#define SX_DEBUG_BUSY_3__COL_BUFF2_BANK0_VAL1_BUSY_MASK 0x00800000L
+#define SX_DEBUG_BUSY_3__COL_BUFF2_BANK0_VAL1_BUSY__SHIFT 0x00000017
+#define SX_DEBUG_BUSY_3__COL_BUFF2_BANK0_VAL2_BUSY_MASK 0x00400000L
+#define SX_DEBUG_BUSY_3__COL_BUFF2_BANK0_VAL2_BUSY__SHIFT 0x00000016
+#define SX_DEBUG_BUSY_3__COL_BUFF2_BANK0_VAL3_BUSY_MASK 0x00200000L
+#define SX_DEBUG_BUSY_3__COL_BUFF2_BANK0_VAL3_BUSY__SHIFT 0x00000015
+#define SX_DEBUG_BUSY_3__COL_BUFF2_BANK1_VAL0_BUSY_MASK 0x00100000L
+#define SX_DEBUG_BUSY_3__COL_BUFF2_BANK1_VAL0_BUSY__SHIFT 0x00000014
+#define SX_DEBUG_BUSY_3__COL_BUFF2_BANK1_VAL1_BUSY_MASK 0x00080000L
+#define SX_DEBUG_BUSY_3__COL_BUFF2_BANK1_VAL1_BUSY__SHIFT 0x00000013
+#define SX_DEBUG_BUSY_3__COL_BUFF2_BANK1_VAL2_BUSY_MASK 0x00040000L
+#define SX_DEBUG_BUSY_3__COL_BUFF2_BANK1_VAL2_BUSY__SHIFT 0x00000012
+#define SX_DEBUG_BUSY_3__COL_BUFF2_BANK1_VAL3_BUSY_MASK 0x00020000L
+#define SX_DEBUG_BUSY_3__COL_BUFF2_BANK1_VAL3_BUSY__SHIFT 0x00000011
+#define SX_DEBUG_BUSY_3__COL_BUFF2_BANK2_VAL0_BUSY_MASK 0x00010000L
+#define SX_DEBUG_BUSY_3__COL_BUFF2_BANK2_VAL0_BUSY__SHIFT 0x00000010
+#define SX_DEBUG_BUSY_3__COL_BUFF2_BANK2_VAL1_BUSY_MASK 0x00008000L
+#define SX_DEBUG_BUSY_3__COL_BUFF2_BANK2_VAL1_BUSY__SHIFT 0x0000000f
+#define SX_DEBUG_BUSY_3__COL_BUFF2_BANK2_VAL2_BUSY_MASK 0x00004000L
+#define SX_DEBUG_BUSY_3__COL_BUFF2_BANK2_VAL2_BUSY__SHIFT 0x0000000e
+#define SX_DEBUG_BUSY_3__COL_BUFF2_BANK2_VAL3_BUSY_MASK 0x00002000L
+#define SX_DEBUG_BUSY_3__COL_BUFF2_BANK2_VAL3_BUSY__SHIFT 0x0000000d
+#define SX_DEBUG_BUSY_3__COL_BUFF2_BANK3_VAL0_BUSY_MASK 0x00001000L
+#define SX_DEBUG_BUSY_3__COL_BUFF2_BANK3_VAL0_BUSY__SHIFT 0x0000000c
+#define SX_DEBUG_BUSY_3__COL_BUFF2_BANK3_VAL1_BUSY_MASK 0x00000800L
+#define SX_DEBUG_BUSY_3__COL_BUFF2_BANK3_VAL1_BUSY__SHIFT 0x0000000b
+#define SX_DEBUG_BUSY_3__COL_BUFF2_BANK3_VAL2_BUSY_MASK 0x00000400L
+#define SX_DEBUG_BUSY_3__COL_BUFF2_BANK3_VAL2_BUSY__SHIFT 0x0000000a
+#define SX_DEBUG_BUSY_3__COL_BUFF2_BANK3_VAL3_BUSY_MASK 0x00000200L
+#define SX_DEBUG_BUSY_3__COL_BUFF2_BANK3_VAL3_BUSY__SHIFT 0x00000009
+#define SX_DEBUG_BUSY_3__COL_BUFF3_BANK0_VAL0_BUSY_MASK 0x00000100L
+#define SX_DEBUG_BUSY_3__COL_BUFF3_BANK0_VAL0_BUSY__SHIFT 0x00000008
+#define SX_DEBUG_BUSY_3__COL_BUFF3_BANK0_VAL1_BUSY_MASK 0x00000080L
+#define SX_DEBUG_BUSY_3__COL_BUFF3_BANK0_VAL1_BUSY__SHIFT 0x00000007
+#define SX_DEBUG_BUSY_3__COL_BUFF3_BANK0_VAL2_BUSY_MASK 0x00000040L
+#define SX_DEBUG_BUSY_3__COL_BUFF3_BANK0_VAL2_BUSY__SHIFT 0x00000006
+#define SX_DEBUG_BUSY_3__COL_BUFF3_BANK0_VAL3_BUSY_MASK 0x00000020L
+#define SX_DEBUG_BUSY_3__COL_BUFF3_BANK0_VAL3_BUSY__SHIFT 0x00000005
+#define SX_DEBUG_BUSY_3__COL_BUFF3_BANK1_VAL0_BUSY_MASK 0x00000010L
+#define SX_DEBUG_BUSY_3__COL_BUFF3_BANK1_VAL0_BUSY__SHIFT 0x00000004
+#define SX_DEBUG_BUSY_3__COL_BUFF3_BANK1_VAL1_BUSY_MASK 0x00000008L
+#define SX_DEBUG_BUSY_3__COL_BUFF3_BANK1_VAL1_BUSY__SHIFT 0x00000003
+#define SX_DEBUG_BUSY_3__COL_BUFF3_BANK1_VAL2_BUSY_MASK 0x00000004L
+#define SX_DEBUG_BUSY_3__COL_BUFF3_BANK1_VAL2_BUSY__SHIFT 0x00000002
+#define SX_DEBUG_BUSY_3__COL_BUFF3_BANK1_VAL3_BUSY_MASK 0x00000002L
+#define SX_DEBUG_BUSY_3__COL_BUFF3_BANK1_VAL3_BUSY__SHIFT 0x00000001
+#define SX_DEBUG_BUSY_3__COL_BUFF3_BANK2_VAL0_BUSY_MASK 0x00000001L
+#define SX_DEBUG_BUSY_3__COL_BUFF3_BANK2_VAL0_BUSY__SHIFT 0x00000000
+#define SX_DEBUG_BUSY_4__COL_BUFF0_BANK0_VAL0_BUSY_MASK 0x01000000L
+#define SX_DEBUG_BUSY_4__COL_BUFF0_BANK0_VAL0_BUSY__SHIFT 0x00000018
+#define SX_DEBUG_BUSY_4__COL_BUFF0_BANK0_VAL1_BUSY_MASK 0x00800000L
+#define SX_DEBUG_BUSY_4__COL_BUFF0_BANK0_VAL1_BUSY__SHIFT 0x00000017
+#define SX_DEBUG_BUSY_4__COL_BUFF0_BANK0_VAL2_BUSY_MASK 0x00400000L
+#define SX_DEBUG_BUSY_4__COL_BUFF0_BANK0_VAL2_BUSY__SHIFT 0x00000016
+#define SX_DEBUG_BUSY_4__COL_BUFF0_BANK0_VAL3_BUSY_MASK 0x00200000L
+#define SX_DEBUG_BUSY_4__COL_BUFF0_BANK0_VAL3_BUSY__SHIFT 0x00000015
+#define SX_DEBUG_BUSY_4__COL_BUFF0_BANK1_VAL0_BUSY_MASK 0x00100000L
+#define SX_DEBUG_BUSY_4__COL_BUFF0_BANK1_VAL0_BUSY__SHIFT 0x00000014
+#define SX_DEBUG_BUSY_4__COL_BUFF0_BANK1_VAL1_BUSY_MASK 0x00080000L
+#define SX_DEBUG_BUSY_4__COL_BUFF0_BANK1_VAL1_BUSY__SHIFT 0x00000013
+#define SX_DEBUG_BUSY_4__COL_BUFF0_BANK1_VAL2_BUSY_MASK 0x00040000L
+#define SX_DEBUG_BUSY_4__COL_BUFF0_BANK1_VAL2_BUSY__SHIFT 0x00000012
+#define SX_DEBUG_BUSY_4__COL_BUFF0_BANK1_VAL3_BUSY_MASK 0x00020000L
+#define SX_DEBUG_BUSY_4__COL_BUFF0_BANK1_VAL3_BUSY__SHIFT 0x00000011
+#define SX_DEBUG_BUSY_4__COL_BUFF0_BANK2_VAL0_BUSY_MASK 0x00010000L
+#define SX_DEBUG_BUSY_4__COL_BUFF0_BANK2_VAL0_BUSY__SHIFT 0x00000010
+#define SX_DEBUG_BUSY_4__COL_BUFF0_BANK2_VAL1_BUSY_MASK 0x00008000L
+#define SX_DEBUG_BUSY_4__COL_BUFF0_BANK2_VAL1_BUSY__SHIFT 0x0000000f
+#define SX_DEBUG_BUSY_4__COL_BUFF0_BANK2_VAL2_BUSY_MASK 0x00004000L
+#define SX_DEBUG_BUSY_4__COL_BUFF0_BANK2_VAL2_BUSY__SHIFT 0x0000000e
+#define SX_DEBUG_BUSY_4__COL_BUFF0_BANK2_VAL3_BUSY_MASK 0x00002000L
+#define SX_DEBUG_BUSY_4__COL_BUFF0_BANK2_VAL3_BUSY__SHIFT 0x0000000d
+#define SX_DEBUG_BUSY_4__COL_BUFF0_BANK3_VAL0_BUSY_MASK 0x00001000L
+#define SX_DEBUG_BUSY_4__COL_BUFF0_BANK3_VAL0_BUSY__SHIFT 0x0000000c
+#define SX_DEBUG_BUSY_4__COL_BUFF0_BANK3_VAL1_BUSY_MASK 0x00000800L
+#define SX_DEBUG_BUSY_4__COL_BUFF0_BANK3_VAL1_BUSY__SHIFT 0x0000000b
+#define SX_DEBUG_BUSY_4__COL_BUFF0_BANK3_VAL2_BUSY_MASK 0x00000400L
+#define SX_DEBUG_BUSY_4__COL_BUFF0_BANK3_VAL2_BUSY__SHIFT 0x0000000a
+#define SX_DEBUG_BUSY_4__COL_BUFF0_BANK3_VAL3_BUSY_MASK 0x00000200L
+#define SX_DEBUG_BUSY_4__COL_BUFF0_BANK3_VAL3_BUSY__SHIFT 0x00000009
+#define SX_DEBUG_BUSY_4__COL_BUFF1_BANK0_VAL0_BUSY_MASK 0x00000100L
+#define SX_DEBUG_BUSY_4__COL_BUFF1_BANK0_VAL0_BUSY__SHIFT 0x00000008
+#define SX_DEBUG_BUSY_4__COL_BUFF1_BANK0_VAL1_BUSY_MASK 0x00000080L
+#define SX_DEBUG_BUSY_4__COL_BUFF1_BANK0_VAL1_BUSY__SHIFT 0x00000007
+#define SX_DEBUG_BUSY_4__COL_BUFF1_BANK0_VAL2_BUSY_MASK 0x00000040L
+#define SX_DEBUG_BUSY_4__COL_BUFF1_BANK0_VAL2_BUSY__SHIFT 0x00000006
+#define SX_DEBUG_BUSY_4__COL_BUFF1_BANK0_VAL3_BUSY_MASK 0x00000020L
+#define SX_DEBUG_BUSY_4__COL_BUFF1_BANK0_VAL3_BUSY__SHIFT 0x00000005
+#define SX_DEBUG_BUSY_4__COL_BUFF1_BANK1_VAL0_BUSY_MASK 0x00000010L
+#define SX_DEBUG_BUSY_4__COL_BUFF1_BANK1_VAL0_BUSY__SHIFT 0x00000004
+#define SX_DEBUG_BUSY_4__COL_BUFF1_BANK1_VAL1_BUSY_MASK 0x00000008L
+#define SX_DEBUG_BUSY_4__COL_BUFF1_BANK1_VAL1_BUSY__SHIFT 0x00000003
+#define SX_DEBUG_BUSY_4__COL_BUFF1_BANK1_VAL2_BUSY_MASK 0x00000004L
+#define SX_DEBUG_BUSY_4__COL_BUFF1_BANK1_VAL2_BUSY__SHIFT 0x00000002
+#define SX_DEBUG_BUSY_4__COL_BUFF1_BANK1_VAL3_BUSY_MASK 0x00000002L
+#define SX_DEBUG_BUSY_4__COL_BUFF1_BANK1_VAL3_BUSY__SHIFT 0x00000001
+#define SX_DEBUG_BUSY_4__COL_BUFF1_BANK2_VAL0_BUSY_MASK 0x00000001L
+#define SX_DEBUG_BUSY_4__COL_BUFF1_BANK2_VAL0_BUSY__SHIFT 0x00000000
+#define SX_DEBUG_BUSY_4__RESERVED_MASK 0xfe000000L
+#define SX_DEBUG_BUSY_4__RESERVED__SHIFT 0x00000019
+#define SX_DEBUG_BUSY__ADDR_BUSYORVAL_MASK 0x80000000L
+#define SX_DEBUG_BUSY__ADDR_BUSYORVAL__SHIFT 0x0000001f
+#define SX_DEBUG_BUSY__CMD_BUSYORVAL_MASK 0x40000000L
+#define SX_DEBUG_BUSY__CMD_BUSYORVAL__SHIFT 0x0000001e
+#define SX_DEBUG_BUSY__PA_SX_BUSY_MASK 0x00000004L
+#define SX_DEBUG_BUSY__PA_SX_BUSY__SHIFT 0x00000002
+#define SX_DEBUG_BUSY__PCCMD_VALID_MASK 0x08000000L
+#define SX_DEBUG_BUSY__PCCMD_VALID__SHIFT 0x0000001b
+#define SX_DEBUG_BUSY__POS_BANK0VAL0_BUSY_MASK 0x00080000L
+#define SX_DEBUG_BUSY__POS_BANK0VAL0_BUSY__SHIFT 0x00000013
+#define SX_DEBUG_BUSY__POS_BANK0VAL1_BUSY_MASK 0x00040000L
+#define SX_DEBUG_BUSY__POS_BANK0VAL1_BUSY__SHIFT 0x00000012
+#define SX_DEBUG_BUSY__POS_BANK0VAL2_BUSY_MASK 0x00020000L
+#define SX_DEBUG_BUSY__POS_BANK0VAL2_BUSY__SHIFT 0x00000011
+#define SX_DEBUG_BUSY__POS_BANK0VAL3_BUSY_MASK 0x00010000L
+#define SX_DEBUG_BUSY__POS_BANK0VAL3_BUSY__SHIFT 0x00000010
+#define SX_DEBUG_BUSY__POS_BANK1VAL0_BUSY_MASK 0x00008000L
+#define SX_DEBUG_BUSY__POS_BANK1VAL0_BUSY__SHIFT 0x0000000f
+#define SX_DEBUG_BUSY__POS_BANK1VAL1_BUSY_MASK 0x00004000L
+#define SX_DEBUG_BUSY__POS_BANK1VAL1_BUSY__SHIFT 0x0000000e
+#define SX_DEBUG_BUSY__POS_BANK1VAL2_BUSY_MASK 0x00002000L
+#define SX_DEBUG_BUSY__POS_BANK1VAL2_BUSY__SHIFT 0x0000000d
+#define SX_DEBUG_BUSY__POS_BANK1VAL3_BUSY_MASK 0x00001000L
+#define SX_DEBUG_BUSY__POS_BANK1VAL3_BUSY__SHIFT 0x0000000c
+#define SX_DEBUG_BUSY__POS_BANK2VAL0_BUSY_MASK 0x00000800L
+#define SX_DEBUG_BUSY__POS_BANK2VAL0_BUSY__SHIFT 0x0000000b
+#define SX_DEBUG_BUSY__POS_BANK2VAL1_BUSY_MASK 0x00000400L
+#define SX_DEBUG_BUSY__POS_BANK2VAL1_BUSY__SHIFT 0x0000000a
+#define SX_DEBUG_BUSY__POS_BANK2VAL2_BUSY_MASK 0x00000200L
+#define SX_DEBUG_BUSY__POS_BANK2VAL2_BUSY__SHIFT 0x00000009
+#define SX_DEBUG_BUSY__POS_BANK2VAL3_BUSY_MASK 0x00000100L
+#define SX_DEBUG_BUSY__POS_BANK2VAL3_BUSY__SHIFT 0x00000008
+#define SX_DEBUG_BUSY__POS_BANK3VAL0_BUSY_MASK 0x00000080L
+#define SX_DEBUG_BUSY__POS_BANK3VAL0_BUSY__SHIFT 0x00000007
+#define SX_DEBUG_BUSY__POS_BANK3VAL1_BUSY_MASK 0x00000040L
+#define SX_DEBUG_BUSY__POS_BANK3VAL1_BUSY__SHIFT 0x00000006
+#define SX_DEBUG_BUSY__POS_BANK3VAL2_BUSY_MASK 0x00000020L
+#define SX_DEBUG_BUSY__POS_BANK3VAL2_BUSY__SHIFT 0x00000005
+#define SX_DEBUG_BUSY__POS_BANK3VAL3_BUSY_MASK 0x00000010L
+#define SX_DEBUG_BUSY__POS_BANK3VAL3_BUSY__SHIFT 0x00000004
+#define SX_DEBUG_BUSY__POS_FREE_OR_VALIDS_MASK 0x00000001L
+#define SX_DEBUG_BUSY__POS_FREE_OR_VALIDS__SHIFT 0x00000000
+#define SX_DEBUG_BUSY__POS_INMUX_VALID_MASK 0x00100000L
+#define SX_DEBUG_BUSY__POS_INMUX_VALID__SHIFT 0x00000014
+#define SX_DEBUG_BUSY__POS_REQUESTER_BUSY_MASK 0x00000002L
+#define SX_DEBUG_BUSY__POS_REQUESTER_BUSY__SHIFT 0x00000001
+#define SX_DEBUG_BUSY__POS_SCBD_BUSY_MASK 0x00000008L
+#define SX_DEBUG_BUSY__POS_SCBD_BUSY__SHIFT 0x00000003
+#define SX_DEBUG_BUSY__VDATA0_VALID_MASK 0x20000000L
+#define SX_DEBUG_BUSY__VDATA0_VALID__SHIFT 0x0000001d
+#define SX_DEBUG_BUSY__VDATA1_VALID_MASK 0x10000000L
+#define SX_DEBUG_BUSY__VDATA1_VALID__SHIFT 0x0000001c
+#define SX_DEBUG_BUSY__WRCTRL0_VALIDQ1_MASK 0x04000000L
+#define SX_DEBUG_BUSY__WRCTRL0_VALIDQ1__SHIFT 0x0000001a
+#define SX_DEBUG_BUSY__WRCTRL0_VALIDQ2_MASK 0x02000000L
+#define SX_DEBUG_BUSY__WRCTRL0_VALIDQ2__SHIFT 0x00000019
+#define SX_DEBUG_BUSY__WRCTRL0_VALIDQ3_MASK 0x01000000L
+#define SX_DEBUG_BUSY__WRCTRL0_VALIDQ3__SHIFT 0x00000018
+#define SX_DEBUG_BUSY__WRCTRL1_VALIDQ1_MASK 0x00800000L
+#define SX_DEBUG_BUSY__WRCTRL1_VALIDQ1__SHIFT 0x00000017
+#define SX_DEBUG_BUSY__WRCTRL1_VALIDQ2_MASK 0x00400000L
+#define SX_DEBUG_BUSY__WRCTRL1_VALIDQ2__SHIFT 0x00000016
+#define SX_DEBUG_BUSY__WRCTRL1_VALIDQ3_MASK 0x00200000L
+#define SX_DEBUG_BUSY__WRCTRL1_VALIDQ3__SHIFT 0x00000015
+#define SXIFCCG_DEBUG_REG0__point_address_MASK 0x000001c0L
+#define SXIFCCG_DEBUG_REG0__point_address__SHIFT 0x00000006
+#define SXIFCCG_DEBUG_REG0__position_address_MASK 0x0000003fL
+#define SXIFCCG_DEBUG_REG0__position_address__SHIFT 0x00000000
+#define SXIFCCG_DEBUG_REG0__sx_pending_rd_advance_MASK 0x80000000L
+#define SXIFCCG_DEBUG_REG0__sx_pending_rd_advance__SHIFT 0x0000001f
+#define SXIFCCG_DEBUG_REG0__sx_pending_rd_aux_inc_MASK 0x40000000L
+#define SXIFCCG_DEBUG_REG0__sx_pending_rd_aux_inc__SHIFT 0x0000001e
+#define SXIFCCG_DEBUG_REG0__sx_pending_rd_aux_sel_MASK 0x0c000000L
+#define SXIFCCG_DEBUG_REG0__sx_pending_rd_aux_sel__SHIFT 0x0000001a
+#define SXIFCCG_DEBUG_REG0__sx_pending_rd_pci_MASK 0x03ff0000L
+#define SXIFCCG_DEBUG_REG0__sx_pending_rd_pci__SHIFT 0x00000010
+#define SXIFCCG_DEBUG_REG0__sx_pending_rd_req_mask_MASK 0x0000f000L
+#define SXIFCCG_DEBUG_REG0__sx_pending_rd_req_mask__SHIFT 0x0000000c
+#define SXIFCCG_DEBUG_REG0__sx_pending_rd_sp_id_MASK 0x30000000L
+#define SXIFCCG_DEBUG_REG0__sx_pending_rd_sp_id__SHIFT 0x0000001c
+#define SXIFCCG_DEBUG_REG0__sx_pending_rd_state_var_indx_MASK 0x00000e00L
+#define SXIFCCG_DEBUG_REG0__sx_pending_rd_state_var_indx__SHIFT 0x00000009
+#define SXIFCCG_DEBUG_REG1__aux_sel_MASK 0x00300000L
+#define SXIFCCG_DEBUG_REG1__aux_sel__SHIFT 0x00000014
+#define SXIFCCG_DEBUG_REG1__available_positions_MASK 0x0000007fL
+#define SXIFCCG_DEBUG_REG1__available_positions__SHIFT 0x00000000
+#define SXIFCCG_DEBUG_REG1__pasx_req_cnt_0_MASK 0xf0000000L
+#define SXIFCCG_DEBUG_REG1__pasx_req_cnt_0__SHIFT 0x0000001c
+#define SXIFCCG_DEBUG_REG1__pasx_req_cnt_1_MASK 0x0f000000L
+#define SXIFCCG_DEBUG_REG1__pasx_req_cnt_1__SHIFT 0x00000018
+#define SXIFCCG_DEBUG_REG1__statevar_bits_disable_sp_MASK 0x000f0000L
+#define SXIFCCG_DEBUG_REG1__statevar_bits_disable_sp__SHIFT 0x00000010
+#define SXIFCCG_DEBUG_REG1__statevar_bits_vs_out_misc_vec_ena_MASK 0x00008000L
+#define SXIFCCG_DEBUG_REG1__statevar_bits_vs_out_misc_vec_ena__SHIFT 0x0000000f
+#define SXIFCCG_DEBUG_REG1__sx_pending_fifo_contents_MASK 0x00007c00L
+#define SXIFCCG_DEBUG_REG1__sx_pending_fifo_contents__SHIFT 0x0000000a
+#define SXIFCCG_DEBUG_REG1__sx_receive_indx_MASK 0x00000380L
+#define SXIFCCG_DEBUG_REG1__sx_receive_indx__SHIFT 0x00000007
+#define SXIFCCG_DEBUG_REG1__sx_to_pa_empty_0_MASK 0x00800000L
+#define SXIFCCG_DEBUG_REG1__sx_to_pa_empty_0__SHIFT 0x00000017
+#define SXIFCCG_DEBUG_REG1__sx_to_pa_empty_1_MASK 0x00400000L
+#define SXIFCCG_DEBUG_REG1__sx_to_pa_empty_1__SHIFT 0x00000016
+#define SXIFCCG_DEBUG_REG2__param_cache_base_MASK 0x0000007fL
+#define SXIFCCG_DEBUG_REG2__param_cache_base__SHIFT 0x00000000
+#define SXIFCCG_DEBUG_REG2__req_active_verts_loaded_MASK 0x00008000L
+#define SXIFCCG_DEBUG_REG2__req_active_verts_loaded__SHIFT 0x0000000f
+#define SXIFCCG_DEBUG_REG2__req_active_verts_MASK 0x007f0000L
+#define SXIFCCG_DEBUG_REG2__req_active_verts__SHIFT 0x00000010
+#define SXIFCCG_DEBUG_REG2__sx_aux_MASK 0x00000180L
+#define SXIFCCG_DEBUG_REG2__sx_aux__SHIFT 0x00000007
+#define SXIFCCG_DEBUG_REG2__sx_request_indx_MASK 0x00007e00L
+#define SXIFCCG_DEBUG_REG2__sx_request_indx__SHIFT 0x00000009
+#define SXIFCCG_DEBUG_REG2__vgt_to_ccgen_active_verts_MASK 0xfc000000L
+#define SXIFCCG_DEBUG_REG2__vgt_to_ccgen_active_verts__SHIFT 0x0000001a
+#define SXIFCCG_DEBUG_REG2__vgt_to_ccgen_state_var_indx_MASK 0x03800000L
+#define SXIFCCG_DEBUG_REG2__vgt_to_ccgen_state_var_indx__SHIFT 0x00000017
+#define SXIFCCG_DEBUG_REG3__ALWAYS_ZERO_MASK 0x000000ffL
+#define SXIFCCG_DEBUG_REG3__ALWAYS_ZERO__SHIFT 0x00000000
+#define SXIFCCG_DEBUG_REG3__available_positions_MASK 0x001fc000L
+#define SXIFCCG_DEBUG_REG3__available_positions__SHIFT 0x0000000e
+#define SXIFCCG_DEBUG_REG3__ccgen_to_clipcc_fifo_full_MASK 0x20000000L
+#define SXIFCCG_DEBUG_REG3__ccgen_to_clipcc_fifo_full__SHIFT 0x0000001d
+#define SXIFCCG_DEBUG_REG3__ccgen_to_clipcc_write_MASK 0x80000000L
+#define SXIFCCG_DEBUG_REG3__ccgen_to_clipcc_write__SHIFT 0x0000001f
+#define SXIFCCG_DEBUG_REG3__current_state_MASK 0x00600000L
+#define SXIFCCG_DEBUG_REG3__current_state__SHIFT 0x00000015
+#define SXIFCCG_DEBUG_REG3__statevar_bits_vs_out_ccdist0_vec_ena_MASK 0x00002000L
+#define SXIFCCG_DEBUG_REG3__statevar_bits_vs_out_ccdist0_vec_ena__SHIFT 0x0000000d
+#define SXIFCCG_DEBUG_REG3__statevar_bits_vs_out_ccdist1_vec_ena_MASK 0x00001000L
+#define SXIFCCG_DEBUG_REG3__statevar_bits_vs_out_ccdist1_vec_ena__SHIFT 0x0000000c
+#define SXIFCCG_DEBUG_REG3__sx0_receive_fifo_empty_MASK 0x02000000L
+#define SXIFCCG_DEBUG_REG3__sx0_receive_fifo_empty__SHIFT 0x00000019
+#define SXIFCCG_DEBUG_REG3__sx0_receive_fifo_full_MASK 0x04000000L
+#define SXIFCCG_DEBUG_REG3__sx0_receive_fifo_full__SHIFT 0x0000001a
+#define SXIFCCG_DEBUG_REG3__sx0_receive_fifo_write_MASK 0x40000000L
+#define SXIFCCG_DEBUG_REG3__sx0_receive_fifo_write__SHIFT 0x0000001e
+#define SXIFCCG_DEBUG_REG3__vertex_fifo_empty_MASK 0x00800000L
+#define SXIFCCG_DEBUG_REG3__vertex_fifo_empty__SHIFT 0x00000017
+#define SXIFCCG_DEBUG_REG3__vertex_fifo_entriesavailable_MASK 0x00000f00L
+#define SXIFCCG_DEBUG_REG3__vertex_fifo_entriesavailable__SHIFT 0x00000008
+#define SXIFCCG_DEBUG_REG3__vertex_fifo_full_MASK 0x01000000L
+#define SXIFCCG_DEBUG_REG3__vertex_fifo_full__SHIFT 0x00000018
+#define SXIFCCG_DEBUG_REG3__vgt_to_ccgen_fifo_empty_MASK 0x08000000L
+#define SXIFCCG_DEBUG_REG3__vgt_to_ccgen_fifo_empty__SHIFT 0x0000001b
+#define SXIFCCG_DEBUG_REG3__vgt_to_ccgen_fifo_full_MASK 0x10000000L
+#define SXIFCCG_DEBUG_REG3__vgt_to_ccgen_fifo_full__SHIFT 0x0000001c
+#define SX_PERFCOUNTER0_HI__PERFCOUNTER_HI_MASK 0xffffffffL
+#define SX_PERFCOUNTER0_HI__PERFCOUNTER_HI__SHIFT 0x00000000
+#define SX_PERFCOUNTER0_LO__PERFCOUNTER_LO_MASK 0xffffffffL
+#define SX_PERFCOUNTER0_LO__PERFCOUNTER_LO__SHIFT 0x00000000
+#define SX_PERFCOUNTER0_SELECT1__PERFCOUNTER_SELECT2_MASK 0x000003ffL
+#define SX_PERFCOUNTER0_SELECT1__PERFCOUNTER_SELECT2__SHIFT 0x00000000
+#define SX_PERFCOUNTER0_SELECT1__PERFCOUNTER_SELECT3_MASK 0x000ffc00L
+#define SX_PERFCOUNTER0_SELECT1__PERFCOUNTER_SELECT3__SHIFT 0x0000000a
+#define SX_PERFCOUNTER0_SELECT__CNTR_MODE_MASK 0x00f00000L
+#define SX_PERFCOUNTER0_SELECT__CNTR_MODE__SHIFT 0x00000014
+#define SX_PERFCOUNTER0_SELECT__PERFCOUNTER_SELECT1_MASK 0x000ffc00L
+#define SX_PERFCOUNTER0_SELECT__PERFCOUNTER_SELECT1__SHIFT 0x0000000a
+#define SX_PERFCOUNTER0_SELECT__PERFCOUNTER_SELECT_MASK 0x000000ffL
+#define SX_PERFCOUNTER0_SELECT__PERFCOUNTER_SELECT__SHIFT 0x00000000
+#define SX_PERFCOUNTER1_HI__PERFCOUNTER_HI_MASK 0xffffffffL
+#define SX_PERFCOUNTER1_HI__PERFCOUNTER_HI__SHIFT 0x00000000
+#define SX_PERFCOUNTER1_LO__PERFCOUNTER_LO_MASK 0xffffffffL
+#define SX_PERFCOUNTER1_LO__PERFCOUNTER_LO__SHIFT 0x00000000
+#define SX_PERFCOUNTER1_SELECT1__PERFCOUNTER_SELECT2_MASK 0x000003ffL
+#define SX_PERFCOUNTER1_SELECT1__PERFCOUNTER_SELECT2__SHIFT 0x00000000
+#define SX_PERFCOUNTER1_SELECT1__PERFCOUNTER_SELECT3_MASK 0x000ffc00L
+#define SX_PERFCOUNTER1_SELECT1__PERFCOUNTER_SELECT3__SHIFT 0x0000000a
+#define SX_PERFCOUNTER1_SELECT__CNTR_MODE_MASK 0x00f00000L
+#define SX_PERFCOUNTER1_SELECT__CNTR_MODE__SHIFT 0x00000014
+#define SX_PERFCOUNTER1_SELECT__PERFCOUNTER_SELECT1_MASK 0x000ffc00L
+#define SX_PERFCOUNTER1_SELECT__PERFCOUNTER_SELECT1__SHIFT 0x0000000a
+#define SX_PERFCOUNTER1_SELECT__PERFCOUNTER_SELECT_MASK 0x000000ffL
+#define SX_PERFCOUNTER1_SELECT__PERFCOUNTER_SELECT__SHIFT 0x00000000
+#define SX_PERFCOUNTER2_HI__PERFCOUNTER_HI_MASK 0xffffffffL
+#define SX_PERFCOUNTER2_HI__PERFCOUNTER_HI__SHIFT 0x00000000
+#define SX_PERFCOUNTER2_LO__PERFCOUNTER_LO_MASK 0xffffffffL
+#define SX_PERFCOUNTER2_LO__PERFCOUNTER_LO__SHIFT 0x00000000
+#define SX_PERFCOUNTER2_SELECT__CNTR_MODE_MASK 0x00f00000L
+#define SX_PERFCOUNTER2_SELECT__CNTR_MODE__SHIFT 0x00000014
+#define SX_PERFCOUNTER2_SELECT__PERFCOUNTER_SELECT1_MASK 0x000ffc00L
+#define SX_PERFCOUNTER2_SELECT__PERFCOUNTER_SELECT1__SHIFT 0x0000000a
+#define SX_PERFCOUNTER2_SELECT__PERFCOUNTER_SELECT_MASK 0x000000ffL
+#define SX_PERFCOUNTER2_SELECT__PERFCOUNTER_SELECT__SHIFT 0x00000000
+#define SX_PERFCOUNTER3_HI__PERFCOUNTER_HI_MASK 0xffffffffL
+#define SX_PERFCOUNTER3_HI__PERFCOUNTER_HI__SHIFT 0x00000000
+#define SX_PERFCOUNTER3_LO__PERFCOUNTER_LO_MASK 0xffffffffL
+#define SX_PERFCOUNTER3_LO__PERFCOUNTER_LO__SHIFT 0x00000000
+#define SX_PERFCOUNTER3_SELECT__CNTR_MODE_MASK 0x00f00000L
+#define SX_PERFCOUNTER3_SELECT__CNTR_MODE__SHIFT 0x00000014
+#define SX_PERFCOUNTER3_SELECT__PERFCOUNTER_SELECT1_MASK 0x000ffc00L
+#define SX_PERFCOUNTER3_SELECT__PERFCOUNTER_SELECT1__SHIFT 0x0000000a
+#define SX_PERFCOUNTER3_SELECT__PERFCOUNTER_SELECT_MASK 0x000000ffL
+#define SX_PERFCOUNTER3_SELECT__PERFCOUNTER_SELECT__SHIFT 0x00000000
+#define TA_BC_BASE_ADDR__ADDRESS_MASK 0xffffffffL
+#define TA_BC_BASE_ADDR__ADDRESS__SHIFT 0x00000000
+#define TA_BC_BASE_ADDR_HI__ADDRESS_MASK 0x000000ffL
+#define TA_BC_BASE_ADDR_HI__ADDRESS__SHIFT 0x00000000
+#define TA_CGTT_CTRL__OFF_HYSTERESIS_MASK 0x00000ff0L
+#define TA_CGTT_CTRL__OFF_HYSTERESIS__SHIFT 0x00000004
+#define TA_CGTT_CTRL__ON_DELAY_MASK 0x0000000fL
+#define TA_CGTT_CTRL__ON_DELAY__SHIFT 0x00000000
+#define TA_CGTT_CTRL__SOFT_OVERRIDE0_MASK 0x80000000L
+#define TA_CGTT_CTRL__SOFT_OVERRIDE0__SHIFT 0x0000001f
+#define TA_CGTT_CTRL__SOFT_OVERRIDE1_MASK 0x40000000L
+#define TA_CGTT_CTRL__SOFT_OVERRIDE1__SHIFT 0x0000001e
+#define TA_CGTT_CTRL__SOFT_OVERRIDE2_MASK 0x20000000L
+#define TA_CGTT_CTRL__SOFT_OVERRIDE2__SHIFT 0x0000001d
+#define TA_CGTT_CTRL__SOFT_OVERRIDE3_MASK 0x10000000L
+#define TA_CGTT_CTRL__SOFT_OVERRIDE3__SHIFT 0x0000001c
+#define TA_CGTT_CTRL__SOFT_OVERRIDE4_MASK 0x08000000L
+#define TA_CGTT_CTRL__SOFT_OVERRIDE4__SHIFT 0x0000001b
+#define TA_CGTT_CTRL__SOFT_OVERRIDE5_MASK 0x04000000L
+#define TA_CGTT_CTRL__SOFT_OVERRIDE5__SHIFT 0x0000001a
+#define TA_CGTT_CTRL__SOFT_OVERRIDE6_MASK 0x02000000L
+#define TA_CGTT_CTRL__SOFT_OVERRIDE6__SHIFT 0x00000019
+#define TA_CGTT_CTRL__SOFT_OVERRIDE7_MASK 0x01000000L
+#define TA_CGTT_CTRL__SOFT_OVERRIDE7__SHIFT 0x00000018
+#define TA_CNTL__ALIGNER_CREDIT_MASK 0x001f0000L
+#define TA_CNTL__ALIGNER_CREDIT__SHIFT 0x00000010
+#define TA_CNTL_AUX__ANISO_WEIGHT_MODE_MASK 0x00010000L
+#define TA_CNTL_AUX__ANISO_WEIGHT_MODE__SHIFT 0x00000010
+#define TA_CNTL__TC_DATA_CREDIT_MASK 0x0000e000L
+#define TA_CNTL__TC_DATA_CREDIT__SHIFT 0x0000000d
+#define TA_CNTL__TD_FIFO_CREDIT_MASK 0xffc00000L
+#define TA_CNTL__TD_FIFO_CREDIT__SHIFT 0x00000016
+#define TA_CS_BC_BASE_ADDR__ADDRESS_MASK 0xffffffffL
+#define TA_CS_BC_BASE_ADDR__ADDRESS__SHIFT 0x00000000
+#define TA_CS_BC_BASE_ADDR_HI__ADDRESS_MASK 0x000000ffL
+#define TA_CS_BC_BASE_ADDR_HI__ADDRESS__SHIFT 0x00000000
+#define TA_DEBUG_DATA__DATA_MASK 0xffffffffL
+#define TA_DEBUG_DATA__DATA__SHIFT 0x00000000
+#define TA_DEBUG_INDEX__INDEX_MASK 0x0000001fL
+#define TA_DEBUG_INDEX__INDEX__SHIFT 0x00000000
+#define TA_PERFCOUNTER0_HI__PERFCOUNTER_HI_MASK 0xffffffffL
+#define TA_PERFCOUNTER0_HI__PERFCOUNTER_HI__SHIFT 0x00000000
+#define TA_PERFCOUNTER0_LO__PERFCOUNTER_LO_MASK 0xffffffffL
+#define TA_PERFCOUNTER0_LO__PERFCOUNTER_LO__SHIFT 0x00000000
+#define TA_PERFCOUNTER0_SELECT1__PERF_MODE2_MASK 0xf0000000L
+#define TA_PERFCOUNTER0_SELECT1__PERF_MODE2__SHIFT 0x0000001c
+#define TA_PERFCOUNTER0_SELECT1__PERF_MODE3_MASK 0x0f000000L
+#define TA_PERFCOUNTER0_SELECT1__PERF_MODE3__SHIFT 0x00000018
+#define TA_PERFCOUNTER0_SELECT1__PERF_SEL2_MASK 0x000000ffL
+#define TA_PERFCOUNTER0_SELECT1__PERF_SEL2__SHIFT 0x00000000
+#define TA_PERFCOUNTER0_SELECT1__PERF_SEL3_MASK 0x0003fc00L
+#define TA_PERFCOUNTER0_SELECT1__PERF_SEL3__SHIFT 0x0000000a
+#define TA_PERFCOUNTER0_SELECT__CNTR_MODE_MASK 0x00f00000L
+#define TA_PERFCOUNTER0_SELECT__CNTR_MODE__SHIFT 0x00000014
+#define TA_PERFCOUNTER0_SELECT__PERF_MODE1_MASK 0x0f000000L
+#define TA_PERFCOUNTER0_SELECT__PERF_MODE1__SHIFT 0x00000018
+#define TA_PERFCOUNTER0_SELECT__PERF_MODE_MASK 0xf0000000L
+#define TA_PERFCOUNTER0_SELECT__PERF_MODE__SHIFT 0x0000001c
+#define TA_PERFCOUNTER0_SELECT__PERF_SEL1_MASK 0x0003fc00L
+#define TA_PERFCOUNTER0_SELECT__PERF_SEL1__SHIFT 0x0000000a
+#define TA_PERFCOUNTER0_SELECT__PERF_SEL_MASK 0x000000ffL
+#define TA_PERFCOUNTER0_SELECT__PERF_SEL__SHIFT 0x00000000
+#define TA_PERFCOUNTER1_HI__PERFCOUNTER_HI_MASK 0xffffffffL
+#define TA_PERFCOUNTER1_HI__PERFCOUNTER_HI__SHIFT 0x00000000
+#define TA_PERFCOUNTER1_LO__PERFCOUNTER_LO_MASK 0xffffffffL
+#define TA_PERFCOUNTER1_LO__PERFCOUNTER_LO__SHIFT 0x00000000
+#define TA_PERFCOUNTER1_SELECT__CNTR_MODE_MASK 0x00f00000L
+#define TA_PERFCOUNTER1_SELECT__CNTR_MODE__SHIFT 0x00000014
+#define TA_PERFCOUNTER1_SELECT__PERF_MODE1_MASK 0x0f000000L
+#define TA_PERFCOUNTER1_SELECT__PERF_MODE1__SHIFT 0x00000018
+#define TA_PERFCOUNTER1_SELECT__PERF_MODE_MASK 0xf0000000L
+#define TA_PERFCOUNTER1_SELECT__PERF_MODE__SHIFT 0x0000001c
+#define TA_PERFCOUNTER1_SELECT__PERF_SEL1_MASK 0x0003fc00L
+#define TA_PERFCOUNTER1_SELECT__PERF_SEL1__SHIFT 0x0000000a
+#define TA_PERFCOUNTER1_SELECT__PERF_SEL_MASK 0x000000ffL
+#define TA_PERFCOUNTER1_SELECT__PERF_SEL__SHIFT 0x00000000
+#define TA_SCRATCH__SCRATCH_MASK 0xffffffffL
+#define TA_SCRATCH__SCRATCH__SHIFT 0x00000000
+#define TA_STATUS__AL_BUSY_MASK 0x40000000L
+#define TA_STATUS__AL_BUSY__SHIFT 0x0000001e
+#define TA_STATUS__BUSY_MASK 0x80000000L
+#define TA_STATUS__BUSY__SHIFT 0x0000001f
+#define TA_STATUS__FA_BUSY_MASK 0x20000000L
+#define TA_STATUS__FA_BUSY__SHIFT 0x0000001d
+#define TA_STATUS__FA_LFIFO_EMPTYB_MASK 0x00200000L
+#define TA_STATUS__FA_LFIFO_EMPTYB__SHIFT 0x00000015
+#define TA_STATUS__FA_PFIFO_EMPTYB_MASK 0x00100000L
+#define TA_STATUS__FA_PFIFO_EMPTYB__SHIFT 0x00000014
+#define TA_STATUS__FA_SFIFO_EMPTYB_MASK 0x00400000L
+#define TA_STATUS__FA_SFIFO_EMPTYB__SHIFT 0x00000016
+#define TA_STATUS__FG_BUSY_MASK 0x02000000L
+#define TA_STATUS__FG_BUSY__SHIFT 0x00000019
+#define TA_STATUS__FG_LFIFO_EMPTYB_MASK 0x00002000L
+#define TA_STATUS__FG_LFIFO_EMPTYB__SHIFT 0x0000000d
+#define TA_STATUS__FG_PFIFO_EMPTYB_MASK 0x00001000L
+#define TA_STATUS__FG_PFIFO_EMPTYB__SHIFT 0x0000000c
+#define TA_STATUS__FG_SFIFO_EMPTYB_MASK 0x00004000L
+#define TA_STATUS__FG_SFIFO_EMPTYB__SHIFT 0x0000000e
+#define TA_STATUS__FL_BUSY_MASK 0x08000000L
+#define TA_STATUS__FL_BUSY__SHIFT 0x0000001b
+#define TA_STATUS__FL_LFIFO_EMPTYB_MASK 0x00020000L
+#define TA_STATUS__FL_LFIFO_EMPTYB__SHIFT 0x00000011
+#define TA_STATUS__FL_PFIFO_EMPTYB_MASK 0x00010000L
+#define TA_STATUS__FL_PFIFO_EMPTYB__SHIFT 0x00000010
+#define TA_STATUS__FL_SFIFO_EMPTYB_MASK 0x00040000L
+#define TA_STATUS__FL_SFIFO_EMPTYB__SHIFT 0x00000012
+#define TA_STATUS__IN_BUSY_MASK 0x01000000L
+#define TA_STATUS__IN_BUSY__SHIFT 0x00000018
+#define TA_STATUS__LA_BUSY_MASK 0x04000000L
+#define TA_STATUS__LA_BUSY__SHIFT 0x0000001a
+#define TA_STATUS__TA_BUSY_MASK 0x10000000L
+#define TA_STATUS__TA_BUSY__SHIFT 0x0000001c
+#define TCA_CGTT_SCLK_CTRL__OFF_HYSTERESIS_MASK 0x00000ff0L
+#define TCA_CGTT_SCLK_CTRL__OFF_HYSTERESIS__SHIFT 0x00000004
+#define TCA_CGTT_SCLK_CTRL__ON_DELAY_MASK 0x0000000fL
+#define TCA_CGTT_SCLK_CTRL__ON_DELAY__SHIFT 0x00000000
+#define TCA_CGTT_SCLK_CTRL__SOFT_OVERRIDE0_MASK 0x80000000L
+#define TCA_CGTT_SCLK_CTRL__SOFT_OVERRIDE0__SHIFT 0x0000001f
+#define TCA_CGTT_SCLK_CTRL__SOFT_OVERRIDE1_MASK 0x40000000L
+#define TCA_CGTT_SCLK_CTRL__SOFT_OVERRIDE1__SHIFT 0x0000001e
+#define TCA_CGTT_SCLK_CTRL__SOFT_OVERRIDE2_MASK 0x20000000L
+#define TCA_CGTT_SCLK_CTRL__SOFT_OVERRIDE2__SHIFT 0x0000001d
+#define TCA_CGTT_SCLK_CTRL__SOFT_OVERRIDE3_MASK 0x10000000L
+#define TCA_CGTT_SCLK_CTRL__SOFT_OVERRIDE3__SHIFT 0x0000001c
+#define TCA_CGTT_SCLK_CTRL__SOFT_OVERRIDE4_MASK 0x08000000L
+#define TCA_CGTT_SCLK_CTRL__SOFT_OVERRIDE4__SHIFT 0x0000001b
+#define TCA_CGTT_SCLK_CTRL__SOFT_OVERRIDE5_MASK 0x04000000L
+#define TCA_CGTT_SCLK_CTRL__SOFT_OVERRIDE5__SHIFT 0x0000001a
+#define TCA_CGTT_SCLK_CTRL__SOFT_OVERRIDE6_MASK 0x02000000L
+#define TCA_CGTT_SCLK_CTRL__SOFT_OVERRIDE6__SHIFT 0x00000019
+#define TCA_CGTT_SCLK_CTRL__SOFT_OVERRIDE7_MASK 0x01000000L
+#define TCA_CGTT_SCLK_CTRL__SOFT_OVERRIDE7__SHIFT 0x00000018
+#define TCA_CTRL__HOLE_TIMEOUT_MASK 0x0000000fL
+#define TCA_CTRL__HOLE_TIMEOUT__SHIFT 0x00000000
+#define TCA_PERFCOUNTER0_HI__PERFCOUNTER_HI_MASK 0xffffffffL
+#define TCA_PERFCOUNTER0_HI__PERFCOUNTER_HI__SHIFT 0x00000000
+#define TCA_PERFCOUNTER0_LO__PERFCOUNTER_LO_MASK 0xffffffffL
+#define TCA_PERFCOUNTER0_LO__PERFCOUNTER_LO__SHIFT 0x00000000
+#define TCA_PERFCOUNTER0_SELECT1__PERF_MODE2_MASK 0x0f000000L
+#define TCA_PERFCOUNTER0_SELECT1__PERF_MODE2__SHIFT 0x00000018
+#define TCA_PERFCOUNTER0_SELECT1__PERF_MODE3_MASK 0xf0000000L
+#define TCA_PERFCOUNTER0_SELECT1__PERF_MODE3__SHIFT 0x0000001c
+#define TCA_PERFCOUNTER0_SELECT1__PERF_SEL2_MASK 0x000003ffL
+#define TCA_PERFCOUNTER0_SELECT1__PERF_SEL2__SHIFT 0x00000000
+#define TCA_PERFCOUNTER0_SELECT1__PERF_SEL3_MASK 0x000ffc00L
+#define TCA_PERFCOUNTER0_SELECT1__PERF_SEL3__SHIFT 0x0000000a
+#define TCA_PERFCOUNTER0_SELECT__CNTR_MODE_MASK 0x00f00000L
+#define TCA_PERFCOUNTER0_SELECT__CNTR_MODE__SHIFT 0x00000014
+#define TCA_PERFCOUNTER0_SELECT__PERF_MODE1_MASK 0x0f000000L
+#define TCA_PERFCOUNTER0_SELECT__PERF_MODE1__SHIFT 0x00000018
+#define TCA_PERFCOUNTER0_SELECT__PERF_MODE_MASK 0xf0000000L
+#define TCA_PERFCOUNTER0_SELECT__PERF_MODE__SHIFT 0x0000001c
+#define TCA_PERFCOUNTER0_SELECT__PERF_SEL1_MASK 0x000ffc00L
+#define TCA_PERFCOUNTER0_SELECT__PERF_SEL1__SHIFT 0x0000000a
+#define TCA_PERFCOUNTER0_SELECT__PERF_SEL_MASK 0x000000ffL
+#define TCA_PERFCOUNTER0_SELECT__PERF_SEL__SHIFT 0x00000000
+#define TCA_PERFCOUNTER1_HI__PERFCOUNTER_HI_MASK 0xffffffffL
+#define TCA_PERFCOUNTER1_HI__PERFCOUNTER_HI__SHIFT 0x00000000
+#define TCA_PERFCOUNTER1_LO__PERFCOUNTER_LO_MASK 0xffffffffL
+#define TCA_PERFCOUNTER1_LO__PERFCOUNTER_LO__SHIFT 0x00000000
+#define TCA_PERFCOUNTER1_SELECT1__PERF_MODE2_MASK 0x0f000000L
+#define TCA_PERFCOUNTER1_SELECT1__PERF_MODE2__SHIFT 0x00000018
+#define TCA_PERFCOUNTER1_SELECT1__PERF_MODE3_MASK 0xf0000000L
+#define TCA_PERFCOUNTER1_SELECT1__PERF_MODE3__SHIFT 0x0000001c
+#define TCA_PERFCOUNTER1_SELECT1__PERF_SEL2_MASK 0x000003ffL
+#define TCA_PERFCOUNTER1_SELECT1__PERF_SEL2__SHIFT 0x00000000
+#define TCA_PERFCOUNTER1_SELECT1__PERF_SEL3_MASK 0x000ffc00L
+#define TCA_PERFCOUNTER1_SELECT1__PERF_SEL3__SHIFT 0x0000000a
+#define TCA_PERFCOUNTER1_SELECT__CNTR_MODE_MASK 0x00f00000L
+#define TCA_PERFCOUNTER1_SELECT__CNTR_MODE__SHIFT 0x00000014
+#define TCA_PERFCOUNTER1_SELECT__PERF_MODE1_MASK 0x0f000000L
+#define TCA_PERFCOUNTER1_SELECT__PERF_MODE1__SHIFT 0x00000018
+#define TCA_PERFCOUNTER1_SELECT__PERF_MODE_MASK 0xf0000000L
+#define TCA_PERFCOUNTER1_SELECT__PERF_MODE__SHIFT 0x0000001c
+#define TCA_PERFCOUNTER1_SELECT__PERF_SEL1_MASK 0x000ffc00L
+#define TCA_PERFCOUNTER1_SELECT__PERF_SEL1__SHIFT 0x0000000a
+#define TCA_PERFCOUNTER1_SELECT__PERF_SEL_MASK 0x000000ffL
+#define TCA_PERFCOUNTER1_SELECT__PERF_SEL__SHIFT 0x00000000
+#define TCA_PERFCOUNTER2_HI__PERFCOUNTER_HI_MASK 0xffffffffL
+#define TCA_PERFCOUNTER2_HI__PERFCOUNTER_HI__SHIFT 0x00000000
+#define TCA_PERFCOUNTER2_LO__PERFCOUNTER_LO_MASK 0xffffffffL
+#define TCA_PERFCOUNTER2_LO__PERFCOUNTER_LO__SHIFT 0x00000000
+#define TCA_PERFCOUNTER2_SELECT__CNTR_MODE_MASK 0x00f00000L
+#define TCA_PERFCOUNTER2_SELECT__CNTR_MODE__SHIFT 0x00000014
+#define TCA_PERFCOUNTER2_SELECT__PERF_MODE_MASK 0xf0000000L
+#define TCA_PERFCOUNTER2_SELECT__PERF_MODE__SHIFT 0x0000001c
+#define TCA_PERFCOUNTER2_SELECT__PERF_SEL_MASK 0x000000ffL
+#define TCA_PERFCOUNTER2_SELECT__PERF_SEL__SHIFT 0x00000000
+#define TCA_PERFCOUNTER3_HI__PERFCOUNTER_HI_MASK 0xffffffffL
+#define TCA_PERFCOUNTER3_HI__PERFCOUNTER_HI__SHIFT 0x00000000
+#define TCA_PERFCOUNTER3_LO__PERFCOUNTER_LO_MASK 0xffffffffL
+#define TCA_PERFCOUNTER3_LO__PERFCOUNTER_LO__SHIFT 0x00000000
+#define TCA_PERFCOUNTER3_SELECT__CNTR_MODE_MASK 0x00f00000L
+#define TCA_PERFCOUNTER3_SELECT__CNTR_MODE__SHIFT 0x00000014
+#define TCA_PERFCOUNTER3_SELECT__PERF_MODE_MASK 0xf0000000L
+#define TCA_PERFCOUNTER3_SELECT__PERF_MODE__SHIFT 0x0000001c
+#define TCA_PERFCOUNTER3_SELECT__PERF_SEL_MASK 0x000000ffL
+#define TCA_PERFCOUNTER3_SELECT__PERF_SEL__SHIFT 0x00000000
+#define TCC_CGTT_SCLK_CTRL__OFF_HYSTERESIS_MASK 0x00000ff0L
+#define TCC_CGTT_SCLK_CTRL__OFF_HYSTERESIS__SHIFT 0x00000004
+#define TCC_CGTT_SCLK_CTRL__ON_DELAY_MASK 0x0000000fL
+#define TCC_CGTT_SCLK_CTRL__ON_DELAY__SHIFT 0x00000000
+#define TCC_CGTT_SCLK_CTRL__SOFT_OVERRIDE0_MASK 0x80000000L
+#define TCC_CGTT_SCLK_CTRL__SOFT_OVERRIDE0__SHIFT 0x0000001f
+#define TCC_CGTT_SCLK_CTRL__SOFT_OVERRIDE1_MASK 0x40000000L
+#define TCC_CGTT_SCLK_CTRL__SOFT_OVERRIDE1__SHIFT 0x0000001e
+#define TCC_CGTT_SCLK_CTRL__SOFT_OVERRIDE2_MASK 0x20000000L
+#define TCC_CGTT_SCLK_CTRL__SOFT_OVERRIDE2__SHIFT 0x0000001d
+#define TCC_CGTT_SCLK_CTRL__SOFT_OVERRIDE3_MASK 0x10000000L
+#define TCC_CGTT_SCLK_CTRL__SOFT_OVERRIDE3__SHIFT 0x0000001c
+#define TCC_CGTT_SCLK_CTRL__SOFT_OVERRIDE4_MASK 0x08000000L
+#define TCC_CGTT_SCLK_CTRL__SOFT_OVERRIDE4__SHIFT 0x0000001b
+#define TCC_CGTT_SCLK_CTRL__SOFT_OVERRIDE5_MASK 0x04000000L
+#define TCC_CGTT_SCLK_CTRL__SOFT_OVERRIDE5__SHIFT 0x0000001a
+#define TCC_CGTT_SCLK_CTRL__SOFT_OVERRIDE6_MASK 0x02000000L
+#define TCC_CGTT_SCLK_CTRL__SOFT_OVERRIDE6__SHIFT 0x00000019
+#define TCC_CGTT_SCLK_CTRL__SOFT_OVERRIDE7_MASK 0x01000000L
+#define TCC_CGTT_SCLK_CTRL__SOFT_OVERRIDE7__SHIFT 0x00000018
+#define TCC_CTRL__CACHE_SIZE_MASK 0x00000003L
+#define TCC_CTRL__CACHE_SIZE__SHIFT 0x00000000
+#define TCC_CTRL__LATENCY_FIFO_SIZE_MASK 0x000f0000L
+#define TCC_CTRL__LATENCY_FIFO_SIZE__SHIFT 0x00000010
+#define TCC_CTRL__RATE_MASK 0x0000000cL
+#define TCC_CTRL__RATE__SHIFT 0x00000002
+#define TCC_CTRL__SRC_FIFO_SIZE_MASK 0x0000f000L
+#define TCC_CTRL__SRC_FIFO_SIZE__SHIFT 0x0000000c
+#define TCC_CTRL__WB_OR_INV_ALL_VMIDS_MASK 0x00100000L
+#define TCC_CTRL__WB_OR_INV_ALL_VMIDS__SHIFT 0x00000014
+#define TCC_CTRL__WRITEBACK_MARGIN_MASK 0x000000f0L
+#define TCC_CTRL__WRITEBACK_MARGIN__SHIFT 0x00000004
+#define TCC_EDC_COUNTER__DED_COUNT_MASK 0x000f0000L
+#define TCC_EDC_COUNTER__DED_COUNT__SHIFT 0x00000010
+#define TCC_EDC_COUNTER__SEC_COUNT_MASK 0x0000000fL
+#define TCC_EDC_COUNTER__SEC_COUNT__SHIFT 0x00000000
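[Editor's sketch] TCC_EDC_COUNTER packs two 4-bit ECC event counts; SEC/DED conventionally mean single-error-corrected and double-error-detected. A minimal extraction sketch, assuming `val` holds a raw TCC_EDC_COUNTER readback obtained elsewhere:

#include <stdint.h>

static inline uint32_t tcc_edc_sec_count(uint32_t val)
{
	return (val & TCC_EDC_COUNTER__SEC_COUNT_MASK)
			>> TCC_EDC_COUNTER__SEC_COUNT__SHIFT;
}

static inline uint32_t tcc_edc_ded_count(uint32_t val)
{
	return (val & TCC_EDC_COUNTER__DED_COUNT_MASK)
			>> TCC_EDC_COUNTER__DED_COUNT__SHIFT;
}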
+#define TCC_PERFCOUNTER0_HI__PERFCOUNTER_HI_MASK 0xffffffffL
+#define TCC_PERFCOUNTER0_HI__PERFCOUNTER_HI__SHIFT 0x00000000
+#define TCC_PERFCOUNTER0_LO__PERFCOUNTER_LO_MASK 0xffffffffL
+#define TCC_PERFCOUNTER0_LO__PERFCOUNTER_LO__SHIFT 0x00000000
+#define TCC_PERFCOUNTER0_SELECT1__PERF_MODE2_MASK 0x0f000000L
+#define TCC_PERFCOUNTER0_SELECT1__PERF_MODE2__SHIFT 0x00000018
+#define TCC_PERFCOUNTER0_SELECT1__PERF_MODE3_MASK 0xf0000000L
+#define TCC_PERFCOUNTER0_SELECT1__PERF_MODE3__SHIFT 0x0000001c
+#define TCC_PERFCOUNTER0_SELECT1__PERF_SEL2_MASK 0x000003ffL
+#define TCC_PERFCOUNTER0_SELECT1__PERF_SEL2__SHIFT 0x00000000
+#define TCC_PERFCOUNTER0_SELECT1__PERF_SEL3_MASK 0x000ffc00L
+#define TCC_PERFCOUNTER0_SELECT1__PERF_SEL3__SHIFT 0x0000000a
+#define TCC_PERFCOUNTER0_SELECT__CNTR_MODE_MASK 0x00f00000L
+#define TCC_PERFCOUNTER0_SELECT__CNTR_MODE__SHIFT 0x00000014
+#define TCC_PERFCOUNTER0_SELECT__PERF_MODE1_MASK 0x0f000000L
+#define TCC_PERFCOUNTER0_SELECT__PERF_MODE1__SHIFT 0x00000018
+#define TCC_PERFCOUNTER0_SELECT__PERF_MODE_MASK 0xf0000000L
+#define TCC_PERFCOUNTER0_SELECT__PERF_MODE__SHIFT 0x0000001c
+#define TCC_PERFCOUNTER0_SELECT__PERF_SEL1_MASK 0x000ffc00L
+#define TCC_PERFCOUNTER0_SELECT__PERF_SEL1__SHIFT 0x0000000a
+#define TCC_PERFCOUNTER0_SELECT__PERF_SEL_MASK 0x000000ffL
+#define TCC_PERFCOUNTER0_SELECT__PERF_SEL__SHIFT 0x00000000
+#define TCC_PERFCOUNTER1_HI__PERFCOUNTER_HI_MASK 0xffffffffL
+#define TCC_PERFCOUNTER1_HI__PERFCOUNTER_HI__SHIFT 0x00000000
+#define TCC_PERFCOUNTER1_LO__PERFCOUNTER_LO_MASK 0xffffffffL
+#define TCC_PERFCOUNTER1_LO__PERFCOUNTER_LO__SHIFT 0x00000000
+#define TCC_PERFCOUNTER1_SELECT1__PERF_MODE2_MASK 0x0f000000L
+#define TCC_PERFCOUNTER1_SELECT1__PERF_MODE2__SHIFT 0x00000018
+#define TCC_PERFCOUNTER1_SELECT1__PERF_MODE3_MASK 0xf0000000L
+#define TCC_PERFCOUNTER1_SELECT1__PERF_MODE3__SHIFT 0x0000001c
+#define TCC_PERFCOUNTER1_SELECT1__PERF_SEL2_MASK 0x000003ffL
+#define TCC_PERFCOUNTER1_SELECT1__PERF_SEL2__SHIFT 0x00000000
+#define TCC_PERFCOUNTER1_SELECT1__PERF_SEL3_MASK 0x000ffc00L
+#define TCC_PERFCOUNTER1_SELECT1__PERF_SEL3__SHIFT 0x0000000a
+#define TCC_PERFCOUNTER1_SELECT__CNTR_MODE_MASK 0x00f00000L
+#define TCC_PERFCOUNTER1_SELECT__CNTR_MODE__SHIFT 0x00000014
+#define TCC_PERFCOUNTER1_SELECT__PERF_MODE1_MASK 0x0f000000L
+#define TCC_PERFCOUNTER1_SELECT__PERF_MODE1__SHIFT 0x00000018
+#define TCC_PERFCOUNTER1_SELECT__PERF_MODE_MASK 0xf0000000L
+#define TCC_PERFCOUNTER1_SELECT__PERF_MODE__SHIFT 0x0000001c
+#define TCC_PERFCOUNTER1_SELECT__PERF_SEL1_MASK 0x000ffc00L
+#define TCC_PERFCOUNTER1_SELECT__PERF_SEL1__SHIFT 0x0000000a
+#define TCC_PERFCOUNTER1_SELECT__PERF_SEL_MASK 0x000000ffL
+#define TCC_PERFCOUNTER1_SELECT__PERF_SEL__SHIFT 0x00000000
+#define TCC_PERFCOUNTER2_HI__PERFCOUNTER_HI_MASK 0xffffffffL
+#define TCC_PERFCOUNTER2_HI__PERFCOUNTER_HI__SHIFT 0x00000000
+#define TCC_PERFCOUNTER2_LO__PERFCOUNTER_LO_MASK 0xffffffffL
+#define TCC_PERFCOUNTER2_LO__PERFCOUNTER_LO__SHIFT 0x00000000
+#define TCC_PERFCOUNTER2_SELECT__CNTR_MODE_MASK 0x00f00000L
+#define TCC_PERFCOUNTER2_SELECT__CNTR_MODE__SHIFT 0x00000014
+#define TCC_PERFCOUNTER2_SELECT__PERF_MODE_MASK 0xf0000000L
+#define TCC_PERFCOUNTER2_SELECT__PERF_MODE__SHIFT 0x0000001c
+#define TCC_PERFCOUNTER2_SELECT__PERF_SEL_MASK 0x000000ffL
+#define TCC_PERFCOUNTER2_SELECT__PERF_SEL__SHIFT 0x00000000
+#define TCC_PERFCOUNTER3_HI__PERFCOUNTER_HI_MASK 0xffffffffL
+#define TCC_PERFCOUNTER3_HI__PERFCOUNTER_HI__SHIFT 0x00000000
+#define TCC_PERFCOUNTER3_LO__PERFCOUNTER_LO_MASK 0xffffffffL
+#define TCC_PERFCOUNTER3_LO__PERFCOUNTER_LO__SHIFT 0x00000000
+#define TCC_PERFCOUNTER3_SELECT__CNTR_MODE_MASK 0x00f00000L
+#define TCC_PERFCOUNTER3_SELECT__CNTR_MODE__SHIFT 0x00000014
+#define TCC_PERFCOUNTER3_SELECT__PERF_MODE_MASK 0xf0000000L
+#define TCC_PERFCOUNTER3_SELECT__PERF_MODE__SHIFT 0x0000001c
+#define TCC_PERFCOUNTER3_SELECT__PERF_SEL_MASK 0x000000ffL
+#define TCC_PERFCOUNTER3_SELECT__PERF_SEL__SHIFT 0x00000000
+#define TCI_CNTL_1__REQ_FIFO_DEPTH_MASK 0x00ff0000L
+#define TCI_CNTL_1__REQ_FIFO_DEPTH__SHIFT 0x00000010
+#define TCI_CNTL_1__WBINVL1_NUM_CYCLES_MASK 0x0000ffffL
+#define TCI_CNTL_1__WBINVL1_NUM_CYCLES__SHIFT 0x00000000
+#define TCI_CNTL_1__WDATA_RAM_DEPTH_MASK 0xff000000L
+#define TCI_CNTL_1__WDATA_RAM_DEPTH__SHIFT 0x00000018
+#define TCI_CNTL_2__L1_INVAL_ON_WBINVL2_MASK 0x00000001L
+#define TCI_CNTL_2__L1_INVAL_ON_WBINVL2__SHIFT 0x00000000
+#define TCI_CNTL_2__TCA_MAX_CREDIT_MASK 0x000001feL
+#define TCI_CNTL_2__TCA_MAX_CREDIT__SHIFT 0x00000001
+#define TCI_STATUS__TCI_BUSY_MASK 0x00000001L
+#define TCI_STATUS__TCI_BUSY__SHIFT 0x00000000
+#define TCP_ADDR_CONFIG__COLHI_WIDTH_MASK 0x000001c0L
+#define TCP_ADDR_CONFIG__COLHI_WIDTH__SHIFT 0x00000006
+#define TCP_ADDR_CONFIG__NUM_BANKS_MASK 0x00000030L
+#define TCP_ADDR_CONFIG__NUM_BANKS__SHIFT 0x00000004
+#define TCP_ADDR_CONFIG__NUM_TCC_BANKS_MASK 0x0000000fL
+#define TCP_ADDR_CONFIG__NUM_TCC_BANKS__SHIFT 0x00000000
+#define TCP_ADDR_CONFIG__RB_SPLIT_COLHI_MASK 0x00000200L
+#define TCP_ADDR_CONFIG__RB_SPLIT_COLHI__SHIFT 0x00000009
+#define TCP_BUFFER_ADDR_HASH_CNTL__BANK_BITS_MASK 0x00000700L
+#define TCP_BUFFER_ADDR_HASH_CNTL__BANK_BITS__SHIFT 0x00000008
+#define TCP_BUFFER_ADDR_HASH_CNTL__BANK_XOR_COUNT_MASK 0x07000000L
+#define TCP_BUFFER_ADDR_HASH_CNTL__BANK_XOR_COUNT__SHIFT 0x00000018
+#define TCP_BUFFER_ADDR_HASH_CNTL__CHANNEL_BITS_MASK 0x00000007L
+#define TCP_BUFFER_ADDR_HASH_CNTL__CHANNEL_BITS__SHIFT 0x00000000
+#define TCP_BUFFER_ADDR_HASH_CNTL__CHANNEL_XOR_COUNT_MASK 0x00070000L
+#define TCP_BUFFER_ADDR_HASH_CNTL__CHANNEL_XOR_COUNT__SHIFT 0x00000010
+#define TCP_CHAN_STEER_HI__CHAN8_MASK 0x0000000fL
+#define TCP_CHAN_STEER_HI__CHAN8__SHIFT 0x00000000
+#define TCP_CHAN_STEER_HI__CHAN9_MASK 0x000000f0L
+#define TCP_CHAN_STEER_HI__CHAN9__SHIFT 0x00000004
+#define TCP_CHAN_STEER_HI__CHANA_MASK 0x00000f00L
+#define TCP_CHAN_STEER_HI__CHANA__SHIFT 0x00000008
+#define TCP_CHAN_STEER_HI__CHANB_MASK 0x0000f000L
+#define TCP_CHAN_STEER_HI__CHANB__SHIFT 0x0000000c
+#define TCP_CHAN_STEER_HI__CHANC_MASK 0x000f0000L
+#define TCP_CHAN_STEER_HI__CHANC__SHIFT 0x00000010
+#define TCP_CHAN_STEER_HI__CHAND_MASK 0x00f00000L
+#define TCP_CHAN_STEER_HI__CHAND__SHIFT 0x00000014
+#define TCP_CHAN_STEER_HI__CHANE_MASK 0x0f000000L
+#define TCP_CHAN_STEER_HI__CHANE__SHIFT 0x00000018
+#define TCP_CHAN_STEER_HI__CHANF_MASK 0xf0000000L
+#define TCP_CHAN_STEER_HI__CHANF__SHIFT 0x0000001c
+#define TCP_CHAN_STEER_LO__CHAN0_MASK 0x0000000fL
+#define TCP_CHAN_STEER_LO__CHAN0__SHIFT 0x00000000
+#define TCP_CHAN_STEER_LO__CHAN1_MASK 0x000000f0L
+#define TCP_CHAN_STEER_LO__CHAN1__SHIFT 0x00000004
+#define TCP_CHAN_STEER_LO__CHAN2_MASK 0x00000f00L
+#define TCP_CHAN_STEER_LO__CHAN2__SHIFT 0x00000008
+#define TCP_CHAN_STEER_LO__CHAN3_MASK 0x0000f000L
+#define TCP_CHAN_STEER_LO__CHAN3__SHIFT 0x0000000c
+#define TCP_CHAN_STEER_LO__CHAN4_MASK 0x000f0000L
+#define TCP_CHAN_STEER_LO__CHAN4__SHIFT 0x00000010
+#define TCP_CHAN_STEER_LO__CHAN5_MASK 0x00f00000L
+#define TCP_CHAN_STEER_LO__CHAN5__SHIFT 0x00000014
+#define TCP_CHAN_STEER_LO__CHAN6_MASK 0x0f000000L
+#define TCP_CHAN_STEER_LO__CHAN6__SHIFT 0x00000018
+#define TCP_CHAN_STEER_LO__CHAN7_MASK 0xf0000000L
+#define TCP_CHAN_STEER_LO__CHAN7__SHIFT 0x0000001c
+#define TCP_CNTL__DISABLE_Z_MAP_MASK 0x10000000L
+#define TCP_CNTL__DISABLE_Z_MAP__SHIFT 0x0000001c
+#define TCP_CNTL__FLAT_BUF_CACHE_SWIZZLE_MASK 0x00000020L
+#define TCP_CNTL__FLAT_BUF_CACHE_SWIZZLE__SHIFT 0x00000005
+#define TCP_CNTL__FLAT_BUF_HASH_ENABLE_MASK 0x00000010L
+#define TCP_CNTL__FLAT_BUF_HASH_ENABLE__SHIFT 0x00000004
+#define TCP_CNTL__FORCE_EOW_TAGRAM_CNT_MASK 0x0fc00000L
+#define TCP_CNTL__FORCE_EOW_TAGRAM_CNT__SHIFT 0x00000016
+#define TCP_CNTL__FORCE_EOW_TOTAL_CNT_MASK 0x001f8000L
+#define TCP_CNTL__FORCE_EOW_TOTAL_CNT__SHIFT 0x0000000f
+#define TCP_CNTL__FORCE_HIT_MASK 0x00000001L
+#define TCP_CNTL__FORCE_HIT__SHIFT 0x00000000
+#define TCP_CNTL__FORCE_MISS_MASK 0x00000002L
+#define TCP_CNTL__FORCE_MISS__SHIFT 0x00000001
+#define TCP_CNTL__INV_ALL_VMIDS_MASK 0x20000000L
+#define TCP_CNTL__INV_ALL_VMIDS__SHIFT 0x0000001d
+#define TCP_CNTL__L1_SIZE_MASK 0x0000000cL
+#define TCP_CNTL__L1_SIZE__SHIFT 0x00000002
+#define TCP_CREDIT__LFIFO_CREDIT_MASK 0x000003ffL
+#define TCP_CREDIT__LFIFO_CREDIT__SHIFT 0x00000000
+#define TCP_CREDIT__REQ_FIFO_CREDIT_MASK 0x007f0000L
+#define TCP_CREDIT__REQ_FIFO_CREDIT__SHIFT 0x00000010
+#define TCP_CREDIT__TD_CREDIT_MASK 0xe0000000L
+#define TCP_CREDIT__TD_CREDIT__SHIFT 0x0000001d
+#define TCP_EDC_COUNTER__DED_COUNT_MASK 0x000f0000L
+#define TCP_EDC_COUNTER__DED_COUNT__SHIFT 0x00000010
+#define TCP_EDC_COUNTER__SEC_COUNT_MASK 0x0000000fL
+#define TCP_EDC_COUNTER__SEC_COUNT__SHIFT 0x00000000
+#define TCP_INVALIDATE__START_MASK 0x00000001L
+#define TCP_INVALIDATE__START__SHIFT 0x00000000
+#define TCP_PERFCOUNTER0_HI__PERFCOUNTER_HI_MASK 0xffffffffL
+#define TCP_PERFCOUNTER0_HI__PERFCOUNTER_HI__SHIFT 0x00000000
+#define TCP_PERFCOUNTER0_LO__PERFCOUNTER_LO_MASK 0xffffffffL
+#define TCP_PERFCOUNTER0_LO__PERFCOUNTER_LO__SHIFT 0x00000000
+#define TCP_PERFCOUNTER0_SELECT1__PERF_MODE2_MASK 0xf0000000L
+#define TCP_PERFCOUNTER0_SELECT1__PERF_MODE2__SHIFT 0x0000001c
+#define TCP_PERFCOUNTER0_SELECT1__PERF_MODE3_MASK 0x0f000000L
+#define TCP_PERFCOUNTER0_SELECT1__PERF_MODE3__SHIFT 0x00000018
+#define TCP_PERFCOUNTER0_SELECT1__PERF_SEL2_MASK 0x000003ffL
+#define TCP_PERFCOUNTER0_SELECT1__PERF_SEL2__SHIFT 0x00000000
+#define TCP_PERFCOUNTER0_SELECT1__PERF_SEL3_MASK 0x000ffc00L
+#define TCP_PERFCOUNTER0_SELECT1__PERF_SEL3__SHIFT 0x0000000a
+#define TCP_PERFCOUNTER0_SELECT__CNTR_MODE_MASK 0x00f00000L
+#define TCP_PERFCOUNTER0_SELECT__CNTR_MODE__SHIFT 0x00000014
+#define TCP_PERFCOUNTER0_SELECT__PERF_MODE1_MASK 0x0f000000L
+#define TCP_PERFCOUNTER0_SELECT__PERF_MODE1__SHIFT 0x00000018
+#define TCP_PERFCOUNTER0_SELECT__PERF_MODE_MASK 0xf0000000L
+#define TCP_PERFCOUNTER0_SELECT__PERF_MODE__SHIFT 0x0000001c
+#define TCP_PERFCOUNTER0_SELECT__PERF_SEL1_MASK 0x000ffc00L
+#define TCP_PERFCOUNTER0_SELECT__PERF_SEL1__SHIFT 0x0000000a
+#define TCP_PERFCOUNTER0_SELECT__PERF_SEL_MASK 0x000000ffL
+#define TCP_PERFCOUNTER0_SELECT__PERF_SEL__SHIFT 0x00000000
+#define TCP_PERFCOUNTER1_HI__PERFCOUNTER_HI_MASK 0xffffffffL
+#define TCP_PERFCOUNTER1_HI__PERFCOUNTER_HI__SHIFT 0x00000000
+#define TCP_PERFCOUNTER1_LO__PERFCOUNTER_LO_MASK 0xffffffffL
+#define TCP_PERFCOUNTER1_LO__PERFCOUNTER_LO__SHIFT 0x00000000
+#define TCP_PERFCOUNTER1_SELECT1__PERF_MODE2_MASK 0xf0000000L
+#define TCP_PERFCOUNTER1_SELECT1__PERF_MODE2__SHIFT 0x0000001c
+#define TCP_PERFCOUNTER1_SELECT1__PERF_MODE3_MASK 0x0f000000L
+#define TCP_PERFCOUNTER1_SELECT1__PERF_MODE3__SHIFT 0x00000018
+#define TCP_PERFCOUNTER1_SELECT1__PERF_SEL2_MASK 0x000003ffL
+#define TCP_PERFCOUNTER1_SELECT1__PERF_SEL2__SHIFT 0x00000000
+#define TCP_PERFCOUNTER1_SELECT1__PERF_SEL3_MASK 0x000ffc00L
+#define TCP_PERFCOUNTER1_SELECT1__PERF_SEL3__SHIFT 0x0000000a
+#define TCP_PERFCOUNTER1_SELECT__CNTR_MODE_MASK 0x00f00000L
+#define TCP_PERFCOUNTER1_SELECT__CNTR_MODE__SHIFT 0x00000014
+#define TCP_PERFCOUNTER1_SELECT__PERF_MODE1_MASK 0x0f000000L
+#define TCP_PERFCOUNTER1_SELECT__PERF_MODE1__SHIFT 0x00000018
+#define TCP_PERFCOUNTER1_SELECT__PERF_MODE_MASK 0xf0000000L
+#define TCP_PERFCOUNTER1_SELECT__PERF_MODE__SHIFT 0x0000001c
+#define TCP_PERFCOUNTER1_SELECT__PERF_SEL1_MASK 0x000ffc00L
+#define TCP_PERFCOUNTER1_SELECT__PERF_SEL1__SHIFT 0x0000000a
+#define TCP_PERFCOUNTER1_SELECT__PERF_SEL_MASK 0x000000ffL
+#define TCP_PERFCOUNTER1_SELECT__PERF_SEL__SHIFT 0x00000000
+#define TCP_PERFCOUNTER2_HI__PERFCOUNTER_HI_MASK 0xffffffffL
+#define TCP_PERFCOUNTER2_HI__PERFCOUNTER_HI__SHIFT 0x00000000
+#define TCP_PERFCOUNTER2_LO__PERFCOUNTER_LO_MASK 0xffffffffL
+#define TCP_PERFCOUNTER2_LO__PERFCOUNTER_LO__SHIFT 0x00000000
+#define TCP_PERFCOUNTER2_SELECT__CNTR_MODE_MASK 0x00f00000L
+#define TCP_PERFCOUNTER2_SELECT__CNTR_MODE__SHIFT 0x00000014
+#define TCP_PERFCOUNTER2_SELECT__PERF_MODE_MASK 0xf0000000L
+#define TCP_PERFCOUNTER2_SELECT__PERF_MODE__SHIFT 0x0000001c
+#define TCP_PERFCOUNTER2_SELECT__PERF_SEL_MASK 0x000000ffL
+#define TCP_PERFCOUNTER2_SELECT__PERF_SEL__SHIFT 0x00000000
+#define TCP_PERFCOUNTER3_HI__PERFCOUNTER_HI_MASK 0xffffffffL
+#define TCP_PERFCOUNTER3_HI__PERFCOUNTER_HI__SHIFT 0x00000000
+#define TCP_PERFCOUNTER3_LO__PERFCOUNTER_LO_MASK 0xffffffffL
+#define TCP_PERFCOUNTER3_LO__PERFCOUNTER_LO__SHIFT 0x00000000
+#define TCP_PERFCOUNTER3_SELECT__CNTR_MODE_MASK 0x00f00000L
+#define TCP_PERFCOUNTER3_SELECT__CNTR_MODE__SHIFT 0x00000014
+#define TCP_PERFCOUNTER3_SELECT__PERF_MODE_MASK 0xf0000000L
+#define TCP_PERFCOUNTER3_SELECT__PERF_MODE__SHIFT 0x0000001c
+#define TCP_PERFCOUNTER3_SELECT__PERF_SEL_MASK 0x000000ffL
+#define TCP_PERFCOUNTER3_SELECT__PERF_SEL__SHIFT 0x00000000
+#define TCP_STATUS__TCP_BUSY_MASK 0x00000001L
+#define TCP_STATUS__TCP_BUSY__SHIFT 0x00000000
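+/*
+ * Illustrative sketch (not part of the generated header): every field in
+ * this file is described by a _MASK/__SHIFT pair, and a consumer extracts
+ * a field by masking first and shifting second.  The amdgpu driver wraps
+ * this same pattern in its REG_GET_FIELD() helper; the function below only
+ * demonstrates the arithmetic against the TCP_STATUS definitions above.
+ */
+static inline unsigned int tcp_status_busy(unsigned int tcp_status)
+{
+	/* (value & mask) >> shift isolates the single TCP_BUSY bit. */
+	return (tcp_status & TCP_STATUS__TCP_BUSY_MASK) >>
+	       TCP_STATUS__TCP_BUSY__SHIFT;
+}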
+#define TD_CGTT_CTRL__OFF_HYSTERESIS_MASK 0x00000ff0L
+#define TD_CGTT_CTRL__OFF_HYSTERESIS__SHIFT 0x00000004
+#define TD_CGTT_CTRL__ON_DELAY_MASK 0x0000000fL
+#define TD_CGTT_CTRL__ON_DELAY__SHIFT 0x00000000
+#define TD_CGTT_CTRL__SOFT_OVERRIDE0_MASK 0x80000000L
+#define TD_CGTT_CTRL__SOFT_OVERRIDE0__SHIFT 0x0000001f
+#define TD_CGTT_CTRL__SOFT_OVERRIDE1_MASK 0x40000000L
+#define TD_CGTT_CTRL__SOFT_OVERRIDE1__SHIFT 0x0000001e
+#define TD_CGTT_CTRL__SOFT_OVERRIDE2_MASK 0x20000000L
+#define TD_CGTT_CTRL__SOFT_OVERRIDE2__SHIFT 0x0000001d
+#define TD_CGTT_CTRL__SOFT_OVERRIDE3_MASK 0x10000000L
+#define TD_CGTT_CTRL__SOFT_OVERRIDE3__SHIFT 0x0000001c
+#define TD_CGTT_CTRL__SOFT_OVERRIDE4_MASK 0x08000000L
+#define TD_CGTT_CTRL__SOFT_OVERRIDE4__SHIFT 0x0000001b
+#define TD_CGTT_CTRL__SOFT_OVERRIDE5_MASK 0x04000000L
+#define TD_CGTT_CTRL__SOFT_OVERRIDE5__SHIFT 0x0000001a
+#define TD_CGTT_CTRL__SOFT_OVERRIDE6_MASK 0x02000000L
+#define TD_CGTT_CTRL__SOFT_OVERRIDE6__SHIFT 0x00000019
+#define TD_CGTT_CTRL__SOFT_OVERRIDE7_MASK 0x01000000L
+#define TD_CGTT_CTRL__SOFT_OVERRIDE7__SHIFT 0x00000018
+#define TD_CNTL__DISABLE_POWER_THROTTLE_MASK 0x00100000L
+#define TD_CNTL__DISABLE_POWER_THROTTLE__SHIFT 0x00000014
+#define TD_CNTL__EXTEND_LDS_STALL_MASK 0x00000600L
+#define TD_CNTL__EXTEND_LDS_STALL__SHIFT 0x00000009
+#define TD_CNTL__GATHER4_DX9_MODE_MASK 0x00080000L
+#define TD_CNTL__GATHER4_DX9_MODE__SHIFT 0x00000013
+#define TD_CNTL__GATHER4_FLOAT_MODE_MASK 0x00010000L
+#define TD_CNTL__GATHER4_FLOAT_MODE__SHIFT 0x00000010
+#define TD_CNTL__LD_FLOAT_MODE_MASK 0x00040000L
+#define TD_CNTL__LD_FLOAT_MODE__SHIFT 0x00000012
+#define TD_CNTL__LDS_STALL_PHASE_ADJUST_MASK 0x00001800L
+#define TD_CNTL__LDS_STALL_PHASE_ADJUST__SHIFT 0x0000000b
+#define TD_CNTL__PAD_STALL_EN_MASK 0x00000100L
+#define TD_CNTL__PAD_STALL_EN__SHIFT 0x00000008
+#define TD_CNTL__PRECISION_COMPATIBILITY_MASK 0x00008000L
+#define TD_CNTL__PRECISION_COMPATIBILITY__SHIFT 0x0000000f
+#define TD_CNTL__SYNC_PHASE_SH_MASK 0x00000003L
+#define TD_CNTL__SYNC_PHASE_SH__SHIFT 0x00000000
+#define TD_CNTL__SYNC_PHASE_VC_SMX_MASK 0x00000030L
+#define TD_CNTL__SYNC_PHASE_VC_SMX__SHIFT 0x00000004
+#define TD_DEBUG_DATA__DATA_MASK 0x00ffffffL
+#define TD_DEBUG_DATA__DATA__SHIFT 0x00000000
+#define TD_DEBUG_INDEX__INDEX_MASK 0x0000001fL
+#define TD_DEBUG_INDEX__INDEX__SHIFT 0x00000000
+#define TD_PERFCOUNTER0_HI__PERFCOUNTER_HI_MASK 0xffffffffL
+#define TD_PERFCOUNTER0_HI__PERFCOUNTER_HI__SHIFT 0x00000000
+#define TD_PERFCOUNTER0_LO__PERFCOUNTER_LO_MASK 0xffffffffL
+#define TD_PERFCOUNTER0_LO__PERFCOUNTER_LO__SHIFT 0x00000000
+#define TD_PERFCOUNTER0_SELECT1__PERF_MODE2_MASK 0xf0000000L
+#define TD_PERFCOUNTER0_SELECT1__PERF_MODE2__SHIFT 0x0000001c
+#define TD_PERFCOUNTER0_SELECT1__PERF_MODE3_MASK 0x0f000000L
+#define TD_PERFCOUNTER0_SELECT1__PERF_MODE3__SHIFT 0x00000018
+#define TD_PERFCOUNTER0_SELECT1__PERF_SEL2_MASK 0x000000ffL
+#define TD_PERFCOUNTER0_SELECT1__PERF_SEL2__SHIFT 0x00000000
+#define TD_PERFCOUNTER0_SELECT1__PERF_SEL3_MASK 0x0003fc00L
+#define TD_PERFCOUNTER0_SELECT1__PERF_SEL3__SHIFT 0x0000000a
+#define TD_PERFCOUNTER0_SELECT__CNTR_MODE_MASK 0x00f00000L
+#define TD_PERFCOUNTER0_SELECT__CNTR_MODE__SHIFT 0x00000014
+#define TD_PERFCOUNTER0_SELECT__PERF_MODE1_MASK 0x0f000000L
+#define TD_PERFCOUNTER0_SELECT__PERF_MODE1__SHIFT 0x00000018
+#define TD_PERFCOUNTER0_SELECT__PERF_MODE_MASK 0xf0000000L
+#define TD_PERFCOUNTER0_SELECT__PERF_MODE__SHIFT 0x0000001c
+#define TD_PERFCOUNTER0_SELECT__PERF_SEL1_MASK 0x0003fc00L
+#define TD_PERFCOUNTER0_SELECT__PERF_SEL1__SHIFT 0x0000000a
+#define TD_PERFCOUNTER0_SELECT__PERF_SEL_MASK 0x000000ffL
+#define TD_PERFCOUNTER0_SELECT__PERF_SEL__SHIFT 0x00000000
+#define TD_SCRATCH__SCRATCH_MASK 0xffffffffL
+#define TD_SCRATCH__SCRATCH__SHIFT 0x00000000
+#define TD_STATUS__BUSY_MASK 0x80000000L
+#define TD_STATUS__BUSY__SHIFT 0x0000001f
+#define USER_SQC_BANK_DISABLE__SQC0_BANK_DISABLE_MASK 0x000f0000L
+#define USER_SQC_BANK_DISABLE__SQC0_BANK_DISABLE__SHIFT 0x00000010
+#define USER_SQC_BANK_DISABLE__SQC1_BANK_DISABLE_MASK 0x00f00000L
+#define USER_SQC_BANK_DISABLE__SQC1_BANK_DISABLE__SHIFT 0x00000014
+#define USER_SQC_BANK_DISABLE__SQC2_BANK_DISABLE_MASK 0x0f000000L
+#define USER_SQC_BANK_DISABLE__SQC2_BANK_DISABLE__SHIFT 0x00000018
+#define USER_SQC_BANK_DISABLE__SQC3_BANK_DISABLE_MASK 0xf0000000L
+#define USER_SQC_BANK_DISABLE__SQC3_BANK_DISABLE__SHIFT 0x0000001c
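+/*
+ * Illustrative sketch (not part of the generated header): updating a field
+ * is the inverse of the read shown earlier -- clear the field with ~_MASK,
+ * then OR in the new value shifted into place.  amdgpu's REG_SET_FIELD()
+ * helper follows the same pattern; the helper and bank value below are
+ * made-up examples, not a recommended setting.
+ */
+static inline unsigned int
+user_sqc_set_sqc0_bank_disable(unsigned int reg, unsigned int banks)
+{
+	/* Clear the old SQC0_BANK_DISABLE field, then insert the new one. */
+	reg &= ~USER_SQC_BANK_DISABLE__SQC0_BANK_DISABLE_MASK;
+	reg |= (banks << USER_SQC_BANK_DISABLE__SQC0_BANK_DISABLE__SHIFT) &
+	       USER_SQC_BANK_DISABLE__SQC0_BANK_DISABLE_MASK;
+	return reg;
+}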
+#define VGT_CACHE_INVALIDATION__AUTO_INVLD_EN_MASK 0x000000c0L
+#define VGT_CACHE_INVALIDATION__AUTO_INVLD_EN__SHIFT 0x00000006
+#define VGT_CACHE_INVALIDATION__CACHE_INVALIDATION_MASK 0x00000003L
+#define VGT_CACHE_INVALIDATION__CACHE_INVALIDATION__SHIFT 0x00000000
+#define VGT_CACHE_INVALIDATION__DIS_RANGE_FULL_INVLD_MASK 0x00000800L
+#define VGT_CACHE_INVALIDATION__DIS_RANGE_FULL_INVLD__SHIFT 0x0000000b
+#define VGT_CACHE_INVALIDATION__ES_LIMIT_MASK 0x001f0000L
+#define VGT_CACHE_INVALIDATION__ES_LIMIT__SHIFT 0x00000010
+#define VGT_CACHE_INVALIDATION__GS_LATE_ALLOC_EN_MASK 0x00001000L
+#define VGT_CACHE_INVALIDATION__GS_LATE_ALLOC_EN__SHIFT 0x0000000c
+#define VGT_CACHE_INVALIDATION__STREAMOUT_FULL_FLUSH_MASK 0x00002000L
+#define VGT_CACHE_INVALIDATION__STREAMOUT_FULL_FLUSH__SHIFT 0x0000000d
+#define VGT_CACHE_INVALIDATION__USE_GS_DONE_MASK 0x00000200L
+#define VGT_CACHE_INVALIDATION__USE_GS_DONE__SHIFT 0x00000009
+#define VGT_CACHE_INVALIDATION__VS_NO_EXTRA_BUFFER_MASK 0x00000020L
+#define VGT_CACHE_INVALIDATION__VS_NO_EXTRA_BUFFER__SHIFT 0x00000005
+#define VGT_CNTL_STATUS__VGT_BUSY_MASK 0x00000001L
+#define VGT_CNTL_STATUS__VGT_BUSY__SHIFT 0x00000000
+#define VGT_CNTL_STATUS__VGT_GS_BUSY_MASK 0x00000080L
+#define VGT_CNTL_STATUS__VGT_GS_BUSY__SHIFT 0x00000007
+#define VGT_CNTL_STATUS__VGT_HS_BUSY_MASK 0x00000100L
+#define VGT_CNTL_STATUS__VGT_HS_BUSY__SHIFT 0x00000008
+#define VGT_CNTL_STATUS__VGT_OUT_BUSY_MASK 0x00000004L
+#define VGT_CNTL_STATUS__VGT_OUT_BUSY__SHIFT 0x00000002
+#define VGT_CNTL_STATUS__VGT_OUT_INDX_BUSY_MASK 0x00000002L
+#define VGT_CNTL_STATUS__VGT_OUT_INDX_BUSY__SHIFT 0x00000001
+#define VGT_CNTL_STATUS__VGT_PI_BUSY_MASK 0x00000040L
+#define VGT_CNTL_STATUS__VGT_PI_BUSY__SHIFT 0x00000006
+#define VGT_CNTL_STATUS__VGT_PT_BUSY_MASK 0x00000008L
+#define VGT_CNTL_STATUS__VGT_PT_BUSY__SHIFT 0x00000003
+#define VGT_CNTL_STATUS__VGT_TE11_BUSY_MASK 0x00000200L
+#define VGT_CNTL_STATUS__VGT_TE11_BUSY__SHIFT 0x00000009
+#define VGT_CNTL_STATUS__VGT_TE_BUSY_MASK 0x00000010L
+#define VGT_CNTL_STATUS__VGT_TE_BUSY__SHIFT 0x00000004
+#define VGT_CNTL_STATUS__VGT_VR_BUSY_MASK 0x00000020L
+#define VGT_CNTL_STATUS__VGT_VR_BUSY__SHIFT 0x00000005
+#define VGT_DEBUG_CNTL__VGT_DEBUG_INDX_MASK 0x0000003fL
+#define VGT_DEBUG_CNTL__VGT_DEBUG_INDX__SHIFT 0x00000000
+#define VGT_DEBUG_CNTL__VGT_DEBUG_SEL_BUS_B_MASK 0x00000040L
+#define VGT_DEBUG_CNTL__VGT_DEBUG_SEL_BUS_B__SHIFT 0x00000006
+#define VGT_DEBUG_DATA__DATA_MASK 0xffffffffL
+#define VGT_DEBUG_DATA__DATA__SHIFT 0x00000000
+#define VGT_DEBUG_REG0__cm_busy_MASK 0x00008000L
+#define VGT_DEBUG_REG0__cm_busy__SHIFT 0x0000000f
+#define VGT_DEBUG_REG0__combined_out_busy_MASK 0x00200000L
+#define VGT_DEBUG_REG0__combined_out_busy__SHIFT 0x00000015
+#define VGT_DEBUG_REG0__core_clk_busy_MASK 0x04000000L
+#define VGT_DEBUG_REG0__core_clk_busy__SHIFT 0x0000001a
+#define VGT_DEBUG_REG0__frmt_busy_MASK 0x00020000L
+#define VGT_DEBUG_REG0__frmt_busy__SHIFT 0x00000011
+#define VGT_DEBUG_REG0__gog_busy_MASK 0x00010000L
+#define VGT_DEBUG_REG0__gog_busy__SHIFT 0x00000010
+#define VGT_DEBUG_REG0__gs_busy_MASK 0x00001000L
+#define VGT_DEBUG_REG0__gs_busy__SHIFT 0x0000000c
+#define VGT_DEBUG_REG0__gs_clk_busy_MASK 0x08000000L
+#define VGT_DEBUG_REG0__gs_clk_busy__SHIFT 0x0000001b
+#define VGT_DEBUG_REG0__pa_interfaces_busy_MASK 0x00800000L
+#define VGT_DEBUG_REG0__pa_interfaces_busy__SHIFT 0x00000017
+#define VGT_DEBUG_REG0__pi_busy_MASK 0x00000100L
+#define VGT_DEBUG_REG0__pi_busy__SHIFT 0x00000008
+#define VGT_DEBUG_REG0__pt_pi_busy_MASK 0x00000400L
+#define VGT_DEBUG_REG0__pt_pi_busy__SHIFT 0x0000000a
+#define VGT_DEBUG_REG0__rcm_busy_MASK 0x00002000L
+#define VGT_DEBUG_REG0__rcm_busy__SHIFT 0x0000000d
+#define VGT_DEBUG_REG0__reg_clk_busy_MASK 0x01000000L
+#define VGT_DEBUG_REG0__reg_clk_busy__SHIFT 0x00000018
+#define VGT_DEBUG_REG0__sclk_core_vld_MASK 0x20000000L
+#define VGT_DEBUG_REG0__sclk_core_vld__SHIFT 0x0000001d
+#define VGT_DEBUG_REG0__sclk_gs_vld_MASK 0x40000000L
+#define VGT_DEBUG_REG0__sclk_gs_vld__SHIFT 0x0000001e
+#define VGT_DEBUG_REG0__SPARE0_MASK 0x80000000L
+#define VGT_DEBUG_REG0__SPARE0__SHIFT 0x0000001f
+#define VGT_DEBUG_REG0__SPARE10_MASK 0x00040000L
+#define VGT_DEBUG_REG0__SPARE10__SHIFT 0x00000012
+#define VGT_DEBUG_REG0__SPARE1_MASK 0x10000000L
+#define VGT_DEBUG_REG0__SPARE1__SHIFT 0x0000001c
+#define VGT_DEBUG_REG0__SPARE2_MASK 0x02000000L
+#define VGT_DEBUG_REG0__SPARE2__SHIFT 0x00000019
+#define VGT_DEBUG_REG0__SPARE3_MASK 0x00100000L
+#define VGT_DEBUG_REG0__SPARE3__SHIFT 0x00000014
+#define VGT_DEBUG_REG0__SPARE4_MASK 0x00000080L
+#define VGT_DEBUG_REG0__SPARE4__SHIFT 0x00000007
+#define VGT_DEBUG_REG0__SPARE5_MASK 0x00000040L
+#define VGT_DEBUG_REG0__SPARE5__SHIFT 0x00000006
+#define VGT_DEBUG_REG0__SPARE6_MASK 0x00000020L
+#define VGT_DEBUG_REG0__SPARE6__SHIFT 0x00000005
+#define VGT_DEBUG_REG0__SPARE7_MASK 0x00000010L
+#define VGT_DEBUG_REG0__SPARE7__SHIFT 0x00000004
+#define VGT_DEBUG_REG0__SPARE8_MASK 0x00000008L
+#define VGT_DEBUG_REG0__SPARE8__SHIFT 0x00000003
+#define VGT_DEBUG_REG0__SPARE9_MASK 0x00000002L
+#define VGT_DEBUG_REG0__SPARE9__SHIFT 0x00000001
+#define VGT_DEBUG_REG0__spi_vs_interfaces_busy_MASK 0x00400000L
+#define VGT_DEBUG_REG0__spi_vs_interfaces_busy__SHIFT 0x00000016
+#define VGT_DEBUG_REG0__te11_pi_busy_MASK 0x00080000L
+#define VGT_DEBUG_REG0__te11_pi_busy__SHIFT 0x00000013
+#define VGT_DEBUG_REG0__te_pi_busy_MASK 0x00000800L
+#define VGT_DEBUG_REG0__te_pi_busy__SHIFT 0x0000000b
+#define VGT_DEBUG_REG0__tm_busy_MASK 0x00004000L
+#define VGT_DEBUG_REG0__tm_busy__SHIFT 0x0000000e
+#define VGT_DEBUG_REG0__vgt_busy_extended_MASK 0x00000001L
+#define VGT_DEBUG_REG0__vgt_busy_extended__SHIFT 0x00000000
+#define VGT_DEBUG_REG0__vgt_busy_MASK 0x00000004L
+#define VGT_DEBUG_REG0__vgt_busy__SHIFT 0x00000002
+#define VGT_DEBUG_REG0__vr_pi_busy_MASK 0x00000200L
+#define VGT_DEBUG_REG0__vr_pi_busy__SHIFT 0x00000009
+#define VGT_DEBUG_REG10__eopg_r2_q_MASK 0x00000020L
+#define VGT_DEBUG_REG10__eopg_r2_q__SHIFT 0x00000005
+#define VGT_DEBUG_REG10__eotg_r2_q_MASK 0x00000040L
+#define VGT_DEBUG_REG10__eotg_r2_q__SHIFT 0x00000006
+#define VGT_DEBUG_REG10__es_rb_space_avail_r2_q_8_0_MASK 0xff800000L
+#define VGT_DEBUG_REG10__es_rb_space_avail_r2_q_8_0__SHIFT 0x00000017
+#define VGT_DEBUG_REG10__gs_rb_space_avail_r3_q_9_0_MASK 0x007fe000L
+#define VGT_DEBUG_REG10__gs_rb_space_avail_r3_q_9_0__SHIFT 0x0000000d
+#define VGT_DEBUG_REG10__index_buffer_depth_r1_q_MASK 0x0000001fL
+#define VGT_DEBUG_REG10__index_buffer_depth_r1_q__SHIFT 0x00000000
+#define VGT_DEBUG_REG10__onchip_gs_en_r0_q_MASK 0x00000180L
+#define VGT_DEBUG_REG10__onchip_gs_en_r0_q__SHIFT 0x00000007
+#define VGT_DEBUG_REG10__rcm_mem_gsprim_re_q_MASK 0x00001000L
+#define VGT_DEBUG_REG10__rcm_mem_gsprim_re_qq_MASK 0x00000800L
+#define VGT_DEBUG_REG10__rcm_mem_gsprim_re_qq__SHIFT 0x0000000b
+#define VGT_DEBUG_REG10__rcm_mem_gsprim_re_q__SHIFT 0x0000000c
+#define VGT_DEBUG_REG10__SPARE2_MASK 0x00000600L
+#define VGT_DEBUG_REG10__SPARE2__SHIFT 0x00000009
+#define VGT_DEBUG_REG11__counters_available_r0_MASK 0x00001000L
+#define VGT_DEBUG_REG11__counters_available_r0__SHIFT 0x0000000c
+#define VGT_DEBUG_REG11__counters_avail_r0_MASK 0x00000800L
+#define VGT_DEBUG_REG11__counters_avail_r0__SHIFT 0x0000000b
+#define VGT_DEBUG_REG11__counters_busy_r0_MASK 0x00000400L
+#define VGT_DEBUG_REG11__counters_busy_r0__SHIFT 0x0000000a
+#define VGT_DEBUG_REG11__es_r0_rtr_MASK 0x00100000L
+#define VGT_DEBUG_REG11__es_r0_rtr__SHIFT 0x00000014
+#define VGT_DEBUG_REG11__es_rb_dealloc_fifo_busy_MASK 0x00000008L
+#define VGT_DEBUG_REG11__es_rb_dealloc_fifo_busy__SHIFT 0x00000003
+#define VGT_DEBUG_REG11__es_rb_dealloc_fifo_full_MASK 0x08000000L
+#define VGT_DEBUG_REG11__es_rb_dealloc_fifo_full__SHIFT 0x0000001b
+#define VGT_DEBUG_REG11__es_rb_roll_over_r3_MASK 0x00000200L
+#define VGT_DEBUG_REG11__es_rb_roll_over_r3__SHIFT 0x00000009
+#define VGT_DEBUG_REG11__es_tbl_empty_MASK 0x40000000L
+#define VGT_DEBUG_REG11__es_tbl_empty__SHIFT 0x0000001e
+#define VGT_DEBUG_REG11__gog_tm_vs_event_rtr_MASK 0x00200000L
+#define VGT_DEBUG_REG11__gog_tm_vs_event_rtr__SHIFT 0x00000015
+#define VGT_DEBUG_REG11__gs_issue_rtr_MASK 0x00010000L
+#define VGT_DEBUG_REG11__gs_issue_rtr__SHIFT 0x00000010
+#define VGT_DEBUG_REG11__gs_r0_rtr_MASK 0x00080000L
+#define VGT_DEBUG_REG11__gs_r0_rtr__SHIFT 0x00000013
+#define VGT_DEBUG_REG11__hold_eswave_MASK 0x00000100L
+#define VGT_DEBUG_REG11__hold_eswave__SHIFT 0x00000008
+#define VGT_DEBUG_REG11__no_active_states_r0_MASK 0x80000000L
+#define VGT_DEBUG_REG11__no_active_states_r0__SHIFT 0x0000001f
+#define VGT_DEBUG_REG11__send_event_q_MASK 0x20000000L
+#define VGT_DEBUG_REG11__send_event_q__SHIFT 0x0000001d
+#define VGT_DEBUG_REG11__SPARE0_MASK 0x00040000L
+#define VGT_DEBUG_REG11__SPARE0__SHIFT 0x00000012
+#define VGT_DEBUG_REG11__SPARE1_MASK 0x00000020L
+#define VGT_DEBUG_REG11__SPARE1__SHIFT 0x00000005
+#define VGT_DEBUG_REG11__spi_esthread_fifo_busy_MASK 0x00000080L
+#define VGT_DEBUG_REG11__spi_esthread_fifo_busy__SHIFT 0x00000007
+#define VGT_DEBUG_REG11__spi_gsthread_fifo_busy_MASK 0x00000040L
+#define VGT_DEBUG_REG11__spi_gsthread_fifo_busy__SHIFT 0x00000006
+#define VGT_DEBUG_REG11__tm_busy_q_MASK 0x00000001L
+#define VGT_DEBUG_REG11__tm_busy_q__SHIFT 0x00000000
+#define VGT_DEBUG_REG11__tm_noif_busy_q_MASK 0x00000002L
+#define VGT_DEBUG_REG11__tm_noif_busy_q__SHIFT 0x00000001
+#define VGT_DEBUG_REG11__tm_out_busy_q_MASK 0x00000004L
+#define VGT_DEBUG_REG11__tm_out_busy_q__SHIFT 0x00000002
+#define VGT_DEBUG_REG11__tm_pt_event_rtr_MASK 0x00020000L
+#define VGT_DEBUG_REG11__tm_pt_event_rtr__SHIFT 0x00000011
+#define VGT_DEBUG_REG11__tm_rcm_es_tbl_rtr_MASK 0x01000000L
+#define VGT_DEBUG_REG11__tm_rcm_es_tbl_rtr__SHIFT 0x00000018
+#define VGT_DEBUG_REG11__tm_rcm_gs_event_rtr_MASK 0x00400000L
+#define VGT_DEBUG_REG11__tm_rcm_gs_event_rtr__SHIFT 0x00000016
+#define VGT_DEBUG_REG11__tm_rcm_gs_tbl_rtr_MASK 0x00800000L
+#define VGT_DEBUG_REG11__tm_rcm_gs_tbl_rtr__SHIFT 0x00000017
+#define VGT_DEBUG_REG11__VGT_SPI_esthread_rtr_q_MASK 0x00008000L
+#define VGT_DEBUG_REG11__VGT_SPI_esthread_rtr_q__SHIFT 0x0000000f
+#define VGT_DEBUG_REG11__VGT_SPI_gsthread_rtr_q_MASK 0x00004000L
+#define VGT_DEBUG_REG11__VGT_SPI_gsthread_rtr_q__SHIFT 0x0000000e
+#define VGT_DEBUG_REG11__vs_dealloc_tbl_busy_MASK 0x00000010L
+#define VGT_DEBUG_REG11__vs_dealloc_tbl_busy__SHIFT 0x00000004
+#define VGT_DEBUG_REG11__vs_dealloc_tbl_full_MASK 0x10000000L
+#define VGT_DEBUG_REG11__vs_dealloc_tbl_full__SHIFT 0x0000001c
+#define VGT_DEBUG_REG11__vs_event_fifo_empty_MASK 0x02000000L
+#define VGT_DEBUG_REG11__vs_event_fifo_empty__SHIFT 0x00000019
+#define VGT_DEBUG_REG11__vs_event_fifo_full_MASK 0x04000000L
+#define VGT_DEBUG_REG11__vs_event_fifo_full__SHIFT 0x0000001a
+#define VGT_DEBUG_REG11__vs_event_fifo_rtr_MASK 0x00002000L
+#define VGT_DEBUG_REG11__vs_event_fifo_rtr__SHIFT 0x0000000d
+#define VGT_DEBUG_REG12__gs_state0_r0_q_MASK 0x00000007L
+#define VGT_DEBUG_REG12__gs_state0_r0_q__SHIFT 0x00000000
+#define VGT_DEBUG_REG12__gs_state1_r0_q_MASK 0x00000038L
+#define VGT_DEBUG_REG12__gs_state1_r0_q__SHIFT 0x00000003
+#define VGT_DEBUG_REG12__gs_state2_r0_q_MASK 0x000001c0L
+#define VGT_DEBUG_REG12__gs_state2_r0_q__SHIFT 0x00000006
+#define VGT_DEBUG_REG12__gs_state3_r0_q_MASK 0x00000e00L
+#define VGT_DEBUG_REG12__gs_state3_r0_q__SHIFT 0x00000009
+#define VGT_DEBUG_REG12__gs_state4_r0_q_MASK 0x00007000L
+#define VGT_DEBUG_REG12__gs_state4_r0_q__SHIFT 0x0000000c
+#define VGT_DEBUG_REG12__gs_state5_r0_q_MASK 0x00038000L
+#define VGT_DEBUG_REG12__gs_state5_r0_q__SHIFT 0x0000000f
+#define VGT_DEBUG_REG12__gs_state6_r0_q_MASK 0x001c0000L
+#define VGT_DEBUG_REG12__gs_state6_r0_q__SHIFT 0x00000012
+#define VGT_DEBUG_REG12__gs_state7_r0_q_MASK 0x00e00000L
+#define VGT_DEBUG_REG12__gs_state7_r0_q__SHIFT 0x00000015
+#define VGT_DEBUG_REG12__gs_state8_r0_q_MASK 0x07000000L
+#define VGT_DEBUG_REG12__gs_state8_r0_q__SHIFT 0x00000018
+#define VGT_DEBUG_REG12__gs_state9_r0_q_MASK 0x38000000L
+#define VGT_DEBUG_REG12__gs_state9_r0_q__SHIFT 0x0000001b
+#define VGT_DEBUG_REG12__hold_eswave_eop_MASK 0x40000000L
+#define VGT_DEBUG_REG12__hold_eswave_eop__SHIFT 0x0000001e
+#define VGT_DEBUG_REG12__SPARE0_MASK 0x80000000L
+#define VGT_DEBUG_REG12__SPARE0__SHIFT 0x0000001f
+#define VGT_DEBUG_REG13__active_cm_sm_r0_q_MASK 0xf8000000L
+#define VGT_DEBUG_REG13__active_cm_sm_r0_q__SHIFT 0x0000001b
+#define VGT_DEBUG_REG13__es_tbl_full_MASK 0x01000000L
+#define VGT_DEBUG_REG13__es_tbl_full__SHIFT 0x00000018
+#define VGT_DEBUG_REG13__gsfetch_done_cnt_q_not_0_MASK 0x00800000L
+#define VGT_DEBUG_REG13__gsfetch_done_cnt_q_not_0__SHIFT 0x00000017
+#define VGT_DEBUG_REG13__gsfetch_done_fifo_cnt_q_not_0_MASK 0x00400000L
+#define VGT_DEBUG_REG13__gsfetch_done_fifo_cnt_q_not_0__SHIFT 0x00000016
+#define VGT_DEBUG_REG13__gs_state10_r0_q_MASK 0x00000007L
+#define VGT_DEBUG_REG13__gs_state10_r0_q__SHIFT 0x00000000
+#define VGT_DEBUG_REG13__gs_state11_r0_q_MASK 0x00000038L
+#define VGT_DEBUG_REG13__gs_state11_r0_q__SHIFT 0x00000003
+#define VGT_DEBUG_REG13__gs_state12_r0_q_MASK 0x000001c0L
+#define VGT_DEBUG_REG13__gs_state12_r0_q__SHIFT 0x00000006
+#define VGT_DEBUG_REG13__gs_state13_r0_q_MASK 0x00000e00L
+#define VGT_DEBUG_REG13__gs_state13_r0_q__SHIFT 0x00000009
+#define VGT_DEBUG_REG13__gs_state14_r0_q_MASK 0x00007000L
+#define VGT_DEBUG_REG13__gs_state14_r0_q__SHIFT 0x0000000c
+#define VGT_DEBUG_REG13__gs_state15_r0_q_MASK 0x00038000L
+#define VGT_DEBUG_REG13__gs_state15_r0_q__SHIFT 0x0000000f
+#define VGT_DEBUG_REG13__gs_tbl_wrptr_r0_q_3_0_MASK 0x003c0000L
+#define VGT_DEBUG_REG13__gs_tbl_wrptr_r0_q_3_0__SHIFT 0x00000012
+#define VGT_DEBUG_REG13__SPARE0_MASK 0x04000000L
+#define VGT_DEBUG_REG13__SPARE0__SHIFT 0x0000001a
+#define VGT_DEBUG_REG13__SPARE1_MASK 0x02000000L
+#define VGT_DEBUG_REG13__SPARE1__SHIFT 0x00000019
+#define VGT_DEBUG_REG14__es_flush_cnt_busy_q_MASK 0x00000400L
+#define VGT_DEBUG_REG14__es_flush_cnt_busy_q__SHIFT 0x0000000a
+#define VGT_DEBUG_REG14__gsfetch_done_fifo_full_MASK 0x00000010L
+#define VGT_DEBUG_REG14__gsfetch_done_fifo_full__SHIFT 0x00000004
+#define VGT_DEBUG_REG14__gsfetch_done_se1_cnt_q_not_0_MASK 0x20000000L
+#define VGT_DEBUG_REG14__gsfetch_done_se1_cnt_q_not_0__SHIFT 0x0000001d
+#define VGT_DEBUG_REG14__gs_rb_space_avail_r0_MASK 0x00000020L
+#define VGT_DEBUG_REG14__gs_rb_space_avail_r0__SHIFT 0x00000005
+#define VGT_DEBUG_REG14__gs_tbl_full_r0_MASK 0x00000800L
+#define VGT_DEBUG_REG14__gs_tbl_full_r0__SHIFT 0x0000000b
+#define VGT_DEBUG_REG14__se1spi_esthread_fifo_busy_MASK 0x08000000L
+#define VGT_DEBUG_REG14__se1spi_esthread_fifo_busy__SHIFT 0x0000001b
+#define VGT_DEBUG_REG14__se1spi_gsthread_fifo_busy_MASK 0x00200000L
+#define VGT_DEBUG_REG14__se1spi_gsthread_fifo_busy__SHIFT 0x00000015
+#define VGT_DEBUG_REG14__smx1_es_done_cnt_r0_q_not_0_MASK 0x04000000L
+#define VGT_DEBUG_REG14__smx1_es_done_cnt_r0_q_not_0__SHIFT 0x0000001a
+#define VGT_DEBUG_REG14__smx_es_done_cnt_r0_q_not_0_MASK 0x00000040L
+#define VGT_DEBUG_REG14__smx_es_done_cnt_r0_q_not_0__SHIFT 0x00000006
+#define VGT_DEBUG_REG14__SPARE0_MASK 0x40000000L
+#define VGT_DEBUG_REG14__SPARE0__SHIFT 0x0000001e
+#define VGT_DEBUG_REG14__SPARE1_MASK 0x10000000L
+#define VGT_DEBUG_REG14__SPARE1__SHIFT 0x0000001c
+#define VGT_DEBUG_REG14__SPARE2_MASK 0x001ff000L
+#define VGT_DEBUG_REG14__SPARE2__SHIFT 0x0000000c
+#define VGT_DEBUG_REG14__SPARE3_MASK 0x0000000fL
+#define VGT_DEBUG_REG14__SPARE3__SHIFT 0x00000000
+#define VGT_DEBUG_REG14__SPARE8_MASK 0x00000180L
+#define VGT_DEBUG_REG14__SPARE8__SHIFT 0x00000007
+#define VGT_DEBUG_REG14__SPARE_MASK 0x01c00000L
+#define VGT_DEBUG_REG14__SPARE__SHIFT 0x00000016
+#define VGT_DEBUG_REG14__VGT_SE1SPI_esthread_rtr_q_MASK 0x80000000L
+#define VGT_DEBUG_REG14__VGT_SE1SPI_esthread_rtr_q__SHIFT 0x0000001f
+#define VGT_DEBUG_REG14__VGT_SE1SPI_gsthread_rtr_q_MASK 0x02000000L
+#define VGT_DEBUG_REG14__VGT_SE1SPI_gsthread_rtr_q__SHIFT 0x00000019
+#define VGT_DEBUG_REG14__vs_done_cnt_q_not_0_MASK 0x00000200L
+#define VGT_DEBUG_REG14__vs_done_cnt_q_not_0__SHIFT 0x00000009
+#define VGT_DEBUG_REG15__active_sm_q_MASK 0x000003e0L
+#define VGT_DEBUG_REG15__active_sm_q__SHIFT 0x00000005
+#define VGT_DEBUG_REG15__cm_busy_q_MASK 0x00000001L
+#define VGT_DEBUG_REG15__cm_busy_q__SHIFT 0x00000000
+#define VGT_DEBUG_REG15__cntr_tbl_wrptr_q_MASK 0x000f8000L
+#define VGT_DEBUG_REG15__cntr_tbl_wrptr_q__SHIFT 0x0000000f
+#define VGT_DEBUG_REG15__counters_busy_q_MASK 0x00000002L
+#define VGT_DEBUG_REG15__counters_busy_q__SHIFT 0x00000001
+#define VGT_DEBUG_REG15__counters_full_MASK 0x00000010L
+#define VGT_DEBUG_REG15__counters_full__SHIFT 0x00000004
+#define VGT_DEBUG_REG15__entry_rdptr_q_MASK 0x00007c00L
+#define VGT_DEBUG_REG15__entry_rdptr_q__SHIFT 0x0000000a
+#define VGT_DEBUG_REG15__gs_done_array_q_not_0_MASK 0x10000000L
+#define VGT_DEBUG_REG15__gs_done_array_q_not_0__SHIFT 0x0000001c
+#define VGT_DEBUG_REG15__output_fifo_empty_MASK 0x00000004L
+#define VGT_DEBUG_REG15__output_fifo_empty__SHIFT 0x00000002
+#define VGT_DEBUG_REG15__output_fifo_full_MASK 0x00000008L
+#define VGT_DEBUG_REG15__output_fifo_full__SHIFT 0x00000003
+#define VGT_DEBUG_REG15__SPARE25_MASK 0x03f00000L
+#define VGT_DEBUG_REG15__SPARE25__SHIFT 0x00000014
+#define VGT_DEBUG_REG15__SPARE31_MASK 0xe0000000L
+#define VGT_DEBUG_REG15__SPARE31__SHIFT 0x0000001d
+#define VGT_DEBUG_REG15__st_cut_mode_q_MASK 0x0c000000L
+#define VGT_DEBUG_REG15__st_cut_mode_q__SHIFT 0x0000001a
+#define VGT_DEBUG_REG16__gog_busy_MASK 0x00000001L
+#define VGT_DEBUG_REG16__gog_busy__SHIFT 0x00000000
+#define VGT_DEBUG_REG16__gog_out_prim_state_sel_MASK 0x0e000000L
+#define VGT_DEBUG_REG16__gog_out_prim_state_sel__SHIFT 0x00000019
+#define VGT_DEBUG_REG16__gog_state_q_MASK 0x0000000eL
+#define VGT_DEBUG_REG16__gog_state_q__SHIFT 0x00000001
+#define VGT_DEBUG_REG16__gog_tm_vs_event_rtr_MASK 0x00000800L
+#define VGT_DEBUG_REG16__gog_tm_vs_event_rtr__SHIFT 0x0000000b
+#define VGT_DEBUG_REG16__indx_valid_r0_q_MASK 0x00080000L
+#define VGT_DEBUG_REG16__indx_valid_r0_q__SHIFT 0x00000013
+#define VGT_DEBUG_REG16__indx_valid_r1_q_MASK 0x00020000L
+#define VGT_DEBUG_REG16__indx_valid_r1_q__SHIFT 0x00000011
+#define VGT_DEBUG_REG16__indx_valid_r2_q_MASK 0x00002000L
+#define VGT_DEBUG_REG16__indx_valid_r2_q__SHIFT 0x0000000d
+#define VGT_DEBUG_REG16__multiple_streams_en_r1_q_MASK 0x10000000L
+#define VGT_DEBUG_REG16__multiple_streams_en_r1_q__SHIFT 0x0000001c
+#define VGT_DEBUG_REG16__new_vs_thread_r2_MASK 0x80000000L
+#define VGT_DEBUG_REG16__new_vs_thread_r2__SHIFT 0x0000001f
+#define VGT_DEBUG_REG16__num_gs_r2_q_not_0_MASK 0x40000000L
+#define VGT_DEBUG_REG16__num_gs_r2_q_not_0__SHIFT 0x0000001e
+#define VGT_DEBUG_REG16__prim_valid_r0_q_MASK 0x00100000L
+#define VGT_DEBUG_REG16__prim_valid_r0_q__SHIFT 0x00000014
+#define VGT_DEBUG_REG16__prim_valid_r1_q_MASK 0x00010000L
+#define VGT_DEBUG_REG16__prim_valid_r1_q__SHIFT 0x00000010
+#define VGT_DEBUG_REG16__prim_valid_r2_q_MASK 0x00004000L
+#define VGT_DEBUG_REG16__prim_valid_r2_q__SHIFT 0x0000000e
+#define VGT_DEBUG_REG16__r0_rtr_MASK 0x00000010L
+#define VGT_DEBUG_REG16__r0_rtr__SHIFT 0x00000004
+#define VGT_DEBUG_REG16__r1_rtr_MASK 0x00000020L
+#define VGT_DEBUG_REG16__r1_rtr__SHIFT 0x00000005
+#define VGT_DEBUG_REG16__r1_upstream_rtr_MASK 0x00000040L
+#define VGT_DEBUG_REG16__r1_upstream_rtr__SHIFT 0x00000006
+#define VGT_DEBUG_REG16__r2_indx_rtr_MASK 0x00000200L
+#define VGT_DEBUG_REG16__r2_indx_rtr__SHIFT 0x00000009
+#define VGT_DEBUG_REG16__r2_prim_rtr_MASK 0x00000100L
+#define VGT_DEBUG_REG16__r2_prim_rtr__SHIFT 0x00000008
+#define VGT_DEBUG_REG16__r2_rtr_MASK 0x00000400L
+#define VGT_DEBUG_REG16__r2_rtr__SHIFT 0x0000000a
+#define VGT_DEBUG_REG16__r2_vs_tbl_rtr_MASK 0x00000080L
+#define VGT_DEBUG_REG16__r2_vs_tbl_rtr__SHIFT 0x00000007
+#define VGT_DEBUG_REG16__r3_force_vs_tbl_we_rtr_MASK 0x00001000L
+#define VGT_DEBUG_REG16__r3_force_vs_tbl_we_rtr__SHIFT 0x0000000c
+#define VGT_DEBUG_REG16__send_event_q_MASK 0x00400000L
+#define VGT_DEBUG_REG16__send_event_q__SHIFT 0x00000016
+#define VGT_DEBUG_REG16__SPARE24_MASK 0x01800000L
+#define VGT_DEBUG_REG16__SPARE24__SHIFT 0x00000017
+#define VGT_DEBUG_REG16__valid_r0_q_MASK 0x00200000L
+#define VGT_DEBUG_REG16__valid_r0_q__SHIFT 0x00000015
+#define VGT_DEBUG_REG16__valid_r1_q_MASK 0x00040000L
+#define VGT_DEBUG_REG16__valid_r1_q__SHIFT 0x00000012
+#define VGT_DEBUG_REG16__valid_r2_q_MASK 0x00008000L
+#define VGT_DEBUG_REG16__valid_r2_q__SHIFT 0x0000000f
+#define VGT_DEBUG_REG16__vert_seen_since_sopg_r2_q_MASK 0x01000000L
+#define VGT_DEBUG_REG16__vert_seen_since_sopg_r2_q__SHIFT 0x00000018
+#define VGT_DEBUG_REG16__vs_vert_count_r2_q_not_0_MASK 0x20000000L
+#define VGT_DEBUG_REG16__vs_vert_count_r2_q_not_0__SHIFT 0x0000001d
+#define VGT_DEBUG_REG17__gog_out_indx_13_0_MASK 0xfffc0000L
+#define VGT_DEBUG_REG17__gog_out_indx_13_0__SHIFT 0x00000012
+#define VGT_DEBUG_REG17__gog_out_prim_rel_indx0_5_0_MASK 0x0003f000L
+#define VGT_DEBUG_REG17__gog_out_prim_rel_indx0_5_0__SHIFT 0x0000000c
+#define VGT_DEBUG_REG17__gog_out_prim_rel_indx1_5_0_MASK 0x00000fc0L
+#define VGT_DEBUG_REG17__gog_out_prim_rel_indx1_5_0__SHIFT 0x00000006
+#define VGT_DEBUG_REG17__gog_out_prim_rel_indx2_5_0_MASK 0x0000003fL
+#define VGT_DEBUG_REG17__gog_out_prim_rel_indx2_5_0__SHIFT 0x00000000
+#define VGT_DEBUG_REG18__components_valid_r0_q_MASK 0xe0000000L
+#define VGT_DEBUG_REG18__components_valid_r0_q__SHIFT 0x0000001d
+#define VGT_DEBUG_REG18__eject_vtx_vect_r1_d_MASK 0x00800000L
+#define VGT_DEBUG_REG18__eject_vtx_vect_r1_d__SHIFT 0x00000017
+#define VGT_DEBUG_REG18__eop_r0_q_MASK 0x00400000L
+#define VGT_DEBUG_REG18__eop_r0_q__SHIFT 0x00000016
+#define VGT_DEBUG_REG18__grp_vr_valid_MASK 0x00000001L
+#define VGT_DEBUG_REG18__grp_vr_valid__SHIFT 0x00000000
+#define VGT_DEBUG_REG18__gs_scenario_a_r0_q_MASK 0x08000000L
+#define VGT_DEBUG_REG18__gs_scenario_a_r0_q__SHIFT 0x0000001b
+#define VGT_DEBUG_REG18__gs_scenario_b_r0_q_MASK 0x10000000L
+#define VGT_DEBUG_REG18__gs_scenario_b_r0_q__SHIFT 0x0000001c
+#define VGT_DEBUG_REG18__indices_to_send_q_MASK 0x00000700L
+#define VGT_DEBUG_REG18__indices_to_send_q__SHIFT 0x00000008
+#define VGT_DEBUG_REG18__indx0_hit_d_MASK 0x00040000L
+#define VGT_DEBUG_REG18__indx0_hit_d__SHIFT 0x00000012
+#define VGT_DEBUG_REG18__indx0_new_d_MASK 0x00002000L
+#define VGT_DEBUG_REG18__indx0_new_d__SHIFT 0x0000000d
+#define VGT_DEBUG_REG18__indx1_hit_d_MASK 0x00020000L
+#define VGT_DEBUG_REG18__indx1_hit_d__SHIFT 0x00000011
+#define VGT_DEBUG_REG18__indx1_new_d_MASK 0x00004000L
+#define VGT_DEBUG_REG18__indx1_new_d__SHIFT 0x0000000e
+#define VGT_DEBUG_REG18__indx2_hit_d_MASK 0x00010000L
+#define VGT_DEBUG_REG18__indx2_hit_d__SHIFT 0x00000010
+#define VGT_DEBUG_REG18__indx2_new_d_MASK 0x00008000L
+#define VGT_DEBUG_REG18__indx2_new_d__SHIFT 0x0000000f
+#define VGT_DEBUG_REG18__last_group_of_instance_r0_q_MASK 0x00100000L
+#define VGT_DEBUG_REG18__last_group_of_instance_r0_q__SHIFT 0x00000014
+#define VGT_DEBUG_REG18__last_indx_of_prim_MASK 0x00001000L
+#define VGT_DEBUG_REG18__last_indx_of_prim__SHIFT 0x0000000c
+#define VGT_DEBUG_REG18__null_primitive_r0_q_MASK 0x00200000L
+#define VGT_DEBUG_REG18__null_primitive_r0_q__SHIFT 0x00000015
+#define VGT_DEBUG_REG18__out_vr_indx_read_MASK 0x00000040L
+#define VGT_DEBUG_REG18__out_vr_indx_read__SHIFT 0x00000006
+#define VGT_DEBUG_REG18__out_vr_prim_read_MASK 0x00000080L
+#define VGT_DEBUG_REG18__out_vr_prim_read__SHIFT 0x00000007
+#define VGT_DEBUG_REG18__pipe0_dr_MASK 0x00000002L
+#define VGT_DEBUG_REG18__pipe0_dr__SHIFT 0x00000001
+#define VGT_DEBUG_REG18__pipe0_rtr_MASK 0x00000010L
+#define VGT_DEBUG_REG18__pipe0_rtr__SHIFT 0x00000004
+#define VGT_DEBUG_REG18__pipe1_dr_MASK 0x00000004L
+#define VGT_DEBUG_REG18__pipe1_dr__SHIFT 0x00000002
+#define VGT_DEBUG_REG18__pipe1_rtr_MASK 0x00000020L
+#define VGT_DEBUG_REG18__pipe1_rtr__SHIFT 0x00000005
+#define VGT_DEBUG_REG18__st_vertex_reuse_off_r0_q_MASK 0x00080000L
+#define VGT_DEBUG_REG18__st_vertex_reuse_off_r0_q__SHIFT 0x00000013
+#define VGT_DEBUG_REG18__sub_prim_type_r0_q_MASK 0x07000000L
+#define VGT_DEBUG_REG18__sub_prim_type_r0_q__SHIFT 0x00000018
+#define VGT_DEBUG_REG18__valid_indices_MASK 0x00000800L
+#define VGT_DEBUG_REG18__valid_indices__SHIFT 0x0000000b
+#define VGT_DEBUG_REG18__vr_grp_read_MASK 0x00000008L
+#define VGT_DEBUG_REG18__vr_grp_read__SHIFT 0x00000003
+#define VGT_DEBUG_REG19__buffered_prim_eject_vtx_vect_MASK 0x00080000L
+#define VGT_DEBUG_REG19__buffered_prim_eject_vtx_vect__SHIFT 0x00000013
+#define VGT_DEBUG_REG19__buffered_prim_eop_MASK 0x00040000L
+#define VGT_DEBUG_REG19__buffered_prim_eop__SHIFT 0x00000012
+#define VGT_DEBUG_REG19__buffered_prim_event_MASK 0x00010000L
+#define VGT_DEBUG_REG19__buffered_prim_event__SHIFT 0x00000010
+#define VGT_DEBUG_REG19__buffered_prim_null_primitive_MASK 0x00020000L
+#define VGT_DEBUG_REG19__buffered_prim_null_primitive__SHIFT 0x00000011
+#define VGT_DEBUG_REG19__buffered_prim_type_event_MASK 0x03f00000L
+#define VGT_DEBUG_REG19__buffered_prim_type_event__SHIFT 0x00000014
+#define VGT_DEBUG_REG19__filter_event_MASK 0x80000000L
+#define VGT_DEBUG_REG19__filter_event__SHIFT 0x0000001f
+#define VGT_DEBUG_REG19__hold_prim_MASK 0x00000800L
+#define VGT_DEBUG_REG19__hold_prim__SHIFT 0x0000000b
+#define VGT_DEBUG_REG19__new_packet_q_MASK 0x00008000L
+#define VGT_DEBUG_REG19__new_packet_q__SHIFT 0x0000000f
+#define VGT_DEBUG_REG19__null_terminate_vtx_vector_MASK 0x40000000L
+#define VGT_DEBUG_REG19__null_terminate_vtx_vector__SHIFT 0x0000001e
+#define VGT_DEBUG_REG19__num_new_unique_rel_indx_MASK 0x30000000L
+#define VGT_DEBUG_REG19__num_new_unique_rel_indx__SHIFT 0x0000001c
+#define VGT_DEBUG_REG19__pa_clipp_fifo_busy_q_MASK 0x00000020L
+#define VGT_DEBUG_REG19__pa_clipp_fifo_busy_q__SHIFT 0x00000005
+#define VGT_DEBUG_REG19__pa_clips_fifo_busy_q_MASK 0x00000010L
+#define VGT_DEBUG_REG19__pa_clips_fifo_busy_q__SHIFT 0x00000004
+#define VGT_DEBUG_REG19__pa_clipv_fifo_busy_q_MASK 0x00000400L
+#define VGT_DEBUG_REG19__pa_clipv_fifo_busy_q__SHIFT 0x0000000a
+#define VGT_DEBUG_REG19__prim_buffer_empty_MASK 0x00000004L
+#define VGT_DEBUG_REG19__prim_buffer_empty__SHIFT 0x00000002
+#define VGT_DEBUG_REG19__prim_buffer_full_MASK 0x00000008L
+#define VGT_DEBUG_REG19__prim_buffer_full__SHIFT 0x00000003
+#define VGT_DEBUG_REG19__separate_out_busy_q_MASK 0x00000001L
+#define VGT_DEBUG_REG19__separate_out_busy_q__SHIFT 0x00000000
+#define VGT_DEBUG_REG19__separate_out_indx_busy_q_MASK 0x00000002L
+#define VGT_DEBUG_REG19__separate_out_indx_busy_q__SHIFT 0x00000001
+#define VGT_DEBUG_REG19__spi_vsthread_fifo_busy_q_MASK 0x00000100L
+#define VGT_DEBUG_REG19__spi_vsthread_fifo_busy_q__SHIFT 0x00000008
+#define VGT_DEBUG_REG19__spi_vsvert_fifo_busy_q_MASK 0x00000200L
+#define VGT_DEBUG_REG19__spi_vsvert_fifo_busy_q__SHIFT 0x00000009
+#define VGT_DEBUG_REG19__VGT_PA_clipp_rtr_q_MASK 0x00000080L
+#define VGT_DEBUG_REG19__VGT_PA_clipp_rtr_q__SHIFT 0x00000007
+#define VGT_DEBUG_REG19__VGT_PA_clips_rtr_q_MASK 0x00000040L
+#define VGT_DEBUG_REG19__VGT_PA_clips_rtr_q__SHIFT 0x00000006
+#define VGT_DEBUG_REG19__VGT_PA_clipv_rtr_q_MASK 0x00004000L
+#define VGT_DEBUG_REG19__VGT_PA_clipv_rtr_q__SHIFT 0x0000000e
+#define VGT_DEBUG_REG19__VGT_SE1SPI_vsvert_rtr_q_MASK 0x08000000L
+#define VGT_DEBUG_REG19__VGT_SE1SPI_vsvert_rtr_q__SHIFT 0x0000001b
+#define VGT_DEBUG_REG19__VGT_SE1SPI_vswave_rtr_q_MASK 0x04000000L
+#define VGT_DEBUG_REG19__VGT_SE1SPI_vswave_rtr_q__SHIFT 0x0000001a
+#define VGT_DEBUG_REG19__VGT_SPI_vsthread_rtr_q_MASK 0x00001000L
+#define VGT_DEBUG_REG19__VGT_SPI_vsthread_rtr_q__SHIFT 0x0000000c
+#define VGT_DEBUG_REG19__VGT_SPI_vsvert_rtr_q_MASK 0x00002000L
+#define VGT_DEBUG_REG19__VGT_SPI_vsvert_rtr_q__SHIFT 0x0000000d
+#define VGT_DEBUG_REG1__gog_out_indx_valid_MASK 0x10000000L
+#define VGT_DEBUG_REG1__gog_out_indx_valid__SHIFT 0x0000001c
+#define VGT_DEBUG_REG1__gog_out_prim_valid_MASK 0x40000000L
+#define VGT_DEBUG_REG1__gog_out_prim_valid__SHIFT 0x0000001e
+#define VGT_DEBUG_REG1__gs_pi_read_MASK 0x08000000L
+#define VGT_DEBUG_REG1__gs_pi_read__SHIFT 0x0000001b
+#define VGT_DEBUG_REG1__out_indx_read_MASK 0x20000000L
+#define VGT_DEBUG_REG1__out_indx_read__SHIFT 0x0000001d
+#define VGT_DEBUG_REG1__out_prim_read_MASK 0x80000000L
+#define VGT_DEBUG_REG1__out_prim_read__SHIFT 0x0000001f
+#define VGT_DEBUG_REG1__pi_gs_valid_MASK 0x04000000L
+#define VGT_DEBUG_REG1__pi_gs_valid__SHIFT 0x0000001a
+#define VGT_DEBUG_REG1__pi_pt_valid_MASK 0x00001000L
+#define VGT_DEBUG_REG1__pi_pt_valid__SHIFT 0x0000000c
+#define VGT_DEBUG_REG1__pi_te_valid_MASK 0x00004000L
+#define VGT_DEBUG_REG1__pi_te_valid__SHIFT 0x0000000e
+#define VGT_DEBUG_REG1__pi_vr_valid_MASK 0x00000400L
+#define VGT_DEBUG_REG1__pi_vr_valid__SHIFT 0x0000000a
+#define VGT_DEBUG_REG1__pt_out_indx_valid_MASK 0x00100000L
+#define VGT_DEBUG_REG1__pt_out_indx_valid__SHIFT 0x00000014
+#define VGT_DEBUG_REG1__pt_out_prim_valid_MASK 0x00400000L
+#define VGT_DEBUG_REG1__pt_out_prim_valid__SHIFT 0x00000016
+#define VGT_DEBUG_REG1__pt_pi_read_MASK 0x00002000L
+#define VGT_DEBUG_REG1__pt_pi_read__SHIFT 0x0000000d
+#define VGT_DEBUG_REG1__SPARE0_MASK 0x00000200L
+#define VGT_DEBUG_REG1__SPARE0__SHIFT 0x00000009
+#define VGT_DEBUG_REG1__SPARE10_MASK 0x00200000L
+#define VGT_DEBUG_REG1__SPARE10__SHIFT 0x00000015
+#define VGT_DEBUG_REG1__SPARE11_MASK 0x00080000L
+#define VGT_DEBUG_REG1__SPARE11__SHIFT 0x00000013
+#define VGT_DEBUG_REG1__SPARE12_MASK 0x00020000L
+#define VGT_DEBUG_REG1__SPARE12__SHIFT 0x00000011
+#define VGT_DEBUG_REG1__SPARE1_MASK 0x00000100L
+#define VGT_DEBUG_REG1__SPARE1__SHIFT 0x00000008
+#define VGT_DEBUG_REG1__SPARE23_MASK 0x00800000L
+#define VGT_DEBUG_REG1__SPARE23__SHIFT 0x00000017
+#define VGT_DEBUG_REG1__SPARE25_MASK 0x02000000L
+#define VGT_DEBUG_REG1__SPARE25__SHIFT 0x00000019
+#define VGT_DEBUG_REG1__SPARE2_MASK 0x00000080L
+#define VGT_DEBUG_REG1__SPARE2__SHIFT 0x00000007
+#define VGT_DEBUG_REG1__SPARE3_MASK 0x00000040L
+#define VGT_DEBUG_REG1__SPARE3__SHIFT 0x00000006
+#define VGT_DEBUG_REG1__SPARE4_MASK 0x00000020L
+#define VGT_DEBUG_REG1__SPARE4__SHIFT 0x00000005
+#define VGT_DEBUG_REG1__SPARE5_MASK 0x00000010L
+#define VGT_DEBUG_REG1__SPARE5__SHIFT 0x00000004
+#define VGT_DEBUG_REG1__SPARE6_MASK 0x00000008L
+#define VGT_DEBUG_REG1__SPARE6__SHIFT 0x00000003
+#define VGT_DEBUG_REG1__SPARE7_MASK 0x00000004L
+#define VGT_DEBUG_REG1__SPARE7__SHIFT 0x00000002
+#define VGT_DEBUG_REG1__SPARE8_MASK 0x00000002L
+#define VGT_DEBUG_REG1__SPARE8__SHIFT 0x00000001
+#define VGT_DEBUG_REG1__SPARE9_MASK 0x00000001L
+#define VGT_DEBUG_REG1__SPARE9__SHIFT 0x00000000
+#define VGT_DEBUG_REG1__te_grp_read_MASK 0x00008000L
+#define VGT_DEBUG_REG1__te_grp_read__SHIFT 0x0000000f
+#define VGT_DEBUG_REG1__te_out_data_valid_MASK 0x01000000L
+#define VGT_DEBUG_REG1__te_out_data_valid__SHIFT 0x00000018
+#define VGT_DEBUG_REG1__vr_out_indx_valid_MASK 0x00010000L
+#define VGT_DEBUG_REG1__vr_out_indx_valid__SHIFT 0x00000010
+#define VGT_DEBUG_REG1__vr_out_prim_valid_MASK 0x00040000L
+#define VGT_DEBUG_REG1__vr_out_prim_valid__SHIFT 0x00000012
+#define VGT_DEBUG_REG1__vr_pi_read_MASK 0x00000800L
+#define VGT_DEBUG_REG1__vr_pi_read__SHIFT 0x0000000b
+#define VGT_DEBUG_REG20__alloc_counter_q_MASK 0x003c0000L
+#define VGT_DEBUG_REG20__alloc_counter_q__SHIFT 0x00000012
+#define VGT_DEBUG_REG20__curr_dealloc_distance_q_MASK 0x1fc00000L
+#define VGT_DEBUG_REG20__curr_dealloc_distance_q__SHIFT 0x00000016
+#define VGT_DEBUG_REG20__curr_slot_in_vtx_vect_q_not_0_MASK 0x40000000L
+#define VGT_DEBUG_REG20__curr_slot_in_vtx_vect_q_not_0__SHIFT 0x0000001e
+#define VGT_DEBUG_REG20__dbg_VGT_SPI_vsthread_sovertexcount_not_0_MASK 0x00010000L
+#define VGT_DEBUG_REG20__dbg_VGT_SPI_vsthread_sovertexcount_not_0__SHIFT 0x00000010
+#define VGT_DEBUG_REG20__dbg_VGT_SPI_vsthread_sovertexindex_MASK 0x0000ffffL
+#define VGT_DEBUG_REG20__dbg_VGT_SPI_vsthread_sovertexindex__SHIFT 0x00000000
+#define VGT_DEBUG_REG20__int_vtx_counter_q_not_0_MASK 0x80000000L
+#define VGT_DEBUG_REG20__int_vtx_counter_q_not_0__SHIFT 0x0000001f
+#define VGT_DEBUG_REG20__new_allocate_q_MASK 0x20000000L
+#define VGT_DEBUG_REG20__new_allocate_q__SHIFT 0x0000001d
+#define VGT_DEBUG_REG20__SPARE17_MASK 0x00020000L
+#define VGT_DEBUG_REG20__SPARE17__SHIFT 0x00000011
+#define VGT_DEBUG_REG21__buff_full_p1_MASK 0x01000000L
+#define VGT_DEBUG_REG21__buff_full_p1__SHIFT 0x00000018
+#define VGT_DEBUG_REG21__eopg_p0_q_MASK 0x40000000L
+#define VGT_DEBUG_REG21__eopg_p0_q__SHIFT 0x0000001e
+#define VGT_DEBUG_REG21__eotg_r2_q_MASK 0x04000000L
+#define VGT_DEBUG_REG21__eotg_r2_q__SHIFT 0x0000001a
+#define VGT_DEBUG_REG21__full_state_p1_q_MASK 0x00008000L
+#define VGT_DEBUG_REG21__full_state_p1_q__SHIFT 0x0000000f
+#define VGT_DEBUG_REG21__indx_count_q_not_0_MASK 0x00002000L
+#define VGT_DEBUG_REG21__indx_count_q_not_0__SHIFT 0x0000000d
+#define VGT_DEBUG_REG21__indx_side_fifo_empty_MASK 0x00000002L
+#define VGT_DEBUG_REG21__indx_side_fifo_empty__SHIFT 0x00000001
+#define VGT_DEBUG_REG21__indx_side_fifo_full_MASK 0x00000080L
+#define VGT_DEBUG_REG21__indx_side_fifo_full__SHIFT 0x00000007
+#define VGT_DEBUG_REG21__indx_side_indx_valid_MASK 0x00010000L
+#define VGT_DEBUG_REG21__indx_side_indx_valid__SHIFT 0x00000010
+#define VGT_DEBUG_REG21__interfaces_rtr_MASK 0x00001000L
+#define VGT_DEBUG_REG21__interfaces_rtr__SHIFT 0x0000000c
+#define VGT_DEBUG_REG21__is_event_p0_q_MASK 0x00100000L
+#define VGT_DEBUG_REG21__is_event_p0_q__SHIFT 0x00000014
+#define VGT_DEBUG_REG21__lshs_dealloc_p1_MASK 0x00200000L
+#define VGT_DEBUG_REG21__lshs_dealloc_p1__SHIFT 0x00000015
+#define VGT_DEBUG_REG21__null_r2_q_MASK 0x08000000L
+#define VGT_DEBUG_REG21__null_r2_q__SHIFT 0x0000001b
+#define VGT_DEBUG_REG21__out_indx_fifo_empty_MASK 0x00000001L
+#define VGT_DEBUG_REG21__out_indx_fifo_empty__SHIFT 0x00000000
+#define VGT_DEBUG_REG21__out_indx_fifo_full_MASK 0x00000040L
+#define VGT_DEBUG_REG21__out_indx_fifo_full__SHIFT 0x00000006
+#define VGT_DEBUG_REG21__p0_dr_MASK 0x10000000L
+#define VGT_DEBUG_REG21__p0_dr__SHIFT 0x0000001c
+#define VGT_DEBUG_REG21__p0_nobp_MASK 0x80000000L
+#define VGT_DEBUG_REG21__p0_nobp__SHIFT 0x0000001f
+#define VGT_DEBUG_REG21__p0_rtr_MASK 0x20000000L
+#define VGT_DEBUG_REG21__p0_rtr__SHIFT 0x0000001d
+#define VGT_DEBUG_REG21__pipe0_dr_MASK 0x00000004L
+#define VGT_DEBUG_REG21__pipe0_dr__SHIFT 0x00000002
+#define VGT_DEBUG_REG21__pipe0_rtr_MASK 0x00000100L
+#define VGT_DEBUG_REG21__pipe0_rtr__SHIFT 0x00000008
+#define VGT_DEBUG_REG21__pipe1_dr_MASK 0x00000008L
+#define VGT_DEBUG_REG21__pipe1_dr__SHIFT 0x00000003
+#define VGT_DEBUG_REG21__pipe1_rtr_MASK 0x00000200L
+#define VGT_DEBUG_REG21__pipe1_rtr__SHIFT 0x00000009
+#define VGT_DEBUG_REG21__pipe2_dr_MASK 0x00000010L
+#define VGT_DEBUG_REG21__pipe2_dr__SHIFT 0x00000004
+#define VGT_DEBUG_REG21__pipe2_rtr_MASK 0x00000400L
+#define VGT_DEBUG_REG21__pipe2_rtr__SHIFT 0x0000000a
+#define VGT_DEBUG_REG21__stateid_p0_q_MASK 0x000e0000L
+#define VGT_DEBUG_REG21__stateid_p0_q__SHIFT 0x00000011
+#define VGT_DEBUG_REG21__stream_id_r2_q_MASK 0x00400000L
+#define VGT_DEBUG_REG21__stream_id_r2_q__SHIFT 0x00000016
+#define VGT_DEBUG_REG21__strmout_valid_p1_MASK 0x02000000L
+#define VGT_DEBUG_REG21__strmout_valid_p1__SHIFT 0x00000019
+#define VGT_DEBUG_REG21__vsthread_buff_empty_MASK 0x00000020L
+#define VGT_DEBUG_REG21__vsthread_buff_empty__SHIFT 0x00000005
+#define VGT_DEBUG_REG21__vsthread_buff_full_MASK 0x00000800L
+#define VGT_DEBUG_REG21__vsthread_buff_full__SHIFT 0x0000000b
+#define VGT_DEBUG_REG21__vtx_vect_counter_q_not_0_MASK 0x00800000L
+#define VGT_DEBUG_REG21__vtx_vect_counter_q_not_0__SHIFT 0x00000017
+#define VGT_DEBUG_REG21__wait_for_external_eopg_q_MASK 0x00004000L
+#define VGT_DEBUG_REG21__wait_for_external_eopg_q__SHIFT 0x0000000e
+#define VGT_DEBUG_REG22__cm_state16_MASK 0x00000003L
+#define VGT_DEBUG_REG22__cm_state16__SHIFT 0x00000000
+#define VGT_DEBUG_REG22__cm_state17_MASK 0x0000000cL
+#define VGT_DEBUG_REG22__cm_state17__SHIFT 0x00000002
+#define VGT_DEBUG_REG22__cm_state18_MASK 0x00000030L
+#define VGT_DEBUG_REG22__cm_state18__SHIFT 0x00000004
+#define VGT_DEBUG_REG22__cm_state19_MASK 0x000000c0L
+#define VGT_DEBUG_REG22__cm_state19__SHIFT 0x00000006
+#define VGT_DEBUG_REG22__cm_state20_MASK 0x00000300L
+#define VGT_DEBUG_REG22__cm_state20__SHIFT 0x00000008
+#define VGT_DEBUG_REG22__cm_state21_MASK 0x00000c00L
+#define VGT_DEBUG_REG22__cm_state21__SHIFT 0x0000000a
+#define VGT_DEBUG_REG22__cm_state22_MASK 0x00003000L
+#define VGT_DEBUG_REG22__cm_state22__SHIFT 0x0000000c
+#define VGT_DEBUG_REG22__cm_state23_MASK 0x0000c000L
+#define VGT_DEBUG_REG22__cm_state23__SHIFT 0x0000000e
+#define VGT_DEBUG_REG22__cm_state24_MASK 0x00030000L
+#define VGT_DEBUG_REG22__cm_state24__SHIFT 0x00000010
+#define VGT_DEBUG_REG22__cm_state25_MASK 0x000c0000L
+#define VGT_DEBUG_REG22__cm_state25__SHIFT 0x00000012
+#define VGT_DEBUG_REG22__cm_state26_MASK 0x00300000L
+#define VGT_DEBUG_REG22__cm_state26__SHIFT 0x00000014
+#define VGT_DEBUG_REG22__cm_state27_MASK 0x00c00000L
+#define VGT_DEBUG_REG22__cm_state27__SHIFT 0x00000016
+#define VGT_DEBUG_REG22__cm_state28_MASK 0x03000000L
+#define VGT_DEBUG_REG22__cm_state28__SHIFT 0x00000018
+#define VGT_DEBUG_REG22__cm_state29_MASK 0x0c000000L
+#define VGT_DEBUG_REG22__cm_state29__SHIFT 0x0000001a
+#define VGT_DEBUG_REG22__cm_state30_MASK 0x30000000L
+#define VGT_DEBUG_REG22__cm_state30__SHIFT 0x0000001c
+#define VGT_DEBUG_REG22__cm_state31_MASK 0xc0000000L
+#define VGT_DEBUG_REG22__cm_state31__SHIFT 0x0000001e
+#define VGT_DEBUG_REG23__frmt_busy_MASK 0x00000001L
+#define VGT_DEBUG_REG23__frmt_busy__SHIFT 0x00000000
+#define VGT_DEBUG_REG23__new_verts_r2_q_MASK 0x00018000L
+#define VGT_DEBUG_REG23__new_verts_r2_q__SHIFT 0x0000000f
+#define VGT_DEBUG_REG23__prim_dr_r2_q_MASK 0x00001000L
+#define VGT_DEBUG_REG23__prim_dr_r2_q__SHIFT 0x0000000c
+#define VGT_DEBUG_REG23__prim_fifo_empty_MASK 0x00000200L
+#define VGT_DEBUG_REG23__prim_fifo_empty__SHIFT 0x00000009
+#define VGT_DEBUG_REG23__prim_fifo_full_MASK 0x00000400L
+#define VGT_DEBUG_REG23__prim_fifo_full__SHIFT 0x0000000a
+#define VGT_DEBUG_REG23__prim_r2_rtr_MASK 0x00000010L
+#define VGT_DEBUG_REG23__prim_r2_rtr__SHIFT 0x00000004
+#define VGT_DEBUG_REG23__prim_r3_rtr_MASK 0x00000008L
+#define VGT_DEBUG_REG23__prim_r3_rtr__SHIFT 0x00000003
+#define VGT_DEBUG_REG23__prim_state_sel_r2_q_MASK 0x00e00000L
+#define VGT_DEBUG_REG23__prim_state_sel_r2_q__SHIFT 0x00000015
+#define VGT_DEBUG_REG23__rcm_frmt_prim_rtr_MASK 0x00000004L
+#define VGT_DEBUG_REG23__rcm_frmt_prim_rtr__SHIFT 0x00000002
+#define VGT_DEBUG_REG23__rcm_frmt_vert_rtr_MASK 0x00000002L
+#define VGT_DEBUG_REG23__rcm_frmt_vert_rtr__SHIFT 0x00000001
+#define VGT_DEBUG_REG23__SPARE_MASK 0xff000000L
+#define VGT_DEBUG_REG23__SPARE__SHIFT 0x00000018
+#define VGT_DEBUG_REG23__vert_dr_r0_q_MASK 0x00004000L
+#define VGT_DEBUG_REG23__vert_dr_r0_q__SHIFT 0x0000000e
+#define VGT_DEBUG_REG23__vert_dr_r1_q_MASK 0x00002000L
+#define VGT_DEBUG_REG23__vert_dr_r1_q__SHIFT 0x0000000d
+#define VGT_DEBUG_REG23__vert_dr_r2_q_MASK 0x00000800L
+#define VGT_DEBUG_REG23__vert_dr_r2_q__SHIFT 0x0000000b
+#define VGT_DEBUG_REG23__vert_r0_rtr_MASK 0x00000100L
+#define VGT_DEBUG_REG23__vert_r0_rtr__SHIFT 0x00000008
+#define VGT_DEBUG_REG23__vert_r1_rtr_MASK 0x00000080L
+#define VGT_DEBUG_REG23__vert_r1_rtr__SHIFT 0x00000007
+#define VGT_DEBUG_REG23__vert_r2_rtr_MASK 0x00000040L
+#define VGT_DEBUG_REG23__vert_r2_rtr__SHIFT 0x00000006
+#define VGT_DEBUG_REG23__vert_r3_rtr_MASK 0x00000020L
+#define VGT_DEBUG_REG23__vert_r3_rtr__SHIFT 0x00000005
+#define VGT_DEBUG_REG23__verts_sent_r2_q_MASK 0x001e0000L
+#define VGT_DEBUG_REG23__verts_sent_r2_q__SHIFT 0x00000011
+#define VGT_DEBUG_REG24__avail_es_rb_space_r0_q_23_0_MASK 0x00ffffffL
+#define VGT_DEBUG_REG24__avail_es_rb_space_r0_q_23_0__SHIFT 0x00000000
+#define VGT_DEBUG_REG24__dependent_st_cut_mode_q_MASK 0x03000000L
+#define VGT_DEBUG_REG24__dependent_st_cut_mode_q__SHIFT 0x00000018
+#define VGT_DEBUG_REG24__SPARE31_MASK 0xfc000000L
+#define VGT_DEBUG_REG24__SPARE31__SHIFT 0x0000001a
+#define VGT_DEBUG_REG25__active_sm_r0_q_MASK 0x3c000000L
+#define VGT_DEBUG_REG25__active_sm_r0_q__SHIFT 0x0000001a
+#define VGT_DEBUG_REG25__add_gs_rb_space_r0_q_MASK 0x80000000L
+#define VGT_DEBUG_REG25__add_gs_rb_space_r0_q__SHIFT 0x0000001f
+#define VGT_DEBUG_REG25__add_gs_rb_space_r1_q_MASK 0x40000000L
+#define VGT_DEBUG_REG25__add_gs_rb_space_r1_q__SHIFT 0x0000001e
+#define VGT_DEBUG_REG25__avail_gs_rb_space_r0_q_25_0_MASK 0x03ffffffL
+#define VGT_DEBUG_REG25__avail_gs_rb_space_r0_q_25_0__SHIFT 0x00000000
+#define VGT_DEBUG_REG26__cm_state0_MASK 0x00000003L
+#define VGT_DEBUG_REG26__cm_state0__SHIFT 0x00000000
+#define VGT_DEBUG_REG26__cm_state10_MASK 0x00300000L
+#define VGT_DEBUG_REG26__cm_state10__SHIFT 0x00000014
+#define VGT_DEBUG_REG26__cm_state11_MASK 0x00c00000L
+#define VGT_DEBUG_REG26__cm_state11__SHIFT 0x00000016
+#define VGT_DEBUG_REG26__cm_state12_MASK 0x03000000L
+#define VGT_DEBUG_REG26__cm_state12__SHIFT 0x00000018
+#define VGT_DEBUG_REG26__cm_state13_MASK 0x0c000000L
+#define VGT_DEBUG_REG26__cm_state13__SHIFT 0x0000001a
+#define VGT_DEBUG_REG26__cm_state14_MASK 0x30000000L
+#define VGT_DEBUG_REG26__cm_state14__SHIFT 0x0000001c
+#define VGT_DEBUG_REG26__cm_state15_MASK 0xc0000000L
+#define VGT_DEBUG_REG26__cm_state15__SHIFT 0x0000001e
+#define VGT_DEBUG_REG26__cm_state1_MASK 0x0000000cL
+#define VGT_DEBUG_REG26__cm_state1__SHIFT 0x00000002
+#define VGT_DEBUG_REG26__cm_state2_MASK 0x00000030L
+#define VGT_DEBUG_REG26__cm_state2__SHIFT 0x00000004
+#define VGT_DEBUG_REG26__cm_state3_MASK 0x000000c0L
+#define VGT_DEBUG_REG26__cm_state3__SHIFT 0x00000006
+#define VGT_DEBUG_REG26__cm_state4_MASK 0x00000300L
+#define VGT_DEBUG_REG26__cm_state4__SHIFT 0x00000008
+#define VGT_DEBUG_REG26__cm_state5_MASK 0x00000c00L
+#define VGT_DEBUG_REG26__cm_state5__SHIFT 0x0000000a
+#define VGT_DEBUG_REG26__cm_state6_MASK 0x00003000L
+#define VGT_DEBUG_REG26__cm_state6__SHIFT 0x0000000c
+#define VGT_DEBUG_REG26__cm_state7_MASK 0x0000c000L
+#define VGT_DEBUG_REG26__cm_state7__SHIFT 0x0000000e
+#define VGT_DEBUG_REG26__cm_state8_MASK 0x00030000L
+#define VGT_DEBUG_REG26__cm_state8__SHIFT 0x00000010
+#define VGT_DEBUG_REG26__cm_state9_MASK 0x000c0000L
+#define VGT_DEBUG_REG26__cm_state9__SHIFT 0x00000012
+#define VGT_DEBUG_REG27__eop_p1_q_MASK 0x00000800L
+#define VGT_DEBUG_REG27__eop_p1_q__SHIFT 0x0000000b
+#define VGT_DEBUG_REG27__event_flag_p1_q_MASK 0x00000400L
+#define VGT_DEBUG_REG27__event_flag_p1_q__SHIFT 0x0000000a
+#define VGT_DEBUG_REG27__first_vsprim_of_gsprim_p0_q_MASK 0x00080000L
+#define VGT_DEBUG_REG27__first_vsprim_of_gsprim_p0_q__SHIFT 0x00000013
+#define VGT_DEBUG_REG27__gsc0_dr_MASK 0x00000002L
+#define VGT_DEBUG_REG27__gsc0_dr__SHIFT 0x00000001
+#define VGT_DEBUG_REG27__gsc0_rtr_MASK 0x00000020L
+#define VGT_DEBUG_REG27__gsc0_rtr__SHIFT 0x00000005
+#define VGT_DEBUG_REG27__gsc_2cycle_output_MASK 0x00010000L
+#define VGT_DEBUG_REG27__gsc_2cycle_output__SHIFT 0x00000010
+#define VGT_DEBUG_REG27__gsc_2nd_cycle_p0_q_MASK 0x00020000L
+#define VGT_DEBUG_REG27__gsc_2nd_cycle_p0_q__SHIFT 0x00000011
+#define VGT_DEBUG_REG27__gsc_eop_p0_q_MASK 0x00008000L
+#define VGT_DEBUG_REG27__gsc_eop_p0_q__SHIFT 0x0000000f
+#define VGT_DEBUG_REG27__gsc_indx_count_p0_q_MASK 0x7ff00000L
+#define VGT_DEBUG_REG27__gsc_indx_count_p0_q__SHIFT 0x00000014
+#define VGT_DEBUG_REG27__gsc_null_primitive_p0_q_MASK 0x00004000L
+#define VGT_DEBUG_REG27__gsc_null_primitive_p0_q__SHIFT 0x0000000e
+#define VGT_DEBUG_REG27__gs_out_prim_type_p0_q_MASK 0x00003000L
+#define VGT_DEBUG_REG27__gs_out_prim_type_p0_q__SHIFT 0x0000000c
+#define VGT_DEBUG_REG27__indices_to_send_p0_q_MASK 0x00000300L
+#define VGT_DEBUG_REG27__indices_to_send_p0_q__SHIFT 0x00000008
+#define VGT_DEBUG_REG27__last_indx_of_prim_p1_q_MASK 0x00000080L
+#define VGT_DEBUG_REG27__last_indx_of_prim_p1_q__SHIFT 0x00000007
+#define VGT_DEBUG_REG27__last_indx_of_vsprim_MASK 0x00040000L
+#define VGT_DEBUG_REG27__last_indx_of_vsprim__SHIFT 0x00000012
+#define VGT_DEBUG_REG27__last_vsprim_of_gsprim_MASK 0x80000000L
+#define VGT_DEBUG_REG27__last_vsprim_of_gsprim__SHIFT 0x0000001f
+#define VGT_DEBUG_REG27__pipe0_dr_MASK 0x00000001L
+#define VGT_DEBUG_REG27__pipe0_dr__SHIFT 0x00000000
+#define VGT_DEBUG_REG27__pipe0_rtr_MASK 0x00000010L
+#define VGT_DEBUG_REG27__pipe0_rtr__SHIFT 0x00000004
+#define VGT_DEBUG_REG27__pipe1_dr_MASK 0x00000004L
+#define VGT_DEBUG_REG27__pipe1_dr__SHIFT 0x00000002
+#define VGT_DEBUG_REG27__pipe1_rtr_MASK 0x00000040L
+#define VGT_DEBUG_REG27__pipe1_rtr__SHIFT 0x00000006
+#define VGT_DEBUG_REG27__tm_pt_event_rtr_MASK 0x00000008L
+#define VGT_DEBUG_REG27__tm_pt_event_rtr__SHIFT 0x00000003
+#define VGT_DEBUG_REG28__advance_inner_point_p1_MASK 0x00800000L
+#define VGT_DEBUG_REG28__advance_inner_point_p1__SHIFT 0x00000017
+#define VGT_DEBUG_REG28__advance_outer_point_p1_MASK 0x00400000L
+#define VGT_DEBUG_REG28__advance_outer_point_p1__SHIFT 0x00000016
+#define VGT_DEBUG_REG28__con_state_q_MASK 0x0000000fL
+#define VGT_DEBUG_REG28__con_state_q__SHIFT 0x00000000
+#define VGT_DEBUG_REG28__first_ring_of_patch_p0_q_MASK 0x00010000L
+#define VGT_DEBUG_REG28__first_ring_of_patch_p0_q__SHIFT 0x00000010
+#define VGT_DEBUG_REG28__last_edge_of_outer_ring_p0_q_MASK 0x00040000L
+#define VGT_DEBUG_REG28__last_edge_of_outer_ring_p0_q__SHIFT 0x00000012
+#define VGT_DEBUG_REG28__last_point_of_inner_ring_p1_MASK 0x00100000L
+#define VGT_DEBUG_REG28__last_point_of_inner_ring_p1__SHIFT 0x00000014
+#define VGT_DEBUG_REG28__last_point_of_outer_ring_p1_MASK 0x00080000L
+#define VGT_DEBUG_REG28__last_point_of_outer_ring_p1__SHIFT 0x00000013
+#define VGT_DEBUG_REG28__last_ring_of_patch_p0_q_MASK 0x00020000L
+#define VGT_DEBUG_REG28__last_ring_of_patch_p0_q__SHIFT 0x00000011
+#define VGT_DEBUG_REG28__next_ring_is_rect_p0_q_MASK 0x01000000L
+#define VGT_DEBUG_REG28__next_ring_is_rect_p0_q__SHIFT 0x00000018
+#define VGT_DEBUG_REG28__outer_edge_tf_eq_one_p0_q_MASK 0x00200000L
+#define VGT_DEBUG_REG28__outer_edge_tf_eq_one_p0_q__SHIFT 0x00000015
+#define VGT_DEBUG_REG28__outer_parity_p0_q_MASK 0x00004000L
+#define VGT_DEBUG_REG28__outer_parity_p0_q__SHIFT 0x0000000e
+#define VGT_DEBUG_REG28__parallel_parity_p0_q_MASK 0x00008000L
+#define VGT_DEBUG_REG28__parallel_parity_p0_q__SHIFT 0x0000000f
+#define VGT_DEBUG_REG28__pipe0_edge_dr_MASK 0x00000200L
+#define VGT_DEBUG_REG28__pipe0_edge_dr__SHIFT 0x00000009
+#define VGT_DEBUG_REG28__pipe0_edge_rtr_MASK 0x00001000L
+#define VGT_DEBUG_REG28__pipe0_edge_rtr__SHIFT 0x0000000c
+#define VGT_DEBUG_REG28__pipe0_patch_dr_MASK 0x00000100L
+#define VGT_DEBUG_REG28__pipe0_patch_dr__SHIFT 0x00000008
+#define VGT_DEBUG_REG28__pipe0_patch_rtr_MASK 0x00000800L
+#define VGT_DEBUG_REG28__pipe0_patch_rtr__SHIFT 0x0000000b
+#define VGT_DEBUG_REG28__pipe1_dr_MASK 0x00000400L
+#define VGT_DEBUG_REG28__pipe1_dr__SHIFT 0x0000000a
+#define VGT_DEBUG_REG28__pipe1_edge_rtr_MASK 0x40000000L
+#define VGT_DEBUG_REG28__pipe1_edge_rtr__SHIFT 0x0000001e
+#define VGT_DEBUG_REG28__pipe1_inner1_rtr_MASK 0x08000000L
+#define VGT_DEBUG_REG28__pipe1_inner1_rtr__SHIFT 0x0000001b
+#define VGT_DEBUG_REG28__pipe1_inner2_rtr_MASK 0x10000000L
+#define VGT_DEBUG_REG28__pipe1_inner2_rtr__SHIFT 0x0000001c
+#define VGT_DEBUG_REG28__pipe1_outer1_rtr_MASK 0x02000000L
+#define VGT_DEBUG_REG28__pipe1_outer1_rtr__SHIFT 0x00000019
+#define VGT_DEBUG_REG28__pipe1_outer2_rtr_MASK 0x04000000L
+#define VGT_DEBUG_REG28__pipe1_outer2_rtr__SHIFT 0x0000001a
+#define VGT_DEBUG_REG28__pipe1_patch_rtr_MASK 0x20000000L
+#define VGT_DEBUG_REG28__pipe1_patch_rtr__SHIFT 0x0000001d
+#define VGT_DEBUG_REG28__pipe1_rtr_MASK 0x00002000L
+#define VGT_DEBUG_REG28__pipe1_rtr__SHIFT 0x0000000d
+#define VGT_DEBUG_REG28__process_tri_1st_2nd_half_p0_q_MASK 0x00000040L
+#define VGT_DEBUG_REG28__process_tri_1st_2nd_half_p0_q__SHIFT 0x00000006
+#define VGT_DEBUG_REG28__process_tri_center_poly_p0_q_MASK 0x00000080L
+#define VGT_DEBUG_REG28__process_tri_center_poly_p0_q__SHIFT 0x00000007
+#define VGT_DEBUG_REG28__process_tri_middle_p0_q_MASK 0x00000020L
+#define VGT_DEBUG_REG28__process_tri_middle_p0_q__SHIFT 0x00000005
+#define VGT_DEBUG_REG28__second_cycle_q_MASK 0x00000010L
+#define VGT_DEBUG_REG28__second_cycle_q__SHIFT 0x00000004
+#define VGT_DEBUG_REG28__use_stored_inner_q_ring2_MASK 0x80000000L
+#define VGT_DEBUG_REG28__use_stored_inner_q_ring2__SHIFT 0x0000001f
+#define VGT_DEBUG_REG29__advance_inner_point_p1_MASK 0x00800000L
+#define VGT_DEBUG_REG29__advance_inner_point_p1__SHIFT 0x00000017
+#define VGT_DEBUG_REG29__advance_outer_point_p1_MASK 0x00400000L
+#define VGT_DEBUG_REG29__advance_outer_point_p1__SHIFT 0x00000016
+#define VGT_DEBUG_REG29__con_state_q_MASK 0x0000000fL
+#define VGT_DEBUG_REG29__con_state_q__SHIFT 0x00000000
+#define VGT_DEBUG_REG29__first_ring_of_patch_p0_q_MASK 0x00010000L
+#define VGT_DEBUG_REG29__first_ring_of_patch_p0_q__SHIFT 0x00000010
+#define VGT_DEBUG_REG29__last_edge_of_outer_ring_p0_q_MASK 0x00040000L
+#define VGT_DEBUG_REG29__last_edge_of_outer_ring_p0_q__SHIFT 0x00000012
+#define VGT_DEBUG_REG29__last_point_of_inner_ring_p1_MASK 0x00100000L
+#define VGT_DEBUG_REG29__last_point_of_inner_ring_p1__SHIFT 0x00000014
+#define VGT_DEBUG_REG29__last_point_of_outer_ring_p1_MASK 0x00080000L
+#define VGT_DEBUG_REG29__last_point_of_outer_ring_p1__SHIFT 0x00000013
+#define VGT_DEBUG_REG29__last_ring_of_patch_p0_q_MASK 0x00020000L
+#define VGT_DEBUG_REG29__last_ring_of_patch_p0_q__SHIFT 0x00000011
+#define VGT_DEBUG_REG29__next_ring_is_rect_p0_q_MASK 0x01000000L
+#define VGT_DEBUG_REG29__next_ring_is_rect_p0_q__SHIFT 0x00000018
+#define VGT_DEBUG_REG29__outer_edge_tf_eq_one_p0_q_MASK 0x00200000L
+#define VGT_DEBUG_REG29__outer_edge_tf_eq_one_p0_q__SHIFT 0x00000015
+#define VGT_DEBUG_REG29__outer_parity_p0_q_MASK 0x00004000L
+#define VGT_DEBUG_REG29__outer_parity_p0_q__SHIFT 0x0000000e
+#define VGT_DEBUG_REG29__parallel_parity_p0_q_MASK 0x00008000L
+#define VGT_DEBUG_REG29__parallel_parity_p0_q__SHIFT 0x0000000f
+#define VGT_DEBUG_REG29__pipe0_edge_dr_MASK 0x00000200L
+#define VGT_DEBUG_REG29__pipe0_edge_dr__SHIFT 0x00000009
+#define VGT_DEBUG_REG29__pipe0_edge_rtr_MASK 0x00001000L
+#define VGT_DEBUG_REG29__pipe0_edge_rtr__SHIFT 0x0000000c
+#define VGT_DEBUG_REG29__pipe0_patch_dr_MASK 0x00000100L
+#define VGT_DEBUG_REG29__pipe0_patch_dr__SHIFT 0x00000008
+#define VGT_DEBUG_REG29__pipe0_patch_rtr_MASK 0x00000800L
+#define VGT_DEBUG_REG29__pipe0_patch_rtr__SHIFT 0x0000000b
+#define VGT_DEBUG_REG29__pipe1_dr_MASK 0x00000400L
+#define VGT_DEBUG_REG29__pipe1_dr__SHIFT 0x0000000a
+#define VGT_DEBUG_REG29__pipe1_edge_rtr_MASK 0x40000000L
+#define VGT_DEBUG_REG29__pipe1_edge_rtr__SHIFT 0x0000001e
+#define VGT_DEBUG_REG29__pipe1_inner1_rtr_MASK 0x08000000L
+#define VGT_DEBUG_REG29__pipe1_inner1_rtr__SHIFT 0x0000001b
+#define VGT_DEBUG_REG29__pipe1_inner2_rtr_MASK 0x10000000L
+#define VGT_DEBUG_REG29__pipe1_inner2_rtr__SHIFT 0x0000001c
+#define VGT_DEBUG_REG29__pipe1_outer1_rtr_MASK 0x02000000L
+#define VGT_DEBUG_REG29__pipe1_outer1_rtr__SHIFT 0x00000019
+#define VGT_DEBUG_REG29__pipe1_outer2_rtr_MASK 0x04000000L
+#define VGT_DEBUG_REG29__pipe1_outer2_rtr__SHIFT 0x0000001a
+#define VGT_DEBUG_REG29__pipe1_patch_rtr_MASK 0x20000000L
+#define VGT_DEBUG_REG29__pipe1_patch_rtr__SHIFT 0x0000001d
+#define VGT_DEBUG_REG29__pipe1_rtr_MASK 0x00002000L
+#define VGT_DEBUG_REG29__pipe1_rtr__SHIFT 0x0000000d
+#define VGT_DEBUG_REG29__process_tri_1st_2nd_half_p0_q_MASK 0x00000040L
+#define VGT_DEBUG_REG29__process_tri_1st_2nd_half_p0_q__SHIFT 0x00000006
+#define VGT_DEBUG_REG29__process_tri_center_poly_p0_q_MASK 0x00000080L
+#define VGT_DEBUG_REG29__process_tri_center_poly_p0_q__SHIFT 0x00000007
+#define VGT_DEBUG_REG29__process_tri_middle_p0_q_MASK 0x00000020L
+#define VGT_DEBUG_REG29__process_tri_middle_p0_q__SHIFT 0x00000005
+#define VGT_DEBUG_REG29__second_cycle_q_MASK 0x00000010L
+#define VGT_DEBUG_REG29__second_cycle_q__SHIFT 0x00000004
+#define VGT_DEBUG_REG29__use_stored_inner_q_ring3_MASK 0x80000000L
+#define VGT_DEBUG_REG29__use_stored_inner_q_ring3__SHIFT 0x0000001f
+#define VGT_DEBUG_REG2__grpModBusy_MASK 0x00000080L
+#define VGT_DEBUG_REG2__grpModBusy__SHIFT 0x00000007
+#define VGT_DEBUG_REG2__hs_grp_busy_MASK 0x00000001L
+#define VGT_DEBUG_REG2__hs_grp_busy__SHIFT 0x00000000
+#define VGT_DEBUG_REG2__hsInputFifoEmpty_MASK 0x00001000L
+#define VGT_DEBUG_REG2__hsInputFifoEmpty__SHIFT 0x0000000c
+#define VGT_DEBUG_REG2__hsInputFifoFull_MASK 0x00040000L
+#define VGT_DEBUG_REG2__hsInputFifoFull__SHIFT 0x00000012
+#define VGT_DEBUG_REG2__hs_noif_busy_MASK 0x00000002L
+#define VGT_DEBUG_REG2__hs_noif_busy__SHIFT 0x00000001
+#define VGT_DEBUG_REG2__hs_te11_tess_input_rts_MASK 0x00000040L
+#define VGT_DEBUG_REG2__hs_te11_tess_input_rts__SHIFT 0x00000006
+#define VGT_DEBUG_REG2__hsTifFifoEmpty_MASK 0x00002000L
+#define VGT_DEBUG_REG2__hsTifFifoEmpty__SHIFT 0x0000000d
+#define VGT_DEBUG_REG2__hsTifFifoFull_MASK 0x00080000L
+#define VGT_DEBUG_REG2__hsTifFifoFull__SHIFT 0x00000013
+#define VGT_DEBUG_REG2__hsVertFifoEmpty_MASK 0x00000400L
+#define VGT_DEBUG_REG2__hsVertFifoEmpty__SHIFT 0x0000000a
+#define VGT_DEBUG_REG2__hsVertFifoFull_MASK 0x00010000L
+#define VGT_DEBUG_REG2__hsVertFifoFull__SHIFT 0x00000010
+#define VGT_DEBUG_REG2__hsWaveFifoEmpty_MASK 0x00000800L
+#define VGT_DEBUG_REG2__hsWaveFifoEmpty__SHIFT 0x0000000b
+#define VGT_DEBUG_REG2__hsWaveFifoFull_MASK 0x00020000L
+#define VGT_DEBUG_REG2__hsWaveFifoFull__SHIFT 0x00000011
+#define VGT_DEBUG_REG2__lsFwaveFlag_MASK 0x08000000L
+#define VGT_DEBUG_REG2__lsFwaveFlag__SHIFT 0x0000001b
+#define VGT_DEBUG_REG2__ls_sh_id_MASK 0x04000000L
+#define VGT_DEBUG_REG2__ls_sh_id__SHIFT 0x0000001a
+#define VGT_DEBUG_REG2__lsVertFifoEmpty_MASK 0x00000100L
+#define VGT_DEBUG_REG2__lsVertFifoEmpty__SHIFT 0x00000008
+#define VGT_DEBUG_REG2__lsVertFifoFull_MASK 0x00004000L
+#define VGT_DEBUG_REG2__lsVertFifoFull__SHIFT 0x0000000e
+#define VGT_DEBUG_REG2__lsVertIfBusy_0_MASK 0x00000008L
+#define VGT_DEBUG_REG2__lsVertIfBusy_0__SHIFT 0x00000003
+#define VGT_DEBUG_REG2__lsWaveFifoEmpty_MASK 0x00000200L
+#define VGT_DEBUG_REG2__lsWaveFifoEmpty__SHIFT 0x00000009
+#define VGT_DEBUG_REG2__lsWaveFifoFull_MASK 0x00008000L
+#define VGT_DEBUG_REG2__lsWaveFifoFull__SHIFT 0x0000000f
+#define VGT_DEBUG_REG2__lsWaveIfBusy_0_MASK 0x00000020L
+#define VGT_DEBUG_REG2__lsWaveIfBusy_0__SHIFT 0x00000005
+#define VGT_DEBUG_REG2__lsWaveSendFlush_MASK 0x10000000L
+#define VGT_DEBUG_REG2__lsWaveSendFlush__SHIFT 0x0000001c
+#define VGT_DEBUG_REG2__p0_dr_MASK 0x00400000L
+#define VGT_DEBUG_REG2__p0_dr__SHIFT 0x00000016
+#define VGT_DEBUG_REG2__p0_rtr_MASK 0x00100000L
+#define VGT_DEBUG_REG2__p0_rtr__SHIFT 0x00000014
+#define VGT_DEBUG_REG2__p0_rts_MASK 0x01000000L
+#define VGT_DEBUG_REG2__p0_rts__SHIFT 0x00000018
+#define VGT_DEBUG_REG2__p1_dr_MASK 0x00800000L
+#define VGT_DEBUG_REG2__p1_dr__SHIFT 0x00000017
+#define VGT_DEBUG_REG2__p1_rtr_MASK 0x00200000L
+#define VGT_DEBUG_REG2__p1_rtr__SHIFT 0x00000015
+#define VGT_DEBUG_REG2__p1_rts_MASK 0x02000000L
+#define VGT_DEBUG_REG2__p1_rts__SHIFT 0x00000019
+#define VGT_DEBUG_REG2__SPARE_MASK 0xffffffffL
+#define VGT_DEBUG_REG2__SPARE__SHIFT 0x00000000
+#define VGT_DEBUG_REG2__te11_hs_tess_input_rtr_MASK 0x00000010L
+#define VGT_DEBUG_REG2__te11_hs_tess_input_rtr__SHIFT 0x00000004
+#define VGT_DEBUG_REG2__tfmmIsBusy_MASK 0x00000004L
+#define VGT_DEBUG_REG2__tfmmIsBusy__SHIFT 0x00000002
+#define VGT_DEBUG_REG30__dynamic_hs_p0_q_MASK 0x01000000L
+#define VGT_DEBUG_REG30__dynamic_hs_p0_q__SHIFT 0x00000018
+#define VGT_DEBUG_REG30__event_or_null_p0_q_MASK 0x00000008L
+#define VGT_DEBUG_REG30__event_or_null_p0_q__SHIFT 0x00000003
+#define VGT_DEBUG_REG30__first_data_chunk_invalid_p0_q_MASK 0x08000000L
+#define VGT_DEBUG_REG30__first_data_chunk_invalid_p0_q__SHIFT 0x0000001b
+#define VGT_DEBUG_REG30__first_data_ret_of_req_p0_q_MASK 0x04000000L
+#define VGT_DEBUG_REG30__first_data_ret_of_req_p0_q__SHIFT 0x0000001a
+#define VGT_DEBUG_REG30__first_fetch_of_tg_p0_q_MASK 0x02000000L
+#define VGT_DEBUG_REG30__first_fetch_of_tg_p0_q__SHIFT 0x00000019
+#define VGT_DEBUG_REG30__last_tf_of_tg_MASK 0x00080000L
+#define VGT_DEBUG_REG30__last_tf_of_tg__SHIFT 0x00000013
+#define VGT_DEBUG_REG30__pipe0_dr_MASK 0x00000001L
+#define VGT_DEBUG_REG30__pipe0_dr__SHIFT 0x00000000
+#define VGT_DEBUG_REG30__pipe0_rtr_MASK 0x00000010L
+#define VGT_DEBUG_REG30__pipe0_rtr__SHIFT 0x00000004
+#define VGT_DEBUG_REG30__pipe0_tf_dr_MASK 0x00000002L
+#define VGT_DEBUG_REG30__pipe0_tf_dr__SHIFT 0x00000001
+#define VGT_DEBUG_REG30__pipe1_rtr_MASK 0x00000020L
+#define VGT_DEBUG_REG30__pipe1_rtr__SHIFT 0x00000005
+#define VGT_DEBUG_REG30__pipe1_tf_rtr_MASK 0x00000040L
+#define VGT_DEBUG_REG30__pipe1_tf_rtr__SHIFT 0x00000006
+#define VGT_DEBUG_REG30__pipe2_dr_MASK 0x00000004L
+#define VGT_DEBUG_REG30__pipe2_dr__SHIFT 0x00000002
+#define VGT_DEBUG_REG30__pipe2_rtr_MASK 0x00000080L
+#define VGT_DEBUG_REG30__pipe2_rtr__SHIFT 0x00000007
+#define VGT_DEBUG_REG30__pipe4_dr_MASK 0x40000000L
+#define VGT_DEBUG_REG30__pipe4_dr__SHIFT 0x0000001e
+#define VGT_DEBUG_REG30__pipe4_rtr_MASK 0x80000000L
+#define VGT_DEBUG_REG30__pipe4_rtr__SHIFT 0x0000001f
+#define VGT_DEBUG_REG30__tf_fetch_state_q_MASK 0x00070000L
+#define VGT_DEBUG_REG30__tf_fetch_state_q__SHIFT 0x00000010
+#define VGT_DEBUG_REG30__tf_pointer_p0_q_MASK 0x00f00000L
+#define VGT_DEBUG_REG30__tf_pointer_p0_q__SHIFT 0x00000014
+#define VGT_DEBUG_REG30__tf_xfer_count_p2_q_MASK 0x30000000L
+#define VGT_DEBUG_REG30__tf_xfer_count_p2_q__SHIFT 0x0000001c
+#define VGT_DEBUG_REG30__ttp_patch_fifo_empty_MASK 0x00000200L
+#define VGT_DEBUG_REG30__ttp_patch_fifo_empty__SHIFT 0x00000009
+#define VGT_DEBUG_REG30__ttp_patch_fifo_full_MASK 0x00000100L
+#define VGT_DEBUG_REG30__ttp_patch_fifo_full__SHIFT 0x00000008
+#define VGT_DEBUG_REG30__ttp_tf0_fifo_empty_MASK 0x00000400L
+#define VGT_DEBUG_REG30__ttp_tf0_fifo_empty__SHIFT 0x0000000a
+#define VGT_DEBUG_REG30__ttp_tf1_fifo_empty_MASK 0x00000800L
+#define VGT_DEBUG_REG30__ttp_tf1_fifo_empty__SHIFT 0x0000000b
+#define VGT_DEBUG_REG30__ttp_tf2_fifo_empty_MASK 0x00001000L
+#define VGT_DEBUG_REG30__ttp_tf2_fifo_empty__SHIFT 0x0000000c
+#define VGT_DEBUG_REG30__ttp_tf3_fifo_empty_MASK 0x00002000L
+#define VGT_DEBUG_REG30__ttp_tf3_fifo_empty__SHIFT 0x0000000d
+#define VGT_DEBUG_REG30__ttp_tf4_fifo_empty_MASK 0x00004000L
+#define VGT_DEBUG_REG30__ttp_tf4_fifo_empty__SHIFT 0x0000000e
+#define VGT_DEBUG_REG30__ttp_tf5_fifo_empty_MASK 0x00008000L
+#define VGT_DEBUG_REG30__ttp_tf5_fifo_empty__SHIFT 0x0000000f
+#define VGT_DEBUG_REG31__inner_ring_done_q_MASK 0x80000000L
+#define VGT_DEBUG_REG31__inner_ring_done_q__SHIFT 0x0000001f
+#define VGT_DEBUG_REG31__outer_ring_done_q_MASK 0x40000000L
+#define VGT_DEBUG_REG31__outer_ring_done_q__SHIFT 0x0000001e
+#define VGT_DEBUG_REG31__pg_con_inner_point1_rts_MASK 0x00400000L
+#define VGT_DEBUG_REG31__pg_con_inner_point1_rts__SHIFT 0x00000016
+#define VGT_DEBUG_REG31__pg_con_inner_point2_rts_MASK 0x00800000L
+#define VGT_DEBUG_REG31__pg_con_inner_point2_rts__SHIFT 0x00000017
+#define VGT_DEBUG_REG31__pg_con_outer_point1_rts_MASK 0x00100000L
+#define VGT_DEBUG_REG31__pg_con_outer_point1_rts__SHIFT 0x00000014
+#define VGT_DEBUG_REG31__pg_con_outer_point2_rts_MASK 0x00200000L
+#define VGT_DEBUG_REG31__pg_con_outer_point2_rts__SHIFT 0x00000015
+#define VGT_DEBUG_REG31__pg_edge_fifo_empty_MASK 0x02000000L
+#define VGT_DEBUG_REG31__pg_edge_fifo_empty__SHIFT 0x00000019
+#define VGT_DEBUG_REG31__pg_edge_fifo_full_MASK 0x10000000L
+#define VGT_DEBUG_REG31__pg_edge_fifo_full__SHIFT 0x0000001c
+#define VGT_DEBUG_REG31__pg_inner3_perp_fifo_empty_MASK 0x04000000L
+#define VGT_DEBUG_REG31__pg_inner3_perp_fifo_empty__SHIFT 0x0000001a
+#define VGT_DEBUG_REG31__pg_inner_perp_fifo_full_MASK 0x20000000L
+#define VGT_DEBUG_REG31__pg_inner_perp_fifo_full__SHIFT 0x0000001d
+#define VGT_DEBUG_REG31__pg_patch_fifo_empty_MASK 0x01000000L
+#define VGT_DEBUG_REG31__pg_patch_fifo_empty__SHIFT 0x00000018
+#define VGT_DEBUG_REG31__pg_patch_fifo_full_MASK 0x08000000L
+#define VGT_DEBUG_REG31__pg_patch_fifo_full__SHIFT 0x0000001b
+#define VGT_DEBUG_REG31__pipe0_dr_MASK 0x00000001L
+#define VGT_DEBUG_REG31__pipe0_dr__SHIFT 0x00000000
+#define VGT_DEBUG_REG31__pipe0_rtr_MASK 0x00000002L
+#define VGT_DEBUG_REG31__pipe0_rtr__SHIFT 0x00000001
+#define VGT_DEBUG_REG31__pipe1_inner_dr_MASK 0x00000008L
+#define VGT_DEBUG_REG31__pipe1_inner_dr__SHIFT 0x00000003
+#define VGT_DEBUG_REG31__pipe1_outer_dr_MASK 0x00000004L
+#define VGT_DEBUG_REG31__pipe1_outer_dr__SHIFT 0x00000002
+#define VGT_DEBUG_REG31__pipe2_inner_dr_MASK 0x00000020L
+#define VGT_DEBUG_REG31__pipe2_inner_dr__SHIFT 0x00000005
+#define VGT_DEBUG_REG31__pipe2_inner_rtr_MASK 0x00002000L
+#define VGT_DEBUG_REG31__pipe2_inner_rtr__SHIFT 0x0000000d
+#define VGT_DEBUG_REG31__pipe2_outer_dr_MASK 0x00000010L
+#define VGT_DEBUG_REG31__pipe2_outer_dr__SHIFT 0x00000004
+#define VGT_DEBUG_REG31__pipe2_outer_rtr_MASK 0x00001000L
+#define VGT_DEBUG_REG31__pipe2_outer_rtr__SHIFT 0x0000000c
+#define VGT_DEBUG_REG31__pipe3_inner_dr_MASK 0x00000080L
+#define VGT_DEBUG_REG31__pipe3_inner_dr__SHIFT 0x00000007
+#define VGT_DEBUG_REG31__pipe3_inner_rtr_MASK 0x00008000L
+#define VGT_DEBUG_REG31__pipe3_inner_rtr__SHIFT 0x0000000f
+#define VGT_DEBUG_REG31__pipe3_outer_dr_MASK 0x00000040L
+#define VGT_DEBUG_REG31__pipe3_outer_dr__SHIFT 0x00000006
+#define VGT_DEBUG_REG31__pipe3_outer_rtr_MASK 0x00004000L
+#define VGT_DEBUG_REG31__pipe3_outer_rtr__SHIFT 0x0000000e
+#define VGT_DEBUG_REG31__pipe4_inner_dr_MASK 0x00000200L
+#define VGT_DEBUG_REG31__pipe4_inner_dr__SHIFT 0x00000009
+#define VGT_DEBUG_REG31__pipe4_inner_rtr_MASK 0x00020000L
+#define VGT_DEBUG_REG31__pipe4_inner_rtr__SHIFT 0x00000011
+#define VGT_DEBUG_REG31__pipe4_outer_dr_MASK 0x00000100L
+#define VGT_DEBUG_REG31__pipe4_outer_dr__SHIFT 0x00000008
+#define VGT_DEBUG_REG31__pipe4_outer_rtr_MASK 0x00010000L
+#define VGT_DEBUG_REG31__pipe4_outer_rtr__SHIFT 0x00000010
+#define VGT_DEBUG_REG31__pipe5_inner_dr_MASK 0x00000800L
+#define VGT_DEBUG_REG31__pipe5_inner_dr__SHIFT 0x0000000b
+#define VGT_DEBUG_REG31__pipe5_inner_rtr_MASK 0x00080000L
+#define VGT_DEBUG_REG31__pipe5_inner_rtr__SHIFT 0x00000013
+#define VGT_DEBUG_REG31__pipe5_outer_dr_MASK 0x00000400L
+#define VGT_DEBUG_REG31__pipe5_outer_dr__SHIFT 0x0000000a
+#define VGT_DEBUG_REG31__pipe5_outer_rtr_MASK 0x00040000L
+#define VGT_DEBUG_REG31__pipe5_outer_rtr__SHIFT 0x00000012
+#define VGT_DEBUG_REG32__event_flag_p5_q_MASK 0x00000100L
+#define VGT_DEBUG_REG32__event_flag_p5_q__SHIFT 0x00000008
+#define VGT_DEBUG_REG32__event_null_special_p0_q_MASK 0x00000080L
+#define VGT_DEBUG_REG32__event_null_special_p0_q__SHIFT 0x00000007
+#define VGT_DEBUG_REG32__fifos_rtr_MASK 0x08000000L
+#define VGT_DEBUG_REG32__fifos_rtr__SHIFT 0x0000001b
+#define VGT_DEBUG_REG32__first_point_of_edge_p5_q_MASK 0x00000400L
+#define VGT_DEBUG_REG32__first_point_of_edge_p5_q__SHIFT 0x0000000a
+#define VGT_DEBUG_REG32__first_point_of_patch_p5_q_MASK 0x00000200L
+#define VGT_DEBUG_REG32__first_point_of_patch_p5_q__SHIFT 0x00000009
+#define VGT_DEBUG_REG32__first_ring_of_patch_MASK 0x00000001L
+#define VGT_DEBUG_REG32__first_ring_of_patch__SHIFT 0x00000000
+#define VGT_DEBUG_REG32__inner2_fifos_rtr_MASK 0x01000000L
+#define VGT_DEBUG_REG32__inner2_fifos_rtr__SHIFT 0x00000018
+#define VGT_DEBUG_REG32__inner_fifos_rtr_MASK 0x02000000L
+#define VGT_DEBUG_REG32__inner_fifos_rtr__SHIFT 0x00000019
+#define VGT_DEBUG_REG32__last_edge_of_inner_ring_MASK 0x00000010L
+#define VGT_DEBUG_REG32__last_edge_of_inner_ring__SHIFT 0x00000004
+#define VGT_DEBUG_REG32__last_edge_of_outer_ring_MASK 0x00000004L
+#define VGT_DEBUG_REG32__last_edge_of_outer_ring__SHIFT 0x00000002
+#define VGT_DEBUG_REG32__last_patch_of_tg_p0_q_MASK 0x00000040L
+#define VGT_DEBUG_REG32__last_patch_of_tg_p0_q__SHIFT 0x00000006
+#define VGT_DEBUG_REG32__last_patch_of_tg_p5_q_MASK 0x00000800L
+#define VGT_DEBUG_REG32__last_patch_of_tg_p5_q__SHIFT 0x0000000b
+#define VGT_DEBUG_REG32__last_point_of_inner_edge_MASK 0x00000020L
+#define VGT_DEBUG_REG32__last_point_of_inner_edge__SHIFT 0x00000005
+#define VGT_DEBUG_REG32__last_point_of_outer_edge_MASK 0x00000008L
+#define VGT_DEBUG_REG32__last_point_of_outer_edge__SHIFT 0x00000003
+#define VGT_DEBUG_REG32__last_ring_of_patch_MASK 0x00000002L
+#define VGT_DEBUG_REG32__last_ring_of_patch__SHIFT 0x00000001
+#define VGT_DEBUG_REG32__outer_fifos_rtr_MASK 0x04000000L
+#define VGT_DEBUG_REG32__outer_fifos_rtr__SHIFT 0x0000001a
+#define VGT_DEBUG_REG32__pg_edge_fifo2_full_MASK 0x00020000L
+#define VGT_DEBUG_REG32__pg_edge_fifo2_full__SHIFT 0x00000011
+#define VGT_DEBUG_REG32__pg_edge_fifo3_full_MASK 0x00010000L
+#define VGT_DEBUG_REG32__pg_edge_fifo3_full__SHIFT 0x00000010
+#define VGT_DEBUG_REG32__pg_inner2_point_fifo_full_MASK 0x00100000L
+#define VGT_DEBUG_REG32__pg_inner2_point_fifo_full__SHIFT 0x00000014
+#define VGT_DEBUG_REG32__pg_inner3_point_fifo_full_MASK 0x00040000L
+#define VGT_DEBUG_REG32__pg_inner3_point_fifo_full__SHIFT 0x00000012
+#define VGT_DEBUG_REG32__pg_inner_point_fifo_full_MASK 0x00400000L
+#define VGT_DEBUG_REG32__pg_inner_point_fifo_full__SHIFT 0x00000016
+#define VGT_DEBUG_REG32__pg_outer2_point_fifo_full_MASK 0x00200000L
+#define VGT_DEBUG_REG32__pg_outer2_point_fifo_full__SHIFT 0x00000015
+#define VGT_DEBUG_REG32__pg_outer3_point_fifo_full_MASK 0x00080000L
+#define VGT_DEBUG_REG32__pg_outer3_point_fifo_full__SHIFT 0x00000013
+#define VGT_DEBUG_REG32__pg_outer_point_fifo_full_MASK 0x00800000L
+#define VGT_DEBUG_REG32__pg_outer_point_fifo_full__SHIFT 0x00000017
+#define VGT_DEBUG_REG32__pipe5_inner2_rtr_MASK 0x00008000L
+#define VGT_DEBUG_REG32__pipe5_inner2_rtr__SHIFT 0x0000000f
+#define VGT_DEBUG_REG32__pipe5_inner3_rtr_MASK 0x00004000L
+#define VGT_DEBUG_REG32__pipe5_inner3_rtr__SHIFT 0x0000000e
+#define VGT_DEBUG_REG32__SPARE_MASK 0x80000000L
+#define VGT_DEBUG_REG32__SPARE__SHIFT 0x0000001f
+#define VGT_DEBUG_REG32__tess_topology_p5_q_MASK 0x00003000L
+#define VGT_DEBUG_REG32__tess_topology_p5_q__SHIFT 0x0000000c
+#define VGT_DEBUG_REG33__con_prim_fifo_empty_MASK 0x00040000L
+#define VGT_DEBUG_REG33__con_prim_fifo_empty__SHIFT 0x00000012
+#define VGT_DEBUG_REG33__con_prim_fifo_full_MASK 0x00010000L
+#define VGT_DEBUG_REG33__con_prim_fifo_full__SHIFT 0x00000010
+#define VGT_DEBUG_REG33__con_ring1_busy_MASK 0x80000000L
+#define VGT_DEBUG_REG33__con_ring1_busy__SHIFT 0x0000001f
+#define VGT_DEBUG_REG33__con_ring2_busy_MASK 0x40000000L
+#define VGT_DEBUG_REG33__con_ring2_busy__SHIFT 0x0000001e
+#define VGT_DEBUG_REG33__con_ring3_busy_MASK 0x20000000L
+#define VGT_DEBUG_REG33__con_ring3_busy__SHIFT 0x0000001d
+#define VGT_DEBUG_REG33__con_vert_fifo_empty_MASK 0x00080000L
+#define VGT_DEBUG_REG33__con_vert_fifo_empty__SHIFT 0x00000013
+#define VGT_DEBUG_REG33__con_vert_fifo_full_MASK 0x00020000L
+#define VGT_DEBUG_REG33__con_vert_fifo_full__SHIFT 0x00000011
+#define VGT_DEBUG_REG33__first_prim_of_patch_q_MASK 0x00008000L
+#define VGT_DEBUG_REG33__first_prim_of_patch_q__SHIFT 0x0000000f
+#define VGT_DEBUG_REG33__last_patch_of_tg_p0_q_MASK 0x00100000L
+#define VGT_DEBUG_REG33__last_patch_of_tg_p0_q__SHIFT 0x00000014
+#define VGT_DEBUG_REG33__pipe0_patch_dr_MASK 0x00000001L
+#define VGT_DEBUG_REG33__pipe0_patch_dr__SHIFT 0x00000000
+#define VGT_DEBUG_REG33__pipe0_patch_rtr_MASK 0x00000010L
+#define VGT_DEBUG_REG33__pipe0_patch_rtr__SHIFT 0x00000004
+#define VGT_DEBUG_REG33__pipe1_dr_MASK 0x00000004L
+#define VGT_DEBUG_REG33__pipe1_dr__SHIFT 0x00000002
+#define VGT_DEBUG_REG33__pipe1_patch_rtr_MASK 0x00001000L
+#define VGT_DEBUG_REG33__pipe1_patch_rtr__SHIFT 0x0000000c
+#define VGT_DEBUG_REG33__pipe2_dr_MASK 0x00000008L
+#define VGT_DEBUG_REG33__pipe2_dr__SHIFT 0x00000003
+#define VGT_DEBUG_REG33__pipe2_rtr_MASK 0x00000080L
+#define VGT_DEBUG_REG33__pipe2_rtr__SHIFT 0x00000007
+#define VGT_DEBUG_REG33__pipe3_dr_MASK 0x00000100L
+#define VGT_DEBUG_REG33__pipe3_dr__SHIFT 0x00000008
+#define VGT_DEBUG_REG33__pipe3_rtr_MASK 0x00000200L
+#define VGT_DEBUG_REG33__pipe3_rtr__SHIFT 0x00000009
+#define VGT_DEBUG_REG33__ring1_in_sync_q_MASK 0x00000800L
+#define VGT_DEBUG_REG33__ring1_in_sync_q__SHIFT 0x0000000b
+#define VGT_DEBUG_REG33__ring1_pipe1_dr_MASK 0x00000040L
+#define VGT_DEBUG_REG33__ring1_pipe1_dr__SHIFT 0x00000006
+#define VGT_DEBUG_REG33__ring1_valid_p2_MASK 0x00800000L
+#define VGT_DEBUG_REG33__ring1_valid_p2__SHIFT 0x00000017
+#define VGT_DEBUG_REG33__ring2_in_sync_q_MASK 0x00000400L
+#define VGT_DEBUG_REG33__ring2_in_sync_q__SHIFT 0x0000000a
+#define VGT_DEBUG_REG33__ring2_pipe1_dr_MASK 0x00000020L
+#define VGT_DEBUG_REG33__ring2_pipe1_dr__SHIFT 0x00000005
+#define VGT_DEBUG_REG33__ring2_valid_p2_MASK 0x00400000L
+#define VGT_DEBUG_REG33__ring2_valid_p2__SHIFT 0x00000016
+#define VGT_DEBUG_REG33__ring3_in_sync_q_MASK 0x00002000L
+#define VGT_DEBUG_REG33__ring3_in_sync_q__SHIFT 0x0000000d
+#define VGT_DEBUG_REG33__ring3_pipe1_dr_MASK 0x00000002L
+#define VGT_DEBUG_REG33__ring3_pipe1_dr__SHIFT 0x00000001
+#define VGT_DEBUG_REG33__ring3_valid_p2_MASK 0x00200000L
+#define VGT_DEBUG_REG33__ring3_valid_p2__SHIFT 0x00000015
+#define VGT_DEBUG_REG33__te11_out_vert_gs_en_MASK 0x10000000L
+#define VGT_DEBUG_REG33__te11_out_vert_gs_en__SHIFT 0x0000001c
+#define VGT_DEBUG_REG33__tess_topology_p0_q_MASK 0x0c000000L
+#define VGT_DEBUG_REG33__tess_topology_p0_q__SHIFT 0x0000001a
+#define VGT_DEBUG_REG33__tess_type_p0_q_MASK 0x03000000L
+#define VGT_DEBUG_REG33__tess_type_p0_q__SHIFT 0x00000018
+#define VGT_DEBUG_REG33__tm_te11_event_rtr_MASK 0x00004000L
+#define VGT_DEBUG_REG33__tm_te11_event_rtr__SHIFT 0x0000000e
+#define VGT_DEBUG_REG34__advance_inner_point_p1_MASK 0x00800000L
+#define VGT_DEBUG_REG34__advance_inner_point_p1__SHIFT 0x00000017
+#define VGT_DEBUG_REG34__advance_outer_point_p1_MASK 0x00400000L
+#define VGT_DEBUG_REG34__advance_outer_point_p1__SHIFT 0x00000016
+#define VGT_DEBUG_REG34__con_state_q_MASK 0x0000000fL
+#define VGT_DEBUG_REG34__con_state_q__SHIFT 0x00000000
+#define VGT_DEBUG_REG34__first_ring_of_patch_p0_q_MASK 0x00010000L
+#define VGT_DEBUG_REG34__first_ring_of_patch_p0_q__SHIFT 0x00000010
+#define VGT_DEBUG_REG34__last_edge_of_outer_ring_p0_q_MASK 0x00040000L
+#define VGT_DEBUG_REG34__last_edge_of_outer_ring_p0_q__SHIFT 0x00000012
+#define VGT_DEBUG_REG34__last_point_of_inner_ring_p1_MASK 0x00100000L
+#define VGT_DEBUG_REG34__last_point_of_inner_ring_p1__SHIFT 0x00000014
+#define VGT_DEBUG_REG34__last_point_of_outer_ring_p1_MASK 0x00080000L
+#define VGT_DEBUG_REG34__last_point_of_outer_ring_p1__SHIFT 0x00000013
+#define VGT_DEBUG_REG34__last_ring_of_patch_p0_q_MASK 0x00020000L
+#define VGT_DEBUG_REG34__last_ring_of_patch_p0_q__SHIFT 0x00000011
+#define VGT_DEBUG_REG34__next_ring_is_rect_p0_q_MASK 0x01000000L
+#define VGT_DEBUG_REG34__next_ring_is_rect_p0_q__SHIFT 0x00000018
+#define VGT_DEBUG_REG34__outer_edge_tf_eq_one_p0_q_MASK 0x00200000L
+#define VGT_DEBUG_REG34__outer_edge_tf_eq_one_p0_q__SHIFT 0x00000015
+#define VGT_DEBUG_REG34__outer_parity_p0_q_MASK 0x00004000L
+#define VGT_DEBUG_REG34__outer_parity_p0_q__SHIFT 0x0000000e
+#define VGT_DEBUG_REG34__parallel_parity_p0_q_MASK 0x00008000L
+#define VGT_DEBUG_REG34__parallel_parity_p0_q__SHIFT 0x0000000f
+#define VGT_DEBUG_REG34__pipe0_edge_dr_MASK 0x00000200L
+#define VGT_DEBUG_REG34__pipe0_edge_dr__SHIFT 0x00000009
+#define VGT_DEBUG_REG34__pipe0_edge_rtr_MASK 0x00001000L
+#define VGT_DEBUG_REG34__pipe0_edge_rtr__SHIFT 0x0000000c
+#define VGT_DEBUG_REG34__pipe0_patch_dr_MASK 0x00000100L
+#define VGT_DEBUG_REG34__pipe0_patch_dr__SHIFT 0x00000008
+#define VGT_DEBUG_REG34__pipe0_patch_rtr_MASK 0x00000800L
+#define VGT_DEBUG_REG34__pipe0_patch_rtr__SHIFT 0x0000000b
+#define VGT_DEBUG_REG34__pipe1_dr_MASK 0x00000400L
+#define VGT_DEBUG_REG34__pipe1_dr__SHIFT 0x0000000a
+#define VGT_DEBUG_REG34__pipe1_edge_rtr_MASK 0x40000000L
+#define VGT_DEBUG_REG34__pipe1_edge_rtr__SHIFT 0x0000001e
+#define VGT_DEBUG_REG34__pipe1_inner1_rtr_MASK 0x08000000L
+#define VGT_DEBUG_REG34__pipe1_inner1_rtr__SHIFT 0x0000001b
+#define VGT_DEBUG_REG34__pipe1_inner2_rtr_MASK 0x10000000L
+#define VGT_DEBUG_REG34__pipe1_inner2_rtr__SHIFT 0x0000001c
+#define VGT_DEBUG_REG34__pipe1_outer1_rtr_MASK 0x02000000L
+#define VGT_DEBUG_REG34__pipe1_outer1_rtr__SHIFT 0x00000019
+#define VGT_DEBUG_REG34__pipe1_outer2_rtr_MASK 0x04000000L
+#define VGT_DEBUG_REG34__pipe1_outer2_rtr__SHIFT 0x0000001a
+#define VGT_DEBUG_REG34__pipe1_patch_rtr_MASK 0x20000000L
+#define VGT_DEBUG_REG34__pipe1_patch_rtr__SHIFT 0x0000001d
+#define VGT_DEBUG_REG34__pipe1_rtr_MASK 0x00002000L
+#define VGT_DEBUG_REG34__pipe1_rtr__SHIFT 0x0000000d
+#define VGT_DEBUG_REG34__process_tri_1st_2nd_half_p0_q_MASK 0x00000040L
+#define VGT_DEBUG_REG34__process_tri_1st_2nd_half_p0_q__SHIFT 0x00000006
+#define VGT_DEBUG_REG34__process_tri_center_poly_p0_q_MASK 0x00000080L
+#define VGT_DEBUG_REG34__process_tri_center_poly_p0_q__SHIFT 0x00000007
+#define VGT_DEBUG_REG34__process_tri_middle_p0_q_MASK 0x00000020L
+#define VGT_DEBUG_REG34__process_tri_middle_p0_q__SHIFT 0x00000005
+#define VGT_DEBUG_REG34__second_cycle_q_MASK 0x00000010L
+#define VGT_DEBUG_REG34__second_cycle_q__SHIFT 0x00000004
+#define VGT_DEBUG_REG34__use_stored_inner_q_ring1_MASK 0x80000000L
+#define VGT_DEBUG_REG34__use_stored_inner_q_ring1__SHIFT 0x0000001f
+#define VGT_DEBUG_REG35__event_flag_p1_q_MASK 0x00040000L
+#define VGT_DEBUG_REG35__event_flag_p1_q__SHIFT 0x00000012
+#define VGT_DEBUG_REG35__first_req_of_tg_p1_q_MASK 0x10000000L
+#define VGT_DEBUG_REG35__first_req_of_tg_p1_q__SHIFT 0x0000001c
+#define VGT_DEBUG_REG35__last_req_of_tg_p2_MASK 0x00000800L
+#define VGT_DEBUG_REG35__last_req_of_tg_p2__SHIFT 0x0000000b
+#define VGT_DEBUG_REG35__null_flag_p1_q_MASK 0x00080000L
+#define VGT_DEBUG_REG35__null_flag_p1_q__SHIFT 0x00000013
+#define VGT_DEBUG_REG35__pipe0_dr_MASK 0x00000001L
+#define VGT_DEBUG_REG35__pipe0_dr__SHIFT 0x00000000
+#define VGT_DEBUG_REG35__pipe0_rtr_MASK 0x00000004L
+#define VGT_DEBUG_REG35__pipe0_rtr__SHIFT 0x00000002
+#define VGT_DEBUG_REG35__pipe1_dr_MASK 0x00000002L
+#define VGT_DEBUG_REG35__pipe1_dr__SHIFT 0x00000001
+#define VGT_DEBUG_REG35__pipe1_rtr_MASK 0x00000008L
+#define VGT_DEBUG_REG35__pipe1_rtr__SHIFT 0x00000003
+#define VGT_DEBUG_REG35__second_tf_ret_data_q_MASK 0x08000000L
+#define VGT_DEBUG_REG35__second_tf_ret_data_q__SHIFT 0x0000001b
+#define VGT_DEBUG_REG35__spi_vgt_hs_done_cnt_q_MASK 0x0003f000L
+#define VGT_DEBUG_REG35__spi_vgt_hs_done_cnt_q__SHIFT 0x0000000c
+#define VGT_DEBUG_REG35__TC_VGT_rdret_data_in_MASK 0x80000000L
+#define VGT_DEBUG_REG35__TC_VGT_rdret_data_in__SHIFT 0x0000001f
+#define VGT_DEBUG_REG35__tf_data_fifo_busy_q_MASK 0x00000040L
+#define VGT_DEBUG_REG35__tf_data_fifo_busy_q__SHIFT 0x00000006
+#define VGT_DEBUG_REG35__tf_data_fifo_cnt_q_MASK 0x07f00000L
+#define VGT_DEBUG_REG35__tf_data_fifo_cnt_q__SHIFT 0x00000014
+#define VGT_DEBUG_REG35__tf_data_fifo_rtr_q_MASK 0x00000080L
+#define VGT_DEBUG_REG35__tf_data_fifo_rtr_q__SHIFT 0x00000007
+#define VGT_DEBUG_REG35__tfreq_tg_fifo_empty_MASK 0x00000010L
+#define VGT_DEBUG_REG35__tfreq_tg_fifo_empty__SHIFT 0x00000004
+#define VGT_DEBUG_REG35__tfreq_tg_fifo_full_MASK 0x00000020L
+#define VGT_DEBUG_REG35__tfreq_tg_fifo_full__SHIFT 0x00000005
+#define VGT_DEBUG_REG35__tf_skid_fifo_empty_MASK 0x00000100L
+#define VGT_DEBUG_REG35__tf_skid_fifo_empty__SHIFT 0x00000008
+#define VGT_DEBUG_REG35__tf_skid_fifo_full_MASK 0x00000200L
+#define VGT_DEBUG_REG35__tf_skid_fifo_full__SHIFT 0x00000009
+#define VGT_DEBUG_REG35__VGT_TC_rdnfo_stall_out_MASK 0x40000000L
+#define VGT_DEBUG_REG35__VGT_TC_rdnfo_stall_out__SHIFT 0x0000001e
+#define VGT_DEBUG_REG35__vgt_tc_rdreq_rtr_q_MASK 0x00000400L
+#define VGT_DEBUG_REG35__vgt_tc_rdreq_rtr_q__SHIFT 0x0000000a
+#define VGT_DEBUG_REG35__VGT_TC_rdreq_send_out_MASK 0x20000000L
+#define VGT_DEBUG_REG35__VGT_TC_rdreq_send_out__SHIFT 0x0000001d
+#define VGT_DEBUG_REG3__hsWaveRelInd_MASK 0xfc000000L
+#define VGT_DEBUG_REG3__hsWaveRelInd__SHIFT 0x0000001a
+#define VGT_DEBUG_REG3__lsPatchCnt_MASK 0x03fc0000L
+#define VGT_DEBUG_REG3__lsPatchCnt__SHIFT 0x00000012
+#define VGT_DEBUG_REG3__lsTgRelInd_MASK 0x00000fffL
+#define VGT_DEBUG_REG3__lsTgRelInd__SHIFT 0x00000000
+#define VGT_DEBUG_REG3__lsWaveRelInd_MASK 0x0003f000L
+#define VGT_DEBUG_REG3__lsWaveRelInd__SHIFT 0x0000000c
+#define VGT_DEBUG_REG4__hsCpCnt_MASK 0x1f000000L
+#define VGT_DEBUG_REG4__hsCpCnt__SHIFT 0x00000018
+#define VGT_DEBUG_REG4__hsFwaveFlag_MASK 0x40000000L
+#define VGT_DEBUG_REG4__hsFwaveFlag__SHIFT 0x0000001e
+#define VGT_DEBUG_REG4__hsPatchCnt_MASK 0x000000ffL
+#define VGT_DEBUG_REG4__hsPatchCnt__SHIFT 0x00000000
+#define VGT_DEBUG_REG4__hsPrimId_15_0_MASK 0x00ffff00L
+#define VGT_DEBUG_REG4__hsPrimId_15_0__SHIFT 0x00000008
+#define VGT_DEBUG_REG4__hsWaveSendFlush_MASK 0x20000000L
+#define VGT_DEBUG_REG4__hsWaveSendFlush__SHIFT 0x0000001d
+#define VGT_DEBUG_REG4__SPARE_MASK 0xffffffffL
+#define VGT_DEBUG_REG4__SPARE__SHIFT 0x00000000
+#define VGT_DEBUG_REG5__hsVertCreditCnt_0_MASK 0x0000f800L
+#define VGT_DEBUG_REG5__hsVertCreditCnt_0__SHIFT 0x0000000b
+#define VGT_DEBUG_REG5__hsWaveCreditCnt_0_MASK 0x000000f8L
+#define VGT_DEBUG_REG5__hsWaveCreditCnt_0__SHIFT 0x00000003
+#define VGT_DEBUG_REG5__lsVertCreditCnt_0_MASK 0xf8000000L
+#define VGT_DEBUG_REG5__lsVertCreditCnt_0__SHIFT 0x0000001b
+#define VGT_DEBUG_REG5__lsWaveCreditCnt_0_MASK 0x00f80000L
+#define VGT_DEBUG_REG5__lsWaveCreditCnt_0__SHIFT 0x00000013
+#define VGT_DEBUG_REG5__SPARE1_MASK 0x07000000L
+#define VGT_DEBUG_REG5__SPARE1__SHIFT 0x00000018
+#define VGT_DEBUG_REG5__SPARE2_MASK 0x00070000L
+#define VGT_DEBUG_REG5__SPARE2__SHIFT 0x00000010
+#define VGT_DEBUG_REG5__SPARE3_MASK 0x00000700L
+#define VGT_DEBUG_REG5__SPARE3__SHIFT 0x00000008
+#define VGT_DEBUG_REG5__SPARE4_MASK 0x00000007L
+#define VGT_DEBUG_REG5__SPARE4__SHIFT 0x00000000
+#define VGT_DEBUG_REG6__debug_BASE_MASK 0x0000ffffL
+#define VGT_DEBUG_REG6__debug_BASE__SHIFT 0x00000000
+#define VGT_DEBUG_REG6__debug_SIZE_MASK 0xffff0000L
+#define VGT_DEBUG_REG6__debug_SIZE__SHIFT 0x00000010
+#define VGT_DEBUG_REG7__debug_tfmmFifoEmpty_MASK 0x00000001L
+#define VGT_DEBUG_REG7__debug_tfmmFifoEmpty__SHIFT 0x00000000
+#define VGT_DEBUG_REG7__debug_tfmmFifoFull_MASK 0x00000002L
+#define VGT_DEBUG_REG7__debug_tfmmFifoFull__SHIFT 0x00000001
+#define VGT_DEBUG_REG7__hs_pipe0_dr_MASK 0x00000004L
+#define VGT_DEBUG_REG7__hs_pipe0_dr__SHIFT 0x00000002
+#define VGT_DEBUG_REG7__hs_pipe0_rtr_MASK 0x00000008L
+#define VGT_DEBUG_REG7__hs_pipe0_rtr__SHIFT 0x00000003
+#define VGT_DEBUG_REG7__hs_pipe1_rtr_MASK 0x00000010L
+#define VGT_DEBUG_REG7__hs_pipe1_rtr__SHIFT 0x00000004
+#define VGT_DEBUG_REG7__SPARE_MASK 0x0000ffe0L
+#define VGT_DEBUG_REG7__SPARE__SHIFT 0x00000005
+#define VGT_DEBUG_REG7__TF_addr_MASK 0xffff0000L
+#define VGT_DEBUG_REG7__TF_addr__SHIFT 0x00000010
+#define VGT_DEBUG_REG8__es_gs_rtr_MASK 0x00004000L
+#define VGT_DEBUG_REG8__es_gs_rtr__SHIFT 0x0000000e
+#define VGT_DEBUG_REG8__gs_event_fifo_empty_MASK 0x02000000L
+#define VGT_DEBUG_REG8__gs_event_fifo_empty__SHIFT 0x00000019
+#define VGT_DEBUG_REG8__gs_event_fifo_rtr_MASK 0x00008000L
+#define VGT_DEBUG_REG8__gs_event_fifo_rtr__SHIFT 0x0000000f
+#define VGT_DEBUG_REG8__gsprim_buff_empty_q_MASK 0x04000000L
+#define VGT_DEBUG_REG8__gsprim_buff_empty_q__SHIFT 0x0000001a
+#define VGT_DEBUG_REG8__gsprim_buff_full_q_MASK 0x08000000L
+#define VGT_DEBUG_REG8__gsprim_buff_full_q__SHIFT 0x0000001b
+#define VGT_DEBUG_REG8__gs_tbl_r3_rtr_MASK 0x00020000L
+#define VGT_DEBUG_REG8__gs_tbl_r3_rtr__SHIFT 0x00000011
+#define VGT_DEBUG_REG8__gs_tbl_valid_r3_q_MASK 0x00000020L
+#define VGT_DEBUG_REG8__gs_tbl_valid_r3_q__SHIFT 0x00000005
+#define VGT_DEBUG_REG8__hold_for_es_flush_MASK 0x01000000L
+#define VGT_DEBUG_REG8__hold_for_es_flush__SHIFT 0x00000018
+#define VGT_DEBUG_REG8__prim_skid_fifo_empty_MASK 0x00040000L
+#define VGT_DEBUG_REG8__prim_skid_fifo_empty__SHIFT 0x00000012
+#define VGT_DEBUG_REG8__r0_rtr_MASK 0x00000400L
+#define VGT_DEBUG_REG8__r0_rtr__SHIFT 0x0000000a
+#define VGT_DEBUG_REG8__r1_inst_rtr_MASK 0x00000004L
+#define VGT_DEBUG_REG8__r1_inst_rtr__SHIFT 0x00000002
+#define VGT_DEBUG_REG8__r1_rtr_MASK 0x00000800L
+#define VGT_DEBUG_REG8__r1_rtr__SHIFT 0x0000000b
+#define VGT_DEBUG_REG8__r2_indx_rtr_MASK 0x00001000L
+#define VGT_DEBUG_REG8__r2_indx_rtr__SHIFT 0x0000000c
+#define VGT_DEBUG_REG8__r2_no_bp_rtr_MASK 0x00800000L
+#define VGT_DEBUG_REG8__r2_no_bp_rtr__SHIFT 0x00000017
+#define VGT_DEBUG_REG8__r2_rtr_MASK 0x00002000L
+#define VGT_DEBUG_REG8__r2_rtr__SHIFT 0x0000000d
+#define VGT_DEBUG_REG8__rcm_busy_q_MASK 0x00000001L
+#define VGT_DEBUG_REG8__rcm_busy_q__SHIFT 0x00000000
+#define VGT_DEBUG_REG8__rcm_noif_busy_q_MASK 0x00000002L
+#define VGT_DEBUG_REG8__rcm_noif_busy_q__SHIFT 0x00000001
+#define VGT_DEBUG_REG8__spi_esvert_fifo_busy_q_MASK 0x00000010L
+#define VGT_DEBUG_REG8__spi_esvert_fifo_busy_q__SHIFT 0x00000004
+#define VGT_DEBUG_REG8__spi_gsprim_fifo_busy_q_MASK 0x00000008L
+#define VGT_DEBUG_REG8__spi_gsprim_fifo_busy_q__SHIFT 0x00000003
+#define VGT_DEBUG_REG8__te_prim_fifo_empty_MASK 0x10000000L
+#define VGT_DEBUG_REG8__te_prim_fifo_empty__SHIFT 0x0000001c
+#define VGT_DEBUG_REG8__te_prim_fifo_full_MASK 0x20000000L
+#define VGT_DEBUG_REG8__te_prim_fifo_full__SHIFT 0x0000001d
+#define VGT_DEBUG_REG8__te_vert_fifo_empty_MASK 0x40000000L
+#define VGT_DEBUG_REG8__te_vert_fifo_empty__SHIFT 0x0000001e
+#define VGT_DEBUG_REG8__te_vert_fifo_full_MASK 0x80000000L
+#define VGT_DEBUG_REG8__te_vert_fifo_full__SHIFT 0x0000001f
+#define VGT_DEBUG_REG8__tm_rcm_es_tbl_rtr_MASK 0x00200000L
+#define VGT_DEBUG_REG8__tm_rcm_es_tbl_rtr__SHIFT 0x00000015
+#define VGT_DEBUG_REG8__tm_rcm_gs_event_rtr_MASK 0x00010000L
+#define VGT_DEBUG_REG8__tm_rcm_gs_event_rtr__SHIFT 0x00000010
+#define VGT_DEBUG_REG8__tm_rcm_gs_tbl_rtr_MASK 0x00100000L
+#define VGT_DEBUG_REG8__tm_rcm_gs_tbl_rtr__SHIFT 0x00000014
+#define VGT_DEBUG_REG8__valid_r0_q_MASK 0x00000040L
+#define VGT_DEBUG_REG8__valid_r0_q__SHIFT 0x00000006
+#define VGT_DEBUG_REG8__valid_r1_q_MASK 0x00000080L
+#define VGT_DEBUG_REG8__valid_r1_q__SHIFT 0x00000007
+#define VGT_DEBUG_REG8__valid_r2_MASK 0x00000100L
+#define VGT_DEBUG_REG8__valid_r2_q_MASK 0x00000200L
+#define VGT_DEBUG_REG8__valid_r2_q__SHIFT 0x00000009
+#define VGT_DEBUG_REG8__valid_r2__SHIFT 0x00000008
+#define VGT_DEBUG_REG8__VGT_SPI_esvert_rtr_q_MASK 0x00400000L
+#define VGT_DEBUG_REG8__VGT_SPI_esvert_rtr_q__SHIFT 0x00000016
+#define VGT_DEBUG_REG8__VGT_SPI_gsprim_rtr_q_MASK 0x00080000L
+#define VGT_DEBUG_REG8__VGT_SPI_gsprim_rtr_q__SHIFT 0x00000013
+#define VGT_DEBUG_REG9__eop_indx_r3_MASK 0x00000010L
+#define VGT_DEBUG_REG9__eop_indx_r3__SHIFT 0x00000004
+#define VGT_DEBUG_REG9__eop_prim_r3_MASK 0x00000020L
+#define VGT_DEBUG_REG9__eop_prim_r3__SHIFT 0x00000005
+#define VGT_DEBUG_REG9__es_eov_r3_MASK 0x00000040L
+#define VGT_DEBUG_REG9__es_eov_r3__SHIFT 0x00000006
+#define VGT_DEBUG_REG9__es_per_gs_vert_cnt_r3_q_not_0_MASK 0x02000000L
+#define VGT_DEBUG_REG9__es_per_gs_vert_cnt_r3_q_not_0__SHIFT 0x00000019
+#define VGT_DEBUG_REG9__es_tbl_state_r3_q_0_MASK 0x00000080L
+#define VGT_DEBUG_REG9__es_tbl_state_r3_q_0__SHIFT 0x00000007
+#define VGT_DEBUG_REG9__gs_eov_r3_MASK 0x00000008L
+#define VGT_DEBUG_REG9__gs_eov_r3__SHIFT 0x00000003
+#define VGT_DEBUG_REG9__gs_instancing_state_q_MASK 0x01000000L
+#define VGT_DEBUG_REG9__gs_instancing_state_q__SHIFT 0x00000018
+#define VGT_DEBUG_REG9__gs_pending_state_r3_q_MASK 0x00400000L
+#define VGT_DEBUG_REG9__gs_pending_state_r3_q__SHIFT 0x00000016
+#define VGT_DEBUG_REG9__gs_prim_per_es_ctr_r3_q_not_0_MASK 0x04000000L
+#define VGT_DEBUG_REG9__gs_prim_per_es_ctr_r3_q_not_0__SHIFT 0x0000001a
+#define VGT_DEBUG_REG9__gs_tbl_eop_r3_q_MASK 0x00040000L
+#define VGT_DEBUG_REG9__gs_tbl_eop_r3_q__SHIFT 0x00000012
+#define VGT_DEBUG_REG9__gs_tbl_num_es_per_gs_r3_q_not_0_MASK 0x00000400L
+#define VGT_DEBUG_REG9__gs_tbl_num_es_per_gs_r3_q_not_0__SHIFT 0x0000000a
+#define VGT_DEBUG_REG9__gs_tbl_prim_cnt_r3_q_MASK 0x0003f800L
+#define VGT_DEBUG_REG9__gs_tbl_prim_cnt_r3_q__SHIFT 0x0000000b
+#define VGT_DEBUG_REG9__gs_tbl_state_r3_q_MASK 0x00380000L
+#define VGT_DEBUG_REG9__gs_tbl_state_r3_q__SHIFT 0x00000013
+#define VGT_DEBUG_REG9__indices_to_send_r2_q_MASK 0x00000003L
+#define VGT_DEBUG_REG9__indices_to_send_r2_q__SHIFT 0x00000000
+#define VGT_DEBUG_REG9__invalidate_rb_roll_over_q_MASK 0x00800000L
+#define VGT_DEBUG_REG9__invalidate_rb_roll_over_q__SHIFT 0x00000017
+#define VGT_DEBUG_REG9__off_chip_hs_r2_q_MASK 0x80000000L
+#define VGT_DEBUG_REG9__off_chip_hs_r2_q__SHIFT 0x0000001f
+#define VGT_DEBUG_REG9__pending_es_flush_r3_MASK 0x00000200L
+#define VGT_DEBUG_REG9__pending_es_flush_r3__SHIFT 0x00000009
+#define VGT_DEBUG_REG9__pending_es_send_r3_q_MASK 0x00000100L
+#define VGT_DEBUG_REG9__pending_es_send_r3_q__SHIFT 0x00000008
+#define VGT_DEBUG_REG9__pre_r0_rtr_MASK 0x08000000L
+#define VGT_DEBUG_REG9__pre_r0_rtr__SHIFT 0x0000001b
+#define VGT_DEBUG_REG9__SPARE0_MASK 0x40000000L
+#define VGT_DEBUG_REG9__SPARE0__SHIFT 0x0000001e
+#define VGT_DEBUG_REG9__valid_indices_r3_MASK 0x00000004L
+#define VGT_DEBUG_REG9__valid_indices_r3__SHIFT 0x00000002
+#define VGT_DEBUG_REG9__valid_pre_r0_q_MASK 0x20000000L
+#define VGT_DEBUG_REG9__valid_pre_r0_q__SHIFT 0x0000001d
+#define VGT_DEBUG_REG9__valid_r3_q_MASK 0x10000000L
+#define VGT_DEBUG_REG9__valid_r3_q__SHIFT 0x0000001c
+#define VGT_DMA_BASE__BASE_ADDR_MASK 0xffffffffL
+#define VGT_DMA_BASE__BASE_ADDR__SHIFT 0x00000000
+#define VGT_DMA_BASE_HI__BASE_ADDR_MASK 0x000000ffL
+#define VGT_DMA_BASE_HI__BASE_ADDR__SHIFT 0x00000000
+#define VGT_DMA_DATA_FIFO_DEPTH__DMA_DATA_FIFO_DEPTH_MASK 0x000001ffL
+#define VGT_DMA_DATA_FIFO_DEPTH__DMA_DATA_FIFO_DEPTH__SHIFT 0x00000000
+#define VGT_DMA_INDEX_TYPE__ATC_MASK 0x00000100L
+#define VGT_DMA_INDEX_TYPE__ATC__SHIFT 0x00000008
+#define VGT_DMA_INDEX_TYPE__BUF_TYPE_MASK 0x00000030L
+#define VGT_DMA_INDEX_TYPE__BUF_TYPE__SHIFT 0x00000004
+#define VGT_DMA_INDEX_TYPE__INDEX_TYPE_MASK 0x00000003L
+#define VGT_DMA_INDEX_TYPE__INDEX_TYPE__SHIFT 0x00000000
+#define VGT_DMA_INDEX_TYPE__NOT_EOP_MASK 0x00000200L
+#define VGT_DMA_INDEX_TYPE__NOT_EOP__SHIFT 0x00000009
+#define VGT_DMA_INDEX_TYPE__RDREQ_POLICY_MASK 0x000000c0L
+#define VGT_DMA_INDEX_TYPE__RDREQ_POLICY__SHIFT 0x00000006
+#define VGT_DMA_INDEX_TYPE__REQ_PATH_MASK 0x00000400L
+#define VGT_DMA_INDEX_TYPE__REQ_PATH__SHIFT 0x0000000a
+#define VGT_DMA_INDEX_TYPE__SWAP_MODE_MASK 0x0000000cL
+#define VGT_DMA_INDEX_TYPE__SWAP_MODE__SHIFT 0x00000002
+#define VGT_DMA_MAX_SIZE__MAX_SIZE_MASK 0xffffffffL
+#define VGT_DMA_MAX_SIZE__MAX_SIZE__SHIFT 0x00000000
+#define VGT_DMA_NUM_INSTANCES__NUM_INSTANCES_MASK 0xffffffffL
+#define VGT_DMA_NUM_INSTANCES__NUM_INSTANCES__SHIFT 0x00000000
+#define VGT_DMA_REQ_FIFO_DEPTH__DMA_REQ_FIFO_DEPTH_MASK 0x0000003fL
+#define VGT_DMA_REQ_FIFO_DEPTH__DMA_REQ_FIFO_DEPTH__SHIFT 0x00000000
+#define VGT_DMA_SIZE__NUM_INDICES_MASK 0xffffffffL
+#define VGT_DMA_SIZE__NUM_INDICES__SHIFT 0x00000000
+#define VGT_DRAW_INIT_FIFO_DEPTH__DRAW_INIT_FIFO_DEPTH_MASK 0x0000003fL
+#define VGT_DRAW_INIT_FIFO_DEPTH__DRAW_INIT_FIFO_DEPTH__SHIFT 0x00000000
+#define VGT_DRAW_INITIATOR__MAJOR_MODE_MASK 0x0000000cL
+#define VGT_DRAW_INITIATOR__MAJOR_MODE__SHIFT 0x00000002
+#define VGT_DRAW_INITIATOR__NOT_EOP_MASK 0x00000020L
+#define VGT_DRAW_INITIATOR__NOT_EOP__SHIFT 0x00000005
+#define VGT_DRAW_INITIATOR__SOURCE_SELECT_MASK 0x00000003L
+#define VGT_DRAW_INITIATOR__SOURCE_SELECT__SHIFT 0x00000000
+#define VGT_DRAW_INITIATOR__SPRITE_EN_R6XX_MASK 0x00000010L
+#define VGT_DRAW_INITIATOR__SPRITE_EN_R6XX__SHIFT 0x00000004
+#define VGT_DRAW_INITIATOR__USE_OPAQUE_MASK 0x00000040L
+#define VGT_DRAW_INITIATOR__USE_OPAQUE__SHIFT 0x00000006
+#define VGT_ENHANCE__MISC_MASK 0xffffffffL
+#define VGT_ENHANCE__MISC__SHIFT 0x00000000
+#define VGT_ESGS_RING_ITEMSIZE__ITEMSIZE_MASK 0x00007fffL
+#define VGT_ESGS_RING_ITEMSIZE__ITEMSIZE__SHIFT 0x00000000
+#define VGT_ESGS_RING_SIZE__MEM_SIZE_MASK 0xffffffffL
+#define VGT_ESGS_RING_SIZE__MEM_SIZE__SHIFT 0x00000000
+#define VGT_ES_PER_GS__ES_PER_GS_MASK 0x000007ffL
+#define VGT_ES_PER_GS__ES_PER_GS__SHIFT 0x00000000
+#define VGT_EVENT_ADDRESS_REG__ADDRESS_LOW_MASK 0x0fffffffL
+#define VGT_EVENT_ADDRESS_REG__ADDRESS_LOW__SHIFT 0x00000000
+#define VGT_EVENT_INITIATOR__ADDRESS_HI_MASK 0x07fc0000L
+#define VGT_EVENT_INITIATOR__ADDRESS_HI__SHIFT 0x00000012
+#define VGT_EVENT_INITIATOR__EVENT_TYPE_MASK 0x0000003fL
+#define VGT_EVENT_INITIATOR__EVENT_TYPE__SHIFT 0x00000000
+#define VGT_EVENT_INITIATOR__EXTENDED_EVENT_MASK 0x08000000L
+#define VGT_EVENT_INITIATOR__EXTENDED_EVENT__SHIFT 0x0000001b
+#define VGT_FIFO_DEPTHS__CLIPP_FIFO_DEPTH_MASK 0x003fff00L
+#define VGT_FIFO_DEPTHS__CLIPP_FIFO_DEPTH__SHIFT 0x00000008
+#define VGT_FIFO_DEPTHS__RESERVED_0_MASK 0x00000080L
+#define VGT_FIFO_DEPTHS__RESERVED_0__SHIFT 0x00000007
+#define VGT_FIFO_DEPTHS__RESERVED_1_MASK 0xffc00000L
+#define VGT_FIFO_DEPTHS__RESERVED_1__SHIFT 0x00000016
+#define VGT_FIFO_DEPTHS__VS_DEALLOC_TBL_DEPTH_MASK 0x0000007fL
+#define VGT_FIFO_DEPTHS__VS_DEALLOC_TBL_DEPTH__SHIFT 0x00000000
+#define VGT_GROUP_DECR__DECR_MASK 0x0000000fL
+#define VGT_GROUP_DECR__DECR__SHIFT 0x00000000
+#define VGT_GROUP_FIRST_DECR__FIRST_DECR_MASK 0x0000000fL
+#define VGT_GROUP_FIRST_DECR__FIRST_DECR__SHIFT 0x00000000
+#define VGT_GROUP_PRIM_TYPE__PRIM_ORDER_MASK 0x00070000L
+#define VGT_GROUP_PRIM_TYPE__PRIM_ORDER__SHIFT 0x00000010
+#define VGT_GROUP_PRIM_TYPE__PRIM_TYPE_MASK 0x0000001fL
+#define VGT_GROUP_PRIM_TYPE__PRIM_TYPE__SHIFT 0x00000000
+#define VGT_GROUP_PRIM_TYPE__RETAIN_ORDER_MASK 0x00004000L
+#define VGT_GROUP_PRIM_TYPE__RETAIN_ORDER__SHIFT 0x0000000e
+#define VGT_GROUP_PRIM_TYPE__RETAIN_QUADS_MASK 0x00008000L
+#define VGT_GROUP_PRIM_TYPE__RETAIN_QUADS__SHIFT 0x0000000f
+#define VGT_GROUP_VECT_0_CNTL__COMP_W_EN_MASK 0x00000008L
+#define VGT_GROUP_VECT_0_CNTL__COMP_W_EN__SHIFT 0x00000003
+#define VGT_GROUP_VECT_0_CNTL__COMP_X_EN_MASK 0x00000001L
+#define VGT_GROUP_VECT_0_CNTL__COMP_X_EN__SHIFT 0x00000000
+#define VGT_GROUP_VECT_0_CNTL__COMP_Y_EN_MASK 0x00000002L
+#define VGT_GROUP_VECT_0_CNTL__COMP_Y_EN__SHIFT 0x00000001
+#define VGT_GROUP_VECT_0_CNTL__COMP_Z_EN_MASK 0x00000004L
+#define VGT_GROUP_VECT_0_CNTL__COMP_Z_EN__SHIFT 0x00000002
+#define VGT_GROUP_VECT_0_CNTL__SHIFT_MASK 0x00ff0000L
+#define VGT_GROUP_VECT_0_CNTL__SHIFT__SHIFT 0x00000010
+#define VGT_GROUP_VECT_0_CNTL__STRIDE_MASK 0x0000ff00L
+#define VGT_GROUP_VECT_0_CNTL__STRIDE__SHIFT 0x00000008
+#define VGT_GROUP_VECT_0_FMT_CNTL__W_CONV_MASK 0x0f000000L
+#define VGT_GROUP_VECT_0_FMT_CNTL__W_CONV__SHIFT 0x00000018
+#define VGT_GROUP_VECT_0_FMT_CNTL__W_OFFSET_MASK 0xf0000000L
+#define VGT_GROUP_VECT_0_FMT_CNTL__W_OFFSET__SHIFT 0x0000001c
+#define VGT_GROUP_VECT_0_FMT_CNTL__X_CONV_MASK 0x0000000fL
+#define VGT_GROUP_VECT_0_FMT_CNTL__X_CONV__SHIFT 0x00000000
+#define VGT_GROUP_VECT_0_FMT_CNTL__X_OFFSET_MASK 0x000000f0L
+#define VGT_GROUP_VECT_0_FMT_CNTL__X_OFFSET__SHIFT 0x00000004
+#define VGT_GROUP_VECT_0_FMT_CNTL__Y_CONV_MASK 0x00000f00L
+#define VGT_GROUP_VECT_0_FMT_CNTL__Y_CONV__SHIFT 0x00000008
+#define VGT_GROUP_VECT_0_FMT_CNTL__Y_OFFSET_MASK 0x0000f000L
+#define VGT_GROUP_VECT_0_FMT_CNTL__Y_OFFSET__SHIFT 0x0000000c
+#define VGT_GROUP_VECT_0_FMT_CNTL__Z_CONV_MASK 0x000f0000L
+#define VGT_GROUP_VECT_0_FMT_CNTL__Z_CONV__SHIFT 0x00000010
+#define VGT_GROUP_VECT_0_FMT_CNTL__Z_OFFSET_MASK 0x00f00000L
+#define VGT_GROUP_VECT_0_FMT_CNTL__Z_OFFSET__SHIFT 0x00000014
+#define VGT_GROUP_VECT_1_CNTL__COMP_W_EN_MASK 0x00000008L
+#define VGT_GROUP_VECT_1_CNTL__COMP_W_EN__SHIFT 0x00000003
+#define VGT_GROUP_VECT_1_CNTL__COMP_X_EN_MASK 0x00000001L
+#define VGT_GROUP_VECT_1_CNTL__COMP_X_EN__SHIFT 0x00000000
+#define VGT_GROUP_VECT_1_CNTL__COMP_Y_EN_MASK 0x00000002L
+#define VGT_GROUP_VECT_1_CNTL__COMP_Y_EN__SHIFT 0x00000001
+#define VGT_GROUP_VECT_1_CNTL__COMP_Z_EN_MASK 0x00000004L
+#define VGT_GROUP_VECT_1_CNTL__COMP_Z_EN__SHIFT 0x00000002
+#define VGT_GROUP_VECT_1_CNTL__SHIFT_MASK 0x00ff0000L
+#define VGT_GROUP_VECT_1_CNTL__SHIFT__SHIFT 0x00000010
+#define VGT_GROUP_VECT_1_CNTL__STRIDE_MASK 0x0000ff00L
+#define VGT_GROUP_VECT_1_CNTL__STRIDE__SHIFT 0x00000008
+#define VGT_GROUP_VECT_1_FMT_CNTL__W_CONV_MASK 0x0f000000L
+#define VGT_GROUP_VECT_1_FMT_CNTL__W_CONV__SHIFT 0x00000018
+#define VGT_GROUP_VECT_1_FMT_CNTL__W_OFFSET_MASK 0xf0000000L
+#define VGT_GROUP_VECT_1_FMT_CNTL__W_OFFSET__SHIFT 0x0000001c
+#define VGT_GROUP_VECT_1_FMT_CNTL__X_CONV_MASK 0x0000000fL
+#define VGT_GROUP_VECT_1_FMT_CNTL__X_CONV__SHIFT 0x00000000
+#define VGT_GROUP_VECT_1_FMT_CNTL__X_OFFSET_MASK 0x000000f0L
+#define VGT_GROUP_VECT_1_FMT_CNTL__X_OFFSET__SHIFT 0x00000004
+#define VGT_GROUP_VECT_1_FMT_CNTL__Y_CONV_MASK 0x00000f00L
+#define VGT_GROUP_VECT_1_FMT_CNTL__Y_CONV__SHIFT 0x00000008
+#define VGT_GROUP_VECT_1_FMT_CNTL__Y_OFFSET_MASK 0x0000f000L
+#define VGT_GROUP_VECT_1_FMT_CNTL__Y_OFFSET__SHIFT 0x0000000c
+#define VGT_GROUP_VECT_1_FMT_CNTL__Z_CONV_MASK 0x000f0000L
+#define VGT_GROUP_VECT_1_FMT_CNTL__Z_CONV__SHIFT 0x00000010
+#define VGT_GROUP_VECT_1_FMT_CNTL__Z_OFFSET_MASK 0x00f00000L
+#define VGT_GROUP_VECT_1_FMT_CNTL__Z_OFFSET__SHIFT 0x00000014
+#define VGT_GS_INSTANCE_CNT__CNT_MASK 0x000001fcL
+#define VGT_GS_INSTANCE_CNT__CNT__SHIFT 0x00000002
+#define VGT_GS_INSTANCE_CNT__ENABLE_MASK 0x00000001L
+#define VGT_GS_INSTANCE_CNT__ENABLE__SHIFT 0x00000000
+#define VGT_GS_MAX_VERT_OUT__MAX_VERT_OUT_MASK 0x000007ffL
+#define VGT_GS_MAX_VERT_OUT__MAX_VERT_OUT__SHIFT 0x00000000
+#define VGT_GS_MODE__COMPUTE_MODE_MASK 0x00004000L
+#define VGT_GS_MODE__COMPUTE_MODE__SHIFT 0x0000000e
+#define VGT_GS_MODE__CUT_MODE_MASK 0x00000030L
+#define VGT_GS_MODE__CUT_MODE__SHIFT 0x00000004
+#define VGT_GS_MODE__ELEMENT_INFO_EN_MASK 0x00010000L
+#define VGT_GS_MODE__ELEMENT_INFO_EN__SHIFT 0x00000010
+#define VGT_GS_MODE__ES_PASSTHRU_MASK 0x00002000L
+#define VGT_GS_MODE__ES_PASSTHRU__SHIFT 0x0000000d
+#define VGT_GS_MODE__ES_WRITE_OPTIMIZE_MASK 0x00080000L
+#define VGT_GS_MODE__ES_WRITE_OPTIMIZE__SHIFT 0x00000013
+#define VGT_GS_MODE__FAST_COMPUTE_MODE_MASK 0x00008000L
+#define VGT_GS_MODE__FAST_COMPUTE_MODE__SHIFT 0x0000000f
+#define VGT_GS_MODE__GS_C_PACK_EN_MASK 0x00000800L
+#define VGT_GS_MODE__GS_C_PACK_EN__SHIFT 0x0000000b
+#define VGT_GS_MODE__GS_WRITE_OPTIMIZE_MASK 0x00100000L
+#define VGT_GS_MODE__GS_WRITE_OPTIMIZE__SHIFT 0x00000014
+#define VGT_GS_MODE__MODE_MASK 0x00000007L
+#define VGT_GS_MODE__MODE__SHIFT 0x00000000
+#define VGT_GS_MODE__ONCHIP_MASK 0x00600000L
+#define VGT_GS_MODE__ONCHIP__SHIFT 0x00000015
+#define VGT_GS_MODE__PARTIAL_THD_AT_EOI_MASK 0x00020000L
+#define VGT_GS_MODE__PARTIAL_THD_AT_EOI__SHIFT 0x00000011
+#define VGT_GS_MODE__RESERVED_0_MASK 0x00000008L
+#define VGT_GS_MODE__RESERVED_0__SHIFT 0x00000003
+#define VGT_GS_MODE__RESERVED_1_MASK 0x000007c0L
+#define VGT_GS_MODE__RESERVED_1__SHIFT 0x00000006
+#define VGT_GS_MODE__RESERVED_2_MASK 0x00001000L
+#define VGT_GS_MODE__RESERVED_2__SHIFT 0x0000000c
+#define VGT_GS_MODE__SUPPRESS_CUTS_MASK 0x00040000L
+#define VGT_GS_MODE__SUPPRESS_CUTS__SHIFT 0x00000012
+#define VGT_GS_OUT_PRIM_TYPE__OUTPRIM_TYPE_1_MASK 0x00003f00L
+#define VGT_GS_OUT_PRIM_TYPE__OUTPRIM_TYPE_1__SHIFT 0x00000008
+#define VGT_GS_OUT_PRIM_TYPE__OUTPRIM_TYPE_2_MASK 0x003f0000L
+#define VGT_GS_OUT_PRIM_TYPE__OUTPRIM_TYPE_2__SHIFT 0x00000010
+#define VGT_GS_OUT_PRIM_TYPE__OUTPRIM_TYPE_3_MASK 0x0fc00000L
+#define VGT_GS_OUT_PRIM_TYPE__OUTPRIM_TYPE_3__SHIFT 0x00000016
+#define VGT_GS_OUT_PRIM_TYPE__OUTPRIM_TYPE_MASK 0x0000003fL
+#define VGT_GS_OUT_PRIM_TYPE__OUTPRIM_TYPE__SHIFT 0x00000000
+#define VGT_GS_OUT_PRIM_TYPE__UNIQUE_TYPE_PER_STREAM_MASK 0x80000000L
+#define VGT_GS_OUT_PRIM_TYPE__UNIQUE_TYPE_PER_STREAM__SHIFT 0x0000001f
+#define VGT_GS_PER_ES__GS_PER_ES_MASK 0x000007ffL
+#define VGT_GS_PER_ES__GS_PER_ES__SHIFT 0x00000000
+#define VGT_GS_PER_VS__GS_PER_VS_MASK 0x0000000fL
+#define VGT_GS_PER_VS__GS_PER_VS__SHIFT 0x00000000
+#define VGT_GS_VERTEX_REUSE__VERT_REUSE_MASK 0x0000001fL
+#define VGT_GS_VERTEX_REUSE__VERT_REUSE__SHIFT 0x00000000
+#define VGT_GS_VERT_ITEMSIZE_1__ITEMSIZE_MASK 0x00007fffL
+#define VGT_GS_VERT_ITEMSIZE_1__ITEMSIZE__SHIFT 0x00000000
+#define VGT_GS_VERT_ITEMSIZE_2__ITEMSIZE_MASK 0x00007fffL
+#define VGT_GS_VERT_ITEMSIZE_2__ITEMSIZE__SHIFT 0x00000000
+#define VGT_GS_VERT_ITEMSIZE_3__ITEMSIZE_MASK 0x00007fffL
+#define VGT_GS_VERT_ITEMSIZE_3__ITEMSIZE__SHIFT 0x00000000
+#define VGT_GS_VERT_ITEMSIZE__ITEMSIZE_MASK 0x00007fffL
+#define VGT_GS_VERT_ITEMSIZE__ITEMSIZE__SHIFT 0x00000000
+#define VGT_GSVS_RING_ITEMSIZE__ITEMSIZE_MASK 0x00007fffL
+#define VGT_GSVS_RING_ITEMSIZE__ITEMSIZE__SHIFT 0x00000000
+#define VGT_GSVS_RING_OFFSET_1__OFFSET_MASK 0x00007fffL
+#define VGT_GSVS_RING_OFFSET_1__OFFSET__SHIFT 0x00000000
+#define VGT_GSVS_RING_OFFSET_2__OFFSET_MASK 0x00007fffL
+#define VGT_GSVS_RING_OFFSET_2__OFFSET__SHIFT 0x00000000
+#define VGT_GSVS_RING_OFFSET_3__OFFSET_MASK 0x00007fffL
+#define VGT_GSVS_RING_OFFSET_3__OFFSET__SHIFT 0x00000000
+#define VGT_GSVS_RING_SIZE__MEM_SIZE_MASK 0xffffffffL
+#define VGT_GSVS_RING_SIZE__MEM_SIZE__SHIFT 0x00000000
+#define VGT_HOS_CNTL__TESS_MODE_MASK 0x00000003L
+#define VGT_HOS_CNTL__TESS_MODE__SHIFT 0x00000000
+#define VGT_HOS_MAX_TESS_LEVEL__MAX_TESS_MASK 0xffffffffL
+#define VGT_HOS_MAX_TESS_LEVEL__MAX_TESS__SHIFT 0x00000000
+#define VGT_HOS_MIN_TESS_LEVEL__MIN_TESS_MASK 0xffffffffL
+#define VGT_HOS_MIN_TESS_LEVEL__MIN_TESS__SHIFT 0x00000000
+#define VGT_HOS_REUSE_DEPTH__REUSE_DEPTH_MASK 0x000000ffL
+#define VGT_HOS_REUSE_DEPTH__REUSE_DEPTH__SHIFT 0x00000000
+#define VGT_HS_OFFCHIP_PARAM__OFFCHIP_BUFFERING_MASK 0x0000007fL
+#define VGT_HS_OFFCHIP_PARAM__OFFCHIP_BUFFERING__SHIFT 0x00000000
+#define VGT_HS_OFFCHIP_PARAM__OFFCHIP_GRANULARITY_MASK 0x00000600L
+#define VGT_HS_OFFCHIP_PARAM__OFFCHIP_GRANULARITY__SHIFT 0x00000009
+#define VGT_IMMED_DATA__DATA_MASK 0xffffffffL
+#define VGT_IMMED_DATA__DATA__SHIFT 0x00000000
+#define VGT_INDEX_TYPE__INDEX_TYPE_MASK 0x00000003L
+#define VGT_INDEX_TYPE__INDEX_TYPE__SHIFT 0x00000000
+#define VGT_INDX_OFFSET__INDX_OFFSET_MASK 0xffffffffL
+#define VGT_INDX_OFFSET__INDX_OFFSET__SHIFT 0x00000000
+#define VGT_INSTANCE_STEP_RATE_0__STEP_RATE_MASK 0xffffffffL
+#define VGT_INSTANCE_STEP_RATE_0__STEP_RATE__SHIFT 0x00000000
+#define VGT_INSTANCE_STEP_RATE_1__STEP_RATE_MASK 0xffffffffL
+#define VGT_INSTANCE_STEP_RATE_1__STEP_RATE__SHIFT 0x00000000
+#define VGT_LAST_COPY_STATE__DST_STATE_ID_MASK 0x00070000L
+#define VGT_LAST_COPY_STATE__DST_STATE_ID__SHIFT 0x00000010
+#define VGT_LAST_COPY_STATE__SRC_STATE_ID_MASK 0x00000007L
+#define VGT_LAST_COPY_STATE__SRC_STATE_ID__SHIFT 0x00000000
+#define VGT_LS_HS_CONFIG__HS_NUM_INPUT_CP_MASK 0x00003f00L
+#define VGT_LS_HS_CONFIG__HS_NUM_INPUT_CP__SHIFT 0x00000008
+#define VGT_LS_HS_CONFIG__HS_NUM_OUTPUT_CP_MASK 0x000fc000L
+#define VGT_LS_HS_CONFIG__HS_NUM_OUTPUT_CP__SHIFT 0x0000000e
+#define VGT_LS_HS_CONFIG__NUM_PATCHES_MASK 0x000000ffL
+#define VGT_LS_HS_CONFIG__NUM_PATCHES__SHIFT 0x00000000
+#define VGT_MAX_VTX_INDX__MAX_INDX_MASK 0xffffffffL
+#define VGT_MAX_VTX_INDX__MAX_INDX__SHIFT 0x00000000
+#define VGT_MC_LAT_CNTL__MC_TIME_STAMP_RES_MASK 0x00000003L
+#define VGT_MC_LAT_CNTL__MC_TIME_STAMP_RES__SHIFT 0x00000000
+#define VGT_MIN_VTX_INDX__MIN_INDX_MASK 0xffffffffL
+#define VGT_MIN_VTX_INDX__MIN_INDX__SHIFT 0x00000000
+#define VGT_MULTI_PRIM_IB_RESET_EN__RESET_EN_MASK 0x00000001L
+#define VGT_MULTI_PRIM_IB_RESET_EN__RESET_EN__SHIFT 0x00000000
+#define VGT_MULTI_PRIM_IB_RESET_INDX__RESET_INDX_MASK 0xffffffffL
+#define VGT_MULTI_PRIM_IB_RESET_INDX__RESET_INDX__SHIFT 0x00000000
+#define VGT_NUM_INDICES__NUM_INDICES_MASK 0xffffffffL
+#define VGT_NUM_INDICES__NUM_INDICES__SHIFT 0x00000000
+#define VGT_NUM_INSTANCES__NUM_INSTANCES_MASK 0xffffffffL
+#define VGT_NUM_INSTANCES__NUM_INSTANCES__SHIFT 0x00000000
+#define VGT_OUT_DEALLOC_CNTL__DEALLOC_DIST_MASK 0x0000007fL
+#define VGT_OUT_DEALLOC_CNTL__DEALLOC_DIST__SHIFT 0x00000000
+#define VGT_OUTPUT_PATH_CNTL__PATH_SELECT_MASK 0x00000007L
+#define VGT_OUTPUT_PATH_CNTL__PATH_SELECT__SHIFT 0x00000000
+#define VGT_PERFCOUNTER0_HI__PERFCOUNTER_HI_MASK 0xffffffffL
+#define VGT_PERFCOUNTER0_HI__PERFCOUNTER_HI__SHIFT 0x00000000
+#define VGT_PERFCOUNTER0_LO__PERFCOUNTER_LO_MASK 0xffffffffL
+#define VGT_PERFCOUNTER0_LO__PERFCOUNTER_LO__SHIFT 0x00000000
+#define VGT_PERFCOUNTER0_SELECT1__PERF_MODE2_MASK 0xf0000000L
+#define VGT_PERFCOUNTER0_SELECT1__PERF_MODE2__SHIFT 0x0000001c
+#define VGT_PERFCOUNTER0_SELECT1__PERF_MODE3_MASK 0x0f000000L
+#define VGT_PERFCOUNTER0_SELECT1__PERF_MODE3__SHIFT 0x00000018
+#define VGT_PERFCOUNTER0_SELECT1__PERF_SEL2_MASK 0x000003ffL
+#define VGT_PERFCOUNTER0_SELECT1__PERF_SEL2__SHIFT 0x00000000
+#define VGT_PERFCOUNTER0_SELECT1__PERF_SEL3_MASK 0x000ffc00L
+#define VGT_PERFCOUNTER0_SELECT1__PERF_SEL3__SHIFT 0x0000000a
+#define VGT_PERFCOUNTER0_SELECT__CNTR_MODE_MASK 0x00f00000L
+#define VGT_PERFCOUNTER0_SELECT__CNTR_MODE__SHIFT 0x00000014
+#define VGT_PERFCOUNTER0_SELECT__PERF_MODE1_MASK 0x0f000000L
+#define VGT_PERFCOUNTER0_SELECT__PERF_MODE1__SHIFT 0x00000018
+#define VGT_PERFCOUNTER0_SELECT__PERF_MODE_MASK 0xf0000000L
+#define VGT_PERFCOUNTER0_SELECT__PERF_MODE__SHIFT 0x0000001c
+#define VGT_PERFCOUNTER0_SELECT__PERF_SEL1_MASK 0x000ffc00L
+#define VGT_PERFCOUNTER0_SELECT__PERF_SEL1__SHIFT 0x0000000a
+#define VGT_PERFCOUNTER0_SELECT__PERF_SEL_MASK 0x000000ffL
+#define VGT_PERFCOUNTER0_SELECT__PERF_SEL__SHIFT 0x00000000
+#define VGT_PERFCOUNTER1_HI__PERFCOUNTER_HI_MASK 0xffffffffL
+#define VGT_PERFCOUNTER1_HI__PERFCOUNTER_HI__SHIFT 0x00000000
+#define VGT_PERFCOUNTER1_LO__PERFCOUNTER_LO_MASK 0xffffffffL
+#define VGT_PERFCOUNTER1_LO__PERFCOUNTER_LO__SHIFT 0x00000000
+#define VGT_PERFCOUNTER1_SELECT1__PERF_MODE2_MASK 0xf0000000L
+#define VGT_PERFCOUNTER1_SELECT1__PERF_MODE2__SHIFT 0x0000001c
+#define VGT_PERFCOUNTER1_SELECT1__PERF_MODE3_MASK 0x0f000000L
+#define VGT_PERFCOUNTER1_SELECT1__PERF_MODE3__SHIFT 0x00000018
+#define VGT_PERFCOUNTER1_SELECT1__PERF_SEL2_MASK 0x000003ffL
+#define VGT_PERFCOUNTER1_SELECT1__PERF_SEL2__SHIFT 0x00000000
+#define VGT_PERFCOUNTER1_SELECT1__PERF_SEL3_MASK 0x000ffc00L
+#define VGT_PERFCOUNTER1_SELECT1__PERF_SEL3__SHIFT 0x0000000a
+#define VGT_PERFCOUNTER1_SELECT__CNTR_MODE_MASK 0x00f00000L
+#define VGT_PERFCOUNTER1_SELECT__CNTR_MODE__SHIFT 0x00000014
+#define VGT_PERFCOUNTER1_SELECT__PERF_MODE1_MASK 0x0f000000L
+#define VGT_PERFCOUNTER1_SELECT__PERF_MODE1__SHIFT 0x00000018
+#define VGT_PERFCOUNTER1_SELECT__PERF_MODE_MASK 0xf0000000L
+#define VGT_PERFCOUNTER1_SELECT__PERF_MODE__SHIFT 0x0000001c
+#define VGT_PERFCOUNTER1_SELECT__PERF_SEL1_MASK 0x000ffc00L
+#define VGT_PERFCOUNTER1_SELECT__PERF_SEL1__SHIFT 0x0000000a
+#define VGT_PERFCOUNTER1_SELECT__PERF_SEL_MASK 0x000000ffL
+#define VGT_PERFCOUNTER1_SELECT__PERF_SEL__SHIFT 0x00000000
+#define VGT_PERFCOUNTER2_HI__PERFCOUNTER_HI_MASK 0xffffffffL
+#define VGT_PERFCOUNTER2_HI__PERFCOUNTER_HI__SHIFT 0x00000000
+#define VGT_PERFCOUNTER2_LO__PERFCOUNTER_LO_MASK 0xffffffffL
+#define VGT_PERFCOUNTER2_LO__PERFCOUNTER_LO__SHIFT 0x00000000
+#define VGT_PERFCOUNTER2_SELECT__PERF_MODE_MASK 0xf0000000L
+#define VGT_PERFCOUNTER2_SELECT__PERF_MODE__SHIFT 0x0000001c
+#define VGT_PERFCOUNTER2_SELECT__PERF_SEL_MASK 0x000000ffL
+#define VGT_PERFCOUNTER2_SELECT__PERF_SEL__SHIFT 0x00000000
+#define VGT_PERFCOUNTER3_HI__PERFCOUNTER_HI_MASK 0xffffffffL
+#define VGT_PERFCOUNTER3_HI__PERFCOUNTER_HI__SHIFT 0x00000000
+#define VGT_PERFCOUNTER3_LO__PERFCOUNTER_LO_MASK 0xffffffffL
+#define VGT_PERFCOUNTER3_LO__PERFCOUNTER_LO__SHIFT 0x00000000
+#define VGT_PERFCOUNTER3_SELECT__PERF_MODE_MASK 0xf0000000L
+#define VGT_PERFCOUNTER3_SELECT__PERF_MODE__SHIFT 0x0000001c
+#define VGT_PERFCOUNTER3_SELECT__PERF_SEL_MASK 0x000000ffL
+#define VGT_PERFCOUNTER3_SELECT__PERF_SEL__SHIFT 0x00000000
+#define VGT_PERFCOUNTER_SEID_MASK__PERF_SEID_IGNORE_MASK_MASK 0x000000ffL
+#define VGT_PERFCOUNTER_SEID_MASK__PERF_SEID_IGNORE_MASK__SHIFT 0x00000000
+#define VGT_PRIMITIVEID_EN__DISABLE_RESET_ON_EOI_MASK 0x00000002L
+#define VGT_PRIMITIVEID_EN__DISABLE_RESET_ON_EOI__SHIFT 0x00000001
+#define VGT_PRIMITIVEID_EN__PRIMITIVEID_EN_MASK 0x00000001L
+#define VGT_PRIMITIVEID_EN__PRIMITIVEID_EN__SHIFT 0x00000000
+#define VGT_PRIMITIVEID_RESET__VALUE_MASK 0xffffffffL
+#define VGT_PRIMITIVEID_RESET__VALUE__SHIFT 0x00000000
+#define VGT_PRIMITIVE_TYPE__PRIM_TYPE_MASK 0x0000003fL
+#define VGT_PRIMITIVE_TYPE__PRIM_TYPE__SHIFT 0x00000000
+#define VGT_REUSE_OFF__REUSE_OFF_MASK 0x00000001L
+#define VGT_REUSE_OFF__REUSE_OFF__SHIFT 0x00000000
+#define VGT_SHADER_STAGES_EN__DYNAMIC_HS_MASK 0x00000100L
+#define VGT_SHADER_STAGES_EN__DYNAMIC_HS__SHIFT 0x00000008
+#define VGT_SHADER_STAGES_EN__ES_EN_MASK 0x00000018L
+#define VGT_SHADER_STAGES_EN__ES_EN__SHIFT 0x00000003
+#define VGT_SHADER_STAGES_EN__GS_EN_MASK 0x00000020L
+#define VGT_SHADER_STAGES_EN__GS_EN__SHIFT 0x00000005
+#define VGT_SHADER_STAGES_EN__HS_EN_MASK 0x00000004L
+#define VGT_SHADER_STAGES_EN__HS_EN__SHIFT 0x00000002
+#define VGT_SHADER_STAGES_EN__LS_EN_MASK 0x00000003L
+#define VGT_SHADER_STAGES_EN__LS_EN__SHIFT 0x00000000
+#define VGT_SHADER_STAGES_EN__VS_EN_MASK 0x000000c0L
+#define VGT_SHADER_STAGES_EN__VS_EN__SHIFT 0x00000006
+#define VGT_STRMOUT_BUFFER_CONFIG__STREAM_0_BUFFER_EN_MASK 0x0000000fL
+#define VGT_STRMOUT_BUFFER_CONFIG__STREAM_0_BUFFER_EN__SHIFT 0x00000000
+#define VGT_STRMOUT_BUFFER_CONFIG__STREAM_1_BUFFER_EN_MASK 0x000000f0L
+#define VGT_STRMOUT_BUFFER_CONFIG__STREAM_1_BUFFER_EN__SHIFT 0x00000004
+#define VGT_STRMOUT_BUFFER_CONFIG__STREAM_2_BUFFER_EN_MASK 0x00000f00L
+#define VGT_STRMOUT_BUFFER_CONFIG__STREAM_2_BUFFER_EN__SHIFT 0x00000008
+#define VGT_STRMOUT_BUFFER_CONFIG__STREAM_3_BUFFER_EN_MASK 0x0000f000L
+#define VGT_STRMOUT_BUFFER_CONFIG__STREAM_3_BUFFER_EN__SHIFT 0x0000000c
+#define VGT_STRMOUT_BUFFER_FILLED_SIZE_0__SIZE_MASK 0xffffffffL
+#define VGT_STRMOUT_BUFFER_FILLED_SIZE_0__SIZE__SHIFT 0x00000000
+#define VGT_STRMOUT_BUFFER_FILLED_SIZE_1__SIZE_MASK 0xffffffffL
+#define VGT_STRMOUT_BUFFER_FILLED_SIZE_1__SIZE__SHIFT 0x00000000
+#define VGT_STRMOUT_BUFFER_FILLED_SIZE_2__SIZE_MASK 0xffffffffL
+#define VGT_STRMOUT_BUFFER_FILLED_SIZE_2__SIZE__SHIFT 0x00000000
+#define VGT_STRMOUT_BUFFER_FILLED_SIZE_3__SIZE_MASK 0xffffffffL
+#define VGT_STRMOUT_BUFFER_FILLED_SIZE_3__SIZE__SHIFT 0x00000000
+#define VGT_STRMOUT_BUFFER_OFFSET_0__OFFSET_MASK 0xffffffffL
+#define VGT_STRMOUT_BUFFER_OFFSET_0__OFFSET__SHIFT 0x00000000
+#define VGT_STRMOUT_BUFFER_OFFSET_1__OFFSET_MASK 0xffffffffL
+#define VGT_STRMOUT_BUFFER_OFFSET_1__OFFSET__SHIFT 0x00000000
+#define VGT_STRMOUT_BUFFER_OFFSET_2__OFFSET_MASK 0xffffffffL
+#define VGT_STRMOUT_BUFFER_OFFSET_2__OFFSET__SHIFT 0x00000000
+#define VGT_STRMOUT_BUFFER_OFFSET_3__OFFSET_MASK 0xffffffffL
+#define VGT_STRMOUT_BUFFER_OFFSET_3__OFFSET__SHIFT 0x00000000
+#define VGT_STRMOUT_BUFFER_SIZE_0__SIZE_MASK 0xffffffffL
+#define VGT_STRMOUT_BUFFER_SIZE_0__SIZE__SHIFT 0x00000000
+#define VGT_STRMOUT_BUFFER_SIZE_1__SIZE_MASK 0xffffffffL
+#define VGT_STRMOUT_BUFFER_SIZE_1__SIZE__SHIFT 0x00000000
+#define VGT_STRMOUT_BUFFER_SIZE_2__SIZE_MASK 0xffffffffL
+#define VGT_STRMOUT_BUFFER_SIZE_2__SIZE__SHIFT 0x00000000
+#define VGT_STRMOUT_BUFFER_SIZE_3__SIZE_MASK 0xffffffffL
+#define VGT_STRMOUT_BUFFER_SIZE_3__SIZE__SHIFT 0x00000000
+#define VGT_STRMOUT_CONFIG__RAST_STREAM_MASK 0x00000070L
+#define VGT_STRMOUT_CONFIG__RAST_STREAM_MASK_MASK 0x00000f00L
+#define VGT_STRMOUT_CONFIG__RAST_STREAM_MASK__SHIFT 0x00000008
+#define VGT_STRMOUT_CONFIG__RAST_STREAM__SHIFT 0x00000004
+#define VGT_STRMOUT_CONFIG__STREAMOUT_0_EN_MASK 0x00000001L
+#define VGT_STRMOUT_CONFIG__STREAMOUT_0_EN__SHIFT 0x00000000
+#define VGT_STRMOUT_CONFIG__STREAMOUT_1_EN_MASK 0x00000002L
+#define VGT_STRMOUT_CONFIG__STREAMOUT_1_EN__SHIFT 0x00000001
+#define VGT_STRMOUT_CONFIG__STREAMOUT_2_EN_MASK 0x00000004L
+#define VGT_STRMOUT_CONFIG__STREAMOUT_2_EN__SHIFT 0x00000002
+#define VGT_STRMOUT_CONFIG__STREAMOUT_3_EN_MASK 0x00000008L
+#define VGT_STRMOUT_CONFIG__STREAMOUT_3_EN__SHIFT 0x00000003
+#define VGT_STRMOUT_CONFIG__USE_RAST_STREAM_MASK_MASK 0x80000000L
+#define VGT_STRMOUT_CONFIG__USE_RAST_STREAM_MASK__SHIFT 0x0000001f
+#define VGT_STRMOUT_DRAW_OPAQUE_BUFFER_FILLED_SIZE__SIZE_MASK 0xffffffffL
+#define VGT_STRMOUT_DRAW_OPAQUE_BUFFER_FILLED_SIZE__SIZE__SHIFT 0x00000000
+#define VGT_STRMOUT_DRAW_OPAQUE_OFFSET__OFFSET_MASK 0xffffffffL
+#define VGT_STRMOUT_DRAW_OPAQUE_OFFSET__OFFSET__SHIFT 0x00000000
+#define VGT_STRMOUT_DRAW_OPAQUE_VERTEX_STRIDE__VERTEX_STRIDE_MASK 0x000001ffL
+#define VGT_STRMOUT_DRAW_OPAQUE_VERTEX_STRIDE__VERTEX_STRIDE__SHIFT 0x00000000
+#define VGT_STRMOUT_VTX_STRIDE_0__STRIDE_MASK 0x000003ffL
+#define VGT_STRMOUT_VTX_STRIDE_0__STRIDE__SHIFT 0x00000000
+#define VGT_STRMOUT_VTX_STRIDE_1__STRIDE_MASK 0x000003ffL
+#define VGT_STRMOUT_VTX_STRIDE_1__STRIDE__SHIFT 0x00000000
+#define VGT_STRMOUT_VTX_STRIDE_2__STRIDE_MASK 0x000003ffL
+#define VGT_STRMOUT_VTX_STRIDE_2__STRIDE__SHIFT 0x00000000
+#define VGT_STRMOUT_VTX_STRIDE_3__STRIDE_MASK 0x000003ffL
+#define VGT_STRMOUT_VTX_STRIDE_3__STRIDE__SHIFT 0x00000000
+#define VGT_SYS_CONFIG__ADC_EVENT_FILTER_DISABLE_MASK 0x00000080L
+#define VGT_SYS_CONFIG__ADC_EVENT_FILTER_DISABLE__SHIFT 0x00000007
+#define VGT_SYS_CONFIG__DUAL_CORE_EN_MASK 0x00000001L
+#define VGT_SYS_CONFIG__DUAL_CORE_EN__SHIFT 0x00000000
+#define VGT_SYS_CONFIG__MAX_LS_HS_THDGRP_MASK 0x0000007eL
+#define VGT_SYS_CONFIG__MAX_LS_HS_THDGRP__SHIFT 0x00000001
+#define VGT_TF_MEMORY_BASE__BASE_MASK 0xffffffffL
+#define VGT_TF_MEMORY_BASE__BASE__SHIFT 0x00000000
+#define VGT_TF_PARAM__DEPRECATED_MASK 0x00000200L
+#define VGT_TF_PARAM__DEPRECATED__SHIFT 0x00000009
+#define VGT_TF_PARAM__DISABLE_DONUTS_MASK 0x00004000L
+#define VGT_TF_PARAM__DISABLE_DONUTS__SHIFT 0x0000000e
+#define VGT_TF_PARAM__NUM_DS_WAVES_PER_SIMD_MASK 0x00003c00L
+#define VGT_TF_PARAM__NUM_DS_WAVES_PER_SIMD__SHIFT 0x0000000a
+#define VGT_TF_PARAM__PARTITIONING_MASK 0x0000001cL
+#define VGT_TF_PARAM__PARTITIONING__SHIFT 0x00000002
+#define VGT_TF_PARAM__RDREQ_POLICY_MASK 0x00018000L
+#define VGT_TF_PARAM__RDREQ_POLICY__SHIFT 0x0000000f
+#define VGT_TF_PARAM__RESERVED_REDUC_AXIS_MASK 0x00000100L
+#define VGT_TF_PARAM__RESERVED_REDUC_AXIS__SHIFT 0x00000008
+#define VGT_TF_PARAM__TOPOLOGY_MASK 0x000000e0L
+#define VGT_TF_PARAM__TOPOLOGY__SHIFT 0x00000005
+#define VGT_TF_PARAM__TYPE_MASK 0x00000003L
+#define VGT_TF_PARAM__TYPE__SHIFT 0x00000000
+#define VGT_TF_RING_SIZE__SIZE_MASK 0x0000ffffL
+#define VGT_TF_RING_SIZE__SIZE__SHIFT 0x00000000
+#define VGT_VERTEX_REUSE_BLOCK_CNTL__VTX_REUSE_DEPTH_MASK 0x000000ffL
+#define VGT_VERTEX_REUSE_BLOCK_CNTL__VTX_REUSE_DEPTH__SHIFT 0x00000000
+#define VGT_VTX_CNT_EN__VTX_CNT_EN_MASK 0x00000001L
+#define VGT_VTX_CNT_EN__VTX_CNT_EN__SHIFT 0x00000000
+#define VGT_VTX_VECT_EJECT_REG__PRIM_COUNT_MASK 0x000003ffL
+#define VGT_VTX_VECT_EJECT_REG__PRIM_COUNT__SHIFT 0x00000000
+#define WD_DEBUG_DATA__DATA_MASK 0xffffffffL
+#define WD_DEBUG_DATA__DATA__SHIFT 0x00000000
+
+#endif
diff --git a/drivers/gpu/drm/amd/include/asic_reg/gmc/gmc_6_0_d.h b/drivers/gpu/drm/amd/include/asic_reg/gmc/gmc_6_0_d.h
new file mode 100644
index 000000000000..dc4e5b93801d
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/gmc/gmc_6_0_d.h
@@ -0,0 +1,1274 @@
+/*
+ *
+ * Copyright (C) 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef GMC_6_0_D_H
+#define GMC_6_0_D_H
+
+#define ixMC_IO_DEBUG_ACMD_CLKSEL_D0 0x00CE
+#define ixMC_IO_DEBUG_ACMD_CLKSEL_D1 0x00DE
+#define ixMC_IO_DEBUG_ACMD_MISC_D0 0x00AE
+#define ixMC_IO_DEBUG_ACMD_MISC_D1 0x00BE
+#define ixMC_IO_DEBUG_ACMD_OFSCAL_D0 0x00EE
+#define ixMC_IO_DEBUG_ACMD_OFSCAL_D1 0x00FE
+#define ixMC_IO_DEBUG_ACMD_RXPHASE_D0 0x010E
+#define ixMC_IO_DEBUG_ACMD_RXPHASE_D1 0x011E
+#define ixMC_IO_DEBUG_ACMD_TXBST_PD_D0 0x018E
+#define ixMC_IO_DEBUG_ACMD_TXBST_PD_D1 0x019E
+#define ixMC_IO_DEBUG_ACMD_TXBST_PU_D0 0x01AE
+#define ixMC_IO_DEBUG_ACMD_TXBST_PU_D1 0x01BE
+#define ixMC_IO_DEBUG_ACMD_TXPHASE_D0 0x012E
+#define ixMC_IO_DEBUG_ACMD_TXPHASE_D1 0x013E
+#define ixMC_IO_DEBUG_ACMD_TXSLF_D0 0x016E
+#define ixMC_IO_DEBUG_ACMD_TXSLF_D1 0x017E
+#define ixMC_IO_DEBUG_ADDRH_CLKSEL_D0 0x00CD
+#define ixMC_IO_DEBUG_ADDRH_CLKSEL_D1 0x00DD
+#define ixMC_IO_DEBUG_ADDRH_MISC_D0 0x00AD
+#define ixMC_IO_DEBUG_ADDRH_MISC_D1 0x00BD
+#define ixMC_IO_DEBUG_ADDRH_RXPHASE_D0 0x010D
+#define ixMC_IO_DEBUG_ADDRH_RXPHASE_D1 0x011D
+#define ixMC_IO_DEBUG_ADDRH_TXBST_PD_D0 0x018D
+#define ixMC_IO_DEBUG_ADDRH_TXBST_PD_D1 0x019D
+#define ixMC_IO_DEBUG_ADDRH_TXBST_PU_D0 0x01AD
+#define ixMC_IO_DEBUG_ADDRH_TXBST_PU_D1 0x01BD
+#define ixMC_IO_DEBUG_ADDRH_TXPHASE_D0 0x012D
+#define ixMC_IO_DEBUG_ADDRH_TXPHASE_D1 0x013D
+#define ixMC_IO_DEBUG_ADDRH_TXSLF_D0 0x016D
+#define ixMC_IO_DEBUG_ADDRH_TXSLF_D1 0x017D
+#define ixMC_IO_DEBUG_ADDRL_CLKSEL_D0 0x00CC
+#define ixMC_IO_DEBUG_ADDRL_CLKSEL_D1 0x00DC
+#define ixMC_IO_DEBUG_ADDRL_MISC_D0 0x00AC
+#define ixMC_IO_DEBUG_ADDRL_MISC_D1 0x00BC
+#define ixMC_IO_DEBUG_ADDRL_RXPHASE_D0 0x010C
+#define ixMC_IO_DEBUG_ADDRL_RXPHASE_D1 0x011C
+#define ixMC_IO_DEBUG_ADDRL_TXBST_PD_D0 0x018C
+#define ixMC_IO_DEBUG_ADDRL_TXBST_PD_D1 0x019C
+#define ixMC_IO_DEBUG_ADDRL_TXBST_PU_D0 0x01AC
+#define ixMC_IO_DEBUG_ADDRL_TXBST_PU_D1 0x01BC
+#define ixMC_IO_DEBUG_ADDRL_TXPHASE_D0 0x012C
+#define ixMC_IO_DEBUG_ADDRL_TXPHASE_D1 0x013C
+#define ixMC_IO_DEBUG_ADDRL_TXSLF_D0 0x016C
+#define ixMC_IO_DEBUG_ADDRL_TXSLF_D1 0x017C
+#define ixMC_IO_DEBUG_CK_CLKSEL_D0 0x00CB
+#define ixMC_IO_DEBUG_CK_CLKSEL_D1 0x00DB
+#define ixMC_IO_DEBUG_CK_MISC_D0 0x00AB
+#define ixMC_IO_DEBUG_CK_MISC_D1 0x00BB
+#define ixMC_IO_DEBUG_CK_RXPHASE_D0 0x010B
+#define ixMC_IO_DEBUG_CK_RXPHASE_D1 0x011B
+#define ixMC_IO_DEBUG_CK_TXBST_PD_D0 0x018B
+#define ixMC_IO_DEBUG_CK_TXBST_PD_D1 0x019B
+#define ixMC_IO_DEBUG_CK_TXBST_PU_D0 0x01AB
+#define ixMC_IO_DEBUG_CK_TXBST_PU_D1 0x01BB
+#define ixMC_IO_DEBUG_CK_TXPHASE_D0 0x012B
+#define ixMC_IO_DEBUG_CK_TXPHASE_D1 0x013B
+#define ixMC_IO_DEBUG_CK_TXSLF_D0 0x016B
+#define ixMC_IO_DEBUG_CK_TXSLF_D1 0x017B
+#define ixMC_IO_DEBUG_CMD_CLKSEL_D0 0x00CF
+#define ixMC_IO_DEBUG_CMD_CLKSEL_D1 0x00DF
+#define ixMC_IO_DEBUG_CMD_MISC_D0 0x00AF
+#define ixMC_IO_DEBUG_CMD_MISC_D1 0x00BF
+#define ixMC_IO_DEBUG_CMD_OFSCAL_D0 0x00EF
+#define ixMC_IO_DEBUG_CMD_OFSCAL_D1 0x00FF
+#define ixMC_IO_DEBUG_CMD_RX_EQ_D0 0x01CF
+#define ixMC_IO_DEBUG_CMD_RX_EQ_D1 0x01DF
+#define ixMC_IO_DEBUG_CMD_RXPHASE_D0 0x010F
+#define ixMC_IO_DEBUG_CMD_RXPHASE_D1 0x011F
+#define ixMC_IO_DEBUG_CMD_TXBST_PD_D0 0x018F
+#define ixMC_IO_DEBUG_CMD_TXBST_PD_D1 0x019F
+#define ixMC_IO_DEBUG_CMD_TXBST_PU_D0 0x01AF
+#define ixMC_IO_DEBUG_CMD_TXBST_PU_D1 0x01BF
+#define ixMC_IO_DEBUG_CMD_TXPHASE_D0 0x012F
+#define ixMC_IO_DEBUG_CMD_TXPHASE_D1 0x013F
+#define ixMC_IO_DEBUG_CMD_TXSLF_D0 0x016F
+#define ixMC_IO_DEBUG_CMD_TXSLF_D1 0x017F
+#define ixMC_IO_DEBUG_DBI_CDR_PHSIZE_D0 0x014F
+#define ixMC_IO_DEBUG_DBI_CDR_PHSIZE_D1 0x015F
+#define ixMC_IO_DEBUG_DBI_CLKSEL_D0 0x00C8
+#define ixMC_IO_DEBUG_DBI_CLKSEL_D1 0x00D8
+#define ixMC_IO_DEBUG_DBI_MISC_D0 0x00A8
+#define ixMC_IO_DEBUG_DBI_MISC_D1 0x00B8
+#define ixMC_IO_DEBUG_DBI_OFSCAL_D0 0x00E8
+#define ixMC_IO_DEBUG_DBI_OFSCAL_D1 0x00F8
+#define ixMC_IO_DEBUG_DBI_RX_EQ_D0 0x01C8
+#define ixMC_IO_DEBUG_DBI_RX_EQ_D1 0x01D8
+#define ixMC_IO_DEBUG_DBI_RXPHASE_D0 0x0108
+#define ixMC_IO_DEBUG_DBI_RXPHASE_D1 0x0118
+#define ixMC_IO_DEBUG_DBI_RX_VREF_CAL_D0 0x0148
+#define ixMC_IO_DEBUG_DBI_RX_VREF_CAL_D1 0x0158
+#define ixMC_IO_DEBUG_DBI_TXBST_PD_D0 0x0188
+#define ixMC_IO_DEBUG_DBI_TXBST_PD_D1 0x0198
+#define ixMC_IO_DEBUG_DBI_TXBST_PU_D0 0x01A8
+#define ixMC_IO_DEBUG_DBI_TXBST_PU_D1 0x01B8
+#define ixMC_IO_DEBUG_DBI_TXPHASE_D0 0x0128
+#define ixMC_IO_DEBUG_DBI_TXPHASE_D1 0x0138
+#define ixMC_IO_DEBUG_DBI_TXSLF_D0 0x0168
+#define ixMC_IO_DEBUG_DBI_TXSLF_D1 0x0178
+#define ixMC_IO_DEBUG_DQ0_RX_DYN_PM_D0 0x01CD
+#define ixMC_IO_DEBUG_DQ0_RX_DYN_PM_D1 0x01DD
+#define ixMC_IO_DEBUG_DQ0_RX_EQ_PM_D0 0x01CB
+#define ixMC_IO_DEBUG_DQ0_RX_EQ_PM_D1 0x01DB
+#define ixMC_IO_DEBUG_DQ1_RX_DYN_PM_D0 0x01CE
+#define ixMC_IO_DEBUG_DQ1_RX_DYN_PM_D1 0x01DE
+#define ixMC_IO_DEBUG_DQ1_RX_EQ_PM_D0 0x01CC
+#define ixMC_IO_DEBUG_DQ1_RX_EQ_PM_D1 0x01DC
+#define ixMC_IO_DEBUG_DQB0_CDR_PHSIZE_D0 0x014B
+#define ixMC_IO_DEBUG_DQB0_CDR_PHSIZE_D1 0x015B
+#define ixMC_IO_DEBUG_DQB0H_CLKSEL_D0 0x00C1
+#define ixMC_IO_DEBUG_DQB0H_CLKSEL_D1 0x00D1
+#define ixMC_IO_DEBUG_DQB0H_MISC_D0 0x00A1
+#define ixMC_IO_DEBUG_DQB0H_MISC_D1 0x00B1
+#define ixMC_IO_DEBUG_DQB0H_OFSCAL_D0 0x00E1
+#define ixMC_IO_DEBUG_DQB0H_OFSCAL_D1 0x00F1
+#define ixMC_IO_DEBUG_DQB0H_RX_EQ_D0 0x01C1
+#define ixMC_IO_DEBUG_DQB0H_RX_EQ_D1 0x01D1
+#define ixMC_IO_DEBUG_DQB0H_RXPHASE_D0 0x0101
+#define ixMC_IO_DEBUG_DQB0H_RXPHASE_D1 0x0111
+#define ixMC_IO_DEBUG_DQB0H_RX_VREF_CAL_D0 0x0141
+#define ixMC_IO_DEBUG_DQB0H_RX_VREF_CAL_D1 0x0151
+#define ixMC_IO_DEBUG_DQB0H_TXBST_PD_D0 0x0181
+#define ixMC_IO_DEBUG_DQB0H_TXBST_PD_D1 0x0191
+#define ixMC_IO_DEBUG_DQB0H_TXBST_PU_D0 0x01A1
+#define ixMC_IO_DEBUG_DQB0H_TXBST_PU_D1 0x01B1
+#define ixMC_IO_DEBUG_DQB0H_TXPHASE_D0 0x0121
+#define ixMC_IO_DEBUG_DQB0H_TXPHASE_D1 0x0131
+#define ixMC_IO_DEBUG_DQB0H_TXSLF_D0 0x0161
+#define ixMC_IO_DEBUG_DQB0H_TXSLF_D1 0x0171
+#define ixMC_IO_DEBUG_DQB0L_CLKSEL_D0 0x00C0
+#define ixMC_IO_DEBUG_DQB0L_CLKSEL_D1 0x00D0
+#define ixMC_IO_DEBUG_DQB0L_MISC_D0 0x00A0
+#define ixMC_IO_DEBUG_DQB0L_MISC_D1 0x00B0
+#define ixMC_IO_DEBUG_DQB0L_OFSCAL_D0 0x00E0
+#define ixMC_IO_DEBUG_DQB0L_OFSCAL_D1 0x00F0
+#define ixMC_IO_DEBUG_DQB0L_RX_EQ_D0 0x01C0
+#define ixMC_IO_DEBUG_DQB0L_RX_EQ_D1 0x01D0
+#define ixMC_IO_DEBUG_DQB0L_RXPHASE_D0 0x0100
+#define ixMC_IO_DEBUG_DQB0L_RXPHASE_D1 0x0110
+#define ixMC_IO_DEBUG_DQB0L_RX_VREF_CAL_D0 0x0140
+#define ixMC_IO_DEBUG_DQB0L_RX_VREF_CAL_D1 0x0150
+#define ixMC_IO_DEBUG_DQB0L_TXBST_PD_D0 0x0180
+#define ixMC_IO_DEBUG_DQB0L_TXBST_PD_D1 0x0190
+#define ixMC_IO_DEBUG_DQB0L_TXBST_PU_D0 0x01A0
+#define ixMC_IO_DEBUG_DQB0L_TXBST_PU_D1 0x01B0
+#define ixMC_IO_DEBUG_DQB0L_TXPHASE_D0 0x0120
+#define ixMC_IO_DEBUG_DQB0L_TXPHASE_D1 0x0130
+#define ixMC_IO_DEBUG_DQB0L_TXSLF_D0 0x0160
+#define ixMC_IO_DEBUG_DQB0L_TXSLF_D1 0x0170
+#define ixMC_IO_DEBUG_DQB1_CDR_PHSIZE_D0 0x014C
+#define ixMC_IO_DEBUG_DQB1_CDR_PHSIZE_D1 0x015C
+#define ixMC_IO_DEBUG_DQB1H_CLKSEL_D0 0x00C3
+#define ixMC_IO_DEBUG_DQB1H_CLKSEL_D1 0x00D3
+#define ixMC_IO_DEBUG_DQB1H_MISC_D0 0x00A3
+#define ixMC_IO_DEBUG_DQB1H_MISC_D1 0x00B3
+#define ixMC_IO_DEBUG_DQB1H_OFSCAL_D0 0x00E3
+#define ixMC_IO_DEBUG_DQB1H_OFSCAL_D1 0x00F3
+#define ixMC_IO_DEBUG_DQB1H_RX_EQ_D0 0x01C3
+#define ixMC_IO_DEBUG_DQB1H_RX_EQ_D1 0x01D3
+#define ixMC_IO_DEBUG_DQB1H_RXPHASE_D0 0x0103
+#define ixMC_IO_DEBUG_DQB1H_RXPHASE_D1 0x0113
+#define ixMC_IO_DEBUG_DQB1H_RX_VREF_CAL_D0 0x0143
+#define ixMC_IO_DEBUG_DQB1H_RX_VREF_CAL_D1 0x0153
+#define ixMC_IO_DEBUG_DQB1H_TXBST_PD_D0 0x0183
+#define ixMC_IO_DEBUG_DQB1H_TXBST_PD_D1 0x0193
+#define ixMC_IO_DEBUG_DQB1H_TXBST_PU_D0 0x01A3
+#define ixMC_IO_DEBUG_DQB1H_TXBST_PU_D1 0x01B3
+#define ixMC_IO_DEBUG_DQB1H_TXPHASE_D0 0x0123
+#define ixMC_IO_DEBUG_DQB1H_TXPHASE_D1 0x0133
+#define ixMC_IO_DEBUG_DQB1H_TXSLF_D0 0x0163
+#define ixMC_IO_DEBUG_DQB1H_TXSLF_D1 0x0173
+#define ixMC_IO_DEBUG_DQB1L_CLKSEL_D0 0x00C2
+#define ixMC_IO_DEBUG_DQB1L_CLKSEL_D1 0x00D2
+#define ixMC_IO_DEBUG_DQB1L_MISC_D0 0x00A2
+#define ixMC_IO_DEBUG_DQB1L_MISC_D1 0x00B2
+#define ixMC_IO_DEBUG_DQB1L_OFSCAL_D0 0x00E2
+#define ixMC_IO_DEBUG_DQB1L_OFSCAL_D1 0x00F2
+#define ixMC_IO_DEBUG_DQB1L_RX_EQ_D0 0x01C2
+#define ixMC_IO_DEBUG_DQB1L_RX_EQ_D1 0x01D2
+#define ixMC_IO_DEBUG_DQB1L_RXPHASE_D0 0x0102
+#define ixMC_IO_DEBUG_DQB1L_RXPHASE_D1 0x0112
+#define ixMC_IO_DEBUG_DQB1L_RX_VREF_CAL_D0 0x0142
+#define ixMC_IO_DEBUG_DQB1L_RX_VREF_CAL_D1 0x0152
+#define ixMC_IO_DEBUG_DQB1L_TXBST_PD_D0 0x0182
+#define ixMC_IO_DEBUG_DQB1L_TXBST_PD_D1 0x0192
+#define ixMC_IO_DEBUG_DQB1L_TXBST_PU_D0 0x01A2
+#define ixMC_IO_DEBUG_DQB1L_TXBST_PU_D1 0x01B2
+#define ixMC_IO_DEBUG_DQB1L_TXPHASE_D0 0x0122
+#define ixMC_IO_DEBUG_DQB1L_TXPHASE_D1 0x0132
+#define ixMC_IO_DEBUG_DQB1L_TXSLF_D0 0x0162
+#define ixMC_IO_DEBUG_DQB1L_TXSLF_D1 0x0172
+#define ixMC_IO_DEBUG_DQB2_CDR_PHSIZE_D0 0x014D
+#define ixMC_IO_DEBUG_DQB2_CDR_PHSIZE_D1 0x015D
+#define ixMC_IO_DEBUG_DQB2H_CLKSEL_D0 0x00C5
+#define ixMC_IO_DEBUG_DQB2H_CLKSEL_D1 0x00D5
+#define ixMC_IO_DEBUG_DQB2H_MISC_D0 0x00A5
+#define ixMC_IO_DEBUG_DQB2H_MISC_D1 0x00B5
+#define ixMC_IO_DEBUG_DQB2H_OFSCAL_D0 0x00E5
+#define ixMC_IO_DEBUG_DQB2H_OFSCAL_D1 0x00F5
+#define ixMC_IO_DEBUG_DQB2H_RX_EQ_D0 0x01C5
+#define ixMC_IO_DEBUG_DQB2H_RX_EQ_D1 0x01D5
+#define ixMC_IO_DEBUG_DQB2H_RXPHASE_D0 0x0105
+#define ixMC_IO_DEBUG_DQB2H_RXPHASE_D1 0x0115
+#define ixMC_IO_DEBUG_DQB2H_RX_VREF_CAL_D0 0x0145
+#define ixMC_IO_DEBUG_DQB2H_RX_VREF_CAL_D1 0x0155
+#define ixMC_IO_DEBUG_DQB2H_TXBST_PD_D0 0x0185
+#define ixMC_IO_DEBUG_DQB2H_TXBST_PD_D1 0x0195
+#define ixMC_IO_DEBUG_DQB2H_TXBST_PU_D0 0x01A5
+#define ixMC_IO_DEBUG_DQB2H_TXBST_PU_D1 0x01B5
+#define ixMC_IO_DEBUG_DQB2H_TXPHASE_D0 0x0125
+#define ixMC_IO_DEBUG_DQB2H_TXPHASE_D1 0x0135
+#define ixMC_IO_DEBUG_DQB2H_TXSLF_D0 0x0165
+#define ixMC_IO_DEBUG_DQB2H_TXSLF_D1 0x0175
+#define ixMC_IO_DEBUG_DQB2L_CLKSEL_D0 0x00C4
+#define ixMC_IO_DEBUG_DQB2L_CLKSEL_D1 0x00D4
+#define ixMC_IO_DEBUG_DQB2L_MISC_D0 0x00A4
+#define ixMC_IO_DEBUG_DQB2L_MISC_D1 0x00B4
+#define ixMC_IO_DEBUG_DQB2L_OFSCAL_D0 0x00E4
+#define ixMC_IO_DEBUG_DQB2L_OFSCAL_D1 0x00F4
+#define ixMC_IO_DEBUG_DQB2L_RX_EQ_D0 0x01C4
+#define ixMC_IO_DEBUG_DQB2L_RX_EQ_D1 0x01D4
+#define ixMC_IO_DEBUG_DQB2L_RXPHASE_D0 0x0104
+#define ixMC_IO_DEBUG_DQB2L_RXPHASE_D1 0x0114
+#define ixMC_IO_DEBUG_DQB2L_RX_VREF_CAL_D0 0x0144
+#define ixMC_IO_DEBUG_DQB2L_RX_VREF_CAL_D1 0x0154
+#define ixMC_IO_DEBUG_DQB2L_TXBST_PD_D0 0x0184
+#define ixMC_IO_DEBUG_DQB2L_TXBST_PD_D1 0x0194
+#define ixMC_IO_DEBUG_DQB2L_TXBST_PU_D0 0x01A4
+#define ixMC_IO_DEBUG_DQB2L_TXBST_PU_D1 0x01B4
+#define ixMC_IO_DEBUG_DQB2L_TXPHASE_D0 0x0124
+#define ixMC_IO_DEBUG_DQB2L_TXPHASE_D1 0x0134
+#define ixMC_IO_DEBUG_DQB2L_TXSLF_D0 0x0164
+#define ixMC_IO_DEBUG_DQB2L_TXSLF_D1 0x0174
+#define ixMC_IO_DEBUG_DQB3_CDR_PHSIZE_D0 0x014E
+#define ixMC_IO_DEBUG_DQB3_CDR_PHSIZE_D1 0x015E
+#define ixMC_IO_DEBUG_DQB3H_CLKSEL_D0 0x00C7
+#define ixMC_IO_DEBUG_DQB3H_CLKSEL_D1 0x00D7
+#define ixMC_IO_DEBUG_DQB3H_MISC_D0 0x00A7
+#define ixMC_IO_DEBUG_DQB3H_MISC_D1 0x00B7
+#define ixMC_IO_DEBUG_DQB3H_OFSCAL_D0 0x00E7
+#define ixMC_IO_DEBUG_DQB3H_OFSCAL_D1 0x00F7
+#define ixMC_IO_DEBUG_DQB3H_RX_EQ_D0 0x01C7
+#define ixMC_IO_DEBUG_DQB3H_RX_EQ_D1 0x01D7
+#define ixMC_IO_DEBUG_DQB3H_RXPHASE_D0 0x0107
+#define ixMC_IO_DEBUG_DQB3H_RXPHASE_D1 0x0117
+#define ixMC_IO_DEBUG_DQB3H_RX_VREF_CAL_D0 0x0147
+#define ixMC_IO_DEBUG_DQB3H_RX_VREF_CAL_D1 0x0157
+#define ixMC_IO_DEBUG_DQB3H_TXBST_PD_D0 0x0187
+#define ixMC_IO_DEBUG_DQB3H_TXBST_PD_D1 0x0197
+#define ixMC_IO_DEBUG_DQB3H_TXBST_PU_D0 0x01A7
+#define ixMC_IO_DEBUG_DQB3H_TXBST_PU_D1 0x01B7
+#define ixMC_IO_DEBUG_DQB3H_TXPHASE_D0 0x0127
+#define ixMC_IO_DEBUG_DQB3H_TXPHASE_D1 0x0137
+#define ixMC_IO_DEBUG_DQB3H_TXSLF_D0 0x0167
+#define ixMC_IO_DEBUG_DQB3H_TXSLF_D1 0x0177
+#define ixMC_IO_DEBUG_DQB3L_CLKSEL_D0 0x00C6
+#define ixMC_IO_DEBUG_DQB3L_CLKSEL_D1 0x00D6
+#define ixMC_IO_DEBUG_DQB3L_MISC_D0 0x00A6
+#define ixMC_IO_DEBUG_DQB3L_MISC_D1 0x00B6
+#define ixMC_IO_DEBUG_DQB3L_OFSCAL_D0 0x00E6
+#define ixMC_IO_DEBUG_DQB3L_OFSCAL_D1 0x00F6
+#define ixMC_IO_DEBUG_DQB3L_RX_EQ_D0 0x01C6
+#define ixMC_IO_DEBUG_DQB3L_RX_EQ_D1 0x01D6
+#define ixMC_IO_DEBUG_DQB3L_RXPHASE_D0 0x0106
+#define ixMC_IO_DEBUG_DQB3L_RXPHASE_D1 0x0116
+#define ixMC_IO_DEBUG_DQB3L_RX_VREF_CAL_D0 0x0146
+#define ixMC_IO_DEBUG_DQB3L_RX_VREF_CAL_D1 0x0156
+#define ixMC_IO_DEBUG_DQB3L_TXBST_PD_D0 0x0186
+#define ixMC_IO_DEBUG_DQB3L_TXBST_PD_D1 0x0196
+#define ixMC_IO_DEBUG_DQB3L_TXBST_PU_D0 0x01A6
+#define ixMC_IO_DEBUG_DQB3L_TXBST_PU_D1 0x01B6
+#define ixMC_IO_DEBUG_DQB3L_TXPHASE_D0 0x0126
+#define ixMC_IO_DEBUG_DQB3L_TXPHASE_D1 0x0136
+#define ixMC_IO_DEBUG_DQB3L_TXSLF_D0 0x0166
+#define ixMC_IO_DEBUG_DQB3L_TXSLF_D1 0x0176
+#define ixMC_IO_DEBUG_EDC_CDR_PHSIZE_D0 0x00ED
+#define ixMC_IO_DEBUG_EDC_CDR_PHSIZE_D1 0x00FD
+#define ixMC_IO_DEBUG_EDC_CLKSEL_D0 0x00C9
+#define ixMC_IO_DEBUG_EDC_CLKSEL_D1 0x00D9
+#define ixMC_IO_DEBUG_EDC_MISC_D0 0x00A9
+#define ixMC_IO_DEBUG_EDC_MISC_D1 0x00B9
+#define ixMC_IO_DEBUG_EDC_OFSCAL_D0 0x00E9
+#define ixMC_IO_DEBUG_EDC_OFSCAL_D1 0x00F9
+#define ixMC_IO_DEBUG_EDC_RX_DYN_PM_D0 0x00EC
+#define ixMC_IO_DEBUG_EDC_RX_DYN_PM_D1 0x00FC
+#define ixMC_IO_DEBUG_EDC_RX_EQ_D0 0x01C9
+#define ixMC_IO_DEBUG_EDC_RX_EQ_D1 0x01D9
+#define ixMC_IO_DEBUG_EDC_RX_EQ_PM_D0 0x00EB
+#define ixMC_IO_DEBUG_EDC_RX_EQ_PM_D1 0x00FB
+#define ixMC_IO_DEBUG_EDC_RXPHASE_D0 0x0109
+#define ixMC_IO_DEBUG_EDC_RXPHASE_D1 0x0119
+#define ixMC_IO_DEBUG_EDC_RX_VREF_CAL_D0 0x0149
+#define ixMC_IO_DEBUG_EDC_RX_VREF_CAL_D1 0x0159
+#define ixMC_IO_DEBUG_EDC_TXBST_PD_D0 0x0189
+#define ixMC_IO_DEBUG_EDC_TXBST_PD_D1 0x0199
+#define ixMC_IO_DEBUG_EDC_TXBST_PU_D0 0x01A9
+#define ixMC_IO_DEBUG_EDC_TXBST_PU_D1 0x01B9
+#define ixMC_IO_DEBUG_EDC_TXPHASE_D0 0x0129
+#define ixMC_IO_DEBUG_EDC_TXPHASE_D1 0x0139
+#define ixMC_IO_DEBUG_EDC_TXSLF_D0 0x0169
+#define ixMC_IO_DEBUG_EDC_TXSLF_D1 0x0179
+#define ixMC_IO_DEBUG_UP_0 0x0000
+#define ixMC_IO_DEBUG_UP_100 0x0064
+#define ixMC_IO_DEBUG_UP_10 0x000A
+#define ixMC_IO_DEBUG_UP_101 0x0065
+#define ixMC_IO_DEBUG_UP_102 0x0066
+#define ixMC_IO_DEBUG_UP_103 0x0067
+#define ixMC_IO_DEBUG_UP_104 0x0068
+#define ixMC_IO_DEBUG_UP_105 0x0069
+#define ixMC_IO_DEBUG_UP_106 0x006A
+#define ixMC_IO_DEBUG_UP_107 0x006B
+#define ixMC_IO_DEBUG_UP_108 0x006C
+#define ixMC_IO_DEBUG_UP_109 0x006D
+#define ixMC_IO_DEBUG_UP_1 0x0001
+#define ixMC_IO_DEBUG_UP_110 0x006E
+#define ixMC_IO_DEBUG_UP_11 0x000B
+#define ixMC_IO_DEBUG_UP_111 0x006F
+#define ixMC_IO_DEBUG_UP_112 0x0070
+#define ixMC_IO_DEBUG_UP_113 0x0071
+#define ixMC_IO_DEBUG_UP_114 0x0072
+#define ixMC_IO_DEBUG_UP_115 0x0073
+#define ixMC_IO_DEBUG_UP_116 0x0074
+#define ixMC_IO_DEBUG_UP_117 0x0075
+#define ixMC_IO_DEBUG_UP_118 0x0076
+#define ixMC_IO_DEBUG_UP_119 0x0077
+#define ixMC_IO_DEBUG_UP_120 0x0078
+#define ixMC_IO_DEBUG_UP_12 0x000C
+#define ixMC_IO_DEBUG_UP_121 0x0079
+#define ixMC_IO_DEBUG_UP_122 0x007A
+#define ixMC_IO_DEBUG_UP_123 0x007B
+#define ixMC_IO_DEBUG_UP_124 0x007C
+#define ixMC_IO_DEBUG_UP_125 0x007D
+#define ixMC_IO_DEBUG_UP_126 0x007E
+#define ixMC_IO_DEBUG_UP_127 0x007F
+#define ixMC_IO_DEBUG_UP_128 0x0080
+#define ixMC_IO_DEBUG_UP_129 0x0081
+#define ixMC_IO_DEBUG_UP_130 0x0082
+#define ixMC_IO_DEBUG_UP_13 0x000D
+#define ixMC_IO_DEBUG_UP_131 0x0083
+#define ixMC_IO_DEBUG_UP_132 0x0084
+#define ixMC_IO_DEBUG_UP_133 0x0085
+#define ixMC_IO_DEBUG_UP_134 0x0086
+#define ixMC_IO_DEBUG_UP_135 0x0087
+#define ixMC_IO_DEBUG_UP_136 0x0088
+#define ixMC_IO_DEBUG_UP_137 0x0089
+#define ixMC_IO_DEBUG_UP_138 0x008A
+#define ixMC_IO_DEBUG_UP_139 0x008B
+#define ixMC_IO_DEBUG_UP_140 0x008C
+#define ixMC_IO_DEBUG_UP_14 0x000E
+#define ixMC_IO_DEBUG_UP_141 0x008D
+#define ixMC_IO_DEBUG_UP_142 0x008E
+#define ixMC_IO_DEBUG_UP_143 0x008F
+#define ixMC_IO_DEBUG_UP_144 0x0090
+#define ixMC_IO_DEBUG_UP_145 0x0091
+#define ixMC_IO_DEBUG_UP_146 0x0092
+#define ixMC_IO_DEBUG_UP_147 0x0093
+#define ixMC_IO_DEBUG_UP_148 0x0094
+#define ixMC_IO_DEBUG_UP_149 0x0095
+#define ixMC_IO_DEBUG_UP_150 0x0096
+#define ixMC_IO_DEBUG_UP_15 0x000F
+#define ixMC_IO_DEBUG_UP_151 0x0097
+#define ixMC_IO_DEBUG_UP_152 0x0098
+#define ixMC_IO_DEBUG_UP_153 0x0099
+#define ixMC_IO_DEBUG_UP_154 0x009A
+#define ixMC_IO_DEBUG_UP_155 0x009B
+#define ixMC_IO_DEBUG_UP_156 0x009C
+#define ixMC_IO_DEBUG_UP_157 0x009D
+#define ixMC_IO_DEBUG_UP_158 0x009E
+#define ixMC_IO_DEBUG_UP_159 0x009F
+#define ixMC_IO_DEBUG_UP_16 0x0010
+#define ixMC_IO_DEBUG_UP_17 0x0011
+#define ixMC_IO_DEBUG_UP_18 0x0012
+#define ixMC_IO_DEBUG_UP_19 0x0013
+#define ixMC_IO_DEBUG_UP_20 0x0014
+#define ixMC_IO_DEBUG_UP_2 0x0002
+#define ixMC_IO_DEBUG_UP_21 0x0015
+#define ixMC_IO_DEBUG_UP_22 0x0016
+#define ixMC_IO_DEBUG_UP_23 0x0017
+#define ixMC_IO_DEBUG_UP_24 0x0018
+#define ixMC_IO_DEBUG_UP_25 0x0019
+#define ixMC_IO_DEBUG_UP_26 0x001A
+#define ixMC_IO_DEBUG_UP_27 0x001B
+#define ixMC_IO_DEBUG_UP_28 0x001C
+#define ixMC_IO_DEBUG_UP_29 0x001D
+#define ixMC_IO_DEBUG_UP_30 0x001E
+#define ixMC_IO_DEBUG_UP_3 0x0003
+#define ixMC_IO_DEBUG_UP_31 0x001F
+#define ixMC_IO_DEBUG_UP_32 0x0020
+#define ixMC_IO_DEBUG_UP_33 0x0021
+#define ixMC_IO_DEBUG_UP_34 0x0022
+#define ixMC_IO_DEBUG_UP_35 0x0023
+#define ixMC_IO_DEBUG_UP_36 0x0024
+#define ixMC_IO_DEBUG_UP_37 0x0025
+#define ixMC_IO_DEBUG_UP_38 0x0026
+#define ixMC_IO_DEBUG_UP_39 0x0027
+#define ixMC_IO_DEBUG_UP_40 0x0028
+#define ixMC_IO_DEBUG_UP_4 0x0004
+#define ixMC_IO_DEBUG_UP_41 0x0029
+#define ixMC_IO_DEBUG_UP_42 0x002A
+#define ixMC_IO_DEBUG_UP_43 0x002B
+#define ixMC_IO_DEBUG_UP_44 0x002C
+#define ixMC_IO_DEBUG_UP_45 0x002D
+#define ixMC_IO_DEBUG_UP_46 0x002E
+#define ixMC_IO_DEBUG_UP_47 0x002F
+#define ixMC_IO_DEBUG_UP_48 0x0030
+#define ixMC_IO_DEBUG_UP_49 0x0031
+#define ixMC_IO_DEBUG_UP_50 0x0032
+#define ixMC_IO_DEBUG_UP_5 0x0005
+#define ixMC_IO_DEBUG_UP_51 0x0033
+#define ixMC_IO_DEBUG_UP_52 0x0034
+#define ixMC_IO_DEBUG_UP_53 0x0035
+#define ixMC_IO_DEBUG_UP_54 0x0036
+#define ixMC_IO_DEBUG_UP_55 0x0037
+#define ixMC_IO_DEBUG_UP_56 0x0038
+#define ixMC_IO_DEBUG_UP_57 0x0039
+#define ixMC_IO_DEBUG_UP_58 0x003A
+#define ixMC_IO_DEBUG_UP_59 0x003B
+#define ixMC_IO_DEBUG_UP_60 0x003C
+#define ixMC_IO_DEBUG_UP_6 0x0006
+#define ixMC_IO_DEBUG_UP_61 0x003D
+#define ixMC_IO_DEBUG_UP_62 0x003E
+#define ixMC_IO_DEBUG_UP_63 0x003F
+#define ixMC_IO_DEBUG_UP_64 0x0040
+#define ixMC_IO_DEBUG_UP_65 0x0041
+#define ixMC_IO_DEBUG_UP_66 0x0042
+#define ixMC_IO_DEBUG_UP_67 0x0043
+#define ixMC_IO_DEBUG_UP_68 0x0044
+#define ixMC_IO_DEBUG_UP_69 0x0045
+#define ixMC_IO_DEBUG_UP_70 0x0046
+#define ixMC_IO_DEBUG_UP_7 0x0007
+#define ixMC_IO_DEBUG_UP_71 0x0047
+#define ixMC_IO_DEBUG_UP_72 0x0048
+#define ixMC_IO_DEBUG_UP_73 0x0049
+#define ixMC_IO_DEBUG_UP_74 0x004A
+#define ixMC_IO_DEBUG_UP_75 0x004B
+#define ixMC_IO_DEBUG_UP_76 0x004C
+#define ixMC_IO_DEBUG_UP_77 0x004D
+#define ixMC_IO_DEBUG_UP_78 0x004E
+#define ixMC_IO_DEBUG_UP_79 0x004F
+#define ixMC_IO_DEBUG_UP_80 0x0050
+#define ixMC_IO_DEBUG_UP_8 0x0008
+#define ixMC_IO_DEBUG_UP_81 0x0051
+#define ixMC_IO_DEBUG_UP_82 0x0052
+#define ixMC_IO_DEBUG_UP_83 0x0053
+#define ixMC_IO_DEBUG_UP_84 0x0054
+#define ixMC_IO_DEBUG_UP_85 0x0055
+#define ixMC_IO_DEBUG_UP_86 0x0056
+#define ixMC_IO_DEBUG_UP_87 0x0057
+#define ixMC_IO_DEBUG_UP_88 0x0058
+#define ixMC_IO_DEBUG_UP_89 0x0059
+#define ixMC_IO_DEBUG_UP_90 0x005A
+#define ixMC_IO_DEBUG_UP_9 0x0009
+#define ixMC_IO_DEBUG_UP_91 0x005B
+#define ixMC_IO_DEBUG_UP_92 0x005C
+#define ixMC_IO_DEBUG_UP_93 0x005D
+#define ixMC_IO_DEBUG_UP_94 0x005E
+#define ixMC_IO_DEBUG_UP_95 0x005F
+#define ixMC_IO_DEBUG_UP_96 0x0060
+#define ixMC_IO_DEBUG_UP_97 0x0061
+#define ixMC_IO_DEBUG_UP_98 0x0062
+#define ixMC_IO_DEBUG_UP_99 0x0063
+#define ixMC_IO_DEBUG_WCDR_CDR_PHSIZE_D0 0x01EA
+#define ixMC_IO_DEBUG_WCDR_CDR_PHSIZE_D1 0x01FA
+#define ixMC_IO_DEBUG_WCDR_CLKSEL_D0 0x01E1
+#define ixMC_IO_DEBUG_WCDR_CLKSEL_D1 0x01F1
+#define ixMC_IO_DEBUG_WCDR_MISC_D0 0x01E0
+#define ixMC_IO_DEBUG_WCDR_MISC_D1 0x01F0
+#define ixMC_IO_DEBUG_WCDR_OFSCAL_D0 0x01E2
+#define ixMC_IO_DEBUG_WCDR_OFSCAL_D1 0x01F2
+#define ixMC_IO_DEBUG_WCDR_RX_DYN_PM_D0 0x01EC
+#define ixMC_IO_DEBUG_WCDR_RX_DYN_PM_D1 0x01FC
+#define ixMC_IO_DEBUG_WCDR_RX_EQ_D0 0x01E9
+#define ixMC_IO_DEBUG_WCDR_RX_EQ_D1 0x01F9
+#define ixMC_IO_DEBUG_WCDR_RX_EQ_PM_D0 0x01EB
+#define ixMC_IO_DEBUG_WCDR_RX_EQ_PM_D1 0x01FB
+#define ixMC_IO_DEBUG_WCDR_RXPHASE_D0 0x01E3
+#define ixMC_IO_DEBUG_WCDR_RXPHASE_D1 0x01F3
+#define ixMC_IO_DEBUG_WCDR_RX_VREF_CAL_D0 0x01E5
+#define ixMC_IO_DEBUG_WCDR_RX_VREF_CAL_D1 0x01F5
+#define ixMC_IO_DEBUG_WCDR_TXBST_PD_D0 0x01E7
+#define ixMC_IO_DEBUG_WCDR_TXBST_PD_D1 0x01F7
+#define ixMC_IO_DEBUG_WCDR_TXBST_PU_D0 0x01E8
+#define ixMC_IO_DEBUG_WCDR_TXBST_PU_D1 0x01F8
+#define ixMC_IO_DEBUG_WCDR_TXPHASE_D0 0x01E4
+#define ixMC_IO_DEBUG_WCDR_TXPHASE_D1 0x01F4
+#define ixMC_IO_DEBUG_WCDR_TXSLF_D0 0x01E6
+#define ixMC_IO_DEBUG_WCDR_TXSLF_D1 0x01F6
+#define ixMC_IO_DEBUG_WCK_CLKSEL_D0 0x00CA
+#define ixMC_IO_DEBUG_WCK_CLKSEL_D1 0x00DA
+#define ixMC_IO_DEBUG_WCK_MISC_D0 0x00AA
+#define ixMC_IO_DEBUG_WCK_MISC_D1 0x00BA
+#define ixMC_IO_DEBUG_WCK_OFSCAL_D0 0x00EA
+#define ixMC_IO_DEBUG_WCK_OFSCAL_D1 0x00FA
+#define ixMC_IO_DEBUG_WCK_RX_EQ_D0 0x01CA
+#define ixMC_IO_DEBUG_WCK_RX_EQ_D1 0x01DA
+#define ixMC_IO_DEBUG_WCK_RXPHASE_D0 0x010A
+#define ixMC_IO_DEBUG_WCK_RXPHASE_D1 0x011A
+#define ixMC_IO_DEBUG_WCK_RX_VREF_CAL_D0 0x014A
+#define ixMC_IO_DEBUG_WCK_RX_VREF_CAL_D1 0x015A
+#define ixMC_IO_DEBUG_WCK_TXBST_PD_D0 0x018A
+#define ixMC_IO_DEBUG_WCK_TXBST_PD_D1 0x019A
+#define ixMC_IO_DEBUG_WCK_TXBST_PU_D0 0x01AA
+#define ixMC_IO_DEBUG_WCK_TXBST_PU_D1 0x01BA
+#define ixMC_IO_DEBUG_WCK_TXPHASE_D0 0x012A
+#define ixMC_IO_DEBUG_WCK_TXPHASE_D1 0x013A
+#define ixMC_IO_DEBUG_WCK_TXSLF_D0 0x016A
+#define ixMC_IO_DEBUG_WCK_TXSLF_D1 0x017A
+#define ixMC_TSM_DEBUG_BCNT0 0x0003
+#define ixMC_TSM_DEBUG_BCNT10 0x000D
+#define ixMC_TSM_DEBUG_BCNT1 0x0004
+#define ixMC_TSM_DEBUG_BCNT2 0x0005
+#define ixMC_TSM_DEBUG_BCNT3 0x0006
+#define ixMC_TSM_DEBUG_BCNT4 0x0007
+#define ixMC_TSM_DEBUG_BCNT5 0x0008
+#define ixMC_TSM_DEBUG_BCNT6 0x0009
+#define ixMC_TSM_DEBUG_BCNT7 0x000A
+#define ixMC_TSM_DEBUG_BCNT8 0x000B
+#define ixMC_TSM_DEBUG_BCNT9 0x000C
+#define ixMC_TSM_DEBUG_BKPT 0x0013
+#define ixMC_TSM_DEBUG_FLAG 0x0001
+#define ixMC_TSM_DEBUG_GCNT 0x0000
+#define ixMC_TSM_DEBUG_MISC 0x0002
+#define ixMC_TSM_DEBUG_ST01 0x0010
+#define ixMC_TSM_DEBUG_ST23 0x0011
+#define ixMC_TSM_DEBUG_ST45 0x0012
+#define mmATC_ATS_CNTL 0x0CC9
+#define mmATC_ATS_DEBUG 0x0CCA
+#define mmATC_ATS_DEFAULT_PAGE_CNTL 0x0CD1
+#define mmATC_ATS_DEFAULT_PAGE_LOW 0x0CD0
+#define mmATC_ATS_FAULT_CNTL 0x0CCD
+#define mmATC_ATS_FAULT_DEBUG 0x0CCB
+#define mmATC_ATS_FAULT_STATUS_ADDR 0x0CCF
+#define mmATC_ATS_FAULT_STATUS_INFO 0x0CCE
+#define mmATC_ATS_STATUS 0x0CCC
+#define mmATC_L1_ADDRESS_OFFSET 0x0CDD
+#define mmATC_L1_CNTL 0x0CDC
+#define mmATC_L1RD_DEBUG_TLB 0x0CDE
+#define mmATC_L1RD_STATUS 0x0CE0
+#define mmATC_L1WR_DEBUG_TLB 0x0CDF
+#define mmATC_L1WR_STATUS 0x0CE1
+#define mmATC_L2_CNTL 0x0CD5
+#define mmATC_L2_DEBUG 0x0CD7
+#define mmATC_MISC_CG 0x0CD4
+#define mmATC_VM_APERTURE0_CNTL 0x0CC4
+#define mmATC_VM_APERTURE0_CNTL2 0x0CC6
+#define mmATC_VM_APERTURE0_HIGH_ADDR 0x0CC2
+#define mmATC_VM_APERTURE0_LOW_ADDR 0x0CC0
+#define mmATC_VM_APERTURE1_CNTL 0x0CC5
+#define mmATC_VM_APERTURE1_CNTL2 0x0CC7
+#define mmATC_VM_APERTURE1_HIGH_ADDR 0x0CC3
+#define mmATC_VM_APERTURE1_LOW_ADDR 0x0CC1
+#define mmATC_VMID0_PASID_MAPPING 0x0CE7
+#define mmATC_VMID10_PASID_MAPPING 0x0CF1
+#define mmATC_VMID11_PASID_MAPPING 0x0CF2
+#define mmATC_VMID12_PASID_MAPPING 0x0CF3
+#define mmATC_VMID13_PASID_MAPPING 0x0CF4
+#define mmATC_VMID14_PASID_MAPPING 0x0CF5
+#define mmATC_VMID15_PASID_MAPPING 0x0CF6
+#define mmATC_VMID1_PASID_MAPPING 0x0CE8
+#define mmATC_VMID2_PASID_MAPPING 0x0CE9
+#define mmATC_VMID3_PASID_MAPPING 0x0CEA
+#define mmATC_VMID4_PASID_MAPPING 0x0CEB
+#define mmATC_VMID5_PASID_MAPPING 0x0CEC
+#define mmATC_VMID6_PASID_MAPPING 0x0CED
+#define mmATC_VMID7_PASID_MAPPING 0x0CEE
+#define mmATC_VMID8_PASID_MAPPING 0x0CEF
+#define mmATC_VMID9_PASID_MAPPING 0x0CF0
+#define mmATC_VMID_PASID_MAPPING_UPDATE_STATUS 0x0CE6
+#define mmCC_MC_MAX_CHANNEL 0x096E
+#define mmDLL_CNTL 0x0AE9
+#define mmGMCON_DEBUG 0x0D5F
+#define mmGMCON_MISC 0x0D43
+#define mmGMCON_MISC2 0x0D44
+#define mmGMCON_MISC3 0x0D51
+#define mmGMCON_PERF_MON_CNTL0 0x0D4A
+#define mmGMCON_PERF_MON_CNTL1 0x0D4B
+#define mmGMCON_PERF_MON_RSLT0 0x0D4C
+#define mmGMCON_PERF_MON_RSLT1 0x0D4D
+#define mmGMCON_PGFSM_CONFIG 0x0D4E
+#define mmGMCON_PGFSM_READ 0x0D50
+#define mmGMCON_PGFSM_WRITE 0x0D4F
+#define mmGMCON_RENG_EXECUTE 0x0D42
+#define mmGMCON_RENG_RAM_DATA 0x0D41
+#define mmGMCON_RENG_RAM_INDEX 0x0D40
+#define mmGMCON_STCTRL_REGISTER_SAVE_EXCL_SET0 0x0D48
+#define mmGMCON_STCTRL_REGISTER_SAVE_EXCL_SET1 0x0D49
+#define mmGMCON_STCTRL_REGISTER_SAVE_RANGE0 0x0D45
+#define mmGMCON_STCTRL_REGISTER_SAVE_RANGE1 0x0D46
+#define mmGMCON_STCTRL_REGISTER_SAVE_RANGE2 0x0D47
+#define mmMC_ARB_ADDR_HASH 0x09DC
+#define mmMC_ARB_AGE_RD 0x09E9
+#define mmMC_ARB_AGE_WR 0x09EA
+#define mmMC_ARB_BANKMAP 0x09D7
+#define mmMC_ARB_BURST_TIME 0x0A02
+#define mmMC_ARB_CAC_CNTL 0x09D4
+#define mmMC_ARB_CG 0x09FA
+#define mmMC_ARB_DRAM_TIMING 0x09DD
+#define mmMC_ARB_DRAM_TIMING_1 0x09FC
+#define mmMC_ARB_DRAM_TIMING2 0x09DE
+#define mmMC_ARB_DRAM_TIMING2_1 0x09FF
+#define mmMC_ARB_FED_CNTL 0x09C1
+#define mmMC_ARB_GDEC_RD_CNTL 0x09EE
+#define mmMC_ARB_GDEC_WR_CNTL 0x09EF
+#define mmMC_ARB_GECC2 0x09C9
+#define mmMC_ARB_GECC2_CLI 0x09CA
+#define mmMC_ARB_GECC2_DEBUG 0x09C4
+#define mmMC_ARB_GECC2_DEBUG2 0x09C5
+#define mmMC_ARB_GECC2_MISC 0x09C3
+#define mmMC_ARB_GECC2_STATUS 0x09C2
+#define mmMC_ARB_LAZY0_RD 0x09E5
+#define mmMC_ARB_LAZY0_WR 0x09E6
+#define mmMC_ARB_LAZY1_RD 0x09E7
+#define mmMC_ARB_LAZY1_WR 0x09E8
+#define mmMC_ARB_LM_RD 0x09F0
+#define mmMC_ARB_LM_WR 0x09F1
+#define mmMC_ARB_MINCLKS 0x09DA
+#define mmMC_ARB_MISC 0x09D6
+#define mmMC_ARB_MISC2 0x09D5
+#define mmMC_ARB_PM_CNTL 0x09ED
+#define mmMC_ARB_POP 0x09D9
+#define mmMC_ARB_RAMCFG 0x09D8
+#define mmMC_ARB_REMREQ 0x09F2
+#define mmMC_ARB_REPLAY 0x09F3
+#define mmMC_ARB_RET_CREDITS_RD 0x09F4
+#define mmMC_ARB_RET_CREDITS_WR 0x09F5
+#define mmMC_ARB_RFSH_CNTL 0x09EB
+#define mmMC_ARB_RFSH_RATE 0x09EC
+#define mmMC_ARB_RTT_CNTL0 0x09D0
+#define mmMC_ARB_RTT_CNTL1 0x09D1
+#define mmMC_ARB_RTT_CNTL2 0x09D2
+#define mmMC_ARB_RTT_DATA 0x09CF
+#define mmMC_ARB_RTT_DEBUG 0x09D3
+#define mmMC_ARB_SQM_CNTL 0x09DB
+#define mmMC_ARB_TM_CNTL_RD 0x09E3
+#define mmMC_ARB_TM_CNTL_WR 0x09E4
+#define mmMC_ARB_WCDR 0x09FB
+#define mmMC_ARB_WCDR_2 0x09CE
+#define mmMC_ARB_WTM_CNTL_RD 0x09DF
+#define mmMC_ARB_WTM_CNTL_WR 0x09E0
+#define mmMC_ARB_WTM_GRPWT_RD 0x09E1
+#define mmMC_ARB_WTM_GRPWT_WR 0x09E2
+#define mmMC_BIST_AUTO_CNTL 0x0A06
+#define mmMC_BIST_CMD_CNTL 0x0A8E
+#define mmMC_BIST_CMP_CNTL 0x0A8D
+#define mmMC_BIST_CMP_CNTL_2 0x0AB6
+#define mmMC_BIST_CNTL 0x0A05
+#define mmMC_BIST_DATA_MASK 0x0A12
+#define mmMC_BIST_DATA_WORD0 0x0A0A
+#define mmMC_BIST_DATA_WORD1 0x0A0B
+#define mmMC_BIST_DATA_WORD2 0x0A0C
+#define mmMC_BIST_DATA_WORD3 0x0A0D
+#define mmMC_BIST_DATA_WORD4 0x0A0E
+#define mmMC_BIST_DATA_WORD5 0x0A0F
+#define mmMC_BIST_DATA_WORD6 0x0A10
+#define mmMC_BIST_DATA_WORD7 0x0A11
+#define mmMC_BIST_DIR_CNTL 0x0A07
+#define mmMC_BIST_EADDR 0x0A09
+#define mmMC_BIST_MISMATCH_ADDR 0x0A13
+#define mmMC_BIST_RDATA_EDC 0x0A1D
+#define mmMC_BIST_RDATA_MASK 0x0A1C
+#define mmMC_BIST_RDATA_WORD0 0x0A14
+#define mmMC_BIST_RDATA_WORD1 0x0A15
+#define mmMC_BIST_RDATA_WORD2 0x0A16
+#define mmMC_BIST_RDATA_WORD3 0x0A17
+#define mmMC_BIST_RDATA_WORD4 0x0A18
+#define mmMC_BIST_RDATA_WORD5 0x0A19
+#define mmMC_BIST_RDATA_WORD6 0x0A1A
+#define mmMC_BIST_RDATA_WORD7 0x0A1B
+#define mmMC_BIST_SADDR 0x0A08
+#define mmMC_CG_CONFIG 0x096F
+#define mmMC_CG_CONFIG_MCD 0x0829
+#define mmMC_CG_DATAPORT 0x0A21
+#define mmMC_CITF_CNTL 0x0970
+#define mmMC_CITF_CREDITS_ARB_RD 0x0972
+#define mmMC_CITF_CREDITS_ARB_WR 0x0973
+#define mmMC_CITF_CREDITS_VM 0x0971
+#define mmMC_CITF_CREDITS_XBAR 0x0989
+#define mmMC_CITF_DAGB_CNTL 0x0974
+#define mmMC_CITF_DAGB_DLY 0x0977
+#define mmMC_CITF_INT_CREDITS 0x0975
+#define mmMC_CITF_INT_CREDITS_WR 0x097D
+#define mmMC_CITF_MISC_RD_CG 0x0992
+#define mmMC_CITF_MISC_VM_CG 0x0994
+#define mmMC_CITF_MISC_WR_CG 0x0993
+#define mmMC_CITF_PERF_MON_CNTL2 0x098E
+#define mmMC_CITF_PERF_MON_RSLT2 0x0991
+#define mmMC_CITF_REMREQ 0x097A
+#define mmMC_CITF_RET_MODE 0x0976
+#define mmMC_CITF_WTM_RD_CNTL 0x097F
+#define mmMC_CITF_WTM_WR_CNTL 0x0980
+#define mmMC_CITF_XTRA_ENABLE 0x096D
+#define mmMC_CONFIG 0x0800
+#define mmMC_CONFIG_MCD 0x0828
+#define mmMC_HUB_MISC_DBG 0x0831
+#define mmMC_HUB_MISC_FRAMING 0x0834
+#define mmMC_HUB_MISC_HUB_CG 0x082E
+#define mmMC_HUB_MISC_IDLE_STATUS 0x0847
+#define mmMC_HUB_MISC_OVERRIDE 0x0833
+#define mmMC_HUB_MISC_POWER 0x082D
+#define mmMC_HUB_MISC_SIP_CG 0x0830
+#define mmMC_HUB_MISC_STATUS 0x0832
+#define mmMC_HUB_MISC_VM_CG 0x082F
+#define mmMC_HUB_RDREQ_CNTL 0x083B
+#define mmMC_HUB_RDREQ_CREDITS 0x0844
+#define mmMC_HUB_RDREQ_CREDITS2 0x0845
+#define mmMC_HUB_RDREQ_DMIF 0x0863
+#define mmMC_HUB_RDREQ_DMIF_LIMIT 0x0848
+#define mmMC_HUB_RDREQ_GBL0 0x0856
+#define mmMC_HUB_RDREQ_GBL1 0x0857
+#define mmMC_HUB_RDREQ_HDP 0x085B
+#define mmMC_HUB_RDREQ_MCDW 0x0851
+#define mmMC_HUB_RDREQ_MCDX 0x0852
+#define mmMC_HUB_RDREQ_MCDY 0x0853
+#define mmMC_HUB_RDREQ_MCDZ 0x0854
+#define mmMC_HUB_RDREQ_MCIF 0x0864
+#define mmMC_HUB_RDREQ_RLC 0x085D
+#define mmMC_HUB_RDREQ_SEM 0x085E
+#define mmMC_HUB_RDREQ_SIP 0x0855
+#define mmMC_HUB_RDREQ_SMU 0x0858
+#define mmMC_HUB_RDREQ_STATUS 0x0839
+#define mmMC_HUB_RDREQ_UMC 0x0860
+#define mmMC_HUB_RDREQ_UVD 0x0861
+#define mmMC_HUB_RDREQ_VCE 0x085F
+#define mmMC_HUB_RDREQ_VCEU 0x0866
+#define mmMC_HUB_RDREQ_VMC 0x0865
+#define mmMC_HUB_RDREQ_WTM_CNTL 0x083D
+#define mmMC_HUB_RDREQ_XDMAM 0x0882
+#define mmMC_HUB_SHARED_DAGB_DLY 0x0846
+#define mmMC_HUB_WDP_BP 0x0837
+#define mmMC_HUB_WDP_CNTL 0x0835
+#define mmMC_HUB_WDP_CREDITS 0x083F
+#define mmMC_HUB_WDP_ERR 0x0836
+#define mmMC_HUB_WDP_GBL0 0x0841
+#define mmMC_HUB_WDP_GBL1 0x0842
+#define mmMC_HUB_WDP_HDP 0x0879
+#define mmMC_HUB_WDP_IH 0x0872
+#define mmMC_HUB_WDP_MCDW 0x0867
+#define mmMC_HUB_WDP_MCDX 0x0868
+#define mmMC_HUB_WDP_MCDY 0x0869
+#define mmMC_HUB_WDP_MCDZ 0x086A
+#define mmMC_HUB_WDP_MCIF 0x086F
+#define mmMC_HUB_WDP_MGPU 0x0843
+#define mmMC_HUB_WDP_MGPU2 0x0840
+#define mmMC_HUB_WDP_RLC 0x0873
+#define mmMC_HUB_WDP_SEM 0x0874
+#define mmMC_HUB_WDP_SH0 0x086E
+#define mmMC_HUB_WDP_SH1 0x0876
+#define mmMC_HUB_WDP_SIP 0x086B
+#define mmMC_HUB_WDP_SMU 0x0875
+#define mmMC_HUB_WDP_STATUS 0x0838
+#define mmMC_HUB_WDP_UMC 0x0877
+#define mmMC_HUB_WDP_UVD 0x0878
+#define mmMC_HUB_WDP_VCE 0x0870
+#define mmMC_HUB_WDP_VCEU 0x087F
+#define mmMC_HUB_WDP_WTM_CNTL 0x083E
+#define mmMC_HUB_WDP_XDMA 0x0881
+#define mmMC_HUB_WDP_XDMAM 0x0880
+#define mmMC_HUB_WDP_XDP 0x0871
+#define mmMC_HUB_WRRET_CNTL 0x083C
+#define mmMC_HUB_WRRET_MCDW 0x087B
+#define mmMC_HUB_WRRET_MCDX 0x087C
+#define mmMC_HUB_WRRET_MCDY 0x087D
+#define mmMC_HUB_WRRET_MCDZ 0x087E
+#define mmMC_HUB_WRRET_STATUS 0x083A
+#define mmMC_IMP_CNTL 0x0A36
+#define mmMC_IMP_DEBUG 0x0A37
+#define mmMC_IMP_DQ_STATUS 0x0ABC
+#define mmMC_IMP_STATUS 0x0A38
+#define mmMC_IO_APHY_STR_CNTL_D0 0x0A97
+#define mmMC_IO_APHY_STR_CNTL_D1 0x0A98
+#define mmMC_IO_CDRCNTL1_D0 0x0ADD
+#define mmMC_IO_CDRCNTL1_D1 0x0ADE
+#define mmMC_IO_CDRCNTL2_D0 0x0AE4
+#define mmMC_IO_CDRCNTL2_D1 0x0AE5
+#define mmMC_IO_CDRCNTL_D0 0x0A55
+#define mmMC_IO_CDRCNTL_D1 0x0A56
+#define mmMC_IO_DPHY_STR_CNTL_D0 0x0A4E
+#define mmMC_IO_DPHY_STR_CNTL_D1 0x0A54
+#define mmMC_IO_PAD_CNTL 0x0A73
+#define mmMC_IO_PAD_CNTL_D0 0x0A74
+#define mmMC_IO_PAD_CNTL_D1 0x0A75
+#define mmMC_IO_RXCNTL1_DPHY0_D0 0x0ADF
+#define mmMC_IO_RXCNTL1_DPHY0_D1 0x0AE1
+#define mmMC_IO_RXCNTL1_DPHY1_D0 0x0AE0
+#define mmMC_IO_RXCNTL1_DPHY1_D1 0x0AE2
+#define mmMC_IO_RXCNTL_DPHY0_D0 0x0A4C
+#define mmMC_IO_RXCNTL_DPHY0_D1 0x0A52
+#define mmMC_IO_RXCNTL_DPHY1_D0 0x0A4D
+#define mmMC_IO_RXCNTL_DPHY1_D1 0x0A53
+#define mmMC_IO_TXCNTL_APHY_D0 0x0A4B
+#define mmMC_IO_TXCNTL_APHY_D1 0x0A51
+#define mmMC_IO_TXCNTL_DPHY0_D0 0x0A49
+#define mmMC_IO_TXCNTL_DPHY0_D1 0x0A4F
+#define mmMC_IO_TXCNTL_DPHY1_D0 0x0A4A
+#define mmMC_IO_TXCNTL_DPHY1_D1 0x0A50
+#define mmMCLK_PWRMGT_CNTL 0x0AE8
+#define mmMC_MEM_POWER_LS 0x082A
+#define mmMC_NPL_STATUS 0x0A76
+#define mmMC_PHY_TIMING_2 0x0ACE
+#define mmMC_PHY_TIMING_D0 0x0ACC
+#define mmMC_PHY_TIMING_D1 0x0ACD
+#define mmMC_PMG_AUTO_CFG 0x0A35
+#define mmMC_PMG_AUTO_CMD 0x0A34
+#define mmMC_PMG_CFG 0x0A84
+#define mmMC_PMG_CMD_EMRS 0x0A83
+#define mmMC_PMG_CMD_MRS 0x0AAB
+#define mmMC_PMG_CMD_MRS1 0x0AD1
+#define mmMC_PMG_CMD_MRS2 0x0AD7
+#define mmMC_RD_CB 0x0981
+#define mmMC_RD_DB 0x0982
+#define mmMC_RD_GRP_EXT 0x0978
+#define mmMC_RD_GRP_GFX 0x0803
+#define mmMC_RD_GRP_LCL 0x098A
+#define mmMC_RD_GRP_OTH 0x0807
+#define mmMC_RD_GRP_SYS 0x0805
+#define mmMC_RD_HUB 0x0985
+#define mmMC_RD_TC0 0x0983
+#define mmMC_RD_TC1 0x0984
+#define mmMC_RPB_ARB_CNTL 0x0951
+#define mmMC_RPB_BIF_CNTL 0x0952
+#define mmMC_RPB_CID_QUEUE_EX 0x095A
+#define mmMC_RPB_CID_QUEUE_EX_DATA 0x095B
+#define mmMC_RPB_CID_QUEUE_RD 0x0957
+#define mmMC_RPB_CID_QUEUE_WR 0x0956
+#define mmMC_RPB_CONF 0x094D
+#define mmMC_RPB_DBG1 0x094F
+#define mmMC_RPB_EFF_CNTL 0x0950
+#define mmMC_RPB_IF_CONF 0x094E
+#define mmMC_RPB_PERF_COUNTER_CNTL 0x0958
+#define mmMC_RPB_PERF_COUNTER_STATUS 0x0959
+#define mmMC_RPB_RD_SWITCH_CNTL 0x0955
+#define mmMC_RPB_WR_COMBINE_CNTL 0x0954
+#define mmMC_RPB_WR_SWITCH_CNTL 0x0953
+#define mmMC_SEQ_BIT_REMAP_B0_D0 0x0AA3
+#define mmMC_SEQ_BIT_REMAP_B0_D1 0x0AA7
+#define mmMC_SEQ_BIT_REMAP_B1_D0 0x0AA4
+#define mmMC_SEQ_BIT_REMAP_B1_D1 0x0AA8
+#define mmMC_SEQ_BIT_REMAP_B2_D0 0x0AA5
+#define mmMC_SEQ_BIT_REMAP_B2_D1 0x0AA9
+#define mmMC_SEQ_BIT_REMAP_B3_D0 0x0AA6
+#define mmMC_SEQ_BIT_REMAP_B3_D1 0x0AAA
+#define mmMC_SEQ_BYTE_REMAP_D0 0x0A93
+#define mmMC_SEQ_BYTE_REMAP_D1 0x0A94
+#define mmMC_SEQ_CAS_TIMING 0x0A29
+#define mmMC_SEQ_CAS_TIMING_LP 0x0A9C
+#define mmMC_SEQ_CG 0x0A9A
+#define mmMC_SEQ_CMD 0x0A31
+#define mmMC_SEQ_CNTL 0x0A25
+#define mmMC_SEQ_CNTL_2 0x0AD4
+#define mmMC_SEQ_DRAM 0x0A26
+#define mmMC_SEQ_DRAM_2 0x0A27
+#define mmMC_SEQ_DRAM_ERROR_INSERTION 0x0ACB
+#define mmMC_SEQ_FIFO_CTL 0x0A57
+#define mmMC_SEQ_IO_DEBUG_DATA 0x0A92
+#define mmMC_SEQ_IO_DEBUG_INDEX 0x0A91
+#define mmMC_SEQ_IO_RDBI 0x0AB4
+#define mmMC_SEQ_IO_REDC 0x0AB5
+#define mmMC_SEQ_IO_RESERVE_D0 0x0AB7
+#define mmMC_SEQ_IO_RESERVE_D1 0x0AB8
+#define mmMC_SEQ_IO_RWORD0 0x0AAC
+#define mmMC_SEQ_IO_RWORD1 0x0AAD
+#define mmMC_SEQ_IO_RWORD2 0x0AAE
+#define mmMC_SEQ_IO_RWORD3 0x0AAF
+#define mmMC_SEQ_IO_RWORD4 0x0AB0
+#define mmMC_SEQ_IO_RWORD5 0x0AB1
+#define mmMC_SEQ_IO_RWORD6 0x0AB2
+#define mmMC_SEQ_IO_RWORD7 0x0AB3
+#define mmMC_SEQ_MISC0 0x0A80
+#define mmMC_SEQ_MISC1 0x0A81
+#define mmMC_SEQ_MISC3 0x0A8B
+#define mmMC_SEQ_MISC4 0x0A8C
+#define mmMC_SEQ_MISC5 0x0A95
+#define mmMC_SEQ_MISC6 0x0A96
+#define mmMC_SEQ_MISC7 0x0A99
+#define mmMC_SEQ_MISC8 0x0A5F
+#define mmMC_SEQ_MISC9 0x0AE7
+#define mmMC_SEQ_MISC_TIMING 0x0A2A
+#define mmMC_SEQ_MISC_TIMING2 0x0A2B
+#define mmMC_SEQ_MISC_TIMING2_LP 0x0A9E
+#define mmMC_SEQ_MISC_TIMING_LP 0x0A9D
+#define mmMC_SEQ_MPLL_OVERRIDE 0x0A22
+#define mmMC_SEQ_PERF_CNTL 0x0A77
+#define mmMC_SEQ_PERF_CNTL_1 0x0AFD
+#define mmMC_SEQ_PERF_SEQ_CNT_A_I0 0x0A79
+#define mmMC_SEQ_PERF_SEQ_CNT_A_I1 0x0A7A
+#define mmMC_SEQ_PERF_SEQ_CNT_B_I0 0x0A7B
+#define mmMC_SEQ_PERF_SEQ_CNT_B_I1 0x0A7C
+#define mmMC_SEQ_PERF_SEQ_CNT_C_I0 0x0AD9
+#define mmMC_SEQ_PERF_SEQ_CNT_C_I1 0x0ADA
+#define mmMC_SEQ_PERF_SEQ_CNT_D_I0 0x0ADB
+#define mmMC_SEQ_PERF_SEQ_CNT_D_I1 0x0ADC
+#define mmMC_SEQ_PERF_SEQ_CTL 0x0A78
+#define mmMC_SEQ_PMG_CMD_EMRS_LP 0x0AA1
+#define mmMC_SEQ_PMG_CMD_MRS1_LP 0x0AD2
+#define mmMC_SEQ_PMG_CMD_MRS2_LP 0x0AD8
+#define mmMC_SEQ_PMG_CMD_MRS_LP 0x0AA2
+#define mmMC_SEQ_PMG_PG_HWCNTL 0x0AB9
+#define mmMC_SEQ_PMG_PG_SWCNTL_0 0x0ABA
+#define mmMC_SEQ_PMG_PG_SWCNTL_1 0x0ABB
+#define mmMC_SEQ_PMG_TIMING 0x0A2C
+#define mmMC_SEQ_PMG_TIMING_LP 0x0AD3
+#define mmMC_SEQ_RAS_TIMING 0x0A28
+#define mmMC_SEQ_RAS_TIMING_LP 0x0A9B
+#define mmMC_SEQ_RD_CTL_D0 0x0A2D
+#define mmMC_SEQ_RD_CTL_D0_LP 0x0AC7
+#define mmMC_SEQ_RD_CTL_D1 0x0A2E
+#define mmMC_SEQ_RD_CTL_D1_LP 0x0AC8
+#define mmMC_SEQ_RESERVE_0_S 0x0A1E
+#define mmMC_SEQ_RESERVE_1_S 0x0A1F
+#define mmMC_SEQ_RESERVE_M 0x0A82
+#define mmMC_SEQ_RXFRAMING_BYTE0_D0 0x0A67
+#define mmMC_SEQ_RXFRAMING_BYTE0_D1 0x0A6D
+#define mmMC_SEQ_RXFRAMING_BYTE1_D0 0x0A68
+#define mmMC_SEQ_RXFRAMING_BYTE1_D1 0x0A6E
+#define mmMC_SEQ_RXFRAMING_BYTE2_D0 0x0A69
+#define mmMC_SEQ_RXFRAMING_BYTE2_D1 0x0A6F
+#define mmMC_SEQ_RXFRAMING_BYTE3_D0 0x0A6A
+#define mmMC_SEQ_RXFRAMING_BYTE3_D1 0x0A70
+#define mmMC_SEQ_RXFRAMING_DBI_D0 0x0A6B
+#define mmMC_SEQ_RXFRAMING_DBI_D1 0x0A71
+#define mmMC_SEQ_RXFRAMING_EDC_D0 0x0A6C
+#define mmMC_SEQ_RXFRAMING_EDC_D1 0x0A72
+#define mmMC_SEQ_STATUS_M 0x0A7D
+#define mmMC_SEQ_STATUS_S 0x0A20
+#define mmMC_SEQ_SUP_CNTL 0x0A32
+#define mmMC_SEQ_SUP_DEC_STAT 0x0A88
+#define mmMC_SEQ_SUP_GP0_STAT 0x0A8F
+#define mmMC_SEQ_SUP_GP1_STAT 0x0A90
+#define mmMC_SEQ_SUP_GP2_STAT 0x0A85
+#define mmMC_SEQ_SUP_GP3_STAT 0x0A86
+#define mmMC_SEQ_SUP_IR_STAT 0x0A87
+#define mmMC_SEQ_SUP_PGM 0x0A33
+#define mmMC_SEQ_SUP_PGM_STAT 0x0A89
+#define mmMC_SEQ_SUP_R_PGM 0x0A8A
+#define mmMC_SEQ_TCG_CNTL 0x0ABD
+#define mmMC_SEQ_TIMER_RD 0x0ACA
+#define mmMC_SEQ_TIMER_WR 0x0AC9
+#define mmMC_SEQ_TRAIN_CAPTURE 0x0A3E
+#define mmMC_SEQ_TRAIN_EDC_THRESHOLD 0x0A3B
+#define mmMC_SEQ_TRAIN_EDC_THRESHOLD2 0x0AFE
+#define mmMC_SEQ_TRAIN_EDC_THRESHOLD3 0x0AFF
+#define mmMC_SEQ_TRAIN_TIMING 0x0A40
+#define mmMC_SEQ_TRAIN_WAKEUP_CLEAR 0x0A3F
+#define mmMC_SEQ_TRAIN_WAKEUP_CNTL 0x0A3A
+#define mmMC_SEQ_TRAIN_WAKEUP_EDGE 0x0A3C
+#define mmMC_SEQ_TRAIN_WAKEUP_MASK 0x0A3D
+#define mmMC_SEQ_TSM_BCNT 0x0AC2
+#define mmMC_SEQ_TSM_CTRL 0x0ABE
+#define mmMC_SEQ_TSM_DBI 0x0AC6
+#define mmMC_SEQ_TSM_DEBUG_DATA 0x0AD0
+#define mmMC_SEQ_TSM_DEBUG_INDEX 0x0ACF
+#define mmMC_SEQ_TSM_EDC 0x0AC5
+#define mmMC_SEQ_TSM_FLAG 0x0AC3
+#define mmMC_SEQ_TSM_GCNT 0x0ABF
+#define mmMC_SEQ_TSM_MISC 0x0AE6
+#define mmMC_SEQ_TSM_NCNT 0x0AC1
+#define mmMC_SEQ_TSM_OCNT 0x0AC0
+#define mmMC_SEQ_TSM_UPDATE 0x0AC4
+#define mmMC_SEQ_TSM_WCDR 0x0AE3
+#define mmMC_SEQ_TXFRAMING_BYTE0_D0 0x0A58
+#define mmMC_SEQ_TXFRAMING_BYTE0_D1 0x0A60
+#define mmMC_SEQ_TXFRAMING_BYTE1_D0 0x0A59
+#define mmMC_SEQ_TXFRAMING_BYTE1_D1 0x0A61
+#define mmMC_SEQ_TXFRAMING_BYTE2_D0 0x0A5A
+#define mmMC_SEQ_TXFRAMING_BYTE2_D1 0x0A62
+#define mmMC_SEQ_TXFRAMING_BYTE3_D0 0x0A5B
+#define mmMC_SEQ_TXFRAMING_BYTE3_D1 0x0A63
+#define mmMC_SEQ_TXFRAMING_DBI_D0 0x0A5C
+#define mmMC_SEQ_TXFRAMING_DBI_D1 0x0A64
+#define mmMC_SEQ_TXFRAMING_EDC_D0 0x0A5D
+#define mmMC_SEQ_TXFRAMING_EDC_D1 0x0A65
+#define mmMC_SEQ_TXFRAMING_FCK_D0 0x0A5E
+#define mmMC_SEQ_TXFRAMING_FCK_D1 0x0A66
+#define mmMC_SEQ_VENDOR_ID_I0 0x0A7E
+#define mmMC_SEQ_VENDOR_ID_I1 0x0A7F
+#define mmMC_SEQ_WCDR_CTRL 0x0A39
+#define mmMC_SEQ_WR_CTL_2 0x0AD5
+#define mmMC_SEQ_WR_CTL_2_LP 0x0AD6
+#define mmMC_SEQ_WR_CTL_D0 0x0A2F
+#define mmMC_SEQ_WR_CTL_D0_LP 0x0A9F
+#define mmMC_SEQ_WR_CTL_D1 0x0A30
+#define mmMC_SEQ_WR_CTL_D1_LP 0x0AA0
+#define mmMC_SHARED_BLACKOUT_CNTL 0x082B
+#define mmMC_SHARED_CHMAP 0x0801
+#define mmMC_SHARED_CHREMAP 0x0802
+#define mmMC_TRAIN_EDCCDR_R_D0 0x0A41
+#define mmMC_TRAIN_EDCCDR_R_D1 0x0A42
+#define mmMC_TRAIN_EDC_STATUS_D0 0x0A45
+#define mmMC_TRAIN_EDC_STATUS_D1 0x0A48
+#define mmMC_TRAIN_PRBSERR_0_D0 0x0A43
+#define mmMC_TRAIN_PRBSERR_0_D1 0x0A46
+#define mmMC_TRAIN_PRBSERR_1_D0 0x0A44
+#define mmMC_TRAIN_PRBSERR_1_D1 0x0A47
+#define mmMC_TRAIN_PRBSERR_2_D0 0x0AFB
+#define mmMC_TRAIN_PRBSERR_2_D1 0x0AFC
+#define mmMC_VM_AGP_BASE 0x080C
+#define mmMC_VM_AGP_BOT 0x080B
+#define mmMC_VM_AGP_TOP 0x080A
+#define mmMC_VM_DC_WRITE_CNTL 0x0810
+#define mmMC_VM_DC_WRITE_HIT_REGION_0_HIGH_ADDR 0x0815
+#define mmMC_VM_DC_WRITE_HIT_REGION_0_LOW_ADDR 0x0811
+#define mmMC_VM_DC_WRITE_HIT_REGION_1_HIGH_ADDR 0x0816
+#define mmMC_VM_DC_WRITE_HIT_REGION_1_LOW_ADDR 0x0812
+#define mmMC_VM_DC_WRITE_HIT_REGION_2_HIGH_ADDR 0x0817
+#define mmMC_VM_DC_WRITE_HIT_REGION_2_LOW_ADDR 0x0813
+#define mmMC_VM_DC_WRITE_HIT_REGION_3_HIGH_ADDR 0x0818
+#define mmMC_VM_DC_WRITE_HIT_REGION_3_LOW_ADDR 0x0814
+#define mmMC_VM_FB_LOCATION 0x0809
+#define mmMC_VM_FB_OFFSET 0x081A
+#define mmMC_VM_MB_L1_TLB0_DEBUG 0x0891
+#define mmMC_VM_MB_L1_TLB0_STATUS 0x0895
+#define mmMC_VM_MB_L1_TLB1_STATUS 0x0896
+#define mmMC_VM_MB_L1_TLB2_DEBUG 0x0893
+#define mmMC_VM_MB_L1_TLB2_STATUS 0x0897
+#define mmMC_VM_MB_L1_TLB3_DEBUG 0x08A5
+#define mmMC_VM_MB_L1_TLB3_STATUS 0x08A6
+#define mmMC_VM_MB_L2ARBITER_L2_CREDITS 0x08A1
+#define mmMC_VM_MD_L1_TLB0_DEBUG 0x0998
+#define mmMC_VM_MD_L1_TLB0_STATUS 0x099B
+#define mmMC_VM_MD_L1_TLB1_DEBUG 0x0999
+#define mmMC_VM_MD_L1_TLB1_STATUS 0x099C
+#define mmMC_VM_MD_L1_TLB2_DEBUG 0x099A
+#define mmMC_VM_MD_L1_TLB2_STATUS 0x099D
+#define mmMC_VM_MD_L1_TLB3_DEBUG 0x09A7
+#define mmMC_VM_MD_L1_TLB3_STATUS 0x09A8
+#define mmMC_VM_MD_L2ARBITER_L2_CREDITS 0x09A4
+#define mmMC_VM_MX_L1_TLB_CNTL 0x0819
+#define mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR 0x080F
+#define mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR 0x080E
+#define mmMC_VM_SYSTEM_APERTURE_LOW_ADDR 0x080D
+#define mmMC_WR_CB 0x0986
+#define mmMC_WR_DB 0x0987
+#define mmMC_WR_GRP_EXT 0x0979
+#define mmMC_WR_GRP_GFX 0x0804
+#define mmMC_WR_GRP_LCL 0x098B
+#define mmMC_WR_GRP_OTH 0x0808
+#define mmMC_WR_GRP_SYS 0x0806
+#define mmMC_WR_HUB 0x0988
+#define mmMC_WR_TC0 0x097B
+#define mmMC_WR_TC1 0x097C
+#define mmMC_XBAR_ADDR_DEC 0x0C80
+#define mmMC_XBAR_ARB 0x0C8D
+#define mmMC_XBAR_ARB_MAX_BURST 0x0C8E
+#define mmMC_XBAR_CHTRIREMAP 0x0C8B
+#define mmMC_XBAR_PERF_MON_CNTL0 0x0C8F
+#define mmMC_XBAR_PERF_MON_CNTL1 0x0C90
+#define mmMC_XBAR_PERF_MON_CNTL2 0x0C91
+#define mmMC_XBAR_PERF_MON_MAX_THSH 0x0C96
+#define mmMC_XBAR_PERF_MON_RSLT0 0x0C92
+#define mmMC_XBAR_PERF_MON_RSLT1 0x0C93
+#define mmMC_XBAR_PERF_MON_RSLT2 0x0C94
+#define mmMC_XBAR_PERF_MON_RSLT3 0x0C95
+#define mmMC_XBAR_RDREQ_CREDIT 0x0C83
+#define mmMC_XBAR_RDREQ_PRI_CREDIT 0x0C84
+#define mmMC_XBAR_RDRET_CREDIT1 0x0C87
+#define mmMC_XBAR_RDRET_CREDIT2 0x0C88
+#define mmMC_XBAR_RDRET_PRI_CREDIT1 0x0C89
+#define mmMC_XBAR_RDRET_PRI_CREDIT2 0x0C8A
+#define mmMC_XBAR_REMOTE 0x0C81
+#define mmMC_XBAR_SPARE0 0x0C97
+#define mmMC_XBAR_SPARE1 0x0C98
+#define mmMC_XBAR_TWOCHAN 0x0C8C
+#define mmMC_XBAR_WRREQ_CREDIT 0x0C82
+#define mmMC_XBAR_WRRET_CREDIT1 0x0C85
+#define mmMC_XBAR_WRRET_CREDIT2 0x0C86
+#define mmMC_XPB_CLG_CFG0 0x08E9
+#define mmMC_XPB_CLG_CFG10 0x08F3
+#define mmMC_XPB_CLG_CFG1 0x08EA
+#define mmMC_XPB_CLG_CFG11 0x08F4
+#define mmMC_XPB_CLG_CFG12 0x08F5
+#define mmMC_XPB_CLG_CFG13 0x08F6
+#define mmMC_XPB_CLG_CFG14 0x08F7
+#define mmMC_XPB_CLG_CFG15 0x08F8
+#define mmMC_XPB_CLG_CFG16 0x08F9
+#define mmMC_XPB_CLG_CFG17 0x08FA
+#define mmMC_XPB_CLG_CFG18 0x08FB
+#define mmMC_XPB_CLG_CFG19 0x08FC
+#define mmMC_XPB_CLG_CFG20 0x0928
+#define mmMC_XPB_CLG_CFG2 0x08EB
+#define mmMC_XPB_CLG_CFG21 0x0929
+#define mmMC_XPB_CLG_CFG22 0x092A
+#define mmMC_XPB_CLG_CFG23 0x092B
+#define mmMC_XPB_CLG_CFG24 0x092C
+#define mmMC_XPB_CLG_CFG25 0x092D
+#define mmMC_XPB_CLG_CFG26 0x092E
+#define mmMC_XPB_CLG_CFG27 0x092F
+#define mmMC_XPB_CLG_CFG28 0x0930
+#define mmMC_XPB_CLG_CFG29 0x0931
+#define mmMC_XPB_CLG_CFG30 0x0932
+#define mmMC_XPB_CLG_CFG3 0x08EC
+#define mmMC_XPB_CLG_CFG31 0x0933
+#define mmMC_XPB_CLG_CFG32 0x0936
+#define mmMC_XPB_CLG_CFG33 0x0937
+#define mmMC_XPB_CLG_CFG34 0x0938
+#define mmMC_XPB_CLG_CFG35 0x0939
+#define mmMC_XPB_CLG_CFG36 0x093A
+#define mmMC_XPB_CLG_CFG4 0x08ED
+#define mmMC_XPB_CLG_CFG5 0x08EE
+#define mmMC_XPB_CLG_CFG6 0x08EF
+#define mmMC_XPB_CLG_CFG7 0x08F0
+#define mmMC_XPB_CLG_CFG8 0x08F1
+#define mmMC_XPB_CLG_CFG9 0x08F2
+#define mmMC_XPB_CLG_EXTRA 0x08FD
+#define mmMC_XPB_CLG_EXTRA_RD 0x0935
+#define mmMC_XPB_CLK_GAT 0x091E
+#define mmMC_XPB_INTF_CFG 0x091F
+#define mmMC_XPB_INTF_CFG2 0x0934
+#define mmMC_XPB_INTF_STS 0x0920
+#define mmMC_XPB_LB_ADDR 0x08FE
+#define mmMC_XPB_MAP_INVERT_FLUSH_NUM_LSB 0x0923
+#define mmMC_XPB_MISC_CFG 0x0927
+#define mmMC_XPB_P2P_BAR0 0x0904
+#define mmMC_XPB_P2P_BAR1 0x0905
+#define mmMC_XPB_P2P_BAR2 0x0906
+#define mmMC_XPB_P2P_BAR3 0x0907
+#define mmMC_XPB_P2P_BAR4 0x0908
+#define mmMC_XPB_P2P_BAR5 0x0909
+#define mmMC_XPB_P2P_BAR6 0x090A
+#define mmMC_XPB_P2P_BAR7 0x090B
+#define mmMC_XPB_P2P_BAR_CFG 0x0903
+#define mmMC_XPB_P2P_BAR_DEBUG 0x090D
+#define mmMC_XPB_P2P_BAR_DELTA_ABOVE 0x090E
+#define mmMC_XPB_P2P_BAR_DELTA_BELOW 0x090F
+#define mmMC_XPB_P2P_BAR_SETUP 0x090C
+#define mmMC_XPB_PEER_SYS_BAR0 0x0910
+#define mmMC_XPB_PEER_SYS_BAR1 0x0911
+#define mmMC_XPB_PEER_SYS_BAR2 0x0912
+#define mmMC_XPB_PEER_SYS_BAR3 0x0913
+#define mmMC_XPB_PEER_SYS_BAR4 0x0914
+#define mmMC_XPB_PEER_SYS_BAR5 0x0915
+#define mmMC_XPB_PEER_SYS_BAR6 0x0916
+#define mmMC_XPB_PEER_SYS_BAR7 0x0917
+#define mmMC_XPB_PEER_SYS_BAR8 0x0918
+#define mmMC_XPB_PEER_SYS_BAR9 0x0919
+#define mmMC_XPB_PERF_KNOBS 0x0924
+#define mmMC_XPB_PIPE_STS 0x0921
+#define mmMC_XPB_RTR_DEST_MAP0 0x08DB
+#define mmMC_XPB_RTR_DEST_MAP1 0x08DC
+#define mmMC_XPB_RTR_DEST_MAP2 0x08DD
+#define mmMC_XPB_RTR_DEST_MAP3 0x08DE
+#define mmMC_XPB_RTR_DEST_MAP4 0x08DF
+#define mmMC_XPB_RTR_DEST_MAP5 0x08E0
+#define mmMC_XPB_RTR_DEST_MAP6 0x08E1
+#define mmMC_XPB_RTR_DEST_MAP7 0x08E2
+#define mmMC_XPB_RTR_DEST_MAP8 0x08E3
+#define mmMC_XPB_RTR_DEST_MAP9 0x08E4
+#define mmMC_XPB_RTR_SRC_APRTR0 0x08CD
+#define mmMC_XPB_RTR_SRC_APRTR1 0x08CE
+#define mmMC_XPB_RTR_SRC_APRTR2 0x08CF
+#define mmMC_XPB_RTR_SRC_APRTR3 0x08D0
+#define mmMC_XPB_RTR_SRC_APRTR4 0x08D1
+#define mmMC_XPB_RTR_SRC_APRTR5 0x08D2
+#define mmMC_XPB_RTR_SRC_APRTR6 0x08D3
+#define mmMC_XPB_RTR_SRC_APRTR7 0x08D4
+#define mmMC_XPB_RTR_SRC_APRTR8 0x08D5
+#define mmMC_XPB_RTR_SRC_APRTR9 0x08D6
+#define mmMC_XPB_STICKY 0x0925
+#define mmMC_XPB_STICKY_W1C 0x0926
+#define mmMC_XPB_SUB_CTRL 0x0922
+#define mmMC_XPB_UNC_THRESH_HST 0x08FF
+#define mmMC_XPB_UNC_THRESH_SID 0x0900
+#define mmMC_XPB_WCB_CFG 0x0902
+#define mmMC_XPB_WCB_STS 0x0901
+#define mmMC_XPB_XDMA_PEER_SYS_BAR0 0x091A
+#define mmMC_XPB_XDMA_PEER_SYS_BAR1 0x091B
+#define mmMC_XPB_XDMA_PEER_SYS_BAR2 0x091C
+#define mmMC_XPB_XDMA_PEER_SYS_BAR3 0x091D
+#define mmMC_XPB_XDMA_RTR_DEST_MAP0 0x08E5
+#define mmMC_XPB_XDMA_RTR_DEST_MAP1 0x08E6
+#define mmMC_XPB_XDMA_RTR_DEST_MAP2 0x08E7
+#define mmMC_XPB_XDMA_RTR_DEST_MAP3 0x08E8
+#define mmMC_XPB_XDMA_RTR_SRC_APRTR0 0x08D7
+#define mmMC_XPB_XDMA_RTR_SRC_APRTR1 0x08D8
+#define mmMC_XPB_XDMA_RTR_SRC_APRTR2 0x08D9
+#define mmMC_XPB_XDMA_RTR_SRC_APRTR3 0x08DA
+#define mmMPLL_AD_FUNC_CNTL 0x0AF0
+#define mmMPLL_AD_STATUS 0x0AF6
+#define mmMPLL_CNTL_MODE 0x0AEC
+#define mmMPLL_CONTROL 0x0AF5
+#define mmMPLL_DQ_0_0_STATUS 0x0AF7
+#define mmMPLL_DQ_0_1_STATUS 0x0AF8
+#define mmMPLL_DQ_1_0_STATUS 0x0AF9
+#define mmMPLL_DQ_1_1_STATUS 0x0AFA
+#define mmMPLL_DQ_FUNC_CNTL 0x0AF1
+#define mmMPLL_FUNC_CNTL 0x0AED
+#define mmMPLL_FUNC_CNTL_1 0x0AEE
+#define mmMPLL_FUNC_CNTL_2 0x0AEF
+#define mmMPLL_SEQ_UCODE_1 0x0AEA
+#define mmMPLL_SEQ_UCODE_2 0x0AEB
+#define mmMPLL_SS1 0x0AF3
+#define mmMPLL_SS2 0x0AF4
+#define mmMPLL_TIME 0x0AF2
+#define mmVM_CONTEXT0_CNTL 0x0504
+#define mmVM_CONTEXT0_CNTL2 0x050C
+#define mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR 0x054F
+#define mmVM_CONTEXT0_PAGE_TABLE_END_ADDR 0x055F
+#define mmVM_CONTEXT0_PAGE_TABLE_START_ADDR 0x0557
+#define mmVM_CONTEXT0_PROTECTION_FAULT_ADDR 0x053E
+#define mmVM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR 0x0546
+#define mmVM_CONTEXT0_PROTECTION_FAULT_STATUS 0x0536
+#define mmVM_CONTEXT10_PAGE_TABLE_BASE_ADDR 0x0510
+#define mmVM_CONTEXT11_PAGE_TABLE_BASE_ADDR 0x0511
+#define mmVM_CONTEXT12_PAGE_TABLE_BASE_ADDR 0x0512
+#define mmVM_CONTEXT13_PAGE_TABLE_BASE_ADDR 0x0513
+#define mmVM_CONTEXT14_PAGE_TABLE_BASE_ADDR 0x0514
+#define mmVM_CONTEXT15_PAGE_TABLE_BASE_ADDR 0x0515
+#define mmVM_CONTEXT1_CNTL 0x0505
+#define mmVM_CONTEXT1_CNTL2 0x050D
+#define mmVM_CONTEXT1_PAGE_TABLE_BASE_ADDR 0x0550
+#define mmVM_CONTEXT1_PAGE_TABLE_END_ADDR 0x0560
+#define mmVM_CONTEXT1_PAGE_TABLE_START_ADDR 0x0558
+#define mmVM_CONTEXT1_PROTECTION_FAULT_ADDR 0x053F
+#define mmVM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR 0x0547
+#define mmVM_CONTEXT1_PROTECTION_FAULT_STATUS 0x0537
+#define mmVM_CONTEXT2_PAGE_TABLE_BASE_ADDR 0x0551
+#define mmVM_CONTEXT3_PAGE_TABLE_BASE_ADDR 0x0552
+#define mmVM_CONTEXT4_PAGE_TABLE_BASE_ADDR 0x0553
+#define mmVM_CONTEXT5_PAGE_TABLE_BASE_ADDR 0x0554
+#define mmVM_CONTEXT6_PAGE_TABLE_BASE_ADDR 0x0555
+#define mmVM_CONTEXT7_PAGE_TABLE_BASE_ADDR 0x0556
+#define mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR 0x050E
+#define mmVM_CONTEXT9_PAGE_TABLE_BASE_ADDR 0x050F
+#define mmVM_CONTEXTS_DISABLE 0x0535
+#define mmVM_DEBUG 0x056F
+#define mmVM_DUMMY_PAGE_FAULT_ADDR 0x0507
+#define mmVM_DUMMY_PAGE_FAULT_CNTL 0x0506
+#define mmVM_FAULT_CLIENT_ID 0x054E
+#define mmVM_INVALIDATE_REQUEST 0x051E
+#define mmVM_INVALIDATE_RESPONSE 0x051F
+#define mmVM_L2_BANK_SELECT_MASKA 0x0572
+#define mmVM_L2_BANK_SELECT_MASKB 0x0573
+#define mmVM_L2_CG 0x0570
+#define mmVM_L2_CNTL 0x0500
+#define mmVM_L2_CNTL2 0x0501
+#define mmVM_L2_CNTL3 0x0502
+#define mmVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR 0x0576
+#define mmVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR 0x0575
+#define mmVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET 0x0577
+#define mmVM_L2_STATUS 0x0503
+#define mmVM_PRT_APERTURE0_HIGH_ADDR 0x0530
+#define mmVM_PRT_APERTURE0_LOW_ADDR 0x052C
+#define mmVM_PRT_APERTURE1_HIGH_ADDR 0x0531
+#define mmVM_PRT_APERTURE1_LOW_ADDR 0x052D
+#define mmVM_PRT_APERTURE2_HIGH_ADDR 0x0532
+#define mmVM_PRT_APERTURE2_LOW_ADDR 0x052E
+#define mmVM_PRT_APERTURE3_HIGH_ADDR 0x0533
+#define mmVM_PRT_APERTURE3_LOW_ADDR 0x052F
+#define mmVM_PRT_CNTL 0x0534
+
+#endif
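
The mm* constants in gmc_6_0_d.h above are DWORD-indexed register offsets, and the companion gmc_6_0_sh_mask.h introduced below pairs every register field with a *_MASK/*__SHIFT define; fields are decoded with the usual (value & FIELD_MASK) >> FIELD__SHIFT idiom. Below is a minimal self-contained sketch of that idiom using the ATC_ATS_STATUS field defines from the new file. read_mmio_dword() and its canned return value are hypothetical stand-ins for the driver's real register-read path (not part of this patch), and the offset-times-4 byte addressing noted in the comment is an assumption based on amdgpu's DWORD-indexed MMIO accessors.

#include <stdint.h>
#include <stdio.h>

/* Field defines copied from gmc_6_0_sh_mask.h below. */
#define ATC_ATS_STATUS__BUSY_MASK                 0x00000001L
#define ATC_ATS_STATUS__BUSY__SHIFT               0x00000000
#define ATC_ATS_STATUS__CRASHED_MASK              0x00000002L
#define ATC_ATS_STATUS__CRASHED__SHIFT            0x00000001
#define ATC_ATS_STATUS__DEADLOCK_DETECTION_MASK   0x00000004L
#define ATC_ATS_STATUS__DEADLOCK_DETECTION__SHIFT 0x00000002

/* Hypothetical MMIO read: in the real driver a DWORD offset `off` maps to
 * byte address off * 4 inside the register BAR; here it returns a canned
 * value so the sketch runs anywhere. */
static uint32_t read_mmio_dword(uint32_t off)
{
	(void)off;
	return 0x00000005; /* sample value: BUSY and DEADLOCK_DETECTION set */
}

int main(void)
{
	/* mmATC_ATS_STATUS itself is defined earlier in gmc_6_0_d.h, outside
	 * this excerpt; 0 is a placeholder, not the real offset. */
	uint32_t v = read_mmio_dword(0);

	/* Mask isolates the field's bits; shift moves them down to bit 0. */
	printf("busy=%u crashed=%u deadlock=%u\n",
	       (unsigned)((v & ATC_ATS_STATUS__BUSY_MASK) >>
			  ATC_ATS_STATUS__BUSY__SHIFT),
	       (unsigned)((v & ATC_ATS_STATUS__CRASHED_MASK) >>
			  ATC_ATS_STATUS__CRASHED__SHIFT),
	       (unsigned)((v & ATC_ATS_STATUS__DEADLOCK_DETECTION_MASK) >>
			  ATC_ATS_STATUS__DEADLOCK_DETECTION__SHIFT));
	return 0;
}

The same pattern applies to every register in these headers: the mm* offset names the register, and each MASK/SHIFT pair isolates one field of its 32-bit value.
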
diff --git a/drivers/gpu/drm/amd/include/asic_reg/gmc/gmc_6_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/gmc/gmc_6_0_sh_mask.h
new file mode 100644
index 000000000000..0f6c6c8d089b
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/gmc/gmc_6_0_sh_mask.h
@@ -0,0 +1,11895 @@
+/*
+ *
+ * Copyright (C) 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef GMC_6_0_SH_MASK_H
+#define GMC_6_0_SH_MASK_H
+
+#define ATC_ATS_CNTL__CREDITS_ATS_RPB_MASK 0x00003f00L
+#define ATC_ATS_CNTL__CREDITS_ATS_RPB__SHIFT 0x00000008
+#define ATC_ATS_CNTL__DEBUG_ECO_MASK 0x000f0000L
+#define ATC_ATS_CNTL__DEBUG_ECO__SHIFT 0x00000010
+#define ATC_ATS_CNTL__DISABLE_ATC_MASK 0x00000001L
+#define ATC_ATS_CNTL__DISABLE_ATC__SHIFT 0x00000000
+#define ATC_ATS_CNTL__DISABLE_PASID_MASK 0x00000004L
+#define ATC_ATS_CNTL__DISABLE_PASID__SHIFT 0x00000002
+#define ATC_ATS_CNTL__DISABLE_PRI_MASK 0x00000002L
+#define ATC_ATS_CNTL__DISABLE_PRI__SHIFT 0x00000001
+#define ATC_ATS_DEBUG__ADDRESS_TRANSLATION_REQUEST_WRITE_PERMS_MASK 0x00000004L
+#define ATC_ATS_DEBUG__ADDRESS_TRANSLATION_REQUEST_WRITE_PERMS__SHIFT 0x00000002
+#define ATC_ATS_DEBUG__DISALLOW_ERR_TO_DONE_MASK 0x00004000L
+#define ATC_ATS_DEBUG__DISALLOW_ERR_TO_DONE__SHIFT 0x0000000e
+#define ATC_ATS_DEBUG__EXE_BIT_MASK 0x00000080L
+#define ATC_ATS_DEBUG__EXE_BIT__SHIFT 0x00000007
+#define ATC_ATS_DEBUG__IDENT_RETURN_MASK 0x00000002L
+#define ATC_ATS_DEBUG__IDENT_RETURN__SHIFT 0x00000001
+#define ATC_ATS_DEBUG__IGNORE_FED_MASK 0x00008000L
+#define ATC_ATS_DEBUG__IGNORE_FED__SHIFT 0x0000000f
+#define ATC_ATS_DEBUG__INVALIDATE_ALL_MASK 0x00000001L
+#define ATC_ATS_DEBUG__INVALIDATE_ALL__SHIFT 0x00000000
+#define ATC_ATS_DEBUG__INVALIDATION_REQUESTS_DISALLOWED_WHEN_ATC_IS_DISABLED_MASK 0x00010000L
+#define ATC_ATS_DEBUG__INVALIDATION_REQUESTS_DISALLOWED_WHEN_ATC_IS_DISABLED__SHIFT 0x00000010
+#define ATC_ATS_DEBUG__NUM_REQUESTS_AT_ERR_MASK 0x00003c00L
+#define ATC_ATS_DEBUG__NUM_REQUESTS_AT_ERR__SHIFT 0x0000000a
+#define ATC_ATS_DEBUG__PAGE_REQUEST_PERMS_MASK 0x00000100L
+#define ATC_ATS_DEBUG__PAGE_REQUEST_PERMS__SHIFT 0x00000008
+#define ATC_ATS_DEBUG__PAGE_REQUESTS_USE_RELAXED_ORDERING_MASK 0x00000020L
+#define ATC_ATS_DEBUG__PAGE_REQUESTS_USE_RELAXED_ORDERING__SHIFT 0x00000005
+#define ATC_ATS_DEBUG__PRIV_BIT_MASK 0x00000040L
+#define ATC_ATS_DEBUG__PRIV_BIT__SHIFT 0x00000006
+#define ATC_ATS_DEBUG__UNTRANSLATED_ONLY_REQUESTS_CARRY_SIZE_MASK 0x00000200L
+#define ATC_ATS_DEBUG__UNTRANSLATED_ONLY_REQUESTS_CARRY_SIZE__SHIFT 0x00000009
+#define ATC_ATS_DEFAULT_PAGE_CNTL__DEFAULT_PAGE_HIGH_MASK 0x0000003cL
+#define ATC_ATS_DEFAULT_PAGE_CNTL__DEFAULT_PAGE_HIGH__SHIFT 0x00000002
+#define ATC_ATS_DEFAULT_PAGE_CNTL__SEND_DEFAULT_PAGE_MASK 0x00000001L
+#define ATC_ATS_DEFAULT_PAGE_CNTL__SEND_DEFAULT_PAGE__SHIFT 0x00000000
+#define ATC_ATS_DEFAULT_PAGE_LOW__DEFAULT_PAGE_MASK 0xffffffffL
+#define ATC_ATS_DEFAULT_PAGE_LOW__DEFAULT_PAGE__SHIFT 0x00000000
+#define ATC_ATS_FAULT_CNTL__FAULT_CRASH_TABLE_MASK 0x03f00000L
+#define ATC_ATS_FAULT_CNTL__FAULT_CRASH_TABLE__SHIFT 0x00000014
+#define ATC_ATS_FAULT_CNTL__FAULT_INTERRUPT_TABLE_MASK 0x0000fc00L
+#define ATC_ATS_FAULT_CNTL__FAULT_INTERRUPT_TABLE__SHIFT 0x0000000a
+#define ATC_ATS_FAULT_CNTL__FAULT_REGISTER_LOG_MASK 0x0000003fL
+#define ATC_ATS_FAULT_CNTL__FAULT_REGISTER_LOG__SHIFT 0x00000000
+#define ATC_ATS_FAULT_DEBUG__ALLOW_SUBSEQUENT_FAULT_STATUS_ADDR_UPDATES_MASK 0x00000100L
+#define ATC_ATS_FAULT_DEBUG__ALLOW_SUBSEQUENT_FAULT_STATUS_ADDR_UPDATES__SHIFT 0x00000008
+#define ATC_ATS_FAULT_DEBUG__CLEAR_FAULT_STATUS_ADDR_MASK 0x00010000L
+#define ATC_ATS_FAULT_DEBUG__CLEAR_FAULT_STATUS_ADDR__SHIFT 0x00000010
+#define ATC_ATS_FAULT_DEBUG__CREDITS_ATS_IH_MASK 0x0000001fL
+#define ATC_ATS_FAULT_DEBUG__CREDITS_ATS_IH__SHIFT 0x00000000
+#define ATC_ATS_FAULT_STATUS_ADDR__PAGE_ADDR_MASK 0xffffffffL
+#define ATC_ATS_FAULT_STATUS_ADDR__PAGE_ADDR__SHIFT 0x00000000
+#define ATC_ATS_FAULT_STATUS_INFO__EXTRA_INFO2_MASK 0x00010000L
+#define ATC_ATS_FAULT_STATUS_INFO__EXTRA_INFO2__SHIFT 0x00000010
+#define ATC_ATS_FAULT_STATUS_INFO__EXTRA_INFO_MASK 0x00008000L
+#define ATC_ATS_FAULT_STATUS_INFO__EXTRA_INFO__SHIFT 0x0000000f
+#define ATC_ATS_FAULT_STATUS_INFO__FAULT_TYPE_MASK 0x0000003fL
+#define ATC_ATS_FAULT_STATUS_INFO__FAULT_TYPE__SHIFT 0x00000000
+#define ATC_ATS_FAULT_STATUS_INFO__INVALIDATION_MASK 0x00020000L
+#define ATC_ATS_FAULT_STATUS_INFO__INVALIDATION__SHIFT 0x00000011
+#define ATC_ATS_FAULT_STATUS_INFO__PAGE_ADDR_HIGH_MASK 0x0f000000L
+#define ATC_ATS_FAULT_STATUS_INFO__PAGE_ADDR_HIGH__SHIFT 0x00000018
+#define ATC_ATS_FAULT_STATUS_INFO__PAGE_REQUEST_MASK 0x00040000L
+#define ATC_ATS_FAULT_STATUS_INFO__PAGE_REQUEST__SHIFT 0x00000012
+#define ATC_ATS_FAULT_STATUS_INFO__STATUS_MASK 0x00f80000L
+#define ATC_ATS_FAULT_STATUS_INFO__STATUS__SHIFT 0x00000013
+#define ATC_ATS_FAULT_STATUS_INFO__VMID_MASK 0x00007c00L
+#define ATC_ATS_FAULT_STATUS_INFO__VMID__SHIFT 0x0000000a
+#define ATC_ATS_STATUS__BUSY_MASK 0x00000001L
+#define ATC_ATS_STATUS__BUSY__SHIFT 0x00000000
+#define ATC_ATS_STATUS__CRASHED_MASK 0x00000002L
+#define ATC_ATS_STATUS__CRASHED__SHIFT 0x00000001
+#define ATC_ATS_STATUS__DEADLOCK_DETECTION_MASK 0x00000004L
+#define ATC_ATS_STATUS__DEADLOCK_DETECTION__SHIFT 0x00000002
+#define ATC_L1_ADDRESS_OFFSET__LOGICAL_ADDRESS_MASK 0xffffffffL
+#define ATC_L1_ADDRESS_OFFSET__LOGICAL_ADDRESS__SHIFT 0x00000000
+#define ATC_L1_CNTL__DONT_NEED_ATS_BEHAVIOR_MASK 0x00000003L
+#define ATC_L1_CNTL__DONT_NEED_ATS_BEHAVIOR__SHIFT 0x00000000
+#define ATC_L1_CNTL__NEED_ATS_BEHAVIOR_MASK 0x00000004L
+#define ATC_L1_CNTL__NEED_ATS_BEHAVIOR__SHIFT 0x00000002
+#define ATC_L1_CNTL__NEED_ATS_SNOOP_DEFAULT_MASK 0x00000010L
+#define ATC_L1_CNTL__NEED_ATS_SNOOP_DEFAULT__SHIFT 0x00000004
+#define ATC_L1RD_DEBUG_TLB__CREDITS_L1_L2_MASK 0x0003f000L
+#define ATC_L1RD_DEBUG_TLB__CREDITS_L1_L2__SHIFT 0x0000000c
+#define ATC_L1RD_DEBUG_TLB__CREDITS_L1_RPB_MASK 0x0ff00000L
+#define ATC_L1RD_DEBUG_TLB__CREDITS_L1_RPB__SHIFT 0x00000014
+#define ATC_L1RD_DEBUG_TLB__DEBUG_ECO_MASK 0x30000000L
+#define ATC_L1RD_DEBUG_TLB__DEBUG_ECO__SHIFT 0x0000001c
+#define ATC_L1RD_DEBUG_TLB__DISABLE_FRAGMENTS_MASK 0x00000001L
+#define ATC_L1RD_DEBUG_TLB__DISABLE_FRAGMENTS__SHIFT 0x00000000
+#define ATC_L1RD_DEBUG_TLB__EFFECTIVE_CAM_SIZE_MASK 0x000000f0L
+#define ATC_L1RD_DEBUG_TLB__EFFECTIVE_CAM_SIZE__SHIFT 0x00000004
+#define ATC_L1RD_DEBUG_TLB__EFFECTIVE_WORK_QUEUE_SIZE_MASK 0x00000700L
+#define ATC_L1RD_DEBUG_TLB__EFFECTIVE_WORK_QUEUE_SIZE__SHIFT 0x00000008
+#define ATC_L1RD_DEBUG_TLB__INVALIDATE_ALL_MASK 0x40000000L
+#define ATC_L1RD_DEBUG_TLB__INVALIDATE_ALL__SHIFT 0x0000001e
+#define ATC_L1RD_STATUS__BAD_NEED_ATS_MASK 0x00000100L
+#define ATC_L1RD_STATUS__BAD_NEED_ATS__SHIFT 0x00000008
+#define ATC_L1RD_STATUS__BUSY_MASK 0x00000001L
+#define ATC_L1RD_STATUS__BUSY__SHIFT 0x00000000
+#define ATC_L1RD_STATUS__DEADLOCK_DETECTION_MASK 0x00000002L
+#define ATC_L1RD_STATUS__DEADLOCK_DETECTION__SHIFT 0x00000001
+#define ATC_L1WR_DEBUG_TLB__CREDITS_L1_L2_MASK 0x0003f000L
+#define ATC_L1WR_DEBUG_TLB__CREDITS_L1_L2__SHIFT 0x0000000c
+#define ATC_L1WR_DEBUG_TLB__CREDITS_L1_RPB_MASK 0x0ff00000L
+#define ATC_L1WR_DEBUG_TLB__CREDITS_L1_RPB__SHIFT 0x00000014
+#define ATC_L1WR_DEBUG_TLB__DEBUG_ECO_MASK 0x30000000L
+#define ATC_L1WR_DEBUG_TLB__DEBUG_ECO__SHIFT 0x0000001c
+#define ATC_L1WR_DEBUG_TLB__DISABLE_FRAGMENTS_MASK 0x00000001L
+#define ATC_L1WR_DEBUG_TLB__DISABLE_FRAGMENTS__SHIFT 0x00000000
+#define ATC_L1WR_DEBUG_TLB__EFFECTIVE_CAM_SIZE_MASK 0x000000f0L
+#define ATC_L1WR_DEBUG_TLB__EFFECTIVE_CAM_SIZE__SHIFT 0x00000004
+#define ATC_L1WR_DEBUG_TLB__EFFECTIVE_WORK_QUEUE_SIZE_MASK 0x00000700L
+#define ATC_L1WR_DEBUG_TLB__EFFECTIVE_WORK_QUEUE_SIZE__SHIFT 0x00000008
+#define ATC_L1WR_DEBUG_TLB__INVALIDATE_ALL_MASK 0x40000000L
+#define ATC_L1WR_DEBUG_TLB__INVALIDATE_ALL__SHIFT 0x0000001e
+#define ATC_L1WR_STATUS__BAD_NEED_ATS_MASK 0x00000100L
+#define ATC_L1WR_STATUS__BAD_NEED_ATS__SHIFT 0x00000008
+#define ATC_L1WR_STATUS__BUSY_MASK 0x00000001L
+#define ATC_L1WR_STATUS__BUSY__SHIFT 0x00000000
+#define ATC_L1WR_STATUS__DEADLOCK_DETECTION_MASK 0x00000002L
+#define ATC_L1WR_STATUS__DEADLOCK_DETECTION__SHIFT 0x00000001
+#define ATC_L2_CNTL__NUMBER_OF_TRANSLATION_READ_REQUESTS_MASK 0x00000003L
+#define ATC_L2_CNTL__NUMBER_OF_TRANSLATION_READ_REQUESTS__SHIFT 0x00000000
+#define ATC_L2_CNTL__NUMBER_OF_TRANSLATION_READS_DEPENDS_ON_ADDR_MOD_MASK 0x00000400L
+#define ATC_L2_CNTL__NUMBER_OF_TRANSLATION_READS_DEPENDS_ON_ADDR_MOD__SHIFT 0x0000000a
+#define ATC_L2_CNTL__NUMBER_OF_TRANSLATION_WRITE_REQUESTS_MASK 0x00000030L
+#define ATC_L2_CNTL__NUMBER_OF_TRANSLATION_WRITE_REQUESTS__SHIFT 0x00000004
+#define ATC_L2_CNTL__NUMBER_OF_TRANSLATION_WRITES_DEPENDS_ON_ADDR_MOD_MASK 0x00000800L
+#define ATC_L2_CNTL__NUMBER_OF_TRANSLATION_WRITES_DEPENDS_ON_ADDR_MOD__SHIFT 0x0000000b
+#define ATC_L2_DEBUG__CREDITS_L2_ATS_MASK 0x0000003fL
+#define ATC_L2_DEBUG__CREDITS_L2_ATS__SHIFT 0x00000000
+#define ATC_MISC_CG__ENABLE_MASK 0x00040000L
+#define ATC_MISC_CG__ENABLE__SHIFT 0x00000012
+#define ATC_MISC_CG__MEM_LS_ENABLE_MASK 0x00080000L
+#define ATC_MISC_CG__MEM_LS_ENABLE__SHIFT 0x00000013
+#define ATC_MISC_CG__OFFDLY_MASK 0x00000fc0L
+#define ATC_MISC_CG__OFFDLY__SHIFT 0x00000006
+#define ATC_VM_APERTURE0_CNTL2__VMIDS_USING_RANGE_MASK 0x0000ffffL
+#define ATC_VM_APERTURE0_CNTL2__VMIDS_USING_RANGE__SHIFT 0x00000000
+#define ATC_VM_APERTURE0_CNTL__ATS_ACCESS_MODE_MASK 0x00000003L
+#define ATC_VM_APERTURE0_CNTL__ATS_ACCESS_MODE__SHIFT 0x00000000
+#define ATC_VM_APERTURE0_HIGH_ADDR__VIRTUAL_PAGE_NUMBER_MASK 0x0fffffffL
+#define ATC_VM_APERTURE0_HIGH_ADDR__VIRTUAL_PAGE_NUMBER__SHIFT 0x00000000
+#define ATC_VM_APERTURE0_LOW_ADDR__VIRTUAL_PAGE_NUMBER_MASK 0x0fffffffL
+#define ATC_VM_APERTURE0_LOW_ADDR__VIRTUAL_PAGE_NUMBER__SHIFT 0x00000000
+#define ATC_VM_APERTURE1_CNTL2__VMIDS_USING_RANGE_MASK 0x0000ffffL
+#define ATC_VM_APERTURE1_CNTL2__VMIDS_USING_RANGE__SHIFT 0x00000000
+#define ATC_VM_APERTURE1_CNTL__ATS_ACCESS_MODE_MASK 0x00000003L
+#define ATC_VM_APERTURE1_CNTL__ATS_ACCESS_MODE__SHIFT 0x00000000
+#define ATC_VM_APERTURE1_HIGH_ADDR__VIRTUAL_PAGE_NUMBER_MASK 0x0fffffffL
+#define ATC_VM_APERTURE1_HIGH_ADDR__VIRTUAL_PAGE_NUMBER__SHIFT 0x00000000
+#define ATC_VM_APERTURE1_LOW_ADDR__VIRTUAL_PAGE_NUMBER_MASK 0x0fffffffL
+#define ATC_VM_APERTURE1_LOW_ADDR__VIRTUAL_PAGE_NUMBER__SHIFT 0x00000000
+#define ATC_VMID0_PASID_MAPPING__PASID_MASK 0x0000ffffL
+#define ATC_VMID0_PASID_MAPPING__PASID__SHIFT 0x00000000
+#define ATC_VMID0_PASID_MAPPING__VALID_MASK 0x80000000L
+#define ATC_VMID0_PASID_MAPPING__VALID__SHIFT 0x0000001f
+#define ATC_VMID10_PASID_MAPPING__PASID_MASK 0x0000ffffL
+#define ATC_VMID10_PASID_MAPPING__PASID__SHIFT 0x00000000
+#define ATC_VMID10_PASID_MAPPING__VALID_MASK 0x80000000L
+#define ATC_VMID10_PASID_MAPPING__VALID__SHIFT 0x0000001f
+#define ATC_VMID11_PASID_MAPPING__PASID_MASK 0x0000ffffL
+#define ATC_VMID11_PASID_MAPPING__PASID__SHIFT 0x00000000
+#define ATC_VMID11_PASID_MAPPING__VALID_MASK 0x80000000L
+#define ATC_VMID11_PASID_MAPPING__VALID__SHIFT 0x0000001f
+#define ATC_VMID12_PASID_MAPPING__PASID_MASK 0x0000ffffL
+#define ATC_VMID12_PASID_MAPPING__PASID__SHIFT 0x00000000
+#define ATC_VMID12_PASID_MAPPING__VALID_MASK 0x80000000L
+#define ATC_VMID12_PASID_MAPPING__VALID__SHIFT 0x0000001f
+#define ATC_VMID13_PASID_MAPPING__PASID_MASK 0x0000ffffL
+#define ATC_VMID13_PASID_MAPPING__PASID__SHIFT 0x00000000
+#define ATC_VMID13_PASID_MAPPING__VALID_MASK 0x80000000L
+#define ATC_VMID13_PASID_MAPPING__VALID__SHIFT 0x0000001f
+#define ATC_VMID14_PASID_MAPPING__PASID_MASK 0x0000ffffL
+#define ATC_VMID14_PASID_MAPPING__PASID__SHIFT 0x00000000
+#define ATC_VMID14_PASID_MAPPING__VALID_MASK 0x80000000L
+#define ATC_VMID14_PASID_MAPPING__VALID__SHIFT 0x0000001f
+#define ATC_VMID15_PASID_MAPPING__PASID_MASK 0x0000ffffL
+#define ATC_VMID15_PASID_MAPPING__PASID__SHIFT 0x00000000
+#define ATC_VMID15_PASID_MAPPING__VALID_MASK 0x80000000L
+#define ATC_VMID15_PASID_MAPPING__VALID__SHIFT 0x0000001f
+#define ATC_VMID1_PASID_MAPPING__PASID_MASK 0x0000ffffL
+#define ATC_VMID1_PASID_MAPPING__PASID__SHIFT 0x00000000
+#define ATC_VMID1_PASID_MAPPING__VALID_MASK 0x80000000L
+#define ATC_VMID1_PASID_MAPPING__VALID__SHIFT 0x0000001f
+#define ATC_VMID2_PASID_MAPPING__PASID_MASK 0x0000ffffL
+#define ATC_VMID2_PASID_MAPPING__PASID__SHIFT 0x00000000
+#define ATC_VMID2_PASID_MAPPING__VALID_MASK 0x80000000L
+#define ATC_VMID2_PASID_MAPPING__VALID__SHIFT 0x0000001f
+#define ATC_VMID3_PASID_MAPPING__PASID_MASK 0x0000ffffL
+#define ATC_VMID3_PASID_MAPPING__PASID__SHIFT 0x00000000
+#define ATC_VMID3_PASID_MAPPING__VALID_MASK 0x80000000L
+#define ATC_VMID3_PASID_MAPPING__VALID__SHIFT 0x0000001f
+#define ATC_VMID4_PASID_MAPPING__PASID_MASK 0x0000ffffL
+#define ATC_VMID4_PASID_MAPPING__PASID__SHIFT 0x00000000
+#define ATC_VMID4_PASID_MAPPING__VALID_MASK 0x80000000L
+#define ATC_VMID4_PASID_MAPPING__VALID__SHIFT 0x0000001f
+#define ATC_VMID5_PASID_MAPPING__PASID_MASK 0x0000ffffL
+#define ATC_VMID5_PASID_MAPPING__PASID__SHIFT 0x00000000
+#define ATC_VMID5_PASID_MAPPING__VALID_MASK 0x80000000L
+#define ATC_VMID5_PASID_MAPPING__VALID__SHIFT 0x0000001f
+#define ATC_VMID6_PASID_MAPPING__PASID_MASK 0x0000ffffL
+#define ATC_VMID6_PASID_MAPPING__PASID__SHIFT 0x00000000
+#define ATC_VMID6_PASID_MAPPING__VALID_MASK 0x80000000L
+#define ATC_VMID6_PASID_MAPPING__VALID__SHIFT 0x0000001f
+#define ATC_VMID7_PASID_MAPPING__PASID_MASK 0x0000ffffL
+#define ATC_VMID7_PASID_MAPPING__PASID__SHIFT 0x00000000
+#define ATC_VMID7_PASID_MAPPING__VALID_MASK 0x80000000L
+#define ATC_VMID7_PASID_MAPPING__VALID__SHIFT 0x0000001f
+#define ATC_VMID8_PASID_MAPPING__PASID_MASK 0x0000ffffL
+#define ATC_VMID8_PASID_MAPPING__PASID__SHIFT 0x00000000
+#define ATC_VMID8_PASID_MAPPING__VALID_MASK 0x80000000L
+#define ATC_VMID8_PASID_MAPPING__VALID__SHIFT 0x0000001f
+#define ATC_VMID9_PASID_MAPPING__PASID_MASK 0x0000ffffL
+#define ATC_VMID9_PASID_MAPPING__PASID__SHIFT 0x00000000
+#define ATC_VMID9_PASID_MAPPING__VALID_MASK 0x80000000L
+#define ATC_VMID9_PASID_MAPPING__VALID__SHIFT 0x0000001f
+#define ATC_VMID_PASID_MAPPING_UPDATE_STATUS__VMID0_REMAPPING_FINISHED_MASK 0x00000001L
+#define ATC_VMID_PASID_MAPPING_UPDATE_STATUS__VMID0_REMAPPING_FINISHED__SHIFT 0x00000000
+#define ATC_VMID_PASID_MAPPING_UPDATE_STATUS__VMID10_REMAPPING_FINISHED_MASK 0x00000400L
+#define ATC_VMID_PASID_MAPPING_UPDATE_STATUS__VMID10_REMAPPING_FINISHED__SHIFT 0x0000000a
+#define ATC_VMID_PASID_MAPPING_UPDATE_STATUS__VMID11_REMAPPING_FINISHED_MASK 0x00000800L
+#define ATC_VMID_PASID_MAPPING_UPDATE_STATUS__VMID11_REMAPPING_FINISHED__SHIFT 0x0000000b
+#define ATC_VMID_PASID_MAPPING_UPDATE_STATUS__VMID12_REMAPPING_FINISHED_MASK 0x00001000L
+#define ATC_VMID_PASID_MAPPING_UPDATE_STATUS__VMID12_REMAPPING_FINISHED__SHIFT 0x0000000c
+#define ATC_VMID_PASID_MAPPING_UPDATE_STATUS__VMID13_REMAPPING_FINISHED_MASK 0x00002000L
+#define ATC_VMID_PASID_MAPPING_UPDATE_STATUS__VMID13_REMAPPING_FINISHED__SHIFT 0x0000000d
+#define ATC_VMID_PASID_MAPPING_UPDATE_STATUS__VMID14_REMAPPING_FINISHED_MASK 0x00004000L
+#define ATC_VMID_PASID_MAPPING_UPDATE_STATUS__VMID14_REMAPPING_FINISHED__SHIFT 0x0000000e
+#define ATC_VMID_PASID_MAPPING_UPDATE_STATUS__VMID15_REMAPPING_FINISHED_MASK 0x00008000L
+#define ATC_VMID_PASID_MAPPING_UPDATE_STATUS__VMID15_REMAPPING_FINISHED__SHIFT 0x0000000f
+#define ATC_VMID_PASID_MAPPING_UPDATE_STATUS__VMID1_REMAPPING_FINISHED_MASK 0x00000002L
+#define ATC_VMID_PASID_MAPPING_UPDATE_STATUS__VMID1_REMAPPING_FINISHED__SHIFT 0x00000001
+#define ATC_VMID_PASID_MAPPING_UPDATE_STATUS__VMID2_REMAPPING_FINISHED_MASK 0x00000004L
+#define ATC_VMID_PASID_MAPPING_UPDATE_STATUS__VMID2_REMAPPING_FINISHED__SHIFT 0x00000002
+#define ATC_VMID_PASID_MAPPING_UPDATE_STATUS__VMID3_REMAPPING_FINISHED_MASK 0x00000008L
+#define ATC_VMID_PASID_MAPPING_UPDATE_STATUS__VMID3_REMAPPING_FINISHED__SHIFT 0x00000003
+#define ATC_VMID_PASID_MAPPING_UPDATE_STATUS__VMID4_REMAPPING_FINISHED_MASK 0x00000010L
+#define ATC_VMID_PASID_MAPPING_UPDATE_STATUS__VMID4_REMAPPING_FINISHED__SHIFT 0x00000004
+#define ATC_VMID_PASID_MAPPING_UPDATE_STATUS__VMID5_REMAPPING_FINISHED_MASK 0x00000020L
+#define ATC_VMID_PASID_MAPPING_UPDATE_STATUS__VMID5_REMAPPING_FINISHED__SHIFT 0x00000005
+#define ATC_VMID_PASID_MAPPING_UPDATE_STATUS__VMID6_REMAPPING_FINISHED_MASK 0x00000040L
+#define ATC_VMID_PASID_MAPPING_UPDATE_STATUS__VMID6_REMAPPING_FINISHED__SHIFT 0x00000006
+#define ATC_VMID_PASID_MAPPING_UPDATE_STATUS__VMID7_REMAPPING_FINISHED_MASK 0x00000080L
+#define ATC_VMID_PASID_MAPPING_UPDATE_STATUS__VMID7_REMAPPING_FINISHED__SHIFT 0x00000007
+#define ATC_VMID_PASID_MAPPING_UPDATE_STATUS__VMID8_REMAPPING_FINISHED_MASK 0x00000100L
+#define ATC_VMID_PASID_MAPPING_UPDATE_STATUS__VMID8_REMAPPING_FINISHED__SHIFT 0x00000008
+#define ATC_VMID_PASID_MAPPING_UPDATE_STATUS__VMID9_REMAPPING_FINISHED_MASK 0x00000200L
+#define ATC_VMID_PASID_MAPPING_UPDATE_STATUS__VMID9_REMAPPING_FINISHED__SHIFT 0x00000009
+#define CC_MC_MAX_CHANNEL__NOOFCHAN_MASK 0x0000001eL
+#define CC_MC_MAX_CHANNEL__NOOFCHAN__SHIFT 0x00000001
+#define DLL_CNTL__DLL_LOCK_TIME_MASK 0x003ff000L
+#define DLL_CNTL__DLL_LOCK_TIME__SHIFT 0x0000000c
+#define DLL_CNTL__DLL_RESET_TIME_MASK 0x000003ffL
+#define DLL_CNTL__DLL_RESET_TIME__SHIFT 0x00000000
+#define DLL_CNTL__MRDCK0_BYPASS_MASK 0x01000000L
+#define DLL_CNTL__MRDCK0_BYPASS__SHIFT 0x00000018
+#define DLL_CNTL__MRDCK1_BYPASS_MASK 0x02000000L
+#define DLL_CNTL__MRDCK1_BYPASS__SHIFT 0x00000019
+#define DLL_CNTL__PWR2_MODE_MASK 0x04000000L
+#define DLL_CNTL__PWR2_MODE__SHIFT 0x0000001a
+#define GMCON_DEBUG__GFX_CLEAR_MASK 0x00000002L
+#define GMCON_DEBUG__GFX_CLEAR__SHIFT 0x00000001
+#define GMCON_DEBUG__GFX_STALL_MASK 0x00000001L
+#define GMCON_DEBUG__GFX_STALL__SHIFT 0x00000000
+#define GMCON_DEBUG__MISC_FLAGS_MASK 0x3ffffffcL
+#define GMCON_DEBUG__MISC_FLAGS__SHIFT 0x00000002
+#define GMCON_MISC2__RENG_MEM_POWER_CTRL_OVERRIDE0_MASK 0x00000007L
+#define GMCON_MISC2__RENG_MEM_POWER_CTRL_OVERRIDE0__SHIFT 0x00000000
+#define GMCON_MISC2__RENG_MEM_POWER_CTRL_OVERRIDE1_MASK 0x00000038L
+#define GMCON_MISC2__RENG_MEM_POWER_CTRL_OVERRIDE1__SHIFT 0x00000003
+#define GMCON_MISC2__RENG_SR_HOLD_THRESHOLD_MASK 0x0000fc00L
+#define GMCON_MISC2__RENG_SR_HOLD_THRESHOLD__SHIFT 0x0000000a
+#define GMCON_MISC2__STCTRL_EXTEND_GMC_OFFLINE_MASK 0x20000000L
+#define GMCON_MISC2__STCTRL_EXTEND_GMC_OFFLINE__SHIFT 0x0000001d
+#define GMCON_MISC2__STCTRL_IGNORE_ARB_BUSY_MASK 0x10000000L
+#define GMCON_MISC2__STCTRL_IGNORE_ARB_BUSY__SHIFT 0x0000001c
+#define GMCON_MISC2__STCTRL_LPT_TARGET_MASK 0x0fff0000L
+#define GMCON_MISC2__STCTRL_LPT_TARGET__SHIFT 0x00000010
+#define GMCON_MISC3__RENG_DISABLE_MCC_MASK 0x0000003fL
+#define GMCON_MISC3__RENG_DISABLE_MCC__SHIFT 0x00000000
+#define GMCON_MISC3__RENG_DISABLE_MCD_MASK 0x00000fc0L
+#define GMCON_MISC3__RENG_DISABLE_MCD__SHIFT 0x00000006
+#define GMCON_MISC3__STCTRL_FORCE_PGFSM_CMD_DONE_MASK 0x00fff000L
+#define GMCON_MISC3__STCTRL_FORCE_PGFSM_CMD_DONE__SHIFT 0x0000000c
+#define GMCON_MISC__ALLOW_DEEP_SLEEP_MODE_MASK 0x30000000L
+#define GMCON_MISC__ALLOW_DEEP_SLEEP_MODE__SHIFT 0x0000001c
+#define GMCON_MISC__CRITICAL_REGS_LOCK_MASK 0x08000000L
+#define GMCON_MISC__CRITICAL_REGS_LOCK__SHIFT 0x0000001b
+#define GMCON_MISC__RENG_EXECUTE_NOW_MODE_MASK 0x00000400L
+#define GMCON_MISC__RENG_EXECUTE_NOW_MODE__SHIFT 0x0000000a
+#define GMCON_MISC__RENG_EXECUTE_ON_REG_UPDATE_MASK 0x00000800L
+#define GMCON_MISC__RENG_EXECUTE_ON_REG_UPDATE__SHIFT 0x0000000b
+#define GMCON_MISC__RENG_SRBM_CREDITS_MCD_MASK 0x0000f000L
+#define GMCON_MISC__RENG_SRBM_CREDITS_MCD__SHIFT 0x0000000c
+#define GMCON_MISC__STCTRL_DISABLE_ALLOW_SR_MASK 0x02000000L
+#define GMCON_MISC__STCTRL_DISABLE_ALLOW_SR__SHIFT 0x00000019
+#define GMCON_MISC__STCTRL_DISABLE_GMC_OFFLINE_MASK 0x04000000L
+#define GMCON_MISC__STCTRL_DISABLE_GMC_OFFLINE__SHIFT 0x0000001a
+#define GMCON_MISC__STCTRL_FORCE_ALLOW_SR_MASK 0x40000000L
+#define GMCON_MISC__STCTRL_FORCE_ALLOW_SR__SHIFT 0x0000001e
+#define GMCON_MISC__STCTRL_GMC_IDLE_THRESHOLD_MASK 0x00060000L
+#define GMCON_MISC__STCTRL_GMC_IDLE_THRESHOLD__SHIFT 0x00000011
+#define GMCON_MISC__STCTRL_IGNORE_ALLOW_STOP_MASK 0x00400000L
+#define GMCON_MISC__STCTRL_IGNORE_ALLOW_STOP__SHIFT 0x00000016
+#define GMCON_MISC__STCTRL_IGNORE_PRE_SR_MASK 0x00200000L
+#define GMCON_MISC__STCTRL_IGNORE_PRE_SR__SHIFT 0x00000015
+#define GMCON_MISC__STCTRL_IGNORE_PROTECTION_FAULT_MASK 0x01000000L
+#define GMCON_MISC__STCTRL_IGNORE_PROTECTION_FAULT__SHIFT 0x00000018
+#define GMCON_MISC__STCTRL_IGNORE_SR_COMMIT_MASK 0x00800000L
+#define GMCON_MISC__STCTRL_IGNORE_SR_COMMIT__SHIFT 0x00000017
+#define GMCON_MISC__STCTRL_SRBM_IDLE_THRESHOLD_MASK 0x00180000L
+#define GMCON_MISC__STCTRL_SRBM_IDLE_THRESHOLD__SHIFT 0x00000013
+#define GMCON_MISC__STCTRL_STUTTER_EN_MASK 0x00010000L
+#define GMCON_MISC__STCTRL_STUTTER_EN__SHIFT 0x00000010
+#define GMCON_PERF_MON_CNTL0__ALLOW_WRAP_MASK 0x10000000L
+#define GMCON_PERF_MON_CNTL0__ALLOW_WRAP__SHIFT 0x0000001c
+#define GMCON_PERF_MON_CNTL0__START_MODE_MASK 0x03000000L
+#define GMCON_PERF_MON_CNTL0__START_MODE__SHIFT 0x00000018
+#define GMCON_PERF_MON_CNTL0__START_THRESH_MASK 0x00000fffL
+#define GMCON_PERF_MON_CNTL0__START_THRESH__SHIFT 0x00000000
+#define GMCON_PERF_MON_CNTL0__STOP_MODE_MASK 0x0c000000L
+#define GMCON_PERF_MON_CNTL0__STOP_MODE__SHIFT 0x0000001a
+#define GMCON_PERF_MON_CNTL0__STOP_THRESH_MASK 0x00fff000L
+#define GMCON_PERF_MON_CNTL0__STOP_THRESH__SHIFT 0x0000000c
+#define GMCON_PERF_MON_CNTL1__MON0_ID_MASK 0x00fc0000L
+#define GMCON_PERF_MON_CNTL1__MON0_ID__SHIFT 0x00000012
+#define GMCON_PERF_MON_CNTL1__MON1_ID_MASK 0x3f000000L
+#define GMCON_PERF_MON_CNTL1__MON1_ID__SHIFT 0x00000018
+#define GMCON_PERF_MON_CNTL1__START_TRIG_ID_MASK 0x00000fc0L
+#define GMCON_PERF_MON_CNTL1__START_TRIG_ID__SHIFT 0x00000006
+#define GMCON_PERF_MON_CNTL1__STOP_TRIG_ID_MASK 0x0003f000L
+#define GMCON_PERF_MON_CNTL1__STOP_TRIG_ID__SHIFT 0x0000000c
+#define GMCON_PERF_MON_CNTL1__THRESH_CNTR_ID_MASK 0x0000003fL
+#define GMCON_PERF_MON_CNTL1__THRESH_CNTR_ID__SHIFT 0x00000000
+#define GMCON_PERF_MON_RSLT0__COUNT_MASK 0xffffffffL
+#define GMCON_PERF_MON_RSLT0__COUNT__SHIFT 0x00000000
+#define GMCON_PERF_MON_RSLT1__COUNT_MASK 0xffffffffL
+#define GMCON_PERF_MON_RSLT1__COUNT__SHIFT 0x00000000
+#define GMCON_PGFSM_CONFIG__FSM_ADDR_MASK 0x000000ffL
+#define GMCON_PGFSM_CONFIG__FSM_ADDR__SHIFT 0x00000000
+#define GMCON_PGFSM_CONFIG__P1_SELECT_MASK 0x00000400L
+#define GMCON_PGFSM_CONFIG__P1_SELECT__SHIFT 0x0000000a
+#define GMCON_PGFSM_CONFIG__P2_SELECT_MASK 0x00000800L
+#define GMCON_PGFSM_CONFIG__P2_SELECT__SHIFT 0x0000000b
+#define GMCON_PGFSM_CONFIG__POWER_DOWN_MASK 0x00000100L
+#define GMCON_PGFSM_CONFIG__POWER_DOWN__SHIFT 0x00000008
+#define GMCON_PGFSM_CONFIG__POWER_UP_MASK 0x00000200L
+#define GMCON_PGFSM_CONFIG__POWER_UP__SHIFT 0x00000009
+#define GMCON_PGFSM_CONFIG__READ_MASK 0x00002000L
+#define GMCON_PGFSM_CONFIG__READ__SHIFT 0x0000000d
+#define GMCON_PGFSM_CONFIG__REG_ADDR_MASK 0xf0000000L
+#define GMCON_PGFSM_CONFIG__REG_ADDR__SHIFT 0x0000001c
+#define GMCON_PGFSM_CONFIG__RSRVD_MASK 0x07ffc000L
+#define GMCON_PGFSM_CONFIG__RSRVD__SHIFT 0x0000000e
+#define GMCON_PGFSM_CONFIG__SRBM_OVERRIDE_MASK 0x08000000L
+#define GMCON_PGFSM_CONFIG__SRBM_OVERRIDE__SHIFT 0x0000001b
+#define GMCON_PGFSM_CONFIG__WRITE_MASK 0x00001000L
+#define GMCON_PGFSM_CONFIG__WRITE__SHIFT 0x0000000c
+#define GMCON_PGFSM_READ__PGFSM_SELECT_MASK 0x0f000000L
+#define GMCON_PGFSM_READ__PGFSM_SELECT__SHIFT 0x00000018
+#define GMCON_PGFSM_READ__READ_VALUE_MASK 0x00ffffffL
+#define GMCON_PGFSM_READ__READ_VALUE__SHIFT 0x00000000
+#define GMCON_PGFSM_READ__SERDES_MASTER_BUSY_MASK 0x10000000L
+#define GMCON_PGFSM_READ__SERDES_MASTER_BUSY__SHIFT 0x0000001c
+#define GMCON_PGFSM_WRITE__WRITE_VALUE_MASK 0xffffffffL
+#define GMCON_PGFSM_WRITE__WRITE_VALUE__SHIFT 0x00000000
+#define GMCON_RENG_EXECUTE__RENG_EXECUTE_DSP_END_PTR_MASK 0x003ff000L
+#define GMCON_RENG_EXECUTE__RENG_EXECUTE_DSP_END_PTR__SHIFT 0x0000000c
+#define GMCON_RENG_EXECUTE__RENG_EXECUTE_END_PTR_MASK 0xffc00000L
+#define GMCON_RENG_EXECUTE__RENG_EXECUTE_END_PTR__SHIFT 0x00000016
+#define GMCON_RENG_EXECUTE__RENG_EXECUTE_NOW_MASK 0x00000002L
+#define GMCON_RENG_EXECUTE__RENG_EXECUTE_NOW__SHIFT 0x00000001
+#define GMCON_RENG_EXECUTE__RENG_EXECUTE_NOW_START_PTR_MASK 0x00000ffcL
+#define GMCON_RENG_EXECUTE__RENG_EXECUTE_NOW_START_PTR__SHIFT 0x00000002
+#define GMCON_RENG_EXECUTE__RENG_EXECUTE_ON_PWR_UP_MASK 0x00000001L
+#define GMCON_RENG_EXECUTE__RENG_EXECUTE_ON_PWR_UP__SHIFT 0x00000000
+#define GMCON_RENG_RAM_DATA__RENG_RAM_DATA_MASK 0xffffffffL
+#define GMCON_RENG_RAM_DATA__RENG_RAM_DATA__SHIFT 0x00000000
+#define GMCON_RENG_RAM_INDEX__RENG_RAM_INDEX_MASK 0x000003ffL
+#define GMCON_RENG_RAM_INDEX__RENG_RAM_INDEX__SHIFT 0x00000000
+#define GMCON_STCTRL_REGISTER_SAVE_EXCL_SET0__STCTRL_REGISTER_SAVE_EXCL0_MASK 0x0000ffffL
+#define GMCON_STCTRL_REGISTER_SAVE_EXCL_SET0__STCTRL_REGISTER_SAVE_EXCL0__SHIFT 0x00000000
+#define GMCON_STCTRL_REGISTER_SAVE_EXCL_SET0__STCTRL_REGISTER_SAVE_EXCL1_MASK 0xffff0000L
+#define GMCON_STCTRL_REGISTER_SAVE_EXCL_SET0__STCTRL_REGISTER_SAVE_EXCL1__SHIFT 0x00000010
+#define GMCON_STCTRL_REGISTER_SAVE_EXCL_SET1__STCTRL_REGISTER_SAVE_EXCL2_MASK 0x0000ffffL
+#define GMCON_STCTRL_REGISTER_SAVE_EXCL_SET1__STCTRL_REGISTER_SAVE_EXCL2__SHIFT 0x00000000
+#define GMCON_STCTRL_REGISTER_SAVE_EXCL_SET1__STCTRL_REGISTER_SAVE_EXCL3_MASK 0xffff0000L
+#define GMCON_STCTRL_REGISTER_SAVE_EXCL_SET1__STCTRL_REGISTER_SAVE_EXCL3__SHIFT 0x00000010
+#define GMCON_STCTRL_REGISTER_SAVE_RANGE0__STCTRL_REGISTER_SAVE_BASE0_MASK 0x0000ffffL
+#define GMCON_STCTRL_REGISTER_SAVE_RANGE0__STCTRL_REGISTER_SAVE_BASE0__SHIFT 0x00000000
+#define GMCON_STCTRL_REGISTER_SAVE_RANGE0__STCTRL_REGISTER_SAVE_LIMIT0_MASK 0xffff0000L
+#define GMCON_STCTRL_REGISTER_SAVE_RANGE0__STCTRL_REGISTER_SAVE_LIMIT0__SHIFT 0x00000010
+#define GMCON_STCTRL_REGISTER_SAVE_RANGE1__STCTRL_REGISTER_SAVE_BASE1_MASK 0x0000ffffL
+#define GMCON_STCTRL_REGISTER_SAVE_RANGE1__STCTRL_REGISTER_SAVE_BASE1__SHIFT 0x00000000
+#define GMCON_STCTRL_REGISTER_SAVE_RANGE1__STCTRL_REGISTER_SAVE_LIMIT1_MASK 0xffff0000L
+#define GMCON_STCTRL_REGISTER_SAVE_RANGE1__STCTRL_REGISTER_SAVE_LIMIT1__SHIFT 0x00000010
+#define GMCON_STCTRL_REGISTER_SAVE_RANGE2__STCTRL_REGISTER_SAVE_BASE2_MASK 0x0000ffffL
+#define GMCON_STCTRL_REGISTER_SAVE_RANGE2__STCTRL_REGISTER_SAVE_BASE2__SHIFT 0x00000000
+#define GMCON_STCTRL_REGISTER_SAVE_RANGE2__STCTRL_REGISTER_SAVE_LIMIT2_MASK 0xffff0000L
+#define GMCON_STCTRL_REGISTER_SAVE_RANGE2__STCTRL_REGISTER_SAVE_LIMIT2__SHIFT 0x00000010
+#define MC_ARB_ADDR_HASH__BANK_XOR_ENABLE_MASK 0x0000000fL
+#define MC_ARB_ADDR_HASH__BANK_XOR_ENABLE__SHIFT 0x00000000
+#define MC_ARB_ADDR_HASH__COL_XOR_MASK 0x00000ff0L
+#define MC_ARB_ADDR_HASH__COL_XOR__SHIFT 0x00000004
+#define MC_ARB_ADDR_HASH__ROW_XOR_MASK 0x0ffff000L
+#define MC_ARB_ADDR_HASH__ROW_XOR__SHIFT 0x0000000c
+#define MC_ARB_AGE_RD__DIVIDE_GROUP0_MASK 0x01000000L
+#define MC_ARB_AGE_RD__DIVIDE_GROUP0__SHIFT 0x00000018
+#define MC_ARB_AGE_RD__DIVIDE_GROUP1_MASK 0x02000000L
+#define MC_ARB_AGE_RD__DIVIDE_GROUP1__SHIFT 0x00000019
+#define MC_ARB_AGE_RD__DIVIDE_GROUP2_MASK 0x04000000L
+#define MC_ARB_AGE_RD__DIVIDE_GROUP2__SHIFT 0x0000001a
+#define MC_ARB_AGE_RD__DIVIDE_GROUP3_MASK 0x08000000L
+#define MC_ARB_AGE_RD__DIVIDE_GROUP3__SHIFT 0x0000001b
+#define MC_ARB_AGE_RD__DIVIDE_GROUP4_MASK 0x10000000L
+#define MC_ARB_AGE_RD__DIVIDE_GROUP4__SHIFT 0x0000001c
+#define MC_ARB_AGE_RD__DIVIDE_GROUP5_MASK 0x20000000L
+#define MC_ARB_AGE_RD__DIVIDE_GROUP5__SHIFT 0x0000001d
+#define MC_ARB_AGE_RD__DIVIDE_GROUP6_MASK 0x40000000L
+#define MC_ARB_AGE_RD__DIVIDE_GROUP6__SHIFT 0x0000001e
+#define MC_ARB_AGE_RD__DIVIDE_GROUP7_MASK 0x80000000L
+#define MC_ARB_AGE_RD__DIVIDE_GROUP7__SHIFT 0x0000001f
+#define MC_ARB_AGE_RD__ENABLE_GROUP0_MASK 0x00010000L
+#define MC_ARB_AGE_RD__ENABLE_GROUP0__SHIFT 0x00000010
+#define MC_ARB_AGE_RD__ENABLE_GROUP1_MASK 0x00020000L
+#define MC_ARB_AGE_RD__ENABLE_GROUP1__SHIFT 0x00000011
+#define MC_ARB_AGE_RD__ENABLE_GROUP2_MASK 0x00040000L
+#define MC_ARB_AGE_RD__ENABLE_GROUP2__SHIFT 0x00000012
+#define MC_ARB_AGE_RD__ENABLE_GROUP3_MASK 0x00080000L
+#define MC_ARB_AGE_RD__ENABLE_GROUP3__SHIFT 0x00000013
+#define MC_ARB_AGE_RD__ENABLE_GROUP4_MASK 0x00100000L
+#define MC_ARB_AGE_RD__ENABLE_GROUP4__SHIFT 0x00000014
+#define MC_ARB_AGE_RD__ENABLE_GROUP5_MASK 0x00200000L
+#define MC_ARB_AGE_RD__ENABLE_GROUP5__SHIFT 0x00000015
+#define MC_ARB_AGE_RD__ENABLE_GROUP6_MASK 0x00400000L
+#define MC_ARB_AGE_RD__ENABLE_GROUP6__SHIFT 0x00000016
+#define MC_ARB_AGE_RD__ENABLE_GROUP7_MASK 0x00800000L
+#define MC_ARB_AGE_RD__ENABLE_GROUP7__SHIFT 0x00000017
+#define MC_ARB_AGE_RD__RATE_GROUP0_MASK 0x00000003L
+#define MC_ARB_AGE_RD__RATE_GROUP0__SHIFT 0x00000000
+#define MC_ARB_AGE_RD__RATE_GROUP1_MASK 0x0000000cL
+#define MC_ARB_AGE_RD__RATE_GROUP1__SHIFT 0x00000002
+#define MC_ARB_AGE_RD__RATE_GROUP2_MASK 0x00000030L
+#define MC_ARB_AGE_RD__RATE_GROUP2__SHIFT 0x00000004
+#define MC_ARB_AGE_RD__RATE_GROUP3_MASK 0x000000c0L
+#define MC_ARB_AGE_RD__RATE_GROUP3__SHIFT 0x00000006
+#define MC_ARB_AGE_RD__RATE_GROUP4_MASK 0x00000300L
+#define MC_ARB_AGE_RD__RATE_GROUP4__SHIFT 0x00000008
+#define MC_ARB_AGE_RD__RATE_GROUP5_MASK 0x00000c00L
+#define MC_ARB_AGE_RD__RATE_GROUP5__SHIFT 0x0000000a
+#define MC_ARB_AGE_RD__RATE_GROUP6_MASK 0x00003000L
+#define MC_ARB_AGE_RD__RATE_GROUP6__SHIFT 0x0000000c
+#define MC_ARB_AGE_RD__RATE_GROUP7_MASK 0x0000c000L
+#define MC_ARB_AGE_RD__RATE_GROUP7__SHIFT 0x0000000e
+#define MC_ARB_AGE_WR__DIVIDE_GROUP0_MASK 0x01000000L
+#define MC_ARB_AGE_WR__DIVIDE_GROUP0__SHIFT 0x00000018
+#define MC_ARB_AGE_WR__DIVIDE_GROUP1_MASK 0x02000000L
+#define MC_ARB_AGE_WR__DIVIDE_GROUP1__SHIFT 0x00000019
+#define MC_ARB_AGE_WR__DIVIDE_GROUP2_MASK 0x04000000L
+#define MC_ARB_AGE_WR__DIVIDE_GROUP2__SHIFT 0x0000001a
+#define MC_ARB_AGE_WR__DIVIDE_GROUP3_MASK 0x08000000L
+#define MC_ARB_AGE_WR__DIVIDE_GROUP3__SHIFT 0x0000001b
+#define MC_ARB_AGE_WR__DIVIDE_GROUP4_MASK 0x10000000L
+#define MC_ARB_AGE_WR__DIVIDE_GROUP4__SHIFT 0x0000001c
+#define MC_ARB_AGE_WR__DIVIDE_GROUP5_MASK 0x20000000L
+#define MC_ARB_AGE_WR__DIVIDE_GROUP5__SHIFT 0x0000001d
+#define MC_ARB_AGE_WR__DIVIDE_GROUP6_MASK 0x40000000L
+#define MC_ARB_AGE_WR__DIVIDE_GROUP6__SHIFT 0x0000001e
+#define MC_ARB_AGE_WR__DIVIDE_GROUP7_MASK 0x80000000L
+#define MC_ARB_AGE_WR__DIVIDE_GROUP7__SHIFT 0x0000001f
+#define MC_ARB_AGE_WR__ENABLE_GROUP0_MASK 0x00010000L
+#define MC_ARB_AGE_WR__ENABLE_GROUP0__SHIFT 0x00000010
+#define MC_ARB_AGE_WR__ENABLE_GROUP1_MASK 0x00020000L
+#define MC_ARB_AGE_WR__ENABLE_GROUP1__SHIFT 0x00000011
+#define MC_ARB_AGE_WR__ENABLE_GROUP2_MASK 0x00040000L
+#define MC_ARB_AGE_WR__ENABLE_GROUP2__SHIFT 0x00000012
+#define MC_ARB_AGE_WR__ENABLE_GROUP3_MASK 0x00080000L
+#define MC_ARB_AGE_WR__ENABLE_GROUP3__SHIFT 0x00000013
+#define MC_ARB_AGE_WR__ENABLE_GROUP4_MASK 0x00100000L
+#define MC_ARB_AGE_WR__ENABLE_GROUP4__SHIFT 0x00000014
+#define MC_ARB_AGE_WR__ENABLE_GROUP5_MASK 0x00200000L
+#define MC_ARB_AGE_WR__ENABLE_GROUP5__SHIFT 0x00000015
+#define MC_ARB_AGE_WR__ENABLE_GROUP6_MASK 0x00400000L
+#define MC_ARB_AGE_WR__ENABLE_GROUP6__SHIFT 0x00000016
+#define MC_ARB_AGE_WR__ENABLE_GROUP7_MASK 0x00800000L
+#define MC_ARB_AGE_WR__ENABLE_GROUP7__SHIFT 0x00000017
+#define MC_ARB_AGE_WR__RATE_GROUP0_MASK 0x00000003L
+#define MC_ARB_AGE_WR__RATE_GROUP0__SHIFT 0x00000000
+#define MC_ARB_AGE_WR__RATE_GROUP1_MASK 0x0000000cL
+#define MC_ARB_AGE_WR__RATE_GROUP1__SHIFT 0x00000002
+#define MC_ARB_AGE_WR__RATE_GROUP2_MASK 0x00000030L
+#define MC_ARB_AGE_WR__RATE_GROUP2__SHIFT 0x00000004
+#define MC_ARB_AGE_WR__RATE_GROUP3_MASK 0x000000c0L
+#define MC_ARB_AGE_WR__RATE_GROUP3__SHIFT 0x00000006
+#define MC_ARB_AGE_WR__RATE_GROUP4_MASK 0x00000300L
+#define MC_ARB_AGE_WR__RATE_GROUP4__SHIFT 0x00000008
+#define MC_ARB_AGE_WR__RATE_GROUP5_MASK 0x00000c00L
+#define MC_ARB_AGE_WR__RATE_GROUP5__SHIFT 0x0000000a
+#define MC_ARB_AGE_WR__RATE_GROUP6_MASK 0x00003000L
+#define MC_ARB_AGE_WR__RATE_GROUP6__SHIFT 0x0000000c
+#define MC_ARB_AGE_WR__RATE_GROUP7_MASK 0x0000c000L
+#define MC_ARB_AGE_WR__RATE_GROUP7__SHIFT 0x0000000e
+#define MC_ARB_BANKMAP__BANK0_MASK 0x0000000fL
+#define MC_ARB_BANKMAP__BANK0__SHIFT 0x00000000
+#define MC_ARB_BANKMAP__BANK1_MASK 0x000000f0L
+#define MC_ARB_BANKMAP__BANK1__SHIFT 0x00000004
+#define MC_ARB_BANKMAP__BANK2_MASK 0x00000f00L
+#define MC_ARB_BANKMAP__BANK2__SHIFT 0x00000008
+#define MC_ARB_BANKMAP__BANK3_MASK 0x0000f000L
+#define MC_ARB_BANKMAP__BANK3__SHIFT 0x0000000c
+#define MC_ARB_BANKMAP__RANK_MASK 0x000f0000L
+#define MC_ARB_BANKMAP__RANK__SHIFT 0x00000010
+#define MC_ARB_BURST_TIME__STATE0_MASK 0x0000001fL
+#define MC_ARB_BURST_TIME__STATE0__SHIFT 0x00000000
+#define MC_ARB_BURST_TIME__STATE1_MASK 0x000003e0L
+#define MC_ARB_BURST_TIME__STATE1__SHIFT 0x00000005
+#define MC_ARB_BURST_TIME__STATE2_MASK 0x00007c00L
+#define MC_ARB_BURST_TIME__STATE2__SHIFT 0x0000000a
+#define MC_ARB_BURST_TIME__STATE3_MASK 0x000f8000L
+#define MC_ARB_BURST_TIME__STATE3__SHIFT 0x0000000f
+#define MC_ARB_CAC_CNTL__ALLOW_OVERFLOW_MASK 0x00002000L
+#define MC_ARB_CAC_CNTL__ALLOW_OVERFLOW__SHIFT 0x0000000d
+#define MC_ARB_CAC_CNTL__ENABLE_MASK 0x00000001L
+#define MC_ARB_CAC_CNTL__ENABLE__SHIFT 0x00000000
+#define MC_ARB_CAC_CNTL__READ_WEIGHT_MASK 0x0000007eL
+#define MC_ARB_CAC_CNTL__READ_WEIGHT__SHIFT 0x00000001
+#define MC_ARB_CAC_CNTL__WRITE_WEIGHT_MASK 0x00001f80L
+#define MC_ARB_CAC_CNTL__WRITE_WEIGHT__SHIFT 0x00000007
+#define MC_ARB_CG__CG_ARB_REQ_MASK 0x000000ffL
+#define MC_ARB_CG__CG_ARB_REQ__SHIFT 0x00000000
+#define MC_ARB_CG__CG_ARB_RESP_MASK 0x0000ff00L
+#define MC_ARB_CG__CG_ARB_RESP__SHIFT 0x00000008
+#define MC_ARB_DRAM_TIMING_1__ACTRD_MASK 0x000000ffL
+#define MC_ARB_DRAM_TIMING_1__ACTRD__SHIFT 0x00000000
+#define MC_ARB_DRAM_TIMING_1__ACTWR_MASK 0x0000ff00L
+#define MC_ARB_DRAM_TIMING_1__ACTWR__SHIFT 0x00000008
+#define MC_ARB_DRAM_TIMING_1__RASMACTRD_MASK 0x00ff0000L
+#define MC_ARB_DRAM_TIMING_1__RASMACTRD__SHIFT 0x00000010
+#define MC_ARB_DRAM_TIMING_1__RASMACTWR_MASK 0xff000000L
+#define MC_ARB_DRAM_TIMING_1__RASMACTWR__SHIFT 0x00000018
+#define MC_ARB_DRAM_TIMING2_1__BUS_TURN_MASK 0x1f000000L
+#define MC_ARB_DRAM_TIMING2_1__BUS_TURN__SHIFT 0x00000018
+#define MC_ARB_DRAM_TIMING2_1__RAS2RAS_MASK 0x000000ffL
+#define MC_ARB_DRAM_TIMING2_1__RAS2RAS__SHIFT 0x00000000
+#define MC_ARB_DRAM_TIMING2_1__RP_MASK 0x0000ff00L
+#define MC_ARB_DRAM_TIMING2_1__RP__SHIFT 0x00000008
+#define MC_ARB_DRAM_TIMING2_1__WRPLUSRP_MASK 0x00ff0000L
+#define MC_ARB_DRAM_TIMING2_1__WRPLUSRP__SHIFT 0x00000010
+#define MC_ARB_DRAM_TIMING2__BUS_TURN_MASK 0x1f000000L
+#define MC_ARB_DRAM_TIMING2__BUS_TURN__SHIFT 0x00000018
+#define MC_ARB_DRAM_TIMING2__RAS2RAS_MASK 0x000000ffL
+#define MC_ARB_DRAM_TIMING2__RAS2RAS__SHIFT 0x00000000
+#define MC_ARB_DRAM_TIMING2__RP_MASK 0x0000ff00L
+#define MC_ARB_DRAM_TIMING2__RP__SHIFT 0x00000008
+#define MC_ARB_DRAM_TIMING2__WRPLUSRP_MASK 0x00ff0000L
+#define MC_ARB_DRAM_TIMING2__WRPLUSRP__SHIFT 0x00000010
+#define MC_ARB_DRAM_TIMING__ACTRD_MASK 0x000000ffL
+#define MC_ARB_DRAM_TIMING__ACTRD__SHIFT 0x00000000
+#define MC_ARB_DRAM_TIMING__ACTWR_MASK 0x0000ff00L
+#define MC_ARB_DRAM_TIMING__ACTWR__SHIFT 0x00000008
+#define MC_ARB_DRAM_TIMING__RASMACTRD_MASK 0x00ff0000L
+#define MC_ARB_DRAM_TIMING__RASMACTRD__SHIFT 0x00000010
+#define MC_ARB_DRAM_TIMING__RASMACTWR_MASK 0xff000000L
+#define MC_ARB_DRAM_TIMING__RASMACTWR__SHIFT 0x00000018
+#define MC_ARB_FED_CNTL__KEEP_POISON_IN_PAGE_MASK 0x00000010L
+#define MC_ARB_FED_CNTL__KEEP_POISON_IN_PAGE__SHIFT 0x00000004
+#define MC_ARB_FED_CNTL__MODE_MASK 0x00000003L
+#define MC_ARB_FED_CNTL__MODE__SHIFT 0x00000000
+#define MC_ARB_FED_CNTL__WR_ERR_MASK 0x0000000cL
+#define MC_ARB_FED_CNTL__WR_ERR__SHIFT 0x00000002
+#define MC_ARB_GDEC_RD_CNTL__PAGEBIT0_MASK 0x0000000fL
+#define MC_ARB_GDEC_RD_CNTL__PAGEBIT0__SHIFT 0x00000000
+#define MC_ARB_GDEC_RD_CNTL__PAGEBIT1_MASK 0x000000f0L
+#define MC_ARB_GDEC_RD_CNTL__PAGEBIT1__SHIFT 0x00000004
+#define MC_ARB_GDEC_RD_CNTL__REM_DEFAULT_GRP_MASK 0x00003c00L
+#define MC_ARB_GDEC_RD_CNTL__REM_DEFAULT_GRP__SHIFT 0x0000000a
+#define MC_ARB_GDEC_RD_CNTL__USE_RANK_MASK 0x00000100L
+#define MC_ARB_GDEC_RD_CNTL__USE_RANK__SHIFT 0x00000008
+#define MC_ARB_GDEC_RD_CNTL__USE_RSNO_MASK 0x00000200L
+#define MC_ARB_GDEC_RD_CNTL__USE_RSNO__SHIFT 0x00000009
+#define MC_ARB_GDEC_WR_CNTL__PAGEBIT0_MASK 0x0000000fL
+#define MC_ARB_GDEC_WR_CNTL__PAGEBIT0__SHIFT 0x00000000
+#define MC_ARB_GDEC_WR_CNTL__PAGEBIT1_MASK 0x000000f0L
+#define MC_ARB_GDEC_WR_CNTL__PAGEBIT1__SHIFT 0x00000004
+#define MC_ARB_GDEC_WR_CNTL__REM_DEFAULT_GRP_MASK 0x00003c00L
+#define MC_ARB_GDEC_WR_CNTL__REM_DEFAULT_GRP__SHIFT 0x0000000a
+#define MC_ARB_GDEC_WR_CNTL__USE_RANK_MASK 0x00000100L
+#define MC_ARB_GDEC_WR_CNTL__USE_RANK__SHIFT 0x00000008
+#define MC_ARB_GDEC_WR_CNTL__USE_RSNO_MASK 0x00000200L
+#define MC_ARB_GDEC_WR_CNTL__USE_RSNO__SHIFT 0x00000009
+#define MC_ARB_GECC2_CLI__NO_GECC_CLI0_MASK 0x000000ffL
+#define MC_ARB_GECC2_CLI__NO_GECC_CLI0__SHIFT 0x00000000
+#define MC_ARB_GECC2_CLI__NO_GECC_CLI1_MASK 0x0000ff00L
+#define MC_ARB_GECC2_CLI__NO_GECC_CLI1__SHIFT 0x00000008
+#define MC_ARB_GECC2_CLI__NO_GECC_CLI2_MASK 0x00ff0000L
+#define MC_ARB_GECC2_CLI__NO_GECC_CLI2__SHIFT 0x00000010
+#define MC_ARB_GECC2_CLI__NO_GECC_CLI3_MASK 0xff000000L
+#define MC_ARB_GECC2_CLI__NO_GECC_CLI3__SHIFT 0x00000018
+#define MC_ARB_GECC2__CLOSE_BANK_RMW_MASK 0x00004000L
+#define MC_ARB_GECC2__CLOSE_BANK_RMW__SHIFT 0x0000000e
+#define MC_ARB_GECC2__COLFIFO_WATER_MASK 0x001f8000L
+#define MC_ARB_GECC2__COLFIFO_WATER__SHIFT 0x0000000f
+#define MC_ARB_GECC2_DEBUG2__ERR0_START_MASK 0x0000ff00L
+#define MC_ARB_GECC2_DEBUG2__ERR0_START__SHIFT 0x00000008
+#define MC_ARB_GECC2_DEBUG2__ERR1_START_MASK 0x00ff0000L
+#define MC_ARB_GECC2_DEBUG2__ERR1_START__SHIFT 0x00000010
+#define MC_ARB_GECC2_DEBUG2__ERR2_START_MASK 0xff000000L
+#define MC_ARB_GECC2_DEBUG2__ERR2_START__SHIFT 0x00000018
+#define MC_ARB_GECC2_DEBUG2__PERIOD_MASK 0x000000ffL
+#define MC_ARB_GECC2_DEBUG2__PERIOD__SHIFT 0x00000000
+#define MC_ARB_GECC2_DEBUG__DATA_FIELD_MASK 0x00000018L
+#define MC_ARB_GECC2_DEBUG__DATA_FIELD__SHIFT 0x00000003
+#define MC_ARB_GECC2_DEBUG__DIRECTION_MASK 0x00000004L
+#define MC_ARB_GECC2_DEBUG__DIRECTION__SHIFT 0x00000002
+#define MC_ARB_GECC2_DEBUG__NUM_ERR_BITS_MASK 0x00000003L
+#define MC_ARB_GECC2_DEBUG__NUM_ERR_BITS__SHIFT 0x00000000
+#define MC_ARB_GECC2_DEBUG__SW_INJECTION_MASK 0x00000020L
+#define MC_ARB_GECC2_DEBUG__SW_INJECTION__SHIFT 0x00000005
+#define MC_ARB_GECC2__ECC_MODE_MASK 0x00000006L
+#define MC_ARB_GECC2__ECC_MODE__SHIFT 0x00000001
+#define MC_ARB_GECC2__ENABLE_MASK 0x00000001L
+#define MC_ARB_GECC2__ENABLE__SHIFT 0x00000000
+#define MC_ARB_GECC2__EXOR_BANK_SEL_MASK 0x00000060L
+#define MC_ARB_GECC2__EXOR_BANK_SEL__SHIFT 0x00000005
+#define MC_ARB_GECC2_MISC__STREAK_BREAK_MASK 0x0000000fL
+#define MC_ARB_GECC2_MISC__STREAK_BREAK__SHIFT 0x00000000
+#define MC_ARB_GECC2__NO_GECC_CLI_MASK 0x00000780L
+#define MC_ARB_GECC2__NO_GECC_CLI__SHIFT 0x00000007
+#define MC_ARB_GECC2__PAGE_BIT0_MASK 0x00000018L
+#define MC_ARB_GECC2__PAGE_BIT0__SHIFT 0x00000003
+#define MC_ARB_GECC2__READ_ERR_MASK 0x00003800L
+#define MC_ARB_GECC2__READ_ERR__SHIFT 0x0000000b
+#define MC_ARB_GECC2_STATUS__CORR_CLEAR0_MASK 0x00000100L
+#define MC_ARB_GECC2_STATUS__CORR_CLEAR0__SHIFT 0x00000008
+#define MC_ARB_GECC2_STATUS__CORR_CLEAR1_MASK 0x00001000L
+#define MC_ARB_GECC2_STATUS__CORR_CLEAR1__SHIFT 0x0000000c
+#define MC_ARB_GECC2_STATUS__CORR_STS0_MASK 0x00000001L
+#define MC_ARB_GECC2_STATUS__CORR_STS0__SHIFT 0x00000000
+#define MC_ARB_GECC2_STATUS__CORR_STS1_MASK 0x00000010L
+#define MC_ARB_GECC2_STATUS__CORR_STS1__SHIFT 0x00000004
+#define MC_ARB_GECC2_STATUS__FED_CLEAR0_MASK 0x00000400L
+#define MC_ARB_GECC2_STATUS__FED_CLEAR0__SHIFT 0x0000000a
+#define MC_ARB_GECC2_STATUS__FED_CLEAR1_MASK 0x00004000L
+#define MC_ARB_GECC2_STATUS__FED_CLEAR1__SHIFT 0x0000000e
+#define MC_ARB_GECC2_STATUS__FED_STS0_MASK 0x00000004L
+#define MC_ARB_GECC2_STATUS__FED_STS0__SHIFT 0x00000002
+#define MC_ARB_GECC2_STATUS__FED_STS1_MASK 0x00000040L
+#define MC_ARB_GECC2_STATUS__FED_STS1__SHIFT 0x00000006
+#define MC_ARB_GECC2_STATUS__RSVD0_MASK 0x00000008L
+#define MC_ARB_GECC2_STATUS__RSVD0__SHIFT 0x00000003
+#define MC_ARB_GECC2_STATUS__RSVD1_MASK 0x00000080L
+#define MC_ARB_GECC2_STATUS__RSVD1__SHIFT 0x00000007
+#define MC_ARB_GECC2_STATUS__RSVD2_MASK 0x00000800L
+#define MC_ARB_GECC2_STATUS__RSVD2__SHIFT 0x0000000b
+#define MC_ARB_GECC2_STATUS__UNCORR_CLEAR0_MASK 0x00000200L
+#define MC_ARB_GECC2_STATUS__UNCORR_CLEAR0__SHIFT 0x00000009
+#define MC_ARB_GECC2_STATUS__UNCORR_CLEAR1_MASK 0x00002000L
+#define MC_ARB_GECC2_STATUS__UNCORR_CLEAR1__SHIFT 0x0000000d
+#define MC_ARB_GECC2_STATUS__UNCORR_STS0_MASK 0x00000002L
+#define MC_ARB_GECC2_STATUS__UNCORR_STS0__SHIFT 0x00000001
+#define MC_ARB_GECC2_STATUS__UNCORR_STS1_MASK 0x00000020L
+#define MC_ARB_GECC2_STATUS__UNCORR_STS1__SHIFT 0x00000005
+#define MC_ARB_LAZY0_RD__GROUP0_MASK 0x000000ffL
+#define MC_ARB_LAZY0_RD__GROUP0__SHIFT 0x00000000
+#define MC_ARB_LAZY0_RD__GROUP1_MASK 0x0000ff00L
+#define MC_ARB_LAZY0_RD__GROUP1__SHIFT 0x00000008
+#define MC_ARB_LAZY0_RD__GROUP2_MASK 0x00ff0000L
+#define MC_ARB_LAZY0_RD__GROUP2__SHIFT 0x00000010
+#define MC_ARB_LAZY0_RD__GROUP3_MASK 0xff000000L
+#define MC_ARB_LAZY0_RD__GROUP3__SHIFT 0x00000018
+#define MC_ARB_LAZY0_WR__GROUP0_MASK 0x000000ffL
+#define MC_ARB_LAZY0_WR__GROUP0__SHIFT 0x00000000
+#define MC_ARB_LAZY0_WR__GROUP1_MASK 0x0000ff00L
+#define MC_ARB_LAZY0_WR__GROUP1__SHIFT 0x00000008
+#define MC_ARB_LAZY0_WR__GROUP2_MASK 0x00ff0000L
+#define MC_ARB_LAZY0_WR__GROUP2__SHIFT 0x00000010
+#define MC_ARB_LAZY0_WR__GROUP3_MASK 0xff000000L
+#define MC_ARB_LAZY0_WR__GROUP3__SHIFT 0x00000018
+#define MC_ARB_LAZY1_RD__GROUP4_MASK 0x000000ffL
+#define MC_ARB_LAZY1_RD__GROUP4__SHIFT 0x00000000
+#define MC_ARB_LAZY1_RD__GROUP5_MASK 0x0000ff00L
+#define MC_ARB_LAZY1_RD__GROUP5__SHIFT 0x00000008
+#define MC_ARB_LAZY1_RD__GROUP6_MASK 0x00ff0000L
+#define MC_ARB_LAZY1_RD__GROUP6__SHIFT 0x00000010
+#define MC_ARB_LAZY1_RD__GROUP7_MASK 0xff000000L
+#define MC_ARB_LAZY1_RD__GROUP7__SHIFT 0x00000018
+#define MC_ARB_LAZY1_WR__GROUP4_MASK 0x000000ffL
+#define MC_ARB_LAZY1_WR__GROUP4__SHIFT 0x00000000
+#define MC_ARB_LAZY1_WR__GROUP5_MASK 0x0000ff00L
+#define MC_ARB_LAZY1_WR__GROUP5__SHIFT 0x00000008
+#define MC_ARB_LAZY1_WR__GROUP6_MASK 0x00ff0000L
+#define MC_ARB_LAZY1_WR__GROUP6__SHIFT 0x00000010
+#define MC_ARB_LAZY1_WR__GROUP7_MASK 0xff000000L
+#define MC_ARB_LAZY1_WR__GROUP7__SHIFT 0x00000018
+#define MC_ARB_LM_RD__BANKGROUP_CONFIG_MASK 0x00e00000L
+#define MC_ARB_LM_RD__BANKGROUP_CONFIG__SHIFT 0x00000015
+#define MC_ARB_LM_RD__ENABLE_TWO_LIST_MASK 0x00040000L
+#define MC_ARB_LM_RD__ENABLE_TWO_LIST__SHIFT 0x00000012
+#define MC_ARB_LM_RD__POPIDLE_RST_TWOLIST_MASK 0x00080000L
+#define MC_ARB_LM_RD__POPIDLE_RST_TWOLIST__SHIFT 0x00000013
+#define MC_ARB_LM_RD__SKID1_RST_TWOLIST_MASK 0x00100000L
+#define MC_ARB_LM_RD__SKID1_RST_TWOLIST__SHIFT 0x00000014
+#define MC_ARB_LM_RD__STREAK_BREAK_MASK 0x00010000L
+#define MC_ARB_LM_RD__STREAK_BREAK__SHIFT 0x00000010
+#define MC_ARB_LM_RD__STREAK_LIMIT_MASK 0x000000ffL
+#define MC_ARB_LM_RD__STREAK_LIMIT__SHIFT 0x00000000
+#define MC_ARB_LM_RD__STREAK_LIMIT_UBER_MASK 0x0000ff00L
+#define MC_ARB_LM_RD__STREAK_LIMIT_UBER__SHIFT 0x00000008
+#define MC_ARB_LM_RD__STREAK_UBER_MASK 0x00020000L
+#define MC_ARB_LM_RD__STREAK_UBER__SHIFT 0x00000011
+#define MC_ARB_LM_WR__BANKGROUP_CONFIG_MASK 0x00e00000L
+#define MC_ARB_LM_WR__BANKGROUP_CONFIG__SHIFT 0x00000015
+#define MC_ARB_LM_WR__ENABLE_TWO_LIST_MASK 0x00040000L
+#define MC_ARB_LM_WR__ENABLE_TWO_LIST__SHIFT 0x00000012
+#define MC_ARB_LM_WR__POPIDLE_RST_TWOLIST_MASK 0x00080000L
+#define MC_ARB_LM_WR__POPIDLE_RST_TWOLIST__SHIFT 0x00000013
+#define MC_ARB_LM_WR__SKID1_RST_TWOLIST_MASK 0x00100000L
+#define MC_ARB_LM_WR__SKID1_RST_TWOLIST__SHIFT 0x00000014
+#define MC_ARB_LM_WR__STREAK_BREAK_MASK 0x00010000L
+#define MC_ARB_LM_WR__STREAK_BREAK__SHIFT 0x00000010
+#define MC_ARB_LM_WR__STREAK_LIMIT_MASK 0x000000ffL
+#define MC_ARB_LM_WR__STREAK_LIMIT__SHIFT 0x00000000
+#define MC_ARB_LM_WR__STREAK_LIMIT_UBER_MASK 0x0000ff00L
+#define MC_ARB_LM_WR__STREAK_LIMIT_UBER__SHIFT 0x00000008
+#define MC_ARB_LM_WR__STREAK_UBER_MASK 0x00020000L
+#define MC_ARB_LM_WR__STREAK_UBER__SHIFT 0x00000011
+#define MC_ARB_MINCLKS__ARB_RW_SWITCH_MASK 0x00010000L
+#define MC_ARB_MINCLKS__ARB_RW_SWITCH__SHIFT 0x00000010
+#define MC_ARB_MINCLKS__READ_CLKS_MASK 0x000000ffL
+#define MC_ARB_MINCLKS__READ_CLKS__SHIFT 0x00000000
+#define MC_ARB_MINCLKS__WRITE_CLKS_MASK 0x0000ff00L
+#define MC_ARB_MINCLKS__WRITE_CLKS__SHIFT 0x00000008
+#define MC_ARB_MISC2__ARB_DEBUG29_MASK 0x20000000L
+#define MC_ARB_MISC2__ARB_DEBUG29__SHIFT 0x0000001d
+#define MC_ARB_MISC2__GECC_MASK 0x00040000L
+#define MC_ARB_MISC2__GECC_RST_MASK 0x00080000L
+#define MC_ARB_MISC2__GECC_RST__SHIFT 0x00000013
+#define MC_ARB_MISC2__GECC__SHIFT 0x00000012
+#define MC_ARB_MISC2__GECC_STATUS_MASK 0x00100000L
+#define MC_ARB_MISC2__GECC_STATUS__SHIFT 0x00000014
+#define MC_ARB_MISC2__POP_IDLE_REPLAY_MASK 0x00000800L
+#define MC_ARB_MISC2__POP_IDLE_REPLAY__SHIFT 0x0000000b
+#define MC_ARB_MISC2__RDRET_NO_BP_MASK 0x00002000L
+#define MC_ARB_MISC2__RDRET_NO_BP__SHIFT 0x0000000d
+#define MC_ARB_MISC2__RDRET_NO_REORDERING_MASK 0x00001000L
+#define MC_ARB_MISC2__RDRET_NO_REORDERING__SHIFT 0x0000000c
+#define MC_ARB_MISC2__RDRET_SEQ_SKID_MASK 0x0003c000L
+#define MC_ARB_MISC2__RDRET_SEQ_SKID__SHIFT 0x0000000e
+#define MC_ARB_MISC2__REPLAY_DEBUG_MASK 0x10000000L
+#define MC_ARB_MISC2__REPLAY_DEBUG__SHIFT 0x0000001c
+#define MC_ARB_MISC2__SEQ_RDY_POP_IDLE_MASK 0x40000000L
+#define MC_ARB_MISC2__SEQ_RDY_POP_IDLE__SHIFT 0x0000001e
+#define MC_ARB_MISC2__TAGFIFO_THRESHOLD_MASK 0x01e00000L
+#define MC_ARB_MISC2__TAGFIFO_THRESHOLD__SHIFT 0x00000015
+#define MC_ARB_MISC2__TCCDL4_BANKBIT3_XOR_COLBIT4_MASK 0x00000040L
+#define MC_ARB_MISC2__TCCDL4_BANKBIT3_XOR_COLBIT4__SHIFT 0x00000006
+#define MC_ARB_MISC2__TCCDL4_BANKBIT3_XOR_COLBIT5_MASK 0x00000080L
+#define MC_ARB_MISC2__TCCDL4_BANKBIT3_XOR_COLBIT5__SHIFT 0x00000007
+#define MC_ARB_MISC2__TCCDL4_BANKBIT3_XOR_COLBIT6_MASK 0x00000100L
+#define MC_ARB_MISC2__TCCDL4_BANKBIT3_XOR_COLBIT6__SHIFT 0x00000008
+#define MC_ARB_MISC2__TCCDL4_BANKBIT3_XOR_COLBIT7_MASK 0x00000200L
+#define MC_ARB_MISC2__TCCDL4_BANKBIT3_XOR_COLBIT7__SHIFT 0x00000009
+#define MC_ARB_MISC2__TCCDL4_BANKBIT3_XOR_COLBIT8_MASK 0x00000400L
+#define MC_ARB_MISC2__TCCDL4_BANKBIT3_XOR_COLBIT8__SHIFT 0x0000000a
+#define MC_ARB_MISC2__TCCDL4_BANKBIT3_XOR_ENABLE_MASK 0x00000020L
+#define MC_ARB_MISC2__TCCDL4_BANKBIT3_XOR_ENABLE__SHIFT 0x00000005
+#define MC_ARB_MISC2__TCCDL4_REPLAY_EOB_MASK 0x80000000L
+#define MC_ARB_MISC2__TCCDL4_REPLAY_EOB__SHIFT 0x0000001f
+#define MC_ARB_MISC2__WCDR_REPLAY_MASKCNT_MASK 0x0e000000L
+#define MC_ARB_MISC2__WCDR_REPLAY_MASKCNT__SHIFT 0x00000019
+#define MC_ARB_MISC__CALI_ENABLE_MASK 0x00100000L
+#define MC_ARB_MISC__CALI_ENABLE__SHIFT 0x00000014
+#define MC_ARB_MISC__CALI_RATES_MASK 0x00600000L
+#define MC_ARB_MISC__CALI_RATES__SHIFT 0x00000015
+#define MC_ARB_MISC__CHAN_COUPLE_MASK 0x000007f8L
+#define MC_ARB_MISC__CHAN_COUPLE__SHIFT 0x00000003
+#define MC_ARB_MISC__DISPURG_NOSW2WR_MASK 0x01000000L
+#define MC_ARB_MISC__DISPURG_NOSW2WR__SHIFT 0x00000018
+#define MC_ARB_MISC__DISPURG_STALL_MASK 0x02000000L
+#define MC_ARB_MISC__DISPURG_STALL__SHIFT 0x00000019
+#define MC_ARB_MISC__DISPURG_THROTTLE_MASK 0x3c000000L
+#define MC_ARB_MISC__DISPURG_THROTTLE__SHIFT 0x0000001a
+#define MC_ARB_MISC__DISPURGVLD_NOWRT_MASK 0x00800000L
+#define MC_ARB_MISC__DISPURGVLD_NOWRT__SHIFT 0x00000017
+#define MC_ARB_MISC__HARSHNESS_MASK 0x0007f800L
+#define MC_ARB_MISC__HARSHNESS__SHIFT 0x0000000b
+#define MC_ARB_MISC__IDLE_RFSH_MASK 0x00000002L
+#define MC_ARB_MISC__IDLE_RFSH__SHIFT 0x00000001
+#define MC_ARB_MISC__SMART_RDWR_SW_MASK 0x00080000L
+#define MC_ARB_MISC__SMART_RDWR_SW__SHIFT 0x00000013
+#define MC_ARB_MISC__STICKY_RFSH_MASK 0x00000001L
+#define MC_ARB_MISC__STICKY_RFSH__SHIFT 0x00000000
+#define MC_ARB_MISC__STUTTER_RFSH_MASK 0x00000004L
+#define MC_ARB_MISC__STUTTER_RFSH__SHIFT 0x00000002
+#define MC_ARB_PM_CNTL__BLKOUT_ON_D1_MASK 0x00000020L
+#define MC_ARB_PM_CNTL__BLKOUT_ON_D1__SHIFT 0x00000005
+#define MC_ARB_PM_CNTL__IDLE_CNT_MASK 0x00f00000L
+#define MC_ARB_PM_CNTL__IDLE_CNT__SHIFT 0x00000014
+#define MC_ARB_PM_CNTL__IDLE_ON_D1_MASK 0x00000040L
+#define MC_ARB_PM_CNTL__IDLE_ON_D1__SHIFT 0x00000006
+#define MC_ARB_PM_CNTL__IDLE_ON_D2_MASK 0x00040000L
+#define MC_ARB_PM_CNTL__IDLE_ON_D2__SHIFT 0x00000012
+#define MC_ARB_PM_CNTL__IDLE_ON_D3_MASK 0x00080000L
+#define MC_ARB_PM_CNTL__IDLE_ON_D3__SHIFT 0x00000013
+#define MC_ARB_PM_CNTL__OVERRIDE_CGSTATE_MASK 0x00000003L
+#define MC_ARB_PM_CNTL__OVERRIDE_CGSTATE__SHIFT 0x00000000
+#define MC_ARB_PM_CNTL__OVRR_CGRFSH_MASK 0x00000004L
+#define MC_ARB_PM_CNTL__OVRR_CGRFSH__SHIFT 0x00000002
+#define MC_ARB_PM_CNTL__OVRR_CGSQM_MASK 0x00000008L
+#define MC_ARB_PM_CNTL__OVRR_CGSQM__SHIFT 0x00000003
+#define MC_ARB_PM_CNTL__OVRR_PM_MASK 0x00000080L
+#define MC_ARB_PM_CNTL__OVRR_PM__SHIFT 0x00000007
+#define MC_ARB_PM_CNTL__OVRR_PM_STATE_MASK 0x00000300L
+#define MC_ARB_PM_CNTL__OVRR_PM_STATE__SHIFT 0x00000008
+#define MC_ARB_PM_CNTL__OVRR_RD_MASK 0x00000400L
+#define MC_ARB_PM_CNTL__OVRR_RD__SHIFT 0x0000000a
+#define MC_ARB_PM_CNTL__OVRR_RD_STATE_MASK 0x00000800L
+#define MC_ARB_PM_CNTL__OVRR_RD_STATE__SHIFT 0x0000000b
+#define MC_ARB_PM_CNTL__OVRR_RFSH_MASK 0x00004000L
+#define MC_ARB_PM_CNTL__OVRR_RFSH__SHIFT 0x0000000e
+#define MC_ARB_PM_CNTL__OVRR_RFSH_STATE_MASK 0x00008000L
+#define MC_ARB_PM_CNTL__OVRR_RFSH_STATE__SHIFT 0x0000000f
+#define MC_ARB_PM_CNTL__OVRR_WR_MASK 0x00001000L
+#define MC_ARB_PM_CNTL__OVRR_WR__SHIFT 0x0000000c
+#define MC_ARB_PM_CNTL__OVRR_WR_STATE_MASK 0x00002000L
+#define MC_ARB_PM_CNTL__OVRR_WR_STATE__SHIFT 0x0000000d
+#define MC_ARB_PM_CNTL__SRFSH_ON_D1_MASK 0x00000010L
+#define MC_ARB_PM_CNTL__SRFSH_ON_D1__SHIFT 0x00000004
+#define MC_ARB_POP__ALLOW_EOB_BY_WRRET_STALL_MASK 0x00080000L
+#define MC_ARB_POP__ALLOW_EOB_BY_WRRET_STALL__SHIFT 0x00000013
+#define MC_ARB_POP__ENABLE_ARB_MASK 0x00000001L
+#define MC_ARB_POP__ENABLE_ARB__SHIFT 0x00000000
+#define MC_ARB_POP__ENABLE_TWO_PAGE_MASK 0x00040000L
+#define MC_ARB_POP__ENABLE_TWO_PAGE__SHIFT 0x00000012
+#define MC_ARB_POP__POP_DEPTH_MASK 0x0000003cL
+#define MC_ARB_POP__POP_DEPTH__SHIFT 0x00000002
+#define MC_ARB_POP__QUICK_STOP_MASK 0x00020000L
+#define MC_ARB_POP__QUICK_STOP__SHIFT 0x00000011
+#define MC_ARB_POP__SKID_DEPTH_MASK 0x00007000L
+#define MC_ARB_POP__SKID_DEPTH__SHIFT 0x0000000c
+#define MC_ARB_POP__SPEC_OPEN_MASK 0x00000002L
+#define MC_ARB_POP__SPEC_OPEN__SHIFT 0x00000001
+#define MC_ARB_POP__WAIT_AFTER_RFSH_MASK 0x00018000L
+#define MC_ARB_POP__WAIT_AFTER_RFSH__SHIFT 0x0000000f
+#define MC_ARB_POP__WRDATAINDEX_DEPTH_MASK 0x00000fc0L
+#define MC_ARB_POP__WRDATAINDEX_DEPTH__SHIFT 0x00000006
+#define MC_ARB_RAMCFG__CHANSIZE_MASK 0x00000100L
+#define MC_ARB_RAMCFG__CHANSIZE__SHIFT 0x00000008
+#define MC_ARB_RAMCFG__NOOFBANK_MASK 0x00000003L
+#define MC_ARB_RAMCFG__NOOFBANK__SHIFT 0x00000000
+#define MC_ARB_RAMCFG__NOOFCOLS_MASK 0x000000c0L
+#define MC_ARB_RAMCFG__NOOFCOLS__SHIFT 0x00000006
+#define MC_ARB_RAMCFG__NOOFGROUPS_MASK 0x00001000L
+#define MC_ARB_RAMCFG__NOOFGROUPS__SHIFT 0x0000000c
+#define MC_ARB_RAMCFG__NOOFRANKS_MASK 0x00000004L
+#define MC_ARB_RAMCFG__NOOFRANKS__SHIFT 0x00000002
+#define MC_ARB_RAMCFG__NOOFROWS_MASK 0x00000038L
+#define MC_ARB_RAMCFG__NOOFROWS__SHIFT 0x00000003
+#define MC_ARB_REMREQ__RD_WATER_MASK 0x000000ffL
+#define MC_ARB_REMREQ__RD_WATER__SHIFT 0x00000000
+#define MC_ARB_REMREQ__WR_LAZY_TIMER_MASK 0x00f00000L
+#define MC_ARB_REMREQ__WR_LAZY_TIMER__SHIFT 0x00000014
+#define MC_ARB_REMREQ__WR_MAXBURST_SIZE_MASK 0x000f0000L
+#define MC_ARB_REMREQ__WR_MAXBURST_SIZE__SHIFT 0x00000010
+#define MC_ARB_REMREQ__WR_WATER_MASK 0x0000ff00L
+#define MC_ARB_REMREQ__WR_WATER__SHIFT 0x00000008
+#define MC_ARB_REPLAY__BOS_ENABLE_WAIT_CYC_MASK 0x00000080L
+#define MC_ARB_REPLAY__BOS_ENABLE_WAIT_CYC__SHIFT 0x00000007
+#define MC_ARB_REPLAY__BOS_WAIT_CYC_MASK 0x00007f00L
+#define MC_ARB_REPLAY__BOS_WAIT_CYC__SHIFT 0x00000008
+#define MC_ARB_REPLAY__BREAK_ON_STALL_MASK 0x00000040L
+#define MC_ARB_REPLAY__BREAK_ON_STALL__SHIFT 0x00000006
+#define MC_ARB_REPLAY__ENABLE_RD_MASK 0x00000001L
+#define MC_ARB_REPLAY__ENABLE_RD__SHIFT 0x00000000
+#define MC_ARB_REPLAY__ENABLE_WR_MASK 0x00000002L
+#define MC_ARB_REPLAY__ENABLE_WR__SHIFT 0x00000001
+#define MC_ARB_REPLAY__IGNORE_WR_CDC_MASK 0x00000020L
+#define MC_ARB_REPLAY__IGNORE_WR_CDC__SHIFT 0x00000005
+#define MC_ARB_REPLAY__RAW_ENABLE_MASK 0x00000010L
+#define MC_ARB_REPLAY__RAW_ENABLE__SHIFT 0x00000004
+#define MC_ARB_REPLAY__WAW_ENABLE_MASK 0x00000008L
+#define MC_ARB_REPLAY__WAW_ENABLE__SHIFT 0x00000003
+#define MC_ARB_REPLAY__WRACK_MODE_MASK 0x00000004L
+#define MC_ARB_REPLAY__WRACK_MODE__SHIFT 0x00000002
+#define MC_ARB_RET_CREDITS_RD__DISP_MASK 0x00ff0000L
+#define MC_ARB_RET_CREDITS_RD__DISP__SHIFT 0x00000010
+#define MC_ARB_RET_CREDITS_RD__HUB_MASK 0x0000ff00L
+#define MC_ARB_RET_CREDITS_RD__HUB__SHIFT 0x00000008
+#define MC_ARB_RET_CREDITS_RD__LCL_MASK 0x000000ffL
+#define MC_ARB_RET_CREDITS_RD__LCL__SHIFT 0x00000000
+#define MC_ARB_RET_CREDITS_RD__RETURN_CREDIT_MASK 0xff000000L
+#define MC_ARB_RET_CREDITS_RD__RETURN_CREDIT__SHIFT 0x00000018
+#define MC_ARB_RET_CREDITS_WR__HUB_MASK 0x0000ff00L
+#define MC_ARB_RET_CREDITS_WR__HUB__SHIFT 0x00000008
+#define MC_ARB_RET_CREDITS_WR__LCL_MASK 0x000000ffL
+#define MC_ARB_RET_CREDITS_WR__LCL__SHIFT 0x00000000
+#define MC_ARB_RET_CREDITS_WR__RETURN_CREDIT_MASK 0x00ff0000L
+#define MC_ARB_RET_CREDITS_WR__RETURN_CREDIT__SHIFT 0x00000010
+#define MC_ARB_RET_CREDITS_WR__WRRET_SEQ_SKID_MASK 0x0f000000L
+#define MC_ARB_RET_CREDITS_WR__WRRET_SEQ_SKID__SHIFT 0x00000018
+#define MC_ARB_RFSH_CNTL__ACCUM_MASK 0x00000800L
+#define MC_ARB_RFSH_CNTL__ACCUM__SHIFT 0x0000000b
+#define MC_ARB_RFSH_CNTL__ENABLE_MASK 0x00000001L
+#define MC_ARB_RFSH_CNTL__ENABLE__SHIFT 0x00000000
+#define MC_ARB_RFSH_CNTL__URG0_MASK 0x0000003eL
+#define MC_ARB_RFSH_CNTL__URG0__SHIFT 0x00000001
+#define MC_ARB_RFSH_CNTL__URG1_MASK 0x000007c0L
+#define MC_ARB_RFSH_CNTL__URG1__SHIFT 0x00000006
+#define MC_ARB_RFSH_RATE__POWERMODE0_MASK 0x000000ffL
+#define MC_ARB_RFSH_RATE__POWERMODE0__SHIFT 0x00000000
+#define MC_ARB_RTT_CNTL0__BREAK_ON_HARSH_MASK 0x00000100L
+#define MC_ARB_RTT_CNTL0__BREAK_ON_HARSH__SHIFT 0x00000008
+#define MC_ARB_RTT_CNTL0__BREAK_ON_URGENTRD_MASK 0x00000200L
+#define MC_ARB_RTT_CNTL0__BREAK_ON_URGENTRD__SHIFT 0x00000009
+#define MC_ARB_RTT_CNTL0__BREAK_ON_URGENTWR_MASK 0x00000400L
+#define MC_ARB_RTT_CNTL0__BREAK_ON_URGENTWR__SHIFT 0x0000000a
+#define MC_ARB_RTT_CNTL0__DATA_CNTL_MASK 0x01000000L
+#define MC_ARB_RTT_CNTL0__DATA_CNTL__SHIFT 0x00000018
+#define MC_ARB_RTT_CNTL0__DEBUG_RSV_0_MASK 0x00008000L
+#define MC_ARB_RTT_CNTL0__DEBUG_RSV_0__SHIFT 0x0000000f
+#define MC_ARB_RTT_CNTL0__DEBUG_RSV_1_MASK 0x00010000L
+#define MC_ARB_RTT_CNTL0__DEBUG_RSV_1__SHIFT 0x00000010
+#define MC_ARB_RTT_CNTL0__DEBUG_RSV_2_MASK 0x00020000L
+#define MC_ARB_RTT_CNTL0__DEBUG_RSV_2__SHIFT 0x00000011
+#define MC_ARB_RTT_CNTL0__DEBUG_RSV_3_MASK 0x00040000L
+#define MC_ARB_RTT_CNTL0__DEBUG_RSV_3__SHIFT 0x00000012
+#define MC_ARB_RTT_CNTL0__DEBUG_RSV_4_MASK 0x00080000L
+#define MC_ARB_RTT_CNTL0__DEBUG_RSV_4__SHIFT 0x00000013
+#define MC_ARB_RTT_CNTL0__DEBUG_RSV_5_MASK 0x00100000L
+#define MC_ARB_RTT_CNTL0__DEBUG_RSV_5__SHIFT 0x00000014
+#define MC_ARB_RTT_CNTL0__DEBUG_RSV_6_MASK 0x00200000L
+#define MC_ARB_RTT_CNTL0__DEBUG_RSV_6__SHIFT 0x00000015
+#define MC_ARB_RTT_CNTL0__DEBUG_RSV_7_MASK 0x00400000L
+#define MC_ARB_RTT_CNTL0__DEBUG_RSV_7__SHIFT 0x00000016
+#define MC_ARB_RTT_CNTL0__DEBUG_RSV_8_MASK 0x00800000L
+#define MC_ARB_RTT_CNTL0__DEBUG_RSV_8__SHIFT 0x00000017
+#define MC_ARB_RTT_CNTL0__ENABLE_MASK 0x00000001L
+#define MC_ARB_RTT_CNTL0__ENABLE__SHIFT 0x00000000
+#define MC_ARB_RTT_CNTL0__FLUSH_ON_ENTER_MASK 0x00000010L
+#define MC_ARB_RTT_CNTL0__FLUSH_ON_ENTER__SHIFT 0x00000004
+#define MC_ARB_RTT_CNTL0__HARSH_START_MASK 0x00000020L
+#define MC_ARB_RTT_CNTL0__HARSH_START__SHIFT 0x00000005
+#define MC_ARB_RTT_CNTL0__NEIGHBOR_BIT_MASK 0x02000000L
+#define MC_ARB_RTT_CNTL0__NEIGHBOR_BIT__SHIFT 0x00000019
+#define MC_ARB_RTT_CNTL0__START_IDLE_MASK 0x00000002L
+#define MC_ARB_RTT_CNTL0__START_IDLE__SHIFT 0x00000001
+#define MC_ARB_RTT_CNTL0__START_R2W_MASK 0x0000000cL
+#define MC_ARB_RTT_CNTL0__START_R2W_RFSH_MASK 0x00004000L
+#define MC_ARB_RTT_CNTL0__START_R2W_RFSH__SHIFT 0x0000000e
+#define MC_ARB_RTT_CNTL0__START_R2W__SHIFT 0x00000002
+#define MC_ARB_RTT_CNTL0__TPS_HARSH_PRIORITY_MASK 0x00000040L
+#define MC_ARB_RTT_CNTL0__TPS_HARSH_PRIORITY__SHIFT 0x00000006
+#define MC_ARB_RTT_CNTL0__TRAIN_PERIOD_MASK 0x00003800L
+#define MC_ARB_RTT_CNTL0__TRAIN_PERIOD__SHIFT 0x0000000b
+#define MC_ARB_RTT_CNTL0__TWRT_HARSH_PRIORITY_MASK 0x00000080L
+#define MC_ARB_RTT_CNTL0__TWRT_HARSH_PRIORITY__SHIFT 0x00000007
+#define MC_ARB_RTT_CNTL1__WINDOW_DEC_THRESHOLD_MASK 0x000fe000L
+#define MC_ARB_RTT_CNTL1__WINDOW_DEC_THRESHOLD__SHIFT 0x0000000d
+#define MC_ARB_RTT_CNTL1__WINDOW_INC_THRESHOLD_MASK 0x00001fc0L
+#define MC_ARB_RTT_CNTL1__WINDOW_INC_THRESHOLD__SHIFT 0x00000006
+#define MC_ARB_RTT_CNTL1__WINDOW_SIZE_MASK 0x0000001fL
+#define MC_ARB_RTT_CNTL1__WINDOW_SIZE_MAX_MASK 0x01f00000L
+#define MC_ARB_RTT_CNTL1__WINDOW_SIZE_MAX__SHIFT 0x00000014
+#define MC_ARB_RTT_CNTL1__WINDOW_SIZE_MIN_MASK 0x3e000000L
+#define MC_ARB_RTT_CNTL1__WINDOW_SIZE_MIN__SHIFT 0x00000019
+#define MC_ARB_RTT_CNTL1__WINDOW_SIZE__SHIFT 0x00000000
+#define MC_ARB_RTT_CNTL1__WINDOW_UPDATE_COUNT_MASK 0xc0000000L
+#define MC_ARB_RTT_CNTL1__WINDOW_UPDATE_COUNT__SHIFT 0x0000001e
+#define MC_ARB_RTT_CNTL1__WINDOW_UPDATE_MASK 0x00000020L
+#define MC_ARB_RTT_CNTL1__WINDOW_UPDATE__SHIFT 0x00000005
+#define MC_ARB_RTT_CNTL2__FILTER_CNTL_MASK 0x00002000L
+#define MC_ARB_RTT_CNTL2__FILTER_CNTL__SHIFT 0x0000000d
+#define MC_ARB_RTT_CNTL2__PHASE_ADJUST_SIZE_MASK 0x00001000L
+#define MC_ARB_RTT_CNTL2__PHASE_ADJUST_SIZE__SHIFT 0x0000000c
+#define MC_ARB_RTT_CNTL2__PHASE_ADJUST_THRESHOLD_MASK 0x00000fc0L
+#define MC_ARB_RTT_CNTL2__PHASE_ADJUST_THRESHOLD__SHIFT 0x00000006
+#define MC_ARB_RTT_CNTL2__SAMPLE_CNT_MASK 0x0000003fL
+#define MC_ARB_RTT_CNTL2__SAMPLE_CNT__SHIFT 0x00000000
+#define MC_ARB_RTT_DATA__PATTERN_MASK 0x000000ffL
+#define MC_ARB_RTT_DATA__PATTERN__SHIFT 0x00000000
+#define MC_ARB_RTT_DEBUG__DEBUG_BYTE_CH0_MASK 0x00000003L
+#define MC_ARB_RTT_DEBUG__DEBUG_BYTE_CH0__SHIFT 0x00000000
+#define MC_ARB_RTT_DEBUG__DEBUG_BYTE_CH1_MASK 0x0000000cL
+#define MC_ARB_RTT_DEBUG__DEBUG_BYTE_CH1__SHIFT 0x00000002
+#define MC_ARB_RTT_DEBUG__SHIFTED_PHASE_CH0_MASK 0x00000ff0L
+#define MC_ARB_RTT_DEBUG__SHIFTED_PHASE_CH0__SHIFT 0x00000004
+#define MC_ARB_RTT_DEBUG__SHIFTED_PHASE_CH1_MASK 0x01fe0000L
+#define MC_ARB_RTT_DEBUG__SHIFTED_PHASE_CH1__SHIFT 0x00000011
+#define MC_ARB_RTT_DEBUG__WINDOW_SIZE_CH0_MASK 0x0001f000L
+#define MC_ARB_RTT_DEBUG__WINDOW_SIZE_CH0__SHIFT 0x0000000c
+#define MC_ARB_RTT_DEBUG__WINDOW_SIZE_CH1_MASK 0x3e000000L
+#define MC_ARB_RTT_DEBUG__WINDOW_SIZE_CH1__SHIFT 0x00000019
+#define MC_ARB_SQM_CNTL__DYN_SQM_ENABLE_MASK 0x00000100L
+#define MC_ARB_SQM_CNTL__DYN_SQM_ENABLE__SHIFT 0x00000008
+#define MC_ARB_SQM_CNTL__MIN_PENAL_MASK 0x000000ffL
+#define MC_ARB_SQM_CNTL__MIN_PENAL__SHIFT 0x00000000
+#define MC_ARB_SQM_CNTL__RATIO_DEBUG_MASK 0xff000000L
+#define MC_ARB_SQM_CNTL__RATIO_DEBUG__SHIFT 0x00000018
+#define MC_ARB_SQM_CNTL__RATIO_MASK 0x00ff0000L
+#define MC_ARB_SQM_CNTL__RATIO__SHIFT 0x00000010
+#define MC_ARB_SQM_CNTL__SQM_RESERVE_MASK 0x0000fe00L
+#define MC_ARB_SQM_CNTL__SQM_RESERVE__SHIFT 0x00000009
+#define MC_ARB_TM_CNTL_RD__BANK_SELECT_MASK 0x00000006L
+#define MC_ARB_TM_CNTL_RD__BANK_SELECT__SHIFT 0x00000001
+#define MC_ARB_TM_CNTL_RD__GROUPBY_RANK_MASK 0x00000001L
+#define MC_ARB_TM_CNTL_RD__GROUPBY_RANK__SHIFT 0x00000000
+#define MC_ARB_TM_CNTL_RD__MATCH_BANK_MASK 0x00000010L
+#define MC_ARB_TM_CNTL_RD__MATCH_BANK__SHIFT 0x00000004
+#define MC_ARB_TM_CNTL_RD__MATCH_RANK_MASK 0x00000008L
+#define MC_ARB_TM_CNTL_RD__MATCH_RANK__SHIFT 0x00000003
+#define MC_ARB_TM_CNTL_WR__BANK_SELECT_MASK 0x00000006L
+#define MC_ARB_TM_CNTL_WR__BANK_SELECT__SHIFT 0x00000001
+#define MC_ARB_TM_CNTL_WR__GROUPBY_RANK_MASK 0x00000001L
+#define MC_ARB_TM_CNTL_WR__GROUPBY_RANK__SHIFT 0x00000000
+#define MC_ARB_TM_CNTL_WR__MATCH_BANK_MASK 0x00000010L
+#define MC_ARB_TM_CNTL_WR__MATCH_BANK__SHIFT 0x00000004
+#define MC_ARB_TM_CNTL_WR__MATCH_RANK_MASK 0x00000008L
+#define MC_ARB_TM_CNTL_WR__MATCH_RANK__SHIFT 0x00000003
+#define MC_ARB_WCDR_2__DEBUG_0_MASK 0x00000200L
+#define MC_ARB_WCDR_2__DEBUG_0__SHIFT 0x00000009
+#define MC_ARB_WCDR_2__DEBUG_1_MASK 0x00000400L
+#define MC_ARB_WCDR_2__DEBUG_1__SHIFT 0x0000000a
+#define MC_ARB_WCDR_2__DEBUG_2_MASK 0x00000800L
+#define MC_ARB_WCDR_2__DEBUG_2__SHIFT 0x0000000b
+#define MC_ARB_WCDR_2__DEBUG_3_MASK 0x00001000L
+#define MC_ARB_WCDR_2__DEBUG_3__SHIFT 0x0000000c
+#define MC_ARB_WCDR_2__DEBUG_4_MASK 0x00002000L
+#define MC_ARB_WCDR_2__DEBUG_4__SHIFT 0x0000000d
+#define MC_ARB_WCDR_2__DEBUG_5_MASK 0x00004000L
+#define MC_ARB_WCDR_2__DEBUG_5__SHIFT 0x0000000e
+#define MC_ARB_WCDR_2__WPRE_INC_STEP_MASK 0x0000000fL
+#define MC_ARB_WCDR_2__WPRE_INC_STEP__SHIFT 0x00000000
+#define MC_ARB_WCDR_2__WPRE_MIN_THRESHOLD_MASK 0x000001f0L
+#define MC_ARB_WCDR_2__WPRE_MIN_THRESHOLD__SHIFT 0x00000004
+#define MC_ARB_WCDR__IDLE_BURST_MASK 0x00001f80L
+#define MC_ARB_WCDR__IDLE_BURST_MODE_MASK 0x00002000L
+#define MC_ARB_WCDR__IDLE_BURST_MODE__SHIFT 0x0000000d
+#define MC_ARB_WCDR__IDLE_BURST__SHIFT 0x00000007
+#define MC_ARB_WCDR__IDLE_DEGLITCH_ENABLE_MASK 0x00010000L
+#define MC_ARB_WCDR__IDLE_DEGLITCH_ENABLE__SHIFT 0x00000010
+#define MC_ARB_WCDR__IDLE_ENABLE_MASK 0x00000001L
+#define MC_ARB_WCDR__IDLE_ENABLE__SHIFT 0x00000000
+#define MC_ARB_WCDR__IDLE_PERIOD_MASK 0x0000007cL
+#define MC_ARB_WCDR__IDLE_PERIOD__SHIFT 0x00000002
+#define MC_ARB_WCDR__IDLE_WAKEUP_MASK 0x0000c000L
+#define MC_ARB_WCDR__IDLE_WAKEUP__SHIFT 0x0000000e
+#define MC_ARB_WCDR__SEQ_IDLE_MASK 0x00000002L
+#define MC_ARB_WCDR__SEQ_IDLE__SHIFT 0x00000001
+#define MC_ARB_WCDR__WPRE_ENABLE_MASK 0x00020000L
+#define MC_ARB_WCDR__WPRE_ENABLE__SHIFT 0x00000011
+#define MC_ARB_WCDR__WPRE_INC_READ_MASK 0x02000000L
+#define MC_ARB_WCDR__WPRE_INC_READ__SHIFT 0x00000019
+#define MC_ARB_WCDR__WPRE_INC_SEQIDLE_MASK 0x08000000L
+#define MC_ARB_WCDR__WPRE_INC_SEQIDLE__SHIFT 0x0000001b
+#define MC_ARB_WCDR__WPRE_INC_SKIDIDLE_MASK 0x04000000L
+#define MC_ARB_WCDR__WPRE_INC_SKIDIDLE__SHIFT 0x0000001a
+#define MC_ARB_WCDR__WPRE_MAX_BURST_MASK 0x01c00000L
+#define MC_ARB_WCDR__WPRE_MAX_BURST__SHIFT 0x00000016
+#define MC_ARB_WCDR__WPRE_THRESHOLD_MASK 0x003c0000L
+#define MC_ARB_WCDR__WPRE_THRESHOLD__SHIFT 0x00000012
+#define MC_ARB_WCDR__WPRE_TWOPAGE_MASK 0x10000000L
+#define MC_ARB_WCDR__WPRE_TWOPAGE__SHIFT 0x0000001c
+#define MC_ARB_WTM_CNTL_RD__ALLOW_STUTTER_GRP0_MASK 0x00000008L
+#define MC_ARB_WTM_CNTL_RD__ALLOW_STUTTER_GRP0__SHIFT 0x00000003
+#define MC_ARB_WTM_CNTL_RD__ALLOW_STUTTER_GRP1_MASK 0x00000010L
+#define MC_ARB_WTM_CNTL_RD__ALLOW_STUTTER_GRP1__SHIFT 0x00000004
+#define MC_ARB_WTM_CNTL_RD__ALLOW_STUTTER_GRP2_MASK 0x00000020L
+#define MC_ARB_WTM_CNTL_RD__ALLOW_STUTTER_GRP2__SHIFT 0x00000005
+#define MC_ARB_WTM_CNTL_RD__ALLOW_STUTTER_GRP3_MASK 0x00000040L
+#define MC_ARB_WTM_CNTL_RD__ALLOW_STUTTER_GRP3__SHIFT 0x00000006
+#define MC_ARB_WTM_CNTL_RD__ALLOW_STUTTER_GRP4_MASK 0x00000080L
+#define MC_ARB_WTM_CNTL_RD__ALLOW_STUTTER_GRP4__SHIFT 0x00000007
+#define MC_ARB_WTM_CNTL_RD__ALLOW_STUTTER_GRP5_MASK 0x00000100L
+#define MC_ARB_WTM_CNTL_RD__ALLOW_STUTTER_GRP5__SHIFT 0x00000008
+#define MC_ARB_WTM_CNTL_RD__ALLOW_STUTTER_GRP6_MASK 0x00000200L
+#define MC_ARB_WTM_CNTL_RD__ALLOW_STUTTER_GRP6__SHIFT 0x00000009
+#define MC_ARB_WTM_CNTL_RD__ALLOW_STUTTER_GRP7_MASK 0x00000400L
+#define MC_ARB_WTM_CNTL_RD__ALLOW_STUTTER_GRP7__SHIFT 0x0000000a
+#define MC_ARB_WTM_CNTL_RD__HARSH_PRI_MASK 0x00000004L
+#define MC_ARB_WTM_CNTL_RD__HARSH_PRI__SHIFT 0x00000002
+#define MC_ARB_WTM_CNTL_RD__WTMODE_MASK 0x00000003L
+#define MC_ARB_WTM_CNTL_RD__WTMODE__SHIFT 0x00000000
+#define MC_ARB_WTM_CNTL_WR__ALLOW_STUTTER_GRP0_MASK 0x00000008L
+#define MC_ARB_WTM_CNTL_WR__ALLOW_STUTTER_GRP0__SHIFT 0x00000003
+#define MC_ARB_WTM_CNTL_WR__ALLOW_STUTTER_GRP1_MASK 0x00000010L
+#define MC_ARB_WTM_CNTL_WR__ALLOW_STUTTER_GRP1__SHIFT 0x00000004
+#define MC_ARB_WTM_CNTL_WR__ALLOW_STUTTER_GRP2_MASK 0x00000020L
+#define MC_ARB_WTM_CNTL_WR__ALLOW_STUTTER_GRP2__SHIFT 0x00000005
+#define MC_ARB_WTM_CNTL_WR__ALLOW_STUTTER_GRP3_MASK 0x00000040L
+#define MC_ARB_WTM_CNTL_WR__ALLOW_STUTTER_GRP3__SHIFT 0x00000006
+#define MC_ARB_WTM_CNTL_WR__ALLOW_STUTTER_GRP4_MASK 0x00000080L
+#define MC_ARB_WTM_CNTL_WR__ALLOW_STUTTER_GRP4__SHIFT 0x00000007
+#define MC_ARB_WTM_CNTL_WR__ALLOW_STUTTER_GRP5_MASK 0x00000100L
+#define MC_ARB_WTM_CNTL_WR__ALLOW_STUTTER_GRP5__SHIFT 0x00000008
+#define MC_ARB_WTM_CNTL_WR__ALLOW_STUTTER_GRP6_MASK 0x00000200L
+#define MC_ARB_WTM_CNTL_WR__ALLOW_STUTTER_GRP6__SHIFT 0x00000009
+#define MC_ARB_WTM_CNTL_WR__ALLOW_STUTTER_GRP7_MASK 0x00000400L
+#define MC_ARB_WTM_CNTL_WR__ALLOW_STUTTER_GRP7__SHIFT 0x0000000a
+#define MC_ARB_WTM_CNTL_WR__HARSH_PRI_MASK 0x00000004L
+#define MC_ARB_WTM_CNTL_WR__HARSH_PRI__SHIFT 0x00000002
+#define MC_ARB_WTM_CNTL_WR__WTMODE_MASK 0x00000003L
+#define MC_ARB_WTM_CNTL_WR__WTMODE__SHIFT 0x00000000
+#define MC_ARB_WTM_GRPWT_RD__GRP0_MASK 0x00000003L
+#define MC_ARB_WTM_GRPWT_RD__GRP0__SHIFT 0x00000000
+#define MC_ARB_WTM_GRPWT_RD__GRP1_MASK 0x0000000cL
+#define MC_ARB_WTM_GRPWT_RD__GRP1__SHIFT 0x00000002
+#define MC_ARB_WTM_GRPWT_RD__GRP2_MASK 0x00000030L
+#define MC_ARB_WTM_GRPWT_RD__GRP2__SHIFT 0x00000004
+#define MC_ARB_WTM_GRPWT_RD__GRP3_MASK 0x000000c0L
+#define MC_ARB_WTM_GRPWT_RD__GRP3__SHIFT 0x00000006
+#define MC_ARB_WTM_GRPWT_RD__GRP4_MASK 0x00000300L
+#define MC_ARB_WTM_GRPWT_RD__GRP4__SHIFT 0x00000008
+#define MC_ARB_WTM_GRPWT_RD__GRP5_MASK 0x00000c00L
+#define MC_ARB_WTM_GRPWT_RD__GRP5__SHIFT 0x0000000a
+#define MC_ARB_WTM_GRPWT_RD__GRP6_MASK 0x00003000L
+#define MC_ARB_WTM_GRPWT_RD__GRP6__SHIFT 0x0000000c
+#define MC_ARB_WTM_GRPWT_RD__GRP7_MASK 0x0000c000L
+#define MC_ARB_WTM_GRPWT_RD__GRP7__SHIFT 0x0000000e
+#define MC_ARB_WTM_GRPWT_RD__GRP_EXT_MASK 0x00ff0000L
+#define MC_ARB_WTM_GRPWT_RD__GRP_EXT__SHIFT 0x00000010
+#define MC_ARB_WTM_GRPWT_WR__GRP0_MASK 0x00000003L
+#define MC_ARB_WTM_GRPWT_WR__GRP0__SHIFT 0x00000000
+#define MC_ARB_WTM_GRPWT_WR__GRP1_MASK 0x0000000cL
+#define MC_ARB_WTM_GRPWT_WR__GRP1__SHIFT 0x00000002
+#define MC_ARB_WTM_GRPWT_WR__GRP2_MASK 0x00000030L
+#define MC_ARB_WTM_GRPWT_WR__GRP2__SHIFT 0x00000004
+#define MC_ARB_WTM_GRPWT_WR__GRP3_MASK 0x000000c0L
+#define MC_ARB_WTM_GRPWT_WR__GRP3__SHIFT 0x00000006
+#define MC_ARB_WTM_GRPWT_WR__GRP4_MASK 0x00000300L
+#define MC_ARB_WTM_GRPWT_WR__GRP4__SHIFT 0x00000008
+#define MC_ARB_WTM_GRPWT_WR__GRP5_MASK 0x00000c00L
+#define MC_ARB_WTM_GRPWT_WR__GRP5__SHIFT 0x0000000a
+#define MC_ARB_WTM_GRPWT_WR__GRP6_MASK 0x00003000L
+#define MC_ARB_WTM_GRPWT_WR__GRP6__SHIFT 0x0000000c
+#define MC_ARB_WTM_GRPWT_WR__GRP7_MASK 0x0000c000L
+#define MC_ARB_WTM_GRPWT_WR__GRP7__SHIFT 0x0000000e
+#define MC_ARB_WTM_GRPWT_WR__GRP_EXT_MASK 0x00ff0000L
+#define MC_ARB_WTM_GRPWT_WR__GRP_EXT__SHIFT 0x00000010
+#define MC_BIST_AUTO_CNTL__ADR_GEN_MASK 0x000000f0L
+#define MC_BIST_AUTO_CNTL__ADR_GEN__SHIFT 0x00000004
+#define MC_BIST_AUTO_CNTL__ADR_RESET_MASK 0x02000000L
+#define MC_BIST_AUTO_CNTL__ADR_RESET__SHIFT 0x00000019
+#define MC_BIST_AUTO_CNTL__LFSR_KEY_MASK 0x00ffff00L
+#define MC_BIST_AUTO_CNTL__LFSR_KEY__SHIFT 0x00000008
+#define MC_BIST_AUTO_CNTL__LFSR_RESET_MASK 0x01000000L
+#define MC_BIST_AUTO_CNTL__LFSR_RESET__SHIFT 0x00000018
+#define MC_BIST_AUTO_CNTL__MOP_MASK 0x00000003L
+#define MC_BIST_AUTO_CNTL__MOP__SHIFT 0x00000000
+#define MC_BIST_CMD_CNTL__CMD_ISSUE_LOOP_MASK 0x00000004L
+#define MC_BIST_CMD_CNTL__CMD_ISSUE_LOOP__SHIFT 0x00000002
+#define MC_BIST_CMD_CNTL__CMD_ISSUE_MODE_MASK 0x00000002L
+#define MC_BIST_CMD_CNTL__CMD_ISSUE_MODE__SHIFT 0x00000001
+#define MC_BIST_CMD_CNTL__CMD_ISSUE_MODE_U_MASK 0x00010000L
+#define MC_BIST_CMD_CNTL__CMD_ISSUE_MODE_U__SHIFT 0x00000010
+#define MC_BIST_CMD_CNTL__CMD_ISSUE_RUN_MASK 0x00020000L
+#define MC_BIST_CMD_CNTL__CMD_ISSUE_RUN__SHIFT 0x00000011
+#define MC_BIST_CMD_CNTL__DONE_MASK 0x80000000L
+#define MC_BIST_CMD_CNTL__DONE__SHIFT 0x0000001f
+#define MC_BIST_CMD_CNTL__ENABLE_D0_MASK 0x10000000L
+#define MC_BIST_CMD_CNTL__ENABLE_D0__SHIFT 0x0000001c
+#define MC_BIST_CMD_CNTL__ENABLE_D1_MASK 0x20000000L
+#define MC_BIST_CMD_CNTL__ENABLE_D1__SHIFT 0x0000001d
+#define MC_BIST_CMD_CNTL__LOOP_CNT_MAX_MASK 0x0000fff0L
+#define MC_BIST_CMD_CNTL__LOOP_CNT_MAX__SHIFT 0x00000004
+#define MC_BIST_CMD_CNTL__LOOP_CNT_RD_MASK 0x0ffc0000L
+#define MC_BIST_CMD_CNTL__LOOP_CNT_RD__SHIFT 0x00000012
+#define MC_BIST_CMD_CNTL__LOOP_END_CONDITION_MASK 0x00000008L
+#define MC_BIST_CMD_CNTL__LOOP_END_CONDITION__SHIFT 0x00000003
+#define MC_BIST_CMD_CNTL__RESET_MASK 0x00000001L
+#define MC_BIST_CMD_CNTL__RESET__SHIFT 0x00000000
+#define MC_BIST_CMD_CNTL__STATUS_CH_MASK 0x40000000L
+#define MC_BIST_CMD_CNTL__STATUS_CH__SHIFT 0x0000001e
+#define MC_BIST_CMP_CNTL_2__DATA_STORE_CNT_MASK 0x0000001fL
+#define MC_BIST_CMP_CNTL_2__DATA_STORE_CNT_RST_MASK 0x00000100L
+#define MC_BIST_CMP_CNTL_2__DATA_STORE_CNT_RST__SHIFT 0x00000008
+#define MC_BIST_CMP_CNTL_2__DATA_STORE_CNT__SHIFT 0x00000000
+#define MC_BIST_CMP_CNTL_2__EDC_STORE_CNT_MASK 0x0001f000L
+#define MC_BIST_CMP_CNTL_2__EDC_STORE_CNT_RST_MASK 0x00100000L
+#define MC_BIST_CMP_CNTL_2__EDC_STORE_CNT_RST__SHIFT 0x00000014
+#define MC_BIST_CMP_CNTL_2__EDC_STORE_CNT__SHIFT 0x0000000c
+#define MC_BIST_CMP_CNTL__CMP_MASK 0x00030000L
+#define MC_BIST_CMP_CNTL__CMP_MASK_BIT_MASK 0x00000ff0L
+#define MC_BIST_CMP_CNTL__CMP_MASK_BIT__SHIFT 0x00000004
+#define MC_BIST_CMP_CNTL__CMP_MASK_BYTE_MASK 0x0000000fL
+#define MC_BIST_CMP_CNTL__CMP_MASK_BYTE__SHIFT 0x00000000
+#define MC_BIST_CMP_CNTL__CMP__SHIFT 0x00000010
+#define MC_BIST_CMP_CNTL__DATA_STORE_MODE_MASK 0x00300000L
+#define MC_BIST_CMP_CNTL__DATA_STORE_MODE__SHIFT 0x00000014
+#define MC_BIST_CMP_CNTL__DATA_STORE_SEL_MASK 0x00002000L
+#define MC_BIST_CMP_CNTL__DATA_STORE_SEL__SHIFT 0x0000000d
+#define MC_BIST_CMP_CNTL__DAT_MODE_MASK 0x00040000L
+#define MC_BIST_CMP_CNTL__DAT_MODE__SHIFT 0x00000012
+#define MC_BIST_CMP_CNTL__EDC_STORE_MODE_MASK 0x00080000L
+#define MC_BIST_CMP_CNTL__EDC_STORE_MODE__SHIFT 0x00000013
+#define MC_BIST_CMP_CNTL__EDC_STORE_SEL_MASK 0x00004000L
+#define MC_BIST_CMP_CNTL__EDC_STORE_SEL__SHIFT 0x0000000e
+#define MC_BIST_CMP_CNTL__ENABLE_CMD_FIFO_MASK 0x00008000L
+#define MC_BIST_CMP_CNTL__ENABLE_CMD_FIFO__SHIFT 0x0000000f
+#define MC_BIST_CMP_CNTL__LOAD_RTEDC_MASK 0x00001000L
+#define MC_BIST_CMP_CNTL__LOAD_RTEDC__SHIFT 0x0000000c
+#define MC_BIST_CMP_CNTL__MISMATCH_CNT_MASK 0xffc00000L
+#define MC_BIST_CMP_CNTL__MISMATCH_CNT__SHIFT 0x00000016
+#define MC_BIST_CNTL__ADR_MODE_MASK 0x00000020L
+#define MC_BIST_CNTL__ADR_MODE__SHIFT 0x00000005
+#define MC_BIST_CNTL__DAT_MODE_MASK 0x00000040L
+#define MC_BIST_CNTL__DAT_MODE__SHIFT 0x00000006
+#define MC_BIST_CNTL__DONE_MASK 0x40000000L
+#define MC_BIST_CNTL__DONE__SHIFT 0x0000001e
+#define MC_BIST_CNTL__ENABLE_D0_MASK 0x00001000L
+#define MC_BIST_CNTL__ENABLE_D0__SHIFT 0x0000000c
+#define MC_BIST_CNTL__ENABLE_D1_MASK 0x00002000L
+#define MC_BIST_CNTL__ENABLE_D1__SHIFT 0x0000000d
+#define MC_BIST_CNTL__LOAD_RTDATA_CH_MASK 0x00004000L
+#define MC_BIST_CNTL__LOAD_RTDATA_CH__SHIFT 0x0000000e
+#define MC_BIST_CNTL__LOAD_RTDATA_MASK 0x80000000L
+#define MC_BIST_CNTL__LOAD_RTDATA__SHIFT 0x0000001f
+#define MC_BIST_CNTL__LOOP_CNT_MASK 0x0fff0000L
+#define MC_BIST_CNTL__LOOP_CNT__SHIFT 0x00000010
+#define MC_BIST_CNTL__LOOP_MASK 0x00000c00L
+#define MC_BIST_CNTL__LOOP__SHIFT 0x0000000a
+#define MC_BIST_CNTL__MOP_MODE_MASK 0x00000010L
+#define MC_BIST_CNTL__MOP_MODE__SHIFT 0x00000004
+#define MC_BIST_CNTL__PTR_RST_D0_MASK 0x00000004L
+#define MC_BIST_CNTL__PTR_RST_D0__SHIFT 0x00000002
+#define MC_BIST_CNTL__PTR_RST_D1_MASK 0x00000008L
+#define MC_BIST_CNTL__PTR_RST_D1__SHIFT 0x00000003
+#define MC_BIST_CNTL__RESET_MASK 0x00000001L
+#define MC_BIST_CNTL__RESET__SHIFT 0x00000000
+#define MC_BIST_CNTL__RUN_MASK 0x00000002L
+#define MC_BIST_CNTL__RUN__SHIFT 0x00000001
+#define MC_BIST_DATA_MASK__MASK_MASK 0xffffffffL
+#define MC_BIST_DATA_MASK__MASK__SHIFT 0x00000000
+#define MC_BIST_DATA_WORD0__DATA_MASK 0xffffffffL
+#define MC_BIST_DATA_WORD0__DATA__SHIFT 0x00000000
+#define MC_BIST_DATA_WORD1__DATA_MASK 0xffffffffL
+#define MC_BIST_DATA_WORD1__DATA__SHIFT 0x00000000
+#define MC_BIST_DATA_WORD2__DATA_MASK 0xffffffffL
+#define MC_BIST_DATA_WORD2__DATA__SHIFT 0x00000000
+#define MC_BIST_DATA_WORD3__DATA_MASK 0xffffffffL
+#define MC_BIST_DATA_WORD3__DATA__SHIFT 0x00000000
+#define MC_BIST_DATA_WORD4__DATA_MASK 0xffffffffL
+#define MC_BIST_DATA_WORD4__DATA__SHIFT 0x00000000
+#define MC_BIST_DATA_WORD5__DATA_MASK 0xffffffffL
+#define MC_BIST_DATA_WORD5__DATA__SHIFT 0x00000000
+#define MC_BIST_DATA_WORD6__DATA_MASK 0xffffffffL
+#define MC_BIST_DATA_WORD6__DATA__SHIFT 0x00000000
+#define MC_BIST_DATA_WORD7__DATA_MASK 0xffffffffL
+#define MC_BIST_DATA_WORD7__DATA__SHIFT 0x00000000
+#define MC_BIST_DIR_CNTL__CMD_RTR_D0_MASK 0x00000040L
+#define MC_BIST_DIR_CNTL__CMD_RTR_D0__SHIFT 0x00000006
+#define MC_BIST_DIR_CNTL__CMD_RTR_D1_MASK 0x00000100L
+#define MC_BIST_DIR_CNTL__CMD_RTR_D1__SHIFT 0x00000008
+#define MC_BIST_DIR_CNTL__DATA_LOAD_MASK 0x00000020L
+#define MC_BIST_DIR_CNTL__DATA_LOAD__SHIFT 0x00000005
+#define MC_BIST_DIR_CNTL__DAT_RTR_D0_MASK 0x00000080L
+#define MC_BIST_DIR_CNTL__DAT_RTR_D0__SHIFT 0x00000007
+#define MC_BIST_DIR_CNTL__DAT_RTR_D1_MASK 0x00000200L
+#define MC_BIST_DIR_CNTL__DAT_RTR_D1__SHIFT 0x00000009
+#define MC_BIST_DIR_CNTL__EOB_MASK 0x00000008L
+#define MC_BIST_DIR_CNTL__EOB__SHIFT 0x00000003
+#define MC_BIST_DIR_CNTL__MOP3_MASK 0x00000400L
+#define MC_BIST_DIR_CNTL__MOP3__SHIFT 0x0000000a
+#define MC_BIST_DIR_CNTL__MOP_LOAD_MASK 0x00000010L
+#define MC_BIST_DIR_CNTL__MOP_LOAD__SHIFT 0x00000004
+#define MC_BIST_DIR_CNTL__MOP_MASK 0x00000007L
+#define MC_BIST_DIR_CNTL__MOP__SHIFT 0x00000000
+#define MC_BIST_EADDR__BANK_MASK 0x0f000000L
+#define MC_BIST_EADDR__BANK__SHIFT 0x00000018
+#define MC_BIST_EADDR__COLH_MASK 0x20000000L
+#define MC_BIST_EADDR__COLH__SHIFT 0x0000001d
+#define MC_BIST_EADDR__COL_MASK 0x000003ffL
+#define MC_BIST_EADDR__COL__SHIFT 0x00000000
+#define MC_BIST_EADDR__RANK_MASK 0x10000000L
+#define MC_BIST_EADDR__RANK__SHIFT 0x0000001c
+#define MC_BIST_EADDR__ROWH_MASK 0xc0000000L
+#define MC_BIST_EADDR__ROWH__SHIFT 0x0000001e
+#define MC_BIST_EADDR__ROW_MASK 0x00fffc00L
+#define MC_BIST_EADDR__ROW__SHIFT 0x0000000a
+#define MC_BIST_MISMATCH_ADDR__BANK_MASK 0x0f000000L
+#define MC_BIST_MISMATCH_ADDR__BANK__SHIFT 0x00000018
+#define MC_BIST_MISMATCH_ADDR__COLH_MASK 0x20000000L
+#define MC_BIST_MISMATCH_ADDR__COLH__SHIFT 0x0000001d
+#define MC_BIST_MISMATCH_ADDR__COL_MASK 0x000003ffL
+#define MC_BIST_MISMATCH_ADDR__COL__SHIFT 0x00000000
+#define MC_BIST_MISMATCH_ADDR__RANK_MASK 0x10000000L
+#define MC_BIST_MISMATCH_ADDR__RANK__SHIFT 0x0000001c
+#define MC_BIST_MISMATCH_ADDR__ROWH_MASK 0xc0000000L
+#define MC_BIST_MISMATCH_ADDR__ROWH__SHIFT 0x0000001e
+#define MC_BIST_MISMATCH_ADDR__ROW_MASK 0x00fffc00L
+#define MC_BIST_MISMATCH_ADDR__ROW__SHIFT 0x0000000a
+#define MC_BIST_RDATA_EDC__EDC_MASK 0xffffffffL
+#define MC_BIST_RDATA_EDC__EDC__SHIFT 0x00000000
+#define MC_BIST_RDATA_MASK__MASK_MASK 0xffffffffL
+#define MC_BIST_RDATA_MASK__MASK__SHIFT 0x00000000
+#define MC_BIST_RDATA_WORD0__RDATA_MASK 0xffffffffL
+#define MC_BIST_RDATA_WORD0__RDATA__SHIFT 0x00000000
+#define MC_BIST_RDATA_WORD1__RDATA_MASK 0xffffffffL
+#define MC_BIST_RDATA_WORD1__RDATA__SHIFT 0x00000000
+#define MC_BIST_RDATA_WORD2__RDATA_MASK 0xffffffffL
+#define MC_BIST_RDATA_WORD2__RDATA__SHIFT 0x00000000
+#define MC_BIST_RDATA_WORD3__RDATA_MASK 0xffffffffL
+#define MC_BIST_RDATA_WORD3__RDATA__SHIFT 0x00000000
+#define MC_BIST_RDATA_WORD4__RDATA_MASK 0xffffffffL
+#define MC_BIST_RDATA_WORD4__RDATA__SHIFT 0x00000000
+#define MC_BIST_RDATA_WORD5__RDATA_MASK 0xffffffffL
+#define MC_BIST_RDATA_WORD5__RDATA__SHIFT 0x00000000
+#define MC_BIST_RDATA_WORD6__RDATA_MASK 0xffffffffL
+#define MC_BIST_RDATA_WORD6__RDATA__SHIFT 0x00000000
+#define MC_BIST_RDATA_WORD7__RDATA_MASK 0xffffffffL
+#define MC_BIST_RDATA_WORD7__RDATA__SHIFT 0x00000000
+#define MC_BIST_SADDR__BANK_MASK 0x0f000000L
+#define MC_BIST_SADDR__BANK__SHIFT 0x00000018
+#define MC_BIST_SADDR__COLH_MASK 0x20000000L
+#define MC_BIST_SADDR__COLH__SHIFT 0x0000001d
+#define MC_BIST_SADDR__COL_MASK 0x000003ffL
+#define MC_BIST_SADDR__COL__SHIFT 0x00000000
+#define MC_BIST_SADDR__RANK_MASK 0x10000000L
+#define MC_BIST_SADDR__RANK__SHIFT 0x0000001c
+#define MC_BIST_SADDR__ROWH_MASK 0xc0000000L
+#define MC_BIST_SADDR__ROWH__SHIFT 0x0000001e
+#define MC_BIST_SADDR__ROW_MASK 0x00fffc00L
+#define MC_BIST_SADDR__ROW__SHIFT 0x0000000a
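Reading or updating one of these fields is a mask-and-shift in each direction: extract with (val & MASK) >> SHIFT, insert by clearing the masked bits and OR-ing in the shifted value. A short sketch against the MC_BIST_SADDR definitions above; REG_GET_FIELD, REG_SET_FIELD, and next_bank are illustrative names coined here, not taken from this patch:

#include <stdint.h>

/* Illustrative accessors (not from this patch), built from the generated
 * reg##__##field##_MASK / __SHIFT names above. */
#define REG_GET_FIELD(val, reg, field) \
	(((val) & reg##__##field##_MASK) >> reg##__##field##__SHIFT)

#define REG_SET_FIELD(val, reg, field, x) \
	(((val) & ~reg##__##field##_MASK) | \
	 (((uint32_t)(x) << reg##__##field##__SHIFT) & reg##__##field##_MASK))

/* Example: pull the BANK field out of a BIST start address and
 * write it back incremented, leaving the other fields untouched. */
static uint32_t next_bank(uint32_t saddr)
{
	uint32_t bank = REG_GET_FIELD(saddr, MC_BIST_SADDR, BANK);

	return REG_SET_FIELD(saddr, MC_BIST_SADDR, BANK, bank + 1);
}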
+#define MC_CG_CONFIG__INDEX_MASK 0x003fffc0L
+#define MC_CG_CONFIG__INDEX__SHIFT 0x00000006
+#define MC_CG_CONFIG_MCD__INDEX_MASK 0x1fffe000L
+#define MC_CG_CONFIG_MCD__INDEX__SHIFT 0x0000000d
+#define MC_CG_CONFIG_MCD__MCD0_WR_ENABLE_MASK 0x00000001L
+#define MC_CG_CONFIG_MCD__MCD0_WR_ENABLE__SHIFT 0x00000000
+#define MC_CG_CONFIG_MCD__MCD1_WR_ENABLE_MASK 0x00000002L
+#define MC_CG_CONFIG_MCD__MCD1_WR_ENABLE__SHIFT 0x00000001
+#define MC_CG_CONFIG_MCD__MCD2_WR_ENABLE_MASK 0x00000004L
+#define MC_CG_CONFIG_MCD__MCD2_WR_ENABLE__SHIFT 0x00000002
+#define MC_CG_CONFIG_MCD__MCD3_WR_ENABLE_MASK 0x00000008L
+#define MC_CG_CONFIG_MCD__MCD3_WR_ENABLE__SHIFT 0x00000003
+#define MC_CG_CONFIG_MCD__MCD4_WR_ENABLE_MASK 0x00000010L
+#define MC_CG_CONFIG_MCD__MCD4_WR_ENABLE__SHIFT 0x00000004
+#define MC_CG_CONFIG_MCD__MCD5_WR_ENABLE_MASK 0x00000020L
+#define MC_CG_CONFIG_MCD__MCD5_WR_ENABLE__SHIFT 0x00000005
+#define MC_CG_CONFIG_MCD__MC_RD_ENABLE_MASK 0x00000700L
+#define MC_CG_CONFIG_MCD__MC_RD_ENABLE__SHIFT 0x00000008
+#define MC_CG_CONFIG__MCDW_WR_ENABLE_MASK 0x00000001L
+#define MC_CG_CONFIG__MCDW_WR_ENABLE__SHIFT 0x00000000
+#define MC_CG_CONFIG__MCDX_WR_ENABLE_MASK 0x00000002L
+#define MC_CG_CONFIG__MCDX_WR_ENABLE__SHIFT 0x00000001
+#define MC_CG_CONFIG__MCDY_WR_ENABLE_MASK 0x00000004L
+#define MC_CG_CONFIG__MCDY_WR_ENABLE__SHIFT 0x00000002
+#define MC_CG_CONFIG__MCDZ_WR_ENABLE_MASK 0x00000008L
+#define MC_CG_CONFIG__MCDZ_WR_ENABLE__SHIFT 0x00000003
+#define MC_CG_CONFIG__MC_RD_ENABLE_MASK 0x00000030L
+#define MC_CG_CONFIG__MC_RD_ENABLE__SHIFT 0x00000004
+#define MC_CG_DATAPORT__DATA_FIELD_MASK 0xffffffffL
+#define MC_CG_DATAPORT__DATA_FIELD__SHIFT 0x00000000
+#define MC_CITF_CNTL__EXEMPTPM_MASK 0x00000008L
+#define MC_CITF_CNTL__EXEMPTPM__SHIFT 0x00000003
+#define MC_CITF_CNTL__GFX_IDLE_OVERRIDE_MASK 0x00000030L
+#define MC_CITF_CNTL__GFX_IDLE_OVERRIDE__SHIFT 0x00000004
+#define MC_CITF_CNTL__IGNOREPM_MASK 0x00000004L
+#define MC_CITF_CNTL__IGNOREPM__SHIFT 0x00000002
+#define MC_CITF_CNTL__MCD_SRBM_MASK_ENABLE_MASK 0x00000040L
+#define MC_CITF_CNTL__MCD_SRBM_MASK_ENABLE__SHIFT 0x00000006
+#define MC_CITF_CREDITS_ARB_RD__HUB_PRI_MASK 0x02000000L
+#define MC_CITF_CREDITS_ARB_RD__HUB_PRI__SHIFT 0x00000019
+#define MC_CITF_CREDITS_ARB_RD__LCL_PRI_MASK 0x01000000L
+#define MC_CITF_CREDITS_ARB_RD__LCL_PRI__SHIFT 0x00000018
+#define MC_CITF_CREDITS_ARB_RD__READ_HUB_MASK 0x0000ff00L
+#define MC_CITF_CREDITS_ARB_RD__READ_HUB__SHIFT 0x00000008
+#define MC_CITF_CREDITS_ARB_RD__READ_LCL_MASK 0x000000ffL
+#define MC_CITF_CREDITS_ARB_RD__READ_LCL__SHIFT 0x00000000
+#define MC_CITF_CREDITS_ARB_RD__READ_PRI_MASK 0x00ff0000L
+#define MC_CITF_CREDITS_ARB_RD__READ_PRI__SHIFT 0x00000010
+#define MC_CITF_CREDITS_ARB_WR__HUB_PRI_MASK 0x00010000L
+#define MC_CITF_CREDITS_ARB_WR__HUB_PRI__SHIFT 0x00000010
+#define MC_CITF_CREDITS_ARB_WR__LCL_PRI_MASK 0x00020000L
+#define MC_CITF_CREDITS_ARB_WR__LCL_PRI__SHIFT 0x00000011
+#define MC_CITF_CREDITS_ARB_WR__WRITE_HUB_MASK 0x0000ff00L
+#define MC_CITF_CREDITS_ARB_WR__WRITE_HUB__SHIFT 0x00000008
+#define MC_CITF_CREDITS_ARB_WR__WRITE_LCL_MASK 0x000000ffL
+#define MC_CITF_CREDITS_ARB_WR__WRITE_LCL__SHIFT 0x00000000
+#define MC_CITF_CREDITS_VM__READ_ALL_MASK 0x0000003fL
+#define MC_CITF_CREDITS_VM__READ_ALL__SHIFT 0x00000000
+#define MC_CITF_CREDITS_VM__WRITE_ALL_MASK 0x00000fc0L
+#define MC_CITF_CREDITS_VM__WRITE_ALL__SHIFT 0x00000006
+#define MC_CITF_CREDITS_XBAR__READ_LCL_MASK 0x000000ffL
+#define MC_CITF_CREDITS_XBAR__READ_LCL__SHIFT 0x00000000
+#define MC_CITF_CREDITS_XBAR__WRITE_LCL_MASK 0x0000ff00L
+#define MC_CITF_CREDITS_XBAR__WRITE_LCL__SHIFT 0x00000008
+#define MC_CITF_DAGB_CNTL__CENTER_RD_MAX_BURST_MASK 0x0000001eL
+#define MC_CITF_DAGB_CNTL__CENTER_RD_MAX_BURST__SHIFT 0x00000001
+#define MC_CITF_DAGB_CNTL__CENTER_WR_MAX_BURST_MASK 0x000003c0L
+#define MC_CITF_DAGB_CNTL__CENTER_WR_MAX_BURST__SHIFT 0x00000006
+#define MC_CITF_DAGB_CNTL__DISABLE_SELF_INIT_MASK 0x00000020L
+#define MC_CITF_DAGB_CNTL__DISABLE_SELF_INIT__SHIFT 0x00000005
+#define MC_CITF_DAGB_CNTL__JUMP_AHEAD_MASK 0x00000001L
+#define MC_CITF_DAGB_CNTL__JUMP_AHEAD__SHIFT 0x00000000
+#define MC_CITF_DAGB_DLY__CLI_MASK 0x001f0000L
+#define MC_CITF_DAGB_DLY__CLI__SHIFT 0x00000010
+#define MC_CITF_DAGB_DLY__DLY_MASK 0x0000001fL
+#define MC_CITF_DAGB_DLY__DLY__SHIFT 0x00000000
+#define MC_CITF_DAGB_DLY__POS_MASK 0x1f000000L
+#define MC_CITF_DAGB_DLY__POS__SHIFT 0x00000018
+#define MC_CITF_INT_CREDITS__CNTR_RD_HUB_HP_MASK 0x00fc0000L
+#define MC_CITF_INT_CREDITS__CNTR_RD_HUB_HP__SHIFT 0x00000012
+#define MC_CITF_INT_CREDITS__CNTR_RD_HUB_LP_MASK 0x0003f000L
+#define MC_CITF_INT_CREDITS__CNTR_RD_HUB_LP__SHIFT 0x0000000c
+#define MC_CITF_INT_CREDITS__CNTR_RD_LCL_MASK 0x3f000000L
+#define MC_CITF_INT_CREDITS__CNTR_RD_LCL__SHIFT 0x00000018
+#define MC_CITF_INT_CREDITS__REMRDRET_MASK 0x0000003fL
+#define MC_CITF_INT_CREDITS__REMRDRET__SHIFT 0x00000000
+#define MC_CITF_INT_CREDITS_WR__CNTR_WR_HUB_MASK 0x0000003fL
+#define MC_CITF_INT_CREDITS_WR__CNTR_WR_HUB__SHIFT 0x00000000
+#define MC_CITF_INT_CREDITS_WR__CNTR_WR_LCL_MASK 0x00000fc0L
+#define MC_CITF_INT_CREDITS_WR__CNTR_WR_LCL__SHIFT 0x00000006
+#define MC_CITF_MISC_RD_CG__ENABLE_MASK 0x00040000L
+#define MC_CITF_MISC_RD_CG__ENABLE__SHIFT 0x00000012
+#define MC_CITF_MISC_RD_CG__MEM_LS_ENABLE_MASK 0x00080000L
+#define MC_CITF_MISC_RD_CG__MEM_LS_ENABLE__SHIFT 0x00000013
+#define MC_CITF_MISC_RD_CG__OFFDLY_MASK 0x00000fc0L
+#define MC_CITF_MISC_RD_CG__OFFDLY__SHIFT 0x00000006
+#define MC_CITF_MISC_RD_CG__ONDLY_MASK 0x0000003fL
+#define MC_CITF_MISC_RD_CG__ONDLY__SHIFT 0x00000000
+#define MC_CITF_MISC_RD_CG__RDYDLY_MASK 0x0003f000L
+#define MC_CITF_MISC_RD_CG__RDYDLY__SHIFT 0x0000000c
+#define MC_CITF_MISC_VM_CG__ENABLE_MASK 0x00040000L
+#define MC_CITF_MISC_VM_CG__ENABLE__SHIFT 0x00000012
+#define MC_CITF_MISC_VM_CG__MEM_LS_ENABLE_MASK 0x00080000L
+#define MC_CITF_MISC_VM_CG__MEM_LS_ENABLE__SHIFT 0x00000013
+#define MC_CITF_MISC_VM_CG__OFFDLY_MASK 0x00000fc0L
+#define MC_CITF_MISC_VM_CG__OFFDLY__SHIFT 0x00000006
+#define MC_CITF_MISC_VM_CG__ONDLY_MASK 0x0000003fL
+#define MC_CITF_MISC_VM_CG__ONDLY__SHIFT 0x00000000
+#define MC_CITF_MISC_VM_CG__RDYDLY_MASK 0x0003f000L
+#define MC_CITF_MISC_VM_CG__RDYDLY__SHIFT 0x0000000c
+#define MC_CITF_MISC_WR_CG__ENABLE_MASK 0x00040000L
+#define MC_CITF_MISC_WR_CG__ENABLE__SHIFT 0x00000012
+#define MC_CITF_MISC_WR_CG__MEM_LS_ENABLE_MASK 0x00080000L
+#define MC_CITF_MISC_WR_CG__MEM_LS_ENABLE__SHIFT 0x00000013
+#define MC_CITF_MISC_WR_CG__OFFDLY_MASK 0x00000fc0L
+#define MC_CITF_MISC_WR_CG__OFFDLY__SHIFT 0x00000006
+#define MC_CITF_MISC_WR_CG__ONDLY_MASK 0x0000003fL
+#define MC_CITF_MISC_WR_CG__ONDLY__SHIFT 0x00000000
+#define MC_CITF_MISC_WR_CG__RDYDLY_MASK 0x0003f000L
+#define MC_CITF_MISC_WR_CG__RDYDLY__SHIFT 0x0000000c
+#define MC_CITF_PERF_MON_CNTL2__CID_MASK 0x000001ffL
+#define MC_CITF_PERF_MON_CNTL2__CID__SHIFT 0x00000000
+#define MC_CITF_PERF_MON_RSLT2__CB_RD_BUSY_MASK 0x00000040L
+#define MC_CITF_PERF_MON_RSLT2__CB_RD_BUSY__SHIFT 0x00000006
+#define MC_CITF_PERF_MON_RSLT2__CB_WR_BUSY_MASK 0x00001000L
+#define MC_CITF_PERF_MON_RSLT2__CB_WR_BUSY__SHIFT 0x0000000c
+#define MC_CITF_PERF_MON_RSLT2__DB_RD_BUSY_MASK 0x00000080L
+#define MC_CITF_PERF_MON_RSLT2__DB_RD_BUSY__SHIFT 0x00000007
+#define MC_CITF_PERF_MON_RSLT2__DB_WR_BUSY_MASK 0x00002000L
+#define MC_CITF_PERF_MON_RSLT2__DB_WR_BUSY__SHIFT 0x0000000d
+#define MC_CITF_PERF_MON_RSLT2__SX_WR_BUSY_MASK 0x00004000L
+#define MC_CITF_PERF_MON_RSLT2__SX_WR_BUSY__SHIFT 0x0000000e
+#define MC_CITF_PERF_MON_RSLT2__TC0_RD_BUSY_MASK 0x00000100L
+#define MC_CITF_PERF_MON_RSLT2__TC0_RD_BUSY__SHIFT 0x00000008
+#define MC_CITF_PERF_MON_RSLT2__TC0_WR_BUSY_MASK 0x00010000L
+#define MC_CITF_PERF_MON_RSLT2__TC0_WR_BUSY__SHIFT 0x00000010
+#define MC_CITF_PERF_MON_RSLT2__TC1_RD_BUSY_MASK 0x00000400L
+#define MC_CITF_PERF_MON_RSLT2__TC1_RD_BUSY__SHIFT 0x0000000a
+#define MC_CITF_PERF_MON_RSLT2__TC1_WR_BUSY_MASK 0x00020000L
+#define MC_CITF_PERF_MON_RSLT2__TC1_WR_BUSY__SHIFT 0x00000011
+#define MC_CITF_PERF_MON_RSLT2__TC2_RD_BUSY_MASK 0x00008000L
+#define MC_CITF_PERF_MON_RSLT2__TC2_RD_BUSY__SHIFT 0x0000000f
+#define MC_CITF_PERF_MON_RSLT2__TC2_WR_BUSY_MASK 0x00040000L
+#define MC_CITF_PERF_MON_RSLT2__TC2_WR_BUSY__SHIFT 0x00000012
+#define MC_CITF_PERF_MON_RSLT2__VC0_RD_BUSY_MASK 0x00000200L
+#define MC_CITF_PERF_MON_RSLT2__VC0_RD_BUSY__SHIFT 0x00000009
+#define MC_CITF_PERF_MON_RSLT2__VC1_RD_BUSY_MASK 0x00000800L
+#define MC_CITF_PERF_MON_RSLT2__VC1_RD_BUSY__SHIFT 0x0000000b
+#define MC_CITF_REMREQ__CREDITS_ENABLE_MASK 0x00004000L
+#define MC_CITF_REMREQ__CREDITS_ENABLE__SHIFT 0x0000000e
+#define MC_CITF_REMREQ__READ_CREDITS_MASK 0x0000007fL
+#define MC_CITF_REMREQ__READ_CREDITS__SHIFT 0x00000000
+#define MC_CITF_REMREQ__WRITE_CREDITS_MASK 0x00003f80L
+#define MC_CITF_REMREQ__WRITE_CREDITS__SHIFT 0x00000007
+#define MC_CITF_RET_MODE__INORDER_RD_MASK 0x00000001L
+#define MC_CITF_RET_MODE__INORDER_RD__SHIFT 0x00000000
+#define MC_CITF_RET_MODE__INORDER_WR_MASK 0x00000002L
+#define MC_CITF_RET_MODE__INORDER_WR__SHIFT 0x00000001
+#define MC_CITF_RET_MODE__LCLPRI_RD_MASK 0x00000010L
+#define MC_CITF_RET_MODE__LCLPRI_RD__SHIFT 0x00000004
+#define MC_CITF_RET_MODE__LCLPRI_WR_MASK 0x00000020L
+#define MC_CITF_RET_MODE__LCLPRI_WR__SHIFT 0x00000005
+#define MC_CITF_RET_MODE__REMPRI_RD_MASK 0x00000004L
+#define MC_CITF_RET_MODE__REMPRI_RD__SHIFT 0x00000002
+#define MC_CITF_RET_MODE__REMPRI_WR_MASK 0x00000008L
+#define MC_CITF_RET_MODE__REMPRI_WR__SHIFT 0x00000003
+#define MC_CITF_WTM_RD_CNTL__DISABLE_REMOTE_MASK 0x01000000L
+#define MC_CITF_WTM_RD_CNTL__DISABLE_REMOTE__SHIFT 0x00000018
+#define MC_CITF_WTM_RD_CNTL__GROUP0_DECREMENT_MASK 0x00000007L
+#define MC_CITF_WTM_RD_CNTL__GROUP0_DECREMENT__SHIFT 0x00000000
+#define MC_CITF_WTM_RD_CNTL__GROUP1_DECREMENT_MASK 0x00000038L
+#define MC_CITF_WTM_RD_CNTL__GROUP1_DECREMENT__SHIFT 0x00000003
+#define MC_CITF_WTM_RD_CNTL__GROUP2_DECREMENT_MASK 0x000001c0L
+#define MC_CITF_WTM_RD_CNTL__GROUP2_DECREMENT__SHIFT 0x00000006
+#define MC_CITF_WTM_RD_CNTL__GROUP3_DECREMENT_MASK 0x00000e00L
+#define MC_CITF_WTM_RD_CNTL__GROUP3_DECREMENT__SHIFT 0x00000009
+#define MC_CITF_WTM_RD_CNTL__GROUP4_DECREMENT_MASK 0x00007000L
+#define MC_CITF_WTM_RD_CNTL__GROUP4_DECREMENT__SHIFT 0x0000000c
+#define MC_CITF_WTM_RD_CNTL__GROUP5_DECREMENT_MASK 0x00038000L
+#define MC_CITF_WTM_RD_CNTL__GROUP5_DECREMENT__SHIFT 0x0000000f
+#define MC_CITF_WTM_RD_CNTL__GROUP6_DECREMENT_MASK 0x001c0000L
+#define MC_CITF_WTM_RD_CNTL__GROUP6_DECREMENT__SHIFT 0x00000012
+#define MC_CITF_WTM_RD_CNTL__GROUP7_DECREMENT_MASK 0x00e00000L
+#define MC_CITF_WTM_RD_CNTL__GROUP7_DECREMENT__SHIFT 0x00000015
+#define MC_CITF_WTM_WR_CNTL__DISABLE_REMOTE_MASK 0x01000000L
+#define MC_CITF_WTM_WR_CNTL__DISABLE_REMOTE__SHIFT 0x00000018
+#define MC_CITF_WTM_WR_CNTL__GROUP0_DECREMENT_MASK 0x00000007L
+#define MC_CITF_WTM_WR_CNTL__GROUP0_DECREMENT__SHIFT 0x00000000
+#define MC_CITF_WTM_WR_CNTL__GROUP1_DECREMENT_MASK 0x00000038L
+#define MC_CITF_WTM_WR_CNTL__GROUP1_DECREMENT__SHIFT 0x00000003
+#define MC_CITF_WTM_WR_CNTL__GROUP2_DECREMENT_MASK 0x000001c0L
+#define MC_CITF_WTM_WR_CNTL__GROUP2_DECREMENT__SHIFT 0x00000006
+#define MC_CITF_WTM_WR_CNTL__GROUP3_DECREMENT_MASK 0x00000e00L
+#define MC_CITF_WTM_WR_CNTL__GROUP3_DECREMENT__SHIFT 0x00000009
+#define MC_CITF_WTM_WR_CNTL__GROUP4_DECREMENT_MASK 0x00007000L
+#define MC_CITF_WTM_WR_CNTL__GROUP4_DECREMENT__SHIFT 0x0000000c
+#define MC_CITF_WTM_WR_CNTL__GROUP5_DECREMENT_MASK 0x00038000L
+#define MC_CITF_WTM_WR_CNTL__GROUP5_DECREMENT__SHIFT 0x0000000f
+#define MC_CITF_WTM_WR_CNTL__GROUP6_DECREMENT_MASK 0x001c0000L
+#define MC_CITF_WTM_WR_CNTL__GROUP6_DECREMENT__SHIFT 0x00000012
+#define MC_CITF_WTM_WR_CNTL__GROUP7_DECREMENT_MASK 0x00e00000L
+#define MC_CITF_WTM_WR_CNTL__GROUP7_DECREMENT__SHIFT 0x00000015
+#define MC_CITF_XTRA_ENABLE__ARB_DBG_MASK 0x00000f00L
+#define MC_CITF_XTRA_ENABLE__ARB_DBG__SHIFT 0x00000008
+#define MC_CITF_XTRA_ENABLE__CB1_RD_MASK 0x00000001L
+#define MC_CITF_XTRA_ENABLE__CB1_RD__SHIFT 0x00000000
+#define MC_CITF_XTRA_ENABLE__CB1_WR_MASK 0x00000002L
+#define MC_CITF_XTRA_ENABLE__CB1_WR__SHIFT 0x00000001
+#define MC_CITF_XTRA_ENABLE__DB1_RD_MASK 0x00000004L
+#define MC_CITF_XTRA_ENABLE__DB1_RD__SHIFT 0x00000002
+#define MC_CITF_XTRA_ENABLE__DB1_WR_MASK 0x00000008L
+#define MC_CITF_XTRA_ENABLE__DB1_WR__SHIFT 0x00000003
+#define MC_CITF_XTRA_ENABLE__TC2_RD_MASK 0x00000010L
+#define MC_CITF_XTRA_ENABLE__TC2_RD__SHIFT 0x00000004
+#define MC_CITF_XTRA_ENABLE__TC2_WR_MASK 0x00001000L
+#define MC_CITF_XTRA_ENABLE__TC2_WR__SHIFT 0x0000000c
+#define MC_CONFIG__MCC_INDEX_MODE_ENABLE_MASK 0x80000000L
+#define MC_CONFIG__MCC_INDEX_MODE_ENABLE__SHIFT 0x0000001f
+#define MC_CONFIG_MCD__MCD0_WR_ENABLE_MASK 0x00000001L
+#define MC_CONFIG_MCD__MCD0_WR_ENABLE__SHIFT 0x00000000
+#define MC_CONFIG_MCD__MCD1_WR_ENABLE_MASK 0x00000002L
+#define MC_CONFIG_MCD__MCD1_WR_ENABLE__SHIFT 0x00000001
+#define MC_CONFIG_MCD__MCD2_WR_ENABLE_MASK 0x00000004L
+#define MC_CONFIG_MCD__MCD2_WR_ENABLE__SHIFT 0x00000002
+#define MC_CONFIG_MCD__MCD3_WR_ENABLE_MASK 0x00000008L
+#define MC_CONFIG_MCD__MCD3_WR_ENABLE__SHIFT 0x00000003
+#define MC_CONFIG_MCD__MCD4_WR_ENABLE_MASK 0x00000010L
+#define MC_CONFIG_MCD__MCD4_WR_ENABLE__SHIFT 0x00000004
+#define MC_CONFIG_MCD__MCD5_WR_ENABLE_MASK 0x00000020L
+#define MC_CONFIG_MCD__MCD5_WR_ENABLE__SHIFT 0x00000005
+#define MC_CONFIG_MCD__MCD_INDEX_MODE_ENABLE_MASK 0x80000000L
+#define MC_CONFIG_MCD__MCD_INDEX_MODE_ENABLE__SHIFT 0x0000001f
+#define MC_CONFIG_MCD__MC_RD_ENABLE_MASK 0x00000700L
+#define MC_CONFIG_MCD__MC_RD_ENABLE__SHIFT 0x00000008
+#define MC_CONFIG__MCDW_WR_ENABLE_MASK 0x00000001L
+#define MC_CONFIG__MCDW_WR_ENABLE__SHIFT 0x00000000
+#define MC_CONFIG__MCDX_WR_ENABLE_MASK 0x00000002L
+#define MC_CONFIG__MCDX_WR_ENABLE__SHIFT 0x00000001
+#define MC_CONFIG__MCDY_WR_ENABLE_MASK 0x00000004L
+#define MC_CONFIG__MCDY_WR_ENABLE__SHIFT 0x00000002
+#define MC_CONFIG__MCDZ_WR_ENABLE_MASK 0x00000008L
+#define MC_CONFIG__MCDZ_WR_ENABLE__SHIFT 0x00000003
+#define MC_CONFIG__MC_RD_ENABLE_MASK 0x00000030L
+#define MC_CONFIG__MC_RD_ENABLE__SHIFT 0x00000004
+#define MC_HUB_MISC_DBG__SELECT0_MASK 0x0000000fL
+#define MC_HUB_MISC_DBG__SELECT0__SHIFT 0x00000000
+#define MC_HUB_MISC_DBG__SELECT1_MASK 0x000000f0L
+#define MC_HUB_MISC_DBG__SELECT1__SHIFT 0x00000004
+#define MC_HUB_MISC_FRAMING__BITS_MASK 0xffffffffL
+#define MC_HUB_MISC_FRAMING__BITS__SHIFT 0x00000000
+#define MC_HUB_MISC_HUB_CG__ENABLE_MASK 0x00040000L
+#define MC_HUB_MISC_HUB_CG__ENABLE__SHIFT 0x00000012
+#define MC_HUB_MISC_HUB_CG__MEM_LS_ENABLE_MASK 0x00080000L
+#define MC_HUB_MISC_HUB_CG__MEM_LS_ENABLE__SHIFT 0x00000013
+#define MC_HUB_MISC_HUB_CG__OFFDLY_MASK 0x00000fc0L
+#define MC_HUB_MISC_HUB_CG__OFFDLY__SHIFT 0x00000006
+#define MC_HUB_MISC_HUB_CG__ONDLY_MASK 0x0000003fL
+#define MC_HUB_MISC_HUB_CG__ONDLY__SHIFT 0x00000000
+#define MC_HUB_MISC_HUB_CG__RDYDLY_MASK 0x0003f000L
+#define MC_HUB_MISC_HUB_CG__RDYDLY__SHIFT 0x0000000c
+#define MC_HUB_MISC_IDLE_STATUS__OUTSTANDING_CP_READ_MASK 0x00000001L
+#define MC_HUB_MISC_IDLE_STATUS__OUTSTANDING_CP_READ__SHIFT 0x00000000
+#define MC_HUB_MISC_IDLE_STATUS__OUTSTANDING_CP_WRITE_MASK 0x00000002L
+#define MC_HUB_MISC_IDLE_STATUS__OUTSTANDING_CP_WRITE__SHIFT 0x00000001
+#define MC_HUB_MISC_IDLE_STATUS__OUTSTANDING_DISP_READ_MASK 0x00000400L
+#define MC_HUB_MISC_IDLE_STATUS__OUTSTANDING_DISP_READ__SHIFT 0x0000000a
+#define MC_HUB_MISC_IDLE_STATUS__OUTSTANDING_DISP_WRITE_MASK 0x00000800L
+#define MC_HUB_MISC_IDLE_STATUS__OUTSTANDING_DISP_WRITE__SHIFT 0x0000000b
+#define MC_HUB_MISC_IDLE_STATUS__OUTSTANDING_GFX_READ_MASK 0x00000004L
+#define MC_HUB_MISC_IDLE_STATUS__OUTSTANDING_GFX_READ__SHIFT 0x00000002
+#define MC_HUB_MISC_IDLE_STATUS__OUTSTANDING_GFX_WRITE_MASK 0x00000008L
+#define MC_HUB_MISC_IDLE_STATUS__OUTSTANDING_GFX_WRITE__SHIFT 0x00000003
+#define MC_HUB_MISC_IDLE_STATUS__OUTSTANDING_HDP_READ_MASK 0x00010000L
+#define MC_HUB_MISC_IDLE_STATUS__OUTSTANDING_HDP_READ__SHIFT 0x00000010
+#define MC_HUB_MISC_IDLE_STATUS__OUTSTANDING_HDP_WRITE_MASK 0x00020000L
+#define MC_HUB_MISC_IDLE_STATUS__OUTSTANDING_HDP_WRITE__SHIFT 0x00000011
+#define MC_HUB_MISC_IDLE_STATUS__OUTSTANDING_OTH_READ_MASK 0x00040000L
+#define MC_HUB_MISC_IDLE_STATUS__OUTSTANDING_OTH_READ__SHIFT 0x00000012
+#define MC_HUB_MISC_IDLE_STATUS__OUTSTANDING_OTH_WRITE_MASK 0x00080000L
+#define MC_HUB_MISC_IDLE_STATUS__OUTSTANDING_OTH_WRITE__SHIFT 0x00000013
+#define MC_HUB_MISC_IDLE_STATUS__OUTSTANDING_RLC_READ_MASK 0x00000040L
+#define MC_HUB_MISC_IDLE_STATUS__OUTSTANDING_RLC_READ__SHIFT 0x00000006
+#define MC_HUB_MISC_IDLE_STATUS__OUTSTANDING_RLC_WRITE_MASK 0x00000080L
+#define MC_HUB_MISC_IDLE_STATUS__OUTSTANDING_RLC_WRITE__SHIFT 0x00000007
+#define MC_HUB_MISC_IDLE_STATUS__OUTSTANDING_SMU_READ_MASK 0x00004000L
+#define MC_HUB_MISC_IDLE_STATUS__OUTSTANDING_SMU_READ__SHIFT 0x0000000e
+#define MC_HUB_MISC_IDLE_STATUS__OUTSTANDING_SMU_WRITE_MASK 0x00008000L
+#define MC_HUB_MISC_IDLE_STATUS__OUTSTANDING_SMU_WRITE__SHIFT 0x0000000f
+#define MC_HUB_MISC_IDLE_STATUS__OUTSTANDING_UVD_READ_MASK 0x00001000L
+#define MC_HUB_MISC_IDLE_STATUS__OUTSTANDING_UVD_READ__SHIFT 0x0000000c
+#define MC_HUB_MISC_IDLE_STATUS__OUTSTANDING_UVD_WRITE_MASK 0x00002000L
+#define MC_HUB_MISC_IDLE_STATUS__OUTSTANDING_UVD_WRITE__SHIFT 0x0000000d
+#define MC_HUB_MISC_IDLE_STATUS__OUTSTANDING_VCE_READ_MASK 0x01000000L
+#define MC_HUB_MISC_IDLE_STATUS__OUTSTANDING_VCE_READ__SHIFT 0x00000018
+#define MC_HUB_MISC_IDLE_STATUS__OUTSTANDING_VCE_WRITE_MASK 0x02000000L
+#define MC_HUB_MISC_IDLE_STATUS__OUTSTANDING_VCE_WRITE__SHIFT 0x00000019
+#define MC_HUB_MISC_IDLE_STATUS__OUTSTANDING_VMC_READ_MASK 0x00100000L
+#define MC_HUB_MISC_IDLE_STATUS__OUTSTANDING_VMC_READ__SHIFT 0x00000014
+#define MC_HUB_MISC_IDLE_STATUS__OUTSTANDING_VMC_WRITE_MASK 0x00200000L
+#define MC_HUB_MISC_IDLE_STATUS__OUTSTANDING_VMC_WRITE__SHIFT 0x00000015
+#define MC_HUB_MISC_OVERRIDE__IDLE_MASK 0x00000003L
+#define MC_HUB_MISC_OVERRIDE__IDLE__SHIFT 0x00000000
+#define MC_HUB_MISC_POWER__PM_BLACKOUT_CNTL_MASK 0x00000018L
+#define MC_HUB_MISC_POWER__PM_BLACKOUT_CNTL__SHIFT 0x00000003
+#define MC_HUB_MISC_POWER__SRBM_GATE_OVERRIDE_MASK 0x00000004L
+#define MC_HUB_MISC_POWER__SRBM_GATE_OVERRIDE__SHIFT 0x00000002
+#define MC_HUB_MISC_SIP_CG__ENABLE_MASK 0x00040000L
+#define MC_HUB_MISC_SIP_CG__ENABLE__SHIFT 0x00000012
+#define MC_HUB_MISC_SIP_CG__MEM_LS_ENABLE_MASK 0x00080000L
+#define MC_HUB_MISC_SIP_CG__MEM_LS_ENABLE__SHIFT 0x00000013
+#define MC_HUB_MISC_SIP_CG__OFFDLY_MASK 0x00000fc0L
+#define MC_HUB_MISC_SIP_CG__OFFDLY__SHIFT 0x00000006
+#define MC_HUB_MISC_SIP_CG__ONDLY_MASK 0x0000003fL
+#define MC_HUB_MISC_SIP_CG__ONDLY__SHIFT 0x00000000
+#define MC_HUB_MISC_SIP_CG__RDYDLY_MASK 0x0003f000L
+#define MC_HUB_MISC_SIP_CG__RDYDLY__SHIFT 0x0000000c
+#define MC_HUB_MISC_STATUS__GFX_BUSY_MASK 0x00002000L
+#define MC_HUB_MISC_STATUS__GFX_BUSY__SHIFT 0x0000000d
+#define MC_HUB_MISC_STATUS__OUTSTANDING_HUB_RDREQ_MASK 0x00000004L
+#define MC_HUB_MISC_STATUS__OUTSTANDING_HUB_RDREQ__SHIFT 0x00000002
+#define MC_HUB_MISC_STATUS__OUTSTANDING_HUB_RDRET_MASK 0x00000008L
+#define MC_HUB_MISC_STATUS__OUTSTANDING_HUB_RDRET__SHIFT 0x00000003
+#define MC_HUB_MISC_STATUS__OUTSTANDING_HUB_WRREQ_MASK 0x00000010L
+#define MC_HUB_MISC_STATUS__OUTSTANDING_HUB_WRREQ__SHIFT 0x00000004
+#define MC_HUB_MISC_STATUS__OUTSTANDING_HUB_WRRET_MASK 0x00000020L
+#define MC_HUB_MISC_STATUS__OUTSTANDING_HUB_WRRET__SHIFT 0x00000005
+#define MC_HUB_MISC_STATUS__OUTSTANDING_MCD_READ_MASK 0x00000100L
+#define MC_HUB_MISC_STATUS__OUTSTANDING_MCD_READ__SHIFT 0x00000008
+#define MC_HUB_MISC_STATUS__OUTSTANDING_MCD_WRITE_MASK 0x00000200L
+#define MC_HUB_MISC_STATUS__OUTSTANDING_MCD_WRITE__SHIFT 0x00000009
+#define MC_HUB_MISC_STATUS__OUTSTANDING_READ_MASK 0x00000001L
+#define MC_HUB_MISC_STATUS__OUTSTANDING_READ__SHIFT 0x00000000
+#define MC_HUB_MISC_STATUS__OUTSTANDING_RPB_READ_MASK 0x00000040L
+#define MC_HUB_MISC_STATUS__OUTSTANDING_RPB_READ__SHIFT 0x00000006
+#define MC_HUB_MISC_STATUS__OUTSTANDING_RPB_WRITE_MASK 0x00000080L
+#define MC_HUB_MISC_STATUS__OUTSTANDING_RPB_WRITE__SHIFT 0x00000007
+#define MC_HUB_MISC_STATUS__OUTSTANDING_WRITE_MASK 0x00000002L
+#define MC_HUB_MISC_STATUS__OUTSTANDING_WRITE__SHIFT 0x00000001
+#define MC_HUB_MISC_STATUS__READ_DEADLOCK_WARNING_MASK 0x00001000L
+#define MC_HUB_MISC_STATUS__READ_DEADLOCK_WARNING__SHIFT 0x0000000c
+#define MC_HUB_MISC_STATUS__RPB_BUSY_MASK 0x00000400L
+#define MC_HUB_MISC_STATUS__RPB_BUSY__SHIFT 0x0000000a
+#define MC_HUB_MISC_STATUS__WRITE_DEADLOCK_WARNING_MASK 0x00000800L
+#define MC_HUB_MISC_STATUS__WRITE_DEADLOCK_WARNING__SHIFT 0x0000000b
+#define MC_HUB_MISC_VM_CG__ENABLE_MASK 0x00040000L
+#define MC_HUB_MISC_VM_CG__ENABLE__SHIFT 0x00000012
+#define MC_HUB_MISC_VM_CG__MEM_LS_ENABLE_MASK 0x00080000L
+#define MC_HUB_MISC_VM_CG__MEM_LS_ENABLE__SHIFT 0x00000013
+#define MC_HUB_MISC_VM_CG__OFFDLY_MASK 0x00000fc0L
+#define MC_HUB_MISC_VM_CG__OFFDLY__SHIFT 0x00000006
+#define MC_HUB_MISC_VM_CG__ONDLY_MASK 0x0000003fL
+#define MC_HUB_MISC_VM_CG__ONDLY__SHIFT 0x00000000
+#define MC_HUB_MISC_VM_CG__RDYDLY_MASK 0x0003f000L
+#define MC_HUB_MISC_VM_CG__RDYDLY__SHIFT 0x0000000c
+#define MC_HUB_RDREQ_CNTL__BREAK_HDP_DEADLOCK_MASK 0x00000200L
+#define MC_HUB_RDREQ_CNTL__BREAK_HDP_DEADLOCK__SHIFT 0x00000009
+#define MC_HUB_RDREQ_CNTL__DEBUG_REG_MASK 0x0001fc00L
+#define MC_HUB_RDREQ_CNTL__DEBUG_REG__SHIFT 0x0000000a
+#define MC_HUB_RDREQ_CNTL__DISABLE_SELF_INIT_GBL0_MASK 0x00020000L
+#define MC_HUB_RDREQ_CNTL__DISABLE_SELF_INIT_GBL0__SHIFT 0x00000011
+#define MC_HUB_RDREQ_CNTL__DISABLE_SELF_INIT_GBL1_MASK 0x00040000L
+#define MC_HUB_RDREQ_CNTL__DISABLE_SELF_INIT_GBL1__SHIFT 0x00000012
+#define MC_HUB_RDREQ_CNTL__JUMPAHEAD_GBL0_MASK 0x00000004L
+#define MC_HUB_RDREQ_CNTL__JUMPAHEAD_GBL0__SHIFT 0x00000002
+#define MC_HUB_RDREQ_CNTL__JUMPAHEAD_GBL1_MASK 0x00000008L
+#define MC_HUB_RDREQ_CNTL__JUMPAHEAD_GBL1__SHIFT 0x00000003
+#define MC_HUB_RDREQ_CNTL__MCDW_STALL_MODE_MASK 0x00000020L
+#define MC_HUB_RDREQ_CNTL__MCDW_STALL_MODE__SHIFT 0x00000005
+#define MC_HUB_RDREQ_CNTL__MCDX_STALL_MODE_MASK 0x00000040L
+#define MC_HUB_RDREQ_CNTL__MCDX_STALL_MODE__SHIFT 0x00000006
+#define MC_HUB_RDREQ_CNTL__MCDY_STALL_MODE_MASK 0x00000080L
+#define MC_HUB_RDREQ_CNTL__MCDY_STALL_MODE__SHIFT 0x00000007
+#define MC_HUB_RDREQ_CNTL__MCDZ_STALL_MODE_MASK 0x00000100L
+#define MC_HUB_RDREQ_CNTL__MCDZ_STALL_MODE__SHIFT 0x00000008
+#define MC_HUB_RDREQ_CNTL__OVERRIDE_STALL_ENABLE_MASK 0x00000010L
+#define MC_HUB_RDREQ_CNTL__OVERRIDE_STALL_ENABLE__SHIFT 0x00000004
+#define MC_HUB_RDREQ_CNTL__PWRXPRESS_MODE_MASK 0x00080000L
+#define MC_HUB_RDREQ_CNTL__PWRXPRESS_MODE__SHIFT 0x00000013
+#define MC_HUB_RDREQ_CNTL__REMOTE_BLACKOUT_MASK 0x00000001L
+#define MC_HUB_RDREQ_CNTL__REMOTE_BLACKOUT__SHIFT 0x00000000
+#define MC_HUB_RDREQ_CREDITS2__STOR1_PRI_MASK 0x000000ffL
+#define MC_HUB_RDREQ_CREDITS2__STOR1_PRI__SHIFT 0x00000000
+#define MC_HUB_RDREQ_CREDITS__STOR0_MASK 0x00ff0000L
+#define MC_HUB_RDREQ_CREDITS__STOR0__SHIFT 0x00000010
+#define MC_HUB_RDREQ_CREDITS__STOR1_MASK 0xff000000L
+#define MC_HUB_RDREQ_CREDITS__STOR1__SHIFT 0x00000018
+#define MC_HUB_RDREQ_CREDITS__VM0_MASK 0x000000ffL
+#define MC_HUB_RDREQ_CREDITS__VM0__SHIFT 0x00000000
+#define MC_HUB_RDREQ_CREDITS__VM1_MASK 0x0000ff00L
+#define MC_HUB_RDREQ_CREDITS__VM1__SHIFT 0x00000008
+#define MC_HUB_RDREQ_DMIF__BLACKOUT_EXEMPT_MASK 0x00000008L
+#define MC_HUB_RDREQ_DMIF__BLACKOUT_EXEMPT__SHIFT 0x00000003
+#define MC_HUB_RDREQ_DMIF__ENABLE_MASK 0x00000001L
+#define MC_HUB_RDREQ_DMIF__ENABLE__SHIFT 0x00000000
+#define MC_HUB_RDREQ_DMIF__LAZY_TIMER_MASK 0x00007800L
+#define MC_HUB_RDREQ_DMIF__LAZY_TIMER__SHIFT 0x0000000b
+#define MC_HUB_RDREQ_DMIF_LIMIT__ENABLE_MASK 0x00000003L
+#define MC_HUB_RDREQ_DMIF_LIMIT__ENABLE__SHIFT 0x00000000
+#define MC_HUB_RDREQ_DMIF_LIMIT__LIMIT_COUNT_MASK 0x0000007cL
+#define MC_HUB_RDREQ_DMIF_LIMIT__LIMIT_COUNT__SHIFT 0x00000002
+#define MC_HUB_RDREQ_DMIF__MAXBURST_MASK 0x00000780L
+#define MC_HUB_RDREQ_DMIF__MAXBURST__SHIFT 0x00000007
+#define MC_HUB_RDREQ_DMIF__PRESCALE_MASK 0x00000006L
+#define MC_HUB_RDREQ_DMIF__PRESCALE__SHIFT 0x00000001
+#define MC_HUB_RDREQ_DMIF__STALL_MODE_MASK 0x00000030L
+#define MC_HUB_RDREQ_DMIF__STALL_MODE__SHIFT 0x00000004
+#define MC_HUB_RDREQ_DMIF__STALL_OVERRIDE_MASK 0x00000040L
+#define MC_HUB_RDREQ_DMIF__STALL_OVERRIDE__SHIFT 0x00000006
+#define MC_HUB_RDREQ_DMIF__STALL_OVERRIDE_WTM_MASK 0x00008000L
+#define MC_HUB_RDREQ_DMIF__STALL_OVERRIDE_WTM__SHIFT 0x0000000f
+#define MC_HUB_RDREQ_GBL0__STALL_THRESHOLD_MASK 0x000000ffL
+#define MC_HUB_RDREQ_GBL0__STALL_THRESHOLD__SHIFT 0x00000000
+#define MC_HUB_RDREQ_GBL1__STALL_THRESHOLD_MASK 0x000000ffL
+#define MC_HUB_RDREQ_GBL1__STALL_THRESHOLD__SHIFT 0x00000000
+#define MC_HUB_RDREQ_HDP__BLACKOUT_EXEMPT_MASK 0x00000008L
+#define MC_HUB_RDREQ_HDP__BLACKOUT_EXEMPT__SHIFT 0x00000003
+#define MC_HUB_RDREQ_HDP__ENABLE_MASK 0x00000001L
+#define MC_HUB_RDREQ_HDP__ENABLE__SHIFT 0x00000000
+#define MC_HUB_RDREQ_HDP__LAZY_TIMER_MASK 0x00007800L
+#define MC_HUB_RDREQ_HDP__LAZY_TIMER__SHIFT 0x0000000b
+#define MC_HUB_RDREQ_HDP__MAXBURST_MASK 0x00000780L
+#define MC_HUB_RDREQ_HDP__MAXBURST__SHIFT 0x00000007
+#define MC_HUB_RDREQ_HDP__PRESCALE_MASK 0x00000006L
+#define MC_HUB_RDREQ_HDP__PRESCALE__SHIFT 0x00000001
+#define MC_HUB_RDREQ_HDP__STALL_MODE_MASK 0x00000030L
+#define MC_HUB_RDREQ_HDP__STALL_MODE__SHIFT 0x00000004
+#define MC_HUB_RDREQ_HDP__STALL_OVERRIDE_MASK 0x00000040L
+#define MC_HUB_RDREQ_HDP__STALL_OVERRIDE__SHIFT 0x00000006
+#define MC_HUB_RDREQ_HDP__STALL_OVERRIDE_WTM_MASK 0x00008000L
+#define MC_HUB_RDREQ_HDP__STALL_OVERRIDE_WTM__SHIFT 0x0000000f
+#define MC_HUB_RDREQ_MCDW__ASK_CREDITS_MASK 0x0003f800L
+#define MC_HUB_RDREQ_MCDW__ASK_CREDITS__SHIFT 0x0000000b
+#define MC_HUB_RDREQ_MCDW__BLACKOUT_EXEMPT_MASK 0x00000002L
+#define MC_HUB_RDREQ_MCDW__BLACKOUT_EXEMPT__SHIFT 0x00000001
+#define MC_HUB_RDREQ_MCDW__BUS_MASK 0x00000004L
+#define MC_HUB_RDREQ_MCDW__BUS__SHIFT 0x00000002
+#define MC_HUB_RDREQ_MCDW__DISPLAY_CREDITS_MASK 0x01fc0000L
+#define MC_HUB_RDREQ_MCDW__DISPLAY_CREDITS__SHIFT 0x00000012
+#define MC_HUB_RDREQ_MCDW__ENABLE_MASK 0x00000001L
+#define MC_HUB_RDREQ_MCDW__ENABLE__SHIFT 0x00000000
+#define MC_HUB_RDREQ_MCDW__LAZY_TIMER_MASK 0x00000780L
+#define MC_HUB_RDREQ_MCDW__LAZY_TIMER__SHIFT 0x00000007
+#define MC_HUB_RDREQ_MCDW__MAXBURST_MASK 0x00000078L
+#define MC_HUB_RDREQ_MCDW__MAXBURST__SHIFT 0x00000003
+#define MC_HUB_RDREQ_MCDW__STALL_THRESHOLD_MASK 0xfe000000L
+#define MC_HUB_RDREQ_MCDW__STALL_THRESHOLD__SHIFT 0x00000019
+#define MC_HUB_RDREQ_MCDX__ASK_CREDITS_MASK 0x0003f800L
+#define MC_HUB_RDREQ_MCDX__ASK_CREDITS__SHIFT 0x0000000b
+#define MC_HUB_RDREQ_MCDX__BLACKOUT_EXEMPT_MASK 0x00000002L
+#define MC_HUB_RDREQ_MCDX__BLACKOUT_EXEMPT__SHIFT 0x00000001
+#define MC_HUB_RDREQ_MCDX__BUS_MASK 0x00000004L
+#define MC_HUB_RDREQ_MCDX__BUS__SHIFT 0x00000002
+#define MC_HUB_RDREQ_MCDX__DISPLAY_CREDITS_MASK 0x01fc0000L
+#define MC_HUB_RDREQ_MCDX__DISPLAY_CREDITS__SHIFT 0x00000012
+#define MC_HUB_RDREQ_MCDX__ENABLE_MASK 0x00000001L
+#define MC_HUB_RDREQ_MCDX__ENABLE__SHIFT 0x00000000
+#define MC_HUB_RDREQ_MCDX__LAZY_TIMER_MASK 0x00000780L
+#define MC_HUB_RDREQ_MCDX__LAZY_TIMER__SHIFT 0x00000007
+#define MC_HUB_RDREQ_MCDX__MAXBURST_MASK 0x00000078L
+#define MC_HUB_RDREQ_MCDX__MAXBURST__SHIFT 0x00000003
+#define MC_HUB_RDREQ_MCDX__STALL_THRESHOLD_MASK 0xfe000000L
+#define MC_HUB_RDREQ_MCDX__STALL_THRESHOLD__SHIFT 0x00000019
+#define MC_HUB_RDREQ_MCDY__ASK_CREDITS_MASK 0x0003f800L
+#define MC_HUB_RDREQ_MCDY__ASK_CREDITS__SHIFT 0x0000000b
+#define MC_HUB_RDREQ_MCDY__BLACKOUT_EXEMPT_MASK 0x00000002L
+#define MC_HUB_RDREQ_MCDY__BLACKOUT_EXEMPT__SHIFT 0x00000001
+#define MC_HUB_RDREQ_MCDY__BUS_MASK 0x00000004L
+#define MC_HUB_RDREQ_MCDY__BUS__SHIFT 0x00000002
+#define MC_HUB_RDREQ_MCDY__DISPLAY_CREDITS_MASK 0x01fc0000L
+#define MC_HUB_RDREQ_MCDY__DISPLAY_CREDITS__SHIFT 0x00000012
+#define MC_HUB_RDREQ_MCDY__ENABLE_MASK 0x00000001L
+#define MC_HUB_RDREQ_MCDY__ENABLE__SHIFT 0x00000000
+#define MC_HUB_RDREQ_MCDY__LAZY_TIMER_MASK 0x00000780L
+#define MC_HUB_RDREQ_MCDY__LAZY_TIMER__SHIFT 0x00000007
+#define MC_HUB_RDREQ_MCDY__MAXBURST_MASK 0x00000078L
+#define MC_HUB_RDREQ_MCDY__MAXBURST__SHIFT 0x00000003
+#define MC_HUB_RDREQ_MCDY__STALL_THRESHOLD_MASK 0xfe000000L
+#define MC_HUB_RDREQ_MCDY__STALL_THRESHOLD__SHIFT 0x00000019
+#define MC_HUB_RDREQ_MCDZ__ASK_CREDITS_MASK 0x0003f800L
+#define MC_HUB_RDREQ_MCDZ__ASK_CREDITS__SHIFT 0x0000000b
+#define MC_HUB_RDREQ_MCDZ__BLACKOUT_EXEMPT_MASK 0x00000002L
+#define MC_HUB_RDREQ_MCDZ__BLACKOUT_EXEMPT__SHIFT 0x00000001
+#define MC_HUB_RDREQ_MCDZ__BUS_MASK 0x00000004L
+#define MC_HUB_RDREQ_MCDZ__BUS__SHIFT 0x00000002
+#define MC_HUB_RDREQ_MCDZ__DISPLAY_CREDITS_MASK 0x01fc0000L
+#define MC_HUB_RDREQ_MCDZ__DISPLAY_CREDITS__SHIFT 0x00000012
+#define MC_HUB_RDREQ_MCDZ__ENABLE_MASK 0x00000001L
+#define MC_HUB_RDREQ_MCDZ__ENABLE__SHIFT 0x00000000
+#define MC_HUB_RDREQ_MCDZ__LAZY_TIMER_MASK 0x00000780L
+#define MC_HUB_RDREQ_MCDZ__LAZY_TIMER__SHIFT 0x00000007
+#define MC_HUB_RDREQ_MCDZ__MAXBURST_MASK 0x00000078L
+#define MC_HUB_RDREQ_MCDZ__MAXBURST__SHIFT 0x00000003
+#define MC_HUB_RDREQ_MCDZ__STALL_THRESHOLD_MASK 0xfe000000L
+#define MC_HUB_RDREQ_MCDZ__STALL_THRESHOLD__SHIFT 0x00000019
+#define MC_HUB_RDREQ_MCIF__BLACKOUT_EXEMPT_MASK 0x00000008L
+#define MC_HUB_RDREQ_MCIF__BLACKOUT_EXEMPT__SHIFT 0x00000003
+#define MC_HUB_RDREQ_MCIF__ENABLE_MASK 0x00000001L
+#define MC_HUB_RDREQ_MCIF__ENABLE__SHIFT 0x00000000
+#define MC_HUB_RDREQ_MCIF__LAZY_TIMER_MASK 0x00007800L
+#define MC_HUB_RDREQ_MCIF__LAZY_TIMER__SHIFT 0x0000000b
+#define MC_HUB_RDREQ_MCIF__MAXBURST_MASK 0x00000780L
+#define MC_HUB_RDREQ_MCIF__MAXBURST__SHIFT 0x00000007
+#define MC_HUB_RDREQ_MCIF__PRESCALE_MASK 0x00000006L
+#define MC_HUB_RDREQ_MCIF__PRESCALE__SHIFT 0x00000001
+#define MC_HUB_RDREQ_MCIF__STALL_MODE_MASK 0x00000030L
+#define MC_HUB_RDREQ_MCIF__STALL_MODE__SHIFT 0x00000004
+#define MC_HUB_RDREQ_MCIF__STALL_OVERRIDE_MASK 0x00000040L
+#define MC_HUB_RDREQ_MCIF__STALL_OVERRIDE__SHIFT 0x00000006
+#define MC_HUB_RDREQ_MCIF__STALL_OVERRIDE_WTM_MASK 0x00008000L
+#define MC_HUB_RDREQ_MCIF__STALL_OVERRIDE_WTM__SHIFT 0x0000000f
+#define MC_HUB_RDREQ_RLC__BLACKOUT_EXEMPT_MASK 0x00000008L
+#define MC_HUB_RDREQ_RLC__BLACKOUT_EXEMPT__SHIFT 0x00000003
+#define MC_HUB_RDREQ_RLC__ENABLE_MASK 0x00000001L
+#define MC_HUB_RDREQ_RLC__ENABLE__SHIFT 0x00000000
+#define MC_HUB_RDREQ_RLC__LAZY_TIMER_MASK 0x00007800L
+#define MC_HUB_RDREQ_RLC__LAZY_TIMER__SHIFT 0x0000000b
+#define MC_HUB_RDREQ_RLC__MAXBURST_MASK 0x00000780L
+#define MC_HUB_RDREQ_RLC__MAXBURST__SHIFT 0x00000007
+#define MC_HUB_RDREQ_RLC__PRESCALE_MASK 0x00000006L
+#define MC_HUB_RDREQ_RLC__PRESCALE__SHIFT 0x00000001
+#define MC_HUB_RDREQ_RLC__STALL_MODE_MASK 0x00000030L
+#define MC_HUB_RDREQ_RLC__STALL_MODE__SHIFT 0x00000004
+#define MC_HUB_RDREQ_RLC__STALL_OVERRIDE_MASK 0x00000040L
+#define MC_HUB_RDREQ_RLC__STALL_OVERRIDE__SHIFT 0x00000006
+#define MC_HUB_RDREQ_RLC__STALL_OVERRIDE_WTM_MASK 0x00008000L
+#define MC_HUB_RDREQ_RLC__STALL_OVERRIDE_WTM__SHIFT 0x0000000f
+#define MC_HUB_RDREQ_SEM__BLACKOUT_EXEMPT_MASK 0x00000008L
+#define MC_HUB_RDREQ_SEM__BLACKOUT_EXEMPT__SHIFT 0x00000003
+#define MC_HUB_RDREQ_SEM__ENABLE_MASK 0x00000001L
+#define MC_HUB_RDREQ_SEM__ENABLE__SHIFT 0x00000000
+#define MC_HUB_RDREQ_SEM__LAZY_TIMER_MASK 0x00007800L
+#define MC_HUB_RDREQ_SEM__LAZY_TIMER__SHIFT 0x0000000b
+#define MC_HUB_RDREQ_SEM__MAXBURST_MASK 0x00000780L
+#define MC_HUB_RDREQ_SEM__MAXBURST__SHIFT 0x00000007
+#define MC_HUB_RDREQ_SEM__PRESCALE_MASK 0x00000006L
+#define MC_HUB_RDREQ_SEM__PRESCALE__SHIFT 0x00000001
+#define MC_HUB_RDREQ_SEM__STALL_MODE_MASK 0x00000030L
+#define MC_HUB_RDREQ_SEM__STALL_MODE__SHIFT 0x00000004
+#define MC_HUB_RDREQ_SEM__STALL_OVERRIDE_MASK 0x00000040L
+#define MC_HUB_RDREQ_SEM__STALL_OVERRIDE__SHIFT 0x00000006
+#define MC_HUB_RDREQ_SEM__STALL_OVERRIDE_WTM_MASK 0x00008000L
+#define MC_HUB_RDREQ_SEM__STALL_OVERRIDE_WTM__SHIFT 0x0000000f
+#define MC_HUB_RDREQ_SIP__ASK_CREDITS_MASK 0x0000007fL
+#define MC_HUB_RDREQ_SIP__ASK_CREDITS__SHIFT 0x00000000
+#define MC_HUB_RDREQ_SIP__DISPLAY_CREDITS_MASK 0x00007f00L
+#define MC_HUB_RDREQ_SIP__DISPLAY_CREDITS__SHIFT 0x00000008
+#define MC_HUB_RDREQ_SIP__DUMMY_MASK 0x00000080L
+#define MC_HUB_RDREQ_SIP__DUMMY__SHIFT 0x00000007
+#define MC_HUB_RDREQ_SMU__BLACKOUT_EXEMPT_MASK 0x00000008L
+#define MC_HUB_RDREQ_SMU__BLACKOUT_EXEMPT__SHIFT 0x00000003
+#define MC_HUB_RDREQ_SMU__ENABLE_MASK 0x00000001L
+#define MC_HUB_RDREQ_SMU__ENABLE__SHIFT 0x00000000
+#define MC_HUB_RDREQ_SMU__LAZY_TIMER_MASK 0x00007800L
+#define MC_HUB_RDREQ_SMU__LAZY_TIMER__SHIFT 0x0000000b
+#define MC_HUB_RDREQ_SMU__MAXBURST_MASK 0x00000780L
+#define MC_HUB_RDREQ_SMU__MAXBURST__SHIFT 0x00000007
+#define MC_HUB_RDREQ_SMU__PRESCALE_MASK 0x00000006L
+#define MC_HUB_RDREQ_SMU__PRESCALE__SHIFT 0x00000001
+#define MC_HUB_RDREQ_SMU__STALL_MODE_MASK 0x00000030L
+#define MC_HUB_RDREQ_SMU__STALL_MODE__SHIFT 0x00000004
+#define MC_HUB_RDREQ_SMU__STALL_OVERRIDE_MASK 0x00000040L
+#define MC_HUB_RDREQ_SMU__STALL_OVERRIDE__SHIFT 0x00000006
+#define MC_HUB_RDREQ_SMU__STALL_OVERRIDE_WTM_MASK 0x00008000L
+#define MC_HUB_RDREQ_SMU__STALL_OVERRIDE_WTM__SHIFT 0x0000000f
+#define MC_HUB_RDREQ_STATUS__GBL0_BYPASS_STOR_FULL_MASK 0x00000080L
+#define MC_HUB_RDREQ_STATUS__GBL0_BYPASS_STOR_FULL__SHIFT 0x00000007
+#define MC_HUB_RDREQ_STATUS__GBL0_STOR_FULL_MASK 0x00000040L
+#define MC_HUB_RDREQ_STATUS__GBL0_STOR_FULL__SHIFT 0x00000006
+#define MC_HUB_RDREQ_STATUS__GBL0_VM_FULL_MASK 0x00000020L
+#define MC_HUB_RDREQ_STATUS__GBL0_VM_FULL__SHIFT 0x00000005
+#define MC_HUB_RDREQ_STATUS__GBL1_BYPASS_STOR_FULL_MASK 0x00000400L
+#define MC_HUB_RDREQ_STATUS__GBL1_BYPASS_STOR_FULL__SHIFT 0x0000000a
+#define MC_HUB_RDREQ_STATUS__GBL1_STOR_FULL_MASK 0x00000200L
+#define MC_HUB_RDREQ_STATUS__GBL1_STOR_FULL__SHIFT 0x00000009
+#define MC_HUB_RDREQ_STATUS__GBL1_VM_FULL_MASK 0x00000100L
+#define MC_HUB_RDREQ_STATUS__GBL1_VM_FULL__SHIFT 0x00000008
+#define MC_HUB_RDREQ_STATUS__MCDW_RD_AVAIL_MASK 0x00000002L
+#define MC_HUB_RDREQ_STATUS__MCDW_RD_AVAIL__SHIFT 0x00000001
+#define MC_HUB_RDREQ_STATUS__MCDX_RD_AVAIL_MASK 0x00000004L
+#define MC_HUB_RDREQ_STATUS__MCDX_RD_AVAIL__SHIFT 0x00000002
+#define MC_HUB_RDREQ_STATUS__MCDY_RD_AVAIL_MASK 0x00000008L
+#define MC_HUB_RDREQ_STATUS__MCDY_RD_AVAIL__SHIFT 0x00000003
+#define MC_HUB_RDREQ_STATUS__MCDZ_RD_AVAIL_MASK 0x00000010L
+#define MC_HUB_RDREQ_STATUS__MCDZ_RD_AVAIL__SHIFT 0x00000004
+#define MC_HUB_RDREQ_STATUS__PWRXPRESS_ERR_MASK 0x00000800L
+#define MC_HUB_RDREQ_STATUS__PWRXPRESS_ERR__SHIFT 0x0000000b
+#define MC_HUB_RDREQ_STATUS__SIP_AVAIL_MASK 0x00000001L
+#define MC_HUB_RDREQ_STATUS__SIP_AVAIL__SHIFT 0x00000000
+#define MC_HUB_RDREQ_UMC__BLACKOUT_EXEMPT_MASK 0x00000008L
+#define MC_HUB_RDREQ_UMC__BLACKOUT_EXEMPT__SHIFT 0x00000003
+#define MC_HUB_RDREQ_UMC__ENABLE_MASK 0x00000001L
+#define MC_HUB_RDREQ_UMC__ENABLE__SHIFT 0x00000000
+#define MC_HUB_RDREQ_UMC__LAZY_TIMER_MASK 0x00007800L
+#define MC_HUB_RDREQ_UMC__LAZY_TIMER__SHIFT 0x0000000b
+#define MC_HUB_RDREQ_UMC__MAXBURST_MASK 0x00000780L
+#define MC_HUB_RDREQ_UMC__MAXBURST__SHIFT 0x00000007
+#define MC_HUB_RDREQ_UMC__PRESCALE_MASK 0x00000006L
+#define MC_HUB_RDREQ_UMC__PRESCALE__SHIFT 0x00000001
+#define MC_HUB_RDREQ_UMC__STALL_MODE_MASK 0x00000030L
+#define MC_HUB_RDREQ_UMC__STALL_MODE__SHIFT 0x00000004
+#define MC_HUB_RDREQ_UMC__STALL_OVERRIDE_MASK 0x00000040L
+#define MC_HUB_RDREQ_UMC__STALL_OVERRIDE__SHIFT 0x00000006
+#define MC_HUB_RDREQ_UMC__STALL_OVERRIDE_WTM_MASK 0x00008000L
+#define MC_HUB_RDREQ_UMC__STALL_OVERRIDE_WTM__SHIFT 0x0000000f
+#define MC_HUB_RDREQ_UVD__BLACKOUT_EXEMPT_MASK 0x00000008L
+#define MC_HUB_RDREQ_UVD__BLACKOUT_EXEMPT__SHIFT 0x00000003
+#define MC_HUB_RDREQ_UVD__ENABLE_MASK 0x00000001L
+#define MC_HUB_RDREQ_UVD__ENABLE__SHIFT 0x00000000
+#define MC_HUB_RDREQ_UVD__LAZY_TIMER_MASK 0x00007800L
+#define MC_HUB_RDREQ_UVD__LAZY_TIMER__SHIFT 0x0000000b
+#define MC_HUB_RDREQ_UVD__MAXBURST_MASK 0x00000780L
+#define MC_HUB_RDREQ_UVD__MAXBURST__SHIFT 0x00000007
+#define MC_HUB_RDREQ_UVD__PRESCALE_MASK 0x00000006L
+#define MC_HUB_RDREQ_UVD__PRESCALE__SHIFT 0x00000001
+#define MC_HUB_RDREQ_UVD__STALL_MODE_MASK 0x00000030L
+#define MC_HUB_RDREQ_UVD__STALL_MODE__SHIFT 0x00000004
+#define MC_HUB_RDREQ_UVD__STALL_OVERRIDE_MASK 0x00000040L
+#define MC_HUB_RDREQ_UVD__STALL_OVERRIDE__SHIFT 0x00000006
+#define MC_HUB_RDREQ_UVD__STALL_OVERRIDE_WTM_MASK 0x00008000L
+#define MC_HUB_RDREQ_UVD__STALL_OVERRIDE_WTM__SHIFT 0x0000000f
+#define MC_HUB_RDREQ_UVD__VM_BYPASS_MASK 0x00010000L
+#define MC_HUB_RDREQ_UVD__VM_BYPASS__SHIFT 0x00000010
+#define MC_HUB_RDREQ_VCE__BLACKOUT_EXEMPT_MASK 0x00000008L
+#define MC_HUB_RDREQ_VCE__BLACKOUT_EXEMPT__SHIFT 0x00000003
+#define MC_HUB_RDREQ_VCE__ENABLE_MASK 0x00000001L
+#define MC_HUB_RDREQ_VCE__ENABLE__SHIFT 0x00000000
+#define MC_HUB_RDREQ_VCE__LAZY_TIMER_MASK 0x00007800L
+#define MC_HUB_RDREQ_VCE__LAZY_TIMER__SHIFT 0x0000000b
+#define MC_HUB_RDREQ_VCE__MAXBURST_MASK 0x00000780L
+#define MC_HUB_RDREQ_VCE__MAXBURST__SHIFT 0x00000007
+#define MC_HUB_RDREQ_VCE__PRESCALE_MASK 0x00000006L
+#define MC_HUB_RDREQ_VCE__PRESCALE__SHIFT 0x00000001
+#define MC_HUB_RDREQ_VCE__STALL_MODE_MASK 0x00000030L
+#define MC_HUB_RDREQ_VCE__STALL_MODE__SHIFT 0x00000004
+#define MC_HUB_RDREQ_VCE__STALL_OVERRIDE_MASK 0x00000040L
+#define MC_HUB_RDREQ_VCE__STALL_OVERRIDE__SHIFT 0x00000006
+#define MC_HUB_RDREQ_VCE__STALL_OVERRIDE_WTM_MASK 0x00008000L
+#define MC_HUB_RDREQ_VCE__STALL_OVERRIDE_WTM__SHIFT 0x0000000f
+#define MC_HUB_RDREQ_VCEU__BLACKOUT_EXEMPT_MASK 0x00000008L
+#define MC_HUB_RDREQ_VCEU__BLACKOUT_EXEMPT__SHIFT 0x00000003
+#define MC_HUB_RDREQ_VCEU__ENABLE_MASK 0x00000001L
+#define MC_HUB_RDREQ_VCEU__ENABLE__SHIFT 0x00000000
+#define MC_HUB_RDREQ_VCEU__LAZY_TIMER_MASK 0x00007800L
+#define MC_HUB_RDREQ_VCEU__LAZY_TIMER__SHIFT 0x0000000b
+#define MC_HUB_RDREQ_VCEU__MAXBURST_MASK 0x00000780L
+#define MC_HUB_RDREQ_VCEU__MAXBURST__SHIFT 0x00000007
+#define MC_HUB_RDREQ_VCEU__PRESCALE_MASK 0x00000006L
+#define MC_HUB_RDREQ_VCEU__PRESCALE__SHIFT 0x00000001
+#define MC_HUB_RDREQ_VCEU__STALL_MODE_MASK 0x00000030L
+#define MC_HUB_RDREQ_VCEU__STALL_MODE__SHIFT 0x00000004
+#define MC_HUB_RDREQ_VCEU__STALL_OVERRIDE_MASK 0x00000040L
+#define MC_HUB_RDREQ_VCEU__STALL_OVERRIDE__SHIFT 0x00000006
+#define MC_HUB_RDREQ_VCEU__STALL_OVERRIDE_WTM_MASK 0x00008000L
+#define MC_HUB_RDREQ_VCEU__STALL_OVERRIDE_WTM__SHIFT 0x0000000f
+#define MC_HUB_RDREQ_VMC__BLACKOUT_EXEMPT_MASK 0x00000008L
+#define MC_HUB_RDREQ_VMC__BLACKOUT_EXEMPT__SHIFT 0x00000003
+#define MC_HUB_RDREQ_VMC__ENABLE_MASK 0x00000001L
+#define MC_HUB_RDREQ_VMC__ENABLE__SHIFT 0x00000000
+#define MC_HUB_RDREQ_VMC__LAZY_TIMER_MASK 0x00007800L
+#define MC_HUB_RDREQ_VMC__LAZY_TIMER__SHIFT 0x0000000b
+#define MC_HUB_RDREQ_VMC__MAXBURST_MASK 0x00000780L
+#define MC_HUB_RDREQ_VMC__MAXBURST__SHIFT 0x00000007
+#define MC_HUB_RDREQ_VMC__PRESCALE_MASK 0x00000006L
+#define MC_HUB_RDREQ_VMC__PRESCALE__SHIFT 0x00000001
+#define MC_HUB_RDREQ_VMC__STALL_MODE_MASK 0x00000030L
+#define MC_HUB_RDREQ_VMC__STALL_MODE__SHIFT 0x00000004
+#define MC_HUB_RDREQ_VMC__STALL_OVERRIDE_MASK 0x00000040L
+#define MC_HUB_RDREQ_VMC__STALL_OVERRIDE__SHIFT 0x00000006
+#define MC_HUB_RDREQ_VMC__STALL_OVERRIDE_WTM_MASK 0x00008000L
+#define MC_HUB_RDREQ_VMC__STALL_OVERRIDE_WTM__SHIFT 0x0000000f
+#define MC_HUB_RDREQ_WTM_CNTL__GROUP0_DECREMENT_MASK 0x00000007L
+#define MC_HUB_RDREQ_WTM_CNTL__GROUP0_DECREMENT__SHIFT 0x00000000
+#define MC_HUB_RDREQ_WTM_CNTL__GROUP1_DECREMENT_MASK 0x00000038L
+#define MC_HUB_RDREQ_WTM_CNTL__GROUP1_DECREMENT__SHIFT 0x00000003
+#define MC_HUB_RDREQ_WTM_CNTL__GROUP2_DECREMENT_MASK 0x000001c0L
+#define MC_HUB_RDREQ_WTM_CNTL__GROUP2_DECREMENT__SHIFT 0x00000006
+#define MC_HUB_RDREQ_WTM_CNTL__GROUP3_DECREMENT_MASK 0x00000e00L
+#define MC_HUB_RDREQ_WTM_CNTL__GROUP3_DECREMENT__SHIFT 0x00000009
+#define MC_HUB_RDREQ_WTM_CNTL__GROUP4_DECREMENT_MASK 0x00007000L
+#define MC_HUB_RDREQ_WTM_CNTL__GROUP4_DECREMENT__SHIFT 0x0000000c
+#define MC_HUB_RDREQ_WTM_CNTL__GROUP5_DECREMENT_MASK 0x00038000L
+#define MC_HUB_RDREQ_WTM_CNTL__GROUP5_DECREMENT__SHIFT 0x0000000f
+#define MC_HUB_RDREQ_WTM_CNTL__GROUP6_DECREMENT_MASK 0x001c0000L
+#define MC_HUB_RDREQ_WTM_CNTL__GROUP6_DECREMENT__SHIFT 0x00000012
+#define MC_HUB_RDREQ_WTM_CNTL__GROUP7_DECREMENT_MASK 0x00e00000L
+#define MC_HUB_RDREQ_WTM_CNTL__GROUP7_DECREMENT__SHIFT 0x00000015
+#define MC_HUB_RDREQ_XDMAM__BLACKOUT_EXEMPT_MASK 0x00000008L
+#define MC_HUB_RDREQ_XDMAM__BLACKOUT_EXEMPT__SHIFT 0x00000003
+#define MC_HUB_RDREQ_XDMAM__ENABLE_MASK 0x00000001L
+#define MC_HUB_RDREQ_XDMAM__ENABLE__SHIFT 0x00000000
+#define MC_HUB_RDREQ_XDMAM__LAZY_TIMER_MASK 0x00007800L
+#define MC_HUB_RDREQ_XDMAM__LAZY_TIMER__SHIFT 0x0000000b
+#define MC_HUB_RDREQ_XDMAM__MAXBURST_MASK 0x00000780L
+#define MC_HUB_RDREQ_XDMAM__MAXBURST__SHIFT 0x00000007
+#define MC_HUB_RDREQ_XDMAM__PRESCALE_MASK 0x00000006L
+#define MC_HUB_RDREQ_XDMAM__PRESCALE__SHIFT 0x00000001
+#define MC_HUB_RDREQ_XDMAM__STALL_MODE_MASK 0x00000030L
+#define MC_HUB_RDREQ_XDMAM__STALL_MODE__SHIFT 0x00000004
+#define MC_HUB_RDREQ_XDMAM__STALL_OVERRIDE_MASK 0x00000040L
+#define MC_HUB_RDREQ_XDMAM__STALL_OVERRIDE__SHIFT 0x00000006
+#define MC_HUB_RDREQ_XDMAM__STALL_OVERRIDE_WTM_MASK 0x00008000L
+#define MC_HUB_RDREQ_XDMAM__STALL_OVERRIDE_WTM__SHIFT 0x0000000f
+#define MC_HUB_SHARED_DAGB_DLY__CLI_MASK 0x001f0000L
+#define MC_HUB_SHARED_DAGB_DLY__CLI__SHIFT 0x00000010
+#define MC_HUB_SHARED_DAGB_DLY__DLY_MASK 0x0000003fL
+#define MC_HUB_SHARED_DAGB_DLY__DLY__SHIFT 0x00000000
+#define MC_HUB_SHARED_DAGB_DLY__POS_MASK 0x1f000000L
+#define MC_HUB_SHARED_DAGB_DLY__POS__SHIFT 0x00000018
+#define MC_HUB_WDP_BP__ENABLE_MASK 0x00000001L
+#define MC_HUB_WDP_BP__ENABLE__SHIFT 0x00000000
+#define MC_HUB_WDP_BP__RDRET_MASK 0x0003fffeL
+#define MC_HUB_WDP_BP__RDRET__SHIFT 0x00000001
+#define MC_HUB_WDP_BP__WRREQ_MASK 0x3ffc0000L
+#define MC_HUB_WDP_BP__WRREQ__SHIFT 0x00000012
+#define MC_HUB_WDP_CNTL__DEBUG_REG_MASK 0x00001fe0L
+#define MC_HUB_WDP_CNTL__DEBUG_REG__SHIFT 0x00000005
+#define MC_HUB_WDP_CNTL__DISABLE_SELF_INIT_GBL0_MASK 0x00002000L
+#define MC_HUB_WDP_CNTL__DISABLE_SELF_INIT_GBL0__SHIFT 0x0000000d
+#define MC_HUB_WDP_CNTL__DISABLE_SELF_INIT_GBL1_MASK 0x00004000L
+#define MC_HUB_WDP_CNTL__DISABLE_SELF_INIT_GBL1__SHIFT 0x0000000e
+#define MC_HUB_WDP_CNTL__DISABLE_SELF_INIT_INTERNAL_MASK 0x00008000L
+#define MC_HUB_WDP_CNTL__DISABLE_SELF_INIT_INTERNAL__SHIFT 0x0000000f
+#define MC_HUB_WDP_CNTL__DISP_WAIT_EOP_MASK 0x00040000L
+#define MC_HUB_WDP_CNTL__DISP_WAIT_EOP__SHIFT 0x00000012
+#define MC_HUB_WDP_CNTL__FAIR_CH_SW_MASK 0x00010000L
+#define MC_HUB_WDP_CNTL__FAIR_CH_SW__SHIFT 0x00000010
+#define MC_HUB_WDP_CNTL__JUMPAHEAD_GBL0_MASK 0x00000002L
+#define MC_HUB_WDP_CNTL__JUMPAHEAD_GBL0__SHIFT 0x00000001
+#define MC_HUB_WDP_CNTL__JUMPAHEAD_GBL1_MASK 0x00000004L
+#define MC_HUB_WDP_CNTL__JUMPAHEAD_GBL1__SHIFT 0x00000002
+#define MC_HUB_WDP_CNTL__JUMPAHEAD_INTERNAL_MASK 0x00000008L
+#define MC_HUB_WDP_CNTL__JUMPAHEAD_INTERNAL__SHIFT 0x00000003
+#define MC_HUB_WDP_CNTL__LCLWRREQ_BYPASS_MASK 0x00020000L
+#define MC_HUB_WDP_CNTL__LCLWRREQ_BYPASS__SHIFT 0x00000011
+#define MC_HUB_WDP_CNTL__MCD_WAIT_EOP_MASK 0x00080000L
+#define MC_HUB_WDP_CNTL__MCD_WAIT_EOP__SHIFT 0x00000013
+#define MC_HUB_WDP_CNTL__OVERRIDE_STALL_ENABLE_MASK 0x00000010L
+#define MC_HUB_WDP_CNTL__OVERRIDE_STALL_ENABLE__SHIFT 0x00000004
+#define MC_HUB_WDP_CNTL__SIP_WAIT_EOP_MASK 0x00100000L
+#define MC_HUB_WDP_CNTL__SIP_WAIT_EOP__SHIFT 0x00000014
+#define MC_HUB_WDP_CREDITS__STOR0_MASK 0x00ff0000L
+#define MC_HUB_WDP_CREDITS__STOR0__SHIFT 0x00000010
+#define MC_HUB_WDP_CREDITS__STOR1_MASK 0xff000000L
+#define MC_HUB_WDP_CREDITS__STOR1__SHIFT 0x00000018
+#define MC_HUB_WDP_CREDITS__VM0_MASK 0x000000ffL
+#define MC_HUB_WDP_CREDITS__VM0__SHIFT 0x00000000
+#define MC_HUB_WDP_CREDITS__VM1_MASK 0x0000ff00L
+#define MC_HUB_WDP_CREDITS__VM1__SHIFT 0x00000008
+#define MC_HUB_WDP_ERR__MGPU1_TARG_SYS_MASK 0x00000001L
+#define MC_HUB_WDP_ERR__MGPU1_TARG_SYS__SHIFT 0x00000000
+#define MC_HUB_WDP_ERR__MGPU2_TARG_SYS_MASK 0x00000002L
+#define MC_HUB_WDP_ERR__MGPU2_TARG_SYS__SHIFT 0x00000001
+#define MC_HUB_WDP_GBL0__LAZY_TIMER_MASK 0x000000f0L
+#define MC_HUB_WDP_GBL0__LAZY_TIMER__SHIFT 0x00000004
+#define MC_HUB_WDP_GBL0__MAXBURST_MASK 0x0000000fL
+#define MC_HUB_WDP_GBL0__MAXBURST__SHIFT 0x00000000
+#define MC_HUB_WDP_GBL0__STALL_MODE_MASK 0x00010000L
+#define MC_HUB_WDP_GBL0__STALL_MODE__SHIFT 0x00000010
+#define MC_HUB_WDP_GBL0__STALL_THRESHOLD_MASK 0x0000ff00L
+#define MC_HUB_WDP_GBL0__STALL_THRESHOLD__SHIFT 0x00000008
+#define MC_HUB_WDP_GBL1__LAZY_TIMER_MASK 0x000000f0L
+#define MC_HUB_WDP_GBL1__LAZY_TIMER__SHIFT 0x00000004
+#define MC_HUB_WDP_GBL1__MAXBURST_MASK 0x0000000fL
+#define MC_HUB_WDP_GBL1__MAXBURST__SHIFT 0x00000000
+#define MC_HUB_WDP_GBL1__STALL_MODE_MASK 0x00010000L
+#define MC_HUB_WDP_GBL1__STALL_MODE__SHIFT 0x00000010
+#define MC_HUB_WDP_GBL1__STALL_THRESHOLD_MASK 0x0000ff00L
+#define MC_HUB_WDP_GBL1__STALL_THRESHOLD__SHIFT 0x00000008
+#define MC_HUB_WDP_HDP__BLACKOUT_EXEMPT_MASK 0x00000008L
+#define MC_HUB_WDP_HDP__BLACKOUT_EXEMPT__SHIFT 0x00000003
+#define MC_HUB_WDP_HDP__ENABLE_MASK 0x00000001L
+#define MC_HUB_WDP_HDP__ENABLE__SHIFT 0x00000000
+#define MC_HUB_WDP_HDP__LAZY_TIMER_MASK 0x00007800L
+#define MC_HUB_WDP_HDP__LAZY_TIMER__SHIFT 0x0000000b
+#define MC_HUB_WDP_HDP__MAXBURST_MASK 0x00000780L
+#define MC_HUB_WDP_HDP__MAXBURST__SHIFT 0x00000007
+#define MC_HUB_WDP_HDP__PRESCALE_MASK 0x00000006L
+#define MC_HUB_WDP_HDP__PRESCALE__SHIFT 0x00000001
+#define MC_HUB_WDP_HDP__STALL_MODE_MASK 0x00000030L
+#define MC_HUB_WDP_HDP__STALL_MODE__SHIFT 0x00000004
+#define MC_HUB_WDP_HDP__STALL_OVERRIDE_MASK 0x00000040L
+#define MC_HUB_WDP_HDP__STALL_OVERRIDE__SHIFT 0x00000006
+#define MC_HUB_WDP_HDP__STALL_OVERRIDE_WTM_MASK 0x00008000L
+#define MC_HUB_WDP_HDP__STALL_OVERRIDE_WTM__SHIFT 0x0000000f
+#define MC_HUB_WDP_IH__BLACKOUT_EXEMPT_MASK 0x00000008L
+#define MC_HUB_WDP_IH__BLACKOUT_EXEMPT__SHIFT 0x00000003
+#define MC_HUB_WDP_IH__ENABLE_MASK 0x00000001L
+#define MC_HUB_WDP_IH__ENABLE__SHIFT 0x00000000
+#define MC_HUB_WDP_IH__LAZY_TIMER_MASK 0x00007800L
+#define MC_HUB_WDP_IH__LAZY_TIMER__SHIFT 0x0000000b
+#define MC_HUB_WDP_IH__MAXBURST_MASK 0x00000780L
+#define MC_HUB_WDP_IH__MAXBURST__SHIFT 0x00000007
+#define MC_HUB_WDP_IH__PRESCALE_MASK 0x00000006L
+#define MC_HUB_WDP_IH__PRESCALE__SHIFT 0x00000001
+#define MC_HUB_WDP_IH__STALL_MODE_MASK 0x00000030L
+#define MC_HUB_WDP_IH__STALL_MODE__SHIFT 0x00000004
+#define MC_HUB_WDP_IH__STALL_OVERRIDE_MASK 0x00000040L
+#define MC_HUB_WDP_IH__STALL_OVERRIDE__SHIFT 0x00000006
+#define MC_HUB_WDP_IH__STALL_OVERRIDE_WTM_MASK 0x00008000L
+#define MC_HUB_WDP_IH__STALL_OVERRIDE_WTM__SHIFT 0x0000000f
+#define MC_HUB_WDP_MCDW__ASK_CREDITS_MASK 0x00001f80L
+#define MC_HUB_WDP_MCDW__ASK_CREDITS__SHIFT 0x00000007
+#define MC_HUB_WDP_MCDW__ASK_CREDITS_W_MASK 0x7f000000L
+#define MC_HUB_WDP_MCDW__ASK_CREDITS_W__SHIFT 0x00000018
+#define MC_HUB_WDP_MCDW__BLACKOUT_EXEMPT_MASK 0x00000002L
+#define MC_HUB_WDP_MCDW__BLACKOUT_EXEMPT__SHIFT 0x00000001
+#define MC_HUB_WDP_MCDW__ENABLE_MASK 0x00000001L
+#define MC_HUB_WDP_MCDW__ENABLE__SHIFT 0x00000000
+#define MC_HUB_WDP_MCDW__LAZY_TIMER_MASK 0x0001e000L
+#define MC_HUB_WDP_MCDW__LAZY_TIMER__SHIFT 0x0000000d
+#define MC_HUB_WDP_MCDW__MAXBURST_MASK 0x00000078L
+#define MC_HUB_WDP_MCDW__MAXBURST__SHIFT 0x00000003
+#define MC_HUB_WDP_MCDW__STALL_MODE_MASK 0x00000004L
+#define MC_HUB_WDP_MCDW__STALL_MODE__SHIFT 0x00000002
+#define MC_HUB_WDP_MCDW__STALL_THRESHOLD_MASK 0x00fe0000L
+#define MC_HUB_WDP_MCDW__STALL_THRESHOLD__SHIFT 0x00000011
+#define MC_HUB_WDP_MCDX__ASK_CREDITS_MASK 0x00001f80L
+#define MC_HUB_WDP_MCDX__ASK_CREDITS__SHIFT 0x00000007
+#define MC_HUB_WDP_MCDX__ASK_CREDITS_W_MASK 0x7f000000L
+#define MC_HUB_WDP_MCDX__ASK_CREDITS_W__SHIFT 0x00000018
+#define MC_HUB_WDP_MCDX__BLACKOUT_EXEMPT_MASK 0x00000002L
+#define MC_HUB_WDP_MCDX__BLACKOUT_EXEMPT__SHIFT 0x00000001
+#define MC_HUB_WDP_MCDX__ENABLE_MASK 0x00000001L
+#define MC_HUB_WDP_MCDX__ENABLE__SHIFT 0x00000000
+#define MC_HUB_WDP_MCDX__LAZY_TIMER_MASK 0x0001e000L
+#define MC_HUB_WDP_MCDX__LAZY_TIMER__SHIFT 0x0000000d
+#define MC_HUB_WDP_MCDX__MAXBURST_MASK 0x00000078L
+#define MC_HUB_WDP_MCDX__MAXBURST__SHIFT 0x00000003
+#define MC_HUB_WDP_MCDX__STALL_MODE_MASK 0x00000004L
+#define MC_HUB_WDP_MCDX__STALL_MODE__SHIFT 0x00000002
+#define MC_HUB_WDP_MCDX__STALL_THRESHOLD_MASK 0x00fe0000L
+#define MC_HUB_WDP_MCDX__STALL_THRESHOLD__SHIFT 0x00000011
+#define MC_HUB_WDP_MCDY__ASK_CREDITS_MASK 0x00001f80L
+#define MC_HUB_WDP_MCDY__ASK_CREDITS__SHIFT 0x00000007
+#define MC_HUB_WDP_MCDY__ASK_CREDITS_W_MASK 0x7f000000L
+#define MC_HUB_WDP_MCDY__ASK_CREDITS_W__SHIFT 0x00000018
+#define MC_HUB_WDP_MCDY__BLACKOUT_EXEMPT_MASK 0x00000002L
+#define MC_HUB_WDP_MCDY__BLACKOUT_EXEMPT__SHIFT 0x00000001
+#define MC_HUB_WDP_MCDY__ENABLE_MASK 0x00000001L
+#define MC_HUB_WDP_MCDY__ENABLE__SHIFT 0x00000000
+#define MC_HUB_WDP_MCDY__LAZY_TIMER_MASK 0x0001e000L
+#define MC_HUB_WDP_MCDY__LAZY_TIMER__SHIFT 0x0000000d
+#define MC_HUB_WDP_MCDY__MAXBURST_MASK 0x00000078L
+#define MC_HUB_WDP_MCDY__MAXBURST__SHIFT 0x00000003
+#define MC_HUB_WDP_MCDY__STALL_MODE_MASK 0x00000004L
+#define MC_HUB_WDP_MCDY__STALL_MODE__SHIFT 0x00000002
+#define MC_HUB_WDP_MCDY__STALL_THRESHOLD_MASK 0x00fe0000L
+#define MC_HUB_WDP_MCDY__STALL_THRESHOLD__SHIFT 0x00000011
+#define MC_HUB_WDP_MCDZ__ASK_CREDITS_MASK 0x00001f80L
+#define MC_HUB_WDP_MCDZ__ASK_CREDITS__SHIFT 0x00000007
+#define MC_HUB_WDP_MCDZ__ASK_CREDITS_W_MASK 0x7f000000L
+#define MC_HUB_WDP_MCDZ__ASK_CREDITS_W__SHIFT 0x00000018
+#define MC_HUB_WDP_MCDZ__BLACKOUT_EXEMPT_MASK 0x00000002L
+#define MC_HUB_WDP_MCDZ__BLACKOUT_EXEMPT__SHIFT 0x00000001
+#define MC_HUB_WDP_MCDZ__ENABLE_MASK 0x00000001L
+#define MC_HUB_WDP_MCDZ__ENABLE__SHIFT 0x00000000
+#define MC_HUB_WDP_MCDZ__LAZY_TIMER_MASK 0x0001e000L
+#define MC_HUB_WDP_MCDZ__LAZY_TIMER__SHIFT 0x0000000d
+#define MC_HUB_WDP_MCDZ__MAXBURST_MASK 0x00000078L
+#define MC_HUB_WDP_MCDZ__MAXBURST__SHIFT 0x00000003
+#define MC_HUB_WDP_MCDZ__STALL_MODE_MASK 0x00000004L
+#define MC_HUB_WDP_MCDZ__STALL_MODE__SHIFT 0x00000002
+#define MC_HUB_WDP_MCDZ__STALL_THRESHOLD_MASK 0x00fe0000L
+#define MC_HUB_WDP_MCDZ__STALL_THRESHOLD__SHIFT 0x00000011
+#define MC_HUB_WDP_MCIF__BLACKOUT_EXEMPT_MASK 0x00000008L
+#define MC_HUB_WDP_MCIF__BLACKOUT_EXEMPT__SHIFT 0x00000003
+#define MC_HUB_WDP_MCIF__ENABLE_MASK 0x00000001L
+#define MC_HUB_WDP_MCIF__ENABLE__SHIFT 0x00000000
+#define MC_HUB_WDP_MCIF__LAZY_TIMER_MASK 0x00007800L
+#define MC_HUB_WDP_MCIF__LAZY_TIMER__SHIFT 0x0000000b
+#define MC_HUB_WDP_MCIF__MAXBURST_MASK 0x00000780L
+#define MC_HUB_WDP_MCIF__MAXBURST__SHIFT 0x00000007
+#define MC_HUB_WDP_MCIF__PRESCALE_MASK 0x00000006L
+#define MC_HUB_WDP_MCIF__PRESCALE__SHIFT 0x00000001
+#define MC_HUB_WDP_MCIF__STALL_MODE_MASK 0x00000030L
+#define MC_HUB_WDP_MCIF__STALL_MODE__SHIFT 0x00000004
+#define MC_HUB_WDP_MCIF__STALL_OVERRIDE_MASK 0x00000040L
+#define MC_HUB_WDP_MCIF__STALL_OVERRIDE__SHIFT 0x00000006
+#define MC_HUB_WDP_MCIF__STALL_OVERRIDE_WTM_MASK 0x00008000L
+#define MC_HUB_WDP_MCIF__STALL_OVERRIDE_WTM__SHIFT 0x0000000f
+#define MC_HUB_WDP_MGPU2__CID2_MASK 0x000000ffL
+#define MC_HUB_WDP_MGPU2__CID2__SHIFT 0x00000000
+#define MC_HUB_WDP_MGPU__CID_MASK 0x0000ff00L
+#define MC_HUB_WDP_MGPU__CID__SHIFT 0x00000008
+#define MC_HUB_WDP_MGPU__ENABLE_MASK 0x00800000L
+#define MC_HUB_WDP_MGPU__ENABLE__SHIFT 0x00000017
+#define MC_HUB_WDP_MGPU__MGPU_PRIORITY_TIME_MASK 0x007f0000L
+#define MC_HUB_WDP_MGPU__MGPU_PRIORITY_TIME__SHIFT 0x00000010
+#define MC_HUB_WDP_MGPU__OTH_PRIORITY_TIME_MASK 0x7f000000L
+#define MC_HUB_WDP_MGPU__OTH_PRIORITY_TIME__SHIFT 0x00000018
+#define MC_HUB_WDP_MGPU__STOR_MASK 0x000000ffL
+#define MC_HUB_WDP_MGPU__STOR__SHIFT 0x00000000
+#define MC_HUB_WDP_RLC__BLACKOUT_EXEMPT_MASK 0x00000008L
+#define MC_HUB_WDP_RLC__BLACKOUT_EXEMPT__SHIFT 0x00000003
+#define MC_HUB_WDP_RLC__ENABLE_MASK 0x00000001L
+#define MC_HUB_WDP_RLC__ENABLE__SHIFT 0x00000000
+#define MC_HUB_WDP_RLC__LAZY_TIMER_MASK 0x00007800L
+#define MC_HUB_WDP_RLC__LAZY_TIMER__SHIFT 0x0000000b
+#define MC_HUB_WDP_RLC__MAXBURST_MASK 0x00000780L
+#define MC_HUB_WDP_RLC__MAXBURST__SHIFT 0x00000007
+#define MC_HUB_WDP_RLC__PRESCALE_MASK 0x00000006L
+#define MC_HUB_WDP_RLC__PRESCALE__SHIFT 0x00000001
+#define MC_HUB_WDP_RLC__STALL_MODE_MASK 0x00000030L
+#define MC_HUB_WDP_RLC__STALL_MODE__SHIFT 0x00000004
+#define MC_HUB_WDP_RLC__STALL_OVERRIDE_MASK 0x00000040L
+#define MC_HUB_WDP_RLC__STALL_OVERRIDE__SHIFT 0x00000006
+#define MC_HUB_WDP_RLC__STALL_OVERRIDE_WTM_MASK 0x00008000L
+#define MC_HUB_WDP_RLC__STALL_OVERRIDE_WTM__SHIFT 0x0000000f
+#define MC_HUB_WDP_SEM__BLACKOUT_EXEMPT_MASK 0x00000008L
+#define MC_HUB_WDP_SEM__BLACKOUT_EXEMPT__SHIFT 0x00000003
+#define MC_HUB_WDP_SEM__ENABLE_MASK 0x00000001L
+#define MC_HUB_WDP_SEM__ENABLE__SHIFT 0x00000000
+#define MC_HUB_WDP_SEM__LAZY_TIMER_MASK 0x00007800L
+#define MC_HUB_WDP_SEM__LAZY_TIMER__SHIFT 0x0000000b
+#define MC_HUB_WDP_SEM__MAXBURST_MASK 0x00000780L
+#define MC_HUB_WDP_SEM__MAXBURST__SHIFT 0x00000007
+#define MC_HUB_WDP_SEM__PRESCALE_MASK 0x00000006L
+#define MC_HUB_WDP_SEM__PRESCALE__SHIFT 0x00000001
+#define MC_HUB_WDP_SEM__STALL_MODE_MASK 0x00000030L
+#define MC_HUB_WDP_SEM__STALL_MODE__SHIFT 0x00000004
+#define MC_HUB_WDP_SEM__STALL_OVERRIDE_MASK 0x00000040L
+#define MC_HUB_WDP_SEM__STALL_OVERRIDE__SHIFT 0x00000006
+#define MC_HUB_WDP_SEM__STALL_OVERRIDE_WTM_MASK 0x00008000L
+#define MC_HUB_WDP_SEM__STALL_OVERRIDE_WTM__SHIFT 0x0000000f
+#define MC_HUB_WDP_SH0__BLACKOUT_EXEMPT_MASK 0x00000008L
+#define MC_HUB_WDP_SH0__BLACKOUT_EXEMPT__SHIFT 0x00000003
+#define MC_HUB_WDP_SH0__ENABLE_MASK 0x00000001L
+#define MC_HUB_WDP_SH0__ENABLE__SHIFT 0x00000000
+#define MC_HUB_WDP_SH0__LAZY_TIMER_MASK 0x00007800L
+#define MC_HUB_WDP_SH0__LAZY_TIMER__SHIFT 0x0000000b
+#define MC_HUB_WDP_SH0__MAXBURST_MASK 0x00000780L
+#define MC_HUB_WDP_SH0__MAXBURST__SHIFT 0x00000007
+#define MC_HUB_WDP_SH0__PRESCALE_MASK 0x00000006L
+#define MC_HUB_WDP_SH0__PRESCALE__SHIFT 0x00000001
+#define MC_HUB_WDP_SH0__STALL_MODE_MASK 0x00000030L
+#define MC_HUB_WDP_SH0__STALL_MODE__SHIFT 0x00000004
+#define MC_HUB_WDP_SH0__STALL_OVERRIDE_MASK 0x00000040L
+#define MC_HUB_WDP_SH0__STALL_OVERRIDE__SHIFT 0x00000006
+#define MC_HUB_WDP_SH0__STALL_OVERRIDE_WTM_MASK 0x00008000L
+#define MC_HUB_WDP_SH0__STALL_OVERRIDE_WTM__SHIFT 0x0000000f
+#define MC_HUB_WDP_SH1__BLACKOUT_EXEMPT_MASK 0x00000008L
+#define MC_HUB_WDP_SH1__BLACKOUT_EXEMPT__SHIFT 0x00000003
+#define MC_HUB_WDP_SH1__ENABLE_MASK 0x00000001L
+#define MC_HUB_WDP_SH1__ENABLE__SHIFT 0x00000000
+#define MC_HUB_WDP_SH1__LAZY_TIMER_MASK 0x00007800L
+#define MC_HUB_WDP_SH1__LAZY_TIMER__SHIFT 0x0000000b
+#define MC_HUB_WDP_SH1__MAXBURST_MASK 0x00000780L
+#define MC_HUB_WDP_SH1__MAXBURST__SHIFT 0x00000007
+#define MC_HUB_WDP_SH1__PRESCALE_MASK 0x00000006L
+#define MC_HUB_WDP_SH1__PRESCALE__SHIFT 0x00000001
+#define MC_HUB_WDP_SH1__STALL_MODE_MASK 0x00000030L
+#define MC_HUB_WDP_SH1__STALL_MODE__SHIFT 0x00000004
+#define MC_HUB_WDP_SH1__STALL_OVERRIDE_MASK 0x00000040L
+#define MC_HUB_WDP_SH1__STALL_OVERRIDE__SHIFT 0x00000006
+#define MC_HUB_WDP_SH1__STALL_OVERRIDE_WTM_MASK 0x00008000L
+#define MC_HUB_WDP_SH1__STALL_OVERRIDE_WTM__SHIFT 0x0000000f
+#define MC_HUB_WDP_SIP__ASK_CREDITS_MASK 0x000001fcL
+#define MC_HUB_WDP_SIP__ASK_CREDITS__SHIFT 0x00000002
+#define MC_HUB_WDP_SIP__STALL_MODE_MASK 0x00000003L
+#define MC_HUB_WDP_SIP__STALL_MODE__SHIFT 0x00000000
+#define MC_HUB_WDP_SMU__BLACKOUT_EXEMPT_MASK 0x00000008L
+#define MC_HUB_WDP_SMU__BLACKOUT_EXEMPT__SHIFT 0x00000003
+#define MC_HUB_WDP_SMU__ENABLE_MASK 0x00000001L
+#define MC_HUB_WDP_SMU__ENABLE__SHIFT 0x00000000
+#define MC_HUB_WDP_SMU__LAZY_TIMER_MASK 0x00007800L
+#define MC_HUB_WDP_SMU__LAZY_TIMER__SHIFT 0x0000000b
+#define MC_HUB_WDP_SMU__MAXBURST_MASK 0x00000780L
+#define MC_HUB_WDP_SMU__MAXBURST__SHIFT 0x00000007
+#define MC_HUB_WDP_SMU__PRESCALE_MASK 0x00000006L
+#define MC_HUB_WDP_SMU__PRESCALE__SHIFT 0x00000001
+#define MC_HUB_WDP_SMU__STALL_MODE_MASK 0x00000030L
+#define MC_HUB_WDP_SMU__STALL_MODE__SHIFT 0x00000004
+#define MC_HUB_WDP_SMU__STALL_OVERRIDE_MASK 0x00000040L
+#define MC_HUB_WDP_SMU__STALL_OVERRIDE__SHIFT 0x00000006
+#define MC_HUB_WDP_SMU__STALL_OVERRIDE_WTM_MASK 0x00008000L
+#define MC_HUB_WDP_SMU__STALL_OVERRIDE_WTM__SHIFT 0x0000000f
+#define MC_HUB_WDP_STATUS__GBL0_BYPASS_STOR_FULL_MASK 0x00000080L
+#define MC_HUB_WDP_STATUS__GBL0_BYPASS_STOR_FULL__SHIFT 0x00000007
+#define MC_HUB_WDP_STATUS__GBL0_STOR_FULL_MASK 0x00000040L
+#define MC_HUB_WDP_STATUS__GBL0_STOR_FULL__SHIFT 0x00000006
+#define MC_HUB_WDP_STATUS__GBL0_VM_FULL_MASK 0x00000020L
+#define MC_HUB_WDP_STATUS__GBL0_VM_FULL__SHIFT 0x00000005
+#define MC_HUB_WDP_STATUS__GBL1_BYPASS_STOR_FULL_MASK 0x00000400L
+#define MC_HUB_WDP_STATUS__GBL1_BYPASS_STOR_FULL__SHIFT 0x0000000a
+#define MC_HUB_WDP_STATUS__GBL1_STOR_FULL_MASK 0x00000200L
+#define MC_HUB_WDP_STATUS__GBL1_STOR_FULL__SHIFT 0x00000009
+#define MC_HUB_WDP_STATUS__GBL1_VM_FULL_MASK 0x00000100L
+#define MC_HUB_WDP_STATUS__GBL1_VM_FULL__SHIFT 0x00000008
+#define MC_HUB_WDP_STATUS__MCDW_RD_AVAIL_MASK 0x00000002L
+#define MC_HUB_WDP_STATUS__MCDW_RD_AVAIL__SHIFT 0x00000001
+#define MC_HUB_WDP_STATUS__MCDW_WR_AVAIL_MASK 0x00000800L
+#define MC_HUB_WDP_STATUS__MCDW_WR_AVAIL__SHIFT 0x0000000b
+#define MC_HUB_WDP_STATUS__MCDX_RD_AVAIL_MASK 0x00000004L
+#define MC_HUB_WDP_STATUS__MCDX_RD_AVAIL__SHIFT 0x00000002
+#define MC_HUB_WDP_STATUS__MCDX_WR_AVAIL_MASK 0x00001000L
+#define MC_HUB_WDP_STATUS__MCDX_WR_AVAIL__SHIFT 0x0000000c
+#define MC_HUB_WDP_STATUS__MCDY_RD_AVAIL_MASK 0x00000008L
+#define MC_HUB_WDP_STATUS__MCDY_RD_AVAIL__SHIFT 0x00000003
+#define MC_HUB_WDP_STATUS__MCDY_WR_AVAIL_MASK 0x00002000L
+#define MC_HUB_WDP_STATUS__MCDY_WR_AVAIL__SHIFT 0x0000000d
+#define MC_HUB_WDP_STATUS__MCDZ_RD_AVAIL_MASK 0x00000010L
+#define MC_HUB_WDP_STATUS__MCDZ_RD_AVAIL__SHIFT 0x00000004
+#define MC_HUB_WDP_STATUS__MCDZ_WR_AVAIL_MASK 0x00004000L
+#define MC_HUB_WDP_STATUS__MCDZ_WR_AVAIL__SHIFT 0x0000000e
+#define MC_HUB_WDP_STATUS__SIP_AVAIL_MASK 0x00000001L
+#define MC_HUB_WDP_STATUS__SIP_AVAIL__SHIFT 0x00000000
+#define MC_HUB_WDP_UMC__BLACKOUT_EXEMPT_MASK 0x00000008L
+#define MC_HUB_WDP_UMC__BLACKOUT_EXEMPT__SHIFT 0x00000003
+#define MC_HUB_WDP_UMC__ENABLE_MASK 0x00000001L
+#define MC_HUB_WDP_UMC__ENABLE__SHIFT 0x00000000
+#define MC_HUB_WDP_UMC__LAZY_TIMER_MASK 0x00007800L
+#define MC_HUB_WDP_UMC__LAZY_TIMER__SHIFT 0x0000000b
+#define MC_HUB_WDP_UMC__MAXBURST_MASK 0x00000780L
+#define MC_HUB_WDP_UMC__MAXBURST__SHIFT 0x00000007
+#define MC_HUB_WDP_UMC__PRESCALE_MASK 0x00000006L
+#define MC_HUB_WDP_UMC__PRESCALE__SHIFT 0x00000001
+#define MC_HUB_WDP_UMC__STALL_MODE_MASK 0x00000030L
+#define MC_HUB_WDP_UMC__STALL_MODE__SHIFT 0x00000004
+#define MC_HUB_WDP_UMC__STALL_OVERRIDE_MASK 0x00000040L
+#define MC_HUB_WDP_UMC__STALL_OVERRIDE__SHIFT 0x00000006
+#define MC_HUB_WDP_UMC__STALL_OVERRIDE_WTM_MASK 0x00008000L
+#define MC_HUB_WDP_UMC__STALL_OVERRIDE_WTM__SHIFT 0x0000000f
+#define MC_HUB_WDP_UVD__BLACKOUT_EXEMPT_MASK 0x00000008L
+#define MC_HUB_WDP_UVD__BLACKOUT_EXEMPT__SHIFT 0x00000003
+#define MC_HUB_WDP_UVD__ENABLE_MASK 0x00000001L
+#define MC_HUB_WDP_UVD__ENABLE__SHIFT 0x00000000
+#define MC_HUB_WDP_UVD__LAZY_TIMER_MASK 0x00007800L
+#define MC_HUB_WDP_UVD__LAZY_TIMER__SHIFT 0x0000000b
+#define MC_HUB_WDP_UVD__MAXBURST_MASK 0x00000780L
+#define MC_HUB_WDP_UVD__MAXBURST__SHIFT 0x00000007
+#define MC_HUB_WDP_UVD__PRESCALE_MASK 0x00000006L
+#define MC_HUB_WDP_UVD__PRESCALE__SHIFT 0x00000001
+#define MC_HUB_WDP_UVD__STALL_MODE_MASK 0x00000030L
+#define MC_HUB_WDP_UVD__STALL_MODE__SHIFT 0x00000004
+#define MC_HUB_WDP_UVD__STALL_OVERRIDE_MASK 0x00000040L
+#define MC_HUB_WDP_UVD__STALL_OVERRIDE__SHIFT 0x00000006
+#define MC_HUB_WDP_UVD__STALL_OVERRIDE_WTM_MASK 0x00008000L
+#define MC_HUB_WDP_UVD__STALL_OVERRIDE_WTM__SHIFT 0x0000000f
+#define MC_HUB_WDP_UVD__VM_BYPASS_MASK 0x00010000L
+#define MC_HUB_WDP_UVD__VM_BYPASS__SHIFT 0x00000010
+#define MC_HUB_WDP_VCE__BLACKOUT_EXEMPT_MASK 0x00000008L
+#define MC_HUB_WDP_VCE__BLACKOUT_EXEMPT__SHIFT 0x00000003
+#define MC_HUB_WDP_VCE__ENABLE_MASK 0x00000001L
+#define MC_HUB_WDP_VCE__ENABLE__SHIFT 0x00000000
+#define MC_HUB_WDP_VCE__LAZY_TIMER_MASK 0x00007800L
+#define MC_HUB_WDP_VCE__LAZY_TIMER__SHIFT 0x0000000b
+#define MC_HUB_WDP_VCE__MAXBURST_MASK 0x00000780L
+#define MC_HUB_WDP_VCE__MAXBURST__SHIFT 0x00000007
+#define MC_HUB_WDP_VCE__PRESCALE_MASK 0x00000006L
+#define MC_HUB_WDP_VCE__PRESCALE__SHIFT 0x00000001
+#define MC_HUB_WDP_VCE__STALL_MODE_MASK 0x00000030L
+#define MC_HUB_WDP_VCE__STALL_MODE__SHIFT 0x00000004
+#define MC_HUB_WDP_VCE__STALL_OVERRIDE_MASK 0x00000040L
+#define MC_HUB_WDP_VCE__STALL_OVERRIDE__SHIFT 0x00000006
+#define MC_HUB_WDP_VCE__STALL_OVERRIDE_WTM_MASK 0x00008000L
+#define MC_HUB_WDP_VCE__STALL_OVERRIDE_WTM__SHIFT 0x0000000f
+#define MC_HUB_WDP_VCEU__BLACKOUT_EXEMPT_MASK 0x00000008L
+#define MC_HUB_WDP_VCEU__BLACKOUT_EXEMPT__SHIFT 0x00000003
+#define MC_HUB_WDP_VCEU__ENABLE_MASK 0x00000001L
+#define MC_HUB_WDP_VCEU__ENABLE__SHIFT 0x00000000
+#define MC_HUB_WDP_VCEU__LAZY_TIMER_MASK 0x00007800L
+#define MC_HUB_WDP_VCEU__LAZY_TIMER__SHIFT 0x0000000b
+#define MC_HUB_WDP_VCEU__MAXBURST_MASK 0x00000780L
+#define MC_HUB_WDP_VCEU__MAXBURST__SHIFT 0x00000007
+#define MC_HUB_WDP_VCEU__PRESCALE_MASK 0x00000006L
+#define MC_HUB_WDP_VCEU__PRESCALE__SHIFT 0x00000001
+#define MC_HUB_WDP_VCEU__STALL_MODE_MASK 0x00000030L
+#define MC_HUB_WDP_VCEU__STALL_MODE__SHIFT 0x00000004
+#define MC_HUB_WDP_VCEU__STALL_OVERRIDE_MASK 0x00000040L
+#define MC_HUB_WDP_VCEU__STALL_OVERRIDE__SHIFT 0x00000006
+#define MC_HUB_WDP_VCEU__STALL_OVERRIDE_WTM_MASK 0x00008000L
+#define MC_HUB_WDP_VCEU__STALL_OVERRIDE_WTM__SHIFT 0x0000000f
+#define MC_HUB_WDP_WTM_CNTL__GROUP0_DECREMENT_MASK 0x00000007L
+#define MC_HUB_WDP_WTM_CNTL__GROUP0_DECREMENT__SHIFT 0x00000000
+#define MC_HUB_WDP_WTM_CNTL__GROUP1_DECREMENT_MASK 0x00000038L
+#define MC_HUB_WDP_WTM_CNTL__GROUP1_DECREMENT__SHIFT 0x00000003
+#define MC_HUB_WDP_WTM_CNTL__GROUP2_DECREMENT_MASK 0x000001c0L
+#define MC_HUB_WDP_WTM_CNTL__GROUP2_DECREMENT__SHIFT 0x00000006
+#define MC_HUB_WDP_WTM_CNTL__GROUP3_DECREMENT_MASK 0x00000e00L
+#define MC_HUB_WDP_WTM_CNTL__GROUP3_DECREMENT__SHIFT 0x00000009
+#define MC_HUB_WDP_WTM_CNTL__GROUP4_DECREMENT_MASK 0x00007000L
+#define MC_HUB_WDP_WTM_CNTL__GROUP4_DECREMENT__SHIFT 0x0000000c
+#define MC_HUB_WDP_WTM_CNTL__GROUP5_DECREMENT_MASK 0x00038000L
+#define MC_HUB_WDP_WTM_CNTL__GROUP5_DECREMENT__SHIFT 0x0000000f
+#define MC_HUB_WDP_WTM_CNTL__GROUP6_DECREMENT_MASK 0x001c0000L
+#define MC_HUB_WDP_WTM_CNTL__GROUP6_DECREMENT__SHIFT 0x00000012
+#define MC_HUB_WDP_WTM_CNTL__GROUP7_DECREMENT_MASK 0x00e00000L
+#define MC_HUB_WDP_WTM_CNTL__GROUP7_DECREMENT__SHIFT 0x00000015
+#define MC_HUB_WDP_XDMA__BLACKOUT_EXEMPT_MASK 0x00000008L
+#define MC_HUB_WDP_XDMA__BLACKOUT_EXEMPT__SHIFT 0x00000003
+#define MC_HUB_WDP_XDMA__BYPASS_AVAIL_OVERRIDE_MASK 0x00010000L
+#define MC_HUB_WDP_XDMA__BYPASS_AVAIL_OVERRIDE__SHIFT 0x00000010
+#define MC_HUB_WDP_XDMA__ENABLE_MASK 0x00000001L
+#define MC_HUB_WDP_XDMA__ENABLE__SHIFT 0x00000000
+#define MC_HUB_WDP_XDMA__LAZY_TIMER_MASK 0x00007800L
+#define MC_HUB_WDP_XDMA__LAZY_TIMER__SHIFT 0x0000000b
+#define MC_HUB_WDP_XDMA__MAXBURST_MASK 0x00000780L
+#define MC_HUB_WDP_XDMA__MAXBURST__SHIFT 0x00000007
+#define MC_HUB_WDP_XDMAM__BLACKOUT_EXEMPT_MASK 0x00000008L
+#define MC_HUB_WDP_XDMAM__BLACKOUT_EXEMPT__SHIFT 0x00000003
+#define MC_HUB_WDP_XDMAM__BYPASS_AVAIL_OVERRIDE_MASK 0x00010000L
+#define MC_HUB_WDP_XDMAM__BYPASS_AVAIL_OVERRIDE__SHIFT 0x00000010
+#define MC_HUB_WDP_XDMAM__ENABLE_MASK 0x00000001L
+#define MC_HUB_WDP_XDMAM__ENABLE__SHIFT 0x00000000
+#define MC_HUB_WDP_XDMAM__LAZY_TIMER_MASK 0x00007800L
+#define MC_HUB_WDP_XDMAM__LAZY_TIMER__SHIFT 0x0000000b
+#define MC_HUB_WDP_XDMAM__MAXBURST_MASK 0x00000780L
+#define MC_HUB_WDP_XDMAM__MAXBURST__SHIFT 0x00000007
+#define MC_HUB_WDP_XDMAM__PRESCALE_MASK 0x00000006L
+#define MC_HUB_WDP_XDMAM__PRESCALE__SHIFT 0x00000001
+#define MC_HUB_WDP_XDMAM__STALL_MODE_MASK 0x00000030L
+#define MC_HUB_WDP_XDMAM__STALL_MODE__SHIFT 0x00000004
+#define MC_HUB_WDP_XDMAM__STALL_OVERRIDE_MASK 0x00000040L
+#define MC_HUB_WDP_XDMAM__STALL_OVERRIDE__SHIFT 0x00000006
+#define MC_HUB_WDP_XDMAM__STALL_OVERRIDE_WTM_MASK 0x00008000L
+#define MC_HUB_WDP_XDMAM__STALL_OVERRIDE_WTM__SHIFT 0x0000000f
+#define MC_HUB_WDP_XDMA__PRESCALE_MASK 0x00000006L
+#define MC_HUB_WDP_XDMA__PRESCALE__SHIFT 0x00000001
+#define MC_HUB_WDP_XDMA__STALL_MODE_MASK 0x00000030L
+#define MC_HUB_WDP_XDMA__STALL_MODE__SHIFT 0x00000004
+#define MC_HUB_WDP_XDMA__STALL_OVERRIDE_MASK 0x00000040L
+#define MC_HUB_WDP_XDMA__STALL_OVERRIDE__SHIFT 0x00000006
+#define MC_HUB_WDP_XDMA__STALL_OVERRIDE_WTM_MASK 0x00008000L
+#define MC_HUB_WDP_XDMA__STALL_OVERRIDE_WTM__SHIFT 0x0000000f
+#define MC_HUB_WDP_XDP__BLACKOUT_EXEMPT_MASK 0x00000008L
+#define MC_HUB_WDP_XDP__BLACKOUT_EXEMPT__SHIFT 0x00000003
+#define MC_HUB_WDP_XDP__ENABLE_MASK 0x00000001L
+#define MC_HUB_WDP_XDP__ENABLE__SHIFT 0x00000000
+#define MC_HUB_WDP_XDP__LAZY_TIMER_MASK 0x00007800L
+#define MC_HUB_WDP_XDP__LAZY_TIMER__SHIFT 0x0000000b
+#define MC_HUB_WDP_XDP__MAXBURST_MASK 0x00000780L
+#define MC_HUB_WDP_XDP__MAXBURST__SHIFT 0x00000007
+#define MC_HUB_WDP_XDP__PRESCALE_MASK 0x00000006L
+#define MC_HUB_WDP_XDP__PRESCALE__SHIFT 0x00000001
+#define MC_HUB_WDP_XDP__STALL_MODE_MASK 0x00000030L
+#define MC_HUB_WDP_XDP__STALL_MODE__SHIFT 0x00000004
+#define MC_HUB_WDP_XDP__STALL_OVERRIDE_MASK 0x00000040L
+#define MC_HUB_WDP_XDP__STALL_OVERRIDE__SHIFT 0x00000006
+#define MC_HUB_WDP_XDP__STALL_OVERRIDE_WTM_MASK 0x00008000L
+#define MC_HUB_WDP_XDP__STALL_OVERRIDE_WTM__SHIFT 0x0000000f
+#define MC_HUB_WRRET_CNTL__BP_ENABLE_MASK 0x00200000L
+#define MC_HUB_WRRET_CNTL__BP_ENABLE__SHIFT 0x00000015
+#define MC_HUB_WRRET_CNTL__BP_MASK 0x001ffffeL
+#define MC_HUB_WRRET_CNTL__BP__SHIFT 0x00000001
+#define MC_HUB_WRRET_CNTL__DEBUG_REG_MASK 0x3fc00000L
+#define MC_HUB_WRRET_CNTL__DEBUG_REG__SHIFT 0x00000016
+#define MC_HUB_WRRET_CNTL__DISABLE_SELF_INIT_MASK 0x40000000L
+#define MC_HUB_WRRET_CNTL__DISABLE_SELF_INIT__SHIFT 0x0000001e
+#define MC_HUB_WRRET_CNTL__FAIR_CH_SW_MASK 0x80000000L
+#define MC_HUB_WRRET_CNTL__FAIR_CH_SW__SHIFT 0x0000001f
+#define MC_HUB_WRRET_CNTL__JUMPAHEAD_MASK 0x00000001L
+#define MC_HUB_WRRET_CNTL__JUMPAHEAD__SHIFT 0x00000000
+#define MC_HUB_WRRET_MCDW__CREDIT_COUNT_MASK 0x000000feL
+#define MC_HUB_WRRET_MCDW__CREDIT_COUNT__SHIFT 0x00000001
+#define MC_HUB_WRRET_MCDW__STALL_MODE_MASK 0x00000001L
+#define MC_HUB_WRRET_MCDW__STALL_MODE__SHIFT 0x00000000
+#define MC_HUB_WRRET_MCDX__CREDIT_COUNT_MASK 0x000000feL
+#define MC_HUB_WRRET_MCDX__CREDIT_COUNT__SHIFT 0x00000001
+#define MC_HUB_WRRET_MCDX__STALL_MODE_MASK 0x00000001L
+#define MC_HUB_WRRET_MCDX__STALL_MODE__SHIFT 0x00000000
+#define MC_HUB_WRRET_MCDY__CREDIT_COUNT_MASK 0x000000feL
+#define MC_HUB_WRRET_MCDY__CREDIT_COUNT__SHIFT 0x00000001
+#define MC_HUB_WRRET_MCDY__STALL_MODE_MASK 0x00000001L
+#define MC_HUB_WRRET_MCDY__STALL_MODE__SHIFT 0x00000000
+#define MC_HUB_WRRET_MCDZ__CREDIT_COUNT_MASK 0x000000feL
+#define MC_HUB_WRRET_MCDZ__CREDIT_COUNT__SHIFT 0x00000001
+#define MC_HUB_WRRET_MCDZ__STALL_MODE_MASK 0x00000001L
+#define MC_HUB_WRRET_MCDZ__STALL_MODE__SHIFT 0x00000000
+#define MC_HUB_WRRET_STATUS__MCDW_AVAIL_MASK 0x00000001L
+#define MC_HUB_WRRET_STATUS__MCDW_AVAIL__SHIFT 0x00000000
+#define MC_HUB_WRRET_STATUS__MCDX_AVAIL_MASK 0x00000002L
+#define MC_HUB_WRRET_STATUS__MCDX_AVAIL__SHIFT 0x00000001
+#define MC_HUB_WRRET_STATUS__MCDY_AVAIL_MASK 0x00000004L
+#define MC_HUB_WRRET_STATUS__MCDY_AVAIL__SHIFT 0x00000002
+#define MC_HUB_WRRET_STATUS__MCDZ_AVAIL_MASK 0x00000008L
+#define MC_HUB_WRRET_STATUS__MCDZ_AVAIL__SHIFT 0x00000003
+#define MC_IMP_CNTL__CAL_PWRON_MASK 0x80000000L
+#define MC_IMP_CNTL__CAL_PWRON__SHIFT 0x0000001f
+#define MC_IMP_CNTL__CAL_VREF_MASK 0x007f0000L
+#define MC_IMP_CNTL__CAL_VREFMODE_MASK 0x00000040L
+#define MC_IMP_CNTL__CAL_VREFMODE__SHIFT 0x00000006
+#define MC_IMP_CNTL__CAL_VREF_SEL_MASK 0x00000020L
+#define MC_IMP_CNTL__CAL_VREF_SEL__SHIFT 0x00000005
+#define MC_IMP_CNTL__CAL_VREF__SHIFT 0x00000010
+#define MC_IMP_CNTL__CAL_WHEN_IDLE_MASK 0x20000000L
+#define MC_IMP_CNTL__CAL_WHEN_IDLE__SHIFT 0x0000001d
+#define MC_IMP_CNTL__CAL_WHEN_REFRESH_MASK 0x40000000L
+#define MC_IMP_CNTL__CAL_WHEN_REFRESH__SHIFT 0x0000001e
+#define MC_IMP_CNTL__CLEAR_TIMEOUT_ERR_MASK 0x00000200L
+#define MC_IMP_CNTL__CLEAR_TIMEOUT_ERR__SHIFT 0x00000009
+#define MC_IMP_CNTL__MEM_IO_SAMPLE_CNT_MASK 0x0000e000L
+#define MC_IMP_CNTL__MEM_IO_SAMPLE_CNT__SHIFT 0x0000000d
+#define MC_IMP_CNTL__MEM_IO_UPDATE_RATE_MASK 0x0000001fL
+#define MC_IMP_CNTL__MEM_IO_UPDATE_RATE__SHIFT 0x00000000
+#define MC_IMP_CNTL__TIMEOUT_ERR_MASK 0x00000100L
+#define MC_IMP_CNTL__TIMEOUT_ERR__SHIFT 0x00000008
+#define MC_IMP_DEBUG__DEBUG_CAL_DONE_MASK 0x80000000L
+#define MC_IMP_DEBUG__DEBUG_CAL_DONE__SHIFT 0x0000001f
+#define MC_IMP_DEBUG__DEBUG_CAL_EN_MASK 0x10000000L
+#define MC_IMP_DEBUG__DEBUG_CAL_EN__SHIFT 0x0000001c
+#define MC_IMP_DEBUG__DEBUG_CAL_INTR_MASK 0x40000000L
+#define MC_IMP_DEBUG__DEBUG_CAL_INTR__SHIFT 0x0000001e
+#define MC_IMP_DEBUG__DEBUG_CAL_START_MASK 0x20000000L
+#define MC_IMP_DEBUG__DEBUG_CAL_START__SHIFT 0x0000001d
+#define MC_IMP_DEBUG__PMVCAL_RESERVED_MASK 0x0fff0000L
+#define MC_IMP_DEBUG__PMVCAL_RESERVED__SHIFT 0x00000010
+#define MC_IMP_DEBUG__TIMEOUT_CNTR_MASK 0x0000ff00L
+#define MC_IMP_DEBUG__TIMEOUT_CNTR__SHIFT 0x00000008
+#define MC_IMP_DEBUG__TSTARTUP_CNTR_MASK 0x000000ffL
+#define MC_IMP_DEBUG__TSTARTUP_CNTR__SHIFT 0x00000000
+#define MC_IMP_DQ_STATUS__CH0_DQ_NSTR_MASK 0x0000ff00L
+#define MC_IMP_DQ_STATUS__CH0_DQ_NSTR__SHIFT 0x00000008
+#define MC_IMP_DQ_STATUS__CH0_DQ_PSTR_MASK 0x000000ffL
+#define MC_IMP_DQ_STATUS__CH0_DQ_PSTR__SHIFT 0x00000000
+#define MC_IMP_DQ_STATUS__CH1_DQ_NSTR_MASK 0xff000000L
+#define MC_IMP_DQ_STATUS__CH1_DQ_NSTR__SHIFT 0x00000018
+#define MC_IMP_DQ_STATUS__CH1_DQ_PSTR_MASK 0x00ff0000L
+#define MC_IMP_DQ_STATUS__CH1_DQ_PSTR__SHIFT 0x00000010
+#define MC_IMP_STATUS__NSTR_ACCUM_VAL_MASK 0xff000000L
+#define MC_IMP_STATUS__NSTR_ACCUM_VAL__SHIFT 0x00000018
+#define MC_IMP_STATUS__NSTR_CAL_MASK 0x00ff0000L
+#define MC_IMP_STATUS__NSTR_CAL__SHIFT 0x00000010
+#define MC_IMP_STATUS__PSTR_ACCUM_VAL_MASK 0x0000ff00L
+#define MC_IMP_STATUS__PSTR_ACCUM_VAL__SHIFT 0x00000008
+#define MC_IMP_STATUS__PSTR_CAL_MASK 0x000000ffL
+#define MC_IMP_STATUS__PSTR_CAL__SHIFT 0x00000000
+#define MC_IO_APHY_STR_CNTL_D0__CAL_SEL_MASK 0x0c000000L
+#define MC_IO_APHY_STR_CNTL_D0__CAL_SEL__SHIFT 0x0000001a
+#define MC_IO_APHY_STR_CNTL_D0__LOAD_A_STR_MASK 0x10000000L
+#define MC_IO_APHY_STR_CNTL_D0__LOAD_A_STR__SHIFT 0x0000001c
+#define MC_IO_APHY_STR_CNTL_D0__LOAD_D_RD_STR_MASK 0x20000000L
+#define MC_IO_APHY_STR_CNTL_D0__LOAD_D_RD_STR__SHIFT 0x0000001d
+#define MC_IO_APHY_STR_CNTL_D0__NSTR_OFF_A_MASK 0x00000fc0L
+#define MC_IO_APHY_STR_CNTL_D0__NSTR_OFF_A__SHIFT 0x00000006
+#define MC_IO_APHY_STR_CNTL_D0__PSTR_OFF_A_MASK 0x0000003fL
+#define MC_IO_APHY_STR_CNTL_D0__PSTR_OFF_A__SHIFT 0x00000000
+#define MC_IO_APHY_STR_CNTL_D0__PSTR_OFF_D_RD_MASK 0x0003f000L
+#define MC_IO_APHY_STR_CNTL_D0__PSTR_OFF_D_RD__SHIFT 0x0000000c
+#define MC_IO_APHY_STR_CNTL_D0__USE_A_CAL_MASK 0x01000000L
+#define MC_IO_APHY_STR_CNTL_D0__USE_A_CAL__SHIFT 0x00000018
+#define MC_IO_APHY_STR_CNTL_D0__USE_D_RD_CAL_MASK 0x02000000L
+#define MC_IO_APHY_STR_CNTL_D0__USE_D_RD_CAL__SHIFT 0x00000019
+#define MC_IO_APHY_STR_CNTL_D1__CAL_SEL_MASK 0x0c000000L
+#define MC_IO_APHY_STR_CNTL_D1__CAL_SEL__SHIFT 0x0000001a
+#define MC_IO_APHY_STR_CNTL_D1__LOAD_A_STR_MASK 0x10000000L
+#define MC_IO_APHY_STR_CNTL_D1__LOAD_A_STR__SHIFT 0x0000001c
+#define MC_IO_APHY_STR_CNTL_D1__LOAD_D_RD_STR_MASK 0x20000000L
+#define MC_IO_APHY_STR_CNTL_D1__LOAD_D_RD_STR__SHIFT 0x0000001d
+#define MC_IO_APHY_STR_CNTL_D1__NSTR_OFF_A_MASK 0x00000fc0L
+#define MC_IO_APHY_STR_CNTL_D1__NSTR_OFF_A__SHIFT 0x00000006
+#define MC_IO_APHY_STR_CNTL_D1__PSTR_OFF_A_MASK 0x0000003fL
+#define MC_IO_APHY_STR_CNTL_D1__PSTR_OFF_A__SHIFT 0x00000000
+#define MC_IO_APHY_STR_CNTL_D1__PSTR_OFF_D_RD_MASK 0x0003f000L
+#define MC_IO_APHY_STR_CNTL_D1__PSTR_OFF_D_RD__SHIFT 0x0000000c
+#define MC_IO_APHY_STR_CNTL_D1__USE_A_CAL_MASK 0x01000000L
+#define MC_IO_APHY_STR_CNTL_D1__USE_A_CAL__SHIFT 0x00000018
+#define MC_IO_APHY_STR_CNTL_D1__USE_D_RD_CAL_MASK 0x02000000L
+#define MC_IO_APHY_STR_CNTL_D1__USE_D_RD_CAL__SHIFT 0x00000019
+#define MC_IO_CDRCNTL1_D0__DQ_RXPHASE_B0_MASK 0x000000ffL
+#define MC_IO_CDRCNTL1_D0__DQ_RXPHASE_B0__SHIFT 0x00000000
+#define MC_IO_CDRCNTL1_D0__DQ_RXPHASE_B1_MASK 0x0000ff00L
+#define MC_IO_CDRCNTL1_D0__DQ_RXPHASE_B1__SHIFT 0x00000008
+#define MC_IO_CDRCNTL1_D0__WCDR_TXPHASE_B0_MASK 0x00ff0000L
+#define MC_IO_CDRCNTL1_D0__WCDR_TXPHASE_B0__SHIFT 0x00000010
+#define MC_IO_CDRCNTL1_D0__WCDR_TXPHASE_B1_MASK 0xff000000L
+#define MC_IO_CDRCNTL1_D0__WCDR_TXPHASE_B1__SHIFT 0x00000018
+#define MC_IO_CDRCNTL1_D1__DQ_RXPHASE_B0_MASK 0x000000ffL
+#define MC_IO_CDRCNTL1_D1__DQ_RXPHASE_B0__SHIFT 0x00000000
+#define MC_IO_CDRCNTL1_D1__DQ_RXPHASE_B1_MASK 0x0000ff00L
+#define MC_IO_CDRCNTL1_D1__DQ_RXPHASE_B1__SHIFT 0x00000008
+#define MC_IO_CDRCNTL1_D1__WCDR_TXPHASE_B0_MASK 0x00ff0000L
+#define MC_IO_CDRCNTL1_D1__WCDR_TXPHASE_B0__SHIFT 0x00000010
+#define MC_IO_CDRCNTL1_D1__WCDR_TXPHASE_B1_MASK 0xff000000L
+#define MC_IO_CDRCNTL1_D1__WCDR_TXPHASE_B1__SHIFT 0x00000018
+#define MC_IO_CDRCNTL2_D0__CDR_FB_SEL0_MASK 0x00000001L
+#define MC_IO_CDRCNTL2_D0__CDR_FB_SEL0__SHIFT 0x00000000
+#define MC_IO_CDRCNTL2_D0__CDR_FB_SEL1_MASK 0x00000002L
+#define MC_IO_CDRCNTL2_D0__CDR_FB_SEL1__SHIFT 0x00000001
+#define MC_IO_CDRCNTL2_D0__EDC_RXEN_OVR0_MASK 0x00000004L
+#define MC_IO_CDRCNTL2_D0__EDC_RXEN_OVR0__SHIFT 0x00000002
+#define MC_IO_CDRCNTL2_D0__EDC_RXEN_OVR1_MASK 0x00000008L
+#define MC_IO_CDRCNTL2_D0__EDC_RXEN_OVR1__SHIFT 0x00000003
+#define MC_IO_CDRCNTL2_D0__TXCDRBYPASS0_MASK 0x00000010L
+#define MC_IO_CDRCNTL2_D0__TXCDRBYPASS0__SHIFT 0x00000004
+#define MC_IO_CDRCNTL2_D0__TXCDRBYPASS1_MASK 0x00000020L
+#define MC_IO_CDRCNTL2_D0__TXCDRBYPASS1__SHIFT 0x00000005
+#define MC_IO_CDRCNTL2_D0__WCK_RXEN_OVR0_MASK 0x00000040L
+#define MC_IO_CDRCNTL2_D0__WCK_RXEN_OVR0__SHIFT 0x00000006
+#define MC_IO_CDRCNTL2_D0__WCK_RXEN_OVR1_MASK 0x00000080L
+#define MC_IO_CDRCNTL2_D0__WCK_RXEN_OVR1__SHIFT 0x00000007
+#define MC_IO_CDRCNTL2_D1__CDR_FB_SEL0_MASK 0x00000001L
+#define MC_IO_CDRCNTL2_D1__CDR_FB_SEL0__SHIFT 0x00000000
+#define MC_IO_CDRCNTL2_D1__CDR_FB_SEL1_MASK 0x00000002L
+#define MC_IO_CDRCNTL2_D1__CDR_FB_SEL1__SHIFT 0x00000001
+#define MC_IO_CDRCNTL2_D1__EDC_RXEN_OVR0_MASK 0x00000004L
+#define MC_IO_CDRCNTL2_D1__EDC_RXEN_OVR0__SHIFT 0x00000002
+#define MC_IO_CDRCNTL2_D1__EDC_RXEN_OVR1_MASK 0x00000008L
+#define MC_IO_CDRCNTL2_D1__EDC_RXEN_OVR1__SHIFT 0x00000003
+#define MC_IO_CDRCNTL2_D1__TXCDRBYPASS0_MASK 0x00000010L
+#define MC_IO_CDRCNTL2_D1__TXCDRBYPASS0__SHIFT 0x00000004
+#define MC_IO_CDRCNTL2_D1__TXCDRBYPASS1_MASK 0x00000020L
+#define MC_IO_CDRCNTL2_D1__TXCDRBYPASS1__SHIFT 0x00000005
+#define MC_IO_CDRCNTL2_D1__WCK_RXEN_OVR0_MASK 0x00000040L
+#define MC_IO_CDRCNTL2_D1__WCK_RXEN_OVR0__SHIFT 0x00000006
+#define MC_IO_CDRCNTL2_D1__WCK_RXEN_OVR1_MASK 0x00000080L
+#define MC_IO_CDRCNTL2_D1__WCK_RXEN_OVR1__SHIFT 0x00000007
+#define MC_IO_CDRCNTL_D0__DQRXCDREN_B0_MASK 0x00400000L
+#define MC_IO_CDRCNTL_D0__DQRXCDREN_B0__SHIFT 0x00000016
+#define MC_IO_CDRCNTL_D0__DQRXCDREN_B1_MASK 0x00800000L
+#define MC_IO_CDRCNTL_D0__DQRXCDREN_B1__SHIFT 0x00000017
+#define MC_IO_CDRCNTL_D0__DQRXSEL_B0_MASK 0x10000000L
+#define MC_IO_CDRCNTL_D0__DQRXSEL_B0__SHIFT 0x0000001c
+#define MC_IO_CDRCNTL_D0__DQRXSEL_B1_MASK 0x20000000L
+#define MC_IO_CDRCNTL_D0__DQRXSEL_B1__SHIFT 0x0000001d
+#define MC_IO_CDRCNTL_D0__DQTXCDREN_B0_MASK 0x00100000L
+#define MC_IO_CDRCNTL_D0__DQTXCDREN_B0__SHIFT 0x00000014
+#define MC_IO_CDRCNTL_D0__DQTXCDREN_B1_MASK 0x00200000L
+#define MC_IO_CDRCNTL_D0__DQTXCDREN_B1__SHIFT 0x00000015
+#define MC_IO_CDRCNTL_D0__DQTXSEL_B0_MASK 0x40000000L
+#define MC_IO_CDRCNTL_D0__DQTXSEL_B0__SHIFT 0x0000001e
+#define MC_IO_CDRCNTL_D0__DQTXSEL_B1_MASK 0x80000000L
+#define MC_IO_CDRCNTL_D0__DQTXSEL_B1__SHIFT 0x0000001f
+#define MC_IO_CDRCNTL_D0__RXCDRBYPASS_B01_MASK 0x00000400L
+#define MC_IO_CDRCNTL_D0__RXCDRBYPASS_B01__SHIFT 0x0000000a
+#define MC_IO_CDRCNTL_D0__RXCDRBYPASS_B23_MASK 0x00000800L
+#define MC_IO_CDRCNTL_D0__RXCDRBYPASS_B23__SHIFT 0x0000000b
+#define MC_IO_CDRCNTL_D0__RXCDREN_B01_MASK 0x00000100L
+#define MC_IO_CDRCNTL_D0__RXCDREN_B01__SHIFT 0x00000008
+#define MC_IO_CDRCNTL_D0__RXCDREN_B23_MASK 0x00000200L
+#define MC_IO_CDRCNTL_D0__RXCDREN_B23__SHIFT 0x00000009
+#define MC_IO_CDRCNTL_D0__RXPHASE1_B01_MASK 0x0000f000L
+#define MC_IO_CDRCNTL_D0__RXPHASE1_B01__SHIFT 0x0000000c
+#define MC_IO_CDRCNTL_D0__RXPHASE1_B23_MASK 0x000f0000L
+#define MC_IO_CDRCNTL_D0__RXPHASE1_B23__SHIFT 0x00000010
+#define MC_IO_CDRCNTL_D0__RXPHASE_B01_MASK 0x0000000fL
+#define MC_IO_CDRCNTL_D0__RXPHASE_B01__SHIFT 0x00000000
+#define MC_IO_CDRCNTL_D0__RXPHASE_B23_MASK 0x000000f0L
+#define MC_IO_CDRCNTL_D0__RXPHASE_B23__SHIFT 0x00000004
+#define MC_IO_CDRCNTL_D0__WCDREDC_B0_MASK 0x04000000L
+#define MC_IO_CDRCNTL_D0__WCDREDC_B0__SHIFT 0x0000001a
+#define MC_IO_CDRCNTL_D0__WCDREDC_B1_MASK 0x08000000L
+#define MC_IO_CDRCNTL_D0__WCDREDC_B1__SHIFT 0x0000001b
+#define MC_IO_CDRCNTL_D0__WCDRRXCDREN_B0_MASK 0x01000000L
+#define MC_IO_CDRCNTL_D0__WCDRRXCDREN_B0__SHIFT 0x00000018
+#define MC_IO_CDRCNTL_D0__WCDRRXCDREN_B1_MASK 0x02000000L
+#define MC_IO_CDRCNTL_D0__WCDRRXCDREN_B1__SHIFT 0x00000019
+#define MC_IO_CDRCNTL_D1__DQRXCDREN_B0_MASK 0x00400000L
+#define MC_IO_CDRCNTL_D1__DQRXCDREN_B0__SHIFT 0x00000016
+#define MC_IO_CDRCNTL_D1__DQRXCDREN_B1_MASK 0x00800000L
+#define MC_IO_CDRCNTL_D1__DQRXCDREN_B1__SHIFT 0x00000017
+#define MC_IO_CDRCNTL_D1__DQRXSEL_B0_MASK 0x10000000L
+#define MC_IO_CDRCNTL_D1__DQRXSEL_B0__SHIFT 0x0000001c
+#define MC_IO_CDRCNTL_D1__DQRXSEL_B1_MASK 0x20000000L
+#define MC_IO_CDRCNTL_D1__DQRXSEL_B1__SHIFT 0x0000001d
+#define MC_IO_CDRCNTL_D1__DQTXCDREN_B0_MASK 0x00100000L
+#define MC_IO_CDRCNTL_D1__DQTXCDREN_B0__SHIFT 0x00000014
+#define MC_IO_CDRCNTL_D1__DQTXCDREN_B1_MASK 0x00200000L
+#define MC_IO_CDRCNTL_D1__DQTXCDREN_B1__SHIFT 0x00000015
+#define MC_IO_CDRCNTL_D1__DQTXSEL_B0_MASK 0x40000000L
+#define MC_IO_CDRCNTL_D1__DQTXSEL_B0__SHIFT 0x0000001e
+#define MC_IO_CDRCNTL_D1__DQTXSEL_B1_MASK 0x80000000L
+#define MC_IO_CDRCNTL_D1__DQTXSEL_B1__SHIFT 0x0000001f
+#define MC_IO_CDRCNTL_D1__RXCDRBYPASS_B01_MASK 0x00000400L
+#define MC_IO_CDRCNTL_D1__RXCDRBYPASS_B01__SHIFT 0x0000000a
+#define MC_IO_CDRCNTL_D1__RXCDRBYPASS_B23_MASK 0x00000800L
+#define MC_IO_CDRCNTL_D1__RXCDRBYPASS_B23__SHIFT 0x0000000b
+#define MC_IO_CDRCNTL_D1__RXCDREN_B01_MASK 0x00000100L
+#define MC_IO_CDRCNTL_D1__RXCDREN_B01__SHIFT 0x00000008
+#define MC_IO_CDRCNTL_D1__RXCDREN_B23_MASK 0x00000200L
+#define MC_IO_CDRCNTL_D1__RXCDREN_B23__SHIFT 0x00000009
+#define MC_IO_CDRCNTL_D1__RXPHASE1_B01_MASK 0x0000f000L
+#define MC_IO_CDRCNTL_D1__RXPHASE1_B01__SHIFT 0x0000000c
+#define MC_IO_CDRCNTL_D1__RXPHASE1_B23_MASK 0x000f0000L
+#define MC_IO_CDRCNTL_D1__RXPHASE1_B23__SHIFT 0x00000010
+#define MC_IO_CDRCNTL_D1__RXPHASE_B01_MASK 0x0000000fL
+#define MC_IO_CDRCNTL_D1__RXPHASE_B01__SHIFT 0x00000000
+#define MC_IO_CDRCNTL_D1__RXPHASE_B23_MASK 0x000000f0L
+#define MC_IO_CDRCNTL_D1__RXPHASE_B23__SHIFT 0x00000004
+#define MC_IO_CDRCNTL_D1__WCDREDC_B0_MASK 0x04000000L
+#define MC_IO_CDRCNTL_D1__WCDREDC_B0__SHIFT 0x0000001a
+#define MC_IO_CDRCNTL_D1__WCDREDC_B1_MASK 0x08000000L
+#define MC_IO_CDRCNTL_D1__WCDREDC_B1__SHIFT 0x0000001b
+#define MC_IO_CDRCNTL_D1__WCDRRXCDREN_B0_MASK 0x01000000L
+#define MC_IO_CDRCNTL_D1__WCDRRXCDREN_B0__SHIFT 0x00000018
+#define MC_IO_CDRCNTL_D1__WCDRRXCDREN_B1_MASK 0x02000000L
+#define MC_IO_CDRCNTL_D1__WCDRRXCDREN_B1__SHIFT 0x00000019
+#define MC_IO_DEBUG_ACMD_CLKSEL_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_ACMD_CLKSEL_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_ACMD_CLKSEL_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_ACMD_CLKSEL_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_ACMD_CLKSEL_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_ACMD_CLKSEL_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_ACMD_CLKSEL_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_ACMD_CLKSEL_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_ACMD_CLKSEL_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_ACMD_CLKSEL_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_ACMD_CLKSEL_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_ACMD_CLKSEL_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_ACMD_CLKSEL_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_ACMD_CLKSEL_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_ACMD_CLKSEL_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_ACMD_CLKSEL_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_ACMD_MISC_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_ACMD_MISC_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_ACMD_MISC_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_ACMD_MISC_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_ACMD_MISC_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_ACMD_MISC_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_ACMD_MISC_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_ACMD_MISC_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_ACMD_MISC_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_ACMD_MISC_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_ACMD_MISC_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_ACMD_MISC_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_ACMD_MISC_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_ACMD_MISC_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_ACMD_MISC_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_ACMD_MISC_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_ACMD_OFSCAL_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_ACMD_OFSCAL_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_ACMD_OFSCAL_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_ACMD_OFSCAL_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_ACMD_OFSCAL_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_ACMD_OFSCAL_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_ACMD_OFSCAL_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_ACMD_OFSCAL_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_ACMD_OFSCAL_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_ACMD_OFSCAL_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_ACMD_OFSCAL_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_ACMD_OFSCAL_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_ACMD_OFSCAL_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_ACMD_OFSCAL_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_ACMD_OFSCAL_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_ACMD_OFSCAL_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_ACMD_RXPHASE_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_ACMD_RXPHASE_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_ACMD_RXPHASE_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_ACMD_RXPHASE_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_ACMD_RXPHASE_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_ACMD_RXPHASE_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_ACMD_RXPHASE_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_ACMD_RXPHASE_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_ACMD_RXPHASE_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_ACMD_RXPHASE_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_ACMD_RXPHASE_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_ACMD_RXPHASE_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_ACMD_RXPHASE_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_ACMD_RXPHASE_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_ACMD_RXPHASE_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_ACMD_RXPHASE_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_ACMD_TXBST_PD_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_ACMD_TXBST_PD_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_ACMD_TXBST_PD_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_ACMD_TXBST_PD_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_ACMD_TXBST_PD_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_ACMD_TXBST_PD_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_ACMD_TXBST_PD_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_ACMD_TXBST_PD_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_ACMD_TXBST_PD_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_ACMD_TXBST_PD_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_ACMD_TXBST_PD_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_ACMD_TXBST_PD_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_ACMD_TXBST_PD_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_ACMD_TXBST_PD_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_ACMD_TXBST_PD_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_ACMD_TXBST_PD_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_ACMD_TXBST_PU_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_ACMD_TXBST_PU_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_ACMD_TXBST_PU_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_ACMD_TXBST_PU_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_ACMD_TXBST_PU_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_ACMD_TXBST_PU_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_ACMD_TXBST_PU_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_ACMD_TXBST_PU_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_ACMD_TXBST_PU_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_ACMD_TXBST_PU_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_ACMD_TXBST_PU_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_ACMD_TXBST_PU_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_ACMD_TXBST_PU_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_ACMD_TXBST_PU_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_ACMD_TXBST_PU_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_ACMD_TXBST_PU_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_ACMD_TXPHASE_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_ACMD_TXPHASE_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_ACMD_TXPHASE_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_ACMD_TXPHASE_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_ACMD_TXPHASE_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_ACMD_TXPHASE_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_ACMD_TXPHASE_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_ACMD_TXPHASE_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_ACMD_TXPHASE_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_ACMD_TXPHASE_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_ACMD_TXPHASE_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_ACMD_TXPHASE_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_ACMD_TXPHASE_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_ACMD_TXPHASE_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_ACMD_TXPHASE_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_ACMD_TXPHASE_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_ACMD_TXSLF_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_ACMD_TXSLF_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_ACMD_TXSLF_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_ACMD_TXSLF_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_ACMD_TXSLF_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_ACMD_TXSLF_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_ACMD_TXSLF_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_ACMD_TXSLF_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_ACMD_TXSLF_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_ACMD_TXSLF_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_ACMD_TXSLF_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_ACMD_TXSLF_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_ACMD_TXSLF_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_ACMD_TXSLF_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_ACMD_TXSLF_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_ACMD_TXSLF_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_ADDRH_CLKSEL_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_ADDRH_CLKSEL_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_ADDRH_CLKSEL_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_ADDRH_CLKSEL_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_ADDRH_CLKSEL_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_ADDRH_CLKSEL_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_ADDRH_CLKSEL_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_ADDRH_CLKSEL_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_ADDRH_CLKSEL_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_ADDRH_CLKSEL_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_ADDRH_CLKSEL_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_ADDRH_CLKSEL_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_ADDRH_CLKSEL_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_ADDRH_CLKSEL_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_ADDRH_CLKSEL_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_ADDRH_CLKSEL_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_ADDRH_MISC_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_ADDRH_MISC_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_ADDRH_MISC_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_ADDRH_MISC_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_ADDRH_MISC_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_ADDRH_MISC_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_ADDRH_MISC_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_ADDRH_MISC_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_ADDRH_MISC_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_ADDRH_MISC_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_ADDRH_MISC_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_ADDRH_MISC_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_ADDRH_MISC_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_ADDRH_MISC_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_ADDRH_MISC_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_ADDRH_MISC_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_ADDRH_RXPHASE_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_ADDRH_RXPHASE_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_ADDRH_RXPHASE_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_ADDRH_RXPHASE_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_ADDRH_RXPHASE_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_ADDRH_RXPHASE_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_ADDRH_RXPHASE_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_ADDRH_RXPHASE_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_ADDRH_RXPHASE_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_ADDRH_RXPHASE_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_ADDRH_RXPHASE_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_ADDRH_RXPHASE_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_ADDRH_RXPHASE_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_ADDRH_RXPHASE_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_ADDRH_RXPHASE_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_ADDRH_RXPHASE_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_ADDRH_TXBST_PD_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_ADDRH_TXBST_PD_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_ADDRH_TXBST_PD_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_ADDRH_TXBST_PD_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_ADDRH_TXBST_PD_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_ADDRH_TXBST_PD_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_ADDRH_TXBST_PD_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_ADDRH_TXBST_PD_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_ADDRH_TXBST_PD_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_ADDRH_TXBST_PD_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_ADDRH_TXBST_PD_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_ADDRH_TXBST_PD_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_ADDRH_TXBST_PD_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_ADDRH_TXBST_PD_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_ADDRH_TXBST_PD_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_ADDRH_TXBST_PD_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_ADDRH_TXBST_PU_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_ADDRH_TXBST_PU_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_ADDRH_TXBST_PU_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_ADDRH_TXBST_PU_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_ADDRH_TXBST_PU_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_ADDRH_TXBST_PU_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_ADDRH_TXBST_PU_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_ADDRH_TXBST_PU_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_ADDRH_TXBST_PU_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_ADDRH_TXBST_PU_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_ADDRH_TXBST_PU_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_ADDRH_TXBST_PU_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_ADDRH_TXBST_PU_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_ADDRH_TXBST_PU_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_ADDRH_TXBST_PU_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_ADDRH_TXBST_PU_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_ADDRH_TXPHASE_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_ADDRH_TXPHASE_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_ADDRH_TXPHASE_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_ADDRH_TXPHASE_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_ADDRH_TXPHASE_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_ADDRH_TXPHASE_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_ADDRH_TXPHASE_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_ADDRH_TXPHASE_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_ADDRH_TXPHASE_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_ADDRH_TXPHASE_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_ADDRH_TXPHASE_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_ADDRH_TXPHASE_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_ADDRH_TXPHASE_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_ADDRH_TXPHASE_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_ADDRH_TXPHASE_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_ADDRH_TXPHASE_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_ADDRH_TXSLF_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_ADDRH_TXSLF_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_ADDRH_TXSLF_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_ADDRH_TXSLF_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_ADDRH_TXSLF_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_ADDRH_TXSLF_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_ADDRH_TXSLF_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_ADDRH_TXSLF_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_ADDRH_TXSLF_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_ADDRH_TXSLF_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_ADDRH_TXSLF_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_ADDRH_TXSLF_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_ADDRH_TXSLF_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_ADDRH_TXSLF_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_ADDRH_TXSLF_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_ADDRH_TXSLF_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_ADDRL_CLKSEL_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_ADDRL_CLKSEL_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_ADDRL_CLKSEL_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_ADDRL_CLKSEL_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_ADDRL_CLKSEL_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_ADDRL_CLKSEL_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_ADDRL_CLKSEL_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_ADDRL_CLKSEL_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_ADDRL_CLKSEL_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_ADDRL_CLKSEL_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_ADDRL_CLKSEL_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_ADDRL_CLKSEL_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_ADDRL_CLKSEL_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_ADDRL_CLKSEL_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_ADDRL_CLKSEL_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_ADDRL_CLKSEL_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_ADDRL_MISC_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_ADDRL_MISC_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_ADDRL_MISC_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_ADDRL_MISC_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_ADDRL_MISC_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_ADDRL_MISC_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_ADDRL_MISC_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_ADDRL_MISC_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_ADDRL_MISC_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_ADDRL_MISC_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_ADDRL_MISC_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_ADDRL_MISC_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_ADDRL_MISC_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_ADDRL_MISC_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_ADDRL_MISC_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_ADDRL_MISC_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_ADDRL_RXPHASE_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_ADDRL_RXPHASE_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_ADDRL_RXPHASE_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_ADDRL_RXPHASE_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_ADDRL_RXPHASE_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_ADDRL_RXPHASE_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_ADDRL_RXPHASE_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_ADDRL_RXPHASE_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_ADDRL_RXPHASE_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_ADDRL_RXPHASE_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_ADDRL_RXPHASE_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_ADDRL_RXPHASE_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_ADDRL_RXPHASE_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_ADDRL_RXPHASE_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_ADDRL_RXPHASE_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_ADDRL_RXPHASE_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_ADDRL_TXBST_PD_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_ADDRL_TXBST_PD_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_ADDRL_TXBST_PD_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_ADDRL_TXBST_PD_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_ADDRL_TXBST_PD_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_ADDRL_TXBST_PD_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_ADDRL_TXBST_PD_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_ADDRL_TXBST_PD_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_ADDRL_TXBST_PD_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_ADDRL_TXBST_PD_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_ADDRL_TXBST_PD_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_ADDRL_TXBST_PD_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_ADDRL_TXBST_PD_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_ADDRL_TXBST_PD_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_ADDRL_TXBST_PD_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_ADDRL_TXBST_PD_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_ADDRL_TXBST_PU_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_ADDRL_TXBST_PU_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_ADDRL_TXBST_PU_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_ADDRL_TXBST_PU_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_ADDRL_TXBST_PU_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_ADDRL_TXBST_PU_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_ADDRL_TXBST_PU_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_ADDRL_TXBST_PU_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_ADDRL_TXBST_PU_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_ADDRL_TXBST_PU_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_ADDRL_TXBST_PU_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_ADDRL_TXBST_PU_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_ADDRL_TXBST_PU_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_ADDRL_TXBST_PU_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_ADDRL_TXBST_PU_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_ADDRL_TXBST_PU_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_ADDRL_TXPHASE_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_ADDRL_TXPHASE_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_ADDRL_TXPHASE_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_ADDRL_TXPHASE_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_ADDRL_TXPHASE_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_ADDRL_TXPHASE_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_ADDRL_TXPHASE_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_ADDRL_TXPHASE_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_ADDRL_TXPHASE_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_ADDRL_TXPHASE_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_ADDRL_TXPHASE_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_ADDRL_TXPHASE_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_ADDRL_TXPHASE_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_ADDRL_TXPHASE_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_ADDRL_TXPHASE_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_ADDRL_TXPHASE_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_ADDRL_TXSLF_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_ADDRL_TXSLF_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_ADDRL_TXSLF_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_ADDRL_TXSLF_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_ADDRL_TXSLF_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_ADDRL_TXSLF_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_ADDRL_TXSLF_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_ADDRL_TXSLF_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_ADDRL_TXSLF_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_ADDRL_TXSLF_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_ADDRL_TXSLF_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_ADDRL_TXSLF_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_ADDRL_TXSLF_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_ADDRL_TXSLF_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_ADDRL_TXSLF_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_ADDRL_TXSLF_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_CK_CLKSEL_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_CK_CLKSEL_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_CK_CLKSEL_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_CK_CLKSEL_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_CK_CLKSEL_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_CK_CLKSEL_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_CK_CLKSEL_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_CK_CLKSEL_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_CK_CLKSEL_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_CK_CLKSEL_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_CK_CLKSEL_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_CK_CLKSEL_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_CK_CLKSEL_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_CK_CLKSEL_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_CK_CLKSEL_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_CK_CLKSEL_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_CK_MISC_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_CK_MISC_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_CK_MISC_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_CK_MISC_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_CK_MISC_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_CK_MISC_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_CK_MISC_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_CK_MISC_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_CK_MISC_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_CK_MISC_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_CK_MISC_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_CK_MISC_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_CK_MISC_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_CK_MISC_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_CK_MISC_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_CK_MISC_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_CK_RXPHASE_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_CK_RXPHASE_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_CK_RXPHASE_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_CK_RXPHASE_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_CK_RXPHASE_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_CK_RXPHASE_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_CK_RXPHASE_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_CK_RXPHASE_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_CK_RXPHASE_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_CK_RXPHASE_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_CK_RXPHASE_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_CK_RXPHASE_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_CK_RXPHASE_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_CK_RXPHASE_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_CK_RXPHASE_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_CK_RXPHASE_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_CK_TXBST_PD_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_CK_TXBST_PD_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_CK_TXBST_PD_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_CK_TXBST_PD_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_CK_TXBST_PD_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_CK_TXBST_PD_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_CK_TXBST_PD_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_CK_TXBST_PD_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_CK_TXBST_PD_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_CK_TXBST_PD_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_CK_TXBST_PD_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_CK_TXBST_PD_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_CK_TXBST_PD_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_CK_TXBST_PD_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_CK_TXBST_PD_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_CK_TXBST_PD_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_CK_TXBST_PU_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_CK_TXBST_PU_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_CK_TXBST_PU_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_CK_TXBST_PU_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_CK_TXBST_PU_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_CK_TXBST_PU_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_CK_TXBST_PU_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_CK_TXBST_PU_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_CK_TXBST_PU_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_CK_TXBST_PU_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_CK_TXBST_PU_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_CK_TXBST_PU_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_CK_TXBST_PU_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_CK_TXBST_PU_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_CK_TXBST_PU_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_CK_TXBST_PU_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_CK_TXPHASE_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_CK_TXPHASE_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_CK_TXPHASE_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_CK_TXPHASE_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_CK_TXPHASE_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_CK_TXPHASE_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_CK_TXPHASE_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_CK_TXPHASE_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_CK_TXPHASE_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_CK_TXPHASE_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_CK_TXPHASE_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_CK_TXPHASE_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_CK_TXPHASE_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_CK_TXPHASE_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_CK_TXPHASE_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_CK_TXPHASE_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_CK_TXSLF_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_CK_TXSLF_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_CK_TXSLF_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_CK_TXSLF_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_CK_TXSLF_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_CK_TXSLF_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_CK_TXSLF_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_CK_TXSLF_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_CK_TXSLF_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_CK_TXSLF_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_CK_TXSLF_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_CK_TXSLF_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_CK_TXSLF_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_CK_TXSLF_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_CK_TXSLF_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_CK_TXSLF_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_CMD_CLKSEL_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_CMD_CLKSEL_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_CMD_CLKSEL_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_CMD_CLKSEL_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_CMD_CLKSEL_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_CMD_CLKSEL_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_CMD_CLKSEL_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_CMD_CLKSEL_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_CMD_CLKSEL_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_CMD_CLKSEL_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_CMD_CLKSEL_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_CMD_CLKSEL_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_CMD_CLKSEL_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_CMD_CLKSEL_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_CMD_CLKSEL_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_CMD_CLKSEL_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_CMD_MISC_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_CMD_MISC_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_CMD_MISC_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_CMD_MISC_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_CMD_MISC_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_CMD_MISC_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_CMD_MISC_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_CMD_MISC_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_CMD_MISC_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_CMD_MISC_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_CMD_MISC_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_CMD_MISC_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_CMD_MISC_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_CMD_MISC_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_CMD_MISC_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_CMD_MISC_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_CMD_OFSCAL_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_CMD_OFSCAL_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_CMD_OFSCAL_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_CMD_OFSCAL_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_CMD_OFSCAL_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_CMD_OFSCAL_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_CMD_OFSCAL_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_CMD_OFSCAL_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_CMD_OFSCAL_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_CMD_OFSCAL_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_CMD_OFSCAL_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_CMD_OFSCAL_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_CMD_OFSCAL_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_CMD_OFSCAL_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_CMD_OFSCAL_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_CMD_OFSCAL_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_CMD_RX_EQ_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_CMD_RX_EQ_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_CMD_RX_EQ_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_CMD_RX_EQ_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_CMD_RX_EQ_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_CMD_RX_EQ_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_CMD_RX_EQ_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_CMD_RX_EQ_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_CMD_RX_EQ_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_CMD_RX_EQ_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_CMD_RX_EQ_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_CMD_RX_EQ_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_CMD_RX_EQ_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_CMD_RX_EQ_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_CMD_RX_EQ_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_CMD_RX_EQ_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_CMD_RXPHASE_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_CMD_RXPHASE_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_CMD_RXPHASE_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_CMD_RXPHASE_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_CMD_RXPHASE_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_CMD_RXPHASE_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_CMD_RXPHASE_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_CMD_RXPHASE_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_CMD_RXPHASE_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_CMD_RXPHASE_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_CMD_RXPHASE_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_CMD_RXPHASE_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_CMD_RXPHASE_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_CMD_RXPHASE_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_CMD_RXPHASE_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_CMD_RXPHASE_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_CMD_TXBST_PD_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_CMD_TXBST_PD_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_CMD_TXBST_PD_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_CMD_TXBST_PD_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_CMD_TXBST_PD_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_CMD_TXBST_PD_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_CMD_TXBST_PD_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_CMD_TXBST_PD_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_CMD_TXBST_PD_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_CMD_TXBST_PD_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_CMD_TXBST_PD_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_CMD_TXBST_PD_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_CMD_TXBST_PD_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_CMD_TXBST_PD_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_CMD_TXBST_PD_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_CMD_TXBST_PD_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_CMD_TXBST_PU_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_CMD_TXBST_PU_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_CMD_TXBST_PU_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_CMD_TXBST_PU_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_CMD_TXBST_PU_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_CMD_TXBST_PU_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_CMD_TXBST_PU_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_CMD_TXBST_PU_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_CMD_TXBST_PU_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_CMD_TXBST_PU_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_CMD_TXBST_PU_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_CMD_TXBST_PU_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_CMD_TXBST_PU_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_CMD_TXBST_PU_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_CMD_TXBST_PU_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_CMD_TXBST_PU_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_CMD_TXPHASE_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_CMD_TXPHASE_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_CMD_TXPHASE_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_CMD_TXPHASE_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_CMD_TXPHASE_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_CMD_TXPHASE_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_CMD_TXPHASE_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_CMD_TXPHASE_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_CMD_TXPHASE_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_CMD_TXPHASE_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_CMD_TXPHASE_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_CMD_TXPHASE_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_CMD_TXPHASE_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_CMD_TXPHASE_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_CMD_TXPHASE_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_CMD_TXPHASE_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_CMD_TXSLF_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_CMD_TXSLF_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_CMD_TXSLF_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_CMD_TXSLF_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_CMD_TXSLF_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_CMD_TXSLF_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_CMD_TXSLF_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_CMD_TXSLF_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_CMD_TXSLF_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_CMD_TXSLF_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_CMD_TXSLF_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_CMD_TXSLF_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_CMD_TXSLF_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_CMD_TXSLF_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_CMD_TXSLF_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_CMD_TXSLF_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DBI_CDR_PHSIZE_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DBI_CDR_PHSIZE_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DBI_CDR_PHSIZE_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DBI_CDR_PHSIZE_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DBI_CDR_PHSIZE_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DBI_CDR_PHSIZE_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DBI_CDR_PHSIZE_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DBI_CDR_PHSIZE_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DBI_CDR_PHSIZE_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DBI_CDR_PHSIZE_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DBI_CDR_PHSIZE_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DBI_CDR_PHSIZE_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DBI_CDR_PHSIZE_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DBI_CDR_PHSIZE_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DBI_CDR_PHSIZE_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DBI_CDR_PHSIZE_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DBI_CLKSEL_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DBI_CLKSEL_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DBI_CLKSEL_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DBI_CLKSEL_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DBI_CLKSEL_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DBI_CLKSEL_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DBI_CLKSEL_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DBI_CLKSEL_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DBI_CLKSEL_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DBI_CLKSEL_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DBI_CLKSEL_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DBI_CLKSEL_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DBI_CLKSEL_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DBI_CLKSEL_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DBI_CLKSEL_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DBI_CLKSEL_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DBI_MISC_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DBI_MISC_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DBI_MISC_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DBI_MISC_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DBI_MISC_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DBI_MISC_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DBI_MISC_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DBI_MISC_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DBI_MISC_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DBI_MISC_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DBI_MISC_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DBI_MISC_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DBI_MISC_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DBI_MISC_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DBI_MISC_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DBI_MISC_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DBI_OFSCAL_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DBI_OFSCAL_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DBI_OFSCAL_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DBI_OFSCAL_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DBI_OFSCAL_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DBI_OFSCAL_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DBI_OFSCAL_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DBI_OFSCAL_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DBI_OFSCAL_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DBI_OFSCAL_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DBI_OFSCAL_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DBI_OFSCAL_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DBI_OFSCAL_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DBI_OFSCAL_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DBI_OFSCAL_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DBI_OFSCAL_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DBI_RX_EQ_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DBI_RX_EQ_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DBI_RX_EQ_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DBI_RX_EQ_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DBI_RX_EQ_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DBI_RX_EQ_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DBI_RX_EQ_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DBI_RX_EQ_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DBI_RX_EQ_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DBI_RX_EQ_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DBI_RX_EQ_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DBI_RX_EQ_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DBI_RX_EQ_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DBI_RX_EQ_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DBI_RX_EQ_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DBI_RX_EQ_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DBI_RXPHASE_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DBI_RXPHASE_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DBI_RXPHASE_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DBI_RXPHASE_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DBI_RXPHASE_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DBI_RXPHASE_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DBI_RXPHASE_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DBI_RXPHASE_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DBI_RXPHASE_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DBI_RXPHASE_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DBI_RXPHASE_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DBI_RXPHASE_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DBI_RXPHASE_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DBI_RXPHASE_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DBI_RXPHASE_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DBI_RXPHASE_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DBI_RX_VREF_CAL_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DBI_RX_VREF_CAL_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DBI_RX_VREF_CAL_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DBI_RX_VREF_CAL_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DBI_RX_VREF_CAL_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DBI_RX_VREF_CAL_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DBI_RX_VREF_CAL_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DBI_RX_VREF_CAL_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DBI_RX_VREF_CAL_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DBI_RX_VREF_CAL_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DBI_RX_VREF_CAL_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DBI_RX_VREF_CAL_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DBI_RX_VREF_CAL_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DBI_RX_VREF_CAL_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DBI_RX_VREF_CAL_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DBI_RX_VREF_CAL_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DBI_TXBST_PD_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DBI_TXBST_PD_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DBI_TXBST_PD_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DBI_TXBST_PD_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DBI_TXBST_PD_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DBI_TXBST_PD_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DBI_TXBST_PD_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DBI_TXBST_PD_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DBI_TXBST_PD_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DBI_TXBST_PD_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DBI_TXBST_PD_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DBI_TXBST_PD_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DBI_TXBST_PD_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DBI_TXBST_PD_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DBI_TXBST_PD_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DBI_TXBST_PD_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DBI_TXBST_PU_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DBI_TXBST_PU_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DBI_TXBST_PU_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DBI_TXBST_PU_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DBI_TXBST_PU_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DBI_TXBST_PU_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DBI_TXBST_PU_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DBI_TXBST_PU_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DBI_TXBST_PU_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DBI_TXBST_PU_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DBI_TXBST_PU_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DBI_TXBST_PU_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DBI_TXBST_PU_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DBI_TXBST_PU_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DBI_TXBST_PU_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DBI_TXBST_PU_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DBI_TXPHASE_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DBI_TXPHASE_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DBI_TXPHASE_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DBI_TXPHASE_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DBI_TXPHASE_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DBI_TXPHASE_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DBI_TXPHASE_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DBI_TXPHASE_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DBI_TXPHASE_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DBI_TXPHASE_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DBI_TXPHASE_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DBI_TXPHASE_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DBI_TXPHASE_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DBI_TXPHASE_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DBI_TXPHASE_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DBI_TXPHASE_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DBI_TXSLF_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DBI_TXSLF_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DBI_TXSLF_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DBI_TXSLF_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DBI_TXSLF_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DBI_TXSLF_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DBI_TXSLF_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DBI_TXSLF_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DBI_TXSLF_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DBI_TXSLF_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DBI_TXSLF_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DBI_TXSLF_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DBI_TXSLF_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DBI_TXSLF_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DBI_TXSLF_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DBI_TXSLF_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQ0_RX_DYN_PM_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQ0_RX_DYN_PM_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQ0_RX_DYN_PM_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQ0_RX_DYN_PM_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQ0_RX_DYN_PM_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQ0_RX_DYN_PM_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQ0_RX_DYN_PM_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQ0_RX_DYN_PM_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQ0_RX_DYN_PM_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQ0_RX_DYN_PM_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQ0_RX_DYN_PM_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQ0_RX_DYN_PM_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQ0_RX_DYN_PM_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQ0_RX_DYN_PM_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQ0_RX_DYN_PM_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQ0_RX_DYN_PM_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQ0_RX_EQ_PM_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQ0_RX_EQ_PM_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQ0_RX_EQ_PM_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQ0_RX_EQ_PM_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQ0_RX_EQ_PM_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQ0_RX_EQ_PM_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQ0_RX_EQ_PM_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQ0_RX_EQ_PM_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQ0_RX_EQ_PM_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQ0_RX_EQ_PM_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQ0_RX_EQ_PM_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQ0_RX_EQ_PM_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQ0_RX_EQ_PM_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQ0_RX_EQ_PM_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQ0_RX_EQ_PM_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQ0_RX_EQ_PM_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQ1_RX_DYN_PM_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQ1_RX_DYN_PM_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQ1_RX_DYN_PM_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQ1_RX_DYN_PM_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQ1_RX_DYN_PM_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQ1_RX_DYN_PM_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQ1_RX_DYN_PM_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQ1_RX_DYN_PM_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQ1_RX_DYN_PM_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQ1_RX_DYN_PM_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQ1_RX_DYN_PM_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQ1_RX_DYN_PM_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQ1_RX_DYN_PM_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQ1_RX_DYN_PM_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQ1_RX_DYN_PM_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQ1_RX_DYN_PM_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQ1_RX_EQ_PM_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQ1_RX_EQ_PM_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQ1_RX_EQ_PM_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQ1_RX_EQ_PM_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQ1_RX_EQ_PM_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQ1_RX_EQ_PM_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQ1_RX_EQ_PM_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQ1_RX_EQ_PM_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQ1_RX_EQ_PM_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQ1_RX_EQ_PM_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQ1_RX_EQ_PM_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQ1_RX_EQ_PM_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQ1_RX_EQ_PM_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQ1_RX_EQ_PM_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQ1_RX_EQ_PM_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQ1_RX_EQ_PM_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB0_CDR_PHSIZE_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB0_CDR_PHSIZE_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB0_CDR_PHSIZE_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB0_CDR_PHSIZE_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB0_CDR_PHSIZE_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB0_CDR_PHSIZE_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB0_CDR_PHSIZE_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB0_CDR_PHSIZE_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB0_CDR_PHSIZE_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB0_CDR_PHSIZE_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB0_CDR_PHSIZE_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB0_CDR_PHSIZE_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB0_CDR_PHSIZE_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB0_CDR_PHSIZE_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB0_CDR_PHSIZE_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB0_CDR_PHSIZE_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB0H_CLKSEL_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB0H_CLKSEL_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB0H_CLKSEL_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB0H_CLKSEL_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB0H_CLKSEL_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB0H_CLKSEL_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB0H_CLKSEL_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB0H_CLKSEL_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB0H_CLKSEL_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB0H_CLKSEL_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB0H_CLKSEL_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB0H_CLKSEL_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB0H_CLKSEL_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB0H_CLKSEL_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB0H_CLKSEL_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB0H_CLKSEL_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB0H_MISC_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB0H_MISC_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB0H_MISC_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB0H_MISC_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB0H_MISC_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB0H_MISC_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB0H_MISC_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB0H_MISC_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB0H_MISC_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB0H_MISC_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB0H_MISC_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB0H_MISC_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB0H_MISC_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB0H_MISC_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB0H_MISC_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB0H_MISC_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB0H_OFSCAL_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB0H_OFSCAL_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB0H_OFSCAL_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB0H_OFSCAL_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB0H_OFSCAL_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB0H_OFSCAL_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB0H_OFSCAL_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB0H_OFSCAL_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB0H_OFSCAL_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB0H_OFSCAL_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB0H_OFSCAL_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB0H_OFSCAL_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB0H_OFSCAL_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB0H_OFSCAL_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB0H_OFSCAL_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB0H_OFSCAL_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB0H_RX_EQ_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB0H_RX_EQ_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB0H_RX_EQ_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB0H_RX_EQ_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB0H_RX_EQ_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB0H_RX_EQ_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB0H_RX_EQ_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB0H_RX_EQ_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB0H_RX_EQ_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB0H_RX_EQ_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB0H_RX_EQ_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB0H_RX_EQ_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB0H_RX_EQ_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB0H_RX_EQ_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB0H_RX_EQ_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB0H_RX_EQ_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB0H_RXPHASE_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB0H_RXPHASE_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB0H_RXPHASE_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB0H_RXPHASE_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB0H_RXPHASE_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB0H_RXPHASE_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB0H_RXPHASE_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB0H_RXPHASE_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB0H_RXPHASE_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB0H_RXPHASE_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB0H_RXPHASE_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB0H_RXPHASE_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB0H_RXPHASE_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB0H_RXPHASE_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB0H_RXPHASE_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB0H_RXPHASE_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB0H_RX_VREF_CAL_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB0H_RX_VREF_CAL_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB0H_RX_VREF_CAL_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB0H_RX_VREF_CAL_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB0H_RX_VREF_CAL_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB0H_RX_VREF_CAL_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB0H_RX_VREF_CAL_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB0H_RX_VREF_CAL_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB0H_RX_VREF_CAL_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB0H_RX_VREF_CAL_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB0H_RX_VREF_CAL_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB0H_RX_VREF_CAL_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB0H_RX_VREF_CAL_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB0H_RX_VREF_CAL_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB0H_RX_VREF_CAL_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB0H_RX_VREF_CAL_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB0H_TXBST_PD_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB0H_TXBST_PD_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB0H_TXBST_PD_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB0H_TXBST_PD_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB0H_TXBST_PD_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB0H_TXBST_PD_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB0H_TXBST_PD_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB0H_TXBST_PD_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB0H_TXBST_PD_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB0H_TXBST_PD_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB0H_TXBST_PD_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB0H_TXBST_PD_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB0H_TXBST_PD_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB0H_TXBST_PD_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB0H_TXBST_PD_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB0H_TXBST_PD_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB0H_TXBST_PU_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB0H_TXBST_PU_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB0H_TXBST_PU_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB0H_TXBST_PU_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB0H_TXBST_PU_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB0H_TXBST_PU_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB0H_TXBST_PU_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB0H_TXBST_PU_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB0H_TXBST_PU_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB0H_TXBST_PU_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB0H_TXBST_PU_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB0H_TXBST_PU_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB0H_TXBST_PU_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB0H_TXBST_PU_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB0H_TXBST_PU_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB0H_TXBST_PU_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB0H_TXPHASE_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB0H_TXPHASE_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB0H_TXPHASE_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB0H_TXPHASE_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB0H_TXPHASE_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB0H_TXPHASE_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB0H_TXPHASE_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB0H_TXPHASE_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB0H_TXPHASE_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB0H_TXPHASE_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB0H_TXPHASE_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB0H_TXPHASE_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB0H_TXPHASE_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB0H_TXPHASE_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB0H_TXPHASE_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB0H_TXPHASE_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB0H_TXSLF_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB0H_TXSLF_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB0H_TXSLF_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB0H_TXSLF_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB0H_TXSLF_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB0H_TXSLF_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB0H_TXSLF_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB0H_TXSLF_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB0H_TXSLF_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB0H_TXSLF_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB0H_TXSLF_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB0H_TXSLF_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB0H_TXSLF_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB0H_TXSLF_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB0H_TXSLF_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB0H_TXSLF_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB0L_CLKSEL_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB0L_CLKSEL_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB0L_CLKSEL_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB0L_CLKSEL_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB0L_CLKSEL_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB0L_CLKSEL_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB0L_CLKSEL_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB0L_CLKSEL_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB0L_CLKSEL_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB0L_CLKSEL_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB0L_CLKSEL_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB0L_CLKSEL_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB0L_CLKSEL_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB0L_CLKSEL_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB0L_CLKSEL_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB0L_CLKSEL_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB0L_MISC_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB0L_MISC_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB0L_MISC_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB0L_MISC_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB0L_MISC_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB0L_MISC_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB0L_MISC_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB0L_MISC_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB0L_MISC_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB0L_MISC_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB0L_MISC_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB0L_MISC_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB0L_MISC_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB0L_MISC_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB0L_MISC_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB0L_MISC_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB0L_OFSCAL_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB0L_OFSCAL_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB0L_OFSCAL_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB0L_OFSCAL_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB0L_OFSCAL_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB0L_OFSCAL_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB0L_OFSCAL_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB0L_OFSCAL_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB0L_OFSCAL_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB0L_OFSCAL_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB0L_OFSCAL_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB0L_OFSCAL_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB0L_OFSCAL_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB0L_OFSCAL_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB0L_OFSCAL_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB0L_OFSCAL_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB0L_RX_EQ_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB0L_RX_EQ_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB0L_RX_EQ_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB0L_RX_EQ_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB0L_RX_EQ_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB0L_RX_EQ_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB0L_RX_EQ_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB0L_RX_EQ_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB0L_RX_EQ_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB0L_RX_EQ_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB0L_RX_EQ_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB0L_RX_EQ_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB0L_RX_EQ_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB0L_RX_EQ_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB0L_RX_EQ_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB0L_RX_EQ_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB0L_RXPHASE_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB0L_RXPHASE_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB0L_RXPHASE_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB0L_RXPHASE_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB0L_RXPHASE_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB0L_RXPHASE_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB0L_RXPHASE_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB0L_RXPHASE_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB0L_RXPHASE_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB0L_RXPHASE_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB0L_RXPHASE_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB0L_RXPHASE_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB0L_RXPHASE_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB0L_RXPHASE_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB0L_RXPHASE_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB0L_RXPHASE_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB0L_RX_VREF_CAL_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB0L_RX_VREF_CAL_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB0L_RX_VREF_CAL_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB0L_RX_VREF_CAL_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB0L_RX_VREF_CAL_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB0L_RX_VREF_CAL_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB0L_RX_VREF_CAL_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB0L_RX_VREF_CAL_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB0L_RX_VREF_CAL_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB0L_RX_VREF_CAL_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB0L_RX_VREF_CAL_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB0L_RX_VREF_CAL_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB0L_RX_VREF_CAL_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB0L_RX_VREF_CAL_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB0L_RX_VREF_CAL_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB0L_RX_VREF_CAL_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB0L_TXBST_PD_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB0L_TXBST_PD_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB0L_TXBST_PD_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB0L_TXBST_PD_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB0L_TXBST_PD_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB0L_TXBST_PD_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB0L_TXBST_PD_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB0L_TXBST_PD_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB0L_TXBST_PD_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB0L_TXBST_PD_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB0L_TXBST_PD_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB0L_TXBST_PD_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB0L_TXBST_PD_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB0L_TXBST_PD_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB0L_TXBST_PD_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB0L_TXBST_PD_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB0L_TXBST_PU_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB0L_TXBST_PU_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB0L_TXBST_PU_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB0L_TXBST_PU_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB0L_TXBST_PU_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB0L_TXBST_PU_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB0L_TXBST_PU_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB0L_TXBST_PU_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB0L_TXBST_PU_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB0L_TXBST_PU_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB0L_TXBST_PU_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB0L_TXBST_PU_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB0L_TXBST_PU_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB0L_TXBST_PU_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB0L_TXBST_PU_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB0L_TXBST_PU_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB0L_TXPHASE_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB0L_TXPHASE_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB0L_TXPHASE_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB0L_TXPHASE_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB0L_TXPHASE_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB0L_TXPHASE_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB0L_TXPHASE_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB0L_TXPHASE_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB0L_TXPHASE_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB0L_TXPHASE_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB0L_TXPHASE_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB0L_TXPHASE_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB0L_TXPHASE_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB0L_TXPHASE_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB0L_TXPHASE_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB0L_TXPHASE_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB0L_TXSLF_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB0L_TXSLF_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB0L_TXSLF_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB0L_TXSLF_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB0L_TXSLF_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB0L_TXSLF_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB0L_TXSLF_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB0L_TXSLF_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB0L_TXSLF_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB0L_TXSLF_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB0L_TXSLF_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB0L_TXSLF_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB0L_TXSLF_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB0L_TXSLF_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB0L_TXSLF_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB0L_TXSLF_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB1_CDR_PHSIZE_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB1_CDR_PHSIZE_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB1_CDR_PHSIZE_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB1_CDR_PHSIZE_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB1_CDR_PHSIZE_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB1_CDR_PHSIZE_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB1_CDR_PHSIZE_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB1_CDR_PHSIZE_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB1_CDR_PHSIZE_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB1_CDR_PHSIZE_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB1_CDR_PHSIZE_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB1_CDR_PHSIZE_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB1_CDR_PHSIZE_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB1_CDR_PHSIZE_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB1_CDR_PHSIZE_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB1_CDR_PHSIZE_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB1H_CLKSEL_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB1H_CLKSEL_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB1H_CLKSEL_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB1H_CLKSEL_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB1H_CLKSEL_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB1H_CLKSEL_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB1H_CLKSEL_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB1H_CLKSEL_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB1H_CLKSEL_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB1H_CLKSEL_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB1H_CLKSEL_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB1H_CLKSEL_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB1H_CLKSEL_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB1H_CLKSEL_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB1H_CLKSEL_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB1H_CLKSEL_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB1H_MISC_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB1H_MISC_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB1H_MISC_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB1H_MISC_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB1H_MISC_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB1H_MISC_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB1H_MISC_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB1H_MISC_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB1H_MISC_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB1H_MISC_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB1H_MISC_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB1H_MISC_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB1H_MISC_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB1H_MISC_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB1H_MISC_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB1H_MISC_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB1H_OFSCAL_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB1H_OFSCAL_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB1H_OFSCAL_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB1H_OFSCAL_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB1H_OFSCAL_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB1H_OFSCAL_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB1H_OFSCAL_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB1H_OFSCAL_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB1H_OFSCAL_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB1H_OFSCAL_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB1H_OFSCAL_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB1H_OFSCAL_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB1H_OFSCAL_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB1H_OFSCAL_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB1H_OFSCAL_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB1H_OFSCAL_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB1H_RX_EQ_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB1H_RX_EQ_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB1H_RX_EQ_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB1H_RX_EQ_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB1H_RX_EQ_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB1H_RX_EQ_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB1H_RX_EQ_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB1H_RX_EQ_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB1H_RX_EQ_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB1H_RX_EQ_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB1H_RX_EQ_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB1H_RX_EQ_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB1H_RX_EQ_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB1H_RX_EQ_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB1H_RX_EQ_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB1H_RX_EQ_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB1H_RXPHASE_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB1H_RXPHASE_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB1H_RXPHASE_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB1H_RXPHASE_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB1H_RXPHASE_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB1H_RXPHASE_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB1H_RXPHASE_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB1H_RXPHASE_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB1H_RXPHASE_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB1H_RXPHASE_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB1H_RXPHASE_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB1H_RXPHASE_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB1H_RXPHASE_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB1H_RXPHASE_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB1H_RXPHASE_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB1H_RXPHASE_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB1H_RX_VREF_CAL_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB1H_RX_VREF_CAL_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB1H_RX_VREF_CAL_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB1H_RX_VREF_CAL_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB1H_RX_VREF_CAL_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB1H_RX_VREF_CAL_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB1H_RX_VREF_CAL_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB1H_RX_VREF_CAL_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB1H_RX_VREF_CAL_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB1H_RX_VREF_CAL_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB1H_RX_VREF_CAL_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB1H_RX_VREF_CAL_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB1H_RX_VREF_CAL_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB1H_RX_VREF_CAL_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB1H_RX_VREF_CAL_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB1H_RX_VREF_CAL_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB1H_TXBST_PD_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB1H_TXBST_PD_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB1H_TXBST_PD_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB1H_TXBST_PD_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB1H_TXBST_PD_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB1H_TXBST_PD_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB1H_TXBST_PD_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB1H_TXBST_PD_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB1H_TXBST_PD_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB1H_TXBST_PD_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB1H_TXBST_PD_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB1H_TXBST_PD_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB1H_TXBST_PD_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB1H_TXBST_PD_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB1H_TXBST_PD_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB1H_TXBST_PD_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB1H_TXBST_PU_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB1H_TXBST_PU_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB1H_TXBST_PU_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB1H_TXBST_PU_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB1H_TXBST_PU_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB1H_TXBST_PU_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB1H_TXBST_PU_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB1H_TXBST_PU_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB1H_TXBST_PU_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB1H_TXBST_PU_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB1H_TXBST_PU_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB1H_TXBST_PU_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB1H_TXBST_PU_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB1H_TXBST_PU_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB1H_TXBST_PU_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB1H_TXBST_PU_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB1H_TXPHASE_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB1H_TXPHASE_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB1H_TXPHASE_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB1H_TXPHASE_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB1H_TXPHASE_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB1H_TXPHASE_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB1H_TXPHASE_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB1H_TXPHASE_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB1H_TXPHASE_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB1H_TXPHASE_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB1H_TXPHASE_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB1H_TXPHASE_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB1H_TXPHASE_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB1H_TXPHASE_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB1H_TXPHASE_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB1H_TXPHASE_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB1H_TXSLF_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB1H_TXSLF_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB1H_TXSLF_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB1H_TXSLF_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB1H_TXSLF_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB1H_TXSLF_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB1H_TXSLF_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB1H_TXSLF_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB1H_TXSLF_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB1H_TXSLF_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB1H_TXSLF_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB1H_TXSLF_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB1H_TXSLF_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB1H_TXSLF_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB1H_TXSLF_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB1H_TXSLF_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB1L_CLKSEL_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB1L_CLKSEL_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB1L_CLKSEL_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB1L_CLKSEL_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB1L_CLKSEL_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB1L_CLKSEL_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB1L_CLKSEL_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB1L_CLKSEL_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB1L_CLKSEL_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB1L_CLKSEL_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB1L_CLKSEL_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB1L_CLKSEL_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB1L_CLKSEL_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB1L_CLKSEL_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB1L_CLKSEL_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB1L_CLKSEL_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB1L_MISC_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB1L_MISC_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB1L_MISC_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB1L_MISC_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB1L_MISC_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB1L_MISC_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB1L_MISC_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB1L_MISC_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB1L_MISC_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB1L_MISC_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB1L_MISC_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB1L_MISC_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB1L_MISC_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB1L_MISC_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB1L_MISC_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB1L_MISC_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB1L_OFSCAL_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB1L_OFSCAL_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB1L_OFSCAL_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB1L_OFSCAL_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB1L_OFSCAL_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB1L_OFSCAL_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB1L_OFSCAL_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB1L_OFSCAL_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB1L_OFSCAL_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB1L_OFSCAL_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB1L_OFSCAL_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB1L_OFSCAL_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB1L_OFSCAL_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB1L_OFSCAL_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB1L_OFSCAL_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB1L_OFSCAL_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB1L_RX_EQ_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB1L_RX_EQ_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB1L_RX_EQ_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB1L_RX_EQ_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB1L_RX_EQ_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB1L_RX_EQ_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB1L_RX_EQ_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB1L_RX_EQ_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB1L_RX_EQ_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB1L_RX_EQ_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB1L_RX_EQ_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB1L_RX_EQ_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB1L_RX_EQ_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB1L_RX_EQ_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB1L_RX_EQ_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB1L_RX_EQ_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB1L_RXPHASE_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB1L_RXPHASE_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB1L_RXPHASE_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB1L_RXPHASE_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB1L_RXPHASE_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB1L_RXPHASE_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB1L_RXPHASE_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB1L_RXPHASE_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB1L_RXPHASE_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB1L_RXPHASE_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB1L_RXPHASE_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB1L_RXPHASE_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB1L_RXPHASE_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB1L_RXPHASE_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB1L_RXPHASE_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB1L_RXPHASE_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB1L_RX_VREF_CAL_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB1L_RX_VREF_CAL_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB1L_RX_VREF_CAL_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB1L_RX_VREF_CAL_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB1L_RX_VREF_CAL_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB1L_RX_VREF_CAL_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB1L_RX_VREF_CAL_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB1L_RX_VREF_CAL_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB1L_RX_VREF_CAL_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB1L_RX_VREF_CAL_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB1L_RX_VREF_CAL_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB1L_RX_VREF_CAL_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB1L_RX_VREF_CAL_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB1L_RX_VREF_CAL_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB1L_RX_VREF_CAL_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB1L_RX_VREF_CAL_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB1L_TXBST_PD_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB1L_TXBST_PD_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB1L_TXBST_PD_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB1L_TXBST_PD_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB1L_TXBST_PD_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB1L_TXBST_PD_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB1L_TXBST_PD_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB1L_TXBST_PD_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB1L_TXBST_PD_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB1L_TXBST_PD_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB1L_TXBST_PD_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB1L_TXBST_PD_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB1L_TXBST_PD_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB1L_TXBST_PD_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB1L_TXBST_PD_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB1L_TXBST_PD_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB1L_TXBST_PU_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB1L_TXBST_PU_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB1L_TXBST_PU_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB1L_TXBST_PU_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB1L_TXBST_PU_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB1L_TXBST_PU_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB1L_TXBST_PU_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB1L_TXBST_PU_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB1L_TXBST_PU_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB1L_TXBST_PU_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB1L_TXBST_PU_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB1L_TXBST_PU_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB1L_TXBST_PU_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB1L_TXBST_PU_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB1L_TXBST_PU_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB1L_TXBST_PU_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB1L_TXPHASE_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB1L_TXPHASE_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB1L_TXPHASE_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB1L_TXPHASE_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB1L_TXPHASE_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB1L_TXPHASE_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB1L_TXPHASE_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB1L_TXPHASE_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB1L_TXPHASE_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB1L_TXPHASE_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB1L_TXPHASE_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB1L_TXPHASE_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB1L_TXPHASE_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB1L_TXPHASE_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB1L_TXPHASE_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB1L_TXPHASE_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB1L_TXSLF_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB1L_TXSLF_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB1L_TXSLF_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB1L_TXSLF_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB1L_TXSLF_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB1L_TXSLF_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB1L_TXSLF_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB1L_TXSLF_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB1L_TXSLF_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB1L_TXSLF_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB1L_TXSLF_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB1L_TXSLF_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB1L_TXSLF_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB1L_TXSLF_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB1L_TXSLF_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB1L_TXSLF_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB2_CDR_PHSIZE_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB2_CDR_PHSIZE_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB2_CDR_PHSIZE_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB2_CDR_PHSIZE_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB2_CDR_PHSIZE_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB2_CDR_PHSIZE_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB2_CDR_PHSIZE_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB2_CDR_PHSIZE_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB2_CDR_PHSIZE_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB2_CDR_PHSIZE_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB2_CDR_PHSIZE_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB2_CDR_PHSIZE_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB2_CDR_PHSIZE_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB2_CDR_PHSIZE_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB2_CDR_PHSIZE_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB2_CDR_PHSIZE_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB2H_CLKSEL_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB2H_CLKSEL_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB2H_CLKSEL_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB2H_CLKSEL_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB2H_CLKSEL_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB2H_CLKSEL_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB2H_CLKSEL_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB2H_CLKSEL_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB2H_CLKSEL_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB2H_CLKSEL_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB2H_CLKSEL_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB2H_CLKSEL_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB2H_CLKSEL_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB2H_CLKSEL_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB2H_CLKSEL_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB2H_CLKSEL_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB2H_MISC_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB2H_MISC_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB2H_MISC_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB2H_MISC_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB2H_MISC_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB2H_MISC_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB2H_MISC_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB2H_MISC_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB2H_MISC_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB2H_MISC_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB2H_MISC_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB2H_MISC_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB2H_MISC_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB2H_MISC_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB2H_MISC_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB2H_MISC_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB2H_OFSCAL_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB2H_OFSCAL_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB2H_OFSCAL_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB2H_OFSCAL_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB2H_OFSCAL_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB2H_OFSCAL_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB2H_OFSCAL_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB2H_OFSCAL_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB2H_OFSCAL_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB2H_OFSCAL_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB2H_OFSCAL_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB2H_OFSCAL_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB2H_OFSCAL_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB2H_OFSCAL_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB2H_OFSCAL_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB2H_OFSCAL_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB2H_RX_EQ_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB2H_RX_EQ_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB2H_RX_EQ_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB2H_RX_EQ_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB2H_RX_EQ_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB2H_RX_EQ_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB2H_RX_EQ_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB2H_RX_EQ_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB2H_RX_EQ_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB2H_RX_EQ_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB2H_RX_EQ_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB2H_RX_EQ_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB2H_RX_EQ_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB2H_RX_EQ_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB2H_RX_EQ_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB2H_RX_EQ_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB2H_RXPHASE_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB2H_RXPHASE_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB2H_RXPHASE_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB2H_RXPHASE_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB2H_RXPHASE_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB2H_RXPHASE_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB2H_RXPHASE_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB2H_RXPHASE_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB2H_RXPHASE_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB2H_RXPHASE_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB2H_RXPHASE_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB2H_RXPHASE_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB2H_RXPHASE_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB2H_RXPHASE_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB2H_RXPHASE_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB2H_RXPHASE_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB2H_RX_VREF_CAL_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB2H_RX_VREF_CAL_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB2H_RX_VREF_CAL_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB2H_RX_VREF_CAL_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB2H_RX_VREF_CAL_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB2H_RX_VREF_CAL_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB2H_RX_VREF_CAL_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB2H_RX_VREF_CAL_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB2H_RX_VREF_CAL_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB2H_RX_VREF_CAL_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB2H_RX_VREF_CAL_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB2H_RX_VREF_CAL_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB2H_RX_VREF_CAL_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB2H_RX_VREF_CAL_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB2H_RX_VREF_CAL_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB2H_RX_VREF_CAL_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB2H_TXBST_PD_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB2H_TXBST_PD_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB2H_TXBST_PD_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB2H_TXBST_PD_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB2H_TXBST_PD_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB2H_TXBST_PD_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB2H_TXBST_PD_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB2H_TXBST_PD_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB2H_TXBST_PD_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB2H_TXBST_PD_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB2H_TXBST_PD_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB2H_TXBST_PD_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB2H_TXBST_PD_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB2H_TXBST_PD_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB2H_TXBST_PD_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB2H_TXBST_PD_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB2H_TXBST_PU_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB2H_TXBST_PU_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB2H_TXBST_PU_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB2H_TXBST_PU_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB2H_TXBST_PU_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB2H_TXBST_PU_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB2H_TXBST_PU_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB2H_TXBST_PU_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB2H_TXBST_PU_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB2H_TXBST_PU_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB2H_TXBST_PU_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB2H_TXBST_PU_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB2H_TXBST_PU_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB2H_TXBST_PU_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB2H_TXBST_PU_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB2H_TXBST_PU_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB2H_TXPHASE_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB2H_TXPHASE_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB2H_TXPHASE_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB2H_TXPHASE_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB2H_TXPHASE_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB2H_TXPHASE_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB2H_TXPHASE_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB2H_TXPHASE_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB2H_TXPHASE_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB2H_TXPHASE_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB2H_TXPHASE_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB2H_TXPHASE_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB2H_TXPHASE_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB2H_TXPHASE_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB2H_TXPHASE_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB2H_TXPHASE_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB2H_TXSLF_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB2H_TXSLF_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB2H_TXSLF_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB2H_TXSLF_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB2H_TXSLF_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB2H_TXSLF_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB2H_TXSLF_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB2H_TXSLF_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB2H_TXSLF_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB2H_TXSLF_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB2H_TXSLF_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB2H_TXSLF_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB2H_TXSLF_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB2H_TXSLF_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB2H_TXSLF_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB2H_TXSLF_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB2L_CLKSEL_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB2L_CLKSEL_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB2L_CLKSEL_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB2L_CLKSEL_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB2L_CLKSEL_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB2L_CLKSEL_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB2L_CLKSEL_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB2L_CLKSEL_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB2L_CLKSEL_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB2L_CLKSEL_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB2L_CLKSEL_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB2L_CLKSEL_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB2L_CLKSEL_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB2L_CLKSEL_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB2L_CLKSEL_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB2L_CLKSEL_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB2L_MISC_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB2L_MISC_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB2L_MISC_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB2L_MISC_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB2L_MISC_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB2L_MISC_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB2L_MISC_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB2L_MISC_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB2L_MISC_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB2L_MISC_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB2L_MISC_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB2L_MISC_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB2L_MISC_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB2L_MISC_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB2L_MISC_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB2L_MISC_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB2L_OFSCAL_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB2L_OFSCAL_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB2L_OFSCAL_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB2L_OFSCAL_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB2L_OFSCAL_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB2L_OFSCAL_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB2L_OFSCAL_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB2L_OFSCAL_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB2L_OFSCAL_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB2L_OFSCAL_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB2L_OFSCAL_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB2L_OFSCAL_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB2L_OFSCAL_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB2L_OFSCAL_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB2L_OFSCAL_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB2L_OFSCAL_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB2L_RX_EQ_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB2L_RX_EQ_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB2L_RX_EQ_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB2L_RX_EQ_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB2L_RX_EQ_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB2L_RX_EQ_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB2L_RX_EQ_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB2L_RX_EQ_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB2L_RX_EQ_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB2L_RX_EQ_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB2L_RX_EQ_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB2L_RX_EQ_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB2L_RX_EQ_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB2L_RX_EQ_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB2L_RX_EQ_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB2L_RX_EQ_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB2L_RXPHASE_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB2L_RXPHASE_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB2L_RXPHASE_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB2L_RXPHASE_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB2L_RXPHASE_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB2L_RXPHASE_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB2L_RXPHASE_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB2L_RXPHASE_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB2L_RXPHASE_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB2L_RXPHASE_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB2L_RXPHASE_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB2L_RXPHASE_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB2L_RXPHASE_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB2L_RXPHASE_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB2L_RXPHASE_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB2L_RXPHASE_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB2L_RX_VREF_CAL_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB2L_RX_VREF_CAL_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB2L_RX_VREF_CAL_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB2L_RX_VREF_CAL_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB2L_RX_VREF_CAL_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB2L_RX_VREF_CAL_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB2L_RX_VREF_CAL_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB2L_RX_VREF_CAL_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB2L_RX_VREF_CAL_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB2L_RX_VREF_CAL_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB2L_RX_VREF_CAL_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB2L_RX_VREF_CAL_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB2L_RX_VREF_CAL_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB2L_RX_VREF_CAL_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB2L_RX_VREF_CAL_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB2L_RX_VREF_CAL_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB2L_TXBST_PD_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB2L_TXBST_PD_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB2L_TXBST_PD_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB2L_TXBST_PD_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB2L_TXBST_PD_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB2L_TXBST_PD_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB2L_TXBST_PD_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB2L_TXBST_PD_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB2L_TXBST_PD_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB2L_TXBST_PD_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB2L_TXBST_PD_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB2L_TXBST_PD_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB2L_TXBST_PD_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB2L_TXBST_PD_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB2L_TXBST_PD_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB2L_TXBST_PD_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB2L_TXBST_PU_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB2L_TXBST_PU_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB2L_TXBST_PU_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB2L_TXBST_PU_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB2L_TXBST_PU_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB2L_TXBST_PU_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB2L_TXBST_PU_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB2L_TXBST_PU_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB2L_TXBST_PU_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB2L_TXBST_PU_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB2L_TXBST_PU_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB2L_TXBST_PU_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB2L_TXBST_PU_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB2L_TXBST_PU_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB2L_TXBST_PU_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB2L_TXBST_PU_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB2L_TXPHASE_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB2L_TXPHASE_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB2L_TXPHASE_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB2L_TXPHASE_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB2L_TXPHASE_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB2L_TXPHASE_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB2L_TXPHASE_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB2L_TXPHASE_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB2L_TXPHASE_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB2L_TXPHASE_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB2L_TXPHASE_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB2L_TXPHASE_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB2L_TXPHASE_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB2L_TXPHASE_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB2L_TXPHASE_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB2L_TXPHASE_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB2L_TXSLF_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB2L_TXSLF_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB2L_TXSLF_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB2L_TXSLF_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB2L_TXSLF_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB2L_TXSLF_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB2L_TXSLF_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB2L_TXSLF_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB2L_TXSLF_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB2L_TXSLF_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB2L_TXSLF_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB2L_TXSLF_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB2L_TXSLF_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB2L_TXSLF_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB2L_TXSLF_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB2L_TXSLF_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB3_CDR_PHSIZE_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB3_CDR_PHSIZE_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB3_CDR_PHSIZE_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB3_CDR_PHSIZE_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB3_CDR_PHSIZE_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB3_CDR_PHSIZE_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB3_CDR_PHSIZE_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB3_CDR_PHSIZE_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB3_CDR_PHSIZE_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB3_CDR_PHSIZE_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB3_CDR_PHSIZE_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB3_CDR_PHSIZE_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB3_CDR_PHSIZE_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB3_CDR_PHSIZE_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB3_CDR_PHSIZE_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB3_CDR_PHSIZE_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB3H_CLKSEL_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB3H_CLKSEL_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB3H_CLKSEL_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB3H_CLKSEL_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB3H_CLKSEL_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB3H_CLKSEL_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB3H_CLKSEL_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB3H_CLKSEL_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB3H_CLKSEL_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB3H_CLKSEL_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB3H_CLKSEL_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB3H_CLKSEL_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB3H_CLKSEL_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB3H_CLKSEL_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB3H_CLKSEL_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB3H_CLKSEL_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB3H_MISC_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB3H_MISC_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB3H_MISC_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB3H_MISC_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB3H_MISC_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB3H_MISC_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB3H_MISC_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB3H_MISC_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB3H_MISC_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB3H_MISC_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB3H_MISC_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB3H_MISC_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB3H_MISC_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB3H_MISC_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB3H_MISC_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB3H_MISC_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB3H_OFSCAL_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB3H_OFSCAL_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB3H_OFSCAL_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB3H_OFSCAL_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB3H_OFSCAL_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB3H_OFSCAL_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB3H_OFSCAL_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB3H_OFSCAL_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB3H_OFSCAL_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB3H_OFSCAL_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB3H_OFSCAL_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB3H_OFSCAL_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB3H_OFSCAL_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB3H_OFSCAL_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB3H_OFSCAL_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB3H_OFSCAL_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB3H_RX_EQ_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB3H_RX_EQ_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB3H_RX_EQ_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB3H_RX_EQ_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB3H_RX_EQ_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB3H_RX_EQ_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB3H_RX_EQ_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB3H_RX_EQ_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB3H_RX_EQ_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB3H_RX_EQ_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB3H_RX_EQ_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB3H_RX_EQ_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB3H_RX_EQ_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB3H_RX_EQ_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB3H_RX_EQ_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB3H_RX_EQ_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB3H_RXPHASE_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB3H_RXPHASE_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB3H_RXPHASE_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB3H_RXPHASE_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB3H_RXPHASE_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB3H_RXPHASE_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB3H_RXPHASE_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB3H_RXPHASE_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB3H_RXPHASE_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB3H_RXPHASE_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB3H_RXPHASE_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB3H_RXPHASE_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB3H_RXPHASE_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB3H_RXPHASE_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB3H_RXPHASE_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB3H_RXPHASE_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB3H_RX_VREF_CAL_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB3H_RX_VREF_CAL_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB3H_RX_VREF_CAL_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB3H_RX_VREF_CAL_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB3H_RX_VREF_CAL_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB3H_RX_VREF_CAL_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB3H_RX_VREF_CAL_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB3H_RX_VREF_CAL_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB3H_RX_VREF_CAL_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB3H_RX_VREF_CAL_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB3H_RX_VREF_CAL_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB3H_RX_VREF_CAL_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB3H_RX_VREF_CAL_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB3H_RX_VREF_CAL_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB3H_RX_VREF_CAL_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB3H_RX_VREF_CAL_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB3H_TXBST_PD_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB3H_TXBST_PD_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB3H_TXBST_PD_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB3H_TXBST_PD_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB3H_TXBST_PD_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB3H_TXBST_PD_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB3H_TXBST_PD_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB3H_TXBST_PD_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB3H_TXBST_PD_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB3H_TXBST_PD_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB3H_TXBST_PD_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB3H_TXBST_PD_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB3H_TXBST_PD_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB3H_TXBST_PD_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB3H_TXBST_PD_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB3H_TXBST_PD_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB3H_TXBST_PU_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB3H_TXBST_PU_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB3H_TXBST_PU_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB3H_TXBST_PU_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB3H_TXBST_PU_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB3H_TXBST_PU_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB3H_TXBST_PU_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB3H_TXBST_PU_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB3H_TXBST_PU_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB3H_TXBST_PU_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB3H_TXBST_PU_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB3H_TXBST_PU_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB3H_TXBST_PU_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB3H_TXBST_PU_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB3H_TXBST_PU_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB3H_TXBST_PU_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB3H_TXPHASE_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB3H_TXPHASE_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB3H_TXPHASE_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB3H_TXPHASE_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB3H_TXPHASE_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB3H_TXPHASE_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB3H_TXPHASE_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB3H_TXPHASE_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB3H_TXPHASE_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB3H_TXPHASE_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB3H_TXPHASE_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB3H_TXPHASE_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB3H_TXPHASE_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB3H_TXPHASE_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB3H_TXPHASE_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB3H_TXPHASE_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB3H_TXSLF_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB3H_TXSLF_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB3H_TXSLF_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB3H_TXSLF_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB3H_TXSLF_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB3H_TXSLF_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB3H_TXSLF_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB3H_TXSLF_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB3H_TXSLF_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB3H_TXSLF_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB3H_TXSLF_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB3H_TXSLF_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB3H_TXSLF_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB3H_TXSLF_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB3H_TXSLF_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB3H_TXSLF_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB3L_CLKSEL_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB3L_CLKSEL_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB3L_CLKSEL_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB3L_CLKSEL_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB3L_CLKSEL_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB3L_CLKSEL_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB3L_CLKSEL_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB3L_CLKSEL_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB3L_CLKSEL_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB3L_CLKSEL_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB3L_CLKSEL_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB3L_CLKSEL_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB3L_CLKSEL_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB3L_CLKSEL_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB3L_CLKSEL_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB3L_CLKSEL_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB3L_MISC_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB3L_MISC_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB3L_MISC_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB3L_MISC_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB3L_MISC_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB3L_MISC_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB3L_MISC_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB3L_MISC_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB3L_MISC_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB3L_MISC_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB3L_MISC_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB3L_MISC_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB3L_MISC_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB3L_MISC_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB3L_MISC_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB3L_MISC_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB3L_OFSCAL_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB3L_OFSCAL_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB3L_OFSCAL_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB3L_OFSCAL_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB3L_OFSCAL_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB3L_OFSCAL_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB3L_OFSCAL_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB3L_OFSCAL_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB3L_OFSCAL_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB3L_OFSCAL_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB3L_OFSCAL_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB3L_OFSCAL_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB3L_OFSCAL_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB3L_OFSCAL_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB3L_OFSCAL_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB3L_OFSCAL_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB3L_RX_EQ_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB3L_RX_EQ_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB3L_RX_EQ_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB3L_RX_EQ_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB3L_RX_EQ_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB3L_RX_EQ_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB3L_RX_EQ_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB3L_RX_EQ_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB3L_RX_EQ_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB3L_RX_EQ_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB3L_RX_EQ_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB3L_RX_EQ_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB3L_RX_EQ_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB3L_RX_EQ_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB3L_RX_EQ_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB3L_RX_EQ_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB3L_RXPHASE_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB3L_RXPHASE_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB3L_RXPHASE_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB3L_RXPHASE_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB3L_RXPHASE_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB3L_RXPHASE_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB3L_RXPHASE_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB3L_RXPHASE_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB3L_RXPHASE_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB3L_RXPHASE_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB3L_RXPHASE_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB3L_RXPHASE_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB3L_RXPHASE_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB3L_RXPHASE_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB3L_RXPHASE_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB3L_RXPHASE_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB3L_RX_VREF_CAL_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB3L_RX_VREF_CAL_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB3L_RX_VREF_CAL_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB3L_RX_VREF_CAL_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB3L_RX_VREF_CAL_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB3L_RX_VREF_CAL_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB3L_RX_VREF_CAL_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB3L_RX_VREF_CAL_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB3L_RX_VREF_CAL_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB3L_RX_VREF_CAL_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB3L_RX_VREF_CAL_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB3L_RX_VREF_CAL_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB3L_RX_VREF_CAL_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB3L_RX_VREF_CAL_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB3L_RX_VREF_CAL_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB3L_RX_VREF_CAL_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB3L_TXBST_PD_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB3L_TXBST_PD_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB3L_TXBST_PD_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB3L_TXBST_PD_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB3L_TXBST_PD_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB3L_TXBST_PD_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB3L_TXBST_PD_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB3L_TXBST_PD_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB3L_TXBST_PD_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB3L_TXBST_PD_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB3L_TXBST_PD_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB3L_TXBST_PD_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB3L_TXBST_PD_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB3L_TXBST_PD_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB3L_TXBST_PD_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB3L_TXBST_PD_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB3L_TXBST_PU_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB3L_TXBST_PU_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB3L_TXBST_PU_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB3L_TXBST_PU_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB3L_TXBST_PU_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB3L_TXBST_PU_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB3L_TXBST_PU_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB3L_TXBST_PU_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB3L_TXBST_PU_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB3L_TXBST_PU_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB3L_TXBST_PU_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB3L_TXBST_PU_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB3L_TXBST_PU_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB3L_TXBST_PU_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB3L_TXBST_PU_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB3L_TXBST_PU_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB3L_TXPHASE_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB3L_TXPHASE_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB3L_TXPHASE_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB3L_TXPHASE_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB3L_TXPHASE_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB3L_TXPHASE_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB3L_TXPHASE_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB3L_TXPHASE_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB3L_TXPHASE_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB3L_TXPHASE_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB3L_TXPHASE_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB3L_TXPHASE_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB3L_TXPHASE_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB3L_TXPHASE_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB3L_TXPHASE_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB3L_TXPHASE_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB3L_TXSLF_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB3L_TXSLF_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB3L_TXSLF_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB3L_TXSLF_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB3L_TXSLF_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB3L_TXSLF_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB3L_TXSLF_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB3L_TXSLF_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB3L_TXSLF_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB3L_TXSLF_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB3L_TXSLF_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB3L_TXSLF_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB3L_TXSLF_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB3L_TXSLF_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB3L_TXSLF_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB3L_TXSLF_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_EDC_CDR_PHSIZE_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_EDC_CDR_PHSIZE_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_EDC_CDR_PHSIZE_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_EDC_CDR_PHSIZE_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_EDC_CDR_PHSIZE_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_EDC_CDR_PHSIZE_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_EDC_CDR_PHSIZE_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_EDC_CDR_PHSIZE_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_EDC_CDR_PHSIZE_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_EDC_CDR_PHSIZE_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_EDC_CDR_PHSIZE_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_EDC_CDR_PHSIZE_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_EDC_CDR_PHSIZE_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_EDC_CDR_PHSIZE_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_EDC_CDR_PHSIZE_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_EDC_CDR_PHSIZE_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_EDC_CLKSEL_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_EDC_CLKSEL_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_EDC_CLKSEL_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_EDC_CLKSEL_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_EDC_CLKSEL_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_EDC_CLKSEL_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_EDC_CLKSEL_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_EDC_CLKSEL_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_EDC_CLKSEL_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_EDC_CLKSEL_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_EDC_CLKSEL_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_EDC_CLKSEL_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_EDC_CLKSEL_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_EDC_CLKSEL_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_EDC_CLKSEL_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_EDC_CLKSEL_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_EDC_MISC_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_EDC_MISC_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_EDC_MISC_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_EDC_MISC_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_EDC_MISC_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_EDC_MISC_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_EDC_MISC_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_EDC_MISC_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_EDC_MISC_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_EDC_MISC_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_EDC_MISC_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_EDC_MISC_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_EDC_MISC_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_EDC_MISC_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_EDC_MISC_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_EDC_MISC_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_EDC_OFSCAL_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_EDC_OFSCAL_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_EDC_OFSCAL_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_EDC_OFSCAL_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_EDC_OFSCAL_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_EDC_OFSCAL_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_EDC_OFSCAL_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_EDC_OFSCAL_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_EDC_OFSCAL_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_EDC_OFSCAL_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_EDC_OFSCAL_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_EDC_OFSCAL_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_EDC_OFSCAL_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_EDC_OFSCAL_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_EDC_OFSCAL_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_EDC_OFSCAL_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_EDC_RX_DYN_PM_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_EDC_RX_DYN_PM_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_EDC_RX_DYN_PM_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_EDC_RX_DYN_PM_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_EDC_RX_DYN_PM_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_EDC_RX_DYN_PM_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_EDC_RX_DYN_PM_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_EDC_RX_DYN_PM_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_EDC_RX_DYN_PM_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_EDC_RX_DYN_PM_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_EDC_RX_DYN_PM_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_EDC_RX_DYN_PM_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_EDC_RX_DYN_PM_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_EDC_RX_DYN_PM_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_EDC_RX_DYN_PM_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_EDC_RX_DYN_PM_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_EDC_RX_EQ_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_EDC_RX_EQ_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_EDC_RX_EQ_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_EDC_RX_EQ_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_EDC_RX_EQ_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_EDC_RX_EQ_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_EDC_RX_EQ_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_EDC_RX_EQ_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_EDC_RX_EQ_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_EDC_RX_EQ_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_EDC_RX_EQ_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_EDC_RX_EQ_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_EDC_RX_EQ_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_EDC_RX_EQ_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_EDC_RX_EQ_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_EDC_RX_EQ_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_EDC_RX_EQ_PM_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_EDC_RX_EQ_PM_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_EDC_RX_EQ_PM_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_EDC_RX_EQ_PM_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_EDC_RX_EQ_PM_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_EDC_RX_EQ_PM_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_EDC_RX_EQ_PM_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_EDC_RX_EQ_PM_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_EDC_RX_EQ_PM_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_EDC_RX_EQ_PM_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_EDC_RX_EQ_PM_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_EDC_RX_EQ_PM_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_EDC_RX_EQ_PM_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_EDC_RX_EQ_PM_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_EDC_RX_EQ_PM_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_EDC_RX_EQ_PM_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_EDC_RXPHASE_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_EDC_RXPHASE_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_EDC_RXPHASE_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_EDC_RXPHASE_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_EDC_RXPHASE_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_EDC_RXPHASE_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_EDC_RXPHASE_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_EDC_RXPHASE_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_EDC_RXPHASE_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_EDC_RXPHASE_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_EDC_RXPHASE_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_EDC_RXPHASE_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_EDC_RXPHASE_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_EDC_RXPHASE_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_EDC_RXPHASE_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_EDC_RXPHASE_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_EDC_RX_VREF_CAL_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_EDC_RX_VREF_CAL_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_EDC_RX_VREF_CAL_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_EDC_RX_VREF_CAL_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_EDC_RX_VREF_CAL_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_EDC_RX_VREF_CAL_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_EDC_RX_VREF_CAL_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_EDC_RX_VREF_CAL_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_EDC_RX_VREF_CAL_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_EDC_RX_VREF_CAL_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_EDC_RX_VREF_CAL_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_EDC_RX_VREF_CAL_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_EDC_RX_VREF_CAL_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_EDC_RX_VREF_CAL_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_EDC_RX_VREF_CAL_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_EDC_RX_VREF_CAL_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_EDC_TXBST_PD_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_EDC_TXBST_PD_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_EDC_TXBST_PD_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_EDC_TXBST_PD_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_EDC_TXBST_PD_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_EDC_TXBST_PD_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_EDC_TXBST_PD_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_EDC_TXBST_PD_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_EDC_TXBST_PD_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_EDC_TXBST_PD_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_EDC_TXBST_PD_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_EDC_TXBST_PD_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_EDC_TXBST_PD_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_EDC_TXBST_PD_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_EDC_TXBST_PD_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_EDC_TXBST_PD_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_EDC_TXBST_PU_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_EDC_TXBST_PU_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_EDC_TXBST_PU_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_EDC_TXBST_PU_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_EDC_TXBST_PU_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_EDC_TXBST_PU_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_EDC_TXBST_PU_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_EDC_TXBST_PU_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_EDC_TXBST_PU_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_EDC_TXBST_PU_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_EDC_TXBST_PU_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_EDC_TXBST_PU_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_EDC_TXBST_PU_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_EDC_TXBST_PU_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_EDC_TXBST_PU_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_EDC_TXBST_PU_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_EDC_TXPHASE_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_EDC_TXPHASE_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_EDC_TXPHASE_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_EDC_TXPHASE_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_EDC_TXPHASE_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_EDC_TXPHASE_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_EDC_TXPHASE_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_EDC_TXPHASE_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_EDC_TXPHASE_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_EDC_TXPHASE_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_EDC_TXPHASE_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_EDC_TXPHASE_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_EDC_TXPHASE_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_EDC_TXPHASE_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_EDC_TXPHASE_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_EDC_TXPHASE_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_EDC_TXSLF_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_EDC_TXSLF_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_EDC_TXSLF_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_EDC_TXSLF_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_EDC_TXSLF_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_EDC_TXSLF_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_EDC_TXSLF_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_EDC_TXSLF_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_EDC_TXSLF_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_EDC_TXSLF_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_EDC_TXSLF_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_EDC_TXSLF_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_EDC_TXSLF_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_EDC_TXSLF_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_EDC_TXSLF_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_EDC_TXSLF_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_100__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_100__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_100__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_100__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_100__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_100__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_100__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_100__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_101__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_101__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_101__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_101__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_101__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_101__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_101__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_101__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_102__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_102__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_102__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_102__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_102__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_102__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_102__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_102__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_103__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_103__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_103__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_103__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_103__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_103__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_103__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_103__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_104__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_104__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_104__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_104__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_104__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_104__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_104__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_104__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_105__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_105__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_105__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_105__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_105__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_105__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_105__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_105__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_106__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_106__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_106__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_106__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_106__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_106__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_106__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_106__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_107__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_107__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_107__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_107__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_107__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_107__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_107__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_107__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_108__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_108__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_108__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_108__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_108__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_108__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_108__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_108__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_109__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_109__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_109__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_109__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_109__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_109__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_109__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_109__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_10__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_10__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_10__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_10__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_10__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_10__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_10__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_10__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_110__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_110__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_110__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_110__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_110__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_110__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_110__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_110__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_111__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_111__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_111__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_111__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_111__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_111__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_111__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_111__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_112__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_112__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_112__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_112__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_112__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_112__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_112__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_112__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_113__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_113__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_113__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_113__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_113__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_113__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_113__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_113__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_114__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_114__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_114__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_114__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_114__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_114__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_114__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_114__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_115__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_115__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_115__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_115__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_115__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_115__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_115__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_115__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_116__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_116__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_116__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_116__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_116__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_116__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_116__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_116__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_117__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_117__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_117__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_117__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_117__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_117__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_117__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_117__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_118__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_118__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_118__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_118__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_118__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_118__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_118__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_118__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_119__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_119__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_119__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_119__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_119__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_119__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_119__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_119__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_11__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_11__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_11__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_11__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_11__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_11__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_11__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_11__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_120__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_120__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_120__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_120__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_120__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_120__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_120__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_120__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_121__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_121__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_121__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_121__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_121__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_121__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_121__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_121__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_122__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_122__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_122__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_122__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_122__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_122__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_122__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_122__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_123__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_123__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_123__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_123__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_123__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_123__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_123__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_123__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_124__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_124__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_124__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_124__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_124__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_124__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_124__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_124__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_125__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_125__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_125__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_125__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_125__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_125__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_125__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_125__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_126__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_126__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_126__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_126__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_126__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_126__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_126__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_126__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_127__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_127__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_127__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_127__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_127__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_127__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_127__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_127__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_128__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_128__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_128__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_128__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_128__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_128__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_128__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_128__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_129__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_129__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_129__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_129__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_129__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_129__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_129__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_129__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_12__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_12__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_12__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_12__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_12__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_12__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_12__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_12__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_130__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_130__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_130__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_130__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_130__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_130__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_130__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_130__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_131__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_131__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_131__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_131__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_131__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_131__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_131__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_131__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_132__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_132__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_132__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_132__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_132__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_132__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_132__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_132__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_133__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_133__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_133__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_133__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_133__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_133__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_133__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_133__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_134__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_134__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_134__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_134__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_134__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_134__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_134__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_134__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_135__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_135__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_135__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_135__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_135__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_135__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_135__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_135__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_136__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_136__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_136__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_136__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_136__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_136__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_136__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_136__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_137__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_137__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_137__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_137__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_137__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_137__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_137__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_137__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_138__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_138__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_138__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_138__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_138__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_138__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_138__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_138__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_139__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_139__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_139__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_139__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_139__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_139__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_139__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_139__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_13__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_13__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_13__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_13__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_13__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_13__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_13__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_13__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_140__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_140__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_140__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_140__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_140__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_140__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_140__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_140__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_141__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_141__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_141__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_141__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_141__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_141__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_141__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_141__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_142__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_142__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_142__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_142__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_142__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_142__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_142__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_142__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_143__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_143__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_143__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_143__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_143__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_143__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_143__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_143__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_144__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_144__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_144__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_144__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_144__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_144__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_144__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_144__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_145__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_145__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_145__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_145__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_145__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_145__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_145__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_145__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_146__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_146__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_146__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_146__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_146__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_146__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_146__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_146__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_147__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_147__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_147__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_147__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_147__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_147__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_147__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_147__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_148__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_148__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_148__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_148__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_148__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_148__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_148__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_148__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_149__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_149__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_149__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_149__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_149__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_149__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_149__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_149__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_14__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_14__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_14__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_14__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_14__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_14__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_14__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_14__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_150__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_150__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_150__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_150__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_150__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_150__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_150__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_150__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_151__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_151__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_151__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_151__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_151__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_151__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_151__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_151__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_152__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_152__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_152__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_152__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_152__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_152__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_152__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_152__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_153__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_153__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_153__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_153__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_153__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_153__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_153__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_153__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_154__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_154__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_154__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_154__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_154__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_154__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_154__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_154__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_155__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_155__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_155__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_155__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_155__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_155__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_155__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_155__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_156__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_156__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_156__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_156__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_156__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_156__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_156__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_156__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_157__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_157__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_157__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_157__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_157__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_157__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_157__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_157__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_158__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_158__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_158__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_158__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_158__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_158__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_158__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_158__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_159__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_159__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_159__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_159__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_159__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_159__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_159__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_159__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_15__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_15__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_15__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_15__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_15__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_15__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_15__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_15__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_16__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_16__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_16__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_16__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_16__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_16__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_16__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_16__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_17__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_17__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_17__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_17__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_17__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_17__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_17__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_17__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_18__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_18__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_18__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_18__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_18__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_18__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_18__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_18__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_19__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_19__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_19__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_19__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_19__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_19__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_19__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_19__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_20__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_20__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_20__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_20__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_20__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_20__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_20__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_20__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_21__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_21__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_21__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_21__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_21__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_21__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_21__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_21__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_22__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_22__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_22__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_22__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_22__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_22__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_22__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_22__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_23__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_23__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_23__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_23__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_23__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_23__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_23__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_23__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_24__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_24__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_24__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_24__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_24__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_24__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_24__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_24__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_25__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_25__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_25__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_25__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_25__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_25__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_25__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_25__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_26__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_26__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_26__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_26__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_26__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_26__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_26__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_26__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_27__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_27__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_27__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_27__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_27__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_27__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_27__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_27__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_28__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_28__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_28__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_28__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_28__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_28__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_28__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_28__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_29__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_29__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_29__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_29__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_29__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_29__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_29__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_29__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_2__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_2__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_2__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_2__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_2__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_2__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_2__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_2__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_30__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_30__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_30__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_30__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_30__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_30__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_30__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_30__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_31__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_31__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_31__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_31__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_31__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_31__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_31__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_31__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_32__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_32__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_32__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_32__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_32__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_32__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_32__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_32__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_33__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_33__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_33__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_33__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_33__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_33__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_33__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_33__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_34__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_34__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_34__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_34__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_34__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_34__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_34__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_34__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_35__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_35__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_35__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_35__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_35__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_35__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_35__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_35__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_36__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_36__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_36__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_36__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_36__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_36__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_36__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_36__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_37__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_37__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_37__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_37__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_37__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_37__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_37__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_37__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_38__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_38__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_38__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_38__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_38__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_38__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_38__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_38__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_39__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_39__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_39__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_39__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_39__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_39__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_39__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_39__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_3__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_3__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_3__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_3__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_3__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_3__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_3__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_3__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_40__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_40__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_40__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_40__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_40__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_40__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_40__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_40__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_41__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_41__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_41__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_41__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_41__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_41__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_41__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_41__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_42__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_42__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_42__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_42__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_42__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_42__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_42__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_42__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_43__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_43__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_43__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_43__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_43__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_43__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_43__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_43__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_44__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_44__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_44__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_44__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_44__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_44__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_44__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_44__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_45__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_45__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_45__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_45__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_45__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_45__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_45__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_45__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_46__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_46__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_46__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_46__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_46__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_46__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_46__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_46__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_47__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_47__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_47__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_47__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_47__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_47__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_47__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_47__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_48__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_48__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_48__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_48__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_48__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_48__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_48__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_48__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_49__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_49__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_49__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_49__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_49__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_49__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_49__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_49__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_4__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_4__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_4__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_4__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_4__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_4__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_4__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_4__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_50__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_50__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_50__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_50__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_50__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_50__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_50__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_50__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_51__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_51__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_51__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_51__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_51__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_51__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_51__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_51__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_52__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_52__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_52__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_52__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_52__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_52__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_52__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_52__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_53__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_53__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_53__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_53__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_53__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_53__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_53__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_53__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_54__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_54__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_54__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_54__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_54__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_54__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_54__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_54__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_55__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_55__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_55__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_55__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_55__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_55__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_55__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_55__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_56__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_56__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_56__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_56__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_56__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_56__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_56__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_56__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_57__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_57__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_57__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_57__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_57__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_57__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_57__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_57__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_58__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_58__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_58__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_58__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_58__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_58__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_58__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_58__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_59__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_59__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_59__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_59__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_59__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_59__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_59__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_59__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_5__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_5__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_5__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_5__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_5__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_5__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_5__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_5__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_60__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_60__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_60__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_60__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_60__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_60__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_60__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_60__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_61__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_61__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_61__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_61__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_61__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_61__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_61__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_61__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_62__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_62__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_62__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_62__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_62__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_62__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_62__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_62__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_63__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_63__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_63__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_63__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_63__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_63__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_63__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_63__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_64__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_64__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_64__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_64__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_64__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_64__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_64__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_64__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_65__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_65__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_65__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_65__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_65__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_65__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_65__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_65__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_66__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_66__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_66__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_66__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_66__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_66__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_66__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_66__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_67__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_67__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_67__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_67__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_67__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_67__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_67__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_67__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_68__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_68__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_68__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_68__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_68__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_68__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_68__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_68__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_69__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_69__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_69__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_69__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_69__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_69__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_69__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_69__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_6__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_6__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_6__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_6__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_6__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_6__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_6__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_6__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_70__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_70__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_70__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_70__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_70__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_70__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_70__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_70__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_71__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_71__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_71__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_71__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_71__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_71__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_71__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_71__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_72__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_72__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_72__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_72__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_72__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_72__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_72__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_72__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_73__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_73__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_73__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_73__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_73__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_73__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_73__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_73__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_74__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_74__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_74__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_74__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_74__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_74__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_74__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_74__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_75__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_75__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_75__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_75__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_75__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_75__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_75__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_75__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_76__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_76__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_76__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_76__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_76__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_76__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_76__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_76__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_77__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_77__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_77__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_77__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_77__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_77__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_77__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_77__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_78__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_78__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_78__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_78__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_78__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_78__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_78__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_78__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_79__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_79__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_79__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_79__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_79__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_79__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_79__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_79__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_7__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_7__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_7__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_7__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_7__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_7__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_7__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_7__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_80__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_80__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_80__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_80__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_80__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_80__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_80__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_80__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_81__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_81__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_81__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_81__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_81__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_81__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_81__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_81__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_82__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_82__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_82__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_82__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_82__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_82__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_82__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_82__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_83__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_83__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_83__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_83__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_83__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_83__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_83__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_83__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_84__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_84__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_84__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_84__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_84__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_84__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_84__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_84__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_85__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_85__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_85__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_85__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_85__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_85__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_85__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_85__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_86__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_86__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_86__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_86__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_86__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_86__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_86__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_86__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_87__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_87__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_87__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_87__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_87__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_87__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_87__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_87__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_88__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_88__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_88__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_88__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_88__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_88__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_88__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_88__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_89__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_89__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_89__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_89__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_89__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_89__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_89__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_89__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_8__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_8__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_8__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_8__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_8__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_8__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_8__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_8__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_90__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_90__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_90__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_90__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_90__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_90__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_90__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_90__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_91__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_91__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_91__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_91__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_91__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_91__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_91__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_91__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_92__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_92__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_92__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_92__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_92__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_92__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_92__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_92__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_93__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_93__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_93__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_93__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_93__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_93__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_93__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_93__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_94__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_94__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_94__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_94__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_94__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_94__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_94__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_94__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_95__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_95__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_95__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_95__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_95__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_95__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_95__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_95__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_96__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_96__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_96__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_96__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_96__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_96__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_96__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_96__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_97__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_97__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_97__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_97__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_97__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_97__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_97__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_97__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_98__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_98__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_98__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_98__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_98__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_98__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_98__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_98__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_99__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_99__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_99__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_99__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_99__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_99__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_99__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_99__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_9__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_9__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_9__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_9__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_9__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_9__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_9__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_9__VALUE3__SHIFT 0x00000018
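+/*
+ * Usage sketch (illustrative only, not part of the generated header):
+ * each MC_IO_DEBUG_* register packs four byte-wide fields, and the
+ * MASK/SHIFT pairs above are consumed with the usual extract and
+ * read-modify-write idioms. The REG_FIELD_GET/REG_FIELD_SET helper
+ * names below are hypothetical, shown only to illustrate the pattern.
+ */
+#define REG_FIELD_GET(reg_val, reg, field) \
+	(((reg_val) & reg##__##field##_MASK) >> reg##__##field##__SHIFT)
+#define REG_FIELD_SET(reg_val, reg, field, val) \
+	(((reg_val) & ~reg##__##field##_MASK) | \
+	 (((u32)(val) << reg##__##field##__SHIFT) & reg##__##field##_MASK))
+
+/*
+ * Example: pull byte 1 out of a raw MC_IO_DEBUG_UP_14 value and write
+ * it back incremented.
+ *
+ *	u32 b1 = REG_FIELD_GET(raw, MC_IO_DEBUG_UP_14, VALUE1);
+ *	raw = REG_FIELD_SET(raw, MC_IO_DEBUG_UP_14, VALUE1, b1 + 1);
+ */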
+#define MC_IO_DEBUG_WCDR_CDR_PHSIZE_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_WCDR_CDR_PHSIZE_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_WCDR_CDR_PHSIZE_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_WCDR_CDR_PHSIZE_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_WCDR_CDR_PHSIZE_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_WCDR_CDR_PHSIZE_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_WCDR_CDR_PHSIZE_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_WCDR_CDR_PHSIZE_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_WCDR_CDR_PHSIZE_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_WCDR_CDR_PHSIZE_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_WCDR_CDR_PHSIZE_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_WCDR_CDR_PHSIZE_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_WCDR_CDR_PHSIZE_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_WCDR_CDR_PHSIZE_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_WCDR_CDR_PHSIZE_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_WCDR_CDR_PHSIZE_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_WCDR_CLKSEL_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_WCDR_CLKSEL_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_WCDR_CLKSEL_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_WCDR_CLKSEL_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_WCDR_CLKSEL_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_WCDR_CLKSEL_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_WCDR_CLKSEL_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_WCDR_CLKSEL_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_WCDR_CLKSEL_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_WCDR_CLKSEL_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_WCDR_CLKSEL_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_WCDR_CLKSEL_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_WCDR_CLKSEL_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_WCDR_CLKSEL_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_WCDR_CLKSEL_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_WCDR_CLKSEL_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_WCDR_MISC_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_WCDR_MISC_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_WCDR_MISC_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_WCDR_MISC_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_WCDR_MISC_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_WCDR_MISC_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_WCDR_MISC_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_WCDR_MISC_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_WCDR_MISC_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_WCDR_MISC_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_WCDR_MISC_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_WCDR_MISC_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_WCDR_MISC_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_WCDR_MISC_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_WCDR_MISC_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_WCDR_MISC_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_WCDR_OFSCAL_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_WCDR_OFSCAL_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_WCDR_OFSCAL_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_WCDR_OFSCAL_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_WCDR_OFSCAL_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_WCDR_OFSCAL_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_WCDR_OFSCAL_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_WCDR_OFSCAL_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_WCDR_OFSCAL_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_WCDR_OFSCAL_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_WCDR_OFSCAL_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_WCDR_OFSCAL_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_WCDR_OFSCAL_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_WCDR_OFSCAL_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_WCDR_OFSCAL_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_WCDR_OFSCAL_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_WCDR_RX_DYN_PM_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_WCDR_RX_DYN_PM_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_WCDR_RX_DYN_PM_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_WCDR_RX_DYN_PM_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_WCDR_RX_DYN_PM_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_WCDR_RX_DYN_PM_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_WCDR_RX_DYN_PM_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_WCDR_RX_DYN_PM_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_WCDR_RX_DYN_PM_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_WCDR_RX_DYN_PM_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_WCDR_RX_DYN_PM_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_WCDR_RX_DYN_PM_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_WCDR_RX_DYN_PM_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_WCDR_RX_DYN_PM_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_WCDR_RX_DYN_PM_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_WCDR_RX_DYN_PM_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_WCDR_RX_EQ_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_WCDR_RX_EQ_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_WCDR_RX_EQ_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_WCDR_RX_EQ_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_WCDR_RX_EQ_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_WCDR_RX_EQ_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_WCDR_RX_EQ_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_WCDR_RX_EQ_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_WCDR_RX_EQ_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_WCDR_RX_EQ_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_WCDR_RX_EQ_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_WCDR_RX_EQ_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_WCDR_RX_EQ_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_WCDR_RX_EQ_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_WCDR_RX_EQ_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_WCDR_RX_EQ_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_WCDR_RX_EQ_PM_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_WCDR_RX_EQ_PM_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_WCDR_RX_EQ_PM_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_WCDR_RX_EQ_PM_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_WCDR_RX_EQ_PM_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_WCDR_RX_EQ_PM_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_WCDR_RX_EQ_PM_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_WCDR_RX_EQ_PM_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_WCDR_RX_EQ_PM_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_WCDR_RX_EQ_PM_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_WCDR_RX_EQ_PM_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_WCDR_RX_EQ_PM_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_WCDR_RX_EQ_PM_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_WCDR_RX_EQ_PM_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_WCDR_RX_EQ_PM_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_WCDR_RX_EQ_PM_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_WCDR_RXPHASE_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_WCDR_RXPHASE_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_WCDR_RXPHASE_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_WCDR_RXPHASE_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_WCDR_RXPHASE_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_WCDR_RXPHASE_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_WCDR_RXPHASE_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_WCDR_RXPHASE_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_WCDR_RXPHASE_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_WCDR_RXPHASE_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_WCDR_RXPHASE_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_WCDR_RXPHASE_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_WCDR_RXPHASE_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_WCDR_RXPHASE_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_WCDR_RXPHASE_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_WCDR_RXPHASE_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_WCDR_RX_VREF_CAL_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_WCDR_RX_VREF_CAL_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_WCDR_RX_VREF_CAL_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_WCDR_RX_VREF_CAL_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_WCDR_RX_VREF_CAL_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_WCDR_RX_VREF_CAL_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_WCDR_RX_VREF_CAL_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_WCDR_RX_VREF_CAL_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_WCDR_RX_VREF_CAL_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_WCDR_RX_VREF_CAL_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_WCDR_RX_VREF_CAL_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_WCDR_RX_VREF_CAL_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_WCDR_RX_VREF_CAL_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_WCDR_RX_VREF_CAL_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_WCDR_RX_VREF_CAL_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_WCDR_RX_VREF_CAL_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_WCDR_TXBST_PD_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_WCDR_TXBST_PD_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_WCDR_TXBST_PD_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_WCDR_TXBST_PD_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_WCDR_TXBST_PD_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_WCDR_TXBST_PD_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_WCDR_TXBST_PD_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_WCDR_TXBST_PD_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_WCDR_TXBST_PD_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_WCDR_TXBST_PD_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_WCDR_TXBST_PD_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_WCDR_TXBST_PD_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_WCDR_TXBST_PD_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_WCDR_TXBST_PD_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_WCDR_TXBST_PD_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_WCDR_TXBST_PD_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_WCDR_TXBST_PU_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_WCDR_TXBST_PU_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_WCDR_TXBST_PU_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_WCDR_TXBST_PU_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_WCDR_TXBST_PU_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_WCDR_TXBST_PU_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_WCDR_TXBST_PU_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_WCDR_TXBST_PU_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_WCDR_TXBST_PU_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_WCDR_TXBST_PU_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_WCDR_TXBST_PU_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_WCDR_TXBST_PU_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_WCDR_TXBST_PU_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_WCDR_TXBST_PU_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_WCDR_TXBST_PU_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_WCDR_TXBST_PU_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_WCDR_TXPHASE_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_WCDR_TXPHASE_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_WCDR_TXPHASE_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_WCDR_TXPHASE_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_WCDR_TXPHASE_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_WCDR_TXPHASE_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_WCDR_TXPHASE_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_WCDR_TXPHASE_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_WCDR_TXPHASE_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_WCDR_TXPHASE_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_WCDR_TXPHASE_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_WCDR_TXPHASE_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_WCDR_TXPHASE_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_WCDR_TXPHASE_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_WCDR_TXPHASE_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_WCDR_TXPHASE_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_WCDR_TXSLF_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_WCDR_TXSLF_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_WCDR_TXSLF_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_WCDR_TXSLF_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_WCDR_TXSLF_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_WCDR_TXSLF_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_WCDR_TXSLF_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_WCDR_TXSLF_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_WCDR_TXSLF_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_WCDR_TXSLF_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_WCDR_TXSLF_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_WCDR_TXSLF_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_WCDR_TXSLF_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_WCDR_TXSLF_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_WCDR_TXSLF_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_WCDR_TXSLF_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_WCK_CLKSEL_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_WCK_CLKSEL_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_WCK_CLKSEL_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_WCK_CLKSEL_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_WCK_CLKSEL_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_WCK_CLKSEL_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_WCK_CLKSEL_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_WCK_CLKSEL_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_WCK_CLKSEL_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_WCK_CLKSEL_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_WCK_CLKSEL_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_WCK_CLKSEL_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_WCK_CLKSEL_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_WCK_CLKSEL_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_WCK_CLKSEL_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_WCK_CLKSEL_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_WCK_MISC_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_WCK_MISC_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_WCK_MISC_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_WCK_MISC_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_WCK_MISC_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_WCK_MISC_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_WCK_MISC_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_WCK_MISC_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_WCK_MISC_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_WCK_MISC_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_WCK_MISC_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_WCK_MISC_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_WCK_MISC_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_WCK_MISC_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_WCK_MISC_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_WCK_MISC_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_WCK_OFSCAL_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_WCK_OFSCAL_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_WCK_OFSCAL_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_WCK_OFSCAL_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_WCK_OFSCAL_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_WCK_OFSCAL_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_WCK_OFSCAL_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_WCK_OFSCAL_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_WCK_OFSCAL_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_WCK_OFSCAL_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_WCK_OFSCAL_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_WCK_OFSCAL_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_WCK_OFSCAL_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_WCK_OFSCAL_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_WCK_OFSCAL_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_WCK_OFSCAL_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_WCK_RX_EQ_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_WCK_RX_EQ_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_WCK_RX_EQ_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_WCK_RX_EQ_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_WCK_RX_EQ_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_WCK_RX_EQ_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_WCK_RX_EQ_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_WCK_RX_EQ_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_WCK_RX_EQ_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_WCK_RX_EQ_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_WCK_RX_EQ_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_WCK_RX_EQ_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_WCK_RX_EQ_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_WCK_RX_EQ_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_WCK_RX_EQ_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_WCK_RX_EQ_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_WCK_RXPHASE_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_WCK_RXPHASE_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_WCK_RXPHASE_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_WCK_RXPHASE_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_WCK_RXPHASE_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_WCK_RXPHASE_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_WCK_RXPHASE_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_WCK_RXPHASE_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_WCK_RXPHASE_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_WCK_RXPHASE_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_WCK_RXPHASE_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_WCK_RXPHASE_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_WCK_RXPHASE_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_WCK_RXPHASE_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_WCK_RXPHASE_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_WCK_RXPHASE_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_WCK_RX_VREF_CAL_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_WCK_RX_VREF_CAL_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_WCK_RX_VREF_CAL_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_WCK_RX_VREF_CAL_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_WCK_RX_VREF_CAL_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_WCK_RX_VREF_CAL_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_WCK_RX_VREF_CAL_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_WCK_RX_VREF_CAL_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_WCK_RX_VREF_CAL_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_WCK_RX_VREF_CAL_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_WCK_RX_VREF_CAL_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_WCK_RX_VREF_CAL_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_WCK_RX_VREF_CAL_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_WCK_RX_VREF_CAL_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_WCK_RX_VREF_CAL_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_WCK_RX_VREF_CAL_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_WCK_TXBST_PD_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_WCK_TXBST_PD_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_WCK_TXBST_PD_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_WCK_TXBST_PD_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_WCK_TXBST_PD_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_WCK_TXBST_PD_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_WCK_TXBST_PD_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_WCK_TXBST_PD_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_WCK_TXBST_PD_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_WCK_TXBST_PD_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_WCK_TXBST_PD_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_WCK_TXBST_PD_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_WCK_TXBST_PD_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_WCK_TXBST_PD_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_WCK_TXBST_PD_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_WCK_TXBST_PD_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_WCK_TXBST_PU_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_WCK_TXBST_PU_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_WCK_TXBST_PU_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_WCK_TXBST_PU_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_WCK_TXBST_PU_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_WCK_TXBST_PU_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_WCK_TXBST_PU_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_WCK_TXBST_PU_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_WCK_TXBST_PU_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_WCK_TXBST_PU_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_WCK_TXBST_PU_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_WCK_TXBST_PU_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_WCK_TXBST_PU_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_WCK_TXBST_PU_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_WCK_TXBST_PU_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_WCK_TXBST_PU_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_WCK_TXPHASE_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_WCK_TXPHASE_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_WCK_TXPHASE_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_WCK_TXPHASE_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_WCK_TXPHASE_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_WCK_TXPHASE_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_WCK_TXPHASE_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_WCK_TXPHASE_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_WCK_TXPHASE_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_WCK_TXPHASE_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_WCK_TXPHASE_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_WCK_TXPHASE_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_WCK_TXPHASE_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_WCK_TXPHASE_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_WCK_TXPHASE_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_WCK_TXPHASE_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_WCK_TXSLF_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_WCK_TXSLF_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_WCK_TXSLF_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_WCK_TXSLF_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_WCK_TXSLF_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_WCK_TXSLF_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_WCK_TXSLF_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_WCK_TXSLF_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_WCK_TXSLF_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_WCK_TXSLF_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_WCK_TXSLF_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_WCK_TXSLF_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_WCK_TXSLF_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_WCK_TXSLF_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_WCK_TXSLF_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_WCK_TXSLF_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DPHY_STR_CNTL_D0__CAL_SEL_MASK 0x0c000000L
+#define MC_IO_DPHY_STR_CNTL_D0__CAL_SEL__SHIFT 0x0000001a
+#define MC_IO_DPHY_STR_CNTL_D0__LOAD_D_STR_MASK 0x10000000L
+#define MC_IO_DPHY_STR_CNTL_D0__LOAD_D_STR__SHIFT 0x0000001c
+#define MC_IO_DPHY_STR_CNTL_D0__LOAD_S_STR_MASK 0x20000000L
+#define MC_IO_DPHY_STR_CNTL_D0__LOAD_S_STR__SHIFT 0x0000001d
+#define MC_IO_DPHY_STR_CNTL_D0__NSTR_OFF_D_MASK 0x00000fc0L
+#define MC_IO_DPHY_STR_CNTL_D0__NSTR_OFF_D__SHIFT 0x00000006
+#define MC_IO_DPHY_STR_CNTL_D0__NSTR_OFF_S_MASK 0x00fc0000L
+#define MC_IO_DPHY_STR_CNTL_D0__NSTR_OFF_S__SHIFT 0x00000012
+#define MC_IO_DPHY_STR_CNTL_D0__PSTR_OFF_D_MASK 0x0000003fL
+#define MC_IO_DPHY_STR_CNTL_D0__PSTR_OFF_D__SHIFT 0x00000000
+#define MC_IO_DPHY_STR_CNTL_D0__PSTR_OFF_S_MASK 0x0003f000L
+#define MC_IO_DPHY_STR_CNTL_D0__PSTR_OFF_S__SHIFT 0x0000000c
+#define MC_IO_DPHY_STR_CNTL_D0__USE_D_CAL_MASK 0x01000000L
+#define MC_IO_DPHY_STR_CNTL_D0__USE_D_CAL__SHIFT 0x00000018
+#define MC_IO_DPHY_STR_CNTL_D0__USE_S_CAL_MASK 0x02000000L
+#define MC_IO_DPHY_STR_CNTL_D0__USE_S_CAL__SHIFT 0x00000019
+#define MC_IO_DPHY_STR_CNTL_D1__CAL_SEL_MASK 0x0c000000L
+#define MC_IO_DPHY_STR_CNTL_D1__CAL_SEL__SHIFT 0x0000001a
+#define MC_IO_DPHY_STR_CNTL_D1__LOAD_D_STR_MASK 0x10000000L
+#define MC_IO_DPHY_STR_CNTL_D1__LOAD_D_STR__SHIFT 0x0000001c
+#define MC_IO_DPHY_STR_CNTL_D1__LOAD_S_STR_MASK 0x20000000L
+#define MC_IO_DPHY_STR_CNTL_D1__LOAD_S_STR__SHIFT 0x0000001d
+#define MC_IO_DPHY_STR_CNTL_D1__NSTR_OFF_D_MASK 0x00000fc0L
+#define MC_IO_DPHY_STR_CNTL_D1__NSTR_OFF_D__SHIFT 0x00000006
+#define MC_IO_DPHY_STR_CNTL_D1__NSTR_OFF_S_MASK 0x00fc0000L
+#define MC_IO_DPHY_STR_CNTL_D1__NSTR_OFF_S__SHIFT 0x00000012
+#define MC_IO_DPHY_STR_CNTL_D1__PSTR_OFF_D_MASK 0x0000003fL
+#define MC_IO_DPHY_STR_CNTL_D1__PSTR_OFF_D__SHIFT 0x00000000
+#define MC_IO_DPHY_STR_CNTL_D1__PSTR_OFF_S_MASK 0x0003f000L
+#define MC_IO_DPHY_STR_CNTL_D1__PSTR_OFF_S__SHIFT 0x0000000c
+#define MC_IO_DPHY_STR_CNTL_D1__USE_D_CAL_MASK 0x01000000L
+#define MC_IO_DPHY_STR_CNTL_D1__USE_D_CAL__SHIFT 0x00000018
+#define MC_IO_DPHY_STR_CNTL_D1__USE_S_CAL_MASK 0x02000000L
+#define MC_IO_DPHY_STR_CNTL_D1__USE_S_CAL__SHIFT 0x00000019
+#define MC_IO_PAD_CNTL__ATBEN_MASK 0x3f000000L
+#define MC_IO_PAD_CNTL__ATBEN__SHIFT 0x00000018
+#define MC_IO_PAD_CNTL__ATBSEL_D0_MASK 0x80000000L
+#define MC_IO_PAD_CNTL__ATBSEL_D0__SHIFT 0x0000001f
+#define MC_IO_PAD_CNTL__ATBSEL_D1_MASK 0x40000000L
+#define MC_IO_PAD_CNTL__ATBSEL_D1__SHIFT 0x0000001e
+#define MC_IO_PAD_CNTL__ATBSEL_MASK 0x00f00000L
+#define MC_IO_PAD_CNTL__ATBSEL__SHIFT 0x00000014
+#define MC_IO_PAD_CNTL_D0__CK_AUTO_EN_MASK 0x00100000L
+#define MC_IO_PAD_CNTL_D0__CK_AUTO_EN__SHIFT 0x00000014
+#define MC_IO_PAD_CNTL_D0__CK_DELAY_N_MASK 0x00c00000L
+#define MC_IO_PAD_CNTL_D0__CK_DELAY_N__SHIFT 0x00000016
+#define MC_IO_PAD_CNTL_D0__CK_DELAY_P_MASK 0x03000000L
+#define MC_IO_PAD_CNTL_D0__CK_DELAY_P__SHIFT 0x00000018
+#define MC_IO_PAD_CNTL_D0__CK_DELAY_SEL_MASK 0x00200000L
+#define MC_IO_PAD_CNTL_D0__CK_DELAY_SEL__SHIFT 0x00000015
+#define MC_IO_PAD_CNTL_D0__DELAY_ADR_SYNC_MASK 0x00000010L
+#define MC_IO_PAD_CNTL_D0__DELAY_ADR_SYNC__SHIFT 0x00000004
+#define MC_IO_PAD_CNTL_D0__DELAY_CLK_SYNC_MASK 0x00000004L
+#define MC_IO_PAD_CNTL_D0__DELAY_CLK_SYNC__SHIFT 0x00000002
+#define MC_IO_PAD_CNTL_D0__DELAY_CMD_SYNC_MASK 0x00000008L
+#define MC_IO_PAD_CNTL_D0__DELAY_CMD_SYNC__SHIFT 0x00000003
+#define MC_IO_PAD_CNTL_D0__DIFF_STR_MASK 0x20000000L
+#define MC_IO_PAD_CNTL_D0__DIFF_STR__SHIFT 0x0000001d
+#define MC_IO_PAD_CNTL_D0__DISABLE_ADR_MASK 0x00002000L
+#define MC_IO_PAD_CNTL_D0__DISABLE_ADR__SHIFT 0x0000000d
+#define MC_IO_PAD_CNTL_D0__DISABLE_CMD_MASK 0x00001000L
+#define MC_IO_PAD_CNTL_D0__DISABLE_CMD__SHIFT 0x0000000c
+#define MC_IO_PAD_CNTL_D0__EN_RD_STR_DLY_MASK 0x00000800L
+#define MC_IO_PAD_CNTL_D0__EN_RD_STR_DLY__SHIFT 0x0000000b
+#define MC_IO_PAD_CNTL_D0__FORCE_EN_RD_STR_MASK 0x00000400L
+#define MC_IO_PAD_CNTL_D0__FORCE_EN_RD_STR__SHIFT 0x0000000a
+#define MC_IO_PAD_CNTL_D0__GDDR_PWRON_MASK 0x40000000L
+#define MC_IO_PAD_CNTL_D0__GDDR_PWRON__SHIFT 0x0000001e
+#define MC_IO_PAD_CNTL_D0__MEM_FALL_OUT_ADR_MASK 0x00000200L
+#define MC_IO_PAD_CNTL_D0__MEM_FALL_OUT_ADR__SHIFT 0x00000009
+#define MC_IO_PAD_CNTL_D0__MEM_FALL_OUT_CLK_MASK 0x00000080L
+#define MC_IO_PAD_CNTL_D0__MEM_FALL_OUT_CLK__SHIFT 0x00000007
+#define MC_IO_PAD_CNTL_D0__MEM_FALL_OUT_CMD_MASK 0x00000100L
+#define MC_IO_PAD_CNTL_D0__MEM_FALL_OUT_CMD__SHIFT 0x00000008
+#define MC_IO_PAD_CNTL_D0__TXPWROFF_CKE_MASK 0x08000000L
+#define MC_IO_PAD_CNTL_D0__TXPWROFF_CKE__SHIFT 0x0000001b
+#define MC_IO_PAD_CNTL_D0__TXPWROFF_CLK_MASK 0x80000000L
+#define MC_IO_PAD_CNTL_D0__TXPWROFF_CLK__SHIFT 0x0000001f
+#define MC_IO_PAD_CNTL_D0__UNI_STR_MASK 0x10000000L
+#define MC_IO_PAD_CNTL_D0__UNI_STR__SHIFT 0x0000001c
+#define MC_IO_PAD_CNTL_D0__VREFI_EN_MASK 0x00004000L
+#define MC_IO_PAD_CNTL_D0__VREFI_EN__SHIFT 0x0000000e
+#define MC_IO_PAD_CNTL_D0__VREFI_SEL_MASK 0x000f8000L
+#define MC_IO_PAD_CNTL_D0__VREFI_SEL__SHIFT 0x0000000f
+#define MC_IO_PAD_CNTL_D1__CK_AUTO_EN_MASK 0x00100000L
+#define MC_IO_PAD_CNTL_D1__CK_AUTO_EN__SHIFT 0x00000014
+#define MC_IO_PAD_CNTL_D1__CK_DELAY_N_MASK 0x00c00000L
+#define MC_IO_PAD_CNTL_D1__CK_DELAY_N__SHIFT 0x00000016
+#define MC_IO_PAD_CNTL_D1__CK_DELAY_P_MASK 0x03000000L
+#define MC_IO_PAD_CNTL_D1__CK_DELAY_P__SHIFT 0x00000018
+#define MC_IO_PAD_CNTL_D1__CK_DELAY_SEL_MASK 0x00200000L
+#define MC_IO_PAD_CNTL_D1__CK_DELAY_SEL__SHIFT 0x00000015
+#define MC_IO_PAD_CNTL_D1__DELAY_ADR_SYNC_MASK 0x00000010L
+#define MC_IO_PAD_CNTL_D1__DELAY_ADR_SYNC__SHIFT 0x00000004
+#define MC_IO_PAD_CNTL_D1__DELAY_CLK_SYNC_MASK 0x00000004L
+#define MC_IO_PAD_CNTL_D1__DELAY_CLK_SYNC__SHIFT 0x00000002
+#define MC_IO_PAD_CNTL_D1__DELAY_CMD_SYNC_MASK 0x00000008L
+#define MC_IO_PAD_CNTL_D1__DELAY_CMD_SYNC__SHIFT 0x00000003
+#define MC_IO_PAD_CNTL_D1__DELAY_DATA_SYNC_MASK 0x00000001L
+#define MC_IO_PAD_CNTL_D1__DELAY_DATA_SYNC__SHIFT 0x00000000
+#define MC_IO_PAD_CNTL_D1__DELAY_STR_SYNC_MASK 0x00000002L
+#define MC_IO_PAD_CNTL_D1__DELAY_STR_SYNC__SHIFT 0x00000001
+#define MC_IO_PAD_CNTL_D1__DIFF_STR_MASK 0x20000000L
+#define MC_IO_PAD_CNTL_D1__DIFF_STR__SHIFT 0x0000001d
+#define MC_IO_PAD_CNTL_D1__DISABLE_ADR_MASK 0x00002000L
+#define MC_IO_PAD_CNTL_D1__DISABLE_ADR__SHIFT 0x0000000d
+#define MC_IO_PAD_CNTL_D1__DISABLE_CMD_MASK 0x00001000L
+#define MC_IO_PAD_CNTL_D1__DISABLE_CMD__SHIFT 0x0000000c
+#define MC_IO_PAD_CNTL_D1__EN_RD_STR_DLY_MASK 0x00000800L
+#define MC_IO_PAD_CNTL_D1__EN_RD_STR_DLY__SHIFT 0x0000000b
+#define MC_IO_PAD_CNTL_D1__FORCE_EN_RD_STR_MASK 0x00000400L
+#define MC_IO_PAD_CNTL_D1__FORCE_EN_RD_STR__SHIFT 0x0000000a
+#define MC_IO_PAD_CNTL_D1__GDDR_PWRON_MASK 0x40000000L
+#define MC_IO_PAD_CNTL_D1__GDDR_PWRON__SHIFT 0x0000001e
+#define MC_IO_PAD_CNTL_D1__MEM_FALL_OUT_ADR_MASK 0x00000200L
+#define MC_IO_PAD_CNTL_D1__MEM_FALL_OUT_ADR__SHIFT 0x00000009
+#define MC_IO_PAD_CNTL_D1__MEM_FALL_OUT_CLK_MASK 0x00000080L
+#define MC_IO_PAD_CNTL_D1__MEM_FALL_OUT_CLK__SHIFT 0x00000007
+#define MC_IO_PAD_CNTL_D1__MEM_FALL_OUT_CMD_MASK 0x00000100L
+#define MC_IO_PAD_CNTL_D1__MEM_FALL_OUT_CMD__SHIFT 0x00000008
+#define MC_IO_PAD_CNTL_D1__MEM_FALL_OUT_DATA_MASK 0x00000020L
+#define MC_IO_PAD_CNTL_D1__MEM_FALL_OUT_DATA__SHIFT 0x00000005
+#define MC_IO_PAD_CNTL_D1__MEM_FALL_OUT_STR_MASK 0x00000040L
+#define MC_IO_PAD_CNTL_D1__MEM_FALL_OUT_STR__SHIFT 0x00000006
+#define MC_IO_PAD_CNTL_D1__TXPWROFF_CKE_MASK 0x08000000L
+#define MC_IO_PAD_CNTL_D1__TXPWROFF_CKE__SHIFT 0x0000001b
+#define MC_IO_PAD_CNTL_D1__TXPWROFF_CLK_MASK 0x80000000L
+#define MC_IO_PAD_CNTL_D1__TXPWROFF_CLK__SHIFT 0x0000001f
+#define MC_IO_PAD_CNTL_D1__UNI_STR_MASK 0x10000000L
+#define MC_IO_PAD_CNTL_D1__UNI_STR__SHIFT 0x0000001c
+#define MC_IO_PAD_CNTL_D1__VREFI_EN_MASK 0x00004000L
+#define MC_IO_PAD_CNTL_D1__VREFI_EN__SHIFT 0x0000000e
+#define MC_IO_PAD_CNTL_D1__VREFI_SEL_MASK 0x000f8000L
+#define MC_IO_PAD_CNTL_D1__VREFI_SEL__SHIFT 0x0000000f
+#define MC_IO_PAD_CNTL__MEM_IO_IMP_MAX_MASK 0x0000ff00L
+#define MC_IO_PAD_CNTL__MEM_IO_IMP_MAX__SHIFT 0x00000008
+#define MC_IO_PAD_CNTL__MEM_IO_IMP_MIN_MASK 0x000000ffL
+#define MC_IO_PAD_CNTL__MEM_IO_IMP_MIN__SHIFT 0x00000000
+#define MC_IO_PAD_CNTL__OVL_YCLKON_D0_MASK 0x00040000L
+#define MC_IO_PAD_CNTL__OVL_YCLKON_D0__SHIFT 0x00000012
+#define MC_IO_PAD_CNTL__OVL_YCLKON_D1_MASK 0x00080000L
+#define MC_IO_PAD_CNTL__OVL_YCLKON_D1__SHIFT 0x00000013
+#define MC_IO_PAD_CNTL__RXPHASE_GRAY_MASK 0x00020000L
+#define MC_IO_PAD_CNTL__RXPHASE_GRAY__SHIFT 0x00000011
+#define MC_IO_PAD_CNTL__TXPHASE_GRAY_MASK 0x00010000L
+#define MC_IO_PAD_CNTL__TXPHASE_GRAY__SHIFT 0x00000010
+#define MC_IO_RXCNTL1_DPHY0_D0__DLL_RSV_MASK 0xf0000000L
+#define MC_IO_RXCNTL1_DPHY0_D0__DLL_RSV__SHIFT 0x0000001c
+#define MC_IO_RXCNTL1_DPHY0_D0__PMD_LOOPBACK_MASK 0x0e000000L
+#define MC_IO_RXCNTL1_DPHY0_D0__PMD_LOOPBACK__SHIFT 0x00000019
+#define MC_IO_RXCNTL1_DPHY0_D0__VREFCAL1_MSB_MASK 0x0000000fL
+#define MC_IO_RXCNTL1_DPHY0_D0__VREFCAL1_MSB__SHIFT 0x00000000
+#define MC_IO_RXCNTL1_DPHY0_D0__VREFCAL2_MSB_MASK 0x000000f0L
+#define MC_IO_RXCNTL1_DPHY0_D0__VREFCAL2_MSB__SHIFT 0x00000004
+#define MC_IO_RXCNTL1_DPHY0_D0__VREFCAL3_MASK 0x0000ff00L
+#define MC_IO_RXCNTL1_DPHY0_D0__VREFCAL3__SHIFT 0x00000008
+#define MC_IO_RXCNTL1_DPHY0_D0__VREFPDNB_1_MASK 0x00040000L
+#define MC_IO_RXCNTL1_DPHY0_D0__VREFPDNB_1__SHIFT 0x00000012
+#define MC_IO_RXCNTL1_DPHY0_D0__VREFSEL2_MASK 0x00010000L
+#define MC_IO_RXCNTL1_DPHY0_D0__VREFSEL2__SHIFT 0x00000010
+#define MC_IO_RXCNTL1_DPHY0_D0__VREFSEL3_MASK 0x00020000L
+#define MC_IO_RXCNTL1_DPHY0_D0__VREFSEL3__SHIFT 0x00000011
+#define MC_IO_RXCNTL1_DPHY0_D1__DLL_RSV_MASK 0xf0000000L
+#define MC_IO_RXCNTL1_DPHY0_D1__DLL_RSV__SHIFT 0x0000001c
+#define MC_IO_RXCNTL1_DPHY0_D1__PMD_LOOPBACK_MASK 0x0e000000L
+#define MC_IO_RXCNTL1_DPHY0_D1__PMD_LOOPBACK__SHIFT 0x00000019
+#define MC_IO_RXCNTL1_DPHY0_D1__VREFCAL1_MSB_MASK 0x0000000fL
+#define MC_IO_RXCNTL1_DPHY0_D1__VREFCAL1_MSB__SHIFT 0x00000000
+#define MC_IO_RXCNTL1_DPHY0_D1__VREFCAL2_MSB_MASK 0x000000f0L
+#define MC_IO_RXCNTL1_DPHY0_D1__VREFCAL2_MSB__SHIFT 0x00000004
+#define MC_IO_RXCNTL1_DPHY0_D1__VREFCAL3_MASK 0x0000ff00L
+#define MC_IO_RXCNTL1_DPHY0_D1__VREFCAL3__SHIFT 0x00000008
+#define MC_IO_RXCNTL1_DPHY0_D1__VREFPDNB_1_MASK 0x00040000L
+#define MC_IO_RXCNTL1_DPHY0_D1__VREFPDNB_1__SHIFT 0x00000012
+#define MC_IO_RXCNTL1_DPHY0_D1__VREFSEL2_MASK 0x00010000L
+#define MC_IO_RXCNTL1_DPHY0_D1__VREFSEL2__SHIFT 0x00000010
+#define MC_IO_RXCNTL1_DPHY0_D1__VREFSEL3_MASK 0x00020000L
+#define MC_IO_RXCNTL1_DPHY0_D1__VREFSEL3__SHIFT 0x00000011
+#define MC_IO_RXCNTL1_DPHY1_D0__DLL_RSV_MASK 0xf0000000L
+#define MC_IO_RXCNTL1_DPHY1_D0__DLL_RSV__SHIFT 0x0000001c
+#define MC_IO_RXCNTL1_DPHY1_D0__PMD_LOOPBACK_MASK 0x0e000000L
+#define MC_IO_RXCNTL1_DPHY1_D0__PMD_LOOPBACK__SHIFT 0x00000019
+#define MC_IO_RXCNTL1_DPHY1_D0__VREFCAL1_MSB_MASK 0x0000000fL
+#define MC_IO_RXCNTL1_DPHY1_D0__VREFCAL1_MSB__SHIFT 0x00000000
+#define MC_IO_RXCNTL1_DPHY1_D0__VREFCAL2_MSB_MASK 0x000000f0L
+#define MC_IO_RXCNTL1_DPHY1_D0__VREFCAL2_MSB__SHIFT 0x00000004
+#define MC_IO_RXCNTL1_DPHY1_D0__VREFCAL3_MASK 0x0000ff00L
+#define MC_IO_RXCNTL1_DPHY1_D0__VREFCAL3__SHIFT 0x00000008
+#define MC_IO_RXCNTL1_DPHY1_D0__VREFPDNB_1_MASK 0x00040000L
+#define MC_IO_RXCNTL1_DPHY1_D0__VREFPDNB_1__SHIFT 0x00000012
+#define MC_IO_RXCNTL1_DPHY1_D0__VREFSEL2_MASK 0x00010000L
+#define MC_IO_RXCNTL1_DPHY1_D0__VREFSEL2__SHIFT 0x00000010
+#define MC_IO_RXCNTL1_DPHY1_D0__VREFSEL3_MASK 0x00020000L
+#define MC_IO_RXCNTL1_DPHY1_D0__VREFSEL3__SHIFT 0x00000011
+#define MC_IO_RXCNTL1_DPHY1_D1__DLL_RSV_MASK 0xf0000000L
+#define MC_IO_RXCNTL1_DPHY1_D1__DLL_RSV__SHIFT 0x0000001c
+#define MC_IO_RXCNTL1_DPHY1_D1__PMD_LOOPBACK_MASK 0x0e000000L
+#define MC_IO_RXCNTL1_DPHY1_D1__PMD_LOOPBACK__SHIFT 0x00000019
+#define MC_IO_RXCNTL1_DPHY1_D1__VREFCAL1_MSB_MASK 0x0000000fL
+#define MC_IO_RXCNTL1_DPHY1_D1__VREFCAL1_MSB__SHIFT 0x00000000
+#define MC_IO_RXCNTL1_DPHY1_D1__VREFCAL2_MSB_MASK 0x000000f0L
+#define MC_IO_RXCNTL1_DPHY1_D1__VREFCAL2_MSB__SHIFT 0x00000004
+#define MC_IO_RXCNTL1_DPHY1_D1__VREFCAL3_MASK 0x0000ff00L
+#define MC_IO_RXCNTL1_DPHY1_D1__VREFCAL3__SHIFT 0x00000008
+#define MC_IO_RXCNTL1_DPHY1_D1__VREFPDNB_1_MASK 0x00040000L
+#define MC_IO_RXCNTL1_DPHY1_D1__VREFPDNB_1__SHIFT 0x00000012
+#define MC_IO_RXCNTL1_DPHY1_D1__VREFSEL2_MASK 0x00010000L
+#define MC_IO_RXCNTL1_DPHY1_D1__VREFSEL2__SHIFT 0x00000010
+#define MC_IO_RXCNTL1_DPHY1_D1__VREFSEL3_MASK 0x00020000L
+#define MC_IO_RXCNTL1_DPHY1_D1__VREFSEL3__SHIFT 0x00000011
+#define MC_IO_RXCNTL_DPHY0_D0__DLL_ADJ_B0_MASK 0x00700000L
+#define MC_IO_RXCNTL_DPHY0_D0__DLL_ADJ_B0__SHIFT 0x00000014
+#define MC_IO_RXCNTL_DPHY0_D0__DLL_ADJ_B1_MASK 0x07000000L
+#define MC_IO_RXCNTL_DPHY0_D0__DLL_ADJ_B1__SHIFT 0x00000018
+#define MC_IO_RXCNTL_DPHY0_D0__DLL_ADJ_M_MASK 0x10000000L
+#define MC_IO_RXCNTL_DPHY0_D0__DLL_ADJ_M__SHIFT 0x0000001c
+#define MC_IO_RXCNTL_DPHY0_D0__DLL_BW_CTRL_MASK 0xc0000000L
+#define MC_IO_RXCNTL_DPHY0_D0__DLL_BW_CTRL__SHIFT 0x0000001e
+#define MC_IO_RXCNTL_DPHY0_D0__RCVSEL_MASK 0x00000004L
+#define MC_IO_RXCNTL_DPHY0_D0__RCVSEL__SHIFT 0x00000002
+#define MC_IO_RXCNTL_DPHY0_D0__REFCLK_PWRON_MASK 0x20000000L
+#define MC_IO_RXCNTL_DPHY0_D0__REFCLK_PWRON__SHIFT 0x0000001d
+#define MC_IO_RXCNTL_DPHY0_D0__RXBIASSEL_MASK 0x00000003L
+#define MC_IO_RXCNTL_DPHY0_D0__RXBIASSEL__SHIFT 0x00000000
+#define MC_IO_RXCNTL_DPHY0_D0__RXDPWRON_DLY_MASK 0x00000030L
+#define MC_IO_RXCNTL_DPHY0_D0__RXDPWRON_DLY__SHIFT 0x00000004
+#define MC_IO_RXCNTL_DPHY0_D0__RXLP_MASK 0x00000080L
+#define MC_IO_RXCNTL_DPHY0_D0__RXLP__SHIFT 0x00000007
+#define MC_IO_RXCNTL_DPHY0_D0__RXPDNB_MASK 0x00000040L
+#define MC_IO_RXCNTL_DPHY0_D0__RXPDNB__SHIFT 0x00000006
+#define MC_IO_RXCNTL_DPHY0_D0__RX_PEAKSEL_MASK 0x000c0000L
+#define MC_IO_RXCNTL_DPHY0_D0__RX_PEAKSEL__SHIFT 0x00000012
+#define MC_IO_RXCNTL_DPHY0_D0__VREFCAL_MASK 0x00000f00L
+#define MC_IO_RXCNTL_DPHY0_D0__VREFCAL__SHIFT 0x00000008
+#define MC_IO_RXCNTL_DPHY0_D0__VREFCAL_STR_MASK 0x0000f000L
+#define MC_IO_RXCNTL_DPHY0_D0__VREFCAL_STR__SHIFT 0x0000000c
+#define MC_IO_RXCNTL_DPHY0_D0__VREFPDNB_MASK 0x00000008L
+#define MC_IO_RXCNTL_DPHY0_D0__VREFPDNB__SHIFT 0x00000003
+#define MC_IO_RXCNTL_DPHY0_D0__VREFSEL_MASK 0x00010000L
+#define MC_IO_RXCNTL_DPHY0_D0__VREFSEL__SHIFT 0x00000010
+#define MC_IO_RXCNTL_DPHY0_D1__DLL_ADJ_B0_MASK 0x00700000L
+#define MC_IO_RXCNTL_DPHY0_D1__DLL_ADJ_B0__SHIFT 0x00000014
+#define MC_IO_RXCNTL_DPHY0_D1__DLL_ADJ_B1_MASK 0x07000000L
+#define MC_IO_RXCNTL_DPHY0_D1__DLL_ADJ_B1__SHIFT 0x00000018
+#define MC_IO_RXCNTL_DPHY0_D1__DLL_ADJ_M_MASK 0x10000000L
+#define MC_IO_RXCNTL_DPHY0_D1__DLL_ADJ_M__SHIFT 0x0000001c
+#define MC_IO_RXCNTL_DPHY0_D1__DLL_BW_CTRL_MASK 0xc0000000L
+#define MC_IO_RXCNTL_DPHY0_D1__DLL_BW_CTRL__SHIFT 0x0000001e
+#define MC_IO_RXCNTL_DPHY0_D1__RCVSEL_MASK 0x00000004L
+#define MC_IO_RXCNTL_DPHY0_D1__RCVSEL__SHIFT 0x00000002
+#define MC_IO_RXCNTL_DPHY0_D1__REFCLK_PWRON_MASK 0x20000000L
+#define MC_IO_RXCNTL_DPHY0_D1__REFCLK_PWRON__SHIFT 0x0000001d
+#define MC_IO_RXCNTL_DPHY0_D1__RXBIASSEL_MASK 0x00000003L
+#define MC_IO_RXCNTL_DPHY0_D1__RXBIASSEL__SHIFT 0x00000000
+#define MC_IO_RXCNTL_DPHY0_D1__RXDPWRON_DLY_MASK 0x00000030L
+#define MC_IO_RXCNTL_DPHY0_D1__RXDPWRON_DLY__SHIFT 0x00000004
+#define MC_IO_RXCNTL_DPHY0_D1__RXLP_MASK 0x00000080L
+#define MC_IO_RXCNTL_DPHY0_D1__RXLP__SHIFT 0x00000007
+#define MC_IO_RXCNTL_DPHY0_D1__RXPDNB_MASK 0x00000040L
+#define MC_IO_RXCNTL_DPHY0_D1__RXPDNB__SHIFT 0x00000006
+#define MC_IO_RXCNTL_DPHY0_D1__RX_PEAKSEL_MASK 0x000c0000L
+#define MC_IO_RXCNTL_DPHY0_D1__RX_PEAKSEL__SHIFT 0x00000012
+#define MC_IO_RXCNTL_DPHY0_D1__VREFCAL_MASK 0x00000f00L
+#define MC_IO_RXCNTL_DPHY0_D1__VREFCAL__SHIFT 0x00000008
+#define MC_IO_RXCNTL_DPHY0_D1__VREFCAL_STR_MASK 0x0000f000L
+#define MC_IO_RXCNTL_DPHY0_D1__VREFCAL_STR__SHIFT 0x0000000c
+#define MC_IO_RXCNTL_DPHY0_D1__VREFPDNB_MASK 0x00000008L
+#define MC_IO_RXCNTL_DPHY0_D1__VREFPDNB__SHIFT 0x00000003
+#define MC_IO_RXCNTL_DPHY0_D1__VREFSEL_MASK 0x00010000L
+#define MC_IO_RXCNTL_DPHY0_D1__VREFSEL__SHIFT 0x00000010
+#define MC_IO_RXCNTL_DPHY1_D0__DLL_ADJ_B0_MASK 0x00700000L
+#define MC_IO_RXCNTL_DPHY1_D0__DLL_ADJ_B0__SHIFT 0x00000014
+#define MC_IO_RXCNTL_DPHY1_D0__DLL_ADJ_B1_MASK 0x07000000L
+#define MC_IO_RXCNTL_DPHY1_D0__DLL_ADJ_B1__SHIFT 0x00000018
+#define MC_IO_RXCNTL_DPHY1_D0__DLL_ADJ_M_MASK 0x10000000L
+#define MC_IO_RXCNTL_DPHY1_D0__DLL_ADJ_M__SHIFT 0x0000001c
+#define MC_IO_RXCNTL_DPHY1_D0__DLL_BW_CTRL_MASK 0xc0000000L
+#define MC_IO_RXCNTL_DPHY1_D0__DLL_BW_CTRL__SHIFT 0x0000001e
+#define MC_IO_RXCNTL_DPHY1_D0__RCVSEL_MASK 0x00000004L
+#define MC_IO_RXCNTL_DPHY1_D0__RCVSEL__SHIFT 0x00000002
+#define MC_IO_RXCNTL_DPHY1_D0__REFCLK_PWRON_MASK 0x20000000L
+#define MC_IO_RXCNTL_DPHY1_D0__REFCLK_PWRON__SHIFT 0x0000001d
+#define MC_IO_RXCNTL_DPHY1_D0__RXBIASSEL_MASK 0x00000003L
+#define MC_IO_RXCNTL_DPHY1_D0__RXBIASSEL__SHIFT 0x00000000
+#define MC_IO_RXCNTL_DPHY1_D0__RXDPWRON_DLY_MASK 0x00000030L
+#define MC_IO_RXCNTL_DPHY1_D0__RXDPWRON_DLY__SHIFT 0x00000004
+#define MC_IO_RXCNTL_DPHY1_D0__RXLP_MASK 0x00000080L
+#define MC_IO_RXCNTL_DPHY1_D0__RXLP__SHIFT 0x00000007
+#define MC_IO_RXCNTL_DPHY1_D0__RXPDNB_MASK 0x00000040L
+#define MC_IO_RXCNTL_DPHY1_D0__RXPDNB__SHIFT 0x00000006
+#define MC_IO_RXCNTL_DPHY1_D0__RX_PEAKSEL_MASK 0x000c0000L
+#define MC_IO_RXCNTL_DPHY1_D0__RX_PEAKSEL__SHIFT 0x00000012
+#define MC_IO_RXCNTL_DPHY1_D0__VREFCAL_MASK 0x00000f00L
+#define MC_IO_RXCNTL_DPHY1_D0__VREFCAL__SHIFT 0x00000008
+#define MC_IO_RXCNTL_DPHY1_D0__VREFCAL_STR_MASK 0x0000f000L
+#define MC_IO_RXCNTL_DPHY1_D0__VREFCAL_STR__SHIFT 0x0000000c
+#define MC_IO_RXCNTL_DPHY1_D0__VREFPDNB_MASK 0x00000008L
+#define MC_IO_RXCNTL_DPHY1_D0__VREFPDNB__SHIFT 0x00000003
+#define MC_IO_RXCNTL_DPHY1_D0__VREFSEL_MASK 0x00010000L
+#define MC_IO_RXCNTL_DPHY1_D0__VREFSEL__SHIFT 0x00000010
+#define MC_IO_RXCNTL_DPHY1_D1__DLL_ADJ_B0_MASK 0x00700000L
+#define MC_IO_RXCNTL_DPHY1_D1__DLL_ADJ_B0__SHIFT 0x00000014
+#define MC_IO_RXCNTL_DPHY1_D1__DLL_ADJ_B1_MASK 0x07000000L
+#define MC_IO_RXCNTL_DPHY1_D1__DLL_ADJ_B1__SHIFT 0x00000018
+#define MC_IO_RXCNTL_DPHY1_D1__DLL_ADJ_M_MASK 0x10000000L
+#define MC_IO_RXCNTL_DPHY1_D1__DLL_ADJ_M__SHIFT 0x0000001c
+#define MC_IO_RXCNTL_DPHY1_D1__DLL_BW_CTRL_MASK 0xc0000000L
+#define MC_IO_RXCNTL_DPHY1_D1__DLL_BW_CTRL__SHIFT 0x0000001e
+#define MC_IO_RXCNTL_DPHY1_D1__RCVSEL_MASK 0x00000004L
+#define MC_IO_RXCNTL_DPHY1_D1__RCVSEL__SHIFT 0x00000002
+#define MC_IO_RXCNTL_DPHY1_D1__REFCLK_PWRON_MASK 0x20000000L
+#define MC_IO_RXCNTL_DPHY1_D1__REFCLK_PWRON__SHIFT 0x0000001d
+#define MC_IO_RXCNTL_DPHY1_D1__RXBIASSEL_MASK 0x00000003L
+#define MC_IO_RXCNTL_DPHY1_D1__RXBIASSEL__SHIFT 0x00000000
+#define MC_IO_RXCNTL_DPHY1_D1__RXDPWRON_DLY_MASK 0x00000030L
+#define MC_IO_RXCNTL_DPHY1_D1__RXDPWRON_DLY__SHIFT 0x00000004
+#define MC_IO_RXCNTL_DPHY1_D1__RXLP_MASK 0x00000080L
+#define MC_IO_RXCNTL_DPHY1_D1__RXLP__SHIFT 0x00000007
+#define MC_IO_RXCNTL_DPHY1_D1__RXPDNB_MASK 0x00000040L
+#define MC_IO_RXCNTL_DPHY1_D1__RXPDNB__SHIFT 0x00000006
+#define MC_IO_RXCNTL_DPHY1_D1__RX_PEAKSEL_MASK 0x000c0000L
+#define MC_IO_RXCNTL_DPHY1_D1__RX_PEAKSEL__SHIFT 0x00000012
+#define MC_IO_RXCNTL_DPHY1_D1__VREFCAL_MASK 0x00000f00L
+#define MC_IO_RXCNTL_DPHY1_D1__VREFCAL__SHIFT 0x00000008
+#define MC_IO_RXCNTL_DPHY1_D1__VREFCAL_STR_MASK 0x0000f000L
+#define MC_IO_RXCNTL_DPHY1_D1__VREFCAL_STR__SHIFT 0x0000000c
+#define MC_IO_RXCNTL_DPHY1_D1__VREFPDNB_MASK 0x00000008L
+#define MC_IO_RXCNTL_DPHY1_D1__VREFPDNB__SHIFT 0x00000003
+#define MC_IO_RXCNTL_DPHY1_D1__VREFSEL_MASK 0x00010000L
+#define MC_IO_RXCNTL_DPHY1_D1__VREFSEL__SHIFT 0x00000010
+#define MC_IO_TXCNTL_APHY_D0__BIASSEL_MASK 0x00000003L
+#define MC_IO_TXCNTL_APHY_D0__BIASSEL__SHIFT 0x00000000
+#define MC_IO_TXCNTL_APHY_D0__CKE_BIT_MASK 0x40000000L
+#define MC_IO_TXCNTL_APHY_D0__CKE_BIT__SHIFT 0x0000001e
+#define MC_IO_TXCNTL_APHY_D0__CKE_SEL_MASK 0x80000000L
+#define MC_IO_TXCNTL_APHY_D0__CKE_SEL__SHIFT 0x0000001f
+#define MC_IO_TXCNTL_APHY_D0__DRVDUTY_MASK 0x0000000cL
+#define MC_IO_TXCNTL_APHY_D0__DRVDUTY__SHIFT 0x00000002
+#define MC_IO_TXCNTL_APHY_D0__EMPH_MASK 0x00000040L
+#define MC_IO_TXCNTL_APHY_D0__EMPH__SHIFT 0x00000006
+#define MC_IO_TXCNTL_APHY_D0__LOWCMEN_MASK 0x00000010L
+#define MC_IO_TXCNTL_APHY_D0__LOWCMEN__SHIFT 0x00000004
+#define MC_IO_TXCNTL_APHY_D0__NDRV_MASK 0x00700000L
+#define MC_IO_TXCNTL_APHY_D0__NDRV__SHIFT 0x00000014
+#define MC_IO_TXCNTL_APHY_D0__PDRV_MASK 0x000f0000L
+#define MC_IO_TXCNTL_APHY_D0__PDRV__SHIFT 0x00000010
+#define MC_IO_TXCNTL_APHY_D0__PMA_LOOPBACK_MASK 0x0000e000L
+#define MC_IO_TXCNTL_APHY_D0__PMA_LOOPBACK__SHIFT 0x0000000d
+#define MC_IO_TXCNTL_APHY_D0__PTERM_MASK 0x00000f00L
+#define MC_IO_TXCNTL_APHY_D0__PTERM__SHIFT 0x00000008
+#define MC_IO_TXCNTL_APHY_D0__QDR_MASK 0x00000020L
+#define MC_IO_TXCNTL_APHY_D0__QDR__SHIFT 0x00000005
+#define MC_IO_TXCNTL_APHY_D0__TSTEN_MASK 0x01000000L
+#define MC_IO_TXCNTL_APHY_D0__TSTEN__SHIFT 0x00000018
+#define MC_IO_TXCNTL_APHY_D0__TXBPASS_SEL_MASK 0x00001000L
+#define MC_IO_TXCNTL_APHY_D0__TXBPASS_SEL__SHIFT 0x0000000c
+#define MC_IO_TXCNTL_APHY_D0__TXBYPASS_DATA_MASK 0x38000000L
+#define MC_IO_TXCNTL_APHY_D0__TXBYPASS_DATA__SHIFT 0x0000001b
+#define MC_IO_TXCNTL_APHY_D0__TXBYPASS_MASK 0x04000000L
+#define MC_IO_TXCNTL_APHY_D0__TXBYPASS__SHIFT 0x0000001a
+#define MC_IO_TXCNTL_APHY_D0__TXPD_MASK 0x00000080L
+#define MC_IO_TXCNTL_APHY_D0__TXPD__SHIFT 0x00000007
+#define MC_IO_TXCNTL_APHY_D0__TXRESET_MASK 0x02000000L
+#define MC_IO_TXCNTL_APHY_D0__TXRESET__SHIFT 0x00000019
+#define MC_IO_TXCNTL_APHY_D0__YCLKON_MASK 0x00800000L
+#define MC_IO_TXCNTL_APHY_D0__YCLKON__SHIFT 0x00000017
+#define MC_IO_TXCNTL_APHY_D1__BIASSEL_MASK 0x00000003L
+#define MC_IO_TXCNTL_APHY_D1__BIASSEL__SHIFT 0x00000000
+#define MC_IO_TXCNTL_APHY_D1__CKE_BIT_MASK 0x40000000L
+#define MC_IO_TXCNTL_APHY_D1__CKE_BIT__SHIFT 0x0000001e
+#define MC_IO_TXCNTL_APHY_D1__CKE_SEL_MASK 0x80000000L
+#define MC_IO_TXCNTL_APHY_D1__CKE_SEL__SHIFT 0x0000001f
+#define MC_IO_TXCNTL_APHY_D1__DRVDUTY_MASK 0x0000000cL
+#define MC_IO_TXCNTL_APHY_D1__DRVDUTY__SHIFT 0x00000002
+#define MC_IO_TXCNTL_APHY_D1__EMPH_MASK 0x00000040L
+#define MC_IO_TXCNTL_APHY_D1__EMPH__SHIFT 0x00000006
+#define MC_IO_TXCNTL_APHY_D1__LOWCMEN_MASK 0x00000010L
+#define MC_IO_TXCNTL_APHY_D1__LOWCMEN__SHIFT 0x00000004
+#define MC_IO_TXCNTL_APHY_D1__NDRV_MASK 0x00700000L
+#define MC_IO_TXCNTL_APHY_D1__NDRV__SHIFT 0x00000014
+#define MC_IO_TXCNTL_APHY_D1__PDRV_MASK 0x000f0000L
+#define MC_IO_TXCNTL_APHY_D1__PDRV__SHIFT 0x00000010
+#define MC_IO_TXCNTL_APHY_D1__PMA_LOOPBACK_MASK 0x0000e000L
+#define MC_IO_TXCNTL_APHY_D1__PMA_LOOPBACK__SHIFT 0x0000000d
+#define MC_IO_TXCNTL_APHY_D1__PTERM_MASK 0x00000f00L
+#define MC_IO_TXCNTL_APHY_D1__PTERM__SHIFT 0x00000008
+#define MC_IO_TXCNTL_APHY_D1__QDR_MASK 0x00000020L
+#define MC_IO_TXCNTL_APHY_D1__QDR__SHIFT 0x00000005
+#define MC_IO_TXCNTL_APHY_D1__TSTEN_MASK 0x01000000L
+#define MC_IO_TXCNTL_APHY_D1__TSTEN__SHIFT 0x00000018
+#define MC_IO_TXCNTL_APHY_D1__TXBPASS_SEL_MASK 0x00001000L
+#define MC_IO_TXCNTL_APHY_D1__TXBPASS_SEL__SHIFT 0x0000000c
+#define MC_IO_TXCNTL_APHY_D1__TXBYPASS_DATA_MASK 0x38000000L
+#define MC_IO_TXCNTL_APHY_D1__TXBYPASS_DATA__SHIFT 0x0000001b
+#define MC_IO_TXCNTL_APHY_D1__TXBYPASS_MASK 0x04000000L
+#define MC_IO_TXCNTL_APHY_D1__TXBYPASS__SHIFT 0x0000001a
+#define MC_IO_TXCNTL_APHY_D1__TXPD_MASK 0x00000080L
+#define MC_IO_TXCNTL_APHY_D1__TXPD__SHIFT 0x00000007
+#define MC_IO_TXCNTL_APHY_D1__TXRESET_MASK 0x02000000L
+#define MC_IO_TXCNTL_APHY_D1__TXRESET__SHIFT 0x00000019
+#define MC_IO_TXCNTL_APHY_D1__YCLKON_MASK 0x00800000L
+#define MC_IO_TXCNTL_APHY_D1__YCLKON__SHIFT 0x00000017
+#define MC_IO_TXCNTL_DPHY0_D0__BIASSEL_MASK 0x00000003L
+#define MC_IO_TXCNTL_DPHY0_D0__BIASSEL__SHIFT 0x00000000
+#define MC_IO_TXCNTL_DPHY0_D0__DRVDUTY_MASK 0x0000000cL
+#define MC_IO_TXCNTL_DPHY0_D0__DRVDUTY__SHIFT 0x00000002
+#define MC_IO_TXCNTL_DPHY0_D0__EDCTX_CLKGATE_EN_MASK 0x02000000L
+#define MC_IO_TXCNTL_DPHY0_D0__EDCTX_CLKGATE_EN__SHIFT 0x00000019
+#define MC_IO_TXCNTL_DPHY0_D0__EMPH_MASK 0x00000040L
+#define MC_IO_TXCNTL_DPHY0_D0__EMPH__SHIFT 0x00000006
+#define MC_IO_TXCNTL_DPHY0_D0__LOWCMEN_MASK 0x00000010L
+#define MC_IO_TXCNTL_DPHY0_D0__LOWCMEN__SHIFT 0x00000004
+#define MC_IO_TXCNTL_DPHY0_D0__NDRV_MASK 0x00f00000L
+#define MC_IO_TXCNTL_DPHY0_D0__NDRV__SHIFT 0x00000014
+#define MC_IO_TXCNTL_DPHY0_D0__NTERM_MASK 0x0000f000L
+#define MC_IO_TXCNTL_DPHY0_D0__NTERM__SHIFT 0x0000000c
+#define MC_IO_TXCNTL_DPHY0_D0__PDRV_MASK 0x000f0000L
+#define MC_IO_TXCNTL_DPHY0_D0__PDRV__SHIFT 0x00000010
+#define MC_IO_TXCNTL_DPHY0_D0__PLL_LOOPBCK_MASK 0x08000000L
+#define MC_IO_TXCNTL_DPHY0_D0__PLL_LOOPBCK__SHIFT 0x0000001b
+#define MC_IO_TXCNTL_DPHY0_D0__PTERM_MASK 0x00000f00L
+#define MC_IO_TXCNTL_DPHY0_D0__PTERM__SHIFT 0x00000008
+#define MC_IO_TXCNTL_DPHY0_D0__QDR_MASK 0x00000020L
+#define MC_IO_TXCNTL_DPHY0_D0__QDR__SHIFT 0x00000005
+#define MC_IO_TXCNTL_DPHY0_D0__TSTEN_MASK 0x01000000L
+#define MC_IO_TXCNTL_DPHY0_D0__TSTEN__SHIFT 0x00000018
+#define MC_IO_TXCNTL_DPHY0_D0__TXBYPASS_DATA_MASK 0xf0000000L
+#define MC_IO_TXCNTL_DPHY0_D0__TXBYPASS_DATA__SHIFT 0x0000001c
+#define MC_IO_TXCNTL_DPHY0_D0__TXBYPASS_MASK 0x04000000L
+#define MC_IO_TXCNTL_DPHY0_D0__TXBYPASS__SHIFT 0x0000001a
+#define MC_IO_TXCNTL_DPHY0_D0__TXPD_MASK 0x00000080L
+#define MC_IO_TXCNTL_DPHY0_D0__TXPD__SHIFT 0x00000007
+#define MC_IO_TXCNTL_DPHY0_D1__BIASSEL_MASK 0x00000003L
+#define MC_IO_TXCNTL_DPHY0_D1__BIASSEL__SHIFT 0x00000000
+#define MC_IO_TXCNTL_DPHY0_D1__DRVDUTY_MASK 0x0000000cL
+#define MC_IO_TXCNTL_DPHY0_D1__DRVDUTY__SHIFT 0x00000002
+#define MC_IO_TXCNTL_DPHY0_D1__EDCTX_CLKGATE_EN_MASK 0x02000000L
+#define MC_IO_TXCNTL_DPHY0_D1__EDCTX_CLKGATE_EN__SHIFT 0x00000019
+#define MC_IO_TXCNTL_DPHY0_D1__EMPH_MASK 0x00000040L
+#define MC_IO_TXCNTL_DPHY0_D1__EMPH__SHIFT 0x00000006
+#define MC_IO_TXCNTL_DPHY0_D1__LOWCMEN_MASK 0x00000010L
+#define MC_IO_TXCNTL_DPHY0_D1__LOWCMEN__SHIFT 0x00000004
+#define MC_IO_TXCNTL_DPHY0_D1__NDRV_MASK 0x00f00000L
+#define MC_IO_TXCNTL_DPHY0_D1__NDRV__SHIFT 0x00000014
+#define MC_IO_TXCNTL_DPHY0_D1__NTERM_MASK 0x0000f000L
+#define MC_IO_TXCNTL_DPHY0_D1__NTERM__SHIFT 0x0000000c
+#define MC_IO_TXCNTL_DPHY0_D1__PDRV_MASK 0x000f0000L
+#define MC_IO_TXCNTL_DPHY0_D1__PDRV__SHIFT 0x00000010
+#define MC_IO_TXCNTL_DPHY0_D1__PLL_LOOPBCK_MASK 0x08000000L
+#define MC_IO_TXCNTL_DPHY0_D1__PLL_LOOPBCK__SHIFT 0x0000001b
+#define MC_IO_TXCNTL_DPHY0_D1__PTERM_MASK 0x00000f00L
+#define MC_IO_TXCNTL_DPHY0_D1__PTERM__SHIFT 0x00000008
+#define MC_IO_TXCNTL_DPHY0_D1__QDR_MASK 0x00000020L
+#define MC_IO_TXCNTL_DPHY0_D1__QDR__SHIFT 0x00000005
+#define MC_IO_TXCNTL_DPHY0_D1__TSTEN_MASK 0x01000000L
+#define MC_IO_TXCNTL_DPHY0_D1__TSTEN__SHIFT 0x00000018
+#define MC_IO_TXCNTL_DPHY0_D1__TXBYPASS_DATA_MASK 0xf0000000L
+#define MC_IO_TXCNTL_DPHY0_D1__TXBYPASS_DATA__SHIFT 0x0000001c
+#define MC_IO_TXCNTL_DPHY0_D1__TXBYPASS_MASK 0x04000000L
+#define MC_IO_TXCNTL_DPHY0_D1__TXBYPASS__SHIFT 0x0000001a
+#define MC_IO_TXCNTL_DPHY0_D1__TXPD_MASK 0x00000080L
+#define MC_IO_TXCNTL_DPHY0_D1__TXPD__SHIFT 0x00000007
+#define MC_IO_TXCNTL_DPHY1_D0__BIASSEL_MASK 0x00000003L
+#define MC_IO_TXCNTL_DPHY1_D0__BIASSEL__SHIFT 0x00000000
+#define MC_IO_TXCNTL_DPHY1_D0__DRVDUTY_MASK 0x0000000cL
+#define MC_IO_TXCNTL_DPHY1_D0__DRVDUTY__SHIFT 0x00000002
+#define MC_IO_TXCNTL_DPHY1_D0__EDCTX_CLKGATE_EN_MASK 0x02000000L
+#define MC_IO_TXCNTL_DPHY1_D0__EDCTX_CLKGATE_EN__SHIFT 0x00000019
+#define MC_IO_TXCNTL_DPHY1_D0__EMPH_MASK 0x00000040L
+#define MC_IO_TXCNTL_DPHY1_D0__EMPH__SHIFT 0x00000006
+#define MC_IO_TXCNTL_DPHY1_D0__LOWCMEN_MASK 0x00000010L
+#define MC_IO_TXCNTL_DPHY1_D0__LOWCMEN__SHIFT 0x00000004
+#define MC_IO_TXCNTL_DPHY1_D0__NDRV_MASK 0x00f00000L
+#define MC_IO_TXCNTL_DPHY1_D0__NDRV__SHIFT 0x00000014
+#define MC_IO_TXCNTL_DPHY1_D0__NTERM_MASK 0x0000f000L
+#define MC_IO_TXCNTL_DPHY1_D0__NTERM__SHIFT 0x0000000c
+#define MC_IO_TXCNTL_DPHY1_D0__PDRV_MASK 0x000f0000L
+#define MC_IO_TXCNTL_DPHY1_D0__PDRV__SHIFT 0x00000010
+#define MC_IO_TXCNTL_DPHY1_D0__PLL_LOOPBCK_MASK 0x08000000L
+#define MC_IO_TXCNTL_DPHY1_D0__PLL_LOOPBCK__SHIFT 0x0000001b
+#define MC_IO_TXCNTL_DPHY1_D0__PTERM_MASK 0x00000f00L
+#define MC_IO_TXCNTL_DPHY1_D0__PTERM__SHIFT 0x00000008
+#define MC_IO_TXCNTL_DPHY1_D0__QDR_MASK 0x00000020L
+#define MC_IO_TXCNTL_DPHY1_D0__QDR__SHIFT 0x00000005
+#define MC_IO_TXCNTL_DPHY1_D0__TSTEN_MASK 0x01000000L
+#define MC_IO_TXCNTL_DPHY1_D0__TSTEN__SHIFT 0x00000018
+#define MC_IO_TXCNTL_DPHY1_D0__TXBYPASS_DATA_MASK 0xf0000000L
+#define MC_IO_TXCNTL_DPHY1_D0__TXBYPASS_DATA__SHIFT 0x0000001c
+#define MC_IO_TXCNTL_DPHY1_D0__TXBYPASS_MASK 0x04000000L
+#define MC_IO_TXCNTL_DPHY1_D0__TXBYPASS__SHIFT 0x0000001a
+#define MC_IO_TXCNTL_DPHY1_D0__TXPD_MASK 0x00000080L
+#define MC_IO_TXCNTL_DPHY1_D0__TXPD__SHIFT 0x00000007
+#define MC_IO_TXCNTL_DPHY1_D1__BIASSEL_MASK 0x00000003L
+#define MC_IO_TXCNTL_DPHY1_D1__BIASSEL__SHIFT 0x00000000
+#define MC_IO_TXCNTL_DPHY1_D1__DRVDUTY_MASK 0x0000000cL
+#define MC_IO_TXCNTL_DPHY1_D1__DRVDUTY__SHIFT 0x00000002
+#define MC_IO_TXCNTL_DPHY1_D1__EDCTX_CLKGATE_EN_MASK 0x02000000L
+#define MC_IO_TXCNTL_DPHY1_D1__EDCTX_CLKGATE_EN__SHIFT 0x00000019
+#define MC_IO_TXCNTL_DPHY1_D1__EMPH_MASK 0x00000040L
+#define MC_IO_TXCNTL_DPHY1_D1__EMPH__SHIFT 0x00000006
+#define MC_IO_TXCNTL_DPHY1_D1__LOWCMEN_MASK 0x00000010L
+#define MC_IO_TXCNTL_DPHY1_D1__LOWCMEN__SHIFT 0x00000004
+#define MC_IO_TXCNTL_DPHY1_D1__NDRV_MASK 0x00f00000L
+#define MC_IO_TXCNTL_DPHY1_D1__NDRV__SHIFT 0x00000014
+#define MC_IO_TXCNTL_DPHY1_D1__NTERM_MASK 0x0000f000L
+#define MC_IO_TXCNTL_DPHY1_D1__NTERM__SHIFT 0x0000000c
+#define MC_IO_TXCNTL_DPHY1_D1__PDRV_MASK 0x000f0000L
+#define MC_IO_TXCNTL_DPHY1_D1__PDRV__SHIFT 0x00000010
+#define MC_IO_TXCNTL_DPHY1_D1__PLL_LOOPBCK_MASK 0x08000000L
+#define MC_IO_TXCNTL_DPHY1_D1__PLL_LOOPBCK__SHIFT 0x0000001b
+#define MC_IO_TXCNTL_DPHY1_D1__PTERM_MASK 0x00000f00L
+#define MC_IO_TXCNTL_DPHY1_D1__PTERM__SHIFT 0x00000008
+#define MC_IO_TXCNTL_DPHY1_D1__QDR_MASK 0x00000020L
+#define MC_IO_TXCNTL_DPHY1_D1__QDR__SHIFT 0x00000005
+#define MC_IO_TXCNTL_DPHY1_D1__TSTEN_MASK 0x01000000L
+#define MC_IO_TXCNTL_DPHY1_D1__TSTEN__SHIFT 0x00000018
+#define MC_IO_TXCNTL_DPHY1_D1__TXBYPASS_DATA_MASK 0xf0000000L
+#define MC_IO_TXCNTL_DPHY1_D1__TXBYPASS_DATA__SHIFT 0x0000001c
+#define MC_IO_TXCNTL_DPHY1_D1__TXBYPASS_MASK 0x04000000L
+#define MC_IO_TXCNTL_DPHY1_D1__TXBYPASS__SHIFT 0x0000001a
+#define MC_IO_TXCNTL_DPHY1_D1__TXPD_MASK 0x00000080L
+#define MC_IO_TXCNTL_DPHY1_D1__TXPD__SHIFT 0x00000007
+#define MCLK_PWRMGT_CNTL__DLL_READY_MASK 0x00000040L
+#define MCLK_PWRMGT_CNTL__DLL_READY_READ_MASK 0x01000000L
+#define MCLK_PWRMGT_CNTL__DLL_READY_READ__SHIFT 0x00000018
+#define MCLK_PWRMGT_CNTL__DLL_READY__SHIFT 0x00000006
+#define MCLK_PWRMGT_CNTL__DLL_SPEED_MASK 0x0000001fL
+#define MCLK_PWRMGT_CNTL__DLL_SPEED__SHIFT 0x00000000
+#define MCLK_PWRMGT_CNTL__MC_INT_CNTL_MASK 0x00000080L
+#define MCLK_PWRMGT_CNTL__MC_INT_CNTL__SHIFT 0x00000007
+#define MCLK_PWRMGT_CNTL__MRDCK0_PDNB_MASK 0x00000100L
+#define MCLK_PWRMGT_CNTL__MRDCK0_PDNB__SHIFT 0x00000008
+#define MCLK_PWRMGT_CNTL__MRDCK0_RESET_MASK 0x00010000L
+#define MCLK_PWRMGT_CNTL__MRDCK0_RESET__SHIFT 0x00000010
+#define MCLK_PWRMGT_CNTL__MRDCK1_PDNB_MASK 0x00000200L
+#define MCLK_PWRMGT_CNTL__MRDCK1_PDNB__SHIFT 0x00000009
+#define MCLK_PWRMGT_CNTL__MRDCK1_RESET_MASK 0x00020000L
+#define MCLK_PWRMGT_CNTL__MRDCK1_RESET__SHIFT 0x00000011
+#define MC_MEM_POWER_LS__LS_HOLD_MASK 0x00000fc0L
+#define MC_MEM_POWER_LS__LS_HOLD__SHIFT 0x00000006
+#define MC_MEM_POWER_LS__LS_SETUP_MASK 0x0000003fL
+#define MC_MEM_POWER_LS__LS_SETUP__SHIFT 0x00000000
+#define MC_NPL_STATUS__D0_NDELAY_MASK 0x0000000cL
+#define MC_NPL_STATUS__D0_NDELAY__SHIFT 0x00000002
+#define MC_NPL_STATUS__D0_NEARLY_MASK 0x00000020L
+#define MC_NPL_STATUS__D0_NEARLY__SHIFT 0x00000005
+#define MC_NPL_STATUS__D0_PDELAY_MASK 0x00000003L
+#define MC_NPL_STATUS__D0_PDELAY__SHIFT 0x00000000
+#define MC_NPL_STATUS__D0_PEARLY_MASK 0x00000010L
+#define MC_NPL_STATUS__D0_PEARLY__SHIFT 0x00000004
+#define MC_NPL_STATUS__D1_NDELAY_MASK 0x00000300L
+#define MC_NPL_STATUS__D1_NDELAY__SHIFT 0x00000008
+#define MC_NPL_STATUS__D1_NEARLY_MASK 0x00000800L
+#define MC_NPL_STATUS__D1_NEARLY__SHIFT 0x0000000b
+#define MC_NPL_STATUS__D1_PDELAY_MASK 0x000000c0L
+#define MC_NPL_STATUS__D1_PDELAY__SHIFT 0x00000006
+#define MC_NPL_STATUS__D1_PEARLY_MASK 0x00000400L
+#define MC_NPL_STATUS__D1_PEARLY__SHIFT 0x0000000a
+#define MC_PHY_TIMING_2__ADR_CLKEN_D0_MASK 0x00040000L
+#define MC_PHY_TIMING_2__ADR_CLKEN_D0__SHIFT 0x00000012
+#define MC_PHY_TIMING_2__ADR_CLKEN_D1_MASK 0x00080000L
+#define MC_PHY_TIMING_2__ADR_CLKEN_D1__SHIFT 0x00000013
+#define MC_PHY_TIMING_2__IND_LD_CNT_MASK 0x0000007fL
+#define MC_PHY_TIMING_2__IND_LD_CNT__SHIFT 0x00000000
+#define MC_PHY_TIMING_2__RXC0_FRC_MASK 0x00001000L
+#define MC_PHY_TIMING_2__RXC0_FRC__SHIFT 0x0000000c
+#define MC_PHY_TIMING_2__RXC0_INV_MASK 0x00000100L
+#define MC_PHY_TIMING_2__RXC0_INV__SHIFT 0x00000008
+#define MC_PHY_TIMING_2__RXC1_FRC_MASK 0x00002000L
+#define MC_PHY_TIMING_2__RXC1_FRC__SHIFT 0x0000000d
+#define MC_PHY_TIMING_2__RXC1_INV_MASK 0x00000200L
+#define MC_PHY_TIMING_2__RXC1_INV__SHIFT 0x00000009
+#define MC_PHY_TIMING_2__TXC0_FRC_MASK 0x00004000L
+#define MC_PHY_TIMING_2__TXC0_FRC__SHIFT 0x0000000e
+#define MC_PHY_TIMING_2__TXC0_INV_MASK 0x00000400L
+#define MC_PHY_TIMING_2__TXC0_INV__SHIFT 0x0000000a
+#define MC_PHY_TIMING_2__TXC1_FRC_MASK 0x00008000L
+#define MC_PHY_TIMING_2__TXC1_FRC__SHIFT 0x0000000f
+#define MC_PHY_TIMING_2__TXC1_INV_MASK 0x00000800L
+#define MC_PHY_TIMING_2__TXC1_INV__SHIFT 0x0000000b
+#define MC_PHY_TIMING_2__TX_CDREN_D0_MASK 0x00010000L
+#define MC_PHY_TIMING_2__TX_CDREN_D0__SHIFT 0x00000010
+#define MC_PHY_TIMING_2__TX_CDREN_D1_MASK 0x00020000L
+#define MC_PHY_TIMING_2__TX_CDREN_D1__SHIFT 0x00000011
+#define MC_PHY_TIMING_2__WR_DLY_MASK 0x00f00000L
+#define MC_PHY_TIMING_2__WR_DLY__SHIFT 0x00000014
+#define MC_PHY_TIMING_D0__RXC0_DLY_MASK 0x0000000fL
+#define MC_PHY_TIMING_D0__RXC0_DLY__SHIFT 0x00000000
+#define MC_PHY_TIMING_D0__RXC0_EXT_MASK 0x000000f0L
+#define MC_PHY_TIMING_D0__RXC0_EXT__SHIFT 0x00000004
+#define MC_PHY_TIMING_D0__RXC1_DLY_MASK 0x00000f00L
+#define MC_PHY_TIMING_D0__RXC1_DLY__SHIFT 0x00000008
+#define MC_PHY_TIMING_D0__RXC1_EXT_MASK 0x0000f000L
+#define MC_PHY_TIMING_D0__RXC1_EXT__SHIFT 0x0000000c
+#define MC_PHY_TIMING_D0__TXC0_DLY_MASK 0x00070000L
+#define MC_PHY_TIMING_D0__TXC0_DLY__SHIFT 0x00000010
+#define MC_PHY_TIMING_D0__TXC0_EXT_MASK 0x00f00000L
+#define MC_PHY_TIMING_D0__TXC0_EXT__SHIFT 0x00000014
+#define MC_PHY_TIMING_D0__TXC1_DLY_MASK 0x07000000L
+#define MC_PHY_TIMING_D0__TXC1_DLY__SHIFT 0x00000018
+#define MC_PHY_TIMING_D0__TXC1_EXT_MASK 0xf0000000L
+#define MC_PHY_TIMING_D0__TXC1_EXT__SHIFT 0x0000001c
+#define MC_PHY_TIMING_D1__RXC0_DLY_MASK 0x0000000fL
+#define MC_PHY_TIMING_D1__RXC0_DLY__SHIFT 0x00000000
+#define MC_PHY_TIMING_D1__RXC0_EXT_MASK 0x000000f0L
+#define MC_PHY_TIMING_D1__RXC0_EXT__SHIFT 0x00000004
+#define MC_PHY_TIMING_D1__RXC1_DLY_MASK 0x00000f00L
+#define MC_PHY_TIMING_D1__RXC1_DLY__SHIFT 0x00000008
+#define MC_PHY_TIMING_D1__RXC1_EXT_MASK 0x0000f000L
+#define MC_PHY_TIMING_D1__RXC1_EXT__SHIFT 0x0000000c
+#define MC_PHY_TIMING_D1__TXC0_DLY_MASK 0x00070000L
+#define MC_PHY_TIMING_D1__TXC0_DLY__SHIFT 0x00000010
+#define MC_PHY_TIMING_D1__TXC0_EXT_MASK 0x00f00000L
+#define MC_PHY_TIMING_D1__TXC0_EXT__SHIFT 0x00000014
+#define MC_PHY_TIMING_D1__TXC1_DLY_MASK 0x07000000L
+#define MC_PHY_TIMING_D1__TXC1_DLY__SHIFT 0x00000018
+#define MC_PHY_TIMING_D1__TXC1_EXT_MASK 0xf0000000L
+#define MC_PHY_TIMING_D1__TXC1_EXT__SHIFT 0x0000001c
+#define MC_PMG_AUTO_CFG__DLL_CNT_MASK 0xff000000L
+#define MC_PMG_AUTO_CFG__DLL_CNT__SHIFT 0x00000018
+#define MC_PMG_AUTO_CFG__EXIT_ALLOW_STOP_MASK 0x00000800L
+#define MC_PMG_AUTO_CFG__EXIT_ALLOW_STOP__SHIFT 0x0000000b
+#define MC_PMG_AUTO_CFG__MRS_WAIT_CNT_MASK 0x000f0000L
+#define MC_PMG_AUTO_CFG__MRS_WAIT_CNT__SHIFT 0x00000010
+#define MC_PMG_AUTO_CFG__PREA_SRX_MASK 0x00002000L
+#define MC_PMG_AUTO_CFG__PREA_SRX__SHIFT 0x0000000d
+#define MC_PMG_AUTO_CFG__RFS_SRX_MASK 0x00001000L
+#define MC_PMG_AUTO_CFG__RFS_SRX__SHIFT 0x0000000c
+#define MC_PMG_AUTO_CFG__RST_MRS_MASK 0x00000002L
+#define MC_PMG_AUTO_CFG__RST_MRS__SHIFT 0x00000001
+#define MC_PMG_AUTO_CFG__RXPDNB_MASK 0x00400000L
+#define MC_PMG_AUTO_CFG__RXPDNB__SHIFT 0x00000016
+#define MC_PMG_AUTO_CFG__SCDS_MODE_MASK 0x00000400L
+#define MC_PMG_AUTO_CFG__SCDS_MODE__SHIFT 0x0000000a
+#define MC_PMG_AUTO_CFG__SELFREFR_COMMIT_0_MASK 0x00008000L
+#define MC_PMG_AUTO_CFG__SELFREFR_COMMIT_0__SHIFT 0x0000000f
+#define MC_PMG_AUTO_CFG__SELFREFR_COMMIT_1_MASK 0x00800000L
+#define MC_PMG_AUTO_CFG__SELFREFR_COMMIT_1__SHIFT 0x00000017
+#define MC_PMG_AUTO_CFG__SS_ALWAYS_SLF_MASK 0x00000100L
+#define MC_PMG_AUTO_CFG__SS_ALWAYS_SLF__SHIFT 0x00000008
+#define MC_PMG_AUTO_CFG__SS_S_SLF_MASK 0x00000200L
+#define MC_PMG_AUTO_CFG__SS_S_SLF__SHIFT 0x00000009
+#define MC_PMG_AUTO_CFG__STUTTER_EN_MASK 0x00004000L
+#define MC_PMG_AUTO_CFG__STUTTER_EN__SHIFT 0x0000000e
+#define MC_PMG_AUTO_CFG__SYC_CLK_MASK 0x00000001L
+#define MC_PMG_AUTO_CFG__SYC_CLK__SHIFT 0x00000000
+#define MC_PMG_AUTO_CFG__TRI_MIO_MASK 0x00000004L
+#define MC_PMG_AUTO_CFG__TRI_MIO__SHIFT 0x00000002
+#define MC_PMG_AUTO_CFG__WRITE_DURING_DLOCK_MASK 0x00100000L
+#define MC_PMG_AUTO_CFG__WRITE_DURING_DLOCK__SHIFT 0x00000014
+#define MC_PMG_AUTO_CFG__XSR_TMR_MASK 0x000000f0L
+#define MC_PMG_AUTO_CFG__XSR_TMR__SHIFT 0x00000004
+#define MC_PMG_AUTO_CFG__YCLK_ON_MASK 0x00200000L
+#define MC_PMG_AUTO_CFG__YCLK_ON__SHIFT 0x00000015
+#define MC_PMG_AUTO_CMD__ADR_MASK 0x0001ffffL
+#define MC_PMG_AUTO_CMD__ADR_MSB0_MASK 0x20000000L
+#define MC_PMG_AUTO_CMD__ADR_MSB0__SHIFT 0x0000001d
+#define MC_PMG_AUTO_CMD__ADR_MSB1_MASK 0x10000000L
+#define MC_PMG_AUTO_CMD__ADR_MSB1__SHIFT 0x0000001c
+#define MC_PMG_AUTO_CMD__ADR__SHIFT 0x00000000
+#define MC_PMG_CFG__DPM_WAKE_MASK 0x00000400L
+#define MC_PMG_CFG__DPM_WAKE__SHIFT 0x0000000a
+#define MC_PMG_CFG__EARLY_ACK_ACPI_MASK 0x00400000L
+#define MC_PMG_CFG__EARLY_ACK_ACPI__SHIFT 0x00000016
+#define MC_PMG_CFG__MRS_WAIT_CNT_MASK 0x000f0000L
+#define MC_PMG_CFG__MRS_WAIT_CNT__SHIFT 0x00000010
+#define MC_PMG_CFG__PREA_SRX_MASK 0x00002000L
+#define MC_PMG_CFG__PREA_SRX__SHIFT 0x0000000d
+#define MC_PMG_CFG__RFS_SRX_MASK 0x00001000L
+#define MC_PMG_CFG__RFS_SRX__SHIFT 0x0000000c
+#define MC_PMG_CFG__RST_EMRS_MASK 0x00000004L
+#define MC_PMG_CFG__RST_EMRS__SHIFT 0x00000002
+#define MC_PMG_CFG__RST_MRS1_MASK 0x00000100L
+#define MC_PMG_CFG__RST_MRS1__SHIFT 0x00000008
+#define MC_PMG_CFG__RST_MRS2_MASK 0x00000200L
+#define MC_PMG_CFG__RST_MRS2__SHIFT 0x00000009
+#define MC_PMG_CFG__RST_MRS_MASK 0x00000002L
+#define MC_PMG_CFG__RST_MRS__SHIFT 0x00000001
+#define MC_PMG_CFG__RXPDNB_MASK 0x02000000L
+#define MC_PMG_CFG__RXPDNB__SHIFT 0x00000019
+#define MC_PMG_CFG__SYC_CLK_MASK 0x00000001L
+#define MC_PMG_CFG__SYC_CLK__SHIFT 0x00000000
+#define MC_PMG_CFG__TRI_MIO_MASK 0x00000008L
+#define MC_PMG_CFG__TRI_MIO__SHIFT 0x00000003
+#define MC_PMG_CFG__WRITE_DURING_DLOCK_MASK 0x00100000L
+#define MC_PMG_CFG__WRITE_DURING_DLOCK__SHIFT 0x00000014
+#define MC_PMG_CFG__XSR_TMR_MASK 0x000000f0L
+#define MC_PMG_CFG__XSR_TMR__SHIFT 0x00000004
+#define MC_PMG_CFG__YCLK_ON_MASK 0x00200000L
+#define MC_PMG_CFG__YCLK_ON__SHIFT 0x00000015
+#define MC_PMG_CFG__ZQCL_SEND_MASK 0x0c000000L
+#define MC_PMG_CFG__ZQCL_SEND__SHIFT 0x0000001a
+#define MC_PMG_CMD_EMRS__ADR_MASK 0x0000ffffL
+#define MC_PMG_CMD_EMRS__ADR_MSB0_MASK 0x20000000L
+#define MC_PMG_CMD_EMRS__ADR_MSB0__SHIFT 0x0000001d
+#define MC_PMG_CMD_EMRS__ADR_MSB1_MASK 0x10000000L
+#define MC_PMG_CMD_EMRS__ADR_MSB1__SHIFT 0x0000001c
+#define MC_PMG_CMD_EMRS__ADR__SHIFT 0x00000000
+#define MC_PMG_CMD_EMRS__BNK_MSB_MASK 0x00080000L
+#define MC_PMG_CMD_EMRS__BNK_MSB__SHIFT 0x00000013
+#define MC_PMG_CMD_EMRS__CSB_MASK 0x00600000L
+#define MC_PMG_CMD_EMRS__CSB__SHIFT 0x00000015
+#define MC_PMG_CMD_EMRS__END_MASK 0x00100000L
+#define MC_PMG_CMD_EMRS__END__SHIFT 0x00000014
+#define MC_PMG_CMD_EMRS__MOP_MASK 0x00070000L
+#define MC_PMG_CMD_EMRS__MOP__SHIFT 0x00000010
+#define MC_PMG_CMD_MRS1__ADR_MASK 0x0000ffffL
+#define MC_PMG_CMD_MRS1__ADR_MSB0_MASK 0x20000000L
+#define MC_PMG_CMD_MRS1__ADR_MSB0__SHIFT 0x0000001d
+#define MC_PMG_CMD_MRS1__ADR_MSB1_MASK 0x10000000L
+#define MC_PMG_CMD_MRS1__ADR_MSB1__SHIFT 0x0000001c
+#define MC_PMG_CMD_MRS1__ADR__SHIFT 0x00000000
+#define MC_PMG_CMD_MRS1__BNK_MSB_MASK 0x00080000L
+#define MC_PMG_CMD_MRS1__BNK_MSB__SHIFT 0x00000013
+#define MC_PMG_CMD_MRS1__CSB_MASK 0x00600000L
+#define MC_PMG_CMD_MRS1__CSB__SHIFT 0x00000015
+#define MC_PMG_CMD_MRS1__END_MASK 0x00100000L
+#define MC_PMG_CMD_MRS1__END__SHIFT 0x00000014
+#define MC_PMG_CMD_MRS1__MOP_MASK 0x00070000L
+#define MC_PMG_CMD_MRS1__MOP__SHIFT 0x00000010
+#define MC_PMG_CMD_MRS2__ADR_MASK 0x0000ffffL
+#define MC_PMG_CMD_MRS2__ADR_MSB0_MASK 0x20000000L
+#define MC_PMG_CMD_MRS2__ADR_MSB0__SHIFT 0x0000001d
+#define MC_PMG_CMD_MRS2__ADR_MSB1_MASK 0x10000000L
+#define MC_PMG_CMD_MRS2__ADR_MSB1__SHIFT 0x0000001c
+#define MC_PMG_CMD_MRS2__ADR__SHIFT 0x00000000
+#define MC_PMG_CMD_MRS2__BNK_MSB_MASK 0x00080000L
+#define MC_PMG_CMD_MRS2__BNK_MSB__SHIFT 0x00000013
+#define MC_PMG_CMD_MRS2__CSB_MASK 0x00600000L
+#define MC_PMG_CMD_MRS2__CSB__SHIFT 0x00000015
+#define MC_PMG_CMD_MRS2__END_MASK 0x00100000L
+#define MC_PMG_CMD_MRS2__END__SHIFT 0x00000014
+#define MC_PMG_CMD_MRS2__MOP_MASK 0x00070000L
+#define MC_PMG_CMD_MRS2__MOP__SHIFT 0x00000010
+#define MC_PMG_CMD_MRS__ADR_MASK 0x0000ffffL
+#define MC_PMG_CMD_MRS__ADR_MSB0_MASK 0x20000000L
+#define MC_PMG_CMD_MRS__ADR_MSB0__SHIFT 0x0000001d
+#define MC_PMG_CMD_MRS__ADR_MSB1_MASK 0x10000000L
+#define MC_PMG_CMD_MRS__ADR_MSB1__SHIFT 0x0000001c
+#define MC_PMG_CMD_MRS__ADR__SHIFT 0x00000000
+#define MC_PMG_CMD_MRS__BNK_MSB_MASK 0x00080000L
+#define MC_PMG_CMD_MRS__BNK_MSB__SHIFT 0x00000013
+#define MC_PMG_CMD_MRS__CSB_MASK 0x00600000L
+#define MC_PMG_CMD_MRS__CSB__SHIFT 0x00000015
+#define MC_PMG_CMD_MRS__END_MASK 0x00100000L
+#define MC_PMG_CMD_MRS__END__SHIFT 0x00000014
+#define MC_PMG_CMD_MRS__MOP_MASK 0x00070000L
+#define MC_PMG_CMD_MRS__MOP__SHIFT 0x00000010
+#define MC_RD_CB__BLACKOUT_EXEMPT_MASK 0x00000008L
+#define MC_RD_CB__BLACKOUT_EXEMPT__SHIFT 0x00000003
+#define MC_RD_CB__ENABLE_MASK 0x00000001L
+#define MC_RD_CB__ENABLE__SHIFT 0x00000000
+#define MC_RD_CB__LAZY_TIMER_MASK 0x00007800L
+#define MC_RD_CB__LAZY_TIMER__SHIFT 0x0000000b
+#define MC_RD_CB__MAX_BURST_MASK 0x00000780L
+#define MC_RD_CB__MAX_BURST__SHIFT 0x00000007
+#define MC_RD_CB__PRESCALE_MASK 0x00000006L
+#define MC_RD_CB__PRESCALE__SHIFT 0x00000001
+#define MC_RD_CB__STALL_MODE_MASK 0x00000030L
+#define MC_RD_CB__STALL_MODE__SHIFT 0x00000004
+#define MC_RD_CB__STALL_OVERRIDE_MASK 0x00000040L
+#define MC_RD_CB__STALL_OVERRIDE__SHIFT 0x00000006
+#define MC_RD_CB__STALL_OVERRIDE_WTM_MASK 0x00008000L
+#define MC_RD_CB__STALL_OVERRIDE_WTM__SHIFT 0x0000000f
+#define MC_RD_DB__BLACKOUT_EXEMPT_MASK 0x00000008L
+#define MC_RD_DB__BLACKOUT_EXEMPT__SHIFT 0x00000003
+#define MC_RD_DB__ENABLE_MASK 0x00000001L
+#define MC_RD_DB__ENABLE__SHIFT 0x00000000
+#define MC_RD_DB__LAZY_TIMER_MASK 0x00007800L
+#define MC_RD_DB__LAZY_TIMER__SHIFT 0x0000000b
+#define MC_RD_DB__MAX_BURST_MASK 0x00000780L
+#define MC_RD_DB__MAX_BURST__SHIFT 0x00000007
+#define MC_RD_DB__PRESCALE_MASK 0x00000006L
+#define MC_RD_DB__PRESCALE__SHIFT 0x00000001
+#define MC_RD_DB__STALL_MODE_MASK 0x00000030L
+#define MC_RD_DB__STALL_MODE__SHIFT 0x00000004
+#define MC_RD_DB__STALL_OVERRIDE_MASK 0x00000040L
+#define MC_RD_DB__STALL_OVERRIDE__SHIFT 0x00000006
+#define MC_RD_DB__STALL_OVERRIDE_WTM_MASK 0x00008000L
+#define MC_RD_DB__STALL_OVERRIDE_WTM__SHIFT 0x0000000f
+#define MC_RD_GRP_EXT__DBSTEN0_MASK 0x0000000fL
+#define MC_RD_GRP_EXT__DBSTEN0__SHIFT 0x00000000
+#define MC_RD_GRP_EXT__TC0_MASK 0x000000f0L
+#define MC_RD_GRP_EXT__TC0__SHIFT 0x00000004
+#define MC_RD_GRP_GFX__CP_MASK 0x0000000fL
+#define MC_RD_GRP_GFX__CP__SHIFT 0x00000000
+#define MC_RD_GRP_GFX__XDMAM_MASK 0x000f0000L
+#define MC_RD_GRP_GFX__XDMAM__SHIFT 0x00000010
+#define MC_RD_GRP_LCL__CB0_MASK 0x0000f000L
+#define MC_RD_GRP_LCL__CB0__SHIFT 0x0000000c
+#define MC_RD_GRP_LCL__CBCMASK0_MASK 0x000f0000L
+#define MC_RD_GRP_LCL__CBCMASK0__SHIFT 0x00000010
+#define MC_RD_GRP_LCL__CBFMASK0_MASK 0x00f00000L
+#define MC_RD_GRP_LCL__CBFMASK0__SHIFT 0x00000014
+#define MC_RD_GRP_LCL__DB0_MASK 0x0f000000L
+#define MC_RD_GRP_LCL__DB0__SHIFT 0x00000018
+#define MC_RD_GRP_LCL__DBHTILE0_MASK 0xf0000000L
+#define MC_RD_GRP_LCL__DBHTILE0__SHIFT 0x0000001c
+#define MC_RD_GRP_OTH__HDP_MASK 0x00000f00L
+#define MC_RD_GRP_OTH__HDP__SHIFT 0x00000008
+#define MC_RD_GRP_OTH__SEM_MASK 0x0000f000L
+#define MC_RD_GRP_OTH__SEM__SHIFT 0x0000000c
+#define MC_RD_GRP_OTH__UMC_MASK 0x000f0000L
+#define MC_RD_GRP_OTH__UMC__SHIFT 0x00000010
+#define MC_RD_GRP_OTH__UVD_EXT0_MASK 0x0000000fL
+#define MC_RD_GRP_OTH__UVD_EXT0__SHIFT 0x00000000
+#define MC_RD_GRP_OTH__UVD_EXT1_MASK 0x0f000000L
+#define MC_RD_GRP_OTH__UVD_EXT1__SHIFT 0x00000018
+#define MC_RD_GRP_OTH__UVD_MASK 0x00f00000L
+#define MC_RD_GRP_OTH__UVD__SHIFT 0x00000014
+#define MC_RD_GRP_SYS__DMIF_MASK 0x0000f000L
+#define MC_RD_GRP_SYS__DMIF__SHIFT 0x0000000c
+#define MC_RD_GRP_SYS__MCIF_MASK 0x000f0000L
+#define MC_RD_GRP_SYS__MCIF__SHIFT 0x00000010
+#define MC_RD_GRP_SYS__RLC_MASK 0x0000000fL
+#define MC_RD_GRP_SYS__RLC__SHIFT 0x00000000
+#define MC_RD_GRP_SYS__SMU_MASK 0x00f00000L
+#define MC_RD_GRP_SYS__SMU__SHIFT 0x00000014
+#define MC_RD_GRP_SYS__VCE_MASK 0x0f000000L
+#define MC_RD_GRP_SYS__VCE__SHIFT 0x00000018
+#define MC_RD_GRP_SYS__VCEU_MASK 0xf0000000L
+#define MC_RD_GRP_SYS__VCEU__SHIFT 0x0000001c
+#define MC_RD_GRP_SYS__VMC_MASK 0x000000f0L
+#define MC_RD_GRP_SYS__VMC__SHIFT 0x00000004
+#define MC_RD_HUB__BLACKOUT_EXEMPT_MASK 0x00000008L
+#define MC_RD_HUB__BLACKOUT_EXEMPT__SHIFT 0x00000003
+#define MC_RD_HUB__ENABLE_MASK 0x00000001L
+#define MC_RD_HUB__ENABLE__SHIFT 0x00000000
+#define MC_RD_HUB__LAZY_TIMER_MASK 0x00007800L
+#define MC_RD_HUB__LAZY_TIMER__SHIFT 0x0000000b
+#define MC_RD_HUB__MAX_BURST_MASK 0x00000780L
+#define MC_RD_HUB__MAX_BURST__SHIFT 0x00000007
+#define MC_RD_HUB__PRESCALE_MASK 0x00000006L
+#define MC_RD_HUB__PRESCALE__SHIFT 0x00000001
+#define MC_RD_HUB__STALL_MODE_MASK 0x00000030L
+#define MC_RD_HUB__STALL_MODE__SHIFT 0x00000004
+#define MC_RD_HUB__STALL_OVERRIDE_MASK 0x00000040L
+#define MC_RD_HUB__STALL_OVERRIDE__SHIFT 0x00000006
+#define MC_RD_HUB__STALL_OVERRIDE_WTM_MASK 0x00008000L
+#define MC_RD_HUB__STALL_OVERRIDE_WTM__SHIFT 0x0000000f
+#define MC_RD_TC0__BLACKOUT_EXEMPT_MASK 0x00000008L
+#define MC_RD_TC0__BLACKOUT_EXEMPT__SHIFT 0x00000003
+#define MC_RD_TC0__ENABLE_MASK 0x00000001L
+#define MC_RD_TC0__ENABLE__SHIFT 0x00000000
+#define MC_RD_TC0__LAZY_TIMER_MASK 0x00007800L
+#define MC_RD_TC0__LAZY_TIMER__SHIFT 0x0000000b
+#define MC_RD_TC0__MAX_BURST_MASK 0x00000780L
+#define MC_RD_TC0__MAX_BURST__SHIFT 0x00000007
+#define MC_RD_TC0__PRESCALE_MASK 0x00000006L
+#define MC_RD_TC0__PRESCALE__SHIFT 0x00000001
+#define MC_RD_TC0__STALL_MODE_MASK 0x00000030L
+#define MC_RD_TC0__STALL_MODE__SHIFT 0x00000004
+#define MC_RD_TC0__STALL_OVERRIDE_MASK 0x00000040L
+#define MC_RD_TC0__STALL_OVERRIDE__SHIFT 0x00000006
+#define MC_RD_TC0__STALL_OVERRIDE_WTM_MASK 0x00008000L
+#define MC_RD_TC0__STALL_OVERRIDE_WTM__SHIFT 0x0000000f
+#define MC_RD_TC1__BLACKOUT_EXEMPT_MASK 0x00000008L
+#define MC_RD_TC1__BLACKOUT_EXEMPT__SHIFT 0x00000003
+#define MC_RD_TC1__ENABLE_MASK 0x00000001L
+#define MC_RD_TC1__ENABLE__SHIFT 0x00000000
+#define MC_RD_TC1__LAZY_TIMER_MASK 0x00007800L
+#define MC_RD_TC1__LAZY_TIMER__SHIFT 0x0000000b
+#define MC_RD_TC1__MAX_BURST_MASK 0x00000780L
+#define MC_RD_TC1__MAX_BURST__SHIFT 0x00000007
+#define MC_RD_TC1__PRESCALE_MASK 0x00000006L
+#define MC_RD_TC1__PRESCALE__SHIFT 0x00000001
+#define MC_RD_TC1__STALL_MODE_MASK 0x00000030L
+#define MC_RD_TC1__STALL_MODE__SHIFT 0x00000004
+#define MC_RD_TC1__STALL_OVERRIDE_MASK 0x00000040L
+#define MC_RD_TC1__STALL_OVERRIDE__SHIFT 0x00000006
+#define MC_RD_TC1__STALL_OVERRIDE_WTM_MASK 0x00008000L
+#define MC_RD_TC1__STALL_OVERRIDE_WTM__SHIFT 0x0000000f
+#define MC_RPB_ARB_CNTL__ATC_SWITCH_NUM_MASK 0x00ff0000L
+#define MC_RPB_ARB_CNTL__ATC_SWITCH_NUM__SHIFT 0x00000010
+#define MC_RPB_ARB_CNTL__RD_SWITCH_NUM_MASK 0x0000ff00L
+#define MC_RPB_ARB_CNTL__RD_SWITCH_NUM__SHIFT 0x00000008
+#define MC_RPB_ARB_CNTL__WR_SWITCH_NUM_MASK 0x000000ffL
+#define MC_RPB_ARB_CNTL__WR_SWITCH_NUM__SHIFT 0x00000000
+#define MC_RPB_BIF_CNTL__ARB_SWITCH_NUM_MASK 0x000000ffL
+#define MC_RPB_BIF_CNTL__ARB_SWITCH_NUM__SHIFT 0x00000000
+#define MC_RPB_BIF_CNTL__XPB_SWITCH_NUM_MASK 0x0000ff00L
+#define MC_RPB_BIF_CNTL__XPB_SWITCH_NUM__SHIFT 0x00000008
+#define MC_RPB_CID_QUEUE_EX_DATA__READ_ENTRIES_MASK 0xffff0000L
+#define MC_RPB_CID_QUEUE_EX_DATA__READ_ENTRIES__SHIFT 0x00000010
+#define MC_RPB_CID_QUEUE_EX_DATA__WRITE_ENTRIES_MASK 0x0000ffffL
+#define MC_RPB_CID_QUEUE_EX_DATA__WRITE_ENTRIES__SHIFT 0x00000000
+#define MC_RPB_CID_QUEUE_EX__OFFSET_MASK 0x0000003eL
+#define MC_RPB_CID_QUEUE_EX__OFFSET__SHIFT 0x00000001
+#define MC_RPB_CID_QUEUE_EX__START_MASK 0x00000001L
+#define MC_RPB_CID_QUEUE_EX__START__SHIFT 0x00000000
+#define MC_RPB_CID_QUEUE_RD__CLIENT_ID_MASK 0x000000ffL
+#define MC_RPB_CID_QUEUE_RD__CLIENT_ID__SHIFT 0x00000000
+#define MC_RPB_CID_QUEUE_RD__READ_QUEUE_MASK 0x00000c00L
+#define MC_RPB_CID_QUEUE_RD__READ_QUEUE__SHIFT 0x0000000a
+#define MC_RPB_CID_QUEUE_RD__WRITE_QUEUE_MASK 0x00000300L
+#define MC_RPB_CID_QUEUE_RD__WRITE_QUEUE__SHIFT 0x00000008
+#define MC_RPB_CID_QUEUE_WR__CLIENT_ID_MASK 0x000000ffL
+#define MC_RPB_CID_QUEUE_WR__CLIENT_ID__SHIFT 0x00000000
+#define MC_RPB_CID_QUEUE_WR__READ_QUEUE_MASK 0x00001800L
+#define MC_RPB_CID_QUEUE_WR__READ_QUEUE__SHIFT 0x0000000b
+#define MC_RPB_CID_QUEUE_WR__UPDATE_MASK 0x00002000L
+#define MC_RPB_CID_QUEUE_WR__UPDATE_MODE_MASK 0x00000100L
+#define MC_RPB_CID_QUEUE_WR__UPDATE_MODE__SHIFT 0x00000008
+#define MC_RPB_CID_QUEUE_WR__UPDATE__SHIFT 0x0000000d
+#define MC_RPB_CID_QUEUE_WR__WRITE_QUEUE_MASK 0x00000600L
+#define MC_RPB_CID_QUEUE_WR__WRITE_QUEUE__SHIFT 0x00000009
+#define MC_RPB_CONF__RPB_RD_PCIE_ORDER_MASK 0x00010000L
+#define MC_RPB_CONF__RPB_RD_PCIE_ORDER__SHIFT 0x00000010
+#define MC_RPB_CONF__RPB_WR_PCIE_ORDER_MASK 0x00020000L
+#define MC_RPB_CONF__RPB_WR_PCIE_ORDER__SHIFT 0x00000011
+#define MC_RPB_CONF__XPB_PCIE_ORDER_MASK 0x00008000L
+#define MC_RPB_CONF__XPB_PCIE_ORDER__SHIFT 0x0000000f
+#define MC_RPB_DBG1__DEBUG_BITS_MASK 0xfff00000L
+#define MC_RPB_DBG1__DEBUG_BITS__SHIFT 0x00000014
+#define MC_RPB_DBG1__RPB_BIF_OUTSTANDING_RD_32B_MASK 0x000fff00L
+#define MC_RPB_DBG1__RPB_BIF_OUTSTANDING_RD_32B__SHIFT 0x00000008
+#define MC_RPB_DBG1__RPB_BIF_OUTSTANDING_RD_MASK 0x000000ffL
+#define MC_RPB_DBG1__RPB_BIF_OUTSTANDING_RD__SHIFT 0x00000000
+#define MC_RPB_EFF_CNTL__RD_LAZY_TIMER_MASK 0x0000ff00L
+#define MC_RPB_EFF_CNTL__RD_LAZY_TIMER__SHIFT 0x00000008
+#define MC_RPB_EFF_CNTL__WR_LAZY_TIMER_MASK 0x000000ffL
+#define MC_RPB_EFF_CNTL__WR_LAZY_TIMER__SHIFT 0x00000000
+#define MC_RPB_IF_CONF__OUTSTANDING_WRRET_ASK_MASK 0x0000ff00L
+#define MC_RPB_IF_CONF__OUTSTANDING_WRRET_ASK__SHIFT 0x00000008
+#define MC_RPB_IF_CONF__RPB_BIF_CREDITS_MASK 0x000000ffL
+#define MC_RPB_IF_CONF__RPB_BIF_CREDITS__SHIFT 0x00000000
+#define MC_RPB_PERF_COUNTER_CNTL__CLEAR_ALL_PERF_COUNTERS_MASK 0x00000008L
+#define MC_RPB_PERF_COUNTER_CNTL__CLEAR_ALL_PERF_COUNTERS__SHIFT 0x00000003
+#define MC_RPB_PERF_COUNTER_CNTL__CLEAR_SELECTED_PERF_COUNTER_MASK 0x00000004L
+#define MC_RPB_PERF_COUNTER_CNTL__CLEAR_SELECTED_PERF_COUNTER__SHIFT 0x00000002
+#define MC_RPB_PERF_COUNTER_CNTL__ENABLE_PERF_COUNTERS_MASK 0x000001e0L
+#define MC_RPB_PERF_COUNTER_CNTL__ENABLE_PERF_COUNTERS__SHIFT 0x00000005
+#define MC_RPB_PERF_COUNTER_CNTL__PERF_COUNTER_ASSIGN_0_MASK 0x00003e00L
+#define MC_RPB_PERF_COUNTER_CNTL__PERF_COUNTER_ASSIGN_0__SHIFT 0x00000009
+#define MC_RPB_PERF_COUNTER_CNTL__PERF_COUNTER_ASSIGN_1_MASK 0x0007c000L
+#define MC_RPB_PERF_COUNTER_CNTL__PERF_COUNTER_ASSIGN_1__SHIFT 0x0000000e
+#define MC_RPB_PERF_COUNTER_CNTL__PERF_COUNTER_ASSIGN_2_MASK 0x00f80000L
+#define MC_RPB_PERF_COUNTER_CNTL__PERF_COUNTER_ASSIGN_2__SHIFT 0x00000013
+#define MC_RPB_PERF_COUNTER_CNTL__PERF_COUNTER_ASSIGN_3_MASK 0x1f000000L
+#define MC_RPB_PERF_COUNTER_CNTL__PERF_COUNTER_ASSIGN_3__SHIFT 0x00000018
+#define MC_RPB_PERF_COUNTER_CNTL__PERF_COUNTER_SELECT_MASK 0x00000003L
+#define MC_RPB_PERF_COUNTER_CNTL__PERF_COUNTER_SELECT__SHIFT 0x00000000
+#define MC_RPB_PERF_COUNTER_CNTL__STOP_ON_COUNTER_SATURATION_MASK 0x00000010L
+#define MC_RPB_PERF_COUNTER_CNTL__STOP_ON_COUNTER_SATURATION__SHIFT 0x00000004
+#define MC_RPB_PERF_COUNTER_STATUS__PERFORMANCE_COUNTER_VALUE_MASK 0xffffffffL
+#define MC_RPB_PERF_COUNTER_STATUS__PERFORMANCE_COUNTER_VALUE__SHIFT 0x00000000
+#define MC_RPB_RD_SWITCH_CNTL__QUEUE0_SWITCH_NUM_MASK 0x000000ffL
+#define MC_RPB_RD_SWITCH_CNTL__QUEUE0_SWITCH_NUM__SHIFT 0x00000000
+#define MC_RPB_RD_SWITCH_CNTL__QUEUE1_SWITCH_NUM_MASK 0x0000ff00L
+#define MC_RPB_RD_SWITCH_CNTL__QUEUE1_SWITCH_NUM__SHIFT 0x00000008
+#define MC_RPB_RD_SWITCH_CNTL__QUEUE2_SWITCH_NUM_MASK 0x00ff0000L
+#define MC_RPB_RD_SWITCH_CNTL__QUEUE2_SWITCH_NUM__SHIFT 0x00000010
+#define MC_RPB_RD_SWITCH_CNTL__QUEUE3_SWITCH_NUM_MASK 0xff000000L
+#define MC_RPB_RD_SWITCH_CNTL__QUEUE3_SWITCH_NUM__SHIFT 0x00000018
+#define MC_RPB_WR_COMBINE_CNTL__WC_ALIGN_MASK 0x00000080L
+#define MC_RPB_WR_COMBINE_CNTL__WC_ALIGN__SHIFT 0x00000007
+#define MC_RPB_WR_COMBINE_CNTL__WC_ENABLE_MASK 0x00000001L
+#define MC_RPB_WR_COMBINE_CNTL__WC_ENABLE__SHIFT 0x00000000
+#define MC_RPB_WR_COMBINE_CNTL__WC_FLUSH_TIMER_MASK 0x00000078L
+#define MC_RPB_WR_COMBINE_CNTL__WC_FLUSH_TIMER__SHIFT 0x00000003
+#define MC_RPB_WR_COMBINE_CNTL__WC_MAX_PACKET_SIZE_MASK 0x00000006L
+#define MC_RPB_WR_COMBINE_CNTL__WC_MAX_PACKET_SIZE__SHIFT 0x00000001
+#define MC_RPB_WR_SWITCH_CNTL__QUEUE0_SWITCH_NUM_MASK 0x000000ffL
+#define MC_RPB_WR_SWITCH_CNTL__QUEUE0_SWITCH_NUM__SHIFT 0x00000000
+#define MC_RPB_WR_SWITCH_CNTL__QUEUE1_SWITCH_NUM_MASK 0x0000ff00L
+#define MC_RPB_WR_SWITCH_CNTL__QUEUE1_SWITCH_NUM__SHIFT 0x00000008
+#define MC_RPB_WR_SWITCH_CNTL__QUEUE2_SWITCH_NUM_MASK 0x00ff0000L
+#define MC_RPB_WR_SWITCH_CNTL__QUEUE2_SWITCH_NUM__SHIFT 0x00000010
+#define MC_RPB_WR_SWITCH_CNTL__QUEUE3_SWITCH_NUM_MASK 0xff000000L
+#define MC_RPB_WR_SWITCH_CNTL__QUEUE3_SWITCH_NUM__SHIFT 0x00000018
+#define MC_SEQ_BIT_REMAP_B0_D0__BIT0_MASK 0x00000007L
+#define MC_SEQ_BIT_REMAP_B0_D0__BIT0__SHIFT 0x00000000
+#define MC_SEQ_BIT_REMAP_B0_D0__BIT1_MASK 0x00000038L
+#define MC_SEQ_BIT_REMAP_B0_D0__BIT1__SHIFT 0x00000003
+#define MC_SEQ_BIT_REMAP_B0_D0__BIT2_MASK 0x000001c0L
+#define MC_SEQ_BIT_REMAP_B0_D0__BIT2__SHIFT 0x00000006
+#define MC_SEQ_BIT_REMAP_B0_D0__BIT3_MASK 0x00000e00L
+#define MC_SEQ_BIT_REMAP_B0_D0__BIT3__SHIFT 0x00000009
+#define MC_SEQ_BIT_REMAP_B0_D0__BIT4_MASK 0x00007000L
+#define MC_SEQ_BIT_REMAP_B0_D0__BIT4__SHIFT 0x0000000c
+#define MC_SEQ_BIT_REMAP_B0_D0__BIT5_MASK 0x00038000L
+#define MC_SEQ_BIT_REMAP_B0_D0__BIT5__SHIFT 0x0000000f
+#define MC_SEQ_BIT_REMAP_B0_D0__BIT6_MASK 0x001c0000L
+#define MC_SEQ_BIT_REMAP_B0_D0__BIT6__SHIFT 0x00000012
+#define MC_SEQ_BIT_REMAP_B0_D0__BIT7_MASK 0x00e00000L
+#define MC_SEQ_BIT_REMAP_B0_D0__BIT7__SHIFT 0x00000015
+#define MC_SEQ_BIT_REMAP_B0_D1__BIT0_MASK 0x00000007L
+#define MC_SEQ_BIT_REMAP_B0_D1__BIT0__SHIFT 0x00000000
+#define MC_SEQ_BIT_REMAP_B0_D1__BIT1_MASK 0x00000038L
+#define MC_SEQ_BIT_REMAP_B0_D1__BIT1__SHIFT 0x00000003
+#define MC_SEQ_BIT_REMAP_B0_D1__BIT2_MASK 0x000001c0L
+#define MC_SEQ_BIT_REMAP_B0_D1__BIT2__SHIFT 0x00000006
+#define MC_SEQ_BIT_REMAP_B0_D1__BIT3_MASK 0x00000e00L
+#define MC_SEQ_BIT_REMAP_B0_D1__BIT3__SHIFT 0x00000009
+#define MC_SEQ_BIT_REMAP_B0_D1__BIT4_MASK 0x00007000L
+#define MC_SEQ_BIT_REMAP_B0_D1__BIT4__SHIFT 0x0000000c
+#define MC_SEQ_BIT_REMAP_B0_D1__BIT5_MASK 0x00038000L
+#define MC_SEQ_BIT_REMAP_B0_D1__BIT5__SHIFT 0x0000000f
+#define MC_SEQ_BIT_REMAP_B0_D1__BIT6_MASK 0x001c0000L
+#define MC_SEQ_BIT_REMAP_B0_D1__BIT6__SHIFT 0x00000012
+#define MC_SEQ_BIT_REMAP_B0_D1__BIT7_MASK 0x00e00000L
+#define MC_SEQ_BIT_REMAP_B0_D1__BIT7__SHIFT 0x00000015
+#define MC_SEQ_BIT_REMAP_B1_D0__BIT0_MASK 0x00000007L
+#define MC_SEQ_BIT_REMAP_B1_D0__BIT0__SHIFT 0x00000000
+#define MC_SEQ_BIT_REMAP_B1_D0__BIT1_MASK 0x00000038L
+#define MC_SEQ_BIT_REMAP_B1_D0__BIT1__SHIFT 0x00000003
+#define MC_SEQ_BIT_REMAP_B1_D0__BIT2_MASK 0x000001c0L
+#define MC_SEQ_BIT_REMAP_B1_D0__BIT2__SHIFT 0x00000006
+#define MC_SEQ_BIT_REMAP_B1_D0__BIT3_MASK 0x00000e00L
+#define MC_SEQ_BIT_REMAP_B1_D0__BIT3__SHIFT 0x00000009
+#define MC_SEQ_BIT_REMAP_B1_D0__BIT4_MASK 0x00007000L
+#define MC_SEQ_BIT_REMAP_B1_D0__BIT4__SHIFT 0x0000000c
+#define MC_SEQ_BIT_REMAP_B1_D0__BIT5_MASK 0x00038000L
+#define MC_SEQ_BIT_REMAP_B1_D0__BIT5__SHIFT 0x0000000f
+#define MC_SEQ_BIT_REMAP_B1_D0__BIT6_MASK 0x001c0000L
+#define MC_SEQ_BIT_REMAP_B1_D0__BIT6__SHIFT 0x00000012
+#define MC_SEQ_BIT_REMAP_B1_D0__BIT7_MASK 0x00e00000L
+#define MC_SEQ_BIT_REMAP_B1_D0__BIT7__SHIFT 0x00000015
+#define MC_SEQ_BIT_REMAP_B1_D1__BIT0_MASK 0x00000007L
+#define MC_SEQ_BIT_REMAP_B1_D1__BIT0__SHIFT 0x00000000
+#define MC_SEQ_BIT_REMAP_B1_D1__BIT1_MASK 0x00000038L
+#define MC_SEQ_BIT_REMAP_B1_D1__BIT1__SHIFT 0x00000003
+#define MC_SEQ_BIT_REMAP_B1_D1__BIT2_MASK 0x000001c0L
+#define MC_SEQ_BIT_REMAP_B1_D1__BIT2__SHIFT 0x00000006
+#define MC_SEQ_BIT_REMAP_B1_D1__BIT3_MASK 0x00000e00L
+#define MC_SEQ_BIT_REMAP_B1_D1__BIT3__SHIFT 0x00000009
+#define MC_SEQ_BIT_REMAP_B1_D1__BIT4_MASK 0x00007000L
+#define MC_SEQ_BIT_REMAP_B1_D1__BIT4__SHIFT 0x0000000c
+#define MC_SEQ_BIT_REMAP_B1_D1__BIT5_MASK 0x00038000L
+#define MC_SEQ_BIT_REMAP_B1_D1__BIT5__SHIFT 0x0000000f
+#define MC_SEQ_BIT_REMAP_B1_D1__BIT6_MASK 0x001c0000L
+#define MC_SEQ_BIT_REMAP_B1_D1__BIT6__SHIFT 0x00000012
+#define MC_SEQ_BIT_REMAP_B1_D1__BIT7_MASK 0x00e00000L
+#define MC_SEQ_BIT_REMAP_B1_D1__BIT7__SHIFT 0x00000015
+#define MC_SEQ_BIT_REMAP_B2_D0__BIT0_MASK 0x00000007L
+#define MC_SEQ_BIT_REMAP_B2_D0__BIT0__SHIFT 0x00000000
+#define MC_SEQ_BIT_REMAP_B2_D0__BIT1_MASK 0x00000038L
+#define MC_SEQ_BIT_REMAP_B2_D0__BIT1__SHIFT 0x00000003
+#define MC_SEQ_BIT_REMAP_B2_D0__BIT2_MASK 0x000001c0L
+#define MC_SEQ_BIT_REMAP_B2_D0__BIT2__SHIFT 0x00000006
+#define MC_SEQ_BIT_REMAP_B2_D0__BIT3_MASK 0x00000e00L
+#define MC_SEQ_BIT_REMAP_B2_D0__BIT3__SHIFT 0x00000009
+#define MC_SEQ_BIT_REMAP_B2_D0__BIT4_MASK 0x00007000L
+#define MC_SEQ_BIT_REMAP_B2_D0__BIT4__SHIFT 0x0000000c
+#define MC_SEQ_BIT_REMAP_B2_D0__BIT5_MASK 0x00038000L
+#define MC_SEQ_BIT_REMAP_B2_D0__BIT5__SHIFT 0x0000000f
+#define MC_SEQ_BIT_REMAP_B2_D0__BIT6_MASK 0x001c0000L
+#define MC_SEQ_BIT_REMAP_B2_D0__BIT6__SHIFT 0x00000012
+#define MC_SEQ_BIT_REMAP_B2_D0__BIT7_MASK 0x00e00000L
+#define MC_SEQ_BIT_REMAP_B2_D0__BIT7__SHIFT 0x00000015
+#define MC_SEQ_BIT_REMAP_B2_D1__BIT0_MASK 0x00000007L
+#define MC_SEQ_BIT_REMAP_B2_D1__BIT0__SHIFT 0x00000000
+#define MC_SEQ_BIT_REMAP_B2_D1__BIT1_MASK 0x00000038L
+#define MC_SEQ_BIT_REMAP_B2_D1__BIT1__SHIFT 0x00000003
+#define MC_SEQ_BIT_REMAP_B2_D1__BIT2_MASK 0x000001c0L
+#define MC_SEQ_BIT_REMAP_B2_D1__BIT2__SHIFT 0x00000006
+#define MC_SEQ_BIT_REMAP_B2_D1__BIT3_MASK 0x00000e00L
+#define MC_SEQ_BIT_REMAP_B2_D1__BIT3__SHIFT 0x00000009
+#define MC_SEQ_BIT_REMAP_B2_D1__BIT4_MASK 0x00007000L
+#define MC_SEQ_BIT_REMAP_B2_D1__BIT4__SHIFT 0x0000000c
+#define MC_SEQ_BIT_REMAP_B2_D1__BIT5_MASK 0x00038000L
+#define MC_SEQ_BIT_REMAP_B2_D1__BIT5__SHIFT 0x0000000f
+#define MC_SEQ_BIT_REMAP_B2_D1__BIT6_MASK 0x001c0000L
+#define MC_SEQ_BIT_REMAP_B2_D1__BIT6__SHIFT 0x00000012
+#define MC_SEQ_BIT_REMAP_B2_D1__BIT7_MASK 0x00e00000L
+#define MC_SEQ_BIT_REMAP_B2_D1__BIT7__SHIFT 0x00000015
+#define MC_SEQ_BIT_REMAP_B3_D0__BIT0_MASK 0x00000007L
+#define MC_SEQ_BIT_REMAP_B3_D0__BIT0__SHIFT 0x00000000
+#define MC_SEQ_BIT_REMAP_B3_D0__BIT1_MASK 0x00000038L
+#define MC_SEQ_BIT_REMAP_B3_D0__BIT1__SHIFT 0x00000003
+#define MC_SEQ_BIT_REMAP_B3_D0__BIT2_MASK 0x000001c0L
+#define MC_SEQ_BIT_REMAP_B3_D0__BIT2__SHIFT 0x00000006
+#define MC_SEQ_BIT_REMAP_B3_D0__BIT3_MASK 0x00000e00L
+#define MC_SEQ_BIT_REMAP_B3_D0__BIT3__SHIFT 0x00000009
+#define MC_SEQ_BIT_REMAP_B3_D0__BIT4_MASK 0x00007000L
+#define MC_SEQ_BIT_REMAP_B3_D0__BIT4__SHIFT 0x0000000c
+#define MC_SEQ_BIT_REMAP_B3_D0__BIT5_MASK 0x00038000L
+#define MC_SEQ_BIT_REMAP_B3_D0__BIT5__SHIFT 0x0000000f
+#define MC_SEQ_BIT_REMAP_B3_D0__BIT6_MASK 0x001c0000L
+#define MC_SEQ_BIT_REMAP_B3_D0__BIT6__SHIFT 0x00000012
+#define MC_SEQ_BIT_REMAP_B3_D0__BIT7_MASK 0x00e00000L
+#define MC_SEQ_BIT_REMAP_B3_D0__BIT7__SHIFT 0x00000015
+#define MC_SEQ_BIT_REMAP_B3_D1__BIT0_MASK 0x00000007L
+#define MC_SEQ_BIT_REMAP_B3_D1__BIT0__SHIFT 0x00000000
+#define MC_SEQ_BIT_REMAP_B3_D1__BIT1_MASK 0x00000038L
+#define MC_SEQ_BIT_REMAP_B3_D1__BIT1__SHIFT 0x00000003
+#define MC_SEQ_BIT_REMAP_B3_D1__BIT2_MASK 0x000001c0L
+#define MC_SEQ_BIT_REMAP_B3_D1__BIT2__SHIFT 0x00000006
+#define MC_SEQ_BIT_REMAP_B3_D1__BIT3_MASK 0x00000e00L
+#define MC_SEQ_BIT_REMAP_B3_D1__BIT3__SHIFT 0x00000009
+#define MC_SEQ_BIT_REMAP_B3_D1__BIT4_MASK 0x00007000L
+#define MC_SEQ_BIT_REMAP_B3_D1__BIT4__SHIFT 0x0000000c
+#define MC_SEQ_BIT_REMAP_B3_D1__BIT5_MASK 0x00038000L
+#define MC_SEQ_BIT_REMAP_B3_D1__BIT5__SHIFT 0x0000000f
+#define MC_SEQ_BIT_REMAP_B3_D1__BIT6_MASK 0x001c0000L
+#define MC_SEQ_BIT_REMAP_B3_D1__BIT6__SHIFT 0x00000012
+#define MC_SEQ_BIT_REMAP_B3_D1__BIT7_MASK 0x00e00000L
+#define MC_SEQ_BIT_REMAP_B3_D1__BIT7__SHIFT 0x00000015
+#define MC_SEQ_BYTE_REMAP_D0__BYTE0_MASK 0x00000003L
+#define MC_SEQ_BYTE_REMAP_D0__BYTE0__SHIFT 0x00000000
+#define MC_SEQ_BYTE_REMAP_D0__BYTE1_MASK 0x0000000cL
+#define MC_SEQ_BYTE_REMAP_D0__BYTE1__SHIFT 0x00000002
+#define MC_SEQ_BYTE_REMAP_D0__BYTE2_MASK 0x00000030L
+#define MC_SEQ_BYTE_REMAP_D0__BYTE2__SHIFT 0x00000004
+#define MC_SEQ_BYTE_REMAP_D0__BYTE3_MASK 0x000000c0L
+#define MC_SEQ_BYTE_REMAP_D0__BYTE3__SHIFT 0x00000006
+#define MC_SEQ_BYTE_REMAP_D1__BYTE0_MASK 0x00000003L
+#define MC_SEQ_BYTE_REMAP_D1__BYTE0__SHIFT 0x00000000
+#define MC_SEQ_BYTE_REMAP_D1__BYTE1_MASK 0x0000000cL
+#define MC_SEQ_BYTE_REMAP_D1__BYTE1__SHIFT 0x00000002
+#define MC_SEQ_BYTE_REMAP_D1__BYTE2_MASK 0x00000030L
+#define MC_SEQ_BYTE_REMAP_D1__BYTE2__SHIFT 0x00000004
+#define MC_SEQ_BYTE_REMAP_D1__BYTE3_MASK 0x000000c0L
+#define MC_SEQ_BYTE_REMAP_D1__BYTE3__SHIFT 0x00000006
+#define MC_SEQ_CAS_TIMING_LP__TCCDL_MASK 0x00000e00L
+#define MC_SEQ_CAS_TIMING_LP__TCCDL__SHIFT 0x00000009
+#define MC_SEQ_CAS_TIMING_LP__TCL_MASK 0x1f000000L
+#define MC_SEQ_CAS_TIMING_LP__TCL__SHIFT 0x00000018
+#define MC_SEQ_CAS_TIMING_LP__TNOPR_MASK 0x0000000cL
+#define MC_SEQ_CAS_TIMING_LP__TNOPR__SHIFT 0x00000002
+#define MC_SEQ_CAS_TIMING_LP__TNOPW_MASK 0x00000003L
+#define MC_SEQ_CAS_TIMING_LP__TNOPW__SHIFT 0x00000000
+#define MC_SEQ_CAS_TIMING_LP__TR2R_MASK 0x0000f000L
+#define MC_SEQ_CAS_TIMING_LP__TR2R__SHIFT 0x0000000c
+#define MC_SEQ_CAS_TIMING_LP__TR2W_MASK 0x000001f0L
+#define MC_SEQ_CAS_TIMING_LP__TR2W__SHIFT 0x00000004
+#define MC_SEQ_CAS_TIMING_LP__TW2R_MASK 0x001f0000L
+#define MC_SEQ_CAS_TIMING_LP__TW2R__SHIFT 0x00000010
+#define MC_SEQ_CAS_TIMING__TCCDL_MASK 0x00000e00L
+#define MC_SEQ_CAS_TIMING__TCCDL__SHIFT 0x00000009
+#define MC_SEQ_CAS_TIMING__TCL_MASK 0x1f000000L
+#define MC_SEQ_CAS_TIMING__TCL__SHIFT 0x00000018
+#define MC_SEQ_CAS_TIMING__TNOPR_MASK 0x0000000cL
+#define MC_SEQ_CAS_TIMING__TNOPR__SHIFT 0x00000002
+#define MC_SEQ_CAS_TIMING__TNOPW_MASK 0x00000003L
+#define MC_SEQ_CAS_TIMING__TNOPW__SHIFT 0x00000000
+#define MC_SEQ_CAS_TIMING__TR2R_MASK 0x0000f000L
+#define MC_SEQ_CAS_TIMING__TR2R__SHIFT 0x0000000c
+#define MC_SEQ_CAS_TIMING__TR2W_MASK 0x000001f0L
+#define MC_SEQ_CAS_TIMING__TR2W__SHIFT 0x00000004
+#define MC_SEQ_CAS_TIMING__TW2R_MASK 0x001f0000L
+#define MC_SEQ_CAS_TIMING__TW2R__SHIFT 0x00000010
+#define MC_SEQ_CG__CG_SEQ_REQ_MASK 0x000000ffL
+#define MC_SEQ_CG__CG_SEQ_REQ__SHIFT 0x00000000
+#define MC_SEQ_CG__CG_SEQ_RESP_MASK 0x0000ff00L
+#define MC_SEQ_CG__CG_SEQ_RESP__SHIFT 0x00000008
+#define MC_SEQ_CG__SEQ_CG_REQ_MASK 0x00ff0000L
+#define MC_SEQ_CG__SEQ_CG_REQ__SHIFT 0x00000010
+#define MC_SEQ_CG__SEQ_CG_RESP_MASK 0xff000000L
+#define MC_SEQ_CG__SEQ_CG_RESP__SHIFT 0x00000018
+#define MC_SEQ_CMD__ADR_MASK 0x0000ffffL
+#define MC_SEQ_CMD__ADR_MSB0_MASK 0x20000000L
+#define MC_SEQ_CMD__ADR_MSB0__SHIFT 0x0000001d
+#define MC_SEQ_CMD__ADR_MSB1_MASK 0x10000000L
+#define MC_SEQ_CMD__ADR_MSB1__SHIFT 0x0000001c
+#define MC_SEQ_CMD__ADR__SHIFT 0x00000000
+#define MC_SEQ_CMD__CHAN0_MASK 0x01000000L
+#define MC_SEQ_CMD__CHAN0__SHIFT 0x00000018
+#define MC_SEQ_CMD__CHAN1_MASK 0x02000000L
+#define MC_SEQ_CMD__CHAN1__SHIFT 0x00000019
+#define MC_SEQ_CMD__CSB_MASK 0x00600000L
+#define MC_SEQ_CMD__CSB__SHIFT 0x00000015
+#define MC_SEQ_CMD__END_MASK 0x00100000L
+#define MC_SEQ_CMD__END__SHIFT 0x00000014
+#define MC_SEQ_CMD__MOP_MASK 0x000f0000L
+#define MC_SEQ_CMD__MOP__SHIFT 0x00000010
+#define MC_SEQ_CNTL_2__ARB_RTDAT_WMK_MSB_MASK 0x00000300L
+#define MC_SEQ_CNTL_2__ARB_RTDAT_WMK_MSB__SHIFT 0x00000008
+#define MC_SEQ_CNTL_2__DRST_NSTR_MASK 0x0000fc00L
+#define MC_SEQ_CNTL_2__DRST_NSTR__SHIFT 0x0000000a
+#define MC_SEQ_CNTL_2__DRST_PSTR_MASK 0x003f0000L
+#define MC_SEQ_CNTL_2__DRST_PSTR__SHIFT 0x00000010
+#define MC_SEQ_CNTL_2__PLL_RX_PWRON_D0_MASK 0x0f000000L
+#define MC_SEQ_CNTL_2__PLL_RX_PWRON_D0__SHIFT 0x00000018
+#define MC_SEQ_CNTL_2__PLL_RX_PWRON_D1_MASK 0xf0000000L
+#define MC_SEQ_CNTL_2__PLL_RX_PWRON_D1__SHIFT 0x0000001c
+#define MC_SEQ_CNTL_2__PLL_TX_PWRON_D0_MASK 0x00400000L
+#define MC_SEQ_CNTL_2__PLL_TX_PWRON_D0__SHIFT 0x00000016
+#define MC_SEQ_CNTL_2__PLL_TX_PWRON_D1_MASK 0x00800000L
+#define MC_SEQ_CNTL_2__PLL_TX_PWRON_D1__SHIFT 0x00000017
+#define MC_SEQ_CNTL__ARB_REQCMD_WMK_MASK 0x00f00000L
+#define MC_SEQ_CNTL__ARB_REQCMD_WMK__SHIFT 0x00000014
+#define MC_SEQ_CNTL__ARB_REQDAT_WMK_MASK 0x0f000000L
+#define MC_SEQ_CNTL__ARB_REQDAT_WMK__SHIFT 0x00000018
+#define MC_SEQ_CNTL__ARB_RTDAT_WMK_MASK 0xf0000000L
+#define MC_SEQ_CNTL__ARB_RTDAT_WMK__SHIFT 0x0000001c
+#define MC_SEQ_CNTL__BANKGROUP_ENB_MASK 0x00040000L
+#define MC_SEQ_CNTL__BANKGROUP_ENB__SHIFT 0x00000012
+#define MC_SEQ_CNTL__BANKGROUP_SIZE_MASK 0x00020000L
+#define MC_SEQ_CNTL__BANKGROUP_SIZE__SHIFT 0x00000011
+#define MC_SEQ_CNTL__CHANNEL_DISABLE_MASK 0x00000300L
+#define MC_SEQ_CNTL__CHANNEL_DISABLE__SHIFT 0x00000008
+#define MC_SEQ_CNTL__DAT_INV_MASK 0x00000040L
+#define MC_SEQ_CNTL__DAT_INV__SHIFT 0x00000006
+#define MC_SEQ_CNTL__MEM_ADDR_MAP_BANK_MASK 0x0000000cL
+#define MC_SEQ_CNTL__MEM_ADDR_MAP_BANK__SHIFT 0x00000002
+#define MC_SEQ_CNTL__MEM_ADDR_MAP_COLS_MASK 0x00000003L
+#define MC_SEQ_CNTL__MEM_ADDR_MAP_COLS__SHIFT 0x00000000
+#define MC_SEQ_CNTL__MSK_DF1_MASK 0x00000080L
+#define MC_SEQ_CNTL__MSK_DF1__SHIFT 0x00000007
+#define MC_SEQ_CNTL__MSKOFF_DAT_TH_MASK 0x00008000L
+#define MC_SEQ_CNTL__MSKOFF_DAT_TH__SHIFT 0x0000000f
+#define MC_SEQ_CNTL__MSKOFF_DAT_TL_MASK 0x00004000L
+#define MC_SEQ_CNTL__MSKOFF_DAT_TL__SHIFT 0x0000000e
+#define MC_SEQ_CNTL__RET_HOLD_EOP_MASK 0x00010000L
+#define MC_SEQ_CNTL__RET_HOLD_EOP__SHIFT 0x00000010
+#define MC_SEQ_CNTL__RTR_OVERRIDE_MASK 0x00080000L
+#define MC_SEQ_CNTL__RTR_OVERRIDE__SHIFT 0x00000013
+#define MC_SEQ_CNTL__SAFE_MODE_MASK 0x00000030L
+#define MC_SEQ_CNTL__SAFE_MODE__SHIFT 0x00000004
+#define MC_SEQ_DRAM_2__ADBI_ACT_MASK 0x04000000L
+#define MC_SEQ_DRAM_2__ADBI_ACT__SHIFT 0x0000001a
+#define MC_SEQ_DRAM_2__ADBI_DF1_MASK 0x02000000L
+#define MC_SEQ_DRAM_2__ADBI_DF1__SHIFT 0x00000019
+#define MC_SEQ_DRAM_2__ADR_DBI_ACM_MASK 0x00000004L
+#define MC_SEQ_DRAM_2__ADR_DBI_ACM__SHIFT 0x00000002
+#define MC_SEQ_DRAM_2__ADR_DBI_MASK 0x00000002L
+#define MC_SEQ_DRAM_2__ADR_DBI__SHIFT 0x00000001
+#define MC_SEQ_DRAM_2__ADR_DDR_MASK 0x00000001L
+#define MC_SEQ_DRAM_2__ADR_DDR__SHIFT 0x00000000
+#define MC_SEQ_DRAM_2__BNK_MRS_MASK 0x00002000L
+#define MC_SEQ_DRAM_2__BNK_MRS__SHIFT 0x0000000d
+#define MC_SEQ_DRAM_2__CMD_QDR_MASK 0x00000008L
+#define MC_SEQ_DRAM_2__CMD_QDR__SHIFT 0x00000003
+#define MC_SEQ_DRAM_2__CS_BY16_MASK 0x80000000L
+#define MC_SEQ_DRAM_2__CS_BY16__SHIFT 0x0000001f
+#define MC_SEQ_DRAM_2__DAT_QDR_MASK 0x00000010L
+#define MC_SEQ_DRAM_2__DAT_QDR__SHIFT 0x00000004
+#define MC_SEQ_DRAM_2__DBI_ACT_MASK 0x10000000L
+#define MC_SEQ_DRAM_2__DBI_ACT__SHIFT 0x0000001c
+#define MC_SEQ_DRAM_2__DBI_DF1_MASK 0x08000000L
+#define MC_SEQ_DRAM_2__DBI_DF1__SHIFT 0x0000001b
+#define MC_SEQ_DRAM_2__DBI_EDC_DF1_MASK 0x20000000L
+#define MC_SEQ_DRAM_2__DBI_EDC_DF1__SHIFT 0x0000001d
+#define MC_SEQ_DRAM_2__DBI_OVR_MASK 0x00004000L
+#define MC_SEQ_DRAM_2__DBI_OVR__SHIFT 0x0000000e
+#define MC_SEQ_DRAM_2__DLL_EST_MASK 0x00001000L
+#define MC_SEQ_DRAM_2__DLL_EST__SHIFT 0x0000000c
+#define MC_SEQ_DRAM_2__DQM_EST_MASK 0x00000080L
+#define MC_SEQ_DRAM_2__DQM_EST__SHIFT 0x00000007
+#define MC_SEQ_DRAM_2__PCH_BNK_MASK 0x01000000L
+#define MC_SEQ_DRAM_2__PCH_BNK__SHIFT 0x00000018
+#define MC_SEQ_DRAM_2__PLL_CLR_MASK 0x00000800L
+#define MC_SEQ_DRAM_2__PLL_CLR__SHIFT 0x0000000b
+#define MC_SEQ_DRAM_2__PLL_CNT_MASK 0x00ff0000L
+#define MC_SEQ_DRAM_2__PLL_CNT__SHIFT 0x00000010
+#define MC_SEQ_DRAM_2__PLL_EST_MASK 0x00000400L
+#define MC_SEQ_DRAM_2__PLL_EST__SHIFT 0x0000000a
+#define MC_SEQ_DRAM_2__RDAT_EDC_MASK 0x00000040L
+#define MC_SEQ_DRAM_2__RDAT_EDC__SHIFT 0x00000006
+#define MC_SEQ_DRAM_2__RD_DQS_MASK 0x00000100L
+#define MC_SEQ_DRAM_2__RD_DQS__SHIFT 0x00000008
+#define MC_SEQ_DRAM_2__TESTCHIP_EN_MASK 0x40000000L
+#define MC_SEQ_DRAM_2__TESTCHIP_EN__SHIFT 0x0000001e
+#define MC_SEQ_DRAM_2__TRI_CLK_MASK 0x00008000L
+#define MC_SEQ_DRAM_2__TRI_CLK__SHIFT 0x0000000f
+#define MC_SEQ_DRAM_2__WDAT_EDC_MASK 0x00000020L
+#define MC_SEQ_DRAM_2__WDAT_EDC__SHIFT 0x00000005
+#define MC_SEQ_DRAM_2__WR_DQS_MASK 0x00000200L
+#define MC_SEQ_DRAM_2__WR_DQS__SHIFT 0x00000009
+#define MC_SEQ_DRAM__ADR_2CK_MASK 0x00000001L
+#define MC_SEQ_DRAM__ADR_2CK__SHIFT 0x00000000
+#define MC_SEQ_DRAM__ADR_DF1_MASK 0x00000004L
+#define MC_SEQ_DRAM__ADR_DF1__SHIFT 0x00000002
+#define MC_SEQ_DRAM__ADR_MUX_MASK 0x00000002L
+#define MC_SEQ_DRAM__ADR_MUX__SHIFT 0x00000001
+#define MC_SEQ_DRAM__AP8_MASK 0x00000008L
+#define MC_SEQ_DRAM__AP8__SHIFT 0x00000003
+#define MC_SEQ_DRAM__BO4_MASK 0x00004000L
+#define MC_SEQ_DRAM__BO4__SHIFT 0x0000000e
+#define MC_SEQ_DRAM__CKE_ACT_MASK 0x00002000L
+#define MC_SEQ_DRAM__CKE_ACT__SHIFT 0x0000000d
+#define MC_SEQ_DRAM__CKE_DYN_MASK 0x00001000L
+#define MC_SEQ_DRAM__CKE_DYN__SHIFT 0x0000000c
+#define MC_SEQ_DRAM__DAT_DF1_MASK 0x00000010L
+#define MC_SEQ_DRAM__DAT_DF1__SHIFT 0x00000004
+#define MC_SEQ_DRAM__DAT_INV_MASK 0x01000000L
+#define MC_SEQ_DRAM__DAT_INV__SHIFT 0x00000018
+#define MC_SEQ_DRAM__DLL_CLR_MASK 0x00008000L
+#define MC_SEQ_DRAM__DLL_CLR__SHIFT 0x0000000f
+#define MC_SEQ_DRAM__DLL_CNT_MASK 0x00ff0000L
+#define MC_SEQ_DRAM__DLL_CNT__SHIFT 0x00000010
+#define MC_SEQ_DRAM__DQM_ACT_MASK 0x00000080L
+#define MC_SEQ_DRAM__DQM_ACT__SHIFT 0x00000007
+#define MC_SEQ_DRAM__DQM_DF1_MASK 0x00000040L
+#define MC_SEQ_DRAM__DQM_DF1__SHIFT 0x00000006
+#define MC_SEQ_DRAM__DQS_DF1_MASK 0x00000020L
+#define MC_SEQ_DRAM__DQS_DF1__SHIFT 0x00000005
+#define MC_SEQ_DRAM_ERROR_INSERTION__RX_MASK 0xffff0000L
+#define MC_SEQ_DRAM_ERROR_INSERTION__RX__SHIFT 0x00000010
+#define MC_SEQ_DRAM_ERROR_INSERTION__TX_MASK 0x0000ffffL
+#define MC_SEQ_DRAM_ERROR_INSERTION__TX__SHIFT 0x00000000
+#define MC_SEQ_DRAM__INV_ACM_MASK 0x02000000L
+#define MC_SEQ_DRAM__INV_ACM__SHIFT 0x00000019
+#define MC_SEQ_DRAM__ODT_ACT_MASK 0x08000000L
+#define MC_SEQ_DRAM__ODT_ACT__SHIFT 0x0000001b
+#define MC_SEQ_DRAM__ODT_ENB_MASK 0x04000000L
+#define MC_SEQ_DRAM__ODT_ENB__SHIFT 0x0000001a
+#define MC_SEQ_DRAM__RST_CTL_MASK 0x10000000L
+#define MC_SEQ_DRAM__RST_CTL__SHIFT 0x0000001c
+#define MC_SEQ_DRAM__STB_CNT_MASK 0x00000f00L
+#define MC_SEQ_DRAM__STB_CNT__SHIFT 0x00000008
+#define MC_SEQ_DRAM__TRI_CKE_MASK 0x40000000L
+#define MC_SEQ_DRAM__TRI_CKE__SHIFT 0x0000001e
+#define MC_SEQ_DRAM__TRI_MIO_DYN_MASK 0x20000000L
+#define MC_SEQ_DRAM__TRI_MIO_DYN__SHIFT 0x0000001d
+#define MC_SEQ_FIFO_CTL__CG_DIS_D0_MASK 0x00000100L
+#define MC_SEQ_FIFO_CTL__CG_DIS_D0__SHIFT 0x00000008
+#define MC_SEQ_FIFO_CTL__CG_DIS_D1_MASK 0x00000200L
+#define MC_SEQ_FIFO_CTL__CG_DIS_D1__SHIFT 0x00000009
+#define MC_SEQ_FIFO_CTL__R_LD_INIT_MASK 0x00000030L
+#define MC_SEQ_FIFO_CTL__R_LD_INIT__SHIFT 0x00000004
+#define MC_SEQ_FIFO_CTL__R_SYC_SEL_MASK 0x000000c0L
+#define MC_SEQ_FIFO_CTL__R_SYC_SEL__SHIFT 0x00000006
+#define MC_SEQ_FIFO_CTL__SYC_DLY_MASK 0x00007000L
+#define MC_SEQ_FIFO_CTL__SYC_DLY__SHIFT 0x0000000c
+#define MC_SEQ_FIFO_CTL__W_ASYC_EXT_MASK 0x00030000L
+#define MC_SEQ_FIFO_CTL__W_ASYC_EXT__SHIFT 0x00000010
+#define MC_SEQ_FIFO_CTL__W_DSYC_EXT_MASK 0x000c0000L
+#define MC_SEQ_FIFO_CTL__W_DSYC_EXT__SHIFT 0x00000012
+#define MC_SEQ_FIFO_CTL__W_LD_INIT_D0_MASK 0x00000003L
+#define MC_SEQ_FIFO_CTL__W_LD_INIT_D0__SHIFT 0x00000000
+#define MC_SEQ_FIFO_CTL__W_LD_INIT_D1_MASK 0x00000c00L
+#define MC_SEQ_FIFO_CTL__W_LD_INIT_D1__SHIFT 0x0000000a
+#define MC_SEQ_FIFO_CTL__W_SYC_SEL_MASK 0x0000000cL
+#define MC_SEQ_FIFO_CTL__W_SYC_SEL__SHIFT 0x00000002
+#define MC_SEQ_IO_DEBUG_DATA__IO_DEBUG_DATA_MASK 0xffffffffL
+#define MC_SEQ_IO_DEBUG_DATA__IO_DEBUG_DATA__SHIFT 0x00000000
+#define MC_SEQ_IO_DEBUG_INDEX__IO_DEBUG_INDEX_MASK 0x000001ffL
+#define MC_SEQ_IO_DEBUG_INDEX__IO_DEBUG_INDEX__SHIFT 0x00000000
+#define MC_SEQ_IO_RDBI__MASK_MASK 0xffffffffL
+#define MC_SEQ_IO_RDBI__MASK__SHIFT 0x00000000
+#define MC_SEQ_IO_REDC__EDC_MASK 0xffffffffL
+#define MC_SEQ_IO_REDC__EDC__SHIFT 0x00000000
+#define MC_SEQ_IO_RESERVE_D0__APHY_RSV_MASK 0xff000000L
+#define MC_SEQ_IO_RESERVE_D0__APHY_RSV__SHIFT 0x00000018
+#define MC_SEQ_IO_RESERVE_D0__DPHY0_RSV_MASK 0x00000fffL
+#define MC_SEQ_IO_RESERVE_D0__DPHY0_RSV__SHIFT 0x00000000
+#define MC_SEQ_IO_RESERVE_D0__DPHY1_RSV_MASK 0x00fff000L
+#define MC_SEQ_IO_RESERVE_D0__DPHY1_RSV__SHIFT 0x0000000c
+#define MC_SEQ_IO_RESERVE_D1__APHY_RSV_MASK 0xff000000L
+#define MC_SEQ_IO_RESERVE_D1__APHY_RSV__SHIFT 0x00000018
+#define MC_SEQ_IO_RESERVE_D1__DPHY0_RSV_MASK 0x00000fffL
+#define MC_SEQ_IO_RESERVE_D1__DPHY0_RSV__SHIFT 0x00000000
+#define MC_SEQ_IO_RESERVE_D1__DPHY1_RSV_MASK 0x00fff000L
+#define MC_SEQ_IO_RESERVE_D1__DPHY1_RSV__SHIFT 0x0000000c
+#define MC_SEQ_IO_RWORD0__RDATA_MASK 0xffffffffL
+#define MC_SEQ_IO_RWORD0__RDATA__SHIFT 0x00000000
+#define MC_SEQ_IO_RWORD1__RDATA_MASK 0xffffffffL
+#define MC_SEQ_IO_RWORD1__RDATA__SHIFT 0x00000000
+#define MC_SEQ_IO_RWORD2__RDATA_MASK 0xffffffffL
+#define MC_SEQ_IO_RWORD2__RDATA__SHIFT 0x00000000
+#define MC_SEQ_IO_RWORD3__RDATA_MASK 0xffffffffL
+#define MC_SEQ_IO_RWORD3__RDATA__SHIFT 0x00000000
+#define MC_SEQ_IO_RWORD4__RDATA_MASK 0xffffffffL
+#define MC_SEQ_IO_RWORD4__RDATA__SHIFT 0x00000000
+#define MC_SEQ_IO_RWORD5__RDATA_MASK 0xffffffffL
+#define MC_SEQ_IO_RWORD5__RDATA__SHIFT 0x00000000
+#define MC_SEQ_IO_RWORD6__RDATA_MASK 0xffffffffL
+#define MC_SEQ_IO_RWORD6__RDATA__SHIFT 0x00000000
+#define MC_SEQ_IO_RWORD7__RDATA_MASK 0xffffffffL
+#define MC_SEQ_IO_RWORD7__RDATA__SHIFT 0x00000000
+#define MC_SEQ_MISC0__VALUE_MASK 0xffffffffL
+#define MC_SEQ_MISC0__VALUE__SHIFT 0x00000000
+#define MC_SEQ_MISC1__VALUE_MASK 0xffffffffL
+#define MC_SEQ_MISC1__VALUE__SHIFT 0x00000000
+#define MC_SEQ_MISC3__VALUE_MASK 0xffffffffL
+#define MC_SEQ_MISC3__VALUE__SHIFT 0x00000000
+#define MC_SEQ_MISC4__VALUE_MASK 0xffffffffL
+#define MC_SEQ_MISC4__VALUE__SHIFT 0x00000000
+#define MC_SEQ_MISC5__VALUE_MASK 0xffffffffL
+#define MC_SEQ_MISC5__VALUE__SHIFT 0x00000000
+#define MC_SEQ_MISC6__VALUE_MASK 0xffffffffL
+#define MC_SEQ_MISC6__VALUE__SHIFT 0x00000000
+#define MC_SEQ_MISC7__VALUE_MASK 0xffffffffL
+#define MC_SEQ_MISC7__VALUE__SHIFT 0x00000000
+#define MC_SEQ_MISC8__VALUE_MASK 0xffffffffL
+#define MC_SEQ_MISC8__VALUE__SHIFT 0x00000000
+#define MC_SEQ_MISC9__VALUE_MASK 0xffffffffL
+#define MC_SEQ_MISC9__VALUE__SHIFT 0x00000000
+#define MC_SEQ_MISC_TIMING2__FAW_MASK 0x00001f00L
+#define MC_SEQ_MISC_TIMING2__FAW__SHIFT 0x00000008
+#define MC_SEQ_MISC_TIMING2_LP__FAW_MASK 0x00001f00L
+#define MC_SEQ_MISC_TIMING2_LP__FAW__SHIFT 0x00000008
+#define MC_SEQ_MISC_TIMING2_LP__PA2RDATA_MASK 0x00000007L
+#define MC_SEQ_MISC_TIMING2_LP__PA2RDATA__SHIFT 0x00000000
+#define MC_SEQ_MISC_TIMING2_LP__PA2WDATA_MASK 0x00000070L
+#define MC_SEQ_MISC_TIMING2_LP__PA2WDATA__SHIFT 0x00000004
+#define MC_SEQ_MISC_TIMING2_LP__TADR_MASK 0x00e00000L
+#define MC_SEQ_MISC_TIMING2_LP__TADR__SHIFT 0x00000015
+#define MC_SEQ_MISC_TIMING2_LP__TFCKTR_MASK 0x0f000000L
+#define MC_SEQ_MISC_TIMING2_LP__TFCKTR__SHIFT 0x00000018
+#define MC_SEQ_MISC_TIMING2_LP__TREDC_MASK 0x0000e000L
+#define MC_SEQ_MISC_TIMING2_LP__TREDC__SHIFT 0x0000000d
+#define MC_SEQ_MISC_TIMING2_LP__TWDATATR_MASK 0xf0000000L
+#define MC_SEQ_MISC_TIMING2_LP__TWDATATR__SHIFT 0x0000001c
+#define MC_SEQ_MISC_TIMING2_LP__TWEDC_MASK 0x001f0000L
+#define MC_SEQ_MISC_TIMING2_LP__TWEDC__SHIFT 0x00000010
+#define MC_SEQ_MISC_TIMING2__PA2RDATA_MASK 0x00000007L
+#define MC_SEQ_MISC_TIMING2__PA2RDATA__SHIFT 0x00000000
+#define MC_SEQ_MISC_TIMING2__PA2WDATA_MASK 0x00000070L
+#define MC_SEQ_MISC_TIMING2__PA2WDATA__SHIFT 0x00000004
+#define MC_SEQ_MISC_TIMING2__T32AW_MASK 0x01e00000L
+#define MC_SEQ_MISC_TIMING2__T32AW__SHIFT 0x00000015
+#define MC_SEQ_MISC_TIMING2__TREDC_MASK 0x0000e000L
+#define MC_SEQ_MISC_TIMING2__TREDC__SHIFT 0x0000000d
+#define MC_SEQ_MISC_TIMING2__TWDATATR_MASK 0xf0000000L
+#define MC_SEQ_MISC_TIMING2__TWDATATR__SHIFT 0x0000001c
+#define MC_SEQ_MISC_TIMING2__TWEDC_MASK 0x001f0000L
+#define MC_SEQ_MISC_TIMING2__TWEDC__SHIFT 0x00000010
+#define MC_SEQ_MISC_TIMING_LP__TRFC_MASK 0x1ff00000L
+#define MC_SEQ_MISC_TIMING_LP__TRFC__SHIFT 0x00000014
+#define MC_SEQ_MISC_TIMING_LP__TRP_MASK 0x000f8000L
+#define MC_SEQ_MISC_TIMING_LP__TRP_RDA_MASK 0x00003f00L
+#define MC_SEQ_MISC_TIMING_LP__TRP_RDA__SHIFT 0x00000008
+#define MC_SEQ_MISC_TIMING_LP__TRP__SHIFT 0x0000000f
+#define MC_SEQ_MISC_TIMING_LP__TRP_WRA_MASK 0x0000003fL
+#define MC_SEQ_MISC_TIMING_LP__TRP_WRA__SHIFT 0x00000000
+#define MC_SEQ_MISC_TIMING__TRFC_MASK 0x1ff00000L
+#define MC_SEQ_MISC_TIMING__TRFC__SHIFT 0x00000014
+#define MC_SEQ_MISC_TIMING__TRP_MASK 0x000f8000L
+#define MC_SEQ_MISC_TIMING__TRP_RDA_MASK 0x00003f00L
+#define MC_SEQ_MISC_TIMING__TRP_RDA__SHIFT 0x00000008
+#define MC_SEQ_MISC_TIMING__TRP__SHIFT 0x0000000f
+#define MC_SEQ_MISC_TIMING__TRP_WRA_MASK 0x0000003fL
+#define MC_SEQ_MISC_TIMING__TRP_WRA__SHIFT 0x00000000
+#define MC_SEQ_MPLL_OVERRIDE__AD_PLL_RESET_OVERRIDE_MASK 0x00000001L
+#define MC_SEQ_MPLL_OVERRIDE__AD_PLL_RESET_OVERRIDE__SHIFT 0x00000000
+#define MC_SEQ_MPLL_OVERRIDE__ATGM_CLK_SEL_OVERRIDE_MASK 0x00000020L
+#define MC_SEQ_MPLL_OVERRIDE__ATGM_CLK_SEL_OVERRIDE__SHIFT 0x00000005
+#define MC_SEQ_MPLL_OVERRIDE__DQ_0_0_PLL_RESET_OVERRIDE_MASK 0x00000002L
+#define MC_SEQ_MPLL_OVERRIDE__DQ_0_0_PLL_RESET_OVERRIDE__SHIFT 0x00000001
+#define MC_SEQ_MPLL_OVERRIDE__DQ_0_1_PLL_RESET_OVERRIDE_MASK 0x00000004L
+#define MC_SEQ_MPLL_OVERRIDE__DQ_0_1_PLL_RESET_OVERRIDE__SHIFT 0x00000002
+#define MC_SEQ_MPLL_OVERRIDE__DQ_1_0_PLL_RESET_OVERRIDE_MASK 0x00000008L
+#define MC_SEQ_MPLL_OVERRIDE__DQ_1_0_PLL_RESET_OVERRIDE__SHIFT 0x00000003
+#define MC_SEQ_MPLL_OVERRIDE__DQ_1_1_PLL_RESET_OVERRIDE_MASK 0x00000010L
+#define MC_SEQ_MPLL_OVERRIDE__DQ_1_1_PLL_RESET_OVERRIDE__SHIFT 0x00000004
+#define MC_SEQ_MPLL_OVERRIDE__TEST_BYPASS_CLK_EN_OVERRIDE_MASK 0x00000040L
+#define MC_SEQ_MPLL_OVERRIDE__TEST_BYPASS_CLK_EN_OVERRIDE__SHIFT 0x00000006
+#define MC_SEQ_MPLL_OVERRIDE__TEST_BYPASS_CLK_SEL_OVERRIDE_MASK 0x00000080L
+#define MC_SEQ_MPLL_OVERRIDE__TEST_BYPASS_CLK_SEL_OVERRIDE__SHIFT 0x00000007
+#define MC_SEQ_PERF_CNTL_1__PAUSE_MASK 0x00000001L
+#define MC_SEQ_PERF_CNTL_1__PAUSE__SHIFT 0x00000000
+#define MC_SEQ_PERF_CNTL_1__SEL_A_MSB_MASK 0x00000100L
+#define MC_SEQ_PERF_CNTL_1__SEL_A_MSB__SHIFT 0x00000008
+#define MC_SEQ_PERF_CNTL_1__SEL_B_MSB_MASK 0x00000200L
+#define MC_SEQ_PERF_CNTL_1__SEL_B_MSB__SHIFT 0x00000009
+#define MC_SEQ_PERF_CNTL_1__SEL_CH0_C_MSB_MASK 0x00000400L
+#define MC_SEQ_PERF_CNTL_1__SEL_CH0_C_MSB__SHIFT 0x0000000a
+#define MC_SEQ_PERF_CNTL_1__SEL_CH0_D_MSB_MASK 0x00000800L
+#define MC_SEQ_PERF_CNTL_1__SEL_CH0_D_MSB__SHIFT 0x0000000b
+#define MC_SEQ_PERF_CNTL_1__SEL_CH1_A_MSB_MASK 0x00001000L
+#define MC_SEQ_PERF_CNTL_1__SEL_CH1_A_MSB__SHIFT 0x0000000c
+#define MC_SEQ_PERF_CNTL_1__SEL_CH1_B_MSB_MASK 0x00002000L
+#define MC_SEQ_PERF_CNTL_1__SEL_CH1_B_MSB__SHIFT 0x0000000d
+#define MC_SEQ_PERF_CNTL_1__SEL_CH1_C_MSB_MASK 0x00004000L
+#define MC_SEQ_PERF_CNTL_1__SEL_CH1_C_MSB__SHIFT 0x0000000e
+#define MC_SEQ_PERF_CNTL_1__SEL_CH1_D_MSB_MASK 0x00008000L
+#define MC_SEQ_PERF_CNTL_1__SEL_CH1_D_MSB__SHIFT 0x0000000f
+#define MC_SEQ_PERF_CNTL__CNTL_MASK 0xc0000000L
+#define MC_SEQ_PERF_CNTL__CNTL__SHIFT 0x0000001e
+#define MC_SEQ_PERF_CNTL__MONITOR_PERIOD_MASK 0x3fffffffL
+#define MC_SEQ_PERF_CNTL__MONITOR_PERIOD__SHIFT 0x00000000
+#define MC_SEQ_PERF_SEQ_CNT_A_I0__VALUE_MASK 0xffffffffL
+#define MC_SEQ_PERF_SEQ_CNT_A_I0__VALUE__SHIFT 0x00000000
+#define MC_SEQ_PERF_SEQ_CNT_A_I1__VALUE_MASK 0xffffffffL
+#define MC_SEQ_PERF_SEQ_CNT_A_I1__VALUE__SHIFT 0x00000000
+#define MC_SEQ_PERF_SEQ_CNT_B_I0__VALUE_MASK 0xffffffffL
+#define MC_SEQ_PERF_SEQ_CNT_B_I0__VALUE__SHIFT 0x00000000
+#define MC_SEQ_PERF_SEQ_CNT_B_I1__VALUE_MASK 0xffffffffL
+#define MC_SEQ_PERF_SEQ_CNT_B_I1__VALUE__SHIFT 0x00000000
+#define MC_SEQ_PERF_SEQ_CNT_C_I0__VALUE_MASK 0xffffffffL
+#define MC_SEQ_PERF_SEQ_CNT_C_I0__VALUE__SHIFT 0x00000000
+#define MC_SEQ_PERF_SEQ_CNT_C_I1__VALUE_MASK 0xffffffffL
+#define MC_SEQ_PERF_SEQ_CNT_C_I1__VALUE__SHIFT 0x00000000
+#define MC_SEQ_PERF_SEQ_CNT_D_I0__VALUE_MASK 0xffffffffL
+#define MC_SEQ_PERF_SEQ_CNT_D_I0__VALUE__SHIFT 0x00000000
+#define MC_SEQ_PERF_SEQ_CNT_D_I1__VALUE_MASK 0xffffffffL
+#define MC_SEQ_PERF_SEQ_CNT_D_I1__VALUE__SHIFT 0x00000000
+#define MC_SEQ_PERF_SEQ_CTL__SEL_A_MASK 0x0000000fL
+#define MC_SEQ_PERF_SEQ_CTL__SEL_A__SHIFT 0x00000000
+#define MC_SEQ_PERF_SEQ_CTL__SEL_B_MASK 0x000000f0L
+#define MC_SEQ_PERF_SEQ_CTL__SEL_B__SHIFT 0x00000004
+#define MC_SEQ_PERF_SEQ_CTL__SEL_CH0_C_MASK 0x00000f00L
+#define MC_SEQ_PERF_SEQ_CTL__SEL_CH0_C__SHIFT 0x00000008
+#define MC_SEQ_PERF_SEQ_CTL__SEL_CH0_D_MASK 0x0000f000L
+#define MC_SEQ_PERF_SEQ_CTL__SEL_CH0_D__SHIFT 0x0000000c
+#define MC_SEQ_PERF_SEQ_CTL__SEL_CH1_A_MASK 0x000f0000L
+#define MC_SEQ_PERF_SEQ_CTL__SEL_CH1_A__SHIFT 0x00000010
+#define MC_SEQ_PERF_SEQ_CTL__SEL_CH1_B_MASK 0x00f00000L
+#define MC_SEQ_PERF_SEQ_CTL__SEL_CH1_B__SHIFT 0x00000014
+#define MC_SEQ_PERF_SEQ_CTL__SEL_CH1_C_MASK 0x0f000000L
+#define MC_SEQ_PERF_SEQ_CTL__SEL_CH1_C__SHIFT 0x00000018
+#define MC_SEQ_PERF_SEQ_CTL__SEL_CH1_D_MASK 0xf0000000L
+#define MC_SEQ_PERF_SEQ_CTL__SEL_CH1_D__SHIFT 0x0000001c
+#define MC_SEQ_PMG_CMD_EMRS_LP__ADR_MASK 0x0000ffffL
+#define MC_SEQ_PMG_CMD_EMRS_LP__ADR_MSB0_MASK 0x20000000L
+#define MC_SEQ_PMG_CMD_EMRS_LP__ADR_MSB0__SHIFT 0x0000001d
+#define MC_SEQ_PMG_CMD_EMRS_LP__ADR_MSB1_MASK 0x10000000L
+#define MC_SEQ_PMG_CMD_EMRS_LP__ADR_MSB1__SHIFT 0x0000001c
+#define MC_SEQ_PMG_CMD_EMRS_LP__ADR__SHIFT 0x00000000
+#define MC_SEQ_PMG_CMD_EMRS_LP__BNK_MSB_MASK 0x00080000L
+#define MC_SEQ_PMG_CMD_EMRS_LP__BNK_MSB__SHIFT 0x00000013
+#define MC_SEQ_PMG_CMD_EMRS_LP__CSB_MASK 0x00600000L
+#define MC_SEQ_PMG_CMD_EMRS_LP__CSB__SHIFT 0x00000015
+#define MC_SEQ_PMG_CMD_EMRS_LP__END_MASK 0x00100000L
+#define MC_SEQ_PMG_CMD_EMRS_LP__END__SHIFT 0x00000014
+#define MC_SEQ_PMG_CMD_EMRS_LP__MOP_MASK 0x00070000L
+#define MC_SEQ_PMG_CMD_EMRS_LP__MOP__SHIFT 0x00000010
+#define MC_SEQ_PMG_CMD_MRS1_LP__ADR_MASK 0x0000ffffL
+#define MC_SEQ_PMG_CMD_MRS1_LP__ADR_MSB0_MASK 0x20000000L
+#define MC_SEQ_PMG_CMD_MRS1_LP__ADR_MSB0__SHIFT 0x0000001d
+#define MC_SEQ_PMG_CMD_MRS1_LP__ADR_MSB1_MASK 0x10000000L
+#define MC_SEQ_PMG_CMD_MRS1_LP__ADR_MSB1__SHIFT 0x0000001c
+#define MC_SEQ_PMG_CMD_MRS1_LP__ADR__SHIFT 0x00000000
+#define MC_SEQ_PMG_CMD_MRS1_LP__BNK_MSB_MASK 0x00080000L
+#define MC_SEQ_PMG_CMD_MRS1_LP__BNK_MSB__SHIFT 0x00000013
+#define MC_SEQ_PMG_CMD_MRS1_LP__CSB_MASK 0x00600000L
+#define MC_SEQ_PMG_CMD_MRS1_LP__CSB__SHIFT 0x00000015
+#define MC_SEQ_PMG_CMD_MRS1_LP__END_MASK 0x00100000L
+#define MC_SEQ_PMG_CMD_MRS1_LP__END__SHIFT 0x00000014
+#define MC_SEQ_PMG_CMD_MRS1_LP__MOP_MASK 0x00070000L
+#define MC_SEQ_PMG_CMD_MRS1_LP__MOP__SHIFT 0x00000010
+#define MC_SEQ_PMG_CMD_MRS2_LP__ADR_MASK 0x0000ffffL
+#define MC_SEQ_PMG_CMD_MRS2_LP__ADR_MSB0_MASK 0x20000000L
+#define MC_SEQ_PMG_CMD_MRS2_LP__ADR_MSB0__SHIFT 0x0000001d
+#define MC_SEQ_PMG_CMD_MRS2_LP__ADR_MSB1_MASK 0x10000000L
+#define MC_SEQ_PMG_CMD_MRS2_LP__ADR_MSB1__SHIFT 0x0000001c
+#define MC_SEQ_PMG_CMD_MRS2_LP__ADR__SHIFT 0x00000000
+#define MC_SEQ_PMG_CMD_MRS2_LP__BNK_MSB_MASK 0x00080000L
+#define MC_SEQ_PMG_CMD_MRS2_LP__BNK_MSB__SHIFT 0x00000013
+#define MC_SEQ_PMG_CMD_MRS2_LP__CSB_MASK 0x00600000L
+#define MC_SEQ_PMG_CMD_MRS2_LP__CSB__SHIFT 0x00000015
+#define MC_SEQ_PMG_CMD_MRS2_LP__END_MASK 0x00100000L
+#define MC_SEQ_PMG_CMD_MRS2_LP__END__SHIFT 0x00000014
+#define MC_SEQ_PMG_CMD_MRS2_LP__MOP_MASK 0x00070000L
+#define MC_SEQ_PMG_CMD_MRS2_LP__MOP__SHIFT 0x00000010
+#define MC_SEQ_PMG_CMD_MRS_LP__ADR_MASK 0x0000ffffL
+#define MC_SEQ_PMG_CMD_MRS_LP__ADR_MSB0_MASK 0x20000000L
+#define MC_SEQ_PMG_CMD_MRS_LP__ADR_MSB0__SHIFT 0x0000001d
+#define MC_SEQ_PMG_CMD_MRS_LP__ADR_MSB1_MASK 0x10000000L
+#define MC_SEQ_PMG_CMD_MRS_LP__ADR_MSB1__SHIFT 0x0000001c
+#define MC_SEQ_PMG_CMD_MRS_LP__ADR__SHIFT 0x00000000
+#define MC_SEQ_PMG_CMD_MRS_LP__BNK_MSB_MASK 0x00080000L
+#define MC_SEQ_PMG_CMD_MRS_LP__BNK_MSB__SHIFT 0x00000013
+#define MC_SEQ_PMG_CMD_MRS_LP__CSB_MASK 0x00600000L
+#define MC_SEQ_PMG_CMD_MRS_LP__CSB__SHIFT 0x00000015
+#define MC_SEQ_PMG_CMD_MRS_LP__END_MASK 0x00100000L
+#define MC_SEQ_PMG_CMD_MRS_LP__END__SHIFT 0x00000014
+#define MC_SEQ_PMG_CMD_MRS_LP__MOP_MASK 0x00070000L
+#define MC_SEQ_PMG_CMD_MRS_LP__MOP__SHIFT 0x00000010
+#define MC_SEQ_PMG_PG_HWCNTL__ACAO_MASK 0x00040000L
+#define MC_SEQ_PMG_PG_HWCNTL__ACAO__SHIFT 0x00000012
+#define MC_SEQ_PMG_PG_HWCNTL__AC_DLY_MASK 0x00000300L
+#define MC_SEQ_PMG_PG_HWCNTL__AC_DLY__SHIFT 0x00000008
+#define MC_SEQ_PMG_PG_HWCNTL__D_DLY_MASK 0x000000c0L
+#define MC_SEQ_PMG_PG_HWCNTL__D_DLY__SHIFT 0x00000006
+#define MC_SEQ_PMG_PG_HWCNTL__G_DLY_MASK 0x00003c00L
+#define MC_SEQ_PMG_PG_HWCNTL__G_DLY__SHIFT 0x0000000a
+#define MC_SEQ_PMG_PG_HWCNTL__PWRGATE_EN_MASK 0x00000001L
+#define MC_SEQ_PMG_PG_HWCNTL__PWRGATE_EN__SHIFT 0x00000000
+#define MC_SEQ_PMG_PG_HWCNTL__RXAO_MASK 0x00020000L
+#define MC_SEQ_PMG_PG_HWCNTL__RXAO__SHIFT 0x00000011
+#define MC_SEQ_PMG_PG_HWCNTL__STAGGER_EN_MASK 0x00000002L
+#define MC_SEQ_PMG_PG_HWCNTL__STAGGER_EN__SHIFT 0x00000001
+#define MC_SEQ_PMG_PG_HWCNTL__TPGCG_MASK 0x0000003cL
+#define MC_SEQ_PMG_PG_HWCNTL__TPGCG__SHIFT 0x00000002
+#define MC_SEQ_PMG_PG_HWCNTL__TXAO_MASK 0x00010000L
+#define MC_SEQ_PMG_PG_HWCNTL__TXAO__SHIFT 0x00000010
+#define MC_SEQ_PMG_PG_SWCNTL_0__GMCON_SR_COMMIT_MASK 0x80000000L
+#define MC_SEQ_PMG_PG_SWCNTL_0__GMCON_SR_COMMIT__SHIFT 0x0000001f
+#define MC_SEQ_PMG_PG_SWCNTL_0__PMA0_AC_ENB_MASK 0x00010000L
+#define MC_SEQ_PMG_PG_SWCNTL_0__PMA0_AC_ENB__SHIFT 0x00000010
+#define MC_SEQ_PMG_PG_SWCNTL_0__PMD0_DBI_RX_ENB_MASK 0x00000020L
+#define MC_SEQ_PMG_PG_SWCNTL_0__PMD0_DBI_RX_ENB__SHIFT 0x00000005
+#define MC_SEQ_PMG_PG_SWCNTL_0__PMD0_DBI_TX_ENB_MASK 0x00000002L
+#define MC_SEQ_PMG_PG_SWCNTL_0__PMD0_DBI_TX_ENB__SHIFT 0x00000001
+#define MC_SEQ_PMG_PG_SWCNTL_0__PMD0_DQ_RX_ENB_MASK 0x00000010L
+#define MC_SEQ_PMG_PG_SWCNTL_0__PMD0_DQ_RX_ENB__SHIFT 0x00000004
+#define MC_SEQ_PMG_PG_SWCNTL_0__PMD0_DQ_TX_ENB_MASK 0x00000001L
+#define MC_SEQ_PMG_PG_SWCNTL_0__PMD0_DQ_TX_ENB__SHIFT 0x00000000
+#define MC_SEQ_PMG_PG_SWCNTL_0__PMD0_EDC_RX_ENB_MASK 0x00000040L
+#define MC_SEQ_PMG_PG_SWCNTL_0__PMD0_EDC_RX_ENB__SHIFT 0x00000006
+#define MC_SEQ_PMG_PG_SWCNTL_0__PMD0_EDC_TX_ENB_MASK 0x00000004L
+#define MC_SEQ_PMG_PG_SWCNTL_0__PMD0_EDC_TX_ENB__SHIFT 0x00000002
+#define MC_SEQ_PMG_PG_SWCNTL_0__PMD0_WCLKX_RX_ENB_MASK 0x00000080L
+#define MC_SEQ_PMG_PG_SWCNTL_0__PMD0_WCLKX_RX_ENB__SHIFT 0x00000007
+#define MC_SEQ_PMG_PG_SWCNTL_0__PMD0_WCLKX_TX_ENB_MASK 0x00000008L
+#define MC_SEQ_PMG_PG_SWCNTL_0__PMD0_WCLKX_TX_ENB__SHIFT 0x00000003
+#define MC_SEQ_PMG_PG_SWCNTL_0__PMD1_DBI_RX_ENB_MASK 0x00002000L
+#define MC_SEQ_PMG_PG_SWCNTL_0__PMD1_DBI_RX_ENB__SHIFT 0x0000000d
+#define MC_SEQ_PMG_PG_SWCNTL_0__PMD1_DBI_TX_ENB_MASK 0x00000200L
+#define MC_SEQ_PMG_PG_SWCNTL_0__PMD1_DBI_TX_ENB__SHIFT 0x00000009
+#define MC_SEQ_PMG_PG_SWCNTL_0__PMD1_DQ_RX_ENB_MASK 0x00001000L
+#define MC_SEQ_PMG_PG_SWCNTL_0__PMD1_DQ_RX_ENB__SHIFT 0x0000000c
+#define MC_SEQ_PMG_PG_SWCNTL_0__PMD1_DQ_TX_ENB_MASK 0x00000100L
+#define MC_SEQ_PMG_PG_SWCNTL_0__PMD1_DQ_TX_ENB__SHIFT 0x00000008
+#define MC_SEQ_PMG_PG_SWCNTL_0__PMD1_EDC_RX_ENB_MASK 0x00004000L
+#define MC_SEQ_PMG_PG_SWCNTL_0__PMD1_EDC_RX_ENB__SHIFT 0x0000000e
+#define MC_SEQ_PMG_PG_SWCNTL_0__PMD1_EDC_TX_ENB_MASK 0x00000400L
+#define MC_SEQ_PMG_PG_SWCNTL_0__PMD1_EDC_TX_ENB__SHIFT 0x0000000a
+#define MC_SEQ_PMG_PG_SWCNTL_0__PMD1_WCLKX_RX_ENB_MASK 0x00008000L
+#define MC_SEQ_PMG_PG_SWCNTL_0__PMD1_WCLKX_RX_ENB__SHIFT 0x0000000f
+#define MC_SEQ_PMG_PG_SWCNTL_0__PMD1_WCLKX_TX_ENB_MASK 0x00000800L
+#define MC_SEQ_PMG_PG_SWCNTL_0__PMD1_WCLKX_TX_ENB__SHIFT 0x0000000b
+#define MC_SEQ_PMG_PG_SWCNTL_1__GMCON_SR_COMMIT_MASK 0x80000000L
+#define MC_SEQ_PMG_PG_SWCNTL_1__GMCON_SR_COMMIT__SHIFT 0x0000001f
+#define MC_SEQ_PMG_PG_SWCNTL_1__PMA1_AC_ENB_MASK 0x00010000L
+#define MC_SEQ_PMG_PG_SWCNTL_1__PMA1_AC_ENB__SHIFT 0x00000010
+#define MC_SEQ_PMG_PG_SWCNTL_1__PMD2_DBI_RX_ENB_MASK 0x00000020L
+#define MC_SEQ_PMG_PG_SWCNTL_1__PMD2_DBI_RX_ENB__SHIFT 0x00000005
+#define MC_SEQ_PMG_PG_SWCNTL_1__PMD2_DBI_TX_ENB_MASK 0x00000002L
+#define MC_SEQ_PMG_PG_SWCNTL_1__PMD2_DBI_TX_ENB__SHIFT 0x00000001
+#define MC_SEQ_PMG_PG_SWCNTL_1__PMD2_DQ_RX_ENB_MASK 0x00000010L
+#define MC_SEQ_PMG_PG_SWCNTL_1__PMD2_DQ_RX_ENB__SHIFT 0x00000004
+#define MC_SEQ_PMG_PG_SWCNTL_1__PMD2_DQ_TX_ENB_MASK 0x00000001L
+#define MC_SEQ_PMG_PG_SWCNTL_1__PMD2_DQ_TX_ENB__SHIFT 0x00000000
+#define MC_SEQ_PMG_PG_SWCNTL_1__PMD2_EDC_RX_ENB_MASK 0x00000040L
+#define MC_SEQ_PMG_PG_SWCNTL_1__PMD2_EDC_RX_ENB__SHIFT 0x00000006
+#define MC_SEQ_PMG_PG_SWCNTL_1__PMD2_EDC_TX_ENB_MASK 0x00000004L
+#define MC_SEQ_PMG_PG_SWCNTL_1__PMD2_EDC_TX_ENB__SHIFT 0x00000002
+#define MC_SEQ_PMG_PG_SWCNTL_1__PMD2_WCLKX_RX_ENB_MASK 0x00000080L
+#define MC_SEQ_PMG_PG_SWCNTL_1__PMD2_WCLKX_RX_ENB__SHIFT 0x00000007
+#define MC_SEQ_PMG_PG_SWCNTL_1__PMD2_WCLKX_TX_ENB_MASK 0x00000008L
+#define MC_SEQ_PMG_PG_SWCNTL_1__PMD2_WCLKX_TX_ENB__SHIFT 0x00000003
+#define MC_SEQ_PMG_PG_SWCNTL_1__PMD3_DBI_RX_ENB_MASK 0x00002000L
+#define MC_SEQ_PMG_PG_SWCNTL_1__PMD3_DBI_RX_ENB__SHIFT 0x0000000d
+#define MC_SEQ_PMG_PG_SWCNTL_1__PMD3_DBI_TX_ENB_MASK 0x00000200L
+#define MC_SEQ_PMG_PG_SWCNTL_1__PMD3_DBI_TX_ENB__SHIFT 0x00000009
+#define MC_SEQ_PMG_PG_SWCNTL_1__PMD3_DQ_RX_ENB_MASK 0x00001000L
+#define MC_SEQ_PMG_PG_SWCNTL_1__PMD3_DQ_RX_ENB__SHIFT 0x0000000c
+#define MC_SEQ_PMG_PG_SWCNTL_1__PMD3_DQ_TX_ENB_MASK 0x00000100L
+#define MC_SEQ_PMG_PG_SWCNTL_1__PMD3_DQ_TX_ENB__SHIFT 0x00000008
+#define MC_SEQ_PMG_PG_SWCNTL_1__PMD3_EDC_RX_ENB_MASK 0x00004000L
+#define MC_SEQ_PMG_PG_SWCNTL_1__PMD3_EDC_RX_ENB__SHIFT 0x0000000e
+#define MC_SEQ_PMG_PG_SWCNTL_1__PMD3_EDC_TX_ENB_MASK 0x00000400L
+#define MC_SEQ_PMG_PG_SWCNTL_1__PMD3_EDC_TX_ENB__SHIFT 0x0000000a
+#define MC_SEQ_PMG_PG_SWCNTL_1__PMD3_WCLKX_RX_ENB_MASK 0x00008000L
+#define MC_SEQ_PMG_PG_SWCNTL_1__PMD3_WCLKX_RX_ENB__SHIFT 0x0000000f
+#define MC_SEQ_PMG_PG_SWCNTL_1__PMD3_WCLKX_TX_ENB_MASK 0x00000800L
+#define MC_SEQ_PMG_PG_SWCNTL_1__PMD3_WCLKX_TX_ENB__SHIFT 0x0000000b
+#define MC_SEQ_PMG_TIMING_LP__SEQ_IDLE_MASK 0x001c0000L
+#define MC_SEQ_PMG_TIMING_LP__SEQ_IDLE__SHIFT 0x00000012
+#define MC_SEQ_PMG_TIMING_LP__SEQ_IDLE_SS_MASK 0xff000000L
+#define MC_SEQ_PMG_TIMING_LP__SEQ_IDLE_SS__SHIFT 0x00000018
+#define MC_SEQ_PMG_TIMING_LP__TCKE_MASK 0x0003f000L
+#define MC_SEQ_PMG_TIMING_LP__TCKE_PULSE_MASK 0x00000f00L
+#define MC_SEQ_PMG_TIMING_LP__TCKE_PULSE_MSB_MASK 0x00800000L
+#define MC_SEQ_PMG_TIMING_LP__TCKE_PULSE_MSB__SHIFT 0x00000017
+#define MC_SEQ_PMG_TIMING_LP__TCKE_PULSE__SHIFT 0x00000008
+#define MC_SEQ_PMG_TIMING_LP__TCKE__SHIFT 0x0000000c
+#define MC_SEQ_PMG_TIMING_LP__TCKSRE_MASK 0x00000007L
+#define MC_SEQ_PMG_TIMING_LP__TCKSRE__SHIFT 0x00000000
+#define MC_SEQ_PMG_TIMING_LP__TCKSRX_MASK 0x00000070L
+#define MC_SEQ_PMG_TIMING_LP__TCKSRX__SHIFT 0x00000004
+#define MC_SEQ_PMG_TIMING__SEQ_IDLE_MASK 0x001c0000L
+#define MC_SEQ_PMG_TIMING__SEQ_IDLE__SHIFT 0x00000012
+#define MC_SEQ_PMG_TIMING__SEQ_IDLE_SS_MASK 0xff000000L
+#define MC_SEQ_PMG_TIMING__SEQ_IDLE_SS__SHIFT 0x00000018
+#define MC_SEQ_PMG_TIMING__TCKE_MASK 0x0003f000L
+#define MC_SEQ_PMG_TIMING__TCKE_PULSE_MASK 0x00000f00L
+#define MC_SEQ_PMG_TIMING__TCKE_PULSE_MSB_MASK 0x00800000L
+#define MC_SEQ_PMG_TIMING__TCKE_PULSE_MSB__SHIFT 0x00000017
+#define MC_SEQ_PMG_TIMING__TCKE_PULSE__SHIFT 0x00000008
+#define MC_SEQ_PMG_TIMING__TCKE__SHIFT 0x0000000c
+#define MC_SEQ_PMG_TIMING__TCKSRE_MASK 0x00000007L
+#define MC_SEQ_PMG_TIMING__TCKSRE__SHIFT 0x00000000
+#define MC_SEQ_PMG_TIMING__TCKSRX_MASK 0x00000070L
+#define MC_SEQ_PMG_TIMING__TCKSRX__SHIFT 0x00000004
+#define MC_SEQ_RAS_TIMING_LP__TRCDRA_MASK 0x000f8000L
+#define MC_SEQ_RAS_TIMING_LP__TRCDRA__SHIFT 0x0000000f
+#define MC_SEQ_RAS_TIMING_LP__TRCDR_MASK 0x00007c00L
+#define MC_SEQ_RAS_TIMING_LP__TRCDR__SHIFT 0x0000000a
+#define MC_SEQ_RAS_TIMING_LP__TRCDWA_MASK 0x000003e0L
+#define MC_SEQ_RAS_TIMING_LP__TRCDWA__SHIFT 0x00000005
+#define MC_SEQ_RAS_TIMING_LP__TRCDW_MASK 0x0000001fL
+#define MC_SEQ_RAS_TIMING_LP__TRCDW__SHIFT 0x00000000
+#define MC_SEQ_RAS_TIMING_LP__TRC_MASK 0x7f000000L
+#define MC_SEQ_RAS_TIMING_LP__TRC__SHIFT 0x00000018
+#define MC_SEQ_RAS_TIMING_LP__TRRD_MASK 0x00f00000L
+#define MC_SEQ_RAS_TIMING_LP__TRRD__SHIFT 0x00000014
+#define MC_SEQ_RAS_TIMING__TRCDRA_MASK 0x000f8000L
+#define MC_SEQ_RAS_TIMING__TRCDRA__SHIFT 0x0000000f
+#define MC_SEQ_RAS_TIMING__TRCDR_MASK 0x00007c00L
+#define MC_SEQ_RAS_TIMING__TRCDR__SHIFT 0x0000000a
+#define MC_SEQ_RAS_TIMING__TRCDWA_MASK 0x000003e0L
+#define MC_SEQ_RAS_TIMING__TRCDWA__SHIFT 0x00000005
+#define MC_SEQ_RAS_TIMING__TRCDW_MASK 0x0000001fL
+#define MC_SEQ_RAS_TIMING__TRCDW__SHIFT 0x00000000
+#define MC_SEQ_RAS_TIMING__TRC_MASK 0x7f000000L
+#define MC_SEQ_RAS_TIMING__TRC__SHIFT 0x00000018
+#define MC_SEQ_RAS_TIMING__TRRD_MASK 0x00f00000L
+#define MC_SEQ_RAS_TIMING__TRRD__SHIFT 0x00000014
+#define MC_SEQ_RD_CTL_D0_LP__RBS_DLY_MASK 0x01f00000L
+#define MC_SEQ_RD_CTL_D0_LP__RBS_DLY__SHIFT 0x00000014
+#define MC_SEQ_RD_CTL_D0_LP__RBS_WEDC_DLY_MASK 0x3e000000L
+#define MC_SEQ_RD_CTL_D0_LP__RBS_WEDC_DLY__SHIFT 0x00000019
+#define MC_SEQ_RD_CTL_D0_LP__RCV_DLY_MASK 0x00000007L
+#define MC_SEQ_RD_CTL_D0_LP__RCV_DLY__SHIFT 0x00000000
+#define MC_SEQ_RD_CTL_D0_LP__RCV_EXT_MASK 0x000000f8L
+#define MC_SEQ_RD_CTL_D0_LP__RCV_EXT__SHIFT 0x00000003
+#define MC_SEQ_RD_CTL_D0_LP__RST_HLD_MASK 0x0000f000L
+#define MC_SEQ_RD_CTL_D0_LP__RST_HLD__SHIFT 0x0000000c
+#define MC_SEQ_RD_CTL_D0_LP__RST_SEL_MASK 0x00000300L
+#define MC_SEQ_RD_CTL_D0_LP__RST_SEL__SHIFT 0x00000008
+#define MC_SEQ_RD_CTL_D0_LP__RXDPWRON_DLY_MASK 0x00000c00L
+#define MC_SEQ_RD_CTL_D0_LP__RXDPWRON_DLY__SHIFT 0x0000000a
+#define MC_SEQ_RD_CTL_D0_LP__STR_PRE_MASK 0x00010000L
+#define MC_SEQ_RD_CTL_D0_LP__STR_PRE__SHIFT 0x00000010
+#define MC_SEQ_RD_CTL_D0_LP__STR_PST_MASK 0x00020000L
+#define MC_SEQ_RD_CTL_D0_LP__STR_PST__SHIFT 0x00000011
+#define MC_SEQ_RD_CTL_D0__RBS_DLY_MASK 0x01f00000L
+#define MC_SEQ_RD_CTL_D0__RBS_DLY__SHIFT 0x00000014
+#define MC_SEQ_RD_CTL_D0__RBS_WEDC_DLY_MASK 0x3e000000L
+#define MC_SEQ_RD_CTL_D0__RBS_WEDC_DLY__SHIFT 0x00000019
+#define MC_SEQ_RD_CTL_D0__RCV_DLY_MASK 0x00000007L
+#define MC_SEQ_RD_CTL_D0__RCV_DLY__SHIFT 0x00000000
+#define MC_SEQ_RD_CTL_D0__RCV_EXT_MASK 0x000000f8L
+#define MC_SEQ_RD_CTL_D0__RCV_EXT__SHIFT 0x00000003
+#define MC_SEQ_RD_CTL_D0__RST_HLD_MASK 0x0000f000L
+#define MC_SEQ_RD_CTL_D0__RST_HLD__SHIFT 0x0000000c
+#define MC_SEQ_RD_CTL_D0__RST_SEL_MASK 0x00000300L
+#define MC_SEQ_RD_CTL_D0__RST_SEL__SHIFT 0x00000008
+#define MC_SEQ_RD_CTL_D0__RXDPWRON_DLY_MASK 0x00000c00L
+#define MC_SEQ_RD_CTL_D0__RXDPWRON_DLY__SHIFT 0x0000000a
+#define MC_SEQ_RD_CTL_D0__STR_PRE_MASK 0x00010000L
+#define MC_SEQ_RD_CTL_D0__STR_PRE__SHIFT 0x00000010
+#define MC_SEQ_RD_CTL_D0__STR_PST_MASK 0x00020000L
+#define MC_SEQ_RD_CTL_D0__STR_PST__SHIFT 0x00000011
+#define MC_SEQ_RD_CTL_D1_LP__RBS_DLY_MASK 0x01f00000L
+#define MC_SEQ_RD_CTL_D1_LP__RBS_DLY__SHIFT 0x00000014
+#define MC_SEQ_RD_CTL_D1_LP__RBS_WEDC_DLY_MASK 0x3e000000L
+#define MC_SEQ_RD_CTL_D1_LP__RBS_WEDC_DLY__SHIFT 0x00000019
+#define MC_SEQ_RD_CTL_D1_LP__RCV_DLY_MASK 0x00000007L
+#define MC_SEQ_RD_CTL_D1_LP__RCV_DLY__SHIFT 0x00000000
+#define MC_SEQ_RD_CTL_D1_LP__RCV_EXT_MASK 0x000000f8L
+#define MC_SEQ_RD_CTL_D1_LP__RCV_EXT__SHIFT 0x00000003
+#define MC_SEQ_RD_CTL_D1_LP__RST_HLD_MASK 0x0000f000L
+#define MC_SEQ_RD_CTL_D1_LP__RST_HLD__SHIFT 0x0000000c
+#define MC_SEQ_RD_CTL_D1_LP__RST_SEL_MASK 0x00000300L
+#define MC_SEQ_RD_CTL_D1_LP__RST_SEL__SHIFT 0x00000008
+#define MC_SEQ_RD_CTL_D1_LP__RXDPWRON_DLY_MASK 0x00000c00L
+#define MC_SEQ_RD_CTL_D1_LP__RXDPWRON_DLY__SHIFT 0x0000000a
+#define MC_SEQ_RD_CTL_D1_LP__STR_PRE_MASK 0x00010000L
+#define MC_SEQ_RD_CTL_D1_LP__STR_PRE__SHIFT 0x00000010
+#define MC_SEQ_RD_CTL_D1_LP__STR_PST_MASK 0x00020000L
+#define MC_SEQ_RD_CTL_D1_LP__STR_PST__SHIFT 0x00000011
+#define MC_SEQ_RD_CTL_D1__RBS_DLY_MASK 0x01f00000L
+#define MC_SEQ_RD_CTL_D1__RBS_DLY__SHIFT 0x00000014
+#define MC_SEQ_RD_CTL_D1__RBS_WEDC_DLY_MASK 0x3e000000L
+#define MC_SEQ_RD_CTL_D1__RBS_WEDC_DLY__SHIFT 0x00000019
+#define MC_SEQ_RD_CTL_D1__RCV_DLY_MASK 0x00000007L
+#define MC_SEQ_RD_CTL_D1__RCV_DLY__SHIFT 0x00000000
+#define MC_SEQ_RD_CTL_D1__RCV_EXT_MASK 0x000000f8L
+#define MC_SEQ_RD_CTL_D1__RCV_EXT__SHIFT 0x00000003
+#define MC_SEQ_RD_CTL_D1__RST_HLD_MASK 0x0000f000L
+#define MC_SEQ_RD_CTL_D1__RST_HLD__SHIFT 0x0000000c
+#define MC_SEQ_RD_CTL_D1__RST_SEL_MASK 0x00000300L
+#define MC_SEQ_RD_CTL_D1__RST_SEL__SHIFT 0x00000008
+#define MC_SEQ_RD_CTL_D1__RXDPWRON_DLY_MASK 0x00000c00L
+#define MC_SEQ_RD_CTL_D1__RXDPWRON_DLY__SHIFT 0x0000000a
+#define MC_SEQ_RD_CTL_D1__STR_PRE_MASK 0x00010000L
+#define MC_SEQ_RD_CTL_D1__STR_PRE__SHIFT 0x00000010
+#define MC_SEQ_RD_CTL_D1__STR_PST_MASK 0x00020000L
+#define MC_SEQ_RD_CTL_D1__STR_PST__SHIFT 0x00000011
+#define MC_SEQ_RESERVE_0_S__SCLK_FIELD_MASK 0xffffffffL
+#define MC_SEQ_RESERVE_0_S__SCLK_FIELD__SHIFT 0x00000000
+#define MC_SEQ_RESERVE_1_S__SCLK_FIELD_MASK 0xffffffffL
+#define MC_SEQ_RESERVE_1_S__SCLK_FIELD__SHIFT 0x00000000
+#define MC_SEQ_RESERVE_M__MCLK_FIELD_MASK 0xffffffffL
+#define MC_SEQ_RESERVE_M__MCLK_FIELD__SHIFT 0x00000000
+#define MC_SEQ_RXFRAMING_BYTE0_D0__DQ0_MASK 0x0000000fL
+#define MC_SEQ_RXFRAMING_BYTE0_D0__DQ0__SHIFT 0x00000000
+#define MC_SEQ_RXFRAMING_BYTE0_D0__DQ1_MASK 0x000000f0L
+#define MC_SEQ_RXFRAMING_BYTE0_D0__DQ1__SHIFT 0x00000004
+#define MC_SEQ_RXFRAMING_BYTE0_D0__DQ2_MASK 0x00000f00L
+#define MC_SEQ_RXFRAMING_BYTE0_D0__DQ2__SHIFT 0x00000008
+#define MC_SEQ_RXFRAMING_BYTE0_D0__DQ3_MASK 0x0000f000L
+#define MC_SEQ_RXFRAMING_BYTE0_D0__DQ3__SHIFT 0x0000000c
+#define MC_SEQ_RXFRAMING_BYTE0_D0__DQ4_MASK 0x000f0000L
+#define MC_SEQ_RXFRAMING_BYTE0_D0__DQ4__SHIFT 0x00000010
+#define MC_SEQ_RXFRAMING_BYTE0_D0__DQ5_MASK 0x00f00000L
+#define MC_SEQ_RXFRAMING_BYTE0_D0__DQ5__SHIFT 0x00000014
+#define MC_SEQ_RXFRAMING_BYTE0_D0__DQ6_MASK 0x0f000000L
+#define MC_SEQ_RXFRAMING_BYTE0_D0__DQ6__SHIFT 0x00000018
+#define MC_SEQ_RXFRAMING_BYTE0_D0__DQ7_MASK 0xf0000000L
+#define MC_SEQ_RXFRAMING_BYTE0_D0__DQ7__SHIFT 0x0000001c
+#define MC_SEQ_RXFRAMING_BYTE0_D1__DQ0_MASK 0x0000000fL
+#define MC_SEQ_RXFRAMING_BYTE0_D1__DQ0__SHIFT 0x00000000
+#define MC_SEQ_RXFRAMING_BYTE0_D1__DQ1_MASK 0x000000f0L
+#define MC_SEQ_RXFRAMING_BYTE0_D1__DQ1__SHIFT 0x00000004
+#define MC_SEQ_RXFRAMING_BYTE0_D1__DQ2_MASK 0x00000f00L
+#define MC_SEQ_RXFRAMING_BYTE0_D1__DQ2__SHIFT 0x00000008
+#define MC_SEQ_RXFRAMING_BYTE0_D1__DQ3_MASK 0x0000f000L
+#define MC_SEQ_RXFRAMING_BYTE0_D1__DQ3__SHIFT 0x0000000c
+#define MC_SEQ_RXFRAMING_BYTE0_D1__DQ4_MASK 0x000f0000L
+#define MC_SEQ_RXFRAMING_BYTE0_D1__DQ4__SHIFT 0x00000010
+#define MC_SEQ_RXFRAMING_BYTE0_D1__DQ5_MASK 0x00f00000L
+#define MC_SEQ_RXFRAMING_BYTE0_D1__DQ5__SHIFT 0x00000014
+#define MC_SEQ_RXFRAMING_BYTE0_D1__DQ6_MASK 0x0f000000L
+#define MC_SEQ_RXFRAMING_BYTE0_D1__DQ6__SHIFT 0x00000018
+#define MC_SEQ_RXFRAMING_BYTE0_D1__DQ7_MASK 0xf0000000L
+#define MC_SEQ_RXFRAMING_BYTE0_D1__DQ7__SHIFT 0x0000001c
+#define MC_SEQ_RXFRAMING_BYTE1_D0__DQ0_MASK 0x0000000fL
+#define MC_SEQ_RXFRAMING_BYTE1_D0__DQ0__SHIFT 0x00000000
+#define MC_SEQ_RXFRAMING_BYTE1_D0__DQ1_MASK 0x000000f0L
+#define MC_SEQ_RXFRAMING_BYTE1_D0__DQ1__SHIFT 0x00000004
+#define MC_SEQ_RXFRAMING_BYTE1_D0__DQ2_MASK 0x00000f00L
+#define MC_SEQ_RXFRAMING_BYTE1_D0__DQ2__SHIFT 0x00000008
+#define MC_SEQ_RXFRAMING_BYTE1_D0__DQ3_MASK 0x0000f000L
+#define MC_SEQ_RXFRAMING_BYTE1_D0__DQ3__SHIFT 0x0000000c
+#define MC_SEQ_RXFRAMING_BYTE1_D0__DQ4_MASK 0x000f0000L
+#define MC_SEQ_RXFRAMING_BYTE1_D0__DQ4__SHIFT 0x00000010
+#define MC_SEQ_RXFRAMING_BYTE1_D0__DQ5_MASK 0x00f00000L
+#define MC_SEQ_RXFRAMING_BYTE1_D0__DQ5__SHIFT 0x00000014
+#define MC_SEQ_RXFRAMING_BYTE1_D0__DQ6_MASK 0x0f000000L
+#define MC_SEQ_RXFRAMING_BYTE1_D0__DQ6__SHIFT 0x00000018
+#define MC_SEQ_RXFRAMING_BYTE1_D0__DQ7_MASK 0xf0000000L
+#define MC_SEQ_RXFRAMING_BYTE1_D0__DQ7__SHIFT 0x0000001c
+#define MC_SEQ_RXFRAMING_BYTE1_D1__DQ0_MASK 0x0000000fL
+#define MC_SEQ_RXFRAMING_BYTE1_D1__DQ0__SHIFT 0x00000000
+#define MC_SEQ_RXFRAMING_BYTE1_D1__DQ1_MASK 0x000000f0L
+#define MC_SEQ_RXFRAMING_BYTE1_D1__DQ1__SHIFT 0x00000004
+#define MC_SEQ_RXFRAMING_BYTE1_D1__DQ2_MASK 0x00000f00L
+#define MC_SEQ_RXFRAMING_BYTE1_D1__DQ2__SHIFT 0x00000008
+#define MC_SEQ_RXFRAMING_BYTE1_D1__DQ3_MASK 0x0000f000L
+#define MC_SEQ_RXFRAMING_BYTE1_D1__DQ3__SHIFT 0x0000000c
+#define MC_SEQ_RXFRAMING_BYTE1_D1__DQ4_MASK 0x000f0000L
+#define MC_SEQ_RXFRAMING_BYTE1_D1__DQ4__SHIFT 0x00000010
+#define MC_SEQ_RXFRAMING_BYTE1_D1__DQ5_MASK 0x00f00000L
+#define MC_SEQ_RXFRAMING_BYTE1_D1__DQ5__SHIFT 0x00000014
+#define MC_SEQ_RXFRAMING_BYTE1_D1__DQ6_MASK 0x0f000000L
+#define MC_SEQ_RXFRAMING_BYTE1_D1__DQ6__SHIFT 0x00000018
+#define MC_SEQ_RXFRAMING_BYTE1_D1__DQ7_MASK 0xf0000000L
+#define MC_SEQ_RXFRAMING_BYTE1_D1__DQ7__SHIFT 0x0000001c
+#define MC_SEQ_RXFRAMING_BYTE2_D0__DQ0_MASK 0x0000000fL
+#define MC_SEQ_RXFRAMING_BYTE2_D0__DQ0__SHIFT 0x00000000
+#define MC_SEQ_RXFRAMING_BYTE2_D0__DQ1_MASK 0x000000f0L
+#define MC_SEQ_RXFRAMING_BYTE2_D0__DQ1__SHIFT 0x00000004
+#define MC_SEQ_RXFRAMING_BYTE2_D0__DQ2_MASK 0x00000f00L
+#define MC_SEQ_RXFRAMING_BYTE2_D0__DQ2__SHIFT 0x00000008
+#define MC_SEQ_RXFRAMING_BYTE2_D0__DQ3_MASK 0x0000f000L
+#define MC_SEQ_RXFRAMING_BYTE2_D0__DQ3__SHIFT 0x0000000c
+#define MC_SEQ_RXFRAMING_BYTE2_D0__DQ4_MASK 0x000f0000L
+#define MC_SEQ_RXFRAMING_BYTE2_D0__DQ4__SHIFT 0x00000010
+#define MC_SEQ_RXFRAMING_BYTE2_D0__DQ5_MASK 0x00f00000L
+#define MC_SEQ_RXFRAMING_BYTE2_D0__DQ5__SHIFT 0x00000014
+#define MC_SEQ_RXFRAMING_BYTE2_D0__DQ6_MASK 0x0f000000L
+#define MC_SEQ_RXFRAMING_BYTE2_D0__DQ6__SHIFT 0x00000018
+#define MC_SEQ_RXFRAMING_BYTE2_D0__DQ7_MASK 0xf0000000L
+#define MC_SEQ_RXFRAMING_BYTE2_D0__DQ7__SHIFT 0x0000001c
+#define MC_SEQ_RXFRAMING_BYTE2_D1__DQ0_MASK 0x0000000fL
+#define MC_SEQ_RXFRAMING_BYTE2_D1__DQ0__SHIFT 0x00000000
+#define MC_SEQ_RXFRAMING_BYTE2_D1__DQ1_MASK 0x000000f0L
+#define MC_SEQ_RXFRAMING_BYTE2_D1__DQ1__SHIFT 0x00000004
+#define MC_SEQ_RXFRAMING_BYTE2_D1__DQ2_MASK 0x00000f00L
+#define MC_SEQ_RXFRAMING_BYTE2_D1__DQ2__SHIFT 0x00000008
+#define MC_SEQ_RXFRAMING_BYTE2_D1__DQ3_MASK 0x0000f000L
+#define MC_SEQ_RXFRAMING_BYTE2_D1__DQ3__SHIFT 0x0000000c
+#define MC_SEQ_RXFRAMING_BYTE2_D1__DQ4_MASK 0x000f0000L
+#define MC_SEQ_RXFRAMING_BYTE2_D1__DQ4__SHIFT 0x00000010
+#define MC_SEQ_RXFRAMING_BYTE2_D1__DQ5_MASK 0x00f00000L
+#define MC_SEQ_RXFRAMING_BYTE2_D1__DQ5__SHIFT 0x00000014
+#define MC_SEQ_RXFRAMING_BYTE2_D1__DQ6_MASK 0x0f000000L
+#define MC_SEQ_RXFRAMING_BYTE2_D1__DQ6__SHIFT 0x00000018
+#define MC_SEQ_RXFRAMING_BYTE2_D1__DQ7_MASK 0xf0000000L
+#define MC_SEQ_RXFRAMING_BYTE2_D1__DQ7__SHIFT 0x0000001c
+#define MC_SEQ_RXFRAMING_BYTE3_D0__DQ0_MASK 0x0000000fL
+#define MC_SEQ_RXFRAMING_BYTE3_D0__DQ0__SHIFT 0x00000000
+#define MC_SEQ_RXFRAMING_BYTE3_D0__DQ1_MASK 0x000000f0L
+#define MC_SEQ_RXFRAMING_BYTE3_D0__DQ1__SHIFT 0x00000004
+#define MC_SEQ_RXFRAMING_BYTE3_D0__DQ2_MASK 0x00000f00L
+#define MC_SEQ_RXFRAMING_BYTE3_D0__DQ2__SHIFT 0x00000008
+#define MC_SEQ_RXFRAMING_BYTE3_D0__DQ3_MASK 0x0000f000L
+#define MC_SEQ_RXFRAMING_BYTE3_D0__DQ3__SHIFT 0x0000000c
+#define MC_SEQ_RXFRAMING_BYTE3_D0__DQ4_MASK 0x000f0000L
+#define MC_SEQ_RXFRAMING_BYTE3_D0__DQ4__SHIFT 0x00000010
+#define MC_SEQ_RXFRAMING_BYTE3_D0__DQ5_MASK 0x00f00000L
+#define MC_SEQ_RXFRAMING_BYTE3_D0__DQ5__SHIFT 0x00000014
+#define MC_SEQ_RXFRAMING_BYTE3_D0__DQ6_MASK 0x0f000000L
+#define MC_SEQ_RXFRAMING_BYTE3_D0__DQ6__SHIFT 0x00000018
+#define MC_SEQ_RXFRAMING_BYTE3_D0__DQ7_MASK 0xf0000000L
+#define MC_SEQ_RXFRAMING_BYTE3_D0__DQ7__SHIFT 0x0000001c
+#define MC_SEQ_RXFRAMING_BYTE3_D1__DQ0_MASK 0x0000000fL
+#define MC_SEQ_RXFRAMING_BYTE3_D1__DQ0__SHIFT 0x00000000
+#define MC_SEQ_RXFRAMING_BYTE3_D1__DQ1_MASK 0x000000f0L
+#define MC_SEQ_RXFRAMING_BYTE3_D1__DQ1__SHIFT 0x00000004
+#define MC_SEQ_RXFRAMING_BYTE3_D1__DQ2_MASK 0x00000f00L
+#define MC_SEQ_RXFRAMING_BYTE3_D1__DQ2__SHIFT 0x00000008
+#define MC_SEQ_RXFRAMING_BYTE3_D1__DQ3_MASK 0x0000f000L
+#define MC_SEQ_RXFRAMING_BYTE3_D1__DQ3__SHIFT 0x0000000c
+#define MC_SEQ_RXFRAMING_BYTE3_D1__DQ4_MASK 0x000f0000L
+#define MC_SEQ_RXFRAMING_BYTE3_D1__DQ4__SHIFT 0x00000010
+#define MC_SEQ_RXFRAMING_BYTE3_D1__DQ5_MASK 0x00f00000L
+#define MC_SEQ_RXFRAMING_BYTE3_D1__DQ5__SHIFT 0x00000014
+#define MC_SEQ_RXFRAMING_BYTE3_D1__DQ6_MASK 0x0f000000L
+#define MC_SEQ_RXFRAMING_BYTE3_D1__DQ6__SHIFT 0x00000018
+#define MC_SEQ_RXFRAMING_BYTE3_D1__DQ7_MASK 0xf0000000L
+#define MC_SEQ_RXFRAMING_BYTE3_D1__DQ7__SHIFT 0x0000001c
+#define MC_SEQ_RXFRAMING_DBI_D0__DBI0_MASK 0x0000000fL
+#define MC_SEQ_RXFRAMING_DBI_D0__DBI0__SHIFT 0x00000000
+#define MC_SEQ_RXFRAMING_DBI_D0__DBI1_MASK 0x000000f0L
+#define MC_SEQ_RXFRAMING_DBI_D0__DBI1__SHIFT 0x00000004
+#define MC_SEQ_RXFRAMING_DBI_D0__DBI2_MASK 0x00000f00L
+#define MC_SEQ_RXFRAMING_DBI_D0__DBI2__SHIFT 0x00000008
+#define MC_SEQ_RXFRAMING_DBI_D0__DBI3_MASK 0x0000f000L
+#define MC_SEQ_RXFRAMING_DBI_D0__DBI3__SHIFT 0x0000000c
+#define MC_SEQ_RXFRAMING_DBI_D1__DBI0_MASK 0x0000000fL
+#define MC_SEQ_RXFRAMING_DBI_D1__DBI0__SHIFT 0x00000000
+#define MC_SEQ_RXFRAMING_DBI_D1__DBI1_MASK 0x000000f0L
+#define MC_SEQ_RXFRAMING_DBI_D1__DBI1__SHIFT 0x00000004
+#define MC_SEQ_RXFRAMING_DBI_D1__DBI2_MASK 0x00000f00L
+#define MC_SEQ_RXFRAMING_DBI_D1__DBI2__SHIFT 0x00000008
+#define MC_SEQ_RXFRAMING_DBI_D1__DBI3_MASK 0x0000f000L
+#define MC_SEQ_RXFRAMING_DBI_D1__DBI3__SHIFT 0x0000000c
+#define MC_SEQ_RXFRAMING_EDC_D0__EDC0_MASK 0x0000000fL
+#define MC_SEQ_RXFRAMING_EDC_D0__EDC0__SHIFT 0x00000000
+#define MC_SEQ_RXFRAMING_EDC_D0__EDC1_MASK 0x000000f0L
+#define MC_SEQ_RXFRAMING_EDC_D0__EDC1__SHIFT 0x00000004
+#define MC_SEQ_RXFRAMING_EDC_D0__EDC2_MASK 0x00000f00L
+#define MC_SEQ_RXFRAMING_EDC_D0__EDC2__SHIFT 0x00000008
+#define MC_SEQ_RXFRAMING_EDC_D0__EDC3_MASK 0x0000f000L
+#define MC_SEQ_RXFRAMING_EDC_D0__EDC3__SHIFT 0x0000000c
+#define MC_SEQ_RXFRAMING_EDC_D0__WCDR0_MASK 0x000f0000L
+#define MC_SEQ_RXFRAMING_EDC_D0__WCDR0__SHIFT 0x00000010
+#define MC_SEQ_RXFRAMING_EDC_D0__WCDR1_MASK 0x00f00000L
+#define MC_SEQ_RXFRAMING_EDC_D0__WCDR1__SHIFT 0x00000014
+#define MC_SEQ_RXFRAMING_EDC_D0__WCDR2_MASK 0x0f000000L
+#define MC_SEQ_RXFRAMING_EDC_D0__WCDR2__SHIFT 0x00000018
+#define MC_SEQ_RXFRAMING_EDC_D0__WCDR3_MASK 0xf0000000L
+#define MC_SEQ_RXFRAMING_EDC_D0__WCDR3__SHIFT 0x0000001c
+#define MC_SEQ_RXFRAMING_EDC_D1__EDC0_MASK 0x0000000fL
+#define MC_SEQ_RXFRAMING_EDC_D1__EDC0__SHIFT 0x00000000
+#define MC_SEQ_RXFRAMING_EDC_D1__EDC1_MASK 0x000000f0L
+#define MC_SEQ_RXFRAMING_EDC_D1__EDC1__SHIFT 0x00000004
+#define MC_SEQ_RXFRAMING_EDC_D1__EDC2_MASK 0x00000f00L
+#define MC_SEQ_RXFRAMING_EDC_D1__EDC2__SHIFT 0x00000008
+#define MC_SEQ_RXFRAMING_EDC_D1__EDC3_MASK 0x0000f000L
+#define MC_SEQ_RXFRAMING_EDC_D1__EDC3__SHIFT 0x0000000c
+#define MC_SEQ_RXFRAMING_EDC_D1__WCDR0_MASK 0x000f0000L
+#define MC_SEQ_RXFRAMING_EDC_D1__WCDR0__SHIFT 0x00000010
+#define MC_SEQ_RXFRAMING_EDC_D1__WCDR1_MASK 0x00f00000L
+#define MC_SEQ_RXFRAMING_EDC_D1__WCDR1__SHIFT 0x00000014
+#define MC_SEQ_RXFRAMING_EDC_D1__WCDR2_MASK 0x0f000000L
+#define MC_SEQ_RXFRAMING_EDC_D1__WCDR2__SHIFT 0x00000018
+#define MC_SEQ_RXFRAMING_EDC_D1__WCDR3_MASK 0xf0000000L
+#define MC_SEQ_RXFRAMING_EDC_D1__WCDR3__SHIFT 0x0000001c
+#define MC_SEQ_STATUS_M__CMD_RDY_D0_MASK 0x00000004L
+#define MC_SEQ_STATUS_M__CMD_RDY_D0__SHIFT 0x00000002
+#define MC_SEQ_STATUS_M__CMD_RDY_D1_MASK 0x00000008L
+#define MC_SEQ_STATUS_M__CMD_RDY_D1__SHIFT 0x00000003
+#define MC_SEQ_STATUS_M__PMG_FSMSTATE_MASK 0x01f00000L
+#define MC_SEQ_STATUS_M__PMG_FSMSTATE__SHIFT 0x00000014
+#define MC_SEQ_STATUS_M__PMG_PWRSTATE_MASK 0x00010000L
+#define MC_SEQ_STATUS_M__PMG_PWRSTATE__SHIFT 0x00000010
+#define MC_SEQ_STATUS_M__PWRUP_COMPL_D0_MASK 0x00000001L
+#define MC_SEQ_STATUS_M__PWRUP_COMPL_D0__SHIFT 0x00000000
+#define MC_SEQ_STATUS_M__PWRUP_COMPL_D1_MASK 0x00000002L
+#define MC_SEQ_STATUS_M__PWRUP_COMPL_D1__SHIFT 0x00000001
+#define MC_SEQ_STATUS_M__SEQ0_ARB_CMD_FIFO_EMPTY_MASK 0x00000100L
+#define MC_SEQ_STATUS_M__SEQ0_ARB_CMD_FIFO_EMPTY__SHIFT 0x00000008
+#define MC_SEQ_STATUS_M__SEQ0_BUSY_HYS_MASK 0x02000000L
+#define MC_SEQ_STATUS_M__SEQ0_BUSY_HYS__SHIFT 0x00000019
+#define MC_SEQ_STATUS_M__SEQ0_BUSY_MASK 0x00004000L
+#define MC_SEQ_STATUS_M__SEQ0_BUSY__SHIFT 0x0000000e
+#define MC_SEQ_STATUS_M__SEQ0_RS_DATA_FIFO_FULL_MASK 0x00001000L
+#define MC_SEQ_STATUS_M__SEQ0_RS_DATA_FIFO_FULL__SHIFT 0x0000000c
+#define MC_SEQ_STATUS_M__SEQ1_ARB_CMD_FIFO_EMPTY_MASK 0x00000200L
+#define MC_SEQ_STATUS_M__SEQ1_ARB_CMD_FIFO_EMPTY__SHIFT 0x00000009
+#define MC_SEQ_STATUS_M__SEQ1_BUSY_HYS_MASK 0x04000000L
+#define MC_SEQ_STATUS_M__SEQ1_BUSY_HYS__SHIFT 0x0000001a
+#define MC_SEQ_STATUS_M__SEQ1_BUSY_MASK 0x00008000L
+#define MC_SEQ_STATUS_M__SEQ1_BUSY__SHIFT 0x0000000f
+#define MC_SEQ_STATUS_M__SEQ1_RS_DATA_FIFO_FULL_MASK 0x00002000L
+#define MC_SEQ_STATUS_M__SEQ1_RS_DATA_FIFO_FULL__SHIFT 0x0000000d
+#define MC_SEQ_STATUS_M__SLF_D0_MASK 0x00000010L
+#define MC_SEQ_STATUS_M__SLF_D0__SHIFT 0x00000004
+#define MC_SEQ_STATUS_M__SLF_D1_MASK 0x00000020L
+#define MC_SEQ_STATUS_M__SLF_D1__SHIFT 0x00000005
+#define MC_SEQ_STATUS_M__SS_SLF_D0_MASK 0x00000040L
+#define MC_SEQ_STATUS_M__SS_SLF_D0__SHIFT 0x00000006
+#define MC_SEQ_STATUS_M__SS_SLF_D1_MASK 0x00000080L
+#define MC_SEQ_STATUS_M__SS_SLF_D1__SHIFT 0x00000007
+#define MC_SEQ_STATUS_S__SEQ0_ARB_CMD_FIFO_FULL_MASK 0x00000010L
+#define MC_SEQ_STATUS_S__SEQ0_ARB_CMD_FIFO_FULL__SHIFT 0x00000004
+#define MC_SEQ_STATUS_S__SEQ0_ARB_DATA_FIFO_FULL_MASK 0x00000001L
+#define MC_SEQ_STATUS_S__SEQ0_ARB_DATA_FIFO_FULL__SHIFT 0x00000000
+#define MC_SEQ_STATUS_S__SEQ0_RS_DATA_FIFO_EMPTY_MASK 0x00000100L
+#define MC_SEQ_STATUS_S__SEQ0_RS_DATA_FIFO_EMPTY__SHIFT 0x00000008
+#define MC_SEQ_STATUS_S__SEQ1_ARB_CMD_FIFO_FULL_MASK 0x00000020L
+#define MC_SEQ_STATUS_S__SEQ1_ARB_CMD_FIFO_FULL__SHIFT 0x00000005
+#define MC_SEQ_STATUS_S__SEQ1_ARB_DATA_FIFO_FULL_MASK 0x00000002L
+#define MC_SEQ_STATUS_S__SEQ1_ARB_DATA_FIFO_FULL__SHIFT 0x00000001
+#define MC_SEQ_STATUS_S__SEQ1_RS_DATA_FIFO_EMPTY_MASK 0x00000200L
+#define MC_SEQ_STATUS_S__SEQ1_RS_DATA_FIFO_EMPTY__SHIFT 0x00000009
+#define MC_SEQ_SUP_CNTL__BKPT_CLEAR_MASK 0x00000080L
+#define MC_SEQ_SUP_CNTL__BKPT_CLEAR__SHIFT 0x00000007
+#define MC_SEQ_SUP_CNTL__FAST_WRITE_MASK 0x00000040L
+#define MC_SEQ_SUP_CNTL__FAST_WRITE__SHIFT 0x00000006
+#define MC_SEQ_SUP_CNTL__PGM_CHKSUM_MASK 0xff800000L
+#define MC_SEQ_SUP_CNTL__PGM_CHKSUM__SHIFT 0x00000017
+#define MC_SEQ_SUP_CNTL__PGM_READ_MASK 0x00000020L
+#define MC_SEQ_SUP_CNTL__PGM_READ__SHIFT 0x00000005
+#define MC_SEQ_SUP_CNTL__PGM_WRITE_MASK 0x00000010L
+#define MC_SEQ_SUP_CNTL__PGM_WRITE__SHIFT 0x00000004
+#define MC_SEQ_SUP_CNTL__RESET_PC_MASK 0x00000008L
+#define MC_SEQ_SUP_CNTL__RESET_PC__SHIFT 0x00000003
+#define MC_SEQ_SUP_CNTL__RUN_MASK 0x00000001L
+#define MC_SEQ_SUP_CNTL__RUN__SHIFT 0x00000000
+#define MC_SEQ_SUP_CNTL__SINGLE_STEP_MASK 0x00000002L
+#define MC_SEQ_SUP_CNTL__SINGLE_STEP__SHIFT 0x00000001
+#define MC_SEQ_SUP_CNTL__SW_WAKE_MASK 0x00000004L
+#define MC_SEQ_SUP_CNTL__SW_WAKE__SHIFT 0x00000002
+#define MC_SEQ_SUP_DEC_STAT__STATUS_MASK 0xffffffffL
+#define MC_SEQ_SUP_DEC_STAT__STATUS__SHIFT 0x00000000
+#define MC_SEQ_SUP_GP0_STAT__STATUS_MASK 0xffffffffL
+#define MC_SEQ_SUP_GP0_STAT__STATUS__SHIFT 0x00000000
+#define MC_SEQ_SUP_GP1_STAT__STATUS_MASK 0xffffffffL
+#define MC_SEQ_SUP_GP1_STAT__STATUS__SHIFT 0x00000000
+#define MC_SEQ_SUP_GP2_STAT__STATUS_MASK 0xffffffffL
+#define MC_SEQ_SUP_GP2_STAT__STATUS__SHIFT 0x00000000
+#define MC_SEQ_SUP_GP3_STAT__STATUS_MASK 0xffffffffL
+#define MC_SEQ_SUP_GP3_STAT__STATUS__SHIFT 0x00000000
+#define MC_SEQ_SUP_IR_STAT__STATUS_MASK 0xffffffffL
+#define MC_SEQ_SUP_IR_STAT__STATUS__SHIFT 0x00000000
+#define MC_SEQ_SUP_PGM__CNTL_MASK 0xffffffffL
+#define MC_SEQ_SUP_PGM__CNTL__SHIFT 0x00000000
+#define MC_SEQ_SUP_PGM_STAT__STATUS_MASK 0xffffffffL
+#define MC_SEQ_SUP_PGM_STAT__STATUS__SHIFT 0x00000000
+#define MC_SEQ_SUP_R_PGM__PGM_MASK 0xffffffffL
+#define MC_SEQ_SUP_R_PGM__PGM__SHIFT 0x00000000
+#define MC_SEQ_TCG_CNTL__AREF_BOTH_MASK 0x04000000L
+#define MC_SEQ_TCG_CNTL__AREF_BOTH__SHIFT 0x0000001a
+#define MC_SEQ_TCG_CNTL__AREF_LAST_MASK 0x02000000L
+#define MC_SEQ_TCG_CNTL__AREF_LAST__SHIFT 0x00000019
+#define MC_SEQ_TCG_CNTL__BURST_NUM_MASK 0x00380000L
+#define MC_SEQ_TCG_CNTL__BURST_NUM__SHIFT 0x00000013
+#define MC_SEQ_TCG_CNTL__DATA_CNT_MASK 0x0000f000L
+#define MC_SEQ_TCG_CNTL__DATA_CNT__SHIFT 0x0000000c
+#define MC_SEQ_TCG_CNTL__DONE_MASK 0x80000000L
+#define MC_SEQ_TCG_CNTL__DONE__SHIFT 0x0000001f
+#define MC_SEQ_TCG_CNTL__ENABLE_D0_MASK 0x00000002L
+#define MC_SEQ_TCG_CNTL__ENABLE_D0__SHIFT 0x00000001
+#define MC_SEQ_TCG_CNTL__ENABLE_D1_MASK 0x00000004L
+#define MC_SEQ_TCG_CNTL__ENABLE_D1__SHIFT 0x00000002
+#define MC_SEQ_TCG_CNTL__FRAME_TRAIN_MASK 0x00040000L
+#define MC_SEQ_TCG_CNTL__FRAME_TRAIN__SHIFT 0x00000012
+#define MC_SEQ_TCG_CNTL__INFINITE_CMD_MASK 0x00000080L
+#define MC_SEQ_TCG_CNTL__INFINITE_CMD__SHIFT 0x00000007
+#define MC_SEQ_TCG_CNTL__ISSUE_AREF_MASK 0x00400000L
+#define MC_SEQ_TCG_CNTL__ISSUE_AREF__SHIFT 0x00000016
+#define MC_SEQ_TCG_CNTL__LOAD_FIFO_MASK 0x00010000L
+#define MC_SEQ_TCG_CNTL__LOAD_FIFO__SHIFT 0x00000010
+#define MC_SEQ_TCG_CNTL__MOP_MASK 0x00000f00L
+#define MC_SEQ_TCG_CNTL__MOP__SHIFT 0x00000008
+#define MC_SEQ_TCG_CNTL__NFIFO_MASK 0x00000070L
+#define MC_SEQ_TCG_CNTL__NFIFO__SHIFT 0x00000004
+#define MC_SEQ_TCG_CNTL__RESET_MASK 0x00000001L
+#define MC_SEQ_TCG_CNTL__RESET__SHIFT 0x00000000
+#define MC_SEQ_TCG_CNTL__SHORT_LDFF_MASK 0x00020000L
+#define MC_SEQ_TCG_CNTL__SHORT_LDFF__SHIFT 0x00000011
+#define MC_SEQ_TCG_CNTL__START_MASK 0x00000008L
+#define MC_SEQ_TCG_CNTL__START__SHIFT 0x00000003
+#define MC_SEQ_TCG_CNTL__TXDBI_CNTL_MASK 0x00800000L
+#define MC_SEQ_TCG_CNTL__TXDBI_CNTL__SHIFT 0x00000017
+#define MC_SEQ_TCG_CNTL__VPTR_MASK_MASK 0x01000000L
+#define MC_SEQ_TCG_CNTL__VPTR_MASK__SHIFT 0x00000018
+#define MC_SEQ_TIMER_RD__COUNTER_MASK 0xffffffffL
+#define MC_SEQ_TIMER_RD__COUNTER__SHIFT 0x00000000
+#define MC_SEQ_TIMER_WR__COUNTER_MASK 0xffffffffL
+#define MC_SEQ_TIMER_WR__COUNTER__SHIFT 0x00000000
+#define MC_SEQ_TRAIN_CAPTURE__ALLOWSTOP0_WAKEUP_MASK 0x00040000L
+#define MC_SEQ_TRAIN_CAPTURE__ALLOWSTOP0_WAKEUP__SHIFT 0x00000012
+#define MC_SEQ_TRAIN_CAPTURE__ALLOWSTOP1_WAKEUP_MASK 0x00080000L
+#define MC_SEQ_TRAIN_CAPTURE__ALLOWSTOP1_WAKEUP__SHIFT 0x00000013
+#define MC_SEQ_TRAIN_CAPTURE__ALLOWSTOPB0_WAKEUP_MASK 0x00200000L
+#define MC_SEQ_TRAIN_CAPTURE__ALLOWSTOPB0_WAKEUP__SHIFT 0x00000015
+#define MC_SEQ_TRAIN_CAPTURE__ALLOWSTOPB1_WAKEUP_MASK 0x00400000L
+#define MC_SEQ_TRAIN_CAPTURE__ALLOWSTOPB1_WAKEUP__SHIFT 0x00000016
+#define MC_SEQ_TRAIN_CAPTURE__D0_ARF_WAKEUP_MASK 0x00000001L
+#define MC_SEQ_TRAIN_CAPTURE__D0_ARF_WAKEUP__SHIFT 0x00000000
+#define MC_SEQ_TRAIN_CAPTURE__D0_CMD_FIFO_READY_WAKEUP_MASK 0x00000100L
+#define MC_SEQ_TRAIN_CAPTURE__D0_CMD_FIFO_READY_WAKEUP__SHIFT 0x00000008
+#define MC_SEQ_TRAIN_CAPTURE__D0_DATA_FIFO_READY_WAKEUP_MASK 0x00000400L
+#define MC_SEQ_TRAIN_CAPTURE__D0_DATA_FIFO_READY_WAKEUP__SHIFT 0x0000000a
+#define MC_SEQ_TRAIN_CAPTURE__D0_IDLEH_WAKEUP_MASK 0x01000000L
+#define MC_SEQ_TRAIN_CAPTURE__D0_IDLEH_WAKEUP__SHIFT 0x00000018
+#define MC_SEQ_TRAIN_CAPTURE__D0_REDC_WAKEUP_MASK 0x00000004L
+#define MC_SEQ_TRAIN_CAPTURE__D0_REDC_WAKEUP__SHIFT 0x00000002
+#define MC_SEQ_TRAIN_CAPTURE__D0_WEDC_WAKEUP_MASK 0x00000010L
+#define MC_SEQ_TRAIN_CAPTURE__D0_WEDC_WAKEUP__SHIFT 0x00000004
+#define MC_SEQ_TRAIN_CAPTURE__D1_ARF_WAKEUP_MASK 0x00000002L
+#define MC_SEQ_TRAIN_CAPTURE__D1_ARF_WAKEUP__SHIFT 0x00000001
+#define MC_SEQ_TRAIN_CAPTURE__D1_CMD_FIFO_READY_WAKEUP_MASK 0x00000200L
+#define MC_SEQ_TRAIN_CAPTURE__D1_CMD_FIFO_READY_WAKEUP__SHIFT 0x00000009
+#define MC_SEQ_TRAIN_CAPTURE__D1_DATA_FIFO_READY_WAKEUP_MASK 0x00000800L
+#define MC_SEQ_TRAIN_CAPTURE__D1_DATA_FIFO_READY_WAKEUP__SHIFT 0x0000000b
+#define MC_SEQ_TRAIN_CAPTURE__D1_IDLEH_WAKEUP_MASK 0x02000000L
+#define MC_SEQ_TRAIN_CAPTURE__D1_IDLEH_WAKEUP__SHIFT 0x00000019
+#define MC_SEQ_TRAIN_CAPTURE__D1_REDC_WAKEUP_MASK 0x00000008L
+#define MC_SEQ_TRAIN_CAPTURE__D1_REDC_WAKEUP__SHIFT 0x00000003
+#define MC_SEQ_TRAIN_CAPTURE__D1_WEDC_WAKEUP_MASK 0x00000020L
+#define MC_SEQ_TRAIN_CAPTURE__D1_WEDC_WAKEUP__SHIFT 0x00000005
+#define MC_SEQ_TRAIN_CAPTURE__DPM_LPT_WAKEUP_MASK 0x00800000L
+#define MC_SEQ_TRAIN_CAPTURE__DPM_LPT_WAKEUP__SHIFT 0x00000017
+#define MC_SEQ_TRAIN_CAPTURE__DPM_WAKEUP_MASK 0x00100000L
+#define MC_SEQ_TRAIN_CAPTURE__DPM_WAKEUP__SHIFT 0x00000014
+#define MC_SEQ_TRAIN_CAPTURE__MCLK_FREQ_CHANGE_WAKEUP_MASK 0x00000040L
+#define MC_SEQ_TRAIN_CAPTURE__MCLK_FREQ_CHANGE_WAKEUP__SHIFT 0x00000006
+#define MC_SEQ_TRAIN_CAPTURE__PHY_PG_WAKEUP_MASK 0x04000000L
+#define MC_SEQ_TRAIN_CAPTURE__PHY_PG_WAKEUP__SHIFT 0x0000001a
+#define MC_SEQ_TRAIN_CAPTURE__RESERVE0_WAKEUP_MASK 0x00002000L
+#define MC_SEQ_TRAIN_CAPTURE__RESERVE0_WAKEUP__SHIFT 0x0000000d
+#define MC_SEQ_TRAIN_CAPTURE__SCLK_SRBM_READY_WAKEUP_MASK 0x00000080L
+#define MC_SEQ_TRAIN_CAPTURE__SCLK_SRBM_READY_WAKEUP__SHIFT 0x00000007
+#define MC_SEQ_TRAIN_CAPTURE__SOFTWARE_WAKEUP_WAKEUP_MASK 0x00001000L
+#define MC_SEQ_TRAIN_CAPTURE__SOFTWARE_WAKEUP_WAKEUP__SHIFT 0x0000000c
+#define MC_SEQ_TRAIN_CAPTURE__TCG_DONE_WAKEUP_MASK 0x00020000L
+#define MC_SEQ_TRAIN_CAPTURE__TCG_DONE_WAKEUP__SHIFT 0x00000011
+#define MC_SEQ_TRAIN_CAPTURE__TIMER_DONE_WAKEUP_MASK 0x00008000L
+#define MC_SEQ_TRAIN_CAPTURE__TIMER_DONE_WAKEUP__SHIFT 0x0000000f
+#define MC_SEQ_TRAIN_CAPTURE__TSM_DONE_WAKEUP_MASK 0x00004000L
+#define MC_SEQ_TRAIN_CAPTURE__TSM_DONE_WAKEUP__SHIFT 0x0000000e
+#define MC_SEQ_TRAIN_EDC_THRESHOLD2__THRESHOLD_PERIOD_MASK 0xffffffffL
+#define MC_SEQ_TRAIN_EDC_THRESHOLD2__THRESHOLD_PERIOD__SHIFT 0x00000000
+#define MC_SEQ_TRAIN_EDC_THRESHOLD3__CH0_LINK_RETRAIN_IN_PROGRESS_MASK 0x00000100L
+#define MC_SEQ_TRAIN_EDC_THRESHOLD3__CH0_LINK_RETRAIN_IN_PROGRESS__SHIFT 0x00000008
+#define MC_SEQ_TRAIN_EDC_THRESHOLD3__CH0_LINK_RETRAIN_STATUS_MASK 0x00000001L
+#define MC_SEQ_TRAIN_EDC_THRESHOLD3__CH0_LINK_RETRAIN_STATUS__SHIFT 0x00000000
+#define MC_SEQ_TRAIN_EDC_THRESHOLD3__CH1_LINK_RETRAIN_IN_PROGRESS_MASK 0x00000200L
+#define MC_SEQ_TRAIN_EDC_THRESHOLD3__CH1_LINK_RETRAIN_IN_PROGRESS__SHIFT 0x00000009
+#define MC_SEQ_TRAIN_EDC_THRESHOLD3__CH1_LINK_RETRAIN_STATUS_MASK 0x00000002L
+#define MC_SEQ_TRAIN_EDC_THRESHOLD3__CH1_LINK_RETRAIN_STATUS__SHIFT 0x00000001
+#define MC_SEQ_TRAIN_EDC_THRESHOLD3__CLEAR_RETRAIN_STATUS_MASK 0x00000004L
+#define MC_SEQ_TRAIN_EDC_THRESHOLD3__CLEAR_RETRAIN_STATUS__SHIFT 0x00000002
+#define MC_SEQ_TRAIN_EDC_THRESHOLD3__RETRAIN_MONITOR_MASK 0x00000030L
+#define MC_SEQ_TRAIN_EDC_THRESHOLD3__RETRAIN_MONITOR__SHIFT 0x00000004
+#define MC_SEQ_TRAIN_EDC_THRESHOLD3__RETRAIN_VBI_MASK 0x00000008L
+#define MC_SEQ_TRAIN_EDC_THRESHOLD3__RETRAIN_VBI__SHIFT 0x00000003
+#define MC_SEQ_TRAIN_EDC_THRESHOLD__READ_EDC_THRESHOLD_MASK 0xffff0000L
+#define MC_SEQ_TRAIN_EDC_THRESHOLD__READ_EDC_THRESHOLD__SHIFT 0x00000010
+#define MC_SEQ_TRAIN_EDC_THRESHOLD__WRITE_EDC_THRESHOLD_MASK 0x0000ffffL
+#define MC_SEQ_TRAIN_EDC_THRESHOLD__WRITE_EDC_THRESHOLD__SHIFT 0x00000000
+#define MC_SEQ_TRAIN_TIMING__TARF2T_MASK 0x000003e0L
+#define MC_SEQ_TRAIN_TIMING__TARF2T__SHIFT 0x00000005
+#define MC_SEQ_TRAIN_TIMING__TLD2LD_MASK 0x000f8000L
+#define MC_SEQ_TRAIN_TIMING__TLD2LD__SHIFT 0x0000000f
+#define MC_SEQ_TRAIN_TIMING__TT2ROW_MASK 0x00007c00L
+#define MC_SEQ_TRAIN_TIMING__TT2ROW__SHIFT 0x0000000a
+#define MC_SEQ_TRAIN_TIMING__TWT2RT_MASK 0x0000001fL
+#define MC_SEQ_TRAIN_TIMING__TWT2RT__SHIFT 0x00000000
+#define MC_SEQ_TRAIN_WAKEUP_CLEAR__ALLOWSTOP0_WAKEUP_MASK 0x00040000L
+#define MC_SEQ_TRAIN_WAKEUP_CLEAR__ALLOWSTOP0_WAKEUP__SHIFT 0x00000012
+#define MC_SEQ_TRAIN_WAKEUP_CLEAR__ALLOWSTOP1_WAKEUP_MASK 0x00080000L
+#define MC_SEQ_TRAIN_WAKEUP_CLEAR__ALLOWSTOP1_WAKEUP__SHIFT 0x00000013
+#define MC_SEQ_TRAIN_WAKEUP_CLEAR__ALLOWSTOPB0_WAKEUP_MASK 0x00200000L
+#define MC_SEQ_TRAIN_WAKEUP_CLEAR__ALLOWSTOPB0_WAKEUP__SHIFT 0x00000015
+#define MC_SEQ_TRAIN_WAKEUP_CLEAR__ALLOWSTOPB1_WAKEUP_MASK 0x00400000L
+#define MC_SEQ_TRAIN_WAKEUP_CLEAR__ALLOWSTOPB1_WAKEUP__SHIFT 0x00000016
+#define MC_SEQ_TRAIN_WAKEUP_CLEAR__CLEARALL_MASK 0x00010000L
+#define MC_SEQ_TRAIN_WAKEUP_CLEAR__CLEARALL__SHIFT 0x00000010
+#define MC_SEQ_TRAIN_WAKEUP_CLEAR__D0_ARF_WAKEUP_MASK 0x00000001L
+#define MC_SEQ_TRAIN_WAKEUP_CLEAR__D0_ARF_WAKEUP__SHIFT 0x00000000
+#define MC_SEQ_TRAIN_WAKEUP_CLEAR__D0_CMD_FIFO_READY_WAKEUP_MASK 0x00000100L
+#define MC_SEQ_TRAIN_WAKEUP_CLEAR__D0_CMD_FIFO_READY_WAKEUP__SHIFT 0x00000008
+#define MC_SEQ_TRAIN_WAKEUP_CLEAR__D0_DATA_FIFO_READY_WAKEUP_MASK 0x00000400L
+#define MC_SEQ_TRAIN_WAKEUP_CLEAR__D0_DATA_FIFO_READY_WAKEUP__SHIFT 0x0000000a
+#define MC_SEQ_TRAIN_WAKEUP_CLEAR__D0_IDLEH_WAKEUP_MASK 0x01000000L
+#define MC_SEQ_TRAIN_WAKEUP_CLEAR__D0_IDLEH_WAKEUP__SHIFT 0x00000018
+#define MC_SEQ_TRAIN_WAKEUP_CLEAR__D0_REDC_WAKEUP_MASK 0x00000004L
+#define MC_SEQ_TRAIN_WAKEUP_CLEAR__D0_REDC_WAKEUP__SHIFT 0x00000002
+#define MC_SEQ_TRAIN_WAKEUP_CLEAR__D0_WEDC_WAKEUP_MASK 0x00000010L
+#define MC_SEQ_TRAIN_WAKEUP_CLEAR__D0_WEDC_WAKEUP__SHIFT 0x00000004
+#define MC_SEQ_TRAIN_WAKEUP_CLEAR__D1_ARF_WAKEUP_MASK 0x00000002L
+#define MC_SEQ_TRAIN_WAKEUP_CLEAR__D1_ARF_WAKEUP__SHIFT 0x00000001
+#define MC_SEQ_TRAIN_WAKEUP_CLEAR__D1_CMD_FIFO_READY_WAKEUP_MASK 0x00000200L
+#define MC_SEQ_TRAIN_WAKEUP_CLEAR__D1_CMD_FIFO_READY_WAKEUP__SHIFT 0x00000009
+#define MC_SEQ_TRAIN_WAKEUP_CLEAR__D1_DATA_FIFO_READY_WAKEUP_MASK 0x00000800L
+#define MC_SEQ_TRAIN_WAKEUP_CLEAR__D1_DATA_FIFO_READY_WAKEUP__SHIFT 0x0000000b
+#define MC_SEQ_TRAIN_WAKEUP_CLEAR__D1_IDLEH_WAKEUP_MASK 0x02000000L
+#define MC_SEQ_TRAIN_WAKEUP_CLEAR__D1_IDLEH_WAKEUP__SHIFT 0x00000019
+#define MC_SEQ_TRAIN_WAKEUP_CLEAR__D1_REDC_WAKEUP_MASK 0x00000008L
+#define MC_SEQ_TRAIN_WAKEUP_CLEAR__D1_REDC_WAKEUP__SHIFT 0x00000003
+#define MC_SEQ_TRAIN_WAKEUP_CLEAR__D1_WEDC_WAKEUP_MASK 0x00000020L
+#define MC_SEQ_TRAIN_WAKEUP_CLEAR__D1_WEDC_WAKEUP__SHIFT 0x00000005
+#define MC_SEQ_TRAIN_WAKEUP_CLEAR__DPM_LPT_WAKEUP_MASK 0x00800000L
+#define MC_SEQ_TRAIN_WAKEUP_CLEAR__DPM_LPT_WAKEUP__SHIFT 0x00000017
+#define MC_SEQ_TRAIN_WAKEUP_CLEAR__DPM_WAKEUP_MASK 0x00100000L
+#define MC_SEQ_TRAIN_WAKEUP_CLEAR__DPM_WAKEUP__SHIFT 0x00000014
+#define MC_SEQ_TRAIN_WAKEUP_CLEAR__MCLK_FREQ_CHANGE_WAKEUP_MASK 0x00000040L
+#define MC_SEQ_TRAIN_WAKEUP_CLEAR__MCLK_FREQ_CHANGE_WAKEUP__SHIFT 0x00000006
+#define MC_SEQ_TRAIN_WAKEUP_CLEAR__PHY_PG_WAKEUP_MASK 0x04000000L
+#define MC_SEQ_TRAIN_WAKEUP_CLEAR__PHY_PG_WAKEUP__SHIFT 0x0000001a
+#define MC_SEQ_TRAIN_WAKEUP_CLEAR__RESERVE0_WAKEUP_MASK 0x00002000L
+#define MC_SEQ_TRAIN_WAKEUP_CLEAR__RESERVE0_WAKEUP__SHIFT 0x0000000d
+#define MC_SEQ_TRAIN_WAKEUP_CLEAR__SCLK_SRBM_READY_WAKEUP_MASK 0x00000080L
+#define MC_SEQ_TRAIN_WAKEUP_CLEAR__SCLK_SRBM_READY_WAKEUP__SHIFT 0x00000007
+#define MC_SEQ_TRAIN_WAKEUP_CLEAR__SOFTWARE_WAKEUP_WAKEUP_MASK 0x00001000L
+#define MC_SEQ_TRAIN_WAKEUP_CLEAR__SOFTWARE_WAKEUP_WAKEUP__SHIFT 0x0000000c
+#define MC_SEQ_TRAIN_WAKEUP_CLEAR__TCG_DONE_WAKEUP_MASK 0x00020000L
+#define MC_SEQ_TRAIN_WAKEUP_CLEAR__TCG_DONE_WAKEUP__SHIFT 0x00000011
+#define MC_SEQ_TRAIN_WAKEUP_CLEAR__TIMER_DONE_WAKEUP_MASK 0x00008000L
+#define MC_SEQ_TRAIN_WAKEUP_CLEAR__TIMER_DONE_WAKEUP__SHIFT 0x0000000f
+#define MC_SEQ_TRAIN_WAKEUP_CLEAR__TSM_DONE_WAKEUP_MASK 0x00004000L
+#define MC_SEQ_TRAIN_WAKEUP_CLEAR__TSM_DONE_WAKEUP__SHIFT 0x0000000e
+#define MC_SEQ_TRAIN_WAKEUP_CNTL__AUTO_REFRESH_ADDR_TRAIN_MASK 0x00000100L
+#define MC_SEQ_TRAIN_WAKEUP_CNTL__AUTO_REFRESH_ADDR_TRAIN__SHIFT 0x00000008
+#define MC_SEQ_TRAIN_WAKEUP_CNTL__AUTO_REFRESH_READ_TRAIN_MASK 0x00000400L
+#define MC_SEQ_TRAIN_WAKEUP_CNTL__AUTO_REFRESH_READ_TRAIN__SHIFT 0x0000000a
+#define MC_SEQ_TRAIN_WAKEUP_CNTL__AUTO_REFRESH_WAKEUP_EARLY_MASK 0x00100000L
+#define MC_SEQ_TRAIN_WAKEUP_CNTL__AUTO_REFRESH_WAKEUP_EARLY__SHIFT 0x00000014
+#define MC_SEQ_TRAIN_WAKEUP_CNTL__AUTO_REFRESH_WCK_TRAIN_MASK 0x00000200L
+#define MC_SEQ_TRAIN_WAKEUP_CNTL__AUTO_REFRESH_WCK_TRAIN__SHIFT 0x00000009
+#define MC_SEQ_TRAIN_WAKEUP_CNTL__AUTO_REFRESH_WRITE_TRAIN_MASK 0x00000800L
+#define MC_SEQ_TRAIN_WAKEUP_CNTL__AUTO_REFRESH_WRITE_TRAIN__SHIFT 0x0000000b
+#define MC_SEQ_TRAIN_WAKEUP_CNTL__BLOCK_ARB_RD_D0_MASK 0x01000000L
+#define MC_SEQ_TRAIN_WAKEUP_CNTL__BLOCK_ARB_RD_D0__SHIFT 0x00000018
+#define MC_SEQ_TRAIN_WAKEUP_CNTL__BLOCK_ARB_RD_D1_MASK 0x04000000L
+#define MC_SEQ_TRAIN_WAKEUP_CNTL__BLOCK_ARB_RD_D1__SHIFT 0x0000001a
+#define MC_SEQ_TRAIN_WAKEUP_CNTL__BLOCK_ARB_WR_D0_MASK 0x02000000L
+#define MC_SEQ_TRAIN_WAKEUP_CNTL__BLOCK_ARB_WR_D0__SHIFT 0x00000019
+#define MC_SEQ_TRAIN_WAKEUP_CNTL__BLOCK_ARB_WR_D1_MASK 0x08000000L
+#define MC_SEQ_TRAIN_WAKEUP_CNTL__BLOCK_ARB_WR_D1__SHIFT 0x0000001b
+#define MC_SEQ_TRAIN_WAKEUP_CNTL__BOOT_UP_ADDR_TRAIN_MASK 0x00000001L
+#define MC_SEQ_TRAIN_WAKEUP_CNTL__BOOT_UP_ADDR_TRAIN__SHIFT 0x00000000
+#define MC_SEQ_TRAIN_WAKEUP_CNTL__BOOT_UP_READ_TRAIN_MASK 0x00000004L
+#define MC_SEQ_TRAIN_WAKEUP_CNTL__BOOT_UP_READ_TRAIN__SHIFT 0x00000002
+#define MC_SEQ_TRAIN_WAKEUP_CNTL__BOOT_UP_WCK_TRAIN_MASK 0x00000002L
+#define MC_SEQ_TRAIN_WAKEUP_CNTL__BOOT_UP_WCK_TRAIN__SHIFT 0x00000001
+#define MC_SEQ_TRAIN_WAKEUP_CNTL__BOOT_UP_WRITE_TRAIN_MASK 0x00000008L
+#define MC_SEQ_TRAIN_WAKEUP_CNTL__BOOT_UP_WRITE_TRAIN__SHIFT 0x00000003
+#define MC_SEQ_TRAIN_WAKEUP_CNTL__DISP_ASTOP_WAKEUP_MASK 0x20000000L
+#define MC_SEQ_TRAIN_WAKEUP_CNTL__DISP_ASTOP_WAKEUP__SHIFT 0x0000001d
+#define MC_SEQ_TRAIN_WAKEUP_CNTL__READ_ECC_ADDR_TRAIN_MASK 0x00010000L
+#define MC_SEQ_TRAIN_WAKEUP_CNTL__READ_ECC_ADDR_TRAIN__SHIFT 0x00000010
+#define MC_SEQ_TRAIN_WAKEUP_CNTL__READ_ECC_READ_TRAIN_MASK 0x00040000L
+#define MC_SEQ_TRAIN_WAKEUP_CNTL__READ_ECC_READ_TRAIN__SHIFT 0x00000012
+#define MC_SEQ_TRAIN_WAKEUP_CNTL__READ_ECC_WCK_TRAIN_MASK 0x00020000L
+#define MC_SEQ_TRAIN_WAKEUP_CNTL__READ_ECC_WCK_TRAIN__SHIFT 0x00000011
+#define MC_SEQ_TRAIN_WAKEUP_CNTL__READ_ECC_WRITE_TRAIN_MASK 0x00080000L
+#define MC_SEQ_TRAIN_WAKEUP_CNTL__READ_ECC_WRITE_TRAIN__SHIFT 0x00000013
+#define MC_SEQ_TRAIN_WAKEUP_CNTL__SELF_REFRESH_ADDR_TRAIN_MASK 0x00000010L
+#define MC_SEQ_TRAIN_WAKEUP_CNTL__SELF_REFRESH_ADDR_TRAIN__SHIFT 0x00000004
+#define MC_SEQ_TRAIN_WAKEUP_CNTL__SELF_REFRESH_READ_TRAIN_MASK 0x00000040L
+#define MC_SEQ_TRAIN_WAKEUP_CNTL__SELF_REFRESH_READ_TRAIN__SHIFT 0x00000006
+#define MC_SEQ_TRAIN_WAKEUP_CNTL__SELF_REFRESH_WCK_TRAIN_MASK 0x00000020L
+#define MC_SEQ_TRAIN_WAKEUP_CNTL__SELF_REFRESH_WCK_TRAIN__SHIFT 0x00000005
+#define MC_SEQ_TRAIN_WAKEUP_CNTL__SELF_REFRESH_WRITE_TRAIN_MASK 0x00000080L
+#define MC_SEQ_TRAIN_WAKEUP_CNTL__SELF_REFRESH_WRITE_TRAIN__SHIFT 0x00000007
+#define MC_SEQ_TRAIN_WAKEUP_CNTL__STOP_WCK_D0_MASK 0x00200000L
+#define MC_SEQ_TRAIN_WAKEUP_CNTL__STOP_WCK_D0__SHIFT 0x00000015
+#define MC_SEQ_TRAIN_WAKEUP_CNTL__STOP_WCK_D1_MASK 0x00400000L
+#define MC_SEQ_TRAIN_WAKEUP_CNTL__STOP_WCK_D1__SHIFT 0x00000016
+#define MC_SEQ_TRAIN_WAKEUP_CNTL__SW_WAKEUP_MASK 0x10000000L
+#define MC_SEQ_TRAIN_WAKEUP_CNTL__SW_WAKEUP__SHIFT 0x0000001c
+#define MC_SEQ_TRAIN_WAKEUP_CNTL__TRAIN_DONE_D0_MASK 0x40000000L
+#define MC_SEQ_TRAIN_WAKEUP_CNTL__TRAIN_DONE_D0__SHIFT 0x0000001e
+#define MC_SEQ_TRAIN_WAKEUP_CNTL__TRAIN_DONE_D1_MASK 0x80000000L
+#define MC_SEQ_TRAIN_WAKEUP_CNTL__TRAIN_DONE_D1__SHIFT 0x0000001f
+#define MC_SEQ_TRAIN_WAKEUP_CNTL__WRITE_ECC_ADDR_TRAIN_MASK 0x00001000L
+#define MC_SEQ_TRAIN_WAKEUP_CNTL__WRITE_ECC_ADDR_TRAIN__SHIFT 0x0000000c
+#define MC_SEQ_TRAIN_WAKEUP_CNTL__WRITE_ECC_READ_TRAIN_MASK 0x00004000L
+#define MC_SEQ_TRAIN_WAKEUP_CNTL__WRITE_ECC_READ_TRAIN__SHIFT 0x0000000e
+#define MC_SEQ_TRAIN_WAKEUP_CNTL__WRITE_ECC_WCK_TRAIN_MASK 0x00002000L
+#define MC_SEQ_TRAIN_WAKEUP_CNTL__WRITE_ECC_WCK_TRAIN__SHIFT 0x0000000d
+#define MC_SEQ_TRAIN_WAKEUP_CNTL__WRITE_ECC_WRITE_TRAIN_MASK 0x00008000L
+#define MC_SEQ_TRAIN_WAKEUP_CNTL__WRITE_ECC_WRITE_TRAIN__SHIFT 0x0000000f
+#define MC_SEQ_TRAIN_WAKEUP_EDGE__ALLOWSTOP0_WAKEUP_MASK 0x00040000L
+#define MC_SEQ_TRAIN_WAKEUP_EDGE__ALLOWSTOP0_WAKEUP__SHIFT 0x00000012
+#define MC_SEQ_TRAIN_WAKEUP_EDGE__ALLOWSTOP1_WAKEUP_MASK 0x00080000L
+#define MC_SEQ_TRAIN_WAKEUP_EDGE__ALLOWSTOP1_WAKEUP__SHIFT 0x00000013
+#define MC_SEQ_TRAIN_WAKEUP_EDGE__ALLOWSTOPB0_WAKEUP_MASK 0x00200000L
+#define MC_SEQ_TRAIN_WAKEUP_EDGE__ALLOWSTOPB0_WAKEUP__SHIFT 0x00000015
+#define MC_SEQ_TRAIN_WAKEUP_EDGE__ALLOWSTOPB1_WAKEUP_MASK 0x00400000L
+#define MC_SEQ_TRAIN_WAKEUP_EDGE__ALLOWSTOPB1_WAKEUP__SHIFT 0x00000016
+#define MC_SEQ_TRAIN_WAKEUP_EDGE__D0_ARF_WAKEUP_MASK 0x00000001L
+#define MC_SEQ_TRAIN_WAKEUP_EDGE__D0_ARF_WAKEUP__SHIFT 0x00000000
+#define MC_SEQ_TRAIN_WAKEUP_EDGE__D0_CMD_FIFO_READY_WAKEUP_MASK 0x00000100L
+#define MC_SEQ_TRAIN_WAKEUP_EDGE__D0_CMD_FIFO_READY_WAKEUP__SHIFT 0x00000008
+#define MC_SEQ_TRAIN_WAKEUP_EDGE__D0_DATA_FIFO_READY_WAKEUP_MASK 0x00000400L
+#define MC_SEQ_TRAIN_WAKEUP_EDGE__D0_DATA_FIFO_READY_WAKEUP__SHIFT 0x0000000a
+#define MC_SEQ_TRAIN_WAKEUP_EDGE__D0_IDLEH_WAKEUP_MASK 0x01000000L
+#define MC_SEQ_TRAIN_WAKEUP_EDGE__D0_IDLEH_WAKEUP__SHIFT 0x00000018
+#define MC_SEQ_TRAIN_WAKEUP_EDGE__D0_REDC_WAKEUP_MASK 0x00000004L
+#define MC_SEQ_TRAIN_WAKEUP_EDGE__D0_REDC_WAKEUP__SHIFT 0x00000002
+#define MC_SEQ_TRAIN_WAKEUP_EDGE__D0_WEDC_WAKEUP_MASK 0x00000010L
+#define MC_SEQ_TRAIN_WAKEUP_EDGE__D0_WEDC_WAKEUP__SHIFT 0x00000004
+#define MC_SEQ_TRAIN_WAKEUP_EDGE__D1_ARF_WAKEUP_MASK 0x00000002L
+#define MC_SEQ_TRAIN_WAKEUP_EDGE__D1_ARF_WAKEUP__SHIFT 0x00000001
+#define MC_SEQ_TRAIN_WAKEUP_EDGE__D1_CMD_FIFO_READY_WAKEUP_MASK 0x00000200L
+#define MC_SEQ_TRAIN_WAKEUP_EDGE__D1_CMD_FIFO_READY_WAKEUP__SHIFT 0x00000009
+#define MC_SEQ_TRAIN_WAKEUP_EDGE__D1_DATA_FIFO_READY_WAKEUP_MASK 0x00000800L
+#define MC_SEQ_TRAIN_WAKEUP_EDGE__D1_DATA_FIFO_READY_WAKEUP__SHIFT 0x0000000b
+#define MC_SEQ_TRAIN_WAKEUP_EDGE__D1_IDLEH_WAKEUP_MASK 0x02000000L
+#define MC_SEQ_TRAIN_WAKEUP_EDGE__D1_IDLEH_WAKEUP__SHIFT 0x00000019
+#define MC_SEQ_TRAIN_WAKEUP_EDGE__D1_REDC_WAKEUP_MASK 0x00000008L
+#define MC_SEQ_TRAIN_WAKEUP_EDGE__D1_REDC_WAKEUP__SHIFT 0x00000003
+#define MC_SEQ_TRAIN_WAKEUP_EDGE__D1_WEDC_WAKEUP_MASK 0x00000020L
+#define MC_SEQ_TRAIN_WAKEUP_EDGE__D1_WEDC_WAKEUP__SHIFT 0x00000005
+#define MC_SEQ_TRAIN_WAKEUP_EDGE__DPM_LPT_WAKEUP_MASK 0x00800000L
+#define MC_SEQ_TRAIN_WAKEUP_EDGE__DPM_LPT_WAKEUP__SHIFT 0x00000017
+#define MC_SEQ_TRAIN_WAKEUP_EDGE__DPM_WAKEUP_MASK 0x00100000L
+#define MC_SEQ_TRAIN_WAKEUP_EDGE__DPM_WAKEUP__SHIFT 0x00000014
+#define MC_SEQ_TRAIN_WAKEUP_EDGE__MCLK_FREQ_CHANGE_WAKEUP_MASK 0x00000040L
+#define MC_SEQ_TRAIN_WAKEUP_EDGE__MCLK_FREQ_CHANGE_WAKEUP__SHIFT 0x00000006
+#define MC_SEQ_TRAIN_WAKEUP_EDGE__PHY_PG_WAKEUP_MASK 0x04000000L
+#define MC_SEQ_TRAIN_WAKEUP_EDGE__PHY_PG_WAKEUP__SHIFT 0x0000001a
+#define MC_SEQ_TRAIN_WAKEUP_EDGE__RESERVE0_WAKEUP_MASK 0x00002000L
+#define MC_SEQ_TRAIN_WAKEUP_EDGE__RESERVE0_WAKEUP__SHIFT 0x0000000d
+#define MC_SEQ_TRAIN_WAKEUP_EDGE__SCLK_SRBM_READY_WAKEUP_MASK 0x00000080L
+#define MC_SEQ_TRAIN_WAKEUP_EDGE__SCLK_SRBM_READY_WAKEUP__SHIFT 0x00000007
+#define MC_SEQ_TRAIN_WAKEUP_EDGE__SOFTWARE_WAKEUP_WAKEUP_MASK 0x00001000L
+#define MC_SEQ_TRAIN_WAKEUP_EDGE__SOFTWARE_WAKEUP_WAKEUP__SHIFT 0x0000000c
+#define MC_SEQ_TRAIN_WAKEUP_EDGE__TCG_DONE_WAKEUP_MASK 0x00020000L
+#define MC_SEQ_TRAIN_WAKEUP_EDGE__TCG_DONE_WAKEUP__SHIFT 0x00000011
+#define MC_SEQ_TRAIN_WAKEUP_EDGE__TIMER_DONE_WAKEUP_MASK 0x00008000L
+#define MC_SEQ_TRAIN_WAKEUP_EDGE__TIMER_DONE_WAKEUP__SHIFT 0x0000000f
+#define MC_SEQ_TRAIN_WAKEUP_EDGE__TSM_DONE_WAKEUP_MASK 0x00004000L
+#define MC_SEQ_TRAIN_WAKEUP_EDGE__TSM_DONE_WAKEUP__SHIFT 0x0000000e
+#define MC_SEQ_TRAIN_WAKEUP_MASK__ALLOWSTOP0_WAKEUP_MASK 0x00040000L
+#define MC_SEQ_TRAIN_WAKEUP_MASK__ALLOWSTOP0_WAKEUP__SHIFT 0x00000012
+#define MC_SEQ_TRAIN_WAKEUP_MASK__ALLOWSTOP1_WAKEUP_MASK 0x00080000L
+#define MC_SEQ_TRAIN_WAKEUP_MASK__ALLOWSTOP1_WAKEUP__SHIFT 0x00000013
+#define MC_SEQ_TRAIN_WAKEUP_MASK__ALLOWSTOPB0_WAKEUP_MASK 0x00200000L
+#define MC_SEQ_TRAIN_WAKEUP_MASK__ALLOWSTOPB0_WAKEUP__SHIFT 0x00000015
+#define MC_SEQ_TRAIN_WAKEUP_MASK__ALLOWSTOPB1_WAKEUP_MASK 0x00400000L
+#define MC_SEQ_TRAIN_WAKEUP_MASK__ALLOWSTOPB1_WAKEUP__SHIFT 0x00000016
+#define MC_SEQ_TRAIN_WAKEUP_MASK__D0_ARF_WAKEUP_MASK 0x00000001L
+#define MC_SEQ_TRAIN_WAKEUP_MASK__D0_ARF_WAKEUP__SHIFT 0x00000000
+#define MC_SEQ_TRAIN_WAKEUP_MASK__D0_CMD_FIFO_READY_WAKEUP_MASK 0x00000100L
+#define MC_SEQ_TRAIN_WAKEUP_MASK__D0_CMD_FIFO_READY_WAKEUP__SHIFT 0x00000008
+#define MC_SEQ_TRAIN_WAKEUP_MASK__D0_DATA_FIFO_READY_WAKEUP_MASK 0x00000400L
+#define MC_SEQ_TRAIN_WAKEUP_MASK__D0_DATA_FIFO_READY_WAKEUP__SHIFT 0x0000000a
+#define MC_SEQ_TRAIN_WAKEUP_MASK__D0_IDLEH_WAKEUP_MASK 0x01000000L
+#define MC_SEQ_TRAIN_WAKEUP_MASK__D0_IDLEH_WAKEUP__SHIFT 0x00000018
+#define MC_SEQ_TRAIN_WAKEUP_MASK__D0_REDC_WAKEUP_MASK 0x00000004L
+#define MC_SEQ_TRAIN_WAKEUP_MASK__D0_REDC_WAKEUP__SHIFT 0x00000002
+#define MC_SEQ_TRAIN_WAKEUP_MASK__D0_WEDC_WAKEUP_MASK 0x00000010L
+#define MC_SEQ_TRAIN_WAKEUP_MASK__D0_WEDC_WAKEUP__SHIFT 0x00000004
+#define MC_SEQ_TRAIN_WAKEUP_MASK__D1_ARF_WAKEUP_MASK 0x00000002L
+#define MC_SEQ_TRAIN_WAKEUP_MASK__D1_ARF_WAKEUP__SHIFT 0x00000001
+#define MC_SEQ_TRAIN_WAKEUP_MASK__D1_CMD_FIFO_READY_WAKEUP_MASK 0x00000200L
+#define MC_SEQ_TRAIN_WAKEUP_MASK__D1_CMD_FIFO_READY_WAKEUP__SHIFT 0x00000009
+#define MC_SEQ_TRAIN_WAKEUP_MASK__D1_DATA_FIFO_READY_WAKEUP_MASK 0x00000800L
+#define MC_SEQ_TRAIN_WAKEUP_MASK__D1_DATA_FIFO_READY_WAKEUP__SHIFT 0x0000000b
+#define MC_SEQ_TRAIN_WAKEUP_MASK__D1_IDLEH_WAKEUP_MASK 0x02000000L
+#define MC_SEQ_TRAIN_WAKEUP_MASK__D1_IDLEH_WAKEUP__SHIFT 0x00000019
+#define MC_SEQ_TRAIN_WAKEUP_MASK__D1_REDC_WAKEUP_MASK 0x00000008L
+#define MC_SEQ_TRAIN_WAKEUP_MASK__D1_REDC_WAKEUP__SHIFT 0x00000003
+#define MC_SEQ_TRAIN_WAKEUP_MASK__D1_WEDC_WAKEUP_MASK 0x00000020L
+#define MC_SEQ_TRAIN_WAKEUP_MASK__D1_WEDC_WAKEUP__SHIFT 0x00000005
+#define MC_SEQ_TRAIN_WAKEUP_MASK__DPM_LPT_WAKEUP_MASK 0x00800000L
+#define MC_SEQ_TRAIN_WAKEUP_MASK__DPM_LPT_WAKEUP__SHIFT 0x00000017
+#define MC_SEQ_TRAIN_WAKEUP_MASK__DPM_WAKEUP_MASK 0x00100000L
+#define MC_SEQ_TRAIN_WAKEUP_MASK__DPM_WAKEUP__SHIFT 0x00000014
+#define MC_SEQ_TRAIN_WAKEUP_MASK__MCLK_FREQ_CHANGE_WAKEUP_MASK 0x00000040L
+#define MC_SEQ_TRAIN_WAKEUP_MASK__MCLK_FREQ_CHANGE_WAKEUP__SHIFT 0x00000006
+#define MC_SEQ_TRAIN_WAKEUP_MASK__PHY_PG_WAKEUP_MASK 0x04000000L
+#define MC_SEQ_TRAIN_WAKEUP_MASK__PHY_PG_WAKEUP__SHIFT 0x0000001a
+#define MC_SEQ_TRAIN_WAKEUP_MASK__RESERVE0_WAKEUP_MASK 0x00002000L
+#define MC_SEQ_TRAIN_WAKEUP_MASK__RESERVE0_WAKEUP__SHIFT 0x0000000d
+#define MC_SEQ_TRAIN_WAKEUP_MASK__SCLK_SRBM_READY_WAKEUP_MASK 0x00000080L
+#define MC_SEQ_TRAIN_WAKEUP_MASK__SCLK_SRBM_READY_WAKEUP__SHIFT 0x00000007
+#define MC_SEQ_TRAIN_WAKEUP_MASK__SOFTWARE_WAKEUP_WAKEUP_MASK 0x00001000L
+#define MC_SEQ_TRAIN_WAKEUP_MASK__SOFTWARE_WAKEUP_WAKEUP__SHIFT 0x0000000c
+#define MC_SEQ_TRAIN_WAKEUP_MASK__TCG_DONE_WAKEUP_MASK 0x00020000L
+#define MC_SEQ_TRAIN_WAKEUP_MASK__TCG_DONE_WAKEUP__SHIFT 0x00000011
+#define MC_SEQ_TRAIN_WAKEUP_MASK__TIMER_DONE_WAKEUP_MASK 0x00008000L
+#define MC_SEQ_TRAIN_WAKEUP_MASK__TIMER_DONE_WAKEUP__SHIFT 0x0000000f
+#define MC_SEQ_TRAIN_WAKEUP_MASK__TSM_DONE_WAKEUP_MASK 0x00004000L
+#define MC_SEQ_TRAIN_WAKEUP_MASK__TSM_DONE_WAKEUP__SHIFT 0x0000000e
+#define MC_SEQ_TSM_BCNT__BCNT_TESTS_MASK 0x0000ff00L
+#define MC_SEQ_TSM_BCNT__BCNT_TESTS__SHIFT 0x00000008
+#define MC_SEQ_TSM_BCNT__COMP_VALUE_MASK 0x00ff0000L
+#define MC_SEQ_TSM_BCNT__COMP_VALUE__SHIFT 0x00000010
+#define MC_SEQ_TSM_BCNT__DONE_TESTS_MASK 0xff000000L
+#define MC_SEQ_TSM_BCNT__DONE_TESTS__SHIFT 0x00000018
+#define MC_SEQ_TSM_BCNT__FALSE_ACT_MASK 0x000000f0L
+#define MC_SEQ_TSM_BCNT__FALSE_ACT__SHIFT 0x00000004
+#define MC_SEQ_TSM_BCNT__TRUE_ACT_MASK 0x0000000fL
+#define MC_SEQ_TSM_BCNT__TRUE_ACT__SHIFT 0x00000000
+#define MC_SEQ_TSM_CTRL__CAPTURE_START_MASK 0x00000002L
+#define MC_SEQ_TSM_CTRL__CAPTURE_START__SHIFT 0x00000001
+#define MC_SEQ_TSM_CTRL__DIRECTION_MASK 0x00000020L
+#define MC_SEQ_TSM_CTRL__DIRECTION__SHIFT 0x00000005
+#define MC_SEQ_TSM_CTRL__DONE_MASK 0x00000004L
+#define MC_SEQ_TSM_CTRL__DONE__SHIFT 0x00000002
+#define MC_SEQ_TSM_CTRL__ERR_MASK 0x00000008L
+#define MC_SEQ_TSM_CTRL__ERR__SHIFT 0x00000003
+#define MC_SEQ_TSM_CTRL__INVERT_MASK 0x00000040L
+#define MC_SEQ_TSM_CTRL__INVERT__SHIFT 0x00000006
+#define MC_SEQ_TSM_CTRL__MASK_BITS_MASK 0x00000080L
+#define MC_SEQ_TSM_CTRL__MASK_BITS__SHIFT 0x00000007
+#define MC_SEQ_TSM_CTRL__POINTER_MASK 0xffff0000L
+#define MC_SEQ_TSM_CTRL__POINTER__SHIFT 0x00000010
+#define MC_SEQ_TSM_CTRL__ROT_INV_MASK 0x00000400L
+#define MC_SEQ_TSM_CTRL__ROT_INV__SHIFT 0x0000000a
+#define MC_SEQ_TSM_CTRL__START_MASK 0x00000001L
+#define MC_SEQ_TSM_CTRL__START__SHIFT 0x00000000
+#define MC_SEQ_TSM_CTRL__STEP_MASK 0x00000010L
+#define MC_SEQ_TSM_CTRL__STEP__SHIFT 0x00000004
+#define MC_SEQ_TSM_CTRL__UPDATE_LOOP_MASK 0x00000300L
+#define MC_SEQ_TSM_CTRL__UPDATE_LOOP__SHIFT 0x00000008
+#define MC_SEQ_TSM_DBI__DBI_MASK 0xffffffffL
+#define MC_SEQ_TSM_DBI__DBI__SHIFT 0x00000000
+#define MC_SEQ_TSM_DEBUG_DATA__TSM_DEBUG_DATA_MASK 0xffffffffL
+#define MC_SEQ_TSM_DEBUG_DATA__TSM_DEBUG_DATA__SHIFT 0x00000000
+#define MC_SEQ_TSM_DEBUG_INDEX__TSM_DEBUG_INDEX_MASK 0x0000001fL
+#define MC_SEQ_TSM_DEBUG_INDEX__TSM_DEBUG_INDEX__SHIFT 0x00000000
+#define MC_SEQ_TSM_EDC__EDC_MASK 0xffffffffL
+#define MC_SEQ_TSM_EDC__EDC__SHIFT 0x00000000
+#define MC_SEQ_TSM_FLAG__ERROR_TESTS_MASK 0xff000000L
+#define MC_SEQ_TSM_FLAG__ERROR_TESTS__SHIFT 0x00000018
+#define MC_SEQ_TSM_FLAG__FALSE_ACT_MASK 0x000000f0L
+#define MC_SEQ_TSM_FLAG__FALSE_ACT__SHIFT 0x00000004
+#define MC_SEQ_TSM_FLAG__FLAG_TESTS_MASK 0x0000ff00L
+#define MC_SEQ_TSM_FLAG__FLAG_TESTS__SHIFT 0x00000008
+#define MC_SEQ_TSM_FLAG__NBBL_MASK_MASK 0x000f0000L
+#define MC_SEQ_TSM_FLAG__NBBL_MASK__SHIFT 0x00000010
+#define MC_SEQ_TSM_FLAG__TRUE_ACT_MASK 0x0000000fL
+#define MC_SEQ_TSM_FLAG__TRUE_ACT__SHIFT 0x00000000
+#define MC_SEQ_TSM_GCNT__COMP_VALUE_MASK 0xffff0000L
+#define MC_SEQ_TSM_GCNT__COMP_VALUE__SHIFT 0x00000010
+#define MC_SEQ_TSM_GCNT__FALSE_ACT_MASK 0x000000f0L
+#define MC_SEQ_TSM_GCNT__FALSE_ACT__SHIFT 0x00000004
+#define MC_SEQ_TSM_GCNT__TESTS_MASK 0x0000ff00L
+#define MC_SEQ_TSM_GCNT__TESTS__SHIFT 0x00000008
+#define MC_SEQ_TSM_GCNT__TRUE_ACT_MASK 0x0000000fL
+#define MC_SEQ_TSM_GCNT__TRUE_ACT__SHIFT 0x00000000
+#define MC_SEQ_TSM_MISC__WCDR_MASK_MASK 0x000f0000L
+#define MC_SEQ_TSM_MISC__WCDR_MASK__SHIFT 0x00000010
+#define MC_SEQ_TSM_MISC__WCDR_PTR_MASK 0x0000ffffL
+#define MC_SEQ_TSM_MISC__WCDR_PTR__SHIFT 0x00000000
+#define MC_SEQ_TSM_NCNT__FALSE_ACT_MASK 0x000000f0L
+#define MC_SEQ_TSM_NCNT__FALSE_ACT__SHIFT 0x00000004
+#define MC_SEQ_TSM_NCNT__NIBBLE_SKIP_MASK 0x0f000000L
+#define MC_SEQ_TSM_NCNT__NIBBLE_SKIP__SHIFT 0x00000018
+#define MC_SEQ_TSM_NCNT__RANGE_HIGH_MASK 0x00f00000L
+#define MC_SEQ_TSM_NCNT__RANGE_HIGH__SHIFT 0x00000014
+#define MC_SEQ_TSM_NCNT__RANGE_LOW_MASK 0x000f0000L
+#define MC_SEQ_TSM_NCNT__RANGE_LOW__SHIFT 0x00000010
+#define MC_SEQ_TSM_NCNT__TESTS_MASK 0x0000ff00L
+#define MC_SEQ_TSM_NCNT__TESTS__SHIFT 0x00000008
+#define MC_SEQ_TSM_NCNT__TRUE_ACT_MASK 0x0000000fL
+#define MC_SEQ_TSM_NCNT__TRUE_ACT__SHIFT 0x00000000
+#define MC_SEQ_TSM_OCNT__CMP_VALUE_MASK 0xffff0000L
+#define MC_SEQ_TSM_OCNT__CMP_VALUE__SHIFT 0x00000010
+#define MC_SEQ_TSM_OCNT__FALSE_ACT_MASK 0x000000f0L
+#define MC_SEQ_TSM_OCNT__FALSE_ACT__SHIFT 0x00000004
+#define MC_SEQ_TSM_OCNT__TESTS_MASK 0x0000ff00L
+#define MC_SEQ_TSM_OCNT__TESTS__SHIFT 0x00000008
+#define MC_SEQ_TSM_OCNT__TRUE_ACT_MASK 0x0000000fL
+#define MC_SEQ_TSM_OCNT__TRUE_ACT__SHIFT 0x00000000
+#define MC_SEQ_TSM_UPDATE__AREF_COUNT_MASK 0x00ff0000L
+#define MC_SEQ_TSM_UPDATE__AREF_COUNT__SHIFT 0x00000010
+#define MC_SEQ_TSM_UPDATE__CAPTR_TESTS_MASK 0xff000000L
+#define MC_SEQ_TSM_UPDATE__CAPTR_TESTS__SHIFT 0x00000018
+#define MC_SEQ_TSM_UPDATE__FALSE_ACT_MASK 0x000000f0L
+#define MC_SEQ_TSM_UPDATE__FALSE_ACT__SHIFT 0x00000004
+#define MC_SEQ_TSM_UPDATE__TRUE_ACT_MASK 0x0000000fL
+#define MC_SEQ_TSM_UPDATE__TRUE_ACT__SHIFT 0x00000000
+#define MC_SEQ_TSM_UPDATE__UPDT_TESTS_MASK 0x0000ff00L
+#define MC_SEQ_TSM_UPDATE__UPDT_TESTS__SHIFT 0x00000008
+#define MC_SEQ_TSM_WCDR__WCDR_MASK 0xffffffffL
+#define MC_SEQ_TSM_WCDR__WCDR__SHIFT 0x00000000
+#define MC_SEQ_TXFRAMING_BYTE0_D0__DQ0_MASK 0x0000000fL
+#define MC_SEQ_TXFRAMING_BYTE0_D0__DQ0__SHIFT 0x00000000
+#define MC_SEQ_TXFRAMING_BYTE0_D0__DQ1_MASK 0x000000f0L
+#define MC_SEQ_TXFRAMING_BYTE0_D0__DQ1__SHIFT 0x00000004
+#define MC_SEQ_TXFRAMING_BYTE0_D0__DQ2_MASK 0x00000f00L
+#define MC_SEQ_TXFRAMING_BYTE0_D0__DQ2__SHIFT 0x00000008
+#define MC_SEQ_TXFRAMING_BYTE0_D0__DQ3_MASK 0x0000f000L
+#define MC_SEQ_TXFRAMING_BYTE0_D0__DQ3__SHIFT 0x0000000c
+#define MC_SEQ_TXFRAMING_BYTE0_D0__DQ4_MASK 0x000f0000L
+#define MC_SEQ_TXFRAMING_BYTE0_D0__DQ4__SHIFT 0x00000010
+#define MC_SEQ_TXFRAMING_BYTE0_D0__DQ5_MASK 0x00f00000L
+#define MC_SEQ_TXFRAMING_BYTE0_D0__DQ5__SHIFT 0x00000014
+#define MC_SEQ_TXFRAMING_BYTE0_D0__DQ6_MASK 0x0f000000L
+#define MC_SEQ_TXFRAMING_BYTE0_D0__DQ6__SHIFT 0x00000018
+#define MC_SEQ_TXFRAMING_BYTE0_D0__DQ7_MASK 0xf0000000L
+#define MC_SEQ_TXFRAMING_BYTE0_D0__DQ7__SHIFT 0x0000001c
+#define MC_SEQ_TXFRAMING_BYTE0_D1__DQ0_MASK 0x0000000fL
+#define MC_SEQ_TXFRAMING_BYTE0_D1__DQ0__SHIFT 0x00000000
+#define MC_SEQ_TXFRAMING_BYTE0_D1__DQ1_MASK 0x000000f0L
+#define MC_SEQ_TXFRAMING_BYTE0_D1__DQ1__SHIFT 0x00000004
+#define MC_SEQ_TXFRAMING_BYTE0_D1__DQ2_MASK 0x00000f00L
+#define MC_SEQ_TXFRAMING_BYTE0_D1__DQ2__SHIFT 0x00000008
+#define MC_SEQ_TXFRAMING_BYTE0_D1__DQ3_MASK 0x0000f000L
+#define MC_SEQ_TXFRAMING_BYTE0_D1__DQ3__SHIFT 0x0000000c
+#define MC_SEQ_TXFRAMING_BYTE0_D1__DQ4_MASK 0x000f0000L
+#define MC_SEQ_TXFRAMING_BYTE0_D1__DQ4__SHIFT 0x00000010
+#define MC_SEQ_TXFRAMING_BYTE0_D1__DQ5_MASK 0x00f00000L
+#define MC_SEQ_TXFRAMING_BYTE0_D1__DQ5__SHIFT 0x00000014
+#define MC_SEQ_TXFRAMING_BYTE0_D1__DQ6_MASK 0x0f000000L
+#define MC_SEQ_TXFRAMING_BYTE0_D1__DQ6__SHIFT 0x00000018
+#define MC_SEQ_TXFRAMING_BYTE0_D1__DQ7_MASK 0xf0000000L
+#define MC_SEQ_TXFRAMING_BYTE0_D1__DQ7__SHIFT 0x0000001c
+#define MC_SEQ_TXFRAMING_BYTE1_D0__DQ0_MASK 0x0000000fL
+#define MC_SEQ_TXFRAMING_BYTE1_D0__DQ0__SHIFT 0x00000000
+#define MC_SEQ_TXFRAMING_BYTE1_D0__DQ1_MASK 0x000000f0L
+#define MC_SEQ_TXFRAMING_BYTE1_D0__DQ1__SHIFT 0x00000004
+#define MC_SEQ_TXFRAMING_BYTE1_D0__DQ2_MASK 0x00000f00L
+#define MC_SEQ_TXFRAMING_BYTE1_D0__DQ2__SHIFT 0x00000008
+#define MC_SEQ_TXFRAMING_BYTE1_D0__DQ3_MASK 0x0000f000L
+#define MC_SEQ_TXFRAMING_BYTE1_D0__DQ3__SHIFT 0x0000000c
+#define MC_SEQ_TXFRAMING_BYTE1_D0__DQ4_MASK 0x000f0000L
+#define MC_SEQ_TXFRAMING_BYTE1_D0__DQ4__SHIFT 0x00000010
+#define MC_SEQ_TXFRAMING_BYTE1_D0__DQ5_MASK 0x00f00000L
+#define MC_SEQ_TXFRAMING_BYTE1_D0__DQ5__SHIFT 0x00000014
+#define MC_SEQ_TXFRAMING_BYTE1_D0__DQ6_MASK 0x0f000000L
+#define MC_SEQ_TXFRAMING_BYTE1_D0__DQ6__SHIFT 0x00000018
+#define MC_SEQ_TXFRAMING_BYTE1_D0__DQ7_MASK 0xf0000000L
+#define MC_SEQ_TXFRAMING_BYTE1_D0__DQ7__SHIFT 0x0000001c
+#define MC_SEQ_TXFRAMING_BYTE1_D1__DQ0_MASK 0x0000000fL
+#define MC_SEQ_TXFRAMING_BYTE1_D1__DQ0__SHIFT 0x00000000
+#define MC_SEQ_TXFRAMING_BYTE1_D1__DQ1_MASK 0x000000f0L
+#define MC_SEQ_TXFRAMING_BYTE1_D1__DQ1__SHIFT 0x00000004
+#define MC_SEQ_TXFRAMING_BYTE1_D1__DQ2_MASK 0x00000f00L
+#define MC_SEQ_TXFRAMING_BYTE1_D1__DQ2__SHIFT 0x00000008
+#define MC_SEQ_TXFRAMING_BYTE1_D1__DQ3_MASK 0x0000f000L
+#define MC_SEQ_TXFRAMING_BYTE1_D1__DQ3__SHIFT 0x0000000c
+#define MC_SEQ_TXFRAMING_BYTE1_D1__DQ4_MASK 0x000f0000L
+#define MC_SEQ_TXFRAMING_BYTE1_D1__DQ4__SHIFT 0x00000010
+#define MC_SEQ_TXFRAMING_BYTE1_D1__DQ5_MASK 0x00f00000L
+#define MC_SEQ_TXFRAMING_BYTE1_D1__DQ5__SHIFT 0x00000014
+#define MC_SEQ_TXFRAMING_BYTE1_D1__DQ6_MASK 0x0f000000L
+#define MC_SEQ_TXFRAMING_BYTE1_D1__DQ6__SHIFT 0x00000018
+#define MC_SEQ_TXFRAMING_BYTE1_D1__DQ7_MASK 0xf0000000L
+#define MC_SEQ_TXFRAMING_BYTE1_D1__DQ7__SHIFT 0x0000001c
+#define MC_SEQ_TXFRAMING_BYTE2_D0__DQ0_MASK 0x0000000fL
+#define MC_SEQ_TXFRAMING_BYTE2_D0__DQ0__SHIFT 0x00000000
+#define MC_SEQ_TXFRAMING_BYTE2_D0__DQ1_MASK 0x000000f0L
+#define MC_SEQ_TXFRAMING_BYTE2_D0__DQ1__SHIFT 0x00000004
+#define MC_SEQ_TXFRAMING_BYTE2_D0__DQ2_MASK 0x00000f00L
+#define MC_SEQ_TXFRAMING_BYTE2_D0__DQ2__SHIFT 0x00000008
+#define MC_SEQ_TXFRAMING_BYTE2_D0__DQ3_MASK 0x0000f000L
+#define MC_SEQ_TXFRAMING_BYTE2_D0__DQ3__SHIFT 0x0000000c
+#define MC_SEQ_TXFRAMING_BYTE2_D0__DQ4_MASK 0x000f0000L
+#define MC_SEQ_TXFRAMING_BYTE2_D0__DQ4__SHIFT 0x00000010
+#define MC_SEQ_TXFRAMING_BYTE2_D0__DQ5_MASK 0x00f00000L
+#define MC_SEQ_TXFRAMING_BYTE2_D0__DQ5__SHIFT 0x00000014
+#define MC_SEQ_TXFRAMING_BYTE2_D0__DQ6_MASK 0x0f000000L
+#define MC_SEQ_TXFRAMING_BYTE2_D0__DQ6__SHIFT 0x00000018
+#define MC_SEQ_TXFRAMING_BYTE2_D0__DQ7_MASK 0xf0000000L
+#define MC_SEQ_TXFRAMING_BYTE2_D0__DQ7__SHIFT 0x0000001c
+#define MC_SEQ_TXFRAMING_BYTE2_D1__DQ0_MASK 0x0000000fL
+#define MC_SEQ_TXFRAMING_BYTE2_D1__DQ0__SHIFT 0x00000000
+#define MC_SEQ_TXFRAMING_BYTE2_D1__DQ1_MASK 0x000000f0L
+#define MC_SEQ_TXFRAMING_BYTE2_D1__DQ1__SHIFT 0x00000004
+#define MC_SEQ_TXFRAMING_BYTE2_D1__DQ2_MASK 0x00000f00L
+#define MC_SEQ_TXFRAMING_BYTE2_D1__DQ2__SHIFT 0x00000008
+#define MC_SEQ_TXFRAMING_BYTE2_D1__DQ3_MASK 0x0000f000L
+#define MC_SEQ_TXFRAMING_BYTE2_D1__DQ3__SHIFT 0x0000000c
+#define MC_SEQ_TXFRAMING_BYTE2_D1__DQ4_MASK 0x000f0000L
+#define MC_SEQ_TXFRAMING_BYTE2_D1__DQ4__SHIFT 0x00000010
+#define MC_SEQ_TXFRAMING_BYTE2_D1__DQ5_MASK 0x00f00000L
+#define MC_SEQ_TXFRAMING_BYTE2_D1__DQ5__SHIFT 0x00000014
+#define MC_SEQ_TXFRAMING_BYTE2_D1__DQ6_MASK 0x0f000000L
+#define MC_SEQ_TXFRAMING_BYTE2_D1__DQ6__SHIFT 0x00000018
+#define MC_SEQ_TXFRAMING_BYTE2_D1__DQ7_MASK 0xf0000000L
+#define MC_SEQ_TXFRAMING_BYTE2_D1__DQ7__SHIFT 0x0000001c
+#define MC_SEQ_TXFRAMING_BYTE3_D0__DQ0_MASK 0x0000000fL
+#define MC_SEQ_TXFRAMING_BYTE3_D0__DQ0__SHIFT 0x00000000
+#define MC_SEQ_TXFRAMING_BYTE3_D0__DQ1_MASK 0x000000f0L
+#define MC_SEQ_TXFRAMING_BYTE3_D0__DQ1__SHIFT 0x00000004
+#define MC_SEQ_TXFRAMING_BYTE3_D0__DQ2_MASK 0x00000f00L
+#define MC_SEQ_TXFRAMING_BYTE3_D0__DQ2__SHIFT 0x00000008
+#define MC_SEQ_TXFRAMING_BYTE3_D0__DQ3_MASK 0x0000f000L
+#define MC_SEQ_TXFRAMING_BYTE3_D0__DQ3__SHIFT 0x0000000c
+#define MC_SEQ_TXFRAMING_BYTE3_D0__DQ4_MASK 0x000f0000L
+#define MC_SEQ_TXFRAMING_BYTE3_D0__DQ4__SHIFT 0x00000010
+#define MC_SEQ_TXFRAMING_BYTE3_D0__DQ5_MASK 0x00f00000L
+#define MC_SEQ_TXFRAMING_BYTE3_D0__DQ5__SHIFT 0x00000014
+#define MC_SEQ_TXFRAMING_BYTE3_D0__DQ6_MASK 0x0f000000L
+#define MC_SEQ_TXFRAMING_BYTE3_D0__DQ6__SHIFT 0x00000018
+#define MC_SEQ_TXFRAMING_BYTE3_D0__DQ7_MASK 0xf0000000L
+#define MC_SEQ_TXFRAMING_BYTE3_D0__DQ7__SHIFT 0x0000001c
+#define MC_SEQ_TXFRAMING_BYTE3_D1__DQ0_MASK 0x0000000fL
+#define MC_SEQ_TXFRAMING_BYTE3_D1__DQ0__SHIFT 0x00000000
+#define MC_SEQ_TXFRAMING_BYTE3_D1__DQ1_MASK 0x000000f0L
+#define MC_SEQ_TXFRAMING_BYTE3_D1__DQ1__SHIFT 0x00000004
+#define MC_SEQ_TXFRAMING_BYTE3_D1__DQ2_MASK 0x00000f00L
+#define MC_SEQ_TXFRAMING_BYTE3_D1__DQ2__SHIFT 0x00000008
+#define MC_SEQ_TXFRAMING_BYTE3_D1__DQ3_MASK 0x0000f000L
+#define MC_SEQ_TXFRAMING_BYTE3_D1__DQ3__SHIFT 0x0000000c
+#define MC_SEQ_TXFRAMING_BYTE3_D1__DQ4_MASK 0x000f0000L
+#define MC_SEQ_TXFRAMING_BYTE3_D1__DQ4__SHIFT 0x00000010
+#define MC_SEQ_TXFRAMING_BYTE3_D1__DQ5_MASK 0x00f00000L
+#define MC_SEQ_TXFRAMING_BYTE3_D1__DQ5__SHIFT 0x00000014
+#define MC_SEQ_TXFRAMING_BYTE3_D1__DQ6_MASK 0x0f000000L
+#define MC_SEQ_TXFRAMING_BYTE3_D1__DQ6__SHIFT 0x00000018
+#define MC_SEQ_TXFRAMING_BYTE3_D1__DQ7_MASK 0xf0000000L
+#define MC_SEQ_TXFRAMING_BYTE3_D1__DQ7__SHIFT 0x0000001c
+#define MC_SEQ_TXFRAMING_DBI_D0__DBI0_MASK 0x0000000fL
+#define MC_SEQ_TXFRAMING_DBI_D0__DBI0__SHIFT 0x00000000
+#define MC_SEQ_TXFRAMING_DBI_D0__DBI1_MASK 0x000000f0L
+#define MC_SEQ_TXFRAMING_DBI_D0__DBI1__SHIFT 0x00000004
+#define MC_SEQ_TXFRAMING_DBI_D0__DBI2_MASK 0x00000f00L
+#define MC_SEQ_TXFRAMING_DBI_D0__DBI2__SHIFT 0x00000008
+#define MC_SEQ_TXFRAMING_DBI_D0__DBI3_MASK 0x0000f000L
+#define MC_SEQ_TXFRAMING_DBI_D0__DBI3__SHIFT 0x0000000c
+#define MC_SEQ_TXFRAMING_DBI_D1__DBI0_MASK 0x0000000fL
+#define MC_SEQ_TXFRAMING_DBI_D1__DBI0__SHIFT 0x00000000
+#define MC_SEQ_TXFRAMING_DBI_D1__DBI1_MASK 0x000000f0L
+#define MC_SEQ_TXFRAMING_DBI_D1__DBI1__SHIFT 0x00000004
+#define MC_SEQ_TXFRAMING_DBI_D1__DBI2_MASK 0x00000f00L
+#define MC_SEQ_TXFRAMING_DBI_D1__DBI2__SHIFT 0x00000008
+#define MC_SEQ_TXFRAMING_DBI_D1__DBI3_MASK 0x0000f000L
+#define MC_SEQ_TXFRAMING_DBI_D1__DBI3__SHIFT 0x0000000c
+#define MC_SEQ_TXFRAMING_EDC_D0__EDC0_MASK 0x0000000fL
+#define MC_SEQ_TXFRAMING_EDC_D0__EDC0__SHIFT 0x00000000
+#define MC_SEQ_TXFRAMING_EDC_D0__EDC1_MASK 0x000000f0L
+#define MC_SEQ_TXFRAMING_EDC_D0__EDC1__SHIFT 0x00000004
+#define MC_SEQ_TXFRAMING_EDC_D0__EDC2_MASK 0x00000f00L
+#define MC_SEQ_TXFRAMING_EDC_D0__EDC2__SHIFT 0x00000008
+#define MC_SEQ_TXFRAMING_EDC_D0__EDC3_MASK 0x0000f000L
+#define MC_SEQ_TXFRAMING_EDC_D0__EDC3__SHIFT 0x0000000c
+#define MC_SEQ_TXFRAMING_EDC_D0__WCDR0_MASK 0x000f0000L
+#define MC_SEQ_TXFRAMING_EDC_D0__WCDR0__SHIFT 0x00000010
+#define MC_SEQ_TXFRAMING_EDC_D0__WCDR1_MASK 0x00f00000L
+#define MC_SEQ_TXFRAMING_EDC_D0__WCDR1__SHIFT 0x00000014
+#define MC_SEQ_TXFRAMING_EDC_D0__WCDR2_MASK 0x0f000000L
+#define MC_SEQ_TXFRAMING_EDC_D0__WCDR2__SHIFT 0x00000018
+#define MC_SEQ_TXFRAMING_EDC_D0__WCDR3_MASK 0xf0000000L
+#define MC_SEQ_TXFRAMING_EDC_D0__WCDR3__SHIFT 0x0000001c
+#define MC_SEQ_TXFRAMING_EDC_D1__EDC0_MASK 0x0000000fL
+#define MC_SEQ_TXFRAMING_EDC_D1__EDC0__SHIFT 0x00000000
+#define MC_SEQ_TXFRAMING_EDC_D1__EDC1_MASK 0x000000f0L
+#define MC_SEQ_TXFRAMING_EDC_D1__EDC1__SHIFT 0x00000004
+#define MC_SEQ_TXFRAMING_EDC_D1__EDC2_MASK 0x00000f00L
+#define MC_SEQ_TXFRAMING_EDC_D1__EDC2__SHIFT 0x00000008
+#define MC_SEQ_TXFRAMING_EDC_D1__EDC3_MASK 0x0000f000L
+#define MC_SEQ_TXFRAMING_EDC_D1__EDC3__SHIFT 0x0000000c
+#define MC_SEQ_TXFRAMING_EDC_D1__WCDR0_MASK 0x000f0000L
+#define MC_SEQ_TXFRAMING_EDC_D1__WCDR0__SHIFT 0x00000010
+#define MC_SEQ_TXFRAMING_EDC_D1__WCDR1_MASK 0x00f00000L
+#define MC_SEQ_TXFRAMING_EDC_D1__WCDR1__SHIFT 0x00000014
+#define MC_SEQ_TXFRAMING_EDC_D1__WCDR2_MASK 0x0f000000L
+#define MC_SEQ_TXFRAMING_EDC_D1__WCDR2__SHIFT 0x00000018
+#define MC_SEQ_TXFRAMING_EDC_D1__WCDR3_MASK 0xf0000000L
+#define MC_SEQ_TXFRAMING_EDC_D1__WCDR3__SHIFT 0x0000001c
+#define MC_SEQ_TXFRAMING_FCK_D0__FCK0_MASK 0x0000000fL
+#define MC_SEQ_TXFRAMING_FCK_D0__FCK0__SHIFT 0x00000000
+#define MC_SEQ_TXFRAMING_FCK_D0__FCK1_MASK 0x000000f0L
+#define MC_SEQ_TXFRAMING_FCK_D0__FCK1__SHIFT 0x00000004
+#define MC_SEQ_TXFRAMING_FCK_D0__FCK2_MASK 0x00000f00L
+#define MC_SEQ_TXFRAMING_FCK_D0__FCK2__SHIFT 0x00000008
+#define MC_SEQ_TXFRAMING_FCK_D0__FCK3_MASK 0x0000f000L
+#define MC_SEQ_TXFRAMING_FCK_D0__FCK3__SHIFT 0x0000000c
+#define MC_SEQ_TXFRAMING_FCK_D1__FCK0_MASK 0x0000000fL
+#define MC_SEQ_TXFRAMING_FCK_D1__FCK0__SHIFT 0x00000000
+#define MC_SEQ_TXFRAMING_FCK_D1__FCK1_MASK 0x000000f0L
+#define MC_SEQ_TXFRAMING_FCK_D1__FCK1__SHIFT 0x00000004
+#define MC_SEQ_TXFRAMING_FCK_D1__FCK2_MASK 0x00000f00L
+#define MC_SEQ_TXFRAMING_FCK_D1__FCK2__SHIFT 0x00000008
+#define MC_SEQ_TXFRAMING_FCK_D1__FCK3_MASK 0x0000f000L
+#define MC_SEQ_TXFRAMING_FCK_D1__FCK3__SHIFT 0x0000000c
+#define MC_SEQ_VENDOR_ID_I0__VALUE_MASK 0xffffffffL
+#define MC_SEQ_VENDOR_ID_I0__VALUE__SHIFT 0x00000000
+#define MC_SEQ_VENDOR_ID_I1__VALUE_MASK 0xffffffffL
+#define MC_SEQ_VENDOR_ID_I1__VALUE__SHIFT 0x00000000
+#define MC_SEQ_WCDR_CTRL__AREF_EN_MASK 0x00004000L
+#define MC_SEQ_WCDR_CTRL__AREF_EN__SHIFT 0x0000000e
+#define MC_SEQ_WCDR_CTRL__PRBS_EN_MASK 0x00100000L
+#define MC_SEQ_WCDR_CTRL__PRBS_EN__SHIFT 0x00000014
+#define MC_SEQ_WCDR_CTRL__PRBS_RST_MASK 0x00200000L
+#define MC_SEQ_WCDR_CTRL__PRBS_RST__SHIFT 0x00000015
+#define MC_SEQ_WCDR_CTRL__PREAMBLE_MASK 0x0f000000L
+#define MC_SEQ_WCDR_CTRL__PREAMBLE__SHIFT 0x00000018
+#define MC_SEQ_WCDR_CTRL__PRE_MASK_MASK 0xf0000000L
+#define MC_SEQ_WCDR_CTRL__PRE_MASK__SHIFT 0x0000001c
+#define MC_SEQ_WCDR_CTRL__RD_EN_MASK 0x00002000L
+#define MC_SEQ_WCDR_CTRL__RD_EN__SHIFT 0x0000000d
+#define MC_SEQ_WCDR_CTRL__TRAIN_EN_MASK 0x00008000L
+#define MC_SEQ_WCDR_CTRL__TRAIN_EN__SHIFT 0x0000000f
+#define MC_SEQ_WCDR_CTRL__TWCDRL_MASK 0x000f0000L
+#define MC_SEQ_WCDR_CTRL__TWCDRL__SHIFT 0x00000010
+#define MC_SEQ_WCDR_CTRL__WCDR_PRE_MASK 0x000000ffL
+#define MC_SEQ_WCDR_CTRL__WCDR_PRE__SHIFT 0x00000000
+#define MC_SEQ_WCDR_CTRL__WCDR_TIM_MASK 0x00000f00L
+#define MC_SEQ_WCDR_CTRL__WCDR_TIM__SHIFT 0x00000008
+#define MC_SEQ_WCDR_CTRL__WR_EN_MASK 0x00001000L
+#define MC_SEQ_WCDR_CTRL__WR_EN__SHIFT 0x0000000c
+#define MC_SEQ_WR_CTL_2__DAT_DLY_H_D0_MASK 0x00000001L
+#define MC_SEQ_WR_CTL_2__DAT_DLY_H_D0__SHIFT 0x00000000
+#define MC_SEQ_WR_CTL_2__DAT_DLY_H_D1_MASK 0x00000008L
+#define MC_SEQ_WR_CTL_2__DAT_DLY_H_D1__SHIFT 0x00000003
+#define MC_SEQ_WR_CTL_2__DQS_DLY_H_D0_MASK 0x00000002L
+#define MC_SEQ_WR_CTL_2__DQS_DLY_H_D0__SHIFT 0x00000001
+#define MC_SEQ_WR_CTL_2__DQS_DLY_H_D1_MASK 0x00000010L
+#define MC_SEQ_WR_CTL_2__DQS_DLY_H_D1__SHIFT 0x00000004
+#define MC_SEQ_WR_CTL_2_LP__DAT_DLY_H_D0_MASK 0x00000001L
+#define MC_SEQ_WR_CTL_2_LP__DAT_DLY_H_D0__SHIFT 0x00000000
+#define MC_SEQ_WR_CTL_2_LP__DAT_DLY_H_D1_MASK 0x00000008L
+#define MC_SEQ_WR_CTL_2_LP__DAT_DLY_H_D1__SHIFT 0x00000003
+#define MC_SEQ_WR_CTL_2_LP__DQS_DLY_H_D0_MASK 0x00000002L
+#define MC_SEQ_WR_CTL_2_LP__DQS_DLY_H_D0__SHIFT 0x00000001
+#define MC_SEQ_WR_CTL_2_LP__DQS_DLY_H_D1_MASK 0x00000010L
+#define MC_SEQ_WR_CTL_2_LP__DQS_DLY_H_D1__SHIFT 0x00000004
+#define MC_SEQ_WR_CTL_2_LP__OEN_DLY_H_D0_MASK 0x00000004L
+#define MC_SEQ_WR_CTL_2_LP__OEN_DLY_H_D0__SHIFT 0x00000002
+#define MC_SEQ_WR_CTL_2_LP__OEN_DLY_H_D1_MASK 0x00000020L
+#define MC_SEQ_WR_CTL_2_LP__OEN_DLY_H_D1__SHIFT 0x00000005
+#define MC_SEQ_WR_CTL_2_LP__WCDR_EN_MASK 0x00000040L
+#define MC_SEQ_WR_CTL_2_LP__WCDR_EN__SHIFT 0x00000006
+#define MC_SEQ_WR_CTL_2__OEN_DLY_H_D0_MASK 0x00000004L
+#define MC_SEQ_WR_CTL_2__OEN_DLY_H_D0__SHIFT 0x00000002
+#define MC_SEQ_WR_CTL_2__OEN_DLY_H_D1_MASK 0x00000020L
+#define MC_SEQ_WR_CTL_2__OEN_DLY_H_D1__SHIFT 0x00000005
+#define MC_SEQ_WR_CTL_2__WCDR_EN_MASK 0x00000040L
+#define MC_SEQ_WR_CTL_2__WCDR_EN__SHIFT 0x00000006
+#define MC_SEQ_WR_CTL_D0__ADR_2Y_DLY_MASK 0x00000400L
+#define MC_SEQ_WR_CTL_D0__ADR_2Y_DLY__SHIFT 0x0000000a
+#define MC_SEQ_WR_CTL_D0__ADR_DLY_MASK 0x20000000L
+#define MC_SEQ_WR_CTL_D0__ADR_DLY__SHIFT 0x0000001d
+#define MC_SEQ_WR_CTL_D0__CMD_2Y_DLY_MASK 0x00000800L
+#define MC_SEQ_WR_CTL_D0__CMD_2Y_DLY__SHIFT 0x0000000b
+#define MC_SEQ_WR_CTL_D0__CMD_DLY_MASK 0x40000000L
+#define MC_SEQ_WR_CTL_D0__CMD_DLY__SHIFT 0x0000001e
+#define MC_SEQ_WR_CTL_D0__DAT_2Y_DLY_MASK 0x00000200L
+#define MC_SEQ_WR_CTL_D0__DAT_2Y_DLY__SHIFT 0x00000009
+#define MC_SEQ_WR_CTL_D0__DAT_DLY_MASK 0x0000000fL
+#define MC_SEQ_WR_CTL_D0__DAT_DLY__SHIFT 0x00000000
+#define MC_SEQ_WR_CTL_D0__DQS_DLY_MASK 0x000000f0L
+#define MC_SEQ_WR_CTL_D0__DQS_DLY__SHIFT 0x00000004
+#define MC_SEQ_WR_CTL_D0__DQS_XTR_MASK 0x00000100L
+#define MC_SEQ_WR_CTL_D0__DQS_XTR__SHIFT 0x00000008
+#define MC_SEQ_WR_CTL_D0_LP__ADR_2Y_DLY_MASK 0x00000400L
+#define MC_SEQ_WR_CTL_D0_LP__ADR_2Y_DLY__SHIFT 0x0000000a
+#define MC_SEQ_WR_CTL_D0_LP__ADR_DLY_MASK 0x20000000L
+#define MC_SEQ_WR_CTL_D0_LP__ADR_DLY__SHIFT 0x0000001d
+#define MC_SEQ_WR_CTL_D0_LP__CMD_2Y_DLY_MASK 0x00000800L
+#define MC_SEQ_WR_CTL_D0_LP__CMD_2Y_DLY__SHIFT 0x0000000b
+#define MC_SEQ_WR_CTL_D0_LP__CMD_DLY_MASK 0x40000000L
+#define MC_SEQ_WR_CTL_D0_LP__CMD_DLY__SHIFT 0x0000001e
+#define MC_SEQ_WR_CTL_D0_LP__DAT_2Y_DLY_MASK 0x00000200L
+#define MC_SEQ_WR_CTL_D0_LP__DAT_2Y_DLY__SHIFT 0x00000009
+#define MC_SEQ_WR_CTL_D0_LP__DAT_DLY_MASK 0x0000000fL
+#define MC_SEQ_WR_CTL_D0_LP__DAT_DLY__SHIFT 0x00000000
+#define MC_SEQ_WR_CTL_D0_LP__DQS_DLY_MASK 0x000000f0L
+#define MC_SEQ_WR_CTL_D0_LP__DQS_DLY__SHIFT 0x00000004
+#define MC_SEQ_WR_CTL_D0_LP__DQS_XTR_MASK 0x00000100L
+#define MC_SEQ_WR_CTL_D0_LP__DQS_XTR__SHIFT 0x00000008
+#define MC_SEQ_WR_CTL_D0_LP__ODT_DLY_MASK 0x0f000000L
+#define MC_SEQ_WR_CTL_D0_LP__ODT_DLY__SHIFT 0x00000018
+#define MC_SEQ_WR_CTL_D0_LP__ODT_EXT_MASK 0x10000000L
+#define MC_SEQ_WR_CTL_D0_LP__ODT_EXT__SHIFT 0x0000001c
+#define MC_SEQ_WR_CTL_D0_LP__OEN_DLY_MASK 0x0000f000L
+#define MC_SEQ_WR_CTL_D0_LP__OEN_DLY__SHIFT 0x0000000c
+#define MC_SEQ_WR_CTL_D0_LP__OEN_EXT_MASK 0x000f0000L
+#define MC_SEQ_WR_CTL_D0_LP__OEN_EXT__SHIFT 0x00000010
+#define MC_SEQ_WR_CTL_D0_LP__OEN_SEL_MASK 0x00300000L
+#define MC_SEQ_WR_CTL_D0_LP__OEN_SEL__SHIFT 0x00000014
+#define MC_SEQ_WR_CTL_D0__ODT_DLY_MASK 0x0f000000L
+#define MC_SEQ_WR_CTL_D0__ODT_DLY__SHIFT 0x00000018
+#define MC_SEQ_WR_CTL_D0__ODT_EXT_MASK 0x10000000L
+#define MC_SEQ_WR_CTL_D0__ODT_EXT__SHIFT 0x0000001c
+#define MC_SEQ_WR_CTL_D0__OEN_DLY_MASK 0x0000f000L
+#define MC_SEQ_WR_CTL_D0__OEN_DLY__SHIFT 0x0000000c
+#define MC_SEQ_WR_CTL_D0__OEN_EXT_MASK 0x000f0000L
+#define MC_SEQ_WR_CTL_D0__OEN_EXT__SHIFT 0x00000010
+#define MC_SEQ_WR_CTL_D0__OEN_SEL_MASK 0x00300000L
+#define MC_SEQ_WR_CTL_D0__OEN_SEL__SHIFT 0x00000014
+#define MC_SEQ_WR_CTL_D1__ADR_2Y_DLY_MASK 0x00000400L
+#define MC_SEQ_WR_CTL_D1__ADR_2Y_DLY__SHIFT 0x0000000a
+#define MC_SEQ_WR_CTL_D1__ADR_DLY_MASK 0x20000000L
+#define MC_SEQ_WR_CTL_D1__ADR_DLY__SHIFT 0x0000001d
+#define MC_SEQ_WR_CTL_D1__CMD_2Y_DLY_MASK 0x00000800L
+#define MC_SEQ_WR_CTL_D1__CMD_2Y_DLY__SHIFT 0x0000000b
+#define MC_SEQ_WR_CTL_D1__CMD_DLY_MASK 0x40000000L
+#define MC_SEQ_WR_CTL_D1__CMD_DLY__SHIFT 0x0000001e
+#define MC_SEQ_WR_CTL_D1__DAT_2Y_DLY_MASK 0x00000200L
+#define MC_SEQ_WR_CTL_D1__DAT_2Y_DLY__SHIFT 0x00000009
+#define MC_SEQ_WR_CTL_D1__DAT_DLY_MASK 0x0000000fL
+#define MC_SEQ_WR_CTL_D1__DAT_DLY__SHIFT 0x00000000
+#define MC_SEQ_WR_CTL_D1__DQS_DLY_MASK 0x000000f0L
+#define MC_SEQ_WR_CTL_D1__DQS_DLY__SHIFT 0x00000004
+#define MC_SEQ_WR_CTL_D1__DQS_XTR_MASK 0x00000100L
+#define MC_SEQ_WR_CTL_D1__DQS_XTR__SHIFT 0x00000008
+#define MC_SEQ_WR_CTL_D1_LP__ADR_2Y_DLY_MASK 0x00000400L
+#define MC_SEQ_WR_CTL_D1_LP__ADR_2Y_DLY__SHIFT 0x0000000a
+#define MC_SEQ_WR_CTL_D1_LP__ADR_DLY_MASK 0x20000000L
+#define MC_SEQ_WR_CTL_D1_LP__ADR_DLY__SHIFT 0x0000001d
+#define MC_SEQ_WR_CTL_D1_LP__CMD_2Y_DLY_MASK 0x00000800L
+#define MC_SEQ_WR_CTL_D1_LP__CMD_2Y_DLY__SHIFT 0x0000000b
+#define MC_SEQ_WR_CTL_D1_LP__CMD_DLY_MASK 0x40000000L
+#define MC_SEQ_WR_CTL_D1_LP__CMD_DLY__SHIFT 0x0000001e
+#define MC_SEQ_WR_CTL_D1_LP__DAT_2Y_DLY_MASK 0x00000200L
+#define MC_SEQ_WR_CTL_D1_LP__DAT_2Y_DLY__SHIFT 0x00000009
+#define MC_SEQ_WR_CTL_D1_LP__DAT_DLY_MASK 0x0000000fL
+#define MC_SEQ_WR_CTL_D1_LP__DAT_DLY__SHIFT 0x00000000
+#define MC_SEQ_WR_CTL_D1_LP__DQS_DLY_MASK 0x000000f0L
+#define MC_SEQ_WR_CTL_D1_LP__DQS_DLY__SHIFT 0x00000004
+#define MC_SEQ_WR_CTL_D1_LP__DQS_XTR_MASK 0x00000100L
+#define MC_SEQ_WR_CTL_D1_LP__DQS_XTR__SHIFT 0x00000008
+#define MC_SEQ_WR_CTL_D1_LP__ODT_DLY_MASK 0x0f000000L
+#define MC_SEQ_WR_CTL_D1_LP__ODT_DLY__SHIFT 0x00000018
+#define MC_SEQ_WR_CTL_D1_LP__ODT_EXT_MASK 0x10000000L
+#define MC_SEQ_WR_CTL_D1_LP__ODT_EXT__SHIFT 0x0000001c
+#define MC_SEQ_WR_CTL_D1_LP__OEN_DLY_MASK 0x0000f000L
+#define MC_SEQ_WR_CTL_D1_LP__OEN_DLY__SHIFT 0x0000000c
+#define MC_SEQ_WR_CTL_D1_LP__OEN_EXT_MASK 0x000f0000L
+#define MC_SEQ_WR_CTL_D1_LP__OEN_EXT__SHIFT 0x00000010
+#define MC_SEQ_WR_CTL_D1_LP__OEN_SEL_MASK 0x00300000L
+#define MC_SEQ_WR_CTL_D1_LP__OEN_SEL__SHIFT 0x00000014
+#define MC_SEQ_WR_CTL_D1__ODT_DLY_MASK 0x0f000000L
+#define MC_SEQ_WR_CTL_D1__ODT_DLY__SHIFT 0x00000018
+#define MC_SEQ_WR_CTL_D1__ODT_EXT_MASK 0x10000000L
+#define MC_SEQ_WR_CTL_D1__ODT_EXT__SHIFT 0x0000001c
+#define MC_SEQ_WR_CTL_D1__OEN_DLY_MASK 0x0000f000L
+#define MC_SEQ_WR_CTL_D1__OEN_DLY__SHIFT 0x0000000c
+#define MC_SEQ_WR_CTL_D1__OEN_EXT_MASK 0x000f0000L
+#define MC_SEQ_WR_CTL_D1__OEN_EXT__SHIFT 0x00000010
+#define MC_SEQ_WR_CTL_D1__OEN_SEL_MASK 0x00300000L
+#define MC_SEQ_WR_CTL_D1__OEN_SEL__SHIFT 0x00000014
+#define MC_SHARED_BLACKOUT_CNTL__BLACKOUT_MODE_MASK 0x00000007L
+#define MC_SHARED_BLACKOUT_CNTL__BLACKOUT_MODE__SHIFT 0x00000000
+#define MC_SHARED_CHMAP__CHAN0_MASK 0x0000000fL
+#define MC_SHARED_CHMAP__CHAN0__SHIFT 0x00000000
+#define MC_SHARED_CHMAP__CHAN1_MASK 0x000000f0L
+#define MC_SHARED_CHMAP__CHAN1__SHIFT 0x00000004
+#define MC_SHARED_CHMAP__CHAN2_MASK 0x00000f00L
+#define MC_SHARED_CHMAP__CHAN2__SHIFT 0x00000008
+#define MC_SHARED_CHMAP__NOOFCHAN_MASK 0x0000f000L
+#define MC_SHARED_CHMAP__NOOFCHAN__SHIFT 0x0000000c
+#define MC_SHARED_CHREMAP__CHAN0_MASK 0x00000007L
+#define MC_SHARED_CHREMAP__CHAN0__SHIFT 0x00000000
+#define MC_SHARED_CHREMAP__CHAN1_MASK 0x00000038L
+#define MC_SHARED_CHREMAP__CHAN1__SHIFT 0x00000003
+#define MC_SHARED_CHREMAP__CHAN2_MASK 0x000001c0L
+#define MC_SHARED_CHREMAP__CHAN2__SHIFT 0x00000006
+#define MC_SHARED_CHREMAP__CHAN3_MASK 0x00000e00L
+#define MC_SHARED_CHREMAP__CHAN3__SHIFT 0x00000009
+#define MC_SHARED_CHREMAP__CHAN4_MASK 0x00007000L
+#define MC_SHARED_CHREMAP__CHAN4__SHIFT 0x0000000c
+#define MC_SHARED_CHREMAP__CHAN5_MASK 0x00038000L
+#define MC_SHARED_CHREMAP__CHAN5__SHIFT 0x0000000f
+#define MC_SHARED_CHREMAP__CHAN6_MASK 0x001c0000L
+#define MC_SHARED_CHREMAP__CHAN6__SHIFT 0x00000012
+#define MC_SHARED_CHREMAP__CHAN7_MASK 0x00e00000L
+#define MC_SHARED_CHREMAP__CHAN7__SHIFT 0x00000015
+#define MC_TRAIN_EDCCDR_R_D0__EDC0_MASK 0x000000ffL
+#define MC_TRAIN_EDCCDR_R_D0__EDC0__SHIFT 0x00000000
+#define MC_TRAIN_EDCCDR_R_D0__EDC1_MASK 0x0000ff00L
+#define MC_TRAIN_EDCCDR_R_D0__EDC1__SHIFT 0x00000008
+#define MC_TRAIN_EDCCDR_R_D0__EDC2_MASK 0x00ff0000L
+#define MC_TRAIN_EDCCDR_R_D0__EDC2__SHIFT 0x00000010
+#define MC_TRAIN_EDCCDR_R_D0__EDC3_MASK 0xff000000L
+#define MC_TRAIN_EDCCDR_R_D0__EDC3__SHIFT 0x00000018
+#define MC_TRAIN_EDCCDR_R_D1__EDC0_MASK 0x000000ffL
+#define MC_TRAIN_EDCCDR_R_D1__EDC0__SHIFT 0x00000000
+#define MC_TRAIN_EDCCDR_R_D1__EDC1_MASK 0x0000ff00L
+#define MC_TRAIN_EDCCDR_R_D1__EDC1__SHIFT 0x00000008
+#define MC_TRAIN_EDCCDR_R_D1__EDC2_MASK 0x00ff0000L
+#define MC_TRAIN_EDCCDR_R_D1__EDC2__SHIFT 0x00000010
+#define MC_TRAIN_EDCCDR_R_D1__EDC3_MASK 0xff000000L
+#define MC_TRAIN_EDCCDR_R_D1__EDC3__SHIFT 0x00000018
+#define MC_TRAIN_EDC_STATUS_D0__REDC_CNT_MASK 0xffff0000L
+#define MC_TRAIN_EDC_STATUS_D0__REDC_CNT__SHIFT 0x00000010
+#define MC_TRAIN_EDC_STATUS_D0__WEDC_CNT_MASK 0x0000ffffL
+#define MC_TRAIN_EDC_STATUS_D0__WEDC_CNT__SHIFT 0x00000000
+#define MC_TRAIN_EDC_STATUS_D1__REDC_CNT_MASK 0xffff0000L
+#define MC_TRAIN_EDC_STATUS_D1__REDC_CNT__SHIFT 0x00000010
+#define MC_TRAIN_EDC_STATUS_D1__WEDC_CNT_MASK 0x0000ffffL
+#define MC_TRAIN_EDC_STATUS_D1__WEDC_CNT__SHIFT 0x00000000
+#define MC_TRAIN_PRBSERR_0_D0__DQ_STATUS_MASK 0xffffffffL
+#define MC_TRAIN_PRBSERR_0_D0__DQ_STATUS__SHIFT 0x00000000
+#define MC_TRAIN_PRBSERR_0_D1__DQ_STATUS_MASK 0xffffffffL
+#define MC_TRAIN_PRBSERR_0_D1__DQ_STATUS__SHIFT 0x00000000
+#define MC_TRAIN_PRBSERR_1_D0__DBI_STATUS_MASK 0x0000000fL
+#define MC_TRAIN_PRBSERR_1_D0__DBI_STATUS__SHIFT 0x00000000
+#define MC_TRAIN_PRBSERR_1_D0__EDC_STATUS_MASK 0x000000f0L
+#define MC_TRAIN_PRBSERR_1_D0__EDC_STATUS__SHIFT 0x00000004
+#define MC_TRAIN_PRBSERR_1_D0__PMA_PRBSCLR_MASK 0x10000000L
+#define MC_TRAIN_PRBSERR_1_D0__PMA_PRBSCLR__SHIFT 0x0000001c
+#define MC_TRAIN_PRBSERR_1_D0__PMD0_PRBSCLR_MASK 0x20000000L
+#define MC_TRAIN_PRBSERR_1_D0__PMD0_PRBSCLR__SHIFT 0x0000001d
+#define MC_TRAIN_PRBSERR_1_D0__PMD1_PRBSCLR_MASK 0x40000000L
+#define MC_TRAIN_PRBSERR_1_D0__PMD1_PRBSCLR__SHIFT 0x0000001e
+#define MC_TRAIN_PRBSERR_1_D0__WCDR_STATUS_MASK 0x0000f000L
+#define MC_TRAIN_PRBSERR_1_D0__WCDR_STATUS__SHIFT 0x0000000c
+#define MC_TRAIN_PRBSERR_1_D0__WCK_STATUS_MASK 0x00000f00L
+#define MC_TRAIN_PRBSERR_1_D0__WCK_STATUS__SHIFT 0x00000008
+#define MC_TRAIN_PRBSERR_1_D1__DBI_STATUS_MASK 0x0000000fL
+#define MC_TRAIN_PRBSERR_1_D1__DBI_STATUS__SHIFT 0x00000000
+#define MC_TRAIN_PRBSERR_1_D1__EDC_STATUS_MASK 0x000000f0L
+#define MC_TRAIN_PRBSERR_1_D1__EDC_STATUS__SHIFT 0x00000004
+#define MC_TRAIN_PRBSERR_1_D1__PMA_PRBSCLR_MASK 0x10000000L
+#define MC_TRAIN_PRBSERR_1_D1__PMA_PRBSCLR__SHIFT 0x0000001c
+#define MC_TRAIN_PRBSERR_1_D1__PMD0_PRBSCLR_MASK 0x20000000L
+#define MC_TRAIN_PRBSERR_1_D1__PMD0_PRBSCLR__SHIFT 0x0000001d
+#define MC_TRAIN_PRBSERR_1_D1__PMD1_PRBSCLR_MASK 0x40000000L
+#define MC_TRAIN_PRBSERR_1_D1__PMD1_PRBSCLR__SHIFT 0x0000001e
+#define MC_TRAIN_PRBSERR_1_D1__WCDR_STATUS_MASK 0x0000f000L
+#define MC_TRAIN_PRBSERR_1_D1__WCDR_STATUS__SHIFT 0x0000000c
+#define MC_TRAIN_PRBSERR_1_D1__WCK_STATUS_MASK 0x00000f00L
+#define MC_TRAIN_PRBSERR_1_D1__WCK_STATUS__SHIFT 0x00000008
+#define MC_TRAIN_PRBSERR_2_D0__ABI_STATUS_MASK 0x10000000L
+#define MC_TRAIN_PRBSERR_2_D0__ABI_STATUS__SHIFT 0x0000001c
+#define MC_TRAIN_PRBSERR_2_D0__ADDR_STATUS_MASK 0x03ff0000L
+#define MC_TRAIN_PRBSERR_2_D0__ADDR_STATUS__SHIFT 0x00000010
+#define MC_TRAIN_PRBSERR_2_D0__CAS_STATUS_MASK 0x00000400L
+#define MC_TRAIN_PRBSERR_2_D0__CAS_STATUS__SHIFT 0x0000000a
+#define MC_TRAIN_PRBSERR_2_D0__CKB_STATUS_MASK 0x00000002L
+#define MC_TRAIN_PRBSERR_2_D0__CKB_STATUS__SHIFT 0x00000001
+#define MC_TRAIN_PRBSERR_2_D0__CKE_STATUS_MASK 0x00000100L
+#define MC_TRAIN_PRBSERR_2_D0__CKE_STATUS__SHIFT 0x00000008
+#define MC_TRAIN_PRBSERR_2_D0__CK_STATUS_MASK 0x00000001L
+#define MC_TRAIN_PRBSERR_2_D0__CK_STATUS__SHIFT 0x00000000
+#define MC_TRAIN_PRBSERR_2_D0__CS_STATUS_MASK 0x00000030L
+#define MC_TRAIN_PRBSERR_2_D0__CS_STATUS__SHIFT 0x00000004
+#define MC_TRAIN_PRBSERR_2_D0__RAS_STATUS_MASK 0x00000200L
+#define MC_TRAIN_PRBSERR_2_D0__RAS_STATUS__SHIFT 0x00000009
+#define MC_TRAIN_PRBSERR_2_D0__WE_STATUS_MASK 0x00000800L
+#define MC_TRAIN_PRBSERR_2_D0__WE_STATUS__SHIFT 0x0000000b
+#define MC_TRAIN_PRBSERR_2_D1__ABI_STATUS_MASK 0x10000000L
+#define MC_TRAIN_PRBSERR_2_D1__ABI_STATUS__SHIFT 0x0000001c
+#define MC_TRAIN_PRBSERR_2_D1__ADDR_STATUS_MASK 0x03ff0000L
+#define MC_TRAIN_PRBSERR_2_D1__ADDR_STATUS__SHIFT 0x00000010
+#define MC_TRAIN_PRBSERR_2_D1__CAS_STATUS_MASK 0x00000400L
+#define MC_TRAIN_PRBSERR_2_D1__CAS_STATUS__SHIFT 0x0000000a
+#define MC_TRAIN_PRBSERR_2_D1__CKB_STATUS_MASK 0x00000002L
+#define MC_TRAIN_PRBSERR_2_D1__CKB_STATUS__SHIFT 0x00000001
+#define MC_TRAIN_PRBSERR_2_D1__CKE_STATUS_MASK 0x00000100L
+#define MC_TRAIN_PRBSERR_2_D1__CKE_STATUS__SHIFT 0x00000008
+#define MC_TRAIN_PRBSERR_2_D1__CK_STATUS_MASK 0x00000001L
+#define MC_TRAIN_PRBSERR_2_D1__CK_STATUS__SHIFT 0x00000000
+#define MC_TRAIN_PRBSERR_2_D1__CS_STATUS_MASK 0x00000030L
+#define MC_TRAIN_PRBSERR_2_D1__CS_STATUS__SHIFT 0x00000004
+#define MC_TRAIN_PRBSERR_2_D1__RAS_STATUS_MASK 0x00000200L
+#define MC_TRAIN_PRBSERR_2_D1__RAS_STATUS__SHIFT 0x00000009
+#define MC_TRAIN_PRBSERR_2_D1__WE_STATUS_MASK 0x00000800L
+#define MC_TRAIN_PRBSERR_2_D1__WE_STATUS__SHIFT 0x0000000b
+#define MC_TSM_DEBUG_BCNT0__BYTE0_MASK 0x000000ffL
+#define MC_TSM_DEBUG_BCNT0__BYTE0__SHIFT 0x00000000
+#define MC_TSM_DEBUG_BCNT0__BYTE1_MASK 0x0000ff00L
+#define MC_TSM_DEBUG_BCNT0__BYTE1__SHIFT 0x00000008
+#define MC_TSM_DEBUG_BCNT0__BYTE2_MASK 0x00ff0000L
+#define MC_TSM_DEBUG_BCNT0__BYTE2__SHIFT 0x00000010
+#define MC_TSM_DEBUG_BCNT0__BYTE3_MASK 0xff000000L
+#define MC_TSM_DEBUG_BCNT0__BYTE3__SHIFT 0x00000018
+#define MC_TSM_DEBUG_BCNT10__BYTE0_MASK 0x000000ffL
+#define MC_TSM_DEBUG_BCNT10__BYTE0__SHIFT 0x00000000
+#define MC_TSM_DEBUG_BCNT10__BYTE1_MASK 0x0000ff00L
+#define MC_TSM_DEBUG_BCNT10__BYTE1__SHIFT 0x00000008
+#define MC_TSM_DEBUG_BCNT10__BYTE2_MASK 0x00ff0000L
+#define MC_TSM_DEBUG_BCNT10__BYTE2__SHIFT 0x00000010
+#define MC_TSM_DEBUG_BCNT10__BYTE3_MASK 0xff000000L
+#define MC_TSM_DEBUG_BCNT10__BYTE3__SHIFT 0x00000018
+#define MC_TSM_DEBUG_BCNT1__BYTE0_MASK 0x000000ffL
+#define MC_TSM_DEBUG_BCNT1__BYTE0__SHIFT 0x00000000
+#define MC_TSM_DEBUG_BCNT1__BYTE1_MASK 0x0000ff00L
+#define MC_TSM_DEBUG_BCNT1__BYTE1__SHIFT 0x00000008
+#define MC_TSM_DEBUG_BCNT1__BYTE2_MASK 0x00ff0000L
+#define MC_TSM_DEBUG_BCNT1__BYTE2__SHIFT 0x00000010
+#define MC_TSM_DEBUG_BCNT1__BYTE3_MASK 0xff000000L
+#define MC_TSM_DEBUG_BCNT1__BYTE3__SHIFT 0x00000018
+#define MC_TSM_DEBUG_BCNT2__BYTE0_MASK 0x000000ffL
+#define MC_TSM_DEBUG_BCNT2__BYTE0__SHIFT 0x00000000
+#define MC_TSM_DEBUG_BCNT2__BYTE1_MASK 0x0000ff00L
+#define MC_TSM_DEBUG_BCNT2__BYTE1__SHIFT 0x00000008
+#define MC_TSM_DEBUG_BCNT2__BYTE2_MASK 0x00ff0000L
+#define MC_TSM_DEBUG_BCNT2__BYTE2__SHIFT 0x00000010
+#define MC_TSM_DEBUG_BCNT2__BYTE3_MASK 0xff000000L
+#define MC_TSM_DEBUG_BCNT2__BYTE3__SHIFT 0x00000018
+#define MC_TSM_DEBUG_BCNT3__BYTE0_MASK 0x000000ffL
+#define MC_TSM_DEBUG_BCNT3__BYTE0__SHIFT 0x00000000
+#define MC_TSM_DEBUG_BCNT3__BYTE1_MASK 0x0000ff00L
+#define MC_TSM_DEBUG_BCNT3__BYTE1__SHIFT 0x00000008
+#define MC_TSM_DEBUG_BCNT3__BYTE2_MASK 0x00ff0000L
+#define MC_TSM_DEBUG_BCNT3__BYTE2__SHIFT 0x00000010
+#define MC_TSM_DEBUG_BCNT3__BYTE3_MASK 0xff000000L
+#define MC_TSM_DEBUG_BCNT3__BYTE3__SHIFT 0x00000018
+#define MC_TSM_DEBUG_BCNT4__BYTE0_MASK 0x000000ffL
+#define MC_TSM_DEBUG_BCNT4__BYTE0__SHIFT 0x00000000
+#define MC_TSM_DEBUG_BCNT4__BYTE1_MASK 0x0000ff00L
+#define MC_TSM_DEBUG_BCNT4__BYTE1__SHIFT 0x00000008
+#define MC_TSM_DEBUG_BCNT4__BYTE2_MASK 0x00ff0000L
+#define MC_TSM_DEBUG_BCNT4__BYTE2__SHIFT 0x00000010
+#define MC_TSM_DEBUG_BCNT4__BYTE3_MASK 0xff000000L
+#define MC_TSM_DEBUG_BCNT4__BYTE3__SHIFT 0x00000018
+#define MC_TSM_DEBUG_BCNT5__BYTE0_MASK 0x000000ffL
+#define MC_TSM_DEBUG_BCNT5__BYTE0__SHIFT 0x00000000
+#define MC_TSM_DEBUG_BCNT5__BYTE1_MASK 0x0000ff00L
+#define MC_TSM_DEBUG_BCNT5__BYTE1__SHIFT 0x00000008
+#define MC_TSM_DEBUG_BCNT5__BYTE2_MASK 0x00ff0000L
+#define MC_TSM_DEBUG_BCNT5__BYTE2__SHIFT 0x00000010
+#define MC_TSM_DEBUG_BCNT5__BYTE3_MASK 0xff000000L
+#define MC_TSM_DEBUG_BCNT5__BYTE3__SHIFT 0x00000018
+#define MC_TSM_DEBUG_BCNT6__BYTE0_MASK 0x000000ffL
+#define MC_TSM_DEBUG_BCNT6__BYTE0__SHIFT 0x00000000
+#define MC_TSM_DEBUG_BCNT6__BYTE1_MASK 0x0000ff00L
+#define MC_TSM_DEBUG_BCNT6__BYTE1__SHIFT 0x00000008
+#define MC_TSM_DEBUG_BCNT6__BYTE2_MASK 0x00ff0000L
+#define MC_TSM_DEBUG_BCNT6__BYTE2__SHIFT 0x00000010
+#define MC_TSM_DEBUG_BCNT6__BYTE3_MASK 0xff000000L
+#define MC_TSM_DEBUG_BCNT6__BYTE3__SHIFT 0x00000018
+#define MC_TSM_DEBUG_BCNT7__BYTE0_MASK 0x000000ffL
+#define MC_TSM_DEBUG_BCNT7__BYTE0__SHIFT 0x00000000
+#define MC_TSM_DEBUG_BCNT7__BYTE1_MASK 0x0000ff00L
+#define MC_TSM_DEBUG_BCNT7__BYTE1__SHIFT 0x00000008
+#define MC_TSM_DEBUG_BCNT7__BYTE2_MASK 0x00ff0000L
+#define MC_TSM_DEBUG_BCNT7__BYTE2__SHIFT 0x00000010
+#define MC_TSM_DEBUG_BCNT7__BYTE3_MASK 0xff000000L
+#define MC_TSM_DEBUG_BCNT7__BYTE3__SHIFT 0x00000018
+#define MC_TSM_DEBUG_BCNT8__BYTE0_MASK 0x000000ffL
+#define MC_TSM_DEBUG_BCNT8__BYTE0__SHIFT 0x00000000
+#define MC_TSM_DEBUG_BCNT8__BYTE1_MASK 0x0000ff00L
+#define MC_TSM_DEBUG_BCNT8__BYTE1__SHIFT 0x00000008
+#define MC_TSM_DEBUG_BCNT8__BYTE2_MASK 0x00ff0000L
+#define MC_TSM_DEBUG_BCNT8__BYTE2__SHIFT 0x00000010
+#define MC_TSM_DEBUG_BCNT8__BYTE3_MASK 0xff000000L
+#define MC_TSM_DEBUG_BCNT8__BYTE3__SHIFT 0x00000018
+#define MC_TSM_DEBUG_BCNT9__BYTE0_MASK 0x000000ffL
+#define MC_TSM_DEBUG_BCNT9__BYTE0__SHIFT 0x00000000
+#define MC_TSM_DEBUG_BCNT9__BYTE1_MASK 0x0000ff00L
+#define MC_TSM_DEBUG_BCNT9__BYTE1__SHIFT 0x00000008
+#define MC_TSM_DEBUG_BCNT9__BYTE2_MASK 0x00ff0000L
+#define MC_TSM_DEBUG_BCNT9__BYTE2__SHIFT 0x00000010
+#define MC_TSM_DEBUG_BCNT9__BYTE3_MASK 0xff000000L
+#define MC_TSM_DEBUG_BCNT9__BYTE3__SHIFT 0x00000018
+#define MC_TSM_DEBUG_BKPT__DATA_MASK 0xffffffffL
+#define MC_TSM_DEBUG_BKPT__DATA__SHIFT 0x00000000
+#define MC_TSM_DEBUG_FLAG__DATA_MASK 0xffffffffL
+#define MC_TSM_DEBUG_FLAG__DATA__SHIFT 0x00000000
+#define MC_TSM_DEBUG_GCNT__DATA_MASK 0xffffffffL
+#define MC_TSM_DEBUG_GCNT__DATA__SHIFT 0x00000000
+#define MC_TSM_DEBUG_MISC__FLAG_MASK 0x000000ffL
+#define MC_TSM_DEBUG_MISC__FLAG__SHIFT 0x00000000
+#define MC_TSM_DEBUG_MISC__NCNT_RD_MASK 0x00000f00L
+#define MC_TSM_DEBUG_MISC__NCNT_RD__SHIFT 0x00000008
+#define MC_TSM_DEBUG_MISC__NCNT_WR_MASK 0x0000f000L
+#define MC_TSM_DEBUG_MISC__NCNT_WR__SHIFT 0x0000000c
+#define MC_TSM_DEBUG_ST01__DATA_MASK 0xffffffffL
+#define MC_TSM_DEBUG_ST01__DATA__SHIFT 0x00000000
+#define MC_TSM_DEBUG_ST23__DATA_MASK 0xffffffffL
+#define MC_TSM_DEBUG_ST23__DATA__SHIFT 0x00000000
+#define MC_TSM_DEBUG_ST45__DATA_MASK 0xffffffffL
+#define MC_TSM_DEBUG_ST45__DATA__SHIFT 0x00000000
+#define MC_VM_AGP_BASE__AGP_BASE_MASK 0x0003ffffL
+#define MC_VM_AGP_BASE__AGP_BASE__SHIFT 0x00000000
+#define MC_VM_AGP_BOT__AGP_BOT_MASK 0x0003ffffL
+#define MC_VM_AGP_BOT__AGP_BOT__SHIFT 0x00000000
+#define MC_VM_AGP_TOP__AGP_TOP_MASK 0x0003ffffL
+#define MC_VM_AGP_TOP__AGP_TOP__SHIFT 0x00000000
+#define MC_VM_DC_WRITE_CNTL__DC_MEMORY_WRITE_LOCAL_MASK 0x00000100L
+#define MC_VM_DC_WRITE_CNTL__DC_MEMORY_WRITE_LOCAL__SHIFT 0x00000008
+#define MC_VM_DC_WRITE_CNTL__DC_MEMORY_WRITE_SYSTEM_MASK 0x00000200L
+#define MC_VM_DC_WRITE_CNTL__DC_MEMORY_WRITE_SYSTEM__SHIFT 0x00000009
+#define MC_VM_DC_WRITE_CNTL__DC_WRITE_HIT_REGION_0_MODE_MASK 0x00000003L
+#define MC_VM_DC_WRITE_CNTL__DC_WRITE_HIT_REGION_0_MODE__SHIFT 0x00000000
+#define MC_VM_DC_WRITE_CNTL__DC_WRITE_HIT_REGION_1_MODE_MASK 0x0000000cL
+#define MC_VM_DC_WRITE_CNTL__DC_WRITE_HIT_REGION_1_MODE__SHIFT 0x00000002
+#define MC_VM_DC_WRITE_CNTL__DC_WRITE_HIT_REGION_2_MODE_MASK 0x00000030L
+#define MC_VM_DC_WRITE_CNTL__DC_WRITE_HIT_REGION_2_MODE__SHIFT 0x00000004
+#define MC_VM_DC_WRITE_CNTL__DC_WRITE_HIT_REGION_3_MODE_MASK 0x000000c0L
+#define MC_VM_DC_WRITE_CNTL__DC_WRITE_HIT_REGION_3_MODE__SHIFT 0x00000006
+#define MC_VM_DC_WRITE_HIT_REGION_0_HIGH_ADDR__PHYSICAL_ADDRESS_MASK 0x0fffffffL
+#define MC_VM_DC_WRITE_HIT_REGION_0_HIGH_ADDR__PHYSICAL_ADDRESS__SHIFT 0x00000000
+#define MC_VM_DC_WRITE_HIT_REGION_0_LOW_ADDR__PHYSICAL_ADDRESS_MASK 0x0fffffffL
+#define MC_VM_DC_WRITE_HIT_REGION_0_LOW_ADDR__PHYSICAL_ADDRESS__SHIFT 0x00000000
+#define MC_VM_DC_WRITE_HIT_REGION_1_HIGH_ADDR__PHYSICAL_ADDRESS_MASK 0x0fffffffL
+#define MC_VM_DC_WRITE_HIT_REGION_1_HIGH_ADDR__PHYSICAL_ADDRESS__SHIFT 0x00000000
+#define MC_VM_DC_WRITE_HIT_REGION_1_LOW_ADDR__PHYSICAL_ADDRESS_MASK 0x0fffffffL
+#define MC_VM_DC_WRITE_HIT_REGION_1_LOW_ADDR__PHYSICAL_ADDRESS__SHIFT 0x00000000
+#define MC_VM_DC_WRITE_HIT_REGION_2_HIGH_ADDR__PHYSICAL_ADDRESS_MASK 0x0fffffffL
+#define MC_VM_DC_WRITE_HIT_REGION_2_HIGH_ADDR__PHYSICAL_ADDRESS__SHIFT 0x00000000
+#define MC_VM_DC_WRITE_HIT_REGION_2_LOW_ADDR__PHYSICAL_ADDRESS_MASK 0x0fffffffL
+#define MC_VM_DC_WRITE_HIT_REGION_2_LOW_ADDR__PHYSICAL_ADDRESS__SHIFT 0x00000000
+#define MC_VM_DC_WRITE_HIT_REGION_3_HIGH_ADDR__PHYSICAL_ADDRESS_MASK 0x0fffffffL
+#define MC_VM_DC_WRITE_HIT_REGION_3_HIGH_ADDR__PHYSICAL_ADDRESS__SHIFT 0x00000000
+#define MC_VM_DC_WRITE_HIT_REGION_3_LOW_ADDR__PHYSICAL_ADDRESS_MASK 0x0fffffffL
+#define MC_VM_DC_WRITE_HIT_REGION_3_LOW_ADDR__PHYSICAL_ADDRESS__SHIFT 0x00000000
+#define MC_VM_FB_LOCATION__FB_BASE_MASK 0x0000ffffL
+#define MC_VM_FB_LOCATION__FB_BASE__SHIFT 0x00000000
+#define MC_VM_FB_LOCATION__FB_TOP_MASK 0xffff0000L
+#define MC_VM_FB_LOCATION__FB_TOP__SHIFT 0x00000010
+#define MC_VM_FB_OFFSET__FB_OFFSET_MASK 0x0003ffffL
+#define MC_VM_FB_OFFSET__FB_OFFSET__SHIFT 0x00000000
+#define MC_VM_MB_L1_TLB0_DEBUG__EFFECTIVE_L1_QUEUE_SIZE_MASK 0x00007000L
+#define MC_VM_MB_L1_TLB0_DEBUG__EFFECTIVE_L1_QUEUE_SIZE__SHIFT 0x0000000c
+#define MC_VM_MB_L1_TLB0_DEBUG__EFFECTIVE_L1_TLB_SIZE_MASK 0x00000e00L
+#define MC_VM_MB_L1_TLB0_DEBUG__EFFECTIVE_L1_TLB_SIZE__SHIFT 0x00000009
+#define MC_VM_MB_L1_TLB0_DEBUG__INVALIDATE_L1_TLB_MASK 0x00000001L
+#define MC_VM_MB_L1_TLB0_DEBUG__INVALIDATE_L1_TLB__SHIFT 0x00000000
+#define MC_VM_MB_L1_TLB0_DEBUG__L1_TLB_DEBUG_MASK 0x00078000L
+#define MC_VM_MB_L1_TLB0_DEBUG__L1_TLB_DEBUG__SHIFT 0x0000000f
+#define MC_VM_MB_L1_TLB0_DEBUG__SEND_FREE_AT_RTN_MASK 0x00000100L
+#define MC_VM_MB_L1_TLB0_DEBUG__SEND_FREE_AT_RTN__SHIFT 0x00000008
+#define MC_VM_MB_L1_TLB0_STATUS__BUSY_MASK 0x00000001L
+#define MC_VM_MB_L1_TLB0_STATUS__BUSY__SHIFT 0x00000000
+#define MC_VM_MB_L1_TLB1_STATUS__BUSY_MASK 0x00000001L
+#define MC_VM_MB_L1_TLB1_STATUS__BUSY__SHIFT 0x00000000
+#define MC_VM_MB_L1_TLB2_DEBUG__EFFECTIVE_L1_QUEUE_SIZE_MASK 0x00007000L
+#define MC_VM_MB_L1_TLB2_DEBUG__EFFECTIVE_L1_QUEUE_SIZE__SHIFT 0x0000000c
+#define MC_VM_MB_L1_TLB2_DEBUG__EFFECTIVE_L1_TLB_SIZE_MASK 0x00000e00L
+#define MC_VM_MB_L1_TLB2_DEBUG__EFFECTIVE_L1_TLB_SIZE__SHIFT 0x00000009
+#define MC_VM_MB_L1_TLB2_DEBUG__INVALIDATE_L1_TLB_MASK 0x00000001L
+#define MC_VM_MB_L1_TLB2_DEBUG__INVALIDATE_L1_TLB__SHIFT 0x00000000
+#define MC_VM_MB_L1_TLB2_DEBUG__L1_TLB_DEBUG_MASK 0x00078000L
+#define MC_VM_MB_L1_TLB2_DEBUG__L1_TLB_DEBUG__SHIFT 0x0000000f
+#define MC_VM_MB_L1_TLB2_DEBUG__SEND_FREE_AT_RTN_MASK 0x00000100L
+#define MC_VM_MB_L1_TLB2_DEBUG__SEND_FREE_AT_RTN__SHIFT 0x00000008
+#define MC_VM_MB_L1_TLB2_STATUS__BUSY_MASK 0x00000001L
+#define MC_VM_MB_L1_TLB2_STATUS__BUSY__SHIFT 0x00000000
+#define MC_VM_MB_L1_TLB3_DEBUG__EFFECTIVE_L1_QUEUE_SIZE_MASK 0x00007000L
+#define MC_VM_MB_L1_TLB3_DEBUG__EFFECTIVE_L1_QUEUE_SIZE__SHIFT 0x0000000c
+#define MC_VM_MB_L1_TLB3_DEBUG__EFFECTIVE_L1_TLB_SIZE_MASK 0x00000e00L
+#define MC_VM_MB_L1_TLB3_DEBUG__EFFECTIVE_L1_TLB_SIZE__SHIFT 0x00000009
+#define MC_VM_MB_L1_TLB3_DEBUG__INVALIDATE_L1_TLB_MASK 0x00000001L
+#define MC_VM_MB_L1_TLB3_DEBUG__INVALIDATE_L1_TLB__SHIFT 0x00000000
+#define MC_VM_MB_L1_TLB3_DEBUG__L1_TLB_DEBUG_MASK 0x00078000L
+#define MC_VM_MB_L1_TLB3_DEBUG__L1_TLB_DEBUG__SHIFT 0x0000000f
+#define MC_VM_MB_L1_TLB3_DEBUG__SEND_FREE_AT_RTN_MASK 0x00000100L
+#define MC_VM_MB_L1_TLB3_DEBUG__SEND_FREE_AT_RTN__SHIFT 0x00000008
+#define MC_VM_MB_L1_TLB3_STATUS__BUSY_MASK 0x00000001L
+#define MC_VM_MB_L1_TLB3_STATUS__BUSY__SHIFT 0x00000000
+#define MC_VM_MB_L2ARBITER_L2_CREDITS__L2_IF_CREDITS_MASK 0x0000003fL
+#define MC_VM_MB_L2ARBITER_L2_CREDITS__L2_IF_CREDITS__SHIFT 0x00000000
+#define MC_VM_MD_L1_TLB0_DEBUG__EFFECTIVE_L1_QUEUE_SIZE_MASK 0x00007000L
+#define MC_VM_MD_L1_TLB0_DEBUG__EFFECTIVE_L1_QUEUE_SIZE__SHIFT 0x0000000c
+#define MC_VM_MD_L1_TLB0_DEBUG__EFFECTIVE_L1_TLB_SIZE_MASK 0x00000e00L
+#define MC_VM_MD_L1_TLB0_DEBUG__EFFECTIVE_L1_TLB_SIZE__SHIFT 0x00000009
+#define MC_VM_MD_L1_TLB0_DEBUG__INVALIDATE_L1_TLB_MASK 0x00000001L
+#define MC_VM_MD_L1_TLB0_DEBUG__INVALIDATE_L1_TLB__SHIFT 0x00000000
+#define MC_VM_MD_L1_TLB0_DEBUG__L1_TLB_DEBUG_MASK 0x00078000L
+#define MC_VM_MD_L1_TLB0_DEBUG__L1_TLB_DEBUG__SHIFT 0x0000000f
+#define MC_VM_MD_L1_TLB0_DEBUG__SEND_FREE_AT_RTN_MASK 0x00000100L
+#define MC_VM_MD_L1_TLB0_DEBUG__SEND_FREE_AT_RTN__SHIFT 0x00000008
+#define MC_VM_MD_L1_TLB0_STATUS__BUSY_MASK 0x00000001L
+#define MC_VM_MD_L1_TLB0_STATUS__BUSY__SHIFT 0x00000000
+#define MC_VM_MD_L1_TLB1_DEBUG__EFFECTIVE_L1_QUEUE_SIZE_MASK 0x00007000L
+#define MC_VM_MD_L1_TLB1_DEBUG__EFFECTIVE_L1_QUEUE_SIZE__SHIFT 0x0000000c
+#define MC_VM_MD_L1_TLB1_DEBUG__EFFECTIVE_L1_TLB_SIZE_MASK 0x00000e00L
+#define MC_VM_MD_L1_TLB1_DEBUG__EFFECTIVE_L1_TLB_SIZE__SHIFT 0x00000009
+#define MC_VM_MD_L1_TLB1_DEBUG__INVALIDATE_L1_TLB_MASK 0x00000001L
+#define MC_VM_MD_L1_TLB1_DEBUG__INVALIDATE_L1_TLB__SHIFT 0x00000000
+#define MC_VM_MD_L1_TLB1_DEBUG__L1_TLB_DEBUG_MASK 0x00078000L
+#define MC_VM_MD_L1_TLB1_DEBUG__L1_TLB_DEBUG__SHIFT 0x0000000f
+#define MC_VM_MD_L1_TLB1_DEBUG__SEND_FREE_AT_RTN_MASK 0x00000100L
+#define MC_VM_MD_L1_TLB1_DEBUG__SEND_FREE_AT_RTN__SHIFT 0x00000008
+#define MC_VM_MD_L1_TLB1_STATUS__BUSY_MASK 0x00000001L
+#define MC_VM_MD_L1_TLB1_STATUS__BUSY__SHIFT 0x00000000
+#define MC_VM_MD_L1_TLB2_DEBUG__EFFECTIVE_L1_QUEUE_SIZE_MASK 0x00007000L
+#define MC_VM_MD_L1_TLB2_DEBUG__EFFECTIVE_L1_QUEUE_SIZE__SHIFT 0x0000000c
+#define MC_VM_MD_L1_TLB2_DEBUG__EFFECTIVE_L1_TLB_SIZE_MASK 0x00000e00L
+#define MC_VM_MD_L1_TLB2_DEBUG__EFFECTIVE_L1_TLB_SIZE__SHIFT 0x00000009
+#define MC_VM_MD_L1_TLB2_DEBUG__INVALIDATE_L1_TLB_MASK 0x00000001L
+#define MC_VM_MD_L1_TLB2_DEBUG__INVALIDATE_L1_TLB__SHIFT 0x00000000
+#define MC_VM_MD_L1_TLB2_DEBUG__L1_TLB_DEBUG_MASK 0x00078000L
+#define MC_VM_MD_L1_TLB2_DEBUG__L1_TLB_DEBUG__SHIFT 0x0000000f
+#define MC_VM_MD_L1_TLB2_DEBUG__SEND_FREE_AT_RTN_MASK 0x00000100L
+#define MC_VM_MD_L1_TLB2_DEBUG__SEND_FREE_AT_RTN__SHIFT 0x00000008
+#define MC_VM_MD_L1_TLB2_STATUS__BUSY_MASK 0x00000001L
+#define MC_VM_MD_L1_TLB2_STATUS__BUSY__SHIFT 0x00000000
+#define MC_VM_MD_L1_TLB3_DEBUG__EFFECTIVE_L1_QUEUE_SIZE_MASK 0x00007000L
+#define MC_VM_MD_L1_TLB3_DEBUG__EFFECTIVE_L1_QUEUE_SIZE__SHIFT 0x0000000c
+#define MC_VM_MD_L1_TLB3_DEBUG__EFFECTIVE_L1_TLB_SIZE_MASK 0x00000e00L
+#define MC_VM_MD_L1_TLB3_DEBUG__EFFECTIVE_L1_TLB_SIZE__SHIFT 0x00000009
+#define MC_VM_MD_L1_TLB3_DEBUG__INVALIDATE_L1_TLB_MASK 0x00000001L
+#define MC_VM_MD_L1_TLB3_DEBUG__INVALIDATE_L1_TLB__SHIFT 0x00000000
+#define MC_VM_MD_L1_TLB3_DEBUG__L1_TLB_DEBUG_MASK 0x00078000L
+#define MC_VM_MD_L1_TLB3_DEBUG__L1_TLB_DEBUG__SHIFT 0x0000000f
+#define MC_VM_MD_L1_TLB3_DEBUG__SEND_FREE_AT_RTN_MASK 0x00000100L
+#define MC_VM_MD_L1_TLB3_DEBUG__SEND_FREE_AT_RTN__SHIFT 0x00000008
+#define MC_VM_MD_L1_TLB3_STATUS__BUSY_MASK 0x00000001L
+#define MC_VM_MD_L1_TLB3_STATUS__BUSY__SHIFT 0x00000000
+#define MC_VM_MD_L2ARBITER_L2_CREDITS__L2_IF_CREDITS_MASK 0x0000003fL
+#define MC_VM_MD_L2ARBITER_L2_CREDITS__L2_IF_CREDITS__SHIFT 0x00000000
+#define MC_VM_MX_L1_TLB_CNTL__ECO_BITS_MASK 0x00000780L
+#define MC_VM_MX_L1_TLB_CNTL__ECO_BITS__SHIFT 0x00000007
+#define MC_VM_MX_L1_TLB_CNTL__ENABLE_ADVANCED_DRIVER_MODEL_MASK 0x00000040L
+#define MC_VM_MX_L1_TLB_CNTL__ENABLE_ADVANCED_DRIVER_MODEL__SHIFT 0x00000006
+#define MC_VM_MX_L1_TLB_CNTL__ENABLE_L1_FRAGMENT_PROCESSING_MASK 0x00000002L
+#define MC_VM_MX_L1_TLB_CNTL__ENABLE_L1_FRAGMENT_PROCESSING__SHIFT 0x00000001
+#define MC_VM_MX_L1_TLB_CNTL__ENABLE_L1_TLB_MASK 0x00000001L
+#define MC_VM_MX_L1_TLB_CNTL__ENABLE_L1_TLB__SHIFT 0x00000000
+#define MC_VM_MX_L1_TLB_CNTL__SYSTEM_ACCESS_MODE_MASK 0x00000018L
+#define MC_VM_MX_L1_TLB_CNTL__SYSTEM_ACCESS_MODE__SHIFT 0x00000003
+#define MC_VM_MX_L1_TLB_CNTL__SYSTEM_APERTURE_UNMAPPED_ACCESS_MASK 0x00000020L
+#define MC_VM_MX_L1_TLB_CNTL__SYSTEM_APERTURE_UNMAPPED_ACCESS__SHIFT 0x00000005
+#define MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR__PHYSICAL_PAGE_NUMBER_MASK 0x0fffffffL
+#define MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR__PHYSICAL_PAGE_NUMBER__SHIFT 0x00000000
+#define MC_VM_SYSTEM_APERTURE_HIGH_ADDR__LOGICAL_PAGE_NUMBER_MASK 0x0fffffffL
+#define MC_VM_SYSTEM_APERTURE_HIGH_ADDR__LOGICAL_PAGE_NUMBER__SHIFT 0x00000000
+#define MC_VM_SYSTEM_APERTURE_LOW_ADDR__LOGICAL_PAGE_NUMBER_MASK 0x0fffffffL
+#define MC_VM_SYSTEM_APERTURE_LOW_ADDR__LOGICAL_PAGE_NUMBER__SHIFT 0x00000000
+#define MC_WR_CB__BLACKOUT_EXEMPT_MASK 0x00000008L
+#define MC_WR_CB__BLACKOUT_EXEMPT__SHIFT 0x00000003
+#define MC_WR_CB__ENABLE_MASK 0x00000001L
+#define MC_WR_CB__ENABLE__SHIFT 0x00000000
+#define MC_WR_CB__LAZY_TIMER_MASK 0x00007800L
+#define MC_WR_CB__LAZY_TIMER__SHIFT 0x0000000b
+#define MC_WR_CB__MAX_BURST_MASK 0x00000780L
+#define MC_WR_CB__MAX_BURST__SHIFT 0x00000007
+#define MC_WR_CB__PRESCALE_MASK 0x00000006L
+#define MC_WR_CB__PRESCALE__SHIFT 0x00000001
+#define MC_WR_CB__STALL_MODE_MASK 0x00000030L
+#define MC_WR_CB__STALL_MODE__SHIFT 0x00000004
+#define MC_WR_CB__STALL_OVERRIDE_MASK 0x00000040L
+#define MC_WR_CB__STALL_OVERRIDE__SHIFT 0x00000006
+#define MC_WR_CB__STALL_OVERRIDE_WTM_MASK 0x00008000L
+#define MC_WR_CB__STALL_OVERRIDE_WTM__SHIFT 0x0000000f
+#define MC_WR_DB__BLACKOUT_EXEMPT_MASK 0x00000008L
+#define MC_WR_DB__BLACKOUT_EXEMPT__SHIFT 0x00000003
+#define MC_WR_DB__ENABLE_MASK 0x00000001L
+#define MC_WR_DB__ENABLE__SHIFT 0x00000000
+#define MC_WR_DB__LAZY_TIMER_MASK 0x00007800L
+#define MC_WR_DB__LAZY_TIMER__SHIFT 0x0000000b
+#define MC_WR_DB__MAX_BURST_MASK 0x00000780L
+#define MC_WR_DB__MAX_BURST__SHIFT 0x00000007
+#define MC_WR_DB__PRESCALE_MASK 0x00000006L
+#define MC_WR_DB__PRESCALE__SHIFT 0x00000001
+#define MC_WR_DB__STALL_MODE_MASK 0x00000030L
+#define MC_WR_DB__STALL_MODE__SHIFT 0x00000004
+#define MC_WR_DB__STALL_OVERRIDE_MASK 0x00000040L
+#define MC_WR_DB__STALL_OVERRIDE__SHIFT 0x00000006
+#define MC_WR_DB__STALL_OVERRIDE_WTM_MASK 0x00008000L
+#define MC_WR_DB__STALL_OVERRIDE_WTM__SHIFT 0x0000000f
+#define MC_WR_GRP_EXT__DBSTEN0_MASK 0x0000000fL
+#define MC_WR_GRP_EXT__DBSTEN0__SHIFT 0x00000000
+#define MC_WR_GRP_EXT__TC0_MASK 0x000000f0L
+#define MC_WR_GRP_EXT__TC0__SHIFT 0x00000004
+#define MC_WR_GRP_GFX__CP_MASK 0x0000000fL
+#define MC_WR_GRP_GFX__CP__SHIFT 0x00000000
+#define MC_WR_GRP_GFX__XDMA_MASK 0x0000f000L
+#define MC_WR_GRP_GFX__XDMAM_MASK 0x000f0000L
+#define MC_WR_GRP_GFX__XDMAM__SHIFT 0x00000010
+#define MC_WR_GRP_GFX__XDMA__SHIFT 0x0000000c
+#define MC_WR_GRP_LCL__CB0_MASK 0x0000000fL
+#define MC_WR_GRP_LCL__CB0__SHIFT 0x00000000
+#define MC_WR_GRP_LCL__CBCMASK0_MASK 0x000000f0L
+#define MC_WR_GRP_LCL__CBCMASK0__SHIFT 0x00000004
+#define MC_WR_GRP_LCL__CBFMASK0_MASK 0x00000f00L
+#define MC_WR_GRP_LCL__CBFMASK0__SHIFT 0x00000008
+#define MC_WR_GRP_LCL__CBIMMED0_MASK 0xf0000000L
+#define MC_WR_GRP_LCL__CBIMMED0__SHIFT 0x0000001c
+#define MC_WR_GRP_LCL__DB0_MASK 0x0000f000L
+#define MC_WR_GRP_LCL__DB0__SHIFT 0x0000000c
+#define MC_WR_GRP_LCL__DBHTILE0_MASK 0x000f0000L
+#define MC_WR_GRP_LCL__DBHTILE0__SHIFT 0x00000010
+#define MC_WR_GRP_LCL__SX0_MASK 0x00f00000L
+#define MC_WR_GRP_LCL__SX0__SHIFT 0x00000014
+#define MC_WR_GRP_OTH__HDP_MASK 0x00000f00L
+#define MC_WR_GRP_OTH__HDP__SHIFT 0x00000008
+#define MC_WR_GRP_OTH__SEM_MASK 0x0000f000L
+#define MC_WR_GRP_OTH__SEM__SHIFT 0x0000000c
+#define MC_WR_GRP_OTH__UMC_MASK 0x000f0000L
+#define MC_WR_GRP_OTH__UMC__SHIFT 0x00000010
+#define MC_WR_GRP_OTH__UVD_EXT0_MASK 0x0000000fL
+#define MC_WR_GRP_OTH__UVD_EXT0__SHIFT 0x00000000
+#define MC_WR_GRP_OTH__UVD_EXT1_MASK 0xf0000000L
+#define MC_WR_GRP_OTH__UVD_EXT1__SHIFT 0x0000001c
+#define MC_WR_GRP_OTH__UVD_MASK 0x00f00000L
+#define MC_WR_GRP_OTH__UVD__SHIFT 0x00000014
+#define MC_WR_GRP_OTH__XDP_MASK 0x0f000000L
+#define MC_WR_GRP_OTH__XDP__SHIFT 0x00000018
+#define MC_WR_GRP_SYS__IH_MASK 0x0000000fL
+#define MC_WR_GRP_SYS__IH__SHIFT 0x00000000
+#define MC_WR_GRP_SYS__MCIF_MASK 0x000000f0L
+#define MC_WR_GRP_SYS__MCIF__SHIFT 0x00000004
+#define MC_WR_GRP_SYS__RLC_MASK 0x00000f00L
+#define MC_WR_GRP_SYS__RLC__SHIFT 0x00000008
+#define MC_WR_GRP_SYS__SMU_MASK 0x00f00000L
+#define MC_WR_GRP_SYS__SMU__SHIFT 0x00000014
+#define MC_WR_GRP_SYS__VCE_MASK 0x0f000000L
+#define MC_WR_GRP_SYS__VCE__SHIFT 0x00000018
+#define MC_WR_GRP_SYS__VCEU_MASK 0xf0000000L
+#define MC_WR_GRP_SYS__VCEU__SHIFT 0x0000001c
+#define MC_WR_HUB__BLACKOUT_EXEMPT_MASK 0x00000008L
+#define MC_WR_HUB__BLACKOUT_EXEMPT__SHIFT 0x00000003
+#define MC_WR_HUB__ENABLE_MASK 0x00000001L
+#define MC_WR_HUB__ENABLE__SHIFT 0x00000000
+#define MC_WR_HUB__LAZY_TIMER_MASK 0x00007800L
+#define MC_WR_HUB__LAZY_TIMER__SHIFT 0x0000000b
+#define MC_WR_HUB__MAX_BURST_MASK 0x00000780L
+#define MC_WR_HUB__MAX_BURST__SHIFT 0x00000007
+#define MC_WR_HUB__PRESCALE_MASK 0x00000006L
+#define MC_WR_HUB__PRESCALE__SHIFT 0x00000001
+#define MC_WR_HUB__STALL_MODE_MASK 0x00000030L
+#define MC_WR_HUB__STALL_MODE__SHIFT 0x00000004
+#define MC_WR_HUB__STALL_OVERRIDE_MASK 0x00000040L
+#define MC_WR_HUB__STALL_OVERRIDE__SHIFT 0x00000006
+#define MC_WR_HUB__STALL_OVERRIDE_WTM_MASK 0x00008000L
+#define MC_WR_HUB__STALL_OVERRIDE_WTM__SHIFT 0x0000000f
+#define MC_WR_TC0__BLACKOUT_EXEMPT_MASK 0x00000008L
+#define MC_WR_TC0__BLACKOUT_EXEMPT__SHIFT 0x00000003
+#define MC_WR_TC0__ENABLE_MASK 0x00000001L
+#define MC_WR_TC0__ENABLE__SHIFT 0x00000000
+#define MC_WR_TC0__LAZY_TIMER_MASK 0x00007800L
+#define MC_WR_TC0__LAZY_TIMER__SHIFT 0x0000000b
+#define MC_WR_TC0__MAX_BURST_MASK 0x00000780L
+#define MC_WR_TC0__MAX_BURST__SHIFT 0x00000007
+#define MC_WR_TC0__PRESCALE_MASK 0x00000006L
+#define MC_WR_TC0__PRESCALE__SHIFT 0x00000001
+#define MC_WR_TC0__STALL_MODE_MASK 0x00000030L
+#define MC_WR_TC0__STALL_MODE__SHIFT 0x00000004
+#define MC_WR_TC0__STALL_OVERRIDE_MASK 0x00000040L
+#define MC_WR_TC0__STALL_OVERRIDE__SHIFT 0x00000006
+#define MC_WR_TC0__STALL_OVERRIDE_WTM_MASK 0x00008000L
+#define MC_WR_TC0__STALL_OVERRIDE_WTM__SHIFT 0x0000000f
+#define MC_WR_TC1__BLACKOUT_EXEMPT_MASK 0x00000008L
+#define MC_WR_TC1__BLACKOUT_EXEMPT__SHIFT 0x00000003
+#define MC_WR_TC1__ENABLE_MASK 0x00000001L
+#define MC_WR_TC1__ENABLE__SHIFT 0x00000000
+#define MC_WR_TC1__LAZY_TIMER_MASK 0x00007800L
+#define MC_WR_TC1__LAZY_TIMER__SHIFT 0x0000000b
+#define MC_WR_TC1__MAX_BURST_MASK 0x00000780L
+#define MC_WR_TC1__MAX_BURST__SHIFT 0x00000007
+#define MC_WR_TC1__PRESCALE_MASK 0x00000006L
+#define MC_WR_TC1__PRESCALE__SHIFT 0x00000001
+#define MC_WR_TC1__STALL_MODE_MASK 0x00000030L
+#define MC_WR_TC1__STALL_MODE__SHIFT 0x00000004
+#define MC_WR_TC1__STALL_OVERRIDE_MASK 0x00000040L
+#define MC_WR_TC1__STALL_OVERRIDE__SHIFT 0x00000006
+#define MC_WR_TC1__STALL_OVERRIDE_WTM_MASK 0x00008000L
+#define MC_WR_TC1__STALL_OVERRIDE_WTM__SHIFT 0x0000000f
+#define MC_XBAR_ADDR_DEC__GECC_MASK 0x00000002L
+#define MC_XBAR_ADDR_DEC__GECC__SHIFT 0x00000001
+#define MC_XBAR_ADDR_DEC__NO_DIV_BY_3_MASK 0x00000001L
+#define MC_XBAR_ADDR_DEC__NO_DIV_BY_3__SHIFT 0x00000000
+#define MC_XBAR_ADDR_DEC__RB_SPLIT_COLHI_MASK 0x00000008L
+#define MC_XBAR_ADDR_DEC__RB_SPLIT_COLHI__SHIFT 0x00000003
+#define MC_XBAR_ADDR_DEC__RB_SPLIT_MASK 0x00000004L
+#define MC_XBAR_ADDR_DEC__RB_SPLIT__SHIFT 0x00000002
+#define MC_XBAR_ARB__BREAK_BURST_CID_CHANGE_MASK 0x00000004L
+#define MC_XBAR_ARB__BREAK_BURST_CID_CHANGE__SHIFT 0x00000002
+#define MC_XBAR_ARB__DISABLE_HUB_STALL_HIGHEST_MASK 0x00000002L
+#define MC_XBAR_ARB__DISABLE_HUB_STALL_HIGHEST__SHIFT 0x00000001
+#define MC_XBAR_ARB__HUBRD_HIGHEST_MASK 0x00000001L
+#define MC_XBAR_ARB__HUBRD_HIGHEST__SHIFT 0x00000000
+#define MC_XBAR_ARB_MAX_BURST__RD_PORT0_MASK 0x0000000fL
+#define MC_XBAR_ARB_MAX_BURST__RD_PORT0__SHIFT 0x00000000
+#define MC_XBAR_ARB_MAX_BURST__RD_PORT1_MASK 0x000000f0L
+#define MC_XBAR_ARB_MAX_BURST__RD_PORT1__SHIFT 0x00000004
+#define MC_XBAR_ARB_MAX_BURST__RD_PORT2_MASK 0x00000f00L
+#define MC_XBAR_ARB_MAX_BURST__RD_PORT2__SHIFT 0x00000008
+#define MC_XBAR_ARB_MAX_BURST__RD_PORT3_MASK 0x0000f000L
+#define MC_XBAR_ARB_MAX_BURST__RD_PORT3__SHIFT 0x0000000c
+#define MC_XBAR_ARB_MAX_BURST__WR_PORT0_MASK 0x000f0000L
+#define MC_XBAR_ARB_MAX_BURST__WR_PORT0__SHIFT 0x00000010
+#define MC_XBAR_ARB_MAX_BURST__WR_PORT1_MASK 0x00f00000L
+#define MC_XBAR_ARB_MAX_BURST__WR_PORT1__SHIFT 0x00000014
+#define MC_XBAR_ARB_MAX_BURST__WR_PORT2_MASK 0x0f000000L
+#define MC_XBAR_ARB_MAX_BURST__WR_PORT2__SHIFT 0x00000018
+#define MC_XBAR_ARB_MAX_BURST__WR_PORT3_MASK 0xf0000000L
+#define MC_XBAR_ARB_MAX_BURST__WR_PORT3__SHIFT 0x0000001c
+#define MC_XBAR_CHTRIREMAP__CH0_MASK 0x00000003L
+#define MC_XBAR_CHTRIREMAP__CH0__SHIFT 0x00000000
+#define MC_XBAR_CHTRIREMAP__CH1_MASK 0x0000000cL
+#define MC_XBAR_CHTRIREMAP__CH1__SHIFT 0x00000002
+#define MC_XBAR_CHTRIREMAP__CH2_MASK 0x00000030L
+#define MC_XBAR_CHTRIREMAP__CH2__SHIFT 0x00000004
+#define MC_XBAR_PERF_MON_CNTL0__ALLOW_WRAP_MASK 0x10000000L
+#define MC_XBAR_PERF_MON_CNTL0__ALLOW_WRAP__SHIFT 0x0000001c
+#define MC_XBAR_PERF_MON_CNTL0__START_MODE_MASK 0x03000000L
+#define MC_XBAR_PERF_MON_CNTL0__START_MODE__SHIFT 0x00000018
+#define MC_XBAR_PERF_MON_CNTL0__START_THRESH_MASK 0x00000fffL
+#define MC_XBAR_PERF_MON_CNTL0__START_THRESH__SHIFT 0x00000000
+#define MC_XBAR_PERF_MON_CNTL0__STOP_MODE_MASK 0x0c000000L
+#define MC_XBAR_PERF_MON_CNTL0__STOP_MODE__SHIFT 0x0000001a
+#define MC_XBAR_PERF_MON_CNTL0__STOP_THRESH_MASK 0x00fff000L
+#define MC_XBAR_PERF_MON_CNTL0__STOP_THRESH__SHIFT 0x0000000c
+#define MC_XBAR_PERF_MON_CNTL1__START_TRIG_ID_MASK 0x0000ff00L
+#define MC_XBAR_PERF_MON_CNTL1__START_TRIG_ID__SHIFT 0x00000008
+#define MC_XBAR_PERF_MON_CNTL1__STOP_TRIG_ID_MASK 0x00ff0000L
+#define MC_XBAR_PERF_MON_CNTL1__STOP_TRIG_ID__SHIFT 0x00000010
+#define MC_XBAR_PERF_MON_CNTL1__THRESH_CNTR_ID_MASK 0x000000ffL
+#define MC_XBAR_PERF_MON_CNTL1__THRESH_CNTR_ID__SHIFT 0x00000000
+#define MC_XBAR_PERF_MON_CNTL2__MON0_ID_MASK 0x000000ffL
+#define MC_XBAR_PERF_MON_CNTL2__MON0_ID__SHIFT 0x00000000
+#define MC_XBAR_PERF_MON_CNTL2__MON1_ID_MASK 0x0000ff00L
+#define MC_XBAR_PERF_MON_CNTL2__MON1_ID__SHIFT 0x00000008
+#define MC_XBAR_PERF_MON_CNTL2__MON2_ID_MASK 0x00ff0000L
+#define MC_XBAR_PERF_MON_CNTL2__MON2_ID__SHIFT 0x00000010
+#define MC_XBAR_PERF_MON_CNTL2__MON3_ID_MASK 0xff000000L
+#define MC_XBAR_PERF_MON_CNTL2__MON3_ID__SHIFT 0x00000018
+#define MC_XBAR_PERF_MON_MAX_THSH__MON0_MASK 0x000000ffL
+#define MC_XBAR_PERF_MON_MAX_THSH__MON0__SHIFT 0x00000000
+#define MC_XBAR_PERF_MON_MAX_THSH__MON1_MASK 0x0000ff00L
+#define MC_XBAR_PERF_MON_MAX_THSH__MON1__SHIFT 0x00000008
+#define MC_XBAR_PERF_MON_MAX_THSH__MON2_MASK 0x00ff0000L
+#define MC_XBAR_PERF_MON_MAX_THSH__MON2__SHIFT 0x00000010
+#define MC_XBAR_PERF_MON_MAX_THSH__MON3_MASK 0xff000000L
+#define MC_XBAR_PERF_MON_MAX_THSH__MON3__SHIFT 0x00000018
+#define MC_XBAR_PERF_MON_RSLT0__COUNT_MASK 0xffffffffL
+#define MC_XBAR_PERF_MON_RSLT0__COUNT__SHIFT 0x00000000
+#define MC_XBAR_PERF_MON_RSLT1__COUNT_MASK 0xffffffffL
+#define MC_XBAR_PERF_MON_RSLT1__COUNT__SHIFT 0x00000000
+#define MC_XBAR_PERF_MON_RSLT2__COUNT_MASK 0xffffffffL
+#define MC_XBAR_PERF_MON_RSLT2__COUNT__SHIFT 0x00000000
+#define MC_XBAR_PERF_MON_RSLT3__COUNT_MASK 0xffffffffL
+#define MC_XBAR_PERF_MON_RSLT3__COUNT__SHIFT 0x00000000
+#define MC_XBAR_RDREQ_CREDIT__OUT0_MASK 0x000000ffL
+#define MC_XBAR_RDREQ_CREDIT__OUT0__SHIFT 0x00000000
+#define MC_XBAR_RDREQ_CREDIT__OUT1_MASK 0x0000ff00L
+#define MC_XBAR_RDREQ_CREDIT__OUT1__SHIFT 0x00000008
+#define MC_XBAR_RDREQ_CREDIT__OUT2_MASK 0x00ff0000L
+#define MC_XBAR_RDREQ_CREDIT__OUT2__SHIFT 0x00000010
+#define MC_XBAR_RDREQ_CREDIT__OUT3_MASK 0xff000000L
+#define MC_XBAR_RDREQ_CREDIT__OUT3__SHIFT 0x00000018
+#define MC_XBAR_RDREQ_PRI_CREDIT__OUT0_MASK 0x000000ffL
+#define MC_XBAR_RDREQ_PRI_CREDIT__OUT0__SHIFT 0x00000000
+#define MC_XBAR_RDREQ_PRI_CREDIT__OUT1_MASK 0x0000ff00L
+#define MC_XBAR_RDREQ_PRI_CREDIT__OUT1__SHIFT 0x00000008
+#define MC_XBAR_RDREQ_PRI_CREDIT__OUT2_MASK 0x00ff0000L
+#define MC_XBAR_RDREQ_PRI_CREDIT__OUT2__SHIFT 0x00000010
+#define MC_XBAR_RDREQ_PRI_CREDIT__OUT3_MASK 0xff000000L
+#define MC_XBAR_RDREQ_PRI_CREDIT__OUT3__SHIFT 0x00000018
+#define MC_XBAR_RDRET_CREDIT1__OUT0_MASK 0x000000ffL
+#define MC_XBAR_RDRET_CREDIT1__OUT0__SHIFT 0x00000000
+#define MC_XBAR_RDRET_CREDIT1__OUT1_MASK 0x0000ff00L
+#define MC_XBAR_RDRET_CREDIT1__OUT1__SHIFT 0x00000008
+#define MC_XBAR_RDRET_CREDIT1__OUT2_MASK 0x00ff0000L
+#define MC_XBAR_RDRET_CREDIT1__OUT2__SHIFT 0x00000010
+#define MC_XBAR_RDRET_CREDIT1__OUT3_MASK 0xff000000L
+#define MC_XBAR_RDRET_CREDIT1__OUT3__SHIFT 0x00000018
+#define MC_XBAR_RDRET_CREDIT2__HUB_LP_RDRET_SKID_MASK 0x00ff0000L
+#define MC_XBAR_RDRET_CREDIT2__HUB_LP_RDRET_SKID__SHIFT 0x00000010
+#define MC_XBAR_RDRET_CREDIT2__OUT4_MASK 0x000000ffL
+#define MC_XBAR_RDRET_CREDIT2__OUT4__SHIFT 0x00000000
+#define MC_XBAR_RDRET_CREDIT2__OUT5_MASK 0x0000ff00L
+#define MC_XBAR_RDRET_CREDIT2__OUT5__SHIFT 0x00000008
+#define MC_XBAR_RDRET_PRI_CREDIT1__OUT0_MASK 0x000000ffL
+#define MC_XBAR_RDRET_PRI_CREDIT1__OUT0__SHIFT 0x00000000
+#define MC_XBAR_RDRET_PRI_CREDIT1__OUT1_MASK 0x0000ff00L
+#define MC_XBAR_RDRET_PRI_CREDIT1__OUT1__SHIFT 0x00000008
+#define MC_XBAR_RDRET_PRI_CREDIT1__OUT2_MASK 0x00ff0000L
+#define MC_XBAR_RDRET_PRI_CREDIT1__OUT2__SHIFT 0x00000010
+#define MC_XBAR_RDRET_PRI_CREDIT1__OUT3_MASK 0xff000000L
+#define MC_XBAR_RDRET_PRI_CREDIT1__OUT3__SHIFT 0x00000018
+#define MC_XBAR_RDRET_PRI_CREDIT2__OUT4_MASK 0x000000ffL
+#define MC_XBAR_RDRET_PRI_CREDIT2__OUT4__SHIFT 0x00000000
+#define MC_XBAR_RDRET_PRI_CREDIT2__OUT5_MASK 0x0000ff00L
+#define MC_XBAR_RDRET_PRI_CREDIT2__OUT5__SHIFT 0x00000008
+#define MC_XBAR_REMOTE__RDREQ_EN_GOQ_MASK 0x00000002L
+#define MC_XBAR_REMOTE__RDREQ_EN_GOQ__SHIFT 0x00000001
+#define MC_XBAR_REMOTE__WRREQ_EN_GOQ_MASK 0x00000001L
+#define MC_XBAR_REMOTE__WRREQ_EN_GOQ__SHIFT 0x00000000
+#define MC_XBAR_SPARE0__BIT_MASK 0xffffffffL
+#define MC_XBAR_SPARE0__BIT__SHIFT 0x00000000
+#define MC_XBAR_SPARE1__BIT_MASK 0xffffffffL
+#define MC_XBAR_SPARE1__BIT__SHIFT 0x00000000
+#define MC_XBAR_TWOCHAN__CH0_MASK 0x00000006L
+#define MC_XBAR_TWOCHAN__CH0__SHIFT 0x00000001
+#define MC_XBAR_TWOCHAN__CH1_MASK 0x00000018L
+#define MC_XBAR_TWOCHAN__CH1__SHIFT 0x00000003
+#define MC_XBAR_TWOCHAN__DISABLE_ONEPORT_MASK 0x00000001L
+#define MC_XBAR_TWOCHAN__DISABLE_ONEPORT__SHIFT 0x00000000
+#define MC_XBAR_WRREQ_CREDIT__OUT0_MASK 0x000000ffL
+#define MC_XBAR_WRREQ_CREDIT__OUT0__SHIFT 0x00000000
+#define MC_XBAR_WRREQ_CREDIT__OUT1_MASK 0x0000ff00L
+#define MC_XBAR_WRREQ_CREDIT__OUT1__SHIFT 0x00000008
+#define MC_XBAR_WRREQ_CREDIT__OUT2_MASK 0x00ff0000L
+#define MC_XBAR_WRREQ_CREDIT__OUT2__SHIFT 0x00000010
+#define MC_XBAR_WRREQ_CREDIT__OUT3_MASK 0xff000000L
+#define MC_XBAR_WRREQ_CREDIT__OUT3__SHIFT 0x00000018
+#define MC_XBAR_WRRET_CREDIT1__OUT0_MASK 0x000000ffL
+#define MC_XBAR_WRRET_CREDIT1__OUT0__SHIFT 0x00000000
+#define MC_XBAR_WRRET_CREDIT1__OUT1_MASK 0x0000ff00L
+#define MC_XBAR_WRRET_CREDIT1__OUT1__SHIFT 0x00000008
+#define MC_XBAR_WRRET_CREDIT1__OUT2_MASK 0x00ff0000L
+#define MC_XBAR_WRRET_CREDIT1__OUT2__SHIFT 0x00000010
+#define MC_XBAR_WRRET_CREDIT1__OUT3_MASK 0xff000000L
+#define MC_XBAR_WRRET_CREDIT1__OUT3__SHIFT 0x00000018
+#define MC_XBAR_WRRET_CREDIT2__OUT4_MASK 0x000000ffL
+#define MC_XBAR_WRRET_CREDIT2__OUT4__SHIFT 0x00000000
+#define MC_XBAR_WRRET_CREDIT2__OUT5_MASK 0x0000ff00L
+#define MC_XBAR_WRRET_CREDIT2__OUT5__SHIFT 0x00000008
+#define MC_XPB_CLG_CFG0__HOST_FLUSH_MASK 0x00003c00L
+#define MC_XPB_CLG_CFG0__HOST_FLUSH__SHIFT 0x0000000a
+#define MC_XPB_CLG_CFG0__LB_TYPE_MASK 0x00000070L
+#define MC_XPB_CLG_CFG0__LB_TYPE__SHIFT 0x00000004
+#define MC_XPB_CLG_CFG0__P2P_BAR_MASK 0x00000380L
+#define MC_XPB_CLG_CFG0__P2P_BAR__SHIFT 0x00000007
+#define MC_XPB_CLG_CFG0__SIDE_FLUSH_MASK 0x0003c000L
+#define MC_XPB_CLG_CFG0__SIDE_FLUSH__SHIFT 0x0000000e
+#define MC_XPB_CLG_CFG0__WCB_NUM_MASK 0x0000000fL
+#define MC_XPB_CLG_CFG0__WCB_NUM__SHIFT 0x00000000
+#define MC_XPB_CLG_CFG10__HOST_FLUSH_MASK 0x00003c00L
+#define MC_XPB_CLG_CFG10__HOST_FLUSH__SHIFT 0x0000000a
+#define MC_XPB_CLG_CFG10__LB_TYPE_MASK 0x00000070L
+#define MC_XPB_CLG_CFG10__LB_TYPE__SHIFT 0x00000004
+#define MC_XPB_CLG_CFG10__P2P_BAR_MASK 0x00000380L
+#define MC_XPB_CLG_CFG10__P2P_BAR__SHIFT 0x00000007
+#define MC_XPB_CLG_CFG10__SIDE_FLUSH_MASK 0x0003c000L
+#define MC_XPB_CLG_CFG10__SIDE_FLUSH__SHIFT 0x0000000e
+#define MC_XPB_CLG_CFG10__WCB_NUM_MASK 0x0000000fL
+#define MC_XPB_CLG_CFG10__WCB_NUM__SHIFT 0x00000000
+#define MC_XPB_CLG_CFG11__HOST_FLUSH_MASK 0x00003c00L
+#define MC_XPB_CLG_CFG11__HOST_FLUSH__SHIFT 0x0000000a
+#define MC_XPB_CLG_CFG11__LB_TYPE_MASK 0x00000070L
+#define MC_XPB_CLG_CFG11__LB_TYPE__SHIFT 0x00000004
+#define MC_XPB_CLG_CFG11__P2P_BAR_MASK 0x00000380L
+#define MC_XPB_CLG_CFG11__P2P_BAR__SHIFT 0x00000007
+#define MC_XPB_CLG_CFG11__SIDE_FLUSH_MASK 0x0003c000L
+#define MC_XPB_CLG_CFG11__SIDE_FLUSH__SHIFT 0x0000000e
+#define MC_XPB_CLG_CFG11__WCB_NUM_MASK 0x0000000fL
+#define MC_XPB_CLG_CFG11__WCB_NUM__SHIFT 0x00000000
+#define MC_XPB_CLG_CFG12__HOST_FLUSH_MASK 0x00003c00L
+#define MC_XPB_CLG_CFG12__HOST_FLUSH__SHIFT 0x0000000a
+#define MC_XPB_CLG_CFG12__LB_TYPE_MASK 0x00000070L
+#define MC_XPB_CLG_CFG12__LB_TYPE__SHIFT 0x00000004
+#define MC_XPB_CLG_CFG12__P2P_BAR_MASK 0x00000380L
+#define MC_XPB_CLG_CFG12__P2P_BAR__SHIFT 0x00000007
+#define MC_XPB_CLG_CFG12__SIDE_FLUSH_MASK 0x0003c000L
+#define MC_XPB_CLG_CFG12__SIDE_FLUSH__SHIFT 0x0000000e
+#define MC_XPB_CLG_CFG12__WCB_NUM_MASK 0x0000000fL
+#define MC_XPB_CLG_CFG12__WCB_NUM__SHIFT 0x00000000
+#define MC_XPB_CLG_CFG13__HOST_FLUSH_MASK 0x00003c00L
+#define MC_XPB_CLG_CFG13__HOST_FLUSH__SHIFT 0x0000000a
+#define MC_XPB_CLG_CFG13__LB_TYPE_MASK 0x00000070L
+#define MC_XPB_CLG_CFG13__LB_TYPE__SHIFT 0x00000004
+#define MC_XPB_CLG_CFG13__P2P_BAR_MASK 0x00000380L
+#define MC_XPB_CLG_CFG13__P2P_BAR__SHIFT 0x00000007
+#define MC_XPB_CLG_CFG13__SIDE_FLUSH_MASK 0x0003c000L
+#define MC_XPB_CLG_CFG13__SIDE_FLUSH__SHIFT 0x0000000e
+#define MC_XPB_CLG_CFG13__WCB_NUM_MASK 0x0000000fL
+#define MC_XPB_CLG_CFG13__WCB_NUM__SHIFT 0x00000000
+#define MC_XPB_CLG_CFG14__HOST_FLUSH_MASK 0x00003c00L
+#define MC_XPB_CLG_CFG14__HOST_FLUSH__SHIFT 0x0000000a
+#define MC_XPB_CLG_CFG14__LB_TYPE_MASK 0x00000070L
+#define MC_XPB_CLG_CFG14__LB_TYPE__SHIFT 0x00000004
+#define MC_XPB_CLG_CFG14__P2P_BAR_MASK 0x00000380L
+#define MC_XPB_CLG_CFG14__P2P_BAR__SHIFT 0x00000007
+#define MC_XPB_CLG_CFG14__SIDE_FLUSH_MASK 0x0003c000L
+#define MC_XPB_CLG_CFG14__SIDE_FLUSH__SHIFT 0x0000000e
+#define MC_XPB_CLG_CFG14__WCB_NUM_MASK 0x0000000fL
+#define MC_XPB_CLG_CFG14__WCB_NUM__SHIFT 0x00000000
+#define MC_XPB_CLG_CFG15__HOST_FLUSH_MASK 0x00003c00L
+#define MC_XPB_CLG_CFG15__HOST_FLUSH__SHIFT 0x0000000a
+#define MC_XPB_CLG_CFG15__LB_TYPE_MASK 0x00000070L
+#define MC_XPB_CLG_CFG15__LB_TYPE__SHIFT 0x00000004
+#define MC_XPB_CLG_CFG15__P2P_BAR_MASK 0x00000380L
+#define MC_XPB_CLG_CFG15__P2P_BAR__SHIFT 0x00000007
+#define MC_XPB_CLG_CFG15__SIDE_FLUSH_MASK 0x0003c000L
+#define MC_XPB_CLG_CFG15__SIDE_FLUSH__SHIFT 0x0000000e
+#define MC_XPB_CLG_CFG15__WCB_NUM_MASK 0x0000000fL
+#define MC_XPB_CLG_CFG15__WCB_NUM__SHIFT 0x00000000
+#define MC_XPB_CLG_CFG16__HOST_FLUSH_MASK 0x00003c00L
+#define MC_XPB_CLG_CFG16__HOST_FLUSH__SHIFT 0x0000000a
+#define MC_XPB_CLG_CFG16__LB_TYPE_MASK 0x00000070L
+#define MC_XPB_CLG_CFG16__LB_TYPE__SHIFT 0x00000004
+#define MC_XPB_CLG_CFG16__P2P_BAR_MASK 0x00000380L
+#define MC_XPB_CLG_CFG16__P2P_BAR__SHIFT 0x00000007
+#define MC_XPB_CLG_CFG16__SIDE_FLUSH_MASK 0x0003c000L
+#define MC_XPB_CLG_CFG16__SIDE_FLUSH__SHIFT 0x0000000e
+#define MC_XPB_CLG_CFG16__WCB_NUM_MASK 0x0000000fL
+#define MC_XPB_CLG_CFG16__WCB_NUM__SHIFT 0x00000000
+#define MC_XPB_CLG_CFG17__HOST_FLUSH_MASK 0x00003c00L
+#define MC_XPB_CLG_CFG17__HOST_FLUSH__SHIFT 0x0000000a
+#define MC_XPB_CLG_CFG17__LB_TYPE_MASK 0x00000070L
+#define MC_XPB_CLG_CFG17__LB_TYPE__SHIFT 0x00000004
+#define MC_XPB_CLG_CFG17__P2P_BAR_MASK 0x00000380L
+#define MC_XPB_CLG_CFG17__P2P_BAR__SHIFT 0x00000007
+#define MC_XPB_CLG_CFG17__SIDE_FLUSH_MASK 0x0003c000L
+#define MC_XPB_CLG_CFG17__SIDE_FLUSH__SHIFT 0x0000000e
+#define MC_XPB_CLG_CFG17__WCB_NUM_MASK 0x0000000fL
+#define MC_XPB_CLG_CFG17__WCB_NUM__SHIFT 0x00000000
+#define MC_XPB_CLG_CFG18__HOST_FLUSH_MASK 0x00003c00L
+#define MC_XPB_CLG_CFG18__HOST_FLUSH__SHIFT 0x0000000a
+#define MC_XPB_CLG_CFG18__LB_TYPE_MASK 0x00000070L
+#define MC_XPB_CLG_CFG18__LB_TYPE__SHIFT 0x00000004
+#define MC_XPB_CLG_CFG18__P2P_BAR_MASK 0x00000380L
+#define MC_XPB_CLG_CFG18__P2P_BAR__SHIFT 0x00000007
+#define MC_XPB_CLG_CFG18__SIDE_FLUSH_MASK 0x0003c000L
+#define MC_XPB_CLG_CFG18__SIDE_FLUSH__SHIFT 0x0000000e
+#define MC_XPB_CLG_CFG18__WCB_NUM_MASK 0x0000000fL
+#define MC_XPB_CLG_CFG18__WCB_NUM__SHIFT 0x00000000
+#define MC_XPB_CLG_CFG19__HOST_FLUSH_MASK 0x00003c00L
+#define MC_XPB_CLG_CFG19__HOST_FLUSH__SHIFT 0x0000000a
+#define MC_XPB_CLG_CFG19__LB_TYPE_MASK 0x00000070L
+#define MC_XPB_CLG_CFG19__LB_TYPE__SHIFT 0x00000004
+#define MC_XPB_CLG_CFG19__P2P_BAR_MASK 0x00000380L
+#define MC_XPB_CLG_CFG19__P2P_BAR__SHIFT 0x00000007
+#define MC_XPB_CLG_CFG19__SIDE_FLUSH_MASK 0x0003c000L
+#define MC_XPB_CLG_CFG19__SIDE_FLUSH__SHIFT 0x0000000e
+#define MC_XPB_CLG_CFG19__WCB_NUM_MASK 0x0000000fL
+#define MC_XPB_CLG_CFG19__WCB_NUM__SHIFT 0x00000000
+#define MC_XPB_CLG_CFG1__HOST_FLUSH_MASK 0x00003c00L
+#define MC_XPB_CLG_CFG1__HOST_FLUSH__SHIFT 0x0000000a
+#define MC_XPB_CLG_CFG1__LB_TYPE_MASK 0x00000070L
+#define MC_XPB_CLG_CFG1__LB_TYPE__SHIFT 0x00000004
+#define MC_XPB_CLG_CFG1__P2P_BAR_MASK 0x00000380L
+#define MC_XPB_CLG_CFG1__P2P_BAR__SHIFT 0x00000007
+#define MC_XPB_CLG_CFG1__SIDE_FLUSH_MASK 0x0003c000L
+#define MC_XPB_CLG_CFG1__SIDE_FLUSH__SHIFT 0x0000000e
+#define MC_XPB_CLG_CFG1__WCB_NUM_MASK 0x0000000fL
+#define MC_XPB_CLG_CFG1__WCB_NUM__SHIFT 0x00000000
+#define MC_XPB_CLG_CFG20__HOST_FLUSH_MASK 0x00003c00L
+#define MC_XPB_CLG_CFG20__HOST_FLUSH__SHIFT 0x0000000a
+#define MC_XPB_CLG_CFG20__LB_TYPE_MASK 0x00000070L
+#define MC_XPB_CLG_CFG20__LB_TYPE__SHIFT 0x00000004
+#define MC_XPB_CLG_CFG20__P2P_BAR_MASK 0x00000380L
+#define MC_XPB_CLG_CFG20__P2P_BAR__SHIFT 0x00000007
+#define MC_XPB_CLG_CFG20__SIDE_FLUSH_MASK 0x0003c000L
+#define MC_XPB_CLG_CFG20__SIDE_FLUSH__SHIFT 0x0000000e
+#define MC_XPB_CLG_CFG20__WCB_NUM_MASK 0x0000000fL
+#define MC_XPB_CLG_CFG20__WCB_NUM__SHIFT 0x00000000
+#define MC_XPB_CLG_CFG21__HOST_FLUSH_MASK 0x00003c00L
+#define MC_XPB_CLG_CFG21__HOST_FLUSH__SHIFT 0x0000000a
+#define MC_XPB_CLG_CFG21__LB_TYPE_MASK 0x00000070L
+#define MC_XPB_CLG_CFG21__LB_TYPE__SHIFT 0x00000004
+#define MC_XPB_CLG_CFG21__P2P_BAR_MASK 0x00000380L
+#define MC_XPB_CLG_CFG21__P2P_BAR__SHIFT 0x00000007
+#define MC_XPB_CLG_CFG21__SIDE_FLUSH_MASK 0x0003c000L
+#define MC_XPB_CLG_CFG21__SIDE_FLUSH__SHIFT 0x0000000e
+#define MC_XPB_CLG_CFG21__WCB_NUM_MASK 0x0000000fL
+#define MC_XPB_CLG_CFG21__WCB_NUM__SHIFT 0x00000000
+#define MC_XPB_CLG_CFG22__HOST_FLUSH_MASK 0x00003c00L
+#define MC_XPB_CLG_CFG22__HOST_FLUSH__SHIFT 0x0000000a
+#define MC_XPB_CLG_CFG22__LB_TYPE_MASK 0x00000070L
+#define MC_XPB_CLG_CFG22__LB_TYPE__SHIFT 0x00000004
+#define MC_XPB_CLG_CFG22__P2P_BAR_MASK 0x00000380L
+#define MC_XPB_CLG_CFG22__P2P_BAR__SHIFT 0x00000007
+#define MC_XPB_CLG_CFG22__SIDE_FLUSH_MASK 0x0003c000L
+#define MC_XPB_CLG_CFG22__SIDE_FLUSH__SHIFT 0x0000000e
+#define MC_XPB_CLG_CFG22__WCB_NUM_MASK 0x0000000fL
+#define MC_XPB_CLG_CFG22__WCB_NUM__SHIFT 0x00000000
+#define MC_XPB_CLG_CFG23__HOST_FLUSH_MASK 0x00003c00L
+#define MC_XPB_CLG_CFG23__HOST_FLUSH__SHIFT 0x0000000a
+#define MC_XPB_CLG_CFG23__LB_TYPE_MASK 0x00000070L
+#define MC_XPB_CLG_CFG23__LB_TYPE__SHIFT 0x00000004
+#define MC_XPB_CLG_CFG23__P2P_BAR_MASK 0x00000380L
+#define MC_XPB_CLG_CFG23__P2P_BAR__SHIFT 0x00000007
+#define MC_XPB_CLG_CFG23__SIDE_FLUSH_MASK 0x0003c000L
+#define MC_XPB_CLG_CFG23__SIDE_FLUSH__SHIFT 0x0000000e
+#define MC_XPB_CLG_CFG23__WCB_NUM_MASK 0x0000000fL
+#define MC_XPB_CLG_CFG23__WCB_NUM__SHIFT 0x00000000
+#define MC_XPB_CLG_CFG24__HOST_FLUSH_MASK 0x00003c00L
+#define MC_XPB_CLG_CFG24__HOST_FLUSH__SHIFT 0x0000000a
+#define MC_XPB_CLG_CFG24__LB_TYPE_MASK 0x00000070L
+#define MC_XPB_CLG_CFG24__LB_TYPE__SHIFT 0x00000004
+#define MC_XPB_CLG_CFG24__P2P_BAR_MASK 0x00000380L
+#define MC_XPB_CLG_CFG24__P2P_BAR__SHIFT 0x00000007
+#define MC_XPB_CLG_CFG24__SIDE_FLUSH_MASK 0x0003c000L
+#define MC_XPB_CLG_CFG24__SIDE_FLUSH__SHIFT 0x0000000e
+#define MC_XPB_CLG_CFG24__WCB_NUM_MASK 0x0000000fL
+#define MC_XPB_CLG_CFG24__WCB_NUM__SHIFT 0x00000000
+#define MC_XPB_CLG_CFG25__HOST_FLUSH_MASK 0x00003c00L
+#define MC_XPB_CLG_CFG25__HOST_FLUSH__SHIFT 0x0000000a
+#define MC_XPB_CLG_CFG25__LB_TYPE_MASK 0x00000070L
+#define MC_XPB_CLG_CFG25__LB_TYPE__SHIFT 0x00000004
+#define MC_XPB_CLG_CFG25__P2P_BAR_MASK 0x00000380L
+#define MC_XPB_CLG_CFG25__P2P_BAR__SHIFT 0x00000007
+#define MC_XPB_CLG_CFG25__SIDE_FLUSH_MASK 0x0003c000L
+#define MC_XPB_CLG_CFG25__SIDE_FLUSH__SHIFT 0x0000000e
+#define MC_XPB_CLG_CFG25__WCB_NUM_MASK 0x0000000fL
+#define MC_XPB_CLG_CFG25__WCB_NUM__SHIFT 0x00000000
+#define MC_XPB_CLG_CFG26__HOST_FLUSH_MASK 0x00003c00L
+#define MC_XPB_CLG_CFG26__HOST_FLUSH__SHIFT 0x0000000a
+#define MC_XPB_CLG_CFG26__LB_TYPE_MASK 0x00000070L
+#define MC_XPB_CLG_CFG26__LB_TYPE__SHIFT 0x00000004
+#define MC_XPB_CLG_CFG26__P2P_BAR_MASK 0x00000380L
+#define MC_XPB_CLG_CFG26__P2P_BAR__SHIFT 0x00000007
+#define MC_XPB_CLG_CFG26__SIDE_FLUSH_MASK 0x0003c000L
+#define MC_XPB_CLG_CFG26__SIDE_FLUSH__SHIFT 0x0000000e
+#define MC_XPB_CLG_CFG26__WCB_NUM_MASK 0x0000000fL
+#define MC_XPB_CLG_CFG26__WCB_NUM__SHIFT 0x00000000
+#define MC_XPB_CLG_CFG27__HOST_FLUSH_MASK 0x00003c00L
+#define MC_XPB_CLG_CFG27__HOST_FLUSH__SHIFT 0x0000000a
+#define MC_XPB_CLG_CFG27__LB_TYPE_MASK 0x00000070L
+#define MC_XPB_CLG_CFG27__LB_TYPE__SHIFT 0x00000004
+#define MC_XPB_CLG_CFG27__P2P_BAR_MASK 0x00000380L
+#define MC_XPB_CLG_CFG27__P2P_BAR__SHIFT 0x00000007
+#define MC_XPB_CLG_CFG27__SIDE_FLUSH_MASK 0x0003c000L
+#define MC_XPB_CLG_CFG27__SIDE_FLUSH__SHIFT 0x0000000e
+#define MC_XPB_CLG_CFG27__WCB_NUM_MASK 0x0000000fL
+#define MC_XPB_CLG_CFG27__WCB_NUM__SHIFT 0x00000000
+#define MC_XPB_CLG_CFG28__HOST_FLUSH_MASK 0x00003c00L
+#define MC_XPB_CLG_CFG28__HOST_FLUSH__SHIFT 0x0000000a
+#define MC_XPB_CLG_CFG28__LB_TYPE_MASK 0x00000070L
+#define MC_XPB_CLG_CFG28__LB_TYPE__SHIFT 0x00000004
+#define MC_XPB_CLG_CFG28__P2P_BAR_MASK 0x00000380L
+#define MC_XPB_CLG_CFG28__P2P_BAR__SHIFT 0x00000007
+#define MC_XPB_CLG_CFG28__SIDE_FLUSH_MASK 0x0003c000L
+#define MC_XPB_CLG_CFG28__SIDE_FLUSH__SHIFT 0x0000000e
+#define MC_XPB_CLG_CFG28__WCB_NUM_MASK 0x0000000fL
+#define MC_XPB_CLG_CFG28__WCB_NUM__SHIFT 0x00000000
+#define MC_XPB_CLG_CFG29__HOST_FLUSH_MASK 0x00003c00L
+#define MC_XPB_CLG_CFG29__HOST_FLUSH__SHIFT 0x0000000a
+#define MC_XPB_CLG_CFG29__LB_TYPE_MASK 0x00000070L
+#define MC_XPB_CLG_CFG29__LB_TYPE__SHIFT 0x00000004
+#define MC_XPB_CLG_CFG29__P2P_BAR_MASK 0x00000380L
+#define MC_XPB_CLG_CFG29__P2P_BAR__SHIFT 0x00000007
+#define MC_XPB_CLG_CFG29__SIDE_FLUSH_MASK 0x0003c000L
+#define MC_XPB_CLG_CFG29__SIDE_FLUSH__SHIFT 0x0000000e
+#define MC_XPB_CLG_CFG29__WCB_NUM_MASK 0x0000000fL
+#define MC_XPB_CLG_CFG29__WCB_NUM__SHIFT 0x00000000
+#define MC_XPB_CLG_CFG2__HOST_FLUSH_MASK 0x00003c00L
+#define MC_XPB_CLG_CFG2__HOST_FLUSH__SHIFT 0x0000000a
+#define MC_XPB_CLG_CFG2__LB_TYPE_MASK 0x00000070L
+#define MC_XPB_CLG_CFG2__LB_TYPE__SHIFT 0x00000004
+#define MC_XPB_CLG_CFG2__P2P_BAR_MASK 0x00000380L
+#define MC_XPB_CLG_CFG2__P2P_BAR__SHIFT 0x00000007
+#define MC_XPB_CLG_CFG2__SIDE_FLUSH_MASK 0x0003c000L
+#define MC_XPB_CLG_CFG2__SIDE_FLUSH__SHIFT 0x0000000e
+#define MC_XPB_CLG_CFG2__WCB_NUM_MASK 0x0000000fL
+#define MC_XPB_CLG_CFG2__WCB_NUM__SHIFT 0x00000000
+#define MC_XPB_CLG_CFG30__HOST_FLUSH_MASK 0x00003c00L
+#define MC_XPB_CLG_CFG30__HOST_FLUSH__SHIFT 0x0000000a
+#define MC_XPB_CLG_CFG30__LB_TYPE_MASK 0x00000070L
+#define MC_XPB_CLG_CFG30__LB_TYPE__SHIFT 0x00000004
+#define MC_XPB_CLG_CFG30__P2P_BAR_MASK 0x00000380L
+#define MC_XPB_CLG_CFG30__P2P_BAR__SHIFT 0x00000007
+#define MC_XPB_CLG_CFG30__SIDE_FLUSH_MASK 0x0003c000L
+#define MC_XPB_CLG_CFG30__SIDE_FLUSH__SHIFT 0x0000000e
+#define MC_XPB_CLG_CFG30__WCB_NUM_MASK 0x0000000fL
+#define MC_XPB_CLG_CFG30__WCB_NUM__SHIFT 0x00000000
+#define MC_XPB_CLG_CFG31__HOST_FLUSH_MASK 0x00003c00L
+#define MC_XPB_CLG_CFG31__HOST_FLUSH__SHIFT 0x0000000a
+#define MC_XPB_CLG_CFG31__LB_TYPE_MASK 0x00000070L
+#define MC_XPB_CLG_CFG31__LB_TYPE__SHIFT 0x00000004
+#define MC_XPB_CLG_CFG31__P2P_BAR_MASK 0x00000380L
+#define MC_XPB_CLG_CFG31__P2P_BAR__SHIFT 0x00000007
+#define MC_XPB_CLG_CFG31__SIDE_FLUSH_MASK 0x0003c000L
+#define MC_XPB_CLG_CFG31__SIDE_FLUSH__SHIFT 0x0000000e
+#define MC_XPB_CLG_CFG31__WCB_NUM_MASK 0x0000000fL
+#define MC_XPB_CLG_CFG31__WCB_NUM__SHIFT 0x00000000
+#define MC_XPB_CLG_CFG32__HOST_FLUSH_MASK 0x00003c00L
+#define MC_XPB_CLG_CFG32__HOST_FLUSH__SHIFT 0x0000000a
+#define MC_XPB_CLG_CFG32__LB_TYPE_MASK 0x00000070L
+#define MC_XPB_CLG_CFG32__LB_TYPE__SHIFT 0x00000004
+#define MC_XPB_CLG_CFG32__P2P_BAR_MASK 0x00000380L
+#define MC_XPB_CLG_CFG32__P2P_BAR__SHIFT 0x00000007
+#define MC_XPB_CLG_CFG32__SIDE_FLUSH_MASK 0x0003c000L
+#define MC_XPB_CLG_CFG32__SIDE_FLUSH__SHIFT 0x0000000e
+#define MC_XPB_CLG_CFG32__WCB_NUM_MASK 0x0000000fL
+#define MC_XPB_CLG_CFG32__WCB_NUM__SHIFT 0x00000000
+#define MC_XPB_CLG_CFG33__HOST_FLUSH_MASK 0x00003c00L
+#define MC_XPB_CLG_CFG33__HOST_FLUSH__SHIFT 0x0000000a
+#define MC_XPB_CLG_CFG33__LB_TYPE_MASK 0x00000070L
+#define MC_XPB_CLG_CFG33__LB_TYPE__SHIFT 0x00000004
+#define MC_XPB_CLG_CFG33__P2P_BAR_MASK 0x00000380L
+#define MC_XPB_CLG_CFG33__P2P_BAR__SHIFT 0x00000007
+#define MC_XPB_CLG_CFG33__SIDE_FLUSH_MASK 0x0003c000L
+#define MC_XPB_CLG_CFG33__SIDE_FLUSH__SHIFT 0x0000000e
+#define MC_XPB_CLG_CFG33__WCB_NUM_MASK 0x0000000fL
+#define MC_XPB_CLG_CFG33__WCB_NUM__SHIFT 0x00000000
+#define MC_XPB_CLG_CFG34__HOST_FLUSH_MASK 0x00003c00L
+#define MC_XPB_CLG_CFG34__HOST_FLUSH__SHIFT 0x0000000a
+#define MC_XPB_CLG_CFG34__LB_TYPE_MASK 0x00000070L
+#define MC_XPB_CLG_CFG34__LB_TYPE__SHIFT 0x00000004
+#define MC_XPB_CLG_CFG34__P2P_BAR_MASK 0x00000380L
+#define MC_XPB_CLG_CFG34__P2P_BAR__SHIFT 0x00000007
+#define MC_XPB_CLG_CFG34__SIDE_FLUSH_MASK 0x0003c000L
+#define MC_XPB_CLG_CFG34__SIDE_FLUSH__SHIFT 0x0000000e
+#define MC_XPB_CLG_CFG34__WCB_NUM_MASK 0x0000000fL
+#define MC_XPB_CLG_CFG34__WCB_NUM__SHIFT 0x00000000
+#define MC_XPB_CLG_CFG35__HOST_FLUSH_MASK 0x00003c00L
+#define MC_XPB_CLG_CFG35__HOST_FLUSH__SHIFT 0x0000000a
+#define MC_XPB_CLG_CFG35__LB_TYPE_MASK 0x00000070L
+#define MC_XPB_CLG_CFG35__LB_TYPE__SHIFT 0x00000004
+#define MC_XPB_CLG_CFG35__P2P_BAR_MASK 0x00000380L
+#define MC_XPB_CLG_CFG35__P2P_BAR__SHIFT 0x00000007
+#define MC_XPB_CLG_CFG35__SIDE_FLUSH_MASK 0x0003c000L
+#define MC_XPB_CLG_CFG35__SIDE_FLUSH__SHIFT 0x0000000e
+#define MC_XPB_CLG_CFG35__WCB_NUM_MASK 0x0000000fL
+#define MC_XPB_CLG_CFG35__WCB_NUM__SHIFT 0x00000000
+#define MC_XPB_CLG_CFG36__HOST_FLUSH_MASK 0x00003c00L
+#define MC_XPB_CLG_CFG36__HOST_FLUSH__SHIFT 0x0000000a
+#define MC_XPB_CLG_CFG36__LB_TYPE_MASK 0x00000070L
+#define MC_XPB_CLG_CFG36__LB_TYPE__SHIFT 0x00000004
+#define MC_XPB_CLG_CFG36__P2P_BAR_MASK 0x00000380L
+#define MC_XPB_CLG_CFG36__P2P_BAR__SHIFT 0x00000007
+#define MC_XPB_CLG_CFG36__SIDE_FLUSH_MASK 0x0003c000L
+#define MC_XPB_CLG_CFG36__SIDE_FLUSH__SHIFT 0x0000000e
+#define MC_XPB_CLG_CFG36__WCB_NUM_MASK 0x0000000fL
+#define MC_XPB_CLG_CFG36__WCB_NUM__SHIFT 0x00000000
+#define MC_XPB_CLG_CFG3__HOST_FLUSH_MASK 0x00003c00L
+#define MC_XPB_CLG_CFG3__HOST_FLUSH__SHIFT 0x0000000a
+#define MC_XPB_CLG_CFG3__LB_TYPE_MASK 0x00000070L
+#define MC_XPB_CLG_CFG3__LB_TYPE__SHIFT 0x00000004
+#define MC_XPB_CLG_CFG3__P2P_BAR_MASK 0x00000380L
+#define MC_XPB_CLG_CFG3__P2P_BAR__SHIFT 0x00000007
+#define MC_XPB_CLG_CFG3__SIDE_FLUSH_MASK 0x0003c000L
+#define MC_XPB_CLG_CFG3__SIDE_FLUSH__SHIFT 0x0000000e
+#define MC_XPB_CLG_CFG3__WCB_NUM_MASK 0x0000000fL
+#define MC_XPB_CLG_CFG3__WCB_NUM__SHIFT 0x00000000
+#define MC_XPB_CLG_CFG4__HOST_FLUSH_MASK 0x00003c00L
+#define MC_XPB_CLG_CFG4__HOST_FLUSH__SHIFT 0x0000000a
+#define MC_XPB_CLG_CFG4__LB_TYPE_MASK 0x00000070L
+#define MC_XPB_CLG_CFG4__LB_TYPE__SHIFT 0x00000004
+#define MC_XPB_CLG_CFG4__P2P_BAR_MASK 0x00000380L
+#define MC_XPB_CLG_CFG4__P2P_BAR__SHIFT 0x00000007
+#define MC_XPB_CLG_CFG4__SIDE_FLUSH_MASK 0x0003c000L
+#define MC_XPB_CLG_CFG4__SIDE_FLUSH__SHIFT 0x0000000e
+#define MC_XPB_CLG_CFG4__WCB_NUM_MASK 0x0000000fL
+#define MC_XPB_CLG_CFG4__WCB_NUM__SHIFT 0x00000000
+#define MC_XPB_CLG_CFG5__HOST_FLUSH_MASK 0x00003c00L
+#define MC_XPB_CLG_CFG5__HOST_FLUSH__SHIFT 0x0000000a
+#define MC_XPB_CLG_CFG5__LB_TYPE_MASK 0x00000070L
+#define MC_XPB_CLG_CFG5__LB_TYPE__SHIFT 0x00000004
+#define MC_XPB_CLG_CFG5__P2P_BAR_MASK 0x00000380L
+#define MC_XPB_CLG_CFG5__P2P_BAR__SHIFT 0x00000007
+#define MC_XPB_CLG_CFG5__SIDE_FLUSH_MASK 0x0003c000L
+#define MC_XPB_CLG_CFG5__SIDE_FLUSH__SHIFT 0x0000000e
+#define MC_XPB_CLG_CFG5__WCB_NUM_MASK 0x0000000fL
+#define MC_XPB_CLG_CFG5__WCB_NUM__SHIFT 0x00000000
+#define MC_XPB_CLG_CFG6__HOST_FLUSH_MASK 0x00003c00L
+#define MC_XPB_CLG_CFG6__HOST_FLUSH__SHIFT 0x0000000a
+#define MC_XPB_CLG_CFG6__LB_TYPE_MASK 0x00000070L
+#define MC_XPB_CLG_CFG6__LB_TYPE__SHIFT 0x00000004
+#define MC_XPB_CLG_CFG6__P2P_BAR_MASK 0x00000380L
+#define MC_XPB_CLG_CFG6__P2P_BAR__SHIFT 0x00000007
+#define MC_XPB_CLG_CFG6__SIDE_FLUSH_MASK 0x0003c000L
+#define MC_XPB_CLG_CFG6__SIDE_FLUSH__SHIFT 0x0000000e
+#define MC_XPB_CLG_CFG6__WCB_NUM_MASK 0x0000000fL
+#define MC_XPB_CLG_CFG6__WCB_NUM__SHIFT 0x00000000
+#define MC_XPB_CLG_CFG7__HOST_FLUSH_MASK 0x00003c00L
+#define MC_XPB_CLG_CFG7__HOST_FLUSH__SHIFT 0x0000000a
+#define MC_XPB_CLG_CFG7__LB_TYPE_MASK 0x00000070L
+#define MC_XPB_CLG_CFG7__LB_TYPE__SHIFT 0x00000004
+#define MC_XPB_CLG_CFG7__P2P_BAR_MASK 0x00000380L
+#define MC_XPB_CLG_CFG7__P2P_BAR__SHIFT 0x00000007
+#define MC_XPB_CLG_CFG7__SIDE_FLUSH_MASK 0x0003c000L
+#define MC_XPB_CLG_CFG7__SIDE_FLUSH__SHIFT 0x0000000e
+#define MC_XPB_CLG_CFG7__WCB_NUM_MASK 0x0000000fL
+#define MC_XPB_CLG_CFG7__WCB_NUM__SHIFT 0x00000000
+#define MC_XPB_CLG_CFG8__HOST_FLUSH_MASK 0x00003c00L
+#define MC_XPB_CLG_CFG8__HOST_FLUSH__SHIFT 0x0000000a
+#define MC_XPB_CLG_CFG8__LB_TYPE_MASK 0x00000070L
+#define MC_XPB_CLG_CFG8__LB_TYPE__SHIFT 0x00000004
+#define MC_XPB_CLG_CFG8__P2P_BAR_MASK 0x00000380L
+#define MC_XPB_CLG_CFG8__P2P_BAR__SHIFT 0x00000007
+#define MC_XPB_CLG_CFG8__SIDE_FLUSH_MASK 0x0003c000L
+#define MC_XPB_CLG_CFG8__SIDE_FLUSH__SHIFT 0x0000000e
+#define MC_XPB_CLG_CFG8__WCB_NUM_MASK 0x0000000fL
+#define MC_XPB_CLG_CFG8__WCB_NUM__SHIFT 0x00000000
+#define MC_XPB_CLG_CFG9__HOST_FLUSH_MASK 0x00003c00L
+#define MC_XPB_CLG_CFG9__HOST_FLUSH__SHIFT 0x0000000a
+#define MC_XPB_CLG_CFG9__LB_TYPE_MASK 0x00000070L
+#define MC_XPB_CLG_CFG9__LB_TYPE__SHIFT 0x00000004
+#define MC_XPB_CLG_CFG9__P2P_BAR_MASK 0x00000380L
+#define MC_XPB_CLG_CFG9__P2P_BAR__SHIFT 0x00000007
+#define MC_XPB_CLG_CFG9__SIDE_FLUSH_MASK 0x0003c000L
+#define MC_XPB_CLG_CFG9__SIDE_FLUSH__SHIFT 0x0000000e
+#define MC_XPB_CLG_CFG9__WCB_NUM_MASK 0x0000000fL
+#define MC_XPB_CLG_CFG9__WCB_NUM__SHIFT 0x00000000
+#define MC_XPB_CLG_EXTRA__CMP0_MASK 0x000000ffL
+#define MC_XPB_CLG_EXTRA__CMP0__SHIFT 0x00000000
+#define MC_XPB_CLG_EXTRA__CMP1_MASK 0x01fe0000L
+#define MC_XPB_CLG_EXTRA__CMP1__SHIFT 0x00000011
+#define MC_XPB_CLG_EXTRA__MSK0_MASK 0x0000ff00L
+#define MC_XPB_CLG_EXTRA__MSK0__SHIFT 0x00000008
+#define MC_XPB_CLG_EXTRA_RD__CMP0_MASK 0x000000ffL
+#define MC_XPB_CLG_EXTRA_RD__CMP0__SHIFT 0x00000000
+#define MC_XPB_CLG_EXTRA_RD__CMP1_MASK 0x01fe0000L
+#define MC_XPB_CLG_EXTRA_RD__CMP1__SHIFT 0x00000011
+#define MC_XPB_CLG_EXTRA_RD__MSK0_MASK 0x0000ff00L
+#define MC_XPB_CLG_EXTRA_RD__MSK0__SHIFT 0x00000008
+#define MC_XPB_CLG_EXTRA_RD__VLD0_MASK 0x00010000L
+#define MC_XPB_CLG_EXTRA_RD__VLD0__SHIFT 0x00000010
+#define MC_XPB_CLG_EXTRA_RD__VLD1_MASK 0x02000000L
+#define MC_XPB_CLG_EXTRA_RD__VLD1__SHIFT 0x00000019
+#define MC_XPB_CLG_EXTRA__VLD0_MASK 0x00010000L
+#define MC_XPB_CLG_EXTRA__VLD0__SHIFT 0x00000010
+#define MC_XPB_CLG_EXTRA__VLD1_MASK 0x02000000L
+#define MC_XPB_CLG_EXTRA__VLD1__SHIFT 0x00000019
+#define MC_XPB_CLK_GAT__ENABLE_MASK 0x00040000L
+#define MC_XPB_CLK_GAT__ENABLE__SHIFT 0x00000012
+#define MC_XPB_CLK_GAT__MEM_LS_ENABLE_MASK 0x00080000L
+#define MC_XPB_CLK_GAT__MEM_LS_ENABLE__SHIFT 0x00000013
+#define MC_XPB_CLK_GAT__OFFDLY_MASK 0x00000fc0L
+#define MC_XPB_CLK_GAT__OFFDLY__SHIFT 0x00000006
+#define MC_XPB_CLK_GAT__ONDLY_MASK 0x0000003fL
+#define MC_XPB_CLK_GAT__ONDLY__SHIFT 0x00000000
+#define MC_XPB_CLK_GAT__RDYDLY_MASK 0x0003f000L
+#define MC_XPB_CLK_GAT__RDYDLY__SHIFT 0x0000000c
+#define MC_XPB_INTF_CFG2__RPB_RDREQ_CRD_MASK 0x000000ffL
+#define MC_XPB_INTF_CFG2__RPB_RDREQ_CRD__SHIFT 0x00000000
+#define MC_XPB_INTF_CFG__BIF_MEM_SNOOP_SEL_MASK 0x02000000L
+#define MC_XPB_INTF_CFG__BIF_MEM_SNOOP_SEL__SHIFT 0x00000019
+#define MC_XPB_INTF_CFG__BIF_MEM_SNOOP_VAL_MASK 0x04000000L
+#define MC_XPB_INTF_CFG__BIF_MEM_SNOOP_VAL__SHIFT 0x0000001a
+#define MC_XPB_INTF_CFG__BIF_REG_SNOOP_SEL_MASK 0x00800000L
+#define MC_XPB_INTF_CFG__BIF_REG_SNOOP_SEL__SHIFT 0x00000017
+#define MC_XPB_INTF_CFG__BIF_REG_SNOOP_VAL_MASK 0x01000000L
+#define MC_XPB_INTF_CFG__BIF_REG_SNOOP_VAL__SHIFT 0x00000018
+#define MC_XPB_INTF_CFG__MC_WRRET_ASK_MASK 0x0000ff00L
+#define MC_XPB_INTF_CFG__MC_WRRET_ASK__SHIFT 0x00000008
+#define MC_XPB_INTF_CFG__RPB_WRREQ_CRD_MASK 0x000000ffL
+#define MC_XPB_INTF_CFG__RPB_WRREQ_CRD__SHIFT 0x00000000
+#define MC_XPB_INTF_CFG__XSP_ORDERING_SEL_MASK 0x40000000L
+#define MC_XPB_INTF_CFG__XSP_ORDERING_SEL__SHIFT 0x0000001e
+#define MC_XPB_INTF_CFG__XSP_ORDERING_VAL_MASK 0x80000000L
+#define MC_XPB_INTF_CFG__XSP_ORDERING_VAL__SHIFT 0x0000001f
+#define MC_XPB_INTF_CFG__XSP_REQ_CRD_MASK 0x007f0000L
+#define MC_XPB_INTF_CFG__XSP_REQ_CRD__SHIFT 0x00000010
+#define MC_XPB_INTF_CFG__XSP_SNOOP_SEL_MASK 0x18000000L
+#define MC_XPB_INTF_CFG__XSP_SNOOP_SEL__SHIFT 0x0000001b
+#define MC_XPB_INTF_CFG__XSP_SNOOP_VAL_MASK 0x20000000L
+#define MC_XPB_INTF_CFG__XSP_SNOOP_VAL__SHIFT 0x0000001d
+#define MC_XPB_INTF_STS__CNS_BUF_BUSY_MASK 0x00040000L
+#define MC_XPB_INTF_STS__CNS_BUF_BUSY__SHIFT 0x00000012
+#define MC_XPB_INTF_STS__CNS_BUF_FULL_MASK 0x00020000L
+#define MC_XPB_INTF_STS__CNS_BUF_FULL__SHIFT 0x00000011
+#define MC_XPB_INTF_STS__HOP_ATTR_BUF_FULL_MASK 0x00010000L
+#define MC_XPB_INTF_STS__HOP_ATTR_BUF_FULL__SHIFT 0x00000010
+#define MC_XPB_INTF_STS__HOP_DATA_BUF_FULL_MASK 0x00008000L
+#define MC_XPB_INTF_STS__HOP_DATA_BUF_FULL__SHIFT 0x0000000f
+#define MC_XPB_INTF_STS__RPB_RDREQ_CRD_MASK 0x07f80000L
+#define MC_XPB_INTF_STS__RPB_RDREQ_CRD__SHIFT 0x00000013
+#define MC_XPB_INTF_STS__RPB_WRREQ_CRD_MASK 0x000000ffL
+#define MC_XPB_INTF_STS__RPB_WRREQ_CRD__SHIFT 0x00000000
+#define MC_XPB_INTF_STS__XSP_REQ_CRD_MASK 0x00007f00L
+#define MC_XPB_INTF_STS__XSP_REQ_CRD__SHIFT 0x00000008
+#define MC_XPB_LB_ADDR__CMP0_MASK 0x000003ffL
+#define MC_XPB_LB_ADDR__CMP0__SHIFT 0x00000000
+#define MC_XPB_LB_ADDR__CMP1_MASK 0x03f00000L
+#define MC_XPB_LB_ADDR__CMP1__SHIFT 0x00000014
+#define MC_XPB_LB_ADDR__MASK0_MASK 0x000ffc00L
+#define MC_XPB_LB_ADDR__MASK0__SHIFT 0x0000000a
+#define MC_XPB_LB_ADDR__MASK1_MASK 0xfc000000L
+#define MC_XPB_LB_ADDR__MASK1__SHIFT 0x0000001a
+#define MC_XPB_MAP_INVERT_FLUSH_NUM_LSB__ALTER_FLUSH_NUM_MASK 0x0000ffffL
+#define MC_XPB_MAP_INVERT_FLUSH_NUM_LSB__ALTER_FLUSH_NUM__SHIFT 0x00000000
+#define MC_XPB_MISC_CFG__FIELDNAME0_MASK 0x000000ffL
+#define MC_XPB_MISC_CFG__FIELDNAME0__SHIFT 0x00000000
+#define MC_XPB_MISC_CFG__FIELDNAME1_MASK 0x0000ff00L
+#define MC_XPB_MISC_CFG__FIELDNAME1__SHIFT 0x00000008
+#define MC_XPB_MISC_CFG__FIELDNAME2_MASK 0x00ff0000L
+#define MC_XPB_MISC_CFG__FIELDNAME2__SHIFT 0x00000010
+#define MC_XPB_MISC_CFG__FIELDNAME3_MASK 0x7f000000L
+#define MC_XPB_MISC_CFG__FIELDNAME3__SHIFT 0x00000018
+#define MC_XPB_MISC_CFG__TRIGGERNAME_MASK 0x80000000L
+#define MC_XPB_MISC_CFG__TRIGGERNAME__SHIFT 0x0000001f
+#define MC_XPB_P2P_BAR0__ADDRESS_MASK 0xffff0000L
+#define MC_XPB_P2P_BAR0__ADDRESS__SHIFT 0x00000010
+#define MC_XPB_P2P_BAR0__COMPRESS_DIS_MASK 0x00004000L
+#define MC_XPB_P2P_BAR0__COMPRESS_DIS__SHIFT 0x0000000e
+#define MC_XPB_P2P_BAR0__HOST_FLUSH_MASK 0x0000000fL
+#define MC_XPB_P2P_BAR0__HOST_FLUSH__SHIFT 0x00000000
+#define MC_XPB_P2P_BAR0__MEM_SYS_BAR_MASK 0x00000f00L
+#define MC_XPB_P2P_BAR0__MEM_SYS_BAR__SHIFT 0x00000008
+#define MC_XPB_P2P_BAR0__REG_SYS_BAR_MASK 0x000000f0L
+#define MC_XPB_P2P_BAR0__REG_SYS_BAR__SHIFT 0x00000004
+#define MC_XPB_P2P_BAR0__RESERVED_MASK 0x00008000L
+#define MC_XPB_P2P_BAR0__RESERVED__SHIFT 0x0000000f
+#define MC_XPB_P2P_BAR0__SEND_DIS_MASK 0x00002000L
+#define MC_XPB_P2P_BAR0__SEND_DIS__SHIFT 0x0000000d
+#define MC_XPB_P2P_BAR0__VALID_MASK 0x00001000L
+#define MC_XPB_P2P_BAR0__VALID__SHIFT 0x0000000c
+#define MC_XPB_P2P_BAR1__ADDRESS_MASK 0xffff0000L
+#define MC_XPB_P2P_BAR1__ADDRESS__SHIFT 0x00000010
+#define MC_XPB_P2P_BAR1__COMPRESS_DIS_MASK 0x00004000L
+#define MC_XPB_P2P_BAR1__COMPRESS_DIS__SHIFT 0x0000000e
+#define MC_XPB_P2P_BAR1__HOST_FLUSH_MASK 0x0000000fL
+#define MC_XPB_P2P_BAR1__HOST_FLUSH__SHIFT 0x00000000
+#define MC_XPB_P2P_BAR1__MEM_SYS_BAR_MASK 0x00000f00L
+#define MC_XPB_P2P_BAR1__MEM_SYS_BAR__SHIFT 0x00000008
+#define MC_XPB_P2P_BAR1__REG_SYS_BAR_MASK 0x000000f0L
+#define MC_XPB_P2P_BAR1__REG_SYS_BAR__SHIFT 0x00000004
+#define MC_XPB_P2P_BAR1__RESERVED_MASK 0x00008000L
+#define MC_XPB_P2P_BAR1__RESERVED__SHIFT 0x0000000f
+#define MC_XPB_P2P_BAR1__SEND_DIS_MASK 0x00002000L
+#define MC_XPB_P2P_BAR1__SEND_DIS__SHIFT 0x0000000d
+#define MC_XPB_P2P_BAR1__VALID_MASK 0x00001000L
+#define MC_XPB_P2P_BAR1__VALID__SHIFT 0x0000000c
+#define MC_XPB_P2P_BAR2__ADDRESS_MASK 0xffff0000L
+#define MC_XPB_P2P_BAR2__ADDRESS__SHIFT 0x00000010
+#define MC_XPB_P2P_BAR2__COMPRESS_DIS_MASK 0x00004000L
+#define MC_XPB_P2P_BAR2__COMPRESS_DIS__SHIFT 0x0000000e
+#define MC_XPB_P2P_BAR2__HOST_FLUSH_MASK 0x0000000fL
+#define MC_XPB_P2P_BAR2__HOST_FLUSH__SHIFT 0x00000000
+#define MC_XPB_P2P_BAR2__MEM_SYS_BAR_MASK 0x00000f00L
+#define MC_XPB_P2P_BAR2__MEM_SYS_BAR__SHIFT 0x00000008
+#define MC_XPB_P2P_BAR2__REG_SYS_BAR_MASK 0x000000f0L
+#define MC_XPB_P2P_BAR2__REG_SYS_BAR__SHIFT 0x00000004
+#define MC_XPB_P2P_BAR2__RESERVED_MASK 0x00008000L
+#define MC_XPB_P2P_BAR2__RESERVED__SHIFT 0x0000000f
+#define MC_XPB_P2P_BAR2__SEND_DIS_MASK 0x00002000L
+#define MC_XPB_P2P_BAR2__SEND_DIS__SHIFT 0x0000000d
+#define MC_XPB_P2P_BAR2__VALID_MASK 0x00001000L
+#define MC_XPB_P2P_BAR2__VALID__SHIFT 0x0000000c
+#define MC_XPB_P2P_BAR3__ADDRESS_MASK 0xffff0000L
+#define MC_XPB_P2P_BAR3__ADDRESS__SHIFT 0x00000010
+#define MC_XPB_P2P_BAR3__COMPRESS_DIS_MASK 0x00004000L
+#define MC_XPB_P2P_BAR3__COMPRESS_DIS__SHIFT 0x0000000e
+#define MC_XPB_P2P_BAR3__HOST_FLUSH_MASK 0x0000000fL
+#define MC_XPB_P2P_BAR3__HOST_FLUSH__SHIFT 0x00000000
+#define MC_XPB_P2P_BAR3__MEM_SYS_BAR_MASK 0x00000f00L
+#define MC_XPB_P2P_BAR3__MEM_SYS_BAR__SHIFT 0x00000008
+#define MC_XPB_P2P_BAR3__REG_SYS_BAR_MASK 0x000000f0L
+#define MC_XPB_P2P_BAR3__REG_SYS_BAR__SHIFT 0x00000004
+#define MC_XPB_P2P_BAR3__RESERVED_MASK 0x00008000L
+#define MC_XPB_P2P_BAR3__RESERVED__SHIFT 0x0000000f
+#define MC_XPB_P2P_BAR3__SEND_DIS_MASK 0x00002000L
+#define MC_XPB_P2P_BAR3__SEND_DIS__SHIFT 0x0000000d
+#define MC_XPB_P2P_BAR3__VALID_MASK 0x00001000L
+#define MC_XPB_P2P_BAR3__VALID__SHIFT 0x0000000c
+#define MC_XPB_P2P_BAR4__ADDRESS_MASK 0xffff0000L
+#define MC_XPB_P2P_BAR4__ADDRESS__SHIFT 0x00000010
+#define MC_XPB_P2P_BAR4__COMPRESS_DIS_MASK 0x00004000L
+#define MC_XPB_P2P_BAR4__COMPRESS_DIS__SHIFT 0x0000000e
+#define MC_XPB_P2P_BAR4__HOST_FLUSH_MASK 0x0000000fL
+#define MC_XPB_P2P_BAR4__HOST_FLUSH__SHIFT 0x00000000
+#define MC_XPB_P2P_BAR4__MEM_SYS_BAR_MASK 0x00000f00L
+#define MC_XPB_P2P_BAR4__MEM_SYS_BAR__SHIFT 0x00000008
+#define MC_XPB_P2P_BAR4__REG_SYS_BAR_MASK 0x000000f0L
+#define MC_XPB_P2P_BAR4__REG_SYS_BAR__SHIFT 0x00000004
+#define MC_XPB_P2P_BAR4__RESERVED_MASK 0x00008000L
+#define MC_XPB_P2P_BAR4__RESERVED__SHIFT 0x0000000f
+#define MC_XPB_P2P_BAR4__SEND_DIS_MASK 0x00002000L
+#define MC_XPB_P2P_BAR4__SEND_DIS__SHIFT 0x0000000d
+#define MC_XPB_P2P_BAR4__VALID_MASK 0x00001000L
+#define MC_XPB_P2P_BAR4__VALID__SHIFT 0x0000000c
+#define MC_XPB_P2P_BAR5__ADDRESS_MASK 0xffff0000L
+#define MC_XPB_P2P_BAR5__ADDRESS__SHIFT 0x00000010
+#define MC_XPB_P2P_BAR5__COMPRESS_DIS_MASK 0x00004000L
+#define MC_XPB_P2P_BAR5__COMPRESS_DIS__SHIFT 0x0000000e
+#define MC_XPB_P2P_BAR5__HOST_FLUSH_MASK 0x0000000fL
+#define MC_XPB_P2P_BAR5__HOST_FLUSH__SHIFT 0x00000000
+#define MC_XPB_P2P_BAR5__MEM_SYS_BAR_MASK 0x00000f00L
+#define MC_XPB_P2P_BAR5__MEM_SYS_BAR__SHIFT 0x00000008
+#define MC_XPB_P2P_BAR5__REG_SYS_BAR_MASK 0x000000f0L
+#define MC_XPB_P2P_BAR5__REG_SYS_BAR__SHIFT 0x00000004
+#define MC_XPB_P2P_BAR5__RESERVED_MASK 0x00008000L
+#define MC_XPB_P2P_BAR5__RESERVED__SHIFT 0x0000000f
+#define MC_XPB_P2P_BAR5__SEND_DIS_MASK 0x00002000L
+#define MC_XPB_P2P_BAR5__SEND_DIS__SHIFT 0x0000000d
+#define MC_XPB_P2P_BAR5__VALID_MASK 0x00001000L
+#define MC_XPB_P2P_BAR5__VALID__SHIFT 0x0000000c
+#define MC_XPB_P2P_BAR6__ADDRESS_MASK 0xffff0000L
+#define MC_XPB_P2P_BAR6__ADDRESS__SHIFT 0x00000010
+#define MC_XPB_P2P_BAR6__COMPRESS_DIS_MASK 0x00004000L
+#define MC_XPB_P2P_BAR6__COMPRESS_DIS__SHIFT 0x0000000e
+#define MC_XPB_P2P_BAR6__HOST_FLUSH_MASK 0x0000000fL
+#define MC_XPB_P2P_BAR6__HOST_FLUSH__SHIFT 0x00000000
+#define MC_XPB_P2P_BAR6__MEM_SYS_BAR_MASK 0x00000f00L
+#define MC_XPB_P2P_BAR6__MEM_SYS_BAR__SHIFT 0x00000008
+#define MC_XPB_P2P_BAR6__REG_SYS_BAR_MASK 0x000000f0L
+#define MC_XPB_P2P_BAR6__REG_SYS_BAR__SHIFT 0x00000004
+#define MC_XPB_P2P_BAR6__RESERVED_MASK 0x00008000L
+#define MC_XPB_P2P_BAR6__RESERVED__SHIFT 0x0000000f
+#define MC_XPB_P2P_BAR6__SEND_DIS_MASK 0x00002000L
+#define MC_XPB_P2P_BAR6__SEND_DIS__SHIFT 0x0000000d
+#define MC_XPB_P2P_BAR6__VALID_MASK 0x00001000L
+#define MC_XPB_P2P_BAR6__VALID__SHIFT 0x0000000c
+#define MC_XPB_P2P_BAR7__ADDRESS_MASK 0xffff0000L
+#define MC_XPB_P2P_BAR7__ADDRESS__SHIFT 0x00000010
+#define MC_XPB_P2P_BAR7__COMPRESS_DIS_MASK 0x00004000L
+#define MC_XPB_P2P_BAR7__COMPRESS_DIS__SHIFT 0x0000000e
+#define MC_XPB_P2P_BAR7__HOST_FLUSH_MASK 0x0000000fL
+#define MC_XPB_P2P_BAR7__HOST_FLUSH__SHIFT 0x00000000
+#define MC_XPB_P2P_BAR7__MEM_SYS_BAR_MASK 0x00000f00L
+#define MC_XPB_P2P_BAR7__MEM_SYS_BAR__SHIFT 0x00000008
+#define MC_XPB_P2P_BAR7__REG_SYS_BAR_MASK 0x000000f0L
+#define MC_XPB_P2P_BAR7__REG_SYS_BAR__SHIFT 0x00000004
+#define MC_XPB_P2P_BAR7__RESERVED_MASK 0x00008000L
+#define MC_XPB_P2P_BAR7__RESERVED__SHIFT 0x0000000f
+#define MC_XPB_P2P_BAR7__SEND_DIS_MASK 0x00002000L
+#define MC_XPB_P2P_BAR7__SEND_DIS__SHIFT 0x0000000d
+#define MC_XPB_P2P_BAR7__VALID_MASK 0x00001000L
+#define MC_XPB_P2P_BAR7__VALID__SHIFT 0x0000000c
+#define MC_XPB_P2P_BAR_CFG__ADDR_SIZE_MASK 0x0000000fL
+#define MC_XPB_P2P_BAR_CFG__ADDR_SIZE__SHIFT 0x00000000
+#define MC_XPB_P2P_BAR_CFG__ATC_TRANSLATED_MASK 0x00001000L
+#define MC_XPB_P2P_BAR_CFG__ATC_TRANSLATED__SHIFT 0x0000000c
+#define MC_XPB_P2P_BAR_CFG__COMPRESS_DIS_MASK 0x00000100L
+#define MC_XPB_P2P_BAR_CFG__COMPRESS_DIS__SHIFT 0x00000008
+#define MC_XPB_P2P_BAR_CFG__RD_EN_MASK 0x00000800L
+#define MC_XPB_P2P_BAR_CFG__RD_EN__SHIFT 0x0000000b
+#define MC_XPB_P2P_BAR_CFG__REGBAR_FROM_SYSBAR_MASK 0x00000400L
+#define MC_XPB_P2P_BAR_CFG__REGBAR_FROM_SYSBAR__SHIFT 0x0000000a
+#define MC_XPB_P2P_BAR_CFG__SEND_BAR_MASK 0x00000030L
+#define MC_XPB_P2P_BAR_CFG__SEND_BAR__SHIFT 0x00000004
+#define MC_XPB_P2P_BAR_CFG__SEND_DIS_MASK 0x00000080L
+#define MC_XPB_P2P_BAR_CFG__SEND_DIS__SHIFT 0x00000007
+#define MC_XPB_P2P_BAR_CFG__SNOOP_MASK 0x00000040L
+#define MC_XPB_P2P_BAR_CFG__SNOOP__SHIFT 0x00000006
+#define MC_XPB_P2P_BAR_CFG__UPDATE_DIS_MASK 0x00000200L
+#define MC_XPB_P2P_BAR_CFG__UPDATE_DIS__SHIFT 0x00000009
+#define MC_XPB_P2P_BAR_DEBUG__HOST_FLUSH_MASK 0x00000f00L
+#define MC_XPB_P2P_BAR_DEBUG__HOST_FLUSH__SHIFT 0x00000008
+#define MC_XPB_P2P_BAR_DEBUG__MEM_SYS_BAR_MASK 0x0000f000L
+#define MC_XPB_P2P_BAR_DEBUG__MEM_SYS_BAR__SHIFT 0x0000000c
+#define MC_XPB_P2P_BAR_DEBUG__SEL_MASK 0x000000ffL
+#define MC_XPB_P2P_BAR_DEBUG__SEL__SHIFT 0x00000000
+#define MC_XPB_P2P_BAR_DELTA_ABOVE__DELTA_MASK 0x0fffff00L
+#define MC_XPB_P2P_BAR_DELTA_ABOVE__DELTA__SHIFT 0x00000008
+#define MC_XPB_P2P_BAR_DELTA_ABOVE__EN_MASK 0x000000ffL
+#define MC_XPB_P2P_BAR_DELTA_ABOVE__EN__SHIFT 0x00000000
+#define MC_XPB_P2P_BAR_DELTA_BELOW__DELTA_MASK 0x0fffff00L
+#define MC_XPB_P2P_BAR_DELTA_BELOW__DELTA__SHIFT 0x00000008
+#define MC_XPB_P2P_BAR_DELTA_BELOW__EN_MASK 0x000000ffL
+#define MC_XPB_P2P_BAR_DELTA_BELOW__EN__SHIFT 0x00000000
+#define MC_XPB_P2P_BAR_SETUP__ADDRESS_MASK 0xffff0000L
+#define MC_XPB_P2P_BAR_SETUP__ADDRESS__SHIFT 0x00000010
+#define MC_XPB_P2P_BAR_SETUP__COMPRESS_DIS_MASK 0x00004000L
+#define MC_XPB_P2P_BAR_SETUP__COMPRESS_DIS__SHIFT 0x0000000e
+#define MC_XPB_P2P_BAR_SETUP__REG_SYS_BAR_MASK 0x00000f00L
+#define MC_XPB_P2P_BAR_SETUP__REG_SYS_BAR__SHIFT 0x00000008
+#define MC_XPB_P2P_BAR_SETUP__RESERVED_MASK 0x00008000L
+#define MC_XPB_P2P_BAR_SETUP__RESERVED__SHIFT 0x0000000f
+#define MC_XPB_P2P_BAR_SETUP__SEL_MASK 0x000000ffL
+#define MC_XPB_P2P_BAR_SETUP__SEL__SHIFT 0x00000000
+#define MC_XPB_P2P_BAR_SETUP__SEND_DIS_MASK 0x00002000L
+#define MC_XPB_P2P_BAR_SETUP__SEND_DIS__SHIFT 0x0000000d
+#define MC_XPB_P2P_BAR_SETUP__VALID_MASK 0x00001000L
+#define MC_XPB_P2P_BAR_SETUP__VALID__SHIFT 0x0000000c
+#define MC_XPB_PEER_SYS_BAR0__ADDR_MASK 0x07fffffcL
+#define MC_XPB_PEER_SYS_BAR0__ADDR__SHIFT 0x00000002
+#define MC_XPB_PEER_SYS_BAR0__SIDE_OK_MASK 0x00000002L
+#define MC_XPB_PEER_SYS_BAR0__SIDE_OK__SHIFT 0x00000001
+#define MC_XPB_PEER_SYS_BAR0__VALID_MASK 0x00000001L
+#define MC_XPB_PEER_SYS_BAR0__VALID__SHIFT 0x00000000
+#define MC_XPB_PEER_SYS_BAR1__ADDR_MASK 0x07fffffcL
+#define MC_XPB_PEER_SYS_BAR1__ADDR__SHIFT 0x00000002
+#define MC_XPB_PEER_SYS_BAR1__SIDE_OK_MASK 0x00000002L
+#define MC_XPB_PEER_SYS_BAR1__SIDE_OK__SHIFT 0x00000001
+#define MC_XPB_PEER_SYS_BAR1__VALID_MASK 0x00000001L
+#define MC_XPB_PEER_SYS_BAR1__VALID__SHIFT 0x00000000
+#define MC_XPB_PEER_SYS_BAR2__ADDR_MASK 0x07fffffcL
+#define MC_XPB_PEER_SYS_BAR2__ADDR__SHIFT 0x00000002
+#define MC_XPB_PEER_SYS_BAR2__SIDE_OK_MASK 0x00000002L
+#define MC_XPB_PEER_SYS_BAR2__SIDE_OK__SHIFT 0x00000001
+#define MC_XPB_PEER_SYS_BAR2__VALID_MASK 0x00000001L
+#define MC_XPB_PEER_SYS_BAR2__VALID__SHIFT 0x00000000
+#define MC_XPB_PEER_SYS_BAR3__ADDR_MASK 0x07fffffcL
+#define MC_XPB_PEER_SYS_BAR3__ADDR__SHIFT 0x00000002
+#define MC_XPB_PEER_SYS_BAR3__SIDE_OK_MASK 0x00000002L
+#define MC_XPB_PEER_SYS_BAR3__SIDE_OK__SHIFT 0x00000001
+#define MC_XPB_PEER_SYS_BAR3__VALID_MASK 0x00000001L
+#define MC_XPB_PEER_SYS_BAR3__VALID__SHIFT 0x00000000
+#define MC_XPB_PEER_SYS_BAR4__ADDR_MASK 0x07fffffcL
+#define MC_XPB_PEER_SYS_BAR4__ADDR__SHIFT 0x00000002
+#define MC_XPB_PEER_SYS_BAR4__SIDE_OK_MASK 0x00000002L
+#define MC_XPB_PEER_SYS_BAR4__SIDE_OK__SHIFT 0x00000001
+#define MC_XPB_PEER_SYS_BAR4__VALID_MASK 0x00000001L
+#define MC_XPB_PEER_SYS_BAR4__VALID__SHIFT 0x00000000
+#define MC_XPB_PEER_SYS_BAR5__ADDR_MASK 0x07fffffcL
+#define MC_XPB_PEER_SYS_BAR5__ADDR__SHIFT 0x00000002
+#define MC_XPB_PEER_SYS_BAR5__SIDE_OK_MASK 0x00000002L
+#define MC_XPB_PEER_SYS_BAR5__SIDE_OK__SHIFT 0x00000001
+#define MC_XPB_PEER_SYS_BAR5__VALID_MASK 0x00000001L
+#define MC_XPB_PEER_SYS_BAR5__VALID__SHIFT 0x00000000
+#define MC_XPB_PEER_SYS_BAR6__ADDR_MASK 0x07fffffcL
+#define MC_XPB_PEER_SYS_BAR6__ADDR__SHIFT 0x00000002
+#define MC_XPB_PEER_SYS_BAR6__SIDE_OK_MASK 0x00000002L
+#define MC_XPB_PEER_SYS_BAR6__SIDE_OK__SHIFT 0x00000001
+#define MC_XPB_PEER_SYS_BAR6__VALID_MASK 0x00000001L
+#define MC_XPB_PEER_SYS_BAR6__VALID__SHIFT 0x00000000
+#define MC_XPB_PEER_SYS_BAR7__ADDR_MASK 0x07fffffcL
+#define MC_XPB_PEER_SYS_BAR7__ADDR__SHIFT 0x00000002
+#define MC_XPB_PEER_SYS_BAR7__SIDE_OK_MASK 0x00000002L
+#define MC_XPB_PEER_SYS_BAR7__SIDE_OK__SHIFT 0x00000001
+#define MC_XPB_PEER_SYS_BAR7__VALID_MASK 0x00000001L
+#define MC_XPB_PEER_SYS_BAR7__VALID__SHIFT 0x00000000
+#define MC_XPB_PEER_SYS_BAR8__ADDR_MASK 0x07fffffcL
+#define MC_XPB_PEER_SYS_BAR8__ADDR__SHIFT 0x00000002
+#define MC_XPB_PEER_SYS_BAR8__SIDE_OK_MASK 0x00000002L
+#define MC_XPB_PEER_SYS_BAR8__SIDE_OK__SHIFT 0x00000001
+#define MC_XPB_PEER_SYS_BAR8__VALID_MASK 0x00000001L
+#define MC_XPB_PEER_SYS_BAR8__VALID__SHIFT 0x00000000
+#define MC_XPB_PEER_SYS_BAR9__ADDR_MASK 0x07fffffcL
+#define MC_XPB_PEER_SYS_BAR9__ADDR__SHIFT 0x00000002
+#define MC_XPB_PEER_SYS_BAR9__SIDE_OK_MASK 0x00000002L
+#define MC_XPB_PEER_SYS_BAR9__SIDE_OK__SHIFT 0x00000001
+#define MC_XPB_PEER_SYS_BAR9__VALID_MASK 0x00000001L
+#define MC_XPB_PEER_SYS_BAR9__VALID__SHIFT 0x00000000
+#define MC_XPB_PERF_KNOBS__CNS_FIFO_DEPTH_MASK 0x0000003fL
+#define MC_XPB_PERF_KNOBS__CNS_FIFO_DEPTH__SHIFT 0x00000000
+#define MC_XPB_PERF_KNOBS__WCB_HST_FIFO_DEPTH_MASK 0x00000fc0L
+#define MC_XPB_PERF_KNOBS__WCB_HST_FIFO_DEPTH__SHIFT 0x00000006
+#define MC_XPB_PERF_KNOBS__WCB_SID_FIFO_DEPTH_MASK 0x0003f000L
+#define MC_XPB_PERF_KNOBS__WCB_SID_FIFO_DEPTH__SHIFT 0x0000000c
+#define MC_XPB_PIPE_STS__RET_BUF_FULL_MASK 0x00800000L
+#define MC_XPB_PIPE_STS__RET_BUF_FULL__SHIFT 0x00000017
+#define MC_XPB_PIPE_STS__WCB_ANY_PBUF_MASK 0x00000001L
+#define MC_XPB_PIPE_STS__WCB_ANY_PBUF__SHIFT 0x00000000
+#define MC_XPB_PIPE_STS__WCB_HST_DATA_BUF_CNT_MASK 0x000000feL
+#define MC_XPB_PIPE_STS__WCB_HST_DATA_BUF_CNT__SHIFT 0x00000001
+#define MC_XPB_PIPE_STS__WCB_HST_DATA_OBUF_FULL_MASK 0x00200000L
+#define MC_XPB_PIPE_STS__WCB_HST_DATA_OBUF_FULL__SHIFT 0x00000015
+#define MC_XPB_PIPE_STS__WCB_HST_RD_PTR_BUF_FULL_MASK 0x00008000L
+#define MC_XPB_PIPE_STS__WCB_HST_RD_PTR_BUF_FULL__SHIFT 0x0000000f
+#define MC_XPB_PIPE_STS__WCB_HST_REQ_FIFO_FULL_MASK 0x00020000L
+#define MC_XPB_PIPE_STS__WCB_HST_REQ_FIFO_FULL__SHIFT 0x00000011
+#define MC_XPB_PIPE_STS__WCB_HST_REQ_OBUF_FULL_MASK 0x00080000L
+#define MC_XPB_PIPE_STS__WCB_HST_REQ_OBUF_FULL__SHIFT 0x00000013
+#define MC_XPB_PIPE_STS__WCB_SID_DATA_BUF_CNT_MASK 0x00007f00L
+#define MC_XPB_PIPE_STS__WCB_SID_DATA_BUF_CNT__SHIFT 0x00000008
+#define MC_XPB_PIPE_STS__WCB_SID_DATA_OBUF_FULL_MASK 0x00400000L
+#define MC_XPB_PIPE_STS__WCB_SID_DATA_OBUF_FULL__SHIFT 0x00000016
+#define MC_XPB_PIPE_STS__WCB_SID_RD_PTR_BUF_FULL_MASK 0x00010000L
+#define MC_XPB_PIPE_STS__WCB_SID_RD_PTR_BUF_FULL__SHIFT 0x00000010
+#define MC_XPB_PIPE_STS__WCB_SID_REQ_FIFO_FULL_MASK 0x00040000L
+#define MC_XPB_PIPE_STS__WCB_SID_REQ_FIFO_FULL__SHIFT 0x00000012
+#define MC_XPB_PIPE_STS__WCB_SID_REQ_OBUF_FULL_MASK 0x00100000L
+#define MC_XPB_PIPE_STS__WCB_SID_REQ_OBUF_FULL__SHIFT 0x00000014
+#define MC_XPB_PIPE_STS__XPB_CLK_BUSY_BITS_MASK 0xff000000L
+#define MC_XPB_PIPE_STS__XPB_CLK_BUSY_BITS__SHIFT 0x00000018
+#define MC_XPB_RTR_DEST_MAP0__APRTR_SIZE_MASK 0x7c000000L
+#define MC_XPB_RTR_DEST_MAP0__APRTR_SIZE__SHIFT 0x0000001a
+#define MC_XPB_RTR_DEST_MAP0__DEST_OFFSET_MASK 0x000ffffeL
+#define MC_XPB_RTR_DEST_MAP0__DEST_OFFSET__SHIFT 0x00000001
+#define MC_XPB_RTR_DEST_MAP0__DEST_SEL_MASK 0x00f00000L
+#define MC_XPB_RTR_DEST_MAP0__DEST_SEL_RPB_MASK 0x01000000L
+#define MC_XPB_RTR_DEST_MAP0__DEST_SEL_RPB__SHIFT 0x00000018
+#define MC_XPB_RTR_DEST_MAP0__DEST_SEL__SHIFT 0x00000014
+#define MC_XPB_RTR_DEST_MAP0__NMR_MASK 0x00000001L
+#define MC_XPB_RTR_DEST_MAP0__NMR__SHIFT 0x00000000
+#define MC_XPB_RTR_DEST_MAP0__SIDE_OK_MASK 0x02000000L
+#define MC_XPB_RTR_DEST_MAP0__SIDE_OK__SHIFT 0x00000019
+#define MC_XPB_RTR_DEST_MAP1__APRTR_SIZE_MASK 0x7c000000L
+#define MC_XPB_RTR_DEST_MAP1__APRTR_SIZE__SHIFT 0x0000001a
+#define MC_XPB_RTR_DEST_MAP1__DEST_OFFSET_MASK 0x000ffffeL
+#define MC_XPB_RTR_DEST_MAP1__DEST_OFFSET__SHIFT 0x00000001
+#define MC_XPB_RTR_DEST_MAP1__DEST_SEL_MASK 0x00f00000L
+#define MC_XPB_RTR_DEST_MAP1__DEST_SEL_RPB_MASK 0x01000000L
+#define MC_XPB_RTR_DEST_MAP1__DEST_SEL_RPB__SHIFT 0x00000018
+#define MC_XPB_RTR_DEST_MAP1__DEST_SEL__SHIFT 0x00000014
+#define MC_XPB_RTR_DEST_MAP1__NMR_MASK 0x00000001L
+#define MC_XPB_RTR_DEST_MAP1__NMR__SHIFT 0x00000000
+#define MC_XPB_RTR_DEST_MAP1__SIDE_OK_MASK 0x02000000L
+#define MC_XPB_RTR_DEST_MAP1__SIDE_OK__SHIFT 0x00000019
+#define MC_XPB_RTR_DEST_MAP2__APRTR_SIZE_MASK 0x7c000000L
+#define MC_XPB_RTR_DEST_MAP2__APRTR_SIZE__SHIFT 0x0000001a
+#define MC_XPB_RTR_DEST_MAP2__DEST_OFFSET_MASK 0x000ffffeL
+#define MC_XPB_RTR_DEST_MAP2__DEST_OFFSET__SHIFT 0x00000001
+#define MC_XPB_RTR_DEST_MAP2__DEST_SEL_MASK 0x00f00000L
+#define MC_XPB_RTR_DEST_MAP2__DEST_SEL_RPB_MASK 0x01000000L
+#define MC_XPB_RTR_DEST_MAP2__DEST_SEL_RPB__SHIFT 0x00000018
+#define MC_XPB_RTR_DEST_MAP2__DEST_SEL__SHIFT 0x00000014
+#define MC_XPB_RTR_DEST_MAP2__NMR_MASK 0x00000001L
+#define MC_XPB_RTR_DEST_MAP2__NMR__SHIFT 0x00000000
+#define MC_XPB_RTR_DEST_MAP2__SIDE_OK_MASK 0x02000000L
+#define MC_XPB_RTR_DEST_MAP2__SIDE_OK__SHIFT 0x00000019
+#define MC_XPB_RTR_DEST_MAP3__APRTR_SIZE_MASK 0x7c000000L
+#define MC_XPB_RTR_DEST_MAP3__APRTR_SIZE__SHIFT 0x0000001a
+#define MC_XPB_RTR_DEST_MAP3__DEST_OFFSET_MASK 0x000ffffeL
+#define MC_XPB_RTR_DEST_MAP3__DEST_OFFSET__SHIFT 0x00000001
+#define MC_XPB_RTR_DEST_MAP3__DEST_SEL_MASK 0x00f00000L
+#define MC_XPB_RTR_DEST_MAP3__DEST_SEL_RPB_MASK 0x01000000L
+#define MC_XPB_RTR_DEST_MAP3__DEST_SEL_RPB__SHIFT 0x00000018
+#define MC_XPB_RTR_DEST_MAP3__DEST_SEL__SHIFT 0x00000014
+#define MC_XPB_RTR_DEST_MAP3__NMR_MASK 0x00000001L
+#define MC_XPB_RTR_DEST_MAP3__NMR__SHIFT 0x00000000
+#define MC_XPB_RTR_DEST_MAP3__SIDE_OK_MASK 0x02000000L
+#define MC_XPB_RTR_DEST_MAP3__SIDE_OK__SHIFT 0x00000019
+#define MC_XPB_RTR_DEST_MAP4__APRTR_SIZE_MASK 0x7c000000L
+#define MC_XPB_RTR_DEST_MAP4__APRTR_SIZE__SHIFT 0x0000001a
+#define MC_XPB_RTR_DEST_MAP4__DEST_OFFSET_MASK 0x000ffffeL
+#define MC_XPB_RTR_DEST_MAP4__DEST_OFFSET__SHIFT 0x00000001
+#define MC_XPB_RTR_DEST_MAP4__DEST_SEL_MASK 0x00f00000L
+#define MC_XPB_RTR_DEST_MAP4__DEST_SEL_RPB_MASK 0x01000000L
+#define MC_XPB_RTR_DEST_MAP4__DEST_SEL_RPB__SHIFT 0x00000018
+#define MC_XPB_RTR_DEST_MAP4__DEST_SEL__SHIFT 0x00000014
+#define MC_XPB_RTR_DEST_MAP4__NMR_MASK 0x00000001L
+#define MC_XPB_RTR_DEST_MAP4__NMR__SHIFT 0x00000000
+#define MC_XPB_RTR_DEST_MAP4__SIDE_OK_MASK 0x02000000L
+#define MC_XPB_RTR_DEST_MAP4__SIDE_OK__SHIFT 0x00000019
+#define MC_XPB_RTR_DEST_MAP5__APRTR_SIZE_MASK 0x7c000000L
+#define MC_XPB_RTR_DEST_MAP5__APRTR_SIZE__SHIFT 0x0000001a
+#define MC_XPB_RTR_DEST_MAP5__DEST_OFFSET_MASK 0x000ffffeL
+#define MC_XPB_RTR_DEST_MAP5__DEST_OFFSET__SHIFT 0x00000001
+#define MC_XPB_RTR_DEST_MAP5__DEST_SEL_MASK 0x00f00000L
+#define MC_XPB_RTR_DEST_MAP5__DEST_SEL_RPB_MASK 0x01000000L
+#define MC_XPB_RTR_DEST_MAP5__DEST_SEL_RPB__SHIFT 0x00000018
+#define MC_XPB_RTR_DEST_MAP5__DEST_SEL__SHIFT 0x00000014
+#define MC_XPB_RTR_DEST_MAP5__NMR_MASK 0x00000001L
+#define MC_XPB_RTR_DEST_MAP5__NMR__SHIFT 0x00000000
+#define MC_XPB_RTR_DEST_MAP5__SIDE_OK_MASK 0x02000000L
+#define MC_XPB_RTR_DEST_MAP5__SIDE_OK__SHIFT 0x00000019
+#define MC_XPB_RTR_DEST_MAP6__APRTR_SIZE_MASK 0x7c000000L
+#define MC_XPB_RTR_DEST_MAP6__APRTR_SIZE__SHIFT 0x0000001a
+#define MC_XPB_RTR_DEST_MAP6__DEST_OFFSET_MASK 0x000ffffeL
+#define MC_XPB_RTR_DEST_MAP6__DEST_OFFSET__SHIFT 0x00000001
+#define MC_XPB_RTR_DEST_MAP6__DEST_SEL_MASK 0x00f00000L
+#define MC_XPB_RTR_DEST_MAP6__DEST_SEL_RPB_MASK 0x01000000L
+#define MC_XPB_RTR_DEST_MAP6__DEST_SEL_RPB__SHIFT 0x00000018
+#define MC_XPB_RTR_DEST_MAP6__DEST_SEL__SHIFT 0x00000014
+#define MC_XPB_RTR_DEST_MAP6__NMR_MASK 0x00000001L
+#define MC_XPB_RTR_DEST_MAP6__NMR__SHIFT 0x00000000
+#define MC_XPB_RTR_DEST_MAP6__SIDE_OK_MASK 0x02000000L
+#define MC_XPB_RTR_DEST_MAP6__SIDE_OK__SHIFT 0x00000019
+#define MC_XPB_RTR_DEST_MAP7__APRTR_SIZE_MASK 0x7c000000L
+#define MC_XPB_RTR_DEST_MAP7__APRTR_SIZE__SHIFT 0x0000001a
+#define MC_XPB_RTR_DEST_MAP7__DEST_OFFSET_MASK 0x000ffffeL
+#define MC_XPB_RTR_DEST_MAP7__DEST_OFFSET__SHIFT 0x00000001
+#define MC_XPB_RTR_DEST_MAP7__DEST_SEL_MASK 0x00f00000L
+#define MC_XPB_RTR_DEST_MAP7__DEST_SEL_RPB_MASK 0x01000000L
+#define MC_XPB_RTR_DEST_MAP7__DEST_SEL_RPB__SHIFT 0x00000018
+#define MC_XPB_RTR_DEST_MAP7__DEST_SEL__SHIFT 0x00000014
+#define MC_XPB_RTR_DEST_MAP7__NMR_MASK 0x00000001L
+#define MC_XPB_RTR_DEST_MAP7__NMR__SHIFT 0x00000000
+#define MC_XPB_RTR_DEST_MAP7__SIDE_OK_MASK 0x02000000L
+#define MC_XPB_RTR_DEST_MAP7__SIDE_OK__SHIFT 0x00000019
+#define MC_XPB_RTR_DEST_MAP8__APRTR_SIZE_MASK 0x7c000000L
+#define MC_XPB_RTR_DEST_MAP8__APRTR_SIZE__SHIFT 0x0000001a
+#define MC_XPB_RTR_DEST_MAP8__DEST_OFFSET_MASK 0x000ffffeL
+#define MC_XPB_RTR_DEST_MAP8__DEST_OFFSET__SHIFT 0x00000001
+#define MC_XPB_RTR_DEST_MAP8__DEST_SEL_MASK 0x00f00000L
+#define MC_XPB_RTR_DEST_MAP8__DEST_SEL_RPB_MASK 0x01000000L
+#define MC_XPB_RTR_DEST_MAP8__DEST_SEL_RPB__SHIFT 0x00000018
+#define MC_XPB_RTR_DEST_MAP8__DEST_SEL__SHIFT 0x00000014
+#define MC_XPB_RTR_DEST_MAP8__NMR_MASK 0x00000001L
+#define MC_XPB_RTR_DEST_MAP8__NMR__SHIFT 0x00000000
+#define MC_XPB_RTR_DEST_MAP8__SIDE_OK_MASK 0x02000000L
+#define MC_XPB_RTR_DEST_MAP8__SIDE_OK__SHIFT 0x00000019
+#define MC_XPB_RTR_DEST_MAP9__APRTR_SIZE_MASK 0x7c000000L
+#define MC_XPB_RTR_DEST_MAP9__APRTR_SIZE__SHIFT 0x0000001a
+#define MC_XPB_RTR_DEST_MAP9__DEST_OFFSET_MASK 0x000ffffeL
+#define MC_XPB_RTR_DEST_MAP9__DEST_OFFSET__SHIFT 0x00000001
+#define MC_XPB_RTR_DEST_MAP9__DEST_SEL_MASK 0x00f00000L
+#define MC_XPB_RTR_DEST_MAP9__DEST_SEL_RPB_MASK 0x01000000L
+#define MC_XPB_RTR_DEST_MAP9__DEST_SEL_RPB__SHIFT 0x00000018
+#define MC_XPB_RTR_DEST_MAP9__DEST_SEL__SHIFT 0x00000014
+#define MC_XPB_RTR_DEST_MAP9__NMR_MASK 0x00000001L
+#define MC_XPB_RTR_DEST_MAP9__NMR__SHIFT 0x00000000
+#define MC_XPB_RTR_DEST_MAP9__SIDE_OK_MASK 0x02000000L
+#define MC_XPB_RTR_DEST_MAP9__SIDE_OK__SHIFT 0x00000019
+#define MC_XPB_RTR_SRC_APRTR0__BASE_ADDR_MASK 0x01ffffffL
+#define MC_XPB_RTR_SRC_APRTR0__BASE_ADDR__SHIFT 0x00000000
+#define MC_XPB_RTR_SRC_APRTR1__BASE_ADDR_MASK 0x01ffffffL
+#define MC_XPB_RTR_SRC_APRTR1__BASE_ADDR__SHIFT 0x00000000
+#define MC_XPB_RTR_SRC_APRTR2__BASE_ADDR_MASK 0x01ffffffL
+#define MC_XPB_RTR_SRC_APRTR2__BASE_ADDR__SHIFT 0x00000000
+#define MC_XPB_RTR_SRC_APRTR3__BASE_ADDR_MASK 0x01ffffffL
+#define MC_XPB_RTR_SRC_APRTR3__BASE_ADDR__SHIFT 0x00000000
+#define MC_XPB_RTR_SRC_APRTR4__BASE_ADDR_MASK 0x01ffffffL
+#define MC_XPB_RTR_SRC_APRTR4__BASE_ADDR__SHIFT 0x00000000
+#define MC_XPB_RTR_SRC_APRTR5__BASE_ADDR_MASK 0x01ffffffL
+#define MC_XPB_RTR_SRC_APRTR5__BASE_ADDR__SHIFT 0x00000000
+#define MC_XPB_RTR_SRC_APRTR6__BASE_ADDR_MASK 0x01ffffffL
+#define MC_XPB_RTR_SRC_APRTR6__BASE_ADDR__SHIFT 0x00000000
+#define MC_XPB_RTR_SRC_APRTR7__BASE_ADDR_MASK 0x01ffffffL
+#define MC_XPB_RTR_SRC_APRTR7__BASE_ADDR__SHIFT 0x00000000
+#define MC_XPB_RTR_SRC_APRTR8__BASE_ADDR_MASK 0x01ffffffL
+#define MC_XPB_RTR_SRC_APRTR8__BASE_ADDR__SHIFT 0x00000000
+#define MC_XPB_RTR_SRC_APRTR9__BASE_ADDR_MASK 0x01ffffffL
+#define MC_XPB_RTR_SRC_APRTR9__BASE_ADDR__SHIFT 0x00000000
+#define MC_XPB_STICKY__BITS_MASK 0xffffffffL
+#define MC_XPB_STICKY__BITS__SHIFT 0x00000000
+#define MC_XPB_STICKY_W1C__BITS_MASK 0xffffffffL
+#define MC_XPB_STICKY_W1C__BITS__SHIFT 0x00000000
+#define MC_XPB_SUB_CTRL__RESET_CGR_MASK 0x00080000L
+#define MC_XPB_SUB_CTRL__RESET_CGR__SHIFT 0x00000013
+#define MC_XPB_SUB_CTRL__RESET_CNS_MASK 0x00000400L
+#define MC_XPB_SUB_CTRL__RESET_CNS__SHIFT 0x0000000a
+#define MC_XPB_SUB_CTRL__RESET_HOP_MASK 0x00010000L
+#define MC_XPB_SUB_CTRL__RESET_HOP__SHIFT 0x00000010
+#define MC_XPB_SUB_CTRL__RESET_HST_MASK 0x00008000L
+#define MC_XPB_SUB_CTRL__RESET_HST__SHIFT 0x0000000f
+#define MC_XPB_SUB_CTRL__RESET_MAP_MASK 0x00002000L
+#define MC_XPB_SUB_CTRL__RESET_MAP__SHIFT 0x0000000d
+#define MC_XPB_SUB_CTRL__RESET_RET_MASK 0x00001000L
+#define MC_XPB_SUB_CTRL__RESET_RET__SHIFT 0x0000000c
+#define MC_XPB_SUB_CTRL__RESET_RTR_MASK 0x00000800L
+#define MC_XPB_SUB_CTRL__RESET_RTR__SHIFT 0x0000000b
+#define MC_XPB_SUB_CTRL__RESET_SID_MASK 0x00020000L
+#define MC_XPB_SUB_CTRL__RESET_SID__SHIFT 0x00000011
+#define MC_XPB_SUB_CTRL__RESET_SRB_MASK 0x00040000L
+#define MC_XPB_SUB_CTRL__RESET_SRB__SHIFT 0x00000012
+#define MC_XPB_SUB_CTRL__RESET_WCB_MASK 0x00004000L
+#define MC_XPB_SUB_CTRL__RESET_WCB__SHIFT 0x0000000e
+#define MC_XPB_SUB_CTRL__STALL_CNS_RTR_REQ_MASK 0x00000002L
+#define MC_XPB_SUB_CTRL__STALL_CNS_RTR_REQ__SHIFT 0x00000001
+#define MC_XPB_SUB_CTRL__STALL_HST_HOP_REQ_MASK 0x00000100L
+#define MC_XPB_SUB_CTRL__STALL_HST_HOP_REQ__SHIFT 0x00000008
+#define MC_XPB_SUB_CTRL__STALL_MAP_WCB_REQ_MASK 0x00000010L
+#define MC_XPB_SUB_CTRL__STALL_MAP_WCB_REQ__SHIFT 0x00000004
+#define MC_XPB_SUB_CTRL__STALL_MC_XSP_REQ_SEND_MASK 0x00000040L
+#define MC_XPB_SUB_CTRL__STALL_MC_XSP_REQ_SEND__SHIFT 0x00000006
+#define MC_XPB_SUB_CTRL__STALL_RTR_MAP_REQ_MASK 0x00000008L
+#define MC_XPB_SUB_CTRL__STALL_RTR_MAP_REQ__SHIFT 0x00000003
+#define MC_XPB_SUB_CTRL__STALL_RTR_RPB_WRREQ_MASK 0x00000004L
+#define MC_XPB_SUB_CTRL__STALL_RTR_RPB_WRREQ__SHIFT 0x00000002
+#define MC_XPB_SUB_CTRL__STALL_WCB_HST_REQ_MASK 0x00000080L
+#define MC_XPB_SUB_CTRL__STALL_WCB_HST_REQ__SHIFT 0x00000007
+#define MC_XPB_SUB_CTRL__STALL_WCB_SID_REQ_MASK 0x00000020L
+#define MC_XPB_SUB_CTRL__STALL_WCB_SID_REQ__SHIFT 0x00000005
+#define MC_XPB_SUB_CTRL__STALL_XPB_RPB_REQ_ATTR_MASK 0x00000200L
+#define MC_XPB_SUB_CTRL__STALL_XPB_RPB_REQ_ATTR__SHIFT 0x00000009
+#define MC_XPB_SUB_CTRL__WRREQ_BYPASS_XPB_MASK 0x00000001L
+#define MC_XPB_SUB_CTRL__WRREQ_BYPASS_XPB__SHIFT 0x00000000
+#define MC_XPB_UNC_THRESH_HST__CHANGE_PREF_MASK 0x0000003fL
+#define MC_XPB_UNC_THRESH_HST__CHANGE_PREF__SHIFT 0x00000000
+#define MC_XPB_UNC_THRESH_HST__STRONG_PREF_MASK 0x00000fc0L
+#define MC_XPB_UNC_THRESH_HST__STRONG_PREF__SHIFT 0x00000006
+#define MC_XPB_UNC_THRESH_HST__USE_UNFULL_MASK 0x0003f000L
+#define MC_XPB_UNC_THRESH_HST__USE_UNFULL__SHIFT 0x0000000c
+#define MC_XPB_UNC_THRESH_SID__CHANGE_PREF_MASK 0x0000003fL
+#define MC_XPB_UNC_THRESH_SID__CHANGE_PREF__SHIFT 0x00000000
+#define MC_XPB_UNC_THRESH_SID__STRONG_PREF_MASK 0x00000fc0L
+#define MC_XPB_UNC_THRESH_SID__STRONG_PREF__SHIFT 0x00000006
+#define MC_XPB_UNC_THRESH_SID__USE_UNFULL_MASK 0x0003f000L
+#define MC_XPB_UNC_THRESH_SID__USE_UNFULL__SHIFT 0x0000000c
+#define MC_XPB_WCB_CFG__HST_MAX_MASK 0x00030000L
+#define MC_XPB_WCB_CFG__HST_MAX__SHIFT 0x00000010
+#define MC_XPB_WCB_CFG__SID_MAX_MASK 0x000c0000L
+#define MC_XPB_WCB_CFG__SID_MAX__SHIFT 0x00000012
+#define MC_XPB_WCB_CFG__TIMEOUT_MASK 0x0000ffffL
+#define MC_XPB_WCB_CFG__TIMEOUT__SHIFT 0x00000000
+#define MC_XPB_WCB_STS__PBUF_VLD_MASK 0x0000ffffL
+#define MC_XPB_WCB_STS__PBUF_VLD__SHIFT 0x00000000
+#define MC_XPB_WCB_STS__WCB_HST_DATA_BUF_CNT_MASK 0x007f0000L
+#define MC_XPB_WCB_STS__WCB_HST_DATA_BUF_CNT__SHIFT 0x00000010
+#define MC_XPB_WCB_STS__WCB_SID_DATA_BUF_CNT_MASK 0x3f800000L
+#define MC_XPB_WCB_STS__WCB_SID_DATA_BUF_CNT__SHIFT 0x00000017
+#define MC_XPB_XDMA_PEER_SYS_BAR0__ADDR_MASK 0x07fffffcL
+#define MC_XPB_XDMA_PEER_SYS_BAR0__ADDR__SHIFT 0x00000002
+#define MC_XPB_XDMA_PEER_SYS_BAR0__SIDE_OK_MASK 0x00000002L
+#define MC_XPB_XDMA_PEER_SYS_BAR0__SIDE_OK__SHIFT 0x00000001
+#define MC_XPB_XDMA_PEER_SYS_BAR0__VALID_MASK 0x00000001L
+#define MC_XPB_XDMA_PEER_SYS_BAR0__VALID__SHIFT 0x00000000
+#define MC_XPB_XDMA_PEER_SYS_BAR1__ADDR_MASK 0x07fffffcL
+#define MC_XPB_XDMA_PEER_SYS_BAR1__ADDR__SHIFT 0x00000002
+#define MC_XPB_XDMA_PEER_SYS_BAR1__SIDE_OK_MASK 0x00000002L
+#define MC_XPB_XDMA_PEER_SYS_BAR1__SIDE_OK__SHIFT 0x00000001
+#define MC_XPB_XDMA_PEER_SYS_BAR1__VALID_MASK 0x00000001L
+#define MC_XPB_XDMA_PEER_SYS_BAR1__VALID__SHIFT 0x00000000
+#define MC_XPB_XDMA_PEER_SYS_BAR2__ADDR_MASK 0x07fffffcL
+#define MC_XPB_XDMA_PEER_SYS_BAR2__ADDR__SHIFT 0x00000002
+#define MC_XPB_XDMA_PEER_SYS_BAR2__SIDE_OK_MASK 0x00000002L
+#define MC_XPB_XDMA_PEER_SYS_BAR2__SIDE_OK__SHIFT 0x00000001
+#define MC_XPB_XDMA_PEER_SYS_BAR2__VALID_MASK 0x00000001L
+#define MC_XPB_XDMA_PEER_SYS_BAR2__VALID__SHIFT 0x00000000
+#define MC_XPB_XDMA_PEER_SYS_BAR3__ADDR_MASK 0x07fffffcL
+#define MC_XPB_XDMA_PEER_SYS_BAR3__ADDR__SHIFT 0x00000002
+#define MC_XPB_XDMA_PEER_SYS_BAR3__SIDE_OK_MASK 0x00000002L
+#define MC_XPB_XDMA_PEER_SYS_BAR3__SIDE_OK__SHIFT 0x00000001
+#define MC_XPB_XDMA_PEER_SYS_BAR3__VALID_MASK 0x00000001L
+#define MC_XPB_XDMA_PEER_SYS_BAR3__VALID__SHIFT 0x00000000
+#define MC_XPB_XDMA_RTR_DEST_MAP0__APRTR_SIZE_MASK 0x7c000000L
+#define MC_XPB_XDMA_RTR_DEST_MAP0__APRTR_SIZE__SHIFT 0x0000001a
+#define MC_XPB_XDMA_RTR_DEST_MAP0__DEST_OFFSET_MASK 0x000ffffeL
+#define MC_XPB_XDMA_RTR_DEST_MAP0__DEST_OFFSET__SHIFT 0x00000001
+#define MC_XPB_XDMA_RTR_DEST_MAP0__DEST_SEL_MASK 0x00f00000L
+#define MC_XPB_XDMA_RTR_DEST_MAP0__DEST_SEL_RPB_MASK 0x01000000L
+#define MC_XPB_XDMA_RTR_DEST_MAP0__DEST_SEL_RPB__SHIFT 0x00000018
+#define MC_XPB_XDMA_RTR_DEST_MAP0__DEST_SEL__SHIFT 0x00000014
+#define MC_XPB_XDMA_RTR_DEST_MAP0__NMR_MASK 0x00000001L
+#define MC_XPB_XDMA_RTR_DEST_MAP0__NMR__SHIFT 0x00000000
+#define MC_XPB_XDMA_RTR_DEST_MAP0__SIDE_OK_MASK 0x02000000L
+#define MC_XPB_XDMA_RTR_DEST_MAP0__SIDE_OK__SHIFT 0x00000019
+#define MC_XPB_XDMA_RTR_DEST_MAP1__APRTR_SIZE_MASK 0x7c000000L
+#define MC_XPB_XDMA_RTR_DEST_MAP1__APRTR_SIZE__SHIFT 0x0000001a
+#define MC_XPB_XDMA_RTR_DEST_MAP1__DEST_OFFSET_MASK 0x000ffffeL
+#define MC_XPB_XDMA_RTR_DEST_MAP1__DEST_OFFSET__SHIFT 0x00000001
+#define MC_XPB_XDMA_RTR_DEST_MAP1__DEST_SEL_MASK 0x00f00000L
+#define MC_XPB_XDMA_RTR_DEST_MAP1__DEST_SEL_RPB_MASK 0x01000000L
+#define MC_XPB_XDMA_RTR_DEST_MAP1__DEST_SEL_RPB__SHIFT 0x00000018
+#define MC_XPB_XDMA_RTR_DEST_MAP1__DEST_SEL__SHIFT 0x00000014
+#define MC_XPB_XDMA_RTR_DEST_MAP1__NMR_MASK 0x00000001L
+#define MC_XPB_XDMA_RTR_DEST_MAP1__NMR__SHIFT 0x00000000
+#define MC_XPB_XDMA_RTR_DEST_MAP1__SIDE_OK_MASK 0x02000000L
+#define MC_XPB_XDMA_RTR_DEST_MAP1__SIDE_OK__SHIFT 0x00000019
+#define MC_XPB_XDMA_RTR_DEST_MAP2__APRTR_SIZE_MASK 0x7c000000L
+#define MC_XPB_XDMA_RTR_DEST_MAP2__APRTR_SIZE__SHIFT 0x0000001a
+#define MC_XPB_XDMA_RTR_DEST_MAP2__DEST_OFFSET_MASK 0x000ffffeL
+#define MC_XPB_XDMA_RTR_DEST_MAP2__DEST_OFFSET__SHIFT 0x00000001
+#define MC_XPB_XDMA_RTR_DEST_MAP2__DEST_SEL_MASK 0x00f00000L
+#define MC_XPB_XDMA_RTR_DEST_MAP2__DEST_SEL_RPB_MASK 0x01000000L
+#define MC_XPB_XDMA_RTR_DEST_MAP2__DEST_SEL_RPB__SHIFT 0x00000018
+#define MC_XPB_XDMA_RTR_DEST_MAP2__DEST_SEL__SHIFT 0x00000014
+#define MC_XPB_XDMA_RTR_DEST_MAP2__NMR_MASK 0x00000001L
+#define MC_XPB_XDMA_RTR_DEST_MAP2__NMR__SHIFT 0x00000000
+#define MC_XPB_XDMA_RTR_DEST_MAP2__SIDE_OK_MASK 0x02000000L
+#define MC_XPB_XDMA_RTR_DEST_MAP2__SIDE_OK__SHIFT 0x00000019
+#define MC_XPB_XDMA_RTR_DEST_MAP3__APRTR_SIZE_MASK 0x7c000000L
+#define MC_XPB_XDMA_RTR_DEST_MAP3__APRTR_SIZE__SHIFT 0x0000001a
+#define MC_XPB_XDMA_RTR_DEST_MAP3__DEST_OFFSET_MASK 0x000ffffeL
+#define MC_XPB_XDMA_RTR_DEST_MAP3__DEST_OFFSET__SHIFT 0x00000001
+#define MC_XPB_XDMA_RTR_DEST_MAP3__DEST_SEL_MASK 0x00f00000L
+#define MC_XPB_XDMA_RTR_DEST_MAP3__DEST_SEL_RPB_MASK 0x01000000L
+#define MC_XPB_XDMA_RTR_DEST_MAP3__DEST_SEL_RPB__SHIFT 0x00000018
+#define MC_XPB_XDMA_RTR_DEST_MAP3__DEST_SEL__SHIFT 0x00000014
+#define MC_XPB_XDMA_RTR_DEST_MAP3__NMR_MASK 0x00000001L
+#define MC_XPB_XDMA_RTR_DEST_MAP3__NMR__SHIFT 0x00000000
+#define MC_XPB_XDMA_RTR_DEST_MAP3__SIDE_OK_MASK 0x02000000L
+#define MC_XPB_XDMA_RTR_DEST_MAP3__SIDE_OK__SHIFT 0x00000019
+#define MC_XPB_XDMA_RTR_SRC_APRTR0__BASE_ADDR_MASK 0x01ffffffL
+#define MC_XPB_XDMA_RTR_SRC_APRTR0__BASE_ADDR__SHIFT 0x00000000
+#define MC_XPB_XDMA_RTR_SRC_APRTR1__BASE_ADDR_MASK 0x01ffffffL
+#define MC_XPB_XDMA_RTR_SRC_APRTR1__BASE_ADDR__SHIFT 0x00000000
+#define MC_XPB_XDMA_RTR_SRC_APRTR2__BASE_ADDR_MASK 0x01ffffffL
+#define MC_XPB_XDMA_RTR_SRC_APRTR2__BASE_ADDR__SHIFT 0x00000000
+#define MC_XPB_XDMA_RTR_SRC_APRTR3__BASE_ADDR_MASK 0x01ffffffL
+#define MC_XPB_XDMA_RTR_SRC_APRTR3__BASE_ADDR__SHIFT 0x00000000
+#define MPLL_AD_FUNC_CNTL__SPARE_MASK 0xfffffff8L
+#define MPLL_AD_FUNC_CNTL__SPARE__SHIFT 0x00000003
+#define MPLL_AD_FUNC_CNTL__YCLK_POST_DIV_MASK 0x00000007L
+#define MPLL_AD_FUNC_CNTL__YCLK_POST_DIV__SHIFT 0x00000000
+#define MPLL_AD_STATUS__FREQ_LOCK_MASK 0x00040000L
+#define MPLL_AD_STATUS__FREQ_LOCK__SHIFT 0x00000012
+#define MPLL_AD_STATUS__FREQ_UNLOCK_STICKY_MASK 0x00080000L
+#define MPLL_AD_STATUS__FREQ_UNLOCK_STICKY__SHIFT 0x00000013
+#define MPLL_AD_STATUS__OINT_RESET_MASK 0x00020000L
+#define MPLL_AD_STATUS__OINT_RESET__SHIFT 0x00000011
+#define MPLL_AD_STATUS__TEST_FBDIV_FRAC_MASK 0x00000070L
+#define MPLL_AD_STATUS__TEST_FBDIV_FRAC__SHIFT 0x00000004
+#define MPLL_AD_STATUS__TEST_FBDIV_INT_MASK 0x0001ff80L
+#define MPLL_AD_STATUS__TEST_FBDIV_INT__SHIFT 0x00000007
+#define MPLL_AD_STATUS__VCTRLADC_MASK 0x00000007L
+#define MPLL_AD_STATUS__VCTRLADC__SHIFT 0x00000000
+#define MPLL_CNTL_MODE__FAST_LOCK_CNTRL_MASK 0x00600000L
+#define MPLL_CNTL_MODE__FAST_LOCK_CNTRL__SHIFT 0x00000015
+#define MPLL_CNTL_MODE__FAST_LOCK_EN_MASK 0x00100000L
+#define MPLL_CNTL_MODE__FAST_LOCK_EN__SHIFT 0x00000014
+#define MPLL_CNTL_MODE__FORCE_TESTMODE_MASK 0x00020000L
+#define MPLL_CNTL_MODE__FORCE_TESTMODE__SHIFT 0x00000011
+#define MPLL_CNTL_MODE__GLOBAL_MPLL_RESET_MASK 0x80000000L
+#define MPLL_CNTL_MODE__GLOBAL_MPLL_RESET__SHIFT 0x0000001f
+#define MPLL_CNTL_MODE__INSTR_DELAY_MASK 0x000000ffL
+#define MPLL_CNTL_MODE__INSTR_DELAY__SHIFT 0x00000000
+#define MPLL_CNTL_MODE__MPLL_CHG_STATUS_MASK 0x00010000L
+#define MPLL_CNTL_MODE__MPLL_CHG_STATUS__SHIFT 0x00000010
+#define MPLL_CNTL_MODE__MPLL_CTLREQ_MASK 0x00004000L
+#define MPLL_CNTL_MODE__MPLL_CTLREQ__SHIFT 0x0000000e
+#define MPLL_CNTL_MODE__MPLL_MCLK_SEL_MASK 0x00000800L
+#define MPLL_CNTL_MODE__MPLL_MCLK_SEL__SHIFT 0x0000000b
+#define MPLL_CNTL_MODE__MPLL_SW_DIR_CONTROL_MASK 0x00000100L
+#define MPLL_CNTL_MODE__MPLL_SW_DIR_CONTROL__SHIFT 0x00000008
+#define MPLL_CNTL_MODE__QDR_MASK 0x00002000L
+#define MPLL_CNTL_MODE__QDR__SHIFT 0x0000000d
+#define MPLL_CNTL_MODE__SPARE_1_MASK 0x00001000L
+#define MPLL_CNTL_MODE__SPARE_1__SHIFT 0x0000000c
+#define MPLL_CNTL_MODE__SPARE_2_MASK 0x00800000L
+#define MPLL_CNTL_MODE__SPARE_2__SHIFT 0x00000017
+#define MPLL_CNTL_MODE__SPARE_3_MASK 0x70000000L
+#define MPLL_CNTL_MODE__SPARE_3__SHIFT 0x0000001c
+#define MPLL_CNTL_MODE__SS_DSMODE_EN_MASK 0x04000000L
+#define MPLL_CNTL_MODE__SS_DSMODE_EN__SHIFT 0x0000001a
+#define MPLL_CNTL_MODE__SS_SSEN_MASK 0x03000000L
+#define MPLL_CNTL_MODE__SS_SSEN__SHIFT 0x00000018
+#define MPLL_CNTL_MODE__VTOI_BIAS_CNTRL_MASK 0x08000000L
+#define MPLL_CNTL_MODE__VTOI_BIAS_CNTRL__SHIFT 0x0000001b
+#define MPLL_CONTROL__AD_BG_PWRON_MASK 0x00001000L
+#define MPLL_CONTROL__AD_BG_PWRON__SHIFT 0x0000000c
+#define MPLL_CONTROL__AD_PLL_PWRON_MASK 0x00002000L
+#define MPLL_CONTROL__AD_PLL_PWRON__SHIFT 0x0000000d
+#define MPLL_CONTROL__AD_PLL_RESET_MASK 0x00004000L
+#define MPLL_CONTROL__AD_PLL_RESET__SHIFT 0x0000000e
+#define MPLL_CONTROL__DQ_0_0_BG_PWRON_MASK 0x00010000L
+#define MPLL_CONTROL__DQ_0_0_BG_PWRON__SHIFT 0x00000010
+#define MPLL_CONTROL__DQ_0_0_PLL_PWRON_MASK 0x00020000L
+#define MPLL_CONTROL__DQ_0_0_PLL_PWRON__SHIFT 0x00000011
+#define MPLL_CONTROL__DQ_0_0_PLL_RESET_MASK 0x00040000L
+#define MPLL_CONTROL__DQ_0_0_PLL_RESET__SHIFT 0x00000012
+#define MPLL_CONTROL__DQ_0_1_BG_PWRON_MASK 0x00100000L
+#define MPLL_CONTROL__DQ_0_1_BG_PWRON__SHIFT 0x00000014
+#define MPLL_CONTROL__DQ_0_1_PLL_PWRON_MASK 0x00200000L
+#define MPLL_CONTROL__DQ_0_1_PLL_PWRON__SHIFT 0x00000015
+#define MPLL_CONTROL__DQ_0_1_PLL_RESET_MASK 0x00400000L
+#define MPLL_CONTROL__DQ_0_1_PLL_RESET__SHIFT 0x00000016
+#define MPLL_CONTROL__DQ_1_0_BG_PWRON_MASK 0x01000000L
+#define MPLL_CONTROL__DQ_1_0_BG_PWRON__SHIFT 0x00000018
+#define MPLL_CONTROL__DQ_1_0_PLL_PWRON_MASK 0x02000000L
+#define MPLL_CONTROL__DQ_1_0_PLL_PWRON__SHIFT 0x00000019
+#define MPLL_CONTROL__DQ_1_0_PLL_RESET_MASK 0x04000000L
+#define MPLL_CONTROL__DQ_1_0_PLL_RESET__SHIFT 0x0000001a
+#define MPLL_CONTROL__DQ_1_1_BG_PWRON_MASK 0x10000000L
+#define MPLL_CONTROL__DQ_1_1_BG_PWRON__SHIFT 0x0000001c
+#define MPLL_CONTROL__DQ_1_1_PLL_PWRON_MASK 0x20000000L
+#define MPLL_CONTROL__DQ_1_1_PLL_PWRON__SHIFT 0x0000001d
+#define MPLL_CONTROL__DQ_1_1_PLL_RESET_MASK 0x40000000L
+#define MPLL_CONTROL__DQ_1_1_PLL_RESET__SHIFT 0x0000001e
+#define MPLL_CONTROL__GDDR_PWRON_MASK 0x00000001L
+#define MPLL_CONTROL__GDDR_PWRON__SHIFT 0x00000000
+#define MPLL_CONTROL__PLL_BUF_PWRON_TX_MASK 0x00000004L
+#define MPLL_CONTROL__PLL_BUF_PWRON_TX__SHIFT 0x00000002
+#define MPLL_CONTROL__REFCLK_PWRON_MASK 0x00000002L
+#define MPLL_CONTROL__REFCLK_PWRON__SHIFT 0x00000001
+#define MPLL_CONTROL__SPARE_AD_0_MASK 0x00008000L
+#define MPLL_CONTROL__SPARE_AD_0__SHIFT 0x0000000f
+#define MPLL_CONTROL__SPARE_DQ_0_0_MASK 0x00080000L
+#define MPLL_CONTROL__SPARE_DQ_0_0__SHIFT 0x00000013
+#define MPLL_CONTROL__SPARE_DQ_0_1_MASK 0x00800000L
+#define MPLL_CONTROL__SPARE_DQ_0_1__SHIFT 0x00000017
+#define MPLL_CONTROL__SPARE_DQ_1_0_MASK 0x08000000L
+#define MPLL_CONTROL__SPARE_DQ_1_0__SHIFT 0x0000001b
+#define MPLL_CONTROL__SPARE_DQ_1_1_MASK 0x80000000L
+#define MPLL_CONTROL__SPARE_DQ_1_1__SHIFT 0x0000001f
+#define MPLL_DQ_0_0_STATUS__FREQ_LOCK_MASK 0x00040000L
+#define MPLL_DQ_0_0_STATUS__FREQ_LOCK__SHIFT 0x00000012
+#define MPLL_DQ_0_0_STATUS__FREQ_UNLOCK_STICKY_MASK 0x00080000L
+#define MPLL_DQ_0_0_STATUS__FREQ_UNLOCK_STICKY__SHIFT 0x00000013
+#define MPLL_DQ_0_0_STATUS__OINT_RESET_MASK 0x00020000L
+#define MPLL_DQ_0_0_STATUS__OINT_RESET__SHIFT 0x00000011
+#define MPLL_DQ_0_0_STATUS__TEST_FBDIV_FRAC_MASK 0x00000070L
+#define MPLL_DQ_0_0_STATUS__TEST_FBDIV_FRAC__SHIFT 0x00000004
+#define MPLL_DQ_0_0_STATUS__TEST_FBDIV_INT_MASK 0x0001ff80L
+#define MPLL_DQ_0_0_STATUS__TEST_FBDIV_INT__SHIFT 0x00000007
+#define MPLL_DQ_0_0_STATUS__VCTRLADC_MASK 0x00000007L
+#define MPLL_DQ_0_0_STATUS__VCTRLADC__SHIFT 0x00000000
+#define MPLL_DQ_0_1_STATUS__FREQ_LOCK_MASK 0x00040000L
+#define MPLL_DQ_0_1_STATUS__FREQ_LOCK__SHIFT 0x00000012
+#define MPLL_DQ_0_1_STATUS__FREQ_UNLOCK_STICKY_MASK 0x00080000L
+#define MPLL_DQ_0_1_STATUS__FREQ_UNLOCK_STICKY__SHIFT 0x00000013
+#define MPLL_DQ_0_1_STATUS__OINT_RESET_MASK 0x00020000L
+#define MPLL_DQ_0_1_STATUS__OINT_RESET__SHIFT 0x00000011
+#define MPLL_DQ_0_1_STATUS__TEST_FBDIV_FRAC_MASK 0x00000070L
+#define MPLL_DQ_0_1_STATUS__TEST_FBDIV_FRAC__SHIFT 0x00000004
+#define MPLL_DQ_0_1_STATUS__TEST_FBDIV_INT_MASK 0x0001ff80L
+#define MPLL_DQ_0_1_STATUS__TEST_FBDIV_INT__SHIFT 0x00000007
+#define MPLL_DQ_0_1_STATUS__VCTRLADC_MASK 0x00000007L
+#define MPLL_DQ_0_1_STATUS__VCTRLADC__SHIFT 0x00000000
+#define MPLL_DQ_1_0_STATUS__FREQ_LOCK_MASK 0x00040000L
+#define MPLL_DQ_1_0_STATUS__FREQ_LOCK__SHIFT 0x00000012
+#define MPLL_DQ_1_0_STATUS__FREQ_UNLOCK_STICKY_MASK 0x00080000L
+#define MPLL_DQ_1_0_STATUS__FREQ_UNLOCK_STICKY__SHIFT 0x00000013
+#define MPLL_DQ_1_0_STATUS__OINT_RESET_MASK 0x00020000L
+#define MPLL_DQ_1_0_STATUS__OINT_RESET__SHIFT 0x00000011
+#define MPLL_DQ_1_0_STATUS__TEST_FBDIV_FRAC_MASK 0x00000070L
+#define MPLL_DQ_1_0_STATUS__TEST_FBDIV_FRAC__SHIFT 0x00000004
+#define MPLL_DQ_1_0_STATUS__TEST_FBDIV_INT_MASK 0x0001ff80L
+#define MPLL_DQ_1_0_STATUS__TEST_FBDIV_INT__SHIFT 0x00000007
+#define MPLL_DQ_1_0_STATUS__VCTRLADC_MASK 0x00000007L
+#define MPLL_DQ_1_0_STATUS__VCTRLADC__SHIFT 0x00000000
+#define MPLL_DQ_1_1_STATUS__FREQ_LOCK_MASK 0x00040000L
+#define MPLL_DQ_1_1_STATUS__FREQ_LOCK__SHIFT 0x00000012
+#define MPLL_DQ_1_1_STATUS__FREQ_UNLOCK_STICKY_MASK 0x00080000L
+#define MPLL_DQ_1_1_STATUS__FREQ_UNLOCK_STICKY__SHIFT 0x00000013
+#define MPLL_DQ_1_1_STATUS__OINT_RESET_MASK 0x00020000L
+#define MPLL_DQ_1_1_STATUS__OINT_RESET__SHIFT 0x00000011
+#define MPLL_DQ_1_1_STATUS__TEST_FBDIV_FRAC_MASK 0x00000070L
+#define MPLL_DQ_1_1_STATUS__TEST_FBDIV_FRAC__SHIFT 0x00000004
+#define MPLL_DQ_1_1_STATUS__TEST_FBDIV_INT_MASK 0x0001ff80L
+#define MPLL_DQ_1_1_STATUS__TEST_FBDIV_INT__SHIFT 0x00000007
+#define MPLL_DQ_1_1_STATUS__VCTRLADC_MASK 0x00000007L
+#define MPLL_DQ_1_1_STATUS__VCTRLADC__SHIFT 0x00000000
+#define MPLL_DQ_FUNC_CNTL__SPARE_0_MASK 0x00000008L
+#define MPLL_DQ_FUNC_CNTL__SPARE_0__SHIFT 0x00000003
+#define MPLL_DQ_FUNC_CNTL__SPARE_MASK 0xffffffe0L
+#define MPLL_DQ_FUNC_CNTL__SPARE__SHIFT 0x00000005
+#define MPLL_DQ_FUNC_CNTL__YCLK_POST_DIV_MASK 0x00000007L
+#define MPLL_DQ_FUNC_CNTL__YCLK_POST_DIV__SHIFT 0x00000000
+#define MPLL_DQ_FUNC_CNTL__YCLK_SEL_MASK 0x00000010L
+#define MPLL_DQ_FUNC_CNTL__YCLK_SEL__SHIFT 0x00000004
+#define MPLL_FUNC_CNTL_1__CLKF_MASK 0x0fff0000L
+#define MPLL_FUNC_CNTL_1__CLKFRAC_MASK 0x0000fff0L
+#define MPLL_FUNC_CNTL_1__CLKFRAC__SHIFT 0x00000004
+#define MPLL_FUNC_CNTL_1__CLKF__SHIFT 0x00000010
+#define MPLL_FUNC_CNTL_1__SPARE_0_MASK 0x0000000cL
+#define MPLL_FUNC_CNTL_1__SPARE_0__SHIFT 0x00000002
+#define MPLL_FUNC_CNTL_1__SPARE_1_MASK 0xf0000000L
+#define MPLL_FUNC_CNTL_1__SPARE_1__SHIFT 0x0000001c
+#define MPLL_FUNC_CNTL_1__VCO_MODE_MASK 0x00000003L
+#define MPLL_FUNC_CNTL_1__VCO_MODE__SHIFT 0x00000000
+#define MPLL_FUNC_CNTL_2__BACKUP_2_MASK 0x000e0000L
+#define MPLL_FUNC_CNTL_2__BACKUP_2__SHIFT 0x00000011
+#define MPLL_FUNC_CNTL_2__BACKUP_MASK 0xf8000000L
+#define MPLL_FUNC_CNTL_2__BACKUP__SHIFT 0x0000001b
+#define MPLL_FUNC_CNTL_2__LF_CNTRL_MASK 0x07f00000L
+#define MPLL_FUNC_CNTL_2__LF_CNTRL__SHIFT 0x00000014
+#define MPLL_FUNC_CNTL_2__MPLL_UNLOCK_CLEAR_MASK 0x00000080L
+#define MPLL_FUNC_CNTL_2__MPLL_UNLOCK_CLEAR__SHIFT 0x00000007
+#define MPLL_FUNC_CNTL_2__PFD_RESET_CNTRL_MASK 0x00003000L
+#define MPLL_FUNC_CNTL_2__PFD_RESET_CNTRL__SHIFT 0x0000000c
+#define MPLL_FUNC_CNTL_2__RESET_EN_MASK 0x00000004L
+#define MPLL_FUNC_CNTL_2__RESET_EN__SHIFT 0x00000002
+#define MPLL_FUNC_CNTL_2__RESET_TIMER_MASK 0x00000c00L
+#define MPLL_FUNC_CNTL_2__RESET_TIMER__SHIFT 0x0000000a
+#define MPLL_FUNC_CNTL_2__TEST_BYPCLK_EN_MASK 0x00000008L
+#define MPLL_FUNC_CNTL_2__TEST_BYPCLK_EN__SHIFT 0x00000003
+#define MPLL_FUNC_CNTL_2__TEST_BYPCLK_SRC_MASK 0x00000010L
+#define MPLL_FUNC_CNTL_2__TEST_BYPCLK_SRC__SHIFT 0x00000004
+#define MPLL_FUNC_CNTL_2__TEST_BYPMCLK_MASK 0x00000040L
+#define MPLL_FUNC_CNTL_2__TEST_BYPMCLK__SHIFT 0x00000006
+#define MPLL_FUNC_CNTL_2__TEST_FBDIV_FRAC_BYPASS_MASK 0x00000020L
+#define MPLL_FUNC_CNTL_2__TEST_FBDIV_FRAC_BYPASS__SHIFT 0x00000005
+#define MPLL_FUNC_CNTL_2__TEST_FBDIV_SSC_BYPASS_MASK 0x00000200L
+#define MPLL_FUNC_CNTL_2__TEST_FBDIV_SSC_BYPASS__SHIFT 0x00000009
+#define MPLL_FUNC_CNTL_2__TEST_VCTL_CNTRL_MASK 0x00000100L
+#define MPLL_FUNC_CNTL_2__TEST_VCTL_CNTRL__SHIFT 0x00000008
+#define MPLL_FUNC_CNTL_2__TEST_VCTL_EN_MASK 0x00000002L
+#define MPLL_FUNC_CNTL_2__TEST_VCTL_EN__SHIFT 0x00000001
+#define MPLL_FUNC_CNTL_2__VCTRLADC_EN_MASK 0x00000001L
+#define MPLL_FUNC_CNTL_2__VCTRLADC_EN__SHIFT 0x00000000
+#define MPLL_FUNC_CNTL__BG_100ADJ_MASK 0x00000f00L
+#define MPLL_FUNC_CNTL__BG_100ADJ__SHIFT 0x00000008
+#define MPLL_FUNC_CNTL__BG_135ADJ_MASK 0x000f0000L
+#define MPLL_FUNC_CNTL__BG_135ADJ__SHIFT 0x00000010
+#define MPLL_FUNC_CNTL__BWCTRL_MASK 0x0ff00000L
+#define MPLL_FUNC_CNTL__BWCTRL__SHIFT 0x00000014
+#define MPLL_FUNC_CNTL__REG_BIAS_MASK 0xc0000000L
+#define MPLL_FUNC_CNTL__REG_BIAS__SHIFT 0x0000001e
+#define MPLL_FUNC_CNTL__SPARE_0_MASK 0x00000020L
+#define MPLL_FUNC_CNTL__SPARE_0__SHIFT 0x00000005
+#define MPLL_SEQ_UCODE_1__INSTR0_MASK 0x0000000fL
+#define MPLL_SEQ_UCODE_1__INSTR0__SHIFT 0x00000000
+#define MPLL_SEQ_UCODE_1__INSTR1_MASK 0x000000f0L
+#define MPLL_SEQ_UCODE_1__INSTR1__SHIFT 0x00000004
+#define MPLL_SEQ_UCODE_1__INSTR2_MASK 0x00000f00L
+#define MPLL_SEQ_UCODE_1__INSTR2__SHIFT 0x00000008
+#define MPLL_SEQ_UCODE_1__INSTR3_MASK 0x0000f000L
+#define MPLL_SEQ_UCODE_1__INSTR3__SHIFT 0x0000000c
+#define MPLL_SEQ_UCODE_1__INSTR4_MASK 0x000f0000L
+#define MPLL_SEQ_UCODE_1__INSTR4__SHIFT 0x00000010
+#define MPLL_SEQ_UCODE_1__INSTR5_MASK 0x00f00000L
+#define MPLL_SEQ_UCODE_1__INSTR5__SHIFT 0x00000014
+#define MPLL_SEQ_UCODE_1__INSTR6_MASK 0x0f000000L
+#define MPLL_SEQ_UCODE_1__INSTR6__SHIFT 0x00000018
+#define MPLL_SEQ_UCODE_1__INSTR7_MASK 0xf0000000L
+#define MPLL_SEQ_UCODE_1__INSTR7__SHIFT 0x0000001c
+#define MPLL_SEQ_UCODE_2__INSTR10_MASK 0x00000f00L
+#define MPLL_SEQ_UCODE_2__INSTR10__SHIFT 0x00000008
+#define MPLL_SEQ_UCODE_2__INSTR11_MASK 0x0000f000L
+#define MPLL_SEQ_UCODE_2__INSTR11__SHIFT 0x0000000c
+#define MPLL_SEQ_UCODE_2__INSTR12_MASK 0x000f0000L
+#define MPLL_SEQ_UCODE_2__INSTR12__SHIFT 0x00000010
+#define MPLL_SEQ_UCODE_2__INSTR13_MASK 0x00f00000L
+#define MPLL_SEQ_UCODE_2__INSTR13__SHIFT 0x00000014
+#define MPLL_SEQ_UCODE_2__INSTR14_MASK 0x0f000000L
+#define MPLL_SEQ_UCODE_2__INSTR14__SHIFT 0x00000018
+#define MPLL_SEQ_UCODE_2__INSTR15_MASK 0xf0000000L
+#define MPLL_SEQ_UCODE_2__INSTR15__SHIFT 0x0000001c
+#define MPLL_SEQ_UCODE_2__INSTR8_MASK 0x0000000fL
+#define MPLL_SEQ_UCODE_2__INSTR8__SHIFT 0x00000000
+#define MPLL_SEQ_UCODE_2__INSTR9_MASK 0x000000f0L
+#define MPLL_SEQ_UCODE_2__INSTR9__SHIFT 0x00000004
+#define MPLL_SS1__CLKV_MASK 0x03ffffffL
+#define MPLL_SS1__CLKV__SHIFT 0x00000000
+#define MPLL_SS1__SPARE_MASK 0xfc000000L
+#define MPLL_SS1__SPARE__SHIFT 0x0000001a
+#define MPLL_SS2__CLKS_MASK 0x00000fffL
+#define MPLL_SS2__CLKS__SHIFT 0x00000000
+#define MPLL_SS2__SPARE_MASK 0xfffff000L
+#define MPLL_SS2__SPARE__SHIFT 0x0000000c
+#define MPLL_TIME__MPLL_LOCK_TIME_MASK 0x0000ffffL
+#define MPLL_TIME__MPLL_LOCK_TIME__SHIFT 0x00000000
+#define MPLL_TIME__MPLL_RESET_TIME_MASK 0xffff0000L
+#define MPLL_TIME__MPLL_RESET_TIME__SHIFT 0x00000010
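
The MASK/SHIFT pairs above follow the usual AMD register-header convention: for each REGISTER__FIELD, the _MASK selects the field's bits within the 32-bit register and the __SHIFT gives the field's least-significant bit position. A minimal sketch of how such pairs are typically consumed is below; the GET_FIELD/SET_FIELD helpers and the two wrappers are illustrative names invented here, not the in-tree driver's own macros, which have their own equivalents.

#include <stdint.h>

/* Illustrative helpers built on the REGISTER__FIELD_MASK /
 * REGISTER__FIELD__SHIFT naming convention used by the defines above.
 * (Hypothetical names; shown only to make the convention concrete.) */
#define GET_FIELD(val, reg, field) \
	(((val) & reg##__##field##_MASK) >> reg##__##field##__SHIFT)
#define SET_FIELD(val, reg, field, fval) \
	(((val) & ~reg##__##field##_MASK) | \
	 (((uint32_t)(fval) << reg##__##field##__SHIFT) & reg##__##field##_MASK))

/* Example: extract the integer feedback divider from a cached
 * MPLL_FUNC_CNTL_1 value, and program a new post divider into a
 * cached MPLL_AD_FUNC_CNTL value (both registers defined above). */
static inline uint32_t mpll_clkf(uint32_t func_cntl_1)
{
	return GET_FIELD(func_cntl_1, MPLL_FUNC_CNTL_1, CLKF);
}

static inline uint32_t mpll_set_post_div(uint32_t ad_func_cntl, uint32_t div)
{
	return SET_FIELD(ad_func_cntl, MPLL_AD_FUNC_CNTL, YCLK_POST_DIV, div);
}

The token-pasting form keeps call sites short while guaranteeing that a field is always masked and shifted with the matching pair, which is why generated headers like this one emit both macros for every field.
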
+#define VM_CONTEXT0_CNTL2__ALLOW_SUBSEQUENT_PROTECTION_FAULT_STATUS_ADDR_UPDATES_MASK 0x00000008L
+#define VM_CONTEXT0_CNTL2__ALLOW_SUBSEQUENT_PROTECTION_FAULT_STATUS_ADDR_UPDATES__SHIFT 0x00000003
+#define VM_CONTEXT0_CNTL2__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x00000001L
+#define VM_CONTEXT0_CNTL2__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x00000000
+#define VM_CONTEXT0_CNTL2__ENABLE_CLEAR_PROTECTION_FAULT_STATUS_ADDR_WHEN_INVALIDATE_CONTEXT_MASK 0x00000002L
+#define VM_CONTEXT0_CNTL2__ENABLE_CLEAR_PROTECTION_FAULT_STATUS_ADDR_WHEN_INVALIDATE_CONTEXT__SHIFT 0x00000001
+#define VM_CONTEXT0_CNTL2__ENABLE_INTERRUPT_PROCESSING_FOR_SUBSEQUENT_FAULTS_PER_CONTEXT_MASK 0x00000004L
+#define VM_CONTEXT0_CNTL2__ENABLE_INTERRUPT_PROCESSING_FOR_SUBSEQUENT_FAULTS_PER_CONTEXT__SHIFT 0x00000002
+#define VM_CONTEXT0_CNTL2__WAIT_FOR_IDLE_WHEN_INVALIDATE_MASK 0x00000010L
+#define VM_CONTEXT0_CNTL2__WAIT_FOR_IDLE_WHEN_INVALIDATE__SHIFT 0x00000004
+#define VM_CONTEXT0_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000080L
+#define VM_CONTEXT0_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x00000007
+#define VM_CONTEXT0_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000040L
+#define VM_CONTEXT0_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x00000006
+#define VM_CONTEXT0_CNTL__ENABLE_CONTEXT_MASK 0x00000001L
+#define VM_CONTEXT0_CNTL__ENABLE_CONTEXT__SHIFT 0x00000000
+#define VM_CONTEXT0_CNTL__PAGE_TABLE_BLOCK_SIZE_MASK 0x0f000000L
+#define VM_CONTEXT0_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT 0x00000018
+#define VM_CONTEXT0_CNTL__PAGE_TABLE_DEPTH_MASK 0x00000006L
+#define VM_CONTEXT0_CNTL__PAGE_TABLE_DEPTH__SHIFT 0x00000001
+#define VM_CONTEXT0_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000400L
+#define VM_CONTEXT0_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x0000000a
+#define VM_CONTEXT0_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000200L
+#define VM_CONTEXT0_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x00000009
+#define VM_CONTEXT0_CNTL__PDE0_PROTECTION_FAULT_ENABLE_SAVE_MASK 0x00000800L
+#define VM_CONTEXT0_CNTL__PDE0_PROTECTION_FAULT_ENABLE_SAVE__SHIFT 0x0000000b
+#define VM_CONTEXT0_CNTL__PRIVILEGED_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00400000L
+#define VM_CONTEXT0_CNTL__PRIVILEGED_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x00000016
+#define VM_CONTEXT0_CNTL__PRIVILEGED_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00200000L
+#define VM_CONTEXT0_CNTL__PRIVILEGED_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x00000015
+#define VM_CONTEXT0_CNTL__PRIVILEGED_PROTECTION_FAULT_ENABLE_SAVE_MASK 0x00800000L
+#define VM_CONTEXT0_CNTL__PRIVILEGED_PROTECTION_FAULT_ENABLE_SAVE__SHIFT 0x00000017
+#define VM_CONTEXT0_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000010L
+#define VM_CONTEXT0_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x00000004
+#define VM_CONTEXT0_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000008L
+#define VM_CONTEXT0_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x00000003
+#define VM_CONTEXT0_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00010000L
+#define VM_CONTEXT0_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x00000010
+#define VM_CONTEXT0_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00008000L
+#define VM_CONTEXT0_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x0000000f
+#define VM_CONTEXT0_CNTL__READ_PROTECTION_FAULT_ENABLE_SAVE_MASK 0x00020000L
+#define VM_CONTEXT0_CNTL__READ_PROTECTION_FAULT_ENABLE_SAVE__SHIFT 0x00000011
+#define VM_CONTEXT0_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00002000L
+#define VM_CONTEXT0_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x0000000d
+#define VM_CONTEXT0_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00001000L
+#define VM_CONTEXT0_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x0000000c
+#define VM_CONTEXT0_CNTL__VALID_PROTECTION_FAULT_ENABLE_SAVE_MASK 0x00004000L
+#define VM_CONTEXT0_CNTL__VALID_PROTECTION_FAULT_ENABLE_SAVE__SHIFT 0x0000000e
+#define VM_CONTEXT0_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00080000L
+#define VM_CONTEXT0_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x00000013
+#define VM_CONTEXT0_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00040000L
+#define VM_CONTEXT0_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x00000012
+#define VM_CONTEXT0_CNTL__WRITE_PROTECTION_FAULT_ENABLE_SAVE_MASK 0x00100000L
+#define VM_CONTEXT0_CNTL__WRITE_PROTECTION_FAULT_ENABLE_SAVE__SHIFT 0x00000014
+#define VM_CONTEXT0_PAGE_TABLE_BASE_ADDR__PHYSICAL_PAGE_NUMBER_MASK 0x0fffffffL
+#define VM_CONTEXT0_PAGE_TABLE_BASE_ADDR__PHYSICAL_PAGE_NUMBER__SHIFT 0x00000000
+#define VM_CONTEXT0_PAGE_TABLE_END_ADDR__LOGICAL_PAGE_NUMBER_MASK 0x0fffffffL
+#define VM_CONTEXT0_PAGE_TABLE_END_ADDR__LOGICAL_PAGE_NUMBER__SHIFT 0x00000000
+#define VM_CONTEXT0_PAGE_TABLE_START_ADDR__LOGICAL_PAGE_NUMBER_MASK 0x0fffffffL
+#define VM_CONTEXT0_PAGE_TABLE_START_ADDR__LOGICAL_PAGE_NUMBER__SHIFT 0x00000000
+#define VM_CONTEXT0_PROTECTION_FAULT_ADDR__LOGICAL_PAGE_ADDR_MASK 0x0fffffffL
+#define VM_CONTEXT0_PROTECTION_FAULT_ADDR__LOGICAL_PAGE_ADDR__SHIFT 0x00000000
+#define VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR__PHYSICAL_PAGE_ADDR_MASK 0x0fffffffL
+#define VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR__PHYSICAL_PAGE_ADDR__SHIFT 0x00000000
+#define VM_CONTEXT0_PROTECTION_FAULT_STATUS__MEMORY_CLIENT_ID_MASK 0x000ff000L
+#define VM_CONTEXT0_PROTECTION_FAULT_STATUS__MEMORY_CLIENT_ID__SHIFT 0x0000000c
+#define VM_CONTEXT0_PROTECTION_FAULT_STATUS__MEMORY_CLIENT_RW_MASK 0x01000000L
+#define VM_CONTEXT0_PROTECTION_FAULT_STATUS__MEMORY_CLIENT_RW__SHIFT 0x00000018
+#define VM_CONTEXT0_PROTECTION_FAULT_STATUS__PROTECTIONS_MASK 0x000000ffL
+#define VM_CONTEXT0_PROTECTION_FAULT_STATUS__PROTECTIONS__SHIFT 0x00000000
+#define VM_CONTEXT0_PROTECTION_FAULT_STATUS__VMID_MASK 0x1e000000L
+#define VM_CONTEXT0_PROTECTION_FAULT_STATUS__VMID__SHIFT 0x00000019
+#define VM_CONTEXT10_PAGE_TABLE_BASE_ADDR__PHYSICAL_PAGE_NUMBER_MASK 0x0fffffffL
+#define VM_CONTEXT10_PAGE_TABLE_BASE_ADDR__PHYSICAL_PAGE_NUMBER__SHIFT 0x00000000
+#define VM_CONTEXT11_PAGE_TABLE_BASE_ADDR__PHYSICAL_PAGE_NUMBER_MASK 0x0fffffffL
+#define VM_CONTEXT11_PAGE_TABLE_BASE_ADDR__PHYSICAL_PAGE_NUMBER__SHIFT 0x00000000
+#define VM_CONTEXT12_PAGE_TABLE_BASE_ADDR__PHYSICAL_PAGE_NUMBER_MASK 0x0fffffffL
+#define VM_CONTEXT12_PAGE_TABLE_BASE_ADDR__PHYSICAL_PAGE_NUMBER__SHIFT 0x00000000
+#define VM_CONTEXT13_PAGE_TABLE_BASE_ADDR__PHYSICAL_PAGE_NUMBER_MASK 0x0fffffffL
+#define VM_CONTEXT13_PAGE_TABLE_BASE_ADDR__PHYSICAL_PAGE_NUMBER__SHIFT 0x00000000
+#define VM_CONTEXT14_PAGE_TABLE_BASE_ADDR__PHYSICAL_PAGE_NUMBER_MASK 0x0fffffffL
+#define VM_CONTEXT14_PAGE_TABLE_BASE_ADDR__PHYSICAL_PAGE_NUMBER__SHIFT 0x00000000
+#define VM_CONTEXT15_PAGE_TABLE_BASE_ADDR__PHYSICAL_PAGE_NUMBER_MASK 0x0fffffffL
+#define VM_CONTEXT15_PAGE_TABLE_BASE_ADDR__PHYSICAL_PAGE_NUMBER__SHIFT 0x00000000
+#define VM_CONTEXT1_CNTL2__ALLOW_SUBSEQUENT_PROTECTION_FAULT_STATUS_ADDR_UPDATES_MASK 0x00000008L
+#define VM_CONTEXT1_CNTL2__ALLOW_SUBSEQUENT_PROTECTION_FAULT_STATUS_ADDR_UPDATES__SHIFT 0x00000003
+#define VM_CONTEXT1_CNTL2__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x00000001L
+#define VM_CONTEXT1_CNTL2__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x00000000
+#define VM_CONTEXT1_CNTL2__ENABLE_CLEAR_PROTECTION_FAULT_STATUS_ADDR_WHEN_INVALIDATE_CONTEXT_MASK 0x00000002L
+#define VM_CONTEXT1_CNTL2__ENABLE_CLEAR_PROTECTION_FAULT_STATUS_ADDR_WHEN_INVALIDATE_CONTEXT__SHIFT 0x00000001
+#define VM_CONTEXT1_CNTL2__ENABLE_INTERRUPT_PROCESSING_FOR_SUBSEQUENT_FAULTS_PER_CONTEXT_MASK 0x00000004L
+#define VM_CONTEXT1_CNTL2__ENABLE_INTERRUPT_PROCESSING_FOR_SUBSEQUENT_FAULTS_PER_CONTEXT__SHIFT 0x00000002
+#define VM_CONTEXT1_CNTL2__WAIT_FOR_IDLE_WHEN_INVALIDATE_MASK 0x00000010L
+#define VM_CONTEXT1_CNTL2__WAIT_FOR_IDLE_WHEN_INVALIDATE__SHIFT 0x00000004
+#define VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000080L
+#define VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x00000007
+#define VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000040L
+#define VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x00000006
+#define VM_CONTEXT1_CNTL__ENABLE_CONTEXT_MASK 0x00000001L
+#define VM_CONTEXT1_CNTL__ENABLE_CONTEXT__SHIFT 0x00000000
+#define VM_CONTEXT1_CNTL__PAGE_TABLE_BLOCK_SIZE_MASK 0x0f000000L
+#define VM_CONTEXT1_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT 0x00000018
+#define VM_CONTEXT1_CNTL__PAGE_TABLE_DEPTH_MASK 0x00000006L
+#define VM_CONTEXT1_CNTL__PAGE_TABLE_DEPTH__SHIFT 0x00000001
+#define VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000400L
+#define VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x0000000a
+#define VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000200L
+#define VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x00000009
+#define VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_SAVE_MASK 0x00000800L
+#define VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_SAVE__SHIFT 0x0000000b
+#define VM_CONTEXT1_CNTL__PRIVILEGED_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00400000L
+#define VM_CONTEXT1_CNTL__PRIVILEGED_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x00000016
+#define VM_CONTEXT1_CNTL__PRIVILEGED_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00200000L
+#define VM_CONTEXT1_CNTL__PRIVILEGED_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x00000015
+#define VM_CONTEXT1_CNTL__PRIVILEGED_PROTECTION_FAULT_ENABLE_SAVE_MASK 0x00800000L
+#define VM_CONTEXT1_CNTL__PRIVILEGED_PROTECTION_FAULT_ENABLE_SAVE__SHIFT 0x00000017
+#define VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000010L
+#define VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x00000004
+#define VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000008L
+#define VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x00000003
+#define VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00010000L
+#define VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x00000010
+#define VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00008000L
+#define VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x0000000f
+#define VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_SAVE_MASK 0x00020000L
+#define VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_SAVE__SHIFT 0x00000011
+#define VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00002000L
+#define VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x0000000d
+#define VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00001000L
+#define VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x0000000c
+#define VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_SAVE_MASK 0x00004000L
+#define VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_SAVE__SHIFT 0x0000000e
+#define VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00080000L
+#define VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x00000013
+#define VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00040000L
+#define VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x00000012
+#define VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_SAVE_MASK 0x00100000L
+#define VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_SAVE__SHIFT 0x00000014
+#define VM_CONTEXT1_PAGE_TABLE_BASE_ADDR__PHYSICAL_PAGE_NUMBER_MASK 0x0fffffffL
+#define VM_CONTEXT1_PAGE_TABLE_BASE_ADDR__PHYSICAL_PAGE_NUMBER__SHIFT 0x00000000
+#define VM_CONTEXT1_PAGE_TABLE_END_ADDR__LOGICAL_PAGE_NUMBER_MASK 0x0fffffffL
+#define VM_CONTEXT1_PAGE_TABLE_END_ADDR__LOGICAL_PAGE_NUMBER__SHIFT 0x00000000
+#define VM_CONTEXT1_PAGE_TABLE_START_ADDR__LOGICAL_PAGE_NUMBER_MASK 0x0fffffffL
+#define VM_CONTEXT1_PAGE_TABLE_START_ADDR__LOGICAL_PAGE_NUMBER__SHIFT 0x00000000
+#define VM_CONTEXT1_PROTECTION_FAULT_ADDR__LOGICAL_PAGE_ADDR_MASK 0x0fffffffL
+#define VM_CONTEXT1_PROTECTION_FAULT_ADDR__LOGICAL_PAGE_ADDR__SHIFT 0x00000000
+#define VM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR__PHYSICAL_PAGE_ADDR_MASK 0x0fffffffL
+#define VM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR__PHYSICAL_PAGE_ADDR__SHIFT 0x00000000
+#define VM_CONTEXT1_PROTECTION_FAULT_STATUS__MEMORY_CLIENT_ID_MASK 0x000ff000L
+#define VM_CONTEXT1_PROTECTION_FAULT_STATUS__MEMORY_CLIENT_ID__SHIFT 0x0000000c
+#define VM_CONTEXT1_PROTECTION_FAULT_STATUS__MEMORY_CLIENT_RW_MASK 0x01000000L
+#define VM_CONTEXT1_PROTECTION_FAULT_STATUS__MEMORY_CLIENT_RW__SHIFT 0x00000018
+#define VM_CONTEXT1_PROTECTION_FAULT_STATUS__PROTECTIONS_MASK 0x000000ffL
+#define VM_CONTEXT1_PROTECTION_FAULT_STATUS__PROTECTIONS__SHIFT 0x00000000
+#define VM_CONTEXT1_PROTECTION_FAULT_STATUS__VMID_MASK 0x1e000000L
+#define VM_CONTEXT1_PROTECTION_FAULT_STATUS__VMID__SHIFT 0x00000019
+#define VM_CONTEXT2_PAGE_TABLE_BASE_ADDR__PHYSICAL_PAGE_NUMBER_MASK 0x0fffffffL
+#define VM_CONTEXT2_PAGE_TABLE_BASE_ADDR__PHYSICAL_PAGE_NUMBER__SHIFT 0x00000000
+#define VM_CONTEXT3_PAGE_TABLE_BASE_ADDR__PHYSICAL_PAGE_NUMBER_MASK 0x0fffffffL
+#define VM_CONTEXT3_PAGE_TABLE_BASE_ADDR__PHYSICAL_PAGE_NUMBER__SHIFT 0x00000000
+#define VM_CONTEXT4_PAGE_TABLE_BASE_ADDR__PHYSICAL_PAGE_NUMBER_MASK 0x0fffffffL
+#define VM_CONTEXT4_PAGE_TABLE_BASE_ADDR__PHYSICAL_PAGE_NUMBER__SHIFT 0x00000000
+#define VM_CONTEXT5_PAGE_TABLE_BASE_ADDR__PHYSICAL_PAGE_NUMBER_MASK 0x0fffffffL
+#define VM_CONTEXT5_PAGE_TABLE_BASE_ADDR__PHYSICAL_PAGE_NUMBER__SHIFT 0x00000000
+#define VM_CONTEXT6_PAGE_TABLE_BASE_ADDR__PHYSICAL_PAGE_NUMBER_MASK 0x0fffffffL
+#define VM_CONTEXT6_PAGE_TABLE_BASE_ADDR__PHYSICAL_PAGE_NUMBER__SHIFT 0x00000000
+#define VM_CONTEXT7_PAGE_TABLE_BASE_ADDR__PHYSICAL_PAGE_NUMBER_MASK 0x0fffffffL
+#define VM_CONTEXT7_PAGE_TABLE_BASE_ADDR__PHYSICAL_PAGE_NUMBER__SHIFT 0x00000000
+#define VM_CONTEXT8_PAGE_TABLE_BASE_ADDR__PHYSICAL_PAGE_NUMBER_MASK 0x0fffffffL
+#define VM_CONTEXT8_PAGE_TABLE_BASE_ADDR__PHYSICAL_PAGE_NUMBER__SHIFT 0x00000000
+#define VM_CONTEXT9_PAGE_TABLE_BASE_ADDR__PHYSICAL_PAGE_NUMBER_MASK 0x0fffffffL
+#define VM_CONTEXT9_PAGE_TABLE_BASE_ADDR__PHYSICAL_PAGE_NUMBER__SHIFT 0x00000000
+#define VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_0_MASK 0x00000001L
+#define VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_0__SHIFT 0x00000000
+#define VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_10_MASK 0x00000400L
+#define VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_10__SHIFT 0x0000000a
+#define VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_11_MASK 0x00000800L
+#define VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_11__SHIFT 0x0000000b
+#define VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_12_MASK 0x00001000L
+#define VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_12__SHIFT 0x0000000c
+#define VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_13_MASK 0x00002000L
+#define VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_13__SHIFT 0x0000000d
+#define VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_14_MASK 0x00004000L
+#define VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_14__SHIFT 0x0000000e
+#define VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_15_MASK 0x00008000L
+#define VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_15__SHIFT 0x0000000f
+#define VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_1_MASK 0x00000002L
+#define VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_1__SHIFT 0x00000001
+#define VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_2_MASK 0x00000004L
+#define VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_2__SHIFT 0x00000002
+#define VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_3_MASK 0x00000008L
+#define VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_3__SHIFT 0x00000003
+#define VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_4_MASK 0x00000010L
+#define VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_4__SHIFT 0x00000004
+#define VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_5_MASK 0x00000020L
+#define VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_5__SHIFT 0x00000005
+#define VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_6_MASK 0x00000040L
+#define VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_6__SHIFT 0x00000006
+#define VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_7_MASK 0x00000080L
+#define VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_7__SHIFT 0x00000007
+#define VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_8_MASK 0x00000100L
+#define VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_8__SHIFT 0x00000008
+#define VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_9_MASK 0x00000200L
+#define VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_9__SHIFT 0x00000009
+#define VM_DEBUG__FLAGS_MASK 0xffffffffL
+#define VM_DEBUG__FLAGS__SHIFT 0x00000000
+#define VM_DUMMY_PAGE_FAULT_ADDR__DUMMY_PAGE_ADDR_MASK 0x0fffffffL
+#define VM_DUMMY_PAGE_FAULT_ADDR__DUMMY_PAGE_ADDR__SHIFT 0x00000000
+#define VM_DUMMY_PAGE_FAULT_CNTL__DUMMY_PAGE_ADDRESS_LOGICAL_MASK 0x00000002L
+#define VM_DUMMY_PAGE_FAULT_CNTL__DUMMY_PAGE_ADDRESS_LOGICAL__SHIFT 0x00000001
+#define VM_DUMMY_PAGE_FAULT_CNTL__DUMMY_PAGE_COMPARE_MASK_MASK 0x0000000cL
+#define VM_DUMMY_PAGE_FAULT_CNTL__DUMMY_PAGE_COMPARE_MASK__SHIFT 0x00000002
+#define VM_DUMMY_PAGE_FAULT_CNTL__DUMMY_PAGE_FAULT_ENABLE_MASK 0x00000001L
+#define VM_DUMMY_PAGE_FAULT_CNTL__DUMMY_PAGE_FAULT_ENABLE__SHIFT 0x00000000
+#define VM_FAULT_CLIENT_ID__MEMORY_CLIENT_MASK 0x000001ffL
+#define VM_FAULT_CLIENT_ID__MEMORY_CLIENT_MASK_MASK 0x0003fe00L
+#define VM_FAULT_CLIENT_ID__MEMORY_CLIENT_MASK__SHIFT 0x00000009
+#define VM_FAULT_CLIENT_ID__MEMORY_CLIENT__SHIFT 0x00000000
+#define VM_INVALIDATE_REQUEST__INVALIDATE_DOMAIN_0_MASK 0x00000001L
+#define VM_INVALIDATE_REQUEST__INVALIDATE_DOMAIN_0__SHIFT 0x00000000
+#define VM_INVALIDATE_REQUEST__INVALIDATE_DOMAIN_10_MASK 0x00000400L
+#define VM_INVALIDATE_REQUEST__INVALIDATE_DOMAIN_10__SHIFT 0x0000000a
+#define VM_INVALIDATE_REQUEST__INVALIDATE_DOMAIN_11_MASK 0x00000800L
+#define VM_INVALIDATE_REQUEST__INVALIDATE_DOMAIN_11__SHIFT 0x0000000b
+#define VM_INVALIDATE_REQUEST__INVALIDATE_DOMAIN_12_MASK 0x00001000L
+#define VM_INVALIDATE_REQUEST__INVALIDATE_DOMAIN_12__SHIFT 0x0000000c
+#define VM_INVALIDATE_REQUEST__INVALIDATE_DOMAIN_13_MASK 0x00002000L
+#define VM_INVALIDATE_REQUEST__INVALIDATE_DOMAIN_13__SHIFT 0x0000000d
+#define VM_INVALIDATE_REQUEST__INVALIDATE_DOMAIN_14_MASK 0x00004000L
+#define VM_INVALIDATE_REQUEST__INVALIDATE_DOMAIN_14__SHIFT 0x0000000e
+#define VM_INVALIDATE_REQUEST__INVALIDATE_DOMAIN_15_MASK 0x00008000L
+#define VM_INVALIDATE_REQUEST__INVALIDATE_DOMAIN_15__SHIFT 0x0000000f
+#define VM_INVALIDATE_REQUEST__INVALIDATE_DOMAIN_1_MASK 0x00000002L
+#define VM_INVALIDATE_REQUEST__INVALIDATE_DOMAIN_1__SHIFT 0x00000001
+#define VM_INVALIDATE_REQUEST__INVALIDATE_DOMAIN_2_MASK 0x00000004L
+#define VM_INVALIDATE_REQUEST__INVALIDATE_DOMAIN_2__SHIFT 0x00000002
+#define VM_INVALIDATE_REQUEST__INVALIDATE_DOMAIN_3_MASK 0x00000008L
+#define VM_INVALIDATE_REQUEST__INVALIDATE_DOMAIN_3__SHIFT 0x00000003
+#define VM_INVALIDATE_REQUEST__INVALIDATE_DOMAIN_4_MASK 0x00000010L
+#define VM_INVALIDATE_REQUEST__INVALIDATE_DOMAIN_4__SHIFT 0x00000004
+#define VM_INVALIDATE_REQUEST__INVALIDATE_DOMAIN_5_MASK 0x00000020L
+#define VM_INVALIDATE_REQUEST__INVALIDATE_DOMAIN_5__SHIFT 0x00000005
+#define VM_INVALIDATE_REQUEST__INVALIDATE_DOMAIN_6_MASK 0x00000040L
+#define VM_INVALIDATE_REQUEST__INVALIDATE_DOMAIN_6__SHIFT 0x00000006
+#define VM_INVALIDATE_REQUEST__INVALIDATE_DOMAIN_7_MASK 0x00000080L
+#define VM_INVALIDATE_REQUEST__INVALIDATE_DOMAIN_7__SHIFT 0x00000007
+#define VM_INVALIDATE_REQUEST__INVALIDATE_DOMAIN_8_MASK 0x00000100L
+#define VM_INVALIDATE_REQUEST__INVALIDATE_DOMAIN_8__SHIFT 0x00000008
+#define VM_INVALIDATE_REQUEST__INVALIDATE_DOMAIN_9_MASK 0x00000200L
+#define VM_INVALIDATE_REQUEST__INVALIDATE_DOMAIN_9__SHIFT 0x00000009
+#define VM_INVALIDATE_RESPONSE__DOMAIN_INVALIDATED_0_MASK 0x00000001L
+#define VM_INVALIDATE_RESPONSE__DOMAIN_INVALIDATED_0__SHIFT 0x00000000
+#define VM_INVALIDATE_RESPONSE__DOMAIN_INVALIDATED_10_MASK 0x00000400L
+#define VM_INVALIDATE_RESPONSE__DOMAIN_INVALIDATED_10__SHIFT 0x0000000a
+#define VM_INVALIDATE_RESPONSE__DOMAIN_INVALIDATED_11_MASK 0x00000800L
+#define VM_INVALIDATE_RESPONSE__DOMAIN_INVALIDATED_11__SHIFT 0x0000000b
+#define VM_INVALIDATE_RESPONSE__DOMAIN_INVALIDATED_12_MASK 0x00001000L
+#define VM_INVALIDATE_RESPONSE__DOMAIN_INVALIDATED_12__SHIFT 0x0000000c
+#define VM_INVALIDATE_RESPONSE__DOMAIN_INVALIDATED_13_MASK 0x00002000L
+#define VM_INVALIDATE_RESPONSE__DOMAIN_INVALIDATED_13__SHIFT 0x0000000d
+#define VM_INVALIDATE_RESPONSE__DOMAIN_INVALIDATED_14_MASK 0x00004000L
+#define VM_INVALIDATE_RESPONSE__DOMAIN_INVALIDATED_14__SHIFT 0x0000000e
+#define VM_INVALIDATE_RESPONSE__DOMAIN_INVALIDATED_15_MASK 0x00008000L
+#define VM_INVALIDATE_RESPONSE__DOMAIN_INVALIDATED_15__SHIFT 0x0000000f
+#define VM_INVALIDATE_RESPONSE__DOMAIN_INVALIDATED_1_MASK 0x00000002L
+#define VM_INVALIDATE_RESPONSE__DOMAIN_INVALIDATED_1__SHIFT 0x00000001
+#define VM_INVALIDATE_RESPONSE__DOMAIN_INVALIDATED_2_MASK 0x00000004L
+#define VM_INVALIDATE_RESPONSE__DOMAIN_INVALIDATED_2__SHIFT 0x00000002
+#define VM_INVALIDATE_RESPONSE__DOMAIN_INVALIDATED_3_MASK 0x00000008L
+#define VM_INVALIDATE_RESPONSE__DOMAIN_INVALIDATED_3__SHIFT 0x00000003
+#define VM_INVALIDATE_RESPONSE__DOMAIN_INVALIDATED_4_MASK 0x00000010L
+#define VM_INVALIDATE_RESPONSE__DOMAIN_INVALIDATED_4__SHIFT 0x00000004
+#define VM_INVALIDATE_RESPONSE__DOMAIN_INVALIDATED_5_MASK 0x00000020L
+#define VM_INVALIDATE_RESPONSE__DOMAIN_INVALIDATED_5__SHIFT 0x00000005
+#define VM_INVALIDATE_RESPONSE__DOMAIN_INVALIDATED_6_MASK 0x00000040L
+#define VM_INVALIDATE_RESPONSE__DOMAIN_INVALIDATED_6__SHIFT 0x00000006
+#define VM_INVALIDATE_RESPONSE__DOMAIN_INVALIDATED_7_MASK 0x00000080L
+#define VM_INVALIDATE_RESPONSE__DOMAIN_INVALIDATED_7__SHIFT 0x00000007
+#define VM_INVALIDATE_RESPONSE__DOMAIN_INVALIDATED_8_MASK 0x00000100L
+#define VM_INVALIDATE_RESPONSE__DOMAIN_INVALIDATED_8__SHIFT 0x00000008
+#define VM_INVALIDATE_RESPONSE__DOMAIN_INVALIDATED_9_MASK 0x00000200L
+#define VM_INVALIDATE_RESPONSE__DOMAIN_INVALIDATED_9__SHIFT 0x00000009
+#define VM_L2_BANK_SELECT_MASKA__BANK_SELECT_MASK_MASK 0x0fffffffL
+#define VM_L2_BANK_SELECT_MASKA__BANK_SELECT_MASK__SHIFT 0x00000000
+#define VM_L2_BANK_SELECT_MASKB__BANK_SELECT_MASK_MASK 0x000000ffL
+#define VM_L2_BANK_SELECT_MASKB__BANK_SELECT_MASK__SHIFT 0x00000000
+#define VM_L2_CG__ENABLE_MASK 0x00040000L
+#define VM_L2_CG__ENABLE__SHIFT 0x00000012
+#define VM_L2_CG__MEM_LS_ENABLE_MASK 0x00080000L
+#define VM_L2_CG__MEM_LS_ENABLE__SHIFT 0x00000013
+#define VM_L2_CG__OFFDLY_MASK 0x00000fc0L
+#define VM_L2_CG__OFFDLY__SHIFT 0x00000006
+#define VM_L2_CNTL2__DISABLE_BIGK_CACHE_OPTIMIZATION_MASK 0x00400000L
+#define VM_L2_CNTL2__DISABLE_BIGK_CACHE_OPTIMIZATION__SHIFT 0x00000016
+#define VM_L2_CNTL2__DISABLE_INVALIDATE_PER_DOMAIN_MASK 0x00200000L
+#define VM_L2_CNTL2__DISABLE_INVALIDATE_PER_DOMAIN__SHIFT 0x00000015
+#define VM_L2_CNTL2__INVALIDATE_ALL_L1_TLBS_MASK 0x00000001L
+#define VM_L2_CNTL2__INVALIDATE_ALL_L1_TLBS__SHIFT 0x00000000
+#define VM_L2_CNTL2__INVALIDATE_CACHE_MODE_MASK 0x0c000000L
+#define VM_L2_CNTL2__INVALIDATE_CACHE_MODE__SHIFT 0x0000001a
+#define VM_L2_CNTL2__INVALIDATE_L2_CACHE_MASK 0x00000002L
+#define VM_L2_CNTL2__INVALIDATE_L2_CACHE__SHIFT 0x00000001
+#define VM_L2_CNTL2__L2_CACHE_BIGK_VMID_MODE_MASK 0x03800000L
+#define VM_L2_CNTL2__L2_CACHE_BIGK_VMID_MODE__SHIFT 0x00000017
+#define VM_L2_CNTL3__BANK_SELECT_MASK 0x0000003fL
+#define VM_L2_CNTL3__BANK_SELECT__SHIFT 0x00000000
+#define VM_L2_CNTL3__L2_CACHE_4K_EFFECTIVE_SIZE_MASK 0x00e00000L
+#define VM_L2_CNTL3__L2_CACHE_4K_EFFECTIVE_SIZE__SHIFT 0x00000015
+#define VM_L2_CNTL3__L2_CACHE_4K_FORCE_MISS_MASK 0x10000000L
+#define VM_L2_CNTL3__L2_CACHE_4K_FORCE_MISS__SHIFT 0x0000001c
+#define VM_L2_CNTL3__L2_CACHE_BIGK_ASSOCIATIVITY_MASK 0x00100000L
+#define VM_L2_CNTL3__L2_CACHE_BIGK_ASSOCIATIVITY__SHIFT 0x00000014
+#define VM_L2_CNTL3__L2_CACHE_BIGK_EFFECTIVE_SIZE_MASK 0x0f000000L
+#define VM_L2_CNTL3__L2_CACHE_BIGK_EFFECTIVE_SIZE__SHIFT 0x00000018
+#define VM_L2_CNTL3__L2_CACHE_BIGK_FORCE_MISS_MASK 0x20000000L
+#define VM_L2_CNTL3__L2_CACHE_BIGK_FORCE_MISS__SHIFT 0x0000001d
+#define VM_L2_CNTL3__L2_CACHE_BIGK_FRAGMENT_SIZE_MASK 0x000f8000L
+#define VM_L2_CNTL3__L2_CACHE_BIGK_FRAGMENT_SIZE__SHIFT 0x0000000f
+#define VM_L2_CNTL3__L2_CACHE_UPDATE_MODE_MASK 0x000000c0L
+#define VM_L2_CNTL3__L2_CACHE_UPDATE_MODE__SHIFT 0x00000006
+#define VM_L2_CNTL3__L2_CACHE_UPDATE_WILDCARD_REFERENCE_VALUE_MASK 0x00001f00L
+#define VM_L2_CNTL3__L2_CACHE_UPDATE_WILDCARD_REFERENCE_VALUE__SHIFT 0x00000008
+#define VM_L2_CNTL__CONTEXT1_IDENTITY_ACCESS_MODE_MASK 0x00180000L
+#define VM_L2_CNTL__CONTEXT1_IDENTITY_ACCESS_MODE__SHIFT 0x00000013
+#define VM_L2_CNTL__EFFECTIVE_L2_QUEUE_SIZE_MASK 0x00038000L
+#define VM_L2_CNTL__EFFECTIVE_L2_QUEUE_SIZE__SHIFT 0x0000000f
+#define VM_L2_CNTL__ENABLE_L2_CACHE_MASK 0x00000001L
+#define VM_L2_CNTL__ENABLE_L2_CACHE__SHIFT 0x00000000
+#define VM_L2_CNTL__ENABLE_L2_FRAGMENT_PROCESSING_MASK 0x00000002L
+#define VM_L2_CNTL__ENABLE_L2_FRAGMENT_PROCESSING__SHIFT 0x00000001
+#define VM_L2_CNTL__ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE_MASK 0x00000400L
+#define VM_L2_CNTL__ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE__SHIFT 0x0000000a
+#define VM_L2_CNTL__ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE_MASK 0x00000200L
+#define VM_L2_CNTL__ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE__SHIFT 0x00000009
+#define VM_L2_CNTL__IDENTITY_MODE_FRAGMENT_SIZE_MASK 0x03e00000L
+#define VM_L2_CNTL__IDENTITY_MODE_FRAGMENT_SIZE__SHIFT 0x00000015
+#define VM_L2_CNTL__L2_CACHE_4K_SWAP_TAG_INDEX_LSBS_MASK 0x0c000000L
+#define VM_L2_CNTL__L2_CACHE_4K_SWAP_TAG_INDEX_LSBS__SHIFT 0x0000001a
+#define VM_L2_CNTL__L2_CACHE_BIGK_SWAP_TAG_INDEX_LSBS_MASK 0x70000000L
+#define VM_L2_CNTL__L2_CACHE_BIGK_SWAP_TAG_INDEX_LSBS__SHIFT 0x0000001c
+#define VM_L2_CNTL__L2_CACHE_PDE_ENDIAN_SWAP_MODE_MASK 0x00000030L
+#define VM_L2_CNTL__L2_CACHE_PDE_ENDIAN_SWAP_MODE__SHIFT 0x00000004
+#define VM_L2_CNTL__L2_CACHE_PTE_ENDIAN_SWAP_MODE_MASK 0x0000000cL
+#define VM_L2_CNTL__L2_CACHE_PTE_ENDIAN_SWAP_MODE__SHIFT 0x00000002
+#define VM_L2_CNTL__L2_PDE0_CACHE_SPLIT_MODE_MASK 0x00007000L
+#define VM_L2_CNTL__L2_PDE0_CACHE_SPLIT_MODE__SHIFT 0x0000000c
+#define VM_L2_CNTL__L2_PDE0_CACHE_TAG_GENERATION_MODE_MASK 0x00000100L
+#define VM_L2_CNTL__L2_PDE0_CACHE_TAG_GENERATION_MODE__SHIFT 0x00000008
+#define VM_L2_CNTL__PDE_FAULT_CLASSIFICATION_MASK 0x00040000L
+#define VM_L2_CNTL__PDE_FAULT_CLASSIFICATION__SHIFT 0x00000012
+#define VM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR__LOGICAL_PAGE_NUMBER_MASK 0x0fffffffL
+#define VM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR__LOGICAL_PAGE_NUMBER__SHIFT 0x00000000
+#define VM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR__LOGICAL_PAGE_NUMBER_MASK 0x0fffffffL
+#define VM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR__LOGICAL_PAGE_NUMBER__SHIFT 0x00000000
+#define VM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET__PHYSICAL_PAGE_OFFSET_MASK 0x0fffffffL
+#define VM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET__PHYSICAL_PAGE_OFFSET__SHIFT 0x00000000
+#define VM_L2_STATUS__CONTEXT_DOMAIN_BUSY_MASK 0x0001fffeL
+#define VM_L2_STATUS__CONTEXT_DOMAIN_BUSY__SHIFT 0x00000001
+#define VM_L2_STATUS__L2_BUSY_MASK 0x00000001L
+#define VM_L2_STATUS__L2_BUSY__SHIFT 0x00000000
+#define VM_PRT_APERTURE0_HIGH_ADDR__LOGICAL_PAGE_NUMBER_MASK 0x0fffffffL
+#define VM_PRT_APERTURE0_HIGH_ADDR__LOGICAL_PAGE_NUMBER__SHIFT 0x00000000
+#define VM_PRT_APERTURE0_LOW_ADDR__LOGICAL_PAGE_NUMBER_MASK 0x0fffffffL
+#define VM_PRT_APERTURE0_LOW_ADDR__LOGICAL_PAGE_NUMBER__SHIFT 0x00000000
+#define VM_PRT_APERTURE1_HIGH_ADDR__LOGICAL_PAGE_NUMBER_MASK 0x0fffffffL
+#define VM_PRT_APERTURE1_HIGH_ADDR__LOGICAL_PAGE_NUMBER__SHIFT 0x00000000
+#define VM_PRT_APERTURE1_LOW_ADDR__LOGICAL_PAGE_NUMBER_MASK 0x0fffffffL
+#define VM_PRT_APERTURE1_LOW_ADDR__LOGICAL_PAGE_NUMBER__SHIFT 0x00000000
+#define VM_PRT_APERTURE2_HIGH_ADDR__LOGICAL_PAGE_NUMBER_MASK 0x0fffffffL
+#define VM_PRT_APERTURE2_HIGH_ADDR__LOGICAL_PAGE_NUMBER__SHIFT 0x00000000
+#define VM_PRT_APERTURE2_LOW_ADDR__LOGICAL_PAGE_NUMBER_MASK 0x0fffffffL
+#define VM_PRT_APERTURE2_LOW_ADDR__LOGICAL_PAGE_NUMBER__SHIFT 0x00000000
+#define VM_PRT_APERTURE3_HIGH_ADDR__LOGICAL_PAGE_NUMBER_MASK 0x0fffffffL
+#define VM_PRT_APERTURE3_HIGH_ADDR__LOGICAL_PAGE_NUMBER__SHIFT 0x00000000
+#define VM_PRT_APERTURE3_LOW_ADDR__LOGICAL_PAGE_NUMBER_MASK 0x0fffffffL
+#define VM_PRT_APERTURE3_LOW_ADDR__LOGICAL_PAGE_NUMBER__SHIFT 0x00000000
+#define VM_PRT_CNTL__L1_TLB_STORE_INVALID_ENTRIES_MASK 0x00000008L
+#define VM_PRT_CNTL__L1_TLB_STORE_INVALID_ENTRIES__SHIFT 0x00000003
+#define VM_PRT_CNTL__L2_CACHE_STORE_INVALID_ENTRIES_MASK 0x00000004L
+#define VM_PRT_CNTL__L2_CACHE_STORE_INVALID_ENTRIES__SHIFT 0x00000002
+
+#endif
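
The `*_MASK`/`*__SHIFT` pairs above follow the usual convention for these generated sh_mask headers: mask the field out of the register value, then shift it down, or shift a new value up and merge it under the mask. A minimal sketch of that pattern, assuming hypothetical `rreg32()`/`wreg32()` MMIO accessors (stand-ins, not part of this header; the register offset itself, e.g. mmVM_CONTEXT1_CNTL, comes from the companion `*_d.h` file):

    #include <stdint.h>

    /* Hypothetical MMIO accessors; placeholders for the driver's register I/O. */
    extern uint32_t rreg32(uint32_t reg);
    extern void wreg32(uint32_t reg, uint32_t val);

    /* Extract a field: mask first, then shift down. */
    static inline uint32_t get_field(uint32_t val, uint32_t mask, uint32_t shift)
    {
    	return (val & mask) >> shift;
    }

    /* Read-modify-write one field, e.g. VM_CONTEXT1_CNTL.PAGE_TABLE_DEPTH. */
    static void set_page_table_depth(uint32_t reg, uint32_t depth)
    {
    	uint32_t v = rreg32(reg);

    	v &= ~VM_CONTEXT1_CNTL__PAGE_TABLE_DEPTH_MASK;
    	v |= (depth << VM_CONTEXT1_CNTL__PAGE_TABLE_DEPTH__SHIFT) &
    	     VM_CONTEXT1_CNTL__PAGE_TABLE_DEPTH_MASK;
    	wreg32(reg, v);
    }

Masking the shifted value again before merging keeps an out-of-range argument from clobbering neighbouring fields in the same register.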
diff --git a/drivers/gpu/drm/amd/include/asic_reg/oss/oss_1_0_d.h b/drivers/gpu/drm/amd/include/asic_reg/oss/oss_1_0_d.h
new file mode 100644
index 000000000000..edc8a793a95d
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/oss/oss_1_0_d.h
@@ -0,0 +1,275 @@
+/*
+ *
+ * Copyright (C) 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef OSS_1_0_D_H
+#define OSS_1_0_D_H
+
+#define ixCLIENT0_BM 0x0220
+#define ixCLIENT0_CD0 0x0210
+#define ixCLIENT0_CD1 0x0214
+#define ixCLIENT0_CD2 0x0218
+#define ixCLIENT0_CD3 0x021C
+#define ixCLIENT0_CK0 0x0200
+#define ixCLIENT0_CK1 0x0204
+#define ixCLIENT0_CK2 0x0208
+#define ixCLIENT0_CK3 0x020C
+#define ixCLIENT0_K0 0x01F0
+#define ixCLIENT0_K1 0x01F4
+#define ixCLIENT0_K2 0x01F8
+#define ixCLIENT0_K3 0x01FC
+#define ixCLIENT0_OFFSET 0x0224
+#define ixCLIENT0_OFFSET_HI 0x0290
+#define ixCLIENT0_STATUS 0x0228
+#define ixCLIENT1_BM 0x025C
+#define ixCLIENT1_CD0 0x024C
+#define ixCLIENT1_CD1 0x0250
+#define ixCLIENT1_CD2 0x0254
+#define ixCLIENT1_CD3 0x0258
+#define ixCLIENT1_CK0 0x023C
+#define ixCLIENT1_CK1 0x0240
+#define ixCLIENT1_CK2 0x0244
+#define ixCLIENT1_CK3 0x0248
+#define ixCLIENT1_K0 0x022C
+#define ixCLIENT1_K1 0x0230
+#define ixCLIENT1_K2 0x0234
+#define ixCLIENT1_K3 0x0238
+#define ixCLIENT1_OFFSET 0x0260
+#define ixCLIENT1_OFFSET_HI 0x0294
+#define ixCLIENT1_PORT_STATUS 0x0264
+#define ixCLIENT2_BM 0x01E4
+#define ixCLIENT2_CD0 0x01D4
+#define ixCLIENT2_CD1 0x01D8
+#define ixCLIENT2_CD2 0x01DC
+#define ixCLIENT2_CD3 0x01E0
+#define ixCLIENT2_CK0 0x01C4
+#define ixCLIENT2_CK1 0x01C8
+#define ixCLIENT2_CK2 0x01CC
+#define ixCLIENT2_CK3 0x01D0
+#define ixCLIENT2_K0 0x01B4
+#define ixCLIENT2_K1 0x01B8
+#define ixCLIENT2_K2 0x01BC
+#define ixCLIENT2_K3 0x01C0
+#define ixCLIENT2_OFFSET 0x01E8
+#define ixCLIENT2_OFFSET_HI 0x0298
+#define ixCLIENT2_STATUS 0x01EC
+#define ixCLIENT3_BM 0x02D4
+#define ixCLIENT3_CD0 0x02C4
+#define ixCLIENT3_CD1 0x02C8
+#define ixCLIENT3_CD2 0x02CC
+#define ixCLIENT3_CD3 0x02D0
+#define ixCLIENT3_CK0 0x02B4
+#define ixCLIENT3_CK1 0x02B8
+#define ixCLIENT3_CK2 0x02BC
+#define ixCLIENT3_CK3 0x02C0
+#define ixCLIENT3_K0 0x02A4
+#define ixCLIENT3_K1 0x02A8
+#define ixCLIENT3_K2 0x02AC
+#define ixCLIENT3_K3 0x02B0
+#define ixCLIENT3_OFFSET 0x02D8
+#define ixCLIENT3_OFFSET_HI 0x02A0
+#define ixCLIENT3_STATUS 0x02DC
+#define ixDH_TEST 0x0000
+#define ixEXP0 0x0034
+#define ixEXP1 0x0038
+#define ixEXP2 0x003C
+#define ixEXP3 0x0040
+#define ixEXP4 0x0044
+#define ixEXP5 0x0048
+#define ixEXP6 0x004C
+#define ixEXP7 0x0050
+#define ixHFS_SEED0 0x0278
+#define ixHFS_SEED1 0x027C
+#define ixHFS_SEED2 0x0280
+#define ixHFS_SEED3 0x0284
+#define ixKEFUSE0 0x0268
+#define ixKEFUSE1 0x026C
+#define ixKEFUSE2 0x0270
+#define ixKEFUSE3 0x0274
+#define ixKHFS0 0x0004
+#define ixKHFS1 0x0008
+#define ixKHFS2 0x000C
+#define ixKHFS3 0x0010
+#define ixKSESSION0 0x0014
+#define ixKSESSION1 0x0018
+#define ixKSESSION2 0x001C
+#define ixKSESSION3 0x0020
+#define ixKSIG0 0x0024
+#define ixKSIG1 0x0028
+#define ixKSIG2 0x002C
+#define ixKSIG3 0x0030
+#define ixLX0 0x0054
+#define ixLX1 0x0058
+#define ixLX2 0x005C
+#define ixLX3 0x0060
+#define ixRINGOSC_MASK 0x0288
+#define ixSPU_PORT_STATUS 0x029C
+#define mmCC_DRM_ID_STRAPS 0x1559
+#define mmCC_SYS_RB_BACKEND_DISABLE 0x03A0
+#define mmCC_SYS_RB_REDUNDANCY 0x039F
+#define mmCGTT_DRM_CLK_CTRL0 0x1579
+#define mmCP_CONFIG 0x0F92
+#define mmDC_TEST_DEBUG_DATA 0x157D
+#define mmDC_TEST_DEBUG_INDEX 0x157C
+#define mmGC_USER_SYS_RB_BACKEND_DISABLE 0x03A1
+#define mmHDP_ADDR_CONFIG 0x0BD2
+#define mmHDP_DEBUG0 0x0BCC
+#define mmHDP_DEBUG1 0x0BCD
+#define mmHDP_HOST_PATH_CNTL 0x0B00
+#define mmHDP_LAST_SURFACE_HIT 0x0BCE
+#define mmHDP_MEMIO_ADDR 0x0BF7
+#define mmHDP_MEMIO_CNTL 0x0BF6
+#define mmHDP_MEMIO_RD_DATA 0x0BFA
+#define mmHDP_MEMIO_STATUS 0x0BF8
+#define mmHDP_MEMIO_WR_DATA 0x0BF9
+#define mmHDP_MEM_POWER_LS 0x0BD4
+#define mmHDP_MISC_CNTL 0x0BD3
+#define mmHDP_NONSURFACE_BASE 0x0B01
+#define mmHDP_NONSURFACE_INFO 0x0B02
+#define mmHDP_NONSURFACE_PREFETCH 0x0BD5
+#define mmHDP_NONSURFACE_SIZE 0x0B03
+#define mmHDP_NONSURF_FLAGS 0x0BC9
+#define mmHDP_NONSURF_FLAGS_CLR 0x0BCA
+#define mmHDP_OUTSTANDING_REQ 0x0BD1
+#define mmHDP_SC_MULTI_CHIP_CNTL 0x0BD0
+#define mmHDP_SW_SEMAPHORE 0x0BCB
+#define mmHDP_TILING_CONFIG 0x0BCF
+#define mmHDP_XDP_BARS_ADDR_39_36 0x0C44
+#define mmHDP_XDP_BUSY_STS 0x0C3E
+#define mmHDP_XDP_CGTT_BLK_CTRL 0x0C33
+#define mmHDP_XDP_CHKN 0x0C40
+#define mmHDP_XDP_D2H_BAR_UPDATE 0x0C02
+#define mmHDP_XDP_D2H_FLUSH 0x0C01
+#define mmHDP_XDP_D2H_RSVD_10 0x0C0A
+#define mmHDP_XDP_D2H_RSVD_11 0x0C0B
+#define mmHDP_XDP_D2H_RSVD_12 0x0C0C
+#define mmHDP_XDP_D2H_RSVD_13 0x0C0D
+#define mmHDP_XDP_D2H_RSVD_14 0x0C0E
+#define mmHDP_XDP_D2H_RSVD_15 0x0C0F
+#define mmHDP_XDP_D2H_RSVD_16 0x0C10
+#define mmHDP_XDP_D2H_RSVD_17 0x0C11
+#define mmHDP_XDP_D2H_RSVD_18 0x0C12
+#define mmHDP_XDP_D2H_RSVD_19 0x0C13
+#define mmHDP_XDP_D2H_RSVD_20 0x0C14
+#define mmHDP_XDP_D2H_RSVD_21 0x0C15
+#define mmHDP_XDP_D2H_RSVD_22 0x0C16
+#define mmHDP_XDP_D2H_RSVD_23 0x0C17
+#define mmHDP_XDP_D2H_RSVD_24 0x0C18
+#define mmHDP_XDP_D2H_RSVD_25 0x0C19
+#define mmHDP_XDP_D2H_RSVD_26 0x0C1A
+#define mmHDP_XDP_D2H_RSVD_27 0x0C1B
+#define mmHDP_XDP_D2H_RSVD_28 0x0C1C
+#define mmHDP_XDP_D2H_RSVD_29 0x0C1D
+#define mmHDP_XDP_D2H_RSVD_30 0x0C1E
+#define mmHDP_XDP_D2H_RSVD_3 0x0C03
+#define mmHDP_XDP_D2H_RSVD_31 0x0C1F
+#define mmHDP_XDP_D2H_RSVD_32 0x0C20
+#define mmHDP_XDP_D2H_RSVD_33 0x0C21
+#define mmHDP_XDP_D2H_RSVD_34 0x0C22
+#define mmHDP_XDP_D2H_RSVD_4 0x0C04
+#define mmHDP_XDP_D2H_RSVD_5 0x0C05
+#define mmHDP_XDP_D2H_RSVD_6 0x0C06
+#define mmHDP_XDP_D2H_RSVD_7 0x0C07
+#define mmHDP_XDP_D2H_RSVD_8 0x0C08
+#define mmHDP_XDP_D2H_RSVD_9 0x0C09
+#define mmHDP_XDP_DBG_ADDR 0x0C41
+#define mmHDP_XDP_DBG_DATA 0x0C42
+#define mmHDP_XDP_DBG_MASK 0x0C43
+#define mmHDP_XDP_DIRECT2HDP_FIRST 0x0C00
+#define mmHDP_XDP_DIRECT2HDP_LAST 0x0C23
+#define mmHDP_XDP_FLUSH_ARMED_STS 0x0C3C
+#define mmHDP_XDP_FLUSH_CNTR0_STS 0x0C3D
+#define mmHDP_XDP_HDP_IPH_CFG 0x0C31
+#define mmHDP_XDP_HDP_MBX_MC_CFG 0x0C2D
+#define mmHDP_XDP_HDP_MC_CFG 0x0C2E
+#define mmHDP_XDP_HST_CFG 0x0C2F
+#define mmHDP_XDP_P2P_BAR0 0x0C34
+#define mmHDP_XDP_P2P_BAR1 0x0C35
+#define mmHDP_XDP_P2P_BAR2 0x0C36
+#define mmHDP_XDP_P2P_BAR3 0x0C37
+#define mmHDP_XDP_P2P_BAR4 0x0C38
+#define mmHDP_XDP_P2P_BAR5 0x0C39
+#define mmHDP_XDP_P2P_BAR6 0x0C3A
+#define mmHDP_XDP_P2P_BAR7 0x0C3B
+#define mmHDP_XDP_P2P_BAR_CFG 0x0C24
+#define mmHDP_XDP_P2P_MBX_ADDR0 0x0C26
+#define mmHDP_XDP_P2P_MBX_ADDR1 0x0C27
+#define mmHDP_XDP_P2P_MBX_ADDR2 0x0C28
+#define mmHDP_XDP_P2P_MBX_ADDR3 0x0C29
+#define mmHDP_XDP_P2P_MBX_ADDR4 0x0C2A
+#define mmHDP_XDP_P2P_MBX_ADDR5 0x0C2B
+#define mmHDP_XDP_P2P_MBX_ADDR6 0x0C2C
+#define mmHDP_XDP_P2P_MBX_OFFSET 0x0C25
+#define mmHDP_XDP_SID_CFG 0x0C30
+#define mmHDP_XDP_SRBM_CFG 0x0C32
+#define mmHDP_XDP_STICKY 0x0C3F
+#define mmIH_ADVFAULT_CNTL 0x0F8C
+#define mmIH_CNTL 0x0F86
+#define mmIH_LEVEL_STATUS 0x0F87
+#define mmIH_PERFCOUNTER0_RESULT 0x0F8A
+#define mmIH_PERFCOUNTER1_RESULT 0x0F8B
+#define mmIH_PERFMON_CNTL 0x0F89
+#define mmIH_RB_BASE 0x0F81
+#define mmIH_RB_CNTL 0x0F80
+#define mmIH_RB_RPTR 0x0F82
+#define mmIH_RB_WPTR 0x0F83
+#define mmIH_RB_WPTR_ADDR_HI 0x0F84
+#define mmIH_RB_WPTR_ADDR_LO 0x0F85
+#define mmIH_STATUS 0x0F88
+#define mmSEM_MAILBOX 0x0F9B
+#define mmSEM_MAILBOX_CLIENTCONFIG 0x0F9A
+#define mmSEM_MAILBOX_CONTROL 0x0F9C
+#define mmSEM_MCIF_CONFIG 0x0F90
+#define mmSRBM_CAM_DATA 0x0397
+#define mmSRBM_CAM_INDEX 0x0396
+#define mmSRBM_CHIP_REVISION 0x039B
+#define mmSRBM_CNTL 0x0390
+#define mmSRBM_DEBUG 0x03A4
+#define mmSRBM_DEBUG_CNTL 0x0399
+#define mmSRBM_DEBUG_DATA 0x039A
+#define mmSRBM_DEBUG_SNAPSHOT 0x03A5
+#define mmSRBM_GFX_CNTL 0x0391
+#define mmSRBM_INT_ACK 0x03AA
+#define mmSRBM_INT_CNTL 0x03A8
+#define mmSRBM_INT_STATUS 0x03A9
+#define mmSRBM_MC_CLKEN_CNTL 0x03B3
+#define mmSRBM_PERFCOUNTER0_HI 0x0704
+#define mmSRBM_PERFCOUNTER0_LO 0x0703
+#define mmSRBM_PERFCOUNTER0_SELECT 0x0701
+#define mmSRBM_PERFCOUNTER1_HI 0x0706
+#define mmSRBM_PERFCOUNTER1_LO 0x0705
+#define mmSRBM_PERFCOUNTER1_SELECT 0x0702
+#define mmSRBM_PERFMON_CNTL 0x0700
+#define mmSRBM_READ_ERROR 0x03A6
+#define mmSRBM_SOFT_RESET 0x0398
+#define mmSRBM_STATUS 0x0394
+#define mmSRBM_STATUS2 0x0393
+#define mmSRBM_SYS_CLKEN_CNTL 0x03B4
+#define mmSRBM_UVD_CLKEN_CNTL 0x03B6
+#define mmSRBM_VCE_CLKEN_CNTL 0x03B5
+#define mmUVD_CONFIG 0x0F98
+#define mmVCE_CONFIG 0x0F94
+#define mmXDMA_MSTR_MEM_OVERFLOW_CNTL 0x03F8
+
+/* from the old sid.h */
+#define mmDMA_TILING_CONFIG 0x342E
+
+#endif
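
Two naming prefixes appear in this header: `mm*` values are direct MMIO register indices, while `ix*` values name indirectly addressed registers reached through an index/data register pair. A minimal sketch of the two access styles, assuming a dword-indexed register aperture at `mmio` and treating the index/data register names below as placeholders (the real driver routes `ix*` accesses through its own indirect helpers):

    #include <stdint.h>

    /* Assumed: ioremapped register aperture; mm* values index 32-bit dwords. */
    extern volatile uint32_t *mmio;

    /* Direct MMIO read of a mm* register, e.g. mmSRBM_STATUS. */
    static inline uint32_t read_direct(uint32_t mmreg)
    {
    	return mmio[mmreg];
    }

    /* Placeholder index/data pair; the real registers vary per block. */
    #define mmINDEX_REG 0x0000
    #define mmDATA_REG  0x0001

    /* Indirect read of an ix* register via the index/data pair. */
    static inline uint32_t read_indirect(uint32_t ixreg)
    {
    	mmio[mmINDEX_REG] = ixreg;	/* select the indirect register */
    	return mmio[mmDATA_REG];	/* then read its contents */
    }

Because the index write and data read are two separate accesses, real drivers serialize such sequences with a lock; the sketch above omits that for brevity.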
diff --git a/drivers/gpu/drm/amd/include/asic_reg/oss/oss_1_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/oss/oss_1_0_sh_mask.h
new file mode 100644
index 000000000000..1c540fe136cb
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/oss/oss_1_0_sh_mask.h
@@ -0,0 +1,1079 @@
+/*
+ *
+ * Copyright (C) 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef OSS_1_0_SH_MASK_H
+#define OSS_1_0_SH_MASK_H
+
+#define CC_DRM_ID_STRAPS__ATI_REV_ID_MASK 0xf0000000L
+#define CC_DRM_ID_STRAPS__ATI_REV_ID__SHIFT 0x0000001c
+#define CC_DRM_ID_STRAPS__DEVICE_ID_MASK 0x000ffff0L
+#define CC_DRM_ID_STRAPS__DEVICE_ID__SHIFT 0x00000004
+#define CC_DRM_ID_STRAPS__MAJOR_REV_ID_MASK 0x00f00000L
+#define CC_DRM_ID_STRAPS__MAJOR_REV_ID__SHIFT 0x00000014
+#define CC_DRM_ID_STRAPS__MINOR_REV_ID_MASK 0x0f000000L
+#define CC_DRM_ID_STRAPS__MINOR_REV_ID__SHIFT 0x00000018
+#define CC_SYS_RB_BACKEND_DISABLE__BACKEND_DISABLE_MASK 0x00ff0000L
+#define CC_SYS_RB_BACKEND_DISABLE__BACKEND_DISABLE__SHIFT 0x00000010
+#define CLIENT0_BM__RESERVED_MASK 0xffffffffL
+#define CLIENT0_BM__RESERVED__SHIFT 0x00000000
+#define CLIENT0_CD0__RESERVED_MASK 0xffffffffL
+#define CLIENT0_CD0__RESERVED__SHIFT 0x00000000
+#define CLIENT0_CD1__RESERVED_MASK 0xffffffffL
+#define CLIENT0_CD1__RESERVED__SHIFT 0x00000000
+#define CLIENT0_CD2__RESERVED_MASK 0xffffffffL
+#define CLIENT0_CD2__RESERVED__SHIFT 0x00000000
+#define CLIENT0_CD3__RESERVED_MASK 0xffffffffL
+#define CLIENT0_CD3__RESERVED__SHIFT 0x00000000
+#define CLIENT0_CK0__RESERVED_MASK 0xffffffffL
+#define CLIENT0_CK0__RESERVED__SHIFT 0x00000000
+#define CLIENT0_CK1__RESERVED_MASK 0xffffffffL
+#define CLIENT0_CK1__RESERVED__SHIFT 0x00000000
+#define CLIENT0_CK2__RESERVED_MASK 0xffffffffL
+#define CLIENT0_CK2__RESERVED__SHIFT 0x00000000
+#define CLIENT0_CK3__RESERVED_MASK 0xffffffffL
+#define CLIENT0_CK3__RESERVED__SHIFT 0x00000000
+#define CLIENT0_K0__RESERVED_MASK 0xffffffffL
+#define CLIENT0_K0__RESERVED__SHIFT 0x00000000
+#define CLIENT0_K1__RESERVED_MASK 0xffffffffL
+#define CLIENT0_K1__RESERVED__SHIFT 0x00000000
+#define CLIENT0_K2__RESERVED_MASK 0xffffffffL
+#define CLIENT0_K2__RESERVED__SHIFT 0x00000000
+#define CLIENT0_K3__RESERVED_MASK 0xffffffffL
+#define CLIENT0_K3__RESERVED__SHIFT 0x00000000
+#define CLIENT0_OFFSET_HI__RESERVED_MASK 0xffffffffL
+#define CLIENT0_OFFSET_HI__RESERVED__SHIFT 0x00000000
+#define CLIENT0_OFFSET__RESERVED_MASK 0xffffffffL
+#define CLIENT0_OFFSET__RESERVED__SHIFT 0x00000000
+#define CLIENT0_STATUS__RESERVED_MASK 0xffffffffL
+#define CLIENT0_STATUS__RESERVED__SHIFT 0x00000000
+#define CLIENT1_BM__RESERVED_MASK 0xffffffffL
+#define CLIENT1_BM__RESERVED__SHIFT 0x00000000
+#define CLIENT1_CD0__RESERVED_MASK 0xffffffffL
+#define CLIENT1_CD0__RESERVED__SHIFT 0x00000000
+#define CLIENT1_CD1__RESERVED_MASK 0xffffffffL
+#define CLIENT1_CD1__RESERVED__SHIFT 0x00000000
+#define CLIENT1_CD2__RESERVED_MASK 0xffffffffL
+#define CLIENT1_CD2__RESERVED__SHIFT 0x00000000
+#define CLIENT1_CD3__RESERVED_MASK 0xffffffffL
+#define CLIENT1_CD3__RESERVED__SHIFT 0x00000000
+#define CLIENT1_CK0__RESERVED_MASK 0xffffffffL
+#define CLIENT1_CK0__RESERVED__SHIFT 0x00000000
+#define CLIENT1_CK1__RESERVED_MASK 0xffffffffL
+#define CLIENT1_CK1__RESERVED__SHIFT 0x00000000
+#define CLIENT1_CK2__RESERVED_MASK 0xffffffffL
+#define CLIENT1_CK2__RESERVED__SHIFT 0x00000000
+#define CLIENT1_CK3__RESERVED_MASK 0xffffffffL
+#define CLIENT1_CK3__RESERVED__SHIFT 0x00000000
+#define CLIENT1_K0__RESERVED_MASK 0xffffffffL
+#define CLIENT1_K0__RESERVED__SHIFT 0x00000000
+#define CLIENT1_K1__RESERVED_MASK 0xffffffffL
+#define CLIENT1_K1__RESERVED__SHIFT 0x00000000
+#define CLIENT1_K2__RESERVED_MASK 0xffffffffL
+#define CLIENT1_K2__RESERVED__SHIFT 0x00000000
+#define CLIENT1_K3__RESERVED_MASK 0xffffffffL
+#define CLIENT1_K3__RESERVED__SHIFT 0x00000000
+#define CLIENT1_OFFSET_HI__RESERVED_MASK 0xffffffffL
+#define CLIENT1_OFFSET_HI__RESERVED__SHIFT 0x00000000
+#define CLIENT1_OFFSET__RESERVED_MASK 0xffffffffL
+#define CLIENT1_OFFSET__RESERVED__SHIFT 0x00000000
+#define CLIENT1_PORT_STATUS__RESERVED_MASK 0xffffffffL
+#define CLIENT1_PORT_STATUS__RESERVED__SHIFT 0x00000000
+#define CLIENT2_BM__RESERVED_MASK 0xffffffffL
+#define CLIENT2_BM__RESERVED__SHIFT 0x00000000
+#define CLIENT2_CD0__RESERVED_MASK 0xffffffffL
+#define CLIENT2_CD0__RESERVED__SHIFT 0x00000000
+#define CLIENT2_CD1__RESERVED_MASK 0xffffffffL
+#define CLIENT2_CD1__RESERVED__SHIFT 0x00000000
+#define CLIENT2_CD2__RESERVED_MASK 0xffffffffL
+#define CLIENT2_CD2__RESERVED__SHIFT 0x00000000
+#define CLIENT2_CD3__RESERVED_MASK 0xffffffffL
+#define CLIENT2_CD3__RESERVED__SHIFT 0x00000000
+#define CLIENT2_CK0__RESERVED_MASK 0xffffffffL
+#define CLIENT2_CK0__RESERVED__SHIFT 0x00000000
+#define CLIENT2_CK1__RESERVED_MASK 0xffffffffL
+#define CLIENT2_CK1__RESERVED__SHIFT 0x00000000
+#define CLIENT2_CK2__RESERVED_MASK 0xffffffffL
+#define CLIENT2_CK2__RESERVED__SHIFT 0x00000000
+#define CLIENT2_CK3__RESERVED_MASK 0xffffffffL
+#define CLIENT2_CK3__RESERVED__SHIFT 0x00000000
+#define CLIENT2_K0__RESERVED_MASK 0xffffffffL
+#define CLIENT2_K0__RESERVED__SHIFT 0x00000000
+#define CLIENT2_K1__RESERVED_MASK 0xffffffffL
+#define CLIENT2_K1__RESERVED__SHIFT 0x00000000
+#define CLIENT2_K2__RESERVED_MASK 0xffffffffL
+#define CLIENT2_K2__RESERVED__SHIFT 0x00000000
+#define CLIENT2_K3__RESERVED_MASK 0xffffffffL
+#define CLIENT2_K3__RESERVED__SHIFT 0x00000000
+#define CLIENT2_OFFSET_HI__RESERVED_MASK 0xffffffffL
+#define CLIENT2_OFFSET_HI__RESERVED__SHIFT 0x00000000
+#define CLIENT2_OFFSET__RESERVED_MASK 0xffffffffL
+#define CLIENT2_OFFSET__RESERVED__SHIFT 0x00000000
+#define CLIENT2_STATUS__RESERVED_MASK 0xffffffffL
+#define CLIENT2_STATUS__RESERVED__SHIFT 0x00000000
+#define CLIENT3_BM__RESERVED_MASK 0xffffffffL
+#define CLIENT3_BM__RESERVED__SHIFT 0x00000000
+#define CLIENT3_CD0__RESERVED_MASK 0xffffffffL
+#define CLIENT3_CD0__RESERVED__SHIFT 0x00000000
+#define CLIENT3_CD1__RESERVED_MASK 0xffffffffL
+#define CLIENT3_CD1__RESERVED__SHIFT 0x00000000
+#define CLIENT3_CD2__RESERVED_MASK 0xffffffffL
+#define CLIENT3_CD2__RESERVED__SHIFT 0x00000000
+#define CLIENT3_CD3__RESERVED_MASK 0xffffffffL
+#define CLIENT3_CD3__RESERVED__SHIFT 0x00000000
+#define CLIENT3_CK0__RESERVED_MASK 0xffffffffL
+#define CLIENT3_CK0__RESERVED__SHIFT 0x00000000
+#define CLIENT3_CK1__RESERVED_MASK 0xffffffffL
+#define CLIENT3_CK1__RESERVED__SHIFT 0x00000000
+#define CLIENT3_CK2__RESERVED_MASK 0xffffffffL
+#define CLIENT3_CK2__RESERVED__SHIFT 0x00000000
+#define CLIENT3_CK3__RESERVED_MASK 0xffffffffL
+#define CLIENT3_CK3__RESERVED__SHIFT 0x00000000
+#define CLIENT3_K0__RESERVED_MASK 0xffffffffL
+#define CLIENT3_K0__RESERVED__SHIFT 0x00000000
+#define CLIENT3_K1__RESERVED_MASK 0xffffffffL
+#define CLIENT3_K1__RESERVED__SHIFT 0x00000000
+#define CLIENT3_K2__RESERVED_MASK 0xffffffffL
+#define CLIENT3_K2__RESERVED__SHIFT 0x00000000
+#define CLIENT3_K3__RESERVED_MASK 0xffffffffL
+#define CLIENT3_K3__RESERVED__SHIFT 0x00000000
+#define CLIENT3_OFFSET_HI__RESERVED_MASK 0xffffffffL
+#define CLIENT3_OFFSET_HI__RESERVED__SHIFT 0x00000000
+#define CLIENT3_OFFSET__RESERVED_MASK 0xffffffffL
+#define CLIENT3_OFFSET__RESERVED__SHIFT 0x00000000
+#define CLIENT3_STATUS__RESERVED_MASK 0xffffffffL
+#define CLIENT3_STATUS__RESERVED__SHIFT 0x00000000
+#define CP_CONFIG__CP_RDREQ_URG_MASK 0x00000f00L
+#define CP_CONFIG__CP_RDREQ_URG__SHIFT 0x00000008
+#define CP_CONFIG__CP_REQ_TRAN_MASK 0x00010000L
+#define CP_CONFIG__CP_REQ_TRAN__SHIFT 0x00000010
+#define DC_TEST_DEBUG_DATA__DC_TEST_DEBUG_DATA_MASK 0xffffffffL
+#define DC_TEST_DEBUG_DATA__DC_TEST_DEBUG_DATA__SHIFT 0x00000000
+#define DC_TEST_DEBUG_INDEX__DC_TEST_DEBUG_INDEX_MASK 0x000000ffL
+#define DC_TEST_DEBUG_INDEX__DC_TEST_DEBUG_INDEX__SHIFT 0x00000000
+#define DC_TEST_DEBUG_INDEX__DC_TEST_DEBUG_WRITE_EN_MASK 0x00000100L
+#define DC_TEST_DEBUG_INDEX__DC_TEST_DEBUG_WRITE_EN__SHIFT 0x00000008
+#define DH_TEST__DH_TEST_MASK 0x00000001L
+#define DH_TEST__DH_TEST__SHIFT 0x00000000
+#define EXP0__RESERVED_MASK 0xffffffffL
+#define EXP0__RESERVED__SHIFT 0x00000000
+#define EXP1__RESERVED_MASK 0xffffffffL
+#define EXP1__RESERVED__SHIFT 0x00000000
+#define EXP2__RESERVED_MASK 0xffffffffL
+#define EXP2__RESERVED__SHIFT 0x00000000
+#define EXP3__RESERVED_MASK 0xffffffffL
+#define EXP3__RESERVED__SHIFT 0x00000000
+#define EXP4__RESERVED_MASK 0xffffffffL
+#define EXP4__RESERVED__SHIFT 0x00000000
+#define EXP5__RESERVED_MASK 0xffffffffL
+#define EXP5__RESERVED__SHIFT 0x00000000
+#define EXP6__RESERVED_MASK 0xffffffffL
+#define EXP6__RESERVED__SHIFT 0x00000000
+#define EXP7__RESERVED_MASK 0xffffffffL
+#define EXP7__RESERVED__SHIFT 0x00000000
+#define GC_USER_SYS_RB_BACKEND_DISABLE__BACKEND_DISABLE_MASK 0x00ff0000L
+#define GC_USER_SYS_RB_BACKEND_DISABLE__BACKEND_DISABLE__SHIFT 0x00000010
+#define HDP_ADDR_CONFIG__BANK_INTERLEAVE_SIZE_MASK 0x00000700L
+#define HDP_ADDR_CONFIG__BANK_INTERLEAVE_SIZE__SHIFT 0x00000008
+#define HDP_ADDR_CONFIG__MULTI_GPU_TILE_SIZE_MASK 0x03000000L
+#define HDP_ADDR_CONFIG__MULTI_GPU_TILE_SIZE__SHIFT 0x00000018
+#define HDP_ADDR_CONFIG__NUM_GPUS_MASK 0x00700000L
+#define HDP_ADDR_CONFIG__NUM_GPUS__SHIFT 0x00000014
+#define HDP_ADDR_CONFIG__NUM_LOWER_PIPES_MASK 0x40000000L
+#define HDP_ADDR_CONFIG__NUM_LOWER_PIPES__SHIFT 0x0000001e
+#define HDP_ADDR_CONFIG__NUM_PIPES_MASK 0x00000007L
+#define HDP_ADDR_CONFIG__NUM_PIPES__SHIFT 0x00000000
+#define HDP_ADDR_CONFIG__NUM_SHADER_ENGINES_MASK 0x00003000L
+#define HDP_ADDR_CONFIG__NUM_SHADER_ENGINES__SHIFT 0x0000000c
+#define HDP_ADDR_CONFIG__PIPE_INTERLEAVE_SIZE_MASK 0x00000070L
+#define HDP_ADDR_CONFIG__PIPE_INTERLEAVE_SIZE__SHIFT 0x00000004
+#define HDP_ADDR_CONFIG__ROW_SIZE_MASK 0x30000000L
+#define HDP_ADDR_CONFIG__ROW_SIZE__SHIFT 0x0000001c
+#define HDP_ADDR_CONFIG__SHADER_ENGINE_TILE_SIZE_MASK 0x00070000L
+#define HDP_ADDR_CONFIG__SHADER_ENGINE_TILE_SIZE__SHIFT 0x00000010
+#define HDP_DEBUG0__HDP_DEBUG__SHIFT 0x00000000
+#define HDP_DEBUG1__HDP_DEBUG__SHIFT 0x00000000
+#define HDP_HOST_PATH_CNTL__ALL_SURFACES_DIS_MASK 0x20000000L
+#define HDP_HOST_PATH_CNTL__ALL_SURFACES_DIS__SHIFT 0x0000001d
+#define HDP_HOST_PATH_CNTL__BIF_RDRET_CREDIT_MASK 0x00000007L
+#define HDP_HOST_PATH_CNTL__BIF_RDRET_CREDIT__SHIFT 0x00000000
+#define HDP_HOST_PATH_CNTL__CACHE_INVALIDATE_MASK 0x00400000L
+#define HDP_HOST_PATH_CNTL__CACHE_INVALIDATE__SHIFT 0x00000016
+#define HDP_HOST_PATH_CNTL__CLOCK_GATING_DIS_MASK 0x00800000L
+#define HDP_HOST_PATH_CNTL__CLOCK_GATING_DIS__SHIFT 0x00000017
+#define HDP_HOST_PATH_CNTL__LIN_RD_CACHE_DIS_MASK 0x80000000L
+#define HDP_HOST_PATH_CNTL__LIN_RD_CACHE_DIS__SHIFT 0x0000001f
+#define HDP_HOST_PATH_CNTL__MC_WRREQ_CREDIT_MASK 0x000001f8L
+#define HDP_HOST_PATH_CNTL__MC_WRREQ_CREDIT__SHIFT 0x00000003
+#define HDP_HOST_PATH_CNTL__RD_STALL_TIMER_MASK 0x00001800L
+#define HDP_HOST_PATH_CNTL__RD_STALL_TIMER__SHIFT 0x0000000b
+#define HDP_HOST_PATH_CNTL__REG_CLK_ENABLE_COUNT_MASK 0x0f000000L
+#define HDP_HOST_PATH_CNTL__REG_CLK_ENABLE_COUNT__SHIFT 0x00000018
+#define HDP_HOST_PATH_CNTL__WRITE_COMBINE_EN_MASK 0x00200000L
+#define HDP_HOST_PATH_CNTL__WRITE_COMBINE_EN__SHIFT 0x00000015
+#define HDP_HOST_PATH_CNTL__WRITE_COMBINE_TIMER_MASK 0x00180000L
+#define HDP_HOST_PATH_CNTL__WRITE_COMBINE_TIMER__SHIFT 0x00000013
+#define HDP_HOST_PATH_CNTL__WRITE_THROUGH_CACHE_DIS_MASK 0x40000000L
+#define HDP_HOST_PATH_CNTL__WRITE_THROUGH_CACHE_DIS__SHIFT 0x0000001e
+#define HDP_HOST_PATH_CNTL__WR_STALL_TIMER_MASK 0x00000600L
+#define HDP_HOST_PATH_CNTL__WR_STALL_TIMER__SHIFT 0x00000009
+#define HDP_LAST_SURFACE_HIT__LAST_SURFACE_HIT_MASK 0x0000003fL
+#define HDP_LAST_SURFACE_HIT__LAST_SURFACE_HIT__SHIFT 0x00000000
+#define HDP_MEMIO_ADDR__MEMIO_ADDR_LOWER_MASK 0xffffffffL
+#define HDP_MEMIO_ADDR__MEMIO_ADDR_LOWER__SHIFT 0x00000000
+#define HDP_MEMIO_CNTL__MEMIO_ADDR_UPPER_MASK 0x00003f00L
+#define HDP_MEMIO_CNTL__MEMIO_ADDR_UPPER__SHIFT 0x00000008
+#define HDP_MEMIO_CNTL__MEMIO_BE_MASK 0x0000003cL
+#define HDP_MEMIO_CNTL__MEMIO_BE__SHIFT 0x00000002
+#define HDP_MEMIO_CNTL__MEMIO_CLR_RD_ERROR_MASK 0x00008000L
+#define HDP_MEMIO_CNTL__MEMIO_CLR_RD_ERROR__SHIFT 0x0000000f
+#define HDP_MEMIO_CNTL__MEMIO_CLR_WR_ERROR_MASK 0x00004000L
+#define HDP_MEMIO_CNTL__MEMIO_CLR_WR_ERROR__SHIFT 0x0000000e
+#define HDP_MEMIO_CNTL__MEMIO_OP_MASK 0x00000002L
+#define HDP_MEMIO_CNTL__MEMIO_OP__SHIFT 0x00000001
+#define HDP_MEMIO_CNTL__MEMIO_RD_STROBE_MASK 0x00000080L
+#define HDP_MEMIO_CNTL__MEMIO_RD_STROBE__SHIFT 0x00000007
+#define HDP_MEMIO_CNTL__MEMIO_SEND_MASK 0x00000001L
+#define HDP_MEMIO_CNTL__MEMIO_SEND__SHIFT 0x00000000
+#define HDP_MEMIO_CNTL__MEMIO_WR_STROBE_MASK 0x00000040L
+#define HDP_MEMIO_CNTL__MEMIO_WR_STROBE__SHIFT 0x00000006
+#define HDP_MEMIO_RD_DATA__MEMIO_RD_DATA_MASK 0xffffffffL
+#define HDP_MEMIO_RD_DATA__MEMIO_RD_DATA__SHIFT 0x00000000
+#define HDP_MEMIO_STATUS__MEMIO_RD_ERROR_MASK 0x00000008L
+#define HDP_MEMIO_STATUS__MEMIO_RD_ERROR__SHIFT 0x00000003
+#define HDP_MEMIO_STATUS__MEMIO_RD_STATUS_MASK 0x00000002L
+#define HDP_MEMIO_STATUS__MEMIO_RD_STATUS__SHIFT 0x00000001
+#define HDP_MEMIO_STATUS__MEMIO_WR_ERROR_MASK 0x00000004L
+#define HDP_MEMIO_STATUS__MEMIO_WR_ERROR__SHIFT 0x00000002
+#define HDP_MEMIO_STATUS__MEMIO_WR_STATUS_MASK 0x00000001L
+#define HDP_MEMIO_STATUS__MEMIO_WR_STATUS__SHIFT 0x00000000
+#define HDP_MEMIO_WR_DATA__MEMIO_WR_DATA_MASK 0xffffffffL
+#define HDP_MEMIO_WR_DATA__MEMIO_WR_DATA__SHIFT 0x00000000
+#define HDP_MEM_POWER_LS__LS_ENABLE_MASK 0x00000001L
+#define HDP_MEM_POWER_LS__LS_ENABLE__SHIFT 0x00000000
+#define HDP_MEM_POWER_LS__LS_HOLD_MASK 0x00001f80L
+#define HDP_MEM_POWER_LS__LS_HOLD__SHIFT 0x00000007
+#define HDP_MEM_POWER_LS__LS_SETUP_MASK 0x0000007eL
+#define HDP_MEM_POWER_LS__LS_SETUP__SHIFT 0x00000001
+#define HDP_MISC_CNTL__ADDRLIB_LINEAR_BYPASS_MASK 0x00100000L
+#define HDP_MISC_CNTL__ADDRLIB_LINEAR_BYPASS__SHIFT 0x00000014
+#define HDP_MISC_CNTL__FED_ENABLE_MASK 0x00200000L
+#define HDP_MISC_CNTL__FED_ENABLE__SHIFT 0x00000015
+#define HDP_MISC_CNTL__FLUSH_INVALIDATE_CACHE_MASK 0x00000001L
+#define HDP_MISC_CNTL__FLUSH_INVALIDATE_CACHE__SHIFT 0x00000000
+#define HDP_MISC_CNTL__HDP_BIF_RDRET_CREDIT_MASK 0x00000780L
+#define HDP_MISC_CNTL__HDP_BIF_RDRET_CREDIT__SHIFT 0x00000007
+#define HDP_MISC_CNTL__MC_RDREQ_CREDIT_MASK 0x0007e000L
+#define HDP_MISC_CNTL__MC_RDREQ_CREDIT__SHIFT 0x0000000d
+#define HDP_MISC_CNTL__MULTIPLE_READS_MASK 0x00000040L
+#define HDP_MISC_CNTL__MULTIPLE_READS__SHIFT 0x00000006
+#define HDP_MISC_CNTL__NO_SPLIT_ARRAY_LINEAR_MASK 0x00001000L
+#define HDP_MISC_CNTL__NO_SPLIT_ARRAY_LINEAR__SHIFT 0x0000000c
+#define HDP_MISC_CNTL__OUTSTANDING_WRITE_COUNT_1024_MASK 0x00000020L
+#define HDP_MISC_CNTL__OUTSTANDING_WRITE_COUNT_1024__SHIFT 0x00000005
+#define HDP_MISC_CNTL__READ_CACHE_INVALIDATE_MASK 0x00080000L
+#define HDP_MISC_CNTL__READ_CACHE_INVALIDATE__SHIFT 0x00000013
+#define HDP_MISC_CNTL__SIMULTANEOUS_READS_WRITES_MASK 0x00000800L
+#define HDP_MISC_CNTL__SIMULTANEOUS_READS_WRITES__SHIFT 0x0000000b
+#define HDP_MISC_CNTL__VM_ID_MASK 0x0000001eL
+#define HDP_MISC_CNTL__VM_ID__SHIFT 0x00000001
+#define HDP_NONSURFACE_BASE__NONSURF_BASE_MASK 0xffffffffL
+#define HDP_NONSURFACE_BASE__NONSURF_BASE__SHIFT 0x00000000
+#define HDP_NONSURFACE_INFO__NONSURF_ADDR_TYPE_MASK 0x00000001L
+#define HDP_NONSURFACE_INFO__NONSURF_ADDR_TYPE__SHIFT 0x00000000
+#define HDP_NONSURFACE_INFO__NONSURF_ARRAY_MODE_MASK 0x0000001eL
+#define HDP_NONSURFACE_INFO__NONSURF_ARRAY_MODE__SHIFT 0x00000001
+#define HDP_NONSURFACE_INFO__NONSURF_BANK_HEIGHT_MASK 0x03000000L
+#define HDP_NONSURFACE_INFO__NONSURF_BANK_HEIGHT__SHIFT 0x00000018
+#define HDP_NONSURFACE_INFO__NONSURF_BANK_WIDTH_MASK 0x00c00000L
+#define HDP_NONSURFACE_INFO__NONSURF_BANK_WIDTH__SHIFT 0x00000016
+#define HDP_NONSURFACE_INFO__NONSURF_ENDIAN_MASK 0x00000060L
+#define HDP_NONSURFACE_INFO__NONSURF_ENDIAN__SHIFT 0x00000005
+#define HDP_NONSURFACE_INFO__NONSURF_MACRO_TILE_ASPECT_MASK 0x0c000000L
+#define HDP_NONSURFACE_INFO__NONSURF_MACRO_TILE_ASPECT__SHIFT 0x0000001a
+#define HDP_NONSURFACE_INFO__NONSURF_MICRO_TILE_MODE_MASK 0x30000000L
+#define HDP_NONSURFACE_INFO__NONSURF_MICRO_TILE_MODE__SHIFT 0x0000001c
+#define HDP_NONSURFACE_INFO__NONSURF_NUM_BANKS_MASK 0x00300000L
+#define HDP_NONSURFACE_INFO__NONSURF_NUM_BANKS__SHIFT 0x00000014
+#define HDP_NONSURFACE_INFO__NONSURF_PIXEL_SIZE_MASK 0x00000380L
+#define HDP_NONSURFACE_INFO__NONSURF_PIXEL_SIZE__SHIFT 0x00000007
+#define HDP_NONSURFACE_INFO__NONSURF_PRIV_MASK 0x00008000L
+#define HDP_NONSURFACE_INFO__NONSURF_PRIV__SHIFT 0x0000000f
+#define HDP_NONSURFACE_INFO__NONSURF_SAMPLE_NUM_MASK 0x00001c00L
+#define HDP_NONSURFACE_INFO__NONSURF_SAMPLE_NUM__SHIFT 0x0000000a
+#define HDP_NONSURFACE_INFO__NONSURF_SAMPLE_SIZE_MASK 0x00006000L
+#define HDP_NONSURFACE_INFO__NONSURF_SAMPLE_SIZE__SHIFT 0x0000000d
+#define HDP_NONSURFACE_INFO__NONSURF_SLICE_TILE_MAX_MSB_MASK 0x40000000L
+#define HDP_NONSURFACE_INFO__NONSURF_SLICE_TILE_MAX_MSB__SHIFT 0x0000001e
+#define HDP_NONSURFACE_INFO__NONSURF_TILE_COMPACT_MASK 0x00010000L
+#define HDP_NONSURFACE_INFO__NONSURF_TILE_COMPACT__SHIFT 0x00000010
+#define HDP_NONSURFACE_INFO__NONSURF_TILE_SPLIT_MASK 0x000e0000L
+#define HDP_NONSURFACE_INFO__NONSURF_TILE_SPLIT__SHIFT 0x00000011
+#define HDP_NONSURFACE_PREFETCH__NONSURF_PIPE_CONFIG_MASK 0xf8000000L
+#define HDP_NONSURFACE_PREFETCH__NONSURF_PIPE_CONFIG__SHIFT 0x0000001b
+#define HDP_NONSURFACE_PREFETCH__NONSURF_PREFETCH_DIR_MASK 0x00000038L
+#define HDP_NONSURFACE_PREFETCH__NONSURF_PREFETCH_DIR__SHIFT 0x00000003
+#define HDP_NONSURFACE_PREFETCH__NONSURF_PREFETCH_MAX_Z_MASK 0x000ffe00L
+#define HDP_NONSURFACE_PREFETCH__NONSURF_PREFETCH_MAX_Z__SHIFT 0x00000009
+#define HDP_NONSURFACE_PREFETCH__NONSURF_PREFETCH_NUM_MASK 0x000001c0L
+#define HDP_NONSURFACE_PREFETCH__NONSURF_PREFETCH_NUM__SHIFT 0x00000006
+#define HDP_NONSURFACE_PREFETCH__NONSURF_PREFETCH_PRI_MASK 0x00000007L
+#define HDP_NONSURFACE_PREFETCH__NONSURF_PREFETCH_PRI__SHIFT 0x00000000
+#define HDP_NONSURFACE_SIZE__NONSURF_PITCH_TILE_MAX_MASK 0x000007ffL
+#define HDP_NONSURFACE_SIZE__NONSURF_PITCH_TILE_MAX__SHIFT 0x00000000
+#define HDP_NONSURFACE_SIZE__NONSURF_SLICE_TILE_MAX_MASK 0xfffff800L
+#define HDP_NONSURFACE_SIZE__NONSURF_SLICE_TILE_MAX__SHIFT 0x0000000b
+#define HDP_NONSURF_FLAGS_CLR__NONSURF_READ_FLAG_CLR_MASK 0x00000002L
+#define HDP_NONSURF_FLAGS_CLR__NONSURF_READ_FLAG_CLR__SHIFT 0x00000001
+#define HDP_NONSURF_FLAGS_CLR__NONSURF_WRITE_FLAG_CLR_MASK 0x00000001L
+#define HDP_NONSURF_FLAGS_CLR__NONSURF_WRITE_FLAG_CLR__SHIFT 0x00000000
+#define HDP_NONSURF_FLAGS__NONSURF_READ_FLAG_MASK 0x00000002L
+#define HDP_NONSURF_FLAGS__NONSURF_READ_FLAG__SHIFT 0x00000001
+#define HDP_NONSURF_FLAGS__NONSURF_WRITE_FLAG_MASK 0x00000001L
+#define HDP_NONSURF_FLAGS__NONSURF_WRITE_FLAG__SHIFT 0x00000000
+#define HDP_OUTSTANDING_REQ__READ_REQ_MASK 0x0000ff00L
+#define HDP_OUTSTANDING_REQ__READ_REQ__SHIFT 0x00000008
+#define HDP_OUTSTANDING_REQ__WRITE_REQ_MASK 0x000000ffL
+#define HDP_OUTSTANDING_REQ__WRITE_REQ__SHIFT 0x00000000
+#define HDP_SC_MULTI_CHIP_CNTL__LOG2_NUM_CHIPS_MASK 0x00000007L
+#define HDP_SC_MULTI_CHIP_CNTL__LOG2_NUM_CHIPS__SHIFT 0x00000000
+#define HDP_SC_MULTI_CHIP_CNTL__MULTI_CHIP_TILE_SIZE_MASK 0x00000018L
+#define HDP_SC_MULTI_CHIP_CNTL__MULTI_CHIP_TILE_SIZE__SHIFT 0x00000003
+#define HDP_SW_SEMAPHORE__SW_SEMAPHORE_MASK 0xffffffffL
+#define HDP_SW_SEMAPHORE__SW_SEMAPHORE__SHIFT 0x00000000
+#define HDP_TILING_CONFIG__BANK_SWAPS_MASK 0x00003800L
+#define HDP_TILING_CONFIG__BANK_SWAPS__SHIFT 0x0000000b
+#define HDP_TILING_CONFIG__BANK_TILING_MASK 0x00000030L
+#define HDP_TILING_CONFIG__BANK_TILING__SHIFT 0x00000004
+#define HDP_TILING_CONFIG__GROUP_SIZE_MASK 0x000000c0L
+#define HDP_TILING_CONFIG__GROUP_SIZE__SHIFT 0x00000006
+#define HDP_TILING_CONFIG__PIPE_TILING_MASK 0x0000000eL
+#define HDP_TILING_CONFIG__PIPE_TILING__SHIFT 0x00000001
+#define HDP_TILING_CONFIG__ROW_TILING_MASK 0x00000700L
+#define HDP_TILING_CONFIG__ROW_TILING__SHIFT 0x00000008
+#define HDP_TILING_CONFIG__SAMPLE_SPLIT_MASK 0x0000c000L
+#define HDP_TILING_CONFIG__SAMPLE_SPLIT__SHIFT 0x0000000e
+#define HDP_XDP_BARS_ADDR_39_36__BAR0_ADDR_39_36_MASK 0x0000000fL
+#define HDP_XDP_BARS_ADDR_39_36__BAR0_ADDR_39_36__SHIFT 0x00000000
+#define HDP_XDP_BARS_ADDR_39_36__BAR1_ADDR_39_36_MASK 0x000000f0L
+#define HDP_XDP_BARS_ADDR_39_36__BAR1_ADDR_39_36__SHIFT 0x00000004
+#define HDP_XDP_BARS_ADDR_39_36__BAR2_ADDR_39_36_MASK 0x00000f00L
+#define HDP_XDP_BARS_ADDR_39_36__BAR2_ADDR_39_36__SHIFT 0x00000008
+#define HDP_XDP_BARS_ADDR_39_36__BAR3_ADDR_39_36_MASK 0x0000f000L
+#define HDP_XDP_BARS_ADDR_39_36__BAR3_ADDR_39_36__SHIFT 0x0000000c
+#define HDP_XDP_BARS_ADDR_39_36__BAR4_ADDR_39_36_MASK 0x000f0000L
+#define HDP_XDP_BARS_ADDR_39_36__BAR4_ADDR_39_36__SHIFT 0x00000010
+#define HDP_XDP_BARS_ADDR_39_36__BAR5_ADDR_39_36_MASK 0x00f00000L
+#define HDP_XDP_BARS_ADDR_39_36__BAR5_ADDR_39_36__SHIFT 0x00000014
+#define HDP_XDP_BARS_ADDR_39_36__BAR6_ADDR_39_36_MASK 0x0f000000L
+#define HDP_XDP_BARS_ADDR_39_36__BAR6_ADDR_39_36__SHIFT 0x00000018
+#define HDP_XDP_BARS_ADDR_39_36__BAR7_ADDR_39_36_MASK 0xf0000000L
+#define HDP_XDP_BARS_ADDR_39_36__BAR7_ADDR_39_36__SHIFT 0x0000001c
+#define HDP_XDP_BUSY_STS__BUSY_BITS_MASK 0x0003ffffL
+#define HDP_XDP_BUSY_STS__BUSY_BITS__SHIFT 0x00000000
+#define HDP_XDP_CGTT_BLK_CTRL__CGTT_BLK_CTRL_0_ON_DELAY_MASK 0x0000000fL
+#define HDP_XDP_CGTT_BLK_CTRL__CGTT_BLK_CTRL_0_ON_DELAY__SHIFT 0x00000000
+#define HDP_XDP_CGTT_BLK_CTRL__CGTT_BLK_CTRL_1_OFF_DELAY_MASK 0x00000ff0L
+#define HDP_XDP_CGTT_BLK_CTRL__CGTT_BLK_CTRL_1_OFF_DELAY__SHIFT 0x00000004
+#define HDP_XDP_CGTT_BLK_CTRL__CGTT_BLK_CTRL_2_RSVD_MASK 0x3ffff000L
+#define HDP_XDP_CGTT_BLK_CTRL__CGTT_BLK_CTRL_2_RSVD__SHIFT 0x0000000c
+#define HDP_XDP_CGTT_BLK_CTRL__CGTT_BLK_CTRL_3_SOFT_CORE_OVERRIDE_MASK 0x40000000L
+#define HDP_XDP_CGTT_BLK_CTRL__CGTT_BLK_CTRL_3_SOFT_CORE_OVERRIDE__SHIFT 0x0000001e
+#define HDP_XDP_CGTT_BLK_CTRL__CGTT_BLK_CTRL_4_SOFT_REG_OVERRIDE_MASK 0x80000000L
+#define HDP_XDP_CGTT_BLK_CTRL__CGTT_BLK_CTRL_4_SOFT_REG_OVERRIDE__SHIFT 0x0000001f
+#define HDP_XDP_CHKN__CHKN_0_RSVD_MASK 0x000000ffL
+#define HDP_XDP_CHKN__CHKN_0_RSVD__SHIFT 0x00000000
+#define HDP_XDP_CHKN__CHKN_1_RSVD_MASK 0x0000ff00L
+#define HDP_XDP_CHKN__CHKN_1_RSVD__SHIFT 0x00000008
+#define HDP_XDP_CHKN__CHKN_2_RSVD_MASK 0x00ff0000L
+#define HDP_XDP_CHKN__CHKN_2_RSVD__SHIFT 0x00000010
+#define HDP_XDP_CHKN__CHKN_3_RSVD_MASK 0xff000000L
+#define HDP_XDP_CHKN__CHKN_3_RSVD__SHIFT 0x00000018
+#define HDP_XDP_D2H_BAR_UPDATE__D2H_BAR_UPDATE_ADDR_MASK 0x0000ffffL
+#define HDP_XDP_D2H_BAR_UPDATE__D2H_BAR_UPDATE_ADDR__SHIFT 0x00000000
+#define HDP_XDP_D2H_BAR_UPDATE__D2H_BAR_UPDATE_BAR_NUM_MASK 0x00700000L
+#define HDP_XDP_D2H_BAR_UPDATE__D2H_BAR_UPDATE_BAR_NUM__SHIFT 0x00000014
+#define HDP_XDP_D2H_BAR_UPDATE__D2H_BAR_UPDATE_FLUSH_NUM_MASK 0x000f0000L
+#define HDP_XDP_D2H_BAR_UPDATE__D2H_BAR_UPDATE_FLUSH_NUM__SHIFT 0x00000010
+#define HDP_XDP_D2H_FLUSH__D2H_FLUSH_ALTER_FLUSH_NUM_MASK 0x00040000L
+#define HDP_XDP_D2H_FLUSH__D2H_FLUSH_ALTER_FLUSH_NUM__SHIFT 0x00000012
+#define HDP_XDP_D2H_FLUSH__D2H_FLUSH_FLUSH_NUM_MASK 0x0000000fL
+#define HDP_XDP_D2H_FLUSH__D2H_FLUSH_FLUSH_NUM__SHIFT 0x00000000
+#define HDP_XDP_D2H_FLUSH__D2H_FLUSH_MBX_ADDR_SEL_MASK 0x00000700L
+#define HDP_XDP_D2H_FLUSH__D2H_FLUSH_MBX_ADDR_SEL__SHIFT 0x00000008
+#define HDP_XDP_D2H_FLUSH__D2H_FLUSH_MBX_ENC_DATA_MASK 0x000000f0L
+#define HDP_XDP_D2H_FLUSH__D2H_FLUSH_MBX_ENC_DATA__SHIFT 0x00000004
+#define HDP_XDP_D2H_FLUSH__D2H_FLUSH_RSVD_0_MASK 0x00080000L
+#define HDP_XDP_D2H_FLUSH__D2H_FLUSH_RSVD_0__SHIFT 0x00000013
+#define HDP_XDP_D2H_FLUSH__D2H_FLUSH_RSVD_1_MASK 0x00100000L
+#define HDP_XDP_D2H_FLUSH__D2H_FLUSH_RSVD_1__SHIFT 0x00000014
+#define HDP_XDP_D2H_FLUSH__D2H_FLUSH_SEND_HOST_MASK 0x00010000L
+#define HDP_XDP_D2H_FLUSH__D2H_FLUSH_SEND_HOST__SHIFT 0x00000010
+#define HDP_XDP_D2H_FLUSH__D2H_FLUSH_SEND_SIDE_MASK 0x00020000L
+#define HDP_XDP_D2H_FLUSH__D2H_FLUSH_SEND_SIDE__SHIFT 0x00000011
+#define HDP_XDP_D2H_FLUSH__D2H_FLUSH_XPB_CLG_MASK 0x0000f800L
+#define HDP_XDP_D2H_FLUSH__D2H_FLUSH_XPB_CLG__SHIFT 0x0000000b
+#define HDP_XDP_D2H_RSVD_10__RESERVED_MASK 0xffffffffL
+#define HDP_XDP_D2H_RSVD_10__RESERVED__SHIFT 0x00000000
+#define HDP_XDP_D2H_RSVD_11__RESERVED_MASK 0xffffffffL
+#define HDP_XDP_D2H_RSVD_11__RESERVED__SHIFT 0x00000000
+#define HDP_XDP_D2H_RSVD_12__RESERVED_MASK 0xffffffffL
+#define HDP_XDP_D2H_RSVD_12__RESERVED__SHIFT 0x00000000
+#define HDP_XDP_D2H_RSVD_13__RESERVED_MASK 0xffffffffL
+#define HDP_XDP_D2H_RSVD_13__RESERVED__SHIFT 0x00000000
+#define HDP_XDP_D2H_RSVD_14__RESERVED_MASK 0xffffffffL
+#define HDP_XDP_D2H_RSVD_14__RESERVED__SHIFT 0x00000000
+#define HDP_XDP_D2H_RSVD_15__RESERVED_MASK 0xffffffffL
+#define HDP_XDP_D2H_RSVD_15__RESERVED__SHIFT 0x00000000
+#define HDP_XDP_D2H_RSVD_16__RESERVED_MASK 0xffffffffL
+#define HDP_XDP_D2H_RSVD_16__RESERVED__SHIFT 0x00000000
+#define HDP_XDP_D2H_RSVD_17__RESERVED_MASK 0xffffffffL
+#define HDP_XDP_D2H_RSVD_17__RESERVED__SHIFT 0x00000000
+#define HDP_XDP_D2H_RSVD_18__RESERVED_MASK 0xffffffffL
+#define HDP_XDP_D2H_RSVD_18__RESERVED__SHIFT 0x00000000
+#define HDP_XDP_D2H_RSVD_19__RESERVED_MASK 0xffffffffL
+#define HDP_XDP_D2H_RSVD_19__RESERVED__SHIFT 0x00000000
+#define HDP_XDP_D2H_RSVD_20__RESERVED_MASK 0xffffffffL
+#define HDP_XDP_D2H_RSVD_20__RESERVED__SHIFT 0x00000000
+#define HDP_XDP_D2H_RSVD_21__RESERVED_MASK 0xffffffffL
+#define HDP_XDP_D2H_RSVD_21__RESERVED__SHIFT 0x00000000
+#define HDP_XDP_D2H_RSVD_22__RESERVED_MASK 0xffffffffL
+#define HDP_XDP_D2H_RSVD_22__RESERVED__SHIFT 0x00000000
+#define HDP_XDP_D2H_RSVD_23__RESERVED_MASK 0xffffffffL
+#define HDP_XDP_D2H_RSVD_23__RESERVED__SHIFT 0x00000000
+#define HDP_XDP_D2H_RSVD_24__RESERVED_MASK 0xffffffffL
+#define HDP_XDP_D2H_RSVD_24__RESERVED__SHIFT 0x00000000
+#define HDP_XDP_D2H_RSVD_25__RESERVED_MASK 0xffffffffL
+#define HDP_XDP_D2H_RSVD_25__RESERVED__SHIFT 0x00000000
+#define HDP_XDP_D2H_RSVD_26__RESERVED_MASK 0xffffffffL
+#define HDP_XDP_D2H_RSVD_26__RESERVED__SHIFT 0x00000000
+#define HDP_XDP_D2H_RSVD_27__RESERVED_MASK 0xffffffffL
+#define HDP_XDP_D2H_RSVD_27__RESERVED__SHIFT 0x00000000
+#define HDP_XDP_D2H_RSVD_28__RESERVED_MASK 0xffffffffL
+#define HDP_XDP_D2H_RSVD_28__RESERVED__SHIFT 0x00000000
+#define HDP_XDP_D2H_RSVD_29__RESERVED_MASK 0xffffffffL
+#define HDP_XDP_D2H_RSVD_29__RESERVED__SHIFT 0x00000000
+#define HDP_XDP_D2H_RSVD_30__RESERVED_MASK 0xffffffffL
+#define HDP_XDP_D2H_RSVD_30__RESERVED__SHIFT 0x00000000
+#define HDP_XDP_D2H_RSVD_31__RESERVED_MASK 0xffffffffL
+#define HDP_XDP_D2H_RSVD_31__RESERVED__SHIFT 0x00000000
+#define HDP_XDP_D2H_RSVD_32__RESERVED_MASK 0xffffffffL
+#define HDP_XDP_D2H_RSVD_32__RESERVED__SHIFT 0x00000000
+#define HDP_XDP_D2H_RSVD_33__RESERVED_MASK 0xffffffffL
+#define HDP_XDP_D2H_RSVD_33__RESERVED__SHIFT 0x00000000
+#define HDP_XDP_D2H_RSVD_34__RESERVED_MASK 0xffffffffL
+#define HDP_XDP_D2H_RSVD_34__RESERVED__SHIFT 0x00000000
+#define HDP_XDP_D2H_RSVD_3__RESERVED_MASK 0xffffffffL
+#define HDP_XDP_D2H_RSVD_3__RESERVED__SHIFT 0x00000000
+#define HDP_XDP_D2H_RSVD_4__RESERVED_MASK 0xffffffffL
+#define HDP_XDP_D2H_RSVD_4__RESERVED__SHIFT 0x00000000
+#define HDP_XDP_D2H_RSVD_5__RESERVED_MASK 0xffffffffL
+#define HDP_XDP_D2H_RSVD_5__RESERVED__SHIFT 0x00000000
+#define HDP_XDP_D2H_RSVD_6__RESERVED_MASK 0xffffffffL
+#define HDP_XDP_D2H_RSVD_6__RESERVED__SHIFT 0x00000000
+#define HDP_XDP_D2H_RSVD_7__RESERVED_MASK 0xffffffffL
+#define HDP_XDP_D2H_RSVD_7__RESERVED__SHIFT 0x00000000
+#define HDP_XDP_D2H_RSVD_8__RESERVED_MASK 0xffffffffL
+#define HDP_XDP_D2H_RSVD_8__RESERVED__SHIFT 0x00000000
+#define HDP_XDP_D2H_RSVD_9__RESERVED_MASK 0xffffffffL
+#define HDP_XDP_D2H_RSVD_9__RESERVED__SHIFT 0x00000000
+#define HDP_XDP_DBG_ADDR__CTRL_MASK 0xffff0000L
+#define HDP_XDP_DBG_ADDR__CTRL__SHIFT 0x00000010
+#define HDP_XDP_DBG_ADDR__STS_MASK 0x0000ffffL
+#define HDP_XDP_DBG_ADDR__STS__SHIFT 0x00000000
+#define HDP_XDP_DBG_DATA__CTRL_MASK 0xffff0000L
+#define HDP_XDP_DBG_DATA__CTRL__SHIFT 0x00000010
+#define HDP_XDP_DBG_DATA__STS_MASK 0x0000ffffL
+#define HDP_XDP_DBG_DATA__STS__SHIFT 0x00000000
+#define HDP_XDP_DBG_MASK__CTRL_MASK 0xffff0000L
+#define HDP_XDP_DBG_MASK__CTRL__SHIFT 0x00000010
+#define HDP_XDP_DBG_MASK__STS_MASK 0x0000ffffL
+#define HDP_XDP_DBG_MASK__STS__SHIFT 0x00000000
+#define HDP_XDP_DIRECT2HDP_FIRST__RESERVED_MASK 0xffffffffL
+#define HDP_XDP_DIRECT2HDP_FIRST__RESERVED__SHIFT 0x00000000
+#define HDP_XDP_DIRECT2HDP_LAST__RESERVED_MASK 0xffffffffL
+#define HDP_XDP_DIRECT2HDP_LAST__RESERVED__SHIFT 0x00000000
+#define HDP_XDP_FLUSH_ARMED_STS__FLUSH_ARMED_STS_MASK 0xffffffffL
+#define HDP_XDP_FLUSH_ARMED_STS__FLUSH_ARMED_STS__SHIFT 0x00000000
+#define HDP_XDP_FLUSH_CNTR0_STS__FLUSH_CNTR0_STS_MASK 0x03ffffffL
+#define HDP_XDP_FLUSH_CNTR0_STS__FLUSH_CNTR0_STS__SHIFT 0x00000000
+#define HDP_XDP_HDP_IPH_CFG__HDP_IPH_CFG_INVERSE_PEER_TAG_MATCHING_MASK 0x00001000L
+#define HDP_XDP_HDP_IPH_CFG__HDP_IPH_CFG_INVERSE_PEER_TAG_MATCHING__SHIFT 0x0000000c
+#define HDP_XDP_HDP_IPH_CFG__HDP_IPH_CFG_P2P_RD_EN_MASK 0x00002000L
+#define HDP_XDP_HDP_IPH_CFG__HDP_IPH_CFG_P2P_RD_EN__SHIFT 0x0000000d
+#define HDP_XDP_HDP_IPH_CFG__HDP_IPH_CFG_SYS_FIFO_DEPTH_OVERRIDE_MASK 0x0000003fL
+#define HDP_XDP_HDP_IPH_CFG__HDP_IPH_CFG_SYS_FIFO_DEPTH_OVERRIDE__SHIFT 0x00000000
+#define HDP_XDP_HDP_IPH_CFG__HDP_IPH_CFG_XDP_FIFO_DEPTH_OVERRIDE_MASK 0x00000fc0L
+#define HDP_XDP_HDP_IPH_CFG__HDP_IPH_CFG_XDP_FIFO_DEPTH_OVERRIDE__SHIFT 0x00000006
+#define HDP_XDP_HDP_MBX_MC_CFG__HDP_MBX_MC_CFG_TAP_WRREQ_PRIV_MASK 0x00000001L
+#define HDP_XDP_HDP_MBX_MC_CFG__HDP_MBX_MC_CFG_TAP_WRREQ_PRIV__SHIFT 0x00000000
+#define HDP_XDP_HDP_MBX_MC_CFG__HDP_MBX_MC_CFG_TAP_WRREQ_SWAP_MASK 0x00000006L
+#define HDP_XDP_HDP_MBX_MC_CFG__HDP_MBX_MC_CFG_TAP_WRREQ_SWAP__SHIFT 0x00000001
+#define HDP_XDP_HDP_MBX_MC_CFG__HDP_MBX_MC_CFG_TAP_WRREQ_TRAN_MASK 0x00000008L
+#define HDP_XDP_HDP_MBX_MC_CFG__HDP_MBX_MC_CFG_TAP_WRREQ_TRAN__SHIFT 0x00000003
+#define HDP_XDP_HDP_MBX_MC_CFG__HDP_MBX_MC_CFG_TAP_WRREQ_VMID_MASK 0x000000f0L
+#define HDP_XDP_HDP_MBX_MC_CFG__HDP_MBX_MC_CFG_TAP_WRREQ_VMID__SHIFT 0x00000004
+#define HDP_XDP_HDP_MC_CFG__HDP_MC_CFG_HST_TAP_WRREQ_PRIV_MASK 0x00000001L
+#define HDP_XDP_HDP_MC_CFG__HDP_MC_CFG_HST_TAP_WRREQ_PRIV__SHIFT 0x00000000
+#define HDP_XDP_HDP_MC_CFG__HDP_MC_CFG_HST_TAP_WRREQ_SWAP_MASK 0x00000006L
+#define HDP_XDP_HDP_MC_CFG__HDP_MC_CFG_HST_TAP_WRREQ_SWAP__SHIFT 0x00000001
+#define HDP_XDP_HDP_MC_CFG__HDP_MC_CFG_HST_TAP_WRREQ_TRAN_MASK 0x00000008L
+#define HDP_XDP_HDP_MC_CFG__HDP_MC_CFG_HST_TAP_WRREQ_TRAN__SHIFT 0x00000003
+#define HDP_XDP_HDP_MC_CFG__HDP_MC_CFG_HST_TAP_WRREQ_VMID_MASK 0x07800000L
+#define HDP_XDP_HDP_MC_CFG__HDP_MC_CFG_HST_TAP_WRREQ_VMID__SHIFT 0x00000017
+#define HDP_XDP_HDP_MC_CFG__HDP_MC_CFG_MC_STALL_ON_BUF_FULL_MASK_MASK 0x00700000L
+#define HDP_XDP_HDP_MC_CFG__HDP_MC_CFG_MC_STALL_ON_BUF_FULL_MASK__SHIFT 0x00000014
+#define HDP_XDP_HDP_MC_CFG__HDP_MC_CFG_SID_TAP_WRREQ_PRIV_MASK 0x00000010L
+#define HDP_XDP_HDP_MC_CFG__HDP_MC_CFG_SID_TAP_WRREQ_PRIV__SHIFT 0x00000004
+#define HDP_XDP_HDP_MC_CFG__HDP_MC_CFG_SID_TAP_WRREQ_SWAP_MASK 0x00000060L
+#define HDP_XDP_HDP_MC_CFG__HDP_MC_CFG_SID_TAP_WRREQ_SWAP__SHIFT 0x00000005
+#define HDP_XDP_HDP_MC_CFG__HDP_MC_CFG_SID_TAP_WRREQ_TRAN_MASK 0x00000080L
+#define HDP_XDP_HDP_MC_CFG__HDP_MC_CFG_SID_TAP_WRREQ_TRAN__SHIFT 0x00000007
+#define HDP_XDP_HDP_MC_CFG__HDP_MC_CFG_SID_TAP_WRREQ_VMID_MASK 0x78000000L
+#define HDP_XDP_HDP_MC_CFG__HDP_MC_CFG_SID_TAP_WRREQ_VMID__SHIFT 0x0000001b
+#define HDP_XDP_HDP_MC_CFG__HDP_MC_CFG_XDP_HIGHER_PRI_THRESH_MASK 0x000fc000L
+#define HDP_XDP_HDP_MC_CFG__HDP_MC_CFG_XDP_HIGHER_PRI_THRESH__SHIFT 0x0000000e
+#define HDP_XDP_HDP_MC_CFG__HDP_MC_CFG_XL8R_WRREQ_CRD_OVERRIDE_MASK 0x00003f00L
+#define HDP_XDP_HDP_MC_CFG__HDP_MC_CFG_XL8R_WRREQ_CRD_OVERRIDE__SHIFT 0x00000008
+#define HDP_XDP_HST_CFG__HST_CFG_WR_COMBINE_EN_MASK 0x00000001L
+#define HDP_XDP_HST_CFG__HST_CFG_WR_COMBINE_EN__SHIFT 0x00000000
+#define HDP_XDP_HST_CFG__HST_CFG_WR_COMBINE_TIMER_MASK 0x00000006L
+#define HDP_XDP_HST_CFG__HST_CFG_WR_COMBINE_TIMER__SHIFT 0x00000001
+#define HDP_XDP_P2P_BAR0__ADDR_MASK 0x0000ffffL
+#define HDP_XDP_P2P_BAR0__ADDR__SHIFT 0x00000000
+#define HDP_XDP_P2P_BAR0__FLUSH_MASK 0x000f0000L
+#define HDP_XDP_P2P_BAR0__FLUSH__SHIFT 0x00000010
+#define HDP_XDP_P2P_BAR0__VALID_MASK 0x00100000L
+#define HDP_XDP_P2P_BAR0__VALID__SHIFT 0x00000014
+#define HDP_XDP_P2P_BAR1__ADDR_MASK 0x0000ffffL
+#define HDP_XDP_P2P_BAR1__ADDR__SHIFT 0x00000000
+#define HDP_XDP_P2P_BAR1__FLUSH_MASK 0x000f0000L
+#define HDP_XDP_P2P_BAR1__FLUSH__SHIFT 0x00000010
+#define HDP_XDP_P2P_BAR1__VALID_MASK 0x00100000L
+#define HDP_XDP_P2P_BAR1__VALID__SHIFT 0x00000014
+#define HDP_XDP_P2P_BAR2__ADDR_MASK 0x0000ffffL
+#define HDP_XDP_P2P_BAR2__ADDR__SHIFT 0x00000000
+#define HDP_XDP_P2P_BAR2__FLUSH_MASK 0x000f0000L
+#define HDP_XDP_P2P_BAR2__FLUSH__SHIFT 0x00000010
+#define HDP_XDP_P2P_BAR2__VALID_MASK 0x00100000L
+#define HDP_XDP_P2P_BAR2__VALID__SHIFT 0x00000014
+#define HDP_XDP_P2P_BAR3__ADDR_MASK 0x0000ffffL
+#define HDP_XDP_P2P_BAR3__ADDR__SHIFT 0x00000000
+#define HDP_XDP_P2P_BAR3__FLUSH_MASK 0x000f0000L
+#define HDP_XDP_P2P_BAR3__FLUSH__SHIFT 0x00000010
+#define HDP_XDP_P2P_BAR3__VALID_MASK 0x00100000L
+#define HDP_XDP_P2P_BAR3__VALID__SHIFT 0x00000014
+#define HDP_XDP_P2P_BAR4__ADDR_MASK 0x0000ffffL
+#define HDP_XDP_P2P_BAR4__ADDR__SHIFT 0x00000000
+#define HDP_XDP_P2P_BAR4__FLUSH_MASK 0x000f0000L
+#define HDP_XDP_P2P_BAR4__FLUSH__SHIFT 0x00000010
+#define HDP_XDP_P2P_BAR4__VALID_MASK 0x00100000L
+#define HDP_XDP_P2P_BAR4__VALID__SHIFT 0x00000014
+#define HDP_XDP_P2P_BAR5__ADDR_MASK 0x0000ffffL
+#define HDP_XDP_P2P_BAR5__ADDR__SHIFT 0x00000000
+#define HDP_XDP_P2P_BAR5__FLUSH_MASK 0x000f0000L
+#define HDP_XDP_P2P_BAR5__FLUSH__SHIFT 0x00000010
+#define HDP_XDP_P2P_BAR5__VALID_MASK 0x00100000L
+#define HDP_XDP_P2P_BAR5__VALID__SHIFT 0x00000014
+#define HDP_XDP_P2P_BAR6__ADDR_MASK 0x0000ffffL
+#define HDP_XDP_P2P_BAR6__ADDR__SHIFT 0x00000000
+#define HDP_XDP_P2P_BAR6__FLUSH_MASK 0x000f0000L
+#define HDP_XDP_P2P_BAR6__FLUSH__SHIFT 0x00000010
+#define HDP_XDP_P2P_BAR6__VALID_MASK 0x00100000L
+#define HDP_XDP_P2P_BAR6__VALID__SHIFT 0x00000014
+#define HDP_XDP_P2P_BAR7__ADDR_MASK 0x0000ffffL
+#define HDP_XDP_P2P_BAR7__ADDR__SHIFT 0x00000000
+#define HDP_XDP_P2P_BAR7__FLUSH_MASK 0x000f0000L
+#define HDP_XDP_P2P_BAR7__FLUSH__SHIFT 0x00000010
+#define HDP_XDP_P2P_BAR7__VALID_MASK 0x00100000L
+#define HDP_XDP_P2P_BAR7__VALID__SHIFT 0x00000014
+#define HDP_XDP_P2P_BAR_CFG__P2P_BAR_CFG_ADDR_SIZE_MASK 0x0000000fL
+#define HDP_XDP_P2P_BAR_CFG__P2P_BAR_CFG_ADDR_SIZE__SHIFT 0x00000000
+#define HDP_XDP_P2P_BAR_CFG__P2P_BAR_CFG_BAR_FROM_MASK 0x00000030L
+#define HDP_XDP_P2P_BAR_CFG__P2P_BAR_CFG_BAR_FROM__SHIFT 0x00000004
+#define HDP_XDP_P2P_MBX_ADDR0__ADDR_39_36_MASK 0x01e00000L
+#define HDP_XDP_P2P_MBX_ADDR0__ADDR_39_36__SHIFT 0x00000015
+#define HDP_XDP_P2P_MBX_ADDR0__ADDR_MASK 0x001ffffeL
+#define HDP_XDP_P2P_MBX_ADDR0__ADDR__SHIFT 0x00000001
+#define HDP_XDP_P2P_MBX_ADDR0__VALID_MASK 0x00000001L
+#define HDP_XDP_P2P_MBX_ADDR0__VALID__SHIFT 0x00000000
+#define HDP_XDP_P2P_MBX_ADDR1__ADDR_39_36_MASK 0x01e00000L
+#define HDP_XDP_P2P_MBX_ADDR1__ADDR_39_36__SHIFT 0x00000015
+#define HDP_XDP_P2P_MBX_ADDR1__ADDR_MASK 0x001ffffeL
+#define HDP_XDP_P2P_MBX_ADDR1__ADDR__SHIFT 0x00000001
+#define HDP_XDP_P2P_MBX_ADDR1__VALID_MASK 0x00000001L
+#define HDP_XDP_P2P_MBX_ADDR1__VALID__SHIFT 0x00000000
+#define HDP_XDP_P2P_MBX_ADDR2__ADDR_39_36_MASK 0x01e00000L
+#define HDP_XDP_P2P_MBX_ADDR2__ADDR_39_36__SHIFT 0x00000015
+#define HDP_XDP_P2P_MBX_ADDR2__ADDR_MASK 0x001ffffeL
+#define HDP_XDP_P2P_MBX_ADDR2__ADDR__SHIFT 0x00000001
+#define HDP_XDP_P2P_MBX_ADDR2__VALID_MASK 0x00000001L
+#define HDP_XDP_P2P_MBX_ADDR2__VALID__SHIFT 0x00000000
+#define HDP_XDP_P2P_MBX_ADDR3__ADDR_39_36_MASK 0x01e00000L
+#define HDP_XDP_P2P_MBX_ADDR3__ADDR_39_36__SHIFT 0x00000015
+#define HDP_XDP_P2P_MBX_ADDR3__ADDR_MASK 0x001ffffeL
+#define HDP_XDP_P2P_MBX_ADDR3__ADDR__SHIFT 0x00000001
+#define HDP_XDP_P2P_MBX_ADDR3__VALID_MASK 0x00000001L
+#define HDP_XDP_P2P_MBX_ADDR3__VALID__SHIFT 0x00000000
+#define HDP_XDP_P2P_MBX_ADDR4__ADDR_39_36_MASK 0x01e00000L
+#define HDP_XDP_P2P_MBX_ADDR4__ADDR_39_36__SHIFT 0x00000015
+#define HDP_XDP_P2P_MBX_ADDR4__ADDR_MASK 0x001ffffeL
+#define HDP_XDP_P2P_MBX_ADDR4__ADDR__SHIFT 0x00000001
+#define HDP_XDP_P2P_MBX_ADDR4__VALID_MASK 0x00000001L
+#define HDP_XDP_P2P_MBX_ADDR4__VALID__SHIFT 0x00000000
+#define HDP_XDP_P2P_MBX_ADDR5__ADDR_39_36_MASK 0x01e00000L
+#define HDP_XDP_P2P_MBX_ADDR5__ADDR_39_36__SHIFT 0x00000015
+#define HDP_XDP_P2P_MBX_ADDR5__ADDR_MASK 0x001ffffeL
+#define HDP_XDP_P2P_MBX_ADDR5__ADDR__SHIFT 0x00000001
+#define HDP_XDP_P2P_MBX_ADDR5__VALID_MASK 0x00000001L
+#define HDP_XDP_P2P_MBX_ADDR5__VALID__SHIFT 0x00000000
+#define HDP_XDP_P2P_MBX_ADDR6__ADDR_39_36_MASK 0x01e00000L
+#define HDP_XDP_P2P_MBX_ADDR6__ADDR_39_36__SHIFT 0x00000015
+#define HDP_XDP_P2P_MBX_ADDR6__ADDR_MASK 0x001ffffeL
+#define HDP_XDP_P2P_MBX_ADDR6__ADDR__SHIFT 0x00000001
+#define HDP_XDP_P2P_MBX_ADDR6__VALID_MASK 0x00000001L
+#define HDP_XDP_P2P_MBX_ADDR6__VALID__SHIFT 0x00000000
+#define HDP_XDP_P2P_MBX_OFFSET__P2P_MBX_OFFSET_MASK 0x00003fffL
+#define HDP_XDP_P2P_MBX_OFFSET__P2P_MBX_OFFSET__SHIFT 0x00000000
+#define HDP_XDP_SID_CFG__SID_CFG_FLNUM_MSB_SEL_MASK 0x00000018L
+#define HDP_XDP_SID_CFG__SID_CFG_FLNUM_MSB_SEL__SHIFT 0x00000003
+#define HDP_XDP_SID_CFG__SID_CFG_WR_COMBINE_EN_MASK 0x00000001L
+#define HDP_XDP_SID_CFG__SID_CFG_WR_COMBINE_EN__SHIFT 0x00000000
+#define HDP_XDP_SID_CFG__SID_CFG_WR_COMBINE_TIMER_MASK 0x00000006L
+#define HDP_XDP_SID_CFG__SID_CFG_WR_COMBINE_TIMER__SHIFT 0x00000001
+#define HDP_XDP_SRBM_CFG__SRBM_CFG_REG_CLK_ENABLE_COUNT_MASK 0x0000003fL
+#define HDP_XDP_SRBM_CFG__SRBM_CFG_REG_CLK_ENABLE_COUNT__SHIFT 0x00000000
+#define HDP_XDP_SRBM_CFG__SRBM_CFG_REG_CLK_GATING_DIS_MASK 0x00000040L
+#define HDP_XDP_SRBM_CFG__SRBM_CFG_REG_CLK_GATING_DIS__SHIFT 0x00000006
+#define HDP_XDP_SRBM_CFG__SRBM_CFG_WAKE_DYN_CLK_MASK 0x00000080L
+#define HDP_XDP_SRBM_CFG__SRBM_CFG_WAKE_DYN_CLK__SHIFT 0x00000007
+#define HDP_XDP_STICKY__STICKY_STS_MASK 0x0000ffffL
+#define HDP_XDP_STICKY__STICKY_STS__SHIFT 0x00000000
+#define HDP_XDP_STICKY__STICKY_W1C_MASK 0xffff0000L
+#define HDP_XDP_STICKY__STICKY_W1C__SHIFT 0x00000010
+#define HFS_SEED0__RESERVED_MASK 0xffffffffL
+#define HFS_SEED0__RESERVED__SHIFT 0x00000000
+#define HFS_SEED1__RESERVED_MASK 0xffffffffL
+#define HFS_SEED1__RESERVED__SHIFT 0x00000000
+#define HFS_SEED2__RESERVED_MASK 0xffffffffL
+#define HFS_SEED2__RESERVED__SHIFT 0x00000000
+#define HFS_SEED3__RESERVED_MASK 0xffffffffL
+#define HFS_SEED3__RESERVED__SHIFT 0x00000000
+#define IH_ADVFAULT_CNTL__NUM_FAULTS_DROPPED_MASK 0x0000ff00L
+#define IH_ADVFAULT_CNTL__NUM_FAULTS_DROPPED__SHIFT 0x00000008
+#define IH_ADVFAULT_CNTL__WAIT_TIMER_MASK 0x3fff0000L
+#define IH_ADVFAULT_CNTL__WAIT_TIMER__SHIFT 0x00000010
+#define IH_ADVFAULT_CNTL__WATERMARK_ENABLE_MASK 0x00000008L
+#define IH_ADVFAULT_CNTL__WATERMARK_ENABLE__SHIFT 0x00000003
+#define IH_ADVFAULT_CNTL__WATERMARK_MASK 0x00000007L
+#define IH_ADVFAULT_CNTL__WATERMARK_REACHED_MASK 0x00000010L
+#define IH_ADVFAULT_CNTL__WATERMARK_REACHED__SHIFT 0x00000004
+#define IH_ADVFAULT_CNTL__WATERMARK__SHIFT 0x00000000
+#define IH_CNTL__CLIENT_FIFO_HIGHWATER_MASK 0x00000300L
+#define IH_CNTL__CLIENT_FIFO_HIGHWATER__SHIFT 0x00000008
+#define IH_CNTL__ENABLE_INTR_MASK 0x00000001L
+#define IH_CNTL__ENABLE_INTR__SHIFT 0x00000000
+#define IH_CNTL__MC_FIFO_HIGHWATER_MASK 0x00007c00L
+#define IH_CNTL__MC_FIFO_HIGHWATER__SHIFT 0x0000000a
+#define IH_CNTL__MC_SWAP_MASK 0x00000006L
+#define IH_CNTL__MC_SWAP__SHIFT 0x00000001
+#define IH_CNTL__MC_TRAN_MASK 0x00000008L
+#define IH_CNTL__MC_TRAN__SHIFT 0x00000003
+#define IH_CNTL__MC_VMID_MASK 0x1e000000L
+#define IH_CNTL__MC_VMID__SHIFT 0x00000019
+#define IH_CNTL__MC_WR_CLEAN_CNT_MASK 0x01f00000L
+#define IH_CNTL__MC_WR_CLEAN_CNT__SHIFT 0x00000014
+#define IH_CNTL__MC_WRREQ_CREDIT_MASK 0x000f8000L
+#define IH_CNTL__MC_WRREQ_CREDIT__SHIFT 0x0000000f
+#define IH_CNTL__RPTR_REARM_MASK 0x00000010L
+#define IH_CNTL__RPTR_REARM__SHIFT 0x00000004
+#define IH_LEVEL_STATUS__BIF_STATUS_MASK 0x00000010L
+#define IH_LEVEL_STATUS__BIF_STATUS__SHIFT 0x00000004
+#define IH_LEVEL_STATUS__DC_STATUS_MASK 0x00000001L
+#define IH_LEVEL_STATUS__DC_STATUS__SHIFT 0x00000000
+#define IH_LEVEL_STATUS__ROM_STATUS_MASK 0x00000004L
+#define IH_LEVEL_STATUS__ROM_STATUS__SHIFT 0x00000002
+#define IH_LEVEL_STATUS__SRBM_STATUS_MASK 0x00000008L
+#define IH_LEVEL_STATUS__SRBM_STATUS__SHIFT 0x00000003
+#define IH_LEVEL_STATUS__XDMA_STATUS_MASK 0x00000020L
+#define IH_LEVEL_STATUS__XDMA_STATUS__SHIFT 0x00000005
+#define IH_PERFCOUNTER0_RESULT__PERF_COUNT_MASK 0xffffffffL
+#define IH_PERFCOUNTER0_RESULT__PERF_COUNT__SHIFT 0x00000000
+#define IH_PERFCOUNTER1_RESULT__PERF_COUNT_MASK 0xffffffffL
+#define IH_PERFCOUNTER1_RESULT__PERF_COUNT__SHIFT 0x00000000
+#define IH_PERFMON_CNTL__CLEAR0_MASK 0x00000002L
+#define IH_PERFMON_CNTL__CLEAR0__SHIFT 0x00000001
+#define IH_PERFMON_CNTL__CLEAR1_MASK 0x00000200L
+#define IH_PERFMON_CNTL__CLEAR1__SHIFT 0x00000009
+#define IH_PERFMON_CNTL__ENABLE0_MASK 0x00000001L
+#define IH_PERFMON_CNTL__ENABLE0__SHIFT 0x00000000
+#define IH_PERFMON_CNTL__ENABLE1_MASK 0x00000100L
+#define IH_PERFMON_CNTL__ENABLE1__SHIFT 0x00000008
+#define IH_PERFMON_CNTL__PERF_SEL0_MASK 0x000000fcL
+#define IH_PERFMON_CNTL__PERF_SEL0__SHIFT 0x00000002
+#define IH_PERFMON_CNTL__PERF_SEL1_MASK 0x0000fc00L
+#define IH_PERFMON_CNTL__PERF_SEL1__SHIFT 0x0000000a
+#define IH_RB_BASE__ADDR_MASK 0xffffffffL
+#define IH_RB_BASE__ADDR__SHIFT 0x00000000
+#define IH_RB_CNTL__RB_ENABLE_MASK 0x00000001L
+#define IH_RB_CNTL__RB_ENABLE__SHIFT 0x00000000
+#define IH_RB_CNTL__RB_FULL_DRAIN_ENABLE_MASK 0x00000040L
+#define IH_RB_CNTL__RB_FULL_DRAIN_ENABLE__SHIFT 0x00000006
+#define IH_RB_CNTL__RB_GPU_TS_ENABLE_MASK 0x00000080L
+#define IH_RB_CNTL__RB_GPU_TS_ENABLE__SHIFT 0x00000007
+#define IH_RB_CNTL__RB_SIZE_MASK 0x0000003eL
+#define IH_RB_CNTL__RB_SIZE__SHIFT 0x00000001
+#define IH_RB_CNTL__WPTR_OVERFLOW_CLEAR_MASK 0x80000000L
+#define IH_RB_CNTL__WPTR_OVERFLOW_CLEAR__SHIFT 0x0000001f
+#define IH_RB_CNTL__WPTR_OVERFLOW_ENABLE_MASK 0x00010000L
+#define IH_RB_CNTL__WPTR_OVERFLOW_ENABLE__SHIFT 0x00000010
+#define IH_RB_CNTL__WPTR_WRITEBACK_ENABLE_MASK 0x00000100L
+#define IH_RB_CNTL__WPTR_WRITEBACK_ENABLE__SHIFT 0x00000008
+#define IH_RB_CNTL__WPTR_WRITEBACK_TIMER_MASK 0x00003e00L
+#define IH_RB_CNTL__WPTR_WRITEBACK_TIMER__SHIFT 0x00000009
+#define IH_RB_RPTR__OFFSET_MASK 0x0003fffcL
+#define IH_RB_RPTR__OFFSET__SHIFT 0x00000002
+#define IH_RB_WPTR_ADDR_HI__ADDR_MASK 0x000000ffL
+#define IH_RB_WPTR_ADDR_HI__ADDR__SHIFT 0x00000000
+#define IH_RB_WPTR_ADDR_LO__ADDR_MASK 0xfffffffcL
+#define IH_RB_WPTR_ADDR_LO__ADDR__SHIFT 0x00000002
+#define IH_RB_WPTR__OFFSET_MASK 0x0003fffcL
+#define IH_RB_WPTR__OFFSET__SHIFT 0x00000002
+#define IH_RB_WPTR__RB_OVERFLOW_MASK 0x00000001L
+#define IH_RB_WPTR__RB_OVERFLOW__SHIFT 0x00000000
+#define IH_STATUS__BIF_INTERRUPT_LINE_MASK 0x00000400L
+#define IH_STATUS__BIF_INTERRUPT_LINE__SHIFT 0x0000000a
+#define IH_STATUS__IDLE_MASK 0x00000001L
+#define IH_STATUS__IDLE__SHIFT 0x00000000
+#define IH_STATUS__INPUT_IDLE_MASK 0x00000002L
+#define IH_STATUS__INPUT_IDLE__SHIFT 0x00000001
+#define IH_STATUS__MC_WR_CLEAN_PENDING_MASK 0x00000100L
+#define IH_STATUS__MC_WR_CLEAN_PENDING__SHIFT 0x00000008
+#define IH_STATUS__MC_WR_CLEAN_STALL_MASK 0x00000200L
+#define IH_STATUS__MC_WR_CLEAN_STALL__SHIFT 0x00000009
+#define IH_STATUS__MC_WR_IDLE_MASK 0x00000040L
+#define IH_STATUS__MC_WR_IDLE__SHIFT 0x00000006
+#define IH_STATUS__MC_WR_STALL_MASK 0x00000080L
+#define IH_STATUS__MC_WR_STALL__SHIFT 0x00000007
+#define IH_STATUS__RB_FULL_DRAIN_MASK 0x00000010L
+#define IH_STATUS__RB_FULL_DRAIN__SHIFT 0x00000004
+#define IH_STATUS__RB_FULL_MASK 0x00000008L
+#define IH_STATUS__RB_FULL__SHIFT 0x00000003
+#define IH_STATUS__RB_IDLE_MASK 0x00000004L
+#define IH_STATUS__RB_IDLE__SHIFT 0x00000002
+#define IH_STATUS__RB_OVERFLOW_MASK 0x00000020L
+#define IH_STATUS__RB_OVERFLOW__SHIFT 0x00000005
+#define KEFUSE0__RESERVED_MASK 0xffffffffL
+#define KEFUSE0__RESERVED__SHIFT 0x00000000
+#define KEFUSE1__RESERVED_MASK 0xffffffffL
+#define KEFUSE1__RESERVED__SHIFT 0x00000000
+#define KEFUSE2__RESERVED_MASK 0xffffffffL
+#define KEFUSE2__RESERVED__SHIFT 0x00000000
+#define KEFUSE3__RESERVED_MASK 0xffffffffL
+#define KEFUSE3__RESERVED__SHIFT 0x00000000
+#define KHFS0__RESERVED_MASK 0xffffffffL
+#define KHFS0__RESERVED__SHIFT 0x00000000
+#define KHFS1__RESERVED_MASK 0xffffffffL
+#define KHFS1__RESERVED__SHIFT 0x00000000
+#define KHFS2__RESERVED_MASK 0xffffffffL
+#define KHFS2__RESERVED__SHIFT 0x00000000
+#define KHFS3__RESERVED_MASK 0xffffffffL
+#define KHFS3__RESERVED__SHIFT 0x00000000
+#define KSESSION0__RESERVED_MASK 0xffffffffL
+#define KSESSION0__RESERVED__SHIFT 0x00000000
+#define KSESSION1__RESERVED_MASK 0xffffffffL
+#define KSESSION1__RESERVED__SHIFT 0x00000000
+#define KSESSION2__RESERVED_MASK 0xffffffffL
+#define KSESSION2__RESERVED__SHIFT 0x00000000
+#define KSESSION3__RESERVED_MASK 0xffffffffL
+#define KSESSION3__RESERVED__SHIFT 0x00000000
+#define KSIG0__RESERVED_MASK 0xffffffffL
+#define KSIG0__RESERVED__SHIFT 0x00000000
+#define KSIG1__RESERVED_MASK 0xffffffffL
+#define KSIG1__RESERVED__SHIFT 0x00000000
+#define KSIG2__RESERVED_MASK 0xffffffffL
+#define KSIG2__RESERVED__SHIFT 0x00000000
+#define KSIG3__RESERVED_MASK 0xffffffffL
+#define KSIG3__RESERVED__SHIFT 0x00000000
+#define LX0__RESERVED_MASK 0xffffffffL
+#define LX0__RESERVED__SHIFT 0x00000000
+#define LX1__RESERVED_MASK 0xffffffffL
+#define LX1__RESERVED__SHIFT 0x00000000
+#define LX2__RESERVED_MASK 0xffffffffL
+#define LX2__RESERVED__SHIFT 0x00000000
+#define LX3__RESERVED_MASK 0xffffffffL
+#define LX3__RESERVED__SHIFT 0x00000000
+#define RINGOSC_MASK__MASK_MASK 0x0000ffffL
+#define RINGOSC_MASK__MASK__SHIFT 0x00000000
+#define SEM_MAILBOX_CLIENTCONFIG__CP_CLIENT0_MASK 0x00000007L
+#define SEM_MAILBOX_CLIENTCONFIG__CP_CLIENT0__SHIFT 0x00000000
+#define SEM_MAILBOX_CLIENTCONFIG__CP_CLIENT1_MASK 0x00000038L
+#define SEM_MAILBOX_CLIENTCONFIG__CP_CLIENT1__SHIFT 0x00000003
+#define SEM_MAILBOX_CLIENTCONFIG__CP_CLIENT2_MASK 0x000001c0L
+#define SEM_MAILBOX_CLIENTCONFIG__CP_CLIENT2__SHIFT 0x00000006
+#define SEM_MAILBOX_CLIENTCONFIG__CP_CLIENT3_MASK 0x00000e00L
+#define SEM_MAILBOX_CLIENTCONFIG__CP_CLIENT3__SHIFT 0x00000009
+#define SEM_MAILBOX_CLIENTCONFIG__UVD_CLIENT0_MASK 0x00038000L
+#define SEM_MAILBOX_CLIENTCONFIG__UVD_CLIENT0__SHIFT 0x0000000f
+#define SEM_MAILBOX_CLIENTCONFIG__VCE_CLIENT0_MASK 0x00e00000L
+#define SEM_MAILBOX_CLIENTCONFIG__VCE_CLIENT0__SHIFT 0x00000015
+#define SEM_MAILBOX_CONTROL__HOSTPORT_ENABLE_MASK 0x0000ff00L
+#define SEM_MAILBOX_CONTROL__HOSTPORT_ENABLE__SHIFT 0x00000008
+#define SEM_MAILBOX_CONTROL__SIDEPORT_ENABLE_MASK 0x000000ffL
+#define SEM_MAILBOX_CONTROL__SIDEPORT_ENABLE__SHIFT 0x00000000
+#define SEM_MAILBOX__HOSTPORT_MASK 0x0000ff00L
+#define SEM_MAILBOX__HOSTPORT__SHIFT 0x00000008
+#define SEM_MAILBOX__SIDEPORT_MASK 0x000000ffL
+#define SEM_MAILBOX__SIDEPORT__SHIFT 0x00000000
+#define SEM_MCIF_CONFIG__MC_REQ_SWAP_MASK 0x00000003L
+#define SEM_MCIF_CONFIG__MC_REQ_SWAP__SHIFT 0x00000000
+#define SPU_PORT_STATUS__RESERVED_MASK 0xffffffffL
+#define SPU_PORT_STATUS__RESERVED__SHIFT 0x00000000
+#define SRBM_CAM_DATA__CAM_ADDR_MASK 0x0000ffffL
+#define SRBM_CAM_DATA__CAM_ADDR__SHIFT 0x00000000
+#define SRBM_CAM_DATA__CAM_REMAPADDR_MASK 0xffff0000L
+#define SRBM_CAM_DATA__CAM_REMAPADDR__SHIFT 0x00000010
+#define SRBM_CAM_INDEX__CAM_INDEX_MASK 0x00000007L
+#define SRBM_CAM_INDEX__CAM_INDEX__SHIFT 0x00000000
+#define SRBM_CHIP_REVISION__CHIP_REVISION_MASK 0x000000ffL
+#define SRBM_CHIP_REVISION__CHIP_REVISION__SHIFT 0x00000000
+#define SRBM_CNTL__COMBINE_SYSTEM_MC_MASK 0x00020000L
+#define SRBM_CNTL__COMBINE_SYSTEM_MC__SHIFT 0x00000011
+#define SRBM_CNTL__PWR_REQUEST_HALT_MASK 0x00010000L
+#define SRBM_CNTL__PWR_REQUEST_HALT__SHIFT 0x00000010
+#define SRBM_CNTL__READ_TIMEOUT_MASK 0x000003ffL
+#define SRBM_CNTL__READ_TIMEOUT__SHIFT 0x00000000
+#define SRBM_DEBUG_CNTL__SRBM_DEBUG_INDEX_MASK 0x0000003fL
+#define SRBM_DEBUG_CNTL__SRBM_DEBUG_INDEX__SHIFT 0x00000000
+#define SRBM_DEBUG_DATA__DATA_MASK 0xffffffffL
+#define SRBM_DEBUG_DATA__DATA__SHIFT 0x00000000
+#define SRBM_DEBUG__DISABLE_READ_TIMEOUT_MASK 0x00000002L
+#define SRBM_DEBUG__DISABLE_READ_TIMEOUT__SHIFT 0x00000001
+#define SRBM_DEBUG__IGNORE_RDY_MASK 0x00000001L
+#define SRBM_DEBUG__IGNORE_RDY__SHIFT 0x00000000
+#define SRBM_DEBUG__MC_CLOCK_DOMAIN_OVERRIDE_MASK 0x00000100L
+#define SRBM_DEBUG__MC_CLOCK_DOMAIN_OVERRIDE__SHIFT 0x00000008
+#define SRBM_DEBUG_SNAPSHOT__BIF_RDY_MASK 0x00000080L
+#define SRBM_DEBUG_SNAPSHOT__BIF_RDY__SHIFT 0x00000007
+#define SRBM_DEBUG_SNAPSHOT__DC_RDY_MASK 0x00000040L
+#define SRBM_DEBUG_SNAPSHOT__DC_RDY__SHIFT 0x00000006
+#define SRBM_DEBUG__SNAPSHOT_FREE_CNTRS_MASK 0x00000004L
+#define SRBM_DEBUG__SNAPSHOT_FREE_CNTRS__SHIFT 0x00000002
+#define SRBM_DEBUG_SNAPSHOT__GRBM_RDY_MASK 0x00000020L
+#define SRBM_DEBUG_SNAPSHOT__GRBM_RDY__SHIFT 0x00000005
+#define SRBM_DEBUG_SNAPSHOT__MCB_RDY_MASK 0x00000001L
+#define SRBM_DEBUG_SNAPSHOT__MCB_RDY__SHIFT 0x00000000
+#define SRBM_DEBUG_SNAPSHOT__MCC0_RDY_MASK 0x10000000L
+#define SRBM_DEBUG_SNAPSHOT__MCC0_RDY__SHIFT 0x0000001c
+#define SRBM_DEBUG_SNAPSHOT__MCC1_RDY_MASK 0x08000000L
+#define SRBM_DEBUG_SNAPSHOT__MCC1_RDY__SHIFT 0x0000001b
+#define SRBM_DEBUG_SNAPSHOT__MCC2_RDY_MASK 0x04000000L
+#define SRBM_DEBUG_SNAPSHOT__MCC2_RDY__SHIFT 0x0000001a
+#define SRBM_DEBUG_SNAPSHOT__MCC3_RDY_MASK 0x02000000L
+#define SRBM_DEBUG_SNAPSHOT__MCC3_RDY__SHIFT 0x00000019
+#define SRBM_DEBUG_SNAPSHOT__MCC4_RDY_MASK 0x01000000L
+#define SRBM_DEBUG_SNAPSHOT__MCC4_RDY__SHIFT 0x00000018
+#define SRBM_DEBUG_SNAPSHOT__MCC5_RDY_MASK 0x00800000L
+#define SRBM_DEBUG_SNAPSHOT__MCC5_RDY__SHIFT 0x00000017
+#define SRBM_DEBUG_SNAPSHOT__MCC6_RDY_MASK 0x00400000L
+#define SRBM_DEBUG_SNAPSHOT__MCC6_RDY__SHIFT 0x00000016
+#define SRBM_DEBUG_SNAPSHOT__MCC7_RDY_MASK 0x00200000L
+#define SRBM_DEBUG_SNAPSHOT__MCC7_RDY__SHIFT 0x00000015
+#define SRBM_DEBUG_SNAPSHOT__MCD0_RDY_MASK 0x00100000L
+#define SRBM_DEBUG_SNAPSHOT__MCD0_RDY__SHIFT 0x00000014
+#define SRBM_DEBUG_SNAPSHOT__MCD1_RDY_MASK 0x00080000L
+#define SRBM_DEBUG_SNAPSHOT__MCD1_RDY__SHIFT 0x00000013
+#define SRBM_DEBUG_SNAPSHOT__MCD2_RDY_MASK 0x00040000L
+#define SRBM_DEBUG_SNAPSHOT__MCD2_RDY__SHIFT 0x00000012
+#define SRBM_DEBUG_SNAPSHOT__MCD3_RDY_MASK 0x00020000L
+#define SRBM_DEBUG_SNAPSHOT__MCD3_RDY__SHIFT 0x00000011
+#define SRBM_DEBUG_SNAPSHOT__MCD4_RDY_MASK 0x00010000L
+#define SRBM_DEBUG_SNAPSHOT__MCD4_RDY__SHIFT 0x00000010
+#define SRBM_DEBUG_SNAPSHOT__MCD5_RDY_MASK 0x00008000L
+#define SRBM_DEBUG_SNAPSHOT__MCD5_RDY__SHIFT 0x0000000f
+#define SRBM_DEBUG_SNAPSHOT__MCD6_RDY_MASK 0x00004000L
+#define SRBM_DEBUG_SNAPSHOT__MCD6_RDY__SHIFT 0x0000000e
+#define SRBM_DEBUG_SNAPSHOT__MCD7_RDY_MASK 0x00002000L
+#define SRBM_DEBUG_SNAPSHOT__MCD7_RDY__SHIFT 0x0000000d
+#define SRBM_DEBUG_SNAPSHOT__ORB_RDY_MASK 0x00001000L
+#define SRBM_DEBUG_SNAPSHOT__ORB_RDY__SHIFT 0x0000000c
+#define SRBM_DEBUG_SNAPSHOT__REGBB_RDY_MASK 0x00000800L
+#define SRBM_DEBUG_SNAPSHOT__REGBB_RDY__SHIFT 0x0000000b
+#define SRBM_DEBUG_SNAPSHOT__UVD_RDY_MASK 0x00000200L
+#define SRBM_DEBUG_SNAPSHOT__UVD_RDY__SHIFT 0x00000009
+#define SRBM_DEBUG_SNAPSHOT__VCE_RDY_MASK 0x20000000L
+#define SRBM_DEBUG_SNAPSHOT__VCE_RDY__SHIFT 0x0000001d
+#define SRBM_DEBUG_SNAPSHOT__XDMA_RDY_MASK 0x00000100L
+#define SRBM_DEBUG_SNAPSHOT__XDMA_RDY__SHIFT 0x00000008
+#define SRBM_DEBUG_SNAPSHOT__XSP_RDY_MASK 0x00000400L
+#define SRBM_DEBUG_SNAPSHOT__XSP_RDY__SHIFT 0x0000000a
+#define SRBM_DEBUG__SYS_CLOCK_DOMAIN_OVERRIDE_MASK 0x00000010L
+#define SRBM_DEBUG__SYS_CLOCK_DOMAIN_OVERRIDE__SHIFT 0x00000004
+#define SRBM_DEBUG__UVD_CLOCK_DOMAIN_OVERRIDE_MASK 0x00000040L
+#define SRBM_DEBUG__UVD_CLOCK_DOMAIN_OVERRIDE__SHIFT 0x00000006
+#define SRBM_DEBUG__VCE_CLOCK_DOMAIN_OVERRIDE_MASK 0x00000020L
+#define SRBM_DEBUG__VCE_CLOCK_DOMAIN_OVERRIDE__SHIFT 0x00000005
+#define SRBM_GFX_CNTL__VMID_MASK 0x000000f0L
+#define SRBM_GFX_CNTL__VMID__SHIFT 0x00000004
+#define SRBM_INT_ACK__RDERR_INT_ACK_MASK 0x00000001L
+#define SRBM_INT_ACK__RDERR_INT_ACK__SHIFT 0x00000000
+#define SRBM_INT_CNTL__RDERR_INT_MASK_MASK 0x00000001L
+#define SRBM_INT_CNTL__RDERR_INT_MASK__SHIFT 0x00000000
+#define SRBM_INT_STATUS__RDERR_INT_STAT_MASK 0x00000001L
+#define SRBM_INT_STATUS__RDERR_INT_STAT__SHIFT 0x00000000
+#define SRBM_MC_CLKEN_CNTL__POST_DELAY_CNT_MASK 0x00001f00L
+#define SRBM_MC_CLKEN_CNTL__POST_DELAY_CNT__SHIFT 0x00000008
+#define SRBM_MC_CLKEN_CNTL__PREFIX_DELAY_CNT_MASK 0x0000000fL
+#define SRBM_MC_CLKEN_CNTL__PREFIX_DELAY_CNT__SHIFT 0x00000000
+#define SRBM_PERFCOUNTER0_HI__PERF_COUNT0_HI_MASK 0xffffffffL
+#define SRBM_PERFCOUNTER0_HI__PERF_COUNT0_HI__SHIFT 0x00000000
+#define SRBM_PERFCOUNTER0_LO__PERF_COUNT0_LO_MASK 0xffffffffL
+#define SRBM_PERFCOUNTER0_LO__PERF_COUNT0_LO__SHIFT 0x00000000
+#define SRBM_PERFCOUNTER0_SELECT__PERF_SEL_MASK 0x0000003fL
+#define SRBM_PERFCOUNTER0_SELECT__PERF_SEL__SHIFT 0x00000000
+#define SRBM_PERFCOUNTER1_HI__PERF_COUNT1_HI_MASK 0xffffffffL
+#define SRBM_PERFCOUNTER1_HI__PERF_COUNT1_HI__SHIFT 0x00000000
+#define SRBM_PERFCOUNTER1_LO__PERF_COUNT1_LO_MASK 0xffffffffL
+#define SRBM_PERFCOUNTER1_LO__PERF_COUNT1_LO__SHIFT 0x00000000
+#define SRBM_PERFCOUNTER1_SELECT__PERF_SEL_MASK 0x0000003fL
+#define SRBM_PERFCOUNTER1_SELECT__PERF_SEL__SHIFT 0x00000000
+#define SRBM_PERFMON_CNTL__PERFMON_ENABLE_MODE_MASK 0x00000300L
+#define SRBM_PERFMON_CNTL__PERFMON_ENABLE_MODE__SHIFT 0x00000008
+#define SRBM_PERFMON_CNTL__PERFMON_SAMPLE_ENABLE_MASK 0x00000400L
+#define SRBM_PERFMON_CNTL__PERFMON_SAMPLE_ENABLE__SHIFT 0x0000000a
+#define SRBM_PERFMON_CNTL__PERFMON_STATE_MASK 0x0000000fL
+#define SRBM_PERFMON_CNTL__PERFMON_STATE__SHIFT 0x00000000
+#define SRBM_READ_ERROR__READ_ADDRESS_MASK 0x0003fffcL
+#define SRBM_READ_ERROR__READ_ADDRESS__SHIFT 0x00000002
+#define SRBM_READ_ERROR__READ_ERROR_MASK 0x80000000L
+#define SRBM_READ_ERROR__READ_ERROR__SHIFT 0x0000001f
+#define SRBM_READ_ERROR__READ_REQUESTER_GRBM_MASK 0x02000000L
+#define SRBM_READ_ERROR__READ_REQUESTER_GRBM__SHIFT 0x00000019
+#define SRBM_READ_ERROR__READ_REQUESTER_HI_MASK 0x01000000L
+#define SRBM_READ_ERROR__READ_REQUESTER_HI__SHIFT 0x00000018
+#define SRBM_READ_ERROR__READ_REQUESTER_SMU_MASK 0x04000000L
+#define SRBM_READ_ERROR__READ_REQUESTER_SMU__SHIFT 0x0000001a
+#define SRBM_READ_ERROR__READ_REQUESTER_TST_MASK 0x00400000L
+#define SRBM_READ_ERROR__READ_REQUESTER_TST__SHIFT 0x00000016
+#define SRBM_READ_ERROR__READ_REQUESTER_UVD_MASK 0x20000000L
+#define SRBM_READ_ERROR__READ_REQUESTER_UVD__SHIFT 0x0000001d
+#define SRBM_READ_ERROR__READ_REQUESTER_VCE_MASK 0x00100000L
+#define SRBM_READ_ERROR__READ_REQUESTER_VCE__SHIFT 0x00000014
+#define SRBM_SOFT_RESET__SOFT_RESET_BIF_MASK 0x00000002L
+#define SRBM_SOFT_RESET__SOFT_RESET_BIF__SHIFT 0x00000001
+#define SRBM_SOFT_RESET__SOFT_RESET_DC_MASK 0x00000020L
+#define SRBM_SOFT_RESET__SOFT_RESET_DC__SHIFT 0x00000005
+#define SRBM_SOFT_RESET__SOFT_RESET_GRBM_MASK 0x00000100L
+#define SRBM_SOFT_RESET__SOFT_RESET_GRBM__SHIFT 0x00000008
+#define SRBM_SOFT_RESET__SOFT_RESET_HDP_MASK 0x00000200L
+#define SRBM_SOFT_RESET__SOFT_RESET_HDP__SHIFT 0x00000009
+#define SRBM_SOFT_RESET__SOFT_RESET_IH_MASK 0x00000400L
+#define SRBM_SOFT_RESET__SOFT_RESET_IH__SHIFT 0x0000000a
+#define SRBM_SOFT_RESET__SOFT_RESET_MC_MASK 0x00000800L
+#define SRBM_SOFT_RESET__SOFT_RESET_MC__SHIFT 0x0000000b
+#define SRBM_SOFT_RESET__SOFT_RESET_ORB_MASK 0x00800000L
+#define SRBM_SOFT_RESET__SOFT_RESET_ORB__SHIFT 0x00000017
+#define SRBM_SOFT_RESET__SOFT_RESET_REGBB_MASK 0x00400000L
+#define SRBM_SOFT_RESET__SOFT_RESET_REGBB__SHIFT 0x00000016
+#define SRBM_SOFT_RESET__SOFT_RESET_ROM_MASK 0x00004000L
+#define SRBM_SOFT_RESET__SOFT_RESET_ROM__SHIFT 0x0000000e
+#define SRBM_SOFT_RESET__SOFT_RESET_SEM_MASK 0x00008000L
+#define SRBM_SOFT_RESET__SOFT_RESET_SEM__SHIFT 0x0000000f
+#define SRBM_SOFT_RESET__SOFT_RESET_TST_MASK 0x00200000L
+#define SRBM_SOFT_RESET__SOFT_RESET_TST__SHIFT 0x00000015
+#define SRBM_SOFT_RESET__SOFT_RESET_UVD_MASK 0x00040000L
+#define SRBM_SOFT_RESET__SOFT_RESET_UVD__SHIFT 0x00000012
+#define SRBM_SOFT_RESET__SOFT_RESET_VCE_MASK 0x01000000L
+#define SRBM_SOFT_RESET__SOFT_RESET_VCE__SHIFT 0x00000018
+#define SRBM_SOFT_RESET__SOFT_RESET_VMC_MASK 0x00020000L
+#define SRBM_SOFT_RESET__SOFT_RESET_VMC__SHIFT 0x00000011
+#define SRBM_SOFT_RESET__SOFT_RESET_XDMA_MASK 0x02000000L
+#define SRBM_SOFT_RESET__SOFT_RESET_XDMA__SHIFT 0x00000019
+#define SRBM_SOFT_RESET__SOFT_RESET_XSP_MASK 0x00080000L
+#define SRBM_SOFT_RESET__SOFT_RESET_XSP__SHIFT 0x00000013
+#define SRBM_STATUS2__TST_RQ_PENDING_MASK 0x00000002L
+#define SRBM_STATUS2__TST_RQ_PENDING__SHIFT 0x00000001
+#define SRBM_STATUS2__VCE_BUSY_MASK 0x00000080L
+#define SRBM_STATUS2__VCE_BUSY__SHIFT 0x00000007
+#define SRBM_STATUS2__VCE_RQ_PENDING_MASK 0x00000008L
+#define SRBM_STATUS2__VCE_RQ_PENDING__SHIFT 0x00000003
+#define SRBM_STATUS2__XDMA_BUSY_MASK 0x00000100L
+#define SRBM_STATUS2__XDMA_BUSY__SHIFT 0x00000008
+#define SRBM_STATUS2__XSP_BUSY_MASK 0x00000010L
+#define SRBM_STATUS2__XSP_BUSY__SHIFT 0x00000004
+#define SRBM_STATUS__BIF_BUSY_MASK 0x20000000L
+#define SRBM_STATUS__BIF_BUSY__SHIFT 0x0000001d
+#define SRBM_STATUS__GRBM_RQ_PENDING_MASK 0x00000020L
+#define SRBM_STATUS__GRBM_RQ_PENDING__SHIFT 0x00000005
+#define SRBM_STATUS__HI_RQ_PENDING_MASK 0x00000040L
+#define SRBM_STATUS__HI_RQ_PENDING__SHIFT 0x00000006
+#define SRBM_STATUS__IH_BUSY_MASK 0x00020000L
+#define SRBM_STATUS__IH_BUSY__SHIFT 0x00000011
+#define SRBM_STATUS__IO_EXTERN_SIGNAL_MASK 0x00000080L
+#define SRBM_STATUS__IO_EXTERN_SIGNAL__SHIFT 0x00000007
+#define SRBM_STATUS__MCB_BUSY_MASK 0x00000200L
+#define SRBM_STATUS__MCB_BUSY__SHIFT 0x00000009
+#define SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK 0x00000400L
+#define SRBM_STATUS__MCB_NON_DISPLAY_BUSY__SHIFT 0x0000000a
+#define SRBM_STATUS__MCC_BUSY_MASK 0x00000800L
+#define SRBM_STATUS__MCC_BUSY__SHIFT 0x0000000b
+#define SRBM_STATUS__MCD_BUSY_MASK 0x00001000L
+#define SRBM_STATUS__MCD_BUSY__SHIFT 0x0000000c
+#define SRBM_STATUS__SEM_BUSY_MASK 0x00004000L
+#define SRBM_STATUS__SEM_BUSY__SHIFT 0x0000000e
+#define SRBM_STATUS__SMU_RQ_PENDING_MASK 0x00000010L
+#define SRBM_STATUS__SMU_RQ_PENDING__SHIFT 0x00000004
+#define SRBM_STATUS__UVD_BUSY_MASK 0x00080000L
+#define SRBM_STATUS__UVD_BUSY__SHIFT 0x00000013
+#define SRBM_STATUS__UVD_RQ_PENDING_MASK 0x00000002L
+#define SRBM_STATUS__UVD_RQ_PENDING__SHIFT 0x00000001
+#define SRBM_STATUS__VMC_BUSY_MASK 0x00000100L
+#define SRBM_STATUS__VMC_BUSY__SHIFT 0x00000008
+#define SRBM_SYS_CLKEN_CNTL__POST_DELAY_CNT_MASK 0x00001f00L
+#define SRBM_SYS_CLKEN_CNTL__POST_DELAY_CNT__SHIFT 0x00000008
+#define SRBM_SYS_CLKEN_CNTL__PREFIX_DELAY_CNT_MASK 0x0000000fL
+#define SRBM_SYS_CLKEN_CNTL__PREFIX_DELAY_CNT__SHIFT 0x00000000
+#define SRBM_UVD_CLKEN_CNTL__POST_DELAY_CNT_MASK 0x00001f00L
+#define SRBM_UVD_CLKEN_CNTL__POST_DELAY_CNT__SHIFT 0x00000008
+#define SRBM_UVD_CLKEN_CNTL__PREFIX_DELAY_CNT_MASK 0x0000000fL
+#define SRBM_UVD_CLKEN_CNTL__PREFIX_DELAY_CNT__SHIFT 0x00000000
+#define SRBM_VCE_CLKEN_CNTL__POST_DELAY_CNT_MASK 0x00001f00L
+#define SRBM_VCE_CLKEN_CNTL__POST_DELAY_CNT__SHIFT 0x00000008
+#define SRBM_VCE_CLKEN_CNTL__PREFIX_DELAY_CNT_MASK 0x0000000fL
+#define SRBM_VCE_CLKEN_CNTL__PREFIX_DELAY_CNT__SHIFT 0x00000000
+#define UVD_CONFIG__UVD_RDREQ_URG_MASK 0x00000f00L
+#define UVD_CONFIG__UVD_RDREQ_URG__SHIFT 0x00000008
+#define UVD_CONFIG__UVD_REQ_TRAN_MASK 0x00010000L
+#define UVD_CONFIG__UVD_REQ_TRAN__SHIFT 0x00000010
+#define VCE_CONFIG__VCE_RDREQ_URG_MASK 0x00000f00L
+#define VCE_CONFIG__VCE_RDREQ_URG__SHIFT 0x00000008
+#define VCE_CONFIG__VCE_REQ_TRAN_MASK 0x00010000L
+#define VCE_CONFIG__VCE_REQ_TRAN__SHIFT 0x00000010
+#define XDMA_MSTR_CNTL__XDMA_MSTR_LAT_TEST_EN_MASK 0x00080000L
+#define XDMA_MSTR_CNTL__XDMA_MSTR_LAT_TEST_EN__SHIFT 0x00000013
+#define XDMA_MSTR_MEM_OVERFLOW_CNTL__XDMA_MSTR_OVERFLOW_COUNT_ENABLE_MASK 0x80000000L
+#define XDMA_MSTR_MEM_OVERFLOW_CNTL__XDMA_MSTR_OVERFLOW_COUNT_ENABLE__SHIFT 0x0000001f
+#define XDMA_MSTR_MEM_OVERFLOW_CNTL__XDMA_MSTR_OVERFLOW_COUNT_MASK 0x0000ffffL
+#define XDMA_MSTR_MEM_OVERFLOW_CNTL__XDMA_MSTR_OVERFLOW_COUNT__SHIFT 0x00000000
+#define XDMA_MSTR_MEM_OVERFLOW_CNTL__XDMA_MSTR_OVERFLOW_THRESHOLD_MASK 0x3fff0000L
+#define XDMA_MSTR_MEM_OVERFLOW_CNTL__XDMA_MSTR_OVERFLOW_THRESHOLD__SHIFT 0x00000010
+
+#endif
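
These *_MASK/__SHIFT pairs follow the usual convention of AMD's generated register headers: a field is extracted by and-ing the register value with the mask and shifting right, and updated by clearing the mask and or-ing in the shifted value. A minimal sketch of that pattern follows; the FIELD_GET/FIELD_SET helper names are illustrative only and are not part of this header.

/*
 * Illustrative helpers only, assuming the mask/shift naming convention
 * used throughout this header (REG__FIELD_MASK and REG__FIELD__SHIFT).
 */
#define FIELD_GET(reg, field) \
	(((reg) & field##_MASK) >> field##__SHIFT)
#define FIELD_SET(reg, field, val) \
	(((reg) & ~field##_MASK) | (((u32)(val) << field##__SHIFT) & field##_MASK))

/* Example: compose the IH ring buffer control word from its fields. */
u32 ih_rb_cntl = 0;
ih_rb_cntl = FIELD_SET(ih_rb_cntl, IH_RB_CNTL__RB_ENABLE, 1);
ih_rb_cntl = FIELD_SET(ih_rb_cntl, IH_RB_CNTL__RB_SIZE, 5);	/* log2 ring size */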
diff --git a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_6_0_d.h b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_6_0_d.h
new file mode 100644
index 000000000000..6b10be61efc3
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_6_0_d.h
@@ -0,0 +1,148 @@
+/*
+ *
+ * Copyright (C) 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef SMU_6_0_D_H
+#define SMU_6_0_D_H
+
+#define ixLCAC_MC0_CNTL 0x011C
+#define ixLCAC_MC0_OVR_SEL 0x011D
+#define ixLCAC_MC0_OVR_VAL 0x011E
+#define ixLCAC_MC1_CNTL 0x011F
+#define ixLCAC_MC1_OVR_SEL 0x0120
+#define ixLCAC_MC1_OVR_VAL 0x0121
+#define ixLCAC_MC2_CNTL 0x0122
+#define ixLCAC_MC2_OVR_SEL 0x0123
+#define ixLCAC_MC2_OVR_VAL 0x0124
+#define ixLCAC_MC3_CNTL 0x0125
+#define ixLCAC_MC3_OVR_SEL 0x0126
+#define ixLCAC_MC3_OVR_VAL 0x0127
+#define ixLCAC_MC4_CNTL 0x0128
+#define ixLCAC_MC4_OVR_SEL 0x0129
+#define ixLCAC_MC4_OVR_VAL 0x012A
+#define ixLCAC_MC5_CNTL 0x012B
+#define ixLCAC_MC5_OVR_SEL 0x012C
+#define ixLCAC_MC5_OVR_VAL 0x012D
+#define ixSMC_PC_C 0x80000370
+#define ixTHM_TMON0_DEBUG 0x03F0
+#define ixTHM_TMON0_INT_DATA 0x0380
+#define ixTHM_TMON0_RDIL0_DATA 0x0300
+#define ixTHM_TMON0_RDIL10_DATA 0x030A
+#define ixTHM_TMON0_RDIL11_DATA 0x030B
+#define ixTHM_TMON0_RDIL12_DATA 0x030C
+#define ixTHM_TMON0_RDIL13_DATA 0x030D
+#define ixTHM_TMON0_RDIL14_DATA 0x030E
+#define ixTHM_TMON0_RDIL15_DATA 0x030F
+#define ixTHM_TMON0_RDIL1_DATA 0x0301
+#define ixTHM_TMON0_RDIL2_DATA 0x0302
+#define ixTHM_TMON0_RDIL3_DATA 0x0303
+#define ixTHM_TMON0_RDIL4_DATA 0x0304
+#define ixTHM_TMON0_RDIL5_DATA 0x0305
+#define ixTHM_TMON0_RDIL6_DATA 0x0306
+#define ixTHM_TMON0_RDIL7_DATA 0x0307
+#define ixTHM_TMON0_RDIL8_DATA 0x0308
+#define ixTHM_TMON0_RDIL9_DATA 0x0309
+#define ixTHM_TMON0_RDIR0_DATA 0x0310
+#define ixTHM_TMON0_RDIR10_DATA 0x031A
+#define ixTHM_TMON0_RDIR11_DATA 0x031B
+#define ixTHM_TMON0_RDIR12_DATA 0x031C
+#define ixTHM_TMON0_RDIR13_DATA 0x031D
+#define ixTHM_TMON0_RDIR14_DATA 0x031E
+#define ixTHM_TMON0_RDIR15_DATA 0x031F
+#define ixTHM_TMON0_RDIR1_DATA 0x0311
+#define ixTHM_TMON0_RDIR2_DATA 0x0312
+#define ixTHM_TMON0_RDIR3_DATA 0x0313
+#define ixTHM_TMON0_RDIR4_DATA 0x0314
+#define ixTHM_TMON0_RDIR5_DATA 0x0315
+#define ixTHM_TMON0_RDIR6_DATA 0x0316
+#define ixTHM_TMON0_RDIR7_DATA 0x0317
+#define ixTHM_TMON0_RDIR8_DATA 0x0318
+#define ixTHM_TMON0_RDIR9_DATA 0x0319
+#define ixTHM_TMON1_DEBUG 0x03F1
+#define ixTHM_TMON1_INT_DATA 0x0381
+#define ixTHM_TMON1_RDIL0_DATA 0x0320
+#define ixTHM_TMON1_RDIL10_DATA 0x032A
+#define ixTHM_TMON1_RDIL11_DATA 0x032B
+#define ixTHM_TMON1_RDIL12_DATA 0x032C
+#define ixTHM_TMON1_RDIL13_DATA 0x032D
+#define ixTHM_TMON1_RDIL14_DATA 0x032E
+#define ixTHM_TMON1_RDIL15_DATA 0x032F
+#define ixTHM_TMON1_RDIL1_DATA 0x0321
+#define ixTHM_TMON1_RDIL2_DATA 0x0322
+#define ixTHM_TMON1_RDIL3_DATA 0x0323
+#define ixTHM_TMON1_RDIL4_DATA 0x0324
+#define ixTHM_TMON1_RDIL5_DATA 0x0325
+#define ixTHM_TMON1_RDIL6_DATA 0x0326
+#define ixTHM_TMON1_RDIL7_DATA 0x0327
+#define ixTHM_TMON1_RDIL8_DATA 0x0328
+#define ixTHM_TMON1_RDIL9_DATA 0x0329
+#define ixTHM_TMON1_RDIR0_DATA 0x0330
+#define ixTHM_TMON1_RDIR10_DATA 0x033A
+#define ixTHM_TMON1_RDIR11_DATA 0x033B
+#define ixTHM_TMON1_RDIR12_DATA 0x033C
+#define ixTHM_TMON1_RDIR13_DATA 0x033D
+#define ixTHM_TMON1_RDIR14_DATA 0x033E
+#define ixTHM_TMON1_RDIR15_DATA 0x033F
+#define ixTHM_TMON1_RDIR1_DATA 0x0331
+#define ixTHM_TMON1_RDIR2_DATA 0x0332
+#define ixTHM_TMON1_RDIR3_DATA 0x0333
+#define ixTHM_TMON1_RDIR4_DATA 0x0334
+#define ixTHM_TMON1_RDIR5_DATA 0x0335
+#define ixTHM_TMON1_RDIR6_DATA 0x0336
+#define ixTHM_TMON1_RDIR7_DATA 0x0337
+#define ixTHM_TMON1_RDIR8_DATA 0x0338
+#define ixTHM_TMON1_RDIR9_DATA 0x0339
+#define mmGPIOPAD_A 0x05E7
+#define mmGPIOPAD_EN 0x05E8
+#define mmGPIOPAD_EXTERN_TRIG_CNTL 0x05F1
+#define mmGPIOPAD_INT_EN 0x05EE
+#define mmGPIOPAD_INT_POLARITY 0x05F0
+#define mmGPIOPAD_INT_STAT 0x05EC
+#define mmGPIOPAD_INT_STAT_AK 0x05ED
+#define mmGPIOPAD_INT_STAT_EN 0x05EB
+#define mmGPIOPAD_INT_TYPE 0x05EF
+#define mmGPIOPAD_MASK 0x05E6
+#define mmGPIOPAD_PD_EN 0x05F4
+#define mmGPIOPAD_PINSTRAPS 0x05EA
+#define mmGPIOPAD_PU_EN 0x05F3
+#define mmGPIOPAD_RCVR_SEL 0x05F2
+#define mmGPIOPAD_STRENGTH 0x05E5
+#define mmGPIOPAD_SW_INT_STAT 0x05E4
+#define mmGPIOPAD_Y 0x05E9
+#define mmSMC_IND_ACCESS_CNTL 0x008A
+#define mmSMC_IND_DATA_0 0x0081
+#define mmSMC_IND_DATA 0x0081
+#define mmSMC_IND_DATA_1 0x0083
+#define mmSMC_IND_DATA_2 0x0085
+#define mmSMC_IND_DATA_3 0x0087
+#define mmSMC_IND_INDEX_0 0x0080
+#define mmSMC_IND_INDEX 0x0080
+#define mmSMC_IND_INDEX_1 0x0082
+#define mmSMC_IND_INDEX_2 0x0084
+#define mmSMC_IND_INDEX_3 0x0086
+#define mmSMC_MESSAGE_0 0x008B
+#define mmSMC_MESSAGE_1 0x008D
+#define mmSMC_MESSAGE_2 0x008F
+#define mmSMC_RESP_0 0x008C
+#define mmSMC_RESP_1 0x008E
+#define mmSMC_RESP_2 0x0090
+
+#endif
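
Two register-name prefixes appear in this file: mm* values are direct MMIO register indices, while ix* values are indirect SMC addresses reached through the mmSMC_IND_INDEX/mmSMC_IND_DATA pair. A minimal sketch of the usual index/data access pattern is below, assuming RREG32/WREG32-style MMIO accessors (the accessor and helper names are illustrative, not defined by this header); real driver code would additionally serialize these two accesses under a lock.

/* Read an indirect SMC register via the index/data pair (sketch). */
static u32 smc_ind_read(u32 ix_reg)
{
	WREG32(mmSMC_IND_INDEX_0, ix_reg);	/* select the indirect address */
	return RREG32(mmSMC_IND_DATA_0);	/* read the selected register */
}

/* e.g. fetch the raw TMON0 interrupt data word: */
u32 tmon0 = smc_ind_read(ixTHM_TMON0_INT_DATA);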
diff --git a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_6_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_6_0_sh_mask.h
new file mode 100644
index 000000000000..7d3925b7266e
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_6_0_sh_mask.h
@@ -0,0 +1,715 @@
+/*
+ *
+ * Copyright (C) 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef SMU_6_0_SH_MASK_H
+#define SMU_6_0_SH_MASK_H
+
+#define CG_SPLL_FUNC_CNTL_3__SPLL_FB_DIV_MASK 0x03ffffffL
+#define CG_SPLL_FUNC_CNTL_3__SPLL_FB_DIV__SHIFT 0x00000000
+#define CG_SPLL_FUNC_CNTL__SPLL_REF_DIV_MASK 0x000003f0L
+#define CG_SPLL_FUNC_CNTL__SPLL_REF_DIV__SHIFT 0x00000004
+#define GPIOPAD_A__GPIO_A_MASK 0x7fffffffL
+#define GPIOPAD_A__GPIO_A__SHIFT 0x00000000
+#define GPIOPAD_EN__GPIO_EN_MASK 0x7fffffffL
+#define GPIOPAD_EN__GPIO_EN__SHIFT 0x00000000
+#define GPIOPAD_EXTERN_TRIG_CNTL__EXTERN_TRIG_CLR_MASK 0x00000020L
+#define GPIOPAD_EXTERN_TRIG_CNTL__EXTERN_TRIG_CLR__SHIFT 0x00000005
+#define GPIOPAD_EXTERN_TRIG_CNTL__EXTERN_TRIG_READ_MASK 0x00000040L
+#define GPIOPAD_EXTERN_TRIG_CNTL__EXTERN_TRIG_READ__SHIFT 0x00000006
+#define GPIOPAD_EXTERN_TRIG_CNTL__EXTERN_TRIG_SEL_MASK 0x0000001fL
+#define GPIOPAD_EXTERN_TRIG_CNTL__EXTERN_TRIG_SEL__SHIFT 0x00000000
+#define GPIOPAD_INT_EN__GPIO_INT_EN_MASK 0x1fffffffL
+#define GPIOPAD_INT_EN__GPIO_INT_EN__SHIFT 0x00000000
+#define GPIOPAD_INT_EN__SW_INITIATED_INT_EN_MASK 0x80000000L
+#define GPIOPAD_INT_EN__SW_INITIATED_INT_EN__SHIFT 0x0000001f
+#define GPIOPAD_INT_POLARITY__GPIO_INT_POLARITY_MASK 0x1fffffffL
+#define GPIOPAD_INT_POLARITY__GPIO_INT_POLARITY__SHIFT 0x00000000
+#define GPIOPAD_INT_POLARITY__SW_INITIATED_INT_POLARITY_MASK 0x80000000L
+#define GPIOPAD_INT_POLARITY__SW_INITIATED_INT_POLARITY__SHIFT 0x0000001f
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_0_MASK 0x00000001L
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_0__SHIFT 0x00000000
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_10_MASK 0x00000400L
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_10__SHIFT 0x0000000a
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_11_MASK 0x00000800L
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_11__SHIFT 0x0000000b
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_12_MASK 0x00001000L
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_12__SHIFT 0x0000000c
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_13_MASK 0x00002000L
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_13__SHIFT 0x0000000d
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_14_MASK 0x00004000L
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_14__SHIFT 0x0000000e
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_15_MASK 0x00008000L
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_15__SHIFT 0x0000000f
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_16_MASK 0x00010000L
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_16__SHIFT 0x00000010
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_17_MASK 0x00020000L
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_17__SHIFT 0x00000011
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_18_MASK 0x00040000L
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_18__SHIFT 0x00000012
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_19_MASK 0x00080000L
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_19__SHIFT 0x00000013
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_1_MASK 0x00000002L
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_1__SHIFT 0x00000001
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_20_MASK 0x00100000L
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_20__SHIFT 0x00000014
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_21_MASK 0x00200000L
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_21__SHIFT 0x00000015
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_22_MASK 0x00400000L
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_22__SHIFT 0x00000016
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_23_MASK 0x00800000L
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_23__SHIFT 0x00000017
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_24_MASK 0x01000000L
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_24__SHIFT 0x00000018
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_25_MASK 0x02000000L
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_25__SHIFT 0x00000019
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_26_MASK 0x04000000L
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_26__SHIFT 0x0000001a
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_27_MASK 0x08000000L
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_27__SHIFT 0x0000001b
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_28_MASK 0x10000000L
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_28__SHIFT 0x0000001c
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_2_MASK 0x00000004L
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_2__SHIFT 0x00000002
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_3_MASK 0x00000008L
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_3__SHIFT 0x00000003
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_4_MASK 0x00000010L
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_4__SHIFT 0x00000004
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_5_MASK 0x00000020L
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_5__SHIFT 0x00000005
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_6_MASK 0x00000040L
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_6__SHIFT 0x00000006
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_7_MASK 0x00000080L
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_7__SHIFT 0x00000007
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_8_MASK 0x00000100L
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_8__SHIFT 0x00000008
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_9_MASK 0x00000200L
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_9__SHIFT 0x00000009
+#define GPIOPAD_INT_STAT_AK__SW_INITIATED_INT_STAT_AK_MASK 0x80000000L
+#define GPIOPAD_INT_STAT_AK__SW_INITIATED_INT_STAT_AK__SHIFT 0x0000001f
+#define GPIOPAD_INT_STAT_EN__GPIO_INT_STAT_EN_MASK 0x1fffffffL
+#define GPIOPAD_INT_STAT_EN__GPIO_INT_STAT_EN__SHIFT 0x00000000
+#define GPIOPAD_INT_STAT_EN__SW_INITIATED_INT_STAT_EN_MASK 0x80000000L
+#define GPIOPAD_INT_STAT_EN__SW_INITIATED_INT_STAT_EN__SHIFT 0x0000001f
+#define GPIOPAD_INT_STAT__GPIO_INT_STAT_MASK 0x1fffffffL
+#define GPIOPAD_INT_STAT__GPIO_INT_STAT__SHIFT 0x00000000
+#define GPIOPAD_INT_STAT__SW_INITIATED_INT_STAT_MASK 0x80000000L
+#define GPIOPAD_INT_STAT__SW_INITIATED_INT_STAT__SHIFT 0x0000001f
+#define GPIOPAD_INT_TYPE__GPIO_INT_TYPE_MASK 0x1fffffffL
+#define GPIOPAD_INT_TYPE__GPIO_INT_TYPE__SHIFT 0x00000000
+#define GPIOPAD_INT_TYPE__SW_INITIATED_INT_TYPE_MASK 0x80000000L
+#define GPIOPAD_INT_TYPE__SW_INITIATED_INT_TYPE__SHIFT 0x0000001f
+#define GPIOPAD_MASK__GPIO_MASK_MASK 0x7fffffffL
+#define GPIOPAD_MASK__GPIO_MASK__SHIFT 0x00000000
+#define GPIOPAD_PD_EN__GPIO_PD_EN_MASK 0x7fffffffL
+#define GPIOPAD_PD_EN__GPIO_PD_EN__SHIFT 0x00000000
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_0_MASK 0x00000001L
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_0__SHIFT 0x00000000
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_10_MASK 0x00000400L
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_10__SHIFT 0x0000000a
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_11_MASK 0x00000800L
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_11__SHIFT 0x0000000b
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_12_MASK 0x00001000L
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_12__SHIFT 0x0000000c
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_13_MASK 0x00002000L
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_13__SHIFT 0x0000000d
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_14_MASK 0x00004000L
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_14__SHIFT 0x0000000e
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_15_MASK 0x00008000L
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_15__SHIFT 0x0000000f
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_16_MASK 0x00010000L
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_16__SHIFT 0x00000010
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_17_MASK 0x00020000L
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_17__SHIFT 0x00000011
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_18_MASK 0x00040000L
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_18__SHIFT 0x00000012
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_19_MASK 0x00080000L
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_19__SHIFT 0x00000013
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_1_MASK 0x00000002L
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_1__SHIFT 0x00000001
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_20_MASK 0x00100000L
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_20__SHIFT 0x00000014
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_21_MASK 0x00200000L
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_21__SHIFT 0x00000015
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_22_MASK 0x00400000L
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_22__SHIFT 0x00000016
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_23_MASK 0x00800000L
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_23__SHIFT 0x00000017
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_24_MASK 0x01000000L
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_24__SHIFT 0x00000018
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_25_MASK 0x02000000L
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_25__SHIFT 0x00000019
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_26_MASK 0x04000000L
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_26__SHIFT 0x0000001a
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_27_MASK 0x08000000L
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_27__SHIFT 0x0000001b
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_28_MASK 0x10000000L
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_28__SHIFT 0x0000001c
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_29_MASK 0x20000000L
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_29__SHIFT 0x0000001d
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_2_MASK 0x00000004L
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_2__SHIFT 0x00000002
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_30_MASK 0x40000000L
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_30__SHIFT 0x0000001e
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_3_MASK 0x00000008L
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_3__SHIFT 0x00000003
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_4_MASK 0x00000010L
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_4__SHIFT 0x00000004
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_5_MASK 0x00000020L
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_5__SHIFT 0x00000005
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_6_MASK 0x00000040L
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_6__SHIFT 0x00000006
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_7_MASK 0x00000080L
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_7__SHIFT 0x00000007
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_8_MASK 0x00000100L
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_8__SHIFT 0x00000008
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_9_MASK 0x00000200L
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_9__SHIFT 0x00000009
+#define GPIOPAD_PU_EN__GPIO_PU_EN_MASK 0x7fffffffL
+#define GPIOPAD_PU_EN__GPIO_PU_EN__SHIFT 0x00000000
+#define GPIOPAD_RCVR_SEL__GPIO_RCVR_SEL_MASK 0x7fffffffL
+#define GPIOPAD_RCVR_SEL__GPIO_RCVR_SEL__SHIFT 0x00000000
+#define GPIOPAD_STRENGTH__GPIO_STRENGTH_SN_MASK 0x0000000fL
+#define GPIOPAD_STRENGTH__GPIO_STRENGTH_SN__SHIFT 0x00000000
+#define GPIOPAD_STRENGTH__GPIO_STRENGTH_SP_MASK 0x000000f0L
+#define GPIOPAD_STRENGTH__GPIO_STRENGTH_SP__SHIFT 0x00000004
+#define GPIOPAD_SW_INT_STAT__SW_INT_STAT_MASK 0x00000001L
+#define GPIOPAD_SW_INT_STAT__SW_INT_STAT__SHIFT 0x00000000
+#define GPIOPAD_Y__GPIO_Y_MASK 0x7fffffffL
+#define GPIOPAD_Y__GPIO_Y__SHIFT 0x00000000
+#define LCAC_MC0_CNTL__MC0_ENABLE_MASK 0x00000001L
+#define LCAC_MC0_CNTL__MC0_ENABLE__SHIFT 0x00000000
+#define LCAC_MC0_CNTL__MC0_THRESHOLD_MASK 0x0001fffeL
+#define LCAC_MC0_CNTL__MC0_THRESHOLD__SHIFT 0x00000001
+#define LCAC_MC0_OVR_SEL__MC0_OVR_SEL_MASK 0xffffffffL
+#define LCAC_MC0_OVR_SEL__MC0_OVR_SEL__SHIFT 0x00000000
+#define LCAC_MC0_OVR_VAL__MC0_OVR_VAL_MASK 0xffffffffL
+#define LCAC_MC0_OVR_VAL__MC0_OVR_VAL__SHIFT 0x00000000
+#define LCAC_MC1_CNTL__MC1_ENABLE_MASK 0x00000001L
+#define LCAC_MC1_CNTL__MC1_ENABLE__SHIFT 0x00000000
+#define LCAC_MC1_CNTL__MC1_THRESHOLD_MASK 0x0001fffeL
+#define LCAC_MC1_CNTL__MC1_THRESHOLD__SHIFT 0x00000001
+#define LCAC_MC1_OVR_SEL__MC1_OVR_SEL_MASK 0xffffffffL
+#define LCAC_MC1_OVR_SEL__MC1_OVR_SEL__SHIFT 0x00000000
+#define LCAC_MC1_OVR_VAL__MC1_OVR_VAL_MASK 0xffffffffL
+#define LCAC_MC1_OVR_VAL__MC1_OVR_VAL__SHIFT 0x00000000
+#define LCAC_MC2_CNTL__MC2_ENABLE_MASK 0x00000001L
+#define LCAC_MC2_CNTL__MC2_ENABLE__SHIFT 0x00000000
+#define LCAC_MC2_CNTL__MC2_THRESHOLD_MASK 0x0001fffeL
+#define LCAC_MC2_CNTL__MC2_THRESHOLD__SHIFT 0x00000001
+#define LCAC_MC2_OVR_SEL__MC2_OVR_SEL_MASK 0xffffffffL
+#define LCAC_MC2_OVR_SEL__MC2_OVR_SEL__SHIFT 0x00000000
+#define LCAC_MC2_OVR_VAL__MC2_OVR_VAL_MASK 0xffffffffL
+#define LCAC_MC2_OVR_VAL__MC2_OVR_VAL__SHIFT 0x00000000
+#define LCAC_MC3_CNTL__MC3_ENABLE_MASK 0x00000001L
+#define LCAC_MC3_CNTL__MC3_ENABLE__SHIFT 0x00000000
+#define LCAC_MC3_CNTL__MC3_THRESHOLD_MASK 0x0001fffeL
+#define LCAC_MC3_CNTL__MC3_THRESHOLD__SHIFT 0x00000001
+#define LCAC_MC3_OVR_SEL__MC3_OVR_SEL_MASK 0xffffffffL
+#define LCAC_MC3_OVR_SEL__MC3_OVR_SEL__SHIFT 0x00000000
+#define LCAC_MC3_OVR_VAL__MC3_OVR_VAL_MASK 0xffffffffL
+#define LCAC_MC3_OVR_VAL__MC3_OVR_VAL__SHIFT 0x00000000
+#define LCAC_MC4_CNTL__MC4_ENABLE_MASK 0x00000001L
+#define LCAC_MC4_CNTL__MC4_ENABLE__SHIFT 0x00000000
+#define LCAC_MC4_CNTL__MC4_THRESHOLD_MASK 0x0001fffeL
+#define LCAC_MC4_CNTL__MC4_THRESHOLD__SHIFT 0x00000001
+#define LCAC_MC4_OVR_SEL__MC4_OVR_SEL_MASK 0xffffffffL
+#define LCAC_MC4_OVR_SEL__MC4_OVR_SEL__SHIFT 0x00000000
+#define LCAC_MC4_OVR_VAL__MC4_OVR_VAL_MASK 0xffffffffL
+#define LCAC_MC4_OVR_VAL__MC4_OVR_VAL__SHIFT 0x00000000
+#define LCAC_MC5_CNTL__MC5_ENABLE_MASK 0x00000001L
+#define LCAC_MC5_CNTL__MC5_ENABLE__SHIFT 0x00000000
+#define LCAC_MC5_CNTL__MC5_THRESHOLD_MASK 0x0001fffeL
+#define LCAC_MC5_CNTL__MC5_THRESHOLD__SHIFT 0x00000001
+#define LCAC_MC5_OVR_SEL__MC5_OVR_SEL_MASK 0xffffffffL
+#define LCAC_MC5_OVR_SEL__MC5_OVR_SEL__SHIFT 0x00000000
+#define LCAC_MC5_OVR_VAL__MC5_OVR_VAL_MASK 0xffffffffL
+#define LCAC_MC5_OVR_VAL__MC5_OVR_VAL__SHIFT 0x00000000
+#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_0_MASK 0x00000001L
+#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_0__SHIFT 0x00000000
+#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_1_MASK 0x00000100L
+#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_1__SHIFT 0x00000008
+#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_2_MASK 0x00010000L
+#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_2__SHIFT 0x00000010
+#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_3_MASK 0x01000000L
+#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_3__SHIFT 0x00000018
+#define SMC_IND_DATA_0__SMC_IND_DATA_MASK 0xffffffffL
+#define SMC_IND_DATA_0__SMC_IND_DATA__SHIFT 0x00000000
+#define SMC_IND_DATA_1__SMC_IND_DATA_MASK 0xffffffffL
+#define SMC_IND_DATA_1__SMC_IND_DATA__SHIFT 0x00000000
+#define SMC_IND_DATA_2__SMC_IND_DATA_MASK 0xffffffffL
+#define SMC_IND_DATA_2__SMC_IND_DATA__SHIFT 0x00000000
+#define SMC_IND_DATA_3__SMC_IND_DATA_MASK 0xffffffffL
+#define SMC_IND_DATA_3__SMC_IND_DATA__SHIFT 0x00000000
+#define SMC_IND_DATA__SMC_IND_DATA_MASK 0xffffffffL
+#define SMC_IND_DATA__SMC_IND_DATA__SHIFT 0x00000000
+#define SMC_IND_INDEX_0__SMC_IND_ADDR_MASK 0xffffffffL
+#define SMC_IND_INDEX_0__SMC_IND_ADDR__SHIFT 0x00000000
+#define SMC_IND_INDEX_1__SMC_IND_ADDR_MASK 0xffffffffL
+#define SMC_IND_INDEX_1__SMC_IND_ADDR__SHIFT 0x00000000
+#define SMC_IND_INDEX_2__SMC_IND_ADDR_MASK 0xffffffffL
+#define SMC_IND_INDEX_2__SMC_IND_ADDR__SHIFT 0x00000000
+#define SMC_IND_INDEX_3__SMC_IND_ADDR_MASK 0xffffffffL
+#define SMC_IND_INDEX_3__SMC_IND_ADDR__SHIFT 0x00000000
+#define SMC_IND_INDEX__SMC_IND_ADDR_MASK 0xffffffffL
+#define SMC_IND_INDEX__SMC_IND_ADDR__SHIFT 0x00000000
+#define SMC_MESSAGE_0__SMC_MSG_MASK 0xffffffffL
+#define SMC_MESSAGE_0__SMC_MSG__SHIFT 0x00000000
+#define SMC_MESSAGE_1__SMC_MSG_MASK 0xffffffffL
+#define SMC_MESSAGE_1__SMC_MSG__SHIFT 0x00000000
+#define SMC_MESSAGE_2__SMC_MSG_MASK 0xffffffffL
+#define SMC_MESSAGE_2__SMC_MSG__SHIFT 0x00000000
+#define SMC_PC_C__smc_pc_c_MASK 0xffffffffL
+#define SMC_PC_C__smc_pc_c__SHIFT 0x00000000
+#define SMC_RESP_0__SMC_RESP_MASK 0xffffffffL
+#define SMC_RESP_0__SMC_RESP__SHIFT 0x00000000
+#define SMC_RESP_1__SMC_RESP_MASK 0xffffffffL
+#define SMC_RESP_1__SMC_RESP__SHIFT 0x00000000
+#define SMC_RESP_2__SMC_RESP_MASK 0xffffffffL
+#define SMC_RESP_2__SMC_RESP__SHIFT 0x00000000
+#define SPLL_CNTL_MODE__SPLL_CTLREQ_DLY_CNT_MASK 0x000ff000L
+#define SPLL_CNTL_MODE__SPLL_CTLREQ_DLY_CNT__SHIFT 0x0000000c
+#define SPLL_CNTL_MODE__SPLL_ENSAT_MASK 0x00000010L
+#define SPLL_CNTL_MODE__SPLL_ENSAT__SHIFT 0x00000004
+#define SPLL_CNTL_MODE__SPLL_FASTEN_MASK 0x00000008L
+#define SPLL_CNTL_MODE__SPLL_FASTEN__SHIFT 0x00000003
+#define SPLL_CNTL_MODE__SPLL_LEGACY_PDIV_MASK 0x00000002L
+#define SPLL_CNTL_MODE__SPLL_LEGACY_PDIV__SHIFT 0x00000001
+#define SPLL_CNTL_MODE__SPLL_RESET_EN_MASK 0x10000000L
+#define SPLL_CNTL_MODE__SPLL_RESET_EN__SHIFT 0x0000001c
+#define SPLL_CNTL_MODE__SPLL_SW_DIR_CONTROL_MASK 0x00000001L
+#define SPLL_CNTL_MODE__SPLL_SW_DIR_CONTROL__SHIFT 0x00000000
+#define SPLL_CNTL_MODE__SPLL_TEST_CLK_EXT_DIV_MASK 0x00000c00L
+#define SPLL_CNTL_MODE__SPLL_TEST_CLK_EXT_DIV__SHIFT 0x0000000a
+#define SPLL_CNTL_MODE__SPLL_TEST_MASK 0x00000004L
+#define SPLL_CNTL_MODE__SPLL_TEST__SHIFT 0x00000002
+#define SPLL_CNTL_MODE__SPLL_VCO_MODE_MASK 0x60000000L
+#define SPLL_CNTL_MODE__SPLL_VCO_MODE__SHIFT 0x0000001d
+#define TARGET_AND_CURRENT_PROFILE_INDEX_1__CURR_PCIE_INDEX_MASK 0x0f000000L
+#define TARGET_AND_CURRENT_PROFILE_INDEX_1__CURR_PCIE_INDEX__SHIFT 0x00000018
+#define TARGET_AND_CURRENT_PROFILE_INDEX_1__TARG_PCIE_INDEX_MASK 0xf0000000L
+#define TARGET_AND_CURRENT_PROFILE_INDEX_1__TARG_PCIE_INDEX__SHIFT 0x0000001c
+#define THM_TMON0_DEBUG__DEBUG_RDI_MASK 0x0000001fL
+#define THM_TMON0_DEBUG__DEBUG_RDI__SHIFT 0x00000000
+#define THM_TMON0_DEBUG__DEBUG_Z_MASK 0x0000ffe0L
+#define THM_TMON0_DEBUG__DEBUG_Z__SHIFT 0x00000005
+#define THM_TMON0_INT_DATA__TEMP_MASK 0x00fff000L
+#define THM_TMON0_INT_DATA__TEMP__SHIFT 0x0000000c
+#define THM_TMON0_INT_DATA__VALID_MASK 0x00000800L
+#define THM_TMON0_INT_DATA__VALID__SHIFT 0x0000000b
+#define THM_TMON0_INT_DATA__Z_MASK 0x000007ffL
+#define THM_TMON0_INT_DATA__Z__SHIFT 0x00000000
+#define THM_TMON0_RDIL0_DATA__TEMP_MASK 0x00fff000L
+#define THM_TMON0_RDIL0_DATA__TEMP__SHIFT 0x0000000c
+#define THM_TMON0_RDIL0_DATA__VALID_MASK 0x00000800L
+#define THM_TMON0_RDIL0_DATA__VALID__SHIFT 0x0000000b
+#define THM_TMON0_RDIL0_DATA__Z_MASK 0x000007ffL
+#define THM_TMON0_RDIL0_DATA__Z__SHIFT 0x00000000
+#define THM_TMON0_RDIL10_DATA__TEMP_MASK 0x00fff000L
+#define THM_TMON0_RDIL10_DATA__TEMP__SHIFT 0x0000000c
+#define THM_TMON0_RDIL10_DATA__VALID_MASK 0x00000800L
+#define THM_TMON0_RDIL10_DATA__VALID__SHIFT 0x0000000b
+#define THM_TMON0_RDIL10_DATA__Z_MASK 0x000007ffL
+#define THM_TMON0_RDIL10_DATA__Z__SHIFT 0x00000000
+#define THM_TMON0_RDIL11_DATA__TEMP_MASK 0x00fff000L
+#define THM_TMON0_RDIL11_DATA__TEMP__SHIFT 0x0000000c
+#define THM_TMON0_RDIL11_DATA__VALID_MASK 0x00000800L
+#define THM_TMON0_RDIL11_DATA__VALID__SHIFT 0x0000000b
+#define THM_TMON0_RDIL11_DATA__Z_MASK 0x000007ffL
+#define THM_TMON0_RDIL11_DATA__Z__SHIFT 0x00000000
+#define THM_TMON0_RDIL12_DATA__TEMP_MASK 0x00fff000L
+#define THM_TMON0_RDIL12_DATA__TEMP__SHIFT 0x0000000c
+#define THM_TMON0_RDIL12_DATA__VALID_MASK 0x00000800L
+#define THM_TMON0_RDIL12_DATA__VALID__SHIFT 0x0000000b
+#define THM_TMON0_RDIL12_DATA__Z_MASK 0x000007ffL
+#define THM_TMON0_RDIL12_DATA__Z__SHIFT 0x00000000
+#define THM_TMON0_RDIL13_DATA__TEMP_MASK 0x00fff000L
+#define THM_TMON0_RDIL13_DATA__TEMP__SHIFT 0x0000000c
+#define THM_TMON0_RDIL13_DATA__VALID_MASK 0x00000800L
+#define THM_TMON0_RDIL13_DATA__VALID__SHIFT 0x0000000b
+#define THM_TMON0_RDIL13_DATA__Z_MASK 0x000007ffL
+#define THM_TMON0_RDIL13_DATA__Z__SHIFT 0x00000000
+#define THM_TMON0_RDIL14_DATA__TEMP_MASK 0x00fff000L
+#define THM_TMON0_RDIL14_DATA__TEMP__SHIFT 0x0000000c
+#define THM_TMON0_RDIL14_DATA__VALID_MASK 0x00000800L
+#define THM_TMON0_RDIL14_DATA__VALID__SHIFT 0x0000000b
+#define THM_TMON0_RDIL14_DATA__Z_MASK 0x000007ffL
+#define THM_TMON0_RDIL14_DATA__Z__SHIFT 0x00000000
+#define THM_TMON0_RDIL15_DATA__TEMP_MASK 0x00fff000L
+#define THM_TMON0_RDIL15_DATA__TEMP__SHIFT 0x0000000c
+#define THM_TMON0_RDIL15_DATA__VALID_MASK 0x00000800L
+#define THM_TMON0_RDIL15_DATA__VALID__SHIFT 0x0000000b
+#define THM_TMON0_RDIL15_DATA__Z_MASK 0x000007ffL
+#define THM_TMON0_RDIL15_DATA__Z__SHIFT 0x00000000
+#define THM_TMON0_RDIL1_DATA__TEMP_MASK 0x00fff000L
+#define THM_TMON0_RDIL1_DATA__TEMP__SHIFT 0x0000000c
+#define THM_TMON0_RDIL1_DATA__VALID_MASK 0x00000800L
+#define THM_TMON0_RDIL1_DATA__VALID__SHIFT 0x0000000b
+#define THM_TMON0_RDIL1_DATA__Z_MASK 0x000007ffL
+#define THM_TMON0_RDIL1_DATA__Z__SHIFT 0x00000000
+#define THM_TMON0_RDIL2_DATA__TEMP_MASK 0x00fff000L
+#define THM_TMON0_RDIL2_DATA__TEMP__SHIFT 0x0000000c
+#define THM_TMON0_RDIL2_DATA__VALID_MASK 0x00000800L
+#define THM_TMON0_RDIL2_DATA__VALID__SHIFT 0x0000000b
+#define THM_TMON0_RDIL2_DATA__Z_MASK 0x000007ffL
+#define THM_TMON0_RDIL2_DATA__Z__SHIFT 0x00000000
+#define THM_TMON0_RDIL3_DATA__TEMP_MASK 0x00fff000L
+#define THM_TMON0_RDIL3_DATA__TEMP__SHIFT 0x0000000c
+#define THM_TMON0_RDIL3_DATA__VALID_MASK 0x00000800L
+#define THM_TMON0_RDIL3_DATA__VALID__SHIFT 0x0000000b
+#define THM_TMON0_RDIL3_DATA__Z_MASK 0x000007ffL
+#define THM_TMON0_RDIL3_DATA__Z__SHIFT 0x00000000
+#define THM_TMON0_RDIL4_DATA__TEMP_MASK 0x00fff000L
+#define THM_TMON0_RDIL4_DATA__TEMP__SHIFT 0x0000000c
+#define THM_TMON0_RDIL4_DATA__VALID_MASK 0x00000800L
+#define THM_TMON0_RDIL4_DATA__VALID__SHIFT 0x0000000b
+#define THM_TMON0_RDIL4_DATA__Z_MASK 0x000007ffL
+#define THM_TMON0_RDIL4_DATA__Z__SHIFT 0x00000000
+#define THM_TMON0_RDIL5_DATA__TEMP_MASK 0x00fff000L
+#define THM_TMON0_RDIL5_DATA__TEMP__SHIFT 0x0000000c
+#define THM_TMON0_RDIL5_DATA__VALID_MASK 0x00000800L
+#define THM_TMON0_RDIL5_DATA__VALID__SHIFT 0x0000000b
+#define THM_TMON0_RDIL5_DATA__Z_MASK 0x000007ffL
+#define THM_TMON0_RDIL5_DATA__Z__SHIFT 0x00000000
+#define THM_TMON0_RDIL6_DATA__TEMP_MASK 0x00fff000L
+#define THM_TMON0_RDIL6_DATA__TEMP__SHIFT 0x0000000c
+#define THM_TMON0_RDIL6_DATA__VALID_MASK 0x00000800L
+#define THM_TMON0_RDIL6_DATA__VALID__SHIFT 0x0000000b
+#define THM_TMON0_RDIL6_DATA__Z_MASK 0x000007ffL
+#define THM_TMON0_RDIL6_DATA__Z__SHIFT 0x00000000
+#define THM_TMON0_RDIL7_DATA__TEMP_MASK 0x00fff000L
+#define THM_TMON0_RDIL7_DATA__TEMP__SHIFT 0x0000000c
+#define THM_TMON0_RDIL7_DATA__VALID_MASK 0x00000800L
+#define THM_TMON0_RDIL7_DATA__VALID__SHIFT 0x0000000b
+#define THM_TMON0_RDIL7_DATA__Z_MASK 0x000007ffL
+#define THM_TMON0_RDIL7_DATA__Z__SHIFT 0x00000000
+#define THM_TMON0_RDIL8_DATA__TEMP_MASK 0x00fff000L
+#define THM_TMON0_RDIL8_DATA__TEMP__SHIFT 0x0000000c
+#define THM_TMON0_RDIL8_DATA__VALID_MASK 0x00000800L
+#define THM_TMON0_RDIL8_DATA__VALID__SHIFT 0x0000000b
+#define THM_TMON0_RDIL8_DATA__Z_MASK 0x000007ffL
+#define THM_TMON0_RDIL8_DATA__Z__SHIFT 0x00000000
+#define THM_TMON0_RDIL9_DATA__TEMP_MASK 0x00fff000L
+#define THM_TMON0_RDIL9_DATA__TEMP__SHIFT 0x0000000c
+#define THM_TMON0_RDIL9_DATA__VALID_MASK 0x00000800L
+#define THM_TMON0_RDIL9_DATA__VALID__SHIFT 0x0000000b
+#define THM_TMON0_RDIL9_DATA__Z_MASK 0x000007ffL
+#define THM_TMON0_RDIL9_DATA__Z__SHIFT 0x00000000
+#define THM_TMON0_RDIR0_DATA__TEMP_MASK 0x00fff000L
+#define THM_TMON0_RDIR0_DATA__TEMP__SHIFT 0x0000000c
+#define THM_TMON0_RDIR0_DATA__VALID_MASK 0x00000800L
+#define THM_TMON0_RDIR0_DATA__VALID__SHIFT 0x0000000b
+#define THM_TMON0_RDIR0_DATA__Z_MASK 0x000007ffL
+#define THM_TMON0_RDIR0_DATA__Z__SHIFT 0x00000000
+#define THM_TMON0_RDIR10_DATA__TEMP_MASK 0x00fff000L
+#define THM_TMON0_RDIR10_DATA__TEMP__SHIFT 0x0000000c
+#define THM_TMON0_RDIR10_DATA__VALID_MASK 0x00000800L
+#define THM_TMON0_RDIR10_DATA__VALID__SHIFT 0x0000000b
+#define THM_TMON0_RDIR10_DATA__Z_MASK 0x000007ffL
+#define THM_TMON0_RDIR10_DATA__Z__SHIFT 0x00000000
+#define THM_TMON0_RDIR11_DATA__TEMP_MASK 0x00fff000L
+#define THM_TMON0_RDIR11_DATA__TEMP__SHIFT 0x0000000c
+#define THM_TMON0_RDIR11_DATA__VALID_MASK 0x00000800L
+#define THM_TMON0_RDIR11_DATA__VALID__SHIFT 0x0000000b
+#define THM_TMON0_RDIR11_DATA__Z_MASK 0x000007ffL
+#define THM_TMON0_RDIR11_DATA__Z__SHIFT 0x00000000
+#define THM_TMON0_RDIR12_DATA__TEMP_MASK 0x00fff000L
+#define THM_TMON0_RDIR12_DATA__TEMP__SHIFT 0x0000000c
+#define THM_TMON0_RDIR12_DATA__VALID_MASK 0x00000800L
+#define THM_TMON0_RDIR12_DATA__VALID__SHIFT 0x0000000b
+#define THM_TMON0_RDIR12_DATA__Z_MASK 0x000007ffL
+#define THM_TMON0_RDIR12_DATA__Z__SHIFT 0x00000000
+#define THM_TMON0_RDIR13_DATA__TEMP_MASK 0x00fff000L
+#define THM_TMON0_RDIR13_DATA__TEMP__SHIFT 0x0000000c
+#define THM_TMON0_RDIR13_DATA__VALID_MASK 0x00000800L
+#define THM_TMON0_RDIR13_DATA__VALID__SHIFT 0x0000000b
+#define THM_TMON0_RDIR13_DATA__Z_MASK 0x000007ffL
+#define THM_TMON0_RDIR13_DATA__Z__SHIFT 0x00000000
+#define THM_TMON0_RDIR14_DATA__TEMP_MASK 0x00fff000L
+#define THM_TMON0_RDIR14_DATA__TEMP__SHIFT 0x0000000c
+#define THM_TMON0_RDIR14_DATA__VALID_MASK 0x00000800L
+#define THM_TMON0_RDIR14_DATA__VALID__SHIFT 0x0000000b
+#define THM_TMON0_RDIR14_DATA__Z_MASK 0x000007ffL
+#define THM_TMON0_RDIR14_DATA__Z__SHIFT 0x00000000
+#define THM_TMON0_RDIR15_DATA__TEMP_MASK 0x00fff000L
+#define THM_TMON0_RDIR15_DATA__TEMP__SHIFT 0x0000000c
+#define THM_TMON0_RDIR15_DATA__VALID_MASK 0x00000800L
+#define THM_TMON0_RDIR15_DATA__VALID__SHIFT 0x0000000b
+#define THM_TMON0_RDIR15_DATA__Z_MASK 0x000007ffL
+#define THM_TMON0_RDIR15_DATA__Z__SHIFT 0x00000000
+#define THM_TMON0_RDIR1_DATA__TEMP_MASK 0x00fff000L
+#define THM_TMON0_RDIR1_DATA__TEMP__SHIFT 0x0000000c
+#define THM_TMON0_RDIR1_DATA__VALID_MASK 0x00000800L
+#define THM_TMON0_RDIR1_DATA__VALID__SHIFT 0x0000000b
+#define THM_TMON0_RDIR1_DATA__Z_MASK 0x000007ffL
+#define THM_TMON0_RDIR1_DATA__Z__SHIFT 0x00000000
+#define THM_TMON0_RDIR2_DATA__TEMP_MASK 0x00fff000L
+#define THM_TMON0_RDIR2_DATA__TEMP__SHIFT 0x0000000c
+#define THM_TMON0_RDIR2_DATA__VALID_MASK 0x00000800L
+#define THM_TMON0_RDIR2_DATA__VALID__SHIFT 0x0000000b
+#define THM_TMON0_RDIR2_DATA__Z_MASK 0x000007ffL
+#define THM_TMON0_RDIR2_DATA__Z__SHIFT 0x00000000
+#define THM_TMON0_RDIR3_DATA__TEMP_MASK 0x00fff000L
+#define THM_TMON0_RDIR3_DATA__TEMP__SHIFT 0x0000000c
+#define THM_TMON0_RDIR3_DATA__VALID_MASK 0x00000800L
+#define THM_TMON0_RDIR3_DATA__VALID__SHIFT 0x0000000b
+#define THM_TMON0_RDIR3_DATA__Z_MASK 0x000007ffL
+#define THM_TMON0_RDIR3_DATA__Z__SHIFT 0x00000000
+#define THM_TMON0_RDIR4_DATA__TEMP_MASK 0x00fff000L
+#define THM_TMON0_RDIR4_DATA__TEMP__SHIFT 0x0000000c
+#define THM_TMON0_RDIR4_DATA__VALID_MASK 0x00000800L
+#define THM_TMON0_RDIR4_DATA__VALID__SHIFT 0x0000000b
+#define THM_TMON0_RDIR4_DATA__Z_MASK 0x000007ffL
+#define THM_TMON0_RDIR4_DATA__Z__SHIFT 0x00000000
+#define THM_TMON0_RDIR5_DATA__TEMP_MASK 0x00fff000L
+#define THM_TMON0_RDIR5_DATA__TEMP__SHIFT 0x0000000c
+#define THM_TMON0_RDIR5_DATA__VALID_MASK 0x00000800L
+#define THM_TMON0_RDIR5_DATA__VALID__SHIFT 0x0000000b
+#define THM_TMON0_RDIR5_DATA__Z_MASK 0x000007ffL
+#define THM_TMON0_RDIR5_DATA__Z__SHIFT 0x00000000
+#define THM_TMON0_RDIR6_DATA__TEMP_MASK 0x00fff000L
+#define THM_TMON0_RDIR6_DATA__TEMP__SHIFT 0x0000000c
+#define THM_TMON0_RDIR6_DATA__VALID_MASK 0x00000800L
+#define THM_TMON0_RDIR6_DATA__VALID__SHIFT 0x0000000b
+#define THM_TMON0_RDIR6_DATA__Z_MASK 0x000007ffL
+#define THM_TMON0_RDIR6_DATA__Z__SHIFT 0x00000000
+#define THM_TMON0_RDIR7_DATA__TEMP_MASK 0x00fff000L
+#define THM_TMON0_RDIR7_DATA__TEMP__SHIFT 0x0000000c
+#define THM_TMON0_RDIR7_DATA__VALID_MASK 0x00000800L
+#define THM_TMON0_RDIR7_DATA__VALID__SHIFT 0x0000000b
+#define THM_TMON0_RDIR7_DATA__Z_MASK 0x000007ffL
+#define THM_TMON0_RDIR7_DATA__Z__SHIFT 0x00000000
+#define THM_TMON0_RDIR8_DATA__TEMP_MASK 0x00fff000L
+#define THM_TMON0_RDIR8_DATA__TEMP__SHIFT 0x0000000c
+#define THM_TMON0_RDIR8_DATA__VALID_MASK 0x00000800L
+#define THM_TMON0_RDIR8_DATA__VALID__SHIFT 0x0000000b
+#define THM_TMON0_RDIR8_DATA__Z_MASK 0x000007ffL
+#define THM_TMON0_RDIR8_DATA__Z__SHIFT 0x00000000
+#define THM_TMON0_RDIR9_DATA__TEMP_MASK 0x00fff000L
+#define THM_TMON0_RDIR9_DATA__TEMP__SHIFT 0x0000000c
+#define THM_TMON0_RDIR9_DATA__VALID_MASK 0x00000800L
+#define THM_TMON0_RDIR9_DATA__VALID__SHIFT 0x0000000b
+#define THM_TMON0_RDIR9_DATA__Z_MASK 0x000007ffL
+#define THM_TMON0_RDIR9_DATA__Z__SHIFT 0x00000000
+#define THM_TMON1_DEBUG__DEBUG_RDI_MASK 0x0000001fL
+#define THM_TMON1_DEBUG__DEBUG_RDI__SHIFT 0x00000000
+#define THM_TMON1_DEBUG__DEBUG_Z_MASK 0x0000ffe0L
+#define THM_TMON1_DEBUG__DEBUG_Z__SHIFT 0x00000005
+#define THM_TMON1_INT_DATA__TEMP_MASK 0x00fff000L
+#define THM_TMON1_INT_DATA__TEMP__SHIFT 0x0000000c
+#define THM_TMON1_INT_DATA__VALID_MASK 0x00000800L
+#define THM_TMON1_INT_DATA__VALID__SHIFT 0x0000000b
+#define THM_TMON1_INT_DATA__Z_MASK 0x000007ffL
+#define THM_TMON1_INT_DATA__Z__SHIFT 0x00000000
+#define THM_TMON1_RDIL0_DATA__TEMP_MASK 0x00fff000L
+#define THM_TMON1_RDIL0_DATA__TEMP__SHIFT 0x0000000c
+#define THM_TMON1_RDIL0_DATA__VALID_MASK 0x00000800L
+#define THM_TMON1_RDIL0_DATA__VALID__SHIFT 0x0000000b
+#define THM_TMON1_RDIL0_DATA__Z_MASK 0x000007ffL
+#define THM_TMON1_RDIL0_DATA__Z__SHIFT 0x00000000
+#define THM_TMON1_RDIL10_DATA__TEMP_MASK 0x00fff000L
+#define THM_TMON1_RDIL10_DATA__TEMP__SHIFT 0x0000000c
+#define THM_TMON1_RDIL10_DATA__VALID_MASK 0x00000800L
+#define THM_TMON1_RDIL10_DATA__VALID__SHIFT 0x0000000b
+#define THM_TMON1_RDIL10_DATA__Z_MASK 0x000007ffL
+#define THM_TMON1_RDIL10_DATA__Z__SHIFT 0x00000000
+#define THM_TMON1_RDIL11_DATA__TEMP_MASK 0x00fff000L
+#define THM_TMON1_RDIL11_DATA__TEMP__SHIFT 0x0000000c
+#define THM_TMON1_RDIL11_DATA__VALID_MASK 0x00000800L
+#define THM_TMON1_RDIL11_DATA__VALID__SHIFT 0x0000000b
+#define THM_TMON1_RDIL11_DATA__Z_MASK 0x000007ffL
+#define THM_TMON1_RDIL11_DATA__Z__SHIFT 0x00000000
+#define THM_TMON1_RDIL12_DATA__TEMP_MASK 0x00fff000L
+#define THM_TMON1_RDIL12_DATA__TEMP__SHIFT 0x0000000c
+#define THM_TMON1_RDIL12_DATA__VALID_MASK 0x00000800L
+#define THM_TMON1_RDIL12_DATA__VALID__SHIFT 0x0000000b
+#define THM_TMON1_RDIL12_DATA__Z_MASK 0x000007ffL
+#define THM_TMON1_RDIL12_DATA__Z__SHIFT 0x00000000
+#define THM_TMON1_RDIL13_DATA__TEMP_MASK 0x00fff000L
+#define THM_TMON1_RDIL13_DATA__TEMP__SHIFT 0x0000000c
+#define THM_TMON1_RDIL13_DATA__VALID_MASK 0x00000800L
+#define THM_TMON1_RDIL13_DATA__VALID__SHIFT 0x0000000b
+#define THM_TMON1_RDIL13_DATA__Z_MASK 0x000007ffL
+#define THM_TMON1_RDIL13_DATA__Z__SHIFT 0x00000000
+#define THM_TMON1_RDIL14_DATA__TEMP_MASK 0x00fff000L
+#define THM_TMON1_RDIL14_DATA__TEMP__SHIFT 0x0000000c
+#define THM_TMON1_RDIL14_DATA__VALID_MASK 0x00000800L
+#define THM_TMON1_RDIL14_DATA__VALID__SHIFT 0x0000000b
+#define THM_TMON1_RDIL14_DATA__Z_MASK 0x000007ffL
+#define THM_TMON1_RDIL14_DATA__Z__SHIFT 0x00000000
+#define THM_TMON1_RDIL15_DATA__TEMP_MASK 0x00fff000L
+#define THM_TMON1_RDIL15_DATA__TEMP__SHIFT 0x0000000c
+#define THM_TMON1_RDIL15_DATA__VALID_MASK 0x00000800L
+#define THM_TMON1_RDIL15_DATA__VALID__SHIFT 0x0000000b
+#define THM_TMON1_RDIL15_DATA__Z_MASK 0x000007ffL
+#define THM_TMON1_RDIL15_DATA__Z__SHIFT 0x00000000
+#define THM_TMON1_RDIL1_DATA__TEMP_MASK 0x00fff000L
+#define THM_TMON1_RDIL1_DATA__TEMP__SHIFT 0x0000000c
+#define THM_TMON1_RDIL1_DATA__VALID_MASK 0x00000800L
+#define THM_TMON1_RDIL1_DATA__VALID__SHIFT 0x0000000b
+#define THM_TMON1_RDIL1_DATA__Z_MASK 0x000007ffL
+#define THM_TMON1_RDIL1_DATA__Z__SHIFT 0x00000000
+#define THM_TMON1_RDIL2_DATA__TEMP_MASK 0x00fff000L
+#define THM_TMON1_RDIL2_DATA__TEMP__SHIFT 0x0000000c
+#define THM_TMON1_RDIL2_DATA__VALID_MASK 0x00000800L
+#define THM_TMON1_RDIL2_DATA__VALID__SHIFT 0x0000000b
+#define THM_TMON1_RDIL2_DATA__Z_MASK 0x000007ffL
+#define THM_TMON1_RDIL2_DATA__Z__SHIFT 0x00000000
+#define THM_TMON1_RDIL3_DATA__TEMP_MASK 0x00fff000L
+#define THM_TMON1_RDIL3_DATA__TEMP__SHIFT 0x0000000c
+#define THM_TMON1_RDIL3_DATA__VALID_MASK 0x00000800L
+#define THM_TMON1_RDIL3_DATA__VALID__SHIFT 0x0000000b
+#define THM_TMON1_RDIL3_DATA__Z_MASK 0x000007ffL
+#define THM_TMON1_RDIL3_DATA__Z__SHIFT 0x00000000
+#define THM_TMON1_RDIL4_DATA__TEMP_MASK 0x00fff000L
+#define THM_TMON1_RDIL4_DATA__TEMP__SHIFT 0x0000000c
+#define THM_TMON1_RDIL4_DATA__VALID_MASK 0x00000800L
+#define THM_TMON1_RDIL4_DATA__VALID__SHIFT 0x0000000b
+#define THM_TMON1_RDIL4_DATA__Z_MASK 0x000007ffL
+#define THM_TMON1_RDIL4_DATA__Z__SHIFT 0x00000000
+#define THM_TMON1_RDIL5_DATA__TEMP_MASK 0x00fff000L
+#define THM_TMON1_RDIL5_DATA__TEMP__SHIFT 0x0000000c
+#define THM_TMON1_RDIL5_DATA__VALID_MASK 0x00000800L
+#define THM_TMON1_RDIL5_DATA__VALID__SHIFT 0x0000000b
+#define THM_TMON1_RDIL5_DATA__Z_MASK 0x000007ffL
+#define THM_TMON1_RDIL5_DATA__Z__SHIFT 0x00000000
+#define THM_TMON1_RDIL6_DATA__TEMP_MASK 0x00fff000L
+#define THM_TMON1_RDIL6_DATA__TEMP__SHIFT 0x0000000c
+#define THM_TMON1_RDIL6_DATA__VALID_MASK 0x00000800L
+#define THM_TMON1_RDIL6_DATA__VALID__SHIFT 0x0000000b
+#define THM_TMON1_RDIL6_DATA__Z_MASK 0x000007ffL
+#define THM_TMON1_RDIL6_DATA__Z__SHIFT 0x00000000
+#define THM_TMON1_RDIL7_DATA__TEMP_MASK 0x00fff000L
+#define THM_TMON1_RDIL7_DATA__TEMP__SHIFT 0x0000000c
+#define THM_TMON1_RDIL7_DATA__VALID_MASK 0x00000800L
+#define THM_TMON1_RDIL7_DATA__VALID__SHIFT 0x0000000b
+#define THM_TMON1_RDIL7_DATA__Z_MASK 0x000007ffL
+#define THM_TMON1_RDIL7_DATA__Z__SHIFT 0x00000000
+#define THM_TMON1_RDIL8_DATA__TEMP_MASK 0x00fff000L
+#define THM_TMON1_RDIL8_DATA__TEMP__SHIFT 0x0000000c
+#define THM_TMON1_RDIL8_DATA__VALID_MASK 0x00000800L
+#define THM_TMON1_RDIL8_DATA__VALID__SHIFT 0x0000000b
+#define THM_TMON1_RDIL8_DATA__Z_MASK 0x000007ffL
+#define THM_TMON1_RDIL8_DATA__Z__SHIFT 0x00000000
+#define THM_TMON1_RDIL9_DATA__TEMP_MASK 0x00fff000L
+#define THM_TMON1_RDIL9_DATA__TEMP__SHIFT 0x0000000c
+#define THM_TMON1_RDIL9_DATA__VALID_MASK 0x00000800L
+#define THM_TMON1_RDIL9_DATA__VALID__SHIFT 0x0000000b
+#define THM_TMON1_RDIL9_DATA__Z_MASK 0x000007ffL
+#define THM_TMON1_RDIL9_DATA__Z__SHIFT 0x00000000
+#define THM_TMON1_RDIR0_DATA__TEMP_MASK 0x00fff000L
+#define THM_TMON1_RDIR0_DATA__TEMP__SHIFT 0x0000000c
+#define THM_TMON1_RDIR0_DATA__VALID_MASK 0x00000800L
+#define THM_TMON1_RDIR0_DATA__VALID__SHIFT 0x0000000b
+#define THM_TMON1_RDIR0_DATA__Z_MASK 0x000007ffL
+#define THM_TMON1_RDIR0_DATA__Z__SHIFT 0x00000000
+#define THM_TMON1_RDIR10_DATA__TEMP_MASK 0x00fff000L
+#define THM_TMON1_RDIR10_DATA__TEMP__SHIFT 0x0000000c
+#define THM_TMON1_RDIR10_DATA__VALID_MASK 0x00000800L
+#define THM_TMON1_RDIR10_DATA__VALID__SHIFT 0x0000000b
+#define THM_TMON1_RDIR10_DATA__Z_MASK 0x000007ffL
+#define THM_TMON1_RDIR10_DATA__Z__SHIFT 0x00000000
+#define THM_TMON1_RDIR11_DATA__TEMP_MASK 0x00fff000L
+#define THM_TMON1_RDIR11_DATA__TEMP__SHIFT 0x0000000c
+#define THM_TMON1_RDIR11_DATA__VALID_MASK 0x00000800L
+#define THM_TMON1_RDIR11_DATA__VALID__SHIFT 0x0000000b
+#define THM_TMON1_RDIR11_DATA__Z_MASK 0x000007ffL
+#define THM_TMON1_RDIR11_DATA__Z__SHIFT 0x00000000
+#define THM_TMON1_RDIR12_DATA__TEMP_MASK 0x00fff000L
+#define THM_TMON1_RDIR12_DATA__TEMP__SHIFT 0x0000000c
+#define THM_TMON1_RDIR12_DATA__VALID_MASK 0x00000800L
+#define THM_TMON1_RDIR12_DATA__VALID__SHIFT 0x0000000b
+#define THM_TMON1_RDIR12_DATA__Z_MASK 0x000007ffL
+#define THM_TMON1_RDIR12_DATA__Z__SHIFT 0x00000000
+#define THM_TMON1_RDIR13_DATA__TEMP_MASK 0x00fff000L
+#define THM_TMON1_RDIR13_DATA__TEMP__SHIFT 0x0000000c
+#define THM_TMON1_RDIR13_DATA__VALID_MASK 0x00000800L
+#define THM_TMON1_RDIR13_DATA__VALID__SHIFT 0x0000000b
+#define THM_TMON1_RDIR13_DATA__Z_MASK 0x000007ffL
+#define THM_TMON1_RDIR13_DATA__Z__SHIFT 0x00000000
+#define THM_TMON1_RDIR14_DATA__TEMP_MASK 0x00fff000L
+#define THM_TMON1_RDIR14_DATA__TEMP__SHIFT 0x0000000c
+#define THM_TMON1_RDIR14_DATA__VALID_MASK 0x00000800L
+#define THM_TMON1_RDIR14_DATA__VALID__SHIFT 0x0000000b
+#define THM_TMON1_RDIR14_DATA__Z_MASK 0x000007ffL
+#define THM_TMON1_RDIR14_DATA__Z__SHIFT 0x00000000
+#define THM_TMON1_RDIR15_DATA__TEMP_MASK 0x00fff000L
+#define THM_TMON1_RDIR15_DATA__TEMP__SHIFT 0x0000000c
+#define THM_TMON1_RDIR15_DATA__VALID_MASK 0x00000800L
+#define THM_TMON1_RDIR15_DATA__VALID__SHIFT 0x0000000b
+#define THM_TMON1_RDIR15_DATA__Z_MASK 0x000007ffL
+#define THM_TMON1_RDIR15_DATA__Z__SHIFT 0x00000000
+#define THM_TMON1_RDIR1_DATA__TEMP_MASK 0x00fff000L
+#define THM_TMON1_RDIR1_DATA__TEMP__SHIFT 0x0000000c
+#define THM_TMON1_RDIR1_DATA__VALID_MASK 0x00000800L
+#define THM_TMON1_RDIR1_DATA__VALID__SHIFT 0x0000000b
+#define THM_TMON1_RDIR1_DATA__Z_MASK 0x000007ffL
+#define THM_TMON1_RDIR1_DATA__Z__SHIFT 0x00000000
+#define THM_TMON1_RDIR2_DATA__TEMP_MASK 0x00fff000L
+#define THM_TMON1_RDIR2_DATA__TEMP__SHIFT 0x0000000c
+#define THM_TMON1_RDIR2_DATA__VALID_MASK 0x00000800L
+#define THM_TMON1_RDIR2_DATA__VALID__SHIFT 0x0000000b
+#define THM_TMON1_RDIR2_DATA__Z_MASK 0x000007ffL
+#define THM_TMON1_RDIR2_DATA__Z__SHIFT 0x00000000
+#define THM_TMON1_RDIR3_DATA__TEMP_MASK 0x00fff000L
+#define THM_TMON1_RDIR3_DATA__TEMP__SHIFT 0x0000000c
+#define THM_TMON1_RDIR3_DATA__VALID_MASK 0x00000800L
+#define THM_TMON1_RDIR3_DATA__VALID__SHIFT 0x0000000b
+#define THM_TMON1_RDIR3_DATA__Z_MASK 0x000007ffL
+#define THM_TMON1_RDIR3_DATA__Z__SHIFT 0x00000000
+#define THM_TMON1_RDIR4_DATA__TEMP_MASK 0x00fff000L
+#define THM_TMON1_RDIR4_DATA__TEMP__SHIFT 0x0000000c
+#define THM_TMON1_RDIR4_DATA__VALID_MASK 0x00000800L
+#define THM_TMON1_RDIR4_DATA__VALID__SHIFT 0x0000000b
+#define THM_TMON1_RDIR4_DATA__Z_MASK 0x000007ffL
+#define THM_TMON1_RDIR4_DATA__Z__SHIFT 0x00000000
+#define THM_TMON1_RDIR5_DATA__TEMP_MASK 0x00fff000L
+#define THM_TMON1_RDIR5_DATA__TEMP__SHIFT 0x0000000c
+#define THM_TMON1_RDIR5_DATA__VALID_MASK 0x00000800L
+#define THM_TMON1_RDIR5_DATA__VALID__SHIFT 0x0000000b
+#define THM_TMON1_RDIR5_DATA__Z_MASK 0x000007ffL
+#define THM_TMON1_RDIR5_DATA__Z__SHIFT 0x00000000
+#define THM_TMON1_RDIR6_DATA__TEMP_MASK 0x00fff000L
+#define THM_TMON1_RDIR6_DATA__TEMP__SHIFT 0x0000000c
+#define THM_TMON1_RDIR6_DATA__VALID_MASK 0x00000800L
+#define THM_TMON1_RDIR6_DATA__VALID__SHIFT 0x0000000b
+#define THM_TMON1_RDIR6_DATA__Z_MASK 0x000007ffL
+#define THM_TMON1_RDIR6_DATA__Z__SHIFT 0x00000000
+#define THM_TMON1_RDIR7_DATA__TEMP_MASK 0x00fff000L
+#define THM_TMON1_RDIR7_DATA__TEMP__SHIFT 0x0000000c
+#define THM_TMON1_RDIR7_DATA__VALID_MASK 0x00000800L
+#define THM_TMON1_RDIR7_DATA__VALID__SHIFT 0x0000000b
+#define THM_TMON1_RDIR7_DATA__Z_MASK 0x000007ffL
+#define THM_TMON1_RDIR7_DATA__Z__SHIFT 0x00000000
+#define THM_TMON1_RDIR8_DATA__TEMP_MASK 0x00fff000L
+#define THM_TMON1_RDIR8_DATA__TEMP__SHIFT 0x0000000c
+#define THM_TMON1_RDIR8_DATA__VALID_MASK 0x00000800L
+#define THM_TMON1_RDIR8_DATA__VALID__SHIFT 0x0000000b
+#define THM_TMON1_RDIR8_DATA__Z_MASK 0x000007ffL
+#define THM_TMON1_RDIR8_DATA__Z__SHIFT 0x00000000
+#define THM_TMON1_RDIR9_DATA__TEMP_MASK 0x00fff000L
+#define THM_TMON1_RDIR9_DATA__TEMP__SHIFT 0x0000000c
+#define THM_TMON1_RDIR9_DATA__VALID_MASK 0x00000800L
+#define THM_TMON1_RDIR9_DATA__VALID__SHIFT 0x0000000b
+#define THM_TMON1_RDIR9_DATA__Z_MASK 0x000007ffL
+#define THM_TMON1_RDIR9_DATA__Z__SHIFT 0x00000000
+
+#endif
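These sh_mask headers follow the usual convention of pairing each field's _MASK with a __SHIFT: a field is extracted as (reg & MASK) >> SHIFT and updated as (reg & ~MASK) | ((val << SHIFT) & MASK). A minimal illustrative sketch using the THM_TMON0_INT_DATA fields above (the helper names and validity check are ours, not driver API):

#include <stdint.h>

/* Illustrative helpers; the _MASK/__SHIFT macros come from the header above. */
static inline uint32_t reg_get_field(uint32_t reg, uint32_t mask, uint32_t shift)
{
	return (reg & mask) >> shift;
}

static inline uint32_t reg_set_field(uint32_t reg, uint32_t mask,
				     uint32_t shift, uint32_t val)
{
	return (reg & ~mask) | ((val << shift) & mask);
}

/* Decode a raw TMON0 sample: TEMP (bits 23:12) is only meaningful when
 * VALID (bit 11) is set, per the masks defined above. */
static int tmon0_temp(uint32_t raw, uint32_t *temp)
{
	if (!reg_get_field(raw, THM_TMON0_INT_DATA__VALID_MASK,
			   THM_TMON0_INT_DATA__VALID__SHIFT))
		return -1;
	*temp = reg_get_field(raw, THM_TMON0_INT_DATA__TEMP_MASK,
			      THM_TMON0_INT_DATA__TEMP__SHIFT);
	return 0;
}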
diff --git a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_1_d.h b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_1_d.h
index 3014d4a58c43..a9ef1562f43b 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_1_d.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_1_d.h
@@ -176,6 +176,8 @@
#define mmSMU1_SMU_SMC_IND_DATA 0x83
#define mmSMU2_SMU_SMC_IND_DATA 0x85
#define mmSMU3_SMU_SMC_IND_DATA 0x87
+#define mmSMC_IND_INDEX_11 0x1AC
+#define mmSMC_IND_DATA_11 0x1AD
#define ixRCU_UC_EVENTS 0xc0000004
#define ixRCU_MISC_CTRL 0xc0000010
#define ixCC_RCU_FUSES 0xc00c0000
diff --git a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_2_d.h b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_2_d.h
index 933917479985..22dd4c2b7290 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_2_d.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_2_d.h
@@ -87,6 +87,8 @@
#define mmSMC_IND_DATA_6 0x8d
#define mmSMC_IND_INDEX_7 0x8e
#define mmSMC_IND_DATA_7 0x8f
+#define mmSMC_IND_INDEX_11 0x1AC
+#define mmSMC_IND_DATA_11 0x1AD
#define mmSMC_IND_ACCESS_CNTL 0x92
#define mmSMC_MESSAGE_0 0x94
#define mmSMC_RESP_0 0x95
diff --git a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_3_d.h b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_3_d.h
index 44b1855cb8df..eca2b851f25f 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_3_d.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_3_d.h
@@ -90,6 +90,8 @@
#define mmSMC_IND_DATA_6 0x8d
#define mmSMC_IND_INDEX_7 0x8e
#define mmSMC_IND_DATA_7 0x8f
+#define mmSMC_IND_INDEX_11 0x1AC
+#define mmSMC_IND_DATA_11 0x1AD
#define mmSMC_IND_ACCESS_CNTL 0x92
#define mmSMC_MESSAGE_0 0x94
#define mmSMC_RESP_0 0x95
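The SMC_IND_INDEX_n/SMC_IND_DATA_n pairs added above provide indirect access to SMC address space: software writes the target SMC address to the index register, then reads or writes the companion data register. A sketch of an indirect read through the new index-11 pair, assuming generic dword MMIO accessors (RREG32/WREG32) and omitting the locking a real driver uses to keep the two-step sequence atomic:

/* Indirect SMC read via the index/data pair; illustrative only. */
static u32 smc_ind_rreg(u32 smc_addr)
{
	WREG32(mmSMC_IND_INDEX_11, smc_addr);	/* select SMC address */
	return RREG32(mmSMC_IND_DATA_11);	/* read the value back */
}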
diff --git a/drivers/gpu/drm/amd/include/asic_reg/uvd/uvd_4_0_d.h b/drivers/gpu/drm/amd/include/asic_reg/uvd/uvd_4_0_d.h
new file mode 100644
index 000000000000..5c0e3f3332e5
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/uvd/uvd_4_0_d.h
@@ -0,0 +1,96 @@
+/*
+ *
+ * Copyright (C) 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef UVD_4_0_D_H
+#define UVD_4_0_D_H
+
+#define ixUVD_CGC_CTRL2 0x00C1
+#define ixUVD_CGC_MEM_CTRL 0x00C0
+#define ixUVD_LMI_ADDR_EXT2 0x00AB
+#define ixUVD_LMI_CACHE_CTRL 0x009B
+#define ixUVD_LMI_SWAP_CNTL2 0x00AA
+#define ixUVD_MIF_CURR_ADDR_CONFIG 0x0048
+#define ixUVD_MIF_RECON1_ADDR_CONFIG 0x0114
+#define ixUVD_MIF_REF_ADDR_CONFIG 0x004C
+#define mmUVD_CGC_CTRL 0x3D2C
+#define mmUVD_CGC_GATE 0x3D2A
+#define mmUVD_CGC_STATUS 0x3D2B
+#define mmUVD_CGC_UDEC_STATUS 0x3D2D
+#define mmUVD_CONTEXT_ID 0x3DBD
+#define mmUVD_CTX_DATA 0x3D29
+#define mmUVD_CTX_INDEX 0x3D28
+#define mmUVD_ENGINE_CNTL 0x3BC6
+#define mmUVD_GPCOM_VCPU_CMD 0x3BC3
+#define mmUVD_GPCOM_VCPU_DATA0 0x3BC4
+#define mmUVD_GPCOM_VCPU_DATA1 0x3BC5
+#define mmUVD_GP_SCRATCH4 0x3D38
+#define mmUVD_LMI_ADDR_EXT 0x3D65
+#define mmUVD_LMI_CTRL 0x3D66
+#define mmUVD_LMI_CTRL2 0x3D3D
+#define mmUVD_LMI_EXT40_ADDR 0x3D26
+#define mmUVD_LMI_STATUS 0x3D67
+#define mmUVD_LMI_SWAP_CNTL 0x3D6D
+#define mmUVD_MASTINT_EN 0x3D40
+#define mmUVD_MPC_CNTL 0x3D77
+#define mmUVD_MPC_SET_ALU 0x3D7E
+#define mmUVD_MPC_SET_MUX 0x3D7D
+#define mmUVD_MPC_SET_MUXA0 0x3D79
+#define mmUVD_MPC_SET_MUXA1 0x3D7A
+#define mmUVD_MPC_SET_MUXB0 0x3D7B
+#define mmUVD_MPC_SET_MUXB1 0x3D7C
+#define mmUVD_MP_SWAP_CNTL 0x3D6F
+#define mmUVD_NO_OP 0x3BFF
+#define mmUVD_PGFSM_CONFIG 0x38F8
+#define mmUVD_PGFSM_READ_TILE1 0x38FA
+#define mmUVD_PGFSM_READ_TILE2 0x38FB
+#define mmUVD_POWER_STATUS 0x38FC
+#define mmUVD_RBC_IB_BASE 0x3DA1
+#define mmUVD_RBC_IB_SIZE 0x3DA2
+#define mmUVD_RBC_IB_SIZE_UPDATE 0x3DF1
+#define mmUVD_RBC_RB_BASE 0x3DA3
+#define mmUVD_RBC_RB_CNTL 0x3DA9
+#define mmUVD_RBC_RB_RPTR 0x3DA4
+#define mmUVD_RBC_RB_RPTR_ADDR 0x3DAA
+#define mmUVD_RBC_RB_WPTR 0x3DA5
+#define mmUVD_RBC_RB_WPTR_CNTL 0x3DA6
+#define mmUVD_SEMA_ADDR_HIGH 0x3BC1
+#define mmUVD_SEMA_ADDR_LOW 0x3BC0
+#define mmUVD_SEMA_CMD 0x3BC2
+#define mmUVD_SEMA_CNTL 0x3D00
+#define mmUVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL 0x3DB3
+#define mmUVD_SEMA_TIMEOUT_STATUS 0x3DB0
+#define mmUVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL 0x3DB2
+#define mmUVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL 0x3DB1
+#define mmUVD_SOFT_RESET 0x3DA0
+#define mmUVD_STATUS 0x3DAF
+#define mmUVD_UDEC_ADDR_CONFIG 0x3BD3
+#define mmUVD_UDEC_DB_ADDR_CONFIG 0x3BD4
+#define mmUVD_UDEC_DBW_ADDR_CONFIG 0x3BD5
+#define mmUVD_VCPU_CACHE_OFFSET0 0x3D36
+#define mmUVD_VCPU_CACHE_OFFSET1 0x3D38
+#define mmUVD_VCPU_CACHE_OFFSET2 0x3D3A
+#define mmUVD_VCPU_CACHE_SIZE0 0x3D37
+#define mmUVD_VCPU_CACHE_SIZE1 0x3D39
+#define mmUVD_VCPU_CACHE_SIZE2 0x3D3B
+#define mmUVD_VCPU_CNTL 0x3D98
+
+#endif
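In this header the mm prefix marks directly addressable MMIO registers (dword offsets), while the ix registers are reached indirectly through the mmUVD_CTX_INDEX/mmUVD_CTX_DATA window. A sketch of that indirect read, again assuming generic RREG32/WREG32 accessors; the 0x1ff index mask corresponds to UVD_CTX_INDEX__INDEX_MASK in the companion sh_mask header below:

/* Read an ixUVD_* register through the UVD context index/data pair;
 * illustrative only, and unlocked for brevity. */
static u32 uvd_ctx_rreg(u32 ix_reg)
{
	WREG32(mmUVD_CTX_INDEX, ix_reg & 0x1ff);
	return RREG32(mmUVD_CTX_DATA);
}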
diff --git a/drivers/gpu/drm/amd/include/asic_reg/uvd/uvd_4_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/uvd/uvd_4_0_sh_mask.h
new file mode 100644
index 000000000000..8ee3149df5b7
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/uvd/uvd_4_0_sh_mask.h
@@ -0,0 +1,795 @@
+/*
+ *
+ * Copyright (C) 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef UVD_4_0_SH_MASK_H
+#define UVD_4_0_SH_MASK_H
+
+#define UVD_CGC_CTRL2__DYN_OCLK_RAMP_EN_MASK 0x00000001L
+#define UVD_CGC_CTRL2__DYN_OCLK_RAMP_EN__SHIFT 0x00000000
+#define UVD_CGC_CTRL2__DYN_RCLK_RAMP_EN_MASK 0x00000002L
+#define UVD_CGC_CTRL2__DYN_RCLK_RAMP_EN__SHIFT 0x00000001
+#define UVD_CGC_CTRL2__GATER_DIV_ID_MASK 0x0000001cL
+#define UVD_CGC_CTRL2__GATER_DIV_ID__SHIFT 0x00000002
+#define UVD_CGC_CTRL__CLK_GATE_DLY_TIMER_MASK 0x0000003cL
+#define UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT 0x00000002
+#define UVD_CGC_CTRL__CLK_OFF_DELAY_MASK 0x000007c0L
+#define UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT 0x00000006
+#define UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK 0x00000001L
+#define UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT 0x00000000
+#define UVD_CGC_CTRL__IDCT_MODE_MASK 0x00800000L
+#define UVD_CGC_CTRL__IDCT_MODE__SHIFT 0x00000017
+#define UVD_CGC_CTRL__LBSI_MODE_MASK 0x04000000L
+#define UVD_CGC_CTRL__LBSI_MODE__SHIFT 0x0000001a
+#define UVD_CGC_CTRL__LMI_MC_MODE_MASK 0x00200000L
+#define UVD_CGC_CTRL__LMI_MC_MODE__SHIFT 0x00000015
+#define UVD_CGC_CTRL__LMI_UMC_MODE_MASK 0x00400000L
+#define UVD_CGC_CTRL__LMI_UMC_MODE__SHIFT 0x00000016
+#define UVD_CGC_CTRL__LRBBM_MODE_MASK 0x08000000L
+#define UVD_CGC_CTRL__LRBBM_MODE__SHIFT 0x0000001b
+#define UVD_CGC_CTRL__MPC_MODE_MASK 0x02000000L
+#define UVD_CGC_CTRL__MPC_MODE__SHIFT 0x00000019
+#define UVD_CGC_CTRL__MPEG2_MODE_MASK 0x00040000L
+#define UVD_CGC_CTRL__MPEG2_MODE__SHIFT 0x00000012
+#define UVD_CGC_CTRL__MPRD_MODE_MASK 0x01000000L
+#define UVD_CGC_CTRL__MPRD_MODE__SHIFT 0x00000018
+#define UVD_CGC_CTRL__RBC_MODE_MASK 0x00100000L
+#define UVD_CGC_CTRL__RBC_MODE__SHIFT 0x00000014
+#define UVD_CGC_CTRL__REGS_MODE_MASK 0x00080000L
+#define UVD_CGC_CTRL__REGS_MODE__SHIFT 0x00000013
+#define UVD_CGC_CTRL__SCPU_MODE_MASK 0x40000000L
+#define UVD_CGC_CTRL__SCPU_MODE__SHIFT 0x0000001e
+#define UVD_CGC_CTRL__SYS_MODE_MASK 0x00010000L
+#define UVD_CGC_CTRL__SYS_MODE__SHIFT 0x00000010
+#define UVD_CGC_CTRL__UDEC_CM_MODE_MASK 0x00001000L
+#define UVD_CGC_CTRL__UDEC_CM_MODE__SHIFT 0x0000000c
+#define UVD_CGC_CTRL__UDEC_DB_MODE_MASK 0x00004000L
+#define UVD_CGC_CTRL__UDEC_DB_MODE__SHIFT 0x0000000e
+#define UVD_CGC_CTRL__UDEC_IT_MODE_MASK 0x00002000L
+#define UVD_CGC_CTRL__UDEC_IT_MODE__SHIFT 0x0000000d
+#define UVD_CGC_CTRL__UDEC_MODE_MASK 0x00020000L
+#define UVD_CGC_CTRL__UDEC_MODE__SHIFT 0x00000011
+#define UVD_CGC_CTRL__UDEC_MP_MODE_MASK 0x00008000L
+#define UVD_CGC_CTRL__UDEC_MP_MODE__SHIFT 0x0000000f
+#define UVD_CGC_CTRL__UDEC_RE_MODE_MASK 0x00000800L
+#define UVD_CGC_CTRL__UDEC_RE_MODE__SHIFT 0x0000000b
+#define UVD_CGC_CTRL__VCPU_MODE_MASK 0x20000000L
+#define UVD_CGC_CTRL__VCPU_MODE__SHIFT 0x0000001d
+#define UVD_CGC_CTRL__WCB_MODE_MASK 0x10000000L
+#define UVD_CGC_CTRL__WCB_MODE__SHIFT 0x0000001c
+#define UVD_CGC_GATE__IDCT_MASK 0x00000080L
+#define UVD_CGC_GATE__IDCT__SHIFT 0x00000007
+#define UVD_CGC_GATE__LBSI_MASK 0x00000400L
+#define UVD_CGC_GATE__LBSI__SHIFT 0x0000000a
+#define UVD_CGC_GATE__LMI_MC_MASK 0x00000020L
+#define UVD_CGC_GATE__LMI_MC__SHIFT 0x00000005
+#define UVD_CGC_GATE__LMI_UMC_MASK 0x00000040L
+#define UVD_CGC_GATE__LMI_UMC__SHIFT 0x00000006
+#define UVD_CGC_GATE__LRBBM_MASK 0x00000800L
+#define UVD_CGC_GATE__LRBBM__SHIFT 0x0000000b
+#define UVD_CGC_GATE__MPC_MASK 0x00000200L
+#define UVD_CGC_GATE__MPC__SHIFT 0x00000009
+#define UVD_CGC_GATE__MPEG2_MASK 0x00000004L
+#define UVD_CGC_GATE__MPEG2__SHIFT 0x00000002
+#define UVD_CGC_GATE__MPRD_MASK 0x00000100L
+#define UVD_CGC_GATE__MPRD__SHIFT 0x00000008
+#define UVD_CGC_GATE__RBC_MASK 0x00000010L
+#define UVD_CGC_GATE__RBC__SHIFT 0x00000004
+#define UVD_CGC_GATE__REGS_MASK 0x00000008L
+#define UVD_CGC_GATE__REGS__SHIFT 0x00000003
+#define UVD_CGC_GATE__SCPU_MASK 0x00080000L
+#define UVD_CGC_GATE__SCPU__SHIFT 0x00000013
+#define UVD_CGC_GATE__SYS_MASK 0x00000001L
+#define UVD_CGC_GATE__SYS__SHIFT 0x00000000
+#define UVD_CGC_GATE__UDEC_CM_MASK 0x00002000L
+#define UVD_CGC_GATE__UDEC_CM__SHIFT 0x0000000d
+#define UVD_CGC_GATE__UDEC_DB_MASK 0x00008000L
+#define UVD_CGC_GATE__UDEC_DB__SHIFT 0x0000000f
+#define UVD_CGC_GATE__UDEC_IT_MASK 0x00004000L
+#define UVD_CGC_GATE__UDEC_IT__SHIFT 0x0000000e
+#define UVD_CGC_GATE__UDEC_MASK 0x00000002L
+#define UVD_CGC_GATE__UDEC_MP_MASK 0x00010000L
+#define UVD_CGC_GATE__UDEC_MP__SHIFT 0x00000010
+#define UVD_CGC_GATE__UDEC_RE_MASK 0x00001000L
+#define UVD_CGC_GATE__UDEC_RE__SHIFT 0x0000000c
+#define UVD_CGC_GATE__UDEC__SHIFT 0x00000001
+#define UVD_CGC_GATE__VCPU_MASK 0x00040000L
+#define UVD_CGC_GATE__VCPU__SHIFT 0x00000012
+#define UVD_CGC_GATE__WCB_MASK 0x00020000L
+#define UVD_CGC_GATE__WCB__SHIFT 0x00000011
+#define UVD_CGC_MEM_CTRL__LCM_LS_EN_MASK 0x00002000L
+#define UVD_CGC_MEM_CTRL__LCM_LS_EN__SHIFT 0x0000000d
+#define UVD_CGC_MEM_CTRL__LMI_MC_LS_EN_MASK 0x00000001L
+#define UVD_CGC_MEM_CTRL__LMI_MC_LS_EN__SHIFT 0x00000000
+#define UVD_CGC_MEM_CTRL__LS_CLEAR_DELAY_MASK 0x00f00000L
+#define UVD_CGC_MEM_CTRL__LS_CLEAR_DELAY__SHIFT 0x00000014
+#define UVD_CGC_MEM_CTRL__LS_SET_DELAY_MASK 0x000f0000L
+#define UVD_CGC_MEM_CTRL__LS_SET_DELAY__SHIFT 0x00000010
+#define UVD_CGC_MEM_CTRL__MIF_LS_EN_MASK 0x00001000L
+#define UVD_CGC_MEM_CTRL__MIF_LS_EN__SHIFT 0x0000000c
+#define UVD_CGC_MEM_CTRL__MPC_LS_EN_MASK 0x00000002L
+#define UVD_CGC_MEM_CTRL__MPC_LS_EN__SHIFT 0x00000001
+#define UVD_CGC_MEM_CTRL__MPRD_LS_EN_MASK 0x00000004L
+#define UVD_CGC_MEM_CTRL__MPRD_LS_EN__SHIFT 0x00000002
+#define UVD_CGC_MEM_CTRL__SCPU_LS_EN_MASK 0x00000800L
+#define UVD_CGC_MEM_CTRL__SCPU_LS_EN__SHIFT 0x0000000b
+#define UVD_CGC_MEM_CTRL__SYS_LS_EN_MASK 0x00000200L
+#define UVD_CGC_MEM_CTRL__SYS_LS_EN__SHIFT 0x00000009
+#define UVD_CGC_MEM_CTRL__UDEC_CM_LS_EN_MASK 0x00000020L
+#define UVD_CGC_MEM_CTRL__UDEC_CM_LS_EN__SHIFT 0x00000005
+#define UVD_CGC_MEM_CTRL__UDEC_DB_LS_EN_MASK 0x00000080L
+#define UVD_CGC_MEM_CTRL__UDEC_DB_LS_EN__SHIFT 0x00000007
+#define UVD_CGC_MEM_CTRL__UDEC_IT_LS_EN_MASK 0x00000040L
+#define UVD_CGC_MEM_CTRL__UDEC_IT_LS_EN__SHIFT 0x00000006
+#define UVD_CGC_MEM_CTRL__UDEC_MP_LS_EN_MASK 0x00000100L
+#define UVD_CGC_MEM_CTRL__UDEC_MP_LS_EN__SHIFT 0x00000008
+#define UVD_CGC_MEM_CTRL__UDEC_RE_LS_EN_MASK 0x00000010L
+#define UVD_CGC_MEM_CTRL__UDEC_RE_LS_EN__SHIFT 0x00000004
+#define UVD_CGC_MEM_CTRL__VCPU_LS_EN_MASK 0x00000400L
+#define UVD_CGC_MEM_CTRL__VCPU_LS_EN__SHIFT 0x0000000a
+#define UVD_CGC_MEM_CTRL__WCB_LS_EN_MASK 0x00000008L
+#define UVD_CGC_MEM_CTRL__WCB_LS_EN__SHIFT 0x00000003
+#define UVD_CGC_STATUS__IDCT_SCLK_MASK 0x00004000L
+#define UVD_CGC_STATUS__IDCT_SCLK__SHIFT 0x0000000e
+#define UVD_CGC_STATUS__IDCT_VCLK_MASK 0x00008000L
+#define UVD_CGC_STATUS__IDCT_VCLK__SHIFT 0x0000000f
+#define UVD_CGC_STATUS__LBSI_SCLK_MASK 0x00200000L
+#define UVD_CGC_STATUS__LBSI_SCLK__SHIFT 0x00000015
+#define UVD_CGC_STATUS__LBSI_VCLK_MASK 0x00400000L
+#define UVD_CGC_STATUS__LBSI_VCLK__SHIFT 0x00000016
+#define UVD_CGC_STATUS__LMI_MC_SCLK_MASK 0x00001000L
+#define UVD_CGC_STATUS__LMI_MC_SCLK__SHIFT 0x0000000c
+#define UVD_CGC_STATUS__LMI_UMC_SCLK_MASK 0x00002000L
+#define UVD_CGC_STATUS__LMI_UMC_SCLK__SHIFT 0x0000000d
+#define UVD_CGC_STATUS__LRBBM_SCLK_MASK 0x00800000L
+#define UVD_CGC_STATUS__LRBBM_SCLK__SHIFT 0x00000017
+#define UVD_CGC_STATUS__MPC_DCLK_MASK 0x00100000L
+#define UVD_CGC_STATUS__MPC_DCLK__SHIFT 0x00000014
+#define UVD_CGC_STATUS__MPC_SCLK_MASK 0x00080000L
+#define UVD_CGC_STATUS__MPC_SCLK__SHIFT 0x00000013
+#define UVD_CGC_STATUS__MPEG2_DCLK_MASK 0x00000080L
+#define UVD_CGC_STATUS__MPEG2_DCLK__SHIFT 0x00000007
+#define UVD_CGC_STATUS__MPEG2_SCLK_MASK 0x00000040L
+#define UVD_CGC_STATUS__MPEG2_SCLK__SHIFT 0x00000006
+#define UVD_CGC_STATUS__MPEG2_VCLK_MASK 0x00000100L
+#define UVD_CGC_STATUS__MPEG2_VCLK__SHIFT 0x00000008
+#define UVD_CGC_STATUS__MPRD_DCLK_MASK 0x00020000L
+#define UVD_CGC_STATUS__MPRD_DCLK__SHIFT 0x00000011
+#define UVD_CGC_STATUS__MPRD_SCLK_MASK 0x00010000L
+#define UVD_CGC_STATUS__MPRD_SCLK__SHIFT 0x00000010
+#define UVD_CGC_STATUS__MPRD_VCLK_MASK 0x00040000L
+#define UVD_CGC_STATUS__MPRD_VCLK__SHIFT 0x00000012
+#define UVD_CGC_STATUS__RBC_SCLK_MASK 0x00000800L
+#define UVD_CGC_STATUS__RBC_SCLK__SHIFT 0x0000000b
+#define UVD_CGC_STATUS__REGS_SCLK_MASK 0x00000200L
+#define UVD_CGC_STATUS__REGS_SCLK__SHIFT 0x00000009
+#define UVD_CGC_STATUS__REGS_VCLK_MASK 0x00000400L
+#define UVD_CGC_STATUS__REGS_VCLK__SHIFT 0x0000000a
+#define UVD_CGC_STATUS__SCPU_SCLK_MASK 0x08000000L
+#define UVD_CGC_STATUS__SCPU_SCLK__SHIFT 0x0000001b
+#define UVD_CGC_STATUS__SCPU_VCLK_MASK 0x10000000L
+#define UVD_CGC_STATUS__SCPU_VCLK__SHIFT 0x0000001c
+#define UVD_CGC_STATUS__SYS_DCLK_MASK 0x00000002L
+#define UVD_CGC_STATUS__SYS_DCLK__SHIFT 0x00000001
+#define UVD_CGC_STATUS__SYS_SCLK_MASK 0x00000001L
+#define UVD_CGC_STATUS__SYS_SCLK__SHIFT 0x00000000
+#define UVD_CGC_STATUS__SYS_VCLK_MASK 0x00000004L
+#define UVD_CGC_STATUS__SYS_VCLK__SHIFT 0x00000002
+#define UVD_CGC_STATUS__UDEC_DCLK_MASK 0x00000010L
+#define UVD_CGC_STATUS__UDEC_DCLK__SHIFT 0x00000004
+#define UVD_CGC_STATUS__UDEC_SCLK_MASK 0x00000008L
+#define UVD_CGC_STATUS__UDEC_SCLK__SHIFT 0x00000003
+#define UVD_CGC_STATUS__UDEC_VCLK_MASK 0x00000020L
+#define UVD_CGC_STATUS__UDEC_VCLK__SHIFT 0x00000005
+#define UVD_CGC_STATUS__VCPU_SCLK_MASK 0x02000000L
+#define UVD_CGC_STATUS__VCPU_SCLK__SHIFT 0x00000019
+#define UVD_CGC_STATUS__VCPU_VCLK_MASK 0x04000000L
+#define UVD_CGC_STATUS__VCPU_VCLK__SHIFT 0x0000001a
+#define UVD_CGC_STATUS__WCB_SCLK_MASK 0x01000000L
+#define UVD_CGC_STATUS__WCB_SCLK__SHIFT 0x00000018
+#define UVD_CGC_UDEC_STATUS__CM_DCLK_MASK 0x00000010L
+#define UVD_CGC_UDEC_STATUS__CM_DCLK__SHIFT 0x00000004
+#define UVD_CGC_UDEC_STATUS__CM_SCLK_MASK 0x00000008L
+#define UVD_CGC_UDEC_STATUS__CM_SCLK__SHIFT 0x00000003
+#define UVD_CGC_UDEC_STATUS__CM_VCLK_MASK 0x00000020L
+#define UVD_CGC_UDEC_STATUS__CM_VCLK__SHIFT 0x00000005
+#define UVD_CGC_UDEC_STATUS__DB_DCLK_MASK 0x00000400L
+#define UVD_CGC_UDEC_STATUS__DB_DCLK__SHIFT 0x0000000a
+#define UVD_CGC_UDEC_STATUS__DB_SCLK_MASK 0x00000200L
+#define UVD_CGC_UDEC_STATUS__DB_SCLK__SHIFT 0x00000009
+#define UVD_CGC_UDEC_STATUS__DB_VCLK_MASK 0x00000800L
+#define UVD_CGC_UDEC_STATUS__DB_VCLK__SHIFT 0x0000000b
+#define UVD_CGC_UDEC_STATUS__IT_DCLK_MASK 0x00000080L
+#define UVD_CGC_UDEC_STATUS__IT_DCLK__SHIFT 0x00000007
+#define UVD_CGC_UDEC_STATUS__IT_SCLK_MASK 0x00000040L
+#define UVD_CGC_UDEC_STATUS__IT_SCLK__SHIFT 0x00000006
+#define UVD_CGC_UDEC_STATUS__IT_VCLK_MASK 0x00000100L
+#define UVD_CGC_UDEC_STATUS__IT_VCLK__SHIFT 0x00000008
+#define UVD_CGC_UDEC_STATUS__MP_DCLK_MASK 0x00002000L
+#define UVD_CGC_UDEC_STATUS__MP_DCLK__SHIFT 0x0000000d
+#define UVD_CGC_UDEC_STATUS__MP_SCLK_MASK 0x00001000L
+#define UVD_CGC_UDEC_STATUS__MP_SCLK__SHIFT 0x0000000c
+#define UVD_CGC_UDEC_STATUS__MP_VCLK_MASK 0x00004000L
+#define UVD_CGC_UDEC_STATUS__MP_VCLK__SHIFT 0x0000000e
+#define UVD_CGC_UDEC_STATUS__RE_DCLK_MASK 0x00000002L
+#define UVD_CGC_UDEC_STATUS__RE_DCLK__SHIFT 0x00000001
+#define UVD_CGC_UDEC_STATUS__RE_SCLK_MASK 0x00000001L
+#define UVD_CGC_UDEC_STATUS__RE_SCLK__SHIFT 0x00000000
+#define UVD_CGC_UDEC_STATUS__RE_VCLK_MASK 0x00000004L
+#define UVD_CGC_UDEC_STATUS__RE_VCLK__SHIFT 0x00000002
+#define UVD_CONTEXT_ID__CONTEXT_ID_MASK 0xffffffffL
+#define UVD_CONTEXT_ID__CONTEXT_ID__SHIFT 0x00000000
+#define UVD_CTX_DATA__DATA_MASK 0xffffffffL
+#define UVD_CTX_DATA__DATA__SHIFT 0x00000000
+#define UVD_CTX_INDEX__INDEX_MASK 0x000001ffL
+#define UVD_CTX_INDEX__INDEX__SHIFT 0x00000000
+#define UVD_ENGINE_CNTL__ENGINE_START_MASK 0x00000001L
+#define UVD_ENGINE_CNTL__ENGINE_START_MODE_MASK 0x00000002L
+#define UVD_ENGINE_CNTL__ENGINE_START_MODE__SHIFT 0x00000001
+#define UVD_ENGINE_CNTL__ENGINE_START__SHIFT 0x00000000
+#define UVD_GPCOM_VCPU_CMD__CMD_MASK 0x7ffffffeL
+#define UVD_GPCOM_VCPU_CMD__CMD_SEND_MASK 0x00000001L
+#define UVD_GPCOM_VCPU_CMD__CMD_SEND__SHIFT 0x00000000
+#define UVD_GPCOM_VCPU_CMD__CMD__SHIFT 0x00000001
+#define UVD_GPCOM_VCPU_CMD__CMD_SOURCE_MASK 0x80000000L
+#define UVD_GPCOM_VCPU_CMD__CMD_SOURCE__SHIFT 0x0000001f
+#define UVD_GPCOM_VCPU_DATA0__DATA0_MASK 0xffffffffL
+#define UVD_GPCOM_VCPU_DATA0__DATA0__SHIFT 0x00000000
+#define UVD_GPCOM_VCPU_DATA1__DATA1_MASK 0xffffffffL
+#define UVD_GPCOM_VCPU_DATA1__DATA1__SHIFT 0x00000000
+#define UVD_LMI_ADDR_EXT2__SCPU_ADDR_EXT_MASK 0x0000000fL
+#define UVD_LMI_ADDR_EXT2__SCPU_ADDR_EXT__SHIFT 0x00000000
+#define UVD_LMI_ADDR_EXT2__SCPU_NC0_ADDR_EXT_MASK 0x00000f00L
+#define UVD_LMI_ADDR_EXT2__SCPU_NC0_ADDR_EXT__SHIFT 0x00000008
+#define UVD_LMI_ADDR_EXT2__SCPU_NC1_ADDR_EXT_MASK 0x0000f000L
+#define UVD_LMI_ADDR_EXT2__SCPU_NC1_ADDR_EXT__SHIFT 0x0000000c
+#define UVD_LMI_ADDR_EXT2__SCPU_VM_ADDR_EXT_MASK 0x000000f0L
+#define UVD_LMI_ADDR_EXT2__SCPU_VM_ADDR_EXT__SHIFT 0x00000004
+#define UVD_LMI_ADDR_EXT__CM_ADDR_EXT_MASK 0x000000f0L
+#define UVD_LMI_ADDR_EXT__CM_ADDR_EXT__SHIFT 0x00000004
+#define UVD_LMI_ADDR_EXT__IT_ADDR_EXT_MASK 0x00000f00L
+#define UVD_LMI_ADDR_EXT__IT_ADDR_EXT__SHIFT 0x00000008
+#define UVD_LMI_ADDR_EXT__MP_ADDR_EXT_MASK 0x00f00000L
+#define UVD_LMI_ADDR_EXT__MP_ADDR_EXT__SHIFT 0x00000014
+#define UVD_LMI_ADDR_EXT__RE_ADDR_EXT_MASK 0x000f0000L
+#define UVD_LMI_ADDR_EXT__RE_ADDR_EXT__SHIFT 0x00000010
+#define UVD_LMI_ADDR_EXT__VCPU_ADDR_EXT_MASK 0x0000000fL
+#define UVD_LMI_ADDR_EXT__VCPU_ADDR_EXT__SHIFT 0x00000000
+#define UVD_LMI_ADDR_EXT__VCPU_NC0_ADDR_EXT_MASK 0x0f000000L
+#define UVD_LMI_ADDR_EXT__VCPU_NC0_ADDR_EXT__SHIFT 0x00000018
+#define UVD_LMI_ADDR_EXT__VCPU_NC1_ADDR_EXT_MASK 0xf0000000L
+#define UVD_LMI_ADDR_EXT__VCPU_NC1_ADDR_EXT__SHIFT 0x0000001c
+#define UVD_LMI_ADDR_EXT__VCPU_VM_ADDR_EXT_MASK 0x0000f000L
+#define UVD_LMI_ADDR_EXT__VCPU_VM_ADDR_EXT__SHIFT 0x0000000c
+#define UVD_LMI_CACHE_CTRL__CM_EN_MASK 0x00000004L
+#define UVD_LMI_CACHE_CTRL__CM_EN__SHIFT 0x00000002
+#define UVD_LMI_CACHE_CTRL__CM_FLUSH_MASK 0x00000008L
+#define UVD_LMI_CACHE_CTRL__CM_FLUSH__SHIFT 0x00000003
+#define UVD_LMI_CACHE_CTRL__IT_EN_MASK 0x00000001L
+#define UVD_LMI_CACHE_CTRL__IT_EN__SHIFT 0x00000000
+#define UVD_LMI_CACHE_CTRL__IT_FLUSH_MASK 0x00000002L
+#define UVD_LMI_CACHE_CTRL__IT_FLUSH__SHIFT 0x00000001
+#define UVD_LMI_CACHE_CTRL__VCPU_EN_MASK 0x00000010L
+#define UVD_LMI_CACHE_CTRL__VCPU_EN__SHIFT 0x00000004
+#define UVD_LMI_CACHE_CTRL__VCPU_FLUSH_MASK 0x00000020L
+#define UVD_LMI_CACHE_CTRL__VCPU_FLUSH__SHIFT 0x00000005
+#define UVD_LMI_CTRL2__ASSERT_UMC_URGENT_MASK 0x00000004L
+#define UVD_LMI_CTRL2__ASSERT_UMC_URGENT__SHIFT 0x00000002
+#define UVD_LMI_CTRL2__DRCITF_BUBBLE_FIX_DIS_MASK 0x00000080L
+#define UVD_LMI_CTRL2__DRCITF_BUBBLE_FIX_DIS__SHIFT 0x00000007
+#define UVD_LMI_CTRL2__MASK_UMC_URGENT_MASK 0x00000008L
+#define UVD_LMI_CTRL2__MASK_UMC_URGENT__SHIFT 0x00000003
+#define UVD_LMI_CTRL2__MCIF_WR_WATERMARK_MASK 0x00000070L
+#define UVD_LMI_CTRL2__MCIF_WR_WATERMARK__SHIFT 0x00000004
+#define UVD_LMI_CTRL2__MC_READ_ID_SEL_MASK 0x00000600L
+#define UVD_LMI_CTRL2__MC_READ_ID_SEL__SHIFT 0x00000009
+#define UVD_LMI_CTRL2__MC_WRITE_ID_SEL_MASK 0x00001800L
+#define UVD_LMI_CTRL2__MC_WRITE_ID_SEL__SHIFT 0x0000000b
+#define UVD_LMI_CTRL2__SPH_DIS_MASK 0x00000001L
+#define UVD_LMI_CTRL2__SPH_DIS__SHIFT 0x00000000
+#define UVD_LMI_CTRL2__SPU_EXTRA_CID_EN_MASK 0x00008000L
+#define UVD_LMI_CTRL2__SPU_EXTRA_CID_EN__SHIFT 0x0000000f
+#define UVD_LMI_CTRL2__STALL_ARB_MASK 0x00000002L
+#define UVD_LMI_CTRL2__STALL_ARB__SHIFT 0x00000001
+#define UVD_LMI_CTRL2__STALL_ARB_UMC_MASK 0x00000100L
+#define UVD_LMI_CTRL2__STALL_ARB_UMC__SHIFT 0x00000008
+#define UVD_LMI_CTRL2__VCPU_NC0_EXT_EN_MASK 0x00002000L
+#define UVD_LMI_CTRL2__VCPU_NC0_EXT_EN__SHIFT 0x0000000d
+#define UVD_LMI_CTRL2__VCPU_NC1_EXT_EN_MASK 0x00004000L
+#define UVD_LMI_CTRL2__VCPU_NC1_EXT_EN__SHIFT 0x0000000e
+#define UVD_LMI_CTRL__ASSERT_MC_URGENT_MASK 0x00000800L
+#define UVD_LMI_CTRL__ASSERT_MC_URGENT__SHIFT 0x0000000b
+#define UVD_LMI_CTRL__CM_DATA_COHERENCY_EN_MASK 0x00400000L
+#define UVD_LMI_CTRL__CM_DATA_COHERENCY_EN__SHIFT 0x00000016
+#define UVD_LMI_CTRL__CRC_RESET_MASK 0x00004000L
+#define UVD_LMI_CTRL__CRC_RESET__SHIFT 0x0000000e
+#define UVD_LMI_CTRL__CRC_SEL_MASK 0x000f8000L
+#define UVD_LMI_CTRL__CRC_SEL__SHIFT 0x0000000f
+#define UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK 0x00002000L
+#define UVD_LMI_CTRL__DATA_COHERENCY_EN__SHIFT 0x0000000d
+#define UVD_LMI_CTRL__DB_DB_DATA_COHERENCY_EN_MASK 0x00800000L
+#define UVD_LMI_CTRL__DB_DB_DATA_COHERENCY_EN__SHIFT 0x00000017
+#define UVD_LMI_CTRL__DB_IT_DATA_COHERENCY_EN_MASK 0x01000000L
+#define UVD_LMI_CTRL__DB_IT_DATA_COHERENCY_EN__SHIFT 0x00000018
+#define UVD_LMI_CTRL__DISABLE_ON_FWV_FAIL_MASK 0x00100000L
+#define UVD_LMI_CTRL__DISABLE_ON_FWV_FAIL__SHIFT 0x00000014
+#define UVD_LMI_CTRL__IT_IT_DATA_COHERENCY_EN_MASK 0x02000000L
+#define UVD_LMI_CTRL__IT_IT_DATA_COHERENCY_EN__SHIFT 0x00000019
+#define UVD_LMI_CTRL__MASK_MC_URGENT_MASK 0x00001000L
+#define UVD_LMI_CTRL__MASK_MC_URGENT__SHIFT 0x0000000c
+#define UVD_LMI_CTRL__MIF_MIF_DATA_COHERENCY_EN_MASK 0x04000000L
+#define UVD_LMI_CTRL__MIF_MIF_DATA_COHERENCY_EN__SHIFT 0x0000001a
+#define UVD_LMI_CTRL__REQ_MODE_MASK 0x00000200L
+#define UVD_LMI_CTRL__REQ_MODE__SHIFT 0x00000009
+#define UVD_LMI_CTRL__RFU_MASK 0xf8000000L
+#define UVD_LMI_CTRL__RFU__SHIFT 0x0000001b
+#define UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK 0x00200000L
+#define UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN__SHIFT 0x00000015
+#define UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK 0x00000100L
+#define UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN__SHIFT 0x00000008
+#define UVD_LMI_CTRL__WRITE_CLEAN_TIMER_MASK 0x000000ffL
+#define UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT 0x00000000
+#define UVD_LMI_EXT40_ADDR__ADDR_MASK 0x000000ffL
+#define UVD_LMI_EXT40_ADDR__ADDR__SHIFT 0x00000000
+#define UVD_LMI_EXT40_ADDR__INDEX_MASK 0x001f0000L
+#define UVD_LMI_EXT40_ADDR__INDEX__SHIFT 0x00000010
+#define UVD_LMI_EXT40_ADDR__WRITE_ADDR_MASK 0x80000000L
+#define UVD_LMI_EXT40_ADDR__WRITE_ADDR__SHIFT 0x0000001f
+#define UVD_LMI_STATUS__ADP_MC_READ_CLEAN_MASK 0x00001000L
+#define UVD_LMI_STATUS__ADP_MC_READ_CLEAN__SHIFT 0x0000000c
+#define UVD_LMI_STATUS__ADP_UMC_READ_CLEAN_MASK 0x00002000L
+#define UVD_LMI_STATUS__ADP_UMC_READ_CLEAN__SHIFT 0x0000000d
+#define UVD_LMI_STATUS__PENDING_UVD_MC_WRITE_MASK 0x00000080L
+#define UVD_LMI_STATUS__PENDING_UVD_MC_WRITE__SHIFT 0x00000007
+#define UVD_LMI_STATUS__READ_CLEAN_MASK 0x00000001L
+#define UVD_LMI_STATUS__READ_CLEAN_RAW_MASK 0x00000100L
+#define UVD_LMI_STATUS__READ_CLEAN_RAW__SHIFT 0x00000008
+#define UVD_LMI_STATUS__READ_CLEAN__SHIFT 0x00000000
+#define UVD_LMI_STATUS__UMC_AVP_IDLE_MASK 0x00000800L
+#define UVD_LMI_STATUS__UMC_AVP_IDLE__SHIFT 0x0000000b
+#define UVD_LMI_STATUS__UMC_READ_CLEAN_MASK 0x00000010L
+#define UVD_LMI_STATUS__UMC_READ_CLEAN_RAW_MASK 0x00000200L
+#define UVD_LMI_STATUS__UMC_READ_CLEAN_RAW__SHIFT 0x00000009
+#define UVD_LMI_STATUS__UMC_READ_CLEAN__SHIFT 0x00000004
+#define UVD_LMI_STATUS__UMC_UVD_IDLE_MASK 0x00000400L
+#define UVD_LMI_STATUS__UMC_UVD_IDLE__SHIFT 0x0000000a
+#define UVD_LMI_STATUS__UMC_WRITE_CLEAN_MASK 0x00000020L
+#define UVD_LMI_STATUS__UMC_WRITE_CLEAN_RAW_MASK 0x00000040L
+#define UVD_LMI_STATUS__UMC_WRITE_CLEAN_RAW__SHIFT 0x00000006
+#define UVD_LMI_STATUS__UMC_WRITE_CLEAN__SHIFT 0x00000005
+#define UVD_LMI_STATUS__VCPU_LMI_WRITE_CLEAN_MASK 0x00000008L
+#define UVD_LMI_STATUS__VCPU_LMI_WRITE_CLEAN__SHIFT 0x00000003
+#define UVD_LMI_STATUS__WRITE_CLEAN_MASK 0x00000002L
+#define UVD_LMI_STATUS__WRITE_CLEAN_RAW_MASK 0x00000004L
+#define UVD_LMI_STATUS__WRITE_CLEAN_RAW__SHIFT 0x00000002
+#define UVD_LMI_STATUS__WRITE_CLEAN__SHIFT 0x00000001
+#define UVD_LMI_SWAP_CNTL2__SCPU_R_MC_SWAP_MASK 0x00000003L
+#define UVD_LMI_SWAP_CNTL2__SCPU_R_MC_SWAP__SHIFT 0x00000000
+#define UVD_LMI_SWAP_CNTL2__SCPU_W_MC_SWAP_MASK 0x0000000cL
+#define UVD_LMI_SWAP_CNTL2__SCPU_W_MC_SWAP__SHIFT 0x00000002
+#define UVD_LMI_SWAP_CNTL__CM_MC_SWAP_MASK 0x00000c00L
+#define UVD_LMI_SWAP_CNTL__CM_MC_SWAP__SHIFT 0x0000000a
+#define UVD_LMI_SWAP_CNTL__CSM_MC_SWAP_MASK 0x000c0000L
+#define UVD_LMI_SWAP_CNTL__CSM_MC_SWAP__SHIFT 0x00000012
+#define UVD_LMI_SWAP_CNTL__DB_R_MC_SWAP_MASK 0x0000c000L
+#define UVD_LMI_SWAP_CNTL__DB_R_MC_SWAP__SHIFT 0x0000000e
+#define UVD_LMI_SWAP_CNTL__DB_W_MC_SWAP_MASK 0x00030000L
+#define UVD_LMI_SWAP_CNTL__DBW_MC_SWAP_MASK 0x03000000L
+#define UVD_LMI_SWAP_CNTL__DB_W_MC_SWAP__SHIFT 0x00000010
+#define UVD_LMI_SWAP_CNTL__DBW_MC_SWAP__SHIFT 0x00000018
+#define UVD_LMI_SWAP_CNTL__IB_MC_SWAP_MASK 0x0000000cL
+#define UVD_LMI_SWAP_CNTL__IB_MC_SWAP__SHIFT 0x00000002
+#define UVD_LMI_SWAP_CNTL__IT_MC_SWAP_MASK 0x00003000L
+#define UVD_LMI_SWAP_CNTL__IT_MC_SWAP__SHIFT 0x0000000c
+#define UVD_LMI_SWAP_CNTL__MP_MC_SWAP_MASK 0xc0000000L
+#define UVD_LMI_SWAP_CNTL__MP_MC_SWAP__SHIFT 0x0000001e
+#define UVD_LMI_SWAP_CNTL__MP_REF16_MC_SWAP_MASK 0x00c00000L
+#define UVD_LMI_SWAP_CNTL__MP_REF16_MC_SWAP__SHIFT 0x00000016
+#define UVD_LMI_SWAP_CNTL__RB_MC_SWAP_MASK 0x00000003L
+#define UVD_LMI_SWAP_CNTL__RB_MC_SWAP__SHIFT 0x00000000
+#define UVD_LMI_SWAP_CNTL__RB_RPTR_MC_SWAP_MASK 0x00000030L
+#define UVD_LMI_SWAP_CNTL__RB_RPTR_MC_SWAP__SHIFT 0x00000004
+#define UVD_LMI_SWAP_CNTL__RB_WR_MC_SWAP_MASK 0x0c000000L
+#define UVD_LMI_SWAP_CNTL__RB_WR_MC_SWAP__SHIFT 0x0000001a
+#define UVD_LMI_SWAP_CNTL__RE_MC_SWAP_MASK 0x30000000L
+#define UVD_LMI_SWAP_CNTL__RE_MC_SWAP__SHIFT 0x0000001c
+#define UVD_LMI_SWAP_CNTL__VCPU_R_MC_SWAP_MASK 0x000000c0L
+#define UVD_LMI_SWAP_CNTL__VCPU_R_MC_SWAP__SHIFT 0x00000006
+#define UVD_LMI_SWAP_CNTL__VCPU_W_MC_SWAP_MASK 0x00000300L
+#define UVD_LMI_SWAP_CNTL__VCPU_W_MC_SWAP__SHIFT 0x00000008
+#define UVD_MASTINT_EN__INT_OVERRUN_MASK 0x007ffff0L
+#define UVD_MASTINT_EN__INT_OVERRUN__SHIFT 0x00000004
+#define UVD_MASTINT_EN__OVERRUN_RST_MASK 0x00000001L
+#define UVD_MASTINT_EN__OVERRUN_RST__SHIFT 0x00000000
+#define UVD_MASTINT_EN__SYS_EN_MASK 0x00000004L
+#define UVD_MASTINT_EN__SYS_EN__SHIFT 0x00000002
+#define UVD_MASTINT_EN__VCPU_EN_MASK 0x00000002L
+#define UVD_MASTINT_EN__VCPU_EN__SHIFT 0x00000001
+#define UVD_MIF_CURR_ADDR_CONFIG__BANK_INTERLEAVE_SIZE_MASK 0x00000700L
+#define UVD_MIF_CURR_ADDR_CONFIG__BANK_INTERLEAVE_SIZE__SHIFT 0x00000008
+#define UVD_MIF_CURR_ADDR_CONFIG__MULTI_GPU_TILE_SIZE_MASK 0x03000000L
+#define UVD_MIF_CURR_ADDR_CONFIG__MULTI_GPU_TILE_SIZE__SHIFT 0x00000018
+#define UVD_MIF_CURR_ADDR_CONFIG__NUM_GPUS_MASK 0x00700000L
+#define UVD_MIF_CURR_ADDR_CONFIG__NUM_GPUS__SHIFT 0x00000014
+#define UVD_MIF_CURR_ADDR_CONFIG__NUM_LOWER_PIPES_MASK 0x40000000L
+#define UVD_MIF_CURR_ADDR_CONFIG__NUM_LOWER_PIPES__SHIFT 0x0000001e
+#define UVD_MIF_CURR_ADDR_CONFIG__NUM_PIPES_MASK 0x00000007L
+#define UVD_MIF_CURR_ADDR_CONFIG__NUM_PIPES__SHIFT 0x00000000
+#define UVD_MIF_CURR_ADDR_CONFIG__NUM_SHADER_ENGINES_MASK 0x00003000L
+#define UVD_MIF_CURR_ADDR_CONFIG__NUM_SHADER_ENGINES__SHIFT 0x0000000c
+#define UVD_MIF_CURR_ADDR_CONFIG__PIPE_INTERLEAVE_SIZE_MASK 0x00000070L
+#define UVD_MIF_CURR_ADDR_CONFIG__PIPE_INTERLEAVE_SIZE__SHIFT 0x00000004
+#define UVD_MIF_CURR_ADDR_CONFIG__ROW_SIZE_MASK 0x30000000L
+#define UVD_MIF_CURR_ADDR_CONFIG__ROW_SIZE__SHIFT 0x0000001c
+#define UVD_MIF_CURR_ADDR_CONFIG__SHADER_ENGINE_TILE_SIZE_MASK 0x00070000L
+#define UVD_MIF_CURR_ADDR_CONFIG__SHADER_ENGINE_TILE_SIZE__SHIFT 0x00000010
+#define UVD_MIF_RECON1_ADDR_CONFIG__BANK_INTERLEAVE_SIZE_MASK 0x00000700L
+#define UVD_MIF_RECON1_ADDR_CONFIG__BANK_INTERLEAVE_SIZE__SHIFT 0x00000008
+#define UVD_MIF_RECON1_ADDR_CONFIG__MULTI_GPU_TILE_SIZE_MASK 0x03000000L
+#define UVD_MIF_RECON1_ADDR_CONFIG__MULTI_GPU_TILE_SIZE__SHIFT 0x00000018
+#define UVD_MIF_RECON1_ADDR_CONFIG__NUM_GPUS_MASK 0x00700000L
+#define UVD_MIF_RECON1_ADDR_CONFIG__NUM_GPUS__SHIFT 0x00000014
+#define UVD_MIF_RECON1_ADDR_CONFIG__NUM_LOWER_PIPES_MASK 0x40000000L
+#define UVD_MIF_RECON1_ADDR_CONFIG__NUM_LOWER_PIPES__SHIFT 0x0000001e
+#define UVD_MIF_RECON1_ADDR_CONFIG__NUM_PIPES_MASK 0x00000007L
+#define UVD_MIF_RECON1_ADDR_CONFIG__NUM_PIPES__SHIFT 0x00000000
+#define UVD_MIF_RECON1_ADDR_CONFIG__NUM_SHADER_ENGINES_MASK 0x00003000L
+#define UVD_MIF_RECON1_ADDR_CONFIG__NUM_SHADER_ENGINES__SHIFT 0x0000000c
+#define UVD_MIF_RECON1_ADDR_CONFIG__PIPE_INTERLEAVE_SIZE_MASK 0x00000070L
+#define UVD_MIF_RECON1_ADDR_CONFIG__PIPE_INTERLEAVE_SIZE__SHIFT 0x00000004
+#define UVD_MIF_RECON1_ADDR_CONFIG__ROW_SIZE_MASK 0x30000000L
+#define UVD_MIF_RECON1_ADDR_CONFIG__ROW_SIZE__SHIFT 0x0000001c
+#define UVD_MIF_RECON1_ADDR_CONFIG__SHADER_ENGINE_TILE_SIZE_MASK 0x00070000L
+#define UVD_MIF_RECON1_ADDR_CONFIG__SHADER_ENGINE_TILE_SIZE__SHIFT 0x00000010
+#define UVD_MIF_REF_ADDR_CONFIG__BANK_INTERLEAVE_SIZE_MASK 0x00000700L
+#define UVD_MIF_REF_ADDR_CONFIG__BANK_INTERLEAVE_SIZE__SHIFT 0x00000008
+#define UVD_MIF_REF_ADDR_CONFIG__MULTI_GPU_TILE_SIZE_MASK 0x03000000L
+#define UVD_MIF_REF_ADDR_CONFIG__MULTI_GPU_TILE_SIZE__SHIFT 0x00000018
+#define UVD_MIF_REF_ADDR_CONFIG__NUM_GPUS_MASK 0x00700000L
+#define UVD_MIF_REF_ADDR_CONFIG__NUM_GPUS__SHIFT 0x00000014
+#define UVD_MIF_REF_ADDR_CONFIG__NUM_LOWER_PIPES_MASK 0x40000000L
+#define UVD_MIF_REF_ADDR_CONFIG__NUM_LOWER_PIPES__SHIFT 0x0000001e
+#define UVD_MIF_REF_ADDR_CONFIG__NUM_PIPES_MASK 0x00000007L
+#define UVD_MIF_REF_ADDR_CONFIG__NUM_PIPES__SHIFT 0x00000000
+#define UVD_MIF_REF_ADDR_CONFIG__NUM_SHADER_ENGINES_MASK 0x00003000L
+#define UVD_MIF_REF_ADDR_CONFIG__NUM_SHADER_ENGINES__SHIFT 0x0000000c
+#define UVD_MIF_REF_ADDR_CONFIG__PIPE_INTERLEAVE_SIZE_MASK 0x00000070L
+#define UVD_MIF_REF_ADDR_CONFIG__PIPE_INTERLEAVE_SIZE__SHIFT 0x00000004
+#define UVD_MIF_REF_ADDR_CONFIG__ROW_SIZE_MASK 0x30000000L
+#define UVD_MIF_REF_ADDR_CONFIG__ROW_SIZE__SHIFT 0x0000001c
+#define UVD_MIF_REF_ADDR_CONFIG__SHADER_ENGINE_TILE_SIZE_MASK 0x00070000L
+#define UVD_MIF_REF_ADDR_CONFIG__SHADER_ENGINE_TILE_SIZE__SHIFT 0x00000010
+#define UVD_MPC_CNTL__AVE_WEIGHT_MASK 0x00030000L
+#define UVD_MPC_CNTL__AVE_WEIGHT__SHIFT 0x00000010
+#define UVD_MPC_CNTL__DBG_MUX_MASK 0x00000700L
+#define UVD_MPC_CNTL__DBG_MUX__SHIFT 0x00000008
+#define UVD_MPC_CNTL__PERF_RST_MASK 0x00000040L
+#define UVD_MPC_CNTL__PERF_RST__SHIFT 0x00000006
+#define UVD_MPC_CNTL__REPLACEMENT_MODE_MASK 0x00000038L
+#define UVD_MPC_CNTL__REPLACEMENT_MODE__SHIFT 0x00000003
+#define UVD_MPC_CNTL__URGENT_EN_MASK 0x00040000L
+#define UVD_MPC_CNTL__URGENT_EN__SHIFT 0x00000012
+#define UVD_MPC_SET_ALU__FUNCT_MASK 0x00000007L
+#define UVD_MPC_SET_ALU__FUNCT__SHIFT 0x00000000
+#define UVD_MPC_SET_ALU__OPERAND_MASK 0x00000ff0L
+#define UVD_MPC_SET_ALU__OPERAND__SHIFT 0x00000004
+#define UVD_MPC_SET_MUXA0__VARA_0_MASK 0x0000003fL
+#define UVD_MPC_SET_MUXA0__VARA_0__SHIFT 0x00000000
+#define UVD_MPC_SET_MUXA0__VARA_1_MASK 0x00000fc0L
+#define UVD_MPC_SET_MUXA0__VARA_1__SHIFT 0x00000006
+#define UVD_MPC_SET_MUXA0__VARA_2_MASK 0x0003f000L
+#define UVD_MPC_SET_MUXA0__VARA_2__SHIFT 0x0000000c
+#define UVD_MPC_SET_MUXA0__VARA_3_MASK 0x00fc0000L
+#define UVD_MPC_SET_MUXA0__VARA_3__SHIFT 0x00000012
+#define UVD_MPC_SET_MUXA0__VARA_4_MASK 0x3f000000L
+#define UVD_MPC_SET_MUXA0__VARA_4__SHIFT 0x00000018
+#define UVD_MPC_SET_MUXA1__VARA_5_MASK 0x0000003fL
+#define UVD_MPC_SET_MUXA1__VARA_5__SHIFT 0x00000000
+#define UVD_MPC_SET_MUXA1__VARA_6_MASK 0x00000fc0L
+#define UVD_MPC_SET_MUXA1__VARA_6__SHIFT 0x00000006
+#define UVD_MPC_SET_MUXA1__VARA_7_MASK 0x0003f000L
+#define UVD_MPC_SET_MUXA1__VARA_7__SHIFT 0x0000000c
+#define UVD_MPC_SET_MUXB0__VARB_0_MASK 0x0000003fL
+#define UVD_MPC_SET_MUXB0__VARB_0__SHIFT 0x00000000
+#define UVD_MPC_SET_MUXB0__VARB_1_MASK 0x00000fc0L
+#define UVD_MPC_SET_MUXB0__VARB_1__SHIFT 0x00000006
+#define UVD_MPC_SET_MUXB0__VARB_2_MASK 0x0003f000L
+#define UVD_MPC_SET_MUXB0__VARB_2__SHIFT 0x0000000c
+#define UVD_MPC_SET_MUXB0__VARB_3_MASK 0x00fc0000L
+#define UVD_MPC_SET_MUXB0__VARB_3__SHIFT 0x00000012
+#define UVD_MPC_SET_MUXB0__VARB_4_MASK 0x3f000000L
+#define UVD_MPC_SET_MUXB0__VARB_4__SHIFT 0x00000018
+#define UVD_MPC_SET_MUXB1__VARB_5_MASK 0x0000003fL
+#define UVD_MPC_SET_MUXB1__VARB_5__SHIFT 0x00000000
+#define UVD_MPC_SET_MUXB1__VARB_6_MASK 0x00000fc0L
+#define UVD_MPC_SET_MUXB1__VARB_6__SHIFT 0x00000006
+#define UVD_MPC_SET_MUXB1__VARB_7_MASK 0x0003f000L
+#define UVD_MPC_SET_MUXB1__VARB_7__SHIFT 0x0000000c
+#define UVD_MPC_SET_MUX__SET_0_MASK 0x00000007L
+#define UVD_MPC_SET_MUX__SET_0__SHIFT 0x00000000
+#define UVD_MPC_SET_MUX__SET_1_MASK 0x00000038L
+#define UVD_MPC_SET_MUX__SET_1__SHIFT 0x00000003
+#define UVD_MPC_SET_MUX__SET_2_MASK 0x000001c0L
+#define UVD_MPC_SET_MUX__SET_2__SHIFT 0x00000006
+#define UVD_MP_SWAP_CNTL__MP_REF0_MC_SWAP_MASK 0x00000003L
+#define UVD_MP_SWAP_CNTL__MP_REF0_MC_SWAP__SHIFT 0x00000000
+#define UVD_MP_SWAP_CNTL__MP_REF10_MC_SWAP_MASK 0x00300000L
+#define UVD_MP_SWAP_CNTL__MP_REF10_MC_SWAP__SHIFT 0x00000014
+#define UVD_MP_SWAP_CNTL__MP_REF11_MC_SWAP_MASK 0x00c00000L
+#define UVD_MP_SWAP_CNTL__MP_REF11_MC_SWAP__SHIFT 0x00000016
+#define UVD_MP_SWAP_CNTL__MP_REF12_MC_SWAP_MASK 0x03000000L
+#define UVD_MP_SWAP_CNTL__MP_REF12_MC_SWAP__SHIFT 0x00000018
+#define UVD_MP_SWAP_CNTL__MP_REF13_MC_SWAP_MASK 0x0c000000L
+#define UVD_MP_SWAP_CNTL__MP_REF13_MC_SWAP__SHIFT 0x0000001a
+#define UVD_MP_SWAP_CNTL__MP_REF14_MC_SWAP_MASK 0x30000000L
+#define UVD_MP_SWAP_CNTL__MP_REF14_MC_SWAP__SHIFT 0x0000001c
+#define UVD_MP_SWAP_CNTL__MP_REF15_MC_SWAP_MASK 0xc0000000L
+#define UVD_MP_SWAP_CNTL__MP_REF15_MC_SWAP__SHIFT 0x0000001e
+#define UVD_MP_SWAP_CNTL__MP_REF1_MC_SWAP_MASK 0x0000000cL
+#define UVD_MP_SWAP_CNTL__MP_REF1_MC_SWAP__SHIFT 0x00000002
+#define UVD_MP_SWAP_CNTL__MP_REF2_MC_SWAP_MASK 0x00000030L
+#define UVD_MP_SWAP_CNTL__MP_REF2_MC_SWAP__SHIFT 0x00000004
+#define UVD_MP_SWAP_CNTL__MP_REF3_MC_SWAP_MASK 0x000000c0L
+#define UVD_MP_SWAP_CNTL__MP_REF3_MC_SWAP__SHIFT 0x00000006
+#define UVD_MP_SWAP_CNTL__MP_REF4_MC_SWAP_MASK 0x00000300L
+#define UVD_MP_SWAP_CNTL__MP_REF4_MC_SWAP__SHIFT 0x00000008
+#define UVD_MP_SWAP_CNTL__MP_REF5_MC_SWAP_MASK 0x00000c00L
+#define UVD_MP_SWAP_CNTL__MP_REF5_MC_SWAP__SHIFT 0x0000000a
+#define UVD_MP_SWAP_CNTL__MP_REF6_MC_SWAP_MASK 0x00003000L
+#define UVD_MP_SWAP_CNTL__MP_REF6_MC_SWAP__SHIFT 0x0000000c
+#define UVD_MP_SWAP_CNTL__MP_REF7_MC_SWAP_MASK 0x0000c000L
+#define UVD_MP_SWAP_CNTL__MP_REF7_MC_SWAP__SHIFT 0x0000000e
+#define UVD_MP_SWAP_CNTL__MP_REF8_MC_SWAP_MASK 0x00030000L
+#define UVD_MP_SWAP_CNTL__MP_REF8_MC_SWAP__SHIFT 0x00000010
+#define UVD_MP_SWAP_CNTL__MP_REF9_MC_SWAP_MASK 0x000c0000L
+#define UVD_MP_SWAP_CNTL__MP_REF9_MC_SWAP__SHIFT 0x00000012
+#define UVD_PGFSM_CONFIG__UVD_PGFSM_FSM_ADDR_MASK 0x000000ffL
+#define UVD_PGFSM_CONFIG__UVD_PGFSM_FSM_ADDR__SHIFT 0x00000000
+#define UVD_PGFSM_CONFIG__UVD_PGFSM_P1_SELECT_MASK 0x00000400L
+#define UVD_PGFSM_CONFIG__UVD_PGFSM_P1_SELECT__SHIFT 0x0000000a
+#define UVD_PGFSM_CONFIG__UVD_PGFSM_P2_SELECT_MASK 0x00000800L
+#define UVD_PGFSM_CONFIG__UVD_PGFSM_P2_SELECT__SHIFT 0x0000000b
+#define UVD_PGFSM_CONFIG__UVD_PGFSM_POWER_DOWN_MASK 0x00000100L
+#define UVD_PGFSM_CONFIG__UVD_PGFSM_POWER_DOWN__SHIFT 0x00000008
+#define UVD_PGFSM_CONFIG__UVD_PGFSM_POWER_UP_MASK 0x00000200L
+#define UVD_PGFSM_CONFIG__UVD_PGFSM_POWER_UP__SHIFT 0x00000009
+#define UVD_PGFSM_CONFIG__UVD_PGFSM_READ_MASK 0x00002000L
+#define UVD_PGFSM_CONFIG__UVD_PGFSM_READ__SHIFT 0x0000000d
+#define UVD_PGFSM_CONFIG__UVD_PGFSM_REG_ADDR_MASK 0xf0000000L
+#define UVD_PGFSM_CONFIG__UVD_PGFSM_REG_ADDR__SHIFT 0x0000001c
+#define UVD_PGFSM_CONFIG__UVD_PGFSM_WRITE_MASK 0x00001000L
+#define UVD_PGFSM_CONFIG__UVD_PGFSM_WRITE__SHIFT 0x0000000c
+#define UVD_PGFSM_READ_TILE1__UVD_PGFSM_READ_TILE1_VALUE_MASK 0x00ffffffL
+#define UVD_PGFSM_READ_TILE1__UVD_PGFSM_READ_TILE1_VALUE__SHIFT 0x00000000
+#define UVD_PGFSM_READ_TILE2__UVD_PGFSM_READ_TILE2_VALUE_MASK 0x00ffffffL
+#define UVD_PGFSM_READ_TILE2__UVD_PGFSM_READ_TILE2_VALUE__SHIFT 0x00000000
+#define UVD_POWER_STATUS__UVD_POWER_STATUS_MASK 0x00000001L
+#define UVD_POWER_STATUS__UVD_POWER_STATUS__SHIFT 0x00000000
+#define UVD_RBC_IB_BASE__IB_BASE_MASK 0xffffffc0L
+#define UVD_RBC_IB_BASE__IB_BASE__SHIFT 0x00000006
+#define UVD_RBC_IB_SIZE__IB_SIZE_MASK 0x007ffff0L
+#define UVD_RBC_IB_SIZE__IB_SIZE__SHIFT 0x00000004
+#define UVD_RBC_RB_BASE__RB_BASE_MASK 0xffffffc0L
+#define UVD_RBC_RB_BASE__RB_BASE__SHIFT 0x00000006
+#define UVD_RBC_RB_CNTL__RB_BLKSZ_MASK 0x00001f00L
+#define UVD_RBC_RB_CNTL__RB_BLKSZ__SHIFT 0x00000008
+#define UVD_RBC_RB_CNTL__RB_BUFSZ_MASK 0x0000001fL
+#define UVD_RBC_RB_CNTL__RB_BUFSZ__SHIFT 0x00000000
+#define UVD_RBC_RB_CNTL__RB_NO_FETCH_MASK 0x00010000L
+#define UVD_RBC_RB_CNTL__RB_NO_FETCH__SHIFT 0x00000010
+#define UVD_RBC_RB_CNTL__RB_NO_UPDATE_MASK 0x01000000L
+#define UVD_RBC_RB_CNTL__RB_NO_UPDATE__SHIFT 0x00000018
+#define UVD_RBC_RB_CNTL__RB_RPTR_WR_EN_MASK 0x10000000L
+#define UVD_RBC_RB_CNTL__RB_RPTR_WR_EN__SHIFT 0x0000001c
+#define UVD_RBC_RB_CNTL__RB_WPTR_POLL_EN_MASK 0x00100000L
+#define UVD_RBC_RB_CNTL__RB_WPTR_POLL_EN__SHIFT 0x00000014
+#define UVD_RBC_RB_RPTR_ADDR__RB_RPTR_ADDR_MASK 0xffffffffL
+#define UVD_RBC_RB_RPTR_ADDR__RB_RPTR_ADDR__SHIFT 0x00000000
+#define UVD_RBC_RB_RPTR__RB_RPTR_MASK 0x007ffff0L
+#define UVD_RBC_RB_RPTR__RB_RPTR__SHIFT 0x00000004
+#define UVD_RBC_RB_WPTR__RB_WPTR_MASK 0x007ffff0L
+#define UVD_RBC_RB_WPTR__RB_WPTR__SHIFT 0x00000004
+#define UVD_SEMA_ADDR_HIGH__ADDR_42_23_MASK 0x000fffffL
+#define UVD_SEMA_ADDR_HIGH__ADDR_42_23__SHIFT 0x00000000
+#define UVD_SEMA_ADDR_LOW__ADDR_22_3_MASK 0x000fffffL
+#define UVD_SEMA_ADDR_LOW__ADDR_22_3__SHIFT 0x00000000
+#define UVD_SEMA_CMD__MODE_MASK 0x00000040L
+#define UVD_SEMA_CMD__MODE__SHIFT 0x00000006
+#define UVD_SEMA_CMD__REQ_CMD_MASK 0x0000000fL
+#define UVD_SEMA_CMD__REQ_CMD__SHIFT 0x00000000
+#define UVD_SEMA_CMD__VMID_EN_MASK 0x00000080L
+#define UVD_SEMA_CMD__VMID_EN__SHIFT 0x00000007
+#define UVD_SEMA_CMD__VMID_MASK 0x00000f00L
+#define UVD_SEMA_CMD__VMID__SHIFT 0x00000008
+#define UVD_SEMA_CMD__WR_PHASE_MASK 0x00000030L
+#define UVD_SEMA_CMD__WR_PHASE__SHIFT 0x00000004
+#define UVD_SEMA_CNTL__ADVANCED_MODE_DIS_MASK 0x00000002L
+#define UVD_SEMA_CNTL__ADVANCED_MODE_DIS__SHIFT 0x00000001
+#define UVD_SEMA_CNTL__SEMAPHORE_EN_MASK 0x00000001L
+#define UVD_SEMA_CNTL__SEMAPHORE_EN__SHIFT 0x00000000
+#define UVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL__RESEND_TIMER_MASK 0x07000000L
+#define UVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL__RESEND_TIMER__SHIFT 0x00000018
+#define UVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL__SIGNAL_INCOMPLETE_COUNT_MASK 0x001ffffeL
+#define UVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL__SIGNAL_INCOMPLETE_COUNT__SHIFT 0x00000001
+#define UVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL__SIGNAL_INCOMPLETE_EN_MASK 0x00000001L
+#define UVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL__SIGNAL_INCOMPLETE_EN__SHIFT 0x00000000
+#define UVD_SEMA_TIMEOUT_STATUS__SEMAPHORE_SIGNAL_INCOMPLETE_TIMEOUT_STAT_MASK 0x00000004L
+#define UVD_SEMA_TIMEOUT_STATUS__SEMAPHORE_SIGNAL_INCOMPLETE_TIMEOUT_STAT__SHIFT 0x00000002
+#define UVD_SEMA_TIMEOUT_STATUS__SEMAPHORE_TIMEOUT_CLEAR_MASK 0x00000008L
+#define UVD_SEMA_TIMEOUT_STATUS__SEMAPHORE_TIMEOUT_CLEAR__SHIFT 0x00000003
+#define UVD_SEMA_TIMEOUT_STATUS__SEMAPHORE_WAIT_FAULT_TIMEOUT_STAT_MASK 0x00000002L
+#define UVD_SEMA_TIMEOUT_STATUS__SEMAPHORE_WAIT_FAULT_TIMEOUT_STAT__SHIFT 0x00000001
+#define UVD_SEMA_TIMEOUT_STATUS__SEMAPHORE_WAIT_INCOMPLETE_TIMEOUT_STAT_MASK 0x00000001L
+#define UVD_SEMA_TIMEOUT_STATUS__SEMAPHORE_WAIT_INCOMPLETE_TIMEOUT_STAT__SHIFT 0x00000000
+#define UVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL__RESEND_TIMER_MASK 0x07000000L
+#define UVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL__RESEND_TIMER__SHIFT 0x00000018
+#define UVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL__WAIT_FAULT_COUNT_MASK 0x001ffffeL
+#define UVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL__WAIT_FAULT_COUNT__SHIFT 0x00000001
+#define UVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL__WAIT_FAULT_EN_MASK 0x00000001L
+#define UVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL__WAIT_FAULT_EN__SHIFT 0x00000000
+#define UVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL__RESEND_TIMER_MASK 0x07000000L
+#define UVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL__RESEND_TIMER__SHIFT 0x00000018
+#define UVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL__WAIT_INCOMPLETE_COUNT_MASK 0x001ffffeL
+#define UVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL__WAIT_INCOMPLETE_COUNT__SHIFT 0x00000001
+#define UVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL__WAIT_INCOMPLETE_EN_MASK 0x00000001L
+#define UVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL__WAIT_INCOMPLETE_EN__SHIFT 0x00000000
+#define UVD_SOFT_RESET__CSM_SOFT_RESET_MASK 0x00000020L
+#define UVD_SOFT_RESET__CSM_SOFT_RESET__SHIFT 0x00000005
+#define UVD_SOFT_RESET__CXW_SOFT_RESET_MASK 0x00000040L
+#define UVD_SOFT_RESET__CXW_SOFT_RESET__SHIFT 0x00000006
+#define UVD_SOFT_RESET__FWV_SOFT_RESET_MASK 0x00000200L
+#define UVD_SOFT_RESET__FWV_SOFT_RESET__SHIFT 0x00000009
+#define UVD_SOFT_RESET__IDCT_SOFT_RESET_MASK 0x00001000L
+#define UVD_SOFT_RESET__IDCT_SOFT_RESET__SHIFT 0x0000000c
+#define UVD_SOFT_RESET__IH_SOFT_RESET_MASK 0x00000400L
+#define UVD_SOFT_RESET__IH_SOFT_RESET__SHIFT 0x0000000a
+#define UVD_SOFT_RESET__LBSI_SOFT_RESET_MASK 0x00000002L
+#define UVD_SOFT_RESET__LBSI_SOFT_RESET__SHIFT 0x00000001
+#define UVD_SOFT_RESET__LCM_SOFT_RESET_MASK 0x00010000L
+#define UVD_SOFT_RESET__LCM_SOFT_RESET__SHIFT 0x00000010
+#define UVD_SOFT_RESET__LMI_SOFT_RESET_MASK 0x00000004L
+#define UVD_SOFT_RESET__LMI_SOFT_RESET__SHIFT 0x00000002
+#define UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK 0x00002000L
+#define UVD_SOFT_RESET__LMI_UMC_SOFT_RESET__SHIFT 0x0000000d
+#define UVD_SOFT_RESET__MIF_SOFT_RESET_MASK 0x00008000L
+#define UVD_SOFT_RESET__MIF_SOFT_RESET__SHIFT 0x0000000f
+#define UVD_SOFT_RESET__MPC_SOFT_RESET_MASK 0x00000100L
+#define UVD_SOFT_RESET__MPC_SOFT_RESET__SHIFT 0x00000008
+#define UVD_SOFT_RESET__MPRD_SOFT_RESET_MASK 0x00000800L
+#define UVD_SOFT_RESET__MPRD_SOFT_RESET__SHIFT 0x0000000b
+#define UVD_SOFT_RESET__RBC_SOFT_RESET_MASK 0x00000001L
+#define UVD_SOFT_RESET__RBC_SOFT_RESET__SHIFT 0x00000000
+#define UVD_SOFT_RESET__SPH_SOFT_RESET_MASK 0x00004000L
+#define UVD_SOFT_RESET__SPH_SOFT_RESET__SHIFT 0x0000000e
+#define UVD_SOFT_RESET__TAP_SOFT_RESET_MASK 0x00000080L
+#define UVD_SOFT_RESET__TAP_SOFT_RESET__SHIFT 0x00000007
+#define UVD_SOFT_RESET__UDEC_SOFT_RESET_MASK 0x00000010L
+#define UVD_SOFT_RESET__UDEC_SOFT_RESET__SHIFT 0x00000004
+#define UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK 0x00000008L
+#define UVD_SOFT_RESET__VCPU_SOFT_RESET__SHIFT 0x00000003
+#define UVD_STATUS__RBC_BUSY_MASK 0x00000001L
+#define UVD_STATUS__RBC_BUSY__SHIFT 0x00000000
+#define UVD_STATUS__VCPU_REPORT_MASK 0x000000feL
+#define UVD_STATUS__VCPU_REPORT__SHIFT 0x00000001
+#define UVD_UDEC_ADDR_CONFIG__BANK_INTERLEAVE_SIZE_MASK 0x00000700L
+#define UVD_UDEC_ADDR_CONFIG__BANK_INTERLEAVE_SIZE__SHIFT 0x00000008
+#define UVD_UDEC_ADDR_CONFIG__MULTI_GPU_TILE_SIZE_MASK 0x03000000L
+#define UVD_UDEC_ADDR_CONFIG__MULTI_GPU_TILE_SIZE__SHIFT 0x00000018
+#define UVD_UDEC_ADDR_CONFIG__NUM_GPUS_MASK 0x00700000L
+#define UVD_UDEC_ADDR_CONFIG__NUM_GPUS__SHIFT 0x00000014
+#define UVD_UDEC_ADDR_CONFIG__NUM_LOWER_PIPES_MASK 0x40000000L
+#define UVD_UDEC_ADDR_CONFIG__NUM_LOWER_PIPES__SHIFT 0x0000001e
+#define UVD_UDEC_ADDR_CONFIG__NUM_PIPES_MASK 0x00000007L
+#define UVD_UDEC_ADDR_CONFIG__NUM_PIPES__SHIFT 0x00000000
+#define UVD_UDEC_ADDR_CONFIG__NUM_SHADER_ENGINES_MASK 0x00003000L
+#define UVD_UDEC_ADDR_CONFIG__NUM_SHADER_ENGINES__SHIFT 0x0000000c
+#define UVD_UDEC_ADDR_CONFIG__PIPE_INTERLEAVE_SIZE_MASK 0x00000070L
+#define UVD_UDEC_ADDR_CONFIG__PIPE_INTERLEAVE_SIZE__SHIFT 0x00000004
+#define UVD_UDEC_ADDR_CONFIG__ROW_SIZE_MASK 0x30000000L
+#define UVD_UDEC_ADDR_CONFIG__ROW_SIZE__SHIFT 0x0000001c
+#define UVD_UDEC_ADDR_CONFIG__SHADER_ENGINE_TILE_SIZE_MASK 0x00070000L
+#define UVD_UDEC_ADDR_CONFIG__SHADER_ENGINE_TILE_SIZE__SHIFT 0x00000010
+#define UVD_UDEC_DB_ADDR_CONFIG__BANK_INTERLEAVE_SIZE_MASK 0x00000700L
+#define UVD_UDEC_DB_ADDR_CONFIG__BANK_INTERLEAVE_SIZE__SHIFT 0x00000008
+#define UVD_UDEC_DB_ADDR_CONFIG__MULTI_GPU_TILE_SIZE_MASK 0x03000000L
+#define UVD_UDEC_DB_ADDR_CONFIG__MULTI_GPU_TILE_SIZE__SHIFT 0x00000018
+#define UVD_UDEC_DB_ADDR_CONFIG__NUM_GPUS_MASK 0x00700000L
+#define UVD_UDEC_DB_ADDR_CONFIG__NUM_GPUS__SHIFT 0x00000014
+#define UVD_UDEC_DB_ADDR_CONFIG__NUM_LOWER_PIPES_MASK 0x40000000L
+#define UVD_UDEC_DB_ADDR_CONFIG__NUM_LOWER_PIPES__SHIFT 0x0000001e
+#define UVD_UDEC_DB_ADDR_CONFIG__NUM_PIPES_MASK 0x00000007L
+#define UVD_UDEC_DB_ADDR_CONFIG__NUM_PIPES__SHIFT 0x00000000
+#define UVD_UDEC_DB_ADDR_CONFIG__NUM_SHADER_ENGINES_MASK 0x00003000L
+#define UVD_UDEC_DB_ADDR_CONFIG__NUM_SHADER_ENGINES__SHIFT 0x0000000c
+#define UVD_UDEC_DB_ADDR_CONFIG__PIPE_INTERLEAVE_SIZE_MASK 0x00000070L
+#define UVD_UDEC_DB_ADDR_CONFIG__PIPE_INTERLEAVE_SIZE__SHIFT 0x00000004
+#define UVD_UDEC_DB_ADDR_CONFIG__ROW_SIZE_MASK 0x30000000L
+#define UVD_UDEC_DB_ADDR_CONFIG__ROW_SIZE__SHIFT 0x0000001c
+#define UVD_UDEC_DB_ADDR_CONFIG__SHADER_ENGINE_TILE_SIZE_MASK 0x00070000L
+#define UVD_UDEC_DB_ADDR_CONFIG__SHADER_ENGINE_TILE_SIZE__SHIFT 0x00000010
+#define UVD_UDEC_DBW_ADDR_CONFIG__BANK_INTERLEAVE_SIZE_MASK 0x00000700L
+#define UVD_UDEC_DBW_ADDR_CONFIG__BANK_INTERLEAVE_SIZE__SHIFT 0x00000008
+#define UVD_UDEC_DBW_ADDR_CONFIG__MULTI_GPU_TILE_SIZE_MASK 0x03000000L
+#define UVD_UDEC_DBW_ADDR_CONFIG__MULTI_GPU_TILE_SIZE__SHIFT 0x00000018
+#define UVD_UDEC_DBW_ADDR_CONFIG__NUM_GPUS_MASK 0x00700000L
+#define UVD_UDEC_DBW_ADDR_CONFIG__NUM_GPUS__SHIFT 0x00000014
+#define UVD_UDEC_DBW_ADDR_CONFIG__NUM_LOWER_PIPES_MASK 0x40000000L
+#define UVD_UDEC_DBW_ADDR_CONFIG__NUM_LOWER_PIPES__SHIFT 0x0000001e
+#define UVD_UDEC_DBW_ADDR_CONFIG__NUM_PIPES_MASK 0x00000007L
+#define UVD_UDEC_DBW_ADDR_CONFIG__NUM_PIPES__SHIFT 0x00000000
+#define UVD_UDEC_DBW_ADDR_CONFIG__NUM_SHADER_ENGINES_MASK 0x00003000L
+#define UVD_UDEC_DBW_ADDR_CONFIG__NUM_SHADER_ENGINES__SHIFT 0x0000000c
+#define UVD_UDEC_DBW_ADDR_CONFIG__PIPE_INTERLEAVE_SIZE_MASK 0x00000070L
+#define UVD_UDEC_DBW_ADDR_CONFIG__PIPE_INTERLEAVE_SIZE__SHIFT 0x00000004
+#define UVD_UDEC_DBW_ADDR_CONFIG__ROW_SIZE_MASK 0x30000000L
+#define UVD_UDEC_DBW_ADDR_CONFIG__ROW_SIZE__SHIFT 0x0000001c
+#define UVD_UDEC_DBW_ADDR_CONFIG__SHADER_ENGINE_TILE_SIZE_MASK 0x00070000L
+#define UVD_UDEC_DBW_ADDR_CONFIG__SHADER_ENGINE_TILE_SIZE__SHIFT 0x00000010
+#define UVD_VCPU_CACHE_OFFSET0__CACHE_OFFSET0_MASK 0x01ffffffL
+#define UVD_VCPU_CACHE_OFFSET0__CACHE_OFFSET0__SHIFT 0x00000000
+#define UVD_VCPU_CACHE_OFFSET1__CACHE_OFFSET1_MASK 0x01ffffffL
+#define UVD_VCPU_CACHE_OFFSET1__CACHE_OFFSET1__SHIFT 0x00000000
+#define UVD_VCPU_CACHE_OFFSET2__CACHE_OFFSET2_MASK 0x01ffffffL
+#define UVD_VCPU_CACHE_OFFSET2__CACHE_OFFSET2__SHIFT 0x00000000
+#define UVD_VCPU_CACHE_SIZE0__CACHE_SIZE0_MASK 0x001fffffL
+#define UVD_VCPU_CACHE_SIZE0__CACHE_SIZE0__SHIFT 0x00000000
+#define UVD_VCPU_CACHE_SIZE1__CACHE_SIZE1_MASK 0x001fffffL
+#define UVD_VCPU_CACHE_SIZE1__CACHE_SIZE1__SHIFT 0x00000000
+#define UVD_VCPU_CACHE_SIZE2__CACHE_SIZE2_MASK 0x001fffffL
+#define UVD_VCPU_CACHE_SIZE2__CACHE_SIZE2__SHIFT 0x00000000
+#define UVD_VCPU_CNTL__ABORT_REQ_MASK 0x00000100L
+#define UVD_VCPU_CNTL__ABORT_REQ__SHIFT 0x00000008
+#define UVD_VCPU_CNTL__AXI_MAX_BRST_SIZE_IS_4_MASK 0x00000010L
+#define UVD_VCPU_CNTL__AXI_MAX_BRST_SIZE_IS_4__SHIFT 0x00000004
+#define UVD_VCPU_CNTL__CABAC_MB_ACC_MASK 0x10000000L
+#define UVD_VCPU_CNTL__CABAC_MB_ACC__SHIFT 0x0000001c
+#define UVD_VCPU_CNTL__CLK_ACTIVE_MASK 0x00020000L
+#define UVD_VCPU_CNTL__CLK_ACTIVE__SHIFT 0x00000011
+#define UVD_VCPU_CNTL__CLK_EN_MASK 0x00000200L
+#define UVD_VCPU_CNTL__CLK_EN__SHIFT 0x00000009
+#define UVD_VCPU_CNTL__DBG_MUX_MASK 0x0000e000L
+#define UVD_VCPU_CNTL__DBG_MUX__SHIFT 0x0000000d
+#define UVD_VCPU_CNTL__ECPU_AM32_EN_MASK 0x20000000L
+#define UVD_VCPU_CNTL__ECPU_AM32_EN__SHIFT 0x0000001d
+#define UVD_VCPU_CNTL__IRQ_ERR_MASK 0x0000000fL
+#define UVD_VCPU_CNTL__IRQ_ERR__SHIFT 0x00000000
+#define UVD_VCPU_CNTL__JTAG_EN_MASK 0x00010000L
+#define UVD_VCPU_CNTL__JTAG_EN__SHIFT 0x00000010
+#define UVD_VCPU_CNTL__PMB_ED_ENABLE_MASK 0x00000020L
+#define UVD_VCPU_CNTL__PMB_ED_ENABLE__SHIFT 0x00000005
+#define UVD_VCPU_CNTL__PMB_SOFT_RESET_MASK 0x00000040L
+#define UVD_VCPU_CNTL__PMB_SOFT_RESET__SHIFT 0x00000006
+#define UVD_VCPU_CNTL__PRB_TIMEOUT_VAL_MASK 0x0ff00000L
+#define UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT 0x00000014
+#define UVD_VCPU_CNTL__RBBM_SOFT_RESET_MASK 0x00000080L
+#define UVD_VCPU_CNTL__RBBM_SOFT_RESET__SHIFT 0x00000007
+#define UVD_VCPU_CNTL__TIMEOUT_DIS_MASK 0x00040000L
+#define UVD_VCPU_CNTL__TIMEOUT_DIS__SHIFT 0x00000012
+#define UVD_VCPU_CNTL__TRCE_EN_MASK 0x00000400L
+#define UVD_VCPU_CNTL__TRCE_EN__SHIFT 0x0000000a
+#define UVD_VCPU_CNTL__TRCE_MUX_MASK 0x00001800L
+#define UVD_VCPU_CNTL__TRCE_MUX__SHIFT 0x0000000b
+#define UVD_VCPU_CNTL__WMV9_EN_MASK 0x40000000L
+#define UVD_VCPU_CNTL__WMV9_EN__SHIFT 0x0000001e
+
+#endif
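
The _MASK/__SHIFT pairs above are consumed with the usual mask-then-shift
idiom. A minimal sketch in C, assuming the kernel's fixed-width integer
types; the two helpers are hypothetical and not part of this header:

static inline uint32_t uvd_rbc_get_rb_rptr(uint32_t reg_val)
{
	/* Isolate the field bits, then shift them down to bit 0. */
	return (reg_val & UVD_RBC_RB_RPTR__RB_RPTR_MASK) >>
		UVD_RBC_RB_RPTR__RB_RPTR__SHIFT;
}

static inline uint32_t uvd_rbc_set_rb_blksz(uint32_t reg_val, uint32_t blksz)
{
	/* Clear the old field, then OR in the new value, re-masked so an
	 * out-of-range argument cannot spill into neighbouring fields. */
	reg_val &= ~UVD_RBC_RB_CNTL__RB_BLKSZ_MASK;
	reg_val |= (blksz << UVD_RBC_RB_CNTL__RB_BLKSZ__SHIFT) &
		   UVD_RBC_RB_CNTL__RB_BLKSZ_MASK;
	return reg_val;
}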
diff --git a/drivers/gpu/drm/amd/include/asic_reg/vce/vce_1_0_d.h b/drivers/gpu/drm/amd/include/asic_reg/vce/vce_1_0_d.h
new file mode 100644
index 000000000000..2176548e9203
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/vce/vce_1_0_d.h
@@ -0,0 +1,64 @@
+/*
+ *
+ * Copyright (C) 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef VCE_1_0_D_H
+#define VCE_1_0_D_H
+
+#define mmVCE_CLOCK_GATING_A 0x80BE
+#define mmVCE_CLOCK_GATING_B 0x80BF
+#define mmVCE_LMI_CACHE_CTRL 0x83BD
+#define mmVCE_LMI_CTRL 0x83A6
+#define mmVCE_LMI_CTRL2 0x839D
+#define mmVCE_LMI_MISC_CTRL 0x83B5
+#define mmVCE_LMI_STATUS 0x83A7
+#define mmVCE_LMI_SWAP_CNTL 0x83AD
+#define mmVCE_LMI_SWAP_CNTL1 0x83AE
+#define mmVCE_LMI_VCPU_CACHE_40BIT_BAR 0x8397
+#define mmVCE_LMI_VM_CTRL 0x83A8
+#define mmVCE_RB_ARB_CTRL 0x809F
+#define mmVCE_RB_BASE_HI 0x8061
+#define mmVCE_RB_BASE_HI2 0x805C
+#define mmVCE_RB_BASE_LO 0x8060
+#define mmVCE_RB_BASE_LO2 0x805B
+#define mmVCE_RB_RPTR 0x8063
+#define mmVCE_RB_RPTR2 0x805E
+#define mmVCE_RB_SIZE 0x8062
+#define mmVCE_RB_SIZE2 0x805D
+#define mmVCE_RB_WPTR 0x8064
+#define mmVCE_RB_WPTR2 0x805F
+#define mmVCE_SOFT_RESET 0x8048
+#define mmVCE_STATUS 0x8001
+#define mmVCE_SYS_INT_ACK 0x8341
+#define mmVCE_SYS_INT_EN 0x8340
+#define mmVCE_SYS_INT_STATUS 0x8341
+#define mmVCE_UENC_CLOCK_GATING 0x816F
+#define mmVCE_UENC_DMA_DCLK_CTRL 0x8250
+#define mmVCE_UENC_REG_CLOCK_GATING 0x8170
+#define mmVCE_VCPU_CACHE_OFFSET0 0x8009
+#define mmVCE_VCPU_CACHE_OFFSET1 0x800B
+#define mmVCE_VCPU_CACHE_OFFSET2 0x800D
+#define mmVCE_VCPU_CACHE_SIZE0 0x800A
+#define mmVCE_VCPU_CACHE_SIZE1 0x800C
+#define mmVCE_VCPU_CACHE_SIZE2 0x800E
+#define mmVCE_VCPU_CNTL 0x8005
+
+#endif
diff --git a/drivers/gpu/drm/amd/include/asic_reg/vce/vce_1_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/vce/vce_1_0_sh_mask.h
new file mode 100644
index 000000000000..ea5b26b11cb1
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/vce/vce_1_0_sh_mask.h
@@ -0,0 +1,99 @@
+/*
+ *
+ * Copyright (C) 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef VCE_1_0_SH_MASK_H
+#define VCE_1_0_SH_MASK_H
+
+#define VCE_LMI_CACHE_CTRL__VCPU_EN_MASK 0x00000001L
+#define VCE_LMI_CACHE_CTRL__VCPU_EN__SHIFT 0x00000000
+#define VCE_LMI_CTRL2__STALL_ARB_UMC_MASK 0x00000100L
+#define VCE_LMI_CTRL2__STALL_ARB_UMC__SHIFT 0x00000008
+#define VCE_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK 0x00200000L
+#define VCE_LMI_CTRL__VCPU_DATA_COHERENCY_EN__SHIFT 0x00000015
+#define VCE_LMI_SWAP_CNTL1__RD_MC_CID_SWAP_MASK 0x00003ffcL
+#define VCE_LMI_SWAP_CNTL1__RD_MC_CID_SWAP__SHIFT 0x00000002
+#define VCE_LMI_SWAP_CNTL1__VCPU_R_MC_SWAP_MASK 0x00000003L
+#define VCE_LMI_SWAP_CNTL1__VCPU_R_MC_SWAP__SHIFT 0x00000000
+#define VCE_LMI_SWAP_CNTL__VCPU_W_MC_SWAP_MASK 0x00000003L
+#define VCE_LMI_SWAP_CNTL__VCPU_W_MC_SWAP__SHIFT 0x00000000
+#define VCE_LMI_SWAP_CNTL__WR_MC_CID_SWAP_MASK 0x00003ffcL
+#define VCE_LMI_SWAP_CNTL__WR_MC_CID_SWAP__SHIFT 0x00000002
+#define VCE_LMI_VCPU_CACHE_40BIT_BAR__BAR_MASK 0xffffffffL
+#define VCE_LMI_VCPU_CACHE_40BIT_BAR__BAR__SHIFT 0x00000000
+#define VCE_RB_BASE_HI2__RB_BASE_HI_MASK 0xffffffffL
+#define VCE_RB_BASE_HI2__RB_BASE_HI__SHIFT 0x00000000
+#define VCE_RB_BASE_HI__RB_BASE_HI_MASK 0xffffffffL
+#define VCE_RB_BASE_HI__RB_BASE_HI__SHIFT 0x00000000
+#define VCE_RB_BASE_LO2__RB_BASE_LO_MASK 0xffffffc0L
+#define VCE_RB_BASE_LO2__RB_BASE_LO__SHIFT 0x00000006
+#define VCE_RB_BASE_LO__RB_BASE_LO_MASK 0xffffffc0L
+#define VCE_RB_BASE_LO__RB_BASE_LO__SHIFT 0x00000006
+#define VCE_RB_RPTR2__RB_RPTR_MASK 0x007ffff0L
+#define VCE_RB_RPTR2__RB_RPTR__SHIFT 0x00000004
+#define VCE_RB_RPTR__RB_RPTR_MASK 0x007ffff0L
+#define VCE_RB_RPTR__RB_RPTR__SHIFT 0x00000004
+#define VCE_RB_SIZE2__RB_SIZE_MASK 0x007ffff0L
+#define VCE_RB_SIZE2__RB_SIZE__SHIFT 0x00000004
+#define VCE_RB_SIZE__RB_SIZE_MASK 0x007ffff0L
+#define VCE_RB_SIZE__RB_SIZE__SHIFT 0x00000004
+#define VCE_RB_WPTR2__RB_WPTR_MASK 0x007ffff0L
+#define VCE_RB_WPTR2__RB_WPTR__SHIFT 0x00000004
+#define VCE_RB_WPTR__RB_WPTR_MASK 0x007ffff0L
+#define VCE_RB_WPTR__RB_WPTR__SHIFT 0x00000004
+#define VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK 0x00000001L
+#define VCE_SOFT_RESET__ECPU_SOFT_RESET__SHIFT 0x00000000
+#define VCE_STATUS__JOB_BUSY_MASK 0x00000001L
+#define VCE_STATUS__JOB_BUSY__SHIFT 0x00000000
+#define VCE_STATUS__UENC_BUSY_MASK 0x00000100L
+#define VCE_STATUS__UENC_BUSY__SHIFT 0x00000008
+#define VCE_STATUS__VCPU_REPORT_MASK 0x000000feL
+#define VCE_STATUS__VCPU_REPORT__SHIFT 0x00000001
+#define VCE_SYS_INT_ACK__VCE_SYS_INT_TRAP_INTERRUPT_ACK_MASK 0x00000008L
+#define VCE_SYS_INT_ACK__VCE_SYS_INT_TRAP_INTERRUPT_ACK__SHIFT 0x00000003
+#define VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK 0x00000008L
+#define VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN__SHIFT 0x00000003
+#define VCE_SYS_INT_STATUS__VCE_SYS_INT_TRAP_INTERRUPT_INT_MASK 0x00000008L
+#define VCE_SYS_INT_STATUS__VCE_SYS_INT_TRAP_INTERRUPT_INT__SHIFT 0x00000003
+#define VCE_UENC_DMA_DCLK_CTRL__RDDMCLK_FORCEON_MASK 0x00000002L
+#define VCE_UENC_DMA_DCLK_CTRL__RDDMCLK_FORCEON__SHIFT 0x00000001
+#define VCE_UENC_DMA_DCLK_CTRL__REGCLK_FORCEON_MASK 0x00000004L
+#define VCE_UENC_DMA_DCLK_CTRL__REGCLK_FORCEON__SHIFT 0x00000002
+#define VCE_UENC_DMA_DCLK_CTRL__WRDMCLK_FORCEON_MASK 0x00000001L
+#define VCE_UENC_DMA_DCLK_CTRL__WRDMCLK_FORCEON__SHIFT 0x00000000
+#define VCE_VCPU_CACHE_OFFSET0__OFFSET_MASK 0x0fffffffL
+#define VCE_VCPU_CACHE_OFFSET0__OFFSET__SHIFT 0x00000000
+#define VCE_VCPU_CACHE_OFFSET1__OFFSET_MASK 0x0fffffffL
+#define VCE_VCPU_CACHE_OFFSET1__OFFSET__SHIFT 0x00000000
+#define VCE_VCPU_CACHE_OFFSET2__OFFSET_MASK 0x0fffffffL
+#define VCE_VCPU_CACHE_OFFSET2__OFFSET__SHIFT 0x00000000
+#define VCE_VCPU_CACHE_SIZE0__SIZE_MASK 0x00ffffffL
+#define VCE_VCPU_CACHE_SIZE0__SIZE__SHIFT 0x00000000
+#define VCE_VCPU_CACHE_SIZE1__SIZE_MASK 0x00ffffffL
+#define VCE_VCPU_CACHE_SIZE1__SIZE__SHIFT 0x00000000
+#define VCE_VCPU_CACHE_SIZE2__SIZE_MASK 0x00ffffffL
+#define VCE_VCPU_CACHE_SIZE2__SIZE__SHIFT 0x00000000
+#define VCE_VCPU_CNTL__CLK_EN_MASK 0x00000001L
+#define VCE_VCPU_CNTL__CLK_EN__SHIFT 0x00000000
+#define VCE_VCPU_CNTL__RBBM_SOFT_RESET_MASK 0x00040000L
+#define VCE_VCPU_CNTL__RBBM_SOFT_RESET__SHIFT 0x00000012
+
+#endif
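
Paired with the mmVCE_* offsets from the companion _d.h header, these
masks drive read-modify-write sequences such as the following sketch;
cgs_read_register()/cgs_write_register() are the CGS accessors used
elsewhere in this patch, and the helper itself is hypothetical:

static void vce_program_rb_size(void *cgs_device, uint32_t rb_size)
{
	uint32_t tmp = cgs_read_register(cgs_device, mmVCE_RB_SIZE);

	/* Replace only the RB_SIZE field; leave the other bits intact. */
	tmp &= ~VCE_RB_SIZE__RB_SIZE_MASK;
	tmp |= (rb_size << VCE_RB_SIZE__RB_SIZE__SHIFT) &
	       VCE_RB_SIZE__RB_SIZE_MASK;
	cgs_write_register(cgs_device, mmVCE_RB_SIZE, tmp);
}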
diff --git a/drivers/gpu/drm/amd/include/cgs_common.h b/drivers/gpu/drm/amd/include/cgs_common.h
index df7c18b6a02a..e4a1697ec1d3 100755..100644
--- a/drivers/gpu/drm/amd/include/cgs_common.h
+++ b/drivers/gpu/drm/amd/include/cgs_common.h
@@ -106,6 +106,7 @@ enum cgs_ucode_id {
CGS_UCODE_ID_CP_MEC_JT2,
CGS_UCODE_ID_GMCON_RENG,
CGS_UCODE_ID_RLC_G,
+ CGS_UCODE_ID_STORAGE,
CGS_UCODE_ID_MAXIMUM,
};
@@ -619,6 +620,8 @@ typedef int (*cgs_call_acpi_method)(struct cgs_device *cgs_device,
typedef int (*cgs_query_system_info)(struct cgs_device *cgs_device,
struct cgs_system_info *sys_info);
+typedef int (*cgs_is_virtualization_enabled_t)(void *cgs_device);
+
struct cgs_ops {
/* memory management calls (similar to KFD interface) */
cgs_gpu_mem_info_t gpu_mem_info;
@@ -670,6 +673,7 @@ struct cgs_ops {
cgs_call_acpi_method call_acpi_method;
/* get system info */
cgs_query_system_info query_system_info;
+ cgs_is_virtualization_enabled_t is_virtualization_enabled;
};
struct cgs_os_ops; /* To be define in OS-specific CGS header */
@@ -773,4 +777,6 @@ struct cgs_device
CGS_CALL(get_pci_resource, cgs_device, resource_type, size, offset, \
resource_base)
+#define cgs_is_virtualization_enabled(cgs_device) \
+ CGS_CALL(is_virtualization_enabled, cgs_device)
#endif /* _CGS_COMMON_H */
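
The new cgs_is_virtualization_enabled() wrapper follows the existing
CGS_CALL convention of dispatching through the ops table attached to the
device handle. A hand-expanded sketch of what the macro resolves to,
assuming the usual CGS_CALL layout in this header:

static int example_is_virt_enabled(void *cgs_device)
{
	const struct cgs_ops *ops = ((struct cgs_device *)cgs_device)->ops;

	/* Invoke whichever backend registered the callback. */
	return ops->is_virtualization_enabled(cgs_device);
}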
diff --git a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
index 7174f7a68266..c81cf1412728 100644
--- a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
+++ b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
@@ -41,7 +41,7 @@
#define PP_CHECK_HW(hwmgr) \
do { \
if ((hwmgr) == NULL || (hwmgr)->hwmgr_func == NULL) \
- return -EINVAL; \
+ return 0; \
} while (0)
static int pp_early_init(void *handle)
@@ -115,6 +115,7 @@ static int pp_hw_init(void *handle)
struct pp_instance *pp_handle;
struct pp_smumgr *smumgr;
struct pp_eventmgr *eventmgr;
+ struct pp_hwmgr *hwmgr;
int ret = 0;
if (handle == NULL)
@@ -122,6 +123,7 @@ static int pp_hw_init(void *handle)
pp_handle = (struct pp_instance *)handle;
smumgr = pp_handle->smu_mgr;
+ hwmgr = pp_handle->hwmgr;
if (smumgr == NULL || smumgr->smumgr_funcs == NULL ||
smumgr->smumgr_funcs->smu_init == NULL ||
@@ -141,9 +143,11 @@ static int pp_hw_init(void *handle)
return ret;
}
- hw_init_power_state_table(pp_handle->hwmgr);
- eventmgr = pp_handle->eventmgr;
+ PP_CHECK_HW(hwmgr);
+
+ hw_init_power_state_table(hwmgr);
+ eventmgr = pp_handle->eventmgr;
if (eventmgr == NULL || eventmgr->pp_eventmgr_init == NULL)
return -EINVAL;
@@ -243,7 +247,9 @@ static int pp_suspend(void *handle)
pp_handle = (struct pp_instance *)handle;
eventmgr = pp_handle->eventmgr;
- pem_handle_event(eventmgr, AMD_PP_EVENT_SUSPEND, &event_data);
+
+ if (eventmgr != NULL)
+ pem_handle_event(eventmgr, AMD_PP_EVENT_SUSPEND, &event_data);
return 0;
}
@@ -273,7 +279,8 @@ static int pp_resume(void *handle)
}
eventmgr = pp_handle->eventmgr;
- pem_handle_event(eventmgr, AMD_PP_EVENT_RESUME, &event_data);
+ if (eventmgr != NULL)
+ pem_handle_event(eventmgr, AMD_PP_EVENT_RESUME, &event_data);
return 0;
}
@@ -340,8 +347,7 @@ static enum amd_dpm_forced_level pp_dpm_get_performance_level(
hwmgr = ((struct pp_instance *)handle)->hwmgr;
- if (hwmgr == NULL)
- return -EINVAL;
+ PP_CHECK_HW(hwmgr);
return (((struct pp_instance *)handle)->hwmgr->dpm_level);
}
@@ -436,7 +442,8 @@ static enum PP_StateUILabel power_state_convert(enum amd_pm_state_type state)
}
}
-int pp_dpm_dispatch_tasks(void *handle, enum amd_pp_event event_id, void *input, void *output)
+static int pp_dpm_dispatch_tasks(void *handle, enum amd_pp_event event_id,
+ void *input, void *output)
{
int ret = 0;
struct pp_instance *pp_handle;
@@ -447,6 +454,9 @@ int pp_dpm_dispatch_tasks(void *handle, enum amd_pp_event event_id, void *input,
if (pp_handle == NULL)
return -EINVAL;
+ if (pp_handle->eventmgr == NULL)
+ return 0;
+
switch (event_id) {
case AMD_PP_EVENT_DISPLAY_CONFIG_CHANGE:
ret = pem_handle_event(pp_handle->eventmgr, event_id, &data);
@@ -475,7 +485,7 @@ int pp_dpm_dispatch_tasks(void *handle, enum amd_pp_event event_id, void *input,
return ret;
}
-enum amd_pm_state_type pp_dpm_get_current_power_state(void *handle)
+static enum amd_pm_state_type pp_dpm_get_current_power_state(void *handle)
{
struct pp_hwmgr *hwmgr;
struct pp_power_state *state;
@@ -581,6 +591,23 @@ static int pp_dpm_get_fan_speed_percent(void *handle, uint32_t *speed)
return hwmgr->hwmgr_func->get_fan_speed_percent(hwmgr, speed);
}
+static int pp_dpm_get_fan_speed_rpm(void *handle, uint32_t *rpm)
+{
+ struct pp_hwmgr *hwmgr;
+
+ if (handle == NULL)
+ return -EINVAL;
+
+ hwmgr = ((struct pp_instance *)handle)->hwmgr;
+
+ PP_CHECK_HW(hwmgr);
+
+ if (hwmgr->hwmgr_func->get_fan_speed_rpm == NULL)
+ return -EINVAL;
+
+ return hwmgr->hwmgr_func->get_fan_speed_rpm(hwmgr, rpm);
+}
+
static int pp_dpm_get_temperature(void *handle)
{
struct pp_hwmgr *hwmgr;
@@ -820,6 +847,21 @@ static int pp_dpm_read_sensor(void *handle, int idx, int32_t *value)
return hwmgr->hwmgr_func->read_sensor(hwmgr, idx, value);
}
+static struct amd_vce_state *
+pp_dpm_get_vce_clock_state(void *handle, unsigned idx)
+{
+ struct pp_hwmgr *hwmgr;
+
+ if (handle) {
+ hwmgr = ((struct pp_instance *)handle)->hwmgr;
+
+ if (hwmgr && idx < hwmgr->num_vce_state_tables)
+ return &hwmgr->vce_states[idx];
+ }
+
+ return NULL;
+}
+
const struct amd_powerplay_funcs pp_dpm_funcs = {
.get_temperature = pp_dpm_get_temperature,
.load_firmware = pp_dpm_load_fw,
@@ -836,6 +878,7 @@ const struct amd_powerplay_funcs pp_dpm_funcs = {
.get_fan_control_mode = pp_dpm_get_fan_control_mode,
.set_fan_speed_percent = pp_dpm_set_fan_speed_percent,
.get_fan_speed_percent = pp_dpm_get_fan_speed_percent,
+ .get_fan_speed_rpm = pp_dpm_get_fan_speed_rpm,
.get_pp_num_states = pp_dpm_get_pp_num_states,
.get_pp_table = pp_dpm_get_pp_table,
.set_pp_table = pp_dpm_set_pp_table,
@@ -846,6 +889,7 @@ const struct amd_powerplay_funcs pp_dpm_funcs = {
.get_mclk_od = pp_dpm_get_mclk_od,
.set_mclk_od = pp_dpm_set_mclk_od,
.read_sensor = pp_dpm_read_sensor,
+ .get_vce_clock_state = pp_dpm_get_vce_clock_state,
};
static int amd_pp_instance_init(struct amd_pp_init *pp_init,
@@ -864,6 +908,13 @@ static int amd_pp_instance_init(struct amd_pp_init *pp_init,
if (ret)
goto fail_smum;
+
+ amd_pp->pp_handle = handle;
+
+ if ((amdgpu_dpm == 0)
+ || cgs_is_virtualization_enabled(pp_init->device))
+ return 0;
+
ret = hwmgr_init(pp_init, handle);
if (ret)
goto fail_hwmgr;
@@ -872,7 +923,6 @@ static int amd_pp_instance_init(struct amd_pp_init *pp_init,
if (ret)
goto fail_eventmgr;
- amd_pp->pp_handle = handle;
return 0;
fail_eventmgr:
@@ -891,12 +941,13 @@ static int amd_pp_instance_fini(void *handle)
if (instance == NULL)
return -EINVAL;
- eventmgr_fini(instance->eventmgr);
-
- hwmgr_fini(instance->hwmgr);
+ if ((amdgpu_dpm != 0)
+ && !cgs_is_virtualization_enabled(instance->smu_mgr->device)) {
+ eventmgr_fini(instance->eventmgr);
+ hwmgr_fini(instance->hwmgr);
+ }
smum_fini(instance->smu_mgr);
-
kfree(handle);
return 0;
}
@@ -953,6 +1004,10 @@ int amd_powerplay_reset(void *handle)
if (ret)
return ret;
+ if ((amdgpu_dpm == 0)
+ || cgs_is_virtualization_enabled(instance->smu_mgr->device))
+ return 0;
+
hw_init_power_state_table(instance->hwmgr);
if (eventmgr == NULL || eventmgr->pp_eventmgr_init == NULL)
@@ -976,6 +1031,8 @@ int amd_powerplay_display_configuration_change(void *handle,
hwmgr = ((struct pp_instance *)handle)->hwmgr;
+ PP_CHECK_HW(hwmgr);
+
phm_store_dal_configuration_data(hwmgr, display_config);
return 0;
@@ -993,6 +1050,8 @@ int amd_powerplay_get_display_power_level(void *handle,
hwmgr = ((struct pp_instance *)handle)->hwmgr;
+ PP_CHECK_HW(hwmgr);
+
return phm_get_dal_power_level(hwmgr, output);
}
@@ -1010,6 +1069,8 @@ int amd_powerplay_get_current_clocks(void *handle,
hwmgr = ((struct pp_instance *)handle)->hwmgr;
+ PP_CHECK_HW(hwmgr);
+
phm_get_dal_power_level(hwmgr, &simple_clocks);
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_PowerContainment)) {
@@ -1054,6 +1115,8 @@ int amd_powerplay_get_clock_by_type(void *handle, enum amd_pp_clock_type type, s
hwmgr = ((struct pp_instance *)handle)->hwmgr;
+ PP_CHECK_HW(hwmgr);
+
result = phm_get_clock_by_type(hwmgr, type, clocks);
return result;
@@ -1072,6 +1135,8 @@ int amd_powerplay_get_display_mode_validation_clocks(void *handle,
hwmgr = ((struct pp_instance *)handle)->hwmgr;
+ PP_CHECK_HW(hwmgr);
+
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DynamicPatchPowerState))
result = phm_get_max_high_clocks(hwmgr, clocks);
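
The thread running through the hunks above: when amdgpu_dpm is 0 or
virtualization is enabled, amd_pp_instance_init() stops after smum_init(),
so hwmgr and eventmgr are never created and every public entry point must
treat a NULL manager as benign. A condensed sketch of the resulting guard
pattern, with names as in the patch and the body abridged:

static int example_entry_point(void *handle)
{
	struct pp_hwmgr *hwmgr;

	if (handle == NULL)
		return -EINVAL;		/* caller bug: still an error */

	hwmgr = ((struct pp_instance *)handle)->hwmgr;

	PP_CHECK_HW(hwmgr);		/* no hwmgr: return 0, not -EINVAL */

	/* ... real work against hwmgr ... */
	return 0;
}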
diff --git a/drivers/gpu/drm/amd/powerplay/eventmgr/eventtasks.c b/drivers/gpu/drm/amd/powerplay/eventmgr/eventtasks.c
index b6f45fd01fa6..ec36c0e28388 100644
--- a/drivers/gpu/drm/amd/powerplay/eventmgr/eventtasks.c
+++ b/drivers/gpu/drm/amd/powerplay/eventmgr/eventtasks.c
@@ -154,7 +154,7 @@ int pem_task_powerdown_vce_tasks(struct pp_eventmgr *eventmgr, struct pem_event_
int pem_task_disable_clock_power_gatings_tasks(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data)
{
- /* TODO */
+ phm_disable_clock_power_gatings(eventmgr->hwmgr);
return 0;
}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c
index 2028980f1ed4..b0c63c5f54c9 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c
@@ -169,7 +169,7 @@ int cz_dpm_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate)
if (bgate) {
cgs_set_clockgating_state(hwmgr->device,
AMD_IP_BLOCK_TYPE_UVD,
- AMD_CG_STATE_UNGATE);
+ AMD_CG_STATE_GATE);
cgs_set_powergating_state(hwmgr->device,
AMD_IP_BLOCK_TYPE_UVD,
AMD_PG_STATE_GATE);
@@ -182,7 +182,7 @@ int cz_dpm_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate)
AMD_CG_STATE_UNGATE);
cgs_set_clockgating_state(hwmgr->device,
AMD_IP_BLOCK_TYPE_UVD,
- AMD_PG_STATE_GATE);
+ AMD_PG_STATE_UNGATE);
cz_dpm_update_uvd_dpm(hwmgr, false);
}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c
index 960424913496..4b14f259a147 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c
@@ -66,7 +66,7 @@ static const struct cz_power_state *cast_const_PhwCzPowerState(
return (struct cz_power_state *)hw_ps;
}
-uint32_t cz_get_eclk_level(struct pp_hwmgr *hwmgr,
+static uint32_t cz_get_eclk_level(struct pp_hwmgr *hwmgr,
uint32_t clock, uint32_t msg)
{
int i = 0;
@@ -1017,7 +1017,7 @@ static int cz_tf_program_bootup_state(struct pp_hwmgr *hwmgr, void *input,
return 0;
}
-int cz_tf_reset_acp_boot_level(struct pp_hwmgr *hwmgr, void *input,
+static int cz_tf_reset_acp_boot_level(struct pp_hwmgr *hwmgr, void *input,
void *output, void *storage, int result)
{
struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
@@ -1225,7 +1225,7 @@ static int cz_hwmgr_backend_fini(struct pp_hwmgr *hwmgr)
return 0;
}
-int cz_phm_force_dpm_highest(struct pp_hwmgr *hwmgr)
+static int cz_phm_force_dpm_highest(struct pp_hwmgr *hwmgr)
{
struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
@@ -1239,7 +1239,7 @@ int cz_phm_force_dpm_highest(struct pp_hwmgr *hwmgr)
return 0;
}
-int cz_phm_unforce_dpm_levels(struct pp_hwmgr *hwmgr)
+static int cz_phm_unforce_dpm_levels(struct pp_hwmgr *hwmgr)
{
struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
struct phm_clock_voltage_dependency_table *table =
@@ -1277,7 +1277,7 @@ int cz_phm_unforce_dpm_levels(struct pp_hwmgr *hwmgr)
return 0;
}
-int cz_phm_force_dpm_lowest(struct pp_hwmgr *hwmgr)
+static int cz_phm_force_dpm_lowest(struct pp_hwmgr *hwmgr)
{
struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
@@ -1533,7 +1533,7 @@ static int cz_dpm_get_pp_table_entry(struct pp_hwmgr *hwmgr,
return result;
}
-int cz_get_power_state_size(struct pp_hwmgr *hwmgr)
+static int cz_get_power_state_size(struct pp_hwmgr *hwmgr)
{
return sizeof(struct cz_power_state);
}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/functiontables.c b/drivers/gpu/drm/amd/powerplay/hwmgr/functiontables.c
index a6abe81bc843..71822ae73a12 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/functiontables.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/functiontables.c
@@ -35,7 +35,7 @@ static int phm_run_table(struct pp_hwmgr *hwmgr,
phm_table_function *function;
if (rt_table->function_list == NULL) {
- printk(KERN_INFO "[ powerplay ] this function not implement!\n");
+ pr_debug("[ powerplay ] this function is not implemented!\n");
return 0;
}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c
index 0723758ed065..c355a0f51663 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c
@@ -209,6 +209,19 @@ int phm_enable_clock_power_gatings(struct pp_hwmgr *hwmgr)
return 0;
}
+int phm_disable_clock_power_gatings(struct pp_hwmgr *hwmgr)
+{
+ PHM_FUNC_CHECK(hwmgr);
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_TablelessHardwareInterface)) {
+ if (NULL != hwmgr->hwmgr_func->disable_clock_power_gating)
+ return hwmgr->hwmgr_func->disable_clock_power_gating(hwmgr);
+ }
+ return 0;
+}
+
int phm_display_configuration_changed(struct pp_hwmgr *hwmgr)
{
PHM_FUNC_CHECK(hwmgr);
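
Callers reach the new wrapper through the eventmgr task shown earlier;
a minimal sketch of such a caller:

static int example_disable_cpg(struct pp_eventmgr *eventmgr)
{
	/* phm_disable_clock_power_gatings() validates hwmgr via
	 * PHM_FUNC_CHECK, tests the TablelessHardwareInterface cap and
	 * the per-ASIC callback, and only then dispatches. */
	return phm_disable_clock_power_gatings(eventmgr->hwmgr);
}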
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
index e03dcb6ea9c1..dc6700aee18f 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
@@ -80,20 +80,17 @@ int hwmgr_init(struct amd_pp_init *pp_init, struct pp_instance *handle)
switch (hwmgr->chip_id) {
case CHIP_TOPAZ:
topaz_set_asic_special_caps(hwmgr);
- hwmgr->feature_mask &= ~(PP_SMC_VOLTAGE_CONTROL_MASK |
- PP_VBI_TIME_SUPPORT_MASK |
+ hwmgr->feature_mask &= ~(PP_VBI_TIME_SUPPORT_MASK |
PP_ENABLE_GFX_CG_THRU_SMU);
hwmgr->pp_table_version = PP_TABLE_V0;
break;
case CHIP_TONGA:
tonga_set_asic_special_caps(hwmgr);
- hwmgr->feature_mask &= ~(PP_SMC_VOLTAGE_CONTROL_MASK |
- PP_VBI_TIME_SUPPORT_MASK);
+ hwmgr->feature_mask &= ~PP_VBI_TIME_SUPPORT_MASK;
break;
case CHIP_FIJI:
fiji_set_asic_special_caps(hwmgr);
- hwmgr->feature_mask &= ~(PP_SMC_VOLTAGE_CONTROL_MASK |
- PP_VBI_TIME_SUPPORT_MASK |
+ hwmgr->feature_mask &= ~(PP_VBI_TIME_SUPPORT_MASK |
PP_ENABLE_GFX_CG_THRU_SMU);
break;
case CHIP_POLARIS11:
@@ -685,20 +682,24 @@ void hwmgr_init_default_caps(struct pp_hwmgr *hwmgr)
int hwmgr_set_user_specify_caps(struct pp_hwmgr *hwmgr)
{
- if (amdgpu_sclk_deep_sleep_en)
+ if (amdgpu_pp_feature_mask & PP_SCLK_DEEP_SLEEP_MASK)
phm_cap_set(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_SclkDeepSleep);
else
phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_SclkDeepSleep);
- if (amdgpu_powercontainment)
+ if (amdgpu_pp_feature_mask & PP_POWER_CONTAINMENT_MASK) {
phm_cap_set(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_PowerContainment);
- else
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_CAC);
+ } else {
phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_PowerContainment);
-
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_CAC);
+ }
hwmgr->feature_mask = amdgpu_pp_feature_mask;
return 0;
@@ -736,9 +737,6 @@ int polaris_set_asic_special_caps(struct pp_hwmgr *hwmgr)
PHM_PlatformCaps_TCPRamping);
phm_cap_set(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_CAC);
-
- phm_cap_set(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_RegulatorHot);
phm_cap_set(hwmgr->platform_descriptor.platformCaps,
@@ -767,8 +765,6 @@ int fiji_set_asic_special_caps(struct pp_hwmgr *hwmgr)
phm_cap_set(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_TablelessHardwareInterface);
- phm_cap_set(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_CAC);
return 0;
}
@@ -791,9 +787,6 @@ int tonga_set_asic_special_caps(struct pp_hwmgr *hwmgr)
phm_cap_set(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_TablelessHardwareInterface);
- phm_cap_set(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_CAC);
-
return 0;
}
@@ -810,8 +803,6 @@ int topaz_set_asic_special_caps(struct pp_hwmgr *hwmgr)
phm_cap_set(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_TablelessHardwareInterface);
phm_cap_set(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_CAC);
- phm_cap_set(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_EVV);
return 0;
}
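
hwmgr_set_user_specify_caps() now derives caps from bits of the single
amdgpu_pp_feature_mask word instead of separate module parameters. The
recurring test-and-set pattern could be factored as in this sketch;
apply_feature_cap is hypothetical, the other names are from the patch:

static void apply_feature_cap(struct pp_hwmgr *hwmgr,
			      uint32_t feature_bit,
			      enum phm_platform_caps cap)
{
	/* Mirror one feature-mask bit into one platform cap. */
	if (amdgpu_pp_feature_mask & feature_bit)
		phm_cap_set(hwmgr->platform_descriptor.platformCaps, cap);
	else
		phm_cap_unset(hwmgr->platform_descriptor.platformCaps, cap);
}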
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/pp_acpi.c b/drivers/gpu/drm/amd/powerplay/hwmgr/pp_acpi.c
index 1944d289f846..f5e8fda964f7 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/pp_acpi.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/pp_acpi.c
@@ -25,6 +25,7 @@
#include "linux/delay.h"
#include "hwmgr.h"
#include "amd_acpi.h"
+#include "pp_acpi.h"
bool acpi_atcs_functions_supported(void *device, uint32_t index)
{
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c b/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c
index 4477c55a58e3..c45bd2560468 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c
@@ -131,7 +131,7 @@ static int set_platform_caps(struct pp_hwmgr *hwmgr, uint32_t powerplay_caps)
/**
* Private Function to get the PowerPlay Table Address.
*/
-const void *get_powerplay_table(struct pp_hwmgr *hwmgr)
+static const void *get_powerplay_table(struct pp_hwmgr *hwmgr)
{
int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
@@ -1049,7 +1049,7 @@ static int check_powerplay_tables(
return 0;
}
-int pp_tables_v1_0_initialize(struct pp_hwmgr *hwmgr)
+static int pp_tables_v1_0_initialize(struct pp_hwmgr *hwmgr)
{
int result = 0;
const ATOM_Tonga_POWERPLAYTABLE *powerplay_table;
@@ -1100,7 +1100,7 @@ int pp_tables_v1_0_initialize(struct pp_hwmgr *hwmgr)
return result;
}
-int pp_tables_v1_0_uninitialize(struct pp_hwmgr *hwmgr)
+static int pp_tables_v1_0_uninitialize(struct pp_hwmgr *hwmgr)
{
struct phm_ppt_v1_information *pp_table_information =
(struct phm_ppt_v1_information *)(hwmgr->pptable);
@@ -1214,7 +1214,7 @@ static int ppt_get_num_of_vce_state_table_entries_v1_0(struct pp_hwmgr *hwmgr)
}
static int ppt_get_vce_state_table_entry_v1_0(struct pp_hwmgr *hwmgr, uint32_t i,
- struct pp_vce_state *vce_state, void **clock_info, uint32_t *flag)
+ struct amd_vce_state *vce_state, void **clock_info, uint32_t *flag)
{
const ATOM_Tonga_VCE_State_Record *vce_state_record;
ATOM_Tonga_SCLK_Dependency_Record *sclk_dep_record;
@@ -1318,7 +1318,7 @@ int get_powerplay_table_entry_v1_0(struct pp_hwmgr *hwmgr,
hwmgr->num_vce_state_tables = i = ppt_get_num_of_vce_state_table_entries_v1_0(hwmgr);
- if ((i != 0) && (i <= PP_MAX_VCE_LEVELS)) {
+ if ((i != 0) && (i <= AMD_MAX_VCE_LEVELS)) {
for (j = 0; j < i; j++)
ppt_get_vce_state_table_entry_v1_0(hwmgr, j, &(hwmgr->vce_states[j]), NULL, &flags);
}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c b/drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c
index ccf7ebeaf892..a4e9cf429e62 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c
@@ -1507,7 +1507,7 @@ static int init_phase_shedding_table(struct pp_hwmgr *hwmgr,
return 0;
}
-int get_number_of_vce_state_table_entries(
+static int get_number_of_vce_state_table_entries(
struct pp_hwmgr *hwmgr)
{
const ATOM_PPLIB_POWERPLAYTABLE *table =
@@ -1521,9 +1521,9 @@ int get_number_of_vce_state_table_entries(
return 0;
}
-int get_vce_state_table_entry(struct pp_hwmgr *hwmgr,
+static int get_vce_state_table_entry(struct pp_hwmgr *hwmgr,
unsigned long i,
- struct pp_vce_state *vce_state,
+ struct amd_vce_state *vce_state,
void **clock_info,
unsigned long *flag)
{
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c
index 6eb6db199250..a1fc4fcac1e0 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c
@@ -75,7 +75,7 @@ int smu7_powerdown_uvd(struct pp_hwmgr *hwmgr)
return 0;
}
-int smu7_powerup_uvd(struct pp_hwmgr *hwmgr)
+static int smu7_powerup_uvd(struct pp_hwmgr *hwmgr)
{
if (phm_cf_want_uvd_power_gating(hwmgr)) {
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
@@ -91,7 +91,7 @@ int smu7_powerup_uvd(struct pp_hwmgr *hwmgr)
return 0;
}
-int smu7_powerdown_vce(struct pp_hwmgr *hwmgr)
+static int smu7_powerdown_vce(struct pp_hwmgr *hwmgr)
{
if (phm_cf_want_vce_power_gating(hwmgr))
return smum_send_msg_to_smc(hwmgr->smumgr,
@@ -99,7 +99,7 @@ int smu7_powerdown_vce(struct pp_hwmgr *hwmgr)
return 0;
}
-int smu7_powerup_vce(struct pp_hwmgr *hwmgr)
+static int smu7_powerup_vce(struct pp_hwmgr *hwmgr)
{
if (phm_cf_want_vce_power_gating(hwmgr))
return smum_send_msg_to_smc(hwmgr->smumgr,
@@ -107,7 +107,7 @@ int smu7_powerup_vce(struct pp_hwmgr *hwmgr)
return 0;
}
-int smu7_powerdown_samu(struct pp_hwmgr *hwmgr)
+static int smu7_powerdown_samu(struct pp_hwmgr *hwmgr)
{
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_SamuPowerGating))
@@ -116,7 +116,7 @@ int smu7_powerdown_samu(struct pp_hwmgr *hwmgr)
return 0;
}
-int smu7_powerup_samu(struct pp_hwmgr *hwmgr)
+static int smu7_powerup_samu(struct pp_hwmgr *hwmgr)
{
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_SamuPowerGating))
@@ -150,14 +150,20 @@ int smu7_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate)
cgs_set_clockgating_state(hwmgr->device,
AMD_IP_BLOCK_TYPE_UVD,
AMD_CG_STATE_GATE);
+ cgs_set_powergating_state(hwmgr->device,
+ AMD_IP_BLOCK_TYPE_UVD,
+ AMD_PG_STATE_GATE);
smu7_update_uvd_dpm(hwmgr, true);
smu7_powerdown_uvd(hwmgr);
} else {
smu7_powerup_uvd(hwmgr);
- smu7_update_uvd_dpm(hwmgr, false);
+ cgs_set_powergating_state(hwmgr->device,
+ AMD_IP_BLOCK_TYPE_UVD,
+ AMD_CG_STATE_UNGATE);
cgs_set_clockgating_state(hwmgr->device,
AMD_IP_BLOCK_TYPE_UVD,
AMD_CG_STATE_UNGATE);
+ smu7_update_uvd_dpm(hwmgr, false);
}
return 0;
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
index 08cd0bd3ebe5..a74f60a575ae 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
@@ -89,7 +89,7 @@ enum DPM_EVENT_SRC {
static const unsigned long PhwVIslands_Magic = (unsigned long)(PHM_VIslands_Magic);
-struct smu7_power_state *cast_phw_smu7_power_state(
+static struct smu7_power_state *cast_phw_smu7_power_state(
struct pp_hw_power_state *hw_ps)
{
PP_ASSERT_WITH_CODE((PhwVIslands_Magic == hw_ps->magic),
@@ -99,7 +99,7 @@ struct smu7_power_state *cast_phw_smu7_power_state(
return (struct smu7_power_state *)hw_ps;
}
-const struct smu7_power_state *cast_const_phw_smu7_power_state(
+static const struct smu7_power_state *cast_const_phw_smu7_power_state(
const struct pp_hw_power_state *hw_ps)
{
PP_ASSERT_WITH_CODE((PhwVIslands_Magic == hw_ps->magic),
@@ -115,7 +115,7 @@ const struct smu7_power_state *cast_const_phw_smu7_power_state(
* @param hwmgr the address of the powerplay hardware manager.
* @return always 0
*/
-int smu7_get_mc_microcode_version (struct pp_hwmgr *hwmgr)
+static int smu7_get_mc_microcode_version(struct pp_hwmgr *hwmgr)
{
cgs_write_register(hwmgr->device, mmMC_SEQ_IO_DEBUG_INDEX, 0x9F);
@@ -124,7 +124,7 @@ int smu7_get_mc_microcode_version (struct pp_hwmgr *hwmgr)
return 0;
}
-uint16_t smu7_get_current_pcie_speed(struct pp_hwmgr *hwmgr)
+static uint16_t smu7_get_current_pcie_speed(struct pp_hwmgr *hwmgr)
{
uint32_t speedCntl = 0;
@@ -135,7 +135,7 @@ uint16_t smu7_get_current_pcie_speed(struct pp_hwmgr *hwmgr)
PCIE_LC_SPEED_CNTL, LC_CURRENT_DATA_RATE));
}
-int smu7_get_current_pcie_lane_number(struct pp_hwmgr *hwmgr)
+static int smu7_get_current_pcie_lane_number(struct pp_hwmgr *hwmgr)
{
uint32_t link_width;
@@ -155,7 +155,7 @@ int smu7_get_current_pcie_lane_number(struct pp_hwmgr *hwmgr)
* @param pHwMgr the address of the powerplay hardware manager.
* @return always PP_Result_OK
*/
-int smu7_enable_smc_voltage_controller(struct pp_hwmgr *hwmgr)
+static int smu7_enable_smc_voltage_controller(struct pp_hwmgr *hwmgr)
{
if (hwmgr->feature_mask & PP_SMC_VOLTAGE_CONTROL_MASK)
smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_Voltage_Cntl_Enable);
@@ -802,7 +802,7 @@ static int smu7_setup_dpm_tables_v1(struct pp_hwmgr *hwmgr)
return 0;
}
-int smu7_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
+static int smu7_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
{
struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
@@ -993,13 +993,6 @@ static int smu7_start_dpm(struct pp_hwmgr *hwmgr)
PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__PCIE,
SWRST_COMMAND_1, RESETLC, 0x0);
- PP_ASSERT_WITH_CODE(
- (0 == smum_send_msg_to_smc(hwmgr->smumgr,
- PPSMC_MSG_Voltage_Cntl_Enable)),
- "Failed to enable voltage DPM during DPM Start Function!",
- return -EINVAL);
-
-
if (smu7_enable_sclk_mclk_dpm(hwmgr)) {
printk(KERN_ERR "Failed to enable Sclk DPM and Mclk DPM!");
return -EINVAL;
@@ -1153,7 +1146,7 @@ static int smu7_disable_thermal_auto_throttle(struct pp_hwmgr *hwmgr)
return smu7_disable_auto_throttle_source(hwmgr, PHM_AutoThrottleSource_Thermal);
}
-int smu7_pcie_performance_request(struct pp_hwmgr *hwmgr)
+static int smu7_pcie_performance_request(struct pp_hwmgr *hwmgr)
{
struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
data->pcie_performance_request = true;
@@ -1161,7 +1154,7 @@ int smu7_pcie_performance_request(struct pp_hwmgr *hwmgr)
return 0;
}
-int smu7_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
+static int smu7_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
{
int tmp_result = 0;
int result = 0;
@@ -1352,6 +1345,8 @@ static void smu7_init_dpm_defaults(struct pp_hwmgr *hwmgr)
struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
struct phm_ppt_v1_information *table_info =
(struct phm_ppt_v1_information *)(hwmgr->pptable);
+ struct cgs_system_info sys_info = {0};
+ int result;
data->dll_default_on = false;
data->mclk_dpm0_activity_target = 0xa;
@@ -1426,7 +1421,7 @@ static void smu7_init_dpm_defaults(struct pp_hwmgr *hwmgr)
phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_ControlVDDCI);
- if ((hwmgr->pp_table_version != PP_TABLE_V0)
+ if ((hwmgr->pp_table_version != PP_TABLE_V0) && (hwmgr->feature_mask & PP_CLOCK_STRETCH_MASK)
&& (table_info->cac_dtp_table->usClockStretchAmount != 0))
phm_cap_set(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_ClockStretcher);
@@ -1439,6 +1434,18 @@ static void smu7_init_dpm_defaults(struct pp_hwmgr *hwmgr)
data->pcie_lane_performance.min = 16;
data->pcie_lane_power_saving.max = 0;
data->pcie_lane_power_saving.min = 16;
+
+ sys_info.size = sizeof(struct cgs_system_info);
+ sys_info.info_id = CGS_SYSTEM_INFO_PG_FLAGS;
+ result = cgs_query_system_info(hwmgr->device, &sys_info);
+ if (!result) {
+ if (sys_info.value & AMD_PG_SUPPORT_UVD)
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_UVDPowerGating);
+ if (sys_info.value & AMD_PG_SUPPORT_VCE)
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_VCEPowerGating);
+ }
}
/**
@@ -1865,7 +1872,7 @@ static int smu7_set_private_data_based_on_pptable_v1(struct pp_hwmgr *hwmgr)
return 0;
}
-int smu7_patch_voltage_workaround(struct pp_hwmgr *hwmgr)
+static int smu7_patch_voltage_workaround(struct pp_hwmgr *hwmgr)
{
struct phm_ppt_v1_information *table_info =
(struct phm_ppt_v1_information *)(hwmgr->pptable);
@@ -1994,8 +2001,9 @@ static int smu7_thermal_parameter_init(struct pp_hwmgr *hwmgr)
hwmgr->dyn_state.cac_dtp_table->usTargetOperatingTemp =
table_info->cac_dtp_table->usTargetOperatingTemp;
- phm_cap_set(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_ODFuzzyFanControlSupport);
+ if (hwmgr->feature_mask & PP_OD_FUZZY_FAN_CONTROL_MASK)
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_ODFuzzyFanControlSupport);
}
return 0;
@@ -2259,7 +2267,7 @@ static int smu7_set_private_data_based_on_pptable_v0(struct pp_hwmgr *hwmgr)
return 0;
}
-int smu7_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
+static int smu7_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
{
struct smu7_hwmgr *data;
int result;
@@ -3678,14 +3686,16 @@ static int smu7_set_max_fan_pwm_output(struct pp_hwmgr *hwmgr, uint16_t us_max_f
PPSMC_MSG_SetFanPwmMax, us_max_fan_pwm);
}
-int smu7_notify_smc_display_change(struct pp_hwmgr *hwmgr, bool has_display)
+static int
+smu7_notify_smc_display_change(struct pp_hwmgr *hwmgr, bool has_display)
{
PPSMC_Msg msg = has_display ? (PPSMC_Msg)PPSMC_HasDisplay : (PPSMC_Msg)PPSMC_NoDisplay;
return (smum_send_msg_to_smc(hwmgr->smumgr, msg) == 0) ? 0 : -1;
}
-int smu7_notify_smc_display_config_after_ps_adjustment(struct pp_hwmgr *hwmgr)
+static int
+smu7_notify_smc_display_config_after_ps_adjustment(struct pp_hwmgr *hwmgr)
{
uint32_t num_active_displays = 0;
struct cgs_display_info info = {0};
@@ -3707,7 +3717,7 @@ int smu7_notify_smc_display_config_after_ps_adjustment(struct pp_hwmgr *hwmgr)
* @param hwmgr the address of the powerplay hardware manager.
* @return always OK
*/
-int smu7_program_display_gap(struct pp_hwmgr *hwmgr)
+static int smu7_program_display_gap(struct pp_hwmgr *hwmgr)
{
struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
uint32_t num_active_displays = 0;
@@ -3757,7 +3767,7 @@ int smu7_program_display_gap(struct pp_hwmgr *hwmgr)
return 0;
}
-int smu7_display_configuration_changed_task(struct pp_hwmgr *hwmgr)
+static int smu7_display_configuration_changed_task(struct pp_hwmgr *hwmgr)
{
return smu7_program_display_gap(hwmgr);
}
@@ -3781,13 +3791,14 @@ static int smu7_set_max_fan_rpm_output(struct pp_hwmgr *hwmgr, uint16_t us_max_f
PPSMC_MSG_SetFanRpmMax, us_max_fan_rpm);
}
-int smu7_register_internal_thermal_interrupt(struct pp_hwmgr *hwmgr,
+static int smu7_register_internal_thermal_interrupt(struct pp_hwmgr *hwmgr,
const void *thermal_interrupt_info)
{
return 0;
}
-bool smu7_check_smc_update_required_for_display_configuration(struct pp_hwmgr *hwmgr)
+static bool
+smu7_check_smc_update_required_for_display_configuration(struct pp_hwmgr *hwmgr)
{
struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
bool is_update_required = false;
@@ -3816,7 +3827,9 @@ static inline bool smu7_are_power_levels_equal(const struct smu7_performance_lev
(pl1->pcie_lane == pl2->pcie_lane));
}
-int smu7_check_states_equal(struct pp_hwmgr *hwmgr, const struct pp_hw_power_state *pstate1, const struct pp_hw_power_state *pstate2, bool *equal)
+static int smu7_check_states_equal(struct pp_hwmgr *hwmgr,
+ const struct pp_hw_power_state *pstate1,
+ const struct pp_hw_power_state *pstate2, bool *equal)
{
const struct smu7_power_state *psa;
const struct smu7_power_state *psb;
@@ -3849,7 +3862,7 @@ int smu7_check_states_equal(struct pp_hwmgr *hwmgr, const struct pp_hw_power_sta
return 0;
}
-int smu7_upload_mc_firmware(struct pp_hwmgr *hwmgr)
+static int smu7_upload_mc_firmware(struct pp_hwmgr *hwmgr)
{
struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
@@ -3978,7 +3991,7 @@ static int smu7_init_sclk_threshold(struct pp_hwmgr *hwmgr)
return 0;
}
-int smu7_setup_asic_task(struct pp_hwmgr *hwmgr)
+static int smu7_setup_asic_task(struct pp_hwmgr *hwmgr)
{
int tmp_result, result = 0;
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c
index 41b634ffa5b0..26477f0f09dc 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c
@@ -603,9 +603,10 @@ int smu7_set_power_limit(struct pp_hwmgr *hwmgr, uint32_t n)
return 0;
}
-static int smu7_set_overdriver_target_tdp(struct pp_hwmgr *pHwMgr, uint32_t target_tdp)
+static int smu7_set_overdriver_target_tdp(struct pp_hwmgr *hwmgr,
+ uint32_t target_tdp)
{
- return smum_send_msg_to_smc_with_parameter(pHwMgr->smumgr,
+ return smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
PPSMC_MSG_OverDriveSetTargetTdp, target_tdp);
}
diff --git a/drivers/gpu/drm/amd/powerplay/inc/amd_powerplay.h b/drivers/gpu/drm/amd/powerplay/inc/amd_powerplay.h
index 3fb5e57a378b..3a883e6c601a 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/amd_powerplay.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/amd_powerplay.h
@@ -29,6 +29,8 @@
#include "amd_shared.h"
#include "cgs_common.h"
+extern int amdgpu_dpm;
+
enum amd_pp_sensors {
AMDGPU_PP_SENSOR_GFX_SCLK = 0,
AMDGPU_PP_SENSOR_VDDNB,
@@ -349,6 +351,7 @@ struct amd_powerplay_funcs {
int (*get_fan_control_mode)(void *handle);
int (*set_fan_speed_percent)(void *handle, uint32_t percent);
int (*get_fan_speed_percent)(void *handle, uint32_t *speed);
+ int (*get_fan_speed_rpm)(void *handle, uint32_t *rpm);
int (*get_pp_num_states)(void *handle, struct pp_states_info *data);
int (*get_pp_table)(void *handle, char **table);
int (*set_pp_table)(void *handle, const char *buf, size_t size);
@@ -359,6 +362,7 @@ struct amd_powerplay_funcs {
int (*get_mclk_od)(void *handle);
int (*set_mclk_od)(void *handle, uint32_t value);
int (*read_sensor)(void *handle, int idx, int32_t *value);
+ struct amd_vce_state* (*get_vce_clock_state)(void *handle, unsigned idx);
};
struct amd_powerplay {
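amd_powerplay_funcs gains two optional hooks here: get_fan_speed_rpm and get_vce_clock_state. A hedged sketch of calling through the table, assuming struct amd_powerplay carries pp_handle and pp_funcs fields as in the rest of this header; the NULL check matters because not every ASIC backend fills in the new hooks:

/* Hypothetical caller; error handling trimmed to the essentials. */
static int example_query_fan_rpm(struct amd_powerplay *pp, uint32_t *rpm)
{
	if (!pp->pp_funcs->get_fan_speed_rpm)
		return -EINVAL;	/* hook is optional */
	return pp->pp_funcs->get_fan_speed_rpm(pp->pp_handle, rpm);
}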
diff --git a/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h b/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h
index d4495839c64c..26129972f686 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h
@@ -334,6 +334,7 @@ struct phm_clocks {
uint32_t clock[MAX_NUM_CLOCKS];
};
+extern int phm_disable_clock_power_gatings(struct pp_hwmgr *hwmgr);
extern int phm_enable_clock_power_gatings(struct pp_hwmgr *hwmgr);
extern int phm_powergate_uvd(struct pp_hwmgr *hwmgr, bool gate);
extern int phm_powergate_vce(struct pp_hwmgr *hwmgr, bool gate);
diff --git a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
index 4f0fedd1e9d3..6cdb7cbf515e 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
@@ -38,8 +38,6 @@ struct pp_hwmgr;
struct phm_fan_speed_info;
struct pp_atomctrl_voltage_table;
-extern int amdgpu_powercontainment;
-extern int amdgpu_sclk_deep_sleep_en;
extern unsigned amdgpu_pp_feature_mask;
#define VOLTAGE_SCALE 4
@@ -85,7 +83,9 @@ enum PP_FEATURE_MASK {
PP_SMC_VOLTAGE_CONTROL_MASK = 0x40,
PP_VBI_TIME_SUPPORT_MASK = 0x80,
PP_ULV_MASK = 0x100,
- PP_ENABLE_GFX_CG_THRU_SMU = 0x200
+ PP_ENABLE_GFX_CG_THRU_SMU = 0x200,
+ PP_CLOCK_STRETCH_MASK = 0x400,
+ PP_OD_FUZZY_FAN_CONTROL_MASK = 0x800
};
enum PHM_BackEnd_Magic {
@@ -367,7 +367,7 @@ struct pp_table_func {
int (*pptable_get_vce_state_table_entry)(
struct pp_hwmgr *hwmgr,
unsigned long i,
- struct pp_vce_state *vce_state,
+ struct amd_vce_state *vce_state,
void **clock_info,
unsigned long *flag);
};
@@ -586,18 +586,6 @@ struct phm_microcode_version_info {
uint32_t NB;
};
-#define PP_MAX_VCE_LEVELS 6
-
-enum PP_VCE_LEVEL {
- PP_VCE_LEVEL_AC_ALL = 0, /* AC, All cases */
- PP_VCE_LEVEL_DC_EE = 1, /* DC, entropy encoding */
- PP_VCE_LEVEL_DC_LL_LOW = 2, /* DC, low latency queue, res <= 720 */
- PP_VCE_LEVEL_DC_LL_HIGH = 3, /* DC, low latency queue, 1080 >= res > 720 */
- PP_VCE_LEVEL_DC_GP_LOW = 4, /* DC, general purpose queue, res <= 720 */
- PP_VCE_LEVEL_DC_GP_HIGH = 5, /* DC, general purpose queue, 1080 >= res > 720 */
-};
-
-
enum PP_TABLE_VERSION {
PP_TABLE_V0 = 0,
PP_TABLE_V1,
@@ -620,7 +608,7 @@ struct pp_hwmgr {
void *hardcode_pp_table;
bool need_pp_table_upload;
- struct pp_vce_state vce_states[PP_MAX_VCE_LEVELS];
+ struct amd_vce_state vce_states[AMD_MAX_VCE_LEVELS];
uint32_t num_vce_state_tables;
enum amd_dpm_forced_level dpm_level;
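The private pp_vce_state array is replaced by the shared amd_vce_state type so VCE clock states can be exported through the new get_vce_clock_state hook without copying. Assumed shape of the shared definition (it lives in amd_shared.h and mirrors the pp_vce_state removed from power_state.h below):

/* Assumption: fields mirror the removed pp_vce_state. */
struct amd_vce_state {
	uint32_t evclk;	/* VCE video encode clock */
	uint32_t ecclk;	/* VCE engine clock */
	uint32_t sclk;	/* system (gfx) clock */
	uint32_t mclk;	/* memory clock */
};
#define AMD_MAX_VCE_LEVELS 6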
diff --git a/drivers/gpu/drm/amd/powerplay/inc/power_state.h b/drivers/gpu/drm/amd/powerplay/inc/power_state.h
index 9ceaed9ac52a..827860fffe78 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/power_state.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/power_state.h
@@ -156,15 +156,6 @@ struct pp_power_state {
struct pp_hw_power_state hardware;
};
-
-/*Structure to hold a VCE state entry*/
-struct pp_vce_state {
- uint32_t evclk;
- uint32_t ecclk;
- uint32_t sclk;
- uint32_t mclk;
-};
-
enum PP_MMProfilingState {
PP_MMProfilingState_NA = 0,
PP_MMProfilingState_Started,
diff --git a/drivers/gpu/drm/amd/powerplay/inc/pp_acpi.h b/drivers/gpu/drm/amd/powerplay/inc/pp_acpi.h
index 3df5de2cdab0..8fe8ba9434ff 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/pp_acpi.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/pp_acpi.h
@@ -21,9 +21,6 @@
*
*/
-extern bool acpi_atcs_functions_supported(void *device,
- uint32_t index);
-extern int acpi_pcie_perf_request(void *device,
- uint8_t perf_req,
- bool advertise);
-extern bool acpi_atcs_notify_pcie_device_ready(void *device);
+bool acpi_atcs_functions_supported(void *device, uint32_t index);
+int acpi_pcie_perf_request(void *device, uint8_t perf_req, bool advertise);
+bool acpi_atcs_notify_pcie_device_ready(void *device);
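This hunk is purely cosmetic: for function declarations, C implies extern, so dropping the keyword (and re-wrapping each declaration onto one line) changes nothing semantically. The two forms below are equivalent:

extern bool acpi_atcs_functions_supported(void *device, uint32_t index);
bool acpi_atcs_functions_supported(void *device, uint32_t index);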
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smc.c b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smc.c
index 76310ac7ef0d..6aeb1d20cc3b 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smc.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smc.c
@@ -1958,6 +1958,12 @@ int fiji_thermal_setup_fan_table(struct pp_hwmgr *hwmgr)
int res;
uint64_t tmp64;
+ if (hwmgr->thermal_controller.fanInfo.bNoFan) {
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_MicrocodeFanControl);
+ return 0;
+ }
+
if (smu_data->smu7_data.fan_table_start == 0) {
phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_MicrocodeFanControl);
@@ -2049,7 +2055,7 @@ int fiji_thermal_setup_fan_table(struct pp_hwmgr *hwmgr)
return 0;
}
-int fiji_program_mem_timing_parameters(struct pp_hwmgr *hwmgr)
+static int fiji_program_mem_timing_parameters(struct pp_hwmgr *hwmgr)
{
struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
@@ -2125,7 +2131,7 @@ uint32_t fiji_get_offsetof(uint32_t type, uint32_t member)
return offsetof(SMU73_Discrete_DpmTable, LowSclkInterruptThreshold);
}
}
- printk("cant't get the offset of type %x member %x \n", type, member);
+ printk(KERN_WARNING "can't get the offset of type %x member %x\n", type, member);
return 0;
}
@@ -2150,7 +2156,7 @@ uint32_t fiji_get_mac_definition(uint32_t value)
return SMU73_MAX_LEVELS_MVDD;
}
- printk("cant't get the mac of %x \n", value);
+ printk(KERN_WARNING "can't get the mac of %x\n", value);
return 0;
}
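The same fanless-board guard is inserted into the fan-table setup of fiji above and of iceland, polaris10 and tonga below. Annotated, the early return reads:

if (hwmgr->thermal_controller.fanInfo.bNoFan) {
	/* No fan fitted: withdraw the capability so the SMC
	 * microcode never tries to drive one, and bail out. */
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
		      PHM_PlatformCaps_MicrocodeFanControl);
	return 0;
}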
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c
index 02fe1df855a9..26eff56b4a99 100755..100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c
@@ -159,7 +159,7 @@ static int fiji_start_smu_in_non_protection_mode(struct pp_smumgr *smumgr)
return result;
}
-int fiji_setup_pwr_virus(struct pp_smumgr *smumgr)
+static int fiji_setup_pwr_virus(struct pp_smumgr *smumgr)
{
int i, result = -1;
uint32_t reg, data;
@@ -224,7 +224,7 @@ static int fiji_start_avfs_btc(struct pp_smumgr *smumgr)
return result;
}
-int fiji_setup_pm_fuse_for_avfs(struct pp_smumgr *smumgr)
+static int fiji_setup_pm_fuse_for_avfs(struct pp_smumgr *smumgr)
{
int result = 0;
uint32_t table_start;
@@ -260,7 +260,7 @@ int fiji_setup_pm_fuse_for_avfs(struct pp_smumgr *smumgr)
return result;
}
-int fiji_setup_graphics_level_structure(struct pp_smumgr *smumgr)
+static int fiji_setup_graphics_level_structure(struct pp_smumgr *smumgr)
{
int32_t vr_config;
uint32_t table_start;
@@ -299,7 +299,7 @@ int fiji_setup_graphics_level_structure(struct pp_smumgr *smumgr)
}
/* Work in Progress */
-int fiji_restore_vft_table(struct pp_smumgr *smumgr)
+static int fiji_restore_vft_table(struct pp_smumgr *smumgr)
{
struct fiji_smumgr *priv = (struct fiji_smumgr *)(smumgr->backend);
@@ -311,7 +311,7 @@ int fiji_restore_vft_table(struct pp_smumgr *smumgr)
}
/* Work in Progress */
-int fiji_save_vft_table(struct pp_smumgr *smumgr)
+static int fiji_save_vft_table(struct pp_smumgr *smumgr)
{
struct fiji_smumgr *priv = (struct fiji_smumgr *)(smumgr->backend);
@@ -322,7 +322,7 @@ int fiji_save_vft_table(struct pp_smumgr *smumgr)
return -EINVAL;
}
-int fiji_avfs_event_mgr(struct pp_smumgr *smumgr, bool smu_started)
+static int fiji_avfs_event_mgr(struct pp_smumgr *smumgr, bool smu_started)
{
struct fiji_smumgr *priv = (struct fiji_smumgr *)(smumgr->backend);
@@ -396,7 +396,8 @@ static int fiji_start_smu(struct pp_smumgr *smumgr)
struct fiji_smumgr *priv = (struct fiji_smumgr *)(smumgr->backend);
/* Only start SMC if SMC RAM is not running */
- if (!smu7_is_smc_ram_running(smumgr)) {
+ if (!(smu7_is_smc_ram_running(smumgr)
+ || cgs_is_virtualization_enabled(smumgr->device))) {
fiji_avfs_event_mgr(smumgr, false);
/* Check if SMU is running in protected mode */
@@ -443,6 +444,9 @@ static bool fiji_is_hw_avfs_present(struct pp_smumgr *smumgr)
uint32_t efuse = 0;
uint32_t mask = (1 << ((AVFS_EN_MSB - AVFS_EN_LSB) + 1)) - 1;
+ if (cgs_is_virtualization_enabled(smumgr->device))
+ return 0;
+
if (!atomctrl_read_efuse(smumgr->device, AVFS_EN_LSB, AVFS_EN_MSB,
mask, &efuse)) {
if (efuse)
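Both checks above key off cgs_is_virtualization_enabled(): in a virtualized setup the host owns the SMC, so the guest neither starts it (the start path is skipped as if SMC RAM were already running) nor probes the AVFS efuse. The combined start condition has this shape:

/* Start the SMC only on bare metal, and only when it is not
 * already running. */
if (!(smu7_is_smc_ram_running(smumgr) ||
      cgs_is_virtualization_enabled(smumgr->device))) {
	/* cold-start path */
}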
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smc.c b/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smc.c
index 8c889caba420..a24971a33bfd 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smc.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smc.c
@@ -2006,6 +2006,12 @@ int iceland_thermal_setup_fan_table(struct pp_hwmgr *hwmgr)
if (!phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_MicrocodeFanControl))
return 0;
+ if (hwmgr->thermal_controller.fanInfo.bNoFan) {
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_MicrocodeFanControl);
+ return 0;
+ }
+
if (0 == smu7_data->fan_table_start) {
phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_MicrocodeFanControl);
return 0;
@@ -2140,7 +2146,7 @@ uint32_t iceland_get_offsetof(uint32_t type, uint32_t member)
return offsetof(SMU71_Discrete_DpmTable, LowSclkInterruptThreshold);
}
}
- printk("cant't get the offset of type %x member %x \n", type, member);
+ printk(KERN_WARNING "can't get the offset of type %x member %x\n", type, member);
return 0;
}
@@ -2163,7 +2169,7 @@ uint32_t iceland_get_mac_definition(uint32_t value)
return SMU71_MAX_LEVELS_MVDD;
}
- printk("cant't get the mac of %x \n", value);
+ printk(KERN_WARNING "can't get the mac of %x\n", value);
return 0;
}
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smc.c b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smc.c
index 71bb2f8dc157..5190e821200c 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smc.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smc.c
@@ -1885,6 +1885,12 @@ int polaris10_thermal_setup_fan_table(struct pp_hwmgr *hwmgr)
int res;
uint64_t tmp64;
+ if (hwmgr->thermal_controller.fanInfo.bNoFan) {
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_MicrocodeFanControl);
+ return 0;
+ }
+
if (smu_data->smu7_data.fan_table_start == 0) {
phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_MicrocodeFanControl);
@@ -2174,7 +2180,7 @@ uint32_t polaris10_get_offsetof(uint32_t type, uint32_t member)
return offsetof(SMU74_Discrete_DpmTable, LowSclkInterruptThreshold);
}
}
- printk("cant't get the offset of type %x member %x \n", type, member);
+ printk(KERN_WARNING "can't get the offset of type %x member %x\n", type, member);
return 0;
}
@@ -2201,7 +2207,7 @@ uint32_t polaris10_get_mac_definition(uint32_t value)
return SMU7_UVD_MCLK_HANDSHAKE_DISABLE;
}
- printk("cant't get the mac of %x \n", value);
+ printk(KERN_WARNING "can't get the mac of %x\n", value);
return 0;
}
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c
index 5c3598ab7dae..f38a68747df0 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c
@@ -118,7 +118,7 @@ static int polaris10_perform_btc(struct pp_smumgr *smumgr)
}
-int polaris10_setup_graphics_level_structure(struct pp_smumgr *smumgr)
+static int polaris10_setup_graphics_level_structure(struct pp_smumgr *smumgr)
{
uint32_t vr_config;
uint32_t dpm_table_start;
@@ -172,7 +172,8 @@ int polaris10_setup_graphics_level_structure(struct pp_smumgr *smumgr)
return 0;
}
-int polaris10_avfs_event_mgr(struct pp_smumgr *smumgr, bool SMU_VFT_INTACT)
+static int
+polaris10_avfs_event_mgr(struct pp_smumgr *smumgr, bool SMU_VFT_INTACT)
{
struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend);
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c
index 6af744f42ec9..f49b5487b951 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c
@@ -278,6 +278,9 @@ enum cgs_ucode_id smu7_convert_fw_type_to_cgs(uint32_t fw_type)
case UCODE_ID_RLC_G:
result = CGS_UCODE_ID_RLC_G;
break;
+ case UCODE_ID_MEC_STORAGE:
+ result = CGS_UCODE_ID_STORAGE;
+ break;
default:
break;
}
@@ -363,12 +366,16 @@ static int smu7_populate_single_firmware_entry(struct pp_smumgr *smumgr,
&info);
if (!result) {
- entry->version = info.version;
+ entry->version = info.fw_version;
entry->id = (uint16_t)fw_type;
entry->image_addr_high = smu_upper_32_bits(info.mc_addr);
entry->image_addr_low = smu_lower_32_bits(info.mc_addr);
entry->meta_data_addr_high = 0;
entry->meta_data_addr_low = 0;
+
+	/* the firmware digest must be excluded from the size */
+ if (cgs_is_virtualization_enabled(smumgr->device))
+ info.image_size -= 20;
entry->data_size_byte = info.image_size;
entry->num_register_entries = 0;
}
@@ -400,8 +407,14 @@ int smu7_request_smu_load_fw(struct pp_smumgr *smumgr)
0x0);
if (smumgr->chip_id > CHIP_TOPAZ) { /* add support for Topaz */
- smu7_send_msg_to_smc_with_parameter(smumgr, PPSMC_MSG_SMU_DRAM_ADDR_HI, smu_data->smu_buffer.mc_addr_high);
- smu7_send_msg_to_smc_with_parameter(smumgr, PPSMC_MSG_SMU_DRAM_ADDR_LO, smu_data->smu_buffer.mc_addr_low);
+ if (!cgs_is_virtualization_enabled(smumgr->device)) {
+ smu7_send_msg_to_smc_with_parameter(smumgr,
+ PPSMC_MSG_SMU_DRAM_ADDR_HI,
+ smu_data->smu_buffer.mc_addr_high);
+ smu7_send_msg_to_smc_with_parameter(smumgr,
+ PPSMC_MSG_SMU_DRAM_ADDR_LO,
+ smu_data->smu_buffer.mc_addr_low);
+ }
fw_to_load = UCODE_ID_RLC_G_MASK
+ UCODE_ID_SDMA0_MASK
+ UCODE_ID_SDMA1_MASK
@@ -452,6 +465,10 @@ int smu7_request_smu_load_fw(struct pp_smumgr *smumgr)
PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(smumgr,
UCODE_ID_SDMA1, &toc->entry[toc->num_entries++]),
"Failed to Get Firmware Entry.", return -EINVAL);
+ if (cgs_is_virtualization_enabled(smumgr->device))
+ PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(smumgr,
+ UCODE_ID_MEC_STORAGE, &toc->entry[toc->num_entries++]),
+ "Failed to Get Firmware Entry.", return -EINVAL);
smu7_send_msg_to_smc_with_parameter(smumgr, PPSMC_MSG_DRV_DRAM_ADDR_HI, smu_data->header_buffer.mc_addr_high);
smu7_send_msg_to_smc_with_parameter(smumgr, PPSMC_MSG_DRV_DRAM_ADDR_LO, smu_data->header_buffer.mc_addr_low);
@@ -532,7 +549,6 @@ int smu7_init(struct pp_smumgr *smumgr)
smu_data = (struct smu7_smumgr *)(smumgr->backend);
smu_data->header_buffer.data_size =
((sizeof(struct SMU_DRAMData_TOC) / 4096) + 1) * 4096;
- smu_data->smu_buffer.data_size = 200*4096;
/* Allocate FW image data structure and header buffer and
* send the header buffer address to SMU */
@@ -555,6 +571,10 @@ int smu7_init(struct pp_smumgr *smumgr)
(cgs_handle_t)smu_data->header_buffer.handle);
return -EINVAL);
+ if (cgs_is_virtualization_enabled(smumgr->device))
+ return 0;
+
+ smu_data->smu_buffer.data_size = 200*4096;
smu_allocate_memory(smumgr->device,
smu_data->smu_buffer.data_size,
CGS_GPU_MEM_TYPE__VISIBLE_CONTIG_FB,
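The virtualization special-casing continues here: a guest returns early from smu7_init() before allocating the SMU scratch buffer (200 pages of 4 KiB, i.e. 800 KiB, now sized only on bare metal), never sends the SMU_DRAM_ADDR messages, and subtracts the 20-byte digest the host appends to each firmware image when populating table-of-contents entries:

/* Guest view of a firmware image: the trailing digest is metadata
 * for the host, not part of what the SMC should load. */
if (cgs_is_virtualization_enabled(smumgr->device))
	info.image_size -= 20;	/* strip the 20-byte digest */
entry->data_size_byte = info.image_size;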
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.h b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.h
index 76352f2423ae..919be435b49c 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.h
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.h
@@ -28,8 +28,6 @@
#include <pp_endian.h>
#define SMC_RAM_END 0x40000
-#define mmSMC_IND_INDEX_11 0x01AC
-#define mmSMC_IND_DATA_11 0x01AD
struct smu7_buffer_entry {
uint32_t data_size;
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smc.c b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smc.c
index de2a24d85f48..2e1493ce1bb5 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smc.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smc.c
@@ -2496,6 +2496,12 @@ int tonga_thermal_setup_fan_table(struct pp_hwmgr *hwmgr)
PHM_PlatformCaps_MicrocodeFanControl))
return 0;
+ if (hwmgr->thermal_controller.fanInfo.bNoFan) {
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_MicrocodeFanControl);
+ return 0;
+ }
+
if (0 == smu_data->smu7_data.fan_table_start) {
phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_MicrocodeFanControl);
@@ -2651,7 +2657,7 @@ uint32_t tonga_get_offsetof(uint32_t type, uint32_t member)
return offsetof(SMU72_Discrete_DpmTable, LowSclkInterruptThreshold);
}
}
- printk("cant't get the offset of type %x member %x\n", type, member);
+ printk(KERN_WARNING "can't get the offset of type %x member %x\n", type, member);
return 0;
}
@@ -2675,7 +2681,7 @@ uint32_t tonga_get_mac_definition(uint32_t value)
case SMU_MAX_LEVELS_MVDD:
return SMU72_MAX_LEVELS_MVDD;
}
- printk("cant't get the mac value %x\n", value);
+ printk(KERN_WARNING "can't get the mac value %x\n", value);
return 0;
}
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c
index 5f9124046b9b..eff9a232e72e 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c
@@ -140,7 +140,8 @@ static int tonga_start_smu(struct pp_smumgr *smumgr)
int result;
/* Only start SMC if SMC RAM is not running */
- if (!smu7_is_smc_ram_running(smumgr)) {
+ if (!(smu7_is_smc_ram_running(smumgr) ||
+ cgs_is_virtualization_enabled(smumgr->device))) {
/*Check if SMU is running in protected mode*/
if (0 == SMUM_READ_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC,
SMU_FIRMWARE, SMU_MODE)) {
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h b/drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h
index b961a1c6caf3..dbd4fd3a810b 100644
--- a/drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h
+++ b/drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h
@@ -17,7 +17,7 @@ TRACE_EVENT(amd_sched_job,
TP_STRUCT__entry(
__field(struct amd_sched_entity *, entity)
__field(struct amd_sched_job *, sched_job)
- __field(struct fence *, fence)
+ __field(struct dma_fence *, fence)
__field(const char *, name)
__field(u32, job_count)
__field(int, hw_job_count)
@@ -42,7 +42,7 @@ TRACE_EVENT(amd_sched_process_job,
TP_PROTO(struct amd_sched_fence *fence),
TP_ARGS(fence),
TP_STRUCT__entry(
- __field(struct fence *, fence)
+ __field(struct dma_fence *, fence)
),
TP_fast_assign(
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
index ffe1f85ce300..1bf83ed113b3 100644
--- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
+++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
@@ -32,7 +32,7 @@
static bool amd_sched_entity_is_ready(struct amd_sched_entity *entity);
static void amd_sched_wakeup(struct amd_gpu_scheduler *sched);
-static void amd_sched_process_job(struct fence *f, struct fence_cb *cb);
+static void amd_sched_process_job(struct dma_fence *f, struct dma_fence_cb *cb);
/* Initialize a given run queue struct */
static void amd_sched_rq_init(struct amd_sched_rq *rq)
@@ -138,7 +138,7 @@ int amd_sched_entity_init(struct amd_gpu_scheduler *sched,
return r;
atomic_set(&entity->fence_seq, 0);
- entity->fence_context = fence_context_alloc(2);
+ entity->fence_context = dma_fence_context_alloc(2);
return 0;
}
@@ -218,32 +218,32 @@ void amd_sched_entity_fini(struct amd_gpu_scheduler *sched,
kfifo_free(&entity->job_queue);
}
-static void amd_sched_entity_wakeup(struct fence *f, struct fence_cb *cb)
+static void amd_sched_entity_wakeup(struct dma_fence *f, struct dma_fence_cb *cb)
{
struct amd_sched_entity *entity =
container_of(cb, struct amd_sched_entity, cb);
entity->dependency = NULL;
- fence_put(f);
+ dma_fence_put(f);
amd_sched_wakeup(entity->sched);
}
-static void amd_sched_entity_clear_dep(struct fence *f, struct fence_cb *cb)
+static void amd_sched_entity_clear_dep(struct dma_fence *f, struct dma_fence_cb *cb)
{
struct amd_sched_entity *entity =
container_of(cb, struct amd_sched_entity, cb);
entity->dependency = NULL;
- fence_put(f);
+ dma_fence_put(f);
}
static bool amd_sched_entity_add_dependency_cb(struct amd_sched_entity *entity)
{
struct amd_gpu_scheduler *sched = entity->sched;
- struct fence * fence = entity->dependency;
+ struct dma_fence * fence = entity->dependency;
struct amd_sched_fence *s_fence;
if (fence->context == entity->fence_context) {
/* We can ignore fences from ourself */
- fence_put(entity->dependency);
+ dma_fence_put(entity->dependency);
return false;
}
@@ -254,23 +254,23 @@ static bool amd_sched_entity_add_dependency_cb(struct amd_sched_entity *entity)
* Fence is from the same scheduler, only need to wait for
* it to be scheduled
*/
- fence = fence_get(&s_fence->scheduled);
- fence_put(entity->dependency);
+ fence = dma_fence_get(&s_fence->scheduled);
+ dma_fence_put(entity->dependency);
entity->dependency = fence;
- if (!fence_add_callback(fence, &entity->cb,
- amd_sched_entity_clear_dep))
+ if (!dma_fence_add_callback(fence, &entity->cb,
+ amd_sched_entity_clear_dep))
return true;
/* Ignore it when it is already scheduled */
- fence_put(fence);
+ dma_fence_put(fence);
return false;
}
- if (!fence_add_callback(entity->dependency, &entity->cb,
- amd_sched_entity_wakeup))
+ if (!dma_fence_add_callback(entity->dependency, &entity->cb,
+ amd_sched_entity_wakeup))
return true;
- fence_put(entity->dependency);
+ dma_fence_put(entity->dependency);
return false;
}
@@ -351,7 +351,8 @@ static void amd_sched_job_finish(struct work_struct *work)
sched->ops->free_job(s_job);
}
-static void amd_sched_job_finish_cb(struct fence *f, struct fence_cb *cb)
+static void amd_sched_job_finish_cb(struct dma_fence *f,
+ struct dma_fence_cb *cb)
{
struct amd_sched_job *job = container_of(cb, struct amd_sched_job,
finish_cb);
@@ -385,8 +386,8 @@ void amd_sched_hw_job_reset(struct amd_gpu_scheduler *sched)
spin_lock(&sched->job_list_lock);
list_for_each_entry_reverse(s_job, &sched->ring_mirror_list, node) {
- if (fence_remove_callback(s_job->s_fence->parent, &s_job->s_fence->cb)) {
- fence_put(s_job->s_fence->parent);
+ if (dma_fence_remove_callback(s_job->s_fence->parent, &s_job->s_fence->cb)) {
+ dma_fence_put(s_job->s_fence->parent);
s_job->s_fence->parent = NULL;
}
}
@@ -407,21 +408,21 @@ void amd_sched_job_recovery(struct amd_gpu_scheduler *sched)
list_for_each_entry_safe(s_job, tmp, &sched->ring_mirror_list, node) {
struct amd_sched_fence *s_fence = s_job->s_fence;
- struct fence *fence;
+ struct dma_fence *fence;
spin_unlock(&sched->job_list_lock);
fence = sched->ops->run_job(s_job);
atomic_inc(&sched->hw_rq_count);
if (fence) {
- s_fence->parent = fence_get(fence);
- r = fence_add_callback(fence, &s_fence->cb,
- amd_sched_process_job);
+ s_fence->parent = dma_fence_get(fence);
+ r = dma_fence_add_callback(fence, &s_fence->cb,
+ amd_sched_process_job);
if (r == -ENOENT)
amd_sched_process_job(fence, &s_fence->cb);
else if (r)
DRM_ERROR("fence add callback failed (%d)\n",
r);
- fence_put(fence);
+ dma_fence_put(fence);
} else {
DRM_ERROR("Failed to run job!\n");
amd_sched_process_job(NULL, &s_fence->cb);
@@ -443,8 +444,8 @@ void amd_sched_entity_push_job(struct amd_sched_job *sched_job)
struct amd_sched_entity *entity = sched_job->s_entity;
trace_amd_sched_job(sched_job);
- fence_add_callback(&sched_job->s_fence->finished, &sched_job->finish_cb,
- amd_sched_job_finish_cb);
+ dma_fence_add_callback(&sched_job->s_fence->finished, &sched_job->finish_cb,
+ amd_sched_job_finish_cb);
wait_event(entity->sched->job_scheduled,
amd_sched_entity_in(sched_job));
}
@@ -508,7 +509,7 @@ amd_sched_select_entity(struct amd_gpu_scheduler *sched)
return entity;
}
-static void amd_sched_process_job(struct fence *f, struct fence_cb *cb)
+static void amd_sched_process_job(struct dma_fence *f, struct dma_fence_cb *cb)
{
struct amd_sched_fence *s_fence =
container_of(cb, struct amd_sched_fence, cb);
@@ -518,7 +519,7 @@ static void amd_sched_process_job(struct fence *f, struct fence_cb *cb)
amd_sched_fence_finished(s_fence);
trace_amd_sched_process_job(s_fence);
- fence_put(&s_fence->finished);
+ dma_fence_put(&s_fence->finished);
wake_up_interruptible(&sched->wake_up_worker);
}
@@ -544,7 +545,7 @@ static int amd_sched_main(void *param)
struct amd_sched_entity *entity = NULL;
struct amd_sched_fence *s_fence;
struct amd_sched_job *sched_job;
- struct fence *fence;
+ struct dma_fence *fence;
wait_event_interruptible(sched->wake_up_worker,
(!amd_sched_blocked(sched) &&
@@ -566,15 +567,15 @@ static int amd_sched_main(void *param)
fence = sched->ops->run_job(sched_job);
amd_sched_fence_scheduled(s_fence);
if (fence) {
- s_fence->parent = fence_get(fence);
- r = fence_add_callback(fence, &s_fence->cb,
- amd_sched_process_job);
+ s_fence->parent = dma_fence_get(fence);
+ r = dma_fence_add_callback(fence, &s_fence->cb,
+ amd_sched_process_job);
if (r == -ENOENT)
amd_sched_process_job(fence, &s_fence->cb);
else if (r)
DRM_ERROR("fence add callback failed (%d)\n",
r);
- fence_put(fence);
+ dma_fence_put(fence);
} else {
DRM_ERROR("Failed to run job!\n");
amd_sched_process_job(NULL, &s_fence->cb);
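The scheduler changes above are a mechanical rename from the old fence API to dma_fence: struct fence becomes struct dma_fence, fence_cb becomes dma_fence_cb, and every fence_*() helper gains a dma_ prefix. The callback contract is unchanged: dma_fence_add_callback() returns 0 once the callback is armed and -ENOENT if the fence had already signaled, in which case the caller runs the handler itself, exactly as the run-job paths above do. A minimal sketch of that arm-or-run pattern:

#include <linux/dma-fence.h>

static void job_done_cb(struct dma_fence *f, struct dma_fence_cb *cb)
{
	/* completion handling for the hardware fence */
}

/* inside a function holding a valid fence and an embedded cb: */
r = dma_fence_add_callback(fence, &s_fence->cb, job_done_cb);
if (r == -ENOENT)
	job_done_cb(fence, &s_fence->cb);	/* already signaled */
else if (r)
	DRM_ERROR("fence add callback failed (%d)\n", r);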
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
index 51068e6c3d9a..d8dc681bcda6 100644
--- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
+++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
@@ -25,7 +25,7 @@
#define _GPU_SCHEDULER_H_
#include <linux/kfifo.h>
-#include <linux/fence.h>
+#include <linux/dma-fence.h>
struct amd_gpu_scheduler;
struct amd_sched_rq;
@@ -47,8 +47,8 @@ struct amd_sched_entity {
atomic_t fence_seq;
uint64_t fence_context;
- struct fence *dependency;
- struct fence_cb cb;
+ struct dma_fence *dependency;
+ struct dma_fence_cb cb;
};
/**
@@ -63,10 +63,10 @@ struct amd_sched_rq {
};
struct amd_sched_fence {
- struct fence scheduled;
- struct fence finished;
- struct fence_cb cb;
- struct fence *parent;
+ struct dma_fence scheduled;
+ struct dma_fence finished;
+ struct dma_fence_cb cb;
+ struct dma_fence *parent;
struct amd_gpu_scheduler *sched;
spinlock_t lock;
void *owner;
@@ -76,15 +76,15 @@ struct amd_sched_job {
struct amd_gpu_scheduler *sched;
struct amd_sched_entity *s_entity;
struct amd_sched_fence *s_fence;
- struct fence_cb finish_cb;
+ struct dma_fence_cb finish_cb;
struct work_struct finish_work;
struct list_head node;
struct delayed_work work_tdr;
};
-extern const struct fence_ops amd_sched_fence_ops_scheduled;
-extern const struct fence_ops amd_sched_fence_ops_finished;
-static inline struct amd_sched_fence *to_amd_sched_fence(struct fence *f)
+extern const struct dma_fence_ops amd_sched_fence_ops_scheduled;
+extern const struct dma_fence_ops amd_sched_fence_ops_finished;
+static inline struct amd_sched_fence *to_amd_sched_fence(struct dma_fence *f)
{
if (f->ops == &amd_sched_fence_ops_scheduled)
return container_of(f, struct amd_sched_fence, scheduled);
@@ -100,8 +100,8 @@ static inline struct amd_sched_fence *to_amd_sched_fence(struct fence *f)
* these functions should be implemented in driver side
*/
struct amd_sched_backend_ops {
- struct fence *(*dependency)(struct amd_sched_job *sched_job);
- struct fence *(*run_job)(struct amd_sched_job *sched_job);
+ struct dma_fence *(*dependency)(struct amd_sched_job *sched_job);
+ struct dma_fence *(*run_job)(struct amd_sched_job *sched_job);
void (*timedout_job)(struct amd_sched_job *sched_job);
void (*free_job)(struct amd_sched_job *sched_job);
};
diff --git a/drivers/gpu/drm/amd/scheduler/sched_fence.c b/drivers/gpu/drm/amd/scheduler/sched_fence.c
index 88fc2d662579..33f54d0a5c4f 100644
--- a/drivers/gpu/drm/amd/scheduler/sched_fence.c
+++ b/drivers/gpu/drm/amd/scheduler/sched_fence.c
@@ -61,46 +61,50 @@ struct amd_sched_fence *amd_sched_fence_create(struct amd_sched_entity *entity,
spin_lock_init(&fence->lock);
seq = atomic_inc_return(&entity->fence_seq);
- fence_init(&fence->scheduled, &amd_sched_fence_ops_scheduled,
- &fence->lock, entity->fence_context, seq);
- fence_init(&fence->finished, &amd_sched_fence_ops_finished,
- &fence->lock, entity->fence_context + 1, seq);
+ dma_fence_init(&fence->scheduled, &amd_sched_fence_ops_scheduled,
+ &fence->lock, entity->fence_context, seq);
+ dma_fence_init(&fence->finished, &amd_sched_fence_ops_finished,
+ &fence->lock, entity->fence_context + 1, seq);
return fence;
}
void amd_sched_fence_scheduled(struct amd_sched_fence *fence)
{
- int ret = fence_signal(&fence->scheduled);
+ int ret = dma_fence_signal(&fence->scheduled);
if (!ret)
- FENCE_TRACE(&fence->scheduled, "signaled from irq context\n");
+ DMA_FENCE_TRACE(&fence->scheduled,
+ "signaled from irq context\n");
else
- FENCE_TRACE(&fence->scheduled, "was already signaled\n");
+ DMA_FENCE_TRACE(&fence->scheduled,
+ "was already signaled\n");
}
void amd_sched_fence_finished(struct amd_sched_fence *fence)
{
- int ret = fence_signal(&fence->finished);
+ int ret = dma_fence_signal(&fence->finished);
if (!ret)
- FENCE_TRACE(&fence->finished, "signaled from irq context\n");
+ DMA_FENCE_TRACE(&fence->finished,
+ "signaled from irq context\n");
else
- FENCE_TRACE(&fence->finished, "was already signaled\n");
+ DMA_FENCE_TRACE(&fence->finished,
+ "was already signaled\n");
}
-static const char *amd_sched_fence_get_driver_name(struct fence *fence)
+static const char *amd_sched_fence_get_driver_name(struct dma_fence *fence)
{
return "amd_sched";
}
-static const char *amd_sched_fence_get_timeline_name(struct fence *f)
+static const char *amd_sched_fence_get_timeline_name(struct dma_fence *f)
{
struct amd_sched_fence *fence = to_amd_sched_fence(f);
return (const char *)fence->sched->name;
}
-static bool amd_sched_fence_enable_signaling(struct fence *f)
+static bool amd_sched_fence_enable_signaling(struct dma_fence *f)
{
return true;
}
@@ -114,10 +118,10 @@ static bool amd_sched_fence_enable_signaling(struct fence *f)
*/
static void amd_sched_fence_free(struct rcu_head *rcu)
{
- struct fence *f = container_of(rcu, struct fence, rcu);
+ struct dma_fence *f = container_of(rcu, struct dma_fence, rcu);
struct amd_sched_fence *fence = to_amd_sched_fence(f);
- fence_put(fence->parent);
+ dma_fence_put(fence->parent);
kmem_cache_free(sched_fence_slab, fence);
}
@@ -129,7 +133,7 @@ static void amd_sched_fence_free(struct rcu_head *rcu)
* This function is called when the reference count becomes zero.
* It just RCU schedules freeing up the fence.
*/
-static void amd_sched_fence_release_scheduled(struct fence *f)
+static void amd_sched_fence_release_scheduled(struct dma_fence *f)
{
struct amd_sched_fence *fence = to_amd_sched_fence(f);
@@ -143,27 +147,27 @@ static void amd_sched_fence_release_scheduled(struct fence *f)
*
* Drop the extra reference from the scheduled fence to the base fence.
*/
-static void amd_sched_fence_release_finished(struct fence *f)
+static void amd_sched_fence_release_finished(struct dma_fence *f)
{
struct amd_sched_fence *fence = to_amd_sched_fence(f);
- fence_put(&fence->scheduled);
+ dma_fence_put(&fence->scheduled);
}
-const struct fence_ops amd_sched_fence_ops_scheduled = {
+const struct dma_fence_ops amd_sched_fence_ops_scheduled = {
.get_driver_name = amd_sched_fence_get_driver_name,
.get_timeline_name = amd_sched_fence_get_timeline_name,
.enable_signaling = amd_sched_fence_enable_signaling,
.signaled = NULL,
- .wait = fence_default_wait,
+ .wait = dma_fence_default_wait,
.release = amd_sched_fence_release_scheduled,
};
-const struct fence_ops amd_sched_fence_ops_finished = {
+const struct dma_fence_ops amd_sched_fence_ops_finished = {
.get_driver_name = amd_sched_fence_get_driver_name,
.get_timeline_name = amd_sched_fence_get_timeline_name,
.enable_signaling = amd_sched_fence_enable_signaling,
.signaled = NULL,
- .wait = fence_default_wait,
+ .wait = dma_fence_default_wait,
.release = amd_sched_fence_release_finished,
};
diff --git a/drivers/gpu/drm/arc/arcpgu_drv.c b/drivers/gpu/drm/arc/arcpgu_drv.c
index 28e6471257d0..0b6eaa49a1db 100644
--- a/drivers/gpu/drm/arc/arcpgu_drv.c
+++ b/drivers/gpu/drm/arc/arcpgu_drv.c
@@ -65,9 +65,7 @@ static const struct file_operations arcpgu_drm_ops = {
.open = drm_open,
.release = drm_release,
.unlocked_ioctl = drm_ioctl,
-#ifdef CONFIG_COMPAT
.compat_ioctl = drm_compat_ioctl,
-#endif
.poll = drm_poll,
.read = drm_read,
.llseek = no_llseek,
diff --git a/drivers/gpu/drm/arc/arcpgu_sim.c b/drivers/gpu/drm/arc/arcpgu_sim.c
index 2bf06d71556a..bca3a678c955 100644
--- a/drivers/gpu/drm/arc/arcpgu_sim.c
+++ b/drivers/gpu/drm/arc/arcpgu_sim.c
@@ -41,12 +41,6 @@ static int arcpgu_drm_connector_get_modes(struct drm_connector *connector)
return count;
}
-static enum drm_connector_status
-arcpgu_drm_connector_detect(struct drm_connector *connector, bool force)
-{
- return connector_status_connected;
-}
-
static void arcpgu_drm_connector_destroy(struct drm_connector *connector)
{
drm_connector_unregister(connector);
@@ -61,7 +55,6 @@ arcpgu_drm_connector_helper_funcs = {
static const struct drm_connector_funcs arcpgu_drm_connector_funcs = {
.dpms = drm_helper_connector_dpms,
.reset = drm_atomic_helper_connector_reset,
- .detect = arcpgu_drm_connector_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.destroy = arcpgu_drm_connector_destroy,
.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
diff --git a/drivers/gpu/drm/arm/hdlcd_crtc.c b/drivers/gpu/drm/arm/hdlcd_crtc.c
index 28341b32067f..7d4e5aa77195 100644
--- a/drivers/gpu/drm/arm/hdlcd_crtc.c
+++ b/drivers/gpu/drm/arm/hdlcd_crtc.c
@@ -222,14 +222,12 @@ static void hdlcd_plane_atomic_update(struct drm_plane *plane,
{
struct hdlcd_drm_private *hdlcd;
struct drm_gem_cma_object *gem;
- unsigned int depth, bpp;
u32 src_w, src_h, dest_w, dest_h;
dma_addr_t scanout_start;
if (!plane->state->fb)
return;
- drm_fb_get_bpp_depth(plane->state->fb->pixel_format, &depth, &bpp);
src_w = plane->state->src_w >> 16;
src_h = plane->state->src_h >> 16;
dest_w = plane->state->crtc_w;
@@ -237,7 +235,8 @@ static void hdlcd_plane_atomic_update(struct drm_plane *plane,
gem = drm_fb_cma_get_gem_obj(plane->state->fb, 0);
scanout_start = gem->paddr + plane->state->fb->offsets[0] +
plane->state->crtc_y * plane->state->fb->pitches[0] +
- plane->state->crtc_x * bpp / 8;
+ plane->state->crtc_x *
+ drm_format_plane_cpp(plane->state->fb->pixel_format, 0);
hdlcd = plane->dev->dev_private;
hdlcd_write(hdlcd, HDLCD_REG_FB_LINE_LENGTH, plane->state->fb->pitches[0]);
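Here (and in malidp_hw.c below) drm_fb_get_bpp_depth() is retired in favour of drm_format_plane_cpp(), which returns bytes per pixel for one plane of a format, so the bits-to-bytes division disappears from the scanout arithmetic. For example, with DRM_FORMAT_XRGB8888 the cpp is 4 and the start of the visible area computes as:

/* cpp = drm_format_plane_cpp(DRM_FORMAT_XRGB8888, 0) == 4 */
scanout_start = gem->paddr + fb->offsets[0]
		+ crtc_y * fb->pitches[0]
		+ crtc_x * 4;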
diff --git a/drivers/gpu/drm/arm/hdlcd_drv.c b/drivers/gpu/drm/arm/hdlcd_drv.c
index e138fb51e8ce..e5f4f4a6546d 100644
--- a/drivers/gpu/drm/arm/hdlcd_drv.c
+++ b/drivers/gpu/drm/arm/hdlcd_drv.c
@@ -268,9 +268,7 @@ static const struct file_operations fops = {
.open = drm_open,
.release = drm_release,
.unlocked_ioctl = drm_ioctl,
-#ifdef CONFIG_COMPAT
.compat_ioctl = drm_compat_ioctl,
-#endif
.poll = drm_poll,
.read = drm_read,
.llseek = noop_llseek,
@@ -337,14 +335,10 @@ static int hdlcd_drm_bind(struct device *dev)
if (ret)
goto err_free;
- ret = drm_dev_register(drm, 0);
- if (ret)
- goto err_unload;
-
ret = component_bind_all(dev, drm);
if (ret) {
DRM_ERROR("Failed to bind all components\n");
- goto err_unregister;
+ goto err_unload;
}
ret = pm_runtime_set_active(dev);
@@ -371,8 +365,17 @@ static int hdlcd_drm_bind(struct device *dev)
goto err_fbdev;
}
+ ret = drm_dev_register(drm, 0);
+ if (ret)
+ goto err_register;
+
return 0;
+err_register:
+ if (hdlcd->fbdev) {
+ drm_fbdev_cma_fini(hdlcd->fbdev);
+ hdlcd->fbdev = NULL;
+ }
err_fbdev:
drm_kms_helper_poll_fini(drm);
drm_vblank_cleanup(drm);
@@ -380,8 +383,6 @@ err_vblank:
pm_runtime_disable(drm->dev);
err_pm_active:
component_unbind_all(dev, drm);
-err_unregister:
- drm_dev_unregister(drm);
err_unload:
drm_irq_uninstall(drm);
of_reserved_mem_device_release(drm->dev);
@@ -398,6 +399,7 @@ static void hdlcd_drm_unbind(struct device *dev)
struct drm_device *drm = dev_get_drvdata(dev);
struct hdlcd_drm_private *hdlcd = drm->dev_private;
+ drm_dev_unregister(drm);
if (hdlcd->fbdev) {
drm_fbdev_cma_fini(hdlcd->fbdev);
hdlcd->fbdev = NULL;
@@ -411,7 +413,6 @@ static void hdlcd_drm_unbind(struct device *dev)
pm_runtime_disable(drm->dev);
of_reserved_mem_device_release(drm->dev);
drm_mode_config_cleanup(drm);
- drm_dev_unregister(drm);
drm_dev_unref(drm);
drm->dev_private = NULL;
dev_set_drvdata(dev, NULL);
@@ -453,7 +454,8 @@ static int hdlcd_probe(struct platform_device *pdev)
return -EAGAIN;
}
- component_match_add(&pdev->dev, &match, compare_dev, port);
+ drm_of_component_match_add(&pdev->dev, &match, compare_dev, port);
+ of_node_put(port);
return component_master_add_with_match(&pdev->dev, &hdlcd_master_ops,
match);
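hdlcd (above) and malidp (below) get the same lifecycle fix: drm_dev_register() moves to the very end of bind, because registration makes the device visible to userspace, so everything reachable from it must already be set up; unbind correspondingly unregisters first and tears down in reverse order. A skeleton of the resulting shape, with hypothetical labels:

static int example_bind(struct device *dev)
{
	struct drm_device *drm = dev_get_drvdata(dev);
	int ret;

	/* init modeset state, IRQs, vblank, fbdev emulation ... */
	ret = drm_dev_register(drm, 0);	/* last step: go live */
	if (ret)
		goto err_register;
	return 0;

err_register:
	/* unwind fbdev, vblank, IRQs in reverse order of setup */
	return ret;
}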
diff --git a/drivers/gpu/drm/arm/malidp_drv.c b/drivers/gpu/drm/arm/malidp_drv.c
index 9280358b8f15..32f746e31379 100644
--- a/drivers/gpu/drm/arm/malidp_drv.c
+++ b/drivers/gpu/drm/arm/malidp_drv.c
@@ -42,6 +42,7 @@ static int malidp_set_and_wait_config_valid(struct drm_device *drm)
struct malidp_hw_device *hwdev = malidp->dev;
int ret;
+ atomic_set(&malidp->config_valid, 0);
hwdev->set_config_valid(hwdev);
/* don't wait for config_valid flag if we are in config mode */
if (hwdev->in_config_mode(hwdev))
@@ -91,8 +92,7 @@ static void malidp_atomic_commit_tail(struct drm_atomic_state *state)
drm_atomic_helper_commit_modeset_disables(drm, state);
drm_atomic_helper_commit_modeset_enables(drm, state);
- drm_atomic_helper_commit_planes(drm, state,
- DRM_PLANE_COMMIT_ACTIVE_ONLY);
+ drm_atomic_helper_commit_planes(drm, state, 0);
malidp_atomic_commit_hw_done(state);
@@ -155,6 +155,12 @@ static int malidp_init(struct drm_device *drm)
return 0;
}
+static void malidp_fini(struct drm_device *drm)
+{
+ malidp_de_planes_destroy(drm);
+ drm_mode_config_cleanup(drm);
+}
+
static int malidp_irq_init(struct platform_device *pdev)
{
int irq_de, irq_se, ret = 0;
@@ -197,9 +203,7 @@ static const struct file_operations fops = {
.open = drm_open,
.release = drm_release,
.unlocked_ioctl = drm_ioctl,
-#ifdef CONFIG_COMPAT
.compat_ioctl = drm_compat_ioctl,
-#endif
.poll = drm_poll,
.read = drm_read,
.llseek = noop_llseek,
@@ -355,10 +359,6 @@ static int malidp_bind(struct device *dev)
if (ret < 0)
goto init_fail;
- ret = drm_dev_register(drm, 0);
- if (ret)
- goto register_fail;
-
/* Set the CRTC's port so that the encoder component can find it */
ep = of_graph_get_next_endpoint(dev->of_node, NULL);
if (!ep) {
@@ -377,6 +377,8 @@ static int malidp_bind(struct device *dev)
if (ret < 0)
goto irq_init_fail;
+ drm->irq_enabled = true;
+
ret = drm_vblank_init(drm, drm->mode_config.num_crtc);
if (ret < 0) {
DRM_ERROR("failed to initialise vblank\n");
@@ -395,23 +397,31 @@ static int malidp_bind(struct device *dev)
}
drm_kms_helper_poll_init(drm);
+
+ ret = drm_dev_register(drm, 0);
+ if (ret)
+ goto register_fail;
+
return 0;
+register_fail:
+ if (malidp->fbdev) {
+ drm_fbdev_cma_fini(malidp->fbdev);
+ malidp->fbdev = NULL;
+ }
fbdev_fail:
drm_vblank_cleanup(drm);
vblank_fail:
malidp_se_irq_fini(drm);
malidp_de_irq_fini(drm);
+ drm->irq_enabled = false;
irq_init_fail:
component_unbind_all(dev, drm);
bind_fail:
of_node_put(malidp->crtc.port);
malidp->crtc.port = NULL;
port_fail:
- drm_dev_unregister(drm);
-register_fail:
- malidp_de_planes_destroy(drm);
- drm_mode_config_cleanup(drm);
+ malidp_fini(drm);
init_fail:
drm->dev_private = NULL;
dev_set_drvdata(dev, NULL);
@@ -432,6 +442,7 @@ static void malidp_unbind(struct device *dev)
struct malidp_drm *malidp = drm->dev_private;
struct malidp_hw_device *hwdev = malidp->dev;
+ drm_dev_unregister(drm);
if (malidp->fbdev) {
drm_fbdev_cma_fini(malidp->fbdev);
malidp->fbdev = NULL;
@@ -443,9 +454,7 @@ static void malidp_unbind(struct device *dev)
component_unbind_all(dev, drm);
of_node_put(malidp->crtc.port);
malidp->crtc.port = NULL;
- drm_dev_unregister(drm);
- malidp_de_planes_destroy(drm);
- drm_mode_config_cleanup(drm);
+ malidp_fini(drm);
drm->dev_private = NULL;
dev_set_drvdata(dev, NULL);
clk_disable_unprepare(hwdev->mclk);
@@ -493,7 +502,9 @@ static int malidp_platform_probe(struct platform_device *pdev)
return -EAGAIN;
}
- component_match_add(&pdev->dev, &match, malidp_compare_dev, port);
+ drm_of_component_match_add(&pdev->dev, &match, malidp_compare_dev,
+ port);
+ of_node_put(port);
return component_master_add_with_match(&pdev->dev, &malidp_master_ops,
match);
}
diff --git a/drivers/gpu/drm/arm/malidp_drv.h b/drivers/gpu/drm/arm/malidp_drv.h
index 271d2fb9711c..9fc8a2e405e4 100644
--- a/drivers/gpu/drm/arm/malidp_drv.h
+++ b/drivers/gpu/drm/arm/malidp_drv.h
@@ -39,6 +39,9 @@ struct malidp_plane_state {
/* size of the required rotation memory if plane is rotated */
u32 rotmem_size;
+ /* internal format ID */
+ u8 format;
+ u8 n_planes;
};
#define to_malidp_plane(x) container_of(x, struct malidp_plane, base)
diff --git a/drivers/gpu/drm/arm/malidp_hw.c b/drivers/gpu/drm/arm/malidp_hw.c
index a6132f1d58c1..4bdf531f7844 100644
--- a/drivers/gpu/drm/arm/malidp_hw.c
+++ b/drivers/gpu/drm/arm/malidp_hw.c
@@ -125,6 +125,7 @@ static void malidp500_leave_config_mode(struct malidp_hw_device *hwdev)
{
u32 status, count = 100;
+ malidp_hw_clearbits(hwdev, MALIDP_CFG_VALID, MALIDP500_CONFIG_VALID);
malidp_hw_clearbits(hwdev, MALIDP500_DC_CONFIG_REQ, MALIDP500_DC_CONTROL);
while (count) {
status = malidp_hw_read(hwdev, hwdev->map.dc_base + MALIDP_REG_STATUS);
@@ -198,9 +199,6 @@ static void malidp500_modeset(struct malidp_hw_device *hwdev, struct videomode *
static int malidp500_rotmem_required(struct malidp_hw_device *hwdev, u16 w, u16 h, u32 fmt)
{
- unsigned int depth;
- int bpp;
-
/* RGB888 or BGR888 can't be rotated */
if ((fmt == DRM_FORMAT_RGB888) || (fmt == DRM_FORMAT_BGR888))
return -EINVAL;
@@ -210,9 +208,7 @@ static int malidp500_rotmem_required(struct malidp_hw_device *hwdev, u16 w, u16
* worth of pixel data. Required size is then:
* size = rotated_width * (bpp / 8) * 8;
*/
- drm_fb_get_bpp_depth(fmt, &depth, &bpp);
-
- return w * bpp;
+ return w * drm_format_plane_cpp(fmt, 0) * 8;
}
static int malidp550_query_hw(struct malidp_hw_device *hwdev)
@@ -271,6 +267,7 @@ static void malidp550_leave_config_mode(struct malidp_hw_device *hwdev)
{
u32 status, count = 100;
+ malidp_hw_clearbits(hwdev, MALIDP_CFG_VALID, MALIDP550_CONFIG_VALID);
malidp_hw_clearbits(hwdev, MALIDP550_DC_CONFIG_REQ, MALIDP550_DC_CONTROL);
while (count) {
status = malidp_hw_read(hwdev, hwdev->map.dc_base + MALIDP_REG_STATUS);
@@ -441,6 +438,7 @@ const struct malidp_hw_device malidp_device[MALIDP_MAX_DEVICES] = {
},
.input_formats = malidp500_de_formats,
.n_input_formats = ARRAY_SIZE(malidp500_de_formats),
+ .bus_align_bytes = 8,
},
.query_hw = malidp500_query_hw,
.enter_config_mode = malidp500_enter_config_mode,
@@ -473,6 +471,7 @@ const struct malidp_hw_device malidp_device[MALIDP_MAX_DEVICES] = {
},
.input_formats = malidp550_de_formats,
.n_input_formats = ARRAY_SIZE(malidp550_de_formats),
+ .bus_align_bytes = 8,
},
.query_hw = malidp550_query_hw,
.enter_config_mode = malidp550_enter_config_mode,
@@ -506,6 +505,7 @@ const struct malidp_hw_device malidp_device[MALIDP_MAX_DEVICES] = {
},
.input_formats = malidp550_de_formats,
.n_input_formats = ARRAY_SIZE(malidp550_de_formats),
+ .bus_align_bytes = 16,
},
.query_hw = malidp650_query_hw,
.enter_config_mode = malidp550_enter_config_mode,
diff --git a/drivers/gpu/drm/arm/malidp_hw.h b/drivers/gpu/drm/arm/malidp_hw.h
index 141743e9f3a6..087e1202db3d 100644
--- a/drivers/gpu/drm/arm/malidp_hw.h
+++ b/drivers/gpu/drm/arm/malidp_hw.h
@@ -88,6 +88,9 @@ struct malidp_hw_regmap {
/* list of supported input formats for each layer */
const struct malidp_input_format *input_formats;
const u8 n_input_formats;
+
+ /* pitch alignment requirement in bytes */
+ const u8 bus_align_bytes;
};
struct malidp_hw_device {
@@ -229,6 +232,12 @@ void malidp_se_irq_fini(struct drm_device *drm);
u8 malidp_hw_get_format_id(const struct malidp_hw_regmap *map,
u8 layer_id, u32 format);
+static inline bool malidp_hw_pitch_valid(struct malidp_hw_device *hwdev,
+ unsigned int pitch)
+{
+ return !(pitch & (hwdev->map.bus_align_bytes - 1));
+}
+
/*
* background color components are defined as 12bits values,
* they will be shifted right when stored on hardware that
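malidp_hw_pitch_valid() depends on bus_align_bytes being a power of two (8 for DP500/DP550, 16 for DP650): masking with align - 1 isolates the low bits of the pitch, which must all be zero for an aligned value. For instance:

/* align = 16 -> mask = 0xf */
4096 & (16 - 1)	/* == 0, pitch is valid   */
4100 & (16 - 1)	/* == 4, pitch is rejected */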
diff --git a/drivers/gpu/drm/arm/malidp_planes.c b/drivers/gpu/drm/arm/malidp_planes.c
index 82c193e5e0d6..63eec8f37cfc 100644
--- a/drivers/gpu/drm/arm/malidp_planes.c
+++ b/drivers/gpu/drm/arm/malidp_planes.c
@@ -27,6 +27,10 @@
#define LAYER_H_FLIP (1 << 10)
#define LAYER_V_FLIP (1 << 11)
#define LAYER_ROT_MASK (0xf << 8)
+#define LAYER_COMP_MASK (0x3 << 12)
+#define LAYER_COMP_PIXEL (0x3 << 12)
+#define LAYER_COMP_PLANE (0x2 << 12)
+#define MALIDP_LAYER_COMPOSE 0x008
#define MALIDP_LAYER_SIZE 0x00c
#define LAYER_H_VAL(x) (((x) & 0x1fff) << 0)
#define LAYER_V_VAL(x) (((x) & 0x1fff) << 16)
@@ -34,6 +38,14 @@
#define MALIDP_LAYER_OFFSET 0x014
#define MALIDP_LAYER_STRIDE 0x018
+/*
+ * This 4-entry look-up-table is used to determine the full 8-bit alpha value
+ * for formats with 1- or 2-bit alpha channels.
+ * We set it to give 100%/0% opacity for 1-bit formats and 100%/66%/33%/0%
+ * opacity for 2-bit formats.
+ */
+#define MALIDP_ALPHA_LUT 0xffaa5500
+
static void malidp_de_plane_destroy(struct drm_plane *plane)
{
struct malidp_plane *mp = to_malidp_plane(plane);
@@ -46,7 +58,8 @@ static void malidp_de_plane_destroy(struct drm_plane *plane)
devm_kfree(plane->dev->dev, mp);
}
-struct drm_plane_state *malidp_duplicate_plane_state(struct drm_plane *plane)
+static struct
+drm_plane_state *malidp_duplicate_plane_state(struct drm_plane *plane)
{
struct malidp_plane_state *state, *m_state;
@@ -58,13 +71,15 @@ struct drm_plane_state *malidp_duplicate_plane_state(struct drm_plane *plane)
m_state = to_malidp_plane_state(plane->state);
__drm_atomic_helper_plane_duplicate_state(plane, &state->base);
state->rotmem_size = m_state->rotmem_size;
+ state->format = m_state->format;
+ state->n_planes = m_state->n_planes;
}
return &state->base;
}
-void malidp_destroy_plane_state(struct drm_plane *plane,
- struct drm_plane_state *state)
+static void malidp_destroy_plane_state(struct drm_plane *plane,
+ struct drm_plane_state *state)
{
struct malidp_plane_state *m_state = to_malidp_plane_state(state);
@@ -75,6 +90,7 @@ void malidp_destroy_plane_state(struct drm_plane *plane,
static const struct drm_plane_funcs malidp_de_plane_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
+ .set_property = drm_atomic_helper_plane_set_property,
.destroy = malidp_de_plane_destroy,
.reset = drm_atomic_helper_plane_reset,
.atomic_duplicate_state = malidp_duplicate_plane_state,
@@ -86,17 +102,29 @@ static int malidp_de_plane_check(struct drm_plane *plane,
{
struct malidp_plane *mp = to_malidp_plane(plane);
struct malidp_plane_state *ms = to_malidp_plane_state(state);
- u8 format_id;
+ struct drm_framebuffer *fb;
+ int i;
u32 src_w, src_h;
if (!state->crtc || !state->fb)
return 0;
- format_id = malidp_hw_get_format_id(&mp->hwdev->map, mp->layer->id,
- state->fb->pixel_format);
- if (format_id == MALIDP_INVALID_FORMAT_ID)
+ fb = state->fb;
+
+ ms->format = malidp_hw_get_format_id(&mp->hwdev->map, mp->layer->id,
+ fb->pixel_format);
+ if (ms->format == MALIDP_INVALID_FORMAT_ID)
return -EINVAL;
+ ms->n_planes = drm_format_num_planes(fb->pixel_format);
+ for (i = 0; i < ms->n_planes; i++) {
+ if (!malidp_hw_pitch_valid(mp->hwdev, fb->pitches[i])) {
+ DRM_DEBUG_KMS("Invalid pitch %u for plane %d\n",
+ fb->pitches[i], i);
+ return -EINVAL;
+ }
+ }
+
src_w = state->src_w >> 16;
src_h = state->src_h >> 16;
@@ -135,17 +163,13 @@ static void malidp_de_plane_update(struct drm_plane *plane,
struct drm_gem_cma_object *obj;
struct malidp_plane *mp;
const struct malidp_hw_regmap *map;
- u8 format_id;
+ struct malidp_plane_state *ms = to_malidp_plane_state(plane->state);
u16 ptr;
- u32 format, src_w, src_h, dest_w, dest_h, val = 0;
- int num_planes, i;
+ u32 src_w, src_h, dest_w, dest_h, val;
+ int i;
mp = to_malidp_plane(plane);
-
map = &mp->hwdev->map;
- format = plane->state->fb->pixel_format;
- format_id = malidp_hw_get_format_id(map, mp->layer->id, format);
- num_planes = drm_format_num_planes(format);
/* convert src values from Q16 fixed point to integer */
src_w = plane->state->src_w >> 16;
@@ -158,9 +182,9 @@ static void malidp_de_plane_update(struct drm_plane *plane,
dest_h = plane->state->crtc_h;
}
- malidp_hw_write(mp->hwdev, format_id, mp->layer->base);
+ malidp_hw_write(mp->hwdev, ms->format, mp->layer->base);
- for (i = 0; i < num_planes; i++) {
+ for (i = 0; i < ms->n_planes; i++) {
/* calculate the offset for the layer's plane registers */
ptr = mp->layer->ptr + (i << 4);
@@ -181,9 +205,9 @@ static void malidp_de_plane_update(struct drm_plane *plane,
LAYER_V_VAL(plane->state->crtc_y),
mp->layer->base + MALIDP_LAYER_OFFSET);
- /* first clear the rotation bits in the register */
- malidp_hw_clearbits(mp->hwdev, LAYER_ROT_MASK,
- mp->layer->base + MALIDP_LAYER_CONTROL);
+ /* first clear the rotation bits */
+ val = malidp_hw_read(mp->hwdev, mp->layer->base + MALIDP_LAYER_CONTROL);
+ val &= ~LAYER_ROT_MASK;
/* setup the rotation and axis flip bits */
if (plane->state->rotation & DRM_ROTATE_MASK)
@@ -193,11 +217,18 @@ static void malidp_de_plane_update(struct drm_plane *plane,
if (plane->state->rotation & DRM_REFLECT_Y)
val |= LAYER_H_FLIP;
+ /*
+ * always enable pixel alpha blending until we have a way to change
+ * blend modes
+ */
+ val &= ~LAYER_COMP_MASK;
+ val |= LAYER_COMP_PIXEL;
+
/* set the 'enable layer' bit */
val |= LAYER_ENABLE;
- malidp_hw_setbits(mp->hwdev, val,
- mp->layer->base + MALIDP_LAYER_CONTROL);
+ malidp_hw_write(mp->hwdev, val,
+ mp->layer->base + MALIDP_LAYER_CONTROL);
}
static void malidp_de_plane_disable(struct drm_plane *plane,
@@ -222,6 +253,8 @@ int malidp_de_planes_init(struct drm_device *drm)
struct malidp_plane *plane = NULL;
enum drm_plane_type plane_type;
unsigned long crtcs = 1 << drm->mode_config.num_crtc;
+ unsigned long flags = DRM_ROTATE_0 | DRM_ROTATE_90 | DRM_ROTATE_180 |
+ DRM_ROTATE_270 | DRM_REFLECT_X | DRM_REFLECT_Y;
u32 *formats;
int ret, i, j, n;
@@ -254,26 +287,18 @@ int malidp_de_planes_init(struct drm_device *drm)
if (ret < 0)
goto cleanup;
- if (!drm->mode_config.rotation_property) {
- unsigned long flags = DRM_ROTATE_0 |
- DRM_ROTATE_90 |
- DRM_ROTATE_180 |
- DRM_ROTATE_270 |
- DRM_REFLECT_X |
- DRM_REFLECT_Y;
- drm->mode_config.rotation_property =
- drm_mode_create_rotation_property(drm, flags);
- }
- /* SMART layer can't be rotated */
- if (drm->mode_config.rotation_property && (id != DE_SMART))
- drm_object_attach_property(&plane->base.base,
- drm->mode_config.rotation_property,
- DRM_ROTATE_0);
-
drm_plane_helper_add(&plane->base,
&malidp_de_plane_helper_funcs);
plane->hwdev = malidp->dev;
plane->layer = &map->layers[i];
+
+ /* Skip the features which the SMART layer doesn't have */
+ if (id == DE_SMART)
+ continue;
+
+ drm_plane_create_rotation_property(&plane->base, DRM_ROTATE_0, flags);
+ malidp_hw_write(malidp->dev, MALIDP_ALPHA_LUT,
+ plane->layer->base + MALIDP_LAYER_COMPOSE);
}
kfree(formats);
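MALIDP_ALPHA_LUT packs one 8-bit alpha value per 2-bit code, least significant byte first, so writing 0xffaa5500 to each non-SMART layer's COMPOSE register gives the 100%/66%/33%/0% mapping the comment above describes (0xaa is roughly 66% of 255, 0x55 roughly 33%); 1-bit alpha formats presumably use only the two extreme entries. Decoding one entry:

/* 0xffaa5500: code 0 -> 0x00, code 1 -> 0x55,
 *             code 2 -> 0xaa, code 3 -> 0xff */
static uint8_t alpha_lut_entry(uint32_t lut, unsigned int code)
{
	return (lut >> (code * 8)) & 0xff;
}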
diff --git a/drivers/gpu/drm/armada/Makefile b/drivers/gpu/drm/armada/Makefile
index ffd673615772..a18f156c8b66 100644
--- a/drivers/gpu/drm/armada/Makefile
+++ b/drivers/gpu/drm/armada/Makefile
@@ -1,5 +1,5 @@
armada-y := armada_crtc.o armada_drv.o armada_fb.o armada_fbdev.o \
- armada_gem.o armada_overlay.o
+ armada_gem.o armada_overlay.o armada_trace.o
armada-y += armada_510.o
armada-$(CONFIG_DEBUG_FS) += armada_debugfs.o
diff --git a/drivers/gpu/drm/armada/armada_crtc.c b/drivers/gpu/drm/armada/armada_crtc.c
index a51f8cbcfe26..95cb3966b2ca 100644
--- a/drivers/gpu/drm/armada/armada_crtc.c
+++ b/drivers/gpu/drm/armada/armada_crtc.c
@@ -18,6 +18,7 @@
#include "armada_fb.h"
#include "armada_gem.h"
#include "armada_hw.h"
+#include "armada_trace.h"
struct armada_frame_work {
struct armada_plane_work work;
@@ -164,19 +165,37 @@ static void armada_drm_crtc_update(struct armada_crtc *dcrtc)
}
}
+void armada_drm_plane_calc_addrs(u32 *addrs, struct drm_framebuffer *fb,
+ int x, int y)
+{
+ u32 addr = drm_fb_obj(fb)->dev_addr;
+ u32 pixel_format = fb->pixel_format;
+ int num_planes = drm_format_num_planes(pixel_format);
+ int i;
+
+ if (num_planes > 3)
+ num_planes = 3;
+
+ for (i = 0; i < num_planes; i++)
+ addrs[i] = addr + fb->offsets[i] + y * fb->pitches[i] +
+ x * drm_format_plane_cpp(pixel_format, i);
+ for (; i < 3; i++)
+ addrs[i] = 0;
+}
+
static unsigned armada_drm_crtc_calc_fb(struct drm_framebuffer *fb,
int x, int y, struct armada_regs *regs, bool interlaced)
{
- struct armada_gem_object *obj = drm_fb_obj(fb);
unsigned pitch = fb->pitches[0];
- unsigned offset = y * pitch + x * fb->bits_per_pixel / 8;
- uint32_t addr_odd, addr_even;
+ u32 addrs[3], addr_odd, addr_even;
unsigned i = 0;
DRM_DEBUG_DRIVER("pitch %u x %d y %d bpp %d\n",
pitch, x, y, fb->bits_per_pixel);
- addr_odd = addr_even = obj->dev_addr + offset;
+ armada_drm_plane_calc_addrs(addrs, fb, x, y);
+
+ addr_odd = addr_even = addrs[0];
if (interlaced) {
addr_even += pitch;
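armada_drm_plane_calc_addrs() in the hunk above generalizes the old single-plane offset maths to up to three planes, using drm_format_plane_cpp() per plane. For a two-plane NV12 framebuffer, cpp is 1 byte for the Y plane and 2 bytes per interleaved CbCr pair, so mirroring the helper's arithmetic:

/* NV12: drm_format_plane_cpp(DRM_FORMAT_NV12, 0) == 1,
 *       drm_format_plane_cpp(DRM_FORMAT_NV12, 1) == 2 */
addrs[0] = addr + fb->offsets[0] + y * fb->pitches[0] + x * 1;
addrs[1] = addr + fb->offsets[1] + y * fb->pitches[1] + x * 2;
addrs[2] = 0;	/* unused third plane is zeroed */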
@@ -192,17 +211,18 @@ static unsigned armada_drm_crtc_calc_fb(struct drm_framebuffer *fb,
}
static void armada_drm_plane_work_run(struct armada_crtc *dcrtc,
- struct armada_plane *plane)
+ struct drm_plane *plane)
{
- struct armada_plane_work *work = xchg(&plane->work, NULL);
+ struct armada_plane *dplane = drm_to_armada_plane(plane);
+ struct armada_plane_work *work = xchg(&dplane->work, NULL);
/* Handle any pending frame work. */
if (work) {
- work->fn(dcrtc, plane, work);
+ work->fn(dcrtc, dplane, work);
drm_crtc_vblank_put(&dcrtc->crtc);
}
- wake_up(&plane->frame_wait);
+ wake_up(&dplane->frame_wait);
}
int armada_drm_plane_work_queue(struct armada_crtc *dcrtc,
@@ -307,14 +327,12 @@ static void armada_drm_crtc_finish_fb(struct armada_crtc *dcrtc,
static void armada_drm_vblank_off(struct armada_crtc *dcrtc)
{
- struct armada_plane *plane = drm_to_armada_plane(dcrtc->crtc.primary);
-
/*
* Tell the DRM core that vblank IRQs aren't going to happen for
* a while. This cleans up any pending vblank events for us.
*/
drm_crtc_vblank_off(&dcrtc->crtc);
- armada_drm_plane_work_run(dcrtc, plane);
+ armada_drm_plane_work_run(dcrtc, dcrtc->crtc.primary);
}
void armada_drm_crtc_gamma_set(struct drm_crtc *crtc, u16 r, u16 g, u16 b,
@@ -416,10 +434,8 @@ static void armada_drm_crtc_irq(struct armada_crtc *dcrtc, u32 stat)
spin_lock(&dcrtc->irq_lock);
ovl_plane = dcrtc->plane;
- if (ovl_plane) {
- struct armada_plane *plane = drm_to_armada_plane(ovl_plane);
- armada_drm_plane_work_run(dcrtc, plane);
- }
+ if (ovl_plane)
+ armada_drm_plane_work_run(dcrtc, ovl_plane);
if (stat & GRA_FRAME_IRQ && dcrtc->interlaced) {
int i = stat & GRA_FRAME_IRQ0 ? 0 : 1;
@@ -449,10 +465,8 @@ static void armada_drm_crtc_irq(struct armada_crtc *dcrtc, u32 stat)
spin_unlock(&dcrtc->irq_lock);
- if (stat & GRA_FRAME_IRQ) {
- struct armada_plane *plane = drm_to_armada_plane(dcrtc->crtc.primary);
- armada_drm_plane_work_run(dcrtc, plane);
- }
+ if (stat & GRA_FRAME_IRQ)
+ armada_drm_plane_work_run(dcrtc, dcrtc->crtc.primary);
}
static irqreturn_t armada_drm_irq(int irq, void *arg)
@@ -466,6 +480,8 @@ static irqreturn_t armada_drm_irq(int irq, void *arg)
*/
writel_relaxed(0, dcrtc->base + LCD_SPU_IRQ_ISR);
+ trace_armada_drm_irq(&dcrtc->crtc, stat);
+
/* Mask out those interrupts we haven't enabled */
v = stat & dcrtc->irq_ena;
@@ -531,6 +547,35 @@ static uint32_t armada_drm_crtc_calculate_csc(struct armada_crtc *dcrtc)
return val;
}
+static void armada_drm_primary_set(struct drm_crtc *crtc,
+ struct drm_plane *plane, int x, int y)
+{
+ struct armada_plane_state *state = &drm_to_armada_plane(plane)->state;
+ struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
+ struct armada_regs regs[8];
+ bool interlaced = dcrtc->interlaced;
+ unsigned i;
+ u32 ctrl0;
+
+ i = armada_drm_crtc_calc_fb(plane->fb, x, y, regs, interlaced);
+
+ armada_reg_queue_set(regs, i, state->dst_yx, LCD_SPU_GRA_OVSA_HPXL_VLN);
+ armada_reg_queue_set(regs, i, state->src_hw, LCD_SPU_GRA_HPXL_VLN);
+ armada_reg_queue_set(regs, i, state->dst_hw, LCD_SPU_GZM_HPXL_VLN);
+
+ ctrl0 = state->ctrl0;
+ if (interlaced)
+ ctrl0 |= CFG_GRA_FTOGGLE;
+
+ armada_reg_queue_mod(regs, i, ctrl0, CFG_GRAFORMAT |
+ CFG_GRA_MOD(CFG_SWAPRB | CFG_SWAPUV |
+ CFG_SWAPYU | CFG_YUV2RGB) |
+ CFG_PALETTE_ENA | CFG_GRA_FTOGGLE,
+ LCD_SPU_DMA_CTRL0);
+ armada_reg_queue_end(regs, i);
+ armada_drm_crtc_update_regs(dcrtc, regs);
+}
+
/* The mode_config.mutex will be held for this call */
static int armada_drm_crtc_mode_set(struct drm_crtc *crtc,
struct drm_display_mode *mode, struct drm_display_mode *adj,
@@ -547,9 +592,20 @@ static int armada_drm_crtc_mode_set(struct drm_crtc *crtc,
interlaced = !!(adj->flags & DRM_MODE_FLAG_INTERLACE);
- i = armada_drm_crtc_calc_fb(dcrtc->crtc.primary->fb,
- x, y, regs, interlaced);
+ val = CFG_GRA_ENA | CFG_GRA_HSMOOTH;
+ val |= CFG_GRA_FMT(drm_fb_to_armada_fb(dcrtc->crtc.primary->fb)->fmt);
+ val |= CFG_GRA_MOD(drm_fb_to_armada_fb(dcrtc->crtc.primary->fb)->mod);
+
+ if (drm_fb_to_armada_fb(dcrtc->crtc.primary->fb)->fmt > CFG_420)
+ val |= CFG_PALETTE_ENA;
+
+ drm_to_armada_plane(crtc->primary)->state.ctrl0 = val;
+ drm_to_armada_plane(crtc->primary)->state.src_hw =
+ drm_to_armada_plane(crtc->primary)->state.dst_hw =
+ adj->crtc_vdisplay << 16 | adj->crtc_hdisplay;
+ drm_to_armada_plane(crtc->primary)->state.dst_yx = 0;
+ i = 0;
rm = adj->crtc_hsync_start - adj->crtc_hdisplay;
lm = adj->crtc_htotal - adj->crtc_hsync_end;
bm = adj->crtc_vsync_start - adj->crtc_vdisplay;
@@ -625,8 +681,6 @@ static int armada_drm_crtc_mode_set(struct drm_crtc *crtc,
val = adj->crtc_vdisplay << 16 | adj->crtc_hdisplay;
armada_reg_queue_set(regs, i, val, LCD_SPU_V_H_ACTIVE);
- armada_reg_queue_set(regs, i, val, LCD_SPU_GRA_HPXL_VLN);
- armada_reg_queue_set(regs, i, val, LCD_SPU_GZM_HPXL_VLN);
armada_reg_queue_set(regs, i, (lm << 16) | rm, LCD_SPU_H_PORCH);
armada_reg_queue_set(regs, i, dcrtc->v[0].spu_v_porch, LCD_SPU_V_PORCH);
armada_reg_queue_set(regs, i, dcrtc->v[0].spu_v_h_total,
@@ -638,22 +692,6 @@ static int armada_drm_crtc_mode_set(struct drm_crtc *crtc,
ADV_VSYNCOFFEN, LCD_SPU_ADV_REG);
}
- val = CFG_GRA_ENA | CFG_GRA_HSMOOTH;
- val |= CFG_GRA_FMT(drm_fb_to_armada_fb(dcrtc->crtc.primary->fb)->fmt);
- val |= CFG_GRA_MOD(drm_fb_to_armada_fb(dcrtc->crtc.primary->fb)->mod);
-
- if (drm_fb_to_armada_fb(dcrtc->crtc.primary->fb)->fmt > CFG_420)
- val |= CFG_PALETTE_ENA;
-
- if (interlaced)
- val |= CFG_GRA_FTOGGLE;
-
- armada_reg_queue_mod(regs, i, val, CFG_GRAFORMAT |
- CFG_GRA_MOD(CFG_SWAPRB | CFG_SWAPUV |
- CFG_SWAPYU | CFG_YUV2RGB) |
- CFG_PALETTE_ENA | CFG_GRA_FTOGGLE,
- LCD_SPU_DMA_CTRL0);
-
val = adj->flags & DRM_MODE_FLAG_NVSYNC ? CFG_VSYNC_INV : 0;
armada_reg_queue_mod(regs, i, val, CFG_VSYNC_INV, LCD_SPU_DMA_CTRL1);
@@ -662,6 +700,8 @@ static int armada_drm_crtc_mode_set(struct drm_crtc *crtc,
armada_reg_queue_end(regs, i);
armada_drm_crtc_update_regs(dcrtc, regs);
+
+ armada_drm_primary_set(crtc, crtc->primary, x, y);
spin_unlock_irqrestore(&dcrtc->irq_lock, flags);
armada_drm_crtc_update(dcrtc);
@@ -1038,7 +1078,7 @@ static int armada_drm_crtc_page_flip(struct drm_crtc *crtc,
* interrupt, so complete it now.
*/
if (dpms_blanked(dcrtc->dpms))
- armada_drm_plane_work_run(dcrtc, drm_to_armada_plane(dcrtc->crtc.primary));
+ armada_drm_plane_work_run(dcrtc, dcrtc->crtc.primary);
return 0;
}
@@ -1172,7 +1212,6 @@ static int armada_drm_crtc_create(struct drm_device *drm, struct device *dev,
CFG_PDWN32x32 | CFG_PDWN16x66 | CFG_PDWN32x66 |
CFG_PDWN64x66, dcrtc->base + LCD_SPU_SRAM_PARA1);
writel_relaxed(0x2032ff81, dcrtc->base + LCD_SPU_DMA_CTRL1);
- writel_relaxed(0x00000000, dcrtc->base + LCD_SPU_GRA_OVSA_HPXL_VLN);
writel_relaxed(dcrtc->irq_ena, dcrtc->base + LCD_SPU_IRQ_ENA);
writel_relaxed(0, dcrtc->base + LCD_SPU_IRQ_ISR);
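
For reference, armada_drm_plane_calc_addrs() above reduces each scanout address to base + offsets[i] + y * pitches[i] + x * cpp(i) for at most three planes, zeroing the rest. A standalone sketch of the same arithmetic (plain C; the struct and values are illustrative, not from this patch):

	#include <stdint.h>
	#include <stdio.h>

	/* Illustrative stand-ins for the drm_framebuffer fields the patch uses. */
	struct fake_fb {
		uint32_t offsets[3];	/* byte offset of each plane in the buffer */
		uint32_t pitches[3];	/* bytes per scanline of each plane */
		int cpp[3];		/* bytes per pixel of each plane */
		int num_planes;
	};

	static void calc_addrs(uint32_t *addrs, uint32_t base,
			       const struct fake_fb *fb, int x, int y)
	{
		int i, n = fb->num_planes > 3 ? 3 : fb->num_planes;

		for (i = 0; i < n; i++)
			addrs[i] = base + fb->offsets[i] +
				   y * fb->pitches[i] + x * fb->cpp[i];
		for (; i < 3; i++)
			addrs[i] = 0;	/* unused planes scan out nothing */
	}

	int main(void)
	{
		/* NV12-like layout: full-size Y plane, interleaved UV plane. */
		struct fake_fb fb = {
			.offsets = { 0, 1920 * 1080, 0 },
			.pitches = { 1920, 1920, 0 },
			.cpp = { 1, 2, 0 },
			.num_planes = 2,
		};
		uint32_t addrs[3];

		calc_addrs(addrs, 0x10000000, &fb, 16, 8);
		printf("Y=%#x UV=%#x\n", addrs[0], addrs[1]);
		return 0;
	}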
diff --git a/drivers/gpu/drm/armada/armada_crtc.h b/drivers/gpu/drm/armada/armada_crtc.h
index 04fdd22d483b..b08043e8cc3b 100644
--- a/drivers/gpu/drm/armada/armada_crtc.h
+++ b/drivers/gpu/drm/armada/armada_crtc.h
@@ -41,10 +41,18 @@ struct armada_plane_work {
struct armada_plane_work *);
};
+struct armada_plane_state {
+ u32 src_hw;
+ u32 dst_hw;
+ u32 dst_yx;
+ u32 ctrl0;
+};
+
struct armada_plane {
struct drm_plane base;
wait_queue_head_t frame_wait;
struct armada_plane_work *work;
+ struct armada_plane_state state;
};
#define drm_to_armada_plane(p) container_of(p, struct armada_plane, base)
@@ -54,6 +62,8 @@ int armada_drm_plane_work_queue(struct armada_crtc *dcrtc,
int armada_drm_plane_work_wait(struct armada_plane *plane, long timeout);
struct armada_plane_work *armada_drm_plane_work_cancel(
struct armada_crtc *dcrtc, struct armada_plane *plane);
+void armada_drm_plane_calc_addrs(u32 *addrs, struct drm_framebuffer *fb,
+ int x, int y);
struct armada_crtc {
struct drm_crtc crtc;
diff --git a/drivers/gpu/drm/armada/armada_debugfs.c b/drivers/gpu/drm/armada/armada_debugfs.c
index d4f7ab0a30d4..90222e60d2d6 100644
--- a/drivers/gpu/drm/armada/armada_debugfs.c
+++ b/drivers/gpu/drm/armada/armada_debugfs.c
@@ -113,7 +113,7 @@ static int drm_add_fake_info_node(struct drm_minor *minor, struct dentry *ent,
struct drm_info_node *node;
node = kmalloc(sizeof(struct drm_info_node), GFP_KERNEL);
- if (node == NULL) {
+ if (!node) {
debugfs_remove(ent);
return -ENOMEM;
}
diff --git a/drivers/gpu/drm/armada/armada_drm.h b/drivers/gpu/drm/armada/armada_drm.h
index 3b2bb6128d40..77952d559a3c 100644
--- a/drivers/gpu/drm/armada/armada_drm.h
+++ b/drivers/gpu/drm/armada/armada_drm.h
@@ -53,6 +53,7 @@ struct armada_variant {
extern const struct armada_variant armada510_ops;
struct armada_private {
+ struct drm_device drm;
struct work_struct fb_unref_work;
DECLARE_KFIFO(fb_unref, struct drm_framebuffer *, 8);
struct drm_fb_helper *fbdev;
diff --git a/drivers/gpu/drm/armada/armada_drv.c b/drivers/gpu/drm/armada/armada_drv.c
index 1e0e68f608e4..07086b427c22 100644
--- a/drivers/gpu/drm/armada/armada_drv.c
+++ b/drivers/gpu/drm/armada/armada_drv.c
@@ -49,106 +49,6 @@ void armada_drm_queue_unref_work(struct drm_device *dev,
spin_unlock_irqrestore(&dev->event_lock, flags);
}
-static int armada_drm_load(struct drm_device *dev, unsigned long flags)
-{
- struct armada_private *priv;
- struct resource *mem = NULL;
- int ret, n;
-
- for (n = 0; ; n++) {
- struct resource *r = platform_get_resource(dev->platformdev,
- IORESOURCE_MEM, n);
- if (!r)
- break;
-
- /* Resources above 64K are graphics memory */
- if (resource_size(r) > SZ_64K)
- mem = r;
- else
- return -EINVAL;
- }
-
- if (!mem)
- return -ENXIO;
-
- if (!devm_request_mem_region(dev->dev, mem->start,
- resource_size(mem), "armada-drm"))
- return -EBUSY;
-
- priv = devm_kzalloc(dev->dev, sizeof(*priv), GFP_KERNEL);
- if (!priv) {
- DRM_ERROR("failed to allocate private\n");
- return -ENOMEM;
- }
-
- platform_set_drvdata(dev->platformdev, dev);
- dev->dev_private = priv;
-
- INIT_WORK(&priv->fb_unref_work, armada_drm_unref_work);
- INIT_KFIFO(priv->fb_unref);
-
- /* Mode setting support */
- drm_mode_config_init(dev);
- dev->mode_config.min_width = 320;
- dev->mode_config.min_height = 200;
-
- /*
- * With vscale enabled, the maximum width is 1920 due to the
- * 1920 by 3 lines RAM
- */
- dev->mode_config.max_width = 1920;
- dev->mode_config.max_height = 2048;
-
- dev->mode_config.preferred_depth = 24;
- dev->mode_config.funcs = &armada_drm_mode_config_funcs;
- drm_mm_init(&priv->linear, mem->start, resource_size(mem));
- mutex_init(&priv->linear_lock);
-
- ret = component_bind_all(dev->dev, dev);
- if (ret)
- goto err_kms;
-
- ret = drm_vblank_init(dev, dev->mode_config.num_crtc);
- if (ret)
- goto err_comp;
-
- dev->irq_enabled = true;
-
- ret = armada_fbdev_init(dev);
- if (ret)
- goto err_comp;
-
- drm_kms_helper_poll_init(dev);
-
- return 0;
-
- err_comp:
- component_unbind_all(dev->dev, dev);
- err_kms:
- drm_mode_config_cleanup(dev);
- drm_mm_takedown(&priv->linear);
- flush_work(&priv->fb_unref_work);
-
- return ret;
-}
-
-static int armada_drm_unload(struct drm_device *dev)
-{
- struct armada_private *priv = dev->dev_private;
-
- drm_kms_helper_poll_fini(dev);
- armada_fbdev_fini(dev);
-
- component_unbind_all(dev->dev, dev);
-
- drm_mode_config_cleanup(dev);
- drm_mm_takedown(&priv->linear);
- flush_work(&priv->fb_unref_work);
- dev->dev_private = NULL;
-
- return 0;
-}
-
/* These are called under the vbl_lock. */
static int armada_drm_enable_vblank(struct drm_device *dev, unsigned int pipe)
{
@@ -186,16 +86,10 @@ static const struct file_operations armada_drm_fops = {
};
static struct drm_driver armada_drm_driver = {
- .load = armada_drm_load,
.lastclose = armada_drm_lastclose,
- .unload = armada_drm_unload,
.get_vblank_counter = drm_vblank_no_hw_counter,
.enable_vblank = armada_drm_enable_vblank,
.disable_vblank = armada_drm_disable_vblank,
-#ifdef CONFIG_DEBUG_FS
- .debugfs_init = armada_drm_debugfs_init,
- .debugfs_cleanup = armada_drm_debugfs_cleanup,
-#endif
.gem_free_object_unlocked = armada_gem_free_object,
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
@@ -218,12 +112,138 @@ static struct drm_driver armada_drm_driver = {
static int armada_drm_bind(struct device *dev)
{
- return drm_platform_init(&armada_drm_driver, to_platform_device(dev));
+ struct armada_private *priv;
+ struct resource *mem = NULL;
+ int ret, n;
+
+ for (n = 0; ; n++) {
+ struct resource *r = platform_get_resource(to_platform_device(dev),
+ IORESOURCE_MEM, n);
+ if (!r)
+ break;
+
+ /* Resources above 64K are graphics memory */
+ if (resource_size(r) > SZ_64K)
+ mem = r;
+ else
+ return -EINVAL;
+ }
+
+ if (!mem)
+ return -ENXIO;
+
+ if (!devm_request_mem_region(dev, mem->start, resource_size(mem),
+ "armada-drm"))
+ return -EBUSY;
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ /*
+ * The drm_device structure must be at the start of
+ * armada_private for drm_dev_unref() to work correctly.
+ */
+ BUILD_BUG_ON(offsetof(struct armada_private, drm) != 0);
+
+ ret = drm_dev_init(&priv->drm, &armada_drm_driver, dev);
+ if (ret) {
+ dev_err(dev, "[" DRM_NAME ":%s] drm_dev_init failed: %d\n",
+ __func__, ret);
+ kfree(priv);
+ return ret;
+ }
+
+ priv->drm.platformdev = to_platform_device(dev);
+ priv->drm.dev_private = priv;
+
+ platform_set_drvdata(priv->drm.platformdev, &priv->drm);
+
+ INIT_WORK(&priv->fb_unref_work, armada_drm_unref_work);
+ INIT_KFIFO(priv->fb_unref);
+
+ /* Mode setting support */
+ drm_mode_config_init(&priv->drm);
+ priv->drm.mode_config.min_width = 320;
+ priv->drm.mode_config.min_height = 200;
+
+ /*
+ * With vscale enabled, the maximum width is 1920 due to the
+ * 1920 by 3 lines RAM
+ */
+ priv->drm.mode_config.max_width = 1920;
+ priv->drm.mode_config.max_height = 2048;
+
+ priv->drm.mode_config.preferred_depth = 24;
+ priv->drm.mode_config.funcs = &armada_drm_mode_config_funcs;
+ drm_mm_init(&priv->linear, mem->start, resource_size(mem));
+ mutex_init(&priv->linear_lock);
+
+ ret = component_bind_all(dev, &priv->drm);
+ if (ret)
+ goto err_kms;
+
+ ret = drm_vblank_init(&priv->drm, priv->drm.mode_config.num_crtc);
+ if (ret)
+ goto err_comp;
+
+ priv->drm.irq_enabled = true;
+
+ ret = armada_fbdev_init(&priv->drm);
+ if (ret)
+ goto err_comp;
+
+ drm_kms_helper_poll_init(&priv->drm);
+
+ ret = drm_dev_register(&priv->drm, 0);
+ if (ret)
+ goto err_poll;
+
+#ifdef CONFIG_DEBUG_FS
+ armada_drm_debugfs_init(priv->drm.primary);
+#endif
+
+ DRM_INFO("Initialized %s %d.%d.%d %s for %s on minor %d\n",
+ armada_drm_driver.name, armada_drm_driver.major,
+ armada_drm_driver.minor, armada_drm_driver.patchlevel,
+ armada_drm_driver.date, dev_name(dev),
+ priv->drm.primary->index);
+
+ return 0;
+
+ err_poll:
+ drm_kms_helper_poll_fini(&priv->drm);
+ armada_fbdev_fini(&priv->drm);
+ err_comp:
+ component_unbind_all(dev, &priv->drm);
+ err_kms:
+ drm_mode_config_cleanup(&priv->drm);
+ drm_mm_takedown(&priv->linear);
+ flush_work(&priv->fb_unref_work);
+ drm_dev_unref(&priv->drm);
+ return ret;
}
static void armada_drm_unbind(struct device *dev)
{
- drm_put_dev(dev_get_drvdata(dev));
+ struct drm_device *drm = dev_get_drvdata(dev);
+ struct armada_private *priv = drm->dev_private;
+
+ drm_kms_helper_poll_fini(&priv->drm);
+ armada_fbdev_fini(&priv->drm);
+
+#ifdef CONFIG_DEBUG_FS
+ armada_drm_debugfs_cleanup(priv->drm.primary);
+#endif
+ drm_dev_unregister(&priv->drm);
+
+ component_unbind_all(dev, &priv->drm);
+
+ drm_mode_config_cleanup(&priv->drm);
+ drm_mm_takedown(&priv->linear);
+ flush_work(&priv->fb_unref_work);
+
+ drm_dev_unref(&priv->drm);
}
static int compare_of(struct device *dev, void *data)
@@ -254,7 +274,7 @@ static void armada_add_endpoints(struct device *dev,
continue;
}
- component_match_add(dev, match, compare_of, remote);
+ drm_of_component_match_add(dev, match, compare_of, remote);
of_node_put(remote);
}
}
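
The bind path above replaces the deprecated .load()/.unload() driver hooks with drm_dev_init() on a drm_device embedded as the first member of armada_private, so the final drm_dev_unref() releases the whole private structure in one go; the BUILD_BUG_ON() guards exactly that layout assumption. A minimal sketch of the embedding pattern (kernel-style C, not a buildable driver; everything except the drm_* and kzalloc calls is illustrative):

	#include <drm/drmP.h>
	#include <linux/slab.h>

	struct my_private {
		struct drm_device drm;	/* must stay first: freed via the drm ref */
		int my_state;
	};

	static int my_bind(struct device *dev, struct drm_driver *drv)
	{
		struct my_private *priv = kzalloc(sizeof(*priv), GFP_KERNEL);
		int ret;

		if (!priv)
			return -ENOMEM;

		/* drm_dev_unref() ends in kfree(&priv->drm), i.e. kfree(priv). */
		BUILD_BUG_ON(offsetof(struct my_private, drm) != 0);

		ret = drm_dev_init(&priv->drm, drv, dev);
		if (ret) {
			kfree(priv);	/* init failed, no reference was taken */
			return ret;
		}
		priv->drm.dev_private = priv;

		return drm_dev_register(&priv->drm, 0);
	}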
diff --git a/drivers/gpu/drm/armada/armada_fbdev.c b/drivers/gpu/drm/armada/armada_fbdev.c
index ca73ad8614fe..c5dc06a55883 100644
--- a/drivers/gpu/drm/armada/armada_fbdev.c
+++ b/drivers/gpu/drm/armada/armada_fbdev.c
@@ -19,16 +19,10 @@
static /*const*/ struct fb_ops armada_fb_ops = {
.owner = THIS_MODULE,
- .fb_check_var = drm_fb_helper_check_var,
- .fb_set_par = drm_fb_helper_set_par,
+ DRM_FB_HELPER_DEFAULT_OPS,
.fb_fillrect = drm_fb_helper_cfb_fillrect,
.fb_copyarea = drm_fb_helper_cfb_copyarea,
.fb_imageblit = drm_fb_helper_cfb_imageblit,
- .fb_pan_display = drm_fb_helper_pan_display,
- .fb_blank = drm_fb_helper_blank,
- .fb_setcmap = drm_fb_helper_setcmap,
- .fb_debug_enter = drm_fb_helper_debug_enter,
- .fb_debug_leave = drm_fb_helper_debug_leave,
};
static int armada_fb_create(struct drm_fb_helper *fbh,
diff --git a/drivers/gpu/drm/armada/armada_gem.c b/drivers/gpu/drm/armada/armada_gem.c
index 806791897304..a293c8be232c 100644
--- a/drivers/gpu/drm/armada/armada_gem.c
+++ b/drivers/gpu/drm/armada/armada_gem.c
@@ -17,12 +17,11 @@
static int armada_gem_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
struct armada_gem_object *obj = drm_to_armada_gem(vma->vm_private_data);
- unsigned long addr = (unsigned long)vmf->virtual_address;
unsigned long pfn = obj->phys_addr >> PAGE_SHIFT;
int ret;
- pfn += (addr - vma->vm_start) >> PAGE_SHIFT;
- ret = vm_insert_pfn(vma, addr, pfn);
+ pfn += (vmf->address - vma->vm_start) >> PAGE_SHIFT;
+ ret = vm_insert_pfn(vma, vmf->address, pfn);
switch (ret) {
case 0:
@@ -212,7 +211,7 @@ armada_gem_alloc_private_object(struct drm_device *dev, size_t size)
return obj;
}
-struct armada_gem_object *armada_gem_alloc_object(struct drm_device *dev,
+static struct armada_gem_object *armada_gem_alloc_object(struct drm_device *dev,
size_t size)
{
struct armada_gem_object *obj;
@@ -419,7 +418,7 @@ int armada_gem_pwrite_ioctl(struct drm_device *dev, void *data,
}
/* Prime support */
-struct sg_table *
+static struct sg_table *
armada_gem_prime_map_dma_buf(struct dma_buf_attachment *attach,
enum dma_data_direction dir)
{
@@ -594,11 +593,7 @@ int armada_gem_map_import(struct armada_gem_object *dobj)
int ret;
dobj->sgt = dma_buf_map_attachment(dobj->obj.import_attach,
- DMA_TO_DEVICE);
- if (!dobj->sgt) {
- DRM_ERROR("dma_buf_map_attachment() returned NULL\n");
- return -EINVAL;
- }
+ DMA_TO_DEVICE);
if (IS_ERR(dobj->sgt)) {
ret = PTR_ERR(dobj->sgt);
dobj->sgt = NULL;
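
The error-handling hunk above also drops a NULL check: dma_buf_map_attachment() reports failure via ERR_PTR() encoding, never NULL, so the IS_ERR() branch that follows is the only check needed. The idiomatic shape, as a sketch using just the calls shown in the patch:

	#include <linux/dma-buf.h>
	#include <linux/err.h>

	static int map_import_sketch(struct dma_buf_attachment *attach,
				     struct sg_table **sgt)
	{
		struct sg_table *t = dma_buf_map_attachment(attach, DMA_TO_DEVICE);

		if (IS_ERR(t))
			return PTR_ERR(t);	/* failure is an encoded errno, not NULL */

		*sgt = t;
		return 0;
	}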
diff --git a/drivers/gpu/drm/armada/armada_overlay.c b/drivers/gpu/drm/armada/armada_overlay.c
index 152b4e716269..6743615232f5 100644
--- a/drivers/gpu/drm/armada/armada_overlay.c
+++ b/drivers/gpu/drm/armada/armada_overlay.c
@@ -15,6 +15,7 @@
#include "armada_hw.h"
#include <drm/armada_drm.h>
#include "armada_ioctlP.h"
+#include "armada_trace.h"
struct armada_ovl_plane_properties {
uint32_t colorkey_yr;
@@ -32,10 +33,6 @@ struct armada_ovl_plane_properties {
struct armada_ovl_plane {
struct armada_plane base;
struct drm_framebuffer *old_fb;
- uint32_t src_hw;
- uint32_t dst_hw;
- uint32_t dst_yx;
- uint32_t ctrl0;
struct {
struct armada_plane_work work;
struct armada_regs regs[13];
@@ -87,6 +84,8 @@ static void armada_ovl_plane_work(struct armada_crtc *dcrtc,
{
struct armada_ovl_plane *dplane = container_of(plane, struct armada_ovl_plane, base);
+ trace_armada_ovl_plane_work(&dcrtc->crtc, &plane->base);
+
armada_drm_crtc_update_regs(dcrtc, dplane->vbl.regs);
armada_ovl_retire_fb(dplane, NULL);
}
@@ -120,6 +119,10 @@ armada_ovl_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
bool visible;
int ret;
+ trace_armada_ovl_plane_update(plane, crtc, fb,
+ crtc_x, crtc_y, crtc_w, crtc_h,
+ src_x, src_y, src_w, src_h);
+
ret = drm_plane_helper_check_update(plane, crtc, fb, &src, &dest, &clip,
DRM_ROTATE_0,
0, INT_MAX, true, false, &visible);
@@ -141,22 +144,22 @@ armada_ovl_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
/* FIXME: overlay on an interlaced display */
/* Just updating the position/size? */
- if (plane->fb == fb && dplane->ctrl0 == ctrl0) {
+ if (plane->fb == fb && dplane->base.state.ctrl0 == ctrl0) {
val = (drm_rect_height(&src) & 0xffff0000) |
drm_rect_width(&src) >> 16;
- dplane->src_hw = val;
+ dplane->base.state.src_hw = val;
writel_relaxed(val, dcrtc->base + LCD_SPU_DMA_HPXL_VLN);
val = drm_rect_height(&dest) << 16 | drm_rect_width(&dest);
- dplane->dst_hw = val;
+ dplane->base.state.dst_hw = val;
writel_relaxed(val, dcrtc->base + LCD_SPU_DZM_HPXL_VLN);
val = dest.y1 << 16 | dest.x1;
- dplane->dst_yx = val;
+ dplane->base.state.dst_yx = val;
writel_relaxed(val, dcrtc->base + LCD_SPU_DMA_OVSA_HPXL_VLN);
return 0;
- } else if (~dplane->ctrl0 & ctrl0 & CFG_DMA_ENA) {
+ } else if (~dplane->base.state.ctrl0 & ctrl0 & CFG_DMA_ENA) {
/* Power up the Y/U/V FIFOs on ENA 0->1 transitions */
armada_updatel(0, CFG_PDWN16x66 | CFG_PDWN32x66,
dcrtc->base + LCD_SPU_SRAM_PARA1);
@@ -166,9 +169,8 @@ armada_ovl_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
armada_drm_plane_work_cancel(dcrtc, &dplane->base);
if (plane->fb != fb) {
- struct armada_gem_object *obj = drm_fb_obj(fb);
- uint32_t addr[3], pixel_format;
- int i, num_planes, hsub;
+ u32 addrs[3], pixel_format;
+ int num_planes, hsub;
/*
* Take a reference on the new framebuffer - we want to
@@ -182,6 +184,8 @@ armada_ovl_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
src_y = src.y1 >> 16;
src_x = src.x1 >> 16;
+ armada_drm_plane_calc_addrs(addrs, fb, src_x, src_y);
+
pixel_format = fb->pixel_format;
hsub = drm_format_horz_chroma_subsampling(pixel_format);
num_planes = drm_format_num_planes(pixel_format);
@@ -194,24 +198,17 @@ armada_ovl_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
if (src_x & (hsub - 1) && num_planes == 1)
ctrl0 ^= CFG_DMA_MOD(CFG_SWAPUV);
- for (i = 0; i < num_planes; i++)
- addr[i] = obj->dev_addr + fb->offsets[i] +
- src_y * fb->pitches[i] +
- src_x * drm_format_plane_cpp(pixel_format, i);
- for (; i < ARRAY_SIZE(addr); i++)
- addr[i] = 0;
-
- armada_reg_queue_set(dplane->vbl.regs, idx, addr[0],
+ armada_reg_queue_set(dplane->vbl.regs, idx, addrs[0],
LCD_SPU_DMA_START_ADDR_Y0);
- armada_reg_queue_set(dplane->vbl.regs, idx, addr[1],
+ armada_reg_queue_set(dplane->vbl.regs, idx, addrs[1],
LCD_SPU_DMA_START_ADDR_U0);
- armada_reg_queue_set(dplane->vbl.regs, idx, addr[2],
+ armada_reg_queue_set(dplane->vbl.regs, idx, addrs[2],
LCD_SPU_DMA_START_ADDR_V0);
- armada_reg_queue_set(dplane->vbl.regs, idx, addr[0],
+ armada_reg_queue_set(dplane->vbl.regs, idx, addrs[0],
LCD_SPU_DMA_START_ADDR_Y1);
- armada_reg_queue_set(dplane->vbl.regs, idx, addr[1],
+ armada_reg_queue_set(dplane->vbl.regs, idx, addrs[1],
LCD_SPU_DMA_START_ADDR_U1);
- armada_reg_queue_set(dplane->vbl.regs, idx, addr[2],
+ armada_reg_queue_set(dplane->vbl.regs, idx, addrs[2],
LCD_SPU_DMA_START_ADDR_V1);
val = fb->pitches[0] << 16 | fb->pitches[0];
@@ -223,28 +220,28 @@ armada_ovl_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
}
val = (drm_rect_height(&src) & 0xffff0000) | drm_rect_width(&src) >> 16;
- if (dplane->src_hw != val) {
- dplane->src_hw = val;
+ if (dplane->base.state.src_hw != val) {
+ dplane->base.state.src_hw = val;
armada_reg_queue_set(dplane->vbl.regs, idx, val,
LCD_SPU_DMA_HPXL_VLN);
}
val = drm_rect_height(&dest) << 16 | drm_rect_width(&dest);
- if (dplane->dst_hw != val) {
- dplane->dst_hw = val;
+ if (dplane->base.state.dst_hw != val) {
+ dplane->base.state.dst_hw = val;
armada_reg_queue_set(dplane->vbl.regs, idx, val,
LCD_SPU_DZM_HPXL_VLN);
}
val = dest.y1 << 16 | dest.x1;
- if (dplane->dst_yx != val) {
- dplane->dst_yx = val;
+ if (dplane->base.state.dst_yx != val) {
+ dplane->base.state.dst_yx = val;
armada_reg_queue_set(dplane->vbl.regs, idx, val,
LCD_SPU_DMA_OVSA_HPXL_VLN);
}
- if (dplane->ctrl0 != ctrl0) {
- dplane->ctrl0 = ctrl0;
+ if (dplane->base.state.ctrl0 != ctrl0) {
+ dplane->base.state.ctrl0 = ctrl0;
armada_reg_queue_mod(dplane->vbl.regs, idx, ctrl0,
CFG_CBSH_ENA | CFG_DMAFORMAT | CFG_DMA_FTOGGLE |
CFG_DMA_HSMOOTH | CFG_DMA_TSTMODE |
@@ -275,7 +272,7 @@ static int armada_ovl_plane_disable(struct drm_plane *plane)
armada_drm_crtc_plane_disable(dcrtc, plane);
dcrtc->plane = NULL;
- dplane->ctrl0 = 0;
+ dplane->base.state.ctrl0 = 0;
fb = xchg(&dplane->old_fb, NULL);
if (fb)
diff --git a/drivers/gpu/drm/armada/armada_trace.c b/drivers/gpu/drm/armada/armada_trace.c
new file mode 100644
index 000000000000..068b336ba75f
--- /dev/null
+++ b/drivers/gpu/drm/armada/armada_trace.c
@@ -0,0 +1,4 @@
+#ifndef __CHECKER__
+#define CREATE_TRACE_POINTS
+#include "armada_trace.h"
+#endif
diff --git a/drivers/gpu/drm/armada/armada_trace.h b/drivers/gpu/drm/armada/armada_trace.h
new file mode 100644
index 000000000000..dc0cba70fd1a
--- /dev/null
+++ b/drivers/gpu/drm/armada/armada_trace.h
@@ -0,0 +1,66 @@
+#if !defined(ARMADA_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define ARMADA_TRACE_H
+
+#include <linux/tracepoint.h>
+#include <drm/drmP.h>
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM armada
+#define TRACE_INCLUDE_FILE armada_trace
+
+TRACE_EVENT(armada_drm_irq,
+ TP_PROTO(struct drm_crtc *crtc, u32 stat),
+ TP_ARGS(crtc, stat),
+ TP_STRUCT__entry(
+ __field(struct drm_crtc *, crtc)
+ __field(u32, stat)
+ ),
+ TP_fast_assign(
+ __entry->crtc = crtc;
+ __entry->stat = stat;
+ ),
+ TP_printk("crtc %p stat 0x%08x",
+ __entry->crtc, __entry->stat)
+);
+
+TRACE_EVENT(armada_ovl_plane_update,
+ TP_PROTO(struct drm_plane *plane, struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ int crtc_x, int crtc_y, unsigned crtc_w, unsigned crtc_h,
+ uint32_t src_x, uint32_t src_y, uint32_t src_w, uint32_t src_h),
+ TP_ARGS(plane, crtc, fb, crtc_x, crtc_y, crtc_w, crtc_h, src_x, src_y, src_w, src_h),
+ TP_STRUCT__entry(
+ __field(struct drm_plane *, plane)
+ __field(struct drm_crtc *, crtc)
+ __field(struct drm_framebuffer *, fb)
+ ),
+ TP_fast_assign(
+ __entry->plane = plane;
+ __entry->crtc = crtc;
+ __entry->fb = fb;
+ ),
+ TP_printk("plane %p crtc %p fb %p",
+ __entry->plane, __entry->crtc, __entry->fb)
+);
+
+TRACE_EVENT(armada_ovl_plane_work,
+ TP_PROTO(struct drm_crtc *crtc, struct drm_plane *plane),
+ TP_ARGS(crtc, plane),
+ TP_STRUCT__entry(
+ __field(struct drm_plane *, plane)
+ __field(struct drm_crtc *, crtc)
+ ),
+ TP_fast_assign(
+ __entry->plane = plane;
+ __entry->crtc = crtc;
+ ),
+ TP_printk("plane %p crtc %p",
+ __entry->plane, __entry->crtc)
+);
+
+#endif
+
+/* This part must be outside protection */
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#include <trace/define_trace.h>
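
These tracepoints follow the stock <linux/tracepoint.h> pattern: the TRACE_EVENT() definitions generate trace_armada_drm_irq() and friends once a single translation unit defines CREATE_TRACE_POINTS before including the header, which is what armada_trace.c above does. Call sites compile to static-branch no-ops until the event is enabled at runtime. Sketch of the consumer side (the handler name is illustrative):

	#include "armada_trace.h"	/* declaration only; no CREATE_TRACE_POINTS here */

	static void handle_irq_sketch(struct drm_crtc *crtc, u32 stat)
	{
		/*
		 * Near-zero cost unless enabled at runtime, e.g. via tracefs:
		 *   echo 1 > /sys/kernel/debug/tracing/events/armada/enable
		 */
		trace_armada_drm_irq(crtc, stat);
	}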
diff --git a/drivers/gpu/drm/ast/ast_drv.c b/drivers/gpu/drm/ast/ast_drv.c
index f54afd2113a9..fd7c9eec92e4 100644
--- a/drivers/gpu/drm/ast/ast_drv.c
+++ b/drivers/gpu/drm/ast/ast_drv.c
@@ -188,9 +188,7 @@ static const struct file_operations ast_fops = {
.unlocked_ioctl = drm_ioctl,
.mmap = ast_mmap,
.poll = drm_poll,
-#ifdef CONFIG_COMPAT
.compat_ioctl = drm_compat_ioctl,
-#endif
.read = drm_read,
};
diff --git a/drivers/gpu/drm/ast/ast_fb.c b/drivers/gpu/drm/ast/ast_fb.c
index 7a86e24e2687..d6f5ec64c667 100644
--- a/drivers/gpu/drm/ast/ast_fb.c
+++ b/drivers/gpu/drm/ast/ast_fb.c
@@ -253,7 +253,7 @@ static int astfb_create(struct drm_fb_helper *helper,
err_release_fbi:
drm_fb_helper_release_fbi(helper);
err_free_vram:
- vfree(afbdev->sysram);
+ vfree(sysram);
return ret;
}
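
The one-line ast_fb.c fix is a classic error-path bug: the cleanup freed afbdev->sysram, a field that is only assigned later on the success path, instead of the local buffer that was actually allocated. The general shape, as a compilable sketch (malloc/free stand in for vmalloc/vfree):

	#include <stdlib.h>

	struct fbdev_sketch { void *sysram; };

	static int create_fb_sketch(struct fbdev_sketch *afbdev, size_t size,
				    int fail)
	{
		void *sysram = malloc(size);

		if (!sysram)
			return -1;

		if (fail)
			goto err_free_vram;	/* afbdev->sysram never assigned */

		afbdev->sysram = sysram;	/* only set once everything succeeded */
		return 0;

	err_free_vram:
		free(sysram);	/* free the local pointer, not the stale field */
		return -1;
	}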
diff --git a/drivers/gpu/drm/ast/ast_main.c b/drivers/gpu/drm/ast/ast_main.c
index 904beaa932d0..f75c6421db62 100644
--- a/drivers/gpu/drm/ast/ast_main.c
+++ b/drivers/gpu/drm/ast/ast_main.c
@@ -223,7 +223,8 @@ static int ast_get_dram_info(struct drm_device *dev)
ast_write32(ast, 0x10000, 0xfc600309);
do {
- ;
+ if (pci_channel_offline(dev->pdev))
+ return -EIO;
} while (ast_read32(ast, 0x10000) != 0x01);
data = ast_read32(ast, 0x10004);
@@ -428,7 +429,9 @@ int ast_driver_load(struct drm_device *dev, unsigned long flags)
ast_detect_chip(dev, &need_post);
if (ast->chip != AST1180) {
- ast_get_dram_info(dev);
+ ret = ast_get_dram_info(dev);
+ if (ret)
+ goto out_free;
ast->vram_size = ast_get_vram_info(dev);
DRM_INFO("dram %d %d %d %08x\n", ast->mclk, ast->dram_type, ast->dram_bus_width, ast->vram_size);
}
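
The ast_get_dram_info() hunks turn an unbounded busy-wait on a device register into an abortable one: if the PCI channel goes offline (surprise removal, reads returning all-ones), the loop now fails with -EIO and the caller propagates the error instead of spinning forever. The pattern, sketched with an illustrative register and helper name:

	#include <linux/io.h>
	#include <linux/pci.h>

	static int wait_for_magic_sketch(struct pci_dev *pdev, void __iomem *reg)
	{
		do {
			if (pci_channel_offline(pdev))
				return -EIO;	/* device gone; reads are bogus */
		} while (readl(reg) != 0x01);

		return 0;
	}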
diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c
index 5957c3e659fe..e26c98f51eb4 100644
--- a/drivers/gpu/drm/ast/ast_mode.c
+++ b/drivers/gpu/drm/ast/ast_mode.c
@@ -839,12 +839,6 @@ static void ast_connector_destroy(struct drm_connector *connector)
kfree(connector);
}
-static enum drm_connector_status
-ast_connector_detect(struct drm_connector *connector, bool force)
-{
- return connector_status_connected;
-}
-
static const struct drm_connector_helper_funcs ast_connector_helper_funcs = {
.mode_valid = ast_mode_valid,
.get_modes = ast_get_modes,
@@ -853,7 +847,6 @@ static const struct drm_connector_helper_funcs ast_connector_helper_funcs = {
static const struct drm_connector_funcs ast_connector_funcs = {
.dpms = drm_helper_connector_dpms,
- .detect = ast_connector_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.destroy = ast_connector_destroy,
};
diff --git a/drivers/gpu/drm/ast/ast_ttm.c b/drivers/gpu/drm/ast/ast_ttm.c
index 0743e65cb240..2a1368fac1d1 100644
--- a/drivers/gpu/drm/ast/ast_ttm.c
+++ b/drivers/gpu/drm/ast/ast_ttm.c
@@ -230,6 +230,7 @@ struct ttm_bo_driver ast_bo_driver = {
.ttm_tt_populate = ast_ttm_tt_populate,
.ttm_tt_unpopulate = ast_ttm_tt_unpopulate,
.init_mem_type = ast_bo_init_mem_type,
+ .eviction_valuable = ttm_bo_eviction_valuable,
.evict_flags = ast_bo_evict_flags,
.move = NULL,
.verify_access = ast_bo_verify_access,
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
index 5f484310bee9..cbd0070265c9 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
@@ -464,7 +464,7 @@ atmel_hlcdc_dc_atomic_complete(struct atmel_hlcdc_dc_commit *commit)
drm_atomic_helper_cleanup_planes(dev, old_state);
- drm_atomic_state_free(old_state);
+ drm_atomic_state_put(old_state);
/* Complete the commit, wake up any waiter. */
spin_lock(&dc->commit.wait.lock);
@@ -521,6 +521,7 @@ static int atmel_hlcdc_dc_atomic_commit(struct drm_device *dev,
/* Swap the state, this is the point of no return. */
drm_atomic_helper_swap_state(state, true);
+ drm_atomic_state_get(state);
if (async)
queue_work(dc->wq, &commit->work);
else
@@ -748,9 +749,7 @@ static const struct file_operations fops = {
.open = drm_open,
.release = drm_release,
.unlocked_ioctl = drm_ioctl,
-#ifdef CONFIG_COMPAT
.compat_ioctl = drm_compat_ioctl,
-#endif
.poll = drm_poll,
.read = drm_read,
.llseek = no_llseek,
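
The atmel-hlcdc hunks above track the drm_atomic_state lifetime conversion: instead of a single explicit drm_atomic_state_free(), the commit path takes a reference with drm_atomic_state_get() before handing the state to a possibly asynchronous worker, and the completion path drops it with drm_atomic_state_put(). The pairing, as a sketch assuming only those two DRM calls:

	#include <drm/drm_atomic.h>
	#include <linux/workqueue.h>

	static void queue_commit_sketch(struct drm_atomic_state *state,
					struct workqueue_struct *wq,
					struct work_struct *work)
	{
		drm_atomic_state_get(state);	/* reference now owned by the worker */
		queue_work(wq, work);
	}

	static void complete_commit_sketch(struct drm_atomic_state *state)
	{
		/* ... apply the state and clean up planes ... */
		drm_atomic_state_put(state);	/* drop the worker's reference */
	}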
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c
index 9d4c030672f0..246ed1e33d8a 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c
@@ -393,7 +393,7 @@ static void atmel_hlcdc_plane_update_format(struct atmel_hlcdc_plane *plane,
if ((state->base.fb->pixel_format == DRM_FORMAT_YUV422 ||
state->base.fb->pixel_format == DRM_FORMAT_NV61) &&
- (state->base.rotation & (DRM_ROTATE_90 | DRM_ROTATE_270)))
+ drm_rotation_90_or_270(state->base.rotation))
cfg |= ATMEL_HLCDC_YUV422ROT;
atmel_hlcdc_layer_update_cfg(&plane->layer,
@@ -628,7 +628,7 @@ static int atmel_hlcdc_plane_atomic_check(struct drm_plane *p,
/*
* Swap width and size in case of 90 or 270 degrees rotation
*/
- if (state->base.rotation & (DRM_ROTATE_90 | DRM_ROTATE_270)) {
+ if (drm_rotation_90_or_270(state->base.rotation)) {
tmp = state->crtc_w;
state->crtc_w = state->crtc_h;
state->crtc_h = tmp;
@@ -883,9 +883,9 @@ static int atmel_hlcdc_plane_atomic_get_property(struct drm_plane *p,
return 0;
}
-static void atmel_hlcdc_plane_init_properties(struct atmel_hlcdc_plane *plane,
- const struct atmel_hlcdc_layer_desc *desc,
- struct atmel_hlcdc_plane_properties *props)
+static int atmel_hlcdc_plane_init_properties(struct atmel_hlcdc_plane *plane,
+ const struct atmel_hlcdc_layer_desc *desc,
+ struct atmel_hlcdc_plane_properties *props)
{
struct regmap *regmap = plane->layer.hlcdc->regmap;
@@ -902,10 +902,18 @@ static void atmel_hlcdc_plane_init_properties(struct atmel_hlcdc_plane *plane,
ATMEL_HLCDC_LAYER_GA_MASK);
}
- if (desc->layout.xstride && desc->layout.pstride)
- drm_object_attach_property(&plane->base.base,
- plane->base.dev->mode_config.rotation_property,
- DRM_ROTATE_0);
+ if (desc->layout.xstride && desc->layout.pstride) {
+ int ret;
+
+ ret = drm_plane_create_rotation_property(&plane->base,
+ DRM_ROTATE_0,
+ DRM_ROTATE_0 |
+ DRM_ROTATE_90 |
+ DRM_ROTATE_180 |
+ DRM_ROTATE_270);
+ if (ret)
+ return ret;
+ }
if (desc->layout.csc) {
/*
@@ -925,6 +933,8 @@ static void atmel_hlcdc_plane_init_properties(struct atmel_hlcdc_plane *plane,
ATMEL_HLCDC_LAYER_CSC_CFG(&plane->layer, 2),
0x40040890);
}
+
+ return 0;
}
static struct drm_plane_helper_funcs atmel_hlcdc_layer_plane_helper_funcs = {
@@ -1036,7 +1046,9 @@ atmel_hlcdc_plane_create(struct drm_device *dev,
&atmel_hlcdc_layer_plane_helper_funcs);
/* Set default property values*/
- atmel_hlcdc_plane_init_properties(plane, desc, props);
+ ret = atmel_hlcdc_plane_init_properties(plane, desc, props);
+ if (ret)
+ return ERR_PTR(ret);
return plane;
}
@@ -1054,15 +1066,6 @@ atmel_hlcdc_plane_create_properties(struct drm_device *dev)
if (!props->alpha)
return ERR_PTR(-ENOMEM);
- dev->mode_config.rotation_property =
- drm_mode_create_rotation_property(dev,
- DRM_ROTATE_0 |
- DRM_ROTATE_90 |
- DRM_ROTATE_180 |
- DRM_ROTATE_270);
- if (!dev->mode_config.rotation_property)
- return ERR_PTR(-ENOMEM);
-
return props;
}
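
Two related cleanups in this file: the open-coded mask test against DRM_ROTATE_90 | DRM_ROTATE_270 becomes the drm_rotation_90_or_270() helper, and the device-global rotation property is replaced by a per-plane one created with drm_plane_create_rotation_property(). The helper is essentially the mask test it replaces; roughly (header placement in this kernel era may differ):

	#include <drm/drm_blend.h>

	/* Equivalent of the open-coded test removed by this patch. */
	static inline bool rotation_90_or_270_sketch(unsigned int rotation)
	{
		return rotation & (DRM_ROTATE_90 | DRM_ROTATE_270);
	}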
diff --git a/drivers/gpu/drm/bochs/bochs_drv.c b/drivers/gpu/drm/bochs/bochs_drv.c
index 534227df23f3..15a293e65b31 100644
--- a/drivers/gpu/drm/bochs/bochs_drv.c
+++ b/drivers/gpu/drm/bochs/bochs_drv.c
@@ -70,9 +70,7 @@ static const struct file_operations bochs_fops = {
.open = drm_open,
.release = drm_release,
.unlocked_ioctl = drm_ioctl,
-#ifdef CONFIG_COMPAT
.compat_ioctl = drm_compat_ioctl,
-#endif
.poll = drm_poll,
.read = drm_read,
.llseek = no_llseek,
diff --git a/drivers/gpu/drm/bochs/bochs_fbdev.c b/drivers/gpu/drm/bochs/bochs_fbdev.c
index e1ec498a6b6e..da790a1c302a 100644
--- a/drivers/gpu/drm/bochs/bochs_fbdev.c
+++ b/drivers/gpu/drm/bochs/bochs_fbdev.c
@@ -22,14 +22,10 @@ static int bochsfb_mmap(struct fb_info *info,
static struct fb_ops bochsfb_ops = {
.owner = THIS_MODULE,
- .fb_check_var = drm_fb_helper_check_var,
- .fb_set_par = drm_fb_helper_set_par,
+ DRM_FB_HELPER_DEFAULT_OPS,
.fb_fillrect = drm_fb_helper_sys_fillrect,
.fb_copyarea = drm_fb_helper_sys_copyarea,
.fb_imageblit = drm_fb_helper_sys_imageblit,
- .fb_pan_display = drm_fb_helper_pan_display,
- .fb_blank = drm_fb_helper_blank,
- .fb_setcmap = drm_fb_helper_setcmap,
.fb_mmap = bochsfb_mmap,
};
diff --git a/drivers/gpu/drm/bochs/bochs_kms.c b/drivers/gpu/drm/bochs/bochs_kms.c
index 0b4e5d117043..d5e63eff357b 100644
--- a/drivers/gpu/drm/bochs/bochs_kms.c
+++ b/drivers/gpu/drm/bochs/bochs_kms.c
@@ -216,12 +216,6 @@ bochs_connector_best_encoder(struct drm_connector *connector)
return NULL;
}
-static enum drm_connector_status bochs_connector_detect(struct drm_connector
- *connector, bool force)
-{
- return connector_status_connected;
-}
-
static const struct drm_connector_helper_funcs bochs_connector_connector_helper_funcs = {
.get_modes = bochs_connector_get_modes,
.mode_valid = bochs_connector_mode_valid,
@@ -230,7 +224,6 @@ static const struct drm_connector_helper_funcs bochs_connector_connector_helper_
static const struct drm_connector_funcs bochs_connector_connector_funcs = {
.dpms = drm_helper_connector_dpms,
- .detect = bochs_connector_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.destroy = drm_connector_cleanup,
};
diff --git a/drivers/gpu/drm/bochs/bochs_mm.c b/drivers/gpu/drm/bochs/bochs_mm.c
index 269cfca9ca06..099a3c688c26 100644
--- a/drivers/gpu/drm/bochs/bochs_mm.c
+++ b/drivers/gpu/drm/bochs/bochs_mm.c
@@ -199,6 +199,7 @@ struct ttm_bo_driver bochs_bo_driver = {
.ttm_tt_populate = ttm_pool_populate,
.ttm_tt_unpopulate = ttm_pool_unpopulate,
.init_mem_type = bochs_bo_init_mem_type,
+ .eviction_valuable = ttm_bo_eviction_valuable,
.evict_flags = bochs_bo_evict_flags,
.move = NULL,
.verify_access = bochs_bo_verify_access,
diff --git a/drivers/gpu/drm/bridge/Kconfig b/drivers/gpu/drm/bridge/Kconfig
index 10e12e74fc9f..eb8688ec6f18 100644
--- a/drivers/gpu/drm/bridge/Kconfig
+++ b/drivers/gpu/drm/bridge/Kconfig
@@ -39,6 +39,15 @@ config DRM_DW_HDMI_AHB_AUDIO
Designware HDMI block. This is used in conjunction with
the i.MX6 HDMI driver.
+config DRM_DW_HDMI_I2S_AUDIO
+ tristate "Synopsis Designware I2S Audio interface"
+ depends on SND_SOC
+ depends on DRM_DW_HDMI
+ select SND_SOC_HDMI_CODEC
+ help
+ Support the I2S Audio interface, which is part of the Synopsys
+ DesignWare HDMI block.
+
config DRM_NXP_PTN3460
tristate "NXP PTN3460 DP/LVDS bridge"
depends on OF
@@ -57,6 +66,13 @@ config DRM_PARADE_PS8622
---help---
Parade eDP-LVDS bridge chip driver.
+config DRM_SIL_SII8620
+ tristate "Silicon Image SII8620 HDMI/MHL bridge"
+ depends on OF
+ select DRM_KMS_HELPER
+ help
+ Silicon Image SII8620 HDMI/MHL bridge chip driver.
+
config DRM_SII902X
tristate "Silicon Image sii902x RGB/HDMI bridge"
depends on OF
@@ -74,6 +90,13 @@ config DRM_TOSHIBA_TC358767
---help---
Toshiba TC358767 eDP bridge chip driver.
+config DRM_TI_TFP410
+ tristate "TI TFP410 DVI/HDMI bridge"
+ depends on OF
+ select DRM_KMS_HELPER
+ ---help---
+ Texas Instruments TFP410 DVI/HDMI Transmitter driver
+
source "drivers/gpu/drm/bridge/analogix/Kconfig"
source "drivers/gpu/drm/bridge/adv7511/Kconfig"
diff --git a/drivers/gpu/drm/bridge/Makefile b/drivers/gpu/drm/bridge/Makefile
index cdf3a3cf765d..2e83a7855399 100644
--- a/drivers/gpu/drm/bridge/Makefile
+++ b/drivers/gpu/drm/bridge/Makefile
@@ -4,9 +4,12 @@ obj-$(CONFIG_DRM_ANALOGIX_ANX78XX) += analogix-anx78xx.o
obj-$(CONFIG_DRM_DUMB_VGA_DAC) += dumb-vga-dac.o
obj-$(CONFIG_DRM_DW_HDMI) += dw-hdmi.o
obj-$(CONFIG_DRM_DW_HDMI_AHB_AUDIO) += dw-hdmi-ahb-audio.o
+obj-$(CONFIG_DRM_DW_HDMI_I2S_AUDIO) += dw-hdmi-i2s-audio.o
obj-$(CONFIG_DRM_NXP_PTN3460) += nxp-ptn3460.o
obj-$(CONFIG_DRM_PARADE_PS8622) += parade-ps8622.o
+obj-$(CONFIG_DRM_SIL_SII8620) += sil-sii8620.o
obj-$(CONFIG_DRM_SII902X) += sii902x.o
obj-$(CONFIG_DRM_TOSHIBA_TC358767) += tc358767.o
obj-$(CONFIG_DRM_ANALOGIX_DP) += analogix/
obj-$(CONFIG_DRM_I2C_ADV7511) += adv7511/
+obj-$(CONFIG_DRM_TI_TFP410) += ti-tfp410.o
diff --git a/drivers/gpu/drm/bridge/adv7511/Kconfig b/drivers/gpu/drm/bridge/adv7511/Kconfig
index d2b0499ab7d7..2fed567f9943 100644
--- a/drivers/gpu/drm/bridge/adv7511/Kconfig
+++ b/drivers/gpu/drm/bridge/adv7511/Kconfig
@@ -6,6 +6,14 @@ config DRM_I2C_ADV7511
help
Support for the Analog Device ADV7511(W) and ADV7513 HDMI encoders.
+config DRM_I2C_ADV7511_AUDIO
+ bool "ADV7511 HDMI Audio driver"
+ depends on DRM_I2C_ADV7511 && SND_SOC
+ select SND_SOC_HDMI_CODEC
+ help
+ Support the ADV7511 HDMI Audio interface. This is used in
+ conjunction with the ADV7511 HDMI driver.
+
config DRM_I2C_ADV7533
bool "ADV7533 encoder"
depends on DRM_I2C_ADV7511
diff --git a/drivers/gpu/drm/bridge/adv7511/Makefile b/drivers/gpu/drm/bridge/adv7511/Makefile
index 9019327fff4c..5ba675534f6e 100644
--- a/drivers/gpu/drm/bridge/adv7511/Makefile
+++ b/drivers/gpu/drm/bridge/adv7511/Makefile
@@ -1,3 +1,4 @@
adv7511-y := adv7511_drv.o
+adv7511-$(CONFIG_DRM_I2C_ADV7511_AUDIO) += adv7511_audio.o
adv7511-$(CONFIG_DRM_I2C_ADV7533) += adv7533.o
obj-$(CONFIG_DRM_I2C_ADV7511) += adv7511.o
diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511.h b/drivers/gpu/drm/bridge/adv7511/adv7511.h
index 161c923d6162..992d76ce02bb 100644
--- a/drivers/gpu/drm/bridge/adv7511/adv7511.h
+++ b/drivers/gpu/drm/bridge/adv7511/adv7511.h
@@ -309,6 +309,8 @@ struct adv7511 {
struct drm_display_mode curr_mode;
unsigned int f_tmds;
+ unsigned int f_audio;
+ unsigned int audio_source;
unsigned int current_edid_segment;
uint8_t edid_buf[256];
@@ -334,6 +336,7 @@ struct adv7511 {
bool use_timing_gen;
enum adv7511_type type;
+ struct platform_device *audio_pdev;
};
#ifdef CONFIG_DRM_I2C_ADV7533
@@ -389,4 +392,17 @@ static inline int adv7533_parse_dt(struct device_node *np, struct adv7511 *adv)
}
#endif
+#ifdef CONFIG_DRM_I2C_ADV7511_AUDIO
+int adv7511_audio_init(struct device *dev, struct adv7511 *adv7511);
+void adv7511_audio_exit(struct adv7511 *adv7511);
+#else /* CONFIG_DRM_I2C_ADV7511_AUDIO */
+static inline int adv7511_audio_init(struct device *dev, struct adv7511 *adv7511)
+{
+ return 0;
+}
+static inline void adv7511_audio_exit(struct adv7511 *adv7511)
+{
+}
+#endif /* CONFIG_DRM_I2C_ADV7511_AUDIO */
+
#endif /* __DRM_I2C_ADV7511_H__ */
diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511_audio.c b/drivers/gpu/drm/bridge/adv7511/adv7511_audio.c
new file mode 100644
index 000000000000..cf92ebfe6ab7
--- /dev/null
+++ b/drivers/gpu/drm/bridge/adv7511/adv7511_audio.c
@@ -0,0 +1,213 @@
+/*
+ * Analog Devices ADV7511 HDMI transmitter driver
+ *
+ * Copyright 2012 Analog Devices Inc.
+ * Copyright (c) 2016, Linaro Limited
+ *
+ * Licensed under the GPL-2.
+ */
+
+#include <sound/core.h>
+#include <sound/hdmi-codec.h>
+#include <sound/pcm.h>
+#include <sound/soc.h>
+
+#include "adv7511.h"
+
+static void adv7511_calc_cts_n(unsigned int f_tmds, unsigned int fs,
+ unsigned int *cts, unsigned int *n)
+{
+ switch (fs) {
+ case 32000:
+ *n = 4096;
+ break;
+ case 44100:
+ *n = 6272;
+ break;
+ case 48000:
+ *n = 6144;
+ break;
+ }
+
+ *cts = ((f_tmds * *n) / (128 * fs)) * 1000;
+}
+
+static int adv7511_update_cts_n(struct adv7511 *adv7511)
+{
+ unsigned int cts = 0;
+ unsigned int n = 0;
+
+ adv7511_calc_cts_n(adv7511->f_tmds, adv7511->f_audio, &cts, &n);
+
+ regmap_write(adv7511->regmap, ADV7511_REG_N0, (n >> 16) & 0xf);
+ regmap_write(adv7511->regmap, ADV7511_REG_N1, (n >> 8) & 0xff);
+ regmap_write(adv7511->regmap, ADV7511_REG_N2, n & 0xff);
+
+ regmap_write(adv7511->regmap, ADV7511_REG_CTS_MANUAL0,
+ (cts >> 16) & 0xf);
+ regmap_write(adv7511->regmap, ADV7511_REG_CTS_MANUAL1,
+ (cts >> 8) & 0xff);
+ regmap_write(adv7511->regmap, ADV7511_REG_CTS_MANUAL2,
+ cts & 0xff);
+
+ return 0;
+}
+
+int adv7511_hdmi_hw_params(struct device *dev, void *data,
+ struct hdmi_codec_daifmt *fmt,
+ struct hdmi_codec_params *hparms)
+{
+ struct adv7511 *adv7511 = dev_get_drvdata(dev);
+ unsigned int audio_source, i2s_format = 0;
+ unsigned int invert_clock;
+ unsigned int rate;
+ unsigned int len;
+
+ switch (hparms->sample_rate) {
+ case 32000:
+ rate = ADV7511_SAMPLE_FREQ_32000;
+ break;
+ case 44100:
+ rate = ADV7511_SAMPLE_FREQ_44100;
+ break;
+ case 48000:
+ rate = ADV7511_SAMPLE_FREQ_48000;
+ break;
+ case 88200:
+ rate = ADV7511_SAMPLE_FREQ_88200;
+ break;
+ case 96000:
+ rate = ADV7511_SAMPLE_FREQ_96000;
+ break;
+ case 176400:
+ rate = ADV7511_SAMPLE_FREQ_176400;
+ break;
+ case 192000:
+ rate = ADV7511_SAMPLE_FREQ_192000;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (hparms->sample_width) {
+ case 16:
+ len = ADV7511_I2S_SAMPLE_LEN_16;
+ break;
+ case 18:
+ len = ADV7511_I2S_SAMPLE_LEN_18;
+ break;
+ case 20:
+ len = ADV7511_I2S_SAMPLE_LEN_20;
+ break;
+ case 24:
+ len = ADV7511_I2S_SAMPLE_LEN_24;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (fmt->fmt) {
+ case HDMI_I2S:
+ audio_source = ADV7511_AUDIO_SOURCE_I2S;
+ i2s_format = ADV7511_I2S_FORMAT_I2S;
+ break;
+ case HDMI_RIGHT_J:
+ audio_source = ADV7511_AUDIO_SOURCE_I2S;
+ i2s_format = ADV7511_I2S_FORMAT_RIGHT_J;
+ break;
+ case HDMI_LEFT_J:
+ audio_source = ADV7511_AUDIO_SOURCE_I2S;
+ i2s_format = ADV7511_I2S_FORMAT_LEFT_J;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ invert_clock = fmt->bit_clk_inv;
+
+ regmap_update_bits(adv7511->regmap, ADV7511_REG_AUDIO_SOURCE, 0x70,
+ audio_source << 4);
+ regmap_update_bits(adv7511->regmap, ADV7511_REG_AUDIO_CONFIG, BIT(6),
+ invert_clock << 6);
+ regmap_update_bits(adv7511->regmap, ADV7511_REG_I2S_CONFIG, 0x03,
+ i2s_format);
+
+ adv7511->audio_source = audio_source;
+
+ adv7511->f_audio = hparms->sample_rate;
+
+ adv7511_update_cts_n(adv7511);
+
+ regmap_update_bits(adv7511->regmap, ADV7511_REG_AUDIO_CFG3,
+ ADV7511_AUDIO_CFG3_LEN_MASK, len);
+ regmap_update_bits(adv7511->regmap, ADV7511_REG_I2C_FREQ_ID_CFG,
+ ADV7511_I2C_FREQ_ID_CFG_RATE_MASK, rate << 4);
+ regmap_write(adv7511->regmap, 0x73, 0x1);
+
+ return 0;
+}
+
+static int audio_startup(struct device *dev, void *data)
+{
+ struct adv7511 *adv7511 = dev_get_drvdata(dev);
+
+ regmap_update_bits(adv7511->regmap, ADV7511_REG_AUDIO_CONFIG,
+ BIT(7), 0);
+
+ /* hide Audio infoframe updates */
+ regmap_update_bits(adv7511->regmap, ADV7511_REG_INFOFRAME_UPDATE,
+ BIT(5), BIT(5));
+ /* enable N/CTS, enable Audio sample packets */
+ regmap_update_bits(adv7511->regmap, ADV7511_REG_PACKET_ENABLE1,
+ BIT(5), BIT(5));
+ /* enable N/CTS */
+ regmap_update_bits(adv7511->regmap, ADV7511_REG_PACKET_ENABLE1,
+ BIT(6), BIT(6));
+ /* not copyrighted */
+ regmap_update_bits(adv7511->regmap, ADV7511_REG_AUDIO_CFG1,
+ BIT(5), BIT(5));
+ /* enable audio infoframes */
+ regmap_update_bits(adv7511->regmap, ADV7511_REG_PACKET_ENABLE1,
+ BIT(3), BIT(3));
+ /* AV mute disable */
+ regmap_update_bits(adv7511->regmap, ADV7511_REG_GC(0),
+ BIT(7) | BIT(6), BIT(7));
+ /* use Audio infoframe updated info */
+ regmap_update_bits(adv7511->regmap, ADV7511_REG_GC(1),
+ BIT(5), 0);
+ return 0;
+}
+
+static void audio_shutdown(struct device *dev, void *data)
+{
+}
+
+static const struct hdmi_codec_ops adv7511_codec_ops = {
+ .hw_params = adv7511_hdmi_hw_params,
+ .audio_shutdown = audio_shutdown,
+ .audio_startup = audio_startup,
+};
+
+static struct hdmi_codec_pdata codec_data = {
+ .ops = &adv7511_codec_ops,
+ .max_i2s_channels = 2,
+ .i2s = 1,
+};
+
+int adv7511_audio_init(struct device *dev, struct adv7511 *adv7511)
+{
+ adv7511->audio_pdev = platform_device_register_data(dev,
+ HDMI_CODEC_DRV_NAME,
+ PLATFORM_DEVID_AUTO,
+ &codec_data,
+ sizeof(codec_data));
+ return PTR_ERR_OR_ZERO(adv7511->audio_pdev);
+}
+
+void adv7511_audio_exit(struct adv7511 *adv7511)
+{
+ if (adv7511->audio_pdev) {
+ platform_device_unregister(adv7511->audio_pdev);
+ adv7511->audio_pdev = NULL;
+ }
+}
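
adv7511_calc_cts_n() above implements the HDMI audio clock regeneration relationship fs = f_TMDS * N / (128 * CTS), with the spec-recommended N values for 32/44.1/48 kHz and f_tmds apparently kept in kHz (hence the trailing * 1000). A standalone check of the arithmetic (plain C; the 1080p60 clock is an illustrative input):

	#include <stdio.h>

	int main(void)
	{
		unsigned int f_tmds = 148500;	/* TMDS clock in kHz (1080p60) */
		unsigned int fs = 48000;	/* audio sample rate in Hz */
		unsigned int n = 6144;		/* HDMI spec value for 48 kHz */
		unsigned int cts = ((f_tmds * n) / (128 * fs)) * 1000;

		/*
		 * Exact CTS would be 148500000 * 6144 / (128 * 48000) = 148500;
		 * the kHz-domain division truncates to 148 before the final
		 * * 1000, so this prints CTS=148000. The driver accepts this
		 * rounding.
		 */
		printf("N=%u CTS=%u\n", n, cts);
		return 0;
	}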
diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
index 8ed3906dd411..8dba729f6ef9 100644
--- a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
+++ b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
@@ -1037,6 +1037,8 @@ static int adv7511_probe(struct i2c_client *i2c, const struct i2c_device_id *id)
goto err_unregister_cec;
}
+ adv7511_audio_init(dev, adv7511);
+
return 0;
err_unregister_cec:
@@ -1058,6 +1060,8 @@ static int adv7511_remove(struct i2c_client *i2c)
drm_bridge_remove(&adv7511->bridge);
+ adv7511_audio_exit(adv7511);
+
i2c_unregister_device(adv7511->i2c_edid);
kfree(adv7511->edid);
diff --git a/drivers/gpu/drm/bridge/adv7511/adv7533.c b/drivers/gpu/drm/bridge/adv7511/adv7533.c
index d7f7b7ce8ebe..8b210373cfa2 100644
--- a/drivers/gpu/drm/bridge/adv7511/adv7533.c
+++ b/drivers/gpu/drm/bridge/adv7511/adv7533.c
@@ -29,6 +29,7 @@ static const struct reg_sequence adv7533_cec_fixed_registers[] = {
{ 0x17, 0xd0 },
{ 0x24, 0x20 },
{ 0x57, 0x11 },
+ { 0x05, 0xc8 },
};
static const struct regmap_config adv7533_cec_regmap_config = {
diff --git a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
index 6e0447f329a2..eb9bf8786c24 100644
--- a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
+++ b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
@@ -112,7 +112,7 @@ int analogix_dp_enable_psr(struct device *dev)
struct edp_vsc_psr psr_vsc;
if (!dp->psr_support)
- return -EINVAL;
+ return 0;
/* Prepare VSC packet as per EDP 1.4 spec, Table 6.9 */
memset(&psr_vsc, 0, sizeof(psr_vsc));
@@ -135,7 +135,7 @@ int analogix_dp_disable_psr(struct device *dev)
struct edp_vsc_psr psr_vsc;
if (!dp->psr_support)
- return -EINVAL;
+ return 0;
/* Prepare VSC packet as per EDP 1.4 spec, Table 6.9 */
memset(&psr_vsc, 0, sizeof(psr_vsc));
diff --git a/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.c b/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.c
index cd37ac058675..303083ad28e3 100644
--- a/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.c
+++ b/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.c
@@ -1162,5 +1162,5 @@ ssize_t analogix_dp_transfer(struct analogix_dp_device *dp,
(msg->request & ~DP_AUX_I2C_MOT) == DP_AUX_NATIVE_READ)
msg->reply = DP_AUX_NATIVE_REPLY_ACK;
- return num_transferred;
+ return num_transferred > 0 ? num_transferred : -EBUSY;
}
diff --git a/drivers/gpu/drm/bridge/dumb-vga-dac.c b/drivers/gpu/drm/bridge/dumb-vga-dac.c
index afec232185a7..e5706981c934 100644
--- a/drivers/gpu/drm/bridge/dumb-vga-dac.c
+++ b/drivers/gpu/drm/bridge/dumb-vga-dac.c
@@ -12,6 +12,7 @@
#include <linux/module.h>
#include <linux/of_graph.h>
+#include <linux/regulator/consumer.h>
#include <drm/drmP.h>
#include <drm/drm_atomic_helper.h>
@@ -23,6 +24,7 @@ struct dumb_vga {
struct drm_connector connector;
struct i2c_adapter *ddc;
+ struct regulator *vdd;
};
static inline struct dumb_vga *
@@ -124,8 +126,30 @@ static int dumb_vga_attach(struct drm_bridge *bridge)
return 0;
}
+static void dumb_vga_enable(struct drm_bridge *bridge)
+{
+ struct dumb_vga *vga = drm_bridge_to_dumb_vga(bridge);
+ int ret = 0;
+
+ if (vga->vdd)
+ ret = regulator_enable(vga->vdd);
+
+ if (ret)
+ DRM_ERROR("Failed to enable vdd regulator: %d\n", ret);
+}
+
+static void dumb_vga_disable(struct drm_bridge *bridge)
+{
+ struct dumb_vga *vga = drm_bridge_to_dumb_vga(bridge);
+
+ if (vga->vdd)
+ regulator_disable(vga->vdd);
+}
+
static const struct drm_bridge_funcs dumb_vga_bridge_funcs = {
.attach = dumb_vga_attach,
+ .enable = dumb_vga_enable,
+ .disable = dumb_vga_disable,
};
static struct i2c_adapter *dumb_vga_retrieve_ddc(struct device *dev)
@@ -169,6 +193,15 @@ static int dumb_vga_probe(struct platform_device *pdev)
return -ENOMEM;
platform_set_drvdata(pdev, vga);
+ vga->vdd = devm_regulator_get_optional(&pdev->dev, "vdd");
+ if (IS_ERR(vga->vdd)) {
+ ret = PTR_ERR(vga->vdd);
+ if (ret == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+ vga->vdd = NULL;
+ dev_dbg(&pdev->dev, "No vdd regulator found: %d\n", ret);
+ }
+
vga->ddc = dumb_vga_retrieve_ddc(&pdev->dev);
if (IS_ERR(vga->ddc)) {
if (PTR_ERR(vga->ddc) == -ENODEV) {
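
The dumb-vga-dac additions use the stock optional-supply idiom: devm_regulator_get_optional() returns an ERR_PTR when no "vdd" supply is described, probe defers only on -EPROBE_DEFER, and the enable/disable paths are gated on the pointer being non-NULL. The lookup half of the idiom, as a sketch:

	#include <linux/err.h>
	#include <linux/regulator/consumer.h>

	static int get_optional_vdd_sketch(struct device *dev,
					   struct regulator **out)
	{
		struct regulator *vdd = devm_regulator_get_optional(dev, "vdd");

		if (IS_ERR(vdd)) {
			if (PTR_ERR(vdd) == -EPROBE_DEFER)
				return -EPROBE_DEFER;	/* supply not ready yet */
			vdd = NULL;	/* genuinely absent: run without it */
		}

		*out = vdd;
		return 0;
	}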
diff --git a/drivers/gpu/drm/bridge/dw-hdmi-audio.h b/drivers/gpu/drm/bridge/dw-hdmi-audio.h
index 91f631beecc7..fd1f745c6073 100644
--- a/drivers/gpu/drm/bridge/dw-hdmi-audio.h
+++ b/drivers/gpu/drm/bridge/dw-hdmi-audio.h
@@ -11,4 +11,11 @@ struct dw_hdmi_audio_data {
u8 *eld;
};
+struct dw_hdmi_i2s_audio_data {
+ struct dw_hdmi *hdmi;
+
+ void (*write)(struct dw_hdmi *hdmi, u8 val, int offset);
+ u8 (*read)(struct dw_hdmi *hdmi, int offset);
+};
+
#endif
diff --git a/drivers/gpu/drm/bridge/dw-hdmi-i2s-audio.c b/drivers/gpu/drm/bridge/dw-hdmi-i2s-audio.c
new file mode 100644
index 000000000000..aaf287d2e91d
--- /dev/null
+++ b/drivers/gpu/drm/bridge/dw-hdmi-i2s-audio.c
@@ -0,0 +1,141 @@
+/*
+ * dw-hdmi-i2s-audio.c
+ *
+ * Copyright (c) 2016 Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <drm/bridge/dw_hdmi.h>
+
+#include <sound/hdmi-codec.h>
+
+#include "dw-hdmi.h"
+#include "dw-hdmi-audio.h"
+
+#define DRIVER_NAME "dw-hdmi-i2s-audio"
+
+static inline void hdmi_write(struct dw_hdmi_i2s_audio_data *audio,
+ u8 val, int offset)
+{
+ struct dw_hdmi *hdmi = audio->hdmi;
+
+ audio->write(hdmi, val, offset);
+}
+
+static inline u8 hdmi_read(struct dw_hdmi_i2s_audio_data *audio, int offset)
+{
+ struct dw_hdmi *hdmi = audio->hdmi;
+
+ return audio->read(hdmi, offset);
+}
+
+static int dw_hdmi_i2s_hw_params(struct device *dev, void *data,
+ struct hdmi_codec_daifmt *fmt,
+ struct hdmi_codec_params *hparms)
+{
+ struct dw_hdmi_i2s_audio_data *audio = data;
+ struct dw_hdmi *hdmi = audio->hdmi;
+ u8 conf0 = 0;
+ u8 conf1 = 0;
+ u8 inputclkfs = 0;
+
+ /* only I2S is supported here */
+ if ((fmt->fmt != HDMI_I2S) ||
+ (fmt->bit_clk_master | fmt->frame_clk_master)) {
+ dev_err(dev, "unsupported format/settings\n");
+ return -EINVAL;
+ }
+
+ inputclkfs = HDMI_AUD_INPUTCLKFS_64FS;
+ conf0 = HDMI_AUD_CONF0_I2S_ALL_ENABLE;
+
+ switch (hparms->sample_width) {
+ case 16:
+ conf1 = HDMI_AUD_CONF1_WIDTH_16;
+ break;
+ case 24:
+ case 32:
+ conf1 = HDMI_AUD_CONF1_WIDTH_24;
+ break;
+ }
+
+ dw_hdmi_set_sample_rate(hdmi, hparms->sample_rate);
+
+ hdmi_write(audio, inputclkfs, HDMI_AUD_INPUTCLKFS);
+ hdmi_write(audio, conf0, HDMI_AUD_CONF0);
+ hdmi_write(audio, conf1, HDMI_AUD_CONF1);
+
+ dw_hdmi_audio_enable(hdmi);
+
+ return 0;
+}
+
+static void dw_hdmi_i2s_audio_shutdown(struct device *dev, void *data)
+{
+ struct dw_hdmi_i2s_audio_data *audio = data;
+ struct dw_hdmi *hdmi = audio->hdmi;
+
+ dw_hdmi_audio_disable(hdmi);
+
+ hdmi_write(audio, HDMI_AUD_CONF0_SW_RESET, HDMI_AUD_CONF0);
+}
+
+static struct hdmi_codec_ops dw_hdmi_i2s_ops = {
+ .hw_params = dw_hdmi_i2s_hw_params,
+ .audio_shutdown = dw_hdmi_i2s_audio_shutdown,
+};
+
+static int snd_dw_hdmi_probe(struct platform_device *pdev)
+{
+ struct dw_hdmi_i2s_audio_data *audio = pdev->dev.platform_data;
+ struct platform_device_info pdevinfo;
+ struct hdmi_codec_pdata pdata;
+ struct platform_device *platform;
+
+ pdata.ops = &dw_hdmi_i2s_ops;
+ pdata.i2s = 1;
+ pdata.max_i2s_channels = 6;
+ pdata.data = audio;
+
+ memset(&pdevinfo, 0, sizeof(pdevinfo));
+ pdevinfo.parent = pdev->dev.parent;
+ pdevinfo.id = PLATFORM_DEVID_AUTO;
+ pdevinfo.name = HDMI_CODEC_DRV_NAME;
+ pdevinfo.data = &pdata;
+ pdevinfo.size_data = sizeof(pdata);
+ pdevinfo.dma_mask = DMA_BIT_MASK(32);
+
+ platform = platform_device_register_full(&pdevinfo);
+ if (IS_ERR(platform))
+ return PTR_ERR(platform);
+
+ dev_set_drvdata(&pdev->dev, platform);
+
+ return 0;
+}
+
+static int snd_dw_hdmi_remove(struct platform_device *pdev)
+{
+ struct platform_device *platform = dev_get_drvdata(&pdev->dev);
+
+ platform_device_unregister(platform);
+
+ return 0;
+}
+
+static struct platform_driver snd_dw_hdmi_driver = {
+ .probe = snd_dw_hdmi_probe,
+ .remove = snd_dw_hdmi_remove,
+ .driver = {
+ .name = DRIVER_NAME,
+ .owner = THIS_MODULE,
+ },
+};
+module_platform_driver(snd_dw_hdmi_driver);
+
+MODULE_AUTHOR("Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>");
+MODULE_DESCRIPTION("Synopsis Designware HDMI I2S ALSA SoC interface");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:" DRIVER_NAME);
diff --git a/drivers/gpu/drm/bridge/dw-hdmi.c b/drivers/gpu/drm/bridge/dw-hdmi.c
index ab7023e5dfde..235ce7d1583d 100644
--- a/drivers/gpu/drm/bridge/dw-hdmi.c
+++ b/drivers/gpu/drm/bridge/dw-hdmi.c
@@ -1,14 +1,15 @@
/*
+ * DesignWare High-Definition Multimedia Interface (HDMI) driver
+ *
+ * Copyright (C) 2013-2015 Mentor Graphics Inc.
* Copyright (C) 2011-2013 Freescale Semiconductor, Inc.
+ * Copyright (C) 2010, Guennadi Liakhovetski <g.liakhovetski@gmx.de>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
- * Designware High-Definition Multimedia Interface (HDMI) driver
- *
- * Copyright (C) 2010, Guennadi Liakhovetski <g.liakhovetski@gmx.de>
*/
#include <linux/module.h>
#include <linux/irq.h>
@@ -101,6 +102,17 @@ struct hdmi_data_info {
struct hdmi_vmode video_mode;
};
+struct dw_hdmi_i2c {
+ struct i2c_adapter adap;
+
+ struct mutex lock; /* used to serialize data transfers */
+ struct completion cmp;
+ u8 stat;
+
+ u8 slave_reg;
+ bool is_regaddr;
+};
+
struct dw_hdmi {
struct drm_connector connector;
struct drm_encoder *encoder;
@@ -111,6 +123,7 @@ struct dw_hdmi {
struct device *dev;
struct clk *isfr_clk;
struct clk *iahb_clk;
+ struct dw_hdmi_i2c *i2c;
struct hdmi_data_info hdmi_data;
const struct dw_hdmi_plat_data *plat_data;
@@ -198,6 +211,201 @@ static void hdmi_mask_writeb(struct dw_hdmi *hdmi, u8 data, unsigned int reg,
hdmi_modb(hdmi, data << shift, mask, reg);
}
+static void dw_hdmi_i2c_init(struct dw_hdmi *hdmi)
+{
+ /* Software reset */
+ hdmi_writeb(hdmi, 0x00, HDMI_I2CM_SOFTRSTZ);
+
+ /* Set Standard Mode speed (determined to be 100 kHz on iMX6) */
+ hdmi_writeb(hdmi, 0x00, HDMI_I2CM_DIV);
+
+ /* Set done, not acknowledged and arbitration interrupt polarities */
+ hdmi_writeb(hdmi, HDMI_I2CM_INT_DONE_POL, HDMI_I2CM_INT);
+ hdmi_writeb(hdmi, HDMI_I2CM_CTLINT_NAC_POL | HDMI_I2CM_CTLINT_ARB_POL,
+ HDMI_I2CM_CTLINT);
+
+ /* Clear DONE and ERROR interrupts */
+ hdmi_writeb(hdmi, HDMI_IH_I2CM_STAT0_ERROR | HDMI_IH_I2CM_STAT0_DONE,
+ HDMI_IH_I2CM_STAT0);
+
+ /* Mute DONE and ERROR interrupts */
+ hdmi_writeb(hdmi, HDMI_IH_I2CM_STAT0_ERROR | HDMI_IH_I2CM_STAT0_DONE,
+ HDMI_IH_MUTE_I2CM_STAT0);
+}
+
+static int dw_hdmi_i2c_read(struct dw_hdmi *hdmi,
+ unsigned char *buf, unsigned int length)
+{
+ struct dw_hdmi_i2c *i2c = hdmi->i2c;
+ int stat;
+
+ if (!i2c->is_regaddr) {
+ dev_dbg(hdmi->dev, "set read register address to 0\n");
+ i2c->slave_reg = 0x00;
+ i2c->is_regaddr = true;
+ }
+
+ while (length--) {
+ reinit_completion(&i2c->cmp);
+
+ hdmi_writeb(hdmi, i2c->slave_reg++, HDMI_I2CM_ADDRESS);
+ hdmi_writeb(hdmi, HDMI_I2CM_OPERATION_READ,
+ HDMI_I2CM_OPERATION);
+
+ stat = wait_for_completion_timeout(&i2c->cmp, HZ / 10);
+ if (!stat)
+ return -EAGAIN;
+
+ /* Check for error condition on the bus */
+ if (i2c->stat & HDMI_IH_I2CM_STAT0_ERROR)
+ return -EIO;
+
+ *buf++ = hdmi_readb(hdmi, HDMI_I2CM_DATAI);
+ }
+
+ return 0;
+}
+
+static int dw_hdmi_i2c_write(struct dw_hdmi *hdmi,
+ unsigned char *buf, unsigned int length)
+{
+ struct dw_hdmi_i2c *i2c = hdmi->i2c;
+ int stat;
+
+ if (!i2c->is_regaddr) {
+ /* Use the first write byte as register address */
+ i2c->slave_reg = buf[0];
+ length--;
+ buf++;
+ i2c->is_regaddr = true;
+ }
+
+ while (length--) {
+ reinit_completion(&i2c->cmp);
+
+ hdmi_writeb(hdmi, *buf++, HDMI_I2CM_DATAO);
+ hdmi_writeb(hdmi, i2c->slave_reg++, HDMI_I2CM_ADDRESS);
+ hdmi_writeb(hdmi, HDMI_I2CM_OPERATION_WRITE,
+ HDMI_I2CM_OPERATION);
+
+ stat = wait_for_completion_timeout(&i2c->cmp, HZ / 10);
+ if (!stat)
+ return -EAGAIN;
+
+ /* Check for error condition on the bus */
+ if (i2c->stat & HDMI_IH_I2CM_STAT0_ERROR)
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int dw_hdmi_i2c_xfer(struct i2c_adapter *adap,
+ struct i2c_msg *msgs, int num)
+{
+ struct dw_hdmi *hdmi = i2c_get_adapdata(adap);
+ struct dw_hdmi_i2c *i2c = hdmi->i2c;
+ u8 addr = msgs[0].addr;
+ int i, ret = 0;
+
+ dev_dbg(hdmi->dev, "xfer: num: %d, addr: %#x\n", num, addr);
+
+ for (i = 0; i < num; i++) {
+ if (msgs[i].addr != addr) {
+ dev_warn(hdmi->dev,
+ "unsupported transfer, changed slave address\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (msgs[i].len == 0) {
+ dev_dbg(hdmi->dev,
+ "unsupported transfer %d/%d, no data\n",
+ i + 1, num);
+ return -EOPNOTSUPP;
+ }
+ }
+
+ mutex_lock(&i2c->lock);
+
+ /* Unmute DONE and ERROR interrupts */
+ hdmi_writeb(hdmi, 0x00, HDMI_IH_MUTE_I2CM_STAT0);
+
+ /* Set slave device address taken from the first I2C message */
+ hdmi_writeb(hdmi, addr, HDMI_I2CM_SLAVE);
+
+ /* Set slave device register address on transfer */
+ i2c->is_regaddr = false;
+
+ for (i = 0; i < num; i++) {
+ dev_dbg(hdmi->dev, "xfer: num: %d/%d, len: %d, flags: %#x\n",
+ i + 1, num, msgs[i].len, msgs[i].flags);
+
+ if (msgs[i].flags & I2C_M_RD)
+ ret = dw_hdmi_i2c_read(hdmi, msgs[i].buf, msgs[i].len);
+ else
+ ret = dw_hdmi_i2c_write(hdmi, msgs[i].buf, msgs[i].len);
+
+ if (ret < 0)
+ break;
+ }
+
+ if (!ret)
+ ret = num;
+
+ /* Mute DONE and ERROR interrupts */
+ hdmi_writeb(hdmi, HDMI_IH_I2CM_STAT0_ERROR | HDMI_IH_I2CM_STAT0_DONE,
+ HDMI_IH_MUTE_I2CM_STAT0);
+
+ mutex_unlock(&i2c->lock);
+
+ return ret;
+}
+
+static u32 dw_hdmi_i2c_func(struct i2c_adapter *adapter)
+{
+ return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
+}
+
+static const struct i2c_algorithm dw_hdmi_algorithm = {
+ .master_xfer = dw_hdmi_i2c_xfer,
+ .functionality = dw_hdmi_i2c_func,
+};
+
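+/*
+ * Create and register an I2C adapter on top of the internal DDC master.
+ * It serves as a fallback when no ddc-i2c-bus is given in the device tree.
+ */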
+static struct i2c_adapter *dw_hdmi_i2c_adapter(struct dw_hdmi *hdmi)
+{
+ struct i2c_adapter *adap;
+ struct dw_hdmi_i2c *i2c;
+ int ret;
+
+ i2c = devm_kzalloc(hdmi->dev, sizeof(*i2c), GFP_KERNEL);
+ if (!i2c)
+ return ERR_PTR(-ENOMEM);
+
+ mutex_init(&i2c->lock);
+ init_completion(&i2c->cmp);
+
+ adap = &i2c->adap;
+ adap->class = I2C_CLASS_DDC;
+ adap->owner = THIS_MODULE;
+ adap->dev.parent = hdmi->dev;
+ adap->algo = &dw_hdmi_algorithm;
+ strlcpy(adap->name, "DesignWare HDMI", sizeof(adap->name));
+ i2c_set_adapdata(adap, hdmi);
+
+ ret = i2c_add_adapter(adap);
+ if (ret) {
+ dev_warn(hdmi->dev, "cannot add %s I2C adapter\n", adap->name);
+ devm_kfree(hdmi->dev, i2c);
+ return ERR_PTR(ret);
+ }
+
+ hdmi->i2c = i2c;
+
+ dev_info(hdmi->dev, "registered %s I2C bus driver\n", adap->name);
+
+ return adap;
+}
+
static void hdmi_set_cts_n(struct dw_hdmi *hdmi, unsigned int cts,
unsigned int n)
{
@@ -1512,16 +1720,40 @@ static const struct drm_bridge_funcs dw_hdmi_bridge_funcs = {
.mode_set = dw_hdmi_bridge_mode_set,
};
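+/*
+ * Hard IRQ handling for the DDC master: latch the I2CM status for the
+ * sleeping transfer routine, acknowledge it, and complete the transfer.
+ */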
+static irqreturn_t dw_hdmi_i2c_irq(struct dw_hdmi *hdmi)
+{
+ struct dw_hdmi_i2c *i2c = hdmi->i2c;
+ unsigned int stat;
+
+ stat = hdmi_readb(hdmi, HDMI_IH_I2CM_STAT0);
+ if (!stat)
+ return IRQ_NONE;
+
+ hdmi_writeb(hdmi, stat, HDMI_IH_I2CM_STAT0);
+
+ i2c->stat = stat;
+
+ complete(&i2c->cmp);
+
+ return IRQ_HANDLED;
+}
+
static irqreturn_t dw_hdmi_hardirq(int irq, void *dev_id)
{
struct dw_hdmi *hdmi = dev_id;
u8 intr_stat;
+ irqreturn_t ret = IRQ_NONE;
+
+ if (hdmi->i2c)
+ ret = dw_hdmi_i2c_irq(hdmi);
intr_stat = hdmi_readb(hdmi, HDMI_IH_PHY_STAT0);
- if (intr_stat)
+ if (intr_stat) {
hdmi_writeb(hdmi, ~0, HDMI_IH_MUTE_PHY_STAT0);
+ return IRQ_WAKE_THREAD;
+ }
- return intr_stat ? IRQ_WAKE_THREAD : IRQ_NONE;
+ return ret;
}
static irqreturn_t dw_hdmi_irq(int irq, void *dev_id)
@@ -1639,10 +1871,11 @@ int dw_hdmi_bind(struct device *dev, struct device *master,
struct device_node *np = dev->of_node;
struct platform_device_info pdevinfo;
struct device_node *ddc_node;
- struct dw_hdmi_audio_data audio;
struct dw_hdmi *hdmi;
int ret;
u32 val = 1;
+ u8 config0;
+ u8 config1;
hdmi = devm_kzalloc(dev, sizeof(*hdmi), GFP_KERNEL);
if (!hdmi)
@@ -1681,7 +1914,7 @@ int dw_hdmi_bind(struct device *dev, struct device *master,
ddc_node = of_parse_phandle(np, "ddc-i2c-bus", 0);
if (ddc_node) {
- hdmi->ddc = of_find_i2c_adapter_by_node(ddc_node);
+ hdmi->ddc = of_get_i2c_adapter_by_node(ddc_node);
of_node_put(ddc_node);
if (!hdmi->ddc) {
dev_dbg(hdmi->dev, "failed to read ddc node\n");
@@ -1693,20 +1926,22 @@ int dw_hdmi_bind(struct device *dev, struct device *master,
}
hdmi->regs = devm_ioremap_resource(dev, iores);
- if (IS_ERR(hdmi->regs))
- return PTR_ERR(hdmi->regs);
+ if (IS_ERR(hdmi->regs)) {
+ ret = PTR_ERR(hdmi->regs);
+ goto err_res;
+ }
hdmi->isfr_clk = devm_clk_get(hdmi->dev, "isfr");
if (IS_ERR(hdmi->isfr_clk)) {
ret = PTR_ERR(hdmi->isfr_clk);
dev_err(hdmi->dev, "Unable to get HDMI isfr clk: %d\n", ret);
- return ret;
+ goto err_res;
}
ret = clk_prepare_enable(hdmi->isfr_clk);
if (ret) {
dev_err(hdmi->dev, "Cannot enable HDMI isfr clock: %d\n", ret);
- return ret;
+ goto err_res;
}
hdmi->iahb_clk = devm_clk_get(hdmi->dev, "iahb");
@@ -1744,6 +1979,13 @@ int dw_hdmi_bind(struct device *dev, struct device *master,
*/
hdmi_init_clk_regenerator(hdmi);
+ /* If DDC bus is not specified, try to register HDMI I2C bus */
+ if (!hdmi->ddc) {
+ hdmi->ddc = dw_hdmi_i2c_adapter(hdmi);
+ if (IS_ERR(hdmi->ddc))
+ hdmi->ddc = NULL;
+ }
+
/*
* Configure registers related to HDMI interrupt
* generation before registering IRQ.
@@ -1770,7 +2012,12 @@ int dw_hdmi_bind(struct device *dev, struct device *master,
pdevinfo.parent = dev;
pdevinfo.id = PLATFORM_DEVID_AUTO;
- if (hdmi_readb(hdmi, HDMI_CONFIG1_ID) & HDMI_CONFIG1_AHB) {
+ config0 = hdmi_readb(hdmi, HDMI_CONFIG0_ID);
+ config1 = hdmi_readb(hdmi, HDMI_CONFIG1_ID);
+
+ if (config1 & HDMI_CONFIG1_AHB) {
+ struct dw_hdmi_audio_data audio;
+
audio.phys = iores->start;
audio.base = hdmi->regs;
audio.irq = irq;
@@ -1782,16 +2029,39 @@ int dw_hdmi_bind(struct device *dev, struct device *master,
pdevinfo.size_data = sizeof(audio);
pdevinfo.dma_mask = DMA_BIT_MASK(32);
hdmi->audio = platform_device_register_full(&pdevinfo);
+ } else if (config0 & HDMI_CONFIG0_I2S) {
+ struct dw_hdmi_i2s_audio_data audio;
+
+ audio.hdmi = hdmi;
+ audio.write = hdmi_writeb;
+ audio.read = hdmi_readb;
+
+ pdevinfo.name = "dw-hdmi-i2s-audio";
+ pdevinfo.data = &audio;
+ pdevinfo.size_data = sizeof(audio);
+ pdevinfo.dma_mask = DMA_BIT_MASK(32);
+ hdmi->audio = platform_device_register_full(&pdevinfo);
}
+ /* Reset HDMI DDC I2C master controller and mute I2CM interrupts */
+ if (hdmi->i2c)
+ dw_hdmi_i2c_init(hdmi);
+
dev_set_drvdata(dev, hdmi);
return 0;
err_iahb:
+ if (hdmi->i2c) {
+ i2c_del_adapter(&hdmi->i2c->adap);
+ hdmi->ddc = NULL;
+ }
+
clk_disable_unprepare(hdmi->iahb_clk);
err_isfr:
clk_disable_unprepare(hdmi->isfr_clk);
+err_res:
+ i2c_put_adapter(hdmi->ddc);
return ret;
}
@@ -1809,13 +2079,18 @@ void dw_hdmi_unbind(struct device *dev, struct device *master, void *data)
clk_disable_unprepare(hdmi->iahb_clk);
clk_disable_unprepare(hdmi->isfr_clk);
- i2c_put_adapter(hdmi->ddc);
+
+ if (hdmi->i2c)
+ i2c_del_adapter(&hdmi->i2c->adap);
+ else
+ i2c_put_adapter(hdmi->ddc);
}
EXPORT_SYMBOL_GPL(dw_hdmi_unbind);
MODULE_AUTHOR("Sascha Hauer <s.hauer@pengutronix.de>");
MODULE_AUTHOR("Andy Yan <andy.yan@rock-chips.com>");
MODULE_AUTHOR("Yakir Yang <ykk@rock-chips.com>");
+MODULE_AUTHOR("Vladimir Zapolskiy <vladimir_zapolskiy@mentor.com>");
MODULE_DESCRIPTION("DW HDMI transmitter driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:dw-hdmi");
diff --git a/drivers/gpu/drm/bridge/dw-hdmi.h b/drivers/gpu/drm/bridge/dw-hdmi.h
index fc9a560429d6..55135bbd0c16 100644
--- a/drivers/gpu/drm/bridge/dw-hdmi.h
+++ b/drivers/gpu/drm/bridge/dw-hdmi.h
@@ -545,6 +545,9 @@
#define HDMI_I2CM_FS_SCL_LCNT_0_ADDR 0x7E12
enum {
+/* CONFIG0_ID field values */
+ HDMI_CONFIG0_I2S = 0x10,
+
/* CONFIG1_ID field values */
HDMI_CONFIG1_AHB = 0x01,
@@ -566,6 +569,10 @@ enum {
HDMI_IH_PHY_STAT0_TX_PHY_LOCK = 0x2,
HDMI_IH_PHY_STAT0_HPD = 0x1,
+/* IH_I2CM_STAT0 and IH_MUTE_I2CM_STAT0 field values */
+ HDMI_IH_I2CM_STAT0_DONE = 0x2,
+ HDMI_IH_I2CM_STAT0_ERROR = 0x1,
+
/* IH_MUTE_I2CMPHY_STAT0 field values */
HDMI_IH_MUTE_I2CMPHY_STAT0_I2CMPHYDONE = 0x2,
HDMI_IH_MUTE_I2CMPHY_STAT0_I2CMPHYERROR = 0x1,
@@ -887,6 +894,17 @@ enum {
HDMI_PHY_I2CM_CTLINT_ADDR_ARBITRATION_POL = 0x08,
HDMI_PHY_I2CM_CTLINT_ADDR_ARBITRATION_MASK = 0x04,
+/* AUD_CONF0 field values */
+ HDMI_AUD_CONF0_SW_RESET = 0x80,
+ HDMI_AUD_CONF0_I2S_ALL_ENABLE = 0x2F,
+
+/* AUD_CONF1 field values */
+ HDMI_AUD_CONF1_MODE_I2S = 0x00,
+ HDMI_AUD_CONF1_MODE_RIGHT_J = 0x02,
+ HDMI_AUD_CONF1_MODE_LEFT_J = 0x04,
+ HDMI_AUD_CONF1_WIDTH_16 = 0x10,
+ HDMI_AUD_CONF1_WIDTH_24 = 0x18,
+
/* AUD_CTS3 field values */
HDMI_AUD_CTS3_N_SHIFT_OFFSET = 5,
HDMI_AUD_CTS3_N_SHIFT_MASK = 0xe0,
@@ -901,6 +919,12 @@ enum {
HDMI_AUD_CTS3_CTS_MANUAL = 0x10,
HDMI_AUD_CTS3_AUDCTS19_16_MASK = 0x0f,
+/* HDMI_AUD_INPUTCLKFS field values */
+ HDMI_AUD_INPUTCLKFS_128FS = 0,
+ HDMI_AUD_INPUTCLKFS_256FS = 1,
+ HDMI_AUD_INPUTCLKFS_512FS = 2,
+ HDMI_AUD_INPUTCLKFS_64FS = 4,
+
/* AHB_DMA_CONF0 field values */
HDMI_AHB_DMA_CONF0_SW_FIFO_RST_OFFSET = 7,
HDMI_AHB_DMA_CONF0_SW_FIFO_RST_MASK = 0x80,
@@ -1032,6 +1056,21 @@ enum {
HDMI_A_VIDPOLCFG_HSYNCPOL_MASK = 0x2,
HDMI_A_VIDPOLCFG_HSYNCPOL_ACTIVE_HIGH = 0x2,
HDMI_A_VIDPOLCFG_HSYNCPOL_ACTIVE_LOW = 0x0,
+
+/* I2CM_OPERATION field values */
+ HDMI_I2CM_OPERATION_WRITE = 0x10,
+ HDMI_I2CM_OPERATION_READ_EXT = 0x2,
+ HDMI_I2CM_OPERATION_READ = 0x1,
+
+/* I2CM_INT field values */
+ HDMI_I2CM_INT_DONE_POL = 0x8,
+ HDMI_I2CM_INT_DONE_MASK = 0x4,
+
+/* I2CM_CTLINT field values */
+ HDMI_I2CM_CTLINT_NAC_POL = 0x80,
+ HDMI_I2CM_CTLINT_NAC_MASK = 0x40,
+ HDMI_I2CM_CTLINT_ARB_POL = 0x8,
+ HDMI_I2CM_CTLINT_ARB_MASK = 0x4,
};
#endif /* __DW_HDMI_H__ */
diff --git a/drivers/gpu/drm/bridge/nxp-ptn3460.c b/drivers/gpu/drm/bridge/nxp-ptn3460.c
index f1a99938e924..27f98c518dde 100644
--- a/drivers/gpu/drm/bridge/nxp-ptn3460.c
+++ b/drivers/gpu/drm/bridge/nxp-ptn3460.c
@@ -239,16 +239,9 @@ static const struct drm_connector_helper_funcs ptn3460_connector_helper_funcs =
.get_modes = ptn3460_get_modes,
};
-static enum drm_connector_status ptn3460_detect(struct drm_connector *connector,
- bool force)
-{
- return connector_status_connected;
-}
-
static const struct drm_connector_funcs ptn3460_connector_funcs = {
.dpms = drm_atomic_helper_connector_dpms,
.fill_modes = drm_helper_probe_single_connector_modes,
- .detect = ptn3460_detect,
.destroy = drm_connector_cleanup,
.reset = drm_atomic_helper_connector_reset,
.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
diff --git a/drivers/gpu/drm/bridge/parade-ps8622.c b/drivers/gpu/drm/bridge/parade-ps8622.c
index 6f7c2f9860d2..ac8cc5b50d9f 100644
--- a/drivers/gpu/drm/bridge/parade-ps8622.c
+++ b/drivers/gpu/drm/bridge/parade-ps8622.c
@@ -477,16 +477,9 @@ static const struct drm_connector_helper_funcs ps8622_connector_helper_funcs = {
.get_modes = ps8622_get_modes,
};
-static enum drm_connector_status ps8622_detect(struct drm_connector *connector,
- bool force)
-{
- return connector_status_connected;
-}
-
static const struct drm_connector_funcs ps8622_connector_funcs = {
.dpms = drm_atomic_helper_connector_dpms,
.fill_modes = drm_helper_probe_single_connector_modes,
- .detect = ps8622_detect,
.destroy = drm_connector_cleanup,
.reset = drm_atomic_helper_connector_reset,
.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
diff --git a/drivers/gpu/drm/bridge/sil-sii8620.c b/drivers/gpu/drm/bridge/sil-sii8620.c
new file mode 100644
index 000000000000..b2c267df7ee7
--- /dev/null
+++ b/drivers/gpu/drm/bridge/sil-sii8620.c
@@ -0,0 +1,1564 @@
+/*
+ * Silicon Image SiI8620 HDMI/MHL bridge driver
+ *
+ * Copyright (C) 2015, Samsung Electronics Co., Ltd.
+ * Andrzej Hajda <a.hajda@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <drm/bridge/mhl.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_edid.h>
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/regulator/consumer.h>
+#include <linux/slab.h>
+
+#include "sil-sii8620.h"
+
+#define VAL_RX_HDMI_CTRL2_DEFVAL VAL_RX_HDMI_CTRL2_IDLE_CNT(3)
+
+enum sii8620_mode {
+ CM_DISCONNECTED,
+ CM_DISCOVERY,
+ CM_MHL1,
+ CM_MHL3,
+ CM_ECBUS_S
+};
+
+enum sii8620_sink_type {
+ SINK_NONE,
+ SINK_HDMI,
+ SINK_DVI
+};
+
+enum sii8620_mt_state {
+ MT_STATE_READY,
+ MT_STATE_BUSY,
+ MT_STATE_DONE
+};
+
+struct sii8620 {
+ struct drm_bridge bridge;
+ struct device *dev;
+ struct clk *clk_xtal;
+ struct gpio_desc *gpio_reset;
+ struct gpio_desc *gpio_int;
+ struct regulator_bulk_data supplies[2];
+ struct mutex lock; /* context lock, protects fields below */
+ int error;
+ enum sii8620_mode mode;
+ enum sii8620_sink_type sink_type;
+ u8 cbus_status;
+ u8 stat[MHL_DST_SIZE];
+ u8 xstat[MHL_XDS_SIZE];
+ u8 devcap[MHL_DCAP_SIZE];
+ u8 xdevcap[MHL_XDC_SIZE];
+ u8 avif[19];
+ struct edid *edid;
+ unsigned int gen2_write_burst:1;
+ enum sii8620_mt_state mt_state;
+ struct list_head mt_queue;
+};
+
+struct sii8620_mt_msg;
+
+typedef void (*sii8620_mt_msg_cb)(struct sii8620 *ctx,
+ struct sii8620_mt_msg *msg);
+
+struct sii8620_mt_msg {
+ struct list_head node;
+ u8 reg[4];
+ u8 ret;
+ sii8620_mt_msg_cb send;
+ sii8620_mt_msg_cb recv;
+};
+
+static const u8 sii8620_i2c_page[] = {
+ 0x39, /* Main System */
+ 0x3d, /* TDM and HSIC */
+ 0x49, /* TMDS Receiver, MHL EDID */
+ 0x4d, /* eMSC, HDCP, HSIC */
+ 0x5d, /* MHL Spec */
+ 0x64, /* MHL CBUS */
+ 0x59, /* Hardware TPI (Transmitter Programming Interface) */
+ 0x61, /* eCBUS-S, eCBUS-D */
+};
+
+static void sii8620_fetch_edid(struct sii8620 *ctx);
+static void sii8620_set_upstream_edid(struct sii8620 *ctx);
+static void sii8620_enable_hpd(struct sii8620 *ctx);
+static void sii8620_mhl_disconnected(struct sii8620 *ctx);
+
+static int sii8620_clear_error(struct sii8620 *ctx)
+{
+ int ret = ctx->error;
+
+ ctx->error = 0;
+ return ret;
+}
+
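+/*
+ * Register addresses carry the I2C page in the high byte (an index into
+ * sii8620_i2c_page[]) and the register offset in the low byte. Bus errors
+ * are latched in ctx->error so that a whole sequence of accesses can be
+ * issued and then checked once via sii8620_clear_error().
+ */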
+static void sii8620_read_buf(struct sii8620 *ctx, u16 addr, u8 *buf, int len)
+{
+ struct device *dev = ctx->dev;
+ struct i2c_client *client = to_i2c_client(dev);
+ u8 data = addr;
+ struct i2c_msg msg[] = {
+ {
+ .addr = sii8620_i2c_page[addr >> 8],
+ .flags = client->flags,
+ .len = 1,
+ .buf = &data
+ },
+ {
+ .addr = sii8620_i2c_page[addr >> 8],
+ .flags = client->flags | I2C_M_RD,
+ .len = len,
+ .buf = buf
+ },
+ };
+ int ret;
+
+ if (ctx->error)
+ return;
+
+ ret = i2c_transfer(client->adapter, msg, 2);
+ dev_dbg(dev, "read at %04x: %*ph, %d\n", addr, len, buf, ret);
+
+ if (ret != 2) {
+ dev_err(dev, "Read at %#06x of %d bytes failed with code %d.\n",
+ addr, len, ret);
+ ctx->error = ret < 0 ? ret : -EIO;
+ }
+}
+
+static u8 sii8620_readb(struct sii8620 *ctx, u16 addr)
+{
+ u8 ret;
+
+ sii8620_read_buf(ctx, addr, &ret, 1);
+ return ret;
+}
+
+static void sii8620_write_buf(struct sii8620 *ctx, u16 addr, const u8 *buf,
+ int len)
+{
+ struct device *dev = ctx->dev;
+ struct i2c_client *client = to_i2c_client(dev);
+ u8 data[2];
+ struct i2c_msg msg = {
+ .addr = sii8620_i2c_page[addr >> 8],
+ .flags = client->flags,
+ .len = len + 1,
+ };
+ int ret;
+
+ if (ctx->error)
+ return;
+
+ if (len > 1) {
+ msg.buf = kmalloc(len + 1, GFP_KERNEL);
+ if (!msg.buf) {
+ ctx->error = -ENOMEM;
+ return;
+ }
+ memcpy(msg.buf + 1, buf, len);
+ } else {
+ msg.buf = data;
+ msg.buf[1] = *buf;
+ }
+
+ msg.buf[0] = addr;
+
+ ret = i2c_transfer(client->adapter, &msg, 1);
+ dev_dbg(dev, "write at %04x: %*ph, %d\n", addr, len, buf, ret);
+
+ if (ret != 1) {
+ dev_err(dev, "Write at %#06x of %*ph failed with code %d.\n",
+ addr, len, buf, ret);
+ ctx->error = ret ?: -EIO;
+ }
+
+ if (len > 1)
+ kfree(msg.buf);
+}
+
+#define sii8620_write(ctx, addr, arr...) \
+({\
+ u8 d[] = { arr }; \
+ sii8620_write_buf(ctx, addr, d, ARRAY_SIZE(d)); \
+})
+
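+/* Write a flat sequence of (register, value) pairs */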
+static void __sii8620_write_seq(struct sii8620 *ctx, const u16 *seq, int len)
+{
+ int i;
+
+ for (i = 0; i < len; i += 2)
+ sii8620_write(ctx, seq[i], seq[i + 1]);
+}
+
+#define sii8620_write_seq(ctx, seq...) \
+({\
+ const u16 d[] = { seq }; \
+ __sii8620_write_seq(ctx, d, ARRAY_SIZE(d)); \
+})
+
+#define sii8620_write_seq_static(ctx, seq...) \
+({\
+ static const u16 d[] = { seq }; \
+ __sii8620_write_seq(ctx, d, ARRAY_SIZE(d)); \
+})
+
+static void sii8620_setbits(struct sii8620 *ctx, u16 addr, u8 mask, u8 val)
+{
+ val = (val & mask) | (sii8620_readb(ctx, addr) & ~mask);
+ sii8620_write(ctx, addr, val);
+}
+
+static void sii8620_mt_cleanup(struct sii8620 *ctx)
+{
+ struct sii8620_mt_msg *msg, *n;
+
+ list_for_each_entry_safe(msg, n, &ctx->mt_queue, node) {
+ list_del(&msg->node);
+ kfree(msg);
+ }
+ ctx->mt_state = MT_STATE_READY;
+}
+
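+/*
+ * Pump the MSC transaction queue: if the head message is DONE, run its
+ * recv callback and retire it, then kick off the next queued message.
+ */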
+static void sii8620_mt_work(struct sii8620 *ctx)
+{
+ struct sii8620_mt_msg *msg;
+
+ if (ctx->error)
+ return;
+ if (ctx->mt_state == MT_STATE_BUSY || list_empty(&ctx->mt_queue))
+ return;
+
+ if (ctx->mt_state == MT_STATE_DONE) {
+ ctx->mt_state = MT_STATE_READY;
+ msg = list_first_entry(&ctx->mt_queue, struct sii8620_mt_msg,
+ node);
+ if (msg->recv)
+ msg->recv(ctx, msg);
+ list_del(&msg->node);
+ kfree(msg);
+ }
+
+ if (ctx->mt_state != MT_STATE_READY || list_empty(&ctx->mt_queue))
+ return;
+
+ ctx->mt_state = MT_STATE_BUSY;
+ msg = list_first_entry(&ctx->mt_queue, struct sii8620_mt_msg, node);
+ if (msg->send)
+ msg->send(ctx, msg);
+}
+
+static void sii8620_mt_msc_cmd_send(struct sii8620 *ctx,
+ struct sii8620_mt_msg *msg)
+{
+ switch (msg->reg[0]) {
+ case MHL_WRITE_STAT:
+ case MHL_SET_INT:
+ sii8620_write_buf(ctx, REG_MSC_CMD_OR_OFFSET, msg->reg + 1, 2);
+ sii8620_write(ctx, REG_MSC_COMMAND_START,
+ BIT_MSC_COMMAND_START_WRITE_STAT);
+ break;
+ case MHL_MSC_MSG:
+ sii8620_write_buf(ctx, REG_MSC_CMD_OR_OFFSET, msg->reg, 3);
+ sii8620_write(ctx, REG_MSC_COMMAND_START,
+ BIT_MSC_COMMAND_START_MSC_MSG);
+ break;
+ default:
+ dev_err(ctx->dev, "%s: command %#x not supported\n", __func__,
+ msg->reg[0]);
+ }
+}
+
+static struct sii8620_mt_msg *sii8620_mt_msg_new(struct sii8620 *ctx)
+{
+ struct sii8620_mt_msg *msg = kzalloc(sizeof(*msg), GFP_KERNEL);
+
+ if (!msg)
+ ctx->error = -ENOMEM;
+ else
+ list_add_tail(&msg->node, &ctx->mt_queue);
+
+ return msg;
+}
+
+static void sii8620_mt_msc_cmd(struct sii8620 *ctx, u8 cmd, u8 arg1, u8 arg2)
+{
+ struct sii8620_mt_msg *msg = sii8620_mt_msg_new(ctx);
+
+ if (!msg)
+ return;
+
+ msg->reg[0] = cmd;
+ msg->reg[1] = arg1;
+ msg->reg[2] = arg2;
+ msg->send = sii8620_mt_msc_cmd_send;
+}
+
+static void sii8620_mt_write_stat(struct sii8620 *ctx, u8 reg, u8 val)
+{
+ sii8620_mt_msc_cmd(ctx, MHL_WRITE_STAT, reg, val);
+}
+
+static inline void sii8620_mt_set_int(struct sii8620 *ctx, u8 irq, u8 mask)
+{
+ sii8620_mt_msc_cmd(ctx, MHL_SET_INT, irq, mask);
+}
+
+static void sii8620_mt_msc_msg(struct sii8620 *ctx, u8 cmd, u8 data)
+{
+ sii8620_mt_msc_cmd(ctx, MHL_MSC_MSG, cmd, data);
+}
+
+static void sii8620_mt_rap(struct sii8620 *ctx, u8 code)
+{
+ sii8620_mt_msc_msg(ctx, MHL_MSC_MSG_RAP, code);
+}
+
+static void sii8620_mt_read_devcap_send(struct sii8620 *ctx,
+ struct sii8620_mt_msg *msg)
+{
+ u8 ctrl = BIT_EDID_CTRL_DEVCAP_SELECT_DEVCAP
+ | BIT_EDID_CTRL_EDID_FIFO_ADDR_AUTO
+ | BIT_EDID_CTRL_EDID_MODE_EN;
+
+ if (msg->reg[0] == MHL_READ_XDEVCAP)
+ ctrl |= BIT_EDID_CTRL_XDEVCAP_EN;
+
+ sii8620_write_seq(ctx,
+ REG_INTR9_MASK, BIT_INTR9_DEVCAP_DONE,
+ REG_EDID_CTRL, ctrl,
+ REG_TPI_CBUS_START, BIT_TPI_CBUS_START_GET_DEVCAP_START
+ );
+}
+
+/*
+ * Copy src to dst and leave the changed bits in src: after the XOR
+ * exchange below, dst holds the new values and src holds (new ^ old).
+ */
+static void sii8620_update_array(u8 *dst, u8 *src, int count)
+{
+ while (--count >= 0) {
+ *src ^= *dst;
+ *dst++ ^= *src++;
+ }
+}
+
+static void sii8620_mr_devcap(struct sii8620 *ctx)
+{
+ static const char * const sink_str[] = {
+ [SINK_NONE] = "NONE",
+ [SINK_HDMI] = "HDMI",
+ [SINK_DVI] = "DVI"
+ };
+
+ u8 dcap[MHL_DCAP_SIZE];
+ char sink_name[20];
+ struct device *dev = ctx->dev;
+
+ sii8620_read_buf(ctx, REG_EDID_FIFO_RD_DATA, dcap, MHL_DCAP_SIZE);
+ if (ctx->error < 0)
+ return;
+
+ dev_info(dev, "dcap: %*ph\n", MHL_DCAP_SIZE, dcap);
+ dev_info(dev, "detected dongle MHL %d.%d, ChipID %02x%02x:%02x%02x\n",
+ dcap[MHL_DCAP_MHL_VERSION] / 16,
+ dcap[MHL_DCAP_MHL_VERSION] % 16, dcap[MHL_DCAP_ADOPTER_ID_H],
+ dcap[MHL_DCAP_ADOPTER_ID_L], dcap[MHL_DCAP_DEVICE_ID_H],
+ dcap[MHL_DCAP_DEVICE_ID_L]);
+ sii8620_update_array(ctx->devcap, dcap, MHL_DCAP_SIZE);
+
+ if (!(dcap[MHL_DCAP_CAT] & MHL_DCAP_CAT_SINK))
+ return;
+
+ sii8620_fetch_edid(ctx);
+ if (!ctx->edid) {
+ dev_err(ctx->dev, "Cannot fetch EDID\n");
+ sii8620_mhl_disconnected(ctx);
+ return;
+ }
+
+ if (drm_detect_hdmi_monitor(ctx->edid))
+ ctx->sink_type = SINK_HDMI;
+ else
+ ctx->sink_type = SINK_DVI;
+
+ drm_edid_get_monitor_name(ctx->edid, sink_name, ARRAY_SIZE(sink_name));
+
+ dev_info(dev, "detected sink(type: %s): %s\n",
+ sink_str[ctx->sink_type], sink_name);
+ sii8620_set_upstream_edid(ctx);
+ sii8620_enable_hpd(ctx);
+}
+
+static void sii8620_mr_xdevcap(struct sii8620 *ctx)
+{
+ sii8620_read_buf(ctx, REG_EDID_FIFO_RD_DATA, ctx->xdevcap,
+ MHL_XDC_SIZE);
+
+ sii8620_mt_write_stat(ctx, MHL_XDS_REG(CURR_ECBUS_MODE),
+ MHL_XDS_ECBUS_S | MHL_XDS_SLOT_MODE_8BIT);
+ sii8620_mt_rap(ctx, MHL_RAP_CBUS_MODE_UP);
+}
+
+static void sii8620_mt_read_devcap_recv(struct sii8620 *ctx,
+ struct sii8620_mt_msg *msg)
+{
+ u8 ctrl = BIT_EDID_CTRL_DEVCAP_SELECT_DEVCAP
+ | BIT_EDID_CTRL_EDID_FIFO_ADDR_AUTO
+ | BIT_EDID_CTRL_EDID_MODE_EN;
+
+ if (msg->reg[0] == MHL_READ_XDEVCAP)
+ ctrl |= BIT_EDID_CTRL_XDEVCAP_EN;
+
+ sii8620_write_seq(ctx,
+ REG_INTR9_MASK, BIT_INTR9_DEVCAP_DONE | BIT_INTR9_EDID_DONE
+ | BIT_INTR9_EDID_ERROR,
+ REG_EDID_CTRL, ctrl,
+ REG_EDID_FIFO_ADDR, 0
+ );
+
+ if (msg->reg[0] == MHL_READ_XDEVCAP)
+ sii8620_mr_xdevcap(ctx);
+ else
+ sii8620_mr_devcap(ctx);
+}
+
+static void sii8620_mt_read_devcap(struct sii8620 *ctx, bool xdevcap)
+{
+ struct sii8620_mt_msg *msg = sii8620_mt_msg_new(ctx);
+
+ if (!msg)
+ return;
+
+ msg->reg[0] = xdevcap ? MHL_READ_XDEVCAP : MHL_READ_DEVCAP;
+ msg->send = sii8620_mt_read_devcap_send;
+ msg->recv = sii8620_mt_read_devcap_recv;
+}
+
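+/*
+ * Fetch the sink's EDID through the CBUS DDC engine in 16-byte chunks,
+ * growing the buffer when the base block advertises extensions.
+ */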
+static void sii8620_fetch_edid(struct sii8620 *ctx)
+{
+ u8 lm_ddc, ddc_cmd, int3, cbus;
+ int fetched, i;
+ int edid_len = EDID_LENGTH;
+ u8 *edid;
+
+ sii8620_readb(ctx, REG_CBUS_STATUS);
+ lm_ddc = sii8620_readb(ctx, REG_LM_DDC);
+ ddc_cmd = sii8620_readb(ctx, REG_DDC_CMD);
+
+ sii8620_write_seq(ctx,
+ REG_INTR9_MASK, 0,
+ REG_EDID_CTRL, BIT_EDID_CTRL_EDID_FIFO_ADDR_AUTO,
+ REG_HDCP2X_POLL_CS, 0x71,
+ REG_HDCP2X_CTRL_0, BIT_HDCP2X_CTRL_0_HDCP2X_HDCPTX,
+ REG_LM_DDC, lm_ddc | BIT_LM_DDC_SW_TPI_EN_DISABLED,
+ );
+
+ for (i = 0; i < 256; ++i) {
+ u8 ddc_stat = sii8620_readb(ctx, REG_DDC_STATUS);
+
+ if (!(ddc_stat & BIT_DDC_STATUS_DDC_I2C_IN_PROG))
+ break;
+ sii8620_write(ctx, REG_DDC_STATUS,
+ BIT_DDC_STATUS_DDC_FIFO_EMPTY);
+ }
+
+ sii8620_write(ctx, REG_DDC_ADDR, 0x50 << 1);
+
+ edid = kmalloc(EDID_LENGTH, GFP_KERNEL);
+ if (!edid) {
+ ctx->error = -ENOMEM;
+ return;
+ }
+
+#define FETCH_SIZE 16
+ for (fetched = 0; fetched < edid_len; fetched += FETCH_SIZE) {
+ sii8620_readb(ctx, REG_DDC_STATUS);
+ sii8620_write_seq(ctx,
+ REG_DDC_CMD, ddc_cmd | VAL_DDC_CMD_DDC_CMD_ABORT,
+ REG_DDC_CMD, ddc_cmd | VAL_DDC_CMD_DDC_CMD_CLEAR_FIFO,
+ REG_DDC_STATUS, BIT_DDC_STATUS_DDC_FIFO_EMPTY
+ );
+ sii8620_write_seq(ctx,
+ REG_DDC_SEGM, fetched >> 8,
+ REG_DDC_OFFSET, fetched & 0xff,
+ REG_DDC_DIN_CNT1, FETCH_SIZE,
+ REG_DDC_DIN_CNT2, 0,
+ REG_DDC_CMD, ddc_cmd | VAL_DDC_CMD_ENH_DDC_READ_NO_ACK
+ );
+
+ do {
+ int3 = sii8620_readb(ctx, REG_INTR3);
+ cbus = sii8620_readb(ctx, REG_CBUS_STATUS);
+
+ if (int3 & BIT_DDC_CMD_DONE)
+ break;
+
+ if (!(cbus & BIT_CBUS_STATUS_CBUS_CONNECTED)) {
+ kfree(edid);
+ edid = NULL;
+ goto end;
+ }
+ } while (1);
+
+ sii8620_readb(ctx, REG_DDC_STATUS);
+ while (sii8620_readb(ctx, REG_DDC_DOUT_CNT) < FETCH_SIZE)
+ usleep_range(10, 20);
+
+ sii8620_read_buf(ctx, REG_DDC_DATA, edid + fetched, FETCH_SIZE);
+ if (fetched + FETCH_SIZE == EDID_LENGTH) {
+ u8 ext = ((struct edid *)edid)->extensions;
+
+ if (ext) {
+ u8 *new_edid;
+
+ edid_len += ext * EDID_LENGTH;
+ new_edid = krealloc(edid, edid_len, GFP_KERNEL);
+ if (!new_edid) {
+ kfree(edid);
+ ctx->error = -ENOMEM;
+ return;
+ }
+ edid = new_edid;
+ }
+ }
+
+ if (fetched + FETCH_SIZE == edid_len)
+ sii8620_write(ctx, REG_INTR3, int3);
+ }
+
+ sii8620_write(ctx, REG_LM_DDC, lm_ddc);
+
+end:
+ kfree(ctx->edid);
+ ctx->edid = (struct edid *)edid;
+}
+
+static void sii8620_set_upstream_edid(struct sii8620 *ctx)
+{
+ sii8620_setbits(ctx, REG_DPD, BIT_DPD_PDNRX12 | BIT_DPD_PDIDCK_N
+ | BIT_DPD_PD_MHL_CLK_N, 0xff);
+
+ sii8620_write_seq_static(ctx,
+ REG_RX_HDMI_CTRL3, 0x00,
+ REG_PKT_FILTER_0, 0xFF,
+ REG_PKT_FILTER_1, 0xFF,
+ REG_ALICE0_BW_I2C, 0x06
+ );
+
+ sii8620_setbits(ctx, REG_RX_HDMI_CLR_BUFFER,
+ BIT_RX_HDMI_CLR_BUFFER_VSI_CLR_EN, 0xff);
+
+ sii8620_write_seq_static(ctx,
+ REG_EDID_CTRL, BIT_EDID_CTRL_EDID_FIFO_ADDR_AUTO
+ | BIT_EDID_CTRL_EDID_MODE_EN,
+ REG_EDID_FIFO_ADDR, 0,
+ );
+
+ sii8620_write_buf(ctx, REG_EDID_FIFO_WR_DATA, (u8 *)ctx->edid,
+ (ctx->edid->extensions + 1) * EDID_LENGTH);
+
+ sii8620_write_seq_static(ctx,
+ REG_EDID_CTRL, BIT_EDID_CTRL_EDID_PRIME_VALID
+ | BIT_EDID_CTRL_EDID_FIFO_ADDR_AUTO
+ | BIT_EDID_CTRL_EDID_MODE_EN,
+ REG_INTR5_MASK, BIT_INTR_SCDT_CHANGE,
+ REG_INTR9_MASK, 0
+ );
+}
+
+static void sii8620_xtal_set_rate(struct sii8620 *ctx)
+{
+ static const struct {
+ unsigned int rate;
+ u8 div;
+ u8 tp1;
+ } rates[] = {
+ { 19200, 0x04, 0x53 },
+ { 20000, 0x04, 0x62 },
+ { 24000, 0x05, 0x75 },
+ { 30000, 0x06, 0x92 },
+ { 38400, 0x0c, 0xbc },
+ };
+ unsigned long rate = clk_get_rate(ctx->clk_xtal) / 1000;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(rates) - 1; ++i)
+ if (rate <= rates[i].rate)
+ break;
+
+ if (rate != rates[i].rate)
+ dev_err(ctx->dev, "xtal clock rate(%lukHz) not supported, setting MHL for %ukHz.\n",
+ rate, rates[i].rate);
+
+ sii8620_write(ctx, REG_DIV_CTL_MAIN, rates[i].div);
+ sii8620_write(ctx, REG_HDCP2X_TP1, rates[i].tp1);
+}
+
+static int sii8620_hw_on(struct sii8620 *ctx)
+{
+ int ret;
+
+ ret = regulator_bulk_enable(ARRAY_SIZE(ctx->supplies), ctx->supplies);
+ if (ret)
+ return ret;
+ usleep_range(10000, 20000);
+ return clk_prepare_enable(ctx->clk_xtal);
+}
+
+static int sii8620_hw_off(struct sii8620 *ctx)
+{
+ clk_disable_unprepare(ctx->clk_xtal);
+ gpiod_set_value(ctx->gpio_reset, 1);
+ return regulator_bulk_disable(ARRAY_SIZE(ctx->supplies), ctx->supplies);
+}
+
+static void sii8620_hw_reset(struct sii8620 *ctx)
+{
+ usleep_range(10000, 20000);
+ gpiod_set_value(ctx->gpio_reset, 0);
+ usleep_range(5000, 20000);
+ gpiod_set_value(ctx->gpio_reset, 1);
+ usleep_range(10000, 20000);
+ gpiod_set_value(ctx->gpio_reset, 0);
+ msleep(300);
+}
+
+static void sii8620_cbus_reset(struct sii8620 *ctx)
+{
+ sii8620_write_seq_static(ctx,
+ REG_PWD_SRST, BIT_PWD_SRST_CBUS_RST
+ | BIT_PWD_SRST_CBUS_RST_SW_EN,
+ REG_PWD_SRST, BIT_PWD_SRST_CBUS_RST_SW_EN
+ );
+}
+
+static void sii8620_set_auto_zone(struct sii8620 *ctx)
+{
+ if (ctx->mode != CM_MHL1) {
+ sii8620_write_seq_static(ctx,
+ REG_TX_ZONE_CTL1, 0x0,
+ REG_MHL_PLL_CTL0, VAL_MHL_PLL_CTL0_HDMI_CLK_RATIO_1X
+ | BIT_MHL_PLL_CTL0_CRYSTAL_CLK_SEL
+ | BIT_MHL_PLL_CTL0_ZONE_MASK_OE
+ );
+ } else {
+ sii8620_write_seq_static(ctx,
+ REG_TX_ZONE_CTL1, VAL_TX_ZONE_CTL1_TX_ZONE_CTRL_MODE,
+ REG_MHL_PLL_CTL0, VAL_MHL_PLL_CTL0_HDMI_CLK_RATIO_1X
+ | BIT_MHL_PLL_CTL0_ZONE_MASK_OE
+ );
+ }
+}
+
+static void sii8620_stop_video(struct sii8620 *ctx)
+{
+ u8 uninitialized_var(val);
+
+ sii8620_write_seq_static(ctx,
+ REG_TPI_INTR_EN, 0,
+ REG_HDCP2X_INTR0_MASK, 0,
+ REG_TPI_COPP_DATA2, 0,
+ REG_TPI_INTR_ST0, ~0,
+ );
+
+ switch (ctx->sink_type) {
+ case SINK_DVI:
+ val = BIT_TPI_SC_REG_TMDS_OE_POWER_DOWN
+ | BIT_TPI_SC_TPI_AV_MUTE;
+ break;
+ case SINK_HDMI:
+ val = BIT_TPI_SC_REG_TMDS_OE_POWER_DOWN
+ | BIT_TPI_SC_TPI_AV_MUTE
+ | BIT_TPI_SC_TPI_OUTPUT_MODE_0_HDMI;
+ break;
+ default:
+ return;
+ }
+
+ sii8620_write(ctx, REG_TPI_SC, val);
+}
+
+static void sii8620_start_hdmi(struct sii8620 *ctx)
+{
+ sii8620_write_seq_static(ctx,
+ REG_RX_HDMI_CTRL2, VAL_RX_HDMI_CTRL2_DEFVAL
+ | BIT_RX_HDMI_CTRL2_USE_AV_MUTE,
+ REG_VID_OVRRD, BIT_VID_OVRRD_PP_AUTO_DISABLE
+ | BIT_VID_OVRRD_M1080P_OVRRD,
+ REG_VID_MODE, 0,
+ REG_MHL_TOP_CTL, 0x1,
+ REG_MHLTX_CTL6, 0xa0,
+ REG_TPI_INPUT, VAL_TPI_FORMAT(RGB, FULL),
+ REG_TPI_OUTPUT, VAL_TPI_FORMAT(RGB, FULL),
+ );
+
+ sii8620_mt_write_stat(ctx, MHL_DST_REG(LINK_MODE),
+ MHL_DST_LM_CLK_MODE_NORMAL |
+ MHL_DST_LM_PATH_ENABLED);
+
+ sii8620_set_auto_zone(ctx);
+
+ sii8620_write(ctx, REG_TPI_SC, BIT_TPI_SC_TPI_OUTPUT_MODE_0_HDMI);
+
+ sii8620_write_buf(ctx, REG_TPI_AVI_CHSUM, ctx->avif,
+ ARRAY_SIZE(ctx->avif));
+
+ sii8620_write(ctx, REG_PKT_FILTER_0, 0xa1, 0x2);
+}
+
+static void sii8620_start_video(struct sii8620 *ctx)
+{
+ if (ctx->mode < CM_MHL3)
+ sii8620_stop_video(ctx);
+
+ switch (ctx->sink_type) {
+ case SINK_HDMI:
+ sii8620_start_hdmi(ctx);
+ break;
+ case SINK_DVI:
+ default:
+ break;
+ }
+}
+
+static void sii8620_disable_hpd(struct sii8620 *ctx)
+{
+ sii8620_setbits(ctx, REG_EDID_CTRL, BIT_EDID_CTRL_EDID_PRIME_VALID, 0);
+ sii8620_write_seq_static(ctx,
+ REG_HPD_CTRL, BIT_HPD_CTRL_HPD_OUT_OVR_EN,
+ REG_INTR8_MASK, 0
+ );
+}
+
+static void sii8620_enable_hpd(struct sii8620 *ctx)
+{
+ sii8620_setbits(ctx, REG_TMDS_CSTAT_P3,
+ BIT_TMDS_CSTAT_P3_SCDT_CLR_AVI_DIS
+ | BIT_TMDS_CSTAT_P3_CLR_AVI, ~0);
+ sii8620_write_seq_static(ctx,
+ REG_HPD_CTRL, BIT_HPD_CTRL_HPD_OUT_OVR_EN
+ | BIT_HPD_CTRL_HPD_HIGH,
+ );
+}
+
+static void sii8620_enable_gen2_write_burst(struct sii8620 *ctx)
+{
+ if (ctx->gen2_write_burst)
+ return;
+
+ sii8620_write_seq_static(ctx,
+ REG_MDT_RCV_TIMEOUT, 100,
+ REG_MDT_RCV_CTRL, BIT_MDT_RCV_CTRL_MDT_RCV_EN
+ );
+ ctx->gen2_write_burst = 1;
+}
+
+static void sii8620_disable_gen2_write_burst(struct sii8620 *ctx)
+{
+ if (!ctx->gen2_write_burst)
+ return;
+
+ sii8620_write_seq_static(ctx,
+ REG_MDT_XMIT_CTRL, 0,
+ REG_MDT_RCV_CTRL, 0
+ );
+ ctx->gen2_write_burst = 0;
+}
+
+static void sii8620_start_gen2_write_burst(struct sii8620 *ctx)
+{
+ sii8620_write_seq_static(ctx,
+ REG_MDT_INT_1_MASK, BIT_MDT_RCV_TIMEOUT
+ | BIT_MDT_RCV_SM_ABORT_PKT_RCVD | BIT_MDT_RCV_SM_ERROR
+ | BIT_MDT_XMIT_TIMEOUT | BIT_MDT_XMIT_SM_ABORT_PKT_RCVD
+ | BIT_MDT_XMIT_SM_ERROR,
+ REG_MDT_INT_0_MASK, BIT_MDT_XFIFO_EMPTY
+ | BIT_MDT_IDLE_AFTER_HAWB_DISABLE
+ | BIT_MDT_RFIFO_DATA_RDY
+ );
+ sii8620_enable_gen2_write_burst(ctx);
+}
+
+static void sii8620_mhl_discover(struct sii8620 *ctx)
+{
+ sii8620_write_seq_static(ctx,
+ REG_DISC_CTRL9, BIT_DISC_CTRL9_WAKE_DRVFLT
+ | BIT_DISC_CTRL9_DISC_PULSE_PROCEED,
+ REG_DISC_CTRL4, VAL_DISC_CTRL4(VAL_PUP_5K, VAL_PUP_20K),
+ REG_CBUS_DISC_INTR0_MASK, BIT_MHL3_EST_INT
+ | BIT_MHL_EST_INT
+ | BIT_NOT_MHL_EST_INT
+ | BIT_CBUS_MHL3_DISCON_INT
+ | BIT_CBUS_MHL12_DISCON_INT
+ | BIT_RGND_READY_INT,
+ REG_MHL_PLL_CTL0, VAL_MHL_PLL_CTL0_HDMI_CLK_RATIO_1X
+ | BIT_MHL_PLL_CTL0_CRYSTAL_CLK_SEL
+ | BIT_MHL_PLL_CTL0_ZONE_MASK_OE,
+ REG_MHL_DP_CTL0, BIT_MHL_DP_CTL0_DP_OE
+ | BIT_MHL_DP_CTL0_TX_OE_OVR,
+ REG_M3_CTRL, VAL_M3_CTRL_MHL3_VALUE,
+ REG_MHL_DP_CTL1, 0xA2,
+ REG_MHL_DP_CTL2, 0x03,
+ REG_MHL_DP_CTL3, 0x35,
+ REG_MHL_DP_CTL5, 0x02,
+ REG_MHL_DP_CTL6, 0x02,
+ REG_MHL_DP_CTL7, 0x03,
+ REG_COC_CTLC, 0xFF,
+ REG_DPD, BIT_DPD_PWRON_PLL | BIT_DPD_PDNTX12
+ | BIT_DPD_OSC_EN | BIT_DPD_PWRON_HSIC,
+ REG_COC_INTR_MASK, BIT_COC_PLL_LOCK_STATUS_CHANGE
+ | BIT_COC_CALIBRATION_DONE,
+ REG_CBUS_INT_1_MASK, BIT_CBUS_MSC_ABORT_RCVD
+ | BIT_CBUS_CMD_ABORT,
+ REG_CBUS_INT_0_MASK, BIT_CBUS_MSC_MT_DONE
+ | BIT_CBUS_HPD_CHG
+ | BIT_CBUS_MSC_MR_WRITE_STAT
+ | BIT_CBUS_MSC_MR_MSC_MSG
+ | BIT_CBUS_MSC_MR_WRITE_BURST
+ | BIT_CBUS_MSC_MR_SET_INT
+ | BIT_CBUS_MSC_MT_DONE_NACK
+ );
+}
+
+static void sii8620_peer_specific_init(struct sii8620 *ctx)
+{
+ if (ctx->mode == CM_MHL3)
+ sii8620_write_seq_static(ctx,
+ REG_SYS_CTRL1, BIT_SYS_CTRL1_BLOCK_DDC_BY_HPD,
+ REG_EMSCINTRMASK1,
+ BIT_EMSCINTR1_EMSC_TRAINING_COMMA_ERR
+ );
+ else
+ sii8620_write_seq_static(ctx,
+ REG_HDCP2X_INTR0_MASK, 0x00,
+ REG_EMSCINTRMASK1, 0x00,
+ REG_HDCP2X_INTR0, 0xFF,
+ REG_INTR1, 0xFF,
+ REG_SYS_CTRL1, BIT_SYS_CTRL1_BLOCK_DDC_BY_HPD
+ | BIT_SYS_CTRL1_TX_CTRL_HDMI
+ );
+}
+
+#define SII8620_MHL_VERSION 0x32
+#define SII8620_SCRATCHPAD_SIZE 16
+#define SII8620_INT_STAT_SIZE 0x33
+
+static void sii8620_set_dev_cap(struct sii8620 *ctx)
+{
+ static const u8 devcap[MHL_DCAP_SIZE] = {
+ [MHL_DCAP_MHL_VERSION] = SII8620_MHL_VERSION,
+ [MHL_DCAP_CAT] = MHL_DCAP_CAT_SOURCE | MHL_DCAP_CAT_POWER,
+ [MHL_DCAP_ADOPTER_ID_H] = 0x01,
+ [MHL_DCAP_ADOPTER_ID_L] = 0x41,
+ [MHL_DCAP_VID_LINK_MODE] = MHL_DCAP_VID_LINK_RGB444
+ | MHL_DCAP_VID_LINK_PPIXEL
+ | MHL_DCAP_VID_LINK_16BPP,
+ [MHL_DCAP_AUD_LINK_MODE] = MHL_DCAP_AUD_LINK_2CH,
+ [MHL_DCAP_VIDEO_TYPE] = MHL_DCAP_VT_GRAPHICS,
+ [MHL_DCAP_LOG_DEV_MAP] = MHL_DCAP_LD_GUI,
+ [MHL_DCAP_BANDWIDTH] = 0x0f,
+ [MHL_DCAP_FEATURE_FLAG] = MHL_DCAP_FEATURE_RCP_SUPPORT
+ | MHL_DCAP_FEATURE_RAP_SUPPORT
+ | MHL_DCAP_FEATURE_SP_SUPPORT,
+ [MHL_DCAP_SCRATCHPAD_SIZE] = SII8620_SCRATCHPAD_SIZE,
+ [MHL_DCAP_INT_STAT_SIZE] = SII8620_INT_STAT_SIZE,
+ };
+ static const u8 xdcap[MHL_XDC_SIZE] = {
+ [MHL_XDC_ECBUS_SPEEDS] = MHL_XDC_ECBUS_S_075
+ | MHL_XDC_ECBUS_S_8BIT,
+ [MHL_XDC_TMDS_SPEEDS] = MHL_XDC_TMDS_150
+ | MHL_XDC_TMDS_300 | MHL_XDC_TMDS_600,
+ [MHL_XDC_ECBUS_ROLES] = MHL_XDC_DEV_HOST,
+ [MHL_XDC_LOG_DEV_MAPX] = MHL_XDC_LD_PHONE,
+ };
+
+ sii8620_write_buf(ctx, REG_MHL_DEVCAP_0, devcap, ARRAY_SIZE(devcap));
+ sii8620_write_buf(ctx, REG_MHL_EXTDEVCAP_0, xdcap, ARRAY_SIZE(xdcap));
+}
+
+static void sii8620_mhl_init(struct sii8620 *ctx)
+{
+ sii8620_write_seq_static(ctx,
+ REG_DISC_CTRL4, VAL_DISC_CTRL4(VAL_PUP_OFF, VAL_PUP_20K),
+ REG_CBUS_MSC_COMPAT_CTRL,
+ BIT_CBUS_MSC_COMPAT_CTRL_XDEVCAP_EN,
+ );
+
+ sii8620_peer_specific_init(ctx);
+
+ sii8620_disable_hpd(ctx);
+
+ sii8620_write_seq_static(ctx,
+ REG_EDID_CTRL, BIT_EDID_CTRL_EDID_FIFO_ADDR_AUTO,
+ REG_DISC_CTRL9, BIT_DISC_CTRL9_WAKE_DRVFLT
+ | BIT_DISC_CTRL9_WAKE_PULSE_BYPASS,
+ REG_TMDS0_CCTRL1, 0x90,
+ REG_TMDS_CLK_EN, 0x01,
+ REG_TMDS_CH_EN, 0x11,
+ REG_BGR_BIAS, 0x87,
+ REG_ALICE0_ZONE_CTRL, 0xE8,
+ REG_ALICE0_MODE_CTRL, 0x04,
+ );
+ sii8620_setbits(ctx, REG_LM_DDC, BIT_LM_DDC_SW_TPI_EN_DISABLED, 0);
+ sii8620_write_seq_static(ctx,
+ REG_TPI_HW_OPT3, 0x76,
+ REG_TMDS_CCTRL, BIT_TMDS_CCTRL_TMDS_OE,
+ REG_TPI_DTD_B2, 79,
+ );
+ sii8620_set_dev_cap(ctx);
+ sii8620_write_seq_static(ctx,
+ REG_MDT_XMIT_TIMEOUT, 100,
+ REG_MDT_XMIT_CTRL, 0x03,
+ REG_MDT_XFIFO_STAT, 0x00,
+ REG_MDT_RCV_TIMEOUT, 100,
+ REG_CBUS_LINK_CTRL_8, 0x1D,
+ );
+
+ sii8620_start_gen2_write_burst(ctx);
+ sii8620_write_seq_static(ctx,
+ REG_BIST_CTRL, 0x00,
+ REG_COC_CTL1, 0x10,
+ REG_COC_CTL2, 0x18,
+ REG_COC_CTLF, 0x07,
+ REG_COC_CTL11, 0xF8,
+ REG_COC_CTL17, 0x61,
+ REG_COC_CTL18, 0x46,
+ REG_COC_CTL19, 0x15,
+ REG_COC_CTL1A, 0x01,
+ REG_MHL_COC_CTL3, BIT_MHL_COC_CTL3_COC_AECHO_EN,
+ REG_MHL_COC_CTL4, 0x2D,
+ REG_MHL_COC_CTL5, 0xF9,
+ REG_MSC_HEARTBEAT_CTRL, 0x27,
+ );
+ sii8620_disable_gen2_write_burst(ctx);
+
+ /* currently MHL3 is not supported, so we force version to 0 */
+ sii8620_mt_write_stat(ctx, MHL_DST_REG(VERSION), 0);
+ sii8620_mt_write_stat(ctx, MHL_DST_REG(CONNECTED_RDY),
+ MHL_DST_CONN_DCAP_RDY | MHL_DST_CONN_XDEVCAPP_SUPP
+ | MHL_DST_CONN_POW_STAT);
+ sii8620_mt_set_int(ctx, MHL_INT_REG(RCHANGE), MHL_INT_RC_DCAP_CHG);
+}
+
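+/* Switch the link mode and apply the matching PHY/PLL register settings */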
+static void sii8620_set_mode(struct sii8620 *ctx, enum sii8620_mode mode)
+{
+ if (ctx->mode == mode)
+ return;
+
+ ctx->mode = mode;
+
+ switch (mode) {
+ case CM_MHL1:
+ sii8620_write_seq_static(ctx,
+ REG_CBUS_MSC_COMPAT_CTRL, 0x02,
+ REG_M3_CTRL, VAL_M3_CTRL_MHL1_2_VALUE,
+ REG_DPD, BIT_DPD_PWRON_PLL | BIT_DPD_PDNTX12
+ | BIT_DPD_OSC_EN,
+ REG_COC_INTR_MASK, 0
+ );
+ break;
+ case CM_MHL3:
+ sii8620_write_seq_static(ctx,
+ REG_M3_CTRL, VAL_M3_CTRL_MHL3_VALUE,
+ REG_COC_CTL0, 0x40,
+ REG_MHL_COC_CTL1, 0x07
+ );
+ break;
+ case CM_DISCONNECTED:
+ break;
+ default:
+ dev_err(ctx->dev, "%s mode %d not supported\n", __func__, mode);
+ break;
+ }
+
+ sii8620_set_auto_zone(ctx);
+
+ if (mode != CM_MHL1)
+ return;
+
+ sii8620_write_seq_static(ctx,
+ REG_MHL_DP_CTL0, 0xBC,
+ REG_MHL_DP_CTL1, 0xBB,
+ REG_MHL_DP_CTL3, 0x48,
+ REG_MHL_DP_CTL5, 0x39,
+ REG_MHL_DP_CTL2, 0x2A,
+ REG_MHL_DP_CTL6, 0x2A,
+ REG_MHL_DP_CTL7, 0x08
+ );
+}
+
+static void sii8620_disconnect(struct sii8620 *ctx)
+{
+ sii8620_disable_gen2_write_burst(ctx);
+ sii8620_stop_video(ctx);
+ msleep(50);
+ sii8620_cbus_reset(ctx);
+ sii8620_set_mode(ctx, CM_DISCONNECTED);
+ sii8620_write_seq_static(ctx,
+ REG_COC_CTL0, 0x40,
+ REG_CBUS3_CNVT, 0x84,
+ REG_COC_CTL14, 0x00,
+ REG_COC_CTL0, 0x40,
+ REG_HRXCTRL3, 0x07,
+ REG_MHL_PLL_CTL0, VAL_MHL_PLL_CTL0_HDMI_CLK_RATIO_1X
+ | BIT_MHL_PLL_CTL0_CRYSTAL_CLK_SEL
+ | BIT_MHL_PLL_CTL0_ZONE_MASK_OE,
+ REG_MHL_DP_CTL0, BIT_MHL_DP_CTL0_DP_OE
+ | BIT_MHL_DP_CTL0_TX_OE_OVR,
+ REG_MHL_DP_CTL1, 0xBB,
+ REG_MHL_DP_CTL3, 0x48,
+ REG_MHL_DP_CTL5, 0x3F,
+ REG_MHL_DP_CTL2, 0x2F,
+ REG_MHL_DP_CTL6, 0x2A,
+ REG_MHL_DP_CTL7, 0x03
+ );
+ sii8620_disable_hpd(ctx);
+ sii8620_write_seq_static(ctx,
+ REG_M3_CTRL, VAL_M3_CTRL_MHL3_VALUE,
+ REG_MHL_COC_CTL1, 0x07,
+ REG_DISC_CTRL4, VAL_DISC_CTRL4(VAL_PUP_OFF, VAL_PUP_20K),
+ REG_DISC_CTRL8, 0x00,
+ REG_DISC_CTRL9, BIT_DISC_CTRL9_WAKE_DRVFLT
+ | BIT_DISC_CTRL9_WAKE_PULSE_BYPASS,
+ REG_INT_CTRL, 0x00,
+ REG_MSC_HEARTBEAT_CTRL, 0x27,
+ REG_DISC_CTRL1, 0x25,
+ REG_CBUS_DISC_INTR0, (u8)~BIT_RGND_READY_INT,
+ REG_CBUS_DISC_INTR0_MASK, BIT_RGND_READY_INT,
+ REG_MDT_INT_1, 0xff,
+ REG_MDT_INT_1_MASK, 0x00,
+ REG_MDT_INT_0, 0xff,
+ REG_MDT_INT_0_MASK, 0x00,
+ REG_COC_INTR, 0xff,
+ REG_COC_INTR_MASK, 0x00,
+ REG_TRXINTH, 0xff,
+ REG_TRXINTMH, 0x00,
+ REG_CBUS_INT_0, 0xff,
+ REG_CBUS_INT_0_MASK, 0x00,
+ REG_CBUS_INT_1, 0xff,
+ REG_CBUS_INT_1_MASK, 0x00,
+ REG_EMSCINTR, 0xff,
+ REG_EMSCINTRMASK, 0x00,
+ REG_EMSCINTR1, 0xff,
+ REG_EMSCINTRMASK1, 0x00,
+ REG_INTR8, 0xff,
+ REG_INTR8_MASK, 0x00,
+ REG_TPI_INTR_ST0, 0xff,
+ REG_TPI_INTR_EN, 0x00,
+ REG_HDCP2X_INTR0, 0xff,
+ REG_HDCP2X_INTR0_MASK, 0x00,
+ REG_INTR9, 0xff,
+ REG_INTR9_MASK, 0x00,
+ REG_INTR3, 0xff,
+ REG_INTR3_MASK, 0x00,
+ REG_INTR5, 0xff,
+ REG_INTR5_MASK, 0x00,
+ REG_INTR2, 0xff,
+ REG_INTR2_MASK, 0x00,
+ );
+ memset(ctx->stat, 0, sizeof(ctx->stat));
+ memset(ctx->xstat, 0, sizeof(ctx->xstat));
+ memset(ctx->devcap, 0, sizeof(ctx->devcap));
+ memset(ctx->xdevcap, 0, sizeof(ctx->xdevcap));
+ ctx->cbus_status = 0;
+ ctx->sink_type = SINK_NONE;
+ kfree(ctx->edid);
+ ctx->edid = NULL;
+ sii8620_mt_cleanup(ctx);
+}
+
+static void sii8620_mhl_disconnected(struct sii8620 *ctx)
+{
+ sii8620_write_seq_static(ctx,
+ REG_DISC_CTRL4, VAL_DISC_CTRL4(VAL_PUP_OFF, VAL_PUP_20K),
+ REG_CBUS_MSC_COMPAT_CTRL,
+ BIT_CBUS_MSC_COMPAT_CTRL_XDEVCAP_EN
+ );
+ sii8620_disconnect(ctx);
+}
+
+static void sii8620_irq_disc(struct sii8620 *ctx)
+{
+ u8 stat = sii8620_readb(ctx, REG_CBUS_DISC_INTR0);
+
+ if (stat & VAL_CBUS_MHL_DISCON)
+ sii8620_mhl_disconnected(ctx);
+
+ if (stat & BIT_RGND_READY_INT) {
+ u8 stat2 = sii8620_readb(ctx, REG_DISC_STAT2);
+
+ if ((stat2 & MSK_DISC_STAT2_RGND) == VAL_RGND_1K) {
+ sii8620_mhl_discover(ctx);
+ } else {
+ sii8620_write_seq_static(ctx,
+ REG_DISC_CTRL9, BIT_DISC_CTRL9_WAKE_DRVFLT
+ | BIT_DISC_CTRL9_NOMHL_EST
+ | BIT_DISC_CTRL9_WAKE_PULSE_BYPASS,
+ REG_CBUS_DISC_INTR0_MASK, BIT_RGND_READY_INT
+ | BIT_CBUS_MHL3_DISCON_INT
+ | BIT_CBUS_MHL12_DISCON_INT
+ | BIT_NOT_MHL_EST_INT
+ );
+ }
+ }
+ if (stat & BIT_MHL_EST_INT)
+ sii8620_mhl_init(ctx);
+
+ sii8620_write(ctx, REG_CBUS_DISC_INTR0, stat);
+}
+
+static void sii8620_irq_g2wb(struct sii8620 *ctx)
+{
+ u8 stat = sii8620_readb(ctx, REG_MDT_INT_0);
+
+ if (stat & BIT_MDT_IDLE_AFTER_HAWB_DISABLE)
+ dev_dbg(ctx->dev, "HAWB idle\n");
+
+ sii8620_write(ctx, REG_MDT_INT_0, stat);
+}
+
+static void sii8620_status_changed_dcap(struct sii8620 *ctx)
+{
+ if (ctx->stat[MHL_DST_CONNECTED_RDY] & MHL_DST_CONN_DCAP_RDY) {
+ sii8620_set_mode(ctx, CM_MHL1);
+ sii8620_peer_specific_init(ctx);
+ sii8620_write(ctx, REG_INTR9_MASK, BIT_INTR9_DEVCAP_DONE
+ | BIT_INTR9_EDID_DONE | BIT_INTR9_EDID_ERROR);
+ }
+}
+
+static void sii8620_status_changed_path(struct sii8620 *ctx)
+{
+ if (ctx->stat[MHL_DST_LINK_MODE] & MHL_DST_LM_PATH_ENABLED) {
+ sii8620_mt_write_stat(ctx, MHL_DST_REG(LINK_MODE),
+ MHL_DST_LM_CLK_MODE_NORMAL
+ | MHL_DST_LM_PATH_ENABLED);
+ sii8620_mt_read_devcap(ctx, false);
+ } else {
+ sii8620_mt_write_stat(ctx, MHL_DST_REG(LINK_MODE),
+ MHL_DST_LM_CLK_MODE_NORMAL);
+ }
+}
+
+static void sii8620_msc_mr_write_stat(struct sii8620 *ctx)
+{
+ u8 st[MHL_DST_SIZE], xst[MHL_XDS_SIZE];
+
+ sii8620_read_buf(ctx, REG_MHL_STAT_0, st, MHL_DST_SIZE);
+ sii8620_read_buf(ctx, REG_MHL_EXTSTAT_0, xst, MHL_XDS_SIZE);
+
+ sii8620_update_array(ctx->stat, st, MHL_DST_SIZE);
+ sii8620_update_array(ctx->xstat, xst, MHL_XDS_SIZE);
+
+ if (st[MHL_DST_CONNECTED_RDY] & MHL_DST_CONN_DCAP_RDY)
+ sii8620_status_changed_dcap(ctx);
+
+ if (st[MHL_DST_LINK_MODE] & MHL_DST_LM_PATH_ENABLED)
+ sii8620_status_changed_path(ctx);
+}
+
+static void sii8620_msc_mr_set_int(struct sii8620 *ctx)
+{
+ u8 ints[MHL_INT_SIZE];
+
+ sii8620_read_buf(ctx, REG_MHL_INT_0, ints, MHL_INT_SIZE);
+ sii8620_write_buf(ctx, REG_MHL_INT_0, ints, MHL_INT_SIZE);
+}
+
+static struct sii8620_mt_msg *sii8620_msc_msg_first(struct sii8620 *ctx)
+{
+ struct device *dev = ctx->dev;
+
+ if (list_empty(&ctx->mt_queue)) {
+ dev_err(dev, "unexpected MSC MT response\n");
+ return NULL;
+ }
+
+ return list_first_entry(&ctx->mt_queue, struct sii8620_mt_msg, node);
+}
+
+static void sii8620_msc_mt_done(struct sii8620 *ctx)
+{
+ struct sii8620_mt_msg *msg = sii8620_msc_msg_first(ctx);
+
+ if (!msg)
+ return;
+
+ msg->ret = sii8620_readb(ctx, REG_MSC_MT_RCVD_DATA0);
+ ctx->mt_state = MT_STATE_DONE;
+}
+
+static void sii8620_msc_mr_msc_msg(struct sii8620 *ctx)
+{
+ struct sii8620_mt_msg *msg = sii8620_msc_msg_first(ctx);
+ u8 buf[2];
+
+ if (!msg)
+ return;
+
+ sii8620_read_buf(ctx, REG_MSC_MR_MSC_MSG_RCVD_1ST_DATA, buf, 2);
+
+ switch (buf[0]) {
+ case MHL_MSC_MSG_RAPK:
+ msg->ret = buf[1];
+ ctx->mt_state = MT_STATE_DONE;
+ break;
+ default:
+ dev_err(ctx->dev, "%s message type %d,%d not supported",
+ __func__, buf[0], buf[1]);
+ }
+}
+
+static void sii8620_irq_msc(struct sii8620 *ctx)
+{
+ u8 stat = sii8620_readb(ctx, REG_CBUS_INT_0);
+
+ if (stat & ~BIT_CBUS_HPD_CHG)
+ sii8620_write(ctx, REG_CBUS_INT_0, stat & ~BIT_CBUS_HPD_CHG);
+
+ if (stat & BIT_CBUS_HPD_CHG) {
+ u8 cbus_stat = sii8620_readb(ctx, REG_CBUS_STATUS);
+
+ if ((cbus_stat ^ ctx->cbus_status) & BIT_CBUS_STATUS_CBUS_HPD) {
+ sii8620_write(ctx, REG_CBUS_INT_0, BIT_CBUS_HPD_CHG);
+ } else {
+ stat ^= BIT_CBUS_STATUS_CBUS_HPD;
+ cbus_stat ^= BIT_CBUS_STATUS_CBUS_HPD;
+ }
+ ctx->cbus_status = cbus_stat;
+ }
+
+ if (stat & BIT_CBUS_MSC_MR_WRITE_STAT)
+ sii8620_msc_mr_write_stat(ctx);
+
+ if (stat & BIT_CBUS_MSC_MR_SET_INT)
+ sii8620_msc_mr_set_int(ctx);
+
+ if (stat & BIT_CBUS_MSC_MT_DONE)
+ sii8620_msc_mt_done(ctx);
+
+ if (stat & BIT_CBUS_MSC_MR_MSC_MSG)
+ sii8620_msc_mr_msc_msg(ctx);
+}
+
+static void sii8620_irq_coc(struct sii8620 *ctx)
+{
+ u8 stat = sii8620_readb(ctx, REG_COC_INTR);
+
+ sii8620_write(ctx, REG_COC_INTR, stat);
+}
+
+static void sii8620_irq_merr(struct sii8620 *ctx)
+{
+ u8 stat = sii8620_readb(ctx, REG_CBUS_INT_1);
+
+ sii8620_write(ctx, REG_CBUS_INT_1, stat);
+}
+
+static void sii8620_irq_edid(struct sii8620 *ctx)
+{
+ u8 stat = sii8620_readb(ctx, REG_INTR9);
+
+ sii8620_write(ctx, REG_INTR9, stat);
+
+ if (stat & BIT_INTR9_DEVCAP_DONE)
+ ctx->mt_state = MT_STATE_DONE;
+}
+
+static void sii8620_scdt_high(struct sii8620 *ctx)
+{
+ sii8620_write_seq_static(ctx,
+ REG_INTR8_MASK, BIT_CEA_NEW_AVI | BIT_CEA_NEW_VSI,
+ REG_TPI_SC, BIT_TPI_SC_TPI_OUTPUT_MODE_0_HDMI,
+ );
+}
+
+static void sii8620_scdt_low(struct sii8620 *ctx)
+{
+ sii8620_write(ctx, REG_TMDS_CSTAT_P3,
+ BIT_TMDS_CSTAT_P3_SCDT_CLR_AVI_DIS |
+ BIT_TMDS_CSTAT_P3_CLR_AVI);
+
+ sii8620_stop_video(ctx);
+
+ sii8620_write(ctx, REG_INTR8_MASK, 0);
+}
+
+static void sii8620_irq_scdt(struct sii8620 *ctx)
+{
+ u8 stat = sii8620_readb(ctx, REG_INTR5);
+
+ if (stat & BIT_INTR_SCDT_CHANGE) {
+ u8 cstat = sii8620_readb(ctx, REG_TMDS_CSTAT_P3);
+
+ if (cstat & BIT_TMDS_CSTAT_P3_SCDT)
+ sii8620_scdt_high(ctx);
+ else
+ sii8620_scdt_low(ctx);
+ }
+
+ sii8620_write(ctx, REG_INTR5, stat);
+}
+
+static void sii8620_new_vsi(struct sii8620 *ctx)
+{
+ u8 vsif[11];
+
+ sii8620_write(ctx, REG_RX_HDMI_CTRL2,
+ VAL_RX_HDMI_CTRL2_DEFVAL |
+ BIT_RX_HDMI_CTRL2_VSI_MON_SEL_VSI);
+ sii8620_read_buf(ctx, REG_RX_HDMI_MON_PKT_HEADER1, vsif,
+ ARRAY_SIZE(vsif));
+}
+
+static void sii8620_new_avi(struct sii8620 *ctx)
+{
+ sii8620_write(ctx, REG_RX_HDMI_CTRL2, VAL_RX_HDMI_CTRL2_DEFVAL);
+ sii8620_read_buf(ctx, REG_RX_HDMI_MON_PKT_HEADER1, ctx->avif,
+ ARRAY_SIZE(ctx->avif));
+}
+
+static void sii8620_irq_infr(struct sii8620 *ctx)
+{
+ u8 stat = sii8620_readb(ctx, REG_INTR8)
+ & (BIT_CEA_NEW_VSI | BIT_CEA_NEW_AVI);
+
+ sii8620_write(ctx, REG_INTR8, stat);
+
+ if (stat & BIT_CEA_NEW_VSI)
+ sii8620_new_vsi(ctx);
+
+ if (stat & BIT_CEA_NEW_AVI)
+ sii8620_new_avi(ctx);
+
+ if (stat & (BIT_CEA_NEW_VSI | BIT_CEA_NEW_AVI))
+ sii8620_start_video(ctx);
+}
+
+/* endian agnostic, non-volatile version of test_bit */
+static bool sii8620_test_bit(unsigned int nr, const u8 *addr)
+{
+ return 1 & (addr[nr / BITS_PER_BYTE] >> (nr % BITS_PER_BYTE));
+}
+
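+/*
+ * Threaded IRQ handler: snapshot the FAST_INTR_STAT register file once,
+ * dispatch the per-source handlers listed in irq_vec[], then run pending
+ * MSC work; ctx->lock serializes this against the bridge callbacks.
+ */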
+static irqreturn_t sii8620_irq_thread(int irq, void *data)
+{
+ static const struct {
+ int bit;
+ void (*handler)(struct sii8620 *ctx);
+ } irq_vec[] = {
+ { BIT_FAST_INTR_STAT_DISC, sii8620_irq_disc },
+ { BIT_FAST_INTR_STAT_G2WB, sii8620_irq_g2wb },
+ { BIT_FAST_INTR_STAT_COC, sii8620_irq_coc },
+ { BIT_FAST_INTR_STAT_MSC, sii8620_irq_msc },
+ { BIT_FAST_INTR_STAT_MERR, sii8620_irq_merr },
+ { BIT_FAST_INTR_STAT_EDID, sii8620_irq_edid },
+ { BIT_FAST_INTR_STAT_SCDT, sii8620_irq_scdt },
+ { BIT_FAST_INTR_STAT_INFR, sii8620_irq_infr },
+ };
+ struct sii8620 *ctx = data;
+ u8 stats[LEN_FAST_INTR_STAT];
+ int i, ret;
+
+ mutex_lock(&ctx->lock);
+
+ sii8620_read_buf(ctx, REG_FAST_INTR_STAT, stats, ARRAY_SIZE(stats));
+ for (i = 0; i < ARRAY_SIZE(irq_vec); ++i)
+ if (sii8620_test_bit(irq_vec[i].bit, stats))
+ irq_vec[i].handler(ctx);
+
+ sii8620_mt_work(ctx);
+
+ ret = sii8620_clear_error(ctx);
+ if (ret) {
+ dev_err(ctx->dev, "Error during IRQ handling, %d.\n", ret);
+ sii8620_mhl_disconnected(ctx);
+ }
+ mutex_unlock(&ctx->lock);
+
+ return IRQ_HANDLED;
+}
+
+static void sii8620_cable_in(struct sii8620 *ctx)
+{
+ struct device *dev = ctx->dev;
+ u8 ver[5];
+ int ret;
+
+ ret = sii8620_hw_on(ctx);
+ if (ret) {
+ dev_err(dev, "Error powering on, %d.\n", ret);
+ return;
+ }
+ sii8620_hw_reset(ctx);
+
+ sii8620_read_buf(ctx, REG_VND_IDL, ver, ARRAY_SIZE(ver));
+ ret = sii8620_clear_error(ctx);
+ if (ret) {
+ dev_err(dev, "Error accessing I2C bus, %d.\n", ret);
+ return;
+ }
+
+ dev_info(dev, "ChipID %02x%02x:%02x%02x rev %02x.\n", ver[1], ver[0],
+ ver[3], ver[2], ver[4]);
+
+ sii8620_write(ctx, REG_DPD,
+ BIT_DPD_PWRON_PLL | BIT_DPD_PDNTX12 | BIT_DPD_OSC_EN);
+
+ sii8620_xtal_set_rate(ctx);
+ sii8620_disconnect(ctx);
+
+ sii8620_write_seq_static(ctx,
+ REG_MHL_CBUS_CTL0, VAL_MHL_CBUS_CTL0_CBUS_DRV_SEL_STRONG
+ | VAL_MHL_CBUS_CTL0_CBUS_RGND_VBIAS_734,
+ REG_MHL_CBUS_CTL1, VAL_MHL_CBUS_CTL1_1115_OHM,
+ REG_DPD, BIT_DPD_PWRON_PLL | BIT_DPD_PDNTX12 | BIT_DPD_OSC_EN,
+ );
+
+ ret = sii8620_clear_error(ctx);
+ if (ret) {
+ dev_err(dev, "Error accessing I2C bus, %d.\n", ret);
+ return;
+ }
+
+ enable_irq(to_i2c_client(ctx->dev)->irq);
+}
+
+static inline struct sii8620 *bridge_to_sii8620(struct drm_bridge *bridge)
+{
+ return container_of(bridge, struct sii8620, bridge);
+}
+
+static bool sii8620_mode_fixup(struct drm_bridge *bridge,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct sii8620 *ctx = bridge_to_sii8620(bridge);
+ bool ret = false;
+ int max_clock = 74250;
+
+ mutex_lock(&ctx->lock);
+
+ if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+ goto out;
+
+ if (ctx->devcap[MHL_DCAP_VID_LINK_MODE] & MHL_DCAP_VID_LINK_PPIXEL)
+ max_clock = 300000;
+
+ ret = mode->clock <= max_clock;
+
+out:
+ mutex_unlock(&ctx->lock);
+
+ return ret;
+}
+
+static const struct drm_bridge_funcs sii8620_bridge_funcs = {
+ .mode_fixup = sii8620_mode_fixup,
+};
+
+static int sii8620_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct device *dev = &client->dev;
+ struct sii8620 *ctx;
+ int ret;
+
+ ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ ctx->dev = dev;
+ mutex_init(&ctx->lock);
+ INIT_LIST_HEAD(&ctx->mt_queue);
+
+ ctx->clk_xtal = devm_clk_get(dev, "xtal");
+ if (IS_ERR(ctx->clk_xtal)) {
+ dev_err(dev, "failed to get xtal clock from DT\n");
+ return PTR_ERR(ctx->clk_xtal);
+ }
+
+ if (!client->irq) {
+ dev_err(dev, "no irq provided\n");
+ return -EINVAL;
+ }
+ irq_set_status_flags(client->irq, IRQ_NOAUTOEN);
+ ret = devm_request_threaded_irq(dev, client->irq, NULL,
+ sii8620_irq_thread,
+ IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
+ "sii8620", ctx);
+ if (ret)
+ return ret;
+
+ ctx->gpio_reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
+ if (IS_ERR(ctx->gpio_reset)) {
+ dev_err(dev, "failed to get reset gpio from DT\n");
+ return PTR_ERR(ctx->gpio_reset);
+ }
+
+ ctx->supplies[0].supply = "cvcc10";
+ ctx->supplies[1].supply = "iovcc18";
+ ret = devm_regulator_bulk_get(dev, 2, ctx->supplies);
+ if (ret)
+ return ret;
+
+ i2c_set_clientdata(client, ctx);
+
+ ctx->bridge.funcs = &sii8620_bridge_funcs;
+ ctx->bridge.of_node = dev->of_node;
+ drm_bridge_add(&ctx->bridge);
+
+ sii8620_cable_in(ctx);
+
+ return 0;
+}
+
+static int sii8620_remove(struct i2c_client *client)
+{
+ struct sii8620 *ctx = i2c_get_clientdata(client);
+
+ disable_irq(to_i2c_client(ctx->dev)->irq);
+ drm_bridge_remove(&ctx->bridge);
+ sii8620_hw_off(ctx);
+
+ return 0;
+}
+
+static const struct of_device_id sii8620_dt_match[] = {
+ { .compatible = "sil,sii8620" },
+ { },
+};
+MODULE_DEVICE_TABLE(of, sii8620_dt_match);
+
+static const struct i2c_device_id sii8620_id[] = {
+ { "sii8620", 0 },
+ { },
+};
+MODULE_DEVICE_TABLE(i2c, sii8620_id);
+
+static struct i2c_driver sii8620_driver = {
+ .driver = {
+ .name = "sii8620",
+ .of_match_table = of_match_ptr(sii8620_dt_match),
+ },
+ .probe = sii8620_probe,
+ .remove = sii8620_remove,
+ .id_table = sii8620_id,
+};
+
+module_i2c_driver(sii8620_driver);
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/bridge/sil-sii8620.h b/drivers/gpu/drm/bridge/sil-sii8620.h
new file mode 100644
index 000000000000..6ff616a4f6ce
--- /dev/null
+++ b/drivers/gpu/drm/bridge/sil-sii8620.h
@@ -0,0 +1,1517 @@
+/*
+ * Registers of Silicon Image SiI8620 Mobile HD Transmitter
+ *
+ * Copyright (C) 2015, Samsung Electronics Co., Ltd.
+ * Andrzej Hajda <a.hajda@samsung.com>
+ *
+ * Based on MHL driver for Android devices.
+ * Copyright (C) 2013-2014 Silicon Image, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __SIL_SII8620_H__
+#define __SIL_SII8620_H__
+
+/* Vendor ID Low byte, default value: 0x01 */
+#define REG_VND_IDL 0x0000
+
+/* Vendor ID High byte, default value: 0x00 */
+#define REG_VND_IDH 0x0001
+
+/* Device ID Low byte, default value: 0x60 */
+#define REG_DEV_IDL 0x0002
+
+/* Device ID High byte, default value: 0x86 */
+#define REG_DEV_IDH 0x0003
+
+/* Device Revision, default value: 0x10 */
+#define REG_DEV_REV 0x0004
+
+/* OTP DBYTE510, default value: 0x00 */
+#define REG_OTP_DBYTE510 0x0006
+
+/* System Control #1, default value: 0x00 */
+#define REG_SYS_CTRL1 0x0008
+#define BIT_SYS_CTRL1_OTPVMUTEOVR_SET BIT(7)
+#define BIT_SYS_CTRL1_VSYNCPIN BIT(6)
+#define BIT_SYS_CTRL1_OTPADROPOVR_SET BIT(5)
+#define BIT_SYS_CTRL1_BLOCK_DDC_BY_HPD BIT(4)
+#define BIT_SYS_CTRL1_OTP2XVOVR_EN BIT(3)
+#define BIT_SYS_CTRL1_OTP2XAOVR_EN BIT(2)
+#define BIT_SYS_CTRL1_TX_CTRL_HDMI BIT(1)
+#define BIT_SYS_CTRL1_OTPAMUTEOVR_SET BIT(0)
+
+/* System Control DPD, default value: 0x90 */
+#define REG_DPD 0x000b
+#define BIT_DPD_PWRON_PLL BIT(7)
+#define BIT_DPD_PDNTX12 BIT(6)
+#define BIT_DPD_PDNRX12 BIT(5)
+#define BIT_DPD_OSC_EN BIT(4)
+#define BIT_DPD_PWRON_HSIC BIT(3)
+#define BIT_DPD_PDIDCK_N BIT(2)
+#define BIT_DPD_PD_MHL_CLK_N BIT(1)
+
+/* Dual link Control, default value: 0x00 */
+#define REG_DCTL 0x000d
+#define BIT_DCTL_TDM_LCLK_PHASE BIT(7)
+#define BIT_DCTL_HSIC_CLK_PHASE BIT(6)
+#define BIT_DCTL_CTS_TCK_PHASE BIT(5)
+#define BIT_DCTL_EXT_DDC_SEL BIT(4)
+#define BIT_DCTL_TRANSCODE BIT(3)
+#define BIT_DCTL_HSIC_RX_STROBE_PHASE BIT(2)
+#define BIT_DCTL_HSIC_TX_BIST_START_SEL BIT(1)
+#define BIT_DCTL_TCLKNX_PHASE BIT(0)
+
+/* PWD Software Reset, default value: 0x20 */
+#define REG_PWD_SRST 0x000e
+#define BIT_PWD_SRST_COC_DOC_RST BIT(7)
+#define BIT_PWD_SRST_CBUS_RST_SW BIT(6)
+#define BIT_PWD_SRST_CBUS_RST_SW_EN BIT(5)
+#define BIT_PWD_SRST_MHLFIFO_RST BIT(4)
+#define BIT_PWD_SRST_CBUS_RST BIT(3)
+#define BIT_PWD_SRST_SW_RST_AUTO BIT(2)
+#define BIT_PWD_SRST_HDCP2X_SW_RST BIT(1)
+#define BIT_PWD_SRST_SW_RST BIT(0)
+
+/* AKSV_1, default value: 0x00 */
+#define REG_AKSV_1 0x001d
+
+/* Video H Resolution #1, default value: 0x00 */
+#define REG_H_RESL 0x003a
+
+/* Video Mode, default value: 0x00 */
+#define REG_VID_MODE 0x004a
+#define BIT_VID_MODE_M1080P BIT(6)
+
+/* Video Input Mode, default value: 0xc0 */
+#define REG_VID_OVRRD 0x0051
+#define BIT_VID_OVRRD_PP_AUTO_DISABLE BIT(7)
+#define BIT_VID_OVRRD_M1080P_OVRRD BIT(6)
+#define BIT_VID_OVRRD_MINIVSYNC_ON BIT(5)
+#define BIT_VID_OVRRD_3DCONV_EN_FRAME_PACK BIT(4)
+#define BIT_VID_OVRRD_ENABLE_AUTO_PATH_EN BIT(3)
+#define BIT_VID_OVRRD_ENRGB2YCBCR_OVRRD BIT(2)
+#define BIT_VID_OVRRD_ENDOWNSAMPLE_OVRRD BIT(0)
+
+/* I2C Address reassignment, default value: 0x00 */
+#define REG_PAGE_MHLSPEC_ADDR 0x0057
+#define REG_PAGE7_ADDR 0x0058
+#define REG_PAGE8_ADDR 0x005c
+
+/* Fast Interrupt Status, default value: 0x00 */
+#define REG_FAST_INTR_STAT 0x005f
+#define LEN_FAST_INTR_STAT 7
+#define BIT_FAST_INTR_STAT_TIMR 8
+#define BIT_FAST_INTR_STAT_INT2 9
+#define BIT_FAST_INTR_STAT_DDC 10
+#define BIT_FAST_INTR_STAT_SCDT 11
+#define BIT_FAST_INTR_STAT_INFR 13
+#define BIT_FAST_INTR_STAT_EDID 14
+#define BIT_FAST_INTR_STAT_HDCP 15
+#define BIT_FAST_INTR_STAT_MSC 16
+#define BIT_FAST_INTR_STAT_MERR 17
+#define BIT_FAST_INTR_STAT_G2WB 18
+#define BIT_FAST_INTR_STAT_G2WB_ERR 19
+#define BIT_FAST_INTR_STAT_DISC 28
+#define BIT_FAST_INTR_STAT_BLOCK 30
+#define BIT_FAST_INTR_STAT_LTRN 31
+#define BIT_FAST_INTR_STAT_HDCP2 32
+#define BIT_FAST_INTR_STAT_TDM 42
+#define BIT_FAST_INTR_STAT_COC 51
+
+/* GPIO Control, default value: 0x15 */
+#define REG_GPIO_CTRL1 0x006e
+#define BIT_CTRL1_GPIO_I_8 BIT(5)
+#define BIT_CTRL1_GPIO_OEN_8 BIT(4)
+#define BIT_CTRL1_GPIO_I_7 BIT(3)
+#define BIT_CTRL1_GPIO_OEN_7 BIT(2)
+#define BIT_CTRL1_GPIO_I_6 BIT(1)
+#define BIT_CTRL1_GPIO_OEN_6 BIT(0)
+
+/* Interrupt Control, default value: 0x06 */
+#define REG_INT_CTRL 0x006f
+#define BIT_INT_CTRL_SOFTWARE_WP BIT(7)
+#define BIT_INT_CTRL_INTR_OD BIT(2)
+#define BIT_INT_CTRL_INTR_POLARITY BIT(1)
+
+/* Interrupt State, default value: 0x00 */
+#define REG_INTR_STATE 0x0070
+#define BIT_INTR_STATE_INTR_STATE BIT(0)
+
+/* Interrupt Source #1, default value: 0x00 */
+#define REG_INTR1 0x0071
+
+/* Interrupt Source #2, default value: 0x00 */
+#define REG_INTR2 0x0072
+
+/* Interrupt Source #3, default value: 0x01 */
+#define REG_INTR3 0x0073
+#define BIT_DDC_CMD_DONE BIT(3)
+
+/* Interrupt Source #5, default value: 0x00 */
+#define REG_INTR5 0x0074
+
+/* Interrupt #1 Mask, default value: 0x00 */
+#define REG_INTR1_MASK 0x0075
+
+/* Interrupt #2 Mask, default value: 0x00 */
+#define REG_INTR2_MASK 0x0076
+
+/* Interrupt #3 Mask, default value: 0x00 */
+#define REG_INTR3_MASK 0x0077
+
+/* Interrupt #5 Mask, default value: 0x00 */
+#define REG_INTR5_MASK 0x0078
+#define BIT_INTR_SCDT_CHANGE BIT(0)
+
+/* Hot Plug Connection Control, default value: 0x45 */
+#define REG_HPD_CTRL 0x0079
+#define BIT_HPD_CTRL_HPD_DS_SIGNAL BIT(7)
+#define BIT_HPD_CTRL_HPD_OUT_OD_EN BIT(6)
+#define BIT_HPD_CTRL_HPD_HIGH BIT(5)
+#define BIT_HPD_CTRL_HPD_OUT_OVR_EN BIT(4)
+#define BIT_HPD_CTRL_GPIO_I_1 BIT(3)
+#define BIT_HPD_CTRL_GPIO_OEN_1 BIT(2)
+#define BIT_HPD_CTRL_GPIO_I_0 BIT(1)
+#define BIT_HPD_CTRL_GPIO_OEN_0 BIT(0)
+
+/* GPIO Control, default value: 0x55 */
+#define REG_GPIO_CTRL 0x007a
+#define BIT_CTRL_GPIO_I_5 BIT(7)
+#define BIT_CTRL_GPIO_OEN_5 BIT(6)
+#define BIT_CTRL_GPIO_I_4 BIT(5)
+#define BIT_CTRL_GPIO_OEN_4 BIT(4)
+#define BIT_CTRL_GPIO_I_3 BIT(3)
+#define BIT_CTRL_GPIO_OEN_3 BIT(2)
+#define BIT_CTRL_GPIO_I_2 BIT(1)
+#define BIT_CTRL_GPIO_OEN_2 BIT(0)
+
+/* Interrupt Source 7, default value: 0x00 */
+#define REG_INTR7 0x007b
+
+/* Interrupt Source 8, default value: 0x00 */
+#define REG_INTR8 0x007c
+
+/* Interrupt #7 Mask, default value: 0x00 */
+#define REG_INTR7_MASK 0x007d
+
+/* Interrupt #8 Mask, default value: 0x00 */
+#define REG_INTR8_MASK 0x007e
+#define BIT_CEA_NEW_VSI BIT(2)
+#define BIT_CEA_NEW_AVI BIT(1)
+
+/* IEEE, default value: 0x10 */
+#define REG_TMDS_CCTRL 0x0080
+#define BIT_TMDS_CCTRL_TMDS_OE BIT(4)
+
+/* TMDS Control #4, default value: 0x02 */
+#define REG_TMDS_CTRL4 0x0085
+#define BIT_TMDS_CTRL4_SCDT_CKDT_SEL BIT(1)
+#define BIT_TMDS_CTRL4_TX_EN_BY_SCDT BIT(0)
+
+/* BIST CNTL, default value: 0x00 */
+#define REG_BIST_CTRL 0x00bb
+#define BIT_RXBIST_VGB_EN BIT(7)
+#define BIT_TXBIST_VGB_EN BIT(6)
+#define BIT_BIST_START_SEL BIT(5)
+#define BIT_BIST_START_BIT BIT(4)
+#define BIT_BIST_ALWAYS_ON BIT(3)
+#define BIT_BIST_TRANS BIT(2)
+#define BIT_BIST_RESET BIT(1)
+#define BIT_BIST_EN BIT(0)
+
+/* BIST TEST SEL, default value: 0x00 */
+#define REG_BIST_TEST_SEL 0x00bd
+#define MSK_BIST_TEST_SEL_BIST_PATT_SEL 0x0f
+
+/* BIST VIDEO_MODE, default value: 0x00 */
+#define REG_BIST_VIDEO_MODE 0x00be
+#define MSK_BIST_VIDEO_MODE_BIST_VIDEO_MODE_3_0 0x0f
+
+/* BIST DURATION0, default value: 0x00 */
+#define REG_BIST_DURATION_0 0x00bf
+
+/* BIST DURATION1, default value: 0x00 */
+#define REG_BIST_DURATION_1 0x00c0
+
+/* BIST DURATION2, default value: 0x00 */
+#define REG_BIST_DURATION_2 0x00c1
+
+/* BIST 8BIT_PATTERN, default value: 0x00 */
+#define REG_BIST_8BIT_PATTERN 0x00c2
+
+/* LM DDC, default value: 0x80 */
+#define REG_LM_DDC 0x00c7
+#define BIT_LM_DDC_SW_TPI_EN_DISABLED BIT(7)
+
+#define BIT_LM_DDC_VIDEO_MUTE_EN BIT(5)
+#define BIT_LM_DDC_DDC_TPI_SW BIT(2)
+#define BIT_LM_DDC_DDC_GRANT BIT(1)
+#define BIT_LM_DDC_DDC_GPU_REQUEST BIT(0)
+
+/* DDC I2C Manual, default value: 0x03 */
+#define REG_DDC_MANUAL 0x00ec
+#define BIT_DDC_MANUAL_MAN_DDC BIT(7)
+#define BIT_DDC_MANUAL_VP_SEL BIT(6)
+#define BIT_DDC_MANUAL_DSDA BIT(5)
+#define BIT_DDC_MANUAL_DSCL BIT(4)
+#define BIT_DDC_MANUAL_GCP_HW_CTL_EN BIT(3)
+#define BIT_DDC_MANUAL_DDCM_ABORT_WP BIT(2)
+#define BIT_DDC_MANUAL_IO_DSDA BIT(1)
+#define BIT_DDC_MANUAL_IO_DSCL BIT(0)
+
+/* DDC I2C Target Slave Address, default value: 0x00 */
+#define REG_DDC_ADDR 0x00ed
+#define MSK_DDC_ADDR_DDC_ADDR 0xfe
+
+/* DDC I2C Target Segment Address, default value: 0x00 */
+#define REG_DDC_SEGM 0x00ee
+
+/* DDC I2C Target Offset Address, default value: 0x00 */
+#define REG_DDC_OFFSET 0x00ef
+
+/* DDC I2C Data In count #1, default value: 0x00 */
+#define REG_DDC_DIN_CNT1 0x00f0
+
+/* DDC I2C Data In count #2, default value: 0x00 */
+#define REG_DDC_DIN_CNT2 0x00f1
+#define MSK_DDC_DIN_CNT2_DDC_DIN_CNT_9_8 0x03
+
+/* DDC I2C Status, default value: 0x04 */
+#define REG_DDC_STATUS 0x00f2
+#define BIT_DDC_STATUS_DDC_BUS_LOW BIT(6)
+#define BIT_DDC_STATUS_DDC_NO_ACK BIT(5)
+#define BIT_DDC_STATUS_DDC_I2C_IN_PROG BIT(4)
+#define BIT_DDC_STATUS_DDC_FIFO_FULL BIT(3)
+#define BIT_DDC_STATUS_DDC_FIFO_EMPTY BIT(2)
+#define BIT_DDC_STATUS_DDC_FIFO_READ_IN_SUE BIT(1)
+#define BIT_DDC_STATUS_DDC_FIFO_WRITE_IN_USE BIT(0)
+
+/* DDC I2C Command, default value: 0x70 */
+#define REG_DDC_CMD 0x00f3
+#define BIT_DDC_CMD_HDCP_DDC_EN BIT(6)
+#define BIT_DDC_CMD_SDA_DEL_EN BIT(5)
+#define BIT_DDC_CMD_DDC_FLT_EN BIT(4)
+
+#define MSK_DDC_CMD_DDC_CMD 0x0f
+#define VAL_DDC_CMD_ENH_DDC_READ_NO_ACK 0x04
+#define VAL_DDC_CMD_DDC_CMD_CLEAR_FIFO 0x09
+#define VAL_DDC_CMD_DDC_CMD_ABORT 0x0f
+
+/* DDC I2C FIFO Data In/Out, default value: 0x00 */
+#define REG_DDC_DATA 0x00f4
+
+/* DDC I2C Data Out Counter, default value: 0x00 */
+#define REG_DDC_DOUT_CNT 0x00f5
+#define BIT_DDC_DOUT_CNT_DDC_DELAY_CNT_8 BIT(7)
+#define MSK_DDC_DOUT_CNT_DDC_DATA_OUT_CNT 0x1f
+
+/* DDC I2C Delay Count, default value: 0x14 */
+#define REG_DDC_DELAY_CNT 0x00f6
+
+/* Test Control, default value: 0x80 */
+#define REG_TEST_TXCTRL 0x00f7
+#define BIT_TEST_TXCTRL_RCLK_REF_SEL BIT(7)
+#define BIT_TEST_TXCTRL_PCLK_REF_SEL BIT(6)
+#define MSK_TEST_TXCTRL_BYPASS_PLL_CLK 0x3c
+#define BIT_TEST_TXCTRL_HDMI_MODE BIT(1)
+#define BIT_TEST_TXCTRL_TST_PLLCK BIT(0)
+
+/* CBUS Address, default value: 0x00 */
+#define REG_PAGE_CBUS_ADDR 0x00f8
+
+/* I2C Device Address re-assignment */
+#define REG_PAGE1_ADDR 0x00fc
+#define REG_PAGE2_ADDR 0x00fd
+#define REG_PAGE3_ADDR 0x00fe
+#define REG_HW_TPI_ADDR 0x00ff
+
+/* USBT CTRL0, default value: 0x00 */
+#define REG_UTSRST 0x0100
+#define BIT_UTSRST_FC_SRST BIT(5)
+#define BIT_UTSRST_KEEPER_SRST BIT(4)
+#define BIT_UTSRST_HTX_SRST BIT(3)
+#define BIT_UTSRST_TRX_SRST BIT(2)
+#define BIT_UTSRST_TTX_SRST BIT(1)
+#define BIT_UTSRST_HRX_SRST BIT(0)
+
+/* HSIC RX Control3, default value: 0x07 */
+#define REG_HRXCTRL3 0x0104
+#define MSK_HRXCTRL3_HRX_AFFCTRL 0xf0
+#define BIT_HRXCTRL3_HRX_OUT_EN BIT(2)
+#define BIT_HRXCTRL3_STATUS_EN BIT(1)
+#define BIT_HRXCTRL3_HRX_STAY_RESET BIT(0)
+
+/* HSIC RX INT Registers */
+#define REG_HRXINTL 0x0111
+#define REG_HRXINTH 0x0112
+
+/* TDM TX NUMBITS, default value: 0x0c */
+#define REG_TTXNUMB 0x0116
+#define MSK_TTXNUMB_TTX_AFFCTRL_3_0 0xf0
+#define BIT_TTXNUMB_TTX_COM1_AT_SYNC_WAIT BIT(3)
+#define MSK_TTXNUMB_TTX_NUMBPS_2_0 0x07
+
+/* TDM TX NUMSPISYM, default value: 0x04 */
+#define REG_TTXSPINUMS 0x0117
+
+/* TDM TX NUMHSICSYM, default value: 0x14 */
+#define REG_TTXHSICNUMS 0x0118
+
+/* TDM TX NUMTOTSYM, default value: 0x18 */
+#define REG_TTXTOTNUMS 0x0119
+
+/* TDM TX INT Low, default value: 0x00 */
+#define REG_TTXINTL 0x0136
+#define BIT_TTXINTL_TTX_INTR7 BIT(7)
+#define BIT_TTXINTL_TTX_INTR6 BIT(6)
+#define BIT_TTXINTL_TTX_INTR5 BIT(5)
+#define BIT_TTXINTL_TTX_INTR4 BIT(4)
+#define BIT_TTXINTL_TTX_INTR3 BIT(3)
+#define BIT_TTXINTL_TTX_INTR2 BIT(2)
+#define BIT_TTXINTL_TTX_INTR1 BIT(1)
+#define BIT_TTXINTL_TTX_INTR0 BIT(0)
+
+/* TDM TX INT High, default value: 0x00 */
+#define REG_TTXINTH 0x0137
+#define BIT_TTXINTH_TTX_INTR15 BIT(7)
+#define BIT_TTXINTH_TTX_INTR14 BIT(6)
+#define BIT_TTXINTH_TTX_INTR13 BIT(5)
+#define BIT_TTXINTH_TTX_INTR12 BIT(4)
+#define BIT_TTXINTH_TTX_INTR11 BIT(3)
+#define BIT_TTXINTH_TTX_INTR10 BIT(2)
+#define BIT_TTXINTH_TTX_INTR9 BIT(1)
+#define BIT_TTXINTH_TTX_INTR8 BIT(0)
+
+/* TDM RX Control, default value: 0x1c */
+#define REG_TRXCTRL 0x013b
+#define BIT_TRXCTRL_TRX_CLR_WVALLOW BIT(4)
+#define BIT_TRXCTRL_TRX_FROM_SE_COC BIT(3)
+#define MSK_TRXCTRL_TRX_NUMBPS_2_0 0x07
+
+/* TDM RX NUMSPISYM, default value: 0x04 */
+#define REG_TRXSPINUMS 0x013c
+
+/* TDM RX NUMHSICSYM, default value: 0x14 */
+#define REG_TRXHSICNUMS 0x013d
+
+/* TDM RX NUMTOTSYM, default value: 0x18 */
+#define REG_TRXTOTNUMS 0x013e
+
+/* TDM RX Status 2nd, default value: 0x00 */
+#define REG_TRXSTA2 0x015c
+
+/* TDM RX INT Low, default value: 0x00 */
+#define REG_TRXINTL 0x0163
+
+/* TDM RX INT High, default value: 0x00 */
+#define REG_TRXINTH 0x0164
+
+/* TDM RX INTMASK High, default value: 0x00 */
+#define REG_TRXINTMH 0x0166
+
+/* HSIC TX CRTL, default value: 0x00 */
+#define REG_HTXCTRL 0x0169
+#define BIT_HTXCTRL_HTX_ALLSBE_SOP BIT(4)
+#define BIT_HTXCTRL_HTX_RGDINV_USB BIT(3)
+#define BIT_HTXCTRL_HTX_RSPTDM_BUSY BIT(2)
+#define BIT_HTXCTRL_HTX_DRVCONN1 BIT(1)
+#define BIT_HTXCTRL_HTX_DRVRST1 BIT(0)
+
+/* HSIC TX INT Low, default value: 0x00 */
+#define REG_HTXINTL 0x017d
+
+/* HSIC TX INT High, default value: 0x00 */
+#define REG_HTXINTH 0x017e
+
+/* HSIC Keeper, default value: 0x00 */
+#define REG_KEEPER 0x0181
+#define MSK_KEEPER_KEEPER_MODE_1_0 0x03
+
+/* HSIC Flow Control General, default value: 0x02 */
+#define REG_FCGC 0x0183
+#define BIT_FCGC_HSIC_FC_HOSTMODE BIT(1)
+#define BIT_FCGC_HSIC_FC_ENABLE BIT(0)
+
+/* HSIC Flow Control CTR13, default value: 0xfc */
+#define REG_FCCTR13 0x0191
+
+/* HSIC Flow Control CTR14, default value: 0xff */
+#define REG_FCCTR14 0x0192
+
+/* HSIC Flow Control CTR15, default value: 0xff */
+#define REG_FCCTR15 0x0193
+
+/* HSIC Flow Control CTR50, default value: 0x03 */
+#define REG_FCCTR50 0x01b6
+
+/* HSIC Flow Control INTR0, default value: 0x00 */
+#define REG_FCINTR0 0x01ec
+#define REG_FCINTR1 0x01ed
+#define REG_FCINTR2 0x01ee
+#define REG_FCINTR3 0x01ef
+#define REG_FCINTR4 0x01f0
+#define REG_FCINTR5 0x01f1
+#define REG_FCINTR6 0x01f2
+#define REG_FCINTR7 0x01f3
+
+/* TDM Low Latency, default value: 0x20 */
+#define REG_TDMLLCTL 0x01fc
+#define MSK_TDMLLCTL_TRX_LL_SEL_MANUAL 0xc0
+#define MSK_TDMLLCTL_TRX_LL_SEL_MODE 0x30
+#define MSK_TDMLLCTL_TTX_LL_SEL_MANUAL 0x0c
+#define BIT_TDMLLCTL_TTX_LL_TIE_LOW BIT(1)
+#define BIT_TDMLLCTL_TTX_LL_SEL_MODE BIT(0)
+
+/* TMDS 0 Clock Control, default value: 0x10 */
+#define REG_TMDS0_CCTRL1 0x0210
+#define MSK_TMDS0_CCTRL1_TEST_SEL 0xc0
+#define MSK_TMDS0_CCTRL1_CLK1X_CTL 0x30
+
+/* TMDS Clock Enable, default value: 0x00 */
+#define REG_TMDS_CLK_EN 0x0211
+#define BIT_TMDS_CLK_EN_CLK_EN BIT(0)
+
+/* TMDS Channel Enable, default value: 0x00 */
+#define REG_TMDS_CH_EN 0x0212
+#define BIT_TMDS_CH_EN_CH0_EN BIT(4)
+#define BIT_TMDS_CH_EN_CH12_EN BIT(0)
+
+/* BGR_BIAS, default value: 0x07 */
+#define REG_BGR_BIAS 0x0215
+#define BIT_BGR_BIAS_BGR_EN BIT(7)
+#define MSK_BGR_BIAS_BIAS_BGR_D 0x0f
+
+/* TMDS 0 Digital I2C BW, default value: 0x0a */
+#define REG_ALICE0_BW_I2C 0x0231
+
+/* TMDS 0 Digital Zone Control, default value: 0xe0 */
+#define REG_ALICE0_ZONE_CTRL 0x024c
+#define BIT_ALICE0_ZONE_CTRL_ICRST_N BIT(7)
+#define BIT_ALICE0_ZONE_CTRL_USE_INT_DIV20 BIT(6)
+#define MSK_ALICE0_ZONE_CTRL_SZONE_I2C 0x30
+#define MSK_ALICE0_ZONE_CTRL_ZONE_CTRL 0x0f
+
+/* TMDS 0 Digital PLL Mode Control, default value: 0x00 */
+#define REG_ALICE0_MODE_CTRL 0x024d
+#define MSK_ALICE0_MODE_CTRL_PLL_MODE_I2C 0x0c
+#define MSK_ALICE0_MODE_CTRL_DIV20_CTRL 0x03
+
+/* MHL Tx Control 6th, default value: 0xa0 */
+#define REG_MHLTX_CTL6 0x0285
+#define MSK_MHLTX_CTL6_EMI_SEL 0xe0
+#define MSK_MHLTX_CTL6_TX_CLK_SHAPE_9_8 0x03
+
+/* Packet Filter0, default value: 0x00 */
+#define REG_PKT_FILTER_0 0x0290
+#define BIT_PKT_FILTER_0_DROP_CEA_GAMUT_PKT BIT(7)
+#define BIT_PKT_FILTER_0_DROP_CEA_CP_PKT BIT(6)
+#define BIT_PKT_FILTER_0_DROP_MPEG_PKT BIT(5)
+#define BIT_PKT_FILTER_0_DROP_SPIF_PKT BIT(4)
+#define BIT_PKT_FILTER_0_DROP_AIF_PKT BIT(3)
+#define BIT_PKT_FILTER_0_DROP_AVI_PKT BIT(2)
+#define BIT_PKT_FILTER_0_DROP_CTS_PKT BIT(1)
+#define BIT_PKT_FILTER_0_DROP_GCP_PKT BIT(0)
+
+/* Packet Filter1, default value: 0x00 */
+#define REG_PKT_FILTER_1 0x0291
+#define BIT_PKT_FILTER_1_VSI_OVERRIDE_DIS BIT(7)
+#define BIT_PKT_FILTER_1_AVI_OVERRIDE_DIS BIT(6)
+#define BIT_PKT_FILTER_1_DROP_AUDIO_PKT BIT(3)
+#define BIT_PKT_FILTER_1_DROP_GEN2_PKT BIT(2)
+#define BIT_PKT_FILTER_1_DROP_GEN_PKT BIT(1)
+#define BIT_PKT_FILTER_1_DROP_VSIF_PKT BIT(0)
+
+/* TMDS Clock Status, default value: 0x10 */
+#define REG_TMDS_CSTAT_P3 0x02a0
+#define BIT_TMDS_CSTAT_P3_RX_HDMI_CP_CLR_MUTE BIT(7)
+#define BIT_TMDS_CSTAT_P3_RX_HDMI_CP_SET_MUTE BIT(6)
+#define BIT_TMDS_CSTAT_P3_RX_HDMI_CP_NEW_CP BIT(5)
+#define BIT_TMDS_CSTAT_P3_CLR_AVI BIT(3)
+#define BIT_TMDS_CSTAT_P3_SCDT_CLR_AVI_DIS BIT(2)
+#define BIT_TMDS_CSTAT_P3_SCDT BIT(1)
+#define BIT_TMDS_CSTAT_P3_CKDT BIT(0)
+
+/* RX_HDMI Control, default value: 0x10 */
+#define REG_RX_HDMI_CTRL0 0x02a1
+#define BIT_RX_HDMI_CTRL0_BYP_DVIFILT_SYNC BIT(5)
+#define BIT_RX_HDMI_CTRL0_HDMI_MODE_EN_ITSELF_CLR BIT(4)
+#define BIT_RX_HDMI_CTRL0_HDMI_MODE_SW_VALUE BIT(3)
+#define BIT_RX_HDMI_CTRL0_HDMI_MODE_OVERWRITE BIT(2)
+#define BIT_RX_HDMI_CTRL0_RX_HDMI_HDMI_MODE_EN BIT(1)
+#define BIT_RX_HDMI_CTRL0_RX_HDMI_HDMI_MODE BIT(0)
+
+/* RX_HDMI Control, default value: 0x38 */
+#define REG_RX_HDMI_CTRL2 0x02a3
+#define MSK_RX_HDMI_CTRL2_IDLE_CNT 0xf0
+#define VAL_RX_HDMI_CTRL2_IDLE_CNT(n) ((n) << 4)
+#define BIT_RX_HDMI_CTRL2_USE_AV_MUTE BIT(3)
+#define BIT_RX_HDMI_CTRL2_VSI_MON_SEL_VSI BIT(0)
+
+/* RX_HDMI Control, default value: 0x0f */
+#define REG_RX_HDMI_CTRL3 0x02a4
+#define MSK_RX_HDMI_CTRL3_PP_MODE_CLK_EN 0x0f
+
+/* rx_hdmi Clear Buffer, default value: 0x00 */
+#define REG_RX_HDMI_CLR_BUFFER 0x02ac
+#define MSK_RX_HDMI_CLR_BUFFER_AIF4VSI_CMP 0xc0
+#define BIT_RX_HDMI_CLR_BUFFER_USE_AIF4VSI BIT(5)
+#define BIT_RX_HDMI_CLR_BUFFER_VSI_CLR_W_AVI BIT(4)
+#define BIT_RX_HDMI_CLR_BUFFER_VSI_IEEE_ID_CHK_EN BIT(3)
+#define BIT_RX_HDMI_CLR_BUFFER_SWAP_VSI_IEEE_ID BIT(2)
+#define BIT_RX_HDMI_CLR_BUFFER_AIF_CLR_EN BIT(1)
+#define BIT_RX_HDMI_CLR_BUFFER_VSI_CLR_EN BIT(0)
+
+/* RX_HDMI VSI Header1, default value: 0x00 */
+#define REG_RX_HDMI_MON_PKT_HEADER1 0x02b8
+
+/* RX_HDMI VSI MHL Monitor, default value: 0x3c */
+#define REG_RX_HDMI_VSIF_MHL_MON 0x02d7
+
+#define MSK_RX_HDMI_VSIF_MHL_MON_RX_HDMI_MHL_3D_FORMAT 0x3c
+#define MSK_RX_HDMI_VSIF_MHL_MON_RX_HDMI_MHL_VID_FORMAT 0x03
+
+/* Interrupt Source 9, default value: 0x00 */
+#define REG_INTR9 0x02e0
+#define BIT_INTR9_EDID_ERROR BIT(6)
+#define BIT_INTR9_EDID_DONE BIT(5)
+#define BIT_INTR9_DEVCAP_DONE BIT(4)
+
+/* Interrupt 9 Mask, default value: 0x00 */
+#define REG_INTR9_MASK 0x02e1
+
+/* TPI CBUS Start, default value: 0x00 */
+#define REG_TPI_CBUS_START 0x02e2
+#define BIT_TPI_CBUS_START_RCP_REQ_START BIT(7)
+#define BIT_TPI_CBUS_START_RCPK_REPLY_START BIT(6)
+#define BIT_TPI_CBUS_START_RCPE_REPLY_START BIT(5)
+#define BIT_TPI_CBUS_START_PUT_LINK_MODE_START BIT(4)
+#define BIT_TPI_CBUS_START_PUT_DCAPCHG_START BIT(3)
+#define BIT_TPI_CBUS_START_PUT_DCAPRDY_START BIT(2)
+#define BIT_TPI_CBUS_START_GET_EDID_START_0 BIT(1)
+#define BIT_TPI_CBUS_START_GET_DEVCAP_START BIT(0)
+
+/* EDID Control, default value: 0x10 */
+#define REG_EDID_CTRL 0x02e3
+#define BIT_EDID_CTRL_EDID_PRIME_VALID BIT(7)
+#define BIT_EDID_CTRL_XDEVCAP_EN BIT(6)
+#define BIT_EDID_CTRL_DEVCAP_SELECT_DEVCAP BIT(5)
+#define BIT_EDID_CTRL_EDID_FIFO_ADDR_AUTO BIT(4)
+#define BIT_EDID_CTRL_EDID_FIFO_ACCESS_ALWAYS_EN BIT(3)
+#define BIT_EDID_CTRL_EDID_FIFO_BLOCK_SEL BIT(2)
+#define BIT_EDID_CTRL_INVALID_BKSV BIT(1)
+#define BIT_EDID_CTRL_EDID_MODE_EN BIT(0)
+
+/* EDID FIFO Addr, default value: 0x00 */
+#define REG_EDID_FIFO_ADDR 0x02e9
+
+/* EDID FIFO Write Data, default value: 0x00 */
+#define REG_EDID_FIFO_WR_DATA 0x02ea
+
+/* EDID/DEVCAP FIFO Internal Addr, default value: 0x00 */
+#define REG_EDID_FIFO_ADDR_MON 0x02eb
+
+/* EDID FIFO Read Data, default value: 0x00 */
+#define REG_EDID_FIFO_RD_DATA 0x02ec
+
+/* EDID DDC Segment Pointer, default value: 0x00 */
+#define REG_EDID_START_EXT 0x02ed
+
+/* TX IP BIST CNTL and Status, default value: 0x00 */
+#define REG_TX_IP_BIST_CNTLSTA 0x02f2
+#define BIT_TX_IP_BIST_CNTLSTA_TXBIST_QUARTER_CLK_SEL BIT(6)
+#define BIT_TX_IP_BIST_CNTLSTA_TXBIST_DONE BIT(5)
+#define BIT_TX_IP_BIST_CNTLSTA_TXBIST_ON BIT(4)
+#define BIT_TX_IP_BIST_CNTLSTA_TXBIST_RUN BIT(3)
+#define BIT_TX_IP_BIST_CNTLSTA_TXCLK_HALF_SEL BIT(2)
+#define BIT_TX_IP_BIST_CNTLSTA_TXBIST_EN BIT(1)
+#define BIT_TX_IP_BIST_CNTLSTA_TXBIST_SEL BIT(0)
+
+/* TX IP BIST INST LOW, default value: 0x00 */
+#define REG_TX_IP_BIST_INST_LOW 0x02f3
+#define REG_TX_IP_BIST_INST_HIGH 0x02f4
+
+/* TX IP BIST PATTERN LOW, default value: 0x00 */
+#define REG_TX_IP_BIST_PAT_LOW 0x02f5
+#define REG_TX_IP_BIST_PAT_HIGH 0x02f6
+
+/* TX IP BIST CONFIGURE LOW, default value: 0x00 */
+#define REG_TX_IP_BIST_CONF_LOW 0x02f7
+#define REG_TX_IP_BIST_CONF_HIGH 0x02f8
+
+/* E-MSC General Control, default value: 0x80 */
+#define REG_GENCTL 0x0300
+#define BIT_GENCTL_SPEC_TRANS_DIS BIT(7)
+#define BIT_GENCTL_DIS_XMIT_ERR_STATE BIT(6)
+#define BIT_GENCTL_SPI_MISO_EDGE BIT(5)
+#define BIT_GENCTL_SPI_MOSI_EDGE BIT(4)
+#define BIT_GENCTL_CLR_EMSC_RFIFO BIT(3)
+#define BIT_GENCTL_CLR_EMSC_XFIFO BIT(2)
+#define BIT_GENCTL_START_TRAIN_SEQ BIT(1)
+#define BIT_GENCTL_EMSC_EN BIT(0)
+
+/* E-MSC Comma ErrorCNT, default value: 0x03 */
+#define REG_COMMECNT 0x0305
+#define BIT_COMMECNT_I2C_TO_EMSC_EN BIT(7)
+#define MSK_COMMECNT_COMMA_CHAR_ERR_CNT 0x0f
+
+/* E-MSC RFIFO ByteCnt, default value: 0x00 */
+#define REG_EMSCRFIFOBCNTL 0x031a
+#define REG_EMSCRFIFOBCNTH 0x031b
+
+/* SPI Burst Cnt Status, default value: 0x00 */
+#define REG_SPIBURSTCNT 0x031e
+
+/* SPI Burst Status and SWRST, default value: 0x00 */
+#define REG_SPIBURSTSTAT 0x0322
+#define BIT_SPIBURSTSTAT_SPI_HDCPRST BIT(7)
+#define BIT_SPIBURSTSTAT_SPI_CBUSRST BIT(6)
+#define BIT_SPIBURSTSTAT_SPI_SRST BIT(5)
+#define BIT_SPIBURSTSTAT_EMSC_NORMAL_MODE BIT(0)
+
+/* E-MSC 1st Interrupt, default value: 0x00 */
+#define REG_EMSCINTR 0x0323
+#define BIT_EMSCINTR_EMSC_XFIFO_EMPTY BIT(7)
+#define BIT_EMSCINTR_EMSC_XMIT_ACK_TOUT BIT(6)
+#define BIT_EMSCINTR_EMSC_RFIFO_READ_ERR BIT(5)
+#define BIT_EMSCINTR_EMSC_XFIFO_WRITE_ERR BIT(4)
+#define BIT_EMSCINTR_EMSC_COMMA_CHAR_ERR BIT(3)
+#define BIT_EMSCINTR_EMSC_XMIT_DONE BIT(2)
+#define BIT_EMSCINTR_EMSC_XMIT_GNT_TOUT BIT(1)
+#define BIT_EMSCINTR_SPI_DVLD BIT(0)
+
+/* E-MSC Interrupt Mask, default value: 0x00 */
+#define REG_EMSCINTRMASK 0x0324
+
+/* I2C E-MSC XMIT FIFO Write Port, default value: 0x00 */
+#define REG_EMSC_XMIT_WRITE_PORT 0x032a
+
+/* I2C E-MSC RCV FIFO Write Port, default value: 0x00 */
+#define REG_EMSC_RCV_READ_PORT 0x032b
+
+/* E-MSC 2nd Interrupt, default value: 0x00 */
+#define REG_EMSCINTR1 0x032c
+#define BIT_EMSCINTR1_EMSC_TRAINING_COMMA_ERR BIT(0)
+
+/* E-MSC Interrupt Mask, default value: 0x00 */
+#define REG_EMSCINTRMASK1 0x032d
+#define BIT_EMSCINTRMASK1_EMSC_INTRMASK1_0 BIT(0)
+
+/* MHL Top Ctl, default value: 0x00 */
+#define REG_MHL_TOP_CTL 0x0330
+#define BIT_MHL_TOP_CTL_MHL3_DOC_SEL BIT(7)
+#define BIT_MHL_TOP_CTL_MHL_PP_SEL BIT(6)
+#define MSK_MHL_TOP_CTL_IF_TIMING_CTL 0x03
+
+/* MHL DataPath 1st Ctl, default value: 0xbc */
+#define REG_MHL_DP_CTL0 0x0331
+#define BIT_MHL_DP_CTL0_DP_OE BIT(7)
+#define BIT_MHL_DP_CTL0_TX_OE_OVR BIT(6)
+#define MSK_MHL_DP_CTL0_TX_OE 0x3f
+
+/* MHL DataPath 2nd Ctl, default value: 0xbb */
+#define REG_MHL_DP_CTL1 0x0332
+#define MSK_MHL_DP_CTL1_CK_SWING_CTL 0xf0
+#define MSK_MHL_DP_CTL1_DT_SWING_CTL 0x0f
+
+/* MHL DataPath 3rd Ctl, default value: 0x2f */
+#define REG_MHL_DP_CTL2 0x0333
+#define BIT_MHL_DP_CTL2_CLK_BYPASS_EN BIT(7)
+#define MSK_MHL_DP_CTL2_DAMP_TERM_SEL 0x30
+#define MSK_MHL_DP_CTL2_CK_TERM_SEL 0x0c
+#define MSK_MHL_DP_CTL2_DT_TERM_SEL 0x03
+
+/* MHL DataPath 4th Ctl, default value: 0x48 */
+#define REG_MHL_DP_CTL3 0x0334
+#define MSK_MHL_DP_CTL3_DT_DRV_VNBC_CTL 0xf0
+#define MSK_MHL_DP_CTL3_DT_DRV_VNB_CTL 0x0f
+
+/* MHL DataPath 5th Ctl, default value: 0x48 */
+#define REG_MHL_DP_CTL4 0x0335
+#define MSK_MHL_DP_CTL4_CK_DRV_VNBC_CTL 0xf0
+#define MSK_MHL_DP_CTL4_CK_DRV_VNB_CTL 0x0f
+
+/* MHL DataPath 6th Ctl, default value: 0x3f */
+#define REG_MHL_DP_CTL5 0x0336
+#define BIT_MHL_DP_CTL5_RSEN_EN_OVR BIT(7)
+#define BIT_MHL_DP_CTL5_RSEN_EN BIT(6)
+#define MSK_MHL_DP_CTL5_DAMP_TERM_VGS_CTL 0x30
+#define MSK_MHL_DP_CTL5_CK_TERM_VGS_CTL 0x0c
+#define MSK_MHL_DP_CTL5_DT_TERM_VGS_CTL 0x03
+
+/* MHL PLL 1st Ctl, default value: 0x05 */
+#define REG_MHL_PLL_CTL0 0x0337
+#define BIT_MHL_PLL_CTL0_AUD_CLK_EN BIT(7)
+
+#define MSK_MHL_PLL_CTL0_AUD_CLK_RATIO 0x70
+#define VAL_MHL_PLL_CTL0_AUD_CLK_RATIO_5_10 0x70
+#define VAL_MHL_PLL_CTL0_AUD_CLK_RATIO_5_6 0x60
+#define VAL_MHL_PLL_CTL0_AUD_CLK_RATIO_5_4 0x50
+#define VAL_MHL_PLL_CTL0_AUD_CLK_RATIO_5_2 0x40
+#define VAL_MHL_PLL_CTL0_AUD_CLK_RATIO_5_5 0x30
+#define VAL_MHL_PLL_CTL0_AUD_CLK_RATIO_5_3 0x20
+#define VAL_MHL_PLL_CTL0_AUD_CLK_RATIO_5_2_PRIME 0x10
+#define VAL_MHL_PLL_CTL0_AUD_CLK_RATIO_5_1 0x00
+
+#define MSK_MHL_PLL_CTL0_HDMI_CLK_RATIO 0x0c
+#define VAL_MHL_PLL_CTL0_HDMI_CLK_RATIO_4X 0x0c
+#define VAL_MHL_PLL_CTL0_HDMI_CLK_RATIO_2X 0x08
+#define VAL_MHL_PLL_CTL0_HDMI_CLK_RATIO_1X 0x04
+#define VAL_MHL_PLL_CTL0_HDMI_CLK_RATIO_HALF_X 0x00
+
+#define BIT_MHL_PLL_CTL0_CRYSTAL_CLK_SEL BIT(1)
+#define BIT_MHL_PLL_CTL0_ZONE_MASK_OE BIT(0)
+
+/* MHL PLL 3rd Ctl, default value: 0x80 */
+#define REG_MHL_PLL_CTL2 0x0339
+#define BIT_MHL_PLL_CTL2_CLKDETECT_EN BIT(7)
+#define BIT_MHL_PLL_CTL2_MEAS_FVCO BIT(3)
+#define BIT_MHL_PLL_CTL2_PLL_FAST_LOCK BIT(2)
+#define MSK_MHL_PLL_CTL2_PLL_LF_SEL 0x03
+
+/* MHL CBUS 1st Ctl, default value: 0x12 */
+#define REG_MHL_CBUS_CTL0 0x0340
+#define BIT_MHL_CBUS_CTL0_CBUS_RGND_TEST_MODE BIT(7)
+
+#define MSK_MHL_CBUS_CTL0_CBUS_RGND_VTH_CTL 0x30
+#define VAL_MHL_CBUS_CTL0_CBUS_RGND_VBIAS_734 0x00
+#define VAL_MHL_CBUS_CTL0_CBUS_RGND_VBIAS_747 0x10
+#define VAL_MHL_CBUS_CTL0_CBUS_RGND_VBIAS_740 0x20
+#define VAL_MHL_CBUS_CTL0_CBUS_RGND_VBIAS_754 0x30
+
+#define MSK_MHL_CBUS_CTL0_CBUS_RES_TEST_SEL 0x0c
+
+#define MSK_MHL_CBUS_CTL0_CBUS_DRV_SEL 0x03
+#define VAL_MHL_CBUS_CTL0_CBUS_DRV_SEL_WEAKEST 0x00
+#define VAL_MHL_CBUS_CTL0_CBUS_DRV_SEL_WEAK 0x01
+#define VAL_MHL_CBUS_CTL0_CBUS_DRV_SEL_STRONG 0x02
+#define VAL_MHL_CBUS_CTL0_CBUS_DRV_SEL_STRONGEST 0x03
+
+/* MHL CBUS 2nd Ctl, default value: 0x03 */
+#define REG_MHL_CBUS_CTL1 0x0341
+#define MSK_MHL_CBUS_CTL1_CBUS_RGND_RES_CTL 0x07
+#define VAL_MHL_CBUS_CTL1_0888_OHM 0x00
+#define VAL_MHL_CBUS_CTL1_1115_OHM 0x04
+#define VAL_MHL_CBUS_CTL1_1378_OHM 0x07
+
+/* MHL CoC 1st Ctl, default value: 0xc3 */
+#define REG_MHL_COC_CTL0 0x0342
+#define BIT_MHL_COC_CTL0_COC_BIAS_EN BIT(7)
+#define MSK_MHL_COC_CTL0_COC_BIAS_CTL 0x70
+#define MSK_MHL_COC_CTL0_COC_TERM_CTL 0x07
+
+/* MHL CoC 2nd Ctl, default value: 0x87 */
+#define REG_MHL_COC_CTL1 0x0343
+#define BIT_MHL_COC_CTL1_COC_EN BIT(7)
+#define MSK_MHL_COC_CTL1_COC_DRV_CTL 0x3f
+
+/* MHL CoC 4th Ctl, default value: 0x00 */
+#define REG_MHL_COC_CTL3 0x0345
+#define BIT_MHL_COC_CTL3_COC_AECHO_EN BIT(0)
+
+/* MHL CoC 5th Ctl, default value: 0x28 */
+#define REG_MHL_COC_CTL4 0x0346
+#define MSK_MHL_COC_CTL4_COC_IF_CTL 0xf0
+#define MSK_MHL_COC_CTL4_COC_SLEW_CTL 0x0f
+
+/* MHL CoC 6th Ctl, default value: 0x0d */
+#define REG_MHL_COC_CTL5 0x0347
+
+/* MHL DoC 1st Ctl, default value: 0x18 */
+#define REG_MHL_DOC_CTL0 0x0349
+#define BIT_MHL_DOC_CTL0_DOC_RXDATA_EN BIT(7)
+#define MSK_MHL_DOC_CTL0_DOC_DM_TERM 0x38
+#define MSK_MHL_DOC_CTL0_DOC_OPMODE 0x06
+#define BIT_MHL_DOC_CTL0_DOC_RXBIAS_EN BIT(0)
+
+/* MHL DataPath 7th Ctl, default value: 0x2a */
+#define REG_MHL_DP_CTL6 0x0350
+#define BIT_MHL_DP_CTL6_DP_TAP2_SGN BIT(5)
+#define BIT_MHL_DP_CTL6_DP_TAP2_EN BIT(4)
+#define BIT_MHL_DP_CTL6_DP_TAP1_SGN BIT(3)
+#define BIT_MHL_DP_CTL6_DP_TAP1_EN BIT(2)
+#define BIT_MHL_DP_CTL6_DT_PREDRV_FEEDCAP_EN BIT(1)
+#define BIT_MHL_DP_CTL6_DP_PRE_POST_SEL BIT(0)
+
+/* MHL DataPath 8th Ctl, default value: 0x06 */
+#define REG_MHL_DP_CTL7 0x0351
+#define MSK_MHL_DP_CTL7_DT_DRV_VBIAS_CASCTL 0xf0
+#define MSK_MHL_DP_CTL7_DT_DRV_IREF_CTL 0x0f
+
+/* Tx Zone Ctl1, default value: 0x00 */
+#define REG_TX_ZONE_CTL1 0x0361
+#define VAL_TX_ZONE_CTL1_TX_ZONE_CTRL_MODE 0x08
+
+/* MHL3 Tx Zone Ctl, default value: 0x00 */
+#define REG_MHL3_TX_ZONE_CTL 0x0364
+#define BIT_MHL3_TX_ZONE_CTL_MHL2_INTPLT_ZONE_MANU_EN BIT(7)
+#define MSK_MHL3_TX_ZONE_CTL_MHL3_TX_ZONE 0x03
+
+#define MSK_TX_ZONE_CTL3_TX_ZONE 0x03
+#define VAL_TX_ZONE_CTL3_TX_ZONE_6GBPS 0x00
+#define VAL_TX_ZONE_CTL3_TX_ZONE_3GBPS 0x01
+#define VAL_TX_ZONE_CTL3_TX_ZONE_1_5GBPS 0x02
+
+/* HDCP Polling Control and Status, default value: 0x70 */
+#define REG_HDCP2X_POLL_CS 0x0391
+
+#define BIT_HDCP2X_POLL_CS_HDCP2X_MSG_SZ_CLR_OPTION BIT(6)
+#define BIT_HDCP2X_POLL_CS_HDCP2X_RPT_READY_CLR_OPTION BIT(5)
+#define BIT_HDCP2X_POLL_CS_HDCP2X_REAUTH_REQ_CLR_OPTION BIT(4)
+#define MSK_HDCP2X_POLL_CS_ 0x0c
+#define BIT_HDCP2X_POLL_CS_HDCP2X_DIS_POLL_GNT BIT(1)
+#define BIT_HDCP2X_POLL_CS_HDCP2X_DIS_POLL_EN BIT(0)
+
+/* HDCP Interrupt 0, default value: 0x00 */
+#define REG_HDCP2X_INTR0 0x0398
+
+/* HDCP Interrupt 0 Mask, default value: 0x00 */
+#define REG_HDCP2X_INTR0_MASK 0x0399
+
+/* HDCP General Control 0, default value: 0x02 */
+#define REG_HDCP2X_CTRL_0 0x03a0
+#define BIT_HDCP2X_CTRL_0_HDCP2X_ENCRYPT_EN BIT(7)
+#define BIT_HDCP2X_CTRL_0_HDCP2X_POLINT_SEL BIT(6)
+#define BIT_HDCP2X_CTRL_0_HDCP2X_POLINT_OVR BIT(5)
+#define BIT_HDCP2X_CTRL_0_HDCP2X_PRECOMPUTE BIT(4)
+#define BIT_HDCP2X_CTRL_0_HDCP2X_HDMIMODE BIT(3)
+#define BIT_HDCP2X_CTRL_0_HDCP2X_REPEATER BIT(2)
+#define BIT_HDCP2X_CTRL_0_HDCP2X_HDCPTX BIT(1)
+#define BIT_HDCP2X_CTRL_0_HDCP2X_EN BIT(0)
+
+/* HDCP General Control 1, default value: 0x08 */
+#define REG_HDCP2X_CTRL_1 0x03a1
+#define MSK_HDCP2X_CTRL_1_HDCP2X_REAUTH_MSK_3_0 0xf0
+#define BIT_HDCP2X_CTRL_1_HDCP2X_HPD_SW BIT(3)
+#define BIT_HDCP2X_CTRL_1_HDCP2X_HPD_OVR BIT(2)
+#define BIT_HDCP2X_CTRL_1_HDCP2X_CTL3MSK BIT(1)
+#define BIT_HDCP2X_CTRL_1_HDCP2X_REAUTH_SW BIT(0)
+
+/* HDCP Misc Control, default value: 0x00 */
+#define REG_HDCP2X_MISC_CTRL 0x03a5
+#define BIT_HDCP2X_MISC_CTRL_HDCP2X_RPT_SMNG_XFER_START BIT(4)
+#define BIT_HDCP2X_MISC_CTRL_HDCP2X_RPT_SMNG_WR_START BIT(3)
+#define BIT_HDCP2X_MISC_CTRL_HDCP2X_RPT_SMNG_WR BIT(2)
+#define BIT_HDCP2X_MISC_CTRL_HDCP2X_RPT_RCVID_RD_START BIT(1)
+#define BIT_HDCP2X_MISC_CTRL_HDCP2X_RPT_RCVID_RD BIT(0)
+
+/* HDCP RPT SMNG K, default value: 0x00 */
+#define REG_HDCP2X_RPT_SMNG_K 0x03a6
+
+/* HDCP RPT SMNG In, default value: 0x00 */
+#define REG_HDCP2X_RPT_SMNG_IN 0x03a7
+
+/* HDCP Auth Status, default value: 0x00 */
+#define REG_HDCP2X_AUTH_STAT 0x03aa
+
+/* HDCP RPT RCVID Out, default value: 0x00 */
+#define REG_HDCP2X_RPT_RCVID_OUT 0x03ac
+
+/* HDCP TP1, default value: 0x62 */
+#define REG_HDCP2X_TP1 0x03b4
+
+/* HDCP GP Out 0, default value: 0x00 */
+#define REG_HDCP2X_GP_OUT0 0x03c7
+
+/* HDCP Repeater RCVR ID 0, default value: 0x00 */
+#define REG_HDCP2X_RPT_RCVR_ID0 0x03d1
+
+/* HDCP DDCM Status, default value: 0x00 */
+#define REG_HDCP2X_DDCM_STS 0x03d8
+#define MSK_HDCP2X_DDCM_STS_HDCP2X_DDCM_ERR_STS_3_0 0xf0
+#define MSK_HDCP2X_DDCM_STS_HDCP2X_DDCM_CTL_CS_3_0 0x0f
+
+/* HDMI2MHL3 Control, default value: 0x0a */
+#define REG_M3_CTRL 0x03e0
+#define BIT_M3_CTRL_H2M_SWRST BIT(4)
+#define BIT_M3_CTRL_SW_MHL3_SEL BIT(3)
+#define BIT_M3_CTRL_M3AV_EN BIT(2)
+#define BIT_M3_CTRL_ENC_TMDS BIT(1)
+#define BIT_M3_CTRL_MHL3_MASTER_EN BIT(0)
+
+#define VAL_M3_CTRL_MHL1_2_VALUE (BIT_M3_CTRL_SW_MHL3_SEL \
+ | BIT_M3_CTRL_ENC_TMDS)
+#define VAL_M3_CTRL_MHL3_VALUE (BIT_M3_CTRL_SW_MHL3_SEL \
+ | BIT_M3_CTRL_M3AV_EN \
+ | BIT_M3_CTRL_ENC_TMDS \
+ | BIT_M3_CTRL_MHL3_MASTER_EN)
+
+/* HDMI2MHL3 Port0 Control, default value: 0x04 */
+#define REG_M3_P0CTRL 0x03e1
+#define BIT_M3_P0CTRL_MHL3_P0_HDCP_ENC_EN BIT(4)
+#define BIT_M3_P0CTRL_MHL3_P0_UNLIMIT_EN BIT(3)
+#define BIT_M3_P0CTRL_MHL3_P0_HDCP_EN BIT(2)
+#define BIT_M3_P0CTRL_MHL3_P0_PIXEL_MODE_PACKED BIT(1)
+#define BIT_M3_P0CTRL_MHL3_P0_PORT_EN BIT(0)
+
+#define REG_M3_POSTM 0x03e2
+#define MSK_M3_POSTM_RRP_DECODE 0xf8
+#define MSK_M3_POSTM_MHL3_P0_STM_ID 0x07
+
+/* HDMI2MHL3 Scramble Control, default value: 0x41 */
+#define REG_M3_SCTRL 0x03e6
+#define MSK_M3_SCTRL_MHL3_SR_LENGTH 0xf0
+#define BIT_M3_SCTRL_MHL3_SCRAMBLER_EN BIT(0)
+
+/* HSIC Div Ctl, default value: 0x05 */
+#define REG_DIV_CTL_MAIN 0x03f2
+#define MSK_DIV_CTL_MAIN_PRE_DIV_CTL_MAIN 0x1c
+#define MSK_DIV_CTL_MAIN_FB_DIV_CTL_MAIN 0x03
+
+/* MHL Capability 1st Byte, default value: 0x00 */
+#define REG_MHL_DEVCAP_0 0x0400
+
+/* MHL Interrupt 1st Byte, default value: 0x00 */
+#define REG_MHL_INT_0 0x0420
+
+/* Device Status 1st byte, default value: 0x00 */
+#define REG_MHL_STAT_0 0x0430
+
+/* CBUS Scratch Pad 1st Byte, default value: 0x00 */
+#define REG_MHL_SCRPAD_0 0x0440
+
+/* MHL Extended Capability 1st Byte, default value: 0x00 */
+#define REG_MHL_EXTDEVCAP_0 0x0480
+
+/* Device Extended Status 1st byte, default value: 0x00 */
+#define REG_MHL_EXTSTAT_0 0x0490
+
+/* TPI DTD Byte2, default value: 0x00 */
+#define REG_TPI_DTD_B2 0x0602
+
+#define VAL_TPI_QUAN_RANGE_LIMITED 0x01
+#define VAL_TPI_QUAN_RANGE_FULL 0x02
+#define VAL_TPI_FORMAT_RGB 0x00
+#define VAL_TPI_FORMAT_YCBCR444 0x01
+#define VAL_TPI_FORMAT_YCBCR422 0x02
+#define VAL_TPI_FORMAT_INTERNAL_RGB 0x03
+#define VAL_TPI_FORMAT(_fmt, _qr) \
+ (VAL_TPI_FORMAT_##_fmt | (VAL_TPI_QUAN_RANGE_##_qr << 2))
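+/* e.g. VAL_TPI_FORMAT(YCBCR422, LIMITED) == 0x02 | (0x01 << 2) == 0x06 */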
+
+/* Input Format, default value: 0x00 */
+#define REG_TPI_INPUT 0x0609
+#define BIT_TPI_INPUT_EXTENDEDBITMODE BIT(7)
+#define BIT_TPI_INPUT_ENDITHER BIT(6)
+#define MSK_TPI_INPUT_INPUT_QUAN_RANGE 0x0c
+#define MSK_TPI_INPUT_INPUT_FORMAT 0x03
+
+/* Output Format, default value: 0x00 */
+#define REG_TPI_OUTPUT 0x060a
+#define BIT_TPI_OUTPUT_CSCMODE709 BIT(4)
+#define MSK_TPI_OUTPUT_OUTPUT_QUAN_RANGE 0x0c
+#define MSK_TPI_OUTPUT_OUTPUT_FORMAT 0x03
+
+/* TPI AVI Check Sum, default value: 0x00 */
+#define REG_TPI_AVI_CHSUM 0x060c
+
+/* TPI System Control, default value: 0x00 */
+#define REG_TPI_SC 0x061a
+#define BIT_TPI_SC_TPI_UPDATE_FLG BIT(7)
+#define BIT_TPI_SC_TPI_REAUTH_CTL BIT(6)
+#define BIT_TPI_SC_TPI_OUTPUT_MODE_1 BIT(5)
+#define BIT_TPI_SC_REG_TMDS_OE_POWER_DOWN BIT(4)
+#define BIT_TPI_SC_TPI_AV_MUTE BIT(3)
+#define BIT_TPI_SC_DDC_GPU_REQUEST BIT(2)
+#define BIT_TPI_SC_DDC_TPI_SW BIT(1)
+#define BIT_TPI_SC_TPI_OUTPUT_MODE_0_HDMI BIT(0)
+
+/* TPI COPP Query Data, default value: 0x00 */
+#define REG_TPI_COPP_DATA1 0x0629
+#define BIT_TPI_COPP_DATA1_COPP_GPROT BIT(7)
+#define BIT_TPI_COPP_DATA1_COPP_LPROT BIT(6)
+#define MSK_TPI_COPP_DATA1_COPP_LINK_STATUS 0x30
+#define VAL_TPI_COPP_LINK_STATUS_NORMAL 0x00
+#define VAL_TPI_COPP_LINK_STATUS_LINK_LOST 0x10
+#define VAL_TPI_COPP_LINK_STATUS_RENEGOTIATION_REQ 0x20
+#define VAL_TPI_COPP_LINK_STATUS_LINK_SUSPENDED 0x30
+#define BIT_TPI_COPP_DATA1_COPP_HDCP_REP BIT(3)
+#define BIT_TPI_COPP_DATA1_COPP_CONNTYPE_0 BIT(2)
+#define BIT_TPI_COPP_DATA1_COPP_PROTYPE BIT(1)
+#define BIT_TPI_COPP_DATA1_COPP_CONNTYPE_1 BIT(0)
+
+/* TPI COPP Control Data, default value: 0x00 */
+#define REG_TPI_COPP_DATA2 0x062a
+#define BIT_TPI_COPP_DATA2_INTR_ENCRYPTION BIT(5)
+#define BIT_TPI_COPP_DATA2_KSV_FORWARD BIT(4)
+#define BIT_TPI_COPP_DATA2_INTERM_RI_CHECK_EN BIT(3)
+#define BIT_TPI_COPP_DATA2_DOUBLE_RI_CHECK BIT(2)
+#define BIT_TPI_COPP_DATA2_DDC_SHORT_RI_RD BIT(1)
+#define BIT_TPI_COPP_DATA2_COPP_PROTLEVEL BIT(0)
+
+/* TPI Interrupt Enable, default value: 0x00 */
+#define REG_TPI_INTR_EN 0x063c
+
+/* TPI Interrupt Status Low Byte, default value: 0x00 */
+#define REG_TPI_INTR_ST0 0x063d
+#define BIT_TPI_INTR_ST0_TPI_AUTH_CHNGE_STAT BIT(7)
+#define BIT_TPI_INTR_ST0_TPI_V_RDY_STAT BIT(6)
+#define BIT_TPI_INTR_ST0_TPI_COPP_CHNGE_STAT BIT(5)
+#define BIT_TPI_INTR_ST0_KSV_FIFO_FIRST_STAT BIT(3)
+#define BIT_TPI_INTR_ST0_READ_BKSV_BCAPS_DONE_STAT BIT(2)
+#define BIT_TPI_INTR_ST0_READ_BKSV_BCAPS_ERR_STAT BIT(1)
+#define BIT_TPI_INTR_ST0_READ_BKSV_ERR_STAT BIT(0)
+
+/* TPI DS BCAPS Status, default value: 0x00 */
+#define REG_TPI_DS_BCAPS 0x0644
+
+/* TPI BStatus1, default value: 0x00 */
+#define REG_TPI_BSTATUS1 0x0645
+#define BIT_TPI_BSTATUS1_DS_DEV_EXCEED BIT(7)
+#define MSK_TPI_BSTATUS1_DS_DEV_CNT 0x7f
+
+/* TPI BStatus2, default value: 0x10 */
+#define REG_TPI_BSTATUS2 0x0646
+#define MSK_TPI_BSTATUS2_DS_BSTATUS 0xe0
+#define BIT_TPI_BSTATUS2_DS_HDMI_MODE BIT(4)
+#define BIT_TPI_BSTATUS2_DS_CASC_EXCEED BIT(3)
+#define MSK_TPI_BSTATUS2_DS_DEPTH 0x07
+
+/* TPI HW Optimization Control #3, default value: 0x00 */
+#define REG_TPI_HW_OPT3 0x06bb
+#define BIT_TPI_HW_OPT3_DDC_DEBUG BIT(7)
+#define BIT_TPI_HW_OPT3_RI_CHECK_SKIP BIT(3)
+#define BIT_TPI_HW_OPT3_TPI_DDC_BURST_MODE BIT(2)
+#define MSK_TPI_HW_OPT3_TPI_DDC_REQ_LEVEL 0x03
+
+/* TPI Info Frame Select, default value: 0x00 */
+#define REG_TPI_INFO_FSEL 0x06bf
+#define BIT_TPI_INFO_FSEL_TPI_INFO_EN BIT(7)
+#define BIT_TPI_INFO_FSEL_TPI_INFO_RPT BIT(6)
+#define BIT_TPI_INFO_FSEL_TPI_INFO_READ_FLAG BIT(5)
+#define MSK_TPI_INFO_FSEL_TPI_INFO_SEL 0x07
+
+/* TPI Info Byte #0, default value: 0x00 */
+#define REG_TPI_INFO_B0 0x06c0
+
+/* CoC Status, default value: 0x00 */
+#define REG_COC_STAT_0 0x0700
+#define REG_COC_STAT_1 0x0701
+#define REG_COC_STAT_2 0x0702
+#define REG_COC_STAT_3 0x0703
+#define REG_COC_STAT_4 0x0704
+#define REG_COC_STAT_5 0x0705
+
+/* CoC 1st Ctl, default value: 0x40 */
+#define REG_COC_CTL0 0x0710
+
+/* CoC 2nd Ctl, default value: 0x0a */
+#define REG_COC_CTL1 0x0711
+#define MSK_COC_CTL1_COC_CTRL1_7_6 0xc0
+#define MSK_COC_CTL1_COC_CTRL1_5_0 0x3f
+
+/* CoC 3rd Ctl, default value: 0x14 */
+#define REG_COC_CTL2 0x0712
+#define MSK_COC_CTL2_COC_CTRL2_7_6 0xc0
+#define MSK_COC_CTL2_COC_CTRL2_5_0 0x3f
+
+/* CoC 4th Ctl, default value: 0x40 */
+#define REG_COC_CTL3 0x0713
+#define BIT_COC_CTL3_COC_CTRL3_7 BIT(7)
+#define MSK_COC_CTL3_COC_CTRL3_6_0 0x7f
+
+/* CoC 7th Ctl, default value: 0x00 */
+#define REG_COC_CTL6 0x0716
+#define BIT_COC_CTL6_COC_CTRL6_7 BIT(7)
+#define BIT_COC_CTL6_COC_CTRL6_6 BIT(6)
+#define MSK_COC_CTL6_COC_CTRL6_5_0 0x3f
+
+/* CoC 8th Ctl, default value: 0x06 */
+#define REG_COC_CTL7 0x0717
+#define BIT_COC_CTL7_COC_CTRL7_7 BIT(7)
+#define BIT_COC_CTL7_COC_CTRL7_6 BIT(6)
+#define BIT_COC_CTL7_COC_CTRL7_5 BIT(5)
+#define MSK_COC_CTL7_COC_CTRL7_4_3 0x18
+#define MSK_COC_CTL7_COC_CTRL7_2_0 0x07
+
+/* CoC 10th Ctl, default value: 0x00 */
+#define REG_COC_CTL9 0x0719
+
+/* CoC 11th Ctl, default value: 0x00 */
+#define REG_COC_CTLA 0x071a
+
+/* CoC 12th Ctl, default value: 0x00 */
+#define REG_COC_CTLB 0x071b
+
+/* CoC 13th Ctl, default value: 0x0f */
+#define REG_COC_CTLC 0x071c
+
+/* CoC 14th Ctl, default value: 0x0a */
+#define REG_COC_CTLD 0x071d
+#define BIT_COC_CTLD_COC_CTRLD_7 BIT(7)
+#define MSK_COC_CTLD_COC_CTRLD_6_0 0x7f
+
+/* CoC 15th Ctl, default value: 0x0a */
+#define REG_COC_CTLE 0x071e
+#define BIT_COC_CTLE_COC_CTRLE_7 BIT(7)
+#define MSK_COC_CTLE_COC_CTRLE_6_0 0x7f
+
+/* CoC 16th Ctl, default value: 0x00 */
+#define REG_COC_CTLF 0x071f
+#define MSK_COC_CTLF_COC_CTRLF_7_3 0xf8
+#define MSK_COC_CTLF_COC_CTRLF_2_0 0x07
+
+/* CoC 18th Ctl, default value: 0x32 */
+#define REG_COC_CTL11 0x0721
+#define MSK_COC_CTL11_COC_CTRL11_7_4 0xf0
+#define MSK_COC_CTL11_COC_CTRL11_3_0 0x0f
+
+/* CoC 21st Ctl, default value: 0x00 */
+#define REG_COC_CTL14 0x0724
+#define MSK_COC_CTL14_COC_CTRL14_7_4 0xf0
+#define MSK_COC_CTL14_COC_CTRL14_3_0 0x0f
+
+/* CoC 22nd Ctl, default value: 0x00 */
+#define REG_COC_CTL15 0x0725
+#define BIT_COC_CTL15_COC_CTRL15_7 BIT(7)
+#define MSK_COC_CTL15_COC_CTRL15_6_4 0x70
+#define MSK_COC_CTL15_COC_CTRL15_3_0 0x0f
+
+/* CoC Interrupt, default value: 0x00 */
+#define REG_COC_INTR 0x0726
+
+/* CoC Interrupt Mask, default value: 0x00 */
+#define REG_COC_INTR_MASK 0x0727
+#define BIT_COC_PLL_LOCK_STATUS_CHANGE BIT(0)
+#define BIT_COC_CALIBRATION_DONE BIT(1)
+
+/* CoC Misc Ctl, default value: 0x00 */
+#define REG_COC_MISC_CTL0 0x0728
+#define BIT_COC_MISC_CTL0_FSM_MON BIT(7)
+
+/* CoC 24th Ctl, default value: 0x00 */
+#define REG_COC_CTL17 0x072a
+#define MSK_COC_CTL17_COC_CTRL17_7_4 0xf0
+#define MSK_COC_CTL17_COC_CTRL17_3_0 0x0f
+
+/* CoC 25th Ctl, default value: 0x00 */
+#define REG_COC_CTL18 0x072b
+#define MSK_COC_CTL18_COC_CTRL18_7_4 0xf0
+#define MSK_COC_CTL18_COC_CTRL18_3_0 0x0f
+
+/* CoC 26th Ctl, default value: 0x00 */
+#define REG_COC_CTL19 0x072c
+#define MSK_COC_CTL19_COC_CTRL19_7_4 0xf0
+#define MSK_COC_CTL19_COC_CTRL19_3_0 0x0f
+
+/* CoC 27th Ctl, default value: 0x00 */
+#define REG_COC_CTL1A 0x072d
+#define MSK_COC_CTL1A_COC_CTRL1A_7_2 0xfc
+#define MSK_COC_CTL1A_COC_CTRL1A_1_0 0x03
+
+/* DoC 9th Status, default value: 0x00 */
+#define REG_DOC_STAT_8 0x0740
+
+/* DoC 10th Status, default value: 0x00 */
+#define REG_DOC_STAT_9 0x0741
+
+/* DoC 5th CFG, default value: 0x00 */
+#define REG_DOC_CFG4 0x074e
+#define MSK_DOC_CFG4_DBG_STATE_DOC_FSM 0x0f
+
+/* DoC 1st Ctl, default value: 0x40 */
+#define REG_DOC_CTL0 0x0751
+
+/* DoC 7th Ctl, default value: 0x00 */
+#define REG_DOC_CTL6 0x0757
+#define BIT_DOC_CTL6_DOC_CTRL6_7 BIT(7)
+#define BIT_DOC_CTL6_DOC_CTRL6_6 BIT(6)
+#define MSK_DOC_CTL6_DOC_CTRL6_5_4 0x30
+#define MSK_DOC_CTL6_DOC_CTRL6_3_0 0x0f
+
+/* DoC 8th Ctl, default value: 0x00 */
+#define REG_DOC_CTL7 0x0758
+#define BIT_DOC_CTL7_DOC_CTRL7_7 BIT(7)
+#define BIT_DOC_CTL7_DOC_CTRL7_6 BIT(6)
+#define BIT_DOC_CTL7_DOC_CTRL7_5 BIT(5)
+#define MSK_DOC_CTL7_DOC_CTRL7_4_3 0x18
+#define MSK_DOC_CTL7_DOC_CTRL7_2_0 0x07
+
+/* DoC 9th Ctl, default value: 0x00 */
+#define REG_DOC_CTL8 0x076c
+#define BIT_DOC_CTL8_DOC_CTRL8_7 BIT(7)
+#define MSK_DOC_CTL8_DOC_CTRL8_6_4 0x70
+#define MSK_DOC_CTL8_DOC_CTRL8_3_2 0x0c
+#define MSK_DOC_CTL8_DOC_CTRL8_1_0 0x03
+
+/* DoC 10th Ctl, default value: 0x00 */
+#define REG_DOC_CTL9 0x076d
+
+/* DoC 11th Ctl, default value: 0x00 */
+#define REG_DOC_CTLA 0x076e
+
+/* DoC 15th Ctl, default value: 0x00 */
+#define REG_DOC_CTLE 0x0772
+#define BIT_DOC_CTLE_DOC_CTRLE_7 BIT(7)
+#define BIT_DOC_CTLE_DOC_CTRLE_6 BIT(6)
+#define MSK_DOC_CTLE_DOC_CTRLE_5_4 0x30
+#define MSK_DOC_CTLE_DOC_CTRLE_3_0 0x0f
+
+/* Interrupt Mask 1st, default value: 0x00 */
+#define REG_MHL_INT_0_MASK 0x0580
+
+/* Interrupt Mask 2nd, default value: 0x00 */
+#define REG_MHL_INT_1_MASK 0x0581
+
+/* Interrupt Mask 3rd, default value: 0x00 */
+#define REG_MHL_INT_2_MASK 0x0582
+
+/* Interrupt Mask 4th, default value: 0x00 */
+#define REG_MHL_INT_3_MASK 0x0583
+
+/* MDT Receive Time Out, default value: 0x00 */
+#define REG_MDT_RCV_TIMEOUT 0x0584
+
+/* MDT Transmit Time Out, default value: 0x00 */
+#define REG_MDT_XMIT_TIMEOUT 0x0585
+
+/* MDT Receive Control, default value: 0x00 */
+#define REG_MDT_RCV_CTRL 0x0586
+#define BIT_MDT_RCV_CTRL_MDT_RCV_EN BIT(7)
+#define BIT_MDT_RCV_CTRL_MDT_DELAY_RCV_EN BIT(6)
+#define BIT_MDT_RCV_CTRL_MDT_RFIFO_OVER_WR_EN BIT(4)
+#define BIT_MDT_RCV_CTRL_MDT_XFIFO_OVER_WR_EN BIT(3)
+#define BIT_MDT_RCV_CTRL_MDT_DISABLE BIT(2)
+#define BIT_MDT_RCV_CTRL_MDT_RFIFO_CLR_ALL BIT(1)
+#define BIT_MDT_RCV_CTRL_MDT_RFIFO_CLR_CUR BIT(0)
+
+/* MDT Receive Read Port, default value: 0x00 */
+#define REG_MDT_RCV_READ_PORT 0x0587
+
+/* MDT Transmit Control, default value: 0x70 */
+#define REG_MDT_XMIT_CTRL 0x0588
+#define BIT_MDT_XMIT_CTRL_MDT_XMIT_EN BIT(7)
+#define BIT_MDT_XMIT_CTRL_MDT_XMIT_CMD_MERGE_EN BIT(6)
+#define BIT_MDT_XMIT_CTRL_MDT_XMIT_FIXED_BURST_LEN BIT(5)
+#define BIT_MDT_XMIT_CTRL_MDT_XMIT_FIXED_AID BIT(4)
+#define BIT_MDT_XMIT_CTRL_MDT_XMIT_SINGLE_RUN_EN BIT(3)
+#define BIT_MDT_XMIT_CTRL_MDT_CLR_ABORT_WAIT BIT(2)
+#define BIT_MDT_XMIT_CTRL_MDT_XFIFO_CLR_ALL BIT(1)
+#define BIT_MDT_XMIT_CTRL_MDT_XFIFO_CLR_CUR BIT(0)
+
+/* MDT Transmit WRITE Port, default value: 0x00 */
+#define REG_MDT_XMIT_WRITE_PORT 0x0589
+
+/* MDT RFIFO Status, default value: 0x00 */
+#define REG_MDT_RFIFO_STAT 0x058a
+#define MSK_MDT_RFIFO_STAT_MDT_RFIFO_CNT 0xe0
+#define MSK_MDT_RFIFO_STAT_MDT_RFIFO_CUR_BYTE_CNT 0x1f
+
+/* MDT XFIFO Status, default value: 0x80 */
+#define REG_MDT_XFIFO_STAT 0x058b
+#define MSK_MDT_XFIFO_STAT_MDT_XFIFO_LEVEL_AVAIL 0xe0
+#define BIT_MDT_XFIFO_STAT_MDT_XMIT_PRE_HS_EN BIT(4)
+#define MSK_MDT_XFIFO_STAT_MDT_WRITE_BURST_LEN 0x0f
+
+/* MDT Interrupt 0, default value: 0x0c */
+#define REG_MDT_INT_0 0x058c
+#define BIT_MDT_RFIFO_DATA_RDY BIT(0)
+#define BIT_MDT_IDLE_AFTER_HAWB_DISABLE BIT(2)
+#define BIT_MDT_XFIFO_EMPTY BIT(3)
+
+/* MDT Interrupt 0 Mask, default value: 0x00 */
+#define REG_MDT_INT_0_MASK 0x058d
+
+/* MDT Interrupt 1, default value: 0x00 */
+#define REG_MDT_INT_1 0x058e
+#define BIT_MDT_RCV_TIMEOUT BIT(0)
+#define BIT_MDT_RCV_SM_ABORT_PKT_RCVD BIT(1)
+#define BIT_MDT_RCV_SM_ERROR BIT(2)
+#define BIT_MDT_XMIT_TIMEOUT BIT(5)
+#define BIT_MDT_XMIT_SM_ABORT_PKT_RCVD BIT(6)
+#define BIT_MDT_XMIT_SM_ERROR BIT(7)
+
+/* MDT Interrupt 1 Mask, default value: 0x00 */
+#define REG_MDT_INT_1_MASK 0x058f
+
+/* CBUS Vendor ID, default value: 0x01 */
+#define REG_CBUS_VENDOR_ID 0x0590
+
+/* CBUS Connection Status, default value: 0x00 */
+#define REG_CBUS_STATUS 0x0591
+#define BIT_CBUS_STATUS_MHL_CABLE_PRESENT BIT(4)
+#define BIT_CBUS_STATUS_MSC_HB_SUCCESS BIT(3)
+#define BIT_CBUS_STATUS_CBUS_HPD BIT(2)
+#define BIT_CBUS_STATUS_MHL_MODE BIT(1)
+#define BIT_CBUS_STATUS_CBUS_CONNECTED BIT(0)
+
+/* CBUS Interrupt 1st, default value: 0x00 */
+#define REG_CBUS_INT_0 0x0592
+#define BIT_CBUS_MSC_MT_DONE_NACK BIT(7)
+#define BIT_CBUS_MSC_MR_SET_INT BIT(6)
+#define BIT_CBUS_MSC_MR_WRITE_BURST BIT(5)
+#define BIT_CBUS_MSC_MR_MSC_MSG BIT(4)
+#define BIT_CBUS_MSC_MR_WRITE_STAT BIT(3)
+#define BIT_CBUS_HPD_CHG BIT(2)
+#define BIT_CBUS_MSC_MT_DONE BIT(1)
+#define BIT_CBUS_CNX_CHG BIT(0)
+
+/* CBUS Interrupt Mask 1st, default value: 0x00 */
+#define REG_CBUS_INT_0_MASK 0x0593
+
+/* CBUS Interrupt 2nd, default value: 0x00 */
+#define REG_CBUS_INT_1 0x0594
+#define BIT_CBUS_CMD_ABORT BIT(6)
+#define BIT_CBUS_MSC_ABORT_RCVD BIT(3)
+#define BIT_CBUS_DDC_ABORT BIT(2)
+#define BIT_CBUS_CEC_ABORT BIT(1)
+
+/* CBUS Interrupt Mask 2nd, default value: 0x00 */
+#define REG_CBUS_INT_1_MASK 0x0595
+
+/* CBUS DDC Abort Interrupt, default value: 0x00 */
+#define REG_DDC_ABORT_INT 0x0598
+
+/* CBUS DDC Abort Interrupt Mask, default value: 0x00 */
+#define REG_DDC_ABORT_INT_MASK 0x0599
+
+/* CBUS MSC Requester Abort Interrupt, default value: 0x00 */
+#define REG_MSC_MT_ABORT_INT 0x059a
+
+/* CBUS MSC Requester Abort Interrupt Mask, default value: 0x00 */
+#define REG_MSC_MT_ABORT_INT_MASK 0x059b
+
+/* CBUS MSC Responder Abort Interrupt, default value: 0x00 */
+#define REG_MSC_MR_ABORT_INT 0x059c
+
+/* CBUS MSC Responder Abort Interrupt Mask, default value: 0x00 */
+#define REG_MSC_MR_ABORT_INT_MASK 0x059d
+
+/* CBUS RX DISCOVERY interrupt, default value: 0x00 */
+#define REG_CBUS_RX_DISC_INT0 0x059e
+
+/* CBUS RX DISCOVERY Interrupt Mask, default value: 0x00 */
+#define REG_CBUS_RX_DISC_INT0_MASK 0x059f
+
+/* CBUS_Link_Layer Control #8, default value: 0x00 */
+#define REG_CBUS_LINK_CTRL_8 0x05a7
+
+/* MDT State Machine Status, default value: 0x00 */
+#define REG_MDT_SM_STAT 0x05b5
+#define MSK_MDT_SM_STAT_MDT_RCV_STATE 0xf0
+#define MSK_MDT_SM_STAT_MDT_XMIT_STATE 0x0f
+
+/* CBUS MSC command trigger, default value: 0x00 */
+#define REG_MSC_COMMAND_START 0x05b8
+#define BIT_MSC_COMMAND_START_DEBUG BIT(5)
+#define BIT_MSC_COMMAND_START_WRITE_BURST BIT(4)
+#define BIT_MSC_COMMAND_START_WRITE_STAT BIT(3)
+#define BIT_MSC_COMMAND_START_READ_DEVCAP BIT(2)
+#define BIT_MSC_COMMAND_START_MSC_MSG BIT(1)
+#define BIT_MSC_COMMAND_START_PEER BIT(0)
+
+/* CBUS MSC Command/Offset, default value: 0x00 */
+#define REG_MSC_CMD_OR_OFFSET 0x05b9
+
+/* CBUS MSC Transmit Data */
+#define REG_MSC_1ST_TRANSMIT_DATA 0x05ba
+#define REG_MSC_2ND_TRANSMIT_DATA 0x05bb
+
+/* CBUS MSC Requester Received Data */
+#define REG_MSC_MT_RCVD_DATA0 0x05bc
+#define REG_MSC_MT_RCVD_DATA1 0x05bd
+
+/* CBUS MSC Responder MSC_MSG Received Data */
+#define REG_MSC_MR_MSC_MSG_RCVD_1ST_DATA 0x05bf
+#define REG_MSC_MR_MSC_MSG_RCVD_2ND_DATA 0x05c0
+
+/* CBUS MSC Heartbeat Control, default value: 0x27 */
+#define REG_MSC_HEARTBEAT_CTRL 0x05c4
+#define BIT_MSC_HEARTBEAT_CTRL_MSC_HB_EN BIT(7)
+#define MSK_MSC_HEARTBEAT_CTRL_MSC_HB_FAIL_LIMIT 0x70
+#define MSK_MSC_HEARTBEAT_CTRL_MSC_HB_PERIOD_MSB 0x0f
+
+/* CBUS MSC Compatibility Control, default value: 0x02 */
+#define REG_CBUS_MSC_COMPAT_CTRL 0x05c7
+#define BIT_CBUS_MSC_COMPAT_CTRL_XDEVCAP_EN BIT(7)
+#define BIT_CBUS_MSC_COMPAT_CTRL_DISABLE_MSC_ON_CBUS BIT(6)
+#define BIT_CBUS_MSC_COMPAT_CTRL_DISABLE_DDC_ON_CBUS BIT(5)
+#define BIT_CBUS_MSC_COMPAT_CTRL_DISABLE_GET_DDC_ERRORCODE BIT(3)
+#define BIT_CBUS_MSC_COMPAT_CTRL_DISABLE_GET_VS1_ERRORCODE BIT(2)
+
+/* CBUS3 Converter Control, default value: 0x24 */
+#define REG_CBUS3_CNVT 0x05dc
+#define MSK_CBUS3_CNVT_CBUS3_RETRYLMT 0xf0
+#define MSK_CBUS3_CNVT_CBUS3_PEERTOUT_SEL 0x0c
+#define BIT_CBUS3_CNVT_TEARCBUS_EN BIT(1)
+#define BIT_CBUS3_CNVT_CBUS3CNVT_EN BIT(0)
+
+/* Discovery Control1, default value: 0x24 */
+#define REG_DISC_CTRL1 0x05e0
+#define BIT_DISC_CTRL1_CBUS_INTR_EN BIT(7)
+#define BIT_DISC_CTRL1_HB_ONLY BIT(6)
+#define MSK_DISC_CTRL1_DISC_ATT 0x30
+#define MSK_DISC_CTRL1_DISC_CYC 0x0c
+#define BIT_DISC_CTRL1_DISC_EN BIT(0)
+
+#define VAL_PUP_OFF 0
+#define VAL_PUP_20K 1
+#define VAL_PUP_5K 2
+
+/* Discovery Control4, default value: 0x80 */
+#define REG_DISC_CTRL4 0x05e3
+#define MSK_DISC_CTRL4_CBUSDISC_PUP_SEL 0xc0
+#define MSK_DISC_CTRL4_CBUSIDLE_PUP_SEL 0x30
+#define VAL_DISC_CTRL4(pup_disc, pup_idle) (((pup_disc) << 6) | ((pup_idle) << 4))
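+/* e.g. VAL_DISC_CTRL4(VAL_PUP_20K, VAL_PUP_OFF) == (1 << 6) | (0 << 4) == 0x40 */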
+
+/* Discovery Control5, default value: 0x03 */
+#define REG_DISC_CTRL5 0x05e4
+#define BIT_DISC_CTRL5_DSM_OVRIDE BIT(3)
+#define MSK_DISC_CTRL5_CBUSMHL_PUP_SEL 0x03
+
+/* Discovery Control8, default value: 0x81 */
+#define REG_DISC_CTRL8 0x05e7
+#define BIT_DISC_CTRL8_NOMHLINT_CLR_BYPASS BIT(7)
+#define BIT_DISC_CTRL8_DELAY_CBUS_INTR_EN BIT(0)
+
+/* Discovery Control9, default value: 0x54 */
+#define REG_DISC_CTRL9 0x05e8
+#define BIT_DISC_CTRL9_MHL3_RSEN_BYP BIT(7)
+#define BIT_DISC_CTRL9_MHL3DISC_EN BIT(6)
+#define BIT_DISC_CTRL9_WAKE_DRVFLT BIT(4)
+#define BIT_DISC_CTRL9_NOMHL_EST BIT(3)
+#define BIT_DISC_CTRL9_DISC_PULSE_PROCEED BIT(2)
+#define BIT_DISC_CTRL9_WAKE_PULSE_BYPASS BIT(1)
+#define BIT_DISC_CTRL9_VBUS_OUTPUT_CAPABILITY_SRC BIT(0)
+
+/* Discovery Status1, default value: 0x00 */
+#define REG_DISC_STAT1 0x05eb
+#define BIT_DISC_STAT1_PSM_OVRIDE BIT(5)
+#define MSK_DISC_STAT1_DISC_SM 0x0f
+
+/* Discovery Status2, default value: 0x00 */
+#define REG_DISC_STAT2 0x05ec
+#define BIT_DISC_STAT2_CBUS_OE_POL BIT(6)
+#define BIT_DISC_STAT2_CBUS_SATUS BIT(5)
+#define BIT_DISC_STAT2_RSEN BIT(4)
+
+#define MSK_DISC_STAT2_MHL_VRSN 0x0c
+#define VAL_DISC_STAT2_DEFAULT 0x00
+#define VAL_DISC_STAT2_MHL1_2 0x04
+#define VAL_DISC_STAT2_MHL3 0x08
+#define VAL_DISC_STAT2_RESERVED 0x0c
+
+#define MSK_DISC_STAT2_RGND 0x03
+#define VAL_RGND_OPEN 0x00
+#define VAL_RGND_2K 0x01
+#define VAL_RGND_1K 0x02
+#define VAL_RGND_SHORT 0x03
+
+/* Interrupt CBUS_reg1 INTR0, default value: 0x00 */
+#define REG_CBUS_DISC_INTR0 0x05ed
+#define BIT_RGND_READY_INT BIT(6)
+#define BIT_CBUS_MHL12_DISCON_INT BIT(5)
+#define BIT_CBUS_MHL3_DISCON_INT BIT(4)
+#define BIT_NOT_MHL_EST_INT BIT(3)
+#define BIT_MHL_EST_INT BIT(2)
+#define BIT_MHL3_EST_INT BIT(1)
+#define VAL_CBUS_MHL_DISCON (BIT_CBUS_MHL12_DISCON_INT \
+ | BIT_CBUS_MHL3_DISCON_INT \
+ | BIT_NOT_MHL_EST_INT)
+
+/* Interrupt CBUS_reg1 INTR0 Mask, default value: 0x00 */
+#define REG_CBUS_DISC_INTR0_MASK 0x05ee
+
+#endif /* __SIL_SII8620_H__ */
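
The header above follows one naming scheme throughout: REG_* are register addresses, BIT_* single-bit flags, MSK_* multi-bit field masks, and VAL_* the encodings those fields can take. As a minimal sketch of how the pieces compose (struct sii8620 and sii8620_readb() are assumed for illustration and are not part of this patch; only the constants come from the header), decoding the RGND ladder field of REG_DISC_STAT2 could look like:

/* Illustrative sketch only: sii8620_readb() is an assumed byte-read helper. */
static const char *sii8620_rgnd_name(struct sii8620 *ctx)
{
	u8 stat = sii8620_readb(ctx, REG_DISC_STAT2);

	switch (stat & MSK_DISC_STAT2_RGND) {
	case VAL_RGND_OPEN:
		return "open";
	case VAL_RGND_2K:
		return "2k";
	case VAL_RGND_1K:
		return "1k";
	default:
		return "short";		/* VAL_RGND_SHORT */
	}
}
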
diff --git a/drivers/gpu/drm/bridge/tc358767.c b/drivers/gpu/drm/bridge/tc358767.c
index 44d476ea6d2e..de9ffb49e9f6 100644
--- a/drivers/gpu/drm/bridge/tc358767.c
+++ b/drivers/gpu/drm/bridge/tc358767.c
@@ -908,7 +908,7 @@ static int tc_main_link_setup(struct tc_data *tc)
goto err_dpcd_read;
if (tmp[0] != tc->assr) {
- dev_warn(dev, "Failed to switch display ASSR to %d, falling back to unscrambled mode\n",
+ dev_dbg(dev, "Failed to switch display ASSR to %d, falling back to unscrambled mode\n",
tc->assr);
/* trying with disabled scrambler */
tc->link.scrambler_dis = 1;
@@ -1038,12 +1038,6 @@ err:
return ret;
}
-static enum drm_connector_status
-tc_connector_detect(struct drm_connector *connector, bool force)
-{
- return connector_status_connected;
-}
-
static void tc_bridge_pre_enable(struct drm_bridge *bridge)
{
struct tc_data *tc = bridge_to_tc(bridge);
@@ -1168,7 +1162,6 @@ static const struct drm_connector_helper_funcs tc_connector_helper_funcs = {
static const struct drm_connector_funcs tc_connector_funcs = {
.dpms = drm_atomic_helper_connector_dpms,
.fill_modes = drm_helper_probe_single_connector_modes,
- .detect = tc_connector_detect,
.destroy = drm_connector_cleanup,
.reset = drm_atomic_helper_connector_reset,
.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
diff --git a/drivers/gpu/drm/bridge/ti-tfp410.c b/drivers/gpu/drm/bridge/ti-tfp410.c
new file mode 100644
index 000000000000..b054ea349952
--- /dev/null
+++ b/drivers/gpu/drm/bridge/ti-tfp410.c
@@ -0,0 +1,317 @@
+/*
+ * Copyright (C) 2016 Texas Instruments
+ * Author: Jyri Sarha <jsarha@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/of_graph.h>
+#include <linux/platform_device.h>
+#include <linux/i2c.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+
+struct tfp410 {
+ struct drm_bridge bridge;
+ struct drm_connector connector;
+
+ struct i2c_adapter *ddc;
+
+ struct device *dev;
+};
+
+static inline struct tfp410 *
+drm_bridge_to_tfp410(struct drm_bridge *bridge)
+{
+ return container_of(bridge, struct tfp410, bridge);
+}
+
+static inline struct tfp410 *
+drm_connector_to_tfp410(struct drm_connector *connector)
+{
+ return container_of(connector, struct tfp410, connector);
+}
+
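+/*
+ * Read modes from the connector's EDID when a DDC bus was found in the
+ * device tree; otherwise fall back to the standard modes up to
+ * 1920x1200, with 1024x768 marked as preferred.
+ */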
+static int tfp410_get_modes(struct drm_connector *connector)
+{
+ struct tfp410 *dvi = drm_connector_to_tfp410(connector);
+ struct edid *edid;
+ int ret;
+
+ if (!dvi->ddc)
+ goto fallback;
+
+ edid = drm_get_edid(connector, dvi->ddc);
+ if (!edid) {
+ DRM_INFO("EDID read failed. Fallback to standard modes\n");
+ goto fallback;
+ }
+
+ drm_mode_connector_update_edid_property(connector, edid);
+
+ return drm_add_edid_modes(connector, edid);
+fallback:
+ /* No EDID, fall back to the XGA standard modes */
+ ret = drm_add_modes_noedid(connector, 1920, 1200);
+
+ /* And prefer a mode pretty much anything can handle */
+ drm_set_preferred_mode(connector, 1024, 768);
+
+ return ret;
+}
+
+static const struct drm_connector_helper_funcs tfp410_con_helper_funcs = {
+ .get_modes = tfp410_get_modes,
+};
+
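+/*
+ * Infer connector presence by probing the DDC bus when one is
+ * available; without a bus the state cannot be determined.
+ */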
+static enum drm_connector_status
+tfp410_connector_detect(struct drm_connector *connector, bool force)
+{
+ struct tfp410 *dvi = drm_connector_to_tfp410(connector);
+
+ if (dvi->ddc) {
+ if (drm_probe_ddc(dvi->ddc))
+ return connector_status_connected;
+ else
+ return connector_status_disconnected;
+ }
+
+ return connector_status_unknown;
+}
+
+static const struct drm_connector_funcs tfp410_con_funcs = {
+ .dpms = drm_atomic_helper_connector_dpms,
+ .detect = tfp410_connector_detect,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .destroy = drm_connector_cleanup,
+ .reset = drm_atomic_helper_connector_reset,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+};
+
+static int tfp410_attach(struct drm_bridge *bridge)
+{
+ struct tfp410 *dvi = drm_bridge_to_tfp410(bridge);
+ int ret;
+
+ if (!bridge->encoder) {
+ dev_err(dvi->dev, "Missing encoder\n");
+ return -ENODEV;
+ }
+
+ drm_connector_helper_add(&dvi->connector,
+ &tfp410_con_helper_funcs);
+ ret = drm_connector_init(bridge->dev, &dvi->connector,
+ &tfp410_con_funcs, DRM_MODE_CONNECTOR_HDMIA);
+ if (ret) {
+ dev_err(dvi->dev, "drm_connector_init() failed: %d\n", ret);
+ return ret;
+ }
+
+ drm_mode_connector_attach_encoder(&dvi->connector,
+ bridge->encoder);
+
+ return 0;
+}
+
+static const struct drm_bridge_funcs tfp410_bridge_funcs = {
+ .attach = tfp410_attach,
+};
+
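+/*
+ * Follow the OF graph from port@1 to the connector node and resolve its
+ * "ddc-i2c-bus" phandle. A phandle naming a not-yet-probed adapter
+ * defers the probe; a missing phandle is not an error.
+ */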
+static int tfp410_get_connector_ddc(struct tfp410 *dvi)
+{
+ struct device_node *ep = NULL, *connector_node = NULL;
+ struct device_node *ddc_phandle = NULL;
+ int ret = 0;
+
+ /* port@1 is the connector node */
+ ep = of_graph_get_endpoint_by_regs(dvi->dev->of_node, 1, -1);
+ if (!ep)
+ goto fail;
+
+ connector_node = of_graph_get_remote_port_parent(ep);
+ if (!connector_node)
+ goto fail;
+
+ ddc_phandle = of_parse_phandle(connector_node, "ddc-i2c-bus", 0);
+ if (!ddc_phandle)
+ goto fail;
+
+ dvi->ddc = of_get_i2c_adapter_by_node(ddc_phandle);
+ if (dvi->ddc)
+ dev_info(dvi->dev, "Connector's ddc i2c bus found\n");
+ else
+ ret = -EPROBE_DEFER;
+
+fail:
+ of_node_put(ep);
+ of_node_put(connector_node);
+ of_node_put(ddc_phandle);
+ return ret;
+}
+
+static int tfp410_init(struct device *dev)
+{
+ struct tfp410 *dvi;
+ int ret;
+
+ if (!dev->of_node) {
+ dev_err(dev, "device-tree data is missing\n");
+ return -ENXIO;
+ }
+
+ dvi = devm_kzalloc(dev, sizeof(*dvi), GFP_KERNEL);
+ if (!dvi)
+ return -ENOMEM;
+ dev_set_drvdata(dev, dvi);
+
+ dvi->bridge.funcs = &tfp410_bridge_funcs;
+ dvi->bridge.of_node = dev->of_node;
+ dvi->dev = dev;
+
+ ret = tfp410_get_connector_ddc(dvi);
+ if (ret)
+ goto fail;
+
+ ret = drm_bridge_add(&dvi->bridge);
+ if (ret) {
+ dev_err(dev, "drm_bridge_add() failed: %d\n", ret);
+ goto fail;
+ }
+
+ return 0;
+fail:
+ i2c_put_adapter(dvi->ddc);
+ return ret;
+}
+
+static int tfp410_fini(struct device *dev)
+{
+ struct tfp410 *dvi = dev_get_drvdata(dev);
+
+ drm_bridge_remove(&dvi->bridge);
+
+ if (dvi->ddc)
+ i2c_put_adapter(dvi->ddc);
+
+ return 0;
+}
+
+static int tfp410_probe(struct platform_device *pdev)
+{
+ return tfp410_init(&pdev->dev);
+}
+
+static int tfp410_remove(struct platform_device *pdev)
+{
+ return tfp410_fini(&pdev->dev);
+}
+
+static const struct of_device_id tfp410_match[] = {
+ { .compatible = "ti,tfp410" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, tfp410_match);
+
+struct platform_driver tfp410_platform_driver = {
+ .probe = tfp410_probe,
+ .remove = tfp410_remove,
+ .driver = {
+ .name = "tfp410-bridge",
+ .of_match_table = tfp410_match,
+ },
+};
+
+#if IS_ENABLED(CONFIG_I2C)
+/* There is currently no i2c functionality. */
+static int tfp410_i2c_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ u32 reg;
+
+ if (!client->dev.of_node ||
+ of_property_read_u32(client->dev.of_node, "reg", &reg)) {
+ dev_err(&client->dev,
+ "Can't get i2c reg property from device-tree\n");
+ return -ENXIO;
+ }
+
+ return tfp410_init(&client->dev);
+}
+
+static int tfp410_i2c_remove(struct i2c_client *client)
+{
+ return tfp410_fini(&client->dev);
+}
+
+static const struct i2c_device_id tfp410_i2c_ids[] = {
+ { "tfp410", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, tfp410_i2c_ids);
+
+static struct i2c_driver tfp410_i2c_driver = {
+ .driver = {
+ .name = "tfp410",
+ .of_match_table = of_match_ptr(tfp410_match),
+ },
+ .id_table = tfp410_i2c_ids,
+ .probe = tfp410_i2c_probe,
+ .remove = tfp410_i2c_remove,
+};
+#endif /* IS_ENABLED(CONFIG_I2C) */
+
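+/*
+ * Track which of the two registrations succeeded, so that module init
+ * succeeds when either the I2C or the platform driver registered, and
+ * exit unwinds only what was actually registered.
+ */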
+static struct {
+ uint i2c:1;
+ uint platform:1;
+} tfp410_registered_driver;
+
+static int __init tfp410_module_init(void)
+{
+ int ret;
+
+#if IS_ENABLED(CONFIG_I2C)
+ ret = i2c_add_driver(&tfp410_i2c_driver);
+ if (ret)
+ pr_err("%s: registering i2c driver failed: %d",
+ __func__, ret);
+ else
+ tfp410_registered_driver.i2c = 1;
+#endif
+
+ ret = platform_driver_register(&tfp410_platform_driver);
+ if (ret)
+ pr_err("%s: registering platform driver failed: %d",
+ __func__, ret);
+ else
+ tfp410_registered_driver.platform = 1;
+
+ if (tfp410_registered_driver.i2c ||
+ tfp410_registered_driver.platform)
+ return 0;
+
+ return ret;
+}
+module_init(tfp410_module_init);
+
+static void __exit tfp410_module_exit(void)
+{
+#if IS_ENABLED(CONFIG_I2C)
+ if (tfp410_registered_driver.i2c)
+ i2c_del_driver(&tfp410_i2c_driver);
+#endif
+ if (tfp410_registered_driver.platform)
+ platform_driver_unregister(&tfp410_platform_driver);
+}
+module_exit(tfp410_module_exit);
+
+MODULE_AUTHOR("Jyri Sarha <jsarha@ti.com>");
+MODULE_DESCRIPTION("TI TFP410 DVI bridge driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/cirrus/cirrus_drv.c b/drivers/gpu/drm/cirrus/cirrus_drv.c
index 6c76d125995b..d893ea21a359 100644
--- a/drivers/gpu/drm/cirrus/cirrus_drv.c
+++ b/drivers/gpu/drm/cirrus/cirrus_drv.c
@@ -126,9 +126,7 @@ static const struct file_operations cirrus_driver_fops = {
.unlocked_ioctl = drm_ioctl,
.mmap = cirrus_mmap,
.poll = drm_poll,
-#ifdef CONFIG_COMPAT
.compat_ioctl = drm_compat_ioctl,
-#endif
};
static struct drm_driver driver = {
.driver_features = DRIVER_MODESET | DRIVER_GEM,
diff --git a/drivers/gpu/drm/cirrus/cirrus_fbdev.c b/drivers/gpu/drm/cirrus/cirrus_fbdev.c
index daecf1ad76a4..3a6309d7d8e4 100644
--- a/drivers/gpu/drm/cirrus/cirrus_fbdev.c
+++ b/drivers/gpu/drm/cirrus/cirrus_fbdev.c
@@ -138,12 +138,12 @@ static int cirrusfb_create_object(struct cirrus_fbdev *afbdev,
{
struct drm_device *dev = afbdev->helper.dev;
struct cirrus_device *cdev = dev->dev_private;
- u32 bpp, depth;
+ u32 bpp;
u32 size;
struct drm_gem_object *gobj;
-
int ret = 0;
- drm_fb_get_bpp_depth(mode_cmd->pixel_format, &depth, &bpp);
+
+ bpp = drm_format_plane_cpp(mode_cmd->pixel_format, 0) * 8;
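+ /* drm_format_plane_cpp() returns bytes per pixel; multiply by 8 for bits */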
if (!cirrus_check_framebuffer(cdev, mode_cmd->width, mode_cmd->height,
bpp, mode_cmd->pitches[0]))
diff --git a/drivers/gpu/drm/cirrus/cirrus_main.c b/drivers/gpu/drm/cirrus/cirrus_main.c
index 76bcb43e7c06..2c3c0d4072ce 100644
--- a/drivers/gpu/drm/cirrus/cirrus_main.c
+++ b/drivers/gpu/drm/cirrus/cirrus_main.c
@@ -52,10 +52,10 @@ cirrus_user_framebuffer_create(struct drm_device *dev,
struct cirrus_device *cdev = dev->dev_private;
struct drm_gem_object *obj;
struct cirrus_framebuffer *cirrus_fb;
+ u32 bpp;
int ret;
- u32 bpp, depth;
- drm_fb_get_bpp_depth(mode_cmd->pixel_format, &depth, &bpp);
+ bpp = drm_format_plane_cpp(mode_cmd->pixel_format, 0) * 8;
if (!cirrus_check_framebuffer(cdev, mode_cmd->width, mode_cmd->height,
bpp, mode_cmd->pitches[0]))
diff --git a/drivers/gpu/drm/cirrus/cirrus_mode.c b/drivers/gpu/drm/cirrus/cirrus_mode.c
index 17c915d9a03e..9a4a27c1afd2 100644
--- a/drivers/gpu/drm/cirrus/cirrus_mode.c
+++ b/drivers/gpu/drm/cirrus/cirrus_mode.c
@@ -498,12 +498,6 @@ static struct drm_encoder *cirrus_connector_best_encoder(struct drm_connector
return NULL;
}
-static enum drm_connector_status cirrus_vga_detect(struct drm_connector
- *connector, bool force)
-{
- return connector_status_connected;
-}
-
static void cirrus_connector_destroy(struct drm_connector *connector)
{
drm_connector_cleanup(connector);
@@ -517,7 +511,6 @@ static const struct drm_connector_helper_funcs cirrus_vga_connector_helper_funcs
static const struct drm_connector_funcs cirrus_vga_connector_funcs = {
.dpms = drm_helper_connector_dpms,
- .detect = cirrus_vga_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.destroy = cirrus_connector_destroy,
};
diff --git a/drivers/gpu/drm/cirrus/cirrus_ttm.c b/drivers/gpu/drm/cirrus/cirrus_ttm.c
index 5e7e63ce7bce..d6da848f7c6f 100644
--- a/drivers/gpu/drm/cirrus/cirrus_ttm.c
+++ b/drivers/gpu/drm/cirrus/cirrus_ttm.c
@@ -230,6 +230,7 @@ struct ttm_bo_driver cirrus_bo_driver = {
.ttm_tt_populate = cirrus_ttm_tt_populate,
.ttm_tt_unpopulate = cirrus_ttm_tt_unpopulate,
.init_mem_type = cirrus_bo_init_mem_type,
+ .eviction_valuable = ttm_bo_eviction_valuable,
.evict_flags = cirrus_bo_evict_flags,
.move = NULL,
.verify_access = cirrus_bo_verify_access,
diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c
index e6862a744210..60697482b94c 100644
--- a/drivers/gpu/drm/drm_atomic.c
+++ b/drivers/gpu/drm/drm_atomic.c
@@ -30,6 +30,8 @@
#include <drm/drm_atomic.h>
#include <drm/drm_mode.h>
#include <drm/drm_plane_helper.h>
+#include <drm/drm_print.h>
+#include <linux/sync_file.h>
#include "drm_crtc_internal.h"
@@ -74,6 +76,8 @@ EXPORT_SYMBOL(drm_atomic_state_default_release);
int
drm_atomic_state_init(struct drm_device *dev, struct drm_atomic_state *state)
{
+ kref_init(&state->ref);
+
/* TODO legacy paths should maybe do a better job about
* setting this appropriately?
*/
@@ -215,22 +219,16 @@ void drm_atomic_state_clear(struct drm_atomic_state *state)
EXPORT_SYMBOL(drm_atomic_state_clear);
/**
- * drm_atomic_state_free - free all memory for an atomic state
- * @state: atomic state to deallocate
+ * __drm_atomic_state_free - free all memory for an atomic state
+ * @ref: This atomic state to deallocate
*
* This frees all memory associated with an atomic state, including all the
* per-object state for planes, crtcs and connectors.
*/
-void drm_atomic_state_free(struct drm_atomic_state *state)
+void __drm_atomic_state_free(struct kref *ref)
{
- struct drm_device *dev;
- struct drm_mode_config *config;
-
- if (!state)
- return;
-
- dev = state->dev;
- config = &dev->mode_config;
+ struct drm_atomic_state *state = container_of(ref, typeof(*state), ref);
+ struct drm_mode_config *config = &state->dev->mode_config;
drm_atomic_state_clear(state);
@@ -243,7 +241,7 @@ void drm_atomic_state_free(struct drm_atomic_state *state)
kfree(state);
}
}
-EXPORT_SYMBOL(drm_atomic_state_free);
+EXPORT_SYMBOL(__drm_atomic_state_free);
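For reference, this release callback pairs with kref-based get/put helpers on the header side. A minimal sketch of what they presumably look like; the authoritative definitions belong in include/drm/drm_atomic.h, which is outside this hunk:

    /* Sketch of the header-side helpers pairing with __drm_atomic_state_free(). */
    static inline struct drm_atomic_state *
    drm_atomic_state_get(struct drm_atomic_state *state)
    {
            kref_get(&state->ref);
            return state;
    }

    static inline void drm_atomic_state_put(struct drm_atomic_state *state)
    {
            kref_put(&state->ref, __drm_atomic_state_free);
    }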
/**
* drm_atomic_get_crtc_state - get crtc state
@@ -292,6 +290,23 @@ drm_atomic_get_crtc_state(struct drm_atomic_state *state,
}
EXPORT_SYMBOL(drm_atomic_get_crtc_state);
+static void set_out_fence_for_crtc(struct drm_atomic_state *state,
+ struct drm_crtc *crtc, s64 __user *fence_ptr)
+{
+ state->crtcs[drm_crtc_index(crtc)].out_fence_ptr = fence_ptr;
+}
+
+static s64 __user *get_out_fence_for_crtc(struct drm_atomic_state *state,
+ struct drm_crtc *crtc)
+{
+ s64 __user *fence_ptr;
+
+ fence_ptr = state->crtcs[drm_crtc_index(crtc)].out_fence_ptr;
+ state->crtcs[drm_crtc_index(crtc)].out_fence_ptr = NULL;
+
+ return fence_ptr;
+}
+
/**
* drm_atomic_set_mode_for_crtc - set mode for CRTC
* @state: the CRTC whose incoming state to update
@@ -496,6 +511,16 @@ int drm_atomic_crtc_set_property(struct drm_crtc *crtc,
&replaced);
state->color_mgmt_changed |= replaced;
return ret;
+ } else if (property == config->prop_out_fence_ptr) {
+ s64 __user *fence_ptr = u64_to_user_ptr(val);
+
+ if (!fence_ptr)
+ return 0;
+
+ if (put_user(-1, fence_ptr))
+ return -EFAULT;
+
+ set_out_fence_for_crtc(state->state, crtc, fence_ptr);
} else if (crtc->funcs->atomic_set_property)
return crtc->funcs->atomic_set_property(crtc, state, property, val);
else
@@ -538,6 +563,8 @@ drm_atomic_crtc_get_property(struct drm_crtc *crtc,
*val = (state->ctm) ? state->ctm->base.id : 0;
else if (property == config->gamma_lut_property)
*val = (state->gamma_lut) ? state->gamma_lut->base.id : 0;
+ else if (property == config->prop_out_fence_ptr)
+ *val = 0;
else if (crtc->funcs->atomic_get_property)
return crtc->funcs->atomic_get_property(crtc, state, property, val);
else
@@ -609,6 +636,28 @@ static int drm_atomic_crtc_check(struct drm_crtc *crtc,
return 0;
}
+static void drm_atomic_crtc_print_state(struct drm_printer *p,
+ const struct drm_crtc_state *state)
+{
+ struct drm_crtc *crtc = state->crtc;
+
+ drm_printf(p, "crtc[%u]: %s\n", crtc->base.id, crtc->name);
+ drm_printf(p, "\tenable=%d\n", state->enable);
+ drm_printf(p, "\tactive=%d\n", state->active);
+ drm_printf(p, "\tplanes_changed=%d\n", state->planes_changed);
+ drm_printf(p, "\tmode_changed=%d\n", state->mode_changed);
+ drm_printf(p, "\tactive_changed=%d\n", state->active_changed);
+ drm_printf(p, "\tconnectors_changed=%d\n", state->connectors_changed);
+ drm_printf(p, "\tcolor_mgmt_changed=%d\n", state->color_mgmt_changed);
+ drm_printf(p, "\tplane_mask=%x\n", state->plane_mask);
+ drm_printf(p, "\tconnector_mask=%x\n", state->connector_mask);
+ drm_printf(p, "\tencoder_mask=%x\n", state->encoder_mask);
+ drm_printf(p, "\tmode: " DRM_MODE_FMT "\n", DRM_MODE_ARG(&state->mode));
+
+ if (crtc->funcs->atomic_print_state)
+ crtc->funcs->atomic_print_state(p, state);
+}
+
/**
* drm_atomic_get_plane_state - get plane state
* @state: global atomic state object
@@ -693,6 +742,17 @@ int drm_atomic_plane_set_property(struct drm_plane *plane,
drm_atomic_set_fb_for_plane(state, fb);
if (fb)
drm_framebuffer_unreference(fb);
+ } else if (property == config->prop_in_fence_fd) {
+ if (state->fence)
+ return -EINVAL;
+
+ if (U642I64(val) == -1)
+ return 0;
+
+ state->fence = sync_file_get_fence(val);
+ if (!state->fence)
+ return -EINVAL;
+
} else if (property == config->prop_crtc_id) {
struct drm_crtc *crtc = drm_crtc_find(dev, val);
return drm_atomic_set_crtc_for_plane(state, crtc);
@@ -712,7 +772,9 @@ int drm_atomic_plane_set_property(struct drm_plane *plane,
state->src_w = val;
} else if (property == config->prop_src_h) {
state->src_h = val;
- } else if (property == config->rotation_property) {
+ } else if (property == plane->rotation_property) {
+ if (!is_power_of_2(val & DRM_ROTATE_MASK))
+ return -EINVAL;
state->rotation = val;
} else if (property == plane->zpos_property) {
state->zpos = val;
@@ -752,6 +814,8 @@ drm_atomic_plane_get_property(struct drm_plane *plane,
if (property == config->prop_fb_id) {
*val = (state->fb) ? state->fb->base.id : 0;
+ } else if (property == config->prop_in_fence_fd) {
+ *val = -1;
} else if (property == config->prop_crtc_id) {
*val = (state->crtc) ? state->crtc->base.id : 0;
} else if (property == config->prop_crtc_x) {
@@ -770,7 +834,7 @@ drm_atomic_plane_get_property(struct drm_plane *plane,
*val = state->src_w;
} else if (property == config->prop_src_h) {
*val = state->src_h;
- } else if (property == config->rotation_property) {
+ } else if (property == plane->rotation_property) {
*val = state->rotation;
} else if (property == plane->zpos_property) {
*val = state->zpos;
@@ -840,9 +904,10 @@ static int drm_atomic_plane_check(struct drm_plane *plane,
/* Check whether this plane supports the fb pixel format. */
ret = drm_plane_check_pixel_format(plane, state->fb->pixel_format);
if (ret) {
- char *format_name = drm_get_format_name(state->fb->pixel_format);
- DRM_DEBUG_ATOMIC("Invalid pixel format %s\n", format_name);
- kfree(format_name);
+ struct drm_format_name_buf format_name;
+ DRM_DEBUG_ATOMIC("Invalid pixel format %s\n",
+ drm_get_format_name(state->fb->pixel_format,
+ &format_name));
return ret;
}
@@ -883,6 +948,39 @@ static int drm_atomic_plane_check(struct drm_plane *plane,
return 0;
}
+static void drm_atomic_plane_print_state(struct drm_printer *p,
+ const struct drm_plane_state *state)
+{
+ struct drm_plane *plane = state->plane;
+ struct drm_rect src = drm_plane_state_src(state);
+ struct drm_rect dest = drm_plane_state_dest(state);
+
+ drm_printf(p, "plane[%u]: %s\n", plane->base.id, plane->name);
+ drm_printf(p, "\tcrtc=%s\n", state->crtc ? state->crtc->name : "(null)");
+ drm_printf(p, "\tfb=%u\n", state->fb ? state->fb->base.id : 0);
+ if (state->fb) {
+ struct drm_framebuffer *fb = state->fb;
+ int i, n = drm_format_num_planes(fb->pixel_format);
+ struct drm_format_name_buf format_name;
+
+ drm_printf(p, "\t\tformat=%s\n",
+ drm_get_format_name(fb->pixel_format, &format_name));
+ drm_printf(p, "\t\t\tmodifier=0x%llx\n", fb->modifier);
+ drm_printf(p, "\t\tsize=%dx%d\n", fb->width, fb->height);
+ drm_printf(p, "\t\tlayers:\n");
+ for (i = 0; i < n; i++) {
+ drm_printf(p, "\t\t\tpitch[%d]=%u\n", i, fb->pitches[i]);
+ drm_printf(p, "\t\t\toffset[%d]=%u\n", i, fb->offsets[i]);
+ }
+ }
+ drm_printf(p, "\tcrtc-pos=" DRM_RECT_FMT "\n", DRM_RECT_ARG(&dest));
+ drm_printf(p, "\tsrc-pos=" DRM_RECT_FP_FMT "\n", DRM_RECT_FP_ARG(&src));
+ drm_printf(p, "\trotation=%x\n", state->rotation);
+
+ if (plane->funcs->atomic_print_state)
+ plane->funcs->atomic_print_state(p, state);
+}
+
/**
* drm_atomic_get_connector_state - get connector state
* @state: global atomic state object
@@ -989,15 +1087,53 @@ int drm_atomic_connector_set_property(struct drm_connector *connector,
* now?) atomic writes to DPMS property:
*/
return -EINVAL;
+ } else if (property == config->tv_select_subconnector_property) {
+ state->tv.subconnector = val;
+ } else if (property == config->tv_left_margin_property) {
+ state->tv.margins.left = val;
+ } else if (property == config->tv_right_margin_property) {
+ state->tv.margins.right = val;
+ } else if (property == config->tv_top_margin_property) {
+ state->tv.margins.top = val;
+ } else if (property == config->tv_bottom_margin_property) {
+ state->tv.margins.bottom = val;
+ } else if (property == config->tv_mode_property) {
+ state->tv.mode = val;
+ } else if (property == config->tv_brightness_property) {
+ state->tv.brightness = val;
+ } else if (property == config->tv_contrast_property) {
+ state->tv.contrast = val;
+ } else if (property == config->tv_flicker_reduction_property) {
+ state->tv.flicker_reduction = val;
+ } else if (property == config->tv_overscan_property) {
+ state->tv.overscan = val;
+ } else if (property == config->tv_saturation_property) {
+ state->tv.saturation = val;
+ } else if (property == config->tv_hue_property) {
+ state->tv.hue = val;
} else if (connector->funcs->atomic_set_property) {
return connector->funcs->atomic_set_property(connector,
state, property, val);
} else {
return -EINVAL;
}
+
+ return 0;
}
EXPORT_SYMBOL(drm_atomic_connector_set_property);
+static void drm_atomic_connector_print_state(struct drm_printer *p,
+ const struct drm_connector_state *state)
+{
+ struct drm_connector *connector = state->connector;
+
+ drm_printf(p, "connector[%u]: %s\n", connector->base.id, connector->name);
+ drm_printf(p, "\tcrtc=%s\n", state->crtc ? state->crtc->name : "(null)");
+
+ if (connector->funcs->atomic_print_state)
+ connector->funcs->atomic_print_state(p, state);
+}
+
/**
* drm_atomic_connector_get_property - get property value from connector state
* @connector: the drm connector to set a property on
@@ -1025,6 +1161,30 @@ drm_atomic_connector_get_property(struct drm_connector *connector,
*val = (state->crtc) ? state->crtc->base.id : 0;
} else if (property == config->dpms_property) {
*val = connector->dpms;
+ } else if (property == config->tv_select_subconnector_property) {
+ *val = state->tv.subconnector;
+ } else if (property == config->tv_left_margin_property) {
+ *val = state->tv.margins.left;
+ } else if (property == config->tv_right_margin_property) {
+ *val = state->tv.margins.right;
+ } else if (property == config->tv_top_margin_property) {
+ *val = state->tv.margins.top;
+ } else if (property == config->tv_bottom_margin_property) {
+ *val = state->tv.margins.bottom;
+ } else if (property == config->tv_mode_property) {
+ *val = state->tv.mode;
+ } else if (property == config->tv_brightness_property) {
+ *val = state->tv.brightness;
+ } else if (property == config->tv_contrast_property) {
+ *val = state->tv.contrast;
+ } else if (property == config->tv_flicker_reduction_property) {
+ *val = state->tv.flicker_reduction;
+ } else if (property == config->tv_overscan_property) {
+ *val = state->tv.overscan;
+ } else if (property == config->tv_saturation_property) {
+ *val = state->tv.saturation;
+ } else if (property == config->tv_hue_property) {
+ *val = state->tv.hue;
} else if (connector->funcs->atomic_get_property) {
return connector->funcs->atomic_get_property(connector,
state, property, val);
@@ -1136,22 +1296,48 @@ void
drm_atomic_set_fb_for_plane(struct drm_plane_state *plane_state,
struct drm_framebuffer *fb)
{
- if (plane_state->fb)
- drm_framebuffer_unreference(plane_state->fb);
- if (fb)
- drm_framebuffer_reference(fb);
- plane_state->fb = fb;
-
if (fb)
DRM_DEBUG_ATOMIC("Set [FB:%d] for plane state %p\n",
fb->base.id, plane_state);
else
DRM_DEBUG_ATOMIC("Set [NOFB] for plane state %p\n",
plane_state);
+
+ drm_framebuffer_assign(&plane_state->fb, fb);
}
EXPORT_SYMBOL(drm_atomic_set_fb_for_plane);
/**
+ * drm_atomic_set_fence_for_plane - set fence for plane
+ * @plane_state: atomic state object for the plane
+ * @fence: dma_fence to use for the plane
+ *
+ * Helper to set up the plane_state fence in case it is not set yet.
+ * By using this, drivers don't need to worry whether the user chose
+ * implicit or explicit fencing.
+ *
+ * This function will not set the fence to the state if it was set
+ * via explicit fencing interfaces on the atomic ioctl. In that case it
+ * will drop the reference to the fence as we are not storing it
+ * anywhere.
+ *
+ * Otherwise, if plane_state->fence is not set, this function just sets
+ * it to the received implicit fence.
+ */
+void
+drm_atomic_set_fence_for_plane(struct drm_plane_state *plane_state,
+ struct dma_fence *fence)
+{
+ if (plane_state->fence) {
+ dma_fence_put(fence);
+ return;
+ }
+
+ plane_state->fence = fence;
+}
+EXPORT_SYMBOL(drm_atomic_set_fence_for_plane);
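A minimal sketch of how a driver might feed the implicit fence into this helper from its ->prepare_fb hook; foo_fb_get_resv() is a hypothetical accessor for the reservation object the driver keeps in its GEM object:

    #include <linux/reservation.h>

    /* Hypothetical ->prepare_fb hook: fetch the exclusive (implicit) fence
     * and let the helper decide whether to keep it or drop it in favour of
     * an explicit fence set via IN_FENCE_FD. */
    static int foo_plane_prepare_fb(struct drm_plane *plane,
                                    struct drm_plane_state *new_state)
    {
            struct dma_fence *fence;

            if (!new_state->fb)
                    return 0;

            fence = reservation_object_get_excl_rcu(foo_fb_get_resv(new_state->fb));
            drm_atomic_set_fence_for_plane(new_state, fence);

            return 0;
    }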
+
+/**
* drm_atomic_set_crtc_for_connector - set crtc for connector
* @conn_state: atomic state object for the connector
* @crtc: crtc to use for the connector
@@ -1462,16 +1648,107 @@ int drm_atomic_nonblocking_commit(struct drm_atomic_state *state)
}
EXPORT_SYMBOL(drm_atomic_nonblocking_commit);
+static void drm_atomic_print_state(const struct drm_atomic_state *state)
+{
+ struct drm_printer p = drm_info_printer(state->dev->dev);
+ struct drm_plane *plane;
+ struct drm_plane_state *plane_state;
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *crtc_state;
+ struct drm_connector *connector;
+ struct drm_connector_state *connector_state;
+ int i;
+
+ DRM_DEBUG_ATOMIC("checking %p\n", state);
+
+ for_each_plane_in_state(state, plane, plane_state, i)
+ drm_atomic_plane_print_state(&p, plane_state);
+
+ for_each_crtc_in_state(state, crtc, crtc_state, i)
+ drm_atomic_crtc_print_state(&p, crtc_state);
+
+ for_each_connector_in_state(state, connector, connector_state, i)
+ drm_atomic_connector_print_state(&p, connector_state);
+}
+
+/**
+ * drm_state_dump - dump entire device atomic state
+ * @dev: the drm device
+ * @p: where to print the state to
+ *
+ * Just for debugging. Drivers might want an option to dump state
+ * to dmesg in case of error IRQs. (Hint: you probably want to
+ * ratelimit this!)
+ *
+ * The caller must hold drm_modeset_lock_all(); alternatively, if this is
+ * called from an error IRQ handler, it should not be enabled by default.
+ * (I.e. if you are debugging errors you might not care that this is racy,
+ * but calling this without all modeset locks held is not inherently safe.)
+ */
+void drm_state_dump(struct drm_device *dev, struct drm_printer *p)
+{
+ struct drm_mode_config *config = &dev->mode_config;
+ struct drm_plane *plane;
+ struct drm_crtc *crtc;
+ struct drm_connector *connector;
+
+ if (!drm_core_check_feature(dev, DRIVER_ATOMIC))
+ return;
+
+ list_for_each_entry(plane, &config->plane_list, head)
+ drm_atomic_plane_print_state(p, plane->state);
+
+ list_for_each_entry(crtc, &config->crtc_list, head)
+ drm_atomic_crtc_print_state(p, crtc->state);
+
+ list_for_each_entry(connector, &config->connector_list, head)
+ drm_atomic_connector_print_state(p, connector->state);
+}
+EXPORT_SYMBOL(drm_state_dump);
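As a usage sketch, a driver could wire this up in an error-IRQ path together with the drm_info_printer() used above, rate-limited as the comment suggests; foo_report_underrun() is a hypothetical driver function:

    #include <linux/ratelimit.h>

    static void foo_report_underrun(struct drm_device *dev)
    {
            static DEFINE_RATELIMIT_STATE(rs, DEFAULT_RATELIMIT_INTERVAL,
                                          DEFAULT_RATELIMIT_BURST);
            struct drm_printer p = drm_info_printer(dev->dev);

            /* Racy without modeset locks, but acceptable for debugging. */
            if (__ratelimit(&rs))
                    drm_state_dump(dev, &p);
    }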
+
+#ifdef CONFIG_DEBUG_FS
+static int drm_state_info(struct seq_file *m, void *data)
+{
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_device *dev = node->minor->dev;
+ struct drm_printer p = drm_seq_file_printer(m);
+
+ drm_modeset_lock_all(dev);
+ drm_state_dump(dev, &p);
+ drm_modeset_unlock_all(dev);
+
+ return 0;
+}
+
+/* any use in debugfs files to dump individual planes/crtc/etc? */
+static const struct drm_info_list drm_atomic_debugfs_list[] = {
+ {"state", drm_state_info, 0},
+};
+
+int drm_atomic_debugfs_init(struct drm_minor *minor)
+{
+ return drm_debugfs_create_files(drm_atomic_debugfs_list,
+ ARRAY_SIZE(drm_atomic_debugfs_list),
+ minor->debugfs_root, minor);
+}
+
+int drm_atomic_debugfs_cleanup(struct drm_minor *minor)
+{
+ return drm_debugfs_remove_files(drm_atomic_debugfs_list,
+ ARRAY_SIZE(drm_atomic_debugfs_list),
+ minor);
+}
+#endif
+
/*
* The big monster ioctl
*/
static struct drm_pending_vblank_event *create_vblank_event(
- struct drm_device *dev, struct drm_file *file_priv,
- struct fence *fence, uint64_t user_data)
+ struct drm_device *dev, uint64_t user_data)
{
struct drm_pending_vblank_event *e = NULL;
- int ret;
e = kzalloc(sizeof *e, GFP_KERNEL);
if (!e)
@@ -1481,17 +1758,6 @@ static struct drm_pending_vblank_event *create_vblank_event(
e->event.base.length = sizeof(e->event);
e->event.user_data = user_data;
- if (file_priv) {
- ret = drm_event_reserve_init(dev, file_priv, &e->base,
- &e->event.base);
- if (ret) {
- kfree(e);
- return NULL;
- }
- }
-
- e->base.fence = fence;
-
return e;
}
@@ -1596,6 +1862,203 @@ void drm_atomic_clean_old_fb(struct drm_device *dev,
}
EXPORT_SYMBOL(drm_atomic_clean_old_fb);
+/**
+ * DOC: explicit fencing properties
+ *
+ * Explicit fencing allows userspace to control the buffer synchronization
+ * between devices. A fence or a group of fences is transferred to/from
+ * userspace using Sync File fds, and there are two DRM properties for that:
+ * IN_FENCE_FD on each DRM Plane to send fences to the kernel and
+ * OUT_FENCE_PTR on each DRM CRTC to receive fences from the kernel.
+ *
+ * By contrast, with implicit fencing the kernel keeps track of any
+ * ongoing rendering, and automatically ensures that the atomic update waits
+ * for any pending rendering to complete. For shared buffers represented with
+ * a struct &dma_buf this is tracked in &reservation_object structures.
+ * Implicit syncing is how Linux traditionally worked (e.g. DRI2/3 on X.org),
+ * whereas explicit fencing is what Android wants.
+ *
+ * "IN_FENCE_FD”:
+ * Use this property to pass a fence that DRM should wait on before
+ * proceeding with the Atomic Commit request and showing the framebuffer for
+ * the plane on the screen. The fence can be either a normal fence or a
+ * merged one, the sync_file framework will handle both cases and use a
+ * fence_array if a merged fence is received. Passing -1 here means no
+ * fences to wait on.
+ *
+ * If the Atomic Commit request has the DRM_MODE_ATOMIC_TEST_ONLY flag
+ * it will only check if the Sync File is a valid one.
+ *
+ * On the driver side the fence is stored in the @fence member of
+ * struct &drm_plane_state. Drivers which also support implicit fencing
+ * should set the implicit fence using drm_atomic_set_fence_for_plane(),
+ * to make sure there's consistent behaviour between drivers in precedence
+ * of implicit vs. explicit fencing.
+ *
+ * "OUT_FENCE_PTR”:
+ * Use this property to pass a file descriptor pointer to DRM. Once the
+ * Atomic Commit request call returns OUT_FENCE_PTR will be filled with
+ * the file descriptor number of a Sync File. This Sync File contains the
+ * CRTC fence that will be signaled when all framebuffers present on the
+ * Atomic Commit request for that given CRTC are scanned out on the
+ * screen.
+ *
+ * The Atomic Commit request fails if an invalid pointer is passed. If the
+ * Atomic Commit request fails for any other reason the out fence fd
+ * returned will be -1. On an Atomic Commit with the
+ * DRM_MODE_ATOMIC_TEST_ONLY flag the out fence will also be set to -1.
+ *
+ * Note that out-fences don't have a special interface to drivers and are
+ * internally represented by a struct &drm_pending_vblank_event in struct
+ * &drm_crtc_state, which is also used by the nonblocking atomic commit
+ * helpers and for the DRM event handling for existing userspace.
+ */
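From the userspace side, the flow described above might look roughly like this with libdrm's atomic API; get_prop_id() is a made-up helper that resolves property names to IDs via drmModeObjectGetProperties():

    #include <xf86drm.h>
    #include <xf86drmMode.h>

    int64_t out_fence = -1;
    drmModeAtomicReq *req = drmModeAtomicAlloc();

    /* Ask the kernel to wait on in_fence_fd before scanning out the plane. */
    drmModeAtomicAddProperty(req, plane_id,
                             get_prop_id(fd, plane_id, "IN_FENCE_FD"),
                             in_fence_fd);
    /* Ask for a sync_file fd signalling when this CRTC has scanned out. */
    drmModeAtomicAddProperty(req, crtc_id,
                             get_prop_id(fd, crtc_id, "OUT_FENCE_PTR"),
                             (uint64_t)(uintptr_t)&out_fence);

    if (drmModeAtomicCommit(fd, req, DRM_MODE_ATOMIC_NONBLOCK, NULL) == 0) {
            /* out_fence now holds a sync_file fd; -1 on TEST_ONLY/failure. */
    }
    drmModeAtomicFree(req);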
+
+struct drm_out_fence_state {
+ s64 __user *out_fence_ptr;
+ struct sync_file *sync_file;
+ int fd;
+};
+
+static int setup_out_fence(struct drm_out_fence_state *fence_state,
+ struct dma_fence *fence)
+{
+ fence_state->fd = get_unused_fd_flags(O_CLOEXEC);
+ if (fence_state->fd < 0)
+ return fence_state->fd;
+
+ if (put_user(fence_state->fd, fence_state->out_fence_ptr))
+ return -EFAULT;
+
+ fence_state->sync_file = sync_file_create(fence);
+ if (!fence_state->sync_file)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static int prepare_crtc_signaling(struct drm_device *dev,
+ struct drm_atomic_state *state,
+ struct drm_mode_atomic *arg,
+ struct drm_file *file_priv,
+ struct drm_out_fence_state **fence_state,
+ unsigned int *num_fences)
+{
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *crtc_state;
+ int i, ret;
+
+ if (arg->flags & DRM_MODE_ATOMIC_TEST_ONLY)
+ return 0;
+
+ for_each_crtc_in_state(state, crtc, crtc_state, i) {
+ s64 __user *fence_ptr;
+
+ fence_ptr = get_out_fence_for_crtc(crtc_state->state, crtc);
+
+ if (arg->flags & DRM_MODE_PAGE_FLIP_EVENT || fence_ptr) {
+ struct drm_pending_vblank_event *e;
+
+ e = create_vblank_event(dev, arg->user_data);
+ if (!e)
+ return -ENOMEM;
+
+ crtc_state->event = e;
+ }
+
+ if (arg->flags & DRM_MODE_PAGE_FLIP_EVENT) {
+ struct drm_pending_vblank_event *e = crtc_state->event;
+
+ if (!file_priv)
+ continue;
+
+ ret = drm_event_reserve_init(dev, file_priv, &e->base,
+ &e->event.base);
+ if (ret) {
+ kfree(e);
+ crtc_state->event = NULL;
+ return ret;
+ }
+ }
+
+ if (fence_ptr) {
+ struct dma_fence *fence;
+ struct drm_out_fence_state *f;
+
+ f = krealloc(*fence_state, sizeof(**fence_state) *
+ (*num_fences + 1), GFP_KERNEL);
+ if (!f)
+ return -ENOMEM;
+
+ memset(&f[*num_fences], 0, sizeof(*f));
+
+ f[*num_fences].out_fence_ptr = fence_ptr;
+ *fence_state = f;
+
+ fence = drm_crtc_create_fence(crtc);
+ if (!fence)
+ return -ENOMEM;
+
+ ret = setup_out_fence(&f[(*num_fences)++], fence);
+ if (ret) {
+ dma_fence_put(fence);
+ return ret;
+ }
+
+ crtc_state->event->base.fence = fence;
+ }
+ }
+
+ return 0;
+}
+
+static void complete_crtc_signaling(struct drm_device *dev,
+ struct drm_atomic_state *state,
+ struct drm_out_fence_state *fence_state,
+ unsigned int num_fences,
+ bool install_fds)
+{
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *crtc_state;
+ int i;
+
+ if (install_fds) {
+ for (i = 0; i < num_fences; i++)
+ fd_install(fence_state[i].fd,
+ fence_state[i].sync_file->file);
+
+ kfree(fence_state);
+ return;
+ }
+
+ for_each_crtc_in_state(state, crtc, crtc_state, i) {
+ /*
+ * TEST_ONLY and PAGE_FLIP_EVENT are mutually
+ * exclusive, if they weren't, this code should be
+ * called on success for TEST_ONLY too.
+ */
+ if (crtc_state->event)
+ drm_event_cancel_free(dev, &crtc_state->event->base);
+ }
+
+ if (!fence_state)
+ return;
+
+ for (i = 0; i < num_fences; i++) {
+ if (fence_state[i].sync_file)
+ fput(fence_state[i].sync_file->file);
+ if (fence_state[i].fd >= 0)
+ put_unused_fd(fence_state[i].fd);
+
+ /* If this fails log error to the user */
+ if (fence_state[i].out_fence_ptr &&
+ put_user(-1, fence_state[i].out_fence_ptr))
+ DRM_DEBUG_ATOMIC("Couldn't clear out_fence_ptr\n");
+ }
+
+ kfree(fence_state);
+}
+
int drm_mode_atomic_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv)
{
@@ -1608,11 +2071,10 @@ int drm_mode_atomic_ioctl(struct drm_device *dev,
struct drm_atomic_state *state;
struct drm_modeset_acquire_ctx ctx;
struct drm_plane *plane;
- struct drm_crtc *crtc;
- struct drm_crtc_state *crtc_state;
+ struct drm_out_fence_state *fence_state = NULL;
unsigned plane_mask;
int ret = 0;
- unsigned int i, j;
+ unsigned int i, j, num_fences = 0;
/* disallow for drivers not supporting atomic: */
if (!drm_core_check_feature(dev, DRIVER_ATOMIC))
@@ -1727,50 +2189,30 @@ retry:
drm_mode_object_unreference(obj);
}
- if (arg->flags & DRM_MODE_PAGE_FLIP_EVENT) {
- for_each_crtc_in_state(state, crtc, crtc_state, i) {
- struct drm_pending_vblank_event *e;
-
- e = create_vblank_event(dev, file_priv, NULL,
- arg->user_data);
- if (!e) {
- ret = -ENOMEM;
- goto out;
- }
-
- crtc_state->event = e;
- }
- }
+ ret = prepare_crtc_signaling(dev, state, arg, file_priv, &fence_state,
+ &num_fences);
+ if (ret)
+ goto out;
if (arg->flags & DRM_MODE_ATOMIC_TEST_ONLY) {
/*
* Unlike commit, check_only does not clean up state.
- * Below we call drm_atomic_state_free for it.
+ * Below we call drm_atomic_state_put for it.
*/
ret = drm_atomic_check_only(state);
} else if (arg->flags & DRM_MODE_ATOMIC_NONBLOCK) {
ret = drm_atomic_nonblocking_commit(state);
} else {
+ if (unlikely(drm_debug & DRM_UT_STATE))
+ drm_atomic_print_state(state);
+
ret = drm_atomic_commit(state);
}
out:
drm_atomic_clean_old_fb(dev, plane_mask, ret);
- if (ret && arg->flags & DRM_MODE_PAGE_FLIP_EVENT) {
- /*
- * TEST_ONLY and PAGE_FLIP_EVENT are mutually exclusive,
- * if they weren't, this code should be called on success
- * for TEST_ONLY too.
- */
-
- for_each_crtc_in_state(state, crtc, crtc_state, i) {
- if (!crtc_state->event)
- continue;
-
- drm_event_cancel_free(dev, &crtc_state->event->base);
- }
- }
+ complete_crtc_signaling(dev, state, fence_state, num_fences, !ret);
if (ret == -EDEADLK) {
drm_atomic_state_clear(state);
@@ -1778,8 +2220,7 @@ out:
goto retry;
}
- if (ret || arg->flags & DRM_MODE_ATOMIC_TEST_ONLY)
- drm_atomic_state_free(state);
+ drm_atomic_state_put(state);
drm_modeset_drop_locks(&ctx);
drm_modeset_acquire_fini(&ctx);
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
index 21f992605541..583f47f27b36 100644
--- a/drivers/gpu/drm/drm_atomic_helper.c
+++ b/drivers/gpu/drm/drm_atomic_helper.c
@@ -30,7 +30,7 @@
#include <drm/drm_plane_helper.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_atomic_helper.h>
-#include <linux/fence.h>
+#include <linux/dma-fence.h>
#include "drm_crtc_internal.h"
@@ -458,10 +458,11 @@ mode_fixup(struct drm_atomic_state *state)
* removed from the crtc.
* crtc_state->active_changed is set when crtc_state->active changes,
* which is used for dpms.
+ * See also: drm_atomic_crtc_needs_modeset()
*
* IMPORTANT:
*
- * Drivers which update ->mode_changed (e.g. in their ->atomic_check hooks if a
+ * Drivers which set ->mode_changed (e.g. in their ->atomic_check hooks if a
* plane update can't be done without a full modeset) _must_ call this function
* after that change. It is permitted to call this function multiple
* times for the same update, e.g. when the ->atomic_check functions depend upon
@@ -510,9 +511,9 @@ drm_atomic_helper_check_modeset(struct drm_device *dev,
for_each_connector_in_state(state, connector, connector_state, i) {
/*
- * This only sets crtc->mode_changed for routing changes,
- * drivers must set crtc->mode_changed themselves when connector
- * properties need to be updated.
+ * This only sets crtc->connectors_changed for routing changes,
+ * drivers must set crtc->connectors_changed themselves when
+ * connector properties need to be updated.
*/
ret = update_connector_routing(state, connector,
connector_state);
@@ -1005,14 +1006,22 @@ EXPORT_SYMBOL(drm_atomic_helper_commit_modeset_enables);
* drm_atomic_helper_wait_for_fences - wait for fences stashed in plane state
* @dev: DRM device
* @state: atomic state object with old state structures
- * @pre_swap: if true, do an interruptible wait
+ * @pre_swap: If true, do an interruptible wait, and @state is the new state.
+ * Otherwise @state is the old state.
*
* For implicit sync, drivers should fish the exclusive fence out from the
* incoming fbs and stash it in the drm_plane_state. This is called after
* drm_atomic_helper_swap_state() so it uses the current plane state (and
* just uses the atomic state to find the changed planes)
*
- * Returns zero if success or < 0 if fence_wait() fails.
+ * Note that @pre_swap is needed since the point where we block for fences moves
+ * around depending upon whether an atomic commit is blocking or
+ * non-blocking. For async commit all waiting needs to happen after
+ * drm_atomic_helper_swap_state() is called, but for synchronous commits we want
+ * to wait **before** we do anything that can't be easily rolled back. That is
+ * before we call drm_atomic_helper_swap_state().
+ *
+ * Returns zero if success or < 0 if dma_fence_wait() fails.
*/
int drm_atomic_helper_wait_for_fences(struct drm_device *dev,
struct drm_atomic_state *state,
@@ -1036,11 +1045,11 @@ int drm_atomic_helper_wait_for_fences(struct drm_device *dev,
* still interrupt the operation. Instead of blocking until the
* timer expires, make the wait interruptible.
*/
- ret = fence_wait(plane_state->fence, pre_swap);
+ ret = dma_fence_wait(plane_state->fence, pre_swap);
if (ret)
return ret;
- fence_put(plane_state->fence);
+ dma_fence_put(plane_state->fence);
plane_state->fence = NULL;
}
@@ -1146,7 +1155,7 @@ EXPORT_SYMBOL(drm_atomic_helper_wait_for_vblanks);
/**
* drm_atomic_helper_commit_tail - commit atomic update to hardware
- * @state: new modeset state to be committed
+ * @old_state: atomic state object with old state structures
*
* This is the default implementation for the ->atomic_commit_tail() hook of the
* &drm_mode_config_helper_funcs vtable.
@@ -1157,53 +1166,53 @@ EXPORT_SYMBOL(drm_atomic_helper_wait_for_vblanks);
*
* For drivers supporting runtime PM the recommended sequence is instead ::
*
- * drm_atomic_helper_commit_modeset_disables(dev, state);
+ * drm_atomic_helper_commit_modeset_disables(dev, old_state);
*
- * drm_atomic_helper_commit_modeset_enables(dev, state);
+ * drm_atomic_helper_commit_modeset_enables(dev, old_state);
*
- * drm_atomic_helper_commit_planes(dev, state,
+ * drm_atomic_helper_commit_planes(dev, old_state,
* DRM_PLANE_COMMIT_ACTIVE_ONLY);
*
* for committing the atomic update to hardware. See the kerneldoc entries for
* these three functions for more details.
*/
-void drm_atomic_helper_commit_tail(struct drm_atomic_state *state)
+void drm_atomic_helper_commit_tail(struct drm_atomic_state *old_state)
{
- struct drm_device *dev = state->dev;
+ struct drm_device *dev = old_state->dev;
- drm_atomic_helper_commit_modeset_disables(dev, state);
+ drm_atomic_helper_commit_modeset_disables(dev, old_state);
- drm_atomic_helper_commit_planes(dev, state, 0);
+ drm_atomic_helper_commit_planes(dev, old_state, 0);
- drm_atomic_helper_commit_modeset_enables(dev, state);
+ drm_atomic_helper_commit_modeset_enables(dev, old_state);
- drm_atomic_helper_commit_hw_done(state);
+ drm_atomic_helper_commit_hw_done(old_state);
- drm_atomic_helper_wait_for_vblanks(dev, state);
+ drm_atomic_helper_wait_for_vblanks(dev, old_state);
- drm_atomic_helper_cleanup_planes(dev, state);
+ drm_atomic_helper_cleanup_planes(dev, old_state);
}
EXPORT_SYMBOL(drm_atomic_helper_commit_tail);
-static void commit_tail(struct drm_atomic_state *state)
+static void commit_tail(struct drm_atomic_state *old_state)
{
- struct drm_device *dev = state->dev;
+ struct drm_device *dev = old_state->dev;
struct drm_mode_config_helper_funcs *funcs;
funcs = dev->mode_config.helper_private;
- drm_atomic_helper_wait_for_fences(dev, state, false);
+ drm_atomic_helper_wait_for_fences(dev, old_state, false);
- drm_atomic_helper_wait_for_dependencies(state);
+ drm_atomic_helper_wait_for_dependencies(old_state);
if (funcs && funcs->atomic_commit_tail)
- funcs->atomic_commit_tail(state);
+ funcs->atomic_commit_tail(old_state);
else
- drm_atomic_helper_commit_tail(state);
+ drm_atomic_helper_commit_tail(old_state);
- drm_atomic_helper_commit_cleanup_done(state);
+ drm_atomic_helper_commit_cleanup_done(old_state);
- drm_atomic_state_free(state);
+ drm_atomic_state_put(old_state);
}
static void commit_work(struct work_struct *work)
@@ -1225,9 +1234,6 @@ static void commit_work(struct work_struct *work)
* function implements nonblocking commits, using
* drm_atomic_helper_setup_commit() and related functions.
*
- * Note that right now this function does not support nonblocking commits, hence
- * driver writers must implement their own version for now.
- *
* Committing the actual hardware state is done through the
* ->atomic_commit_tail() callback of the &drm_mode_config_helper_funcs vtable,
* or its default implementation drm_atomic_helper_commit_tail().
@@ -1285,6 +1291,7 @@ int drm_atomic_helper_commit(struct drm_device *dev,
* make sure work items don't artificially stall on one another.
*/
+ drm_atomic_state_get(state);
if (nonblock)
queue_work(system_unbound_wq, &state->commit_work);
else
@@ -1496,10 +1503,10 @@ static struct drm_crtc_commit *preceeding_commit(struct drm_crtc *crtc)
/**
* drm_atomic_helper_wait_for_dependencies - wait for required preceding commits
- * @state: new modeset state to be committed
+ * @old_state: atomic state object with old state structures
*
* This function waits for all preceding commits that touch the same CRTC as
- * @state to both be committed to the hardware (as signalled by
+ * @old_state to both be committed to the hardware (as signalled by
* drm_atomic_helper_commit_hw_done) and executed by the hardware (as signalled
* by calling drm_crtc_vblank_send_event on the event member of
* &drm_crtc_state).
@@ -1507,7 +1514,7 @@ static struct drm_crtc_commit *preceeding_commit(struct drm_crtc *crtc)
* This is part of the atomic helper support for nonblocking commits, see
* drm_atomic_helper_setup_commit() for an overview.
*/
-void drm_atomic_helper_wait_for_dependencies(struct drm_atomic_state *state)
+void drm_atomic_helper_wait_for_dependencies(struct drm_atomic_state *old_state)
{
struct drm_crtc *crtc;
struct drm_crtc_state *crtc_state;
@@ -1515,7 +1522,7 @@ void drm_atomic_helper_wait_for_dependencies(struct drm_atomic_state *state)
int i;
long ret;
- for_each_crtc_in_state(state, crtc, crtc_state, i) {
+ for_each_crtc_in_state(old_state, crtc, crtc_state, i) {
spin_lock(&crtc->commit_lock);
commit = preceeding_commit(crtc);
if (commit)
@@ -1546,7 +1553,7 @@ EXPORT_SYMBOL(drm_atomic_helper_wait_for_dependencies);
/**
* drm_atomic_helper_commit_hw_done - signal completion of the hardware commit step
- * @state: new modeset state to be committed
+ * @old_state: atomic state object with old state structures
*
* This function is used to signal completion of the hardware commit step. After
* this step the driver is not allowed to read or change any permanent software
@@ -1559,15 +1566,15 @@ EXPORT_SYMBOL(drm_atomic_helper_wait_for_dependencies);
* This is part of the atomic helper support for nonblocking commits, see
* drm_atomic_helper_setup_commit() for an overview.
*/
-void drm_atomic_helper_commit_hw_done(struct drm_atomic_state *state)
+void drm_atomic_helper_commit_hw_done(struct drm_atomic_state *old_state)
{
struct drm_crtc *crtc;
struct drm_crtc_state *crtc_state;
struct drm_crtc_commit *commit;
int i;
- for_each_crtc_in_state(state, crtc, crtc_state, i) {
- commit = state->crtcs[i].commit;
+ for_each_crtc_in_state(old_state, crtc, crtc_state, i) {
+ commit = old_state->crtcs[i].commit;
if (!commit)
continue;
@@ -1582,16 +1589,16 @@ EXPORT_SYMBOL(drm_atomic_helper_commit_hw_done);
/**
* drm_atomic_helper_commit_cleanup_done - signal completion of commit
- * @state: new modeset state to be committed
+ * @old_state: atomic state object with old state structures
*
- * This signals completion of the atomic update @state, including any cleanup
- * work. If used, it must be called right before calling
- * drm_atomic_state_free().
+ * This signals completion of the atomic update @old_state, including any
+ * cleanup work. If used, it must be called right before calling
+ * drm_atomic_state_put().
*
* This is part of the atomic helper support for nonblocking commits, see
* drm_atomic_helper_setup_commit() for an overview.
*/
-void drm_atomic_helper_commit_cleanup_done(struct drm_atomic_state *state)
+void drm_atomic_helper_commit_cleanup_done(struct drm_atomic_state *old_state)
{
struct drm_crtc *crtc;
struct drm_crtc_state *crtc_state;
@@ -1599,8 +1606,8 @@ void drm_atomic_helper_commit_cleanup_done(struct drm_atomic_state *state)
int i;
long ret;
- for_each_crtc_in_state(state, crtc, crtc_state, i) {
- commit = state->crtcs[i].commit;
+ for_each_crtc_in_state(old_state, crtc, crtc_state, i) {
+ commit = old_state->crtcs[i].commit;
if (WARN_ON(!commit))
continue;
@@ -2109,18 +2116,13 @@ retry:
state->legacy_cursor_update = true;
ret = drm_atomic_commit(state);
- if (ret != 0)
- goto fail;
-
- /* Driver takes ownership of state on successful commit. */
- return 0;
fail:
if (ret == -EDEADLK)
goto backoff;
- drm_atomic_state_free(state);
-
+ drm_atomic_state_put(state);
return ret;
+
backoff:
drm_atomic_state_clear(state);
drm_atomic_legacy_backoff(state);
@@ -2182,18 +2184,13 @@ retry:
goto fail;
ret = drm_atomic_commit(state);
- if (ret != 0)
- goto fail;
-
- /* Driver takes ownership of state on successful commit. */
- return 0;
fail:
if (ret == -EDEADLK)
goto backoff;
- drm_atomic_state_free(state);
-
+ drm_atomic_state_put(state);
return ret;
+
backoff:
drm_atomic_state_clear(state);
drm_atomic_legacy_backoff(state);
@@ -2322,18 +2319,13 @@ retry:
goto fail;
ret = drm_atomic_commit(state);
- if (ret != 0)
- goto fail;
-
- /* Driver takes ownership of state on successful commit. */
- return 0;
fail:
if (ret == -EDEADLK)
goto backoff;
- drm_atomic_state_free(state);
-
+ drm_atomic_state_put(state);
return ret;
+
backoff:
drm_atomic_state_clear(state);
drm_atomic_legacy_backoff(state);
@@ -2408,7 +2400,7 @@ int __drm_atomic_helper_set_config(struct drm_mode_set *set,
primary_state->crtc_h = vdisplay;
primary_state->src_x = set->x << 16;
primary_state->src_y = set->y << 16;
- if (primary_state->rotation & (DRM_ROTATE_90 | DRM_ROTATE_270)) {
+ if (drm_rotation_90_or_270(primary_state->rotation)) {
primary_state->src_w = vdisplay << 16;
primary_state->src_h = hdisplay << 16;
} else {
@@ -2475,11 +2467,8 @@ int drm_atomic_helper_disable_all(struct drm_device *dev,
}
err = drm_atomic_commit(state);
-
free:
- if (err < 0)
- drm_atomic_state_free(state);
-
+ drm_atomic_state_put(state);
return err;
}
EXPORT_SYMBOL(drm_atomic_helper_disable_all);
@@ -2530,7 +2519,7 @@ retry:
err = drm_atomic_helper_disable_all(dev, &ctx);
if (err < 0) {
- drm_atomic_state_free(state);
+ drm_atomic_state_put(state);
state = ERR_PTR(err);
goto unlock;
}
@@ -2619,18 +2608,13 @@ retry:
goto fail;
ret = drm_atomic_commit(state);
- if (ret != 0)
- goto fail;
-
- /* Driver takes ownership of state on successful commit. */
- return 0;
fail:
if (ret == -EDEADLK)
goto backoff;
- drm_atomic_state_free(state);
-
+ drm_atomic_state_put(state);
return ret;
+
backoff:
drm_atomic_state_clear(state);
drm_atomic_legacy_backoff(state);
@@ -2679,18 +2663,13 @@ retry:
goto fail;
ret = drm_atomic_commit(state);
- if (ret != 0)
- goto fail;
-
- /* Driver takes ownership of state on successful commit. */
- return 0;
fail:
if (ret == -EDEADLK)
goto backoff;
- drm_atomic_state_free(state);
-
+ drm_atomic_state_put(state);
return ret;
+
backoff:
drm_atomic_state_clear(state);
drm_atomic_legacy_backoff(state);
@@ -2739,18 +2718,13 @@ retry:
goto fail;
ret = drm_atomic_commit(state);
- if (ret != 0)
- goto fail;
-
- /* Driver takes ownership of state on successful commit. */
- return 0;
fail:
if (ret == -EDEADLK)
goto backoff;
- drm_atomic_state_free(state);
-
+ drm_atomic_state_put(state);
return ret;
+
backoff:
drm_atomic_state_clear(state);
drm_atomic_legacy_backoff(state);
@@ -2823,18 +2797,13 @@ retry:
}
ret = drm_atomic_nonblocking_commit(state);
- if (ret != 0)
- goto fail;
-
- /* Driver takes ownership of state on successful commit. */
- return 0;
fail:
if (ret == -EDEADLK)
goto backoff;
- drm_atomic_state_free(state);
-
+ drm_atomic_state_put(state);
return ret;
+
backoff:
drm_atomic_state_clear(state);
drm_atomic_legacy_backoff(state);
@@ -2910,19 +2879,14 @@ retry:
crtc_state->active = active;
ret = drm_atomic_commit(state);
- if (ret != 0)
- goto fail;
-
- /* Driver takes ownership of state on successful commit. */
- return 0;
fail:
if (ret == -EDEADLK)
goto backoff;
-
- connector->dpms = old_mode;
- drm_atomic_state_free(state);
-
+ if (ret != 0)
+ connector->dpms = old_mode;
+ drm_atomic_state_put(state);
return ret;
+
backoff:
drm_atomic_state_clear(state);
drm_atomic_legacy_backoff(state);
@@ -3113,6 +3077,8 @@ void __drm_atomic_helper_plane_duplicate_state(struct drm_plane *plane,
if (state->fb)
drm_framebuffer_reference(state->fb);
+
+ state->fence = NULL;
}
EXPORT_SYMBOL(__drm_atomic_helper_plane_duplicate_state);
@@ -3151,6 +3117,9 @@ void __drm_atomic_helper_plane_destroy_state(struct drm_plane_state *state)
{
if (state->fb)
drm_framebuffer_unreference(state->fb);
+
+ if (state->fence)
+ dma_fence_put(state->fence);
}
EXPORT_SYMBOL(__drm_atomic_helper_plane_destroy_state);
@@ -3329,7 +3298,7 @@ drm_atomic_helper_duplicate_state(struct drm_device *dev,
free:
if (err < 0) {
- drm_atomic_state_free(state);
+ drm_atomic_state_put(state);
state = ERR_PTR(err);
}
@@ -3444,22 +3413,14 @@ retry:
goto fail;
ret = drm_atomic_commit(state);
- if (ret)
- goto fail;
-
- /* Driver takes ownership of state on successful commit. */
-
- drm_property_unreference_blob(blob);
-
- return 0;
fail:
if (ret == -EDEADLK)
goto backoff;
- drm_atomic_state_free(state);
+ drm_atomic_state_put(state);
drm_property_unreference_blob(blob);
-
return ret;
+
backoff:
drm_atomic_state_clear(state);
drm_atomic_legacy_backoff(state);
diff --git a/drivers/gpu/drm/drm_blend.c b/drivers/gpu/drm/drm_blend.c
index 85172a977bf3..1f2412c7ccfd 100644
--- a/drivers/gpu/drm/drm_blend.c
+++ b/drivers/gpu/drm/drm_blend.c
@@ -89,7 +89,7 @@
* On top of this basic transformation additional properties can be exposed by
* the driver:
*
- * - Rotation is set up with drm_mode_create_rotation_property(). It adds a
+ * - Rotation is set up with drm_plane_create_rotation_property(). It adds a
* rotation and reflection step between the source and destination rectangles.
* Without this property the rectangle is only scaled, but not rotated or
* reflected.
@@ -105,18 +105,12 @@
*/
/**
- * drm_mode_create_rotation_property - create a new rotation property
- * @dev: DRM device
+ * drm_plane_create_rotation_property - create a new rotation property
+ * @plane: drm plane
+ * @rotation: initial value of the rotation property
* @supported_rotations: bitmask of supported rotations and reflections
*
* This creates a new property with the selected support for transformations.
- * The resulting property should be stored in @rotation_property in
- * &drm_mode_config. It then must be attached to each plane which supports
- * rotations using drm_object_attach_property().
- *
- * FIXME: Probably better if the rotation property is created on each plane,
- * like the zpos property. Otherwise it's not possible to allow different
- * rotation modes on different planes.
*
* Since a rotation by 180° is the same as reflecting both along the x
* and the y axis the rotation property is somewhat redundant. Drivers can use
@@ -144,8 +138,9 @@
* rotation. After reflection, the rotation is applied to the image sampled from
* the source rectangle, before scaling it to fit the destination rectangle.
*/
-struct drm_property *drm_mode_create_rotation_property(struct drm_device *dev,
- unsigned int supported_rotations)
+int drm_plane_create_rotation_property(struct drm_plane *plane,
+ unsigned int rotation,
+ unsigned int supported_rotations)
{
static const struct drm_prop_enum_list props[] = {
{ __builtin_ffs(DRM_ROTATE_0) - 1, "rotate-0" },
@@ -155,12 +150,28 @@ struct drm_property *drm_mode_create_rotation_property(struct drm_device *dev,
{ __builtin_ffs(DRM_REFLECT_X) - 1, "reflect-x" },
{ __builtin_ffs(DRM_REFLECT_Y) - 1, "reflect-y" },
};
+ struct drm_property *prop;
+
+ WARN_ON((supported_rotations & DRM_ROTATE_MASK) == 0);
+ WARN_ON(!is_power_of_2(rotation & DRM_ROTATE_MASK));
+ WARN_ON(rotation & ~supported_rotations);
- return drm_property_create_bitmask(dev, 0, "rotation",
+ prop = drm_property_create_bitmask(plane->dev, 0, "rotation",
props, ARRAY_SIZE(props),
supported_rotations);
+ if (!prop)
+ return -ENOMEM;
+
+ drm_object_attach_property(&plane->base, prop, rotation);
+
+ if (plane->state)
+ plane->state->rotation = rotation;
+
+ plane->rotation_property = prop;
+
+ return 0;
}
-EXPORT_SYMBOL(drm_mode_create_rotation_property);
+EXPORT_SYMBOL(drm_plane_create_rotation_property);
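A sketch of the per-plane setup this replaces the old global property with, e.g. for hardware that can do 0°/180° rotation plus X reflection:

    /* Called from a hypothetical driver's plane-init path. */
    static int foo_plane_init_rotation(struct drm_plane *plane)
    {
            return drm_plane_create_rotation_property(plane, DRM_ROTATE_0,
                                                      DRM_ROTATE_0 |
                                                      DRM_ROTATE_180 |
                                                      DRM_REFLECT_X);
    }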
/**
* drm_rotation_simplify() - Try to simplify the rotation
diff --git a/drivers/gpu/drm/drm_color_mgmt.c b/drivers/gpu/drm/drm_color_mgmt.c
index d28ffdd2b929..6543ebde501a 100644
--- a/drivers/gpu/drm/drm_color_mgmt.c
+++ b/drivers/gpu/drm/drm_color_mgmt.c
@@ -41,6 +41,10 @@
* nor use all the elements of the LUT (for example the hardware might
* choose to interpolate between LUT[0] and LUT[4]).
*
+ * Setting this to NULL (blob property value set to 0) means a
+ * linear/pass-thru gamma table should be used. This is generally the
+ * driver boot-up state too.
+ *
* “DEGAMMA_LUT_SIZE”:
* Unsigned range property to give the size of the lookup table to be set
* on the DEGAMMA_LUT property (the size depends on the underlying
@@ -54,6 +58,10 @@
* lookup through the gamma LUT. The data is interpreted as a struct
* &drm_color_ctm.
*
+ * Setting this to NULL (blob property value set to 0) means a
+ * unit/pass-thru matrix should be used. This is generally the driver
+ * boot-up state too.
+ *
* “GAMMA_LUT”:
* Blob property to set the gamma lookup table (LUT) mapping pixel data
* after the transformation matrix to data sent to the connector. The
@@ -62,6 +70,10 @@
* nor use all the elements of the LUT (for example the hardware might
* choose to interpolate between LUT[0] and LUT[4]).
*
+ * Setting this to NULL (blob property value set to 0) means a
+ * linear/pass-thru gamma table should be used. This is generally the
+ * driver boot-up state too.
+ *
* “GAMMA_LUT_SIZE”:
* Unsigned range property to give the size of the lookup table to be set
* on the GAMMA_LUT property (the size depends on the underlying hardware).
diff --git a/drivers/gpu/drm/drm_connector.c b/drivers/gpu/drm/drm_connector.c
index 2db7fb510b6c..5a4526289392 100644
--- a/drivers/gpu/drm/drm_connector.c
+++ b/drivers/gpu/drm/drm_connector.c
@@ -588,6 +588,50 @@ static const struct drm_prop_enum_list drm_tv_subconnector_enum_list[] = {
DRM_ENUM_NAME_FN(drm_get_tv_subconnector_name,
drm_tv_subconnector_enum_list)
+/**
+ * DOC: standard connector properties
+ *
+ * DRM connectors have a few standardized properties:
+ *
+ * EDID:
+ * Blob property which contains the current EDID read from the sink. This
+ * is useful to parse sink identification information like vendor, model
+ * and serial. Drivers should update this property by calling
+ * drm_mode_connector_update_edid_property(), usually after having parsed
+ * the EDID using drm_add_edid_modes(). Userspace cannot change this
+ * property.
+ * DPMS:
+ * Legacy property for setting the power state of the connector. For atomic
+ * drivers this is only provided for backwards compatibility with existing
+ * drivers, it remaps to controlling the "ACTIVE" property on the CRTC the
+ * connector is linked to. Drivers should never set this property directly,
+ * it is handled by the DRM core by calling the ->dpms() callback in
+ * &drm_connector_funcs. Atomic drivers should implement this hook using
+ * drm_atomic_helper_connector_dpms(). This is the only standard
+ * connector property that userspace can change.
+ * PATH:
+ * Connector path property to identify how this sink is physically
+ * connected. Used by DP MST. This should be set by calling
+ * drm_mode_connector_set_path_property(), in the case of DP MST with the
+ * path property the MST manager created. Userspace cannot change this
+ * property.
+ * TILE:
+ * Connector tile group property to indicate how a set of DRM connectors
+ * composes together into one logical screen. This is used by both high-res
+ * external screens (often only using a single cable, but exposing multiple
+ * DP MST sinks) and high-res integrated panels (like dual-link DSI) which
+ * are not genlocked. Note that for tiled panels which are genlocked, like
+ * dual-link LVDS or dual-link DSI, the driver should try to not expose the
+ * tiling and virtualize both &drm_crtc and &drm_plane if needed. Drivers
+ * should update this value using drm_mode_connector_set_tile_property().
+ * Userspace cannot change this property.
+ *
+ * Connectors also have one standardized atomic property:
+ *
+ * CRTC_ID:
+ * Mode object ID of the &drm_crtc this connector should be connected to.
+ */
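For illustration, userspace can read the immutable EDID blob documented above with libdrm along these lines (a sketch; error handling trimmed):

    #include <string.h>
    #include <xf86drmMode.h>

    drmModeObjectProperties *props =
            drmModeObjectGetProperties(fd, conn_id, DRM_MODE_OBJECT_CONNECTOR);

    for (uint32_t i = 0; props && i < props->count_props; i++) {
            drmModePropertyRes *prop = drmModeGetProperty(fd, props->props[i]);

            if (prop && !strcmp(prop->name, "EDID")) {
                    drmModePropertyBlobRes *edid =
                            drmModeGetPropertyBlob(fd, props->prop_values[i]);
                    /* edid->data / edid->length hold the raw EDID bytes. */
                    drmModeFreePropertyBlob(edid);
            }
            drmModeFreeProperty(prop);
    }
    drmModeFreeObjectProperties(props);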
+
int drm_connector_create_standard_properties(struct drm_device *dev)
{
struct drm_property *prop;
@@ -1121,3 +1165,107 @@ out_unlock:
return ret;
}
+
+/**
+ * DOC: Tile group
+ *
+ * Tile groups are used to represent tiled monitors with a unique integer
+ * identifier. Tiled monitors using DisplayID v1.3 have a unique 8-byte handle;
+ * we store this in a tile group, so we have a common identifier for all tiles
+ * in a monitor group. The property is called "TILE". Drivers can manage tile
+ * groups using drm_mode_create_tile_group(), drm_mode_put_tile_group() and
+ * drm_mode_get_tile_group(). But this is only needed for internal panels where
+ * the tile group information is exposed through a non-standard way.
+ */
+
+static void drm_tile_group_free(struct kref *kref)
+{
+ struct drm_tile_group *tg = container_of(kref, struct drm_tile_group, refcount);
+ struct drm_device *dev = tg->dev;
+ mutex_lock(&dev->mode_config.idr_mutex);
+ idr_remove(&dev->mode_config.tile_idr, tg->id);
+ mutex_unlock(&dev->mode_config.idr_mutex);
+ kfree(tg);
+}
+
+/**
+ * drm_mode_put_tile_group - drop a reference to a tile group.
+ * @dev: DRM device
+ * @tg: tile group to drop reference to.
+ *
+ * Drop a reference to the tile group and free it if the refcount reaches zero.
+ */
+void drm_mode_put_tile_group(struct drm_device *dev,
+ struct drm_tile_group *tg)
+{
+ kref_put(&tg->refcount, drm_tile_group_free);
+}
+EXPORT_SYMBOL(drm_mode_put_tile_group);
+
+/**
+ * drm_mode_get_tile_group - get a reference to an existing tile group
+ * @dev: DRM device
+ * @topology: 8-byte identifier, unique per monitor.
+ *
+ * Use the unique bytes to get a reference to an existing tile group.
+ *
+ * RETURNS:
+ * tile group or NULL if not found.
+ */
+struct drm_tile_group *drm_mode_get_tile_group(struct drm_device *dev,
+ char topology[8])
+{
+ struct drm_tile_group *tg;
+ int id;
+ mutex_lock(&dev->mode_config.idr_mutex);
+ idr_for_each_entry(&dev->mode_config.tile_idr, tg, id) {
+ if (!memcmp(tg->group_data, topology, 8)) {
+ if (!kref_get_unless_zero(&tg->refcount))
+ tg = NULL;
+ mutex_unlock(&dev->mode_config.idr_mutex);
+ return tg;
+ }
+ }
+ mutex_unlock(&dev->mode_config.idr_mutex);
+ return NULL;
+}
+EXPORT_SYMBOL(drm_mode_get_tile_group);
+
+/**
+ * drm_mode_create_tile_group - create a tile group from a displayid description
+ * @dev: DRM device
+ * @topology: 8-byte identifier, unique per monitor.
+ *
+ * Create a tile group for the unique monitor, and get a unique
+ * identifier for the tile group.
+ *
+ * RETURNS:
+ * new tile group or error.
+ */
+struct drm_tile_group *drm_mode_create_tile_group(struct drm_device *dev,
+ char topology[8])
+{
+ struct drm_tile_group *tg;
+ int ret;
+
+ tg = kzalloc(sizeof(*tg), GFP_KERNEL);
+ if (!tg)
+ return ERR_PTR(-ENOMEM);
+
+ kref_init(&tg->refcount);
+ memcpy(tg->group_data, topology, 8);
+ tg->dev = dev;
+
+ mutex_lock(&dev->mode_config.idr_mutex);
+ ret = idr_alloc(&dev->mode_config.tile_idr, tg, 1, 0, GFP_KERNEL);
+ if (ret >= 0) {
+ tg->id = ret;
+ } else {
+ kfree(tg);
+ tg = ERR_PTR(ret);
+ }
+
+ mutex_unlock(&dev->mode_config.idr_mutex);
+ return tg;
+}
+EXPORT_SYMBOL(drm_mode_create_tile_group);
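Taken together, a driver parsing a DisplayID tile block out of an EDID would use these along the lines of the following sketch (assuming the connector keeps the reference in its tile_group field, as the DP MST code does):

    struct drm_tile_group *tg;

    tg = drm_mode_get_tile_group(dev, topology);
    if (!tg)
            tg = drm_mode_create_tile_group(dev, topology);
    if (IS_ERR(tg))
            return PTR_ERR(tg);

    /* Dropped again with drm_mode_put_tile_group() on connector teardown. */
    connector->tile_group = tg;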
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index 2d7bedf28647..e75f62cd8a65 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -33,6 +33,7 @@
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/export.h>
+#include <linux/dma-fence.h>
#include <drm/drmP.h>
#include <drm/drm_crtc.h>
#include <drm/drm_edid.h>
@@ -40,23 +41,11 @@
#include <drm/drm_modeset_lock.h>
#include <drm/drm_atomic.h>
#include <drm/drm_auth.h>
-#include <drm/drm_framebuffer.h>
+#include <drm/drm_debugfs_crc.h>
#include "drm_crtc_internal.h"
#include "drm_internal.h"
-/*
- * Global properties
- */
-static const struct drm_prop_enum_list drm_plane_type_enum_list[] = {
- { DRM_PLANE_TYPE_OVERLAY, "Overlay" },
- { DRM_PLANE_TYPE_PRIMARY, "Primary" },
- { DRM_PLANE_TYPE_CURSOR, "Cursor" },
-};
-
-/*
- * Optional properties
- */
/**
* drm_crtc_force_disable - Forcibly turn off a CRTC
* @crtc: CRTC to turn off
@@ -102,8 +91,6 @@ out:
}
EXPORT_SYMBOL(drm_crtc_force_disable_all);
-DEFINE_WW_CLASS(crtc_ww_class);
-
static unsigned int drm_num_crtcs(struct drm_device *dev)
{
unsigned int num = 0;
@@ -116,12 +103,16 @@ static unsigned int drm_num_crtcs(struct drm_device *dev)
return num;
}
-static int drm_crtc_register_all(struct drm_device *dev)
+int drm_crtc_register_all(struct drm_device *dev)
{
struct drm_crtc *crtc;
int ret = 0;
drm_for_each_crtc(crtc, dev) {
+ if (drm_debugfs_crtc_add(crtc))
+ DRM_ERROR("Failed to initialize debugfs entry for CRTC '%s'.\n",
+ crtc->name);
+
if (crtc->funcs->late_register)
ret = crtc->funcs->late_register(crtc);
if (ret)
@@ -131,16 +122,84 @@ static int drm_crtc_register_all(struct drm_device *dev)
return 0;
}
-static void drm_crtc_unregister_all(struct drm_device *dev)
+void drm_crtc_unregister_all(struct drm_device *dev)
{
struct drm_crtc *crtc;
drm_for_each_crtc(crtc, dev) {
if (crtc->funcs->early_unregister)
crtc->funcs->early_unregister(crtc);
+ drm_debugfs_crtc_remove(crtc);
}
}
+static int drm_crtc_crc_init(struct drm_crtc *crtc)
+{
+#ifdef CONFIG_DEBUG_FS
+ spin_lock_init(&crtc->crc.lock);
+ init_waitqueue_head(&crtc->crc.wq);
+ crtc->crc.source = kstrdup("auto", GFP_KERNEL);
+ if (!crtc->crc.source)
+ return -ENOMEM;
+#endif
+ return 0;
+}
+
+static void drm_crtc_crc_fini(struct drm_crtc *crtc)
+{
+#ifdef CONFIG_DEBUG_FS
+ kfree(crtc->crc.source);
+#endif
+}
+
+static const struct dma_fence_ops drm_crtc_fence_ops;
+
+static struct drm_crtc *fence_to_crtc(struct dma_fence *fence)
+{
+ BUG_ON(fence->ops != &drm_crtc_fence_ops);
+ return container_of(fence->lock, struct drm_crtc, fence_lock);
+}
+
+static const char *drm_crtc_fence_get_driver_name(struct dma_fence *fence)
+{
+ struct drm_crtc *crtc = fence_to_crtc(fence);
+
+ return crtc->dev->driver->name;
+}
+
+static const char *drm_crtc_fence_get_timeline_name(struct dma_fence *fence)
+{
+ struct drm_crtc *crtc = fence_to_crtc(fence);
+
+ return crtc->timeline_name;
+}
+
+static bool drm_crtc_fence_enable_signaling(struct dma_fence *fence)
+{
+ return true;
+}
+
+static const struct dma_fence_ops drm_crtc_fence_ops = {
+ .get_driver_name = drm_crtc_fence_get_driver_name,
+ .get_timeline_name = drm_crtc_fence_get_timeline_name,
+ .enable_signaling = drm_crtc_fence_enable_signaling,
+ .wait = dma_fence_default_wait,
+};
+
+struct dma_fence *drm_crtc_create_fence(struct drm_crtc *crtc)
+{
+ struct dma_fence *fence;
+
+ fence = kzalloc(sizeof(*fence), GFP_KERNEL);
+ if (!fence)
+ return NULL;
+
+ dma_fence_init(fence, &drm_crtc_fence_ops, &crtc->fence_lock,
+ crtc->fence_context, ++crtc->fence_seqno);
+
+ return fence;
+}
+
/**
* drm_crtc_init_with_planes - Initialise a new CRTC object with
* specified primary and cursor planes.
@@ -198,6 +257,11 @@ int drm_crtc_init_with_planes(struct drm_device *dev, struct drm_crtc *crtc,
return -ENOMEM;
}
+ crtc->fence_context = dma_fence_context_alloc(1);
+ spin_lock_init(&crtc->fence_lock);
+ snprintf(crtc->timeline_name, sizeof(crtc->timeline_name),
+ "CRTC:%d-%s", crtc->base.id, crtc->name);
+
crtc->base.properties = &crtc->properties;
list_add_tail(&crtc->head, &config->crtc_list);
@@ -205,14 +269,22 @@ int drm_crtc_init_with_planes(struct drm_device *dev, struct drm_crtc *crtc,
crtc->primary = primary;
crtc->cursor = cursor;
- if (primary)
+ if (primary && !primary->possible_crtcs)
primary->possible_crtcs = 1 << drm_crtc_index(crtc);
- if (cursor)
+ if (cursor && !cursor->possible_crtcs)
cursor->possible_crtcs = 1 << drm_crtc_index(crtc);
+ ret = drm_crtc_crc_init(crtc);
+ if (ret) {
+ drm_mode_object_unregister(dev, &crtc->base);
+ return ret;
+ }
+
if (drm_core_check_feature(dev, DRIVER_ATOMIC)) {
drm_object_attach_property(&crtc->base, config->prop_active, 0);
drm_object_attach_property(&crtc->base, config->prop_mode_id, 0);
+ drm_object_attach_property(&crtc->base,
+ config->prop_out_fence_ptr, 0);
}
return 0;
@@ -236,6 +308,8 @@ void drm_crtc_cleanup(struct drm_crtc *crtc)
* the indices on the drm_crtc after us in the crtc_list.
*/
+ drm_crtc_crc_fini(crtc);
+
kfree(crtc->gamma_store);
crtc->gamma_store = NULL;
@@ -255,301 +329,6 @@ void drm_crtc_cleanup(struct drm_crtc *crtc)
}
EXPORT_SYMBOL(drm_crtc_cleanup);
-int drm_modeset_register_all(struct drm_device *dev)
-{
- int ret;
-
- ret = drm_plane_register_all(dev);
- if (ret)
- goto err_plane;
-
- ret = drm_crtc_register_all(dev);
- if (ret)
- goto err_crtc;
-
- ret = drm_encoder_register_all(dev);
- if (ret)
- goto err_encoder;
-
- ret = drm_connector_register_all(dev);
- if (ret)
- goto err_connector;
-
- return 0;
-
-err_connector:
- drm_encoder_unregister_all(dev);
-err_encoder:
- drm_crtc_unregister_all(dev);
-err_crtc:
- drm_plane_unregister_all(dev);
-err_plane:
- return ret;
-}
-
-void drm_modeset_unregister_all(struct drm_device *dev)
-{
- drm_connector_unregister_all(dev);
- drm_encoder_unregister_all(dev);
- drm_crtc_unregister_all(dev);
- drm_plane_unregister_all(dev);
-}
-
-static int drm_mode_create_standard_properties(struct drm_device *dev)
-{
- struct drm_property *prop;
- int ret;
-
- ret = drm_connector_create_standard_properties(dev);
- if (ret)
- return ret;
-
- prop = drm_property_create_enum(dev, DRM_MODE_PROP_IMMUTABLE,
- "type", drm_plane_type_enum_list,
- ARRAY_SIZE(drm_plane_type_enum_list));
- if (!prop)
- return -ENOMEM;
- dev->mode_config.plane_type_property = prop;
-
- prop = drm_property_create_range(dev, DRM_MODE_PROP_ATOMIC,
- "SRC_X", 0, UINT_MAX);
- if (!prop)
- return -ENOMEM;
- dev->mode_config.prop_src_x = prop;
-
- prop = drm_property_create_range(dev, DRM_MODE_PROP_ATOMIC,
- "SRC_Y", 0, UINT_MAX);
- if (!prop)
- return -ENOMEM;
- dev->mode_config.prop_src_y = prop;
-
- prop = drm_property_create_range(dev, DRM_MODE_PROP_ATOMIC,
- "SRC_W", 0, UINT_MAX);
- if (!prop)
- return -ENOMEM;
- dev->mode_config.prop_src_w = prop;
-
- prop = drm_property_create_range(dev, DRM_MODE_PROP_ATOMIC,
- "SRC_H", 0, UINT_MAX);
- if (!prop)
- return -ENOMEM;
- dev->mode_config.prop_src_h = prop;
-
- prop = drm_property_create_signed_range(dev, DRM_MODE_PROP_ATOMIC,
- "CRTC_X", INT_MIN, INT_MAX);
- if (!prop)
- return -ENOMEM;
- dev->mode_config.prop_crtc_x = prop;
-
- prop = drm_property_create_signed_range(dev, DRM_MODE_PROP_ATOMIC,
- "CRTC_Y", INT_MIN, INT_MAX);
- if (!prop)
- return -ENOMEM;
- dev->mode_config.prop_crtc_y = prop;
-
- prop = drm_property_create_range(dev, DRM_MODE_PROP_ATOMIC,
- "CRTC_W", 0, INT_MAX);
- if (!prop)
- return -ENOMEM;
- dev->mode_config.prop_crtc_w = prop;
-
- prop = drm_property_create_range(dev, DRM_MODE_PROP_ATOMIC,
- "CRTC_H", 0, INT_MAX);
- if (!prop)
- return -ENOMEM;
- dev->mode_config.prop_crtc_h = prop;
-
- prop = drm_property_create_object(dev, DRM_MODE_PROP_ATOMIC,
- "FB_ID", DRM_MODE_OBJECT_FB);
- if (!prop)
- return -ENOMEM;
- dev->mode_config.prop_fb_id = prop;
-
- prop = drm_property_create_object(dev, DRM_MODE_PROP_ATOMIC,
- "CRTC_ID", DRM_MODE_OBJECT_CRTC);
- if (!prop)
- return -ENOMEM;
- dev->mode_config.prop_crtc_id = prop;
-
- prop = drm_property_create_bool(dev, DRM_MODE_PROP_ATOMIC,
- "ACTIVE");
- if (!prop)
- return -ENOMEM;
- dev->mode_config.prop_active = prop;
-
- prop = drm_property_create(dev,
- DRM_MODE_PROP_ATOMIC | DRM_MODE_PROP_BLOB,
- "MODE_ID", 0);
- if (!prop)
- return -ENOMEM;
- dev->mode_config.prop_mode_id = prop;
-
- prop = drm_property_create(dev,
- DRM_MODE_PROP_BLOB,
- "DEGAMMA_LUT", 0);
- if (!prop)
- return -ENOMEM;
- dev->mode_config.degamma_lut_property = prop;
-
- prop = drm_property_create_range(dev,
- DRM_MODE_PROP_IMMUTABLE,
- "DEGAMMA_LUT_SIZE", 0, UINT_MAX);
- if (!prop)
- return -ENOMEM;
- dev->mode_config.degamma_lut_size_property = prop;
-
- prop = drm_property_create(dev,
- DRM_MODE_PROP_BLOB,
- "CTM", 0);
- if (!prop)
- return -ENOMEM;
- dev->mode_config.ctm_property = prop;
-
- prop = drm_property_create(dev,
- DRM_MODE_PROP_BLOB,
- "GAMMA_LUT", 0);
- if (!prop)
- return -ENOMEM;
- dev->mode_config.gamma_lut_property = prop;
-
- prop = drm_property_create_range(dev,
- DRM_MODE_PROP_IMMUTABLE,
- "GAMMA_LUT_SIZE", 0, UINT_MAX);
- if (!prop)
- return -ENOMEM;
- dev->mode_config.gamma_lut_size_property = prop;
-
- return 0;
-}
-
-/**
- * drm_mode_getresources - get graphics configuration
- * @dev: drm device for the ioctl
- * @data: data pointer for the ioctl
- * @file_priv: drm file for the ioctl call
- *
- * Construct a set of configuration description structures and return
- * them to the user, including CRTC, connector and framebuffer configuration.
- *
- * Called by the user via ioctl.
- *
- * Returns:
- * Zero on success, negative errno on failure.
- */
-int drm_mode_getresources(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_mode_card_res *card_res = data;
- struct list_head *lh;
- struct drm_framebuffer *fb;
- struct drm_connector *connector;
- struct drm_crtc *crtc;
- struct drm_encoder *encoder;
- int ret = 0;
- int connector_count = 0;
- int crtc_count = 0;
- int fb_count = 0;
- int encoder_count = 0;
- int copied = 0;
- uint32_t __user *fb_id;
- uint32_t __user *crtc_id;
- uint32_t __user *connector_id;
- uint32_t __user *encoder_id;
-
- if (!drm_core_check_feature(dev, DRIVER_MODESET))
- return -EINVAL;
-
-
- mutex_lock(&file_priv->fbs_lock);
- /*
- * For the non-control nodes we need to limit the list of resources
- * by IDs in the group list for this node
- */
- list_for_each(lh, &file_priv->fbs)
- fb_count++;
-
- /* handle this in 4 parts */
- /* FBs */
- if (card_res->count_fbs >= fb_count) {
- copied = 0;
- fb_id = (uint32_t __user *)(unsigned long)card_res->fb_id_ptr;
- list_for_each_entry(fb, &file_priv->fbs, filp_head) {
- if (put_user(fb->base.id, fb_id + copied)) {
- mutex_unlock(&file_priv->fbs_lock);
- return -EFAULT;
- }
- copied++;
- }
- }
- card_res->count_fbs = fb_count;
- mutex_unlock(&file_priv->fbs_lock);
-
- /* mode_config.mutex protects the connector list against e.g. DP MST
- * connector hot-adding. CRTC/Plane lists are invariant. */
- mutex_lock(&dev->mode_config.mutex);
- drm_for_each_crtc(crtc, dev)
- crtc_count++;
-
- drm_for_each_connector(connector, dev)
- connector_count++;
-
- drm_for_each_encoder(encoder, dev)
- encoder_count++;
-
- card_res->max_height = dev->mode_config.max_height;
- card_res->min_height = dev->mode_config.min_height;
- card_res->max_width = dev->mode_config.max_width;
- card_res->min_width = dev->mode_config.min_width;
-
- /* CRTCs */
- if (card_res->count_crtcs >= crtc_count) {
- copied = 0;
- crtc_id = (uint32_t __user *)(unsigned long)card_res->crtc_id_ptr;
- drm_for_each_crtc(crtc, dev) {
- if (put_user(crtc->base.id, crtc_id + copied)) {
- ret = -EFAULT;
- goto out;
- }
- copied++;
- }
- }
- card_res->count_crtcs = crtc_count;
-
- /* Encoders */
- if (card_res->count_encoders >= encoder_count) {
- copied = 0;
- encoder_id = (uint32_t __user *)(unsigned long)card_res->encoder_id_ptr;
- drm_for_each_encoder(encoder, dev) {
- if (put_user(encoder->base.id, encoder_id +
- copied)) {
- ret = -EFAULT;
- goto out;
- }
- copied++;
- }
- }
- card_res->count_encoders = encoder_count;
-
- /* Connectors */
- if (card_res->count_connectors >= connector_count) {
- copied = 0;
- connector_id = (uint32_t __user *)(unsigned long)card_res->connector_id_ptr;
- drm_for_each_connector(connector, dev) {
- if (put_user(connector->base.id,
- connector_id + copied)) {
- ret = -EFAULT;
- goto out;
- }
- copied++;
- }
- }
- card_res->count_connectors = connector_count;
-
-out:
- mutex_unlock(&dev->mode_config.mutex);
- return ret;
-}
-
/**
* drm_mode_getcrtc - get CRTC configuration
* @dev: drm device for the ioctl
@@ -695,8 +474,7 @@ int drm_crtc_check_viewport(const struct drm_crtc *crtc,
drm_crtc_get_hv_timing(mode, &hdisplay, &vdisplay);
if (crtc->state &&
- crtc->primary->state->rotation & (DRM_ROTATE_90 |
- DRM_ROTATE_270))
+ drm_rotation_90_or_270(crtc->primary->state->rotation))
swap(hdisplay, vdisplay);
return drm_framebuffer_check_src_coords(x << 16, y << 16,
@@ -796,9 +574,10 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
ret = drm_plane_check_pixel_format(crtc->primary,
fb->pixel_format);
if (ret) {
- char *format_name = drm_get_format_name(fb->pixel_format);
- DRM_DEBUG_KMS("Invalid pixel format %s\n", format_name);
- kfree(format_name);
+ struct drm_format_name_buf format_name;
+ DRM_DEBUG_KMS("Invalid pixel format %s\n",
+ drm_get_format_name(fb->pixel_format,
+ &format_name));
goto out;
}
}
@@ -902,362 +681,3 @@ int drm_mode_crtc_set_obj_prop(struct drm_mode_object *obj,
return ret;
}
-
-/**
- * drm_mode_config_reset - call ->reset callbacks
- * @dev: drm device
- *
- * This function calls all the plane's, crtc's, encoder's and connector's
- * ->reset callbacks. Drivers can use this in e.g. their driver load or resume
- * code to reset hardware and software state.
- */
-void drm_mode_config_reset(struct drm_device *dev)
-{
- struct drm_crtc *crtc;
- struct drm_plane *plane;
- struct drm_encoder *encoder;
- struct drm_connector *connector;
-
- drm_for_each_plane(plane, dev)
- if (plane->funcs->reset)
- plane->funcs->reset(plane);
-
- drm_for_each_crtc(crtc, dev)
- if (crtc->funcs->reset)
- crtc->funcs->reset(crtc);
-
- drm_for_each_encoder(encoder, dev)
- if (encoder->funcs->reset)
- encoder->funcs->reset(encoder);
-
- mutex_lock(&dev->mode_config.mutex);
- drm_for_each_connector(connector, dev)
- if (connector->funcs->reset)
- connector->funcs->reset(connector);
- mutex_unlock(&dev->mode_config.mutex);
-}
-EXPORT_SYMBOL(drm_mode_config_reset);
-
-/**
- * drm_mode_create_dumb_ioctl - create a dumb backing storage buffer
- * @dev: DRM device
- * @data: ioctl data
- * @file_priv: DRM file info
- *
- * This creates a new dumb buffer in the driver's backing storage manager (GEM,
- * TTM or something else entirely) and returns the resulting buffer handle. This
- * handle can then be wrapped up into a framebuffer modeset object.
- *
- * Note that userspace is not allowed to use such objects for render
- * acceleration - drivers must create their own private ioctls for such a use
- * case.
- *
- * Called by the user via ioctl.
- *
- * Returns:
- * Zero on success, negative errno on failure.
- */
-int drm_mode_create_dumb_ioctl(struct drm_device *dev,
- void *data, struct drm_file *file_priv)
-{
- struct drm_mode_create_dumb *args = data;
- u32 cpp, stride, size;
-
- if (!dev->driver->dumb_create)
- return -ENOSYS;
- if (!args->width || !args->height || !args->bpp)
- return -EINVAL;
-
- /* overflow checks for 32bit size calculations */
- /* NOTE: DIV_ROUND_UP() can overflow */
- cpp = DIV_ROUND_UP(args->bpp, 8);
- if (!cpp || cpp > 0xffffffffU / args->width)
- return -EINVAL;
- stride = cpp * args->width;
- if (args->height > 0xffffffffU / stride)
- return -EINVAL;
-
- /* test for wrap-around */
- size = args->height * stride;
- if (PAGE_ALIGN(size) == 0)
- return -EINVAL;
-
- /*
- * handle, pitch and size are output parameters. Zero them out to
- * prevent drivers from accidentally using uninitialized data. Since
- * not all existing userspace is clearing these fields properly we
- * cannot reject IOCTL with garbage in them.
- */
- args->handle = 0;
- args->pitch = 0;
- args->size = 0;
-
- return dev->driver->dumb_create(file_priv, dev, args);
-}
-
-/**
- * drm_mode_mmap_dumb_ioctl - create an mmap offset for a dumb backing storage buffer
- * @dev: DRM device
- * @data: ioctl data
- * @file_priv: DRM file info
- *
- * Allocate an offset in the drm device node's address space to be able to
- * memory map a dumb buffer.
- *
- * Called by the user via ioctl.
- *
- * Returns:
- * Zero on success, negative errno on failure.
- */
-int drm_mode_mmap_dumb_ioctl(struct drm_device *dev,
- void *data, struct drm_file *file_priv)
-{
- struct drm_mode_map_dumb *args = data;
-
- /* call driver ioctl to get mmap offset */
- if (!dev->driver->dumb_map_offset)
- return -ENOSYS;
-
- return dev->driver->dumb_map_offset(file_priv, dev, args->handle, &args->offset);
-}
-
-/**
- * drm_mode_destroy_dumb_ioctl - destroy a dumb backing storage buffer
- * @dev: DRM device
- * @data: ioctl data
- * @file_priv: DRM file info
- *
- * This destroys the userspace handle for the given dumb backing storage buffer.
- * Since buffer objects must be reference counted in the kernel a buffer object
- * won't be immediately freed if a framebuffer modeset object still uses it.
- *
- * Called by the user via ioctl.
- *
- * Returns:
- * Zero on success, negative errno on failure.
- */
-int drm_mode_destroy_dumb_ioctl(struct drm_device *dev,
- void *data, struct drm_file *file_priv)
-{
- struct drm_mode_destroy_dumb *args = data;
-
- if (!dev->driver->dumb_destroy)
- return -ENOSYS;
-
- return dev->driver->dumb_destroy(file_priv, dev, args->handle);
-}
-
-/**
- * drm_mode_config_init - initialize DRM mode_configuration structure
- * @dev: DRM device
- *
- * Initialize @dev's mode_config structure, used for tracking the graphics
- * configuration of @dev.
- *
- * Since this initializes the modeset locks, no locking is possible; that is no
- * problem, since this should happen single-threaded at init time. It is the
- * driver's job to ensure this guarantee.
- *
- */
-void drm_mode_config_init(struct drm_device *dev)
-{
- mutex_init(&dev->mode_config.mutex);
- drm_modeset_lock_init(&dev->mode_config.connection_mutex);
- mutex_init(&dev->mode_config.idr_mutex);
- mutex_init(&dev->mode_config.fb_lock);
- mutex_init(&dev->mode_config.blob_lock);
- INIT_LIST_HEAD(&dev->mode_config.fb_list);
- INIT_LIST_HEAD(&dev->mode_config.crtc_list);
- INIT_LIST_HEAD(&dev->mode_config.connector_list);
- INIT_LIST_HEAD(&dev->mode_config.encoder_list);
- INIT_LIST_HEAD(&dev->mode_config.property_list);
- INIT_LIST_HEAD(&dev->mode_config.property_blob_list);
- INIT_LIST_HEAD(&dev->mode_config.plane_list);
- idr_init(&dev->mode_config.crtc_idr);
- idr_init(&dev->mode_config.tile_idr);
- ida_init(&dev->mode_config.connector_ida);
-
- drm_modeset_lock_all(dev);
- drm_mode_create_standard_properties(dev);
- drm_modeset_unlock_all(dev);
-
- /* Just to be sure */
- dev->mode_config.num_fb = 0;
- dev->mode_config.num_connector = 0;
- dev->mode_config.num_crtc = 0;
- dev->mode_config.num_encoder = 0;
- dev->mode_config.num_overlay_plane = 0;
- dev->mode_config.num_total_plane = 0;
-}
-EXPORT_SYMBOL(drm_mode_config_init);
-
-/**
- * drm_mode_config_cleanup - free up DRM mode_config info
- * @dev: DRM device
- *
- * Free up all the connectors and CRTCs associated with this DRM device, then
- * free up the framebuffers and associated buffer objects.
- *
- * Note that since this /should/ happen single-threaded at driver/device
- * teardown time, no locking is required. It's the driver's job to ensure that
- * this guarantee actually holds true.
- *
- * FIXME: cleanup any dangling user buffer objects too
- */
-void drm_mode_config_cleanup(struct drm_device *dev)
-{
- struct drm_connector *connector, *ot;
- struct drm_crtc *crtc, *ct;
- struct drm_encoder *encoder, *enct;
- struct drm_framebuffer *fb, *fbt;
- struct drm_property *property, *pt;
- struct drm_property_blob *blob, *bt;
- struct drm_plane *plane, *plt;
-
- list_for_each_entry_safe(encoder, enct, &dev->mode_config.encoder_list,
- head) {
- encoder->funcs->destroy(encoder);
- }
-
- list_for_each_entry_safe(connector, ot,
- &dev->mode_config.connector_list, head) {
- connector->funcs->destroy(connector);
- }
-
- list_for_each_entry_safe(property, pt, &dev->mode_config.property_list,
- head) {
- drm_property_destroy(dev, property);
- }
-
- list_for_each_entry_safe(plane, plt, &dev->mode_config.plane_list,
- head) {
- plane->funcs->destroy(plane);
- }
-
- list_for_each_entry_safe(crtc, ct, &dev->mode_config.crtc_list, head) {
- crtc->funcs->destroy(crtc);
- }
-
- list_for_each_entry_safe(blob, bt, &dev->mode_config.property_blob_list,
- head_global) {
- drm_property_unreference_blob(blob);
- }
-
- /*
- * Single-threaded teardown context, so it's not required to grab the
- * fb_lock to protect against concurrent fb_list access. On the contrary, it
- * would actually deadlock with the drm_framebuffer_cleanup function.
- *
- * Also, if there are any framebuffers left, that's a driver leak now,
- * so politely WARN about this.
- */
- WARN_ON(!list_empty(&dev->mode_config.fb_list));
- list_for_each_entry_safe(fb, fbt, &dev->mode_config.fb_list, head) {
- drm_framebuffer_free(&fb->base.refcount);
- }
-
- ida_destroy(&dev->mode_config.connector_ida);
- idr_destroy(&dev->mode_config.tile_idr);
- idr_destroy(&dev->mode_config.crtc_idr);
- drm_modeset_lock_fini(&dev->mode_config.connection_mutex);
-}
-EXPORT_SYMBOL(drm_mode_config_cleanup);
-
-/**
- * DOC: Tile group
- *
- * Tile groups are used to represent tiled monitors with a unique
- * integer identifier. Tiled monitors using DisplayID v1.3 have
- * a unique 8-byte handle, we store this in a tile group, so we
- * have a common identifier for all tiles in a monitor group.
- */
-static void drm_tile_group_free(struct kref *kref)
-{
- struct drm_tile_group *tg = container_of(kref, struct drm_tile_group, refcount);
- struct drm_device *dev = tg->dev;
- mutex_lock(&dev->mode_config.idr_mutex);
- idr_remove(&dev->mode_config.tile_idr, tg->id);
- mutex_unlock(&dev->mode_config.idr_mutex);
- kfree(tg);
-}
-
-/**
- * drm_mode_put_tile_group - drop a reference to a tile group.
- * @dev: DRM device
- * @tg: tile group to drop reference to.
- *
- * Drop a reference to the tile group and free it if the refcount reaches 0.
- */
-void drm_mode_put_tile_group(struct drm_device *dev,
- struct drm_tile_group *tg)
-{
- kref_put(&tg->refcount, drm_tile_group_free);
-}
-
-/**
- * drm_mode_get_tile_group - get a reference to an existing tile group
- * @dev: DRM device
- * @topology: 8-bytes unique per monitor.
- *
- * Use the unique bytes to get a reference to an existing tile group.
- *
- * RETURNS:
- * tile group or NULL if not found.
- */
-struct drm_tile_group *drm_mode_get_tile_group(struct drm_device *dev,
- char topology[8])
-{
- struct drm_tile_group *tg;
- int id;
- mutex_lock(&dev->mode_config.idr_mutex);
- idr_for_each_entry(&dev->mode_config.tile_idr, tg, id) {
- if (!memcmp(tg->group_data, topology, 8)) {
- if (!kref_get_unless_zero(&tg->refcount))
- tg = NULL;
- mutex_unlock(&dev->mode_config.idr_mutex);
- return tg;
- }
- }
- mutex_unlock(&dev->mode_config.idr_mutex);
- return NULL;
-}
-EXPORT_SYMBOL(drm_mode_get_tile_group);
-
-/**
- * drm_mode_create_tile_group - create a tile group from a displayid description
- * @dev: DRM device
- * @topology: 8-bytes unique per monitor.
- *
- * Create a tile group for the unique monitor, and get a unique
- * identifier for the tile group.
- *
- * RETURNS:
- * new tile group or error.
- */
-struct drm_tile_group *drm_mode_create_tile_group(struct drm_device *dev,
- char topology[8])
-{
- struct drm_tile_group *tg;
- int ret;
-
- tg = kzalloc(sizeof(*tg), GFP_KERNEL);
- if (!tg)
- return ERR_PTR(-ENOMEM);
-
- kref_init(&tg->refcount);
- memcpy(tg->group_data, topology, 8);
- tg->dev = dev;
-
- mutex_lock(&dev->mode_config.idr_mutex);
- ret = idr_alloc(&dev->mode_config.tile_idr, tg, 1, 0, GFP_KERNEL);
- if (ret >= 0) {
- tg->id = ret;
- } else {
- kfree(tg);
- tg = ERR_PTR(ret);
- }
-
- mutex_unlock(&dev->mode_config.idr_mutex);
- return tg;
-}
-EXPORT_SYMBOL(drm_mode_create_tile_group);
diff --git a/drivers/gpu/drm/drm_crtc_internal.h b/drivers/gpu/drm/drm_crtc_internal.h
index c48ba02c5365..cdf6860c9d22 100644
--- a/drivers/gpu/drm/drm_crtc_internal.h
+++ b/drivers/gpu/drm/drm_crtc_internal.h
@@ -40,10 +40,29 @@ int drm_crtc_check_viewport(const struct drm_crtc *crtc,
int x, int y,
const struct drm_display_mode *mode,
const struct drm_framebuffer *fb);
+int drm_crtc_register_all(struct drm_device *dev);
+void drm_crtc_unregister_all(struct drm_device *dev);
-void drm_fb_release(struct drm_file *file_priv);
+struct dma_fence *drm_crtc_create_fence(struct drm_crtc *crtc);
+
+/* IOCTLs */
+int drm_mode_getcrtc(struct drm_device *dev,
+ void *data, struct drm_file *file_priv);
+int drm_mode_setcrtc(struct drm_device *dev,
+ void *data, struct drm_file *file_priv);
+
+
+/* drm_mode_config.c */
+int drm_modeset_register_all(struct drm_device *dev);
+void drm_modeset_unregister_all(struct drm_device *dev);
+
+/* IOCTLs */
+int drm_mode_getresources(struct drm_device *dev,
+ void *data, struct drm_file *file_priv);
-/* dumb buffer support IOCTLs */
+
+/* drm_dumb_buffers.c */
+/* IOCTLs */
int drm_mode_create_dumb_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv);
int drm_mode_mmap_dumb_ioctl(struct drm_device *dev,
@@ -51,14 +70,6 @@ int drm_mode_mmap_dumb_ioctl(struct drm_device *dev,
int drm_mode_destroy_dumb_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv);
-/* IOCTLs */
-int drm_mode_getresources(struct drm_device *dev,
- void *data, struct drm_file *file_priv);
-int drm_mode_getcrtc(struct drm_device *dev,
- void *data, struct drm_file *file_priv);
-int drm_mode_setcrtc(struct drm_device *dev,
- void *data, struct drm_file *file_priv);
-
/* drm_color_mgmt.c */
/* IOCTLs */
@@ -147,6 +158,8 @@ void drm_framebuffer_free(struct kref *kref);
int drm_framebuffer_check_src_coords(uint32_t src_x, uint32_t src_y,
uint32_t src_w, uint32_t src_h,
const struct drm_framebuffer *fb);
+void drm_fb_release(struct drm_file *file_priv);
+
/* IOCTL */
int drm_mode_addfb(struct drm_device *dev,
@@ -166,9 +179,6 @@ int drm_atomic_get_property(struct drm_mode_object *obj,
int drm_mode_atomic_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv);
-int drm_modeset_register_all(struct drm_device *dev);
-void drm_modeset_unregister_all(struct drm_device *dev);
-
/* drm_plane.c */
int drm_plane_register_all(struct drm_device *dev);
diff --git a/drivers/gpu/drm/drm_debugfs.c b/drivers/gpu/drm/drm_debugfs.c
index 1205790ed960..2e3e46a53805 100644
--- a/drivers/gpu/drm/drm_debugfs.c
+++ b/drivers/gpu/drm/drm_debugfs.c
@@ -36,6 +36,7 @@
#include <linux/export.h>
#include <drm/drmP.h>
#include <drm/drm_edid.h>
+#include <drm/drm_atomic.h>
#include "drm_internal.h"
#if defined(CONFIG_DEBUG_FS)
@@ -163,6 +164,14 @@ int drm_debugfs_init(struct drm_minor *minor, int minor_id,
return ret;
}
+ if (drm_core_check_feature(dev, DRIVER_ATOMIC)) {
+ ret = drm_atomic_debugfs_init(minor);
+ if (ret) {
+ DRM_ERROR("Failed to create atomic debugfs files\n");
+ return ret;
+ }
+ }
+
if (dev->driver->debugfs_init) {
ret = dev->driver->debugfs_init(minor);
if (ret) {
@@ -219,6 +228,7 @@ EXPORT_SYMBOL(drm_debugfs_remove_files);
int drm_debugfs_cleanup(struct drm_minor *minor)
{
struct drm_device *dev = minor->dev;
+ int ret;
if (!minor->debugfs_root)
return 0;
@@ -226,6 +236,14 @@ int drm_debugfs_cleanup(struct drm_minor *minor)
if (dev->driver->debugfs_cleanup)
dev->driver->debugfs_cleanup(minor);
+ if (drm_core_check_feature(dev, DRIVER_ATOMIC)) {
+ ret = drm_atomic_debugfs_cleanup(minor);
+ if (ret) {
+ DRM_ERROR("DRM: Failed to remove atomic debugfs entries\n");
+ return ret;
+ }
+ }
+
drm_debugfs_remove_files(drm_debugfs_list, DRM_DEBUGFS_ENTRIES, minor);
debugfs_remove(minor->debugfs_root);
@@ -415,5 +433,37 @@ void drm_debugfs_connector_remove(struct drm_connector *connector)
connector->debugfs_entry = NULL;
}
-#endif /* CONFIG_DEBUG_FS */
+int drm_debugfs_crtc_add(struct drm_crtc *crtc)
+{
+ struct drm_minor *minor = crtc->dev->primary;
+ struct dentry *root;
+ char *name;
+
+ name = kasprintf(GFP_KERNEL, "crtc-%d", crtc->index);
+ if (!name)
+ return -ENOMEM;
+ root = debugfs_create_dir(name, minor->debugfs_root);
+ kfree(name);
+ if (!root)
+ return -ENOMEM;
+
+ crtc->debugfs_entry = root;
+
+ if (drm_debugfs_crtc_crc_add(crtc))
+ goto error;
+
+ return 0;
+
+error:
+ drm_debugfs_crtc_remove(crtc);
+ return -ENOMEM;
+}
+
+void drm_debugfs_crtc_remove(struct drm_crtc *crtc)
+{
+ debugfs_remove_recursive(crtc->debugfs_entry);
+ crtc->debugfs_entry = NULL;
+}
+
+#endif /* CONFIG_DEBUG_FS */
diff --git a/drivers/gpu/drm/drm_debugfs_crc.c b/drivers/gpu/drm/drm_debugfs_crc.c
new file mode 100644
index 000000000000..00e771fb7df2
--- /dev/null
+++ b/drivers/gpu/drm/drm_debugfs_crc.c
@@ -0,0 +1,352 @@
+/*
+ * Copyright © 2008 Intel Corporation
+ * Copyright © 2016 Collabora Ltd
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Based on code from the i915 driver.
+ * Original author: Damien Lespiau <damien.lespiau@intel.com>
+ *
+ */
+
+#include <linux/circ_buf.h>
+#include <linux/ctype.h>
+#include <linux/debugfs.h>
+#include <drm/drmP.h>
+#include "drm_internal.h"
+
+/**
+ * DOC: CRC ABI
+ *
+ * DRM device drivers can provide userspace with CRC information for each frame
+ * as it reached a given hardware component (a "source").
+ *
+ * Userspace can control generation of CRCs in a given CRTC by writing to the
+ * file dri/0/crtc-N/crc/control in debugfs, with N being the index of the CRTC.
+ * Accepted values are source names (which are driver-specific) and the "auto"
+ * keyword, which will let the driver select a default source of frame CRCs
+ * for this CRTC.
+ *
+ * Once frame CRC generation is enabled, userspace can capture them by reading
+ * the dri/0/crtc-N/crc/data file. Each line in that file contains the frame
+ * number in the first field and then a number of unsigned integer fields
+ * containing the CRC data. Fields are separated by a single space and the number
+ * of CRC fields is source-specific.
+ *
+ * Note that though in some cases the CRC is computed in a specified way and on
+ * the frame contents as supplied by userspace (eDP 1.3), in general both the
+ * CRC computation and the prior processing of the frame contents are
+ * unspecified, so userspace cannot rely on being able to generate matching CRC
+ * values for the frame contents that it submits. In this general case, the
+ * most userspace can do is to compare the reported CRCs of frames that should
+ * have the same contents.
+ */
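
The control/data contract above maps directly to a small userspace consumer. A minimal hedged sketch (assuming debugfs is mounted at /sys/kernel/debug, CRTC index 0, and a driver that accepts the "auto" source; error handling trimmed):

/*
 * Hypothetical userspace sketch for the CRC ABI documented above. Paths,
 * CRTC index and the available sources are driver/system-specific.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	char line[128];	/* >= LINE_LEN for any values_cnt */
	ssize_t n;
	int fd;

	/* Pick the driver's default CRC source for CRTC 0. */
	fd = open("/sys/kernel/debug/dri/0/crtc-0/crc/control", O_WRONLY);
	if (fd < 0)
		return 1;
	write(fd, "auto", strlen("auto"));
	close(fd);

	/* Opening "data" starts CRC generation; reads block until a
	 * frame's CRCs arrive, and the final close stops generation. */
	fd = open("/sys/kernel/debug/dri/0/crtc-0/crc/data", O_RDONLY);
	if (fd < 0)
		return 1;
	n = read(fd, line, sizeof(line) - 1);
	if (n > 0) {
		line[n] = '\0';
		printf("%s", line);	/* "<frame> 0x<crc> ..." */
	}
	close(fd);
	return 0;
}
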
+
+static int crc_control_show(struct seq_file *m, void *data)
+{
+ struct drm_crtc *crtc = m->private;
+
+ seq_printf(m, "%s\n", crtc->crc.source);
+
+ return 0;
+}
+
+static int crc_control_open(struct inode *inode, struct file *file)
+{
+ struct drm_crtc *crtc = inode->i_private;
+
+ return single_open(file, crc_control_show, crtc);
+}
+
+static ssize_t crc_control_write(struct file *file, const char __user *ubuf,
+ size_t len, loff_t *offp)
+{
+ struct seq_file *m = file->private_data;
+ struct drm_crtc *crtc = m->private;
+ struct drm_crtc_crc *crc = &crtc->crc;
+ char *source;
+
+ if (len == 0)
+ return 0;
+
+ if (len > PAGE_SIZE - 1) {
+ DRM_DEBUG_KMS("Expected < %lu bytes into crtc crc control\n",
+ PAGE_SIZE);
+ return -E2BIG;
+ }
+
+ source = memdup_user_nul(ubuf, len);
+ if (IS_ERR(source))
+ return PTR_ERR(source);
+
+ if (source[len - 1] == '\n')
+ source[len - 1] = '\0';
+
+ spin_lock_irq(&crc->lock);
+
+ if (crc->opened) {
+ spin_unlock_irq(&crc->lock);
+ kfree(source);
+ return -EBUSY;
+ }
+
+ kfree(crc->source);
+ crc->source = source;
+
+ spin_unlock_irq(&crc->lock);
+
+ *offp += len;
+ return len;
+}
+
+static const struct file_operations drm_crtc_crc_control_fops = {
+ .owner = THIS_MODULE,
+ .open = crc_control_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .write = crc_control_write
+};
+
+static int crtc_crc_open(struct inode *inode, struct file *filep)
+{
+ struct drm_crtc *crtc = inode->i_private;
+ struct drm_crtc_crc *crc = &crtc->crc;
+ struct drm_crtc_crc_entry *entries = NULL;
+ size_t values_cnt;
+ int ret;
+
+ if (crc->opened)
+ return -EBUSY;
+
+ ret = crtc->funcs->set_crc_source(crtc, crc->source, &values_cnt);
+ if (ret)
+ return ret;
+
+ if (WARN_ON(values_cnt > DRM_MAX_CRC_NR)) {
+ ret = -EINVAL;
+ goto err_disable;
+ }
+
+ if (WARN_ON(values_cnt == 0)) {
+ ret = -EINVAL;
+ goto err_disable;
+ }
+
+ entries = kcalloc(DRM_CRC_ENTRIES_NR, sizeof(*entries), GFP_KERNEL);
+ if (!entries) {
+ ret = -ENOMEM;
+ goto err_disable;
+ }
+
+ spin_lock_irq(&crc->lock);
+ crc->entries = entries;
+ crc->values_cnt = values_cnt;
+ crc->opened = true;
+ spin_unlock_irq(&crc->lock);
+
+ return 0;
+
+err_disable:
+ crtc->funcs->set_crc_source(crtc, NULL, &values_cnt);
+ return ret;
+}
+
+static int crtc_crc_release(struct inode *inode, struct file *filep)
+{
+ struct drm_crtc *crtc = filep->f_inode->i_private;
+ struct drm_crtc_crc *crc = &crtc->crc;
+ size_t values_cnt;
+
+ spin_lock_irq(&crc->lock);
+ kfree(crc->entries);
+ crc->entries = NULL;
+ crc->head = 0;
+ crc->tail = 0;
+ crc->values_cnt = 0;
+ crc->opened = false;
+ spin_unlock_irq(&crc->lock);
+
+ crtc->funcs->set_crc_source(crtc, NULL, &values_cnt);
+
+ return 0;
+}
+
+static int crtc_crc_data_count(struct drm_crtc_crc *crc)
+{
+ assert_spin_locked(&crc->lock);
+ return CIRC_CNT(crc->head, crc->tail, DRM_CRC_ENTRIES_NR);
+}
+
+/*
+ * 1 frame field of 10 chars plus a number of CRC fields of 10 chars each, space
+ * separated, with a newline at the end and null-terminated.
+ */
+#define LINE_LEN(values_cnt) (10 + 11 * (values_cnt) + 1 + 1)
+#define MAX_LINE_LEN (LINE_LEN(DRM_MAX_CRC_NR))
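
As a quick worked example of these numbers: a source reporting a single CRC per frame (values_cnt == 1) produces LINE_LEN(1) = 10 + 11 + 1 + 1 = 23 bytes per line, and assuming DRM_MAX_CRC_NR is 10 (its value in the matching drm_debugfs_crc.h header), the on-stack buffer in crtc_crc_read() never needs more than MAX_LINE_LEN = 122 bytes.
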
+
+static ssize_t crtc_crc_read(struct file *filep, char __user *user_buf,
+ size_t count, loff_t *pos)
+{
+ struct drm_crtc *crtc = filep->f_inode->i_private;
+ struct drm_crtc_crc *crc = &crtc->crc;
+ struct drm_crtc_crc_entry *entry;
+ char buf[MAX_LINE_LEN];
+ int ret, i;
+
+ spin_lock_irq(&crc->lock);
+
+ if (!crc->source) {
+ spin_unlock_irq(&crc->lock);
+ return 0;
+ }
+
+ /* Nothing to read? */
+ while (crtc_crc_data_count(crc) == 0) {
+ if (filep->f_flags & O_NONBLOCK) {
+ spin_unlock_irq(&crc->lock);
+ return -EAGAIN;
+ }
+
+ ret = wait_event_interruptible_lock_irq(crc->wq,
+ crtc_crc_data_count(crc),
+ crc->lock);
+ if (ret) {
+ spin_unlock_irq(&crc->lock);
+ return ret;
+ }
+ }
+
+ /* We know we have an entry to be read */
+ entry = &crc->entries[crc->tail];
+
+ if (count < LINE_LEN(crc->values_cnt)) {
+ spin_unlock_irq(&crc->lock);
+ return -EINVAL;
+ }
+
+ BUILD_BUG_ON_NOT_POWER_OF_2(DRM_CRC_ENTRIES_NR);
+ crc->tail = (crc->tail + 1) & (DRM_CRC_ENTRIES_NR - 1);
+
+ spin_unlock_irq(&crc->lock);
+
+ if (entry->has_frame_counter)
+ sprintf(buf, "0x%08x", entry->frame);
+ else
+ sprintf(buf, "XXXXXXXXXX");
+
+ for (i = 0; i < crc->values_cnt; i++)
+ sprintf(buf + 10 + i * 11, " 0x%08x", entry->crcs[i]);
+ sprintf(buf + 10 + crc->values_cnt * 11, "\n");
+
+ if (copy_to_user(user_buf, buf, LINE_LEN(crc->values_cnt)))
+ return -EFAULT;
+
+ return LINE_LEN(crc->values_cnt);
+}
+
+static const struct file_operations drm_crtc_crc_data_fops = {
+ .owner = THIS_MODULE,
+ .open = crtc_crc_open,
+ .read = crtc_crc_read,
+ .release = crtc_crc_release,
+};
+
+/**
+ * drm_debugfs_crtc_crc_add - Add files to debugfs for capture of frame CRCs
+ * @crtc: CRTC to whom the frames will belong
+ *
+ * Adds files to debugfs directory that allows userspace to control the
+ * generation of frame CRCs and to read them.
+ *
+ * Returns:
+ * Zero on success, error code on failure.
+ */
+int drm_debugfs_crtc_crc_add(struct drm_crtc *crtc)
+{
+ struct dentry *crc_ent, *ent;
+
+ if (!crtc->funcs->set_crc_source)
+ return 0;
+
+ crc_ent = debugfs_create_dir("crc", crtc->debugfs_entry);
+ if (!crc_ent)
+ return -ENOMEM;
+
+ ent = debugfs_create_file("control", S_IRUGO, crc_ent, crtc,
+ &drm_crtc_crc_control_fops);
+ if (!ent)
+ goto error;
+
+ ent = debugfs_create_file("data", S_IRUGO, crc_ent, crtc,
+ &drm_crtc_crc_data_fops);
+ if (!ent)
+ goto error;
+
+ return 0;
+
+error:
+ debugfs_remove_recursive(crc_ent);
+
+ return -ENOMEM;
+}
+
+/**
+ * drm_crtc_add_crc_entry - Add entry with CRC information for a frame
+ * @crtc: CRTC to which the frame belongs
+ * @has_frame: whether this entry has a frame number to go with
+ * @frame: number of the frame these CRCs are about
+ * @crcs: array of CRC values, with length matching #drm_crtc_crc.values_cnt
+ *
+ * For each frame, the driver polls the source of CRCs for new data and calls
+ * this function to add them to the buffer from where userspace reads.
+ */
+int drm_crtc_add_crc_entry(struct drm_crtc *crtc, bool has_frame,
+ uint32_t frame, uint32_t *crcs)
+{
+ struct drm_crtc_crc *crc = &crtc->crc;
+ struct drm_crtc_crc_entry *entry;
+ int head, tail;
+
+ assert_spin_locked(&crc->lock);
+
+ /* Caller may not have noticed yet that userspace has stopped reading */
+ if (!crc->opened)
+ return -EINVAL;
+
+ head = crc->head;
+ tail = crc->tail;
+
+ if (CIRC_SPACE(head, tail, DRM_CRC_ENTRIES_NR) < 1) {
+ DRM_ERROR("Overflow of CRC buffer, userspace reads too slow.\n");
+ return -ENOBUFS;
+ }
+
+ entry = &crc->entries[head];
+ entry->frame = frame;
+ entry->has_frame_counter = has_frame;
+ memcpy(&entry->crcs, crcs, sizeof(*crcs) * crc->values_cnt);
+
+ head = (head + 1) & (DRM_CRC_ENTRIES_NR - 1);
+ crc->head = head;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(drm_crtc_add_crc_entry);
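
On the producer side, the locking contract (assert_spin_locked() above) and the reader's wait on crc->wq imply that the driver takes crc->lock and wakes the queue itself. A hedged sketch of a per-frame interrupt handler; the foo_read_*() hardware accessors are made up for illustration:

/* Hypothetical driver code; foo_read_*() are assumed hw helpers. */
static void foo_crtc_crc_irq_handler(struct drm_crtc *crtc)
{
	struct drm_crtc_crc *crc = &crtc->crc;
	uint32_t crcs[DRM_MAX_CRC_NR] = {};
	uint32_t frame;

	frame = foo_read_frame_counter(crtc);
	crcs[0] = foo_read_crc(crtc);

	spin_lock(&crc->lock);
	/* Fails with -EINVAL once userspace has closed the data file. */
	if (!drm_crtc_add_crc_entry(crtc, true, frame, crcs))
		wake_up_interruptible(&crc->wq);
	spin_unlock(&crc->lock);
}
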
diff --git a/drivers/gpu/drm/drm_dp_dual_mode_helper.c b/drivers/gpu/drm/drm_dp_dual_mode_helper.c
index a7b2a751f6fe..e02563966271 100644
--- a/drivers/gpu/drm/drm_dp_dual_mode_helper.c
+++ b/drivers/gpu/drm/drm_dp_dual_mode_helper.c
@@ -142,12 +142,25 @@ static bool is_hdmi_adaptor(const char hdmi_id[DP_DUAL_MODE_HDMI_ID_LEN])
sizeof(dp_dual_mode_hdmi_id)) == 0;
}
+static bool is_type1_adaptor(uint8_t adaptor_id)
+{
+ return adaptor_id == 0 || adaptor_id == 0xff;
+}
+
static bool is_type2_adaptor(uint8_t adaptor_id)
{
return adaptor_id == (DP_DUAL_MODE_TYPE_TYPE2 |
DP_DUAL_MODE_REV_TYPE2);
}
+static bool is_lspcon_adaptor(const char hdmi_id[DP_DUAL_MODE_HDMI_ID_LEN],
+ const uint8_t adaptor_id)
+{
+ return is_hdmi_adaptor(hdmi_id) &&
+ (adaptor_id == (DP_DUAL_MODE_TYPE_TYPE2 |
+ DP_DUAL_MODE_TYPE_HAS_DPCD));
+}
+
/**
* drm_dp_dual_mode_detect - Identify the DP dual mode adaptor
* @adapter: I2C adapter for the DDC bus
@@ -185,6 +198,8 @@ enum drm_dp_dual_mode_type drm_dp_dual_mode_detect(struct i2c_adapter *adapter)
*/
ret = drm_dp_dual_mode_read(adapter, DP_DUAL_MODE_HDMI_ID,
hdmi_id, sizeof(hdmi_id));
+ DRM_DEBUG_KMS("DP dual mode HDMI ID: %*pE (err %zd)\n",
+ ret ? 0 : (int)sizeof(hdmi_id), hdmi_id, ret);
if (ret)
return DRM_DP_DUAL_MODE_UNKNOWN;
@@ -202,13 +217,26 @@ enum drm_dp_dual_mode_type drm_dp_dual_mode_detect(struct i2c_adapter *adapter)
*/
ret = drm_dp_dual_mode_read(adapter, DP_DUAL_MODE_ADAPTOR_ID,
&adaptor_id, sizeof(adaptor_id));
+ DRM_DEBUG_KMS("DP dual mode adaptor ID: %02x (err %zd)\n",
+ adaptor_id, ret);
if (ret == 0) {
+ if (is_lspcon_adaptor(hdmi_id, adaptor_id))
+ return DRM_DP_DUAL_MODE_LSPCON;
if (is_type2_adaptor(adaptor_id)) {
if (is_hdmi_adaptor(hdmi_id))
return DRM_DP_DUAL_MODE_TYPE2_HDMI;
else
return DRM_DP_DUAL_MODE_TYPE2_DVI;
}
+ /*
+ * If this is neither a proper type 1 ID nor a broken type 1 adaptor
+ * as described above, assume type 1 anyway, but let the user know
+ * that we may have misdetected the type.
+ */
+ if (!is_type1_adaptor(adaptor_id) && adaptor_id != hdmi_id[0])
+ DRM_ERROR("Unexpected DP dual mode adaptor ID %02x\n",
+ adaptor_id);
+
}
if (is_hdmi_adaptor(hdmi_id))
@@ -364,3 +392,96 @@ const char *drm_dp_get_dual_mode_type_name(enum drm_dp_dual_mode_type type)
}
}
EXPORT_SYMBOL(drm_dp_get_dual_mode_type_name);
+
+/**
+ * drm_lspcon_get_mode - get LSPCON's current mode of operation by
+ * reading offset (0x80, 0x41)
+ * @adapter: I2C-over-aux adapter
+ * @mode: current lspcon mode of operation output variable
+ *
+ * Returns:
+ * 0 on success, with *mode set to the current mode of operation;
+ * negative error code on failure
+ */
+int drm_lspcon_get_mode(struct i2c_adapter *adapter,
+ enum drm_lspcon_mode *mode)
+{
+ u8 data;
+ int ret = 0;
+
+ if (!mode) {
+ DRM_ERROR("NULL input\n");
+ return -EINVAL;
+ }
+
+ /* Read Status: i2c over aux */
+ ret = drm_dp_dual_mode_read(adapter, DP_DUAL_MODE_LSPCON_CURRENT_MODE,
+ &data, sizeof(data));
+ if (ret < 0) {
+ DRM_ERROR("LSPCON read(0x80, 0x41) failed\n");
+ return -EFAULT;
+ }
+
+ if (data & DP_DUAL_MODE_LSPCON_MODE_PCON)
+ *mode = DRM_LSPCON_MODE_PCON;
+ else
+ *mode = DRM_LSPCON_MODE_LS;
+ return 0;
+}
+EXPORT_SYMBOL(drm_lspcon_get_mode);
+
+/**
+ * drm_lspcon_set_mode - change LSPCON's mode of operation by
+ * writing offset (0x80, 0x40)
+ * @adapter: I2C-over-aux adapter
+ * @mode: required mode of operation
+ *
+ * Returns:
+ * 0 on success, negative error code on failure or timeout
+ */
+int drm_lspcon_set_mode(struct i2c_adapter *adapter,
+ enum drm_lspcon_mode mode)
+{
+ u8 data = 0;
+ int ret;
+ int time_out = 200;
+ enum drm_lspcon_mode current_mode;
+
+ if (mode == DRM_LSPCON_MODE_PCON)
+ data = DP_DUAL_MODE_LSPCON_MODE_PCON;
+
+ /* Change mode */
+ ret = drm_dp_dual_mode_write(adapter, DP_DUAL_MODE_LSPCON_MODE_CHANGE,
+ &data, sizeof(data));
+ if (ret < 0) {
+ DRM_ERROR("LSPCON mode change failed\n");
+ return ret;
+ }
+
+ /*
+ * Confirm mode change by reading the status bit.
+ * Sometimes, it takes a while to change the mode,
+ * so wait and retry until time out or done.
+ */
+ do {
+ ret = drm_lspcon_get_mode(adapter, &current_mode);
+ if (ret) {
+ DRM_ERROR("can't confirm LSPCON mode change\n");
+ return ret;
+ }
+
+ if (current_mode == mode) {
+ DRM_DEBUG_KMS("LSPCON mode changed to %s\n",
+ mode == DRM_LSPCON_MODE_LS ?
+ "LS" : "PCON");
+ return 0;
+ }
+
+ msleep(10);
+ time_out -= 10;
+ } while (time_out);
+
+ DRM_ERROR("LSPCON mode change timed out\n");
+ return -ETIMEDOUT;
+}
+EXPORT_SYMBOL(drm_lspcon_set_mode);
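
A hedged sketch of how a driver might combine the two helpers, assuming it already holds the adaptor's I2C-over-AUX adapter (the function name is illustrative, not from this patch):

static int foo_lspcon_ensure_pcon_mode(struct i2c_adapter *adapter)
{
	enum drm_lspcon_mode mode;
	int ret;

	ret = drm_lspcon_get_mode(adapter, &mode);
	if (ret)
		return ret;

	if (mode == DRM_LSPCON_MODE_PCON)
		return 0;

	/* drm_lspcon_set_mode() polls until the change sticks. */
	return drm_lspcon_set_mode(adapter, DRM_LSPCON_MODE_PCON);
}
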
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index 6efdba4993fc..a525751b4559 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -32,7 +32,10 @@
#include <linux/moduleparam.h>
#include <linux/mount.h>
#include <linux/slab.h>
+
+#include <drm/drm_drv.h>
#include <drm/drmP.h>
+
#include "drm_crtc_internal.h"
#include "drm_legacy.h"
#include "drm_internal.h"
@@ -257,10 +260,7 @@ static void drm_minor_unregister(struct drm_device *dev, unsigned int type)
drm_debugfs_cleanup(minor);
}
-/**
- * drm_minor_acquire - Acquire a DRM minor
- * @minor_id: Minor ID of the DRM-minor
- *
+/*
* Looks up the given minor-ID and returns the respective DRM-minor object. The
* reference count of the underlying device is increased so you must release this
* object with drm_minor_release().
@@ -268,10 +268,6 @@ static void drm_minor_unregister(struct drm_device *dev, unsigned int type)
* As long as you hold this minor, it is guaranteed that the object and the
* minor->dev pointer will stay valid! However, the device may get unplugged and
* unregistered while you hold the minor.
- *
- * Returns:
- * Pointer to minor-object with increased device-refcount, or PTR_ERR on
- * failure.
*/
struct drm_minor *drm_minor_acquire(unsigned int minor_id)
{
@@ -294,12 +290,6 @@ struct drm_minor *drm_minor_acquire(unsigned int minor_id)
return minor;
}
-/**
- * drm_minor_release - Release DRM minor
- * @minor: Pointer to DRM minor object
- *
- * Release a minor that was previously acquired via drm_minor_acquire().
- */
void drm_minor_release(struct drm_minor *minor)
{
drm_dev_unref(minor->dev);
@@ -313,9 +303,10 @@ void drm_minor_release(struct drm_minor *minor)
* callbacks implemented by the driver. The driver then needs to initialize all
* the various subsystems for the drm device like memory management, vblank
* handling, modesetting support and initial output configuration plus obviously
- * initialize all the corresponding hardware bits. Finally when everything is up
- * and running and ready for userspace the device instance can be published
- * using drm_dev_register().
+ * initialize all the corresponding hardware bits. An important part of this is
+ * also calling drm_dev_set_unique() to set the userspace-visible unique name of
+ * this device instance. Finally when everything is up and running and ready for
+ * userspace the device instance can be published using drm_dev_register().
*
* There is also deprecated support for initializing device instances using
* bus-specific helpers and the ->load() callback. But due to
@@ -337,17 +328,6 @@ void drm_minor_release(struct drm_minor *minor)
* dev_priv field of &drm_device.
*/
-static int drm_dev_set_unique(struct drm_device *dev, const char *name)
-{
- if (!name)
- return -EINVAL;
-
- kfree(dev->unique);
- dev->unique = kstrdup(name, GFP_KERNEL);
-
- return dev->unique ? 0 : -ENOMEM;
-}
-
/**
* drm_put_dev - Unregister and release a DRM device
* @dev: DRM device
@@ -517,12 +497,6 @@ int drm_dev_init(struct drm_device *dev,
goto err_free;
}
- if (drm_core_check_feature(dev, DRIVER_MODESET)) {
- ret = drm_minor_alloc(dev, DRM_MINOR_CONTROL);
- if (ret)
- goto err_minors;
- }
-
if (drm_core_check_feature(dev, DRIVER_RENDER)) {
ret = drm_minor_alloc(dev, DRM_MINOR_RENDER);
if (ret)
@@ -568,6 +542,9 @@ err_minors:
drm_fs_inode_free(dev->anon_inode);
err_free:
mutex_destroy(&dev->master_mutex);
+ mutex_destroy(&dev->ctxlist_mutex);
+ mutex_destroy(&dev->filelist_mutex);
+ mutex_destroy(&dev->struct_mutex);
return ret;
}
EXPORT_SYMBOL(drm_dev_init);
@@ -630,6 +607,9 @@ static void drm_dev_release(struct kref *ref)
drm_minor_free(dev, DRM_MINOR_CONTROL);
mutex_destroy(&dev->master_mutex);
+ mutex_destroy(&dev->ctxlist_mutex);
+ mutex_destroy(&dev->filelist_mutex);
+ mutex_destroy(&dev->struct_mutex);
kfree(dev->unique);
kfree(dev);
}
@@ -667,6 +647,62 @@ void drm_dev_unref(struct drm_device *dev)
}
EXPORT_SYMBOL(drm_dev_unref);
+static int create_compat_control_link(struct drm_device *dev)
+{
+ struct drm_minor *minor;
+ char *name;
+ int ret;
+
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return 0;
+
+ minor = *drm_minor_get_slot(dev, DRM_MINOR_PRIMARY);
+ if (!minor)
+ return 0;
+
+ /*
+ * Some existing userspace out there uses the existence of the controlD*
+ * sysfs files to figure out whether it's a modeset driver. It only does
+ * readdir, hence a symlink is sufficient (and the least confusing
+ * option). Otherwise controlD* is entirely unused.
+ *
+ * Old controlD chardevs have been allocated in the range
+ * 64-127.
+ */
+ name = kasprintf(GFP_KERNEL, "controlD%d", minor->index + 64);
+ if (!name)
+ return -ENOMEM;
+
+ ret = sysfs_create_link(minor->kdev->kobj.parent,
+ &minor->kdev->kobj,
+ name);
+
+ kfree(name);
+
+ return ret;
+}
+
+static void remove_compat_control_link(struct drm_device *dev)
+{
+ struct drm_minor *minor;
+ char *name;
+
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return;
+
+ minor = *drm_minor_get_slot(dev, DRM_MINOR_PRIMARY);
+ if (!minor)
+ return;
+
+ name = kasprintf(GFP_KERNEL, "controlD%d", minor->index + 64);
+ if (!name)
+ return;
+
+ sysfs_remove_link(minor->kdev->kobj.parent, name);
+
+ kfree(name);
+}
+
/**
* drm_dev_register - Register DRM device
* @dev: Device to register
@@ -705,6 +741,10 @@ int drm_dev_register(struct drm_device *dev, unsigned long flags)
if (ret)
goto err_minors;
+ ret = create_compat_control_link(dev);
+ if (ret)
+ goto err_minors;
+
if (dev->driver->load) {
ret = dev->driver->load(dev, flags);
if (ret)
@@ -718,6 +758,7 @@ int drm_dev_register(struct drm_device *dev, unsigned long flags)
goto out_unlock;
err_minors:
+ remove_compat_control_link(dev);
drm_minor_unregister(dev, DRM_MINOR_PRIMARY);
drm_minor_unregister(dev, DRM_MINOR_RENDER);
drm_minor_unregister(dev, DRM_MINOR_CONTROL);
@@ -758,12 +799,33 @@ void drm_dev_unregister(struct drm_device *dev)
list_for_each_entry_safe(r_list, list_temp, &dev->maplist, head)
drm_legacy_rmmap(dev, r_list->map);
+ remove_compat_control_link(dev);
drm_minor_unregister(dev, DRM_MINOR_PRIMARY);
drm_minor_unregister(dev, DRM_MINOR_RENDER);
drm_minor_unregister(dev, DRM_MINOR_CONTROL);
}
EXPORT_SYMBOL(drm_dev_unregister);
+/**
+ * drm_dev_set_unique - Set the unique name of a DRM device
+ * @dev: device of which to set the unique name
+ * @name: unique name
+ *
+ * Sets the unique name of a DRM device using the specified string. Drivers
+ * can use this at driver probe time if the unique name of the devices they
+ * drive is static.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int drm_dev_set_unique(struct drm_device *dev, const char *name)
+{
+ kfree(dev->unique);
+ dev->unique = kstrdup(name, GFP_KERNEL);
+
+ return dev->unique ? 0 : -ENOMEM;
+}
+EXPORT_SYMBOL(drm_dev_set_unique);
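
A hedged sketch of a typical caller at probe time (driver structure, names and labels are illustrative, not from this patch):

static int foo_platform_probe(struct platform_device *pdev)
{
	struct drm_device *ddev;
	int ret;

	ddev = drm_dev_alloc(&foo_drm_driver, &pdev->dev);
	if (IS_ERR(ddev))
		return PTR_ERR(ddev);

	/* Stable, userspace-visible unique name for this instance. */
	ret = drm_dev_set_unique(ddev, dev_name(&pdev->dev));
	if (ret)
		goto err_unref;

	ret = drm_dev_register(ddev, 0);
	if (ret)
		goto err_unref;

	return 0;

err_unref:
	drm_dev_unref(ddev);
	return ret;
}
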
+
/*
* DRM Core
* The DRM core module initializes all global DRM objects and makes them
diff --git a/drivers/gpu/drm/drm_dumb_buffers.c b/drivers/gpu/drm/drm_dumb_buffers.c
new file mode 100644
index 000000000000..8ac5a1c1d811
--- /dev/null
+++ b/drivers/gpu/drm/drm_dumb_buffers.c
@@ -0,0 +1,128 @@
+/*
+ * Copyright (c) 2006-2008 Intel Corporation
+ * Copyright (c) 2007 Dave Airlie <airlied@linux.ie>
+ * Copyright (c) 2008 Red Hat Inc.
+ * Copyright (c) 2016 Intel Corporation
+ *
+ * Permission to use, copy, modify, distribute, and sell this software and its
+ * documentation for any purpose is hereby granted without fee, provided that
+ * the above copyright notice appear in all copies and that both that copyright
+ * notice and this permission notice appear in supporting documentation, and
+ * that the name of the copyright holders not be used in advertising or
+ * publicity pertaining to distribution of the software without specific,
+ * written prior permission. The copyright holders make no representations
+ * about the suitability of this software for any purpose. It is provided "as
+ * is" without express or implied warranty.
+ *
+ * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
+ * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
+ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THIS SOFTWARE.
+ */
+
+#include <drm/drmP.h>
+
+#include "drm_crtc_internal.h"
+
+/**
+ * DOC: overview
+ *
+ * The KMS API doesn't standardize backing storage object creation and leaves it
+ * to driver-specific ioctls. Furthermore actually creating a buffer object even
+ * for GEM-based drivers is done through a driver-specific ioctl - GEM only has
+ * a common userspace interface for sharing and destroying objects. While not an
+ * issue for full-fledged graphics stacks that include device-specific userspace
+ * components (in libdrm for instance), this limit makes DRM-based early boot
+ * graphics unnecessarily complex.
+ *
+ * Dumb objects partly alleviate the problem by providing a standard API to
+ * create dumb buffers suitable for scanout, which can then be used to create
+ * KMS frame buffers.
+ *
+ * To support dumb objects drivers must implement the dumb_create,
+ * dumb_destroy and dumb_map_offset operations from struct &drm_driver. See
+ * there for further details.
+ *
+ * Note that dumb objects may not be used for gpu acceleration, as has been
+ * attempted on some ARM embedded platforms. Such drivers really must have
+ * a hardware-specific ioctl to allocate suitable buffer objects.
+ */
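
For reference, a hedged userspace sketch of the dumb-buffer flow described above, using only the CREATE_DUMB and MAP_DUMB UAPI (an already-open DRM fd is assumed; error handling simplified to MAP_FAILED):

/* Hypothetical userspace helper: allocate a dumb buffer and mmap it. */
#include <stdint.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <drm/drm.h>
#include <drm/drm_mode.h>

static void *map_dumb_fb(int drm_fd, uint32_t width, uint32_t height,
			 uint32_t *pitch, uint64_t *size)
{
	struct drm_mode_create_dumb create = {
		.width = width,
		.height = height,
		.bpp = 32,		/* e.g. XRGB8888 */
	};
	struct drm_mode_map_dumb map = {};

	if (ioctl(drm_fd, DRM_IOCTL_MODE_CREATE_DUMB, &create))
		return MAP_FAILED;

	map.handle = create.handle;
	if (ioctl(drm_fd, DRM_IOCTL_MODE_MAP_DUMB, &map))
		return MAP_FAILED;

	*pitch = create.pitch;
	*size = create.size;

	/* The returned offset is a fake mmap cookie on the DRM fd. */
	return mmap(NULL, create.size, PROT_READ | PROT_WRITE,
		    MAP_SHARED, drm_fd, map.offset);
}
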
+
+int drm_mode_create_dumb_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *file_priv)
+{
+ struct drm_mode_create_dumb *args = data;
+ u32 cpp, stride, size;
+
+ if (!dev->driver->dumb_create)
+ return -ENOSYS;
+ if (!args->width || !args->height || !args->bpp)
+ return -EINVAL;
+
+ /* overflow checks for 32bit size calculations */
+ /* NOTE: DIV_ROUND_UP() can overflow */
+ cpp = DIV_ROUND_UP(args->bpp, 8);
+ if (!cpp || cpp > 0xffffffffU / args->width)
+ return -EINVAL;
+ stride = cpp * args->width;
+ if (args->height > 0xffffffffU / stride)
+ return -EINVAL;
+
+ /* test for wrap-around */
+ size = args->height * stride;
+ if (PAGE_ALIGN(size) == 0)
+ return -EINVAL;
+
+ /*
+ * handle, pitch and size are output parameters. Zero them out to
+ * prevent drivers from accidentally using uninitialized data. Since
+ * not all existing userspace is clearing these fields properly we
+ * cannot reject IOCTLs with garbage in them.
+ */
+ args->handle = 0;
+ args->pitch = 0;
+ args->size = 0;
+
+ return dev->driver->dumb_create(file_priv, dev, args);
+}
+
+/**
+ * drm_mode_mmap_dumb_ioctl - create an mmap offset for a dumb backing storage buffer
+ * @dev: DRM device
+ * @data: ioctl data
+ * @file_priv: DRM file info
+ *
+ * Allocate an offset in the drm device node's address space to be able to
+ * memory map a dumb buffer.
+ *
+ * Called by the user via ioctl.
+ *
+ * Returns:
+ * Zero on success, negative errno on failure.
+ */
+int drm_mode_mmap_dumb_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *file_priv)
+{
+ struct drm_mode_map_dumb *args = data;
+
+ /* call driver ioctl to get mmap offset */
+ if (!dev->driver->dumb_map_offset)
+ return -ENOSYS;
+
+ return dev->driver->dumb_map_offset(file_priv, dev, args->handle, &args->offset);
+}
+
+int drm_mode_destroy_dumb_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *file_priv)
+{
+ struct drm_mode_destroy_dumb *args = data;
+
+ if (!dev->driver->dumb_destroy)
+ return -ENOSYS;
+
+ return dev->driver->dumb_destroy(file_priv, dev, args->handle);
+}
+
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index ec77bd3e1f08..336be31ff3de 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -957,13 +957,13 @@ static const struct drm_display_mode edid_cea_modes[] = {
798, 858, 0, 480, 489, 495, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
.vrefresh = 240, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
- /* 58 - 720(1440)x480i@240 */
+ /* 58 - 720(1440)x480i@240Hz */
{ DRM_MODE("720x480i", DRM_MODE_TYPE_DRIVER, 54000, 720, 739,
801, 858, 0, 480, 488, 494, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
.vrefresh = 240, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
- /* 59 - 720(1440)x480i@240 */
+ /* 59 - 720(1440)x480i@240Hz */
{ DRM_MODE("720x480i", DRM_MODE_TYPE_DRIVER, 54000, 720, 739,
801, 858, 0, 480, 488, 494, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
@@ -1260,6 +1260,34 @@ drm_do_probe_ddc_edid(void *data, u8 *buf, unsigned int block, size_t len)
return ret == xfers ? 0 : -1;
}
+static void connector_bad_edid(struct drm_connector *connector,
+ u8 *edid, int num_blocks)
+{
+ int i;
+
+ if (connector->bad_edid_counter++ && !(drm_debug & DRM_UT_KMS))
+ return;
+
+ dev_warn(connector->dev->dev,
+ "%s: EDID is invalid:\n",
+ connector->name);
+ for (i = 0; i < num_blocks; i++) {
+ u8 *block = edid + i * EDID_LENGTH;
+ char prefix[20];
+
+ if (drm_edid_is_zero(block, EDID_LENGTH))
+ sprintf(prefix, "\t[%02x] ZERO ", i);
+ else if (!drm_edid_block_valid(block, i, false, NULL))
+ sprintf(prefix, "\t[%02x] BAD ", i);
+ else
+ sprintf(prefix, "\t[%02x] GOOD ", i);
+
+ print_hex_dump(KERN_WARNING,
+ prefix, DUMP_PREFIX_NONE, 16, 1,
+ block, EDID_LENGTH, false);
+ }
+}
+
/**
* drm_do_get_edid - get EDID data using a custom EDID block read function
* @connector: connector we're probing
@@ -1282,20 +1310,19 @@ struct edid *drm_do_get_edid(struct drm_connector *connector,
void *data)
{
int i, j = 0, valid_extensions = 0;
- u8 *block, *new;
- bool print_bad_edid = !connector->bad_edid_counter || (drm_debug & DRM_UT_KMS);
+ u8 *edid, *new;
- if ((block = kmalloc(EDID_LENGTH, GFP_KERNEL)) == NULL)
+ if ((edid = kmalloc(EDID_LENGTH, GFP_KERNEL)) == NULL)
return NULL;
/* base block fetch */
for (i = 0; i < 4; i++) {
- if (get_edid_block(data, block, 0, EDID_LENGTH))
+ if (get_edid_block(data, edid, 0, EDID_LENGTH))
goto out;
- if (drm_edid_block_valid(block, 0, print_bad_edid,
+ if (drm_edid_block_valid(edid, 0, false,
&connector->edid_corrupt))
break;
- if (i == 0 && drm_edid_is_zero(block, EDID_LENGTH)) {
+ if (i == 0 && drm_edid_is_zero(edid, EDID_LENGTH)) {
connector->null_edid_counter++;
goto carp;
}
@@ -1304,58 +1331,62 @@ struct edid *drm_do_get_edid(struct drm_connector *connector,
goto carp;
/* if there's no extensions, we're done */
- if (block[0x7e] == 0)
- return (struct edid *)block;
+ valid_extensions = edid[0x7e];
+ if (valid_extensions == 0)
+ return (struct edid *)edid;
- new = krealloc(block, (block[0x7e] + 1) * EDID_LENGTH, GFP_KERNEL);
+ new = krealloc(edid, (valid_extensions + 1) * EDID_LENGTH, GFP_KERNEL);
if (!new)
goto out;
- block = new;
+ edid = new;
+
+ for (j = 1; j <= edid[0x7e]; j++) {
+ u8 *block = edid + j * EDID_LENGTH;
- for (j = 1; j <= block[0x7e]; j++) {
for (i = 0; i < 4; i++) {
- if (get_edid_block(data,
- block + (valid_extensions + 1) * EDID_LENGTH,
- j, EDID_LENGTH))
+ if (get_edid_block(data, block, j, EDID_LENGTH))
goto out;
- if (drm_edid_block_valid(block + (valid_extensions + 1)
- * EDID_LENGTH, j,
- print_bad_edid,
- NULL)) {
- valid_extensions++;
+ if (drm_edid_block_valid(block, j, false, NULL))
break;
- }
}
- if (i == 4 && print_bad_edid) {
- dev_warn(connector->dev->dev,
- "%s: Ignoring invalid EDID block %d.\n",
- connector->name, j);
-
- connector->bad_edid_counter++;
- }
+ if (i == 4)
+ valid_extensions--;
}
- if (valid_extensions != block[0x7e]) {
- block[EDID_LENGTH-1] += block[0x7e] - valid_extensions;
- block[0x7e] = valid_extensions;
- new = krealloc(block, (valid_extensions + 1) * EDID_LENGTH, GFP_KERNEL);
+ if (valid_extensions != edid[0x7e]) {
+ u8 *base;
+
+ connector_bad_edid(connector, edid, edid[0x7e] + 1);
+
+ edid[EDID_LENGTH-1] += edid[0x7e] - valid_extensions;
+ edid[0x7e] = valid_extensions;
+
+ new = kmalloc((valid_extensions + 1) * EDID_LENGTH, GFP_KERNEL);
if (!new)
goto out;
- block = new;
- }
- return (struct edid *)block;
+ base = new;
+ for (i = 0; i <= edid[0x7e]; i++) {
+ u8 *block = edid + i * EDID_LENGTH;
-carp:
- if (print_bad_edid) {
- dev_warn(connector->dev->dev, "%s: EDID block %d invalid.\n",
- connector->name, j);
+ if (!drm_edid_block_valid(block, i, false, NULL))
+ continue;
+
+ memcpy(base, block, EDID_LENGTH);
+ base += EDID_LENGTH;
+ }
+
+ kfree(edid);
+ edid = new;
}
- connector->bad_edid_counter++;
+ return (struct edid *)edid;
+
+carp:
+ connector_bad_edid(connector, edid, 1);
out:
- kfree(block);
+ kfree(edid);
return NULL;
}
EXPORT_SYMBOL_GPL(drm_do_get_edid);
@@ -2582,6 +2613,41 @@ cea_mode_alternate_clock(const struct drm_display_mode *cea_mode)
return clock;
}
+static bool
+cea_mode_alternate_timings(u8 vic, struct drm_display_mode *mode)
+{
+ /*
+ * For certain VICs the spec allows the vertical
+ * front porch to vary by one or two lines.
+ *
+ * cea_modes[] stores the variant with the shortest
+ * vertical front porch. We can adjust the mode to
+ * get the other variants by simply increasing the
+ * vertical front porch length.
+ */
+ BUILD_BUG_ON(edid_cea_modes[8].vtotal != 262 ||
+ edid_cea_modes[9].vtotal != 262 ||
+ edid_cea_modes[12].vtotal != 262 ||
+ edid_cea_modes[13].vtotal != 262 ||
+ edid_cea_modes[23].vtotal != 312 ||
+ edid_cea_modes[24].vtotal != 312 ||
+ edid_cea_modes[27].vtotal != 312 ||
+ edid_cea_modes[28].vtotal != 312);
+
+ if (((vic == 8 || vic == 9 ||
+ vic == 12 || vic == 13) && mode->vtotal < 263) ||
+ ((vic == 23 || vic == 24 ||
+ vic == 27 || vic == 28) && mode->vtotal < 314)) {
+ mode->vsync_start++;
+ mode->vsync_end++;
+ mode->vtotal++;
+
+ return true;
+ }
+
+ return false;
+}
+
static u8 drm_match_cea_mode_clock_tolerance(const struct drm_display_mode *to_match,
unsigned int clock_tolerance)
{
@@ -2591,19 +2657,21 @@ static u8 drm_match_cea_mode_clock_tolerance(const struct drm_display_mode *to_m
return 0;
for (vic = 1; vic < ARRAY_SIZE(edid_cea_modes); vic++) {
- const struct drm_display_mode *cea_mode = &edid_cea_modes[vic];
+ struct drm_display_mode cea_mode = edid_cea_modes[vic];
unsigned int clock1, clock2;
/* Check both 60Hz and 59.94Hz */
- clock1 = cea_mode->clock;
- clock2 = cea_mode_alternate_clock(cea_mode);
+ clock1 = cea_mode.clock;
+ clock2 = cea_mode_alternate_clock(&cea_mode);
if (abs(to_match->clock - clock1) > clock_tolerance &&
abs(to_match->clock - clock2) > clock_tolerance)
continue;
- if (drm_mode_equal_no_clocks(to_match, cea_mode))
- return vic;
+ do {
+ if (drm_mode_equal_no_clocks_no_stereo(to_match, &cea_mode))
+ return vic;
+ } while (cea_mode_alternate_timings(vic, &cea_mode));
}
return 0;
@@ -2624,18 +2692,23 @@ u8 drm_match_cea_mode(const struct drm_display_mode *to_match)
return 0;
for (vic = 1; vic < ARRAY_SIZE(edid_cea_modes); vic++) {
- const struct drm_display_mode *cea_mode = &edid_cea_modes[vic];
+ struct drm_display_mode cea_mode = edid_cea_modes[vic];
unsigned int clock1, clock2;
/* Check both 60Hz and 59.94Hz */
- clock1 = cea_mode->clock;
- clock2 = cea_mode_alternate_clock(cea_mode);
+ clock1 = cea_mode.clock;
+ clock2 = cea_mode_alternate_clock(&cea_mode);
- if ((KHZ2PICOS(to_match->clock) == KHZ2PICOS(clock1) ||
- KHZ2PICOS(to_match->clock) == KHZ2PICOS(clock2)) &&
- drm_mode_equal_no_clocks_no_stereo(to_match, cea_mode))
- return vic;
+ if (KHZ2PICOS(to_match->clock) != KHZ2PICOS(clock1) &&
+ KHZ2PICOS(to_match->clock) != KHZ2PICOS(clock2))
+ continue;
+
+ do {
+ if (drm_mode_equal_no_clocks_no_stereo(to_match, &cea_mode))
+ return vic;
+ } while (cea_mode_alternate_timings(vic, &cea_mode));
}
+
return 0;
}
EXPORT_SYMBOL(drm_match_cea_mode);
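
Since cea_mode_alternate_timings() also admits the longer vertical front
porch variants, a probed 1440x240 mode with vtotal 263 now resolves to
VIC 8/9 just like the vtotal 262 variant stored in the table. A hedged
usage sketch, where set_avi_vic() is a hypothetical driver helper:

	/* Sketch: resolve a probed mode to a CEA-861 VIC, if any. */
	u8 vic = drm_match_cea_mode(adjusted_mode);

	if (vic)
		set_avi_vic(encoder, vic);	/* advertise the VIC in the AVI infoframe */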
@@ -3580,32 +3653,6 @@ int drm_av_sync_delay(struct drm_connector *connector,
EXPORT_SYMBOL(drm_av_sync_delay);
/**
- * drm_select_eld - select one ELD from multiple HDMI/DP sinks
- * @encoder: the encoder just changed display mode
- *
- * It's possible for one encoder to be associated with multiple HDMI/DP sinks.
- * The policy is now hard coded to simply use the first HDMI/DP sink's ELD.
- *
- * Return: The connector associated with the first HDMI/DP sink that has ELD
- * attached to it.
- */
-struct drm_connector *drm_select_eld(struct drm_encoder *encoder)
-{
- struct drm_connector *connector;
- struct drm_device *dev = encoder->dev;
-
- WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
- WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
-
- drm_for_each_connector(connector, dev)
- if (connector->encoder == encoder && connector->eld[0])
- return connector;
-
- return NULL;
-}
-EXPORT_SYMBOL(drm_select_eld);
-
-/**
* drm_detect_hdmi_monitor - detect whether monitor is HDMI
* @edid: monitor EDID information
*
diff --git a/drivers/gpu/drm/drm_encoder.c b/drivers/gpu/drm/drm_encoder.c
index 5c067719164d..992879f15f23 100644
--- a/drivers/gpu/drm/drm_encoder.c
+++ b/drivers/gpu/drm/drm_encoder.c
@@ -110,11 +110,9 @@ int drm_encoder_init(struct drm_device *dev,
{
int ret;
- drm_modeset_lock_all(dev);
-
ret = drm_mode_object_get(dev, &encoder->base, DRM_MODE_OBJECT_ENCODER);
if (ret)
- goto out_unlock;
+ return ret;
encoder->dev = dev;
encoder->encoder_type = encoder_type;
@@ -142,9 +140,6 @@ out_put:
if (ret)
drm_mode_object_unregister(dev, &encoder->base);
-out_unlock:
- drm_modeset_unlock_all(dev);
-
return ret;
}
EXPORT_SYMBOL(drm_encoder_init);
@@ -164,12 +159,10 @@ void drm_encoder_cleanup(struct drm_encoder *encoder)
* the indices on the drm_encoder after us in the encoder_list.
*/
- drm_modeset_lock_all(dev);
drm_mode_object_unregister(dev, &encoder->base);
kfree(encoder->name);
list_del(&encoder->head);
dev->mode_config.num_encoder--;
- drm_modeset_unlock_all(dev);
memset(encoder, 0, sizeof(*encoder));
}
diff --git a/drivers/gpu/drm/drm_fb_cma_helper.c b/drivers/gpu/drm/drm_fb_cma_helper.c
index 1fd6eac1400c..81b3558302b5 100644
--- a/drivers/gpu/drm/drm_fb_cma_helper.c
+++ b/drivers/gpu/drm/drm_fb_cma_helper.c
@@ -18,13 +18,16 @@
*/
#include <drm/drmP.h>
+#include <drm/drm_atomic.h>
#include <drm/drm_crtc.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_fb_cma_helper.h>
+#include <linux/dma-buf.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>
+#include <linux/reservation.h>
#define DEFAULT_FBDEFIO_DELAY_MS 50
@@ -176,20 +179,20 @@ struct drm_framebuffer *drm_fb_cma_create_with_funcs(struct drm_device *dev,
struct drm_file *file_priv, const struct drm_mode_fb_cmd2 *mode_cmd,
const struct drm_framebuffer_funcs *funcs)
{
+ const struct drm_format_info *info;
struct drm_fb_cma *fb_cma;
struct drm_gem_cma_object *objs[4];
struct drm_gem_object *obj;
- unsigned int hsub;
- unsigned int vsub;
int ret;
int i;
- hsub = drm_format_horz_chroma_subsampling(mode_cmd->pixel_format);
- vsub = drm_format_vert_chroma_subsampling(mode_cmd->pixel_format);
+ info = drm_format_info(mode_cmd->pixel_format);
+ if (!info)
+ return ERR_PTR(-EINVAL);
- for (i = 0; i < drm_format_num_planes(mode_cmd->pixel_format); i++) {
- unsigned int width = mode_cmd->width / (i ? hsub : 1);
- unsigned int height = mode_cmd->height / (i ? vsub : 1);
+ for (i = 0; i < info->num_planes; i++) {
+ unsigned int width = mode_cmd->width / (i ? info->hsub : 1);
+ unsigned int height = mode_cmd->height / (i ? info->vsub : 1);
unsigned int min_size;
obj = drm_gem_object_lookup(file_priv, mode_cmd->handles[i]);
@@ -200,7 +203,7 @@ struct drm_framebuffer *drm_fb_cma_create_with_funcs(struct drm_device *dev,
}
min_size = (height - 1) * mode_cmd->pitches[i]
- + width * drm_format_plane_cpp(mode_cmd->pixel_format, i)
+ + width * info->cpp[i]
+ mode_cmd->offsets[i];
if (obj->size < min_size) {
@@ -265,16 +268,51 @@ struct drm_gem_cma_object *drm_fb_cma_get_gem_obj(struct drm_framebuffer *fb,
}
EXPORT_SYMBOL_GPL(drm_fb_cma_get_gem_obj);
+/**
+ * drm_fb_cma_prepare_fb() - Prepare CMA framebuffer
+ * @plane: Which plane
+ * @state: Plane state to attach the fence to
+ *
+ * This should be used as the prepare_fb hook of &struct drm_plane_helper_funcs.
+ *
+ * This function checks if the plane FB has a dma-buf attached, extracts
+ * the exclusive fence and attaches it to the plane state for the atomic
+ * helper to wait on.
+ *
+ * There is no need for a cleanup_fb hook for CMA based framebuffer drivers.
+ */
+int drm_fb_cma_prepare_fb(struct drm_plane *plane,
+ struct drm_plane_state *state)
+{
+ struct dma_buf *dma_buf;
+ struct dma_fence *fence;
+
+ if ((plane->state->fb == state->fb) || !state->fb)
+ return 0;
+
+ dma_buf = drm_fb_cma_get_gem_obj(state->fb, 0)->base.dma_buf;
+ if (dma_buf) {
+ fence = reservation_object_get_excl_rcu(dma_buf->resv);
+ drm_atomic_set_fence_for_plane(state, fence);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(drm_fb_cma_prepare_fb);
+
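
A driver opts into this helper by pointing its plane helper vtable at it;
a minimal sketch, with my_plane_atomic_update() standing in for the
driver's real callback:

	static const struct drm_plane_helper_funcs my_plane_helper_funcs = {
		.prepare_fb    = drm_fb_cma_prepare_fb,	/* picks up the dma-buf fence */
		.atomic_update = my_plane_atomic_update,	/* hypothetical */
	};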
#ifdef CONFIG_DEBUG_FS
static void drm_fb_cma_describe(struct drm_framebuffer *fb, struct seq_file *m)
{
struct drm_fb_cma *fb_cma = to_fb_cma(fb);
- int i, n = drm_format_num_planes(fb->pixel_format);
+ const struct drm_format_info *info;
+ int i;
seq_printf(m, "fb: %dx%d@%4.4s\n", fb->width, fb->height,
(char *)&fb->pixel_format);
- for (i = 0; i < n; i++) {
+ info = drm_format_info(fb->pixel_format);
+
+ for (i = 0; i < info->num_planes; i++) {
seq_printf(m, " %d: offset=%d pitch=%d, obj: ",
i, fb->offsets[i], fb->pitches[i]);
drm_gem_cma_describe(fb_cma->obj[i], m);
@@ -311,14 +349,10 @@ static int drm_fb_cma_mmap(struct fb_info *info, struct vm_area_struct *vma)
static struct fb_ops drm_fbdev_cma_ops = {
.owner = THIS_MODULE,
+ DRM_FB_HELPER_DEFAULT_OPS,
.fb_fillrect = drm_fb_helper_sys_fillrect,
.fb_copyarea = drm_fb_helper_sys_copyarea,
.fb_imageblit = drm_fb_helper_sys_imageblit,
- .fb_check_var = drm_fb_helper_check_var,
- .fb_set_par = drm_fb_helper_set_par,
- .fb_blank = drm_fb_helper_blank,
- .fb_pan_display = drm_fb_helper_pan_display,
- .fb_setcmap = drm_fb_helper_setcmap,
.fb_mmap = drm_fb_cma_mmap,
};
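
DRM_FB_HELPER_DEFAULT_OPS fills in at least the five generic callbacks
deleted by hand above; a rough sketch of the macro (the exact entry list
may grow with kernel version):

	#define DRM_FB_HELPER_DEFAULT_OPS \
		.fb_check_var	= drm_fb_helper_check_var, \
		.fb_set_par	= drm_fb_helper_set_par, \
		.fb_blank	= drm_fb_helper_blank, \
		.fb_pan_display	= drm_fb_helper_pan_display, \
		.fb_setcmap	= drm_fb_helper_setcmap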
@@ -557,7 +591,8 @@ EXPORT_SYMBOL_GPL(drm_fbdev_cma_init);
void drm_fbdev_cma_fini(struct drm_fbdev_cma *fbdev_cma)
{
drm_fb_helper_unregister_fbi(&fbdev_cma->fb_helper);
- drm_fbdev_cma_defio_fini(fbdev_cma->fb_helper.fbdev);
+ if (fbdev_cma->fb_helper.fbdev)
+ drm_fbdev_cma_defio_fini(fbdev_cma->fb_helper.fbdev);
drm_fb_helper_release_fbi(&fbdev_cma->fb_helper);
if (fbdev_cma->fb) {
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 6c75e62c0b22..e934b541feea 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -49,6 +49,7 @@ MODULE_PARM_DESC(fbdev_emulation,
"Enable legacy fbdev emulation [default=true]");
static LIST_HEAD(kernel_fb_helper_list);
+static DEFINE_MUTEX(kernel_fb_helper_lock);
/**
* DOC: fbdev helpers
@@ -97,6 +98,10 @@ static LIST_HEAD(kernel_fb_helper_list);
* mmap page writes.
*/
+#define drm_fb_helper_for_each_connector(fbh, i__) \
+ for (({ lockdep_assert_held(&(fbh)->dev->mode_config.mutex); }), \
+ i__ = 0; i__ < (fbh)->connector_count; i__++)
+
/**
* drm_fb_helper_single_add_all_connectors() - add all connectors to fbdev
* emulation helper
@@ -130,7 +135,7 @@ int drm_fb_helper_single_add_all_connectors(struct drm_fb_helper *fb_helper)
mutex_unlock(&dev->mode_config.mutex);
return 0;
fail:
- for (i = 0; i < fb_helper->connector_count; i++) {
+ drm_fb_helper_for_each_connector(fb_helper, i) {
struct drm_fb_helper_connector *fb_helper_connector =
fb_helper->connector_info[i];
@@ -256,6 +261,9 @@ int drm_fb_helper_debug_enter(struct fb_info *info)
continue;
funcs = mode_set->crtc->helper_private;
+ if (funcs->mode_set_base_atomic == NULL)
+ continue;
+
drm_fb_helper_save_lut_atomic(mode_set->crtc, helper);
funcs->mode_set_base_atomic(mode_set->crtc,
mode_set->fb,
@@ -309,6 +317,9 @@ int drm_fb_helper_debug_leave(struct fb_info *info)
continue;
}
+ if (funcs->mode_set_base_atomic == NULL)
+ continue;
+
drm_fb_helper_restore_lut_atomic(mode_set->crtc);
funcs->mode_set_base_atomic(mode_set->crtc, fb, crtc->x,
crtc->y, LEAVE_ATOMIC_MODE_SET);
@@ -372,9 +383,7 @@ fail:
if (ret == -EDEADLK)
goto backoff;
- if (ret != 0)
- drm_atomic_state_free(state);
-
+ drm_atomic_state_put(state);
return ret;
backoff:
@@ -399,11 +408,10 @@ static int restore_fbdev_mode(struct drm_fb_helper *fb_helper)
if (plane->type != DRM_PLANE_TYPE_PRIMARY)
drm_plane_force_disable(plane);
- if (dev->mode_config.rotation_property) {
+ if (plane->rotation_property)
drm_mode_plane_set_obj_prop(plane,
- dev->mode_config.rotation_property,
+ plane->rotation_property,
DRM_ROTATE_0);
- }
}
for (i = 0; i < fb_helper->crtc_count; i++) {
@@ -562,7 +570,7 @@ static void drm_fb_helper_dpms(struct fb_info *info, int dpms_mode)
continue;
/* Walk the connectors & encoders on this fb turning them on/off */
- for (j = 0; j < fb_helper->connector_count; j++) {
+ drm_fb_helper_for_each_connector(fb_helper, j) {
connector = fb_helper->connector_info[j]->connector;
connector->funcs->dpms(connector, dpms_mode);
drm_object_property_set_value(&connector->base,
@@ -848,12 +856,14 @@ void drm_fb_helper_fini(struct drm_fb_helper *fb_helper)
if (!drm_fbdev_emulation)
return;
+ mutex_lock(&kernel_fb_helper_lock);
if (!list_empty(&fb_helper->kernel_fb_list)) {
list_del(&fb_helper->kernel_fb_list);
if (list_empty(&kernel_fb_helper_list)) {
unregister_sysrq_key('v', &sysrq_drm_fb_helper_restore_op);
}
}
+ mutex_unlock(&kernel_fb_helper_lock);
drm_fb_helper_crtc_free(fb_helper);
@@ -1238,11 +1248,14 @@ int drm_fb_helper_check_var(struct fb_var_screeninfo *var,
if (var->pixclock != 0 || in_dbg_master())
return -EINVAL;
- /* Need to resize the fb object !!! */
- if (var->bits_per_pixel > fb->bits_per_pixel ||
- var->xres > fb->width || var->yres > fb->height ||
- var->xres_virtual > fb->width || var->yres_virtual > fb->height) {
- DRM_DEBUG("fb userspace requested width/height/bpp is greater than current fb "
+ /*
+ * Changes struct fb_var_screeninfo are currently not pushed back
+ * to KMS, hence fail if different settings are requested.
+ */
+ if (var->bits_per_pixel != fb->bits_per_pixel ||
+ var->xres != fb->width || var->yres != fb->height ||
+ var->xres_virtual != fb->width || var->yres_virtual != fb->height) {
+ DRM_DEBUG("fb userspace requested width/height/bpp different than current fb "
"request %dx%d-%d (virtual %dx%d) > %dx%d-%d\n",
var->xres, var->yres, var->bits_per_pixel,
var->xres_virtual, var->yres_virtual,
@@ -1388,16 +1401,13 @@ retry:
info->var.xoffset = var->xoffset;
info->var.yoffset = var->yoffset;
-
fail:
drm_atomic_clean_old_fb(dev, plane_mask, ret);
if (ret == -EDEADLK)
goto backoff;
- if (ret != 0)
- drm_atomic_state_free(state);
-
+ drm_atomic_state_put(state);
return ret;
backoff:
@@ -1466,7 +1476,6 @@ static int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper,
int ret = 0;
int crtc_count = 0;
int i;
- struct fb_info *info;
struct drm_fb_helper_surface_size sizes;
int gamma_size = 0;
@@ -1482,7 +1491,7 @@ static int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper,
sizes.surface_depth = sizes.surface_bpp = preferred_bpp;
/* first up get a count of crtcs now in use and new min/maxes width/heights */
- for (i = 0; i < fb_helper->connector_count; i++) {
+ drm_fb_helper_for_each_connector(fb_helper, i) {
struct drm_fb_helper_connector *fb_helper_conn = fb_helper->connector_info[i];
struct drm_cmdline_mode *cmdline_mode;
@@ -1569,8 +1578,6 @@ static int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper,
if (ret < 0)
return ret;
- info = fb_helper->fbdev;
-
/*
* Set the fb pointer - usually drm_setup_crtcs does this for hotplug
* events, but at init time drm_setup_crtcs needs to be called before
@@ -1582,20 +1589,6 @@ static int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper,
if (fb_helper->crtc_info[i].mode_set.num_connectors)
fb_helper->crtc_info[i].mode_set.fb = fb_helper->fb;
-
- info->var.pixclock = 0;
- if (register_framebuffer(info) < 0)
- return -EINVAL;
-
- dev_info(fb_helper->dev->dev, "fb%d: %s frame buffer device\n",
- info->node, info->fix.id);
-
- if (list_empty(&kernel_fb_helper_list)) {
- register_sysrq_key('v', &sysrq_drm_fb_helper_restore_op);
- }
-
- list_add(&fb_helper->kernel_fb_list, &kernel_fb_helper_list);
-
return 0;
}
@@ -1727,7 +1720,7 @@ static int drm_fb_helper_probe_connector_modes(struct drm_fb_helper *fb_helper,
int count = 0;
int i;
- for (i = 0; i < fb_helper->connector_count; i++) {
+ drm_fb_helper_for_each_connector(fb_helper, i) {
connector = fb_helper->connector_info[i]->connector;
count += connector->funcs->fill_modes(connector, maxX, maxY);
}
@@ -1827,7 +1820,7 @@ static void drm_enable_connectors(struct drm_fb_helper *fb_helper,
struct drm_connector *connector;
int i = 0;
- for (i = 0; i < fb_helper->connector_count; i++) {
+ drm_fb_helper_for_each_connector(fb_helper, i) {
connector = fb_helper->connector_info[i]->connector;
enabled[i] = drm_connector_enabled(connector, true);
DRM_DEBUG_KMS("connector %d enabled? %s\n", connector->base.id,
@@ -1838,7 +1831,7 @@ static void drm_enable_connectors(struct drm_fb_helper *fb_helper,
if (any_enabled)
return;
- for (i = 0; i < fb_helper->connector_count; i++) {
+ drm_fb_helper_for_each_connector(fb_helper, i) {
connector = fb_helper->connector_info[i]->connector;
enabled[i] = drm_connector_enabled(connector, false);
}
@@ -1859,7 +1852,7 @@ static bool drm_target_cloned(struct drm_fb_helper *fb_helper,
return false;
count = 0;
- for (i = 0; i < fb_helper->connector_count; i++) {
+ drm_fb_helper_for_each_connector(fb_helper, i) {
if (enabled[i])
count++;
}
@@ -1870,7 +1863,7 @@ static bool drm_target_cloned(struct drm_fb_helper *fb_helper,
/* check the command line or if nothing common pick 1024x768 */
can_clone = true;
- for (i = 0; i < fb_helper->connector_count; i++) {
+ drm_fb_helper_for_each_connector(fb_helper, i) {
if (!enabled[i])
continue;
fb_helper_conn = fb_helper->connector_info[i];
@@ -1896,8 +1889,7 @@ static bool drm_target_cloned(struct drm_fb_helper *fb_helper,
can_clone = true;
dmt_mode = drm_mode_find_dmt(fb_helper->dev, 1024, 768, 60, false);
- for (i = 0; i < fb_helper->connector_count; i++) {
-
+ drm_fb_helper_for_each_connector(fb_helper, i) {
if (!enabled[i])
continue;
@@ -1928,7 +1920,7 @@ static int drm_get_tile_offsets(struct drm_fb_helper *fb_helper,
int i;
int hoffset = 0, voffset = 0;
- for (i = 0; i < fb_helper->connector_count; i++) {
+ drm_fb_helper_for_each_connector(fb_helper, i) {
fb_helper_conn = fb_helper->connector_info[i];
if (!fb_helper_conn->connector->has_tile)
continue;
@@ -1956,19 +1948,20 @@ static bool drm_target_preferred(struct drm_fb_helper *fb_helper,
bool *enabled, int width, int height)
{
struct drm_fb_helper_connector *fb_helper_conn;
- int i;
- uint64_t conn_configured = 0, mask;
+ const u64 mask = BIT_ULL(fb_helper->connector_count) - 1;
+ u64 conn_configured = 0;
int tile_pass = 0;
- mask = (1 << fb_helper->connector_count) - 1;
+ int i;
+
retry:
- for (i = 0; i < fb_helper->connector_count; i++) {
+ drm_fb_helper_for_each_connector(fb_helper, i) {
fb_helper_conn = fb_helper->connector_info[i];
- if (conn_configured & (1 << i))
+ if (conn_configured & BIT_ULL(i))
continue;
if (enabled[i] == false) {
- conn_configured |= (1 << i);
+ conn_configured |= BIT_ULL(i);
continue;
}
@@ -2009,7 +2002,7 @@ retry:
}
DRM_DEBUG_KMS("found mode %s\n", modes[i] ? modes[i]->name :
"none");
- conn_configured |= (1 << i);
+ conn_configured |= BIT_ULL(i);
}
if ((conn_configured & mask) != mask) {
@@ -2109,20 +2102,22 @@ out:
return best_score;
}
-static void drm_setup_crtcs(struct drm_fb_helper *fb_helper)
+static void drm_setup_crtcs(struct drm_fb_helper *fb_helper,
+ u32 width, u32 height)
{
struct drm_device *dev = fb_helper->dev;
struct drm_fb_helper_crtc **crtcs;
struct drm_display_mode **modes;
struct drm_fb_offset *offsets;
bool *enabled;
- int width, height;
int i;
DRM_DEBUG_KMS("\n");
+ if (drm_fb_helper_probe_connector_modes(fb_helper, width, height) == 0)
+ DRM_DEBUG_KMS("No connectors reported connected with modes\n");
- width = dev->mode_config.max_width;
- height = dev->mode_config.max_height;
+ /* prevent concurrent modification of connector_count by hotplug */
+ lockdep_assert_held(&fb_helper->dev->mode_config.mutex);
crtcs = kcalloc(fb_helper->connector_count,
sizeof(struct drm_fb_helper_crtc *), GFP_KERNEL);
@@ -2137,7 +2132,6 @@ static void drm_setup_crtcs(struct drm_fb_helper *fb_helper)
goto out;
}
-
drm_enable_connectors(fb_helper, enabled);
if (!(fb_helper->funcs->initial_config &&
@@ -2166,7 +2160,7 @@ static void drm_setup_crtcs(struct drm_fb_helper *fb_helper)
drm_fb_helper_modeset_release(fb_helper,
&fb_helper->crtc_info[i].mode_set);
- for (i = 0; i < fb_helper->connector_count; i++) {
+ drm_fb_helper_for_each_connector(fb_helper, i) {
struct drm_display_mode *mode = modes[i];
struct drm_fb_helper_crtc *fb_crtc = crtcs[i];
struct drm_fb_offset *offset = &offsets[i];
@@ -2243,25 +2237,38 @@ out:
int drm_fb_helper_initial_config(struct drm_fb_helper *fb_helper, int bpp_sel)
{
struct drm_device *dev = fb_helper->dev;
- int count = 0;
+ struct fb_info *info;
+ int ret;
if (!drm_fbdev_emulation)
return 0;
mutex_lock(&dev->mode_config.mutex);
- count = drm_fb_helper_probe_connector_modes(fb_helper,
- dev->mode_config.max_width,
- dev->mode_config.max_height);
+ drm_setup_crtcs(fb_helper,
+ dev->mode_config.max_width,
+ dev->mode_config.max_height);
+ ret = drm_fb_helper_single_fb_probe(fb_helper, bpp_sel);
mutex_unlock(&dev->mode_config.mutex);
- /*
- * we shouldn't end up with no modes here.
- */
- if (count == 0)
- dev_info(fb_helper->dev->dev, "No connectors reported connected with modes\n");
+ if (ret)
+ return ret;
+
+ info = fb_helper->fbdev;
+ info->var.pixclock = 0;
+ ret = register_framebuffer(info);
+ if (ret < 0)
+ return ret;
+
+ dev_info(dev->dev, "fb%d: %s frame buffer device\n",
+ info->node, info->fix.id);
+
+ mutex_lock(&kernel_fb_helper_lock);
+ if (list_empty(&kernel_fb_helper_list))
+ register_sysrq_key('v', &sysrq_drm_fb_helper_restore_op);
- drm_setup_crtcs(fb_helper);
+ list_add(&fb_helper->kernel_fb_list, &kernel_fb_helper_list);
+ mutex_unlock(&kernel_fb_helper_lock);
- return drm_fb_helper_single_fb_probe(fb_helper, bpp_sel);
+ return 0;
}
EXPORT_SYMBOL(drm_fb_helper_initial_config);
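
With framebuffer registration folded in here, the typical fbdev bring-up
sequence in a driver stays the same four calls; a sketch with error
handling elided and my_fb_helper_funcs as a hypothetical driver vtable:

	drm_fb_helper_prepare(dev, helper, &my_fb_helper_funcs);
	drm_fb_helper_init(dev, helper, num_crtc, max_conn);
	drm_fb_helper_single_add_all_connectors(helper);
	drm_fb_helper_initial_config(helper, 32);	/* now also registers the fbdev */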
@@ -2289,28 +2296,22 @@ EXPORT_SYMBOL(drm_fb_helper_initial_config);
int drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper)
{
struct drm_device *dev = fb_helper->dev;
- u32 max_width, max_height;
if (!drm_fbdev_emulation)
return 0;
- mutex_lock(&fb_helper->dev->mode_config.mutex);
+ mutex_lock(&dev->mode_config.mutex);
if (!fb_helper->fb || !drm_fb_helper_is_bound(fb_helper)) {
fb_helper->delayed_hotplug = true;
- mutex_unlock(&fb_helper->dev->mode_config.mutex);
+ mutex_unlock(&dev->mode_config.mutex);
return 0;
}
DRM_DEBUG_KMS("\n");
- max_width = fb_helper->fb->width;
- max_height = fb_helper->fb->height;
+ drm_setup_crtcs(fb_helper, fb_helper->fb->width, fb_helper->fb->height);
- drm_fb_helper_probe_connector_modes(fb_helper, max_width, max_height);
- mutex_unlock(&fb_helper->dev->mode_config.mutex);
+ mutex_unlock(&dev->mode_config.mutex);
- drm_modeset_lock_all(dev);
- drm_setup_crtcs(fb_helper);
- drm_modeset_unlock_all(dev);
drm_fb_helper_set_par(fb_helper->fbdev);
return 0;
diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
index e84faecf5225..5d96de40b63f 100644
--- a/drivers/gpu/drm/drm_fops.c
+++ b/drivers/gpu/drm/drm_fops.c
@@ -51,10 +51,11 @@ DEFINE_MUTEX(drm_global_mutex);
* Drivers must define the file operations structure that forms the DRM
* userspace API entry point, even though most of those operations are
* implemented in the DRM core. The mandatory functions are drm_open(),
- * drm_read(), drm_ioctl() and drm_compat_ioctl if CONFIG_COMPAT is enabled.
- * Drivers which implement private ioctls that require 32/64 bit compatibility
- * support must provided their onw .compat_ioctl() handler that processes
- * private ioctls and calls drm_compat_ioctl() for core ioctls.
+ * drm_read(), drm_ioctl() and drm_compat_ioctl() if CONFIG_COMPAT is enabled
+ * (note that drm_compat_ioctl will be NULL if CONFIG_COMPAT=n). Drivers which
+ * implement private ioctls that require 32/64 bit compatibility support must
+ * provide their own .compat_ioctl() handler that processes private ioctls and
+ * calls drm_compat_ioctl() for core ioctls.
*
* In addition drm_read() and drm_poll() provide support for DRM events. DRM
* events are a generic and extensible means to send asynchronous events to
@@ -75,9 +76,7 @@ DEFINE_MUTEX(drm_global_mutex);
* .open = drm_open,
* .release = drm_release,
* .unlocked_ioctl = drm_ioctl,
- * #ifdef CONFIG_COMPAT
- * .compat_ioctl = drm_compat_ioctl,
- * #endif
+ * .compat_ioctl = drm_compat_ioctl, // NULL if CONFIG_COMPAT=n
* .poll = drm_poll,
* .read = drm_read,
* .llseek = no_llseek,
@@ -663,6 +662,10 @@ void drm_event_cancel_free(struct drm_device *dev,
list_del(&p->pending_link);
}
spin_unlock_irqrestore(&dev->event_lock, flags);
+
+ if (p->fence)
+ dma_fence_put(p->fence);
+
kfree(p);
}
EXPORT_SYMBOL(drm_event_cancel_free);
@@ -692,8 +695,8 @@ void drm_send_event_locked(struct drm_device *dev, struct drm_pending_event *e)
}
if (e->fence) {
- fence_signal(e->fence);
- fence_put(e->fence);
+ dma_fence_signal(e->fence);
+ dma_fence_put(e->fence);
}
if (!e->file_priv) {
diff --git a/drivers/gpu/drm/drm_fourcc.c b/drivers/gpu/drm/drm_fourcc.c
index 29c56b4331e0..90d2cc8da8eb 100644
--- a/drivers/gpu/drm/drm_fourcc.c
+++ b/drivers/gpu/drm/drm_fourcc.c
@@ -79,17 +79,13 @@ uint32_t drm_mode_legacy_fb_format(uint32_t bpp, uint32_t depth)
EXPORT_SYMBOL(drm_mode_legacy_fb_format);
/**
- * drm_get_format_name - return a string for drm fourcc format
+ * drm_get_format_name - fill a string with a drm fourcc format's name
* @format: format to compute name of
- *
- * Note that the buffer returned by this function is owned by the caller
- * and will need to be freed using kfree().
+ * @buf: caller-supplied buffer
*/
-char *drm_get_format_name(uint32_t format)
+const char *drm_get_format_name(uint32_t format, struct drm_format_name_buf *buf)
{
- char *buf = kmalloc(32, GFP_KERNEL);
-
- snprintf(buf, 32,
+ snprintf(buf->str, sizeof(buf->str),
"%c%c%c%c %s-endian (0x%08x)",
printable_char(format & 0xff),
printable_char((format >> 8) & 0xff),
@@ -98,87 +94,109 @@ char *drm_get_format_name(uint32_t format)
format & DRM_FORMAT_BIG_ENDIAN ? "big" : "little",
format);
- return buf;
+ return buf->str;
}
EXPORT_SYMBOL(drm_get_format_name);
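
With the caller-supplied buffer there is no allocation to free any more;
the usual call pattern becomes:

	struct drm_format_name_buf format_name;

	DRM_DEBUG_KMS("pixel format %s\n",
		      drm_get_format_name(fb->pixel_format, &format_name));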
+/*
+ * Internal function to query information for a given format. See
+ * drm_format_info() for the public API.
+ */
+const struct drm_format_info *__drm_format_info(u32 format)
+{
+ static const struct drm_format_info formats[] = {
+ { .format = DRM_FORMAT_C8, .depth = 8, .num_planes = 1, .cpp = { 1, 0, 0 }, .hsub = 1, .vsub = 1 },
+ { .format = DRM_FORMAT_RGB332, .depth = 8, .num_planes = 1, .cpp = { 1, 0, 0 }, .hsub = 1, .vsub = 1 },
+ { .format = DRM_FORMAT_BGR233, .depth = 8, .num_planes = 1, .cpp = { 1, 0, 0 }, .hsub = 1, .vsub = 1 },
+ { .format = DRM_FORMAT_XRGB4444, .depth = 0, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1 },
+ { .format = DRM_FORMAT_XBGR4444, .depth = 0, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1 },
+ { .format = DRM_FORMAT_RGBX4444, .depth = 0, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1 },
+ { .format = DRM_FORMAT_BGRX4444, .depth = 0, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1 },
+ { .format = DRM_FORMAT_ARGB4444, .depth = 0, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1 },
+ { .format = DRM_FORMAT_ABGR4444, .depth = 0, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1 },
+ { .format = DRM_FORMAT_RGBA4444, .depth = 0, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1 },
+ { .format = DRM_FORMAT_BGRA4444, .depth = 0, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1 },
+ { .format = DRM_FORMAT_XRGB1555, .depth = 15, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1 },
+ { .format = DRM_FORMAT_XBGR1555, .depth = 15, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1 },
+ { .format = DRM_FORMAT_RGBX5551, .depth = 15, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1 },
+ { .format = DRM_FORMAT_BGRX5551, .depth = 15, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1 },
+ { .format = DRM_FORMAT_ARGB1555, .depth = 15, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1 },
+ { .format = DRM_FORMAT_ABGR1555, .depth = 15, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1 },
+ { .format = DRM_FORMAT_RGBA5551, .depth = 15, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1 },
+ { .format = DRM_FORMAT_BGRA5551, .depth = 15, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1 },
+ { .format = DRM_FORMAT_RGB565, .depth = 16, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1 },
+ { .format = DRM_FORMAT_BGR565, .depth = 16, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1 },
+ { .format = DRM_FORMAT_RGB888, .depth = 24, .num_planes = 1, .cpp = { 3, 0, 0 }, .hsub = 1, .vsub = 1 },
+ { .format = DRM_FORMAT_BGR888, .depth = 24, .num_planes = 1, .cpp = { 3, 0, 0 }, .hsub = 1, .vsub = 1 },
+ { .format = DRM_FORMAT_XRGB8888, .depth = 24, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1 },
+ { .format = DRM_FORMAT_XBGR8888, .depth = 24, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1 },
+ { .format = DRM_FORMAT_RGBX8888, .depth = 24, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1 },
+ { .format = DRM_FORMAT_BGRX8888, .depth = 24, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1 },
+ { .format = DRM_FORMAT_XRGB2101010, .depth = 30, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1 },
+ { .format = DRM_FORMAT_XBGR2101010, .depth = 30, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1 },
+ { .format = DRM_FORMAT_RGBX1010102, .depth = 30, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1 },
+ { .format = DRM_FORMAT_BGRX1010102, .depth = 30, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1 },
+ { .format = DRM_FORMAT_ARGB2101010, .depth = 30, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1 },
+ { .format = DRM_FORMAT_ABGR2101010, .depth = 30, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1 },
+ { .format = DRM_FORMAT_RGBA1010102, .depth = 30, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1 },
+ { .format = DRM_FORMAT_BGRA1010102, .depth = 30, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1 },
+ { .format = DRM_FORMAT_ARGB8888, .depth = 32, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1 },
+ { .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1 },
+ { .format = DRM_FORMAT_RGBA8888, .depth = 32, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1 },
+ { .format = DRM_FORMAT_BGRA8888, .depth = 32, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1 },
+ { .format = DRM_FORMAT_YUV410, .depth = 0, .num_planes = 3, .cpp = { 1, 1, 1 }, .hsub = 4, .vsub = 4 },
+ { .format = DRM_FORMAT_YVU410, .depth = 0, .num_planes = 3, .cpp = { 1, 1, 1 }, .hsub = 4, .vsub = 4 },
+ { .format = DRM_FORMAT_YUV411, .depth = 0, .num_planes = 3, .cpp = { 1, 1, 1 }, .hsub = 4, .vsub = 1 },
+ { .format = DRM_FORMAT_YVU411, .depth = 0, .num_planes = 3, .cpp = { 1, 1, 1 }, .hsub = 4, .vsub = 1 },
+ { .format = DRM_FORMAT_YUV420, .depth = 0, .num_planes = 3, .cpp = { 1, 1, 1 }, .hsub = 2, .vsub = 2 },
+ { .format = DRM_FORMAT_YVU420, .depth = 0, .num_planes = 3, .cpp = { 1, 1, 1 }, .hsub = 2, .vsub = 2 },
+ { .format = DRM_FORMAT_YUV422, .depth = 0, .num_planes = 3, .cpp = { 1, 1, 1 }, .hsub = 2, .vsub = 1 },
+ { .format = DRM_FORMAT_YVU422, .depth = 0, .num_planes = 3, .cpp = { 1, 1, 1 }, .hsub = 2, .vsub = 1 },
+ { .format = DRM_FORMAT_YUV444, .depth = 0, .num_planes = 3, .cpp = { 1, 1, 1 }, .hsub = 1, .vsub = 1 },
+ { .format = DRM_FORMAT_YVU444, .depth = 0, .num_planes = 3, .cpp = { 1, 1, 1 }, .hsub = 1, .vsub = 1 },
+ { .format = DRM_FORMAT_NV12, .depth = 0, .num_planes = 2, .cpp = { 1, 2, 0 }, .hsub = 2, .vsub = 2 },
+ { .format = DRM_FORMAT_NV21, .depth = 0, .num_planes = 2, .cpp = { 1, 2, 0 }, .hsub = 2, .vsub = 2 },
+ { .format = DRM_FORMAT_NV16, .depth = 0, .num_planes = 2, .cpp = { 1, 2, 0 }, .hsub = 2, .vsub = 1 },
+ { .format = DRM_FORMAT_NV61, .depth = 0, .num_planes = 2, .cpp = { 1, 2, 0 }, .hsub = 2, .vsub = 1 },
+ { .format = DRM_FORMAT_NV24, .depth = 0, .num_planes = 2, .cpp = { 1, 2, 0 }, .hsub = 1, .vsub = 1 },
+ { .format = DRM_FORMAT_NV42, .depth = 0, .num_planes = 2, .cpp = { 1, 2, 0 }, .hsub = 1, .vsub = 1 },
+ { .format = DRM_FORMAT_YUYV, .depth = 0, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 2, .vsub = 1 },
+ { .format = DRM_FORMAT_YVYU, .depth = 0, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 2, .vsub = 1 },
+ { .format = DRM_FORMAT_UYVY, .depth = 0, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 2, .vsub = 1 },
+ { .format = DRM_FORMAT_VYUY, .depth = 0, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 2, .vsub = 1 },
+ { .format = DRM_FORMAT_AYUV, .depth = 0, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1 },
+ };
+
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(formats); ++i) {
+ if (formats[i].format == format)
+ return &formats[i];
+ }
+
+ return NULL;
+}
+
/**
- * drm_fb_get_bpp_depth - get the bpp/depth values for format
+ * drm_format_info - query information for a given format
* @format: pixel format (DRM_FORMAT_*)
- * @depth: storage for the depth value
- * @bpp: storage for the bpp value
*
- * This only supports RGB formats here for compat with code that doesn't use
- * pixel formats directly yet.
+ * The caller should only pass a supported pixel format to this function.
+ * Unsupported pixel formats will generate a warning in the kernel log.
+ *
+ * Returns:
+ * The instance of struct drm_format_info that describes the pixel format, or
+ * NULL if the format is unsupported.
*/
-void drm_fb_get_bpp_depth(uint32_t format, unsigned int *depth,
- int *bpp)
+const struct drm_format_info *drm_format_info(u32 format)
{
- char *format_name;
+ const struct drm_format_info *info;
- switch (format) {
- case DRM_FORMAT_C8:
- case DRM_FORMAT_RGB332:
- case DRM_FORMAT_BGR233:
- *depth = 8;
- *bpp = 8;
- break;
- case DRM_FORMAT_XRGB1555:
- case DRM_FORMAT_XBGR1555:
- case DRM_FORMAT_RGBX5551:
- case DRM_FORMAT_BGRX5551:
- case DRM_FORMAT_ARGB1555:
- case DRM_FORMAT_ABGR1555:
- case DRM_FORMAT_RGBA5551:
- case DRM_FORMAT_BGRA5551:
- *depth = 15;
- *bpp = 16;
- break;
- case DRM_FORMAT_RGB565:
- case DRM_FORMAT_BGR565:
- *depth = 16;
- *bpp = 16;
- break;
- case DRM_FORMAT_RGB888:
- case DRM_FORMAT_BGR888:
- *depth = 24;
- *bpp = 24;
- break;
- case DRM_FORMAT_XRGB8888:
- case DRM_FORMAT_XBGR8888:
- case DRM_FORMAT_RGBX8888:
- case DRM_FORMAT_BGRX8888:
- *depth = 24;
- *bpp = 32;
- break;
- case DRM_FORMAT_XRGB2101010:
- case DRM_FORMAT_XBGR2101010:
- case DRM_FORMAT_RGBX1010102:
- case DRM_FORMAT_BGRX1010102:
- case DRM_FORMAT_ARGB2101010:
- case DRM_FORMAT_ABGR2101010:
- case DRM_FORMAT_RGBA1010102:
- case DRM_FORMAT_BGRA1010102:
- *depth = 30;
- *bpp = 32;
- break;
- case DRM_FORMAT_ARGB8888:
- case DRM_FORMAT_ABGR8888:
- case DRM_FORMAT_RGBA8888:
- case DRM_FORMAT_BGRA8888:
- *depth = 32;
- *bpp = 32;
- break;
- default:
- format_name = drm_get_format_name(format);
- DRM_DEBUG_KMS("unsupported pixel format %s\n", format_name);
- kfree(format_name);
- *depth = 0;
- *bpp = 0;
- break;
- }
+ info = __drm_format_info(format);
+ WARN_ON(!info);
+ return info;
}
-EXPORT_SYMBOL(drm_fb_get_bpp_depth);
+EXPORT_SYMBOL(drm_format_info);
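
One lookup now replaces the separate num_planes/cpp/hsub/vsub queries; a
hedged sketch computing the minimum pitch of a plane, assuming a linear
layout:

	static unsigned int min_pitch(u32 format, int plane, unsigned int width)
	{
		const struct drm_format_info *info = drm_format_info(format);

		if (!info || plane >= info->num_planes)
			return 0;

		/* chroma planes are horizontally subsampled by hsub */
		return (plane ? width / info->hsub : width) * info->cpp[plane];
	}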
/**
* drm_format_num_planes - get the number of planes for format
@@ -189,28 +207,10 @@ EXPORT_SYMBOL(drm_fb_get_bpp_depth);
*/
int drm_format_num_planes(uint32_t format)
{
- switch (format) {
- case DRM_FORMAT_YUV410:
- case DRM_FORMAT_YVU410:
- case DRM_FORMAT_YUV411:
- case DRM_FORMAT_YVU411:
- case DRM_FORMAT_YUV420:
- case DRM_FORMAT_YVU420:
- case DRM_FORMAT_YUV422:
- case DRM_FORMAT_YVU422:
- case DRM_FORMAT_YUV444:
- case DRM_FORMAT_YVU444:
- return 3;
- case DRM_FORMAT_NV12:
- case DRM_FORMAT_NV21:
- case DRM_FORMAT_NV16:
- case DRM_FORMAT_NV61:
- case DRM_FORMAT_NV24:
- case DRM_FORMAT_NV42:
- return 2;
- default:
- return 1;
- }
+ const struct drm_format_info *info;
+
+ info = drm_format_info(format);
+ return info ? info->num_planes : 1;
}
EXPORT_SYMBOL(drm_format_num_planes);
@@ -224,40 +224,13 @@ EXPORT_SYMBOL(drm_format_num_planes);
*/
int drm_format_plane_cpp(uint32_t format, int plane)
{
- unsigned int depth;
- int bpp;
+ const struct drm_format_info *info;
- if (plane >= drm_format_num_planes(format))
+ info = drm_format_info(format);
+ if (!info || plane >= info->num_planes)
return 0;
- switch (format) {
- case DRM_FORMAT_YUYV:
- case DRM_FORMAT_YVYU:
- case DRM_FORMAT_UYVY:
- case DRM_FORMAT_VYUY:
- return 2;
- case DRM_FORMAT_NV12:
- case DRM_FORMAT_NV21:
- case DRM_FORMAT_NV16:
- case DRM_FORMAT_NV61:
- case DRM_FORMAT_NV24:
- case DRM_FORMAT_NV42:
- return plane ? 2 : 1;
- case DRM_FORMAT_YUV410:
- case DRM_FORMAT_YVU410:
- case DRM_FORMAT_YUV411:
- case DRM_FORMAT_YVU411:
- case DRM_FORMAT_YUV420:
- case DRM_FORMAT_YVU420:
- case DRM_FORMAT_YUV422:
- case DRM_FORMAT_YVU422:
- case DRM_FORMAT_YUV444:
- case DRM_FORMAT_YVU444:
- return 1;
- default:
- drm_fb_get_bpp_depth(format, &depth, &bpp);
- return bpp >> 3;
- }
+ return info->cpp[plane];
}
EXPORT_SYMBOL(drm_format_plane_cpp);
@@ -271,28 +244,10 @@ EXPORT_SYMBOL(drm_format_plane_cpp);
*/
int drm_format_horz_chroma_subsampling(uint32_t format)
{
- switch (format) {
- case DRM_FORMAT_YUV411:
- case DRM_FORMAT_YVU411:
- case DRM_FORMAT_YUV410:
- case DRM_FORMAT_YVU410:
- return 4;
- case DRM_FORMAT_YUYV:
- case DRM_FORMAT_YVYU:
- case DRM_FORMAT_UYVY:
- case DRM_FORMAT_VYUY:
- case DRM_FORMAT_NV12:
- case DRM_FORMAT_NV21:
- case DRM_FORMAT_NV16:
- case DRM_FORMAT_NV61:
- case DRM_FORMAT_YUV422:
- case DRM_FORMAT_YVU422:
- case DRM_FORMAT_YUV420:
- case DRM_FORMAT_YVU420:
- return 2;
- default:
- return 1;
- }
+ const struct drm_format_info *info;
+
+ info = drm_format_info(format);
+ return info ? info->hsub : 1;
}
EXPORT_SYMBOL(drm_format_horz_chroma_subsampling);
@@ -306,18 +261,10 @@ EXPORT_SYMBOL(drm_format_horz_chroma_subsampling);
*/
int drm_format_vert_chroma_subsampling(uint32_t format)
{
- switch (format) {
- case DRM_FORMAT_YUV410:
- case DRM_FORMAT_YVU410:
- return 4;
- case DRM_FORMAT_YUV420:
- case DRM_FORMAT_YVU420:
- case DRM_FORMAT_NV12:
- case DRM_FORMAT_NV21:
- return 2;
- default:
- return 1;
- }
+ const struct drm_format_info *info;
+
+ info = drm_format_info(format);
+ return info ? info->vsub : 1;
}
EXPORT_SYMBOL(drm_format_vert_chroma_subsampling);
@@ -332,13 +279,16 @@ EXPORT_SYMBOL(drm_format_vert_chroma_subsampling);
*/
int drm_format_plane_width(int width, uint32_t format, int plane)
{
- if (plane >= drm_format_num_planes(format))
+ const struct drm_format_info *info;
+
+ info = drm_format_info(format);
+ if (!info || plane >= info->num_planes)
return 0;
if (plane == 0)
return width;
- return width / drm_format_horz_chroma_subsampling(format);
+ return width / info->hsub;
}
EXPORT_SYMBOL(drm_format_plane_width);
@@ -353,12 +303,15 @@ EXPORT_SYMBOL(drm_format_plane_width);
*/
int drm_format_plane_height(int height, uint32_t format, int plane)
{
- if (plane >= drm_format_num_planes(format))
+ const struct drm_format_info *info;
+
+ info = drm_format_info(format);
+ if (!info || plane >= info->num_planes)
return 0;
if (plane == 0)
return height;
- return height / drm_format_vert_chroma_subsampling(format);
+ return height / info->vsub;
}
EXPORT_SYMBOL(drm_format_plane_height);
diff --git a/drivers/gpu/drm/drm_framebuffer.c b/drivers/gpu/drm/drm_framebuffer.c
index 398efd67cb93..cbf0c893f426 100644
--- a/drivers/gpu/drm/drm_framebuffer.c
+++ b/drivers/gpu/drm/drm_framebuffer.c
@@ -126,111 +126,34 @@ int drm_mode_addfb(struct drm_device *dev,
return 0;
}
-static int format_check(const struct drm_mode_fb_cmd2 *r)
-{
- uint32_t format = r->pixel_format & ~DRM_FORMAT_BIG_ENDIAN;
- char *format_name;
-
- switch (format) {
- case DRM_FORMAT_C8:
- case DRM_FORMAT_RGB332:
- case DRM_FORMAT_BGR233:
- case DRM_FORMAT_XRGB4444:
- case DRM_FORMAT_XBGR4444:
- case DRM_FORMAT_RGBX4444:
- case DRM_FORMAT_BGRX4444:
- case DRM_FORMAT_ARGB4444:
- case DRM_FORMAT_ABGR4444:
- case DRM_FORMAT_RGBA4444:
- case DRM_FORMAT_BGRA4444:
- case DRM_FORMAT_XRGB1555:
- case DRM_FORMAT_XBGR1555:
- case DRM_FORMAT_RGBX5551:
- case DRM_FORMAT_BGRX5551:
- case DRM_FORMAT_ARGB1555:
- case DRM_FORMAT_ABGR1555:
- case DRM_FORMAT_RGBA5551:
- case DRM_FORMAT_BGRA5551:
- case DRM_FORMAT_RGB565:
- case DRM_FORMAT_BGR565:
- case DRM_FORMAT_RGB888:
- case DRM_FORMAT_BGR888:
- case DRM_FORMAT_XRGB8888:
- case DRM_FORMAT_XBGR8888:
- case DRM_FORMAT_RGBX8888:
- case DRM_FORMAT_BGRX8888:
- case DRM_FORMAT_ARGB8888:
- case DRM_FORMAT_ABGR8888:
- case DRM_FORMAT_RGBA8888:
- case DRM_FORMAT_BGRA8888:
- case DRM_FORMAT_XRGB2101010:
- case DRM_FORMAT_XBGR2101010:
- case DRM_FORMAT_RGBX1010102:
- case DRM_FORMAT_BGRX1010102:
- case DRM_FORMAT_ARGB2101010:
- case DRM_FORMAT_ABGR2101010:
- case DRM_FORMAT_RGBA1010102:
- case DRM_FORMAT_BGRA1010102:
- case DRM_FORMAT_YUYV:
- case DRM_FORMAT_YVYU:
- case DRM_FORMAT_UYVY:
- case DRM_FORMAT_VYUY:
- case DRM_FORMAT_AYUV:
- case DRM_FORMAT_NV12:
- case DRM_FORMAT_NV21:
- case DRM_FORMAT_NV16:
- case DRM_FORMAT_NV61:
- case DRM_FORMAT_NV24:
- case DRM_FORMAT_NV42:
- case DRM_FORMAT_YUV410:
- case DRM_FORMAT_YVU410:
- case DRM_FORMAT_YUV411:
- case DRM_FORMAT_YVU411:
- case DRM_FORMAT_YUV420:
- case DRM_FORMAT_YVU420:
- case DRM_FORMAT_YUV422:
- case DRM_FORMAT_YVU422:
- case DRM_FORMAT_YUV444:
- case DRM_FORMAT_YVU444:
- return 0;
- default:
- format_name = drm_get_format_name(r->pixel_format);
- DRM_DEBUG_KMS("invalid pixel format %s\n", format_name);
- kfree(format_name);
- return -EINVAL;
- }
-}
-
static int framebuffer_check(const struct drm_mode_fb_cmd2 *r)
{
- int ret, hsub, vsub, num_planes, i;
-
- ret = format_check(r);
- if (ret) {
- char *format_name = drm_get_format_name(r->pixel_format);
- DRM_DEBUG_KMS("bad framebuffer format %s\n", format_name);
- kfree(format_name);
- return ret;
+ const struct drm_format_info *info;
+ int i;
+
+ info = __drm_format_info(r->pixel_format & ~DRM_FORMAT_BIG_ENDIAN);
+ if (!info) {
+ struct drm_format_name_buf format_name;
+ DRM_DEBUG_KMS("bad framebuffer format %s\n",
+ drm_get_format_name(r->pixel_format,
+ &format_name));
+ return -EINVAL;
}
- hsub = drm_format_horz_chroma_subsampling(r->pixel_format);
- vsub = drm_format_vert_chroma_subsampling(r->pixel_format);
- num_planes = drm_format_num_planes(r->pixel_format);
-
- if (r->width == 0 || r->width % hsub) {
+ if (r->width == 0 || r->width % info->hsub) {
DRM_DEBUG_KMS("bad framebuffer width %u\n", r->width);
return -EINVAL;
}
- if (r->height == 0 || r->height % vsub) {
+ if (r->height == 0 || r->height % info->vsub) {
DRM_DEBUG_KMS("bad framebuffer height %u\n", r->height);
return -EINVAL;
}
- for (i = 0; i < num_planes; i++) {
- unsigned int width = r->width / (i != 0 ? hsub : 1);
- unsigned int height = r->height / (i != 0 ? vsub : 1);
- unsigned int cpp = drm_format_plane_cpp(r->pixel_format, i);
+ for (i = 0; i < info->num_planes; i++) {
+ unsigned int width = r->width / (i != 0 ? info->hsub : 1);
+ unsigned int height = r->height / (i != 0 ? info->vsub : 1);
+ unsigned int cpp = info->cpp[i];
if (!r->handles[i]) {
DRM_DEBUG_KMS("no buffer object handle for plane %d\n", i);
@@ -254,6 +177,13 @@ static int framebuffer_check(const struct drm_mode_fb_cmd2 *r)
return -EINVAL;
}
+ if (r->flags & DRM_MODE_FB_MODIFIERS &&
+ r->modifier[i] != r->modifier[0]) {
+ DRM_DEBUG_KMS("bad fb modifier %llu for plane %d\n",
+ r->modifier[i], i);
+ return -EINVAL;
+ }
+
/* modifier specific checks: */
switch (r->modifier[i]) {
case DRM_FORMAT_MOD_SAMSUNG_64_32_TILE:
@@ -273,7 +203,7 @@ static int framebuffer_check(const struct drm_mode_fb_cmd2 *r)
}
}
- for (i = num_planes; i < 4; i++) {
+ for (i = info->num_planes; i < 4; i++) {
if (r->modifier[i]) {
DRM_DEBUG_KMS("non-zero modifier for unused plane %d\n", i);
return -EINVAL;
@@ -751,6 +681,11 @@ EXPORT_SYMBOL(drm_framebuffer_lookup);
 * those used for fbdev. Note that the caller must hold a reference of its own,
* i.e. the object may not be destroyed through this call (since it'll lead to a
* locking inversion).
+ *
+ * NOTE: This function is deprecated. For driver-private framebuffers it is not
+ * recommended to embed a framebuffer struct into the fbdev struct; instead, a
+ * framebuffer pointer is preferred, and drm_framebuffer_unreference() should be
+ * called when the framebuffer is to be cleaned up.
*/
void drm_framebuffer_unregister_private(struct drm_framebuffer *fb)
{
diff --git a/drivers/gpu/drm/drm_internal.h b/drivers/gpu/drm/drm_internal.h
index e66af289a016..db80ec860e33 100644
--- a/drivers/gpu/drm/drm_internal.h
+++ b/drivers/gpu/drm/drm_internal.h
@@ -24,9 +24,6 @@
#define DRM_IF_MAJOR 1
#define DRM_IF_MINOR 4
-/* drm_irq.c */
-extern unsigned int drm_timestamp_monotonic;
-
/* drm_fops.c */
extern struct mutex drm_global_mutex;
void drm_lastclose(struct drm_device *dev);
@@ -46,12 +43,21 @@ void drm_prime_destroy_file_private(struct drm_prime_file_private *prime_fpriv);
void drm_prime_remove_buf_handle_locked(struct drm_prime_file_private *prime_fpriv,
struct dma_buf *dma_buf);
+/* drm_drv.c */
+struct drm_minor *drm_minor_acquire(unsigned int minor_id);
+void drm_minor_release(struct drm_minor *minor);
+
/* drm_info.c */
int drm_name_info(struct seq_file *m, void *data);
int drm_clients_info(struct seq_file *m, void* data);
int drm_gem_name_info(struct seq_file *m, void *data);
/* drm_irq.c */
+extern unsigned int drm_timestamp_monotonic;
+
+/* IOCTLS */
+int drm_wait_vblank(struct drm_device *dev, void *data,
+ struct drm_file *filp);
int drm_control(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int drm_modeset_ctl(struct drm_device *dev, void *data,
@@ -100,6 +106,9 @@ int drm_debugfs_init(struct drm_minor *minor, int minor_id,
int drm_debugfs_cleanup(struct drm_minor *minor);
int drm_debugfs_connector_add(struct drm_connector *connector);
void drm_debugfs_connector_remove(struct drm_connector *connector);
+int drm_debugfs_crtc_add(struct drm_crtc *crtc);
+void drm_debugfs_crtc_remove(struct drm_crtc *crtc);
+int drm_debugfs_crtc_crc_add(struct drm_crtc *crtc);
#else
static inline int drm_debugfs_init(struct drm_minor *minor, int minor_id,
struct dentry *root)
@@ -119,4 +128,17 @@ static inline int drm_debugfs_connector_add(struct drm_connector *connector)
static inline void drm_debugfs_connector_remove(struct drm_connector *connector)
{
}
+
+static inline int drm_debugfs_crtc_add(struct drm_crtc *crtc)
+{
+ return 0;
+}
+static inline void drm_debugfs_crtc_remove(struct drm_crtc *crtc)
+{
+}
+
+static inline int drm_debugfs_crtc_crc_add(struct drm_crtc *crtc)
+{
+ return 0;
+}
#endif
diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
index 71c3473476c7..fed22c2b98b6 100644
--- a/drivers/gpu/drm/drm_ioctl.c
+++ b/drivers/gpu/drm/drm_ioctl.c
@@ -229,6 +229,22 @@ static int drm_getcap(struct drm_device *dev, void *data, struct drm_file *file_
struct drm_crtc *crtc;
req->value = 0;
+
+ /* Only some caps make sense with UMS/render-only drivers. */
+ switch (req->capability) {
+ case DRM_CAP_TIMESTAMP_MONOTONIC:
+ req->value = drm_timestamp_monotonic;
+ return 0;
+ case DRM_CAP_PRIME:
+ req->value |= dev->driver->prime_fd_to_handle ? DRM_PRIME_CAP_IMPORT : 0;
+ req->value |= dev->driver->prime_handle_to_fd ? DRM_PRIME_CAP_EXPORT : 0;
+ return 0;
+ }
+
+ /* Other caps only work with KMS drivers */
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return -ENOTSUPP;
+
switch (req->capability) {
case DRM_CAP_DUMB_BUFFER:
if (dev->driver->dumb_create)
@@ -243,23 +259,14 @@ static int drm_getcap(struct drm_device *dev, void *data, struct drm_file *file_
case DRM_CAP_DUMB_PREFER_SHADOW:
req->value = dev->mode_config.prefer_shadow;
break;
- case DRM_CAP_PRIME:
- req->value |= dev->driver->prime_fd_to_handle ? DRM_PRIME_CAP_IMPORT : 0;
- req->value |= dev->driver->prime_handle_to_fd ? DRM_PRIME_CAP_EXPORT : 0;
- break;
- case DRM_CAP_TIMESTAMP_MONOTONIC:
- req->value = drm_timestamp_monotonic;
- break;
case DRM_CAP_ASYNC_PAGE_FLIP:
req->value = dev->mode_config.async_page_flip;
break;
case DRM_CAP_PAGE_FLIP_TARGET:
- if (drm_core_check_feature(dev, DRIVER_MODESET)) {
- req->value = 1;
- drm_for_each_crtc(crtc, dev) {
- if (!crtc->funcs->page_flip_target)
- req->value = 0;
- }
+ req->value = 1;
+ drm_for_each_crtc(crtc, dev) {
+ if (!crtc->funcs->page_flip_target)
+ req->value = 0;
}
break;
case DRM_CAP_CURSOR_WIDTH:
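
From userspace nothing changes for the caps moved ahead of the
DRIVER_MODESET check; a libdrm sketch, where fd is an already-open DRM
node:

	#include <xf86drm.h>

	uint64_t cap = 0;

	if (drmGetCap(fd, DRM_CAP_TIMESTAMP_MONOTONIC, &cap) == 0 && cap)
		; /* vblank timestamps are on CLOCK_MONOTONIC */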
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index b969a64a1514..273625a85036 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -93,7 +93,7 @@ static void store_vblank(struct drm_device *dev, unsigned int pipe,
* Reset the stored timestamp for the current vblank count to correspond
* to the last vblank occurred.
*
- * Only to be called from drm_vblank_on().
+ * Only to be called from drm_crtc_vblank_on().
*
* Note: caller must hold dev->vbl_lock since this reads & writes
* device vblank fields.
@@ -234,6 +234,16 @@ static void drm_update_vblank_count(struct drm_device *dev, unsigned int pipe,
store_vblank(dev, pipe, diff, &t_vblank, cur_vblank);
}
+static u32 drm_vblank_count(struct drm_device *dev, unsigned int pipe)
+{
+ struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
+
+ if (WARN_ON(pipe >= dev->num_crtcs))
+ return 0;
+
+ return vblank->count;
+}
+
/**
* drm_accurate_vblank_count - retrieve the master vblank counter
* @crtc: which counter to retrieve
@@ -296,7 +306,7 @@ static void vblank_disable_and_save(struct drm_device *dev, unsigned int pipe)
* Always update the count and timestamp to maintain the
* appearance that the counter has been ticking all along until
* this time. This makes the count account for the entire time
- * between drm_vblank_on() and drm_vblank_off().
+ * between drm_crtc_vblank_on() and drm_crtc_vblank_off().
*/
drm_update_vblank_count(dev, pipe, 0);
@@ -888,31 +898,6 @@ drm_get_last_vbltimestamp(struct drm_device *dev, unsigned int pipe,
}
/**
- * drm_vblank_count - retrieve "cooked" vblank counter value
- * @dev: DRM device
- * @pipe: index of CRTC for which to retrieve the counter
- *
- * Fetches the "cooked" vblank count value that represents the number of
- * vblank events since the system was booted, including lost events due to
- * modesetting activity.
- *
- * This is the legacy version of drm_crtc_vblank_count().
- *
- * Returns:
- * The software vblank counter.
- */
-u32 drm_vblank_count(struct drm_device *dev, unsigned int pipe)
-{
- struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
-
- if (WARN_ON(pipe >= dev->num_crtcs))
- return 0;
-
- return vblank->count;
-}
-EXPORT_SYMBOL(drm_vblank_count);
-
-/**
* drm_crtc_vblank_count - retrieve "cooked" vblank counter value
* @crtc: which counter to retrieve
*
@@ -920,8 +905,6 @@ EXPORT_SYMBOL(drm_vblank_count);
* vblank events since the system was booted, including lost events due to
* modesetting activity.
*
- * This is the native KMS version of drm_vblank_count().
- *
* Returns:
* The software vblank counter.
*/
@@ -952,8 +935,10 @@ static u32 drm_vblank_count_and_time(struct drm_device *dev, unsigned int pipe,
u32 vblank_count;
unsigned int seq;
- if (WARN_ON(pipe >= dev->num_crtcs))
+ if (WARN_ON(pipe >= dev->num_crtcs)) {
+ *vblanktime = (struct timeval) { 0 };
return 0;
+ }
do {
seq = read_seqbegin(&vblank->seqlock);
@@ -1270,21 +1255,20 @@ void drm_crtc_wait_one_vblank(struct drm_crtc *crtc)
EXPORT_SYMBOL(drm_crtc_wait_one_vblank);
/**
- * drm_vblank_off - disable vblank events on a CRTC
- * @dev: DRM device
- * @pipe: CRTC index
+ * drm_crtc_vblank_off - disable vblank events on a CRTC
+ * @crtc: CRTC in question
*
* Drivers can use this function to shut down the vblank interrupt handling when
* disabling a crtc. This function ensures that the latest vblank frame count is
- * stored so that drm_vblank_on() can restore it again.
+ * stored so that drm_crtc_vblank_on() can restore it again.
*
* Drivers must use this function when the hardware vblank counter can get
* reset, e.g. when suspending.
- *
- * This is the legacy version of drm_crtc_vblank_off().
*/
-void drm_vblank_off(struct drm_device *dev, unsigned int pipe)
+void drm_crtc_vblank_off(struct drm_crtc *crtc)
{
+ struct drm_device *dev = crtc->dev;
+ unsigned int pipe = drm_crtc_index(crtc);
struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
struct drm_pending_vblank_event *e, *t;
struct timeval now;
@@ -1300,7 +1284,8 @@ void drm_vblank_off(struct drm_device *dev, unsigned int pipe)
DRM_DEBUG_VBL("crtc %d, vblank enabled %d, inmodeset %d\n",
pipe, vblank->enabled, vblank->inmodeset);
- /* Avoid redundant vblank disables without previous drm_vblank_on(). */
+ /* Avoid redundant vblank disables without previous
+ * drm_crtc_vblank_on(). */
if (drm_core_check_feature(dev, DRIVER_ATOMIC) || !vblank->inmodeset)
vblank_disable_and_save(dev, pipe);
@@ -1331,25 +1316,6 @@ void drm_vblank_off(struct drm_device *dev, unsigned int pipe)
}
spin_unlock_irqrestore(&dev->event_lock, irqflags);
}
-EXPORT_SYMBOL(drm_vblank_off);
-
-/**
- * drm_crtc_vblank_off - disable vblank events on a CRTC
- * @crtc: CRTC in question
- *
- * Drivers can use this function to shut down the vblank interrupt handling when
- * disabling a crtc. This function ensures that the latest vblank frame count is
- * stored so that drm_vblank_on can restore it again.
- *
- * Drivers must use this function when the hardware vblank counter can get
- * reset, e.g. when suspending.
- *
- * This is the native kms version of drm_vblank_off().
- */
-void drm_crtc_vblank_off(struct drm_crtc *crtc)
-{
- drm_vblank_off(crtc->dev, drm_crtc_index(crtc));
-}
EXPORT_SYMBOL(drm_crtc_vblank_off);
/**
@@ -1385,19 +1351,18 @@ void drm_crtc_vblank_reset(struct drm_crtc *crtc)
EXPORT_SYMBOL(drm_crtc_vblank_reset);
/**
- * drm_vblank_on - enable vblank events on a CRTC
- * @dev: DRM device
- * @pipe: CRTC index
+ * drm_crtc_vblank_on - enable vblank events on a CRTC
+ * @crtc: CRTC in question
*
 * This function restores the vblank interrupt state captured with
- * drm_vblank_off() again. Note that calls to drm_vblank_on() and
- * drm_vblank_off() can be unbalanced and so can also be unconditionally called
+ * drm_crtc_vblank_off() again. Note that calls to drm_crtc_vblank_on() and
+ * drm_crtc_vblank_off() can be unbalanced and so can also be unconditionally called
* in driver load code to reflect the current hardware state of the crtc.
- *
- * This is the legacy version of drm_crtc_vblank_on().
*/
-void drm_vblank_on(struct drm_device *dev, unsigned int pipe)
+void drm_crtc_vblank_on(struct drm_crtc *crtc)
{
+ struct drm_device *dev = crtc->dev;
+ unsigned int pipe = drm_crtc_index(crtc);
struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
unsigned long irqflags;
@@ -1424,49 +1389,10 @@ void drm_vblank_on(struct drm_device *dev, unsigned int pipe)
WARN_ON(drm_vblank_enable(dev, pipe));
spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
}
-EXPORT_SYMBOL(drm_vblank_on);
-
-/**
- * drm_crtc_vblank_on - enable vblank events on a CRTC
- * @crtc: CRTC in question
- *
- * This functions restores the vblank interrupt state captured with
- * drm_vblank_off() again. Note that calls to drm_vblank_on() and
- * drm_vblank_off() can be unbalanced and so can also be unconditionally called
- * in driver load code to reflect the current hardware state of the crtc.
- *
- * This is the native kms version of drm_vblank_on().
- */
-void drm_crtc_vblank_on(struct drm_crtc *crtc)
-{
- drm_vblank_on(crtc->dev, drm_crtc_index(crtc));
-}
EXPORT_SYMBOL(drm_crtc_vblank_on);
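
With the legacy pipe-indexed variants gone, drivers pair the CRTC-based
calls directly in their enable/disable paths; a sketch with hypothetical
hardware helpers:

	static void my_crtc_enable(struct drm_crtc *crtc)
	{
		my_hw_start_scanout(crtc);	/* hypothetical */
		drm_crtc_vblank_on(crtc);
	}

	static void my_crtc_disable(struct drm_crtc *crtc)
	{
		drm_crtc_vblank_off(crtc);	/* also flushes pending events */
		my_hw_stop_scanout(crtc);	/* hypothetical */
	}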
-/**
- * drm_vblank_pre_modeset - account for vblanks across mode sets
- * @dev: DRM device
- * @pipe: CRTC index
- *
- * Account for vblank events across mode setting events, which will likely
- * reset the hardware frame counter.
- *
- * This is done by grabbing a temporary vblank reference to ensure that the
- * vblank interrupt keeps running across the modeset sequence. With this the
- * software-side vblank frame counting will ensure that there are no jumps or
- * discontinuities.
- *
- * Unfortunately this approach is racy and also doesn't work when the vblank
- * interrupt stops running, e.g. across system suspend resume. It is therefore
- * highly recommended that drivers use the newer drm_vblank_off() and
- * drm_vblank_on() instead. drm_vblank_pre_modeset() only works correctly when
- * using "cooked" software vblank frame counters and not relying on any hardware
- * counters.
- *
- * Drivers must call drm_vblank_post_modeset() when re-enabling the same crtc
- * again.
- */
-void drm_vblank_pre_modeset(struct drm_device *dev, unsigned int pipe)
+static void drm_legacy_vblank_pre_modeset(struct drm_device *dev,
+ unsigned int pipe)
{
struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
@@ -1490,17 +1416,9 @@ void drm_vblank_pre_modeset(struct drm_device *dev, unsigned int pipe)
vblank->inmodeset |= 0x2;
}
}
-EXPORT_SYMBOL(drm_vblank_pre_modeset);
-/**
- * drm_vblank_post_modeset - undo drm_vblank_pre_modeset changes
- * @dev: DRM device
- * @pipe: CRTC index
- *
- * This function again drops the temporary vblank reference acquired in
- * drm_vblank_pre_modeset.
- */
-void drm_vblank_post_modeset(struct drm_device *dev, unsigned int pipe)
+static void drm_legacy_vblank_post_modeset(struct drm_device *dev,
+ unsigned int pipe)
{
struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
unsigned long irqflags;
@@ -1523,7 +1441,6 @@ void drm_vblank_post_modeset(struct drm_device *dev, unsigned int pipe)
vblank->inmodeset = 0;
}
}
-EXPORT_SYMBOL(drm_vblank_post_modeset);
/*
* drm_modeset_ctl - handle vblank event counter changes across mode switch
@@ -1556,10 +1473,10 @@ int drm_modeset_ctl(struct drm_device *dev, void *data,
switch (modeset->cmd) {
case _DRM_PRE_MODESET:
- drm_vblank_pre_modeset(dev, pipe);
+ drm_legacy_vblank_pre_modeset(dev, pipe);
break;
case _DRM_POST_MODESET:
- drm_vblank_post_modeset(dev, pipe);
+ drm_legacy_vblank_post_modeset(dev, pipe);
break;
default:
return -EINVAL;
@@ -1594,11 +1511,10 @@ static int drm_queue_vblank_event(struct drm_device *dev, unsigned int pipe,
spin_lock_irqsave(&dev->event_lock, flags);
/*
- * drm_vblank_off() might have been called after we called
- * drm_vblank_get(). drm_vblank_off() holds event_lock
- * around the vblank disable, so no need for further locking.
- * The reference from drm_vblank_get() protects against
- * vblank disable from another source.
+ * drm_crtc_vblank_off() might have been called after we called
+ * drm_vblank_get(). drm_crtc_vblank_off() holds event_lock around the
+ * vblank disable, so no need for further locking. The reference from
+ * drm_vblank_get() protects against vblank disable from another source.
*/
if (!vblank->enabled) {
ret = -EINVAL;
diff --git a/drivers/gpu/drm/drm_lock.c b/drivers/gpu/drm/drm_lock.c
index c901f3c5b269..32d43f86a8f2 100644
--- a/drivers/gpu/drm/drm_lock.c
+++ b/drivers/gpu/drm/drm_lock.c
@@ -176,7 +176,8 @@ int drm_legacy_lock(struct drm_device *dev, void *data,
DRM_DEBUG("%d (pid %d) requests lock (0x%08x), flags = 0x%08x\n",
lock->context, task_pid_nr(current),
- master->lock.hw_lock->lock, lock->flags);
+ master->lock.hw_lock ? master->lock.hw_lock->lock : -1,
+ lock->flags);
add_wait_queue(&master->lock.lock_queue, &entry);
spin_lock_bh(&master->lock.spinlock);
diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c
index 11d44a1e0ab3..ca1e344f318d 100644
--- a/drivers/gpu/drm/drm_mm.c
+++ b/drivers/gpu/drm/drm_mm.c
@@ -104,6 +104,68 @@ static struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_
u64 end,
enum drm_mm_search_flags flags);
+#ifdef CONFIG_DRM_DEBUG_MM
+#include <linux/stackdepot.h>
+
+#define STACKDEPTH 32
+#define BUFSZ 4096
+
+static noinline void save_stack(struct drm_mm_node *node)
+{
+ unsigned long entries[STACKDEPTH];
+ struct stack_trace trace = {
+ .entries = entries,
+ .max_entries = STACKDEPTH,
+ .skip = 1
+ };
+
+ save_stack_trace(&trace);
+ if (trace.nr_entries != 0 &&
+ trace.entries[trace.nr_entries-1] == ULONG_MAX)
+ trace.nr_entries--;
+
+ /* May be called under spinlock, so avoid sleeping */
+ node->stack = depot_save_stack(&trace, GFP_NOWAIT);
+}
+
+static void show_leaks(struct drm_mm *mm)
+{
+ struct drm_mm_node *node;
+ unsigned long entries[STACKDEPTH];
+ char *buf;
+
+ buf = kmalloc(BUFSZ, GFP_KERNEL);
+ if (!buf)
+ return;
+
+ list_for_each_entry(node, &mm->head_node.node_list, node_list) {
+ struct stack_trace trace = {
+ .entries = entries,
+ .max_entries = STACKDEPTH
+ };
+
+ if (!node->stack) {
+ DRM_ERROR("node [%08llx + %08llx]: unknown owner\n",
+ node->start, node->size);
+ continue;
+ }
+
+ depot_fetch_stack(node->stack, &trace);
+ snprint_stack_trace(buf, BUFSZ, &trace, 0);
+ DRM_ERROR("node [%08llx + %08llx]: inserted at\n%s",
+ node->start, node->size, buf);
+ }
+
+ kfree(buf);
+}
+
+#undef STACKDEPTH
+#undef BUFSZ
+#else
+static void save_stack(struct drm_mm_node *node) { }
+static void show_leaks(struct drm_mm *mm) { }
+#endif
+
#define START(node) ((node)->start)
#define LAST(node) ((node)->start + (node)->size - 1)
@@ -112,19 +174,12 @@ INTERVAL_TREE_DEFINE(struct drm_mm_node, rb,
START, LAST, static inline, drm_mm_interval_tree)
struct drm_mm_node *
-drm_mm_interval_first(struct drm_mm *mm, u64 start, u64 last)
+__drm_mm_interval_first(struct drm_mm *mm, u64 start, u64 last)
{
return drm_mm_interval_tree_iter_first(&mm->interval_tree,
start, last);
}
-EXPORT_SYMBOL(drm_mm_interval_first);
-
-struct drm_mm_node *
-drm_mm_interval_next(struct drm_mm_node *node, u64 start, u64 last)
-{
- return drm_mm_interval_tree_iter_next(node, start, last);
-}
-EXPORT_SYMBOL(drm_mm_interval_next);
+EXPORT_SYMBOL(__drm_mm_interval_first);
static void drm_mm_interval_tree_add_node(struct drm_mm_node *hole_node,
struct drm_mm_node *node)
@@ -228,6 +283,8 @@ static void drm_mm_insert_helper(struct drm_mm_node *hole_node,
list_add(&node->hole_stack, &mm->hole_stack);
node->hole_follows = 1;
}
+
+ save_stack(node);
}
/**
@@ -249,6 +306,7 @@ int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node)
u64 end = node->start + node->size;
struct drm_mm_node *hole;
u64 hole_start, hole_end;
+ u64 adj_start, adj_end;
if (WARN_ON(node->size == 0))
return -EINVAL;
@@ -270,9 +328,13 @@ int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node)
if (!hole->hole_follows)
return -ENOSPC;
- hole_start = __drm_mm_hole_node_start(hole);
- hole_end = __drm_mm_hole_node_end(hole);
- if (hole_start > node->start || hole_end < end)
+ adj_start = hole_start = __drm_mm_hole_node_start(hole);
+ adj_end = hole_end = __drm_mm_hole_node_end(hole);
+
+ if (mm->color_adjust)
+ mm->color_adjust(hole, node->color, &adj_start, &adj_end);
+
+ if (adj_start > node->start || adj_end < end)
return -ENOSPC;
node->mm = mm;
@@ -293,6 +355,8 @@ int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node)
node->hole_follows = 1;
}
+ save_stack(node);
+
return 0;
}
EXPORT_SYMBOL(drm_mm_reserve_node);
@@ -397,6 +461,8 @@ static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node,
list_add(&node->hole_stack, &mm->hole_stack);
node->hole_follows = 1;
}
+
+ save_stack(node);
}
/**
@@ -839,6 +905,7 @@ void drm_mm_init(struct drm_mm * mm, u64 start, u64 size)
/* Clever trick to avoid a special case in the free hole tracking. */
INIT_LIST_HEAD(&mm->head_node.node_list);
+ mm->head_node.allocated = 0;
mm->head_node.hole_follows = 1;
mm->head_node.scanned_block = 0;
mm->head_node.scanned_prev_free = 0;
@@ -861,10 +928,12 @@ EXPORT_SYMBOL(drm_mm_init);
* Note that it is a bug to call this function on an allocator which is not
* clean.
*/
-void drm_mm_takedown(struct drm_mm * mm)
+void drm_mm_takedown(struct drm_mm *mm)
{
- WARN(!list_empty(&mm->head_node.node_list),
- "Memory manager not clean during takedown.\n");
+ if (WARN(!list_empty(&mm->head_node.node_list),
+ "Memory manager not clean during takedown.\n"))
+ show_leaks(mm);
}
EXPORT_SYMBOL(drm_mm_takedown);
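
A minimal sketch of how the new CONFIG_DRM_DEBUG_MM machinery fires, assuming the option is enabled; the range and node size here are arbitrary:

    struct drm_mm mm;
    struct drm_mm_node node = { .start = 0, .size = 1024 };

    drm_mm_init(&mm, 0, 4096);
    drm_mm_reserve_node(&mm, &node); /* save_stack() records the call chain */
    /* drm_mm_remove_node(&node) is "forgotten" here ... */
    drm_mm_takedown(&mm); /* WARNs, then show_leaks() prints the recorded stack */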
diff --git a/drivers/gpu/drm/drm_mode_config.c b/drivers/gpu/drm/drm_mode_config.c
new file mode 100644
index 000000000000..2735a5847ffa
--- /dev/null
+++ b/drivers/gpu/drm/drm_mode_config.c
@@ -0,0 +1,494 @@
+/*
+ * Copyright (c) 2016 Intel Corporation
+ *
+ * Permission to use, copy, modify, distribute, and sell this software and its
+ * documentation for any purpose is hereby granted without fee, provided that
+ * the above copyright notice appear in all copies and that both that copyright
+ * notice and this permission notice appear in supporting documentation, and
+ * that the name of the copyright holders not be used in advertising or
+ * publicity pertaining to distribution of the software without specific,
+ * written prior permission. The copyright holders make no representations
+ * about the suitability of this software for any purpose. It is provided "as
+ * is" without express or implied warranty.
+ *
+ * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
+ * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
+ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THIS SOFTWARE.
+ */
+
+#include <drm/drm_mode_config.h>
+#include <drm/drmP.h>
+
+#include "drm_crtc_internal.h"
+#include "drm_internal.h"
+
+int drm_modeset_register_all(struct drm_device *dev)
+{
+ int ret;
+
+ ret = drm_plane_register_all(dev);
+ if (ret)
+ goto err_plane;
+
+ ret = drm_crtc_register_all(dev);
+ if (ret)
+ goto err_crtc;
+
+ ret = drm_encoder_register_all(dev);
+ if (ret)
+ goto err_encoder;
+
+ ret = drm_connector_register_all(dev);
+ if (ret)
+ goto err_connector;
+
+ return 0;
+
+err_connector:
+ drm_encoder_unregister_all(dev);
+err_encoder:
+ drm_crtc_unregister_all(dev);
+err_crtc:
+ drm_plane_unregister_all(dev);
+err_plane:
+ return ret;
+}
+
+void drm_modeset_unregister_all(struct drm_device *dev)
+{
+ drm_connector_unregister_all(dev);
+ drm_encoder_unregister_all(dev);
+ drm_crtc_unregister_all(dev);
+ drm_plane_unregister_all(dev);
+}
+
+/**
+ * drm_mode_getresources - get graphics configuration
+ * @dev: drm device for the ioctl
+ * @data: data pointer for the ioctl
+ * @file_priv: drm file for the ioctl call
+ *
+ * Construct a set of configuration description structures and return
+ * them to the user, including CRTC, encoder, connector and framebuffer
+ * configuration.
+ *
+ * Called by the user via ioctl.
+ *
+ * Returns:
+ * Zero on success, negative errno on failure.
+ */
+int drm_mode_getresources(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_mode_card_res *card_res = data;
+ struct list_head *lh;
+ struct drm_framebuffer *fb;
+ struct drm_connector *connector;
+ struct drm_crtc *crtc;
+ struct drm_encoder *encoder;
+ int ret = 0;
+ int connector_count = 0;
+ int crtc_count = 0;
+ int fb_count = 0;
+ int encoder_count = 0;
+ int copied = 0;
+ uint32_t __user *fb_id;
+ uint32_t __user *crtc_id;
+ uint32_t __user *connector_id;
+ uint32_t __user *encoder_id;
+
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return -EINVAL;
+
+ mutex_lock(&file_priv->fbs_lock);
+ /*
+ * For the non-control nodes we need to limit the list of resources
+ * by IDs in the group list for this node
+ */
+ list_for_each(lh, &file_priv->fbs)
+ fb_count++;
+
+ /* handle this in 4 parts */
+ /* FBs */
+ if (card_res->count_fbs >= fb_count) {
+ copied = 0;
+ fb_id = (uint32_t __user *)(unsigned long)card_res->fb_id_ptr;
+ list_for_each_entry(fb, &file_priv->fbs, filp_head) {
+ if (put_user(fb->base.id, fb_id + copied)) {
+ mutex_unlock(&file_priv->fbs_lock);
+ return -EFAULT;
+ }
+ copied++;
+ }
+ }
+ card_res->count_fbs = fb_count;
+ mutex_unlock(&file_priv->fbs_lock);
+
+ /* mode_config.mutex protects the connector list against e.g. DP MST
+ * connector hot-adding. CRTC/Plane lists are invariant. */
+ mutex_lock(&dev->mode_config.mutex);
+ drm_for_each_crtc(crtc, dev)
+ crtc_count++;
+
+ drm_for_each_connector(connector, dev)
+ connector_count++;
+
+ drm_for_each_encoder(encoder, dev)
+ encoder_count++;
+
+ card_res->max_height = dev->mode_config.max_height;
+ card_res->min_height = dev->mode_config.min_height;
+ card_res->max_width = dev->mode_config.max_width;
+ card_res->min_width = dev->mode_config.min_width;
+
+ /* CRTCs */
+ if (card_res->count_crtcs >= crtc_count) {
+ copied = 0;
+ crtc_id = (uint32_t __user *)(unsigned long)card_res->crtc_id_ptr;
+ drm_for_each_crtc(crtc, dev) {
+ if (put_user(crtc->base.id, crtc_id + copied)) {
+ ret = -EFAULT;
+ goto out;
+ }
+ copied++;
+ }
+ }
+ card_res->count_crtcs = crtc_count;
+
+ /* Encoders */
+ if (card_res->count_encoders >= encoder_count) {
+ copied = 0;
+ encoder_id = (uint32_t __user *)(unsigned long)card_res->encoder_id_ptr;
+ drm_for_each_encoder(encoder, dev) {
+ if (put_user(encoder->base.id, encoder_id +
+ copied)) {
+ ret = -EFAULT;
+ goto out;
+ }
+ copied++;
+ }
+ }
+ card_res->count_encoders = encoder_count;
+
+ /* Connectors */
+ if (card_res->count_connectors >= connector_count) {
+ copied = 0;
+ connector_id = (uint32_t __user *)(unsigned long)card_res->connector_id_ptr;
+ drm_for_each_connector(connector, dev) {
+ if (put_user(connector->base.id,
+ connector_id + copied)) {
+ ret = -EFAULT;
+ goto out;
+ }
+ copied++;
+ }
+ }
+ card_res->count_connectors = connector_count;
+
+out:
+ mutex_unlock(&dev->mode_config.mutex);
+ return ret;
+}
+
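From userspace this ioctl is normally reached through libdrm; a rough sketch of the caller side, assuming an open DRM file descriptor fd (error handling elided):

    #include <xf86drmMode.h>

    drmModeResPtr res = drmModeGetResources(fd); /* wraps DRM_IOCTL_MODE_GETRESOURCES */
    for (int i = 0; i < res->count_connectors; i++)
        printf("connector id %u\n", res->connectors[i]);
    drmModeFreeResources(res);
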
+/**
+ * drm_mode_config_reset - call ->reset callbacks
+ * @dev: drm device
+ *
+ * This function calls the ->reset callback of all planes, CRTCs, encoders and
+ * connectors. Drivers can use this in e.g. their driver load or resume code to
+ * reset hardware and software state.
+ */
+void drm_mode_config_reset(struct drm_device *dev)
+{
+ struct drm_crtc *crtc;
+ struct drm_plane *plane;
+ struct drm_encoder *encoder;
+ struct drm_connector *connector;
+
+ drm_for_each_plane(plane, dev)
+ if (plane->funcs->reset)
+ plane->funcs->reset(plane);
+
+ drm_for_each_crtc(crtc, dev)
+ if (crtc->funcs->reset)
+ crtc->funcs->reset(crtc);
+
+ drm_for_each_encoder(encoder, dev)
+ if (encoder->funcs->reset)
+ encoder->funcs->reset(encoder);
+
+ mutex_lock(&dev->mode_config.mutex);
+ drm_for_each_connector(connector, dev)
+ if (connector->funcs->reset)
+ connector->funcs->reset(connector);
+ mutex_unlock(&dev->mode_config.mutex);
+}
+EXPORT_SYMBOL(drm_mode_config_reset);
+
+/*
+ * Global properties
+ */
+static const struct drm_prop_enum_list drm_plane_type_enum_list[] = {
+ { DRM_PLANE_TYPE_OVERLAY, "Overlay" },
+ { DRM_PLANE_TYPE_PRIMARY, "Primary" },
+ { DRM_PLANE_TYPE_CURSOR, "Cursor" },
+};
+
+static int drm_mode_create_standard_properties(struct drm_device *dev)
+{
+ struct drm_property *prop;
+ int ret;
+
+ ret = drm_connector_create_standard_properties(dev);
+ if (ret)
+ return ret;
+
+ prop = drm_property_create_enum(dev, DRM_MODE_PROP_IMMUTABLE,
+ "type", drm_plane_type_enum_list,
+ ARRAY_SIZE(drm_plane_type_enum_list));
+ if (!prop)
+ return -ENOMEM;
+ dev->mode_config.plane_type_property = prop;
+
+ prop = drm_property_create_range(dev, DRM_MODE_PROP_ATOMIC,
+ "SRC_X", 0, UINT_MAX);
+ if (!prop)
+ return -ENOMEM;
+ dev->mode_config.prop_src_x = prop;
+
+ prop = drm_property_create_range(dev, DRM_MODE_PROP_ATOMIC,
+ "SRC_Y", 0, UINT_MAX);
+ if (!prop)
+ return -ENOMEM;
+ dev->mode_config.prop_src_y = prop;
+
+ prop = drm_property_create_range(dev, DRM_MODE_PROP_ATOMIC,
+ "SRC_W", 0, UINT_MAX);
+ if (!prop)
+ return -ENOMEM;
+ dev->mode_config.prop_src_w = prop;
+
+ prop = drm_property_create_range(dev, DRM_MODE_PROP_ATOMIC,
+ "SRC_H", 0, UINT_MAX);
+ if (!prop)
+ return -ENOMEM;
+ dev->mode_config.prop_src_h = prop;
+
+ prop = drm_property_create_signed_range(dev, DRM_MODE_PROP_ATOMIC,
+ "CRTC_X", INT_MIN, INT_MAX);
+ if (!prop)
+ return -ENOMEM;
+ dev->mode_config.prop_crtc_x = prop;
+
+ prop = drm_property_create_signed_range(dev, DRM_MODE_PROP_ATOMIC,
+ "CRTC_Y", INT_MIN, INT_MAX);
+ if (!prop)
+ return -ENOMEM;
+ dev->mode_config.prop_crtc_y = prop;
+
+ prop = drm_property_create_range(dev, DRM_MODE_PROP_ATOMIC,
+ "CRTC_W", 0, INT_MAX);
+ if (!prop)
+ return -ENOMEM;
+ dev->mode_config.prop_crtc_w = prop;
+
+ prop = drm_property_create_range(dev, DRM_MODE_PROP_ATOMIC,
+ "CRTC_H", 0, INT_MAX);
+ if (!prop)
+ return -ENOMEM;
+ dev->mode_config.prop_crtc_h = prop;
+
+ prop = drm_property_create_object(dev, DRM_MODE_PROP_ATOMIC,
+ "FB_ID", DRM_MODE_OBJECT_FB);
+ if (!prop)
+ return -ENOMEM;
+ dev->mode_config.prop_fb_id = prop;
+
+ prop = drm_property_create_signed_range(dev, DRM_MODE_PROP_ATOMIC,
+ "IN_FENCE_FD", -1, INT_MAX);
+ if (!prop)
+ return -ENOMEM;
+ dev->mode_config.prop_in_fence_fd = prop;
+
+ prop = drm_property_create_range(dev, DRM_MODE_PROP_ATOMIC,
+ "OUT_FENCE_PTR", 0, U64_MAX);
+ if (!prop)
+ return -ENOMEM;
+ dev->mode_config.prop_out_fence_ptr = prop;
+
+ prop = drm_property_create_object(dev, DRM_MODE_PROP_ATOMIC,
+ "CRTC_ID", DRM_MODE_OBJECT_CRTC);
+ if (!prop)
+ return -ENOMEM;
+ dev->mode_config.prop_crtc_id = prop;
+
+ prop = drm_property_create_bool(dev, DRM_MODE_PROP_ATOMIC,
+ "ACTIVE");
+ if (!prop)
+ return -ENOMEM;
+ dev->mode_config.prop_active = prop;
+
+ prop = drm_property_create(dev,
+ DRM_MODE_PROP_ATOMIC | DRM_MODE_PROP_BLOB,
+ "MODE_ID", 0);
+ if (!prop)
+ return -ENOMEM;
+ dev->mode_config.prop_mode_id = prop;
+
+ prop = drm_property_create(dev,
+ DRM_MODE_PROP_BLOB,
+ "DEGAMMA_LUT", 0);
+ if (!prop)
+ return -ENOMEM;
+ dev->mode_config.degamma_lut_property = prop;
+
+ prop = drm_property_create_range(dev,
+ DRM_MODE_PROP_IMMUTABLE,
+ "DEGAMMA_LUT_SIZE", 0, UINT_MAX);
+ if (!prop)
+ return -ENOMEM;
+ dev->mode_config.degamma_lut_size_property = prop;
+
+ prop = drm_property_create(dev,
+ DRM_MODE_PROP_BLOB,
+ "CTM", 0);
+ if (!prop)
+ return -ENOMEM;
+ dev->mode_config.ctm_property = prop;
+
+ prop = drm_property_create(dev,
+ DRM_MODE_PROP_BLOB,
+ "GAMMA_LUT", 0);
+ if (!prop)
+ return -ENOMEM;
+ dev->mode_config.gamma_lut_property = prop;
+
+ prop = drm_property_create_range(dev,
+ DRM_MODE_PROP_IMMUTABLE,
+ "GAMMA_LUT_SIZE", 0, UINT_MAX);
+ if (!prop)
+ return -ENOMEM;
+ dev->mode_config.gamma_lut_size_property = prop;
+
+ return 0;
+}
+
+/**
+ * drm_mode_config_init - initialize DRM mode_configuration structure
+ * @dev: DRM device
+ *
+ * Initialize @dev's mode_config structure, used for tracking the graphics
+ * configuration of @dev.
+ *
+ * Since this initializes the modeset locks, no locking is possible yet. That
+ * is no problem, since this should happen single-threaded at init time. It is
+ * the driver's job to ensure this guarantee.
+ */
+void drm_mode_config_init(struct drm_device *dev)
+{
+ mutex_init(&dev->mode_config.mutex);
+ drm_modeset_lock_init(&dev->mode_config.connection_mutex);
+ mutex_init(&dev->mode_config.idr_mutex);
+ mutex_init(&dev->mode_config.fb_lock);
+ mutex_init(&dev->mode_config.blob_lock);
+ INIT_LIST_HEAD(&dev->mode_config.fb_list);
+ INIT_LIST_HEAD(&dev->mode_config.crtc_list);
+ INIT_LIST_HEAD(&dev->mode_config.connector_list);
+ INIT_LIST_HEAD(&dev->mode_config.encoder_list);
+ INIT_LIST_HEAD(&dev->mode_config.property_list);
+ INIT_LIST_HEAD(&dev->mode_config.property_blob_list);
+ INIT_LIST_HEAD(&dev->mode_config.plane_list);
+ idr_init(&dev->mode_config.crtc_idr);
+ idr_init(&dev->mode_config.tile_idr);
+ ida_init(&dev->mode_config.connector_ida);
+
+ drm_modeset_lock_all(dev);
+ drm_mode_create_standard_properties(dev);
+ drm_modeset_unlock_all(dev);
+
+ /* Just to be sure */
+ dev->mode_config.num_fb = 0;
+ dev->mode_config.num_connector = 0;
+ dev->mode_config.num_crtc = 0;
+ dev->mode_config.num_encoder = 0;
+ dev->mode_config.num_overlay_plane = 0;
+ dev->mode_config.num_total_plane = 0;
+}
+EXPORT_SYMBOL(drm_mode_config_init);
+
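A minimal sketch of the usual call site in a KMS driver's load path; example_mode_config_funcs is a hypothetical driver vtable, and the limits are the ones drm_mode_getresources() later reports to userspace:

    drm_mode_config_init(dev);
    dev->mode_config.min_width = 0;
    dev->mode_config.min_height = 0;
    dev->mode_config.max_width = 4096;
    dev->mode_config.max_height = 4096;
    dev->mode_config.funcs = &example_mode_config_funcs;
    /* ... then create planes, CRTCs, encoders and connectors ... */
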
+/**
+ * drm_mode_config_cleanup - free up DRM mode_config info
+ * @dev: DRM device
+ *
+ * Free up all the connectors and CRTCs associated with this DRM device, then
+ * free up the framebuffers and associated buffer objects.
+ *
+ * Note that since this /should/ happen single-threaded at driver/device
+ * teardown time, no locking is required. It's the driver's job to ensure that
+ * this guarantee actually holds true.
+ *
+ * FIXME: cleanup any dangling user buffer objects too
+ */
+void drm_mode_config_cleanup(struct drm_device *dev)
+{
+ struct drm_connector *connector, *ot;
+ struct drm_crtc *crtc, *ct;
+ struct drm_encoder *encoder, *enct;
+ struct drm_framebuffer *fb, *fbt;
+ struct drm_property *property, *pt;
+ struct drm_property_blob *blob, *bt;
+ struct drm_plane *plane, *plt;
+
+ list_for_each_entry_safe(encoder, enct, &dev->mode_config.encoder_list,
+ head) {
+ encoder->funcs->destroy(encoder);
+ }
+
+ list_for_each_entry_safe(connector, ot,
+ &dev->mode_config.connector_list, head) {
+ connector->funcs->destroy(connector);
+ }
+
+ list_for_each_entry_safe(property, pt, &dev->mode_config.property_list,
+ head) {
+ drm_property_destroy(dev, property);
+ }
+
+ list_for_each_entry_safe(plane, plt, &dev->mode_config.plane_list,
+ head) {
+ plane->funcs->destroy(plane);
+ }
+
+ list_for_each_entry_safe(crtc, ct, &dev->mode_config.crtc_list, head) {
+ crtc->funcs->destroy(crtc);
+ }
+
+ list_for_each_entry_safe(blob, bt, &dev->mode_config.property_blob_list,
+ head_global) {
+ drm_property_unreference_blob(blob);
+ }
+
+ /*
+ * Single-threaded teardown context, so it's not required to grab the
+ * fb_lock to protect against concurrent fb_list access. On the contrary, it
+ * would actually deadlock with the drm_framebuffer_cleanup function.
+ *
+ * Also, if there are any framebuffers left, that's a driver leak now,
+ * so politely WARN about this.
+ */
+ WARN_ON(!list_empty(&dev->mode_config.fb_list));
+ list_for_each_entry_safe(fb, fbt, &dev->mode_config.fb_list, head) {
+ drm_framebuffer_free(&fb->base.refcount);
+ }
+
+ ida_destroy(&dev->mode_config.connector_ida);
+ idr_destroy(&dev->mode_config.tile_idr);
+ idr_destroy(&dev->mode_config.crtc_idr);
+ drm_modeset_lock_fini(&dev->mode_config.connection_mutex);
+}
+EXPORT_SYMBOL(drm_mode_config_cleanup);
diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c
index 53f07ac7c174..ac6a35212501 100644
--- a/drivers/gpu/drm/drm_modes.c
+++ b/drivers/gpu/drm/drm_modes.c
@@ -49,13 +49,7 @@
*/
void drm_mode_debug_printmodeline(const struct drm_display_mode *mode)
{
- DRM_DEBUG_KMS("Modeline %d:\"%s\" %d %d %d %d %d %d %d %d %d %d "
- "0x%x 0x%x\n",
- mode->base.id, mode->name, mode->vrefresh, mode->clock,
- mode->hdisplay, mode->hsync_start,
- mode->hsync_end, mode->htotal,
- mode->vdisplay, mode->vsync_start,
- mode->vsync_end, mode->vtotal, mode->type, mode->flags);
+ DRM_DEBUG_KMS("Modeline " DRM_MODE_FMT "\n", DRM_MODE_ARG(mode));
}
EXPORT_SYMBOL(drm_mode_debug_printmodeline);
@@ -165,6 +159,7 @@ struct drm_display_mode *drm_cvt_mode(struct drm_device *dev, int hdisplay,
unsigned int vfieldrate, hperiod;
int hdisplay_rnd, hmargin, vdisplay_rnd, vmargin, vsync;
int interlace;
+ u64 tmp;
/* allocate the drm_display_mode structure. If failure, we will
* return directly
@@ -322,8 +317,11 @@ struct drm_display_mode *drm_cvt_mode(struct drm_device *dev, int hdisplay,
drm_mode->vsync_end = drm_mode->vsync_start + vsync;
}
/* 15/13. Find pixel clock frequency (kHz for xf86) */
- drm_mode->clock = drm_mode->htotal * HV_FACTOR * 1000 / hperiod;
- drm_mode->clock -= drm_mode->clock % CVT_CLOCK_STEP;
+ tmp = drm_mode->htotal; /* perform intermediate calcs in u64 */
+ tmp *= HV_FACTOR * 1000;
+ do_div(tmp, hperiod);
+ tmp -= tmp % CVT_CLOCK_STEP;
+ drm_mode->clock = tmp;
/* 18/16. Find actual vertical frame frequency */
/* ignore - just set the mode flag for interlaced */
if (interlaced) {
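
The u64 intermediate is what this hunk is about: with HV_FACTOR defined as 1000 earlier in this file, htotal * HV_FACTOR * 1000 is roughly htotal * 10^6, which passes 2^31 (about 2.1 * 10^9) once htotal exceeds ~2147; a 4k-class CVT mode with an htotal near 4400 would compute 4.4 * 10^9 and overflow the old int expression before the division by hperiod.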
diff --git a/drivers/gpu/drm/drm_modeset_helper.c b/drivers/gpu/drm/drm_modeset_helper.c
index 1d45738f8f98..cc232ac6c950 100644
--- a/drivers/gpu/drm/drm_modeset_helper.c
+++ b/drivers/gpu/drm/drm_modeset_helper.c
@@ -38,7 +38,7 @@
* Some userspace presumes that the first connected connector is the main
* display, where it's supposed to display e.g. the login screen. For
* laptops, this should be the main panel. Use this function to sort all
- * (eDP/LVDS) panels to the front of the connector list, instead of
+ * (eDP/LVDS/DSI) panels to the front of the connector list, instead of
* painstakingly trying to initialize them in the right order.
*/
void drm_helper_move_panel_connectors_to_head(struct drm_device *dev)
@@ -51,7 +51,8 @@ void drm_helper_move_panel_connectors_to_head(struct drm_device *dev)
list_for_each_entry_safe(connector, tmp,
&dev->mode_config.connector_list, head) {
if (connector->connector_type == DRM_MODE_CONNECTOR_LVDS ||
- connector->connector_type == DRM_MODE_CONNECTOR_eDP)
+ connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
+ connector->connector_type == DRM_MODE_CONNECTOR_DSI)
list_move_tail(&connector->head, &panel_list);
}
@@ -70,17 +71,31 @@ EXPORT_SYMBOL(drm_helper_move_panel_connectors_to_head);
void drm_helper_mode_fill_fb_struct(struct drm_framebuffer *fb,
const struct drm_mode_fb_cmd2 *mode_cmd)
{
+ const struct drm_format_info *info;
int i;
+ info = drm_format_info(mode_cmd->pixel_format);
+ if (!info || !info->depth) {
+ struct drm_format_name_buf format_name;
+
+ DRM_DEBUG_KMS("non-RGB pixel format %s\n",
+ drm_get_format_name(mode_cmd->pixel_format,
+ &format_name));
+
+ fb->depth = 0;
+ fb->bits_per_pixel = 0;
+ } else {
+ fb->depth = info->depth;
+ fb->bits_per_pixel = info->cpp[0] * 8;
+ }
+
fb->width = mode_cmd->width;
fb->height = mode_cmd->height;
for (i = 0; i < 4; i++) {
fb->pitches[i] = mode_cmd->pitches[i];
fb->offsets[i] = mode_cmd->offsets[i];
- fb->modifier[i] = mode_cmd->modifier[i];
}
- drm_fb_get_bpp_depth(mode_cmd->pixel_format, &fb->depth,
- &fb->bits_per_pixel);
+ fb->modifier = mode_cmd->modifier[0];
fb->pixel_format = mode_cmd->pixel_format;
fb->flags = mode_cmd->flags;
}
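
For illustration, what the drm_format_info() lookup above yields for a common RGB format; the concrete values come from the core format database and are stated here as an assumption:

    const struct drm_format_info *info = drm_format_info(DRM_FORMAT_XRGB8888);
    /* info->depth == 24 and info->cpp[0] == 4, so fb->bits_per_pixel becomes 32 */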
diff --git a/drivers/gpu/drm/drm_modeset_lock.c b/drivers/gpu/drm/drm_modeset_lock.c
index 61146f5b4f56..3551ae31f143 100644
--- a/drivers/gpu/drm/drm_modeset_lock.c
+++ b/drivers/gpu/drm/drm_modeset_lock.c
@@ -52,14 +52,16 @@
* drm_modeset_drop_locks(&ctx);
* drm_modeset_acquire_fini(&ctx);
*
- * On top of of these per-object locks using &ww_mutex there's also an overall
- * dev->mode_config.lock, for protecting everything else. Mostly this means
- * probe state of connectors, and preventing hotplug add/removal of connectors.
+ * On top of these per-object locks using &ww_mutex there's also an overall
+ * dev->mode_config.lock, for protecting everything else. Mostly this means
+ * probe state of connectors, and preventing hotplug add/removal of connectors.
*
- * Finally there's a bunch of dedicated locks to protect drm core internal
- * lists and lookup data structures.
+ * Finally there's a bunch of dedicated locks to protect drm core internal
+ * lists and lookup data structures.
*/
+static DEFINE_WW_CLASS(crtc_ww_class);
+
/**
* drm_modeset_lock_all - take all modeset locks
* @dev: DRM device
@@ -398,6 +400,17 @@ int drm_modeset_backoff_interruptible(struct drm_modeset_acquire_ctx *ctx)
EXPORT_SYMBOL(drm_modeset_backoff_interruptible);
/**
+ * drm_modeset_lock_init - initialize lock
+ * @lock: lock to init
+ */
+void drm_modeset_lock_init(struct drm_modeset_lock *lock)
+{
+ ww_mutex_init(&lock->mutex, &crtc_ww_class);
+ INIT_LIST_HEAD(&lock->head);
+}
+EXPORT_SYMBOL(drm_modeset_lock_init);
+
+/**
* drm_modeset_lock - take modeset lock
* @lock: lock to take
* @ctx: acquire ctx
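
The acquire-context retry dance that drm_modeset_lock() participates in has a fixed shape; a condensed sketch, mirroring the example quoted at the top of this file:

    struct drm_modeset_acquire_ctx ctx;
    int ret;

    drm_modeset_acquire_init(&ctx, 0);
    retry:
    ret = drm_modeset_lock(&crtc->mutex, &ctx);
    if (ret == -EDEADLK) {
        drm_modeset_backoff(&ctx); /* drops held locks, waits for the contended one */
        goto retry;
    }
    /* ... modeset work under the lock ... */
    drm_modeset_drop_locks(&ctx);
    drm_modeset_acquire_fini(&ctx);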
diff --git a/drivers/gpu/drm/drm_of.c b/drivers/gpu/drm/drm_of.c
index bc98bb94264d..47848ed8ca48 100644
--- a/drivers/gpu/drm/drm_of.c
+++ b/drivers/gpu/drm/drm_of.c
@@ -6,6 +6,11 @@
#include <drm/drm_crtc.h>
#include <drm/drm_of.h>
+static void drm_release_of(struct device *dev, void *data)
+{
+ of_node_put(data);
+}
+
/**
* drm_crtc_port_mask - find the mask of a registered CRTC by port OF node
* @dev: DRM device
@@ -64,6 +69,24 @@ uint32_t drm_of_find_possible_crtcs(struct drm_device *dev,
EXPORT_SYMBOL(drm_of_find_possible_crtcs);
/**
+ * drm_of_component_match_add - Add a component helper OF node match rule
+ * @master: master device
+ * @matchptr: component match pointer
+ * @compare: compare function used for matching component
+ * @node: of_node
+ */
+void drm_of_component_match_add(struct device *master,
+ struct component_match **matchptr,
+ int (*compare)(struct device *, void *),
+ struct device_node *node)
+{
+ of_node_get(node);
+ component_match_add_release(master, matchptr, drm_release_of,
+ compare, node);
+}
+EXPORT_SYMBOL_GPL(drm_of_component_match_add);
+
+/**
* drm_of_component_probe - Generic probe function for a component based master
* @dev: master device containing the OF node
* @compare_of: compare function used for matching components
@@ -101,7 +124,7 @@ int drm_of_component_probe(struct device *dev,
continue;
}
- component_match_add(dev, &match, compare_of, port);
+ drm_of_component_match_add(dev, &match, compare_of, port);
of_node_put(port);
}
@@ -140,7 +163,8 @@ int drm_of_component_probe(struct device *dev,
continue;
}
- component_match_add(dev, &match, compare_of, remote);
+ drm_of_component_match_add(dev, &match, compare_of,
+ remote);
of_node_put(remote);
}
of_node_put(port);
diff --git a/drivers/gpu/drm/drm_plane.c b/drivers/gpu/drm/drm_plane.c
index 249c0ae52c6d..62b98f386fd1 100644
--- a/drivers/gpu/drm/drm_plane.c
+++ b/drivers/gpu/drm/drm_plane.c
@@ -79,7 +79,7 @@ static unsigned int drm_num_planes(struct drm_device *dev)
* Zero on success, error code on failure.
*/
int drm_universal_plane_init(struct drm_device *dev, struct drm_plane *plane,
- unsigned long possible_crtcs,
+ uint32_t possible_crtcs,
const struct drm_plane_funcs *funcs,
const uint32_t *formats, unsigned int format_count,
enum drm_plane_type type,
@@ -137,6 +137,7 @@ int drm_universal_plane_init(struct drm_device *dev, struct drm_plane *plane,
if (drm_core_check_feature(dev, DRIVER_ATOMIC)) {
drm_object_attach_property(&plane->base, config->prop_fb_id, 0);
+ drm_object_attach_property(&plane->base, config->prop_in_fence_fd, -1);
drm_object_attach_property(&plane->base, config->prop_crtc_id, 0);
drm_object_attach_property(&plane->base, config->prop_crtc_x, 0);
drm_object_attach_property(&plane->base, config->prop_crtc_y, 0);
@@ -195,7 +196,7 @@ void drm_plane_unregister_all(struct drm_device *dev)
* Zero on success, error code on failure.
*/
int drm_plane_init(struct drm_device *dev, struct drm_plane *plane,
- unsigned long possible_crtcs,
+ uint32_t possible_crtcs,
const struct drm_plane_funcs *funcs,
const uint32_t *formats, unsigned int format_count,
bool is_primary)
@@ -220,7 +221,8 @@ void drm_plane_cleanup(struct drm_plane *plane)
{
struct drm_device *dev = plane->dev;
- drm_modeset_lock_all(dev);
+ drm_modeset_lock_fini(&plane->mutex);
+
kfree(plane->format_types);
drm_mode_object_unregister(dev, &plane->base);
@@ -235,7 +237,6 @@ void drm_plane_cleanup(struct drm_plane *plane)
dev->mode_config.num_total_plane--;
if (plane->type == DRM_PLANE_TYPE_OVERLAY)
dev->mode_config.num_overlay_plane--;
- drm_modeset_unlock_all(dev);
WARN_ON(plane->state && !plane->funcs->atomic_destroy_state);
if (plane->state && plane->funcs->atomic_destroy_state)
@@ -479,9 +480,10 @@ static int __setplane_internal(struct drm_plane *plane,
/* Check whether this plane supports the fb pixel format. */
ret = drm_plane_check_pixel_format(plane, fb->pixel_format);
if (ret) {
- char *format_name = drm_get_format_name(fb->pixel_format);
- DRM_DEBUG_KMS("Invalid pixel format %s\n", format_name);
- kfree(format_name);
+ struct drm_format_name_buf format_name;
+ DRM_DEBUG_KMS("Invalid pixel format %s\n",
+ drm_get_format_name(fb->pixel_format,
+ &format_name));
goto out;
}
diff --git a/drivers/gpu/drm/drm_plane_helper.c b/drivers/gpu/drm/drm_plane_helper.c
index 7899fc1dcdb0..7a7dddf604d7 100644
--- a/drivers/gpu/drm/drm_plane_helper.c
+++ b/drivers/gpu/drm/drm_plane_helper.c
@@ -130,15 +130,8 @@ int drm_plane_helper_check_state(struct drm_plane_state *state,
unsigned int rotation = state->rotation;
int hscale, vscale;
- src->x1 = state->src_x;
- src->y1 = state->src_y;
- src->x2 = state->src_x + state->src_w;
- src->y2 = state->src_y + state->src_h;
-
- dst->x1 = state->crtc_x;
- dst->y1 = state->crtc_y;
- dst->x2 = state->crtc_x + state->crtc_w;
- dst->y2 = state->crtc_y + state->crtc_h;
+ *src = drm_plane_state_src(state);
+ *dst = drm_plane_state_dest(state);
if (!fb) {
state->visible = false;
diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c
index b22a94dd7b53..8d77b2462594 100644
--- a/drivers/gpu/drm/drm_prime.c
+++ b/drivers/gpu/drm/drm_prime.c
@@ -290,7 +290,8 @@ static void drm_gem_unmap_dma_buf(struct dma_buf_attachment *attach,
*
* This wraps dma_buf_export() for use by generic GEM drivers that are using
* drm_gem_dmabuf_release(). In addition to calling dma_buf_export(), we take
- * a reference to the drm_device which is released by drm_gem_dmabuf_release().
+ * a reference to the &drm_device and the exported &drm_gem_object (stored in
+ * exp_info->priv), both of which are released by drm_gem_dmabuf_release().
*
* Returns the new dmabuf.
*/
@@ -300,8 +301,11 @@ struct dma_buf *drm_gem_dmabuf_export(struct drm_device *dev,
struct dma_buf *dma_buf;
dma_buf = dma_buf_export(exp_info);
- if (!IS_ERR(dma_buf))
- drm_dev_ref(dev);
+ if (IS_ERR(dma_buf))
+ return dma_buf;
+
+ drm_dev_ref(dev);
+ drm_gem_object_reference(exp_info->priv);
return dma_buf;
}
@@ -472,8 +476,6 @@ static struct dma_buf *export_and_register_object(struct drm_device *dev,
*/
obj->dma_buf = dmabuf;
get_dma_buf(obj->dma_buf);
- /* Grab a new ref since the callers is now used by the dma-buf */
- drm_gem_object_reference(obj);
return dmabuf;
}
diff --git a/drivers/gpu/drm/drm_print.c b/drivers/gpu/drm/drm_print.c
new file mode 100644
index 000000000000..ad3caaa1f48b
--- /dev/null
+++ b/drivers/gpu/drm/drm_print.c
@@ -0,0 +1,59 @@
+/*
+ * Copyright (C) 2016 Red Hat
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Rob Clark <robdclark@gmail.com>
+ */
+
+#include <stdarg.h>
+#include <linux/seq_file.h>
+#include <drm/drmP.h>
+#include <drm/drm_print.h>
+
+void __drm_printfn_seq_file(struct drm_printer *p, struct va_format *vaf)
+{
+ seq_printf(p->arg, "%pV", vaf);
+}
+EXPORT_SYMBOL(__drm_printfn_seq_file);
+
+void __drm_printfn_info(struct drm_printer *p, struct va_format *vaf)
+{
+ dev_printk(KERN_INFO, p->arg, "[" DRM_NAME "] %pV", vaf);
+}
+EXPORT_SYMBOL(__drm_printfn_info);
+
+/**
+ * drm_printf - print to a &drm_printer stream
+ * @p: the &drm_printer
+ * @f: format string
+ */
+void drm_printf(struct drm_printer *p, const char *f, ...)
+{
+ struct va_format vaf;
+ va_list args;
+
+ va_start(args, f);
+ vaf.fmt = f;
+ vaf.va = &args;
+ p->printfn(p, &vaf);
+ va_end(args);
+}
+EXPORT_SYMBOL(drm_printf);
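
A sketch of the intended use, assuming the drm_seq_file_printer() constructor from the matching drm_print.h header (not part of this diff):

    static int example_debugfs_show(struct seq_file *m, void *unused)
    {
        struct drm_printer p = drm_seq_file_printer(m);

        drm_printf(&p, "some state: %d\n", 42);
        return 0;
    }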
diff --git a/drivers/gpu/drm/drm_probe_helper.c b/drivers/gpu/drm/drm_probe_helper.c
index f6b64d7d3528..ac953f037be7 100644
--- a/drivers/gpu/drm/drm_probe_helper.c
+++ b/drivers/gpu/drm/drm_probe_helper.c
@@ -152,6 +152,14 @@ void drm_kms_helper_poll_enable_locked(struct drm_device *dev)
}
EXPORT_SYMBOL(drm_kms_helper_poll_enable_locked);
+static enum drm_connector_status
+drm_connector_detect(struct drm_connector *connector, bool force)
+{
+ return connector->funcs->detect ?
+ connector->funcs->detect(connector, force) :
+ connector_status_connected;
+}
+
/**
* drm_helper_probe_single_connector_modes - get complete set of display modes
* @connector: connector to probe
@@ -239,7 +247,7 @@ int drm_helper_probe_single_connector_modes(struct drm_connector *connector,
if (connector->funcs->force)
connector->funcs->force(connector);
} else {
- connector->status = connector->funcs->detect(connector, true);
+ connector->status = drm_connector_detect(connector, true);
}
/*
@@ -384,7 +392,11 @@ static void output_poll_execute(struct work_struct *work)
if (!drm_kms_helper_poll)
goto out;
- mutex_lock(&dev->mode_config.mutex);
+ if (!mutex_trylock(&dev->mode_config.mutex)) {
+ repoll = true;
+ goto out;
+ }
+
drm_for_each_connector(connector, dev) {
/* Ignore forced connectors. */
@@ -405,7 +417,7 @@ static void output_poll_execute(struct work_struct *work)
repoll = true;
- connector->status = connector->funcs->detect(connector, false);
+ connector->status = drm_connector_detect(connector, false);
if (old_status != connector->status) {
const char *old, *new;
@@ -565,7 +577,7 @@ bool drm_helper_hpd_irq_event(struct drm_device *dev)
old_status = connector->status;
- connector->status = connector->funcs->detect(connector, false);
+ connector->status = drm_connector_detect(connector, false);
DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %s to %s\n",
connector->base.id,
connector->name,
diff --git a/drivers/gpu/drm/drm_property.c b/drivers/gpu/drm/drm_property.c
index a4d81cf4ffa0..24be69d29964 100644
--- a/drivers/gpu/drm/drm_property.c
+++ b/drivers/gpu/drm/drm_property.c
@@ -65,9 +65,9 @@ static bool drm_property_type_valid(struct drm_property *property)
* @num_values: number of pre-defined values
*
* This creates a new generic drm property which can then be attached to a drm
- * object with drm_object_attach_property. The returned property object must be
- * freed with drm_property_destroy(), which is done automatically when calling
- * drm_mode_config_cleanup().
+ * object with drm_object_attach_property(). The returned property object must
+ * be freed with drm_property_destroy(), which is done automatically when
+ * calling drm_mode_config_cleanup().
*
* Returns:
* A pointer to the newly created property on success, NULL on failure.
@@ -125,9 +125,9 @@ EXPORT_SYMBOL(drm_property_create);
* @num_values: number of pre-defined values
*
* This creates a new generic drm property which can then be attached to a drm
- * object with drm_object_attach_property. The returned property object must be
- * freed with drm_property_destroy(), which is done automatically when calling
- * drm_mode_config_cleanup().
+ * object with drm_object_attach_property(). The returned property object must
+ * be freed with drm_property_destroy(), which is done automatically when
+ * calling drm_mode_config_cleanup().
*
* Userspace is only allowed to set one of the predefined values for enumeration
* properties.
@@ -173,9 +173,9 @@ EXPORT_SYMBOL(drm_property_create_enum);
* @supported_bits: bitmask of all supported enumeration values
*
* This creates a new bitmask drm property which can then be attached to a drm
- * object with drm_object_attach_property. The returned property object must be
- * freed with drm_property_destroy(), which is done automatically when calling
- * drm_mode_config_cleanup().
+ * object with drm_object_attach_property(). The returned property object must
+ * be freed with drm_property_destroy(), which is done automatically when
+ * calling drm_mode_config_cleanup().
*
* Compared to plain enumeration properties userspace is allowed to set any
* or'ed together combination of the predefined property bitflag values
@@ -245,9 +245,9 @@ static struct drm_property *property_create_range(struct drm_device *dev,
* @max: maximum value of the property
*
* This creates a new generic drm property which can then be attached to a drm
- * object with drm_object_attach_property. The returned property object must be
- * freed with drm_property_destroy(), which is done automatically when calling
- * drm_mode_config_cleanup().
+ * object with drm_object_attach_property(). The returned property object must
+ * be freed with drm_property_destroy(), which is done automatically when
+ * calling drm_mode_config_cleanup().
*
* Userspace is allowed to set any unsigned integer value in the (min, max)
* range inclusive.
@@ -273,9 +273,9 @@ EXPORT_SYMBOL(drm_property_create_range);
* @max: maximum value of the property
*
* This creates a new generic drm property which can then be attached to a drm
- * object with drm_object_attach_property. The returned property object must be
- * freed with drm_property_destroy(), which is done automatically when calling
- * drm_mode_config_cleanup().
+ * object with drm_object_attach_property(). The returned property object must
+ * be freed with drm_property_destroy(), which is done automatically when
+ * calling drm_mode_config_cleanup().
*
* Userspace is allowed to set any signed integer value in the (min, max)
* range inclusive.
@@ -300,9 +300,9 @@ EXPORT_SYMBOL(drm_property_create_signed_range);
* @type: object type from DRM_MODE_OBJECT_* defines
*
* This creates a new generic drm property which can then be attached to a drm
- * object with drm_object_attach_property. The returned property object must be
- * freed with drm_property_destroy(), which is done automatically when calling
- * drm_mode_config_cleanup().
+ * object with drm_object_attach_property(). The returned property object must
+ * be freed with drm_property_destroy(), which is done automatically when
+ * calling drm_mode_config_cleanup().
*
* Userspace is only allowed to set this to any property value of the given
* @type. Only useful for atomic properties, which is enforced.
@@ -338,9 +338,9 @@ EXPORT_SYMBOL(drm_property_create_object);
* @name: name of the property
*
* This creates a new generic drm property which can then be attached to a drm
- * object with drm_object_attach_property. The returned property object must be
- * freed with drm_property_destroy(), which is done automatically when calling
- * drm_mode_config_cleanup().
+ * object with drm_object_attach_property(). The returned property object must
+ * be freed with drm_property_destroy(), which is done automatically when
+ * calling drm_mode_config_cleanup().
*
* This is implemented as a ranged property with only {0, 1} as valid values.
*
@@ -729,7 +729,6 @@ int drm_mode_getblob_ioctl(struct drm_device *dev,
struct drm_mode_get_blob *out_resp = data;
struct drm_property_blob *blob;
int ret = 0;
- void __user *blob_ptr;
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EINVAL;
@@ -739,8 +738,9 @@ int drm_mode_getblob_ioctl(struct drm_device *dev,
return -ENOENT;
if (out_resp->length == blob->length) {
- blob_ptr = (void __user *)(unsigned long)out_resp->data;
- if (copy_to_user(blob_ptr, blob->data, blob->length)) {
+ if (copy_to_user(u64_to_user_ptr(out_resp->data),
+ blob->data,
+ blob->length)) {
ret = -EFAULT;
goto unref;
}
@@ -757,7 +757,6 @@ int drm_mode_createblob_ioctl(struct drm_device *dev,
{
struct drm_mode_create_blob *out_resp = data;
struct drm_property_blob *blob;
- void __user *blob_ptr;
int ret = 0;
if (!drm_core_check_feature(dev, DRIVER_MODESET))
@@ -767,8 +766,9 @@ int drm_mode_createblob_ioctl(struct drm_device *dev,
if (IS_ERR(blob))
return PTR_ERR(blob);
- blob_ptr = (void __user *)(unsigned long)out_resp->data;
- if (copy_from_user(blob->data, blob_ptr, out_resp->length)) {
+ if (copy_from_user(blob->data,
+ u64_to_user_ptr(out_resp->data),
+ out_resp->length)) {
ret = -EFAULT;
goto out_blob;
}
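
u64_to_user_ptr() is the stock kernel helper for exactly the open-coded double cast removed above; conceptually (hedged, since the real macro also typechecks that its argument is a u64) it reduces to:

    #define u64_to_user_ptr(x) ((void __user *)(uintptr_t)(x))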
diff --git a/drivers/gpu/drm/drm_rect.c b/drivers/gpu/drm/drm_rect.c
index 73e53a8d1b37..e6057d8cdcd5 100644
--- a/drivers/gpu/drm/drm_rect.c
+++ b/drivers/gpu/drm/drm_rect.c
@@ -281,17 +281,10 @@ EXPORT_SYMBOL(drm_rect_calc_vscale_relaxed);
*/
void drm_rect_debug_print(const char *prefix, const struct drm_rect *r, bool fixed_point)
{
- int w = drm_rect_width(r);
- int h = drm_rect_height(r);
-
if (fixed_point)
- DRM_DEBUG_KMS("%s%d.%06ux%d.%06u%+d.%06u%+d.%06u\n", prefix,
- w >> 16, ((w & 0xffff) * 15625) >> 10,
- h >> 16, ((h & 0xffff) * 15625) >> 10,
- r->x1 >> 16, ((r->x1 & 0xffff) * 15625) >> 10,
- r->y1 >> 16, ((r->y1 & 0xffff) * 15625) >> 10);
+ DRM_DEBUG_KMS("%s" DRM_RECT_FP_FMT "\n", prefix, DRM_RECT_FP_ARG(r));
else
- DRM_DEBUG_KMS("%s%dx%d%+d%+d\n", prefix, w, h, r->x1, r->y1);
+ DRM_DEBUG_KMS("%s" DRM_RECT_FMT "\n", prefix, DRM_RECT_ARG(r));
}
EXPORT_SYMBOL(drm_rect_debug_print);
diff --git a/drivers/gpu/drm/drm_vm.c b/drivers/gpu/drm/drm_vm.c
index caa4e4ca616d..bd311c77c254 100644
--- a/drivers/gpu/drm/drm_vm.c
+++ b/drivers/gpu/drm/drm_vm.c
@@ -124,8 +124,7 @@ static int drm_do_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
* Using vm_pgoff as a selector forces us to use this unusual
* addressing scheme.
*/
- resource_size_t offset = (unsigned long)vmf->virtual_address -
- vma->vm_start;
+ resource_size_t offset = vmf->address - vma->vm_start;
resource_size_t baddr = map->offset + offset;
struct drm_agp_mem *agpmem;
struct page *page;
@@ -195,7 +194,7 @@ static int drm_do_vm_shm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
if (!map)
return VM_FAULT_SIGBUS; /* Nothing allocated */
- offset = (unsigned long)vmf->virtual_address - vma->vm_start;
+ offset = vmf->address - vma->vm_start;
i = (unsigned long)map->handle + offset;
page = vmalloc_to_page((void *)i);
if (!page)
@@ -301,7 +300,8 @@ static int drm_do_vm_dma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
if (!dma->pagelist)
return VM_FAULT_SIGBUS; /* Nothing allocated */
- offset = (unsigned long)vmf->virtual_address - vma->vm_start; /* vm_[pg]off[set] should be 0 */
+ offset = vmf->address - vma->vm_start;
+ /* vm_[pg]off[set] should be 0 */
page_nr = offset >> PAGE_SHIFT; /* page_nr could just be vmf->pgoff */
page = virt_to_page((void *)dma->pagelist[page_nr]);
@@ -337,7 +337,7 @@ static int drm_do_vm_sg_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
if (!entry->pagelist)
return VM_FAULT_SIGBUS; /* Nothing allocated */
- offset = (unsigned long)vmf->virtual_address - vma->vm_start;
+ offset = vmf->address - vma->vm_start;
map_offset = map->offset - (unsigned long)dev->sg->virtual;
page_offset = (offset >> PAGE_SHIFT) + (map_offset >> PAGE_SHIFT);
page = entry->pagelist[page_offset];
diff --git a/drivers/gpu/drm/etnaviv/cmdstream.xml.h b/drivers/gpu/drm/etnaviv/cmdstream.xml.h
index 8c44ba9a694e..65f1ba1099bd 100644
--- a/drivers/gpu/drm/etnaviv/cmdstream.xml.h
+++ b/drivers/gpu/drm/etnaviv/cmdstream.xml.h
@@ -8,10 +8,34 @@ http://0x04.net/cgit/index.cgi/rules-ng-ng
git clone git://0x04.net/rules-ng-ng
The rules-ng-ng source files this header was generated from are:
-- cmdstream.xml ( 12589 bytes, from 2014-02-17 14:57:56)
-- common.xml ( 18437 bytes, from 2015-03-25 11:27:41)
-
-Copyright (C) 2014
+- cmdstream.xml ( 14094 bytes, from 2016-11-11 06:55:14)
+- copyright.xml ( 1597 bytes, from 2016-10-29 07:29:22)
+- common.xml ( 23344 bytes, from 2016-11-10 15:14:07)
+
+Copyright (C) 2012-2016 by the following authors:
+- Wladimir J. van der Laan <laanwj@gmail.com>
+- Christian Gmeiner <christian.gmeiner@gmail.com>
+- Lucas Stach <l.stach@pengutronix.de>
+- Russell King <rmk@arm.linux.org.uk>
+
+Permission is hereby granted, free of charge, to any person obtaining a
+copy of this software and associated documentation files (the "Software"),
+to deal in the Software without restriction, including without limitation
+the rights to use, copy, modify, merge, publish, distribute, sub license,
+and/or sell copies of the Software, and to permit persons to whom the
+Software is furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice (including the
+next paragraph) shall be included in all copies or substantial portions
+of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+DEALINGS IN THE SOFTWARE.
*/
@@ -26,6 +50,7 @@ Copyright (C) 2014
#define FE_OPCODE_STALL 0x00000009
#define FE_OPCODE_CALL 0x0000000a
#define FE_OPCODE_RETURN 0x0000000b
+#define FE_OPCODE_DRAW_INSTANCED 0x0000000c
#define FE_OPCODE_CHIP_SELECT 0x0000000d
#define PRIMITIVE_TYPE_POINTS 0x00000001
#define PRIMITIVE_TYPE_LINES 0x00000002
@@ -214,5 +239,32 @@ Copyright (C) 2014
#define VIV_FE_CHIP_SELECT_HEADER_ENABLE_CHIP1 0x00000002
#define VIV_FE_CHIP_SELECT_HEADER_ENABLE_CHIP0 0x00000001
+#define VIV_FE_DRAW_INSTANCED 0x00000000
+
+#define VIV_FE_DRAW_INSTANCED_HEADER 0x00000000
+#define VIV_FE_DRAW_INSTANCED_HEADER_OP__MASK 0xf8000000
+#define VIV_FE_DRAW_INSTANCED_HEADER_OP__SHIFT 27
+#define VIV_FE_DRAW_INSTANCED_HEADER_OP_DRAW_INSTANCED 0x60000000
+#define VIV_FE_DRAW_INSTANCED_HEADER_INDEXED 0x00100000
+#define VIV_FE_DRAW_INSTANCED_HEADER_TYPE__MASK 0x000f0000
+#define VIV_FE_DRAW_INSTANCED_HEADER_TYPE__SHIFT 16
+#define VIV_FE_DRAW_INSTANCED_HEADER_TYPE(x) (((x) << VIV_FE_DRAW_INSTANCED_HEADER_TYPE__SHIFT) & VIV_FE_DRAW_INSTANCED_HEADER_TYPE__MASK)
+#define VIV_FE_DRAW_INSTANCED_HEADER_INSTANCE_COUNT_LO__MASK 0x0000ffff
+#define VIV_FE_DRAW_INSTANCED_HEADER_INSTANCE_COUNT_LO__SHIFT 0
+#define VIV_FE_DRAW_INSTANCED_HEADER_INSTANCE_COUNT_LO(x) (((x) << VIV_FE_DRAW_INSTANCED_HEADER_INSTANCE_COUNT_LO__SHIFT) & VIV_FE_DRAW_INSTANCED_HEADER_INSTANCE_COUNT_LO__MASK)
+
+#define VIV_FE_DRAW_INSTANCED_COUNT 0x00000004
+#define VIV_FE_DRAW_INSTANCED_COUNT_INSTANCE_COUNT_HI__MASK 0xff000000
+#define VIV_FE_DRAW_INSTANCED_COUNT_INSTANCE_COUNT_HI__SHIFT 24
+#define VIV_FE_DRAW_INSTANCED_COUNT_INSTANCE_COUNT_HI(x) (((x) << VIV_FE_DRAW_INSTANCED_COUNT_INSTANCE_COUNT_HI__SHIFT) & VIV_FE_DRAW_INSTANCED_COUNT_INSTANCE_COUNT_HI__MASK)
+#define VIV_FE_DRAW_INSTANCED_COUNT_VERTEX_COUNT__MASK 0x00ffffff
+#define VIV_FE_DRAW_INSTANCED_COUNT_VERTEX_COUNT__SHIFT 0
+#define VIV_FE_DRAW_INSTANCED_COUNT_VERTEX_COUNT(x) (((x) << VIV_FE_DRAW_INSTANCED_COUNT_VERTEX_COUNT__SHIFT) & VIV_FE_DRAW_INSTANCED_COUNT_VERTEX_COUNT__MASK)
+
+#define VIV_FE_DRAW_INSTANCED_START 0x00000008
+#define VIV_FE_DRAW_INSTANCED_START_INDEX__MASK 0xffffffff
+#define VIV_FE_DRAW_INSTANCED_START_INDEX__SHIFT 0
+#define VIV_FE_DRAW_INSTANCED_START_INDEX(x) (((x) << VIV_FE_DRAW_INSTANCED_START_INDEX__SHIFT) & VIV_FE_DRAW_INSTANCED_START_INDEX__MASK)
+
#endif /* CMDSTREAM_XML */
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_cmd_parser.c b/drivers/gpu/drm/etnaviv/etnaviv_cmd_parser.c
index dcfd565c88d1..2a2e5e366ab7 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_cmd_parser.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_cmd_parser.c
@@ -143,6 +143,7 @@ static bool etnaviv_validate_load_state(struct etna_validation_state *state,
static uint8_t cmd_length[32] = {
[FE_OPCODE_DRAW_PRIMITIVES] = 4,
[FE_OPCODE_DRAW_INDEXED_PRIMITIVES] = 6,
+ [FE_OPCODE_DRAW_INSTANCED] = 4,
[FE_OPCODE_NOP] = 2,
[FE_OPCODE_STALL] = 2,
};
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.c b/drivers/gpu/drm/etnaviv/etnaviv_drv.c
index aa687669e22b..00368b14d08d 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_drv.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.c
@@ -16,6 +16,7 @@
#include <linux/component.h>
#include <linux/of_platform.h>
+#include <drm/drm_of.h>
#include "etnaviv_drv.h"
#include "etnaviv_gpu.h"
@@ -478,9 +479,7 @@ static const struct file_operations fops = {
.open = drm_open,
.release = drm_release,
.unlocked_ioctl = drm_ioctl,
-#ifdef CONFIG_COMPAT
.compat_ioctl = drm_compat_ioctl,
-#endif
.poll = drm_poll,
.read = drm_read,
.llseek = no_llseek,
@@ -505,6 +504,7 @@ static struct drm_driver etnaviv_drm_driver = {
.gem_prime_import_sg_table = etnaviv_gem_prime_import_sg_table,
.gem_prime_vmap = etnaviv_gem_prime_vmap,
.gem_prime_vunmap = etnaviv_gem_prime_vunmap,
+ .gem_prime_mmap = etnaviv_gem_prime_mmap,
#ifdef CONFIG_DEBUG_FS
.debugfs_init = etnaviv_debugfs_init,
.debugfs_cleanup = etnaviv_debugfs_cleanup,
@@ -629,8 +629,8 @@ static int etnaviv_pdev_probe(struct platform_device *pdev)
if (!core_node)
break;
- component_match_add(&pdev->dev, &match, compare_of,
- core_node);
+ drm_of_component_match_add(&pdev->dev, &match,
+ compare_of, core_node);
of_node_put(core_node);
}
} else if (dev->platform_data) {
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.h b/drivers/gpu/drm/etnaviv/etnaviv_drv.h
index 65e057639653..c255eda40526 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_drv.h
+++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.h
@@ -78,6 +78,8 @@ int etnaviv_gem_mmap_offset(struct drm_gem_object *obj, u64 *offset);
struct sg_table *etnaviv_gem_prime_get_sg_table(struct drm_gem_object *obj);
void *etnaviv_gem_prime_vmap(struct drm_gem_object *obj);
void etnaviv_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
+int etnaviv_gem_prime_mmap(struct drm_gem_object *obj,
+ struct vm_area_struct *vma);
struct drm_gem_object *etnaviv_gem_prime_import_sg_table(struct drm_device *dev,
struct dma_buf_attachment *attach, struct sg_table *sg);
int etnaviv_gem_prime_pin(struct drm_gem_object *obj);
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_dump.c b/drivers/gpu/drm/etnaviv/etnaviv_dump.c
index 2bef501d4a17..af65491a78e2 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_dump.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_dump.c
@@ -160,7 +160,8 @@ void etnaviv_core_dump(struct etnaviv_gpu *gpu)
file_size += sizeof(*iter.hdr) * n_obj;
/* Allocate the file in vmalloc memory, it's likely to be big */
- iter.start = vmalloc(file_size);
+ iter.start = __vmalloc(file_size, GFP_KERNEL | __GFP_HIGHMEM |
+ __GFP_NOWARN | __GFP_NORETRY, PAGE_KERNEL);
if (!iter.start) {
dev_warn(gpu->dev, "failed to allocate devcoredump file\n");
return;
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.c b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
index 0370b842d9cc..114dddbd297b 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
@@ -202,15 +202,14 @@ int etnaviv_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
}
/* We don't use vmf->pgoff since that has the fake offset: */
- pgoff = ((unsigned long)vmf->virtual_address -
- vma->vm_start) >> PAGE_SHIFT;
+ pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
page = pages[pgoff];
- VERB("Inserting %p pfn %lx, pa %lx", vmf->virtual_address,
+ VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
page_to_pfn(page), page_to_pfn(page) << PAGE_SHIFT);
- ret = vm_insert_page(vma, (unsigned long)vmf->virtual_address, page);
+ ret = vm_insert_page(vma, vmf->address, page);
out:
switch (ret) {
@@ -409,20 +408,16 @@ int etnaviv_gem_cpu_prep(struct drm_gem_object *obj, u32 op,
struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
struct drm_device *dev = obj->dev;
bool write = !!(op & ETNA_PREP_WRITE);
- int ret;
-
- if (op & ETNA_PREP_NOSYNC) {
- if (!reservation_object_test_signaled_rcu(etnaviv_obj->resv,
- write))
- return -EBUSY;
- } else {
- unsigned long remain = etnaviv_timeout_to_jiffies(timeout);
+ unsigned long remain =
+ op & ETNA_PREP_NOSYNC ? 0 : etnaviv_timeout_to_jiffies(timeout);
+ long lret;
- ret = reservation_object_wait_timeout_rcu(etnaviv_obj->resv,
- write, true, remain);
- if (ret <= 0)
- return ret == 0 ? -ETIMEDOUT : ret;
- }
+ lret = reservation_object_wait_timeout_rcu(etnaviv_obj->resv,
+ write, true, remain);
+ if (lret < 0)
+ return lret;
+ else if (lret == 0)
+ return remain == 0 ? -EBUSY : -ETIMEDOUT;
if (etnaviv_obj->flags & ETNA_BO_CACHED) {
if (!etnaviv_obj->sgt) {
@@ -470,10 +465,10 @@ int etnaviv_gem_wait_bo(struct etnaviv_gpu *gpu, struct drm_gem_object *obj,
}
#ifdef CONFIG_DEBUG_FS
-static void etnaviv_gem_describe_fence(struct fence *fence,
+static void etnaviv_gem_describe_fence(struct dma_fence *fence,
const char *type, struct seq_file *m)
{
- if (!test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags))
+ if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
seq_printf(m, "\t%9s: %s %s seq %u\n",
type,
fence->ops->get_driver_name(fence),
@@ -486,7 +481,7 @@ static void etnaviv_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
struct reservation_object *robj = etnaviv_obj->resv;
struct reservation_object_list *fobj;
- struct fence *fence;
+ struct dma_fence *fence;
unsigned long off = drm_vma_node_start(&obj->vma_node);
seq_printf(m, "%08x: %c %2d (%2d) %08lx %p %zd\n",
@@ -763,7 +758,7 @@ static struct page **etnaviv_gem_userptr_do_get_pages(
down_read(&mm->mmap_sem);
while (pinned < npages) {
ret = get_user_pages_remote(task, mm, ptr, npages - pinned,
- flags, pvec + pinned, NULL);
+ flags, pvec + pinned, NULL, NULL);
if (ret < 0)
break;
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c
index b93618c1aa69..62b47972a52e 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c
@@ -23,10 +23,12 @@
struct sg_table *etnaviv_gem_prime_get_sg_table(struct drm_gem_object *obj)
{
struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
+ int npages = obj->size >> PAGE_SHIFT;
- BUG_ON(!etnaviv_obj->sgt); /* should have already pinned! */
+ if (WARN_ON(!etnaviv_obj->pages)) /* should have already pinned! */
+ return NULL;
- return etnaviv_obj->sgt;
+ return drm_prime_pages_to_sg(etnaviv_obj->pages, npages);
}
void *etnaviv_gem_prime_vmap(struct drm_gem_object *obj)
@@ -39,6 +41,19 @@ void etnaviv_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
/* TODO msm_gem_vunmap() */
}
+int etnaviv_gem_prime_mmap(struct drm_gem_object *obj,
+ struct vm_area_struct *vma)
+{
+ struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
+ int ret;
+
+ ret = drm_gem_mmap_obj(obj, obj->size, vma);
+ if (ret < 0)
+ return ret;
+
+ return etnaviv_obj->ops->mmap(etnaviv_obj, vma);
+}
+
int etnaviv_gem_prime_pin(struct drm_gem_object *obj)
{
if (!obj->import_attach) {
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
index b1254f885fed..0a67124bb2a4 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
@@ -15,7 +15,7 @@
*/
#include <linux/component.h>
-#include <linux/fence.h>
+#include <linux/dma-fence.h>
#include <linux/moduleparam.h>
#include <linux/of_device.h>
#include "etnaviv_dump.h"
@@ -639,6 +639,10 @@ int etnaviv_gpu_init(struct etnaviv_gpu *gpu)
gpu->memory_base = PHYS_OFFSET;
else
gpu->memory_base = dma_mask - SZ_2G + 1;
+ } else if (PHYS_OFFSET >= SZ_2G) {
+ dev_info(gpu->dev, "Need to move linear window on MC1.0, disabling TS\n");
+ gpu->memory_base = PHYS_OFFSET;
+ gpu->identity.features &= ~chipFeatures_FAST_CLEAR;
}
ret = etnaviv_hw_reset(gpu);
@@ -882,7 +886,7 @@ static void recover_worker(struct work_struct *work)
for (i = 0; i < ARRAY_SIZE(gpu->event); i++) {
if (!gpu->event[i].used)
continue;
- fence_signal(gpu->event[i].fence);
+ dma_fence_signal(gpu->event[i].fence);
gpu->event[i].fence = NULL;
gpu->event[i].used = false;
complete(&gpu->event_free);
@@ -952,55 +956,55 @@ static void hangcheck_disable(struct etnaviv_gpu *gpu)
/* fence object management */
struct etnaviv_fence {
struct etnaviv_gpu *gpu;
- struct fence base;
+ struct dma_fence base;
};
-static inline struct etnaviv_fence *to_etnaviv_fence(struct fence *fence)
+static inline struct etnaviv_fence *to_etnaviv_fence(struct dma_fence *fence)
{
return container_of(fence, struct etnaviv_fence, base);
}
-static const char *etnaviv_fence_get_driver_name(struct fence *fence)
+static const char *etnaviv_fence_get_driver_name(struct dma_fence *fence)
{
return "etnaviv";
}
-static const char *etnaviv_fence_get_timeline_name(struct fence *fence)
+static const char *etnaviv_fence_get_timeline_name(struct dma_fence *fence)
{
struct etnaviv_fence *f = to_etnaviv_fence(fence);
return dev_name(f->gpu->dev);
}
-static bool etnaviv_fence_enable_signaling(struct fence *fence)
+static bool etnaviv_fence_enable_signaling(struct dma_fence *fence)
{
return true;
}
-static bool etnaviv_fence_signaled(struct fence *fence)
+static bool etnaviv_fence_signaled(struct dma_fence *fence)
{
struct etnaviv_fence *f = to_etnaviv_fence(fence);
return fence_completed(f->gpu, f->base.seqno);
}
-static void etnaviv_fence_release(struct fence *fence)
+static void etnaviv_fence_release(struct dma_fence *fence)
{
struct etnaviv_fence *f = to_etnaviv_fence(fence);
kfree_rcu(f, base.rcu);
}
-static const struct fence_ops etnaviv_fence_ops = {
+static const struct dma_fence_ops etnaviv_fence_ops = {
.get_driver_name = etnaviv_fence_get_driver_name,
.get_timeline_name = etnaviv_fence_get_timeline_name,
.enable_signaling = etnaviv_fence_enable_signaling,
.signaled = etnaviv_fence_signaled,
- .wait = fence_default_wait,
+ .wait = dma_fence_default_wait,
.release = etnaviv_fence_release,
};
-static struct fence *etnaviv_gpu_fence_alloc(struct etnaviv_gpu *gpu)
+static struct dma_fence *etnaviv_gpu_fence_alloc(struct etnaviv_gpu *gpu)
{
struct etnaviv_fence *f;
@@ -1010,8 +1014,8 @@ static struct fence *etnaviv_gpu_fence_alloc(struct etnaviv_gpu *gpu)
f->gpu = gpu;
- fence_init(&f->base, &etnaviv_fence_ops, &gpu->fence_spinlock,
- gpu->fence_context, ++gpu->next_fence);
+ dma_fence_init(&f->base, &etnaviv_fence_ops, &gpu->fence_spinlock,
+ gpu->fence_context, ++gpu->next_fence);
return &f->base;
}
@@ -1021,7 +1025,7 @@ int etnaviv_gpu_fence_sync_obj(struct etnaviv_gem_object *etnaviv_obj,
{
struct reservation_object *robj = etnaviv_obj->resv;
struct reservation_object_list *fobj;
- struct fence *fence;
+ struct dma_fence *fence;
int i, ret;
if (!exclusive) {
@@ -1039,7 +1043,7 @@ int etnaviv_gpu_fence_sync_obj(struct etnaviv_gem_object *etnaviv_obj,
/* Wait on any existing exclusive fence which isn't our own */
fence = reservation_object_get_excl(robj);
if (fence && fence->context != context) {
- ret = fence_wait(fence, true);
+ ret = dma_fence_wait(fence, true);
if (ret)
return ret;
}
@@ -1052,7 +1056,7 @@ int etnaviv_gpu_fence_sync_obj(struct etnaviv_gem_object *etnaviv_obj,
fence = rcu_dereference_protected(fobj->shared[i],
reservation_object_held(robj));
if (fence->context != context) {
- ret = fence_wait(fence, true);
+ ret = dma_fence_wait(fence, true);
if (ret)
return ret;
}
@@ -1158,11 +1162,11 @@ static void retire_worker(struct work_struct *work)
mutex_lock(&gpu->lock);
list_for_each_entry_safe(cmdbuf, tmp, &gpu->active_cmd_list, node) {
- if (!fence_is_signaled(cmdbuf->fence))
+ if (!dma_fence_is_signaled(cmdbuf->fence))
break;
list_del(&cmdbuf->node);
- fence_put(cmdbuf->fence);
+ dma_fence_put(cmdbuf->fence);
for (i = 0; i < cmdbuf->nr_bos; i++) {
struct etnaviv_vram_mapping *mapping = cmdbuf->bo_map[i];
@@ -1275,7 +1279,7 @@ void etnaviv_gpu_pm_put(struct etnaviv_gpu *gpu)
int etnaviv_gpu_submit(struct etnaviv_gpu *gpu,
struct etnaviv_gem_submit *submit, struct etnaviv_cmdbuf *cmdbuf)
{
- struct fence *fence;
+ struct dma_fence *fence;
unsigned int event, i;
int ret;
@@ -1391,7 +1395,7 @@ static irqreturn_t irq_handler(int irq, void *data)
}
while ((event = ffs(intr)) != 0) {
- struct fence *fence;
+ struct dma_fence *fence;
event -= 1;
@@ -1401,7 +1405,7 @@ static irqreturn_t irq_handler(int irq, void *data)
fence = gpu->event[event].fence;
gpu->event[event].fence = NULL;
- fence_signal(fence);
+ dma_fence_signal(fence);
/*
* Events can be processed out of order. Eg,
@@ -1553,7 +1557,7 @@ static int etnaviv_gpu_bind(struct device *dev, struct device *master,
return ret;
gpu->drm = drm;
- gpu->fence_context = fence_context_alloc(1);
+ gpu->fence_context = dma_fence_context_alloc(1);
spin_lock_init(&gpu->fence_spinlock);
INIT_LIST_HEAD(&gpu->active_cmd_list);
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
index 73c278dc3706..8c6b824e9d0a 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
@@ -89,7 +89,7 @@ struct etnaviv_chip_identity {
struct etnaviv_event {
bool used;
- struct fence *fence;
+ struct dma_fence *fence;
};
struct etnaviv_cmdbuf;
@@ -163,7 +163,7 @@ struct etnaviv_cmdbuf {
/* vram node used if the cmdbuf is mapped through the MMUv2 */
struct drm_mm_node vram_node;
/* fence after which this buffer is to be disposed */
- struct fence *fence;
+ struct dma_fence *fence;
/* target exec state */
u32 exec_state;
/* per GPU in-flight list */
diff --git a/drivers/gpu/drm/exynos/Kconfig b/drivers/gpu/drm/exynos/Kconfig
index 465d344f3391..d706ca4e2f02 100644
--- a/drivers/gpu/drm/exynos/Kconfig
+++ b/drivers/gpu/drm/exynos/Kconfig
@@ -114,7 +114,7 @@ config DRM_EXYNOS_ROTATOR
config DRM_EXYNOS_GSC
bool "GScaler"
- depends on DRM_EXYNOS_IPP && ARCH_EXYNOS5 && !VIDEO_SAMSUNG_EXYNOS_GSC
+ depends on DRM_EXYNOS_IPP && ARCH_EXYNOS5 && VIDEO_SAMSUNG_EXYNOS_GSC=n
help
Choose this option if you want to use Exynos GSC for DRM.
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c
index f86e7c846678..739180ac3da5 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c
@@ -69,7 +69,7 @@ static void exynos_atomic_commit_complete(struct exynos_atomic_commit *commit)
drm_atomic_helper_cleanup_planes(dev, state);
- drm_atomic_state_free(state);
+ drm_atomic_state_put(state);
spin_lock(&priv->lock);
priv->pending &= ~commit->crtcs;
@@ -254,6 +254,7 @@ int exynos_atomic_commit(struct drm_device *dev, struct drm_atomic_state *state,
drm_atomic_helper_swap_state(state, true);
+ drm_atomic_state_get(state);
if (nonblock)
schedule_work(&commit->work);
else
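These two exynos hunks pair up: the commit path takes an extra reference on the atomic state before handing it to the (possibly asynchronous) worker, and exynos_atomic_commit_complete() above drops that reference with drm_atomic_state_put() instead of freeing the state outright. The pattern, reduced to its core (names from this diff, error handling elided):

	/* submit side: keep the state alive across the async commit */
	drm_atomic_state_get(state);
	schedule_work(&commit->work);

	/* completion side, in the worker: drop the reference when done */
	drm_atomic_helper_cleanup_planes(dev, state);
	drm_atomic_state_put(state);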
@@ -365,9 +366,7 @@ static const struct file_operations exynos_drm_driver_fops = {
.poll = drm_poll,
.read = drm_read,
.unlocked_ioctl = drm_ioctl,
-#ifdef CONFIG_COMPAT
.compat_ioctl = drm_compat_ioctl,
-#endif
.release = drm_release,
};
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
index 4cfb39d543b4..9f35deb56170 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
@@ -63,15 +63,11 @@ static int exynos_drm_fb_mmap(struct fb_info *info,
static struct fb_ops exynos_drm_fb_ops = {
.owner = THIS_MODULE,
+ DRM_FB_HELPER_DEFAULT_OPS,
.fb_mmap = exynos_drm_fb_mmap,
.fb_fillrect = drm_fb_helper_cfb_fillrect,
.fb_copyarea = drm_fb_helper_cfb_copyarea,
.fb_imageblit = drm_fb_helper_cfb_imageblit,
- .fb_check_var = drm_fb_helper_check_var,
- .fb_set_par = drm_fb_helper_set_par,
- .fb_blank = drm_fb_helper_blank,
- .fb_pan_display = drm_fb_helper_pan_display,
- .fb_setcmap = drm_fb_helper_setcmap,
};
static int exynos_drm_fbdev_update(struct drm_fb_helper *helper,
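DRM_FB_HELPER_DEFAULT_OPS stands in for exactly the boilerplate callbacks removed in hunks like this one; judging from the five deletions here, it expands to roughly the following (a sketch, not the authoritative definition):

	#define DRM_FB_HELPER_DEFAULT_OPS \
		.fb_check_var	= drm_fb_helper_check_var, \
		.fb_set_par	= drm_fb_helper_set_par, \
		.fb_blank	= drm_fb_helper_blank, \
		.fb_pan_display	= drm_fb_helper_pan_display, \
		.fb_setcmap	= drm_fb_helper_setcmap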
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimc.c b/drivers/gpu/drm/exynos/exynos_drm_fimc.c
index 147ef0d298cb..95871577015d 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimc.c
@@ -1433,7 +1433,7 @@ static int fimc_ippdrv_start(struct device *dev, enum drm_exynos_ipp_cmd cmd)
&img_pos[EXYNOS_DRM_OPS_SRC],
&img_pos[EXYNOS_DRM_OPS_DST]);
if (ret) {
- dev_err(dev, "failed to set precalser.\n");
+ dev_err(dev, "failed to set prescaler.\n");
return ret;
}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c
index f2ae72ba7d5a..57b81460fec8 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c
@@ -231,12 +231,12 @@ struct exynos_drm_gem *exynos_drm_gem_create(struct drm_device *dev,
int ret;
if (flags & ~(EXYNOS_BO_MASK)) {
- DRM_ERROR("invalid flags.\n");
+ DRM_ERROR("invalid GEM buffer flags: %u\n", flags);
return ERR_PTR(-EINVAL);
}
if (!size) {
- DRM_ERROR("invalid size.\n");
+ DRM_ERROR("invalid GEM buffer size: %lu\n", size);
return ERR_PTR(-EINVAL);
}
@@ -455,8 +455,7 @@ int exynos_drm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
pgoff_t page_offset;
int ret;
- page_offset = ((unsigned long)vmf->virtual_address -
- vma->vm_start) >> PAGE_SHIFT;
+ page_offset = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
if (page_offset >= (exynos_gem->size >> PAGE_SHIFT)) {
DRM_ERROR("invalid page offset\n");
@@ -465,8 +464,7 @@ int exynos_drm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
}
pfn = page_to_pfn(exynos_gem->pages[page_offset]);
- ret = vm_insert_mixed(vma, (unsigned long)vmf->virtual_address,
- __pfn_to_pfn_t(pfn, PFN_DEV));
+ ret = vm_insert_mixed(vma, vmf->address, __pfn_to_pfn_t(pfn, PFN_DEV));
out:
switch (ret) {
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gsc.c b/drivers/gpu/drm/exynos/exynos_drm_gsc.c
index 52a9d269484e..bef57987759d 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gsc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gsc.c
@@ -1610,7 +1610,7 @@ static int gsc_ippdrv_start(struct device *dev, enum drm_exynos_ipp_cmd cmd)
&img_pos[EXYNOS_DRM_OPS_SRC],
&img_pos[EXYNOS_DRM_OPS_DST]);
if (ret) {
- dev_err(dev, "failed to set precalser.\n");
+ dev_err(dev, "failed to set prescaler.\n");
return ret;
}
diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c
index 38eaa63afb31..5ed8b1effe71 100644
--- a/drivers/gpu/drm/exynos/exynos_hdmi.c
+++ b/drivers/gpu/drm/exynos/exynos_hdmi.c
@@ -47,19 +47,6 @@
#define HOTPLUG_DEBOUNCE_MS 1100
-/* AVI header and aspect ratio */
-#define HDMI_AVI_VERSION 0x02
-#define HDMI_AVI_LENGTH 0x0d
-
-/* AUI header info */
-#define HDMI_AUI_VERSION 0x01
-#define HDMI_AUI_LENGTH 0x0a
-
-/* AVI active format aspect ratio */
-#define AVI_SAME_AS_PIC_ASPECT_RATIO 0x08
-#define AVI_4_3_CENTER_RATIO 0x09
-#define AVI_16_9_CENTER_RATIO 0x0a
-
enum hdmi_type {
HDMI_TYPE13,
HDMI_TYPE14,
@@ -131,7 +118,6 @@ struct hdmi_context {
bool dvi_mode;
struct delayed_work hotplug_work;
struct drm_display_mode current_mode;
- u8 cea_video_id;
const struct hdmi_driver_data *drv_data;
void __iomem *regs;
@@ -681,6 +667,13 @@ static inline void hdmi_reg_writev(struct hdmi_context *hdata, u32 reg_id,
}
}
+static inline void hdmi_reg_write_buf(struct hdmi_context *hdata, u32 reg_id,
+ u8 *buf, int size)
+{
+ for (reg_id = hdmi_map_reg(hdata, reg_id); size; --size, reg_id += 4)
+ writel(*buf++, hdata->regs + reg_id);
+}
+
static inline void hdmi_reg_writemask(struct hdmi_context *hdata,
u32 reg_id, u32 value, u32 mask)
{
@@ -762,93 +755,50 @@ static int hdmi_clk_set_parents(struct hdmi_context *hdata, bool to_phy)
return ret;
}
-static u8 hdmi_chksum(struct hdmi_context *hdata,
- u32 start, u8 len, u32 hdr_sum)
-{
- int i;
-
- /* hdr_sum : header0 + header1 + header2
- * start : start address of packet byte1
- * len : packet bytes - 1 */
- for (i = 0; i < len; ++i)
- hdr_sum += 0xff & hdmi_reg_read(hdata, start + i * 4);
-
- /* return 2's complement of 8 bit hdr_sum */
- return (u8)(~(hdr_sum & 0xff) + 1);
-}
-
-static void hdmi_reg_infoframe(struct hdmi_context *hdata,
- union hdmi_infoframe *infoframe)
+static void hdmi_reg_infoframes(struct hdmi_context *hdata)
{
- u32 hdr_sum;
- u8 chksum;
- u8 ar;
+ union hdmi_infoframe frm;
+ u8 buf[25];
+ int ret;
if (hdata->dvi_mode) {
- hdmi_reg_writeb(hdata, HDMI_VSI_CON,
- HDMI_VSI_CON_DO_NOT_TRANSMIT);
hdmi_reg_writeb(hdata, HDMI_AVI_CON,
HDMI_AVI_CON_DO_NOT_TRANSMIT);
+ hdmi_reg_writeb(hdata, HDMI_VSI_CON,
+ HDMI_VSI_CON_DO_NOT_TRANSMIT);
hdmi_reg_writeb(hdata, HDMI_AUI_CON, HDMI_AUI_CON_NO_TRAN);
return;
}
- switch (infoframe->any.type) {
- case HDMI_INFOFRAME_TYPE_AVI:
+ ret = drm_hdmi_avi_infoframe_from_display_mode(&frm.avi,
+ &hdata->current_mode);
+ if (!ret)
+ ret = hdmi_avi_infoframe_pack(&frm.avi, buf, sizeof(buf));
+ if (ret > 0) {
hdmi_reg_writeb(hdata, HDMI_AVI_CON, HDMI_AVI_CON_EVERY_VSYNC);
- hdmi_reg_writeb(hdata, HDMI_AVI_HEADER0, infoframe->any.type);
- hdmi_reg_writeb(hdata, HDMI_AVI_HEADER1,
- infoframe->any.version);
- hdmi_reg_writeb(hdata, HDMI_AVI_HEADER2, infoframe->any.length);
- hdr_sum = infoframe->any.type + infoframe->any.version +
- infoframe->any.length;
-
- /* Output format zero hardcoded ,RGB YBCR selection */
- hdmi_reg_writeb(hdata, HDMI_AVI_BYTE(1), 0 << 5 |
- AVI_ACTIVE_FORMAT_VALID |
- AVI_UNDERSCANNED_DISPLAY_VALID);
-
- /*
- * Set the aspect ratio as per the mode, mentioned in
- * Table 9 AVI InfoFrame Data Byte 2 of CEA-861-D Standard
- */
- ar = hdata->current_mode.picture_aspect_ratio;
- switch (ar) {
- case HDMI_PICTURE_ASPECT_4_3:
- ar |= AVI_4_3_CENTER_RATIO;
- break;
- case HDMI_PICTURE_ASPECT_16_9:
- ar |= AVI_16_9_CENTER_RATIO;
- break;
- case HDMI_PICTURE_ASPECT_NONE:
- default:
- ar |= AVI_SAME_AS_PIC_ASPECT_RATIO;
- break;
- }
- hdmi_reg_writeb(hdata, HDMI_AVI_BYTE(2), ar);
+ hdmi_reg_write_buf(hdata, HDMI_AVI_HEADER0, buf, ret);
+ } else {
+ DRM_INFO("%s: invalid AVI infoframe (%d)\n", __func__, ret);
+ }
- hdmi_reg_writeb(hdata, HDMI_AVI_BYTE(4), hdata->cea_video_id);
+ ret = drm_hdmi_vendor_infoframe_from_display_mode(&frm.vendor.hdmi,
+ &hdata->current_mode);
+ if (!ret)
+ ret = hdmi_vendor_infoframe_pack(&frm.vendor.hdmi, buf,
+ sizeof(buf));
+ if (ret > 0) {
+ hdmi_reg_writeb(hdata, HDMI_VSI_CON, HDMI_VSI_CON_EVERY_VSYNC);
+ hdmi_reg_write_buf(hdata, HDMI_VSI_HEADER0, buf, ret);
+ }
- chksum = hdmi_chksum(hdata, HDMI_AVI_BYTE(1),
- infoframe->any.length, hdr_sum);
- DRM_DEBUG_KMS("AVI checksum = 0x%x\n", chksum);
- hdmi_reg_writeb(hdata, HDMI_AVI_CHECK_SUM, chksum);
- break;
- case HDMI_INFOFRAME_TYPE_AUDIO:
- hdmi_reg_writeb(hdata, HDMI_AUI_CON, 0x02);
- hdmi_reg_writeb(hdata, HDMI_AUI_HEADER0, infoframe->any.type);
- hdmi_reg_writeb(hdata, HDMI_AUI_HEADER1,
- infoframe->any.version);
- hdmi_reg_writeb(hdata, HDMI_AUI_HEADER2, infoframe->any.length);
- hdr_sum = infoframe->any.type + infoframe->any.version +
- infoframe->any.length;
- chksum = hdmi_chksum(hdata, HDMI_AUI_BYTE(1),
- infoframe->any.length, hdr_sum);
- DRM_DEBUG_KMS("AUI checksum = 0x%x\n", chksum);
- hdmi_reg_writeb(hdata, HDMI_AUI_CHECK_SUM, chksum);
- break;
- default:
- break;
+ ret = hdmi_audio_infoframe_init(&frm.audio);
+ if (!ret) {
+ frm.audio.channels = 2;
+ ret = hdmi_audio_infoframe_pack(&frm.audio, buf, sizeof(buf));
+ }
+ if (ret > 0) {
+ hdmi_reg_writeb(hdata, HDMI_AUI_CON, HDMI_AUI_CON_EVERY_VSYNC);
+ hdmi_reg_write_buf(hdata, HDMI_AUI_HEADER0, buf, ret);
}
}
@@ -1127,8 +1077,6 @@ static void hdmi_start(struct hdmi_context *hdata, bool start)
static void hdmi_conf_init(struct hdmi_context *hdata)
{
- union hdmi_infoframe infoframe;
-
/* disable HPD interrupts from HDMI IP block, use GPIO instead */
hdmi_reg_writemask(hdata, HDMI_INTC_CON, 0, HDMI_INTC_EN_GLOBAL |
HDMI_INTC_EN_HPD_PLUG | HDMI_INTC_EN_HPD_UNPLUG);
@@ -1164,15 +1112,7 @@ static void hdmi_conf_init(struct hdmi_context *hdata)
hdmi_reg_writeb(hdata, HDMI_V13_AUI_CON, 0x02);
hdmi_reg_writeb(hdata, HDMI_V13_ACR_CON, 0x04);
} else {
- infoframe.any.type = HDMI_INFOFRAME_TYPE_AVI;
- infoframe.any.version = HDMI_AVI_VERSION;
- infoframe.any.length = HDMI_AVI_LENGTH;
- hdmi_reg_infoframe(hdata, &infoframe);
-
- infoframe.any.type = HDMI_INFOFRAME_TYPE_AUDIO;
- infoframe.any.version = HDMI_AUI_VERSION;
- infoframe.any.length = HDMI_AUI_LENGTH;
- hdmi_reg_infoframe(hdata, &infoframe);
+ hdmi_reg_infoframes(hdata);
/* enable AVI packet every vsync, fixes purple line problem */
hdmi_reg_writemask(hdata, HDMI_CON_1, 2, 3 << 5);
@@ -1458,7 +1398,6 @@ static void hdmi_mode_set(struct drm_encoder *encoder,
"INTERLACED" : "PROGRESSIVE");
drm_mode_copy(&hdata->current_mode, m);
- hdata->cea_video_id = drm_match_cea_mode(mode);
}
static void hdmi_set_refclk(struct hdmi_context *hdata, bool on)
diff --git a/drivers/gpu/drm/exynos/regs-hdmi.h b/drivers/gpu/drm/exynos/regs-hdmi.h
index 169667a22bdc..a0507dc18d9e 100644
--- a/drivers/gpu/drm/exynos/regs-hdmi.h
+++ b/drivers/gpu/drm/exynos/regs-hdmi.h
@@ -361,9 +361,11 @@
/* AUI bit definition */
#define HDMI_AUI_CON_NO_TRAN (0 << 0)
+#define HDMI_AUI_CON_EVERY_VSYNC (1 << 1)
/* VSI bit definition */
#define HDMI_VSI_CON_DO_NOT_TRANSMIT (0 << 0)
+#define HDMI_VSI_CON_EVERY_VSYNC (1 << 1)
/* HDCP related registers */
#define HDMI_HDCP_SHA1(n) HDMI_CORE_BASE(0x7000 + 4 * (n))
diff --git a/drivers/gpu/drm/fsl-dcu/Makefile b/drivers/gpu/drm/fsl-dcu/Makefile
index b35a292287f3..aca34f656bea 100644
--- a/drivers/gpu/drm/fsl-dcu/Makefile
+++ b/drivers/gpu/drm/fsl-dcu/Makefile
@@ -3,6 +3,5 @@ fsl-dcu-drm-y := fsl_dcu_drm_drv.o \
fsl_dcu_drm_rgb.o \
fsl_dcu_drm_plane.o \
fsl_dcu_drm_crtc.o \
- fsl_dcu_drm_fbdev.o \
fsl_tcon.o
obj-$(CONFIG_DRM_FSL_DCU) += fsl-dcu-drm.o
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c
index cc2fde2ae5ef..537ca159ffe5 100644
--- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c
@@ -32,6 +32,9 @@
#include "fsl_dcu_drm_drv.h"
#include "fsl_tcon.h"
+static int legacyfb_depth = 24;
+module_param(legacyfb_depth, int, 0444);
+
static bool fsl_dcu_drm_is_volatile_reg(struct device *dev, unsigned int reg)
{
if (reg == DCU_INT_STATUS || reg == DCU_UPDATE_MODE)
@@ -85,7 +88,18 @@ static int fsl_dcu_load(struct drm_device *dev, unsigned long flags)
goto done;
dev->irq_enabled = true;
- fsl_dcu_fbdev_init(dev);
+ if (legacyfb_depth != 16 && legacyfb_depth != 24 &&
+ legacyfb_depth != 32) {
+ dev_warn(dev->dev,
+ "Invalid legacyfb_depth. Defaulting to 24bpp\n");
+ legacyfb_depth = 24;
+ }
+ fsl_dev->fbdev = drm_fbdev_cma_init(dev, legacyfb_depth, 1, 1);
+ if (IS_ERR(fsl_dev->fbdev)) {
+ ret = PTR_ERR(fsl_dev->fbdev);
+ fsl_dev->fbdev = NULL;
+ goto done;
+ }
return 0;
done:
@@ -106,6 +120,7 @@ static int fsl_dcu_unload(struct drm_device *dev)
{
struct fsl_dcu_drm_device *fsl_dev = dev->dev_private;
+ drm_crtc_force_disable_all(dev);
drm_kms_helper_poll_fini(dev);
if (fsl_dev->fbdev)
@@ -176,9 +191,7 @@ static const struct file_operations fsl_dcu_drm_fops = {
.open = drm_open,
.release = drm_release,
.unlocked_ioctl = drm_ioctl,
-#ifdef CONFIG_COMPAT
.compat_ioctl = drm_compat_ioctl,
-#endif
.poll = drm_poll,
.read = drm_read,
.llseek = no_llseek,
@@ -334,11 +347,6 @@ static int fsl_dcu_drm_probe(struct platform_device *pdev)
fsl_dev->soc = id->data;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- dev_err(dev, "could not get memory IO resource\n");
- return -ENODEV;
- }
-
base = devm_ioremap_resource(dev, res);
if (IS_ERR(base)) {
ret = PTR_ERR(base);
@@ -348,7 +356,7 @@ static int fsl_dcu_drm_probe(struct platform_device *pdev)
fsl_dev->irq = platform_get_irq(pdev, 0);
if (fsl_dev->irq < 0) {
dev_err(dev, "failed to get irq\n");
- return -ENXIO;
+ return fsl_dev->irq;
}
fsl_dev->regmap = devm_regmap_init_mmio(dev, base,
@@ -426,9 +434,9 @@ static int fsl_dcu_drm_remove(struct platform_device *pdev)
{
struct fsl_dcu_drm_device *fsl_dev = platform_get_drvdata(pdev);
+ drm_put_dev(fsl_dev->drm);
clk_disable_unprepare(fsl_dev->clk);
clk_unregister(fsl_dev->pix_clk);
- drm_put_dev(fsl_dev->drm);
return 0;
}
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.h b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.h
index 3b371fe7491e..e9e9aeecf2eb 100644
--- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.h
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.h
@@ -197,7 +197,6 @@ struct fsl_dcu_drm_device {
struct drm_atomic_state *state;
};
-void fsl_dcu_fbdev_init(struct drm_device *dev);
int fsl_dcu_drm_modeset_init(struct fsl_dcu_drm_device *fsl_dev);
#endif /* __FSL_DCU_DRM_DRV_H__ */
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_fbdev.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_fbdev.c
deleted file mode 100644
index 8b8b819ea704..000000000000
--- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_fbdev.c
+++ /dev/null
@@ -1,23 +0,0 @@
-/*
- * Copyright 2015 Freescale Semiconductor, Inc.
- *
- * Freescale DCU drm device driver
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-
-#include <drm/drmP.h>
-#include <drm/drm_fb_cma_helper.h>
-
-#include "fsl_dcu_drm_drv.h"
-
-/* initialize fbdev helper */
-void fsl_dcu_fbdev_init(struct drm_device *dev)
-{
- struct fsl_dcu_drm_device *fsl_dev = dev_get_drvdata(dev->dev);
-
- fsl_dev->fbdev = drm_fbdev_cma_init(dev, 24, 1, 1);
-}
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c
index e1dd75b18118..05a8ee106879 100644
--- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c
@@ -58,17 +58,10 @@ static void fsl_dcu_drm_connector_destroy(struct drm_connector *connector)
drm_connector_cleanup(connector);
}
-static enum drm_connector_status
-fsl_dcu_drm_connector_detect(struct drm_connector *connector, bool force)
-{
- return connector_status_connected;
-}
-
static const struct drm_connector_funcs fsl_dcu_drm_connector_funcs = {
.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
.destroy = fsl_dcu_drm_connector_destroy,
- .detect = fsl_dcu_drm_connector_detect,
.dpms = drm_atomic_helper_connector_dpms,
.fill_modes = drm_helper_probe_single_connector_modes,
.reset = drm_atomic_helper_connector_reset,
diff --git a/drivers/gpu/drm/gma500/cdv_intel_lvds.c b/drivers/gpu/drm/gma500/cdv_intel_lvds.c
index ea733ab5b1e0..5efdb7fbb7ee 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_lvds.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_lvds.c
@@ -387,19 +387,6 @@ static void cdv_intel_lvds_mode_set(struct drm_encoder *encoder,
}
/**
- * Detect the LVDS connection.
- *
- * This always returns CONNECTOR_STATUS_CONNECTED.
- * This connector should only have
- * been set up if the LVDS was actually connected anyway.
- */
-static enum drm_connector_status cdv_intel_lvds_detect(
- struct drm_connector *connector, bool force)
-{
- return connector_status_connected;
-}
-
-/**
* Return the list of DDC modes if available, or the BIOS fixed mode otherwise.
*/
static int cdv_intel_lvds_get_modes(struct drm_connector *connector)
@@ -521,7 +508,6 @@ static const struct drm_connector_helper_funcs
static const struct drm_connector_funcs cdv_intel_lvds_connector_funcs = {
.dpms = drm_helper_connector_dpms,
- .detect = cdv_intel_lvds_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.set_property = cdv_intel_lvds_set_property,
.destroy = cdv_intel_lvds_destroy,
diff --git a/drivers/gpu/drm/gma500/framebuffer.c b/drivers/gpu/drm/gma500/framebuffer.c
index 3a44e705db53..8b44fa542562 100644
--- a/drivers/gpu/drm/gma500/framebuffer.c
+++ b/drivers/gpu/drm/gma500/framebuffer.c
@@ -124,8 +124,8 @@ static int psbfb_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
unsigned long phys_addr = (unsigned long)dev_priv->stolen_base +
psbfb->gtt->offset;
- page_num = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
- address = (unsigned long)vmf->virtual_address - (vmf->pgoff << PAGE_SHIFT);
+ page_num = vma_pages(vma);
+ address = vmf->address - (vmf->pgoff << PAGE_SHIFT);
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
@@ -185,9 +185,7 @@ static int psbfb_mmap(struct fb_info *info, struct vm_area_struct *vma)
static struct fb_ops psbfb_ops = {
.owner = THIS_MODULE,
- .fb_check_var = drm_fb_helper_check_var,
- .fb_set_par = drm_fb_helper_set_par,
- .fb_blank = drm_fb_helper_blank,
+ DRM_FB_HELPER_DEFAULT_OPS,
.fb_setcolreg = psbfb_setcolreg,
.fb_fillrect = drm_fb_helper_cfb_fillrect,
.fb_copyarea = psbfb_copyarea,
@@ -198,9 +196,7 @@ static struct fb_ops psbfb_ops = {
static struct fb_ops psbfb_roll_ops = {
.owner = THIS_MODULE,
- .fb_check_var = drm_fb_helper_check_var,
- .fb_set_par = drm_fb_helper_set_par,
- .fb_blank = drm_fb_helper_blank,
+ DRM_FB_HELPER_DEFAULT_OPS,
.fb_setcolreg = psbfb_setcolreg,
.fb_fillrect = drm_fb_helper_cfb_fillrect,
.fb_copyarea = drm_fb_helper_cfb_copyarea,
@@ -211,9 +207,7 @@ static struct fb_ops psbfb_roll_ops = {
static struct fb_ops psbfb_unaccel_ops = {
.owner = THIS_MODULE,
- .fb_check_var = drm_fb_helper_check_var,
- .fb_set_par = drm_fb_helper_set_par,
- .fb_blank = drm_fb_helper_blank,
+ DRM_FB_HELPER_DEFAULT_OPS,
.fb_setcolreg = psbfb_setcolreg,
.fb_fillrect = drm_fb_helper_cfb_fillrect,
.fb_copyarea = drm_fb_helper_cfb_copyarea,
@@ -236,22 +230,20 @@ static int psb_framebuffer_init(struct drm_device *dev,
const struct drm_mode_fb_cmd2 *mode_cmd,
struct gtt_range *gt)
{
- u32 bpp, depth;
+ const struct drm_format_info *info;
int ret;
- drm_fb_get_bpp_depth(mode_cmd->pixel_format, &depth, &bpp);
+ /*
+ * Reject unknown formats, YUV formats, and formats with more than
+ * 4 bytes per pixel.
+ */
+ info = drm_format_info(mode_cmd->pixel_format);
+ if (!info || !info->depth || info->cpp[0] > 4)
+ return -EINVAL;
if (mode_cmd->pitches[0] & 63)
return -EINVAL;
- switch (bpp) {
- case 8:
- case 16:
- case 24:
- case 32:
- break;
- default:
- return -EINVAL;
- }
+
drm_helper_mode_fill_fb_struct(&fb->base, mode_cmd);
fb->gtt = gt;
ret = drm_framebuffer_init(dev, &fb->base, &psb_fb_funcs);
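To illustrate what the new drm_format_info() check accepts and rejects (values assumed from the 4.10-era format table):

	const struct drm_format_info *info;

	info = drm_format_info(DRM_FORMAT_XRGB8888); /* depth 24, cpp[0] == 4: accepted */
	info = drm_format_info(DRM_FORMAT_NV12);     /* YUV, depth == 0: rejected */
	info = drm_format_info(0x12345678);          /* unknown fourcc: NULL, rejected */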
@@ -298,7 +290,6 @@ static struct drm_framebuffer *psb_framebuffer_create
* psbfb_alloc - allocate frame buffer memory
* @dev: the DRM device
* @aligned_size: space needed
- * @force: fall back to GEM buffers if need be
*
* Allocate the frame buffer. In the usual case we get a GTT range that
* is stolen memory backed and life is simple. If there isn't sufficient
diff --git a/drivers/gpu/drm/gma500/gem.c b/drivers/gpu/drm/gma500/gem.c
index 6d1cb6b370b1..527c62917660 100644
--- a/drivers/gpu/drm/gma500/gem.c
+++ b/drivers/gpu/drm/gma500/gem.c
@@ -197,15 +197,14 @@ int psb_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
/* Page relative to the VMA start - we must calculate this ourselves
because vmf->pgoff is the fake GEM offset */
- page_offset = ((unsigned long) vmf->virtual_address - vma->vm_start)
- >> PAGE_SHIFT;
+ page_offset = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
/* CPU view of the page, don't go via the GART for CPU writes */
if (r->stolen)
pfn = (dev_priv->stolen_base + r->offset) >> PAGE_SHIFT;
else
pfn = page_to_pfn(r->pages[page_offset]);
- ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
+ ret = vm_insert_pfn(vma, vmf->address, pfn);
fail:
mutex_unlock(&dev_priv->mmap_mutex);
diff --git a/drivers/gpu/drm/gma500/gtt.c b/drivers/gpu/drm/gma500/gtt.c
index 8f69225ce2b4..3f4f424196b2 100644
--- a/drivers/gpu/drm/gma500/gtt.c
+++ b/drivers/gpu/drm/gma500/gtt.c
@@ -76,6 +76,7 @@ static u32 __iomem *psb_gtt_entry(struct drm_device *dev, struct gtt_range *r)
* psb_gtt_insert - put an object into the GTT
* @dev: our DRM device
* @r: our GTT range
+ * @resume: on resume
*
* Take our preallocated GTT range and insert the GEM object into
* the GTT. This is protected via the gtt mutex which the caller
@@ -130,7 +131,7 @@ static int psb_gtt_insert(struct drm_device *dev, struct gtt_range *r,
* page table entries with the dummy page. This is protected via the gtt
* mutex which the caller must hold.
*/
-void psb_gtt_remove(struct drm_device *dev, struct gtt_range *r)
+static void psb_gtt_remove(struct drm_device *dev, struct gtt_range *r)
{
struct drm_psb_private *dev_priv = dev->dev_private;
u32 __iomem *gtt_slot;
@@ -321,6 +322,7 @@ out:
* @len: length (bytes) of address space required
* @name: resource name
* @backed: resource should be backed by stolen pages
+ * @align: requested alignment
*
* Ask the kernel core to find us a suitable range of addresses
* to use for a GTT mapping.
diff --git a/drivers/gpu/drm/gma500/psb_drv.c b/drivers/gpu/drm/gma500/psb_drv.c
index 50eb944fb78a..ff37ea585664 100644
--- a/drivers/gpu/drm/gma500/psb_drv.c
+++ b/drivers/gpu/drm/gma500/psb_drv.c
@@ -473,6 +473,7 @@ static const struct file_operations psb_gem_fops = {
.open = drm_open,
.release = drm_release,
.unlocked_ioctl = psb_unlocked_ioctl,
+ .compat_ioctl = drm_compat_ioctl,
.mmap = drm_gem_mmap,
.poll = drm_poll,
.read = drm_read,
diff --git a/drivers/gpu/drm/gma500/psb_drv.h b/drivers/gpu/drm/gma500/psb_drv.h
index b74372760d7f..05d7aaf47eea 100644
--- a/drivers/gpu/drm/gma500/psb_drv.h
+++ b/drivers/gpu/drm/gma500/psb_drv.h
@@ -753,10 +753,6 @@ extern int psb_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
extern int psb_gem_dumb_map_gtt(struct drm_file *file, struct drm_device *dev,
uint32_t handle, uint64_t *offset);
extern int psb_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
-extern int psb_gem_create_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file);
-extern int psb_gem_mmap_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file);
/* psb_device.c */
extern const struct psb_ops psb_chip_ops;
diff --git a/drivers/gpu/drm/gma500/psb_intel_lvds.c b/drivers/gpu/drm/gma500/psb_intel_lvds.c
index fd7c91254841..483fdce74e39 100644
--- a/drivers/gpu/drm/gma500/psb_intel_lvds.c
+++ b/drivers/gpu/drm/gma500/psb_intel_lvds.c
@@ -500,19 +500,6 @@ static void psb_intel_lvds_mode_set(struct drm_encoder *encoder,
}
/*
- * Detect the LVDS connection.
- *
- * This always returns CONNECTOR_STATUS_CONNECTED.
- * This connector should only have
- * been set up if the LVDS was actually connected anyway.
- */
-static enum drm_connector_status psb_intel_lvds_detect(struct drm_connector
- *connector, bool force)
-{
- return connector_status_connected;
-}
-
-/*
* Return the list of DDC modes if available, or the BIOS fixed mode otherwise.
*/
static int psb_intel_lvds_get_modes(struct drm_connector *connector)
@@ -643,7 +630,6 @@ const struct drm_connector_helper_funcs
const struct drm_connector_funcs psb_intel_lvds_connector_funcs = {
.dpms = drm_helper_connector_dpms,
- .detect = psb_intel_lvds_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.set_property = psb_intel_lvds_set_property,
.destroy = psb_intel_lvds_destroy,
diff --git a/drivers/gpu/drm/hisilicon/Kconfig b/drivers/gpu/drm/hisilicon/Kconfig
index 558c61b1b8e8..2fd2724b7a7d 100644
--- a/drivers/gpu/drm/hisilicon/Kconfig
+++ b/drivers/gpu/drm/hisilicon/Kconfig
@@ -2,4 +2,5 @@
# hisilicon drm device configuration.
# Please keep this list sorted alphabetically
+source "drivers/gpu/drm/hisilicon/hibmc/Kconfig"
source "drivers/gpu/drm/hisilicon/kirin/Kconfig"
diff --git a/drivers/gpu/drm/hisilicon/Makefile b/drivers/gpu/drm/hisilicon/Makefile
index e3f6d493c996..c8155bfb1ff1 100644
--- a/drivers/gpu/drm/hisilicon/Makefile
+++ b/drivers/gpu/drm/hisilicon/Makefile
@@ -2,4 +2,5 @@
# Makefile for hisilicon drm drivers.
# Please keep this list sorted alphabetically
+obj-$(CONFIG_DRM_HISI_HIBMC) += hibmc/
obj-$(CONFIG_DRM_HISI_KIRIN) += kirin/
diff --git a/drivers/gpu/drm/hisilicon/hibmc/Kconfig b/drivers/gpu/drm/hisilicon/hibmc/Kconfig
new file mode 100644
index 000000000000..380622a0da35
--- /dev/null
+++ b/drivers/gpu/drm/hisilicon/hibmc/Kconfig
@@ -0,0 +1,9 @@
+config DRM_HISI_HIBMC
+ tristate "DRM Support for Hisilicon Hibmc"
+ depends on DRM && PCI
+ select DRM_KMS_HELPER
+ select DRM_TTM
+
+ help
+ Choose this option if you have a Hisilicon Hibmc SoC chipset.
+ If M is selected the module will be called hibmc-drm.
diff --git a/drivers/gpu/drm/hisilicon/hibmc/Makefile b/drivers/gpu/drm/hisilicon/hibmc/Makefile
new file mode 100644
index 000000000000..f2e04c035673
--- /dev/null
+++ b/drivers/gpu/drm/hisilicon/hibmc/Makefile
@@ -0,0 +1,4 @@
+ccflags-y := -Iinclude/drm
+hibmc-drm-y := hibmc_drm_drv.o hibmc_drm_de.o hibmc_drm_vdac.o hibmc_drm_fbdev.o hibmc_ttm.o
+
+obj-$(CONFIG_DRM_HISI_HIBMC) += hibmc-drm.o
diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_de.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_de.c
new file mode 100644
index 000000000000..2a1386e33126
--- /dev/null
+++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_de.c
@@ -0,0 +1,477 @@
+/* Hisilicon Hibmc SoC drm driver
+ *
+ * Based on the bochs drm driver.
+ *
+ * Copyright (c) 2016 Huawei Limited.
+ *
+ * Author:
+ * Rongrong Zou <zourongrong@huawei.com>
+ * Rongrong Zou <zourongrong@gmail.com>
+ * Jianhua Li <lijianhua@huawei.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ */
+
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_plane_helper.h>
+
+#include "hibmc_drm_drv.h"
+#include "hibmc_drm_regs.h"
+
+struct hibmc_display_panel_pll {
+ unsigned long M;
+ unsigned long N;
+ unsigned long OD;
+ unsigned long POD;
+};
+
+struct hibmc_display_pll_config {
+ unsigned long hdisplay;
+ unsigned long vdisplay;
+ u32 pll1_config_value;
+ u32 pll2_config_value;
+};
+
+static const struct hibmc_display_pll_config hibmc_pll_table[] = {
+ {800, 600, CRT_PLL1_HS_40MHZ, CRT_PLL2_HS_40MHZ},
+ {1024, 768, CRT_PLL1_HS_65MHZ, CRT_PLL2_HS_65MHZ},
+ {1152, 864, CRT_PLL1_HS_80MHZ_1152, CRT_PLL2_HS_80MHZ},
+ {1280, 768, CRT_PLL1_HS_80MHZ, CRT_PLL2_HS_80MHZ},
+ {1280, 720, CRT_PLL1_HS_74MHZ, CRT_PLL2_HS_74MHZ},
+ {1280, 960, CRT_PLL1_HS_108MHZ, CRT_PLL2_HS_108MHZ},
+ {1280, 1024, CRT_PLL1_HS_108MHZ, CRT_PLL2_HS_108MHZ},
+ {1600, 1200, CRT_PLL1_HS_162MHZ, CRT_PLL2_HS_162MHZ},
+ {1920, 1080, CRT_PLL1_HS_148MHZ, CRT_PLL2_HS_148MHZ},
+ {1920, 1200, CRT_PLL1_HS_193MHZ, CRT_PLL2_HS_193MHZ},
+};
+
+#define PADDING(align, data) (((data) + (align) - 1) & (~((align) - 1)))
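+/* e.g. PADDING(16, 100) = (100 + 15) & ~15 = 112: round "data" up to the
+ * next multiple of "align", which must be a power of two.
+ */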
+
+static int hibmc_plane_atomic_check(struct drm_plane *plane,
+ struct drm_plane_state *state)
+{
+ struct drm_framebuffer *fb = state->fb;
+ struct drm_crtc *crtc = state->crtc;
+ struct drm_crtc_state *crtc_state;
+ u32 src_w = state->src_w >> 16;
+ u32 src_h = state->src_h >> 16;
+
+ if (!crtc || !fb)
+ return 0;
+
+ crtc_state = drm_atomic_get_crtc_state(state->state, crtc);
+ if (IS_ERR(crtc_state))
+ return PTR_ERR(crtc_state);
+
+ if (src_w != state->crtc_w || src_h != state->crtc_h) {
+ DRM_DEBUG_ATOMIC("scale not support\n");
+ return -EINVAL;
+ }
+
+ if (state->crtc_x < 0 || state->crtc_y < 0) {
+ DRM_DEBUG_ATOMIC("crtc_x/y of drm_plane state is invalid\n");
+ return -EINVAL;
+ }
+
+ if (state->crtc_x + state->crtc_w >
+ crtc_state->adjusted_mode.hdisplay ||
+ state->crtc_y + state->crtc_h >
+ crtc_state->adjusted_mode.vdisplay) {
+ DRM_DEBUG_ATOMIC("visible portion of plane is invalid\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void hibmc_plane_atomic_update(struct drm_plane *plane,
+ struct drm_plane_state *old_state)
+{
+ struct drm_plane_state *state = plane->state;
+ u32 reg;
+ int ret;
+ u64 gpu_addr = 0;
+ unsigned int line_l;
+ struct hibmc_drm_private *priv = plane->dev->dev_private;
+ struct hibmc_framebuffer *hibmc_fb;
+ struct hibmc_bo *bo;
+
+ if (!state->fb)
+ return;
+
+ hibmc_fb = to_hibmc_framebuffer(state->fb);
+ bo = gem_to_hibmc_bo(hibmc_fb->obj);
+ ret = ttm_bo_reserve(&bo->bo, true, false, NULL);
+ if (ret) {
+ DRM_ERROR("failed to reserve ttm_bo: %d", ret);
+ return;
+ }
+
+ ret = hibmc_bo_pin(bo, TTM_PL_FLAG_VRAM, &gpu_addr);
+ ttm_bo_unreserve(&bo->bo);
+ if (ret) {
+ DRM_ERROR("failed to pin hibmc_bo: %d", ret);
+ return;
+ }
+
+ writel(gpu_addr, priv->mmio + HIBMC_CRT_FB_ADDRESS);
+
+ /* line pitch in bytes, padded to the 16-byte line_pad */
+ line_l = state->fb->width * (state->fb->bits_per_pixel / 8);
+ line_l = PADDING(16, line_l);
+ writel(HIBMC_FIELD(HIBMC_CRT_FB_WIDTH_WIDTH, line_l) |
+ HIBMC_FIELD(HIBMC_CRT_FB_WIDTH_OFFS, line_l),
+ priv->mmio + HIBMC_CRT_FB_WIDTH);
+
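+ /* bits_per_pixel / 16 gives the (assumed) controller format code:
+ * 16bpp -> 1, 32bpp -> 2 (8bpp would give 0).
+ */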
+ /* SET PIXEL FORMAT */
+ reg = readl(priv->mmio + HIBMC_CRT_DISP_CTL);
+ reg &= ~HIBMC_CRT_DISP_CTL_FORMAT_MASK;
+ reg |= HIBMC_FIELD(HIBMC_CRT_DISP_CTL_FORMAT,
+ state->fb->bits_per_pixel / 16);
+ writel(reg, priv->mmio + HIBMC_CRT_DISP_CTL);
+}
+
+static const u32 channel_formats1[] = {
+ DRM_FORMAT_RGB565, DRM_FORMAT_BGR565, DRM_FORMAT_RGB888,
+ DRM_FORMAT_BGR888, DRM_FORMAT_XRGB8888, DRM_FORMAT_XBGR8888,
+ DRM_FORMAT_RGBA8888, DRM_FORMAT_BGRA8888, DRM_FORMAT_ARGB8888,
+ DRM_FORMAT_ABGR8888
+};
+
+static struct drm_plane_funcs hibmc_plane_funcs = {
+ .update_plane = drm_atomic_helper_update_plane,
+ .disable_plane = drm_atomic_helper_disable_plane,
+ .set_property = drm_atomic_helper_plane_set_property,
+ .destroy = drm_plane_cleanup,
+ .reset = drm_atomic_helper_plane_reset,
+ .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
+};
+
+static const struct drm_plane_helper_funcs hibmc_plane_helper_funcs = {
+ .atomic_check = hibmc_plane_atomic_check,
+ .atomic_update = hibmc_plane_atomic_update,
+};
+
+static struct drm_plane *hibmc_plane_init(struct hibmc_drm_private *priv)
+{
+ struct drm_device *dev = priv->dev;
+ struct drm_plane *plane;
+ int ret = 0;
+
+ plane = devm_kzalloc(dev->dev, sizeof(*plane), GFP_KERNEL);
+ if (!plane) {
+ DRM_ERROR("failed to alloc memory when init plane\n");
+ return ERR_PTR(-ENOMEM);
+ }
+ /*
+ * plane init
+ * TODO: only the primary plane is supported for now; overlay
+ * planes still need to be implemented.
+ */
+ ret = drm_universal_plane_init(dev, plane, 1, &hibmc_plane_funcs,
+ channel_formats1,
+ ARRAY_SIZE(channel_formats1),
+ DRM_PLANE_TYPE_PRIMARY,
+ NULL);
+ if (ret) {
+ DRM_ERROR("failed to init plane: %d\n", ret);
+ return ERR_PTR(ret);
+ }
+
+ drm_plane_helper_add(plane, &hibmc_plane_helper_funcs);
+ return plane;
+}
+
+static void hibmc_crtc_enable(struct drm_crtc *crtc)
+{
+ unsigned int reg;
+ struct hibmc_drm_private *priv = crtc->dev->dev_private;
+
+ hibmc_set_power_mode(priv, HIBMC_PW_MODE_CTL_MODE_MODE0);
+
+ /* Enable display power gate & LOCALMEM power gate */
+ reg = readl(priv->mmio + HIBMC_CURRENT_GATE);
+ reg &= ~HIBMC_CURR_GATE_LOCALMEM_MASK;
+ reg &= ~HIBMC_CURR_GATE_DISPLAY_MASK;
+ reg |= HIBMC_CURR_GATE_LOCALMEM(1);
+ reg |= HIBMC_CURR_GATE_DISPLAY(1);
+ hibmc_set_current_gate(priv, reg);
+ drm_crtc_vblank_on(crtc);
+}
+
+static void hibmc_crtc_disable(struct drm_crtc *crtc)
+{
+ unsigned int reg;
+ struct hibmc_drm_private *priv = crtc->dev->dev_private;
+
+ drm_crtc_vblank_off(crtc);
+
+ hibmc_set_power_mode(priv, HIBMC_PW_MODE_CTL_MODE_SLEEP);
+
+ /* Disable display power gate & LOCALMEM power gate */
+ reg = readl(priv->mmio + HIBMC_CURRENT_GATE);
+ reg &= ~HIBMC_CURR_GATE_LOCALMEM_MASK;
+ reg &= ~HIBMC_CURR_GATE_DISPLAY_MASK;
+ reg |= HIBMC_CURR_GATE_LOCALMEM(0);
+ reg |= HIBMC_CURR_GATE_DISPLAY(0);
+ hibmc_set_current_gate(priv, reg);
+}
+
+static unsigned int format_pll_reg(void)
+{
+ unsigned int pllreg = 0;
+ struct hibmc_display_panel_pll pll = {0};
+
+ /*
+ * Note that all PLLs have the same register format. Here we just use
+ * the panel PLL parameters to work out the bit fields. The returned
+ * 32-bit value can then be applied to any PLL by the calling function.
+ */
+ pllreg |= HIBMC_FIELD(HIBMC_PLL_CTRL_BYPASS, 0);
+ pllreg |= HIBMC_FIELD(HIBMC_PLL_CTRL_POWER, 1);
+ pllreg |= HIBMC_FIELD(HIBMC_PLL_CTRL_INPUT, 0);
+ pllreg |= HIBMC_FIELD(HIBMC_PLL_CTRL_POD, pll.POD);
+ pllreg |= HIBMC_FIELD(HIBMC_PLL_CTRL_OD, pll.OD);
+ pllreg |= HIBMC_FIELD(HIBMC_PLL_CTRL_N, pll.N);
+ pllreg |= HIBMC_FIELD(HIBMC_PLL_CTRL_M, pll.M);
+
+ return pllreg;
+}
+
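+/* Program CRT PLL1: write the divider word with the PLL bypassed, then
+ * step the power and bypass bits back out with delays in between so the
+ * PLL can lock (sequence assumed from the hardware requirement).
+ */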
+static void set_vclock_hisilicon(struct drm_device *dev, unsigned long pll)
+{
+ u32 val;
+ struct hibmc_drm_private *priv = dev->dev_private;
+
+ val = readl(priv->mmio + CRT_PLL1_HS);
+ val &= ~(CRT_PLL1_HS_OUTER_BYPASS(1));
+ writel(val, priv->mmio + CRT_PLL1_HS);
+
+ val = CRT_PLL1_HS_INTER_BYPASS(1) | CRT_PLL1_HS_POWERON(1);
+ writel(val, priv->mmio + CRT_PLL1_HS);
+
+ writel(pll, priv->mmio + CRT_PLL1_HS);
+
+ usleep_range(1000, 2000);
+
+ val = pll & ~(CRT_PLL1_HS_POWERON(1));
+ writel(val, priv->mmio + CRT_PLL1_HS);
+
+ usleep_range(1000, 2000);
+
+ val &= ~(CRT_PLL1_HS_INTER_BYPASS(1));
+ writel(val, priv->mmio + CRT_PLL1_HS);
+
+ usleep_range(1000, 2000);
+
+ val |= CRT_PLL1_HS_OUTER_BYPASS(1);
+ writel(val, priv->mmio + CRT_PLL1_HS);
+}
+
+static void get_pll_config(unsigned long x, unsigned long y,
+ u32 *pll1, u32 *pll2)
+{
+ int i;
+ int count = ARRAY_SIZE(hibmc_pll_table);
+
+ for (i = 0; i < count; i++) {
+ if (hibmc_pll_table[i].hdisplay == x &&
+ hibmc_pll_table[i].vdisplay == y) {
+ *pll1 = hibmc_pll_table[i].pll1_config_value;
+ *pll2 = hibmc_pll_table[i].pll2_config_value;
+ return;
+ }
+ }
+
+ /* if no entry matched, fall back to the default values */
+ *pll1 = CRT_PLL1_HS_25MHZ;
+ *pll2 = CRT_PLL2_HS_25MHZ;
+}
+
+/*
+ * This function takes care of the extra registers and bit fields
+ * required to set up a mode on the board.
+ * A note on the Display Control register: the FPGA only supports 7
+ * predefined pixel clocks, and the clock select sits in bits 4:0 of
+ * the new register 0x802a8.
+ */
+static unsigned int display_ctrl_adjust(struct drm_device *dev,
+ struct drm_display_mode *mode,
+ unsigned int ctrl)
+{
+ unsigned long x, y;
+ u32 pll1; /* bit[31:0] of PLL */
+ u32 pll2; /* bit[63:32] of PLL */
+ struct hibmc_drm_private *priv = dev->dev_private;
+
+ x = mode->hdisplay;
+ y = mode->vdisplay;
+
+ get_pll_config(x, y, &pll1, &pll2);
+ writel(pll2, priv->mmio + CRT_PLL2_HS);
+ set_vclock_hisilicon(dev, pll1);
+
+ /*
+ * Hisilicon has to set up the top-left and bottom-right
+ * registers as well.
+ * Note that normal chips only use these two registers in
+ * auto-centering mode.
+ */
+ writel(HIBMC_FIELD(HIBMC_CRT_AUTO_CENTERING_TL_TOP, 0) |
+ HIBMC_FIELD(HIBMC_CRT_AUTO_CENTERING_TL_LEFT, 0),
+ priv->mmio + HIBMC_CRT_AUTO_CENTERING_TL);
+
+ writel(HIBMC_FIELD(HIBMC_CRT_AUTO_CENTERING_BR_BOTTOM, y - 1) |
+ HIBMC_FIELD(HIBMC_CRT_AUTO_CENTERING_BR_RIGHT, x - 1),
+ priv->mmio + HIBMC_CRT_AUTO_CENTERING_BR);
+
+ /*
+ * Assume common fields in ctrl have been properly set before
+ * calling this function.
+ * This function only sets the extra fields in ctrl.
+ */
+
+ /* Set bit 25 of display controller: Select CRT or VGA clock */
+ ctrl &= ~HIBMC_CRT_DISP_CTL_CRTSELECT_MASK;
+ ctrl &= ~HIBMC_CRT_DISP_CTL_CLOCK_PHASE_MASK;
+
+ ctrl |= HIBMC_CRT_DISP_CTL_CRTSELECT(HIBMC_CRTSELECT_CRT);
+
+ /* clock_phase_polarity is 0 */
+ ctrl |= HIBMC_CRT_DISP_CTL_CLOCK_PHASE(0);
+
+ writel(ctrl, priv->mmio + HIBMC_CRT_DISP_CTL);
+
+ return ctrl;
+}
+
+static void hibmc_crtc_mode_set_nofb(struct drm_crtc *crtc)
+{
+ unsigned int val;
+ struct drm_display_mode *mode = &crtc->state->mode;
+ struct drm_device *dev = crtc->dev;
+ struct hibmc_drm_private *priv = dev->dev_private;
+ int width = mode->hsync_end - mode->hsync_start;
+ int height = mode->vsync_end - mode->vsync_start;
+
+ writel(format_pll_reg(), priv->mmio + HIBMC_CRT_PLL_CTRL);
+ writel(HIBMC_FIELD(HIBMC_CRT_HORZ_TOTAL_TOTAL, mode->htotal - 1) |
+ HIBMC_FIELD(HIBMC_CRT_HORZ_TOTAL_DISP_END, mode->hdisplay - 1),
+ priv->mmio + HIBMC_CRT_HORZ_TOTAL);
+
+ writel(HIBMC_FIELD(HIBMC_CRT_HORZ_SYNC_WIDTH, width) |
+ HIBMC_FIELD(HIBMC_CRT_HORZ_SYNC_START, mode->hsync_start - 1),
+ priv->mmio + HIBMC_CRT_HORZ_SYNC);
+
+ writel(HIBMC_FIELD(HIBMC_CRT_VERT_TOTAL_TOTAL, mode->vtotal - 1) |
+ HIBMC_FIELD(HIBMC_CRT_VERT_TOTAL_DISP_END, mode->vdisplay - 1),
+ priv->mmio + HIBMC_CRT_VERT_TOTAL);
+
+ writel(HIBMC_FIELD(HIBMC_CRT_VERT_SYNC_HEIGHT, height) |
+ HIBMC_FIELD(HIBMC_CRT_VERT_SYNC_START, mode->vsync_start - 1),
+ priv->mmio + HIBMC_CRT_VERT_SYNC);
+
+ val = HIBMC_FIELD(HIBMC_CRT_DISP_CTL_VSYNC_PHASE, 0);
+ val |= HIBMC_FIELD(HIBMC_CRT_DISP_CTL_HSYNC_PHASE, 0);
+ val |= HIBMC_CRT_DISP_CTL_TIMING(1);
+ val |= HIBMC_CRT_DISP_CTL_PLANE(1);
+
+ display_ctrl_adjust(dev, mode, val);
+}
+
+static void hibmc_crtc_atomic_begin(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_state)
+{
+ unsigned int reg;
+ struct drm_device *dev = crtc->dev;
+ struct hibmc_drm_private *priv = dev->dev_private;
+
+ hibmc_set_power_mode(priv, HIBMC_PW_MODE_CTL_MODE_MODE0);
+
+ /* Enable display power gate & LOCALMEM power gate */
+ reg = readl(priv->mmio + HIBMC_CURRENT_GATE);
+ reg &= ~HIBMC_CURR_GATE_DISPLAY_MASK;
+ reg &= ~HIBMC_CURR_GATE_LOCALMEM_MASK;
+ reg |= HIBMC_CURR_GATE_DISPLAY(1);
+ reg |= HIBMC_CURR_GATE_LOCALMEM(1);
+ hibmc_set_current_gate(priv, reg);
+
+ /* We can add more initialization as needed. */
+}
+
+static void hibmc_crtc_atomic_flush(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_state)
+
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&crtc->dev->event_lock, flags);
+ if (crtc->state->event)
+ drm_crtc_send_vblank_event(crtc, crtc->state->event);
+ crtc->state->event = NULL;
+ spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
+}
+
+static const struct drm_crtc_funcs hibmc_crtc_funcs = {
+ .page_flip = drm_atomic_helper_page_flip,
+ .set_config = drm_atomic_helper_set_config,
+ .destroy = drm_crtc_cleanup,
+ .reset = drm_atomic_helper_crtc_reset,
+ .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
+};
+
+static const struct drm_crtc_helper_funcs hibmc_crtc_helper_funcs = {
+ .enable = hibmc_crtc_enable,
+ .disable = hibmc_crtc_disable,
+ .mode_set_nofb = hibmc_crtc_mode_set_nofb,
+ .atomic_begin = hibmc_crtc_atomic_begin,
+ .atomic_flush = hibmc_crtc_atomic_flush,
+};
+
+int hibmc_de_init(struct hibmc_drm_private *priv)
+{
+ struct drm_device *dev = priv->dev;
+ struct drm_crtc *crtc;
+ struct drm_plane *plane;
+ int ret;
+
+ plane = hibmc_plane_init(priv);
+ if (IS_ERR(plane)) {
+ DRM_ERROR("failed to create plane: %ld\n", PTR_ERR(plane));
+ return PTR_ERR(plane);
+ }
+
+ crtc = devm_kzalloc(dev->dev, sizeof(*crtc), GFP_KERNEL);
+ if (!crtc) {
+ DRM_ERROR("failed to alloc memory when init crtc\n");
+ return -ENOMEM;
+ }
+
+ ret = drm_crtc_init_with_planes(dev, crtc, plane,
+ NULL, &hibmc_crtc_funcs, NULL);
+ if (ret) {
+ DRM_ERROR("failed to init crtc: %d\n", ret);
+ return ret;
+ }
+
+ ret = drm_mode_crtc_set_gamma_size(crtc, 256);
+ if (ret) {
+ DRM_ERROR("failed to set gamma size: %d\n", ret);
+ return ret;
+ }
+ drm_crtc_helper_add(crtc, &hibmc_crtc_helper_funcs);
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c
new file mode 100644
index 000000000000..7e2043f4348c
--- /dev/null
+++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c
@@ -0,0 +1,456 @@
+/* Hisilicon Hibmc SoC drm driver
+ *
+ * Based on the bochs drm driver.
+ *
+ * Copyright (c) 2016 Huawei Limited.
+ *
+ * Author:
+ * Rongrong Zou <zourongrong@huawei.com>
+ * Rongrong Zou <zourongrong@gmail.com>
+ * Jianhua Li <lijianhua@huawei.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ */
+
+#include <linux/console.h>
+#include <linux/module.h>
+
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_crtc_helper.h>
+
+#include "hibmc_drm_drv.h"
+#include "hibmc_drm_regs.h"
+
+static const struct file_operations hibmc_fops = {
+ .owner = THIS_MODULE,
+ .open = drm_open,
+ .release = drm_release,
+ .unlocked_ioctl = drm_ioctl,
+ .compat_ioctl = drm_compat_ioctl,
+ .mmap = hibmc_mmap,
+ .poll = drm_poll,
+ .read = drm_read,
+ .llseek = no_llseek,
+};
+
+static int hibmc_enable_vblank(struct drm_device *dev, unsigned int pipe)
+{
+ struct hibmc_drm_private *priv =
+ (struct hibmc_drm_private *)dev->dev_private;
+
+ writel(HIBMC_RAW_INTERRUPT_EN_VBLANK(1),
+ priv->mmio + HIBMC_RAW_INTERRUPT_EN);
+
+ return 0;
+}
+
+static void hibmc_disable_vblank(struct drm_device *dev, unsigned int pipe)
+{
+ struct hibmc_drm_private *priv =
+ (struct hibmc_drm_private *)dev->dev_private;
+
+ writel(HIBMC_RAW_INTERRUPT_EN_VBLANK(0),
+ priv->mmio + HIBMC_RAW_INTERRUPT_EN);
+}
+
+irqreturn_t hibmc_drm_interrupt(int irq, void *arg)
+{
+ struct drm_device *dev = (struct drm_device *)arg;
+ struct hibmc_drm_private *priv =
+ (struct hibmc_drm_private *)dev->dev_private;
+ u32 status;
+
+ status = readl(priv->mmio + HIBMC_RAW_INTERRUPT);
+
+ if (status & HIBMC_RAW_INTERRUPT_VBLANK(1)) {
+ writel(HIBMC_RAW_INTERRUPT_VBLANK(1),
+ priv->mmio + HIBMC_RAW_INTERRUPT);
+ drm_handle_vblank(dev, 0);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static struct drm_driver hibmc_driver = {
+ .driver_features = DRIVER_GEM | DRIVER_MODESET |
+ DRIVER_ATOMIC | DRIVER_HAVE_IRQ,
+ .fops = &hibmc_fops,
+ .name = "hibmc",
+ .date = "20160828",
+ .desc = "hibmc drm driver",
+ .major = 1,
+ .minor = 0,
+ .get_vblank_counter = drm_vblank_no_hw_counter,
+ .enable_vblank = hibmc_enable_vblank,
+ .disable_vblank = hibmc_disable_vblank,
+ .gem_free_object_unlocked = hibmc_gem_free_object,
+ .dumb_create = hibmc_dumb_create,
+ .dumb_map_offset = hibmc_dumb_mmap_offset,
+ .dumb_destroy = drm_gem_dumb_destroy,
+ .irq_handler = hibmc_drm_interrupt,
+};
+
+static int __maybe_unused hibmc_pm_suspend(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct drm_device *drm_dev = pci_get_drvdata(pdev);
+ struct hibmc_drm_private *priv = drm_dev->dev_private;
+
+ drm_kms_helper_poll_disable(drm_dev);
+ priv->suspend_state = drm_atomic_helper_suspend(drm_dev);
+ if (IS_ERR(priv->suspend_state)) {
+ DRM_ERROR("drm_atomic_helper_suspend failed: %ld\n",
+ PTR_ERR(priv->suspend_state));
+ drm_kms_helper_poll_enable(drm_dev);
+ return PTR_ERR(priv->suspend_state);
+ }
+
+ return 0;
+}
+
+static int __maybe_unused hibmc_pm_resume(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct drm_device *drm_dev = pci_get_drvdata(pdev);
+ struct hibmc_drm_private *priv = drm_dev->dev_private;
+
+ drm_atomic_helper_resume(drm_dev, priv->suspend_state);
+ drm_kms_helper_poll_enable(drm_dev);
+
+ return 0;
+}
+
+static const struct dev_pm_ops hibmc_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(hibmc_pm_suspend,
+ hibmc_pm_resume)
+};
+
+static int hibmc_kms_init(struct hibmc_drm_private *priv)
+{
+ int ret;
+
+ drm_mode_config_init(priv->dev);
+ priv->mode_config_initialized = true;
+
+ priv->dev->mode_config.min_width = 0;
+ priv->dev->mode_config.min_height = 0;
+ priv->dev->mode_config.max_width = 1920;
+ priv->dev->mode_config.max_height = 1440;
+
+ priv->dev->mode_config.fb_base = priv->fb_base;
+ priv->dev->mode_config.preferred_depth = 24;
+ priv->dev->mode_config.prefer_shadow = 0;
+
+ priv->dev->mode_config.funcs = (void *)&hibmc_mode_funcs;
+
+ ret = hibmc_de_init(priv);
+ if (ret) {
+ DRM_ERROR("failed to init de: %d\n", ret);
+ return ret;
+ }
+
+ ret = hibmc_vdac_init(priv);
+ if (ret) {
+ DRM_ERROR("failed to init vdac: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void hibmc_kms_fini(struct hibmc_drm_private *priv)
+{
+ if (priv->mode_config_initialized) {
+ drm_mode_config_cleanup(priv->dev);
+ priv->mode_config_initialized = false;
+ }
+}
+
+/*
+ * The chip can operate in one of three power modes: 0, 1 or sleep.
+ */
+void hibmc_set_power_mode(struct hibmc_drm_private *priv,
+ unsigned int power_mode)
+{
+ unsigned int control_value = 0;
+ void __iomem *mmio = priv->mmio;
+ unsigned int input = 1;
+
+ if (power_mode > HIBMC_PW_MODE_CTL_MODE_SLEEP)
+ return;
+
+ if (power_mode == HIBMC_PW_MODE_CTL_MODE_SLEEP)
+ input = 0;
+
+ control_value = readl(mmio + HIBMC_POWER_MODE_CTRL);
+ control_value &= ~(HIBMC_PW_MODE_CTL_MODE_MASK |
+ HIBMC_PW_MODE_CTL_OSC_INPUT_MASK);
+ control_value |= HIBMC_FIELD(HIBMC_PW_MODE_CTL_MODE, power_mode);
+ control_value |= HIBMC_FIELD(HIBMC_PW_MODE_CTL_OSC_INPUT, input);
+ writel(control_value, mmio + HIBMC_POWER_MODE_CTRL);
+}
+
+void hibmc_set_current_gate(struct hibmc_drm_private *priv, unsigned int gate)
+{
+ unsigned int gate_reg;
+ unsigned int mode;
+ void __iomem *mmio = priv->mmio;
+
+ /* Get current power mode. */
+ mode = (readl(mmio + HIBMC_POWER_MODE_CTRL) &
+ HIBMC_PW_MODE_CTL_MODE_MASK) >> HIBMC_PW_MODE_CTL_MODE_SHIFT;
+
+ switch (mode) {
+ case HIBMC_PW_MODE_CTL_MODE_MODE0:
+ gate_reg = HIBMC_MODE0_GATE;
+ break;
+
+ case HIBMC_PW_MODE_CTL_MODE_MODE1:
+ gate_reg = HIBMC_MODE1_GATE;
+ break;
+
+ default:
+ gate_reg = HIBMC_MODE0_GATE;
+ break;
+ }
+ writel(gate, mmio + gate_reg);
+}
+
+static void hibmc_hw_config(struct hibmc_drm_private *priv)
+{
+ unsigned int reg;
+
+ /* On hardware reset, power mode 0 is default. */
+ hibmc_set_power_mode(priv, HIBMC_PW_MODE_CTL_MODE_MODE0);
+
+ /* Enable display power gate & LOCALMEM power gate */
+ reg = readl(priv->mmio + HIBMC_CURRENT_GATE);
+ reg &= ~HIBMC_CURR_GATE_DISPLAY_MASK;
+ reg &= ~HIBMC_CURR_GATE_LOCALMEM_MASK;
+ reg |= HIBMC_CURR_GATE_DISPLAY(1);
+ reg |= HIBMC_CURR_GATE_LOCALMEM(1);
+
+ hibmc_set_current_gate(priv, reg);
+
+ /*
+ * Reset the memory controller. If the memory controller is not
+ * reset on this chip, the system might hang when software accesses
+ * the memory. The memory must also be reset after changing the
+ * MXCLK.
+ */
+ reg = readl(priv->mmio + HIBMC_MISC_CTRL);
+ reg &= ~HIBMC_MSCCTL_LOCALMEM_RESET_MASK;
+ reg |= HIBMC_MSCCTL_LOCALMEM_RESET(0);
+ writel(reg, priv->mmio + HIBMC_MISC_CTRL);
+
+ reg &= ~HIBMC_MSCCTL_LOCALMEM_RESET_MASK;
+ reg |= HIBMC_MSCCTL_LOCALMEM_RESET(1);
+
+ writel(reg, priv->mmio + HIBMC_MISC_CTRL);
+}
+
+static int hibmc_hw_map(struct hibmc_drm_private *priv)
+{
+ struct drm_device *dev = priv->dev;
+ struct pci_dev *pdev = dev->pdev;
+ resource_size_t addr, size, ioaddr, iosize;
+
+ ioaddr = pci_resource_start(pdev, 1);
+ iosize = pci_resource_len(pdev, 1);
+ priv->mmio = devm_ioremap_nocache(dev->dev, ioaddr, iosize);
+ if (!priv->mmio) {
+ DRM_ERROR("Cannot map mmio region\n");
+ return -ENOMEM;
+ }
+
+ addr = pci_resource_start(pdev, 0);
+ size = pci_resource_len(pdev, 0);
+ priv->fb_map = devm_ioremap(dev->dev, addr, size);
+ if (!priv->fb_map) {
+ DRM_ERROR("Cannot map framebuffer\n");
+ return -ENOMEM;
+ }
+ priv->fb_base = addr;
+ priv->fb_size = size;
+
+ return 0;
+}
+
+static int hibmc_hw_init(struct hibmc_drm_private *priv)
+{
+ int ret;
+
+ ret = hibmc_hw_map(priv);
+ if (ret)
+ return ret;
+
+ hibmc_hw_config(priv);
+
+ return 0;
+}
+
+static int hibmc_unload(struct drm_device *dev)
+{
+ struct hibmc_drm_private *priv = dev->dev_private;
+
+ hibmc_fbdev_fini(priv);
+
+ if (dev->irq_enabled)
+ drm_irq_uninstall(dev);
+ if (priv->msi_enabled)
+ pci_disable_msi(dev->pdev);
+ drm_vblank_cleanup(dev);
+
+ hibmc_kms_fini(priv);
+ hibmc_mm_fini(priv);
+ dev->dev_private = NULL;
+ return 0;
+}
+
+static int hibmc_load(struct drm_device *dev)
+{
+ struct hibmc_drm_private *priv;
+ int ret;
+
+ priv = devm_kzalloc(dev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv) {
+ DRM_ERROR("no memory to allocate for hibmc_drm_private\n");
+ return -ENOMEM;
+ }
+ dev->dev_private = priv;
+ priv->dev = dev;
+
+ ret = hibmc_hw_init(priv);
+ if (ret)
+ goto err;
+
+ ret = hibmc_mm_init(priv);
+ if (ret)
+ goto err;
+
+ ret = hibmc_kms_init(priv);
+ if (ret)
+ goto err;
+
+ ret = drm_vblank_init(dev, dev->mode_config.num_crtc);
+ if (ret) {
+ DRM_ERROR("failed to initialize vblank: %d\n", ret);
+ goto err;
+ }
+
+ priv->msi_enabled = 0;
+ ret = pci_enable_msi(dev->pdev);
+ if (ret) {
+ DRM_WARN("enabling MSI failed: %d\n", ret);
+ } else {
+ priv->msi_enabled = 1;
+ ret = drm_irq_install(dev, dev->pdev->irq);
+ if (ret)
+ DRM_WARN("install irq failed: %d\n", ret);
+ }
+
+ /* reset all the states of crtc/plane/encoder/connector */
+ drm_mode_config_reset(dev);
+
+ ret = hibmc_fbdev_init(priv);
+ if (ret) {
+ DRM_ERROR("failed to initialize fbdev: %d\n", ret);
+ goto err;
+ }
+
+ return 0;
+
+err:
+ hibmc_unload(dev);
+ DRM_ERROR("failed to initialize drm driver: %d\n", ret);
+ return ret;
+}
+
+static int hibmc_pci_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ struct drm_device *dev;
+ int ret;
+
+ dev = drm_dev_alloc(&hibmc_driver, &pdev->dev);
+ if (IS_ERR(dev)) {
+ DRM_ERROR("failed to allocate drm_device\n");
+ return PTR_ERR(dev);
+ }
+
+ dev->pdev = pdev;
+ pci_set_drvdata(pdev, dev);
+
+ ret = pci_enable_device(pdev);
+ if (ret) {
+ DRM_ERROR("failed to enable pci device: %d\n", ret);
+ goto err_free;
+ }
+
+ ret = hibmc_load(dev);
+ if (ret) {
+ DRM_ERROR("failed to load hibmc: %d\n", ret);
+ goto err_disable;
+ }
+
+ ret = drm_dev_register(dev, 0);
+ if (ret) {
+ DRM_ERROR("failed to register drv for userspace access: %d\n",
+ ret);
+ goto err_unload;
+ }
+ return 0;
+
+err_unload:
+ hibmc_unload(dev);
+err_disable:
+ pci_disable_device(pdev);
+err_free:
+ drm_dev_unref(dev);
+
+ return ret;
+}
+
+static void hibmc_pci_remove(struct pci_dev *pdev)
+{
+ struct drm_device *dev = pci_get_drvdata(pdev);
+
+ drm_dev_unregister(dev);
+ hibmc_unload(dev);
+ drm_dev_unref(dev);
+}
+
+static struct pci_device_id hibmc_pci_table[] = {
+ {0x19e5, 0x1711, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
+ {0,}
+};
+
+static struct pci_driver hibmc_pci_driver = {
+ .name = "hibmc-drm",
+ .id_table = hibmc_pci_table,
+ .probe = hibmc_pci_probe,
+ .remove = hibmc_pci_remove,
+ .driver.pm = &hibmc_pm_ops,
+};
+
+static int __init hibmc_init(void)
+{
+ return pci_register_driver(&hibmc_pci_driver);
+}
+
+static void __exit hibmc_exit(void)
+{
+ pci_unregister_driver(&hibmc_pci_driver);
+}
+
+module_init(hibmc_init);
+module_exit(hibmc_exit);
+
+MODULE_DEVICE_TABLE(pci, hibmc_pci_table);
+MODULE_AUTHOR("RongrongZou <zourongrong@huawei.com>");
+MODULE_DESCRIPTION("DRM Driver for Hisilicon Hibmc");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h
new file mode 100644
index 000000000000..e195521eb41e
--- /dev/null
+++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h
@@ -0,0 +1,114 @@
+/* Hisilicon Hibmc SoC drm driver
+ *
+ * Based on the bochs drm driver.
+ *
+ * Copyright (c) 2016 Huawei Limited.
+ *
+ * Author:
+ * Rongrong Zou <zourongrong@huawei.com>
+ * Rongrong Zou <zourongrong@gmail.com>
+ * Jianhua Li <lijianhua@huawei.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ */
+
+#ifndef HIBMC_DRM_DRV_H
+#define HIBMC_DRM_DRV_H
+
+#include <drm/drmP.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_fb_helper.h>
+#include <drm/drm_gem.h>
+#include <drm/ttm/ttm_bo_driver.h>
+
+struct hibmc_framebuffer {
+ struct drm_framebuffer fb;
+ struct drm_gem_object *obj;
+};
+
+struct hibmc_fbdev {
+ struct drm_fb_helper helper;
+ struct hibmc_framebuffer *fb;
+ int size;
+};
+
+struct hibmc_drm_private {
+ /* hw */
+ void __iomem *mmio;
+ void __iomem *fb_map;
+ unsigned long fb_base;
+ unsigned long fb_size;
+ bool msi_enabled;
+
+ /* drm */
+ struct drm_device *dev;
+ bool mode_config_initialized;
+ struct drm_atomic_state *suspend_state;
+
+ /* ttm */
+ struct drm_global_reference mem_global_ref;
+ struct ttm_bo_global_ref bo_global_ref;
+ struct ttm_bo_device bdev;
+ bool initialized;
+
+ /* fbdev */
+ struct hibmc_fbdev *fbdev;
+ bool mm_inited;
+};
+
+#define to_hibmc_framebuffer(x) container_of(x, struct hibmc_framebuffer, fb)
+
+struct hibmc_bo {
+ struct ttm_buffer_object bo;
+ struct ttm_placement placement;
+ struct ttm_bo_kmap_obj kmap;
+ struct drm_gem_object gem;
+ struct ttm_place placements[3];
+ int pin_count;
+};
+
+static inline struct hibmc_bo *hibmc_bo(struct ttm_buffer_object *bo)
+{
+ return container_of(bo, struct hibmc_bo, bo);
+}
+
+static inline struct hibmc_bo *gem_to_hibmc_bo(struct drm_gem_object *gem)
+{
+ return container_of(gem, struct hibmc_bo, gem);
+}
+
+void hibmc_set_power_mode(struct hibmc_drm_private *priv,
+ unsigned int power_mode);
+void hibmc_set_current_gate(struct hibmc_drm_private *priv,
+ unsigned int gate);
+
+int hibmc_de_init(struct hibmc_drm_private *priv);
+int hibmc_vdac_init(struct hibmc_drm_private *priv);
+int hibmc_fbdev_init(struct hibmc_drm_private *priv);
+void hibmc_fbdev_fini(struct hibmc_drm_private *priv);
+
+int hibmc_gem_create(struct drm_device *dev, u32 size, bool iskernel,
+ struct drm_gem_object **obj);
+struct hibmc_framebuffer *
+hibmc_framebuffer_init(struct drm_device *dev,
+ const struct drm_mode_fb_cmd2 *mode_cmd,
+ struct drm_gem_object *obj);
+
+int hibmc_mm_init(struct hibmc_drm_private *hibmc);
+void hibmc_mm_fini(struct hibmc_drm_private *hibmc);
+int hibmc_bo_pin(struct hibmc_bo *bo, u32 pl_flag, u64 *gpu_addr);
+int hibmc_bo_unpin(struct hibmc_bo *bo);
+void hibmc_gem_free_object(struct drm_gem_object *obj);
+int hibmc_dumb_create(struct drm_file *file, struct drm_device *dev,
+ struct drm_mode_create_dumb *args);
+int hibmc_dumb_mmap_offset(struct drm_file *file, struct drm_device *dev,
+ u32 handle, u64 *offset);
+int hibmc_mmap(struct file *filp, struct vm_area_struct *vma);
+
+extern const struct drm_mode_config_funcs hibmc_mode_funcs;
+
+#endif
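The hibmc_bo()/gem_to_hibmc_bo() helpers above recover the wrapper structure from a pointer to one of its embedded members via container_of(). A self-contained userspace sketch of the same idiom, with a local copy of the macro and hypothetical struct names:

    #include <stddef.h>
    #include <stdio.h>

    /* Userspace equivalent of the kernel's container_of(): step back from
     * a member pointer to the start of its enclosing structure. */
    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct gem_object { int handle; };
    struct wrapper_bo { int pin_count; struct gem_object gem; };

    int main(void)
    {
        struct wrapper_bo bo = { .pin_count = 1, .gem = { .handle = 42 } };
        struct gem_object *gem = &bo.gem;
        struct wrapper_bo *back = container_of(gem, struct wrapper_bo, gem);

        printf("pin_count=%d handle=%d\n", back->pin_count, gem->handle);
        return 0;
    }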
diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_fbdev.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_fbdev.c
new file mode 100644
index 000000000000..9b0696735ba1
--- /dev/null
+++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_fbdev.c
@@ -0,0 +1,267 @@
+/* Hisilicon Hibmc SoC drm driver
+ *
+ * Based on the bochs drm driver.
+ *
+ * Copyright (c) 2016 Huawei Limited.
+ *
+ * Author:
+ * Rongrong Zou <zourongrong@huawei.com>
+ * Rongrong Zou <zourongrong@gmail.com>
+ * Jianhua Li <lijianhua@huawei.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ */
+
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_fb_helper.h>
+
+#include "hibmc_drm_drv.h"
+
+static int hibmcfb_create_object(
+ struct hibmc_drm_private *priv,
+ const struct drm_mode_fb_cmd2 *mode_cmd,
+ struct drm_gem_object **gobj_p)
+{
+ struct drm_gem_object *gobj;
+ struct drm_device *dev = priv->dev;
+ u32 size;
+ int ret = 0;
+
+ size = mode_cmd->pitches[0] * mode_cmd->height;
+ ret = hibmc_gem_create(dev, size, true, &gobj);
+ if (ret)
+ return ret;
+
+ *gobj_p = gobj;
+ return ret;
+}
+
+static struct fb_ops hibmc_drm_fb_ops = {
+ .owner = THIS_MODULE,
+ .fb_check_var = drm_fb_helper_check_var,
+ .fb_set_par = drm_fb_helper_set_par,
+ .fb_fillrect = drm_fb_helper_sys_fillrect,
+ .fb_copyarea = drm_fb_helper_sys_copyarea,
+ .fb_imageblit = drm_fb_helper_sys_imageblit,
+ .fb_pan_display = drm_fb_helper_pan_display,
+ .fb_blank = drm_fb_helper_blank,
+ .fb_setcmap = drm_fb_helper_setcmap,
+};
+
+static int hibmc_drm_fb_create(struct drm_fb_helper *helper,
+ struct drm_fb_helper_surface_size *sizes)
+{
+ struct hibmc_fbdev *hi_fbdev =
+ container_of(helper, struct hibmc_fbdev, helper);
+ struct hibmc_drm_private *priv = helper->dev->dev_private;
+ struct fb_info *info;
+ struct drm_mode_fb_cmd2 mode_cmd;
+ struct drm_gem_object *gobj = NULL;
+ int ret = 0;
+ int ret1;
+ size_t size;
+ unsigned int bytes_per_pixel;
+ struct hibmc_bo *bo = NULL;
+
+ DRM_DEBUG_DRIVER("surface width(%d), height(%d) and bpp(%d)\n",
+ sizes->surface_width, sizes->surface_height,
+ sizes->surface_bpp);
+ sizes->surface_depth = 32;
+
+ bytes_per_pixel = DIV_ROUND_UP(sizes->surface_bpp, 8);
+
+ mode_cmd.width = sizes->surface_width;
+ mode_cmd.height = sizes->surface_height;
+ mode_cmd.pitches[0] = mode_cmd.width * bytes_per_pixel;
+ mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
+ sizes->surface_depth);
+
+ size = PAGE_ALIGN(mode_cmd.pitches[0] * mode_cmd.height);
+
+ ret = hibmcfb_create_object(priv, &mode_cmd, &gobj);
+ if (ret) {
+ DRM_ERROR("failed to create fbcon backing object: %d\n", ret);
+ return ret;
+ }
+
+ bo = gem_to_hibmc_bo(gobj);
+
+ ret = ttm_bo_reserve(&bo->bo, true, false, NULL);
+ if (ret) {
+ DRM_ERROR("failed to reserve ttm_bo: %d\n", ret);
+ goto out_unref_gem;
+ }
+
+ ret = hibmc_bo_pin(bo, TTM_PL_FLAG_VRAM, NULL);
+ if (ret) {
+ DRM_ERROR("failed to pin fbcon: %d\n", ret);
+ goto out_unreserve_ttm_bo;
+ }
+
+ ret = ttm_bo_kmap(&bo->bo, 0, bo->bo.num_pages, &bo->kmap);
+ if (ret) {
+ DRM_ERROR("failed to kmap fbcon: %d\n", ret);
+ goto out_unpin_bo;
+ }
+ ttm_bo_unreserve(&bo->bo);
+
+ info = drm_fb_helper_alloc_fbi(helper);
+ if (IS_ERR(info)) {
+ ret = PTR_ERR(info);
+ DRM_ERROR("failed to allocate fbi: %d\n", ret);
+ goto out_release_fbi;
+ }
+
+ info->par = hi_fbdev;
+
+ hi_fbdev->fb = hibmc_framebuffer_init(priv->dev, &mode_cmd, gobj);
+ if (IS_ERR(hi_fbdev->fb)) {
+ ret = PTR_ERR(hi_fbdev->fb);
+ DRM_ERROR("failed to initialize framebuffer: %d\n", ret);
+ goto out_release_fbi;
+ }
+
+ priv->fbdev->size = size;
+ hi_fbdev->helper.fb = &hi_fbdev->fb->fb;
+
+ strcpy(info->fix.id, "hibmcdrmfb");
+
+ info->flags = FBINFO_DEFAULT;
+ info->fbops = &hibmc_drm_fb_ops;
+
+ drm_fb_helper_fill_fix(info, hi_fbdev->fb->fb.pitches[0],
+ hi_fbdev->fb->fb.depth);
+ drm_fb_helper_fill_var(info, &priv->fbdev->helper, sizes->fb_width,
+ sizes->fb_height);
+
+ info->screen_base = bo->kmap.virtual;
+ info->screen_size = size;
+
+ info->fix.smem_start = bo->bo.mem.bus.offset + bo->bo.mem.bus.base;
+ info->fix.smem_len = size;
+ return 0;
+
+out_release_fbi:
+ drm_fb_helper_release_fbi(helper);
+ ret1 = ttm_bo_reserve(&bo->bo, true, false, NULL);
+ if (ret1) {
+ DRM_ERROR("failed to rsv ttm_bo when release fbi: %d\n", ret1);
+ goto out_unref_gem;
+ }
+ ttm_bo_kunmap(&bo->kmap);
+out_unpin_bo:
+ hibmc_bo_unpin(bo);
+out_unreserve_ttm_bo:
+ ttm_bo_unreserve(&bo->bo);
+out_unref_gem:
+ drm_gem_object_unreference_unlocked(gobj);
+
+ return ret;
+}
+
+static void hibmc_fbdev_destroy(struct hibmc_fbdev *fbdev)
+{
+ struct hibmc_framebuffer *gfb = fbdev->fb;
+ struct drm_fb_helper *fbh = &fbdev->helper;
+
+ drm_fb_helper_unregister_fbi(fbh);
+ drm_fb_helper_release_fbi(fbh);
+
+ drm_fb_helper_fini(fbh);
+
+ if (gfb)
+ drm_framebuffer_unreference(&gfb->fb);
+}
+
+static const struct drm_fb_helper_funcs hibmc_fbdev_helper_funcs = {
+ .fb_probe = hibmc_drm_fb_create,
+};
+
+int hibmc_fbdev_init(struct hibmc_drm_private *priv)
+{
+ int ret;
+ struct fb_var_screeninfo *var;
+ struct fb_fix_screeninfo *fix;
+ struct hibmc_fbdev *hifbdev;
+
+ hifbdev = devm_kzalloc(priv->dev->dev, sizeof(*hifbdev), GFP_KERNEL);
+ if (!hifbdev) {
+ DRM_ERROR("failed to allocate hibmc_fbdev\n");
+ return -ENOMEM;
+ }
+
+ priv->fbdev = hifbdev;
+ drm_fb_helper_prepare(priv->dev, &hifbdev->helper,
+ &hibmc_fbdev_helper_funcs);
+
+ /* Now just one crtc and one channel */
+ ret = drm_fb_helper_init(priv->dev,
+ &hifbdev->helper, 1, 1);
+ if (ret) {
+ DRM_ERROR("failed to initialize fb helper: %d\n", ret);
+ return ret;
+ }
+
+ ret = drm_fb_helper_single_add_all_connectors(&hifbdev->helper);
+ if (ret) {
+ DRM_ERROR("failed to add all connectors: %d\n", ret);
+ goto fini;
+ }
+
+ ret = drm_fb_helper_initial_config(&hifbdev->helper, 16);
+ if (ret) {
+ DRM_ERROR("failed to setup initial conn config: %d\n", ret);
+ goto fini;
+ }
+
+ var = &hifbdev->helper.fbdev->var;
+ fix = &hifbdev->helper.fbdev->fix;
+
+ DRM_DEBUG_DRIVER("Member of info->var is :\n"
+ "xres=%d\n"
+ "yres=%d\n"
+ "xres_virtual=%d\n"
+ "yres_virtual=%d\n"
+ "xoffset=%d\n"
+ "yoffset=%d\n"
+ "bits_per_pixel=%d\n"
+ "...\n", var->xres, var->yres, var->xres_virtual,
+ var->yres_virtual, var->xoffset, var->yoffset,
+ var->bits_per_pixel);
+ DRM_DEBUG_DRIVER("Member of info->fix is :\n"
+ "smem_start=%lx\n"
+ "smem_len=%d\n"
+ "type=%d\n"
+ "type_aux=%d\n"
+ "visual=%d\n"
+ "xpanstep=%d\n"
+ "ypanstep=%d\n"
+ "ywrapstep=%d\n"
+ "line_length=%d\n"
+ "accel=%d\n"
+ "capabilities=%d\n"
+ "...\n", fix->smem_start, fix->smem_len, fix->type,
+ fix->type_aux, fix->visual, fix->xpanstep,
+ fix->ypanstep, fix->ywrapstep, fix->line_length,
+ fix->accel, fix->capabilities);
+
+ return 0;
+
+fini:
+ drm_fb_helper_fini(&hifbdev->helper);
+ return ret;
+}
+
+void hibmc_fbdev_fini(struct hibmc_drm_private *priv)
+{
+ if (!priv->fbdev)
+ return;
+
+ hibmc_fbdev_destroy(priv->fbdev);
+ priv->fbdev = NULL;
+}
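hibmc_drm_fb_create() above sizes the fbcon backing object as pitch × height rounded up to a whole page, where the pitch is width × bytes-per-pixel and bytes-per-pixel is DIV_ROUND_UP(bpp, 8). A standalone sketch of that arithmetic (PAGE_SIZE fixed at 4 KiB for illustration):

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SIZE 4096u
    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
    #define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

    /* Same computation as the fbcon allocation above: bytes per pixel,
     * then one scanline, then a page-aligned total. */
    static uint32_t fb_alloc_size(uint32_t width, uint32_t height, uint32_t bpp)
    {
        uint32_t cpp = DIV_ROUND_UP(bpp, 8); /* bytes per pixel */
        uint32_t pitch = width * cpp;        /* one scanline in bytes */

        return PAGE_ALIGN(pitch * height);
    }

    int main(void)
    {
        /* 1024x768 @ 32bpp -> pitch 4096, size 3145728 (already aligned) */
        printf("%u\n", fb_alloc_size(1024, 768, 32));
        return 0;
    }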
diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_regs.h b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_regs.h
new file mode 100644
index 000000000000..f7035bf3ec1f
--- /dev/null
+++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_regs.h
@@ -0,0 +1,196 @@
+/* Hisilicon Hibmc SoC drm driver
+ *
+ * Based on the bochs drm driver.
+ *
+ * Copyright (c) 2016 Huawei Limited.
+ *
+ * Author:
+ * Rongrong Zou <zourongrong@huawei.com>
+ * Rongrong Zou <zourongrong@gmail.com>
+ * Jianhua Li <lijianhua@huawei.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ */
+
+#ifndef HIBMC_DRM_HW_H
+#define HIBMC_DRM_HW_H
+
+/* register definition */
+#define HIBMC_MISC_CTRL 0x4
+
+#define HIBMC_MSCCTL_LOCALMEM_RESET(x) ((x) << 6)
+#define HIBMC_MSCCTL_LOCALMEM_RESET_MASK 0x40
+
+#define HIBMC_CURRENT_GATE 0x000040
+#define HIBMC_CURR_GATE_DISPLAY(x) ((x) << 2)
+#define HIBMC_CURR_GATE_DISPLAY_MASK 0x4
+
+#define HIBMC_CURR_GATE_LOCALMEM(x) ((x) << 1)
+#define HIBMC_CURR_GATE_LOCALMEM_MASK 0x2
+
+#define HIBMC_MODE0_GATE 0x000044
+#define HIBMC_MODE1_GATE 0x000048
+#define HIBMC_POWER_MODE_CTRL 0x00004C
+
+#define HIBMC_PW_MODE_CTL_OSC_INPUT(x) ((x) << 3)
+#define HIBMC_PW_MODE_CTL_OSC_INPUT_MASK 0x8
+
+#define HIBMC_PW_MODE_CTL_MODE(x) ((x) << 0)
+#define HIBMC_PW_MODE_CTL_MODE_MASK 0x03
+#define HIBMC_PW_MODE_CTL_MODE_SHIFT 0
+
+#define HIBMC_PW_MODE_CTL_MODE_MODE0 0
+#define HIBMC_PW_MODE_CTL_MODE_MODE1 1
+#define HIBMC_PW_MODE_CTL_MODE_SLEEP 2
+
+#define HIBMC_PANEL_PLL_CTRL 0x00005C
+#define HIBMC_CRT_PLL_CTRL 0x000060
+
+#define HIBMC_PLL_CTRL_BYPASS(x) ((x) << 18)
+#define HIBMC_PLL_CTRL_BYPASS_MASK 0x40000
+
+#define HIBMC_PLL_CTRL_POWER(x) ((x) << 17)
+#define HIBMC_PLL_CTRL_POWER_MASK 0x20000
+
+#define HIBMC_PLL_CTRL_INPUT(x) ((x) << 16)
+#define HIBMC_PLL_CTRL_INPUT_MASK 0x10000
+
+#define HIBMC_PLL_CTRL_POD(x) ((x) << 14)
+#define HIBMC_PLL_CTRL_POD_MASK 0xC000
+
+#define HIBMC_PLL_CTRL_OD(x) ((x) << 12)
+#define HIBMC_PLL_CTRL_OD_MASK 0x3000
+
+#define HIBMC_PLL_CTRL_N(x) ((x) << 8)
+#define HIBMC_PLL_CTRL_N_MASK 0xF00
+
+#define HIBMC_PLL_CTRL_M(x) ((x) << 0)
+#define HIBMC_PLL_CTRL_M_MASK 0xFF
+
+#define HIBMC_CRT_DISP_CTL 0x80200
+
+#define HIBMC_CRT_DISP_CTL_CRTSELECT(x) ((x) << 25)
+#define HIBMC_CRT_DISP_CTL_CRTSELECT_MASK 0x2000000
+
+#define HIBMC_CRTSELECT_CRT 1
+
+#define HIBMC_CRT_DISP_CTL_CLOCK_PHASE(x) ((x) << 14)
+#define HIBMC_CRT_DISP_CTL_CLOCK_PHASE_MASK 0x4000
+
+#define HIBMC_CRT_DISP_CTL_VSYNC_PHASE(x) ((x) << 13)
+#define HIBMC_CRT_DISP_CTL_VSYNC_PHASE_MASK 0x2000
+
+#define HIBMC_CRT_DISP_CTL_HSYNC_PHASE(x) ((x) << 12)
+#define HIBMC_CRT_DISP_CTL_HSYNC_PHASE_MASK 0x1000
+
+#define HIBMC_CRT_DISP_CTL_TIMING(x) ((x) << 8)
+#define HIBMC_CRT_DISP_CTL_TIMING_MASK 0x100
+
+#define HIBMC_CRT_DISP_CTL_PLANE(x) ((x) << 2)
+#define HIBMC_CRT_DISP_CTL_PLANE_MASK 4
+
+#define HIBMC_CRT_DISP_CTL_FORMAT(x) ((x) << 0)
+#define HIBMC_CRT_DISP_CTL_FORMAT_MASK 0x03
+
+#define HIBMC_CRT_FB_ADDRESS 0x080204
+
+#define HIBMC_CRT_FB_WIDTH 0x080208
+#define HIBMC_CRT_FB_WIDTH_WIDTH(x) ((x) << 16)
+#define HIBMC_CRT_FB_WIDTH_WIDTH_MASK 0x3FFF0000
+#define HIBMC_CRT_FB_WIDTH_OFFS(x) ((x) << 0)
+#define HIBMC_CRT_FB_WIDTH_OFFS_MASK 0x3FFF
+
+#define HIBMC_CRT_HORZ_TOTAL 0x08020C
+#define HIBMC_CRT_HORZ_TOTAL_TOTAL(x) ((x) << 16)
+#define HIBMC_CRT_HORZ_TOTAL_TOTAL_MASK 0xFFF0000
+
+#define HIBMC_CRT_HORZ_TOTAL_DISP_END(x) ((x) << 0)
+#define HIBMC_CRT_HORZ_TOTAL_DISP_END_MASK 0xFFF
+
+#define HIBMC_CRT_HORZ_SYNC 0x080210
+#define HIBMC_CRT_HORZ_SYNC_WIDTH(x) ((x) << 16)
+#define HIBMC_CRT_HORZ_SYNC_WIDTH_MASK 0xFF0000
+
+#define HIBMC_CRT_HORZ_SYNC_START(x) ((x) << 0)
+#define HIBMC_CRT_HORZ_SYNC_START_MASK 0xFFF
+
+#define HIBMC_CRT_VERT_TOTAL 0x080214
+#define HIBMC_CRT_VERT_TOTAL_TOTAL(x) ((x) << 16)
+#define HIBMC_CRT_VERT_TOTAL_TOTAL_MASK 0x7FFF0000
+
+#define HIBMC_CRT_VERT_TOTAL_DISP_END(x) ((x) << 0)
+#define HIBMC_CRT_VERT_TOTAL_DISP_END_MASK 0x7FF
+
+#define HIBMC_CRT_VERT_SYNC 0x080218
+#define HIBMC_CRT_VERT_SYNC_HEIGHT(x) ((x) << 16)
+#define HIBMC_CRT_VERT_SYNC_HEIGHT_MASK 0x3F0000
+
+#define HIBMC_CRT_VERT_SYNC_START(x) ((x) << 0)
+#define HIBMC_CRT_VERT_SYNC_START_MASK 0x7FF
+
+/* Auto Centering */
+#define HIBMC_CRT_AUTO_CENTERING_TL 0x080280
+#define HIBMC_CRT_AUTO_CENTERING_TL_TOP(x) ((x) << 16)
+#define HIBMC_CRT_AUTO_CENTERING_TL_TOP_MASK 0x7FF0000
+
+#define HIBMC_CRT_AUTO_CENTERING_TL_LEFT(x) ((x) << 0)
+#define HIBMC_CRT_AUTO_CENTERING_TL_LEFT_MASK 0x7FF
+
+#define HIBMC_CRT_AUTO_CENTERING_BR 0x080284
+#define HIBMC_CRT_AUTO_CENTERING_BR_BOTTOM(x) ((x) << 16)
+#define HIBMC_CRT_AUTO_CENTERING_BR_BOTTOM_MASK 0x7FF0000
+
+#define HIBMC_CRT_AUTO_CENTERING_BR_RIGHT(x) ((x) << 0)
+#define HIBMC_CRT_AUTO_CENTERING_BR_RIGHT_MASK 0x7FF
+
+/* register to control panel output */
+#define HIBMC_DISPLAY_CONTROL_HISILE 0x80288
+#define HIBMC_DISPLAY_CONTROL_FPVDDEN(x) ((x) << 0)
+#define HIBMC_DISPLAY_CONTROL_PANELDATE(x) ((x) << 1)
+#define HIBMC_DISPLAY_CONTROL_FPEN(x) ((x) << 2)
+#define HIBMC_DISPLAY_CONTROL_VBIASEN(x) ((x) << 3)
+
+#define HIBMC_RAW_INTERRUPT 0x80290
+#define HIBMC_RAW_INTERRUPT_VBLANK(x) ((x) << 2)
+#define HIBMC_RAW_INTERRUPT_VBLANK_MASK 0x4
+
+#define HIBMC_RAW_INTERRUPT_EN 0x80298
+#define HIBMC_RAW_INTERRUPT_EN_VBLANK(x) ((x) << 2)
+#define HIBMC_RAW_INTERRUPT_EN_VBLANK_MASK 0x4
+
+/* register and values for PLL control */
+#define CRT_PLL1_HS 0x802a8
+#define CRT_PLL1_HS_OUTER_BYPASS(x) ((x) << 30)
+#define CRT_PLL1_HS_INTER_BYPASS(x) ((x) << 29)
+#define CRT_PLL1_HS_POWERON(x) ((x) << 24)
+
+#define CRT_PLL1_HS_25MHZ 0x23d40f02
+#define CRT_PLL1_HS_40MHZ 0x23940801
+#define CRT_PLL1_HS_65MHZ 0x23940d01
+#define CRT_PLL1_HS_78MHZ 0x23540F82
+#define CRT_PLL1_HS_74MHZ 0x23941dc2
+#define CRT_PLL1_HS_80MHZ 0x23941001
+#define CRT_PLL1_HS_80MHZ_1152 0x23540fc2
+#define CRT_PLL1_HS_108MHZ 0x23b41b01
+#define CRT_PLL1_HS_162MHZ 0x23480681
+#define CRT_PLL1_HS_148MHZ 0x23541dc2
+#define CRT_PLL1_HS_193MHZ 0x234807c1
+
+#define CRT_PLL2_HS 0x802ac
+#define CRT_PLL2_HS_25MHZ 0x206B851E
+#define CRT_PLL2_HS_40MHZ 0x30000000
+#define CRT_PLL2_HS_65MHZ 0x40000000
+#define CRT_PLL2_HS_78MHZ 0x50E147AE
+#define CRT_PLL2_HS_74MHZ 0x602B6AE7
+#define CRT_PLL2_HS_80MHZ 0x70000000
+#define CRT_PLL2_HS_108MHZ 0x80000000
+#define CRT_PLL2_HS_162MHZ 0xA0000000
+#define CRT_PLL2_HS_148MHZ 0xB0CCCCCD
+#define CRT_PLL2_HS_193MHZ 0xC0872B02
+
+#define HIBMC_FIELD(field, value) (field(value) & field##_MASK)
+#endif
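The HIBMC_FIELD() macro above composes a field helper with its companion _MASK, so an out-of-range value cannot spill into neighbouring bits of the register. A reduced, compilable illustration using the PLL N field:

    #include <stdio.h>

    /* Local copies of the definitions above, just to show the expansion. */
    #define HIBMC_PLL_CTRL_N(x)    ((x) << 8)
    #define HIBMC_PLL_CTRL_N_MASK  0xF00
    #define HIBMC_FIELD(field, value) (field(value) & field##_MASK)

    int main(void)
    {
        /* N = 5 -> (5 << 8) & 0xF00 = 0x500; an oversized N = 0x1F is
         * truncated to the 4-bit field: (0x1F << 8) & 0xF00 = 0xF00. */
        printf("0x%x 0x%x\n",
               HIBMC_FIELD(HIBMC_PLL_CTRL_N, 5),
               HIBMC_FIELD(HIBMC_PLL_CTRL_N, 0x1F));
        return 0;
    }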
diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c
new file mode 100644
index 000000000000..12a18557c5fd
--- /dev/null
+++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c
@@ -0,0 +1,140 @@
+/* Hisilicon Hibmc SoC drm driver
+ *
+ * Based on the bochs drm driver.
+ *
+ * Copyright (c) 2016 Huawei Limited.
+ *
+ * Author:
+ * Rongrong Zou <zourongrong@huawei.com>
+ * Rongrong Zou <zourongrong@gmail.com>
+ * Jianhua Li <lijianhua@huawei.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ */
+
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_crtc_helper.h>
+
+#include "hibmc_drm_drv.h"
+#include "hibmc_drm_regs.h"
+
+static int hibmc_connector_get_modes(struct drm_connector *connector)
+{
+ return drm_add_modes_noedid(connector, 800, 600);
+}
+
+static int hibmc_connector_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ return MODE_OK;
+}
+
+static struct drm_encoder *
+hibmc_connector_best_encoder(struct drm_connector *connector)
+{
+ return drm_encoder_find(connector->dev, connector->encoder_ids[0]);
+}
+
+static const struct drm_connector_helper_funcs
+ hibmc_connector_helper_funcs = {
+ .get_modes = hibmc_connector_get_modes,
+ .mode_valid = hibmc_connector_mode_valid,
+ .best_encoder = hibmc_connector_best_encoder,
+};
+
+static const struct drm_connector_funcs hibmc_connector_funcs = {
+ .dpms = drm_atomic_helper_connector_dpms,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .destroy = drm_connector_cleanup,
+ .reset = drm_atomic_helper_connector_reset,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+};
+
+static struct drm_connector *
+hibmc_connector_init(struct hibmc_drm_private *priv)
+{
+ struct drm_device *dev = priv->dev;
+ struct drm_connector *connector;
+ int ret;
+
+ connector = devm_kzalloc(dev->dev, sizeof(*connector), GFP_KERNEL);
+ if (!connector) {
+ DRM_ERROR("failed to alloc memory when init connector\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ ret = drm_connector_init(dev, connector,
+ &hibmc_connector_funcs,
+ DRM_MODE_CONNECTOR_VGA);
+ if (ret) {
+ DRM_ERROR("failed to init connector: %d\n", ret);
+ return ERR_PTR(ret);
+ }
+ drm_connector_helper_add(connector,
+ &hibmc_connector_helper_funcs);
+
+ return connector;
+}
+
+static void hibmc_encoder_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adj_mode)
+{
+ u32 reg;
+ struct drm_device *dev = encoder->dev;
+ struct hibmc_drm_private *priv = dev->dev_private;
+
+ reg = readl(priv->mmio + HIBMC_DISPLAY_CONTROL_HISILE);
+ reg |= HIBMC_DISPLAY_CONTROL_FPVDDEN(1);
+ reg |= HIBMC_DISPLAY_CONTROL_PANELDATE(1);
+ reg |= HIBMC_DISPLAY_CONTROL_FPEN(1);
+ reg |= HIBMC_DISPLAY_CONTROL_VBIASEN(1);
+ writel(reg, priv->mmio + HIBMC_DISPLAY_CONTROL_HISILE);
+}
+
+static const struct drm_encoder_helper_funcs hibmc_encoder_helper_funcs = {
+ .mode_set = hibmc_encoder_mode_set,
+};
+
+static const struct drm_encoder_funcs hibmc_encoder_funcs = {
+ .destroy = drm_encoder_cleanup,
+};
+
+int hibmc_vdac_init(struct hibmc_drm_private *priv)
+{
+ struct drm_device *dev = priv->dev;
+ struct drm_encoder *encoder;
+ struct drm_connector *connector;
+ int ret;
+
+ connector = hibmc_connector_init(priv);
+ if (IS_ERR(connector)) {
+ DRM_ERROR("failed to create connector: %ld\n",
+ PTR_ERR(connector));
+ return PTR_ERR(connector);
+ }
+
+ encoder = devm_kzalloc(dev->dev, sizeof(*encoder), GFP_KERNEL);
+ if (!encoder) {
+ DRM_ERROR("failed to alloc memory when init encoder\n");
+ return -ENOMEM;
+ }
+
+ encoder->possible_crtcs = 0x1;
+ ret = drm_encoder_init(dev, encoder, &hibmc_encoder_funcs,
+ DRM_MODE_ENCODER_DAC, NULL);
+ if (ret) {
+ DRM_ERROR("failed to init encoder: %d\n", ret);
+ return ret;
+ }
+
+ drm_encoder_helper_add(encoder, &hibmc_encoder_helper_funcs);
+ drm_mode_connector_attach_encoder(connector, encoder);
+
+ return 0;
+}
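hibmc_encoder_mode_set() above programs HIBMC_DISPLAY_CONTROL_HISILE with a read-modify-write: readl() the current value, OR in the field helpers, writel() it back, so unrelated bits are preserved. A userspace sketch of the same pattern with a plain variable standing in for the memory-mapped register:

    #include <stdint.h>
    #include <stdio.h>

    #define CTL_FPVDDEN(x)   ((x) << 0)
    #define CTL_PANELDATE(x) ((x) << 1)
    #define CTL_FPEN(x)      ((x) << 2)
    #define CTL_VBIASEN(x)   ((x) << 3)

    int main(void)
    {
        uint32_t reg = 0x100; /* pretend hardware state: bit 8 already set */

        /* Read-modify-write: set the panel enables, keep everything else. */
        reg |= CTL_FPVDDEN(1) | CTL_PANELDATE(1) |
               CTL_FPEN(1) | CTL_VBIASEN(1);
        printf("0x%x\n", reg); /* 0x10f: original bit survives */
        return 0;
    }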
diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_ttm.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_ttm.c
new file mode 100644
index 000000000000..e76abf61edae
--- /dev/null
+++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_ttm.c
@@ -0,0 +1,558 @@
+/* Hisilicon Hibmc SoC drm driver
+ *
+ * Based on the bochs drm driver.
+ *
+ * Copyright (c) 2016 Huawei Limited.
+ *
+ * Author:
+ * Rongrong Zou <zourongrong@huawei.com>
+ * Rongrong Zou <zourongrong@gmail.com>
+ * Jianhua Li <lijianhua@huawei.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ */
+
+#include <drm/drm_atomic_helper.h>
+#include <ttm/ttm_page_alloc.h>
+
+#include "hibmc_drm_drv.h"
+
+#define DRM_FILE_PAGE_OFFSET (0x100000000ULL >> PAGE_SHIFT)
+
+static inline struct hibmc_drm_private *
+hibmc_bdev(struct ttm_bo_device *bd)
+{
+ return container_of(bd, struct hibmc_drm_private, bdev);
+}
+
+static int
+hibmc_ttm_mem_global_init(struct drm_global_reference *ref)
+{
+ return ttm_mem_global_init(ref->object);
+}
+
+static void
+hibmc_ttm_mem_global_release(struct drm_global_reference *ref)
+{
+ ttm_mem_global_release(ref->object);
+}
+
+static int hibmc_ttm_global_init(struct hibmc_drm_private *hibmc)
+{
+ int ret;
+
+ hibmc->mem_global_ref.global_type = DRM_GLOBAL_TTM_MEM;
+ hibmc->mem_global_ref.size = sizeof(struct ttm_mem_global);
+ hibmc->mem_global_ref.init = &hibmc_ttm_mem_global_init;
+ hibmc->mem_global_ref.release = &hibmc_ttm_mem_global_release;
+ ret = drm_global_item_ref(&hibmc->mem_global_ref);
+ if (ret) {
+ DRM_ERROR("could not get ref on ttm global: %d\n", ret);
+ return ret;
+ }
+
+ hibmc->bo_global_ref.mem_glob =
+ hibmc->mem_global_ref.object;
+ hibmc->bo_global_ref.ref.global_type = DRM_GLOBAL_TTM_BO;
+ hibmc->bo_global_ref.ref.size = sizeof(struct ttm_bo_global);
+ hibmc->bo_global_ref.ref.init = &ttm_bo_global_init;
+ hibmc->bo_global_ref.ref.release = &ttm_bo_global_release;
+ ret = drm_global_item_ref(&hibmc->bo_global_ref.ref);
+ if (ret) {
+ DRM_ERROR("failed setting up TTM BO subsystem: %d\n", ret);
+ drm_global_item_unref(&hibmc->mem_global_ref);
+ return ret;
+ }
+ return 0;
+}
+
+static void
+hibmc_ttm_global_release(struct hibmc_drm_private *hibmc)
+{
+ drm_global_item_unref(&hibmc->bo_global_ref.ref);
+ drm_global_item_unref(&hibmc->mem_global_ref);
+ hibmc->mem_global_ref.release = NULL;
+}
+
+static void hibmc_bo_ttm_destroy(struct ttm_buffer_object *tbo)
+{
+ struct hibmc_bo *bo = container_of(tbo, struct hibmc_bo, bo);
+
+ drm_gem_object_release(&bo->gem);
+ kfree(bo);
+}
+
+static bool hibmc_ttm_bo_is_hibmc_bo(struct ttm_buffer_object *bo)
+{
+ return bo->destroy == &hibmc_bo_ttm_destroy;
+}
+
+static int
+hibmc_bo_init_mem_type(struct ttm_bo_device *bdev, u32 type,
+ struct ttm_mem_type_manager *man)
+{
+ switch (type) {
+ case TTM_PL_SYSTEM:
+ man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
+ man->available_caching = TTM_PL_MASK_CACHING;
+ man->default_caching = TTM_PL_FLAG_CACHED;
+ break;
+ case TTM_PL_VRAM:
+ man->func = &ttm_bo_manager_func;
+ man->flags = TTM_MEMTYPE_FLAG_FIXED |
+ TTM_MEMTYPE_FLAG_MAPPABLE;
+ man->available_caching = TTM_PL_FLAG_UNCACHED |
+ TTM_PL_FLAG_WC;
+ man->default_caching = TTM_PL_FLAG_WC;
+ break;
+ default:
+ DRM_ERROR("unsupported memory type %u\n", type);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static void hibmc_ttm_placement(struct hibmc_bo *bo, int domain)
+{
+ u32 count = 0;
+ u32 i;
+
+ bo->placement.placement = bo->placements;
+ bo->placement.busy_placement = bo->placements;
+ if (domain & TTM_PL_FLAG_VRAM)
+ bo->placements[count++].flags = TTM_PL_FLAG_WC |
+ TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_VRAM;
+ if (domain & TTM_PL_FLAG_SYSTEM)
+ bo->placements[count++].flags = TTM_PL_MASK_CACHING |
+ TTM_PL_FLAG_SYSTEM;
+ if (!count)
+ bo->placements[count++].flags = TTM_PL_MASK_CACHING |
+ TTM_PL_FLAG_SYSTEM;
+
+ bo->placement.num_placement = count;
+ bo->placement.num_busy_placement = count;
+ for (i = 0; i < count; i++) {
+ bo->placements[i].fpfn = 0;
+ bo->placements[i].lpfn = 0;
+ }
+}
+
+static void
+hibmc_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
+{
+ struct hibmc_bo *hibmcbo = hibmc_bo(bo);
+
+ if (!hibmc_ttm_bo_is_hibmc_bo(bo))
+ return;
+
+ hibmc_ttm_placement(hibmcbo, TTM_PL_FLAG_SYSTEM);
+ *pl = hibmcbo->placement;
+}
+
+static int hibmc_bo_verify_access(struct ttm_buffer_object *bo,
+ struct file *filp)
+{
+ struct hibmc_bo *hibmcbo = hibmc_bo(bo);
+
+ return drm_vma_node_verify_access(&hibmcbo->gem.vma_node,
+ filp->private_data);
+}
+
+static int hibmc_ttm_io_mem_reserve(struct ttm_bo_device *bdev,
+ struct ttm_mem_reg *mem)
+{
+ struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
+ struct hibmc_drm_private *hibmc = hibmc_bdev(bdev);
+
+ mem->bus.addr = NULL;
+ mem->bus.offset = 0;
+ mem->bus.size = mem->num_pages << PAGE_SHIFT;
+ mem->bus.base = 0;
+ mem->bus.is_iomem = false;
+ if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
+ return -EINVAL;
+ switch (mem->mem_type) {
+ case TTM_PL_SYSTEM:
+ /* system memory */
+ return 0;
+ case TTM_PL_VRAM:
+ mem->bus.offset = mem->start << PAGE_SHIFT;
+ mem->bus.base = pci_resource_start(hibmc->dev->pdev, 0);
+ mem->bus.is_iomem = true;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static void hibmc_ttm_backend_destroy(struct ttm_tt *tt)
+{
+ ttm_tt_fini(tt);
+ kfree(tt);
+}
+
+static struct ttm_backend_func hibmc_tt_backend_func = {
+ .destroy = &hibmc_ttm_backend_destroy,
+};
+
+static struct ttm_tt *hibmc_ttm_tt_create(struct ttm_bo_device *bdev,
+ unsigned long size,
+ u32 page_flags,
+ struct page *dummy_read_page)
+{
+ struct ttm_tt *tt;
+ int ret;
+
+ tt = kzalloc(sizeof(*tt), GFP_KERNEL);
+ if (!tt) {
+ DRM_ERROR("failed to allocate ttm_tt\n");
+ return NULL;
+ }
+ tt->func = &hibmc_tt_backend_func;
+ ret = ttm_tt_init(tt, bdev, size, page_flags, dummy_read_page);
+ if (ret) {
+ DRM_ERROR("failed to initialize ttm_tt: %d\n", ret);
+ kfree(tt);
+ return NULL;
+ }
+ return tt;
+}
+
+static int hibmc_ttm_tt_populate(struct ttm_tt *ttm)
+{
+ return ttm_pool_populate(ttm);
+}
+
+static void hibmc_ttm_tt_unpopulate(struct ttm_tt *ttm)
+{
+ ttm_pool_unpopulate(ttm);
+}
+
+static struct ttm_bo_driver hibmc_bo_driver = {
+ .ttm_tt_create = hibmc_ttm_tt_create,
+ .ttm_tt_populate = hibmc_ttm_tt_populate,
+ .ttm_tt_unpopulate = hibmc_ttm_tt_unpopulate,
+ .init_mem_type = hibmc_bo_init_mem_type,
+ .evict_flags = hibmc_bo_evict_flags,
+ .move = NULL,
+ .verify_access = hibmc_bo_verify_access,
+ .io_mem_reserve = &hibmc_ttm_io_mem_reserve,
+ .io_mem_free = NULL,
+ .lru_tail = &ttm_bo_default_lru_tail,
+ .swap_lru_tail = &ttm_bo_default_swap_lru_tail,
+};
+
+int hibmc_mm_init(struct hibmc_drm_private *hibmc)
+{
+ int ret;
+ struct drm_device *dev = hibmc->dev;
+ struct ttm_bo_device *bdev = &hibmc->bdev;
+
+ ret = hibmc_ttm_global_init(hibmc);
+ if (ret)
+ return ret;
+
+ ret = ttm_bo_device_init(&hibmc->bdev,
+ hibmc->bo_global_ref.ref.object,
+ &hibmc_bo_driver,
+ dev->anon_inode->i_mapping,
+ DRM_FILE_PAGE_OFFSET,
+ true);
+ if (ret) {
+ hibmc_ttm_global_release(hibmc);
+ DRM_ERROR("error initializing bo driver: %d\n", ret);
+ return ret;
+ }
+
+ ret = ttm_bo_init_mm(bdev, TTM_PL_VRAM,
+ hibmc->fb_size >> PAGE_SHIFT);
+ if (ret) {
+ hibmc_ttm_global_release(hibmc);
+ DRM_ERROR("failed ttm VRAM init: %d\n", ret);
+ return ret;
+ }
+
+ hibmc->mm_inited = true;
+ return 0;
+}
+
+void hibmc_mm_fini(struct hibmc_drm_private *hibmc)
+{
+ if (!hibmc->mm_inited)
+ return;
+
+ ttm_bo_device_release(&hibmc->bdev);
+ hibmc_ttm_global_release(hibmc);
+ hibmc->mm_inited = false;
+}
+
+static void hibmc_bo_unref(struct hibmc_bo **bo)
+{
+ struct ttm_buffer_object *tbo;
+
+ if (!*bo)
+ return;
+
+ tbo = &((*bo)->bo);
+ ttm_bo_unref(&tbo);
+ *bo = NULL;
+}
+
+int hibmc_bo_create(struct drm_device *dev, int size, int align,
+ u32 flags, struct hibmc_bo **phibmcbo)
+{
+ struct hibmc_drm_private *hibmc = dev->dev_private;
+ struct hibmc_bo *hibmcbo;
+ size_t acc_size;
+ int ret;
+
+ hibmcbo = kzalloc(sizeof(*hibmcbo), GFP_KERNEL);
+ if (!hibmcbo) {
+ DRM_ERROR("failed to allocate hibmcbo\n");
+ return -ENOMEM;
+ }
+ ret = drm_gem_object_init(dev, &hibmcbo->gem, size);
+ if (ret) {
+ DRM_ERROR("failed to initialize drm gem object: %d\n", ret);
+ kfree(hibmcbo);
+ return ret;
+ }
+
+ hibmcbo->bo.bdev = &hibmc->bdev;
+
+ hibmc_ttm_placement(hibmcbo, TTM_PL_FLAG_VRAM | TTM_PL_FLAG_SYSTEM);
+
+ acc_size = ttm_bo_dma_acc_size(&hibmc->bdev, size,
+ sizeof(struct hibmc_bo));
+
+ ret = ttm_bo_init(&hibmc->bdev, &hibmcbo->bo, size,
+ ttm_bo_type_device, &hibmcbo->placement,
+ align >> PAGE_SHIFT, false, NULL, acc_size,
+ NULL, NULL, hibmc_bo_ttm_destroy);
+ if (ret) {
+ hibmc_bo_unref(&hibmcbo);
+ DRM_ERROR("failed to initialize ttm_bo: %d\n", ret);
+ return ret;
+ }
+
+ *phibmcbo = hibmcbo;
+ return 0;
+}
+
+int hibmc_bo_pin(struct hibmc_bo *bo, u32 pl_flag, u64 *gpu_addr)
+{
+ int i, ret;
+
+ if (bo->pin_count) {
+ bo->pin_count++;
+ if (gpu_addr)
+ *gpu_addr = bo->bo.offset;
+ return 0;
+ }
+
+ hibmc_ttm_placement(bo, pl_flag);
+ for (i = 0; i < bo->placement.num_placement; i++)
+ bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT;
+ ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
+ if (ret)
+ return ret;
+
+ bo->pin_count = 1;
+ if (gpu_addr)
+ *gpu_addr = bo->bo.offset;
+ return 0;
+}
+
+int hibmc_bo_unpin(struct hibmc_bo *bo)
+{
+ int i, ret;
+
+ if (!bo->pin_count) {
+ DRM_ERROR("unpin bad %p\n", bo);
+ return 0;
+ }
+ bo->pin_count--;
+ if (bo->pin_count)
+ return 0;
+
+ for (i = 0; i < bo->placement.num_placement ; i++)
+ bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT;
+ ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
+ if (ret) {
+ DRM_ERROR("validate failed for unpin: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+int hibmc_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+ struct drm_file *file_priv;
+ struct hibmc_drm_private *hibmc;
+
+ if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET))
+ return -EINVAL;
+
+ file_priv = filp->private_data;
+ hibmc = file_priv->minor->dev->dev_private;
+ return ttm_bo_mmap(filp, vma, &hibmc->bdev);
+}
+
+int hibmc_gem_create(struct drm_device *dev, u32 size, bool iskernel,
+ struct drm_gem_object **obj)
+{
+ struct hibmc_bo *hibmcbo;
+ int ret;
+
+ *obj = NULL;
+
+ size = PAGE_ALIGN(size);
+ if (size == 0) {
+ DRM_ERROR("error: zero size\n");
+ return -EINVAL;
+ }
+
+ ret = hibmc_bo_create(dev, size, 0, 0, &hibmcbo);
+ if (ret) {
+ if (ret != -ERESTARTSYS)
+ DRM_ERROR("failed to allocate GEM object: %d\n", ret);
+ return ret;
+ }
+ *obj = &hibmcbo->gem;
+ return 0;
+}
+
+int hibmc_dumb_create(struct drm_file *file, struct drm_device *dev,
+ struct drm_mode_create_dumb *args)
+{
+ struct drm_gem_object *gobj;
+ u32 handle;
+ int ret;
+
+ args->pitch = ALIGN(args->width * DIV_ROUND_UP(args->bpp, 8), 16);
+ args->size = args->pitch * args->height;
+
+ ret = hibmc_gem_create(dev, args->size, false,
+ &gobj);
+ if (ret) {
+ DRM_ERROR("failed to create GEM object: %d\n", ret);
+ return ret;
+ }
+
+ ret = drm_gem_handle_create(file, gobj, &handle);
+ drm_gem_object_unreference_unlocked(gobj);
+ if (ret) {
+ DRM_ERROR("failed to unreference GEM object: %d\n", ret);
+ return ret;
+ }
+
+ args->handle = handle;
+ return 0;
+}
+
+void hibmc_gem_free_object(struct drm_gem_object *obj)
+{
+ struct hibmc_bo *hibmcbo = gem_to_hibmc_bo(obj);
+
+ hibmc_bo_unref(&hibmcbo);
+}
+
+static u64 hibmc_bo_mmap_offset(struct hibmc_bo *bo)
+{
+ return drm_vma_node_offset_addr(&bo->bo.vma_node);
+}
+
+int hibmc_dumb_mmap_offset(struct drm_file *file, struct drm_device *dev,
+ u32 handle, u64 *offset)
+{
+ struct drm_gem_object *obj;
+ struct hibmc_bo *bo;
+
+ obj = drm_gem_object_lookup(file, handle);
+ if (!obj)
+ return -ENOENT;
+
+ bo = gem_to_hibmc_bo(obj);
+ *offset = hibmc_bo_mmap_offset(bo);
+
+ drm_gem_object_unreference_unlocked(obj);
+ return 0;
+}
+
+static void hibmc_user_framebuffer_destroy(struct drm_framebuffer *fb)
+{
+ struct hibmc_framebuffer *hibmc_fb = to_hibmc_framebuffer(fb);
+
+ drm_gem_object_unreference_unlocked(hibmc_fb->obj);
+ drm_framebuffer_cleanup(fb);
+ kfree(hibmc_fb);
+}
+
+static const struct drm_framebuffer_funcs hibmc_fb_funcs = {
+ .destroy = hibmc_user_framebuffer_destroy,
+};
+
+struct hibmc_framebuffer *
+hibmc_framebuffer_init(struct drm_device *dev,
+ const struct drm_mode_fb_cmd2 *mode_cmd,
+ struct drm_gem_object *obj)
+{
+ struct hibmc_framebuffer *hibmc_fb;
+ int ret;
+
+ hibmc_fb = kzalloc(sizeof(*hibmc_fb), GFP_KERNEL);
+ if (!hibmc_fb) {
+ DRM_ERROR("failed to allocate hibmc_fb\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ drm_helper_mode_fill_fb_struct(&hibmc_fb->fb, mode_cmd);
+ hibmc_fb->obj = obj;
+ ret = drm_framebuffer_init(dev, &hibmc_fb->fb, &hibmc_fb_funcs);
+ if (ret) {
+ DRM_ERROR("drm_framebuffer_init failed: %d\n", ret);
+ kfree(hibmc_fb);
+ return ERR_PTR(ret);
+ }
+
+ return hibmc_fb;
+}
+
+static struct drm_framebuffer *
+hibmc_user_framebuffer_create(struct drm_device *dev,
+ struct drm_file *filp,
+ const struct drm_mode_fb_cmd2 *mode_cmd)
+{
+ struct drm_gem_object *obj;
+ struct hibmc_framebuffer *hibmc_fb;
+
+ DRM_DEBUG_DRIVER("%dx%d, format %c%c%c%c\n",
+ mode_cmd->width, mode_cmd->height,
+ (mode_cmd->pixel_format) & 0xff,
+ (mode_cmd->pixel_format >> 8) & 0xff,
+ (mode_cmd->pixel_format >> 16) & 0xff,
+ (mode_cmd->pixel_format >> 24) & 0xff);
+
+ obj = drm_gem_object_lookup(filp, mode_cmd->handles[0]);
+ if (!obj)
+ return ERR_PTR(-ENOENT);
+
+ hibmc_fb = hibmc_framebuffer_init(dev, mode_cmd, obj);
+ if (IS_ERR(hibmc_fb)) {
+ drm_gem_object_unreference_unlocked(obj);
+ return ERR_CAST(hibmc_fb);
+ }
+ return &hibmc_fb->fb;
+}
+
+const struct drm_mode_config_funcs hibmc_mode_funcs = {
+ .atomic_check = drm_atomic_helper_check,
+ .atomic_commit = drm_atomic_helper_commit,
+ .fb_create = hibmc_user_framebuffer_create,
+};
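hibmc_bo_pin()/hibmc_bo_unpin() above keep a nesting counter so that only the 0→1 and 1→0 transitions change the placement flags and revalidate the buffer; nested pins just bump the counter. A minimal sketch of that counting scheme (ttm_bo_validate() and the NO_EVICT flag handling elided):

    #include <stdio.h>

    struct demo_bo { int pin_count; };

    static int demo_pin(struct demo_bo *bo)
    {
        /* Only the first pin touches placement. */
        if (bo->pin_count++ == 0)
            printf("set NO_EVICT, validate\n");
        return 0;
    }

    static int demo_unpin(struct demo_bo *bo)
    {
        if (!bo->pin_count)
            return 0; /* unbalanced unpin; hibmc logs and bails */
        /* Only the last unpin touches placement. */
        if (--bo->pin_count == 0)
            printf("clear NO_EVICT, validate\n");
        return 0;
    }

    int main(void)
    {
        struct demo_bo bo = { 0 };

        demo_pin(&bo);   /* 0 -> 1: validates */
        demo_pin(&bo);   /* nested: counter only */
        demo_unpin(&bo); /* 2 -> 1: counter only */
        demo_unpin(&bo); /* 1 -> 0: validates */
        return 0;
    }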
diff --git a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c
index 7e7a4d43d6b6..afc2b5d2d5f0 100644
--- a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c
+++ b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c
@@ -608,17 +608,16 @@ static void ade_rdma_set(void __iomem *base, struct drm_framebuffer *fb,
u32 ch, u32 y, u32 in_h, u32 fmt)
{
struct drm_gem_cma_object *obj = drm_fb_cma_get_gem_obj(fb, 0);
- char *format_name;
+ struct drm_format_name_buf format_name;
u32 reg_ctrl, reg_addr, reg_size, reg_stride, reg_space, reg_en;
u32 stride = fb->pitches[0];
u32 addr = (u32)obj->paddr + y * stride;
DRM_DEBUG_DRIVER("rdma%d: (y=%d, height=%d), stride=%d, paddr=0x%x\n",
ch + 1, y, in_h, stride, (u32)obj->paddr);
- format_name = drm_get_format_name(fb->pixel_format);
DRM_DEBUG_DRIVER("addr=0x%x, fb:%dx%d, pixel_format=%d(%s)\n",
- addr, fb->width, fb->height, fmt, format_name);
- kfree(format_name);
+ addr, fb->width, fb->height, fmt,
+ drm_get_format_name(fb->pixel_format, &format_name));
/* get reg offset */
reg_ctrl = RD_CH_CTRL(ch);
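The kirin hunk above tracks the drm_get_format_name() API change: the caller now passes a struct drm_format_name_buf, so debug paths no longer kmalloc()/kfree() a string. A userspace sketch of the caller-supplied-buffer idiom, with a hypothetical fourcc_name() in place of the DRM helper:

    #include <stdint.h>
    #include <stdio.h>

    struct format_name_buf { char str[32]; };

    /* Caller owns the storage; no allocation in the debug path. */
    static const char *fourcc_name(uint32_t fourcc, struct format_name_buf *buf)
    {
        snprintf(buf->str, sizeof(buf->str), "%c%c%c%c",
                 (char)(fourcc & 0xff), (char)((fourcc >> 8) & 0xff),
                 (char)((fourcc >> 16) & 0xff), (char)((fourcc >> 24) & 0xff));
        return buf->str;
    }

    int main(void)
    {
        struct format_name_buf buf;
        uint32_t xr24 = 'X' | ('R' << 8) | ('2' << 16) | ('4' << 24);

        printf("%s\n", fourcc_name(xr24, &buf)); /* prints XR24 */
        return 0;
    }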
diff --git a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c
index 90377a609c98..ebd5f4fe4c23 100644
--- a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c
+++ b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c
@@ -24,6 +24,7 @@
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc_helper.h>
+#include <drm/drm_of.h>
#include "kirin_drm_drv.h"
@@ -151,9 +152,7 @@ static const struct file_operations kirin_drm_fops = {
.open = drm_open,
.release = drm_release,
.unlocked_ioctl = drm_ioctl,
-#ifdef CONFIG_COMPAT
.compat_ioctl = drm_compat_ioctl,
-#endif
.poll = drm_poll,
.read = drm_read,
.llseek = no_llseek,
@@ -260,14 +259,13 @@ static struct device_node *kirin_get_remote_node(struct device_node *np)
DRM_ERROR("no valid endpoint node\n");
return ERR_PTR(-ENODEV);
}
- of_node_put(endpoint);
remote = of_graph_get_remote_port_parent(endpoint);
+ of_node_put(endpoint);
if (!remote) {
DRM_ERROR("no valid remote node\n");
return ERR_PTR(-ENODEV);
}
- of_node_put(remote);
if (!of_device_is_available(remote)) {
DRM_ERROR("not available for remote node\n");
@@ -294,7 +292,8 @@ static int kirin_drm_platform_probe(struct platform_device *pdev)
if (IS_ERR(remote))
return PTR_ERR(remote);
- component_match_add(dev, &match, compare_of, remote);
+ drm_of_component_match_add(dev, &match, compare_of, remote);
+ of_node_put(remote);
return component_master_add_with_match(dev, &kirin_drm_ops, match);
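The kirin_get_remote_node() fix above moves of_node_put(endpoint) to after of_graph_get_remote_port_parent(), and keeps the remote node referenced until the caller is done with it (the premature put was the bug). A generic, runnable model of that get-before-put discipline using a toy refcounted node in place of the OF API:

    #include <stdio.h>

    struct node { int refs; const char *name; };

    static struct node *node_get(struct node *n) { if (n) n->refs++; return n; }
    static void node_put(struct node *n) { if (n) n->refs--; }

    /* Take the reference you need from 'endpoint' before dropping it,
     * and hand 'remote' to the caller still referenced. */
    static struct node *get_remote(struct node *endpoint, struct node *remote)
    {
        struct node *r = node_get(remote); /* of_graph_get_remote_port_parent() */

        node_put(endpoint); /* now safe: endpoint is no longer used */
        return r;           /* caller must node_put() when finished */
    }

    int main(void)
    {
        struct node ep = { 1, "endpoint" }, rem = { 1, "remote" };
        struct node *r = get_remote(&ep, &rem);

        printf("%s refs=%d\n", r->name, r->refs); /* remote refs=2 */
        node_put(r);
        return 0;
    }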
diff --git a/drivers/gpu/drm/i2c/tda998x_drv.c b/drivers/gpu/drm/i2c/tda998x_drv.c
index 9798d400d817..86f47e190309 100644
--- a/drivers/gpu/drm/i2c/tda998x_drv.c
+++ b/drivers/gpu/drm/i2c/tda998x_drv.c
@@ -41,12 +41,15 @@ struct tda998x_priv {
struct i2c_client *hdmi;
struct mutex mutex;
u16 rev;
+ u8 cec_addr;
u8 current_page;
- int dpms;
- bool is_hdmi_sink;
+ bool is_on;
+ bool supports_infoframes;
+ bool sink_has_audio;
u8 vip_cntrl_0;
u8 vip_cntrl_1;
u8 vip_cntrl_2;
+ unsigned long tmds_clock;
struct tda998x_audio_params audio_params;
struct platform_device *audio_pdev;
@@ -105,6 +108,8 @@ struct tda998x_priv {
# define I2C_MASTER_DIS_FILT (1 << 1)
# define I2C_MASTER_APP_STRT_LAT (1 << 2)
#define REG_FEAT_POWERDOWN REG(0x00, 0x0e) /* read/write */
+# define FEAT_POWERDOWN_PREFILT BIT(0)
+# define FEAT_POWERDOWN_CSC BIT(1)
# define FEAT_POWERDOWN_SPDIF (1 << 3)
#define REG_INT_FLAGS_0 REG(0x00, 0x0f) /* read/write */
#define REG_INT_FLAGS_1 REG(0x00, 0x10) /* read/write */
@@ -370,35 +375,46 @@ struct tda998x_priv {
static void
cec_write(struct tda998x_priv *priv, u16 addr, u8 val)
{
- struct i2c_client *client = priv->cec;
u8 buf[] = {addr, val};
+ struct i2c_msg msg = {
+ .addr = priv->cec_addr,
+ .len = 2,
+ .buf = buf,
+ };
int ret;
- ret = i2c_master_send(client, buf, sizeof(buf));
+ ret = i2c_transfer(priv->hdmi->adapter, &msg, 1);
if (ret < 0)
- dev_err(&client->dev, "Error %d writing to cec:0x%x\n", ret, addr);
+ dev_err(&priv->hdmi->dev, "Error %d writing to cec:0x%x\n",
+ ret, addr);
}
static u8
cec_read(struct tda998x_priv *priv, u8 addr)
{
- struct i2c_client *client = priv->cec;
u8 val;
+ struct i2c_msg msg[2] = {
+ {
+ .addr = priv->cec_addr,
+ .len = 1,
+ .buf = &addr,
+ }, {
+ .addr = priv->cec_addr,
+ .flags = I2C_M_RD,
+ .len = 1,
+ .buf = &val,
+ },
+ };
int ret;
- ret = i2c_master_send(client, &addr, sizeof(addr));
- if (ret < 0)
- goto fail;
-
- ret = i2c_master_recv(client, &val, sizeof(val));
- if (ret < 0)
- goto fail;
+ ret = i2c_transfer(priv->hdmi->adapter, msg, ARRAY_SIZE(msg));
+ if (ret < 0) {
+ dev_err(&priv->hdmi->dev, "Error %d reading from cec:0x%x\n",
+ ret, addr);
+ val = 0;
+ }
return val;
-
-fail:
- dev_err(&client->dev, "Error %d reading from cec:0x%x\n", ret, addr);
- return 0;
}
static int
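The reworked cec_read() above replaces the i2c_master_send()/i2c_master_recv() pair with a single i2c_transfer() of two messages, so the register-address write and the data read share one transaction with a repeated start in between. A userspace equivalent via the i2c-dev I2C_RDWR ioctl (the /dev/i2c-1 path and 0x34 address are placeholders):

    #include <fcntl.h>
    #include <linux/i2c.h>
    #include <linux/i2c-dev.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <unistd.h>

    /* One combined transaction: write the register offset, then read one
     * byte back, with a repeated start between the two messages. */
    static int i2c_read_reg(int fd, uint8_t addr, uint8_t reg, uint8_t *val)
    {
        struct i2c_msg msgs[2] = {
            { .addr = addr, .flags = 0,        .len = 1, .buf = &reg },
            { .addr = addr, .flags = I2C_M_RD, .len = 1, .buf = val  },
        };
        struct i2c_rdwr_ioctl_data xfer = { .msgs = msgs, .nmsgs = 2 };

        return ioctl(fd, I2C_RDWR, &xfer) < 0 ? -1 : 0;
    }

    int main(void)
    {
        uint8_t val;
        int fd = open("/dev/i2c-1", O_RDWR);

        if (fd >= 0 && i2c_read_reg(fd, 0x34, 0x01, &val) == 0)
            printf("reg 0x01 = 0x%02x\n", val);
        if (fd >= 0)
            close(fd);
        return 0;
    }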
@@ -579,9 +595,9 @@ tda998x_reset(struct tda998x_priv *priv)
* HPD assertion: it needs a delay of 100ms to avoid timing out while
* trying to read EDID data.
*
- * However, tda998x_encoder_get_modes() may be called at any moment
+ * However, tda998x_connector_get_modes() may be called at any moment
* after tda998x_connector_detect() indicates that we are connected, so
- * we need to delay probing modes in tda998x_encoder_get_modes() after
+ * we need to delay probing modes in tda998x_connector_get_modes() after
* we have seen a HPD inactive->active transition. This code implements
* that delay.
*/
@@ -630,28 +646,30 @@ static irqreturn_t tda998x_irq_thread(int irq, void *data)
bool handled = false;
sta = cec_read(priv, REG_CEC_INTSTATUS);
- cec = cec_read(priv, REG_CEC_RXSHPDINT);
- lvl = cec_read(priv, REG_CEC_RXSHPDLEV);
- flag0 = reg_read(priv, REG_INT_FLAGS_0);
- flag1 = reg_read(priv, REG_INT_FLAGS_1);
- flag2 = reg_read(priv, REG_INT_FLAGS_2);
- DRM_DEBUG_DRIVER(
- "tda irq sta %02x cec %02x lvl %02x f0 %02x f1 %02x f2 %02x\n",
- sta, cec, lvl, flag0, flag1, flag2);
-
- if (cec & CEC_RXSHPDINT_HPD) {
- if (lvl & CEC_RXSHPDLEV_HPD)
- tda998x_edid_delay_start(priv);
- else
- schedule_work(&priv->detect_work);
-
- handled = true;
- }
+ if (sta & CEC_INTSTATUS_HDMI) {
+ cec = cec_read(priv, REG_CEC_RXSHPDINT);
+ lvl = cec_read(priv, REG_CEC_RXSHPDLEV);
+ flag0 = reg_read(priv, REG_INT_FLAGS_0);
+ flag1 = reg_read(priv, REG_INT_FLAGS_1);
+ flag2 = reg_read(priv, REG_INT_FLAGS_2);
+ DRM_DEBUG_DRIVER(
+ "tda irq sta %02x cec %02x lvl %02x f0 %02x f1 %02x f2 %02x\n",
+ sta, cec, lvl, flag0, flag1, flag2);
+
+ if (cec & CEC_RXSHPDINT_HPD) {
+ if (lvl & CEC_RXSHPDLEV_HPD)
+ tda998x_edid_delay_start(priv);
+ else
+ schedule_work(&priv->detect_work);
+
+ handled = true;
+ }
- if ((flag2 & INT_FLAGS_2_EDID_BLK_RD) && priv->wq_edid_wait) {
- priv->wq_edid_wait = 0;
- wake_up(&priv->wq_edid);
- handled = true;
+ if ((flag2 & INT_FLAGS_2_EDID_BLK_RD) && priv->wq_edid_wait) {
+ priv->wq_edid_wait = 0;
+ wake_up(&priv->wq_edid);
+ handled = true;
+ }
}
return IRQ_RETVAL(handled);
@@ -700,6 +718,8 @@ tda998x_write_avi(struct tda998x_priv *priv, struct drm_display_mode *mode)
tda998x_write_if(priv, DIP_IF_FLAGS_IF2, REG_IF2_HB0, &frame);
}
+/* Audio support */
+
static void tda998x_audio_mute(struct tda998x_priv *priv, bool on)
{
if (on) {
@@ -713,8 +733,7 @@ static void tda998x_audio_mute(struct tda998x_priv *priv, bool on)
static int
tda998x_configure_audio(struct tda998x_priv *priv,
- struct tda998x_audio_params *params,
- unsigned mode_clock)
+ struct tda998x_audio_params *params)
{
u8 buf[6], clksel_aip, clksel_fs, cts_n, adiv;
u32 n;
@@ -771,7 +790,7 @@ tda998x_configure_audio(struct tda998x_priv *priv,
* assume 100MHz requires larger divider.
*/
adiv = AUDIO_DIV_SERCLK_8;
- if (mode_clock > 100000)
+ if (priv->tmds_clock > 100000)
adiv++; /* AUDIO_DIV_SERCLK_16 */
/* S/PDIF asks for a larger divider */
@@ -819,58 +838,281 @@ tda998x_configure_audio(struct tda998x_priv *priv,
return tda998x_write_aif(priv, &params->cea);
}
-/* DRM encoder functions */
+static int tda998x_audio_hw_params(struct device *dev, void *data,
+ struct hdmi_codec_daifmt *daifmt,
+ struct hdmi_codec_params *params)
+{
+ struct tda998x_priv *priv = dev_get_drvdata(dev);
+ int i, ret;
+ struct tda998x_audio_params audio = {
+ .sample_width = params->sample_width,
+ .sample_rate = params->sample_rate,
+ .cea = params->cea,
+ };
+
+ memcpy(audio.status, params->iec.status,
+ min(sizeof(audio.status), sizeof(params->iec.status)));
-static void tda998x_encoder_set_config(struct tda998x_priv *priv,
- const struct tda998x_encoder_params *p)
+ switch (daifmt->fmt) {
+ case HDMI_I2S:
+ if (daifmt->bit_clk_inv || daifmt->frame_clk_inv ||
+ daifmt->bit_clk_master || daifmt->frame_clk_master) {
+ dev_err(dev, "%s: Bad flags %d %d %d %d\n", __func__,
+ daifmt->bit_clk_inv, daifmt->frame_clk_inv,
+ daifmt->bit_clk_master,
+ daifmt->frame_clk_master);
+ return -EINVAL;
+ }
+ for (i = 0; i < ARRAY_SIZE(priv->audio_port); i++)
+ if (priv->audio_port[i].format == AFMT_I2S)
+ audio.config = priv->audio_port[i].config;
+ audio.format = AFMT_I2S;
+ break;
+ case HDMI_SPDIF:
+ for (i = 0; i < ARRAY_SIZE(priv->audio_port); i++)
+ if (priv->audio_port[i].format == AFMT_SPDIF)
+ audio.config = priv->audio_port[i].config;
+ audio.format = AFMT_SPDIF;
+ break;
+ default:
+ dev_err(dev, "%s: Invalid format %d\n", __func__, daifmt->fmt);
+ return -EINVAL;
+ }
+
+ if (audio.config == 0) {
+ dev_err(dev, "%s: No audio configuration found\n", __func__);
+ return -EINVAL;
+ }
+
+ mutex_lock(&priv->audio_mutex);
+ if (priv->supports_infoframes && priv->sink_has_audio)
+ ret = tda998x_configure_audio(priv, &audio);
+ else
+ ret = 0;
+
+ if (ret == 0)
+ priv->audio_params = audio;
+ mutex_unlock(&priv->audio_mutex);
+
+ return ret;
+}
+
+static void tda998x_audio_shutdown(struct device *dev, void *data)
{
- priv->vip_cntrl_0 = VIP_CNTRL_0_SWAP_A(p->swap_a) |
- (p->mirr_a ? VIP_CNTRL_0_MIRR_A : 0) |
- VIP_CNTRL_0_SWAP_B(p->swap_b) |
- (p->mirr_b ? VIP_CNTRL_0_MIRR_B : 0);
- priv->vip_cntrl_1 = VIP_CNTRL_1_SWAP_C(p->swap_c) |
- (p->mirr_c ? VIP_CNTRL_1_MIRR_C : 0) |
- VIP_CNTRL_1_SWAP_D(p->swap_d) |
- (p->mirr_d ? VIP_CNTRL_1_MIRR_D : 0);
- priv->vip_cntrl_2 = VIP_CNTRL_2_SWAP_E(p->swap_e) |
- (p->mirr_e ? VIP_CNTRL_2_MIRR_E : 0) |
- VIP_CNTRL_2_SWAP_F(p->swap_f) |
- (p->mirr_f ? VIP_CNTRL_2_MIRR_F : 0);
+ struct tda998x_priv *priv = dev_get_drvdata(dev);
- priv->audio_params = p->audio_params;
+ mutex_lock(&priv->audio_mutex);
+
+ reg_write(priv, REG_ENA_AP, 0);
+
+ priv->audio_params.format = AFMT_UNUSED;
+
+ mutex_unlock(&priv->audio_mutex);
}
-static void tda998x_encoder_dpms(struct drm_encoder *encoder, int mode)
+static int tda998x_audio_digital_mute(struct device *dev, void *data,
+ bool enable)
{
- struct tda998x_priv *priv = enc_to_tda998x_priv(encoder);
+ struct tda998x_priv *priv = dev_get_drvdata(dev);
- /* we only care about on or off: */
- if (mode != DRM_MODE_DPMS_ON)
- mode = DRM_MODE_DPMS_OFF;
+ mutex_lock(&priv->audio_mutex);
- if (mode == priv->dpms)
- return;
+ tda998x_audio_mute(priv, enable);
- switch (mode) {
- case DRM_MODE_DPMS_ON:
- /* enable video ports, audio will be enabled later */
- reg_write(priv, REG_ENA_VP_0, 0xff);
- reg_write(priv, REG_ENA_VP_1, 0xff);
- reg_write(priv, REG_ENA_VP_2, 0xff);
- /* set muxing after enabling ports: */
- reg_write(priv, REG_VIP_CNTRL_0, priv->vip_cntrl_0);
- reg_write(priv, REG_VIP_CNTRL_1, priv->vip_cntrl_1);
- reg_write(priv, REG_VIP_CNTRL_2, priv->vip_cntrl_2);
- break;
- case DRM_MODE_DPMS_OFF:
- /* disable video ports */
- reg_write(priv, REG_ENA_VP_0, 0x00);
- reg_write(priv, REG_ENA_VP_1, 0x00);
- reg_write(priv, REG_ENA_VP_2, 0x00);
- break;
+ mutex_unlock(&priv->audio_mutex);
+ return 0;
+}
+
+static int tda998x_audio_get_eld(struct device *dev, void *data,
+ uint8_t *buf, size_t len)
+{
+ struct tda998x_priv *priv = dev_get_drvdata(dev);
+
+ mutex_lock(&priv->audio_mutex);
+ memcpy(buf, priv->connector.eld,
+ min(sizeof(priv->connector.eld), len));
+ mutex_unlock(&priv->audio_mutex);
+
+ return 0;
+}
+
+static const struct hdmi_codec_ops audio_codec_ops = {
+ .hw_params = tda998x_audio_hw_params,
+ .audio_shutdown = tda998x_audio_shutdown,
+ .digital_mute = tda998x_audio_digital_mute,
+ .get_eld = tda998x_audio_get_eld,
+};
+
+static int tda998x_audio_codec_init(struct tda998x_priv *priv,
+ struct device *dev)
+{
+ struct hdmi_codec_pdata codec_data = {
+ .ops = &audio_codec_ops,
+ .max_i2s_channels = 2,
+ };
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(priv->audio_port); i++) {
+ if (priv->audio_port[i].format == AFMT_I2S &&
+ priv->audio_port[i].config != 0)
+ codec_data.i2s = 1;
+ if (priv->audio_port[i].format == AFMT_SPDIF &&
+ priv->audio_port[i].config != 0)
+ codec_data.spdif = 1;
}
- priv->dpms = mode;
+ priv->audio_pdev = platform_device_register_data(
+ dev, HDMI_CODEC_DRV_NAME, PLATFORM_DEVID_AUTO,
+ &codec_data, sizeof(codec_data));
+
+ return PTR_ERR_OR_ZERO(priv->audio_pdev);
+}
+
+/* DRM connector functions */
+
+static int tda998x_connector_dpms(struct drm_connector *connector, int mode)
+{
+ if (drm_core_check_feature(connector->dev, DRIVER_ATOMIC))
+ return drm_atomic_helper_connector_dpms(connector, mode);
+ else
+ return drm_helper_connector_dpms(connector, mode);
+}
+
+static int tda998x_connector_fill_modes(struct drm_connector *connector,
+ uint32_t maxX, uint32_t maxY)
+{
+ struct tda998x_priv *priv = conn_to_tda998x_priv(connector);
+ int ret;
+
+ mutex_lock(&priv->audio_mutex);
+ ret = drm_helper_probe_single_connector_modes(connector, maxX, maxY);
+
+ if (connector->edid_blob_ptr) {
+ struct edid *edid = (void *)connector->edid_blob_ptr->data;
+
+ priv->sink_has_audio = drm_detect_monitor_audio(edid);
+ } else {
+ priv->sink_has_audio = false;
+ }
+ mutex_unlock(&priv->audio_mutex);
+
+ return ret;
+}
+
+static enum drm_connector_status
+tda998x_connector_detect(struct drm_connector *connector, bool force)
+{
+ struct tda998x_priv *priv = conn_to_tda998x_priv(connector);
+ u8 val = cec_read(priv, REG_CEC_RXSHPDLEV);
+
+ return (val & CEC_RXSHPDLEV_HPD) ? connector_status_connected :
+ connector_status_disconnected;
+}
+
+static void tda998x_connector_destroy(struct drm_connector *connector)
+{
+ drm_connector_cleanup(connector);
+}
+
+static const struct drm_connector_funcs tda998x_connector_funcs = {
+ .dpms = tda998x_connector_dpms,
+ .reset = drm_atomic_helper_connector_reset,
+ .fill_modes = tda998x_connector_fill_modes,
+ .detect = tda998x_connector_detect,
+ .destroy = tda998x_connector_destroy,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+};
+
+static int read_edid_block(void *data, u8 *buf, unsigned int blk, size_t length)
+{
+ struct tda998x_priv *priv = data;
+ u8 offset, segptr;
+ int ret, i;
+
+ offset = (blk & 1) ? 128 : 0;
+ segptr = blk / 2;
+
+ reg_write(priv, REG_DDC_ADDR, 0xa0);
+ reg_write(priv, REG_DDC_OFFS, offset);
+ reg_write(priv, REG_DDC_SEGM_ADDR, 0x60);
+ reg_write(priv, REG_DDC_SEGM, segptr);
+
+ /* enable reading EDID: */
+ priv->wq_edid_wait = 1;
+ reg_write(priv, REG_EDID_CTRL, 0x1);
+
+ /* flag must be cleared by sw: */
+ reg_write(priv, REG_EDID_CTRL, 0x0);
+
+ /* wait for block read to complete: */
+ if (priv->hdmi->irq) {
+ i = wait_event_timeout(priv->wq_edid,
+ !priv->wq_edid_wait,
+ msecs_to_jiffies(100));
+ if (i < 0) {
+ dev_err(&priv->hdmi->dev, "read edid wait err %d\n", i);
+ return i;
+ }
+ } else {
+ for (i = 100; i > 0; i--) {
+ msleep(1);
+ ret = reg_read(priv, REG_INT_FLAGS_2);
+ if (ret < 0)
+ return ret;
+ if (ret & INT_FLAGS_2_EDID_BLK_RD)
+ break;
+ }
+ }
+
+ if (i == 0) {
+ dev_err(&priv->hdmi->dev, "read edid timeout\n");
+ return -ETIMEDOUT;
+ }
+
+ ret = reg_read_range(priv, REG_EDID_DATA_0, buf, length);
+ if (ret != length) {
+ dev_err(&priv->hdmi->dev, "failed to read edid block %d: %d\n",
+ blk, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int tda998x_connector_get_modes(struct drm_connector *connector)
+{
+ struct tda998x_priv *priv = conn_to_tda998x_priv(connector);
+ struct edid *edid;
+ int n;
+
+ /*
+ * If we get killed while waiting for the HPD timeout, return
+ * no modes found: we are not in a restartable path, so we
+ * can't handle signals gracefully.
+ */
+ if (tda998x_edid_delay_wait(priv))
+ return 0;
+
+ if (priv->rev == TDA19988)
+ reg_clear(priv, REG_TX4, TX4_PD_RAM);
+
+ edid = drm_do_get_edid(connector, read_edid_block, priv);
+
+ if (priv->rev == TDA19988)
+ reg_set(priv, REG_TX4, TX4_PD_RAM);
+
+ if (!edid) {
+ dev_warn(&priv->hdmi->dev, "failed to read EDID\n");
+ return 0;
+ }
+
+ drm_mode_connector_update_edid_property(connector, edid);
+ n = drm_add_edid_modes(connector, edid);
+ drm_edid_to_eld(connector, edid);
+
+ kfree(edid);
+
+ return n;
}
static int tda998x_connector_mode_valid(struct drm_connector *connector,
@@ -888,6 +1130,80 @@ static int tda998x_connector_mode_valid(struct drm_connector *connector,
return MODE_OK;
}
+static struct drm_encoder *
+tda998x_connector_best_encoder(struct drm_connector *connector)
+{
+ struct tda998x_priv *priv = conn_to_tda998x_priv(connector);
+
+ return &priv->encoder;
+}
+
+static const struct drm_connector_helper_funcs tda998x_connector_helper_funcs = {
+ .get_modes = tda998x_connector_get_modes,
+ .mode_valid = tda998x_connector_mode_valid,
+ .best_encoder = tda998x_connector_best_encoder,
+};
+
+static int tda998x_connector_init(struct tda998x_priv *priv,
+ struct drm_device *drm)
+{
+ struct drm_connector *connector = &priv->connector;
+ int ret;
+
+ connector->interlace_allowed = 1;
+
+ if (priv->hdmi->irq)
+ connector->polled = DRM_CONNECTOR_POLL_HPD;
+ else
+ connector->polled = DRM_CONNECTOR_POLL_CONNECT |
+ DRM_CONNECTOR_POLL_DISCONNECT;
+
+ drm_connector_helper_add(connector, &tda998x_connector_helper_funcs);
+ ret = drm_connector_init(drm, connector, &tda998x_connector_funcs,
+ DRM_MODE_CONNECTOR_HDMIA);
+ if (ret)
+ return ret;
+
+ drm_mode_connector_attach_encoder(&priv->connector, &priv->encoder);
+
+ return 0;
+}
+
+/* DRM encoder functions */
+
+static void tda998x_encoder_dpms(struct drm_encoder *encoder, int mode)
+{
+ struct tda998x_priv *priv = enc_to_tda998x_priv(encoder);
+ bool on;
+
+ /* we only care about on or off: */
+ on = mode == DRM_MODE_DPMS_ON;
+
+ if (on == priv->is_on)
+ return;
+
+ if (on) {
+ /* enable video ports, audio will be enabled later */
+ reg_write(priv, REG_ENA_VP_0, 0xff);
+ reg_write(priv, REG_ENA_VP_1, 0xff);
+ reg_write(priv, REG_ENA_VP_2, 0xff);
+ /* set muxing after enabling ports: */
+ reg_write(priv, REG_VIP_CNTRL_0, priv->vip_cntrl_0);
+ reg_write(priv, REG_VIP_CNTRL_1, priv->vip_cntrl_1);
+ reg_write(priv, REG_VIP_CNTRL_2, priv->vip_cntrl_2);
+
+ priv->is_on = true;
+ } else {
+ /* disable video ports */
+ reg_write(priv, REG_ENA_VP_0, 0x00);
+ reg_write(priv, REG_ENA_VP_1, 0x00);
+ reg_write(priv, REG_ENA_VP_2, 0x00);
+
+ priv->is_on = false;
+ }
+}
+
static void
tda998x_encoder_mode_set(struct drm_encoder *encoder,
struct drm_display_mode *mode,
@@ -971,6 +1287,8 @@ tda998x_encoder_mode_set(struct drm_encoder *encoder,
div = 3;
}
+ mutex_lock(&priv->audio_mutex);
+
/* mute the audio FIFO: */
reg_set(priv, REG_AIP_CNTRL_0, AIP_CNTRL_0_RST_FIFO);
@@ -982,6 +1300,7 @@ tda998x_encoder_mode_set(struct drm_encoder *encoder,
/* no pre-filter or interpolator: */
reg_write(priv, REG_HVF_CNTRL_0, HVF_CNTRL_0_PREFIL(0) |
HVF_CNTRL_0_INTPOL(0));
+ reg_set(priv, REG_FEAT_POWERDOWN, FEAT_POWERDOWN_PREFILT);
reg_write(priv, REG_VIP_CNTRL_5, VIP_CNTRL_5_SP_CNT(0));
reg_write(priv, REG_VIP_CNTRL_4, VIP_CNTRL_4_BLANKIT(0) |
VIP_CNTRL_4_BLC(0));
@@ -1004,6 +1323,7 @@ tda998x_encoder_mode_set(struct drm_encoder *encoder,
/* set color matrix bypass flag: */
reg_write(priv, REG_MAT_CONTRL, MAT_CONTRL_MAT_BP |
MAT_CONTRL_MAT_SC(1));
+ reg_set(priv, REG_FEAT_POWERDOWN, FEAT_POWERDOWN_CSC);
/* set BIAS tmds value: */
reg_write(priv, REG_ANA_GENERAL, 0x09);
@@ -1064,8 +1384,22 @@ tda998x_encoder_mode_set(struct drm_encoder *encoder,
/* must be last register set: */
reg_write(priv, REG_TBG_CNTRL_0, 0);
- /* Only setup the info frames if the sink is HDMI */
- if (priv->is_hdmi_sink) {
+ priv->tmds_clock = adjusted_mode->clock;
+
+ /* CEA-861B section 6 says that:
+ * CEA version 1 (CEA-861) has no support for infoframes.
+ * CEA version 2 (CEA-861A) supports version 1 AVI infoframes,
+ * and optional basic audio.
+ * CEA version 3 (CEA-861B) supports version 1 and 2 AVI infoframes,
+ * and optional digital audio, with audio infoframes.
+ *
+ * Since we only support generation of version 2 AVI infoframes,
+ * ignore CEA version 2 and below (iow, behave as if we're a
+ * CEA-861 source.)
+ */
+ priv->supports_infoframes = priv->connector.display_info.cea_rev >= 3;
+
+ if (priv->supports_infoframes) {
/* We need to turn HDMI HDCP stuff on to get audio through */
reg &= ~TBG_CNTRL_1_DWIN_DIS;
reg_write(priv, REG_TBG_CNTRL_1, reg);
@@ -1074,127 +1408,12 @@ tda998x_encoder_mode_set(struct drm_encoder *encoder,
tda998x_write_avi(priv, adjusted_mode);
- if (priv->audio_params.format != AFMT_UNUSED) {
- mutex_lock(&priv->audio_mutex);
- tda998x_configure_audio(priv,
- &priv->audio_params,
- adjusted_mode->clock);
- mutex_unlock(&priv->audio_mutex);
- }
- }
-}
-
-static enum drm_connector_status
-tda998x_connector_detect(struct drm_connector *connector, bool force)
-{
- struct tda998x_priv *priv = conn_to_tda998x_priv(connector);
- u8 val = cec_read(priv, REG_CEC_RXSHPDLEV);
-
- return (val & CEC_RXSHPDLEV_HPD) ? connector_status_connected :
- connector_status_disconnected;
-}
-
-static int read_edid_block(void *data, u8 *buf, unsigned int blk, size_t length)
-{
- struct tda998x_priv *priv = data;
- u8 offset, segptr;
- int ret, i;
-
- offset = (blk & 1) ? 128 : 0;
- segptr = blk / 2;
-
- reg_write(priv, REG_DDC_ADDR, 0xa0);
- reg_write(priv, REG_DDC_OFFS, offset);
- reg_write(priv, REG_DDC_SEGM_ADDR, 0x60);
- reg_write(priv, REG_DDC_SEGM, segptr);
-
- /* enable reading EDID: */
- priv->wq_edid_wait = 1;
- reg_write(priv, REG_EDID_CTRL, 0x1);
-
- /* flag must be cleared by sw: */
- reg_write(priv, REG_EDID_CTRL, 0x0);
-
- /* wait for block read to complete: */
- if (priv->hdmi->irq) {
- i = wait_event_timeout(priv->wq_edid,
- !priv->wq_edid_wait,
- msecs_to_jiffies(100));
- if (i < 0) {
- dev_err(&priv->hdmi->dev, "read edid wait err %d\n", i);
- return i;
- }
- } else {
- for (i = 100; i > 0; i--) {
- msleep(1);
- ret = reg_read(priv, REG_INT_FLAGS_2);
- if (ret < 0)
- return ret;
- if (ret & INT_FLAGS_2_EDID_BLK_RD)
- break;
- }
- }
-
- if (i == 0) {
- dev_err(&priv->hdmi->dev, "read edid timeout\n");
- return -ETIMEDOUT;
+ if (priv->audio_params.format != AFMT_UNUSED &&
+ priv->sink_has_audio)
+ tda998x_configure_audio(priv, &priv->audio_params);
}
- ret = reg_read_range(priv, REG_EDID_DATA_0, buf, length);
- if (ret != length) {
- dev_err(&priv->hdmi->dev, "failed to read edid block %d: %d\n",
- blk, ret);
- return ret;
- }
-
- return 0;
-}
-
-static int tda998x_connector_get_modes(struct drm_connector *connector)
-{
- struct tda998x_priv *priv = conn_to_tda998x_priv(connector);
- struct edid *edid;
- int n;
-
- /*
- * If we get killed while waiting for the HPD timeout, return
- * no modes found: we are not in a restartable path, so we
- * can't handle signals gracefully.
- */
- if (tda998x_edid_delay_wait(priv))
- return 0;
-
- if (priv->rev == TDA19988)
- reg_clear(priv, REG_TX4, TX4_PD_RAM);
-
- edid = drm_do_get_edid(connector, read_edid_block, priv);
-
- if (priv->rev == TDA19988)
- reg_set(priv, REG_TX4, TX4_PD_RAM);
-
- if (!edid) {
- dev_warn(&priv->hdmi->dev, "failed to read EDID\n");
- return 0;
- }
-
- drm_mode_connector_update_edid_property(connector, edid);
- n = drm_add_edid_modes(connector, edid);
- priv->is_hdmi_sink = drm_detect_hdmi_monitor(edid);
- drm_edid_to_eld(connector, edid);
-
- kfree(edid);
-
- return n;
-}
-
-static void tda998x_encoder_set_polling(struct tda998x_priv *priv,
- struct drm_connector *connector)
-{
- if (priv->hdmi->irq)
- connector->polled = DRM_CONNECTOR_POLL_HPD;
- else
- connector->polled = DRM_CONNECTOR_POLL_CONNECT |
- DRM_CONNECTOR_POLL_DISCONNECT;
+ mutex_unlock(&priv->audio_mutex);
}
static void tda998x_destroy(struct tda998x_priv *priv)
@@ -1215,145 +1434,6 @@ static void tda998x_destroy(struct tda998x_priv *priv)
i2c_unregister_device(priv->cec);
}
-static int tda998x_audio_hw_params(struct device *dev, void *data,
- struct hdmi_codec_daifmt *daifmt,
- struct hdmi_codec_params *params)
-{
- struct tda998x_priv *priv = dev_get_drvdata(dev);
- int i, ret;
- struct tda998x_audio_params audio = {
- .sample_width = params->sample_width,
- .sample_rate = params->sample_rate,
- .cea = params->cea,
- };
-
- if (!priv->encoder.crtc)
- return -ENODEV;
-
- memcpy(audio.status, params->iec.status,
- min(sizeof(audio.status), sizeof(params->iec.status)));
-
- switch (daifmt->fmt) {
- case HDMI_I2S:
- if (daifmt->bit_clk_inv || daifmt->frame_clk_inv ||
- daifmt->bit_clk_master || daifmt->frame_clk_master) {
- dev_err(dev, "%s: Bad flags %d %d %d %d\n", __func__,
- daifmt->bit_clk_inv, daifmt->frame_clk_inv,
- daifmt->bit_clk_master,
- daifmt->frame_clk_master);
- return -EINVAL;
- }
- for (i = 0; i < ARRAY_SIZE(priv->audio_port); i++)
- if (priv->audio_port[i].format == AFMT_I2S)
- audio.config = priv->audio_port[i].config;
- audio.format = AFMT_I2S;
- break;
- case HDMI_SPDIF:
- for (i = 0; i < ARRAY_SIZE(priv->audio_port); i++)
- if (priv->audio_port[i].format == AFMT_SPDIF)
- audio.config = priv->audio_port[i].config;
- audio.format = AFMT_SPDIF;
- break;
- default:
- dev_err(dev, "%s: Invalid format %d\n", __func__, daifmt->fmt);
- return -EINVAL;
- }
-
- if (audio.config == 0) {
- dev_err(dev, "%s: No audio configutation found\n", __func__);
- return -EINVAL;
- }
-
- mutex_lock(&priv->audio_mutex);
- ret = tda998x_configure_audio(priv,
- &audio,
- priv->encoder.crtc->hwmode.clock);
-
- if (ret == 0)
- priv->audio_params = audio;
- mutex_unlock(&priv->audio_mutex);
-
- return ret;
-}
-
-static void tda998x_audio_shutdown(struct device *dev, void *data)
-{
- struct tda998x_priv *priv = dev_get_drvdata(dev);
-
- mutex_lock(&priv->audio_mutex);
-
- reg_write(priv, REG_ENA_AP, 0);
-
- priv->audio_params.format = AFMT_UNUSED;
-
- mutex_unlock(&priv->audio_mutex);
-}
-
-int tda998x_audio_digital_mute(struct device *dev, void *data, bool enable)
-{
- struct tda998x_priv *priv = dev_get_drvdata(dev);
-
- mutex_lock(&priv->audio_mutex);
-
- tda998x_audio_mute(priv, enable);
-
- mutex_unlock(&priv->audio_mutex);
- return 0;
-}
-
-static int tda998x_audio_get_eld(struct device *dev, void *data,
- uint8_t *buf, size_t len)
-{
- struct tda998x_priv *priv = dev_get_drvdata(dev);
- struct drm_mode_config *config = &priv->encoder.dev->mode_config;
- struct drm_connector *connector;
- int ret = -ENODEV;
-
- mutex_lock(&config->mutex);
- list_for_each_entry(connector, &config->connector_list, head) {
- if (&priv->encoder == connector->encoder) {
- memcpy(buf, connector->eld,
- min(sizeof(connector->eld), len));
- ret = 0;
- }
- }
- mutex_unlock(&config->mutex);
-
- return ret;
-}
-
-static const struct hdmi_codec_ops audio_codec_ops = {
- .hw_params = tda998x_audio_hw_params,
- .audio_shutdown = tda998x_audio_shutdown,
- .digital_mute = tda998x_audio_digital_mute,
- .get_eld = tda998x_audio_get_eld,
-};
-
-static int tda998x_audio_codec_init(struct tda998x_priv *priv,
- struct device *dev)
-{
- struct hdmi_codec_pdata codec_data = {
- .ops = &audio_codec_ops,
- .max_i2s_channels = 2,
- };
- int i;
-
- for (i = 0; i < ARRAY_SIZE(priv->audio_port); i++) {
- if (priv->audio_port[i].format == AFMT_I2S &&
- priv->audio_port[i].config != 0)
- codec_data.i2s = 1;
- if (priv->audio_port[i].format == AFMT_SPDIF &&
- priv->audio_port[i].config != 0)
- codec_data.spdif = 1;
- }
-
- priv->audio_pdev = platform_device_register_data(
- dev, HDMI_CODEC_DRV_NAME, PLATFORM_DEVID_AUTO,
- &codec_data, sizeof(codec_data));
-
- return PTR_ERR_OR_ZERO(priv->audio_pdev);
-}
-
/* I2C driver functions */
static int tda998x_get_audio_ports(struct tda998x_priv *priv,
@@ -1403,22 +1483,21 @@ static int tda998x_create(struct i2c_client *client, struct tda998x_priv *priv)
struct device_node *np = client->dev.of_node;
u32 video;
int rev_lo, rev_hi, ret;
- unsigned short cec_addr;
+
+ mutex_init(&priv->audio_mutex); /* Protect access from audio thread */
priv->vip_cntrl_0 = VIP_CNTRL_0_SWAP_A(2) | VIP_CNTRL_0_SWAP_B(3);
priv->vip_cntrl_1 = VIP_CNTRL_1_SWAP_C(0) | VIP_CNTRL_1_SWAP_D(1);
priv->vip_cntrl_2 = VIP_CNTRL_2_SWAP_E(4) | VIP_CNTRL_2_SWAP_F(5);
+ /* CEC I2C address bound to TDA998x I2C addr by configuration pins */
+ priv->cec_addr = 0x34 + (client->addr & 0x03);
priv->current_page = 0xff;
priv->hdmi = client;
- /* CEC I2C address bound to TDA998x I2C addr by configuration pins */
- cec_addr = 0x34 + (client->addr & 0x03);
- priv->cec = i2c_new_dummy(client->adapter, cec_addr);
+ priv->cec = i2c_new_dummy(client->adapter, priv->cec_addr);
if (!priv->cec)
return -ENODEV;
- priv->dpms = DRM_MODE_DPMS_OFF;
-
mutex_init(&priv->mutex); /* protect the page access */
init_waitqueue_head(&priv->edid_delay_waitq);
setup_timer(&priv->edid_delay_timer, tda998x_edid_delay_done,
@@ -1478,7 +1557,7 @@ static int tda998x_create(struct i2c_client *client, struct tda998x_priv *priv)
/* initialize the optional IRQ */
if (client->irq) {
- int irqf_trigger;
+ unsigned long irq_flags;
/* init read EDID waitqueue and HDP work */
init_waitqueue_head(&priv->wq_edid);
@@ -1488,11 +1567,11 @@ static int tda998x_create(struct i2c_client *client, struct tda998x_priv *priv)
reg_read(priv, REG_INT_FLAGS_1);
reg_read(priv, REG_INT_FLAGS_2);
- irqf_trigger =
+ irq_flags =
irqd_get_trigger_type(irq_get_irq_data(client->irq));
+ irq_flags |= IRQF_SHARED | IRQF_ONESHOT;
ret = request_threaded_irq(client->irq, NULL,
- tda998x_irq_thread,
- irqf_trigger | IRQF_ONESHOT,
+ tda998x_irq_thread, irq_flags,
"tda998x", priv);
if (ret) {
dev_err(&client->dev,
@@ -1519,8 +1598,6 @@ static int tda998x_create(struct i2c_client *client, struct tda998x_priv *priv)
priv->vip_cntrl_2 = video;
}
- mutex_init(&priv->audio_mutex); /* Protect access from audio thread */
-
ret = tda998x_get_audio_ports(priv, np);
if (ret)
goto fail;
@@ -1567,45 +1644,25 @@ static const struct drm_encoder_funcs tda998x_encoder_funcs = {
.destroy = tda998x_encoder_destroy,
};
-static struct drm_encoder *
-tda998x_connector_best_encoder(struct drm_connector *connector)
-{
- struct tda998x_priv *priv = conn_to_tda998x_priv(connector);
-
- return &priv->encoder;
-}
-
-static
-const struct drm_connector_helper_funcs tda998x_connector_helper_funcs = {
- .get_modes = tda998x_connector_get_modes,
- .mode_valid = tda998x_connector_mode_valid,
- .best_encoder = tda998x_connector_best_encoder,
-};
-
-static void tda998x_connector_destroy(struct drm_connector *connector)
+static void tda998x_set_config(struct tda998x_priv *priv,
+ const struct tda998x_encoder_params *p)
{
- drm_connector_unregister(connector);
- drm_connector_cleanup(connector);
-}
+ priv->vip_cntrl_0 = VIP_CNTRL_0_SWAP_A(p->swap_a) |
+ (p->mirr_a ? VIP_CNTRL_0_MIRR_A : 0) |
+ VIP_CNTRL_0_SWAP_B(p->swap_b) |
+ (p->mirr_b ? VIP_CNTRL_0_MIRR_B : 0);
+ priv->vip_cntrl_1 = VIP_CNTRL_1_SWAP_C(p->swap_c) |
+ (p->mirr_c ? VIP_CNTRL_1_MIRR_C : 0) |
+ VIP_CNTRL_1_SWAP_D(p->swap_d) |
+ (p->mirr_d ? VIP_CNTRL_1_MIRR_D : 0);
+ priv->vip_cntrl_2 = VIP_CNTRL_2_SWAP_E(p->swap_e) |
+ (p->mirr_e ? VIP_CNTRL_2_MIRR_E : 0) |
+ VIP_CNTRL_2_SWAP_F(p->swap_f) |
+ (p->mirr_f ? VIP_CNTRL_2_MIRR_F : 0);
-static int tda998x_connector_dpms(struct drm_connector *connector, int mode)
-{
- if (drm_core_check_feature(connector->dev, DRIVER_ATOMIC))
- return drm_atomic_helper_connector_dpms(connector, mode);
- else
- return drm_helper_connector_dpms(connector, mode);
+ priv->audio_params = p->audio_params;
}
-static const struct drm_connector_funcs tda998x_connector_funcs = {
- .dpms = tda998x_connector_dpms,
- .reset = drm_atomic_helper_connector_reset,
- .fill_modes = drm_helper_probe_single_connector_modes,
- .detect = tda998x_connector_detect,
- .destroy = tda998x_connector_destroy,
- .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
- .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
-};
-
static int tda998x_bind(struct device *dev, struct device *master, void *data)
{
struct tda998x_encoder_params *params = dev->platform_data;
@@ -1630,7 +1687,6 @@ static int tda998x_bind(struct device *dev, struct device *master, void *data)
crtcs = 1 << 0;
}
- priv->connector.interlace_allowed = 1;
priv->encoder.possible_crtcs = crtcs;
ret = tda998x_create(client, priv);
@@ -1638,9 +1694,7 @@ static int tda998x_bind(struct device *dev, struct device *master, void *data)
return ret;
if (!dev->of_node && params)
- tda998x_encoder_set_config(priv, params);
-
- tda998x_encoder_set_polling(priv, &priv->connector);
+ tda998x_set_config(priv, params);
drm_encoder_helper_add(&priv->encoder, &tda998x_encoder_helper_funcs);
ret = drm_encoder_init(drm, &priv->encoder, &tda998x_encoder_funcs,
@@ -1648,24 +1702,12 @@ static int tda998x_bind(struct device *dev, struct device *master, void *data)
if (ret)
goto err_encoder;
- drm_connector_helper_add(&priv->connector,
- &tda998x_connector_helper_funcs);
- ret = drm_connector_init(drm, &priv->connector,
- &tda998x_connector_funcs,
- DRM_MODE_CONNECTOR_HDMIA);
+ ret = tda998x_connector_init(priv, drm);
if (ret)
goto err_connector;
- ret = drm_connector_register(&priv->connector);
- if (ret)
- goto err_sysfs;
-
- drm_mode_connector_attach_encoder(&priv->connector, &priv->encoder);
-
return 0;
-err_sysfs:
- drm_connector_cleanup(&priv->connector);
err_connector:
drm_encoder_cleanup(&priv->encoder);
err_encoder:
@@ -1678,7 +1720,6 @@ static void tda998x_unbind(struct device *dev, struct device *master,
{
struct tda998x_priv *priv = dev_get_drvdata(dev);
- drm_connector_unregister(&priv->connector);
drm_connector_cleanup(&priv->connector);
drm_encoder_cleanup(&priv->encoder);
tda998x_destroy(priv);
@@ -1692,6 +1733,10 @@ static const struct component_ops tda998x_ops = {
static int
tda998x_probe(struct i2c_client *client, const struct i2c_device_id *id)
{
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
+ dev_warn(&client->dev, "adapter does not support I2C\n");
+ return -EIO;
+ }
return component_add(&client->dev, &tda998x_ops);
}
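Editor's aside (illustration, not part of the patch): the CEC address derivation folded into tda998x_create() above follows the TDA998x pin strapping, where the same two configuration-pin bits select both the HDMI core and the CEC core slave addresses. A minimal sketch of the mapping, assuming the usual 0x70..0x73 HDMI address range:

#include <stdio.h>

/* Illustration only: enumerate the four pin-strap combinations and the
 * CEC address each implies, mirroring "0x34 + (client->addr & 0x03)".
 */
int main(void)
{
	unsigned short hdmi_addr;

	for (hdmi_addr = 0x70; hdmi_addr <= 0x73; hdmi_addr++)
		printf("HDMI core 0x%02x -> CEC core 0x%02x\n",
		       hdmi_addr, 0x34 + (hdmi_addr & 0x03));
	return 0;
}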
diff --git a/drivers/gpu/drm/i810/i810_dma.c b/drivers/gpu/drm/i810/i810_dma.c
index d91856779beb..ab4e6cbe1f8b 100644
--- a/drivers/gpu/drm/i810/i810_dma.c
+++ b/drivers/gpu/drm/i810/i810_dma.c
@@ -113,9 +113,7 @@ static const struct file_operations i810_buffer_fops = {
.release = drm_release,
.unlocked_ioctl = drm_ioctl,
.mmap = i810_mmap_buffers,
-#ifdef CONFIG_COMPAT
.compat_ioctl = drm_compat_ioctl,
-#endif
.llseek = noop_llseek,
};
diff --git a/drivers/gpu/drm/i810/i810_drv.c b/drivers/gpu/drm/i810/i810_drv.c
index 0be55dc1ef4b..02504a7cfaf2 100644
--- a/drivers/gpu/drm/i810/i810_drv.c
+++ b/drivers/gpu/drm/i810/i810_drv.c
@@ -49,9 +49,7 @@ static const struct file_operations i810_driver_fops = {
.unlocked_ioctl = drm_ioctl,
.mmap = drm_legacy_mmap,
.poll = drm_poll,
-#ifdef CONFIG_COMPAT
.compat_ioctl = drm_compat_ioctl,
-#endif
.llseek = noop_llseek,
};
diff --git a/drivers/gpu/drm/i915/Kconfig b/drivers/gpu/drm/i915/Kconfig
index 7769e469118f..183f5dc1c3f2 100644
--- a/drivers/gpu/drm/i915/Kconfig
+++ b/drivers/gpu/drm/i915/Kconfig
@@ -11,6 +11,7 @@ config DRM_I915
select DRM_KMS_HELPER
select DRM_PANEL
select DRM_MIPI_DSI
+ select RELAY
# i915 depends on ACPI_VIDEO when ACPI is enabled
# but for select to work, need to select ACPI_VIDEO's dependencies, ick
select BACKLIGHT_LCD_SUPPORT if ACPI
@@ -24,28 +25,59 @@ config DRM_I915
including 830M, 845G, 852GM, 855GM, 865G, 915G, 945G, 965G,
G35, G41, G43, G45 chipsets and Celeron, Pentium, Core i3,
Core i5, Core i7 as well as Atom CPUs with integrated graphics.
- If M is selected, the module will be called i915. AGP support
- is required for this driver to work. This driver is used by
- the Intel driver in X.org 6.8 and XFree86 4.4 and above. It
- replaces the older i830 module that supported a subset of the
- hardware in older X.org releases.
+
+ This driver is used by the Intel driver in X.org 6.8 and
+ XFree86 4.4 and above. It replaces the older i830 module that
+ supported a subset of the hardware in older X.org releases.
Note that the older i810/i815 chipsets require the use of the
i810 driver instead, and the Atom z5xx series has an entirely
different implementation.
-config DRM_I915_PRELIMINARY_HW_SUPPORT
- bool "Enable preliminary support for prerelease Intel hardware by default"
+ If "M" is selected, the module will be called i915.
+
+config DRM_I915_ALPHA_SUPPORT
+ bool "Enable alpha quality support for new Intel hardware by default"
depends on DRM_I915
default n
help
- Choose this option if you have prerelease Intel hardware and want the
- i915 driver to support it by default. You can enable such support at
- runtime with the module option i915.preliminary_hw_support=1; this
- option changes the default for that module option.
+ Choose this option if you have new Intel hardware and want to enable
+ the alpha quality i915 driver support for the hardware in this kernel
+ version. You can also enable the support at runtime using the module
+ parameter i915.alpha_support=1; this option changes the default for
+ that module parameter.
+
+ It is recommended to upgrade to a kernel version with proper support
+ as soon as it is available. Generally fixes for platforms with alpha
+ support are not backported to older kernels.
If in doubt, say "N".
+config DRM_I915_CAPTURE_ERROR
+ bool "Enable capturing GPU state following a hang"
+ depends on DRM_I915
+ default y
+ help
+ This option enables capturing the GPU state when a hang is detected.
+ This information is vital for triaging hangs and assists in debugging.
+ Please report any hang to
+ https://bugs.freedesktop.org/enter_bug.cgi?product=DRI
+ for triaging.
+
+ If in doubt, say "Y".
+
+config DRM_I915_COMPRESS_ERROR
+ bool "Compress GPU error state"
+ depends on DRM_I915_CAPTURE_ERROR
+ select ZLIB_DEFLATE
+ default y
+ help
+ This option selects ZLIB_DEFLATE if it isn't already
+ selected and causes any error state captured upon a GPU hang
+ to be compressed using zlib.
+
+ If in doubt, say "Y".
+
config DRM_I915_USERPTR
bool "Always enable userptr support"
depends on DRM_I915
@@ -60,6 +92,7 @@ config DRM_I915_USERPTR
config DRM_I915_GVT
bool "Enable Intel GVT-g graphics virtualization host support"
depends on DRM_I915
+ depends on 64BIT
default n
help
Choose this option if you want to enable Intel GVT-g graphics
@@ -79,6 +112,16 @@ config DRM_I915_GVT
If in doubt, say "N".
+config DRM_I915_GVT_KVMGT
+ tristate "Enable KVM/VFIO support for Intel GVT-g"
+ depends on DRM_I915_GVT
+ depends on KVM
+ depends on VFIO_MDEV && VFIO_MDEV_DEVICE
+ default n
+ help
+ Choose this option if you want to enable KVMGT support for
+ Intel GVT-g.
+
menu "drm/i915 Debugging"
depends on DRM_I915
depends on EXPERT
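The DRM_I915_COMPRESS_ERROR option introduced above runs the captured GPU error state through zlib before exposing it. As a rough userspace analogy (ordinary zlib here; the kernel uses its internal zlib_deflate interface, so treat this as a sketch of the idea, not the driver's code path):

#include <stdio.h>
#include <stdlib.h>
#include <zlib.h>

int main(void)
{
	/* Stand-in for a captured error-state buffer */
	const char state[] = "ring: RCS, head 0x00001234, tail 0x00005678";
	uLongf clen = compressBound(sizeof(state));
	Bytef *cbuf = malloc(clen);

	if (!cbuf || compress2(cbuf, &clen, (const Bytef *)state,
			       sizeof(state), Z_BEST_COMPRESSION) != Z_OK) {
		fprintf(stderr, "compress failed\n");
		return 1;
	}
	printf("compressed %zu bytes to %lu\n",
	       sizeof(state), (unsigned long)clen);
	free(cbuf);
	return 0;
}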
diff --git a/drivers/gpu/drm/i915/Kconfig.debug b/drivers/gpu/drm/i915/Kconfig.debug
index cee87bfd10c4..51ba630a134b 100644
--- a/drivers/gpu/drm/i915/Kconfig.debug
+++ b/drivers/gpu/drm/i915/Kconfig.debug
@@ -21,6 +21,7 @@ config DRM_I915_DEBUG
select PREEMPT_COUNT
select X86_MSR # used by igt/pm_rpm
select DRM_VGEM # used by igt/prime_vgem (dmabuf interop checks)
+ select DRM_DEBUG_MM if DRM=y
default n
help
Choose this option to turn on extra driver debugging that may affect
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index a998c2bce70a..3dea46af9fe6 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -33,19 +33,22 @@ i915-y += i915_cmd_parser.o \
i915_gem_dmabuf.o \
i915_gem_evict.o \
i915_gem_execbuffer.o \
- i915_gem_fence.o \
+ i915_gem_fence_reg.o \
i915_gem_gtt.o \
+ i915_gem_internal.o \
i915_gem.o \
i915_gem_render_state.o \
i915_gem_request.o \
i915_gem_shrinker.o \
i915_gem_stolen.o \
i915_gem_tiling.o \
+ i915_gem_timeline.o \
i915_gem_userptr.o \
- i915_gpu_error.o \
i915_trace_points.o \
+ i915_vma.o \
intel_breadcrumbs.o \
intel_engine_cs.o \
+ intel_hangcheck.o \
intel_lrc.o \
intel_mocs.o \
intel_ringbuffer.o \
@@ -102,11 +105,15 @@ i915-y += dvo_ch7017.o \
intel_dvo.o \
intel_hdmi.o \
intel_i2c.o \
+ intel_lspcon.o \
intel_lvds.o \
intel_panel.o \
intel_sdvo.o \
intel_tv.o
+# Post-mortem debug and GPU hang state capture
+i915-$(CONFIG_DRM_I915_CAPTURE_ERROR) += i915_gpu_error.o
+
# virtual gpu code
i915-y += i915_vgpu.o
diff --git a/drivers/gpu/drm/i915/gvt/Makefile b/drivers/gpu/drm/i915/gvt/Makefile
index d0f21a6ad60d..b123c20e2097 100644
--- a/drivers/gpu/drm/i915/gvt/Makefile
+++ b/drivers/gpu/drm/i915/gvt/Makefile
@@ -1,5 +1,8 @@
GVT_DIR := gvt
-GVT_SOURCE := gvt.o
+GVT_SOURCE := gvt.o aperture_gm.o handlers.o vgpu.o trace_points.o firmware.o \
+ interrupt.o gtt.o cfg_space.o opregion.o mmio.o display.o edid.o \
+ execlist.o scheduler.o sched_policy.o render.o cmd_parser.o
-ccflags-y += -I$(src) -I$(src)/$(GVT_DIR) -Wall
-i915-y += $(addprefix $(GVT_DIR)/, $(GVT_SOURCE))
+ccflags-y += -I$(src) -I$(src)/$(GVT_DIR) -Wall
+i915-y += $(addprefix $(GVT_DIR)/, $(GVT_SOURCE))
+obj-$(CONFIG_DRM_I915_GVT_KVMGT) += $(GVT_DIR)/kvmgt.o
diff --git a/drivers/gpu/drm/i915/gvt/aperture_gm.c b/drivers/gpu/drm/i915/gvt/aperture_gm.c
new file mode 100644
index 000000000000..0d41ebc4aea6
--- /dev/null
+++ b/drivers/gpu/drm/i915/gvt/aperture_gm.c
@@ -0,0 +1,352 @@
+/*
+ * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ * Kevin Tian <kevin.tian@intel.com>
+ * Dexuan Cui
+ *
+ * Contributors:
+ * Pei Zhang <pei.zhang@intel.com>
+ * Min He <min.he@intel.com>
+ * Niu Bing <bing.niu@intel.com>
+ * Yulei Zhang <yulei.zhang@intel.com>
+ * Zhenyu Wang <zhenyuw@linux.intel.com>
+ * Zhi Wang <zhi.a.wang@intel.com>
+ *
+ */
+
+#include "i915_drv.h"
+#include "gvt.h"
+
+#define MB_TO_BYTES(mb) ((mb) << 20ULL)
+#define BYTES_TO_MB(b) ((b) >> 20ULL)
+
+#define HOST_LOW_GM_SIZE MB_TO_BYTES(128)
+#define HOST_HIGH_GM_SIZE MB_TO_BYTES(384)
+#define HOST_FENCE 4
+
+static int alloc_gm(struct intel_vgpu *vgpu, bool high_gm)
+{
+ struct intel_gvt *gvt = vgpu->gvt;
+ struct drm_i915_private *dev_priv = gvt->dev_priv;
+ u32 alloc_flag, search_flag;
+ u64 start, end, size;
+ struct drm_mm_node *node;
+ int retried = 0;
+ int ret;
+
+ if (high_gm) {
+ search_flag = DRM_MM_SEARCH_BELOW;
+ alloc_flag = DRM_MM_CREATE_TOP;
+ node = &vgpu->gm.high_gm_node;
+ size = vgpu_hidden_sz(vgpu);
+ start = gvt_hidden_gmadr_base(gvt);
+ end = gvt_hidden_gmadr_end(gvt);
+ } else {
+ search_flag = DRM_MM_SEARCH_DEFAULT;
+ alloc_flag = DRM_MM_CREATE_DEFAULT;
+ node = &vgpu->gm.low_gm_node;
+ size = vgpu_aperture_sz(vgpu);
+ start = gvt_aperture_gmadr_base(gvt);
+ end = gvt_aperture_gmadr_end(gvt);
+ }
+
+ mutex_lock(&dev_priv->drm.struct_mutex);
+search_again:
+ ret = drm_mm_insert_node_in_range_generic(&dev_priv->ggtt.base.mm,
+ node, size, 4096, 0,
+ start, end, search_flag,
+ alloc_flag);
+ if (ret) {
+ ret = i915_gem_evict_something(&dev_priv->ggtt.base,
+ size, 4096, 0, start, end, 0);
+ if (ret == 0 && ++retried < 3)
+ goto search_again;
+
+ gvt_err("fail to alloc %s gm space from host, retried %d\n",
+ high_gm ? "high" : "low", retried);
+ }
+ mutex_unlock(&dev_priv->drm.struct_mutex);
+ return ret;
+}
+
+static int alloc_vgpu_gm(struct intel_vgpu *vgpu)
+{
+ struct intel_gvt *gvt = vgpu->gvt;
+ struct drm_i915_private *dev_priv = gvt->dev_priv;
+ int ret;
+
+ ret = alloc_gm(vgpu, false);
+ if (ret)
+ return ret;
+
+ ret = alloc_gm(vgpu, true);
+ if (ret)
+ goto out_free_aperture;
+
+ gvt_dbg_core("vgpu%d: alloc low GM start %llx size %llx\n", vgpu->id,
+ vgpu_aperture_offset(vgpu), vgpu_aperture_sz(vgpu));
+
+ gvt_dbg_core("vgpu%d: alloc high GM start %llx size %llx\n", vgpu->id,
+ vgpu_hidden_offset(vgpu), vgpu_hidden_sz(vgpu));
+
+ return 0;
+out_free_aperture:
+ mutex_lock(&dev_priv->drm.struct_mutex);
+ drm_mm_remove_node(&vgpu->gm.low_gm_node);
+ mutex_unlock(&dev_priv->drm.struct_mutex);
+ return ret;
+}
+
+static void free_vgpu_gm(struct intel_vgpu *vgpu)
+{
+ struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+
+ mutex_lock(&dev_priv->drm.struct_mutex);
+ drm_mm_remove_node(&vgpu->gm.low_gm_node);
+ drm_mm_remove_node(&vgpu->gm.high_gm_node);
+ mutex_unlock(&dev_priv->drm.struct_mutex);
+}
+
+/**
+ * intel_vgpu_write_fence - write fence registers owned by a vGPU
+ * @vgpu: vGPU instance
+ * @fence: vGPU fence register number
+ * @value: Fence register value to be written
+ *
+ * This function is used to write fence registers owned by a vGPU. The vGPU
+ * fence register number is translated into the HW fence register number.
+ *
+ */
+void intel_vgpu_write_fence(struct intel_vgpu *vgpu,
+ u32 fence, u64 value)
+{
+ struct intel_gvt *gvt = vgpu->gvt;
+ struct drm_i915_private *dev_priv = gvt->dev_priv;
+ struct drm_i915_fence_reg *reg;
+ i915_reg_t fence_reg_lo, fence_reg_hi;
+
+ assert_rpm_wakelock_held(dev_priv);
+
+ if (WARN_ON(fence > vgpu_fence_sz(vgpu)))
+ return;
+
+ reg = vgpu->fence.regs[fence];
+ if (WARN_ON(!reg))
+ return;
+
+ fence_reg_lo = FENCE_REG_GEN6_LO(reg->id);
+ fence_reg_hi = FENCE_REG_GEN6_HI(reg->id);
+
+ I915_WRITE(fence_reg_lo, 0);
+ POSTING_READ(fence_reg_lo);
+
+ I915_WRITE(fence_reg_hi, upper_32_bits(value));
+ I915_WRITE(fence_reg_lo, lower_32_bits(value));
+ POSTING_READ(fence_reg_lo);
+}
+
+static void free_vgpu_fence(struct intel_vgpu *vgpu)
+{
+ struct intel_gvt *gvt = vgpu->gvt;
+ struct drm_i915_private *dev_priv = gvt->dev_priv;
+ struct drm_i915_fence_reg *reg;
+ u32 i;
+
+ if (WARN_ON(!vgpu_fence_sz(vgpu)))
+ return;
+
+ intel_runtime_pm_get(dev_priv);
+
+ mutex_lock(&dev_priv->drm.struct_mutex);
+ for (i = 0; i < vgpu_fence_sz(vgpu); i++) {
+ reg = vgpu->fence.regs[i];
+ intel_vgpu_write_fence(vgpu, i, 0);
+ list_add_tail(&reg->link,
+ &dev_priv->mm.fence_list);
+ }
+ mutex_unlock(&dev_priv->drm.struct_mutex);
+
+ intel_runtime_pm_put(dev_priv);
+}
+
+static int alloc_vgpu_fence(struct intel_vgpu *vgpu)
+{
+ struct intel_gvt *gvt = vgpu->gvt;
+ struct drm_i915_private *dev_priv = gvt->dev_priv;
+ struct drm_i915_fence_reg *reg;
+ int i;
+ struct list_head *pos, *q;
+
+ intel_runtime_pm_get(dev_priv);
+
+ /* Request fences from host */
+ mutex_lock(&dev_priv->drm.struct_mutex);
+ i = 0;
+ list_for_each_safe(pos, q, &dev_priv->mm.fence_list) {
+ reg = list_entry(pos, struct drm_i915_fence_reg, link);
+ if (reg->pin_count || reg->vma)
+ continue;
+ list_del(pos);
+ vgpu->fence.regs[i] = reg;
+ intel_vgpu_write_fence(vgpu, i, 0);
+ if (++i == vgpu_fence_sz(vgpu))
+ break;
+ }
+ if (i != vgpu_fence_sz(vgpu))
+ goto out_free_fence;
+
+ mutex_unlock(&dev_priv->drm.struct_mutex);
+ intel_runtime_pm_put(dev_priv);
+ return 0;
+out_free_fence:
+ /* Return fences to host on failure */
+ for (i = 0; i < vgpu_fence_sz(vgpu); i++) {
+ reg = vgpu->fence.regs[i];
+ if (!reg)
+ continue;
+ list_add_tail(&reg->link,
+ &dev_priv->mm.fence_list);
+ }
+ mutex_unlock(&dev_priv->drm.struct_mutex);
+ intel_runtime_pm_put(dev_priv);
+ return -ENOSPC;
+}
+
+static void free_resource(struct intel_vgpu *vgpu)
+{
+ struct intel_gvt *gvt = vgpu->gvt;
+
+ gvt->gm.vgpu_allocated_low_gm_size -= vgpu_aperture_sz(vgpu);
+ gvt->gm.vgpu_allocated_high_gm_size -= vgpu_hidden_sz(vgpu);
+ gvt->fence.vgpu_allocated_fence_num -= vgpu_fence_sz(vgpu);
+}
+
+static int alloc_resource(struct intel_vgpu *vgpu,
+ struct intel_vgpu_creation_params *param)
+{
+ struct intel_gvt *gvt = vgpu->gvt;
+ unsigned long request, avail, max, taken;
+ const char *item;
+
+ if (!param->low_gm_sz || !param->high_gm_sz || !param->fence_sz) {
+ gvt_err("Invalid vGPU creation params\n");
+ return -EINVAL;
+ }
+
+ item = "low GM space";
+ max = gvt_aperture_sz(gvt) - HOST_LOW_GM_SIZE;
+ taken = gvt->gm.vgpu_allocated_low_gm_size;
+ avail = max - taken;
+ request = MB_TO_BYTES(param->low_gm_sz);
+
+ if (request > avail)
+ goto no_enough_resource;
+
+ vgpu_aperture_sz(vgpu) = request;
+
+ item = "high GM space";
+ max = gvt_hidden_sz(gvt) - HOST_HIGH_GM_SIZE;
+ taken = gvt->gm.vgpu_allocated_high_gm_size;
+ avail = max - taken;
+ request = MB_TO_BYTES(param->high_gm_sz);
+
+ if (request > avail)
+ goto no_enough_resource;
+
+ vgpu_hidden_sz(vgpu) = request;
+
+ item = "fence";
+ max = gvt_fence_sz(gvt) - HOST_FENCE;
+ taken = gvt->fence.vgpu_allocated_fence_num;
+ avail = max - taken;
+ request = param->fence_sz;
+
+ if (request > avail)
+ goto no_enough_resource;
+
+ vgpu_fence_sz(vgpu) = request;
+
+ gvt->gm.vgpu_allocated_low_gm_size += MB_TO_BYTES(param->low_gm_sz);
+ gvt->gm.vgpu_allocated_high_gm_size += MB_TO_BYTES(param->high_gm_sz);
+ gvt->fence.vgpu_allocated_fence_num += param->fence_sz;
+ return 0;
+
+no_enough_resource:
+ gvt_err("vgpu%d: fail to allocate resource %s\n", vgpu->id, item);
+ gvt_err("vgpu%d: request %luMB avail %luMB max %luMB taken %luMB\n",
+ vgpu->id, BYTES_TO_MB(request), BYTES_TO_MB(avail),
+ BYTES_TO_MB(max), BYTES_TO_MB(taken));
+ return -ENOSPC;
+}
+
+/**
+ * intel_vgpu_free_resource - free HW resource owned by a vGPU
+ * @vgpu: a vGPU
+ *
+ * This function is used to free the HW resource owned by a vGPU.
+ *
+ */
+void intel_vgpu_free_resource(struct intel_vgpu *vgpu)
+{
+ free_vgpu_gm(vgpu);
+ free_vgpu_fence(vgpu);
+ free_resource(vgpu);
+}
+
+/**
+ * intel_alloc_vgpu_resource - allocate HW resource for a vGPU
+ * @vgpu: vGPU
+ * @param: vGPU creation params
+ *
+ * This function is used to allocate HW resource for a vGPU. User specifies
+ * the resource configuration through the creation params.
+ *
+ * Returns:
+ * zero on success, negative error code if failed.
+ *
+ */
+int intel_vgpu_alloc_resource(struct intel_vgpu *vgpu,
+ struct intel_vgpu_creation_params *param)
+{
+ int ret;
+
+ ret = alloc_resource(vgpu, param);
+ if (ret)
+ return ret;
+
+ ret = alloc_vgpu_gm(vgpu);
+ if (ret)
+ goto out_free_resource;
+
+ ret = alloc_vgpu_fence(vgpu);
+ if (ret)
+ goto out_free_vgpu_gm;
+
+ return 0;
+
+out_free_vgpu_gm:
+ free_vgpu_gm(vgpu);
+out_free_resource:
+ free_resource(vgpu);
+ return ret;
+}
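Worth noting from intel_vgpu_write_fence() above: the low dword is cleared (and posted) before the high dword is written, and only the final low-dword write re-arms the fence, so the hardware never observes a torn half-old/half-new 64-bit value. A self-contained sketch of the same update pattern against a toy register pair (the names here are illustrative, not the real MMIO map):

#include <stdint.h>
#include <stdio.h>

static volatile uint32_t fence_lo, fence_hi;	/* toy "registers" */

static void write_fence64(uint64_t value)
{
	fence_lo = 0;			/* disable: valid bit lives in lo */
	(void)fence_lo;			/* stand-in for POSTING_READ */
	fence_hi = value >> 32;		/* update hi while fence is off */
	fence_lo = (uint32_t)value;	/* final lo write re-arms it */
	(void)fence_lo;
}

int main(void)
{
	write_fence64(0x00000001a0000001ULL);
	printf("fence = 0x%08x%08x\n", fence_hi, fence_lo);
	return 0;
}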
diff --git a/drivers/gpu/drm/i915/gvt/cfg_space.c b/drivers/gpu/drm/i915/gvt/cfg_space.c
new file mode 100644
index 000000000000..db516382a4d4
--- /dev/null
+++ b/drivers/gpu/drm/i915/gvt/cfg_space.c
@@ -0,0 +1,284 @@
+/*
+ * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ * Eddie Dong <eddie.dong@intel.com>
+ * Jike Song <jike.song@intel.com>
+ *
+ * Contributors:
+ * Zhi Wang <zhi.a.wang@intel.com>
+ * Min He <min.he@intel.com>
+ * Bing Niu <bing.niu@intel.com>
+ *
+ */
+
+#include "i915_drv.h"
+#include "gvt.h"
+
+enum {
+ INTEL_GVT_PCI_BAR_GTTMMIO = 0,
+ INTEL_GVT_PCI_BAR_APERTURE,
+ INTEL_GVT_PCI_BAR_PIO,
+ INTEL_GVT_PCI_BAR_MAX,
+};
+
+/**
+ * intel_vgpu_emulate_cfg_read - emulate vGPU configuration space read
+ *
+ * Returns:
+ * Zero on success, negative error code if failed.
+ */
+int intel_vgpu_emulate_cfg_read(struct intel_vgpu *vgpu, unsigned int offset,
+ void *p_data, unsigned int bytes)
+{
+ if (WARN_ON(bytes > 4))
+ return -EINVAL;
+
+ if (WARN_ON(offset + bytes > INTEL_GVT_MAX_CFG_SPACE_SZ))
+ return -EINVAL;
+
+ memcpy(p_data, vgpu_cfg_space(vgpu) + offset, bytes);
+ return 0;
+}
+
+static int map_aperture(struct intel_vgpu *vgpu, bool map)
+{
+ u64 first_gfn, first_mfn;
+ u64 val;
+ int ret;
+
+ if (map == vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_APERTURE].tracked)
+ return 0;
+
+ val = vgpu_cfg_space(vgpu)[PCI_BASE_ADDRESS_2];
+ if (val & PCI_BASE_ADDRESS_MEM_TYPE_64)
+ val = *(u64 *)(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_2);
+ else
+ val = *(u32 *)(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_2);
+
+ first_gfn = (val + vgpu_aperture_offset(vgpu)) >> PAGE_SHIFT;
+ first_mfn = vgpu_aperture_pa_base(vgpu) >> PAGE_SHIFT;
+
+ ret = intel_gvt_hypervisor_map_gfn_to_mfn(vgpu, first_gfn,
+ first_mfn,
+ vgpu_aperture_sz(vgpu) >>
+ PAGE_SHIFT, map);
+ if (ret)
+ return ret;
+
+ vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_APERTURE].tracked = map;
+ return 0;
+}
+
+static int trap_gttmmio(struct intel_vgpu *vgpu, bool trap)
+{
+ u64 start, end;
+ u64 val;
+ int ret;
+
+ if (trap == vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_GTTMMIO].tracked)
+ return 0;
+
+ val = vgpu_cfg_space(vgpu)[PCI_BASE_ADDRESS_0];
+ if (val & PCI_BASE_ADDRESS_MEM_TYPE_64)
+ start = *(u64 *)(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_0);
+ else
+ start = *(u32 *)(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_0);
+
+ start &= ~GENMASK(3, 0);
+ end = start + vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_GTTMMIO].size - 1;
+
+ ret = intel_gvt_hypervisor_set_trap_area(vgpu, start, end, trap);
+ if (ret)
+ return ret;
+
+ vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_GTTMMIO].tracked = trap;
+ return 0;
+}
+
+static int emulate_pci_command_write(struct intel_vgpu *vgpu,
+ unsigned int offset, void *p_data, unsigned int bytes)
+{
+ u8 old = vgpu_cfg_space(vgpu)[offset];
+ u8 new = *(u8 *)p_data;
+ u8 changed = old ^ new;
+ int ret;
+
+ if (!(changed & PCI_COMMAND_MEMORY))
+ return 0;
+
+ if (old & PCI_COMMAND_MEMORY) {
+ ret = trap_gttmmio(vgpu, false);
+ if (ret)
+ return ret;
+ ret = map_aperture(vgpu, false);
+ if (ret)
+ return ret;
+ } else {
+ ret = trap_gttmmio(vgpu, true);
+ if (ret)
+ return ret;
+ ret = map_aperture(vgpu, true);
+ if (ret)
+ return ret;
+ }
+
+ memcpy(vgpu_cfg_space(vgpu) + offset, p_data, bytes);
+ return 0;
+}
+
+static int emulate_pci_bar_write(struct intel_vgpu *vgpu, unsigned int offset,
+ void *p_data, unsigned int bytes)
+{
+ unsigned int bar_index =
+ (rounddown(offset, 8) % PCI_BASE_ADDRESS_0) / 8;
+ u32 new = *(u32 *)(p_data);
+ bool lo = IS_ALIGNED(offset, 8);
+ u64 size;
+ int ret = 0;
+ bool mmio_enabled =
+ vgpu_cfg_space(vgpu)[PCI_COMMAND] & PCI_COMMAND_MEMORY;
+
+ if (WARN_ON(bar_index >= INTEL_GVT_PCI_BAR_MAX))
+ return -EINVAL;
+
+ if (new == 0xffffffff) {
+ /*
+ * Power-up software can determine how much address
+ * space the device requires by writing a value of
+ * all 1's to the register and then reading the value
+ * back. The device will return 0's in all don't-care
+ * address bits.
+ */
+ size = vgpu->cfg_space.bar[bar_index].size;
+ if (lo) {
+ new = rounddown(new, size);
+ } else {
+ u32 val = vgpu_cfg_space(vgpu)[rounddown(offset, 8)];
+ /* A 32-bit memory BAR returns all 0s in the upper
+ * 32 bits; a 64-bit BAR computes its size together
+ * with the lower 32 bits and returns the
+ * corresponding value
+ */
+ if (val & PCI_BASE_ADDRESS_MEM_TYPE_64)
+ new &= (~(size-1)) >> 32;
+ else
+ new = 0;
+ }
+ /*
+ * Unmap & untrap the BAR, since the guest hasn't configured a
+ * valid GPA
+ */
+ switch (bar_index) {
+ case INTEL_GVT_PCI_BAR_GTTMMIO:
+ ret = trap_gttmmio(vgpu, false);
+ break;
+ case INTEL_GVT_PCI_BAR_APERTURE:
+ ret = map_aperture(vgpu, false);
+ break;
+ }
+ intel_vgpu_write_pci_bar(vgpu, offset, new, lo);
+ } else {
+ /*
+ * Unmap & untrap the old BAR first, since the guest has
+ * reconfigured the BAR
+ */
+ switch (bar_index) {
+ case INTEL_GVT_PCI_BAR_GTTMMIO:
+ ret = trap_gttmmio(vgpu, false);
+ break;
+ case INTEL_GVT_PCI_BAR_APERTURE:
+ ret = map_aperture(vgpu, false);
+ break;
+ }
+ intel_vgpu_write_pci_bar(vgpu, offset, new, lo);
+ /* Track the new BAR */
+ if (mmio_enabled) {
+ switch (bar_index) {
+ case INTEL_GVT_PCI_BAR_GTTMMIO:
+ ret = trap_gttmmio(vgpu, true);
+ break;
+ case INTEL_GVT_PCI_BAR_APERTURE:
+ ret = map_aperture(vgpu, true);
+ break;
+ }
+ }
+ }
+ return ret;
+}
+
+/**
+ * intel_vgpu_emulate_cfg_write - emulate vGPU configuration space write
+ *
+ * Returns:
+ * Zero on success, negative error code if failed.
+ */
+int intel_vgpu_emulate_cfg_write(struct intel_vgpu *vgpu, unsigned int offset,
+ void *p_data, unsigned int bytes)
+{
+ int ret;
+
+ if (WARN_ON(bytes > 4))
+ return -EINVAL;
+
+ if (WARN_ON(offset + bytes >= INTEL_GVT_MAX_CFG_SPACE_SZ))
+ return -EINVAL;
+
+ /* First check if it's PCI_COMMAND */
+ if (IS_ALIGNED(offset, 2) && offset == PCI_COMMAND) {
+ if (WARN_ON(bytes > 2))
+ return -EINVAL;
+ return emulate_pci_command_write(vgpu, offset, p_data, bytes);
+ }
+
+ switch (rounddown(offset, 4)) {
+ case PCI_BASE_ADDRESS_0:
+ case PCI_BASE_ADDRESS_1:
+ case PCI_BASE_ADDRESS_2:
+ case PCI_BASE_ADDRESS_3:
+ if (WARN_ON(!IS_ALIGNED(offset, 4)))
+ return -EINVAL;
+ return emulate_pci_bar_write(vgpu, offset, p_data, bytes);
+
+ case INTEL_GVT_PCI_SWSCI:
+ if (WARN_ON(!IS_ALIGNED(offset, 4)))
+ return -EINVAL;
+ ret = intel_vgpu_emulate_opregion_request(vgpu, *(u32 *)p_data);
+ if (ret)
+ return ret;
+ break;
+
+ case INTEL_GVT_PCI_OPREGION:
+ if (WARN_ON(!IS_ALIGNED(offset, 4)))
+ return -EINVAL;
+ ret = intel_vgpu_init_opregion(vgpu, *(u32 *)p_data);
+ if (ret)
+ return ret;
+
+ memcpy(vgpu_cfg_space(vgpu) + offset, p_data, bytes);
+ break;
+ default:
+ memcpy(vgpu_cfg_space(vgpu) + offset, p_data, bytes);
+ break;
+ }
+ return 0;
+}
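The all-ones write handled by emulate_pci_bar_write() above is the standard PCI BAR sizing probe: power-up software writes 0xffffffff, reads the value back, and the zeros in the don't-care bits reveal the region size. A hedged sketch of the decode step, assuming a 32-bit memory BAR (whose low four bits are flag bits):

#include <stdint.h>
#include <stdio.h>

static uint32_t bar_size(uint32_t readback)
{
	uint32_t mask = readback & ~0xfU;	/* strip the flag bits */

	return ~mask + 1;	/* lowest writable bit gives the size */
}

int main(void)
{
	/* A device answering 0xfff00000 implements a 1 MiB region */
	printf("size = 0x%x\n", bar_size(0xfff00000));
	return 0;
}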
diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c
new file mode 100644
index 000000000000..d26a092c70e8
--- /dev/null
+++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c
@@ -0,0 +1,2831 @@
+/*
+ * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ * Ke Yu
+ * Kevin Tian <kevin.tian@intel.com>
+ * Zhiyuan Lv <zhiyuan.lv@intel.com>
+ *
+ * Contributors:
+ * Min He <min.he@intel.com>
+ * Ping Gao <ping.a.gao@intel.com>
+ * Tina Zhang <tina.zhang@intel.com>
+ * Yulei Zhang <yulei.zhang@intel.com>
+ * Zhi Wang <zhi.a.wang@intel.com>
+ *
+ */
+
+#include <linux/slab.h>
+#include "i915_drv.h"
+#include "gvt.h"
+#include "i915_pvinfo.h"
+#include "trace.h"
+
+#define INVALID_OP (~0U)
+
+#define OP_LEN_MI 9
+#define OP_LEN_2D 10
+#define OP_LEN_3D_MEDIA 16
+#define OP_LEN_MFX_VC 16
+#define OP_LEN_VEBOX 16
+
+#define CMD_TYPE(cmd) (((cmd) >> 29) & 7)
+
+struct sub_op_bits {
+ int hi;
+ int low;
+};
+struct decode_info {
+ char *name;
+ int op_len;
+ int nr_sub_op;
+ struct sub_op_bits *sub_op;
+};
+
+#define MAX_CMD_BUDGET 0x7fffffff
+#define MI_WAIT_FOR_PLANE_C_FLIP_PENDING (1<<15)
+#define MI_WAIT_FOR_PLANE_B_FLIP_PENDING (1<<9)
+#define MI_WAIT_FOR_PLANE_A_FLIP_PENDING (1<<1)
+
+#define MI_WAIT_FOR_SPRITE_C_FLIP_PENDING (1<<20)
+#define MI_WAIT_FOR_SPRITE_B_FLIP_PENDING (1<<10)
+#define MI_WAIT_FOR_SPRITE_A_FLIP_PENDING (1<<2)
+
+/* Render Command Map */
+
+/* MI_* command Opcode (28:23) */
+#define OP_MI_NOOP 0x0
+#define OP_MI_SET_PREDICATE 0x1 /* HSW+ */
+#define OP_MI_USER_INTERRUPT 0x2
+#define OP_MI_WAIT_FOR_EVENT 0x3
+#define OP_MI_FLUSH 0x4
+#define OP_MI_ARB_CHECK 0x5
+#define OP_MI_RS_CONTROL 0x6 /* HSW+ */
+#define OP_MI_REPORT_HEAD 0x7
+#define OP_MI_ARB_ON_OFF 0x8
+#define OP_MI_URB_ATOMIC_ALLOC 0x9 /* HSW+ */
+#define OP_MI_BATCH_BUFFER_END 0xA
+#define OP_MI_SUSPEND_FLUSH 0xB
+#define OP_MI_PREDICATE 0xC /* IVB+ */
+#define OP_MI_TOPOLOGY_FILTER 0xD /* IVB+ */
+#define OP_MI_SET_APPID 0xE /* IVB+ */
+#define OP_MI_RS_CONTEXT 0xF /* HSW+ */
+#define OP_MI_LOAD_SCAN_LINES_INCL 0x12 /* HSW+ */
+#define OP_MI_DISPLAY_FLIP 0x14
+#define OP_MI_SEMAPHORE_MBOX 0x16
+#define OP_MI_SET_CONTEXT 0x18
+#define OP_MI_MATH 0x1A
+#define OP_MI_URB_CLEAR 0x19
+#define OP_MI_SEMAPHORE_SIGNAL 0x1B /* BDW+ */
+#define OP_MI_SEMAPHORE_WAIT 0x1C /* BDW+ */
+
+#define OP_MI_STORE_DATA_IMM 0x20
+#define OP_MI_STORE_DATA_INDEX 0x21
+#define OP_MI_LOAD_REGISTER_IMM 0x22
+#define OP_MI_UPDATE_GTT 0x23
+#define OP_MI_STORE_REGISTER_MEM 0x24
+#define OP_MI_FLUSH_DW 0x26
+#define OP_MI_CLFLUSH 0x27
+#define OP_MI_REPORT_PERF_COUNT 0x28
+#define OP_MI_LOAD_REGISTER_MEM 0x29 /* HSW+ */
+#define OP_MI_LOAD_REGISTER_REG 0x2A /* HSW+ */
+#define OP_MI_RS_STORE_DATA_IMM 0x2B /* HSW+ */
+#define OP_MI_LOAD_URB_MEM 0x2C /* HSW+ */
+#define OP_MI_STORE_URM_MEM 0x2D /* HSW+ */
+#define OP_MI_2E 0x2E /* BDW+ */
+#define OP_MI_2F 0x2F /* BDW+ */
+#define OP_MI_BATCH_BUFFER_START 0x31
+
+/* Bit definition for dword 0 */
+#define _CMDBIT_BB_START_IN_PPGTT (1UL << 8)
+
+#define OP_MI_CONDITIONAL_BATCH_BUFFER_END 0x36
+
+#define BATCH_BUFFER_ADDR_MASK ((1UL << 32) - (1U << 2))
+#define BATCH_BUFFER_ADDR_HIGH_MASK ((1UL << 16) - (1U))
+#define BATCH_BUFFER_ADR_SPACE_BIT(x) (((x) >> 8) & 1U)
+#define BATCH_BUFFER_2ND_LEVEL_BIT(x) ((x) >> 22 & 1U)
+
+/* 2D command: Opcode (28:22) */
+#define OP_2D(x) ((2<<7) | x)
+
+#define OP_XY_SETUP_BLT OP_2D(0x1)
+#define OP_XY_SETUP_CLIP_BLT OP_2D(0x3)
+#define OP_XY_SETUP_MONO_PATTERN_SL_BLT OP_2D(0x11)
+#define OP_XY_PIXEL_BLT OP_2D(0x24)
+#define OP_XY_SCANLINES_BLT OP_2D(0x25)
+#define OP_XY_TEXT_BLT OP_2D(0x26)
+#define OP_XY_TEXT_IMMEDIATE_BLT OP_2D(0x31)
+#define OP_XY_COLOR_BLT OP_2D(0x50)
+#define OP_XY_PAT_BLT OP_2D(0x51)
+#define OP_XY_MONO_PAT_BLT OP_2D(0x52)
+#define OP_XY_SRC_COPY_BLT OP_2D(0x53)
+#define OP_XY_MONO_SRC_COPY_BLT OP_2D(0x54)
+#define OP_XY_FULL_BLT OP_2D(0x55)
+#define OP_XY_FULL_MONO_SRC_BLT OP_2D(0x56)
+#define OP_XY_FULL_MONO_PATTERN_BLT OP_2D(0x57)
+#define OP_XY_FULL_MONO_PATTERN_MONO_SRC_BLT OP_2D(0x58)
+#define OP_XY_MONO_PAT_FIXED_BLT OP_2D(0x59)
+#define OP_XY_MONO_SRC_COPY_IMMEDIATE_BLT OP_2D(0x71)
+#define OP_XY_PAT_BLT_IMMEDIATE OP_2D(0x72)
+#define OP_XY_SRC_COPY_CHROMA_BLT OP_2D(0x73)
+#define OP_XY_FULL_IMMEDIATE_PATTERN_BLT OP_2D(0x74)
+#define OP_XY_FULL_MONO_SRC_IMMEDIATE_PATTERN_BLT OP_2D(0x75)
+#define OP_XY_PAT_CHROMA_BLT OP_2D(0x76)
+#define OP_XY_PAT_CHROMA_BLT_IMMEDIATE OP_2D(0x77)
+
+/* 3D/Media Command: Pipeline Type(28:27) Opcode(26:24) Sub Opcode(23:16) */
+#define OP_3D_MEDIA(sub_type, opcode, sub_opcode) \
+ ((3 << 13) | ((sub_type) << 11) | ((opcode) << 8) | (sub_opcode))
+
+#define OP_STATE_PREFETCH OP_3D_MEDIA(0x0, 0x0, 0x03)
+
+#define OP_STATE_BASE_ADDRESS OP_3D_MEDIA(0x0, 0x1, 0x01)
+#define OP_STATE_SIP OP_3D_MEDIA(0x0, 0x1, 0x02)
+#define OP_3D_MEDIA_0_1_4 OP_3D_MEDIA(0x0, 0x1, 0x04)
+
+#define OP_3DSTATE_VF_STATISTICS_GM45 OP_3D_MEDIA(0x1, 0x0, 0x0B)
+
+#define OP_PIPELINE_SELECT OP_3D_MEDIA(0x1, 0x1, 0x04)
+
+#define OP_MEDIA_VFE_STATE OP_3D_MEDIA(0x2, 0x0, 0x0)
+#define OP_MEDIA_CURBE_LOAD OP_3D_MEDIA(0x2, 0x0, 0x1)
+#define OP_MEDIA_INTERFACE_DESCRIPTOR_LOAD OP_3D_MEDIA(0x2, 0x0, 0x2)
+#define OP_MEDIA_GATEWAY_STATE OP_3D_MEDIA(0x2, 0x0, 0x3)
+#define OP_MEDIA_STATE_FLUSH OP_3D_MEDIA(0x2, 0x0, 0x4)
+
+#define OP_MEDIA_OBJECT OP_3D_MEDIA(0x2, 0x1, 0x0)
+#define OP_MEDIA_OBJECT_PRT OP_3D_MEDIA(0x2, 0x1, 0x2)
+#define OP_MEDIA_OBJECT_WALKER OP_3D_MEDIA(0x2, 0x1, 0x3)
+#define OP_GPGPU_WALKER OP_3D_MEDIA(0x2, 0x1, 0x5)
+
+#define OP_3DSTATE_CLEAR_PARAMS OP_3D_MEDIA(0x3, 0x0, 0x04) /* IVB+ */
+#define OP_3DSTATE_DEPTH_BUFFER OP_3D_MEDIA(0x3, 0x0, 0x05) /* IVB+ */
+#define OP_3DSTATE_STENCIL_BUFFER OP_3D_MEDIA(0x3, 0x0, 0x06) /* IVB+ */
+#define OP_3DSTATE_HIER_DEPTH_BUFFER OP_3D_MEDIA(0x3, 0x0, 0x07) /* IVB+ */
+#define OP_3DSTATE_VERTEX_BUFFERS OP_3D_MEDIA(0x3, 0x0, 0x08)
+#define OP_3DSTATE_VERTEX_ELEMENTS OP_3D_MEDIA(0x3, 0x0, 0x09)
+#define OP_3DSTATE_INDEX_BUFFER OP_3D_MEDIA(0x3, 0x0, 0x0A)
+#define OP_3DSTATE_VF_STATISTICS OP_3D_MEDIA(0x3, 0x0, 0x0B)
+#define OP_3DSTATE_VF OP_3D_MEDIA(0x3, 0x0, 0x0C) /* HSW+ */
+#define OP_3DSTATE_CC_STATE_POINTERS OP_3D_MEDIA(0x3, 0x0, 0x0E)
+#define OP_3DSTATE_SCISSOR_STATE_POINTERS OP_3D_MEDIA(0x3, 0x0, 0x0F)
+#define OP_3DSTATE_VS OP_3D_MEDIA(0x3, 0x0, 0x10)
+#define OP_3DSTATE_GS OP_3D_MEDIA(0x3, 0x0, 0x11)
+#define OP_3DSTATE_CLIP OP_3D_MEDIA(0x3, 0x0, 0x12)
+#define OP_3DSTATE_SF OP_3D_MEDIA(0x3, 0x0, 0x13)
+#define OP_3DSTATE_WM OP_3D_MEDIA(0x3, 0x0, 0x14)
+#define OP_3DSTATE_CONSTANT_VS OP_3D_MEDIA(0x3, 0x0, 0x15)
+#define OP_3DSTATE_CONSTANT_GS OP_3D_MEDIA(0x3, 0x0, 0x16)
+#define OP_3DSTATE_CONSTANT_PS OP_3D_MEDIA(0x3, 0x0, 0x17)
+#define OP_3DSTATE_SAMPLE_MASK OP_3D_MEDIA(0x3, 0x0, 0x18)
+#define OP_3DSTATE_CONSTANT_HS OP_3D_MEDIA(0x3, 0x0, 0x19) /* IVB+ */
+#define OP_3DSTATE_CONSTANT_DS OP_3D_MEDIA(0x3, 0x0, 0x1A) /* IVB+ */
+#define OP_3DSTATE_HS OP_3D_MEDIA(0x3, 0x0, 0x1B) /* IVB+ */
+#define OP_3DSTATE_TE OP_3D_MEDIA(0x3, 0x0, 0x1C) /* IVB+ */
+#define OP_3DSTATE_DS OP_3D_MEDIA(0x3, 0x0, 0x1D) /* IVB+ */
+#define OP_3DSTATE_STREAMOUT OP_3D_MEDIA(0x3, 0x0, 0x1E) /* IVB+ */
+#define OP_3DSTATE_SBE OP_3D_MEDIA(0x3, 0x0, 0x1F) /* IVB+ */
+#define OP_3DSTATE_PS OP_3D_MEDIA(0x3, 0x0, 0x20) /* IVB+ */
+#define OP_3DSTATE_VIEWPORT_STATE_POINTERS_SF_CLIP OP_3D_MEDIA(0x3, 0x0, 0x21) /* IVB+ */
+#define OP_3DSTATE_VIEWPORT_STATE_POINTERS_CC OP_3D_MEDIA(0x3, 0x0, 0x23) /* IVB+ */
+#define OP_3DSTATE_BLEND_STATE_POINTERS OP_3D_MEDIA(0x3, 0x0, 0x24) /* IVB+ */
+#define OP_3DSTATE_DEPTH_STENCIL_STATE_POINTERS OP_3D_MEDIA(0x3, 0x0, 0x25) /* IVB+ */
+#define OP_3DSTATE_BINDING_TABLE_POINTERS_VS OP_3D_MEDIA(0x3, 0x0, 0x26) /* IVB+ */
+#define OP_3DSTATE_BINDING_TABLE_POINTERS_HS OP_3D_MEDIA(0x3, 0x0, 0x27) /* IVB+ */
+#define OP_3DSTATE_BINDING_TABLE_POINTERS_DS OP_3D_MEDIA(0x3, 0x0, 0x28) /* IVB+ */
+#define OP_3DSTATE_BINDING_TABLE_POINTERS_GS OP_3D_MEDIA(0x3, 0x0, 0x29) /* IVB+ */
+#define OP_3DSTATE_BINDING_TABLE_POINTERS_PS OP_3D_MEDIA(0x3, 0x0, 0x2A) /* IVB+ */
+#define OP_3DSTATE_SAMPLER_STATE_POINTERS_VS OP_3D_MEDIA(0x3, 0x0, 0x2B) /* IVB+ */
+#define OP_3DSTATE_SAMPLER_STATE_POINTERS_HS OP_3D_MEDIA(0x3, 0x0, 0x2C) /* IVB+ */
+#define OP_3DSTATE_SAMPLER_STATE_POINTERS_DS OP_3D_MEDIA(0x3, 0x0, 0x2D) /* IVB+ */
+#define OP_3DSTATE_SAMPLER_STATE_POINTERS_GS OP_3D_MEDIA(0x3, 0x0, 0x2E) /* IVB+ */
+#define OP_3DSTATE_SAMPLER_STATE_POINTERS_PS OP_3D_MEDIA(0x3, 0x0, 0x2F) /* IVB+ */
+#define OP_3DSTATE_URB_VS OP_3D_MEDIA(0x3, 0x0, 0x30) /* IVB+ */
+#define OP_3DSTATE_URB_HS OP_3D_MEDIA(0x3, 0x0, 0x31) /* IVB+ */
+#define OP_3DSTATE_URB_DS OP_3D_MEDIA(0x3, 0x0, 0x32) /* IVB+ */
+#define OP_3DSTATE_URB_GS OP_3D_MEDIA(0x3, 0x0, 0x33) /* IVB+ */
+#define OP_3DSTATE_GATHER_CONSTANT_VS OP_3D_MEDIA(0x3, 0x0, 0x34) /* HSW+ */
+#define OP_3DSTATE_GATHER_CONSTANT_GS OP_3D_MEDIA(0x3, 0x0, 0x35) /* HSW+ */
+#define OP_3DSTATE_GATHER_CONSTANT_HS OP_3D_MEDIA(0x3, 0x0, 0x36) /* HSW+ */
+#define OP_3DSTATE_GATHER_CONSTANT_DS OP_3D_MEDIA(0x3, 0x0, 0x37) /* HSW+ */
+#define OP_3DSTATE_GATHER_CONSTANT_PS OP_3D_MEDIA(0x3, 0x0, 0x38) /* HSW+ */
+#define OP_3DSTATE_DX9_CONSTANTF_VS OP_3D_MEDIA(0x3, 0x0, 0x39) /* HSW+ */
+#define OP_3DSTATE_DX9_CONSTANTF_PS OP_3D_MEDIA(0x3, 0x0, 0x3A) /* HSW+ */
+#define OP_3DSTATE_DX9_CONSTANTI_VS OP_3D_MEDIA(0x3, 0x0, 0x3B) /* HSW+ */
+#define OP_3DSTATE_DX9_CONSTANTI_PS OP_3D_MEDIA(0x3, 0x0, 0x3C) /* HSW+ */
+#define OP_3DSTATE_DX9_CONSTANTB_VS OP_3D_MEDIA(0x3, 0x0, 0x3D) /* HSW+ */
+#define OP_3DSTATE_DX9_CONSTANTB_PS OP_3D_MEDIA(0x3, 0x0, 0x3E) /* HSW+ */
+#define OP_3DSTATE_DX9_LOCAL_VALID_VS OP_3D_MEDIA(0x3, 0x0, 0x3F) /* HSW+ */
+#define OP_3DSTATE_DX9_LOCAL_VALID_PS OP_3D_MEDIA(0x3, 0x0, 0x40) /* HSW+ */
+#define OP_3DSTATE_DX9_GENERATE_ACTIVE_VS OP_3D_MEDIA(0x3, 0x0, 0x41) /* HSW+ */
+#define OP_3DSTATE_DX9_GENERATE_ACTIVE_PS OP_3D_MEDIA(0x3, 0x0, 0x42) /* HSW+ */
+#define OP_3DSTATE_BINDING_TABLE_EDIT_VS OP_3D_MEDIA(0x3, 0x0, 0x43) /* HSW+ */
+#define OP_3DSTATE_BINDING_TABLE_EDIT_GS OP_3D_MEDIA(0x3, 0x0, 0x44) /* HSW+ */
+#define OP_3DSTATE_BINDING_TABLE_EDIT_HS OP_3D_MEDIA(0x3, 0x0, 0x45) /* HSW+ */
+#define OP_3DSTATE_BINDING_TABLE_EDIT_DS OP_3D_MEDIA(0x3, 0x0, 0x46) /* HSW+ */
+#define OP_3DSTATE_BINDING_TABLE_EDIT_PS OP_3D_MEDIA(0x3, 0x0, 0x47) /* HSW+ */
+
+#define OP_3DSTATE_VF_INSTANCING OP_3D_MEDIA(0x3, 0x0, 0x49) /* BDW+ */
+#define OP_3DSTATE_VF_SGVS OP_3D_MEDIA(0x3, 0x0, 0x4A) /* BDW+ */
+#define OP_3DSTATE_VF_TOPOLOGY OP_3D_MEDIA(0x3, 0x0, 0x4B) /* BDW+ */
+#define OP_3DSTATE_WM_CHROMAKEY OP_3D_MEDIA(0x3, 0x0, 0x4C) /* BDW+ */
+#define OP_3DSTATE_PS_BLEND OP_3D_MEDIA(0x3, 0x0, 0x4D) /* BDW+ */
+#define OP_3DSTATE_WM_DEPTH_STENCIL OP_3D_MEDIA(0x3, 0x0, 0x4E) /* BDW+ */
+#define OP_3DSTATE_PS_EXTRA OP_3D_MEDIA(0x3, 0x0, 0x4F) /* BDW+ */
+#define OP_3DSTATE_RASTER OP_3D_MEDIA(0x3, 0x0, 0x50) /* BDW+ */
+#define OP_3DSTATE_SBE_SWIZ OP_3D_MEDIA(0x3, 0x0, 0x51) /* BDW+ */
+#define OP_3DSTATE_WM_HZ_OP OP_3D_MEDIA(0x3, 0x0, 0x52) /* BDW+ */
+#define OP_3DSTATE_COMPONENT_PACKING OP_3D_MEDIA(0x3, 0x0, 0x55) /* SKL+ */
+
+#define OP_3DSTATE_DRAWING_RECTANGLE OP_3D_MEDIA(0x3, 0x1, 0x00)
+#define OP_3DSTATE_SAMPLER_PALETTE_LOAD0 OP_3D_MEDIA(0x3, 0x1, 0x02)
+#define OP_3DSTATE_CHROMA_KEY OP_3D_MEDIA(0x3, 0x1, 0x04)
+#define OP_SNB_3DSTATE_DEPTH_BUFFER OP_3D_MEDIA(0x3, 0x1, 0x05)
+#define OP_3DSTATE_POLY_STIPPLE_OFFSET OP_3D_MEDIA(0x3, 0x1, 0x06)
+#define OP_3DSTATE_POLY_STIPPLE_PATTERN OP_3D_MEDIA(0x3, 0x1, 0x07)
+#define OP_3DSTATE_LINE_STIPPLE OP_3D_MEDIA(0x3, 0x1, 0x08)
+#define OP_3DSTATE_AA_LINE_PARAMS OP_3D_MEDIA(0x3, 0x1, 0x0A)
+#define OP_3DSTATE_GS_SVB_INDEX OP_3D_MEDIA(0x3, 0x1, 0x0B)
+#define OP_3DSTATE_SAMPLER_PALETTE_LOAD1 OP_3D_MEDIA(0x3, 0x1, 0x0C)
+#define OP_3DSTATE_MULTISAMPLE_BDW OP_3D_MEDIA(0x3, 0x0, 0x0D)
+#define OP_SNB_3DSTATE_STENCIL_BUFFER OP_3D_MEDIA(0x3, 0x1, 0x0E)
+#define OP_SNB_3DSTATE_HIER_DEPTH_BUFFER OP_3D_MEDIA(0x3, 0x1, 0x0F)
+#define OP_SNB_3DSTATE_CLEAR_PARAMS OP_3D_MEDIA(0x3, 0x1, 0x10)
+#define OP_3DSTATE_MONOFILTER_SIZE OP_3D_MEDIA(0x3, 0x1, 0x11)
+#define OP_3DSTATE_PUSH_CONSTANT_ALLOC_VS OP_3D_MEDIA(0x3, 0x1, 0x12) /* IVB+ */
+#define OP_3DSTATE_PUSH_CONSTANT_ALLOC_HS OP_3D_MEDIA(0x3, 0x1, 0x13) /* IVB+ */
+#define OP_3DSTATE_PUSH_CONSTANT_ALLOC_DS OP_3D_MEDIA(0x3, 0x1, 0x14) /* IVB+ */
+#define OP_3DSTATE_PUSH_CONSTANT_ALLOC_GS OP_3D_MEDIA(0x3, 0x1, 0x15) /* IVB+ */
+#define OP_3DSTATE_PUSH_CONSTANT_ALLOC_PS OP_3D_MEDIA(0x3, 0x1, 0x16) /* IVB+ */
+#define OP_3DSTATE_SO_DECL_LIST OP_3D_MEDIA(0x3, 0x1, 0x17)
+#define OP_3DSTATE_SO_BUFFER OP_3D_MEDIA(0x3, 0x1, 0x18)
+#define OP_3DSTATE_BINDING_TABLE_POOL_ALLOC OP_3D_MEDIA(0x3, 0x1, 0x19) /* HSW+ */
+#define OP_3DSTATE_GATHER_POOL_ALLOC OP_3D_MEDIA(0x3, 0x1, 0x1A) /* HSW+ */
+#define OP_3DSTATE_DX9_CONSTANT_BUFFER_POOL_ALLOC OP_3D_MEDIA(0x3, 0x1, 0x1B) /* HSW+ */
+#define OP_3DSTATE_SAMPLE_PATTERN OP_3D_MEDIA(0x3, 0x1, 0x1C)
+#define OP_PIPE_CONTROL OP_3D_MEDIA(0x3, 0x2, 0x00)
+#define OP_3DPRIMITIVE OP_3D_MEDIA(0x3, 0x3, 0x00)
+
+/* VCCP Command Parser */
+
+/*
+ * The MFX and VBE command definitions below are taken from the vaapi intel-driver project (BSD license):
+ * git://anongit.freedesktop.org/vaapi/intel-driver
+ * src/i965_defines.h
+ *
+ */
+
+#define OP_MFX(pipeline, op, sub_opa, sub_opb) \
+ (3 << 13 | \
+ (pipeline) << 11 | \
+ (op) << 8 | \
+ (sub_opa) << 5 | \
+ (sub_opb))
+
+#define OP_MFX_PIPE_MODE_SELECT OP_MFX(2, 0, 0, 0) /* ALL */
+#define OP_MFX_SURFACE_STATE OP_MFX(2, 0, 0, 1) /* ALL */
+#define OP_MFX_PIPE_BUF_ADDR_STATE OP_MFX(2, 0, 0, 2) /* ALL */
+#define OP_MFX_IND_OBJ_BASE_ADDR_STATE OP_MFX(2, 0, 0, 3) /* ALL */
+#define OP_MFX_BSP_BUF_BASE_ADDR_STATE OP_MFX(2, 0, 0, 4) /* ALL */
+#define OP_2_0_0_5 OP_MFX(2, 0, 0, 5) /* ALL */
+#define OP_MFX_STATE_POINTER OP_MFX(2, 0, 0, 6) /* ALL */
+#define OP_MFX_QM_STATE OP_MFX(2, 0, 0, 7) /* IVB+ */
+#define OP_MFX_FQM_STATE OP_MFX(2, 0, 0, 8) /* IVB+ */
+#define OP_MFX_PAK_INSERT_OBJECT OP_MFX(2, 0, 2, 8) /* IVB+ */
+#define OP_MFX_STITCH_OBJECT OP_MFX(2, 0, 2, 0xA) /* IVB+ */
+
+#define OP_MFD_IT_OBJECT OP_MFX(2, 0, 1, 9) /* ALL */
+
+#define OP_MFX_WAIT OP_MFX(1, 0, 0, 0) /* IVB+ */
+#define OP_MFX_AVC_IMG_STATE OP_MFX(2, 1, 0, 0) /* ALL */
+#define OP_MFX_AVC_QM_STATE OP_MFX(2, 1, 0, 1) /* ALL */
+#define OP_MFX_AVC_DIRECTMODE_STATE OP_MFX(2, 1, 0, 2) /* ALL */
+#define OP_MFX_AVC_SLICE_STATE OP_MFX(2, 1, 0, 3) /* ALL */
+#define OP_MFX_AVC_REF_IDX_STATE OP_MFX(2, 1, 0, 4) /* ALL */
+#define OP_MFX_AVC_WEIGHTOFFSET_STATE OP_MFX(2, 1, 0, 5) /* ALL */
+#define OP_MFD_AVC_PICID_STATE OP_MFX(2, 1, 1, 5) /* HSW+ */
+#define OP_MFD_AVC_DPB_STATE OP_MFX(2, 1, 1, 6) /* IVB+ */
+#define OP_MFD_AVC_SLICEADDR OP_MFX(2, 1, 1, 7) /* IVB+ */
+#define OP_MFD_AVC_BSD_OBJECT OP_MFX(2, 1, 1, 8) /* ALL */
+#define OP_MFC_AVC_PAK_OBJECT OP_MFX(2, 1, 2, 9) /* ALL */
+
+#define OP_MFX_VC1_PRED_PIPE_STATE OP_MFX(2, 2, 0, 1) /* ALL */
+#define OP_MFX_VC1_DIRECTMODE_STATE OP_MFX(2, 2, 0, 2) /* ALL */
+#define OP_MFD_VC1_SHORT_PIC_STATE OP_MFX(2, 2, 1, 0) /* IVB+ */
+#define OP_MFD_VC1_LONG_PIC_STATE OP_MFX(2, 2, 1, 1) /* IVB+ */
+#define OP_MFD_VC1_BSD_OBJECT OP_MFX(2, 2, 1, 8) /* ALL */
+
+#define OP_MFX_MPEG2_PIC_STATE OP_MFX(2, 3, 0, 0) /* ALL */
+#define OP_MFX_MPEG2_QM_STATE OP_MFX(2, 3, 0, 1) /* ALL */
+#define OP_MFD_MPEG2_BSD_OBJECT OP_MFX(2, 3, 1, 8) /* ALL */
+#define OP_MFC_MPEG2_SLICEGROUP_STATE OP_MFX(2, 3, 2, 3) /* ALL */
+#define OP_MFC_MPEG2_PAK_OBJECT OP_MFX(2, 3, 2, 9) /* ALL */
+
+#define OP_MFX_2_6_0_0 OP_MFX(2, 6, 0, 0) /* IVB+ */
+#define OP_MFX_2_6_0_8 OP_MFX(2, 6, 0, 8) /* IVB+ */
+#define OP_MFX_2_6_0_9 OP_MFX(2, 6, 0, 9) /* IVB+ */
+
+#define OP_MFX_JPEG_PIC_STATE OP_MFX(2, 7, 0, 0)
+#define OP_MFX_JPEG_HUFF_TABLE_STATE OP_MFX(2, 7, 0, 2)
+#define OP_MFD_JPEG_BSD_OBJECT OP_MFX(2, 7, 1, 8)
+
+#define OP_VEB(pipeline, op, sub_opa, sub_opb) \
+ (3 << 13 | \
+ (pipeline) << 11 | \
+ (op) << 8 | \
+ (sub_opa) << 5 | \
+ (sub_opb))
+
+#define OP_VEB_SURFACE_STATE OP_VEB(2, 4, 0, 0)
+#define OP_VEB_STATE OP_VEB(2, 4, 0, 2)
+#define OP_VEB_DNDI_IECP_STATE OP_VEB(2, 4, 0, 3)
+
+struct parser_exec_state;
+
+typedef int (*parser_cmd_handler)(struct parser_exec_state *s);
+
+#define GVT_CMD_HASH_BITS 7
+
+/* which DWords need an address fixup */
+#define ADDR_FIX_1(x1) (1 << (x1))
+#define ADDR_FIX_2(x1, x2) (ADDR_FIX_1(x1) | ADDR_FIX_1(x2))
+#define ADDR_FIX_3(x1, x2, x3) (ADDR_FIX_1(x1) | ADDR_FIX_2(x2, x3))
+#define ADDR_FIX_4(x1, x2, x3, x4) (ADDR_FIX_1(x1) | ADDR_FIX_3(x2, x3, x4))
+#define ADDR_FIX_5(x1, x2, x3, x4, x5) (ADDR_FIX_1(x1) | ADDR_FIX_4(x2, x3, x4, x5))
+
+struct cmd_info {
+ char *name;
+ u32 opcode;
+
+#define F_LEN_MASK (1U<<0)
+#define F_LEN_CONST 1U
+#define F_LEN_VAR 0U
+
+/*
+ * command has its own ip advance logic
+ * e.g. MI_BATCH_START, MI_BATCH_END
+ */
+#define F_IP_ADVANCE_CUSTOM (1<<1)
+
+#define F_POST_HANDLE (1<<2)
+ u32 flag;
+
+#define R_RCS (1 << RCS)
+#define R_VCS1 (1 << VCS)
+#define R_VCS2 (1 << VCS2)
+#define R_VCS (R_VCS1 | R_VCS2)
+#define R_BCS (1 << BCS)
+#define R_VECS (1 << VECS)
+#define R_ALL (R_RCS | R_VCS | R_BCS | R_VECS)
+ /* rings that support this cmd: BLT/RCS/VCS/VECS */
+ uint16_t rings;
+
+ /* devices that support this cmd: SNB/IVB/HSW/... */
+ uint16_t devices;
+
+ /* which DWords are addresses that need fixing up.
+ * A 0 bit means a 32-bit non-address operand in the command;
+ * a 1 bit means an address operand, which could be 32-bit
+ * or 64-bit depending on the architecture (defined by
+ * "gmadr_bytes_in_cmd" in intel_gvt). Regardless of the
+ * address length, each address takes only one bit in the
+ * bitmap.
+ */
+ uint16_t addr_bitmap;
+
+ /* flag == F_LEN_CONST : command length
+ * flag == F_LEN_VAR : length bias bits
+ * Note: length is in DWord
+ */
+ uint8_t len;
+
+ parser_cmd_handler handler;
+};
+
+struct cmd_entry {
+ struct hlist_node hlist;
+ struct cmd_info *info;
+};
+
+enum {
+ RING_BUFFER_INSTRUCTION,
+ BATCH_BUFFER_INSTRUCTION,
+ BATCH_BUFFER_2ND_LEVEL,
+};
+
+enum {
+ GTT_BUFFER,
+ PPGTT_BUFFER
+};
+
+struct parser_exec_state {
+ struct intel_vgpu *vgpu;
+ int ring_id;
+
+ int buf_type;
+
+ /* batch buffer address type */
+ int buf_addr_type;
+
+ /* graphics memory address of ring buffer start */
+ unsigned long ring_start;
+ unsigned long ring_size;
+ unsigned long ring_head;
+ unsigned long ring_tail;
+
+ /* instruction graphics memory address */
+ unsigned long ip_gma;
+
+ /* mapped va of the instr_gma */
+ void *ip_va;
+ void *rb_va;
+
+ void *ret_bb_va;
+ /* next instruction when return from batch buffer to ring buffer */
+ unsigned long ret_ip_gma_ring;
+
+ /* next instruction when return from 2nd batch buffer to batch buffer */
+ unsigned long ret_ip_gma_bb;
+
+ /* batch buffer address type (GTT or PPGTT)
+ * used when ret from 2nd level batch buffer
+ */
+ int saved_buf_addr_type;
+
+ struct cmd_info *info;
+
+ struct intel_vgpu_workload *workload;
+};
+
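+/* number of DWords a graphics memory address takes in a command,
+ * derived from gmadr_bytes_in_cmd (4 bytes -> 1 DWord, 8 bytes -> 2)
+ */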
+#define gmadr_dw_number(s) \
+ (s->vgpu->gvt->device_info.gmadr_bytes_in_cmd >> 2)
+
+static unsigned long bypass_scan_mask;
+static bool bypass_batch_buffer_scan = true;
+
+/* ring ALL, type = 0 */
+static struct sub_op_bits sub_op_mi[] = {
+ {31, 29},
+ {28, 23},
+};
+
+static struct decode_info decode_info_mi = {
+ "MI",
+ OP_LEN_MI,
+ ARRAY_SIZE(sub_op_mi),
+ sub_op_mi,
+};
+
+/* ring RCS, command type 2 */
+static struct sub_op_bits sub_op_2d[] = {
+ {31, 29},
+ {28, 22},
+};
+
+static struct decode_info decode_info_2d = {
+ "2D",
+ OP_LEN_2D,
+ ARRAY_SIZE(sub_op_2d),
+ sub_op_2d,
+};
+
+/* ring RCS, command type 3 */
+static struct sub_op_bits sub_op_3d_media[] = {
+ {31, 29},
+ {28, 27},
+ {26, 24},
+ {23, 16},
+};
+
+static struct decode_info decode_info_3d_media = {
+ "3D_Media",
+ OP_LEN_3D_MEDIA,
+ ARRAY_SIZE(sub_op_3d_media),
+ sub_op_3d_media,
+};
+
+/* ring VCS, command type 3 */
+static struct sub_op_bits sub_op_mfx_vc[] = {
+ {31, 29},
+ {28, 27},
+ {26, 24},
+ {23, 21},
+ {20, 16},
+};
+
+static struct decode_info decode_info_mfx_vc = {
+ "MFX_VC",
+ OP_LEN_MFX_VC,
+ ARRAY_SIZE(sub_op_mfx_vc),
+ sub_op_mfx_vc,
+};
+
+/* ring VECS, command type 3 */
+static struct sub_op_bits sub_op_vebox[] = {
+ {31, 29},
+ {28, 27},
+ {26, 24},
+ {23, 21},
+ {20, 16},
+};
+
+static struct decode_info decode_info_vebox = {
+ "VEBOX",
+ OP_LEN_VEBOX,
+ ARRAY_SIZE(sub_op_vebox),
+ sub_op_vebox,
+};
+
+static struct decode_info *ring_decode_info[I915_NUM_ENGINES][8] = {
+ [RCS] = {
+ &decode_info_mi,
+ NULL,
+ NULL,
+ &decode_info_3d_media,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ },
+
+ [VCS] = {
+ &decode_info_mi,
+ NULL,
+ NULL,
+ &decode_info_mfx_vc,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ },
+
+ [BCS] = {
+ &decode_info_mi,
+ NULL,
+ &decode_info_2d,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ },
+
+ [VECS] = {
+ &decode_info_mi,
+ NULL,
+ NULL,
+ &decode_info_vebox,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ },
+
+ [VCS2] = {
+ &decode_info_mi,
+ NULL,
+ NULL,
+ &decode_info_mfx_vc,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ },
+};
+
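+/* look up the decode info for this ring and command type, then take
+ * the top op_len bits of the command DWord as the opcode
+ */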
+static inline u32 get_opcode(u32 cmd, int ring_id)
+{
+ struct decode_info *d_info;
+
+ if (ring_id >= I915_NUM_ENGINES)
+ return INVALID_OP;
+
+ d_info = ring_decode_info[ring_id][CMD_TYPE(cmd)];
+ if (d_info == NULL)
+ return INVALID_OP;
+
+ return cmd >> (32 - d_info->op_len);
+}
+
+static inline struct cmd_info *find_cmd_entry(struct intel_gvt *gvt,
+ unsigned int opcode, int ring_id)
+{
+ struct cmd_entry *e;
+
+ hash_for_each_possible(gvt->cmd_table, e, hlist, opcode) {
+ if ((opcode == e->info->opcode) &&
+ (e->info->rings & (1 << ring_id)))
+ return e->info;
+ }
+ return NULL;
+}
+
+static inline struct cmd_info *get_cmd_info(struct intel_gvt *gvt,
+ u32 cmd, int ring_id)
+{
+ u32 opcode;
+
+ opcode = get_opcode(cmd, ring_id);
+ if (opcode == INVALID_OP)
+ return NULL;
+
+ return find_cmd_entry(gvt, opcode, ring_id);
+}
+
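+/* extract the bit field [hi:low] of a command DWord */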
+static inline u32 sub_op_val(u32 cmd, u32 hi, u32 low)
+{
+ return (cmd >> low) & ((1U << (hi - low + 1)) - 1);
+}
+
+static inline void print_opcode(u32 cmd, int ring_id)
+{
+ struct decode_info *d_info;
+ int i;
+
+ if (ring_id >= I915_NUM_ENGINES)
+ return;
+
+ d_info = ring_decode_info[ring_id][CMD_TYPE(cmd)];
+ if (d_info == NULL)
+ return;
+
+ gvt_err("opcode=0x%x %s sub_ops:",
+ cmd >> (32 - d_info->op_len), d_info->name);
+
+ for (i = 0; i < d_info->nr_sub_op; i++)
+ pr_err("0x%x ", sub_op_val(cmd, d_info->sub_op[i].hi,
+ d_info->sub_op[i].low));
+
+ pr_err("\n");
+}
+
+static inline u32 *cmd_ptr(struct parser_exec_state *s, int index)
+{
+ return s->ip_va + (index << 2);
+}
+
+static inline u32 cmd_val(struct parser_exec_state *s, int index)
+{
+ return *cmd_ptr(s, index);
+}
+
+static void parser_exec_state_dump(struct parser_exec_state *s)
+{
+ int cnt = 0;
+ int i;
+
+ gvt_err(" vgpu%d RING%d: ring_start(%08lx) ring_end(%08lx)"
+ " ring_head(%08lx) ring_tail(%08lx)\n", s->vgpu->id,
+ s->ring_id, s->ring_start, s->ring_start + s->ring_size,
+ s->ring_head, s->ring_tail);
+
+ gvt_err(" %s %s ip_gma(%08lx) ",
+ s->buf_type == RING_BUFFER_INSTRUCTION ?
+ "RING_BUFFER" : "BATCH_BUFFER",
+ s->buf_addr_type == GTT_BUFFER ?
+ "GTT" : "PPGTT", s->ip_gma);
+
+ if (s->ip_va == NULL) {
+ gvt_err(" ip_va(NULL)");
+ return;
+ }
+
+ gvt_err(" ip_va=%p: %08x %08x %08x %08x\n",
+ s->ip_va, cmd_val(s, 0), cmd_val(s, 1),
+ cmd_val(s, 2), cmd_val(s, 3));
+
+ print_opcode(cmd_val(s, 0), s->ring_id);
+
+ /* print the whole page to trace */
+ pr_err(" ip_va=%p: %08x %08x %08x %08x\n",
+ s->ip_va, cmd_val(s, 0), cmd_val(s, 1),
+ cmd_val(s, 2), cmd_val(s, 3));
+
+ s->ip_va = (u32 *)((((u64)s->ip_va) >> 12) << 12);
+
+ while (cnt < 1024) {
+ pr_err("ip_va=%p: ", s->ip_va);
+ for (i = 0; i < 8; i++)
+ pr_err("%08x ", cmd_val(s, i));
+ pr_err("\n");
+
+ s->ip_va += 8 * sizeof(u32);
+ cnt += 8;
+ }
+}
+
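+/* Map the current instruction GMA (ip_gma) to a host virtual address
+ * within the shadow copy: for ring buffers, compute the offset from
+ * ring_head with wrap-around handling; for shadow batch buffers, the
+ * previously recorded VA is used.
+ */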
+static inline void update_ip_va(struct parser_exec_state *s)
+{
+ unsigned long len = 0;
+
+ if (WARN_ON(s->ring_head == s->ring_tail))
+ return;
+
+ if (s->buf_type == RING_BUFFER_INSTRUCTION) {
+ unsigned long ring_top = s->ring_start + s->ring_size;
+
+ if (s->ring_head > s->ring_tail) {
+ if (s->ip_gma >= s->ring_head && s->ip_gma < ring_top)
+ len = (s->ip_gma - s->ring_head);
+ else if (s->ip_gma >= s->ring_start &&
+ s->ip_gma <= s->ring_tail)
+ len = (ring_top - s->ring_head) +
+ (s->ip_gma - s->ring_start);
+ } else
+ len = (s->ip_gma - s->ring_head);
+
+ s->ip_va = s->rb_va + len;
+ } else {/* shadow batch buffer */
+ s->ip_va = s->ret_bb_va;
+ }
+}
+
+static inline int ip_gma_set(struct parser_exec_state *s,
+ unsigned long ip_gma)
+{
+ WARN_ON(!IS_ALIGNED(ip_gma, 4));
+
+ s->ip_gma = ip_gma;
+ update_ip_va(s);
+ return 0;
+}
+
+static inline int ip_gma_advance(struct parser_exec_state *s,
+ unsigned int dw_len)
+{
+ s->ip_gma += (dw_len << 2);
+
+ if (s->buf_type == RING_BUFFER_INSTRUCTION) {
+ if (s->ip_gma >= s->ring_start + s->ring_size)
+ s->ip_gma -= s->ring_size;
+ update_ip_va(s);
+ } else {
+ s->ip_va += (dw_len << 2);
+ }
+
+ return 0;
+}
+
+static inline int get_cmd_length(struct cmd_info *info, u32 cmd)
+{
+ if ((info->flag & F_LEN_MASK) == F_LEN_CONST)
+ return info->len;
+
+ return (cmd & ((1U << info->len) - 1)) + 2;
+}
+
+static inline int cmd_length(struct parser_exec_state *s)
+{
+ return get_cmd_length(s->info, cmd_val(s, 0));
+}
+
+/* do not remove this, some platforms may need a clflush here */
+#define patch_value(s, addr, val) do { \
+ *addr = val; \
+} while (0)
+
+static bool is_shadowed_mmio(unsigned int offset)
+{
+ bool ret = false;
+
+ if ((offset == 0x2168) || /*BB current head register UDW */
+ (offset == 0x2140) || /*BB current header register */
+ (offset == 0x211c) || /*second BB header register UDW */
+ (offset == 0x2114)) { /*second BB header register */
+ ret = true;
+ }
+ return ret;
+}
+
+static int cmd_reg_handler(struct parser_exec_state *s,
+ unsigned int offset, unsigned int index, char *cmd)
+{
+ struct intel_vgpu *vgpu = s->vgpu;
+ struct intel_gvt *gvt = vgpu->gvt;
+
+ if (offset + 4 > gvt->device_info.mmio_size) {
+ gvt_err("%s access to (%x) outside of MMIO range\n",
+ cmd, offset);
+ return -EINVAL;
+ }
+
+ if (!intel_gvt_mmio_is_cmd_access(gvt, offset)) {
+ gvt_err("vgpu%d: %s access to non-render register (%x)\n",
+ s->vgpu->id, cmd, offset);
+ return 0;
+ }
+
+ if (is_shadowed_mmio(offset)) {
+ gvt_err("vgpu%d: found access of shadowed MMIO %x\n",
+ s->vgpu->id, offset);
+ return 0;
+ }
+
+ if (offset == i915_mmio_reg_offset(DERRMR) ||
+ offset == i915_mmio_reg_offset(FORCEWAKE_MT)) {
+ /* Writing to HW VGT_PVINFO_PAGE offset will be discarded */
+ patch_value(s, cmd_ptr(s, index), VGT_PVINFO_PAGE);
+ }
+
+ /* TODO: Update the global mask if this MMIO is a masked-MMIO */
+ intel_gvt_mmio_set_cmd_accessed(gvt, offset);
+ return 0;
+}
+
+#define cmd_reg(s, i) \
+ (cmd_val(s, i) & GENMASK(22, 2))
+
+#define cmd_reg_inhibit(s, i) \
+ (cmd_val(s, i) & GENMASK(22, 18))
+
+#define cmd_gma(s, i) \
+ (cmd_val(s, i) & GENMASK(31, 2))
+
+#define cmd_gma_hi(s, i) \
+ (cmd_val(s, i) & GENMASK(15, 0))
+
+static int cmd_handler_lri(struct parser_exec_state *s)
+{
+ int i, ret = 0;
+ int cmd_len = cmd_length(s);
+ struct intel_gvt *gvt = s->vgpu->gvt;
+
+ for (i = 1; i < cmd_len; i += 2) {
+ if (IS_BROADWELL(gvt->dev_priv) &&
+ (s->ring_id != RCS)) {
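+ /* on non-RCS rings, LRI to an inhibit-range register is
+ * rejected; DERRMR via BCS is the sole exception
+ */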
+ if (s->ring_id == BCS &&
+ cmd_reg(s, i) ==
+ i915_mmio_reg_offset(DERRMR))
+ ret |= 0;
+ else
+ ret |= (cmd_reg_inhibit(s, i)) ? -EINVAL : 0;
+ }
+ if (ret)
+ break;
+ ret |= cmd_reg_handler(s, cmd_reg(s, i), i, "lri");
+ }
+ return ret;
+}
+
+static int cmd_handler_lrr(struct parser_exec_state *s)
+{
+ int i, ret = 0;
+ int cmd_len = cmd_length(s);
+
+ for (i = 1; i < cmd_len; i += 2) {
+ if (IS_BROADWELL(s->vgpu->gvt->dev_priv))
+ ret |= ((cmd_reg_inhibit(s, i) ||
+ (cmd_reg_inhibit(s, i + 1)))) ?
+ -EINVAL : 0;
+ if (ret)
+ break;
+ ret |= cmd_reg_handler(s, cmd_reg(s, i), i, "lrr-src");
+ ret |= cmd_reg_handler(s, cmd_reg(s, i + 1), i, "lrr-dst");
+ }
+ return ret;
+}
+
+static inline int cmd_address_audit(struct parser_exec_state *s,
+ unsigned long guest_gma, int op_size, bool index_mode);
+
+static int cmd_handler_lrm(struct parser_exec_state *s)
+{
+ struct intel_gvt *gvt = s->vgpu->gvt;
+ int gmadr_bytes = gvt->device_info.gmadr_bytes_in_cmd;
+ unsigned long gma;
+ int i, ret = 0;
+ int cmd_len = cmd_length(s);
+
+ for (i = 1; i < cmd_len;) {
+ if (IS_BROADWELL(gvt->dev_priv))
+ ret |= (cmd_reg_inhibit(s, i)) ? -EINVAL : 0;
+ if (ret)
+ break;
+ ret |= cmd_reg_handler(s, cmd_reg(s, i), i, "lrm");
+ if (cmd_val(s, 0) & (1 << 22)) {
+ gma = cmd_gma(s, i + 1);
+ if (gmadr_bytes == 8)
+ gma |= (cmd_gma_hi(s, i + 2)) << 32;
+ ret |= cmd_address_audit(s, gma, sizeof(u32), false);
+ }
+ i += gmadr_dw_number(s) + 1;
+ }
+ return ret;
+}
+
+static int cmd_handler_srm(struct parser_exec_state *s)
+{
+ int gmadr_bytes = s->vgpu->gvt->device_info.gmadr_bytes_in_cmd;
+ unsigned long gma;
+ int i, ret = 0;
+ int cmd_len = cmd_length(s);
+
+ for (i = 1; i < cmd_len;) {
+ ret |= cmd_reg_handler(s, cmd_reg(s, i), i, "srm");
+ if (cmd_val(s, 0) & (1 << 22)) {
+ gma = cmd_gma(s, i + 1);
+ if (gmadr_bytes == 8)
+ gma |= (cmd_gma_hi(s, i + 2)) << 32;
+ ret |= cmd_address_audit(s, gma, sizeof(u32), false);
+ }
+ i += gmadr_dw_number(s) + 1;
+ }
+ return ret;
+}
+
+struct cmd_interrupt_event {
+ int pipe_control_notify;
+ int mi_flush_dw;
+ int mi_user_interrupt;
+};
+
+static struct cmd_interrupt_event cmd_interrupt_events[] = {
+ [RCS] = {
+ .pipe_control_notify = RCS_PIPE_CONTROL,
+ .mi_flush_dw = INTEL_GVT_EVENT_RESERVED,
+ .mi_user_interrupt = RCS_MI_USER_INTERRUPT,
+ },
+ [BCS] = {
+ .pipe_control_notify = INTEL_GVT_EVENT_RESERVED,
+ .mi_flush_dw = BCS_MI_FLUSH_DW,
+ .mi_user_interrupt = BCS_MI_USER_INTERRUPT,
+ },
+ [VCS] = {
+ .pipe_control_notify = INTEL_GVT_EVENT_RESERVED,
+ .mi_flush_dw = VCS_MI_FLUSH_DW,
+ .mi_user_interrupt = VCS_MI_USER_INTERRUPT,
+ },
+ [VCS2] = {
+ .pipe_control_notify = INTEL_GVT_EVENT_RESERVED,
+ .mi_flush_dw = VCS2_MI_FLUSH_DW,
+ .mi_user_interrupt = VCS2_MI_USER_INTERRUPT,
+ },
+ [VECS] = {
+ .pipe_control_notify = INTEL_GVT_EVENT_RESERVED,
+ .mi_flush_dw = VECS_MI_FLUSH_DW,
+ .mi_user_interrupt = VECS_MI_USER_INTERRUPT,
+ },
+};
+
+static int cmd_handler_pipe_control(struct parser_exec_state *s)
+{
+ int gmadr_bytes = s->vgpu->gvt->device_info.gmadr_bytes_in_cmd;
+ unsigned long gma;
+ bool index_mode = false;
+ unsigned int post_sync;
+ int ret = 0;
+
+ post_sync = (cmd_val(s, 1) & PIPE_CONTROL_POST_SYNC_OP_MASK) >> 14;
+
+ /* LRI post sync */
+ if (cmd_val(s, 1) & PIPE_CONTROL_MMIO_WRITE)
+ ret = cmd_reg_handler(s, cmd_reg(s, 2), 1, "pipe_ctrl");
+ /* post sync */
+ else if (post_sync) {
+ if (post_sync == 2)
+ ret = cmd_reg_handler(s, 0x2350, 1, "pipe_ctrl");
+ else if (post_sync == 3)
+ ret = cmd_reg_handler(s, 0x2358, 1, "pipe_ctrl");
+ else if (post_sync == 1) {
+ /* check ggtt*/
+ if ((cmd_val(s, 2) & (1 << 2))) {
+ gma = cmd_val(s, 2) & GENMASK(31, 3);
+ if (gmadr_bytes == 8)
+ gma |= (cmd_gma_hi(s, 3)) << 32;
+ /* Store Data Index */
+ if (cmd_val(s, 1) & (1 << 21))
+ index_mode = true;
+ ret |= cmd_address_audit(s, gma, sizeof(u64),
+ index_mode);
+ }
+ }
+ }
+
+ if (ret)
+ return ret;
+
+ if (cmd_val(s, 1) & PIPE_CONTROL_NOTIFY)
+ set_bit(cmd_interrupt_events[s->ring_id].pipe_control_notify,
+ s->workload->pending_events);
+ return 0;
+}
+
+static int cmd_handler_mi_user_interrupt(struct parser_exec_state *s)
+{
+ set_bit(cmd_interrupt_events[s->ring_id].mi_user_interrupt,
+ s->workload->pending_events);
+ return 0;
+}
+
+static int cmd_advance_default(struct parser_exec_state *s)
+{
+ return ip_gma_advance(s, cmd_length(s));
+}
+
+static int cmd_handler_mi_batch_buffer_end(struct parser_exec_state *s)
+{
+ int ret;
+
+ if (s->buf_type == BATCH_BUFFER_2ND_LEVEL) {
+ s->buf_type = BATCH_BUFFER_INSTRUCTION;
+ ret = ip_gma_set(s, s->ret_ip_gma_bb);
+ s->buf_addr_type = s->saved_buf_addr_type;
+ } else {
+ s->buf_type = RING_BUFFER_INSTRUCTION;
+ s->buf_addr_type = GTT_BUFFER;
+ if (s->ret_ip_gma_ring >= s->ring_start + s->ring_size)
+ s->ret_ip_gma_ring -= s->ring_size;
+ ret = ip_gma_set(s, s->ret_ip_gma_ring);
+ }
+ return ret;
+}
+
+struct mi_display_flip_command_info {
+ int pipe;
+ int plane;
+ int event;
+ i915_reg_t stride_reg;
+ i915_reg_t ctrl_reg;
+ i915_reg_t surf_reg;
+ u64 stride_val;
+ u64 tile_val;
+ u64 surf_val;
+ bool async_flip;
+};
+
+struct plane_code_mapping {
+ int pipe;
+ int plane;
+ int event;
+};
+
+static int gen8_decode_mi_display_flip(struct parser_exec_state *s,
+ struct mi_display_flip_command_info *info)
+{
+ struct drm_i915_private *dev_priv = s->vgpu->gvt->dev_priv;
+ struct plane_code_mapping gen8_plane_code[] = {
+ [0] = {PIPE_A, PLANE_A, PRIMARY_A_FLIP_DONE},
+ [1] = {PIPE_B, PLANE_A, PRIMARY_B_FLIP_DONE},
+ [2] = {PIPE_A, PLANE_B, SPRITE_A_FLIP_DONE},
+ [3] = {PIPE_B, PLANE_B, SPRITE_B_FLIP_DONE},
+ [4] = {PIPE_C, PLANE_A, PRIMARY_C_FLIP_DONE},
+ [5] = {PIPE_C, PLANE_B, SPRITE_C_FLIP_DONE},
+ };
+ u32 dword0, dword1, dword2;
+ u32 v;
+
+ dword0 = cmd_val(s, 0);
+ dword1 = cmd_val(s, 1);
+ dword2 = cmd_val(s, 2);
+
+ v = (dword0 & GENMASK(21, 19)) >> 19;
+ if (WARN_ON(v >= ARRAY_SIZE(gen8_plane_code)))
+ return -EINVAL;
+
+ info->pipe = gen8_plane_code[v].pipe;
+ info->plane = gen8_plane_code[v].plane;
+ info->event = gen8_plane_code[v].event;
+ info->stride_val = (dword1 & GENMASK(15, 6)) >> 6;
+ info->tile_val = (dword1 & 0x1);
+ info->surf_val = (dword2 & GENMASK(31, 12)) >> 12;
+ info->async_flip = ((dword2 & GENMASK(1, 0)) == 0x1);
+
+ if (info->plane == PLANE_A) {
+ info->ctrl_reg = DSPCNTR(info->pipe);
+ info->stride_reg = DSPSTRIDE(info->pipe);
+ info->surf_reg = DSPSURF(info->pipe);
+ } else if (info->plane == PLANE_B) {
+ info->ctrl_reg = SPRCTL(info->pipe);
+ info->stride_reg = SPRSTRIDE(info->pipe);
+ info->surf_reg = SPRSURF(info->pipe);
+ } else {
+ WARN_ON(1);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int skl_decode_mi_display_flip(struct parser_exec_state *s,
+ struct mi_display_flip_command_info *info)
+{
+ struct drm_i915_private *dev_priv = s->vgpu->gvt->dev_priv;
+ u32 dword0 = cmd_val(s, 0);
+ u32 dword1 = cmd_val(s, 1);
+ u32 dword2 = cmd_val(s, 2);
+ u32 plane = (dword0 & GENMASK(12, 8)) >> 8;
+
+ switch (plane) {
+ case MI_DISPLAY_FLIP_SKL_PLANE_1_A:
+ info->pipe = PIPE_A;
+ info->event = PRIMARY_A_FLIP_DONE;
+ break;
+ case MI_DISPLAY_FLIP_SKL_PLANE_1_B:
+ info->pipe = PIPE_B;
+ info->event = PRIMARY_B_FLIP_DONE;
+ break;
+ case MI_DISPLAY_FLIP_SKL_PLANE_1_C:
+ info->pipe = PIPE_C;
+ info->event = PRIMARY_C_FLIP_DONE;
+ break;
+ default:
+ gvt_err("unknown plane code %d\n", plane);
+ return -EINVAL;
+ }
+
+ info->plane = PRIMARY_PLANE;
+ info->stride_val = (dword1 & GENMASK(15, 6)) >> 6;
+ info->tile_val = (dword1 & GENMASK(2, 0));
+ info->surf_val = (dword2 & GENMASK(31, 12)) >> 12;
+ info->async_flip = ((dword2 & GENMASK(1, 0)) == 0x1);
+
+ info->ctrl_reg = DSPCNTR(info->pipe);
+ info->stride_reg = DSPSTRIDE(info->pipe);
+ info->surf_reg = DSPSURF(info->pipe);
+
+ return 0;
+}
+
+static int gen8_check_mi_display_flip(struct parser_exec_state *s,
+ struct mi_display_flip_command_info *info)
+{
+ struct drm_i915_private *dev_priv = s->vgpu->gvt->dev_priv;
+ u32 stride, tile;
+
+ if (!info->async_flip)
+ return 0;
+
+ if (IS_SKYLAKE(dev_priv)) {
+ stride = vgpu_vreg(s->vgpu, info->stride_reg) & GENMASK(9, 0);
+ tile = (vgpu_vreg(s->vgpu, info->ctrl_reg) &
+ GENMASK(12, 10)) >> 10;
+ } else {
+ stride = (vgpu_vreg(s->vgpu, info->stride_reg) &
+ GENMASK(15, 6)) >> 6;
+ tile = (vgpu_vreg(s->vgpu, info->ctrl_reg) & (1 << 10)) >> 10;
+ }
+
+ if (stride != info->stride_val)
+ gvt_dbg_cmd("cannot change stride during async flip\n");
+
+ if (tile != info->tile_val)
+ gvt_dbg_cmd("cannot change tile during async flip\n");
+
+ return 0;
+}
+
+static int gen8_update_plane_mmio_from_mi_display_flip(
+ struct parser_exec_state *s,
+ struct mi_display_flip_command_info *info)
+{
+ struct drm_i915_private *dev_priv = s->vgpu->gvt->dev_priv;
+ struct intel_vgpu *vgpu = s->vgpu;
+
+ set_mask_bits(&vgpu_vreg(vgpu, info->surf_reg), GENMASK(31, 12),
+ info->surf_val << 12);
+ if (IS_SKYLAKE(dev_priv)) {
+ set_mask_bits(&vgpu_vreg(vgpu, info->stride_reg), GENMASK(9, 0),
+ info->stride_val);
+ set_mask_bits(&vgpu_vreg(vgpu, info->ctrl_reg), GENMASK(12, 10),
+ info->tile_val << 10);
+ } else {
+ set_mask_bits(&vgpu_vreg(vgpu, info->stride_reg), GENMASK(15, 6),
+ info->stride_val << 6);
+ set_mask_bits(&vgpu_vreg(vgpu, info->ctrl_reg), GENMASK(10, 10),
+ info->tile_val << 10);
+ }
+
+ vgpu_vreg(vgpu, PIPE_FRMCOUNT_G4X(info->pipe))++;
+ intel_vgpu_trigger_virtual_event(vgpu, info->event);
+ return 0;
+}
+
+static int decode_mi_display_flip(struct parser_exec_state *s,
+ struct mi_display_flip_command_info *info)
+{
+ struct drm_i915_private *dev_priv = s->vgpu->gvt->dev_priv;
+
+ if (IS_BROADWELL(dev_priv))
+ return gen8_decode_mi_display_flip(s, info);
+ if (IS_SKYLAKE(dev_priv))
+ return skl_decode_mi_display_flip(s, info);
+
+ return -ENODEV;
+}
+
+static int check_mi_display_flip(struct parser_exec_state *s,
+ struct mi_display_flip_command_info *info)
+{
+ struct drm_i915_private *dev_priv = s->vgpu->gvt->dev_priv;
+
+ if (IS_BROADWELL(dev_priv) || IS_SKYLAKE(dev_priv))
+ return gen8_check_mi_display_flip(s, info);
+ return -ENODEV;
+}
+
+static int update_plane_mmio_from_mi_display_flip(
+ struct parser_exec_state *s,
+ struct mi_display_flip_command_info *info)
+{
+ struct drm_i915_private *dev_priv = s->vgpu->gvt->dev_priv;
+
+ if (IS_BROADWELL(dev_priv) || IS_SKYLAKE(dev_priv))
+ return gen8_update_plane_mmio_from_mi_display_flip(s, info);
+ return -ENODEV;
+}
+
+static int cmd_handler_mi_display_flip(struct parser_exec_state *s)
+{
+ struct mi_display_flip_command_info info;
+ int ret;
+ int i;
+ int len = cmd_length(s);
+
+ ret = decode_mi_display_flip(s, &info);
+ if (ret) {
+ gvt_err("fail to decode MI display flip command\n");
+ return ret;
+ }
+
+ ret = check_mi_display_flip(s, &info);
+ if (ret) {
+ gvt_err("invalid MI display flip command\n");
+ return ret;
+ }
+
+ ret = update_plane_mmio_from_mi_display_flip(s, &info);
+ if (ret) {
+ gvt_err("fail to update plane mmio\n");
+ return ret;
+ }
+
+ for (i = 0; i < len; i++)
+ patch_value(s, cmd_ptr(s, i), MI_NOOP);
+ return 0;
+}
+
+static bool is_wait_for_flip_pending(u32 cmd)
+{
+ return cmd & (MI_WAIT_FOR_PLANE_A_FLIP_PENDING |
+ MI_WAIT_FOR_PLANE_B_FLIP_PENDING |
+ MI_WAIT_FOR_PLANE_C_FLIP_PENDING |
+ MI_WAIT_FOR_SPRITE_A_FLIP_PENDING |
+ MI_WAIT_FOR_SPRITE_B_FLIP_PENDING |
+ MI_WAIT_FOR_SPRITE_C_FLIP_PENDING);
+}
+
+static int cmd_handler_mi_wait_for_event(struct parser_exec_state *s)
+{
+ u32 cmd = cmd_val(s, 0);
+
+ if (!is_wait_for_flip_pending(cmd))
+ return 0;
+
+ patch_value(s, cmd_ptr(s, 0), MI_NOOP);
+ return 0;
+}
+
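+/* read the batch buffer start address from the command: the low DWord
+ * always, plus the high DWord when gmadr_bytes_in_cmd is 8
+ */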
+static unsigned long get_gma_bb_from_cmd(struct parser_exec_state *s, int index)
+{
+ unsigned long addr;
+ unsigned long gma_high, gma_low;
+ int gmadr_bytes = s->vgpu->gvt->device_info.gmadr_bytes_in_cmd;
+
+ if (WARN_ON(gmadr_bytes != 4 && gmadr_bytes != 8))
+ return INTEL_GVT_INVALID_ADDR;
+
+ gma_low = cmd_val(s, index) & BATCH_BUFFER_ADDR_MASK;
+ if (gmadr_bytes == 4) {
+ addr = gma_low;
+ } else {
+ gma_high = cmd_val(s, index + 1) & BATCH_BUFFER_ADDR_HIGH_MASK;
+ addr = (((unsigned long)gma_high) << 32) | gma_low;
+ }
+ return addr;
+}
+
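+/* Validate a guest graphics memory address used by a command: in index
+ * mode the value must be an index into a 4KB page of QWords; otherwise
+ * the whole [gma, gma + op_size) range must be a valid vGPU graphics
+ * memory range.
+ */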
+static inline int cmd_address_audit(struct parser_exec_state *s,
+ unsigned long guest_gma, int op_size, bool index_mode)
+{
+ struct intel_vgpu *vgpu = s->vgpu;
+ u32 max_surface_size = vgpu->gvt->device_info.max_surface_size;
+ int i;
+ int ret;
+
+ if (op_size > max_surface_size) {
+ gvt_err("command address audit fail name %s\n", s->info->name);
+ return -EINVAL;
+ }
+
+ if (index_mode) {
+ if (guest_gma >= GTT_PAGE_SIZE / sizeof(u64)) {
+ ret = -EINVAL;
+ goto err;
+ }
+ } else if ((!vgpu_gmadr_is_valid(s->vgpu, guest_gma)) ||
+ (!vgpu_gmadr_is_valid(s->vgpu,
+ guest_gma + op_size - 1))) {
+ ret = -EINVAL;
+ goto err;
+ }
+ return 0;
+err:
+ gvt_err("cmd_parser: Malicious %s detected, addr=0x%lx, len=%d!\n",
+ s->info->name, guest_gma, op_size);
+
+ pr_err("cmd dump: ");
+ for (i = 0; i < cmd_length(s); i++) {
+ if (!(i % 4))
+ pr_err("\n%08x ", cmd_val(s, i));
+ else
+ pr_err("%08x ", cmd_val(s, i));
+ }
+ pr_err("\nvgpu%d: aperture 0x%llx - 0x%llx, hidden 0x%llx - 0x%llx\n",
+ vgpu->id,
+ vgpu_aperture_gmadr_base(vgpu),
+ vgpu_aperture_gmadr_end(vgpu),
+ vgpu_hidden_gmadr_base(vgpu),
+ vgpu_hidden_gmadr_end(vgpu));
+ return ret;
+}
+
+static int cmd_handler_mi_store_data_imm(struct parser_exec_state *s)
+{
+ int gmadr_bytes = s->vgpu->gvt->device_info.gmadr_bytes_in_cmd;
+ int op_size = (cmd_length(s) - 3) * sizeof(u32);
+ int core_id = (cmd_val(s, 2) & (1 << 0)) ? 1 : 0;
+ unsigned long gma, gma_low, gma_high;
+ int ret = 0;
+
+ /* check ppgtt */
+ if (!(cmd_val(s, 0) & (1 << 22)))
+ return 0;
+
+ gma = cmd_val(s, 2) & GENMASK(31, 2);
+
+ if (gmadr_bytes == 8) {
+ gma_low = cmd_val(s, 1) & GENMASK(31, 2);
+ gma_high = cmd_val(s, 2) & GENMASK(15, 0);
+ gma = (gma_high << 32) | gma_low;
+ core_id = (cmd_val(s, 1) & (1 << 0)) ? 1 : 0;
+ }
+ ret = cmd_address_audit(s, gma + op_size * core_id, op_size, false);
+ return ret;
+}
+
+static inline int unexpected_cmd(struct parser_exec_state *s)
+{
+ gvt_err("vgpu%d: Unexpected %s in command buffer!\n",
+ s->vgpu->id, s->info->name);
+ return -EINVAL;
+}
+
+static int cmd_handler_mi_semaphore_wait(struct parser_exec_state *s)
+{
+ return unexpected_cmd(s);
+}
+
+static int cmd_handler_mi_report_perf_count(struct parser_exec_state *s)
+{
+ return unexpected_cmd(s);
+}
+
+static int cmd_handler_mi_op_2e(struct parser_exec_state *s)
+{
+ return unexpected_cmd(s);
+}
+
+static int cmd_handler_mi_op_2f(struct parser_exec_state *s)
+{
+ int gmadr_bytes = s->vgpu->gvt->device_info.gmadr_bytes_in_cmd;
+ int op_size = (1 << ((cmd_val(s, 0) & GENMASK(20, 19)) >> 19)) *
+ sizeof(u32);
+ unsigned long gma, gma_high;
+ int ret = 0;
+
+ if (!(cmd_val(s, 0) & (1 << 22)))
+ return ret;
+
+ gma = cmd_val(s, 1) & GENMASK(31, 2);
+ if (gmadr_bytes == 8) {
+ gma_high = cmd_val(s, 2) & GENMASK(15, 0);
+ gma = (gma_high << 32) | gma;
+ }
+ ret = cmd_address_audit(s, gma, op_size, false);
+ return ret;
+}
+
+static int cmd_handler_mi_store_data_index(struct parser_exec_state *s)
+{
+ return unexpected_cmd(s);
+}
+
+static int cmd_handler_mi_clflush(struct parser_exec_state *s)
+{
+ return unexpected_cmd(s);
+}
+
+static int cmd_handler_mi_conditional_batch_buffer_end(
+ struct parser_exec_state *s)
+{
+ return unexpected_cmd(s);
+}
+
+static int cmd_handler_mi_update_gtt(struct parser_exec_state *s)
+{
+ return unexpected_cmd(s);
+}
+
+static int cmd_handler_mi_flush_dw(struct parser_exec_state *s)
+{
+ int gmadr_bytes = s->vgpu->gvt->device_info.gmadr_bytes_in_cmd;
+ unsigned long gma;
+ bool index_mode = false;
+ int ret = 0;
+
+ /* Check post-sync and ppgtt bit */
+ if (((cmd_val(s, 0) >> 14) & 0x3) && (cmd_val(s, 1) & (1 << 2))) {
+ gma = cmd_val(s, 1) & GENMASK(31, 3);
+ if (gmadr_bytes == 8)
+ gma |= (cmd_val(s, 2) & GENMASK(15, 0)) << 32;
+ /* Store Data Index */
+ if (cmd_val(s, 0) & (1 << 21))
+ index_mode = true;
+ ret = cmd_address_audit(s, gma, sizeof(u64), index_mode);
+ }
+ /* Check notify bit */
+ if ((cmd_val(s, 0) & (1 << 8)))
+ set_bit(cmd_interrupt_events[s->ring_id].mi_flush_dw,
+ s->workload->pending_events);
+ return ret;
+}
+
+static void addr_type_update_snb(struct parser_exec_state *s)
+{
+ if ((s->buf_type == RING_BUFFER_INSTRUCTION) &&
+ (BATCH_BUFFER_ADR_SPACE_BIT(cmd_val(s, 0)) == 1)) {
+ s->buf_addr_type = PPGTT_BUFFER;
+ }
+}
+
+
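+/* Copy [gma, end_gma) from guest graphics memory into a host VA
+ * buffer, translating GMA to GPA page by page since the range may
+ * span non-contiguous guest pages.
+ */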
+static int copy_gma_to_hva(struct intel_vgpu *vgpu, struct intel_vgpu_mm *mm,
+ unsigned long gma, unsigned long end_gma, void *va)
+{
+ unsigned long copy_len, offset;
+ unsigned long len = 0;
+ unsigned long gpa;
+
+ while (gma != end_gma) {
+ gpa = intel_vgpu_gma_to_gpa(mm, gma);
+ if (gpa == INTEL_GVT_INVALID_ADDR) {
+ gvt_err("invalid gma address: %lx\n", gma);
+ return -EFAULT;
+ }
+
+ offset = gma & (GTT_PAGE_SIZE - 1);
+
+ copy_len = (end_gma - gma) >= (GTT_PAGE_SIZE - offset) ?
+ GTT_PAGE_SIZE - offset : end_gma - gma;
+
+ intel_gvt_hypervisor_read_gpa(vgpu, gpa, va + len, copy_len);
+
+ len += copy_len;
+ gma += copy_len;
+ }
+ return 0;
+}
+
+
+/*
+ * Check whether a batch buffer needs to be scanned. Currently
+ * the only criterion is privilege.
+ */
+static int batch_buffer_needs_scan(struct parser_exec_state *s)
+{
+ struct intel_gvt *gvt = s->vgpu->gvt;
+
+ if (bypass_batch_buffer_scan)
+ return 0;
+
+ if (IS_BROADWELL(gvt->dev_priv) || IS_SKYLAKE(gvt->dev_priv)) {
+ /* BDW decides privilege based on address space */
+ if (cmd_val(s, 0) & (1 << 8))
+ return 0;
+ }
+ return 1;
+}
+
+static int find_bb_size(struct parser_exec_state *s)
+{
+ unsigned long gma = 0;
+ struct cmd_info *info;
+ uint32_t bb_size = 0;
+ uint32_t cmd_len = 0;
+ bool met_bb_end = false;
+ u32 cmd;
+
+ /* get the start gm address of the batch buffer */
+ gma = get_gma_bb_from_cmd(s, 1);
+ cmd = cmd_val(s, 0);
+
+ info = get_cmd_info(s->vgpu->gvt, cmd, s->ring_id);
+ if (info == NULL) {
+ gvt_err("unknown cmd 0x%x, opcode=0x%x\n",
+ cmd, get_opcode(cmd, s->ring_id));
+ return -EINVAL;
+ }
+ do {
+ copy_gma_to_hva(s->vgpu, s->vgpu->gtt.ggtt_mm,
+ gma, gma + 4, &cmd);
+ info = get_cmd_info(s->vgpu->gvt, cmd, s->ring_id);
+ if (info == NULL) {
+ gvt_err("unknown cmd 0x%x, opcode=0x%x\n",
+ cmd, get_opcode(cmd, s->ring_id));
+ return -EINVAL;
+ }
+
+ if (info->opcode == OP_MI_BATCH_BUFFER_END) {
+ met_bb_end = true;
+ } else if (info->opcode == OP_MI_BATCH_BUFFER_START) {
+ if (BATCH_BUFFER_2ND_LEVEL_BIT(cmd) == 0) {
+ /* chained batch buffer */
+ met_bb_end = true;
+ }
+ }
+ cmd_len = get_cmd_length(info, cmd) << 2;
+ bb_size += cmd_len;
+ gma += cmd_len;
+
+ } while (!met_bb_end);
+
+ return bb_size;
+}
+
+static int perform_bb_shadow(struct parser_exec_state *s)
+{
+ struct intel_shadow_bb_entry *entry_obj;
+ unsigned long gma = 0;
+ int bb_size;
+ void *dst = NULL;
+ int ret = 0;
+
+ /* get the start gm address of the batch buffer */
+ gma = get_gma_bb_from_cmd(s, 1);
+
+ /* get the size of the batch buffer */
+ bb_size = find_bb_size(s);
+ if (bb_size < 0)
+ return bb_size;
+
+ /* allocate shadow batch buffer */
+ entry_obj = kmalloc(sizeof(*entry_obj), GFP_KERNEL);
+ if (entry_obj == NULL)
+ return -ENOMEM;
+
+ entry_obj->obj =
+ i915_gem_object_create(&(s->vgpu->gvt->dev_priv->drm),
+ roundup(bb_size, PAGE_SIZE));
+ if (IS_ERR(entry_obj->obj)) {
+ ret = PTR_ERR(entry_obj->obj);
+ goto free_entry;
+ }
+ entry_obj->len = bb_size;
+ INIT_LIST_HEAD(&entry_obj->list);
+
+ dst = i915_gem_object_pin_map(entry_obj->obj, I915_MAP_WB);
+ if (IS_ERR(dst)) {
+ ret = PTR_ERR(dst);
+ goto put_obj;
+ }
+
+ ret = i915_gem_object_set_to_cpu_domain(entry_obj->obj, false);
+ if (ret) {
+ gvt_err("failed to set shadow batch to CPU\n");
+ goto unmap_src;
+ }
+
+ entry_obj->va = dst;
+ entry_obj->bb_start_cmd_va = s->ip_va;
+
+ /* copy batch buffer to shadow batch buffer */
+ ret = copy_gma_to_hva(s->vgpu, s->vgpu->gtt.ggtt_mm,
+ gma, gma + bb_size,
+ dst);
+ if (ret) {
+ gvt_err("fail to copy guest ring buffer\n");
+ goto unmap_src;
+ }
+
+ list_add(&entry_obj->list, &s->workload->shadow_bb);
+ /*
+ * ip_va saves the virtual address of the shadow batch buffer, while
+ * ip_gma saves the graphics address of the original batch buffer.
+ * As the shadow batch buffer is just a copy of the original one,
+ * it is correct to use the shadow batch buffer's va with the
+ * original batch buffer's gma as a pair. After all, we don't want
+ * to pin the shadow buffer here (too early).
+ */
+ s->ip_va = dst;
+ s->ip_gma = gma;
+
+ return 0;
+
+unmap_src:
+ i915_gem_object_unpin_map(entry_obj->obj);
+put_obj:
+ i915_gem_object_put(entry_obj->obj);
+free_entry:
+ kfree(entry_obj);
+ return ret;
+}
+
+static int cmd_handler_mi_batch_buffer_start(struct parser_exec_state *s)
+{
+ bool second_level;
+ int ret = 0;
+
+ if (s->buf_type == BATCH_BUFFER_2ND_LEVEL) {
+ gvt_err("Found MI_BATCH_BUFFER_START in 2nd level BB\n");
+ return -EINVAL;
+ }
+
+ second_level = BATCH_BUFFER_2ND_LEVEL_BIT(cmd_val(s, 0)) == 1;
+ if (second_level && (s->buf_type != BATCH_BUFFER_INSTRUCTION)) {
+ gvt_err("Jumping to 2nd level BB from RB is not allowed\n");
+ return -EINVAL;
+ }
+
+ s->saved_buf_addr_type = s->buf_addr_type;
+ addr_type_update_snb(s);
+ if (s->buf_type == RING_BUFFER_INSTRUCTION) {
+ s->ret_ip_gma_ring = s->ip_gma + cmd_length(s) * sizeof(u32);
+ s->buf_type = BATCH_BUFFER_INSTRUCTION;
+ } else if (second_level) {
+ s->buf_type = BATCH_BUFFER_2ND_LEVEL;
+ s->ret_ip_gma_bb = s->ip_gma + cmd_length(s) * sizeof(u32);
+ s->ret_bb_va = s->ip_va + cmd_length(s) * sizeof(u32);
+ }
+
+ if (batch_buffer_needs_scan(s)) {
+ ret = perform_bb_shadow(s);
+ if (ret < 0)
+ gvt_err("invalid shadow batch buffer\n");
+ } else {
+ /* emulate a batch buffer end so the return is handled correctly */
+ ret = cmd_handler_mi_batch_buffer_end(s);
+ if (ret < 0)
+ return ret;
+ }
+
+ return ret;
+}
+
+static struct cmd_info cmd_info[] = {
+ {"MI_NOOP", OP_MI_NOOP, F_LEN_CONST, R_ALL, D_ALL, 0, 1, NULL},
+
+ {"MI_SET_PREDICATE", OP_MI_SET_PREDICATE, F_LEN_CONST, R_ALL, D_ALL,
+ 0, 1, NULL},
+
+ {"MI_USER_INTERRUPT", OP_MI_USER_INTERRUPT, F_LEN_CONST, R_ALL, D_ALL,
+ 0, 1, cmd_handler_mi_user_interrupt},
+
+ {"MI_WAIT_FOR_EVENT", OP_MI_WAIT_FOR_EVENT, F_LEN_CONST, R_RCS | R_BCS,
+ D_ALL, 0, 1, cmd_handler_mi_wait_for_event},
+
+ {"MI_FLUSH", OP_MI_FLUSH, F_LEN_CONST, R_ALL, D_ALL, 0, 1, NULL},
+
+ {"MI_ARB_CHECK", OP_MI_ARB_CHECK, F_LEN_CONST, R_ALL, D_ALL, 0, 1,
+ NULL},
+
+ {"MI_RS_CONTROL", OP_MI_RS_CONTROL, F_LEN_CONST, R_RCS, D_ALL, 0, 1,
+ NULL},
+
+ {"MI_REPORT_HEAD", OP_MI_REPORT_HEAD, F_LEN_CONST, R_ALL, D_ALL, 0, 1,
+ NULL},
+
+ {"MI_ARB_ON_OFF", OP_MI_ARB_ON_OFF, F_LEN_CONST, R_ALL, D_ALL, 0, 1,
+ NULL},
+
+ {"MI_URB_ATOMIC_ALLOC", OP_MI_URB_ATOMIC_ALLOC, F_LEN_CONST, R_RCS,
+ D_ALL, 0, 1, NULL},
+
+ {"MI_BATCH_BUFFER_END", OP_MI_BATCH_BUFFER_END,
+ F_IP_ADVANCE_CUSTOM | F_LEN_CONST, R_ALL, D_ALL, 0, 1,
+ cmd_handler_mi_batch_buffer_end},
+
+ {"MI_SUSPEND_FLUSH", OP_MI_SUSPEND_FLUSH, F_LEN_CONST, R_ALL, D_ALL,
+ 0, 1, NULL},
+
+ {"MI_PREDICATE", OP_MI_PREDICATE, F_LEN_CONST, R_RCS, D_ALL, 0, 1,
+ NULL},
+
+ {"MI_TOPOLOGY_FILTER", OP_MI_TOPOLOGY_FILTER, F_LEN_CONST, R_ALL,
+ D_ALL, 0, 1, NULL},
+
+ {"MI_SET_APPID", OP_MI_SET_APPID, F_LEN_CONST, R_ALL, D_ALL, 0, 1,
+ NULL},
+
+ {"MI_RS_CONTEXT", OP_MI_RS_CONTEXT, F_LEN_CONST, R_RCS, D_ALL, 0, 1,
+ NULL},
+
+ {"MI_DISPLAY_FLIP", OP_MI_DISPLAY_FLIP, F_LEN_VAR | F_POST_HANDLE,
+ R_RCS | R_BCS, D_ALL, 0, 8, cmd_handler_mi_display_flip},
+
+ {"MI_SEMAPHORE_MBOX", OP_MI_SEMAPHORE_MBOX, F_LEN_VAR, R_ALL, D_ALL,
+ 0, 8, NULL},
+
+ {"MI_MATH", OP_MI_MATH, F_LEN_VAR, R_ALL, D_ALL, 0, 8, NULL},
+
+ {"MI_URB_CLEAR", OP_MI_URB_CLEAR, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
+
+ {"ME_SEMAPHORE_SIGNAL", OP_MI_SEMAPHORE_SIGNAL, F_LEN_VAR, R_ALL,
+ D_BDW_PLUS, 0, 8, NULL},
+
+ {"ME_SEMAPHORE_WAIT", OP_MI_SEMAPHORE_WAIT, F_LEN_VAR, R_ALL, D_BDW_PLUS,
+ ADDR_FIX_1(2), 8, cmd_handler_mi_semaphore_wait},
+
+ {"MI_STORE_DATA_IMM", OP_MI_STORE_DATA_IMM, F_LEN_VAR, R_ALL, D_BDW_PLUS,
+ ADDR_FIX_1(1), 10, cmd_handler_mi_store_data_imm},
+
+ {"MI_STORE_DATA_INDEX", OP_MI_STORE_DATA_INDEX, F_LEN_VAR, R_ALL, D_ALL,
+ 0, 8, cmd_handler_mi_store_data_index},
+
+ {"MI_LOAD_REGISTER_IMM", OP_MI_LOAD_REGISTER_IMM, F_LEN_VAR, R_ALL,
+ D_ALL, 0, 8, cmd_handler_lri},
+
+ {"MI_UPDATE_GTT", OP_MI_UPDATE_GTT, F_LEN_VAR, R_ALL, D_BDW_PLUS, 0, 10,
+ cmd_handler_mi_update_gtt},
+
+ {"MI_STORE_REGISTER_MEM", OP_MI_STORE_REGISTER_MEM, F_LEN_VAR, R_ALL,
+ D_ALL, ADDR_FIX_1(2), 8, cmd_handler_srm},
+
+ {"MI_FLUSH_DW", OP_MI_FLUSH_DW, F_LEN_VAR, R_ALL, D_ALL, 0, 6,
+ cmd_handler_mi_flush_dw},
+
+ {"MI_CLFLUSH", OP_MI_CLFLUSH, F_LEN_VAR, R_ALL, D_ALL, ADDR_FIX_1(1),
+ 10, cmd_handler_mi_clflush},
+
+ {"MI_REPORT_PERF_COUNT", OP_MI_REPORT_PERF_COUNT, F_LEN_VAR, R_ALL,
+ D_ALL, ADDR_FIX_1(1), 6, cmd_handler_mi_report_perf_count},
+
+ {"MI_LOAD_REGISTER_MEM", OP_MI_LOAD_REGISTER_MEM, F_LEN_VAR, R_ALL,
+ D_ALL, ADDR_FIX_1(2), 8, cmd_handler_lrm},
+
+ {"MI_LOAD_REGISTER_REG", OP_MI_LOAD_REGISTER_REG, F_LEN_VAR, R_ALL,
+ D_ALL, 0, 8, cmd_handler_lrr},
+
+ {"MI_RS_STORE_DATA_IMM", OP_MI_RS_STORE_DATA_IMM, F_LEN_VAR, R_RCS,
+ D_ALL, 0, 8, NULL},
+
+ {"MI_LOAD_URB_MEM", OP_MI_LOAD_URB_MEM, F_LEN_VAR, R_RCS, D_ALL,
+ ADDR_FIX_1(2), 8, NULL},
+
+ {"MI_STORE_URM_MEM", OP_MI_STORE_URM_MEM, F_LEN_VAR, R_RCS, D_ALL,
+ ADDR_FIX_1(2), 8, NULL},
+
+ {"MI_OP_2E", OP_MI_2E, F_LEN_VAR, R_ALL, D_BDW_PLUS, ADDR_FIX_2(1, 2),
+ 8, cmd_handler_mi_op_2e},
+
+ {"MI_OP_2F", OP_MI_2F, F_LEN_VAR, R_ALL, D_BDW_PLUS, ADDR_FIX_1(1),
+ 8, cmd_handler_mi_op_2f},
+
+ {"MI_BATCH_BUFFER_START", OP_MI_BATCH_BUFFER_START,
+ F_IP_ADVANCE_CUSTOM, R_ALL, D_ALL, 0, 8,
+ cmd_handler_mi_batch_buffer_start},
+
+ {"MI_CONDITIONAL_BATCH_BUFFER_END", OP_MI_CONDITIONAL_BATCH_BUFFER_END,
+ F_LEN_VAR, R_ALL, D_ALL, ADDR_FIX_1(2), 8,
+ cmd_handler_mi_conditional_batch_buffer_end},
+
+ {"MI_LOAD_SCAN_LINES_INCL", OP_MI_LOAD_SCAN_LINES_INCL, F_LEN_CONST,
+ R_RCS | R_BCS, D_ALL, 0, 2, NULL},
+
+ {"XY_SETUP_BLT", OP_XY_SETUP_BLT, F_LEN_VAR, R_BCS, D_ALL,
+ ADDR_FIX_2(4, 7), 8, NULL},
+
+ {"XY_SETUP_CLIP_BLT", OP_XY_SETUP_CLIP_BLT, F_LEN_VAR, R_BCS, D_ALL,
+ 0, 8, NULL},
+
+ {"XY_SETUP_MONO_PATTERN_SL_BLT", OP_XY_SETUP_MONO_PATTERN_SL_BLT,
+ F_LEN_VAR, R_BCS, D_ALL, ADDR_FIX_1(4), 8, NULL},
+
+ {"XY_PIXEL_BLT", OP_XY_PIXEL_BLT, F_LEN_VAR, R_BCS, D_ALL, 0, 8, NULL},
+
+ {"XY_SCANLINES_BLT", OP_XY_SCANLINES_BLT, F_LEN_VAR, R_BCS, D_ALL,
+ 0, 8, NULL},
+
+ {"XY_TEXT_BLT", OP_XY_TEXT_BLT, F_LEN_VAR, R_BCS, D_ALL,
+ ADDR_FIX_1(3), 8, NULL},
+
+ {"XY_TEXT_IMMEDIATE_BLT", OP_XY_TEXT_IMMEDIATE_BLT, F_LEN_VAR, R_BCS,
+ D_ALL, 0, 8, NULL},
+
+ {"XY_COLOR_BLT", OP_XY_COLOR_BLT, F_LEN_VAR, R_BCS, D_ALL,
+ ADDR_FIX_1(4), 8, NULL},
+
+ {"XY_PAT_BLT", OP_XY_PAT_BLT, F_LEN_VAR, R_BCS, D_ALL,
+ ADDR_FIX_2(4, 5), 8, NULL},
+
+ {"XY_MONO_PAT_BLT", OP_XY_MONO_PAT_BLT, F_LEN_VAR, R_BCS, D_ALL,
+ ADDR_FIX_1(4), 8, NULL},
+
+ {"XY_SRC_COPY_BLT", OP_XY_SRC_COPY_BLT, F_LEN_VAR, R_BCS, D_ALL,
+ ADDR_FIX_2(4, 7), 8, NULL},
+
+ {"XY_MONO_SRC_COPY_BLT", OP_XY_MONO_SRC_COPY_BLT, F_LEN_VAR, R_BCS,
+ D_ALL, ADDR_FIX_2(4, 5), 8, NULL},
+
+ {"XY_FULL_BLT", OP_XY_FULL_BLT, F_LEN_VAR, R_BCS, D_ALL, 0, 8, NULL},
+
+ {"XY_FULL_MONO_SRC_BLT", OP_XY_FULL_MONO_SRC_BLT, F_LEN_VAR, R_BCS,
+ D_ALL, ADDR_FIX_3(4, 5, 8), 8, NULL},
+
+ {"XY_FULL_MONO_PATTERN_BLT", OP_XY_FULL_MONO_PATTERN_BLT, F_LEN_VAR,
+ R_BCS, D_ALL, ADDR_FIX_2(4, 7), 8, NULL},
+
+ {"XY_FULL_MONO_PATTERN_MONO_SRC_BLT",
+ OP_XY_FULL_MONO_PATTERN_MONO_SRC_BLT,
+ F_LEN_VAR, R_BCS, D_ALL, ADDR_FIX_2(4, 5), 8, NULL},
+
+ {"XY_MONO_PAT_FIXED_BLT", OP_XY_MONO_PAT_FIXED_BLT, F_LEN_VAR, R_BCS,
+ D_ALL, ADDR_FIX_1(4), 8, NULL},
+
+ {"XY_MONO_SRC_COPY_IMMEDIATE_BLT", OP_XY_MONO_SRC_COPY_IMMEDIATE_BLT,
+ F_LEN_VAR, R_BCS, D_ALL, ADDR_FIX_1(4), 8, NULL},
+
+ {"XY_PAT_BLT_IMMEDIATE", OP_XY_PAT_BLT_IMMEDIATE, F_LEN_VAR, R_BCS,
+ D_ALL, ADDR_FIX_1(4), 8, NULL},
+
+ {"XY_SRC_COPY_CHROMA_BLT", OP_XY_SRC_COPY_CHROMA_BLT, F_LEN_VAR, R_BCS,
+ D_ALL, ADDR_FIX_2(4, 7), 8, NULL},
+
+ {"XY_FULL_IMMEDIATE_PATTERN_BLT", OP_XY_FULL_IMMEDIATE_PATTERN_BLT,
+ F_LEN_VAR, R_BCS, D_ALL, ADDR_FIX_2(4, 7), 8, NULL},
+
+ {"XY_FULL_MONO_SRC_IMMEDIATE_PATTERN_BLT",
+ OP_XY_FULL_MONO_SRC_IMMEDIATE_PATTERN_BLT,
+ F_LEN_VAR, R_BCS, D_ALL, ADDR_FIX_2(4, 5), 8, NULL},
+
+ {"XY_PAT_CHROMA_BLT", OP_XY_PAT_CHROMA_BLT, F_LEN_VAR, R_BCS, D_ALL,
+ ADDR_FIX_2(4, 5), 8, NULL},
+
+ {"XY_PAT_CHROMA_BLT_IMMEDIATE", OP_XY_PAT_CHROMA_BLT_IMMEDIATE,
+ F_LEN_VAR, R_BCS, D_ALL, ADDR_FIX_1(4), 8, NULL},
+
+ {"3DSTATE_VIEWPORT_STATE_POINTERS_SF_CLIP",
+ OP_3DSTATE_VIEWPORT_STATE_POINTERS_SF_CLIP,
+ F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
+
+ {"3DSTATE_VIEWPORT_STATE_POINTERS_CC",
+ OP_3DSTATE_VIEWPORT_STATE_POINTERS_CC,
+ F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
+
+ {"3DSTATE_BLEND_STATE_POINTERS",
+ OP_3DSTATE_BLEND_STATE_POINTERS,
+ F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
+
+ {"3DSTATE_DEPTH_STENCIL_STATE_POINTERS",
+ OP_3DSTATE_DEPTH_STENCIL_STATE_POINTERS,
+ F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
+
+ {"3DSTATE_BINDING_TABLE_POINTERS_VS",
+ OP_3DSTATE_BINDING_TABLE_POINTERS_VS,
+ F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
+
+ {"3DSTATE_BINDING_TABLE_POINTERS_HS",
+ OP_3DSTATE_BINDING_TABLE_POINTERS_HS,
+ F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
+
+ {"3DSTATE_BINDING_TABLE_POINTERS_DS",
+ OP_3DSTATE_BINDING_TABLE_POINTERS_DS,
+ F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
+
+ {"3DSTATE_BINDING_TABLE_POINTERS_GS",
+ OP_3DSTATE_BINDING_TABLE_POINTERS_GS,
+ F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
+
+ {"3DSTATE_BINDING_TABLE_POINTERS_PS",
+ OP_3DSTATE_BINDING_TABLE_POINTERS_PS,
+ F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
+
+ {"3DSTATE_SAMPLER_STATE_POINTERS_VS",
+ OP_3DSTATE_SAMPLER_STATE_POINTERS_VS,
+ F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
+
+ {"3DSTATE_SAMPLER_STATE_POINTERS_HS",
+ OP_3DSTATE_SAMPLER_STATE_POINTERS_HS,
+ F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
+
+ {"3DSTATE_SAMPLER_STATE_POINTERS_DS",
+ OP_3DSTATE_SAMPLER_STATE_POINTERS_DS,
+ F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
+
+ {"3DSTATE_SAMPLER_STATE_POINTERS_GS",
+ OP_3DSTATE_SAMPLER_STATE_POINTERS_GS,
+ F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
+
+ {"3DSTATE_SAMPLER_STATE_POINTERS_PS",
+ OP_3DSTATE_SAMPLER_STATE_POINTERS_PS,
+ F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
+
+ {"3DSTATE_URB_VS", OP_3DSTATE_URB_VS, F_LEN_VAR, R_RCS, D_ALL,
+ 0, 8, NULL},
+
+ {"3DSTATE_URB_HS", OP_3DSTATE_URB_HS, F_LEN_VAR, R_RCS, D_ALL,
+ 0, 8, NULL},
+
+ {"3DSTATE_URB_DS", OP_3DSTATE_URB_DS, F_LEN_VAR, R_RCS, D_ALL,
+ 0, 8, NULL},
+
+ {"3DSTATE_URB_GS", OP_3DSTATE_URB_GS, F_LEN_VAR, R_RCS, D_ALL,
+ 0, 8, NULL},
+
+ {"3DSTATE_GATHER_CONSTANT_VS", OP_3DSTATE_GATHER_CONSTANT_VS,
+ F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
+
+ {"3DSTATE_GATHER_CONSTANT_GS", OP_3DSTATE_GATHER_CONSTANT_GS,
+ F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
+
+ {"3DSTATE_GATHER_CONSTANT_HS", OP_3DSTATE_GATHER_CONSTANT_HS,
+ F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
+
+ {"3DSTATE_GATHER_CONSTANT_DS", OP_3DSTATE_GATHER_CONSTANT_DS,
+ F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
+
+ {"3DSTATE_GATHER_CONSTANT_PS", OP_3DSTATE_GATHER_CONSTANT_PS,
+ F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
+
+ {"3DSTATE_DX9_CONSTANTF_VS", OP_3DSTATE_DX9_CONSTANTF_VS,
+ F_LEN_VAR, R_RCS, D_ALL, 0, 11, NULL},
+
+ {"3DSTATE_DX9_CONSTANTF_PS", OP_3DSTATE_DX9_CONSTANTF_PS,
+ F_LEN_VAR, R_RCS, D_ALL, 0, 11, NULL},
+
+ {"3DSTATE_DX9_CONSTANTI_VS", OP_3DSTATE_DX9_CONSTANTI_VS,
+ F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
+
+ {"3DSTATE_DX9_CONSTANTI_PS", OP_3DSTATE_DX9_CONSTANTI_PS,
+ F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
+
+ {"3DSTATE_DX9_CONSTANTB_VS", OP_3DSTATE_DX9_CONSTANTB_VS,
+ F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
+
+ {"3DSTATE_DX9_CONSTANTB_PS", OP_3DSTATE_DX9_CONSTANTB_PS,
+ F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
+
+ {"3DSTATE_DX9_LOCAL_VALID_VS", OP_3DSTATE_DX9_LOCAL_VALID_VS,
+ F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
+
+ {"3DSTATE_DX9_LOCAL_VALID_PS", OP_3DSTATE_DX9_LOCAL_VALID_PS,
+ F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
+
+ {"3DSTATE_DX9_GENERATE_ACTIVE_VS", OP_3DSTATE_DX9_GENERATE_ACTIVE_VS,
+ F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
+
+ {"3DSTATE_DX9_GENERATE_ACTIVE_PS", OP_3DSTATE_DX9_GENERATE_ACTIVE_PS,
+ F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
+
+ {"3DSTATE_BINDING_TABLE_EDIT_VS", OP_3DSTATE_BINDING_TABLE_EDIT_VS,
+ F_LEN_VAR, R_RCS, D_ALL, 0, 9, NULL},
+
+ {"3DSTATE_BINDING_TABLE_EDIT_GS", OP_3DSTATE_BINDING_TABLE_EDIT_GS,
+ F_LEN_VAR, R_RCS, D_ALL, 0, 9, NULL},
+
+ {"3DSTATE_BINDING_TABLE_EDIT_HS", OP_3DSTATE_BINDING_TABLE_EDIT_HS,
+ F_LEN_VAR, R_RCS, D_ALL, 0, 9, NULL},
+
+ {"3DSTATE_BINDING_TABLE_EDIT_DS", OP_3DSTATE_BINDING_TABLE_EDIT_DS,
+ F_LEN_VAR, R_RCS, D_ALL, 0, 9, NULL},
+
+ {"3DSTATE_BINDING_TABLE_EDIT_PS", OP_3DSTATE_BINDING_TABLE_EDIT_PS,
+ F_LEN_VAR, R_RCS, D_ALL, 0, 9, NULL},
+
+ {"3DSTATE_VF_INSTANCING", OP_3DSTATE_VF_INSTANCING, F_LEN_VAR, R_RCS,
+ D_BDW_PLUS, 0, 8, NULL},
+
+ {"3DSTATE_VF_SGVS", OP_3DSTATE_VF_SGVS, F_LEN_VAR, R_RCS, D_BDW_PLUS, 0, 8,
+ NULL},
+
+ {"3DSTATE_VF_TOPOLOGY", OP_3DSTATE_VF_TOPOLOGY, F_LEN_VAR, R_RCS,
+ D_BDW_PLUS, 0, 8, NULL},
+
+ {"3DSTATE_WM_CHROMAKEY", OP_3DSTATE_WM_CHROMAKEY, F_LEN_VAR, R_RCS,
+ D_BDW_PLUS, 0, 8, NULL},
+
+ {"3DSTATE_PS_BLEND", OP_3DSTATE_PS_BLEND, F_LEN_VAR, R_RCS, D_BDW_PLUS, 0,
+ 8, NULL},
+
+ {"3DSTATE_WM_DEPTH_STENCIL", OP_3DSTATE_WM_DEPTH_STENCIL, F_LEN_VAR,
+ R_RCS, D_BDW_PLUS, 0, 8, NULL},
+
+ {"3DSTATE_PS_EXTRA", OP_3DSTATE_PS_EXTRA, F_LEN_VAR, R_RCS, D_BDW_PLUS, 0,
+ 8, NULL},
+
+ {"3DSTATE_RASTER", OP_3DSTATE_RASTER, F_LEN_VAR, R_RCS, D_BDW_PLUS, 0, 8,
+ NULL},
+
+ {"3DSTATE_SBE_SWIZ", OP_3DSTATE_SBE_SWIZ, F_LEN_VAR, R_RCS, D_BDW_PLUS, 0, 8,
+ NULL},
+
+ {"3DSTATE_WM_HZ_OP", OP_3DSTATE_WM_HZ_OP, F_LEN_VAR, R_RCS, D_BDW_PLUS, 0, 8,
+ NULL},
+
+ {"3DSTATE_VERTEX_BUFFERS", OP_3DSTATE_VERTEX_BUFFERS, F_LEN_VAR, R_RCS,
+ D_BDW_PLUS, 0, 8, NULL},
+
+ {"3DSTATE_VERTEX_ELEMENTS", OP_3DSTATE_VERTEX_ELEMENTS, F_LEN_VAR,
+ R_RCS, D_ALL, 0, 8, NULL},
+
+ {"3DSTATE_INDEX_BUFFER", OP_3DSTATE_INDEX_BUFFER, F_LEN_VAR, R_RCS,
+ D_BDW_PLUS, ADDR_FIX_1(2), 8, NULL},
+
+ {"3DSTATE_VF_STATISTICS", OP_3DSTATE_VF_STATISTICS, F_LEN_CONST,
+ R_RCS, D_ALL, 0, 1, NULL},
+
+ {"3DSTATE_VF", OP_3DSTATE_VF, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
+
+ {"3DSTATE_CC_STATE_POINTERS", OP_3DSTATE_CC_STATE_POINTERS, F_LEN_VAR,
+ R_RCS, D_ALL, 0, 8, NULL},
+
+ {"3DSTATE_SCISSOR_STATE_POINTERS", OP_3DSTATE_SCISSOR_STATE_POINTERS,
+ F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
+
+ {"3DSTATE_GS", OP_3DSTATE_GS, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
+
+ {"3DSTATE_CLIP", OP_3DSTATE_CLIP, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
+
+ {"3DSTATE_WM", OP_3DSTATE_WM, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
+
+ {"3DSTATE_CONSTANT_GS", OP_3DSTATE_CONSTANT_GS, F_LEN_VAR, R_RCS,
+ D_BDW_PLUS, 0, 8, NULL},
+
+ {"3DSTATE_CONSTANT_PS", OP_3DSTATE_CONSTANT_PS, F_LEN_VAR, R_RCS,
+ D_BDW_PLUS, 0, 8, NULL},
+
+ {"3DSTATE_SAMPLE_MASK", OP_3DSTATE_SAMPLE_MASK, F_LEN_VAR, R_RCS,
+ D_ALL, 0, 8, NULL},
+
+ {"3DSTATE_CONSTANT_HS", OP_3DSTATE_CONSTANT_HS, F_LEN_VAR, R_RCS,
+ D_BDW_PLUS, 0, 8, NULL},
+
+ {"3DSTATE_CONSTANT_DS", OP_3DSTATE_CONSTANT_DS, F_LEN_VAR, R_RCS,
+ D_BDW_PLUS, 0, 8, NULL},
+
+ {"3DSTATE_HS", OP_3DSTATE_HS, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
+
+ {"3DSTATE_TE", OP_3DSTATE_TE, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
+
+ {"3DSTATE_DS", OP_3DSTATE_DS, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
+
+ {"3DSTATE_STREAMOUT", OP_3DSTATE_STREAMOUT, F_LEN_VAR, R_RCS,
+ D_ALL, 0, 8, NULL},
+
+ {"3DSTATE_SBE", OP_3DSTATE_SBE, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
+
+ {"3DSTATE_PS", OP_3DSTATE_PS, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
+
+ {"3DSTATE_DRAWING_RECTANGLE", OP_3DSTATE_DRAWING_RECTANGLE, F_LEN_VAR,
+ R_RCS, D_ALL, 0, 8, NULL},
+
+ {"3DSTATE_SAMPLER_PALETTE_LOAD0", OP_3DSTATE_SAMPLER_PALETTE_LOAD0,
+ F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
+
+ {"3DSTATE_CHROMA_KEY", OP_3DSTATE_CHROMA_KEY, F_LEN_VAR, R_RCS, D_ALL,
+ 0, 8, NULL},
+
+ {"3DSTATE_DEPTH_BUFFER", OP_3DSTATE_DEPTH_BUFFER, F_LEN_VAR, R_RCS,
+ D_ALL, ADDR_FIX_1(2), 8, NULL},
+
+ {"3DSTATE_POLY_STIPPLE_OFFSET", OP_3DSTATE_POLY_STIPPLE_OFFSET,
+ F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
+
+ {"3DSTATE_POLY_STIPPLE_PATTERN", OP_3DSTATE_POLY_STIPPLE_PATTERN,
+ F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
+
+ {"3DSTATE_LINE_STIPPLE", OP_3DSTATE_LINE_STIPPLE, F_LEN_VAR, R_RCS,
+ D_ALL, 0, 8, NULL},
+
+ {"3DSTATE_AA_LINE_PARAMS", OP_3DSTATE_AA_LINE_PARAMS, F_LEN_VAR, R_RCS,
+ D_ALL, 0, 8, NULL},
+
+ {"3DSTATE_GS_SVB_INDEX", OP_3DSTATE_GS_SVB_INDEX, F_LEN_VAR, R_RCS,
+ D_ALL, 0, 8, NULL},
+
+ {"3DSTATE_SAMPLER_PALETTE_LOAD1", OP_3DSTATE_SAMPLER_PALETTE_LOAD1,
+ F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
+
+ {"3DSTATE_MULTISAMPLE", OP_3DSTATE_MULTISAMPLE_BDW, F_LEN_VAR, R_RCS,
+ D_BDW_PLUS, 0, 8, NULL},
+
+ {"3DSTATE_STENCIL_BUFFER", OP_3DSTATE_STENCIL_BUFFER, F_LEN_VAR, R_RCS,
+ D_ALL, ADDR_FIX_1(2), 8, NULL},
+
+ {"3DSTATE_HIER_DEPTH_BUFFER", OP_3DSTATE_HIER_DEPTH_BUFFER, F_LEN_VAR,
+ R_RCS, D_ALL, ADDR_FIX_1(2), 8, NULL},
+
+ {"3DSTATE_CLEAR_PARAMS", OP_3DSTATE_CLEAR_PARAMS, F_LEN_VAR,
+ R_RCS, D_ALL, 0, 8, NULL},
+
+ {"3DSTATE_PUSH_CONSTANT_ALLOC_VS", OP_3DSTATE_PUSH_CONSTANT_ALLOC_VS,
+ F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
+
+ {"3DSTATE_PUSH_CONSTANT_ALLOC_HS", OP_3DSTATE_PUSH_CONSTANT_ALLOC_HS,
+ F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
+
+ {"3DSTATE_PUSH_CONSTANT_ALLOC_DS", OP_3DSTATE_PUSH_CONSTANT_ALLOC_DS,
+ F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
+
+ {"3DSTATE_PUSH_CONSTANT_ALLOC_GS", OP_3DSTATE_PUSH_CONSTANT_ALLOC_GS,
+ F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
+
+ {"3DSTATE_PUSH_CONSTANT_ALLOC_PS", OP_3DSTATE_PUSH_CONSTANT_ALLOC_PS,
+ F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
+
+ {"3DSTATE_MONOFILTER_SIZE", OP_3DSTATE_MONOFILTER_SIZE, F_LEN_VAR,
+ R_RCS, D_ALL, 0, 8, NULL},
+
+ {"3DSTATE_SO_DECL_LIST", OP_3DSTATE_SO_DECL_LIST, F_LEN_VAR, R_RCS,
+ D_ALL, 0, 9, NULL},
+
+ {"3DSTATE_SO_BUFFER", OP_3DSTATE_SO_BUFFER, F_LEN_VAR, R_RCS, D_BDW_PLUS,
+ ADDR_FIX_2(2, 4), 8, NULL},
+
+ {"3DSTATE_BINDING_TABLE_POOL_ALLOC",
+ OP_3DSTATE_BINDING_TABLE_POOL_ALLOC,
+ F_LEN_VAR, R_RCS, D_BDW_PLUS, ADDR_FIX_1(1), 8, NULL},
+
+ {"3DSTATE_GATHER_POOL_ALLOC", OP_3DSTATE_GATHER_POOL_ALLOC,
+ F_LEN_VAR, R_RCS, D_BDW_PLUS, ADDR_FIX_1(1), 8, NULL},
+
+ {"3DSTATE_DX9_CONSTANT_BUFFER_POOL_ALLOC",
+ OP_3DSTATE_DX9_CONSTANT_BUFFER_POOL_ALLOC,
+ F_LEN_VAR, R_RCS, D_BDW_PLUS, ADDR_FIX_1(1), 8, NULL},
+
+ {"3DSTATE_SAMPLE_PATTERN", OP_3DSTATE_SAMPLE_PATTERN, F_LEN_VAR, R_RCS,
+ D_BDW_PLUS, 0, 8, NULL},
+
+ {"PIPE_CONTROL", OP_PIPE_CONTROL, F_LEN_VAR, R_RCS, D_ALL,
+ ADDR_FIX_1(2), 8, cmd_handler_pipe_control},
+
+ {"3DPRIMITIVE", OP_3DPRIMITIVE, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
+
+ {"PIPELINE_SELECT", OP_PIPELINE_SELECT, F_LEN_CONST, R_RCS, D_ALL, 0,
+ 1, NULL},
+
+ {"STATE_PREFETCH", OP_STATE_PREFETCH, F_LEN_VAR, R_RCS, D_ALL,
+ ADDR_FIX_1(1), 8, NULL},
+
+ {"STATE_SIP", OP_STATE_SIP, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
+
+ {"STATE_BASE_ADDRESS", OP_STATE_BASE_ADDRESS, F_LEN_VAR, R_RCS, D_BDW_PLUS,
+ ADDR_FIX_5(1, 3, 4, 5, 6), 8, NULL},
+
+ {"OP_3D_MEDIA_0_1_4", OP_3D_MEDIA_0_1_4, F_LEN_VAR, R_RCS, D_ALL,
+ ADDR_FIX_1(1), 8, NULL},
+
+ {"3DSTATE_VS", OP_3DSTATE_VS, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
+
+ {"3DSTATE_SF", OP_3DSTATE_SF, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
+
+ {"3DSTATE_CONSTANT_VS", OP_3DSTATE_CONSTANT_VS, F_LEN_VAR, R_RCS, D_BDW_PLUS,
+ 0, 8, NULL},
+
+ {"3DSTATE_COMPONENT_PACKING", OP_3DSTATE_COMPONENT_PACKING, F_LEN_VAR, R_RCS,
+ D_SKL_PLUS, 0, 8, NULL},
+
+ {"MEDIA_INTERFACE_DESCRIPTOR_LOAD", OP_MEDIA_INTERFACE_DESCRIPTOR_LOAD,
+ F_LEN_VAR, R_RCS, D_ALL, 0, 16, NULL},
+
+ {"MEDIA_GATEWAY_STATE", OP_MEDIA_GATEWAY_STATE, F_LEN_VAR, R_RCS, D_ALL,
+ 0, 16, NULL},
+
+ {"MEDIA_STATE_FLUSH", OP_MEDIA_STATE_FLUSH, F_LEN_VAR, R_RCS, D_ALL,
+ 0, 16, NULL},
+
+ {"MEDIA_OBJECT", OP_MEDIA_OBJECT, F_LEN_VAR, R_RCS, D_ALL, 0, 16, NULL},
+
+ {"MEDIA_CURBE_LOAD", OP_MEDIA_CURBE_LOAD, F_LEN_VAR, R_RCS, D_ALL,
+ 0, 16, NULL},
+
+ {"MEDIA_OBJECT_PRT", OP_MEDIA_OBJECT_PRT, F_LEN_VAR, R_RCS, D_ALL,
+ 0, 16, NULL},
+
+ {"MEDIA_OBJECT_WALKER", OP_MEDIA_OBJECT_WALKER, F_LEN_VAR, R_RCS, D_ALL,
+ 0, 16, NULL},
+
+ {"GPGPU_WALKER", OP_GPGPU_WALKER, F_LEN_VAR, R_RCS, D_ALL,
+ 0, 8, NULL},
+
+ {"MEDIA_VFE_STATE", OP_MEDIA_VFE_STATE, F_LEN_VAR, R_RCS, D_ALL, 0, 16,
+ NULL},
+
+ {"3DSTATE_VF_STATISTICS_GM45", OP_3DSTATE_VF_STATISTICS_GM45,
+ F_LEN_CONST, R_ALL, D_ALL, 0, 1, NULL},
+
+ {"MFX_PIPE_MODE_SELECT", OP_MFX_PIPE_MODE_SELECT, F_LEN_VAR,
+ R_VCS, D_ALL, 0, 12, NULL},
+
+ {"MFX_SURFACE_STATE", OP_MFX_SURFACE_STATE, F_LEN_VAR,
+ R_VCS, D_ALL, 0, 12, NULL},
+
+ {"MFX_PIPE_BUF_ADDR_STATE", OP_MFX_PIPE_BUF_ADDR_STATE, F_LEN_VAR,
+ R_VCS, D_BDW_PLUS, 0, 12, NULL},
+
+ {"MFX_IND_OBJ_BASE_ADDR_STATE", OP_MFX_IND_OBJ_BASE_ADDR_STATE,
+ F_LEN_VAR, R_VCS, D_BDW_PLUS, 0, 12, NULL},
+
+ {"MFX_BSP_BUF_BASE_ADDR_STATE", OP_MFX_BSP_BUF_BASE_ADDR_STATE,
+ F_LEN_VAR, R_VCS, D_BDW_PLUS, ADDR_FIX_3(1, 3, 5), 12, NULL},
+
+ {"OP_2_0_0_5", OP_2_0_0_5, F_LEN_VAR, R_VCS, D_BDW_PLUS, 0, 12, NULL},
+
+ {"MFX_STATE_POINTER", OP_MFX_STATE_POINTER, F_LEN_VAR,
+ R_VCS, D_ALL, 0, 12, NULL},
+
+ {"MFX_QM_STATE", OP_MFX_QM_STATE, F_LEN_VAR,
+ R_VCS, D_ALL, 0, 12, NULL},
+
+ {"MFX_FQM_STATE", OP_MFX_FQM_STATE, F_LEN_VAR,
+ R_VCS, D_ALL, 0, 12, NULL},
+
+ {"MFX_PAK_INSERT_OBJECT", OP_MFX_PAK_INSERT_OBJECT, F_LEN_VAR,
+ R_VCS, D_ALL, 0, 12, NULL},
+
+ {"MFX_STITCH_OBJECT", OP_MFX_STITCH_OBJECT, F_LEN_VAR,
+ R_VCS, D_ALL, 0, 12, NULL},
+
+ {"MFD_IT_OBJECT", OP_MFD_IT_OBJECT, F_LEN_VAR,
+ R_VCS, D_ALL, 0, 12, NULL},
+
+ {"MFX_WAIT", OP_MFX_WAIT, F_LEN_VAR,
+ R_VCS, D_ALL, 0, 6, NULL},
+
+ {"MFX_AVC_IMG_STATE", OP_MFX_AVC_IMG_STATE, F_LEN_VAR,
+ R_VCS, D_ALL, 0, 12, NULL},
+
+ {"MFX_AVC_QM_STATE", OP_MFX_AVC_QM_STATE, F_LEN_VAR,
+ R_VCS, D_ALL, 0, 12, NULL},
+
+ {"MFX_AVC_DIRECTMODE_STATE", OP_MFX_AVC_DIRECTMODE_STATE, F_LEN_VAR,
+ R_VCS, D_ALL, 0, 12, NULL},
+
+ {"MFX_AVC_SLICE_STATE", OP_MFX_AVC_SLICE_STATE, F_LEN_VAR,
+ R_VCS, D_ALL, 0, 12, NULL},
+
+ {"MFX_AVC_REF_IDX_STATE", OP_MFX_AVC_REF_IDX_STATE, F_LEN_VAR,
+ R_VCS, D_ALL, 0, 12, NULL},
+
+ {"MFX_AVC_WEIGHTOFFSET_STATE", OP_MFX_AVC_WEIGHTOFFSET_STATE, F_LEN_VAR,
+ R_VCS, D_ALL, 0, 12, NULL},
+
+ {"MFD_AVC_PICID_STATE", OP_MFD_AVC_PICID_STATE, F_LEN_VAR,
+ R_VCS, D_ALL, 0, 12, NULL},
+ {"MFD_AVC_DPB_STATE", OP_MFD_AVC_DPB_STATE, F_LEN_VAR,
+ R_VCS, D_ALL, 0, 12, NULL},
+
+ {"MFD_AVC_BSD_OBJECT", OP_MFD_AVC_BSD_OBJECT, F_LEN_VAR,
+ R_VCS, D_ALL, 0, 12, NULL},
+
+ {"MFD_AVC_SLICEADDR", OP_MFD_AVC_SLICEADDR, F_LEN_VAR,
+ R_VCS, D_ALL, ADDR_FIX_1(2), 12, NULL},
+
+ {"MFC_AVC_PAK_OBJECT", OP_MFC_AVC_PAK_OBJECT, F_LEN_VAR,
+ R_VCS, D_ALL, 0, 12, NULL},
+
+ {"MFX_VC1_PRED_PIPE_STATE", OP_MFX_VC1_PRED_PIPE_STATE, F_LEN_VAR,
+ R_VCS, D_ALL, 0, 12, NULL},
+
+ {"MFX_VC1_DIRECTMODE_STATE", OP_MFX_VC1_DIRECTMODE_STATE, F_LEN_VAR,
+ R_VCS, D_ALL, 0, 12, NULL},
+
+ {"MFD_VC1_SHORT_PIC_STATE", OP_MFD_VC1_SHORT_PIC_STATE, F_LEN_VAR,
+ R_VCS, D_ALL, 0, 12, NULL},
+
+ {"MFD_VC1_LONG_PIC_STATE", OP_MFD_VC1_LONG_PIC_STATE, F_LEN_VAR,
+ R_VCS, D_ALL, 0, 12, NULL},
+
+ {"MFD_VC1_BSD_OBJECT", OP_MFD_VC1_BSD_OBJECT, F_LEN_VAR,
+ R_VCS, D_ALL, 0, 12, NULL},
+
+ {"MFC_MPEG2_SLICEGROUP_STATE", OP_MFC_MPEG2_SLICEGROUP_STATE, F_LEN_VAR,
+ R_VCS, D_ALL, 0, 12, NULL},
+
+ {"MFC_MPEG2_PAK_OBJECT", OP_MFC_MPEG2_PAK_OBJECT, F_LEN_VAR,
+ R_VCS, D_ALL, 0, 12, NULL},
+
+ {"MFX_MPEG2_PIC_STATE", OP_MFX_MPEG2_PIC_STATE, F_LEN_VAR,
+ R_VCS, D_ALL, 0, 12, NULL},
+
+ {"MFX_MPEG2_QM_STATE", OP_MFX_MPEG2_QM_STATE, F_LEN_VAR,
+ R_VCS, D_ALL, 0, 12, NULL},
+
+ {"MFD_MPEG2_BSD_OBJECT", OP_MFD_MPEG2_BSD_OBJECT, F_LEN_VAR,
+ R_VCS, D_ALL, 0, 12, NULL},
+
+ {"MFX_2_6_0_0", OP_MFX_2_6_0_0, F_LEN_VAR, R_VCS, D_ALL,
+ 0, 16, NULL},
+
+ {"MFX_2_6_0_9", OP_MFX_2_6_0_9, F_LEN_VAR, R_VCS, D_ALL, 0, 16, NULL},
+
+ {"MFX_2_6_0_8", OP_MFX_2_6_0_8, F_LEN_VAR, R_VCS, D_ALL, 0, 16, NULL},
+
+ {"MFX_JPEG_PIC_STATE", OP_MFX_JPEG_PIC_STATE, F_LEN_VAR,
+ R_VCS, D_ALL, 0, 12, NULL},
+
+ {"MFX_JPEG_HUFF_TABLE_STATE", OP_MFX_JPEG_HUFF_TABLE_STATE, F_LEN_VAR,
+ R_VCS, D_ALL, 0, 12, NULL},
+
+ {"MFD_JPEG_BSD_OBJECT", OP_MFD_JPEG_BSD_OBJECT, F_LEN_VAR,
+ R_VCS, D_ALL, 0, 12, NULL},
+
+ {"VEBOX_STATE", OP_VEB_STATE, F_LEN_VAR, R_VECS, D_ALL, 0, 12, NULL},
+
+ {"VEBOX_SURFACE_STATE", OP_VEB_SURFACE_STATE, F_LEN_VAR, R_VECS, D_ALL,
+ 0, 12, NULL},
+
+ {"VEB_DI_IECP", OP_VEB_DNDI_IECP_STATE, F_LEN_VAR, R_VECS, D_BDW_PLUS,
+ 0, 20, NULL},
+};
+
+static void add_cmd_entry(struct intel_gvt *gvt, struct cmd_entry *e)
+{
+ hash_add(gvt->cmd_table, &e->hlist, e->info->opcode);
+}
+
+#define GVT_MAX_CMD_LENGTH 20 /* In Dword */
+
+static void trace_cs_command(struct parser_exec_state *s,
+ cycles_t cost_pre_cmd_handler, cycles_t cost_cmd_handler)
+{
+ /* This buffer is used by ftrace to store all commands copied from
+ * guest gma space. Sometimes commands can cross pages, which should
+ * not be handled in the ftrace logic, so this is just used as a
+ * 'bounce buffer'.
+ */
+ u32 cmd_trace_buf[GVT_MAX_CMD_LENGTH];
+ int i;
+ u32 cmd_len = cmd_length(s);
+ /* The chosen value of GVT_MAX_CMD_LENGTH is based on the
+ * following two considerations:
+ * 1) From observation, most common ring commands are not that long.
+ *    But there are exceptions, so it makes sense to observe
+ *    longer commands.
+ * 2) From the performance and debugging point of view, dumping the
+ *    full contents of every command is not necessary.
+ * We might shrink GVT_MAX_CMD_LENGTH or remove this trace event in
+ * the future for performance reasons.
+ */
+ if (unlikely(cmd_len > GVT_MAX_CMD_LENGTH)) {
+ gvt_dbg_cmd("cmd length exceed tracing limitation!\n");
+ cmd_len = GVT_MAX_CMD_LENGTH;
+ }
+
+ for (i = 0; i < cmd_len; i++)
+ cmd_trace_buf[i] = cmd_val(s, i);
+
+ trace_gvt_command(s->vgpu->id, s->ring_id, s->ip_gma, cmd_trace_buf,
+ cmd_len, s->buf_type == RING_BUFFER_INSTRUCTION,
+ cost_pre_cmd_handler, cost_cmd_handler);
+}
+
+/* call the cmd handler, and advance ip */
+static int cmd_parser_exec(struct parser_exec_state *s)
+{
+ struct cmd_info *info;
+ u32 cmd;
+ int ret = 0;
+ cycles_t t0, t1, t2;
+ struct parser_exec_state s_before_advance_custom;
+
+ t0 = get_cycles();
+
+ cmd = cmd_val(s, 0);
+
+ info = get_cmd_info(s->vgpu->gvt, cmd, s->ring_id);
+ if (info == NULL) {
+ gvt_err("unknown cmd 0x%x, opcode=0x%x\n",
+ cmd, get_opcode(cmd, s->ring_id));
+ return -EINVAL;
+ }
+
+ gvt_dbg_cmd("%s\n", info->name);
+
+ s->info = info;
+
+ t1 = get_cycles();
+
+ memcpy(&s_before_advance_custom, s, sizeof(struct parser_exec_state));
+
+ if (info->handler) {
+ ret = info->handler(s);
+ if (ret < 0) {
+ gvt_err("%s handler error\n", info->name);
+ return ret;
+ }
+ }
+ t2 = get_cycles();
+
+ trace_cs_command(&s_before_advance_custom, t1 - t0, t2 - t1);
+
+ if (!(info->flag & F_IP_ADVANCE_CUSTOM)) {
+ ret = cmd_advance_default(s);
+ if (ret) {
+ gvt_err("%s IP advance error\n", info->name);
+ return ret;
+ }
+ }
+ return 0;
+}
+
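+/* check whether gma lies outside the [gma_head, gma_tail] window,
+ * taking ring buffer wrap-around into account
+ */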
+static inline bool gma_out_of_range(unsigned long gma,
+ unsigned long gma_head, unsigned int gma_tail)
+{
+ if (gma_tail >= gma_head)
+ return (gma < gma_head) || (gma > gma_tail);
+ else
+ return (gma > gma_tail) && (gma < gma_head);
+}
+
+static int command_scan(struct parser_exec_state *s,
+ unsigned long rb_head, unsigned long rb_tail,
+ unsigned long rb_start, unsigned long rb_len)
+{
+
+ unsigned long gma_head, gma_tail, gma_bottom;
+ int ret = 0;
+
+ gma_head = rb_start + rb_head;
+ gma_tail = rb_start + rb_tail;
+ gma_bottom = rb_start + rb_len;
+
+ gvt_dbg_cmd("scan_start: start=%lx end=%lx\n", gma_head, gma_tail);
+
+ while (s->ip_gma != gma_tail) {
+ if (s->buf_type == RING_BUFFER_INSTRUCTION) {
+ if (!(s->ip_gma >= rb_start) ||
+ !(s->ip_gma < gma_bottom)) {
+ gvt_err("ip_gma %lx out of ring scope."
+ "(base:0x%lx, bottom: 0x%lx)\n",
+ s->ip_gma, rb_start,
+ gma_bottom);
+ parser_exec_state_dump(s);
+ return -EINVAL;
+ }
+ if (gma_out_of_range(s->ip_gma, gma_head, gma_tail)) {
+ gvt_err("ip_gma %lx out of range."
+ "base 0x%lx head 0x%lx tail 0x%lx\n",
+ s->ip_gma, rb_start,
+ rb_head, rb_tail);
+ parser_exec_state_dump(s);
+ break;
+ }
+ }
+ ret = cmd_parser_exec(s);
+ if (ret) {
+ gvt_err("cmd parser error\n");
+ parser_exec_state_dump(s);
+ break;
+ }
+ }
+
+ gvt_dbg_cmd("scan_end\n");
+
+ return ret;
+}
+
+static int scan_workload(struct intel_vgpu_workload *workload)
+{
+ unsigned long gma_head, gma_tail, gma_bottom;
+ struct parser_exec_state s;
+ int ret = 0;
+
+ /* ring base is page aligned */
+ if (WARN_ON(!IS_ALIGNED(workload->rb_start, GTT_PAGE_SIZE)))
+ return -EINVAL;
+
+ gma_head = workload->rb_start + workload->rb_head;
+ gma_tail = workload->rb_start + workload->rb_tail;
+ gma_bottom = workload->rb_start + _RING_CTL_BUF_SIZE(workload->rb_ctl);
+
+ s.buf_type = RING_BUFFER_INSTRUCTION;
+ s.buf_addr_type = GTT_BUFFER;
+ s.vgpu = workload->vgpu;
+ s.ring_id = workload->ring_id;
+ s.ring_start = workload->rb_start;
+ s.ring_size = _RING_CTL_BUF_SIZE(workload->rb_ctl);
+ s.ring_head = gma_head;
+ s.ring_tail = gma_tail;
+ s.rb_va = workload->shadow_ring_buffer_va;
+ s.workload = workload;
+
+ if ((bypass_scan_mask & (1 << workload->ring_id)) ||
+ gma_head == gma_tail)
+ return 0;
+
+ ret = ip_gma_set(&s, gma_head);
+ if (ret)
+ goto out;
+
+ ret = command_scan(&s, workload->rb_head, workload->rb_tail,
+ workload->rb_start, _RING_CTL_BUF_SIZE(workload->rb_ctl));
+
+out:
+ return ret;
+}
+
+static int scan_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
+{
+ unsigned long gma_head, gma_tail, gma_bottom, ring_size, ring_tail;
+ struct parser_exec_state s;
+ int ret = 0;
+
+ /* ring base is page aligned */
+ if (WARN_ON(!IS_ALIGNED(wa_ctx->indirect_ctx.guest_gma, GTT_PAGE_SIZE)))
+ return -EINVAL;
+
+ ring_tail = wa_ctx->indirect_ctx.size + 3 * sizeof(uint32_t);
+ ring_size = round_up(wa_ctx->indirect_ctx.size + CACHELINE_BYTES,
+ PAGE_SIZE);
+ gma_head = wa_ctx->indirect_ctx.guest_gma;
+ gma_tail = wa_ctx->indirect_ctx.guest_gma + ring_tail;
+ gma_bottom = wa_ctx->indirect_ctx.guest_gma + ring_size;
+
+ s.buf_type = RING_BUFFER_INSTRUCTION;
+ s.buf_addr_type = GTT_BUFFER;
+ s.vgpu = wa_ctx->workload->vgpu;
+ s.ring_id = wa_ctx->workload->ring_id;
+ s.ring_start = wa_ctx->indirect_ctx.guest_gma;
+ s.ring_size = ring_size;
+ s.ring_head = gma_head;
+ s.ring_tail = gma_tail;
+ s.rb_va = wa_ctx->indirect_ctx.shadow_va;
+ s.workload = wa_ctx->workload;
+
+ ret = ip_gma_set(&s, gma_head);
+ if (ret)
+ goto out;
+
+ ret = command_scan(&s, 0, ring_tail,
+ wa_ctx->indirect_ctx.guest_gma, ring_size);
+out:
+ return ret;
+}
+
+static int shadow_workload_ring_buffer(struct intel_vgpu_workload *workload)
+{
+ struct intel_vgpu *vgpu = workload->vgpu;
+ int ring_id = workload->ring_id;
+ struct i915_gem_context *shadow_ctx = vgpu->shadow_ctx;
+ struct intel_ring *ring = shadow_ctx->engine[ring_id].ring;
+ unsigned long gma_head, gma_tail, gma_top, guest_rb_size;
+ unsigned int copy_len = 0;
+ int ret;
+
+ guest_rb_size = _RING_CTL_BUF_SIZE(workload->rb_ctl);
+
+ /* calculate workload ring buffer size */
+ workload->rb_len = (workload->rb_tail + guest_rb_size -
+ workload->rb_head) % guest_rb_size;
+
+ gma_head = workload->rb_start + workload->rb_head;
+ gma_tail = workload->rb_start + workload->rb_tail;
+ gma_top = workload->rb_start + guest_rb_size;
+
+ /* allocate shadow ring buffer */
+ ret = intel_ring_begin(workload->req, workload->rb_len / 4);
+ if (ret)
+ return ret;
+
+ /* get shadow ring buffer va */
+ workload->shadow_ring_buffer_va = ring->vaddr + ring->tail;
+
+ /* head > tail --> copy from head to the top of the ring first */
+ if (gma_head > gma_tail) {
+ ret = copy_gma_to_hva(vgpu, vgpu->gtt.ggtt_mm,
+ gma_head, gma_top,
+ workload->shadow_ring_buffer_va);
+ if (ret) {
+ gvt_err("fail to copy guest ring buffer\n");
+ return ret;
+ }
+ copy_len = gma_top - gma_head;
+ gma_head = workload->rb_start;
+ }
+
+ /* copy from head (or ring start, if we wrapped above) to tail */
+ ret = copy_gma_to_hva(vgpu, vgpu->gtt.ggtt_mm,
+ gma_head, gma_tail,
+ workload->shadow_ring_buffer_va + copy_len);
+ if (ret) {
+ gvt_err("fail to copy guest ring buffer\n");
+ return ret;
+ }
+ ring->tail += workload->rb_len;
+ intel_ring_advance(ring);
+ return 0;
+}
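+
+/*
+ * A worked example of the wrap-around copy above (illustrative values):
+ * with a 4KB guest ring, rb_head = 0xe00 and rb_tail = 0x200, rb_len is
+ * (0x200 + 0x1000 - 0xe00) % 0x1000 = 0x400. Since gma_head > gma_tail,
+ * the first pass copies the 0x200 bytes from head up to the ring top,
+ * then head resets to rb_start and the remaining 0x200 bytes are appended
+ * at copy_len in the shadow buffer.
+ */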
+
+int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
+{
+ int ret;
+
+ ret = shadow_workload_ring_buffer(workload);
+ if (ret) {
+ gvt_err("fail to shadow workload ring_buffer\n");
+ return ret;
+ }
+
+ ret = scan_workload(workload);
+ if (ret) {
+ gvt_err("scan workload error\n");
+ return ret;
+ }
+ return 0;
+}
+
+static int shadow_indirect_ctx(struct intel_shadow_wa_ctx *wa_ctx)
+{
+ struct drm_device *dev = &wa_ctx->workload->vgpu->gvt->dev_priv->drm;
+ int ctx_size = wa_ctx->indirect_ctx.size;
+ unsigned long guest_gma = wa_ctx->indirect_ctx.guest_gma;
+ struct drm_i915_gem_object *obj;
+ int ret = 0;
+ void *map;
+
+ obj = i915_gem_object_create(dev,
+ roundup(ctx_size + CACHELINE_BYTES,
+ PAGE_SIZE));
+ if (IS_ERR(obj))
+ return PTR_ERR(obj);
+
+ /* get the va of the shadow batch buffer */
+ map = i915_gem_object_pin_map(obj, I915_MAP_WB);
+ if (IS_ERR(map)) {
+ gvt_err("failed to vmap shadow indirect ctx\n");
+ ret = PTR_ERR(map);
+ goto put_obj;
+ }
+
+ ret = i915_gem_object_set_to_cpu_domain(obj, false);
+ if (ret) {
+ gvt_err("failed to set shadow indirect ctx to CPU\n");
+ goto unmap_src;
+ }
+
+ ret = copy_gma_to_hva(wa_ctx->workload->vgpu,
+ wa_ctx->workload->vgpu->gtt.ggtt_mm,
+ guest_gma, guest_gma + ctx_size,
+ map);
+ if (ret) {
+ gvt_err("fail to copy guest indirect ctx\n");
+ goto unmap_src;
+ }
+
+ wa_ctx->indirect_ctx.obj = obj;
+ wa_ctx->indirect_ctx.shadow_va = map;
+ return 0;
+
+unmap_src:
+ i915_gem_object_unpin_map(obj);
+put_obj:
+ i915_gem_object_put(wa_ctx->indirect_ctx.obj);
+ return ret;
+}
+
+static int combine_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
+{
+ uint32_t per_ctx_start[CACHELINE_DWORDS] = {0};
+ unsigned char *bb_start_sva;
+
+ per_ctx_start[0] = 0x18800001;
+ per_ctx_start[1] = wa_ctx->per_ctx.guest_gma;
+
+ bb_start_sva = (unsigned char *)wa_ctx->indirect_ctx.shadow_va +
+ wa_ctx->indirect_ctx.size;
+
+ memcpy(bb_start_sva, per_ctx_start, CACHELINE_BYTES);
+
+ return 0;
+}
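+
+/*
+ * Layout note (illustrative): 0x18800001 is an MI_BATCH_BUFFER_START dword
+ * (opcode 0x31 << 23, DWord length 1), so the combined buffer presumably
+ * ends up as:
+ *
+ *	| indirect ctx body | MI_BATCH_BUFFER_START | per_ctx gma | pad |
+ *
+ * i.e. the shadow indirect context chains straight into the guest
+ * per-context buffer.
+ */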
+
+int intel_gvt_scan_and_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
+{
+ int ret;
+
+ if (wa_ctx->indirect_ctx.size == 0)
+ return 0;
+
+ ret = shadow_indirect_ctx(wa_ctx);
+ if (ret) {
+ gvt_err("fail to shadow indirect ctx\n");
+ return ret;
+ }
+
+ combine_wa_ctx(wa_ctx);
+
+ ret = scan_wa_ctx(wa_ctx);
+ if (ret) {
+ gvt_err("scan wa ctx error\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static struct cmd_info *find_cmd_entry_any_ring(struct intel_gvt *gvt,
+ unsigned int opcode, int rings)
+{
+ struct cmd_info *info = NULL;
+ unsigned int ring;
+
+ for_each_set_bit(ring, (unsigned long *)&rings, I915_NUM_ENGINES) {
+ info = find_cmd_entry(gvt, opcode, ring);
+ if (info)
+ break;
+ }
+ return info;
+}
+
+static int init_cmd_table(struct intel_gvt *gvt)
+{
+ int i;
+ struct cmd_entry *e;
+ struct cmd_info *info;
+ unsigned int gen_type;
+
+ gen_type = intel_gvt_get_device_type(gvt);
+
+ for (i = 0; i < ARRAY_SIZE(cmd_info); i++) {
+ if (!(cmd_info[i].devices & gen_type))
+ continue;
+
+ e = kzalloc(sizeof(*e), GFP_KERNEL);
+ if (!e)
+ return -ENOMEM;
+
+ e->info = &cmd_info[i];
+ info = find_cmd_entry_any_ring(gvt,
+ e->info->opcode, e->info->rings);
+ if (info) {
+ gvt_err("%s %s duplicated\n", e->info->name,
+ info->name);
+ return -EEXIST;
+ }
+
+ INIT_HLIST_NODE(&e->hlist);
+ add_cmd_entry(gvt, e);
+ gvt_dbg_cmd("add %-30s op %04x flag %x devs %02x rings %02x\n",
+ e->info->name, e->info->opcode, e->info->flag,
+ e->info->devices, e->info->rings);
+ }
+ return 0;
+}
+
+static void clean_cmd_table(struct intel_gvt *gvt)
+{
+ struct hlist_node *tmp;
+ struct cmd_entry *e;
+ int i;
+
+ hash_for_each_safe(gvt->cmd_table, i, tmp, e, hlist)
+ kfree(e);
+
+ hash_init(gvt->cmd_table);
+}
+
+void intel_gvt_clean_cmd_parser(struct intel_gvt *gvt)
+{
+ clean_cmd_table(gvt);
+}
+
+int intel_gvt_init_cmd_parser(struct intel_gvt *gvt)
+{
+ int ret;
+
+ ret = init_cmd_table(gvt);
+ if (ret) {
+ intel_gvt_clean_cmd_parser(gvt);
+ return ret;
+ }
+ return 0;
+}
diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.h b/drivers/gpu/drm/i915/gvt/cmd_parser.h
new file mode 100644
index 000000000000..bed33514103c
--- /dev/null
+++ b/drivers/gpu/drm/i915/gvt/cmd_parser.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ * Ke Yu
+ * Kevin Tian <kevin.tian@intel.com>
+ * Zhiyuan Lv <zhiyuan.lv@intel.com>
+ *
+ * Contributors:
+ * Min He <min.he@intel.com>
+ * Ping Gao <ping.a.gao@intel.com>
+ * Tina Zhang <tina.zhang@intel.com>
+ * Yulei Zhang <yulei.zhang@intel.com>
+ * Zhi Wang <zhi.a.wang@intel.com>
+ *
+ */
+#ifndef _GVT_CMD_PARSER_H_
+#define _GVT_CMD_PARSER_H_
+
+#define GVT_CMD_HASH_BITS 7
+
+void intel_gvt_clean_cmd_parser(struct intel_gvt *gvt);
+
+int intel_gvt_init_cmd_parser(struct intel_gvt *gvt);
+
+int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload);
+
+int intel_gvt_scan_and_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx);
+
+#endif
diff --git a/drivers/gpu/drm/i915/gvt/debug.h b/drivers/gpu/drm/i915/gvt/debug.h
index 7ef412be665f..68cba7bd980a 100644
--- a/drivers/gpu/drm/i915/gvt/debug.h
+++ b/drivers/gpu/drm/i915/gvt/debug.h
@@ -24,11 +24,34 @@
#ifndef __GVT_DEBUG_H__
#define __GVT_DEBUG_H__
+#define gvt_err(fmt, args...) \
+ DRM_ERROR("gvt: "fmt, ##args)
+
#define gvt_dbg_core(fmt, args...) \
DRM_DEBUG_DRIVER("gvt: core: "fmt, ##args)
-/*
- * Other GVT debug stuff will be introduced in the GVT device model patches.
- */
+#define gvt_dbg_irq(fmt, args...) \
+ DRM_DEBUG_DRIVER("gvt: irq: "fmt, ##args)
+
+#define gvt_dbg_mm(fmt, args...) \
+ DRM_DEBUG_DRIVER("gvt: mm: "fmt, ##args)
+
+#define gvt_dbg_mmio(fmt, args...) \
+ DRM_DEBUG_DRIVER("gvt: mmio: "fmt, ##args)
+
+#define gvt_dbg_dpy(fmt, args...) \
+ DRM_DEBUG_DRIVER("gvt: dpy: "fmt, ##args)
+
+#define gvt_dbg_el(fmt, args...) \
+ DRM_DEBUG_DRIVER("gvt: el: "fmt, ##args)
+
+#define gvt_dbg_sched(fmt, args...) \
+ DRM_DEBUG_DRIVER("gvt: sched: "fmt, ##args)
+
+#define gvt_dbg_render(fmt, args...) \
+ DRM_DEBUG_DRIVER("gvt: render: "fmt, ##args)
+
+#define gvt_dbg_cmd(fmt, args...) \
+ DRM_DEBUG_DRIVER("gvt: cmd: "fmt, ##args)
#endif
diff --git a/drivers/gpu/drm/i915/gvt/display.c b/drivers/gpu/drm/i915/gvt/display.c
new file mode 100644
index 000000000000..c0c884aeb30e
--- /dev/null
+++ b/drivers/gpu/drm/i915/gvt/display.c
@@ -0,0 +1,330 @@
+/*
+ * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ * Ke Yu
+ * Zhiyuan Lv <zhiyuan.lv@intel.com>
+ *
+ * Contributors:
+ * Terrence Xu <terrence.xu@intel.com>
+ * Changbin Du <changbin.du@intel.com>
+ * Bing Niu <bing.niu@intel.com>
+ * Zhi Wang <zhi.a.wang@intel.com>
+ *
+ */
+
+#include "i915_drv.h"
+#include "gvt.h"
+
+static int get_edp_pipe(struct intel_vgpu *vgpu)
+{
+ u32 data = vgpu_vreg(vgpu, _TRANS_DDI_FUNC_CTL_EDP);
+ int pipe = -1;
+
+ switch (data & TRANS_DDI_EDP_INPUT_MASK) {
+ case TRANS_DDI_EDP_INPUT_A_ON:
+ case TRANS_DDI_EDP_INPUT_A_ONOFF:
+ pipe = PIPE_A;
+ break;
+ case TRANS_DDI_EDP_INPUT_B_ONOFF:
+ pipe = PIPE_B;
+ break;
+ case TRANS_DDI_EDP_INPUT_C_ONOFF:
+ pipe = PIPE_C;
+ break;
+ }
+ return pipe;
+}
+
+static int edp_pipe_is_enabled(struct intel_vgpu *vgpu)
+{
+ struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+
+ if (!(vgpu_vreg(vgpu, PIPECONF(_PIPE_EDP)) & PIPECONF_ENABLE))
+ return 0;
+
+ if (!(vgpu_vreg(vgpu, _TRANS_DDI_FUNC_CTL_EDP) & TRANS_DDI_FUNC_ENABLE))
+ return 0;
+ return 1;
+}
+
+static int pipe_is_enabled(struct intel_vgpu *vgpu, int pipe)
+{
+ struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+
+ if (WARN_ON(pipe < PIPE_A || pipe >= I915_MAX_PIPES))
+ return -EINVAL;
+
+ if (vgpu_vreg(vgpu, PIPECONF(pipe)) & PIPECONF_ENABLE)
+ return 1;
+
+ if (edp_pipe_is_enabled(vgpu) &&
+ get_edp_pipe(vgpu) == pipe)
+ return 1;
+ return 0;
+}
+
+/* EDID with 1024x768 as its resolution */
+static unsigned char virtual_dp_monitor_edid[] = {
+ /*Header*/
+ 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00,
+ /* Vendor & Product Identification */
+ 0x22, 0xf0, 0x54, 0x29, 0x00, 0x00, 0x00, 0x00, 0x04, 0x17,
+ /* Version & Revision */
+ 0x01, 0x04,
+ /* Basic Display Parameters & Features */
+ 0xa5, 0x34, 0x20, 0x78, 0x23,
+ /* Color Characteristics */
+ 0xfc, 0x81, 0xa4, 0x55, 0x4d, 0x9d, 0x25, 0x12, 0x50, 0x54,
+ /* Established Timings: maximum resolution is 1024x768 */
+ 0x21, 0x08, 0x00,
+ /* Standard Timings. All invalid */
+ 0x00, 0xc0, 0x00, 0xc0, 0x00, 0x40, 0x00, 0x80, 0x00, 0x00,
+ 0x00, 0x40, 0x00, 0x00, 0x00, 0x01,
+ /* 18 Byte Data Blocks 1: invalid */
+ 0x00, 0x00, 0x80, 0xa0, 0x70, 0xb0,
+ 0x23, 0x40, 0x30, 0x20, 0x36, 0x00, 0x06, 0x44, 0x21, 0x00, 0x00, 0x1a,
+ /* 18 Byte Data Blocks 2: invalid */
+ 0x00, 0x00, 0x00, 0xfd, 0x00, 0x18, 0x3c, 0x18, 0x50, 0x11, 0x00, 0x0a,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ /* 18 Byte Data Blocks 3: invalid */
+ 0x00, 0x00, 0x00, 0xfc, 0x00, 0x48,
+ 0x50, 0x20, 0x5a, 0x52, 0x32, 0x34, 0x34, 0x30, 0x77, 0x0a, 0x20, 0x20,
+ /* 18 Byte Data Blocks 4: invalid */
+ 0x00, 0x00, 0x00, 0xff, 0x00, 0x43, 0x4e, 0x34, 0x33, 0x30, 0x34, 0x30,
+ 0x44, 0x58, 0x51, 0x0a, 0x20, 0x20,
+ /* Extension Block Count */
+ 0x00,
+ /* Checksum */
+ 0xef,
+};
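+
+/*
+ * A minimal self-check sketch for the block above (the helper name is
+ * hypothetical, and EDID_SIZE is assumed visible here via gvt.h): a
+ * well-formed EDID block sums to 0 modulo 256, which is what the trailing
+ * 0xef checksum byte is meant to guarantee.
+ */
+static inline void virtual_edid_checksum_check(void)
+{
+	u8 sum = 0;
+	int i;
+
+	for (i = 0; i < EDID_SIZE; i++)
+		sum += virtual_dp_monitor_edid[i];
+	WARN_ON(sum != 0);
+}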
+
+#define DPCD_HEADER_SIZE 0xb
+
+static u8 dpcd_fix_data[DPCD_HEADER_SIZE] = {
+ 0x11, 0x0a, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
+};
+
+static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
+{
+ struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+
+ vgpu_vreg(vgpu, SDEISR) &= ~(SDE_PORTB_HOTPLUG_CPT |
+ SDE_PORTC_HOTPLUG_CPT |
+ SDE_PORTD_HOTPLUG_CPT);
+
+ if (IS_SKYLAKE(dev_priv))
+ vgpu_vreg(vgpu, SDEISR) &= ~(SDE_PORTA_HOTPLUG_SPT |
+ SDE_PORTE_HOTPLUG_SPT);
+
+ if (intel_vgpu_has_monitor_on_port(vgpu, PORT_B))
+ vgpu_vreg(vgpu, SDEISR) |= SDE_PORTB_HOTPLUG_CPT;
+
+ if (intel_vgpu_has_monitor_on_port(vgpu, PORT_C))
+ vgpu_vreg(vgpu, SDEISR) |= SDE_PORTC_HOTPLUG_CPT;
+
+ if (intel_vgpu_has_monitor_on_port(vgpu, PORT_D))
+ vgpu_vreg(vgpu, SDEISR) |= SDE_PORTD_HOTPLUG_CPT;
+
+ if (IS_SKYLAKE(dev_priv) &&
+ intel_vgpu_has_monitor_on_port(vgpu, PORT_E)) {
+ vgpu_vreg(vgpu, SDEISR) |= SDE_PORTE_HOTPLUG_SPT;
+ }
+
+ if (intel_vgpu_has_monitor_on_port(vgpu, PORT_A)) {
+ if (IS_BROADWELL(dev_priv))
+ vgpu_vreg(vgpu, GEN8_DE_PORT_ISR) |=
+ GEN8_PORT_DP_A_HOTPLUG;
+ else
+ vgpu_vreg(vgpu, SDEISR) |= SDE_PORTA_HOTPLUG_SPT;
+ }
+}
+
+static void clean_virtual_dp_monitor(struct intel_vgpu *vgpu, int port_num)
+{
+ struct intel_vgpu_port *port = intel_vgpu_port(vgpu, port_num);
+
+ kfree(port->edid);
+ port->edid = NULL;
+
+ kfree(port->dpcd);
+ port->dpcd = NULL;
+}
+
+static int setup_virtual_dp_monitor(struct intel_vgpu *vgpu, int port_num,
+ int type)
+{
+ struct intel_vgpu_port *port = intel_vgpu_port(vgpu, port_num);
+
+ port->edid = kzalloc(sizeof(*(port->edid)), GFP_KERNEL);
+ if (!port->edid)
+ return -ENOMEM;
+
+ port->dpcd = kzalloc(sizeof(*(port->dpcd)), GFP_KERNEL);
+ if (!port->dpcd) {
+ kfree(port->edid);
+ return -ENOMEM;
+ }
+
+ memcpy(port->edid->edid_block, virtual_dp_monitor_edid,
+ EDID_SIZE);
+ port->edid->data_valid = true;
+
+ memcpy(port->dpcd->data, dpcd_fix_data, DPCD_HEADER_SIZE);
+ port->dpcd->data_valid = true;
+ port->dpcd->data[DPCD_SINK_COUNT] = 0x1;
+ port->type = type;
+
+ emulate_monitor_status_change(vgpu);
+ return 0;
+}
+
+/**
+ * intel_gvt_check_vblank_emulation - check if vblank emulation timer should
+ * be turned on/off when a virtual pipe is enabled/disabled.
+ * @gvt: a GVT device
+ *
+ * This function is used to turn on/off vblank timer according to currently
+ * enabled/disabled virtual pipes.
+ *
+ */
+void intel_gvt_check_vblank_emulation(struct intel_gvt *gvt)
+{
+ struct intel_gvt_irq *irq = &gvt->irq;
+ struct intel_vgpu *vgpu;
+ bool have_enabled_pipe = false;
+ int pipe, id;
+
+ if (WARN_ON(!mutex_is_locked(&gvt->lock)))
+ return;
+
+ hrtimer_cancel(&irq->vblank_timer.timer);
+
+ for_each_active_vgpu(gvt, vgpu, id) {
+ for (pipe = 0; pipe < I915_MAX_PIPES; pipe++) {
+ have_enabled_pipe =
+ pipe_is_enabled(vgpu, pipe);
+ if (have_enabled_pipe)
+ break;
+ }
+ }
+
+ if (have_enabled_pipe)
+ hrtimer_start(&irq->vblank_timer.timer,
+ ktime_add_ns(ktime_get(), irq->vblank_timer.period),
+ HRTIMER_MODE_ABS);
+}
+
+static void emulate_vblank_on_pipe(struct intel_vgpu *vgpu, int pipe)
+{
+ struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+ struct intel_vgpu_irq *irq = &vgpu->irq;
+ int vblank_event[] = {
+ [PIPE_A] = PIPE_A_VBLANK,
+ [PIPE_B] = PIPE_B_VBLANK,
+ [PIPE_C] = PIPE_C_VBLANK,
+ };
+ int event;
+
+ if (pipe < PIPE_A || pipe > PIPE_C)
+ return;
+
+ for_each_set_bit(event, irq->flip_done_event[pipe],
+ INTEL_GVT_EVENT_MAX) {
+ clear_bit(event, irq->flip_done_event[pipe]);
+ if (!pipe_is_enabled(vgpu, pipe))
+ continue;
+
+ vgpu_vreg(vgpu, PIPE_FLIPCOUNT_G4X(pipe))++;
+ intel_vgpu_trigger_virtual_event(vgpu, event);
+ }
+
+ if (pipe_is_enabled(vgpu, pipe)) {
+ vgpu_vreg(vgpu, PIPE_FRMCOUNT_G4X(pipe))++;
+ intel_vgpu_trigger_virtual_event(vgpu, vblank_event[pipe]);
+ }
+}
+
+static void emulate_vblank(struct intel_vgpu *vgpu)
+{
+ int pipe;
+
+ for_each_pipe(vgpu->gvt->dev_priv, pipe)
+ emulate_vblank_on_pipe(vgpu, pipe);
+}
+
+/**
+ * intel_gvt_emulate_vblank - trigger vblank events for vGPUs on GVT device
+ * @gvt: a GVT device
+ *
+ * This function is used to trigger vblank interrupts for vGPUs on GVT device
+ *
+ */
+void intel_gvt_emulate_vblank(struct intel_gvt *gvt)
+{
+ struct intel_vgpu *vgpu;
+ int id;
+
+ if (WARN_ON(!mutex_is_locked(&gvt->lock)))
+ return;
+
+ for_each_active_vgpu(gvt, vgpu, id)
+ emulate_vblank(vgpu);
+}
+
+/**
+ * intel_vgpu_clean_display - clean vGPU virtual display emulation
+ * @vgpu: a vGPU
+ *
+ * This function is used to clean up the vGPU virtual display emulation
+ *
+ */
+void intel_vgpu_clean_display(struct intel_vgpu *vgpu)
+{
+ struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+
+ if (IS_SKYLAKE(dev_priv))
+ clean_virtual_dp_monitor(vgpu, PORT_D);
+ else
+ clean_virtual_dp_monitor(vgpu, PORT_B);
+}
+
+/**
+ * intel_vgpu_init_display- initialize vGPU virtual display emulation
+ * @vgpu: a vGPU
+ *
+ * This function is used to initialize the vGPU virtual display emulation
+ *
+ * Returns:
+ * Zero on success, negative error code if failed.
+ *
+ */
+int intel_vgpu_init_display(struct intel_vgpu *vgpu)
+{
+ struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+
+ intel_vgpu_init_i2c_edid(vgpu);
+
+ if (IS_SKYLAKE(dev_priv))
+ return setup_virtual_dp_monitor(vgpu, PORT_D, GVT_DP_D);
+ else
+ return setup_virtual_dp_monitor(vgpu, PORT_B, GVT_DP_B);
+}
diff --git a/drivers/gpu/drm/i915/gvt/display.h b/drivers/gpu/drm/i915/gvt/display.h
new file mode 100644
index 000000000000..7a60cb848268
--- /dev/null
+++ b/drivers/gpu/drm/i915/gvt/display.h
@@ -0,0 +1,163 @@
+/*
+ * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ * Ke Yu
+ * Zhiyuan Lv <zhiyuan.lv@intel.com>
+ *
+ * Contributors:
+ * Terrence Xu <terrence.xu@intel.com>
+ * Changbin Du <changbin.du@intel.com>
+ * Bing Niu <bing.niu@intel.com>
+ * Zhi Wang <zhi.a.wang@intel.com>
+ *
+ */
+
+#ifndef _GVT_DISPLAY_H_
+#define _GVT_DISPLAY_H_
+
+#define SBI_REG_MAX 20
+
+#define intel_vgpu_port(vgpu, port) \
+ (&(vgpu->display.ports[port]))
+
+#define intel_vgpu_has_monitor_on_port(vgpu, port) \
+ (intel_vgpu_port(vgpu, port)->edid && \
+ intel_vgpu_port(vgpu, port)->edid->data_valid)
+
+#define intel_vgpu_port_is_dp(vgpu, port) \
+ ((intel_vgpu_port(vgpu, port)->type == GVT_DP_A) || \
+ (intel_vgpu_port(vgpu, port)->type == GVT_DP_B) || \
+ (intel_vgpu_port(vgpu, port)->type == GVT_DP_C) || \
+ (intel_vgpu_port(vgpu, port)->type == GVT_DP_D))
+
+#define INTEL_GVT_MAX_UEVENT_VARS 3
+
+/* DPCD start */
+#define DPCD_SIZE 0x700
+
+/* DPCD */
+#define DP_SET_POWER 0x600
+#define DP_SET_POWER_D0 0x1
+#define AUX_NATIVE_WRITE 0x8
+#define AUX_NATIVE_READ 0x9
+
+#define AUX_NATIVE_REPLY_MASK (0x3 << 4)
+#define AUX_NATIVE_REPLY_ACK (0x0 << 4)
+#define AUX_NATIVE_REPLY_NAK (0x1 << 4)
+#define AUX_NATIVE_REPLY_DEFER (0x2 << 4)
+
+#define AUX_BURST_SIZE 16
+
+/* DPCD addresses */
+#define DPCD_REV 0x000
+#define DPCD_MAX_LINK_RATE 0x001
+#define DPCD_MAX_LANE_COUNT 0x002
+
+#define DPCD_TRAINING_PATTERN_SET 0x102
+#define DPCD_SINK_COUNT 0x200
+#define DPCD_LANE0_1_STATUS 0x202
+#define DPCD_LANE2_3_STATUS 0x203
+#define DPCD_LANE_ALIGN_STATUS_UPDATED 0x204
+#define DPCD_SINK_STATUS 0x205
+
+/* link training */
+#define DPCD_TRAINING_PATTERN_SET_MASK 0x03
+#define DPCD_LINK_TRAINING_DISABLED 0x00
+#define DPCD_TRAINING_PATTERN_1 0x01
+#define DPCD_TRAINING_PATTERN_2 0x02
+
+#define DPCD_CP_READY_MASK (1 << 6)
+
+/* lane status */
+#define DPCD_LANES_CR_DONE 0x11
+#define DPCD_LANES_EQ_DONE 0x22
+#define DPCD_SYMBOL_LOCKED 0x44
+
+#define DPCD_INTERLANE_ALIGN_DONE 0x01
+
+#define DPCD_SINK_IN_SYNC 0x03
+/* DPCD end */
+
+#define SBI_RESPONSE_MASK 0x3
+#define SBI_RESPONSE_SHIFT 0x1
+#define SBI_STAT_MASK 0x1
+#define SBI_STAT_SHIFT 0x0
+#define SBI_OPCODE_SHIFT 8
+#define SBI_OPCODE_MASK (0xff << SBI_OPCODE_SHIFT)
+#define SBI_CMD_IORD 2
+#define SBI_CMD_IOWR 3
+#define SBI_CMD_CRRD 6
+#define SBI_CMD_CRWR 7
+#define SBI_ADDR_OFFSET_SHIFT 16
+#define SBI_ADDR_OFFSET_MASK (0xffff << SBI_ADDR_OFFSET_SHIFT)
+
+struct intel_vgpu_sbi_register {
+ unsigned int offset;
+ u32 value;
+};
+
+struct intel_vgpu_sbi {
+ int number;
+ struct intel_vgpu_sbi_register registers[SBI_REG_MAX];
+};
+
+enum intel_gvt_plane_type {
+ PRIMARY_PLANE = 0,
+ CURSOR_PLANE,
+ SPRITE_PLANE,
+ MAX_PLANE
+};
+
+struct intel_vgpu_dpcd_data {
+ bool data_valid;
+ u8 data[DPCD_SIZE];
+};
+
+enum intel_vgpu_port_type {
+ GVT_CRT = 0,
+ GVT_DP_A,
+ GVT_DP_B,
+ GVT_DP_C,
+ GVT_DP_D,
+ GVT_HDMI_B,
+ GVT_HDMI_C,
+ GVT_HDMI_D,
+ GVT_PORT_MAX
+};
+
+struct intel_vgpu_port {
+ /* per display EDID information */
+ struct intel_vgpu_edid_data *edid;
+ /* per display DPCD information */
+ struct intel_vgpu_dpcd_data *dpcd;
+ int type;
+};
+
+void intel_gvt_emulate_vblank(struct intel_gvt *gvt);
+void intel_gvt_check_vblank_emulation(struct intel_gvt *gvt);
+
+int intel_vgpu_init_display(struct intel_vgpu *vgpu);
+void intel_vgpu_clean_display(struct intel_vgpu *vgpu);
+
+#endif
diff --git a/drivers/gpu/drm/i915/gvt/edid.c b/drivers/gpu/drm/i915/gvt/edid.c
new file mode 100644
index 000000000000..bda85dff7b2a
--- /dev/null
+++ b/drivers/gpu/drm/i915/gvt/edid.c
@@ -0,0 +1,531 @@
+/*
+ * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ * Ke Yu
+ * Zhiyuan Lv <zhiyuan.lv@intel.com>
+ *
+ * Contributors:
+ * Terrence Xu <terrence.xu@intel.com>
+ * Changbin Du <changbin.du@intel.com>
+ * Bing Niu <bing.niu@intel.com>
+ * Zhi Wang <zhi.a.wang@intel.com>
+ *
+ */
+
+#include "i915_drv.h"
+#include "gvt.h"
+
+#define GMBUS1_TOTAL_BYTES_SHIFT 16
+#define GMBUS1_TOTAL_BYTES_MASK 0x1ff
+#define gmbus1_total_byte_count(v) (((v) >> \
+ GMBUS1_TOTAL_BYTES_SHIFT) & GMBUS1_TOTAL_BYTES_MASK)
+#define gmbus1_slave_addr(v) (((v) & 0xff) >> 1)
+#define gmbus1_slave_index(v) (((v) >> 8) & 0xff)
+#define gmbus1_bus_cycle(v) (((v) >> 25) & 0x7)
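+
+/*
+ * Decoding example (illustrative): a GMBUS1 value of 0x0a8000a1 requests a
+ * 128-byte read from 7-bit slave address 0x50 (EDID_ADDR):
+ *	gmbus1_total_byte_count(0x0a8000a1) = 128
+ *	gmbus1_slave_addr(0x0a8000a1)       = 0x50 (bit 0 set = read)
+ *	gmbus1_bus_cycle(0x0a8000a1)        = 0x5  (NIDX_STOP)
+ */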
+
+/* GMBUS0 bits definitions */
+#define _GMBUS_PIN_SEL_MASK (0x7)
+
+static unsigned char edid_get_byte(struct intel_vgpu *vgpu)
+{
+ struct intel_vgpu_i2c_edid *edid = &vgpu->display.i2c_edid;
+ unsigned char chr = 0;
+
+ if (edid->state == I2C_NOT_SPECIFIED || !edid->slave_selected) {
+ gvt_err("Driver tries to read EDID without proper sequence!\n");
+ return 0;
+ }
+ if (edid->current_edid_read >= EDID_SIZE) {
+ gvt_err("edid_get_byte() exceeds the size of EDID!\n");
+ return 0;
+ }
+
+ if (!edid->edid_available) {
+ gvt_err("Reading EDID but EDID is not available!\n");
+ return 0;
+ }
+
+ if (intel_vgpu_has_monitor_on_port(vgpu, edid->port)) {
+ struct intel_vgpu_edid_data *edid_data =
+ intel_vgpu_port(vgpu, edid->port)->edid;
+
+ chr = edid_data->edid_block[edid->current_edid_read];
+ edid->current_edid_read++;
+ } else {
+ gvt_err("No EDID available during the reading?\n");
+ }
+ return chr;
+}
+
+static inline int get_port_from_gmbus0(u32 gmbus0)
+{
+ int port_select = gmbus0 & _GMBUS_PIN_SEL_MASK;
+ int port = -EINVAL;
+
+ if (port_select == 2)
+ port = PORT_E;
+ else if (port_select == 4)
+ port = PORT_C;
+ else if (port_select == 5)
+ port = PORT_B;
+ else if (port_select == 6)
+ port = PORT_D;
+ return port;
+}
+
+static void reset_gmbus_controller(struct intel_vgpu *vgpu)
+{
+ vgpu_vreg(vgpu, PCH_GMBUS2) = GMBUS_HW_RDY;
+ if (!vgpu->display.i2c_edid.edid_available)
+ vgpu_vreg(vgpu, PCH_GMBUS2) |= GMBUS_SATOER;
+ vgpu->display.i2c_edid.gmbus.phase = GMBUS_IDLE_PHASE;
+}
+
+/* GMBUS0 */
+static int gmbus0_mmio_write(struct intel_vgpu *vgpu,
+ unsigned int offset, void *p_data, unsigned int bytes)
+{
+ int port, pin_select;
+
+ memcpy(&vgpu_vreg(vgpu, offset), p_data, bytes);
+
+ pin_select = vgpu_vreg(vgpu, offset) & _GMBUS_PIN_SEL_MASK;
+
+ intel_vgpu_init_i2c_edid(vgpu);
+
+ if (pin_select == 0)
+ return 0;
+
+ port = get_port_from_gmbus0(pin_select);
+ if (WARN_ON(port < 0))
+ return 0;
+
+ vgpu->display.i2c_edid.state = I2C_GMBUS;
+ vgpu->display.i2c_edid.gmbus.phase = GMBUS_IDLE_PHASE;
+
+ vgpu_vreg(vgpu, PCH_GMBUS2) &= ~GMBUS_ACTIVE;
+ vgpu_vreg(vgpu, PCH_GMBUS2) |= GMBUS_HW_RDY | GMBUS_HW_WAIT_PHASE;
+
+ if (intel_vgpu_has_monitor_on_port(vgpu, port) &&
+ !intel_vgpu_port_is_dp(vgpu, port)) {
+ vgpu->display.i2c_edid.port = port;
+ vgpu->display.i2c_edid.edid_available = true;
+ vgpu_vreg(vgpu, PCH_GMBUS2) &= ~GMBUS_SATOER;
+ } else
+ vgpu_vreg(vgpu, PCH_GMBUS2) |= GMBUS_SATOER;
+ return 0;
+}
+
+static int gmbus1_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
+ void *p_data, unsigned int bytes)
+{
+ struct intel_vgpu_i2c_edid *i2c_edid = &vgpu->display.i2c_edid;
+ u32 slave_addr;
+ u32 wvalue = *(u32 *)p_data;
+
+ if (vgpu_vreg(vgpu, offset) & GMBUS_SW_CLR_INT) {
+ if (!(wvalue & GMBUS_SW_CLR_INT)) {
+ vgpu_vreg(vgpu, offset) &= ~GMBUS_SW_CLR_INT;
+ reset_gmbus_controller(vgpu);
+ }
+ /*
+ * TODO: "This bit is cleared to zero when an event
+ * causes the HW_RDY bit transition to occur "
+ */
+ } else {
+ /*
+ * per bspec setting this bit can cause:
+ * 1) INT status bit cleared
+ * 2) HW_RDY bit asserted
+ */
+ if (wvalue & GMBUS_SW_CLR_INT) {
+ vgpu_vreg(vgpu, PCH_GMBUS2) &= ~GMBUS_INT;
+ vgpu_vreg(vgpu, PCH_GMBUS2) |= GMBUS_HW_RDY;
+ }
+
+ /* For virtualization, we suppose that HW is always ready,
+ * so GMBUS_SW_RDY should always be cleared
+ */
+ if (wvalue & GMBUS_SW_RDY)
+ wvalue &= ~GMBUS_SW_RDY;
+
+ i2c_edid->gmbus.total_byte_count =
+ gmbus1_total_byte_count(wvalue);
+ slave_addr = gmbus1_slave_addr(wvalue);
+
+ /* vgpu gmbus only support EDID */
+ if (slave_addr == EDID_ADDR) {
+ i2c_edid->slave_selected = true;
+ } else if (slave_addr != 0) {
+ gvt_dbg_dpy(
+ "vgpu%d: unsupported gmbus slave addr(0x%x)\n"
+ " gmbus operations will be ignored.\n",
+ vgpu->id, slave_addr);
+ }
+
+ if (wvalue & GMBUS_CYCLE_INDEX)
+ i2c_edid->current_edid_read =
+ gmbus1_slave_index(wvalue);
+
+ i2c_edid->gmbus.cycle_type = gmbus1_bus_cycle(wvalue);
+ switch (gmbus1_bus_cycle(wvalue)) {
+ case GMBUS_NOCYCLE:
+ break;
+ case GMBUS_STOP:
+ /* From spec:
+ * This can only cause a STOP to be generated
+ * if a GMBUS cycle is generated, the GMBUS is
+ * currently in a data/wait/idle phase, or it is in a
+ * WAIT phase
+ */
+ if (gmbus1_bus_cycle(vgpu_vreg(vgpu, offset))
+ != GMBUS_NOCYCLE) {
+ intel_vgpu_init_i2c_edid(vgpu);
+ /* After the 'stop' cycle, hw state would become
+ * 'stop phase' and then 'idle phase' after a
+ * few milliseconds. In emulation, we just set
+ * it as 'idle phase' ('stop phase' is not
+ * visible in gmbus interface)
+ */
+ i2c_edid->gmbus.phase = GMBUS_IDLE_PHASE;
+ vgpu_vreg(vgpu, PCH_GMBUS2) &= ~GMBUS_ACTIVE;
+ }
+ break;
+ case NIDX_NS_W:
+ case IDX_NS_W:
+ case NIDX_STOP:
+ case IDX_STOP:
+ /* From hw spec the GMBUS phase
+ * transition like this:
+ * START (-->INDEX) -->DATA
+ */
+ i2c_edid->gmbus.phase = GMBUS_DATA_PHASE;
+ vgpu_vreg(vgpu, PCH_GMBUS2) |= GMBUS_ACTIVE;
+ break;
+ default:
+ gvt_err("Unknown/reserved GMBUS cycle detected!\n");
+ break;
+ }
+ /*
+ * From hw spec the WAIT state will be
+ * cleared:
+ * (1) in a new GMBUS cycle
+ * (2) by generating a stop
+ */
+ vgpu_vreg(vgpu, offset) = wvalue;
+ }
+ return 0;
+}
+
+static int gmbus3_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
+ void *p_data, unsigned int bytes)
+{
+ WARN_ON(1);
+ return 0;
+}
+
+static int gmbus3_mmio_read(struct intel_vgpu *vgpu, unsigned int offset,
+ void *p_data, unsigned int bytes)
+{
+ int i;
+ unsigned char byte_data;
+ struct intel_vgpu_i2c_edid *i2c_edid = &vgpu->display.i2c_edid;
+ int byte_left = i2c_edid->gmbus.total_byte_count -
+ i2c_edid->current_edid_read;
+ int byte_count = byte_left;
+ u32 reg_data = 0;
+
+ /* Data can only be received if the previous settings are correct */
+ if (vgpu_vreg(vgpu, PCH_GMBUS1) & GMBUS_SLAVE_READ) {
+ if (byte_left <= 0) {
+ memcpy(p_data, &vgpu_vreg(vgpu, offset), bytes);
+ return 0;
+ }
+
+ if (byte_count > 4)
+ byte_count = 4;
+ for (i = 0; i < byte_count; i++) {
+ byte_data = edid_get_byte(vgpu);
+ reg_data |= (byte_data << (i << 3));
+ }
+
+ memcpy(&vgpu_vreg(vgpu, offset), &reg_data, byte_count);
+ memcpy(p_data, &vgpu_vreg(vgpu, offset), bytes);
+
+ if (byte_left <= 4) {
+ switch (i2c_edid->gmbus.cycle_type) {
+ case NIDX_STOP:
+ case IDX_STOP:
+ i2c_edid->gmbus.phase = GMBUS_IDLE_PHASE;
+ break;
+ case NIDX_NS_W:
+ case IDX_NS_W:
+ default:
+ i2c_edid->gmbus.phase = GMBUS_WAIT_PHASE;
+ break;
+ }
+ intel_vgpu_init_i2c_edid(vgpu);
+ }
+ /*
+ * Read GMBUS3 during send operation,
+ * return the latest written value
+ */
+ } else {
+ memcpy(p_data, &vgpu_vreg(vgpu, offset), bytes);
+ gvt_err("vgpu%d: warning: gmbus3 read with nothing returned\n",
+ vgpu->id);
+ }
+ return 0;
+}
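+
+/*
+ * Packing example (illustrative): returned bytes are packed little-endian,
+ * byte i landing at bit i * 8. Reading the first four EDID header bytes
+ * {0x00, 0xff, 0xff, 0xff} therefore yields reg_data == 0xffffff00.
+ */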
+
+static int gmbus2_mmio_read(struct intel_vgpu *vgpu, unsigned int offset,
+ void *p_data, unsigned int bytes)
+{
+ u32 value = vgpu_vreg(vgpu, offset);
+
+ if (!(vgpu_vreg(vgpu, offset) & GMBUS_INUSE))
+ vgpu_vreg(vgpu, offset) |= GMBUS_INUSE;
+ memcpy(p_data, (void *)&value, bytes);
+ return 0;
+}
+
+static int gmbus2_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
+ void *p_data, unsigned int bytes)
+{
+ u32 wvalue = *(u32 *)p_data;
+
+ if (wvalue & GMBUS_INUSE)
+ vgpu_vreg(vgpu, offset) &= ~GMBUS_INUSE;
+ /* All other bits are read-only */
+ return 0;
+}
+
+/**
+ * intel_gvt_i2c_handle_gmbus_read - emulate gmbus register mmio read
+ * @vgpu: a vGPU
+ *
+ * This function is used to emulate gmbus register mmio read
+ *
+ * Returns:
+ * Zero on success, negative error code if failed.
+ *
+ */
+int intel_gvt_i2c_handle_gmbus_read(struct intel_vgpu *vgpu,
+ unsigned int offset, void *p_data, unsigned int bytes)
+{
+ if (WARN_ON(bytes > 8 && (offset & (bytes - 1))))
+ return -EINVAL;
+
+ if (offset == i915_mmio_reg_offset(PCH_GMBUS2))
+ return gmbus2_mmio_read(vgpu, offset, p_data, bytes);
+ else if (offset == i915_mmio_reg_offset(PCH_GMBUS3))
+ return gmbus3_mmio_read(vgpu, offset, p_data, bytes);
+
+ memcpy(p_data, &vgpu_vreg(vgpu, offset), bytes);
+ return 0;
+}
+
+/**
+ * intel_gvt_i2c_handle_gmbus_write - emulate gmbus register mmio write
+ * @vgpu: a vGPU
+ *
+ * This function is used to emulate gmbus register mmio write
+ *
+ * Returns:
+ * Zero on success, negative error code if failed.
+ *
+ */
+int intel_gvt_i2c_handle_gmbus_write(struct intel_vgpu *vgpu,
+ unsigned int offset, void *p_data, unsigned int bytes)
+{
+ if (WARN_ON(bytes > 8 && (offset & (bytes - 1))))
+ return -EINVAL;
+
+ if (offset == i915_mmio_reg_offset(PCH_GMBUS0))
+ return gmbus0_mmio_write(vgpu, offset, p_data, bytes);
+ else if (offset == i915_mmio_reg_offset(PCH_GMBUS1))
+ return gmbus1_mmio_write(vgpu, offset, p_data, bytes);
+ else if (offset == i915_mmio_reg_offset(PCH_GMBUS2))
+ return gmbus2_mmio_write(vgpu, offset, p_data, bytes);
+ else if (offset == i915_mmio_reg_offset(PCH_GMBUS3))
+ return gmbus3_mmio_write(vgpu, offset, p_data, bytes);
+
+ memcpy(&vgpu_vreg(vgpu, offset), p_data, bytes);
+ return 0;
+}
+
+enum {
+ AUX_CH_CTL = 0,
+ AUX_CH_DATA1,
+ AUX_CH_DATA2,
+ AUX_CH_DATA3,
+ AUX_CH_DATA4,
+ AUX_CH_DATA5
+};
+
+static inline int get_aux_ch_reg(unsigned int offset)
+{
+ int reg;
+
+ switch (offset & 0xff) {
+ case 0x10:
+ reg = AUX_CH_CTL;
+ break;
+ case 0x14:
+ reg = AUX_CH_DATA1;
+ break;
+ case 0x18:
+ reg = AUX_CH_DATA2;
+ break;
+ case 0x1c:
+ reg = AUX_CH_DATA3;
+ break;
+ case 0x20:
+ reg = AUX_CH_DATA4;
+ break;
+ case 0x24:
+ reg = AUX_CH_DATA5;
+ break;
+ default:
+ reg = -1;
+ break;
+ }
+ return reg;
+}
+
+#define AUX_CTL_MSG_LENGTH(reg) \
+ ((reg & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >> \
+ DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT)
+
+/**
+ * intel_gvt_i2c_handle_aux_ch_write - emulate AUX channel register write
+ * @vgpu: a vGPU
+ *
+ * This function is used to emulate AUX channel register write
+ *
+ */
+void intel_gvt_i2c_handle_aux_ch_write(struct intel_vgpu *vgpu,
+ int port_idx,
+ unsigned int offset,
+ void *p_data)
+{
+ struct intel_vgpu_i2c_edid *i2c_edid = &vgpu->display.i2c_edid;
+ int msg_length, ret_msg_size;
+ int msg, addr, ctrl, op;
+ u32 value = *(u32 *)p_data;
+ int aux_data_for_write = 0;
+ int reg = get_aux_ch_reg(offset);
+
+ if (reg != AUX_CH_CTL) {
+ vgpu_vreg(vgpu, offset) = value;
+ return;
+ }
+
+ msg_length = AUX_CTL_MSG_LENGTH(value);
+ /* check the msg in DATA register */
+ msg = vgpu_vreg(vgpu, offset + 4);
+ addr = (msg >> 8) & 0xffff;
+ ctrl = (msg >> 24) & 0xff;
+ op = ctrl >> 4;
+ if (!(value & DP_AUX_CH_CTL_SEND_BUSY)) {
+ /* The ctl write to clear some states */
+ return;
+ }
+
+ /* Always set the wanted value for VMs. */
+ ret_msg_size = (((op & 0x1) == GVT_AUX_I2C_READ) ? 2 : 1);
+ vgpu_vreg(vgpu, offset) =
+ DP_AUX_CH_CTL_DONE |
+ ((ret_msg_size << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) &
+ DP_AUX_CH_CTL_MESSAGE_SIZE_MASK);
+
+ if (msg_length == 3) {
+ if (!(op & GVT_AUX_I2C_MOT)) {
+ /* stop */
+ intel_vgpu_init_i2c_edid(vgpu);
+ } else {
+ /* start or restart */
+ i2c_edid->aux_ch.i2c_over_aux_ch = true;
+ i2c_edid->aux_ch.aux_ch_mot = true;
+ if (addr == 0) {
+ /* reset the address */
+ intel_vgpu_init_i2c_edid(vgpu);
+ } else if (addr == EDID_ADDR) {
+ i2c_edid->state = I2C_AUX_CH;
+ i2c_edid->port = port_idx;
+ i2c_edid->slave_selected = true;
+ if (intel_vgpu_has_monitor_on_port(vgpu,
+ port_idx) &&
+ intel_vgpu_port_is_dp(vgpu, port_idx))
+ i2c_edid->edid_available = true;
+ }
+ }
+ } else if ((op & 0x1) == GVT_AUX_I2C_WRITE) {
+ /* TODO
+ * We only support EDID reading from I2C_over_AUX. And
+ * we do not expect the index mode to be used. Right now
+ * the WRITE operation is ignored. It is good enough to
+ * support the gfx driver to do EDID access.
+ */
+ } else {
+ if (WARN_ON((op & 0x1) != GVT_AUX_I2C_READ))
+ return;
+ if (WARN_ON(msg_length != 4))
+ return;
+ if (i2c_edid->edid_available && i2c_edid->slave_selected) {
+ unsigned char val = edid_get_byte(vgpu);
+
+ aux_data_for_write = (val << 16);
+ }
+ }
+ /* write the return value in AUX_CH_DATA reg which includes:
+ * ACK of I2C_WRITE
+ * returned byte if it is READ
+ */
+ aux_data_for_write |= GVT_AUX_I2C_REPLY_ACK << 24;
+ vgpu_vreg(vgpu, offset + 4) = aux_data_for_write;
+}
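+
+/*
+ * Decoding example (illustrative): an I2C-over-AUX "address only" request
+ * of msg = 0x50005000 in AUX_CH_DATA1 gives ctrl = 0x50, so op = 0x5
+ * (GVT_AUX_I2C_MOT | GVT_AUX_I2C_READ) and addr = 0x0050 (EDID_ADDR);
+ * with msg_length == 3 this selects the EDID slave without transferring
+ * a data byte yet.
+ */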
+
+/**
+ * intel_vgpu_init_i2c_edid - initialize vGPU i2c edid emulation
+ * @vgpu: a vGPU
+ *
+ * This function is used to initialize the vGPU i2c edid emulation
+ *
+ */
+void intel_vgpu_init_i2c_edid(struct intel_vgpu *vgpu)
+{
+ struct intel_vgpu_i2c_edid *edid = &vgpu->display.i2c_edid;
+
+ edid->state = I2C_NOT_SPECIFIED;
+
+ edid->port = -1;
+ edid->slave_selected = false;
+ edid->edid_available = false;
+ edid->current_edid_read = 0;
+
+ memset(&edid->gmbus, 0, sizeof(struct intel_vgpu_i2c_gmbus));
+
+ edid->aux_ch.i2c_over_aux_ch = false;
+ edid->aux_ch.aux_ch_mot = false;
+}
diff --git a/drivers/gpu/drm/i915/gvt/edid.h b/drivers/gpu/drm/i915/gvt/edid.h
new file mode 100644
index 000000000000..f6dfc8b795ec
--- /dev/null
+++ b/drivers/gpu/drm/i915/gvt/edid.h
@@ -0,0 +1,150 @@
+/*
+ * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ * Ke Yu
+ * Zhiyuan Lv <zhiyuan.lv@intel.com>
+ *
+ * Contributors:
+ * Terrence Xu <terrence.xu@intel.com>
+ * Changbin Du <changbin.du@intel.com>
+ * Bing Niu <bing.niu@intel.com>
+ * Zhi Wang <zhi.a.wang@intel.com>
+ *
+ */
+
+#ifndef _GVT_EDID_H_
+#define _GVT_EDID_H_
+
+#define EDID_SIZE 128
+#define EDID_ADDR 0x50 /* Linux hvm EDID addr */
+
+#define GVT_AUX_NATIVE_WRITE 0x8
+#define GVT_AUX_NATIVE_READ 0x9
+#define GVT_AUX_I2C_WRITE 0x0
+#define GVT_AUX_I2C_READ 0x1
+#define GVT_AUX_I2C_STATUS 0x2
+#define GVT_AUX_I2C_MOT 0x4
+#define GVT_AUX_I2C_REPLY_ACK 0x0
+
+struct intel_vgpu_edid_data {
+ bool data_valid;
+ unsigned char edid_block[EDID_SIZE];
+};
+
+enum gmbus_cycle_type {
+ GMBUS_NOCYCLE = 0x0,
+ NIDX_NS_W = 0x1,
+ IDX_NS_W = 0x3,
+ GMBUS_STOP = 0x4,
+ NIDX_STOP = 0x5,
+ IDX_STOP = 0x7
+};
+
+/*
+ * States of GMBUS
+ *
+ * GMBUS0-3 are related to the EDID virtualization. Another two GMBUS
+ * registers, GMBUS4 (interrupt mask) and GMBUS5 (2-byte index register), are
+ * not considered here. Below describes the usage of the GMBUS registers that
+ * the EDID virtualization cares about.
+ *
+ * GMBUS0:
+ * R/W
+ * port selection. value of bit0 - bit2 corresponds to the GPIO registers.
+ *
+ * GMBUS1:
+ * R/W Protect
+ * Command and Status.
+ * bit0 is the direction bit: 1 is read; 0 is write.
+ * bit1 - bit7 is slave 7-bit address.
+ * bit16 - bit24 total byte count (ignore?)
+ *
+ * GMBUS2:
+ * Most of bits are read only except bit 15 (IN_USE)
+ * Status register
+ * bit0 - bit8 current byte count
+ * bit 11: hardware ready;
+ *
+ * GMBUS3:
+ * Read/Write
+ * Data for transfer
+ */
+
+/* From hw specs, Other phases like START, ADDRESS, INDEX
+ * are invisible to GMBUS MMIO interface. So no definitions
+ * in below enum types
+ */
+enum gvt_gmbus_phase {
+ GMBUS_IDLE_PHASE = 0,
+ GMBUS_DATA_PHASE,
+ GMBUS_WAIT_PHASE,
+ //GMBUS_STOP_PHASE,
+ GMBUS_MAX_PHASE
+};
+
+struct intel_vgpu_i2c_gmbus {
+ unsigned int total_byte_count; /* from GMBUS1 */
+ enum gmbus_cycle_type cycle_type;
+ enum gvt_gmbus_phase phase;
+};
+
+struct intel_vgpu_i2c_aux_ch {
+ bool i2c_over_aux_ch;
+ bool aux_ch_mot;
+};
+
+enum i2c_state {
+ I2C_NOT_SPECIFIED = 0,
+ I2C_GMBUS = 1,
+ I2C_AUX_CH = 2
+};
+
+/* I2C sequences cannot interleave.
+ * GMBUS and AUX_CH sequences cannot interleave.
+ */
+struct intel_vgpu_i2c_edid {
+ enum i2c_state state;
+
+ unsigned int port;
+ bool slave_selected;
+ bool edid_available;
+ unsigned int current_edid_read;
+
+ struct intel_vgpu_i2c_gmbus gmbus;
+ struct intel_vgpu_i2c_aux_ch aux_ch;
+};
+
+void intel_vgpu_init_i2c_edid(struct intel_vgpu *vgpu);
+
+int intel_gvt_i2c_handle_gmbus_read(struct intel_vgpu *vgpu,
+ unsigned int offset, void *p_data, unsigned int bytes);
+
+int intel_gvt_i2c_handle_gmbus_write(struct intel_vgpu *vgpu,
+ unsigned int offset, void *p_data, unsigned int bytes);
+
+void intel_gvt_i2c_handle_aux_ch_write(struct intel_vgpu *vgpu,
+ int port_idx,
+ unsigned int offset,
+ void *p_data);
+
+#endif /*_GVT_EDID_H_*/
diff --git a/drivers/gpu/drm/i915/gvt/execlist.c b/drivers/gpu/drm/i915/gvt/execlist.c
new file mode 100644
index 000000000000..f32bb6f6495c
--- /dev/null
+++ b/drivers/gpu/drm/i915/gvt/execlist.c
@@ -0,0 +1,858 @@
+/*
+ * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ * Zhiyuan Lv <zhiyuan.lv@intel.com>
+ * Zhi Wang <zhi.a.wang@intel.com>
+ *
+ * Contributors:
+ * Min He <min.he@intel.com>
+ * Bing Niu <bing.niu@intel.com>
+ * Ping Gao <ping.a.gao@intel.com>
+ * Tina Zhang <tina.zhang@intel.com>
+ *
+ */
+
+#include "i915_drv.h"
+#include "gvt.h"
+
+#define _EL_OFFSET_STATUS 0x234
+#define _EL_OFFSET_STATUS_BUF 0x370
+#define _EL_OFFSET_STATUS_PTR 0x3A0
+
+#define execlist_ring_mmio(gvt, ring_id, offset) \
+ (gvt->dev_priv->engine[ring_id]->mmio_base + (offset))
+
+#define valid_context(ctx) ((ctx)->valid)
+#define same_context(a, b) (((a)->context_id == (b)->context_id) && \
+ ((a)->lrca == (b)->lrca))
+
+static int context_switch_events[] = {
+ [RCS] = RCS_AS_CONTEXT_SWITCH,
+ [BCS] = BCS_AS_CONTEXT_SWITCH,
+ [VCS] = VCS_AS_CONTEXT_SWITCH,
+ [VCS2] = VCS2_AS_CONTEXT_SWITCH,
+ [VECS] = VECS_AS_CONTEXT_SWITCH,
+};
+
+static int ring_id_to_context_switch_event(int ring_id)
+{
+ if (WARN_ON(ring_id < RCS ||
+ ring_id >= ARRAY_SIZE(context_switch_events)))
+ return -EINVAL;
+
+ return context_switch_events[ring_id];
+}
+
+static void switch_virtual_execlist_slot(struct intel_vgpu_execlist *execlist)
+{
+ gvt_dbg_el("[before] running slot %d/context %x pending slot %d\n",
+ execlist->running_slot ?
+ execlist->running_slot->index : -1,
+ execlist->running_context ?
+ execlist->running_context->context_id : 0,
+ execlist->pending_slot ?
+ execlist->pending_slot->index : -1);
+
+ execlist->running_slot = execlist->pending_slot;
+ execlist->pending_slot = NULL;
+ execlist->running_context = execlist->running_context ?
+ &execlist->running_slot->ctx[0] : NULL;
+
+ gvt_dbg_el("[after] running slot %d/context %x pending slot %d\n",
+ execlist->running_slot ?
+ execlist->running_slot->index : -1,
+ execlist->running_context ?
+ execlist->running_context->context_id : 0,
+ execlist->pending_slot ?
+ execlist->pending_slot->index : -1);
+}
+
+static void emulate_execlist_status(struct intel_vgpu_execlist *execlist)
+{
+ struct intel_vgpu_execlist_slot *running = execlist->running_slot;
+ struct intel_vgpu_execlist_slot *pending = execlist->pending_slot;
+ struct execlist_ctx_descriptor_format *desc = execlist->running_context;
+ struct intel_vgpu *vgpu = execlist->vgpu;
+ struct execlist_status_format status;
+ int ring_id = execlist->ring_id;
+ u32 status_reg = execlist_ring_mmio(vgpu->gvt,
+ ring_id, _EL_OFFSET_STATUS);
+
+ status.ldw = vgpu_vreg(vgpu, status_reg);
+ status.udw = vgpu_vreg(vgpu, status_reg + 4);
+
+ if (running) {
+ status.current_execlist_pointer = !!running->index;
+ status.execlist_write_pointer = !running->index;
+ status.execlist_0_active = status.execlist_0_valid =
+ !running->index;
+ status.execlist_1_active = status.execlist_1_valid =
+ !!(running->index);
+ } else {
+ status.context_id = 0;
+ status.execlist_0_active = status.execlist_0_valid = 0;
+ status.execlist_1_active = status.execlist_1_valid = 0;
+ }
+
+ status.context_id = desc ? desc->context_id : 0;
+ status.execlist_queue_full = !!(pending);
+
+ vgpu_vreg(vgpu, status_reg) = status.ldw;
+ vgpu_vreg(vgpu, status_reg + 4) = status.udw;
+
+ gvt_dbg_el("vgpu%d: status reg offset %x ldw %x udw %x\n",
+ vgpu->id, status_reg, status.ldw, status.udw);
+}
+
+static void emulate_csb_update(struct intel_vgpu_execlist *execlist,
+ struct execlist_context_status_format *status,
+ bool trigger_interrupt_later)
+{
+ struct intel_vgpu *vgpu = execlist->vgpu;
+ int ring_id = execlist->ring_id;
+ struct execlist_context_status_pointer_format ctx_status_ptr;
+ u32 write_pointer;
+ u32 ctx_status_ptr_reg, ctx_status_buf_reg, offset;
+
+ ctx_status_ptr_reg = execlist_ring_mmio(vgpu->gvt, ring_id,
+ _EL_OFFSET_STATUS_PTR);
+ ctx_status_buf_reg = execlist_ring_mmio(vgpu->gvt, ring_id,
+ _EL_OFFSET_STATUS_BUF);
+
+ ctx_status_ptr.dw = vgpu_vreg(vgpu, ctx_status_ptr_reg);
+
+ write_pointer = ctx_status_ptr.write_ptr;
+
+ if (write_pointer == 0x7)
+ write_pointer = 0;
+ else {
+ ++write_pointer;
+ write_pointer %= 0x6;
+ }
+
+ offset = ctx_status_buf_reg + write_pointer * 8;
+
+ vgpu_vreg(vgpu, offset) = status->ldw;
+ vgpu_vreg(vgpu, offset + 4) = status->udw;
+
+ ctx_status_ptr.write_ptr = write_pointer;
+ vgpu_vreg(vgpu, ctx_status_ptr_reg) = ctx_status_ptr.dw;
+
+ gvt_dbg_el("vgpu%d: w pointer %u reg %x csb l %x csb h %x\n",
+ vgpu->id, write_pointer, offset, status->ldw, status->udw);
+
+ if (trigger_interrupt_later)
+ return;
+
+ intel_vgpu_trigger_virtual_event(vgpu,
+ ring_id_to_context_switch_event(execlist->ring_id));
+}
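+
+/*
+ * Worked example (illustrative): the context status buffer has six
+ * entries. The reset value 0x7 maps to entry 0, and from entry 5 the
+ * pointer wraps via (5 + 1) % 6 back to 0, so successive status writes
+ * land in entries 0, 1, 2, 3, 4, 5, 0, ...
+ */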
+
+static int emulate_execlist_ctx_schedule_out(
+ struct intel_vgpu_execlist *execlist,
+ struct execlist_ctx_descriptor_format *ctx)
+{
+ struct intel_vgpu_execlist_slot *running = execlist->running_slot;
+ struct intel_vgpu_execlist_slot *pending = execlist->pending_slot;
+ struct execlist_ctx_descriptor_format *ctx0 = &running->ctx[0];
+ struct execlist_ctx_descriptor_format *ctx1 = &running->ctx[1];
+ struct execlist_context_status_format status;
+
+ memset(&status, 0, sizeof(status));
+
+ gvt_dbg_el("schedule out context id %x\n", ctx->context_id);
+
+ if (WARN_ON(!same_context(ctx, execlist->running_context))) {
+ gvt_err("schedule out context is not running context,"
+ "ctx id %x running ctx id %x\n",
+ ctx->context_id,
+ execlist->running_context->context_id);
+ return -EINVAL;
+ }
+
+ /* ctx1 is valid, ctx0/ctx is scheduled-out -> element switch */
+ if (valid_context(ctx1) && same_context(ctx0, ctx)) {
+ gvt_dbg_el("ctx 1 valid, ctx/ctx 0 is scheduled-out\n");
+
+ execlist->running_context = ctx1;
+
+ emulate_execlist_status(execlist);
+
+ status.context_complete = status.element_switch = 1;
+ status.context_id = ctx->context_id;
+
+ emulate_csb_update(execlist, &status, false);
+ /*
+ * ctx1 is not valid, ctx == ctx0
+ * ctx1 is valid, ctx1 == ctx
+ * --> last element is finished
+ * emulate:
+ * active-to-idle if there is *no* pending execlist
+ * context-complete if there *is* pending execlist
+ */
+ } else if ((!valid_context(ctx1) && same_context(ctx0, ctx))
+ || (valid_context(ctx1) && same_context(ctx1, ctx))) {
+ gvt_dbg_el("need to switch virtual execlist slot\n");
+
+ switch_virtual_execlist_slot(execlist);
+
+ emulate_execlist_status(execlist);
+
+ status.context_complete = status.active_to_idle = 1;
+ status.context_id = ctx->context_id;
+
+ if (!pending) {
+ emulate_csb_update(execlist, &status, false);
+ } else {
+ emulate_csb_update(execlist, &status, true);
+
+ memset(&status, 0, sizeof(status));
+
+ status.idle_to_active = 1;
+ status.context_id = 0;
+
+ emulate_csb_update(execlist, &status, false);
+ }
+ } else {
+ WARN_ON(1);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static struct intel_vgpu_execlist_slot *get_next_execlist_slot(
+ struct intel_vgpu_execlist *execlist)
+{
+ struct intel_vgpu *vgpu = execlist->vgpu;
+ int ring_id = execlist->ring_id;
+ u32 status_reg = execlist_ring_mmio(vgpu->gvt, ring_id,
+ _EL_OFFSET_STATUS);
+ struct execlist_status_format status;
+
+ status.ldw = vgpu_vreg(vgpu, status_reg);
+ status.udw = vgpu_vreg(vgpu, status_reg + 4);
+
+ if (status.execlist_queue_full) {
+ gvt_err("virtual execlist slots are full\n");
+ return NULL;
+ }
+
+ return &execlist->slot[status.execlist_write_pointer];
+}
+
+static int emulate_execlist_schedule_in(struct intel_vgpu_execlist *execlist,
+ struct execlist_ctx_descriptor_format ctx[2])
+{
+ struct intel_vgpu_execlist_slot *running = execlist->running_slot;
+ struct intel_vgpu_execlist_slot *slot =
+ get_next_execlist_slot(execlist);
+
+ struct execlist_ctx_descriptor_format *ctx0, *ctx1;
+ struct execlist_context_status_format status;
+
+ gvt_dbg_el("emulate schedule-in\n");
+
+ if (!slot) {
+ gvt_err("no available execlist slot\n");
+ return -EINVAL;
+ }
+
+ memset(&status, 0, sizeof(status));
+ memset(slot->ctx, 0, sizeof(slot->ctx));
+
+ slot->ctx[0] = ctx[0];
+ slot->ctx[1] = ctx[1];
+
+ gvt_dbg_el("alloc slot index %d ctx 0 %x ctx 1 %x\n",
+ slot->index, ctx[0].context_id,
+ ctx[1].context_id);
+
+ /*
+ * no running execlist, make this write bundle as running execlist
+ * -> idle-to-active
+ */
+ if (!running) {
+ gvt_dbg_el("no current running execlist\n");
+
+ execlist->running_slot = slot;
+ execlist->pending_slot = NULL;
+ execlist->running_context = &slot->ctx[0];
+
+ gvt_dbg_el("running slot index %d running context %x\n",
+ execlist->running_slot->index,
+ execlist->running_context->context_id);
+
+ emulate_execlist_status(execlist);
+
+ status.idle_to_active = 1;
+ status.context_id = 0;
+
+ emulate_csb_update(execlist, &status, false);
+ return 0;
+ }
+
+ ctx0 = &running->ctx[0];
+ ctx1 = &running->ctx[1];
+
+ gvt_dbg_el("current running slot index %d ctx 0 %x ctx 1 %x\n",
+ running->index, ctx0->context_id, ctx1->context_id);
+
+ /*
+ * already has a running execlist
+ * a. running ctx1 is valid,
+ * ctx0 is finished, and running ctx1 == new execlist ctx[0]
+ * b. running ctx1 is not valid,
+ * ctx0 == new execlist ctx[0]
+ * ----> lite-restore + preempted
+ */
+ if ((valid_context(ctx1) && same_context(ctx1, &slot->ctx[0]) &&
+ /* condition a */
+ (!same_context(ctx0, execlist->running_context))) ||
+ (!valid_context(ctx1) &&
+ same_context(ctx0, &slot->ctx[0]))) { /* condition b */
+ gvt_dbg_el("need to switch virtual execlist slot\n");
+
+ execlist->pending_slot = slot;
+ switch_virtual_execlist_slot(execlist);
+
+ emulate_execlist_status(execlist);
+
+ status.lite_restore = status.preempted = 1;
+ status.context_id = ctx[0].context_id;
+
+ emulate_csb_update(execlist, &status, false);
+ } else {
+ gvt_dbg_el("emulate as pending slot\n");
+ /*
+ * otherwise
+ * --> emulate pending execlist exist + but no preemption case
+ */
+ execlist->pending_slot = slot;
+ emulate_execlist_status(execlist);
+ }
+ return 0;
+}
+
+static void free_workload(struct intel_vgpu_workload *workload)
+{
+ intel_vgpu_unpin_mm(workload->shadow_mm);
+ intel_gvt_mm_unreference(workload->shadow_mm);
+ kmem_cache_free(workload->vgpu->workloads, workload);
+}
+
+#define get_desc_from_elsp_dwords(ed, i) \
+ ((struct execlist_ctx_descriptor_format *)&((ed)->data[i * 2]))
+
+#define BATCH_BUFFER_ADDR_MASK ((1UL << 32) - (1U << 2))
+#define BATCH_BUFFER_ADDR_HIGH_MASK ((1UL << 16) - (1U))
+static int set_gma_to_bb_cmd(struct intel_shadow_bb_entry *entry_obj,
+ unsigned long add, int gmadr_bytes)
+{
+ if (WARN_ON(gmadr_bytes != 4 && gmadr_bytes != 8))
+ return -1;
+
+ *((u32 *)(entry_obj->bb_start_cmd_va + (1 << 2))) = add &
+ BATCH_BUFFER_ADDR_MASK;
+ if (gmadr_bytes == 8) {
+ *((u32 *)(entry_obj->bb_start_cmd_va + (2 << 2))) =
+ add & BATCH_BUFFER_ADDR_HIGH_MASK;
+ }
+
+ return 0;
+}
+
+static void prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload)
+{
+ int gmadr_bytes = workload->vgpu->gvt->device_info.gmadr_bytes_in_cmd;
+
+ /* pin the gem object to ggtt */
+	if (!list_empty(&workload->shadow_bb)) {
+		struct intel_shadow_bb_entry *entry_obj, *temp;
+
+ list_for_each_entry_safe(entry_obj, temp, &workload->shadow_bb,
+ list) {
+ struct i915_vma *vma;
+
+ vma = i915_gem_object_ggtt_pin(entry_obj->obj, NULL, 0,
+ 4, 0);
+ if (IS_ERR(vma)) {
+ gvt_err("Cannot pin\n");
+ return;
+ }
+
+ /* FIXME: we are not tracking our pinned VMA leaving it
+ * up to the core to fix up the stray pin_count upon
+ * free.
+ */
+
+			/* update the relocated gma with the shadow batch buffer address */
+ set_gma_to_bb_cmd(entry_obj,
+ i915_ggtt_offset(vma),
+ gmadr_bytes);
+ }
+ }
+}
+
+static int update_wa_ctx_2_shadow_ctx(struct intel_shadow_wa_ctx *wa_ctx)
+{
+ int ring_id = wa_ctx->workload->ring_id;
+ struct i915_gem_context *shadow_ctx =
+ wa_ctx->workload->vgpu->shadow_ctx;
+ struct drm_i915_gem_object *ctx_obj =
+ shadow_ctx->engine[ring_id].state->obj;
+ struct execlist_ring_context *shadow_ring_context;
+ struct page *page;
+
+ page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
+ shadow_ring_context = kmap_atomic(page);
+
+ shadow_ring_context->bb_per_ctx_ptr.val =
+ (shadow_ring_context->bb_per_ctx_ptr.val &
+ (~PER_CTX_ADDR_MASK)) | wa_ctx->per_ctx.shadow_gma;
+ shadow_ring_context->rcs_indirect_ctx.val =
+ (shadow_ring_context->rcs_indirect_ctx.val &
+ (~INDIRECT_CTX_ADDR_MASK)) | wa_ctx->indirect_ctx.shadow_gma;
+
+ kunmap_atomic(shadow_ring_context);
+ return 0;
+}
+
+static void prepare_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
+{
+ struct i915_vma *vma;
+ unsigned char *per_ctx_va =
+ (unsigned char *)wa_ctx->indirect_ctx.shadow_va +
+ wa_ctx->indirect_ctx.size;
+
+ if (wa_ctx->indirect_ctx.size == 0)
+ return;
+
+ vma = i915_gem_object_ggtt_pin(wa_ctx->indirect_ctx.obj, NULL,
+ 0, CACHELINE_BYTES, 0);
+ if (IS_ERR(vma)) {
+ gvt_err("Cannot pin indirect ctx obj\n");
+ return;
+ }
+
+ /* FIXME: we are not tracking our pinned VMA leaving it
+ * up to the core to fix up the stray pin_count upon
+ * free.
+ */
+
+ wa_ctx->indirect_ctx.shadow_gma = i915_ggtt_offset(vma);
+
+ wa_ctx->per_ctx.shadow_gma = *((unsigned int *)per_ctx_va + 1);
+ memset(per_ctx_va, 0, CACHELINE_BYTES);
+
+ update_wa_ctx_2_shadow_ctx(wa_ctx);
+}
+
+static int prepare_execlist_workload(struct intel_vgpu_workload *workload)
+{
+ struct intel_vgpu *vgpu = workload->vgpu;
+ struct execlist_ctx_descriptor_format ctx[2];
+ int ring_id = workload->ring_id;
+
+ intel_vgpu_pin_mm(workload->shadow_mm);
+ intel_vgpu_sync_oos_pages(workload->vgpu);
+ intel_vgpu_flush_post_shadow(workload->vgpu);
+ prepare_shadow_batch_buffer(workload);
+ prepare_shadow_wa_ctx(&workload->wa_ctx);
+ if (!workload->emulate_schedule_in)
+ return 0;
+
+ ctx[0] = *get_desc_from_elsp_dwords(&workload->elsp_dwords, 1);
+ ctx[1] = *get_desc_from_elsp_dwords(&workload->elsp_dwords, 0);
+
+ return emulate_execlist_schedule_in(&vgpu->execlist[ring_id], ctx);
+}
+
+static void release_shadow_batch_buffer(struct intel_vgpu_workload *workload)
+{
+	/* release all the shadow batch buffers */
+	if (!list_empty(&workload->shadow_bb)) {
+		struct intel_shadow_bb_entry *entry_obj, *temp;
+
+ list_for_each_entry_safe(entry_obj, temp, &workload->shadow_bb,
+ list) {
+ i915_gem_object_unpin_map(entry_obj->obj);
+ i915_gem_object_put(entry_obj->obj);
+ list_del(&entry_obj->list);
+ kfree(entry_obj);
+ }
+ }
+}
+
+static void release_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
+{
+ if (wa_ctx->indirect_ctx.size == 0)
+ return;
+
+ i915_gem_object_unpin_map(wa_ctx->indirect_ctx.obj);
+ i915_gem_object_put(wa_ctx->indirect_ctx.obj);
+}
+
+static int complete_execlist_workload(struct intel_vgpu_workload *workload)
+{
+ struct intel_vgpu *vgpu = workload->vgpu;
+ struct intel_vgpu_execlist *execlist =
+ &vgpu->execlist[workload->ring_id];
+ struct intel_vgpu_workload *next_workload;
+ struct list_head *next = workload_q_head(vgpu, workload->ring_id)->next;
+ bool lite_restore = false;
+ int ret;
+
+ gvt_dbg_el("complete workload %p status %d\n", workload,
+ workload->status);
+
+ release_shadow_batch_buffer(workload);
+ release_shadow_wa_ctx(&workload->wa_ctx);
+
+ if (workload->status || vgpu->resetting)
+ goto out;
+
+ if (!list_empty(workload_q_head(vgpu, workload->ring_id))) {
+ struct execlist_ctx_descriptor_format *this_desc, *next_desc;
+
+ next_workload = container_of(next,
+ struct intel_vgpu_workload, list);
+ this_desc = &workload->ctx_desc;
+ next_desc = &next_workload->ctx_desc;
+
+ lite_restore = same_context(this_desc, next_desc);
+ }
+
+ if (lite_restore) {
+ gvt_dbg_el("next context == current - no schedule-out\n");
+ free_workload(workload);
+ return 0;
+ }
+
+ ret = emulate_execlist_ctx_schedule_out(execlist, &workload->ctx_desc);
+ if (ret)
+ goto err;
+out:
+ free_workload(workload);
+ return 0;
+err:
+ free_workload(workload);
+ return ret;
+}
+
+#define RING_CTX_OFF(x) \
+ offsetof(struct execlist_ring_context, x)
+
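+/*
+ * The ring context stores the PDPs from pdp3_UDW down to pdp0_LDW, so
+ * filling pdp[7 - i] leaves pdp[] ordered from PDP0's low dword upwards.
+ */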
+static void read_guest_pdps(struct intel_vgpu *vgpu,
+ u64 ring_context_gpa, u32 pdp[8])
+{
+ u64 gpa;
+ int i;
+
+ gpa = ring_context_gpa + RING_CTX_OFF(pdp3_UDW.val);
+
+ for (i = 0; i < 8; i++)
+ intel_gvt_hypervisor_read_gpa(vgpu,
+ gpa + i * 8, &pdp[7 - i], 4);
+}
+
+static int prepare_mm(struct intel_vgpu_workload *workload)
+{
+ struct execlist_ctx_descriptor_format *desc = &workload->ctx_desc;
+ struct intel_vgpu_mm *mm;
+ int page_table_level;
+ u32 pdp[8];
+
+ if (desc->addressing_mode == 1) { /* legacy 32-bit */
+ page_table_level = 3;
+ } else if (desc->addressing_mode == 3) { /* legacy 64 bit */
+ page_table_level = 4;
+ } else {
+ gvt_err("Advanced Context mode(SVM) is not supported!\n");
+ return -EINVAL;
+ }
+
+ read_guest_pdps(workload->vgpu, workload->ring_context_gpa, pdp);
+
+ mm = intel_vgpu_find_ppgtt_mm(workload->vgpu, page_table_level, pdp);
+ if (mm) {
+ intel_gvt_mm_reference(mm);
+ } else {
+
+ mm = intel_vgpu_create_mm(workload->vgpu, INTEL_GVT_MM_PPGTT,
+ pdp, page_table_level, 0);
+ if (IS_ERR(mm)) {
+ gvt_err("fail to create mm object.\n");
+ return PTR_ERR(mm);
+ }
+ }
+ workload->shadow_mm = mm;
+ return 0;
+}
+
+#define get_last_workload(q) \
+ (list_empty(q) ? NULL : container_of(q->prev, \
+ struct intel_vgpu_workload, list))
+
+static int submit_context(struct intel_vgpu *vgpu, int ring_id,
+ struct execlist_ctx_descriptor_format *desc,
+ bool emulate_schedule_in)
+{
+ struct list_head *q = workload_q_head(vgpu, ring_id);
+ struct intel_vgpu_workload *last_workload = get_last_workload(q);
+ struct intel_vgpu_workload *workload = NULL;
+ u64 ring_context_gpa;
+ u32 head, tail, start, ctl, ctx_ctl, per_ctx, indirect_ctx;
+ int ret;
+
+ ring_context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm,
+ (u32)((desc->lrca + 1) << GTT_PAGE_SHIFT));
+ if (ring_context_gpa == INTEL_GVT_INVALID_ADDR) {
+ gvt_err("invalid guest context LRCA: %x\n", desc->lrca);
+ return -EINVAL;
+ }
+
+ intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
+ RING_CTX_OFF(ring_header.val), &head, 4);
+
+ intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
+ RING_CTX_OFF(ring_tail.val), &tail, 4);
+
+ head &= RB_HEAD_OFF_MASK;
+ tail &= RB_TAIL_OFF_MASK;
+
+ if (last_workload && same_context(&last_workload->ctx_desc, desc)) {
+ gvt_dbg_el("ring id %d cur workload == last\n", ring_id);
+ gvt_dbg_el("ctx head %x real head %lx\n", head,
+ last_workload->rb_tail);
+ /*
+ * cannot use guest context head pointer here,
+ * as it might not be updated at this time
+ */
+ head = last_workload->rb_tail;
+ }
+
+ gvt_dbg_el("ring id %d begin a new workload\n", ring_id);
+
+ workload = kmem_cache_zalloc(vgpu->workloads, GFP_KERNEL);
+ if (!workload)
+ return -ENOMEM;
+
+ /* record some ring buffer register values for scan and shadow */
+ intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
+ RING_CTX_OFF(rb_start.val), &start, 4);
+ intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
+ RING_CTX_OFF(rb_ctrl.val), &ctl, 4);
+ intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
+ RING_CTX_OFF(ctx_ctrl.val), &ctx_ctl, 4);
+
+ INIT_LIST_HEAD(&workload->list);
+ INIT_LIST_HEAD(&workload->shadow_bb);
+
+ init_waitqueue_head(&workload->shadow_ctx_status_wq);
+ atomic_set(&workload->shadow_ctx_active, 0);
+
+ workload->vgpu = vgpu;
+ workload->ring_id = ring_id;
+ workload->ctx_desc = *desc;
+ workload->ring_context_gpa = ring_context_gpa;
+ workload->rb_head = head;
+ workload->rb_tail = tail;
+ workload->rb_start = start;
+ workload->rb_ctl = ctl;
+ workload->prepare = prepare_execlist_workload;
+ workload->complete = complete_execlist_workload;
+ workload->status = -EINPROGRESS;
+ workload->emulate_schedule_in = emulate_schedule_in;
+
+ if (ring_id == RCS) {
+ intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
+ RING_CTX_OFF(bb_per_ctx_ptr.val), &per_ctx, 4);
+ intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
+ RING_CTX_OFF(rcs_indirect_ctx.val), &indirect_ctx, 4);
+
+ workload->wa_ctx.indirect_ctx.guest_gma =
+ indirect_ctx & INDIRECT_CTX_ADDR_MASK;
+ workload->wa_ctx.indirect_ctx.size =
+ (indirect_ctx & INDIRECT_CTX_SIZE_MASK) *
+ CACHELINE_BYTES;
+ workload->wa_ctx.per_ctx.guest_gma =
+ per_ctx & PER_CTX_ADDR_MASK;
+ workload->wa_ctx.workload = workload;
+
+ WARN_ON(workload->wa_ctx.indirect_ctx.size && !(per_ctx & 0x1));
+ }
+
+ if (emulate_schedule_in)
+ memcpy(&workload->elsp_dwords,
+ &vgpu->execlist[ring_id].elsp_dwords,
+ sizeof(workload->elsp_dwords));
+
+ gvt_dbg_el("workload %p ring id %d head %x tail %x start %x ctl %x\n",
+ workload, ring_id, head, tail, start, ctl);
+
+ gvt_dbg_el("workload %p emulate schedule_in %d\n", workload,
+ emulate_schedule_in);
+
+ ret = prepare_mm(workload);
+ if (ret) {
+ kmem_cache_free(vgpu->workloads, workload);
+ return ret;
+ }
+
+ queue_workload(workload);
+ return 0;
+}
+
+int intel_vgpu_submit_execlist(struct intel_vgpu *vgpu, int ring_id)
+{
+ struct intel_vgpu_execlist *execlist = &vgpu->execlist[ring_id];
+ struct execlist_ctx_descriptor_format *desc[2], valid_desc[2];
+ unsigned long valid_desc_bitmap = 0;
+ bool emulate_schedule_in = true;
+ int ret;
+ int i;
+
+ memset(valid_desc, 0, sizeof(valid_desc));
+
+ desc[0] = get_desc_from_elsp_dwords(&execlist->elsp_dwords, 1);
+ desc[1] = get_desc_from_elsp_dwords(&execlist->elsp_dwords, 0);
+
+ for (i = 0; i < 2; i++) {
+ if (!desc[i]->valid)
+ continue;
+
+ if (!desc[i]->privilege_access) {
+ gvt_err("vgpu%d: unexpected GGTT elsp submission\n",
+ vgpu->id);
+ return -EINVAL;
+ }
+
+		/* TODO: add more guest context checks here. */
+ set_bit(i, &valid_desc_bitmap);
+ valid_desc[i] = *desc[i];
+ }
+
+ if (!valid_desc_bitmap) {
+ gvt_err("vgpu%d: no valid desc in a elsp submission\n",
+ vgpu->id);
+ return -EINVAL;
+ }
+
+ if (!test_bit(0, (void *)&valid_desc_bitmap) &&
+ test_bit(1, (void *)&valid_desc_bitmap)) {
+ gvt_err("vgpu%d: weird elsp submission, desc 0 is not valid\n",
+ vgpu->id);
+ return -EINVAL;
+ }
+
+ /* submit workload */
+ for_each_set_bit(i, (void *)&valid_desc_bitmap, 2) {
+ ret = submit_context(vgpu, ring_id, &valid_desc[i],
+ emulate_schedule_in);
+ if (ret) {
+ gvt_err("vgpu%d: fail to schedule workload\n",
+ vgpu->id);
+ return ret;
+ }
+ emulate_schedule_in = false;
+ }
+ return 0;
+}
+
+static void init_vgpu_execlist(struct intel_vgpu *vgpu, int ring_id)
+{
+ struct intel_vgpu_execlist *execlist = &vgpu->execlist[ring_id];
+ struct execlist_context_status_pointer_format ctx_status_ptr;
+ u32 ctx_status_ptr_reg;
+
+ memset(execlist, 0, sizeof(*execlist));
+
+ execlist->vgpu = vgpu;
+ execlist->ring_id = ring_id;
+ execlist->slot[0].index = 0;
+ execlist->slot[1].index = 1;
+
+ ctx_status_ptr_reg = execlist_ring_mmio(vgpu->gvt, ring_id,
+ _EL_OFFSET_STATUS_PTR);
+
+ ctx_status_ptr.dw = vgpu_vreg(vgpu, ctx_status_ptr_reg);
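+	/* 0x7 is the post-reset value of both CSB read/write pointers */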
+ ctx_status_ptr.read_ptr = ctx_status_ptr.write_ptr = 0x7;
+ vgpu_vreg(vgpu, ctx_status_ptr_reg) = ctx_status_ptr.dw;
+}
+
+void intel_vgpu_clean_execlist(struct intel_vgpu *vgpu)
+{
+ kmem_cache_destroy(vgpu->workloads);
+}
+
+int intel_vgpu_init_execlist(struct intel_vgpu *vgpu)
+{
+ enum intel_engine_id i;
+ struct intel_engine_cs *engine;
+
+ /* each ring has a virtual execlist engine */
+ for_each_engine(engine, vgpu->gvt->dev_priv, i) {
+ init_vgpu_execlist(vgpu, i);
+ INIT_LIST_HEAD(&vgpu->workload_q_head[i]);
+ }
+
+ vgpu->workloads = kmem_cache_create("gvt-g vgpu workload",
+ sizeof(struct intel_vgpu_workload), 0,
+ SLAB_HWCACHE_ALIGN,
+ NULL);
+
+ if (!vgpu->workloads)
+ return -ENOMEM;
+
+ return 0;
+}
+
+void intel_vgpu_reset_execlist(struct intel_vgpu *vgpu,
+ unsigned long engine_mask)
+{
+ struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+ struct intel_engine_cs *engine;
+ struct intel_vgpu_workload *pos, *n;
+ unsigned int tmp;
+
+ for_each_engine_masked(engine, dev_priv, engine_mask, tmp) {
+		/* free the unsubmitted workloads in the queue */
+ list_for_each_entry_safe(pos, n,
+ &vgpu->workload_q_head[engine->id], list) {
+ list_del_init(&pos->list);
+ free_workload(pos);
+ }
+
+ init_vgpu_execlist(vgpu, engine->id);
+ }
+}
diff --git a/drivers/gpu/drm/i915/gvt/execlist.h b/drivers/gpu/drm/i915/gvt/execlist.h
new file mode 100644
index 000000000000..7eced40a1e30
--- /dev/null
+++ b/drivers/gpu/drm/i915/gvt/execlist.h
@@ -0,0 +1,188 @@
+/*
+ * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ * Zhiyuan Lv <zhiyuan.lv@intel.com>
+ * Zhi Wang <zhi.a.wang@intel.com>
+ *
+ * Contributors:
+ * Min He <min.he@intel.com>
+ * Bing Niu <bing.niu@intel.com>
+ * Ping Gao <ping.a.gao@intel.com>
+ * Tina Zhang <tina.zhang@intel.com>
+ *
+ */
+
+#ifndef _GVT_EXECLIST_H_
+#define _GVT_EXECLIST_H_
+
+struct execlist_ctx_descriptor_format {
+ union {
+ u32 udw;
+ u32 context_id;
+ };
+ union {
+ u32 ldw;
+ struct {
+ u32 valid : 1;
+ u32 force_pd_restore : 1;
+ u32 force_restore : 1;
+ u32 addressing_mode : 2;
+ u32 llc_coherency : 1;
+ u32 fault_handling : 2;
+ u32 privilege_access : 1;
+ u32 reserved : 3;
+ u32 lrca : 20;
+ };
+ };
+};
+
+struct execlist_status_format {
+ union {
+ u32 ldw;
+ struct {
+ u32 current_execlist_pointer :1;
+ u32 execlist_write_pointer :1;
+ u32 execlist_queue_full :1;
+ u32 execlist_1_valid :1;
+ u32 execlist_0_valid :1;
+ u32 last_ctx_switch_reason :9;
+ u32 current_active_elm_status :2;
+ u32 arbitration_enable :1;
+ u32 execlist_1_active :1;
+ u32 execlist_0_active :1;
+ u32 reserved :13;
+ };
+ };
+ union {
+ u32 udw;
+ u32 context_id;
+ };
+};
+
+struct execlist_context_status_pointer_format {
+ union {
+ u32 dw;
+ struct {
+ u32 write_ptr :3;
+ u32 reserved :5;
+ u32 read_ptr :3;
+ u32 reserved2 :5;
+ u32 mask :16;
+ };
+ };
+};
+
+struct execlist_context_status_format {
+ union {
+ u32 ldw;
+ struct {
+ u32 idle_to_active :1;
+ u32 preempted :1;
+ u32 element_switch :1;
+ u32 active_to_idle :1;
+ u32 context_complete :1;
+ u32 wait_on_sync_flip :1;
+ u32 wait_on_vblank :1;
+ u32 wait_on_semaphore :1;
+ u32 wait_on_scanline :1;
+ u32 reserved :2;
+ u32 semaphore_wait_mode :1;
+ u32 display_plane :3;
+ u32 lite_restore :1;
+ u32 reserved_2 :16;
+ };
+ };
+ union {
+ u32 udw;
+ u32 context_id;
+ };
+};
+
+struct execlist_mmio_pair {
+ u32 addr;
+ u32 val;
+};
+
+/* The first 52 dwords in register state context */
+struct execlist_ring_context {
+ u32 nop1;
+ u32 lri_cmd_1;
+ struct execlist_mmio_pair ctx_ctrl;
+ struct execlist_mmio_pair ring_header;
+ struct execlist_mmio_pair ring_tail;
+ struct execlist_mmio_pair rb_start;
+ struct execlist_mmio_pair rb_ctrl;
+ struct execlist_mmio_pair bb_cur_head_UDW;
+ struct execlist_mmio_pair bb_cur_head_LDW;
+ struct execlist_mmio_pair bb_state;
+ struct execlist_mmio_pair second_bb_addr_UDW;
+ struct execlist_mmio_pair second_bb_addr_LDW;
+ struct execlist_mmio_pair second_bb_state;
+ struct execlist_mmio_pair bb_per_ctx_ptr;
+ struct execlist_mmio_pair rcs_indirect_ctx;
+ struct execlist_mmio_pair rcs_indirect_ctx_offset;
+ u32 nop2;
+ u32 nop3;
+ u32 nop4;
+ u32 lri_cmd_2;
+ struct execlist_mmio_pair ctx_timestamp;
+ struct execlist_mmio_pair pdp3_UDW;
+ struct execlist_mmio_pair pdp3_LDW;
+ struct execlist_mmio_pair pdp2_UDW;
+ struct execlist_mmio_pair pdp2_LDW;
+ struct execlist_mmio_pair pdp1_UDW;
+ struct execlist_mmio_pair pdp1_LDW;
+ struct execlist_mmio_pair pdp0_UDW;
+ struct execlist_mmio_pair pdp0_LDW;
+};
+
+struct intel_vgpu_elsp_dwords {
+ u32 data[4];
+ u32 index;
+};
+
+struct intel_vgpu_execlist_slot {
+ struct execlist_ctx_descriptor_format ctx[2];
+ u32 index;
+};
+
+struct intel_vgpu_execlist {
+ struct intel_vgpu_execlist_slot slot[2];
+ struct intel_vgpu_execlist_slot *running_slot;
+ struct intel_vgpu_execlist_slot *pending_slot;
+ struct execlist_ctx_descriptor_format *running_context;
+ int ring_id;
+ struct intel_vgpu *vgpu;
+ struct intel_vgpu_elsp_dwords elsp_dwords;
+};
+
+void intel_vgpu_clean_execlist(struct intel_vgpu *vgpu);
+
+int intel_vgpu_init_execlist(struct intel_vgpu *vgpu);
+
+int intel_vgpu_submit_execlist(struct intel_vgpu *vgpu, int ring_id);
+
+void intel_vgpu_reset_execlist(struct intel_vgpu *vgpu,
+ unsigned long engine_mask);
+
+#endif /*_GVT_EXECLIST_H_*/
diff --git a/drivers/gpu/drm/i915/gvt/firmware.c b/drivers/gpu/drm/i915/gvt/firmware.c
new file mode 100644
index 000000000000..2fae2a2ca96f
--- /dev/null
+++ b/drivers/gpu/drm/i915/gvt/firmware.c
@@ -0,0 +1,312 @@
+/*
+ * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ * Zhi Wang <zhi.a.wang@intel.com>
+ *
+ * Contributors:
+ * Changbin Du <changbin.du@intel.com>
+ *
+ */
+
+#include <linux/firmware.h>
+#include <linux/crc32.h>
+
+#include "i915_drv.h"
+#include "gvt.h"
+#include "i915_pvinfo.h"
+
+#define FIRMWARE_VERSION (0x0)
+
+struct gvt_firmware_header {
+ u64 magic;
+ u32 crc32; /* protect the data after this field */
+ u32 version;
+ u64 cfg_space_size;
+ u64 cfg_space_offset; /* offset in the file */
+ u64 mmio_size;
+ u64 mmio_offset; /* offset in the file */
+ unsigned char data[1];
+};
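+
+/*
+ * File layout:
+ * | gvt_firmware_header | PCI config space snapshot | MMIO snapshot |
+ * cfg_space_offset and mmio_offset locate the two snapshots in the file.
+ */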
+
+#define RD(offset) (readl(mmio + offset.reg))
+#define WR(v, offset) (writel(v, mmio + offset.reg))
+
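+/*
+ * Hold forcewake while snapshotting registers so the golden HW state is
+ * read with the GT awake.
+ */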
+static void bdw_forcewake_get(void __iomem *mmio)
+{
+ WR(_MASKED_BIT_DISABLE(0xffff), FORCEWAKE_MT);
+
+ RD(ECOBUS);
+
+ if (wait_for((RD(FORCEWAKE_ACK_HSW) & FORCEWAKE_KERNEL) == 0, 50))
+ gvt_err("fail to wait forcewake idle\n");
+
+ WR(_MASKED_BIT_ENABLE(FORCEWAKE_KERNEL), FORCEWAKE_MT);
+
+ if (wait_for((RD(FORCEWAKE_ACK_HSW) & FORCEWAKE_KERNEL), 50))
+ gvt_err("fail to wait forcewake ack\n");
+
+ if (wait_for((RD(GEN6_GT_THREAD_STATUS_REG) &
+ GEN6_GT_THREAD_STATUS_CORE_MASK) == 0, 50))
+ gvt_err("fail to wait c0 wake up\n");
+}
+
+#undef RD
+#undef WR
+
+#define dev_to_drm_minor(d) dev_get_drvdata((d))
+
+static ssize_t
+gvt_firmware_read(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *attr, char *buf,
+ loff_t offset, size_t count)
+{
+ memcpy(buf, attr->private + offset, count);
+ return count;
+}
+
+static struct bin_attribute firmware_attr = {
+ .attr = {.name = "gvt_firmware", .mode = (S_IRUSR)},
+ .read = gvt_firmware_read,
+ .write = NULL,
+ .mmap = NULL,
+};
+
+static int expose_firmware_sysfs(struct intel_gvt *gvt,
+ void __iomem *mmio)
+{
+ struct intel_gvt_device_info *info = &gvt->device_info;
+ struct pci_dev *pdev = gvt->dev_priv->drm.pdev;
+ struct intel_gvt_mmio_info *e;
+ struct gvt_firmware_header *h;
+ void *firmware;
+ void *p;
+ unsigned long size;
+ int i;
+ int ret;
+
+ size = sizeof(*h) + info->mmio_size + info->cfg_space_size - 1;
+ firmware = vmalloc(size);
+ if (!firmware)
+ return -ENOMEM;
+
+ h = firmware;
+
+ h->magic = VGT_MAGIC;
+ h->version = FIRMWARE_VERSION;
+ h->cfg_space_size = info->cfg_space_size;
+ h->cfg_space_offset = offsetof(struct gvt_firmware_header, data);
+ h->mmio_size = info->mmio_size;
+ h->mmio_offset = h->cfg_space_offset + h->cfg_space_size;
+
+ p = firmware + h->cfg_space_offset;
+
+ for (i = 0; i < h->cfg_space_size; i += 4)
+ pci_read_config_dword(pdev, i, p + i);
+
+ memcpy(gvt->firmware.cfg_space, p, info->cfg_space_size);
+
+ p = firmware + h->mmio_offset;
+
+ hash_for_each(gvt->mmio.mmio_info_table, i, e, node) {
+ int j;
+
+ for (j = 0; j < e->length; j += 4)
+ *(u32 *)(p + e->offset + j) =
+ readl(mmio + e->offset + j);
+ }
+
+ memcpy(gvt->firmware.mmio, p, info->mmio_size);
+
+ firmware_attr.size = size;
+ firmware_attr.private = firmware;
+
+ ret = device_create_bin_file(&pdev->dev, &firmware_attr);
+ if (ret) {
+ vfree(firmware);
+ return ret;
+ }
+ return 0;
+}
+
+static void clean_firmware_sysfs(struct intel_gvt *gvt)
+{
+ struct pci_dev *pdev = gvt->dev_priv->drm.pdev;
+
+ device_remove_bin_file(&pdev->dev, &firmware_attr);
+ vfree(firmware_attr.private);
+}
+
+/**
+ * intel_gvt_free_firmware - free GVT firmware
+ * @gvt: intel gvt device
+ *
+ * Free the cached firmware snapshots. If no firmware was successfully
+ * loaded, also remove the sysfs node that exposed the live hardware state.
+ */
+void intel_gvt_free_firmware(struct intel_gvt *gvt)
+{
+ if (!gvt->firmware.firmware_loaded)
+ clean_firmware_sysfs(gvt);
+
+ kfree(gvt->firmware.cfg_space);
+ kfree(gvt->firmware.mmio);
+}
+
+static int verify_firmware(struct intel_gvt *gvt,
+ const struct firmware *fw)
+{
+ struct intel_gvt_device_info *info = &gvt->device_info;
+ struct drm_i915_private *dev_priv = gvt->dev_priv;
+ struct pci_dev *pdev = dev_priv->drm.pdev;
+ struct gvt_firmware_header *h;
+ unsigned long id, crc32_start;
+ const void *mem;
+ const char *item;
+ u64 file, request;
+
+ h = (struct gvt_firmware_header *)fw->data;
+
+ crc32_start = offsetof(struct gvt_firmware_header, crc32) + 4;
+ mem = fw->data + crc32_start;
+
+#define VERIFY(s, a, b) do { \
+ item = (s); file = (u64)(a); request = (u64)(b); \
+ if ((a) != (b)) \
+ goto invalid_firmware; \
+} while (0)
+
+ VERIFY("magic number", h->magic, VGT_MAGIC);
+ VERIFY("version", h->version, FIRMWARE_VERSION);
+ VERIFY("crc32", h->crc32, crc32_le(0, mem, fw->size - crc32_start));
+ VERIFY("cfg space size", h->cfg_space_size, info->cfg_space_size);
+ VERIFY("mmio size", h->mmio_size, info->mmio_size);
+
+ mem = (fw->data + h->cfg_space_offset);
+
+ id = *(u16 *)(mem + PCI_VENDOR_ID);
+ VERIFY("vender id", id, pdev->vendor);
+
+ id = *(u16 *)(mem + PCI_DEVICE_ID);
+ VERIFY("device id", id, pdev->device);
+
+ id = *(u8 *)(mem + PCI_REVISION_ID);
+ VERIFY("revision id", id, pdev->revision);
+
+#undef VERIFY
+ return 0;
+
+invalid_firmware:
+ gvt_dbg_core("Invalid firmware: %s [file] 0x%llx [request] 0x%llx\n",
+ item, file, request);
+ return -EINVAL;
+}
+
+#define GVT_FIRMWARE_PATH "i915/gvt"
+
+/**
+ * intel_gvt_load_firmware - load GVT firmware
+ * @gvt: intel gvt device
+ *
+ * Returns:
+ * Zero on success, and also zero when the firmware file is missing or
+ * invalid and the live hardware state has been exposed via sysfs instead;
+ * negative error code on allocation or mapping failure.
+ */
+int intel_gvt_load_firmware(struct intel_gvt *gvt)
+{
+ struct intel_gvt_device_info *info = &gvt->device_info;
+ struct drm_i915_private *dev_priv = gvt->dev_priv;
+ struct pci_dev *pdev = dev_priv->drm.pdev;
+ struct intel_gvt_firmware *firmware = &gvt->firmware;
+ struct gvt_firmware_header *h;
+ const struct firmware *fw;
+ char *path;
+ void __iomem *mmio;
+ void *mem;
+ int ret;
+
+ path = kmalloc(PATH_MAX, GFP_KERNEL);
+ if (!path)
+ return -ENOMEM;
+
+ mem = kmalloc(info->cfg_space_size, GFP_KERNEL);
+ if (!mem) {
+ kfree(path);
+ return -ENOMEM;
+ }
+
+ firmware->cfg_space = mem;
+
+ mem = kmalloc(info->mmio_size, GFP_KERNEL);
+ if (!mem) {
+ kfree(path);
+ kfree(firmware->cfg_space);
+ return -ENOMEM;
+ }
+
+ firmware->mmio = mem;
+
+ mmio = pci_iomap(pdev, info->mmio_bar, info->mmio_size);
+ if (!mmio) {
+ kfree(path);
+ kfree(firmware->cfg_space);
+ kfree(firmware->mmio);
+ return -EINVAL;
+ }
+
+ if (IS_BROADWELL(gvt->dev_priv) || IS_SKYLAKE(gvt->dev_priv))
+ bdw_forcewake_get(mmio);
+
+ sprintf(path, "%s/vid_0x%04x_did_0x%04x_rid_0x%04x.golden_hw_state",
+ GVT_FIRMWARE_PATH, pdev->vendor, pdev->device,
+ pdev->revision);
+
+ gvt_dbg_core("request hw state firmware %s...\n", path);
+
+ ret = request_firmware(&fw, path, &dev_priv->drm.pdev->dev);
+ kfree(path);
+
+ if (ret)
+ goto expose_firmware;
+
+ gvt_dbg_core("success.\n");
+
+ ret = verify_firmware(gvt, fw);
+ if (ret)
+ goto out_free_fw;
+
+ gvt_dbg_core("verified.\n");
+
+ h = (struct gvt_firmware_header *)fw->data;
+
+ memcpy(firmware->cfg_space, fw->data + h->cfg_space_offset,
+ h->cfg_space_size);
+ memcpy(firmware->mmio, fw->data + h->mmio_offset,
+ h->mmio_size);
+
+ release_firmware(fw);
+ firmware->firmware_loaded = true;
+ pci_iounmap(pdev, mmio);
+ return 0;
+
+out_free_fw:
+ release_firmware(fw);
+expose_firmware:
+ expose_firmware_sysfs(gvt, mmio);
+ pci_iounmap(pdev, mmio);
+ return 0;
+}
diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c
new file mode 100644
index 000000000000..7eaaf1c9ed2b
--- /dev/null
+++ b/drivers/gpu/drm/i915/gvt/gtt.c
@@ -0,0 +1,2244 @@
+/*
+ * GTT virtualization
+ *
+ * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ * Zhi Wang <zhi.a.wang@intel.com>
+ * Zhenyu Wang <zhenyuw@linux.intel.com>
+ * Xiao Zheng <xiao.zheng@intel.com>
+ *
+ * Contributors:
+ * Min He <min.he@intel.com>
+ * Bing Niu <bing.niu@intel.com>
+ *
+ */
+
+#include "i915_drv.h"
+#include "gvt.h"
+#include "i915_pvinfo.h"
+#include "trace.h"
+
+static bool enable_out_of_sync;
+static int preallocated_oos_pages = 8192;
+
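+/*
+ * An out-of-sync (oos) page lets a heavily written guest PTE page drop its
+ * write protection; its shadow is re-synced from the cached copy before the
+ * next workload submission (see intel_vgpu_sync_oos_pages()).
+ */
+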
+/*
+ * validate a gm address and related range size,
+ * translate it to host gm address
+ */
+bool intel_gvt_ggtt_validate_range(struct intel_vgpu *vgpu, u64 addr, u32 size)
+{
+ if ((!vgpu_gmadr_is_valid(vgpu, addr)) || (size
+ && !vgpu_gmadr_is_valid(vgpu, addr + size - 1))) {
+ gvt_err("vgpu%d: invalid range gmadr 0x%llx size 0x%x\n",
+ vgpu->id, addr, size);
+ return false;
+ }
+ return true;
+}
+
+/* translate a guest gmadr to host gmadr */
+int intel_gvt_ggtt_gmadr_g2h(struct intel_vgpu *vgpu, u64 g_addr, u64 *h_addr)
+{
+ if (WARN(!vgpu_gmadr_is_valid(vgpu, g_addr),
+ "invalid guest gmadr %llx\n", g_addr))
+ return -EACCES;
+
+ if (vgpu_gmadr_is_aperture(vgpu, g_addr))
+ *h_addr = vgpu_aperture_gmadr_base(vgpu)
+ + (g_addr - vgpu_aperture_offset(vgpu));
+ else
+ *h_addr = vgpu_hidden_gmadr_base(vgpu)
+ + (g_addr - vgpu_hidden_offset(vgpu));
+ return 0;
+}
+
+/* translate a host gmadr to guest gmadr */
+int intel_gvt_ggtt_gmadr_h2g(struct intel_vgpu *vgpu, u64 h_addr, u64 *g_addr)
+{
+ if (WARN(!gvt_gmadr_is_valid(vgpu->gvt, h_addr),
+ "invalid host gmadr %llx\n", h_addr))
+ return -EACCES;
+
+ if (gvt_gmadr_is_aperture(vgpu->gvt, h_addr))
+ *g_addr = vgpu_aperture_gmadr_base(vgpu)
+ + (h_addr - gvt_aperture_gmadr_base(vgpu->gvt));
+ else
+ *g_addr = vgpu_hidden_gmadr_base(vgpu)
+ + (h_addr - gvt_hidden_gmadr_base(vgpu->gvt));
+ return 0;
+}
+
+int intel_gvt_ggtt_index_g2h(struct intel_vgpu *vgpu, unsigned long g_index,
+ unsigned long *h_index)
+{
+ u64 h_addr;
+ int ret;
+
+ ret = intel_gvt_ggtt_gmadr_g2h(vgpu, g_index << GTT_PAGE_SHIFT,
+ &h_addr);
+ if (ret)
+ return ret;
+
+ *h_index = h_addr >> GTT_PAGE_SHIFT;
+ return 0;
+}
+
+int intel_gvt_ggtt_h2g_index(struct intel_vgpu *vgpu, unsigned long h_index,
+ unsigned long *g_index)
+{
+ u64 g_addr;
+ int ret;
+
+ ret = intel_gvt_ggtt_gmadr_h2g(vgpu, h_index << GTT_PAGE_SHIFT,
+ &g_addr);
+ if (ret)
+ return ret;
+
+ *g_index = g_addr >> GTT_PAGE_SHIFT;
+ return 0;
+}
+
+#define gtt_type_is_entry(type) \
+ (type > GTT_TYPE_INVALID && type < GTT_TYPE_PPGTT_ENTRY \
+ && type != GTT_TYPE_PPGTT_PTE_ENTRY \
+ && type != GTT_TYPE_PPGTT_ROOT_ENTRY)
+
+#define gtt_type_is_pt(type) \
+ (type >= GTT_TYPE_PPGTT_PTE_PT && type < GTT_TYPE_MAX)
+
+#define gtt_type_is_pte_pt(type) \
+ (type == GTT_TYPE_PPGTT_PTE_PT)
+
+#define gtt_type_is_root_pointer(type) \
+ (gtt_type_is_entry(type) && type > GTT_TYPE_PPGTT_ROOT_ENTRY)
+
+#define gtt_init_entry(e, t, p, v) do { \
+ (e)->type = t; \
+ (e)->pdev = p; \
+ memcpy(&(e)->val64, &v, sizeof(v)); \
+} while (0)
+
+/*
+ * Mappings between GTT_TYPE* enumerations.
+ * The following information can be looked up for a given type:
+ * - type of the next level page table
+ * - type of an entry inside this level of page table
+ * - type of the entry with the PSE bit set
+ *
+ * When a type lacks one of these (e.g. an L4 root entry has no PSE bit,
+ * and a PTE page table has no next-level page table), the lookup returns
+ * GTT_TYPE_INVALID. This is useful when traversing a page table.
+ */
+
+struct gtt_type_table_entry {
+ int entry_type;
+ int next_pt_type;
+ int pse_entry_type;
+};
+
+#define GTT_TYPE_TABLE_ENTRY(type, e_type, npt_type, pse_type) \
+ [type] = { \
+ .entry_type = e_type, \
+ .next_pt_type = npt_type, \
+ .pse_entry_type = pse_type, \
+ }
+
+static struct gtt_type_table_entry gtt_type_table[] = {
+ GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_ROOT_L4_ENTRY,
+ GTT_TYPE_PPGTT_ROOT_L4_ENTRY,
+ GTT_TYPE_PPGTT_PML4_PT,
+ GTT_TYPE_INVALID),
+ GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PML4_PT,
+ GTT_TYPE_PPGTT_PML4_ENTRY,
+ GTT_TYPE_PPGTT_PDP_PT,
+ GTT_TYPE_INVALID),
+ GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PML4_ENTRY,
+ GTT_TYPE_PPGTT_PML4_ENTRY,
+ GTT_TYPE_PPGTT_PDP_PT,
+ GTT_TYPE_INVALID),
+ GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PDP_PT,
+ GTT_TYPE_PPGTT_PDP_ENTRY,
+ GTT_TYPE_PPGTT_PDE_PT,
+ GTT_TYPE_PPGTT_PTE_1G_ENTRY),
+ GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_ROOT_L3_ENTRY,
+ GTT_TYPE_PPGTT_ROOT_L3_ENTRY,
+ GTT_TYPE_PPGTT_PDE_PT,
+ GTT_TYPE_PPGTT_PTE_1G_ENTRY),
+ GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PDP_ENTRY,
+ GTT_TYPE_PPGTT_PDP_ENTRY,
+ GTT_TYPE_PPGTT_PDE_PT,
+ GTT_TYPE_PPGTT_PTE_1G_ENTRY),
+ GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PDE_PT,
+ GTT_TYPE_PPGTT_PDE_ENTRY,
+ GTT_TYPE_PPGTT_PTE_PT,
+ GTT_TYPE_PPGTT_PTE_2M_ENTRY),
+ GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PDE_ENTRY,
+ GTT_TYPE_PPGTT_PDE_ENTRY,
+ GTT_TYPE_PPGTT_PTE_PT,
+ GTT_TYPE_PPGTT_PTE_2M_ENTRY),
+ GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PTE_PT,
+ GTT_TYPE_PPGTT_PTE_4K_ENTRY,
+ GTT_TYPE_INVALID,
+ GTT_TYPE_INVALID),
+ GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PTE_4K_ENTRY,
+ GTT_TYPE_PPGTT_PTE_4K_ENTRY,
+ GTT_TYPE_INVALID,
+ GTT_TYPE_INVALID),
+ GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PTE_2M_ENTRY,
+ GTT_TYPE_PPGTT_PDE_ENTRY,
+ GTT_TYPE_INVALID,
+ GTT_TYPE_PPGTT_PTE_2M_ENTRY),
+ GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PTE_1G_ENTRY,
+ GTT_TYPE_PPGTT_PDP_ENTRY,
+ GTT_TYPE_INVALID,
+ GTT_TYPE_PPGTT_PTE_1G_ENTRY),
+ GTT_TYPE_TABLE_ENTRY(GTT_TYPE_GGTT_PTE,
+ GTT_TYPE_GGTT_PTE,
+ GTT_TYPE_INVALID,
+ GTT_TYPE_INVALID),
+};
+
+static inline int get_next_pt_type(int type)
+{
+ return gtt_type_table[type].next_pt_type;
+}
+
+static inline int get_entry_type(int type)
+{
+ return gtt_type_table[type].entry_type;
+}
+
+static inline int get_pse_type(int type)
+{
+ return gtt_type_table[type].pse_entry_type;
+}
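+
+/*
+ * Example lookups against the table above:
+ *   get_next_pt_type(GTT_TYPE_PPGTT_PDP_PT) == GTT_TYPE_PPGTT_PDE_PT
+ *   get_entry_type(GTT_TYPE_PPGTT_PDE_PT) == GTT_TYPE_PPGTT_PDE_ENTRY
+ *   get_pse_type(GTT_TYPE_PPGTT_PDE_ENTRY) == GTT_TYPE_PPGTT_PTE_2M_ENTRY
+ */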
+
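+/*
+ * GGTT PTE accessors: without readq/writeq (32-bit builds) the 64-bit
+ * entry is split into two 32-bit accesses, so the read is not atomic.
+ */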
+static u64 read_pte64(struct drm_i915_private *dev_priv, unsigned long index)
+{
+ void __iomem *addr = (gen8_pte_t __iomem *)dev_priv->ggtt.gsm + index;
+ u64 pte;
+
+#ifdef readq
+ pte = readq(addr);
+#else
+ pte = ioread32(addr);
+ pte |= (u64)ioread32(addr + 4) << 32;
+#endif
+ return pte;
+}
+
+static void write_pte64(struct drm_i915_private *dev_priv,
+ unsigned long index, u64 pte)
+{
+ void __iomem *addr = (gen8_pte_t __iomem *)dev_priv->ggtt.gsm + index;
+
+#ifdef writeq
+ writeq(pte, addr);
+#else
+ iowrite32((u32)pte, addr);
+ iowrite32(pte >> 32, addr + 4);
+#endif
+ I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
+ POSTING_READ(GFX_FLSH_CNTL_GEN6);
+}
+
+static inline struct intel_gvt_gtt_entry *gtt_get_entry64(void *pt,
+ struct intel_gvt_gtt_entry *e,
+ unsigned long index, bool hypervisor_access, unsigned long gpa,
+ struct intel_vgpu *vgpu)
+{
+ const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
+ int ret;
+
+ if (WARN_ON(info->gtt_entry_size != 8))
+ return e;
+
+ if (hypervisor_access) {
+ ret = intel_gvt_hypervisor_read_gpa(vgpu, gpa +
+ (index << info->gtt_entry_size_shift),
+ &e->val64, 8);
+ WARN_ON(ret);
+ } else if (!pt) {
+ e->val64 = read_pte64(vgpu->gvt->dev_priv, index);
+ } else {
+ e->val64 = *((u64 *)pt + index);
+ }
+ return e;
+}
+
+static inline struct intel_gvt_gtt_entry *gtt_set_entry64(void *pt,
+ struct intel_gvt_gtt_entry *e,
+ unsigned long index, bool hypervisor_access, unsigned long gpa,
+ struct intel_vgpu *vgpu)
+{
+ const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
+ int ret;
+
+ if (WARN_ON(info->gtt_entry_size != 8))
+ return e;
+
+ if (hypervisor_access) {
+ ret = intel_gvt_hypervisor_write_gpa(vgpu, gpa +
+ (index << info->gtt_entry_size_shift),
+ &e->val64, 8);
+ WARN_ON(ret);
+ } else if (!pt) {
+ write_pte64(vgpu->gvt->dev_priv, index, e->val64);
+ } else {
+ *((u64 *)pt + index) = e->val64;
+ }
+ return e;
+}
+
+#define GTT_HAW 46
+
+/* The masks cover address bits [GTT_HAW-1:30], [GTT_HAW-1:21], [GTT_HAW-1:12]. */
+#define ADDR_1G_MASK (((1UL << (GTT_HAW - 30)) - 1) << 30)
+#define ADDR_2M_MASK (((1UL << (GTT_HAW - 21)) - 1) << 21)
+#define ADDR_4K_MASK (((1UL << (GTT_HAW - 12)) - 1) << 12)
+
+static unsigned long gen8_gtt_get_pfn(struct intel_gvt_gtt_entry *e)
+{
+ unsigned long pfn;
+
+ if (e->type == GTT_TYPE_PPGTT_PTE_1G_ENTRY)
+ pfn = (e->val64 & ADDR_1G_MASK) >> 12;
+ else if (e->type == GTT_TYPE_PPGTT_PTE_2M_ENTRY)
+ pfn = (e->val64 & ADDR_2M_MASK) >> 12;
+ else
+ pfn = (e->val64 & ADDR_4K_MASK) >> 12;
+ return pfn;
+}
+
+static void gen8_gtt_set_pfn(struct intel_gvt_gtt_entry *e, unsigned long pfn)
+{
+ if (e->type == GTT_TYPE_PPGTT_PTE_1G_ENTRY) {
+ e->val64 &= ~ADDR_1G_MASK;
+ pfn &= (ADDR_1G_MASK >> 12);
+ } else if (e->type == GTT_TYPE_PPGTT_PTE_2M_ENTRY) {
+ e->val64 &= ~ADDR_2M_MASK;
+ pfn &= (ADDR_2M_MASK >> 12);
+ } else {
+ e->val64 &= ~ADDR_4K_MASK;
+ pfn &= (ADDR_4K_MASK >> 12);
+ }
+
+ e->val64 |= (pfn << 12);
+}
+
+static bool gen8_gtt_test_pse(struct intel_gvt_gtt_entry *e)
+{
+ /* Entry doesn't have PSE bit. */
+ if (get_pse_type(e->type) == GTT_TYPE_INVALID)
+ return false;
+
+ e->type = get_entry_type(e->type);
+ if (!(e->val64 & (1 << 7)))
+ return false;
+
+ e->type = get_pse_type(e->type);
+ return true;
+}
+
+static bool gen8_gtt_test_present(struct intel_gvt_gtt_entry *e)
+{
+ /*
+	 * i915 writes PDP root pointer registers without the present bit
+	 * set, which still works, so root pointer entries need to be
+	 * treated specially.
+ */
+ if (e->type == GTT_TYPE_PPGTT_ROOT_L3_ENTRY
+ || e->type == GTT_TYPE_PPGTT_ROOT_L4_ENTRY)
+ return (e->val64 != 0);
+ else
+ return (e->val64 & (1 << 0));
+}
+
+static void gtt_entry_clear_present(struct intel_gvt_gtt_entry *e)
+{
+ e->val64 &= ~(1 << 0);
+}
+
+/*
+ * Per-platform GMA routines.
+ */
+static unsigned long gma_to_ggtt_pte_index(unsigned long gma)
+{
+ unsigned long x = (gma >> GTT_PAGE_SHIFT);
+
+ trace_gma_index(__func__, gma, x);
+ return x;
+}
+
+#define DEFINE_PPGTT_GMA_TO_INDEX(prefix, ename, exp) \
+static unsigned long prefix##_gma_to_##ename##_index(unsigned long gma) \
+{ \
+ unsigned long x = (exp); \
+ trace_gma_index(__func__, gma, x); \
+ return x; \
+}
+
+DEFINE_PPGTT_GMA_TO_INDEX(gen8, pte, (gma >> 12 & 0x1ff));
+DEFINE_PPGTT_GMA_TO_INDEX(gen8, pde, (gma >> 21 & 0x1ff));
+DEFINE_PPGTT_GMA_TO_INDEX(gen8, l3_pdp, (gma >> 30 & 0x3));
+DEFINE_PPGTT_GMA_TO_INDEX(gen8, l4_pdp, (gma >> 30 & 0x1ff));
+DEFINE_PPGTT_GMA_TO_INDEX(gen8, pml4, (gma >> 39 & 0x1ff));
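+
+/*
+ * Gen8 4-level GMA layout:
+ * pml4 [47:39] | pdp [38:30] | pde [29:21] | pte [20:12] | offset [11:0]
+ * The 3-level (legacy 32-bit) mode uses only the two l3_pdp bits [31:30].
+ */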
+
+static struct intel_gvt_gtt_pte_ops gen8_gtt_pte_ops = {
+ .get_entry = gtt_get_entry64,
+ .set_entry = gtt_set_entry64,
+ .clear_present = gtt_entry_clear_present,
+ .test_present = gen8_gtt_test_present,
+ .test_pse = gen8_gtt_test_pse,
+ .get_pfn = gen8_gtt_get_pfn,
+ .set_pfn = gen8_gtt_set_pfn,
+};
+
+static struct intel_gvt_gtt_gma_ops gen8_gtt_gma_ops = {
+ .gma_to_ggtt_pte_index = gma_to_ggtt_pte_index,
+ .gma_to_pte_index = gen8_gma_to_pte_index,
+ .gma_to_pde_index = gen8_gma_to_pde_index,
+ .gma_to_l3_pdp_index = gen8_gma_to_l3_pdp_index,
+ .gma_to_l4_pdp_index = gen8_gma_to_l4_pdp_index,
+ .gma_to_pml4_index = gen8_gma_to_pml4_index,
+};
+
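+/* Translate the guest pfn of a present entry into a host mfn ("p2m"). */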
+static int gtt_entry_p2m(struct intel_vgpu *vgpu, struct intel_gvt_gtt_entry *p,
+ struct intel_gvt_gtt_entry *m)
+{
+ struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
+ unsigned long gfn, mfn;
+
+ *m = *p;
+
+ if (!ops->test_present(p))
+ return 0;
+
+ gfn = ops->get_pfn(p);
+
+ mfn = intel_gvt_hypervisor_gfn_to_mfn(vgpu, gfn);
+ if (mfn == INTEL_GVT_INVALID_ADDR) {
+ gvt_err("fail to translate gfn: 0x%lx\n", gfn);
+ return -ENXIO;
+ }
+
+ ops->set_pfn(m, mfn);
+ return 0;
+}
+
+/*
+ * MM helpers.
+ */
+struct intel_gvt_gtt_entry *intel_vgpu_mm_get_entry(struct intel_vgpu_mm *mm,
+ void *page_table, struct intel_gvt_gtt_entry *e,
+ unsigned long index)
+{
+ struct intel_gvt *gvt = mm->vgpu->gvt;
+ struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;
+
+ e->type = mm->page_table_entry_type;
+
+ ops->get_entry(page_table, e, index, false, 0, mm->vgpu);
+ ops->test_pse(e);
+ return e;
+}
+
+struct intel_gvt_gtt_entry *intel_vgpu_mm_set_entry(struct intel_vgpu_mm *mm,
+ void *page_table, struct intel_gvt_gtt_entry *e,
+ unsigned long index)
+{
+ struct intel_gvt *gvt = mm->vgpu->gvt;
+ struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;
+
+ return ops->set_entry(page_table, e, index, false, 0, mm->vgpu);
+}
+
+/*
+ * PPGTT shadow page table helpers.
+ */
+static inline struct intel_gvt_gtt_entry *ppgtt_spt_get_entry(
+ struct intel_vgpu_ppgtt_spt *spt,
+ void *page_table, int type,
+ struct intel_gvt_gtt_entry *e, unsigned long index,
+ bool guest)
+{
+ struct intel_gvt *gvt = spt->vgpu->gvt;
+ struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;
+
+ e->type = get_entry_type(type);
+
+ if (WARN(!gtt_type_is_entry(e->type), "invalid entry type\n"))
+ return e;
+
+ ops->get_entry(page_table, e, index, guest,
+ spt->guest_page.gfn << GTT_PAGE_SHIFT,
+ spt->vgpu);
+ ops->test_pse(e);
+ return e;
+}
+
+static inline struct intel_gvt_gtt_entry *ppgtt_spt_set_entry(
+ struct intel_vgpu_ppgtt_spt *spt,
+ void *page_table, int type,
+ struct intel_gvt_gtt_entry *e, unsigned long index,
+ bool guest)
+{
+ struct intel_gvt *gvt = spt->vgpu->gvt;
+ struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;
+
+ if (WARN(!gtt_type_is_entry(e->type), "invalid entry type\n"))
+ return e;
+
+ return ops->set_entry(page_table, e, index, guest,
+ spt->guest_page.gfn << GTT_PAGE_SHIFT,
+ spt->vgpu);
+}
+
+#define ppgtt_get_guest_entry(spt, e, index) \
+ ppgtt_spt_get_entry(spt, NULL, \
+ spt->guest_page_type, e, index, true)
+
+#define ppgtt_set_guest_entry(spt, e, index) \
+ ppgtt_spt_set_entry(spt, NULL, \
+ spt->guest_page_type, e, index, true)
+
+#define ppgtt_get_shadow_entry(spt, e, index) \
+ ppgtt_spt_get_entry(spt, spt->shadow_page.vaddr, \
+ spt->shadow_page.type, e, index, false)
+
+#define ppgtt_set_shadow_entry(spt, e, index) \
+ ppgtt_spt_set_entry(spt, spt->shadow_page.vaddr, \
+ spt->shadow_page.type, e, index, false)
+
+/**
+ * intel_vgpu_init_guest_page - init a guest page data structure
+ * @vgpu: a vGPU
+ * @p: a guest page data structure
+ * @gfn: guest memory page frame number
+ * @handler: the function to be called when the target guest memory page
+ * is modified
+ * @data: private data passed to @handler
+ *
+ * This function is called when a user wants to track a guest memory page.
+ *
+ * Returns:
+ * Zero on success, negative error code if failed.
+ */
+int intel_vgpu_init_guest_page(struct intel_vgpu *vgpu,
+ struct intel_vgpu_guest_page *p,
+ unsigned long gfn,
+ int (*handler)(void *, u64, void *, int),
+ void *data)
+{
+ INIT_HLIST_NODE(&p->node);
+
+ p->writeprotection = false;
+ p->gfn = gfn;
+ p->handler = handler;
+ p->data = data;
+ p->oos_page = NULL;
+ p->write_cnt = 0;
+
+ hash_add(vgpu->gtt.guest_page_hash_table, &p->node, p->gfn);
+ return 0;
+}
+
+static int detach_oos_page(struct intel_vgpu *vgpu,
+ struct intel_vgpu_oos_page *oos_page);
+
+/**
+ * intel_vgpu_clean_guest_page - release the resource owned by guest page data
+ * structure
+ * @vgpu: a vGPU
+ * @p: a tracked guest page
+ *
+ * This function is called when a user stops tracking a guest memory
+ * page.
+ */
+void intel_vgpu_clean_guest_page(struct intel_vgpu *vgpu,
+ struct intel_vgpu_guest_page *p)
+{
+ if (!hlist_unhashed(&p->node))
+ hash_del(&p->node);
+
+ if (p->oos_page)
+ detach_oos_page(vgpu, p->oos_page);
+
+ if (p->writeprotection)
+ intel_gvt_hypervisor_unset_wp_page(vgpu, p);
+}
+
+/**
+ * intel_vgpu_find_guest_page - find a guest page data structure by GFN.
+ * @vgpu: a vGPU
+ * @gfn: guest memory page frame number
+ *
+ * This function is called when emulation logic wants to know if a trapped GFN
+ * is a tracked guest page.
+ *
+ * Returns:
+ * Pointer to guest page data structure, NULL if failed.
+ */
+struct intel_vgpu_guest_page *intel_vgpu_find_guest_page(
+ struct intel_vgpu *vgpu, unsigned long gfn)
+{
+ struct intel_vgpu_guest_page *p;
+
+ hash_for_each_possible(vgpu->gtt.guest_page_hash_table,
+ p, node, gfn) {
+ if (p->gfn == gfn)
+ return p;
+ }
+ return NULL;
+}
+
+static inline int init_shadow_page(struct intel_vgpu *vgpu,
+ struct intel_vgpu_shadow_page *p, int type)
+{
+ p->vaddr = page_address(p->page);
+ p->type = type;
+
+ INIT_HLIST_NODE(&p->node);
+
+ p->mfn = intel_gvt_hypervisor_virt_to_mfn(p->vaddr);
+ if (p->mfn == INTEL_GVT_INVALID_ADDR)
+ return -EFAULT;
+
+ hash_add(vgpu->gtt.shadow_page_hash_table, &p->node, p->mfn);
+ return 0;
+}
+
+static inline void clean_shadow_page(struct intel_vgpu_shadow_page *p)
+{
+ if (!hlist_unhashed(&p->node))
+ hash_del(&p->node);
+}
+
+static inline struct intel_vgpu_shadow_page *find_shadow_page(
+ struct intel_vgpu *vgpu, unsigned long mfn)
+{
+ struct intel_vgpu_shadow_page *p;
+
+ hash_for_each_possible(vgpu->gtt.shadow_page_hash_table,
+ p, node, mfn) {
+ if (p->mfn == mfn)
+ return p;
+ }
+ return NULL;
+}
+
+#define guest_page_to_ppgtt_spt(ptr) \
+ container_of(ptr, struct intel_vgpu_ppgtt_spt, guest_page)
+
+#define shadow_page_to_ppgtt_spt(ptr) \
+ container_of(ptr, struct intel_vgpu_ppgtt_spt, shadow_page)
+
+static void *alloc_spt(gfp_t gfp_mask)
+{
+ struct intel_vgpu_ppgtt_spt *spt;
+
+ spt = kzalloc(sizeof(*spt), gfp_mask);
+ if (!spt)
+ return NULL;
+
+ spt->shadow_page.page = alloc_page(gfp_mask);
+ if (!spt->shadow_page.page) {
+ kfree(spt);
+ return NULL;
+ }
+ return spt;
+}
+
+static void free_spt(struct intel_vgpu_ppgtt_spt *spt)
+{
+ __free_page(spt->shadow_page.page);
+ kfree(spt);
+}
+
+static void ppgtt_free_shadow_page(struct intel_vgpu_ppgtt_spt *spt)
+{
+ trace_spt_free(spt->vgpu->id, spt, spt->shadow_page.type);
+
+ clean_shadow_page(&spt->shadow_page);
+ intel_vgpu_clean_guest_page(spt->vgpu, &spt->guest_page);
+ list_del_init(&spt->post_shadow_list);
+
+ free_spt(spt);
+}
+
+static void ppgtt_free_all_shadow_page(struct intel_vgpu *vgpu)
+{
+ struct hlist_node *n;
+ struct intel_vgpu_shadow_page *sp;
+ int i;
+
+ hash_for_each_safe(vgpu->gtt.shadow_page_hash_table, i, n, sp, node)
+ ppgtt_free_shadow_page(shadow_page_to_ppgtt_spt(sp));
+}
+
+static int ppgtt_handle_guest_write_page_table_bytes(void *gp,
+ u64 pa, void *p_data, int bytes);
+
+static int ppgtt_write_protection_handler(void *gp, u64 pa,
+ void *p_data, int bytes)
+{
+ struct intel_vgpu_guest_page *gpt = (struct intel_vgpu_guest_page *)gp;
+ int ret;
+
+ if (bytes != 4 && bytes != 8)
+ return -EINVAL;
+
+ if (!gpt->writeprotection)
+ return -EINVAL;
+
+ ret = ppgtt_handle_guest_write_page_table_bytes(gp,
+ pa, p_data, bytes);
+	return ret;
+}
+
+static int reclaim_one_mm(struct intel_gvt *gvt);
+
+static struct intel_vgpu_ppgtt_spt *ppgtt_alloc_shadow_page(
+ struct intel_vgpu *vgpu, int type, unsigned long gfn)
+{
+ struct intel_vgpu_ppgtt_spt *spt = NULL;
+ int ret;
+
+retry:
+ spt = alloc_spt(GFP_KERNEL | __GFP_ZERO);
+ if (!spt) {
+ if (reclaim_one_mm(vgpu->gvt))
+ goto retry;
+
+ gvt_err("fail to allocate ppgtt shadow page\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ spt->vgpu = vgpu;
+ spt->guest_page_type = type;
+ atomic_set(&spt->refcount, 1);
+ INIT_LIST_HEAD(&spt->post_shadow_list);
+
+ /*
+	 * TODO: the guest page type may differ from the shadow page type
+	 * once PSE pages are supported in the future.
+ */
+ ret = init_shadow_page(vgpu, &spt->shadow_page, type);
+ if (ret) {
+ gvt_err("fail to initialize shadow page for spt\n");
+ goto err;
+ }
+
+ ret = intel_vgpu_init_guest_page(vgpu, &spt->guest_page,
+ gfn, ppgtt_write_protection_handler, NULL);
+ if (ret) {
+ gvt_err("fail to initialize guest page for spt\n");
+ goto err;
+ }
+
+ trace_spt_alloc(vgpu->id, spt, type, spt->shadow_page.mfn, gfn);
+ return spt;
+err:
+ ppgtt_free_shadow_page(spt);
+ return ERR_PTR(ret);
+}
+
+static struct intel_vgpu_ppgtt_spt *ppgtt_find_shadow_page(
+ struct intel_vgpu *vgpu, unsigned long mfn)
+{
+ struct intel_vgpu_shadow_page *p = find_shadow_page(vgpu, mfn);
+
+ if (p)
+ return shadow_page_to_ppgtt_spt(p);
+
+ gvt_err("vgpu%d: fail to find ppgtt shadow page: 0x%lx\n",
+ vgpu->id, mfn);
+ return NULL;
+}
+
+#define pt_entry_size_shift(spt) \
+ ((spt)->vgpu->gvt->device_info.gtt_entry_size_shift)
+
+#define pt_entries(spt) \
+ (GTT_PAGE_SIZE >> pt_entry_size_shift(spt))
+
+#define for_each_present_guest_entry(spt, e, i) \
+ for (i = 0; i < pt_entries(spt); i++) \
+ if (spt->vgpu->gvt->gtt.pte_ops->test_present( \
+ ppgtt_get_guest_entry(spt, e, i)))
+
+#define for_each_present_shadow_entry(spt, e, i) \
+ for (i = 0; i < pt_entries(spt); i++) \
+ if (spt->vgpu->gvt->gtt.pte_ops->test_present( \
+ ppgtt_get_shadow_entry(spt, e, i)))
+
+static void ppgtt_get_shadow_page(struct intel_vgpu_ppgtt_spt *spt)
+{
+ int v = atomic_read(&spt->refcount);
+
+ trace_spt_refcount(spt->vgpu->id, "inc", spt, v, (v + 1));
+
+ atomic_inc(&spt->refcount);
+}
+
+static int ppgtt_invalidate_shadow_page(struct intel_vgpu_ppgtt_spt *spt);
+
+static int ppgtt_invalidate_shadow_page_by_shadow_entry(struct intel_vgpu *vgpu,
+ struct intel_gvt_gtt_entry *e)
+{
+ struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
+ struct intel_vgpu_ppgtt_spt *s;
+ intel_gvt_gtt_type_t cur_pt_type;
+
+ if (WARN_ON(!gtt_type_is_pt(get_next_pt_type(e->type))))
+ return -EINVAL;
+
+ if (e->type != GTT_TYPE_PPGTT_ROOT_L3_ENTRY
+ && e->type != GTT_TYPE_PPGTT_ROOT_L4_ENTRY) {
+ cur_pt_type = get_next_pt_type(e->type) + 1;
+ if (ops->get_pfn(e) ==
+ vgpu->gtt.scratch_pt[cur_pt_type].page_mfn)
+ return 0;
+ }
+ s = ppgtt_find_shadow_page(vgpu, ops->get_pfn(e));
+ if (!s) {
+ gvt_err("vgpu%d: fail to find shadow page: mfn: 0x%lx\n",
+ vgpu->id, ops->get_pfn(e));
+ return -ENXIO;
+ }
+ return ppgtt_invalidate_shadow_page(s);
+}
+
+static int ppgtt_invalidate_shadow_page(struct intel_vgpu_ppgtt_spt *spt)
+{
+ struct intel_gvt_gtt_entry e;
+ unsigned long index;
+ int ret;
+ int v = atomic_read(&spt->refcount);
+
+ trace_spt_change(spt->vgpu->id, "die", spt,
+ spt->guest_page.gfn, spt->shadow_page.type);
+
+ trace_spt_refcount(spt->vgpu->id, "dec", spt, v, (v - 1));
+
+ if (atomic_dec_return(&spt->refcount) > 0)
+ return 0;
+
+ if (gtt_type_is_pte_pt(spt->shadow_page.type))
+ goto release;
+
+ for_each_present_shadow_entry(spt, &e, index) {
+ if (!gtt_type_is_pt(get_next_pt_type(e.type))) {
+ gvt_err("GVT doesn't support pse bit for now\n");
+ return -EINVAL;
+ }
+ ret = ppgtt_invalidate_shadow_page_by_shadow_entry(
+ spt->vgpu, &e);
+ if (ret)
+ goto fail;
+ }
+release:
+ trace_spt_change(spt->vgpu->id, "release", spt,
+ spt->guest_page.gfn, spt->shadow_page.type);
+ ppgtt_free_shadow_page(spt);
+ return 0;
+fail:
+ gvt_err("vgpu%d: fail: shadow page %p shadow entry 0x%llx type %d\n",
+ spt->vgpu->id, spt, e.val64, e.type);
+ return ret;
+}
+
+static int ppgtt_populate_shadow_page(struct intel_vgpu_ppgtt_spt *spt);
+
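+/*
+ * Find or create the shadow page table for the page referenced by a guest
+ * entry. A newly created shadow is write-protected and populated
+ * recursively before it is returned.
+ */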
+static struct intel_vgpu_ppgtt_spt *ppgtt_populate_shadow_page_by_guest_entry(
+ struct intel_vgpu *vgpu, struct intel_gvt_gtt_entry *we)
+{
+ struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
+ struct intel_vgpu_ppgtt_spt *s = NULL;
+ struct intel_vgpu_guest_page *g;
+ int ret;
+
+ if (WARN_ON(!gtt_type_is_pt(get_next_pt_type(we->type)))) {
+ ret = -EINVAL;
+ goto fail;
+ }
+
+ g = intel_vgpu_find_guest_page(vgpu, ops->get_pfn(we));
+ if (g) {
+ s = guest_page_to_ppgtt_spt(g);
+ ppgtt_get_shadow_page(s);
+ } else {
+ int type = get_next_pt_type(we->type);
+
+ s = ppgtt_alloc_shadow_page(vgpu, type, ops->get_pfn(we));
+ if (IS_ERR(s)) {
+ ret = PTR_ERR(s);
+ goto fail;
+ }
+
+ ret = intel_gvt_hypervisor_set_wp_page(vgpu, &s->guest_page);
+ if (ret)
+ goto fail;
+
+ ret = ppgtt_populate_shadow_page(s);
+ if (ret)
+ goto fail;
+
+ trace_spt_change(vgpu->id, "new", s, s->guest_page.gfn,
+ s->shadow_page.type);
+ }
+ return s;
+fail:
+ gvt_err("vgpu%d: fail: shadow page %p guest entry 0x%llx type %d\n",
+ vgpu->id, s, we->val64, we->type);
+ return ERR_PTR(ret);
+}
+
+static inline void ppgtt_generate_shadow_entry(struct intel_gvt_gtt_entry *se,
+ struct intel_vgpu_ppgtt_spt *s, struct intel_gvt_gtt_entry *ge)
+{
+ struct intel_gvt_gtt_pte_ops *ops = s->vgpu->gvt->gtt.pte_ops;
+
+ se->type = ge->type;
+ se->val64 = ge->val64;
+
+ ops->set_pfn(se, s->shadow_page.mfn);
+}
+
+static int ppgtt_populate_shadow_page(struct intel_vgpu_ppgtt_spt *spt)
+{
+ struct intel_vgpu *vgpu = spt->vgpu;
+ struct intel_vgpu_ppgtt_spt *s;
+ struct intel_gvt_gtt_entry se, ge;
+ unsigned long i;
+ int ret;
+
+ trace_spt_change(spt->vgpu->id, "born", spt,
+ spt->guest_page.gfn, spt->shadow_page.type);
+
+ if (gtt_type_is_pte_pt(spt->shadow_page.type)) {
+ for_each_present_guest_entry(spt, &ge, i) {
+ ret = gtt_entry_p2m(vgpu, &ge, &se);
+ if (ret)
+ goto fail;
+ ppgtt_set_shadow_entry(spt, &se, i);
+ }
+ return 0;
+ }
+
+ for_each_present_guest_entry(spt, &ge, i) {
+ if (!gtt_type_is_pt(get_next_pt_type(ge.type))) {
+ gvt_err("GVT doesn't support pse bit now\n");
+ ret = -EINVAL;
+ goto fail;
+ }
+
+ s = ppgtt_populate_shadow_page_by_guest_entry(vgpu, &ge);
+ if (IS_ERR(s)) {
+ ret = PTR_ERR(s);
+ goto fail;
+ }
+ ppgtt_get_shadow_entry(spt, &se, i);
+ ppgtt_generate_shadow_entry(&se, s, &ge);
+ ppgtt_set_shadow_entry(spt, &se, i);
+ }
+ return 0;
+fail:
+ gvt_err("vgpu%d: fail: shadow page %p guest entry 0x%llx type %d\n",
+ vgpu->id, spt, ge.val64, ge.type);
+ return ret;
+}
+
+static int ppgtt_handle_guest_entry_removal(struct intel_vgpu_guest_page *gpt,
+ unsigned long index)
+{
+ struct intel_vgpu_ppgtt_spt *spt = guest_page_to_ppgtt_spt(gpt);
+ struct intel_vgpu_shadow_page *sp = &spt->shadow_page;
+ struct intel_vgpu *vgpu = spt->vgpu;
+ struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
+ struct intel_gvt_gtt_entry e;
+ int ret;
+
+ ppgtt_get_shadow_entry(spt, &e, index);
+
+ trace_gpt_change(spt->vgpu->id, "remove", spt, sp->type, e.val64,
+ index);
+
+ if (!ops->test_present(&e))
+ return 0;
+
+ if (ops->get_pfn(&e) == vgpu->gtt.scratch_pt[sp->type].page_mfn)
+ return 0;
+
+ if (gtt_type_is_pt(get_next_pt_type(e.type))) {
+ struct intel_vgpu_ppgtt_spt *s =
+ ppgtt_find_shadow_page(vgpu, ops->get_pfn(&e));
+ if (!s) {
+ gvt_err("fail to find guest page\n");
+ ret = -ENXIO;
+ goto fail;
+ }
+ ret = ppgtt_invalidate_shadow_page(s);
+ if (ret)
+ goto fail;
+ }
+ ops->set_pfn(&e, vgpu->gtt.scratch_pt[sp->type].page_mfn);
+ ppgtt_set_shadow_entry(spt, &e, index);
+ return 0;
+fail:
+ gvt_err("vgpu%d: fail: shadow page %p guest entry 0x%llx type %d\n",
+ vgpu->id, spt, e.val64, e.type);
+ return ret;
+}
+
+static int ppgtt_handle_guest_entry_add(struct intel_vgpu_guest_page *gpt,
+ struct intel_gvt_gtt_entry *we, unsigned long index)
+{
+ struct intel_vgpu_ppgtt_spt *spt = guest_page_to_ppgtt_spt(gpt);
+ struct intel_vgpu_shadow_page *sp = &spt->shadow_page;
+ struct intel_vgpu *vgpu = spt->vgpu;
+ struct intel_gvt_gtt_entry m;
+ struct intel_vgpu_ppgtt_spt *s;
+ int ret;
+
+ trace_gpt_change(spt->vgpu->id, "add", spt, sp->type,
+ we->val64, index);
+
+ if (gtt_type_is_pt(get_next_pt_type(we->type))) {
+ s = ppgtt_populate_shadow_page_by_guest_entry(vgpu, we);
+ if (IS_ERR(s)) {
+ ret = PTR_ERR(s);
+ goto fail;
+ }
+ ppgtt_get_shadow_entry(spt, &m, index);
+ ppgtt_generate_shadow_entry(&m, s, we);
+ ppgtt_set_shadow_entry(spt, &m, index);
+ } else {
+ ret = gtt_entry_p2m(vgpu, we, &m);
+ if (ret)
+ goto fail;
+ ppgtt_set_shadow_entry(spt, &m, index);
+ }
+ return 0;
+fail:
+ gvt_err("vgpu%d: fail: spt %p guest entry 0x%llx type %d\n", vgpu->id,
+ spt, we->val64, we->type);
+ return ret;
+}
+
+static int sync_oos_page(struct intel_vgpu *vgpu,
+ struct intel_vgpu_oos_page *oos_page)
+{
+ const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
+ struct intel_gvt *gvt = vgpu->gvt;
+ struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;
+ struct intel_vgpu_ppgtt_spt *spt =
+ guest_page_to_ppgtt_spt(oos_page->guest_page);
+ struct intel_gvt_gtt_entry old, new, m;
+ int index;
+ int ret;
+
+ trace_oos_change(vgpu->id, "sync", oos_page->id,
+ oos_page->guest_page, spt->guest_page_type);
+
+ old.type = new.type = get_entry_type(spt->guest_page_type);
+ old.val64 = new.val64 = 0;
+
+ for (index = 0; index < (GTT_PAGE_SIZE >> info->gtt_entry_size_shift);
+ index++) {
+ ops->get_entry(oos_page->mem, &old, index, false, 0, vgpu);
+ ops->get_entry(NULL, &new, index, true,
+ oos_page->guest_page->gfn << PAGE_SHIFT, vgpu);
+
+ if (old.val64 == new.val64
+ && !test_and_clear_bit(index, spt->post_shadow_bitmap))
+ continue;
+
+ trace_oos_sync(vgpu->id, oos_page->id,
+ oos_page->guest_page, spt->guest_page_type,
+ new.val64, index);
+
+ ret = gtt_entry_p2m(vgpu, &new, &m);
+ if (ret)
+ return ret;
+
+ ops->set_entry(oos_page->mem, &new, index, false, 0, vgpu);
+ ppgtt_set_shadow_entry(spt, &m, index);
+ }
+
+ oos_page->guest_page->write_cnt = 0;
+ list_del_init(&spt->post_shadow_list);
+ return 0;
+}
+
+static int detach_oos_page(struct intel_vgpu *vgpu,
+ struct intel_vgpu_oos_page *oos_page)
+{
+ struct intel_gvt *gvt = vgpu->gvt;
+ struct intel_vgpu_ppgtt_spt *spt =
+ guest_page_to_ppgtt_spt(oos_page->guest_page);
+
+ trace_oos_change(vgpu->id, "detach", oos_page->id,
+ oos_page->guest_page, spt->guest_page_type);
+
+ oos_page->guest_page->write_cnt = 0;
+ oos_page->guest_page->oos_page = NULL;
+ oos_page->guest_page = NULL;
+
+ list_del_init(&oos_page->vm_list);
+ list_move_tail(&oos_page->list, &gvt->gtt.oos_page_free_list_head);
+
+ return 0;
+}
+
+static int attach_oos_page(struct intel_vgpu *vgpu,
+ struct intel_vgpu_oos_page *oos_page,
+ struct intel_vgpu_guest_page *gpt)
+{
+ struct intel_gvt *gvt = vgpu->gvt;
+ int ret;
+
+ ret = intel_gvt_hypervisor_read_gpa(vgpu, gpt->gfn << GTT_PAGE_SHIFT,
+ oos_page->mem, GTT_PAGE_SIZE);
+ if (ret)
+ return ret;
+
+ oos_page->guest_page = gpt;
+ gpt->oos_page = oos_page;
+
+ list_move_tail(&oos_page->list, &gvt->gtt.oos_page_use_list_head);
+
+ trace_oos_change(vgpu->id, "attach", gpt->oos_page->id,
+ gpt, guest_page_to_ppgtt_spt(gpt)->guest_page_type);
+ return 0;
+}
+
+static int ppgtt_set_guest_page_sync(struct intel_vgpu *vgpu,
+ struct intel_vgpu_guest_page *gpt)
+{
+ int ret;
+
+ ret = intel_gvt_hypervisor_set_wp_page(vgpu, gpt);
+ if (ret)
+ return ret;
+
+ trace_oos_change(vgpu->id, "set page sync", gpt->oos_page->id,
+ gpt, guest_page_to_ppgtt_spt(gpt)->guest_page_type);
+
+ list_del_init(&gpt->oos_page->vm_list);
+ return sync_oos_page(vgpu, gpt->oos_page);
+}
+
+static int ppgtt_allocate_oos_page(struct intel_vgpu *vgpu,
+ struct intel_vgpu_guest_page *gpt)
+{
+ struct intel_gvt *gvt = vgpu->gvt;
+ struct intel_gvt_gtt *gtt = &gvt->gtt;
+ struct intel_vgpu_oos_page *oos_page = gpt->oos_page;
+ int ret;
+
+ WARN(oos_page, "shadow PPGTT page has already has a oos page\n");
+
+ if (list_empty(&gtt->oos_page_free_list_head)) {
+ oos_page = container_of(gtt->oos_page_use_list_head.next,
+ struct intel_vgpu_oos_page, list);
+ ret = ppgtt_set_guest_page_sync(vgpu, oos_page->guest_page);
+ if (ret)
+ return ret;
+ ret = detach_oos_page(vgpu, oos_page);
+ if (ret)
+ return ret;
+ } else
+ oos_page = container_of(gtt->oos_page_free_list_head.next,
+ struct intel_vgpu_oos_page, list);
+ return attach_oos_page(vgpu, oos_page, gpt);
+}
+
+static int ppgtt_set_guest_page_oos(struct intel_vgpu *vgpu,
+ struct intel_vgpu_guest_page *gpt)
+{
+ struct intel_vgpu_oos_page *oos_page = gpt->oos_page;
+
+ if (WARN(!oos_page, "shadow PPGTT page should have an oos page\n"))
+ return -EINVAL;
+
+ trace_oos_change(vgpu->id, "set page out of sync", gpt->oos_page->id,
+ gpt, guest_page_to_ppgtt_spt(gpt)->guest_page_type);
+
+ list_add_tail(&oos_page->vm_list, &vgpu->gtt.oos_page_list_head);
+ return intel_gvt_hypervisor_unset_wp_page(vgpu, gpt);
+}
+
+/**
+ * intel_vgpu_sync_oos_pages - synchronize all out-of-sync shadow pages of a vGPU
+ * @vgpu: a vGPU
+ *
+ * This function is called before submitting a guest workload to the host,
+ * to synchronize all the out-of-sync shadow page tables of a vGPU.
+ *
+ * Returns:
+ * Zero on success, negative error code if failed.
+ */
+int intel_vgpu_sync_oos_pages(struct intel_vgpu *vgpu)
+{
+ struct list_head *pos, *n;
+ struct intel_vgpu_oos_page *oos_page;
+ int ret;
+
+ if (!enable_out_of_sync)
+ return 0;
+
+ list_for_each_safe(pos, n, &vgpu->gtt.oos_page_list_head) {
+ oos_page = container_of(pos,
+ struct intel_vgpu_oos_page, vm_list);
+ ret = ppgtt_set_guest_page_sync(vgpu, oos_page->guest_page);
+ if (ret)
+ return ret;
+ }
+ return 0;
+}
+
+/*
+ * The heart of PPGTT shadow page table.
+ */
+static int ppgtt_handle_guest_write_page_table(
+ struct intel_vgpu_guest_page *gpt,
+ struct intel_gvt_gtt_entry *we, unsigned long index)
+{
+ struct intel_vgpu_ppgtt_spt *spt = guest_page_to_ppgtt_spt(gpt);
+ struct intel_vgpu *vgpu = spt->vgpu;
+ struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
+
+ int ret;
+ int new_present;
+
+ new_present = ops->test_present(we);
+
+ ret = ppgtt_handle_guest_entry_removal(gpt, index);
+ if (ret)
+ goto fail;
+
+ if (new_present) {
+ ret = ppgtt_handle_guest_entry_add(gpt, we, index);
+ if (ret)
+ goto fail;
+ }
+ return 0;
+fail:
+ gvt_err("vgpu%d: fail: shadow page %p guest entry 0x%llx type %d.\n",
+ vgpu->id, spt, we->val64, we->type);
+ return ret;
+}
+
+static inline bool can_do_out_of_sync(struct intel_vgpu_guest_page *gpt)
+{
+ return enable_out_of_sync
+ && gtt_type_is_pte_pt(
+ guest_page_to_ppgtt_spt(gpt)->guest_page_type)
+ && gpt->write_cnt >= 2;
+}
+
+static void ppgtt_set_post_shadow(struct intel_vgpu_ppgtt_spt *spt,
+ unsigned long index)
+{
+ set_bit(index, spt->post_shadow_bitmap);
+ if (!list_empty(&spt->post_shadow_list))
+ return;
+
+ list_add_tail(&spt->post_shadow_list,
+ &spt->vgpu->gtt.post_shadow_list_head);
+}
+
+/**
+ * intel_vgpu_flush_post_shadow - flush the post shadow transactions
+ * @vgpu: a vGPU
+ *
+ * This function is called before submitting a guest workload to host,
+ * to flush all the post shadows for a vGPU.
+ *
+ * Returns:
+ * Zero on success, negative error code if failed.
+ */
+int intel_vgpu_flush_post_shadow(struct intel_vgpu *vgpu)
+{
+ struct list_head *pos, *n;
+ struct intel_vgpu_ppgtt_spt *spt;
+ struct intel_gvt_gtt_entry ge;
+ unsigned long index;
+ int ret;
+
+ list_for_each_safe(pos, n, &vgpu->gtt.post_shadow_list_head) {
+ spt = container_of(pos, struct intel_vgpu_ppgtt_spt,
+ post_shadow_list);
+
+ for_each_set_bit(index, spt->post_shadow_bitmap,
+ GTT_ENTRY_NUM_IN_ONE_PAGE) {
+ ppgtt_get_guest_entry(spt, &ge, index);
+
+ ret = ppgtt_handle_guest_write_page_table(
+ &spt->guest_page, &ge, index);
+ if (ret)
+ return ret;
+ clear_bit(index, spt->post_shadow_bitmap);
+ }
+ list_del_init(&spt->post_shadow_list);
+ }
+ return 0;
+}
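+
+/*
+ * Illustrative call pattern (a sketch, not part of this patch; the
+ * "workload" pointer is hypothetical): both helpers are expected to run
+ * right before a guest workload is submitted to the host:
+ *
+ *	ret = intel_vgpu_sync_oos_pages(workload->vgpu);
+ *	if (ret)
+ *		return ret;
+ *
+ *	ret = intel_vgpu_flush_post_shadow(workload->vgpu);
+ *	if (ret)
+ *		return ret;
+ */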
+
+static int ppgtt_handle_guest_write_page_table_bytes(void *gp,
+ u64 pa, void *p_data, int bytes)
+{
+ struct intel_vgpu_guest_page *gpt = (struct intel_vgpu_guest_page *)gp;
+ struct intel_vgpu_ppgtt_spt *spt = guest_page_to_ppgtt_spt(gpt);
+ struct intel_vgpu *vgpu = spt->vgpu;
+ struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
+ const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
+ struct intel_gvt_gtt_entry we;
+ unsigned long index;
+ int ret;
+
+ index = (pa & (PAGE_SIZE - 1)) >> info->gtt_entry_size_shift;
+
+ ppgtt_get_guest_entry(spt, &we, index);
+
+ ops->test_pse(&we);
+
+ if (bytes == info->gtt_entry_size) {
+ ret = ppgtt_handle_guest_write_page_table(gpt, &we, index);
+ if (ret)
+ return ret;
+ } else {
+ if (!test_bit(index, spt->post_shadow_bitmap)) {
+ ret = ppgtt_handle_guest_entry_removal(gpt, index);
+ if (ret)
+ return ret;
+ }
+
+ ppgtt_set_post_shadow(spt, index);
+ }
+
+ if (!enable_out_of_sync)
+ return 0;
+
+ gpt->write_cnt++;
+
+ if (gpt->oos_page)
+ ops->set_entry(gpt->oos_page->mem, &we, index,
+ false, 0, vgpu);
+
+ if (can_do_out_of_sync(gpt)) {
+ if (!gpt->oos_page)
+ ppgtt_allocate_oos_page(vgpu, gpt);
+
+ ret = ppgtt_set_guest_page_oos(vgpu, gpt);
+ if (ret < 0)
+ return ret;
+ }
+ return 0;
+}
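+
+/*
+ * Worked example (illustrative): with gtt_entry_size == 8, an 8-byte
+ * guest write is shadowed immediately above, while a 4-byte partial
+ * write only removes the stale shadow entry and marks the index in
+ * post_shadow_bitmap; the entry is re-shadowed later by
+ * intel_vgpu_flush_post_shadow() once both halves have landed.
+ */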
+
+/*
+ * mm page table allocation policy for BDW+:
+ * - for GGTT, only a virtual page table is allocated.
+ * - for PPGTT, dedicated virtual and shadow page tables are allocated.
+ */
+static int gen8_mm_alloc_page_table(struct intel_vgpu_mm *mm)
+{
+ struct intel_vgpu *vgpu = mm->vgpu;
+ struct intel_gvt *gvt = vgpu->gvt;
+ const struct intel_gvt_device_info *info = &gvt->device_info;
+ void *mem;
+
+ if (mm->type == INTEL_GVT_MM_PPGTT) {
+ mm->page_table_entry_cnt = 4;
+ mm->page_table_entry_size = mm->page_table_entry_cnt *
+ info->gtt_entry_size;
+ mem = kzalloc(mm->has_shadow_page_table ?
+ mm->page_table_entry_size * 2
+ : mm->page_table_entry_size,
+ GFP_ATOMIC);
+ if (!mem)
+ return -ENOMEM;
+ mm->virtual_page_table = mem;
+ if (!mm->has_shadow_page_table)
+ return 0;
+ mm->shadow_page_table = mem + mm->page_table_entry_size;
+ } else if (mm->type == INTEL_GVT_MM_GGTT) {
+ mm->page_table_entry_cnt =
+ (gvt_ggtt_gm_sz(gvt) >> GTT_PAGE_SHIFT);
+ mm->page_table_entry_size = mm->page_table_entry_cnt *
+ info->gtt_entry_size;
+ mem = vzalloc(mm->page_table_entry_size);
+ if (!mem)
+ return -ENOMEM;
+ mm->virtual_page_table = mem;
+ }
+ return 0;
+}
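+
+/*
+ * Size sketch (illustrative, assuming gtt_entry_size == 8): a shadowed
+ * PPGTT mm needs 4 entries * 8 bytes * 2 tables = 64 bytes from
+ * kzalloc(), while a GGTT mm covering e.g. 4GB of graphics memory needs
+ * (4GB >> GTT_PAGE_SHIFT) * 8 bytes = 8MB, which is why the GGTT path
+ * uses vzalloc() instead.
+ */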
+
+static void gen8_mm_free_page_table(struct intel_vgpu_mm *mm)
+{
+ if (mm->type == INTEL_GVT_MM_PPGTT) {
+ kfree(mm->virtual_page_table);
+ } else if (mm->type == INTEL_GVT_MM_GGTT) {
+ if (mm->virtual_page_table)
+ vfree(mm->virtual_page_table);
+ }
+ mm->virtual_page_table = mm->shadow_page_table = NULL;
+}
+
+static void invalidate_mm(struct intel_vgpu_mm *mm)
+{
+ struct intel_vgpu *vgpu = mm->vgpu;
+ struct intel_gvt *gvt = vgpu->gvt;
+ struct intel_gvt_gtt *gtt = &gvt->gtt;
+ struct intel_gvt_gtt_pte_ops *ops = gtt->pte_ops;
+ struct intel_gvt_gtt_entry se;
+ int i;
+
+ if (WARN_ON(!mm->has_shadow_page_table || !mm->shadowed))
+ return;
+
+ for (i = 0; i < mm->page_table_entry_cnt; i++) {
+ ppgtt_get_shadow_root_entry(mm, &se, i);
+ if (!ops->test_present(&se))
+ continue;
+ ppgtt_invalidate_shadow_page_by_shadow_entry(
+ vgpu, &se);
+ se.val64 = 0;
+ ppgtt_set_shadow_root_entry(mm, &se, i);
+
+ trace_gpt_change(vgpu->id, "destroy root pointer",
+ NULL, se.type, se.val64, i);
+ }
+ mm->shadowed = false;
+}
+
+/**
+ * intel_vgpu_destroy_mm - destroy a mm object
+ * @mm_ref: the kref embedded in a vGPU mm object
+ *
+ * This function is used to destroy a mm object for a vGPU. It is invoked
+ * via kref_put() when the last reference to the mm object is dropped.
+ *
+ */
+void intel_vgpu_destroy_mm(struct kref *mm_ref)
+{
+ struct intel_vgpu_mm *mm = container_of(mm_ref, typeof(*mm), ref);
+ struct intel_vgpu *vgpu = mm->vgpu;
+ struct intel_gvt *gvt = vgpu->gvt;
+ struct intel_gvt_gtt *gtt = &gvt->gtt;
+
+ if (!mm->initialized)
+ goto out;
+
+ list_del(&mm->list);
+ list_del(&mm->lru_list);
+
+ if (mm->has_shadow_page_table)
+ invalidate_mm(mm);
+
+ gtt->mm_free_page_table(mm);
+out:
+ kfree(mm);
+}
+
+static int shadow_mm(struct intel_vgpu_mm *mm)
+{
+ struct intel_vgpu *vgpu = mm->vgpu;
+ struct intel_gvt *gvt = vgpu->gvt;
+ struct intel_gvt_gtt *gtt = &gvt->gtt;
+ struct intel_gvt_gtt_pte_ops *ops = gtt->pte_ops;
+ struct intel_vgpu_ppgtt_spt *spt;
+ struct intel_gvt_gtt_entry ge, se;
+ int i;
+ int ret;
+
+ if (WARN_ON(!mm->has_shadow_page_table || mm->shadowed))
+ return 0;
+
+ mm->shadowed = true;
+
+ for (i = 0; i < mm->page_table_entry_cnt; i++) {
+ ppgtt_get_guest_root_entry(mm, &ge, i);
+ if (!ops->test_present(&ge))
+ continue;
+
+ trace_gpt_change(vgpu->id, __func__, NULL,
+ ge.type, ge.val64, i);
+
+ spt = ppgtt_populate_shadow_page_by_guest_entry(vgpu, &ge);
+ if (IS_ERR(spt)) {
+ gvt_err("fail to populate guest root pointer\n");
+ ret = PTR_ERR(spt);
+ goto fail;
+ }
+ ppgtt_generate_shadow_entry(&se, spt, &ge);
+ ppgtt_set_shadow_root_entry(mm, &se, i);
+
+ trace_gpt_change(vgpu->id, "populate root pointer",
+ NULL, se.type, se.val64, i);
+ }
+ return 0;
+fail:
+ invalidate_mm(mm);
+ return ret;
+}
+
+/**
+ * intel_vgpu_create_mm - create a mm object for a vGPU
+ * @vgpu: a vGPU
+ * @mm_type: mm object type, should be PPGTT or GGTT
+ * @virtual_page_table: page table root pointers. Could be NULL if user wants
+ * to populate shadow later.
+ * @page_table_level: describe the page table level of the mm object
+ * @pde_base_index: pde root pointer base in GGTT MMIO.
+ *
+ * This function is used to create a mm object for a vGPU.
+ *
+ * Returns:
+ * Pointer to the created mm object on success, ERR_PTR-encoded negative
+ * error code if failed.
+ */
+struct intel_vgpu_mm *intel_vgpu_create_mm(struct intel_vgpu *vgpu,
+ int mm_type, void *virtual_page_table, int page_table_level,
+ u32 pde_base_index)
+{
+ struct intel_gvt *gvt = vgpu->gvt;
+ struct intel_gvt_gtt *gtt = &gvt->gtt;
+ struct intel_vgpu_mm *mm;
+ int ret;
+
+ mm = kzalloc(sizeof(*mm), GFP_ATOMIC);
+ if (!mm) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ mm->type = mm_type;
+
+ if (page_table_level == 1)
+ mm->page_table_entry_type = GTT_TYPE_GGTT_PTE;
+ else if (page_table_level == 3)
+ mm->page_table_entry_type = GTT_TYPE_PPGTT_ROOT_L3_ENTRY;
+ else if (page_table_level == 4)
+ mm->page_table_entry_type = GTT_TYPE_PPGTT_ROOT_L4_ENTRY;
+ else {
+ WARN_ON(1);
+ ret = -EINVAL;
+ goto fail;
+ }
+
+ mm->page_table_level = page_table_level;
+ mm->pde_base_index = pde_base_index;
+
+ mm->vgpu = vgpu;
+ mm->has_shadow_page_table = (mm_type == INTEL_GVT_MM_PPGTT);
+
+ kref_init(&mm->ref);
+ atomic_set(&mm->pincount, 0);
+ INIT_LIST_HEAD(&mm->list);
+ INIT_LIST_HEAD(&mm->lru_list);
+ list_add_tail(&mm->list, &vgpu->gtt.mm_list_head);
+
+ ret = gtt->mm_alloc_page_table(mm);
+ if (ret) {
+ gvt_err("fail to allocate page table for mm\n");
+ goto fail;
+ }
+
+ mm->initialized = true;
+
+ if (virtual_page_table)
+ memcpy(mm->virtual_page_table, virtual_page_table,
+ mm->page_table_entry_size);
+
+ if (mm->has_shadow_page_table) {
+ ret = shadow_mm(mm);
+ if (ret)
+ goto fail;
+ list_add_tail(&mm->lru_list, &gvt->gtt.mm_lru_list_head);
+ }
+ return mm;
+fail:
+ gvt_err("fail to create mm\n");
+ if (mm)
+ intel_gvt_mm_unreference(mm);
+ return ERR_PTR(ret);
+}
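+
+/*
+ * Usage sketch (illustrative; "pdp" stands for a hypothetical array of
+ * guest root pointers): create a shadowed 4-level PPGTT mm and drop the
+ * reference when done:
+ *
+ *	struct intel_vgpu_mm *mm;
+ *
+ *	mm = intel_vgpu_create_mm(vgpu, INTEL_GVT_MM_PPGTT, pdp, 4, 0);
+ *	if (IS_ERR(mm))
+ *		return PTR_ERR(mm);
+ *	...
+ *	intel_gvt_mm_unreference(mm);
+ */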
+
+/**
+ * intel_vgpu_unpin_mm - decrease the pin count of a vGPU mm object
+ * @mm: a vGPU mm object
+ *
+ * This function is called when a user is done using a vGPU mm object.
+ */
+void intel_vgpu_unpin_mm(struct intel_vgpu_mm *mm)
+{
+ if (WARN_ON(mm->type != INTEL_GVT_MM_PPGTT))
+ return;
+
+ atomic_dec(&mm->pincount);
+}
+
+/**
+ * intel_vgpu_pin_mm - increase the pin count of a vGPU mm object
+ * @mm: target vGPU mm object
+ *
+ * This function is called when a user wants to use a vGPU mm object. If this
+ * mm object hasn't been shadowed yet, the shadow will be populated at this
+ * time.
+ *
+ * Returns:
+ * Zero on success, negative error code if failed.
+ */
+int intel_vgpu_pin_mm(struct intel_vgpu_mm *mm)
+{
+ int ret;
+
+ if (WARN_ON(mm->type != INTEL_GVT_MM_PPGTT))
+ return 0;
+
+ atomic_inc(&mm->pincount);
+
+ if (!mm->shadowed) {
+ ret = shadow_mm(mm);
+ if (ret)
+ return ret;
+ }
+
+ list_del_init(&mm->lru_list);
+ list_add_tail(&mm->lru_list, &mm->vgpu->gvt->gtt.mm_lru_list_head);
+ return 0;
+}
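+
+/*
+ * Usage sketch (illustrative): pin a PPGTT mm while its shadow page
+ * tables must stay resident, e.g. around workload execution, then unpin
+ * so reclaim_one_mm() may reclaim the shadow when the mm goes idle:
+ *
+ *	ret = intel_vgpu_pin_mm(mm);
+ *	if (ret)
+ *		return ret;
+ *	...
+ *	intel_vgpu_unpin_mm(mm);
+ */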
+
+static int reclaim_one_mm(struct intel_gvt *gvt)
+{
+ struct intel_vgpu_mm *mm;
+ struct list_head *pos, *n;
+
+ list_for_each_safe(pos, n, &gvt->gtt.mm_lru_list_head) {
+ mm = container_of(pos, struct intel_vgpu_mm, lru_list);
+
+ if (mm->type != INTEL_GVT_MM_PPGTT)
+ continue;
+ if (atomic_read(&mm->pincount))
+ continue;
+
+ list_del_init(&mm->lru_list);
+ invalidate_mm(mm);
+ return 1;
+ }
+ return 0;
+}
+
+/*
+ * GMA translation APIs.
+ */
+static inline int ppgtt_get_next_level_entry(struct intel_vgpu_mm *mm,
+ struct intel_gvt_gtt_entry *e, unsigned long index, bool guest)
+{
+ struct intel_vgpu *vgpu = mm->vgpu;
+ struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
+ struct intel_vgpu_ppgtt_spt *s;
+
+ if (WARN_ON(!mm->has_shadow_page_table))
+ return -EINVAL;
+
+ s = ppgtt_find_shadow_page(vgpu, ops->get_pfn(e));
+ if (!s)
+ return -ENXIO;
+
+ if (!guest)
+ ppgtt_get_shadow_entry(s, e, index);
+ else
+ ppgtt_get_guest_entry(s, e, index);
+ return 0;
+}
+
+/**
+ * intel_vgpu_gma_to_gpa - translate a GMA to a GPA
+ * @mm: mm object. Could be a PPGTT or GGTT mm object
+ * @gma: graphics memory address in this mm object
+ *
+ * This function is used to translate a graphics memory address in a
+ * specific graphics memory space to a guest physical address.
+ *
+ * Returns:
+ * Guest physical address on success, INTEL_GVT_INVALID_ADDR if failed.
+ */
+unsigned long intel_vgpu_gma_to_gpa(struct intel_vgpu_mm *mm, unsigned long gma)
+{
+ struct intel_vgpu *vgpu = mm->vgpu;
+ struct intel_gvt *gvt = vgpu->gvt;
+ struct intel_gvt_gtt_pte_ops *pte_ops = gvt->gtt.pte_ops;
+ struct intel_gvt_gtt_gma_ops *gma_ops = gvt->gtt.gma_ops;
+ unsigned long gpa = INTEL_GVT_INVALID_ADDR;
+ unsigned long gma_index[4];
+ struct intel_gvt_gtt_entry e;
+ int i, index;
+ int ret;
+
+ if (mm->type != INTEL_GVT_MM_GGTT && mm->type != INTEL_GVT_MM_PPGTT)
+ return INTEL_GVT_INVALID_ADDR;
+
+ if (mm->type == INTEL_GVT_MM_GGTT) {
+ if (!vgpu_gmadr_is_valid(vgpu, gma))
+ goto err;
+
+ ggtt_get_guest_entry(mm, &e,
+ gma_ops->gma_to_ggtt_pte_index(gma));
+ gpa = (pte_ops->get_pfn(&e) << GTT_PAGE_SHIFT)
+ + (gma & ~GTT_PAGE_MASK);
+
+ trace_gma_translate(vgpu->id, "ggtt", 0, 0, gma, gpa);
+ return gpa;
+ }
+
+ switch (mm->page_table_level) {
+ case 4:
+ ppgtt_get_shadow_root_entry(mm, &e, 0);
+ gma_index[0] = gma_ops->gma_to_pml4_index(gma);
+ gma_index[1] = gma_ops->gma_to_l4_pdp_index(gma);
+ gma_index[2] = gma_ops->gma_to_pde_index(gma);
+ gma_index[3] = gma_ops->gma_to_pte_index(gma);
+ index = 4;
+ break;
+ case 3:
+ ppgtt_get_shadow_root_entry(mm, &e,
+ gma_ops->gma_to_l3_pdp_index(gma));
+ gma_index[0] = gma_ops->gma_to_pde_index(gma);
+ gma_index[1] = gma_ops->gma_to_pte_index(gma);
+ index = 2;
+ break;
+ case 2:
+ ppgtt_get_shadow_root_entry(mm, &e,
+ gma_ops->gma_to_pde_index(gma));
+ gma_index[0] = gma_ops->gma_to_pte_index(gma);
+ index = 1;
+ break;
+ default:
+ WARN_ON(1);
+ goto err;
+ }
+
+ /* walk into the shadow page table and get gpa from guest entry */
+ for (i = 0; i < index; i++) {
+ ret = ppgtt_get_next_level_entry(mm, &e, gma_index[i],
+ (i == index - 1));
+ if (ret)
+ goto err;
+ }
+
+ gpa = (pte_ops->get_pfn(&e) << GTT_PAGE_SHIFT)
+ + (gma & ~GTT_PAGE_MASK);
+
+ trace_gma_translate(vgpu->id, "ppgtt", 0,
+ mm->page_table_level, gma, gpa);
+ return gpa;
+err:
+ gvt_err("invalid mm type: %d gma %lx\n", mm->type, gma);
+ return INTEL_GVT_INVALID_ADDR;
+}
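+
+/*
+ * Usage sketch (illustrative): translate a guest graphics memory
+ * address through the vGPU's GGTT mm object before touching guest
+ * memory:
+ *
+ *	unsigned long gpa;
+ *
+ *	gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm, gma);
+ *	if (gpa == INTEL_GVT_INVALID_ADDR)
+ *		return -EFAULT;
+ */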
+
+static int emulate_gtt_mmio_read(struct intel_vgpu *vgpu,
+ unsigned int off, void *p_data, unsigned int bytes)
+{
+ struct intel_vgpu_mm *ggtt_mm = vgpu->gtt.ggtt_mm;
+ const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
+ unsigned long index = off >> info->gtt_entry_size_shift;
+ struct intel_gvt_gtt_entry e;
+
+ if (bytes != 4 && bytes != 8)
+ return -EINVAL;
+
+ ggtt_get_guest_entry(ggtt_mm, &e, index);
+ memcpy(p_data, (void *)&e.val64 + (off & (info->gtt_entry_size - 1)),
+ bytes);
+ return 0;
+}
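+
+/*
+ * Worked example (illustrative): with gtt_entry_size_shift == 3, a read
+ * at GTT offset 0x10 hits entry index 0x10 >> 3 = 2, and a 4-byte
+ * access copies the low or high half of that 8-byte entry depending on
+ * off & 7.
+ */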
+
+/**
+ * intel_vgpu_emulate_gtt_mmio_read - emulate GTT MMIO register read
+ * @vgpu: a vGPU
+ * @off: register offset
+ * @p_data: data will be returned to guest
+ * @bytes: data length
+ *
+ * This function is used to emulate the GTT MMIO register read
+ *
+ * Returns:
+ * Zero on success, error code if failed.
+ */
+int intel_vgpu_emulate_gtt_mmio_read(struct intel_vgpu *vgpu, unsigned int off,
+ void *p_data, unsigned int bytes)
+{
+ const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
+ int ret;
+
+ if (bytes != 4 && bytes != 8)
+ return -EINVAL;
+
+ off -= info->gtt_start_offset;
+ ret = emulate_gtt_mmio_read(vgpu, off, p_data, bytes);
+ return ret;
+}
+
+static int emulate_gtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
+ void *p_data, unsigned int bytes)
+{
+ struct intel_gvt *gvt = vgpu->gvt;
+ const struct intel_gvt_device_info *info = &gvt->device_info;
+ struct intel_vgpu_mm *ggtt_mm = vgpu->gtt.ggtt_mm;
+ struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;
+ unsigned long g_gtt_index = off >> info->gtt_entry_size_shift;
+ unsigned long gma;
+ struct intel_gvt_gtt_entry e, m;
+ int ret;
+
+ if (bytes != 4 && bytes != 8)
+ return -EINVAL;
+
+ gma = g_gtt_index << GTT_PAGE_SHIFT;
+
+ /* the VM may configure the whole GM space when ballooning is used */
+ if (WARN_ONCE(!vgpu_gmadr_is_valid(vgpu, gma),
+ "vgpu%d: found oob ggtt write, offset %x\n",
+ vgpu->id, off)) {
+ return 0;
+ }
+
+ ggtt_get_guest_entry(ggtt_mm, &e, g_gtt_index);
+
+ memcpy((void *)&e.val64 + (off & (info->gtt_entry_size - 1)), p_data,
+ bytes);
+
+ if (ops->test_present(&e)) {
+ ret = gtt_entry_p2m(vgpu, &e, &m);
+ if (ret) {
+ gvt_err("vgpu%d: fail to translate guest gtt entry\n",
+ vgpu->id);
+ return ret;
+ }
+ } else {
+ m = e;
+ m.val64 = 0;
+ }
+
+ ggtt_set_shadow_entry(ggtt_mm, &m, g_gtt_index);
+ ggtt_set_guest_entry(ggtt_mm, &e, g_gtt_index);
+ return 0;
+}
+
+/**
+ * intel_vgpu_emulate_gtt_mmio_write - emulate GTT MMIO register write
+ * @vgpu: a vGPU
+ * @off: register offset
+ * @p_data: data from guest write
+ * @bytes: data length
+ *
+ * This function is used to emulate the GTT MMIO register write
+ *
+ * Returns:
+ * Zero on success, error code if failed.
+ */
+int intel_vgpu_emulate_gtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
+ void *p_data, unsigned int bytes)
+{
+ const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
+ int ret;
+
+ if (bytes != 4 && bytes != 8)
+ return -EINVAL;
+
+ off -= info->gtt_start_offset;
+ ret = emulate_gtt_mmio_write(vgpu, off, p_data, bytes);
+ return ret;
+}
+
+static int alloc_scratch_pages(struct intel_vgpu *vgpu,
+ intel_gvt_gtt_type_t type)
+{
+ struct intel_vgpu_gtt *gtt = &vgpu->gtt;
+ struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
+ int page_entry_num = GTT_PAGE_SIZE >>
+ vgpu->gvt->device_info.gtt_entry_size_shift;
+ struct page *scratch_pt;
+ unsigned long mfn;
+ int i;
+ void *p;
+
+ if (WARN_ON(type < GTT_TYPE_PPGTT_PTE_PT || type >= GTT_TYPE_MAX))
+ return -EINVAL;
+
+ scratch_pt = alloc_page(GFP_KERNEL | __GFP_ZERO);
+ if (!scratch_pt) {
+ gvt_err("fail to allocate scratch page\n");
+ return -ENOMEM;
+ }
+
+ p = kmap_atomic(scratch_pt);
+ mfn = intel_gvt_hypervisor_virt_to_mfn(p);
+ if (mfn == INTEL_GVT_INVALID_ADDR) {
+ gvt_err("fail to translate vaddr:0x%llx\n", (u64)p);
+ kunmap_atomic(p);
+ __free_page(scratch_pt);
+ return -EFAULT;
+ }
+ gtt->scratch_pt[type].page_mfn = mfn;
+ gtt->scratch_pt[type].page = scratch_pt;
+ gvt_dbg_mm("vgpu%d create scratch_pt: type %d mfn=0x%lx\n",
+ vgpu->id, type, mfn);
+
+ /* Build the tree by filling the scratch pt with entries which
+ * point to the next-level scratch pt or scratch page. The
+ * scratch_pt[type] indicates the scratch pt/scratch page used by
+ * the 'type' pt.
+ * e.g. scratch_pt[GTT_TYPE_PPGTT_PDE_PT] is used by a
+ * GTT_TYPE_PPGTT_PDE_PT level pt; that scratch_pt itself is of
+ * type GTT_TYPE_PPGTT_PTE_PT and is filled with the scratch page
+ * mfn.
+ */
+ if (type > GTT_TYPE_PPGTT_PTE_PT && type < GTT_TYPE_MAX) {
+ struct intel_gvt_gtt_entry se;
+
+ memset(&se, 0, sizeof(struct intel_gvt_gtt_entry));
+ se.type = get_entry_type(type - 1);
+ ops->set_pfn(&se, gtt->scratch_pt[type - 1].page_mfn);
+
+ /* The entry parameters like present/writeable/cache type are
+ * set to the same values as in i915's scratch page tree.
+ */
+ se.val64 |= _PAGE_PRESENT | _PAGE_RW;
+ if (type == GTT_TYPE_PPGTT_PDE_PT)
+ se.val64 |= PPAT_CACHED_INDEX;
+
+ for (i = 0; i < page_entry_num; i++)
+ ops->set_entry(p, &se, i, false, 0, vgpu);
+ }
+
+ kunmap_atomic(p);
+
+ return 0;
+}
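+
+/*
+ * Layout sketch (illustrative): after create_scratch_page_tree() each
+ * scratch page table is filled with entries pointing at the scratch
+ * table of the level below (type names abbreviated):
+ *
+ *	scratch_pt[PML4_PT] -> scratch_pt[PDP_PT] ->
+ *	    scratch_pt[PDE_PT] -> scratch_pt[PTE_PT]
+ */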
+
+static int release_scratch_page_tree(struct intel_vgpu *vgpu)
+{
+ int i;
+
+ for (i = GTT_TYPE_PPGTT_PTE_PT; i < GTT_TYPE_MAX; i++) {
+ if (vgpu->gtt.scratch_pt[i].page != NULL) {
+ __free_page(vgpu->gtt.scratch_pt[i].page);
+ vgpu->gtt.scratch_pt[i].page = NULL;
+ vgpu->gtt.scratch_pt[i].page_mfn = 0;
+ }
+ }
+
+ return 0;
+}
+
+static int create_scratch_page_tree(struct intel_vgpu *vgpu)
+{
+ int i, ret;
+
+ for (i = GTT_TYPE_PPGTT_PTE_PT; i < GTT_TYPE_MAX; i++) {
+ ret = alloc_scratch_pages(vgpu, i);
+ if (ret)
+ goto err;
+ }
+
+ return 0;
+
+err:
+ release_scratch_page_tree(vgpu);
+ return ret;
+}
+
+/**
+ * intel_vgpu_init_gtt - initialize per-vGPU graphics memory virtualization
+ * @vgpu: a vGPU
+ *
+ * This function is used to initialize per-vGPU graphics memory virtualization
+ * components.
+ *
+ * Returns:
+ * Zero on success, error code if failed.
+ */
+int intel_vgpu_init_gtt(struct intel_vgpu *vgpu)
+{
+ struct intel_vgpu_gtt *gtt = &vgpu->gtt;
+ struct intel_vgpu_mm *ggtt_mm;
+
+ hash_init(gtt->guest_page_hash_table);
+ hash_init(gtt->shadow_page_hash_table);
+
+ INIT_LIST_HEAD(&gtt->mm_list_head);
+ INIT_LIST_HEAD(&gtt->oos_page_list_head);
+ INIT_LIST_HEAD(&gtt->post_shadow_list_head);
+
+ ggtt_mm = intel_vgpu_create_mm(vgpu, INTEL_GVT_MM_GGTT,
+ NULL, 1, 0);
+ if (IS_ERR(ggtt_mm)) {
+ gvt_err("fail to create mm for ggtt.\n");
+ return PTR_ERR(ggtt_mm);
+ }
+
+ gtt->ggtt_mm = ggtt_mm;
+
+ return create_scratch_page_tree(vgpu);
+}
+
+/**
+ * intel_vgpu_clean_gtt - clean up per-vGPU graphics memory virtualization
+ * @vgpu: a vGPU
+ *
+ * This function is used to clean up per-vGPU graphics memory virtualization
+ * components.
+ */
+void intel_vgpu_clean_gtt(struct intel_vgpu *vgpu)
+{
+ struct list_head *pos, *n;
+ struct intel_vgpu_mm *mm;
+
+ ppgtt_free_all_shadow_page(vgpu);
+ release_scratch_page_tree(vgpu);
+
+ list_for_each_safe(pos, n, &vgpu->gtt.mm_list_head) {
+ mm = container_of(pos, struct intel_vgpu_mm, list);
+ vgpu->gvt->gtt.mm_free_page_table(mm);
+ list_del(&mm->list);
+ list_del(&mm->lru_list);
+ kfree(mm);
+ }
+}
+
+static void clean_spt_oos(struct intel_gvt *gvt)
+{
+ struct intel_gvt_gtt *gtt = &gvt->gtt;
+ struct list_head *pos, *n;
+ struct intel_vgpu_oos_page *oos_page;
+
+ WARN(!list_empty(&gtt->oos_page_use_list_head),
+ "someone is still using oos page\n");
+
+ list_for_each_safe(pos, n, &gtt->oos_page_free_list_head) {
+ oos_page = container_of(pos, struct intel_vgpu_oos_page, list);
+ list_del(&oos_page->list);
+ kfree(oos_page);
+ }
+}
+
+static int setup_spt_oos(struct intel_gvt *gvt)
+{
+ struct intel_gvt_gtt *gtt = &gvt->gtt;
+ struct intel_vgpu_oos_page *oos_page;
+ int i;
+ int ret;
+
+ INIT_LIST_HEAD(&gtt->oos_page_free_list_head);
+ INIT_LIST_HEAD(&gtt->oos_page_use_list_head);
+
+ for (i = 0; i < preallocated_oos_pages; i++) {
+ oos_page = kzalloc(sizeof(*oos_page), GFP_KERNEL);
+ if (!oos_page) {
+ gvt_err("fail to pre-allocate oos page\n");
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ INIT_LIST_HEAD(&oos_page->list);
+ INIT_LIST_HEAD(&oos_page->vm_list);
+ oos_page->id = i;
+ list_add_tail(&oos_page->list, &gtt->oos_page_free_list_head);
+ }
+
+ gvt_dbg_mm("%d oos pages preallocated\n", i);
+
+ return 0;
+fail:
+ clean_spt_oos(gvt);
+ return ret;
+}
+
+/**
+ * intel_vgpu_find_ppgtt_mm - find a PPGTT mm object
+ * @vgpu: a vGPU
+ * @page_table_level: PPGTT page table level
+ * @root_entry: PPGTT page table root pointers
+ *
+ * This function is used to find a PPGTT mm object from mm object pool
+ *
+ * Returns:
+ * pointer to mm object on success, NULL if failed.
+ */
+struct intel_vgpu_mm *intel_vgpu_find_ppgtt_mm(struct intel_vgpu *vgpu,
+ int page_table_level, void *root_entry)
+{
+ struct list_head *pos;
+ struct intel_vgpu_mm *mm;
+ u64 *src, *dst;
+
+ list_for_each(pos, &vgpu->gtt.mm_list_head) {
+ mm = container_of(pos, struct intel_vgpu_mm, list);
+ if (mm->type != INTEL_GVT_MM_PPGTT)
+ continue;
+
+ if (mm->page_table_level != page_table_level)
+ continue;
+
+ src = root_entry;
+ dst = mm->virtual_page_table;
+
+ if (page_table_level == 3) {
+ if (src[0] == dst[0]
+ && src[1] == dst[1]
+ && src[2] == dst[2]
+ && src[3] == dst[3])
+ return mm;
+ } else {
+ if (src[0] == dst[0])
+ return mm;
+ }
+ }
+ return NULL;
+}
+
+/**
+ * intel_vgpu_g2v_create_ppgtt_mm - create a PPGTT mm object from
+ * g2v notification
+ * @vgpu: a vGPU
+ * @page_table_level: PPGTT page table level
+ *
+ * This function is used to create a PPGTT mm object upon a guest-to-GVT-g
+ * notification.
+ *
+ * Returns:
+ * Zero on success, negative error code if failed.
+ */
+int intel_vgpu_g2v_create_ppgtt_mm(struct intel_vgpu *vgpu,
+ int page_table_level)
+{
+ u64 *pdp = (u64 *)&vgpu_vreg64(vgpu, vgtif_reg(pdp[0]));
+ struct intel_vgpu_mm *mm;
+
+ if (WARN_ON((page_table_level != 4) && (page_table_level != 3)))
+ return -EINVAL;
+
+ mm = intel_vgpu_find_ppgtt_mm(vgpu, page_table_level, pdp);
+ if (mm) {
+ intel_gvt_mm_reference(mm);
+ } else {
+ mm = intel_vgpu_create_mm(vgpu, INTEL_GVT_MM_PPGTT,
+ pdp, page_table_level, 0);
+ if (IS_ERR(mm)) {
+ gvt_err("fail to create mm\n");
+ return PTR_ERR(mm);
+ }
+ }
+ return 0;
+}
+
+/**
+ * intel_vgpu_g2v_destroy_ppgtt_mm - destroy a PPGTT mm object from
+ * g2v notification
+ * @vgpu: a vGPU
+ * @page_table_level: PPGTT page table level
+ *
+ * This function is used to destroy a PPGTT mm object upon a guest-to-GVT-g
+ * notification.
+ *
+ * Returns:
+ * Zero on success, negative error code if failed.
+ */
+int intel_vgpu_g2v_destroy_ppgtt_mm(struct intel_vgpu *vgpu,
+ int page_table_level)
+{
+ u64 *pdp = (u64 *)&vgpu_vreg64(vgpu, vgtif_reg(pdp[0]));
+ struct intel_vgpu_mm *mm;
+
+ if (WARN_ON((page_table_level != 4) && (page_table_level != 3)))
+ return -EINVAL;
+
+ mm = intel_vgpu_find_ppgtt_mm(vgpu, page_table_level, pdp);
+ if (!mm) {
+ gvt_err("fail to find ppgtt instance.\n");
+ return -EINVAL;
+ }
+ intel_gvt_mm_unreference(mm);
+ return 0;
+}
+
+/**
+ * intel_gvt_init_gtt - initialize mm components of a GVT device
+ * @gvt: GVT device
+ *
+ * This function is called at the initialization stage, to initialize
+ * the mm components of a GVT device.
+ *
+ * Returns:
+ * zero on success, negative error code if failed.
+ */
+int intel_gvt_init_gtt(struct intel_gvt *gvt)
+{
+ int ret;
+
+ gvt_dbg_core("init gtt\n");
+
+ if (IS_BROADWELL(gvt->dev_priv) || IS_SKYLAKE(gvt->dev_priv)) {
+ gvt->gtt.pte_ops = &gen8_gtt_pte_ops;
+ gvt->gtt.gma_ops = &gen8_gtt_gma_ops;
+ gvt->gtt.mm_alloc_page_table = gen8_mm_alloc_page_table;
+ gvt->gtt.mm_free_page_table = gen8_mm_free_page_table;
+ } else {
+ return -ENODEV;
+ }
+
+ if (enable_out_of_sync) {
+ ret = setup_spt_oos(gvt);
+ if (ret) {
+ gvt_err("fail to initialize SPT oos\n");
+ return ret;
+ }
+ }
+ INIT_LIST_HEAD(&gvt->gtt.mm_lru_list_head);
+ return 0;
+}
+
+/**
+ * intel_gvt_clean_gtt - clean up mm components of a GVT device
+ * @gvt: GVT device
+ *
+ * This function is called at the driver unloading stage, to clean up
+ * the mm components of a GVT device.
+ *
+ */
+void intel_gvt_clean_gtt(struct intel_gvt *gvt)
+{
+ if (enable_out_of_sync)
+ clean_spt_oos(gvt);
+}
diff --git a/drivers/gpu/drm/i915/gvt/gtt.h b/drivers/gpu/drm/i915/gvt/gtt.h
new file mode 100644
index 000000000000..d250013bc37b
--- /dev/null
+++ b/drivers/gpu/drm/i915/gvt/gtt.h
@@ -0,0 +1,306 @@
+/*
+ * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ * Zhi Wang <zhi.a.wang@intel.com>
+ * Zhenyu Wang <zhenyuw@linux.intel.com>
+ * Xiao Zheng <xiao.zheng@intel.com>
+ *
+ * Contributors:
+ * Min He <min.he@intel.com>
+ * Bing Niu <bing.niu@intel.com>
+ *
+ */
+
+#ifndef _GVT_GTT_H_
+#define _GVT_GTT_H_
+
+#define GTT_PAGE_SHIFT 12
+#define GTT_PAGE_SIZE (1UL << GTT_PAGE_SHIFT)
+#define GTT_PAGE_MASK (~(GTT_PAGE_SIZE-1))
+
+struct intel_vgpu_mm;
+
+#define INTEL_GVT_GTT_HASH_BITS 8
+#define INTEL_GVT_INVALID_ADDR (~0UL)
+
+struct intel_gvt_gtt_entry {
+ u64 val64;
+ int type;
+};
+
+struct intel_gvt_gtt_pte_ops {
+ struct intel_gvt_gtt_entry *(*get_entry)(void *pt,
+ struct intel_gvt_gtt_entry *e,
+ unsigned long index, bool hypervisor_access, unsigned long gpa,
+ struct intel_vgpu *vgpu);
+ struct intel_gvt_gtt_entry *(*set_entry)(void *pt,
+ struct intel_gvt_gtt_entry *e,
+ unsigned long index, bool hypervisor_access, unsigned long gpa,
+ struct intel_vgpu *vgpu);
+ bool (*test_present)(struct intel_gvt_gtt_entry *e);
+ void (*clear_present)(struct intel_gvt_gtt_entry *e);
+ bool (*test_pse)(struct intel_gvt_gtt_entry *e);
+ void (*set_pfn)(struct intel_gvt_gtt_entry *e, unsigned long pfn);
+ unsigned long (*get_pfn)(struct intel_gvt_gtt_entry *e);
+};
+
+struct intel_gvt_gtt_gma_ops {
+ unsigned long (*gma_to_ggtt_pte_index)(unsigned long gma);
+ unsigned long (*gma_to_pte_index)(unsigned long gma);
+ unsigned long (*gma_to_pde_index)(unsigned long gma);
+ unsigned long (*gma_to_l3_pdp_index)(unsigned long gma);
+ unsigned long (*gma_to_l4_pdp_index)(unsigned long gma);
+ unsigned long (*gma_to_pml4_index)(unsigned long gma);
+};
+
+struct intel_gvt_gtt {
+ struct intel_gvt_gtt_pte_ops *pte_ops;
+ struct intel_gvt_gtt_gma_ops *gma_ops;
+ int (*mm_alloc_page_table)(struct intel_vgpu_mm *mm);
+ void (*mm_free_page_table)(struct intel_vgpu_mm *mm);
+ struct list_head oos_page_use_list_head;
+ struct list_head oos_page_free_list_head;
+ struct list_head mm_lru_list_head;
+};
+
+enum {
+ INTEL_GVT_MM_GGTT = 0,
+ INTEL_GVT_MM_PPGTT,
+};
+
+typedef enum {
+ GTT_TYPE_INVALID = -1,
+
+ GTT_TYPE_GGTT_PTE,
+
+ GTT_TYPE_PPGTT_PTE_4K_ENTRY,
+ GTT_TYPE_PPGTT_PTE_2M_ENTRY,
+ GTT_TYPE_PPGTT_PTE_1G_ENTRY,
+
+ GTT_TYPE_PPGTT_PTE_ENTRY,
+
+ GTT_TYPE_PPGTT_PDE_ENTRY,
+ GTT_TYPE_PPGTT_PDP_ENTRY,
+ GTT_TYPE_PPGTT_PML4_ENTRY,
+
+ GTT_TYPE_PPGTT_ROOT_ENTRY,
+
+ GTT_TYPE_PPGTT_ROOT_L3_ENTRY,
+ GTT_TYPE_PPGTT_ROOT_L4_ENTRY,
+
+ GTT_TYPE_PPGTT_ENTRY,
+
+ GTT_TYPE_PPGTT_PTE_PT,
+ GTT_TYPE_PPGTT_PDE_PT,
+ GTT_TYPE_PPGTT_PDP_PT,
+ GTT_TYPE_PPGTT_PML4_PT,
+
+ GTT_TYPE_MAX,
+} intel_gvt_gtt_type_t;
+
+struct intel_vgpu_mm {
+ int type;
+ bool initialized;
+ bool shadowed;
+
+ int page_table_entry_type;
+ u32 page_table_entry_size;
+ u32 page_table_entry_cnt;
+ void *virtual_page_table;
+ void *shadow_page_table;
+
+ int page_table_level;
+ bool has_shadow_page_table;
+ u32 pde_base_index;
+
+ struct list_head list;
+ struct kref ref;
+ atomic_t pincount;
+ struct list_head lru_list;
+ struct intel_vgpu *vgpu;
+};
+
+extern struct intel_gvt_gtt_entry *intel_vgpu_mm_get_entry(
+ struct intel_vgpu_mm *mm,
+ void *page_table, struct intel_gvt_gtt_entry *e,
+ unsigned long index);
+
+extern struct intel_gvt_gtt_entry *intel_vgpu_mm_set_entry(
+ struct intel_vgpu_mm *mm,
+ void *page_table, struct intel_gvt_gtt_entry *e,
+ unsigned long index);
+
+#define ggtt_get_guest_entry(mm, e, index) \
+ intel_vgpu_mm_get_entry(mm, mm->virtual_page_table, e, index)
+
+#define ggtt_set_guest_entry(mm, e, index) \
+ intel_vgpu_mm_set_entry(mm, mm->virtual_page_table, e, index)
+
+#define ggtt_get_shadow_entry(mm, e, index) \
+ intel_vgpu_mm_get_entry(mm, mm->shadow_page_table, e, index)
+
+#define ggtt_set_shadow_entry(mm, e, index) \
+ intel_vgpu_mm_set_entry(mm, mm->shadow_page_table, e, index)
+
+#define ppgtt_get_guest_root_entry(mm, e, index) \
+ intel_vgpu_mm_get_entry(mm, mm->virtual_page_table, e, index)
+
+#define ppgtt_set_guest_root_entry(mm, e, index) \
+ intel_vgpu_mm_set_entry(mm, mm->virtual_page_table, e, index)
+
+#define ppgtt_get_shadow_root_entry(mm, e, index) \
+ intel_vgpu_mm_get_entry(mm, mm->shadow_page_table, e, index)
+
+#define ppgtt_set_shadow_root_entry(mm, e, index) \
+ intel_vgpu_mm_set_entry(mm, mm->shadow_page_table, e, index)
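+
+/*
+ * Usage sketch (illustrative): read the guest view of a GGTT entry
+ * through the accessor macros:
+ *
+ *	struct intel_gvt_gtt_entry e;
+ *
+ *	ggtt_get_guest_entry(mm, &e, index);
+ */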
+
+extern struct intel_vgpu_mm *intel_vgpu_create_mm(struct intel_vgpu *vgpu,
+ int mm_type, void *virtual_page_table, int page_table_level,
+ u32 pde_base_index);
+extern void intel_vgpu_destroy_mm(struct kref *mm_ref);
+
+struct intel_vgpu_guest_page;
+
+struct intel_vgpu_scratch_pt {
+ struct page *page;
+ unsigned long page_mfn;
+};
+
+
+struct intel_vgpu_gtt {
+ struct intel_vgpu_mm *ggtt_mm;
+ unsigned long active_ppgtt_mm_bitmap;
+ struct list_head mm_list_head;
+ DECLARE_HASHTABLE(shadow_page_hash_table, INTEL_GVT_GTT_HASH_BITS);
+ DECLARE_HASHTABLE(guest_page_hash_table, INTEL_GVT_GTT_HASH_BITS);
+ atomic_t n_write_protected_guest_page;
+ struct list_head oos_page_list_head;
+ struct list_head post_shadow_list_head;
+ struct intel_vgpu_scratch_pt scratch_pt[GTT_TYPE_MAX];
+
+};
+
+extern int intel_vgpu_init_gtt(struct intel_vgpu *vgpu);
+extern void intel_vgpu_clean_gtt(struct intel_vgpu *vgpu);
+
+extern int intel_gvt_init_gtt(struct intel_gvt *gvt);
+extern void intel_gvt_clean_gtt(struct intel_gvt *gvt);
+
+extern struct intel_vgpu_mm *intel_vgpu_find_ppgtt_mm(struct intel_vgpu *vgpu,
+ int page_table_level, void *root_entry);
+
+struct intel_vgpu_oos_page;
+
+struct intel_vgpu_shadow_page {
+ void *vaddr;
+ struct page *page;
+ int type;
+ struct hlist_node node;
+ unsigned long mfn;
+};
+
+struct intel_vgpu_guest_page {
+ struct hlist_node node;
+ bool writeprotection;
+ unsigned long gfn;
+ int (*handler)(void *, u64, void *, int);
+ void *data;
+ unsigned long write_cnt;
+ struct intel_vgpu_oos_page *oos_page;
+};
+
+struct intel_vgpu_oos_page {
+ struct intel_vgpu_guest_page *guest_page;
+ struct list_head list;
+ struct list_head vm_list;
+ int id;
+ unsigned char mem[GTT_PAGE_SIZE];
+};
+
+#define GTT_ENTRY_NUM_IN_ONE_PAGE 512
+
+struct intel_vgpu_ppgtt_spt {
+ struct intel_vgpu_shadow_page shadow_page;
+ struct intel_vgpu_guest_page guest_page;
+ int guest_page_type;
+ atomic_t refcount;
+ struct intel_vgpu *vgpu;
+ DECLARE_BITMAP(post_shadow_bitmap, GTT_ENTRY_NUM_IN_ONE_PAGE);
+ struct list_head post_shadow_list;
+};
+
+int intel_vgpu_init_guest_page(struct intel_vgpu *vgpu,
+ struct intel_vgpu_guest_page *guest_page,
+ unsigned long gfn,
+ int (*handler)(void *gp, u64, void *, int),
+ void *data);
+
+void intel_vgpu_clean_guest_page(struct intel_vgpu *vgpu,
+ struct intel_vgpu_guest_page *guest_page);
+
+int intel_vgpu_set_guest_page_writeprotection(struct intel_vgpu *vgpu,
+ struct intel_vgpu_guest_page *guest_page);
+
+void intel_vgpu_clear_guest_page_writeprotection(struct intel_vgpu *vgpu,
+ struct intel_vgpu_guest_page *guest_page);
+
+struct intel_vgpu_guest_page *intel_vgpu_find_guest_page(
+ struct intel_vgpu *vgpu, unsigned long gfn);
+
+int intel_vgpu_sync_oos_pages(struct intel_vgpu *vgpu);
+
+int intel_vgpu_flush_post_shadow(struct intel_vgpu *vgpu);
+
+static inline void intel_gvt_mm_reference(struct intel_vgpu_mm *mm)
+{
+ kref_get(&mm->ref);
+}
+
+static inline void intel_gvt_mm_unreference(struct intel_vgpu_mm *mm)
+{
+ kref_put(&mm->ref, intel_vgpu_destroy_mm);
+}
+
+int intel_vgpu_pin_mm(struct intel_vgpu_mm *mm);
+
+void intel_vgpu_unpin_mm(struct intel_vgpu_mm *mm);
+
+unsigned long intel_vgpu_gma_to_gpa(struct intel_vgpu_mm *mm,
+ unsigned long gma);
+
+struct intel_vgpu_mm *intel_vgpu_find_ppgtt_mm(struct intel_vgpu *vgpu,
+ int page_table_level, void *root_entry);
+
+int intel_vgpu_g2v_create_ppgtt_mm(struct intel_vgpu *vgpu,
+ int page_table_level);
+
+int intel_vgpu_g2v_destroy_ppgtt_mm(struct intel_vgpu *vgpu,
+ int page_table_level);
+
+int intel_vgpu_emulate_gtt_mmio_read(struct intel_vgpu *vgpu,
+ unsigned int off, void *p_data, unsigned int bytes);
+
+int intel_vgpu_emulate_gtt_mmio_write(struct intel_vgpu *vgpu,
+ unsigned int off, void *p_data, unsigned int bytes);
+
+#endif /* _GVT_GTT_H_ */
diff --git a/drivers/gpu/drm/i915/gvt/gvt.c b/drivers/gpu/drm/i915/gvt/gvt.c
index 927f4579f5b6..398877c3d2fd 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.c
+++ b/drivers/gpu/drm/i915/gvt/gvt.c
@@ -19,12 +19,23 @@
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
+ *
+ * Authors:
+ * Kevin Tian <kevin.tian@intel.com>
+ * Eddie Dong <eddie.dong@intel.com>
+ *
+ * Contributors:
+ * Niu Bing <bing.niu@intel.com>
+ * Zhi Wang <zhi.a.wang@intel.com>
+ *
*/
#include <linux/types.h>
#include <xen/xen.h>
+#include <linux/kthread.h>
#include "i915_drv.h"
+#include "gvt.h"
struct intel_gvt_host intel_gvt_host;
@@ -33,6 +44,16 @@ static const char * const supported_hypervisors[] = {
[INTEL_GVT_HYPERVISOR_KVM] = "KVM",
};
+static const struct intel_gvt_ops intel_gvt_ops = {
+ .emulate_cfg_read = intel_vgpu_emulate_cfg_read,
+ .emulate_cfg_write = intel_vgpu_emulate_cfg_write,
+ .emulate_mmio_read = intel_vgpu_emulate_mmio_read,
+ .emulate_mmio_write = intel_vgpu_emulate_mmio_write,
+ .vgpu_create = intel_gvt_create_vgpu,
+ .vgpu_destroy = intel_gvt_destroy_vgpu,
+ .vgpu_reset = intel_gvt_reset_vgpu,
+};
+
/**
* intel_gvt_init_host - Load MPT modules and detect if we're running in host
* @gvt: intel gvt device
@@ -47,6 +68,8 @@ static const char * const supported_hypervisors[] = {
*/
int intel_gvt_init_host(void)
{
+ int ret;
+
if (intel_gvt_host.initialized)
return 0;
@@ -61,10 +84,12 @@ int intel_gvt_init_host(void)
symbol_get(xengt_mpt), "xengt");
intel_gvt_host.hypervisor_type = INTEL_GVT_HYPERVISOR_XEN;
} else {
+#if IS_ENABLED(CONFIG_DRM_I915_GVT_KVMGT)
/* not in Xen. Try KVMGT */
intel_gvt_host.mpt = try_then_request_module(
- symbol_get(kvmgt_mpt), "kvm");
+ symbol_get(kvmgt_mpt), "kvmgt");
intel_gvt_host.hypervisor_type = INTEL_GVT_HYPERVISOR_KVM;
+#endif
}
/* Fail to load MPT modules - bail out */
@@ -72,7 +97,8 @@ int intel_gvt_init_host(void)
return -EINVAL;
/* Try to detect if we're running in host instead of VM. */
- if (!intel_gvt_hypervisor_detect_host())
+ ret = intel_gvt_hypervisor_detect_host();
+ if (ret)
return -ENODEV;
gvt_dbg_core("Running with hypervisor %s in host mode\n",
@@ -84,9 +110,67 @@ int intel_gvt_init_host(void)
static void init_device_info(struct intel_gvt *gvt)
{
- if (IS_BROADWELL(gvt->dev_priv))
- gvt->device_info.max_support_vgpus = 8;
- /* This function will grow large in GVT device model patches. */
+ struct intel_gvt_device_info *info = &gvt->device_info;
+ struct pci_dev *pdev = gvt->dev_priv->drm.pdev;
+
+ if (IS_BROADWELL(gvt->dev_priv) || IS_SKYLAKE(gvt->dev_priv)) {
+ info->max_support_vgpus = 8;
+ info->cfg_space_size = 256;
+ info->mmio_size = 2 * 1024 * 1024;
+ info->mmio_bar = 0;
+ info->gtt_start_offset = 8 * 1024 * 1024;
+ info->gtt_entry_size = 8;
+ info->gtt_entry_size_shift = 3;
+ info->gmadr_bytes_in_cmd = 8;
+ info->max_surface_size = 36 * 1024 * 1024;
+ }
+ info->msi_cap_offset = pdev->msi_cap;
+}
+
+static int gvt_service_thread(void *data)
+{
+ struct intel_gvt *gvt = (struct intel_gvt *)data;
+ int ret;
+
+ gvt_dbg_core("service thread start\n");
+
+ while (!kthread_should_stop()) {
+ ret = wait_event_interruptible(gvt->service_thread_wq,
+ kthread_should_stop() || gvt->service_request);
+
+ if (kthread_should_stop())
+ break;
+
+ if (WARN_ONCE(ret, "service thread is woken up by signal.\n"))
+ continue;
+
+ if (test_and_clear_bit(INTEL_GVT_REQUEST_EMULATE_VBLANK,
+ (void *)&gvt->service_request)) {
+ mutex_lock(&gvt->lock);
+ intel_gvt_emulate_vblank(gvt);
+ mutex_unlock(&gvt->lock);
+ }
+ }
+
+ return 0;
+}
+
+static void clean_service_thread(struct intel_gvt *gvt)
+{
+ kthread_stop(gvt->service_thread);
+}
+
+static int init_service_thread(struct intel_gvt *gvt)
+{
+ init_waitqueue_head(&gvt->service_thread_wq);
+
+ gvt->service_thread = kthread_run(gvt_service_thread,
+ gvt, "gvt_service_thread");
+ if (IS_ERR(gvt->service_thread)) {
+ gvt_err("fail to start service thread.\n");
+ return PTR_ERR(gvt->service_thread);
+ }
+ return 0;
}
/**
@@ -99,14 +183,26 @@ static void init_device_info(struct intel_gvt *gvt)
*/
void intel_gvt_clean_device(struct drm_i915_private *dev_priv)
{
- struct intel_gvt *gvt = &dev_priv->gvt;
+ struct intel_gvt *gvt = to_gvt(dev_priv);
- if (WARN_ON(!gvt->initialized))
+ if (WARN_ON(!gvt))
return;
- /* Other de-initialization of GVT components will be introduced. */
+ clean_service_thread(gvt);
+ intel_gvt_clean_cmd_parser(gvt);
+ intel_gvt_clean_sched_policy(gvt);
+ intel_gvt_clean_workload_scheduler(gvt);
+ intel_gvt_clean_opregion(gvt);
+ intel_gvt_clean_gtt(gvt);
+ intel_gvt_clean_irq(gvt);
+ intel_gvt_clean_mmio_info(gvt);
+ intel_gvt_free_firmware(gvt);
+
+ intel_gvt_hypervisor_host_exit(&dev_priv->drm.pdev->dev, gvt);
+ intel_gvt_clean_vgpu_types(gvt);
- gvt->initialized = false;
+ kfree(dev_priv->gvt);
+ dev_priv->gvt = NULL;
}
/**
@@ -122,7 +218,9 @@ void intel_gvt_clean_device(struct drm_i915_private *dev_priv)
*/
int intel_gvt_init_device(struct drm_i915_private *dev_priv)
{
- struct intel_gvt *gvt = &dev_priv->gvt;
+ struct intel_gvt *gvt;
+ int ret;
+
/*
* Cannot initialize GVT device without intel_gvt_host gets
* initialized first.
@@ -130,16 +228,91 @@ int intel_gvt_init_device(struct drm_i915_private *dev_priv)
if (WARN_ON(!intel_gvt_host.initialized))
return -EINVAL;
- if (WARN_ON(gvt->initialized))
+ if (WARN_ON(dev_priv->gvt))
return -EEXIST;
+ gvt = kzalloc(sizeof(struct intel_gvt), GFP_KERNEL);
+ if (!gvt)
+ return -ENOMEM;
+
gvt_dbg_core("init gvt device\n");
+ mutex_init(&gvt->lock);
+ gvt->dev_priv = dev_priv;
+
init_device_info(gvt);
- /*
- * Other initialization of GVT components will be introduce here.
- */
- gvt_dbg_core("gvt device creation is done\n");
- gvt->initialized = true;
+
+ ret = intel_gvt_setup_mmio_info(gvt);
+ if (ret)
+ return ret;
+
+ ret = intel_gvt_load_firmware(gvt);
+ if (ret)
+ goto out_clean_mmio_info;
+
+ ret = intel_gvt_init_irq(gvt);
+ if (ret)
+ goto out_free_firmware;
+
+ ret = intel_gvt_init_gtt(gvt);
+ if (ret)
+ goto out_clean_irq;
+
+ ret = intel_gvt_init_opregion(gvt);
+ if (ret)
+ goto out_clean_gtt;
+
+ ret = intel_gvt_init_workload_scheduler(gvt);
+ if (ret)
+ goto out_clean_opregion;
+
+ ret = intel_gvt_init_sched_policy(gvt);
+ if (ret)
+ goto out_clean_workload_scheduler;
+
+ ret = intel_gvt_init_cmd_parser(gvt);
+ if (ret)
+ goto out_clean_sched_policy;
+
+ ret = init_service_thread(gvt);
+ if (ret)
+ goto out_clean_cmd_parser;
+
+ ret = intel_gvt_init_vgpu_types(gvt);
+ if (ret)
+ goto out_clean_thread;
+
+ ret = intel_gvt_hypervisor_host_init(&dev_priv->drm.pdev->dev, gvt,
+ &intel_gvt_ops);
+ if (ret) {
+ gvt_err("failed to register gvt-g host device: %d\n", ret);
+ goto out_clean_types;
+ }
+
+ gvt_dbg_core("gvt device initialization is done\n");
+ dev_priv->gvt = gvt;
return 0;
+
+out_clean_types:
+ intel_gvt_clean_vgpu_types(gvt);
+out_clean_thread:
+ clean_service_thread(gvt);
+out_clean_cmd_parser:
+ intel_gvt_clean_cmd_parser(gvt);
+out_clean_sched_policy:
+ intel_gvt_clean_sched_policy(gvt);
+out_clean_workload_scheduler:
+ intel_gvt_clean_workload_scheduler(gvt);
+out_clean_opregion:
+ intel_gvt_clean_opregion(gvt);
+out_clean_gtt:
+ intel_gvt_clean_gtt(gvt);
+out_clean_irq:
+ intel_gvt_clean_irq(gvt);
+out_free_firmware:
+ intel_gvt_free_firmware(gvt);
+out_clean_mmio_info:
+ intel_gvt_clean_mmio_info(gvt);
+ kfree(gvt);
+ return ret;
}
diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h
index fb619a6e519d..ad0e9364ee70 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.h
+++ b/drivers/gpu/drm/i915/gvt/gvt.h
@@ -19,6 +19,15 @@
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
+ *
+ * Authors:
+ * Kevin Tian <kevin.tian@intel.com>
+ * Eddie Dong <eddie.dong@intel.com>
+ *
+ * Contributors:
+ * Niu Bing <bing.niu@intel.com>
+ * Zhi Wang <zhi.a.wang@intel.com>
+ *
*/
#ifndef _GVT_H_
@@ -26,6 +35,17 @@
#include "debug.h"
#include "hypercall.h"
+#include "mmio.h"
+#include "reg.h"
+#include "interrupt.h"
+#include "gtt.h"
+#include "display.h"
+#include "edid.h"
+#include "execlist.h"
+#include "scheduler.h"
+#include "sched_policy.h"
+#include "render.h"
+#include "cmd_parser.h"
#define GVT_MAX_VGPU 8
@@ -45,25 +65,383 @@ extern struct intel_gvt_host intel_gvt_host;
/* Describe per-platform limitations. */
struct intel_gvt_device_info {
u32 max_support_vgpus;
- /* This data structure will grow bigger in GVT device model patches */
+ u32 cfg_space_size;
+ u32 mmio_size;
+ u32 mmio_bar;
+ unsigned long msi_cap_offset;
+ u32 gtt_start_offset;
+ u32 gtt_entry_size;
+ u32 gtt_entry_size_shift;
+ int gmadr_bytes_in_cmd;
+ u32 max_surface_size;
+};
+
+/* GM resources owned by a vGPU */
+struct intel_vgpu_gm {
+ u64 aperture_sz;
+ u64 hidden_sz;
+ struct drm_mm_node low_gm_node;
+ struct drm_mm_node high_gm_node;
+};
+
+#define INTEL_GVT_MAX_NUM_FENCES 32
+
+/* Fences owned by a vGPU */
+struct intel_vgpu_fence {
+ struct drm_i915_fence_reg *regs[INTEL_GVT_MAX_NUM_FENCES];
+ u32 base;
+ u32 size;
+};
+
+struct intel_vgpu_mmio {
+ void *vreg;
+ void *sreg;
+ bool disable_warn_untrack;
+};
+
+#define INTEL_GVT_MAX_CFG_SPACE_SZ 256
+#define INTEL_GVT_MAX_BAR_NUM 4
+
+struct intel_vgpu_pci_bar {
+ u64 size;
+ bool tracked;
+};
+
+struct intel_vgpu_cfg_space {
+ unsigned char virtual_cfg_space[INTEL_GVT_MAX_CFG_SPACE_SZ];
+ struct intel_vgpu_pci_bar bar[INTEL_GVT_MAX_BAR_NUM];
+};
+
+#define vgpu_cfg_space(vgpu) ((vgpu)->cfg_space.virtual_cfg_space)
+
+#define INTEL_GVT_MAX_PIPE 4
+
+struct intel_vgpu_irq {
+ bool irq_warn_once[INTEL_GVT_EVENT_MAX];
+ DECLARE_BITMAP(flip_done_event[INTEL_GVT_MAX_PIPE],
+ INTEL_GVT_EVENT_MAX);
+};
+
+struct intel_vgpu_opregion {
+ void *va;
+ u32 gfn[INTEL_GVT_OPREGION_PAGES];
+ struct page *pages[INTEL_GVT_OPREGION_PAGES];
+};
+
+#define vgpu_opregion(vgpu) (&(vgpu->opregion))
+
+#define INTEL_GVT_MAX_PORT 5
+
+struct intel_vgpu_display {
+ struct intel_vgpu_i2c_edid i2c_edid;
+ struct intel_vgpu_port ports[INTEL_GVT_MAX_PORT];
+ struct intel_vgpu_sbi sbi;
};
struct intel_vgpu {
struct intel_gvt *gvt;
int id;
unsigned long handle; /* vGPU handle used by hypervisor MPT modules */
+ bool active;
+ bool resetting;
+ void *sched_data;
+
+ struct intel_vgpu_fence fence;
+ struct intel_vgpu_gm gm;
+ struct intel_vgpu_cfg_space cfg_space;
+ struct intel_vgpu_mmio mmio;
+ struct intel_vgpu_irq irq;
+ struct intel_vgpu_gtt gtt;
+ struct intel_vgpu_opregion opregion;
+ struct intel_vgpu_display display;
+ struct intel_vgpu_execlist execlist[I915_NUM_ENGINES];
+ struct list_head workload_q_head[I915_NUM_ENGINES];
+ struct kmem_cache *workloads;
+ atomic_t running_workload_num;
+ DECLARE_BITMAP(tlb_handle_pending, I915_NUM_ENGINES);
+ struct i915_gem_context *shadow_ctx;
+ struct notifier_block shadow_ctx_notifier_block;
+
+#if IS_ENABLED(CONFIG_DRM_I915_GVT_KVMGT)
+ struct {
+ struct mdev_device *mdev;
+ struct vfio_region *region;
+ int num_regions;
+ struct eventfd_ctx *intx_trigger;
+ struct eventfd_ctx *msi_trigger;
+ struct rb_root cache;
+ struct mutex cache_lock;
+ struct notifier_block iommu_notifier;
+ struct notifier_block group_notifier;
+ struct kvm *kvm;
+ struct work_struct release_work;
+ } vdev;
+#endif
+};
+
+struct intel_gvt_gm {
+ unsigned long vgpu_allocated_low_gm_size;
+ unsigned long vgpu_allocated_high_gm_size;
+};
+
+struct intel_gvt_fence {
+ unsigned long vgpu_allocated_fence_num;
+};
+
+#define INTEL_GVT_MMIO_HASH_BITS 9
+
+struct intel_gvt_mmio {
+ u32 *mmio_attribute;
+ DECLARE_HASHTABLE(mmio_info_table, INTEL_GVT_MMIO_HASH_BITS);
+};
+
+struct intel_gvt_firmware {
+ void *cfg_space;
+ void *mmio;
+ bool firmware_loaded;
+};
+
+struct intel_gvt_opregion {
+ void __iomem *opregion_va;
+ u32 opregion_pa;
+};
+
+#define NR_MAX_INTEL_VGPU_TYPES 20
+struct intel_vgpu_type {
+ char name[16];
+ unsigned int max_instance;
+ unsigned int avail_instance;
+ unsigned int low_gm_size;
+ unsigned int high_gm_size;
+ unsigned int fence;
};
struct intel_gvt {
struct mutex lock;
- bool initialized;
-
struct drm_i915_private *dev_priv;
struct idr vgpu_idr; /* vGPU IDR pool */
struct intel_gvt_device_info device_info;
+ struct intel_gvt_gm gm;
+ struct intel_gvt_fence fence;
+ struct intel_gvt_mmio mmio;
+ struct intel_gvt_firmware firmware;
+ struct intel_gvt_irq irq;
+ struct intel_gvt_gtt gtt;
+ struct intel_gvt_opregion opregion;
+ struct intel_gvt_workload_scheduler scheduler;
+ DECLARE_HASHTABLE(cmd_table, GVT_CMD_HASH_BITS);
+ struct intel_vgpu_type *types;
+ unsigned int num_types;
+
+ struct task_struct *service_thread;
+ wait_queue_head_t service_thread_wq;
+ unsigned long service_request;
+};
+
+static inline struct intel_gvt *to_gvt(struct drm_i915_private *i915)
+{
+ return i915->gvt;
+}
+
+enum {
+ INTEL_GVT_REQUEST_EMULATE_VBLANK = 0,
};
+static inline void intel_gvt_request_service(struct intel_gvt *gvt,
+ int service)
+{
+ set_bit(service, (void *)&gvt->service_request);
+ wake_up(&gvt->service_thread_wq);
+}
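+
+/*
+ * Usage sketch (illustrative): interrupt-time code can queue work for
+ * the service thread instead of emulating inline:
+ *
+ *	intel_gvt_request_service(gvt, INTEL_GVT_REQUEST_EMULATE_VBLANK);
+ */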
+
+void intel_gvt_free_firmware(struct intel_gvt *gvt);
+int intel_gvt_load_firmware(struct intel_gvt *gvt);
+
+/* Aperture/GM space definitions for GVT device */
+#define MB_TO_BYTES(mb) ((mb) << 20ULL)
+#define BYTES_TO_MB(b) ((b) >> 20ULL)
+
+#define HOST_LOW_GM_SIZE MB_TO_BYTES(128)
+#define HOST_HIGH_GM_SIZE MB_TO_BYTES(384)
+#define HOST_FENCE 4
+
+/* Aperture/GM space definitions for GVT device */
+#define gvt_aperture_sz(gvt) (gvt->dev_priv->ggtt.mappable_end)
+#define gvt_aperture_pa_base(gvt) (gvt->dev_priv->ggtt.mappable_base)
+
+#define gvt_ggtt_gm_sz(gvt) (gvt->dev_priv->ggtt.base.total)
+#define gvt_ggtt_sz(gvt) \
+ ((gvt->dev_priv->ggtt.base.total >> PAGE_SHIFT) << 3)
+#define gvt_hidden_sz(gvt) (gvt_ggtt_gm_sz(gvt) - gvt_aperture_sz(gvt))
+
+#define gvt_aperture_gmadr_base(gvt) (0)
+#define gvt_aperture_gmadr_end(gvt) (gvt_aperture_gmadr_base(gvt) \
+ + gvt_aperture_sz(gvt) - 1)
+
+#define gvt_hidden_gmadr_base(gvt) (gvt_aperture_gmadr_base(gvt) \
+ + gvt_aperture_sz(gvt))
+#define gvt_hidden_gmadr_end(gvt) (gvt_hidden_gmadr_base(gvt) \
+ + gvt_hidden_sz(gvt) - 1)
+
+#define gvt_fence_sz(gvt) (gvt->dev_priv->num_fence_regs)
+
+/* Aperture/GM space definitions for vGPU */
+#define vgpu_aperture_offset(vgpu) ((vgpu)->gm.low_gm_node.start)
+#define vgpu_hidden_offset(vgpu) ((vgpu)->gm.high_gm_node.start)
+#define vgpu_aperture_sz(vgpu) ((vgpu)->gm.aperture_sz)
+#define vgpu_hidden_sz(vgpu) ((vgpu)->gm.hidden_sz)
+
+#define vgpu_aperture_pa_base(vgpu) \
+ (gvt_aperture_pa_base(vgpu->gvt) + vgpu_aperture_offset(vgpu))
+
+#define vgpu_ggtt_gm_sz(vgpu) ((vgpu)->gm.aperture_sz + (vgpu)->gm.hidden_sz)
+
+#define vgpu_aperture_pa_end(vgpu) \
+ (vgpu_aperture_pa_base(vgpu) + vgpu_aperture_sz(vgpu) - 1)
+
+#define vgpu_aperture_gmadr_base(vgpu) (vgpu_aperture_offset(vgpu))
+#define vgpu_aperture_gmadr_end(vgpu) \
+ (vgpu_aperture_gmadr_base(vgpu) + vgpu_aperture_sz(vgpu) - 1)
+
+#define vgpu_hidden_gmadr_base(vgpu) (vgpu_hidden_offset(vgpu))
+#define vgpu_hidden_gmadr_end(vgpu) \
+ (vgpu_hidden_gmadr_base(vgpu) + vgpu_hidden_sz(vgpu) - 1)
+
+#define vgpu_fence_base(vgpu) (vgpu->fence.base)
+#define vgpu_fence_sz(vgpu) (vgpu->fence.size)
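+
+/*
+ * Layout sketch (illustrative): host graphics memory is a CPU-mappable
+ * aperture followed by hidden (high) GM:
+ *
+ *	gmadr 0              aperture_sz            aperture_sz+hidden_sz
+ *	  |-------- aperture --------|--------- hidden GM ---------|
+ *
+ * Each vGPU owns one drm_mm node in each region; the vgpu_* macros
+ * above resolve a vGPU's slice of this layout.
+ */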
+
+struct intel_vgpu_creation_params {
+ __u64 handle;
+ __u64 low_gm_sz; /* in MB */
+ __u64 high_gm_sz; /* in MB */
+ __u64 fence_sz;
+ __s32 primary;
+ __u64 vgpu_id;
+};
+
+int intel_vgpu_alloc_resource(struct intel_vgpu *vgpu,
+ struct intel_vgpu_creation_params *param);
+void intel_vgpu_free_resource(struct intel_vgpu *vgpu);
+void intel_vgpu_write_fence(struct intel_vgpu *vgpu,
+ u32 fence, u64 value);
+
+/* Macros for easily accessing vGPU virtual/shadow register */
+#define vgpu_vreg(vgpu, reg) \
+ (*(u32 *)(vgpu->mmio.vreg + INTEL_GVT_MMIO_OFFSET(reg)))
+#define vgpu_vreg8(vgpu, reg) \
+ (*(u8 *)(vgpu->mmio.vreg + INTEL_GVT_MMIO_OFFSET(reg)))
+#define vgpu_vreg16(vgpu, reg) \
+ (*(u16 *)(vgpu->mmio.vreg + INTEL_GVT_MMIO_OFFSET(reg)))
+#define vgpu_vreg64(vgpu, reg) \
+ (*(u64 *)(vgpu->mmio.vreg + INTEL_GVT_MMIO_OFFSET(reg)))
+#define vgpu_sreg(vgpu, reg) \
+ (*(u32 *)(vgpu->mmio.sreg + INTEL_GVT_MMIO_OFFSET(reg)))
+#define vgpu_sreg8(vgpu, reg) \
+ (*(u8 *)(vgpu->mmio.sreg + INTEL_GVT_MMIO_OFFSET(reg)))
+#define vgpu_sreg16(vgpu, reg) \
+ (*(u16 *)(vgpu->mmio.sreg + INTEL_GVT_MMIO_OFFSET(reg)))
+#define vgpu_sreg64(vgpu, reg) \
+ (*(u64 *)(vgpu->mmio.sreg + INTEL_GVT_MMIO_OFFSET(reg)))
+
+#define for_each_active_vgpu(gvt, vgpu, id) \
+ idr_for_each_entry((&(gvt)->vgpu_idr), (vgpu), (id)) \
+ for_each_if(vgpu->active)
+
+static inline void intel_vgpu_write_pci_bar(struct intel_vgpu *vgpu,
+ u32 offset, u32 val, bool low)
+{
+ u32 *pval;
+
+ /* BAR offset should be 32-bit aligned */
+ offset = rounddown(offset, 4);
+ pval = (u32 *)(vgpu_cfg_space(vgpu) + offset);
+
+ if (low) {
+ /*
+ * only update bits 31..4 and
+ * leave bits 3..0 unchanged.
+ */
+ *pval = (val & GENMASK(31, 4)) | (*pval & GENMASK(3, 0));
+ } else {
+ *pval = val;
+ }
+}
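+
+/*
+ * Worked example (illustrative): if the stored low dword is 0xf0000004
+ * and the guest writes 0xffffffff while sizing the BAR, the stored
+ * value becomes (0xffffffff & GENMASK(31, 4)) | 0x4 = 0xfffffff4.
+ */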
+
+int intel_gvt_init_vgpu_types(struct intel_gvt *gvt);
+void intel_gvt_clean_vgpu_types(struct intel_gvt *gvt);
+
+struct intel_vgpu *intel_gvt_create_vgpu(struct intel_gvt *gvt,
+ struct intel_vgpu_type *type);
+void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu);
+void intel_gvt_reset_vgpu(struct intel_vgpu *vgpu);
+
+/* validating GM functions */
+#define vgpu_gmadr_is_aperture(vgpu, gmadr) \
+ ((gmadr >= vgpu_aperture_gmadr_base(vgpu)) && \
+ (gmadr <= vgpu_aperture_gmadr_end(vgpu)))
+
+#define vgpu_gmadr_is_hidden(vgpu, gmadr) \
+ ((gmadr >= vgpu_hidden_gmadr_base(vgpu)) && \
+ (gmadr <= vgpu_hidden_gmadr_end(vgpu)))
+
+#define vgpu_gmadr_is_valid(vgpu, gmadr) \
+ ((vgpu_gmadr_is_aperture(vgpu, gmadr) || \
+ (vgpu_gmadr_is_hidden(vgpu, gmadr))))
+
+#define gvt_gmadr_is_aperture(gvt, gmadr) \
+ ((gmadr >= gvt_aperture_gmadr_base(gvt)) && \
+ (gmadr <= gvt_aperture_gmadr_end(gvt)))
+
+#define gvt_gmadr_is_hidden(gvt, gmadr) \
+ ((gmadr >= gvt_hidden_gmadr_base(gvt)) && \
+ (gmadr <= gvt_hidden_gmadr_end(gvt)))
+
+#define gvt_gmadr_is_valid(gvt, gmadr) \
+ (gvt_gmadr_is_aperture(gvt, gmadr) || \
+ gvt_gmadr_is_hidden(gvt, gmadr))
+
+bool intel_gvt_ggtt_validate_range(struct intel_vgpu *vgpu, u64 addr, u32 size);
+int intel_gvt_ggtt_gmadr_g2h(struct intel_vgpu *vgpu, u64 g_addr, u64 *h_addr);
+int intel_gvt_ggtt_gmadr_h2g(struct intel_vgpu *vgpu, u64 h_addr, u64 *g_addr);
+int intel_gvt_ggtt_index_g2h(struct intel_vgpu *vgpu, unsigned long g_index,
+ unsigned long *h_index);
+int intel_gvt_ggtt_h2g_index(struct intel_vgpu *vgpu, unsigned long h_index,
+ unsigned long *g_index);
+
+int intel_vgpu_emulate_cfg_read(struct intel_vgpu *vgpu, unsigned int offset,
+ void *p_data, unsigned int bytes);
+
+int intel_vgpu_emulate_cfg_write(struct intel_vgpu *vgpu, unsigned int offset,
+ void *p_data, unsigned int bytes);
+
+void intel_gvt_clean_opregion(struct intel_gvt *gvt);
+int intel_gvt_init_opregion(struct intel_gvt *gvt);
+
+void intel_vgpu_clean_opregion(struct intel_vgpu *vgpu);
+int intel_vgpu_init_opregion(struct intel_vgpu *vgpu, u32 gpa);
+
+int intel_vgpu_emulate_opregion_request(struct intel_vgpu *vgpu, u32 swsci);
+int setup_vgpu_mmio(struct intel_vgpu *vgpu);
+void populate_pvinfo_page(struct intel_vgpu *vgpu);
+
+struct intel_gvt_ops {
+ int (*emulate_cfg_read)(struct intel_vgpu *, unsigned int, void *,
+ unsigned int);
+ int (*emulate_cfg_write)(struct intel_vgpu *, unsigned int, void *,
+ unsigned int);
+ int (*emulate_mmio_read)(struct intel_vgpu *, u64, void *,
+ unsigned int);
+ int (*emulate_mmio_write)(struct intel_vgpu *, u64, void *,
+ unsigned int);
+ struct intel_vgpu *(*vgpu_create)(struct intel_gvt *,
+ struct intel_vgpu_type *);
+ void (*vgpu_destroy)(struct intel_vgpu *);
+ void (*vgpu_reset)(struct intel_vgpu *);
+};
+
#include "mpt.h"
#endif
diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c
new file mode 100644
index 000000000000..522809710312
--- /dev/null
+++ b/drivers/gpu/drm/i915/gvt/handlers.c
@@ -0,0 +1,2848 @@
+/*
+ * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ * Kevin Tian <kevin.tian@intel.com>
+ * Eddie Dong <eddie.dong@intel.com>
+ * Zhiyuan Lv <zhiyuan.lv@intel.com>
+ *
+ * Contributors:
+ * Min He <min.he@intel.com>
+ * Tina Zhang <tina.zhang@intel.com>
+ * Pei Zhang <pei.zhang@intel.com>
+ * Niu Bing <bing.niu@intel.com>
+ * Ping Gao <ping.a.gao@intel.com>
+ * Zhi Wang <zhi.a.wang@intel.com>
+ *
+ *
+ */
+
+#include "i915_drv.h"
+#include "gvt.h"
+#include "i915_pvinfo.h"
+
+/* XXX FIXME i915 has changed PP_XXX definition */
+#define PCH_PP_STATUS _MMIO(0xc7200)
+#define PCH_PP_CONTROL _MMIO(0xc7204)
+#define PCH_PP_ON_DELAYS _MMIO(0xc7208)
+#define PCH_PP_OFF_DELAYS _MMIO(0xc720c)
+#define PCH_PP_DIVISOR _MMIO(0xc7210)
+
+/* Register contains RO bits */
+#define F_RO (1 << 0)
+/* Register contains graphics address */
+#define F_GMADR (1 << 1)
+/* Mode mask registers with high 16 bits as the mask bits */
+#define F_MODE_MASK (1 << 2)
+/* This reg can be accessed by GPU commands */
+#define F_CMD_ACCESS (1 << 3)
+/* This reg has been accessed by a VM */
+#define F_ACCESSED (1 << 4)
+/* This reg has been accessed through GPU commands */
+#define F_CMD_ACCESSED (1 << 5)
+/* This reg could be accessed by unaligned address */
+#define F_UNALIGN (1 << 6)
+
+unsigned long intel_gvt_get_device_type(struct intel_gvt *gvt)
+{
+ if (IS_BROADWELL(gvt->dev_priv))
+ return D_BDW;
+ else if (IS_SKYLAKE(gvt->dev_priv))
+ return D_SKL;
+
+ return 0;
+}
+
+bool intel_gvt_match_device(struct intel_gvt *gvt,
+ unsigned long device)
+{
+ return intel_gvt_get_device_type(gvt) & device;
+}
+
+static void read_vreg(struct intel_vgpu *vgpu, unsigned int offset,
+ void *p_data, unsigned int bytes)
+{
+ memcpy(p_data, &vgpu_vreg(vgpu, offset), bytes);
+}
+
+static void write_vreg(struct intel_vgpu *vgpu, unsigned int offset,
+ void *p_data, unsigned int bytes)
+{
+ memcpy(&vgpu_vreg(vgpu, offset), p_data, bytes);
+}
+
+static int new_mmio_info(struct intel_gvt *gvt,
+ u32 offset, u32 flags, u32 size,
+ u32 addr_mask, u32 ro_mask, u32 device,
+ void *read, void *write)
+{
+ struct intel_gvt_mmio_info *info, *p;
+ u32 start, end, i;
+
+ if (!intel_gvt_match_device(gvt, device))
+ return 0;
+
+ if (WARN_ON(!IS_ALIGNED(offset, 4)))
+ return -EINVAL;
+
+ start = offset;
+ end = offset + size;
+
+ for (i = start; i < end; i += 4) {
+ info = kzalloc(sizeof(*info), GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+
+ info->offset = i;
+ p = intel_gvt_find_mmio_info(gvt, info->offset);
+ if (p)
+ gvt_err("dup mmio definition offset %x\n",
+ info->offset);
+ info->size = size;
+ info->length = (i + 4) < end ? 4 : (end - i);
+ info->addr_mask = addr_mask;
+ info->device = device;
+ info->read = read ? read : intel_vgpu_default_mmio_read;
+ info->write = write ? write : intel_vgpu_default_mmio_write;
+ gvt->mmio.mmio_attribute[info->offset / 4] = flags;
+ INIT_HLIST_NODE(&info->node);
+ hash_add(gvt->mmio.mmio_info_table, &info->node, info->offset);
+ }
+ return 0;
+}
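+
+/*
+ * Note: new_mmio_info() splits a register block into 4-byte entries and
+ * hashes each one by offset, so lookup during MMIO emulation is a
+ * single hash probe; duplicate registrations are reported via gvt_err()
+ * but not rejected.
+ */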
+
+static int render_mmio_to_ring_id(struct intel_gvt *gvt, unsigned int reg)
+{
+ enum intel_engine_id id;
+ struct intel_engine_cs *engine;
+
+ reg &= ~GENMASK(11, 0);
+ for_each_engine(engine, gvt->dev_priv, id) {
+ if (engine->mmio_base == reg)
+ return id;
+ }
+ return -1;
+}
+
+#define offset_to_fence_num(offset) \
+ ((offset - i915_mmio_reg_offset(FENCE_REG_GEN6_LO(0))) >> 3)
+
+#define fence_num_to_offset(num) \
+ (num * 8 + i915_mmio_reg_offset(FENCE_REG_GEN6_LO(0)))
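+
+/*
+ * Each GEN6 fence register is 64 bits wide (two dwords), hence the
+ * ">> 3" and "* 8" above; e.g. fence 2 lives at
+ * FENCE_REG_GEN6_LO(0) + 16.
+ */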
+
+static int sanitize_fence_mmio_access(struct intel_vgpu *vgpu,
+ unsigned int fence_num, void *p_data, unsigned int bytes)
+{
+ if (fence_num >= vgpu_fence_sz(vgpu)) {
+ gvt_err("vgpu%d: found oob fence register access\n",
+ vgpu->id);
+ gvt_err("vgpu%d: total fence num %d access fence num %d\n",
+ vgpu->id, vgpu_fence_sz(vgpu), fence_num);
+ memset(p_data, 0, bytes);
+ }
+ return 0;
+}
+
+static int fence_mmio_read(struct intel_vgpu *vgpu, unsigned int off,
+ void *p_data, unsigned int bytes)
+{
+ int ret;
+
+ ret = sanitize_fence_mmio_access(vgpu, offset_to_fence_num(off),
+ p_data, bytes);
+ if (ret)
+ return ret;
+ read_vreg(vgpu, off, p_data, bytes);
+ return 0;
+}
+
+static int fence_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
+ void *p_data, unsigned int bytes)
+{
+ unsigned int fence_num = offset_to_fence_num(off);
+ int ret;
+
+ ret = sanitize_fence_mmio_access(vgpu, fence_num, p_data, bytes);
+ if (ret)
+ return ret;
+ write_vreg(vgpu, off, p_data, bytes);
+
+ intel_vgpu_write_fence(vgpu, fence_num,
+ vgpu_vreg64(vgpu, fence_num_to_offset(fence_num)));
+ return 0;
+}
+
+#define CALC_MODE_MASK_REG(old, new) \
+ (((new) & GENMASK(31, 16)) \
+ | ((((old) & GENMASK(15, 0)) & ~((new) >> 16)) \
+ | ((new) & ((new) >> 16))))
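+
+/*
+ * Mode-mask registers carry a write mask in the high 16 bits: only low
+ * bits whose mask bit is set take the new value. Worked example with
+ * old = 0x0000000f, new = 0x00030001: the mask 0x0003 selects bits 1:0,
+ * so the result is 0x0003000d (bit 0 set, bit 1 cleared, bits 3:2 kept).
+ */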
+
+static int mul_force_wake_write(struct intel_vgpu *vgpu,
+ unsigned int offset, void *p_data, unsigned int bytes)
+{
+ u32 old, new;
+ uint32_t ack_reg_offset;
+
+ old = vgpu_vreg(vgpu, offset);
+ new = CALC_MODE_MASK_REG(old, *(u32 *)p_data);
+
+ if (IS_SKYLAKE(vgpu->gvt->dev_priv)) {
+ switch (offset) {
+ case FORCEWAKE_RENDER_GEN9_REG:
+ ack_reg_offset = FORCEWAKE_ACK_RENDER_GEN9_REG;
+ break;
+ case FORCEWAKE_BLITTER_GEN9_REG:
+ ack_reg_offset = FORCEWAKE_ACK_BLITTER_GEN9_REG;
+ break;
+ case FORCEWAKE_MEDIA_GEN9_REG:
+ ack_reg_offset = FORCEWAKE_ACK_MEDIA_GEN9_REG;
+ break;
+ default:
+ /* should not hit here */
+ gvt_err("invalid forcewake offset 0x%x\n", offset);
+ return 1;
+ }
+ } else {
+ ack_reg_offset = FORCEWAKE_ACK_HSW_REG;
+ }
+
+ vgpu_vreg(vgpu, offset) = new;
+ vgpu_vreg(vgpu, ack_reg_offset) = (new & GENMASK(15, 0));
+ return 0;
+}
+
+static int handle_device_reset(struct intel_vgpu *vgpu, unsigned int offset,
+ void *p_data, unsigned int bytes, unsigned long bitmap)
+{
+ struct intel_gvt_workload_scheduler *scheduler =
+ &vgpu->gvt->scheduler;
+
+ vgpu->resetting = true;
+
+ intel_vgpu_stop_schedule(vgpu);
+ /*
+ * current_vgpu will be set to NULL after stopping the
+ * scheduler when the reset is triggered by the current vgpu.
+ */
+ if (scheduler->current_vgpu == NULL) {
+ mutex_unlock(&vgpu->gvt->lock);
+ intel_gvt_wait_vgpu_idle(vgpu);
+ mutex_lock(&vgpu->gvt->lock);
+ }
+
+ intel_vgpu_reset_execlist(vgpu, bitmap);
+
+ /* full GPU reset */
+ if (bitmap == 0xff) {
+ mutex_unlock(&vgpu->gvt->lock);
+ intel_vgpu_clean_gtt(vgpu);
+ mutex_lock(&vgpu->gvt->lock);
+ setup_vgpu_mmio(vgpu);
+ populate_pvinfo_page(vgpu);
+ intel_vgpu_init_gtt(vgpu);
+ }
+
+ vgpu->resetting = false;
+
+ return 0;
+}
+
+static int gdrst_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
+ void *p_data, unsigned int bytes)
+{
+ u32 data;
+ u64 bitmap = 0;
+
+ write_vreg(vgpu, offset, p_data, bytes);
+ data = vgpu_vreg(vgpu, offset);
+
+ if (data & GEN6_GRDOM_FULL) {
+ gvt_dbg_mmio("vgpu%d: request full GPU reset\n", vgpu->id);
+ bitmap = 0xff;
+ }
+ if (data & GEN6_GRDOM_RENDER) {
+ gvt_dbg_mmio("vgpu%d: request RCS reset\n", vgpu->id);
+ bitmap |= (1 << RCS);
+ }
+ if (data & GEN6_GRDOM_MEDIA) {
+ gvt_dbg_mmio("vgpu%d: request VCS reset\n", vgpu->id);
+ bitmap |= (1 << VCS);
+ }
+ if (data & GEN6_GRDOM_BLT) {
+ gvt_dbg_mmio("vgpu%d: request BCS Reset\n", vgpu->id);
+ bitmap |= (1 << BCS);
+ }
+ if (data & GEN6_GRDOM_VECS) {
+ gvt_dbg_mmio("vgpu%d: request VECS Reset\n", vgpu->id);
+ bitmap |= (1 << VECS);
+ }
+ if (data & GEN8_GRDOM_MEDIA2) {
+ gvt_dbg_mmio("vgpu%d: request VCS2 Reset\n", vgpu->id);
+ if (HAS_BSD2(vgpu->gvt->dev_priv))
+ bitmap |= (1 << VCS2);
+ }
+ return handle_device_reset(vgpu, offset, p_data, bytes, bitmap);
+}
+
+static int gmbus_mmio_read(struct intel_vgpu *vgpu, unsigned int offset,
+ void *p_data, unsigned int bytes)
+{
+ return intel_gvt_i2c_handle_gmbus_read(vgpu, offset, p_data, bytes);
+}
+
+static int gmbus_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
+ void *p_data, unsigned int bytes)
+{
+ return intel_gvt_i2c_handle_gmbus_write(vgpu, offset, p_data, bytes);
+}
+
+static int pch_pp_control_mmio_write(struct intel_vgpu *vgpu,
+ unsigned int offset, void *p_data, unsigned int bytes)
+{
+ write_vreg(vgpu, offset, p_data, bytes);
+
+ if (vgpu_vreg(vgpu, offset) & PANEL_POWER_ON) {
+ vgpu_vreg(vgpu, PCH_PP_STATUS) |= PP_ON;
+ vgpu_vreg(vgpu, PCH_PP_STATUS) |= PP_SEQUENCE_STATE_ON_IDLE;
+ vgpu_vreg(vgpu, PCH_PP_STATUS) &= ~PP_SEQUENCE_POWER_DOWN;
+ vgpu_vreg(vgpu, PCH_PP_STATUS) &= ~PP_CYCLE_DELAY_ACTIVE;
+
+ } else
+ vgpu_vreg(vgpu, PCH_PP_STATUS) &=
+ ~(PP_ON | PP_SEQUENCE_POWER_DOWN
+ | PP_CYCLE_DELAY_ACTIVE);
+ return 0;
+}
+
+static int transconf_mmio_write(struct intel_vgpu *vgpu,
+ unsigned int offset, void *p_data, unsigned int bytes)
+{
+ write_vreg(vgpu, offset, p_data, bytes);
+
+ if (vgpu_vreg(vgpu, offset) & TRANS_ENABLE)
+ vgpu_vreg(vgpu, offset) |= TRANS_STATE_ENABLE;
+ else
+ vgpu_vreg(vgpu, offset) &= ~TRANS_STATE_ENABLE;
+ return 0;
+}
+
+static int lcpll_ctl_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
+ void *p_data, unsigned int bytes)
+{
+ write_vreg(vgpu, offset, p_data, bytes);
+
+ if (vgpu_vreg(vgpu, offset) & LCPLL_PLL_DISABLE)
+ vgpu_vreg(vgpu, offset) &= ~LCPLL_PLL_LOCK;
+ else
+ vgpu_vreg(vgpu, offset) |= LCPLL_PLL_LOCK;
+
+ if (vgpu_vreg(vgpu, offset) & LCPLL_CD_SOURCE_FCLK)
+ vgpu_vreg(vgpu, offset) |= LCPLL_CD_SOURCE_FCLK_DONE;
+ else
+ vgpu_vreg(vgpu, offset) &= ~LCPLL_CD_SOURCE_FCLK_DONE;
+
+ return 0;
+}
+
+static int dpy_reg_mmio_read(struct intel_vgpu *vgpu, unsigned int offset,
+ void *p_data, unsigned int bytes)
+{
+ *(u32 *)p_data = (1 << 17);
+ return 0;
+}
+
+static int dpy_reg_mmio_read_2(struct intel_vgpu *vgpu, unsigned int offset,
+ void *p_data, unsigned int bytes)
+{
+ *(u32 *)p_data = 3;
+ return 0;
+}
+
+static int dpy_reg_mmio_read_3(struct intel_vgpu *vgpu, unsigned int offset,
+ void *p_data, unsigned int bytes)
+{
+ *(u32 *)p_data = (0x2f << 16);
+ return 0;
+}
+
+static int pipeconf_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
+ void *p_data, unsigned int bytes)
+{
+ u32 data;
+
+ write_vreg(vgpu, offset, p_data, bytes);
+ data = vgpu_vreg(vgpu, offset);
+
+ if (data & PIPECONF_ENABLE)
+ vgpu_vreg(vgpu, offset) |= I965_PIPECONF_ACTIVE;
+ else
+ vgpu_vreg(vgpu, offset) &= ~I965_PIPECONF_ACTIVE;
+ intel_gvt_check_vblank_emulation(vgpu->gvt);
+ return 0;
+}
+
+static int ddi_buf_ctl_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
+ void *p_data, unsigned int bytes)
+{
+ write_vreg(vgpu, offset, p_data, bytes);
+
+ if (vgpu_vreg(vgpu, offset) & DDI_BUF_CTL_ENABLE) {
+ vgpu_vreg(vgpu, offset) &= ~DDI_BUF_IS_IDLE;
+ } else {
+ vgpu_vreg(vgpu, offset) |= DDI_BUF_IS_IDLE;
+ if (offset == i915_mmio_reg_offset(DDI_BUF_CTL(PORT_E)))
+ vgpu_vreg(vgpu, DP_TP_STATUS(PORT_E))
+ &= ~DP_TP_STATUS_AUTOTRAIN_DONE;
+ }
+ return 0;
+}
+
+static int fdi_rx_iir_mmio_write(struct intel_vgpu *vgpu,
+ unsigned int offset, void *p_data, unsigned int bytes)
+{
+ vgpu_vreg(vgpu, offset) &= ~*(u32 *)p_data;
+ return 0;
+}
+
+#define FDI_LINK_TRAIN_PATTERN1 0
+#define FDI_LINK_TRAIN_PATTERN2 1
+
+static int fdi_auto_training_started(struct intel_vgpu *vgpu)
+{
+ u32 ddi_buf_ctl = vgpu_vreg(vgpu, DDI_BUF_CTL(PORT_E));
+ u32 rx_ctl = vgpu_vreg(vgpu, _FDI_RXA_CTL);
+ u32 tx_ctl = vgpu_vreg(vgpu, DP_TP_CTL(PORT_E));
+
+ if ((ddi_buf_ctl & DDI_BUF_CTL_ENABLE) &&
+ (rx_ctl & FDI_RX_ENABLE) &&
+ (rx_ctl & FDI_AUTO_TRAINING) &&
+ (tx_ctl & DP_TP_CTL_ENABLE) &&
+ (tx_ctl & DP_TP_CTL_FDI_AUTOTRAIN))
+ return 1;
+ else
+ return 0;
+}
+
+static int check_fdi_rx_train_status(struct intel_vgpu *vgpu,
+ enum pipe pipe, unsigned int train_pattern)
+{
+ i915_reg_t fdi_rx_imr, fdi_tx_ctl, fdi_rx_ctl;
+ unsigned int fdi_rx_check_bits, fdi_tx_check_bits;
+ unsigned int fdi_rx_train_bits, fdi_tx_train_bits;
+ unsigned int fdi_iir_check_bits;
+
+ fdi_rx_imr = FDI_RX_IMR(pipe);
+ fdi_tx_ctl = FDI_TX_CTL(pipe);
+ fdi_rx_ctl = FDI_RX_CTL(pipe);
+
+ if (train_pattern == FDI_LINK_TRAIN_PATTERN1) {
+ fdi_rx_train_bits = FDI_LINK_TRAIN_PATTERN_1_CPT;
+ fdi_tx_train_bits = FDI_LINK_TRAIN_PATTERN_1;
+ fdi_iir_check_bits = FDI_RX_BIT_LOCK;
+ } else if (train_pattern == FDI_LINK_TRAIN_PATTERN2) {
+ fdi_rx_train_bits = FDI_LINK_TRAIN_PATTERN_2_CPT;
+ fdi_tx_train_bits = FDI_LINK_TRAIN_PATTERN_2;
+ fdi_iir_check_bits = FDI_RX_SYMBOL_LOCK;
+ } else {
+ gvt_err("Invalid train pattern %d\n", train_pattern);
+ return -EINVAL;
+ }
+
+ fdi_rx_check_bits = FDI_RX_ENABLE | fdi_rx_train_bits;
+ fdi_tx_check_bits = FDI_TX_ENABLE | fdi_tx_train_bits;
+
+ /* If imr bit has been masked */
+ if (vgpu_vreg(vgpu, fdi_rx_imr) & fdi_iir_check_bits)
+ return 0;
+
+ if (((vgpu_vreg(vgpu, fdi_tx_ctl) & fdi_tx_check_bits)
+ == fdi_tx_check_bits)
+ && ((vgpu_vreg(vgpu, fdi_rx_ctl) & fdi_rx_check_bits)
+ == fdi_rx_check_bits))
+ return 1;
+ else
+ return 0;
+}
+
+#define INVALID_INDEX (~0U)
+
+static unsigned int calc_index(unsigned int offset, unsigned int start,
+ unsigned int next, unsigned int end, i915_reg_t i915_end)
+{
+ unsigned int range = next - start;
+
+ if (!end)
+ end = i915_mmio_reg_offset(i915_end);
+ if (offset < start || offset > end)
+ return INVALID_INDEX;
+ offset -= start;
+ return offset / range;
+}
+
+#define FDI_RX_CTL_TO_PIPE(offset) \
+ calc_index(offset, _FDI_RXA_CTL, _FDI_RXB_CTL, 0, FDI_RX_CTL(PIPE_C))
+
+#define FDI_TX_CTL_TO_PIPE(offset) \
+ calc_index(offset, _FDI_TXA_CTL, _FDI_TXB_CTL, 0, FDI_TX_CTL(PIPE_C))
+
+#define FDI_RX_IMR_TO_PIPE(offset) \
+ calc_index(offset, _FDI_RXA_IMR, _FDI_RXB_IMR, 0, FDI_RX_IMR(PIPE_C))
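+
+/*
+ * Example: FDI_RX_CTL_TO_PIPE(_FDI_RXB_CTL) yields 1 (pipe B), since
+ * the per-pipe stride is _FDI_RXB_CTL - _FDI_RXA_CTL and the offset is
+ * range-checked against [_FDI_RXA_CTL, FDI_RX_CTL(PIPE_C)].
+ */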
+
+static int update_fdi_rx_iir_status(struct intel_vgpu *vgpu,
+ unsigned int offset, void *p_data, unsigned int bytes)
+{
+ i915_reg_t fdi_rx_iir;
+ unsigned int index;
+ int ret;
+
+ if (FDI_RX_CTL_TO_PIPE(offset) != INVALID_INDEX)
+ index = FDI_RX_CTL_TO_PIPE(offset);
+ else if (FDI_TX_CTL_TO_PIPE(offset) != INVALID_INDEX)
+ index = FDI_TX_CTL_TO_PIPE(offset);
+ else if (FDI_RX_IMR_TO_PIPE(offset) != INVALID_INDEX)
+ index = FDI_RX_IMR_TO_PIPE(offset);
+ else {
+ gvt_err("Unsupport registers %x\n", offset);
+ return -EINVAL;
+ }
+
+ write_vreg(vgpu, offset, p_data, bytes);
+
+ fdi_rx_iir = FDI_RX_IIR(index);
+
+ ret = check_fdi_rx_train_status(vgpu, index, FDI_LINK_TRAIN_PATTERN1);
+ if (ret < 0)
+ return ret;
+ if (ret)
+ vgpu_vreg(vgpu, fdi_rx_iir) |= FDI_RX_BIT_LOCK;
+
+ ret = check_fdi_rx_train_status(vgpu, index, FDI_LINK_TRAIN_PATTERN2);
+ if (ret < 0)
+ return ret;
+ if (ret)
+ vgpu_vreg(vgpu, fdi_rx_iir) |= FDI_RX_SYMBOL_LOCK;
+
+ if (offset == _FDI_RXA_CTL)
+ if (fdi_auto_training_started(vgpu))
+ vgpu_vreg(vgpu, DP_TP_STATUS(PORT_E)) |=
+ DP_TP_STATUS_AUTOTRAIN_DONE;
+ return 0;
+}
+
+#define DP_TP_CTL_TO_PORT(offset) \
+ calc_index(offset, _DP_TP_CTL_A, _DP_TP_CTL_B, 0, DP_TP_CTL(PORT_E))
+
+static int dp_tp_ctl_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
+ void *p_data, unsigned int bytes)
+{
+ i915_reg_t status_reg;
+ unsigned int index;
+ u32 data;
+
+ write_vreg(vgpu, offset, p_data, bytes);
+
+ index = DP_TP_CTL_TO_PORT(offset);
+ data = (vgpu_vreg(vgpu, offset) & GENMASK(10, 8)) >> 8;
+ if (data == 0x2) {
+ status_reg = DP_TP_STATUS(index);
+ vgpu_vreg(vgpu, status_reg) |= (1 << 25);
+ }
+ return 0;
+}
+
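+/*
+ * Bits 27:26 and bit 24 of DP_TP_STATUS are treated as sticky,
+ * write-1-to-clear bits below: writing 1 clears the latched bit,
+ * writing 0 leaves it unchanged, and every other bit simply takes the
+ * written value.
+ */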
+static int dp_tp_status_mmio_write(struct intel_vgpu *vgpu,
+ unsigned int offset, void *p_data, unsigned int bytes)
+{
+ u32 reg_val;
+ u32 sticky_mask;
+
+ reg_val = *((u32 *)p_data);
+ sticky_mask = GENMASK(27, 26) | (1 << 24);
+
+ vgpu_vreg(vgpu, offset) = (reg_val & ~sticky_mask) |
+ (vgpu_vreg(vgpu, offset) & sticky_mask);
+ vgpu_vreg(vgpu, offset) &= ~(reg_val & sticky_mask);
+ return 0;
+}
+
+static int pch_adpa_mmio_write(struct intel_vgpu *vgpu,
+ unsigned int offset, void *p_data, unsigned int bytes)
+{
+ u32 data;
+
+ write_vreg(vgpu, offset, p_data, bytes);
+ data = vgpu_vreg(vgpu, offset);
+
+ if (data & ADPA_CRT_HOTPLUG_FORCE_TRIGGER)
+ vgpu_vreg(vgpu, offset) &= ~ADPA_CRT_HOTPLUG_FORCE_TRIGGER;
+ return 0;
+}
+
+static int south_chicken2_mmio_write(struct intel_vgpu *vgpu,
+ unsigned int offset, void *p_data, unsigned int bytes)
+{
+ u32 data;
+
+ write_vreg(vgpu, offset, p_data, bytes);
+ data = vgpu_vreg(vgpu, offset);
+
+ if (data & FDI_MPHY_IOSFSB_RESET_CTL)
+ vgpu_vreg(vgpu, offset) |= FDI_MPHY_IOSFSB_RESET_STATUS;
+ else
+ vgpu_vreg(vgpu, offset) &= ~FDI_MPHY_IOSFSB_RESET_STATUS;
+ return 0;
+}
+
+#define DSPSURF_TO_PIPE(offset) \
+ calc_index(offset, _DSPASURF, _DSPBSURF, 0, DSPSURF(PIPE_C))
+
+static int pri_surf_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
+ void *p_data, unsigned int bytes)
+{
+ struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+ unsigned int index = DSPSURF_TO_PIPE(offset);
+ i915_reg_t surflive_reg = DSPSURFLIVE(index);
+ int flip_event[] = {
+ [PIPE_A] = PRIMARY_A_FLIP_DONE,
+ [PIPE_B] = PRIMARY_B_FLIP_DONE,
+ [PIPE_C] = PRIMARY_C_FLIP_DONE,
+ };
+
+ write_vreg(vgpu, offset, p_data, bytes);
+ vgpu_vreg(vgpu, surflive_reg) = vgpu_vreg(vgpu, offset);
+
+ set_bit(flip_event[index], vgpu->irq.flip_done_event[index]);
+ return 0;
+}
+
+#define SPRSURF_TO_PIPE(offset) \
+ calc_index(offset, _SPRA_SURF, _SPRB_SURF, 0, SPRSURF(PIPE_C))
+
+static int spr_surf_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
+ void *p_data, unsigned int bytes)
+{
+ unsigned int index = SPRSURF_TO_PIPE(offset);
+ i915_reg_t surflive_reg = SPRSURFLIVE(index);
+ int flip_event[] = {
+ [PIPE_A] = SPRITE_A_FLIP_DONE,
+ [PIPE_B] = SPRITE_B_FLIP_DONE,
+ [PIPE_C] = SPRITE_C_FLIP_DONE,
+ };
+
+ write_vreg(vgpu, offset, p_data, bytes);
+ vgpu_vreg(vgpu, surflive_reg) = vgpu_vreg(vgpu, offset);
+
+ set_bit(flip_event[index], vgpu->irq.flip_done_event[index]);
+ return 0;
+}
+
+static int trigger_aux_channel_interrupt(struct intel_vgpu *vgpu,
+ unsigned int reg)
+{
+ struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+ enum intel_gvt_event_type event;
+
+ if (reg == _DPA_AUX_CH_CTL)
+ event = AUX_CHANNEL_A;
+ else if (reg == _PCH_DPB_AUX_CH_CTL || reg == _DPB_AUX_CH_CTL)
+ event = AUX_CHANNEL_B;
+ else if (reg == _PCH_DPC_AUX_CH_CTL || reg == _DPC_AUX_CH_CTL)
+ event = AUX_CHANNEL_C;
+ else if (reg == _PCH_DPD_AUX_CH_CTL || reg == _DPD_AUX_CH_CTL)
+ event = AUX_CHANNEL_D;
+ else {
+ WARN_ON(true);
+ return -EINVAL;
+ }
+
+ intel_vgpu_trigger_virtual_event(vgpu, event);
+ return 0;
+}
+
+static int dp_aux_ch_ctl_trans_done(struct intel_vgpu *vgpu, u32 value,
+ unsigned int reg, int len, bool data_valid)
+{
+ /* mark transaction done */
+ value |= DP_AUX_CH_CTL_DONE;
+ value &= ~DP_AUX_CH_CTL_SEND_BUSY;
+ value &= ~DP_AUX_CH_CTL_RECEIVE_ERROR;
+
+ if (data_valid)
+ value &= ~DP_AUX_CH_CTL_TIME_OUT_ERROR;
+ else
+ value |= DP_AUX_CH_CTL_TIME_OUT_ERROR;
+
+ /* message size */
+ value &= ~(0xf << 20);
+ value |= (len << 20);
+ vgpu_vreg(vgpu, reg) = value;
+
+ if (value & DP_AUX_CH_CTL_INTERRUPT)
+ return trigger_aux_channel_interrupt(vgpu, reg);
+ return 0;
+}
+
+static void dp_aux_ch_ctl_link_training(struct intel_vgpu_dpcd_data *dpcd,
+ uint8_t t)
+{
+ if ((t & DPCD_TRAINING_PATTERN_SET_MASK) == DPCD_TRAINING_PATTERN_1) {
+ /* training pattern 1 for CR */
+ /* set LANE0_CR_DONE, LANE1_CR_DONE */
+ dpcd->data[DPCD_LANE0_1_STATUS] |= DPCD_LANES_CR_DONE;
+ /* set LANE2_CR_DONE, LANE3_CR_DONE */
+ dpcd->data[DPCD_LANE2_3_STATUS] |= DPCD_LANES_CR_DONE;
+ } else if ((t & DPCD_TRAINING_PATTERN_SET_MASK) ==
+ DPCD_TRAINING_PATTERN_2) {
+ /* training pattern 2 for EQ */
+ /* Set CHANNEL_EQ_DONE and SYMBOL_LOCKED for Lane0_1 */
+ dpcd->data[DPCD_LANE0_1_STATUS] |= DPCD_LANES_EQ_DONE;
+ dpcd->data[DPCD_LANE0_1_STATUS] |= DPCD_SYMBOL_LOCKED;
+ /* Set CHANNEL_EQ_DONE and SYMBOL_LOCKED for Lane2_3 */
+ dpcd->data[DPCD_LANE2_3_STATUS] |= DPCD_LANES_EQ_DONE;
+ dpcd->data[DPCD_LANE2_3_STATUS] |= DPCD_SYMBOL_LOCKED;
+ /* set INTERLANE_ALIGN_DONE */
+ dpcd->data[DPCD_LANE_ALIGN_STATUS_UPDATED] |=
+ DPCD_INTERLANE_ALIGN_DONE;
+ } else if ((t & DPCD_TRAINING_PATTERN_SET_MASK) ==
+ DPCD_LINK_TRAINING_DISABLED) {
+ /* finish link training */
+ /* set sink status as synchronized */
+ dpcd->data[DPCD_SINK_STATUS] = DPCD_SINK_IN_SYNC;
+ }
+}
+
+#define _REG_HSW_DP_AUX_CH_CTL(dp) \
+ ((dp) ? (_PCH_DPB_AUX_CH_CTL + ((dp)-1)*0x100) : 0x64010)
+
+#define _REG_SKL_DP_AUX_CH_CTL(dp) (0x64010 + (dp) * 0x100)
+
+#define OFFSET_TO_DP_AUX_PORT(offset) (((offset) & 0xF00) >> 8)
+
+#define dpy_is_valid_port(port) \
+ (((port) >= PORT_A) && ((port) < I915_MAX_PORTS))
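+
+/*
+ * Each AUX channel occupies six dwords: the control register at +0 and
+ * data registers DATA1..DATA5 at +4..+0x14, which is why the handler
+ * below pulls the message header from offset + 4.
+ */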
+
+static int dp_aux_ch_ctl_mmio_write(struct intel_vgpu *vgpu,
+ unsigned int offset, void *p_data, unsigned int bytes)
+{
+ struct intel_vgpu_display *display = &vgpu->display;
+ int msg, addr, ctrl, op, len;
+ int port_index = OFFSET_TO_DP_AUX_PORT(offset);
+ struct intel_vgpu_dpcd_data *dpcd = NULL;
+ struct intel_vgpu_port *port = NULL;
+ u32 data;
+
+ if (!dpy_is_valid_port(port_index)) {
+ gvt_err("GVT(%d): Unsupported DP port access!\n", vgpu->id);
+ return 0;
+ }
+
+ write_vreg(vgpu, offset, p_data, bytes);
+ data = vgpu_vreg(vgpu, offset);
+
+ if (IS_SKYLAKE(vgpu->gvt->dev_priv) &&
+ offset != _REG_SKL_DP_AUX_CH_CTL(port_index)) {
+ /* SKL DPB/C/D aux ctl register changed */
+ return 0;
+ } else if (IS_BROADWELL(vgpu->gvt->dev_priv) &&
+ offset != _REG_HSW_DP_AUX_CH_CTL(port_index)) {
+ /* write to the data registers */
+ return 0;
+ }
+
+ if (!(data & DP_AUX_CH_CTL_SEND_BUSY)) {
+ /* just want to clear the sticky bits */
+ vgpu_vreg(vgpu, offset) = 0;
+ return 0;
+ }
+
+ port = &display->ports[port_index];
+ dpcd = port->dpcd;
+
+ /* read out message from DATA1 register */
+ msg = vgpu_vreg(vgpu, offset + 4);
+ addr = (msg >> 8) & 0xffff;
+ ctrl = (msg >> 24) & 0xff;
+ len = msg & 0xff;
+ op = ctrl >> 4;
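+ /*
+ * The header unpacked above follows the i915 AUX packing: DATA1
+ * bits 31:24 are the command byte (4-bit AUX op in the high nibble,
+ * address bits 19:16 in the low nibble), bits 23:8 are address bits
+ * 15:0, and bits 7:0 are the length field.
+ */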
+
+ if (op == GVT_AUX_NATIVE_WRITE) {
+ int t;
+ uint8_t buf[16];
+
+ if ((addr + len + 1) >= DPCD_SIZE) {
+ /*
+ * Write request exceeds what we support.
+ * DPCD spec: When a Source Device is writing a DPCD
+ * address not supported by the Sink Device, the Sink
+ * Device shall reply with AUX NACK and “M” equal to
+ * zero.
+ */
+
+ /* NAK the write */
+ vgpu_vreg(vgpu, offset + 4) = AUX_NATIVE_REPLY_NAK;
+ dp_aux_ch_ctl_trans_done(vgpu, data, offset, 2, true);
+ return 0;
+ }
+
+ /*
+ * Write request format: (command + address) occupies
+ * 3 bytes, followed by (len + 1) bytes of data.
+ */
+ if (WARN_ON((len + 4) > AUX_BURST_SIZE))
+ return -EINVAL;
+
+ /* unpack data from vreg to buf */
+ for (t = 0; t < 4; t++) {
+ u32 r = vgpu_vreg(vgpu, offset + 8 + t * 4);
+
+ buf[t * 4] = (r >> 24) & 0xff;
+ buf[t * 4 + 1] = (r >> 16) & 0xff;
+ buf[t * 4 + 2] = (r >> 8) & 0xff;
+ buf[t * 4 + 3] = r & 0xff;
+ }
+
+ /* write to virtual DPCD */
+ if (dpcd && dpcd->data_valid) {
+ for (t = 0; t <= len; t++) {
+ int p = addr + t;
+
+ dpcd->data[p] = buf[t];
+ /* check for link training */
+ if (p == DPCD_TRAINING_PATTERN_SET)
+ dp_aux_ch_ctl_link_training(dpcd,
+ buf[t]);
+ }
+ }
+
+ /* ACK the write */
+ vgpu_vreg(vgpu, offset + 4) = 0;
+ dp_aux_ch_ctl_trans_done(vgpu, data, offset, 1,
+ dpcd && dpcd->data_valid);
+ return 0;
+ }
+
+ if (op == GVT_AUX_NATIVE_READ) {
+ int idx, i, ret = 0;
+
+ if ((addr + len + 1) >= DPCD_SIZE) {
+ /*
+ * Read request exceeds what we support.
+ * DPCD spec: A Sink Device receiving a Native AUX CH
+ * read request for an unsupported DPCD address must
+ * reply with an AUX ACK and read data set equal to
+ * zero instead of replying with AUX NACK.
+ */
+
+ /* ACK the read */
+ vgpu_vreg(vgpu, offset + 4) = 0;
+ vgpu_vreg(vgpu, offset + 8) = 0;
+ vgpu_vreg(vgpu, offset + 12) = 0;
+ vgpu_vreg(vgpu, offset + 16) = 0;
+ vgpu_vreg(vgpu, offset + 20) = 0;
+
+ dp_aux_ch_ctl_trans_done(vgpu, data, offset, len + 2,
+ true);
+ return 0;
+ }
+
+ for (idx = 1; idx <= 5; idx++) {
+ /* clear the data registers */
+ vgpu_vreg(vgpu, offset + 4 * idx) = 0;
+ }
+
+ /*
+ * Read reply format: ACK (1 byte) plus (len + 1) bytes of data.
+ */
+ if (WARN_ON((len + 2) > AUX_BURST_SIZE))
+ return -EINVAL;
+
+ /* read from virtual DPCD to vreg */
+ /* first 4 bytes: [ACK][addr][addr+1][addr+2] */
+ if (dpcd && dpcd->data_valid) {
+ for (i = 1; i <= (len + 1); i++) {
+ int t;
+
+ t = dpcd->data[addr + i - 1];
+ t <<= (24 - 8 * (i % 4));
+ ret |= t;
+
+ if ((i % 4 == 3) || (i == (len + 1))) {
+ vgpu_vreg(vgpu, offset +
+ (i / 4 + 1) * 4) = ret;
+ ret = 0;
+ }
+ }
+ }
+ dp_aux_ch_ctl_trans_done(vgpu, data, offset, len + 2,
+ dpcd && dpcd->data_valid);
+ return 0;
+ }
+
+ /* i2c transaction starts */
+ intel_gvt_i2c_handle_aux_ch_write(vgpu, port_index, offset, p_data);
+
+ if (data & DP_AUX_CH_CTL_INTERRUPT)
+ trigger_aux_channel_interrupt(vgpu, offset);
+ return 0;
+}
+
+static int vga_control_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
+ void *p_data, unsigned int bytes)
+{
+ bool vga_disable;
+
+ write_vreg(vgpu, offset, p_data, bytes);
+ vga_disable = vgpu_vreg(vgpu, offset) & VGA_DISP_DISABLE;
+
+ gvt_dbg_core("vgpu%d: %s VGA mode\n", vgpu->id,
+ vga_disable ? "Disable" : "Enable");
+ return 0;
+}
+
+static u32 read_virtual_sbi_register(struct intel_vgpu *vgpu,
+ unsigned int sbi_offset)
+{
+ struct intel_vgpu_display *display = &vgpu->display;
+ int num = display->sbi.number;
+ int i;
+
+ for (i = 0; i < num; ++i)
+ if (display->sbi.registers[i].offset == sbi_offset)
+ break;
+
+ if (i == num)
+ return 0;
+
+ return display->sbi.registers[i].value;
+}
+
+static void write_virtual_sbi_register(struct intel_vgpu *vgpu,
+ unsigned int offset, u32 value)
+{
+ struct intel_vgpu_display *display = &vgpu->display;
+ int num = display->sbi.number;
+ int i;
+
+ for (i = 0; i < num; ++i) {
+ if (display->sbi.registers[i].offset == offset)
+ break;
+ }
+
+ if (i == num) {
+ if (num == SBI_REG_MAX) {
+ gvt_err("vgpu%d: SBI caching meets maximum limits\n",
+ vgpu->id);
+ return;
+ }
+ display->sbi.number++;
+ }
+
+ display->sbi.registers[i].offset = offset;
+ display->sbi.registers[i].value = value;
+}
+
+static int sbi_data_mmio_read(struct intel_vgpu *vgpu, unsigned int offset,
+ void *p_data, unsigned int bytes)
+{
+ if (((vgpu_vreg(vgpu, SBI_CTL_STAT) & SBI_OPCODE_MASK) >>
+ SBI_OPCODE_SHIFT) == SBI_CMD_CRRD) {
+ unsigned int sbi_offset = (vgpu_vreg(vgpu, SBI_ADDR) &
+ SBI_ADDR_OFFSET_MASK) >> SBI_ADDR_OFFSET_SHIFT;
+ vgpu_vreg(vgpu, offset) = read_virtual_sbi_register(vgpu,
+ sbi_offset);
+ }
+ read_vreg(vgpu, offset, p_data, bytes);
+ return 0;
+}
+
+static int sbi_ctl_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
+ void *p_data, unsigned int bytes)
+{
+ u32 data;
+
+ write_vreg(vgpu, offset, p_data, bytes);
+ data = vgpu_vreg(vgpu, offset);
+
+ data &= ~(SBI_STAT_MASK << SBI_STAT_SHIFT);
+ data |= SBI_READY;
+
+ data &= ~(SBI_RESPONSE_MASK << SBI_RESPONSE_SHIFT);
+ data |= SBI_RESPONSE_SUCCESS;
+
+ vgpu_vreg(vgpu, offset) = data;
+
+ if (((vgpu_vreg(vgpu, SBI_CTL_STAT) & SBI_OPCODE_MASK) >>
+ SBI_OPCODE_SHIFT) == SBI_CMD_CRWR) {
+ unsigned int sbi_offset = (vgpu_vreg(vgpu, SBI_ADDR) &
+ SBI_ADDR_OFFSET_MASK) >> SBI_ADDR_OFFSET_SHIFT;
+
+ write_virtual_sbi_register(vgpu, sbi_offset,
+ vgpu_vreg(vgpu, SBI_DATA));
+ }
+ return 0;
+}
+
+#define _vgtif_reg(x) \
+ (VGT_PVINFO_PAGE + offsetof(struct vgt_if, x))
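+
+/*
+ * _vgtif_reg(x) maps a field of struct vgt_if to its MMIO offset inside
+ * the PVINFO page, e.g. _vgtif_reg(magic) is the guest-visible offset
+ * of the magic field; the guest and GVT communicate through these
+ * fields.
+ */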
+
+static int pvinfo_mmio_read(struct intel_vgpu *vgpu, unsigned int offset,
+ void *p_data, unsigned int bytes)
+{
+ bool invalid_read = false;
+
+ read_vreg(vgpu, offset, p_data, bytes);
+
+ switch (offset) {
+ case _vgtif_reg(magic) ... _vgtif_reg(vgt_id):
+ if (offset + bytes > _vgtif_reg(vgt_id) + 4)
+ invalid_read = true;
+ break;
+ case _vgtif_reg(avail_rs.mappable_gmadr.base) ...
+ _vgtif_reg(avail_rs.fence_num):
+ if (offset + bytes >
+ _vgtif_reg(avail_rs.fence_num) + 4)
+ invalid_read = true;
+ break;
+ case 0x78010: /* vgt_caps */
+ case 0x7881c:
+ break;
+ default:
+ invalid_read = true;
+ break;
+ }
+ if (invalid_read)
+ gvt_err("invalid pvinfo read: [%x:%x] = %x\n",
+ offset, bytes, *(u32 *)p_data);
+ return 0;
+}
+
+static int handle_g2v_notification(struct intel_vgpu *vgpu, int notification)
+{
+ int ret = 0;
+
+ switch (notification) {
+ case VGT_G2V_PPGTT_L3_PAGE_TABLE_CREATE:
+ ret = intel_vgpu_g2v_create_ppgtt_mm(vgpu, 3);
+ break;
+ case VGT_G2V_PPGTT_L3_PAGE_TABLE_DESTROY:
+ ret = intel_vgpu_g2v_destroy_ppgtt_mm(vgpu, 3);
+ break;
+ case VGT_G2V_PPGTT_L4_PAGE_TABLE_CREATE:
+ ret = intel_vgpu_g2v_create_ppgtt_mm(vgpu, 4);
+ break;
+ case VGT_G2V_PPGTT_L4_PAGE_TABLE_DESTROY:
+ ret = intel_vgpu_g2v_destroy_ppgtt_mm(vgpu, 4);
+ break;
+ case VGT_G2V_EXECLIST_CONTEXT_CREATE:
+ case VGT_G2V_EXECLIST_CONTEXT_DESTROY:
+ case 1: /* Remove this in guest driver. */
+ break;
+ default:
+ gvt_err("Invalid PV notification %d\n", notification);
+ }
+ return ret;
+}
+
+static int send_display_ready_uevent(struct intel_vgpu *vgpu, int ready)
+{
+ struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+ struct kobject *kobj = &dev_priv->drm.primary->kdev->kobj;
+ char *env[3] = {NULL, NULL, NULL};
+ char vmid_str[20];
+ char display_ready_str[20];
+
+ snprintf(display_ready_str, 20, "GVT_DISPLAY_READY=%d\n", ready);
+ env[0] = display_ready_str;
+
+ snprintf(vmid_str, 20, "VMID=%d", vgpu->id);
+ env[1] = vmid_str;
+
+ return kobject_uevent_env(kobj, KOBJ_ADD, env);
+}
+
+static int pvinfo_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
+ void *p_data, unsigned int bytes)
+{
+ u32 data;
+ int ret;
+
+ write_vreg(vgpu, offset, p_data, bytes);
+ data = vgpu_vreg(vgpu, offset);
+
+ switch (offset) {
+ case _vgtif_reg(display_ready):
+ send_display_ready_uevent(vgpu, data ? 1 : 0);
+ break;
+ case _vgtif_reg(g2v_notify):
+ ret = handle_g2v_notification(vgpu, data);
+ break;
+ /* add xhot and yhot to handled list to avoid error log */
+ case 0x78830:
+ case 0x78834:
+ case _vgtif_reg(pdp[0].lo):
+ case _vgtif_reg(pdp[0].hi):
+ case _vgtif_reg(pdp[1].lo):
+ case _vgtif_reg(pdp[1].hi):
+ case _vgtif_reg(pdp[2].lo):
+ case _vgtif_reg(pdp[2].hi):
+ case _vgtif_reg(pdp[3].lo):
+ case _vgtif_reg(pdp[3].hi):
+ case _vgtif_reg(execlist_context_descriptor_lo):
+ case _vgtif_reg(execlist_context_descriptor_hi):
+ break;
+ default:
+ gvt_err("invalid pvinfo write offset %x bytes %x data %x\n",
+ offset, bytes, data);
+ break;
+ }
+ return 0;
+}
+
+static int pf_write(struct intel_vgpu *vgpu,
+ unsigned int offset, void *p_data, unsigned int bytes)
+{
+ u32 val = *(u32 *)p_data;
+
+ if ((offset == _PS_1A_CTRL || offset == _PS_2A_CTRL ||
+ offset == _PS_1B_CTRL || offset == _PS_2B_CTRL ||
+ offset == _PS_1C_CTRL) && (val & PS_PLANE_SEL_MASK) != 0) {
+ WARN_ONCE(true, "VM(%d): guest is trying to scale a plane\n",
+ vgpu->id);
+ return 0;
+ }
+
+ return intel_vgpu_default_mmio_write(vgpu, offset, p_data, bytes);
+}
+
+static int power_well_ctl_mmio_write(struct intel_vgpu *vgpu,
+ unsigned int offset, void *p_data, unsigned int bytes)
+{
+ write_vreg(vgpu, offset, p_data, bytes);
+
+ if (vgpu_vreg(vgpu, offset) & HSW_PWR_WELL_ENABLE_REQUEST)
+ vgpu_vreg(vgpu, offset) |= HSW_PWR_WELL_STATE_ENABLED;
+ else
+ vgpu_vreg(vgpu, offset) &= ~HSW_PWR_WELL_STATE_ENABLED;
+ return 0;
+}
+
+static int fpga_dbg_mmio_write(struct intel_vgpu *vgpu,
+ unsigned int offset, void *p_data, unsigned int bytes)
+{
+ write_vreg(vgpu, offset, p_data, bytes);
+
+ if (vgpu_vreg(vgpu, offset) & FPGA_DBG_RM_NOCLAIM)
+ vgpu_vreg(vgpu, offset) &= ~FPGA_DBG_RM_NOCLAIM;
+ return 0;
+}
+
+static int dma_ctrl_write(struct intel_vgpu *vgpu, unsigned int offset,
+ void *p_data, unsigned int bytes)
+{
+ u32 mode;
+
+ write_vreg(vgpu, offset, p_data, bytes);
+ mode = vgpu_vreg(vgpu, offset);
+
+ if (GFX_MODE_BIT_SET_IN_MASK(mode, START_DMA)) {
+ WARN_ONCE(1, "VM(%d): iGVT-g doesn't support GuC\n",
+ vgpu->id);
+ return 0;
+ }
+
+ return 0;
+}
+
+static int gen9_trtte_write(struct intel_vgpu *vgpu, unsigned int offset,
+ void *p_data, unsigned int bytes)
+{
+ struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+ u32 trtte = *(u32 *)p_data;
+
+ if ((trtte & 1) && (trtte & (1 << 1)) == 0) {
+ WARN(1, "VM(%d): Use physical address for TRTT!\n",
+ vgpu->id);
+ return -EINVAL;
+ }
+ write_vreg(vgpu, offset, p_data, bytes);
+ /* TRTTE is not per-context */
+ I915_WRITE(_MMIO(offset), vgpu_vreg(vgpu, offset));
+
+ return 0;
+}
+
+static int gen9_trtt_chicken_write(struct intel_vgpu *vgpu, unsigned int offset,
+ void *p_data, unsigned int bytes)
+{
+ struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+ u32 val = *(u32 *)p_data;
+
+ if (val & 1) {
+ /* unblock hw logic */
+ I915_WRITE(_MMIO(offset), val);
+ }
+ write_vreg(vgpu, offset, p_data, bytes);
+ return 0;
+}
+
+static int dpll_status_read(struct intel_vgpu *vgpu, unsigned int offset,
+ void *p_data, unsigned int bytes)
+{
+ u32 v = 0;
+
+ if (vgpu_vreg(vgpu, 0x46010) & (1 << 31))
+ v |= (1 << 0);
+
+ if (vgpu_vreg(vgpu, 0x46014) & (1 << 31))
+ v |= (1 << 8);
+
+ if (vgpu_vreg(vgpu, 0x46040) & (1 << 31))
+ v |= (1 << 16);
+
+ if (vgpu_vreg(vgpu, 0x46060) & (1 << 31))
+ v |= (1 << 24);
+
+ vgpu_vreg(vgpu, offset) = v;
+
+ return intel_vgpu_default_mmio_read(vgpu, offset, p_data, bytes);
+}
+
+static int mailbox_write(struct intel_vgpu *vgpu, unsigned int offset,
+ void *p_data, unsigned int bytes)
+{
+ u32 value = *(u32 *)p_data;
+ u32 cmd = value & 0xff;
+ u32 *data0 = &vgpu_vreg(vgpu, GEN6_PCODE_DATA);
+
+ switch (cmd) {
+ case 0x6:
+ /*
+ * "Read memory latency" command on gen9.
+ * The memory latency values below were read
+ * from a Skylake platform.
+ */
+ if (!*data0)
+ *data0 = 0x1e1a1100;
+ else
+ *data0 = 0x61514b3d;
+ break;
+ case 0x5:
+ *data0 |= 0x1;
+ break;
+ }
+
+ gvt_dbg_core("VM(%d) write %x to mailbox, return data0 %x\n",
+ vgpu->id, value, *data0);
+
+ value &= ~(1 << 31);
+ return intel_vgpu_default_mmio_write(vgpu, offset, &value, bytes);
+}
+
+static int skl_power_well_ctl_write(struct intel_vgpu *vgpu,
+ unsigned int offset, void *p_data, unsigned int bytes)
+{
+ u32 v = *(u32 *)p_data;
+
+ v &= (1 << 31) | (1 << 29) | (1 << 9) |
+ (1 << 7) | (1 << 5) | (1 << 3) | (1 << 1);
+ v |= (v >> 1);
+
+ return intel_vgpu_default_mmio_write(vgpu, offset, &v, bytes);
+}
+
+static int skl_misc_ctl_write(struct intel_vgpu *vgpu, unsigned int offset,
+ void *p_data, unsigned int bytes)
+{
+ struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+ i915_reg_t reg = {.reg = offset};
+
+ switch (offset) {
+ case 0x4ddc:
+ vgpu_vreg(vgpu, offset) = 0x8000003c;
+ /* WaCompressedResourceSamplerPbeMediaNewHashMode:skl */
+ I915_WRITE(reg, vgpu_vreg(vgpu, offset));
+ break;
+ case 0x42080:
+ vgpu_vreg(vgpu, offset) = 0x8000;
+ /* WaCompressedResourceDisplayNewHashMode:skl */
+ I915_WRITE(reg, vgpu_vreg(vgpu, offset));
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int skl_lcpll_write(struct intel_vgpu *vgpu, unsigned int offset,
+ void *p_data, unsigned int bytes)
+{
+ u32 v = *(u32 *)p_data;
+
+ /* other bits are MBZ. */
+ v &= (1 << 31) | (1 << 30);
+ v & (1 << 31) ? (v |= (1 << 30)) : (v &= ~(1 << 30));
+
+ vgpu_vreg(vgpu, offset) = v;
+
+ return 0;
+}
+
+static int ring_timestamp_mmio_read(struct intel_vgpu *vgpu,
+ unsigned int offset, void *p_data, unsigned int bytes)
+{
+ struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+
+ vgpu_vreg(vgpu, offset) = I915_READ(_MMIO(offset));
+ return intel_vgpu_default_mmio_read(vgpu, offset, p_data, bytes);
+}
+
+static int elsp_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
+ void *p_data, unsigned int bytes)
+{
+ int ring_id = render_mmio_to_ring_id(vgpu->gvt, offset);
+ struct intel_vgpu_execlist *execlist;
+ u32 data = *(u32 *)p_data;
+ int ret = 0;
+
+ if (WARN_ON(ring_id < 0 || ring_id > I915_NUM_ENGINES - 1))
+ return -EINVAL;
+
+ execlist = &vgpu->execlist[ring_id];
+
+ execlist->elsp_dwords.data[execlist->elsp_dwords.index] = data;
+ if (execlist->elsp_dwords.index == 3) {
+ ret = intel_vgpu_submit_execlist(vgpu, ring_id);
+ if (ret)
+ gvt_err("failed to submit workload on ring %d\n", ring_id);
+ }
+
+ ++execlist->elsp_dwords.index;
+ execlist->elsp_dwords.index &= 0x3;
+ return ret;
+}
+
+static int ring_mode_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
+ void *p_data, unsigned int bytes)
+{
+ u32 data = *(u32 *)p_data;
+ int ring_id = render_mmio_to_ring_id(vgpu->gvt, offset);
+ bool enable_execlist;
+
+ write_vreg(vgpu, offset, p_data, bytes);
+ if ((data & _MASKED_BIT_ENABLE(GFX_RUN_LIST_ENABLE))
+ || (data & _MASKED_BIT_DISABLE(GFX_RUN_LIST_ENABLE))) {
+ enable_execlist = !!(data & GFX_RUN_LIST_ENABLE);
+
+ gvt_dbg_core("EXECLIST %s on ring %d\n",
+ (enable_execlist ? "enabling" : "disabling"),
+ ring_id);
+
+ if (enable_execlist)
+ intel_vgpu_start_schedule(vgpu);
+ }
+ return 0;
+}
+
+static int gvt_reg_tlb_control_handler(struct intel_vgpu *vgpu,
+ unsigned int offset, void *p_data, unsigned int bytes)
+{
+ int rc = 0;
+ unsigned int id = 0;
+
+ write_vreg(vgpu, offset, p_data, bytes);
+ vgpu_vreg(vgpu, offset) = 0;
+
+ switch (offset) {
+ case 0x4260:
+ id = RCS;
+ break;
+ case 0x4264:
+ id = VCS;
+ break;
+ case 0x4268:
+ id = VCS2;
+ break;
+ case 0x426c:
+ id = BCS;
+ break;
+ case 0x4270:
+ id = VECS;
+ break;
+ default:
+ rc = -EINVAL;
+ break;
+ }
+ if (!rc)
+ set_bit(id, (void *)vgpu->tlb_handle_pending);
+
+ return rc;
+}
+
+static int ring_reset_ctl_write(struct intel_vgpu *vgpu,
+ unsigned int offset, void *p_data, unsigned int bytes)
+{
+ u32 data;
+
+ write_vreg(vgpu, offset, p_data, bytes);
+ data = vgpu_vreg(vgpu, offset);
+
+ if (data & _MASKED_BIT_ENABLE(RESET_CTL_REQUEST_RESET))
+ data |= RESET_CTL_READY_TO_RESET;
+ else if (data & _MASKED_BIT_DISABLE(RESET_CTL_REQUEST_RESET))
+ data &= ~RESET_CTL_READY_TO_RESET;
+
+ vgpu_vreg(vgpu, offset) = data;
+ return 0;
+}
+
+#define MMIO_F(reg, s, f, am, rm, d, r, w) do { \
+ ret = new_mmio_info(gvt, INTEL_GVT_MMIO_OFFSET(reg), \
+ f, s, am, rm, d, r, w); \
+ if (ret) \
+ return ret; \
+} while (0)
+
+#define MMIO_D(reg, d) \
+ MMIO_F(reg, 4, 0, 0, 0, d, NULL, NULL)
+
+#define MMIO_DH(reg, d, r, w) \
+ MMIO_F(reg, 4, 0, 0, 0, d, r, w)
+
+#define MMIO_DFH(reg, d, f, r, w) \
+ MMIO_F(reg, 4, f, 0, 0, d, r, w)
+
+#define MMIO_GM(reg, d, r, w) \
+ MMIO_F(reg, 4, F_GMADR, 0xFFFFF000, 0, d, r, w)
+
+#define MMIO_RO(reg, d, f, rm, r, w) \
+ MMIO_F(reg, 4, F_RO | f, 0, rm, d, r, w)
+
+#define MMIO_RING_F(prefix, s, f, am, rm, d, r, w) do { \
+ MMIO_F(prefix(RENDER_RING_BASE), s, f, am, rm, d, r, w); \
+ MMIO_F(prefix(BLT_RING_BASE), s, f, am, rm, d, r, w); \
+ MMIO_F(prefix(GEN6_BSD_RING_BASE), s, f, am, rm, d, r, w); \
+ MMIO_F(prefix(VEBOX_RING_BASE), s, f, am, rm, d, r, w); \
+} while (0)
+
+#define MMIO_RING_D(prefix, d) \
+ MMIO_RING_F(prefix, 4, 0, 0, 0, d, NULL, NULL)
+
+#define MMIO_RING_DFH(prefix, d, f, r, w) \
+ MMIO_RING_F(prefix, 4, f, 0, 0, d, r, w)
+
+#define MMIO_RING_GM(prefix, d, r, w) \
+ MMIO_RING_F(prefix, 4, F_GMADR, 0xFFFF0000, 0, d, r, w)
+
+#define MMIO_RING_RO(prefix, d, f, rm, r, w) \
+ MMIO_RING_F(prefix, 4, F_RO | f, 0, rm, d, r, w)
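+
+/*
+ * Parameter legend shared by the MMIO_* macros above: s = size in
+ * bytes, f = F_* flags, am = address mask for graphics-address
+ * registers, rm = read-only bit mask, d = device set (D_*), and
+ * r/w = optional read/write handlers (NULL selects the defaults).
+ */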
+
+static int init_generic_mmio_info(struct intel_gvt *gvt)
+{
+ struct drm_i915_private *dev_priv = gvt->dev_priv;
+ int ret;
+
+ MMIO_RING_DFH(RING_IMR, D_ALL, 0, NULL, intel_vgpu_reg_imr_handler);
+
+ MMIO_DFH(SDEIMR, D_ALL, 0, NULL, intel_vgpu_reg_imr_handler);
+ MMIO_DFH(SDEIER, D_ALL, 0, NULL, intel_vgpu_reg_ier_handler);
+ MMIO_DFH(SDEIIR, D_ALL, 0, NULL, intel_vgpu_reg_iir_handler);
+ MMIO_D(SDEISR, D_ALL);
+
+ MMIO_RING_D(RING_HWSTAM, D_ALL);
+
+ MMIO_GM(RENDER_HWS_PGA_GEN7, D_ALL, NULL, NULL);
+ MMIO_GM(BSD_HWS_PGA_GEN7, D_ALL, NULL, NULL);
+ MMIO_GM(BLT_HWS_PGA_GEN7, D_ALL, NULL, NULL);
+ MMIO_GM(VEBOX_HWS_PGA_GEN7, D_ALL, NULL, NULL);
+
+#define RING_REG(base) (base + 0x28)
+ MMIO_RING_D(RING_REG, D_ALL);
+#undef RING_REG
+
+#define RING_REG(base) (base + 0x134)
+ MMIO_RING_D(RING_REG, D_ALL);
+#undef RING_REG
+
+ MMIO_GM(0x2148, D_ALL, NULL, NULL);
+ MMIO_GM(CCID, D_ALL, NULL, NULL);
+ MMIO_GM(0x12198, D_ALL, NULL, NULL);
+ MMIO_D(GEN7_CXT_SIZE, D_ALL);
+
+ MMIO_RING_D(RING_TAIL, D_ALL);
+ MMIO_RING_D(RING_HEAD, D_ALL);
+ MMIO_RING_D(RING_CTL, D_ALL);
+ MMIO_RING_D(RING_ACTHD, D_ALL);
+ MMIO_RING_GM(RING_START, D_ALL, NULL, NULL);
+
+ /* RING MODE */
+#define RING_REG(base) (base + 0x29c)
+ MMIO_RING_DFH(RING_REG, D_ALL, F_MODE_MASK, NULL, ring_mode_mmio_write);
+#undef RING_REG
+
+ MMIO_RING_DFH(RING_MI_MODE, D_ALL, F_MODE_MASK, NULL, NULL);
+ MMIO_RING_DFH(RING_INSTPM, D_ALL, F_MODE_MASK, NULL, NULL);
+ MMIO_RING_DFH(RING_TIMESTAMP, D_ALL, F_CMD_ACCESS,
+ ring_timestamp_mmio_read, NULL);
+ MMIO_RING_DFH(RING_TIMESTAMP_UDW, D_ALL, F_CMD_ACCESS,
+ ring_timestamp_mmio_read, NULL);
+
+ MMIO_DFH(GEN7_GT_MODE, D_ALL, F_MODE_MASK, NULL, NULL);
+ MMIO_DFH(CACHE_MODE_0_GEN7, D_ALL, F_MODE_MASK, NULL, NULL);
+ MMIO_DFH(CACHE_MODE_1, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
+
+ MMIO_DFH(0x20dc, D_ALL, F_MODE_MASK, NULL, NULL);
+ MMIO_DFH(_3D_CHICKEN3, D_ALL, F_MODE_MASK, NULL, NULL);
+ MMIO_DFH(0x2088, D_ALL, F_MODE_MASK, NULL, NULL);
+ MMIO_DFH(0x20e4, D_ALL, F_MODE_MASK, NULL, NULL);
+ MMIO_DFH(0x2470, D_ALL, F_MODE_MASK, NULL, NULL);
+ MMIO_D(GAM_ECOCHK, D_ALL);
+ MMIO_DFH(GEN7_COMMON_SLICE_CHICKEN1, D_ALL, F_MODE_MASK, NULL, NULL);
+ MMIO_DFH(COMMON_SLICE_CHICKEN2, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
+ MMIO_D(0x9030, D_ALL);
+ MMIO_D(0x20a0, D_ALL);
+ MMIO_D(0x2420, D_ALL);
+ MMIO_D(0x2430, D_ALL);
+ MMIO_D(0x2434, D_ALL);
+ MMIO_D(0x2438, D_ALL);
+ MMIO_D(0x243c, D_ALL);
+ MMIO_DFH(0x7018, D_ALL, F_MODE_MASK, NULL, NULL);
+ MMIO_DFH(HALF_SLICE_CHICKEN3, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(0xe100, D_ALL, F_MODE_MASK, NULL, NULL);
+
+ /* display */
+ MMIO_F(0x60220, 0x20, 0, 0, 0, D_ALL, NULL, NULL);
+ MMIO_D(0x602a0, D_ALL);
+
+ MMIO_D(0x65050, D_ALL);
+ MMIO_D(0x650b4, D_ALL);
+
+ MMIO_D(0xc4040, D_ALL);
+ MMIO_D(DERRMR, D_ALL);
+
+ MMIO_D(PIPEDSL(PIPE_A), D_ALL);
+ MMIO_D(PIPEDSL(PIPE_B), D_ALL);
+ MMIO_D(PIPEDSL(PIPE_C), D_ALL);
+ MMIO_D(PIPEDSL(_PIPE_EDP), D_ALL);
+
+ MMIO_DH(PIPECONF(PIPE_A), D_ALL, NULL, pipeconf_mmio_write);
+ MMIO_DH(PIPECONF(PIPE_B), D_ALL, NULL, pipeconf_mmio_write);
+ MMIO_DH(PIPECONF(PIPE_C), D_ALL, NULL, pipeconf_mmio_write);
+ MMIO_DH(PIPECONF(_PIPE_EDP), D_ALL, NULL, pipeconf_mmio_write);
+
+ MMIO_D(PIPESTAT(PIPE_A), D_ALL);
+ MMIO_D(PIPESTAT(PIPE_B), D_ALL);
+ MMIO_D(PIPESTAT(PIPE_C), D_ALL);
+ MMIO_D(PIPESTAT(_PIPE_EDP), D_ALL);
+
+ MMIO_D(PIPE_FLIPCOUNT_G4X(PIPE_A), D_ALL);
+ MMIO_D(PIPE_FLIPCOUNT_G4X(PIPE_B), D_ALL);
+ MMIO_D(PIPE_FLIPCOUNT_G4X(PIPE_C), D_ALL);
+ MMIO_D(PIPE_FLIPCOUNT_G4X(_PIPE_EDP), D_ALL);
+
+ MMIO_D(PIPE_FRMCOUNT_G4X(PIPE_A), D_ALL);
+ MMIO_D(PIPE_FRMCOUNT_G4X(PIPE_B), D_ALL);
+ MMIO_D(PIPE_FRMCOUNT_G4X(PIPE_C), D_ALL);
+ MMIO_D(PIPE_FRMCOUNT_G4X(_PIPE_EDP), D_ALL);
+
+ MMIO_D(CURCNTR(PIPE_A), D_ALL);
+ MMIO_D(CURCNTR(PIPE_B), D_ALL);
+ MMIO_D(CURCNTR(PIPE_C), D_ALL);
+
+ MMIO_D(CURPOS(PIPE_A), D_ALL);
+ MMIO_D(CURPOS(PIPE_B), D_ALL);
+ MMIO_D(CURPOS(PIPE_C), D_ALL);
+
+ MMIO_D(CURBASE(PIPE_A), D_ALL);
+ MMIO_D(CURBASE(PIPE_B), D_ALL);
+ MMIO_D(CURBASE(PIPE_C), D_ALL);
+
+ MMIO_D(0x700ac, D_ALL);
+ MMIO_D(0x710ac, D_ALL);
+ MMIO_D(0x720ac, D_ALL);
+
+ MMIO_D(0x70090, D_ALL);
+ MMIO_D(0x70094, D_ALL);
+ MMIO_D(0x70098, D_ALL);
+ MMIO_D(0x7009c, D_ALL);
+
+ MMIO_D(DSPCNTR(PIPE_A), D_ALL);
+ MMIO_D(DSPADDR(PIPE_A), D_ALL);
+ MMIO_D(DSPSTRIDE(PIPE_A), D_ALL);
+ MMIO_D(DSPPOS(PIPE_A), D_ALL);
+ MMIO_D(DSPSIZE(PIPE_A), D_ALL);
+ MMIO_DH(DSPSURF(PIPE_A), D_ALL, NULL, pri_surf_mmio_write);
+ MMIO_D(DSPOFFSET(PIPE_A), D_ALL);
+ MMIO_D(DSPSURFLIVE(PIPE_A), D_ALL);
+
+ MMIO_D(DSPCNTR(PIPE_B), D_ALL);
+ MMIO_D(DSPADDR(PIPE_B), D_ALL);
+ MMIO_D(DSPSTRIDE(PIPE_B), D_ALL);
+ MMIO_D(DSPPOS(PIPE_B), D_ALL);
+ MMIO_D(DSPSIZE(PIPE_B), D_ALL);
+ MMIO_DH(DSPSURF(PIPE_B), D_ALL, NULL, pri_surf_mmio_write);
+ MMIO_D(DSPOFFSET(PIPE_B), D_ALL);
+ MMIO_D(DSPSURFLIVE(PIPE_B), D_ALL);
+
+ MMIO_D(DSPCNTR(PIPE_C), D_ALL);
+ MMIO_D(DSPADDR(PIPE_C), D_ALL);
+ MMIO_D(DSPSTRIDE(PIPE_C), D_ALL);
+ MMIO_D(DSPPOS(PIPE_C), D_ALL);
+ MMIO_D(DSPSIZE(PIPE_C), D_ALL);
+ MMIO_DH(DSPSURF(PIPE_C), D_ALL, NULL, pri_surf_mmio_write);
+ MMIO_D(DSPOFFSET(PIPE_C), D_ALL);
+ MMIO_D(DSPSURFLIVE(PIPE_C), D_ALL);
+
+ MMIO_D(SPRCTL(PIPE_A), D_ALL);
+ MMIO_D(SPRLINOFF(PIPE_A), D_ALL);
+ MMIO_D(SPRSTRIDE(PIPE_A), D_ALL);
+ MMIO_D(SPRPOS(PIPE_A), D_ALL);
+ MMIO_D(SPRSIZE(PIPE_A), D_ALL);
+ MMIO_D(SPRKEYVAL(PIPE_A), D_ALL);
+ MMIO_D(SPRKEYMSK(PIPE_A), D_ALL);
+ MMIO_DH(SPRSURF(PIPE_A), D_ALL, NULL, spr_surf_mmio_write);
+ MMIO_D(SPRKEYMAX(PIPE_A), D_ALL);
+ MMIO_D(SPROFFSET(PIPE_A), D_ALL);
+ MMIO_D(SPRSCALE(PIPE_A), D_ALL);
+ MMIO_D(SPRSURFLIVE(PIPE_A), D_ALL);
+
+ MMIO_D(SPRCTL(PIPE_B), D_ALL);
+ MMIO_D(SPRLINOFF(PIPE_B), D_ALL);
+ MMIO_D(SPRSTRIDE(PIPE_B), D_ALL);
+ MMIO_D(SPRPOS(PIPE_B), D_ALL);
+ MMIO_D(SPRSIZE(PIPE_B), D_ALL);
+ MMIO_D(SPRKEYVAL(PIPE_B), D_ALL);
+ MMIO_D(SPRKEYMSK(PIPE_B), D_ALL);
+ MMIO_DH(SPRSURF(PIPE_B), D_ALL, NULL, spr_surf_mmio_write);
+ MMIO_D(SPRKEYMAX(PIPE_B), D_ALL);
+ MMIO_D(SPROFFSET(PIPE_B), D_ALL);
+ MMIO_D(SPRSCALE(PIPE_B), D_ALL);
+ MMIO_D(SPRSURFLIVE(PIPE_B), D_ALL);
+
+ MMIO_D(SPRCTL(PIPE_C), D_ALL);
+ MMIO_D(SPRLINOFF(PIPE_C), D_ALL);
+ MMIO_D(SPRSTRIDE(PIPE_C), D_ALL);
+ MMIO_D(SPRPOS(PIPE_C), D_ALL);
+ MMIO_D(SPRSIZE(PIPE_C), D_ALL);
+ MMIO_D(SPRKEYVAL(PIPE_C), D_ALL);
+ MMIO_D(SPRKEYMSK(PIPE_C), D_ALL);
+ MMIO_DH(SPRSURF(PIPE_C), D_ALL, NULL, spr_surf_mmio_write);
+ MMIO_D(SPRKEYMAX(PIPE_C), D_ALL);
+ MMIO_D(SPROFFSET(PIPE_C), D_ALL);
+ MMIO_D(SPRSCALE(PIPE_C), D_ALL);
+ MMIO_D(SPRSURFLIVE(PIPE_C), D_ALL);
+
+ MMIO_F(LGC_PALETTE(PIPE_A, 0), 4 * 256, 0, 0, 0, D_ALL, NULL, NULL);
+ MMIO_F(LGC_PALETTE(PIPE_B, 0), 4 * 256, 0, 0, 0, D_ALL, NULL, NULL);
+ MMIO_F(LGC_PALETTE(PIPE_C, 0), 4 * 256, 0, 0, 0, D_ALL, NULL, NULL);
+
+ MMIO_D(HTOTAL(TRANSCODER_A), D_ALL);
+ MMIO_D(HBLANK(TRANSCODER_A), D_ALL);
+ MMIO_D(HSYNC(TRANSCODER_A), D_ALL);
+ MMIO_D(VTOTAL(TRANSCODER_A), D_ALL);
+ MMIO_D(VBLANK(TRANSCODER_A), D_ALL);
+ MMIO_D(VSYNC(TRANSCODER_A), D_ALL);
+ MMIO_D(BCLRPAT(TRANSCODER_A), D_ALL);
+ MMIO_D(VSYNCSHIFT(TRANSCODER_A), D_ALL);
+ MMIO_D(PIPESRC(TRANSCODER_A), D_ALL);
+
+ MMIO_D(HTOTAL(TRANSCODER_B), D_ALL);
+ MMIO_D(HBLANK(TRANSCODER_B), D_ALL);
+ MMIO_D(HSYNC(TRANSCODER_B), D_ALL);
+ MMIO_D(VTOTAL(TRANSCODER_B), D_ALL);
+ MMIO_D(VBLANK(TRANSCODER_B), D_ALL);
+ MMIO_D(VSYNC(TRANSCODER_B), D_ALL);
+ MMIO_D(BCLRPAT(TRANSCODER_B), D_ALL);
+ MMIO_D(VSYNCSHIFT(TRANSCODER_B), D_ALL);
+ MMIO_D(PIPESRC(TRANSCODER_B), D_ALL);
+
+ MMIO_D(HTOTAL(TRANSCODER_C), D_ALL);
+ MMIO_D(HBLANK(TRANSCODER_C), D_ALL);
+ MMIO_D(HSYNC(TRANSCODER_C), D_ALL);
+ MMIO_D(VTOTAL(TRANSCODER_C), D_ALL);
+ MMIO_D(VBLANK(TRANSCODER_C), D_ALL);
+ MMIO_D(VSYNC(TRANSCODER_C), D_ALL);
+ MMIO_D(BCLRPAT(TRANSCODER_C), D_ALL);
+ MMIO_D(VSYNCSHIFT(TRANSCODER_C), D_ALL);
+ MMIO_D(PIPESRC(TRANSCODER_C), D_ALL);
+
+ MMIO_D(HTOTAL(TRANSCODER_EDP), D_ALL);
+ MMIO_D(HBLANK(TRANSCODER_EDP), D_ALL);
+ MMIO_D(HSYNC(TRANSCODER_EDP), D_ALL);
+ MMIO_D(VTOTAL(TRANSCODER_EDP), D_ALL);
+ MMIO_D(VBLANK(TRANSCODER_EDP), D_ALL);
+ MMIO_D(VSYNC(TRANSCODER_EDP), D_ALL);
+ MMIO_D(BCLRPAT(TRANSCODER_EDP), D_ALL);
+ MMIO_D(VSYNCSHIFT(TRANSCODER_EDP), D_ALL);
+
+ MMIO_D(PIPE_DATA_M1(TRANSCODER_A), D_ALL);
+ MMIO_D(PIPE_DATA_N1(TRANSCODER_A), D_ALL);
+ MMIO_D(PIPE_DATA_M2(TRANSCODER_A), D_ALL);
+ MMIO_D(PIPE_DATA_N2(TRANSCODER_A), D_ALL);
+ MMIO_D(PIPE_LINK_M1(TRANSCODER_A), D_ALL);
+ MMIO_D(PIPE_LINK_N1(TRANSCODER_A), D_ALL);
+ MMIO_D(PIPE_LINK_M2(TRANSCODER_A), D_ALL);
+ MMIO_D(PIPE_LINK_N2(TRANSCODER_A), D_ALL);
+
+ MMIO_D(PIPE_DATA_M1(TRANSCODER_B), D_ALL);
+ MMIO_D(PIPE_DATA_N1(TRANSCODER_B), D_ALL);
+ MMIO_D(PIPE_DATA_M2(TRANSCODER_B), D_ALL);
+ MMIO_D(PIPE_DATA_N2(TRANSCODER_B), D_ALL);
+ MMIO_D(PIPE_LINK_M1(TRANSCODER_B), D_ALL);
+ MMIO_D(PIPE_LINK_N1(TRANSCODER_B), D_ALL);
+ MMIO_D(PIPE_LINK_M2(TRANSCODER_B), D_ALL);
+ MMIO_D(PIPE_LINK_N2(TRANSCODER_B), D_ALL);
+
+ MMIO_D(PIPE_DATA_M1(TRANSCODER_C), D_ALL);
+ MMIO_D(PIPE_DATA_N1(TRANSCODER_C), D_ALL);
+ MMIO_D(PIPE_DATA_M2(TRANSCODER_C), D_ALL);
+ MMIO_D(PIPE_DATA_N2(TRANSCODER_C), D_ALL);
+ MMIO_D(PIPE_LINK_M1(TRANSCODER_C), D_ALL);
+ MMIO_D(PIPE_LINK_N1(TRANSCODER_C), D_ALL);
+ MMIO_D(PIPE_LINK_M2(TRANSCODER_C), D_ALL);
+ MMIO_D(PIPE_LINK_N2(TRANSCODER_C), D_ALL);
+
+ MMIO_D(PIPE_DATA_M1(TRANSCODER_EDP), D_ALL);
+ MMIO_D(PIPE_DATA_N1(TRANSCODER_EDP), D_ALL);
+ MMIO_D(PIPE_DATA_M2(TRANSCODER_EDP), D_ALL);
+ MMIO_D(PIPE_DATA_N2(TRANSCODER_EDP), D_ALL);
+ MMIO_D(PIPE_LINK_M1(TRANSCODER_EDP), D_ALL);
+ MMIO_D(PIPE_LINK_N1(TRANSCODER_EDP), D_ALL);
+ MMIO_D(PIPE_LINK_M2(TRANSCODER_EDP), D_ALL);
+ MMIO_D(PIPE_LINK_N2(TRANSCODER_EDP), D_ALL);
+
+ MMIO_D(PF_CTL(PIPE_A), D_ALL);
+ MMIO_D(PF_WIN_SZ(PIPE_A), D_ALL);
+ MMIO_D(PF_WIN_POS(PIPE_A), D_ALL);
+ MMIO_D(PF_VSCALE(PIPE_A), D_ALL);
+ MMIO_D(PF_HSCALE(PIPE_A), D_ALL);
+
+ MMIO_D(PF_CTL(PIPE_B), D_ALL);
+ MMIO_D(PF_WIN_SZ(PIPE_B), D_ALL);
+ MMIO_D(PF_WIN_POS(PIPE_B), D_ALL);
+ MMIO_D(PF_VSCALE(PIPE_B), D_ALL);
+ MMIO_D(PF_HSCALE(PIPE_B), D_ALL);
+
+ MMIO_D(PF_CTL(PIPE_C), D_ALL);
+ MMIO_D(PF_WIN_SZ(PIPE_C), D_ALL);
+ MMIO_D(PF_WIN_POS(PIPE_C), D_ALL);
+ MMIO_D(PF_VSCALE(PIPE_C), D_ALL);
+ MMIO_D(PF_HSCALE(PIPE_C), D_ALL);
+
+ MMIO_D(WM0_PIPEA_ILK, D_ALL);
+ MMIO_D(WM0_PIPEB_ILK, D_ALL);
+ MMIO_D(WM0_PIPEC_IVB, D_ALL);
+ MMIO_D(WM1_LP_ILK, D_ALL);
+ MMIO_D(WM2_LP_ILK, D_ALL);
+ MMIO_D(WM3_LP_ILK, D_ALL);
+ MMIO_D(WM1S_LP_ILK, D_ALL);
+ MMIO_D(WM2S_LP_IVB, D_ALL);
+ MMIO_D(WM3S_LP_IVB, D_ALL);
+
+ MMIO_D(BLC_PWM_CPU_CTL2, D_ALL);
+ MMIO_D(BLC_PWM_CPU_CTL, D_ALL);
+ MMIO_D(BLC_PWM_PCH_CTL1, D_ALL);
+ MMIO_D(BLC_PWM_PCH_CTL2, D_ALL);
+
+ MMIO_D(0x48268, D_ALL);
+
+ MMIO_F(PCH_GMBUS0, 4 * 4, 0, 0, 0, D_ALL, gmbus_mmio_read,
+ gmbus_mmio_write);
+ MMIO_F(PCH_GPIOA, 6 * 4, F_UNALIGN, 0, 0, D_ALL, NULL, NULL);
+ MMIO_F(0xe4f00, 0x28, 0, 0, 0, D_ALL, NULL, NULL);
+
+ MMIO_F(_PCH_DPB_AUX_CH_CTL, 6 * 4, 0, 0, 0, D_PRE_SKL, NULL,
+ dp_aux_ch_ctl_mmio_write);
+ MMIO_F(_PCH_DPC_AUX_CH_CTL, 6 * 4, 0, 0, 0, D_PRE_SKL, NULL,
+ dp_aux_ch_ctl_mmio_write);
+ MMIO_F(_PCH_DPD_AUX_CH_CTL, 6 * 4, 0, 0, 0, D_PRE_SKL, NULL,
+ dp_aux_ch_ctl_mmio_write);
+
+ MMIO_RO(PCH_ADPA, D_ALL, 0, ADPA_CRT_HOTPLUG_MONITOR_MASK, NULL, pch_adpa_mmio_write);
+
+ MMIO_DH(_PCH_TRANSACONF, D_ALL, NULL, transconf_mmio_write);
+ MMIO_DH(_PCH_TRANSBCONF, D_ALL, NULL, transconf_mmio_write);
+
+ MMIO_DH(FDI_RX_IIR(PIPE_A), D_ALL, NULL, fdi_rx_iir_mmio_write);
+ MMIO_DH(FDI_RX_IIR(PIPE_B), D_ALL, NULL, fdi_rx_iir_mmio_write);
+ MMIO_DH(FDI_RX_IIR(PIPE_C), D_ALL, NULL, fdi_rx_iir_mmio_write);
+ MMIO_DH(FDI_RX_IMR(PIPE_A), D_ALL, NULL, update_fdi_rx_iir_status);
+ MMIO_DH(FDI_RX_IMR(PIPE_B), D_ALL, NULL, update_fdi_rx_iir_status);
+ MMIO_DH(FDI_RX_IMR(PIPE_C), D_ALL, NULL, update_fdi_rx_iir_status);
+ MMIO_DH(FDI_RX_CTL(PIPE_A), D_ALL, NULL, update_fdi_rx_iir_status);
+ MMIO_DH(FDI_RX_CTL(PIPE_B), D_ALL, NULL, update_fdi_rx_iir_status);
+ MMIO_DH(FDI_RX_CTL(PIPE_C), D_ALL, NULL, update_fdi_rx_iir_status);
+
+ MMIO_D(_PCH_TRANS_HTOTAL_A, D_ALL);
+ MMIO_D(_PCH_TRANS_HBLANK_A, D_ALL);
+ MMIO_D(_PCH_TRANS_HSYNC_A, D_ALL);
+ MMIO_D(_PCH_TRANS_VTOTAL_A, D_ALL);
+ MMIO_D(_PCH_TRANS_VBLANK_A, D_ALL);
+ MMIO_D(_PCH_TRANS_VSYNC_A, D_ALL);
+ MMIO_D(_PCH_TRANS_VSYNCSHIFT_A, D_ALL);
+
+ MMIO_D(_PCH_TRANS_HTOTAL_B, D_ALL);
+ MMIO_D(_PCH_TRANS_HBLANK_B, D_ALL);
+ MMIO_D(_PCH_TRANS_HSYNC_B, D_ALL);
+ MMIO_D(_PCH_TRANS_VTOTAL_B, D_ALL);
+ MMIO_D(_PCH_TRANS_VBLANK_B, D_ALL);
+ MMIO_D(_PCH_TRANS_VSYNC_B, D_ALL);
+ MMIO_D(_PCH_TRANS_VSYNCSHIFT_B, D_ALL);
+
+ MMIO_D(_PCH_TRANSA_DATA_M1, D_ALL);
+ MMIO_D(_PCH_TRANSA_DATA_N1, D_ALL);
+ MMIO_D(_PCH_TRANSA_DATA_M2, D_ALL);
+ MMIO_D(_PCH_TRANSA_DATA_N2, D_ALL);
+ MMIO_D(_PCH_TRANSA_LINK_M1, D_ALL);
+ MMIO_D(_PCH_TRANSA_LINK_N1, D_ALL);
+ MMIO_D(_PCH_TRANSA_LINK_M2, D_ALL);
+ MMIO_D(_PCH_TRANSA_LINK_N2, D_ALL);
+
+ MMIO_D(TRANS_DP_CTL(PIPE_A), D_ALL);
+ MMIO_D(TRANS_DP_CTL(PIPE_B), D_ALL);
+ MMIO_D(TRANS_DP_CTL(PIPE_C), D_ALL);
+
+ MMIO_D(TVIDEO_DIP_CTL(PIPE_A), D_ALL);
+ MMIO_D(TVIDEO_DIP_DATA(PIPE_A), D_ALL);
+ MMIO_D(TVIDEO_DIP_GCP(PIPE_A), D_ALL);
+
+ MMIO_D(TVIDEO_DIP_CTL(PIPE_B), D_ALL);
+ MMIO_D(TVIDEO_DIP_DATA(PIPE_B), D_ALL);
+ MMIO_D(TVIDEO_DIP_GCP(PIPE_B), D_ALL);
+
+ MMIO_D(TVIDEO_DIP_CTL(PIPE_C), D_ALL);
+ MMIO_D(TVIDEO_DIP_DATA(PIPE_C), D_ALL);
+ MMIO_D(TVIDEO_DIP_GCP(PIPE_C), D_ALL);
+
+ MMIO_D(_FDI_RXA_MISC, D_ALL);
+ MMIO_D(_FDI_RXB_MISC, D_ALL);
+ MMIO_D(_FDI_RXA_TUSIZE1, D_ALL);
+ MMIO_D(_FDI_RXA_TUSIZE2, D_ALL);
+ MMIO_D(_FDI_RXB_TUSIZE1, D_ALL);
+ MMIO_D(_FDI_RXB_TUSIZE2, D_ALL);
+
+ MMIO_DH(PCH_PP_CONTROL, D_ALL, NULL, pch_pp_control_mmio_write);
+ MMIO_D(PCH_PP_DIVISOR, D_ALL);
+ MMIO_D(PCH_PP_STATUS, D_ALL);
+ MMIO_D(PCH_LVDS, D_ALL);
+ MMIO_D(_PCH_DPLL_A, D_ALL);
+ MMIO_D(_PCH_DPLL_B, D_ALL);
+ MMIO_D(_PCH_FPA0, D_ALL);
+ MMIO_D(_PCH_FPA1, D_ALL);
+ MMIO_D(_PCH_FPB0, D_ALL);
+ MMIO_D(_PCH_FPB1, D_ALL);
+ MMIO_D(PCH_DREF_CONTROL, D_ALL);
+ MMIO_D(PCH_RAWCLK_FREQ, D_ALL);
+ MMIO_D(PCH_DPLL_SEL, D_ALL);
+
+ MMIO_D(0x61208, D_ALL);
+ MMIO_D(0x6120c, D_ALL);
+ MMIO_D(PCH_PP_ON_DELAYS, D_ALL);
+ MMIO_D(PCH_PP_OFF_DELAYS, D_ALL);
+
+ MMIO_DH(0xe651c, D_ALL, dpy_reg_mmio_read, NULL);
+ MMIO_DH(0xe661c, D_ALL, dpy_reg_mmio_read, NULL);
+ MMIO_DH(0xe671c, D_ALL, dpy_reg_mmio_read, NULL);
+ MMIO_DH(0xe681c, D_ALL, dpy_reg_mmio_read, NULL);
+ MMIO_DH(0xe6c04, D_ALL, dpy_reg_mmio_read_2, NULL);
+ MMIO_DH(0xe6e1c, D_ALL, dpy_reg_mmio_read_3, NULL);
+
+ MMIO_RO(PCH_PORT_HOTPLUG, D_ALL, 0,
+ PORTA_HOTPLUG_STATUS_MASK
+ | PORTB_HOTPLUG_STATUS_MASK
+ | PORTC_HOTPLUG_STATUS_MASK
+ | PORTD_HOTPLUG_STATUS_MASK,
+ NULL, NULL);
+
+ MMIO_DH(LCPLL_CTL, D_ALL, NULL, lcpll_ctl_mmio_write);
+ MMIO_D(FUSE_STRAP, D_ALL);
+ MMIO_D(DIGITAL_PORT_HOTPLUG_CNTRL, D_ALL);
+
+ MMIO_D(DISP_ARB_CTL, D_ALL);
+ MMIO_D(DISP_ARB_CTL2, D_ALL);
+
+ MMIO_D(ILK_DISPLAY_CHICKEN1, D_ALL);
+ MMIO_D(ILK_DISPLAY_CHICKEN2, D_ALL);
+ MMIO_D(ILK_DSPCLK_GATE_D, D_ALL);
+
+ MMIO_D(SOUTH_CHICKEN1, D_ALL);
+ MMIO_DH(SOUTH_CHICKEN2, D_ALL, NULL, south_chicken2_mmio_write);
+ MMIO_D(_TRANSA_CHICKEN1, D_ALL);
+ MMIO_D(_TRANSB_CHICKEN1, D_ALL);
+ MMIO_D(SOUTH_DSPCLK_GATE_D, D_ALL);
+ MMIO_D(_TRANSA_CHICKEN2, D_ALL);
+ MMIO_D(_TRANSB_CHICKEN2, D_ALL);
+
+ MMIO_D(ILK_DPFC_CB_BASE, D_ALL);
+ MMIO_D(ILK_DPFC_CONTROL, D_ALL);
+ MMIO_D(ILK_DPFC_RECOMP_CTL, D_ALL);
+ MMIO_D(ILK_DPFC_STATUS, D_ALL);
+ MMIO_D(ILK_DPFC_FENCE_YOFF, D_ALL);
+ MMIO_D(ILK_DPFC_CHICKEN, D_ALL);
+ MMIO_D(ILK_FBC_RT_BASE, D_ALL);
+
+ MMIO_D(IPS_CTL, D_ALL);
+
+ MMIO_D(PIPE_CSC_COEFF_RY_GY(PIPE_A), D_ALL);
+ MMIO_D(PIPE_CSC_COEFF_BY(PIPE_A), D_ALL);
+ MMIO_D(PIPE_CSC_COEFF_RU_GU(PIPE_A), D_ALL);
+ MMIO_D(PIPE_CSC_COEFF_BU(PIPE_A), D_ALL);
+ MMIO_D(PIPE_CSC_COEFF_RV_GV(PIPE_A), D_ALL);
+ MMIO_D(PIPE_CSC_COEFF_BV(PIPE_A), D_ALL);
+ MMIO_D(PIPE_CSC_MODE(PIPE_A), D_ALL);
+ MMIO_D(PIPE_CSC_PREOFF_HI(PIPE_A), D_ALL);
+ MMIO_D(PIPE_CSC_PREOFF_ME(PIPE_A), D_ALL);
+ MMIO_D(PIPE_CSC_PREOFF_LO(PIPE_A), D_ALL);
+ MMIO_D(PIPE_CSC_POSTOFF_HI(PIPE_A), D_ALL);
+ MMIO_D(PIPE_CSC_POSTOFF_ME(PIPE_A), D_ALL);
+ MMIO_D(PIPE_CSC_POSTOFF_LO(PIPE_A), D_ALL);
+
+ MMIO_D(PIPE_CSC_COEFF_RY_GY(PIPE_B), D_ALL);
+ MMIO_D(PIPE_CSC_COEFF_BY(PIPE_B), D_ALL);
+ MMIO_D(PIPE_CSC_COEFF_RU_GU(PIPE_B), D_ALL);
+ MMIO_D(PIPE_CSC_COEFF_BU(PIPE_B), D_ALL);
+ MMIO_D(PIPE_CSC_COEFF_RV_GV(PIPE_B), D_ALL);
+ MMIO_D(PIPE_CSC_COEFF_BV(PIPE_B), D_ALL);
+ MMIO_D(PIPE_CSC_MODE(PIPE_B), D_ALL);
+ MMIO_D(PIPE_CSC_PREOFF_HI(PIPE_B), D_ALL);
+ MMIO_D(PIPE_CSC_PREOFF_ME(PIPE_B), D_ALL);
+ MMIO_D(PIPE_CSC_PREOFF_LO(PIPE_B), D_ALL);
+ MMIO_D(PIPE_CSC_POSTOFF_HI(PIPE_B), D_ALL);
+ MMIO_D(PIPE_CSC_POSTOFF_ME(PIPE_B), D_ALL);
+ MMIO_D(PIPE_CSC_POSTOFF_LO(PIPE_B), D_ALL);
+
+ MMIO_D(PIPE_CSC_COEFF_RY_GY(PIPE_C), D_ALL);
+ MMIO_D(PIPE_CSC_COEFF_BY(PIPE_C), D_ALL);
+ MMIO_D(PIPE_CSC_COEFF_RU_GU(PIPE_C), D_ALL);
+ MMIO_D(PIPE_CSC_COEFF_BU(PIPE_C), D_ALL);
+ MMIO_D(PIPE_CSC_COEFF_RV_GV(PIPE_C), D_ALL);
+ MMIO_D(PIPE_CSC_COEFF_BV(PIPE_C), D_ALL);
+ MMIO_D(PIPE_CSC_MODE(PIPE_C), D_ALL);
+ MMIO_D(PIPE_CSC_PREOFF_HI(PIPE_C), D_ALL);
+ MMIO_D(PIPE_CSC_PREOFF_ME(PIPE_C), D_ALL);
+ MMIO_D(PIPE_CSC_PREOFF_LO(PIPE_C), D_ALL);
+ MMIO_D(PIPE_CSC_POSTOFF_HI(PIPE_C), D_ALL);
+ MMIO_D(PIPE_CSC_POSTOFF_ME(PIPE_C), D_ALL);
+ MMIO_D(PIPE_CSC_POSTOFF_LO(PIPE_C), D_ALL);
+
+ MMIO_D(PREC_PAL_INDEX(PIPE_A), D_ALL);
+ MMIO_D(PREC_PAL_DATA(PIPE_A), D_ALL);
+ MMIO_F(PREC_PAL_GC_MAX(PIPE_A, 0), 4 * 3, 0, 0, 0, D_ALL, NULL, NULL);
+
+ MMIO_D(PREC_PAL_INDEX(PIPE_B), D_ALL);
+ MMIO_D(PREC_PAL_DATA(PIPE_B), D_ALL);
+ MMIO_F(PREC_PAL_GC_MAX(PIPE_B, 0), 4 * 3, 0, 0, 0, D_ALL, NULL, NULL);
+
+ MMIO_D(PREC_PAL_INDEX(PIPE_C), D_ALL);
+ MMIO_D(PREC_PAL_DATA(PIPE_C), D_ALL);
+ MMIO_F(PREC_PAL_GC_MAX(PIPE_C, 0), 4 * 3, 0, 0, 0, D_ALL, NULL, NULL);
+
+ MMIO_D(0x60110, D_ALL);
+ MMIO_D(0x61110, D_ALL);
+ MMIO_F(0x70400, 0x40, 0, 0, 0, D_ALL, NULL, NULL);
+ MMIO_F(0x71400, 0x40, 0, 0, 0, D_ALL, NULL, NULL);
+ MMIO_F(0x72400, 0x40, 0, 0, 0, D_ALL, NULL, NULL);
+ MMIO_F(0x70440, 0xc, 0, 0, 0, D_PRE_SKL, NULL, NULL);
+ MMIO_F(0x71440, 0xc, 0, 0, 0, D_PRE_SKL, NULL, NULL);
+ MMIO_F(0x72440, 0xc, 0, 0, 0, D_PRE_SKL, NULL, NULL);
+ MMIO_F(0x7044c, 0xc, 0, 0, 0, D_PRE_SKL, NULL, NULL);
+ MMIO_F(0x7144c, 0xc, 0, 0, 0, D_PRE_SKL, NULL, NULL);
+ MMIO_F(0x7244c, 0xc, 0, 0, 0, D_PRE_SKL, NULL, NULL);
+
+ MMIO_D(PIPE_WM_LINETIME(PIPE_A), D_ALL);
+ MMIO_D(PIPE_WM_LINETIME(PIPE_B), D_ALL);
+ MMIO_D(PIPE_WM_LINETIME(PIPE_C), D_ALL);
+ MMIO_D(SPLL_CTL, D_ALL);
+ MMIO_D(_WRPLL_CTL1, D_ALL);
+ MMIO_D(_WRPLL_CTL2, D_ALL);
+ MMIO_D(PORT_CLK_SEL(PORT_A), D_ALL);
+ MMIO_D(PORT_CLK_SEL(PORT_B), D_ALL);
+ MMIO_D(PORT_CLK_SEL(PORT_C), D_ALL);
+ MMIO_D(PORT_CLK_SEL(PORT_D), D_ALL);
+ MMIO_D(PORT_CLK_SEL(PORT_E), D_ALL);
+ MMIO_D(TRANS_CLK_SEL(TRANSCODER_A), D_ALL);
+ MMIO_D(TRANS_CLK_SEL(TRANSCODER_B), D_ALL);
+ MMIO_D(TRANS_CLK_SEL(TRANSCODER_C), D_ALL);
+
+ MMIO_D(HSW_NDE_RSTWRN_OPT, D_ALL);
+ MMIO_D(0x46508, D_ALL);
+
+ MMIO_D(0x49080, D_ALL);
+ MMIO_D(0x49180, D_ALL);
+ MMIO_D(0x49280, D_ALL);
+
+ MMIO_F(0x49090, 0x14, 0, 0, 0, D_ALL, NULL, NULL);
+ MMIO_F(0x49190, 0x14, 0, 0, 0, D_ALL, NULL, NULL);
+ MMIO_F(0x49290, 0x14, 0, 0, 0, D_ALL, NULL, NULL);
+
+ MMIO_D(GAMMA_MODE(PIPE_A), D_ALL);
+ MMIO_D(GAMMA_MODE(PIPE_B), D_ALL);
+ MMIO_D(GAMMA_MODE(PIPE_C), D_ALL);
+
+ MMIO_D(PIPE_MULT(PIPE_A), D_ALL);
+ MMIO_D(PIPE_MULT(PIPE_B), D_ALL);
+ MMIO_D(PIPE_MULT(PIPE_C), D_ALL);
+
+ MMIO_D(HSW_TVIDEO_DIP_CTL(TRANSCODER_A), D_ALL);
+ MMIO_D(HSW_TVIDEO_DIP_CTL(TRANSCODER_B), D_ALL);
+ MMIO_D(HSW_TVIDEO_DIP_CTL(TRANSCODER_C), D_ALL);
+
+ MMIO_DH(SFUSE_STRAP, D_ALL, NULL, NULL);
+ MMIO_D(SBI_ADDR, D_ALL);
+ MMIO_DH(SBI_DATA, D_ALL, sbi_data_mmio_read, NULL);
+ MMIO_DH(SBI_CTL_STAT, D_ALL, NULL, sbi_ctl_mmio_write);
+ MMIO_D(PIXCLK_GATE, D_ALL);
+
+ MMIO_F(_DPA_AUX_CH_CTL, 6 * 4, 0, 0, 0, D_ALL, NULL,
+ dp_aux_ch_ctl_mmio_write);
+
+ MMIO_DH(DDI_BUF_CTL(PORT_A), D_ALL, NULL, ddi_buf_ctl_mmio_write);
+ MMIO_DH(DDI_BUF_CTL(PORT_B), D_ALL, NULL, ddi_buf_ctl_mmio_write);
+ MMIO_DH(DDI_BUF_CTL(PORT_C), D_ALL, NULL, ddi_buf_ctl_mmio_write);
+ MMIO_DH(DDI_BUF_CTL(PORT_D), D_ALL, NULL, ddi_buf_ctl_mmio_write);
+ MMIO_DH(DDI_BUF_CTL(PORT_E), D_ALL, NULL, ddi_buf_ctl_mmio_write);
+
+ MMIO_DH(DP_TP_CTL(PORT_A), D_ALL, NULL, dp_tp_ctl_mmio_write);
+ MMIO_DH(DP_TP_CTL(PORT_B), D_ALL, NULL, dp_tp_ctl_mmio_write);
+ MMIO_DH(DP_TP_CTL(PORT_C), D_ALL, NULL, dp_tp_ctl_mmio_write);
+ MMIO_DH(DP_TP_CTL(PORT_D), D_ALL, NULL, dp_tp_ctl_mmio_write);
+ MMIO_DH(DP_TP_CTL(PORT_E), D_ALL, NULL, dp_tp_ctl_mmio_write);
+
+ MMIO_DH(DP_TP_STATUS(PORT_A), D_ALL, NULL, dp_tp_status_mmio_write);
+ MMIO_DH(DP_TP_STATUS(PORT_B), D_ALL, NULL, dp_tp_status_mmio_write);
+ MMIO_DH(DP_TP_STATUS(PORT_C), D_ALL, NULL, dp_tp_status_mmio_write);
+ MMIO_DH(DP_TP_STATUS(PORT_D), D_ALL, NULL, dp_tp_status_mmio_write);
+ MMIO_DH(DP_TP_STATUS(PORT_E), D_ALL, NULL, NULL);
+
+ MMIO_F(_DDI_BUF_TRANS_A, 0x50, 0, 0, 0, D_ALL, NULL, NULL);
+ MMIO_F(0x64e60, 0x50, 0, 0, 0, D_ALL, NULL, NULL);
+ MMIO_F(0x64ec0, 0x50, 0, 0, 0, D_ALL, NULL, NULL);
+ MMIO_F(0x64f20, 0x50, 0, 0, 0, D_ALL, NULL, NULL);
+ MMIO_F(0x64f80, 0x50, 0, 0, 0, D_ALL, NULL, NULL);
+
+ MMIO_D(HSW_AUD_CFG(PIPE_A), D_ALL);
+ MMIO_D(HSW_AUD_PIN_ELD_CP_VLD, D_ALL);
+
+ MMIO_DH(_TRANS_DDI_FUNC_CTL_A, D_ALL, NULL, NULL);
+ MMIO_DH(_TRANS_DDI_FUNC_CTL_B, D_ALL, NULL, NULL);
+ MMIO_DH(_TRANS_DDI_FUNC_CTL_C, D_ALL, NULL, NULL);
+ MMIO_DH(_TRANS_DDI_FUNC_CTL_EDP, D_ALL, NULL, NULL);
+
+ MMIO_D(_TRANSA_MSA_MISC, D_ALL);
+ MMIO_D(_TRANSB_MSA_MISC, D_ALL);
+ MMIO_D(_TRANSC_MSA_MISC, D_ALL);
+ MMIO_D(_TRANS_EDP_MSA_MISC, D_ALL);
+
+ MMIO_DH(FORCEWAKE, D_ALL, NULL, NULL);
+ MMIO_D(FORCEWAKE_ACK, D_ALL);
+ MMIO_D(GEN6_GT_CORE_STATUS, D_ALL);
+ MMIO_D(GEN6_GT_THREAD_STATUS_REG, D_ALL);
+ MMIO_D(GTFIFODBG, D_ALL);
+ MMIO_D(GTFIFOCTL, D_ALL);
+ MMIO_DH(FORCEWAKE_MT, D_PRE_SKL, NULL, mul_force_wake_write);
+ MMIO_DH(FORCEWAKE_ACK_HSW, D_HSW | D_BDW, NULL, NULL);
+ MMIO_D(ECOBUS, D_ALL);
+ MMIO_DH(GEN6_RC_CONTROL, D_ALL, NULL, NULL);
+ MMIO_DH(GEN6_RC_STATE, D_ALL, NULL, NULL);
+ MMIO_D(GEN6_RPNSWREQ, D_ALL);
+ MMIO_D(GEN6_RC_VIDEO_FREQ, D_ALL);
+ MMIO_D(GEN6_RP_DOWN_TIMEOUT, D_ALL);
+ MMIO_D(GEN6_RP_INTERRUPT_LIMITS, D_ALL);
+ MMIO_D(GEN6_RPSTAT1, D_ALL);
+ MMIO_D(GEN6_RP_CONTROL, D_ALL);
+ MMIO_D(GEN6_RP_UP_THRESHOLD, D_ALL);
+ MMIO_D(GEN6_RP_DOWN_THRESHOLD, D_ALL);
+ MMIO_D(GEN6_RP_CUR_UP_EI, D_ALL);
+ MMIO_D(GEN6_RP_CUR_UP, D_ALL);
+ MMIO_D(GEN6_RP_PREV_UP, D_ALL);
+ MMIO_D(GEN6_RP_CUR_DOWN_EI, D_ALL);
+ MMIO_D(GEN6_RP_CUR_DOWN, D_ALL);
+ MMIO_D(GEN6_RP_PREV_DOWN, D_ALL);
+ MMIO_D(GEN6_RP_UP_EI, D_ALL);
+ MMIO_D(GEN6_RP_DOWN_EI, D_ALL);
+ MMIO_D(GEN6_RP_IDLE_HYSTERSIS, D_ALL);
+ MMIO_D(GEN6_RC1_WAKE_RATE_LIMIT, D_ALL);
+ MMIO_D(GEN6_RC6_WAKE_RATE_LIMIT, D_ALL);
+ MMIO_D(GEN6_RC6pp_WAKE_RATE_LIMIT, D_ALL);
+ MMIO_D(GEN6_RC_EVALUATION_INTERVAL, D_ALL);
+ MMIO_D(GEN6_RC_IDLE_HYSTERSIS, D_ALL);
+ MMIO_D(GEN6_RC_SLEEP, D_ALL);
+ MMIO_D(GEN6_RC1e_THRESHOLD, D_ALL);
+ MMIO_D(GEN6_RC6_THRESHOLD, D_ALL);
+ MMIO_D(GEN6_RC6p_THRESHOLD, D_ALL);
+ MMIO_D(GEN6_RC6pp_THRESHOLD, D_ALL);
+ MMIO_D(GEN6_PMINTRMSK, D_ALL);
+ MMIO_DH(HSW_PWR_WELL_BIOS, D_HSW | D_BDW, NULL, power_well_ctl_mmio_write);
+ MMIO_DH(HSW_PWR_WELL_DRIVER, D_HSW | D_BDW, NULL, power_well_ctl_mmio_write);
+ MMIO_DH(HSW_PWR_WELL_KVMR, D_HSW | D_BDW, NULL, power_well_ctl_mmio_write);
+ MMIO_DH(HSW_PWR_WELL_DEBUG, D_HSW | D_BDW, NULL, power_well_ctl_mmio_write);
+ MMIO_DH(HSW_PWR_WELL_CTL5, D_HSW | D_BDW, NULL, power_well_ctl_mmio_write);
+ MMIO_DH(HSW_PWR_WELL_CTL6, D_HSW | D_BDW, NULL, power_well_ctl_mmio_write);
+
+ MMIO_D(RSTDBYCTL, D_ALL);
+
+ MMIO_DH(GEN6_GDRST, D_ALL, NULL, gdrst_mmio_write);
+ MMIO_F(FENCE_REG_GEN6_LO(0), 0x80, 0, 0, 0, D_ALL, fence_mmio_read, fence_mmio_write);
+ MMIO_F(VGT_PVINFO_PAGE, VGT_PVINFO_SIZE, F_UNALIGN, 0, 0, D_ALL, pvinfo_mmio_read, pvinfo_mmio_write);
+ MMIO_DH(CPU_VGACNTRL, D_ALL, NULL, vga_control_mmio_write);
+
+ MMIO_F(MCHBAR_MIRROR_BASE_SNB, 0x40000, 0, 0, 0, D_ALL, NULL, NULL);
+
+ MMIO_D(TILECTL, D_ALL);
+
+ MMIO_D(GEN6_UCGCTL1, D_ALL);
+ MMIO_D(GEN6_UCGCTL2, D_ALL);
+
+ MMIO_F(0x4f000, 0x90, 0, 0, 0, D_ALL, NULL, NULL);
+
+ MMIO_D(GEN6_PCODE_MAILBOX, D_PRE_SKL);
+ MMIO_D(GEN6_PCODE_DATA, D_ALL);
+ MMIO_D(0x13812c, D_ALL);
+ MMIO_DH(GEN7_ERR_INT, D_ALL, NULL, NULL);
+ MMIO_D(HSW_EDRAM_CAP, D_ALL);
+ MMIO_D(HSW_IDICR, D_ALL);
+ MMIO_DH(GFX_FLSH_CNTL_GEN6, D_ALL, NULL, NULL);
+
+ MMIO_D(0x3c, D_ALL);
+ MMIO_D(0x860, D_ALL);
+ MMIO_D(ECOSKPD, D_ALL);
+ MMIO_D(0x121d0, D_ALL);
+ MMIO_D(GEN6_BLITTER_ECOSKPD, D_ALL);
+ MMIO_D(0x41d0, D_ALL);
+ MMIO_D(GAC_ECO_BITS, D_ALL);
+ MMIO_D(0x6200, D_ALL);
+ MMIO_D(0x6204, D_ALL);
+ MMIO_D(0x6208, D_ALL);
+ MMIO_D(0x7118, D_ALL);
+ MMIO_D(0x7180, D_ALL);
+ MMIO_D(0x7408, D_ALL);
+ MMIO_D(0x7c00, D_ALL);
+ MMIO_D(GEN6_MBCTL, D_ALL);
+ MMIO_D(0x911c, D_ALL);
+ MMIO_D(0x9120, D_ALL);
+ MMIO_DFH(GEN7_UCGCTL4, D_ALL, F_CMD_ACCESS, NULL, NULL);
+
+ MMIO_D(GAB_CTL, D_ALL);
+ MMIO_D(0x48800, D_ALL);
+ MMIO_D(0xce044, D_ALL);
+ MMIO_D(0xe6500, D_ALL);
+ MMIO_D(0xe6504, D_ALL);
+ MMIO_D(0xe6600, D_ALL);
+ MMIO_D(0xe6604, D_ALL);
+ MMIO_D(0xe6700, D_ALL);
+ MMIO_D(0xe6704, D_ALL);
+ MMIO_D(0xe6800, D_ALL);
+ MMIO_D(0xe6804, D_ALL);
+ MMIO_D(PCH_GMBUS4, D_ALL);
+ MMIO_D(PCH_GMBUS5, D_ALL);
+
+ MMIO_D(0x902c, D_ALL);
+ MMIO_D(0xec008, D_ALL);
+ MMIO_D(0xec00c, D_ALL);
+ MMIO_D(0xec008 + 0x18, D_ALL);
+ MMIO_D(0xec00c + 0x18, D_ALL);
+ MMIO_D(0xec008 + 0x18 * 2, D_ALL);
+ MMIO_D(0xec00c + 0x18 * 2, D_ALL);
+ MMIO_D(0xec008 + 0x18 * 3, D_ALL);
+ MMIO_D(0xec00c + 0x18 * 3, D_ALL);
+ MMIO_D(0xec408, D_ALL);
+ MMIO_D(0xec40c, D_ALL);
+ MMIO_D(0xec408 + 0x18, D_ALL);
+ MMIO_D(0xec40c + 0x18, D_ALL);
+ MMIO_D(0xec408 + 0x18 * 2, D_ALL);
+ MMIO_D(0xec40c + 0x18 * 2, D_ALL);
+ MMIO_D(0xec408 + 0x18 * 3, D_ALL);
+ MMIO_D(0xec40c + 0x18 * 3, D_ALL);
+ MMIO_D(0xfc810, D_ALL);
+ MMIO_D(0xfc81c, D_ALL);
+ MMIO_D(0xfc828, D_ALL);
+ MMIO_D(0xfc834, D_ALL);
+ MMIO_D(0xfcc00, D_ALL);
+ MMIO_D(0xfcc0c, D_ALL);
+ MMIO_D(0xfcc18, D_ALL);
+ MMIO_D(0xfcc24, D_ALL);
+ MMIO_D(0xfd000, D_ALL);
+ MMIO_D(0xfd00c, D_ALL);
+ MMIO_D(0xfd018, D_ALL);
+ MMIO_D(0xfd024, D_ALL);
+ MMIO_D(0xfd034, D_ALL);
+
+ MMIO_DH(FPGA_DBG, D_ALL, NULL, fpga_dbg_mmio_write);
+ MMIO_D(0x2054, D_ALL);
+ MMIO_D(0x12054, D_ALL);
+ MMIO_D(0x22054, D_ALL);
+ MMIO_D(0x1a054, D_ALL);
+
+ MMIO_D(0x44070, D_ALL);
+
+ MMIO_D(0x215c, D_HSW_PLUS);
+ MMIO_DFH(0x2178, D_ALL, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(0x217c, D_ALL, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(0x12178, D_ALL, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(0x1217c, D_ALL, F_CMD_ACCESS, NULL, NULL);
+
+ MMIO_F(0x2290, 8, 0, 0, 0, D_HSW_PLUS, NULL, NULL);
+ MMIO_D(OACONTROL, D_HSW);
+ MMIO_D(0x2b00, D_BDW_PLUS);
+ MMIO_D(0x2360, D_BDW_PLUS);
+ MMIO_F(0x5200, 32, 0, 0, 0, D_ALL, NULL, NULL);
+ MMIO_F(0x5240, 32, 0, 0, 0, D_ALL, NULL, NULL);
+ MMIO_F(0x5280, 16, 0, 0, 0, D_ALL, NULL, NULL);
+
+ MMIO_DFH(0x1c17c, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(0x1c178, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
+ MMIO_D(BCS_SWCTRL, D_ALL);
+
+ MMIO_F(HS_INVOCATION_COUNT, 8, 0, 0, 0, D_ALL, NULL, NULL);
+ MMIO_F(DS_INVOCATION_COUNT, 8, 0, 0, 0, D_ALL, NULL, NULL);
+ MMIO_F(IA_VERTICES_COUNT, 8, 0, 0, 0, D_ALL, NULL, NULL);
+ MMIO_F(IA_PRIMITIVES_COUNT, 8, 0, 0, 0, D_ALL, NULL, NULL);
+ MMIO_F(VS_INVOCATION_COUNT, 8, 0, 0, 0, D_ALL, NULL, NULL);
+ MMIO_F(GS_INVOCATION_COUNT, 8, 0, 0, 0, D_ALL, NULL, NULL);
+ MMIO_F(GS_PRIMITIVES_COUNT, 8, 0, 0, 0, D_ALL, NULL, NULL);
+ MMIO_F(CL_INVOCATION_COUNT, 8, 0, 0, 0, D_ALL, NULL, NULL);
+ MMIO_F(CL_PRIMITIVES_COUNT, 8, 0, 0, 0, D_ALL, NULL, NULL);
+ MMIO_F(PS_INVOCATION_COUNT, 8, 0, 0, 0, D_ALL, NULL, NULL);
+ MMIO_F(PS_DEPTH_COUNT, 8, 0, 0, 0, D_ALL, NULL, NULL);
+ MMIO_DH(0x4260, D_BDW_PLUS, NULL, gvt_reg_tlb_control_handler);
+ MMIO_DH(0x4264, D_BDW_PLUS, NULL, gvt_reg_tlb_control_handler);
+ MMIO_DH(0x4268, D_BDW_PLUS, NULL, gvt_reg_tlb_control_handler);
+ MMIO_DH(0x426c, D_BDW_PLUS, NULL, gvt_reg_tlb_control_handler);
+ MMIO_DH(0x4270, D_BDW_PLUS, NULL, gvt_reg_tlb_control_handler);
+ MMIO_DFH(0x4094, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
+
+ return 0;
+}
+
+static int init_broadwell_mmio_info(struct intel_gvt *gvt)
+{
+ struct drm_i915_private *dev_priv = gvt->dev_priv;
+ int ret;
+
+ MMIO_DH(RING_IMR(GEN8_BSD2_RING_BASE), D_BDW_PLUS, NULL,
+ intel_vgpu_reg_imr_handler);
+
+ MMIO_DH(GEN8_GT_IMR(0), D_BDW_PLUS, NULL, intel_vgpu_reg_imr_handler);
+ MMIO_DH(GEN8_GT_IER(0), D_BDW_PLUS, NULL, intel_vgpu_reg_ier_handler);
+ MMIO_DH(GEN8_GT_IIR(0), D_BDW_PLUS, NULL, intel_vgpu_reg_iir_handler);
+ MMIO_D(GEN8_GT_ISR(0), D_BDW_PLUS);
+
+ MMIO_DH(GEN8_GT_IMR(1), D_BDW_PLUS, NULL, intel_vgpu_reg_imr_handler);
+ MMIO_DH(GEN8_GT_IER(1), D_BDW_PLUS, NULL, intel_vgpu_reg_ier_handler);
+ MMIO_DH(GEN8_GT_IIR(1), D_BDW_PLUS, NULL, intel_vgpu_reg_iir_handler);
+ MMIO_D(GEN8_GT_ISR(1), D_BDW_PLUS);
+
+ MMIO_DH(GEN8_GT_IMR(2), D_BDW_PLUS, NULL, intel_vgpu_reg_imr_handler);
+ MMIO_DH(GEN8_GT_IER(2), D_BDW_PLUS, NULL, intel_vgpu_reg_ier_handler);
+ MMIO_DH(GEN8_GT_IIR(2), D_BDW_PLUS, NULL, intel_vgpu_reg_iir_handler);
+ MMIO_D(GEN8_GT_ISR(2), D_BDW_PLUS);
+
+ MMIO_DH(GEN8_GT_IMR(3), D_BDW_PLUS, NULL, intel_vgpu_reg_imr_handler);
+ MMIO_DH(GEN8_GT_IER(3), D_BDW_PLUS, NULL, intel_vgpu_reg_ier_handler);
+ MMIO_DH(GEN8_GT_IIR(3), D_BDW_PLUS, NULL, intel_vgpu_reg_iir_handler);
+ MMIO_D(GEN8_GT_ISR(3), D_BDW_PLUS);
+
+ MMIO_DH(GEN8_DE_PIPE_IMR(PIPE_A), D_BDW_PLUS, NULL,
+ intel_vgpu_reg_imr_handler);
+ MMIO_DH(GEN8_DE_PIPE_IER(PIPE_A), D_BDW_PLUS, NULL,
+ intel_vgpu_reg_ier_handler);
+ MMIO_DH(GEN8_DE_PIPE_IIR(PIPE_A), D_BDW_PLUS, NULL,
+ intel_vgpu_reg_iir_handler);
+ MMIO_D(GEN8_DE_PIPE_ISR(PIPE_A), D_BDW_PLUS);
+
+ MMIO_DH(GEN8_DE_PIPE_IMR(PIPE_B), D_BDW_PLUS, NULL,
+ intel_vgpu_reg_imr_handler);
+ MMIO_DH(GEN8_DE_PIPE_IER(PIPE_B), D_BDW_PLUS, NULL,
+ intel_vgpu_reg_ier_handler);
+ MMIO_DH(GEN8_DE_PIPE_IIR(PIPE_B), D_BDW_PLUS, NULL,
+ intel_vgpu_reg_iir_handler);
+ MMIO_D(GEN8_DE_PIPE_ISR(PIPE_B), D_BDW_PLUS);
+
+ MMIO_DH(GEN8_DE_PIPE_IMR(PIPE_C), D_BDW_PLUS, NULL,
+ intel_vgpu_reg_imr_handler);
+ MMIO_DH(GEN8_DE_PIPE_IER(PIPE_C), D_BDW_PLUS, NULL,
+ intel_vgpu_reg_ier_handler);
+ MMIO_DH(GEN8_DE_PIPE_IIR(PIPE_C), D_BDW_PLUS, NULL,
+ intel_vgpu_reg_iir_handler);
+ MMIO_D(GEN8_DE_PIPE_ISR(PIPE_C), D_BDW_PLUS);
+
+ MMIO_DH(GEN8_DE_PORT_IMR, D_BDW_PLUS, NULL, intel_vgpu_reg_imr_handler);
+ MMIO_DH(GEN8_DE_PORT_IER, D_BDW_PLUS, NULL, intel_vgpu_reg_ier_handler);
+ MMIO_DH(GEN8_DE_PORT_IIR, D_BDW_PLUS, NULL, intel_vgpu_reg_iir_handler);
+ MMIO_D(GEN8_DE_PORT_ISR, D_BDW_PLUS);
+
+ MMIO_DH(GEN8_DE_MISC_IMR, D_BDW_PLUS, NULL, intel_vgpu_reg_imr_handler);
+ MMIO_DH(GEN8_DE_MISC_IER, D_BDW_PLUS, NULL, intel_vgpu_reg_ier_handler);
+ MMIO_DH(GEN8_DE_MISC_IIR, D_BDW_PLUS, NULL, intel_vgpu_reg_iir_handler);
+ MMIO_D(GEN8_DE_MISC_ISR, D_BDW_PLUS);
+
+ MMIO_DH(GEN8_PCU_IMR, D_BDW_PLUS, NULL, intel_vgpu_reg_imr_handler);
+ MMIO_DH(GEN8_PCU_IER, D_BDW_PLUS, NULL, intel_vgpu_reg_ier_handler);
+ MMIO_DH(GEN8_PCU_IIR, D_BDW_PLUS, NULL, intel_vgpu_reg_iir_handler);
+ MMIO_D(GEN8_PCU_ISR, D_BDW_PLUS);
+
+ MMIO_DH(GEN8_MASTER_IRQ, D_BDW_PLUS, NULL,
+ intel_vgpu_reg_master_irq_handler);
+
+ MMIO_D(RING_HWSTAM(GEN8_BSD2_RING_BASE), D_BDW_PLUS);
+ MMIO_D(0x1c134, D_BDW_PLUS);
+
+ MMIO_D(RING_TAIL(GEN8_BSD2_RING_BASE), D_BDW_PLUS);
+ MMIO_D(RING_HEAD(GEN8_BSD2_RING_BASE), D_BDW_PLUS);
+ MMIO_GM(RING_START(GEN8_BSD2_RING_BASE), D_BDW_PLUS, NULL, NULL);
+ MMIO_D(RING_CTL(GEN8_BSD2_RING_BASE), D_BDW_PLUS);
+ MMIO_D(RING_ACTHD(GEN8_BSD2_RING_BASE), D_BDW_PLUS);
+ MMIO_D(RING_ACTHD_UDW(GEN8_BSD2_RING_BASE), D_BDW_PLUS);
+ MMIO_DFH(0x1c29c, D_BDW_PLUS, F_MODE_MASK, NULL, ring_mode_mmio_write);
+ MMIO_DFH(RING_MI_MODE(GEN8_BSD2_RING_BASE), D_BDW_PLUS, F_MODE_MASK,
+ NULL, NULL);
+ MMIO_DFH(RING_INSTPM(GEN8_BSD2_RING_BASE), D_BDW_PLUS, F_MODE_MASK,
+ NULL, NULL);
+ MMIO_DFH(RING_TIMESTAMP(GEN8_BSD2_RING_BASE), D_BDW_PLUS, F_CMD_ACCESS,
+ ring_timestamp_mmio_read, NULL);
+
+ MMIO_RING_D(RING_ACTHD_UDW, D_BDW_PLUS);
+
+#define RING_REG(base) (base + 0xd0)
+ MMIO_RING_F(RING_REG, 4, F_RO, 0,
+ ~_MASKED_BIT_ENABLE(RESET_CTL_REQUEST_RESET), D_BDW_PLUS, NULL,
+ ring_reset_ctl_write);
+ MMIO_F(RING_REG(GEN8_BSD2_RING_BASE), 4, F_RO, 0,
+ ~_MASKED_BIT_ENABLE(RESET_CTL_REQUEST_RESET), D_BDW_PLUS, NULL,
+ ring_reset_ctl_write);
+#undef RING_REG
+
+#define RING_REG(base) (base + 0x230)
+ MMIO_RING_DFH(RING_REG, D_BDW_PLUS, 0, NULL, elsp_mmio_write);
+ MMIO_DH(RING_REG(GEN8_BSD2_RING_BASE), D_BDW_PLUS, NULL, elsp_mmio_write);
+#undef RING_REG
+
+#define RING_REG(base) (base + 0x234)
+ MMIO_RING_F(RING_REG, 8, F_RO, 0, ~0, D_BDW_PLUS, NULL, NULL);
+ MMIO_F(RING_REG(GEN8_BSD2_RING_BASE), 4, F_RO, 0, ~0LL, D_BDW_PLUS, NULL, NULL);
+#undef RING_REG
+
+#define RING_REG(base) (base + 0x244)
+ MMIO_RING_D(RING_REG, D_BDW_PLUS);
+ MMIO_D(RING_REG(GEN8_BSD2_RING_BASE), D_BDW_PLUS);
+#undef RING_REG
+
+#define RING_REG(base) (base + 0x370)
+ MMIO_RING_F(RING_REG, 48, F_RO, 0, ~0, D_BDW_PLUS, NULL, NULL);
+ MMIO_F(RING_REG(GEN8_BSD2_RING_BASE), 48, F_RO, 0, ~0, D_BDW_PLUS,
+ NULL, NULL);
+#undef RING_REG
+
+#define RING_REG(base) (base + 0x3a0)
+ MMIO_RING_DFH(RING_REG, D_BDW_PLUS, F_MODE_MASK, NULL, NULL);
+ MMIO_DFH(RING_REG(GEN8_BSD2_RING_BASE), D_BDW_PLUS, F_MODE_MASK, NULL, NULL);
+#undef RING_REG
+
+ MMIO_D(PIPEMISC(PIPE_A), D_BDW_PLUS);
+ MMIO_D(PIPEMISC(PIPE_B), D_BDW_PLUS);
+ MMIO_D(PIPEMISC(PIPE_C), D_BDW_PLUS);
+ MMIO_D(0x1c1d0, D_BDW_PLUS);
+ MMIO_D(GEN6_MBCUNIT_SNPCR, D_BDW_PLUS);
+ MMIO_D(GEN7_MISCCPCTL, D_BDW_PLUS);
+ MMIO_D(0x1c054, D_BDW_PLUS);
+
+ MMIO_D(GEN8_PRIVATE_PAT_LO, D_BDW_PLUS);
+ MMIO_D(GEN8_PRIVATE_PAT_HI, D_BDW_PLUS);
+
+ MMIO_D(GAMTARBMODE, D_BDW_PLUS);
+
+#define RING_REG(base) (base + 0x270)
+ MMIO_RING_F(RING_REG, 32, 0, 0, 0, D_BDW_PLUS, NULL, NULL);
+ MMIO_F(RING_REG(GEN8_BSD2_RING_BASE), 32, 0, 0, 0, D_BDW_PLUS, NULL, NULL);
+#undef RING_REG
+
+ MMIO_RING_GM(RING_HWS_PGA, D_BDW_PLUS, NULL, NULL);
+ MMIO_GM(0x1c080, D_BDW_PLUS, NULL, NULL);
+
+ MMIO_DFH(HDC_CHICKEN0, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
+
+ MMIO_D(CHICKEN_PIPESL_1(PIPE_A), D_BDW);
+ MMIO_D(CHICKEN_PIPESL_1(PIPE_B), D_BDW);
+ MMIO_D(CHICKEN_PIPESL_1(PIPE_C), D_BDW);
+
+ MMIO_D(WM_MISC, D_BDW);
+ MMIO_D(BDW_EDP_PSR_BASE, D_BDW);
+
+ MMIO_D(0x66c00, D_BDW_PLUS);
+ MMIO_D(0x66c04, D_BDW_PLUS);
+
+ MMIO_D(HSW_GTT_CACHE_EN, D_BDW_PLUS);
+
+ MMIO_D(GEN8_EU_DISABLE0, D_BDW_PLUS);
+ MMIO_D(GEN8_EU_DISABLE1, D_BDW_PLUS);
+ MMIO_D(GEN8_EU_DISABLE2, D_BDW_PLUS);
+
+ MMIO_D(0xfdc, D_BDW);
+ MMIO_DFH(GEN8_ROW_CHICKEN, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
+ MMIO_D(GEN7_ROW_CHICKEN2, D_BDW_PLUS);
+ MMIO_D(GEN8_UCGCTL6, D_BDW_PLUS);
+
+ MMIO_D(0xb1f0, D_BDW);
+ MMIO_D(0xb1c0, D_BDW);
+ MMIO_DFH(GEN8_L3SQCREG4, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
+ MMIO_D(0xb100, D_BDW);
+ MMIO_D(0xb10c, D_BDW);
+ MMIO_D(0xb110, D_BDW);
+
+ MMIO_DFH(0x24d0, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(0x24d4, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(0x24d8, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(0x24dc, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
+
+ MMIO_D(0x83a4, D_BDW);
+ MMIO_D(GEN8_L3_LRA_1_GPGPU, D_BDW_PLUS);
+
+ MMIO_D(0x8430, D_BDW);
+
+ MMIO_D(0x110000, D_BDW_PLUS);
+
+ MMIO_D(0x48400, D_BDW_PLUS);
+
+ MMIO_D(0x6e570, D_BDW_PLUS);
+ MMIO_D(0x65f10, D_BDW_PLUS);
+
+ MMIO_DFH(0xe194, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(0xe188, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(HALF_SLICE_CHICKEN2, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(0x2580, D_BDW_PLUS, F_MODE_MASK, NULL, NULL);
+
+ MMIO_D(0x2248, D_BDW);
+
+ return 0;
+}
+
+static int init_skl_mmio_info(struct intel_gvt *gvt)
+{
+ struct drm_i915_private *dev_priv = gvt->dev_priv;
+ int ret;
+
+ MMIO_DH(FORCEWAKE_RENDER_GEN9, D_SKL_PLUS, NULL, mul_force_wake_write);
+ MMIO_DH(FORCEWAKE_ACK_RENDER_GEN9, D_SKL_PLUS, NULL, NULL);
+ MMIO_DH(FORCEWAKE_BLITTER_GEN9, D_SKL_PLUS, NULL, mul_force_wake_write);
+ MMIO_DH(FORCEWAKE_ACK_BLITTER_GEN9, D_SKL_PLUS, NULL, NULL);
+ MMIO_DH(FORCEWAKE_MEDIA_GEN9, D_SKL_PLUS, NULL, mul_force_wake_write);
+ MMIO_DH(FORCEWAKE_ACK_MEDIA_GEN9, D_SKL_PLUS, NULL, NULL);
+
+ MMIO_F(_DPB_AUX_CH_CTL, 6 * 4, 0, 0, 0, D_SKL, NULL, dp_aux_ch_ctl_mmio_write);
+ MMIO_F(_DPC_AUX_CH_CTL, 6 * 4, 0, 0, 0, D_SKL, NULL, dp_aux_ch_ctl_mmio_write);
+ MMIO_F(_DPD_AUX_CH_CTL, 6 * 4, 0, 0, 0, D_SKL, NULL, dp_aux_ch_ctl_mmio_write);
+
+ MMIO_D(HSW_PWR_WELL_BIOS, D_SKL);
+ MMIO_DH(HSW_PWR_WELL_DRIVER, D_SKL, NULL, skl_power_well_ctl_write);
+
+ MMIO_DH(GEN6_PCODE_MAILBOX, D_SKL, NULL, mailbox_write);
+ MMIO_D(0xa210, D_SKL_PLUS);
+ MMIO_D(GEN9_MEDIA_PG_IDLE_HYSTERESIS, D_SKL_PLUS);
+ MMIO_D(GEN9_RENDER_PG_IDLE_HYSTERESIS, D_SKL_PLUS);
+ MMIO_DFH(GEN9_GAMT_ECO_REG_RW_IA, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DH(0x4ddc, D_SKL, NULL, skl_misc_ctl_write);
+ MMIO_DH(0x42080, D_SKL, NULL, skl_misc_ctl_write);
+ MMIO_D(0x45504, D_SKL);
+ MMIO_D(0x45520, D_SKL);
+ MMIO_D(0x46000, D_SKL);
+ MMIO_DH(0x46010, D_SKL, NULL, skl_lcpll_write);
+ MMIO_DH(0x46014, D_SKL, NULL, skl_lcpll_write);
+ MMIO_D(0x6C040, D_SKL);
+ MMIO_D(0x6C048, D_SKL);
+ MMIO_D(0x6C050, D_SKL);
+ MMIO_D(0x6C044, D_SKL);
+ MMIO_D(0x6C04C, D_SKL);
+ MMIO_D(0x6C054, D_SKL);
+ MMIO_D(0x6c058, D_SKL);
+ MMIO_D(0x6c05c, D_SKL);
+ MMIO_DH(0x6c060, D_SKL, dpll_status_read, NULL);
+
+ MMIO_DH(SKL_PS_WIN_POS(PIPE_A, 0), D_SKL, NULL, pf_write);
+ MMIO_DH(SKL_PS_WIN_POS(PIPE_A, 1), D_SKL, NULL, pf_write);
+ MMIO_DH(SKL_PS_WIN_POS(PIPE_B, 0), D_SKL, NULL, pf_write);
+ MMIO_DH(SKL_PS_WIN_POS(PIPE_B, 1), D_SKL, NULL, pf_write);
+ MMIO_DH(SKL_PS_WIN_POS(PIPE_C, 0), D_SKL, NULL, pf_write);
+ MMIO_DH(SKL_PS_WIN_POS(PIPE_C, 1), D_SKL, NULL, pf_write);
+
+ MMIO_DH(SKL_PS_WIN_SZ(PIPE_A, 0), D_SKL, NULL, pf_write);
+ MMIO_DH(SKL_PS_WIN_SZ(PIPE_A, 1), D_SKL, NULL, pf_write);
+ MMIO_DH(SKL_PS_WIN_SZ(PIPE_B, 0), D_SKL, NULL, pf_write);
+ MMIO_DH(SKL_PS_WIN_SZ(PIPE_B, 1), D_SKL, NULL, pf_write);
+ MMIO_DH(SKL_PS_WIN_SZ(PIPE_C, 0), D_SKL, NULL, pf_write);
+ MMIO_DH(SKL_PS_WIN_SZ(PIPE_C, 1), D_SKL, NULL, pf_write);
+
+ MMIO_DH(SKL_PS_CTRL(PIPE_A, 0), D_SKL, NULL, pf_write);
+ MMIO_DH(SKL_PS_CTRL(PIPE_A, 1), D_SKL, NULL, pf_write);
+ MMIO_DH(SKL_PS_CTRL(PIPE_B, 0), D_SKL, NULL, pf_write);
+ MMIO_DH(SKL_PS_CTRL(PIPE_B, 1), D_SKL, NULL, pf_write);
+ MMIO_DH(SKL_PS_CTRL(PIPE_C, 0), D_SKL, NULL, pf_write);
+ MMIO_DH(SKL_PS_CTRL(PIPE_C, 1), D_SKL, NULL, pf_write);
+
+ MMIO_DH(PLANE_BUF_CFG(PIPE_A, 0), D_SKL, NULL, NULL);
+ MMIO_DH(PLANE_BUF_CFG(PIPE_A, 1), D_SKL, NULL, NULL);
+ MMIO_DH(PLANE_BUF_CFG(PIPE_A, 2), D_SKL, NULL, NULL);
+ MMIO_DH(PLANE_BUF_CFG(PIPE_A, 3), D_SKL, NULL, NULL);
+
+ MMIO_DH(PLANE_BUF_CFG(PIPE_B, 0), D_SKL, NULL, NULL);
+ MMIO_DH(PLANE_BUF_CFG(PIPE_B, 1), D_SKL, NULL, NULL);
+ MMIO_DH(PLANE_BUF_CFG(PIPE_B, 2), D_SKL, NULL, NULL);
+ MMIO_DH(PLANE_BUF_CFG(PIPE_B, 3), D_SKL, NULL, NULL);
+
+ MMIO_DH(PLANE_BUF_CFG(PIPE_C, 0), D_SKL, NULL, NULL);
+ MMIO_DH(PLANE_BUF_CFG(PIPE_C, 1), D_SKL, NULL, NULL);
+ MMIO_DH(PLANE_BUF_CFG(PIPE_C, 2), D_SKL, NULL, NULL);
+ MMIO_DH(PLANE_BUF_CFG(PIPE_C, 3), D_SKL, NULL, NULL);
+
+ MMIO_DH(CUR_BUF_CFG(PIPE_A), D_SKL, NULL, NULL);
+ MMIO_DH(CUR_BUF_CFG(PIPE_B), D_SKL, NULL, NULL);
+ MMIO_DH(CUR_BUF_CFG(PIPE_C), D_SKL, NULL, NULL);
+
+ MMIO_F(PLANE_WM(PIPE_A, 0, 0), 4 * 8, 0, 0, 0, D_SKL, NULL, NULL);
+ MMIO_F(PLANE_WM(PIPE_A, 1, 0), 4 * 8, 0, 0, 0, D_SKL, NULL, NULL);
+ MMIO_F(PLANE_WM(PIPE_A, 2, 0), 4 * 8, 0, 0, 0, D_SKL, NULL, NULL);
+
+ MMIO_F(PLANE_WM(PIPE_B, 0, 0), 4 * 8, 0, 0, 0, D_SKL, NULL, NULL);
+ MMIO_F(PLANE_WM(PIPE_B, 1, 0), 4 * 8, 0, 0, 0, D_SKL, NULL, NULL);
+ MMIO_F(PLANE_WM(PIPE_B, 2, 0), 4 * 8, 0, 0, 0, D_SKL, NULL, NULL);
+
+ MMIO_F(PLANE_WM(PIPE_C, 0, 0), 4 * 8, 0, 0, 0, D_SKL, NULL, NULL);
+ MMIO_F(PLANE_WM(PIPE_C, 1, 0), 4 * 8, 0, 0, 0, D_SKL, NULL, NULL);
+ MMIO_F(PLANE_WM(PIPE_C, 2, 0), 4 * 8, 0, 0, 0, D_SKL, NULL, NULL);
+
+ MMIO_F(CUR_WM(PIPE_A, 0), 4 * 8, 0, 0, 0, D_SKL, NULL, NULL);
+ MMIO_F(CUR_WM(PIPE_B, 0), 4 * 8, 0, 0, 0, D_SKL, NULL, NULL);
+ MMIO_F(CUR_WM(PIPE_C, 0), 4 * 8, 0, 0, 0, D_SKL, NULL, NULL);
+
+ MMIO_DH(PLANE_WM_TRANS(PIPE_A, 0), D_SKL, NULL, NULL);
+ MMIO_DH(PLANE_WM_TRANS(PIPE_A, 1), D_SKL, NULL, NULL);
+ MMIO_DH(PLANE_WM_TRANS(PIPE_A, 2), D_SKL, NULL, NULL);
+
+ MMIO_DH(PLANE_WM_TRANS(PIPE_B, 0), D_SKL, NULL, NULL);
+ MMIO_DH(PLANE_WM_TRANS(PIPE_B, 1), D_SKL, NULL, NULL);
+ MMIO_DH(PLANE_WM_TRANS(PIPE_B, 2), D_SKL, NULL, NULL);
+
+ MMIO_DH(PLANE_WM_TRANS(PIPE_C, 0), D_SKL, NULL, NULL);
+ MMIO_DH(PLANE_WM_TRANS(PIPE_C, 1), D_SKL, NULL, NULL);
+ MMIO_DH(PLANE_WM_TRANS(PIPE_C, 2), D_SKL, NULL, NULL);
+
+ MMIO_DH(CUR_WM_TRANS(PIPE_A), D_SKL, NULL, NULL);
+ MMIO_DH(CUR_WM_TRANS(PIPE_B), D_SKL, NULL, NULL);
+ MMIO_DH(CUR_WM_TRANS(PIPE_C), D_SKL, NULL, NULL);
+
+ MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_A, 0), D_SKL, NULL, NULL);
+ MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_A, 1), D_SKL, NULL, NULL);
+ MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_A, 2), D_SKL, NULL, NULL);
+ MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_A, 3), D_SKL, NULL, NULL);
+
+ MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_B, 0), D_SKL, NULL, NULL);
+ MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_B, 1), D_SKL, NULL, NULL);
+ MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_B, 2), D_SKL, NULL, NULL);
+ MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_B, 3), D_SKL, NULL, NULL);
+
+ MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_C, 0), D_SKL, NULL, NULL);
+ MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_C, 1), D_SKL, NULL, NULL);
+ MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_C, 2), D_SKL, NULL, NULL);
+ MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_C, 3), D_SKL, NULL, NULL);
+
+ MMIO_DH(_REG_701C0(PIPE_A, 1), D_SKL, NULL, NULL);
+ MMIO_DH(_REG_701C0(PIPE_A, 2), D_SKL, NULL, NULL);
+ MMIO_DH(_REG_701C0(PIPE_A, 3), D_SKL, NULL, NULL);
+ MMIO_DH(_REG_701C0(PIPE_A, 4), D_SKL, NULL, NULL);
+
+ MMIO_DH(_REG_701C0(PIPE_B, 1), D_SKL, NULL, NULL);
+ MMIO_DH(_REG_701C0(PIPE_B, 2), D_SKL, NULL, NULL);
+ MMIO_DH(_REG_701C0(PIPE_B, 3), D_SKL, NULL, NULL);
+ MMIO_DH(_REG_701C0(PIPE_B, 4), D_SKL, NULL, NULL);
+
+ MMIO_DH(_REG_701C0(PIPE_C, 1), D_SKL, NULL, NULL);
+ MMIO_DH(_REG_701C0(PIPE_C, 2), D_SKL, NULL, NULL);
+ MMIO_DH(_REG_701C0(PIPE_C, 3), D_SKL, NULL, NULL);
+ MMIO_DH(_REG_701C0(PIPE_C, 4), D_SKL, NULL, NULL);
+
+ MMIO_DH(_REG_701C4(PIPE_A, 1), D_SKL, NULL, NULL);
+ MMIO_DH(_REG_701C4(PIPE_A, 2), D_SKL, NULL, NULL);
+ MMIO_DH(_REG_701C4(PIPE_A, 3), D_SKL, NULL, NULL);
+ MMIO_DH(_REG_701C4(PIPE_A, 4), D_SKL, NULL, NULL);
+
+ MMIO_DH(_REG_701C4(PIPE_B, 1), D_SKL, NULL, NULL);
+ MMIO_DH(_REG_701C4(PIPE_B, 2), D_SKL, NULL, NULL);
+ MMIO_DH(_REG_701C4(PIPE_B, 3), D_SKL, NULL, NULL);
+ MMIO_DH(_REG_701C4(PIPE_B, 4), D_SKL, NULL, NULL);
+
+ MMIO_DH(_REG_701C4(PIPE_C, 1), D_SKL, NULL, NULL);
+ MMIO_DH(_REG_701C4(PIPE_C, 2), D_SKL, NULL, NULL);
+ MMIO_DH(_REG_701C4(PIPE_C, 3), D_SKL, NULL, NULL);
+ MMIO_DH(_REG_701C4(PIPE_C, 4), D_SKL, NULL, NULL);
+
+ MMIO_D(0x70380, D_SKL);
+ MMIO_D(0x71380, D_SKL);
+ MMIO_D(0x72380, D_SKL);
+ MMIO_D(0x7039c, D_SKL);
+
+ MMIO_F(0x80000, 0x3000, 0, 0, 0, D_SKL, NULL, NULL);
+ MMIO_D(0x8f074, D_SKL);
+ MMIO_D(0x8f004, D_SKL);
+ MMIO_D(0x8f034, D_SKL);
+
+ MMIO_D(0xb11c, D_SKL);
+
+ MMIO_D(0x51000, D_SKL);
+ MMIO_D(0x6c00c, D_SKL);
+
+ MMIO_F(0xc800, 0x7f8, F_CMD_ACCESS, 0, 0, D_SKL, NULL, NULL);
+ MMIO_F(0xb020, 0x80, F_CMD_ACCESS, 0, 0, D_SKL, NULL, NULL);
+
+ MMIO_D(0xd08, D_SKL);
+ MMIO_D(0x20e0, D_SKL);
+ MMIO_D(0x20ec, D_SKL);
+
+ /* TRTT */
+ MMIO_D(0x4de0, D_SKL);
+ MMIO_D(0x4de4, D_SKL);
+ MMIO_D(0x4de8, D_SKL);
+ MMIO_D(0x4dec, D_SKL);
+ MMIO_D(0x4df0, D_SKL);
+ MMIO_DH(0x4df4, D_SKL, NULL, gen9_trtte_write);
+ MMIO_DH(0x4dfc, D_SKL, NULL, gen9_trtt_chicken_write);
+
+ MMIO_D(0x45008, D_SKL);
+
+ MMIO_D(0x46430, D_SKL);
+
+ MMIO_D(0x46520, D_SKL);
+
+ MMIO_D(0xc403c, D_SKL);
+ MMIO_D(0xb004, D_SKL);
+ MMIO_DH(DMA_CTRL, D_SKL_PLUS, NULL, dma_ctrl_write);
+
+ MMIO_D(0x65900, D_SKL);
+ MMIO_D(0x1082c0, D_SKL);
+ MMIO_D(0x4068, D_SKL);
+ MMIO_D(0x67054, D_SKL);
+ MMIO_D(0x6e560, D_SKL);
+ MMIO_D(0x6e554, D_SKL);
+ MMIO_D(0x2b20, D_SKL);
+ MMIO_D(0x65f00, D_SKL);
+ MMIO_D(0x65f08, D_SKL);
+ MMIO_D(0x320f0, D_SKL);
+
+ MMIO_D(_REG_VCS2_EXCC, D_SKL);
+ MMIO_D(0x70034, D_SKL);
+ MMIO_D(0x71034, D_SKL);
+ MMIO_D(0x72034, D_SKL);
+
+ MMIO_D(_PLANE_KEYVAL_1(PIPE_A), D_SKL);
+ MMIO_D(_PLANE_KEYVAL_1(PIPE_B), D_SKL);
+ MMIO_D(_PLANE_KEYVAL_1(PIPE_C), D_SKL);
+ MMIO_D(_PLANE_KEYMSK_1(PIPE_A), D_SKL);
+ MMIO_D(_PLANE_KEYMSK_1(PIPE_B), D_SKL);
+ MMIO_D(_PLANE_KEYMSK_1(PIPE_C), D_SKL);
+
+ MMIO_D(0x44500, D_SKL);
+ return 0;
+}
+
+/**
+ * intel_gvt_find_mmio_info - find MMIO information entry by aligned offset
+ * @gvt: GVT device
+ * @offset: register offset
+ *
+ * This function is used to find the MMIO information entry in the hash table.
+ *
+ * Returns:
+ * pointer to the MMIO information entry, or NULL if it does not exist
+ */
+struct intel_gvt_mmio_info *intel_gvt_find_mmio_info(struct intel_gvt *gvt,
+ unsigned int offset)
+{
+ struct intel_gvt_mmio_info *e;
+
+ WARN_ON(!IS_ALIGNED(offset, 4));
+
+ hash_for_each_possible(gvt->mmio.mmio_info_table, e, node, offset) {
+ if (e->offset == offset)
+ return e;
+ }
+ return NULL;
+}
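+
+/*
+ * Usage sketch (illustrative only, not part of this patch): a dispatcher
+ * that wants the registered handler for a guest access could do something
+ * like
+ *
+ *	struct intel_gvt_mmio_info *mmio;
+ *
+ *	mmio = intel_gvt_find_mmio_info(gvt, rounddown(offset, 4));
+ *	if (mmio && mmio->write)
+ *		ret = mmio->write(vgpu, offset, p_data, bytes);
+ *
+ * The offset is rounded down to a 4-byte boundary first, since only
+ * aligned offsets are hashed into mmio_info_table.
+ */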
+
+/**
+ * intel_gvt_clean_mmio_info - clean up MMIO information table for GVT device
+ * @gvt: GVT device
+ *
+ * This function is called at the driver unloading stage to clean up the MMIO
+ * information table of the GVT device.
+ *
+ */
+void intel_gvt_clean_mmio_info(struct intel_gvt *gvt)
+{
+ struct hlist_node *tmp;
+ struct intel_gvt_mmio_info *e;
+ int i;
+
+ hash_for_each_safe(gvt->mmio.mmio_info_table, i, tmp, e, node)
+ kfree(e);
+
+ vfree(gvt->mmio.mmio_attribute);
+ gvt->mmio.mmio_attribute = NULL;
+}
+
+/**
+ * intel_gvt_setup_mmio_info - setup MMIO information table for GVT device
+ * @gvt: GVT device
+ *
+ * This function is called at the initialization stage to set up the MMIO
+ * information table for the GVT device.
+ *
+ * Returns:
+ * zero on success, negative if failed.
+ */
+int intel_gvt_setup_mmio_info(struct intel_gvt *gvt)
+{
+ struct intel_gvt_device_info *info = &gvt->device_info;
+ struct drm_i915_private *dev_priv = gvt->dev_priv;
+ int ret;
+
+ gvt->mmio.mmio_attribute = vzalloc(info->mmio_size);
+ if (!gvt->mmio.mmio_attribute)
+ return -ENOMEM;
+
+ ret = init_generic_mmio_info(gvt);
+ if (ret)
+ goto err;
+
+ if (IS_BROADWELL(dev_priv)) {
+ ret = init_broadwell_mmio_info(gvt);
+ if (ret)
+ goto err;
+ } else if (IS_SKYLAKE(dev_priv)) {
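+		/* Skylake reuses the gen8 table and layers SKL entries on top */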
+ ret = init_broadwell_mmio_info(gvt);
+ if (ret)
+ goto err;
+ ret = init_skl_mmio_info(gvt);
+ if (ret)
+ goto err;
+ }
+ return 0;
+err:
+ intel_gvt_clean_mmio_info(gvt);
+ return ret;
+}
+
+/**
+ * intel_gvt_mmio_set_accessed - mark an MMIO register as accessed
+ * @gvt: a GVT device
+ * @offset: register offset
+ *
+ */
+void intel_gvt_mmio_set_accessed(struct intel_gvt *gvt, unsigned int offset)
+{
+ gvt->mmio.mmio_attribute[offset >> 2] |=
+ F_ACCESSED;
+}
+
+/**
+ * intel_gvt_mmio_is_cmd_access - check if an MMIO register can be accessed by command
+ * @gvt: a GVT device
+ * @offset: register offset
+ *
+ */
+bool intel_gvt_mmio_is_cmd_access(struct intel_gvt *gvt,
+ unsigned int offset)
+{
+ return gvt->mmio.mmio_attribute[offset >> 2] &
+ F_CMD_ACCESS;
+}
+
+/**
+ * intel_gvt_mmio_is_unalign - check if an MMIO register can be accessed unaligned
+ * @gvt: a GVT device
+ * @offset: register offset
+ *
+ */
+bool intel_gvt_mmio_is_unalign(struct intel_gvt *gvt,
+ unsigned int offset)
+{
+ return gvt->mmio.mmio_attribute[offset >> 2] &
+ F_UNALIGN;
+}
+
+/**
+ * intel_gvt_mmio_set_cmd_accessed - mark an MMIO register as accessed by command
+ * @gvt: a GVT device
+ * @offset: register offset
+ *
+ */
+void intel_gvt_mmio_set_cmd_accessed(struct intel_gvt *gvt,
+ unsigned int offset)
+{
+ gvt->mmio.mmio_attribute[offset >> 2] |=
+ F_CMD_ACCESSED;
+}
+
+/**
+ * intel_gvt_mmio_has_mode_mask - check if an MMIO register has a mode mask
+ * @gvt: a GVT device
+ * @offset: register offset
+ *
+ * Returns:
+ * True if the MMIO register has a mode mask in its upper 16 bits, false otherwise.
+ *
+ */
+bool intel_gvt_mmio_has_mode_mask(struct intel_gvt *gvt, unsigned int offset)
+{
+ return gvt->mmio.mmio_attribute[offset >> 2] &
+ F_MODE_MASK;
+}
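+
+/*
+ * Worked example (hypothetical offset): the attribute table keeps one
+ * flag byte per 4-byte register, indexed by (offset >> 2), so marking
+ * register 0x2358 as accessed sets F_ACCESSED in mmio_attribute[0x8d6],
+ * and the predicates above test the same byte:
+ *
+ *	intel_gvt_mmio_set_accessed(gvt, 0x2358);
+ *	if (intel_gvt_mmio_has_mode_mask(gvt, 0x2358))
+ *		...
+ */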
+
+/**
+ * intel_vgpu_default_mmio_read - default MMIO read handler
+ * @vgpu: a vGPU
+ * @offset: access offset
+ * @p_data: data return buffer
+ * @bytes: access data length
+ *
+ * Returns:
+ * Zero on success, negative error code if failed.
+ */
+int intel_vgpu_default_mmio_read(struct intel_vgpu *vgpu, unsigned int offset,
+ void *p_data, unsigned int bytes)
+{
+ read_vreg(vgpu, offset, p_data, bytes);
+ return 0;
+}
+
+/**
+ * intel_vgpu_default_mmio_write - default MMIO write handler
+ * @vgpu: a vGPU
+ * @offset: access offset
+ * @p_data: write data buffer
+ * @bytes: access data length
+ *
+ * Returns:
+ * Zero on success, negative error code if failed.
+ */
+int intel_vgpu_default_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
+ void *p_data, unsigned int bytes)
+{
+ write_vreg(vgpu, offset, p_data, bytes);
+ return 0;
+}
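+
+/*
+ * Note (illustrative): the two default handlers above only mirror the
+ * access into the vGPU's virtual register file; e.g. a 4-byte guest
+ * write of 0xdeadbeef to offset 0x2358 ends up as
+ * vgpu_vreg(vgpu, 0x2358) == 0xdeadbeef, with no side effect on the
+ * physical hardware.
+ */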
diff --git a/drivers/gpu/drm/i915/gvt/hypercall.h b/drivers/gpu/drm/i915/gvt/hypercall.h
index 254df8bf1f35..30e543f5a703 100644
--- a/drivers/gpu/drm/i915/gvt/hypercall.h
+++ b/drivers/gpu/drm/i915/gvt/hypercall.h
@@ -19,6 +19,15 @@
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
+ *
+ * Authors:
+ * Eddie Dong <eddie.dong@intel.com>
+ * Dexuan Cui
+ * Jike Song <jike.song@intel.com>
+ *
+ * Contributors:
+ * Zhi Wang <zhi.a.wang@intel.com>
+ *
*/
#ifndef _GVT_HYPERCALL_H_
@@ -30,6 +39,23 @@
*/
struct intel_gvt_mpt {
int (*detect_host)(void);
+ int (*host_init)(struct device *dev, void *gvt, const void *ops);
+ void (*host_exit)(struct device *dev, void *gvt);
+ int (*attach_vgpu)(void *vgpu, unsigned long *handle);
+ void (*detach_vgpu)(unsigned long handle);
+ int (*inject_msi)(unsigned long handle, u32 addr, u16 data);
+ unsigned long (*from_virt_to_mfn)(void *p);
+ int (*set_wp_page)(unsigned long handle, u64 gfn);
+ int (*unset_wp_page)(unsigned long handle, u64 gfn);
+ int (*read_gpa)(unsigned long handle, unsigned long gpa, void *buf,
+ unsigned long len);
+ int (*write_gpa)(unsigned long handle, unsigned long gpa, void *buf,
+ unsigned long len);
+ unsigned long (*gfn_to_mfn)(unsigned long handle, unsigned long gfn);
+ int (*map_gfn_to_mfn)(unsigned long handle, unsigned long gfn,
+ unsigned long mfn, unsigned int nr, bool map);
+ int (*set_trap_area)(unsigned long handle, u64 start, u64 end,
+ bool map);
};
extern struct intel_gvt_mpt xengt_mpt;
diff --git a/drivers/gpu/drm/i915/gvt/interrupt.c b/drivers/gpu/drm/i915/gvt/interrupt.c
new file mode 100644
index 000000000000..f7be02ac4be1
--- /dev/null
+++ b/drivers/gpu/drm/i915/gvt/interrupt.c
@@ -0,0 +1,741 @@
+/*
+ * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ * Kevin Tian <kevin.tian@intel.com>
+ * Zhi Wang <zhi.a.wang@intel.com>
+ *
+ * Contributors:
+ * Min he <min.he@intel.com>
+ *
+ */
+
+#include "i915_drv.h"
+#include "gvt.h"
+
+/* common offset among interrupt control registers */
+#define regbase_to_isr(base) (base)
+#define regbase_to_imr(base) (base + 0x4)
+#define regbase_to_iir(base) (base + 0x8)
+#define regbase_to_ier(base) (base + 0xC)
+
+#define iir_to_regbase(iir) (iir - 0x8)
+#define ier_to_regbase(ier) (ier - 0xC)
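+
+/*
+ * Worked example (gen8 offsets): for the GT0 block, reg_base is
+ * GEN8_GT_ISR(0) at 0x44300, so
+ *
+ *	regbase_to_isr(0x44300) == 0x44300   ISR
+ *	regbase_to_imr(0x44300) == 0x44304   IMR
+ *	regbase_to_iir(0x44300) == 0x44308   IIR
+ *	regbase_to_ier(0x44300) == 0x4430c   IER
+ *
+ * and iir_to_regbase(0x44308) / ier_to_regbase(0x4430c) recover 0x44300.
+ */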
+
+#define get_event_virt_handler(irq, e) (irq->events[e].v_handler)
+#define get_irq_info(irq, e) (irq->events[e].info)
+
+#define irq_to_gvt(irq) \
+ container_of(irq, struct intel_gvt, irq)
+
+static void update_upstream_irq(struct intel_vgpu *vgpu,
+ struct intel_gvt_irq_info *info);
+
+static const char * const irq_name[INTEL_GVT_EVENT_MAX] = {
+ [RCS_MI_USER_INTERRUPT] = "Render CS MI USER INTERRUPT",
+ [RCS_DEBUG] = "Render EU debug from SVG",
+ [RCS_MMIO_SYNC_FLUSH] = "Render MMIO sync flush status",
+ [RCS_CMD_STREAMER_ERR] = "Render CS error interrupt",
+ [RCS_PIPE_CONTROL] = "Render PIPE CONTROL notify",
+ [RCS_WATCHDOG_EXCEEDED] = "Render CS Watchdog counter exceeded",
+ [RCS_PAGE_DIRECTORY_FAULT] = "Render page directory faults",
+ [RCS_AS_CONTEXT_SWITCH] = "Render AS Context Switch Interrupt",
+
+ [VCS_MI_USER_INTERRUPT] = "Video CS MI USER INTERRUPT",
+ [VCS_MMIO_SYNC_FLUSH] = "Video MMIO sync flush status",
+ [VCS_CMD_STREAMER_ERR] = "Video CS error interrupt",
+ [VCS_MI_FLUSH_DW] = "Video MI FLUSH DW notify",
+ [VCS_WATCHDOG_EXCEEDED] = "Video CS Watchdog counter exceeded",
+ [VCS_PAGE_DIRECTORY_FAULT] = "Video page directory faults",
+ [VCS_AS_CONTEXT_SWITCH] = "Video AS Context Switch Interrupt",
+ [VCS2_MI_USER_INTERRUPT] = "VCS2 Video CS MI USER INTERRUPT",
+ [VCS2_MI_FLUSH_DW] = "VCS2 Video MI FLUSH DW notify",
+ [VCS2_AS_CONTEXT_SWITCH] = "VCS2 Context Switch Interrupt",
+
+ [BCS_MI_USER_INTERRUPT] = "Blitter CS MI USER INTERRUPT",
+ [BCS_MMIO_SYNC_FLUSH] = "Billter MMIO sync flush status",
+ [BCS_CMD_STREAMER_ERR] = "Blitter CS error interrupt",
+ [BCS_MI_FLUSH_DW] = "Blitter MI FLUSH DW notify",
+ [BCS_PAGE_DIRECTORY_FAULT] = "Blitter page directory faults",
+ [BCS_AS_CONTEXT_SWITCH] = "Blitter AS Context Switch Interrupt",
+
+ [VECS_MI_FLUSH_DW] = "Video Enhanced Streamer MI FLUSH DW notify",
+ [VECS_AS_CONTEXT_SWITCH] = "VECS Context Switch Interrupt",
+
+ [PIPE_A_FIFO_UNDERRUN] = "Pipe A FIFO underrun",
+ [PIPE_A_CRC_ERR] = "Pipe A CRC error",
+ [PIPE_A_CRC_DONE] = "Pipe A CRC done",
+ [PIPE_A_VSYNC] = "Pipe A vsync",
+ [PIPE_A_LINE_COMPARE] = "Pipe A line compare",
+ [PIPE_A_ODD_FIELD] = "Pipe A odd field",
+ [PIPE_A_EVEN_FIELD] = "Pipe A even field",
+ [PIPE_A_VBLANK] = "Pipe A vblank",
+ [PIPE_B_FIFO_UNDERRUN] = "Pipe B FIFO underrun",
+ [PIPE_B_CRC_ERR] = "Pipe B CRC error",
+ [PIPE_B_CRC_DONE] = "Pipe B CRC done",
+ [PIPE_B_VSYNC] = "Pipe B vsync",
+ [PIPE_B_LINE_COMPARE] = "Pipe B line compare",
+ [PIPE_B_ODD_FIELD] = "Pipe B odd field",
+ [PIPE_B_EVEN_FIELD] = "Pipe B even field",
+ [PIPE_B_VBLANK] = "Pipe B vblank",
+ [PIPE_C_VBLANK] = "Pipe C vblank",
+ [DPST_PHASE_IN] = "DPST phase in event",
+ [DPST_HISTOGRAM] = "DPST histogram event",
+ [GSE] = "GSE",
+ [DP_A_HOTPLUG] = "DP A Hotplug",
+ [AUX_CHANNEL_A] = "AUX Channel A",
+ [PERF_COUNTER] = "Performance counter",
+ [POISON] = "Poison",
+ [GTT_FAULT] = "GTT fault",
+ [PRIMARY_A_FLIP_DONE] = "Primary Plane A flip done",
+ [PRIMARY_B_FLIP_DONE] = "Primary Plane B flip done",
+ [PRIMARY_C_FLIP_DONE] = "Primary Plane C flip done",
+ [SPRITE_A_FLIP_DONE] = "Sprite Plane A flip done",
+ [SPRITE_B_FLIP_DONE] = "Sprite Plane B flip done",
+ [SPRITE_C_FLIP_DONE] = "Sprite Plane C flip done",
+
+ [PCU_THERMAL] = "PCU Thermal Event",
+ [PCU_PCODE2DRIVER_MAILBOX] = "PCU pcode2driver mailbox event",
+
+ [FDI_RX_INTERRUPTS_TRANSCODER_A] = "FDI RX Interrupts Combined A",
+ [AUDIO_CP_CHANGE_TRANSCODER_A] = "Audio CP Change Transcoder A",
+ [AUDIO_CP_REQUEST_TRANSCODER_A] = "Audio CP Request Transcoder A",
+ [FDI_RX_INTERRUPTS_TRANSCODER_B] = "FDI RX Interrupts Combined B",
+ [AUDIO_CP_CHANGE_TRANSCODER_B] = "Audio CP Change Transcoder B",
+ [AUDIO_CP_REQUEST_TRANSCODER_B] = "Audio CP Request Transcoder B",
+ [FDI_RX_INTERRUPTS_TRANSCODER_C] = "FDI RX Interrupts Combined C",
+ [AUDIO_CP_CHANGE_TRANSCODER_C] = "Audio CP Change Transcoder C",
+ [AUDIO_CP_REQUEST_TRANSCODER_C] = "Audio CP Request Transcoder C",
+ [ERR_AND_DBG] = "South Error and Debug Interupts Combined",
+ [GMBUS] = "Gmbus",
+ [SDVO_B_HOTPLUG] = "SDVO B hotplug",
+ [CRT_HOTPLUG] = "CRT Hotplug",
+ [DP_B_HOTPLUG] = "DisplayPort/HDMI/DVI B Hotplug",
+ [DP_C_HOTPLUG] = "DisplayPort/HDMI/DVI C Hotplug",
+ [DP_D_HOTPLUG] = "DisplayPort/HDMI/DVI D Hotplug",
+ [AUX_CHANNEL_B] = "AUX Channel B",
+ [AUX_CHANNEL_C] = "AUX Channel C",
+ [AUX_CHANNEL_D] = "AUX Channel D",
+ [AUDIO_POWER_STATE_CHANGE_B] = "Audio Power State change Port B",
+ [AUDIO_POWER_STATE_CHANGE_C] = "Audio Power State change Port C",
+ [AUDIO_POWER_STATE_CHANGE_D] = "Audio Power State change Port D",
+
+ [INTEL_GVT_EVENT_RESERVED] = "RESERVED EVENTS!!!",
+};
+
+static inline struct intel_gvt_irq_info *regbase_to_irq_info(
+ struct intel_gvt *gvt,
+ unsigned int reg)
+{
+ struct intel_gvt_irq *irq = &gvt->irq;
+ int i;
+
+ for_each_set_bit(i, irq->irq_info_bitmap, INTEL_GVT_IRQ_INFO_MAX) {
+ if (i915_mmio_reg_offset(irq->info[i]->reg_base) == reg)
+ return irq->info[i];
+ }
+
+ return NULL;
+}
+
+/**
+ * intel_vgpu_reg_imr_handler - Generic IMR register emulation write handler
+ * @vgpu: a vGPU
+ * @reg: register offset written by guest
+ * @p_data: register data written by guest
+ * @bytes: register data length
+ *
+ * This function is used to emulate the generic IMR register bit change
+ * behavior.
+ *
+ * Returns:
+ * Zero on success, negative error code if failed.
+ *
+ */
+int intel_vgpu_reg_imr_handler(struct intel_vgpu *vgpu,
+ unsigned int reg, void *p_data, unsigned int bytes)
+{
+ struct intel_gvt *gvt = vgpu->gvt;
+ struct intel_gvt_irq_ops *ops = gvt->irq.ops;
+ u32 changed, masked, unmasked;
+ u32 imr = *(u32 *)p_data;
+
+ gvt_dbg_irq("write IMR %x with val %x\n",
+ reg, imr);
+
+ gvt_dbg_irq("old vIMR %x\n", vgpu_vreg(vgpu, reg));
+
+ /* figure out newly masked/unmasked bits */
+ changed = vgpu_vreg(vgpu, reg) ^ imr;
+ masked = (vgpu_vreg(vgpu, reg) & changed) ^ changed;
+ unmasked = masked ^ changed;
+
+ gvt_dbg_irq("changed %x, masked %x, unmasked %x\n",
+ changed, masked, unmasked);
+
+ vgpu_vreg(vgpu, reg) = imr;
+
+ ops->check_pending_irq(vgpu);
+ gvt_dbg_irq("IRQ: new vIMR %x\n", vgpu_vreg(vgpu, reg));
+ return 0;
+}
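+
+/*
+ * Worked example (hypothetical values): with old vIMR = 0x0000000f and a
+ * guest write of imr = 0x000000f0,
+ *
+ *	changed  = 0x0f ^ 0xf0          = 0xff
+ *	masked   = (0x0f & 0xff) ^ 0xff = 0xf0   bits newly masked
+ *	unmasked = 0xf0 ^ 0xff          = 0x0f   bits newly unmasked
+ *
+ * i.e. the XOR arithmetic splits the changed bits by their new value.
+ */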
+
+/**
+ * intel_vgpu_reg_master_irq_handler - master IRQ write emulation handler
+ * @vgpu: a vGPU
+ * @reg: register offset written by guest
+ * @p_data: register data written by guest
+ * @bytes: register data length
+ *
+ * This function is used to emulate the master IRQ register on gen8+.
+ *
+ * Returns:
+ * Zero on success, negative error code if failed.
+ *
+ */
+int intel_vgpu_reg_master_irq_handler(struct intel_vgpu *vgpu,
+ unsigned int reg, void *p_data, unsigned int bytes)
+{
+ struct intel_gvt *gvt = vgpu->gvt;
+ struct intel_gvt_irq_ops *ops = gvt->irq.ops;
+ u32 changed, enabled, disabled;
+ u32 ier = *(u32 *)p_data;
+ u32 virtual_ier = vgpu_vreg(vgpu, reg);
+
+ gvt_dbg_irq("write master irq reg %x with val %x\n",
+ reg, ier);
+
+ gvt_dbg_irq("old vreg %x\n", vgpu_vreg(vgpu, reg));
+
+ /*
+ * GEN8_MASTER_IRQ is a special irq register,
+ * only bit 31 is allowed to be modified
+ * and treated as an IER bit.
+ */
+ ier &= GEN8_MASTER_IRQ_CONTROL;
+ virtual_ier &= GEN8_MASTER_IRQ_CONTROL;
+ vgpu_vreg(vgpu, reg) &= ~GEN8_MASTER_IRQ_CONTROL;
+ vgpu_vreg(vgpu, reg) |= ier;
+
+ /* figure out newly enabled/disabled bits */
+ changed = virtual_ier ^ ier;
+ enabled = (virtual_ier & changed) ^ changed;
+ disabled = enabled ^ changed;
+
+ gvt_dbg_irq("changed %x, enabled %x, disabled %x\n",
+ changed, enabled, disabled);
+
+ ops->check_pending_irq(vgpu);
+ gvt_dbg_irq("new vreg %x\n", vgpu_vreg(vgpu, reg));
+ return 0;
+}
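+
+/*
+ * Example (hypothetical): a guest write of 0x80000000 while bit 31 was
+ * previously clear gives changed = enabled = GEN8_MASTER_IRQ_CONTROL and
+ * disabled = 0; every other bit of the written value is dropped by the
+ * GEN8_MASTER_IRQ_CONTROL masking above.
+ */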
+
+/**
+ * intel_vgpu_reg_ier_handler - Generic IER write emulation handler
+ * @vgpu: a vGPU
+ * @reg: register offset written by guest
+ * @p_data: register data written by guest
+ * @bytes: register data length
+ *
+ * This function is used to emulate the generic IER register behavior.
+ *
+ * Returns:
+ * Zero on success, negative error code if failed.
+ *
+ */
+int intel_vgpu_reg_ier_handler(struct intel_vgpu *vgpu,
+ unsigned int reg, void *p_data, unsigned int bytes)
+{
+ struct intel_gvt *gvt = vgpu->gvt;
+ struct intel_gvt_irq_ops *ops = gvt->irq.ops;
+ struct intel_gvt_irq_info *info;
+ u32 changed, enabled, disabled;
+ u32 ier = *(u32 *)p_data;
+
+ gvt_dbg_irq("write IER %x with val %x\n",
+ reg, ier);
+
+ gvt_dbg_irq("old vIER %x\n", vgpu_vreg(vgpu, reg));
+
+ /* figure out newly enabled/disabled bits */
+ changed = vgpu_vreg(vgpu, reg) ^ ier;
+ enabled = (vgpu_vreg(vgpu, reg) & changed) ^ changed;
+ disabled = enabled ^ changed;
+
+ gvt_dbg_irq("changed %x, enabled %x, disabled %x\n",
+ changed, enabled, disabled);
+ vgpu_vreg(vgpu, reg) = ier;
+
+ info = regbase_to_irq_info(gvt, ier_to_regbase(reg));
+ if (WARN_ON(!info))
+ return -EINVAL;
+
+ if (info->has_upstream_irq)
+ update_upstream_irq(vgpu, info);
+
+ ops->check_pending_irq(vgpu);
+ gvt_dbg_irq("new vIER %x\n", vgpu_vreg(vgpu, reg));
+ return 0;
+}
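+
+/*
+ * The enabled/disabled split follows the same XOR pattern as the IMR
+ * handler; e.g. old vIER = 0x1 and ier = 0x3 (hypothetical values) give
+ * changed = 0x2, enabled = 0x2, disabled = 0.
+ */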
+
+/**
+ * intel_vgpu_reg_iir_handler - Generic IIR write emulation handler
+ * @vgpu: a vGPU
+ * @reg: register offset written by guest
+ * @p_data: register data written by guest
+ * @bytes: register data length
+ *
+ * This function is used to emulate the generic IIR register behavior.
+ *
+ * Returns:
+ * Zero on success, negative error code if failed.
+ *
+ */
+int intel_vgpu_reg_iir_handler(struct intel_vgpu *vgpu, unsigned int reg,
+ void *p_data, unsigned int bytes)
+{
+ struct intel_gvt_irq_info *info = regbase_to_irq_info(vgpu->gvt,
+ iir_to_regbase(reg));
+ u32 iir = *(u32 *)p_data;
+
+ gvt_dbg_irq("write IIR %x with val %x\n", reg, iir);
+
+ if (WARN_ON(!info))
+ return -EINVAL;
+
+ vgpu_vreg(vgpu, reg) &= ~iir;
+
+ if (info->has_upstream_irq)
+ update_upstream_irq(vgpu, info);
+ return 0;
+}
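+
+/*
+ * Worked example (hypothetical values): IIR is write-one-to-clear, so
+ * with vIIR = 0b1010 a guest write of 0b0010 leaves vIIR = 0b1000, and
+ * writing 0 acknowledges nothing.
+ */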
+
+static struct intel_gvt_irq_map gen8_irq_map[] = {
+ { INTEL_GVT_IRQ_INFO_MASTER, 0, INTEL_GVT_IRQ_INFO_GT0, 0xffff },
+ { INTEL_GVT_IRQ_INFO_MASTER, 1, INTEL_GVT_IRQ_INFO_GT0, 0xffff0000 },
+ { INTEL_GVT_IRQ_INFO_MASTER, 2, INTEL_GVT_IRQ_INFO_GT1, 0xffff },
+ { INTEL_GVT_IRQ_INFO_MASTER, 3, INTEL_GVT_IRQ_INFO_GT1, 0xffff0000 },
+ { INTEL_GVT_IRQ_INFO_MASTER, 4, INTEL_GVT_IRQ_INFO_GT2, 0xffff },
+ { INTEL_GVT_IRQ_INFO_MASTER, 6, INTEL_GVT_IRQ_INFO_GT3, 0xffff },
+ { INTEL_GVT_IRQ_INFO_MASTER, 16, INTEL_GVT_IRQ_INFO_DE_PIPE_A, ~0 },
+ { INTEL_GVT_IRQ_INFO_MASTER, 17, INTEL_GVT_IRQ_INFO_DE_PIPE_B, ~0 },
+ { INTEL_GVT_IRQ_INFO_MASTER, 18, INTEL_GVT_IRQ_INFO_DE_PIPE_C, ~0 },
+ { INTEL_GVT_IRQ_INFO_MASTER, 20, INTEL_GVT_IRQ_INFO_DE_PORT, ~0 },
+ { INTEL_GVT_IRQ_INFO_MASTER, 22, INTEL_GVT_IRQ_INFO_DE_MISC, ~0 },
+ { INTEL_GVT_IRQ_INFO_MASTER, 23, INTEL_GVT_IRQ_INFO_PCH, ~0 },
+ { INTEL_GVT_IRQ_INFO_MASTER, 30, INTEL_GVT_IRQ_INFO_PCU, ~0 },
+ { -1, -1, ~0 },
+};
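+
+/*
+ * Reading one entry above: { MASTER, 16, DE_PIPE_A, ~0 } means "any
+ * pending bit in the DE_PIPE_A downstream register folds into bit 16 of
+ * the master register", while the 0xffff/0xffff0000 masks on the GT
+ * entries split each 32-bit GT register into a low and a high 16-bit
+ * half, one master bit each.
+ */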
+
+static void update_upstream_irq(struct intel_vgpu *vgpu,
+ struct intel_gvt_irq_info *info)
+{
+ struct intel_gvt_irq *irq = &vgpu->gvt->irq;
+ struct intel_gvt_irq_map *map = irq->irq_map;
+ struct intel_gvt_irq_info *up_irq_info = NULL;
+ u32 set_bits = 0;
+ u32 clear_bits = 0;
+ int bit;
+ u32 val = vgpu_vreg(vgpu,
+ regbase_to_iir(i915_mmio_reg_offset(info->reg_base)))
+ & vgpu_vreg(vgpu,
+ regbase_to_ier(i915_mmio_reg_offset(info->reg_base)));
+
+ if (!info->has_upstream_irq)
+ return;
+
+ for (map = irq->irq_map; map->up_irq_bit != -1; map++) {
+ if (info->group != map->down_irq_group)
+ continue;
+
+ if (!up_irq_info)
+ up_irq_info = irq->info[map->up_irq_group];
+ else
+ WARN_ON(up_irq_info != irq->info[map->up_irq_group]);
+
+ bit = map->up_irq_bit;
+
+ if (val & map->down_irq_bitmask)
+ set_bits |= (1 << bit);
+ else
+ clear_bits |= (1 << bit);
+ }
+
+ WARN_ON(!up_irq_info);
+
+ if (up_irq_info->group == INTEL_GVT_IRQ_INFO_MASTER) {
+ u32 isr = i915_mmio_reg_offset(up_irq_info->reg_base);
+
+ vgpu_vreg(vgpu, isr) &= ~clear_bits;
+ vgpu_vreg(vgpu, isr) |= set_bits;
+ } else {
+ u32 iir = regbase_to_iir(
+ i915_mmio_reg_offset(up_irq_info->reg_base));
+ u32 imr = regbase_to_imr(
+ i915_mmio_reg_offset(up_irq_info->reg_base));
+
+ vgpu_vreg(vgpu, iir) |= (set_bits & ~vgpu_vreg(vgpu, imr));
+ }
+
+ if (up_irq_info->has_upstream_irq)
+ update_upstream_irq(vgpu, up_irq_info);
+}
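+
+/*
+ * Example walk (hypothetical): a vblank bit pending in DE_PIPE_A's
+ * IIR & IER makes val non-zero, the { MASTER, 16, DE_PIPE_A, ~0 } map
+ * entry puts bit 16 into set_bits, and because the upstream group is
+ * MASTER the bit is written straight into the master register. For a
+ * non-master upstream level, the bit would instead be raised in that
+ * level's IIR, gated by its IMR, and the recursion repeats one level up.
+ */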
+
+static void init_irq_map(struct intel_gvt_irq *irq)
+{
+ struct intel_gvt_irq_map *map;
+ struct intel_gvt_irq_info *up_info, *down_info;
+ int up_bit;
+
+ for (map = irq->irq_map; map->up_irq_bit != -1; map++) {
+ up_info = irq->info[map->up_irq_group];
+ up_bit = map->up_irq_bit;
+ down_info = irq->info[map->down_irq_group];
+
+ set_bit(up_bit, up_info->downstream_irq_bitmap);
+ down_info->has_upstream_irq = true;
+
+ gvt_dbg_irq("[up] grp %d bit %d -> [down] grp %d bitmask %x\n",
+ up_info->group, up_bit,
+ down_info->group, map->down_irq_bitmask);
+ }
+}
+
+/* =======================vEvent injection===================== */
+static int inject_virtual_interrupt(struct intel_vgpu *vgpu)
+{
+ return intel_gvt_hypervisor_inject_msi(vgpu);
+}
+
+static void propagate_event(struct intel_gvt_irq *irq,
+ enum intel_gvt_event_type event, struct intel_vgpu *vgpu)
+{
+ struct intel_gvt_irq_info *info;
+ unsigned int reg_base;
+ int bit;
+
+ info = get_irq_info(irq, event);
+ if (WARN_ON(!info))
+ return;
+
+ reg_base = i915_mmio_reg_offset(info->reg_base);
+ bit = irq->events[event].bit;
+
+ if (!test_bit(bit, (void *)&vgpu_vreg(vgpu,
+ regbase_to_imr(reg_base)))) {
+ gvt_dbg_irq("set bit (%d) for (%s) for vgpu (%d)\n",
+ bit, irq_name[event], vgpu->id);
+ set_bit(bit, (void *)&vgpu_vreg(vgpu,
+ regbase_to_iir(reg_base)));
+ }
+}
+
+/* =======================vEvent Handlers===================== */
+static void handle_default_event_virt(struct intel_gvt_irq *irq,
+ enum intel_gvt_event_type event, struct intel_vgpu *vgpu)
+{
+ if (!vgpu->irq.irq_warn_once[event]) {
+ gvt_dbg_core("vgpu%d: IRQ receive event %d (%s)\n",
+ vgpu->id, event, irq_name[event]);
+ vgpu->irq.irq_warn_once[event] = true;
+ }
+ propagate_event(irq, event, vgpu);
+}
+
+/* =====================GEN specific logic======================= */
+/* GEN8 interrupt routines. */
+
+#define DEFINE_GVT_GEN8_INTEL_GVT_IRQ_INFO(regname, regbase) \
+static struct intel_gvt_irq_info gen8_##regname##_info = { \
+ .name = #regname"-IRQ", \
+ .reg_base = (regbase), \
+ .bit_to_event = {[0 ... INTEL_GVT_IRQ_BITWIDTH-1] = \
+ INTEL_GVT_EVENT_RESERVED}, \
+}
+
+DEFINE_GVT_GEN8_INTEL_GVT_IRQ_INFO(gt0, GEN8_GT_ISR(0));
+DEFINE_GVT_GEN8_INTEL_GVT_IRQ_INFO(gt1, GEN8_GT_ISR(1));
+DEFINE_GVT_GEN8_INTEL_GVT_IRQ_INFO(gt2, GEN8_GT_ISR(2));
+DEFINE_GVT_GEN8_INTEL_GVT_IRQ_INFO(gt3, GEN8_GT_ISR(3));
+DEFINE_GVT_GEN8_INTEL_GVT_IRQ_INFO(de_pipe_a, GEN8_DE_PIPE_ISR(PIPE_A));
+DEFINE_GVT_GEN8_INTEL_GVT_IRQ_INFO(de_pipe_b, GEN8_DE_PIPE_ISR(PIPE_B));
+DEFINE_GVT_GEN8_INTEL_GVT_IRQ_INFO(de_pipe_c, GEN8_DE_PIPE_ISR(PIPE_C));
+DEFINE_GVT_GEN8_INTEL_GVT_IRQ_INFO(de_port, GEN8_DE_PORT_ISR);
+DEFINE_GVT_GEN8_INTEL_GVT_IRQ_INFO(de_misc, GEN8_DE_MISC_ISR);
+DEFINE_GVT_GEN8_INTEL_GVT_IRQ_INFO(pcu, GEN8_PCU_ISR);
+DEFINE_GVT_GEN8_INTEL_GVT_IRQ_INFO(master, GEN8_MASTER_IRQ);
+
+static struct intel_gvt_irq_info gvt_base_pch_info = {
+ .name = "PCH-IRQ",
+ .reg_base = SDEISR,
+ .bit_to_event = {[0 ... INTEL_GVT_IRQ_BITWIDTH-1] =
+ INTEL_GVT_EVENT_RESERVED},
+};
+
+static void gen8_check_pending_irq(struct intel_vgpu *vgpu)
+{
+ struct intel_gvt_irq *irq = &vgpu->gvt->irq;
+ int i;
+
+ if (!(vgpu_vreg(vgpu, i915_mmio_reg_offset(GEN8_MASTER_IRQ)) &
+ GEN8_MASTER_IRQ_CONTROL))
+ return;
+
+ for_each_set_bit(i, irq->irq_info_bitmap, INTEL_GVT_IRQ_INFO_MAX) {
+ struct intel_gvt_irq_info *info = irq->info[i];
+ u32 reg_base;
+
+ if (!info->has_upstream_irq)
+ continue;
+
+ reg_base = i915_mmio_reg_offset(info->reg_base);
+ if ((vgpu_vreg(vgpu, regbase_to_iir(reg_base))
+ & vgpu_vreg(vgpu, regbase_to_ier(reg_base))))
+ update_upstream_irq(vgpu, info);
+ }
+
+ if (vgpu_vreg(vgpu, i915_mmio_reg_offset(GEN8_MASTER_IRQ))
+ & ~GEN8_MASTER_IRQ_CONTROL)
+ inject_virtual_interrupt(vgpu);
+}
+
+static void gen8_init_irq(
+ struct intel_gvt_irq *irq)
+{
+ struct intel_gvt *gvt = irq_to_gvt(irq);
+
+#define SET_BIT_INFO(s, b, e, i) \
+ do { \
+ s->events[e].bit = b; \
+ s->events[e].info = s->info[i]; \
+ s->info[i]->bit_to_event[b] = e;\
+ } while (0)
+
+#define SET_IRQ_GROUP(s, g, i) \
+ do { \
+ s->info[g] = i; \
+ (i)->group = g; \
+ set_bit(g, s->irq_info_bitmap); \
+ } while (0)
+
+ SET_IRQ_GROUP(irq, INTEL_GVT_IRQ_INFO_MASTER, &gen8_master_info);
+ SET_IRQ_GROUP(irq, INTEL_GVT_IRQ_INFO_GT0, &gen8_gt0_info);
+ SET_IRQ_GROUP(irq, INTEL_GVT_IRQ_INFO_GT1, &gen8_gt1_info);
+ SET_IRQ_GROUP(irq, INTEL_GVT_IRQ_INFO_GT2, &gen8_gt2_info);
+ SET_IRQ_GROUP(irq, INTEL_GVT_IRQ_INFO_GT3, &gen8_gt3_info);
+ SET_IRQ_GROUP(irq, INTEL_GVT_IRQ_INFO_DE_PIPE_A, &gen8_de_pipe_a_info);
+ SET_IRQ_GROUP(irq, INTEL_GVT_IRQ_INFO_DE_PIPE_B, &gen8_de_pipe_b_info);
+ SET_IRQ_GROUP(irq, INTEL_GVT_IRQ_INFO_DE_PIPE_C, &gen8_de_pipe_c_info);
+ SET_IRQ_GROUP(irq, INTEL_GVT_IRQ_INFO_DE_PORT, &gen8_de_port_info);
+ SET_IRQ_GROUP(irq, INTEL_GVT_IRQ_INFO_DE_MISC, &gen8_de_misc_info);
+ SET_IRQ_GROUP(irq, INTEL_GVT_IRQ_INFO_PCU, &gen8_pcu_info);
+ SET_IRQ_GROUP(irq, INTEL_GVT_IRQ_INFO_PCH, &gvt_base_pch_info);
+
+ /* GEN8 level 2 interrupts. */
+
+ /* GEN8 interrupt GT0 events */
+ SET_BIT_INFO(irq, 0, RCS_MI_USER_INTERRUPT, INTEL_GVT_IRQ_INFO_GT0);
+ SET_BIT_INFO(irq, 4, RCS_PIPE_CONTROL, INTEL_GVT_IRQ_INFO_GT0);
+ SET_BIT_INFO(irq, 8, RCS_AS_CONTEXT_SWITCH, INTEL_GVT_IRQ_INFO_GT0);
+
+ SET_BIT_INFO(irq, 16, BCS_MI_USER_INTERRUPT, INTEL_GVT_IRQ_INFO_GT0);
+ SET_BIT_INFO(irq, 20, BCS_MI_FLUSH_DW, INTEL_GVT_IRQ_INFO_GT0);
+ SET_BIT_INFO(irq, 24, BCS_AS_CONTEXT_SWITCH, INTEL_GVT_IRQ_INFO_GT0);
+
+ /* GEN8 interrupt GT1 events */
+ SET_BIT_INFO(irq, 0, VCS_MI_USER_INTERRUPT, INTEL_GVT_IRQ_INFO_GT1);
+ SET_BIT_INFO(irq, 4, VCS_MI_FLUSH_DW, INTEL_GVT_IRQ_INFO_GT1);
+ SET_BIT_INFO(irq, 8, VCS_AS_CONTEXT_SWITCH, INTEL_GVT_IRQ_INFO_GT1);
+
+ if (HAS_BSD2(gvt->dev_priv)) {
+ SET_BIT_INFO(irq, 16, VCS2_MI_USER_INTERRUPT,
+ INTEL_GVT_IRQ_INFO_GT1);
+ SET_BIT_INFO(irq, 20, VCS2_MI_FLUSH_DW,
+ INTEL_GVT_IRQ_INFO_GT1);
+ SET_BIT_INFO(irq, 24, VCS2_AS_CONTEXT_SWITCH,
+ INTEL_GVT_IRQ_INFO_GT1);
+ }
+
+ /* GEN8 interrupt GT3 events */
+ SET_BIT_INFO(irq, 0, VECS_MI_USER_INTERRUPT, INTEL_GVT_IRQ_INFO_GT3);
+ SET_BIT_INFO(irq, 4, VECS_MI_FLUSH_DW, INTEL_GVT_IRQ_INFO_GT3);
+ SET_BIT_INFO(irq, 8, VECS_AS_CONTEXT_SWITCH, INTEL_GVT_IRQ_INFO_GT3);
+
+ SET_BIT_INFO(irq, 0, PIPE_A_VBLANK, INTEL_GVT_IRQ_INFO_DE_PIPE_A);
+ SET_BIT_INFO(irq, 0, PIPE_B_VBLANK, INTEL_GVT_IRQ_INFO_DE_PIPE_B);
+ SET_BIT_INFO(irq, 0, PIPE_C_VBLANK, INTEL_GVT_IRQ_INFO_DE_PIPE_C);
+
+ /* GEN8 interrupt DE PORT events */
+ SET_BIT_INFO(irq, 0, AUX_CHANNEL_A, INTEL_GVT_IRQ_INFO_DE_PORT);
+ SET_BIT_INFO(irq, 3, DP_A_HOTPLUG, INTEL_GVT_IRQ_INFO_DE_PORT);
+
+ /* GEN8 interrupt DE MISC events */
+ SET_BIT_INFO(irq, 0, GSE, INTEL_GVT_IRQ_INFO_DE_MISC);
+
+ /* PCH events */
+ SET_BIT_INFO(irq, 17, GMBUS, INTEL_GVT_IRQ_INFO_PCH);
+ SET_BIT_INFO(irq, 19, CRT_HOTPLUG, INTEL_GVT_IRQ_INFO_PCH);
+ SET_BIT_INFO(irq, 21, DP_B_HOTPLUG, INTEL_GVT_IRQ_INFO_PCH);
+ SET_BIT_INFO(irq, 22, DP_C_HOTPLUG, INTEL_GVT_IRQ_INFO_PCH);
+ SET_BIT_INFO(irq, 23, DP_D_HOTPLUG, INTEL_GVT_IRQ_INFO_PCH);
+
+ if (IS_BROADWELL(gvt->dev_priv)) {
+ SET_BIT_INFO(irq, 25, AUX_CHANNEL_B, INTEL_GVT_IRQ_INFO_PCH);
+ SET_BIT_INFO(irq, 26, AUX_CHANNEL_C, INTEL_GVT_IRQ_INFO_PCH);
+ SET_BIT_INFO(irq, 27, AUX_CHANNEL_D, INTEL_GVT_IRQ_INFO_PCH);
+
+ SET_BIT_INFO(irq, 4, PRIMARY_A_FLIP_DONE, INTEL_GVT_IRQ_INFO_DE_PIPE_A);
+ SET_BIT_INFO(irq, 5, SPRITE_A_FLIP_DONE, INTEL_GVT_IRQ_INFO_DE_PIPE_A);
+
+ SET_BIT_INFO(irq, 4, PRIMARY_B_FLIP_DONE, INTEL_GVT_IRQ_INFO_DE_PIPE_B);
+ SET_BIT_INFO(irq, 5, SPRITE_B_FLIP_DONE, INTEL_GVT_IRQ_INFO_DE_PIPE_B);
+
+ SET_BIT_INFO(irq, 4, PRIMARY_C_FLIP_DONE, INTEL_GVT_IRQ_INFO_DE_PIPE_C);
+ SET_BIT_INFO(irq, 5, SPRITE_C_FLIP_DONE, INTEL_GVT_IRQ_INFO_DE_PIPE_C);
+ } else if (IS_SKYLAKE(gvt->dev_priv)) {
+ SET_BIT_INFO(irq, 25, AUX_CHANNEL_B, INTEL_GVT_IRQ_INFO_DE_PORT);
+ SET_BIT_INFO(irq, 26, AUX_CHANNEL_C, INTEL_GVT_IRQ_INFO_DE_PORT);
+ SET_BIT_INFO(irq, 27, AUX_CHANNEL_D, INTEL_GVT_IRQ_INFO_DE_PORT);
+
+ SET_BIT_INFO(irq, 3, PRIMARY_A_FLIP_DONE, INTEL_GVT_IRQ_INFO_DE_PIPE_A);
+ SET_BIT_INFO(irq, 3, PRIMARY_B_FLIP_DONE, INTEL_GVT_IRQ_INFO_DE_PIPE_B);
+ SET_BIT_INFO(irq, 3, PRIMARY_C_FLIP_DONE, INTEL_GVT_IRQ_INFO_DE_PIPE_C);
+ }
+
+ /* GEN8 interrupt PCU events */
+ SET_BIT_INFO(irq, 24, PCU_THERMAL, INTEL_GVT_IRQ_INFO_PCU);
+ SET_BIT_INFO(irq, 25, PCU_PCODE2DRIVER_MAILBOX, INTEL_GVT_IRQ_INFO_PCU);
+}
+
+static struct intel_gvt_irq_ops gen8_irq_ops = {
+ .init_irq = gen8_init_irq,
+ .check_pending_irq = gen8_check_pending_irq,
+};
+
+/**
+ * intel_vgpu_trigger_virtual_event - Trigger a virtual event for a vGPU
+ * @vgpu: a vGPU
+ * @event: interrupt event
+ *
+ * This function is used to trigger a virtual interrupt event for a vGPU.
+ * The caller provides the event to be triggered; the framework emulates
+ * the corresponding IRQ register bit changes.
+ *
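+ * Example (illustrative): a handler that has completed a page flip on
+ * pipe A forwards it to the guest with
+ *   intel_vgpu_trigger_virtual_event(vgpu, PRIMARY_A_FLIP_DONE);
+ *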
+ */
+void intel_vgpu_trigger_virtual_event(struct intel_vgpu *vgpu,
+ enum intel_gvt_event_type event)
+{
+ struct intel_gvt *gvt = vgpu->gvt;
+ struct intel_gvt_irq *irq = &gvt->irq;
+ gvt_event_virt_handler_t handler;
+ struct intel_gvt_irq_ops *ops = gvt->irq.ops;
+
+ handler = get_event_virt_handler(irq, event);
+ WARN_ON(!handler);
+
+ handler(irq, event, vgpu);
+
+ ops->check_pending_irq(vgpu);
+}
+
+static void init_events(struct intel_gvt_irq *irq)
+{
+ int i;
+
+ for (i = 0; i < INTEL_GVT_EVENT_MAX; i++) {
+ irq->events[i].info = NULL;
+ irq->events[i].v_handler = handle_default_event_virt;
+ }
+}
+
+static enum hrtimer_restart vblank_timer_fn(struct hrtimer *data)
+{
+ struct intel_gvt_vblank_timer *vblank_timer;
+ struct intel_gvt_irq *irq;
+ struct intel_gvt *gvt;
+
+ vblank_timer = container_of(data, struct intel_gvt_vblank_timer, timer);
+ irq = container_of(vblank_timer, struct intel_gvt_irq, vblank_timer);
+ gvt = container_of(irq, struct intel_gvt, irq);
+
+ intel_gvt_request_service(gvt, INTEL_GVT_REQUEST_EMULATE_VBLANK);
+ hrtimer_add_expires_ns(&vblank_timer->timer, vblank_timer->period);
+ return HRTIMER_RESTART;
+}
+
+/**
+ * intel_gvt_clean_irq - clean up GVT-g IRQ emulation subsystem
+ * @gvt: a GVT device
+ *
+ * This function is called at driver unloading stage, to clean up GVT-g IRQ
+ * emulation subsystem.
+ *
+ */
+void intel_gvt_clean_irq(struct intel_gvt *gvt)
+{
+ struct intel_gvt_irq *irq = &gvt->irq;
+
+ hrtimer_cancel(&irq->vblank_timer.timer);
+}
+
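+/* emulated vblank period in ns: 16 ms per frame, i.e. a ~60 Hz refresh */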
+#define VBLANK_TIMER_PERIOD 16000000
+
+/**
+ * intel_gvt_init_irq - initialize GVT-g IRQ emulation subsystem
+ * @gvt: a GVT device
+ *
+ * This function is called at driver loading stage, to initialize the GVT-g IRQ
+ * emulation subsystem.
+ *
+ * Returns:
+ * Zero on success, negative error code if failed.
+ */
+int intel_gvt_init_irq(struct intel_gvt *gvt)
+{
+ struct intel_gvt_irq *irq = &gvt->irq;
+ struct intel_gvt_vblank_timer *vblank_timer = &irq->vblank_timer;
+
+ gvt_dbg_core("init irq framework\n");
+
+ if (IS_BROADWELL(gvt->dev_priv) || IS_SKYLAKE(gvt->dev_priv)) {
+ irq->ops = &gen8_irq_ops;
+ irq->irq_map = gen8_irq_map;
+ } else {
+ WARN_ON(1);
+ return -ENODEV;
+ }
+
+ /* common event initialization */
+ init_events(irq);
+
+ /* gen specific initialization */
+ irq->ops->init_irq(irq);
+
+ init_irq_map(irq);
+
+ hrtimer_init(&vblank_timer->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
+ vblank_timer->timer.function = vblank_timer_fn;
+	vblank_timer->period = VBLANK_TIMER_PERIOD;
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/i915/gvt/interrupt.h b/drivers/gpu/drm/i915/gvt/interrupt.h
new file mode 100644
index 000000000000..5313fb1b33e1
--- /dev/null
+++ b/drivers/gpu/drm/i915/gvt/interrupt.h
@@ -0,0 +1,233 @@
+/*
+ * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ * Kevin Tian <kevin.tian@intel.com>
+ * Zhi Wang <zhi.a.wang@intel.com>
+ *
+ * Contributors:
+ * Min He <min.he@intel.com>
+ *
+ */
+
+#ifndef _GVT_INTERRUPT_H_
+#define _GVT_INTERRUPT_H_
+
+enum intel_gvt_event_type {
+ RCS_MI_USER_INTERRUPT = 0,
+ RCS_DEBUG,
+ RCS_MMIO_SYNC_FLUSH,
+ RCS_CMD_STREAMER_ERR,
+ RCS_PIPE_CONTROL,
+ RCS_L3_PARITY_ERR,
+ RCS_WATCHDOG_EXCEEDED,
+ RCS_PAGE_DIRECTORY_FAULT,
+ RCS_AS_CONTEXT_SWITCH,
+ RCS_MONITOR_BUFF_HALF_FULL,
+
+ VCS_MI_USER_INTERRUPT,
+ VCS_MMIO_SYNC_FLUSH,
+ VCS_CMD_STREAMER_ERR,
+ VCS_MI_FLUSH_DW,
+ VCS_WATCHDOG_EXCEEDED,
+ VCS_PAGE_DIRECTORY_FAULT,
+ VCS_AS_CONTEXT_SWITCH,
+
+ VCS2_MI_USER_INTERRUPT,
+ VCS2_MI_FLUSH_DW,
+ VCS2_AS_CONTEXT_SWITCH,
+
+ BCS_MI_USER_INTERRUPT,
+ BCS_MMIO_SYNC_FLUSH,
+ BCS_CMD_STREAMER_ERR,
+ BCS_MI_FLUSH_DW,
+ BCS_PAGE_DIRECTORY_FAULT,
+ BCS_AS_CONTEXT_SWITCH,
+
+ VECS_MI_USER_INTERRUPT,
+ VECS_MI_FLUSH_DW,
+ VECS_AS_CONTEXT_SWITCH,
+
+ PIPE_A_FIFO_UNDERRUN,
+ PIPE_B_FIFO_UNDERRUN,
+ PIPE_A_CRC_ERR,
+ PIPE_B_CRC_ERR,
+ PIPE_A_CRC_DONE,
+ PIPE_B_CRC_DONE,
+ PIPE_A_ODD_FIELD,
+ PIPE_B_ODD_FIELD,
+ PIPE_A_EVEN_FIELD,
+ PIPE_B_EVEN_FIELD,
+ PIPE_A_LINE_COMPARE,
+ PIPE_B_LINE_COMPARE,
+ PIPE_C_LINE_COMPARE,
+ PIPE_A_VBLANK,
+ PIPE_B_VBLANK,
+ PIPE_C_VBLANK,
+ PIPE_A_VSYNC,
+ PIPE_B_VSYNC,
+ PIPE_C_VSYNC,
+ PRIMARY_A_FLIP_DONE,
+ PRIMARY_B_FLIP_DONE,
+ PRIMARY_C_FLIP_DONE,
+ SPRITE_A_FLIP_DONE,
+ SPRITE_B_FLIP_DONE,
+ SPRITE_C_FLIP_DONE,
+
+ PCU_THERMAL,
+ PCU_PCODE2DRIVER_MAILBOX,
+
+ DPST_PHASE_IN,
+ DPST_HISTOGRAM,
+ GSE,
+ DP_A_HOTPLUG,
+ AUX_CHANNEL_A,
+ PERF_COUNTER,
+ POISON,
+ GTT_FAULT,
+ ERROR_INTERRUPT_COMBINED,
+
+ FDI_RX_INTERRUPTS_TRANSCODER_A,
+ AUDIO_CP_CHANGE_TRANSCODER_A,
+ AUDIO_CP_REQUEST_TRANSCODER_A,
+ FDI_RX_INTERRUPTS_TRANSCODER_B,
+ AUDIO_CP_CHANGE_TRANSCODER_B,
+ AUDIO_CP_REQUEST_TRANSCODER_B,
+ FDI_RX_INTERRUPTS_TRANSCODER_C,
+ AUDIO_CP_CHANGE_TRANSCODER_C,
+ AUDIO_CP_REQUEST_TRANSCODER_C,
+ ERR_AND_DBG,
+ GMBUS,
+ SDVO_B_HOTPLUG,
+ CRT_HOTPLUG,
+ DP_B_HOTPLUG,
+ DP_C_HOTPLUG,
+ DP_D_HOTPLUG,
+ AUX_CHANNEL_B,
+ AUX_CHANNEL_C,
+ AUX_CHANNEL_D,
+ AUDIO_POWER_STATE_CHANGE_B,
+ AUDIO_POWER_STATE_CHANGE_C,
+ AUDIO_POWER_STATE_CHANGE_D,
+
+ INTEL_GVT_EVENT_RESERVED,
+ INTEL_GVT_EVENT_MAX,
+};
+
+struct intel_gvt_irq;
+struct intel_gvt;
+
+typedef void (*gvt_event_virt_handler_t)(struct intel_gvt_irq *irq,
+ enum intel_gvt_event_type event, struct intel_vgpu *vgpu);
+
+struct intel_gvt_irq_ops {
+ void (*init_irq)(struct intel_gvt_irq *irq);
+ void (*check_pending_irq)(struct intel_vgpu *vgpu);
+};
+
+/* the list of physical interrupt control register groups */
+enum intel_gvt_irq_type {
+ INTEL_GVT_IRQ_INFO_GT,
+ INTEL_GVT_IRQ_INFO_DPY,
+ INTEL_GVT_IRQ_INFO_PCH,
+ INTEL_GVT_IRQ_INFO_PM,
+
+ INTEL_GVT_IRQ_INFO_MASTER,
+ INTEL_GVT_IRQ_INFO_GT0,
+ INTEL_GVT_IRQ_INFO_GT1,
+ INTEL_GVT_IRQ_INFO_GT2,
+ INTEL_GVT_IRQ_INFO_GT3,
+ INTEL_GVT_IRQ_INFO_DE_PIPE_A,
+ INTEL_GVT_IRQ_INFO_DE_PIPE_B,
+ INTEL_GVT_IRQ_INFO_DE_PIPE_C,
+ INTEL_GVT_IRQ_INFO_DE_PORT,
+ INTEL_GVT_IRQ_INFO_DE_MISC,
+ INTEL_GVT_IRQ_INFO_AUD,
+ INTEL_GVT_IRQ_INFO_PCU,
+
+ INTEL_GVT_IRQ_INFO_MAX,
+};
+
+#define INTEL_GVT_IRQ_BITWIDTH 32
+
+/* device specific interrupt bit definitions */
+struct intel_gvt_irq_info {
+ char *name;
+ i915_reg_t reg_base;
+ enum intel_gvt_event_type bit_to_event[INTEL_GVT_IRQ_BITWIDTH];
+ unsigned long warned;
+ int group;
+ DECLARE_BITMAP(downstream_irq_bitmap, INTEL_GVT_IRQ_BITWIDTH);
+ bool has_upstream_irq;
+};
+
+/* per-event information */
+struct intel_gvt_event_info {
+ int bit; /* map to register bit */
+ int policy; /* forwarding policy */
+ struct intel_gvt_irq_info *info; /* register info */
+ gvt_event_virt_handler_t v_handler; /* for v_event */
+};
+
+struct intel_gvt_irq_map {
+ int up_irq_group;
+ int up_irq_bit;
+ int down_irq_group;
+ u32 down_irq_bitmask;
+};
+
+struct intel_gvt_vblank_timer {
+ struct hrtimer timer;
+ u64 period;
+};
+
+/* structure containing device specific IRQ state */
+struct intel_gvt_irq {
+ struct intel_gvt_irq_ops *ops;
+ struct intel_gvt_irq_info *info[INTEL_GVT_IRQ_INFO_MAX];
+ DECLARE_BITMAP(irq_info_bitmap, INTEL_GVT_IRQ_INFO_MAX);
+ struct intel_gvt_event_info events[INTEL_GVT_EVENT_MAX];
+ DECLARE_BITMAP(pending_events, INTEL_GVT_EVENT_MAX);
+ struct intel_gvt_irq_map *irq_map;
+ struct intel_gvt_vblank_timer vblank_timer;
+};
+
+int intel_gvt_init_irq(struct intel_gvt *gvt);
+void intel_gvt_clean_irq(struct intel_gvt *gvt);
+
+void intel_vgpu_trigger_virtual_event(struct intel_vgpu *vgpu,
+ enum intel_gvt_event_type event);
+
+int intel_vgpu_reg_iir_handler(struct intel_vgpu *vgpu, unsigned int reg,
+ void *p_data, unsigned int bytes);
+int intel_vgpu_reg_ier_handler(struct intel_vgpu *vgpu,
+ unsigned int reg, void *p_data, unsigned int bytes);
+int intel_vgpu_reg_master_irq_handler(struct intel_vgpu *vgpu,
+ unsigned int reg, void *p_data, unsigned int bytes);
+int intel_vgpu_reg_imr_handler(struct intel_vgpu *vgpu,
+ unsigned int reg, void *p_data, unsigned int bytes);
+
+int gvt_ring_id_to_pipe_control_notify_event(int ring_id);
+int gvt_ring_id_to_mi_flush_dw_event(int ring_id);
+int gvt_ring_id_to_mi_user_interrupt_event(int ring_id);
+
+#endif /* _GVT_INTERRUPT_H_ */
diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c
new file mode 100644
index 000000000000..4dd6722a7339
--- /dev/null
+++ b/drivers/gpu/drm/i915/gvt/kvmgt.c
@@ -0,0 +1,1458 @@
+/*
+ * KVMGT - the implementation of Intel mediated pass-through framework for KVM
+ *
+ * Copyright(c) 2014-2016 Intel Corporation. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ * Kevin Tian <kevin.tian@intel.com>
+ * Jike Song <jike.song@intel.com>
+ * Xiaoguang Chen <xiaoguang.chen@intel.com>
+ */
+
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/mm.h>
+#include <linux/mmu_context.h>
+#include <linux/types.h>
+#include <linux/list.h>
+#include <linux/rbtree.h>
+#include <linux/spinlock.h>
+#include <linux/eventfd.h>
+#include <linux/uuid.h>
+#include <linux/kvm_host.h>
+#include <linux/vfio.h>
+#include <linux/mdev.h>
+
+#include "i915_drv.h"
+#include "gvt.h"
+
+static const struct intel_gvt_ops *intel_gvt_ops;
+
+/* helper macros copied from vfio-pci */
+#define VFIO_PCI_OFFSET_SHIFT 40
+#define VFIO_PCI_OFFSET_TO_INDEX(off) (off >> VFIO_PCI_OFFSET_SHIFT)
+#define VFIO_PCI_INDEX_TO_OFFSET(index) ((u64)(index) << VFIO_PCI_OFFSET_SHIFT)
+#define VFIO_PCI_OFFSET_MASK (((u64)(1) << VFIO_PCI_OFFSET_SHIFT) - 1)
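+/*
+ * e.g. (illustrative) a read of BAR0 at offset 0x100 arrives at file
+ * offset VFIO_PCI_INDEX_TO_OFFSET(VFIO_PCI_BAR0_REGION_INDEX) + 0x100;
+ * intel_vgpu_rw() recovers the region index and the offset within it
+ * with VFIO_PCI_OFFSET_TO_INDEX() and VFIO_PCI_OFFSET_MASK.
+ */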
+
+struct vfio_region {
+ u32 type;
+ u32 subtype;
+ size_t size;
+ u32 flags;
+};
+
+struct kvmgt_pgfn {
+ gfn_t gfn;
+ struct hlist_node hnode;
+};
+
+struct kvmgt_guest_info {
+ struct kvm *kvm;
+ struct intel_vgpu *vgpu;
+ struct kvm_page_track_notifier_node track_node;
+#define NR_BKT (1 << 18)
+ struct hlist_head ptable[NR_BKT];
+#undef NR_BKT
+};
+
+struct gvt_dma {
+ struct rb_node node;
+ gfn_t gfn;
+ kvm_pfn_t pfn;
+};
+
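+/*
+ * vgpu->handle holds a pointer to the kvmgt_guest_info of an attached
+ * guest; any value that fits in the low byte means "no guest yet".
+ */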
+static inline bool handle_valid(unsigned long handle)
+{
+ return !!(handle & ~0xff);
+}
+
+static int kvmgt_guest_init(struct mdev_device *mdev);
+static void intel_vgpu_release_work(struct work_struct *work);
+static bool kvmgt_guest_exit(struct kvmgt_guest_info *info);
+
+static struct gvt_dma *__gvt_cache_find(struct intel_vgpu *vgpu, gfn_t gfn)
+{
+ struct rb_node *node = vgpu->vdev.cache.rb_node;
+ struct gvt_dma *ret = NULL;
+
+ while (node) {
+ struct gvt_dma *itr = rb_entry(node, struct gvt_dma, node);
+
+ if (gfn < itr->gfn)
+ node = node->rb_left;
+ else if (gfn > itr->gfn)
+ node = node->rb_right;
+ else {
+ ret = itr;
+ goto out;
+ }
+ }
+
+out:
+ return ret;
+}
+
+static kvm_pfn_t gvt_cache_find(struct intel_vgpu *vgpu, gfn_t gfn)
+{
+ struct gvt_dma *entry;
+
+ mutex_lock(&vgpu->vdev.cache_lock);
+ entry = __gvt_cache_find(vgpu, gfn);
+ mutex_unlock(&vgpu->vdev.cache_lock);
+
+ return entry == NULL ? 0 : entry->pfn;
+}
+
+static void gvt_cache_add(struct intel_vgpu *vgpu, gfn_t gfn, kvm_pfn_t pfn)
+{
+ struct gvt_dma *new, *itr;
+ struct rb_node **link = &vgpu->vdev.cache.rb_node, *parent = NULL;
+
+ new = kzalloc(sizeof(struct gvt_dma), GFP_KERNEL);
+ if (!new)
+ return;
+
+ new->gfn = gfn;
+ new->pfn = pfn;
+
+ mutex_lock(&vgpu->vdev.cache_lock);
+ while (*link) {
+ parent = *link;
+ itr = rb_entry(parent, struct gvt_dma, node);
+
+ if (gfn == itr->gfn)
+ goto out;
+ else if (gfn < itr->gfn)
+ link = &parent->rb_left;
+ else
+ link = &parent->rb_right;
+ }
+
+ rb_link_node(&new->node, parent, link);
+ rb_insert_color(&new->node, &vgpu->vdev.cache);
+ mutex_unlock(&vgpu->vdev.cache_lock);
+ return;
+
+out:
+ mutex_unlock(&vgpu->vdev.cache_lock);
+ kfree(new);
+}
+
+static void __gvt_cache_remove_entry(struct intel_vgpu *vgpu,
+ struct gvt_dma *entry)
+{
+ rb_erase(&entry->node, &vgpu->vdev.cache);
+ kfree(entry);
+}
+
+static void gvt_cache_remove(struct intel_vgpu *vgpu, gfn_t gfn)
+{
+ struct device *dev = &vgpu->vdev.mdev->dev;
+ struct gvt_dma *this;
+ unsigned long g1;
+ int rc;
+
+ mutex_lock(&vgpu->vdev.cache_lock);
+ this = __gvt_cache_find(vgpu, gfn);
+ if (!this) {
+ mutex_unlock(&vgpu->vdev.cache_lock);
+ return;
+ }
+
+ g1 = gfn;
+ rc = vfio_unpin_pages(dev, &g1, 1);
+ WARN_ON(rc != 1);
+ __gvt_cache_remove_entry(vgpu, this);
+ mutex_unlock(&vgpu->vdev.cache_lock);
+}
+
+static void gvt_cache_init(struct intel_vgpu *vgpu)
+{
+ vgpu->vdev.cache = RB_ROOT;
+ mutex_init(&vgpu->vdev.cache_lock);
+}
+
+static void gvt_cache_destroy(struct intel_vgpu *vgpu)
+{
+ struct gvt_dma *dma;
+ struct rb_node *node = NULL;
+ struct device *dev = &vgpu->vdev.mdev->dev;
+ unsigned long gfn;
+
+ mutex_lock(&vgpu->vdev.cache_lock);
+ while ((node = rb_first(&vgpu->vdev.cache))) {
+ dma = rb_entry(node, struct gvt_dma, node);
+ gfn = dma->gfn;
+
+ vfio_unpin_pages(dev, &gfn, 1);
+ __gvt_cache_remove_entry(vgpu, dma);
+ }
+ mutex_unlock(&vgpu->vdev.cache_lock);
+}
+
+static struct intel_vgpu_type *intel_gvt_find_vgpu_type(struct intel_gvt *gvt,
+ const char *name)
+{
+ int i;
+ struct intel_vgpu_type *t;
+ const char *driver_name = dev_driver_string(
+ &gvt->dev_priv->drm.pdev->dev);
+
+ for (i = 0; i < gvt->num_types; i++) {
+ t = &gvt->types[i];
+ if (!strncmp(t->name, name + strlen(driver_name) + 1,
+ sizeof(t->name)))
+ return t;
+ }
+
+ return NULL;
+}
+
+static ssize_t available_instance_show(struct kobject *kobj, struct device *dev,
+ char *buf)
+{
+ struct intel_vgpu_type *type;
+ unsigned int num = 0;
+ void *gvt = kdev_to_i915(dev)->gvt;
+
+ type = intel_gvt_find_vgpu_type(gvt, kobject_name(kobj));
+	if (type)
+		num = type->avail_instance;
+
+ return sprintf(buf, "%u\n", num);
+}
+
+static ssize_t device_api_show(struct kobject *kobj, struct device *dev,
+ char *buf)
+{
+ return sprintf(buf, "%s\n", VFIO_DEVICE_API_PCI_STRING);
+}
+
+static ssize_t description_show(struct kobject *kobj, struct device *dev,
+ char *buf)
+{
+ struct intel_vgpu_type *type;
+ void *gvt = kdev_to_i915(dev)->gvt;
+
+ type = intel_gvt_find_vgpu_type(gvt, kobject_name(kobj));
+ if (!type)
+ return 0;
+
+ return sprintf(buf, "low_gm_size: %dMB\nhigh_gm_size: %dMB\n"
+ "fence: %d\n",
+ BYTES_TO_MB(type->low_gm_size),
+ BYTES_TO_MB(type->high_gm_size),
+ type->fence);
+}
+
+static MDEV_TYPE_ATTR_RO(available_instance);
+static MDEV_TYPE_ATTR_RO(device_api);
+static MDEV_TYPE_ATTR_RO(description);
+
+static struct attribute *type_attrs[] = {
+ &mdev_type_attr_available_instance.attr,
+ &mdev_type_attr_device_api.attr,
+ &mdev_type_attr_description.attr,
+ NULL,
+};
+
+static struct attribute_group *intel_vgpu_type_groups[] = {
+ [0 ... NR_MAX_INTEL_VGPU_TYPES - 1] = NULL,
+};
+
+static bool intel_gvt_init_vgpu_type_groups(struct intel_gvt *gvt)
+{
+ int i, j;
+ struct intel_vgpu_type *type;
+ struct attribute_group *group;
+
+ for (i = 0; i < gvt->num_types; i++) {
+ type = &gvt->types[i];
+
+ group = kzalloc(sizeof(struct attribute_group), GFP_KERNEL);
+ if (WARN_ON(!group))
+ goto unwind;
+
+ group->name = type->name;
+ group->attrs = type_attrs;
+ intel_vgpu_type_groups[i] = group;
+ }
+
+ return true;
+
+unwind:
+ for (j = 0; j < i; j++) {
+ group = intel_vgpu_type_groups[j];
+ kfree(group);
+ }
+
+ return false;
+}
+
+static void intel_gvt_cleanup_vgpu_type_groups(struct intel_gvt *gvt)
+{
+ int i;
+ struct attribute_group *group;
+
+ for (i = 0; i < gvt->num_types; i++) {
+ group = intel_vgpu_type_groups[i];
+ kfree(group);
+ }
+}
+
+static void kvmgt_protect_table_init(struct kvmgt_guest_info *info)
+{
+ hash_init(info->ptable);
+}
+
+static void kvmgt_protect_table_destroy(struct kvmgt_guest_info *info)
+{
+ struct kvmgt_pgfn *p;
+ struct hlist_node *tmp;
+ int i;
+
+ hash_for_each_safe(info->ptable, i, tmp, p, hnode) {
+ hash_del(&p->hnode);
+ kfree(p);
+ }
+}
+
+static struct kvmgt_pgfn *
+__kvmgt_protect_table_find(struct kvmgt_guest_info *info, gfn_t gfn)
+{
+ struct kvmgt_pgfn *p, *res = NULL;
+
+ hash_for_each_possible(info->ptable, p, hnode, gfn) {
+ if (gfn == p->gfn) {
+ res = p;
+ break;
+ }
+ }
+
+ return res;
+}
+
+static bool kvmgt_gfn_is_write_protected(struct kvmgt_guest_info *info,
+ gfn_t gfn)
+{
+ struct kvmgt_pgfn *p;
+
+ p = __kvmgt_protect_table_find(info, gfn);
+ return !!p;
+}
+
+static void kvmgt_protect_table_add(struct kvmgt_guest_info *info, gfn_t gfn)
+{
+ struct kvmgt_pgfn *p;
+
+ if (kvmgt_gfn_is_write_protected(info, gfn))
+ return;
+
+ p = kzalloc(sizeof(struct kvmgt_pgfn), GFP_ATOMIC);
+ if (WARN(!p, "gfn: 0x%llx\n", gfn))
+ return;
+
+ p->gfn = gfn;
+ hash_add(info->ptable, &p->hnode, gfn);
+}
+
+static void kvmgt_protect_table_del(struct kvmgt_guest_info *info,
+ gfn_t gfn)
+{
+ struct kvmgt_pgfn *p;
+
+ p = __kvmgt_protect_table_find(info, gfn);
+ if (p) {
+ hash_del(&p->hnode);
+ kfree(p);
+ }
+}
+
+static int intel_vgpu_create(struct kobject *kobj, struct mdev_device *mdev)
+{
+ struct intel_vgpu *vgpu;
+ struct intel_vgpu_type *type;
+ struct device *pdev;
+ void *gvt;
+
+ pdev = mdev->parent->dev;
+ gvt = kdev_to_i915(pdev)->gvt;
+
+ type = intel_gvt_find_vgpu_type(gvt, kobject_name(kobj));
+ if (!type) {
+ gvt_err("failed to find type %s to create\n",
+ kobject_name(kobj));
+ return -EINVAL;
+ }
+
+ vgpu = intel_gvt_ops->vgpu_create(gvt, type);
+ if (IS_ERR_OR_NULL(vgpu)) {
+ gvt_err("create intel vgpu failed\n");
+ return -EINVAL;
+ }
+
+ INIT_WORK(&vgpu->vdev.release_work, intel_vgpu_release_work);
+
+ vgpu->vdev.mdev = mdev;
+ mdev_set_drvdata(mdev, vgpu);
+
+ gvt_dbg_core("intel_vgpu_create succeeded for mdev: %s\n",
+ dev_name(&mdev->dev));
+ return 0;
+}
+
+static int intel_vgpu_remove(struct mdev_device *mdev)
+{
+ struct intel_vgpu *vgpu = mdev_get_drvdata(mdev);
+
+ if (handle_valid(vgpu->handle))
+ return -EBUSY;
+
+ intel_gvt_ops->vgpu_destroy(vgpu);
+ return 0;
+}
+
+static int intel_vgpu_iommu_notifier(struct notifier_block *nb,
+ unsigned long action, void *data)
+{
+ struct intel_vgpu *vgpu = container_of(nb,
+ struct intel_vgpu,
+ vdev.iommu_notifier);
+
+ if (action == VFIO_IOMMU_NOTIFY_DMA_UNMAP) {
+ struct vfio_iommu_type1_dma_unmap *unmap = data;
+ unsigned long gfn, end_gfn;
+
+ gfn = unmap->iova >> PAGE_SHIFT;
+ end_gfn = gfn + unmap->size / PAGE_SIZE;
+
+ while (gfn < end_gfn)
+ gvt_cache_remove(vgpu, gfn++);
+ }
+
+ return NOTIFY_OK;
+}
+
+static int intel_vgpu_group_notifier(struct notifier_block *nb,
+ unsigned long action, void *data)
+{
+ struct intel_vgpu *vgpu = container_of(nb,
+ struct intel_vgpu,
+ vdev.group_notifier);
+
+ /* the only action we care about */
+ if (action == VFIO_GROUP_NOTIFY_SET_KVM) {
+ vgpu->vdev.kvm = data;
+
+ if (!data)
+ schedule_work(&vgpu->vdev.release_work);
+ }
+
+ return NOTIFY_OK;
+}
+
+static int intel_vgpu_open(struct mdev_device *mdev)
+{
+ struct intel_vgpu *vgpu = mdev_get_drvdata(mdev);
+ unsigned long events;
+ int ret;
+
+ vgpu->vdev.iommu_notifier.notifier_call = intel_vgpu_iommu_notifier;
+ vgpu->vdev.group_notifier.notifier_call = intel_vgpu_group_notifier;
+
+ events = VFIO_IOMMU_NOTIFY_DMA_UNMAP;
+ ret = vfio_register_notifier(&mdev->dev, VFIO_IOMMU_NOTIFY, &events,
+ &vgpu->vdev.iommu_notifier);
+ if (ret != 0) {
+ gvt_err("vfio_register_notifier for iommu failed: %d\n", ret);
+ goto out;
+ }
+
+ events = VFIO_GROUP_NOTIFY_SET_KVM;
+ ret = vfio_register_notifier(&mdev->dev, VFIO_GROUP_NOTIFY, &events,
+ &vgpu->vdev.group_notifier);
+ if (ret != 0) {
+ gvt_err("vfio_register_notifier for group failed: %d\n", ret);
+ goto undo_iommu;
+ }
+
+ return kvmgt_guest_init(mdev);
+
+undo_iommu:
+ vfio_unregister_notifier(&mdev->dev, VFIO_IOMMU_NOTIFY,
+ &vgpu->vdev.iommu_notifier);
+out:
+ return ret;
+}
+
+static void __intel_vgpu_release(struct intel_vgpu *vgpu)
+{
+ struct kvmgt_guest_info *info;
+
+ if (!handle_valid(vgpu->handle))
+ return;
+
+ vfio_unregister_notifier(&vgpu->vdev.mdev->dev, VFIO_IOMMU_NOTIFY,
+ &vgpu->vdev.iommu_notifier);
+ vfio_unregister_notifier(&vgpu->vdev.mdev->dev, VFIO_GROUP_NOTIFY,
+ &vgpu->vdev.group_notifier);
+
+ info = (struct kvmgt_guest_info *)vgpu->handle;
+ kvmgt_guest_exit(info);
+ vgpu->handle = 0;
+}
+
+static void intel_vgpu_release(struct mdev_device *mdev)
+{
+ struct intel_vgpu *vgpu = mdev_get_drvdata(mdev);
+
+ __intel_vgpu_release(vgpu);
+}
+
+static void intel_vgpu_release_work(struct work_struct *work)
+{
+ struct intel_vgpu *vgpu = container_of(work, struct intel_vgpu,
+ vdev.release_work);
+ __intel_vgpu_release(vgpu);
+}
+
+static uint64_t intel_vgpu_get_bar0_addr(struct intel_vgpu *vgpu)
+{
+ u32 start_lo, start_hi;
+ u32 mem_type;
+ int pos = PCI_BASE_ADDRESS_0;
+
+ start_lo = (*(u32 *)(vgpu->cfg_space.virtual_cfg_space + pos)) &
+ PCI_BASE_ADDRESS_MEM_MASK;
+ mem_type = (*(u32 *)(vgpu->cfg_space.virtual_cfg_space + pos)) &
+ PCI_BASE_ADDRESS_MEM_TYPE_MASK;
+
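+	/*
+	 * Illustrative decode: a low dword of 0xf000000c gives start_lo =
+	 * 0xf0000000 and a 64-bit mem_type, so bits 63:32 of the base come
+	 * from the next BAR dword.
+	 */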
+ switch (mem_type) {
+ case PCI_BASE_ADDRESS_MEM_TYPE_64:
+ start_hi = (*(u32 *)(vgpu->cfg_space.virtual_cfg_space
+ + pos + 4));
+ break;
+ case PCI_BASE_ADDRESS_MEM_TYPE_32:
+ case PCI_BASE_ADDRESS_MEM_TYPE_1M:
+ /* 1M mem BAR treated as 32-bit BAR */
+ default:
+ /* mem unknown type treated as 32-bit BAR */
+ start_hi = 0;
+ break;
+ }
+
+ return ((u64)start_hi << 32) | start_lo;
+}
+
+static ssize_t intel_vgpu_rw(struct mdev_device *mdev, char *buf,
+ size_t count, loff_t *ppos, bool is_write)
+{
+ struct intel_vgpu *vgpu = mdev_get_drvdata(mdev);
+ unsigned int index = VFIO_PCI_OFFSET_TO_INDEX(*ppos);
+ uint64_t pos = *ppos & VFIO_PCI_OFFSET_MASK;
+ int ret = -EINVAL;
+
+ if (index >= VFIO_PCI_NUM_REGIONS) {
+ gvt_err("invalid index: %u\n", index);
+ return -EINVAL;
+ }
+
+ switch (index) {
+ case VFIO_PCI_CONFIG_REGION_INDEX:
+ if (is_write)
+ ret = intel_gvt_ops->emulate_cfg_write(vgpu, pos,
+ buf, count);
+ else
+ ret = intel_gvt_ops->emulate_cfg_read(vgpu, pos,
+ buf, count);
+ break;
+ case VFIO_PCI_BAR0_REGION_INDEX:
+ case VFIO_PCI_BAR1_REGION_INDEX:
+ if (is_write) {
+ uint64_t bar0_start = intel_vgpu_get_bar0_addr(vgpu);
+
+ ret = intel_gvt_ops->emulate_mmio_write(vgpu,
+ bar0_start + pos, buf, count);
+ } else {
+ uint64_t bar0_start = intel_vgpu_get_bar0_addr(vgpu);
+
+ ret = intel_gvt_ops->emulate_mmio_read(vgpu,
+ bar0_start + pos, buf, count);
+ }
+ break;
+ case VFIO_PCI_BAR2_REGION_INDEX:
+ case VFIO_PCI_BAR3_REGION_INDEX:
+ case VFIO_PCI_BAR4_REGION_INDEX:
+ case VFIO_PCI_BAR5_REGION_INDEX:
+ case VFIO_PCI_VGA_REGION_INDEX:
+ case VFIO_PCI_ROM_REGION_INDEX:
+ default:
+ gvt_err("unsupported region: %u\n", index);
+ }
+
+ return ret == 0 ? count : ret;
+}
+
+static ssize_t intel_vgpu_read(struct mdev_device *mdev, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ unsigned int done = 0;
+ int ret;
+
+ while (count) {
+ size_t filled;
+
+ if (count >= 4 && !(*ppos % 4)) {
+ u32 val;
+
+ ret = intel_vgpu_rw(mdev, (char *)&val, sizeof(val),
+ ppos, false);
+ if (ret <= 0)
+ goto read_err;
+
+ if (copy_to_user(buf, &val, sizeof(val)))
+ goto read_err;
+
+ filled = 4;
+ } else if (count >= 2 && !(*ppos % 2)) {
+ u16 val;
+
+ ret = intel_vgpu_rw(mdev, (char *)&val, sizeof(val),
+ ppos, false);
+ if (ret <= 0)
+ goto read_err;
+
+ if (copy_to_user(buf, &val, sizeof(val)))
+ goto read_err;
+
+ filled = 2;
+ } else {
+ u8 val;
+
+ ret = intel_vgpu_rw(mdev, &val, sizeof(val), ppos,
+ false);
+ if (ret <= 0)
+ goto read_err;
+
+ if (copy_to_user(buf, &val, sizeof(val)))
+ goto read_err;
+
+ filled = 1;
+ }
+
+ count -= filled;
+ done += filled;
+ *ppos += filled;
+ buf += filled;
+ }
+
+ return done;
+
+read_err:
+ return -EFAULT;
+}
+
+static ssize_t intel_vgpu_write(struct mdev_device *mdev,
+ const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ unsigned int done = 0;
+ int ret;
+
+ while (count) {
+ size_t filled;
+
+ if (count >= 4 && !(*ppos % 4)) {
+ u32 val;
+
+ if (copy_from_user(&val, buf, sizeof(val)))
+ goto write_err;
+
+ ret = intel_vgpu_rw(mdev, (char *)&val, sizeof(val),
+ ppos, true);
+ if (ret <= 0)
+ goto write_err;
+
+ filled = 4;
+ } else if (count >= 2 && !(*ppos % 2)) {
+ u16 val;
+
+ if (copy_from_user(&val, buf, sizeof(val)))
+ goto write_err;
+
+ ret = intel_vgpu_rw(mdev, (char *)&val,
+ sizeof(val), ppos, true);
+ if (ret <= 0)
+ goto write_err;
+
+ filled = 2;
+ } else {
+ u8 val;
+
+ if (copy_from_user(&val, buf, sizeof(val)))
+ goto write_err;
+
+ ret = intel_vgpu_rw(mdev, &val, sizeof(val),
+ ppos, true);
+ if (ret <= 0)
+ goto write_err;
+
+ filled = 1;
+ }
+
+ count -= filled;
+ done += filled;
+ *ppos += filled;
+ buf += filled;
+ }
+
+ return done;
+write_err:
+ return -EFAULT;
+}
+
+static int intel_vgpu_mmap(struct mdev_device *mdev, struct vm_area_struct *vma)
+{
+ unsigned int index;
+ u64 virtaddr;
+ unsigned long req_size, pgoff = 0;
+ pgprot_t pg_prot;
+ struct intel_vgpu *vgpu = mdev_get_drvdata(mdev);
+
+ index = vma->vm_pgoff >> (VFIO_PCI_OFFSET_SHIFT - PAGE_SHIFT);
+ if (index >= VFIO_PCI_ROM_REGION_INDEX)
+ return -EINVAL;
+
+ if (vma->vm_end < vma->vm_start)
+ return -EINVAL;
+ if ((vma->vm_flags & VM_SHARED) == 0)
+ return -EINVAL;
+ if (index != VFIO_PCI_BAR2_REGION_INDEX)
+ return -EINVAL;
+
+ pg_prot = vma->vm_page_prot;
+ virtaddr = vma->vm_start;
+ req_size = vma->vm_end - vma->vm_start;
+ pgoff = vgpu_aperture_pa_base(vgpu) >> PAGE_SHIFT;
+
+ return remap_pfn_range(vma, virtaddr, pgoff, req_size, pg_prot);
+}
+
+static int intel_vgpu_get_irq_count(struct intel_vgpu *vgpu, int type)
+{
+ if (type == VFIO_PCI_INTX_IRQ_INDEX || type == VFIO_PCI_MSI_IRQ_INDEX)
+ return 1;
+
+ return 0;
+}
+
+static int intel_vgpu_set_intx_mask(struct intel_vgpu *vgpu,
+ unsigned int index, unsigned int start,
+ unsigned int count, uint32_t flags,
+ void *data)
+{
+ return 0;
+}
+
+static int intel_vgpu_set_intx_unmask(struct intel_vgpu *vgpu,
+ unsigned int index, unsigned int start,
+ unsigned int count, uint32_t flags, void *data)
+{
+ return 0;
+}
+
+static int intel_vgpu_set_intx_trigger(struct intel_vgpu *vgpu,
+ unsigned int index, unsigned int start, unsigned int count,
+ uint32_t flags, void *data)
+{
+ return 0;
+}
+
+static int intel_vgpu_set_msi_trigger(struct intel_vgpu *vgpu,
+ unsigned int index, unsigned int start, unsigned int count,
+ uint32_t flags, void *data)
+{
+ struct eventfd_ctx *trigger;
+
+ if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
+ int fd = *(int *)data;
+
+ trigger = eventfd_ctx_fdget(fd);
+ if (IS_ERR(trigger)) {
+ gvt_err("eventfd_ctx_fdget failed\n");
+ return PTR_ERR(trigger);
+ }
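+		/* kvmgt_inject_msi() signals this eventfd to deliver MSIs */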
+ vgpu->vdev.msi_trigger = trigger;
+ }
+
+ return 0;
+}
+
+static int intel_vgpu_set_irqs(struct intel_vgpu *vgpu, uint32_t flags,
+ unsigned int index, unsigned int start, unsigned int count,
+ void *data)
+{
+ int (*func)(struct intel_vgpu *vgpu, unsigned int index,
+ unsigned int start, unsigned int count, uint32_t flags,
+ void *data) = NULL;
+
+ switch (index) {
+ case VFIO_PCI_INTX_IRQ_INDEX:
+ switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) {
+ case VFIO_IRQ_SET_ACTION_MASK:
+ func = intel_vgpu_set_intx_mask;
+ break;
+ case VFIO_IRQ_SET_ACTION_UNMASK:
+ func = intel_vgpu_set_intx_unmask;
+ break;
+ case VFIO_IRQ_SET_ACTION_TRIGGER:
+ func = intel_vgpu_set_intx_trigger;
+ break;
+ }
+ break;
+ case VFIO_PCI_MSI_IRQ_INDEX:
+ switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) {
+ case VFIO_IRQ_SET_ACTION_MASK:
+ case VFIO_IRQ_SET_ACTION_UNMASK:
+ /* XXX Need masking support exported */
+ break;
+ case VFIO_IRQ_SET_ACTION_TRIGGER:
+ func = intel_vgpu_set_msi_trigger;
+ break;
+ }
+ break;
+ }
+
+ if (!func)
+ return -ENOTTY;
+
+ return func(vgpu, index, start, count, flags, data);
+}
+
+static long intel_vgpu_ioctl(struct mdev_device *mdev, unsigned int cmd,
+ unsigned long arg)
+{
+ struct intel_vgpu *vgpu = mdev_get_drvdata(mdev);
+ unsigned long minsz;
+
+ gvt_dbg_core("vgpu%d ioctl, cmd: %d\n", vgpu->id, cmd);
+
+ if (cmd == VFIO_DEVICE_GET_INFO) {
+ struct vfio_device_info info;
+
+ minsz = offsetofend(struct vfio_device_info, num_irqs);
+
+ if (copy_from_user(&info, (void __user *)arg, minsz))
+ return -EFAULT;
+
+ if (info.argsz < minsz)
+ return -EINVAL;
+
+ info.flags = VFIO_DEVICE_FLAGS_PCI;
+ info.flags |= VFIO_DEVICE_FLAGS_RESET;
+ info.num_regions = VFIO_PCI_NUM_REGIONS;
+ info.num_irqs = VFIO_PCI_NUM_IRQS;
+
+ return copy_to_user((void __user *)arg, &info, minsz) ?
+ -EFAULT : 0;
+
+ } else if (cmd == VFIO_DEVICE_GET_REGION_INFO) {
+ struct vfio_region_info info;
+ struct vfio_info_cap caps = { .buf = NULL, .size = 0 };
+ int i, ret;
+ struct vfio_region_info_cap_sparse_mmap *sparse = NULL;
+ size_t size;
+ int nr_areas = 1;
+ int cap_type_id;
+
+ minsz = offsetofend(struct vfio_region_info, offset);
+
+ if (copy_from_user(&info, (void __user *)arg, minsz))
+ return -EFAULT;
+
+ if (info.argsz < minsz)
+ return -EINVAL;
+
+ switch (info.index) {
+ case VFIO_PCI_CONFIG_REGION_INDEX:
+ info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
+ info.size = INTEL_GVT_MAX_CFG_SPACE_SZ;
+ info.flags = VFIO_REGION_INFO_FLAG_READ |
+ VFIO_REGION_INFO_FLAG_WRITE;
+ break;
+ case VFIO_PCI_BAR0_REGION_INDEX:
+ info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
+ info.size = vgpu->cfg_space.bar[info.index].size;
+ if (!info.size) {
+ info.flags = 0;
+ break;
+ }
+
+ info.flags = VFIO_REGION_INFO_FLAG_READ |
+ VFIO_REGION_INFO_FLAG_WRITE;
+ break;
+ case VFIO_PCI_BAR1_REGION_INDEX:
+ info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
+ info.size = 0;
+ info.flags = 0;
+ break;
+ case VFIO_PCI_BAR2_REGION_INDEX:
+ info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
+ info.flags = VFIO_REGION_INFO_FLAG_CAPS |
+ VFIO_REGION_INFO_FLAG_MMAP |
+ VFIO_REGION_INFO_FLAG_READ |
+ VFIO_REGION_INFO_FLAG_WRITE;
+ info.size = gvt_aperture_sz(vgpu->gvt);
+
+ size = sizeof(*sparse) +
+ (nr_areas * sizeof(*sparse->areas));
+ sparse = kzalloc(size, GFP_KERNEL);
+ if (!sparse)
+ return -ENOMEM;
+
+ sparse->nr_areas = nr_areas;
+ cap_type_id = VFIO_REGION_INFO_CAP_SPARSE_MMAP;
+ sparse->areas[0].offset =
+ PAGE_ALIGN(vgpu_aperture_offset(vgpu));
+ sparse->areas[0].size = vgpu_aperture_sz(vgpu);
+			if (caps.buf) {
+				kfree(caps.buf);
+				caps.buf = NULL;
+				caps.size = 0;
+			}
+ break;
+
+ case VFIO_PCI_BAR3_REGION_INDEX ... VFIO_PCI_BAR5_REGION_INDEX:
+ info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
+ info.size = 0;
+
+ info.flags = 0;
+ gvt_dbg_core("get region info bar:%d\n", info.index);
+ break;
+
+ case VFIO_PCI_ROM_REGION_INDEX:
+ case VFIO_PCI_VGA_REGION_INDEX:
+ gvt_dbg_core("get region info index:%d\n", info.index);
+ break;
+ default:
+ {
+ struct vfio_region_info_cap_type cap_type;
+
+ if (info.index >= VFIO_PCI_NUM_REGIONS +
+ vgpu->vdev.num_regions)
+ return -EINVAL;
+
+ i = info.index - VFIO_PCI_NUM_REGIONS;
+
+ info.offset =
+ VFIO_PCI_INDEX_TO_OFFSET(info.index);
+ info.size = vgpu->vdev.region[i].size;
+ info.flags = vgpu->vdev.region[i].flags;
+
+ cap_type.type = vgpu->vdev.region[i].type;
+ cap_type.subtype = vgpu->vdev.region[i].subtype;
+
+ ret = vfio_info_add_capability(&caps,
+ VFIO_REGION_INFO_CAP_TYPE,
+ &cap_type);
+ if (ret)
+ return ret;
+ }
+ }
+
+ if ((info.flags & VFIO_REGION_INFO_FLAG_CAPS) && sparse) {
+ switch (cap_type_id) {
+ case VFIO_REGION_INFO_CAP_SPARSE_MMAP:
+ ret = vfio_info_add_capability(&caps,
+ VFIO_REGION_INFO_CAP_SPARSE_MMAP,
+ sparse);
+ kfree(sparse);
+ if (ret)
+ return ret;
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+
+ if (caps.size) {
+ if (info.argsz < sizeof(info) + caps.size) {
+ info.argsz = sizeof(info) + caps.size;
+ info.cap_offset = 0;
+ } else {
+ vfio_info_cap_shift(&caps, sizeof(info));
+ if (copy_to_user((void __user *)arg +
+ sizeof(info), caps.buf,
+ caps.size)) {
+ kfree(caps.buf);
+ return -EFAULT;
+ }
+ info.cap_offset = sizeof(info);
+ }
+
+ kfree(caps.buf);
+ }
+
+ return copy_to_user((void __user *)arg, &info, minsz) ?
+ -EFAULT : 0;
+ } else if (cmd == VFIO_DEVICE_GET_IRQ_INFO) {
+ struct vfio_irq_info info;
+
+ minsz = offsetofend(struct vfio_irq_info, count);
+
+ if (copy_from_user(&info, (void __user *)arg, minsz))
+ return -EFAULT;
+
+ if (info.argsz < minsz || info.index >= VFIO_PCI_NUM_IRQS)
+ return -EINVAL;
+
+ switch (info.index) {
+ case VFIO_PCI_INTX_IRQ_INDEX:
+ case VFIO_PCI_MSI_IRQ_INDEX:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ info.flags = VFIO_IRQ_INFO_EVENTFD;
+
+ info.count = intel_vgpu_get_irq_count(vgpu, info.index);
+
+ if (info.index == VFIO_PCI_INTX_IRQ_INDEX)
+ info.flags |= (VFIO_IRQ_INFO_MASKABLE |
+ VFIO_IRQ_INFO_AUTOMASKED);
+ else
+ info.flags |= VFIO_IRQ_INFO_NORESIZE;
+
+ return copy_to_user((void __user *)arg, &info, minsz) ?
+ -EFAULT : 0;
+ } else if (cmd == VFIO_DEVICE_SET_IRQS) {
+ struct vfio_irq_set hdr;
+ u8 *data = NULL;
+ int ret = 0;
+ size_t data_size = 0;
+
+ minsz = offsetofend(struct vfio_irq_set, count);
+
+ if (copy_from_user(&hdr, (void __user *)arg, minsz))
+ return -EFAULT;
+
+ if (!(hdr.flags & VFIO_IRQ_SET_DATA_NONE)) {
+ int max = intel_vgpu_get_irq_count(vgpu, hdr.index);
+
+ ret = vfio_set_irqs_validate_and_prepare(&hdr, max,
+ VFIO_PCI_NUM_IRQS, &data_size);
+ if (ret) {
+				gvt_err("vfio_set_irqs_validate_and_prepare failed\n");
+ return -EINVAL;
+ }
+ if (data_size) {
+ data = memdup_user((void __user *)(arg + minsz),
+ data_size);
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+ }
+ }
+
+ ret = intel_vgpu_set_irqs(vgpu, hdr.flags, hdr.index,
+ hdr.start, hdr.count, data);
+ kfree(data);
+
+ return ret;
+ } else if (cmd == VFIO_DEVICE_RESET) {
+ intel_gvt_ops->vgpu_reset(vgpu);
+ return 0;
+ }
+
+ return 0;
+}
+
+static const struct parent_ops intel_vgpu_ops = {
+ .supported_type_groups = intel_vgpu_type_groups,
+ .create = intel_vgpu_create,
+ .remove = intel_vgpu_remove,
+
+ .open = intel_vgpu_open,
+ .release = intel_vgpu_release,
+
+ .read = intel_vgpu_read,
+ .write = intel_vgpu_write,
+ .mmap = intel_vgpu_mmap,
+ .ioctl = intel_vgpu_ioctl,
+};
+
+static int kvmgt_host_init(struct device *dev, void *gvt, const void *ops)
+{
+ if (!intel_gvt_init_vgpu_type_groups(gvt))
+ return -EFAULT;
+
+ intel_gvt_ops = ops;
+
+ return mdev_register_device(dev, &intel_vgpu_ops);
+}
+
+static void kvmgt_host_exit(struct device *dev, void *gvt)
+{
+ intel_gvt_cleanup_vgpu_type_groups(gvt);
+ mdev_unregister_device(dev);
+}
+
+static int kvmgt_write_protect_add(unsigned long handle, u64 gfn)
+{
+ struct kvmgt_guest_info *info;
+ struct kvm *kvm;
+ struct kvm_memory_slot *slot;
+ int idx;
+
+ if (!handle_valid(handle))
+ return -ESRCH;
+
+ info = (struct kvmgt_guest_info *)handle;
+ kvm = info->kvm;
+
+ idx = srcu_read_lock(&kvm->srcu);
+ slot = gfn_to_memslot(kvm, gfn);
+
+ spin_lock(&kvm->mmu_lock);
+
+ if (kvmgt_gfn_is_write_protected(info, gfn))
+ goto out;
+
+ kvm_slot_page_track_add_page(kvm, slot, gfn, KVM_PAGE_TRACK_WRITE);
+ kvmgt_protect_table_add(info, gfn);
+
+out:
+ spin_unlock(&kvm->mmu_lock);
+ srcu_read_unlock(&kvm->srcu, idx);
+ return 0;
+}
+
+static int kvmgt_write_protect_remove(unsigned long handle, u64 gfn)
+{
+ struct kvmgt_guest_info *info;
+ struct kvm *kvm;
+ struct kvm_memory_slot *slot;
+ int idx;
+
+ if (!handle_valid(handle))
+ return 0;
+
+ info = (struct kvmgt_guest_info *)handle;
+ kvm = info->kvm;
+
+ idx = srcu_read_lock(&kvm->srcu);
+ slot = gfn_to_memslot(kvm, gfn);
+
+ spin_lock(&kvm->mmu_lock);
+
+ if (!kvmgt_gfn_is_write_protected(info, gfn))
+ goto out;
+
+ kvm_slot_page_track_remove_page(kvm, slot, gfn, KVM_PAGE_TRACK_WRITE);
+ kvmgt_protect_table_del(info, gfn);
+
+out:
+ spin_unlock(&kvm->mmu_lock);
+ srcu_read_unlock(&kvm->srcu, idx);
+ return 0;
+}
+
+static void kvmgt_page_track_write(struct kvm_vcpu *vcpu, gpa_t gpa,
+ const u8 *val, int len,
+ struct kvm_page_track_notifier_node *node)
+{
+ struct kvmgt_guest_info *info = container_of(node,
+ struct kvmgt_guest_info, track_node);
+
+ if (kvmgt_gfn_is_write_protected(info, gpa_to_gfn(gpa)))
+ intel_gvt_ops->emulate_mmio_write(info->vgpu, gpa,
+ (void *)val, len);
+}
+
+static void kvmgt_page_track_flush_slot(struct kvm *kvm,
+ struct kvm_memory_slot *slot,
+ struct kvm_page_track_notifier_node *node)
+{
+ int i;
+ gfn_t gfn;
+ struct kvmgt_guest_info *info = container_of(node,
+ struct kvmgt_guest_info, track_node);
+
+ spin_lock(&kvm->mmu_lock);
+ for (i = 0; i < slot->npages; i++) {
+ gfn = slot->base_gfn + i;
+ if (kvmgt_gfn_is_write_protected(info, gfn)) {
+ kvm_slot_page_track_remove_page(kvm, slot, gfn,
+ KVM_PAGE_TRACK_WRITE);
+ kvmgt_protect_table_del(info, gfn);
+ }
+ }
+ spin_unlock(&kvm->mmu_lock);
+}
+
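+/*
+ * Hypervisor presence is detected through the CPUID hypervisor leaf:
+ * KVM_CPUID_SIGNATURE (0x40000000) returns "KVMKVMKVM" split across
+ * ebx, ecx and edx when running as a KVM guest.
+ */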
+static bool kvmgt_check_guest(void)
+{
+ unsigned int eax, ebx, ecx, edx;
+ char s[12];
+ unsigned int *i;
+
+ eax = KVM_CPUID_SIGNATURE;
+ ebx = ecx = edx = 0;
+
+ asm volatile ("cpuid"
+ : "+a"(eax), "=b"(ebx), "=c"(ecx), "=d"(edx)
+ :
+ : "cc", "memory");
+ i = (unsigned int *)s;
+ i[0] = ebx;
+ i[1] = ecx;
+ i[2] = edx;
+
+ return !strncmp(s, "KVMKVMKVM", strlen("KVMKVMKVM"));
+}
+
+/*
+ * NOTE:
+ * It's actually impossible to check whether we are running in the KVM
+ * host, since the "KVM host" is simply native. So we can only detect
+ * that we are running as a guest here.
+ */
+static int kvmgt_detect_host(void)
+{
+#ifdef CONFIG_INTEL_IOMMU
+ if (intel_iommu_gfx_mapped) {
+ gvt_err("Hardware IOMMU compatibility not yet supported, try to boot with intel_iommu=igfx_off\n");
+ return -ENODEV;
+ }
+#endif
+ return kvmgt_check_guest() ? -ENODEV : 0;
+}
+
+static bool __kvmgt_vgpu_exist(struct intel_vgpu *vgpu, struct kvm *kvm)
+{
+ struct intel_vgpu *itr;
+ struct kvmgt_guest_info *info;
+ int id;
+ bool ret = false;
+
+ mutex_lock(&vgpu->gvt->lock);
+ for_each_active_vgpu(vgpu->gvt, itr, id) {
+ if (!handle_valid(itr->handle))
+ continue;
+
+ info = (struct kvmgt_guest_info *)itr->handle;
+ if (kvm && kvm == info->kvm) {
+ ret = true;
+ goto out;
+ }
+ }
+out:
+ mutex_unlock(&vgpu->gvt->lock);
+ return ret;
+}
+
+static int kvmgt_guest_init(struct mdev_device *mdev)
+{
+ struct kvmgt_guest_info *info;
+ struct intel_vgpu *vgpu;
+ struct kvm *kvm;
+
+ vgpu = mdev_get_drvdata(mdev);
+ if (handle_valid(vgpu->handle))
+ return -EEXIST;
+
+ kvm = vgpu->vdev.kvm;
+ if (!kvm || kvm->mm != current->mm) {
+ gvt_err("KVM is required to use Intel vGPU\n");
+ return -ESRCH;
+ }
+
+ if (__kvmgt_vgpu_exist(vgpu, kvm))
+ return -EEXIST;
+
+ info = vzalloc(sizeof(struct kvmgt_guest_info));
+ if (!info)
+ return -ENOMEM;
+
+ vgpu->handle = (unsigned long)info;
+ info->vgpu = vgpu;
+ info->kvm = kvm;
+
+ kvmgt_protect_table_init(info);
+ gvt_cache_init(vgpu);
+
+ info->track_node.track_write = kvmgt_page_track_write;
+ info->track_node.track_flush_slot = kvmgt_page_track_flush_slot;
+ kvm_page_track_register_notifier(kvm, &info->track_node);
+
+ return 0;
+}
+
+static bool kvmgt_guest_exit(struct kvmgt_guest_info *info)
+{
+ struct intel_vgpu *vgpu;
+
+ if (!info) {
+ gvt_err("kvmgt_guest_info invalid\n");
+ return false;
+ }
+
+ vgpu = info->vgpu;
+
+ kvm_page_track_unregister_notifier(info->kvm, &info->track_node);
+ kvmgt_protect_table_destroy(info);
+ gvt_cache_destroy(vgpu);
+ vfree(info);
+
+ return true;
+}
+
+static int kvmgt_attach_vgpu(void *vgpu, unsigned long *handle)
+{
+ /* nothing to do here */
+ return 0;
+}
+
+static void kvmgt_detach_vgpu(unsigned long handle)
+{
+ /* nothing to do here */
+}
+
+static int kvmgt_inject_msi(unsigned long handle, u32 addr, u16 data)
+{
+ struct kvmgt_guest_info *info;
+ struct intel_vgpu *vgpu;
+
+ if (!handle_valid(handle))
+ return -ESRCH;
+
+ info = (struct kvmgt_guest_info *)handle;
+ vgpu = info->vgpu;
+
+ if (eventfd_signal(vgpu->vdev.msi_trigger, 1) == 1)
+ return 0;
+
+ return -EFAULT;
+}
+
+static unsigned long kvmgt_gfn_to_pfn(unsigned long handle, unsigned long gfn)
+{
+ unsigned long pfn;
+ struct kvmgt_guest_info *info;
+ struct device *dev;
+ int rc;
+
+ if (!handle_valid(handle))
+ return INTEL_GVT_INVALID_ADDR;
+
+ info = (struct kvmgt_guest_info *)handle;
+ pfn = gvt_cache_find(info->vgpu, gfn);
+ if (pfn != 0)
+ return pfn;
+
+ pfn = INTEL_GVT_INVALID_ADDR;
+ dev = &info->vgpu->vdev.mdev->dev;
+ rc = vfio_pin_pages(dev, &gfn, 1, IOMMU_READ | IOMMU_WRITE, &pfn);
+ if (rc != 1) {
+ gvt_err("vfio_pin_pages failed for gfn 0x%lx: %d\n", gfn, rc);
+ return INTEL_GVT_INVALID_ADDR;
+ }
+
+ gvt_cache_add(info->vgpu, gfn, pfn);
+ return pfn;
+}
+
+static int kvmgt_rw_gpa(unsigned long handle, unsigned long gpa,
+ void *buf, unsigned long len, bool write)
+{
+ struct kvmgt_guest_info *info;
+ struct kvm *kvm;
+ int ret;
+ bool kthread = current->mm == NULL;
+
+ if (!handle_valid(handle))
+ return -ESRCH;
+
+ info = (struct kvmgt_guest_info *)handle;
+ kvm = info->kvm;
+
+ if (kthread)
+ use_mm(kvm->mm);
+
+ ret = write ? kvm_write_guest(kvm, gpa, buf, len) :
+ kvm_read_guest(kvm, gpa, buf, len);
+
+ if (kthread)
+ unuse_mm(kvm->mm);
+
+ return ret;
+}
+
+static int kvmgt_read_gpa(unsigned long handle, unsigned long gpa,
+ void *buf, unsigned long len)
+{
+ return kvmgt_rw_gpa(handle, gpa, buf, len, false);
+}
+
+static int kvmgt_write_gpa(unsigned long handle, unsigned long gpa,
+ void *buf, unsigned long len)
+{
+ return kvmgt_rw_gpa(handle, gpa, buf, len, true);
+}
+
+static unsigned long kvmgt_virt_to_pfn(void *addr)
+{
+ return PFN_DOWN(__pa(addr));
+}
+
+struct intel_gvt_mpt kvmgt_mpt = {
+ .detect_host = kvmgt_detect_host,
+ .host_init = kvmgt_host_init,
+ .host_exit = kvmgt_host_exit,
+ .attach_vgpu = kvmgt_attach_vgpu,
+ .detach_vgpu = kvmgt_detach_vgpu,
+ .inject_msi = kvmgt_inject_msi,
+ .from_virt_to_mfn = kvmgt_virt_to_pfn,
+ .set_wp_page = kvmgt_write_protect_add,
+ .unset_wp_page = kvmgt_write_protect_remove,
+ .read_gpa = kvmgt_read_gpa,
+ .write_gpa = kvmgt_write_gpa,
+ .gfn_to_mfn = kvmgt_gfn_to_pfn,
+};
+EXPORT_SYMBOL_GPL(kvmgt_mpt);
+
+static int __init kvmgt_init(void)
+{
+ return 0;
+}
+
+static void __exit kvmgt_exit(void)
+{
+}
+
+module_init(kvmgt_init);
+module_exit(kvmgt_exit);
+
+MODULE_LICENSE("GPL and additional rights");
+MODULE_AUTHOR("Intel Corporation");
diff --git a/drivers/gpu/drm/i915/gvt/mmio.c b/drivers/gpu/drm/i915/gvt/mmio.c
new file mode 100644
index 000000000000..09c9450a1946
--- /dev/null
+++ b/drivers/gpu/drm/i915/gvt/mmio.c
@@ -0,0 +1,304 @@
+/*
+ * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ * Ke Yu
+ * Kevin Tian <kevin.tian@intel.com>
+ * Dexuan Cui
+ *
+ * Contributors:
+ * Tina Zhang <tina.zhang@intel.com>
+ * Min He <min.he@intel.com>
+ * Niu Bing <bing.niu@intel.com>
+ * Zhi Wang <zhi.a.wang@intel.com>
+ *
+ */
+
+#include "i915_drv.h"
+#include "gvt.h"
+
+/**
+ * intel_vgpu_gpa_to_mmio_offset - translate a GPA to MMIO offset
+ * @vgpu: a vGPU
+ * @gpa: guest physical address
+ *
+ * Returns:
+ * The MMIO offset of the given GPA
+ */
+int intel_vgpu_gpa_to_mmio_offset(struct intel_vgpu *vgpu, u64 gpa)
+{
+ u64 gttmmio_gpa = *(u64 *)(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_0) &
+ ~GENMASK(3, 0);
+ return gpa - gttmmio_gpa;
+}
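+
+/*
+ * Illustrative example: with BAR0 programmed to 0xf0000000, a guest
+ * access to GPA 0xf0002000 translates to MMIO offset 0x2000.
+ */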
+
+#define reg_is_mmio(gvt, reg) \
+ (reg >= 0 && reg < gvt->device_info.mmio_size)
+
+#define reg_is_gtt(gvt, reg) \
+ (reg >= gvt->device_info.gtt_start_offset \
+ && reg < gvt->device_info.gtt_start_offset + gvt_ggtt_sz(gvt))
+
+/**
+ * intel_vgpu_emulate_mmio_read - emulate MMIO read
+ * @vgpu: a vGPU
+ * @pa: guest physical address
+ * @p_data: data return buffer
+ * @bytes: access data length
+ *
+ * Returns:
+ * Zero on success, negative error code if failed
+ */
+int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, uint64_t pa,
+ void *p_data, unsigned int bytes)
+{
+ struct intel_gvt *gvt = vgpu->gvt;
+ struct intel_gvt_mmio_info *mmio;
+ unsigned int offset = 0;
+ int ret = -EINVAL;
+
+ mutex_lock(&gvt->lock);
+
+ if (atomic_read(&vgpu->gtt.n_write_protected_guest_page)) {
+ struct intel_vgpu_guest_page *gp;
+
+ gp = intel_vgpu_find_guest_page(vgpu, pa >> PAGE_SHIFT);
+ if (gp) {
+ ret = intel_gvt_hypervisor_read_gpa(vgpu, pa,
+ p_data, bytes);
+ if (ret) {
+ gvt_err("vgpu%d: guest page read error %d, "
+ "gfn 0x%lx, pa 0x%llx, var 0x%x, len %d\n",
+ vgpu->id, ret,
+ gp->gfn, pa, *(u32 *)p_data, bytes);
+ }
+ mutex_unlock(&gvt->lock);
+ return ret;
+ }
+ }
+
+ offset = intel_vgpu_gpa_to_mmio_offset(vgpu, pa);
+
+ if (WARN_ON(bytes > 8))
+ goto err;
+
+ if (reg_is_gtt(gvt, offset)) {
+ if (WARN_ON(!IS_ALIGNED(offset, 4) && !IS_ALIGNED(offset, 8)))
+ goto err;
+ if (WARN_ON(bytes != 4 && bytes != 8))
+ goto err;
+ if (WARN_ON(!reg_is_gtt(gvt, offset + bytes - 1)))
+ goto err;
+
+ ret = intel_vgpu_emulate_gtt_mmio_read(vgpu, offset,
+ p_data, bytes);
+ if (ret)
+ goto err;
+ mutex_unlock(&gvt->lock);
+ return ret;
+ }
+
+ if (WARN_ON_ONCE(!reg_is_mmio(gvt, offset))) {
+ ret = intel_gvt_hypervisor_read_gpa(vgpu, pa, p_data, bytes);
+ mutex_unlock(&gvt->lock);
+ return ret;
+ }
+
+ if (WARN_ON(!reg_is_mmio(gvt, offset + bytes - 1)))
+ goto err;
+
+ mmio = intel_gvt_find_mmio_info(gvt, rounddown(offset, 4));
+ if (!mmio && !vgpu->mmio.disable_warn_untrack) {
+ gvt_err("vgpu%d: read untracked MMIO %x len %d val %x\n",
+ vgpu->id, offset, bytes, *(u32 *)p_data);
+
+ if (offset == 0x206c) {
+ gvt_err("------------------------------------------\n");
+			gvt_err("vgpu%d: this access likely triggers a gfx reset\n",
+				vgpu->id);
+ gvt_err("------------------------------------------\n");
+ vgpu->mmio.disable_warn_untrack = true;
+ }
+ }
+
+ if (!intel_gvt_mmio_is_unalign(gvt, offset)) {
+ if (WARN_ON(!IS_ALIGNED(offset, bytes)))
+ goto err;
+ }
+
+ if (mmio) {
+ if (!intel_gvt_mmio_is_unalign(gvt, mmio->offset)) {
+ if (WARN_ON(offset + bytes > mmio->offset + mmio->size))
+ goto err;
+ if (WARN_ON(mmio->offset != offset))
+ goto err;
+ }
+ ret = mmio->read(vgpu, offset, p_data, bytes);
+ } else
+ ret = intel_vgpu_default_mmio_read(vgpu, offset, p_data, bytes);
+
+ if (ret)
+ goto err;
+
+ intel_gvt_mmio_set_accessed(gvt, offset);
+ mutex_unlock(&gvt->lock);
+ return 0;
+err:
+ gvt_err("vgpu%d: fail to emulate MMIO read %08x len %d\n",
+ vgpu->id, offset, bytes);
+ mutex_unlock(&gvt->lock);
+ return ret;
+}
+
+/**
+ * intel_vgpu_emulate_mmio_write - emulate MMIO write
+ * @vgpu: a vGPU
+ * @pa: guest physical address
+ * @p_data: write data buffer
+ * @bytes: access data length
+ *
+ * Returns:
+ * Zero on success, negative error code if failed
+ */
+int intel_vgpu_emulate_mmio_write(struct intel_vgpu *vgpu, uint64_t pa,
+ void *p_data, unsigned int bytes)
+{
+ struct intel_gvt *gvt = vgpu->gvt;
+ struct intel_gvt_mmio_info *mmio;
+ unsigned int offset = 0;
+ u32 old_vreg = 0, old_sreg = 0;
+ int ret = -EINVAL;
+
+ mutex_lock(&gvt->lock);
+
+ if (atomic_read(&vgpu->gtt.n_write_protected_guest_page)) {
+ struct intel_vgpu_guest_page *gp;
+
+ gp = intel_vgpu_find_guest_page(vgpu, pa >> PAGE_SHIFT);
+ if (gp) {
+ ret = gp->handler(gp, pa, p_data, bytes);
+ if (ret) {
+ gvt_err("vgpu%d: guest page write error %d, "
+ "gfn 0x%lx, pa 0x%llx, var 0x%x, len %d\n",
+ vgpu->id, ret,
+ gp->gfn, pa, *(u32 *)p_data, bytes);
+ }
+ mutex_unlock(&gvt->lock);
+ return ret;
+ }
+ }
+
+ offset = intel_vgpu_gpa_to_mmio_offset(vgpu, pa);
+
+ if (WARN_ON(bytes > 8))
+ goto err;
+
+ if (reg_is_gtt(gvt, offset)) {
+ if (WARN_ON(!IS_ALIGNED(offset, 4) && !IS_ALIGNED(offset, 8)))
+ goto err;
+ if (WARN_ON(bytes != 4 && bytes != 8))
+ goto err;
+ if (WARN_ON(!reg_is_gtt(gvt, offset + bytes - 1)))
+ goto err;
+
+ ret = intel_vgpu_emulate_gtt_mmio_write(vgpu, offset,
+ p_data, bytes);
+ if (ret)
+ goto err;
+ mutex_unlock(&gvt->lock);
+ return ret;
+ }
+
+ if (WARN_ON_ONCE(!reg_is_mmio(gvt, offset))) {
+ ret = intel_gvt_hypervisor_write_gpa(vgpu, pa, p_data, bytes);
+ mutex_unlock(&gvt->lock);
+ return ret;
+ }
+
+ mmio = intel_gvt_find_mmio_info(gvt, rounddown(offset, 4));
+ if (!mmio && !vgpu->mmio.disable_warn_untrack)
+ gvt_err("vgpu%d: write untracked MMIO %x len %d val %x\n",
+ vgpu->id, offset, bytes, *(u32 *)p_data);
+
+ if (!intel_gvt_mmio_is_unalign(gvt, offset)) {
+ if (WARN_ON(!IS_ALIGNED(offset, bytes)))
+ goto err;
+ }
+
+ if (mmio) {
+ u64 ro_mask = mmio->ro_mask;
+
+ if (!intel_gvt_mmio_is_unalign(gvt, mmio->offset)) {
+ if (WARN_ON(offset + bytes > mmio->offset + mmio->size))
+ goto err;
+ if (WARN_ON(mmio->offset != offset))
+ goto err;
+ }
+
+ if (intel_gvt_mmio_has_mode_mask(gvt, mmio->offset)) {
+ old_vreg = vgpu_vreg(vgpu, offset);
+ old_sreg = vgpu_sreg(vgpu, offset);
+ }
+
+ if (!ro_mask) {
+ ret = mmio->write(vgpu, offset, p_data, bytes);
+ } else {
+ /* Protect RO bits like HW */
+ u64 data = 0;
+
+ /* all register bits are RO. */
+ if (ro_mask == ~(u64)0) {
+ gvt_err("vgpu%d: try to write RO reg %x\n",
+ vgpu->id, offset);
+ ret = 0;
+ goto out;
+ }
+ /* keep the RO bits in the virtual register */
+ memcpy(&data, p_data, bytes);
+ data &= ~mmio->ro_mask;
+ data |= vgpu_vreg(vgpu, offset) & mmio->ro_mask;
+ ret = mmio->write(vgpu, offset, &data, bytes);
+ }
+
+ /* higher 16bits of mode ctl regs are mask bits for change */
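+		/*
+		 * e.g. (illustrative) writing 0x00080008 updates only bit 3,
+		 * since only mask bit 19 is set; writing 0x00000008 changes
+		 * nothing because the mask half is zero.
+		 */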
+ if (intel_gvt_mmio_has_mode_mask(gvt, mmio->offset)) {
+ u32 mask = vgpu_vreg(vgpu, offset) >> 16;
+
+ vgpu_vreg(vgpu, offset) = (old_vreg & ~mask)
+ | (vgpu_vreg(vgpu, offset) & mask);
+ vgpu_sreg(vgpu, offset) = (old_sreg & ~mask)
+ | (vgpu_sreg(vgpu, offset) & mask);
+ }
+ } else
+ ret = intel_vgpu_default_mmio_write(vgpu, offset, p_data,
+ bytes);
+ if (ret)
+ goto err;
+out:
+ intel_gvt_mmio_set_accessed(gvt, offset);
+ mutex_unlock(&gvt->lock);
+ return 0;
+err:
+ gvt_err("vgpu%d: fail to emulate MMIO write %08x len %d\n",
+ vgpu->id, offset, bytes);
+ mutex_unlock(&gvt->lock);
+ return ret;
+}
diff --git a/drivers/gpu/drm/i915/gvt/mmio.h b/drivers/gpu/drm/i915/gvt/mmio.h
new file mode 100644
index 000000000000..87d5b5e366a3
--- /dev/null
+++ b/drivers/gpu/drm/i915/gvt/mmio.h
@@ -0,0 +1,106 @@
+/*
+ * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ * Ke Yu
+ * Kevin Tian <kevin.tian@intel.com>
+ * Dexuan Cui
+ *
+ * Contributors:
+ * Tina Zhang <tina.zhang@intel.com>
+ * Min He <min.he@intel.com>
+ * Niu Bing <bing.niu@intel.com>
+ * Zhi Wang <zhi.a.wang@intel.com>
+ *
+ */
+
+#ifndef _GVT_MMIO_H_
+#define _GVT_MMIO_H_
+
+struct intel_gvt;
+struct intel_vgpu;
+
+#define D_SNB (1 << 0)
+#define D_IVB (1 << 1)
+#define D_HSW (1 << 2)
+#define D_BDW (1 << 3)
+#define D_SKL (1 << 4)
+
+#define D_GEN9PLUS (D_SKL)
+#define D_GEN8PLUS (D_BDW | D_SKL)
+#define D_GEN75PLUS (D_HSW | D_BDW | D_SKL)
+#define D_GEN7PLUS (D_IVB | D_HSW | D_BDW | D_SKL)
+
+#define D_SKL_PLUS (D_SKL)
+#define D_BDW_PLUS (D_BDW | D_SKL)
+#define D_HSW_PLUS (D_HSW | D_BDW | D_SKL)
+#define D_IVB_PLUS (D_IVB | D_HSW | D_BDW | D_SKL)
+
+#define D_PRE_BDW (D_SNB | D_IVB | D_HSW)
+#define D_PRE_SKL (D_SNB | D_IVB | D_HSW | D_BDW)
+#define D_ALL (D_SNB | D_IVB | D_HSW | D_BDW | D_SKL)
+
+struct intel_gvt_mmio_info {
+ u32 offset;
+ u32 size;
+ u32 length;
+ u32 addr_mask;
+ u64 ro_mask;
+ u32 device;
+ int (*read)(struct intel_vgpu *, unsigned int, void *, unsigned int);
+ int (*write)(struct intel_vgpu *, unsigned int, void *, unsigned int);
+ u32 addr_range;
+ struct hlist_node node;
+};
+
+unsigned long intel_gvt_get_device_type(struct intel_gvt *gvt);
+bool intel_gvt_match_device(struct intel_gvt *gvt, unsigned long device);
+
+int intel_gvt_setup_mmio_info(struct intel_gvt *gvt);
+void intel_gvt_clean_mmio_info(struct intel_gvt *gvt);
+
+struct intel_gvt_mmio_info *intel_gvt_find_mmio_info(struct intel_gvt *gvt,
+ unsigned int offset);
+#define INTEL_GVT_MMIO_OFFSET(reg) ({ \
+ typeof(reg) __reg = reg; \
+ u32 *offset = (u32 *)&__reg; \
+ *offset; \
+})
+
+int intel_vgpu_gpa_to_mmio_offset(struct intel_vgpu *vgpu, u64 gpa);
+
+int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, u64 pa,
+ void *p_data, unsigned int bytes);
+int intel_vgpu_emulate_mmio_write(struct intel_vgpu *vgpu, u64 pa,
+ void *p_data, unsigned int bytes);
+bool intel_gvt_mmio_is_cmd_access(struct intel_gvt *gvt,
+ unsigned int offset);
+bool intel_gvt_mmio_is_unalign(struct intel_gvt *gvt, unsigned int offset);
+void intel_gvt_mmio_set_accessed(struct intel_gvt *gvt, unsigned int offset);
+void intel_gvt_mmio_set_cmd_accessed(struct intel_gvt *gvt,
+ unsigned int offset);
+bool intel_gvt_mmio_has_mode_mask(struct intel_gvt *gvt, unsigned int offset);
+int intel_vgpu_default_mmio_read(struct intel_vgpu *vgpu, unsigned int offset,
+ void *p_data, unsigned int bytes);
+int intel_vgpu_default_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
+ void *p_data, unsigned int bytes);
+#endif
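
The D_* flags above describe which hardware generations an MMIO entry applies to; intel_gvt_get_device_type() presumably reports the running GPU as exactly one of these bits, and intel_gvt_match_device() tests it against an entry's mask. A hedged sketch of that check (the helper below is illustrative, not part of the patch):

static bool mmio_info_applies(struct intel_gvt *gvt,
			      struct intel_gvt_mmio_info *info)
{
	/* e.g. an entry tagged D_BDW_PLUS matches both D_BDW and D_SKL */
	return (intel_gvt_get_device_type(gvt) & info->device) != 0;
}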
diff --git a/drivers/gpu/drm/i915/gvt/mpt.h b/drivers/gpu/drm/i915/gvt/mpt.h
index 03601e3ffa7c..1af5830c0a56 100644
--- a/drivers/gpu/drm/i915/gvt/mpt.h
+++ b/drivers/gpu/drm/i915/gvt/mpt.h
@@ -19,6 +19,15 @@
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
+ *
+ * Authors:
+ * Eddie Dong <eddie.dong@intel.com>
+ * Dexuan Cui
+ * Jike Song <jike.song@intel.com>
+ *
+ * Contributors:
+ * Zhi Wang <zhi.a.wang@intel.com>
+ *
*/
#ifndef _GVT_MPT_H_
@@ -46,4 +55,254 @@ static inline int intel_gvt_hypervisor_detect_host(void)
return intel_gvt_host.mpt->detect_host();
}
+/**
+ * intel_gvt_hypervisor_host_init - init GVT-g host side
+ *
+ * Returns:
+ * Zero on success, negative error code if failed
+ */
+static inline int intel_gvt_hypervisor_host_init(struct device *dev,
+ void *gvt, const void *ops)
+{
+ /* optional to provide */
+ if (!intel_gvt_host.mpt->host_init)
+ return 0;
+
+ return intel_gvt_host.mpt->host_init(dev, gvt, ops);
+}
+
+/**
+ * intel_gvt_hypervisor_host_exit - exit GVT-g host side
+ */
+static inline void intel_gvt_hypervisor_host_exit(struct device *dev,
+ void *gvt)
+{
+ /* optional to provide */
+ if (!intel_gvt_host.mpt->host_exit)
+ return;
+
+ intel_gvt_host.mpt->host_exit(dev, gvt);
+}
+
+/**
+ * intel_gvt_hypervisor_attach_vgpu - call hypervisor to initialize
+ * vGPU-related state inside the hypervisor.
+ *
+ * Returns:
+ * Zero on success, negative error code if failed.
+ */
+static inline int intel_gvt_hypervisor_attach_vgpu(struct intel_vgpu *vgpu)
+{
+ /* optional to provide */
+ if (!intel_gvt_host.mpt->attach_vgpu)
+ return 0;
+
+ return intel_gvt_host.mpt->attach_vgpu(vgpu, &vgpu->handle);
+}
+
+/**
+ * intel_gvt_hypervisor_detach_vgpu - call hypervisor to release
+ * vGPU-related state inside the hypervisor.
+ */
+static inline void intel_gvt_hypervisor_detach_vgpu(struct intel_vgpu *vgpu)
+{
+ /* optional to provide */
+ if (!intel_gvt_host.mpt->detach_vgpu)
+ return;
+
+ intel_gvt_host.mpt->detach_vgpu(vgpu->handle);
+}
+
+#define MSI_CAP_CONTROL(offset) (offset + 2)
+#define MSI_CAP_ADDRESS(offset) (offset + 4)
+#define MSI_CAP_DATA(offset) (offset + 8)
+#define MSI_CAP_EN 0x1
+
+/**
+ * intel_gvt_hypervisor_inject_msi - inject a MSI interrupt into vGPU
+ *
+ * Returns:
+ * Zero on success, negative error code if failed.
+ */
+static inline int intel_gvt_hypervisor_inject_msi(struct intel_vgpu *vgpu)
+{
+ unsigned long offset = vgpu->gvt->device_info.msi_cap_offset;
+ u16 control, data;
+ u32 addr;
+ int ret;
+
+ control = *(u16 *)(vgpu_cfg_space(vgpu) + MSI_CAP_CONTROL(offset));
+ addr = *(u32 *)(vgpu_cfg_space(vgpu) + MSI_CAP_ADDRESS(offset));
+ data = *(u16 *)(vgpu_cfg_space(vgpu) + MSI_CAP_DATA(offset));
+
+ /* Do not generate MSI if MSIEN is disabled */
+ if (!(control & MSI_CAP_EN))
+ return 0;
+
+ if (WARN(control & GENMASK(15, 1), "only support one MSI format\n"))
+ return -EINVAL;
+
+ gvt_dbg_irq("vgpu%d: inject msi address %x data %x\n", vgpu->id, addr,
+ data);
+
+ ret = intel_gvt_host.mpt->inject_msi(vgpu->handle, addr, data);
+ if (ret)
+ return ret;
+ return 0;
+}
+
+/**
+ * intel_gvt_hypervisor_virt_to_mfn - translate a host VA into MFN
+ * @p: host kernel virtual address
+ *
+ * Returns:
+ * MFN on success, INTEL_GVT_INVALID_ADDR if failed.
+ */
+static inline unsigned long intel_gvt_hypervisor_virt_to_mfn(void *p)
+{
+ return intel_gvt_host.mpt->from_virt_to_mfn(p);
+}
+
+/**
+ * intel_gvt_hypervisor_set_wp_page - set a guest page to write-protected
+ * @vgpu: a vGPU
+ * @p: intel_vgpu_guest_page
+ *
+ * Returns:
+ * Zero on success, negative error code if failed.
+ */
+static inline int intel_gvt_hypervisor_set_wp_page(struct intel_vgpu *vgpu,
+ struct intel_vgpu_guest_page *p)
+{
+ int ret;
+
+ if (p->writeprotection)
+ return 0;
+
+ ret = intel_gvt_host.mpt->set_wp_page(vgpu->handle, p->gfn);
+ if (ret)
+ return ret;
+ p->writeprotection = true;
+ atomic_inc(&vgpu->gtt.n_write_protected_guest_page);
+ return 0;
+}
+
+/**
+ * intel_gvt_hypervisor_unset_wp_page - remove the write-protection of a
+ * guest page
+ * @vgpu: a vGPU
+ * @p: intel_vgpu_guest_page
+ *
+ * Returns:
+ * Zero on success, negative error code if failed.
+ */
+static inline int intel_gvt_hypervisor_unset_wp_page(struct intel_vgpu *vgpu,
+ struct intel_vgpu_guest_page *p)
+{
+ int ret;
+
+ if (!p->writeprotection)
+ return 0;
+
+ ret = intel_gvt_host.mpt->unset_wp_page(vgpu->handle, p->gfn);
+ if (ret)
+ return ret;
+ p->writeprotection = false;
+ atomic_dec(&vgpu->gtt.n_write_protected_guest_page);
+ return 0;
+}
+
+/**
+ * intel_gvt_hypervisor_read_gpa - copy data from GPA to host data buffer
+ * @vgpu: a vGPU
+ * @gpa: guest physical address
+ * @buf: host data buffer
+ * @len: data length
+ *
+ * Returns:
+ * Zero on success, negative error code if failed.
+ */
+static inline int intel_gvt_hypervisor_read_gpa(struct intel_vgpu *vgpu,
+ unsigned long gpa, void *buf, unsigned long len)
+{
+ return intel_gvt_host.mpt->read_gpa(vgpu->handle, gpa, buf, len);
+}
+
+/**
+ * intel_gvt_hypervisor_write_gpa - copy data from host data buffer to GPA
+ * @vgpu: a vGPU
+ * @gpa: guest physical address
+ * @buf: host data buffer
+ * @len: data length
+ *
+ * Returns:
+ * Zero on success, negative error code if failed.
+ */
+static inline int intel_gvt_hypervisor_write_gpa(struct intel_vgpu *vgpu,
+ unsigned long gpa, void *buf, unsigned long len)
+{
+ return intel_gvt_host.mpt->write_gpa(vgpu->handle, gpa, buf, len);
+}
+
+/**
+ * intel_gvt_hypervisor_gfn_to_mfn - translate a GFN to MFN
+ * @vgpu: a vGPU
+ * @gpfn: guest pfn
+ *
+ * Returns:
+ * MFN on success, INTEL_GVT_INVALID_ADDR if failed.
+ */
+static inline unsigned long intel_gvt_hypervisor_gfn_to_mfn(
+ struct intel_vgpu *vgpu, unsigned long gfn)
+{
+ return intel_gvt_host.mpt->gfn_to_mfn(vgpu->handle, gfn);
+}
+
+/**
+ * intel_gvt_hypervisor_map_gfn_to_mfn - map a GFN region to MFN
+ * @vgpu: a vGPU
+ * @gfn: guest PFN
+ * @mfn: host PFN
+ * @nr: amount of PFNs
+ * @map: map or unmap
+ *
+ * Returns:
+ * Zero on success, negative error code if failed.
+ */
+static inline int intel_gvt_hypervisor_map_gfn_to_mfn(
+ struct intel_vgpu *vgpu, unsigned long gfn,
+ unsigned long mfn, unsigned int nr,
+ bool map)
+{
+ /* an MPT implementation could have MMIO mapped elsewhere */
+ if (!intel_gvt_host.mpt->map_gfn_to_mfn)
+ return 0;
+
+ return intel_gvt_host.mpt->map_gfn_to_mfn(vgpu->handle, gfn, mfn, nr,
+ map);
+}
+
+/**
+ * intel_gvt_hypervisor_set_trap_area - Trap a guest PA region
+ * @vgpu: a vGPU
+ * @start: the beginning of the guest physical address region
+ * @end: the end of the guest physical address region
+ * @map: map or unmap
+ *
+ * Returns:
+ * Zero on success, negative error code if failed.
+ */
+static inline int intel_gvt_hypervisor_set_trap_area(
+ struct intel_vgpu *vgpu, u64 start, u64 end, bool map)
+{
+ /* an MPT implementation could have MMIO trapped elsewhere */
+ if (!intel_gvt_host.mpt->set_trap_area)
+ return 0;
+
+ return intel_gvt_host.mpt->set_trap_area(vgpu->handle, start, end, map);
+}
+
#endif /* _GVT_MPT_H_ */
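
The wrappers above split the MPT interface into mandatory hooks (detect_host, inject_msi, the GPA accessors, the address translators, the write-protection pair) and optional ones that may stay NULL (host_init/host_exit, attach_vgpu/detach_vgpu, map_gfn_to_mfn, set_trap_area). A hedged sketch of a backend ops table; struct intel_gvt_mpt itself is defined elsewhere (hypercall.h), and the stub bodies below are purely illustrative:

static int demo_detect_host(void)
{
	return 0;	/* pretend the expected hypervisor is present */
}

static unsigned long demo_virt_to_mfn(void *p)
{
	return PFN_DOWN(__pa(p));	/* bare-metal case: MFN == PFN */
}

static const struct intel_gvt_mpt demo_mpt = {
	.detect_host	  = demo_detect_host,
	.from_virt_to_mfn = demo_virt_to_mfn,
	/* remaining mandatory hooks omitted for brevity; optional
	 * hooks (host_init, attach_vgpu, map_gfn_to_mfn, ...) may
	 * simply stay NULL, as the inline wrappers above fall back
	 * to a no-op success in that case */
};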
diff --git a/drivers/gpu/drm/i915/gvt/opregion.c b/drivers/gpu/drm/i915/gvt/opregion.c
new file mode 100644
index 000000000000..d2a0fbc896c3
--- /dev/null
+++ b/drivers/gpu/drm/i915/gvt/opregion.c
@@ -0,0 +1,320 @@
+/*
+ * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/acpi.h>
+#include "i915_drv.h"
+#include "gvt.h"
+
+static int init_vgpu_opregion(struct intel_vgpu *vgpu, u32 gpa)
+{
+ void __iomem *host_va = vgpu->gvt->opregion.opregion_va;
+ u8 *buf;
+ int i;
+
+ if (WARN((vgpu_opregion(vgpu)->va),
+ "vgpu%d: opregion has been initialized already.\n",
+ vgpu->id))
+ return -EINVAL;
+
+ vgpu_opregion(vgpu)->va = (void *)__get_free_pages(GFP_ATOMIC |
+ GFP_DMA32 | __GFP_ZERO,
+ INTEL_GVT_OPREGION_PORDER);
+
+ if (!vgpu_opregion(vgpu)->va)
+ return -ENOMEM;
+
+ memcpy_fromio(vgpu_opregion(vgpu)->va, host_va,
+ INTEL_GVT_OPREGION_SIZE);
+
+ for (i = 0; i < INTEL_GVT_OPREGION_PAGES; i++)
+ vgpu_opregion(vgpu)->gfn[i] = (gpa >> PAGE_SHIFT) + i;
+
+ /* For an unknown reason, the value in the LID field is
+ * incorrect, which blocks the Windows guest, so work around
+ * it by forcing it to "OPEN".
+ */
+ buf = (u8 *)vgpu_opregion(vgpu)->va;
+ buf[INTEL_GVT_OPREGION_CLID] = 0x3;
+
+ return 0;
+}
+
+static int map_vgpu_opregion(struct intel_vgpu *vgpu, bool map)
+{
+ u64 mfn;
+ int i, ret;
+
+ for (i = 0; i < INTEL_GVT_OPREGION_PAGES; i++) {
+ mfn = intel_gvt_hypervisor_virt_to_mfn(vgpu_opregion(vgpu)
+ + i * PAGE_SIZE);
+ if (mfn == INTEL_GVT_INVALID_ADDR) {
+ gvt_err("fail to get MFN from VA\n");
+ return -EINVAL;
+ }
+ ret = intel_gvt_hypervisor_map_gfn_to_mfn(vgpu,
+ vgpu_opregion(vgpu)->gfn[i],
+ mfn, 1, map);
+ if (ret) {
+ gvt_err("fail to map GFN to MFN, errno: %d\n", ret);
+ return ret;
+ }
+ }
+ return 0;
+}
+
+/**
+ * intel_vgpu_clean_opregion - clean the stuff used to emulate opregion
+ * @vgpu: a vGPU
+ *
+ */
+void intel_vgpu_clean_opregion(struct intel_vgpu *vgpu)
+{
+ gvt_dbg_core("vgpu%d: clean vgpu opregion\n", vgpu->id);
+
+ if (!vgpu_opregion(vgpu)->va)
+ return;
+
+ if (intel_gvt_host.hypervisor_type == INTEL_GVT_HYPERVISOR_XEN) {
+ map_vgpu_opregion(vgpu, false);
+ free_pages((unsigned long)vgpu_opregion(vgpu)->va,
+ INTEL_GVT_OPREGION_PORDER);
+
+ vgpu_opregion(vgpu)->va = NULL;
+ }
+}
+
+/**
+ * intel_vgpu_init_opregion - initialize the stuff used to emulate opregion
+ * @vgpu: a vGPU
+ * @gpa: guest physical address of opregion
+ *
+ * Returns:
+ * Zero on success, negative error code if failed.
+ */
+int intel_vgpu_init_opregion(struct intel_vgpu *vgpu, u32 gpa)
+{
+ int ret;
+
+ gvt_dbg_core("vgpu%d: init vgpu opregion\n", vgpu->id);
+
+ if (intel_gvt_host.hypervisor_type == INTEL_GVT_HYPERVISOR_XEN) {
+ gvt_dbg_core("emulate opregion from kernel\n");
+
+ ret = init_vgpu_opregion(vgpu, gpa);
+ if (ret)
+ return ret;
+
+ ret = map_vgpu_opregion(vgpu, true);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+/**
+ * intel_gvt_clean_opregion - clean host opregion related resources
+ * @gvt: a GVT device
+ *
+ */
+void intel_gvt_clean_opregion(struct intel_gvt *gvt)
+{
+ memunmap(gvt->opregion.opregion_va);
+ gvt->opregion.opregion_va = NULL;
+}
+
+/**
+ * intel_gvt_init_opregion - initialize host opregion related resources
+ * @gvt: a GVT device
+ *
+ * Returns:
+ * Zero on success, negative error code if failed.
+ */
+int intel_gvt_init_opregion(struct intel_gvt *gvt)
+{
+ gvt_dbg_core("init host opregion\n");
+
+ pci_read_config_dword(gvt->dev_priv->drm.pdev, INTEL_GVT_PCI_OPREGION,
+ &gvt->opregion.opregion_pa);
+
+ gvt->opregion.opregion_va = memremap(gvt->opregion.opregion_pa,
+ INTEL_GVT_OPREGION_SIZE, MEMREMAP_WB);
+ if (!gvt->opregion.opregion_va) {
+ gvt_err("fail to map host opregion\n");
+ return -EFAULT;
+ }
+ return 0;
+}
+
+#define GVT_OPREGION_FUNC(scic) \
+ ({ \
+ u32 __ret; \
+ __ret = (scic & OPREGION_SCIC_FUNC_MASK) >> \
+ OPREGION_SCIC_FUNC_SHIFT; \
+ __ret; \
+ })
+
+#define GVT_OPREGION_SUBFUNC(scic) \
+ ({ \
+ u32 __ret; \
+ __ret = (scic & OPREGION_SCIC_SUBFUNC_MASK) >> \
+ OPREGION_SCIC_SUBFUNC_SHIFT; \
+ __ret; \
+ })
+
+static const char *opregion_func_name(u32 func)
+{
+ const char *name = NULL;
+
+ switch (func) {
+ case 0 ... 3:
+ case 5:
+ case 7 ... 15:
+ name = "Reserved";
+ break;
+
+ case 4:
+ name = "Get BIOS Data";
+ break;
+
+ case 6:
+ name = "System BIOS Callbacks";
+ break;
+
+ default:
+ name = "Unknown";
+ break;
+ }
+ return name;
+}
+
+static const char *opregion_subfunc_name(u32 subfunc)
+{
+ const char *name = NULL;
+
+ switch (subfunc) {
+ case 0:
+ name = "Supported Calls";
+ break;
+
+ case 1:
+ name = "Requested Callbacks";
+ break;
+
+ case 2 ... 3:
+ case 8 ... 9:
+ name = "Reserved";
+ break;
+
+ case 5:
+ name = "Boot Display";
+ break;
+
+ case 6:
+ name = "TV-Standard/Video-Connector";
+ break;
+
+ case 7:
+ name = "Internal Graphics";
+ break;
+
+ case 10:
+ name = "Spread Spectrum Clocks";
+ break;
+
+ case 11:
+ name = "Get AKSV";
+ break;
+
+ default:
+ name = "Unknown";
+ break;
+ }
+ return name;
+};
+
+static bool querying_capabilities(u32 scic)
+{
+ u32 func, subfunc;
+
+ func = GVT_OPREGION_FUNC(scic);
+ subfunc = GVT_OPREGION_SUBFUNC(scic);
+
+ if ((func == INTEL_GVT_OPREGION_SCIC_F_GETBIOSDATA &&
+ subfunc == INTEL_GVT_OPREGION_SCIC_SF_SUPPRTEDCALLS)
+ || (func == INTEL_GVT_OPREGION_SCIC_F_GETBIOSDATA &&
+ subfunc == INTEL_GVT_OPREGION_SCIC_SF_REQEUSTEDCALLBACKS)
+ || (func == INTEL_GVT_OPREGION_SCIC_F_GETBIOSCALLBACKS &&
+ subfunc == INTEL_GVT_OPREGION_SCIC_SF_SUPPRTEDCALLS)) {
+ return true;
+ }
+ return false;
+}
+
+/**
+ * intel_vgpu_emulate_opregion_request - emulating OpRegion request
+ * @vgpu: a vGPU
+ * @swsci: SWSCI request
+ *
+ * Returns:
+ * Zero on success, negative error code if failed
+ */
+int intel_vgpu_emulate_opregion_request(struct intel_vgpu *vgpu, u32 swsci)
+{
+ u32 *scic, *parm;
+ u32 func, subfunc;
+
+ scic = vgpu_opregion(vgpu)->va + INTEL_GVT_OPREGION_SCIC;
+ parm = vgpu_opregion(vgpu)->va + INTEL_GVT_OPREGION_PARM;
+
+ if (!(swsci & SWSCI_SCI_SELECT)) {
+ gvt_err("vgpu%d: requesting SMI service\n", vgpu->id);
+ return 0;
+ }
+ /* ignore non 0->1 transitions */
+ if ((vgpu_cfg_space(vgpu)[INTEL_GVT_PCI_SWSCI]
+ & SWSCI_SCI_TRIGGER) ||
+ !(swsci & SWSCI_SCI_TRIGGER)) {
+ return 0;
+ }
+
+ func = GVT_OPREGION_FUNC(*scic);
+ subfunc = GVT_OPREGION_SUBFUNC(*scic);
+ if (!querying_capabilities(*scic)) {
+ gvt_err("vgpu%d: requesting runtime service: func \"%s\","
+ " subfunc \"%s\"\n",
+ vgpu->id,
+ opregion_func_name(func),
+ opregion_subfunc_name(subfunc));
+ /*
+ * emulate exit status of function call, '0' means
+ * "failure, generic, unsupported or unknown cause"
+ */
+ *scic &= ~OPREGION_SCIC_EXIT_MASK;
+ return 0;
+ }
+
+ *scic = 0;
+ *parm = 0;
+ return 0;
+}
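
A worked decode of the SCIC word these handlers parse, using the masks from reg.h below (this commentary is an illustration, not code from the patch):

/*
 * scic = 0x0008:
 *   func    = (0x0008 & OPREGION_SCIC_FUNC_MASK) >> 1    = 4  ("Get BIOS Data")
 *   subfunc = (0x0008 & OPREGION_SCIC_SUBFUNC_MASK) >> 8 = 0  ("Supported Calls")
 *
 * That pair passes querying_capabilities(), so the request is answered
 * by clearing SCIC and PARM; any other runtime service is rejected by
 * clearing OPREGION_SCIC_EXIT_MASK in SCIC ("failure, unsupported").
 */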
diff --git a/drivers/gpu/drm/i915/gvt/reg.h b/drivers/gpu/drm/i915/gvt/reg.h
new file mode 100644
index 000000000000..0dfe789d8f02
--- /dev/null
+++ b/drivers/gpu/drm/i915/gvt/reg.h
@@ -0,0 +1,80 @@
+/*
+ * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef _GVT_REG_H
+#define _GVT_REG_H
+
+#define INTEL_GVT_PCI_CLASS_VGA_OTHER 0x80
+
+#define INTEL_GVT_PCI_GMCH_CONTROL 0x50
+#define BDW_GMCH_GMS_SHIFT 8
+#define BDW_GMCH_GMS_MASK 0xff
+
+#define INTEL_GVT_PCI_SWSCI 0xe8
+#define SWSCI_SCI_SELECT (1 << 15)
+#define SWSCI_SCI_TRIGGER 1
+
+#define INTEL_GVT_PCI_OPREGION 0xfc
+
+#define INTEL_GVT_OPREGION_CLID 0x1AC
+#define INTEL_GVT_OPREGION_SCIC 0x200
+#define OPREGION_SCIC_FUNC_MASK 0x1E
+#define OPREGION_SCIC_FUNC_SHIFT 1
+#define OPREGION_SCIC_SUBFUNC_MASK 0xFF00
+#define OPREGION_SCIC_SUBFUNC_SHIFT 8
+#define OPREGION_SCIC_EXIT_MASK 0xE0
+#define INTEL_GVT_OPREGION_SCIC_F_GETBIOSDATA 4
+#define INTEL_GVT_OPREGION_SCIC_F_GETBIOSCALLBACKS 6
+#define INTEL_GVT_OPREGION_SCIC_SF_SUPPRTEDCALLS 0
+#define INTEL_GVT_OPREGION_SCIC_SF_REQEUSTEDCALLBACKS 1
+#define INTEL_GVT_OPREGION_PARM 0x204
+
+#define INTEL_GVT_OPREGION_PAGES 2
+#define INTEL_GVT_OPREGION_PORDER 1
+#define INTEL_GVT_OPREGION_SIZE (2 * 4096)
+
+#define VGT_SPRSTRIDE(pipe) _PIPE(pipe, _SPRA_STRIDE, _PLANE_STRIDE_2_B)
+
+#define _REG_VECS_EXCC 0x1A028
+#define _REG_VCS2_EXCC 0x1c028
+
+#define _REG_701C0(pipe, plane) (0x701c0 + (pipe) * 0x1000 + ((plane) - 1) * 0x100)
+#define _REG_701C4(pipe, plane) (0x701c4 + (pipe) * 0x1000 + ((plane) - 1) * 0x100)
+
+#define GFX_MODE_BIT_SET_IN_MASK(val, bit) \
+ ((((bit) & 0xffff0000) == 0) && !!((val) & (((bit) << 16))))
+
+#define FORCEWAKE_RENDER_GEN9_REG 0xa278
+#define FORCEWAKE_ACK_RENDER_GEN9_REG 0x0D84
+#define FORCEWAKE_BLITTER_GEN9_REG 0xa188
+#define FORCEWAKE_ACK_BLITTER_GEN9_REG 0x130044
+#define FORCEWAKE_MEDIA_GEN9_REG 0xa270
+#define FORCEWAKE_ACK_MEDIA_GEN9_REG 0x0D88
+#define FORCEWAKE_ACK_HSW_REG 0x130044
+
+#define RB_HEAD_OFF_MASK ((1U << 21) - (1U << 2))
+#define RB_TAIL_OFF_MASK ((1U << 21) - (1U << 3))
+#define RB_TAIL_SIZE_MASK ((1U << 21) - (1U << 12))
+#define _RING_CTL_BUF_SIZE(ctl) (((ctl) & RB_TAIL_SIZE_MASK) + GTT_PAGE_SIZE)
+
+#endif
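
A short worked example of the ring-control arithmetic above (GTT_PAGE_SIZE is defined elsewhere in the GVT headers; 4096 is assumed here):

/*
 * RB_TAIL_SIZE_MASK = (1 << 21) - (1 << 12) = 0x1ff000, i.e. CTL bits
 * 20:12, which hold the ring buffer length in pages minus one. For
 * ctl = 0x3f001:
 *
 *   ctl & RB_TAIL_SIZE_MASK = 0x3f000               (63 pages of bytes)
 *   _RING_CTL_BUF_SIZE(ctl) = 0x3f000 + 4096 = 0x40000  (a 256 KiB ring)
 */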
diff --git a/drivers/gpu/drm/i915/gvt/render.c b/drivers/gpu/drm/i915/gvt/render.c
new file mode 100644
index 000000000000..44136b1f3aab
--- /dev/null
+++ b/drivers/gpu/drm/i915/gvt/render.c
@@ -0,0 +1,310 @@
+/*
+ * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ * Eddie Dong <eddie.dong@intel.com>
+ * Kevin Tian <kevin.tian@intel.com>
+ *
+ * Contributors:
+ * Zhi Wang <zhi.a.wang@intel.com>
+ * Changbin Du <changbin.du@intel.com>
+ * Zhenyu Wang <zhenyuw@linux.intel.com>
+ * Tina Zhang <tina.zhang@intel.com>
+ * Bing Niu <bing.niu@intel.com>
+ *
+ */
+
+#include "i915_drv.h"
+#include "gvt.h"
+
+struct render_mmio {
+ int ring_id;
+ i915_reg_t reg;
+ u32 mask;
+ bool in_context;
+ u32 value;
+};
+
+static struct render_mmio gen8_render_mmio_list[] = {
+ {RCS, _MMIO(0x229c), 0xffff, false},
+ {RCS, _MMIO(0x2248), 0x0, false},
+ {RCS, _MMIO(0x2098), 0x0, false},
+ {RCS, _MMIO(0x20c0), 0xffff, true},
+ {RCS, _MMIO(0x24d0), 0, false},
+ {RCS, _MMIO(0x24d4), 0, false},
+ {RCS, _MMIO(0x24d8), 0, false},
+ {RCS, _MMIO(0x24dc), 0, false},
+ {RCS, _MMIO(0x7004), 0xffff, true},
+ {RCS, _MMIO(0x7008), 0xffff, true},
+ {RCS, _MMIO(0x7000), 0xffff, true},
+ {RCS, _MMIO(0x7010), 0xffff, true},
+ {RCS, _MMIO(0x7300), 0xffff, true},
+ {RCS, _MMIO(0x83a4), 0xffff, true},
+
+ {BCS, _MMIO(0x2229c), 0xffff, false},
+ {BCS, _MMIO(0x2209c), 0xffff, false},
+ {BCS, _MMIO(0x220c0), 0xffff, false},
+ {BCS, _MMIO(0x22098), 0x0, false},
+ {BCS, _MMIO(0x22028), 0x0, false},
+};
+
+static struct render_mmio gen9_render_mmio_list[] = {
+ {RCS, _MMIO(0x229c), 0xffff, false},
+ {RCS, _MMIO(0x2248), 0x0, false},
+ {RCS, _MMIO(0x2098), 0x0, false},
+ {RCS, _MMIO(0x20c0), 0xffff, true},
+ {RCS, _MMIO(0x24d0), 0, false},
+ {RCS, _MMIO(0x24d4), 0, false},
+ {RCS, _MMIO(0x24d8), 0, false},
+ {RCS, _MMIO(0x24dc), 0, false},
+ {RCS, _MMIO(0x7004), 0xffff, true},
+ {RCS, _MMIO(0x7008), 0xffff, true},
+ {RCS, _MMIO(0x7000), 0xffff, true},
+ {RCS, _MMIO(0x7010), 0xffff, true},
+ {RCS, _MMIO(0x7300), 0xffff, true},
+ {RCS, _MMIO(0x83a4), 0xffff, true},
+
+ {RCS, _MMIO(0x40e0), 0, false},
+ {RCS, _MMIO(0x40e4), 0, false},
+ {RCS, _MMIO(0x2580), 0xffff, true},
+ {RCS, _MMIO(0x7014), 0xffff, true},
+ {RCS, _MMIO(0x20ec), 0xffff, false},
+ {RCS, _MMIO(0xb118), 0, false},
+ {RCS, _MMIO(0xe100), 0xffff, true},
+ {RCS, _MMIO(0xe180), 0xffff, true},
+ {RCS, _MMIO(0xe184), 0xffff, true},
+ {RCS, _MMIO(0xe188), 0xffff, true},
+ {RCS, _MMIO(0xe194), 0xffff, true},
+ {RCS, _MMIO(0x4de0), 0, false},
+ {RCS, _MMIO(0x4de4), 0, false},
+ {RCS, _MMIO(0x4de8), 0, false},
+ {RCS, _MMIO(0x4dec), 0, false},
+ {RCS, _MMIO(0x4df0), 0, false},
+ {RCS, _MMIO(0x4df4), 0, false},
+
+ {BCS, _MMIO(0x2229c), 0xffff, false},
+ {BCS, _MMIO(0x2209c), 0xffff, false},
+ {BCS, _MMIO(0x220c0), 0xffff, false},
+ {BCS, _MMIO(0x22098), 0x0, false},
+ {BCS, _MMIO(0x22028), 0x0, false},
+
+ {VCS2, _MMIO(0x1c028), 0xffff, false},
+
+ {VECS, _MMIO(0x1a028), 0xffff, false},
+};
+
+static u32 gen9_render_mocs[I915_NUM_ENGINES][64];
+static u32 gen9_render_mocs_L3[32];
+
+static void handle_tlb_pending_event(struct intel_vgpu *vgpu, int ring_id)
+{
+ struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+ enum forcewake_domains fw;
+ i915_reg_t reg;
+ u32 regs[] = {
+ [RCS] = 0x4260,
+ [VCS] = 0x4264,
+ [VCS2] = 0x4268,
+ [BCS] = 0x426c,
+ [VECS] = 0x4270,
+ };
+
+ if (WARN_ON(ring_id >= ARRAY_SIZE(regs)))
+ return;
+
+ if (!test_and_clear_bit(ring_id, (void *)vgpu->tlb_handle_pending))
+ return;
+
+ reg = _MMIO(regs[ring_id]);
+
+ /* WaForceWakeRenderDuringMmioTLBInvalidate:skl
+ * We need to hold a forcewake while invalidating the RCS TLB
+ * caches; otherwise the device can drop into RC6 and interrupt
+ * the invalidation process.
+ */
+ fw = intel_uncore_forcewake_for_reg(dev_priv, reg,
+ FW_REG_READ | FW_REG_WRITE);
+ if (ring_id == RCS && IS_SKYLAKE(dev_priv))
+ fw |= FORCEWAKE_RENDER;
+
+ intel_uncore_forcewake_get(dev_priv, fw);
+
+ I915_WRITE_FW(reg, 0x1);
+
+ if (wait_for_atomic((I915_READ_FW(reg) == 0), 50))
+ gvt_err("timeout in invalidate ring (%d) tlb\n", ring_id);
+ else
+ vgpu_vreg(vgpu, regs[ring_id]) = 0;
+
+ intel_uncore_forcewake_put(dev_priv, fw);
+
+ gvt_dbg_core("invalidate TLB for ring %d\n", ring_id);
+}
+
+static void load_mocs(struct intel_vgpu *vgpu, int ring_id)
+{
+ struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+ i915_reg_t offset, l3_offset;
+ u32 regs[] = {
+ [RCS] = 0xc800,
+ [VCS] = 0xc900,
+ [VCS2] = 0xca00,
+ [BCS] = 0xcc00,
+ [VECS] = 0xcb00,
+ };
+ int i;
+
+ if (WARN_ON(ring_id >= ARRAY_SIZE(regs)))
+ return;
+
+ if (!IS_SKYLAKE(dev_priv))
+ return;
+
+ offset.reg = regs[ring_id];
+ for (i = 0; i < 64; i++) {
+ gen9_render_mocs[ring_id][i] = I915_READ(offset);
+ I915_WRITE(offset, vgpu_vreg(vgpu, offset));
+ POSTING_READ(offset);
+ offset.reg += 4;
+ }
+
+ if (ring_id == RCS) {
+ l3_offset.reg = 0xb020;
+ for (i = 0; i < 32; i++) {
+ gen9_render_mocs_L3[i] = I915_READ(l3_offset);
+ I915_WRITE(l3_offset, vgpu_vreg(vgpu, l3_offset));
+ POSTING_READ(l3_offset);
+ l3_offset.reg += 4;
+ }
+ }
+}
+
+static void restore_mocs(struct intel_vgpu *vgpu, int ring_id)
+{
+ struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+ i915_reg_t offset, l3_offset;
+ u32 regs[] = {
+ [RCS] = 0xc800,
+ [VCS] = 0xc900,
+ [VCS2] = 0xca00,
+ [BCS] = 0xcc00,
+ [VECS] = 0xcb00,
+ };
+ int i;
+
+ if (WARN_ON(ring_id >= ARRAY_SIZE(regs)))
+ return;
+
+ if (!IS_SKYLAKE(dev_priv))
+ return;
+
+ offset.reg = regs[ring_id];
+ for (i = 0; i < 64; i++) {
+ vgpu_vreg(vgpu, offset) = I915_READ(offset);
+ I915_WRITE(offset, gen9_render_mocs[ring_id][i]);
+ POSTING_READ(offset);
+ offset.reg += 4;
+ }
+
+ if (ring_id == RCS) {
+ l3_offset.reg = 0xb020;
+ for (i = 0; i < 32; i++) {
+ vgpu_vreg(vgpu, l3_offset) = I915_READ(l3_offset);
+ I915_WRITE(l3_offset, gen9_render_mocs_L3[i]);
+ POSTING_READ(l3_offset);
+ l3_offset.reg += 4;
+ }
+ }
+}
+
+void intel_gvt_load_render_mmio(struct intel_vgpu *vgpu, int ring_id)
+{
+ struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+ struct render_mmio *mmio;
+ u32 v;
+ int i, array_size;
+
+ if (IS_SKYLAKE(vgpu->gvt->dev_priv)) {
+ mmio = gen9_render_mmio_list;
+ array_size = ARRAY_SIZE(gen9_render_mmio_list);
+ load_mocs(vgpu, ring_id);
+ } else {
+ mmio = gen8_render_mmio_list;
+ array_size = ARRAY_SIZE(gen8_render_mmio_list);
+ }
+
+ for (i = 0; i < array_size; i++, mmio++) {
+ if (mmio->ring_id != ring_id)
+ continue;
+
+ mmio->value = I915_READ(mmio->reg);
+ if (mmio->mask)
+ v = vgpu_vreg(vgpu, mmio->reg) | (mmio->mask << 16);
+ else
+ v = vgpu_vreg(vgpu, mmio->reg);
+
+ I915_WRITE(mmio->reg, v);
+ POSTING_READ(mmio->reg);
+
+ gvt_dbg_render("load reg %x old %x new %x\n",
+ i915_mmio_reg_offset(mmio->reg),
+ mmio->value, v);
+ }
+ handle_tlb_pending_event(vgpu, ring_id);
+}
+
+void intel_gvt_restore_render_mmio(struct intel_vgpu *vgpu, int ring_id)
+{
+ struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+ struct render_mmio *mmio;
+ u32 v;
+ int i, array_size;
+
+ if (IS_SKYLAKE(dev_priv)) {
+ mmio = gen9_render_mmio_list;
+ array_size = ARRAY_SIZE(gen9_render_mmio_list);
+ restore_mocs(vgpu, ring_id);
+ } else {
+ mmio = gen8_render_mmio_list;
+ array_size = ARRAY_SIZE(gen8_render_mmio_list);
+ }
+
+ for (i = 0; i < array_size; i++, mmio++) {
+ if (mmio->ring_id != ring_id)
+ continue;
+
+ vgpu_vreg(vgpu, mmio->reg) = I915_READ(mmio->reg);
+
+ if (mmio->mask) {
+ vgpu_vreg(vgpu, mmio->reg) &= ~(mmio->mask << 16);
+ v = mmio->value | (mmio->mask << 16);
+ } else
+ v = mmio->value;
+
+ I915_WRITE(mmio->reg, v);
+ POSTING_READ(mmio->reg);
+
+ gvt_dbg_render("restore reg %x old %x new %x\n",
+ i915_mmio_reg_offset(mmio->reg),
+ mmio->value, v);
+ }
+}
diff --git a/drivers/gpu/drm/i915/gvt/render.h b/drivers/gpu/drm/i915/gvt/render.h
new file mode 100644
index 000000000000..dac1a3cc458b
--- /dev/null
+++ b/drivers/gpu/drm/i915/gvt/render.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ * Eddie Dong <eddie.dong@intel.com>
+ * Kevin Tian <kevin.tian@intel.com>
+ *
+ * Contributors:
+ * Zhi Wang <zhi.a.wang@intel.com>
+ * Changbin Du <changbin.du@intel.com>
+ * Zhenyu Wang <zhenyuw@linux.intel.com>
+ * Tina Zhang <tina.zhang@intel.com>
+ * Bing Niu <bing.niu@intel.com>
+ *
+ */
+
+#ifndef __GVT_RENDER_H__
+#define __GVT_RENDER_H__
+
+void intel_gvt_load_render_mmio(struct intel_vgpu *vgpu, int ring_id);
+
+void intel_gvt_restore_render_mmio(struct intel_vgpu *vgpu, int ring_id);
+
+#endif
diff --git a/drivers/gpu/drm/i915/gvt/sched_policy.c b/drivers/gpu/drm/i915/gvt/sched_policy.c
new file mode 100644
index 000000000000..678b0be85376
--- /dev/null
+++ b/drivers/gpu/drm/i915/gvt/sched_policy.c
@@ -0,0 +1,292 @@
+/*
+ * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ * Anhua Xu
+ * Kevin Tian <kevin.tian@intel.com>
+ *
+ * Contributors:
+ * Min He <min.he@intel.com>
+ * Bing Niu <bing.niu@intel.com>
+ * Zhi Wang <zhi.a.wang@intel.com>
+ *
+ */
+
+#include "i915_drv.h"
+#include "gvt.h"
+
+static bool vgpu_has_pending_workload(struct intel_vgpu *vgpu)
+{
+ enum intel_engine_id i;
+ struct intel_engine_cs *engine;
+
+ for_each_engine(engine, vgpu->gvt->dev_priv, i) {
+ if (!list_empty(workload_q_head(vgpu, i)))
+ return true;
+ }
+
+ return false;
+}
+
+static void try_to_schedule_next_vgpu(struct intel_gvt *gvt)
+{
+ struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
+ enum intel_engine_id i;
+ struct intel_engine_cs *engine;
+
+ /* no target to schedule */
+ if (!scheduler->next_vgpu)
+ return;
+
+ gvt_dbg_sched("try to schedule next vgpu %d\n",
+ scheduler->next_vgpu->id);
+
+ /*
+ * after the flag is set, workload dispatch thread will
+ * stop dispatching workload for current vgpu
+ */
+ scheduler->need_reschedule = true;
+
+ /* still have uncompleted workload? */
+ for_each_engine(engine, gvt->dev_priv, i) {
+ if (scheduler->current_workload[i]) {
+ gvt_dbg_sched("still have running workload\n");
+ return;
+ }
+ }
+
+ gvt_dbg_sched("switch to next vgpu %d\n",
+ scheduler->next_vgpu->id);
+
+ /* switch current vgpu */
+ scheduler->current_vgpu = scheduler->next_vgpu;
+ scheduler->next_vgpu = NULL;
+
+ scheduler->need_reschedule = false;
+
+ /* wake up workload dispatch thread */
+ for_each_engine(engine, gvt->dev_priv, i)
+ wake_up(&scheduler->waitq[i]);
+}
+
+struct tbs_vgpu_data {
+ struct list_head list;
+ struct intel_vgpu *vgpu;
+ /* put some per-vgpu sched stats here */
+};
+
+struct tbs_sched_data {
+ struct intel_gvt *gvt;
+ struct delayed_work work;
+ unsigned long period;
+ struct list_head runq_head;
+};
+
+#define GVT_DEFAULT_TIME_SLICE (1 * HZ / 1000)
+
+static void tbs_sched_func(struct work_struct *work)
+{
+ struct tbs_sched_data *sched_data = container_of(work,
+ struct tbs_sched_data, work.work);
+ struct tbs_vgpu_data *vgpu_data;
+
+ struct intel_gvt *gvt = sched_data->gvt;
+ struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
+
+ struct intel_vgpu *vgpu = NULL;
+ struct list_head *pos, *head;
+
+ mutex_lock(&gvt->lock);
+
+ /* no vgpu, or a target has already been chosen */
+ if (list_empty(&sched_data->runq_head) || scheduler->next_vgpu)
+ goto out;
+
+ if (scheduler->current_vgpu) {
+ vgpu_data = scheduler->current_vgpu->sched_data;
+ head = &vgpu_data->list;
+ } else {
+ gvt_dbg_sched("no current vgpu search from q head\n");
+ head = &sched_data->runq_head;
+ }
+
+ /* search a vgpu with pending workload */
+ list_for_each(pos, head) {
+ if (pos == &sched_data->runq_head)
+ continue;
+
+ vgpu_data = container_of(pos, struct tbs_vgpu_data, list);
+ if (!vgpu_has_pending_workload(vgpu_data->vgpu))
+ continue;
+
+ vgpu = vgpu_data->vgpu;
+ break;
+ }
+
+ if (vgpu) {
+ scheduler->next_vgpu = vgpu;
+ gvt_dbg_sched("pick next vgpu %d\n", vgpu->id);
+ }
+out:
+ if (scheduler->next_vgpu) {
+ gvt_dbg_sched("try to schedule next vgpu %d\n",
+ scheduler->next_vgpu->id);
+ try_to_schedule_next_vgpu(gvt);
+ }
+
+ /*
+ * still have a vgpu on the runqueue, or the last schedule
+ * hasn't finished because of a running workload
+ */
+ if (!list_empty(&sched_data->runq_head) || scheduler->next_vgpu)
+ schedule_delayed_work(&sched_data->work, sched_data->period);
+
+ mutex_unlock(&gvt->lock);
+}
+
+static int tbs_sched_init(struct intel_gvt *gvt)
+{
+ struct intel_gvt_workload_scheduler *scheduler =
+ &gvt->scheduler;
+
+ struct tbs_sched_data *data;
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&data->runq_head);
+ INIT_DELAYED_WORK(&data->work, tbs_sched_func);
+ data->period = GVT_DEFAULT_TIME_SLICE;
+ data->gvt = gvt;
+
+ scheduler->sched_data = data;
+ return 0;
+}
+
+static void tbs_sched_clean(struct intel_gvt *gvt)
+{
+ struct intel_gvt_workload_scheduler *scheduler =
+ &gvt->scheduler;
+ struct tbs_sched_data *data = scheduler->sched_data;
+
+ cancel_delayed_work(&data->work);
+ kfree(data);
+ scheduler->sched_data = NULL;
+}
+
+static int tbs_sched_init_vgpu(struct intel_vgpu *vgpu)
+{
+ struct tbs_vgpu_data *data;
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->vgpu = vgpu;
+ INIT_LIST_HEAD(&data->list);
+
+ vgpu->sched_data = data;
+ return 0;
+}
+
+static void tbs_sched_clean_vgpu(struct intel_vgpu *vgpu)
+{
+ kfree(vgpu->sched_data);
+ vgpu->sched_data = NULL;
+}
+
+static void tbs_sched_start_schedule(struct intel_vgpu *vgpu)
+{
+ struct tbs_sched_data *sched_data = vgpu->gvt->scheduler.sched_data;
+ struct tbs_vgpu_data *vgpu_data = vgpu->sched_data;
+
+ if (!list_empty(&vgpu_data->list))
+ return;
+
+ list_add_tail(&vgpu_data->list, &sched_data->runq_head);
+ schedule_delayed_work(&sched_data->work, sched_data->period);
+}
+
+static void tbs_sched_stop_schedule(struct intel_vgpu *vgpu)
+{
+ struct tbs_vgpu_data *vgpu_data = vgpu->sched_data;
+
+ list_del_init(&vgpu_data->list);
+}
+
+static struct intel_gvt_sched_policy_ops tbs_schedule_ops = {
+ .init = tbs_sched_init,
+ .clean = tbs_sched_clean,
+ .init_vgpu = tbs_sched_init_vgpu,
+ .clean_vgpu = tbs_sched_clean_vgpu,
+ .start_schedule = tbs_sched_start_schedule,
+ .stop_schedule = tbs_sched_stop_schedule,
+};
+
+int intel_gvt_init_sched_policy(struct intel_gvt *gvt)
+{
+ gvt->scheduler.sched_ops = &tbs_schedule_ops;
+
+ return gvt->scheduler.sched_ops->init(gvt);
+}
+
+void intel_gvt_clean_sched_policy(struct intel_gvt *gvt)
+{
+ gvt->scheduler.sched_ops->clean(gvt);
+}
+
+int intel_vgpu_init_sched_policy(struct intel_vgpu *vgpu)
+{
+ return vgpu->gvt->scheduler.sched_ops->init_vgpu(vgpu);
+}
+
+void intel_vgpu_clean_sched_policy(struct intel_vgpu *vgpu)
+{
+ vgpu->gvt->scheduler.sched_ops->clean_vgpu(vgpu);
+}
+
+void intel_vgpu_start_schedule(struct intel_vgpu *vgpu)
+{
+ gvt_dbg_core("vgpu%d: start schedule\n", vgpu->id);
+
+ vgpu->gvt->scheduler.sched_ops->start_schedule(vgpu);
+}
+
+void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu)
+{
+ struct intel_gvt_workload_scheduler *scheduler =
+ &vgpu->gvt->scheduler;
+
+ gvt_dbg_core("vgpu%d: stop schedule\n", vgpu->id);
+
+ scheduler->sched_ops->stop_schedule(vgpu);
+
+ if (scheduler->next_vgpu == vgpu)
+ scheduler->next_vgpu = NULL;
+
+ if (scheduler->current_vgpu == vgpu) {
+ /* stop workload dispatching */
+ scheduler->need_reschedule = true;
+ scheduler->current_vgpu = NULL;
+ }
+}
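
A note on the timeslice arithmetic used by the policy above (assuming the usual meaning of HZ; this commentary is not part of the patch):

/*
 * GVT_DEFAULT_TIME_SLICE = 1 * HZ / 1000 is the number of jiffies in
 * one millisecond, so schedule_delayed_work() re-arms tbs_sched_func()
 * roughly every 1 ms. With HZ=1000 that is one jiffy; with a coarser
 * tick (e.g. HZ=250) the integer division yields 0 and the work is
 * simply re-queued at the next available opportunity.
 */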
diff --git a/drivers/gpu/drm/i915/gvt/sched_policy.h b/drivers/gpu/drm/i915/gvt/sched_policy.h
new file mode 100644
index 000000000000..bb8b9097e41a
--- /dev/null
+++ b/drivers/gpu/drm/i915/gvt/sched_policy.h
@@ -0,0 +1,58 @@
+/*
+ * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ * Anhua Xu
+ * Kevin Tian <kevin.tian@intel.com>
+ *
+ * Contributors:
+ * Min He <min.he@intel.com>
+ * Bing Niu <bing.niu@intel.com>
+ * Zhi Wang <zhi.a.wang@intel.com>
+ *
+ */
+
+#ifndef __GVT_SCHED_POLICY__
+#define __GVT_SCHED_POLICY__
+
+struct intel_gvt_sched_policy_ops {
+ int (*init)(struct intel_gvt *gvt);
+ void (*clean)(struct intel_gvt *gvt);
+ int (*init_vgpu)(struct intel_vgpu *vgpu);
+ void (*clean_vgpu)(struct intel_vgpu *vgpu);
+ void (*start_schedule)(struct intel_vgpu *vgpu);
+ void (*stop_schedule)(struct intel_vgpu *vgpu);
+};
+
+int intel_gvt_init_sched_policy(struct intel_gvt *gvt);
+
+void intel_gvt_clean_sched_policy(struct intel_gvt *gvt);
+
+int intel_vgpu_init_sched_policy(struct intel_vgpu *vgpu);
+
+void intel_vgpu_clean_sched_policy(struct intel_vgpu *vgpu);
+
+void intel_vgpu_start_schedule(struct intel_vgpu *vgpu);
+
+void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu);
+
+#endif
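
intel_gvt_init_sched_policy() currently hard-wires tbs_schedule_ops, but the ops table above keeps the policy pluggable. A minimal sketch of an alternative policy (all names hypothetical; the callbacks deliberately do nothing):

static int noop_sched_init(struct intel_gvt *gvt) { return 0; }
static void noop_sched_clean(struct intel_gvt *gvt) { }
static int noop_sched_init_vgpu(struct intel_vgpu *vgpu) { return 0; }
static void noop_sched_clean_vgpu(struct intel_vgpu *vgpu) { }
static void noop_sched_start(struct intel_vgpu *vgpu) { }
static void noop_sched_stop(struct intel_vgpu *vgpu) { }

static struct intel_gvt_sched_policy_ops noop_schedule_ops = {
	.init		= noop_sched_init,
	.clean		= noop_sched_clean,
	.init_vgpu	= noop_sched_init_vgpu,
	.clean_vgpu	= noop_sched_clean_vgpu,
	.start_schedule	= noop_sched_start,
	.stop_schedule	= noop_sched_stop,
};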
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
new file mode 100644
index 000000000000..4db242250235
--- /dev/null
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -0,0 +1,583 @@
+/*
+ * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ * Zhi Wang <zhi.a.wang@intel.com>
+ *
+ * Contributors:
+ * Ping Gao <ping.a.gao@intel.com>
+ * Tina Zhang <tina.zhang@intel.com>
+ * Changbin Du <changbin.du@intel.com>
+ * Min He <min.he@intel.com>
+ * Bing Niu <bing.niu@intel.com>
+ * Zhenyu Wang <zhenyuw@linux.intel.com>
+ *
+ */
+
+#include <linux/kthread.h>
+
+#include "i915_drv.h"
+#include "gvt.h"
+
+#define RING_CTX_OFF(x) \
+ offsetof(struct execlist_ring_context, x)
+
+static void set_context_pdp_root_pointer(
+ struct execlist_ring_context *ring_context,
+ u32 pdp[8])
+{
+ struct execlist_mmio_pair *pdp_pair = &ring_context->pdp3_UDW;
+ int i;
+
+ for (i = 0; i < 8; i++)
+ pdp_pair[i].val = pdp[7 - i];
+}
+
+static int populate_shadow_context(struct intel_vgpu_workload *workload)
+{
+ struct intel_vgpu *vgpu = workload->vgpu;
+ struct intel_gvt *gvt = vgpu->gvt;
+ int ring_id = workload->ring_id;
+ struct i915_gem_context *shadow_ctx = workload->vgpu->shadow_ctx;
+ struct drm_i915_gem_object *ctx_obj =
+ shadow_ctx->engine[ring_id].state->obj;
+ struct execlist_ring_context *shadow_ring_context;
+ struct page *page;
+ void *dst;
+ unsigned long context_gpa, context_page_num;
+ int i;
+
+ gvt_dbg_sched("ring id %d workload lrca %x", ring_id,
+ workload->ctx_desc.lrca);
+
+ context_page_num = intel_lr_context_size(
+ gvt->dev_priv->engine[ring_id]);
+
+ context_page_num = context_page_num >> PAGE_SHIFT;
+
+ if (IS_BROADWELL(gvt->dev_priv) && ring_id == RCS)
+ context_page_num = 19;
+
+ i = 2;
+
+ while (i < context_page_num) {
+ context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm,
+ (u32)((workload->ctx_desc.lrca + i) <<
+ GTT_PAGE_SHIFT));
+ if (context_gpa == INTEL_GVT_INVALID_ADDR) {
+ gvt_err("Invalid guest context descriptor\n");
+ return -EINVAL;
+ }
+
+ page = i915_gem_object_get_page(ctx_obj, LRC_PPHWSP_PN + i);
+ dst = kmap(page);
+ intel_gvt_hypervisor_read_gpa(vgpu, context_gpa, dst,
+ GTT_PAGE_SIZE);
+ kunmap(page);
+ i++;
+ }
+
+ page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
+ shadow_ring_context = kmap(page);
+
+#define COPY_REG(name) \
+ intel_gvt_hypervisor_read_gpa(vgpu, workload->ring_context_gpa \
+ + RING_CTX_OFF(name.val), &shadow_ring_context->name.val, 4)
+
+ COPY_REG(ctx_ctrl);
+ COPY_REG(ctx_timestamp);
+
+ if (ring_id == RCS) {
+ COPY_REG(bb_per_ctx_ptr);
+ COPY_REG(rcs_indirect_ctx);
+ COPY_REG(rcs_indirect_ctx_offset);
+ }
+#undef COPY_REG
+
+ set_context_pdp_root_pointer(shadow_ring_context,
+ workload->shadow_mm->shadow_page_table);
+
+ intel_gvt_hypervisor_read_gpa(vgpu,
+ workload->ring_context_gpa +
+ sizeof(*shadow_ring_context),
+ (void *)shadow_ring_context +
+ sizeof(*shadow_ring_context),
+ GTT_PAGE_SIZE - sizeof(*shadow_ring_context));
+
+ kunmap(page);
+ return 0;
+}
+
+static int shadow_context_status_change(struct notifier_block *nb,
+ unsigned long action, void *data)
+{
+ struct intel_vgpu *vgpu = container_of(nb,
+ struct intel_vgpu, shadow_ctx_notifier_block);
+ struct drm_i915_gem_request *req =
+ (struct drm_i915_gem_request *)data;
+ struct intel_gvt_workload_scheduler *scheduler =
+ &vgpu->gvt->scheduler;
+ struct intel_vgpu_workload *workload =
+ scheduler->current_workload[req->engine->id];
+
+ switch (action) {
+ case INTEL_CONTEXT_SCHEDULE_IN:
+ intel_gvt_load_render_mmio(workload->vgpu,
+ workload->ring_id);
+ atomic_set(&workload->shadow_ctx_active, 1);
+ break;
+ case INTEL_CONTEXT_SCHEDULE_OUT:
+ intel_gvt_restore_render_mmio(workload->vgpu,
+ workload->ring_id);
+ atomic_set(&workload->shadow_ctx_active, 0);
+ break;
+ default:
+ WARN_ON(1);
+ return NOTIFY_OK;
+ }
+ wake_up(&workload->shadow_ctx_status_wq);
+ return NOTIFY_OK;
+}
+
+static int dispatch_workload(struct intel_vgpu_workload *workload)
+{
+ int ring_id = workload->ring_id;
+ struct i915_gem_context *shadow_ctx = workload->vgpu->shadow_ctx;
+ struct drm_i915_private *dev_priv = workload->vgpu->gvt->dev_priv;
+ struct drm_i915_gem_request *rq;
+ int ret;
+
+ gvt_dbg_sched("ring id %d prepare to dispatch workload %p\n",
+ ring_id, workload);
+
+ shadow_ctx->desc_template = workload->ctx_desc.addressing_mode <<
+ GEN8_CTX_ADDRESSING_MODE_SHIFT;
+
+ mutex_lock(&dev_priv->drm.struct_mutex);
+
+ rq = i915_gem_request_alloc(dev_priv->engine[ring_id], shadow_ctx);
+ if (IS_ERR(rq)) {
+ gvt_err("fail to allocate gem request\n");
+ ret = PTR_ERR(rq);
+ goto out;
+ }
+
+ gvt_dbg_sched("ring id %d get i915 gem request %p\n", ring_id, rq);
+
+ workload->req = i915_gem_request_get(rq);
+
+ ret = intel_gvt_scan_and_shadow_workload(workload);
+ if (ret)
+ goto out;
+
+ ret = intel_gvt_scan_and_shadow_wa_ctx(&workload->wa_ctx);
+ if (ret)
+ goto out;
+
+ ret = populate_shadow_context(workload);
+ if (ret)
+ goto out;
+
+ if (workload->prepare) {
+ ret = workload->prepare(workload);
+ if (ret)
+ goto out;
+ }
+
+ gvt_dbg_sched("ring id %d submit workload to i915 %p\n",
+ ring_id, workload->req);
+
+ ret = 0;
+ workload->dispatched = true;
+out:
+ if (ret)
+ workload->status = ret;
+
+ if (!IS_ERR_OR_NULL(rq))
+ i915_add_request_no_flush(rq);
+ mutex_unlock(&dev_priv->drm.struct_mutex);
+ return ret;
+}
+
+static struct intel_vgpu_workload *pick_next_workload(
+ struct intel_gvt *gvt, int ring_id)
+{
+ struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
+ struct intel_vgpu_workload *workload = NULL;
+
+ mutex_lock(&gvt->lock);
+
+ /*
+ * no current vgpu / will be scheduled out / no workload
+ * bail out
+ */
+ if (!scheduler->current_vgpu) {
+ gvt_dbg_sched("ring id %d stop - no current vgpu\n", ring_id);
+ goto out;
+ }
+
+ if (scheduler->need_reschedule) {
+ gvt_dbg_sched("ring id %d stop - will reschedule\n", ring_id);
+ goto out;
+ }
+
+ if (list_empty(workload_q_head(scheduler->current_vgpu, ring_id))) {
+ gvt_dbg_sched("ring id %d stop - no available workload\n",
+ ring_id);
+ goto out;
+ }
+
+ /*
+ * still have a current workload; the workload dispatcher may
+ * have failed to submit it for some reason, so resubmit it.
+ */
+ if (scheduler->current_workload[ring_id]) {
+ workload = scheduler->current_workload[ring_id];
+ gvt_dbg_sched("ring id %d still have current workload %p\n",
+ ring_id, workload);
+ goto out;
+ }
+
+ /*
+ * pick a workload as the current workload. once the current
+ * workload is set, schedule policy routines will wait until
+ * it is finished when trying to schedule out a vgpu.
+ */
+ scheduler->current_workload[ring_id] = container_of(
+ workload_q_head(scheduler->current_vgpu, ring_id)->next,
+ struct intel_vgpu_workload, list);
+
+ workload = scheduler->current_workload[ring_id];
+
+ gvt_dbg_sched("ring id %d pick new workload %p\n", ring_id, workload);
+
+ atomic_inc(&workload->vgpu->running_workload_num);
+out:
+ mutex_unlock(&gvt->lock);
+ return workload;
+}
+
+static void update_guest_context(struct intel_vgpu_workload *workload)
+{
+ struct intel_vgpu *vgpu = workload->vgpu;
+ struct intel_gvt *gvt = vgpu->gvt;
+ int ring_id = workload->ring_id;
+ struct i915_gem_context *shadow_ctx = workload->vgpu->shadow_ctx;
+ struct drm_i915_gem_object *ctx_obj =
+ shadow_ctx->engine[ring_id].state->obj;
+ struct execlist_ring_context *shadow_ring_context;
+ struct page *page;
+ void *src;
+ unsigned long context_gpa, context_page_num;
+ int i;
+
+ gvt_dbg_sched("ring id %d workload lrca %x\n", ring_id,
+ workload->ctx_desc.lrca);
+
+ context_page_num = intel_lr_context_size(
+ gvt->dev_priv->engine[ring_id]);
+
+ context_page_num = context_page_num >> PAGE_SHIFT;
+
+ if (IS_BROADWELL(gvt->dev_priv) && ring_id == RCS)
+ context_page_num = 19;
+
+ i = 2;
+
+ while (i < context_page_num) {
+ context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm,
+ (u32)((workload->ctx_desc.lrca + i) <<
+ GTT_PAGE_SHIFT));
+ if (context_gpa == INTEL_GVT_INVALID_ADDR) {
+ gvt_err("invalid guest context descriptor\n");
+ return;
+ }
+
+ page = i915_gem_object_get_page(ctx_obj, LRC_PPHWSP_PN + i);
+ src = kmap(page);
+ intel_gvt_hypervisor_write_gpa(vgpu, context_gpa, src,
+ GTT_PAGE_SIZE);
+ kunmap(page);
+ i++;
+ }
+
+ intel_gvt_hypervisor_write_gpa(vgpu, workload->ring_context_gpa +
+ RING_CTX_OFF(ring_header.val), &workload->rb_tail, 4);
+
+ page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
+ shadow_ring_context = kmap(page);
+
+#define COPY_REG(name) \
+ intel_gvt_hypervisor_write_gpa(vgpu, workload->ring_context_gpa + \
+ RING_CTX_OFF(name.val), &shadow_ring_context->name.val, 4)
+
+ COPY_REG(ctx_ctrl);
+ COPY_REG(ctx_timestamp);
+
+#undef COPY_REG
+
+ intel_gvt_hypervisor_write_gpa(vgpu,
+ workload->ring_context_gpa +
+ sizeof(*shadow_ring_context),
+ (void *)shadow_ring_context +
+ sizeof(*shadow_ring_context),
+ GTT_PAGE_SIZE - sizeof(*shadow_ring_context));
+
+ kunmap(page);
+}
+
+static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
+{
+ struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
+ struct intel_vgpu_workload *workload;
+ int event;
+
+ mutex_lock(&gvt->lock);
+
+ workload = scheduler->current_workload[ring_id];
+
+ if (!workload->status && !workload->vgpu->resetting) {
+ wait_event(workload->shadow_ctx_status_wq,
+ !atomic_read(&workload->shadow_ctx_active));
+
+ update_guest_context(workload);
+
+ for_each_set_bit(event, workload->pending_events,
+ INTEL_GVT_EVENT_MAX)
+ intel_vgpu_trigger_virtual_event(workload->vgpu,
+ event);
+ }
+
+ gvt_dbg_sched("ring id %d complete workload %p status %d\n",
+ ring_id, workload, workload->status);
+
+ scheduler->current_workload[ring_id] = NULL;
+
+ atomic_dec(&workload->vgpu->running_workload_num);
+
+ list_del_init(&workload->list);
+ workload->complete(workload);
+
+ wake_up(&scheduler->workload_complete_wq);
+ mutex_unlock(&gvt->lock);
+}
+
+struct workload_thread_param {
+ struct intel_gvt *gvt;
+ int ring_id;
+};
+
+static DEFINE_MUTEX(scheduler_mutex);
+
+static int workload_thread(void *priv)
+{
+ struct workload_thread_param *p = (struct workload_thread_param *)priv;
+ struct intel_gvt *gvt = p->gvt;
+ int ring_id = p->ring_id;
+ struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
+ struct intel_vgpu_workload *workload = NULL;
+ long lret;
+ int ret;
+ bool need_force_wake = IS_SKYLAKE(gvt->dev_priv);
+ DEFINE_WAIT_FUNC(wait, woken_wake_function);
+
+ kfree(p);
+
+ gvt_dbg_core("workload thread for ring %d started\n", ring_id);
+
+ while (!kthread_should_stop()) {
+ add_wait_queue(&scheduler->waitq[ring_id], &wait);
+ do {
+ workload = pick_next_workload(gvt, ring_id);
+ if (workload)
+ break;
+ wait_woken(&wait, TASK_INTERRUPTIBLE,
+ MAX_SCHEDULE_TIMEOUT);
+ } while (!kthread_should_stop());
+ remove_wait_queue(&scheduler->waitq[ring_id], &wait);
+
+ if (!workload)
+ break;
+
+ mutex_lock(&scheduler_mutex);
+
+ gvt_dbg_sched("ring id %d next workload %p vgpu %d\n",
+ workload->ring_id, workload,
+ workload->vgpu->id);
+
+ intel_runtime_pm_get(gvt->dev_priv);
+
+ gvt_dbg_sched("ring id %d will dispatch workload %p\n",
+ workload->ring_id, workload);
+
+ if (need_force_wake)
+ intel_uncore_forcewake_get(gvt->dev_priv,
+ FORCEWAKE_ALL);
+
+ mutex_lock(&gvt->lock);
+ ret = dispatch_workload(workload);
+ mutex_unlock(&gvt->lock);
+
+ if (ret) {
+ gvt_err("fail to dispatch workload, skip\n");
+ goto complete;
+ }
+
+ gvt_dbg_sched("ring id %d wait workload %p\n",
+ workload->ring_id, workload);
+
+ lret = i915_wait_request(workload->req,
+ 0, MAX_SCHEDULE_TIMEOUT);
+ if (lret < 0) {
+ workload->status = lret;
+ gvt_err("fail to wait workload, skip\n");
+ } else {
+ workload->status = 0;
+ }
+
+complete:
+ gvt_dbg_sched("will complete workload %p\n, status: %d\n",
+ workload, workload->status);
+
+ complete_current_workload(gvt, ring_id);
+
+ if (workload->req)
+ i915_gem_request_put(fetch_and_zero(&workload->req));
+
+ if (need_force_wake)
+ intel_uncore_forcewake_put(gvt->dev_priv,
+ FORCEWAKE_ALL);
+
+ intel_runtime_pm_put(gvt->dev_priv);
+
+ mutex_unlock(&scheduler_mutex);
+
+ }
+ return 0;
+}
+
+void intel_gvt_wait_vgpu_idle(struct intel_vgpu *vgpu)
+{
+ struct intel_gvt *gvt = vgpu->gvt;
+ struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
+
+ if (atomic_read(&vgpu->running_workload_num)) {
+ gvt_dbg_sched("wait vgpu idle\n");
+
+ wait_event(scheduler->workload_complete_wq,
+ !atomic_read(&vgpu->running_workload_num));
+ }
+}
+
+void intel_gvt_clean_workload_scheduler(struct intel_gvt *gvt)
+{
+ struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
+ int i;
+
+ gvt_dbg_core("clean workload scheduler\n");
+
+ for (i = 0; i < I915_NUM_ENGINES; i++) {
+ if (scheduler->thread[i]) {
+ kthread_stop(scheduler->thread[i]);
+ scheduler->thread[i] = NULL;
+ }
+ }
+}
+
+int intel_gvt_init_workload_scheduler(struct intel_gvt *gvt)
+{
+ struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
+ struct workload_thread_param *param = NULL;
+ int ret;
+ int i;
+
+ gvt_dbg_core("init workload scheduler\n");
+
+ init_waitqueue_head(&scheduler->workload_complete_wq);
+
+ for (i = 0; i < I915_NUM_ENGINES; i++) {
+ /* check ring mask at init time */
+ if (!HAS_ENGINE(gvt->dev_priv, i))
+ continue;
+
+ init_waitqueue_head(&scheduler->waitq[i]);
+
+ param = kzalloc(sizeof(*param), GFP_KERNEL);
+ if (!param) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ param->gvt = gvt;
+ param->ring_id = i;
+
+ scheduler->thread[i] = kthread_run(workload_thread, param,
+ "gvt workload %d", i);
+ if (IS_ERR(scheduler->thread[i])) {
+ gvt_err("fail to create workload thread\n");
+ ret = PTR_ERR(scheduler->thread[i]);
+ goto err;
+ }
+ }
+ return 0;
+err:
+ intel_gvt_clean_workload_scheduler(gvt);
+ kfree(param);
+ param = NULL;
+ return ret;
+}
+
+void intel_vgpu_clean_gvt_context(struct intel_vgpu *vgpu)
+{
+ struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+
+ atomic_notifier_chain_unregister(&vgpu->shadow_ctx->status_notifier,
+ &vgpu->shadow_ctx_notifier_block);
+
+ mutex_lock(&dev_priv->drm.struct_mutex);
+
+ /* a little hacky to mark as ctx closed */
+ vgpu->shadow_ctx->closed = true;
+ i915_gem_context_put(vgpu->shadow_ctx);
+
+ mutex_unlock(&dev_priv->drm.struct_mutex);
+}
+
+int intel_vgpu_init_gvt_context(struct intel_vgpu *vgpu)
+{
+ atomic_set(&vgpu->running_workload_num, 0);
+
+ vgpu->shadow_ctx = i915_gem_context_create_gvt(
+ &vgpu->gvt->dev_priv->drm);
+ if (IS_ERR(vgpu->shadow_ctx))
+ return PTR_ERR(vgpu->shadow_ctx);
+
+ vgpu->shadow_ctx->engine[RCS].initialised = true;
+
+ vgpu->shadow_ctx_notifier_block.notifier_call =
+ shadow_context_status_change;
+
+ atomic_notifier_chain_register(&vgpu->shadow_ctx->status_notifier,
+ &vgpu->shadow_ctx_notifier_block);
+ return 0;
+}
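
scheduler.h below declares per-workload prepare()/complete() hooks that workload_thread() invokes around submission. A hedged sketch of what a submission model's complete() callback might look like (ownership of the workload allocation is model-specific, so the free below is an assumption):

static int demo_complete_workload(struct intel_vgpu_workload *workload)
{
	/* a real model would emulate context-switch status and
	 * interrupts toward the guest here before releasing the
	 * workload it allocated in its own submission path */
	kfree(workload);
	return 0;
}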
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.h b/drivers/gpu/drm/i915/gvt/scheduler.h
new file mode 100644
index 000000000000..3b30c28bff51
--- /dev/null
+++ b/drivers/gpu/drm/i915/gvt/scheduler.h
@@ -0,0 +1,139 @@
+/*
+ * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ * Zhi Wang <zhi.a.wang@intel.com>
+ *
+ * Contributors:
+ * Ping Gao <ping.a.gao@intel.com>
+ * Tina Zhang <tina.zhang@intel.com>
+ * Chanbin Du <changbin.du@intel.com>
+ * Min He <min.he@intel.com>
+ * Bing Niu <bing.niu@intel.com>
+ * Zhenyu Wang <zhenyuw@linux.intel.com>
+ *
+ */
+
+#ifndef _GVT_SCHEDULER_H_
+#define _GVT_SCHEDULER_H_
+
+struct intel_gvt_workload_scheduler {
+ struct intel_vgpu *current_vgpu;
+ struct intel_vgpu *next_vgpu;
+ struct intel_vgpu_workload *current_workload[I915_NUM_ENGINES];
+ bool need_reschedule;
+
+ wait_queue_head_t workload_complete_wq;
+ struct task_struct *thread[I915_NUM_ENGINES];
+ wait_queue_head_t waitq[I915_NUM_ENGINES];
+
+ void *sched_data;
+ struct intel_gvt_sched_policy_ops *sched_ops;
+};
+
+#define INDIRECT_CTX_ADDR_MASK 0xffffffc0
+#define INDIRECT_CTX_SIZE_MASK 0x3f
+struct shadow_indirect_ctx {
+ struct drm_i915_gem_object *obj;
+ unsigned long guest_gma;
+ unsigned long shadow_gma;
+ void *shadow_va;
+ uint32_t size;
+};
+
+#define PER_CTX_ADDR_MASK 0xfffff000
+struct shadow_per_ctx {
+ unsigned long guest_gma;
+ unsigned long shadow_gma;
+};
+
+struct intel_shadow_wa_ctx {
+ struct intel_vgpu_workload *workload;
+ struct shadow_indirect_ctx indirect_ctx;
+ struct shadow_per_ctx per_ctx;
+};
+
+struct intel_vgpu_workload {
+ struct intel_vgpu *vgpu;
+ int ring_id;
+ struct drm_i915_gem_request *req;
+ /* has this workload been dispatched to i915? */
+ bool dispatched;
+ int status;
+
+ struct intel_vgpu_mm *shadow_mm;
+
+ /* different submission models may need different handlers */
+ int (*prepare)(struct intel_vgpu_workload *);
+ int (*complete)(struct intel_vgpu_workload *);
+ struct list_head list;
+
+ DECLARE_BITMAP(pending_events, INTEL_GVT_EVENT_MAX);
+ void *shadow_ring_buffer_va;
+
+ /* execlist context information */
+ struct execlist_ctx_descriptor_format ctx_desc;
+ struct execlist_ring_context *ring_context;
+ unsigned long rb_head, rb_tail, rb_ctl, rb_start, rb_len;
+ bool restore_inhibit;
+ struct intel_vgpu_elsp_dwords elsp_dwords;
+ bool emulate_schedule_in;
+ atomic_t shadow_ctx_active;
+ wait_queue_head_t shadow_ctx_status_wq;
+ u64 ring_context_gpa;
+
+ /* shadow batch buffer */
+ struct list_head shadow_bb;
+ struct intel_shadow_wa_ctx wa_ctx;
+};
+
+/* An Intel shadow batch buffer is an i915 gem object */
+struct intel_shadow_bb_entry {
+ struct list_head list;
+ struct drm_i915_gem_object *obj;
+ void *va;
+ unsigned long len;
+ void *bb_start_cmd_va;
+};
+
+#define workload_q_head(vgpu, ring_id) \
+ (&((vgpu)->workload_q_head[(ring_id)]))
+
+#define queue_workload(workload) do { \
+ list_add_tail(&(workload)->list, \
+ workload_q_head((workload)->vgpu, (workload)->ring_id)); \
+ wake_up(&(workload)->vgpu->gvt-> \
+ scheduler.waitq[(workload)->ring_id]); \
+} while (0)
+
+int intel_gvt_init_workload_scheduler(struct intel_gvt *gvt);
+
+void intel_gvt_clean_workload_scheduler(struct intel_gvt *gvt);
+
+void intel_gvt_wait_vgpu_idle(struct intel_vgpu *vgpu);
+
+int intel_vgpu_init_gvt_context(struct intel_vgpu *vgpu);
+
+void intel_vgpu_clean_gvt_context(struct intel_vgpu *vgpu);
+
+#endif
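queue_workload() above is the submission-side entry point: it appends a workload to the owning vGPU's per-ring queue and wakes that ring's workload thread. A hypothetical caller might look like the sketch below; alloc_workload() and the two execlist hooks are stand-ins for whatever the submission model provides, not names defined by this patch:

        struct intel_vgpu_workload *workload = alloc_workload(vgpu);

        workload->ring_id = ring_id;
        workload->prepare = prepare_execlist_workload;   /* per-model hook */
        workload->complete = complete_execlist_workload; /* per-model hook */

        queue_workload(workload);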
diff --git a/drivers/gpu/drm/i915/gvt/trace.h b/drivers/gpu/drm/i915/gvt/trace.h
new file mode 100644
index 000000000000..53a2d10cf3f1
--- /dev/null
+++ b/drivers/gpu/drm/i915/gvt/trace.h
@@ -0,0 +1,286 @@
+/*
+ * Copyright © 2011-2016 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ * Jike Song <jike.song@intel.com>
+ *
+ * Contributors:
+ * Zhi Wang <zhi.a.wang@intel.com>
+ *
+ */
+
+#if !defined(_GVT_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
+#define _GVT_TRACE_H_
+
+#include <linux/types.h>
+#include <linux/stringify.h>
+#include <linux/tracepoint.h>
+#include <asm/tsc.h>
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM gvt
+
+TRACE_EVENT(spt_alloc,
+ TP_PROTO(int id, void *spt, int type, unsigned long mfn,
+ unsigned long gpt_gfn),
+
+ TP_ARGS(id, spt, type, mfn, gpt_gfn),
+
+ TP_STRUCT__entry(
+ __field(int, id)
+ __field(void *, spt)
+ __field(int, type)
+ __field(unsigned long, mfn)
+ __field(unsigned long, gpt_gfn)
+ ),
+
+ TP_fast_assign(
+ __entry->id = id;
+ __entry->spt = spt;
+ __entry->type = type;
+ __entry->mfn = mfn;
+ __entry->gpt_gfn = gpt_gfn;
+ ),
+
+ TP_printk("VM%d [alloc] spt %p type %d mfn 0x%lx gfn 0x%lx\n",
+ __entry->id,
+ __entry->spt,
+ __entry->type,
+ __entry->mfn,
+ __entry->gpt_gfn)
+);
+
+TRACE_EVENT(spt_free,
+ TP_PROTO(int id, void *spt, int type),
+
+ TP_ARGS(id, spt, type),
+
+ TP_STRUCT__entry(
+ __field(int, id)
+ __field(void *, spt)
+ __field(int, type)
+ ),
+
+ TP_fast_assign(
+ __entry->id = id;
+ __entry->spt = spt;
+ __entry->type = type;
+ ),
+
+ TP_printk("VM%u [free] spt %p type %d\n",
+ __entry->id,
+ __entry->spt,
+ __entry->type)
+);
+
+#define MAX_BUF_LEN 256
+
+TRACE_EVENT(gma_index,
+ TP_PROTO(const char *prefix, unsigned long gma,
+ unsigned long index),
+
+ TP_ARGS(prefix, gma, index),
+
+ TP_STRUCT__entry(
+ __array(char, buf, MAX_BUF_LEN)
+ ),
+
+ TP_fast_assign(
+ snprintf(__entry->buf, MAX_BUF_LEN,
+ "%s gma 0x%lx index 0x%lx\n", prefix, gma, index);
+ ),
+
+ TP_printk("%s", __entry->buf)
+);
+
+TRACE_EVENT(gma_translate,
+ TP_PROTO(int id, const char *type, int ring_id, int pt_level,
+ unsigned long gma, unsigned long gpa),
+
+ TP_ARGS(id, type, ring_id, pt_level, gma, gpa),
+
+ TP_STRUCT__entry(
+ __array(char, buf, MAX_BUF_LEN)
+ ),
+
+ TP_fast_assign(
+ snprintf(__entry->buf, MAX_BUF_LEN,
+ "VM%d %s ring %d pt_level %d gma 0x%lx -> gpa 0x%lx\n",
+ id, type, ring_id, pt_level, gma, gpa);
+ ),
+
+ TP_printk("%s", __entry->buf)
+);
+
+TRACE_EVENT(spt_refcount,
+ TP_PROTO(int id, const char *action, void *spt, int before, int after),
+
+ TP_ARGS(id, action, spt, before, after),
+
+ TP_STRUCT__entry(
+ __array(char, buf, MAX_BUF_LEN)
+ ),
+
+ TP_fast_assign(
+ snprintf(__entry->buf, MAX_BUF_LEN,
+ "VM%d [%s] spt %p before %d -> after %d\n",
+ id, action, spt, before, after);
+ ),
+
+ TP_printk("%s", __entry->buf)
+);
+
+TRACE_EVENT(spt_change,
+ TP_PROTO(int id, const char *action, void *spt, unsigned long gfn,
+ int type),
+
+ TP_ARGS(id, action, spt, gfn, type),
+
+ TP_STRUCT__entry(
+ __array(char, buf, MAX_BUF_LEN)
+ ),
+
+ TP_fast_assign(
+ snprintf(__entry->buf, MAX_BUF_LEN,
+ "VM%d [%s] spt %p gfn 0x%lx type %d\n",
+ id, action, spt, gfn, type);
+ ),
+
+ TP_printk("%s", __entry->buf)
+);
+
+TRACE_EVENT(gpt_change,
+ TP_PROTO(int id, const char *tag, void *spt, int type, u64 v,
+ unsigned long index),
+
+ TP_ARGS(id, tag, spt, type, v, index),
+
+ TP_STRUCT__entry(
+ __array(char, buf, MAX_BUF_LEN)
+ ),
+
+ TP_fast_assign(
+ snprintf(__entry->buf, MAX_BUF_LEN,
+ "VM%d [%s] spt %p type %d entry 0x%llx index 0x%lx\n",
+ id, tag, spt, type, v, index);
+ ),
+
+ TP_printk("%s", __entry->buf)
+);
+
+TRACE_EVENT(oos_change,
+ TP_PROTO(int id, const char *tag, int page_id, void *gpt, int type),
+
+ TP_ARGS(id, tag, page_id, gpt, type),
+
+ TP_STRUCT__entry(
+ __array(char, buf, MAX_BUF_LEN)
+ ),
+
+ TP_fast_assign(
+ snprintf(__entry->buf, MAX_BUF_LEN,
+ "VM%d [oos %s] page id %d gpt %p type %d\n",
+ id, tag, page_id, gpt, type);
+ ),
+
+ TP_printk("%s", __entry->buf)
+);
+
+TRACE_EVENT(oos_sync,
+ TP_PROTO(int id, int page_id, void *gpt, int type, u64 v,
+ unsigned long index),
+
+ TP_ARGS(id, page_id, gpt, type, v, index),
+
+ TP_STRUCT__entry(
+ __array(char, buf, MAX_BUF_LEN)
+ ),
+
+ TP_fast_assign(
+ snprintf(__entry->buf, MAX_BUF_LEN,
+ "VM%d [oos sync] page id %d gpt %p type %d entry 0x%llx index 0x%lx\n",
+ id, page_id, gpt, type, v, index);
+ ),
+
+ TP_printk("%s", __entry->buf)
+);
+
+#define MAX_CMD_STR_LEN 256
+TRACE_EVENT(gvt_command,
+ TP_PROTO(u8 vm_id, u8 ring_id, u32 ip_gma, u32 *cmd_va,
+ u32 cmd_len, bool ring_buffer_cmd,
+ cycles_t cost_pre_cmd_handler, cycles_t cost_cmd_handler),
+
+ TP_ARGS(vm_id, ring_id, ip_gma, cmd_va, cmd_len,
+ ring_buffer_cmd, cost_pre_cmd_handler, cost_cmd_handler),
+
+ TP_STRUCT__entry(
+ __field(u8, vm_id)
+ __field(u8, ring_id)
+ __field(int, i)
+ __array(char, tmp_buf, MAX_CMD_STR_LEN)
+ __array(char, cmd_str, MAX_CMD_STR_LEN)
+ ),
+
+ TP_fast_assign(
+ __entry->vm_id = vm_id;
+ __entry->ring_id = ring_id;
+ __entry->cmd_str[0] = '\0';
+ snprintf(__entry->tmp_buf, MAX_CMD_STR_LEN, "VM(%d) Ring(%d): %s ip(%08x) pre handler cost (%llu), handler cost (%llu) ", vm_id, ring_id, ring_buffer_cmd ? "RB":"BB", ip_gma, cost_pre_cmd_handler, cost_cmd_handler);
+ strcat(__entry->cmd_str, __entry->tmp_buf);
+ __entry->i = 0;
+ while (cmd_len > 0) {
+ if (cmd_len >= 8) {
+ snprintf(__entry->tmp_buf, MAX_CMD_STR_LEN, "%08x %08x %08x %08x %08x %08x %08x %08x ",
+ cmd_va[__entry->i], cmd_va[__entry->i+1], cmd_va[__entry->i+2], cmd_va[__entry->i+3],
+ cmd_va[__entry->i+4], cmd_va[__entry->i+5], cmd_va[__entry->i+6], cmd_va[__entry->i+7]);
+ __entry->i += 8;
+ cmd_len -= 8;
+ strcat(__entry->cmd_str, __entry->tmp_buf);
+ } else if (cmd_len >= 4) {
+ snprintf(__entry->tmp_buf, MAX_CMD_STR_LEN, "%08x %08x %08x %08x ",
+ cmd_va[__entry->i], cmd_va[__entry->i+1], cmd_va[__entry->i+2], cmd_va[__entry->i+3]);
+ __entry->i += 4;
+ cmd_len -= 4;
+ strcat(__entry->cmd_str, __entry->tmp_buf);
+ } else if (cmd_len >= 2) {
+ snprintf(__entry->tmp_buf, MAX_CMD_STR_LEN, "%08x %08x ", cmd_va[__entry->i], cmd_va[__entry->i+1]);
+ __entry->i += 2;
+ cmd_len -= 2;
+ strcat(__entry->cmd_str, __entry->tmp_buf);
+ } else if (cmd_len == 1) {
+ snprintf(__entry->tmp_buf, MAX_CMD_STR_LEN, "%08x ", cmd_va[__entry->i]);
+ __entry->i += 1;
+ cmd_len -= 1;
+ strcat(__entry->cmd_str, __entry->tmp_buf);
+ }
+ }
+ strcat(__entry->cmd_str, "\n");
+ ),
+
+ TP_printk("%s", __entry->cmd_str)
+);
+#endif /* _GVT_TRACE_H_ */
+
+/* This part must be out of protection */
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE trace
+#include <trace/define_trace.h>
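Each TRACE_EVENT(name, ...) above expands into a trace_name() emitter that the rest of gvt invokes at the appropriate spot. As a sketch (the real call sites live in the page-table shadowing code, not in this hunk), firing the spt_alloc event with the argument names from its TP_PROTO would look like:

        trace_spt_alloc(vgpu->id, spt, type, mfn, gpt_gfn);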
diff --git a/drivers/gpu/drm/i915/i915_gem_dmabuf.h b/drivers/gpu/drm/i915/gvt/trace_points.c
index 91315557e421..a3deed692b9c 100644
--- a/drivers/gpu/drm/i915/i915_gem_dmabuf.h
+++ b/drivers/gpu/drm/i915/gvt/trace_points.c
@@ -1,5 +1,5 @@
/*
- * Copyright 2016 Intel Corporation
+ * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -16,30 +16,21 @@
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ * Jike Song <jike.song@intel.com>
+ *
+ * Contributors:
+ * Zhi Wang <zhi.a.wang@intel.com>
*
*/
-#ifndef _I915_GEM_DMABUF_H_
-#define _I915_GEM_DMABUF_H_
-
-#include <linux/dma-buf.h>
-
-static inline struct reservation_object *
-i915_gem_object_get_dmabuf_resv(struct drm_i915_gem_object *obj)
-{
- struct dma_buf *dma_buf;
-
- if (obj->base.dma_buf)
- dma_buf = obj->base.dma_buf;
- else if (obj->base.import_attach)
- dma_buf = obj->base.import_attach->dmabuf;
- else
- return NULL;
-
- return dma_buf->resv;
-}
+#include "trace.h"
+#ifndef __CHECKER__
+#define CREATE_TRACE_POINTS
+#include "trace.h"
#endif
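The double include of trace.h above is deliberate: the first include merely declares the tracepoints, while the second, with CREATE_TRACE_POINTS defined, expands the TRACE_EVENT() macros into their definitions. This is the standard kernel pattern for instantiating tracepoints in exactly one translation unit, and the __CHECKER__ guard keeps sparse away from the generated code.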
diff --git a/drivers/gpu/drm/i915/gvt/vgpu.c b/drivers/gpu/drm/i915/gvt/vgpu.c
new file mode 100644
index 000000000000..536d2b9d5777
--- /dev/null
+++ b/drivers/gpu/drm/i915/gvt/vgpu.c
@@ -0,0 +1,409 @@
+/*
+ * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ * Eddie Dong <eddie.dong@intel.com>
+ * Kevin Tian <kevin.tian@intel.com>
+ *
+ * Contributors:
+ * Ping Gao <ping.a.gao@intel.com>
+ * Zhi Wang <zhi.a.wang@intel.com>
+ * Bing Niu <bing.niu@intel.com>
+ *
+ */
+
+#include "i915_drv.h"
+#include "gvt.h"
+#include "i915_pvinfo.h"
+
+static void clean_vgpu_mmio(struct intel_vgpu *vgpu)
+{
+ vfree(vgpu->mmio.vreg);
+ vgpu->mmio.vreg = vgpu->mmio.sreg = NULL;
+}
+
+int setup_vgpu_mmio(struct intel_vgpu *vgpu)
+{
+ struct intel_gvt *gvt = vgpu->gvt;
+ const struct intel_gvt_device_info *info = &gvt->device_info;
+
+ if (vgpu->mmio.vreg) {
+ memset(vgpu->mmio.vreg, 0, info->mmio_size * 2);
+ } else {
+ vgpu->mmio.vreg = vzalloc(info->mmio_size * 2);
+ if (!vgpu->mmio.vreg)
+ return -ENOMEM;
+ }
+
+ vgpu->mmio.sreg = vgpu->mmio.vreg + info->mmio_size;
+
+ memcpy(vgpu->mmio.vreg, gvt->firmware.mmio, info->mmio_size);
+ memcpy(vgpu->mmio.sreg, gvt->firmware.mmio, info->mmio_size);
+
+ vgpu_vreg(vgpu, GEN6_GT_THREAD_STATUS_REG) = 0;
+
+ /* set bits 0:2 (Core C-State) to C0 */
+ vgpu_vreg(vgpu, GEN6_GT_CORE_STATUS) = 0;
+ return 0;
+}
+
+static void setup_vgpu_cfg_space(struct intel_vgpu *vgpu,
+ struct intel_vgpu_creation_params *param)
+{
+ struct intel_gvt *gvt = vgpu->gvt;
+ const struct intel_gvt_device_info *info = &gvt->device_info;
+ u16 *gmch_ctl;
+ int i;
+
+ memcpy(vgpu_cfg_space(vgpu), gvt->firmware.cfg_space,
+ info->cfg_space_size);
+
+ if (!param->primary) {
+ vgpu_cfg_space(vgpu)[PCI_CLASS_DEVICE] =
+ INTEL_GVT_PCI_CLASS_VGA_OTHER;
+ vgpu_cfg_space(vgpu)[PCI_CLASS_PROG] =
+ INTEL_GVT_PCI_CLASS_VGA_OTHER;
+ }
+
+ /* Show the guest that there isn't any stolen memory. */
+ gmch_ctl = (u16 *)(vgpu_cfg_space(vgpu) + INTEL_GVT_PCI_GMCH_CONTROL);
+ *gmch_ctl &= ~(BDW_GMCH_GMS_MASK << BDW_GMCH_GMS_SHIFT);
+
+ intel_vgpu_write_pci_bar(vgpu, PCI_BASE_ADDRESS_2,
+ gvt_aperture_pa_base(gvt), true);
+
+ vgpu_cfg_space(vgpu)[PCI_COMMAND] &= ~(PCI_COMMAND_IO
+ | PCI_COMMAND_MEMORY
+ | PCI_COMMAND_MASTER);
+ /*
+ * Clear the upper 32 bits of the 64-bit BARs and let the guest
+ * assign the new values.
+ */
+ memset(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_1, 0, 4);
+ memset(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_3, 0, 4);
+ memset(vgpu_cfg_space(vgpu) + INTEL_GVT_PCI_OPREGION, 0, 4);
+
+ for (i = 0; i < INTEL_GVT_MAX_BAR_NUM; i++) {
+ vgpu->cfg_space.bar[i].size = pci_resource_len(
+ gvt->dev_priv->drm.pdev, i * 2);
+ vgpu->cfg_space.bar[i].tracked = false;
+ }
+}
+
+void populate_pvinfo_page(struct intel_vgpu *vgpu)
+{
+ /* setup the ballooning information */
+ vgpu_vreg64(vgpu, vgtif_reg(magic)) = VGT_MAGIC;
+ vgpu_vreg(vgpu, vgtif_reg(version_major)) = 1;
+ vgpu_vreg(vgpu, vgtif_reg(version_minor)) = 0;
+ vgpu_vreg(vgpu, vgtif_reg(display_ready)) = 0;
+ vgpu_vreg(vgpu, vgtif_reg(vgt_id)) = vgpu->id;
+ vgpu_vreg(vgpu, vgtif_reg(avail_rs.mappable_gmadr.base)) =
+ vgpu_aperture_gmadr_base(vgpu);
+ vgpu_vreg(vgpu, vgtif_reg(avail_rs.mappable_gmadr.size)) =
+ vgpu_aperture_sz(vgpu);
+ vgpu_vreg(vgpu, vgtif_reg(avail_rs.nonmappable_gmadr.base)) =
+ vgpu_hidden_gmadr_base(vgpu);
+ vgpu_vreg(vgpu, vgtif_reg(avail_rs.nonmappable_gmadr.size)) =
+ vgpu_hidden_sz(vgpu);
+
+ vgpu_vreg(vgpu, vgtif_reg(avail_rs.fence_num)) = vgpu_fence_sz(vgpu);
+
+ gvt_dbg_core("Populate PVINFO PAGE for vGPU %d\n", vgpu->id);
+ gvt_dbg_core("aperture base [GMADR] 0x%llx size 0x%llx\n",
+ vgpu_aperture_gmadr_base(vgpu), vgpu_aperture_sz(vgpu));
+ gvt_dbg_core("hidden base [GMADR] 0x%llx size=0x%llx\n",
+ vgpu_hidden_gmadr_base(vgpu), vgpu_hidden_sz(vgpu));
+ gvt_dbg_core("fence size %d\n", vgpu_fence_sz(vgpu));
+
+ WARN_ON(sizeof(struct vgt_if) != VGT_PVINFO_SIZE);
+}
+
+/**
+ * intel_gvt_init_vgpu_types - initialize vGPU type list
+ * @gvt: GVT device
+ *
+ * Initialize the vGPU type list based on the available resources.
+ *
+ */
+int intel_gvt_init_vgpu_types(struct intel_gvt *gvt)
+{
+ unsigned int num_types;
+ unsigned int i, low_avail;
+ unsigned int min_low;
+
+ /* A vGPU type name has the form GVTg_Vx_y, where 'x' encodes the
+ * physical GPU generation and 'y' is the maximum number of vGPU
+ * instances of that type which can be created on one physical GPU.
+ *
+ * Depending on the physical SKU resources, we may see vGPU types
+ * like GVTg_V4_8, GVTg_V4_4, GVTg_V4_2, etc. Different types of
+ * vGPU can be created on the same physical GPU as long as the
+ * resources allow; each type carries an "avail_instance" count of
+ * how many more instances of that type can still be created.
+ *
+ * A static size is used for now, since the types are initialized
+ * early (see the worked sizing example after this file's diff).
+ */
+ low_avail = MB_TO_BYTES(256) - HOST_LOW_GM_SIZE;
+ num_types = 4;
+
+ gvt->types = kcalloc(num_types, sizeof(struct intel_vgpu_type),
+ GFP_KERNEL);
+ if (!gvt->types)
+ return -ENOMEM;
+
+ min_low = MB_TO_BYTES(32);
+ for (i = 0; i < num_types; ++i) {
+ if (low_avail / min_low == 0)
+ break;
+ gvt->types[i].low_gm_size = min_low;
+ gvt->types[i].high_gm_size = 3 * gvt->types[i].low_gm_size;
+ gvt->types[i].fence = 4;
+ gvt->types[i].max_instance = low_avail / min_low;
+ gvt->types[i].avail_instance = gvt->types[i].max_instance;
+
+ if (IS_GEN8(gvt->dev_priv))
+ sprintf(gvt->types[i].name, "GVTg_V4_%u",
+ gvt->types[i].max_instance);
+ else if (IS_GEN9(gvt->dev_priv))
+ sprintf(gvt->types[i].name, "GVTg_V5_%u",
+ gvt->types[i].max_instance);
+
+ min_low <<= 1;
+ gvt_dbg_core("type[%d]: %s max %u avail %u low %u high %u fence %u\n",
+ i, gvt->types[i].name, gvt->types[i].max_instance,
+ gvt->types[i].avail_instance,
+ gvt->types[i].low_gm_size,
+ gvt->types[i].high_gm_size, gvt->types[i].fence);
+ }
+
+ gvt->num_types = i;
+ return 0;
+}
+
+void intel_gvt_clean_vgpu_types(struct intel_gvt *gvt)
+{
+ kfree(gvt->types);
+}
+
+static void intel_gvt_update_vgpu_types(struct intel_gvt *gvt)
+{
+ int i;
+ unsigned int low_gm_avail, high_gm_avail, fence_avail;
+ unsigned int low_gm_min, high_gm_min, fence_min, total_min;
+
+ /* This should depend on the maximum hw resource size, but keep
+ * the static config for now.
+ */
+ low_gm_avail = MB_TO_BYTES(256) - HOST_LOW_GM_SIZE -
+ gvt->gm.vgpu_allocated_low_gm_size;
+ high_gm_avail = MB_TO_BYTES(256) * 3 - HOST_HIGH_GM_SIZE -
+ gvt->gm.vgpu_allocated_high_gm_size;
+ fence_avail = gvt_fence_sz(gvt) - HOST_FENCE -
+ gvt->fence.vgpu_allocated_fence_num;
+
+ for (i = 0; i < gvt->num_types; i++) {
+ low_gm_min = low_gm_avail / gvt->types[i].low_gm_size;
+ high_gm_min = high_gm_avail / gvt->types[i].high_gm_size;
+ fence_min = fence_avail / gvt->types[i].fence;
+ total_min = min(min(low_gm_min, high_gm_min), fence_min);
+ gvt->types[i].avail_instance = min(gvt->types[i].max_instance,
+ total_min);
+
+ gvt_dbg_core("update type[%d]: %s max %u avail %u low %u high %u fence %u\n",
+ i, gvt->types[i].name, gvt->types[i].max_instance,
+ gvt->types[i].avail_instance, gvt->types[i].low_gm_size,
+ gvt->types[i].high_gm_size, gvt->types[i].fence);
+ }
+}
+
+/**
+ * intel_gvt_destroy_vgpu - destroy a virtual GPU
+ * @vgpu: virtual GPU
+ *
+ * This function is called when a user wants to destroy a virtual GPU.
+ *
+ */
+void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu)
+{
+ struct intel_gvt *gvt = vgpu->gvt;
+
+ mutex_lock(&gvt->lock);
+
+ vgpu->active = false;
+ idr_remove(&gvt->vgpu_idr, vgpu->id);
+
+ if (atomic_read(&vgpu->running_workload_num)) {
+ mutex_unlock(&gvt->lock);
+ intel_gvt_wait_vgpu_idle(vgpu);
+ mutex_lock(&gvt->lock);
+ }
+
+ intel_vgpu_stop_schedule(vgpu);
+ intel_vgpu_clean_sched_policy(vgpu);
+ intel_vgpu_clean_gvt_context(vgpu);
+ intel_vgpu_clean_execlist(vgpu);
+ intel_vgpu_clean_display(vgpu);
+ intel_vgpu_clean_opregion(vgpu);
+ intel_vgpu_clean_gtt(vgpu);
+ intel_gvt_hypervisor_detach_vgpu(vgpu);
+ intel_vgpu_free_resource(vgpu);
+ clean_vgpu_mmio(vgpu);
+ vfree(vgpu);
+
+ intel_gvt_update_vgpu_types(gvt);
+ mutex_unlock(&gvt->lock);
+}
+
+static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt,
+ struct intel_vgpu_creation_params *param)
+{
+ struct intel_vgpu *vgpu;
+ int ret;
+
+ gvt_dbg_core("handle %llu low %llu MB high %llu MB fence %llu\n",
+ param->handle, param->low_gm_sz, param->high_gm_sz,
+ param->fence_sz);
+
+ vgpu = vzalloc(sizeof(*vgpu));
+ if (!vgpu)
+ return ERR_PTR(-ENOMEM);
+
+ mutex_lock(&gvt->lock);
+
+ ret = idr_alloc(&gvt->vgpu_idr, vgpu, 1, GVT_MAX_VGPU, GFP_KERNEL);
+ if (ret < 0)
+ goto out_free_vgpu;
+
+ vgpu->id = ret;
+ vgpu->handle = param->handle;
+ vgpu->gvt = gvt;
+ bitmap_zero(vgpu->tlb_handle_pending, I915_NUM_ENGINES);
+
+ setup_vgpu_cfg_space(vgpu, param);
+
+ ret = setup_vgpu_mmio(vgpu);
+ if (ret)
+ goto out_free_vgpu;
+
+ ret = intel_vgpu_alloc_resource(vgpu, param);
+ if (ret)
+ goto out_clean_vgpu_mmio;
+
+ populate_pvinfo_page(vgpu);
+
+ ret = intel_gvt_hypervisor_attach_vgpu(vgpu);
+ if (ret)
+ goto out_clean_vgpu_resource;
+
+ ret = intel_vgpu_init_gtt(vgpu);
+ if (ret)
+ goto out_detach_hypervisor_vgpu;
+
+ ret = intel_vgpu_init_display(vgpu);
+ if (ret)
+ goto out_clean_gtt;
+
+ ret = intel_vgpu_init_execlist(vgpu);
+ if (ret)
+ goto out_clean_display;
+
+ ret = intel_vgpu_init_gvt_context(vgpu);
+ if (ret)
+ goto out_clean_execlist;
+
+ ret = intel_vgpu_init_sched_policy(vgpu);
+ if (ret)
+ goto out_clean_shadow_ctx;
+
+ vgpu->active = true;
+ mutex_unlock(&gvt->lock);
+
+ return vgpu;
+
+out_clean_shadow_ctx:
+ intel_vgpu_clean_gvt_context(vgpu);
+out_clean_execlist:
+ intel_vgpu_clean_execlist(vgpu);
+out_clean_display:
+ intel_vgpu_clean_display(vgpu);
+out_clean_gtt:
+ intel_vgpu_clean_gtt(vgpu);
+out_detach_hypervisor_vgpu:
+ intel_gvt_hypervisor_detach_vgpu(vgpu);
+out_clean_vgpu_resource:
+ intel_vgpu_free_resource(vgpu);
+out_clean_vgpu_mmio:
+ clean_vgpu_mmio(vgpu);
+out_free_vgpu:
+ vfree(vgpu);
+ mutex_unlock(&gvt->lock);
+ return ERR_PTR(ret);
+}
+
+/**
+ * intel_gvt_create_vgpu - create a virtual GPU
+ * @gvt: GVT device
+ * @type: type of the vGPU to create
+ *
+ * This function is called when a user wants to create a virtual GPU.
+ *
+ * Returns:
+ * pointer to intel_vgpu, error pointer if failed.
+ */
+struct intel_vgpu *intel_gvt_create_vgpu(struct intel_gvt *gvt,
+ struct intel_vgpu_type *type)
+{
+ struct intel_vgpu_creation_params param;
+ struct intel_vgpu *vgpu;
+
+ param.handle = 0;
+ param.primary = 1;
+ param.low_gm_sz = type->low_gm_size;
+ param.high_gm_sz = type->high_gm_size;
+ param.fence_sz = type->fence;
+
+ /* XXX creation params are currently expressed in MB */
+ param.low_gm_sz = BYTES_TO_MB(param.low_gm_sz);
+ param.high_gm_sz = BYTES_TO_MB(param.high_gm_sz);
+
+ vgpu = __intel_gvt_create_vgpu(gvt, &param);
+ if (IS_ERR(vgpu))
+ return vgpu;
+
+ /* recalculate the remaining available instances for each type */
+ intel_gvt_update_vgpu_types(gvt);
+
+ return vgpu;
+}
+
+/**
+ * intel_gvt_reset_vgpu - reset a virtual GPU
+ * @vgpu: virtual GPU
+ *
+ * This function is called when a user wants to reset a virtual GPU.
+ *
+ */
+void intel_gvt_reset_vgpu(struct intel_vgpu *vgpu)
+{
+}
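A worked example of the type sizing in intel_gvt_init_vgpu_types(), assuming (purely for illustration) that HOST_LOW_GM_SIZE leaves low_avail = 192 MB on a GEN8 part; min_low starts at 32 MB and doubles each round, and high_gm_size is always 3x low_gm_size:

        type 0: low 32 MB,  high 96 MB,  max_instance = 192/32 = 6  -> "GVTg_V4_6"
        type 1: low 64 MB,  high 192 MB, max_instance = 192/64 = 3  -> "GVTg_V4_3"
        type 2: low 128 MB, high 384 MB, max_instance = 192/128 = 1 -> "GVTg_V4_1"
        type 3: min_low = 256 MB > 192 MB, so low_avail / min_low == 0
                and the loop breaks with gvt->num_types = 3

As vGPUs are created and destroyed, intel_gvt_update_vgpu_types() shrinks each type's avail_instance to the minimum of what the remaining low GM, high GM and fences can still accommodate, capped at max_instance.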
diff --git a/drivers/gpu/drm/i915/i915_cmd_parser.c b/drivers/gpu/drm/i915/i915_cmd_parser.c
index 70980f82a15b..f5039f4f988f 100644
--- a/drivers/gpu/drm/i915/i915_cmd_parser.c
+++ b/drivers/gpu/drm/i915/i915_cmd_parser.c
@@ -1290,7 +1290,7 @@ int intel_engine_cmd_parser(struct intel_engine_cs *engine,
}
if (ret == 0 && needs_clflush_after)
- drm_clflush_virt_range(shadow_batch_obj->mapping, batch_len);
+ drm_clflush_virt_range(shadow_batch_obj->mm.mapping, batch_len);
i915_gem_object_unpin_map(shadow_batch_obj);
return ret;
@@ -1308,10 +1308,11 @@ int intel_engine_cmd_parser(struct intel_engine_cs *engine,
int i915_cmd_parser_get_version(struct drm_i915_private *dev_priv)
{
struct intel_engine_cs *engine;
+ enum intel_engine_id id;
bool active = false;
/* If the command parser is not enabled, report 0 - unsupported */
- for_each_engine(engine, dev_priv) {
+ for_each_engine(engine, dev_priv, id) {
if (intel_engine_needs_cmd_parser(engine)) {
active = true;
break;
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 27b0e34dadec..791bfc760075 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -79,10 +79,8 @@ static int i915_capabilities(struct seq_file *m, void *data)
seq_printf(m, "gen: %d\n", INTEL_GEN(dev_priv));
seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(dev_priv));
#define PRINT_FLAG(x) seq_printf(m, #x ": %s\n", yesno(info->x))
-#define SEP_SEMICOLON ;
- DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG, SEP_SEMICOLON);
+ DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG);
#undef PRINT_FLAG
-#undef SEP_SEMICOLON
return 0;
}
@@ -109,12 +107,12 @@ static char get_tiling_flag(struct drm_i915_gem_object *obj)
static char get_global_flag(struct drm_i915_gem_object *obj)
{
- return i915_gem_object_to_ggtt(obj, NULL) ? 'g' : ' ';
+ return !list_empty(&obj->userfault_link) ? 'g' : ' ';
}
static char get_pin_mapped_flag(struct drm_i915_gem_object *obj)
{
- return obj->mapping ? 'M' : ' ';
+ return obj->mm.mapping ? 'M' : ' ';
}
static u64 i915_gem_obj_total_ggtt_size(struct drm_i915_gem_object *obj)
@@ -138,11 +136,10 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
struct i915_vma *vma;
unsigned int frontbuffer_bits;
int pin_count = 0;
- enum intel_engine_id id;
lockdep_assert_held(&obj->base.dev->struct_mutex);
- seq_printf(m, "%pK: %c%c%c%c%c %8zdKiB %02x %02x [ ",
+ seq_printf(m, "%pK: %c%c%c%c%c %8zdKiB %02x %02x %s%s%s",
&obj->base,
get_active_flag(obj),
get_pin_flag(obj),
@@ -151,17 +148,10 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
get_pin_mapped_flag(obj),
obj->base.size / 1024,
obj->base.read_domains,
- obj->base.write_domain);
- for_each_engine_id(engine, dev_priv, id)
- seq_printf(m, "%x ",
- i915_gem_active_get_seqno(&obj->last_read[id],
- &obj->base.dev->struct_mutex));
- seq_printf(m, "] %x %s%s%s",
- i915_gem_active_get_seqno(&obj->last_write,
- &obj->base.dev->struct_mutex),
+ obj->base.write_domain,
i915_cache_level_str(dev_priv, obj->cache_level),
- obj->dirty ? " dirty" : "",
- obj->madv == I915_MADV_DONTNEED ? " purgeable" : "");
+ obj->mm.dirty ? " dirty" : "",
+ obj->mm.madv == I915_MADV_DONTNEED ? " purgeable" : "");
if (obj->base.name)
seq_printf(m, " (name: %d)", obj->base.name);
list_for_each_entry(vma, &obj->vma_list, obj_link) {
@@ -188,18 +178,8 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
}
if (obj->stolen)
seq_printf(m, " (stolen: %08llx)", obj->stolen->start);
- if (obj->pin_display || obj->fault_mappable) {
- char s[3], *t = s;
- if (obj->pin_display)
- *t++ = 'p';
- if (obj->fault_mappable)
- *t++ = 'f';
- *t = '\0';
- seq_printf(m, " (%s mappable)", s);
- }
-
- engine = i915_gem_active_get_engine(&obj->last_write,
- &dev_priv->drm.struct_mutex);
+
+ engine = i915_gem_object_last_write_engine(obj);
if (engine)
seq_printf(m, " (%s)", engine->name);
@@ -237,7 +217,7 @@ static int i915_gem_stolen_list_info(struct seq_file *m, void *data)
return ret;
total_obj_size = total_gtt_size = count = 0;
- list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
+ list_for_each_entry(obj, &dev_priv->mm.bound_list, global_link) {
if (obj->stolen == NULL)
continue;
@@ -247,7 +227,7 @@ static int i915_gem_stolen_list_info(struct seq_file *m, void *data)
total_gtt_size += i915_gem_obj_total_ggtt_size(obj);
count++;
}
- list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) {
+ list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_link) {
if (obj->stolen == NULL)
continue;
@@ -334,11 +314,12 @@ static void print_batch_pool_stats(struct seq_file *m,
struct drm_i915_gem_object *obj;
struct file_stats stats;
struct intel_engine_cs *engine;
+ enum intel_engine_id id;
int j;
memset(&stats, 0, sizeof(stats));
- for_each_engine(engine, dev_priv) {
+ for_each_engine(engine, dev_priv, id) {
for (j = 0; j < ARRAY_SIZE(engine->batch_pool.cache_list); j++) {
list_for_each_entry(obj,
&engine->batch_pool.cache_list[j],
@@ -402,23 +383,23 @@ static int i915_gem_object_info(struct seq_file *m, void *data)
if (ret)
return ret;
- seq_printf(m, "%u objects, %zu bytes\n",
+ seq_printf(m, "%u objects, %llu bytes\n",
dev_priv->mm.object_count,
dev_priv->mm.object_memory);
size = count = 0;
mapped_size = mapped_count = 0;
purgeable_size = purgeable_count = 0;
- list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) {
+ list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_link) {
size += obj->base.size;
++count;
- if (obj->madv == I915_MADV_DONTNEED) {
+ if (obj->mm.madv == I915_MADV_DONTNEED) {
purgeable_size += obj->base.size;
++purgeable_count;
}
- if (obj->mapping) {
+ if (obj->mm.mapping) {
mapped_count++;
mapped_size += obj->base.size;
}
@@ -426,7 +407,7 @@ static int i915_gem_object_info(struct seq_file *m, void *data)
seq_printf(m, "%u unbound objects, %llu bytes\n", count, size);
size = count = dpy_size = dpy_count = 0;
- list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
+ list_for_each_entry(obj, &dev_priv->mm.bound_list, global_link) {
size += obj->base.size;
++count;
@@ -435,12 +416,12 @@ static int i915_gem_object_info(struct seq_file *m, void *data)
++dpy_count;
}
- if (obj->madv == I915_MADV_DONTNEED) {
+ if (obj->mm.madv == I915_MADV_DONTNEED) {
purgeable_size += obj->base.size;
++purgeable_count;
}
- if (obj->mapping) {
+ if (obj->mm.mapping) {
mapped_count++;
mapped_size += obj->base.size;
}
@@ -512,7 +493,7 @@ static int i915_gem_gtt_info(struct seq_file *m, void *data)
return ret;
total_obj_size = total_gtt_size = count = 0;
- list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
+ list_for_each_entry(obj, &dev_priv->mm.bound_list, global_link) {
if (show_pin_display_only && !obj->pin_display)
continue;
@@ -566,12 +547,12 @@ static int i915_gem_pageflip_info(struct seq_file *m, void *data)
pipe, plane);
}
if (work->flip_queued_req) {
- struct intel_engine_cs *engine = i915_gem_request_get_engine(work->flip_queued_req);
+ struct intel_engine_cs *engine = work->flip_queued_req->engine;
seq_printf(m, "Flip queued on %s at seqno %x, next seqno %x [current breadcrumb %x], completed? %d\n",
engine->name,
- i915_gem_request_get_seqno(work->flip_queued_req),
- dev_priv->next_seqno,
+ work->flip_queued_req->global_seqno,
+ atomic_read(&dev_priv->gt.global_timeline.next_seqno),
intel_engine_get_seqno(engine),
i915_gem_request_completed(work->flip_queued_req));
} else
@@ -607,6 +588,7 @@ static int i915_gem_batch_pool_info(struct seq_file *m, void *data)
struct drm_device *dev = &dev_priv->drm;
struct drm_i915_gem_object *obj;
struct intel_engine_cs *engine;
+ enum intel_engine_id id;
int total = 0;
int ret, j;
@@ -614,7 +596,7 @@ static int i915_gem_batch_pool_info(struct seq_file *m, void *data)
if (ret)
return ret;
- for_each_engine(engine, dev_priv) {
+ for_each_engine(engine, dev_priv, id) {
for (j = 0; j < ARRAY_SIZE(engine->batch_pool.cache_list); j++) {
int count;
@@ -645,12 +627,24 @@ static int i915_gem_batch_pool_info(struct seq_file *m, void *data)
return 0;
}
+static void print_request(struct seq_file *m,
+ struct drm_i915_gem_request *rq,
+ const char *prefix)
+{
+ seq_printf(m, "%s%x [%x:%x] prio=%d @ %dms: %s\n", prefix,
+ rq->global_seqno, rq->ctx->hw_id, rq->fence.seqno,
+ rq->priotree.priority,
+ jiffies_to_msecs(jiffies - rq->emitted_jiffies),
+ rq->timeline->common->name);
+}
+
static int i915_gem_request_info(struct seq_file *m, void *data)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
struct drm_device *dev = &dev_priv->drm;
- struct intel_engine_cs *engine;
struct drm_i915_gem_request *req;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
int ret, any;
ret = mutex_lock_interruptible(&dev->struct_mutex);
@@ -658,29 +652,18 @@ static int i915_gem_request_info(struct seq_file *m, void *data)
return ret;
any = 0;
- for_each_engine(engine, dev_priv) {
+ for_each_engine(engine, dev_priv, id) {
int count;
count = 0;
- list_for_each_entry(req, &engine->request_list, link)
+ list_for_each_entry(req, &engine->timeline->requests, link)
count++;
if (count == 0)
continue;
seq_printf(m, "%s requests: %d\n", engine->name, count);
- list_for_each_entry(req, &engine->request_list, link) {
- struct pid *pid = req->ctx->pid;
- struct task_struct *task;
-
- rcu_read_lock();
- task = pid ? pid_task(pid, PIDTYPE_PID) : NULL;
- seq_printf(m, " %x @ %d: %s [%d]\n",
- req->fence.seqno,
- (int) (jiffies - req->emitted_jiffies),
- task ? task->comm : "<unknown>",
- task ? task->pid : -1);
- rcu_read_unlock();
- }
+ list_for_each_entry(req, &engine->timeline->requests, link)
+ print_request(m, req, " ");
any++;
}
@@ -701,22 +684,23 @@ static void i915_ring_seqno_info(struct seq_file *m,
seq_printf(m, "Current sequence (%s): %x\n",
engine->name, intel_engine_get_seqno(engine));
- spin_lock(&b->lock);
+ spin_lock_irq(&b->lock);
for (rb = rb_first(&b->waiters); rb; rb = rb_next(rb)) {
struct intel_wait *w = container_of(rb, typeof(*w), node);
seq_printf(m, "Waiting (%s): %s [%d] on %x\n",
engine->name, w->tsk->comm, w->tsk->pid, w->seqno);
}
- spin_unlock(&b->lock);
+ spin_unlock_irq(&b->lock);
}
static int i915_gem_seqno_info(struct seq_file *m, void *data)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
struct intel_engine_cs *engine;
+ enum intel_engine_id id;
- for_each_engine(engine, dev_priv)
+ for_each_engine(engine, dev_priv, id)
i915_ring_seqno_info(m, engine);
return 0;
@@ -727,6 +711,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
struct intel_engine_cs *engine;
+ enum intel_engine_id id;
int i, pipe;
intel_runtime_pm_get(dev_priv);
@@ -743,17 +728,32 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
I915_READ(VLV_IIR_RW));
seq_printf(m, "Display IMR:\t%08x\n",
I915_READ(VLV_IMR));
- for_each_pipe(dev_priv, pipe)
+ for_each_pipe(dev_priv, pipe) {
+ enum intel_display_power_domain power_domain;
+
+ power_domain = POWER_DOMAIN_PIPE(pipe);
+ if (!intel_display_power_get_if_enabled(dev_priv,
+ power_domain)) {
+ seq_printf(m, "Pipe %c power disabled\n",
+ pipe_name(pipe));
+ continue;
+ }
+
seq_printf(m, "Pipe %c stat:\t%08x\n",
pipe_name(pipe),
I915_READ(PIPESTAT(pipe)));
+ intel_display_power_put(dev_priv, power_domain);
+ }
+
+ intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
seq_printf(m, "Port hotplug:\t%08x\n",
I915_READ(PORT_HOTPLUG_EN));
seq_printf(m, "DPFLIPSTAT:\t%08x\n",
I915_READ(VLV_DPFLIPSTAT));
seq_printf(m, "DPINVGTT:\t%08x\n",
I915_READ(DPINVGTT));
+ intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
for (i = 0; i < 4; i++) {
seq_printf(m, "GT Interrupt IMR %d:\t%08x\n",
@@ -895,7 +895,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
seq_printf(m, "Graphics Interrupt mask: %08x\n",
I915_READ(GTIMR));
}
- for_each_engine(engine, dev_priv) {
+ for_each_engine(engine, dev_priv, id) {
if (INTEL_GEN(dev_priv) >= 6) {
seq_printf(m,
"Graphics Interrupt mask (%s): %08x\n",
@@ -935,26 +935,7 @@ static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
return 0;
}
-static int i915_hws_info(struct seq_file *m, void *data)
-{
- struct drm_info_node *node = m->private;
- struct drm_i915_private *dev_priv = node_to_i915(node);
- struct intel_engine_cs *engine;
- const u32 *hws;
- int i;
-
- engine = &dev_priv->engine[(uintptr_t)node->info_ent->data];
- hws = engine->status_page.page_addr;
- if (hws == NULL)
- return 0;
-
- for (i = 0; i < 4096 / sizeof(u32) / 4; i += 4) {
- seq_printf(m, "0x%08x: 0x%08x 0x%08x 0x%08x 0x%08x\n",
- i * 4,
- hws[i], hws[i + 1], hws[i + 2], hws[i + 3]);
- }
- return 0;
-}
+#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
static ssize_t
i915_error_state_write(struct file *filp,
@@ -1038,19 +1019,14 @@ static const struct file_operations i915_error_state_fops = {
.release = i915_error_state_release,
};
+#endif
+
static int
i915_next_seqno_get(void *data, u64 *val)
{
struct drm_i915_private *dev_priv = data;
- int ret;
-
- ret = mutex_lock_interruptible(&dev_priv->drm.struct_mutex);
- if (ret)
- return ret;
-
- *val = dev_priv->next_seqno;
- mutex_unlock(&dev_priv->drm.struct_mutex);
+ *val = 1 + atomic_read(&dev_priv->gt.global_timeline.next_seqno);
return 0;
}
@@ -1065,7 +1041,7 @@ i915_next_seqno_set(void *data, u64 val)
if (ret)
return ret;
- ret = i915_gem_set_seqno(dev, val);
+ ret = i915_gem_set_global_seqno(dev, val);
mutex_unlock(&dev->struct_mutex);
return ret;
@@ -1277,15 +1253,42 @@ out:
return ret;
}
+static void i915_instdone_info(struct drm_i915_private *dev_priv,
+ struct seq_file *m,
+ struct intel_instdone *instdone)
+{
+ int slice;
+ int subslice;
+
+ seq_printf(m, "\t\tINSTDONE: 0x%08x\n",
+ instdone->instdone);
+
+ if (INTEL_GEN(dev_priv) <= 3)
+ return;
+
+ seq_printf(m, "\t\tSC_INSTDONE: 0x%08x\n",
+ instdone->slice_common);
+
+ if (INTEL_GEN(dev_priv) <= 6)
+ return;
+
+ for_each_instdone_slice_subslice(dev_priv, slice, subslice)
+ seq_printf(m, "\t\tSAMPLER_INSTDONE[%d][%d]: 0x%08x\n",
+ slice, subslice, instdone->sampler[slice][subslice]);
+
+ for_each_instdone_slice_subslice(dev_priv, slice, subslice)
+ seq_printf(m, "\t\tROW_INSTDONE[%d][%d]: 0x%08x\n",
+ slice, subslice, instdone->row[slice][subslice]);
+}
+
static int i915_hangcheck_info(struct seq_file *m, void *unused)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
struct intel_engine_cs *engine;
u64 acthd[I915_NUM_ENGINES];
u32 seqno[I915_NUM_ENGINES];
- u32 instdone[I915_NUM_INSTDONE_REG];
+ struct intel_instdone instdone;
enum intel_engine_id id;
- int j;
if (test_bit(I915_WEDGED, &dev_priv->gpu_error.flags))
seq_printf(m, "Wedged\n");
@@ -1303,12 +1306,12 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused)
intel_runtime_pm_get(dev_priv);
- for_each_engine_id(engine, dev_priv, id) {
+ for_each_engine(engine, dev_priv, id) {
acthd[id] = intel_engine_get_active_head(engine);
seqno[id] = intel_engine_get_seqno(engine);
}
- i915_get_extra_instdone(dev_priv, instdone);
+ intel_engine_get_instdone(dev_priv->engine[RCS], &instdone);
intel_runtime_pm_put(dev_priv);
@@ -1319,16 +1322,27 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused)
} else
seq_printf(m, "Hangcheck inactive\n");
- for_each_engine_id(engine, dev_priv, id) {
+ for_each_engine(engine, dev_priv, id) {
+ struct intel_breadcrumbs *b = &engine->breadcrumbs;
+ struct rb_node *rb;
+
seq_printf(m, "%s:\n", engine->name);
seq_printf(m, "\tseqno = %x [current %x, last %x]\n",
- engine->hangcheck.seqno,
- seqno[id],
- engine->last_submitted_seqno);
+ engine->hangcheck.seqno, seqno[id],
+ intel_engine_last_submit(engine));
seq_printf(m, "\twaiters? %s, fake irq active? %s\n",
yesno(intel_engine_has_waiter(engine)),
yesno(test_bit(engine->id,
&dev_priv->gpu_error.missed_irq_rings)));
+ spin_lock_irq(&b->lock);
+ for (rb = rb_first(&b->waiters); rb; rb = rb_next(rb)) {
+ struct intel_wait *w = container_of(rb, typeof(*w), node);
+
+ seq_printf(m, "\t%s [%d] waiting for %x\n",
+ w->tsk->comm, w->tsk->pid, w->seqno);
+ }
+ spin_unlock_irq(&b->lock);
+
seq_printf(m, "\tACTHD = 0x%08llx [current 0x%08llx]\n",
(long long)engine->hangcheck.acthd,
(long long)acthd[id]);
@@ -1336,18 +1350,14 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused)
seq_printf(m, "\taction = %d\n", engine->hangcheck.action);
if (engine->id == RCS) {
- seq_puts(m, "\tinstdone read =");
+ seq_puts(m, "\tinstdone read =\n");
- for (j = 0; j < I915_NUM_INSTDONE_REG; j++)
- seq_printf(m, " 0x%08x", instdone[j]);
+ i915_instdone_info(dev_priv, m, &instdone);
- seq_puts(m, "\n\tinstdone accu =");
+ seq_puts(m, "\tinstdone accu =\n");
- for (j = 0; j < I915_NUM_INSTDONE_REG; j++)
- seq_printf(m, " 0x%08x",
- engine->hangcheck.instdone[j]);
-
- seq_puts(m, "\n");
+ i915_instdone_info(dev_priv, m,
+ &engine->hangcheck.instdone);
}
}
@@ -1357,14 +1367,9 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused)
static int ironlake_drpc_info(struct seq_file *m)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
- struct drm_device *dev = &dev_priv->drm;
u32 rgvmodectl, rstdbyctl;
u16 crstandvid;
- int ret;
- ret = mutex_lock_interruptible(&dev->struct_mutex);
- if (ret)
- return ret;
intel_runtime_pm_get(dev_priv);
rgvmodectl = I915_READ(MEMMODECTL);
@@ -1372,7 +1377,6 @@ static int ironlake_drpc_info(struct seq_file *m)
crstandvid = I915_READ16(CRSTANDVID);
intel_runtime_pm_put(dev_priv);
- mutex_unlock(&dev->struct_mutex);
seq_printf(m, "HD boost: %s\n", yesno(rgvmodectl & MEMMODE_BOOST_EN));
seq_printf(m, "Boost freq: %d\n",
@@ -1635,10 +1639,13 @@ static int i915_fbc_status(struct seq_file *m, void *unused)
seq_printf(m, "FBC disabled: %s\n",
dev_priv->fbc.no_fbc_reason);
- if (INTEL_GEN(dev_priv) >= 7)
+ if (intel_fbc_is_active(dev_priv) && INTEL_GEN(dev_priv) >= 7) {
+ uint32_t mask = INTEL_GEN(dev_priv) >= 8 ?
+ BDW_FBC_COMPRESSION_MASK :
+ IVB_FBC_COMPRESSION_MASK;
seq_printf(m, "Compressing: %s\n",
- yesno(I915_READ(FBC_STATUS2) &
- FBC_COMPRESSION_MASK));
+ yesno(I915_READ(FBC_STATUS2) & mask));
+ }
mutex_unlock(&dev_priv->fbc.lock);
intel_runtime_pm_put(dev_priv);
@@ -1717,6 +1724,7 @@ static int i915_sr_status(struct seq_file *m, void *unused)
bool sr_enabled = false;
intel_runtime_pm_get(dev_priv);
+ intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
if (HAS_PCH_SPLIT(dev_priv))
sr_enabled = I915_READ(WM1_LP_ILK) & WM1_LP_SR_EN;
@@ -1730,10 +1738,10 @@ static int i915_sr_status(struct seq_file *m, void *unused)
else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
sr_enabled = I915_READ(FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;
+ intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
intel_runtime_pm_put(dev_priv);
- seq_printf(m, "self-refresh: %s\n",
- sr_enabled ? "enabled" : "disabled");
+ seq_printf(m, "self-refresh: %s\n", enableddisabled(sr_enabled));
return 0;
}
@@ -1867,7 +1875,7 @@ static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
fbdev_fb->base.height,
fbdev_fb->base.depth,
fbdev_fb->base.bits_per_pixel,
- fbdev_fb->base.modifier[0],
+ fbdev_fb->base.modifier,
drm_framebuffer_read_refcount(&fbdev_fb->base));
describe_obj(m, fbdev_fb->obj);
seq_putc(m, '\n');
@@ -1885,7 +1893,7 @@ static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
fb->base.height,
fb->base.depth,
fb->base.bits_per_pixel,
- fb->base.modifier[0],
+ fb->base.modifier,
drm_framebuffer_read_refcount(&fb->base));
describe_obj(m, fb->obj);
seq_putc(m, '\n');
@@ -1909,6 +1917,7 @@ static int i915_context_status(struct seq_file *m, void *unused)
struct drm_device *dev = &dev_priv->drm;
struct intel_engine_cs *engine;
struct i915_gem_context *ctx;
+ enum intel_engine_id id;
int ret;
ret = mutex_lock_interruptible(&dev->struct_mutex);
@@ -1935,7 +1944,7 @@ static int i915_context_status(struct seq_file *m, void *unused)
seq_putc(m, ctx->remap_slice ? 'R' : 'r');
seq_putc(m, '\n');
- for_each_engine(engine, dev_priv) {
+ for_each_engine(engine, dev_priv, id) {
struct intel_context *ce = &ctx->engine[engine->id];
seq_printf(m, "%s: ", engine->name);
@@ -1974,7 +1983,7 @@ static void i915_dump_lrc_obj(struct seq_file *m,
seq_printf(m, "\tBound in GGTT at 0x%08x\n",
i915_ggtt_offset(vma));
- if (i915_gem_object_get_pages(vma->obj)) {
+ if (i915_gem_object_pin_pages(vma->obj)) {
seq_puts(m, "\tFailed to get pages for context object\n\n");
return;
}
@@ -1993,6 +2002,7 @@ static void i915_dump_lrc_obj(struct seq_file *m,
kunmap_atomic(reg_state);
}
+ i915_gem_object_unpin_pages(vma->obj);
seq_putc(m, '\n');
}
@@ -2002,6 +2012,7 @@ static int i915_dump_lrc(struct seq_file *m, void *unused)
struct drm_device *dev = &dev_priv->drm;
struct intel_engine_cs *engine;
struct i915_gem_context *ctx;
+ enum intel_engine_id id;
int ret;
if (!i915.enable_execlists) {
@@ -2014,7 +2025,7 @@ static int i915_dump_lrc(struct seq_file *m, void *unused)
return ret;
list_for_each_entry(ctx, &dev_priv->context_list, link)
- for_each_engine(engine, dev_priv)
+ for_each_engine(engine, dev_priv, id)
i915_dump_lrc_obj(m, ctx, engine);
mutex_unlock(&dev->struct_mutex);
@@ -2022,84 +2033,6 @@ static int i915_dump_lrc(struct seq_file *m, void *unused)
return 0;
}
-static int i915_execlists(struct seq_file *m, void *data)
-{
- struct drm_i915_private *dev_priv = node_to_i915(m->private);
- struct drm_device *dev = &dev_priv->drm;
- struct intel_engine_cs *engine;
- u32 status_pointer;
- u8 read_pointer;
- u8 write_pointer;
- u32 status;
- u32 ctx_id;
- struct list_head *cursor;
- int i, ret;
-
- if (!i915.enable_execlists) {
- seq_puts(m, "Logical Ring Contexts are disabled\n");
- return 0;
- }
-
- ret = mutex_lock_interruptible(&dev->struct_mutex);
- if (ret)
- return ret;
-
- intel_runtime_pm_get(dev_priv);
-
- for_each_engine(engine, dev_priv) {
- struct drm_i915_gem_request *head_req = NULL;
- int count = 0;
-
- seq_printf(m, "%s\n", engine->name);
-
- status = I915_READ(RING_EXECLIST_STATUS_LO(engine));
- ctx_id = I915_READ(RING_EXECLIST_STATUS_HI(engine));
- seq_printf(m, "\tExeclist status: 0x%08X, context: %u\n",
- status, ctx_id);
-
- status_pointer = I915_READ(RING_CONTEXT_STATUS_PTR(engine));
- seq_printf(m, "\tStatus pointer: 0x%08X\n", status_pointer);
-
- read_pointer = GEN8_CSB_READ_PTR(status_pointer);
- write_pointer = GEN8_CSB_WRITE_PTR(status_pointer);
- if (read_pointer > write_pointer)
- write_pointer += GEN8_CSB_ENTRIES;
- seq_printf(m, "\tRead pointer: 0x%08X, write pointer 0x%08X\n",
- read_pointer, write_pointer);
-
- for (i = 0; i < GEN8_CSB_ENTRIES; i++) {
- status = I915_READ(RING_CONTEXT_STATUS_BUF_LO(engine, i));
- ctx_id = I915_READ(RING_CONTEXT_STATUS_BUF_HI(engine, i));
-
- seq_printf(m, "\tStatus buffer %d: 0x%08X, context: %u\n",
- i, status, ctx_id);
- }
-
- spin_lock_bh(&engine->execlist_lock);
- list_for_each(cursor, &engine->execlist_queue)
- count++;
- head_req = list_first_entry_or_null(&engine->execlist_queue,
- struct drm_i915_gem_request,
- execlist_link);
- spin_unlock_bh(&engine->execlist_lock);
-
- seq_printf(m, "\t%d requests in queue\n", count);
- if (head_req) {
- seq_printf(m, "\tHead request context: %u\n",
- head_req->ctx->hw_id);
- seq_printf(m, "\tHead request tail: %u\n",
- head_req->tail);
- }
-
- seq_putc(m, '\n');
- }
-
- intel_runtime_pm_put(dev_priv);
- mutex_unlock(&dev->struct_mutex);
-
- return 0;
-}
-
static const char *swizzle_string(unsigned swizzle)
{
switch (swizzle) {
@@ -2127,12 +2060,7 @@ static const char *swizzle_string(unsigned swizzle)
static int i915_swizzle_info(struct seq_file *m, void *data)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
- struct drm_device *dev = &dev_priv->drm;
- int ret;
- ret = mutex_lock_interruptible(&dev->struct_mutex);
- if (ret)
- return ret;
intel_runtime_pm_get(dev_priv);
seq_printf(m, "bit6 swizzle for X-tiling = %s\n",
@@ -2172,7 +2100,6 @@ static int i915_swizzle_info(struct seq_file *m, void *data)
seq_puts(m, "L-shaped memory detected\n");
intel_runtime_pm_put(dev_priv);
- mutex_unlock(&dev->struct_mutex);
return 0;
}
@@ -2201,14 +2128,15 @@ static int per_file_ctx(int id, void *ptr, void *data)
static void gen8_ppgtt_info(struct seq_file *m,
struct drm_i915_private *dev_priv)
{
- struct intel_engine_cs *engine;
struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
int i;
if (!ppgtt)
return;
- for_each_engine(engine, dev_priv) {
+ for_each_engine(engine, dev_priv, id) {
seq_printf(m, "%s\n", engine->name);
for (i = 0; i < 4; i++) {
u64 pdp = I915_READ(GEN8_RING_PDP_UDW(engine, i));
@@ -2223,11 +2151,12 @@ static void gen6_ppgtt_info(struct seq_file *m,
struct drm_i915_private *dev_priv)
{
struct intel_engine_cs *engine;
+ enum intel_engine_id id;
if (IS_GEN6(dev_priv))
seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(GFX_MODE));
- for_each_engine(engine, dev_priv) {
+ for_each_engine(engine, dev_priv, id) {
seq_printf(m, "%s\n", engine->name);
if (IS_GEN7(dev_priv))
seq_printf(m, "GFX_MODE: 0x%08x\n",
@@ -2296,9 +2225,10 @@ out_unlock:
static int count_irq_waiters(struct drm_i915_private *i915)
{
struct intel_engine_cs *engine;
+ enum intel_engine_id id;
int count = 0;
- for_each_engine(engine, i915)
+ for_each_engine(engine, i915, id)
count += intel_engine_has_waiter(engine);
return count;
@@ -2325,8 +2255,8 @@ static int i915_rps_boost_info(struct seq_file *m, void *data)
struct drm_file *file;
seq_printf(m, "RPS enabled? %d\n", dev_priv->rps.enabled);
- seq_printf(m, "GPU busy? %s [%x]\n",
- yesno(dev_priv->gt.awake), dev_priv->gt.active_engines);
+ seq_printf(m, "GPU busy? %s [%d requests]\n",
+ yesno(dev_priv->gt.awake), dev_priv->gt.active_requests);
seq_printf(m, "CPU waiting? %d\n", count_irq_waiters(dev_priv));
seq_printf(m, "Frequency requested %d\n",
intel_gpu_freq(dev_priv, dev_priv->rps.cur_freq));
@@ -2361,7 +2291,7 @@ static int i915_rps_boost_info(struct seq_file *m, void *data)
if (INTEL_GEN(dev_priv) >= 6 &&
dev_priv->rps.enabled &&
- dev_priv->gt.active_engines) {
+ dev_priv->gt.active_requests) {
u32 rpup, rpupei;
u32 rpdown, rpdownei;
@@ -2442,6 +2372,32 @@ static int i915_guc_load_status_info(struct seq_file *m, void *data)
return 0;
}
+static void i915_guc_log_info(struct seq_file *m,
+ struct drm_i915_private *dev_priv)
+{
+ struct intel_guc *guc = &dev_priv->guc;
+
+ seq_puts(m, "\nGuC logging stats:\n");
+
+ seq_printf(m, "\tISR: flush count %10u, overflow count %10u\n",
+ guc->log.flush_count[GUC_ISR_LOG_BUFFER],
+ guc->log.total_overflow_count[GUC_ISR_LOG_BUFFER]);
+
+ seq_printf(m, "\tDPC: flush count %10u, overflow count %10u\n",
+ guc->log.flush_count[GUC_DPC_LOG_BUFFER],
+ guc->log.total_overflow_count[GUC_DPC_LOG_BUFFER]);
+
+ seq_printf(m, "\tCRASH: flush count %10u, overflow count %10u\n",
+ guc->log.flush_count[GUC_CRASH_DUMP_LOG_BUFFER],
+ guc->log.total_overflow_count[GUC_CRASH_DUMP_LOG_BUFFER]);
+
+ seq_printf(m, "\tTotal flush interrupt count: %u\n",
+ guc->log.flush_interrupt_count);
+
+ seq_printf(m, "\tCapture miss count: %u\n",
+ guc->log.capture_miss_count);
+}
+
static void i915_guc_client_info(struct seq_file *m,
struct drm_i915_private *dev_priv,
struct i915_guc_client *client)
@@ -2461,7 +2417,7 @@ static void i915_guc_client_info(struct seq_file *m,
seq_printf(m, "\tFailed doorbell: %u\n", client->b_fail);
seq_printf(m, "\tLast submission result: %d\n", client->retcode);
- for_each_engine_id(engine, dev_priv, id) {
+ for_each_engine(engine, dev_priv, id) {
u64 submissions = client->submissions[id];
tot += submissions;
seq_printf(m, "\tSubmissions: %llu %s\n",
@@ -2504,7 +2460,7 @@ static int i915_guc_info(struct seq_file *m, void *data)
seq_printf(m, "GuC last action error code: %d\n", guc.action_err);
seq_printf(m, "\nGuC submissions:\n");
- for_each_engine_id(engine, dev_priv, id) {
+ for_each_engine(engine, dev_priv, id) {
u64 submissions = guc.submissions[id];
total += submissions;
seq_printf(m, "\t%-24s: %10llu, last seqno 0x%08x\n",
@@ -2515,6 +2471,8 @@ static int i915_guc_info(struct seq_file *m, void *data)
seq_printf(m, "\nGuC execbuf client @ %p:\n", guc.execbuf_client);
i915_guc_client_info(m, dev_priv, &client);
+ i915_guc_log_info(m, dev_priv);
+
/* Add more as required ... */
return 0;
@@ -2526,10 +2484,10 @@ static int i915_guc_log_dump(struct seq_file *m, void *data)
struct drm_i915_gem_object *obj;
int i = 0, pg;
- if (!dev_priv->guc.log_vma)
+ if (!dev_priv->guc.log.vma)
return 0;
- obj = dev_priv->guc.log_vma->obj;
+ obj = dev_priv->guc.log.vma->obj;
for (pg = 0; pg < obj->base.size / PAGE_SIZE; pg++) {
u32 *log = kmap_atomic(i915_gem_object_get_page(obj, pg));
@@ -2546,6 +2504,44 @@ static int i915_guc_log_dump(struct seq_file *m, void *data)
return 0;
}
+static int i915_guc_log_control_get(void *data, u64 *val)
+{
+ struct drm_device *dev = data;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+
+ if (!dev_priv->guc.log.vma)
+ return -EINVAL;
+
+ *val = i915.guc_log_level;
+
+ return 0;
+}
+
+static int i915_guc_log_control_set(void *data, u64 val)
+{
+ struct drm_device *dev = data;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ int ret;
+
+ if (!dev_priv->guc.log.vma)
+ return -EINVAL;
+
+ ret = mutex_lock_interruptible(&dev->struct_mutex);
+ if (ret)
+ return ret;
+
+ intel_runtime_pm_get(dev_priv);
+ ret = i915_guc_log_control(dev_priv, val);
+ intel_runtime_pm_put(dev_priv);
+
+ mutex_unlock(&dev->struct_mutex);
+ return ret;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(i915_guc_log_control_fops,
+ i915_guc_log_control_get, i915_guc_log_control_set,
+ "%lld\n");
+
static int i915_edp_psr_status(struct seq_file *m, void *data)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
@@ -2575,11 +2571,22 @@ static int i915_edp_psr_status(struct seq_file *m, void *data)
enabled = I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE;
else {
for_each_pipe(dev_priv, pipe) {
+ enum transcoder cpu_transcoder =
+ intel_pipe_to_cpu_transcoder(dev_priv, pipe);
+ enum intel_display_power_domain power_domain;
+
+ power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
+ if (!intel_display_power_get_if_enabled(dev_priv,
+ power_domain))
+ continue;
+
stat[pipe] = I915_READ(VLV_PSRSTAT(pipe)) &
VLV_EDP_PSR_CURR_STATE_MASK;
if ((stat[pipe] == VLV_EDP_PSR_ACTIVE_NORFB_UP) ||
(stat[pipe] == VLV_EDP_PSR_ACTIVE_SF_UPDATE))
enabled = true;
+
+ intel_display_power_put(dev_priv, power_domain);
}
}
@@ -3004,7 +3011,7 @@ static void intel_plane_info(struct seq_file *m, struct intel_crtc *intel_crtc)
for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
struct drm_plane_state *state;
struct drm_plane *plane = &intel_plane->base;
- char *format_name;
+ struct drm_format_name_buf format_name;
if (!plane->state) {
seq_puts(m, "plane->state is NULL!\n");
@@ -3014,9 +3021,9 @@ static void intel_plane_info(struct seq_file *m, struct intel_crtc *intel_crtc)
state = plane->state;
if (state->fb) {
- format_name = drm_get_format_name(state->fb->pixel_format);
+ drm_get_format_name(state->fb->pixel_format, &format_name);
} else {
- format_name = kstrdup("N/A", GFP_KERNEL);
+ sprintf(format_name.str, "N/A");
}
seq_printf(m, "\t--Plane id %d: type=%s, crtc_pos=%4dx%4d, crtc_size=%4dx%4d, src_pos=%d.%04ux%d.%04u, src_size=%d.%04ux%d.%04u, format=%s, rotation=%s\n",
@@ -3032,10 +3039,8 @@ static void intel_plane_info(struct seq_file *m, struct intel_crtc *intel_crtc)
((state->src_w & 0xffff) * 15625) >> 10,
(state->src_h >> 16),
((state->src_h & 0xffff) * 15625) >> 10,
- format_name,
+ format_name.str,
plane_rotation(state->rotation));
-
- kfree(format_name);
}
}
@@ -3121,6 +3126,146 @@ static int i915_display_info(struct seq_file *m, void *unused)
return 0;
}
+static int i915_engine_info(struct seq_file *m, void *unused)
+{
+ struct drm_i915_private *dev_priv = node_to_i915(m->private);
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+
+ intel_runtime_pm_get(dev_priv);
+
+ for_each_engine(engine, dev_priv, id) {
+ struct intel_breadcrumbs *b = &engine->breadcrumbs;
+ struct drm_i915_gem_request *rq;
+ struct rb_node *rb;
+ u64 addr;
+
+ seq_printf(m, "%s\n", engine->name);
+ seq_printf(m, "\tcurrent seqno %x, last %x, hangcheck %x [score %d]\n",
+ intel_engine_get_seqno(engine),
+ intel_engine_last_submit(engine),
+ engine->hangcheck.seqno,
+ engine->hangcheck.score);
+
+ rcu_read_lock();
+
+ seq_printf(m, "\tRequests:\n");
+
+ rq = list_first_entry(&engine->timeline->requests,
+ struct drm_i915_gem_request, link);
+ if (&rq->link != &engine->timeline->requests)
+ print_request(m, rq, "\t\tfirst ");
+
+ rq = list_last_entry(&engine->timeline->requests,
+ struct drm_i915_gem_request, link);
+ if (&rq->link != &engine->timeline->requests)
+ print_request(m, rq, "\t\tlast ");
+
+ rq = i915_gem_find_active_request(engine);
+ if (rq) {
+ print_request(m, rq, "\t\tactive ");
+ seq_printf(m,
+ "\t\t[head %04x, postfix %04x, tail %04x, batch 0x%08x_%08x]\n",
+ rq->head, rq->postfix, rq->tail,
+ rq->batch ? upper_32_bits(rq->batch->node.start) : ~0u,
+ rq->batch ? lower_32_bits(rq->batch->node.start) : ~0u);
+ }
+
+ seq_printf(m, "\tRING_START: 0x%08x [0x%08x]\n",
+ I915_READ(RING_START(engine->mmio_base)),
+ rq ? i915_ggtt_offset(rq->ring->vma) : 0);
+ seq_printf(m, "\tRING_HEAD: 0x%08x [0x%08x]\n",
+ I915_READ(RING_HEAD(engine->mmio_base)) & HEAD_ADDR,
+ rq ? rq->ring->head : 0);
+ seq_printf(m, "\tRING_TAIL: 0x%08x [0x%08x]\n",
+ I915_READ(RING_TAIL(engine->mmio_base)) & TAIL_ADDR,
+ rq ? rq->ring->tail : 0);
+ seq_printf(m, "\tRING_CTL: 0x%08x [%s]\n",
+ I915_READ(RING_CTL(engine->mmio_base)),
+ I915_READ(RING_CTL(engine->mmio_base)) & (RING_WAIT | RING_WAIT_SEMAPHORE) ? "waiting" : "");
+
+ rcu_read_unlock();
+
+ addr = intel_engine_get_active_head(engine);
+ seq_printf(m, "\tACTHD: 0x%08x_%08x\n",
+ upper_32_bits(addr), lower_32_bits(addr));
+ addr = intel_engine_get_last_batch_head(engine);
+ seq_printf(m, "\tBBADDR: 0x%08x_%08x\n",
+ upper_32_bits(addr), lower_32_bits(addr));
+
+ if (i915.enable_execlists) {
+ u32 ptr, read, write;
+ struct rb_node *rb;
+
+ seq_printf(m, "\tExeclist status: 0x%08x %08x\n",
+ I915_READ(RING_EXECLIST_STATUS_LO(engine)),
+ I915_READ(RING_EXECLIST_STATUS_HI(engine)));
+
+ ptr = I915_READ(RING_CONTEXT_STATUS_PTR(engine));
+ read = GEN8_CSB_READ_PTR(ptr);
+ write = GEN8_CSB_WRITE_PTR(ptr);
+ seq_printf(m, "\tExeclist CSB read %d, write %d\n",
+ read, write);
+ if (read >= GEN8_CSB_ENTRIES)
+ read = 0;
+ if (write >= GEN8_CSB_ENTRIES)
+ write = 0;
+ if (read > write)
+ write += GEN8_CSB_ENTRIES;
+ while (read < write) {
+ unsigned int idx = ++read % GEN8_CSB_ENTRIES;
+
+ seq_printf(m, "\tExeclist CSB[%d]: 0x%08x, context: %d\n",
+ idx,
+ I915_READ(RING_CONTEXT_STATUS_BUF_LO(engine, idx)),
+ I915_READ(RING_CONTEXT_STATUS_BUF_HI(engine, idx)));
+ }
+
+ rcu_read_lock();
+ rq = READ_ONCE(engine->execlist_port[0].request);
+ if (rq)
+ print_request(m, rq, "\t\tELSP[0] ");
+ else
+ seq_printf(m, "\t\tELSP[0] idle\n");
+ rq = READ_ONCE(engine->execlist_port[1].request);
+ if (rq)
+ print_request(m, rq, "\t\tELSP[1] ");
+ else
+ seq_printf(m, "\t\tELSP[1] idle\n");
+ rcu_read_unlock();
+
+ spin_lock_irq(&engine->timeline->lock);
+ for (rb = engine->execlist_first; rb; rb = rb_next(rb)) {
+ rq = rb_entry(rb, typeof(*rq), priotree.node);
+ print_request(m, rq, "\t\tQ ");
+ }
+ spin_unlock_irq(&engine->timeline->lock);
+ } else if (INTEL_GEN(dev_priv) > 6) {
+ seq_printf(m, "\tPP_DIR_BASE: 0x%08x\n",
+ I915_READ(RING_PP_DIR_BASE(engine)));
+ seq_printf(m, "\tPP_DIR_BASE_READ: 0x%08x\n",
+ I915_READ(RING_PP_DIR_BASE_READ(engine)));
+ seq_printf(m, "\tPP_DIR_DCLV: 0x%08x\n",
+ I915_READ(RING_PP_DIR_DCLV(engine)));
+ }
+
+ spin_lock_irq(&b->lock);
+ for (rb = rb_first(&b->waiters); rb; rb = rb_next(rb)) {
+ struct intel_wait *w = container_of(rb, typeof(*w), node);
+
+ seq_printf(m, "\t%s [%d] waiting for %x\n",
+ w->tsk->comm, w->tsk->pid, w->seqno);
+ }
+ spin_unlock_irq(&b->lock);
+
+ seq_puts(m, "\n");
+ }
+
+ intel_runtime_pm_put(dev_priv);
+
+ return 0;
+}
+
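/*
 * Editorial sketch, not part of the patch: the CSB dump in
 * i915_engine_info() above walks a fixed ring of GEN8_CSB_ENTRIES slots.
 * A stand-alone model of that wrap-around walk (num stands in for
 * GEN8_CSB_ENTRIES; the full dump is readable from debugfs, e.g.
 * /sys/kernel/debug/dri/0/i915_engine_info -- path assumed from the
 * standard drm debugfs layout):
 */
static void example_walk_csb(unsigned int read, unsigned int write,
			     unsigned int num)
{
	if (read >= num)	/* normalise both pointers into [0, num) */
		read = 0;
	if (write >= num)
		write = 0;
	if (read > write)	/* unwrap so the loop below terminates */
		write += num;

	while (read < write) {
		unsigned int idx = ++read % num;

		pr_debug("CSB[%u] is the oldest entry not yet consumed\n",
			 idx);
	}
}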
static int i915_semaphore_status(struct seq_file *m, void *unused)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
@@ -3147,7 +3292,7 @@ static int i915_semaphore_status(struct seq_file *m, void *unused)
page = i915_gem_object_get_page(dev_priv->semaphore->obj, 0);
seqno = (uint64_t *)kmap_atomic(page);
- for_each_engine_id(engine, dev_priv, id) {
+ for_each_engine(engine, dev_priv, id) {
uint64_t offset;
seq_printf(m, "%s\n", engine->name);
@@ -3172,22 +3317,13 @@ static int i915_semaphore_status(struct seq_file *m, void *unused)
kunmap_atomic(seqno);
} else {
seq_puts(m, " Last signal:");
- for_each_engine(engine, dev_priv)
+ for_each_engine(engine, dev_priv, id)
for (j = 0; j < num_rings; j++)
seq_printf(m, "0x%08x\n",
I915_READ(engine->semaphore.mbox.signal[j]));
seq_putc(m, '\n');
}
- seq_puts(m, "\nSync seqno:\n");
- for_each_engine(engine, dev_priv) {
- for (j = 0; j < num_rings; j++)
- seq_printf(m, " 0x%08x ",
- engine->semaphore.sync_seqno[j]);
- seq_putc(m, '\n');
- }
- seq_putc(m, '\n');
-
intel_runtime_pm_put(dev_priv);
mutex_unlock(&dev->struct_mutex);
return 0;
@@ -3236,7 +3372,7 @@ static int i915_wa_registers(struct seq_file *m, void *unused)
intel_runtime_pm_get(dev_priv);
seq_printf(m, "Workarounds applied: %d\n", workarounds->count);
- for_each_engine_id(engine, dev_priv, id)
+ for_each_engine(engine, dev_priv, id)
seq_printf(m, "HW whitelist count for %s: %d\n",
engine->name, workarounds->hw_whitelist_count[id]);
for (i = 0; i < workarounds->count; ++i) {
@@ -3280,7 +3416,7 @@ static int i915_ddb_info(struct seq_file *m, void *unused)
for_each_pipe(dev_priv, pipe) {
seq_printf(m, "Pipe %c\n", pipe_name(pipe));
- for_each_plane(dev_priv, pipe, plane) {
+ for_each_universal_plane(dev_priv, pipe, plane) {
entry = &ddb->plane[pipe][plane];
seq_printf(m, " Plane%-8d%8u%8u%8u\n", plane + 1,
entry->start, entry->end,
@@ -3914,8 +4050,7 @@ static void hsw_trans_edp_pipe_A_crc_wa(struct drm_i915_private *dev_priv,
bool enable)
{
struct drm_device *dev = &dev_priv->drm;
- struct intel_crtc *crtc =
- to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_A]);
+ struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_A);
struct intel_crtc_state *pipe_config;
struct drm_atomic_state *state;
int ret = 0;
@@ -3941,10 +4076,9 @@ static void hsw_trans_edp_pipe_A_crc_wa(struct drm_i915_private *dev_priv,
ret = drm_atomic_commit(state);
out:
- drm_modeset_unlock_all(dev);
WARN(ret, "Toggling workaround to %i returns %i\n", enable, ret);
- if (ret)
- drm_atomic_state_free(state);
+ drm_modeset_unlock_all(dev);
+ drm_atomic_state_put(state);
}
static int ivb_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv,
@@ -3982,10 +4116,8 @@ static int pipe_crc_set_source(struct drm_i915_private *dev_priv,
enum pipe pipe,
enum intel_pipe_crc_source source)
{
- struct drm_device *dev = &dev_priv->drm;
struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
- struct intel_crtc *crtc =
- to_intel_crtc(intel_get_crtc_for_pipe(dev, pipe));
+ struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
enum intel_display_power_domain power_domain;
u32 val = 0; /* shut up gcc */
int ret;
@@ -4056,15 +4188,15 @@ static int pipe_crc_set_source(struct drm_i915_private *dev_priv,
/* real source -> none transition */
if (source == INTEL_PIPE_CRC_SOURCE_NONE) {
struct intel_pipe_crc_entry *entries;
- struct intel_crtc *crtc =
- to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
+ struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv,
+ pipe);
DRM_DEBUG_DRIVER("stopping CRCs for pipe %c\n",
pipe_name(pipe));
drm_modeset_lock(&crtc->base.mutex, NULL);
if (crtc->base.state->active)
- intel_wait_for_vblank(dev, pipe);
+ intel_wait_for_vblank(dev_priv, pipe);
drm_modeset_unlock(&crtc->base.mutex);
spin_lock_irq(&pipe_crc->lock);
@@ -4463,7 +4595,7 @@ static void wm_latency_show(struct seq_file *m, const uint16_t wm[8])
else if (IS_VALLEYVIEW(dev_priv))
num_levels = 1;
else
- num_levels = ilk_wm_max_level(dev) + 1;
+ num_levels = ilk_wm_max_level(dev_priv) + 1;
drm_modeset_lock_all(dev);
@@ -4579,7 +4711,7 @@ static ssize_t wm_latency_write(struct file *file, const char __user *ubuf,
else if (IS_VALLEYVIEW(dev_priv))
num_levels = 1;
else
- num_levels = ilk_wm_max_level(dev) + 1;
+ num_levels = ilk_wm_max_level(dev_priv) + 1;
if (len >= sizeof(tmp))
return -EINVAL;
@@ -4704,13 +4836,9 @@ i915_wedged_set(void *data, u64 val)
if (i915_reset_in_progress(&dev_priv->gpu_error))
return -EAGAIN;
- intel_runtime_pm_get(dev_priv);
-
i915_handle_error(dev_priv, val,
"Manually setting wedged to %llu", val);
- intel_runtime_pm_put(dev_priv);
-
return 0;
}
@@ -4778,10 +4906,12 @@ DEFINE_SIMPLE_ATTRIBUTE(i915_ring_test_irq_fops,
#define DROP_BOUND 0x2
#define DROP_RETIRE 0x4
#define DROP_ACTIVE 0x8
-#define DROP_ALL (DROP_UNBOUND | \
- DROP_BOUND | \
- DROP_RETIRE | \
- DROP_ACTIVE)
+#define DROP_FREED 0x10
+#define DROP_ALL (DROP_UNBOUND | \
+ DROP_BOUND | \
+ DROP_RETIRE | \
+ DROP_ACTIVE | \
+ DROP_FREED)
static int
i915_drop_caches_get(void *data, u64 *val)
{
@@ -4825,6 +4955,11 @@ i915_drop_caches_set(void *data, u64 val)
unlock:
mutex_unlock(&dev->struct_mutex);
+ if (val & DROP_FREED) {
+ synchronize_rcu();
+ flush_work(&dev_priv->mm.free_work);
+ }
+
return ret;
}
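/*
 * Editorial usage note, not part of the patch (debugfs path assumed from
 * the standard drm layout): userspace selects which caches to reap by
 * writing a bitmask of the DROP_* flags above, e.g.
 *
 *	echo 0x10 > /sys/kernel/debug/dri/0/i915_gem_drop_caches  # DROP_FREED
 *	echo 0x1f > /sys/kernel/debug/dri/0/i915_gem_drop_caches  # DROP_ALL
 *
 * With DROP_FREED set, the write returns only after synchronize_rcu() and
 * the flush of mm.free_work, i.e. once objects queued for destruction on
 * mm.free_list have actually been released.
 */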
@@ -4945,22 +5080,16 @@ static int
i915_cache_sharing_get(void *data, u64 *val)
{
struct drm_i915_private *dev_priv = data;
- struct drm_device *dev = &dev_priv->drm;
u32 snpcr;
- int ret;
if (!(IS_GEN6(dev_priv) || IS_GEN7(dev_priv)))
return -ENODEV;
- ret = mutex_lock_interruptible(&dev->struct_mutex);
- if (ret)
- return ret;
intel_runtime_pm_get(dev_priv);
snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
intel_runtime_pm_put(dev_priv);
- mutex_unlock(&dev->struct_mutex);
*val = (snpcr & GEN6_MBC_SNPCR_MASK) >> GEN6_MBC_SNPCR_SHIFT;
@@ -5253,10 +5382,6 @@ static const struct drm_info_list i915_debugfs_list[] = {
{"i915_gem_seqno", i915_gem_seqno_info, 0},
{"i915_gem_fence_regs", i915_gem_fence_regs_info, 0},
{"i915_gem_interrupt", i915_interrupt_info, 0},
- {"i915_gem_hws", i915_hws_info, 0, (void *)RCS},
- {"i915_gem_hws_blt", i915_hws_info, 0, (void *)BCS},
- {"i915_gem_hws_bsd", i915_hws_info, 0, (void *)VCS},
- {"i915_gem_hws_vebox", i915_hws_info, 0, (void *)VECS},
{"i915_gem_batch_pool", i915_gem_batch_pool_info, 0},
{"i915_guc_info", i915_guc_info, 0},
{"i915_guc_load_status", i915_guc_load_status_info, 0},
@@ -5275,7 +5400,6 @@ static const struct drm_info_list i915_debugfs_list[] = {
{"i915_gem_framebuffer", i915_gem_framebuffer_info, 0},
{"i915_context_status", i915_context_status, 0},
{"i915_dump_lrc", i915_dump_lrc, 0},
- {"i915_execlists", i915_execlists, 0},
{"i915_forcewake_domains", i915_forcewake_domains, 0},
{"i915_swizzle_info", i915_swizzle_info, 0},
{"i915_ppgtt_info", i915_ppgtt_info, 0},
@@ -5287,6 +5411,7 @@ static const struct drm_info_list i915_debugfs_list[] = {
{"i915_power_domain_info", i915_power_domain_info, 0},
{"i915_dmc_info", i915_dmc_info, 0},
{"i915_display_info", i915_display_info, 0},
+ {"i915_engine_info", i915_engine_info, 0},
{"i915_semaphore_status", i915_semaphore_status, 0},
{"i915_shared_dplls_info", i915_shared_dplls_info, 0},
{"i915_dp_mst_info", i915_dp_mst_info, 0},
@@ -5309,7 +5434,9 @@ static const struct i915_debugfs_files {
{"i915_ring_missed_irq", &i915_ring_missed_irq_fops},
{"i915_ring_test_irq", &i915_ring_test_irq_fops},
{"i915_gem_drop_caches", &i915_drop_caches_fops},
+#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
{"i915_error_state", &i915_error_state_fops},
+#endif
{"i915_next_seqno", &i915_next_seqno_fops},
{"i915_display_crc_ctl", &i915_display_crc_ctl_fops},
{"i915_pri_wm_latency", &i915_pri_wm_latency_fops},
@@ -5318,7 +5445,8 @@ static const struct i915_debugfs_files {
{"i915_fbc_false_color", &i915_fbc_fc_fops},
{"i915_dp_test_data", &i915_displayport_test_data_fops},
{"i915_dp_test_type", &i915_displayport_test_type_fops},
- {"i915_dp_test_active", &i915_displayport_test_active_fops}
+ {"i915_dp_test_active", &i915_displayport_test_active_fops},
+ {"i915_guc_log_control", &i915_guc_log_control_fops}
};
void intel_display_crc_init(struct drm_i915_private *dev_priv)
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 18dfdd5c1b3b..445fec9c2841 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -114,7 +114,7 @@ static bool i915_error_injected(struct drm_i915_private *dev_priv)
fmt, ##__VA_ARGS__)
-static enum intel_pch intel_virt_detect_pch(struct drm_device *dev)
+static enum intel_pch intel_virt_detect_pch(struct drm_i915_private *dev_priv)
{
enum intel_pch ret = PCH_NOP;
@@ -125,16 +125,16 @@ static enum intel_pch intel_virt_detect_pch(struct drm_device *dev)
* make an educated guess as to which PCH is really there.
*/
- if (IS_GEN5(dev)) {
+ if (IS_GEN5(dev_priv)) {
ret = PCH_IBX;
DRM_DEBUG_KMS("Assuming Ibex Peak PCH\n");
- } else if (IS_GEN6(dev) || IS_IVYBRIDGE(dev)) {
+ } else if (IS_GEN6(dev_priv) || IS_IVYBRIDGE(dev_priv)) {
ret = PCH_CPT;
DRM_DEBUG_KMS("Assuming CouarPoint PCH\n");
- } else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
+ } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
ret = PCH_LPT;
DRM_DEBUG_KMS("Assuming LynxPoint PCH\n");
- } else if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
+ } else if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
ret = PCH_SPT;
DRM_DEBUG_KMS("Assuming SunrisePoint PCH\n");
}
@@ -150,7 +150,7 @@ static void intel_detect_pch(struct drm_device *dev)
/* In all current cases, num_pipes is equivalent to the PCH_NOP setting
* (which really amounts to a PCH but no South Display).
*/
- if (INTEL_INFO(dev)->num_pipes == 0) {
+ if (INTEL_INFO(dev_priv)->num_pipes == 0) {
dev_priv->pch_type = PCH_NOP;
return;
}
@@ -174,40 +174,46 @@ static void intel_detect_pch(struct drm_device *dev)
if (id == INTEL_PCH_IBX_DEVICE_ID_TYPE) {
dev_priv->pch_type = PCH_IBX;
DRM_DEBUG_KMS("Found Ibex Peak PCH\n");
- WARN_ON(!IS_GEN5(dev));
+ WARN_ON(!IS_GEN5(dev_priv));
} else if (id == INTEL_PCH_CPT_DEVICE_ID_TYPE) {
dev_priv->pch_type = PCH_CPT;
DRM_DEBUG_KMS("Found CougarPoint PCH\n");
- WARN_ON(!(IS_GEN6(dev) || IS_IVYBRIDGE(dev)));
+ WARN_ON(!(IS_GEN6(dev_priv) ||
+ IS_IVYBRIDGE(dev_priv)));
} else if (id == INTEL_PCH_PPT_DEVICE_ID_TYPE) {
/* PantherPoint is CPT compatible */
dev_priv->pch_type = PCH_CPT;
DRM_DEBUG_KMS("Found PantherPoint PCH\n");
- WARN_ON(!(IS_GEN6(dev) || IS_IVYBRIDGE(dev)));
+ WARN_ON(!(IS_GEN6(dev_priv) ||
+ IS_IVYBRIDGE(dev_priv)));
} else if (id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
dev_priv->pch_type = PCH_LPT;
DRM_DEBUG_KMS("Found LynxPoint PCH\n");
- WARN_ON(!IS_HASWELL(dev) && !IS_BROADWELL(dev));
- WARN_ON(IS_HSW_ULT(dev) || IS_BDW_ULT(dev));
+ WARN_ON(!IS_HASWELL(dev_priv) &&
+ !IS_BROADWELL(dev_priv));
+ WARN_ON(IS_HSW_ULT(dev_priv) ||
+ IS_BDW_ULT(dev_priv));
} else if (id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
dev_priv->pch_type = PCH_LPT;
DRM_DEBUG_KMS("Found LynxPoint LP PCH\n");
- WARN_ON(!IS_HASWELL(dev) && !IS_BROADWELL(dev));
- WARN_ON(!IS_HSW_ULT(dev) && !IS_BDW_ULT(dev));
+ WARN_ON(!IS_HASWELL(dev_priv) &&
+ !IS_BROADWELL(dev_priv));
+ WARN_ON(!IS_HSW_ULT(dev_priv) &&
+ !IS_BDW_ULT(dev_priv));
} else if (id == INTEL_PCH_SPT_DEVICE_ID_TYPE) {
dev_priv->pch_type = PCH_SPT;
DRM_DEBUG_KMS("Found SunrisePoint PCH\n");
- WARN_ON(!IS_SKYLAKE(dev) &&
- !IS_KABYLAKE(dev));
+ WARN_ON(!IS_SKYLAKE(dev_priv) &&
+ !IS_KABYLAKE(dev_priv));
} else if (id == INTEL_PCH_SPT_LP_DEVICE_ID_TYPE) {
dev_priv->pch_type = PCH_SPT;
DRM_DEBUG_KMS("Found SunrisePoint LP PCH\n");
- WARN_ON(!IS_SKYLAKE(dev) &&
- !IS_KABYLAKE(dev));
+ WARN_ON(!IS_SKYLAKE(dev_priv) &&
+ !IS_KABYLAKE(dev_priv));
} else if (id == INTEL_PCH_KBP_DEVICE_ID_TYPE) {
dev_priv->pch_type = PCH_KBP;
DRM_DEBUG_KMS("Found KabyPoint PCH\n");
- WARN_ON(!IS_KABYLAKE(dev));
+ WARN_ON(!IS_KABYLAKE(dev_priv));
} else if ((id == INTEL_PCH_P2X_DEVICE_ID_TYPE) ||
(id == INTEL_PCH_P3X_DEVICE_ID_TYPE) ||
((id == INTEL_PCH_QEMU_DEVICE_ID_TYPE) &&
@@ -215,7 +221,8 @@ static void intel_detect_pch(struct drm_device *dev)
PCI_SUBVENDOR_ID_REDHAT_QUMRANET &&
pch->subsystem_device ==
PCI_SUBDEVICE_ID_QEMU)) {
- dev_priv->pch_type = intel_virt_detect_pch(dev);
+ dev_priv->pch_type =
+ intel_virt_detect_pch(dev_priv);
} else
continue;
@@ -255,16 +262,16 @@ static int i915_getparam(struct drm_device *dev, void *data,
value = dev_priv->overlay ? 1 : 0;
break;
case I915_PARAM_HAS_BSD:
- value = intel_engine_initialized(&dev_priv->engine[VCS]);
+ value = !!dev_priv->engine[VCS];
break;
case I915_PARAM_HAS_BLT:
- value = intel_engine_initialized(&dev_priv->engine[BCS]);
+ value = !!dev_priv->engine[BCS];
break;
case I915_PARAM_HAS_VEBOX:
- value = intel_engine_initialized(&dev_priv->engine[VECS]);
+ value = !!dev_priv->engine[VECS];
break;
case I915_PARAM_HAS_BSD2:
- value = intel_engine_initialized(&dev_priv->engine[VCS2]);
+ value = !!dev_priv->engine[VCS2];
break;
case I915_PARAM_HAS_EXEC_CONSTANTS:
value = INTEL_GEN(dev_priv) >= 4;
@@ -316,6 +323,10 @@ static int i915_getparam(struct drm_device *dev, void *data,
*/
value = i915_gem_mmap_gtt_version();
break;
+ case I915_PARAM_HAS_SCHEDULER:
+ value = dev_priv->engine[RCS] &&
+ dev_priv->engine[RCS]->schedule;
+ break;
case I915_PARAM_MMAP_VERSION:
/* Remember to bump this if the version changes! */
case I915_PARAM_HAS_GEM:
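/*
 * Editorial sketch, not part of the patch: the userspace side of the new
 * I915_PARAM_HAS_SCHEDULER query, using the libdrm drmIoctl() wrapper
 * (error handling elided):
 */
#include <string.h>
#include <xf86drm.h>
#include <i915_drm.h>

static int example_has_scheduler(int fd)
{
	struct drm_i915_getparam gp;
	int value = 0;

	memset(&gp, 0, sizeof(gp));
	gp.param = I915_PARAM_HAS_SCHEDULER;
	gp.value = &value;
	if (drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp))
		return 0;

	return value;
}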
@@ -367,12 +378,12 @@ static int
intel_alloc_mchbar_resource(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
- int reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
+ int reg = INTEL_GEN(dev_priv) >= 4 ? MCHBAR_I965 : MCHBAR_I915;
u32 temp_lo, temp_hi = 0;
u64 mchbar_addr;
int ret;
- if (INTEL_INFO(dev)->gen >= 4)
+ if (INTEL_GEN(dev_priv) >= 4)
pci_read_config_dword(dev_priv->bridge_dev, reg + 4, &temp_hi);
pci_read_config_dword(dev_priv->bridge_dev, reg, &temp_lo);
mchbar_addr = ((u64)temp_hi << 32) | temp_lo;
@@ -399,7 +410,7 @@ intel_alloc_mchbar_resource(struct drm_device *dev)
return ret;
}
- if (INTEL_INFO(dev)->gen >= 4)
+ if (INTEL_GEN(dev_priv) >= 4)
pci_write_config_dword(dev_priv->bridge_dev, reg + 4,
upper_32_bits(dev_priv->mch_res.start));
@@ -413,16 +424,16 @@ static void
intel_setup_mchbar(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
- int mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
+ int mchbar_reg = INTEL_GEN(dev_priv) >= 4 ? MCHBAR_I965 : MCHBAR_I915;
u32 temp;
bool enabled;
- if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
+ if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
return;
dev_priv->mchbar_need_disable = false;
- if (IS_I915G(dev) || IS_I915GM(dev)) {
+ if (IS_I915G(dev_priv) || IS_I915GM(dev_priv)) {
pci_read_config_dword(dev_priv->bridge_dev, DEVEN, &temp);
enabled = !!(temp & DEVEN_MCHBAR_EN);
} else {
@@ -440,7 +451,7 @@ intel_setup_mchbar(struct drm_device *dev)
dev_priv->mchbar_need_disable = true;
/* Space is allocated or reserved, so enable it. */
- if (IS_I915G(dev) || IS_I915GM(dev)) {
+ if (IS_I915G(dev_priv) || IS_I915GM(dev_priv)) {
pci_write_config_dword(dev_priv->bridge_dev, DEVEN,
temp | DEVEN_MCHBAR_EN);
} else {
@@ -453,10 +464,10 @@ static void
intel_teardown_mchbar(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
- int mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
+ int mchbar_reg = INTEL_GEN(dev_priv) >= 4 ? MCHBAR_I965 : MCHBAR_I915;
if (dev_priv->mchbar_need_disable) {
- if (IS_I915G(dev) || IS_I915GM(dev)) {
+ if (IS_I915G(dev_priv) || IS_I915GM(dev_priv)) {
u32 deven_val;
pci_read_config_dword(dev_priv->bridge_dev, DEVEN,
@@ -484,7 +495,7 @@ static unsigned int i915_vga_set_decode(void *cookie, bool state)
{
struct drm_device *dev = cookie;
- intel_modeset_vga_set_state(dev, state);
+ intel_modeset_vga_set_state(to_i915(dev), state);
if (state)
return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
@@ -530,40 +541,17 @@ static const struct vga_switcheroo_client_ops i915_switcheroo_ops = {
.can_switch = i915_switcheroo_can_switch,
};
-static void i915_gem_fini(struct drm_device *dev)
+static void i915_gem_fini(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
+ mutex_lock(&dev_priv->drm.struct_mutex);
+ i915_gem_cleanup_engines(&dev_priv->drm);
+ i915_gem_context_fini(&dev_priv->drm);
+ mutex_unlock(&dev_priv->drm.struct_mutex);
- /*
- * Neither the BIOS, ourselves or any other kernel
- * expects the system to be in execlists mode on startup,
- * so we need to reset the GPU back to legacy mode. And the only
- * known way to disable logical contexts is through a GPU reset.
- *
- * So in order to leave the system in a known default configuration,
- * always reset the GPU upon unload. Afterwards we then clean up the
- * GEM state tracking, flushing off the requests and leaving the
- * system in a known idle state.
- *
- * Note that it is of the utmost importance that the GPU is idle and
- * all stray writes are flushed *before* we dismantle the backing
- * storage for the pinned objects.
- *
- * However, since we are uncertain that resetting the GPU on older
- * machines is a good idea, we don't - just in case it leaves the
- * machine in an unusable condition.
- */
- if (HAS_HW_CONTEXTS(dev)) {
- int reset = intel_gpu_reset(dev_priv, ALL_ENGINES);
- WARN_ON(reset && reset != -ENODEV);
- }
-
- mutex_lock(&dev->struct_mutex);
- i915_gem_cleanup_engines(dev);
- i915_gem_context_fini(dev);
- mutex_unlock(&dev->struct_mutex);
+ rcu_barrier();
+ flush_work(&dev_priv->mm.free_work);
- WARN_ON(!list_empty(&to_i915(dev)->context_list));
+ WARN_ON(!list_empty(&dev_priv->context_list));
}
static int i915_load_modeset_init(struct drm_device *dev)
@@ -611,7 +599,9 @@ static int i915_load_modeset_init(struct drm_device *dev)
/* Important: The output setup functions called by modeset_init need
* working irqs for e.g. gmbus and dp aux transfers. */
- intel_modeset_init(dev);
+ ret = intel_modeset_init(dev);
+ if (ret)
+ goto cleanup_irq;
intel_guc_init(dev);
@@ -621,7 +611,7 @@ static int i915_load_modeset_init(struct drm_device *dev)
intel_modeset_gem_init(dev);
- if (INTEL_INFO(dev)->num_pipes == 0)
+ if (INTEL_INFO(dev_priv)->num_pipes == 0)
return 0;
ret = intel_fbdev_init(dev);
@@ -636,7 +626,9 @@ static int i915_load_modeset_init(struct drm_device *dev)
return 0;
cleanup_gem:
- i915_gem_fini(dev);
+ if (i915_gem_suspend(dev))
+ DRM_ERROR("failed to idle hardware; continuing to unload!\n");
+ i915_gem_fini(dev_priv);
cleanup_irq:
intel_guc_fini(dev);
drm_irq_uninstall(dev);
@@ -771,6 +763,19 @@ static void i915_workqueues_cleanup(struct drm_i915_private *dev_priv)
destroy_workqueue(dev_priv->wq);
}
+/*
+ * We don't keep the workarounds for pre-production hardware, so we expect our
+ * driver to fail on these machines in one way or another. A little warning on
+ * dmesg may help both the user and the bug triagers.
+ */
+static void intel_detect_preproduction_hw(struct drm_i915_private *dev_priv)
+{
+ if (IS_HSW_EARLY_SDV(dev_priv) ||
+ IS_SKL_REVID(dev_priv, 0, SKL_REVID_F0))
+ DRM_ERROR("This is a pre-production stepping. "
+ "It may not be fully functional.\n");
+}
+
/**
* i915_driver_init_early - setup state not requiring device access
* @dev_priv: device private
@@ -829,25 +834,24 @@ static int i915_driver_init_early(struct drm_i915_private *dev_priv,
intel_init_dpio(dev_priv);
intel_power_domains_init(dev_priv);
intel_irq_init(dev_priv);
+ intel_hangcheck_init(dev_priv);
intel_init_display_hooks(dev_priv);
intel_init_clock_gating_hooks(dev_priv);
intel_init_audio_hooks(dev_priv);
- i915_gem_load_init(&dev_priv->drm);
+ ret = i915_gem_load_init(&dev_priv->drm);
+ if (ret < 0)
+ goto err_gvt;
intel_display_crc_init(dev_priv);
intel_device_info_dump(dev_priv);
- /* Not all pre-production machines fall into this category, only the
- * very first ones. Almost everything should work, except for maybe
- * suspend/resume. And we don't implement workarounds that affect only
- * pre-production machines. */
- if (IS_HSW_EARLY_SDV(dev_priv))
- DRM_INFO("This is an early pre-production Haswell machine. "
- "It may not be fully functional.\n");
+ intel_detect_preproduction_hw(dev_priv);
return 0;
+err_gvt:
+ intel_gvt_cleanup(dev_priv);
err_workqueues:
i915_workqueues_cleanup(dev_priv);
return ret;
@@ -870,7 +874,7 @@ static int i915_mmio_setup(struct drm_device *dev)
int mmio_bar;
int mmio_size;
- mmio_bar = IS_GEN2(dev) ? 1 : 0;
+ mmio_bar = IS_GEN2(dev_priv) ? 1 : 0;
/*
* Before gen4, the registers and the GTT are behind different BARs.
* However, from gen4 onwards, the registers and the GTT are shared
@@ -879,7 +883,7 @@ static int i915_mmio_setup(struct drm_device *dev)
* the register BAR remains the same size for all the earlier
* generations up to Ironlake.
*/
- if (INTEL_INFO(dev)->gen < 5)
+ if (INTEL_GEN(dev_priv) < 5)
mmio_size = 512 * 1024;
else
mmio_size = 2 * 1024 * 1024;
@@ -982,7 +986,6 @@ static void intel_sanitize_options(struct drm_i915_private *dev_priv)
static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
{
struct pci_dev *pdev = dev_priv->drm.pdev;
- struct drm_device *dev = &dev_priv->drm;
int ret;
if (i915_inject_load_failure())
@@ -1023,7 +1026,7 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
pci_set_master(pdev);
/* overlay on gen2 is broken and can't address above 1G */
- if (IS_GEN2(dev)) {
+ if (IS_GEN2(dev_priv)) {
ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(30));
if (ret) {
DRM_ERROR("failed to set DMA mask\n");
@@ -1040,7 +1043,7 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
* behaviour if any general state is accessed within a page above 4GB,
* which also needs to be handled carefully.
*/
- if (IS_BROADWATER(dev) || IS_CRESTLINE(dev)) {
+ if (IS_BROADWATER(dev_priv) || IS_CRESTLINE(dev_priv)) {
ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
if (ret) {
@@ -1070,7 +1073,7 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
* be lost or delayed, but we use them anyway to avoid
* stuck interrupts on some machines.
*/
- if (!IS_I945G(dev) && !IS_I945GM(dev)) {
+ if (!IS_I945G(dev_priv) && !IS_I945GM(dev_priv)) {
if (pci_enable_msi(pdev) < 0)
DRM_DEBUG_DRIVER("can't enable MSI");
}
@@ -1121,6 +1124,7 @@ static void i915_driver_register(struct drm_i915_private *dev_priv)
/* Reveal our presence to userspace */
if (drm_dev_register(dev, 0) == 0) {
i915_debugfs_register(dev_priv);
+ i915_guc_register(dev_priv);
i915_setup_sysfs(dev_priv);
} else
DRM_ERROR("Failed to register driver for userspace access!\n");
@@ -1159,6 +1163,7 @@ static void i915_driver_unregister(struct drm_i915_private *dev_priv)
intel_opregion_unregister(dev_priv);
i915_teardown_sysfs(dev_priv);
+ i915_guc_unregister(dev_priv);
i915_debugfs_unregister(dev_priv);
drm_dev_unregister(&dev_priv->drm);
@@ -1167,8 +1172,8 @@ static void i915_driver_unregister(struct drm_i915_private *dev_priv)
/**
* i915_driver_load - setup chip and create an initial config
- * @dev: DRM device
- * @flags: startup flags
+ * @pdev: PCI device
+ * @ent: matching PCI ID entry
*
* The driver load routine has to do several things:
* - drive output discovery via intel_modeset_init()
@@ -1242,6 +1247,10 @@ int i915_driver_load(struct pci_dev *pdev, const struct pci_device_id *ent)
DRM_INFO("Initialized %s %d.%d.%d %s for %s on minor %d\n",
driver.name, driver.major, driver.minor, driver.patchlevel,
driver.date, pci_name(pdev), dev_priv->drm.primary->index);
+ if (IS_ENABLED(CONFIG_DRM_I915_DEBUG))
+ DRM_INFO("DRM_I915_DEBUG enabled\n");
+ if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
+ DRM_INFO("DRM_I915_DEBUG_GEM enabled\n");
intel_runtime_pm_put(dev_priv);
@@ -1309,7 +1318,7 @@ void i915_driver_unload(struct drm_device *dev)
drain_workqueue(dev_priv->wq);
intel_guc_fini(dev);
- i915_gem_fini(dev);
+ i915_gem_fini(dev_priv);
intel_fbc_cleanup_cfb(dev_priv);
intel_power_domains_fini(dev_priv);
@@ -1431,9 +1440,9 @@ static int i915_drm_suspend(struct drm_device *dev)
intel_suspend_encoders(dev_priv);
- intel_suspend_hw(dev);
+ intel_suspend_hw(dev_priv);
- i915_gem_suspend_gtt_mappings(dev);
+ i915_gem_suspend_gtt_mappings(dev_priv);
i915_save_state(dev);
@@ -1507,7 +1516,7 @@ static int i915_drm_suspend_late(struct drm_device *dev, bool hibernation)
* Fujitsu FSC S7110
* Acer Aspire 1830T
*/
- if (!(hibernation && INTEL_INFO(dev_priv)->gen < 6))
+ if (!(hibernation && INTEL_GEN(dev_priv) < 6))
pci_set_power_state(pdev, PCI_D3hot);
dev_priv->suspended_to_idle = suspend_to_idle(dev_priv);
@@ -1595,6 +1604,8 @@ static int i915_drm_resume(struct drm_device *dev)
intel_display_resume(dev);
+ drm_kms_helper_poll_enable(dev);
+
/*
* ... but also need to make sure that hotplug processing
* doesn't cause havoc. Like in the driver load code we don't
@@ -1602,8 +1613,6 @@ static int i915_drm_resume(struct drm_device *dev)
* notifications.
* */
intel_hpd_init(dev_priv);
- /* Config may have changed between suspend and resume */
- drm_helper_hpd_irq_event(dev);
intel_opregion_register(dev_priv);
@@ -1616,7 +1625,6 @@ static int i915_drm_resume(struct drm_device *dev)
intel_opregion_notify_adapter(dev_priv, PCI_D0);
intel_autoenable_gt_powersave(dev_priv);
- drm_kms_helper_poll_enable(dev);
enable_rpm_wakeref_asserts(dev_priv);
@@ -1721,6 +1729,22 @@ int i915_resume_switcheroo(struct drm_device *dev)
return i915_drm_resume(dev);
}
+static void disable_engines_irq(struct drm_i915_private *dev_priv)
+{
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+
+ /* Ensure the irq handler finishes, and is not run again. */
+ disable_irq(dev_priv->drm.irq);
+ for_each_engine(engine, dev_priv, id)
+ tasklet_kill(&engine->irq_tasklet);
+}
+
+static void enable_engines_irq(struct drm_i915_private *dev_priv)
+{
+ enable_irq(dev_priv->drm.irq);
+}
+
/**
* i915_reset - reset chip after a hang
* @dev: drm device to reset
@@ -1754,7 +1778,11 @@ void i915_reset(struct drm_i915_private *dev_priv)
error->reset_count++;
pr_notice("drm/i915: Resetting chip after gpu hang\n");
+
+ disable_engines_irq(dev_priv);
ret = intel_gpu_reset(dev_priv, ALL_ENGINES);
+ enable_engines_irq(dev_priv);
+
if (ret) {
if (ret != -ENODEV)
DRM_ERROR("Failed to reset chip: %i\n", ret);
@@ -2240,7 +2268,6 @@ err1:
static int vlv_resume_prepare(struct drm_i915_private *dev_priv,
bool rpm_resume)
{
- struct drm_device *dev = &dev_priv->drm;
int err;
int ret;
@@ -2264,10 +2291,8 @@ static int vlv_resume_prepare(struct drm_i915_private *dev_priv,
vlv_check_no_gt_access(dev_priv);
- if (rpm_resume) {
- intel_init_clock_gating(dev);
- i915_gem_restore_fences(dev);
- }
+ if (rpm_resume)
+ intel_init_clock_gating(dev_priv);
return ret;
}
@@ -2282,37 +2307,18 @@ static int intel_runtime_suspend(struct device *kdev)
if (WARN_ON_ONCE(!(dev_priv->rps.enabled && intel_enable_rc6())))
return -ENODEV;
- if (WARN_ON_ONCE(!HAS_RUNTIME_PM(dev)))
+ if (WARN_ON_ONCE(!HAS_RUNTIME_PM(dev_priv)))
return -ENODEV;
DRM_DEBUG_KMS("Suspending device\n");
- /*
- * We could deadlock here in case another thread holding struct_mutex
- * calls RPM suspend concurrently, since the RPM suspend will wait
- * first for this RPM suspend to finish. In this case the concurrent
- * RPM resume will be followed by its RPM suspend counterpart. Still
- * for consistency return -EAGAIN, which will reschedule this suspend.
- */
- if (!mutex_trylock(&dev->struct_mutex)) {
- DRM_DEBUG_KMS("device lock contention, deffering suspend\n");
- /*
- * Bump the expiration timestamp, otherwise the suspend won't
- * be rescheduled.
- */
- pm_runtime_mark_last_busy(kdev);
-
- return -EAGAIN;
- }
-
disable_rpm_wakeref_asserts(dev_priv);
/*
* We are safe here against re-faults, since the fault handler takes
* an RPM reference.
*/
- i915_gem_release_all_mmaps(dev_priv);
- mutex_unlock(&dev->struct_mutex);
+ i915_gem_runtime_suspend(dev_priv);
intel_guc_suspend(dev);
@@ -2386,7 +2392,7 @@ static int intel_runtime_resume(struct device *kdev)
struct drm_i915_private *dev_priv = to_i915(dev);
int ret = 0;
- if (WARN_ON_ONCE(!HAS_RUNTIME_PM(dev)))
+ if (WARN_ON_ONCE(!HAS_RUNTIME_PM(dev_priv)))
return -ENODEV;
DRM_DEBUG_KMS("Resuming device\n");
@@ -2404,7 +2410,7 @@ static int intel_runtime_resume(struct device *kdev)
if (IS_GEN6(dev_priv))
intel_init_pch_refclk(dev);
- if (IS_BROXTON(dev)) {
+ if (IS_BROXTON(dev_priv)) {
bxt_disable_dc9(dev_priv);
bxt_display_core_init(dev_priv, true);
if (dev_priv->csr.dmc_payload &&
@@ -2420,7 +2426,7 @@ static int intel_runtime_resume(struct device *kdev)
* No point of rolling back things in case of an error, as the best
* we can do is to hope that things will still work (and disable RPM).
*/
- i915_gem_init_swizzling(dev);
+ i915_gem_init_swizzling(dev_priv);
intel_runtime_pm_enable_interrupts(dev_priv);
@@ -2495,9 +2501,7 @@ static const struct file_operations i915_driver_fops = {
.mmap = drm_gem_mmap,
.poll = drm_poll,
.read = drm_read,
-#ifdef CONFIG_COMPAT
.compat_ioctl = i915_compat_ioctl,
-#endif
.llseek = noop_llseek,
};
@@ -2577,7 +2581,7 @@ static struct drm_driver driver = {
.set_busid = drm_pci_set_busid,
.gem_close_object = i915_gem_close_object,
- .gem_free_object = i915_gem_free_object,
+ .gem_free_object_unlocked = i915_gem_free_object,
.gem_vm_ops = &i915_gem_vm_ops,
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 685e9e065287..243224aeabf8 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -41,6 +41,7 @@
#include <linux/intel-iommu.h>
#include <linux/kref.h>
#include <linux/pm_qos.h>
+#include <linux/reservation.h>
#include <linux/shmem_fs.h>
#include <drm/drmP.h>
@@ -59,9 +60,14 @@
#include "intel_ringbuffer.h"
#include "i915_gem.h"
+#include "i915_gem_fence_reg.h"
+#include "i915_gem_object.h"
#include "i915_gem_gtt.h"
#include "i915_gem_render_state.h"
#include "i915_gem_request.h"
+#include "i915_gem_timeline.h"
+
+#include "i915_vma.h"
#include "intel_gvt.h"
@@ -70,7 +76,8 @@
#define DRIVER_NAME "i915"
#define DRIVER_DESC "Intel Graphics"
-#define DRIVER_DATE "20160919"
+#define DRIVER_DATE "20161121"
+#define DRIVER_TIMESTAMP 1479717903
#undef WARN_ON
/* Many gcc seem to not see through this and fall over :( */
@@ -122,6 +129,11 @@ static inline const char *onoff(bool v)
return v ? "on" : "off";
}
+static inline const char *enableddisabled(bool v)
+{
+ return v ? "enabled" : "disabled";
+}
+
enum pipe {
INVALID_PIPE = -1,
PIPE_A = 0,
@@ -182,9 +194,10 @@ enum plane {
};
#define plane_name(p) ((p) + 'A')
-#define sprite_name(p, s) ((p) * INTEL_INFO(dev)->num_sprites[(p)] + (s) + 'A')
+#define sprite_name(p, s) ((p) * INTEL_INFO(dev_priv)->num_sprites[(p)] + (s) + 'A')
enum port {
+ PORT_NONE = -1,
PORT_A = 0,
PORT_B,
PORT_C,
@@ -310,7 +323,7 @@ struct i915_hotplug {
#define for_each_pipe_masked(__dev_priv, __p, __mask) \
for ((__p) = 0; (__p) < INTEL_INFO(__dev_priv)->num_pipes; (__p)++) \
for_each_if ((__mask) & (1 << (__p)))
-#define for_each_plane(__dev_priv, __pipe, __p) \
+#define for_each_universal_plane(__dev_priv, __pipe, __p) \
for ((__p) = 0; \
(__p) < INTEL_INFO(__dev_priv)->num_sprites[(__pipe)] + 1; \
(__p)++)
@@ -455,23 +468,6 @@ struct intel_opregion {
struct intel_overlay;
struct intel_overlay_error_state;
-struct drm_i915_fence_reg {
- struct list_head link;
- struct drm_i915_private *i915;
- struct i915_vma *vma;
- int pin_count;
- int id;
- /**
- * Whether the tiling parameters for the currently
- * associated fence register have changed. Note that
- * for the purposes of tracking tiling changes we also
- * treat the unfenced register, the register slot that
- * the object occupies whilst it executes a fenced
- * command (such as BLT on gen2/3), as a "fence".
- */
- bool dirty;
-};
-
struct sdvo_device_mapping {
u8 initialized;
u8 dvo_port;
@@ -483,6 +479,7 @@ struct sdvo_device_mapping {
struct intel_connector;
struct intel_encoder;
+struct intel_atomic_state;
struct intel_crtc_state;
struct intel_initial_plane_config;
struct intel_crtc;
@@ -490,16 +487,20 @@ struct intel_limit;
struct dpll;
struct drm_i915_display_funcs {
- int (*get_display_clock_speed)(struct drm_device *dev);
- int (*get_fifo_size)(struct drm_device *dev, int plane);
+ int (*get_display_clock_speed)(struct drm_i915_private *dev_priv);
+ int (*get_fifo_size)(struct drm_i915_private *dev_priv, int plane);
int (*compute_pipe_wm)(struct intel_crtc_state *cstate);
int (*compute_intermediate_wm)(struct drm_device *dev,
struct intel_crtc *intel_crtc,
struct intel_crtc_state *newstate);
- void (*initial_watermarks)(struct intel_crtc_state *cstate);
- void (*optimize_watermarks)(struct intel_crtc_state *cstate);
+ void (*initial_watermarks)(struct intel_atomic_state *state,
+ struct intel_crtc_state *cstate);
+ void (*atomic_update_watermarks)(struct intel_atomic_state *state,
+ struct intel_crtc_state *cstate);
+ void (*optimize_watermarks)(struct intel_atomic_state *state,
+ struct intel_crtc_state *cstate);
int (*compute_global_watermarks)(struct drm_atomic_state *state);
- void (*update_wm)(struct drm_crtc *crtc);
+ void (*update_wm)(struct intel_crtc *crtc);
int (*modeset_calc_cdclk)(struct drm_atomic_state *state);
void (*modeset_commit_cdclk)(struct drm_atomic_state *state);
/* Returns the active state of the crtc, and if the crtc is active,
@@ -521,7 +522,7 @@ struct drm_i915_display_funcs {
const struct drm_display_mode *adjusted_mode);
void (*audio_codec_disable)(struct intel_encoder *encoder);
void (*fdi_link_train)(struct drm_crtc *crtc);
- void (*init_clock_gating)(struct drm_device *dev);
+ void (*init_clock_gating)(struct drm_i915_private *dev_priv);
int (*queue_flip)(struct drm_device *dev, struct drm_crtc *crtc,
struct drm_framebuffer *fb,
struct drm_i915_gem_object *obj,
@@ -558,6 +559,18 @@ enum forcewake_domains {
#define FW_REG_READ (1)
#define FW_REG_WRITE (2)
+enum decoupled_power_domain {
+ GEN9_DECOUPLED_PD_BLITTER = 0,
+ GEN9_DECOUPLED_PD_RENDER,
+ GEN9_DECOUPLED_PD_MEDIA,
+ GEN9_DECOUPLED_PD_ALL
+};
+
+enum decoupled_ops {
+ GEN9_DECOUPLED_OP_WRITE = 0,
+ GEN9_DECOUPLED_OP_READ
+};
+
enum forcewake_domains
intel_uncore_forcewake_for_reg(struct drm_i915_private *dev_priv,
i915_reg_t reg, unsigned int op);
@@ -581,13 +594,25 @@ struct intel_uncore_funcs {
uint32_t val, bool trace);
};
+struct intel_forcewake_range {
+ u32 start;
+ u32 end;
+
+ enum forcewake_domains domains;
+};
+
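/*
 * Editorial sketch, not part of the patch (the real table search lives in
 * intel_uncore.c; this helper name is illustrative): fw_domains_table maps
 * mmio offset ranges to the forcewake domains that must be held awake
 * while registers in that range are accessed:
 */
static enum forcewake_domains
example_lookup_fw_range(const struct intel_uncore *uncore, u32 offset)
{
	unsigned int i;

	for (i = 0; i < uncore->fw_domains_table_entries; i++) {
		const struct intel_forcewake_range *entry =
			&uncore->fw_domains_table[i];

		if (offset >= entry->start && offset <= entry->end)
			return entry->domains;
	}

	return 0;
}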
struct intel_uncore {
spinlock_t lock; /** lock is also taken in irq contexts. */
+ const struct intel_forcewake_range *fw_domains_table;
+ unsigned int fw_domains_table_entries;
+
struct intel_uncore_funcs funcs;
unsigned fifo_count;
+
enum forcewake_domains fw_domains;
+ enum forcewake_domains fw_domains_active;
struct intel_uncore_forcewake_domain {
struct drm_i915_private *i915;
@@ -633,54 +658,55 @@ struct intel_csr {
uint32_t allowed_dc_mask;
};
-#define DEV_INFO_FOR_EACH_FLAG(func, sep) \
- func(is_mobile) sep \
- func(is_i85x) sep \
- func(is_i915g) sep \
- func(is_i945gm) sep \
- func(is_g33) sep \
- func(hws_needs_physical) sep \
- func(is_g4x) sep \
- func(is_pineview) sep \
- func(is_broadwater) sep \
- func(is_crestline) sep \
- func(is_ivybridge) sep \
- func(is_valleyview) sep \
- func(is_cherryview) sep \
- func(is_haswell) sep \
- func(is_broadwell) sep \
- func(is_skylake) sep \
- func(is_broxton) sep \
- func(is_kabylake) sep \
- func(is_preliminary) sep \
- func(has_fbc) sep \
- func(has_psr) sep \
- func(has_runtime_pm) sep \
- func(has_csr) sep \
- func(has_resource_streamer) sep \
- func(has_rc6) sep \
- func(has_rc6p) sep \
- func(has_dp_mst) sep \
- func(has_gmbus_irq) sep \
- func(has_hw_contexts) sep \
- func(has_logical_ring_contexts) sep \
- func(has_l3_dpf) sep \
- func(has_gmch_display) sep \
- func(has_guc) sep \
- func(has_pipe_cxsr) sep \
- func(has_hotplug) sep \
- func(cursor_needs_physical) sep \
- func(has_overlay) sep \
- func(overlay_needs_physical) sep \
- func(supports_tv) sep \
- func(has_llc) sep \
- func(has_snoop) sep \
- func(has_ddi) sep \
- func(has_fpga_dbg) sep \
- func(has_pooled_eu)
-
-#define DEFINE_FLAG(name) u8 name:1
-#define SEP_SEMICOLON ;
+#define DEV_INFO_FOR_EACH_FLAG(func) \
+ /* Keep is_* in chronological order */ \
+ func(is_mobile); \
+ func(is_i85x); \
+ func(is_i915g); \
+ func(is_i945gm); \
+ func(is_g33); \
+ func(is_g4x); \
+ func(is_pineview); \
+ func(is_broadwater); \
+ func(is_crestline); \
+ func(is_ivybridge); \
+ func(is_valleyview); \
+ func(is_cherryview); \
+ func(is_haswell); \
+ func(is_broadwell); \
+ func(is_skylake); \
+ func(is_broxton); \
+ func(is_kabylake); \
+ func(is_alpha_support); \
+ /* Keep has_* in alphabetical order */ \
+ func(has_64bit_reloc); \
+ func(has_csr); \
+ func(has_ddi); \
+ func(has_dp_mst); \
+ func(has_fbc); \
+ func(has_fpga_dbg); \
+ func(has_gmbus_irq); \
+ func(has_gmch_display); \
+ func(has_guc); \
+ func(has_hotplug); \
+ func(has_hw_contexts); \
+ func(has_l3_dpf); \
+ func(has_llc); \
+ func(has_logical_ring_contexts); \
+ func(has_overlay); \
+ func(has_pipe_cxsr); \
+ func(has_pooled_eu); \
+ func(has_psr); \
+ func(has_rc6); \
+ func(has_rc6p); \
+ func(has_resource_streamer); \
+ func(has_runtime_pm); \
+ func(has_snoop); \
+ func(cursor_needs_physical); \
+ func(hws_needs_physical); \
+ func(overlay_needs_physical); \
+ func(supports_tv); \
+ func(has_decoupled_mmio)
struct sseu_dev_info {
u8 slice_mask;
@@ -709,7 +735,9 @@ struct intel_device_info {
u16 gen_mask;
u8 ring_mask; /* Rings supported by the HW */
u8 num_rings;
- DEV_INFO_FOR_EACH_FLAG(DEFINE_FLAG, SEP_SEMICOLON);
+#define DEFINE_FLAG(name) u8 name:1
+ DEV_INFO_FOR_EACH_FLAG(DEFINE_FLAG);
+#undef DEFINE_FLAG
u16 ddb_size; /* in blocks */
/* Register offsets for the various display pipes and transcoders */
int pipe_offsets[I915_MAX_TRANSCODERS];
@@ -726,14 +754,15 @@ struct intel_device_info {
} color;
};
-#undef DEFINE_FLAG
-#undef SEP_SEMICOLON
-
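/*
 * Editorial sketch, not part of the patch: DEV_INFO_FOR_EACH_FLAG() is an
 * x-macro, so the same flag list expands under different per-entry macros.
 * DEFINE_FLAG above declares the bitfields; a printer (illustrative name,
 * assuming the driver's yesno() helper) can reuse the list to dump them:
 */
#define EXAMPLE_PRINT_FLAG(name) \
	pr_info(#name ": %s\n", yesno(info->name))
static void example_dump_device_flags(const struct intel_device_info *info)
{
	DEV_INFO_FOR_EACH_FLAG(EXAMPLE_PRINT_FLAG);
}
#undef EXAMPLE_PRINT_FLAG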
struct intel_display_error_state;
struct drm_i915_error_state {
struct kref ref;
struct timeval time;
+ struct timeval boottime;
+ struct timeval uptime;
+
+ struct drm_i915_private *i915;
char error_msg[128];
bool simulated;
@@ -759,11 +788,12 @@ struct drm_i915_error_state {
u32 gam_ecochk;
u32 gab_ctl;
u32 gfx_mode;
- u32 extra_instdone[I915_NUM_INSTDONE_REG];
+
u64 fence[I915_MAX_NUM_FENCES];
struct intel_overlay_error_state *overlay;
struct intel_display_error_state *display;
struct drm_i915_error_object *semaphore;
+ struct drm_i915_error_object *guc_log;
struct drm_i915_error_engine {
int engine_id;
@@ -775,12 +805,14 @@ struct drm_i915_error_state {
struct i915_address_space *vm;
int num_requests;
+ /* position of active request inside the ring */
+ u32 rq_head, rq_post, rq_tail;
+
/* our own tracking of ring head and tail */
u32 cpu_ring_head;
u32 cpu_ring_tail;
u32 last_seqno;
- u32 semaphore_seqno[I915_NUM_ENGINES - 1];
/* Register state */
u32 start;
@@ -791,7 +823,6 @@ struct drm_i915_error_state {
u32 hws;
u32 ipeir;
u32 ipehr;
- u32 instdone;
u32 bbstate;
u32 instpm;
u32 instps;
@@ -802,11 +833,13 @@ struct drm_i915_error_state {
u64 faddr;
u32 rc_psmi; /* sleep state */
u32 semaphore_mboxes[I915_NUM_ENGINES - 1];
+ struct intel_instdone instdone;
struct drm_i915_error_object {
- int page_count;
u64 gtt_offset;
u64 gtt_size;
+ int page_count;
+ int unused;
u32 *pages[0];
} *ringbuffer, *batchbuffer, *wa_batchbuffer, *ctx, *hws_page;
@@ -815,10 +848,11 @@ struct drm_i915_error_state {
struct drm_i915_error_request {
long jiffies;
pid_t pid;
+ u32 context;
u32 seqno;
u32 head;
u32 tail;
- } *requests;
+ } *requests, execlist[2];
struct drm_i915_error_waiter {
char comm[TASK_COMM_LEN];
@@ -914,6 +948,7 @@ struct i915_gem_context {
struct drm_i915_file_private *file_priv;
struct i915_hw_ppgtt *ppgtt;
struct pid *pid;
+ const char *name;
struct i915_ctx_hang_stats hang_stats;
@@ -924,6 +959,7 @@ struct i915_gem_context {
/* Unique identifier for this context, used by the hw for tracking */
unsigned int hw_id;
u32 user_handle;
+ int priority; /* greater priorities are serviced first */
u32 ggtt_alignment;
@@ -972,6 +1008,9 @@ struct intel_fbc {
bool enabled;
bool active;
+ bool underrun_detected;
+ struct work_struct underrun_work;
+
struct intel_fbc_state_cache {
struct {
unsigned int mode_flags;
@@ -1297,6 +1336,12 @@ struct i915_power_well {
/* cached hw enabled state */
bool hw_enabled;
unsigned long domains;
+ /* unique identifier for this power well */
+ unsigned long id;
+ /*
+ * Arbitrary data associated with this power well. Platform and power
+ * well specific.
+ */
unsigned long data;
const struct i915_power_well_ops *ops;
};
@@ -1334,11 +1379,22 @@ struct i915_gem_mm {
struct list_head bound_list;
/**
* List of objects which are not bound to the GTT (thus
- * are idle and not used by the GPU) but still have
- * (presumably uncached) pages still attached.
+ * are idle and not used by the GPU). These objects may or may
+ * not actually have any pages attached.
*/
struct list_head unbound_list;
+ /** List of all objects in gtt_space, currently mmaped by userspace.
+ * All objects within this list must also be on bound_list.
+ */
+ struct list_head userfault_list;
+
+ /**
+ * List of objects which are pending destruction.
+ */
+ struct llist_head free_list;
+ struct work_struct free_work;
+
/** Usable portion of the GTT for GEM */
unsigned long stolen_base; /* limited to low memory (32-bit) */
@@ -1368,7 +1424,7 @@ struct i915_gem_mm {
/* accounting, useful for userland debugging */
spinlock_t object_stat_lock;
- size_t object_memory;
+ u64 object_memory;
u32 object_count;
};
@@ -1387,6 +1443,9 @@ struct i915_error_state_file_priv {
struct drm_i915_error_state *error;
};
+#define I915_RESET_TIMEOUT (10 * HZ) /* 10s */
+#define I915_FENCE_TIMEOUT (10 * HZ) /* 10s */
+
struct i915_gpu_error {
/* For hangcheck timer */
#define DRM_I915_HANGCHECK_PERIOD 1500 /* in ms */
@@ -1620,7 +1679,6 @@ static inline bool skl_ddb_entry_equal(const struct skl_ddb_entry *e1,
}
struct skl_ddb_allocation {
- struct skl_ddb_entry pipe[I915_MAX_PIPES];
struct skl_ddb_entry plane[I915_MAX_PIPES][I915_MAX_PLANES]; /* packed/uv */
struct skl_ddb_entry y_plane[I915_MAX_PIPES][I915_MAX_PLANES];
};
@@ -1628,15 +1686,12 @@ struct skl_ddb_allocation {
struct skl_wm_values {
unsigned dirty_pipes;
struct skl_ddb_allocation ddb;
- uint32_t wm_linetime[I915_MAX_PIPES];
- uint32_t plane[I915_MAX_PIPES][I915_MAX_PLANES][8];
- uint32_t plane_trans[I915_MAX_PIPES][I915_MAX_PLANES];
};
struct skl_wm_level {
- bool plane_en[I915_MAX_PLANES];
- uint16_t plane_res_b[I915_MAX_PLANES];
- uint8_t plane_res_l[I915_MAX_PLANES];
+ bool plane_en;
+ uint16_t plane_res_b;
+ uint8_t plane_res_l;
};
/*
@@ -1664,7 +1719,6 @@ struct skl_wm_level {
*/
struct i915_runtime_pm {
atomic_t wakeref_count;
- atomic_t atomic_seq;
bool suspended;
bool irqs_enabled;
};
@@ -1748,6 +1802,7 @@ struct drm_i915_private {
struct kmem_cache *objects;
struct kmem_cache *vmas;
struct kmem_cache *requests;
+ struct kmem_cache *dependencies;
const struct intel_device_info info;
@@ -1759,7 +1814,7 @@ struct drm_i915_private {
struct i915_virtual_gpu vgpu;
- struct intel_gvt gvt;
+ struct intel_gvt *gvt;
struct intel_guc guc;
@@ -1787,9 +1842,8 @@ struct drm_i915_private {
struct pci_dev *bridge_dev;
struct i915_gem_context *kernel_context;
- struct intel_engine_cs engine[I915_NUM_ENGINES];
+ struct intel_engine_cs *engine[I915_NUM_ENGINES];
struct i915_vma *semaphore;
- u32 next_seqno;
struct drm_dma_handle *status_page_dmah;
struct resource mch_res;
@@ -1814,8 +1868,10 @@ struct drm_i915_private {
u32 de_irq_mask[I915_MAX_PIPES];
};
u32 gt_irq_mask;
- u32 pm_irq_mask;
+ u32 pm_imr;
+ u32 pm_ier;
u32 pm_rps_events;
+ u32 pm_guc_events;
u32 pipestat_irq_mask[I915_MAX_PIPES];
struct i915_hotplug hotplug;
@@ -1892,8 +1948,8 @@ struct drm_i915_private {
/* Kernel Modesetting */
- struct drm_crtc *plane_to_crtc_mapping[I915_MAX_PIPES];
- struct drm_crtc *pipe_to_crtc_mapping[I915_MAX_PIPES];
+ struct intel_crtc *plane_to_crtc_mapping[I915_MAX_PIPES];
+ struct intel_crtc *pipe_to_crtc_mapping[I915_MAX_PIPES];
wait_queue_head_t pending_flip_queue;
#ifdef CONFIG_DEBUG_FS
@@ -2009,13 +2065,6 @@ struct drm_i915_private {
*/
uint16_t skl_latency[8];
- /*
- * The skl_wm_values structure is a bit too big for stack
- * allocation, so we keep the staging struct where we store
- * intermediate results here instead.
- */
- struct skl_wm_values skl_results;
-
/* current hardware state */
union {
struct ilk_wm_values hw;
@@ -2047,6 +2096,10 @@ struct drm_i915_private {
void (*resume)(struct drm_i915_private *);
void (*cleanup_engine)(struct intel_engine_cs *engine);
+ struct list_head timelines;
+ struct i915_gem_timeline global_timeline;
+ u32 active_requests;
+
/**
* Is the GPU currently considered idle, or busy executing
* userspace requests? Whilst idle, we allow runtime power
@@ -2054,7 +2107,6 @@ struct drm_i915_private {
* In order to reduce the effect on performance, there
* is a slight delay before we do so.
*/
- unsigned int active_engines;
bool awake;
/**
@@ -2074,12 +2126,15 @@ struct drm_i915_private {
* off the idle_work.
*/
struct delayed_work idle_work;
+
+ ktime_t last_init_time;
} gt;
/* perform PHY state sanity checks? */
bool chv_phy_assert[2];
- struct intel_encoder *dig_port_map[I915_MAX_PORTS];
+ /* Used to save the pipe-to-encoder mapping for audio */
+ struct intel_encoder *av_enc_map[I915_MAX_PIPES];
/*
* NOTE: This is the dri1/ums dungeon, don't add stuff here. Your patch
@@ -2103,19 +2158,11 @@ static inline struct drm_i915_private *guc_to_i915(struct intel_guc *guc)
}
/* Simple iterator over all initialised engines */
-#define for_each_engine(engine__, dev_priv__) \
- for ((engine__) = &(dev_priv__)->engine[0]; \
- (engine__) < &(dev_priv__)->engine[I915_NUM_ENGINES]; \
- (engine__)++) \
- for_each_if (intel_engine_initialized(engine__))
-
-/* Iterator with engine_id */
-#define for_each_engine_id(engine__, dev_priv__, id__) \
- for ((engine__) = &(dev_priv__)->engine[0], (id__) = 0; \
- (engine__) < &(dev_priv__)->engine[I915_NUM_ENGINES]; \
- (engine__)++) \
- for_each_if (((id__) = (engine__)->id, \
- intel_engine_initialized(engine__)))
+#define for_each_engine(engine__, dev_priv__, id__) \
+ for ((id__) = 0; \
+ (id__) < I915_NUM_ENGINES; \
+ (id__)++) \
+ for_each_if ((engine__) = (dev_priv__)->engine[(id__)])
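/*
 * Editorial sketch, not part of the patch: with engines now individually
 * allocated, the iterator takes an explicit id and for_each_if() skips
 * NULL slots. Typical use (illustrative helper):
 */
static void example_list_engines(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	for_each_engine(engine, dev_priv, id)
		pr_debug("engine[%d] = %s\n", id, engine->name);
}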
#define __mask_next_bit(mask) ({ \
int __idx = ffs(mask) - 1; \
@@ -2126,7 +2173,7 @@ static inline struct drm_i915_private *guc_to_i915(struct intel_guc *guc)
/* Iterator over subset of engines selected by mask */
#define for_each_engine_masked(engine__, dev_priv__, mask__, tmp__) \
for (tmp__ = mask__ & INTEL_INFO(dev_priv__)->ring_mask; \
- tmp__ ? (engine__ = &(dev_priv__)->engine[__mask_next_bit(tmp__)]), 1 : 0; )
+ tmp__ ? (engine__ = (dev_priv__)->engine[__mask_next_bit(tmp__)]), 1 : 0; )
enum hdmi_force_audio {
HDMI_AUDIO_OFF_DVI = -2, /* no aux data for HDMI-DVI converter */
@@ -2137,30 +2184,6 @@ enum hdmi_force_audio {
#define I915_GTT_OFFSET_NONE ((u32)-1)
-struct drm_i915_gem_object_ops {
- unsigned int flags;
-#define I915_GEM_OBJECT_HAS_STRUCT_PAGE 0x1
-
- /* Interface between the GEM object and its backing storage.
- * get_pages() is called once prior to the use of the associated set
- * of pages before binding them into the GTT, and put_pages() is
- * called after we no longer need them. As we expect there to be
- * associated cost with migrating pages between the backing storage
- * and making them available for the GPU (e.g. clflush), we may hold
- * onto the pages after they are no longer referenced by the GPU
- * in case they may be used again shortly (for example migrating the
- * pages to a different memory domain within the GTT). put_pages()
- * will therefore most likely be called when the object itself is
- * being released or under memory pressure (where we attempt to
- * reap pages for the shrinker).
- */
- int (*get_pages)(struct drm_i915_gem_object *);
- void (*put_pages)(struct drm_i915_gem_object *);
-
- int (*dmabuf_export)(struct drm_i915_gem_object *);
- void (*release)(struct drm_i915_gem_object *);
-};
-
/*
* Frontbuffer tracking bits. Set in obj->frontbuffer_bits while a gem bo is
* considered to be the frontbuffer for the given plane interface-wise. This
@@ -2182,232 +2205,6 @@ struct drm_i915_gem_object_ops {
#define INTEL_FRONTBUFFER_ALL_MASK(pipe) \
(0xff << (INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe)))
-struct drm_i915_gem_object {
- struct drm_gem_object base;
-
- const struct drm_i915_gem_object_ops *ops;
-
- /** List of VMAs backed by this object */
- struct list_head vma_list;
-
- /** Stolen memory for this object, instead of being backed by shmem. */
- struct drm_mm_node *stolen;
- struct list_head global_list;
-
- /** Used in execbuf to temporarily hold a ref */
- struct list_head obj_exec_link;
-
- struct list_head batch_pool_link;
-
- unsigned long flags;
- /**
- * This is set if the object is on the active lists (has pending
- * rendering and so a non-zero seqno), and is not set if it is on
- * inactive (ready to be unbound) list.
- */
-#define I915_BO_ACTIVE_SHIFT 0
-#define I915_BO_ACTIVE_MASK ((1 << I915_NUM_ENGINES) - 1)
-#define __I915_BO_ACTIVE(bo) \
- ((READ_ONCE((bo)->flags) >> I915_BO_ACTIVE_SHIFT) & I915_BO_ACTIVE_MASK)
-
- /**
- * This is set if the object has been written to since last bound
- * to the GTT
- */
- unsigned int dirty:1;
-
- /**
- * Advice: are the backing pages purgeable?
- */
- unsigned int madv:2;
-
- /**
- * Whether the current gtt mapping needs to be mappable (and isn't just
- * mappable by accident). Track pin and fault separate for a more
- * accurate mappable working set.
- */
- unsigned int fault_mappable:1;
-
- /*
- * Is the object to be mapped as read-only to the GPU
- * Only honoured if hardware has relevant pte bit
- */
- unsigned long gt_ro:1;
- unsigned int cache_level:3;
- unsigned int cache_dirty:1;
-
- atomic_t frontbuffer_bits;
- unsigned int frontbuffer_ggtt_origin; /* write once */
-
- /** Current tiling stride for the object, if it's tiled. */
- unsigned int tiling_and_stride;
-#define FENCE_MINIMUM_STRIDE 128 /* See i915_tiling_ok() */
-#define TILING_MASK (FENCE_MINIMUM_STRIDE-1)
-#define STRIDE_MASK (~TILING_MASK)
-
- /** Count of VMA actually bound by this object */
- unsigned int bind_count;
- unsigned int pin_display;
-
- struct sg_table *pages;
- int pages_pin_count;
- struct get_page {
- struct scatterlist *sg;
- int last;
- } get_page;
- void *mapping;
-
- /** Breadcrumb of last rendering to the buffer.
- * There can only be one writer, but we allow for multiple readers.
- * If there is a writer, that necessarily implies that all other
- * read requests are complete - but we may only be lazily clearing
- * the read requests. A read request is naturally the most recent
- * request on a ring, so we may have two different write and read
- * requests on one ring where the write request is older than the
- * read request. This allows for the CPU to read from an active
- * buffer by only waiting for the write to complete.
- */
- struct i915_gem_active last_read[I915_NUM_ENGINES];
- struct i915_gem_active last_write;
-
- /** References from framebuffers, locks out tiling changes. */
- unsigned long framebuffer_references;
-
- /** Record of address bit 17 of each page at last unbind. */
- unsigned long *bit_17;
-
- struct i915_gem_userptr {
- uintptr_t ptr;
- unsigned read_only :1;
- unsigned workers :4;
-#define I915_GEM_USERPTR_MAX_WORKERS 15
-
- struct i915_mm_struct *mm;
- struct i915_mmu_object *mmu_object;
- struct work_struct *work;
- } userptr;
-
- /** for phys allocated objects */
- struct drm_dma_handle *phys_handle;
-};
-
-static inline struct drm_i915_gem_object *
-to_intel_bo(struct drm_gem_object *gem)
-{
- /* Assert that to_intel_bo(NULL) == NULL */
- BUILD_BUG_ON(offsetof(struct drm_i915_gem_object, base));
-
- return container_of(gem, struct drm_i915_gem_object, base);
-}
-
-static inline struct drm_i915_gem_object *
-i915_gem_object_lookup(struct drm_file *file, u32 handle)
-{
- return to_intel_bo(drm_gem_object_lookup(file, handle));
-}
-
-__deprecated
-extern struct drm_gem_object *
-drm_gem_object_lookup(struct drm_file *file, u32 handle);
-
-__attribute__((nonnull))
-static inline struct drm_i915_gem_object *
-i915_gem_object_get(struct drm_i915_gem_object *obj)
-{
- drm_gem_object_reference(&obj->base);
- return obj;
-}
-
-__deprecated
-extern void drm_gem_object_reference(struct drm_gem_object *);
-
-__attribute__((nonnull))
-static inline void
-i915_gem_object_put(struct drm_i915_gem_object *obj)
-{
- drm_gem_object_unreference(&obj->base);
-}
-
-__deprecated
-extern void drm_gem_object_unreference(struct drm_gem_object *);
-
-__attribute__((nonnull))
-static inline void
-i915_gem_object_put_unlocked(struct drm_i915_gem_object *obj)
-{
- drm_gem_object_unreference_unlocked(&obj->base);
-}
-
-__deprecated
-extern void drm_gem_object_unreference_unlocked(struct drm_gem_object *);
-
-static inline bool
-i915_gem_object_has_struct_page(const struct drm_i915_gem_object *obj)
-{
- return obj->ops->flags & I915_GEM_OBJECT_HAS_STRUCT_PAGE;
-}
-
-static inline unsigned long
-i915_gem_object_get_active(const struct drm_i915_gem_object *obj)
-{
- return (obj->flags >> I915_BO_ACTIVE_SHIFT) & I915_BO_ACTIVE_MASK;
-}
-
-static inline bool
-i915_gem_object_is_active(const struct drm_i915_gem_object *obj)
-{
- return i915_gem_object_get_active(obj);
-}
-
-static inline void
-i915_gem_object_set_active(struct drm_i915_gem_object *obj, int engine)
-{
- obj->flags |= BIT(engine + I915_BO_ACTIVE_SHIFT);
-}
-
-static inline void
-i915_gem_object_clear_active(struct drm_i915_gem_object *obj, int engine)
-{
- obj->flags &= ~BIT(engine + I915_BO_ACTIVE_SHIFT);
-}
-
-static inline bool
-i915_gem_object_has_active_engine(const struct drm_i915_gem_object *obj,
- int engine)
-{
- return obj->flags & BIT(engine + I915_BO_ACTIVE_SHIFT);
-}
-
-static inline unsigned int
-i915_gem_object_get_tiling(struct drm_i915_gem_object *obj)
-{
- return obj->tiling_and_stride & TILING_MASK;
-}
-
-static inline bool
-i915_gem_object_is_tiled(struct drm_i915_gem_object *obj)
-{
- return i915_gem_object_get_tiling(obj) != I915_TILING_NONE;
-}
-
-static inline unsigned int
-i915_gem_object_get_stride(struct drm_i915_gem_object *obj)
-{
- return obj->tiling_and_stride & STRIDE_MASK;
-}
-
-static inline struct i915_vma *i915_vma_get(struct i915_vma *vma)
-{
- i915_gem_object_get(vma->obj);
- return vma;
-}
-
-static inline void i915_vma_put(struct i915_vma *vma)
-{
- lockdep_assert_held(&vma->vm->dev->struct_mutex);
- i915_gem_object_put(vma->obj);
-}
-
/*
* Optimised SGL iterator for GEM objects
*/
@@ -2434,6 +2231,14 @@ static __always_inline struct sgt_iter {
return s;
}
+static inline struct scatterlist *____sg_next(struct scatterlist *sg)
+{
+ ++sg;
+ if (unlikely(sg_is_chain(sg)))
+ sg = sg_chain_ptr(sg);
+ return sg;
+}
+
/**
* __sg_next - return the next scatterlist entry in a list
* @sg: The current sg entry
@@ -2448,9 +2253,7 @@ static inline struct scatterlist *__sg_next(struct scatterlist *sg)
#ifdef CONFIG_DEBUG_SG
BUG_ON(sg->sg_magic != SG_MAGIC);
#endif
- return sg_is_last(sg) ? NULL :
- likely(!sg_is_chain(++sg)) ? sg :
- sg_chain_ptr(sg);
+ return sg_is_last(sg) ? NULL : ____sg_next(sg);
}
/**
@@ -2574,23 +2377,19 @@ struct drm_i915_cmd_table {
int count;
};
-/* Note that the (struct drm_i915_private *) cast is just to shut up gcc. */
-#define __I915__(p) ({ \
- struct drm_i915_private *__p; \
- if (__builtin_types_compatible_p(typeof(*p), struct drm_i915_private)) \
- __p = (struct drm_i915_private *)p; \
- else if (__builtin_types_compatible_p(typeof(*p), struct drm_device)) \
- __p = to_i915((struct drm_device *)p); \
- else \
- BUILD_BUG(); \
- __p; \
-})
-#define INTEL_INFO(p) (&__I915__(p)->info)
-#define INTEL_GEN(p) (INTEL_INFO(p)->gen)
-#define INTEL_DEVID(p) (INTEL_INFO(p)->device_id)
+static inline const struct intel_device_info *
+intel_info(const struct drm_i915_private *dev_priv)
+{
+ return &dev_priv->info;
+}
+
+#define INTEL_INFO(dev_priv) intel_info((dev_priv))
+
+#define INTEL_GEN(dev_priv) ((dev_priv)->info.gen)
+#define INTEL_DEVID(dev_priv) ((dev_priv)->info.device_id)
#define REVID_FOREVER 0xff
-#define INTEL_REVID(p) (__I915__(p)->drm.pdev->revision)
+#define INTEL_REVID(dev_priv) ((dev_priv)->drm.pdev->revision)
#define GEN_FOREVER (0)
/*
@@ -2598,7 +2397,7 @@ struct drm_i915_cmd_table {
*
* Use GEN_FOREVER for unbound start and or end.
*/
-#define IS_GEN(p, s, e) ({ \
+#define IS_GEN(dev_priv, s, e) ({ \
unsigned int __s = (s), __e = (e); \
BUILD_BUG_ON(!__builtin_constant_p(s)); \
BUILD_BUG_ON(!__builtin_constant_p(e)); \
@@ -2608,7 +2407,7 @@ struct drm_i915_cmd_table {
__e = BITS_PER_LONG - 1; \
else \
__e = (e) - 1; \
- !!(INTEL_INFO(p)->gen_mask & GENMASK((__e), (__s))); \
+ !!((dev_priv)->info.gen_mask & GENMASK((__e), (__s))); \
})
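
For illustration, a hypothetical call site of the reworked range check; both
bounds must be compile-time constants, and GEN_FOREVER leaves a bound open:

/* Sketch only: gate one path on gen6..gen8 inclusive, another on gen9+. */
if (IS_GEN(dev_priv, 6, 8))
	setup_legacy_path(dev_priv);		/* hypothetical helper */
else if (IS_GEN(dev_priv, 9, GEN_FOREVER))
	setup_gen9_path(dev_priv);		/* hypothetical helper */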
/*
@@ -2619,75 +2418,75 @@ struct drm_i915_cmd_table {
#define IS_REVID(p, since, until) \
(INTEL_REVID(p) >= (since) && INTEL_REVID(p) <= (until))
-#define IS_I830(dev) (INTEL_DEVID(dev) == 0x3577)
-#define IS_845G(dev) (INTEL_DEVID(dev) == 0x2562)
-#define IS_I85X(dev) (INTEL_INFO(dev)->is_i85x)
-#define IS_I865G(dev) (INTEL_DEVID(dev) == 0x2572)
-#define IS_I915G(dev) (INTEL_INFO(dev)->is_i915g)
-#define IS_I915GM(dev) (INTEL_DEVID(dev) == 0x2592)
-#define IS_I945G(dev) (INTEL_DEVID(dev) == 0x2772)
-#define IS_I945GM(dev) (INTEL_INFO(dev)->is_i945gm)
-#define IS_BROADWATER(dev) (INTEL_INFO(dev)->is_broadwater)
-#define IS_CRESTLINE(dev) (INTEL_INFO(dev)->is_crestline)
-#define IS_GM45(dev) (INTEL_DEVID(dev) == 0x2A42)
-#define IS_G4X(dev) (INTEL_INFO(dev)->is_g4x)
-#define IS_PINEVIEW_G(dev) (INTEL_DEVID(dev) == 0xa001)
-#define IS_PINEVIEW_M(dev) (INTEL_DEVID(dev) == 0xa011)
-#define IS_PINEVIEW(dev) (INTEL_INFO(dev)->is_pineview)
-#define IS_G33(dev) (INTEL_INFO(dev)->is_g33)
-#define IS_IRONLAKE_M(dev) (INTEL_DEVID(dev) == 0x0046)
-#define IS_IVYBRIDGE(dev) (INTEL_INFO(dev)->is_ivybridge)
-#define IS_IVB_GT1(dev) (INTEL_DEVID(dev) == 0x0156 || \
- INTEL_DEVID(dev) == 0x0152 || \
- INTEL_DEVID(dev) == 0x015a)
-#define IS_VALLEYVIEW(dev) (INTEL_INFO(dev)->is_valleyview)
-#define IS_CHERRYVIEW(dev) (INTEL_INFO(dev)->is_cherryview)
-#define IS_HASWELL(dev) (INTEL_INFO(dev)->is_haswell)
-#define IS_BROADWELL(dev) (INTEL_INFO(dev)->is_broadwell)
-#define IS_SKYLAKE(dev) (INTEL_INFO(dev)->is_skylake)
-#define IS_BROXTON(dev) (INTEL_INFO(dev)->is_broxton)
-#define IS_KABYLAKE(dev) (INTEL_INFO(dev)->is_kabylake)
-#define IS_MOBILE(dev) (INTEL_INFO(dev)->is_mobile)
-#define IS_HSW_EARLY_SDV(dev) (IS_HASWELL(dev) && \
- (INTEL_DEVID(dev) & 0xFF00) == 0x0C00)
-#define IS_BDW_ULT(dev) (IS_BROADWELL(dev) && \
- ((INTEL_DEVID(dev) & 0xf) == 0x6 || \
- (INTEL_DEVID(dev) & 0xf) == 0xb || \
- (INTEL_DEVID(dev) & 0xf) == 0xe))
+#define IS_I830(dev_priv) (INTEL_DEVID(dev_priv) == 0x3577)
+#define IS_845G(dev_priv) (INTEL_DEVID(dev_priv) == 0x2562)
+#define IS_I85X(dev_priv) ((dev_priv)->info.is_i85x)
+#define IS_I865G(dev_priv) (INTEL_DEVID(dev_priv) == 0x2572)
+#define IS_I915G(dev_priv) ((dev_priv)->info.is_i915g)
+#define IS_I915GM(dev_priv) (INTEL_DEVID(dev_priv) == 0x2592)
+#define IS_I945G(dev_priv) (INTEL_DEVID(dev_priv) == 0x2772)
+#define IS_I945GM(dev_priv) ((dev_priv)->info.is_i945gm)
+#define IS_BROADWATER(dev_priv) ((dev_priv)->info.is_broadwater)
+#define IS_CRESTLINE(dev_priv) ((dev_priv)->info.is_crestline)
+#define IS_GM45(dev_priv) (INTEL_DEVID(dev_priv) == 0x2A42)
+#define IS_G4X(dev_priv) ((dev_priv)->info.is_g4x)
+#define IS_PINEVIEW_G(dev_priv) (INTEL_DEVID(dev_priv) == 0xa001)
+#define IS_PINEVIEW_M(dev_priv) (INTEL_DEVID(dev_priv) == 0xa011)
+#define IS_PINEVIEW(dev_priv) ((dev_priv)->info.is_pineview)
+#define IS_G33(dev_priv) ((dev_priv)->info.is_g33)
+#define IS_IRONLAKE_M(dev_priv) (INTEL_DEVID(dev_priv) == 0x0046)
+#define IS_IVYBRIDGE(dev_priv) ((dev_priv)->info.is_ivybridge)
+#define IS_IVB_GT1(dev_priv) (INTEL_DEVID(dev_priv) == 0x0156 || \
+ INTEL_DEVID(dev_priv) == 0x0152 || \
+ INTEL_DEVID(dev_priv) == 0x015a)
+#define IS_VALLEYVIEW(dev_priv) ((dev_priv)->info.is_valleyview)
+#define IS_CHERRYVIEW(dev_priv) ((dev_priv)->info.is_cherryview)
+#define IS_HASWELL(dev_priv) ((dev_priv)->info.is_haswell)
+#define IS_BROADWELL(dev_priv) ((dev_priv)->info.is_broadwell)
+#define IS_SKYLAKE(dev_priv) ((dev_priv)->info.is_skylake)
+#define IS_BROXTON(dev_priv) ((dev_priv)->info.is_broxton)
+#define IS_KABYLAKE(dev_priv) ((dev_priv)->info.is_kabylake)
+#define IS_MOBILE(dev_priv) ((dev_priv)->info.is_mobile)
+#define IS_HSW_EARLY_SDV(dev_priv) (IS_HASWELL(dev_priv) && \
+ (INTEL_DEVID(dev_priv) & 0xFF00) == 0x0C00)
+#define IS_BDW_ULT(dev_priv) (IS_BROADWELL(dev_priv) && \
+ ((INTEL_DEVID(dev_priv) & 0xf) == 0x6 || \
+ (INTEL_DEVID(dev_priv) & 0xf) == 0xb || \
+ (INTEL_DEVID(dev_priv) & 0xf) == 0xe))
/* ULX machines are also considered ULT. */
-#define IS_BDW_ULX(dev) (IS_BROADWELL(dev) && \
- (INTEL_DEVID(dev) & 0xf) == 0xe)
-#define IS_BDW_GT3(dev) (IS_BROADWELL(dev) && \
- (INTEL_DEVID(dev) & 0x00F0) == 0x0020)
-#define IS_HSW_ULT(dev) (IS_HASWELL(dev) && \
- (INTEL_DEVID(dev) & 0xFF00) == 0x0A00)
-#define IS_HSW_GT3(dev) (IS_HASWELL(dev) && \
- (INTEL_DEVID(dev) & 0x00F0) == 0x0020)
+#define IS_BDW_ULX(dev_priv) (IS_BROADWELL(dev_priv) && \
+ (INTEL_DEVID(dev_priv) & 0xf) == 0xe)
+#define IS_BDW_GT3(dev_priv) (IS_BROADWELL(dev_priv) && \
+ (INTEL_DEVID(dev_priv) & 0x00F0) == 0x0020)
+#define IS_HSW_ULT(dev_priv) (IS_HASWELL(dev_priv) && \
+ (INTEL_DEVID(dev_priv) & 0xFF00) == 0x0A00)
+#define IS_HSW_GT3(dev_priv) (IS_HASWELL(dev_priv) && \
+ (INTEL_DEVID(dev_priv) & 0x00F0) == 0x0020)
/* ULX machines are also considered ULT. */
-#define IS_HSW_ULX(dev) (INTEL_DEVID(dev) == 0x0A0E || \
- INTEL_DEVID(dev) == 0x0A1E)
-#define IS_SKL_ULT(dev) (INTEL_DEVID(dev) == 0x1906 || \
- INTEL_DEVID(dev) == 0x1913 || \
- INTEL_DEVID(dev) == 0x1916 || \
- INTEL_DEVID(dev) == 0x1921 || \
- INTEL_DEVID(dev) == 0x1926)
-#define IS_SKL_ULX(dev) (INTEL_DEVID(dev) == 0x190E || \
- INTEL_DEVID(dev) == 0x1915 || \
- INTEL_DEVID(dev) == 0x191E)
-#define IS_KBL_ULT(dev) (INTEL_DEVID(dev) == 0x5906 || \
- INTEL_DEVID(dev) == 0x5913 || \
- INTEL_DEVID(dev) == 0x5916 || \
- INTEL_DEVID(dev) == 0x5921 || \
- INTEL_DEVID(dev) == 0x5926)
-#define IS_KBL_ULX(dev) (INTEL_DEVID(dev) == 0x590E || \
- INTEL_DEVID(dev) == 0x5915 || \
- INTEL_DEVID(dev) == 0x591E)
-#define IS_SKL_GT3(dev) (IS_SKYLAKE(dev) && \
- (INTEL_DEVID(dev) & 0x00F0) == 0x0020)
-#define IS_SKL_GT4(dev) (IS_SKYLAKE(dev) && \
- (INTEL_DEVID(dev) & 0x00F0) == 0x0030)
-
-#define IS_PRELIMINARY_HW(intel_info) ((intel_info)->is_preliminary)
+#define IS_HSW_ULX(dev_priv) (INTEL_DEVID(dev_priv) == 0x0A0E || \
+ INTEL_DEVID(dev_priv) == 0x0A1E)
+#define IS_SKL_ULT(dev_priv) (INTEL_DEVID(dev_priv) == 0x1906 || \
+ INTEL_DEVID(dev_priv) == 0x1913 || \
+ INTEL_DEVID(dev_priv) == 0x1916 || \
+ INTEL_DEVID(dev_priv) == 0x1921 || \
+ INTEL_DEVID(dev_priv) == 0x1926)
+#define IS_SKL_ULX(dev_priv) (INTEL_DEVID(dev_priv) == 0x190E || \
+ INTEL_DEVID(dev_priv) == 0x1915 || \
+ INTEL_DEVID(dev_priv) == 0x191E)
+#define IS_KBL_ULT(dev_priv) (INTEL_DEVID(dev_priv) == 0x5906 || \
+ INTEL_DEVID(dev_priv) == 0x5913 || \
+ INTEL_DEVID(dev_priv) == 0x5916 || \
+ INTEL_DEVID(dev_priv) == 0x5921 || \
+ INTEL_DEVID(dev_priv) == 0x5926)
+#define IS_KBL_ULX(dev_priv) (INTEL_DEVID(dev_priv) == 0x590E || \
+ INTEL_DEVID(dev_priv) == 0x5915 || \
+ INTEL_DEVID(dev_priv) == 0x591E)
+#define IS_SKL_GT3(dev_priv) (IS_SKYLAKE(dev_priv) && \
+ (INTEL_DEVID(dev_priv) & 0x00F0) == 0x0020)
+#define IS_SKL_GT4(dev_priv) (IS_SKYLAKE(dev_priv) && \
+ (INTEL_DEVID(dev_priv) & 0x00F0) == 0x0030)
+
+#define IS_ALPHA_SUPPORT(intel_info) ((intel_info)->is_alpha_support)
#define SKL_REVID_A0 0x0
#define SKL_REVID_B0 0x1
@@ -2705,7 +2504,8 @@ struct drm_i915_cmd_table {
#define BXT_REVID_B0 0x3
#define BXT_REVID_C0 0x9
-#define IS_BXT_REVID(p, since, until) (IS_BROXTON(p) && IS_REVID(p, since, until))
+#define IS_BXT_REVID(dev_priv, since, until) \
+ (IS_BROXTON(dev_priv) && IS_REVID(dev_priv, since, until))
#define KBL_REVID_A0 0x0
#define KBL_REVID_B0 0x1
@@ -2713,8 +2513,8 @@ struct drm_i915_cmd_table {
#define KBL_REVID_D0 0x3
#define KBL_REVID_E0 0x4
-#define IS_KBL_REVID(p, since, until) \
- (IS_KABYLAKE(p) && IS_REVID(p, since, until))
+#define IS_KBL_REVID(dev_priv, since, until) \
+ (IS_KABYLAKE(dev_priv) && IS_REVID(dev_priv, since, until))
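
A sketch of how the platform and revision helpers compose at a workaround
site (the helper name is invented for illustration):

/* Sketch: apply a WA on Kabylake revisions A0 and B0 only. */
if (IS_KBL_REVID(dev_priv, KBL_REVID_A0, KBL_REVID_B0))
	apply_kbl_early_wa(dev_priv);	/* hypothetical helper */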
/*
* The genX designation typically refers to the render engine, so render
@@ -2722,14 +2522,14 @@ struct drm_i915_cmd_table {
* have their own (e.g. HAS_PCH_SPLIT for ILK+ display, IS_foo for particular
* chips, etc.).
*/
-#define IS_GEN2(dev) (!!(INTEL_INFO(dev)->gen_mask & BIT(1)))
-#define IS_GEN3(dev) (!!(INTEL_INFO(dev)->gen_mask & BIT(2)))
-#define IS_GEN4(dev) (!!(INTEL_INFO(dev)->gen_mask & BIT(3)))
-#define IS_GEN5(dev) (!!(INTEL_INFO(dev)->gen_mask & BIT(4)))
-#define IS_GEN6(dev) (!!(INTEL_INFO(dev)->gen_mask & BIT(5)))
-#define IS_GEN7(dev) (!!(INTEL_INFO(dev)->gen_mask & BIT(6)))
-#define IS_GEN8(dev) (!!(INTEL_INFO(dev)->gen_mask & BIT(7)))
-#define IS_GEN9(dev) (!!(INTEL_INFO(dev)->gen_mask & BIT(8)))
+#define IS_GEN2(dev_priv) (!!((dev_priv)->info.gen_mask & BIT(1)))
+#define IS_GEN3(dev_priv) (!!((dev_priv)->info.gen_mask & BIT(2)))
+#define IS_GEN4(dev_priv) (!!((dev_priv)->info.gen_mask & BIT(3)))
+#define IS_GEN5(dev_priv) (!!((dev_priv)->info.gen_mask & BIT(4)))
+#define IS_GEN6(dev_priv) (!!((dev_priv)->info.gen_mask & BIT(5)))
+#define IS_GEN7(dev_priv) (!!((dev_priv)->info.gen_mask & BIT(6)))
+#define IS_GEN8(dev_priv) (!!((dev_priv)->info.gen_mask & BIT(7)))
+#define IS_GEN9(dev_priv) (!!((dev_priv)->info.gen_mask & BIT(8)))
#define ENGINE_MASK(id) BIT(id)
#define RENDER_RING ENGINE_MASK(RCS)
@@ -2740,31 +2540,34 @@ struct drm_i915_cmd_table {
#define ALL_ENGINES (~0)
#define HAS_ENGINE(dev_priv, id) \
- (!!(INTEL_INFO(dev_priv)->ring_mask & ENGINE_MASK(id)))
+ (!!((dev_priv)->info.ring_mask & ENGINE_MASK(id)))
#define HAS_BSD(dev_priv) HAS_ENGINE(dev_priv, VCS)
#define HAS_BSD2(dev_priv) HAS_ENGINE(dev_priv, VCS2)
#define HAS_BLT(dev_priv) HAS_ENGINE(dev_priv, BCS)
#define HAS_VEBOX(dev_priv) HAS_ENGINE(dev_priv, VECS)
-#define HAS_LLC(dev) (INTEL_INFO(dev)->has_llc)
-#define HAS_SNOOP(dev) (INTEL_INFO(dev)->has_snoop)
-#define HAS_EDRAM(dev) (!!(__I915__(dev)->edram_cap & EDRAM_ENABLED))
-#define HAS_WT(dev) ((IS_HASWELL(dev) || IS_BROADWELL(dev)) && \
- HAS_EDRAM(dev))
-#define HWS_NEEDS_PHYSICAL(dev) (INTEL_INFO(dev)->hws_needs_physical)
+#define HAS_LLC(dev_priv) ((dev_priv)->info.has_llc)
+#define HAS_SNOOP(dev_priv) ((dev_priv)->info.has_snoop)
+#define HAS_EDRAM(dev_priv) (!!((dev_priv)->edram_cap & EDRAM_ENABLED))
+#define HAS_WT(dev_priv) ((IS_HASWELL(dev_priv) || \
+ IS_BROADWELL(dev_priv)) && HAS_EDRAM(dev_priv))
-#define HAS_HW_CONTEXTS(dev) (INTEL_INFO(dev)->has_hw_contexts)
-#define HAS_LOGICAL_RING_CONTEXTS(dev) (INTEL_INFO(dev)->has_logical_ring_contexts)
-#define USES_PPGTT(dev) (i915.enable_ppgtt)
-#define USES_FULL_PPGTT(dev) (i915.enable_ppgtt >= 2)
-#define USES_FULL_48BIT_PPGTT(dev) (i915.enable_ppgtt == 3)
+#define HWS_NEEDS_PHYSICAL(dev_priv) ((dev_priv)->info.hws_needs_physical)
-#define HAS_OVERLAY(dev) (INTEL_INFO(dev)->has_overlay)
-#define OVERLAY_NEEDS_PHYSICAL(dev) (INTEL_INFO(dev)->overlay_needs_physical)
+#define HAS_HW_CONTEXTS(dev_priv) ((dev_priv)->info.has_hw_contexts)
+#define HAS_LOGICAL_RING_CONTEXTS(dev_priv) \
+ ((dev_priv)->info.has_logical_ring_contexts)
+#define USES_PPGTT(dev_priv) (i915.enable_ppgtt)
+#define USES_FULL_PPGTT(dev_priv) (i915.enable_ppgtt >= 2)
+#define USES_FULL_48BIT_PPGTT(dev_priv) (i915.enable_ppgtt == 3)
+
+#define HAS_OVERLAY(dev_priv) ((dev_priv)->info.has_overlay)
+#define OVERLAY_NEEDS_PHYSICAL(dev_priv) \
+ ((dev_priv)->info.overlay_needs_physical)
/* Early gen2 have a totally busted CS tlb and require pinned batches. */
-#define HAS_BROKEN_CS_TLB(dev) (IS_I830(dev) || IS_845G(dev))
+#define HAS_BROKEN_CS_TLB(dev_priv) (IS_I830(dev_priv) || IS_845G(dev_priv))
/* WaRsDisableCoarsePowerGating:skl,bxt */
#define NEEDS_WaRsDisableCoarsePowerGating(dev_priv) \
@@ -2778,46 +2581,49 @@ struct drm_i915_cmd_table {
* legacy irq no. is shared with another device. The kernel then disables that
* interrupt source and so prevents the other device from working properly.
*/
-#define HAS_AUX_IRQ(dev) (INTEL_INFO(dev)->gen >= 5)
-#define HAS_GMBUS_IRQ(dev) (INTEL_INFO(dev)->has_gmbus_irq)
+#define HAS_AUX_IRQ(dev_priv) ((dev_priv)->info.gen >= 5)
+#define HAS_GMBUS_IRQ(dev_priv) ((dev_priv)->info.has_gmbus_irq)
/* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte
* rows, which changed the alignment requirements and fence programming.
*/
-#define HAS_128_BYTE_Y_TILING(dev) (!IS_GEN2(dev) && !(IS_I915G(dev) || \
- IS_I915GM(dev)))
-#define SUPPORTS_TV(dev) (INTEL_INFO(dev)->supports_tv)
-#define I915_HAS_HOTPLUG(dev) (INTEL_INFO(dev)->has_hotplug)
+#define HAS_128_BYTE_Y_TILING(dev_priv) (!IS_GEN2(dev_priv) && \
+ !(IS_I915G(dev_priv) || \
+ IS_I915GM(dev_priv)))
+#define SUPPORTS_TV(dev_priv) ((dev_priv)->info.supports_tv)
+#define I915_HAS_HOTPLUG(dev_priv) ((dev_priv)->info.has_hotplug)
+
+#define HAS_FW_BLC(dev_priv) (INTEL_GEN(dev_priv) > 2)
+#define HAS_PIPE_CXSR(dev_priv) ((dev_priv)->info.has_pipe_cxsr)
+#define HAS_FBC(dev_priv) ((dev_priv)->info.has_fbc)
-#define HAS_FW_BLC(dev) (INTEL_INFO(dev)->gen > 2)
-#define HAS_PIPE_CXSR(dev) (INTEL_INFO(dev)->has_pipe_cxsr)
-#define HAS_FBC(dev) (INTEL_INFO(dev)->has_fbc)
+#define HAS_IPS(dev_priv) (IS_HSW_ULT(dev_priv) || IS_BROADWELL(dev_priv))
-#define HAS_IPS(dev) (IS_HSW_ULT(dev) || IS_BROADWELL(dev))
+#define HAS_DP_MST(dev_priv) ((dev_priv)->info.has_dp_mst)
-#define HAS_DP_MST(dev) (INTEL_INFO(dev)->has_dp_mst)
+#define HAS_DDI(dev_priv) ((dev_priv)->info.has_ddi)
+#define HAS_FPGA_DBG_UNCLAIMED(dev_priv) ((dev_priv)->info.has_fpga_dbg)
+#define HAS_PSR(dev_priv) ((dev_priv)->info.has_psr)
+#define HAS_RC6(dev_priv) ((dev_priv)->info.has_rc6)
+#define HAS_RC6p(dev_priv) ((dev_priv)->info.has_rc6p)
-#define HAS_DDI(dev) (INTEL_INFO(dev)->has_ddi)
-#define HAS_FPGA_DBG_UNCLAIMED(dev) (INTEL_INFO(dev)->has_fpga_dbg)
-#define HAS_PSR(dev) (INTEL_INFO(dev)->has_psr)
-#define HAS_RUNTIME_PM(dev) (INTEL_INFO(dev)->has_runtime_pm)
-#define HAS_RC6(dev) (INTEL_INFO(dev)->has_rc6)
-#define HAS_RC6p(dev) (INTEL_INFO(dev)->has_rc6p)
+#define HAS_CSR(dev_priv) ((dev_priv)->info.has_csr)
-#define HAS_CSR(dev) (INTEL_INFO(dev)->has_csr)
+#define HAS_RUNTIME_PM(dev_priv) ((dev_priv)->info.has_runtime_pm)
+#define HAS_64BIT_RELOC(dev_priv) ((dev_priv)->info.has_64bit_reloc)
/*
* For now, anything with a GuC requires uCode loading, and then supports
* command submission once loaded. But these are logically independent
* properties, so we have separate macros to test them.
*/
-#define HAS_GUC(dev) (INTEL_INFO(dev)->has_guc)
-#define HAS_GUC_UCODE(dev) (HAS_GUC(dev))
-#define HAS_GUC_SCHED(dev) (HAS_GUC(dev))
+#define HAS_GUC(dev_priv) ((dev_priv)->info.has_guc)
+#define HAS_GUC_UCODE(dev_priv) (HAS_GUC(dev_priv))
+#define HAS_GUC_SCHED(dev_priv) (HAS_GUC(dev_priv))
-#define HAS_RESOURCE_STREAMER(dev) (INTEL_INFO(dev)->has_resource_streamer)
+#define HAS_RESOURCE_STREAMER(dev_priv) ((dev_priv)->info.has_resource_streamer)
-#define HAS_POOLED_EU(dev) (INTEL_INFO(dev)->has_pooled_eu)
+#define HAS_POOLED_EU(dev_priv) ((dev_priv)->info.has_pooled_eu)
#define INTEL_PCH_DEVICE_ID_MASK 0xff00
#define INTEL_PCH_IBX_DEVICE_ID_TYPE 0x3b00
@@ -2832,26 +2638,33 @@ struct drm_i915_cmd_table {
#define INTEL_PCH_P3X_DEVICE_ID_TYPE 0x7000
#define INTEL_PCH_QEMU_DEVICE_ID_TYPE 0x2900 /* qemu q35 has 2918 */
-#define INTEL_PCH_TYPE(dev) (__I915__(dev)->pch_type)
-#define HAS_PCH_KBP(dev) (INTEL_PCH_TYPE(dev) == PCH_KBP)
-#define HAS_PCH_SPT(dev) (INTEL_PCH_TYPE(dev) == PCH_SPT)
-#define HAS_PCH_LPT(dev) (INTEL_PCH_TYPE(dev) == PCH_LPT)
-#define HAS_PCH_LPT_LP(dev) (__I915__(dev)->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE)
-#define HAS_PCH_LPT_H(dev) (__I915__(dev)->pch_id == INTEL_PCH_LPT_DEVICE_ID_TYPE)
-#define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT)
-#define HAS_PCH_IBX(dev) (INTEL_PCH_TYPE(dev) == PCH_IBX)
-#define HAS_PCH_NOP(dev) (INTEL_PCH_TYPE(dev) == PCH_NOP)
-#define HAS_PCH_SPLIT(dev) (INTEL_PCH_TYPE(dev) != PCH_NONE)
+#define INTEL_PCH_TYPE(dev_priv) ((dev_priv)->pch_type)
+#define HAS_PCH_KBP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_KBP)
+#define HAS_PCH_SPT(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_SPT)
+#define HAS_PCH_LPT(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_LPT)
+#define HAS_PCH_LPT_LP(dev_priv) \
+ ((dev_priv)->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE)
+#define HAS_PCH_LPT_H(dev_priv) \
+ ((dev_priv)->pch_id == INTEL_PCH_LPT_DEVICE_ID_TYPE)
+#define HAS_PCH_CPT(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_CPT)
+#define HAS_PCH_IBX(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_IBX)
+#define HAS_PCH_NOP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_NOP)
+#define HAS_PCH_SPLIT(dev_priv) (INTEL_PCH_TYPE(dev_priv) != PCH_NONE)
-#define HAS_GMCH_DISPLAY(dev) (INTEL_INFO(dev)->has_gmch_display)
+#define HAS_GMCH_DISPLAY(dev_priv) ((dev_priv)->info.has_gmch_display)
+
+#define HAS_LSPCON(dev_priv) (IS_GEN9(dev_priv))
/* DPF == dynamic parity feature */
-#define HAS_L3_DPF(dev) (INTEL_INFO(dev)->has_l3_dpf)
-#define NUM_L3_SLICES(dev) (IS_HSW_GT3(dev) ? 2 : HAS_L3_DPF(dev))
+#define HAS_L3_DPF(dev_priv) ((dev_priv)->info.has_l3_dpf)
+#define NUM_L3_SLICES(dev_priv) (IS_HSW_GT3(dev_priv) ? \
+ 2 : HAS_L3_DPF(dev_priv))
#define GT_FREQUENCY_MULTIPLIER 50
#define GEN9_FREQ_SCALER 3
+#define HAS_DECOUPLED_MMIO(dev_priv) (INTEL_INFO(dev_priv)->has_decoupled_mmio)
+
#include "i915_trace.h"
static inline bool intel_scanout_needs_vtd_wa(struct drm_i915_private *dev_priv)
@@ -2882,6 +2695,8 @@ __i915_printk(struct drm_i915_private *dev_priv, const char *level,
#ifdef CONFIG_COMPAT
extern long i915_compat_ioctl(struct file *filp, unsigned int cmd,
unsigned long arg);
+#else
+#define i915_compat_ioctl NULL
#endif
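
Defining i915_compat_ioctl to NULL when CONFIG_COMPAT is off lets the fops
initializer stay unconditional; a hedged sketch of the intended use, relying
only on the standard struct file_operations fields:

static const struct file_operations i915_driver_fops = {
	.owner		= THIS_MODULE,
	.unlocked_ioctl	= drm_ioctl,
	.compat_ioctl	= i915_compat_ioctl,	/* NULL without CONFIG_COMPAT */
};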
extern const struct dev_pm_ops i915_pm_ops;
@@ -2893,6 +2708,7 @@ extern bool intel_has_gpu_reset(struct drm_i915_private *dev_priv);
extern void i915_reset(struct drm_i915_private *dev_priv);
extern int intel_guc_reset(struct drm_i915_private *dev_priv);
extern void intel_engine_init_hangcheck(struct intel_engine_cs *engine);
+extern void intel_hangcheck_init(struct drm_i915_private *dev_priv);
extern unsigned long i915_chipset_val(struct drm_i915_private *dev_priv);
extern unsigned long i915_mch_val(struct drm_i915_private *dev_priv);
extern unsigned long i915_gfx_val(struct drm_i915_private *dev_priv);
@@ -2974,7 +2790,7 @@ int intel_wait_for_register_fw(struct drm_i915_private *dev_priv,
static inline bool intel_gvt_active(struct drm_i915_private *dev_priv)
{
- return dev_priv->gvt.initialized;
+ return dev_priv->gvt;
}
static inline bool intel_vgpu_active(struct drm_i915_private *dev_priv)
@@ -3076,7 +2892,7 @@ int i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int i915_gem_wait_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
-void i915_gem_load_init(struct drm_device *dev);
+int i915_gem_load_init(struct drm_device *dev);
void i915_gem_load_cleanup(struct drm_device *dev);
void i915_gem_load_init_fences(struct drm_i915_private *dev_priv);
int i915_gem_freeze(struct drm_i915_private *dev_priv);
@@ -3087,7 +2903,7 @@ void i915_gem_object_free(struct drm_i915_gem_object *obj);
void i915_gem_object_init(struct drm_i915_gem_object *obj,
const struct drm_i915_gem_object_ops *ops);
struct drm_i915_gem_object *i915_gem_object_create(struct drm_device *dev,
- size_t size);
+ u64 size);
struct drm_i915_gem_object *i915_gem_object_create_from_data(
struct drm_device *dev, const void *data, size_t size);
void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file);
@@ -3100,77 +2916,86 @@ i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
u64 alignment,
u64 flags);
-int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
- u32 flags);
-void __i915_vma_set_map_and_fenceable(struct i915_vma *vma);
-int __must_check i915_vma_unbind(struct i915_vma *vma);
-void i915_vma_close(struct i915_vma *vma);
-void i915_vma_destroy(struct i915_vma *vma);
-
int i915_gem_object_unbind(struct drm_i915_gem_object *obj);
-int i915_gem_object_put_pages(struct drm_i915_gem_object *obj);
-void i915_gem_release_all_mmaps(struct drm_i915_private *dev_priv);
void i915_gem_release_mmap(struct drm_i915_gem_object *obj);
-int __must_check i915_gem_object_get_pages(struct drm_i915_gem_object *obj);
+void i915_gem_runtime_suspend(struct drm_i915_private *dev_priv);
-static inline int __sg_page_count(struct scatterlist *sg)
+static inline int __sg_page_count(const struct scatterlist *sg)
{
return sg->length >> PAGE_SHIFT;
}
+struct scatterlist *
+i915_gem_object_get_sg(struct drm_i915_gem_object *obj,
+ unsigned int n, unsigned int *offset);
+
+struct page *
+i915_gem_object_get_page(struct drm_i915_gem_object *obj,
+ unsigned int n);
+
struct page *
-i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj, int n);
+i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj,
+ unsigned int n);
-static inline dma_addr_t
-i915_gem_object_get_dma_address(struct drm_i915_gem_object *obj, int n)
+dma_addr_t
+i915_gem_object_get_dma_address(struct drm_i915_gem_object *obj,
+ unsigned long n);
+
+void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
+ struct sg_table *pages);
+int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj);
+
+static inline int __must_check
+i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
{
- if (n < obj->get_page.last) {
- obj->get_page.sg = obj->pages->sgl;
- obj->get_page.last = 0;
- }
+ might_lock(&obj->mm.lock);
- while (obj->get_page.last + __sg_page_count(obj->get_page.sg) <= n) {
- obj->get_page.last += __sg_page_count(obj->get_page.sg++);
- if (unlikely(sg_is_chain(obj->get_page.sg)))
- obj->get_page.sg = sg_chain_ptr(obj->get_page.sg);
- }
+ if (atomic_inc_not_zero(&obj->mm.pages_pin_count))
+ return 0;
- return sg_dma_address(obj->get_page.sg) + ((n - obj->get_page.last) << PAGE_SHIFT);
+ return __i915_gem_object_get_pages(obj);
}
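
Typical pairing for the new pin interface, as a hedged sketch; the fast path
is a lockless atomic_inc_not_zero(), falling back to
__i915_gem_object_get_pages() only when the count was zero:

/* Sketch: keep the backing pages resident across a CPU access. */
err = i915_gem_object_pin_pages(obj);
if (err)
	return err;

/* ... safe to use the object's page array here ... */

i915_gem_object_unpin_pages(obj);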
-static inline struct page *
-i915_gem_object_get_page(struct drm_i915_gem_object *obj, int n)
+static inline void
+__i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
{
- if (WARN_ON(n >= obj->base.size >> PAGE_SHIFT))
- return NULL;
-
- if (n < obj->get_page.last) {
- obj->get_page.sg = obj->pages->sgl;
- obj->get_page.last = 0;
- }
+ GEM_BUG_ON(!obj->mm.pages);
- while (obj->get_page.last + __sg_page_count(obj->get_page.sg) <= n) {
- obj->get_page.last += __sg_page_count(obj->get_page.sg++);
- if (unlikely(sg_is_chain(obj->get_page.sg)))
- obj->get_page.sg = sg_chain_ptr(obj->get_page.sg);
- }
+ atomic_inc(&obj->mm.pages_pin_count);
+}
- return nth_page(sg_page(obj->get_page.sg), n - obj->get_page.last);
+static inline bool
+i915_gem_object_has_pinned_pages(struct drm_i915_gem_object *obj)
+{
+ return atomic_read(&obj->mm.pages_pin_count);
}
-static inline void i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
+static inline void
+__i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
{
- BUG_ON(obj->pages == NULL);
- obj->pages_pin_count++;
+ GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
+ GEM_BUG_ON(!obj->mm.pages);
+
+ atomic_dec(&obj->mm.pages_pin_count);
+ GEM_BUG_ON(atomic_read(&obj->mm.pages_pin_count) < obj->bind_count);
}
-static inline void i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
+static inline void
+i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
{
- BUG_ON(obj->pages_pin_count == 0);
- obj->pages_pin_count--;
+ __i915_gem_object_unpin_pages(obj);
}
+enum i915_mm_subclass { /* lockdep subclass for obj->mm.lock */
+ I915_MM_NORMAL = 0,
+ I915_MM_SHRINKER
+};
+
+void __i915_gem_object_put_pages(struct drm_i915_gem_object *obj,
+ enum i915_mm_subclass subclass);
+void __i915_gem_object_invalidate(struct drm_i915_gem_object *obj);
+
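The lockdep subclass exists because the shrinker may need to take one
object's mm.lock while another is already held on the same path; a sketch of
the two expected call patterns (assumed, not from the patch):

/* Sketch: ordinary release path uses the default subclass ... */
__i915_gem_object_put_pages(obj, I915_MM_NORMAL);

/* ... while the shrinker uses its own, avoiding false lockdep splats. */
__i915_gem_object_put_pages(victim, I915_MM_SHRINKER);
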
enum i915_map_type {
I915_MAP_WB = 0,
I915_MAP_WC,
@@ -3186,8 +3011,8 @@ enum i915_map_type {
* the kernel address space. Based on the @type of mapping, the PTE will be
* set to either WriteBack or WriteCombine (via pgprot_t).
*
- * The caller must hold the struct_mutex, and is responsible for calling
- * i915_gem_object_unpin_map() when the mapping is no longer required.
+ * The caller is responsible for calling i915_gem_object_unpin_map() when the
+ * mapping is no longer required.
*
* Returns the pointer through which to access the mapped object, or an
* ERR_PTR() on error.
@@ -3203,12 +3028,9 @@ void *__must_check i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
* with your access, call i915_gem_object_unpin_map() to release the pin
* upon the mapping. Once the pin count reaches zero, that mapping may be
* removed.
- *
- * The caller must hold the struct_mutex.
*/
static inline void i915_gem_object_unpin_map(struct drm_i915_gem_object *obj)
{
- lockdep_assert_held(&obj->base.dev->struct_mutex);
i915_gem_object_unpin_pages(obj);
}
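
A usage sketch for the mapping helpers documented above (error handling kept
minimal):

/* Sketch: map the object write-back into the kernel, clear it, unmap. */
void *vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
if (IS_ERR(vaddr))
	return PTR_ERR(vaddr);

memset(vaddr, 0, obj->base.size);
i915_gem_object_unpin_map(obj);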
@@ -3241,7 +3063,7 @@ void i915_gem_track_fb(struct drm_i915_gem_object *old,
struct drm_i915_gem_object *new,
unsigned frontbuffer_bits);
-int __must_check i915_gem_set_seqno(struct drm_device *dev, u32 seqno);
+int __must_check i915_gem_set_global_seqno(struct drm_device *dev, u32 seqno);
struct drm_i915_gem_request *
i915_gem_find_active_request(struct intel_engine_cs *engine);
@@ -3270,19 +3092,25 @@ static inline u32 i915_reset_count(struct i915_gpu_error *error)
void i915_gem_reset(struct drm_i915_private *dev_priv);
void i915_gem_set_wedged(struct drm_i915_private *dev_priv);
-bool i915_gem_clflush_object(struct drm_i915_gem_object *obj, bool force);
+void i915_gem_clflush_object(struct drm_i915_gem_object *obj, bool force);
int __must_check i915_gem_init(struct drm_device *dev);
int __must_check i915_gem_init_hw(struct drm_device *dev);
-void i915_gem_init_swizzling(struct drm_device *dev);
+void i915_gem_init_swizzling(struct drm_i915_private *dev_priv);
void i915_gem_cleanup_engines(struct drm_device *dev);
int __must_check i915_gem_wait_for_idle(struct drm_i915_private *dev_priv,
unsigned int flags);
int __must_check i915_gem_suspend(struct drm_device *dev);
void i915_gem_resume(struct drm_device *dev);
int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
-int __must_check
-i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
- bool readonly);
+int i915_gem_object_wait(struct drm_i915_gem_object *obj,
+ unsigned int flags,
+ long timeout,
+ struct intel_rps_client *rps);
+int i915_gem_object_wait_priority(struct drm_i915_gem_object *obj,
+ unsigned int flags,
+ int priority);
+#define I915_PRIORITY_DISPLAY I915_PRIORITY_MAX
+
int __must_check
i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj,
bool write);
@@ -3342,57 +3170,17 @@ i915_gem_object_ggtt_offset(struct drm_i915_gem_object *o,
return i915_ggtt_offset(i915_gem_object_to_ggtt(o, view));
}
-/* i915_gem_fence.c */
+/* i915_gem_fence_reg.c */
int __must_check i915_vma_get_fence(struct i915_vma *vma);
int __must_check i915_vma_put_fence(struct i915_vma *vma);
-/**
- * i915_vma_pin_fence - pin fencing state
- * @vma: vma to pin fencing for
- *
- * This pins the fencing state (whether tiled or untiled) to make sure the
- * vma (and its object) is ready to be used as a scanout target. Fencing
- * status must be synchronized first by calling i915_vma_get_fence():
- *
- * The resulting fence pin reference must be released again with
- * i915_vma_unpin_fence().
- *
- * Returns:
- *
- * True if the vma has a fence, false otherwise.
- */
-static inline bool
-i915_vma_pin_fence(struct i915_vma *vma)
-{
- if (vma->fence) {
- vma->fence->pin_count++;
- return true;
- } else
- return false;
-}
-
-/**
- * i915_vma_unpin_fence - unpin fencing state
- * @vma: vma to unpin fencing for
- *
- * This releases the fence pin reference acquired through
- * i915_vma_pin_fence. It will handle both objects with and without an
- * attached fence correctly; callers do not need to distinguish this.
- */
-static inline void
-i915_vma_unpin_fence(struct i915_vma *vma)
-{
- if (vma->fence) {
- GEM_BUG_ON(vma->fence->pin_count <= 0);
- vma->fence->pin_count--;
- }
-}
-
-void i915_gem_restore_fences(struct drm_device *dev);
+void i915_gem_restore_fences(struct drm_i915_private *dev_priv);
-void i915_gem_detect_bit_6_swizzle(struct drm_device *dev);
-void i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj);
-void i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj);
+void i915_gem_detect_bit_6_swizzle(struct drm_i915_private *dev_priv);
+void i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj,
+ struct sg_table *pages);
+void i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj,
+ struct sg_table *pages);
/* i915_gem_context.c */
int __must_check i915_gem_context_init(struct drm_device *dev);
@@ -3402,6 +3190,9 @@ int i915_gem_context_open(struct drm_device *dev, struct drm_file *file);
void i915_gem_context_close(struct drm_device *dev, struct drm_file *file);
int i915_switch_context(struct drm_i915_gem_request *req);
int i915_gem_switch_to_kernel_context(struct drm_i915_private *dev_priv);
+struct i915_vma *
+i915_gem_context_pin_legacy(struct i915_gem_context *ctx,
+ unsigned int flags);
void i915_gem_context_free(struct kref *ctx_ref);
struct drm_i915_gem_object *
i915_gem_alloc_context_obj(struct drm_device *dev, size_t size);
@@ -3435,6 +3226,16 @@ static inline void i915_gem_context_put(struct i915_gem_context *ctx)
kref_put(&ctx->ref, i915_gem_context_free);
}
+static inline struct intel_timeline *
+i915_gem_context_lookup_timeline(struct i915_gem_context *ctx,
+ struct intel_engine_cs *engine)
+{
+ struct i915_address_space *vm;
+
+ vm = ctx->ppgtt ? &ctx->ppgtt->base : &ctx->i915->ggtt.base;
+ return &vm->timeline.engine[engine->id];
+}
+
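For illustration, how a request's timeline would be resolved with the new
helper (variable names invented for the sketch):

/* Sketch: per-context, per-engine timeline for an in-flight request. */
struct intel_timeline *tl =
	i915_gem_context_lookup_timeline(req->ctx, req->engine);
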
static inline bool i915_gem_context_is_default(const struct i915_gem_context *c)
{
return c->user_handle == DEFAULT_CONTEXT_HANDLE;
@@ -3478,7 +3279,7 @@ int i915_gem_stolen_insert_node_in_range(struct drm_i915_private *dev_priv,
u64 end);
void i915_gem_stolen_remove_node(struct drm_i915_private *dev_priv,
struct drm_mm_node *node);
-int i915_gem_init_stolen(struct drm_device *dev);
+int i915_gem_init_stolen(struct drm_i915_private *dev_priv);
void i915_gem_cleanup_stolen(struct drm_device *dev);
struct drm_i915_gem_object *
i915_gem_object_create_stolen(struct drm_device *dev, u32 size);
@@ -3488,6 +3289,11 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
u32 gtt_offset,
u32 size);
+/* i915_gem_internal.c */
+struct drm_i915_gem_object *
+i915_gem_object_create_internal(struct drm_i915_private *dev_priv,
+ unsigned int size);
+
/* i915_gem_shrinker.c */
unsigned long i915_gem_shrink(struct drm_i915_private *dev_priv,
unsigned long target,
@@ -3526,6 +3332,8 @@ static inline void intel_display_crc_init(struct drm_i915_private *dev_priv) {}
#endif
/* i915_gpu_error.c */
+#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
+
__printf(2, 3)
void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...);
int i915_error_state_to_str(struct drm_i915_error_state_buf *estr,
@@ -3546,7 +3354,20 @@ void i915_error_state_get(struct drm_device *dev,
void i915_error_state_put(struct i915_error_state_file_priv *error_priv);
void i915_destroy_error_state(struct drm_device *dev);
-void i915_get_extra_instdone(struct drm_i915_private *dev_priv, uint32_t *instdone);
+#else
+
+static inline void i915_capture_error_state(struct drm_i915_private *dev_priv,
+ u32 engine_mask,
+ const char *error_msg)
+{
+}
+
+static inline void i915_destroy_error_state(struct drm_device *dev)
+{
+}
+
+#endif
+
const char *i915_cache_level_str(struct drm_i915_private *i915, int type);
/* i915_cmd_parser.c */
@@ -3596,6 +3417,9 @@ bool intel_bios_is_port_dp_dual_mode(struct drm_i915_private *dev_priv, enum por
bool intel_bios_is_dsi_present(struct drm_i915_private *dev_priv, enum port *port);
bool intel_bios_is_port_hpd_inverted(struct drm_i915_private *dev_priv,
enum port port);
+bool intel_bios_is_lspcon_present(struct drm_i915_private *dev_priv,
+ enum port port);
+
/* intel_opregion.c */
#ifdef CONFIG_ACPI
@@ -3652,15 +3476,16 @@ void intel_device_info_dump(struct drm_i915_private *dev_priv);
/* modesetting */
extern void intel_modeset_init_hw(struct drm_device *dev);
-extern void intel_modeset_init(struct drm_device *dev);
+extern int intel_modeset_init(struct drm_device *dev);
extern void intel_modeset_gem_init(struct drm_device *dev);
extern void intel_modeset_cleanup(struct drm_device *dev);
extern int intel_connector_register(struct drm_connector *);
extern void intel_connector_unregister(struct drm_connector *);
-extern int intel_modeset_vga_set_state(struct drm_device *dev, bool state);
+extern int intel_modeset_vga_set_state(struct drm_i915_private *dev_priv,
+ bool state);
extern void intel_display_resume(struct drm_device *dev);
-extern void i915_redisable_vga(struct drm_device *dev);
-extern void i915_redisable_vga_power_on(struct drm_device *dev);
+extern void i915_redisable_vga(struct drm_i915_private *dev_priv);
+extern void i915_redisable_vga_power_on(struct drm_i915_private *dev_priv);
extern bool ironlake_set_drps(struct drm_i915_private *dev_priv, u8 val);
extern void intel_init_pch_refclk(struct drm_device *dev);
extern void intel_set_rps(struct drm_i915_private *dev_priv, u8 val);
@@ -3679,11 +3504,13 @@ extern void intel_overlay_print_error_state(struct drm_i915_error_state_buf *e,
extern struct intel_display_error_state *
intel_display_capture_error_state(struct drm_i915_private *dev_priv);
extern void intel_display_print_error_state(struct drm_i915_error_state_buf *e,
- struct drm_device *dev,
+ struct drm_i915_private *dev_priv,
struct intel_display_error_state *error);
int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u32 mbox, u32 *val);
int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u32 mbox, u32 val);
+int skl_pcode_request(struct drm_i915_private *dev_priv, u32 mbox, u32 request,
+ u32 reply_mask, u32 reply, int timeout_base_ms);
/* intel_sideband.c */
u32 vlv_punit_read(struct drm_i915_private *dev_priv, u32 addr);
@@ -3707,6 +3534,23 @@ u32 vlv_flisdsi_read(struct drm_i915_private *dev_priv, u32 reg);
void vlv_flisdsi_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
/* intel_dpio_phy.c */
+void bxt_port_to_phy_channel(enum port port,
+ enum dpio_phy *phy, enum dpio_channel *ch);
+void bxt_ddi_phy_set_signal_level(struct drm_i915_private *dev_priv,
+ enum port port, u32 margin, u32 scale,
+ u32 enable, u32 deemphasis);
+void bxt_ddi_phy_init(struct drm_i915_private *dev_priv, enum dpio_phy phy);
+void bxt_ddi_phy_uninit(struct drm_i915_private *dev_priv, enum dpio_phy phy);
+bool bxt_ddi_phy_is_enabled(struct drm_i915_private *dev_priv,
+ enum dpio_phy phy);
+bool bxt_ddi_phy_verify_state(struct drm_i915_private *dev_priv,
+ enum dpio_phy phy);
+uint8_t bxt_ddi_phy_calc_lane_lat_optim_mask(struct intel_encoder *encoder,
+ uint8_t lane_count);
+void bxt_ddi_phy_set_lane_optim_mask(struct intel_encoder *encoder,
+ uint8_t lane_lat_optim_mask);
+uint8_t bxt_ddi_phy_get_lane_lat_optim_mask(struct intel_encoder *encoder);
+
void chv_set_phy_signal_level(struct intel_encoder *encoder,
u32 deemph_reg_value, u32 margin_reg_value,
bool uniq_trans_scale);
@@ -3796,11 +3640,30 @@ __raw_write(64, q)
#undef __raw_write
/* These are untraced mmio-accessors that are only valid to be used inside
- * critical sections inside IRQ handlers where forcewake is explicitly
+ * critical sections, such as inside IRQ handlers, where forcewake is explicitly
* controlled.
+ *
* Think twice, and think again, before using these.
- * Note: Should only be used between intel_uncore_forcewake_irqlock() and
- * intel_uncore_forcewake_irqunlock().
+ *
+ * As an example, these accessors can possibly be used between:
+ *
+ * spin_lock_irq(&dev_priv->uncore.lock);
+ * intel_uncore_forcewake_get__locked();
+ *
+ * and
+ *
+ * intel_uncore_forcewake_put__locked();
+ * spin_unlock_irq(&dev_priv->uncore.lock);
+ *
+ *
+ * Note: some registers may not need forcewake held, so
+ * intel_uncore_forcewake_{get,put} can be omitted, see
+ * intel_uncore_forcewake_for_reg().
+ *
+ * Certain architectures will die if the same cacheline is concurrently accessed
+ * by different clients (e.g. on Ivybridge). Access to registers should
+ * therefore generally be serialised, by either the dev_priv->uncore.lock or
+ * a more localised lock guarding all access to that bank of registers.
*/
#define I915_READ_FW(reg__) __raw_i915_read32(dev_priv, (reg__))
#define I915_WRITE_FW(reg__, val__) __raw_i915_write32(dev_priv, (reg__), (val__))
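
Spelling out the pattern from the comment above as code (the register choice
is purely illustrative):

spin_lock_irq(&dev_priv->uncore.lock);
intel_uncore_forcewake_get__locked(dev_priv, FORCEWAKE_ALL);

I915_WRITE_FW(GEN6_RP_CONTROL, 0);	/* untraced write, forcewake held */

intel_uncore_forcewake_put__locked(dev_priv, FORCEWAKE_ALL);
spin_unlock_irq(&dev_priv->uncore.lock);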
@@ -3812,11 +3675,11 @@ __raw_write(64, q)
#define INTEL_BROADCAST_RGB_FULL 1
#define INTEL_BROADCAST_RGB_LIMITED 2
-static inline i915_reg_t i915_vgacntrl_reg(struct drm_device *dev)
+static inline i915_reg_t i915_vgacntrl_reg(struct drm_i915_private *dev_priv)
{
- if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
+ if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
return VLV_VGACNTRL;
- else if (INTEL_INFO(dev)->gen >= 5)
+ else if (INTEL_GEN(dev_priv) >= 5)
return CPU_VGACNTRL;
else
return VGACNTRL;
@@ -3877,7 +3740,7 @@ __i915_request_irq_complete(struct drm_i915_gem_request *req)
/* Before we do the heavier coherent read of the seqno,
* check the value (hopefully) in the CPU cacheline.
*/
- if (i915_gem_request_completed(req))
+ if (__i915_gem_request_completed(req))
return true;
/* Ensure our read of the seqno is coherent so that we
@@ -3928,7 +3791,7 @@ __i915_request_irq_complete(struct drm_i915_gem_request *req)
wake_up_process(tsk);
rcu_read_unlock();
- if (i915_gem_request_completed(req))
+ if (__i915_gem_request_completed(req))
return true;
}
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 00eb4814b913..4a31b7a891ec 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -29,12 +29,12 @@
#include <drm/drm_vma_manager.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
-#include "i915_gem_dmabuf.h"
#include "i915_vgpu.h"
#include "i915_trace.h"
#include "intel_drv.h"
#include "intel_frontbuffer.h"
#include "intel_mocs.h"
+#include <linux/dma-fence-array.h>
#include <linux/reservation.h>
#include <linux/shmem_fs.h>
#include <linux/slab.h>
@@ -42,13 +42,14 @@
#include <linux/pci.h>
#include <linux/dma-buf.h>
+static void i915_gem_flush_free_objects(struct drm_i915_private *i915);
static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj);
static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj);
static bool cpu_cache_is_coherent(struct drm_device *dev,
enum i915_cache_level level)
{
- return HAS_LLC(dev) || level != I915_CACHE_NONE;
+ return HAS_LLC(to_i915(dev)) || level != I915_CACHE_NONE;
}
static bool cpu_write_needs_clflush(struct drm_i915_gem_object *obj)
@@ -63,13 +64,13 @@ static bool cpu_write_needs_clflush(struct drm_i915_gem_object *obj)
}
static int
-insert_mappable_node(struct drm_i915_private *i915,
+insert_mappable_node(struct i915_ggtt *ggtt,
struct drm_mm_node *node, u32 size)
{
memset(node, 0, sizeof(*node));
- return drm_mm_insert_node_in_range_generic(&i915->ggtt.base.mm, node,
- size, 0, 0, 0,
- i915->ggtt.mappable_end,
+ return drm_mm_insert_node_in_range_generic(&ggtt->base.mm, node,
+ size, 0, -1,
+ 0, ggtt->mappable_end,
DRM_MM_SEARCH_DEFAULT,
DRM_MM_CREATE_DEFAULT);
}
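
A sketch of the intended pairing with remove_mappable_node() (PAGE_SIZE
chosen purely for illustration):

struct drm_mm_node node;
int err;

/* Sketch: borrow one page of the mappable aperture, then return it. */
err = insert_mappable_node(ggtt, &node, PAGE_SIZE);
if (err)
	return err;
/* ... use node.start as a GGTT offset for a temporary mapping ... */
remove_mappable_node(&node);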
@@ -82,7 +83,7 @@ remove_mappable_node(struct drm_mm_node *node)
/* some bookkeeping */
static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv,
- size_t size)
+ u64 size)
{
spin_lock(&dev_priv->mm.object_stat_lock);
dev_priv->mm.object_count++;
@@ -91,7 +92,7 @@ static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv,
}
static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv,
- size_t size)
+ u64 size)
{
spin_lock(&dev_priv->mm.object_stat_lock);
dev_priv->mm.object_count--;
@@ -104,6 +105,8 @@ i915_gem_wait_for_error(struct i915_gpu_error *error)
{
int ret;
+ might_sleep();
+
if (!i915_reset_in_progress(error))
return 0;
@@ -114,7 +117,7 @@ i915_gem_wait_for_error(struct i915_gpu_error *error)
*/
ret = wait_event_interruptible_timeout(error->reset_queue,
!i915_reset_in_progress(error),
- 10*HZ);
+ I915_RESET_TIMEOUT);
if (ret == 0) {
DRM_ERROR("Timed out waiting for the gpu reset to complete\n");
return -EIO;
@@ -167,25 +170,39 @@ i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
return 0;
}
-static int
+static struct sg_table *
i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
{
struct address_space *mapping = obj->base.filp->f_mapping;
- char *vaddr = obj->phys_handle->vaddr;
+ drm_dma_handle_t *phys;
struct sg_table *st;
struct scatterlist *sg;
+ char *vaddr;
int i;
if (WARN_ON(i915_gem_object_needs_bit17_swizzle(obj)))
- return -EINVAL;
+ return ERR_PTR(-EINVAL);
+
+ /* Always aligning to the object size allows a single allocation
+ * to handle all possible callers, and given typical object sizes,
+ * the alignment of the buddy allocation will naturally match.
+ */
+ phys = drm_pci_alloc(obj->base.dev,
+ obj->base.size,
+ roundup_pow_of_two(obj->base.size));
+ if (!phys)
+ return ERR_PTR(-ENOMEM);
+ vaddr = phys->vaddr;
for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
struct page *page;
char *src;
page = shmem_read_mapping_page(mapping, i);
- if (IS_ERR(page))
- return PTR_ERR(page);
+ if (IS_ERR(page)) {
+ st = ERR_CAST(page);
+ goto err_phys;
+ }
src = kmap_atomic(page);
memcpy(vaddr, src, PAGE_SIZE);
@@ -199,44 +216,56 @@ i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
i915_gem_chipset_flush(to_i915(obj->base.dev));
st = kmalloc(sizeof(*st), GFP_KERNEL);
- if (st == NULL)
- return -ENOMEM;
+ if (!st) {
+ st = ERR_PTR(-ENOMEM);
+ goto err_phys;
+ }
if (sg_alloc_table(st, 1, GFP_KERNEL)) {
kfree(st);
- return -ENOMEM;
+ st = ERR_PTR(-ENOMEM);
+ goto err_phys;
}
sg = st->sgl;
sg->offset = 0;
sg->length = obj->base.size;
- sg_dma_address(sg) = obj->phys_handle->busaddr;
+ sg_dma_address(sg) = phys->busaddr;
sg_dma_len(sg) = obj->base.size;
- obj->pages = st;
- return 0;
+ obj->phys_handle = phys;
+ return st;
+
+err_phys:
+ drm_pci_free(obj->base.dev, phys);
+ return st;
}
static void
-i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj)
+__i915_gem_object_release_shmem(struct drm_i915_gem_object *obj,
+ struct sg_table *pages)
{
- int ret;
+ GEM_BUG_ON(obj->mm.madv == __I915_MADV_PURGED);
- BUG_ON(obj->madv == __I915_MADV_PURGED);
+ if (obj->mm.madv == I915_MADV_DONTNEED)
+ obj->mm.dirty = false;
- ret = i915_gem_object_set_to_cpu_domain(obj, true);
- if (WARN_ON(ret)) {
- /* In the event of a disaster, abandon all caches and
- * hope for the best.
- */
- obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
- }
+ if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0 &&
+ !cpu_cache_is_coherent(obj->base.dev, obj->cache_level))
+ drm_clflush_sg(pages);
- if (obj->madv == I915_MADV_DONTNEED)
- obj->dirty = 0;
+ obj->base.read_domains = I915_GEM_DOMAIN_CPU;
+ obj->base.write_domain = I915_GEM_DOMAIN_CPU;
+}
- if (obj->dirty) {
+static void
+i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj,
+ struct sg_table *pages)
+{
+ __i915_gem_object_release_shmem(obj, pages);
+
+ if (obj->mm.dirty) {
struct address_space *mapping = obj->base.filp->f_mapping;
char *vaddr = obj->phys_handle->vaddr;
int i;
@@ -255,22 +284,24 @@ i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj)
kunmap_atomic(dst);
set_page_dirty(page);
- if (obj->madv == I915_MADV_WILLNEED)
+ if (obj->mm.madv == I915_MADV_WILLNEED)
mark_page_accessed(page);
put_page(page);
vaddr += PAGE_SIZE;
}
- obj->dirty = 0;
+ obj->mm.dirty = false;
}
- sg_free_table(obj->pages);
- kfree(obj->pages);
+ sg_free_table(pages);
+ kfree(pages);
+
+ drm_pci_free(obj->base.dev, obj->phys_handle);
}
static void
i915_gem_object_release_phys(struct drm_i915_gem_object *obj)
{
- drm_pci_free(obj->base.dev, obj->phys_handle);
+ i915_gem_object_unpin_pages(obj);
}
static const struct drm_i915_gem_object_ops i915_gem_phys_ops = {
@@ -292,7 +323,12 @@ int i915_gem_object_unbind(struct drm_i915_gem_object *obj)
* must wait for all rendering to complete to the object (as unbinding
* must anyway), and retire the requests.
*/
- ret = i915_gem_object_wait_rendering(obj, false);
+ ret = i915_gem_object_wait(obj,
+ I915_WAIT_INTERRUPTIBLE |
+ I915_WAIT_LOCKED |
+ I915_WAIT_ALL,
+ MAX_SCHEDULE_TIMEOUT,
+ NULL);
if (ret)
return ret;
@@ -311,90 +347,209 @@ int i915_gem_object_unbind(struct drm_i915_gem_object *obj)
return ret;
}
-/**
- * Ensures that all rendering to the object has completed and the object is
- * safe to unbind from the GTT or access from the CPU.
- * @obj: i915 gem object
- * @readonly: waiting for just read access or read-write access
- */
-int
-i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
- bool readonly)
+static long
+i915_gem_object_wait_fence(struct dma_fence *fence,
+ unsigned int flags,
+ long timeout,
+ struct intel_rps_client *rps)
{
- struct reservation_object *resv;
- struct i915_gem_active *active;
- unsigned long active_mask;
- int idx;
+ struct drm_i915_gem_request *rq;
- lockdep_assert_held(&obj->base.dev->struct_mutex);
+ BUILD_BUG_ON(I915_WAIT_INTERRUPTIBLE != 0x1);
- if (!readonly) {
- active = obj->last_read;
- active_mask = i915_gem_object_get_active(obj);
- } else {
- active_mask = 1;
- active = &obj->last_write;
+ if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
+ return timeout;
+
+ if (!dma_fence_is_i915(fence))
+ return dma_fence_wait_timeout(fence,
+ flags & I915_WAIT_INTERRUPTIBLE,
+ timeout);
+
+ rq = to_request(fence);
+ if (i915_gem_request_completed(rq))
+ goto out;
+
+ /* This client is about to stall waiting for the GPU. In many cases
+ * this is undesirable and limits the throughput of the system, as
+ * many clients cannot continue processing user input/output whilst
+ * blocked. RPS autotuning may take tens of milliseconds to respond
+ * to the GPU load and thus incurs additional latency for the client.
+ * We can circumvent that by promoting the GPU frequency to maximum
+ * before we wait. This makes the GPU throttle up much more quickly
+ * (good for benchmarks and user experience, e.g. window animations),
+ * but at a cost of spending more power processing the workload
+ * (bad for battery). Not all clients even want their results
+ * immediately and for them we should just let the GPU select its own
+ * frequency to maximise efficiency. To prevent a single client from
+ * forcing the clocks too high for the whole system, we only allow
+ * each client to waitboost once in a busy period.
+ */
+ if (rps) {
+ if (INTEL_GEN(rq->i915) >= 6)
+ gen6_rps_boost(rq->i915, rps, rq->emitted_jiffies);
+ else
+ rps = NULL;
}
- for_each_active(active_mask, idx) {
+ timeout = i915_wait_request(rq, flags, timeout);
+
+out:
+ if (flags & I915_WAIT_LOCKED && i915_gem_request_completed(rq))
+ i915_gem_request_retire_upto(rq);
+
+ if (rps && rq->global_seqno == intel_engine_last_submit(rq->engine)) {
+ /* The GPU is now idle and this client has stalled.
+ * Since no other client has submitted a request in the
+ * meantime, assume that this client is the only one
+ * supplying work to the GPU but is unable to keep that
+ * work supplied because it is waiting. Since the GPU is
+ * then never kept fully busy, RPS autoclocking will
+ * keep the clocks relatively low, causing further delays.
+ * Compensate by giving the synchronous client credit for
+ * a waitboost next time.
+ */
+ spin_lock(&rq->i915->rps.client_lock);
+ list_del_init(&rps->link);
+ spin_unlock(&rq->i915->rps.client_lock);
+ }
+
+ return timeout;
+}
+
+static long
+i915_gem_object_wait_reservation(struct reservation_object *resv,
+ unsigned int flags,
+ long timeout,
+ struct intel_rps_client *rps)
+{
+ struct dma_fence *excl;
+
+ if (flags & I915_WAIT_ALL) {
+ struct dma_fence **shared;
+ unsigned int count, i;
int ret;
- ret = i915_gem_active_wait(&active[idx],
- &obj->base.dev->struct_mutex);
+ ret = reservation_object_get_fences_rcu(resv,
+ &excl, &count, &shared);
if (ret)
return ret;
- }
- resv = i915_gem_object_get_dmabuf_resv(obj);
- if (resv) {
- long err;
+ for (i = 0; i < count; i++) {
+ timeout = i915_gem_object_wait_fence(shared[i],
+ flags, timeout,
+ rps);
+ if (timeout <= 0)
+ break;
+
+ dma_fence_put(shared[i]);
+ }
- err = reservation_object_wait_timeout_rcu(resv, !readonly, true,
- MAX_SCHEDULE_TIMEOUT);
- if (err < 0)
- return err;
+ for (; i < count; i++)
+ dma_fence_put(shared[i]);
+ kfree(shared);
+ } else {
+ excl = reservation_object_get_excl_rcu(resv);
}
- return 0;
+ if (excl && timeout > 0)
+ timeout = i915_gem_object_wait_fence(excl, flags, timeout, rps);
+
+ dma_fence_put(excl);
+
+ return timeout;
}
-/* A nonblocking variant of the above wait. Must be called prior to
- * acquiring the mutex for the object, as the object state may change
- * during this call. A reference must be held by the caller for the object.
- */
-static __must_check int
-__unsafe_wait_rendering(struct drm_i915_gem_object *obj,
- struct intel_rps_client *rps,
- bool readonly)
+static void __fence_set_priority(struct dma_fence *fence, int prio)
{
- struct i915_gem_active *active;
- unsigned long active_mask;
- int idx;
+ struct drm_i915_gem_request *rq;
+ struct intel_engine_cs *engine;
- active_mask = __I915_BO_ACTIVE(obj);
- if (!active_mask)
- return 0;
+ if (!dma_fence_is_i915(fence))
+ return;
+
+ rq = to_request(fence);
+ engine = rq->engine;
+ if (!engine->schedule)
+ return;
+
+ engine->schedule(rq, prio);
+}
- if (!readonly) {
- active = obj->last_read;
+static void fence_set_priority(struct dma_fence *fence, int prio)
+{
+ /* Recurse once into a fence-array */
+ if (dma_fence_is_array(fence)) {
+ struct dma_fence_array *array = to_dma_fence_array(fence);
+ int i;
+
+ for (i = 0; i < array->num_fences; i++)
+ __fence_set_priority(array->fences[i], prio);
} else {
- active_mask = 1;
- active = &obj->last_write;
+ __fence_set_priority(fence, prio);
}
+}
- for_each_active(active_mask, idx) {
+int
+i915_gem_object_wait_priority(struct drm_i915_gem_object *obj,
+ unsigned int flags,
+ int prio)
+{
+ struct dma_fence *excl;
+
+ if (flags & I915_WAIT_ALL) {
+ struct dma_fence **shared;
+ unsigned int count, i;
int ret;
- ret = i915_gem_active_wait_unlocked(&active[idx],
- I915_WAIT_INTERRUPTIBLE,
- NULL, rps);
+ ret = reservation_object_get_fences_rcu(obj->resv,
+ &excl, &count, &shared);
if (ret)
return ret;
+
+ for (i = 0; i < count; i++) {
+ fence_set_priority(shared[i], prio);
+ dma_fence_put(shared[i]);
+ }
+
+ kfree(shared);
+ } else {
+ excl = reservation_object_get_excl_rcu(obj->resv);
}
+ if (excl) {
+ fence_set_priority(excl, prio);
+ dma_fence_put(excl);
+ }
return 0;
}
+/**
+ * Waits for rendering to the object to be completed
+ * @obj: i915 gem object
+ * @flags: how to wait (under a lock, for all rendering or just for writes, etc.)
+ * @timeout: how long to wait
+ * @rps: client (user process) to charge for any waitboosting
+ */
+int
+i915_gem_object_wait(struct drm_i915_gem_object *obj,
+ unsigned int flags,
+ long timeout,
+ struct intel_rps_client *rps)
+{
+ might_sleep();
+#if IS_ENABLED(CONFIG_LOCKDEP)
+ GEM_BUG_ON(debug_locks &&
+ !!lockdep_is_held(&obj->base.dev->struct_mutex) !=
+ !!(flags & I915_WAIT_LOCKED));
+#endif
+ GEM_BUG_ON(timeout < 0);
+
+ timeout = i915_gem_object_wait_reservation(obj->resv,
+ flags, timeout,
+ rps);
+ return timeout < 0 ? timeout : 0;
+}
+
static struct intel_rps_client *to_rps_client(struct drm_file *file)
{
struct drm_i915_file_private *fpriv = file->driver_priv;
@@ -406,17 +561,15 @@ int
i915_gem_object_attach_phys(struct drm_i915_gem_object *obj,
int align)
{
- drm_dma_handle_t *phys;
int ret;
- if (obj->phys_handle) {
- if ((unsigned long)obj->phys_handle->vaddr & (align -1))
- return -EBUSY;
+ if (align > obj->base.size)
+ return -EINVAL;
+ if (obj->ops == &i915_gem_phys_ops)
return 0;
- }
- if (obj->madv != I915_MADV_WILLNEED)
+ if (obj->mm.madv != I915_MADV_WILLNEED)
return -EFAULT;
if (obj->base.filp == NULL)
@@ -426,35 +579,35 @@ i915_gem_object_attach_phys(struct drm_i915_gem_object *obj,
if (ret)
return ret;
- ret = i915_gem_object_put_pages(obj);
- if (ret)
- return ret;
-
- /* create a new object */
- phys = drm_pci_alloc(obj->base.dev, obj->base.size, align);
- if (!phys)
- return -ENOMEM;
+ __i915_gem_object_put_pages(obj, I915_MM_NORMAL);
+ if (obj->mm.pages)
+ return -EBUSY;
- obj->phys_handle = phys;
obj->ops = &i915_gem_phys_ops;
- return i915_gem_object_get_pages(obj);
+ return i915_gem_object_pin_pages(obj);
}
static int
i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
struct drm_i915_gem_pwrite *args,
- struct drm_file *file_priv)
+ struct drm_file *file)
{
struct drm_device *dev = obj->base.dev;
void *vaddr = obj->phys_handle->vaddr + args->offset;
char __user *user_data = u64_to_user_ptr(args->data_ptr);
- int ret = 0;
+ int ret;
/* We manually control the domain here and pretend that it
* remains coherent, i.e. in the GTT domain, like shmem_pwrite.
*/
- ret = i915_gem_object_wait_rendering(obj, false);
+ lockdep_assert_held(&obj->base.dev->struct_mutex);
+ ret = i915_gem_object_wait(obj,
+ I915_WAIT_INTERRUPTIBLE |
+ I915_WAIT_LOCKED |
+ I915_WAIT_ALL,
+ MAX_SCHEDULE_TIMEOUT,
+ to_rps_client(file));
if (ret)
return ret;
@@ -516,7 +669,7 @@ i915_gem_create(struct drm_file *file,
ret = drm_gem_handle_create(file, &obj->base, &handle);
/* drop reference from allocate - handle holds it now */
- i915_gem_object_put_unlocked(obj);
+ i915_gem_object_put(obj);
if (ret)
return ret;
@@ -548,6 +701,8 @@ i915_gem_create_ioctl(struct drm_device *dev, void *data,
{
struct drm_i915_gem_create *args = data;
+ i915_gem_flush_free_objects(to_i915(dev));
+
return i915_gem_create(file, dev,
args->size, &args->handle);
}
@@ -614,21 +769,24 @@ int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj,
{
int ret;
- *needs_clflush = 0;
+ lockdep_assert_held(&obj->base.dev->struct_mutex);
+ *needs_clflush = 0;
if (!i915_gem_object_has_struct_page(obj))
return -ENODEV;
- ret = i915_gem_object_wait_rendering(obj, true);
+ ret = i915_gem_object_wait(obj,
+ I915_WAIT_INTERRUPTIBLE |
+ I915_WAIT_LOCKED,
+ MAX_SCHEDULE_TIMEOUT,
+ NULL);
if (ret)
return ret;
- ret = i915_gem_object_get_pages(obj);
+ ret = i915_gem_object_pin_pages(obj);
if (ret)
return ret;
- i915_gem_object_pin_pages(obj);
-
i915_gem_object_flush_gtt_write_domain(obj);
/* If we're not in the cpu read domain, set ourself into the gtt
@@ -661,20 +819,25 @@ int i915_gem_obj_prepare_shmem_write(struct drm_i915_gem_object *obj,
{
int ret;
+ lockdep_assert_held(&obj->base.dev->struct_mutex);
+
*needs_clflush = 0;
if (!i915_gem_object_has_struct_page(obj))
return -ENODEV;
- ret = i915_gem_object_wait_rendering(obj, false);
+ ret = i915_gem_object_wait(obj,
+ I915_WAIT_INTERRUPTIBLE |
+ I915_WAIT_LOCKED |
+ I915_WAIT_ALL,
+ MAX_SCHEDULE_TIMEOUT,
+ NULL);
if (ret)
return ret;
- ret = i915_gem_object_get_pages(obj);
+ ret = i915_gem_object_pin_pages(obj);
if (ret)
return ret;
- i915_gem_object_pin_pages(obj);
-
i915_gem_object_flush_gtt_write_domain(obj);
/* If we're not in the cpu write domain, set ourself into the
@@ -704,7 +867,7 @@ int i915_gem_obj_prepare_shmem_write(struct drm_i915_gem_object *obj,
obj->cache_dirty = true;
intel_fb_obj_invalidate(obj, ORIGIN_CPU);
- obj->dirty = 1;
+ obj->mm.dirty = true;
/* return with the pages pinned */
return 0;
@@ -713,32 +876,6 @@ err_unpin:
return ret;
}
-/* Per-page copy function for the shmem pread fastpath.
- * Flushes invalid cachelines before reading the target if
- * needs_clflush is set. */
-static int
-shmem_pread_fast(struct page *page, int shmem_page_offset, int page_length,
- char __user *user_data,
- bool page_do_bit17_swizzling, bool needs_clflush)
-{
- char *vaddr;
- int ret;
-
- if (unlikely(page_do_bit17_swizzling))
- return -EINVAL;
-
- vaddr = kmap_atomic(page);
- if (needs_clflush)
- drm_clflush_virt_range(vaddr + shmem_page_offset,
- page_length);
- ret = __copy_to_user_inatomic(user_data,
- vaddr + shmem_page_offset,
- page_length);
- kunmap_atomic(vaddr);
-
- return ret ? -EFAULT : 0;
-}
-
static void
shmem_clflush_swizzled_range(char *addr, unsigned long length,
bool swizzled)
@@ -764,7 +901,7 @@ shmem_clflush_swizzled_range(char *addr, unsigned long length,
/* Only difference to the fast-path function is that this can handle bit17
* and uses non-atomic copy and kmap functions. */
static int
-shmem_pread_slow(struct page *page, int shmem_page_offset, int page_length,
+shmem_pread_slow(struct page *page, int offset, int length,
char __user *user_data,
bool page_do_bit17_swizzling, bool needs_clflush)
{
@@ -773,60 +910,130 @@ shmem_pread_slow(struct page *page, int shmem_page_offset, int page_length,
vaddr = kmap(page);
if (needs_clflush)
- shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
- page_length,
+ shmem_clflush_swizzled_range(vaddr + offset, length,
page_do_bit17_swizzling);
if (page_do_bit17_swizzling)
- ret = __copy_to_user_swizzled(user_data,
- vaddr, shmem_page_offset,
- page_length);
+ ret = __copy_to_user_swizzled(user_data, vaddr, offset, length);
else
- ret = __copy_to_user(user_data,
- vaddr + shmem_page_offset,
- page_length);
+ ret = __copy_to_user(user_data, vaddr + offset, length);
kunmap(page);
return ret ? -EFAULT : 0;
}
-static inline unsigned long
-slow_user_access(struct io_mapping *mapping,
- uint64_t page_base, int page_offset,
- char __user *user_data,
- unsigned long length, bool pwrite)
+static int
+shmem_pread(struct page *page, int offset, int length, char __user *user_data,
+ bool page_do_bit17_swizzling, bool needs_clflush)
+{
+ int ret;
+
+ ret = -ENODEV;
+ if (!page_do_bit17_swizzling) {
+ char *vaddr = kmap_atomic(page);
+
+ if (needs_clflush)
+ drm_clflush_virt_range(vaddr + offset, length);
+ ret = __copy_to_user_inatomic(user_data, vaddr + offset, length);
+ kunmap_atomic(vaddr);
+ }
+ if (ret == 0)
+ return 0;
+
+ return shmem_pread_slow(page, offset, length, user_data,
+ page_do_bit17_swizzling, needs_clflush);
+}
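The control flow above is a sentinel pattern: try the atomic fast path, and fall back to the sleeping slow path on any nonzero result (the real code seeds ret with -ENODEV and the atomic copy returns the count of bytes left uncopied). A runnable standalone analogue, with plain memcpy stand-ins for the kernel copy helpers:

	#include <stdio.h>
	#include <string.h>

	/* stand-in for __copy_to_user_inatomic(): may "fault" */
	static int copy_fast(char *dst, const char *src, int len, int fault)
	{
		if (fault)
			return len;	/* bytes left uncopied, like the real helper */
		memcpy(dst, src, len);
		return 0;
	}

	/* stand-in for the sleeping slow path, which may take page faults */
	static int copy_slow(char *dst, const char *src, int len)
	{
		memcpy(dst, src, len);
		return 0;
	}

	static int do_read(char *dst, const char *src, int len,
			   int swizzled, int fault)
	{
		int ret = -1;	/* sentinel: fast path skipped or failed */

		if (!swizzled)
			ret = copy_fast(dst, src, len, fault) ? -1 : 0;
		if (ret == 0)
			return 0;

		return copy_slow(dst, src, len);
	}

	int main(void)
	{
		char buf[8];

		do_read(buf, "fast", 5, 0, 0);
		printf("%s\n", buf);		/* fast */
		do_read(buf, "slow", 5, 0, 1);	/* fast path faults, falls back */
		printf("%s\n", buf);		/* slow */
		return 0;
	}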
+
+static int
+i915_gem_shmem_pread(struct drm_i915_gem_object *obj,
+ struct drm_i915_gem_pread *args)
+{
+ char __user *user_data;
+ u64 remain;
+ unsigned int obj_do_bit17_swizzling;
+ unsigned int needs_clflush;
+ unsigned int idx, offset;
+ int ret;
+
+ obj_do_bit17_swizzling = 0;
+ if (i915_gem_object_needs_bit17_swizzle(obj))
+ obj_do_bit17_swizzling = BIT(17);
+
+ ret = mutex_lock_interruptible(&obj->base.dev->struct_mutex);
+ if (ret)
+ return ret;
+
+ ret = i915_gem_obj_prepare_shmem_read(obj, &needs_clflush);
+ mutex_unlock(&obj->base.dev->struct_mutex);
+ if (ret)
+ return ret;
+
+ remain = args->size;
+ user_data = u64_to_user_ptr(args->data_ptr);
+ offset = offset_in_page(args->offset);
+ for (idx = args->offset >> PAGE_SHIFT; remain; idx++) {
+ struct page *page = i915_gem_object_get_page(obj, idx);
+ int length;
+
+ length = remain;
+ if (offset + length > PAGE_SIZE)
+ length = PAGE_SIZE - offset;
+
+ ret = shmem_pread(page, offset, length, user_data,
+ page_to_phys(page) & obj_do_bit17_swizzling,
+ needs_clflush);
+ if (ret)
+ break;
+
+ remain -= length;
+ user_data += length;
+ offset = 0;
+ }
+
+ i915_gem_obj_finish_shmem_access(obj);
+ return ret;
+}
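The loop above walks user-visible offsets page by page: only the first iteration starts at offset_in_page(args->offset), every later page starts at 0, and the length is clamped to the page boundary. It also precomputes the bit17 mask so that page_to_phys(page) & obj_do_bit17_swizzling collapses to false when swizzling is off. A standalone sketch of just that arithmetic (PAGE_SHIFT value assumed):

	#include <stdio.h>

	#define PAGE_SHIFT 12
	#define PAGE_SIZE (1UL << PAGE_SHIFT)
	#define BIT(n) (1UL << (n))

	int main(void)
	{
		unsigned long start = 0x1f00;	/* args->offset */
		unsigned long remain = 0x0300;	/* args->size */
		unsigned long offset = start & (PAGE_SIZE - 1);	/* offset_in_page() */
		unsigned long mask = BIT(17);	/* 0 when no swizzle is needed */

		for (unsigned long idx = start >> PAGE_SHIFT; remain; idx++) {
			unsigned long len = remain;
			unsigned long phys = idx << PAGE_SHIFT;	/* stand-in address */

			if (offset + len > PAGE_SIZE)
				len = PAGE_SIZE - offset;

			/* phys & mask is nonzero only for swizzled pages */
			printf("page %lu: offset %#lx len %#lx swizzle=%d\n",
			       idx, offset, len, (phys & mask) ? 1 : 0);

			remain -= len;
			offset = 0;	/* later pages start at their beginning */
		}
		return 0;
	}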
+
+static inline bool
+gtt_user_read(struct io_mapping *mapping,
+ loff_t base, int offset,
+ char __user *user_data, int length)
{
- void __iomem *ioaddr;
void *vaddr;
- uint64_t unwritten;
+ unsigned long unwritten;
- ioaddr = io_mapping_map_wc(mapping, page_base, PAGE_SIZE);
/* We can use the cpu mem copy function because this is X86. */
- vaddr = (void __force *)ioaddr + page_offset;
- if (pwrite)
- unwritten = __copy_from_user(vaddr, user_data, length);
- else
- unwritten = __copy_to_user(user_data, vaddr, length);
-
- io_mapping_unmap(ioaddr);
+ vaddr = (void __force *)io_mapping_map_atomic_wc(mapping, base);
+ unwritten = __copy_to_user_inatomic(user_data, vaddr + offset, length);
+ io_mapping_unmap_atomic(vaddr);
+ if (unwritten) {
+ vaddr = (void __force *)
+ io_mapping_map_wc(mapping, base, PAGE_SIZE);
+ unwritten = copy_to_user(user_data, vaddr + offset, length);
+ io_mapping_unmap(vaddr);
+ }
return unwritten;
}
static int
-i915_gem_gtt_pread(struct drm_device *dev,
- struct drm_i915_gem_object *obj, uint64_t size,
- uint64_t data_offset, uint64_t data_ptr)
+i915_gem_gtt_pread(struct drm_i915_gem_object *obj,
+ const struct drm_i915_gem_pread *args)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct i915_ggtt *ggtt = &dev_priv->ggtt;
- struct i915_vma *vma;
+ struct drm_i915_private *i915 = to_i915(obj->base.dev);
+ struct i915_ggtt *ggtt = &i915->ggtt;
struct drm_mm_node node;
- char __user *user_data;
- uint64_t remain;
- uint64_t offset;
+ struct i915_vma *vma;
+ void __user *user_data;
+ u64 remain, offset;
int ret;
- vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, PIN_MAPPABLE);
+ ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
+ if (ret)
+ return ret;
+
+ intel_runtime_pm_get(i915);
+ vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
+ PIN_MAPPABLE | PIN_NONBLOCK);
if (!IS_ERR(vma)) {
node.start = i915_ggtt_offset(vma);
node.allocated = false;
@@ -837,35 +1044,21 @@ i915_gem_gtt_pread(struct drm_device *dev,
}
}
if (IS_ERR(vma)) {
- ret = insert_mappable_node(dev_priv, &node, PAGE_SIZE);
+ ret = insert_mappable_node(ggtt, &node, PAGE_SIZE);
if (ret)
- goto out;
-
- ret = i915_gem_object_get_pages(obj);
- if (ret) {
- remove_mappable_node(&node);
- goto out;
- }
-
- i915_gem_object_pin_pages(obj);
+ goto out_unlock;
+ GEM_BUG_ON(!node.allocated);
}
ret = i915_gem_object_set_to_gtt_domain(obj, false);
if (ret)
goto out_unpin;
- user_data = u64_to_user_ptr(data_ptr);
- remain = size;
- offset = data_offset;
+ mutex_unlock(&i915->drm.struct_mutex);
- mutex_unlock(&dev->struct_mutex);
- if (likely(!i915.prefault_disable)) {
- ret = fault_in_pages_writeable(user_data, remain);
- if (ret) {
- mutex_lock(&dev->struct_mutex);
- goto out_unpin;
- }
- }
+ user_data = u64_to_user_ptr(args->data_ptr);
+ remain = args->size;
+ offset = args->offset;
while (remain > 0) {
/* Operation in this page
@@ -882,19 +1075,14 @@ i915_gem_gtt_pread(struct drm_device *dev,
wmb();
ggtt->base.insert_page(&ggtt->base,
i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT),
- node.start,
- I915_CACHE_NONE, 0);
+ node.start, I915_CACHE_NONE, 0);
wmb();
} else {
page_base += offset & PAGE_MASK;
}
- /* This is a slow read/write as it tries to read from
- * and write to user memory which may result into page
- * faults, and so we cannot perform this under struct_mutex.
- */
- if (slow_user_access(&ggtt->mappable, page_base,
- page_offset, user_data,
- page_length, false)) {
+
+ if (gtt_user_read(&ggtt->mappable, page_base, page_offset,
+ user_data, page_length)) {
ret = -EFAULT;
break;
}
@@ -904,111 +1092,19 @@ i915_gem_gtt_pread(struct drm_device *dev,
offset += page_length;
}
- mutex_lock(&dev->struct_mutex);
- if (ret == 0 && (obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0) {
- /* The user has modified the object whilst we tried
- * reading from it, and we now have no idea what domain
- * the pages should be in. As we have just been touching
- * them directly, flush everything back to the GTT
- * domain.
- */
- ret = i915_gem_object_set_to_gtt_domain(obj, false);
- }
-
+ mutex_lock(&i915->drm.struct_mutex);
out_unpin:
if (node.allocated) {
wmb();
ggtt->base.clear_range(&ggtt->base,
- node.start, node.size,
- true);
- i915_gem_object_unpin_pages(obj);
+ node.start, node.size);
remove_mappable_node(&node);
} else {
i915_vma_unpin(vma);
}
-out:
- return ret;
-}
-
-static int
-i915_gem_shmem_pread(struct drm_device *dev,
- struct drm_i915_gem_object *obj,
- struct drm_i915_gem_pread *args,
- struct drm_file *file)
-{
- char __user *user_data;
- ssize_t remain;
- loff_t offset;
- int shmem_page_offset, page_length, ret = 0;
- int obj_do_bit17_swizzling, page_do_bit17_swizzling;
- int prefaulted = 0;
- int needs_clflush = 0;
- struct sg_page_iter sg_iter;
-
- ret = i915_gem_obj_prepare_shmem_read(obj, &needs_clflush);
- if (ret)
- return ret;
-
- obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
- user_data = u64_to_user_ptr(args->data_ptr);
- offset = args->offset;
- remain = args->size;
-
- for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents,
- offset >> PAGE_SHIFT) {
- struct page *page = sg_page_iter_page(&sg_iter);
-
- if (remain <= 0)
- break;
-
- /* Operation in this page
- *
- * shmem_page_offset = offset within page in shmem file
- * page_length = bytes to copy for this page
- */
- shmem_page_offset = offset_in_page(offset);
- page_length = remain;
- if ((shmem_page_offset + page_length) > PAGE_SIZE)
- page_length = PAGE_SIZE - shmem_page_offset;
-
- page_do_bit17_swizzling = obj_do_bit17_swizzling &&
- (page_to_phys(page) & (1 << 17)) != 0;
-
- ret = shmem_pread_fast(page, shmem_page_offset, page_length,
- user_data, page_do_bit17_swizzling,
- needs_clflush);
- if (ret == 0)
- goto next_page;
-
- mutex_unlock(&dev->struct_mutex);
-
- if (likely(!i915.prefault_disable) && !prefaulted) {
- ret = fault_in_pages_writeable(user_data, remain);
- /* Userspace is tricking us, but we've already clobbered
- * its pages with the prefault and promised to write the
- * data up to the first fault. Hence ignore any errors
- * and just continue. */
- (void)ret;
- prefaulted = 1;
- }
-
- ret = shmem_pread_slow(page, shmem_page_offset, page_length,
- user_data, page_do_bit17_swizzling,
- needs_clflush);
-
- mutex_lock(&dev->struct_mutex);
-
- if (ret)
- goto out;
-
-next_page:
- remain -= page_length;
- user_data += page_length;
- offset += page_length;
- }
-
-out:
- i915_gem_obj_finish_shmem_access(obj);
+out_unlock:
+ intel_runtime_pm_put(i915);
+ mutex_unlock(&i915->drm.struct_mutex);
return ret;
}
@@ -1027,7 +1123,7 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data,
{
struct drm_i915_gem_pread *args = data;
struct drm_i915_gem_object *obj;
- int ret = 0;
+ int ret;
if (args->size == 0)
return 0;
@@ -1045,36 +1141,29 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data,
if (args->offset > obj->base.size ||
args->size > obj->base.size - args->offset) {
ret = -EINVAL;
- goto err;
+ goto out;
}
trace_i915_gem_object_pread(obj, args->offset, args->size);
- ret = __unsafe_wait_rendering(obj, to_rps_client(file), true);
+ ret = i915_gem_object_wait(obj,
+ I915_WAIT_INTERRUPTIBLE,
+ MAX_SCHEDULE_TIMEOUT,
+ to_rps_client(file));
if (ret)
- goto err;
+ goto out;
- ret = i915_mutex_lock_interruptible(dev);
+ ret = i915_gem_object_pin_pages(obj);
if (ret)
- goto err;
-
- ret = i915_gem_shmem_pread(dev, obj, args, file);
+ goto out;
- /* pread for non shmem backed objects */
- if (ret == -EFAULT || ret == -ENODEV) {
- intel_runtime_pm_get(to_i915(dev));
- ret = i915_gem_gtt_pread(dev, obj, args->size,
- args->offset, args->data_ptr);
- intel_runtime_pm_put(to_i915(dev));
- }
+ ret = i915_gem_shmem_pread(obj, args);
+ if (ret == -EFAULT || ret == -ENODEV)
+ ret = i915_gem_gtt_pread(obj, args);
+ i915_gem_object_unpin_pages(obj);
+out:
i915_gem_object_put(obj);
- mutex_unlock(&dev->struct_mutex);
-
- return ret;
-
-err:
- i915_gem_object_put_unlocked(obj);
return ret;
}
@@ -1082,51 +1171,52 @@ err:
* page faults in the source data
*/
-static inline int
-fast_user_write(struct io_mapping *mapping,
- loff_t page_base, int page_offset,
- char __user *user_data,
- int length)
+static inline bool
+ggtt_write(struct io_mapping *mapping,
+ loff_t base, int offset,
+ char __user *user_data, int length)
{
- void __iomem *vaddr_atomic;
void *vaddr;
unsigned long unwritten;
- vaddr_atomic = io_mapping_map_atomic_wc(mapping, page_base);
/* We can use the cpu mem copy function because this is X86. */
- vaddr = (void __force*)vaddr_atomic + page_offset;
- unwritten = __copy_from_user_inatomic_nocache(vaddr,
+ vaddr = (void __force *)io_mapping_map_atomic_wc(mapping, base);
+ unwritten = __copy_from_user_inatomic_nocache(vaddr + offset,
user_data, length);
- io_mapping_unmap_atomic(vaddr_atomic);
+ io_mapping_unmap_atomic(vaddr);
+ if (unwritten) {
+ vaddr = (void __force *)
+ io_mapping_map_wc(mapping, base, PAGE_SIZE);
+ unwritten = copy_from_user(vaddr + offset, user_data, length);
+ io_mapping_unmap(vaddr);
+ }
+
return unwritten;
}
/**
* This is the fast pwrite path, where we copy the data directly from the
* user into the GTT, uncached.
- * @i915: i915 device private data
- * @obj: i915 gem object
+ * @obj: i915 GEM object
* @args: pwrite arguments structure
- * @file: drm file pointer
*/
static int
-i915_gem_gtt_pwrite_fast(struct drm_i915_private *i915,
- struct drm_i915_gem_object *obj,
- struct drm_i915_gem_pwrite *args,
- struct drm_file *file)
+i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
+ const struct drm_i915_gem_pwrite *args)
{
+ struct drm_i915_private *i915 = to_i915(obj->base.dev);
struct i915_ggtt *ggtt = &i915->ggtt;
- struct drm_device *dev = obj->base.dev;
- struct i915_vma *vma;
struct drm_mm_node node;
- uint64_t remain, offset;
- char __user *user_data;
+ struct i915_vma *vma;
+ u64 remain, offset;
+ void __user *user_data;
int ret;
- bool hit_slow_path = false;
- if (i915_gem_object_is_tiled(obj))
- return -EFAULT;
+ ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
+ if (ret)
+ return ret;
+ intel_runtime_pm_get(i915);
vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
PIN_MAPPABLE | PIN_NONBLOCK);
if (!IS_ERR(vma)) {
@@ -1139,25 +1229,19 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_private *i915,
}
}
if (IS_ERR(vma)) {
- ret = insert_mappable_node(i915, &node, PAGE_SIZE);
+ ret = insert_mappable_node(ggtt, &node, PAGE_SIZE);
if (ret)
- goto out;
-
- ret = i915_gem_object_get_pages(obj);
- if (ret) {
- remove_mappable_node(&node);
- goto out;
- }
-
- i915_gem_object_pin_pages(obj);
+ goto out_unlock;
+ GEM_BUG_ON(!node.allocated);
}
ret = i915_gem_object_set_to_gtt_domain(obj, true);
if (ret)
goto out_unpin;
+ mutex_unlock(&i915->drm.struct_mutex);
+
intel_fb_obj_invalidate(obj, ORIGIN_CPU);
- obj->dirty = true;
user_data = u64_to_user_ptr(args->data_ptr);
offset = args->offset;
@@ -1170,8 +1254,8 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_private *i915,
* page_length = bytes to copy for this page
*/
u32 page_base = node.start;
- unsigned page_offset = offset_in_page(offset);
- unsigned page_length = PAGE_SIZE - page_offset;
+ unsigned int page_offset = offset_in_page(offset);
+ unsigned int page_length = PAGE_SIZE - page_offset;
page_length = remain < page_length ? remain : page_length;
if (node.allocated) {
wmb(); /* flush the write before we modify the GGTT */
@@ -1188,92 +1272,36 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_private *i915,
* If the object is non-shmem backed, we retry with the
* path that handles page faults.
*/
- if (fast_user_write(&ggtt->mappable, page_base,
- page_offset, user_data, page_length)) {
- hit_slow_path = true;
- mutex_unlock(&dev->struct_mutex);
- if (slow_user_access(&ggtt->mappable,
- page_base,
- page_offset, user_data,
- page_length, true)) {
- ret = -EFAULT;
- mutex_lock(&dev->struct_mutex);
- goto out_flush;
- }
-
- mutex_lock(&dev->struct_mutex);
+ if (ggtt_write(&ggtt->mappable, page_base, page_offset,
+ user_data, page_length)) {
+ ret = -EFAULT;
+ break;
}
remain -= page_length;
user_data += page_length;
offset += page_length;
}
-
-out_flush:
- if (hit_slow_path) {
- if (ret == 0 &&
- (obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0) {
- /* The user has modified the object whilst we tried
- * reading from it, and we now have no idea what domain
- * the pages should be in. As we have just been touching
- * them directly, flush everything back to the GTT
- * domain.
- */
- ret = i915_gem_object_set_to_gtt_domain(obj, false);
- }
- }
-
intel_fb_obj_flush(obj, false, ORIGIN_CPU);
+
+ mutex_lock(&i915->drm.struct_mutex);
out_unpin:
if (node.allocated) {
wmb();
ggtt->base.clear_range(&ggtt->base,
- node.start, node.size,
- true);
- i915_gem_object_unpin_pages(obj);
+ node.start, node.size);
remove_mappable_node(&node);
} else {
i915_vma_unpin(vma);
}
-out:
+out_unlock:
+ intel_runtime_pm_put(i915);
+ mutex_unlock(&i915->drm.struct_mutex);
return ret;
}
-/* Per-page copy function for the shmem pwrite fastpath.
- * Flushes invalid cachelines before writing to the target if
- * needs_clflush_before is set and flushes out any written cachelines after
- * writing if needs_clflush is set. */
static int
-shmem_pwrite_fast(struct page *page, int shmem_page_offset, int page_length,
- char __user *user_data,
- bool page_do_bit17_swizzling,
- bool needs_clflush_before,
- bool needs_clflush_after)
-{
- char *vaddr;
- int ret;
-
- if (unlikely(page_do_bit17_swizzling))
- return -EINVAL;
-
- vaddr = kmap_atomic(page);
- if (needs_clflush_before)
- drm_clflush_virt_range(vaddr + shmem_page_offset,
- page_length);
- ret = __copy_from_user_inatomic(vaddr + shmem_page_offset,
- user_data, page_length);
- if (needs_clflush_after)
- drm_clflush_virt_range(vaddr + shmem_page_offset,
- page_length);
- kunmap_atomic(vaddr);
-
- return ret ? -EFAULT : 0;
-}
-
-/* Only difference to the fast-path function is that this can handle bit17
- * and uses non-atomic copy and kmap functions. */
-static int
-shmem_pwrite_slow(struct page *page, int shmem_page_offset, int page_length,
+shmem_pwrite_slow(struct page *page, int offset, int length,
char __user *user_data,
bool page_do_bit17_swizzling,
bool needs_clflush_before,
@@ -1284,124 +1312,114 @@ shmem_pwrite_slow(struct page *page, int shmem_page_offset, int page_length,
vaddr = kmap(page);
if (unlikely(needs_clflush_before || page_do_bit17_swizzling))
- shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
- page_length,
+ shmem_clflush_swizzled_range(vaddr + offset, length,
page_do_bit17_swizzling);
if (page_do_bit17_swizzling)
- ret = __copy_from_user_swizzled(vaddr, shmem_page_offset,
- user_data,
- page_length);
+ ret = __copy_from_user_swizzled(vaddr, offset, user_data,
+ length);
else
- ret = __copy_from_user(vaddr + shmem_page_offset,
- user_data,
- page_length);
+ ret = __copy_from_user(vaddr + offset, user_data, length);
if (needs_clflush_after)
- shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
- page_length,
+ shmem_clflush_swizzled_range(vaddr + offset, length,
page_do_bit17_swizzling);
kunmap(page);
return ret ? -EFAULT : 0;
}
+/* Per-page copy function for the shmem pwrite fastpath.
+ * Flushes invalid cachelines before writing to the target if
+ * needs_clflush_before is set and flushes out any written cachelines after
+ * writing if needs_clflush is set.
+ */
static int
-i915_gem_shmem_pwrite(struct drm_device *dev,
- struct drm_i915_gem_object *obj,
- struct drm_i915_gem_pwrite *args,
- struct drm_file *file)
+shmem_pwrite(struct page *page, int offset, int len, char __user *user_data,
+ bool page_do_bit17_swizzling,
+ bool needs_clflush_before,
+ bool needs_clflush_after)
{
- ssize_t remain;
- loff_t offset;
- char __user *user_data;
- int shmem_page_offset, page_length, ret = 0;
- int obj_do_bit17_swizzling, page_do_bit17_swizzling;
- int hit_slowpath = 0;
- unsigned int needs_clflush;
- struct sg_page_iter sg_iter;
+ int ret;
- ret = i915_gem_obj_prepare_shmem_write(obj, &needs_clflush);
- if (ret)
- return ret;
+ ret = -ENODEV;
+ if (!page_do_bit17_swizzling) {
+ char *vaddr = kmap_atomic(page);
- obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
- user_data = u64_to_user_ptr(args->data_ptr);
- offset = args->offset;
- remain = args->size;
+ if (needs_clflush_before)
+ drm_clflush_virt_range(vaddr + offset, len);
+ ret = __copy_from_user_inatomic(vaddr + offset, user_data, len);
+ if (needs_clflush_after)
+ drm_clflush_virt_range(vaddr + offset, len);
- for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents,
- offset >> PAGE_SHIFT) {
- struct page *page = sg_page_iter_page(&sg_iter);
- int partial_cacheline_write;
+ kunmap_atomic(vaddr);
+ }
+ if (ret == 0)
+ return ret;
- if (remain <= 0)
- break;
+ return shmem_pwrite_slow(page, offset, len, user_data,
+ page_do_bit17_swizzling,
+ needs_clflush_before,
+ needs_clflush_after);
+}
- /* Operation in this page
- *
- * shmem_page_offset = offset within page in shmem file
- * page_length = bytes to copy for this page
- */
- shmem_page_offset = offset_in_page(offset);
-
- page_length = remain;
- if ((shmem_page_offset + page_length) > PAGE_SIZE)
- page_length = PAGE_SIZE - shmem_page_offset;
-
- /* If we don't overwrite a cacheline completely we need to be
- * careful to have up-to-date data by first clflushing. Don't
- * overcomplicate things and flush the entire patch. */
- partial_cacheline_write = needs_clflush & CLFLUSH_BEFORE &&
- ((shmem_page_offset | page_length)
- & (boot_cpu_data.x86_clflush_size - 1));
-
- page_do_bit17_swizzling = obj_do_bit17_swizzling &&
- (page_to_phys(page) & (1 << 17)) != 0;
-
- ret = shmem_pwrite_fast(page, shmem_page_offset, page_length,
- user_data, page_do_bit17_swizzling,
- partial_cacheline_write,
- needs_clflush & CLFLUSH_AFTER);
- if (ret == 0)
- goto next_page;
-
- hit_slowpath = 1;
- mutex_unlock(&dev->struct_mutex);
- ret = shmem_pwrite_slow(page, shmem_page_offset, page_length,
- user_data, page_do_bit17_swizzling,
- partial_cacheline_write,
- needs_clflush & CLFLUSH_AFTER);
+static int
+i915_gem_shmem_pwrite(struct drm_i915_gem_object *obj,
+ const struct drm_i915_gem_pwrite *args)
+{
+ struct drm_i915_private *i915 = to_i915(obj->base.dev);
+ void __user *user_data;
+ u64 remain;
+ unsigned int obj_do_bit17_swizzling;
+ unsigned int partial_cacheline_write;
+ unsigned int needs_clflush;
+ unsigned int offset, idx;
+ int ret;
- mutex_lock(&dev->struct_mutex);
+ ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
+ if (ret)
+ return ret;
- if (ret)
- goto out;
+ ret = i915_gem_obj_prepare_shmem_write(obj, &needs_clflush);
+ mutex_unlock(&i915->drm.struct_mutex);
+ if (ret)
+ return ret;
-next_page:
- remain -= page_length;
- user_data += page_length;
- offset += page_length;
- }
+ obj_do_bit17_swizzling = 0;
+ if (i915_gem_object_needs_bit17_swizzle(obj))
+ obj_do_bit17_swizzling = BIT(17);
-out:
- i915_gem_obj_finish_shmem_access(obj);
+ /* If we don't overwrite a cacheline completely we need to be
+ * careful to have up-to-date data by first clflushing. Don't
+ * overcomplicate things and flush the entire page.
+ */
+ partial_cacheline_write = 0;
+ if (needs_clflush & CLFLUSH_BEFORE)
+ partial_cacheline_write = boot_cpu_data.x86_clflush_size - 1;
- if (hit_slowpath) {
- /*
- * Fixup: Flush cpu caches in case we didn't flush the dirty
- * cachelines in-line while writing and the object moved
- * out of the cpu write domain while we've dropped the lock.
- */
- if (!(needs_clflush & CLFLUSH_AFTER) &&
- obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
- if (i915_gem_clflush_object(obj, obj->pin_display))
- needs_clflush |= CLFLUSH_AFTER;
- }
- }
+ user_data = u64_to_user_ptr(args->data_ptr);
+ remain = args->size;
+ offset = offset_in_page(args->offset);
+ for (idx = args->offset >> PAGE_SHIFT; remain; idx++) {
+ struct page *page = i915_gem_object_get_page(obj, idx);
+ int length;
+
+ length = remain;
+ if (offset + length > PAGE_SIZE)
+ length = PAGE_SIZE - offset;
+
+ ret = shmem_pwrite(page, offset, length, user_data,
+ page_to_phys(page) & obj_do_bit17_swizzling,
+ (offset | length) & partial_cacheline_write,
+ needs_clflush & CLFLUSH_AFTER);
+ if (ret)
+ break;
- if (needs_clflush & CLFLUSH_AFTER)
- i915_gem_chipset_flush(to_i915(dev));
+ remain -= length;
+ user_data += length;
+ offset = 0;
+ }
intel_fb_obj_flush(obj, false, ORIGIN_CPU);
+ i915_gem_obj_finish_shmem_access(obj);
return ret;
}
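The partial_cacheline_write mask used above turns "does this copy touch a partial cacheline?" into one OR and one AND: (offset | length) has a low bit set inside the cacheline iff either the start or the length is misaligned. A standalone check of that test (assuming a 64-byte clflush size, as on most x86 parts):

	#include <stdio.h>

	int main(void)
	{
		unsigned int clflush_size = 64;		/* boot_cpu_data.x86_clflush_size */
		unsigned int mask = clflush_size - 1;	/* partial_cacheline_write */

		struct { unsigned int offset, length; } cases[] = {
			{ 0, 256 },	/* aligned start, whole cachelines: no flush */
			{ 0, 200 },	/* ragged tail: flush */
			{ 8, 256 },	/* misaligned start: flush */
		};

		for (int i = 0; i < 3; i++)
			printf("offset=%u len=%u partial=%s\n",
			       cases[i].offset, cases[i].length,
			       ((cases[i].offset | cases[i].length) & mask) ?
			       "yes" : "no");
		return 0;
	}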
@@ -1417,7 +1435,6 @@ int
i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
struct drm_file *file)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_i915_gem_pwrite *args = data;
struct drm_i915_gem_object *obj;
int ret;
@@ -1430,13 +1447,6 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
args->size))
return -EFAULT;
- if (likely(!i915.prefault_disable)) {
- ret = fault_in_pages_readable(u64_to_user_ptr(args->data_ptr),
- args->size);
- if (ret)
- return -EFAULT;
- }
-
obj = i915_gem_object_lookup(file, args->handle);
if (!obj)
return -ENOENT;
@@ -1450,15 +1460,17 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
trace_i915_gem_object_pwrite(obj, args->offset, args->size);
- ret = __unsafe_wait_rendering(obj, to_rps_client(file), false);
+ ret = i915_gem_object_wait(obj,
+ I915_WAIT_INTERRUPTIBLE |
+ I915_WAIT_ALL,
+ MAX_SCHEDULE_TIMEOUT,
+ to_rps_client(file));
if (ret)
goto err;
- intel_runtime_pm_get(dev_priv);
-
- ret = i915_mutex_lock_interruptible(dev);
+ ret = i915_gem_object_pin_pages(obj);
if (ret)
- goto err_rpm;
+ goto err;
ret = -EFAULT;
/* We can only do the GTT pwrite on untiled buffers, as otherwise
@@ -1468,30 +1480,23 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
* perspective, requiring manual detiling by the client.
*/
if (!i915_gem_object_has_struct_page(obj) ||
- cpu_write_needs_clflush(obj)) {
- ret = i915_gem_gtt_pwrite_fast(dev_priv, obj, args, file);
+ cpu_write_needs_clflush(obj))
/* Note that the gtt paths might fail with non-page-backed user
* pointers (e.g. gtt mappings when moving data between
- * textures). Fallback to the shmem path in that case. */
- }
+ * textures). Fallback to the shmem path in that case.
+ */
+ ret = i915_gem_gtt_pwrite_fast(obj, args);
if (ret == -EFAULT || ret == -ENOSPC) {
if (obj->phys_handle)
ret = i915_gem_phys_pwrite(obj, args, file);
else
- ret = i915_gem_shmem_pwrite(dev, obj, args, file);
+ ret = i915_gem_shmem_pwrite(obj, args);
}
- i915_gem_object_put(obj);
- mutex_unlock(&dev->struct_mutex);
- intel_runtime_pm_put(dev_priv);
-
- return ret;
-
-err_rpm:
- intel_runtime_pm_put(dev_priv);
+ i915_gem_object_unpin_pages(obj);
err:
- i915_gem_object_put_unlocked(obj);
+ i915_gem_object_put(obj);
return ret;
}
@@ -1502,6 +1507,30 @@ write_origin(struct drm_i915_gem_object *obj, unsigned domain)
obj->frontbuffer_ggtt_origin : ORIGIN_CPU);
}
+static void i915_gem_object_bump_inactive_ggtt(struct drm_i915_gem_object *obj)
+{
+ struct drm_i915_private *i915;
+ struct list_head *list;
+ struct i915_vma *vma;
+
+ list_for_each_entry(vma, &obj->vma_list, obj_link) {
+ if (!i915_vma_is_ggtt(vma))
+ continue;
+
+ if (i915_vma_is_active(vma))
+ continue;
+
+ if (!drm_mm_node_allocated(&vma->node))
+ continue;
+
+ list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
+ }
+
+ i915 = to_i915(obj->base.dev);
+ list = obj->bind_count ? &i915->mm.bound_list : &i915->mm.unbound_list;
+ list_move_tail(&obj->global_link, list);
+}
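i915_gem_object_bump_inactive_ggtt() implements a simple LRU discipline: moving an entry to the tail of a list marks it most-recently-used, so the shrinker evicting from the head takes the coldest entries first. A tiny standalone analogue of the kernel's list_move_tail() (simplified intrusive list, not <linux/list.h>):

	#include <stdio.h>

	struct node {
		struct node *prev, *next;
		const char *name;
	};

	static void list_init(struct node *head)
	{
		head->prev = head->next = head;
	}

	static void list_del(struct node *n)
	{
		n->prev->next = n->next;
		n->next->prev = n->prev;
	}

	static void list_add_tail(struct node *n, struct node *head)
	{
		n->prev = head->prev;
		n->next = head;
		head->prev->next = n;
		head->prev = n;
	}

	/* the kernel pattern: unlink, then re-link at the tail */
	static void list_move_tail(struct node *n, struct node *head)
	{
		list_del(n);
		list_add_tail(n, head);
	}

	int main(void)
	{
		struct node head, a = { .name = "a" }, b = { .name = "b" };

		list_init(&head);
		list_add_tail(&a, &head);
		list_add_tail(&b, &head);

		list_move_tail(&a, &head);	/* "a" was just used: bump it */

		for (struct node *n = head.next; n != &head; n = n->next)
			printf("%s ", n->name);	/* b a: "b" is now coldest */
		printf("\n");
		return 0;
	}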
+
/**
* Called when user space prepares to use an object with the CPU, either
* through the mmap ioctl's mapping or a GTT mapping.
@@ -1517,7 +1546,7 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
struct drm_i915_gem_object *obj;
uint32_t read_domains = args->read_domains;
uint32_t write_domain = args->write_domain;
- int ret;
+ int err;
/* Only handle setting domains to types used by the CPU. */
if ((write_domain | read_domains) & I915_GEM_GPU_DOMAINS)
@@ -1537,29 +1566,48 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
* We will repeat the flush holding the lock in the normal manner
* to catch cases where we are gazumped.
*/
- ret = __unsafe_wait_rendering(obj, to_rps_client(file), !write_domain);
- if (ret)
- goto err;
+ err = i915_gem_object_wait(obj,
+ I915_WAIT_INTERRUPTIBLE |
+ (write_domain ? I915_WAIT_ALL : 0),
+ MAX_SCHEDULE_TIMEOUT,
+ to_rps_client(file));
+ if (err)
+ goto out;
- ret = i915_mutex_lock_interruptible(dev);
- if (ret)
- goto err;
+ /* Flush and acquire obj->pages so that direct access to memory is
+ * coherent with previous cached writes through shmemfs, and so that
+ * our cache domain tracking remains valid.
+ * For example, if the obj->filp was moved to swap without us
+ * being notified and releasing the pages, we would mistakenly
+ * continue to assume that the obj remained out of the CPU cached
+ * domain.
+ */
+ err = i915_gem_object_pin_pages(obj);
+ if (err)
+ goto out;
+
+ err = i915_mutex_lock_interruptible(dev);
+ if (err)
+ goto out_unpin;
if (read_domains & I915_GEM_DOMAIN_GTT)
- ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);
+ err = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);
else
- ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
+ err = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
+
+ /* And bump the LRU for this access */
+ i915_gem_object_bump_inactive_ggtt(obj);
+
+ mutex_unlock(&dev->struct_mutex);
if (write_domain != 0)
intel_fb_obj_invalidate(obj, write_origin(obj, write_domain));
+out_unpin:
+ i915_gem_object_unpin_pages(obj);
+out:
i915_gem_object_put(obj);
- mutex_unlock(&dev->struct_mutex);
- return ret;
-
-err:
- i915_gem_object_put_unlocked(obj);
- return ret;
+ return err;
}
/**
@@ -1589,7 +1637,7 @@ i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
}
}
- i915_gem_object_put_unlocked(obj);
+ i915_gem_object_put(obj);
return err;
}
@@ -1635,7 +1683,7 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
* pages from.
*/
if (!obj->base.filp) {
- i915_gem_object_put_unlocked(obj);
+ i915_gem_object_put(obj);
return -EINVAL;
}
@@ -1647,7 +1695,7 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
struct vm_area_struct *vma;
if (down_write_killable(&mm->mmap_sem)) {
- i915_gem_object_put_unlocked(obj);
+ i915_gem_object_put(obj);
return -EINTR;
}
vma = find_vma(mm, addr);
@@ -1661,7 +1709,7 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
/* This may race, but that's ok, it only gets set */
WRITE_ONCE(obj->frontbuffer_ggtt_origin, ORIGIN_CPU);
}
- i915_gem_object_put_unlocked(obj);
+ i915_gem_object_put(obj);
if (IS_ERR((void *)addr))
return addr;
@@ -1763,8 +1811,7 @@ int i915_gem_fault(struct vm_area_struct *area, struct vm_fault *vmf)
int ret;
/* We don't use vmf->pgoff since that has the fake offset */
- page_offset = ((unsigned long)vmf->virtual_address - area->vm_start) >>
- PAGE_SHIFT;
+ page_offset = (vmf->address - area->vm_start) >> PAGE_SHIFT;
trace_i915_gem_object_fault(obj, page_offset, true, write);
@@ -1773,7 +1820,14 @@ int i915_gem_fault(struct vm_area_struct *area, struct vm_fault *vmf)
* repeat the flush holding the lock in the normal manner to catch cases
* where we are gazumped.
*/
- ret = __unsafe_wait_rendering(obj, NULL, !write);
+ ret = i915_gem_object_wait(obj,
+ I915_WAIT_INTERRUPTIBLE,
+ MAX_SCHEDULE_TIMEOUT,
+ NULL);
+ if (ret)
+ goto err;
+
+ ret = i915_gem_object_pin_pages(obj);
if (ret)
goto err;
@@ -1784,7 +1838,7 @@ int i915_gem_fault(struct vm_area_struct *area, struct vm_fault *vmf)
goto err_rpm;
/* Access to snoopable pages through the GTT is incoherent. */
- if (obj->cache_level != I915_CACHE_NONE && !HAS_LLC(dev)) {
+ if (obj->cache_level != I915_CACHE_NONE && !HAS_LLC(dev_priv)) {
ret = -EFAULT;
goto err_unlock;
}
@@ -1813,8 +1867,7 @@ int i915_gem_fault(struct vm_area_struct *area, struct vm_fault *vmf)
view.params.partial.offset = rounddown(page_offset, chunk_size);
view.params.partial.size =
min_t(unsigned int, chunk_size,
- (area->vm_end - area->vm_start) / PAGE_SIZE -
- view.params.partial.offset);
+ vma_pages(area) - view.params.partial.offset);
/* If the partial covers the entire object, just create a
* normal VMA.
@@ -1842,22 +1895,25 @@ int i915_gem_fault(struct vm_area_struct *area, struct vm_fault *vmf)
if (ret)
goto err_unpin;
+ /* Mark as being mmapped into userspace for later revocation */
+ assert_rpm_wakelock_held(dev_priv);
+ if (list_empty(&obj->userfault_link))
+ list_add(&obj->userfault_link, &dev_priv->mm.userfault_list);
+
/* Finally, remap it using the new GTT offset */
ret = remap_io_mapping(area,
area->vm_start + (vma->ggtt_view.params.partial.offset << PAGE_SHIFT),
(ggtt->mappable_base + vma->node.start) >> PAGE_SHIFT,
min_t(u64, vma->size, area->vm_end - area->vm_start),
&ggtt->mappable);
- if (ret)
- goto err_unpin;
- obj->fault_mappable = true;
err_unpin:
__i915_vma_unpin(vma);
err_unlock:
mutex_unlock(&dev->struct_mutex);
err_rpm:
intel_runtime_pm_put(dev_priv);
+ i915_gem_object_unpin_pages(obj);
err:
switch (ret) {
case -EIO:
@@ -1919,15 +1975,23 @@ err:
void
i915_gem_release_mmap(struct drm_i915_gem_object *obj)
{
+ struct drm_i915_private *i915 = to_i915(obj->base.dev);
+
/* Serialisation between user GTT access and our code depends upon
* revoking the CPU's PTE whilst the mutex is held. The next user
* pagefault then has to wait until we release the mutex.
+ *
+ * Note that RPM complicates matters somewhat by adding an additional
+ * requirement that operations to the GGTT be made holding the RPM
+ * wakeref.
*/
- lockdep_assert_held(&obj->base.dev->struct_mutex);
+ lockdep_assert_held(&i915->drm.struct_mutex);
+ intel_runtime_pm_get(i915);
- if (!obj->fault_mappable)
- return;
+ if (list_empty(&obj->userfault_link))
+ goto out;
+ list_del_init(&obj->userfault_link);
drm_vma_node_unmap(&obj->base.vma_node,
obj->base.dev->anon_inode->i_mapping);
@@ -1940,16 +2004,45 @@ i915_gem_release_mmap(struct drm_i915_gem_object *obj)
*/
wmb();
- obj->fault_mappable = false;
+out:
+ intel_runtime_pm_put(i915);
}
-void
-i915_gem_release_all_mmaps(struct drm_i915_private *dev_priv)
+void i915_gem_runtime_suspend(struct drm_i915_private *dev_priv)
{
- struct drm_i915_gem_object *obj;
+ struct drm_i915_gem_object *obj, *on;
+ int i;
- list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list)
- i915_gem_release_mmap(obj);
+ /*
+ * Only called during RPM suspend. All users of the userfault_list
+ * must be holding an RPM wakeref to ensure that this cannot run
+ * concurrently with themselves (and use the struct_mutex for
+ * mutual protection).
+ */
+
+ list_for_each_entry_safe(obj, on,
+ &dev_priv->mm.userfault_list, userfault_link) {
+ list_del_init(&obj->userfault_link);
+ drm_vma_node_unmap(&obj->base.vma_node,
+ obj->base.dev->anon_inode->i_mapping);
+ }
+
+ /* The fence will be lost when the device powers down. If any were
+ * in use by hardware (i.e. they are pinned), we should not be powering
+ * down! All other fences will be reacquired by the user upon waking.
+ */
+ for (i = 0; i < dev_priv->num_fence_regs; i++) {
+ struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i];
+
+ if (WARN_ON(reg->pin_count))
+ continue;
+
+ if (!reg->vma)
+ continue;
+
+ GEM_BUG_ON(!list_empty(&reg->vma->obj->userfault_link));
+ reg->dirty = true;
+ }
}
/**
@@ -2063,7 +2156,7 @@ i915_gem_mmap_gtt(struct drm_file *file,
if (ret == 0)
*offset = drm_vma_node_offset_addr(&obj->base.vma_node);
- i915_gem_object_put_unlocked(obj);
+ i915_gem_object_put(obj);
return ret;
}
@@ -2106,16 +2199,18 @@ i915_gem_object_truncate(struct drm_i915_gem_object *obj)
* backing pages, *now*.
*/
shmem_truncate_range(file_inode(obj->base.filp), 0, (loff_t)-1);
- obj->madv = __I915_MADV_PURGED;
+ obj->mm.madv = __I915_MADV_PURGED;
}
/* Try to discard unwanted pages */
-static void
-i915_gem_object_invalidate(struct drm_i915_gem_object *obj)
+void __i915_gem_object_invalidate(struct drm_i915_gem_object *obj)
{
struct address_space *mapping;
- switch (obj->madv) {
+ lockdep_assert_held(&obj->mm.lock);
+ GEM_BUG_ON(obj->mm.pages);
+
+ switch (obj->mm.madv) {
case I915_MADV_DONTNEED:
i915_gem_object_truncate(obj);
case __I915_MADV_PURGED:
@@ -2130,95 +2225,131 @@ i915_gem_object_invalidate(struct drm_i915_gem_object *obj)
}
static void
-i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
+i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj,
+ struct sg_table *pages)
{
struct sgt_iter sgt_iter;
struct page *page;
- int ret;
- BUG_ON(obj->madv == __I915_MADV_PURGED);
+ __i915_gem_object_release_shmem(obj, pages);
- ret = i915_gem_object_set_to_cpu_domain(obj, true);
- if (WARN_ON(ret)) {
- /* In the event of a disaster, abandon all caches and
- * hope for the best.
- */
- i915_gem_clflush_object(obj, true);
- obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
- }
-
- i915_gem_gtt_finish_object(obj);
+ i915_gem_gtt_finish_pages(obj, pages);
if (i915_gem_object_needs_bit17_swizzle(obj))
- i915_gem_object_save_bit_17_swizzle(obj);
+ i915_gem_object_save_bit_17_swizzle(obj, pages);
- if (obj->madv == I915_MADV_DONTNEED)
- obj->dirty = 0;
-
- for_each_sgt_page(page, sgt_iter, obj->pages) {
- if (obj->dirty)
+ for_each_sgt_page(page, sgt_iter, pages) {
+ if (obj->mm.dirty)
set_page_dirty(page);
- if (obj->madv == I915_MADV_WILLNEED)
+ if (obj->mm.madv == I915_MADV_WILLNEED)
mark_page_accessed(page);
put_page(page);
}
- obj->dirty = 0;
+ obj->mm.dirty = false;
- sg_free_table(obj->pages);
- kfree(obj->pages);
+ sg_free_table(pages);
+ kfree(pages);
}
-int
-i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
+static void __i915_gem_object_reset_page_iter(struct drm_i915_gem_object *obj)
{
- const struct drm_i915_gem_object_ops *ops = obj->ops;
+ struct radix_tree_iter iter;
+ void **slot;
- if (obj->pages == NULL)
- return 0;
+ radix_tree_for_each_slot(slot, &obj->mm.get_page.radix, &iter, 0)
+ radix_tree_delete(&obj->mm.get_page.radix, iter.index);
+}
- if (obj->pages_pin_count)
- return -EBUSY;
+void __i915_gem_object_put_pages(struct drm_i915_gem_object *obj,
+ enum i915_mm_subclass subclass)
+{
+ struct sg_table *pages;
+
+ if (i915_gem_object_has_pinned_pages(obj))
+ return;
GEM_BUG_ON(obj->bind_count);
+ if (!READ_ONCE(obj->mm.pages))
+ return;
+
+ /* May be called by shrinker from within get_pages() (on another bo) */
+ mutex_lock_nested(&obj->mm.lock, subclass);
+ if (unlikely(atomic_read(&obj->mm.pages_pin_count)))
+ goto unlock;
/* ->put_pages might need to allocate memory for the bit17 swizzle
* array, hence protect them from being reaped by removing them from gtt
* lists early. */
- list_del(&obj->global_list);
+ pages = fetch_and_zero(&obj->mm.pages);
+ GEM_BUG_ON(!pages);
- if (obj->mapping) {
+ if (obj->mm.mapping) {
void *ptr;
- ptr = ptr_mask_bits(obj->mapping);
+ ptr = ptr_mask_bits(obj->mm.mapping);
if (is_vmalloc_addr(ptr))
vunmap(ptr);
else
kunmap(kmap_to_page(ptr));
- obj->mapping = NULL;
+ obj->mm.mapping = NULL;
}
- ops->put_pages(obj);
- obj->pages = NULL;
+ __i915_gem_object_reset_page_iter(obj);
- i915_gem_object_invalidate(obj);
+ obj->ops->put_pages(obj, pages);
+unlock:
+ mutex_unlock(&obj->mm.lock);
+}
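fetch_and_zero(&obj->mm.pages) above is i915 shorthand for "read the pointer out and null it in one expression", relying on obj->mm.lock for exclusion rather than atomics. A standalone rendering modelled on the i915 macro (the in-tree definition may differ in detail):

	#include <stdio.h>
	#include <stdlib.h>

	/* sketch of i915's fetch_and_zero(): read out, clear, hand back */
	#define fetch_and_zero(ptr) ({			\
		__typeof__(*(ptr)) __T = *(ptr);	\
		*(ptr) = (__typeof__(*(ptr)))0;		\
		__T;					\
	})

	int main(void)
	{
		int *pages = malloc(sizeof(*pages));
		int *taken;

		*pages = 42;
		taken = fetch_and_zero(&pages);	/* pages is now NULL; we own taken */

		printf("pages=%p taken=%d\n", (void *)pages, *taken);
		free(taken);
		return 0;
	}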
+static unsigned int swiotlb_max_size(void)
+{
+#if IS_ENABLED(CONFIG_SWIOTLB)
+ return rounddown(swiotlb_nr_tbl() << IO_TLB_SHIFT, PAGE_SIZE);
+#else
return 0;
+#endif
}
-static int
+static void i915_sg_trim(struct sg_table *orig_st)
+{
+ struct sg_table new_st;
+ struct scatterlist *sg, *new_sg;
+ unsigned int i;
+
+ if (orig_st->nents == orig_st->orig_nents)
+ return;
+
+ if (sg_alloc_table(&new_st, orig_st->nents, GFP_KERNEL))
+ return;
+
+ new_sg = new_st.sgl;
+ for_each_sg(orig_st->sgl, sg, orig_st->nents, i) {
+ sg_set_page(new_sg, sg_page(sg), sg->length, 0);
+ /* called before being DMA mapped, no need to copy sg->dma_* */
+ new_sg = sg_next(new_sg);
+ }
+
+ sg_free_table(orig_st);
+
+ *orig_st = new_st;
+}
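A standalone model of what the page-gathering loop later in this hunk does, together with the trim above: contiguous pages are coalesced into one scatterlist segment (capped at max_segment), and the table is then trimmed so entries preallocated for the worst case are not left wasting memory. Plain arrays stand in for the scatterlist API:

	#include <stdio.h>

	#define PAGE_SIZE 4096UL

	int main(void)
	{
		/* page frame numbers as gathered from shmemfs */
		unsigned long pfns[] = { 2, 3, 4, 9, 10 };
		unsigned long max_segment = 2 * PAGE_SIZE;	/* e.g. swiotlb cap */
		unsigned long seg_start = pfns[0], seg_len = PAGE_SIZE;
		unsigned int nents = 1;

		for (int i = 1; i < 5; i++) {
			if (seg_len < max_segment && pfns[i] == pfns[i - 1] + 1) {
				seg_len += PAGE_SIZE;	/* extend current segment */
				continue;
			}
			printf("sg: pfn %lu len %lu\n", seg_start, seg_len);
			seg_start = pfns[i];
			seg_len = PAGE_SIZE;
			nents++;
		}
		printf("sg: pfn %lu len %lu\n", seg_start, seg_len);

		/* i915_sg_trim(): 5 entries were allocated, only nents are used */
		printf("orig_nents=5 nents=%u -> reallocate to %u\n", nents, nents);
		return 0;
	}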
+
+static struct sg_table *
i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
{
struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
- int page_count, i;
+ const unsigned long page_count = obj->base.size / PAGE_SIZE;
+ unsigned long i;
struct address_space *mapping;
struct sg_table *st;
struct scatterlist *sg;
struct sgt_iter sgt_iter;
struct page *page;
unsigned long last_pfn = 0; /* suppress gcc warning */
+ unsigned int max_segment;
int ret;
gfp_t gfp;
@@ -2226,17 +2357,21 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
* wasn't in the GTT, there shouldn't be any way it could have been in
* a GPU cache
*/
- BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS);
- BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS);
+ GEM_BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS);
+ GEM_BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS);
+
+ max_segment = swiotlb_max_size();
+ if (!max_segment)
+ max_segment = rounddown(UINT_MAX, PAGE_SIZE);
st = kmalloc(sizeof(*st), GFP_KERNEL);
if (st == NULL)
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
- page_count = obj->base.size / PAGE_SIZE;
+rebuild_st:
if (sg_alloc_table(st, page_count, GFP_KERNEL)) {
kfree(st);
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
}
/* Get the list of pages out of our struct file. They'll be pinned
@@ -2264,22 +2399,15 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
* our own buffer, now let the real VM do its job and
* go down in flames if truly OOM.
*/
- i915_gem_shrink_all(dev_priv);
page = shmem_read_mapping_page(mapping, i);
if (IS_ERR(page)) {
ret = PTR_ERR(page);
goto err_sg;
}
}
-#ifdef CONFIG_SWIOTLB
- if (swiotlb_nr_tbl()) {
- st->nents++;
- sg_set_page(sg, page, PAGE_SIZE, 0);
- sg = sg_next(sg);
- continue;
- }
-#endif
- if (!i || page_to_pfn(page) != last_pfn + 1) {
+ if (!i ||
+ sg->length >= max_segment ||
+ page_to_pfn(page) != last_pfn + 1) {
if (i)
sg = sg_next(sg);
st->nents++;
@@ -2292,24 +2420,37 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
/* Check that the i965g/gm workaround works. */
WARN_ON((gfp & __GFP_DMA32) && (last_pfn >= 0x00100000UL));
}
-#ifdef CONFIG_SWIOTLB
- if (!swiotlb_nr_tbl())
-#endif
+ if (sg) /* loop terminated early; short sg table */
sg_mark_end(sg);
- obj->pages = st;
- ret = i915_gem_gtt_prepare_object(obj);
- if (ret)
- goto err_pages;
+ /* Trim unused sg entries to avoid wasting memory. */
+ i915_sg_trim(st);
- if (i915_gem_object_needs_bit17_swizzle(obj))
- i915_gem_object_do_bit_17_swizzle(obj);
+ ret = i915_gem_gtt_prepare_pages(obj, st);
+ if (ret) {
+ /* DMA remapping failed? One possible cause is that
+ * it could not reserve enough large entries; retrying
+ * with PAGE_SIZE chunks instead may be helpful.
+ */
+ if (max_segment > PAGE_SIZE) {
+ for_each_sgt_page(page, sgt_iter, st)
+ put_page(page);
+ sg_free_table(st);
- if (i915_gem_object_is_tiled(obj) &&
- dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES)
- i915_gem_object_pin_pages(obj);
+ max_segment = PAGE_SIZE;
+ goto rebuild_st;
+ } else {
+ dev_warn(&dev_priv->drm.pdev->dev,
+ "Failed to DMA remap %lu pages\n",
+ page_count);
+ goto err_pages;
+ }
+ }
- return 0;
+ if (i915_gem_object_needs_bit17_swizzle(obj))
+ i915_gem_object_do_bit_17_swizzle(obj, st);
+
+ return st;
err_sg:
sg_mark_end(sg);
@@ -2330,43 +2471,73 @@ err_pages:
if (ret == -ENOSPC)
ret = -ENOMEM;
- return ret;
+ return ERR_PTR(ret);
}
-/* Ensure that the associated pages are gathered from the backing storage
- * and pinned into our object. i915_gem_object_get_pages() may be called
- * multiple times before they are released by a single call to
- * i915_gem_object_put_pages() - once the pages are no longer referenced
- * either as a result of memory pressure (reaping pages under the shrinker)
- * or as the object is itself released.
- */
-int
-i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
+void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
+ struct sg_table *pages)
{
- struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
- const struct drm_i915_gem_object_ops *ops = obj->ops;
- int ret;
+ lockdep_assert_held(&obj->mm.lock);
- if (obj->pages)
- return 0;
+ obj->mm.get_page.sg_pos = pages->sgl;
+ obj->mm.get_page.sg_idx = 0;
+
+ obj->mm.pages = pages;
+
+ if (i915_gem_object_is_tiled(obj) &&
+ to_i915(obj->base.dev)->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
+ GEM_BUG_ON(obj->mm.quirked);
+ __i915_gem_object_pin_pages(obj);
+ obj->mm.quirked = true;
+ }
+}
+
+static int ____i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
+{
+ struct sg_table *pages;
+
+ GEM_BUG_ON(i915_gem_object_has_pinned_pages(obj));
- if (obj->madv != I915_MADV_WILLNEED) {
+ if (unlikely(obj->mm.madv != I915_MADV_WILLNEED)) {
DRM_DEBUG("Attempting to obtain a purgeable object\n");
return -EFAULT;
}
- BUG_ON(obj->pages_pin_count);
+ pages = obj->ops->get_pages(obj);
+ if (unlikely(IS_ERR(pages)))
+ return PTR_ERR(pages);
- ret = ops->get_pages(obj);
- if (ret)
- return ret;
+ __i915_gem_object_set_pages(obj, pages);
+ return 0;
+}
- list_add_tail(&obj->global_list, &dev_priv->mm.unbound_list);
+/* Ensure that the associated pages are gathered from the backing storage
+ * and pinned into our object. i915_gem_object_pin_pages() may be called
+ * multiple times before they are released by a single call to
+ * i915_gem_object_unpin_pages() - once the pages are no longer referenced
+ * either as a result of memory pressure (reaping pages under the shrinker)
+ * or as the object is itself released.
+ */
+int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
+{
+ int err;
- obj->get_page.sg = obj->pages->sgl;
- obj->get_page.last = 0;
+ err = mutex_lock_interruptible(&obj->mm.lock);
+ if (err)
+ return err;
- return 0;
+ if (unlikely(!obj->mm.pages)) {
+ err = ____i915_gem_object_get_pages(obj);
+ if (err)
+ goto unlock;
+
+ smp_mb__before_atomic();
+ }
+ atomic_inc(&obj->mm.pages_pin_count);
+
+unlock:
+ mutex_unlock(&obj->mm.lock);
+ return err;
}
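The locking scheme above: the first pin takes obj->mm.lock, populates the pages, then raises the pin count, while i915_gem_object_pin_map() below uses atomic_inc_not_zero() so already-populated objects skip the mutex entirely. A standalone analogue with C11 atomics and a pthread mutex (simplified; error paths and teardown omitted):

	#include <stdatomic.h>
	#include <pthread.h>
	#include <stdio.h>
	#include <stdlib.h>

	static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
	static atomic_int pin_count;
	static int *pages;	/* NULL until first pin */

	static int pin_pages(void)
	{
		/* fast path: like atomic_inc_not_zero(), lock-free */
		int old = atomic_load(&pin_count);
		while (old > 0) {
			if (atomic_compare_exchange_weak(&pin_count,
							 &old, old + 1))
				return 0;
		}

		/* slow path: take the lock, populate if needed, then pin */
		pthread_mutex_lock(&lock);
		if (!pages)
			pages = calloc(1, sizeof(*pages));	/* "get pages" */
		atomic_fetch_add(&pin_count, 1);
		pthread_mutex_unlock(&lock);
		return pages ? 0 : -1;
	}

	static void unpin_pages(void)
	{
		atomic_fetch_sub(&pin_count, 1);
	}

	int main(void)
	{
		pin_pages();	/* slow path, allocates */
		pin_pages();	/* fast path, no mutex */
		printf("pin_count=%d\n", atomic_load(&pin_count));	/* 2 */
		unpin_pages();
		unpin_pages();
		return 0;
	}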
/* The 'mapping' part of i915_gem_object_pin_map() below */
@@ -2374,7 +2545,7 @@ static void *i915_gem_object_map(const struct drm_i915_gem_object *obj,
enum i915_map_type type)
{
unsigned long n_pages = obj->base.size >> PAGE_SHIFT;
- struct sg_table *sgt = obj->pages;
+ struct sg_table *sgt = obj->mm.pages;
struct sgt_iter sgt_iter;
struct page *page;
struct page *stack_pages[32];
@@ -2425,21 +2596,31 @@ void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
void *ptr;
int ret;
- lockdep_assert_held(&obj->base.dev->struct_mutex);
GEM_BUG_ON(!i915_gem_object_has_struct_page(obj));
- ret = i915_gem_object_get_pages(obj);
+ ret = mutex_lock_interruptible(&obj->mm.lock);
if (ret)
return ERR_PTR(ret);
- i915_gem_object_pin_pages(obj);
- pinned = obj->pages_pin_count > 1;
+ pinned = true;
+ if (!atomic_inc_not_zero(&obj->mm.pages_pin_count)) {
+ if (unlikely(!obj->mm.pages)) {
+ ret = ____i915_gem_object_get_pages(obj);
+ if (ret)
+ goto err_unlock;
- ptr = ptr_unpack_bits(obj->mapping, has_type);
+ smp_mb__before_atomic();
+ }
+ atomic_inc(&obj->mm.pages_pin_count);
+ pinned = false;
+ }
+ GEM_BUG_ON(!obj->mm.pages);
+
+ ptr = ptr_unpack_bits(obj->mm.mapping, has_type);
if (ptr && has_type != type) {
if (pinned) {
ret = -EBUSY;
- goto err;
+ goto err_unpin;
}
if (is_vmalloc_addr(ptr))
@@ -2447,59 +2628,28 @@ void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
else
kunmap(kmap_to_page(ptr));
- ptr = obj->mapping = NULL;
+ ptr = obj->mm.mapping = NULL;
}
if (!ptr) {
ptr = i915_gem_object_map(obj, type);
if (!ptr) {
ret = -ENOMEM;
- goto err;
+ goto err_unpin;
}
- obj->mapping = ptr_pack_bits(ptr, type);
+ obj->mm.mapping = ptr_pack_bits(ptr, type);
}
+out_unlock:
+ mutex_unlock(&obj->mm.lock);
return ptr;
-err:
- i915_gem_object_unpin_pages(obj);
- return ERR_PTR(ret);
-}
-
-static void
-i915_gem_object_retire__write(struct i915_gem_active *active,
- struct drm_i915_gem_request *request)
-{
- struct drm_i915_gem_object *obj =
- container_of(active, struct drm_i915_gem_object, last_write);
-
- intel_fb_obj_flush(obj, true, ORIGIN_CS);
-}
-
-static void
-i915_gem_object_retire__read(struct i915_gem_active *active,
- struct drm_i915_gem_request *request)
-{
- int idx = request->engine->id;
- struct drm_i915_gem_object *obj =
- container_of(active, struct drm_i915_gem_object, last_read[idx]);
-
- GEM_BUG_ON(!i915_gem_object_has_active_engine(obj, idx));
-
- i915_gem_object_clear_active(obj, idx);
- if (i915_gem_object_is_active(obj))
- return;
-
- /* Bump our place on the bound list to keep it roughly in LRU order
- * so that we don't steal from recently used but inactive objects
- * (unless we are forced to ofc!)
- */
- if (obj->bind_count)
- list_move_tail(&obj->global_list,
- &request->i915->mm.bound_list);
-
- i915_gem_object_put(obj);
+err_unpin:
+ atomic_dec(&obj->mm.pages_pin_count);
+err_unlock:
+ ptr = ERR_PTR(ret);
+ goto out_unlock;
}
static bool i915_context_is_banned(const struct i915_gem_context *ctx)
@@ -2546,13 +2696,10 @@ i915_gem_find_active_request(struct intel_engine_cs *engine)
* extra delay for a recent interrupt is pointless. Hence, we do
* not need an engine->irq_seqno_barrier() before the seqno reads.
*/
- list_for_each_entry(request, &engine->request_list, link) {
- if (i915_gem_request_completed(request))
+ list_for_each_entry(request, &engine->timeline->requests, link) {
+ if (__i915_gem_request_completed(request))
continue;
- if (!i915_sw_fence_done(&request->submit))
- break;
-
return request;
}
@@ -2580,10 +2727,9 @@ static void i915_gem_reset_engine(struct intel_engine_cs *engine)
{
struct drm_i915_gem_request *request;
struct i915_gem_context *incomplete_ctx;
+ struct intel_timeline *timeline;
bool ring_hung;
- /* Ensure irq handler finishes, and not run again. */
- tasklet_kill(&engine->irq_tasklet);
if (engine->irq_seqno_barrier)
engine->irq_seqno_barrier(engine);
@@ -2592,12 +2738,15 @@ static void i915_gem_reset_engine(struct intel_engine_cs *engine)
return;
ring_hung = engine->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG;
+ if (engine->hangcheck.seqno != intel_engine_get_seqno(engine))
+ ring_hung = false;
+
i915_set_reset_status(request->ctx, ring_hung);
if (!ring_hung)
return;
DRM_DEBUG_DRIVER("resetting %s to restart from tail of request 0x%x\n",
- engine->name, request->fence.seqno);
+ engine->name, request->global_seqno);
/* Setup the CS to resume from the breadcrumb of the hung request */
engine->reset_hw(engine, request);
@@ -2614,21 +2763,28 @@ static void i915_gem_reset_engine(struct intel_engine_cs *engine)
if (i915_gem_context_is_default(incomplete_ctx))
return;
- list_for_each_entry_continue(request, &engine->request_list, link)
+ list_for_each_entry_continue(request, &engine->timeline->requests, link)
if (request->ctx == incomplete_ctx)
reset_request(request);
+
+ timeline = i915_gem_context_lookup_timeline(incomplete_ctx, engine);
+ list_for_each_entry(request, &timeline->requests, link)
+ reset_request(request);
}
void i915_gem_reset(struct drm_i915_private *dev_priv)
{
struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+
+ lockdep_assert_held(&dev_priv->drm.struct_mutex);
i915_gem_retire_requests(dev_priv);
- for_each_engine(engine, dev_priv)
+ for_each_engine(engine, dev_priv, id)
i915_gem_reset_engine(engine);
- i915_gem_restore_fences(&dev_priv->drm);
+ i915_gem_restore_fences(dev_priv);
if (dev_priv->gt.awake) {
intel_sanitize_gt_powersave(dev_priv);
@@ -2640,6 +2796,8 @@ void i915_gem_reset(struct drm_i915_private *dev_priv)
static void nop_submit_request(struct drm_i915_gem_request *request)
{
+ i915_gem_request_submit(request);
+ intel_engine_init_global_seqno(request->engine, request->global_seqno);
}
static void i915_gem_cleanup_engine(struct intel_engine_cs *engine)
@@ -2650,7 +2808,8 @@ static void i915_gem_cleanup_engine(struct intel_engine_cs *engine)
* (lockless) lookup doesn't try and wait upon the request as we
* reset it.
*/
- intel_engine_init_seqno(engine, engine->last_submitted_seqno);
+ intel_engine_init_global_seqno(engine,
+ intel_engine_last_submit(engine));
/*
* Clear the execlists queue up before freeing the requests, as those
@@ -2659,26 +2818,30 @@ static void i915_gem_cleanup_engine(struct intel_engine_cs *engine)
*/
if (i915.enable_execlists) {
- spin_lock(&engine->execlist_lock);
- INIT_LIST_HEAD(&engine->execlist_queue);
+ unsigned long flags;
+
+ spin_lock_irqsave(&engine->timeline->lock, flags);
+
i915_gem_request_put(engine->execlist_port[0].request);
i915_gem_request_put(engine->execlist_port[1].request);
memset(engine->execlist_port, 0, sizeof(engine->execlist_port));
- spin_unlock(&engine->execlist_lock);
- }
+ engine->execlist_queue = RB_ROOT;
+ engine->execlist_first = NULL;
- engine->i915->gt.active_engines &= ~intel_engine_flag(engine);
+ spin_unlock_irqrestore(&engine->timeline->lock, flags);
+ }
}
void i915_gem_set_wedged(struct drm_i915_private *dev_priv)
{
struct intel_engine_cs *engine;
+ enum intel_engine_id id;
lockdep_assert_held(&dev_priv->drm.struct_mutex);
set_bit(I915_WEDGED, &dev_priv->gpu_error.flags);
i915_gem_context_lost(dev_priv);
- for_each_engine(engine, dev_priv)
+ for_each_engine(engine, dev_priv, id)
i915_gem_cleanup_engine(engine);
mod_delayed_work(dev_priv->wq, &dev_priv->gt.idle_work, 0);
@@ -2717,12 +2880,20 @@ i915_gem_idle_work_handler(struct work_struct *work)
container_of(work, typeof(*dev_priv), gt.idle_work.work);
struct drm_device *dev = &dev_priv->drm;
struct intel_engine_cs *engine;
+ enum intel_engine_id id;
bool rearm_hangcheck;
if (!READ_ONCE(dev_priv->gt.awake))
return;
- if (READ_ONCE(dev_priv->gt.active_engines))
+ /*
+ * Wait for the last execlists context to complete, but bail out if a
+ * new request is submitted.
+ */
+ wait_for(READ_ONCE(dev_priv->gt.active_requests) ||
+ intel_execlists_idle(dev_priv), 10);
+
+ if (READ_ONCE(dev_priv->gt.active_requests))
return;
rearm_hangcheck =
@@ -2736,10 +2907,20 @@ i915_gem_idle_work_handler(struct work_struct *work)
goto out_rearm;
}
- if (dev_priv->gt.active_engines)
+ /*
+ * A new request was retired after this work handler started; extend the
+ * active period until the next instance of the work.
+ */
+ if (work_pending(work))
+ goto out_unlock;
+
+ if (dev_priv->gt.active_requests)
goto out_unlock;
- for_each_engine(engine, dev_priv)
+ if (wait_for(intel_execlists_idle(dev_priv), 10))
+ DRM_ERROR("Timeout waiting for engines to idle\n");
+
+ for_each_engine(engine, dev_priv, id)
i915_gem_batch_pool_fini(&engine->batch_pool);
GEM_BUG_ON(!dev_priv->gt.awake);
@@ -2769,9 +2950,26 @@ void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file)
list_for_each_entry_safe(vma, vn, &obj->vma_list, obj_link)
if (vma->vm->file == fpriv)
i915_vma_close(vma);
+
+ if (i915_gem_object_is_active(obj) &&
+ !i915_gem_object_has_active_reference(obj)) {
+ i915_gem_object_set_active_reference(obj);
+ i915_gem_object_get(obj);
+ }
mutex_unlock(&obj->base.dev->struct_mutex);
}
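+
+/*
+ * Map the wait ioctl's signed nanosecond timeout onto kernel wait
+ * semantics: negative waits indefinitely, zero polls, and positive
+ * values are converted to jiffies via nsecs_to_jiffies_timeout().
+ */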
+static unsigned long to_wait_timeout(s64 timeout_ns)
+{
+ if (timeout_ns < 0)
+ return MAX_SCHEDULE_TIMEOUT;
+
+ if (timeout_ns == 0)
+ return 0;
+
+ return nsecs_to_jiffies_timeout(timeout_ns);
+}
+
/**
* i915_gem_wait_ioctl - implements DRM_IOCTL_I915_GEM_WAIT
* @dev: drm device pointer
@@ -2800,10 +2998,9 @@ int
i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
{
struct drm_i915_gem_wait *args = data;
- struct intel_rps_client *rps = to_rps_client(file);
struct drm_i915_gem_object *obj;
- unsigned long active;
- int idx, ret = 0;
+ ktime_t start;
+ long ret;
if (args->flags != 0)
return -EINVAL;
@@ -2812,133 +3009,29 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
if (!obj)
return -ENOENT;
- active = __I915_BO_ACTIVE(obj);
- for_each_active(active, idx) {
- s64 *timeout = args->timeout_ns >= 0 ? &args->timeout_ns : NULL;
- ret = i915_gem_active_wait_unlocked(&obj->last_read[idx],
- I915_WAIT_INTERRUPTIBLE,
- timeout, rps);
- if (ret)
- break;
- }
+ start = ktime_get();
- i915_gem_object_put_unlocked(obj);
- return ret;
-}
-
-static void __i915_vma_iounmap(struct i915_vma *vma)
-{
- GEM_BUG_ON(i915_vma_is_pinned(vma));
-
- if (vma->iomap == NULL)
- return;
-
- io_mapping_unmap(vma->iomap);
- vma->iomap = NULL;
-}
-
-int i915_vma_unbind(struct i915_vma *vma)
-{
- struct drm_i915_gem_object *obj = vma->obj;
- unsigned long active;
- int ret;
-
- /* First wait upon any activity as retiring the request may
- * have side-effects such as unpinning or even unbinding this vma.
- */
- active = i915_vma_get_active(vma);
- if (active) {
- int idx;
-
- /* When a closed VMA is retired, it is unbound - eek.
- * In order to prevent it from being recursively closed,
- * take a pin on the vma so that the second unbind is
- * aborted.
- */
- __i915_vma_pin(vma);
-
- for_each_active(active, idx) {
- ret = i915_gem_active_retire(&vma->last_read[idx],
- &vma->vm->dev->struct_mutex);
- if (ret)
- break;
- }
+ ret = i915_gem_object_wait(obj,
+ I915_WAIT_INTERRUPTIBLE | I915_WAIT_ALL,
+ to_wait_timeout(args->timeout_ns),
+ to_rps_client(file));
- __i915_vma_unpin(vma);
- if (ret)
- return ret;
-
- GEM_BUG_ON(i915_vma_is_active(vma));
- }
-
- if (i915_vma_is_pinned(vma))
- return -EBUSY;
-
- if (!drm_mm_node_allocated(&vma->node))
- goto destroy;
-
- GEM_BUG_ON(obj->bind_count == 0);
- GEM_BUG_ON(!obj->pages);
-
- if (i915_vma_is_map_and_fenceable(vma)) {
- /* release the fence reg _after_ flushing */
- ret = i915_vma_put_fence(vma);
- if (ret)
- return ret;
-
- /* Force a pagefault for domain tracking on next user access */
- i915_gem_release_mmap(obj);
-
- __i915_vma_iounmap(vma);
- vma->flags &= ~I915_VMA_CAN_FENCE;
- }
-
- if (likely(!vma->vm->closed)) {
- trace_i915_vma_unbind(vma);
- vma->vm->unbind_vma(vma);
- }
- vma->flags &= ~(I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND);
-
- drm_mm_remove_node(&vma->node);
- list_move_tail(&vma->vm_link, &vma->vm->unbound_list);
-
- if (vma->pages != obj->pages) {
- GEM_BUG_ON(!vma->pages);
- sg_free_table(vma->pages);
- kfree(vma->pages);
+ if (args->timeout_ns > 0) {
+ args->timeout_ns -= ktime_to_ns(ktime_sub(ktime_get(), start));
+ if (args->timeout_ns < 0)
+ args->timeout_ns = 0;
}
- vma->pages = NULL;
-
- /* Since the unbound list is global, only move to that list if
- * no more VMAs exist. */
- if (--obj->bind_count == 0)
- list_move_tail(&obj->global_list,
- &to_i915(obj->base.dev)->mm.unbound_list);
- /* And finally now the object is completely decoupled from this vma,
- * we can drop its hold on the backing storage and allow it to be
- * reaped by the shrinker.
- */
- i915_gem_object_unpin_pages(obj);
-
-destroy:
- if (unlikely(i915_vma_is_closed(vma)))
- i915_vma_destroy(vma);
-
- return 0;
+ i915_gem_object_put(obj);
+ return ret;
}
-int i915_gem_wait_for_idle(struct drm_i915_private *dev_priv,
- unsigned int flags)
+static int wait_for_timeline(struct i915_gem_timeline *tl, unsigned int flags)
{
- struct intel_engine_cs *engine;
- int ret;
-
- for_each_engine(engine, dev_priv) {
- if (engine->last_context == NULL)
- continue;
+ int ret, i;
- ret = intel_engine_idle(engine, flags);
+ for (i = 0; i < ARRAY_SIZE(tl->engine); i++) {
+ ret = i915_gem_active_wait(&tl->engine[i].last_request, flags);
if (ret)
return ret;
}
@@ -2946,187 +3039,45 @@ int i915_gem_wait_for_idle(struct drm_i915_private *dev_priv,
return 0;
}
-static bool i915_gem_valid_gtt_space(struct i915_vma *vma,
- unsigned long cache_level)
+int i915_gem_wait_for_idle(struct drm_i915_private *i915, unsigned int flags)
{
- struct drm_mm_node *gtt_space = &vma->node;
- struct drm_mm_node *other;
-
- /*
- * On some machines we have to be careful when putting differing types
- * of snoopable memory together to avoid the prefetcher crossing memory
- * domains and dying. During vm initialisation, we decide whether or not
- * these constraints apply and set the drm_mm.color_adjust
- * appropriately.
- */
- if (vma->vm->mm.color_adjust == NULL)
- return true;
-
- if (!drm_mm_node_allocated(gtt_space))
- return true;
-
- if (list_empty(&gtt_space->node_list))
- return true;
-
- other = list_entry(gtt_space->node_list.prev, struct drm_mm_node, node_list);
- if (other->allocated && !other->hole_follows && other->color != cache_level)
- return false;
-
- other = list_entry(gtt_space->node_list.next, struct drm_mm_node, node_list);
- if (other->allocated && !gtt_space->hole_follows && other->color != cache_level)
- return false;
-
- return true;
-}
-
-/**
- * i915_vma_insert - finds a slot for the vma in its address space
- * @vma: the vma
- * @size: requested size in bytes (can be larger than the VMA)
- * @alignment: required alignment
- * @flags: mask of PIN_* flags to use
- *
- * First we try to allocate some free space that meets the requirements for
- * the VMA. Failiing that, if the flags permit, it will evict an old VMA,
- * preferrably the oldest idle entry to make room for the new VMA.
- *
- * Returns:
- * 0 on success, negative error code otherwise.
- */
-static int
-i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
-{
- struct drm_i915_private *dev_priv = to_i915(vma->vm->dev);
- struct drm_i915_gem_object *obj = vma->obj;
- u64 start, end;
int ret;
- GEM_BUG_ON(vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));
- GEM_BUG_ON(drm_mm_node_allocated(&vma->node));
-
- size = max(size, vma->size);
- if (flags & PIN_MAPPABLE)
- size = i915_gem_get_ggtt_size(dev_priv, size,
- i915_gem_object_get_tiling(obj));
-
- alignment = max(max(alignment, vma->display_alignment),
- i915_gem_get_ggtt_alignment(dev_priv, size,
- i915_gem_object_get_tiling(obj),
- flags & PIN_MAPPABLE));
+ if (flags & I915_WAIT_LOCKED) {
+ struct i915_gem_timeline *tl;
- start = flags & PIN_OFFSET_BIAS ? flags & PIN_OFFSET_MASK : 0;
+ lockdep_assert_held(&i915->drm.struct_mutex);
- end = vma->vm->total;
- if (flags & PIN_MAPPABLE)
- end = min_t(u64, end, dev_priv->ggtt.mappable_end);
- if (flags & PIN_ZONE_4G)
- end = min_t(u64, end, (1ULL << 32) - PAGE_SIZE);
-
- /* If binding the object/GGTT view requires more space than the entire
- * aperture has, reject it early before evicting everything in a vain
- * attempt to find space.
- */
- if (size > end) {
- DRM_DEBUG("Attempting to bind an object larger than the aperture: request=%llu [object=%zd] > %s aperture=%llu\n",
- size, obj->base.size,
- flags & PIN_MAPPABLE ? "mappable" : "total",
- end);
- return -E2BIG;
- }
-
- ret = i915_gem_object_get_pages(obj);
- if (ret)
- return ret;
-
- i915_gem_object_pin_pages(obj);
-
- if (flags & PIN_OFFSET_FIXED) {
- u64 offset = flags & PIN_OFFSET_MASK;
- if (offset & (alignment - 1) || offset > end - size) {
- ret = -EINVAL;
- goto err_unpin;
- }
-
- vma->node.start = offset;
- vma->node.size = size;
- vma->node.color = obj->cache_level;
- ret = drm_mm_reserve_node(&vma->vm->mm, &vma->node);
- if (ret) {
- ret = i915_gem_evict_for_vma(vma);
- if (ret == 0)
- ret = drm_mm_reserve_node(&vma->vm->mm, &vma->node);
+ list_for_each_entry(tl, &i915->gt.timelines, link) {
+ ret = wait_for_timeline(tl, flags);
if (ret)
- goto err_unpin;
+ return ret;
}
} else {
- u32 search_flag, alloc_flag;
-
- if (flags & PIN_HIGH) {
- search_flag = DRM_MM_SEARCH_BELOW;
- alloc_flag = DRM_MM_CREATE_TOP;
- } else {
- search_flag = DRM_MM_SEARCH_DEFAULT;
- alloc_flag = DRM_MM_CREATE_DEFAULT;
- }
-
- /* We only allocate in PAGE_SIZE/GTT_PAGE_SIZE (4096) chunks,
- * so we know that we always have a minimum alignment of 4096.
- * The drm_mm range manager is optimised to return results
- * with zero alignment, so where possible use the optimal
- * path.
- */
- if (alignment <= 4096)
- alignment = 0;
-
-search_free:
- ret = drm_mm_insert_node_in_range_generic(&vma->vm->mm,
- &vma->node,
- size, alignment,
- obj->cache_level,
- start, end,
- search_flag,
- alloc_flag);
- if (ret) {
- ret = i915_gem_evict_something(vma->vm, size, alignment,
- obj->cache_level,
- start, end,
- flags);
- if (ret == 0)
- goto search_free;
-
- goto err_unpin;
- }
+ ret = wait_for_timeline(&i915->gt.global_timeline, flags);
+ if (ret)
+ return ret;
}
- GEM_BUG_ON(!i915_gem_valid_gtt_space(vma, obj->cache_level));
-
- list_move_tail(&obj->global_list, &dev_priv->mm.bound_list);
- list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
- obj->bind_count++;
return 0;
-
-err_unpin:
- i915_gem_object_unpin_pages(obj);
- return ret;
}
-bool
-i915_gem_clflush_object(struct drm_i915_gem_object *obj,
- bool force)
+void i915_gem_clflush_object(struct drm_i915_gem_object *obj,
+ bool force)
{
/* If we don't have a page list set up, then we're not pinned
* to GPU, and we can ignore the cache flush because it'll happen
* again at bind time.
*/
- if (obj->pages == NULL)
- return false;
+ if (!obj->mm.pages)
+ return;
/*
* Stolen memory is always coherent with the GPU as it is explicitly
* marked as wc by the system, or the system is cache-coherent.
*/
if (obj->stolen || obj->phys_handle)
- return false;
+ return;
/* If the GPU is snooping the contents of the CPU cache,
* we do not need to manually clear the CPU cache lines. However,
@@ -3138,14 +3089,12 @@ i915_gem_clflush_object(struct drm_i915_gem_object *obj,
*/
if (!force && cpu_cache_is_coherent(obj->base.dev, obj->cache_level)) {
obj->cache_dirty = true;
- return false;
+ return;
}
trace_i915_gem_object_clflush(obj);
- drm_clflush_sg(obj->pages);
+ drm_clflush_sg(obj->mm.pages);
obj->cache_dirty = false;
-
- return true;
}
/** Flushes the GTT write domain for the object if it's dirty. */
@@ -3174,7 +3123,7 @@ i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj)
*/
wmb();
if (INTEL_GEN(dev_priv) >= 6 && !HAS_LLC(dev_priv))
- POSTING_READ(RING_ACTHD(dev_priv->engine[RCS].mmio_base));
+ POSTING_READ(RING_ACTHD(dev_priv->engine[RCS]->mmio_base));
intel_fb_obj_flush(obj, false, write_origin(obj, I915_GEM_DOMAIN_GTT));
@@ -3191,9 +3140,7 @@ i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj)
if (obj->base.write_domain != I915_GEM_DOMAIN_CPU)
return;
- if (i915_gem_clflush_object(obj, obj->pin_display))
- i915_gem_chipset_flush(to_i915(obj->base.dev));
-
+ i915_gem_clflush_object(obj, obj->pin_display);
intel_fb_obj_flush(obj, false, ORIGIN_CPU);
obj->base.write_domain = 0;
@@ -3202,24 +3149,6 @@ i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj)
I915_GEM_DOMAIN_CPU);
}
-static void i915_gem_object_bump_inactive_ggtt(struct drm_i915_gem_object *obj)
-{
- struct i915_vma *vma;
-
- list_for_each_entry(vma, &obj->vma_list, obj_link) {
- if (!i915_vma_is_ggtt(vma))
- continue;
-
- if (i915_vma_is_active(vma))
- continue;
-
- if (!drm_mm_node_allocated(&vma->node))
- continue;
-
- list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
- }
-}
-
/**
* Moves a single object to the GTT read, and possibly write domain.
* @obj: object to act on
@@ -3234,7 +3163,14 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
uint32_t old_write_domain, old_read_domains;
int ret;
- ret = i915_gem_object_wait_rendering(obj, !write);
+ lockdep_assert_held(&obj->base.dev->struct_mutex);
+
+ ret = i915_gem_object_wait(obj,
+ I915_WAIT_INTERRUPTIBLE |
+ I915_WAIT_LOCKED |
+ (write ? I915_WAIT_ALL : 0),
+ MAX_SCHEDULE_TIMEOUT,
+ NULL);
if (ret)
return ret;
@@ -3249,7 +3185,7 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
* continue to assume that the obj remained out of the CPU cached
* domain.
*/
- ret = i915_gem_object_get_pages(obj);
+ ret = i915_gem_object_pin_pages(obj);
if (ret)
return ret;
@@ -3268,21 +3204,19 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
/* It should now be out of any other write domains, and we can update
* the domain values for our changes.
*/
- BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
+ GEM_BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
if (write) {
obj->base.read_domains = I915_GEM_DOMAIN_GTT;
obj->base.write_domain = I915_GEM_DOMAIN_GTT;
- obj->dirty = 1;
+ obj->mm.dirty = true;
}
trace_i915_gem_object_change_domain(obj,
old_read_domains,
old_write_domain);
- /* And bump the LRU for this access */
- i915_gem_object_bump_inactive_ggtt(obj);
-
+ i915_gem_object_unpin_pages(obj);
return 0;
}
@@ -3305,10 +3239,12 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
enum i915_cache_level cache_level)
{
struct i915_vma *vma;
- int ret = 0;
+ int ret;
+
+ lockdep_assert_held(&obj->base.dev->struct_mutex);
if (obj->cache_level == cache_level)
- goto out;
+ return 0;
/* Inspect the list of currently bound VMA and unbind any that would
* be invalid given the new cache-level. This is principally to
@@ -3351,11 +3287,17 @@ restart:
* If we wait upon the object, we know that all the bound
* VMA are no longer active.
*/
- ret = i915_gem_object_wait_rendering(obj, false);
+ ret = i915_gem_object_wait(obj,
+ I915_WAIT_INTERRUPTIBLE |
+ I915_WAIT_LOCKED |
+ I915_WAIT_ALL,
+ MAX_SCHEDULE_TIMEOUT,
+ NULL);
if (ret)
return ret;
- if (!HAS_LLC(obj->base.dev) && cache_level != I915_CACHE_NONE) {
+ if (!HAS_LLC(to_i915(obj->base.dev)) &&
+ cache_level != I915_CACHE_NONE) {
/* Access to snoopable pages through the GTT is
* incoherent and on some machines causes a hard
 * lockup. Relinquish the CPU mmapping to force
@@ -3397,20 +3339,14 @@ restart:
}
}
+ if (obj->base.write_domain == I915_GEM_DOMAIN_CPU &&
+ cpu_cache_is_coherent(obj->base.dev, obj->cache_level))
+ obj->cache_dirty = true;
+
list_for_each_entry(vma, &obj->vma_list, obj_link)
vma->node.color = cache_level;
obj->cache_level = cache_level;
-out:
- /* Flush the dirty CPU caches to the backing storage so that the
- * object is now coherent at its new cache level (with respect
- * to the access domain).
- */
- if (obj->cache_dirty && cpu_write_needs_clflush(obj)) {
- if (i915_gem_clflush_object(obj, true))
- i915_gem_chipset_flush(to_i915(obj->base.dev));
- }
-
return 0;
}
@@ -3419,10 +3355,14 @@ int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data,
{
struct drm_i915_gem_caching *args = data;
struct drm_i915_gem_object *obj;
+ int err = 0;
- obj = i915_gem_object_lookup(file, args->handle);
- if (!obj)
- return -ENOENT;
+ rcu_read_lock();
+ obj = i915_gem_object_lookup_rcu(file, args->handle);
+ if (!obj) {
+ err = -ENOENT;
+ goto out;
+ }
switch (obj->cache_level) {
case I915_CACHE_LLC:
@@ -3438,15 +3378,15 @@ int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data,
args->caching = I915_CACHING_NONE;
break;
}
-
- i915_gem_object_put_unlocked(obj);
- return 0;
+out:
+ rcu_read_unlock();
+ return err;
}
int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
struct drm_file *file)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *i915 = to_i915(dev);
struct drm_i915_gem_caching *args = data;
struct drm_i915_gem_object *obj;
enum i915_cache_level level;
@@ -3463,23 +3403,21 @@ int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
* cacheline, whereas normally such cachelines would get
* invalidated.
*/
- if (!HAS_LLC(dev) && !HAS_SNOOP(dev))
+ if (!HAS_LLC(i915) && !HAS_SNOOP(i915))
return -ENODEV;
level = I915_CACHE_LLC;
break;
case I915_CACHING_DISPLAY:
- level = HAS_WT(dev) ? I915_CACHE_WT : I915_CACHE_NONE;
+ level = HAS_WT(i915) ? I915_CACHE_WT : I915_CACHE_NONE;
break;
default:
return -EINVAL;
}
- intel_runtime_pm_get(dev_priv);
-
ret = i915_mutex_lock_interruptible(dev);
if (ret)
- goto rpm_put;
+ return ret;
obj = i915_gem_object_lookup(file, args->handle);
if (!obj) {
@@ -3488,13 +3426,9 @@ int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
}
ret = i915_gem_object_set_cache_level(obj, level);
-
i915_gem_object_put(obj);
unlock:
mutex_unlock(&dev->struct_mutex);
-rpm_put:
- intel_runtime_pm_put(dev_priv);
-
return ret;
}
@@ -3512,6 +3446,8 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
u32 old_read_domains, old_write_domain;
int ret;
+ lockdep_assert_held(&obj->base.dev->struct_mutex);
+
/* Mark the pin_display early so that we account for the
* display coherency whilst setting up the cache domains.
*/
@@ -3527,7 +3463,8 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
* with that bit in the PTE to main memory with just one PIPE_CONTROL.
*/
ret = i915_gem_object_set_cache_level(obj,
- HAS_WT(obj->base.dev) ? I915_CACHE_WT : I915_CACHE_NONE);
+ HAS_WT(to_i915(obj->base.dev)) ?
+ I915_CACHE_WT : I915_CACHE_NONE);
if (ret) {
vma = ERR_PTR(ret);
goto err_unpin_display;
@@ -3565,7 +3502,11 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
vma->display_alignment = max_t(u64, vma->display_alignment, alignment);
- i915_gem_object_flush_cpu_write_domain(obj);
+ /* Treat this as an end-of-frame, like intel_user_framebuffer_dirty() */
+ if (obj->cache_dirty) {
+ i915_gem_clflush_object(obj, true);
+ intel_fb_obj_flush(obj, false, ORIGIN_DIRTYFB);
+ }
old_write_domain = obj->base.write_domain;
old_read_domains = obj->base.read_domains;
@@ -3590,6 +3531,8 @@ err_unpin_display:
void
i915_gem_object_unpin_from_display_plane(struct i915_vma *vma)
{
+ lockdep_assert_held(&vma->vm->dev->struct_mutex);
+
if (WARN_ON(vma->obj->pin_display == 0))
return;
@@ -3617,7 +3560,14 @@ i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
uint32_t old_write_domain, old_read_domains;
int ret;
- ret = i915_gem_object_wait_rendering(obj, !write);
+ lockdep_assert_held(&obj->base.dev->struct_mutex);
+
+ ret = i915_gem_object_wait(obj,
+ I915_WAIT_INTERRUPTIBLE |
+ I915_WAIT_LOCKED |
+ (write ? I915_WAIT_ALL : 0),
+ MAX_SCHEDULE_TIMEOUT,
+ NULL);
if (ret)
return ret;
@@ -3639,7 +3589,7 @@ i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
/* It should now be out of any other write domains, and we can update
* the domain values for our changes.
*/
- BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
+ GEM_BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
/* If we're writing through the CPU, then the GPU read domains will
* need to be invalidated at next use.
@@ -3673,11 +3623,7 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
struct drm_i915_file_private *file_priv = file->driver_priv;
unsigned long recent_enough = jiffies - DRM_I915_THROTTLE_JIFFIES;
struct drm_i915_gem_request *request, *target = NULL;
- int ret;
-
- ret = i915_gem_wait_for_error(&dev_priv->gpu_error);
- if (ret)
- return ret;
+ long ret;
/* ABI: return -EIO if already wedged */
if (i915_terminally_wedged(&dev_priv->gpu_error))
@@ -3704,103 +3650,12 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
if (target == NULL)
return 0;
- ret = i915_wait_request(target, I915_WAIT_INTERRUPTIBLE, NULL, NULL);
+ ret = i915_wait_request(target,
+ I915_WAIT_INTERRUPTIBLE,
+ MAX_SCHEDULE_TIMEOUT);
i915_gem_request_put(target);
- return ret;
-}
-
-static bool
-i915_vma_misplaced(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
-{
- if (!drm_mm_node_allocated(&vma->node))
- return false;
-
- if (vma->node.size < size)
- return true;
-
- if (alignment && vma->node.start & (alignment - 1))
- return true;
-
- if (flags & PIN_MAPPABLE && !i915_vma_is_map_and_fenceable(vma))
- return true;
-
- if (flags & PIN_OFFSET_BIAS &&
- vma->node.start < (flags & PIN_OFFSET_MASK))
- return true;
-
- if (flags & PIN_OFFSET_FIXED &&
- vma->node.start != (flags & PIN_OFFSET_MASK))
- return true;
-
- return false;
-}
-
-void __i915_vma_set_map_and_fenceable(struct i915_vma *vma)
-{
- struct drm_i915_gem_object *obj = vma->obj;
- struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
- bool mappable, fenceable;
- u32 fence_size, fence_alignment;
-
- fence_size = i915_gem_get_ggtt_size(dev_priv,
- vma->size,
- i915_gem_object_get_tiling(obj));
- fence_alignment = i915_gem_get_ggtt_alignment(dev_priv,
- vma->size,
- i915_gem_object_get_tiling(obj),
- true);
-
- fenceable = (vma->node.size == fence_size &&
- (vma->node.start & (fence_alignment - 1)) == 0);
-
- mappable = (vma->node.start + fence_size <=
- dev_priv->ggtt.mappable_end);
-
- /*
- * Explicitly disable for rotated VMA since the display does not
- * need the fence and the VMA is not accessible to other users.
- */
- if (mappable && fenceable &&
- vma->ggtt_view.type != I915_GGTT_VIEW_ROTATED)
- vma->flags |= I915_VMA_CAN_FENCE;
- else
- vma->flags &= ~I915_VMA_CAN_FENCE;
-}
-
-int __i915_vma_do_pin(struct i915_vma *vma,
- u64 size, u64 alignment, u64 flags)
-{
- unsigned int bound = vma->flags;
- int ret;
-
- GEM_BUG_ON((flags & (PIN_GLOBAL | PIN_USER)) == 0);
- GEM_BUG_ON((flags & PIN_GLOBAL) && !i915_vma_is_ggtt(vma));
-
- if (WARN_ON(bound & I915_VMA_PIN_OVERFLOW)) {
- ret = -EBUSY;
- goto err;
- }
-
- if ((bound & I915_VMA_BIND_MASK) == 0) {
- ret = i915_vma_insert(vma, size, alignment, flags);
- if (ret)
- goto err;
- }
-
- ret = i915_vma_bind(vma, vma->obj->cache_level, flags);
- if (ret)
- goto err;
-
- if ((bound ^ vma->flags) & I915_VMA_GLOBAL_BIND)
- __i915_vma_set_map_and_fenceable(vma);
-
- GEM_BUG_ON(i915_vma_misplaced(vma, size, alignment, flags));
- return 0;
-
-err:
- __i915_vma_unpin(vma);
- return ret;
+ return ret < 0 ? ret : 0;
}
struct i915_vma *
@@ -3810,10 +3665,13 @@ i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
u64 alignment,
u64 flags)
{
- struct i915_address_space *vm = &to_i915(obj->base.dev)->ggtt.base;
+ struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
+ struct i915_address_space *vm = &dev_priv->ggtt.base;
struct i915_vma *vma;
int ret;
+ lockdep_assert_held(&obj->base.dev->struct_mutex);
+
vma = i915_gem_obj_lookup_or_create_vma(obj, vm, view);
if (IS_ERR(vma))
return vma;
@@ -3823,6 +3681,41 @@ i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
(i915_vma_is_pinned(vma) || i915_vma_is_active(vma)))
return ERR_PTR(-ENOSPC);
+ if (flags & PIN_MAPPABLE) {
+ u32 fence_size;
+
+ fence_size = i915_gem_get_ggtt_size(dev_priv, vma->size,
+ i915_gem_object_get_tiling(obj));
+ /* If the required space is larger than the available
+	 * aperture, we will not be able to find a slot for the
+ * object and unbinding the object now will be in
+ * vain. Worse, doing so may cause us to ping-pong
+ * the object in and out of the Global GTT and
+ * waste a lot of cycles under the mutex.
+ */
+ if (fence_size > dev_priv->ggtt.mappable_end)
+ return ERR_PTR(-E2BIG);
+
+ /* If NONBLOCK is set the caller is optimistically
+ * trying to cache the full object within the mappable
+ * aperture, and *must* have a fallback in place for
+ * situations where we cannot bind the object. We
+ * can be a little more lax here and use the fallback
+ * more often to avoid costly migrations of ourselves
+ * and other objects within the aperture.
+ *
+ * Half-the-aperture is used as a simple heuristic.
+	 * More interesting would be to search for a free
+ * block prior to making the commitment to unbind.
+ * That caters for the self-harm case, and with a
+ * little more heuristics (e.g. NOFAULT, NOEVICT)
+ * we could try to minimise harm to others.
+ */
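+	/* Illustrative numbers: with a 256 MiB mappable aperture, a
+	 * PIN_NONBLOCK pin whose fence footprint exceeds 128 MiB is
+	 * failed with -ENOSPC, so the caller takes its non-mappable
+	 * fallback instead of thrashing the aperture.
+	 */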
+ if (flags & PIN_NONBLOCK &&
+ fence_size > dev_priv->ggtt.mappable_end / 2)
+ return ERR_PTR(-ENOSPC);
+ }
+
WARN(i915_vma_is_pinned(vma),
"bo is already pinned in ggtt with incorrect alignment:"
" offset=%08x, req.alignment=%llx,"
@@ -3869,83 +3762,42 @@ static __always_inline unsigned int __busy_write_id(unsigned int id)
}
static __always_inline unsigned int
-__busy_set_if_active(const struct i915_gem_active *active,
+__busy_set_if_active(const struct dma_fence *fence,
unsigned int (*flag)(unsigned int id))
{
- struct drm_i915_gem_request *request;
-
- request = rcu_dereference(active->request);
- if (!request || i915_gem_request_completed(request))
- return 0;
+ struct drm_i915_gem_request *rq;
- /* This is racy. See __i915_gem_active_get_rcu() for an in detail
- * discussion of how to handle the race correctly, but for reporting
- * the busy state we err on the side of potentially reporting the
- * wrong engine as being busy (but we guarantee that the result
- * is at least self-consistent).
- *
- * As we use SLAB_DESTROY_BY_RCU, the request may be reallocated
- * whilst we are inspecting it, even under the RCU read lock as we are.
- * This means that there is a small window for the engine and/or the
- * seqno to have been overwritten. The seqno will always be in the
- * future compared to the intended, and so we know that if that
- * seqno is idle (on whatever engine) our request is idle and the
- * return 0 above is correct.
- *
- * The issue is that if the engine is switched, it is just as likely
- * to report that it is busy (but since the switch happened, we know
- * the request should be idle). So there is a small chance that a busy
- * result is actually the wrong engine.
- *
- * So why don't we care?
- *
- * For starters, the busy ioctl is a heuristic that is by definition
- * racy. Even with perfect serialisation in the driver, the hardware
- * state is constantly advancing - the state we report to the user
- * is stale.
- *
- * The critical information for the busy-ioctl is whether the object
- * is idle as userspace relies on that to detect whether its next
- * access will stall, or if it has missed submitting commands to
- * the hardware allowing the GPU to stall. We never generate a
- * false-positive for idleness, thus busy-ioctl is reliable at the
- * most fundamental level, and we maintain the guarantee that a
- * busy object left to itself will eventually become idle (and stay
- * idle!).
+ /* We have to check the current hw status of the fence as the uABI
+ * guarantees forward progress. We could rely on the idle worker
+ * to eventually flush us, but to minimise latency just ask the
+ * hardware.
*
- * We allow ourselves the leeway of potentially misreporting the busy
- * state because that is an optimisation heuristic that is constantly
- * in flux. Being quickly able to detect the busy/idle state is much
- * more important than accurate logging of exactly which engines were
- * busy.
- *
- * For accuracy in reporting the engine, we could use
- *
- * result = 0;
- * request = __i915_gem_active_get_rcu(active);
- * if (request) {
- * if (!i915_gem_request_completed(request))
- * result = flag(request->engine->exec_id);
- * i915_gem_request_put(request);
- * }
- *
- * but that still remains susceptible to both hardware and userspace
- * races. So we accept making the result of that race slightly worse,
- * given the rarity of the race and its low impact on the result.
+ * Note we only report on the status of native fences.
*/
- return flag(READ_ONCE(request->engine->exec_id));
+ if (!dma_fence_is_i915(fence))
+ return 0;
+
+ /* opencode to_request() in order to avoid const warnings */
+ rq = container_of(fence, struct drm_i915_gem_request, fence);
+ if (i915_gem_request_completed(rq))
+ return 0;
+
+ return flag(rq->engine->exec_id);
}
static __always_inline unsigned int
-busy_check_reader(const struct i915_gem_active *active)
+busy_check_reader(const struct dma_fence *fence)
{
- return __busy_set_if_active(active, __busy_read_flag);
+ return __busy_set_if_active(fence, __busy_read_flag);
}
static __always_inline unsigned int
-busy_check_writer(const struct i915_gem_active *active)
+busy_check_writer(const struct dma_fence *fence)
{
- return __busy_set_if_active(active, __busy_write_id);
+ if (!fence)
+ return 0;
+
+ return __busy_set_if_active(fence, __busy_write_id);
}
int
@@ -3954,64 +3806,58 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
{
struct drm_i915_gem_busy *args = data;
struct drm_i915_gem_object *obj;
- unsigned long active;
+ struct reservation_object_list *list;
+ unsigned int seq;
+ int err;
- obj = i915_gem_object_lookup(file, args->handle);
+ err = -ENOENT;
+ rcu_read_lock();
+ obj = i915_gem_object_lookup_rcu(file, args->handle);
if (!obj)
- return -ENOENT;
+ goto out;
- args->busy = 0;
- active = __I915_BO_ACTIVE(obj);
- if (active) {
- int idx;
+ /* A discrepancy here is that we do not report the status of
+ * non-i915 fences, i.e. even though we may report the object as idle,
+ * a call to set-domain may still stall waiting for foreign rendering.
+ * This also means that wait-ioctl may report an object as busy,
+ * where busy-ioctl considers it idle.
+ *
+ * We trade the ability to warn of foreign fences to report on which
+ * i915 engines are active for the object.
+ *
+ * Alternatively, we can trade that extra information on read/write
+ * activity with
+ * args->busy =
+ * !reservation_object_test_signaled_rcu(obj->resv, true);
+ * to report the overall busyness. This is what the wait-ioctl does.
+	 */
+retry:
+ seq = raw_read_seqcount(&obj->resv->seq);
- /* Yes, the lookups are intentionally racy.
- *
- * First, we cannot simply rely on __I915_BO_ACTIVE. We have
- * to regard the value as stale and as our ABI guarantees
- * forward progress, we confirm the status of each active
- * request with the hardware.
- *
- * Even though we guard the pointer lookup by RCU, that only
- * guarantees that the pointer and its contents remain
- * dereferencable and does *not* mean that the request we
- * have is the same as the one being tracked by the object.
- *
- * Consider that we lookup the request just as it is being
- * retired and freed. We take a local copy of the pointer,
- * but before we add its engine into the busy set, the other
- * thread reallocates it and assigns it to a task on another
- * engine with a fresh and incomplete seqno. Guarding against
- * that requires careful serialisation and reference counting,
- * i.e. using __i915_gem_active_get_request_rcu(). We don't,
- * instead we expect that if the result is busy, which engines
- * are busy is not completely reliable - we only guarantee
- * that the object was busy.
- */
- rcu_read_lock();
+ /* Translate the exclusive fence to the READ *and* WRITE engine */
+ args->busy = busy_check_writer(rcu_dereference(obj->resv->fence_excl));
- for_each_active(active, idx)
- args->busy |= busy_check_reader(&obj->last_read[idx]);
+ /* Translate shared fences to READ set of engines */
+ list = rcu_dereference(obj->resv->fence);
+ if (list) {
+ unsigned int shared_count = list->shared_count, i;
- /* For ABI sanity, we only care that the write engine is in
- * the set of read engines. This should be ensured by the
- * ordering of setting last_read/last_write in
- * i915_vma_move_to_active(), and then in reverse in retire.
- * However, for good measure, we always report the last_write
- * request as a busy read as well as being a busy write.
- *
- * We don't care that the set of active read/write engines
- * may change during construction of the result, as it is
- * equally liable to change before userspace can inspect
- * the result.
- */
- args->busy |= busy_check_writer(&obj->last_write);
+ for (i = 0; i < shared_count; ++i) {
+ struct dma_fence *fence =
+ rcu_dereference(list->shared[i]);
- rcu_read_unlock();
+ args->busy |= busy_check_reader(fence);
+ }
}
- i915_gem_object_put_unlocked(obj);
- return 0;
+ if (args->busy && read_seqcount_retry(&obj->resv->seq, seq))
+ goto retry;
+
+ err = 0;
+out:
+ rcu_read_unlock();
+ return err;
}
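A minimal sketch of the lockless snapshot pattern used above, assuming a
hypothetical sample_fences() helper in place of the open-coded fence walk:

static unsigned int busy_snapshot(struct drm_i915_gem_object *obj)
{
	unsigned int seq, busy;

	rcu_read_lock();
	do {
		seq = raw_read_seqcount(&obj->resv->seq);
		busy = sample_fences(obj); /* hypothetical fence walk */
		/* only a busy result can be inconsistent; idle needs no retry */
	} while (busy && read_seqcount_retry(&obj->resv->seq, seq));
	rcu_read_unlock();

	return busy;
}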
int
@@ -4028,7 +3874,7 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_i915_gem_madvise *args = data;
struct drm_i915_gem_object *obj;
- int ret;
+ int err;
switch (args->madv) {
case I915_MADV_DONTNEED:
@@ -4038,77 +3884,111 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
return -EINVAL;
}
- ret = i915_mutex_lock_interruptible(dev);
- if (ret)
- return ret;
-
obj = i915_gem_object_lookup(file_priv, args->handle);
- if (!obj) {
- ret = -ENOENT;
- goto unlock;
- }
+ if (!obj)
+ return -ENOENT;
- if (obj->pages &&
+ err = mutex_lock_interruptible(&obj->mm.lock);
+ if (err)
+ goto out;
+
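+	/* On platforms with the pin-swizzled-pages quirk, tiled objects
+	 * keep their pages pinned while marked WILLNEED; obj->mm.quirked
+	 * tracks that extra pin so it is taken and dropped exactly once
+	 * across madvise transitions.
+	 */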
+ if (obj->mm.pages &&
i915_gem_object_is_tiled(obj) &&
dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
- if (obj->madv == I915_MADV_WILLNEED)
- i915_gem_object_unpin_pages(obj);
- if (args->madv == I915_MADV_WILLNEED)
- i915_gem_object_pin_pages(obj);
+ if (obj->mm.madv == I915_MADV_WILLNEED) {
+ GEM_BUG_ON(!obj->mm.quirked);
+ __i915_gem_object_unpin_pages(obj);
+ obj->mm.quirked = false;
+ }
+ if (args->madv == I915_MADV_WILLNEED) {
+ GEM_BUG_ON(obj->mm.quirked);
+ __i915_gem_object_pin_pages(obj);
+ obj->mm.quirked = true;
+ }
}
- if (obj->madv != __I915_MADV_PURGED)
- obj->madv = args->madv;
+ if (obj->mm.madv != __I915_MADV_PURGED)
+ obj->mm.madv = args->madv;
/* if the object is no longer attached, discard its backing storage */
- if (obj->madv == I915_MADV_DONTNEED && obj->pages == NULL)
+ if (obj->mm.madv == I915_MADV_DONTNEED && !obj->mm.pages)
i915_gem_object_truncate(obj);
- args->retained = obj->madv != __I915_MADV_PURGED;
+ args->retained = obj->mm.madv != __I915_MADV_PURGED;
+ mutex_unlock(&obj->mm.lock);
+out:
i915_gem_object_put(obj);
-unlock:
- mutex_unlock(&dev->struct_mutex);
- return ret;
+ return err;
+}
+
+static void
+frontbuffer_retire(struct i915_gem_active *active,
+ struct drm_i915_gem_request *request)
+{
+ struct drm_i915_gem_object *obj =
+ container_of(active, typeof(*obj), frontbuffer_write);
+
+ intel_fb_obj_flush(obj, true, ORIGIN_CS);
}
void i915_gem_object_init(struct drm_i915_gem_object *obj,
const struct drm_i915_gem_object_ops *ops)
{
- int i;
+ mutex_init(&obj->mm.lock);
- INIT_LIST_HEAD(&obj->global_list);
- for (i = 0; i < I915_NUM_ENGINES; i++)
- init_request_active(&obj->last_read[i],
- i915_gem_object_retire__read);
- init_request_active(&obj->last_write,
- i915_gem_object_retire__write);
+ INIT_LIST_HEAD(&obj->global_link);
+ INIT_LIST_HEAD(&obj->userfault_link);
INIT_LIST_HEAD(&obj->obj_exec_link);
INIT_LIST_HEAD(&obj->vma_list);
INIT_LIST_HEAD(&obj->batch_pool_link);
obj->ops = ops;
+ reservation_object_init(&obj->__builtin_resv);
+ obj->resv = &obj->__builtin_resv;
+
obj->frontbuffer_ggtt_origin = ORIGIN_GTT;
- obj->madv = I915_MADV_WILLNEED;
+ init_request_active(&obj->frontbuffer_write, frontbuffer_retire);
+
+ obj->mm.madv = I915_MADV_WILLNEED;
+ INIT_RADIX_TREE(&obj->mm.get_page.radix, GFP_KERNEL | __GFP_NOWARN);
+ mutex_init(&obj->mm.get_page.lock);
i915_gem_info_add_obj(to_i915(obj->base.dev), obj->base.size);
}
static const struct drm_i915_gem_object_ops i915_gem_object_ops = {
- .flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE,
+ .flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE |
+ I915_GEM_OBJECT_IS_SHRINKABLE,
.get_pages = i915_gem_object_get_pages_gtt,
.put_pages = i915_gem_object_put_pages_gtt,
};
-struct drm_i915_gem_object *i915_gem_object_create(struct drm_device *dev,
- size_t size)
+/* Note we don't consider signbits :| */
+#define overflows_type(x, T) \
+ (sizeof(x) > sizeof(T) && (x) >> (sizeof(T) * BITS_PER_BYTE))
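+/* e.g. on a 32-bit kernel, where obj->base.size is a 32-bit size_t,
+ * overflows_type(size, obj->base.size) flags any u64 size with bits
+ * set above bit 31.
+ */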
+
+struct drm_i915_gem_object *
+i915_gem_object_create(struct drm_device *dev, u64 size)
{
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_i915_gem_object *obj;
struct address_space *mapping;
gfp_t mask;
int ret;
+ /* There is a prevalence of the assumption that we fit the object's
+ * page count inside a 32bit _signed_ variable. Let's document this and
+ * catch if we ever need to fix it. In the meantime, if you do spot
+ * such a local variable, please consider fixing!
+ */
+ if (WARN_ON(size >> PAGE_SHIFT > INT_MAX))
+ return ERR_PTR(-E2BIG);
+
+ if (overflows_type(size, obj->base.size))
+ return ERR_PTR(-E2BIG);
+
obj = i915_gem_object_alloc(dev);
if (obj == NULL)
return ERR_PTR(-ENOMEM);
@@ -4118,7 +3998,7 @@ struct drm_i915_gem_object *i915_gem_object_create(struct drm_device *dev,
goto fail;
mask = GFP_HIGHUSER | __GFP_RECLAIMABLE;
- if (IS_CRESTLINE(dev) || IS_BROADWATER(dev)) {
+ if (IS_CRESTLINE(dev_priv) || IS_BROADWATER(dev_priv)) {
/* 965gm cannot relocate objects above 4GiB. */
mask &= ~__GFP_HIGHMEM;
mask |= __GFP_DMA32;
@@ -4132,7 +4012,7 @@ struct drm_i915_gem_object *i915_gem_object_create(struct drm_device *dev,
obj->base.write_domain = I915_GEM_DOMAIN_CPU;
obj->base.read_domains = I915_GEM_DOMAIN_CPU;
- if (HAS_LLC(dev)) {
+ if (HAS_LLC(dev_priv)) {
/* On some devices, we can have the GPU use the LLC (the CPU
* cache) for about a 10% performance improvement
* compared to uncached. Graphics requests other than
@@ -4155,7 +4035,6 @@ struct drm_i915_gem_object *i915_gem_object_create(struct drm_device *dev,
fail:
i915_gem_object_free(obj);
-
return ERR_PTR(ret);
}
@@ -4167,7 +4046,7 @@ static bool discard_backing_storage(struct drm_i915_gem_object *obj)
* back the contents from the GPU.
*/
- if (obj->madv != I915_MADV_WILLNEED)
+ if (obj->mm.madv != I915_MADV_WILLNEED)
return false;
if (obj->base.filp == NULL)
@@ -4183,16 +4062,72 @@ static bool discard_backing_storage(struct drm_i915_gem_object *obj)
return atomic_long_read(&obj->base.filp->f_count) == 1;
}
-void i915_gem_free_object(struct drm_gem_object *gem_obj)
+static void __i915_gem_free_objects(struct drm_i915_private *i915,
+ struct llist_node *freed)
{
- struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
- struct drm_device *dev = obj->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct i915_vma *vma, *next;
+ struct drm_i915_gem_object *obj, *on;
- intel_runtime_pm_get(dev_priv);
+ mutex_lock(&i915->drm.struct_mutex);
+ intel_runtime_pm_get(i915);
+ llist_for_each_entry(obj, freed, freed) {
+ struct i915_vma *vma, *vn;
+
+ trace_i915_gem_object_destroy(obj);
+
+ GEM_BUG_ON(i915_gem_object_is_active(obj));
+ list_for_each_entry_safe(vma, vn,
+ &obj->vma_list, obj_link) {
+ GEM_BUG_ON(!i915_vma_is_ggtt(vma));
+ GEM_BUG_ON(i915_vma_is_active(vma));
+ vma->flags &= ~I915_VMA_PIN_MASK;
+ i915_vma_close(vma);
+ }
+ GEM_BUG_ON(!list_empty(&obj->vma_list));
+ GEM_BUG_ON(!RB_EMPTY_ROOT(&obj->vma_tree));
+
+ list_del(&obj->global_link);
+ }
+ intel_runtime_pm_put(i915);
+ mutex_unlock(&i915->drm.struct_mutex);
- trace_i915_gem_object_destroy(obj);
+ llist_for_each_entry_safe(obj, on, freed, freed) {
+ GEM_BUG_ON(obj->bind_count);
+ GEM_BUG_ON(atomic_read(&obj->frontbuffer_bits));
+
+ if (obj->ops->release)
+ obj->ops->release(obj);
+
+ if (WARN_ON(i915_gem_object_has_pinned_pages(obj)))
+ atomic_set(&obj->mm.pages_pin_count, 0);
+ __i915_gem_object_put_pages(obj, I915_MM_NORMAL);
+ GEM_BUG_ON(obj->mm.pages);
+
+ if (obj->base.import_attach)
+ drm_prime_gem_destroy(&obj->base, NULL);
+
+ reservation_object_fini(&obj->__builtin_resv);
+ drm_gem_object_release(&obj->base);
+ i915_gem_info_remove_obj(i915, obj->base.size);
+
+ kfree(obj->bit_17);
+ i915_gem_object_free(obj);
+ }
+}
+
+static void i915_gem_flush_free_objects(struct drm_i915_private *i915)
+{
+ struct llist_node *freed;
+
+ freed = llist_del_all(&i915->mm.free_list);
+ if (unlikely(freed))
+ __i915_gem_free_objects(i915, freed);
+}
+
+static void __i915_gem_free_work(struct work_struct *work)
+{
+ struct drm_i915_private *i915 =
+ container_of(work, struct drm_i915_private, mm.free_work);
+ struct llist_node *freed;
/* All file-owned VMA should have been released by this point through
* i915_gem_close_object(), or earlier by i915_gem_context_close().
@@ -4201,47 +4136,62 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
 * the GTT either for the user or for scanout). Those VMA still need
 * to be unbound now.
*/
- list_for_each_entry_safe(vma, next, &obj->vma_list, obj_link) {
- GEM_BUG_ON(!i915_vma_is_ggtt(vma));
- GEM_BUG_ON(i915_vma_is_active(vma));
- vma->flags &= ~I915_VMA_PIN_MASK;
- i915_vma_close(vma);
- }
- GEM_BUG_ON(obj->bind_count);
- /* Stolen objects don't hold a ref, but do hold pin count. Fix that up
- * before progressing. */
- if (obj->stolen)
- i915_gem_object_unpin_pages(obj);
+ while ((freed = llist_del_all(&i915->mm.free_list)))
+ __i915_gem_free_objects(i915, freed);
+}
- WARN_ON(atomic_read(&obj->frontbuffer_bits));
+static void __i915_gem_free_object_rcu(struct rcu_head *head)
+{
+ struct drm_i915_gem_object *obj =
+ container_of(head, typeof(*obj), rcu);
+ struct drm_i915_private *i915 = to_i915(obj->base.dev);
- if (obj->pages && obj->madv == I915_MADV_WILLNEED &&
- dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES &&
- i915_gem_object_is_tiled(obj))
- i915_gem_object_unpin_pages(obj);
+ /* We can't simply use call_rcu() from i915_gem_free_object()
+ * as we need to block whilst unbinding, and the call_rcu
+ * task may be called from softirq context. So we take a
+ * detour through a worker.
+ */
+ if (llist_add(&obj->freed, &i915->mm.free_list))
+ schedule_work(&i915->mm.free_work);
+}
- if (WARN_ON(obj->pages_pin_count))
- obj->pages_pin_count = 0;
- if (discard_backing_storage(obj))
- obj->madv = I915_MADV_DONTNEED;
- i915_gem_object_put_pages(obj);
+void i915_gem_free_object(struct drm_gem_object *gem_obj)
+{
+ struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
- BUG_ON(obj->pages);
+ if (obj->mm.quirked)
+ __i915_gem_object_unpin_pages(obj);
- if (obj->base.import_attach)
- drm_prime_gem_destroy(&obj->base, NULL);
+ if (discard_backing_storage(obj))
+ obj->mm.madv = I915_MADV_DONTNEED;
- if (obj->ops->release)
- obj->ops->release(obj);
+ /* Before we free the object, make sure any pure RCU-only
+ * read-side critical sections are complete, e.g.
+ * i915_gem_busy_ioctl(). For the corresponding synchronized
+ * lookup see i915_gem_object_lookup_rcu().
+ */
+ call_rcu(&obj->rcu, __i915_gem_free_object_rcu);
+}
- drm_gem_object_release(&obj->base);
- i915_gem_info_remove_obj(dev_priv, obj->base.size);
+void __i915_gem_object_release_unless_active(struct drm_i915_gem_object *obj)
+{
+ lockdep_assert_held(&obj->base.dev->struct_mutex);
- kfree(obj->bit_17);
- i915_gem_object_free(obj);
+ GEM_BUG_ON(i915_gem_object_has_active_reference(obj));
+ if (i915_gem_object_is_active(obj))
+ i915_gem_object_set_active_reference(obj);
+ else
+ i915_gem_object_put(obj);
+}
- intel_runtime_pm_put(dev_priv);
+static void assert_kernel_context_is_current(struct drm_i915_private *dev_priv)
+{
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+
+ for_each_engine(engine, dev_priv, id)
+ GEM_BUG_ON(engine->last_context != dev_priv->kernel_context);
}
int i915_gem_suspend(struct drm_device *dev)
@@ -4272,18 +4222,46 @@ int i915_gem_suspend(struct drm_device *dev)
goto err;
i915_gem_retire_requests(dev_priv);
+ GEM_BUG_ON(dev_priv->gt.active_requests);
+ assert_kernel_context_is_current(dev_priv);
i915_gem_context_lost(dev_priv);
mutex_unlock(&dev->struct_mutex);
cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
cancel_delayed_work_sync(&dev_priv->gt.retire_work);
flush_delayed_work(&dev_priv->gt.idle_work);
+ flush_work(&dev_priv->mm.free_work);
/* Assert that we successfully flushed all the work and
* reset the GPU back to its idle, low power state.
*/
WARN_ON(dev_priv->gt.awake);
+ WARN_ON(!intel_execlists_idle(dev_priv));
+
+ /*
+	 * Neither the BIOS, ourselves nor any other kernel
+ * expects the system to be in execlists mode on startup,
+ * so we need to reset the GPU back to legacy mode. And the only
+ * known way to disable logical contexts is through a GPU reset.
+ *
+ * So in order to leave the system in a known default configuration,
+ * always reset the GPU upon unload and suspend. Afterwards we then
+ * clean up the GEM state tracking, flushing off the requests and
+ * leaving the system in a known idle state.
+ *
+	 * Note that it is of the utmost importance that the GPU is idle and
+ * all stray writes are flushed *before* we dismantle the backing
+ * storage for the pinned objects.
+ *
+ * However, since we are uncertain that resetting the GPU on older
+ * machines is a good idea, we don't - just in case it leaves the
+ * machine in an unusable condition.
+ */
+ if (HAS_HW_CONTEXTS(dev_priv)) {
+ int reset = intel_gpu_reset(dev_priv, ALL_ENGINES);
+ WARN_ON(reset && reset != -ENODEV);
+ }
return 0;
@@ -4296,8 +4274,10 @@ void i915_gem_resume(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
+ WARN_ON(dev_priv->gt.awake);
+
mutex_lock(&dev->struct_mutex);
- i915_gem_restore_gtt_mappings(dev);
+ i915_gem_restore_gtt_mappings(dev_priv);
/* As we didn't flush the kernel context before suspend, we cannot
* guarantee that the context image is complete. So let's just reset
@@ -4308,55 +4288,51 @@ void i915_gem_resume(struct drm_device *dev)
mutex_unlock(&dev->struct_mutex);
}
-void i915_gem_init_swizzling(struct drm_device *dev)
+void i915_gem_init_swizzling(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
-
- if (INTEL_INFO(dev)->gen < 5 ||
+ if (INTEL_GEN(dev_priv) < 5 ||
dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_NONE)
return;
I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
DISP_TILE_SURFACE_SWIZZLING);
- if (IS_GEN5(dev))
+ if (IS_GEN5(dev_priv))
return;
I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_SWZCTL);
- if (IS_GEN6(dev))
+ if (IS_GEN6(dev_priv))
I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_SNB));
- else if (IS_GEN7(dev))
+ else if (IS_GEN7(dev_priv))
I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_IVB));
- else if (IS_GEN8(dev))
+ else if (IS_GEN8(dev_priv))
I915_WRITE(GAMTARBMODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_BDW));
else
BUG();
}
-static void init_unused_ring(struct drm_device *dev, u32 base)
+static void init_unused_ring(struct drm_i915_private *dev_priv, u32 base)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
-
I915_WRITE(RING_CTL(base), 0);
I915_WRITE(RING_HEAD(base), 0);
I915_WRITE(RING_TAIL(base), 0);
I915_WRITE(RING_START(base), 0);
}
-static void init_unused_rings(struct drm_device *dev)
-{
- if (IS_I830(dev)) {
- init_unused_ring(dev, PRB1_BASE);
- init_unused_ring(dev, SRB0_BASE);
- init_unused_ring(dev, SRB1_BASE);
- init_unused_ring(dev, SRB2_BASE);
- init_unused_ring(dev, SRB3_BASE);
- } else if (IS_GEN2(dev)) {
- init_unused_ring(dev, SRB0_BASE);
- init_unused_ring(dev, SRB1_BASE);
- } else if (IS_GEN3(dev)) {
- init_unused_ring(dev, PRB1_BASE);
- init_unused_ring(dev, PRB2_BASE);
+static void init_unused_rings(struct drm_i915_private *dev_priv)
+{
+ if (IS_I830(dev_priv)) {
+ init_unused_ring(dev_priv, PRB1_BASE);
+ init_unused_ring(dev_priv, SRB0_BASE);
+ init_unused_ring(dev_priv, SRB1_BASE);
+ init_unused_ring(dev_priv, SRB2_BASE);
+ init_unused_ring(dev_priv, SRB3_BASE);
+ } else if (IS_GEN2(dev_priv)) {
+ init_unused_ring(dev_priv, SRB0_BASE);
+ init_unused_ring(dev_priv, SRB1_BASE);
+ } else if (IS_GEN3(dev_priv)) {
+ init_unused_ring(dev_priv, PRB1_BASE);
+ init_unused_ring(dev_priv, PRB2_BASE);
}
}
@@ -4365,31 +4341,34 @@ i915_gem_init_hw(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_engine_cs *engine;
+ enum intel_engine_id id;
int ret;
+ dev_priv->gt.last_init_time = ktime_get();
+
/* Double layer security blanket, see i915_gem_init() */
intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
- if (HAS_EDRAM(dev) && INTEL_GEN(dev_priv) < 9)
+ if (HAS_EDRAM(dev_priv) && INTEL_GEN(dev_priv) < 9)
I915_WRITE(HSW_IDICR, I915_READ(HSW_IDICR) | IDIHASHMSK(0xf));
- if (IS_HASWELL(dev))
- I915_WRITE(MI_PREDICATE_RESULT_2, IS_HSW_GT3(dev) ?
+ if (IS_HASWELL(dev_priv))
+ I915_WRITE(MI_PREDICATE_RESULT_2, IS_HSW_GT3(dev_priv) ?
LOWER_SLICE_ENABLED : LOWER_SLICE_DISABLED);
- if (HAS_PCH_NOP(dev)) {
- if (IS_IVYBRIDGE(dev)) {
+ if (HAS_PCH_NOP(dev_priv)) {
+ if (IS_IVYBRIDGE(dev_priv)) {
u32 temp = I915_READ(GEN7_MSG_CTL);
temp &= ~(WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK);
I915_WRITE(GEN7_MSG_CTL, temp);
- } else if (INTEL_INFO(dev)->gen >= 7) {
+ } else if (INTEL_GEN(dev_priv) >= 7) {
u32 temp = I915_READ(HSW_NDE_RSTWRN_OPT);
temp &= ~RESET_PCH_HANDSHAKE_ENABLE;
I915_WRITE(HSW_NDE_RSTWRN_OPT, temp);
}
}
- i915_gem_init_swizzling(dev);
+ i915_gem_init_swizzling(dev_priv);
/*
* At least 830 can leave some of the unused rings
@@ -4397,18 +4376,18 @@ i915_gem_init_hw(struct drm_device *dev)
 * will prevent c3 entry. Make sure all unused rings
* are totally idle.
*/
- init_unused_rings(dev);
+ init_unused_rings(dev_priv);
BUG_ON(!dev_priv->kernel_context);
- ret = i915_ppgtt_init_hw(dev);
+ ret = i915_ppgtt_init_hw(dev_priv);
if (ret) {
DRM_ERROR("PPGTT enable HW failed %d\n", ret);
goto out;
}
/* Need to do basic initialisation of all rings first: */
- for_each_engine(engine, dev_priv) {
+ for_each_engine(engine, dev_priv, id) {
ret = engine->init_hw(engine);
if (ret)
goto out;
@@ -4507,21 +4486,15 @@ i915_gem_cleanup_engines(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_engine_cs *engine;
+ enum intel_engine_id id;
- for_each_engine(engine, dev_priv)
+ for_each_engine(engine, dev_priv, id)
dev_priv->gt.cleanup_engine(engine);
}
-static void
-init_engine_lists(struct intel_engine_cs *engine)
-{
- INIT_LIST_HEAD(&engine->request_list);
-}
-
void
i915_gem_load_init_fences(struct drm_i915_private *dev_priv)
{
- struct drm_device *dev = &dev_priv->drm;
int i;
if (INTEL_INFO(dev_priv)->gen >= 7 && !IS_VALLEYVIEW(dev_priv) &&
@@ -4545,41 +4518,52 @@ i915_gem_load_init_fences(struct drm_i915_private *dev_priv)
fence->id = i;
list_add_tail(&fence->link, &dev_priv->mm.fence_list);
}
- i915_gem_restore_fences(dev);
+ i915_gem_restore_fences(dev_priv);
- i915_gem_detect_bit_6_swizzle(dev);
+ i915_gem_detect_bit_6_swizzle(dev_priv);
}
-void
+int
i915_gem_load_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
- int i;
+ int err = -ENOMEM;
+
+ dev_priv->objects = KMEM_CACHE(drm_i915_gem_object, SLAB_HWCACHE_ALIGN);
+ if (!dev_priv->objects)
+ goto err_out;
- dev_priv->objects =
- kmem_cache_create("i915_gem_object",
- sizeof(struct drm_i915_gem_object), 0,
- SLAB_HWCACHE_ALIGN,
- NULL);
- dev_priv->vmas =
- kmem_cache_create("i915_gem_vma",
- sizeof(struct i915_vma), 0,
- SLAB_HWCACHE_ALIGN,
- NULL);
- dev_priv->requests =
- kmem_cache_create("i915_gem_request",
- sizeof(struct drm_i915_gem_request), 0,
- SLAB_HWCACHE_ALIGN |
- SLAB_RECLAIM_ACCOUNT |
- SLAB_DESTROY_BY_RCU,
- NULL);
+ dev_priv->vmas = KMEM_CACHE(i915_vma, SLAB_HWCACHE_ALIGN);
+ if (!dev_priv->vmas)
+ goto err_objects;
+
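+	/* Requests are looked up under RCU: SLAB_DESTROY_BY_RCU allows
+	 * the slab to reuse an object within a grace period but defers
+	 * freeing its backing pages, so a stale pointer remains safely
+	 * dereferencable and readers revalidate what they find.
+	 */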
+ dev_priv->requests = KMEM_CACHE(drm_i915_gem_request,
+ SLAB_HWCACHE_ALIGN |
+ SLAB_RECLAIM_ACCOUNT |
+ SLAB_DESTROY_BY_RCU);
+ if (!dev_priv->requests)
+ goto err_vmas;
+
+ dev_priv->dependencies = KMEM_CACHE(i915_dependency,
+ SLAB_HWCACHE_ALIGN |
+ SLAB_RECLAIM_ACCOUNT);
+ if (!dev_priv->dependencies)
+ goto err_requests;
+
+ mutex_lock(&dev_priv->drm.struct_mutex);
+ INIT_LIST_HEAD(&dev_priv->gt.timelines);
+ err = i915_gem_timeline_init__global(dev_priv);
+ mutex_unlock(&dev_priv->drm.struct_mutex);
+ if (err)
+ goto err_dependencies;
INIT_LIST_HEAD(&dev_priv->context_list);
+ INIT_WORK(&dev_priv->mm.free_work, __i915_gem_free_work);
+ init_llist_head(&dev_priv->mm.free_list);
INIT_LIST_HEAD(&dev_priv->mm.unbound_list);
INIT_LIST_HEAD(&dev_priv->mm.bound_list);
INIT_LIST_HEAD(&dev_priv->mm.fence_list);
- for (i = 0; i < I915_NUM_ENGINES; i++)
- init_engine_lists(&dev_priv->engine[i]);
+ INIT_LIST_HEAD(&dev_priv->mm.userfault_list);
INIT_DELAYED_WORK(&dev_priv->gt.retire_work,
i915_gem_retire_work_handler);
INIT_DELAYED_WORK(&dev_priv->gt.idle_work,
@@ -4596,12 +4580,33 @@ i915_gem_load_init(struct drm_device *dev)
atomic_set(&dev_priv->mm.bsd_engine_dispatch_index, 0);
spin_lock_init(&dev_priv->fb_tracking.lock);
+
+ return 0;
+
+err_dependencies:
+ kmem_cache_destroy(dev_priv->dependencies);
+err_requests:
+ kmem_cache_destroy(dev_priv->requests);
+err_vmas:
+ kmem_cache_destroy(dev_priv->vmas);
+err_objects:
+ kmem_cache_destroy(dev_priv->objects);
+err_out:
+ return err;
}
void i915_gem_load_cleanup(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
+ WARN_ON(!llist_empty(&dev_priv->mm.free_list));
+
+ mutex_lock(&dev_priv->drm.struct_mutex);
+ i915_gem_timeline_fini(&dev_priv->gt.global_timeline);
+ WARN_ON(!list_empty(&dev_priv->gt.timelines));
+ mutex_unlock(&dev_priv->drm.struct_mutex);
+
+ kmem_cache_destroy(dev_priv->dependencies);
kmem_cache_destroy(dev_priv->requests);
kmem_cache_destroy(dev_priv->vmas);
kmem_cache_destroy(dev_priv->objects);
@@ -4650,7 +4655,7 @@ int i915_gem_freeze_late(struct drm_i915_private *dev_priv)
i915_gem_shrink(dev_priv, -1UL, I915_SHRINK_UNBOUND);
for (p = phases; *p; p++) {
- list_for_each_entry(obj, *p, global_list) {
+ list_for_each_entry(obj, *p, global_link) {
obj->base.read_domains = I915_GEM_DOMAIN_CPU;
obj->base.write_domain = I915_GEM_DOMAIN_CPU;
}
@@ -4686,7 +4691,7 @@ int i915_gem_open(struct drm_device *dev, struct drm_file *file)
struct drm_i915_file_private *file_priv;
int ret;
- DRM_DEBUG_DRIVER("\n");
+ DRM_DEBUG("\n");
file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL);
if (!file_priv)
@@ -4742,21 +4747,6 @@ void i915_gem_track_fb(struct drm_i915_gem_object *old,
}
}
-/* Like i915_gem_object_get_page(), but mark the returned page dirty */
-struct page *
-i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj, int n)
-{
- struct page *page;
-
- /* Only default objects have per-page dirty tracking */
- if (WARN_ON(!i915_gem_object_has_struct_page(obj)))
- return NULL;
-
- page = i915_gem_object_get_page(obj, n);
- set_page_dirty(page);
- return page;
-}
-
/* Allocate a new GEM object and fill it with the supplied data */
struct drm_i915_gem_object *
i915_gem_object_create_from_data(struct drm_device *dev,
@@ -4775,14 +4765,13 @@ i915_gem_object_create_from_data(struct drm_device *dev,
if (ret)
goto fail;
- ret = i915_gem_object_get_pages(obj);
+ ret = i915_gem_object_pin_pages(obj);
if (ret)
goto fail;
- i915_gem_object_pin_pages(obj);
- sg = obj->pages;
+ sg = obj->mm.pages;
bytes = sg_copy_from_buffer(sg->sgl, sg->nents, (void *)data, size);
- obj->dirty = 1; /* Backing store is now out of date */
+ obj->mm.dirty = true; /* Backing store is now out of date */
i915_gem_object_unpin_pages(obj);
if (WARN_ON(bytes != size)) {
@@ -4797,3 +4786,156 @@ fail:
i915_gem_object_put(obj);
return ERR_PTR(ret);
}
+
+struct scatterlist *
+i915_gem_object_get_sg(struct drm_i915_gem_object *obj,
+ unsigned int n,
+ unsigned int *offset)
+{
+ struct i915_gem_object_page_iter *iter = &obj->mm.get_page;
+ struct scatterlist *sg;
+ unsigned int idx, count;
+
+ might_sleep();
+ GEM_BUG_ON(n >= obj->base.size >> PAGE_SHIFT);
+ GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
+
+ /* As we iterate forward through the sg, we record each entry in a
+ * radixtree for quick repeated (backwards) lookups. If we have seen
+ * this index previously, we will have an entry for it.
+ *
+ * Initial lookup is O(N), but this is amortized to O(1) for
+ * sequential page access (where each new request is consecutive
+ * to the previous one). Repeated lookups are O(lg(obj->base.size)),
+ * i.e. O(1) with a large constant!
+ */
+ if (n < READ_ONCE(iter->sg_idx))
+ goto lookup;
+
+ mutex_lock(&iter->lock);
+
+	/* We prefer to reuse the last sg so that repeated lookups of this
+ * (or the subsequent) sg are fast - comparing against the last
+ * sg is faster than going through the radixtree.
+ */
+
+ sg = iter->sg_pos;
+ idx = iter->sg_idx;
+ count = __sg_page_count(sg);
+
+ while (idx + count <= n) {
+ unsigned long exception, i;
+ int ret;
+
+ /* If we cannot allocate and insert this entry, or the
+ * individual pages from this range, cancel updating the
+ * sg_idx so that on this lookup we are forced to linearly
+ * scan onwards, but on future lookups we will try the
+ * insertion again (in which case we need to be careful of
+ * the error return reporting that we have already inserted
+ * this index).
+ */
+ ret = radix_tree_insert(&iter->radix, idx, sg);
+ if (ret && ret != -EEXIST)
+ goto scan;
+
+ exception =
+ RADIX_TREE_EXCEPTIONAL_ENTRY |
+ idx << RADIX_TREE_EXCEPTIONAL_SHIFT;
+ for (i = 1; i < count; i++) {
+ ret = radix_tree_insert(&iter->radix, idx + i,
+ (void *)exception);
+ if (ret && ret != -EEXIST)
+ goto scan;
+ }
+
+ idx += count;
+ sg = ____sg_next(sg);
+ count = __sg_page_count(sg);
+ }
+
+scan:
+ iter->sg_pos = sg;
+ iter->sg_idx = idx;
+
+ mutex_unlock(&iter->lock);
+
+ if (unlikely(n < idx)) /* insertion completed by another thread */
+ goto lookup;
+
+ /* In case we failed to insert the entry into the radixtree, we need
+ * to look beyond the current sg.
+ */
+ while (idx + count <= n) {
+ idx += count;
+ sg = ____sg_next(sg);
+ count = __sg_page_count(sg);
+ }
+
+ *offset = n - idx;
+ return sg;
+
+lookup:
+ rcu_read_lock();
+
+ sg = radix_tree_lookup(&iter->radix, n);
+ GEM_BUG_ON(!sg);
+
+ /* If this index is in the middle of a multi-page sg entry,
+ * the radixtree will contain an exceptional entry that points
+ * to the start of that range. We will return the pointer to
+ * the base page and the offset of this page within the
+ * sg entry's range.
+ */
+ *offset = 0;
+ if (unlikely(radix_tree_exception(sg))) {
+ unsigned long base =
+ (unsigned long)sg >> RADIX_TREE_EXCEPTIONAL_SHIFT;
+
+ sg = radix_tree_lookup(&iter->radix, base);
+ GEM_BUG_ON(!sg);
+
+ *offset = n - base;
+ }
+
+ rcu_read_unlock();
+
+ return sg;
+}
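
The radix tree above stores a tagged "exceptional" entry for every page that sits in the middle of a multi-page sg entry: the base page index is shifted up and OR'd with a tag bit, so a lookup can tell it apart from a real scatterlist pointer and recover the start of the range. A minimal userspace sketch of that encoding, with assumed stand-in values mirroring the RADIX_TREE_EXCEPTIONAL_* constants:

#include <assert.h>
#include <stdint.h>

#define EXC_ENTRY 2UL	/* assumed stand-in for RADIX_TREE_EXCEPTIONAL_ENTRY */
#define EXC_SHIFT 2	/* assumed stand-in for RADIX_TREE_EXCEPTIONAL_SHIFT */

/* Tag a base page index so it cannot be mistaken for an sg pointer;
 * aligned pointers never have the tag bit set. */
static uintptr_t encode_base(unsigned long base)
{
	return EXC_ENTRY | ((uintptr_t)base << EXC_SHIFT);
}

static unsigned long decode_base(uintptr_t entry)
{
	assert(entry & EXC_ENTRY);
	return entry >> EXC_SHIFT;
}

int main(void)
{
	/* Page 7 of the object lives in the sg entry starting at page 4. */
	uintptr_t e = encode_base(4);

	assert(decode_base(e) == 4);	/* *offset = 7 - 4 = 3 */
	return 0;
}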
+
+struct page *
+i915_gem_object_get_page(struct drm_i915_gem_object *obj, unsigned int n)
+{
+ struct scatterlist *sg;
+ unsigned int offset;
+
+ GEM_BUG_ON(!i915_gem_object_has_struct_page(obj));
+
+ sg = i915_gem_object_get_sg(obj, n, &offset);
+ return nth_page(sg_page(sg), offset);
+}
+
+/* Like i915_gem_object_get_page(), but mark the returned page dirty */
+struct page *
+i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj,
+ unsigned int n)
+{
+ struct page *page;
+
+ page = i915_gem_object_get_page(obj, n);
+ if (!obj->mm.dirty)
+ set_page_dirty(page);
+
+ return page;
+}
+
+dma_addr_t
+i915_gem_object_get_dma_address(struct drm_i915_gem_object *obj,
+ unsigned long n)
+{
+ struct scatterlist *sg;
+ unsigned int offset;
+
+ sg = i915_gem_object_get_sg(obj, n, &offset);
+ return sg_dma_address(sg) + (offset << PAGE_SHIFT);
+}
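
i915_gem_object_get_dma_address() resolves page n to an sg entry plus a page offset within it, then scales that offset into bytes. The arithmetic in isolation, as a runnable sketch:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12	/* 4 KiB pages, as on x86 */

/* Given the DMA address of an sg entry's first page and the page
 * offset of page n within that entry, compute page n's DMA address. */
static uint64_t page_dma_address(uint64_t sg_dma_base, unsigned int offset)
{
	return sg_dma_base + ((uint64_t)offset << PAGE_SHIFT);
}

int main(void)
{
	/* Third page (offset 2) of an entry mapped at 0x10000000. */
	printf("%#llx\n",
	       (unsigned long long)page_dma_address(0x10000000ull, 2));
	return 0;	/* prints 0x10002000 */
}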
diff --git a/drivers/gpu/drm/i915/i915_gem.h b/drivers/gpu/drm/i915/i915_gem.h
index 8292e797d9b5..51ec793f2e20 100644
--- a/drivers/gpu/drm/i915/i915_gem.h
+++ b/drivers/gpu/drm/i915/i915_gem.h
@@ -28,7 +28,9 @@
#ifdef CONFIG_DRM_I915_DEBUG_GEM
#define GEM_BUG_ON(expr) BUG_ON(expr)
#else
-#define GEM_BUG_ON(expr)
+#define GEM_BUG_ON(expr) do { } while (0)
#endif
+#define I915_NUM_ENGINES 5
+
#endif /* __I915_GEM_H__ */
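
Redefining the no-debug GEM_BUG_ON() as do { } while (0) rather than an empty expansion keeps it a full statement, so "if (cond) GEM_BUG_ON(x);" no longer leaves an empty if-body behind. A small demonstration you can compile with -Wempty-body:

/* Build with: cc -Wempty-body demo.c */
#define BUG_EMPTY(expr)
#define BUG_STMT(expr) do { } while (0)

int main(void)
{
	int cond = 1;

	if (cond)
		BUG_EMPTY(cond);	/* gcc: suggest braces around empty body */

	if (cond)
		BUG_STMT(cond);		/* expands to a real statement: no warning */

	return 0;
}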
diff --git a/drivers/gpu/drm/i915/i915_gem_batch_pool.c b/drivers/gpu/drm/i915/i915_gem_batch_pool.c
index ed989596d9a3..b3bc119ec1bb 100644
--- a/drivers/gpu/drm/i915/i915_gem_batch_pool.c
+++ b/drivers/gpu/drm/i915/i915_gem_batch_pool.c
@@ -73,7 +73,7 @@ void i915_gem_batch_pool_fini(struct i915_gem_batch_pool *pool)
list_for_each_entry_safe(obj, next,
&pool->cache_list[n],
batch_pool_link)
- i915_gem_object_put(obj);
+ __i915_gem_object_release_unless_active(obj);
INIT_LIST_HEAD(&pool->cache_list[n]);
}
@@ -97,9 +97,9 @@ i915_gem_batch_pool_get(struct i915_gem_batch_pool *pool,
size_t size)
{
struct drm_i915_gem_object *obj = NULL;
- struct drm_i915_gem_object *tmp, *next;
+ struct drm_i915_gem_object *tmp;
struct list_head *list;
- int n;
+ int n, ret;
lockdep_assert_held(&pool->engine->i915->drm.struct_mutex);
@@ -112,40 +112,35 @@ i915_gem_batch_pool_get(struct i915_gem_batch_pool *pool,
n = ARRAY_SIZE(pool->cache_list) - 1;
list = &pool->cache_list[n];
- list_for_each_entry_safe(tmp, next, list, batch_pool_link) {
+ list_for_each_entry(tmp, list, batch_pool_link) {
/* The batches are strictly LRU ordered */
- if (!i915_gem_active_is_idle(&tmp->last_read[pool->engine->id],
- &tmp->base.dev->struct_mutex))
+ if (i915_gem_object_is_active(tmp))
break;
- /* While we're looping, do some clean up */
- if (tmp->madv == __I915_MADV_PURGED) {
- list_del(&tmp->batch_pool_link);
- i915_gem_object_put(tmp);
- continue;
- }
+ GEM_BUG_ON(!reservation_object_test_signaled_rcu(tmp->resv,
+ true));
if (tmp->base.size >= size) {
+ /* Clear the set of shared fences early */
+ ww_mutex_lock(&tmp->resv->lock, NULL);
+ reservation_object_add_excl_fence(tmp->resv, NULL);
+ ww_mutex_unlock(&tmp->resv->lock);
+
obj = tmp;
break;
}
}
if (obj == NULL) {
- int ret;
-
- obj = i915_gem_object_create(&pool->engine->i915->drm, size);
+ obj = i915_gem_object_create_internal(pool->engine->i915, size);
if (IS_ERR(obj))
return obj;
-
- ret = i915_gem_object_get_pages(obj);
- if (ret)
- return ERR_PTR(ret);
-
- obj->madv = I915_MADV_DONTNEED;
}
+ ret = i915_gem_object_pin_pages(obj);
+ if (ret)
+ return ERR_PTR(ret);
+
list_move_tail(&obj->batch_pool_link, list);
- i915_gem_object_pin_pages(obj);
return obj;
}
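
Because each cache list is kept in strict LRU order, the lookup above can stop at the first buffer that is still active: everything behind it is newer and therefore also busy. The early-exit scan in miniature, over a plain singly linked list:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct buf {
	size_t size;
	bool active;		/* still referenced by the GPU */
	struct buf *next;	/* list kept in strict LRU order */
};

/* Return the first idle buffer that is large enough, or NULL. Buffers
 * after the first active one are newer, so the scan can stop there. */
static struct buf *pool_get(struct buf *head, size_t size)
{
	for (struct buf *b = head; b; b = b->next) {
		if (b->active)
			break;
		if (b->size >= size)
			return b;
	}
	return NULL;
}

int main(void)
{
	struct buf c = { 8192, true, NULL };
	struct buf b = { 4096, false, &c };
	struct buf a = { 1024, false, &b };

	printf("%zu\n", pool_get(&a, 2048)->size);	/* prints 4096 */
	return 0;
}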
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index df10f4e95736..1f94b8d6d83d 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -155,9 +155,10 @@ void i915_gem_context_free(struct kref *ctx_ref)
if (ce->ring)
intel_ring_free(ce->ring);
- i915_vma_put(ce->state);
+ __i915_gem_object_release_unless_active(ce->state->obj);
}
+ kfree(ctx->name);
put_pid(ctx->pid);
list_del(&ctx->link);
@@ -192,7 +193,7 @@ i915_gem_alloc_context_obj(struct drm_device *dev, size_t size)
* This is only applicable for Ivy Bridge devices since
* later platforms don't have L3 control bits in the PTE.
*/
- if (IS_IVYBRIDGE(dev)) {
+ if (IS_IVYBRIDGE(to_i915(dev))) {
ret = i915_gem_object_set_cache_level(obj, I915_CACHE_L3_LLC);
/* Failure shouldn't ever happen this early */
if (WARN_ON(ret)) {
@@ -303,19 +304,28 @@ __create_hw_context(struct drm_device *dev,
}
/* Default context will never have a file_priv */
- if (file_priv != NULL) {
+ ret = DEFAULT_CONTEXT_HANDLE;
+ if (file_priv) {
ret = idr_alloc(&file_priv->context_idr, ctx,
DEFAULT_CONTEXT_HANDLE, 0, GFP_KERNEL);
if (ret < 0)
goto err_out;
- } else
- ret = DEFAULT_CONTEXT_HANDLE;
+ }
+ ctx->user_handle = ret;
ctx->file_priv = file_priv;
- if (file_priv)
+ if (file_priv) {
ctx->pid = get_task_pid(current, PIDTYPE_PID);
+ ctx->name = kasprintf(GFP_KERNEL, "%s[%d]/%x",
+ current->comm,
+ pid_nr(ctx->pid),
+ ctx->user_handle);
+ if (!ctx->name) {
+ ret = -ENOMEM;
+ goto err_pid;
+ }
+ }
- ctx->user_handle = ret;
/* NB: Mark all slices as needing a remap so that when the context first
* loads it will restore whatever remap state already exists. If there
* is no remap info, it will be a NOP. */
@@ -329,6 +339,9 @@ __create_hw_context(struct drm_device *dev,
return ctx;
+err_pid:
+ put_pid(ctx->pid);
+ idr_remove(&file_priv->context_idr, ctx->user_handle);
err_out:
context_close(ctx);
return ERR_PTR(ret);
@@ -352,9 +365,9 @@ i915_gem_create_context(struct drm_device *dev,
return ctx;
if (USES_FULL_PPGTT(dev)) {
- struct i915_hw_ppgtt *ppgtt =
- i915_ppgtt_create(to_i915(dev), file_priv);
+ struct i915_hw_ppgtt *ppgtt;
+ ppgtt = i915_ppgtt_create(to_i915(dev), file_priv, ctx->name);
if (IS_ERR(ppgtt)) {
DRM_DEBUG_DRIVER("PPGTT setup failed (%ld)\n",
PTR_ERR(ppgtt));
@@ -463,6 +476,7 @@ int i915_gem_context_init(struct drm_device *dev)
return PTR_ERR(ctx);
}
+ ctx->priority = I915_PRIORITY_MIN; /* lowest priority; idle task */
dev_priv->kernel_context = ctx;
DRM_DEBUG_DRIVER("%s context support initialized\n",
@@ -474,10 +488,11 @@ int i915_gem_context_init(struct drm_device *dev)
void i915_gem_context_lost(struct drm_i915_private *dev_priv)
{
struct intel_engine_cs *engine;
+ enum intel_engine_id id;
lockdep_assert_held(&dev_priv->drm.struct_mutex);
- for_each_engine(engine, dev_priv) {
+ for_each_engine(engine, dev_priv, id) {
if (engine->last_context) {
i915_gem_context_unpin(engine->last_context, engine);
engine->last_context = NULL;
@@ -492,13 +507,13 @@ void i915_gem_context_lost(struct drm_i915_private *dev_priv)
if (!i915_gem_context_is_default(ctx))
continue;
- for_each_engine(engine, dev_priv)
+ for_each_engine(engine, dev_priv, id)
ctx->engine[engine->id].initialised = false;
ctx->remap_slice = ALL_L3_SLICES(dev_priv);
}
- for_each_engine(engine, dev_priv) {
+ for_each_engine(engine, dev_priv, id) {
struct intel_context *kce =
&dev_priv->kernel_context->engine[engine->id];
@@ -563,6 +578,7 @@ mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
struct drm_i915_private *dev_priv = req->i915;
struct intel_ring *ring = req->ring;
struct intel_engine_cs *engine = req->engine;
+ enum intel_engine_id id;
u32 flags = hw_flags | MI_MM_SPACE_GTT;
const int num_rings =
/* Use an extended w/a on ivb+ if signalling from other rings */
@@ -605,7 +621,7 @@ mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
intel_ring_emit(ring,
MI_LOAD_REGISTER_IMM(num_rings));
- for_each_engine(signaller, dev_priv) {
+ for_each_engine(signaller, dev_priv, id) {
if (signaller == engine)
continue;
@@ -634,7 +650,7 @@ mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
intel_ring_emit(ring,
MI_LOAD_REGISTER_IMM(num_rings));
- for_each_engine(signaller, dev_priv) {
+ for_each_engine(signaller, dev_priv, id) {
if (signaller == engine)
continue;
@@ -749,12 +765,36 @@ needs_pd_load_post(struct i915_hw_ppgtt *ppgtt,
return false;
}
+struct i915_vma *
+i915_gem_context_pin_legacy(struct i915_gem_context *ctx,
+ unsigned int flags)
+{
+ struct i915_vma *vma = ctx->engine[RCS].state;
+ int ret;
+
+ /* Clear this page out of any CPU caches for coherent swap-in/out.
+ * We only want to do this on the first bind so that we do not stall
+ * on an active context (which by nature is already on the GPU).
+ */
+ if (!(vma->flags & I915_VMA_GLOBAL_BIND)) {
+ ret = i915_gem_object_set_to_gtt_domain(vma->obj, false);
+ if (ret)
+ return ERR_PTR(ret);
+ }
+
+ ret = i915_vma_pin(vma, 0, ctx->ggtt_alignment, PIN_GLOBAL | flags);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return vma;
+}
+
static int do_rcs_switch(struct drm_i915_gem_request *req)
{
struct i915_gem_context *to = req->ctx;
struct intel_engine_cs *engine = req->engine;
struct i915_hw_ppgtt *ppgtt = to->ppgtt ?: req->i915->mm.aliasing_ppgtt;
- struct i915_vma *vma = to->engine[RCS].state;
+ struct i915_vma *vma;
struct i915_gem_context *from;
u32 hw_flags;
int ret, i;
@@ -762,17 +802,10 @@ static int do_rcs_switch(struct drm_i915_gem_request *req)
if (skip_rcs_switch(ppgtt, engine, to))
return 0;
- /* Clear this page out of any CPU caches for coherent swap-in/out. */
- if (!(vma->flags & I915_VMA_GLOBAL_BIND)) {
- ret = i915_gem_object_set_to_gtt_domain(vma->obj, false);
- if (ret)
- return ret;
- }
-
/* Trying to pin first makes error handling easier. */
- ret = i915_vma_pin(vma, 0, to->ggtt_alignment, PIN_GLOBAL);
- if (ret)
- return ret;
+ vma = i915_gem_context_pin_legacy(to, 0);
+ if (IS_ERR(vma))
+ return PTR_ERR(vma);
/*
* Pin can switch back to the default context if we end up calling into
@@ -929,21 +962,33 @@ int i915_switch_context(struct drm_i915_gem_request *req)
int i915_gem_switch_to_kernel_context(struct drm_i915_private *dev_priv)
{
struct intel_engine_cs *engine;
+ struct i915_gem_timeline *timeline;
+ enum intel_engine_id id;
+
+ lockdep_assert_held(&dev_priv->drm.struct_mutex);
- for_each_engine(engine, dev_priv) {
+ for_each_engine(engine, dev_priv, id) {
struct drm_i915_gem_request *req;
int ret;
- if (engine->last_context == NULL)
- continue;
-
- if (engine->last_context == dev_priv->kernel_context)
- continue;
-
req = i915_gem_request_alloc(engine, dev_priv->kernel_context);
if (IS_ERR(req))
return PTR_ERR(req);
+ /* Queue this switch after all other activity */
+ list_for_each_entry(timeline, &dev_priv->gt.timelines, link) {
+ struct drm_i915_gem_request *prev;
+ struct intel_timeline *tl;
+
+ tl = &timeline->engine[engine->id];
+ prev = i915_gem_active_raw(&tl->last_request,
+ &dev_priv->drm.struct_mutex);
+ if (prev)
+ i915_sw_fence_await_sw_fence_gfp(&req->submit,
+ &prev->submit,
+ GFP_KERNEL);
+ }
+
ret = i915_switch_context(req);
i915_add_request_no_flush(req);
if (ret)
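
The context now carries a human-readable name built with kasprintf(), freed both on the error path (err_pid) and in i915_gem_context_free(). A hedged sketch of that allocate/free pairing, with a hypothetical demo_ctx standing in for i915_gem_context:

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/types.h>

/* Hypothetical stand-in for the ctx->name bookkeeping above. */
struct demo_ctx {
	char *name;
	u32 user_handle;
};

static int demo_ctx_set_name(struct demo_ctx *ctx)
{
	ctx->name = kasprintf(GFP_KERNEL, "%s[%d]/%x",
			      current->comm,
			      task_pid_nr(current),
			      ctx->user_handle);
	return ctx->name ? 0 : -ENOMEM;
}

static void demo_ctx_free(struct demo_ctx *ctx)
{
	kfree(ctx->name);	/* kfree(NULL) is a no-op, safe on the error path */
}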
diff --git a/drivers/gpu/drm/i915/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
index 97c9d68b45df..5e38299b5df6 100644
--- a/drivers/gpu/drm/i915/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
@@ -44,51 +44,42 @@ static struct sg_table *i915_gem_map_dma_buf(struct dma_buf_attachment *attachme
struct scatterlist *src, *dst;
int ret, i;
- ret = i915_mutex_lock_interruptible(obj->base.dev);
+ ret = i915_gem_object_pin_pages(obj);
if (ret)
goto err;
- ret = i915_gem_object_get_pages(obj);
- if (ret)
- goto err_unlock;
-
- i915_gem_object_pin_pages(obj);
-
/* Copy sg so that we make an independent mapping */
st = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
if (st == NULL) {
ret = -ENOMEM;
- goto err_unpin;
+ goto err_unpin_pages;
}
- ret = sg_alloc_table(st, obj->pages->nents, GFP_KERNEL);
+ ret = sg_alloc_table(st, obj->mm.pages->nents, GFP_KERNEL);
if (ret)
goto err_free;
- src = obj->pages->sgl;
+ src = obj->mm.pages->sgl;
dst = st->sgl;
- for (i = 0; i < obj->pages->nents; i++) {
+ for (i = 0; i < obj->mm.pages->nents; i++) {
sg_set_page(dst, sg_page(src), src->length, 0);
dst = sg_next(dst);
src = sg_next(src);
}
if (!dma_map_sg(attachment->dev, st->sgl, st->nents, dir)) {
- ret =-ENOMEM;
+ ret = -ENOMEM;
goto err_free_sg;
}
- mutex_unlock(&obj->base.dev->struct_mutex);
return st;
err_free_sg:
sg_free_table(st);
err_free:
kfree(st);
-err_unpin:
+err_unpin_pages:
i915_gem_object_unpin_pages(obj);
-err_unlock:
- mutex_unlock(&obj->base.dev->struct_mutex);
err:
return ERR_PTR(ret);
}
@@ -103,36 +94,21 @@ static void i915_gem_unmap_dma_buf(struct dma_buf_attachment *attachment,
sg_free_table(sg);
kfree(sg);
- mutex_lock(&obj->base.dev->struct_mutex);
i915_gem_object_unpin_pages(obj);
- mutex_unlock(&obj->base.dev->struct_mutex);
}
static void *i915_gem_dmabuf_vmap(struct dma_buf *dma_buf)
{
struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
- struct drm_device *dev = obj->base.dev;
- void *addr;
- int ret;
- ret = i915_mutex_lock_interruptible(dev);
- if (ret)
- return ERR_PTR(ret);
-
- addr = i915_gem_object_pin_map(obj, I915_MAP_WB);
- mutex_unlock(&dev->struct_mutex);
-
- return addr;
+ return i915_gem_object_pin_map(obj, I915_MAP_WB);
}
static void i915_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
{
struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
- struct drm_device *dev = obj->base.dev;
- mutex_lock(&dev->struct_mutex);
i915_gem_object_unpin_map(obj);
- mutex_unlock(&dev->struct_mutex);
}
static void *i915_gem_dmabuf_kmap_atomic(struct dma_buf *dma_buf, unsigned long page_num)
@@ -179,32 +155,45 @@ static int i915_gem_begin_cpu_access(struct dma_buf *dma_buf, enum dma_data_dire
{
struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
struct drm_device *dev = obj->base.dev;
- int ret;
bool write = (direction == DMA_BIDIRECTIONAL || direction == DMA_TO_DEVICE);
+ int err;
- ret = i915_mutex_lock_interruptible(dev);
- if (ret)
- return ret;
+ err = i915_gem_object_pin_pages(obj);
+ if (err)
+ return err;
+
+ err = i915_mutex_lock_interruptible(dev);
+ if (err)
+ goto out;
- ret = i915_gem_object_set_to_cpu_domain(obj, write);
+ err = i915_gem_object_set_to_cpu_domain(obj, write);
mutex_unlock(&dev->struct_mutex);
- return ret;
+
+out:
+ i915_gem_object_unpin_pages(obj);
+ return err;
}
static int i915_gem_end_cpu_access(struct dma_buf *dma_buf, enum dma_data_direction direction)
{
struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
struct drm_device *dev = obj->base.dev;
- int ret;
+ int err;
- ret = i915_mutex_lock_interruptible(dev);
- if (ret)
- return ret;
+ err = i915_gem_object_pin_pages(obj);
+ if (err)
+ return err;
- ret = i915_gem_object_set_to_gtt_domain(obj, false);
+ err = i915_mutex_lock_interruptible(dev);
+ if (err)
+ goto out;
+
+ err = i915_gem_object_set_to_gtt_domain(obj, false);
mutex_unlock(&dev->struct_mutex);
- return ret;
+out:
+ i915_gem_object_unpin_pages(obj);
+ return err;
}
static const struct dma_buf_ops i915_dmabuf_ops = {
@@ -222,60 +211,17 @@ static const struct dma_buf_ops i915_dmabuf_ops = {
.end_cpu_access = i915_gem_end_cpu_access,
};
-static void export_fences(struct drm_i915_gem_object *obj,
- struct dma_buf *dma_buf)
-{
- struct reservation_object *resv = dma_buf->resv;
- struct drm_i915_gem_request *req;
- unsigned long active;
- int idx;
-
- active = __I915_BO_ACTIVE(obj);
- if (!active)
- return;
-
- /* Serialise with execbuf to prevent concurrent fence-loops */
- mutex_lock(&obj->base.dev->struct_mutex);
-
- /* Mark the object for future fences before racily adding old fences */
- obj->base.dma_buf = dma_buf;
-
- ww_mutex_lock(&resv->lock, NULL);
-
- for_each_active(active, idx) {
- req = i915_gem_active_get(&obj->last_read[idx],
- &obj->base.dev->struct_mutex);
- if (!req)
- continue;
-
- if (reservation_object_reserve_shared(resv) == 0)
- reservation_object_add_shared_fence(resv, &req->fence);
-
- i915_gem_request_put(req);
- }
-
- req = i915_gem_active_get(&obj->last_write,
- &obj->base.dev->struct_mutex);
- if (req) {
- reservation_object_add_excl_fence(resv, &req->fence);
- i915_gem_request_put(req);
- }
-
- ww_mutex_unlock(&resv->lock);
- mutex_unlock(&obj->base.dev->struct_mutex);
-}
-
struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
struct drm_gem_object *gem_obj, int flags)
{
struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
- struct dma_buf *dma_buf;
exp_info.ops = &i915_dmabuf_ops;
exp_info.size = gem_obj->size;
exp_info.flags = flags;
exp_info.priv = gem_obj;
+ exp_info.resv = obj->resv;
if (obj->ops->dmabuf_export) {
int ret = obj->ops->dmabuf_export(obj);
@@ -283,30 +229,21 @@ struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
return ERR_PTR(ret);
}
- dma_buf = drm_gem_dmabuf_export(dev, &exp_info);
- if (IS_ERR(dma_buf))
- return dma_buf;
-
- export_fences(obj, dma_buf);
- return dma_buf;
+ return drm_gem_dmabuf_export(dev, &exp_info);
}
-static int i915_gem_object_get_pages_dmabuf(struct drm_i915_gem_object *obj)
+static struct sg_table *
+i915_gem_object_get_pages_dmabuf(struct drm_i915_gem_object *obj)
{
- struct sg_table *sg;
-
- sg = dma_buf_map_attachment(obj->base.import_attach, DMA_BIDIRECTIONAL);
- if (IS_ERR(sg))
- return PTR_ERR(sg);
-
- obj->pages = sg;
- return 0;
+ return dma_buf_map_attachment(obj->base.import_attach,
+ DMA_BIDIRECTIONAL);
}
-static void i915_gem_object_put_pages_dmabuf(struct drm_i915_gem_object *obj)
+static void i915_gem_object_put_pages_dmabuf(struct drm_i915_gem_object *obj,
+ struct sg_table *pages)
{
- dma_buf_unmap_attachment(obj->base.import_attach,
- obj->pages, DMA_BIDIRECTIONAL);
+ dma_buf_unmap_attachment(obj->base.import_attach, pages,
+ DMA_BIDIRECTIONAL);
}
static const struct drm_i915_gem_object_ops i915_gem_object_dmabuf_ops = {
@@ -350,6 +287,7 @@ struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
drm_gem_private_object_init(dev, &obj->base, dma_buf->size);
i915_gem_object_init(obj, &i915_gem_object_dmabuf_ops);
obj->base.import_attach = attach;
+ obj->resv = dma_buf->resv;
/* We use GTT as shorthand for a coherent domain, one that is
* neither in the GPU cache nor in the CPU cache, where all
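
begin/end_cpu_access now pin the backing pages before taking struct_mutex, so the pages cannot be reaped while the CPU touches them, and a single out: label unwinds the pin on every path. The shape of that pattern, sketched with a hypothetical demo_obj:

#include <linux/atomic.h>
#include <linux/errno.h>
#include <linux/mutex.h>

struct demo_obj {
	struct mutex lock;		/* stand-in for struct_mutex */
	atomic_t pages_pin_count;	/* stand-in for the pages pin count */
};

static int demo_pin_pages(struct demo_obj *o)
{
	atomic_inc(&o->pages_pin_count);
	return 0;
}

static void demo_unpin_pages(struct demo_obj *o)
{
	atomic_dec(&o->pages_pin_count);
}

/* Pin before locking, so an interrupted lock still leaves the pin
 * balanced; the single out: label unwinds it on every path. */
static int demo_begin_access(struct demo_obj *o)
{
	int err;

	err = demo_pin_pages(o);
	if (err)
		return err;

	err = mutex_lock_interruptible(&o->lock);
	if (err)
		goto out;	/* -EINTR */

	/* ... flush CPU/GTT domains here ... */
	mutex_unlock(&o->lock);
out:
	demo_unpin_pages(o);
	return err;
}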
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index 5b6f81c1dbca..bd08814b015c 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -33,13 +33,17 @@
#include "intel_drv.h"
#include "i915_trace.h"
-static bool
-gpu_is_idle(struct drm_i915_private *dev_priv)
+static bool ggtt_is_idle(struct drm_i915_private *dev_priv)
{
+ struct i915_ggtt *ggtt = &dev_priv->ggtt;
struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+
+ for_each_engine(engine, dev_priv, id) {
+ struct intel_timeline *tl;
- for_each_engine(engine, dev_priv) {
- if (intel_engine_is_active(engine))
+ tl = &ggtt->base.timeline.engine[engine->id];
+ if (i915_gem_active_isset(&tl->last_request))
return false;
}
@@ -55,7 +59,7 @@ mark_free(struct i915_vma *vma, unsigned int flags, struct list_head *unwind)
if (WARN_ON(!list_empty(&vma->exec_list)))
return false;
- if (flags & PIN_NONFAULT && vma->obj->fault_mappable)
+ if (flags & PIN_NONFAULT && !list_empty(&vma->obj->userfault_link))
return false;
list_add(&vma->exec_list, unwind);
@@ -102,6 +106,7 @@ i915_gem_evict_something(struct i915_address_space *vm,
struct i915_vma *vma, *next;
int ret;
+ lockdep_assert_held(&vm->dev->struct_mutex);
trace_i915_gem_evict(vm, min_size, alignment, flags);
/*
@@ -152,7 +157,7 @@ search_again:
if (!i915_is_ggtt(vm) || flags & PIN_NONBLOCK)
return -ENOSPC;
- if (gpu_is_idle(dev_priv)) {
+ if (ggtt_is_idle(dev_priv)) {
/* If we still have pending pageflip completions, drop
* back to userspace to give our workqueues time to
* acquire our locks and unpin the old scanouts.
@@ -212,6 +217,8 @@ i915_gem_evict_for_vma(struct i915_vma *target)
{
struct drm_mm_node *node, *next;
+ lockdep_assert_held(&target->vm->dev->struct_mutex);
+
list_for_each_entry_safe(node, next,
&target->vm->mm.head_node.node_list,
node_list) {
@@ -265,7 +272,7 @@ int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle)
struct i915_vma *vma, *next;
int ret;
- WARN_ON(!mutex_is_locked(&vm->dev->struct_mutex));
+ lockdep_assert_held(&vm->dev->struct_mutex);
trace_i915_gem_evict_vm(vm);
if (do_idle) {
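
The eviction paths trade WARN_ON(!mutex_is_locked(...)) for lockdep_assert_held(), which additionally verifies that the current task holds the lock and compiles away entirely when lockdep is disabled. The idiom in a self-contained sketch:

#include <linux/lockdep.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(demo_lock);
static int demo_count;

/* Caller must hold demo_lock. mutex_is_locked() would only prove that
 * *someone* holds it; lockdep_assert_held() proves it is this task,
 * and costs nothing when CONFIG_LOCKDEP is off. */
static void demo_bump_locked(void)
{
	lockdep_assert_held(&demo_lock);
	demo_count++;
}

static void demo_bump(void)
{
	mutex_lock(&demo_lock);
	demo_bump_locked();
	mutex_unlock(&demo_lock);
}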
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index a218c2e395e7..097d9d8c2315 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -34,7 +34,6 @@
#include <drm/i915_drm.h>
#include "i915_drv.h"
-#include "i915_gem_dmabuf.h"
#include "i915_trace.h"
#include "intel_drv.h"
#include "intel_frontbuffer.h"
@@ -288,7 +287,7 @@ static inline int use_cpu_reloc(struct drm_i915_gem_object *obj)
if (DBG_USE_CPU_RELOC)
return DBG_USE_CPU_RELOC > 0;
- return (HAS_LLC(obj->base.dev) ||
+ return (HAS_LLC(to_i915(obj->base.dev)) ||
obj->base.write_domain == I915_GEM_DOMAIN_CPU ||
obj->cache_level != I915_CACHE_NONE);
}
@@ -332,7 +331,8 @@ static void reloc_cache_init(struct reloc_cache *cache,
cache->page = -1;
cache->vaddr = 0;
cache->i915 = i915;
- cache->use_64bit_reloc = INTEL_GEN(cache->i915) >= 8;
+ /* Must be a variable in the struct to allow GCC to unroll. */
+ cache->use_64bit_reloc = HAS_64BIT_RELOC(i915);
cache->node.allocated = false;
}
@@ -370,8 +370,7 @@ static void reloc_cache_fini(struct reloc_cache *cache)
ggtt->base.clear_range(&ggtt->base,
cache->node.start,
- cache->node.size,
- true);
+ cache->node.size);
drm_mm_remove_node(&cache->node);
} else {
i915_vma_unpin((struct i915_vma *)cache->node.mm);
@@ -419,17 +418,8 @@ static void *reloc_iomap(struct drm_i915_gem_object *obj,
unsigned long offset;
void *vaddr;
- if (cache->node.allocated) {
- wmb();
- ggtt->base.insert_page(&ggtt->base,
- i915_gem_object_get_dma_address(obj, page),
- cache->node.start, I915_CACHE_NONE, 0);
- cache->page = page;
- return unmask_page(cache->vaddr);
- }
-
if (cache->vaddr) {
- io_mapping_unmap_atomic(unmask_page(cache->vaddr));
+ io_mapping_unmap_atomic((void __force __iomem *) unmask_page(cache->vaddr));
} else {
struct i915_vma *vma;
int ret;
@@ -467,6 +457,7 @@ static void *reloc_iomap(struct drm_i915_gem_object *obj,
offset = cache->node.start;
if (cache->node.allocated) {
+ wmb();
ggtt->base.insert_page(&ggtt->base,
i915_gem_object_get_dma_address(obj, page),
offset, I915_CACHE_NONE, 0);
@@ -474,7 +465,7 @@ static void *reloc_iomap(struct drm_i915_gem_object *obj,
offset += page << PAGE_SHIFT;
}
- vaddr = io_mapping_map_atomic_wc(&cache->i915->ggtt.mappable, offset);
+ vaddr = (void __force *) io_mapping_map_atomic_wc(&cache->i915->ggtt.mappable, offset);
cache->page = page;
cache->vaddr = (unsigned long)vaddr;
@@ -552,27 +543,13 @@ repeat:
return 0;
}
-static bool object_is_idle(struct drm_i915_gem_object *obj)
-{
- unsigned long active = i915_gem_object_get_active(obj);
- int idx;
-
- for_each_active(active, idx) {
- if (!i915_gem_active_is_idle(&obj->last_read[idx],
- &obj->base.dev->struct_mutex))
- return false;
- }
-
- return true;
-}
-
static int
i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
struct eb_vmas *eb,
struct drm_i915_gem_relocation_entry *reloc,
struct reloc_cache *cache)
{
- struct drm_device *dev = obj->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
struct drm_gem_object *target_obj;
struct drm_i915_gem_object *target_i915_obj;
struct i915_vma *target_vma;
@@ -591,7 +568,7 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
/* Sandybridge PPGTT errata: We need a global gtt mapping for MI and
* pipe_control writes because the gpu doesn't properly redirect them
* through the ppgtt for non_secure batchbuffers. */
- if (unlikely(IS_GEN6(dev) &&
+ if (unlikely(IS_GEN6(dev_priv) &&
reloc->write_domain == I915_GEM_DOMAIN_INSTRUCTION)) {
ret = i915_vma_bind(target_vma, target_i915_obj->cache_level,
PIN_GLOBAL);
@@ -649,10 +626,6 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
return -EINVAL;
}
- /* We can't wait for rendering with pagefaults disabled */
- if (pagefault_disabled() && !object_is_idle(obj))
- return -EFAULT;
-
ret = relocate_entry(obj, reloc, cache, target_offset);
if (ret)
return ret;
@@ -679,12 +652,23 @@ i915_gem_execbuffer_relocate_vma(struct i915_vma *vma,
remain = entry->relocation_count;
while (remain) {
struct drm_i915_gem_relocation_entry *r = stack_reloc;
- int count = remain;
- if (count > ARRAY_SIZE(stack_reloc))
- count = ARRAY_SIZE(stack_reloc);
+ unsigned long unwritten;
+ unsigned int count;
+
+ count = min_t(unsigned int, remain, ARRAY_SIZE(stack_reloc));
remain -= count;
- if (__copy_from_user_inatomic(r, user_relocs, count*sizeof(r[0]))) {
+ /* This is the fast path and we cannot handle a pagefault
+ * whilst holding the struct mutex lest the user pass in the
+ * relocations contained within a mmapped bo. In such a case
+ * the page fault handler would call i915_gem_fault() and
+ * we would try to acquire the struct mutex again. Obviously
+ * this is bad and so lockdep complains vehemently.
+ */
+ pagefault_disable();
+ unwritten = __copy_from_user_inatomic(r, user_relocs, count*sizeof(r[0]));
+ pagefault_enable();
+ if (unlikely(unwritten)) {
ret = -EFAULT;
goto out;
}
@@ -696,11 +680,26 @@ i915_gem_execbuffer_relocate_vma(struct i915_vma *vma,
if (ret)
goto out;
- if (r->presumed_offset != offset &&
- __put_user(r->presumed_offset,
- &user_relocs->presumed_offset)) {
- ret = -EFAULT;
- goto out;
+ if (r->presumed_offset != offset) {
+ pagefault_disable();
+ unwritten = __put_user(r->presumed_offset,
+ &user_relocs->presumed_offset);
+ pagefault_enable();
+ if (unlikely(unwritten)) {
+ /* Note that reporting an error now
+ * leaves everything in an inconsistent
+ * state as we have *already* changed
+ * the relocation value inside the
+ * object. As we have not changed the
+ * reloc.presumed_offset and will not
+ * change the execobject.offset, on the
+ * next call we may not rewrite the value
+ * inside the object, leaving it
+ * dangling and causing a GPU hang.
+ */
+ ret = -EFAULT;
+ goto out;
+ }
}
user_relocs++;
@@ -740,20 +739,11 @@ i915_gem_execbuffer_relocate(struct eb_vmas *eb)
struct i915_vma *vma;
int ret = 0;
- /* This is the fast path and we cannot handle a pagefault whilst
- * holding the struct mutex lest the user pass in the relocations
- * contained within a mmaped bo. For in such a case we, the page
- * fault handler would call i915_gem_fault() and we would try to
- * acquire the struct mutex again. Obviously this is bad and so
- * lockdep complains vehemently.
- */
- pagefault_disable();
list_for_each_entry(vma, &eb->vmas, exec_list) {
ret = i915_gem_execbuffer_relocate_vma(vma, eb);
if (ret)
break;
}
- pagefault_enable();
return ret;
}
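
Instead of disabling pagefaults around the whole relocation pass, the patch brackets only the individual user copies; a fault then surfaces as a nonzero return and the caller falls back to the slow path. A sketch of one bracketed copy, using the same kernel helpers:

#include <linux/uaccess.h>

/* Copy from userspace with pagefaults disabled: a fault cannot sleep
 * (we hold struct_mutex), so it makes the copy return nonzero instead,
 * and the caller drops to a slow path that may fault safely. */
static int demo_copy_atomic(void *dst, const void __user *src, size_t len)
{
	unsigned long unwritten;

	pagefault_disable();
	unwritten = __copy_from_user_inatomic(dst, src, len);
	pagefault_enable();

	return unwritten ? -EFAULT : 0;
}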
@@ -843,7 +833,7 @@ need_reloc_mappable(struct i915_vma *vma)
return false;
/* See also use_cpu_reloc() */
- if (HAS_LLC(vma->obj->base.dev))
+ if (HAS_LLC(to_i915(vma->obj->base.dev)))
return false;
if (vma->obj->base.write_domain == I915_GEM_DOMAIN_CPU)
@@ -1111,44 +1101,20 @@ err:
return ret;
}
-static unsigned int eb_other_engines(struct drm_i915_gem_request *req)
-{
- unsigned int mask;
-
- mask = ~intel_engine_flag(req->engine) & I915_BO_ACTIVE_MASK;
- mask <<= I915_BO_ACTIVE_SHIFT;
-
- return mask;
-}
-
static int
i915_gem_execbuffer_move_to_gpu(struct drm_i915_gem_request *req,
struct list_head *vmas)
{
- const unsigned int other_rings = eb_other_engines(req);
struct i915_vma *vma;
int ret;
list_for_each_entry(vma, vmas, exec_list) {
struct drm_i915_gem_object *obj = vma->obj;
- struct reservation_object *resv;
- if (obj->flags & other_rings) {
- ret = i915_gem_request_await_object
- (req, obj, obj->base.pending_write_domain);
- if (ret)
- return ret;
- }
-
- resv = i915_gem_object_get_dmabuf_resv(obj);
- if (resv) {
- ret = i915_sw_fence_await_reservation
- (&req->submit, resv, &i915_fence_ops,
- obj->base.pending_write_domain, 10*HZ,
- GFP_KERNEL | __GFP_NOWARN);
- if (ret < 0)
- return ret;
- }
+ ret = i915_gem_request_await_object
+ (req, obj, obj->base.pending_write_domain);
+ if (ret)
+ return ret;
if (obj->base.write_domain & I915_GEM_DOMAIN_CPU)
i915_gem_clflush_object(obj, false);
@@ -1296,8 +1262,6 @@ void i915_vma_move_to_active(struct i915_vma *vma,
GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
- obj->dirty = 1; /* be paranoid */
-
/* Add a reference if we're newly entering the active list.
* The order in which we add operations to the retirement queue is
* vital here: mark_active adds to the start of the callback list,
@@ -1305,15 +1269,15 @@ void i915_vma_move_to_active(struct i915_vma *vma,
* add the active reference first and queue for it to be dropped
* *last*.
*/
- if (!i915_gem_object_is_active(obj))
- i915_gem_object_get(obj);
- i915_gem_object_set_active(obj, idx);
- i915_gem_active_set(&obj->last_read[idx], req);
+ if (!i915_vma_is_active(vma))
+ obj->active_count++;
+ i915_vma_set_active(vma, idx);
+ i915_gem_active_set(&vma->last_read[idx], req);
+ list_move_tail(&vma->vm_link, &vma->vm->active_list);
if (flags & EXEC_OBJECT_WRITE) {
- i915_gem_active_set(&obj->last_write, req);
-
- intel_fb_obj_invalidate(obj, ORIGIN_CS);
+ if (intel_fb_obj_invalidate(obj, ORIGIN_CS))
+ i915_gem_active_set(&obj->frontbuffer_write, req);
/* update for the implicit flush after a batch */
obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS;
@@ -1323,21 +1287,13 @@ void i915_vma_move_to_active(struct i915_vma *vma,
if (flags & EXEC_OBJECT_NEEDS_FENCE)
i915_gem_active_set(&vma->last_fence, req);
-
- i915_vma_set_active(vma, idx);
- i915_gem_active_set(&vma->last_read[idx], req);
- list_move_tail(&vma->vm_link, &vma->vm->active_list);
}
static void eb_export_fence(struct drm_i915_gem_object *obj,
struct drm_i915_gem_request *req,
unsigned int flags)
{
- struct reservation_object *resv;
-
- resv = i915_gem_object_get_dmabuf_resv(obj);
- if (!resv)
- return;
+ struct reservation_object *resv = obj->resv;
/* Ignore errors from failing to allocate the new fence, we can't
* handle an error right now. Worst case should be missed
@@ -1607,12 +1563,12 @@ eb_select_engine(struct drm_i915_private *dev_priv,
return NULL;
}
- engine = &dev_priv->engine[_VCS(bsd_idx)];
+ engine = dev_priv->engine[_VCS(bsd_idx)];
} else {
- engine = &dev_priv->engine[user_ring_map[user_ring_id]];
+ engine = dev_priv->engine[user_ring_map[user_ring_id]];
}
- if (!intel_engine_initialized(engine)) {
+ if (!engine) {
DRM_DEBUG("execbuf with invalid ring: %u\n", user_ring_id);
return NULL;
}
@@ -1667,7 +1623,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
}
if (args->flags & I915_EXEC_RESOURCE_STREAMER) {
- if (!HAS_RESOURCE_STREAMER(dev)) {
+ if (!HAS_RESOURCE_STREAMER(dev_priv)) {
DRM_DEBUG("RS is only allowed for Haswell, Gen8 and above\n");
return -EINVAL;
}
@@ -1921,7 +1877,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
exec2_list[i].relocs_ptr = exec_list[i].relocs_ptr;
exec2_list[i].alignment = exec_list[i].alignment;
exec2_list[i].offset = exec_list[i].offset;
- if (INTEL_INFO(dev)->gen < 4)
+ if (INTEL_GEN(to_i915(dev)) < 4)
exec2_list[i].flags = EXEC_OBJECT_NEEDS_FENCE;
else
exec2_list[i].flags = 0;
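
dev_priv->engine[] is now an array of pointers rather than embedded structs, so an absent engine is simply a NULL slot and the old intel_engine_initialized() check disappears. The same lookup pattern in a runnable miniature:

#include <stddef.h>
#include <stdio.h>

#define NUM_ENGINES 5	/* mirrors I915_NUM_ENGINES above */

struct engine { const char *name; };

/* Absent engines are NULL slots, so "does this engine exist?" is a
 * plain pointer test instead of a separate initialized flag. */
static struct engine *engines[NUM_ENGINES];

static struct engine *lookup(unsigned int id)
{
	return id < NUM_ENGINES ? engines[id] : NULL;
}

int main(void)
{
	static struct engine rcs = { "rcs0" };
	struct engine *e;

	engines[0] = &rcs;

	e = lookup(1);
	if (!e)
		printf("execbuf with invalid ring: 1\n");
	return 0;
}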
diff --git a/drivers/gpu/drm/i915/i915_gem_fence.c b/drivers/gpu/drm/i915/i915_gem_fence_reg.c
index 2c7ba0ee127c..0efa3571afc3 100644
--- a/drivers/gpu/drm/i915/i915_gem_fence.c
+++ b/drivers/gpu/drm/i915/i915_gem_fence_reg.c
@@ -343,6 +343,9 @@ i915_vma_get_fence(struct i915_vma *vma)
struct drm_i915_fence_reg *fence;
struct i915_vma *set = i915_gem_object_is_tiled(vma->obj) ? vma : NULL;
+ /* Note that we revoke fences on runtime suspend. Therefore the user
+ * must keep the device awake whilst using the fence.
+ */
assert_rpm_wakelock_held(to_i915(vma->vm->dev));
/* Just update our place in the LRU if our fence is getting reused. */
@@ -365,22 +368,16 @@ i915_vma_get_fence(struct i915_vma *vma)
/**
* i915_gem_restore_fences - restore fence state
- * @dev: DRM device
+ * @dev_priv: i915 device private
*
* Restore the hw fence state to match the software tracking again, to be called
- * after a gpu reset and on resume.
+ * after a gpu reset and on resume. Note that on runtime suspend we only cancel
+ * the fences, to be reacquired by the user later.
*/
-void i915_gem_restore_fences(struct drm_device *dev)
+void i915_gem_restore_fences(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
int i;
- /* Note that this may be called outside of struct_mutex, by
- * runtime suspend/resume. The barrier we require is enforced by
- * rpm itself - all access to fences/GTT are only within an rpm
- * wakeref, and to acquire that wakeref you must pass through here.
- */
-
for (i = 0; i < dev_priv->num_fence_regs; i++) {
struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i];
struct i915_vma *vma = reg->vma;
@@ -391,7 +388,7 @@ void i915_gem_restore_fences(struct drm_device *dev)
*/
if (vma && !i915_gem_object_is_tiled(vma->obj)) {
GEM_BUG_ON(!reg->dirty);
- GEM_BUG_ON(vma->obj->fault_mappable);
+ GEM_BUG_ON(!list_empty(&vma->obj->userfault_link));
list_move(&reg->link, &dev_priv->mm.fence_list);
vma->fence = NULL;
@@ -453,19 +450,18 @@ void i915_gem_restore_fences(struct drm_device *dev)
/**
* i915_gem_detect_bit_6_swizzle - detect bit 6 swizzling pattern
- * @dev: DRM device
+ * @dev_priv: i915 device private
*
* Detects bit 6 swizzling of address lookup between IGD access and CPU
* access through main memory.
*/
void
-i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
+i915_gem_detect_bit_6_swizzle(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
uint32_t swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
uint32_t swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
- if (INTEL_INFO(dev)->gen >= 8 || IS_VALLEYVIEW(dev)) {
+ if (INTEL_GEN(dev_priv) >= 8 || IS_VALLEYVIEW(dev_priv)) {
/*
* On BDW+, swizzling is not used. We leave the CPU memory
* controller in charge of optimizing memory accesses without
@@ -475,7 +471,7 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
*/
swizzle_x = I915_BIT_6_SWIZZLE_NONE;
swizzle_y = I915_BIT_6_SWIZZLE_NONE;
- } else if (INTEL_INFO(dev)->gen >= 6) {
+ } else if (INTEL_GEN(dev_priv) >= 6) {
if (dev_priv->preserve_bios_swizzle) {
if (I915_READ(DISP_ARB_CTL) &
DISP_TILE_SURFACE_SWIZZLING) {
@@ -504,19 +500,20 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
swizzle_y = I915_BIT_6_SWIZZLE_NONE;
}
}
- } else if (IS_GEN5(dev)) {
+ } else if (IS_GEN5(dev_priv)) {
/* On Ironlake whatever DRAM config, GPU always do
* same swizzling setup.
*/
swizzle_x = I915_BIT_6_SWIZZLE_9_10;
swizzle_y = I915_BIT_6_SWIZZLE_9;
- } else if (IS_GEN2(dev)) {
+ } else if (IS_GEN2(dev_priv)) {
/* As far as we know, the 865 doesn't have these bit 6
* swizzling issues.
*/
swizzle_x = I915_BIT_6_SWIZZLE_NONE;
swizzle_y = I915_BIT_6_SWIZZLE_NONE;
- } else if (IS_MOBILE(dev) || (IS_GEN3(dev) && !IS_G33(dev))) {
+ } else if (IS_MOBILE(dev_priv) || (IS_GEN3(dev_priv) &&
+ !IS_G33(dev_priv))) {
uint32_t dcc;
/* On 9xx chipsets, channel interleave by the CPU is
@@ -554,7 +551,7 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
}
/* check for L-shaped memory aka modified enhanced addressing */
- if (IS_GEN4(dev) &&
+ if (IS_GEN4(dev_priv) &&
!(I915_READ(DCC2) & DCC2_MODIFIED_ENHANCED_DISABLE)) {
swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
@@ -645,6 +642,7 @@ i915_gem_swizzle_page(struct page *page)
/**
* i915_gem_object_do_bit_17_swizzle - fixup bit 17 swizzling
* @obj: i915 GEM buffer object
+ * @pages: the scattergather list of physical pages
*
* This function fixes up the swizzling in case any page frame number for this
* object has changed in bit 17 since that state has been saved with
@@ -655,7 +653,8 @@ i915_gem_swizzle_page(struct page *page)
* by swapping them out and back in again).
*/
void
-i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj)
+i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj,
+ struct sg_table *pages)
{
struct sgt_iter sgt_iter;
struct page *page;
@@ -665,10 +664,9 @@ i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj)
return;
i = 0;
- for_each_sgt_page(page, sgt_iter, obj->pages) {
+ for_each_sgt_page(page, sgt_iter, pages) {
char new_bit_17 = page_to_phys(page) >> 17;
- if ((new_bit_17 & 0x1) !=
- (test_bit(i, obj->bit_17) != 0)) {
+ if ((new_bit_17 & 0x1) != (test_bit(i, obj->bit_17) != 0)) {
i915_gem_swizzle_page(page);
set_page_dirty(page);
}
@@ -679,17 +677,19 @@ i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj)
/**
* i915_gem_object_save_bit_17_swizzle - save bit 17 swizzling
* @obj: i915 GEM buffer object
+ * @pages: the scattergather list of physical pages
*
* This function saves the bit 17 of each page frame number so that swizzling
* can be fixed up later on with i915_gem_object_do_bit_17_swizzle(). This must
* be called before the backing storage can be unpinned.
*/
void
-i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj)
+i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj,
+ struct sg_table *pages)
{
+ const unsigned int page_count = obj->base.size >> PAGE_SHIFT;
struct sgt_iter sgt_iter;
struct page *page;
- int page_count = obj->base.size >> PAGE_SHIFT;
int i;
if (obj->bit_17 == NULL) {
@@ -704,7 +704,7 @@ i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj)
i = 0;
- for_each_sgt_page(page, sgt_iter, obj->pages) {
+ for_each_sgt_page(page, sgt_iter, pages) {
if (page_to_phys(page) & (1 << 17))
__set_bit(i, obj->bit_17);
else
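
The save/do pair records bit 17 of each page's physical address so that, after swap-out and swap-in, pages that crossed that 128 KiB boundary can be re-swizzled. The core test, runnable in isolation:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* A page needs re-swizzling iff bit 17 of its physical address
 * differs between the saved and the current placement. */
static bool needs_swizzle(uint64_t old_phys, uint64_t new_phys)
{
	return ((old_phys >> 17) & 1) != ((new_phys >> 17) & 1);
}

int main(void)
{
	/* Page came back 128 KiB (1 << 17) away: bit 17 flipped. */
	printf("%d\n", needs_swizzle(0x00000000, 0x00020000));	/* 1 */
	printf("%d\n", needs_swizzle(0x00020000, 0x00060000));	/* 0 */
	return 0;
}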
diff --git a/drivers/gpu/drm/i915/i915_gem_fence_reg.h b/drivers/gpu/drm/i915/i915_gem_fence_reg.h
new file mode 100644
index 000000000000..22c4a2d01adf
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_gem_fence_reg.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright © 2016 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __I915_FENCE_REG_H__
+#define __I915_FENCE_REG_H__
+
+#include <linux/list.h>
+
+struct drm_i915_private;
+struct i915_vma;
+
+struct drm_i915_fence_reg {
+ struct list_head link;
+ struct drm_i915_private *i915;
+ struct i915_vma *vma;
+ int pin_count;
+ int id;
+ /**
+ * Whether the tiling parameters for the currently
+ * associated fence register have changed. Note that
+ * for the purposes of tracking tiling changes we also
+ * treat the unfenced register, the register slot that
+ * the object occupies whilst it executes a fenced
+ * command (such as BLT on gen2/3), as a "fence".
+ */
+ bool dirty;
+};
+
+#endif
+
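
The new header compiles against bare forward declarations of drm_i915_private and i915_vma rather than including i915_drv.h, keeping the include graph (and rebuild fan-out) small. The pattern in miniature, with hypothetical demo_* names:

/* demo_fence.h - declare only what the interface needs. */
#ifndef __DEMO_FENCE_H__
#define __DEMO_FENCE_H__

#include <linux/list.h>
#include <linux/types.h>

struct demo_device;	/* opaque: the full definition is not required */

struct demo_fence_reg {
	struct list_head link;
	struct demo_device *i915;	/* pointer to an incomplete type is fine */
	int pin_count;
	bool dirty;
};

#endif /* __DEMO_FENCE_H__ */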
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 0bb4232f66bc..b4bde1452f2a 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -31,6 +31,7 @@
#include "i915_vgpu.h"
#include "i915_trace.h"
#include "intel_drv.h"
+#include "intel_frontbuffer.h"
#define I915_GFP_DMA (GFP_KERNEL | __GFP_HIGHMEM)
@@ -95,13 +96,6 @@
*
*/
-static inline struct i915_ggtt *
-i915_vm_to_ggtt(struct i915_address_space *vm)
-{
- GEM_BUG_ON(!i915_is_ggtt(vm));
- return container_of(vm, struct i915_ggtt, base);
-}
-
static int
i915_get_ggtt_vma_pages(struct i915_vma *vma);
@@ -175,7 +169,7 @@ static int ppgtt_bind_vma(struct i915_vma *vma,
{
u32 pte_flags = 0;
- vma->pages = vma->obj->pages;
+ vma->pages = vma->obj->mm.pages;
/* Currently applicable only to VLV */
if (vma->obj->gt_ro)
@@ -191,15 +185,13 @@ static void ppgtt_unbind_vma(struct i915_vma *vma)
{
vma->vm->clear_range(vma->vm,
vma->node.start,
- vma->size,
- true);
+ vma->size);
}
static gen8_pte_t gen8_pte_encode(dma_addr_t addr,
- enum i915_cache_level level,
- bool valid)
+ enum i915_cache_level level)
{
- gen8_pte_t pte = valid ? _PAGE_PRESENT | _PAGE_RW : 0;
+ gen8_pte_t pte = _PAGE_PRESENT | _PAGE_RW;
pte |= addr;
switch (level) {
@@ -234,9 +226,9 @@ static gen8_pde_t gen8_pde_encode(const dma_addr_t addr,
static gen6_pte_t snb_pte_encode(dma_addr_t addr,
enum i915_cache_level level,
- bool valid, u32 unused)
+ u32 unused)
{
- gen6_pte_t pte = valid ? GEN6_PTE_VALID : 0;
+ gen6_pte_t pte = GEN6_PTE_VALID;
pte |= GEN6_PTE_ADDR_ENCODE(addr);
switch (level) {
@@ -256,9 +248,9 @@ static gen6_pte_t snb_pte_encode(dma_addr_t addr,
static gen6_pte_t ivb_pte_encode(dma_addr_t addr,
enum i915_cache_level level,
- bool valid, u32 unused)
+ u32 unused)
{
- gen6_pte_t pte = valid ? GEN6_PTE_VALID : 0;
+ gen6_pte_t pte = GEN6_PTE_VALID;
pte |= GEN6_PTE_ADDR_ENCODE(addr);
switch (level) {
@@ -280,9 +272,9 @@ static gen6_pte_t ivb_pte_encode(dma_addr_t addr,
static gen6_pte_t byt_pte_encode(dma_addr_t addr,
enum i915_cache_level level,
- bool valid, u32 flags)
+ u32 flags)
{
- gen6_pte_t pte = valid ? GEN6_PTE_VALID : 0;
+ gen6_pte_t pte = GEN6_PTE_VALID;
pte |= GEN6_PTE_ADDR_ENCODE(addr);
if (!(flags & PTE_READ_ONLY))
@@ -296,9 +288,9 @@ static gen6_pte_t byt_pte_encode(dma_addr_t addr,
static gen6_pte_t hsw_pte_encode(dma_addr_t addr,
enum i915_cache_level level,
- bool valid, u32 unused)
+ u32 unused)
{
- gen6_pte_t pte = valid ? GEN6_PTE_VALID : 0;
+ gen6_pte_t pte = GEN6_PTE_VALID;
pte |= HSW_PTE_ADDR_ENCODE(addr);
if (level != I915_CACHE_NONE)
@@ -309,9 +301,9 @@ static gen6_pte_t hsw_pte_encode(dma_addr_t addr,
static gen6_pte_t iris_pte_encode(dma_addr_t addr,
enum i915_cache_level level,
- bool valid, u32 unused)
+ u32 unused)
{
- gen6_pte_t pte = valid ? GEN6_PTE_VALID : 0;
+ gen6_pte_t pte = GEN6_PTE_VALID;
pte |= HSW_PTE_ADDR_ENCODE(addr);
switch (level) {
@@ -328,10 +320,10 @@ static gen6_pte_t iris_pte_encode(dma_addr_t addr,
return pte;
}
-static int __setup_page_dma(struct drm_device *dev,
+static int __setup_page_dma(struct drm_i915_private *dev_priv,
struct i915_page_dma *p, gfp_t flags)
{
- struct device *kdev = &dev->pdev->dev;
+ struct device *kdev = &dev_priv->drm.pdev->dev;
p->page = alloc_page(flags);
if (!p->page)
@@ -348,14 +340,16 @@ static int __setup_page_dma(struct drm_device *dev,
return 0;
}
-static int setup_page_dma(struct drm_device *dev, struct i915_page_dma *p)
+static int setup_page_dma(struct drm_i915_private *dev_priv,
+ struct i915_page_dma *p)
{
- return __setup_page_dma(dev, p, I915_GFP_DMA);
+ return __setup_page_dma(dev_priv, p, I915_GFP_DMA);
}
-static void cleanup_page_dma(struct drm_device *dev, struct i915_page_dma *p)
+static void cleanup_page_dma(struct drm_i915_private *dev_priv,
+ struct i915_page_dma *p)
{
- struct pci_dev *pdev = dev->pdev;
+ struct pci_dev *pdev = dev_priv->drm.pdev;
if (WARN_ON(!p->page))
return;
@@ -373,27 +367,29 @@ static void *kmap_page_dma(struct i915_page_dma *p)
/* We use the flushing unmap only with ppgtt structures:
* page directories, page tables and scratch pages.
*/
-static void kunmap_page_dma(struct drm_device *dev, void *vaddr)
+static void kunmap_page_dma(struct drm_i915_private *dev_priv, void *vaddr)
{
/* There are only a few exceptions for gen >= 6: chv and bxt.
* And we are not sure about the latter, so play safe for now.
*/
- if (IS_CHERRYVIEW(dev) || IS_BROXTON(dev))
+ if (IS_CHERRYVIEW(dev_priv) || IS_BROXTON(dev_priv))
drm_clflush_virt_range(vaddr, PAGE_SIZE);
kunmap_atomic(vaddr);
}
#define kmap_px(px) kmap_page_dma(px_base(px))
-#define kunmap_px(ppgtt, vaddr) kunmap_page_dma((ppgtt)->base.dev, (vaddr))
+#define kunmap_px(ppgtt, vaddr) \
+ kunmap_page_dma(to_i915((ppgtt)->base.dev), (vaddr))
-#define setup_px(dev, px) setup_page_dma((dev), px_base(px))
-#define cleanup_px(dev, px) cleanup_page_dma((dev), px_base(px))
-#define fill_px(dev, px, v) fill_page_dma((dev), px_base(px), (v))
-#define fill32_px(dev, px, v) fill_page_dma_32((dev), px_base(px), (v))
+#define setup_px(dev_priv, px) setup_page_dma((dev_priv), px_base(px))
+#define cleanup_px(dev_priv, px) cleanup_page_dma((dev_priv), px_base(px))
+#define fill_px(dev_priv, px, v) fill_page_dma((dev_priv), px_base(px), (v))
+#define fill32_px(dev_priv, px, v) \
+ fill_page_dma_32((dev_priv), px_base(px), (v))
-static void fill_page_dma(struct drm_device *dev, struct i915_page_dma *p,
- const uint64_t val)
+static void fill_page_dma(struct drm_i915_private *dev_priv,
+ struct i915_page_dma *p, const uint64_t val)
{
int i;
uint64_t * const vaddr = kmap_page_dma(p);
@@ -401,38 +397,37 @@ static void fill_page_dma(struct drm_device *dev, struct i915_page_dma *p,
for (i = 0; i < 512; i++)
vaddr[i] = val;
- kunmap_page_dma(dev, vaddr);
+ kunmap_page_dma(dev_priv, vaddr);
}
-static void fill_page_dma_32(struct drm_device *dev, struct i915_page_dma *p,
- const uint32_t val32)
+static void fill_page_dma_32(struct drm_i915_private *dev_priv,
+ struct i915_page_dma *p, const uint32_t val32)
{
uint64_t v = val32;
v = v << 32 | val32;
- fill_page_dma(dev, p, v);
+ fill_page_dma(dev_priv, p, v);
}
static int
-setup_scratch_page(struct drm_device *dev,
+setup_scratch_page(struct drm_i915_private *dev_priv,
struct i915_page_dma *scratch,
gfp_t gfp)
{
- return __setup_page_dma(dev, scratch, gfp | __GFP_ZERO);
+ return __setup_page_dma(dev_priv, scratch, gfp | __GFP_ZERO);
}
-static void cleanup_scratch_page(struct drm_device *dev,
+static void cleanup_scratch_page(struct drm_i915_private *dev_priv,
struct i915_page_dma *scratch)
{
- cleanup_page_dma(dev, scratch);
+ cleanup_page_dma(dev_priv, scratch);
}
-static struct i915_page_table *alloc_pt(struct drm_device *dev)
+static struct i915_page_table *alloc_pt(struct drm_i915_private *dev_priv)
{
struct i915_page_table *pt;
- const size_t count = INTEL_INFO(dev)->gen >= 8 ?
- GEN8_PTES : GEN6_PTES;
+ const size_t count = INTEL_GEN(dev_priv) >= 8 ? GEN8_PTES : GEN6_PTES;
int ret = -ENOMEM;
pt = kzalloc(sizeof(*pt), GFP_KERNEL);
@@ -445,7 +440,7 @@ static struct i915_page_table *alloc_pt(struct drm_device *dev)
if (!pt->used_ptes)
goto fail_bitmap;
- ret = setup_px(dev, pt);
+ ret = setup_px(dev_priv, pt);
if (ret)
goto fail_page_m;
@@ -459,9 +454,10 @@ fail_bitmap:
return ERR_PTR(ret);
}
-static void free_pt(struct drm_device *dev, struct i915_page_table *pt)
+static void free_pt(struct drm_i915_private *dev_priv,
+ struct i915_page_table *pt)
{
- cleanup_px(dev, pt);
+ cleanup_px(dev_priv, pt);
kfree(pt->used_ptes);
kfree(pt);
}
@@ -472,9 +468,9 @@ static void gen8_initialize_pt(struct i915_address_space *vm,
gen8_pte_t scratch_pte;
scratch_pte = gen8_pte_encode(vm->scratch_page.daddr,
- I915_CACHE_LLC, true);
+ I915_CACHE_LLC);
- fill_px(vm->dev, pt, scratch_pte);
+ fill_px(to_i915(vm->dev), pt, scratch_pte);
}
static void gen6_initialize_pt(struct i915_address_space *vm,
@@ -485,12 +481,12 @@ static void gen6_initialize_pt(struct i915_address_space *vm,
WARN_ON(vm->scratch_page.daddr == 0);
scratch_pte = vm->pte_encode(vm->scratch_page.daddr,
- I915_CACHE_LLC, true, 0);
+ I915_CACHE_LLC, 0);
- fill32_px(vm->dev, pt, scratch_pte);
+ fill32_px(to_i915(vm->dev), pt, scratch_pte);
}
-static struct i915_page_directory *alloc_pd(struct drm_device *dev)
+static struct i915_page_directory *alloc_pd(struct drm_i915_private *dev_priv)
{
struct i915_page_directory *pd;
int ret = -ENOMEM;
@@ -504,7 +500,7 @@ static struct i915_page_directory *alloc_pd(struct drm_device *dev)
if (!pd->used_pdes)
goto fail_bitmap;
- ret = setup_px(dev, pd);
+ ret = setup_px(dev_priv, pd);
if (ret)
goto fail_page_m;
@@ -518,10 +514,11 @@ fail_bitmap:
return ERR_PTR(ret);
}
-static void free_pd(struct drm_device *dev, struct i915_page_directory *pd)
+static void free_pd(struct drm_i915_private *dev_priv,
+ struct i915_page_directory *pd)
{
if (px_page(pd)) {
- cleanup_px(dev, pd);
+ cleanup_px(dev_priv, pd);
kfree(pd->used_pdes);
kfree(pd);
}
@@ -534,13 +531,13 @@ static void gen8_initialize_pd(struct i915_address_space *vm,
scratch_pde = gen8_pde_encode(px_dma(vm->scratch_pt), I915_CACHE_LLC);
- fill_px(vm->dev, pd, scratch_pde);
+ fill_px(to_i915(vm->dev), pd, scratch_pde);
}
-static int __pdp_init(struct drm_device *dev,
+static int __pdp_init(struct drm_i915_private *dev_priv,
struct i915_page_directory_pointer *pdp)
{
- size_t pdpes = I915_PDPES_PER_PDP(dev);
+ size_t pdpes = I915_PDPES_PER_PDP(dev_priv);
pdp->used_pdpes = kcalloc(BITS_TO_LONGS(pdpes),
sizeof(unsigned long),
@@ -569,22 +566,22 @@ static void __pdp_fini(struct i915_page_directory_pointer *pdp)
}
static struct
-i915_page_directory_pointer *alloc_pdp(struct drm_device *dev)
+i915_page_directory_pointer *alloc_pdp(struct drm_i915_private *dev_priv)
{
struct i915_page_directory_pointer *pdp;
int ret = -ENOMEM;
- WARN_ON(!USES_FULL_48BIT_PPGTT(dev));
+ WARN_ON(!USES_FULL_48BIT_PPGTT(dev_priv));
pdp = kzalloc(sizeof(*pdp), GFP_KERNEL);
if (!pdp)
return ERR_PTR(-ENOMEM);
- ret = __pdp_init(dev, pdp);
+ ret = __pdp_init(dev_priv, pdp);
if (ret)
goto fail_bitmap;
- ret = setup_px(dev, pdp);
+ ret = setup_px(dev_priv, pdp);
if (ret)
goto fail_page_m;
@@ -598,12 +595,12 @@ fail_bitmap:
return ERR_PTR(ret);
}
-static void free_pdp(struct drm_device *dev,
+static void free_pdp(struct drm_i915_private *dev_priv,
struct i915_page_directory_pointer *pdp)
{
__pdp_fini(pdp);
- if (USES_FULL_48BIT_PPGTT(dev)) {
- cleanup_px(dev, pdp);
+ if (USES_FULL_48BIT_PPGTT(dev_priv)) {
+ cleanup_px(dev_priv, pdp);
kfree(pdp);
}
}
@@ -615,7 +612,7 @@ static void gen8_initialize_pdp(struct i915_address_space *vm,
scratch_pdpe = gen8_pdpe_encode(px_dma(vm->scratch_pd), I915_CACHE_LLC);
- fill_px(vm->dev, pdp, scratch_pdpe);
+ fill_px(to_i915(vm->dev), pdp, scratch_pdpe);
}
static void gen8_initialize_pml4(struct i915_address_space *vm,
@@ -626,7 +623,7 @@ static void gen8_initialize_pml4(struct i915_address_space *vm,
scratch_pml4e = gen8_pml4e_encode(px_dma(vm->scratch_pdp),
I915_CACHE_LLC);
- fill_px(vm->dev, pml4, scratch_pml4e);
+ fill_px(to_i915(vm->dev), pml4, scratch_pml4e);
}
static void
@@ -637,7 +634,7 @@ gen8_setup_page_directory(struct i915_hw_ppgtt *ppgtt,
{
gen8_ppgtt_pdpe_t *page_directorypo;
- if (!USES_FULL_48BIT_PPGTT(ppgtt->base.dev))
+ if (!USES_FULL_48BIT_PPGTT(to_i915(ppgtt->base.dev)))
return;
page_directorypo = kmap_px(pdp);
@@ -653,7 +650,7 @@ gen8_setup_page_directory_pointer(struct i915_hw_ppgtt *ppgtt,
{
gen8_ppgtt_pml4e_t *pagemap = kmap_px(pml4);
- WARN_ON(!USES_FULL_48BIT_PPGTT(ppgtt->base.dev));
+ WARN_ON(!USES_FULL_48BIT_PPGTT(to_i915(ppgtt->base.dev)));
pagemap[index] = gen8_pml4e_encode(px_dma(pdp), I915_CACHE_LLC);
kunmap_px(ppgtt, pagemap);
}
@@ -706,85 +703,172 @@ static int gen8_48b_mm_switch(struct i915_hw_ppgtt *ppgtt,
return gen8_write_pdp(req, 0, px_dma(&ppgtt->pml4));
}
-static void gen8_ppgtt_clear_pte_range(struct i915_address_space *vm,
- struct i915_page_directory_pointer *pdp,
- uint64_t start,
- uint64_t length,
- gen8_pte_t scratch_pte)
+/* PDE TLBs are a pain to invalidate on GEN8+. When we modify
+ * the page table structures, we mark them dirty so that
+ * context switching/execlist queuing code takes extra steps
+ * to ensure that tlbs are flushed.
+ */
+static void mark_tlbs_dirty(struct i915_hw_ppgtt *ppgtt)
+{
+ ppgtt->pd_dirty_rings = INTEL_INFO(to_i915(ppgtt->base.dev))->ring_mask;
+}
+
+/* Removes entries from a single page table, releasing it if it's empty.
+ * Caller can use the return value to update higher-level entries.
+ */
+static bool gen8_ppgtt_clear_pt(struct i915_address_space *vm,
+ struct i915_page_table *pt,
+ uint64_t start,
+ uint64_t length)
{
struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
+ unsigned int num_entries = gen8_pte_count(start, length);
+ unsigned int pte = gen8_pte_index(start);
+ unsigned int pte_end = pte + num_entries;
gen8_pte_t *pt_vaddr;
- unsigned pdpe = gen8_pdpe_index(start);
- unsigned pde = gen8_pde_index(start);
- unsigned pte = gen8_pte_index(start);
- unsigned num_entries = length >> PAGE_SHIFT;
- unsigned last_pte, i;
+ gen8_pte_t scratch_pte = gen8_pte_encode(vm->scratch_page.daddr,
+ I915_CACHE_LLC);
- if (WARN_ON(!pdp))
- return;
+ if (WARN_ON(!px_page(pt)))
+ return false;
- while (num_entries) {
- struct i915_page_directory *pd;
- struct i915_page_table *pt;
+ GEM_BUG_ON(pte_end > GEN8_PTES);
- if (WARN_ON(!pdp->page_directory[pdpe]))
- break;
+ bitmap_clear(pt->used_ptes, pte, num_entries);
+
+ if (bitmap_empty(pt->used_ptes, GEN8_PTES)) {
+ free_pt(to_i915(vm->dev), pt);
+ return true;
+ }
- pd = pdp->page_directory[pdpe];
+ pt_vaddr = kmap_px(pt);
+ while (pte < pte_end)
+ pt_vaddr[pte++] = scratch_pte;
+
+ kunmap_px(ppgtt, pt_vaddr);
+
+ return false;
+}
+
+/* Removes entries from a single page dir, releasing it if it's empty.
+ * Caller can use the return value to update higher-level entries
+ */
+static bool gen8_ppgtt_clear_pd(struct i915_address_space *vm,
+ struct i915_page_directory *pd,
+ uint64_t start,
+ uint64_t length)
+{
+ struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
+ struct i915_page_table *pt;
+ uint64_t pde;
+ gen8_pde_t *pde_vaddr;
+ gen8_pde_t scratch_pde = gen8_pde_encode(px_dma(vm->scratch_pt),
+ I915_CACHE_LLC);
+
+ gen8_for_each_pde(pt, pd, start, length, pde) {
if (WARN_ON(!pd->page_table[pde]))
break;
- pt = pd->page_table[pde];
+ if (gen8_ppgtt_clear_pt(vm, pt, start, length)) {
+ __clear_bit(pde, pd->used_pdes);
+ pde_vaddr = kmap_px(pd);
+ pde_vaddr[pde] = scratch_pde;
+ kunmap_px(ppgtt, pde_vaddr);
+ }
+ }
- if (WARN_ON(!px_page(pt)))
- break;
+ if (bitmap_empty(pd->used_pdes, I915_PDES)) {
+ free_pd(to_i915(vm->dev), pd);
+ return true;
+ }
- last_pte = pte + num_entries;
- if (last_pte > GEN8_PTES)
- last_pte = GEN8_PTES;
+ return false;
+}
- pt_vaddr = kmap_px(pt);
+/* Removes entries from a single page dir pointer, releasing it if it's empty.
+ * Caller can use the return value to update higher-level entries
+ */
+static bool gen8_ppgtt_clear_pdp(struct i915_address_space *vm,
+ struct i915_page_directory_pointer *pdp,
+ uint64_t start,
+ uint64_t length)
+{
+ struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
+ struct drm_i915_private *dev_priv = to_i915(vm->dev);
+ struct i915_page_directory *pd;
+ uint64_t pdpe;
+ gen8_ppgtt_pdpe_t *pdpe_vaddr;
+ gen8_ppgtt_pdpe_t scratch_pdpe =
+ gen8_pdpe_encode(px_dma(vm->scratch_pd), I915_CACHE_LLC);
- for (i = pte; i < last_pte; i++) {
- pt_vaddr[i] = scratch_pte;
- num_entries--;
+ gen8_for_each_pdpe(pd, pdp, start, length, pdpe) {
+ if (WARN_ON(!pdp->page_directory[pdpe]))
+ break;
+
+ if (gen8_ppgtt_clear_pd(vm, pd, start, length)) {
+ __clear_bit(pdpe, pdp->used_pdpes);
+ if (USES_FULL_48BIT_PPGTT(dev_priv)) {
+ pdpe_vaddr = kmap_px(pdp);
+ pdpe_vaddr[pdpe] = scratch_pdpe;
+ kunmap_px(ppgtt, pdpe_vaddr);
+ }
}
+ }
- kunmap_px(ppgtt, pt_vaddr);
+ mark_tlbs_dirty(ppgtt);
- pte = 0;
- if (++pde == I915_PDES) {
- if (++pdpe == I915_PDPES_PER_PDP(vm->dev))
- break;
- pde = 0;
- }
+ if (USES_FULL_48BIT_PPGTT(dev_priv) &&
+ bitmap_empty(pdp->used_pdpes, I915_PDPES_PER_PDP(dev_priv))) {
+ free_pdp(dev_priv, pdp);
+ return true;
}
+
+ return false;
}
-static void gen8_ppgtt_clear_range(struct i915_address_space *vm,
- uint64_t start,
- uint64_t length,
- bool use_scratch)
+/* Removes entries from a single pml4.
+ * This is the top-level structure in 4-level page tables used on gen8+.
+ * Empty entries always contain the scratch pml4e.
+ */
+static void gen8_ppgtt_clear_pml4(struct i915_address_space *vm,
+ struct i915_pml4 *pml4,
+ uint64_t start,
+ uint64_t length)
{
struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
- gen8_pte_t scratch_pte = gen8_pte_encode(vm->scratch_page.daddr,
- I915_CACHE_LLC, use_scratch);
+ struct i915_page_directory_pointer *pdp;
+ uint64_t pml4e;
+ gen8_ppgtt_pml4e_t *pml4e_vaddr;
+ gen8_ppgtt_pml4e_t scratch_pml4e =
+ gen8_pml4e_encode(px_dma(vm->scratch_pdp), I915_CACHE_LLC);
- if (!USES_FULL_48BIT_PPGTT(vm->dev)) {
- gen8_ppgtt_clear_pte_range(vm, &ppgtt->pdp, start, length,
- scratch_pte);
- } else {
- uint64_t pml4e;
- struct i915_page_directory_pointer *pdp;
+ GEM_BUG_ON(!USES_FULL_48BIT_PPGTT(to_i915(vm->dev)));
- gen8_for_each_pml4e(pdp, &ppgtt->pml4, start, length, pml4e) {
- gen8_ppgtt_clear_pte_range(vm, pdp, start, length,
- scratch_pte);
+ gen8_for_each_pml4e(pdp, pml4, start, length, pml4e) {
+ if (WARN_ON(!pml4->pdps[pml4e]))
+ break;
+
+ if (gen8_ppgtt_clear_pdp(vm, pdp, start, length)) {
+ __clear_bit(pml4e, pml4->used_pml4es);
+ pml4e_vaddr = kmap_px(pml4);
+ pml4e_vaddr[pml4e] = scratch_pml4e;
+ kunmap_px(ppgtt, pml4e_vaddr);
}
}
}
+static void gen8_ppgtt_clear_range(struct i915_address_space *vm,
+ uint64_t start, uint64_t length)
+{
+ struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
+
+ if (USES_FULL_48BIT_PPGTT(to_i915(vm->dev)))
+ gen8_ppgtt_clear_pml4(vm, &ppgtt->pml4, start, length);
+ else
+ gen8_ppgtt_clear_pdp(vm, &ppgtt->pdp, start, length);
+}
+
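Editor's note: the rewrite above replaces the old flat PTE loop with one clear function per level, each tracking occupancy in a bitmap and tearing the hierarchy down bottom-up: a level returns true when it drains, so its parent can point the slot back at scratch and then test its own bitmap. A minimal userspace sketch of that pattern, assuming toy sizes; toy_pt, bm_clear, bm_empty and toy_clear_pt are illustrative names, not i915 code:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define N_ENTRIES 512	/* GEN8_PTES: 4KiB page / 8-byte PTE */
#define BPL (8 * sizeof(unsigned long))

struct toy_pt {
	uint64_t entry[N_ENTRIES];
	unsigned long used[N_ENTRIES / BPL];
};

static void bm_clear(unsigned long *bm, unsigned int start, unsigned int n)
{
	while (n--) {
		bm[start / BPL] &= ~(1UL << (start % BPL));
		start++;
	}
}

static bool bm_empty(const unsigned long *bm, unsigned int bits)
{
	for (unsigned int i = 0; i < bits / BPL; i++)
		if (bm[i])
			return false;
	return true;
}

/* Mirrors gen8_ppgtt_clear_pt(): scrub a range, and report whether the
 * whole table drained so the caller can release it and use scratch. */
static bool toy_clear_pt(struct toy_pt *pt, unsigned int first,
			 unsigned int count, uint64_t scratch_pte)
{
	bm_clear(pt->used, first, count);
	if (bm_empty(pt->used, N_ENTRIES))
		return true;	/* caller frees pt, points its PDE at scratch */

	for (unsigned int i = first; i < first + count; i++)
		pt->entry[i] = scratch_pte;
	return false;
}

int main(void)
{
	struct toy_pt *pt = calloc(1, sizeof(*pt));

	if (!pt)
		return 1;
	memset(pt->used, 0xff, sizeof(pt->used));	/* pretend fully mapped */
	printf("drained: %d\n", toy_clear_pt(pt, 0, 16, ~0ULL));
	printf("drained: %d\n", toy_clear_pt(pt, 16, N_ENTRIES - 16, ~0ULL));
	free(pt);
	return 0;
}

The same bookkeeping is what lets gen8_ppgtt_clear_pd() and gen8_ppgtt_clear_pdp() free whole tables as soon as their used_* bitmaps drain.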
static void
gen8_ppgtt_insert_pte_entries(struct i915_address_space *vm,
struct i915_page_directory_pointer *pdp,
@@ -809,12 +893,12 @@ gen8_ppgtt_insert_pte_entries(struct i915_address_space *vm,
pt_vaddr[pte] =
gen8_pte_encode(sg_page_iter_dma_address(sg_iter),
- cache_level, true);
+ cache_level);
if (++pte == GEN8_PTES) {
kunmap_px(ppgtt, pt_vaddr);
pt_vaddr = NULL;
if (++pde == I915_PDES) {
- if (++pdpe == I915_PDPES_PER_PDP(vm->dev))
+ if (++pdpe == I915_PDPES_PER_PDP(to_i915(vm->dev)))
break;
pde = 0;
}
@@ -837,7 +921,7 @@ static void gen8_ppgtt_insert_entries(struct i915_address_space *vm,
__sg_page_iter_start(&sg_iter, pages->sgl, sg_nents(pages->sgl), 0);
- if (!USES_FULL_48BIT_PPGTT(vm->dev)) {
+ if (!USES_FULL_48BIT_PPGTT(to_i915(vm->dev))) {
gen8_ppgtt_insert_pte_entries(vm, &ppgtt->pdp, &sg_iter, start,
cache_level);
} else {
@@ -852,7 +936,7 @@ static void gen8_ppgtt_insert_entries(struct i915_address_space *vm,
}
}
-static void gen8_free_page_tables(struct drm_device *dev,
+static void gen8_free_page_tables(struct drm_i915_private *dev_priv,
struct i915_page_directory *pd)
{
int i;
@@ -864,34 +948,34 @@ static void gen8_free_page_tables(struct drm_device *dev,
if (WARN_ON(!pd->page_table[i]))
continue;
- free_pt(dev, pd->page_table[i]);
+ free_pt(dev_priv, pd->page_table[i]);
pd->page_table[i] = NULL;
}
}
static int gen8_init_scratch(struct i915_address_space *vm)
{
- struct drm_device *dev = vm->dev;
+ struct drm_i915_private *dev_priv = to_i915(vm->dev);
int ret;
- ret = setup_scratch_page(dev, &vm->scratch_page, I915_GFP_DMA);
+ ret = setup_scratch_page(dev_priv, &vm->scratch_page, I915_GFP_DMA);
if (ret)
return ret;
- vm->scratch_pt = alloc_pt(dev);
+ vm->scratch_pt = alloc_pt(dev_priv);
if (IS_ERR(vm->scratch_pt)) {
ret = PTR_ERR(vm->scratch_pt);
goto free_scratch_page;
}
- vm->scratch_pd = alloc_pd(dev);
+ vm->scratch_pd = alloc_pd(dev_priv);
if (IS_ERR(vm->scratch_pd)) {
ret = PTR_ERR(vm->scratch_pd);
goto free_pt;
}
- if (USES_FULL_48BIT_PPGTT(dev)) {
- vm->scratch_pdp = alloc_pdp(dev);
+ if (USES_FULL_48BIT_PPGTT(dev_priv)) {
+ vm->scratch_pdp = alloc_pdp(dev_priv);
if (IS_ERR(vm->scratch_pdp)) {
ret = PTR_ERR(vm->scratch_pdp);
goto free_pd;
@@ -900,17 +984,17 @@ static int gen8_init_scratch(struct i915_address_space *vm)
gen8_initialize_pt(vm, vm->scratch_pt);
gen8_initialize_pd(vm, vm->scratch_pd);
- if (USES_FULL_48BIT_PPGTT(dev))
+ if (USES_FULL_48BIT_PPGTT(dev_priv))
gen8_initialize_pdp(vm, vm->scratch_pdp);
return 0;
free_pd:
- free_pd(dev, vm->scratch_pd);
+ free_pd(dev_priv, vm->scratch_pd);
free_pt:
- free_pt(dev, vm->scratch_pt);
+ free_pt(dev_priv, vm->scratch_pt);
free_scratch_page:
- cleanup_scratch_page(dev, &vm->scratch_page);
+ cleanup_scratch_page(dev_priv, &vm->scratch_page);
return ret;
}
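Editor's note: gen8_init_scratch() wires up one scratch object per level so any address not backed by real pages still resolves through a benign chain: scratch_pdp -> scratch_pd -> scratch_pt -> scratch page. A toy illustration of that chain (names and sizes are made up for the sketch):

#include <stdint.h>
#include <stdio.h>

#define ENTRIES 4	/* deliberately tiny tables */

struct level { const void *slot[ENTRIES]; };

int main(void)
{
	static uint64_t scratch_page[512];		/* reads back zeroes */
	struct level scratch_pt, scratch_pd, scratch_pdp;

	/* Every slot of an unpopulated level points one level down. */
	for (int i = 0; i < ENTRIES; i++) {
		scratch_pt.slot[i] = scratch_page;
		scratch_pd.slot[i] = &scratch_pt;
		scratch_pdp.slot[i] = &scratch_pd;
	}

	/* Any walk lands on the scratch page, never on a NULL pointer. */
	const struct level *pd = scratch_pdp.slot[3];
	const struct level *pt = pd->slot[1];

	printf("resolves to scratch page: %d\n", pt->slot[2] == scratch_page);
	return 0;
}

This is also why the clear paths above can write scratch encodings unconditionally instead of taking the old use_scratch flag: an unmapped slot is always a valid entry, it just points down the scratch chain.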
@@ -948,54 +1032,56 @@ static int gen8_ppgtt_notify_vgt(struct i915_hw_ppgtt *ppgtt, bool create)
static void gen8_free_scratch(struct i915_address_space *vm)
{
- struct drm_device *dev = vm->dev;
+ struct drm_i915_private *dev_priv = to_i915(vm->dev);
- if (USES_FULL_48BIT_PPGTT(dev))
- free_pdp(dev, vm->scratch_pdp);
- free_pd(dev, vm->scratch_pd);
- free_pt(dev, vm->scratch_pt);
- cleanup_scratch_page(dev, &vm->scratch_page);
+ if (USES_FULL_48BIT_PPGTT(dev_priv))
+ free_pdp(dev_priv, vm->scratch_pdp);
+ free_pd(dev_priv, vm->scratch_pd);
+ free_pt(dev_priv, vm->scratch_pt);
+ cleanup_scratch_page(dev_priv, &vm->scratch_page);
}
-static void gen8_ppgtt_cleanup_3lvl(struct drm_device *dev,
+static void gen8_ppgtt_cleanup_3lvl(struct drm_i915_private *dev_priv,
struct i915_page_directory_pointer *pdp)
{
int i;
- for_each_set_bit(i, pdp->used_pdpes, I915_PDPES_PER_PDP(dev)) {
+ for_each_set_bit(i, pdp->used_pdpes, I915_PDPES_PER_PDP(dev_priv)) {
if (WARN_ON(!pdp->page_directory[i]))
continue;
- gen8_free_page_tables(dev, pdp->page_directory[i]);
- free_pd(dev, pdp->page_directory[i]);
+ gen8_free_page_tables(dev_priv, pdp->page_directory[i]);
+ free_pd(dev_priv, pdp->page_directory[i]);
}
- free_pdp(dev, pdp);
+ free_pdp(dev_priv, pdp);
}
static void gen8_ppgtt_cleanup_4lvl(struct i915_hw_ppgtt *ppgtt)
{
+ struct drm_i915_private *dev_priv = to_i915(ppgtt->base.dev);
int i;
for_each_set_bit(i, ppgtt->pml4.used_pml4es, GEN8_PML4ES_PER_PML4) {
if (WARN_ON(!ppgtt->pml4.pdps[i]))
continue;
- gen8_ppgtt_cleanup_3lvl(ppgtt->base.dev, ppgtt->pml4.pdps[i]);
+ gen8_ppgtt_cleanup_3lvl(dev_priv, ppgtt->pml4.pdps[i]);
}
- cleanup_px(ppgtt->base.dev, &ppgtt->pml4);
+ cleanup_px(dev_priv, &ppgtt->pml4);
}
static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
{
+ struct drm_i915_private *dev_priv = to_i915(vm->dev);
struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
- if (intel_vgpu_active(to_i915(vm->dev)))
+ if (intel_vgpu_active(dev_priv))
gen8_ppgtt_notify_vgt(ppgtt, false);
- if (!USES_FULL_48BIT_PPGTT(ppgtt->base.dev))
- gen8_ppgtt_cleanup_3lvl(ppgtt->base.dev, &ppgtt->pdp);
+ if (!USES_FULL_48BIT_PPGTT(dev_priv))
+ gen8_ppgtt_cleanup_3lvl(dev_priv, &ppgtt->pdp);
else
gen8_ppgtt_cleanup_4lvl(ppgtt);
@@ -1026,7 +1112,7 @@ static int gen8_ppgtt_alloc_pagetabs(struct i915_address_space *vm,
uint64_t length,
unsigned long *new_pts)
{
- struct drm_device *dev = vm->dev;
+ struct drm_i915_private *dev_priv = to_i915(vm->dev);
struct i915_page_table *pt;
uint32_t pde;
@@ -1038,7 +1124,7 @@ static int gen8_ppgtt_alloc_pagetabs(struct i915_address_space *vm,
continue;
}
- pt = alloc_pt(dev);
+ pt = alloc_pt(dev_priv);
if (IS_ERR(pt))
goto unwind_out;
@@ -1052,7 +1138,7 @@ static int gen8_ppgtt_alloc_pagetabs(struct i915_address_space *vm,
unwind_out:
for_each_set_bit(pde, new_pts, I915_PDES)
- free_pt(dev, pd->page_table[pde]);
+ free_pt(dev_priv, pd->page_table[pde]);
return -ENOMEM;
}
@@ -1087,10 +1173,10 @@ gen8_ppgtt_alloc_page_directories(struct i915_address_space *vm,
uint64_t length,
unsigned long *new_pds)
{
- struct drm_device *dev = vm->dev;
+ struct drm_i915_private *dev_priv = to_i915(vm->dev);
struct i915_page_directory *pd;
uint32_t pdpe;
- uint32_t pdpes = I915_PDPES_PER_PDP(dev);
+ uint32_t pdpes = I915_PDPES_PER_PDP(dev_priv);
WARN_ON(!bitmap_empty(new_pds, pdpes));
@@ -1098,7 +1184,7 @@ gen8_ppgtt_alloc_page_directories(struct i915_address_space *vm,
if (test_bit(pdpe, pdp->used_pdpes))
continue;
- pd = alloc_pd(dev);
+ pd = alloc_pd(dev_priv);
if (IS_ERR(pd))
goto unwind_out;
@@ -1112,7 +1198,7 @@ gen8_ppgtt_alloc_page_directories(struct i915_address_space *vm,
unwind_out:
for_each_set_bit(pdpe, new_pds, pdpes)
- free_pd(dev, pdp->page_directory[pdpe]);
+ free_pd(dev_priv, pdp->page_directory[pdpe]);
return -ENOMEM;
}
@@ -1140,7 +1226,7 @@ gen8_ppgtt_alloc_page_dirpointers(struct i915_address_space *vm,
uint64_t length,
unsigned long *new_pdps)
{
- struct drm_device *dev = vm->dev;
+ struct drm_i915_private *dev_priv = to_i915(vm->dev);
struct i915_page_directory_pointer *pdp;
uint32_t pml4e;
@@ -1148,7 +1234,7 @@ gen8_ppgtt_alloc_page_dirpointers(struct i915_address_space *vm,
gen8_for_each_pml4e(pdp, pml4, start, length, pml4e) {
if (!test_bit(pml4e, pml4->used_pml4es)) {
- pdp = alloc_pdp(dev);
+ pdp = alloc_pdp(dev_priv);
if (IS_ERR(pdp))
goto unwind_out;
@@ -1166,7 +1252,7 @@ gen8_ppgtt_alloc_page_dirpointers(struct i915_address_space *vm,
unwind_out:
for_each_set_bit(pml4e, new_pdps, GEN8_PML4ES_PER_PML4)
- free_pdp(dev, pml4->pdps[pml4e]);
+ free_pdp(dev_priv, pml4->pdps[pml4e]);
return -ENOMEM;
}
@@ -1208,16 +1294,6 @@ err_out:
return -ENOMEM;
}
-/* PDE TLBs are a pain to invalidate on GEN8+. When we modify
- * the page table structures, we mark them dirty so that
- * context switching/execlist queuing code takes extra steps
- * to ensure that tlbs are flushed.
- */
-static void mark_tlbs_dirty(struct i915_hw_ppgtt *ppgtt)
-{
- ppgtt->pd_dirty_rings = INTEL_INFO(ppgtt->base.dev)->ring_mask;
-}
-
static int gen8_alloc_va_range_3lvl(struct i915_address_space *vm,
struct i915_page_directory_pointer *pdp,
uint64_t start,
@@ -1225,12 +1301,12 @@ static int gen8_alloc_va_range_3lvl(struct i915_address_space *vm,
{
struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
unsigned long *new_page_dirs, *new_page_tables;
- struct drm_device *dev = vm->dev;
+ struct drm_i915_private *dev_priv = to_i915(vm->dev);
struct i915_page_directory *pd;
const uint64_t orig_start = start;
const uint64_t orig_length = length;
uint32_t pdpe;
- uint32_t pdpes = I915_PDPES_PER_PDP(dev);
+ uint32_t pdpes = I915_PDPES_PER_PDP(dev_priv);
int ret;
/* Wrap is never okay since we can only represent 48b, and we don't
@@ -1318,11 +1394,12 @@ err_out:
for_each_set_bit(temp, new_page_tables + pdpe *
BITS_TO_LONGS(I915_PDES), I915_PDES)
- free_pt(dev, pdp->page_directory[pdpe]->page_table[temp]);
+ free_pt(dev_priv,
+ pdp->page_directory[pdpe]->page_table[temp]);
}
for_each_set_bit(pdpe, new_page_dirs, pdpes)
- free_pd(dev, pdp->page_directory[pdpe]);
+ free_pd(dev_priv, pdp->page_directory[pdpe]);
free_gen8_temp_bitmaps(new_page_dirs, new_page_tables);
mark_tlbs_dirty(ppgtt);
@@ -1373,7 +1450,7 @@ static int gen8_alloc_va_range_4lvl(struct i915_address_space *vm,
err_out:
for_each_set_bit(pml4e, new_pdps, GEN8_PML4ES_PER_PML4)
- gen8_ppgtt_cleanup_3lvl(vm->dev, pml4->pdps[pml4e]);
+ gen8_ppgtt_cleanup_3lvl(to_i915(vm->dev), pml4->pdps[pml4e]);
return ret;
}
@@ -1383,7 +1460,7 @@ static int gen8_alloc_va_range(struct i915_address_space *vm,
{
struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
- if (USES_FULL_48BIT_PPGTT(vm->dev))
+ if (USES_FULL_48BIT_PPGTT(to_i915(vm->dev)))
return gen8_alloc_va_range_4lvl(vm, &ppgtt->pml4, start, length);
else
return gen8_alloc_va_range_3lvl(vm, &ppgtt->pdp, start, length);
@@ -1452,9 +1529,9 @@ static void gen8_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
uint64_t start = ppgtt->base.start;
uint64_t length = ppgtt->base.total;
gen8_pte_t scratch_pte = gen8_pte_encode(vm->scratch_page.daddr,
- I915_CACHE_LLC, true);
+ I915_CACHE_LLC);
- if (!USES_FULL_48BIT_PPGTT(vm->dev)) {
+ if (!USES_FULL_48BIT_PPGTT(to_i915(vm->dev))) {
gen8_dump_pdp(&ppgtt->pdp, start, length, scratch_pte, m);
} else {
uint64_t pml4e;
@@ -1474,7 +1551,7 @@ static void gen8_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
static int gen8_preallocate_top_level_pdps(struct i915_hw_ppgtt *ppgtt)
{
unsigned long *new_page_dirs, *new_page_tables;
- uint32_t pdpes = I915_PDPES_PER_PDP(dev);
+ uint32_t pdpes = I915_PDPES_PER_PDP(to_i915(ppgtt->base.dev));
int ret;
/* We allocate temp bitmap for page tables for no gain
@@ -1507,6 +1584,7 @@ static int gen8_preallocate_top_level_pdps(struct i915_hw_ppgtt *ppgtt)
*/
static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
{
+ struct drm_i915_private *dev_priv = to_i915(ppgtt->base.dev);
int ret;
ret = gen8_init_scratch(&ppgtt->base);
@@ -1522,8 +1600,8 @@ static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
ppgtt->base.bind_vma = ppgtt_bind_vma;
ppgtt->debug_dump = gen8_dump_ppgtt;
- if (USES_FULL_48BIT_PPGTT(ppgtt->base.dev)) {
- ret = setup_px(ppgtt->base.dev, &ppgtt->pml4);
+ if (USES_FULL_48BIT_PPGTT(dev_priv)) {
+ ret = setup_px(dev_priv, &ppgtt->pml4);
if (ret)
goto free_scratch;
@@ -1532,7 +1610,7 @@ static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
ppgtt->base.total = 1ULL << 48;
ppgtt->switch_mm = gen8_48b_mm_switch;
} else {
- ret = __pdp_init(ppgtt->base.dev, &ppgtt->pdp);
+ ret = __pdp_init(dev_priv, &ppgtt->pdp);
if (ret)
goto free_scratch;
@@ -1542,14 +1620,14 @@ static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
0, 0,
GEN8_PML4E_SHIFT);
- if (intel_vgpu_active(to_i915(ppgtt->base.dev))) {
+ if (intel_vgpu_active(dev_priv)) {
ret = gen8_preallocate_top_level_pdps(ppgtt);
if (ret)
goto free_scratch;
}
}
- if (intel_vgpu_active(to_i915(ppgtt->base.dev)))
+ if (intel_vgpu_active(dev_priv))
gen8_ppgtt_notify_vgt(ppgtt, true);
return 0;
@@ -1569,7 +1647,7 @@ static void gen6_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
uint32_t start = ppgtt->base.start, length = ppgtt->base.total;
scratch_pte = vm->pte_encode(vm->scratch_page.daddr,
- I915_CACHE_LLC, true, 0);
+ I915_CACHE_LLC, 0);
gen6_for_each_pde(unused, &ppgtt->pd, start, length, pde) {
u32 expected;
@@ -1724,29 +1802,30 @@ static int gen6_mm_switch(struct i915_hw_ppgtt *ppgtt,
return 0;
}
-static void gen8_ppgtt_enable(struct drm_device *dev)
+static void gen8_ppgtt_enable(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_engine_cs *engine;
+ enum intel_engine_id id;
- for_each_engine(engine, dev_priv) {
- u32 four_level = USES_FULL_48BIT_PPGTT(dev) ? GEN8_GFX_PPGTT_48B : 0;
+ for_each_engine(engine, dev_priv, id) {
+ u32 four_level = USES_FULL_48BIT_PPGTT(dev_priv) ?
+ GEN8_GFX_PPGTT_48B : 0;
I915_WRITE(RING_MODE_GEN7(engine),
_MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE | four_level));
}
}
-static void gen7_ppgtt_enable(struct drm_device *dev)
+static void gen7_ppgtt_enable(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_engine_cs *engine;
uint32_t ecochk, ecobits;
+ enum intel_engine_id id;
ecobits = I915_READ(GAC_ECO_BITS);
I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_PPGTT_CACHE64B);
ecochk = I915_READ(GAM_ECOCHK);
- if (IS_HASWELL(dev)) {
+ if (IS_HASWELL(dev_priv)) {
ecochk |= ECOCHK_PPGTT_WB_HSW;
} else {
ecochk |= ECOCHK_PPGTT_LLC_IVB;
@@ -1754,16 +1833,15 @@ static void gen7_ppgtt_enable(struct drm_device *dev)
}
I915_WRITE(GAM_ECOCHK, ecochk);
- for_each_engine(engine, dev_priv) {
+ for_each_engine(engine, dev_priv, id) {
/* GFX_MODE is per-ring on gen7+ */
I915_WRITE(RING_MODE_GEN7(engine),
_MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
}
}
-static void gen6_ppgtt_enable(struct drm_device *dev)
+static void gen6_ppgtt_enable(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
uint32_t ecochk, gab_ctl, ecobits;
ecobits = I915_READ(GAC_ECO_BITS);
@@ -1782,8 +1860,7 @@ static void gen6_ppgtt_enable(struct drm_device *dev)
/* PPGTT support for Sandybridge/Gen6 and later */
static void gen6_ppgtt_clear_range(struct i915_address_space *vm,
uint64_t start,
- uint64_t length,
- bool use_scratch)
+ uint64_t length)
{
struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
gen6_pte_t *pt_vaddr, scratch_pte;
@@ -1794,7 +1871,7 @@ static void gen6_ppgtt_clear_range(struct i915_address_space *vm,
unsigned last_pte, i;
scratch_pte = vm->pte_encode(vm->scratch_page.daddr,
- I915_CACHE_LLC, true, 0);
+ I915_CACHE_LLC, 0);
while (num_entries) {
last_pte = first_pte + num_entries;
@@ -1832,7 +1909,7 @@ static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
pt_vaddr = kmap_px(ppgtt->pd.page_table[act_pt]);
pt_vaddr[act_pte] =
- vm->pte_encode(addr, cache_level, true, flags);
+ vm->pte_encode(addr, cache_level, flags);
if (++act_pte == GEN6_PTES) {
kunmap_px(ppgtt, pt_vaddr);
@@ -1850,8 +1927,7 @@ static int gen6_alloc_va_range(struct i915_address_space *vm,
uint64_t start_in, uint64_t length_in)
{
DECLARE_BITMAP(new_page_tables, I915_PDES);
- struct drm_device *dev = vm->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(vm->dev);
struct i915_ggtt *ggtt = &dev_priv->ggtt;
struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
struct i915_page_table *pt;
@@ -1881,7 +1957,7 @@ static int gen6_alloc_va_range(struct i915_address_space *vm,
/* We've already allocated a page table */
WARN_ON(!bitmap_empty(pt->used_ptes, GEN6_PTES));
- pt = alloc_pt(dev);
+ pt = alloc_pt(dev_priv);
if (IS_ERR(pt)) {
ret = PTR_ERR(pt);
goto unwind_out;
@@ -1929,7 +2005,7 @@ unwind_out:
struct i915_page_table *pt = ppgtt->pd.page_table[pde];
ppgtt->pd.page_table[pde] = vm->scratch_pt;
- free_pt(vm->dev, pt);
+ free_pt(dev_priv, pt);
}
mark_tlbs_dirty(ppgtt);
@@ -1938,16 +2014,16 @@ unwind_out:
static int gen6_init_scratch(struct i915_address_space *vm)
{
- struct drm_device *dev = vm->dev;
+ struct drm_i915_private *dev_priv = to_i915(vm->dev);
int ret;
- ret = setup_scratch_page(dev, &vm->scratch_page, I915_GFP_DMA);
+ ret = setup_scratch_page(dev_priv, &vm->scratch_page, I915_GFP_DMA);
if (ret)
return ret;
- vm->scratch_pt = alloc_pt(dev);
+ vm->scratch_pt = alloc_pt(dev_priv);
if (IS_ERR(vm->scratch_pt)) {
- cleanup_scratch_page(dev, &vm->scratch_page);
+ cleanup_scratch_page(dev_priv, &vm->scratch_page);
return PTR_ERR(vm->scratch_pt);
}
@@ -1958,17 +2034,17 @@ static int gen6_init_scratch(struct i915_address_space *vm)
static void gen6_free_scratch(struct i915_address_space *vm)
{
- struct drm_device *dev = vm->dev;
+ struct drm_i915_private *dev_priv = to_i915(vm->dev);
- free_pt(dev, vm->scratch_pt);
- cleanup_scratch_page(dev, &vm->scratch_page);
+ free_pt(dev_priv, vm->scratch_pt);
+ cleanup_scratch_page(dev_priv, &vm->scratch_page);
}
static void gen6_ppgtt_cleanup(struct i915_address_space *vm)
{
struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
struct i915_page_directory *pd = &ppgtt->pd;
- struct drm_device *dev = vm->dev;
+ struct drm_i915_private *dev_priv = to_i915(vm->dev);
struct i915_page_table *pt;
uint32_t pde;
@@ -1976,7 +2052,7 @@ static void gen6_ppgtt_cleanup(struct i915_address_space *vm)
gen6_for_all_pdes(pt, pd, pde)
if (pt != vm->scratch_pt)
- free_pt(dev, pt);
+ free_pt(dev_priv, pt);
gen6_free_scratch(vm);
}
@@ -1984,8 +2060,7 @@ static void gen6_ppgtt_cleanup(struct i915_address_space *vm)
static int gen6_ppgtt_allocate_page_directories(struct i915_hw_ppgtt *ppgtt)
{
struct i915_address_space *vm = &ppgtt->base;
- struct drm_device *dev = ppgtt->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(ppgtt->base.dev);
struct i915_ggtt *ggtt = &dev_priv->ggtt;
bool retried = false;
int ret;
@@ -2050,17 +2125,16 @@ static void gen6_scratch_va_range(struct i915_hw_ppgtt *ppgtt,
static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
{
- struct drm_device *dev = ppgtt->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(ppgtt->base.dev);
struct i915_ggtt *ggtt = &dev_priv->ggtt;
int ret;
ppgtt->base.pte_encode = ggtt->base.pte_encode;
- if (intel_vgpu_active(dev_priv) || IS_GEN6(dev))
+ if (intel_vgpu_active(dev_priv) || IS_GEN6(dev_priv))
ppgtt->switch_mm = gen6_mm_switch;
- else if (IS_HASWELL(dev))
+ else if (IS_HASWELL(dev_priv))
ppgtt->switch_mm = hsw_mm_switch;
- else if (IS_GEN7(dev))
+ else if (IS_GEN7(dev_priv))
ppgtt->switch_mm = gen7_mm_switch;
else
BUG();
@@ -2111,8 +2185,10 @@ static int __hw_ppgtt_init(struct i915_hw_ppgtt *ppgtt,
}
static void i915_address_space_init(struct i915_address_space *vm,
- struct drm_i915_private *dev_priv)
+ struct drm_i915_private *dev_priv,
+ const char *name)
{
+ i915_gem_timeline_init(dev_priv, &vm->timeline, name);
drm_mm_init(&vm->mm, vm->start, vm->total);
INIT_LIST_HEAD(&vm->active_list);
INIT_LIST_HEAD(&vm->inactive_list);
@@ -2120,44 +2196,50 @@ static void i915_address_space_init(struct i915_address_space *vm,
list_add_tail(&vm->global_link, &dev_priv->vm_list);
}
-static void gtt_write_workarounds(struct drm_device *dev)
+static void i915_address_space_fini(struct i915_address_space *vm)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
+ i915_gem_timeline_fini(&vm->timeline);
+ drm_mm_takedown(&vm->mm);
+ list_del(&vm->global_link);
+}
+static void gtt_write_workarounds(struct drm_i915_private *dev_priv)
+{
/* This function is for gtt related workarounds. This function is
* called on driver load and after a GPU reset, so you can place
* workarounds here even if they get overwritten by GPU reset.
*/
/* WaIncreaseDefaultTLBEntries:chv,bdw,skl,bxt */
- if (IS_BROADWELL(dev))
+ if (IS_BROADWELL(dev_priv))
I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_BDW);
- else if (IS_CHERRYVIEW(dev))
+ else if (IS_CHERRYVIEW(dev_priv))
I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_CHV);
- else if (IS_SKYLAKE(dev))
+ else if (IS_SKYLAKE(dev_priv))
I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_SKL);
- else if (IS_BROXTON(dev))
+ else if (IS_BROXTON(dev_priv))
I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_BXT);
}
static int i915_ppgtt_init(struct i915_hw_ppgtt *ppgtt,
struct drm_i915_private *dev_priv,
- struct drm_i915_file_private *file_priv)
+ struct drm_i915_file_private *file_priv,
+ const char *name)
{
int ret;
ret = __hw_ppgtt_init(ppgtt, dev_priv);
if (ret == 0) {
kref_init(&ppgtt->ref);
- i915_address_space_init(&ppgtt->base, dev_priv);
+ i915_address_space_init(&ppgtt->base, dev_priv, name);
ppgtt->base.file = file_priv;
}
return ret;
}
-int i915_ppgtt_init_hw(struct drm_device *dev)
+int i915_ppgtt_init_hw(struct drm_i915_private *dev_priv)
{
- gtt_write_workarounds(dev);
+ gtt_write_workarounds(dev_priv);
/* In the case of execlists, PPGTT is enabled by the context descriptor
* and the PDPs are contained within the context itself. We don't
@@ -2165,24 +2247,25 @@ int i915_ppgtt_init_hw(struct drm_device *dev)
if (i915.enable_execlists)
return 0;
- if (!USES_PPGTT(dev))
+ if (!USES_PPGTT(dev_priv))
return 0;
- if (IS_GEN6(dev))
- gen6_ppgtt_enable(dev);
- else if (IS_GEN7(dev))
- gen7_ppgtt_enable(dev);
- else if (INTEL_INFO(dev)->gen >= 8)
- gen8_ppgtt_enable(dev);
+ if (IS_GEN6(dev_priv))
+ gen6_ppgtt_enable(dev_priv);
+ else if (IS_GEN7(dev_priv))
+ gen7_ppgtt_enable(dev_priv);
+ else if (INTEL_GEN(dev_priv) >= 8)
+ gen8_ppgtt_enable(dev_priv);
else
- MISSING_CASE(INTEL_INFO(dev)->gen);
+ MISSING_CASE(INTEL_GEN(dev_priv));
return 0;
}
struct i915_hw_ppgtt *
i915_ppgtt_create(struct drm_i915_private *dev_priv,
- struct drm_i915_file_private *fpriv)
+ struct drm_i915_file_private *fpriv,
+ const char *name)
{
struct i915_hw_ppgtt *ppgtt;
int ret;
@@ -2191,7 +2274,7 @@ i915_ppgtt_create(struct drm_i915_private *dev_priv,
if (!ppgtt)
return ERR_PTR(-ENOMEM);
- ret = i915_ppgtt_init(ppgtt, dev_priv, fpriv);
+ ret = i915_ppgtt_init(ppgtt, dev_priv, fpriv, name);
if (ret) {
kfree(ppgtt);
return ERR_PTR(ret);
@@ -2202,7 +2285,7 @@ i915_ppgtt_create(struct drm_i915_private *dev_priv,
return ppgtt;
}
void i915_ppgtt_release(struct kref *kref)
{
struct i915_hw_ppgtt *ppgtt =
container_of(kref, struct i915_hw_ppgtt, ref);
@@ -2214,8 +2297,7 @@ void i915_ppgtt_release(struct kref *kref)
WARN_ON(!list_empty(&ppgtt->base.inactive_list));
WARN_ON(!list_empty(&ppgtt->base.unbound_list));
- list_del(&ppgtt->base.global_link);
- drm_mm_takedown(&ppgtt->base.mm);
+ i915_address_space_fini(&ppgtt->base);
ppgtt->base.cleanup(&ppgtt->base);
kfree(ppgtt);
@@ -2239,11 +2321,12 @@ static bool needs_idle_maps(struct drm_i915_private *dev_priv)
void i915_check_and_clear_faults(struct drm_i915_private *dev_priv)
{
struct intel_engine_cs *engine;
+ enum intel_engine_id id;
if (INTEL_INFO(dev_priv)->gen < 6)
return;
- for_each_engine(engine, dev_priv) {
+ for_each_engine(engine, dev_priv, id) {
u32 fault_reg;
fault_reg = I915_READ(RING_FAULT_REG(engine));
if (fault_reg & RING_FAULT_VALID) {
@@ -2260,7 +2343,10 @@ void i915_check_and_clear_faults(struct drm_i915_private *dev_priv)
fault_reg & ~RING_FAULT_VALID);
}
}
- POSTING_READ(RING_FAULT_REG(&dev_priv->engine[RCS]));
+
+ /* Engine-specific init may not have been done until this point. */
+ if (dev_priv->engine[RCS])
+ POSTING_READ(RING_FAULT_REG(dev_priv->engine[RCS]));
}
static void i915_ggtt_flush(struct drm_i915_private *dev_priv)
@@ -2273,33 +2359,32 @@ static void i915_ggtt_flush(struct drm_i915_private *dev_priv)
}
}
-void i915_gem_suspend_gtt_mappings(struct drm_device *dev)
+void i915_gem_suspend_gtt_mappings(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
struct i915_ggtt *ggtt = &dev_priv->ggtt;
/* Don't bother messing with faults pre GEN6 as we have little
* documentation supporting that it's a good idea.
*/
- if (INTEL_INFO(dev)->gen < 6)
+ if (INTEL_GEN(dev_priv) < 6)
return;
i915_check_and_clear_faults(dev_priv);
- ggtt->base.clear_range(&ggtt->base, ggtt->base.start, ggtt->base.total,
- true);
+ ggtt->base.clear_range(&ggtt->base, ggtt->base.start, ggtt->base.total);
i915_ggtt_flush(dev_priv);
}
-int i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj)
+int i915_gem_gtt_prepare_pages(struct drm_i915_gem_object *obj,
+ struct sg_table *pages)
{
- if (!dma_map_sg(&obj->base.dev->pdev->dev,
- obj->pages->sgl, obj->pages->nents,
- PCI_DMA_BIDIRECTIONAL))
- return -ENOSPC;
+ if (dma_map_sg(&obj->base.dev->pdev->dev,
+ pages->sgl, pages->nents,
+ PCI_DMA_BIDIRECTIONAL))
+ return 0;
- return 0;
+ return -ENOSPC;
}
static void gen8_set_pte(void __iomem *addr, gen8_pte_t pte)
@@ -2317,16 +2402,11 @@ static void gen8_ggtt_insert_page(struct i915_address_space *vm,
gen8_pte_t __iomem *pte =
(gen8_pte_t __iomem *)dev_priv->ggtt.gsm +
(offset >> PAGE_SHIFT);
- int rpm_atomic_seq;
-
- rpm_atomic_seq = assert_rpm_atomic_begin(dev_priv);
- gen8_set_pte(pte, gen8_pte_encode(addr, level, true));
+ gen8_set_pte(pte, gen8_pte_encode(addr, level));
I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
POSTING_READ(GFX_FLSH_CNTL_GEN6);
-
- assert_rpm_atomic_end(dev_priv, rpm_atomic_seq);
}
static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
@@ -2340,15 +2420,12 @@ static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
gen8_pte_t __iomem *gtt_entries;
gen8_pte_t gtt_entry;
dma_addr_t addr;
- int rpm_atomic_seq;
int i = 0;
- rpm_atomic_seq = assert_rpm_atomic_begin(dev_priv);
-
gtt_entries = (gen8_pte_t __iomem *)ggtt->gsm + (start >> PAGE_SHIFT);
for_each_sgt_dma(addr, sgt_iter, st) {
- gtt_entry = gen8_pte_encode(addr, level, true);
+ gtt_entry = gen8_pte_encode(addr, level);
gen8_set_pte(&gtt_entries[i++], gtt_entry);
}
@@ -2368,8 +2445,6 @@ static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
*/
I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
POSTING_READ(GFX_FLSH_CNTL_GEN6);
-
- assert_rpm_atomic_end(dev_priv, rpm_atomic_seq);
}
struct insert_entries {
@@ -2408,16 +2483,11 @@ static void gen6_ggtt_insert_page(struct i915_address_space *vm,
gen6_pte_t __iomem *pte =
(gen6_pte_t __iomem *)dev_priv->ggtt.gsm +
(offset >> PAGE_SHIFT);
- int rpm_atomic_seq;
-
- rpm_atomic_seq = assert_rpm_atomic_begin(dev_priv);
- iowrite32(vm->pte_encode(addr, level, true, flags), pte);
+ iowrite32(vm->pte_encode(addr, level, flags), pte);
I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
POSTING_READ(GFX_FLSH_CNTL_GEN6);
-
- assert_rpm_atomic_end(dev_priv, rpm_atomic_seq);
}
/*
@@ -2437,15 +2507,12 @@ static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
gen6_pte_t __iomem *gtt_entries;
gen6_pte_t gtt_entry;
dma_addr_t addr;
- int rpm_atomic_seq;
int i = 0;
- rpm_atomic_seq = assert_rpm_atomic_begin(dev_priv);
-
gtt_entries = (gen6_pte_t __iomem *)ggtt->gsm + (start >> PAGE_SHIFT);
for_each_sgt_dma(addr, sgt_iter, st) {
- gtt_entry = vm->pte_encode(addr, level, true, flags);
+ gtt_entry = vm->pte_encode(addr, level, flags);
iowrite32(gtt_entry, &gtt_entries[i++]);
}
@@ -2464,23 +2531,16 @@ static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
*/
I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
POSTING_READ(GFX_FLSH_CNTL_GEN6);
-
- assert_rpm_atomic_end(dev_priv, rpm_atomic_seq);
}
static void nop_clear_range(struct i915_address_space *vm,
- uint64_t start,
- uint64_t length,
- bool use_scratch)
+ uint64_t start, uint64_t length)
{
}
static void gen8_ggtt_clear_range(struct i915_address_space *vm,
- uint64_t start,
- uint64_t length,
- bool use_scratch)
+ uint64_t start, uint64_t length)
{
- struct drm_i915_private *dev_priv = to_i915(vm->dev);
struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
unsigned first_entry = start >> PAGE_SHIFT;
unsigned num_entries = length >> PAGE_SHIFT;
@@ -2488,9 +2548,6 @@ static void gen8_ggtt_clear_range(struct i915_address_space *vm,
(gen8_pte_t __iomem *)ggtt->gsm + first_entry;
const int max_entries = ggtt_total_entries(ggtt) - first_entry;
int i;
- int rpm_atomic_seq;
-
- rpm_atomic_seq = assert_rpm_atomic_begin(dev_priv);
if (WARN(num_entries > max_entries,
"First entry = %d; Num entries = %d (max=%d)\n",
@@ -2498,21 +2555,16 @@ static void gen8_ggtt_clear_range(struct i915_address_space *vm,
num_entries = max_entries;
scratch_pte = gen8_pte_encode(vm->scratch_page.daddr,
- I915_CACHE_LLC,
- use_scratch);
+ I915_CACHE_LLC);
for (i = 0; i < num_entries; i++)
gen8_set_pte(&gtt_base[i], scratch_pte);
readl(gtt_base);
-
- assert_rpm_atomic_end(dev_priv, rpm_atomic_seq);
}
static void gen6_ggtt_clear_range(struct i915_address_space *vm,
uint64_t start,
- uint64_t length,
- bool use_scratch)
+ uint64_t length)
{
- struct drm_i915_private *dev_priv = to_i915(vm->dev);
struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
unsigned first_entry = start >> PAGE_SHIFT;
unsigned num_entries = length >> PAGE_SHIFT;
@@ -2520,9 +2572,6 @@ static void gen6_ggtt_clear_range(struct i915_address_space *vm,
(gen6_pte_t __iomem *)ggtt->gsm + first_entry;
const int max_entries = ggtt_total_entries(ggtt) - first_entry;
int i;
- int rpm_atomic_seq;
-
- rpm_atomic_seq = assert_rpm_atomic_begin(dev_priv);
if (WARN(num_entries > max_entries,
"First entry = %d; Num entries = %d (max=%d)\n",
@@ -2530,13 +2579,11 @@ static void gen6_ggtt_clear_range(struct i915_address_space *vm,
num_entries = max_entries;
scratch_pte = vm->pte_encode(vm->scratch_page.daddr,
- I915_CACHE_LLC, use_scratch, 0);
+ I915_CACHE_LLC, 0);
for (i = 0; i < num_entries; i++)
iowrite32(scratch_pte, &gtt_base[i]);
readl(gtt_base);
-
- assert_rpm_atomic_end(dev_priv, rpm_atomic_seq);
}
static void i915_ggtt_insert_page(struct i915_address_space *vm,
@@ -2545,16 +2592,10 @@ static void i915_ggtt_insert_page(struct i915_address_space *vm,
enum i915_cache_level cache_level,
u32 unused)
{
- struct drm_i915_private *dev_priv = to_i915(vm->dev);
unsigned int flags = (cache_level == I915_CACHE_NONE) ?
AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;
- int rpm_atomic_seq;
-
- rpm_atomic_seq = assert_rpm_atomic_begin(dev_priv);
intel_gtt_insert_page(addr, offset >> PAGE_SHIFT, flags);
-
- assert_rpm_atomic_end(dev_priv, rpm_atomic_seq);
}
static void i915_ggtt_insert_entries(struct i915_address_space *vm,
@@ -2562,40 +2603,25 @@ static void i915_ggtt_insert_entries(struct i915_address_space *vm,
uint64_t start,
enum i915_cache_level cache_level, u32 unused)
{
- struct drm_i915_private *dev_priv = to_i915(vm->dev);
unsigned int flags = (cache_level == I915_CACHE_NONE) ?
AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;
- int rpm_atomic_seq;
-
- rpm_atomic_seq = assert_rpm_atomic_begin(dev_priv);
intel_gtt_insert_sg_entries(pages, start >> PAGE_SHIFT, flags);
- assert_rpm_atomic_end(dev_priv, rpm_atomic_seq);
-
}
static void i915_ggtt_clear_range(struct i915_address_space *vm,
uint64_t start,
- uint64_t length,
- bool unused)
+ uint64_t length)
{
- struct drm_i915_private *dev_priv = to_i915(vm->dev);
- unsigned first_entry = start >> PAGE_SHIFT;
- unsigned num_entries = length >> PAGE_SHIFT;
- int rpm_atomic_seq;
-
- rpm_atomic_seq = assert_rpm_atomic_begin(dev_priv);
-
- intel_gtt_clear_range(first_entry, num_entries);
-
- assert_rpm_atomic_end(dev_priv, rpm_atomic_seq);
+ intel_gtt_clear_range(start >> PAGE_SHIFT, length >> PAGE_SHIFT);
}
static int ggtt_bind_vma(struct i915_vma *vma,
enum i915_cache_level cache_level,
u32 flags)
{
+ struct drm_i915_private *i915 = to_i915(vma->vm->dev);
struct drm_i915_gem_object *obj = vma->obj;
u32 pte_flags = 0;
int ret;
@@ -2608,8 +2634,10 @@ static int ggtt_bind_vma(struct i915_vma *vma,
if (obj->gt_ro)
pte_flags |= PTE_READ_ONLY;
+ intel_runtime_pm_get(i915);
vma->vm->insert_entries(vma->vm, vma->pages, vma->node.start,
cache_level, pte_flags);
+ intel_runtime_pm_put(i915);
/*
* Without aliasing PPGTT there's no difference between
@@ -2625,6 +2653,7 @@ static int aliasing_gtt_bind_vma(struct i915_vma *vma,
enum i915_cache_level cache_level,
u32 flags)
{
+ struct drm_i915_private *i915 = to_i915(vma->vm->dev);
u32 pte_flags;
int ret;
@@ -2639,14 +2668,15 @@ static int aliasing_gtt_bind_vma(struct i915_vma *vma,
if (flags & I915_VMA_GLOBAL_BIND) {
+ intel_runtime_pm_get(i915);
vma->vm->insert_entries(vma->vm,
vma->pages, vma->node.start,
cache_level, pte_flags);
+ intel_runtime_pm_put(i915);
}
if (flags & I915_VMA_LOCAL_BIND) {
- struct i915_hw_ppgtt *appgtt =
- to_i915(vma->vm->dev)->mm.aliasing_ppgtt;
+ struct i915_hw_ppgtt *appgtt = i915->mm.aliasing_ppgtt;
appgtt->base.insert_entries(&appgtt->base,
vma->pages, vma->node.start,
cache_level, pte_flags);
@@ -2657,21 +2687,24 @@ static int aliasing_gtt_bind_vma(struct i915_vma *vma,
static void ggtt_unbind_vma(struct i915_vma *vma)
{
- struct i915_hw_ppgtt *appgtt = to_i915(vma->vm->dev)->mm.aliasing_ppgtt;
+ struct drm_i915_private *i915 = to_i915(vma->vm->dev);
+ struct i915_hw_ppgtt *appgtt = i915->mm.aliasing_ppgtt;
const u64 size = min(vma->size, vma->node.size);
- if (vma->flags & I915_VMA_GLOBAL_BIND)
+ if (vma->flags & I915_VMA_GLOBAL_BIND) {
+ intel_runtime_pm_get(i915);
vma->vm->clear_range(vma->vm,
- vma->node.start, size,
- true);
+ vma->node.start, size);
+ intel_runtime_pm_put(i915);
+ }
if (vma->flags & I915_VMA_LOCAL_BIND && appgtt)
appgtt->base.clear_range(&appgtt->base,
- vma->node.start, size,
- true);
+ vma->node.start, size);
}
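Editor's note: with the rpm_atomic_seq asserts stripped out of every insert_page/insert_entries/clear_range implementation above, the runtime-pm wakeref is instead taken once around each whole GGTT update, as in ggtt_bind_vma() and ggtt_unbind_vma(). A self-contained sketch of the bracketing pattern (rpm_get, rpm_put and write_ptes are stand-ins, not the real intel_runtime_pm_* API):

#include <assert.h>
#include <stdio.h>

struct rpm { int wakeref_count; };

static void rpm_get(struct rpm *rpm) { rpm->wakeref_count++; }

static void rpm_put(struct rpm *rpm)
{
	assert(rpm->wakeref_count > 0);
	rpm->wakeref_count--;
}

/* Hardware may only be touched while a wakeref is held. */
static void write_ptes(const struct rpm *rpm, int n)
{
	assert(rpm->wakeref_count > 0);	/* cf. the old per-write asserts */
	printf("wrote %d PTEs\n", n);
}

/* One get/put now brackets the whole update, as in ggtt_bind_vma(). */
static void bind(struct rpm *rpm, int n_ptes)
{
	rpm_get(rpm);		/* cf. intel_runtime_pm_get(i915) */
	write_ptes(rpm, n_ptes);
	rpm_put(rpm);		/* cf. intel_runtime_pm_put(i915) */
}

int main(void)
{
	struct rpm rpm = { 0 };

	bind(&rpm, 128);
	return 0;
}

Paying for the wakeref once per bind/unbind, rather than asserting inside every PTE write, keeps the hot inner loops free of bookkeeping.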
-void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj)
+void i915_gem_gtt_finish_pages(struct drm_i915_gem_object *obj,
+ struct sg_table *pages)
{
struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
struct device *kdev = &dev_priv->drm.pdev->dev;
@@ -2685,8 +2718,7 @@ void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj)
}
}
- dma_unmap_sg(kdev, obj->pages->sgl, obj->pages->nents,
- PCI_DMA_BIDIRECTIONAL);
+ dma_unmap_sg(kdev, pages->sgl, pages->nents, PCI_DMA_BIDIRECTIONAL);
}
static void i915_gtt_color_adjust(struct drm_mm_node *node,
@@ -2717,6 +2749,7 @@ int i915_gem_init_ggtt(struct drm_i915_private *dev_priv)
*/
struct i915_ggtt *ggtt = &dev_priv->ggtt;
unsigned long hole_start, hole_end;
+ struct i915_hw_ppgtt *ppgtt;
struct drm_mm_node *entry;
int ret;
@@ -2724,45 +2757,48 @@ int i915_gem_init_ggtt(struct drm_i915_private *dev_priv)
if (ret)
return ret;
+ /* Reserve a mappable slot for our lockless error capture */
+ ret = drm_mm_insert_node_in_range_generic(&ggtt->base.mm,
+ &ggtt->error_capture,
+ 4096, 0, -1,
+ 0, ggtt->mappable_end,
+ 0, 0);
+ if (ret)
+ return ret;
+
/* Clear any non-preallocated blocks */
drm_mm_for_each_hole(entry, &ggtt->base.mm, hole_start, hole_end) {
DRM_DEBUG_KMS("clearing unused GTT space: [%lx, %lx]\n",
hole_start, hole_end);
ggtt->base.clear_range(&ggtt->base, hole_start,
- hole_end - hole_start, true);
+ hole_end - hole_start);
}
/* And finally clear the reserved guard page */
ggtt->base.clear_range(&ggtt->base,
- ggtt->base.total - PAGE_SIZE, PAGE_SIZE,
- true);
+ ggtt->base.total - PAGE_SIZE, PAGE_SIZE);
if (USES_PPGTT(dev_priv) && !USES_FULL_PPGTT(dev_priv)) {
- struct i915_hw_ppgtt *ppgtt;
-
ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
- if (!ppgtt)
- return -ENOMEM;
+ if (!ppgtt) {
+ ret = -ENOMEM;
+ goto err;
+ }
ret = __hw_ppgtt_init(ppgtt, dev_priv);
- if (ret) {
- kfree(ppgtt);
- return ret;
- }
+ if (ret)
+ goto err_ppgtt;
- if (ppgtt->base.allocate_va_range)
+ if (ppgtt->base.allocate_va_range) {
ret = ppgtt->base.allocate_va_range(&ppgtt->base, 0,
ppgtt->base.total);
- if (ret) {
- ppgtt->base.cleanup(&ppgtt->base);
- kfree(ppgtt);
- return ret;
+ if (ret)
+ goto err_ppgtt_cleanup;
}
ppgtt->base.clear_range(&ppgtt->base,
ppgtt->base.start,
- ppgtt->base.total,
- true);
+ ppgtt->base.total);
dev_priv->mm.aliasing_ppgtt = ppgtt;
WARN_ON(ggtt->base.bind_vma != ggtt_bind_vma);
@@ -2770,6 +2806,14 @@ int i915_gem_init_ggtt(struct drm_i915_private *dev_priv)
}
return 0;
+
+err_ppgtt_cleanup:
+ ppgtt->base.cleanup(&ppgtt->base);
+err_ppgtt:
+ kfree(ppgtt);
+err:
+ drm_mm_remove_node(&ggtt->error_capture);
+ return ret;
}
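Editor's note: i915_gem_init_ggtt() now has something to unwind: the error-capture node is reserved first, so every later failure must release it, hence the new err_ppgtt_cleanup/err_ppgtt/err ladder. The shape of that idiom, under assumed toy resources (init_with_unwind and fail_step are illustrative):

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

/* Each label releases exactly what the steps before it acquired, in
 * reverse order; fail_step picks where the simulated init gives up. */
static int init_with_unwind(int fail_step)
{
	void *capture, *ppgtt;
	int ret;

	capture = malloc(4096);		/* cf. reserving ggtt->error_capture */
	if (!capture)
		return -ENOMEM;

	ppgtt = malloc(64);		/* cf. kzalloc(sizeof(*ppgtt)) */
	if (!ppgtt) {
		ret = -ENOMEM;
		goto err;
	}

	if (fail_step == 2) {		/* cf. __hw_ppgtt_init() failing */
		ret = -EIO;
		goto err_ppgtt;
	}

	/* The real code keeps both on success; freed here only so the
	 * sketch stays leak-free. */
	free(ppgtt);
	free(capture);
	return 0;

err_ppgtt:
	free(ppgtt);
err:
	free(capture);
	return ret;
}

int main(void)
{
	printf("ok:   %d\n", init_with_unwind(0));
	printf("fail: %d\n", init_with_unwind(2));
	return 0;
}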
/**
@@ -2788,11 +2832,15 @@ void i915_ggtt_cleanup_hw(struct drm_i915_private *dev_priv)
i915_gem_cleanup_stolen(&dev_priv->drm);
+ if (drm_mm_node_allocated(&ggtt->error_capture))
+ drm_mm_remove_node(&ggtt->error_capture);
+
if (drm_mm_initialized(&ggtt->base.mm)) {
intel_vgt_deballoon(dev_priv);
- drm_mm_takedown(&ggtt->base.mm);
- list_del(&ggtt->base.global_link);
+ mutex_lock(&dev_priv->drm.struct_mutex);
+ i915_address_space_fini(&ggtt->base);
+ mutex_unlock(&dev_priv->drm.struct_mutex);
}
ggtt->base.cleanup(&ggtt->base);
@@ -2881,6 +2929,7 @@ static size_t gen9_get_stolen_size(u16 gen9_gmch_ctl)
static int ggtt_probe_common(struct i915_ggtt *ggtt, u64 size)
{
+ struct drm_i915_private *dev_priv = to_i915(ggtt->base.dev);
struct pci_dev *pdev = ggtt->base.dev->pdev;
phys_addr_t phys_addr;
int ret;
@@ -2895,7 +2944,7 @@ static int ggtt_probe_common(struct i915_ggtt *ggtt, u64 size)
* resort to an uncached mapping. The WC issue is easily caught by the
* readback check when writing GTT PTE entries.
*/
- if (IS_BROXTON(ggtt->base.dev))
+ if (IS_BROXTON(dev_priv))
ggtt->gsm = ioremap_nocache(phys_addr, size);
else
ggtt->gsm = ioremap_wc(phys_addr, size);
@@ -2904,9 +2953,7 @@ static int ggtt_probe_common(struct i915_ggtt *ggtt, u64 size)
return -ENOMEM;
}
- ret = setup_scratch_page(ggtt->base.dev,
- &ggtt->base.scratch_page,
- GFP_DMA32);
+ ret = setup_scratch_page(dev_priv, &ggtt->base.scratch_page, GFP_DMA32);
if (ret) {
DRM_ERROR("Scratch setup failed\n");
/* iounmap will also get called at remove, but meh */
@@ -2995,7 +3042,7 @@ static void gen6_gmch_remove(struct i915_address_space *vm)
struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
iounmap(ggtt->gsm);
- cleanup_scratch_page(vm->dev, &vm->scratch_page);
+ cleanup_scratch_page(to_i915(vm->dev), &vm->scratch_page);
}
static int gen8_gmch_probe(struct i915_ggtt *ggtt)
@@ -3190,11 +3237,13 @@ int i915_ggtt_init_hw(struct drm_i915_private *dev_priv)
/* Subtract the guard page before address space initialization to
* shrink the range used by drm_mm.
*/
+ mutex_lock(&dev_priv->drm.struct_mutex);
ggtt->base.total -= PAGE_SIZE;
- i915_address_space_init(&ggtt->base, dev_priv);
+ i915_address_space_init(&ggtt->base, dev_priv, "[global]");
ggtt->base.total += PAGE_SIZE;
if (!HAS_LLC(dev_priv))
ggtt->base.mm.color_adjust = i915_gtt_color_adjust;
+ mutex_unlock(&dev_priv->drm.struct_mutex);
if (!io_mapping_init_wc(&dev_priv->ggtt.mappable,
dev_priv->ggtt.mappable_base,
@@ -3209,7 +3258,7 @@ int i915_ggtt_init_hw(struct drm_i915_private *dev_priv)
* Initialise stolen early so that we may reserve preallocated
* objects for the BIOS to KMS transition.
*/
- ret = i915_gem_init_stolen(&dev_priv->drm);
+ ret = i915_gem_init_stolen(dev_priv);
if (ret)
goto out_gtt_cleanup;
@@ -3228,23 +3277,21 @@ int i915_ggtt_enable_hw(struct drm_i915_private *dev_priv)
return 0;
}
-void i915_gem_restore_gtt_mappings(struct drm_device *dev)
+void i915_gem_restore_gtt_mappings(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
struct i915_ggtt *ggtt = &dev_priv->ggtt;
struct drm_i915_gem_object *obj, *on;
i915_check_and_clear_faults(dev_priv);
/* First fill our portion of the GTT with scratch pages */
- ggtt->base.clear_range(&ggtt->base, ggtt->base.start, ggtt->base.total,
- true);
+ ggtt->base.clear_range(&ggtt->base, ggtt->base.start, ggtt->base.total);
ggtt->base.closed = true; /* skip rewriting PTE on VMA unbind */
/* clflush objects bound into the GGTT and rebind them. */
list_for_each_entry_safe(obj, on,
- &dev_priv->mm.bound_list, global_list) {
+ &dev_priv->mm.bound_list, global_link) {
bool ggtt_bound = false;
struct i915_vma *vma;
@@ -3266,8 +3313,8 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev)
ggtt->base.closed = false;
- if (INTEL_INFO(dev)->gen >= 8) {
- if (IS_CHERRYVIEW(dev) || IS_BROXTON(dev))
+ if (INTEL_GEN(dev_priv) >= 8) {
+ if (IS_CHERRYVIEW(dev_priv) || IS_BROXTON(dev_priv))
chv_setup_private_ppat(dev_priv);
else
bdw_setup_private_ppat(dev_priv);
@@ -3275,7 +3322,7 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev)
return;
}
- if (USES_PPGTT(dev)) {
+ if (USES_PPGTT(dev_priv)) {
struct i915_address_space *vm;
list_for_each_entry(vm, &dev_priv->vm_list, global_link) {
@@ -3296,137 +3343,28 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev)
i915_ggtt_flush(dev_priv);
}
-static void
-i915_vma_retire(struct i915_gem_active *active,
- struct drm_i915_gem_request *rq)
-{
- const unsigned int idx = rq->engine->id;
- struct i915_vma *vma =
- container_of(active, struct i915_vma, last_read[idx]);
-
- GEM_BUG_ON(!i915_vma_has_active_engine(vma, idx));
-
- i915_vma_clear_active(vma, idx);
- if (i915_vma_is_active(vma))
- return;
-
- list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
- if (unlikely(i915_vma_is_closed(vma) && !i915_vma_is_pinned(vma)))
- WARN_ON(i915_vma_unbind(vma));
-}
-
-void i915_vma_destroy(struct i915_vma *vma)
-{
- GEM_BUG_ON(vma->node.allocated);
- GEM_BUG_ON(i915_vma_is_active(vma));
- GEM_BUG_ON(!i915_vma_is_closed(vma));
- GEM_BUG_ON(vma->fence);
-
- list_del(&vma->vm_link);
- if (!i915_vma_is_ggtt(vma))
- i915_ppgtt_put(i915_vm_to_ppgtt(vma->vm));
-
- kmem_cache_free(to_i915(vma->obj->base.dev)->vmas, vma);
-}
-
-void i915_vma_close(struct i915_vma *vma)
-{
- GEM_BUG_ON(i915_vma_is_closed(vma));
- vma->flags |= I915_VMA_CLOSED;
-
- list_del_init(&vma->obj_link);
- if (!i915_vma_is_active(vma) && !i915_vma_is_pinned(vma))
- WARN_ON(i915_vma_unbind(vma));
-}
-
-static struct i915_vma *
-__i915_vma_create(struct drm_i915_gem_object *obj,
- struct i915_address_space *vm,
- const struct i915_ggtt_view *view)
-{
- struct i915_vma *vma;
- int i;
-
- GEM_BUG_ON(vm->closed);
-
- vma = kmem_cache_zalloc(to_i915(obj->base.dev)->vmas, GFP_KERNEL);
- if (vma == NULL)
- return ERR_PTR(-ENOMEM);
-
- INIT_LIST_HEAD(&vma->exec_list);
- for (i = 0; i < ARRAY_SIZE(vma->last_read); i++)
- init_request_active(&vma->last_read[i], i915_vma_retire);
- init_request_active(&vma->last_fence, NULL);
- list_add(&vma->vm_link, &vm->unbound_list);
- vma->vm = vm;
- vma->obj = obj;
- vma->size = obj->base.size;
-
- if (view) {
- vma->ggtt_view = *view;
- if (view->type == I915_GGTT_VIEW_PARTIAL) {
- vma->size = view->params.partial.size;
- vma->size <<= PAGE_SHIFT;
- } else if (view->type == I915_GGTT_VIEW_ROTATED) {
- vma->size =
- intel_rotation_info_size(&view->params.rotated);
- vma->size <<= PAGE_SHIFT;
- }
- }
-
- if (i915_is_ggtt(vm)) {
- vma->flags |= I915_VMA_GGTT;
- } else {
- i915_ppgtt_get(i915_vm_to_ppgtt(vm));
- }
-
- list_add_tail(&vma->obj_link, &obj->vma_list);
- return vma;
-}
-
-static inline bool vma_matches(struct i915_vma *vma,
- struct i915_address_space *vm,
- const struct i915_ggtt_view *view)
-{
- if (vma->vm != vm)
- return false;
-
- if (!i915_vma_is_ggtt(vma))
- return true;
-
- if (!view)
- return vma->ggtt_view.type == 0;
-
- if (vma->ggtt_view.type != view->type)
- return false;
-
- return memcmp(&vma->ggtt_view.params,
- &view->params,
- sizeof(view->params)) == 0;
-}
-
-struct i915_vma *
-i915_vma_create(struct drm_i915_gem_object *obj,
- struct i915_address_space *vm,
- const struct i915_ggtt_view *view)
-{
- GEM_BUG_ON(view && !i915_is_ggtt(vm));
- GEM_BUG_ON(i915_gem_obj_to_vma(obj, vm, view));
-
- return __i915_vma_create(obj, vm, view);
-}
-
struct i915_vma *
i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
struct i915_address_space *vm,
const struct i915_ggtt_view *view)
{
- struct i915_vma *vma;
+ struct rb_node *rb;
- list_for_each_entry_reverse(vma, &obj->vma_list, obj_link)
- if (vma_matches(vma, vm, view))
+ rb = obj->vma_tree.rb_node;
+ while (rb) {
+ struct i915_vma *vma = rb_entry(rb, struct i915_vma, obj_node);
+ long cmp;
+
+ cmp = i915_vma_compare(vma, vm, view);
+ if (cmp == 0)
return vma;
+ if (cmp < 0)
+ rb = rb->rb_right;
+ else
+ rb = rb->rb_left;
+ }
+
return NULL;
}
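Editor's note: i915_gem_obj_to_vma() switches from a reverse list scan to a descent of the per-object rb-tree, keyed by i915_vma_compare() over the (vm, view) pair. A stand-alone sketch of the same descent with a plain long key (node and lookup are illustrative, not the kernel's rb_node API):

#include <stddef.h>
#include <stdio.h>

struct node {
	struct node *left, *right;
	long key;	/* stands in for the (vm, ggtt_view) identity */
};

/* Same walk as the hunk above: zero comparison is a hit,
 * negative goes right, positive goes left. */
static struct node *lookup(struct node *rb, long want)
{
	while (rb) {
		long cmp = (rb->key > want) - (rb->key < want);

		if (cmp == 0)
			return rb;
		rb = cmp < 0 ? rb->right : rb->left;
	}
	return NULL;
}

int main(void)
{
	struct node a = { NULL, NULL, 1 }, c = { NULL, NULL, 5 };
	struct node b = { &a, &c, 3 };

	printf("found: %ld\n", lookup(&b, 5)->key);
	printf("miss:  %p\n", (void *)lookup(&b, 4));
	return 0;
}

The payoff is that a lookup drops from walking every vma on the object to O(log n) tree steps.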
@@ -3437,11 +3375,14 @@ i915_gem_obj_lookup_or_create_vma(struct drm_i915_gem_object *obj,
{
struct i915_vma *vma;
+ lockdep_assert_held(&obj->base.dev->struct_mutex);
GEM_BUG_ON(view && !i915_is_ggtt(vm));
vma = i915_gem_obj_to_vma(obj, vm, view);
- if (!vma)
- vma = __i915_vma_create(obj, vm, view);
+ if (!vma) {
+ vma = i915_vma_create(obj, vm, view);
+ GEM_BUG_ON(vma != i915_gem_obj_to_vma(obj, vm, view));
+ }
GEM_BUG_ON(i915_vma_is_closed(vma));
return vma;
@@ -3507,7 +3448,7 @@ intel_rotate_fb_obj_pages(const struct intel_rotation_info *rot_info,
/* Populate source page list from the object. */
i = 0;
- for_each_sgt_dma(dma_addr, sgt_iter, obj->pages)
+ for_each_sgt_dma(dma_addr, sgt_iter, obj->mm.pages)
page_addr_list[i++] = dma_addr;
GEM_BUG_ON(i != n_pages);
@@ -3543,35 +3484,47 @@ intel_partial_pages(const struct i915_ggtt_view *view,
struct drm_i915_gem_object *obj)
{
struct sg_table *st;
- struct scatterlist *sg;
- struct sg_page_iter obj_sg_iter;
+ struct scatterlist *sg, *iter;
+ unsigned int count = view->params.partial.size;
+ unsigned int offset;
int ret = -ENOMEM;
st = kmalloc(sizeof(*st), GFP_KERNEL);
if (!st)
goto err_st_alloc;
- ret = sg_alloc_table(st, view->params.partial.size, GFP_KERNEL);
+ ret = sg_alloc_table(st, count, GFP_KERNEL);
if (ret)
goto err_sg_alloc;
+ iter = i915_gem_object_get_sg(obj,
+ view->params.partial.offset,
+ &offset);
+ GEM_BUG_ON(!iter);
+
sg = st->sgl;
st->nents = 0;
- for_each_sg_page(obj->pages->sgl, &obj_sg_iter, obj->pages->nents,
- view->params.partial.offset)
- {
- if (st->nents >= view->params.partial.size)
- break;
+ do {
+ unsigned int len;
- sg_set_page(sg, NULL, PAGE_SIZE, 0);
- sg_dma_address(sg) = sg_page_iter_dma_address(&obj_sg_iter);
- sg_dma_len(sg) = PAGE_SIZE;
+ len = min(iter->length - (offset << PAGE_SHIFT),
+ count << PAGE_SHIFT);
+ sg_set_page(sg, NULL, len, 0);
+ sg_dma_address(sg) =
+ sg_dma_address(iter) + (offset << PAGE_SHIFT);
+ sg_dma_len(sg) = len;
- sg = sg_next(sg);
st->nents++;
- }
+ count -= len >> PAGE_SHIFT;
+ if (count == 0) {
+ sg_mark_end(sg);
+ return st;
+ }
- return st;
+ sg = __sg_next(sg);
+ iter = __sg_next(iter);
+ offset = 0;
+ } while (1);
err_sg_alloc:
kfree(st);
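Editor's note: the new intel_partial_pages() no longer expands the view page-by-page; it seeks to the starting scatterlist entry with i915_gem_object_get_sg() and then copies whole coalesced chunks, splitting only at the range boundaries. Roughly, under an assumed extent representation (struct extent and copy_partial are illustrative, not the sg_table API):

#include <stdint.h>
#include <stdio.h>

struct extent { uint64_t dma; unsigned int pages; };	/* one coalesced sg chunk */

/* Emit pages [offset, offset + count) of an object described by coalesced
 * extents, splitting only at the range boundaries like the new loop does. */
static void copy_partial(const struct extent *src, unsigned int nsrc,
			 unsigned int offset, unsigned int count)
{
	unsigned int i = 0;

	/* cf. i915_gem_object_get_sg(): seek to the starting entry */
	while (i < nsrc && offset >= src[i].pages)
		offset -= src[i++].pages;

	while (count && i < nsrc) {
		unsigned int len = src[i].pages - offset;

		if (len > count)
			len = count;
		printf("dst chunk: dma=%#llx pages=%u\n",
		       (unsigned long long)(src[i].dma + ((uint64_t)offset << 12)),
		       len);
		count -= len;
		offset = 0;
		i++;
	}
}

int main(void)
{
	const struct extent obj[] = { { 0x1000, 4 }, { 0x9000, 8 } };

	copy_partial(obj, 2, 2, 6);	/* pages [2, 8) of the object */
	return 0;
}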
@@ -3584,11 +3537,18 @@ i915_get_ggtt_vma_pages(struct i915_vma *vma)
{
int ret = 0;
+ /* The vma->pages are only valid within the lifespan of the borrowed
+ * obj->mm.pages. When the obj->mm.pages sg_table is regenerated, so
+ * must be the vma->pages. A simple rule is that vma->pages must only
+ * be accessed when the obj->mm.pages are pinned.
+ */
+ GEM_BUG_ON(!i915_gem_object_has_pinned_pages(vma->obj));
+
if (vma->pages)
return 0;
if (vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL)
- vma->pages = vma->obj->pages;
+ vma->pages = vma->obj->mm.pages;
else if (vma->ggtt_view.type == I915_GGTT_VIEW_ROTATED)
vma->pages =
intel_rotate_fb_obj_pages(&vma->ggtt_view.params.rotated, vma->obj);
@@ -3612,94 +3572,3 @@ i915_get_ggtt_vma_pages(struct i915_vma *vma)
return ret;
}
-/**
- * i915_vma_bind - Sets up PTEs for a VMA in its corresponding address space.
- * @vma: VMA to map
- * @cache_level: mapping cache level
- * @flags: flags like global or local mapping
- *
- * DMA addresses are taken from the scatter-gather table of this object (or of
- * this VMA in case of non-default GGTT views) and PTE entries set up.
- * Note that DMA addresses are also the only part of the SG table we care about.
- */
-int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
- u32 flags)
-{
- u32 bind_flags;
- u32 vma_flags;
- int ret;
-
- if (WARN_ON(flags == 0))
- return -EINVAL;
-
- bind_flags = 0;
- if (flags & PIN_GLOBAL)
- bind_flags |= I915_VMA_GLOBAL_BIND;
- if (flags & PIN_USER)
- bind_flags |= I915_VMA_LOCAL_BIND;
-
- vma_flags = vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND);
- if (flags & PIN_UPDATE)
- bind_flags |= vma_flags;
- else
- bind_flags &= ~vma_flags;
- if (bind_flags == 0)
- return 0;
-
- if (vma_flags == 0 && vma->vm->allocate_va_range) {
- trace_i915_va_alloc(vma);
- ret = vma->vm->allocate_va_range(vma->vm,
- vma->node.start,
- vma->node.size);
- if (ret)
- return ret;
- }
-
- ret = vma->vm->bind_vma(vma, cache_level, bind_flags);
- if (ret)
- return ret;
-
- vma->flags |= bind_flags;
- return 0;
-}
-
-void __iomem *i915_vma_pin_iomap(struct i915_vma *vma)
-{
- void __iomem *ptr;
-
- /* Access through the GTT requires the device to be awake. */
- assert_rpm_wakelock_held(to_i915(vma->vm->dev));
-
- lockdep_assert_held(&vma->vm->dev->struct_mutex);
- if (WARN_ON(!i915_vma_is_map_and_fenceable(vma)))
- return IO_ERR_PTR(-ENODEV);
-
- GEM_BUG_ON(!i915_vma_is_ggtt(vma));
- GEM_BUG_ON((vma->flags & I915_VMA_GLOBAL_BIND) == 0);
-
- ptr = vma->iomap;
- if (ptr == NULL) {
- ptr = io_mapping_map_wc(&i915_vm_to_ggtt(vma->vm)->mappable,
- vma->node.start,
- vma->node.size);
- if (ptr == NULL)
- return IO_ERR_PTR(-ENOMEM);
-
- vma->iomap = ptr;
- }
-
- __i915_vma_pin(vma);
- return ptr;
-}
-
-void i915_vma_unpin_and_release(struct i915_vma **p_vma)
-{
- struct i915_vma *vma;
-
- vma = fetch_and_zero(p_vma);
- if (!vma)
- return;
-
- i915_vma_unpin(vma);
- i915_vma_put(vma);
-}
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h
index ec78be2f8c77..4f35be4c26c7 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.h
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.h
@@ -35,7 +35,9 @@
#define __I915_GEM_GTT_H__
#include <linux/io-mapping.h>
+#include <linux/mm.h>
+#include "i915_gem_timeline.h"
#include "i915_gem_request.h"
#define I915_FENCE_REG_NONE -1
@@ -118,8 +120,8 @@ typedef uint64_t gen8_ppgtt_pml4e_t;
#define GEN8_LEGACY_PDPES 4
#define GEN8_PTES I915_PTES(sizeof(gen8_pte_t))
-#define I915_PDPES_PER_PDP(dev) (USES_FULL_48BIT_PPGTT(dev) ?\
- GEN8_PML4ES_PER_PML4 : GEN8_LEGACY_PDPES)
+#define I915_PDPES_PER_PDP(dev_priv) (USES_FULL_48BIT_PPGTT(dev_priv) ?\
+ GEN8_PML4ES_PER_PML4 : GEN8_LEGACY_PDPES)
#define PPAT_UNCACHED_INDEX (_PAGE_PWT | _PAGE_PCD)
#define PPAT_CACHED_PDE_INDEX 0 /* WB LLC */
@@ -138,6 +140,8 @@ typedef uint64_t gen8_ppgtt_pml4e_t;
#define GEN8_PPAT_ELLC_OVERRIDE (0<<2)
#define GEN8_PPAT(i, x) ((uint64_t) (x) << ((i) * 8))
+struct sg_table;
+
enum i915_ggtt_view_type {
I915_GGTT_VIEW_NORMAL = 0,
I915_GGTT_VIEW_ROTATED,
@@ -168,133 +172,7 @@ extern const struct i915_ggtt_view i915_ggtt_view_rotated;
enum i915_cache_level;
-/**
- * A VMA represents a GEM BO that is bound into an address space. Therefore, a
- * VMA's presence cannot be guaranteed before binding, or after unbinding the
- * object into/from the address space.
- *
- * To make things as simple as possible (i.e. no refcounting), a VMA's lifetime
- * will always be <= an object's lifetime. So object refcounting should cover us.
- */
-struct i915_vma {
- struct drm_mm_node node;
- struct drm_i915_gem_object *obj;
- struct i915_address_space *vm;
- struct drm_i915_fence_reg *fence;
- struct sg_table *pages;
- void __iomem *iomap;
- u64 size;
- u64 display_alignment;
-
- unsigned int flags;
- /**
- * How many users have pinned this object in GTT space. The following
- * users can each hold at most one reference: pwrite/pread, execbuffer
- * (objects are not allowed multiple times for the same batchbuffer),
- * and the framebuffer code. When switching/pageflipping, the
- * framebuffer code has at most two buffers pinned per crtc.
- *
- * In the worst case this is 1 + 1 + 1 + 2*2 = 7. That would fit into 3
- * bits with absolutely no headroom. So use 4 bits.
- */
-#define I915_VMA_PIN_MASK 0xf
-#define I915_VMA_PIN_OVERFLOW BIT(5)
-
- /** Flags and address space this VMA is bound to */
-#define I915_VMA_GLOBAL_BIND BIT(6)
-#define I915_VMA_LOCAL_BIND BIT(7)
-#define I915_VMA_BIND_MASK (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND | I915_VMA_PIN_OVERFLOW)
-
-#define I915_VMA_GGTT BIT(8)
-#define I915_VMA_CAN_FENCE BIT(9)
-#define I915_VMA_CLOSED BIT(10)
-
- unsigned int active;
- struct i915_gem_active last_read[I915_NUM_ENGINES];
- struct i915_gem_active last_fence;
-
- /**
- * Support different GGTT views into the same object.
- * This means there can be multiple VMA mappings per object and per VM.
- * i915_ggtt_view_type is used to distinguish between those entries.
- * The default one of zero (I915_GGTT_VIEW_NORMAL) is default and also
- * assumed in GEM functions which take no ggtt view parameter.
- */
- struct i915_ggtt_view ggtt_view;
-
- /** This object's place on the active/inactive lists */
- struct list_head vm_link;
-
- struct list_head obj_link; /* Link in the object's VMA list */
-
- /** This vma's place in the batchbuffer or on the eviction list */
- struct list_head exec_list;
-
- /**
- * Used for performing relocations during execbuffer insertion.
- */
- struct hlist_node exec_node;
- unsigned long exec_handle;
- struct drm_i915_gem_exec_object2 *exec_entry;
-};
-
-struct i915_vma *
-i915_vma_create(struct drm_i915_gem_object *obj,
- struct i915_address_space *vm,
- const struct i915_ggtt_view *view);
-void i915_vma_unpin_and_release(struct i915_vma **p_vma);
-
-static inline bool i915_vma_is_ggtt(const struct i915_vma *vma)
-{
- return vma->flags & I915_VMA_GGTT;
-}
-
-static inline bool i915_vma_is_map_and_fenceable(const struct i915_vma *vma)
-{
- return vma->flags & I915_VMA_CAN_FENCE;
-}
-
-static inline bool i915_vma_is_closed(const struct i915_vma *vma)
-{
- return vma->flags & I915_VMA_CLOSED;
-}
-
-static inline unsigned int i915_vma_get_active(const struct i915_vma *vma)
-{
- return vma->active;
-}
-
-static inline bool i915_vma_is_active(const struct i915_vma *vma)
-{
- return i915_vma_get_active(vma);
-}
-
-static inline void i915_vma_set_active(struct i915_vma *vma,
- unsigned int engine)
-{
- vma->active |= BIT(engine);
-}
-
-static inline void i915_vma_clear_active(struct i915_vma *vma,
- unsigned int engine)
-{
- vma->active &= ~BIT(engine);
-}
-
-static inline bool i915_vma_has_active_engine(const struct i915_vma *vma,
- unsigned int engine)
-{
- return vma->active & BIT(engine);
-}
-
-static inline u32 i915_ggtt_offset(const struct i915_vma *vma)
-{
- GEM_BUG_ON(!i915_vma_is_ggtt(vma));
- GEM_BUG_ON(!vma->node.allocated);
- GEM_BUG_ON(upper_32_bits(vma->node.start));
- GEM_BUG_ON(upper_32_bits(vma->node.start + vma->node.size - 1));
- return lower_32_bits(vma->node.start);
-}
+struct i915_vma;
struct i915_page_dma {
struct page *page;
@@ -341,6 +219,7 @@ struct i915_pml4 {
struct i915_address_space {
struct drm_mm mm;
+ struct i915_gem_timeline timeline;
struct drm_device *dev;
/* Every address space belongs to a struct file - except for the global
* GTT that is owned by the driver (and so @file is set to NULL). In
@@ -395,7 +274,7 @@ struct i915_address_space {
/* FIXME: Need a more generic return type */
gen6_pte_t (*pte_encode)(dma_addr_t addr,
enum i915_cache_level level,
- bool valid, u32 flags); /* Create a valid PTE */
+ u32 flags); /* Create a valid PTE */
/* flags for pte_encode */
#define PTE_READ_ONLY (1<<0)
int (*allocate_va_range)(struct i915_address_space *vm,
@@ -403,8 +282,7 @@ struct i915_address_space {
uint64_t length);
void (*clear_range)(struct i915_address_space *vm,
uint64_t start,
- uint64_t length,
- bool use_scratch);
+ uint64_t length);
void (*insert_page)(struct i915_address_space *vm,
dma_addr_t addr,
uint64_t offset,
@@ -450,6 +328,8 @@ struct i915_ggtt {
bool do_idle_maps;
int mtrr;
+
+ struct drm_mm_node error_capture;
};
struct i915_hw_ppgtt {
@@ -602,16 +482,24 @@ i915_page_dir_dma_addr(const struct i915_hw_ppgtt *ppgtt, const unsigned n)
px_dma(ppgtt->base.scratch_pd);
}
+static inline struct i915_ggtt *
+i915_vm_to_ggtt(struct i915_address_space *vm)
+{
+ GEM_BUG_ON(!i915_is_ggtt(vm));
+ return container_of(vm, struct i915_ggtt, base);
+}
+
int i915_ggtt_probe_hw(struct drm_i915_private *dev_priv);
int i915_ggtt_init_hw(struct drm_i915_private *dev_priv);
int i915_ggtt_enable_hw(struct drm_i915_private *dev_priv);
int i915_gem_init_ggtt(struct drm_i915_private *dev_priv);
void i915_ggtt_cleanup_hw(struct drm_i915_private *dev_priv);
-int i915_ppgtt_init_hw(struct drm_device *dev);
+int i915_ppgtt_init_hw(struct drm_i915_private *dev_priv);
void i915_ppgtt_release(struct kref *kref);
struct i915_hw_ppgtt *i915_ppgtt_create(struct drm_i915_private *dev_priv,
- struct drm_i915_file_private *fpriv);
+ struct drm_i915_file_private *fpriv,
+ const char *name);
static inline void i915_ppgtt_get(struct i915_hw_ppgtt *ppgtt)
{
if (ppgtt)
@@ -624,11 +512,13 @@ static inline void i915_ppgtt_put(struct i915_hw_ppgtt *ppgtt)
}
void i915_check_and_clear_faults(struct drm_i915_private *dev_priv);
-void i915_gem_suspend_gtt_mappings(struct drm_device *dev);
-void i915_gem_restore_gtt_mappings(struct drm_device *dev);
+void i915_gem_suspend_gtt_mappings(struct drm_i915_private *dev_priv);
+void i915_gem_restore_gtt_mappings(struct drm_i915_private *dev_priv);
-int __must_check i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj);
-void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj);
+int __must_check i915_gem_gtt_prepare_pages(struct drm_i915_gem_object *obj,
+ struct sg_table *pages);
+void i915_gem_gtt_finish_pages(struct drm_i915_gem_object *obj,
+ struct sg_table *pages);
/* Flags used by pin/bind&friends. */
#define PIN_NONBLOCK BIT(0)
@@ -646,88 +536,4 @@ void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj);
#define PIN_OFFSET_FIXED BIT(11)
#define PIN_OFFSET_MASK (~4095)
-int __i915_vma_do_pin(struct i915_vma *vma,
- u64 size, u64 alignment, u64 flags);
-static inline int __must_check
-i915_vma_pin(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
-{
- BUILD_BUG_ON(PIN_MBZ != I915_VMA_PIN_OVERFLOW);
- BUILD_BUG_ON(PIN_GLOBAL != I915_VMA_GLOBAL_BIND);
- BUILD_BUG_ON(PIN_USER != I915_VMA_LOCAL_BIND);
-
- /* Pin early to prevent the shrinker/eviction logic from destroying
- * our vma as we insert and bind.
- */
- if (likely(((++vma->flags ^ flags) & I915_VMA_BIND_MASK) == 0))
- return 0;
-
- return __i915_vma_do_pin(vma, size, alignment, flags);
-}
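
The fast path above packs a pin count into the low bits of vma->flags next to the bind-type bits, so a single increment-and-XOR both takes the pin and verifies the requested bind is already present. A minimal userspace sketch of that trick, with illustrative mask values rather than the driver's actual layout:

/* Standalone sketch of the flags packing used by i915_vma_pin(): the low
 * byte of one word holds a pin count, with bind-type bits packed above it.
 * Mask values here are illustrative only.
 */
#include <assert.h>

#define PIN_MASK	0xff		/* low byte: pin count */
#define GLOBAL_BIND	(1 << 8)	/* bound into the global GTT */
#define LOCAL_BIND	(1 << 9)	/* bound into a per-process VM */

static int try_fast_pin(unsigned int *flags, unsigned int wanted)
{
	/* Increment the pin count and check, in one operation, that every
	 * requested bind bit is already set: the XOR clears matching bits,
	 * so any remainder within the bind mask means a miss.
	 */
	if (((++*flags ^ wanted) & (GLOBAL_BIND | LOCAL_BIND)) == 0)
		return 1;	/* already bound as requested */
	return 0;		/* fall back to the slow path */
}

int main(void)
{
	unsigned int flags = GLOBAL_BIND;	/* bound, pin count 0 */

	assert(try_fast_pin(&flags, GLOBAL_BIND));	/* hit */
	assert((flags & PIN_MASK) == 1);		/* pinned once */
	assert(!try_fast_pin(&flags, LOCAL_BIND));	/* miss */
	return 0;
}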
-
-static inline int i915_vma_pin_count(const struct i915_vma *vma)
-{
- return vma->flags & I915_VMA_PIN_MASK;
-}
-
-static inline bool i915_vma_is_pinned(const struct i915_vma *vma)
-{
- return i915_vma_pin_count(vma);
-}
-
-static inline void __i915_vma_pin(struct i915_vma *vma)
-{
- vma->flags++;
- GEM_BUG_ON(vma->flags & I915_VMA_PIN_OVERFLOW);
-}
-
-static inline void __i915_vma_unpin(struct i915_vma *vma)
-{
- GEM_BUG_ON(!i915_vma_is_pinned(vma));
- vma->flags--;
-}
-
-static inline void i915_vma_unpin(struct i915_vma *vma)
-{
- GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
- __i915_vma_unpin(vma);
-}
-
-/**
- * i915_vma_pin_iomap - calls ioremap_wc to map the GGTT VMA via the aperture
- * @vma: VMA to iomap
- *
- * The passed in VMA has to be pinned in the global GTT mappable region.
- * An extra pinning of the VMA is acquired for the returned iomapping;
- * the caller must call i915_vma_unpin_iomap to relinquish the pinning
- * after the iomapping is no longer required.
- *
- * Callers must hold the struct_mutex.
- *
- * Returns a valid iomapped pointer or ERR_PTR.
- */
-void __iomem *i915_vma_pin_iomap(struct i915_vma *vma);
-#define IO_ERR_PTR(x) ((void __iomem *)ERR_PTR(x))
-
-/**
- * i915_vma_unpin_iomap - unpins the mapping returned from i915_vma_pin_iomap
- * @vma: VMA to unpin
- *
- * Unpins the previously iomapped VMA from i915_vma_pin_iomap().
- *
- * Callers must hold the struct_mutex. This function is only valid to be
- * called on a VMA previously iomapped by the caller with i915_vma_pin_iomap().
- */
-static inline void i915_vma_unpin_iomap(struct i915_vma *vma)
-{
- lockdep_assert_held(&vma->vm->dev->struct_mutex);
- GEM_BUG_ON(vma->iomap == NULL);
- i915_vma_unpin(vma);
-}
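
A hedged usage sketch of the pin_iomap/unpin_iomap pairing documented above; write_magic is a hypothetical caller that is assumed to already hold struct_mutex:

static int write_magic(struct i915_vma *vma, u32 value)
{
	u32 __iomem *map;

	map = i915_vma_pin_iomap(vma);	/* takes an extra pin on the vma */
	if (IS_ERR(map))
		return PTR_ERR(map);

	writel(value, map);		/* access through the GGTT aperture */

	i915_vma_unpin_iomap(vma);	/* drop the iomap pin */
	return 0;
}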
-
-static inline struct page *i915_vma_first_page(struct i915_vma *vma)
-{
- GEM_BUG_ON(!vma->pages);
- return sg_page(vma->pages->sgl);
-}
-
#endif
diff --git a/drivers/gpu/drm/i915/i915_gem_internal.c b/drivers/gpu/drm/i915/i915_gem_internal.c
new file mode 100644
index 000000000000..4b3ff3e5b911
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_gem_internal.c
@@ -0,0 +1,170 @@
+/*
+ * Copyright © 2014-2016 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#include <drm/drmP.h>
+#include <drm/i915_drm.h>
+#include "i915_drv.h"
+
+#define QUIET (__GFP_NORETRY | __GFP_NOWARN)
+
+/* convert swiotlb segment size into sensible units (pages)! */
+#define IO_TLB_SEGPAGES (IO_TLB_SEGSIZE << IO_TLB_SHIFT >> PAGE_SHIFT)
+
+static void internal_free_pages(struct sg_table *st)
+{
+ struct scatterlist *sg;
+
+ for (sg = st->sgl; sg; sg = __sg_next(sg))
+ __free_pages(sg_page(sg), get_order(sg->length));
+
+ sg_free_table(st);
+ kfree(st);
+}
+
+static struct sg_table *
+i915_gem_object_get_pages_internal(struct drm_i915_gem_object *obj)
+{
+ struct drm_i915_private *i915 = to_i915(obj->base.dev);
+ unsigned int npages = obj->base.size / PAGE_SIZE;
+ struct sg_table *st;
+ struct scatterlist *sg;
+ int max_order;
+ gfp_t gfp;
+
+ st = kmalloc(sizeof(*st), GFP_KERNEL);
+ if (!st)
+ return ERR_PTR(-ENOMEM);
+
+ if (sg_alloc_table(st, npages, GFP_KERNEL)) {
+ kfree(st);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ sg = st->sgl;
+ st->nents = 0;
+
+ max_order = MAX_ORDER;
+#ifdef CONFIG_SWIOTLB
+ if (swiotlb_nr_tbl()) /* minimum max swiotlb size is IO_TLB_SEGSIZE */
+ max_order = min(max_order, ilog2(IO_TLB_SEGPAGES));
+#endif
+
+ gfp = GFP_KERNEL | __GFP_HIGHMEM | __GFP_RECLAIMABLE;
+ if (IS_CRESTLINE(i915) || IS_BROADWATER(i915)) {
+ /* 965gm cannot relocate objects above 4GiB. */
+ gfp &= ~__GFP_HIGHMEM;
+ gfp |= __GFP_DMA32;
+ }
+
+ do {
+ int order = min(fls(npages) - 1, max_order);
+ struct page *page;
+
+ do {
+ page = alloc_pages(gfp | (order ? QUIET : 0), order);
+ if (page)
+ break;
+ if (!order--)
+ goto err;
+
+ /* Limit subsequent allocations as well */
+ max_order = order;
+ } while (1);
+
+ sg_set_page(sg, page, PAGE_SIZE << order, 0);
+ st->nents++;
+
+ npages -= 1 << order;
+ if (!npages) {
+ sg_mark_end(sg);
+ break;
+ }
+
+ sg = __sg_next(sg);
+ } while (1);
+
+ if (i915_gem_gtt_prepare_pages(obj, st))
+ goto err;
+
+ /* Mark the pages as dontneed whilst they are still pinned. As soon
+ * as they are unpinned they are allowed to be reaped by the shrinker,
+ * and the caller is expected to repopulate - the contents of this
+ * object are only valid whilst active and pinned.
+ */
+ obj->mm.madv = I915_MADV_DONTNEED;
+ return st;
+
+err:
+ sg_mark_end(sg);
+ internal_free_pages(st);
+ return ERR_PTR(-ENOMEM);
+}
+
+static void i915_gem_object_put_pages_internal(struct drm_i915_gem_object *obj,
+ struct sg_table *pages)
+{
+ i915_gem_gtt_finish_pages(obj, pages);
+ internal_free_pages(pages);
+
+ obj->mm.dirty = false;
+ obj->mm.madv = I915_MADV_WILLNEED;
+}
+
+static const struct drm_i915_gem_object_ops i915_gem_object_internal_ops = {
+ .flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE |
+ I915_GEM_OBJECT_IS_SHRINKABLE,
+ .get_pages = i915_gem_object_get_pages_internal,
+ .put_pages = i915_gem_object_put_pages_internal,
+};
+
+/**
+ * Creates a new object that wraps some internal memory for private use.
+ * This object is not backed by swappable storage, and as such its contents
+ * are volatile and only valid whilst pinned. If the object is reaped by the
+ * shrinker, its pages and data will be discarded. Equally, it is not a full
+ * GEM object and so not valid for access from userspace. This makes it useful
+ * for hardware interfaces like ringbuffers (which are pinned from the time
+ * the request is written to the time the hardware stops accessing it), but
+ * not for contexts (which need to be preserved when not active for later
+ * reuse). Note that it is not cleared upon allocation.
+ */
+struct drm_i915_gem_object *
+i915_gem_object_create_internal(struct drm_i915_private *i915,
+ unsigned int size)
+{
+ struct drm_i915_gem_object *obj;
+
+ obj = i915_gem_object_alloc(&i915->drm);
+ if (!obj)
+ return ERR_PTR(-ENOMEM);
+
+ drm_gem_private_object_init(&i915->drm, &obj->base, size);
+ i915_gem_object_init(obj, &i915_gem_object_internal_ops);
+
+ obj->base.write_domain = I915_GEM_DOMAIN_CPU;
+ obj->base.read_domains = I915_GEM_DOMAIN_CPU;
+ obj->cache_level = HAS_LLC(i915) ? I915_CACHE_LLC : I915_CACHE_NONE;
+
+ return obj;
+}
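
A hedged usage sketch of the constructor above; alloc_scratch is a hypothetical helper with minimal error handling. The contents are only valid while the object is pinned, and the memory is not zeroed:

static struct drm_i915_gem_object *
alloc_scratch(struct drm_i915_private *i915)
{
	struct drm_i915_gem_object *obj;

	obj = i915_gem_object_create_internal(i915, 4096);
	if (IS_ERR(obj))
		return obj;

	/* ... pin, use as a hardware-only buffer, then release ... */
	return obj;
}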
diff --git a/drivers/gpu/drm/i915/i915_gem_object.h b/drivers/gpu/drm/i915/i915_gem_object.h
new file mode 100644
index 000000000000..6a368de9d81e
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_gem_object.h
@@ -0,0 +1,338 @@
+/*
+ * Copyright © 2016 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __I915_GEM_OBJECT_H__
+#define __I915_GEM_OBJECT_H__
+
+#include <linux/reservation.h>
+
+#include <drm/drm_vma_manager.h>
+#include <drm/drm_gem.h>
+#include <drm/drmP.h>
+
+#include <drm/i915_drm.h>
+
+struct drm_i915_gem_object_ops {
+ unsigned int flags;
+#define I915_GEM_OBJECT_HAS_STRUCT_PAGE 0x1
+#define I915_GEM_OBJECT_IS_SHRINKABLE 0x2
+
+ /* Interface between the GEM object and its backing storage.
+ * get_pages() is called once prior to the use of the associated set
+ * of pages before binding them into the GTT, and put_pages() is
+ * called after we no longer need them. As we expect there to be an
+ * associated cost with migrating pages between the backing storage
+ * and making them available for the GPU (e.g. clflush), we may hold
+ * onto the pages after they are no longer referenced by the GPU
+ * in case they may be used again shortly (for example migrating the
+ * pages to a different memory domain within the GTT). put_pages()
+ * will therefore most likely be called when the object itself is
+ * being released or under memory pressure (where we attempt to
+ * reap pages for the shrinker).
+ */
+ struct sg_table *(*get_pages)(struct drm_i915_gem_object *);
+ void (*put_pages)(struct drm_i915_gem_object *, struct sg_table *);
+
+ int (*dmabuf_export)(struct drm_i915_gem_object *);
+ void (*release)(struct drm_i915_gem_object *);
+};
+
+struct drm_i915_gem_object {
+ struct drm_gem_object base;
+
+ const struct drm_i915_gem_object_ops *ops;
+
+ /** List of VMAs backed by this object */
+ struct list_head vma_list;
+ struct rb_root vma_tree;
+
+ /** Stolen memory for this object, instead of being backed by shmem. */
+ struct drm_mm_node *stolen;
+ struct list_head global_link;
+ union {
+ struct rcu_head rcu;
+ struct llist_node freed;
+ };
+
+ /**
+ * Whether the object is currently in the GGTT mmap.
+ */
+ struct list_head userfault_link;
+
+ /** Used in execbuf to temporarily hold a ref */
+ struct list_head obj_exec_link;
+
+ struct list_head batch_pool_link;
+
+ unsigned long flags;
+
+ /**
+ * Have we taken a reference to the object for incomplete GPU
+ * activity?
+ */
+#define I915_BO_ACTIVE_REF 0
+
+ /*
+ * Is the object to be mapped as read-only to the GPU
+ * Only honoured if hardware has relevant pte bit
+ */
+ unsigned long gt_ro:1;
+ unsigned int cache_level:3;
+ unsigned int cache_dirty:1;
+
+ atomic_t frontbuffer_bits;
+ unsigned int frontbuffer_ggtt_origin; /* write once */
+ struct i915_gem_active frontbuffer_write;
+
+ /** Current tiling stride for the object, if it's tiled. */
+ unsigned int tiling_and_stride;
+#define FENCE_MINIMUM_STRIDE 128 /* See i915_tiling_ok() */
+#define TILING_MASK (FENCE_MINIMUM_STRIDE-1)
+#define STRIDE_MASK (~TILING_MASK)
+
+ /** Count of VMA actually bound by this object */
+ unsigned int bind_count;
+ unsigned int active_count;
+ unsigned int pin_display;
+
+ struct {
+ struct mutex lock; /* protects the pages and their use */
+ atomic_t pages_pin_count;
+
+ struct sg_table *pages;
+ void *mapping;
+
+ struct i915_gem_object_page_iter {
+ struct scatterlist *sg_pos;
+ unsigned int sg_idx; /* in pages, but 32bit eek! */
+
+ struct radix_tree_root radix;
+ struct mutex lock; /* protects this cache */
+ } get_page;
+
+ /**
+ * Advice: are the backing pages purgeable?
+ */
+ unsigned int madv:2;
+
+ /**
+ * This is set if the object has been written to since the
+ * pages were last acquired.
+ */
+ bool dirty:1;
+
+ /**
+ * This is set if the object has been pinned due to unknown
+ * swizzling.
+ */
+ bool quirked:1;
+ } mm;
+
+ /** Breadcrumb of last rendering to the buffer.
+ * There can only be one writer, but we allow for multiple readers.
+ * If there is a writer that necessarily implies that all other
+ * read requests are complete - but we may only be lazily clearing
+ * the read requests. A read request is naturally the most recent
+ * request on a ring, so we may have two different write and read
+ * requests on one ring where the write request is older than the
+ * read request. This allows for the CPU to read from an active
+ * buffer by only waiting for the write to complete.
+ */
+ struct reservation_object *resv;
+
+ /** References from framebuffers, locks out tiling changes. */
+ unsigned long framebuffer_references;
+
+ /** Record of address bit 17 of each page at last unbind. */
+ unsigned long *bit_17;
+
+ struct i915_gem_userptr {
+ uintptr_t ptr;
+ unsigned read_only :1;
+
+ struct i915_mm_struct *mm;
+ struct i915_mmu_object *mmu_object;
+ struct work_struct *work;
+ } userptr;
+
+ /** for phys allocated objects */
+ struct drm_dma_handle *phys_handle;
+
+ struct reservation_object __builtin_resv;
+};
+
+static inline struct drm_i915_gem_object *
+to_intel_bo(struct drm_gem_object *gem)
+{
+ /* Assert that to_intel_bo(NULL) == NULL */
+ BUILD_BUG_ON(offsetof(struct drm_i915_gem_object, base));
+
+ return container_of(gem, struct drm_i915_gem_object, base);
+}
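
A standalone illustration of the container_of downcast above, including the offsetof check that makes to_intel_bo(NULL) == NULL hold; the types here are toys, not driver structures:

#include <stddef.h>
#include <assert.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct base { int refcount; };
struct derived { struct base base; int extra; };	/* base must be first */

int main(void)
{
	struct derived d = { .base = { .refcount = 1 }, .extra = 42 };
	struct base *b = &d.base;

	/* With offsetof(struct derived, base) == 0, the downcast is a
	 * plain cast, so a NULL base yields a NULL derived pointer too.
	 */
	assert(container_of(b, struct derived, base)->extra == 42);
	return 0;
}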
+
+/**
+ * i915_gem_object_lookup_rcu - look up a temporary GEM object from its handle
+ * @file: DRM file private data
+ * @handle: userspace handle
+ *
+ * Returns:
+ *
+ * A pointer to the object named by the handle if such exists on @filp, NULL
+ * otherwise. This object is only valid whilst under the RCU read lock, and
+ * note carefully the object may be in the process of being destroyed.
+ */
+static inline struct drm_i915_gem_object *
+i915_gem_object_lookup_rcu(struct drm_file *file, u32 handle)
+{
+#ifdef CONFIG_LOCKDEP
+ WARN_ON(debug_locks && !lock_is_held(&rcu_lock_map));
+#endif
+ return idr_find(&file->object_idr, handle);
+}
+
+static inline struct drm_i915_gem_object *
+i915_gem_object_lookup(struct drm_file *file, u32 handle)
+{
+ struct drm_i915_gem_object *obj;
+
+ rcu_read_lock();
+ obj = i915_gem_object_lookup_rcu(file, handle);
+ if (obj && !kref_get_unless_zero(&obj->base.refcount))
+ obj = NULL;
+ rcu_read_unlock();
+
+ return obj;
+}
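
A hedged sketch of the lockless lookup pattern above, for a hypothetical caller that only samples a field and never uses the object outside the RCU section:

static u64 handle_size(struct drm_file *file, u32 handle)
{
	struct drm_i915_gem_object *obj;
	u64 size = 0;

	rcu_read_lock();
	obj = i915_gem_object_lookup_rcu(file, handle);
	if (obj)
		size = obj->base.size;	/* valid only inside this section */
	rcu_read_unlock();

	return size;
}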
+
+__deprecated
+extern struct drm_gem_object *
+drm_gem_object_lookup(struct drm_file *file, u32 handle);
+
+__attribute__((nonnull))
+static inline struct drm_i915_gem_object *
+i915_gem_object_get(struct drm_i915_gem_object *obj)
+{
+ drm_gem_object_reference(&obj->base);
+ return obj;
+}
+
+__deprecated
+extern void drm_gem_object_reference(struct drm_gem_object *);
+
+__attribute__((nonnull))
+static inline void
+i915_gem_object_put(struct drm_i915_gem_object *obj)
+{
+ __drm_gem_object_unreference(&obj->base);
+}
+
+__deprecated
+extern void drm_gem_object_unreference(struct drm_gem_object *);
+
+__deprecated
+extern void drm_gem_object_unreference_unlocked(struct drm_gem_object *);
+
+static inline bool
+i915_gem_object_is_dead(const struct drm_i915_gem_object *obj)
+{
+ return atomic_read(&obj->base.refcount.refcount) == 0;
+}
+
+static inline bool
+i915_gem_object_has_struct_page(const struct drm_i915_gem_object *obj)
+{
+ return obj->ops->flags & I915_GEM_OBJECT_HAS_STRUCT_PAGE;
+}
+
+static inline bool
+i915_gem_object_is_shrinkable(const struct drm_i915_gem_object *obj)
+{
+ return obj->ops->flags & I915_GEM_OBJECT_IS_SHRINKABLE;
+}
+
+static inline bool
+i915_gem_object_is_active(const struct drm_i915_gem_object *obj)
+{
+ return obj->active_count;
+}
+
+static inline bool
+i915_gem_object_has_active_reference(const struct drm_i915_gem_object *obj)
+{
+ return test_bit(I915_BO_ACTIVE_REF, &obj->flags);
+}
+
+static inline void
+i915_gem_object_set_active_reference(struct drm_i915_gem_object *obj)
+{
+ lockdep_assert_held(&obj->base.dev->struct_mutex);
+ __set_bit(I915_BO_ACTIVE_REF, &obj->flags);
+}
+
+static inline void
+i915_gem_object_clear_active_reference(struct drm_i915_gem_object *obj)
+{
+ lockdep_assert_held(&obj->base.dev->struct_mutex);
+ __clear_bit(I915_BO_ACTIVE_REF, &obj->flags);
+}
+
+void __i915_gem_object_release_unless_active(struct drm_i915_gem_object *obj);
+
+static inline unsigned int
+i915_gem_object_get_tiling(struct drm_i915_gem_object *obj)
+{
+ return obj->tiling_and_stride & TILING_MASK;
+}
+
+static inline bool
+i915_gem_object_is_tiled(struct drm_i915_gem_object *obj)
+{
+ return i915_gem_object_get_tiling(obj) != I915_TILING_NONE;
+}
+
+static inline unsigned int
+i915_gem_object_get_stride(struct drm_i915_gem_object *obj)
+{
+ return obj->tiling_and_stride & STRIDE_MASK;
+}
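
A standalone sketch of the tiling_and_stride packing used by the two accessors above: the tiling mode occupies the bits below FENCE_MINIMUM_STRIDE, and the stride, always a multiple of that minimum, occupies the bits above, so one word stores both:

#include <assert.h>

#define FENCE_MINIMUM_STRIDE	128
#define TILING_MASK		(FENCE_MINIMUM_STRIDE - 1)
#define STRIDE_MASK		(~TILING_MASK)

int main(void)
{
	unsigned int tiling = 1;	/* e.g. I915_TILING_X */
	unsigned int stride = 2048;	/* multiple of the minimum stride */
	unsigned int packed = tiling | stride;

	assert((stride & TILING_MASK) == 0);	/* must not clobber tiling */
	assert((packed & TILING_MASK) == tiling);
	assert((packed & STRIDE_MASK) == stride);
	return 0;
}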
+
+static inline struct intel_engine_cs *
+i915_gem_object_last_write_engine(struct drm_i915_gem_object *obj)
+{
+ struct intel_engine_cs *engine = NULL;
+ struct dma_fence *fence;
+
+ rcu_read_lock();
+ fence = reservation_object_get_excl_rcu(obj->resv);
+ rcu_read_unlock();
+
+ if (fence && dma_fence_is_i915(fence) && !dma_fence_is_signaled(fence))
+ engine = to_request(fence)->engine;
+ dma_fence_put(fence);
+
+ return engine;
+}
+
+#endif
+
diff --git a/drivers/gpu/drm/i915/i915_gem_render_state.c b/drivers/gpu/drm/i915/i915_gem_render_state.c
index 95b7e9afd5f8..5af19b0bf713 100644
--- a/drivers/gpu/drm/i915/i915_gem_render_state.c
+++ b/drivers/gpu/drm/i915/i915_gem_render_state.c
@@ -28,17 +28,19 @@
#include "i915_drv.h"
#include "intel_renderstate.h"
-struct render_state {
+struct intel_render_state {
const struct intel_renderstate_rodata *rodata;
struct i915_vma *vma;
- u32 aux_batch_size;
- u32 aux_batch_offset;
+ u32 batch_offset;
+ u32 batch_size;
+ u32 aux_offset;
+ u32 aux_size;
};
static const struct intel_renderstate_rodata *
-render_state_get_rodata(const struct drm_i915_gem_request *req)
+render_state_get_rodata(const struct intel_engine_cs *engine)
{
- switch (INTEL_GEN(req->i915)) {
+ switch (INTEL_GEN(engine->i915)) {
case 6:
return &gen6_null_state;
case 7:
@@ -63,29 +65,26 @@ render_state_get_rodata(const struct drm_i915_gem_request *req)
*/
#define OUT_BATCH(batch, i, val) \
do { \
- if (WARN_ON((i) >= PAGE_SIZE / sizeof(u32))) { \
- ret = -ENOSPC; \
- goto err_out; \
- } \
+ if ((i) >= PAGE_SIZE / sizeof(u32)) \
+ goto err; \
(batch)[(i)++] = (val); \
} while(0)
-static int render_state_setup(struct render_state *so)
+static int render_state_setup(struct intel_render_state *so,
+ struct drm_i915_private *i915)
{
- struct drm_device *dev = so->vma->vm->dev;
const struct intel_renderstate_rodata *rodata = so->rodata;
- const bool has_64bit_reloc = INTEL_GEN(dev) >= 8;
+ struct drm_i915_gem_object *obj = so->vma->obj;
unsigned int i = 0, reloc_index = 0;
- struct page *page;
+ unsigned int needs_clflush;
u32 *d;
int ret;
- ret = i915_gem_object_set_to_cpu_domain(so->vma->obj, true);
+ ret = i915_gem_obj_prepare_shmem_write(obj, &needs_clflush);
if (ret)
return ret;
- page = i915_gem_object_get_dirty_page(so->vma->obj, 0);
- d = kmap(page);
+ d = kmap_atomic(i915_gem_object_get_dirty_page(obj, 0));
while (i < rodata->batch_items) {
u32 s = rodata->batch[i];
@@ -93,12 +92,10 @@ static int render_state_setup(struct render_state *so)
if (i * 4 == rodata->reloc[reloc_index]) {
u64 r = s + so->vma->node.start;
s = lower_32_bits(r);
- if (has_64bit_reloc) {
+ if (HAS_64BIT_RELOC(i915)) {
if (i + 1 >= rodata->batch_items ||
- rodata->batch[i + 1] != 0) {
- ret = -EINVAL;
- goto err_out;
- }
+ rodata->batch[i + 1] != 0)
+ goto err;
d[i++] = s;
s = upper_32_bits(r);
@@ -110,12 +107,20 @@ static int render_state_setup(struct render_state *so)
d[i++] = s;
}
+ if (rodata->reloc[reloc_index] != -1) {
+ DRM_ERROR("only %d relocs resolved\n", reloc_index);
+ goto err;
+ }
+
+ so->batch_offset = so->vma->node.start;
+ so->batch_size = rodata->batch_items * sizeof(u32);
+
while (i % CACHELINE_DWORDS)
OUT_BATCH(d, i, MI_NOOP);
- so->aux_batch_offset = i * sizeof(u32);
+ so->aux_offset = i * sizeof(u32);
- if (HAS_POOLED_EU(dev)) {
+ if (HAS_POOLED_EU(i915)) {
/*
* We always program 3x6 pool config but depending upon which
* subslice is disabled HW drops down to appropriate config
@@ -143,88 +148,133 @@ static int render_state_setup(struct render_state *so)
}
OUT_BATCH(d, i, MI_BATCH_BUFFER_END);
- so->aux_batch_size = (i * sizeof(u32)) - so->aux_batch_offset;
-
+ so->aux_size = i * sizeof(u32) - so->aux_offset;
+ so->aux_offset += so->batch_offset;
/*
* Since we are sending length, we need to strictly conform to
* all requirements. For Gen2 this must be a multiple of 8.
*/
- so->aux_batch_size = ALIGN(so->aux_batch_size, 8);
-
- kunmap(page);
-
- ret = i915_gem_object_set_to_gtt_domain(so->vma->obj, false);
- if (ret)
- return ret;
-
- if (rodata->reloc[reloc_index] != -1) {
- DRM_ERROR("only %d relocs resolved\n", reloc_index);
- return -EINVAL;
- }
+ so->aux_size = ALIGN(so->aux_size, 8);
- return 0;
+ if (needs_clflush)
+ drm_clflush_virt_range(d, i * sizeof(u32));
+ kunmap_atomic(d);
-err_out:
- kunmap(page);
+ ret = i915_gem_object_set_to_gtt_domain(obj, false);
+out:
+ i915_gem_obj_finish_shmem_access(obj);
return ret;
+
+err:
+ kunmap_atomic(d);
+ ret = -EINVAL;
+ goto out;
}
#undef OUT_BATCH
-int i915_gem_render_state_init(struct drm_i915_gem_request *req)
+int i915_gem_render_state_init(struct intel_engine_cs *engine)
{
- struct render_state so;
+ struct intel_render_state *so;
+ const struct intel_renderstate_rodata *rodata;
struct drm_i915_gem_object *obj;
int ret;
- if (WARN_ON(req->engine->id != RCS))
- return -ENOENT;
+ if (engine->id != RCS)
+ return 0;
- so.rodata = render_state_get_rodata(req);
- if (!so.rodata)
+ rodata = render_state_get_rodata(engine);
+ if (!rodata)
return 0;
- if (so.rodata->batch_items * 4 > 4096)
+ if (rodata->batch_items * 4 > 4096)
return -EINVAL;
- obj = i915_gem_object_create(&req->i915->drm, 4096);
- if (IS_ERR(obj))
- return PTR_ERR(obj);
+ so = kmalloc(sizeof(*so), GFP_KERNEL);
+ if (!so)
+ return -ENOMEM;
- so.vma = i915_vma_create(obj, &req->i915->ggtt.base, NULL);
- if (IS_ERR(so.vma)) {
- ret = PTR_ERR(so.vma);
- goto err_obj;
+ obj = i915_gem_object_create_internal(engine->i915, 4096);
+ if (IS_ERR(obj)) {
+ ret = PTR_ERR(obj);
+ goto err_free;
}
- ret = i915_vma_pin(so.vma, 0, 0, PIN_GLOBAL);
- if (ret)
+ so->vma = i915_vma_create(obj, &engine->i915->ggtt.base, NULL);
+ if (IS_ERR(so->vma)) {
+ ret = PTR_ERR(so->vma);
goto err_obj;
+ }
+
+ so->rodata = rodata;
+ engine->render_state = so;
+ return 0;
+
+err_obj:
+ i915_gem_object_put(obj);
+err_free:
+ kfree(so);
+ return ret;
+}
+
+int i915_gem_render_state_emit(struct drm_i915_gem_request *req)
+{
+ struct intel_render_state *so;
+ int ret;
+
+ lockdep_assert_held(&req->i915->drm.struct_mutex);
- ret = render_state_setup(&so);
+ so = req->engine->render_state;
+ if (!so)
+ return 0;
+
+ /* Recreate the page after shrinking */
+ if (!so->vma->obj->mm.pages)
+ so->batch_offset = -1;
+
+ ret = i915_vma_pin(so->vma, 0, 0, PIN_GLOBAL | PIN_HIGH);
if (ret)
- goto err_unpin;
+ return ret;
+
+ if (so->vma->node.start != so->batch_offset) {
+ ret = render_state_setup(so, req->i915);
+ if (ret)
+ goto err_unpin;
+ }
- ret = req->engine->emit_bb_start(req, so.vma->node.start,
- so.rodata->batch_items * 4,
+ ret = req->engine->emit_bb_start(req,
+ so->batch_offset, so->batch_size,
I915_DISPATCH_SECURE);
if (ret)
goto err_unpin;
- if (so.aux_batch_size > 8) {
+ if (so->aux_size > 8) {
ret = req->engine->emit_bb_start(req,
- (so.vma->node.start +
- so.aux_batch_offset),
- so.aux_batch_size,
+ so->aux_offset, so->aux_size,
I915_DISPATCH_SECURE);
if (ret)
goto err_unpin;
}
- i915_vma_move_to_active(so.vma, req, 0);
+ i915_vma_move_to_active(so->vma, req, 0);
err_unpin:
- i915_vma_unpin(so.vma);
-err_obj:
- i915_gem_object_put(obj);
+ i915_vma_unpin(so->vma);
return ret;
}
+
+void i915_gem_render_state_fini(struct intel_engine_cs *engine)
+{
+ struct intel_render_state *so;
+ struct drm_i915_gem_object *obj;
+
+ so = fetch_and_zero(&engine->render_state);
+ if (!so)
+ return;
+
+ obj = so->vma->obj;
+
+ i915_vma_close(so->vma);
+ __i915_gem_object_release_unless_active(obj);
+
+ kfree(so);
+}
diff --git a/drivers/gpu/drm/i915/i915_gem_render_state.h b/drivers/gpu/drm/i915/i915_gem_render_state.h
index 18cce3f06e9c..87481845799d 100644
--- a/drivers/gpu/drm/i915/i915_gem_render_state.h
+++ b/drivers/gpu/drm/i915/i915_gem_render_state.h
@@ -26,6 +26,8 @@
struct drm_i915_gem_request;
-int i915_gem_render_state_init(struct drm_i915_gem_request *req);
+int i915_gem_render_state_init(struct intel_engine_cs *engine);
+int i915_gem_render_state_emit(struct drm_i915_gem_request *req);
+void i915_gem_render_state_fini(struct intel_engine_cs *engine);
#endif /* _I915_GEM_RENDER_STATE_H_ */
diff --git a/drivers/gpu/drm/i915/i915_gem_request.c b/drivers/gpu/drm/i915/i915_gem_request.c
index 8832f8ec1583..b8f403faadbb 100644
--- a/drivers/gpu/drm/i915/i915_gem_request.c
+++ b/drivers/gpu/drm/i915/i915_gem_request.c
@@ -23,31 +23,26 @@
*/
#include <linux/prefetch.h>
+#include <linux/dma-fence-array.h>
#include "i915_drv.h"
-static const char *i915_fence_get_driver_name(struct fence *fence)
+static const char *i915_fence_get_driver_name(struct dma_fence *fence)
{
return "i915";
}
-static const char *i915_fence_get_timeline_name(struct fence *fence)
+static const char *i915_fence_get_timeline_name(struct dma_fence *fence)
{
- /* Timelines are bound by eviction to a VM. However, since
- * we only have a global seqno at the moment, we only have
- * a single timeline. Note that each timeline will have
- * multiple execution contexts (fence contexts) as we allow
- * engines within a single timeline to execute in parallel.
- */
- return "global";
+ return to_request(fence)->timeline->common->name;
}
-static bool i915_fence_signaled(struct fence *fence)
+static bool i915_fence_signaled(struct dma_fence *fence)
{
return i915_gem_request_completed(to_request(fence));
}
-static bool i915_fence_enable_signaling(struct fence *fence)
+static bool i915_fence_enable_signaling(struct dma_fence *fence)
{
if (i915_fence_signaled(fence))
return false;
@@ -56,63 +51,27 @@ static bool i915_fence_enable_signaling(struct fence *fence)
return true;
}
-static signed long i915_fence_wait(struct fence *fence,
+static signed long i915_fence_wait(struct dma_fence *fence,
bool interruptible,
- signed long timeout_jiffies)
+ signed long timeout)
{
- s64 timeout_ns, *timeout;
- int ret;
-
- if (timeout_jiffies != MAX_SCHEDULE_TIMEOUT) {
- timeout_ns = jiffies_to_nsecs(timeout_jiffies);
- timeout = &timeout_ns;
- } else {
- timeout = NULL;
- }
-
- ret = i915_wait_request(to_request(fence),
- interruptible, timeout,
- NO_WAITBOOST);
- if (ret == -ETIME)
- return 0;
-
- if (ret < 0)
- return ret;
-
- if (timeout_jiffies != MAX_SCHEDULE_TIMEOUT)
- timeout_jiffies = nsecs_to_jiffies(timeout_ns);
-
- return timeout_jiffies;
+ return i915_wait_request(to_request(fence), interruptible, timeout);
}
-static void i915_fence_value_str(struct fence *fence, char *str, int size)
-{
- snprintf(str, size, "%u", fence->seqno);
-}
-
-static void i915_fence_timeline_value_str(struct fence *fence, char *str,
- int size)
-{
- snprintf(str, size, "%u",
- intel_engine_get_seqno(to_request(fence)->engine));
-}
-
-static void i915_fence_release(struct fence *fence)
+static void i915_fence_release(struct dma_fence *fence)
{
struct drm_i915_gem_request *req = to_request(fence);
kmem_cache_free(req->i915->requests, req);
}
-const struct fence_ops i915_fence_ops = {
+const struct dma_fence_ops i915_fence_ops = {
.get_driver_name = i915_fence_get_driver_name,
.get_timeline_name = i915_fence_get_timeline_name,
.enable_signaling = i915_fence_enable_signaling,
.signaled = i915_fence_signaled,
.wait = i915_fence_wait,
.release = i915_fence_release,
- .fence_value_str = i915_fence_value_str,
- .timeline_value_str = i915_fence_timeline_value_str,
};
int i915_gem_request_add_to_client(struct drm_i915_gem_request *req,
@@ -154,6 +113,82 @@ i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
spin_unlock(&file_priv->mm.lock);
}
+static struct i915_dependency *
+i915_dependency_alloc(struct drm_i915_private *i915)
+{
+ return kmem_cache_alloc(i915->dependencies, GFP_KERNEL);
+}
+
+static void
+i915_dependency_free(struct drm_i915_private *i915,
+ struct i915_dependency *dep)
+{
+ kmem_cache_free(i915->dependencies, dep);
+}
+
+static void
+__i915_priotree_add_dependency(struct i915_priotree *pt,
+ struct i915_priotree *signal,
+ struct i915_dependency *dep,
+ unsigned long flags)
+{
+ INIT_LIST_HEAD(&dep->dfs_link);
+ list_add(&dep->wait_link, &signal->waiters_list);
+ list_add(&dep->signal_link, &pt->signalers_list);
+ dep->signaler = signal;
+ dep->flags = flags;
+}
+
+static int
+i915_priotree_add_dependency(struct drm_i915_private *i915,
+ struct i915_priotree *pt,
+ struct i915_priotree *signal)
+{
+ struct i915_dependency *dep;
+
+ dep = i915_dependency_alloc(i915);
+ if (!dep)
+ return -ENOMEM;
+
+ __i915_priotree_add_dependency(pt, signal, dep, I915_DEPENDENCY_ALLOC);
+ return 0;
+}
+
+static void
+i915_priotree_fini(struct drm_i915_private *i915, struct i915_priotree *pt)
+{
+ struct i915_dependency *dep, *next;
+
+ GEM_BUG_ON(!RB_EMPTY_NODE(&pt->node));
+
+ /* Everyone we depended upon (the fences we wait to be signaled)
+ * should retire before us and remove themselves from our list.
+ * However, retirement is run independently on each timeline and
+ * so we may be called out-of-order.
+ */
+ list_for_each_entry_safe(dep, next, &pt->signalers_list, signal_link) {
+ list_del(&dep->wait_link);
+ if (dep->flags & I915_DEPENDENCY_ALLOC)
+ i915_dependency_free(i915, dep);
+ }
+
+ /* Remove ourselves from everyone who depends upon us */
+ list_for_each_entry_safe(dep, next, &pt->waiters_list, wait_link) {
+ list_del(&dep->signal_link);
+ if (dep->flags & I915_DEPENDENCY_ALLOC)
+ i915_dependency_free(i915, dep);
+ }
+}
+
+static void
+i915_priotree_init(struct i915_priotree *pt)
+{
+ INIT_LIST_HEAD(&pt->signalers_list);
+ INIT_LIST_HEAD(&pt->waiters_list);
+ RB_CLEAR_NODE(&pt->node);
+ pt->priority = INT_MIN;
+}
+
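A hedged sketch of how the helpers above record an ordering edge between two requests; chain_requests is hypothetical, and a real caller would propagate the -ENOMEM instead of warning:

static void chain_requests(struct drm_i915_private *i915,
			   struct drm_i915_gem_request *prev,
			   struct drm_i915_gem_request *next)
{
	/* next must not run before prev: the edge is recorded on both
	 * sides (next->priotree.signalers_list, prev->priotree.waiters_list)
	 * so the scheduler can walk the waiters when boosting priority.
	 */
	WARN_ON(i915_priotree_add_dependency(i915,
					     &next->priotree,
					     &prev->priotree));
}
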
void i915_gem_retire_noop(struct i915_gem_active *active,
struct drm_i915_gem_request *request)
{
@@ -164,8 +199,17 @@ static void i915_gem_request_retire(struct drm_i915_gem_request *request)
{
struct i915_gem_active *active, *next;
+ lockdep_assert_held(&request->i915->drm.struct_mutex);
+ GEM_BUG_ON(!i915_sw_fence_signaled(&request->submit));
+ GEM_BUG_ON(!i915_sw_fence_signaled(&request->execute));
+ GEM_BUG_ON(!i915_gem_request_completed(request));
+ GEM_BUG_ON(!request->i915->gt.active_requests);
+
trace_i915_gem_request_retire(request);
- list_del(&request->link);
+
+ spin_lock_irq(&request->engine->timeline->lock);
+ list_del_init(&request->link);
+ spin_unlock_irq(&request->engine->timeline->lock);
/* We know the GPU must have read the request to have
* sent us the seqno + interrupt, so use the position
@@ -177,6 +221,12 @@ static void i915_gem_request_retire(struct drm_i915_gem_request *request)
*/
list_del(&request->ring_link);
request->ring->last_retired_head = request->postfix;
+ if (!--request->i915->gt.active_requests) {
+ GEM_BUG_ON(!request->i915->gt.awake);
+ mod_delayed_work(request->i915->wq,
+ &request->i915->gt.idle_work,
+ msecs_to_jiffies(100));
+ }
/* Walk through the active list, calling retire on each. This allows
* objects to track their GPU activity and mark themselves as idle
@@ -214,6 +264,10 @@ static void i915_gem_request_retire(struct drm_i915_gem_request *request)
}
i915_gem_context_put(request->ctx);
+
+ dma_fence_signal(&request->fence);
+
+ i915_priotree_fini(request->i915, &request->priotree);
i915_gem_request_put(request);
}
@@ -223,10 +277,11 @@ void i915_gem_request_retire_upto(struct drm_i915_gem_request *req)
struct drm_i915_gem_request *tmp;
lockdep_assert_held(&req->i915->drm.struct_mutex);
- GEM_BUG_ON(list_empty(&req->link));
+ if (list_empty(&req->link))
+ return;
do {
- tmp = list_first_entry(&engine->request_list,
+ tmp = list_first_entry(&engine->timeline->requests,
typeof(*tmp), link);
i915_gem_request_retire(tmp);
@@ -253,39 +308,50 @@ static int i915_gem_check_wedge(struct drm_i915_private *dev_priv)
return 0;
}
-static int i915_gem_init_seqno(struct drm_i915_private *dev_priv, u32 seqno)
+static int i915_gem_init_global_seqno(struct drm_i915_private *i915, u32 seqno)
{
+ struct i915_gem_timeline *timeline = &i915->gt.global_timeline;
struct intel_engine_cs *engine;
+ enum intel_engine_id id;
int ret;
/* Carefully retire all requests without writing to the rings */
- for_each_engine(engine, dev_priv) {
- ret = intel_engine_idle(engine,
- I915_WAIT_INTERRUPTIBLE |
- I915_WAIT_LOCKED);
- if (ret)
- return ret;
- }
- i915_gem_retire_requests(dev_priv);
+ ret = i915_gem_wait_for_idle(i915,
+ I915_WAIT_INTERRUPTIBLE |
+ I915_WAIT_LOCKED);
+ if (ret)
+ return ret;
+
+ i915_gem_retire_requests(i915);
+ GEM_BUG_ON(i915->gt.active_requests > 1);
/* If the seqno wraps around, we need to clear the breadcrumb rbtree */
- if (!i915_seqno_passed(seqno, dev_priv->next_seqno)) {
- while (intel_kick_waiters(dev_priv) ||
- intel_kick_signalers(dev_priv))
- yield();
+ if (!i915_seqno_passed(seqno, atomic_read(&timeline->next_seqno))) {
+ while (intel_breadcrumbs_busy(i915))
+ cond_resched(); /* spin until threads are complete */
}
+ atomic_set(&timeline->next_seqno, seqno);
/* Finally reset hw state */
- for_each_engine(engine, dev_priv)
- intel_engine_init_seqno(engine, seqno);
+ for_each_engine(engine, i915, id)
+ intel_engine_init_global_seqno(engine, seqno);
+
+ list_for_each_entry(timeline, &i915->gt.timelines, link) {
+ for_each_engine(engine, i915, id) {
+ struct intel_timeline *tl = &timeline->engine[id];
+
+ memset(tl->sync_seqno, 0, sizeof(tl->sync_seqno));
+ }
+ }
return 0;
}
-int i915_gem_set_seqno(struct drm_device *dev, u32 seqno)
+int i915_gem_set_global_seqno(struct drm_device *dev, u32 seqno)
{
struct drm_i915_private *dev_priv = to_i915(dev);
- int ret;
+
+ lockdep_assert_held(&dev_priv->drm.struct_mutex);
if (seqno == 0)
return -EINVAL;
@@ -293,29 +359,87 @@ int i915_gem_set_seqno(struct drm_device *dev, u32 seqno)
/* HWS page needs to be set less than what we
* will inject to ring
*/
- ret = i915_gem_init_seqno(dev_priv, seqno - 1);
- if (ret)
+ return i915_gem_init_global_seqno(dev_priv, seqno - 1);
+}
+
+static int reserve_global_seqno(struct drm_i915_private *i915)
+{
+ u32 active_requests = ++i915->gt.active_requests;
+ u32 next_seqno = atomic_read(&i915->gt.global_timeline.next_seqno);
+ int ret;
+
+ /* Reservation is fine until we need to wrap around */
+ if (likely(next_seqno + active_requests > next_seqno))
+ return 0;
+
+ ret = i915_gem_init_global_seqno(i915, 0);
+ if (ret) {
+ i915->gt.active_requests--;
return ret;
+ }
- dev_priv->next_seqno = seqno;
return 0;
}
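
A standalone sketch of the wrap test in reserve_global_seqno() above: with unsigned arithmetic, a + b > a is false exactly when the addition overflows, which is how the code detects that the reserved block of seqnos would wrap:

#include <assert.h>
#include <stdint.h>

static int fits_without_wrap(uint32_t next_seqno, uint32_t reserved)
{
	/* False when next_seqno + reserved overflows past ~0U. */
	return next_seqno + reserved > next_seqno;
}

int main(void)
{
	assert(fits_without_wrap(100, 5));		/* plenty of room */
	assert(!fits_without_wrap(UINT32_MAX - 2, 5));	/* would wrap */
	return 0;
}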
-static int i915_gem_get_seqno(struct drm_i915_private *dev_priv, u32 *seqno)
+static u32 __timeline_get_seqno(struct i915_gem_timeline *tl)
{
- /* reserve 0 for non-seqno */
- if (unlikely(dev_priv->next_seqno == 0)) {
- int ret;
+ /* next_seqno only incremented under a mutex */
+ return ++tl->next_seqno.counter;
+}
- ret = i915_gem_init_seqno(dev_priv, 0);
- if (ret)
- return ret;
+static u32 timeline_get_seqno(struct i915_gem_timeline *tl)
+{
+ return atomic_inc_return(&tl->next_seqno);
+}
- dev_priv->next_seqno = 1;
- }
+void __i915_gem_request_submit(struct drm_i915_gem_request *request)
+{
+ struct intel_engine_cs *engine = request->engine;
+ struct intel_timeline *timeline;
+ u32 seqno;
- *seqno = dev_priv->next_seqno++;
- return 0;
+ /* Transfer from per-context onto the global per-engine timeline */
+ timeline = engine->timeline;
+ GEM_BUG_ON(timeline == request->timeline);
+ assert_spin_locked(&timeline->lock);
+
+ seqno = timeline_get_seqno(timeline->common);
+ GEM_BUG_ON(!seqno);
+ GEM_BUG_ON(i915_seqno_passed(intel_engine_get_seqno(engine), seqno));
+
+ GEM_BUG_ON(i915_seqno_passed(timeline->last_submitted_seqno, seqno));
+ request->previous_seqno = timeline->last_submitted_seqno;
+ timeline->last_submitted_seqno = seqno;
+
+ /* We may be recursing from the signal callback of another i915 fence */
+ spin_lock_nested(&request->lock, SINGLE_DEPTH_NESTING);
+ request->global_seqno = seqno;
+ if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &request->fence.flags))
+ intel_engine_enable_signaling(request);
+ spin_unlock(&request->lock);
+
+ GEM_BUG_ON(!request->global_seqno);
+ engine->emit_breadcrumb(request,
+ request->ring->vaddr + request->postfix);
+
+ spin_lock(&request->timeline->lock);
+ list_move_tail(&request->link, &timeline->requests);
+ spin_unlock(&request->timeline->lock);
+
+ i915_sw_fence_commit(&request->execute);
+}
+
+void i915_gem_request_submit(struct drm_i915_gem_request *request)
+{
+ struct intel_engine_cs *engine = request->engine;
+ unsigned long flags;
+
+ /* Will be called from irq-context when using foreign fences. */
+ spin_lock_irqsave(&engine->timeline->lock, flags);
+
+ __i915_gem_request_submit(request);
+
+ spin_unlock_irqrestore(&engine->timeline->lock, flags);
}
static int __i915_sw_fence_call
@@ -324,15 +448,31 @@ submit_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
struct drm_i915_gem_request *request =
container_of(fence, typeof(*request), submit);
- /* Will be called from irq-context when using foreign DMA fences */
-
switch (state) {
case FENCE_COMPLETE:
- request->engine->last_submitted_seqno = request->fence.seqno;
request->engine->submit_request(request);
break;
case FENCE_FREE:
+ i915_gem_request_put(request);
+ break;
+ }
+
+ return NOTIFY_DONE;
+}
+
+static int __i915_sw_fence_call
+execute_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
+{
+ struct drm_i915_gem_request *request =
+ container_of(fence, typeof(*request), execute);
+
+ switch (state) {
+ case FENCE_COMPLETE:
+ break;
+
+ case FENCE_FREE:
+ i915_gem_request_put(request);
break;
}
@@ -357,9 +497,10 @@ i915_gem_request_alloc(struct intel_engine_cs *engine,
{
struct drm_i915_private *dev_priv = engine->i915;
struct drm_i915_gem_request *req;
- u32 seqno;
int ret;
+ lockdep_assert_held(&dev_priv->drm.struct_mutex);
+
/* ABI: Before userspace accesses the GPU (e.g. execbuffer), report
* EIO if the GPU is already wedged, or EAGAIN to drop the struct_mutex
* and restart.
@@ -368,10 +509,14 @@ i915_gem_request_alloc(struct intel_engine_cs *engine,
if (ret)
return ERR_PTR(ret);
+ ret = reserve_global_seqno(dev_priv);
+ if (ret)
+ return ERR_PTR(ret);
+
/* Move the oldest request to the slab-cache (if not in use!) */
- req = list_first_entry_or_null(&engine->request_list,
+ req = list_first_entry_or_null(&engine->timeline->requests,
typeof(*req), link);
- if (req && i915_gem_request_completed(req))
+ if (req && __i915_gem_request_completed(req))
i915_gem_request_retire(req);
/* Beware: Dragons be flying overhead.
@@ -382,13 +527,13 @@ i915_gem_request_alloc(struct intel_engine_cs *engine,
* of being read by __i915_gem_active_get_rcu(). As such,
* we have to be very careful when overwriting the contents. During
* the RCU lookup, we chase the request->engine pointer,
- * read the request->fence.seqno and increment the reference count.
+ * read the request->global_seqno and increment the reference count.
*
* The reference count is incremented atomically. If it is zero,
* the lookup knows the request is unallocated and complete. Otherwise,
* it is either still in use, or has been reallocated and reset
- * with fence_init(). This increment is safe for release as we check
- * that the request we have a reference to and matches the active
+ * with dma_fence_init(). This increment is safe for release as we
+ * check that the request we have a reference to and matches the active
* request.
*
* Before we increment the refcount, we chase the request->engine
@@ -403,21 +548,32 @@ i915_gem_request_alloc(struct intel_engine_cs *engine,
* Do not use kmem_cache_zalloc() here!
*/
req = kmem_cache_alloc(dev_priv->requests, GFP_KERNEL);
- if (!req)
- return ERR_PTR(-ENOMEM);
+ if (!req) {
+ ret = -ENOMEM;
+ goto err_unreserve;
+ }
- ret = i915_gem_get_seqno(dev_priv, &seqno);
- if (ret)
- goto err;
+ req->timeline = i915_gem_context_lookup_timeline(ctx, engine);
+ GEM_BUG_ON(req->timeline == engine->timeline);
spin_lock_init(&req->lock);
- fence_init(&req->fence,
- &i915_fence_ops,
- &req->lock,
- engine->fence_context,
- seqno);
+ dma_fence_init(&req->fence,
+ &i915_fence_ops,
+ &req->lock,
+ req->timeline->fence_context,
+ __timeline_get_seqno(req->timeline->common));
+
+ /* We bump the ref for the fence chain */
+ i915_sw_fence_init(&i915_gem_request_get(req)->submit, submit_notify);
+ i915_sw_fence_init(&i915_gem_request_get(req)->execute, execute_notify);
+
+ /* Ensure that the execute fence completes after the submit fence -
+ * as we complete the execute fence from within the submit fence
+ * callback, its completion would otherwise be visible first.
+ */
+ i915_sw_fence_await_sw_fence(&req->execute, &req->submit, &req->execq);
- i915_sw_fence_init(&req->submit, submit_notify);
+ i915_priotree_init(&req->priotree);
INIT_LIST_HEAD(&req->active_list);
req->i915 = dev_priv;
@@ -425,6 +581,7 @@ i915_gem_request_alloc(struct intel_engine_cs *engine,
req->ctx = i915_gem_context_get(ctx);
/* No zalloc, must clear what we need by hand */
+ req->global_seqno = 0;
req->previous_context = NULL;
req->file_priv = NULL;
req->batch = NULL;
@@ -437,6 +594,7 @@ i915_gem_request_alloc(struct intel_engine_cs *engine,
* away, e.g. because a GPU scheduler has deferred it.
*/
req->reserved_space = MIN_SPACE_FOR_ADD_REQUEST;
+ GEM_BUG_ON(req->reserved_space < engine->emit_breadcrumb_sz);
if (i915.enable_execlists)
ret = intel_logical_ring_alloc_request_extras(req);
@@ -456,8 +614,9 @@ i915_gem_request_alloc(struct intel_engine_cs *engine,
err_ctx:
i915_gem_context_put(ctx);
-err:
kmem_cache_free(dev_priv->requests, req);
+err_unreserve:
+ dev_priv->gt.active_requests--;
return ERR_PTR(ret);
}
@@ -465,15 +624,36 @@ static int
i915_gem_request_await_request(struct drm_i915_gem_request *to,
struct drm_i915_gem_request *from)
{
- int idx, ret;
+ int ret;
GEM_BUG_ON(to == from);
- if (to->engine == from->engine)
+ if (to->engine->schedule) {
+ ret = i915_priotree_add_dependency(to->i915,
+ &to->priotree,
+ &from->priotree);
+ if (ret < 0)
+ return ret;
+ }
+
+ if (to->timeline == from->timeline)
return 0;
- idx = intel_engine_sync_index(from->engine, to->engine);
- if (from->fence.seqno <= from->engine->semaphore.sync_seqno[idx])
+ if (to->engine == from->engine) {
+ ret = i915_sw_fence_await_sw_fence_gfp(&to->submit,
+ &from->submit,
+ GFP_KERNEL);
+ return ret < 0 ? ret : 0;
+ }
+
+ if (!from->global_seqno) {
+ ret = i915_sw_fence_await_dma_fence(&to->submit,
+ &from->fence, 0,
+ GFP_KERNEL);
+ return ret < 0 ? ret : 0;
+ }
+
+ if (from->global_seqno <= to->timeline->sync_seqno[from->engine->id])
return 0;
trace_i915_gem_ring_sync_to(to, from);
@@ -491,7 +671,54 @@ i915_gem_request_await_request(struct drm_i915_gem_request *to,
return ret;
}
- from->engine->semaphore.sync_seqno[idx] = from->fence.seqno;
+ to->timeline->sync_seqno[from->engine->id] = from->global_seqno;
+ return 0;
+}
+
+int
+i915_gem_request_await_dma_fence(struct drm_i915_gem_request *req,
+ struct dma_fence *fence)
+{
+ struct dma_fence_array *array;
+ int ret;
+ int i;
+
+ if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
+ return 0;
+
+ if (dma_fence_is_i915(fence))
+ return i915_gem_request_await_request(req, to_request(fence));
+
+ if (!dma_fence_is_array(fence)) {
+ ret = i915_sw_fence_await_dma_fence(&req->submit,
+ fence, I915_FENCE_TIMEOUT,
+ GFP_KERNEL);
+ return ret < 0 ? ret : 0;
+ }
+
+ /* Note that if the fence-array was created in signal-on-any mode,
+ * we should *not* decompose it into its individual fences. However,
+ * we don't currently store which mode the fence-array is operating
+ * in. Fortunately, the only user of signal-on-any is private to
+ * amdgpu and we should not see any incoming fence-array from
+ * sync-file being in signal-on-any mode.
+ */
+
+ array = to_dma_fence_array(fence);
+ for (i = 0; i < array->num_fences; i++) {
+ struct dma_fence *child = array->fences[i];
+
+ if (dma_fence_is_i915(child))
+ ret = i915_gem_request_await_request(req,
+ to_request(child));
+ else
+ ret = i915_sw_fence_await_dma_fence(&req->submit,
+ child, I915_FENCE_TIMEOUT,
+ GFP_KERNEL);
+ if (ret < 0)
+ return ret;
+ }
+
return 0;
}
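
A hedged usage sketch of the helper above, e.g. for a fence handed in through a sync_file; await_external is a hypothetical wrapper:

static int await_external(struct drm_i915_gem_request *req,
			  struct dma_fence *in_fence)
{
	int err;

	err = i915_gem_request_await_dma_fence(req, in_fence);
	dma_fence_put(in_fence);	/* done with our reference */
	return err;
}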
@@ -520,43 +747,52 @@ i915_gem_request_await_object(struct drm_i915_gem_request *to,
struct drm_i915_gem_object *obj,
bool write)
{
- struct i915_gem_active *active;
- unsigned long active_mask;
- int idx;
+ struct dma_fence *excl;
+ int ret = 0;
if (write) {
- active_mask = i915_gem_object_get_active(obj);
- active = obj->last_read;
+ struct dma_fence **shared;
+ unsigned int count, i;
+
+ ret = reservation_object_get_fences_rcu(obj->resv,
+ &excl, &count, &shared);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < count; i++) {
+ ret = i915_gem_request_await_dma_fence(to, shared[i]);
+ if (ret)
+ break;
+
+ dma_fence_put(shared[i]);
+ }
+
+ for (; i < count; i++)
+ dma_fence_put(shared[i]);
+ kfree(shared);
} else {
- active_mask = 1;
- active = &obj->last_write;
+ excl = reservation_object_get_excl_rcu(obj->resv);
}
- for_each_active(active_mask, idx) {
- struct drm_i915_gem_request *request;
- int ret;
-
- request = i915_gem_active_peek(&active[idx],
- &obj->base.dev->struct_mutex);
- if (!request)
- continue;
+ if (excl) {
+ if (ret == 0)
+ ret = i915_gem_request_await_dma_fence(to, excl);
- ret = i915_gem_request_await_request(to, request);
- if (ret)
- return ret;
+ dma_fence_put(excl);
}
- return 0;
+ return ret;
}
static void i915_gem_mark_busy(const struct intel_engine_cs *engine)
{
struct drm_i915_private *dev_priv = engine->i915;
- dev_priv->gt.active_engines |= intel_engine_flag(engine);
if (dev_priv->gt.awake)
return;
+ GEM_BUG_ON(!dev_priv->gt.active_requests);
+
intel_runtime_pm_get_noresume(dev_priv);
dev_priv->gt.awake = true;
@@ -579,11 +815,11 @@ void __i915_add_request(struct drm_i915_gem_request *request, bool flush_caches)
{
struct intel_engine_cs *engine = request->engine;
struct intel_ring *ring = request->ring;
+ struct intel_timeline *timeline = request->timeline;
struct drm_i915_gem_request *prev;
- u32 request_start;
- u32 reserved_tail;
- int ret;
+ int err;
+ lockdep_assert_held(&request->i915->drm.struct_mutex);
trace_i915_gem_request_add(request);
/*
@@ -591,8 +827,6 @@ void __i915_add_request(struct drm_i915_gem_request *request, bool flush_caches)
* should already have been reserved in the ring buffer. Let the ring
* know that it is time to use that space up.
*/
- request_start = ring->tail;
- reserved_tail = request->reserved_space;
request->reserved_space = 0;
/*
@@ -603,10 +837,10 @@ void __i915_add_request(struct drm_i915_gem_request *request, bool flush_caches)
* what.
*/
if (flush_caches) {
- ret = engine->emit_flush(request, EMIT_FLUSH);
+ err = engine->emit_flush(request, EMIT_FLUSH);
/* Not allowed to fail! */
- WARN(ret, "engine->emit_flush() failed: %d!\n", ret);
+ WARN(err, "engine->emit_flush() failed: %d!\n", err);
}
/* Record the position of the start of the breadcrumb so that
@@ -614,20 +848,10 @@ void __i915_add_request(struct drm_i915_gem_request *request, bool flush_caches)
* GPU processing the request, we never over-estimate the
* position of the ring's HEAD.
*/
+ err = intel_ring_begin(request, engine->emit_breadcrumb_sz);
+ GEM_BUG_ON(err);
request->postfix = ring->tail;
-
- /* Not allowed to fail! */
- ret = engine->emit_request(request);
- WARN(ret, "(%s)->emit_request failed: %d!\n", engine->name, ret);
-
- /* Sanity check that the reserved size was large enough. */
- ret = ring->tail - request_start;
- if (ret < 0)
- ret += ring->size;
- WARN_ONCE(ret > reserved_tail,
- "Not enough space reserved (%d bytes) "
- "for adding the request (%d bytes)\n",
- reserved_tail, ret);
+ ring->tail += engine->emit_breadcrumb_sz * sizeof(u32);
/* Seal the request and mark it as pending execution. Note that
* we may inspect this state, without holding any locks, during
@@ -635,21 +859,46 @@ void __i915_add_request(struct drm_i915_gem_request *request, bool flush_caches)
* see a more recent value in the hws than we are tracking.
*/
- prev = i915_gem_active_raw(&engine->last_request,
+ prev = i915_gem_active_raw(&timeline->last_request,
&request->i915->drm.struct_mutex);
- if (prev)
+ if (prev) {
i915_sw_fence_await_sw_fence(&request->submit, &prev->submit,
&request->submitq);
+ if (engine->schedule)
+ __i915_priotree_add_dependency(&request->priotree,
+ &prev->priotree,
+ &request->dep,
+ 0);
+ }
+
+ spin_lock_irq(&timeline->lock);
+ list_add_tail(&request->link, &timeline->requests);
+ spin_unlock_irq(&timeline->lock);
+
+ GEM_BUG_ON(i915_seqno_passed(timeline->last_submitted_seqno,
+ request->fence.seqno));
+
+ timeline->last_submitted_seqno = request->fence.seqno;
+ i915_gem_active_set(&timeline->last_request, request);
- request->emitted_jiffies = jiffies;
- request->previous_seqno = engine->last_pending_seqno;
- engine->last_pending_seqno = request->fence.seqno;
- i915_gem_active_set(&engine->last_request, request);
- list_add_tail(&request->link, &engine->request_list);
list_add_tail(&request->ring_link, &ring->request_list);
+ request->emitted_jiffies = jiffies;
i915_gem_mark_busy(engine);
+ /* Let the backend know a new request has arrived that may need
+ * to adjust the existing execution schedule due to a high priority
+ * request - i.e. we may want to preempt the current request in order
+ * to run a high priority dependency chain *before* we can execute this
+ * request.
+ *
+ * This is called before the request is ready to run so that we can
+ * decide whether to preempt the entire chain so that it is ready to
+ * run at the earliest possible convenience.
+ */
+ if (engine->schedule)
+ engine->schedule(request, request->ctx->priority);
+
local_bh_disable();
i915_sw_fence_commit(&request->submit);
local_bh_enable(); /* Kick the execlists tasklet if just scheduled */
@@ -714,7 +963,7 @@ bool __i915_spin_request(const struct drm_i915_gem_request *req,
timeout_us += local_clock_us(&cpu);
do {
- if (i915_gem_request_completed(req))
+ if (__i915_gem_request_completed(req))
return true;
if (signal_pending_state(state, current))
@@ -723,82 +972,108 @@ bool __i915_spin_request(const struct drm_i915_gem_request *req,
if (busywait_stop(timeout_us, cpu))
break;
- cpu_relax_lowlatency();
+ cpu_relax();
} while (!need_resched());
return false;
}
+static long
+__i915_request_wait_for_execute(struct drm_i915_gem_request *request,
+ unsigned int flags,
+ long timeout)
+{
+ const int state = flags & I915_WAIT_INTERRUPTIBLE ?
+ TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE;
+ wait_queue_head_t *q = &request->i915->gpu_error.wait_queue;
+ DEFINE_WAIT(reset);
+ DEFINE_WAIT(wait);
+
+ if (flags & I915_WAIT_LOCKED)
+ add_wait_queue(q, &reset);
+
+ do {
+ prepare_to_wait(&request->execute.wait, &wait, state);
+
+ if (i915_sw_fence_done(&request->execute))
+ break;
+
+ if (flags & I915_WAIT_LOCKED &&
+ i915_reset_in_progress(&request->i915->gpu_error)) {
+ __set_current_state(TASK_RUNNING);
+ i915_reset(request->i915);
+ reset_wait_queue(q, &reset);
+ continue;
+ }
+
+ if (signal_pending_state(state, current)) {
+ timeout = -ERESTARTSYS;
+ break;
+ }
+
+ timeout = io_schedule_timeout(timeout);
+ } while (timeout);
+ finish_wait(&request->execute.wait, &wait);
+
+ if (flags & I915_WAIT_LOCKED)
+ remove_wait_queue(q, &reset);
+
+ return timeout;
+}
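
Reduced to its skeleton, the loop above is the classic lost-wakeup-safe wait pattern: publish the waiter on the queue before testing the condition. A hedged sketch, ignoring the interruptible and reset handling:

static long wait_for_fence(struct i915_sw_fence *fence, long timeout)
{
	DEFINE_WAIT(wait);

	do {
		/* Queue ourselves first so a wakeup between the test
		 * and the sleep cannot be lost.
		 */
		prepare_to_wait(&fence->wait, &wait, TASK_UNINTERRUPTIBLE);
		if (i915_sw_fence_done(fence))
			break;
		timeout = io_schedule_timeout(timeout);
	} while (timeout);
	finish_wait(&fence->wait, &wait);

	return timeout;
}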
+
/**
* i915_wait_request - wait until execution of request has finished
- * @req: duh!
+ * @req: the request to wait upon
* @flags: how to wait
- * @timeout: in - how long to wait (NULL forever); out - how much time remaining
- * @rps: client to charge for RPS boosting
+ * @timeout: how long to wait in jiffies
+ *
+ * i915_wait_request() waits for the request to be completed, for a
+ * maximum of @timeout jiffies (with MAX_SCHEDULE_TIMEOUT implying an
+ * unbounded wait).
*
- * Note: It is of utmost importance that the passed in seqno and reset_counter
- * values have been read by the caller in an smp safe manner. Where read-side
- * locks are involved, it is sufficient to read the reset_counter before
- * unlocking the lock that protects the seqno. For lockless tricks, the
- * reset_counter _must_ be read before, and an appropriate smp_rmb must be
- * inserted.
+ * If the caller holds the struct_mutex, the caller must pass I915_WAIT_LOCKED
+ * in via the flags, and vice versa if the struct_mutex is not held, the caller
+ * must not specify that the wait is locked.
*
- * Returns 0 if the request was found within the alloted time. Else returns the
- * errno with remaining time filled in timeout argument.
+ * Returns the remaining time (in jiffies) if the request completed, which
+ * may be zero, or -ETIME if the request is unfinished after the timeout
+ * expires. May return -EINTR if called with I915_WAIT_INTERRUPTIBLE and a
+ * signal is pending before the request completes.
*/
-int i915_wait_request(struct drm_i915_gem_request *req,
- unsigned int flags,
- s64 *timeout,
- struct intel_rps_client *rps)
+long i915_wait_request(struct drm_i915_gem_request *req,
+ unsigned int flags,
+ long timeout)
{
const int state = flags & I915_WAIT_INTERRUPTIBLE ?
TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE;
DEFINE_WAIT(reset);
struct intel_wait wait;
- unsigned long timeout_remain;
- int ret = 0;
might_sleep();
#if IS_ENABLED(CONFIG_LOCKDEP)
- GEM_BUG_ON(!!lockdep_is_held(&req->i915->drm.struct_mutex) !=
+ GEM_BUG_ON(debug_locks &&
+ !!lockdep_is_held(&req->i915->drm.struct_mutex) !=
!!(flags & I915_WAIT_LOCKED));
#endif
+ GEM_BUG_ON(timeout < 0);
if (i915_gem_request_completed(req))
- return 0;
-
- timeout_remain = MAX_SCHEDULE_TIMEOUT;
- if (timeout) {
- if (WARN_ON(*timeout < 0))
- return -EINVAL;
+ return timeout;
- if (*timeout == 0)
- return -ETIME;
-
- /* Record current time in case interrupted, or wedged */
- timeout_remain = nsecs_to_jiffies_timeout(*timeout);
- *timeout += ktime_get_raw_ns();
- }
+ if (!timeout)
+ return -ETIME;
trace_i915_gem_request_wait_begin(req);
- /* This client is about to stall waiting for the GPU. In many cases
- * this is undesirable and limits the throughput of the system, as
- * many clients cannot continue processing user input/output whilst
- * blocked. RPS autotuning may take tens of milliseconds to respond
- * to the GPU load and thus incurs additional latency for the client.
- * We can circumvent that by promoting the GPU frequency to maximum
- * before we wait. This makes the GPU throttle up much more quickly
- * (good for benchmarks and user experience, e.g. window animations),
- * but at a cost of spending more power processing the workload
- * (bad for battery). Not all clients even want their results
- * immediately and for them we should just let the GPU select its own
- * frequency to maximise efficiency. To prevent a single client from
- * forcing the clocks too high for the whole system, we only allow
- * each client to waitboost once in a busy period.
- */
- if (IS_RPS_CLIENT(rps) && INTEL_GEN(req->i915) >= 6)
- gen6_rps_boost(req->i915, rps, req->emitted_jiffies);
+ if (!i915_sw_fence_done(&req->execute)) {
+ timeout = __i915_request_wait_for_execute(req, flags, timeout);
+ if (timeout < 0)
+ goto complete;
+
+ GEM_BUG_ON(!i915_sw_fence_done(&req->execute));
+ }
+ GEM_BUG_ON(!i915_sw_fence_done(&req->submit));
+ GEM_BUG_ON(!req->global_seqno);
/* Optimistic short spin before touching IRQs */
if (i915_spin_request(req, state, 5))
@@ -808,7 +1083,7 @@ int i915_wait_request(struct drm_i915_gem_request *req,
if (flags & I915_WAIT_LOCKED)
add_wait_queue(&req->i915->gpu_error.wait_queue, &reset);
- intel_wait_init(&wait, req->fence.seqno);
+ intel_wait_init(&wait, req->global_seqno);
if (intel_engine_add_wait(req->engine, &wait))
/* In order to check that we haven't missed the interrupt
* as we enabled it, we need to kick ourselves to do a
@@ -818,16 +1093,17 @@ int i915_wait_request(struct drm_i915_gem_request *req,
for (;;) {
if (signal_pending_state(state, current)) {
- ret = -ERESTARTSYS;
+ timeout = -ERESTARTSYS;
break;
}
- timeout_remain = io_schedule_timeout(timeout_remain);
- if (timeout_remain == 0) {
- ret = -ETIME;
+ if (!timeout) {
+ timeout = -ETIME;
break;
}
+ timeout = io_schedule_timeout(timeout);
+
if (intel_wait_complete(&wait))
break;
@@ -874,74 +1150,32 @@ wakeup:
complete:
trace_i915_gem_request_wait_end(req);
- if (timeout) {
- *timeout -= ktime_get_raw_ns();
- if (*timeout < 0)
- *timeout = 0;
-
- /*
- * Apparently ktime isn't accurate enough and occasionally has a
- * bit of mismatch in the jiffies<->nsecs<->ktime loop. So patch
- * things up to make the test happy. We allow up to 1 jiffy.
- *
- * This is a regrssion from the timespec->ktime conversion.
- */
- if (ret == -ETIME && *timeout < jiffies_to_usecs(1)*1000)
- *timeout = 0;
- }
-
- if (IS_RPS_USER(rps) &&
- req->fence.seqno == req->engine->last_submitted_seqno) {
- /* The GPU is now idle and this client has stalled.
- * Since no other client has submitted a request in the
- * meantime, assume that this client is the only one
- * supplying work to the GPU but is unable to keep that
- * work supplied because it is waiting. Since the GPU is
- * then never kept fully busy, RPS autoclocking will
- * keep the clocks relatively low, causing further delays.
- * Compensate by giving the synchronous client credit for
- * a waitboost next time.
- */
- spin_lock(&req->i915->rps.client_lock);
- list_del_init(&rps->link);
- spin_unlock(&req->i915->rps.client_lock);
- }
-
- return ret;
+ return timeout;
}
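With the return type widened from int to long, callers now fold the remaining-jiffies result back into an error code themselves. A hypothetical caller (rq and the timeout value are assumptions for illustration), following the same convention i915_gem_active_wait() adopts later in this patch:

        long ret;

        ret = i915_wait_request(rq, I915_WAIT_INTERRUPTIBLE,
                                msecs_to_jiffies(100));
        if (ret < 0)
                return ret;     /* -ETIME, -ERESTARTSYS, ... */

        /* ret >= 0: request completed with that many jiffies to spare */
        return 0;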
-static bool engine_retire_requests(struct intel_engine_cs *engine)
+static void engine_retire_requests(struct intel_engine_cs *engine)
{
struct drm_i915_gem_request *request, *next;
- list_for_each_entry_safe(request, next, &engine->request_list, link) {
- if (!i915_gem_request_completed(request))
- return false;
+ list_for_each_entry_safe(request, next,
+ &engine->timeline->requests, link) {
+ if (!__i915_gem_request_completed(request))
+ return;
i915_gem_request_retire(request);
}
-
- return true;
}
void i915_gem_retire_requests(struct drm_i915_private *dev_priv)
{
struct intel_engine_cs *engine;
- unsigned int tmp;
+ enum intel_engine_id id;
lockdep_assert_held(&dev_priv->drm.struct_mutex);
- if (dev_priv->gt.active_engines == 0)
+ if (!dev_priv->gt.active_requests)
return;
- GEM_BUG_ON(!dev_priv->gt.awake);
-
- for_each_engine_masked(engine, dev_priv, dev_priv->gt.active_engines, tmp)
- if (engine_retire_requests(engine))
- dev_priv->gt.active_engines &= ~intel_engine_flag(engine);
-
- if (dev_priv->gt.active_engines == 0)
- queue_delayed_work(dev_priv->wq,
- &dev_priv->gt.idle_work,
- msecs_to_jiffies(100));
+ for_each_engine(engine, dev_priv, id)
+ engine_retire_requests(engine);
}
diff --git a/drivers/gpu/drm/i915/i915_gem_request.h b/drivers/gpu/drm/i915/i915_gem_request.h
index 974bd7bcc801..e2b077df2da0 100644
--- a/drivers/gpu/drm/i915/i915_gem_request.h
+++ b/drivers/gpu/drm/i915/i915_gem_request.h
@@ -25,11 +25,14 @@
#ifndef I915_GEM_REQUEST_H
#define I915_GEM_REQUEST_H
-#include <linux/fence.h>
+#include <linux/dma-fence.h>
#include "i915_gem.h"
#include "i915_sw_fence.h"
+struct drm_file;
+struct drm_i915_gem_object;
+
struct intel_wait {
struct rb_node node;
struct task_struct *tsk;
@@ -41,6 +44,33 @@ struct intel_signal_node {
struct intel_wait wait;
};
+struct i915_dependency {
+ struct i915_priotree *signaler;
+ struct list_head signal_link;
+ struct list_head wait_link;
+ struct list_head dfs_link;
+ unsigned long flags;
+#define I915_DEPENDENCY_ALLOC BIT(0)
+};
+
+/* Requests exist in a complex web of interdependencies. Each request
+ * has to wait for some other request to complete before it is ready to be run
+ * (e.g. we have to wait until the pixels have been rendered into a texture
+ * before we can copy from it). We track the readiness of a request in terms
+ * of fences, but we also need to keep the dependency tree for the lifetime
+ * of the request (beyond the life of an individual fence). We use the tree
+ * at various points to reorder the requests whilst keeping the requests
+ * in order with respect to their various dependencies.
+ */
+struct i915_priotree {
+ struct list_head signalers_list; /* those before us, we depend upon */
+ struct list_head waiters_list; /* those after us, they depend upon us */
+ struct rb_node node;
+ int priority;
+#define I915_PRIORITY_MAX 1024
+#define I915_PRIORITY_MIN (-I915_PRIORITY_MAX)
+};
+
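As a sketch of how one edge in that web might be recorded (the helper name and the zeroed flags are assumptions for illustration; only the struct layout above is from this patch):

        static void
        priotree_add_dependency(struct i915_priotree *pt,
                                struct i915_priotree *signaler,
                                struct i915_dependency *dep)
        {
                INIT_LIST_HEAD(&dep->dfs_link);
                dep->signaler = signaler;
                dep->flags = 0;
                /* the waiter appears on the signaler's list, and vice versa */
                list_add(&dep->wait_link, &signaler->waiters_list);
                list_add(&dep->signal_link, &pt->signalers_list);
        }

Each edge is then reachable from both endpoints, which is what lets the scheduler walk the graph in either direction.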
/**
* Request queue structure.
*
@@ -62,7 +92,7 @@ struct intel_signal_node {
* The requests are reference counted.
*/
struct drm_i915_gem_request {
- struct fence fence;
+ struct dma_fence fence;
spinlock_t lock;
/** On Which ring this request was generated */
@@ -81,10 +111,39 @@ struct drm_i915_gem_request {
struct i915_gem_context *ctx;
struct intel_engine_cs *engine;
struct intel_ring *ring;
+ struct intel_timeline *timeline;
struct intel_signal_node signaling;
+ /* Fences for the various phases in the request's lifetime.
+ *
+ * The submit fence is used to await upon all of the request's
+ * dependencies. When it is signaled, the request is ready to run.
+ * It is used by the driver to then queue the request for execution.
+ *
+ * The execute fence is used to signal when the request has been
+ * sent to hardware.
+ *
+ * It is illegal for the submit fence of one request to wait upon the
+ * execute fence of an earlier request. It should be sufficient to
+ * wait upon the submit fence of the earlier request.
+ */
struct i915_sw_fence submit;
+ struct i915_sw_fence execute;
wait_queue_t submitq;
+ wait_queue_t execq;
+
+ /* A list of everyone we wait upon, and everyone who waits upon us.
+ * Even though we will not be submitted to the hardware before the
+ * submit fence is signaled (it waits for all external events as well
+ * as our own requests), the scheduler still needs to know the
+ * dependency tree for the lifetime of the request (from execbuf
+ * to retirement), i.e. bidirectional dependency information for the
+ * request not tied to individual fences.
+ */
+ struct i915_priotree priotree;
+ struct i915_dependency dep;
+
+ u32 global_seqno;
/** GEM sequence number associated with the previous request,
* when the HWS breadcrumb is equal to this the GPU is processing
@@ -140,14 +199,11 @@ struct drm_i915_gem_request {
struct drm_i915_file_private *file_priv;
/** file_priv list entry for this request */
struct list_head client_list;
-
- /** Link in the execlist submission queue, guarded by execlist_lock. */
- struct list_head execlist_link;
};
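To make the fence-phase rule in the struct comment above concrete: queuing request b behind request a means awaiting a's submit fence, never its execute fence. A hedged sketch, assuming the await helper declared in i915_sw_fence.h:

        /* b may not be submitted until a has itself been submitted */
        ret = i915_sw_fence_await_sw_fence(&b->submit, &a->submit,
                                           &b->submitq);
        if (ret < 0)
                return ret;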
-extern const struct fence_ops i915_fence_ops;
+extern const struct dma_fence_ops i915_fence_ops;
-static inline bool fence_is_i915(struct fence *fence)
+static inline bool dma_fence_is_i915(const struct dma_fence *fence)
{
return fence->ops == &i915_fence_ops;
}
@@ -159,43 +215,31 @@ int i915_gem_request_add_to_client(struct drm_i915_gem_request *req,
struct drm_file *file);
void i915_gem_request_retire_upto(struct drm_i915_gem_request *req);
-static inline u32
-i915_gem_request_get_seqno(struct drm_i915_gem_request *req)
-{
- return req ? req->fence.seqno : 0;
-}
-
-static inline struct intel_engine_cs *
-i915_gem_request_get_engine(struct drm_i915_gem_request *req)
-{
- return req ? req->engine : NULL;
-}
-
static inline struct drm_i915_gem_request *
-to_request(struct fence *fence)
+to_request(struct dma_fence *fence)
{
/* We assume that NULL fence/request are interoperable */
BUILD_BUG_ON(offsetof(struct drm_i915_gem_request, fence) != 0);
- GEM_BUG_ON(fence && !fence_is_i915(fence));
+ GEM_BUG_ON(fence && !dma_fence_is_i915(fence));
return container_of(fence, struct drm_i915_gem_request, fence);
}
static inline struct drm_i915_gem_request *
i915_gem_request_get(struct drm_i915_gem_request *req)
{
- return to_request(fence_get(&req->fence));
+ return to_request(dma_fence_get(&req->fence));
}
static inline struct drm_i915_gem_request *
i915_gem_request_get_rcu(struct drm_i915_gem_request *req)
{
- return to_request(fence_get_rcu(&req->fence));
+ return to_request(dma_fence_get_rcu(&req->fence));
}
static inline void
i915_gem_request_put(struct drm_i915_gem_request *req)
{
- fence_put(&req->fence);
+ dma_fence_put(&req->fence);
}
static inline void i915_gem_request_assign(struct drm_i915_gem_request **pdst,
@@ -214,6 +258,8 @@ int
i915_gem_request_await_object(struct drm_i915_gem_request *to,
struct drm_i915_gem_object *obj,
bool write);
+int i915_gem_request_await_dma_fence(struct drm_i915_gem_request *req,
+ struct dma_fence *fence);
void __i915_add_request(struct drm_i915_gem_request *req, bool flush_caches);
#define i915_add_request(req) \
@@ -221,18 +267,21 @@ void __i915_add_request(struct drm_i915_gem_request *req, bool flush_caches);
#define i915_add_request_no_flush(req) \
__i915_add_request(req, false)
+void __i915_gem_request_submit(struct drm_i915_gem_request *request);
+void i915_gem_request_submit(struct drm_i915_gem_request *request);
+
struct intel_rps_client;
#define NO_WAITBOOST ERR_PTR(-1)
#define IS_RPS_CLIENT(p) (!IS_ERR(p))
#define IS_RPS_USER(p) (!IS_ERR_OR_NULL(p))
-int i915_wait_request(struct drm_i915_gem_request *req,
- unsigned int flags,
- s64 *timeout,
- struct intel_rps_client *rps)
+long i915_wait_request(struct drm_i915_gem_request *req,
+ unsigned int flags,
+ long timeout)
__attribute__((nonnull(1)));
#define I915_WAIT_INTERRUPTIBLE BIT(0)
#define I915_WAIT_LOCKED BIT(1) /* struct_mutex held, handle GPU reset */
+#define I915_WAIT_ALL BIT(2) /* used by i915_gem_object_wait() */
static inline u32 intel_engine_get_seqno(struct intel_engine_cs *engine);
@@ -245,17 +294,37 @@ static inline bool i915_seqno_passed(u32 seq1, u32 seq2)
}
static inline bool
-i915_gem_request_started(const struct drm_i915_gem_request *req)
+__i915_gem_request_started(const struct drm_i915_gem_request *req)
{
+ GEM_BUG_ON(!req->global_seqno);
return i915_seqno_passed(intel_engine_get_seqno(req->engine),
req->previous_seqno);
}
static inline bool
-i915_gem_request_completed(const struct drm_i915_gem_request *req)
+i915_gem_request_started(const struct drm_i915_gem_request *req)
{
+ if (!req->global_seqno)
+ return false;
+
+ return __i915_gem_request_started(req);
+}
+
+static inline bool
+__i915_gem_request_completed(const struct drm_i915_gem_request *req)
+{
+ GEM_BUG_ON(!req->global_seqno);
return i915_seqno_passed(intel_engine_get_seqno(req->engine),
- req->fence.seqno);
+ req->global_seqno);
+}
+
+static inline bool
+i915_gem_request_completed(const struct drm_i915_gem_request *req)
+{
+ if (!req->global_seqno)
+ return false;
+
+ return __i915_gem_request_completed(req);
}
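Both helpers reduce to i915_seqno_passed(), which in i915 is the usual wrap-safe comparison, (s32)(seq1 - seq2) >= 0. A standalone illustration (plain userspace C, not driver code) of why the signed subtraction survives the u32 counter wrapping:

        #include <assert.h>
        #include <stdint.h>

        static int seqno_passed(uint32_t seq1, uint32_t seq2)
        {
                /* true once seq1 has caught up with or passed seq2 */
                return (int32_t)(seq1 - seq2) >= 0;
        }

        int main(void)
        {
                assert(seqno_passed(2, 1));
                assert(!seqno_passed(1, 2));
                /* still correct straddling the wrap point */
                assert(seqno_passed(0x00000002, 0xfffffffe));
                return 0;
        }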
bool __i915_spin_request(const struct drm_i915_gem_request *request,
@@ -263,7 +332,7 @@ bool __i915_spin_request(const struct drm_i915_gem_request *request,
static inline bool i915_spin_request(const struct drm_i915_gem_request *request,
int state, unsigned long timeout_us)
{
- return (i915_gem_request_started(request) &&
+ return (__i915_gem_request_started(request) &&
__i915_spin_request(request, state, timeout_us));
}
@@ -497,7 +566,7 @@ __i915_gem_active_get_rcu(const struct i915_gem_active *active)
* compiler.
*
* The atomic operation at the heart of
- * i915_gem_request_get_rcu(), see fence_get_rcu(), is
+ * i915_gem_request_get_rcu(), see dma_fence_get_rcu(), is
* atomic_inc_not_zero() which is only a full memory barrier
* when successful. That is, if i915_gem_request_get_rcu()
* returns the request (and so with the reference counted
@@ -552,53 +621,13 @@ i915_gem_active_isset(const struct i915_gem_active *active)
}
/**
- * i915_gem_active_is_idle - report whether the active tracker is idle
- * @active - the active tracker
- *
- * i915_gem_active_is_idle() returns true if the active tracker is currently
- * unassigned or if the request is complete (but not yet retired). Requires
- * the caller to hold struct_mutex (but that can be relaxed if desired).
- */
-static inline bool
-i915_gem_active_is_idle(const struct i915_gem_active *active,
- struct mutex *mutex)
-{
- return !i915_gem_active_peek(active, mutex);
-}
-
-/**
* i915_gem_active_wait - waits until the request is completed
* @active - the active request on which to wait
- *
- * i915_gem_active_wait() waits until the request is completed before
- * returning. Note that it does not guarantee that the request is
- * retired first, see i915_gem_active_retire().
- *
- * i915_gem_active_wait() returns immediately if the active
- * request is already complete.
- */
-static inline int __must_check
-i915_gem_active_wait(const struct i915_gem_active *active, struct mutex *mutex)
-{
- struct drm_i915_gem_request *request;
-
- request = i915_gem_active_peek(active, mutex);
- if (!request)
- return 0;
-
- return i915_wait_request(request,
- I915_WAIT_INTERRUPTIBLE | I915_WAIT_LOCKED,
- NULL, NULL);
-}
-
-/**
- * i915_gem_active_wait_unlocked - waits until the request is completed
- * @active - the active request on which to wait
* @flags - how to wait
- * @timeout - how long to wait at most
- * @rps - userspace client to charge for a waitboost
*
- * i915_gem_active_wait_unlocked() waits until the request is completed before
+ * i915_gem_active_wait() waits until the request is completed before
* returning, without requiring any locks to be held. Note that it does not
* retire any requests before returning.
*
@@ -614,21 +643,18 @@ i915_gem_active_wait(const struct i915_gem_active *active, struct mutex *mutex)
* Returns 0 if successful, or a negative error code.
*/
static inline int
-i915_gem_active_wait_unlocked(const struct i915_gem_active *active,
- unsigned int flags,
- s64 *timeout,
- struct intel_rps_client *rps)
+i915_gem_active_wait(const struct i915_gem_active *active, unsigned int flags)
{
struct drm_i915_gem_request *request;
- int ret = 0;
+ long ret = 0;
request = i915_gem_active_get_unlocked(active);
if (request) {
- ret = i915_wait_request(request, flags, timeout, rps);
+ ret = i915_wait_request(request, flags, MAX_SCHEDULE_TIMEOUT);
i915_gem_request_put(request);
}
- return ret;
+ return ret < 0 ? ret : 0;
}
/**
@@ -645,7 +671,7 @@ i915_gem_active_retire(struct i915_gem_active *active,
struct mutex *mutex)
{
struct drm_i915_gem_request *request;
- int ret;
+ long ret;
request = i915_gem_active_raw(active, mutex);
if (!request)
@@ -653,8 +679,8 @@ i915_gem_active_retire(struct i915_gem_active *active,
ret = i915_wait_request(request,
I915_WAIT_INTERRUPTIBLE | I915_WAIT_LOCKED,
- NULL, NULL);
- if (ret)
+ MAX_SCHEDULE_TIMEOUT);
+ if (ret < 0)
return ret;
list_del_init(&active->link);
@@ -665,24 +691,6 @@ i915_gem_active_retire(struct i915_gem_active *active,
return 0;
}
-/* Convenience functions for peeking at state inside active's request whilst
- * guarded by the struct_mutex.
- */
-
-static inline uint32_t
-i915_gem_active_get_seqno(const struct i915_gem_active *active,
- struct mutex *mutex)
-{
- return i915_gem_request_get_seqno(i915_gem_active_peek(active, mutex));
-}
-
-static inline struct intel_engine_cs *
-i915_gem_active_get_engine(const struct i915_gem_active *active,
- struct mutex *mutex)
-{
- return i915_gem_request_get_engine(i915_gem_active_peek(active, mutex));
-}
-
#define for_each_active(mask, idx) \
for (; mask ? idx = ffs(mask) - 1, 1 : 0; mask &= ~BIT(idx))
diff --git a/drivers/gpu/drm/i915/i915_gem_shrinker.c b/drivers/gpu/drm/i915/i915_gem_shrinker.c
index 1c237d02f30b..401006b4c6a3 100644
--- a/drivers/gpu/drm/i915/i915_gem_shrinker.c
+++ b/drivers/gpu/drm/i915/i915_gem_shrinker.c
@@ -35,17 +35,22 @@
#include "i915_drv.h"
#include "i915_trace.h"
-static bool mutex_is_locked_by(struct mutex *mutex, struct task_struct *task)
+static bool i915_gem_shrinker_lock(struct drm_device *dev, bool *unlock)
{
- if (!mutex_is_locked(mutex))
+ switch (mutex_trylock_recursive(&dev->struct_mutex)) {
+ case MUTEX_TRYLOCK_FAILED:
return false;
-#if defined(CONFIG_DEBUG_MUTEXES) || defined(CONFIG_MUTEX_SPIN_ON_OWNER)
- return mutex->owner == task;
-#else
- /* Since UP may be pre-empted, we cannot assume that we own the lock */
- return false;
-#endif
+ case MUTEX_TRYLOCK_SUCCESS:
+ *unlock = true;
+ return true;
+
+ case MUTEX_TRYLOCK_RECURSIVE:
+ *unlock = false;
+ return true;
+ }
+
+ BUG();
}
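The *unlock out-parameter tells the caller whether it acquired the mutex or merely found itself already holding it; the matching caller pattern, as used by i915_gem_shrink() below:

        bool unlock;

        if (!i915_gem_shrinker_lock(&dev_priv->drm, &unlock))
                return 0;       /* contended by another task: skip reclaim */

        /* ... reclaim pages ... */

        if (unlock)
                mutex_unlock(&dev_priv->drm.struct_mutex);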
static bool any_vma_pinned(struct drm_i915_gem_object *obj)
@@ -66,8 +71,11 @@ static bool swap_available(void)
static bool can_release_pages(struct drm_i915_gem_object *obj)
{
- /* Only shmemfs objects are backed by swap */
- if (!obj->base.filp)
+ if (!obj->mm.pages)
+ return false;
+
+ /* Consider only shrinkable objects. */
+ if (!i915_gem_object_is_shrinkable(obj))
return false;
/* Only report true if by unbinding the object and putting its pages
@@ -78,7 +86,7 @@ static bool can_release_pages(struct drm_i915_gem_object *obj)
* to the GPU, simply unbinding from the GPU is not going to succeed
* in releasing our pin count on the pages themselves.
*/
- if (obj->pages_pin_count > obj->bind_count)
+ if (atomic_read(&obj->mm.pages_pin_count) > obj->bind_count)
return false;
if (any_vma_pinned(obj))
@@ -88,7 +96,14 @@ static bool can_release_pages(struct drm_i915_gem_object *obj)
* discard the contents (because the user has marked them as being
* purgeable) or if we can move their contents out to swap.
*/
- return swap_available() || obj->madv == I915_MADV_DONTNEED;
+ return swap_available() || obj->mm.madv == I915_MADV_DONTNEED;
+}
+
+static bool unsafe_drop_pages(struct drm_i915_gem_object *obj)
+{
+ if (i915_gem_object_unbind(obj) == 0)
+ __i915_gem_object_put_pages(obj, I915_MM_SHRINKER);
+ return !READ_ONCE(obj->mm.pages);
}
/**
@@ -128,6 +143,10 @@ i915_gem_shrink(struct drm_i915_private *dev_priv,
{ NULL, 0 },
}, *phase;
unsigned long count = 0;
+ bool unlock;
+
+ if (!i915_gem_shrinker_lock(&dev_priv->drm, &unlock))
+ return 0;
trace_i915_gem_shrink(dev_priv, target, flags);
i915_gem_retire_requests(dev_priv);
@@ -171,40 +190,51 @@ i915_gem_shrink(struct drm_i915_private *dev_priv,
while (count < target &&
(obj = list_first_entry_or_null(phase->list,
typeof(*obj),
- global_list))) {
- list_move_tail(&obj->global_list, &still_in_list);
+ global_link))) {
+ list_move_tail(&obj->global_link, &still_in_list);
+ if (!obj->mm.pages) {
+ list_del_init(&obj->global_link);
+ continue;
+ }
if (flags & I915_SHRINK_PURGEABLE &&
- obj->madv != I915_MADV_DONTNEED)
+ obj->mm.madv != I915_MADV_DONTNEED)
continue;
if (flags & I915_SHRINK_VMAPS &&
- !is_vmalloc_addr(obj->mapping))
+ !is_vmalloc_addr(obj->mm.mapping))
continue;
- if ((flags & I915_SHRINK_ACTIVE) == 0 &&
- i915_gem_object_is_active(obj))
+ if (!(flags & I915_SHRINK_ACTIVE) &&
+ (i915_gem_object_is_active(obj) ||
+ obj->framebuffer_references))
continue;
if (!can_release_pages(obj))
continue;
- i915_gem_object_get(obj);
-
- /* For the unbound phase, this should be a no-op! */
- i915_gem_object_unbind(obj);
- if (i915_gem_object_put_pages(obj) == 0)
- count += obj->base.size >> PAGE_SHIFT;
-
- i915_gem_object_put(obj);
+ if (unsafe_drop_pages(obj)) {
+ /* May arrive from get_pages on another bo */
+ mutex_lock_nested(&obj->mm.lock,
+ I915_MM_SHRINKER);
+ if (!obj->mm.pages) {
+ __i915_gem_object_invalidate(obj);
+ list_del_init(&obj->global_link);
+ count += obj->base.size >> PAGE_SHIFT;
+ }
+ mutex_unlock(&obj->mm.lock);
+ }
}
- list_splice(&still_in_list, phase->list);
+ list_splice_tail(&still_in_list, phase->list);
}
if (flags & I915_SHRINK_BOUND)
intel_runtime_pm_put(dev_priv);
i915_gem_retire_requests(dev_priv);
+ if (unlock)
+ mutex_unlock(&dev_priv->drm.struct_mutex);
+
/* expedite the RCU grace period to free some request slabs */
synchronize_rcu_expedited();
@@ -238,19 +268,6 @@ unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv)
return freed;
}
-static bool i915_gem_shrinker_lock(struct drm_device *dev, bool *unlock)
-{
- if (!mutex_trylock(&dev->struct_mutex)) {
- if (!mutex_is_locked_by(&dev->struct_mutex, current))
- return false;
-
- *unlock = false;
- } else
- *unlock = true;
-
- return true;
-}
-
static unsigned long
i915_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
{
@@ -267,11 +284,11 @@ i915_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
i915_gem_retire_requests(dev_priv);
count = 0;
- list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list)
+ list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_link)
if (can_release_pages(obj))
count += obj->base.size >> PAGE_SHIFT;
- list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
+ list_for_each_entry(obj, &dev_priv->mm.bound_list, global_link) {
if (!i915_gem_object_is_active(obj) && can_release_pages(obj))
count += obj->base.size >> PAGE_SHIFT;
}
@@ -372,13 +389,19 @@ i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr)
* being pointed to by hardware.
*/
unbound = bound = unevictable = 0;
- list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) {
+ list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_link) {
+ if (!obj->mm.pages)
+ continue;
+
if (!can_release_pages(obj))
unevictable += obj->base.size >> PAGE_SHIFT;
else
unbound += obj->base.size >> PAGE_SHIFT;
}
- list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
+ list_for_each_entry(obj, &dev_priv->mm.bound_list, global_link) {
+ if (!obj->mm.pages)
+ continue;
+
if (!can_release_pages(obj))
unevictable += obj->base.size >> PAGE_SHIFT;
else
diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
index 59989e8ee5dc..abc78bbfc1dc 100644
--- a/drivers/gpu/drm/i915/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
@@ -55,10 +55,9 @@ int i915_gem_stolen_insert_node_in_range(struct drm_i915_private *dev_priv,
return -ENODEV;
/* See the comment at the drm_mm_init() call for more about this check.
- * WaSkipStolenMemoryFirstPage:bdw,chv,kbl (incomplete)
+ * WaSkipStolenMemoryFirstPage:bdw+ (incomplete)
*/
- if (start < 4096 && (IS_GEN8(dev_priv) ||
- IS_KBL_REVID(dev_priv, 0, KBL_REVID_A0)))
+ if (start < 4096 && INTEL_GEN(dev_priv) >= 8)
start = 4096;
mutex_lock(&dev_priv->mm.stolen_lock);
@@ -89,9 +88,8 @@ void i915_gem_stolen_remove_node(struct drm_i915_private *dev_priv,
mutex_unlock(&dev_priv->mm.stolen_lock);
}
-static unsigned long i915_stolen_to_physical(struct drm_device *dev)
+static unsigned long i915_stolen_to_physical(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
struct pci_dev *pdev = dev_priv->drm.pdev;
struct i915_ggtt *ggtt = &dev_priv->ggtt;
struct resource *r;
@@ -109,13 +107,13 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev)
*
*/
base = 0;
- if (INTEL_INFO(dev)->gen >= 3) {
+ if (INTEL_GEN(dev_priv) >= 3) {
u32 bsm;
pci_read_config_dword(pdev, INTEL_BSM, &bsm);
base = bsm & INTEL_BSM_MASK;
- } else if (IS_I865G(dev)) {
+ } else if (IS_I865G(dev_priv)) {
u32 tseg_size = 0;
u16 toud = 0;
u8 tmp;
@@ -138,7 +136,7 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev)
I865_TOUD, &toud);
base = (toud << 16) + tseg_size;
- } else if (IS_I85X(dev)) {
+ } else if (IS_I85X(dev_priv)) {
u32 tseg_size = 0;
u32 tom;
u8 tmp;
@@ -154,7 +152,7 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev)
tom = tmp * MB(32);
base = tom - tseg_size - ggtt->stolen_size;
- } else if (IS_845G(dev)) {
+ } else if (IS_845G(dev_priv)) {
u32 tseg_size = 0;
u32 tom;
u8 tmp;
@@ -178,7 +176,7 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev)
tom = tmp * MB(32);
base = tom - tseg_size - ggtt->stolen_size;
- } else if (IS_I830(dev)) {
+ } else if (IS_I830(dev_priv)) {
u32 tseg_size = 0;
u32 tom;
u8 tmp;
@@ -204,7 +202,8 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev)
return 0;
/* make sure we don't clobber the GTT if it's within stolen memory */
- if (INTEL_INFO(dev)->gen <= 4 && !IS_G33(dev) && !IS_G4X(dev)) {
+ if (INTEL_GEN(dev_priv) <= 4 && !IS_G33(dev_priv) &&
+ !IS_G4X(dev_priv)) {
struct {
u32 start, end;
} stolen[2] = {
@@ -214,7 +213,7 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev)
u64 ggtt_start, ggtt_end;
ggtt_start = I915_READ(PGTBL_CTL);
- if (IS_GEN4(dev))
+ if (IS_GEN4(dev_priv))
ggtt_start = (ggtt_start & PGTBL_ADDRESS_LO_MASK) |
(ggtt_start & PGTBL_ADDRESS_HI_MASK) << 28;
else
@@ -252,7 +251,7 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev)
* kernel. So if the region is already marked as busy, something
* is seriously wrong.
*/
- r = devm_request_mem_region(dev->dev, base, ggtt->stolen_size,
+ r = devm_request_mem_region(dev_priv->drm.dev, base, ggtt->stolen_size,
"Graphics Stolen Memory");
if (r == NULL) {
/*
@@ -263,14 +262,14 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev)
* PCI bus, but have an off-by-one error. Hence retry the
* reservation starting from 1 instead of 0.
*/
- r = devm_request_mem_region(dev->dev, base + 1,
+ r = devm_request_mem_region(dev_priv->drm.dev, base + 1,
ggtt->stolen_size - 1,
"Graphics Stolen Memory");
/*
* GEN3 firmware likes to smash pci bridges into the stolen
* range. Apparently this works.
*/
- if (r == NULL && !IS_GEN3(dev)) {
+ if (r == NULL && !IS_GEN3(dev_priv)) {
DRM_ERROR("conflict detected with stolen region: [0x%08x - 0x%08x]\n",
base, base + (uint32_t)ggtt->stolen_size);
base = 0;
@@ -407,9 +406,8 @@ static void bdw_get_stolen_reserved(struct drm_i915_private *dev_priv,
*size = stolen_top - *base;
}
-int i915_gem_init_stolen(struct drm_device *dev)
+int i915_gem_init_stolen(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
struct i915_ggtt *ggtt = &dev_priv->ggtt;
unsigned long reserved_total, reserved_base = 0, reserved_size;
unsigned long stolen_top;
@@ -417,7 +415,7 @@ int i915_gem_init_stolen(struct drm_device *dev)
mutex_init(&dev_priv->mm.stolen_lock);
#ifdef CONFIG_INTEL_IOMMU
- if (intel_iommu_gfx_mapped && INTEL_INFO(dev)->gen < 8) {
+ if (intel_iommu_gfx_mapped && INTEL_GEN(dev_priv) < 8) {
DRM_INFO("DMAR active, disabling use of stolen memory\n");
return 0;
}
@@ -426,7 +424,7 @@ int i915_gem_init_stolen(struct drm_device *dev)
if (ggtt->stolen_size == 0)
return 0;
- dev_priv->mm.stolen_base = i915_stolen_to_physical(dev);
+ dev_priv->mm.stolen_base = i915_stolen_to_physical(dev_priv);
if (dev_priv->mm.stolen_base == 0)
return 0;
@@ -437,7 +435,7 @@ int i915_gem_init_stolen(struct drm_device *dev)
case 3:
break;
case 4:
- if (IS_G4X(dev))
+ if (IS_G4X(dev_priv))
g4x_get_stolen_reserved(dev_priv, &reserved_base,
&reserved_size);
break;
@@ -456,7 +454,7 @@ int i915_gem_init_stolen(struct drm_device *dev)
break;
default:
if (IS_BROADWELL(dev_priv) ||
- IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev))
+ IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
bdw_get_stolen_reserved(dev_priv, &reserved_base,
&reserved_size);
else
@@ -514,12 +512,10 @@ i915_pages_create_for_stolen(struct drm_device *dev,
u32 offset, u32 size)
{
struct drm_i915_private *dev_priv = to_i915(dev);
- struct i915_ggtt *ggtt = &dev_priv->ggtt;
struct sg_table *st;
struct scatterlist *sg;
- DRM_DEBUG_DRIVER("offset=0x%x, size=%d\n", offset, size);
- BUG_ON(offset > ggtt->stolen_size - size);
+ GEM_BUG_ON(offset > dev_priv->ggtt.stolen_size - size);
/* We hide that we have no struct page backing our stolen object
* by wrapping the contiguous physical allocation with a fake
@@ -528,11 +524,11 @@ i915_pages_create_for_stolen(struct drm_device *dev,
st = kmalloc(sizeof(*st), GFP_KERNEL);
if (st == NULL)
- return NULL;
+ return ERR_PTR(-ENOMEM);
if (sg_alloc_table(st, 1, GFP_KERNEL)) {
kfree(st);
- return NULL;
+ return ERR_PTR(-ENOMEM);
}
sg = st->sgl;
@@ -545,31 +541,36 @@ i915_pages_create_for_stolen(struct drm_device *dev,
return st;
}
-static int i915_gem_object_get_pages_stolen(struct drm_i915_gem_object *obj)
+static struct sg_table *
+i915_gem_object_get_pages_stolen(struct drm_i915_gem_object *obj)
{
- BUG();
- return -EINVAL;
+ return i915_pages_create_for_stolen(obj->base.dev,
+ obj->stolen->start,
+ obj->stolen->size);
}
-static void i915_gem_object_put_pages_stolen(struct drm_i915_gem_object *obj)
+static void i915_gem_object_put_pages_stolen(struct drm_i915_gem_object *obj,
+ struct sg_table *pages)
{
- /* Should only be called during free */
- sg_free_table(obj->pages);
- kfree(obj->pages);
+ /* Should only be called from i915_gem_object_release_stolen() */
+ sg_free_table(pages);
+ kfree(pages);
}
-
static void
i915_gem_object_release_stolen(struct drm_i915_gem_object *obj)
{
struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
+ struct drm_mm_node *stolen = fetch_and_zero(&obj->stolen);
- if (obj->stolen) {
- i915_gem_stolen_remove_node(dev_priv, obj->stolen);
- kfree(obj->stolen);
- obj->stolen = NULL;
- }
+ GEM_BUG_ON(!stolen);
+
+ __i915_gem_object_unpin_pages(obj);
+
+ i915_gem_stolen_remove_node(dev_priv, stolen);
+ kfree(stolen);
}
+
static const struct drm_i915_gem_object_ops i915_gem_object_stolen_ops = {
.get_pages = i915_gem_object_get_pages_stolen,
.put_pages = i915_gem_object_put_pages_stolen,
@@ -589,19 +590,13 @@ _i915_gem_object_create_stolen(struct drm_device *dev,
drm_gem_private_object_init(dev, &obj->base, stolen->size);
i915_gem_object_init(obj, &i915_gem_object_stolen_ops);
- obj->pages = i915_pages_create_for_stolen(dev,
- stolen->start, stolen->size);
- if (obj->pages == NULL)
- goto cleanup;
-
- obj->get_page.sg = obj->pages->sgl;
- obj->get_page.last = 0;
-
- i915_gem_object_pin_pages(obj);
obj->stolen = stolen;
-
obj->base.read_domains = I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT;
- obj->cache_level = HAS_LLC(dev) ? I915_CACHE_LLC : I915_CACHE_NONE;
+ obj->cache_level = HAS_LLC(to_i915(dev)) ?
+ I915_CACHE_LLC : I915_CACHE_NONE;
+
+ if (i915_gem_object_pin_pages(obj))
+ goto cleanup;
return obj;
@@ -621,7 +616,6 @@ i915_gem_object_create_stolen(struct drm_device *dev, u32 size)
if (!drm_mm_initialized(&dev_priv->mm.stolen))
return NULL;
- DRM_DEBUG_KMS("creating stolen object: size=%x\n", size);
if (size == 0)
return NULL;
@@ -697,10 +691,14 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
if (gtt_offset == I915_GTT_OFFSET_NONE)
return obj;
+ ret = i915_gem_object_pin_pages(obj);
+ if (ret)
+ goto err;
+
vma = i915_gem_obj_lookup_or_create_vma(obj, &ggtt->base, NULL);
if (IS_ERR(vma)) {
ret = PTR_ERR(vma);
- goto err;
+ goto err_pages;
}
/* To simplify the initialisation sequence between KMS and GTT,
@@ -714,20 +712,20 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
ret = drm_mm_reserve_node(&ggtt->base.mm, &vma->node);
if (ret) {
DRM_DEBUG_KMS("failed to allocate stolen GTT space\n");
- goto err;
+ goto err_pages;
}
- vma->pages = obj->pages;
+ vma->pages = obj->mm.pages;
vma->flags |= I915_VMA_GLOBAL_BIND;
__i915_vma_set_map_and_fenceable(vma);
list_move_tail(&vma->vm_link, &ggtt->base.inactive_list);
+ list_move_tail(&obj->global_link, &dev_priv->mm.bound_list);
obj->bind_count++;
- list_add_tail(&obj->global_list, &dev_priv->mm.bound_list);
- i915_gem_object_pin_pages(obj);
-
return obj;
+err_pages:
+ i915_gem_object_unpin_pages(obj);
err:
i915_gem_object_put(obj);
return NULL;
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index a14b1e3d4c78..c85e7b06bdba 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -60,7 +60,8 @@
/* Check pitch constraints for all chips & tiling formats */
static bool
-i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode)
+i915_tiling_ok(struct drm_i915_private *dev_priv,
+ int stride, int size, int tiling_mode)
{
int tile_width;
@@ -71,8 +72,8 @@ i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode)
if (tiling_mode > I915_TILING_LAST)
return false;
- if (IS_GEN2(dev) ||
- (tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev)))
+ if (IS_GEN2(dev_priv) ||
+ (tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev_priv)))
tile_width = 128;
else
tile_width = 512;
@@ -80,17 +81,17 @@ i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode)
/* check maximum stride & object size */
/* i965+ stores the end address of the gtt mapping in the fence
* reg, so dont bother to check the size */
- if (INTEL_INFO(dev)->gen >= 7) {
+ if (INTEL_GEN(dev_priv) >= 7) {
if (stride / 128 > GEN7_FENCE_MAX_PITCH_VAL)
return false;
- } else if (INTEL_INFO(dev)->gen >= 4) {
+ } else if (INTEL_GEN(dev_priv) >= 4) {
if (stride / 128 > I965_FENCE_MAX_PITCH_VAL)
return false;
} else {
if (stride > 8192)
return false;
- if (IS_GEN3(dev)) {
+ if (IS_GEN3(dev_priv)) {
if (size > I830_FENCE_MAX_SIZE_VAL << 20)
return false;
} else {
@@ -103,7 +104,7 @@ i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode)
return false;
/* 965+ just needs multiples of tile width */
- if (INTEL_INFO(dev)->gen >= 4) {
+ if (INTEL_GEN(dev_priv) >= 4) {
if (stride & (tile_width - 1))
return false;
return true;
@@ -198,14 +199,12 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
if (!obj)
return -ENOENT;
- if (!i915_tiling_ok(dev,
+ if (!i915_tiling_ok(dev_priv,
args->stride, obj->base.size, args->tiling_mode)) {
- i915_gem_object_put_unlocked(obj);
+ i915_gem_object_put(obj);
return -EINVAL;
}
- intel_runtime_pm_get(dev_priv);
-
mutex_lock(&dev->struct_mutex);
if (obj->pin_display || obj->framebuffer_references) {
err = -EBUSY;
@@ -260,14 +259,22 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
if (!err) {
struct i915_vma *vma;
- if (obj->pages &&
- obj->madv == I915_MADV_WILLNEED &&
+ mutex_lock(&obj->mm.lock);
+ if (obj->mm.pages &&
+ obj->mm.madv == I915_MADV_WILLNEED &&
dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
- if (args->tiling_mode == I915_TILING_NONE)
- i915_gem_object_unpin_pages(obj);
- if (!i915_gem_object_is_tiled(obj))
- i915_gem_object_pin_pages(obj);
+ if (args->tiling_mode == I915_TILING_NONE) {
+ GEM_BUG_ON(!obj->mm.quirked);
+ __i915_gem_object_unpin_pages(obj);
+ obj->mm.quirked = false;
+ }
+ if (!i915_gem_object_is_tiled(obj)) {
+ GEM_BUG_ON(!obj->mm.quirked);
+ __i915_gem_object_pin_pages(obj);
+ obj->mm.quirked = true;
+ }
}
+ mutex_unlock(&obj->mm.lock);
list_for_each_entry(vma, &obj->vma_list, obj_link) {
if (!vma->fence)
@@ -301,8 +308,6 @@ err:
i915_gem_object_put(obj);
mutex_unlock(&dev->struct_mutex);
- intel_runtime_pm_put(dev_priv);
-
return err;
}
@@ -326,12 +331,19 @@ i915_gem_get_tiling(struct drm_device *dev, void *data,
struct drm_i915_gem_get_tiling *args = data;
struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_i915_gem_object *obj;
+ int err = -ENOENT;
+
+ rcu_read_lock();
+ obj = i915_gem_object_lookup_rcu(file, args->handle);
+ if (obj) {
+ args->tiling_mode =
+ READ_ONCE(obj->tiling_and_stride) & TILING_MASK;
+ err = 0;
+ }
+ rcu_read_unlock();
+ if (unlikely(err))
+ return err;
- obj = i915_gem_object_lookup(file, args->handle);
- if (!obj)
- return -ENOENT;
-
- args->tiling_mode = READ_ONCE(obj->tiling_and_stride) & TILING_MASK;
switch (args->tiling_mode) {
case I915_TILING_X:
args->swizzle_mode = dev_priv->mm.bit_6_swizzle_x;
@@ -339,11 +351,10 @@ i915_gem_get_tiling(struct drm_device *dev, void *data,
case I915_TILING_Y:
args->swizzle_mode = dev_priv->mm.bit_6_swizzle_y;
break;
+ default:
case I915_TILING_NONE:
args->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
break;
- default:
- DRM_ERROR("unknown tiling mode\n");
}
/* Hide bit 17 from the user -- see comment in i915_gem_set_tiling */
@@ -356,6 +367,5 @@ i915_gem_get_tiling(struct drm_device *dev, void *data,
if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_10_17)
args->swizzle_mode = I915_BIT_6_SWIZZLE_9_10;
- i915_gem_object_put_unlocked(obj);
return 0;
}
diff --git a/drivers/gpu/drm/i915/i915_gem_timeline.c b/drivers/gpu/drm/i915/i915_gem_timeline.c
new file mode 100644
index 000000000000..bf8a471b61e6
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_gem_timeline.c
@@ -0,0 +1,90 @@
+/*
+ * Copyright © 2016 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#include "i915_drv.h"
+
+static int __i915_gem_timeline_init(struct drm_i915_private *i915,
+ struct i915_gem_timeline *timeline,
+ const char *name,
+ struct lock_class_key *lockclass,
+ const char *lockname)
+{
+ unsigned int i;
+ u64 fences;
+
+ lockdep_assert_held(&i915->drm.struct_mutex);
+
+ timeline->i915 = i915;
+ timeline->name = kstrdup(name ?: "[kernel]", GFP_KERNEL);
+ if (!timeline->name)
+ return -ENOMEM;
+
+ list_add(&timeline->link, &i915->gt.timelines);
+
+ /* Called during early_init before we know how many engines there are */
+ fences = dma_fence_context_alloc(ARRAY_SIZE(timeline->engine));
+ for (i = 0; i < ARRAY_SIZE(timeline->engine); i++) {
+ struct intel_timeline *tl = &timeline->engine[i];
+
+ tl->fence_context = fences++;
+ tl->common = timeline;
+#ifdef CONFIG_DEBUG_SPINLOCK
+ __raw_spin_lock_init(&tl->lock.rlock, lockname, lockclass);
+#else
+ spin_lock_init(&tl->lock);
+#endif
+ init_request_active(&tl->last_request, NULL);
+ INIT_LIST_HEAD(&tl->requests);
+ }
+
+ return 0;
+}
+
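dma_fence_context_alloc() returns the first id of a contiguous block, so each per-engine timeline ends up with a fence context of its own; for example:

        u64 base = dma_fence_context_alloc(I915_NUM_ENGINES);
        /* contexts base, base + 1, ..., base + I915_NUM_ENGINES - 1 */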
+int i915_gem_timeline_init(struct drm_i915_private *i915,
+ struct i915_gem_timeline *timeline,
+ const char *name)
+{
+ static struct lock_class_key class;
+
+ return __i915_gem_timeline_init(i915, timeline, name,
+ &class, "&timeline->lock");
+}
+
+int i915_gem_timeline_init__global(struct drm_i915_private *i915)
+{
+ static struct lock_class_key class;
+
+ return __i915_gem_timeline_init(i915,
+ &i915->gt.global_timeline,
+ "[execution]",
+ &class, "&global_timeline->lock");
+}
+
+void i915_gem_timeline_fini(struct i915_gem_timeline *tl)
+{
+ lockdep_assert_held(&tl->i915->drm.struct_mutex);
+
+ list_del(&tl->link);
+ kfree(tl->name);
+}
diff --git a/drivers/gpu/drm/i915/i915_gem_timeline.h b/drivers/gpu/drm/i915/i915_gem_timeline.h
new file mode 100644
index 000000000000..98d99a62b4ae
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_gem_timeline.h
@@ -0,0 +1,73 @@
+/*
+ * Copyright © 2016 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#ifndef I915_GEM_TIMELINE_H
+#define I915_GEM_TIMELINE_H
+
+#include <linux/list.h>
+
+#include "i915_gem_request.h"
+
+struct i915_gem_timeline;
+
+struct intel_timeline {
+ u64 fence_context;
+ u32 last_submitted_seqno;
+
+ spinlock_t lock;
+
+ /**
+ * List of breadcrumbs associated with GPU requests currently
+ * outstanding.
+ */
+ struct list_head requests;
+
+ /* Contains an RCU guarded pointer to the last request. No reference is
+ * held to the request, users must carefully acquire a reference to
+ * the request using i915_gem_active_get_request_rcu(), or hold the
+ * struct_mutex.
+ */
+ struct i915_gem_active last_request;
+ u32 sync_seqno[I915_NUM_ENGINES];
+
+ struct i915_gem_timeline *common;
+};
+
+struct i915_gem_timeline {
+ struct list_head link;
+ atomic_t next_seqno;
+
+ struct drm_i915_private *i915;
+ const char *name;
+
+ struct intel_timeline engine[I915_NUM_ENGINES];
+};
+
+int i915_gem_timeline_init(struct drm_i915_private *i915,
+ struct i915_gem_timeline *tl,
+ const char *name);
+int i915_gem_timeline_init__global(struct drm_i915_private *i915);
+void i915_gem_timeline_fini(struct i915_gem_timeline *tl);
+
+#endif
diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/i915_gem_userptr.c
index c6f780f5abc9..d068af2ec3a3 100644
--- a/drivers/gpu/drm/i915/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/i915_gem_userptr.c
@@ -61,33 +61,26 @@ struct i915_mmu_object {
bool attached;
};
-static void wait_rendering(struct drm_i915_gem_object *obj)
-{
- unsigned long active = __I915_BO_ACTIVE(obj);
- int idx;
-
- for_each_active(active, idx)
- i915_gem_active_wait_unlocked(&obj->last_read[idx],
- 0, NULL, NULL);
-}
-
static void cancel_userptr(struct work_struct *work)
{
struct i915_mmu_object *mo = container_of(work, typeof(*mo), work);
struct drm_i915_gem_object *obj = mo->obj;
struct drm_device *dev = obj->base.dev;
- wait_rendering(obj);
+ i915_gem_object_wait(obj, I915_WAIT_ALL, MAX_SCHEDULE_TIMEOUT, NULL);
mutex_lock(&dev->struct_mutex);
/* Cancel any active worker and force us to re-evaluate gup */
obj->userptr.work = NULL;
- if (obj->pages != NULL) {
- /* We are inside a kthread context and can't be interrupted */
- WARN_ON(i915_gem_object_unbind(obj));
- WARN_ON(i915_gem_object_put_pages(obj));
- }
+ /* We are inside a kthread context and can't be interrupted */
+ if (i915_gem_object_unbind(obj) == 0)
+ __i915_gem_object_put_pages(obj, I915_MM_NORMAL);
+ WARN_ONCE(obj->mm.pages,
+ "Failed to release pages: bind_count=%d, pages_pin_count=%d, pin_display=%d\n",
+ obj->bind_count,
+ atomic_read(&obj->mm.pages_pin_count),
+ obj->pin_display);
i915_gem_object_put(obj);
mutex_unlock(&dev->struct_mutex);
@@ -436,24 +429,25 @@ err:
return ret;
}
-static int
+static struct sg_table *
__i915_gem_userptr_set_pages(struct drm_i915_gem_object *obj,
struct page **pvec, int num_pages)
{
+ struct sg_table *pages;
int ret;
- ret = st_set_pages(&obj->pages, pvec, num_pages);
+ ret = st_set_pages(&pages, pvec, num_pages);
if (ret)
- return ret;
+ return ERR_PTR(ret);
- ret = i915_gem_gtt_prepare_object(obj);
+ ret = i915_gem_gtt_prepare_pages(obj, pages);
if (ret) {
- sg_free_table(obj->pages);
- kfree(obj->pages);
- obj->pages = NULL;
+ sg_free_table(pages);
+ kfree(pages);
+ return ERR_PTR(ret);
}
- return ret;
+ return pages;
}
static int
@@ -497,7 +491,6 @@ __i915_gem_userptr_get_pages_worker(struct work_struct *_work)
{
struct get_pages_work *work = container_of(_work, typeof(*work), work);
struct drm_i915_gem_object *obj = work->obj;
- struct drm_device *dev = obj->base.dev;
const int npages = obj->base.size >> PAGE_SHIFT;
struct page **pvec;
int pinned, ret;
@@ -522,7 +515,7 @@ __i915_gem_userptr_get_pages_worker(struct work_struct *_work)
obj->userptr.ptr + pinned * PAGE_SIZE,
npages - pinned,
flags,
- pvec + pinned, NULL);
+ pvec + pinned, NULL, NULL);
if (ret < 0)
break;
@@ -533,33 +526,32 @@ __i915_gem_userptr_get_pages_worker(struct work_struct *_work)
}
}
- mutex_lock(&dev->struct_mutex);
+ mutex_lock(&obj->mm.lock);
if (obj->userptr.work == &work->work) {
+ struct sg_table *pages = ERR_PTR(ret);
+
if (pinned == npages) {
- ret = __i915_gem_userptr_set_pages(obj, pvec, npages);
- if (ret == 0) {
- list_add_tail(&obj->global_list,
- &to_i915(dev)->mm.unbound_list);
- obj->get_page.sg = obj->pages->sgl;
- obj->get_page.last = 0;
+ pages = __i915_gem_userptr_set_pages(obj, pvec, npages);
+ if (!IS_ERR(pages)) {
+ __i915_gem_object_set_pages(obj, pages);
pinned = 0;
+ pages = NULL;
}
}
- obj->userptr.work = ERR_PTR(ret);
- }
- obj->userptr.workers--;
- i915_gem_object_put(obj);
- mutex_unlock(&dev->struct_mutex);
+ obj->userptr.work = ERR_CAST(pages);
+ }
+ mutex_unlock(&obj->mm.lock);
release_pages(pvec, pinned, 0);
drm_free_large(pvec);
+ i915_gem_object_put(obj);
put_task_struct(work->task);
kfree(work);
}
-static int
+static struct sg_table *
__i915_gem_userptr_get_pages_schedule(struct drm_i915_gem_object *obj,
bool *active)
{
@@ -584,15 +576,11 @@ __i915_gem_userptr_get_pages_schedule(struct drm_i915_gem_object *obj,
* that error back to this function through
* obj->userptr.work = ERR_PTR.
*/
- if (obj->userptr.workers >= I915_GEM_USERPTR_MAX_WORKERS)
- return -EAGAIN;
-
work = kmalloc(sizeof(*work), GFP_KERNEL);
if (work == NULL)
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
obj->userptr.work = &work->work;
- obj->userptr.workers++;
work->obj = i915_gem_object_get(obj);
@@ -603,14 +591,15 @@ __i915_gem_userptr_get_pages_schedule(struct drm_i915_gem_object *obj,
schedule_work(&work->work);
*active = true;
- return -EAGAIN;
+ return ERR_PTR(-EAGAIN);
}
-static int
+static struct sg_table *
i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
{
const int num_pages = obj->base.size >> PAGE_SHIFT;
struct page **pvec;
+ struct sg_table *pages;
int pinned, ret;
bool active;
@@ -634,15 +623,15 @@ i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
if (obj->userptr.work) {
/* active flag should still be held for the pending work */
if (IS_ERR(obj->userptr.work))
- return PTR_ERR(obj->userptr.work);
+ return ERR_CAST(obj->userptr.work);
else
- return -EAGAIN;
+ return ERR_PTR(-EAGAIN);
}
/* Let the mmu-notifier know that we have begun and need cancellation */
ret = __i915_gem_userptr_set_active(obj, true);
if (ret)
- return ret;
+ return ERR_PTR(ret);
pvec = NULL;
pinned = 0;
@@ -651,7 +640,7 @@ i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
GFP_TEMPORARY);
if (pvec == NULL) {
__i915_gem_userptr_set_active(obj, false);
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
}
pinned = __get_user_pages_fast(obj->userptr.ptr, num_pages,
@@ -660,21 +649,22 @@ i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
active = false;
if (pinned < 0)
- ret = pinned, pinned = 0;
+ pages = ERR_PTR(pinned), pinned = 0;
else if (pinned < num_pages)
- ret = __i915_gem_userptr_get_pages_schedule(obj, &active);
+ pages = __i915_gem_userptr_get_pages_schedule(obj, &active);
else
- ret = __i915_gem_userptr_set_pages(obj, pvec, num_pages);
- if (ret) {
+ pages = __i915_gem_userptr_set_pages(obj, pvec, num_pages);
+ if (IS_ERR(pages)) {
__i915_gem_userptr_set_active(obj, active);
release_pages(pvec, pinned, 0);
}
drm_free_large(pvec);
- return ret;
+ return pages;
}
static void
-i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj)
+i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj,
+ struct sg_table *pages)
{
struct sgt_iter sgt_iter;
struct page *page;
@@ -682,22 +672,22 @@ i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj)
BUG_ON(obj->userptr.work != NULL);
__i915_gem_userptr_set_active(obj, false);
- if (obj->madv != I915_MADV_WILLNEED)
- obj->dirty = 0;
+ if (obj->mm.madv != I915_MADV_WILLNEED)
+ obj->mm.dirty = false;
- i915_gem_gtt_finish_object(obj);
+ i915_gem_gtt_finish_pages(obj, pages);
- for_each_sgt_page(page, sgt_iter, obj->pages) {
- if (obj->dirty)
+ for_each_sgt_page(page, sgt_iter, pages) {
+ if (obj->mm.dirty)
set_page_dirty(page);
mark_page_accessed(page);
put_page(page);
}
- obj->dirty = 0;
+ obj->mm.dirty = false;
- sg_free_table(obj->pages);
- kfree(obj->pages);
+ sg_free_table(pages);
+ kfree(pages);
}
static void
@@ -717,7 +707,8 @@ i915_gem_userptr_dmabuf_export(struct drm_i915_gem_object *obj)
}
static const struct drm_i915_gem_object_ops i915_gem_userptr_ops = {
- .flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE,
+ .flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE |
+ I915_GEM_OBJECT_IS_SHRINKABLE,
.get_pages = i915_gem_userptr_get_pages,
.put_pages = i915_gem_userptr_put_pages,
.dmabuf_export = i915_gem_userptr_dmabuf_export,
@@ -762,12 +753,13 @@ static const struct drm_i915_gem_object_ops i915_gem_userptr_ops = {
int
i915_gem_userptr_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
{
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_i915_gem_userptr *args = data;
struct drm_i915_gem_object *obj;
int ret;
u32 handle;
- if (!HAS_LLC(dev) && !HAS_SNOOP(dev)) {
+ if (!HAS_LLC(dev_priv) && !HAS_SNOOP(dev_priv)) {
/* We cannot support coherent userptr objects on hw without
* LLC and broken snooping.
*/
@@ -816,7 +808,7 @@ i915_gem_userptr_ioctl(struct drm_device *dev, void *data, struct drm_file *file
ret = drm_gem_handle_create(file, &obj->base, &handle);
/* drop reference from allocate - handle holds it now */
- i915_gem_object_put_unlocked(obj);
+ i915_gem_object_put(obj);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index 334f15df7c8d..ae84aa4b1467 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -28,6 +28,8 @@
*/
#include <generated/utsrelease.h>
+#include <linux/stop_machine.h>
+#include <linux/zlib.h>
#include "i915_drv.h"
static const char *engine_str(int engine)
@@ -172,6 +174,110 @@ static void i915_error_puts(struct drm_i915_error_state_buf *e,
#define err_printf(e, ...) i915_error_printf(e, __VA_ARGS__)
#define err_puts(e, s) i915_error_puts(e, s)
+#ifdef CONFIG_DRM_I915_COMPRESS_ERROR
+
+static bool compress_init(struct z_stream_s *zstream)
+{
+ memset(zstream, 0, sizeof(*zstream));
+
+ zstream->workspace =
+ kmalloc(zlib_deflate_workspacesize(MAX_WBITS, MAX_MEM_LEVEL),
+ GFP_ATOMIC | __GFP_NOWARN);
+ if (!zstream->workspace)
+ return false;
+
+ if (zlib_deflateInit(zstream, Z_DEFAULT_COMPRESSION) != Z_OK) {
+ kfree(zstream->workspace);
+ return false;
+ }
+
+ return true;
+}
+
+static int compress_page(struct z_stream_s *zstream,
+ void *src,
+ struct drm_i915_error_object *dst)
+{
+ zstream->next_in = src;
+ zstream->avail_in = PAGE_SIZE;
+
+ do {
+ if (zstream->avail_out == 0) {
+ unsigned long page;
+
+ page = __get_free_page(GFP_ATOMIC | __GFP_NOWARN);
+ if (!page)
+ return -ENOMEM;
+
+ dst->pages[dst->page_count++] = (void *)page;
+
+ zstream->next_out = (void *)page;
+ zstream->avail_out = PAGE_SIZE;
+ }
+
+ if (zlib_deflate(zstream, Z_SYNC_FLUSH) != Z_OK)
+ return -EIO;
+ } while (zstream->avail_in);
+
+ /* Fallback to uncompressed if we increase size? */
+ if (0 && zstream->total_out > zstream->total_in)
+ return -E2BIG;
+
+ return 0;
+}
+
+static void compress_fini(struct z_stream_s *zstream,
+ struct drm_i915_error_object *dst)
+{
+ if (dst) {
+ zlib_deflate(zstream, Z_FINISH);
+ dst->unused = zstream->avail_out;
+ }
+
+ zlib_deflateEnd(zstream);
+ kfree(zstream->workspace);
+}
+
+static void err_compression_marker(struct drm_i915_error_state_buf *m)
+{
+ err_puts(m, ":");
+}
+
+#else
+
+static bool compress_init(struct z_stream_s *zstream)
+{
+ return true;
+}
+
+static int compress_page(struct z_stream_s *zstream,
+ void *src,
+ struct drm_i915_error_object *dst)
+{
+ unsigned long page;
+
+ page = __get_free_page(GFP_ATOMIC | __GFP_NOWARN);
+ if (!page)
+ return -ENOMEM;
+
+ dst->pages[dst->page_count++] =
+ memcpy((void *)page, src, PAGE_SIZE);
+
+ return 0;
+}
+
+static void compress_fini(struct z_stream_s *zstream,
+ struct drm_i915_error_object *dst)
+{
+}
+
+static void err_compression_marker(struct drm_i915_error_state_buf *m)
+{
+ err_puts(m, "~");
+}
+
+#endif
+
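Whichever variant is built, the single marker character emitted before the object contents tells a consumer of the error state how to interpret what follows; a hedged sketch of the consumer-side branch:

        /* ':' marks zlib-deflated ascii85, '~' marks raw ascii85 */
        bool compressed = (*buf == ':');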
static void print_error_buffers(struct drm_i915_error_state_buf *m,
const char *name,
struct drm_i915_error_buffer *err,
@@ -228,13 +334,57 @@ static const char *hangcheck_action_to_str(enum intel_engine_hangcheck_action a)
return "unknown";
}
+static void error_print_instdone(struct drm_i915_error_state_buf *m,
+ struct drm_i915_error_engine *ee)
+{
+ int slice;
+ int subslice;
+
+ err_printf(m, " INSTDONE: 0x%08x\n",
+ ee->instdone.instdone);
+
+ if (ee->engine_id != RCS || INTEL_GEN(m->i915) <= 3)
+ return;
+
+ err_printf(m, " SC_INSTDONE: 0x%08x\n",
+ ee->instdone.slice_common);
+
+ if (INTEL_GEN(m->i915) <= 6)
+ return;
+
+ for_each_instdone_slice_subslice(m->i915, slice, subslice)
+ err_printf(m, " SAMPLER_INSTDONE[%d][%d]: 0x%08x\n",
+ slice, subslice,
+ ee->instdone.sampler[slice][subslice]);
+
+ for_each_instdone_slice_subslice(m->i915, slice, subslice)
+ err_printf(m, " ROW_INSTDONE[%d][%d]: 0x%08x\n",
+ slice, subslice,
+ ee->instdone.row[slice][subslice]);
+}
+
+static void error_print_request(struct drm_i915_error_state_buf *m,
+ const char *prefix,
+ struct drm_i915_error_request *erq)
+{
+ if (!erq->seqno)
+ return;
+
+ err_printf(m, "%s pid %d, seqno %8x:%08x, emitted %dms ago, head %08x, tail %08x\n",
+ prefix, erq->pid,
+ erq->context, erq->seqno,
+ jiffies_to_msecs(jiffies - erq->jiffies),
+ erq->head, erq->tail);
+}
+
static void error_print_engine(struct drm_i915_error_state_buf *m,
struct drm_i915_error_engine *ee)
{
err_printf(m, "%s command stream:\n", engine_str(ee->engine_id));
err_printf(m, " START: 0x%08x\n", ee->start);
- err_printf(m, " HEAD: 0x%08x\n", ee->head);
- err_printf(m, " TAIL: 0x%08x\n", ee->tail);
+ err_printf(m, " HEAD: 0x%08x [0x%08x]\n", ee->head, ee->rq_head);
+ err_printf(m, " TAIL: 0x%08x [0x%08x, 0x%08x]\n",
+ ee->tail, ee->rq_post, ee->rq_tail);
err_printf(m, " CTL: 0x%08x\n", ee->ctl);
err_printf(m, " MODE: 0x%08x\n", ee->mode);
err_printf(m, " HWS: 0x%08x\n", ee->hws);
@@ -242,7 +392,9 @@ static void error_print_engine(struct drm_i915_error_state_buf *m,
(u32)(ee->acthd>>32), (u32)ee->acthd);
err_printf(m, " IPEIR: 0x%08x\n", ee->ipeir);
err_printf(m, " IPEHR: 0x%08x\n", ee->ipehr);
- err_printf(m, " INSTDONE: 0x%08x\n", ee->instdone);
+
+ error_print_instdone(m, ee);
+
if (ee->batchbuffer) {
u64 start = ee->batchbuffer->gtt_offset;
u64 end = start + ee->batchbuffer->gtt_size;
@@ -263,17 +415,13 @@ static void error_print_engine(struct drm_i915_error_state_buf *m,
if (INTEL_GEN(m->i915) >= 6) {
err_printf(m, " RC PSMI: 0x%08x\n", ee->rc_psmi);
err_printf(m, " FAULT_REG: 0x%08x\n", ee->fault_reg);
- err_printf(m, " SYNC_0: 0x%08x [last synced 0x%08x]\n",
- ee->semaphore_mboxes[0],
- ee->semaphore_seqno[0]);
- err_printf(m, " SYNC_1: 0x%08x [last synced 0x%08x]\n",
- ee->semaphore_mboxes[1],
- ee->semaphore_seqno[1]);
- if (HAS_VEBOX(m->i915)) {
- err_printf(m, " SYNC_2: 0x%08x [last synced 0x%08x]\n",
- ee->semaphore_mboxes[2],
- ee->semaphore_seqno[2]);
- }
+ err_printf(m, " SYNC_0: 0x%08x\n",
+ ee->semaphore_mboxes[0]);
+ err_printf(m, " SYNC_1: 0x%08x\n",
+ ee->semaphore_mboxes[1]);
+ if (HAS_VEBOX(m->i915))
+ err_printf(m, " SYNC_2: 0x%08x\n",
+ ee->semaphore_mboxes[2]);
}
if (USES_PPGTT(m->i915)) {
err_printf(m, " GFX_MODE: 0x%08x\n", ee->vm_info.gfx_mode);
@@ -296,6 +444,8 @@ static void error_print_engine(struct drm_i915_error_state_buf *m,
err_printf(m, " hangcheck: %s [%d]\n",
hangcheck_action_to_str(ee->hangcheck_action),
ee->hangcheck_score);
+ error_print_request(m, " ELSP[0]: ", &ee->execlist[0]);
+ error_print_request(m, " ELSP[1]: ", &ee->execlist[1]);
}
void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...)
@@ -307,40 +457,83 @@ void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...)
va_end(args);
}
+static int
+ascii85_encode_len(int len)
+{
+ return DIV_ROUND_UP(len, 4);
+}
+
+static bool
+ascii85_encode(u32 in, char *out)
+{
+ int i;
+
+ if (in == 0)
+ return false;
+
+ out[5] = '\0';
+ for (i = 5; i--; ) {
+ out[i] = '!' + in % 85;
+ in /= 85;
+ }
+
+ return true;
+}
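/*
 * Illustrative sketch, not part of the patch: the inverse of
 * ascii85_encode() above, as a hypothetical dump consumer would need it.
 * 'z' is the shorthand the caller emits when the encoder returns false for
 * an all-zero word; otherwise five characters starting at '!' encode one
 * u32, most significant digit first.
 */
#include <stdint.h>

static int ascii85_decode(const char *in, uint32_t *out)
{
	uint32_t v = 0;
	int i;

	if (*in == 'z') {	/* shorthand for an all-zero word */
		*out = 0;
		return 1;	/* consumed one character */
	}

	for (i = 0; i < 5; i++)
		v = v * 85 + (in[i] - '!');

	*out = v;
	return 5;		/* consumed five characters */
}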
+
static void print_error_obj(struct drm_i915_error_state_buf *m,
+ struct intel_engine_cs *engine,
+ const char *name,
struct drm_i915_error_object *obj)
{
- int page, offset, elt;
+ char out[6];
+ int page;
+
+ if (!obj)
+ return;
- for (page = offset = 0; page < obj->page_count; page++) {
- for (elt = 0; elt < PAGE_SIZE/4; elt++) {
- err_printf(m, "%08x : %08x\n", offset,
- obj->pages[page][elt]);
- offset += 4;
+ if (name) {
+ err_printf(m, "%s --- %s = 0x%08x %08x\n",
+ engine ? engine->name : "global", name,
+ upper_32_bits(obj->gtt_offset),
+ lower_32_bits(obj->gtt_offset));
+ }
+
+ err_compression_marker(m);
+ for (page = 0; page < obj->page_count; page++) {
+ int i, len;
+
+ len = PAGE_SIZE;
+ if (page == obj->page_count - 1)
+ len -= obj->unused;
+ len = ascii85_encode_len(len);
+
+ for (i = 0; i < len; i++) {
+ if (ascii85_encode(obj->pages[page][i], out))
+ err_puts(m, out);
+ else
+ err_puts(m, "z");
}
}
+ err_puts(m, "\n");
}
static void err_print_capabilities(struct drm_i915_error_state_buf *m,
const struct intel_device_info *info)
{
#define PRINT_FLAG(x) err_printf(m, #x ": %s\n", yesno(info->x))
-#define SEP_SEMICOLON ;
- DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG, SEP_SEMICOLON);
+ DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG);
#undef PRINT_FLAG
-#undef SEP_SEMICOLON
}
int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
const struct i915_error_state_file_priv *error_priv)
{
- struct drm_device *dev = error_priv->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(error_priv->dev);
struct pci_dev *pdev = dev_priv->drm.pdev;
struct drm_i915_error_state *error = error_priv->error;
struct drm_i915_error_object *obj;
- int i, j, offset, elt;
int max_hangcheck_score;
+ int i, j;
if (!error) {
err_printf(m, "no error state collected\n");
@@ -348,9 +541,13 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
}
err_printf(m, "%s\n", error->error_msg);
- err_printf(m, "Time: %ld s %ld us\n", error->time.tv_sec,
- error->time.tv_usec);
err_printf(m, "Kernel: " UTS_RELEASE "\n");
+ err_printf(m, "Time: %ld s %ld us\n",
+ error->time.tv_sec, error->time.tv_usec);
+ err_printf(m, "Boottime: %ld s %ld us\n",
+ error->boottime.tv_sec, error->boottime.tv_usec);
+ err_printf(m, "Uptime: %ld s %ld us\n",
+ error->uptime.tv_sec, error->uptime.tv_usec);
err_print_capabilities(m, &error->device_info);
max_hangcheck_score = 0;
for (i = 0; i < ARRAY_SIZE(error->engine); i++) {
@@ -375,7 +572,7 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
pdev->subsystem_device);
err_printf(m, "IOMMU enabled?: %d\n", error->iommu);
- if (HAS_CSR(dev)) {
+ if (HAS_CSR(dev_priv)) {
struct intel_csr *csr = &dev_priv->csr;
err_printf(m, "DMC loaded: %s\n",
@@ -387,11 +584,11 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
err_printf(m, "EIR: 0x%08x\n", error->eir);
err_printf(m, "IER: 0x%08x\n", error->ier);
- if (INTEL_INFO(dev)->gen >= 8) {
+ if (INTEL_GEN(dev_priv) >= 8) {
for (i = 0; i < 4; i++)
err_printf(m, "GTIER gt %d: 0x%08x\n", i,
error->gtier[i]);
- } else if (HAS_PCH_SPLIT(dev) || IS_VALLEYVIEW(dev))
+ } else if (HAS_PCH_SPLIT(dev_priv) || IS_VALLEYVIEW(dev_priv))
err_printf(m, "GTIER: 0x%08x\n", error->gtier[0]);
err_printf(m, "PGTBL_ER: 0x%08x\n", error->pgtbl_er);
err_printf(m, "FORCEWAKE: 0x%08x\n", error->forcewake);
@@ -402,21 +599,17 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
for (i = 0; i < dev_priv->num_fence_regs; i++)
err_printf(m, " fence[%d] = %08llx\n", i, error->fence[i]);
- for (i = 0; i < ARRAY_SIZE(error->extra_instdone); i++)
- err_printf(m, " INSTDONE_%d: 0x%08x\n", i,
- error->extra_instdone[i]);
-
- if (INTEL_INFO(dev)->gen >= 6) {
+ if (INTEL_GEN(dev_priv) >= 6) {
err_printf(m, "ERROR: 0x%08x\n", error->error);
- if (INTEL_INFO(dev)->gen >= 8)
+ if (INTEL_GEN(dev_priv) >= 8)
err_printf(m, "FAULT_TLB_DATA: 0x%08x 0x%08x\n",
error->fault_data1, error->fault_data0);
err_printf(m, "DONE_REG: 0x%08x\n", error->done_reg);
}
- if (IS_GEN7(dev))
+ if (IS_GEN7(dev_priv))
err_printf(m, "ERR_INT: 0x%08x\n", error->err_int);
for (i = 0; i < ARRAY_SIZE(error->engine); i++) {
@@ -438,7 +631,7 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
len += scnprintf(buf + len, sizeof(buf), "%s%s",
first ? "" : ", ",
- dev_priv->engine[j].name);
+ dev_priv->engine[j]->name);
first = 0;
}
scnprintf(buf + len, sizeof(buf), ")");
@@ -456,7 +649,7 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
obj = ee->batchbuffer;
if (obj) {
- err_puts(m, dev_priv->engine[i].name);
+ err_puts(m, dev_priv->engine[i]->name);
if (ee->pid != -1)
err_printf(m, " (submitted by %s [%d])",
ee->comm,
@@ -464,37 +657,23 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
err_printf(m, " --- gtt_offset = 0x%08x %08x\n",
upper_32_bits(obj->gtt_offset),
lower_32_bits(obj->gtt_offset));
- print_error_obj(m, obj);
- }
-
- obj = ee->wa_batchbuffer;
- if (obj) {
- err_printf(m, "%s (w/a) --- gtt_offset = 0x%08x\n",
- dev_priv->engine[i].name,
- lower_32_bits(obj->gtt_offset));
- print_error_obj(m, obj);
+ print_error_obj(m, dev_priv->engine[i], NULL, obj);
}
if (ee->num_requests) {
err_printf(m, "%s --- %d requests\n",
- dev_priv->engine[i].name,
+ dev_priv->engine[i]->name,
ee->num_requests);
- for (j = 0; j < ee->num_requests; j++) {
- err_printf(m, " pid %d, seqno 0x%08x, emitted %ld, head 0x%08x, tail 0x%08x\n",
- ee->requests[j].pid,
- ee->requests[j].seqno,
- ee->requests[j].jiffies,
- ee->requests[j].head,
- ee->requests[j].tail);
- }
+ for (j = 0; j < ee->num_requests; j++)
+ error_print_request(m, " ", &ee->requests[j]);
}
if (IS_ERR(ee->waiters)) {
err_printf(m, "%s --- ? waiters [unable to acquire spinlock]\n",
- dev_priv->engine[i].name);
+ dev_priv->engine[i]->name);
} else if (ee->num_waiters) {
err_printf(m, "%s --- %d waiters\n",
- dev_priv->engine[i].name,
+ dev_priv->engine[i]->name,
ee->num_waiters);
for (j = 0; j < ee->num_waiters; j++) {
err_printf(m, " seqno 0x%08x for %s [%d]\n",
@@ -504,83 +683,31 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
}
}
- if ((obj = ee->ringbuffer)) {
- err_printf(m, "%s --- ringbuffer = 0x%08x\n",
- dev_priv->engine[i].name,
- lower_32_bits(obj->gtt_offset));
- print_error_obj(m, obj);
- }
+ print_error_obj(m, dev_priv->engine[i],
+ "ringbuffer", ee->ringbuffer);
- if ((obj = ee->hws_page)) {
- u64 hws_offset = obj->gtt_offset;
- u32 *hws_page = &obj->pages[0][0];
+ print_error_obj(m, dev_priv->engine[i],
+ "HW Status", ee->hws_page);
- if (i915.enable_execlists) {
- hws_offset += LRC_PPHWSP_PN * PAGE_SIZE;
- hws_page = &obj->pages[LRC_PPHWSP_PN][0];
- }
- err_printf(m, "%s --- HW Status = 0x%08llx\n",
- dev_priv->engine[i].name, hws_offset);
- offset = 0;
- for (elt = 0; elt < PAGE_SIZE/16; elt += 4) {
- err_printf(m, "[%04x] %08x %08x %08x %08x\n",
- offset,
- hws_page[elt],
- hws_page[elt+1],
- hws_page[elt+2],
- hws_page[elt+3]);
- offset += 16;
- }
- }
+ print_error_obj(m, dev_priv->engine[i],
+ "HW context", ee->ctx);
- obj = ee->wa_ctx;
- if (obj) {
- u64 wa_ctx_offset = obj->gtt_offset;
- u32 *wa_ctx_page = &obj->pages[0][0];
- struct intel_engine_cs *engine = &dev_priv->engine[RCS];
- u32 wa_ctx_size = (engine->wa_ctx.indirect_ctx.size +
- engine->wa_ctx.per_ctx.size);
-
- err_printf(m, "%s --- WA ctx batch buffer = 0x%08llx\n",
- dev_priv->engine[i].name, wa_ctx_offset);
- offset = 0;
- for (elt = 0; elt < wa_ctx_size; elt += 4) {
- err_printf(m, "[%04x] %08x %08x %08x %08x\n",
- offset,
- wa_ctx_page[elt + 0],
- wa_ctx_page[elt + 1],
- wa_ctx_page[elt + 2],
- wa_ctx_page[elt + 3]);
- offset += 16;
- }
- }
+ print_error_obj(m, dev_priv->engine[i],
+ "WA context", ee->wa_ctx);
- if ((obj = ee->ctx)) {
- err_printf(m, "%s --- HW Context = 0x%08x\n",
- dev_priv->engine[i].name,
- lower_32_bits(obj->gtt_offset));
- print_error_obj(m, obj);
- }
+ print_error_obj(m, dev_priv->engine[i],
+ "WA batchbuffer", ee->wa_batchbuffer);
}
- if ((obj = error->semaphore)) {
- err_printf(m, "Semaphore page = 0x%08x\n",
- lower_32_bits(obj->gtt_offset));
- for (elt = 0; elt < PAGE_SIZE/16; elt += 4) {
- err_printf(m, "[%04x] %08x %08x %08x %08x\n",
- elt * 4,
- obj->pages[0][elt],
- obj->pages[0][elt+1],
- obj->pages[0][elt+2],
- obj->pages[0][elt+3]);
- }
- }
+ print_error_obj(m, NULL, "Semaphores", error->semaphore);
+
+ print_error_obj(m, NULL, "GuC log buffer", error->guc_log);
if (error->overlay)
intel_overlay_print_error_state(m, error->overlay);
if (error->display)
- intel_display_print_error_state(m, dev, error->display);
+ intel_display_print_error_state(m, dev_priv, error->display);
out:
if (m->bytes == 0 && m->err)
@@ -629,7 +756,7 @@ static void i915_error_object_free(struct drm_i915_error_object *obj)
return;
for (page = 0; page < obj->page_count; page++)
- kfree(obj->pages[page]);
+ free_page((unsigned long)obj->pages[page]);
kfree(obj);
}
@@ -656,6 +783,7 @@ static void i915_error_state_free(struct kref *error_ref)
}
i915_error_object_free(error->semaphore);
+ i915_error_object_free(error->guc_log);
for (i = 0; i < ARRAY_SIZE(error->active_bo); i++)
kfree(error->active_bo[i]);
@@ -667,104 +795,63 @@ static void i915_error_state_free(struct kref *error_ref)
}
static struct drm_i915_error_object *
-i915_error_object_create(struct drm_i915_private *dev_priv,
+i915_error_object_create(struct drm_i915_private *i915,
struct i915_vma *vma)
{
- struct i915_ggtt *ggtt = &dev_priv->ggtt;
- struct drm_i915_gem_object *src;
+ struct i915_ggtt *ggtt = &i915->ggtt;
+ const u64 slot = ggtt->error_capture.start;
struct drm_i915_error_object *dst;
- int num_pages;
- bool use_ggtt;
- int i = 0;
- u64 reloc_offset;
+ struct z_stream_s zstream;
+ unsigned long num_pages;
+ struct sgt_iter iter;
+ dma_addr_t dma;
if (!vma)
return NULL;
- src = vma->obj;
- if (!src->pages)
- return NULL;
-
- num_pages = src->base.size >> PAGE_SHIFT;
-
- dst = kmalloc(sizeof(*dst) + num_pages * sizeof(u32 *), GFP_ATOMIC);
+ num_pages = min_t(u64, vma->size, vma->obj->base.size) >> PAGE_SHIFT;
+ num_pages = DIV_ROUND_UP(10 * num_pages, 8); /* worst-case zlib growth */
+ dst = kmalloc(sizeof(*dst) + num_pages * sizeof(u32 *),
+ GFP_ATOMIC | __GFP_NOWARN);
if (!dst)
return NULL;
dst->gtt_offset = vma->node.start;
dst->gtt_size = vma->node.size;
+ dst->page_count = 0;
+ dst->unused = 0;
- reloc_offset = dst->gtt_offset;
- use_ggtt = (src->cache_level == I915_CACHE_NONE &&
- (vma->flags & I915_VMA_GLOBAL_BIND) &&
- reloc_offset + num_pages * PAGE_SIZE <= ggtt->mappable_end);
-
- /* Cannot access stolen address directly, try to use the aperture */
- if (src->stolen) {
- use_ggtt = true;
-
- if (!(vma->flags & I915_VMA_GLOBAL_BIND))
- goto unwind;
-
- reloc_offset = vma->node.start;
- if (reloc_offset + num_pages * PAGE_SIZE > ggtt->mappable_end)
- goto unwind;
+ if (!compress_init(&zstream)) {
+ kfree(dst);
+ return NULL;
}
- /* Cannot access snooped pages through the aperture */
- if (use_ggtt && src->cache_level != I915_CACHE_NONE &&
- !HAS_LLC(dev_priv))
- goto unwind;
-
- dst->page_count = num_pages;
- while (num_pages--) {
- unsigned long flags;
- void *d;
-
- d = kmalloc(PAGE_SIZE, GFP_ATOMIC);
- if (d == NULL)
- goto unwind;
-
- local_irq_save(flags);
- if (use_ggtt) {
- void __iomem *s;
-
- /* Simply ignore tiling or any overlapping fence.
- * It's part of the error state, and this hopefully
- * captures what the GPU read.
- */
-
- s = io_mapping_map_atomic_wc(&ggtt->mappable,
- reloc_offset);
- memcpy_fromio(d, s, PAGE_SIZE);
- io_mapping_unmap_atomic(s);
- } else {
- struct page *page;
- void *s;
-
- page = i915_gem_object_get_page(src, i);
-
- drm_clflush_pages(&page, 1);
+ for_each_sgt_dma(dma, iter, vma->pages) {
+ void __iomem *s;
+ int ret;
- s = kmap_atomic(page);
- memcpy(d, s, PAGE_SIZE);
- kunmap_atomic(s);
+ ggtt->base.insert_page(&ggtt->base, dma, slot,
+ I915_CACHE_NONE, 0);
- drm_clflush_pages(&page, 1);
- }
- local_irq_restore(flags);
+ s = io_mapping_map_atomic_wc(&ggtt->mappable, slot);
+ ret = compress_page(&zstream, (void __force *)s, dst);
+ io_mapping_unmap_atomic(s);
- dst->pages[i++] = d;
- reloc_offset += PAGE_SIZE;
+ if (ret)
+ goto unwind;
}
-
- return dst;
+ goto out;
unwind:
- while (i--)
- kfree(dst->pages[i]);
+ while (dst->page_count--)
+ free_page((unsigned long)dst->pages[dst->page_count]);
kfree(dst);
- return NULL;
+ dst = NULL;
+
+out:
+ compress_fini(&zstream, dst);
+ ggtt->base.clear_range(&ggtt->base, slot, PAGE_SIZE);
+ return dst;
}
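/*
 * Worked check, not part of the patch: the worst-case bound above reserves
 * page-pointer slots for 10/8 (125%) of the source pages, rounded up, since
 * deflate can slightly expand incompressible data.
 */
#include <assert.h>

int main(void)
{
	unsigned long num_pages = 13;			/* example object */
	unsigned long slots = (10 * num_pages + 7) / 8;	/* DIV_ROUND_UP */

	assert(slots == 17);	/* 13 source pages -> 17 slots reserved */
	return 0;
}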
/* The error capture is special as it tries to run underneath the normal
@@ -773,16 +860,19 @@ unwind:
static inline uint32_t
__active_get_seqno(struct i915_gem_active *active)
{
- return i915_gem_request_get_seqno(__i915_gem_active_peek(active));
+ struct drm_i915_gem_request *request;
+
+ request = __i915_gem_active_peek(active);
+ return request ? request->global_seqno : 0;
}
static inline int
__active_get_engine_id(struct i915_gem_active *active)
{
- struct intel_engine_cs *engine;
+ struct drm_i915_gem_request *request;
- engine = i915_gem_request_get_engine(__i915_gem_active_peek(active));
- return engine ? engine->id : -1;
+ request = __i915_gem_active_peek(active);
+ return request ? request->engine->id : -1;
}
static void capture_bo(struct drm_i915_error_buffer *err,
@@ -795,17 +885,17 @@ static void capture_bo(struct drm_i915_error_buffer *err,
err->name = obj->base.name;
for (i = 0; i < I915_NUM_ENGINES; i++)
- err->rseqno[i] = __active_get_seqno(&obj->last_read[i]);
- err->wseqno = __active_get_seqno(&obj->last_write);
- err->engine = __active_get_engine_id(&obj->last_write);
+ err->rseqno[i] = __active_get_seqno(&vma->last_read[i]);
+ err->wseqno = __active_get_seqno(&obj->frontbuffer_write);
+ err->engine = __active_get_engine_id(&obj->frontbuffer_write);
err->gtt_offset = vma->node.start;
err->read_domains = obj->base.read_domains;
err->write_domain = obj->base.write_domain;
err->fence_reg = vma->fence ? vma->fence->id : -1;
err->tiling = i915_gem_object_get_tiling(obj);
- err->dirty = obj->dirty;
- err->purgeable = obj->madv != I915_MADV_WILLNEED;
+ err->dirty = obj->mm.dirty;
+ err->purgeable = obj->mm.madv != I915_MADV_WILLNEED;
err->userptr = obj->userptr.mm != NULL;
err->cache_level = obj->cache_level;
}
@@ -855,7 +945,8 @@ static uint32_t i915_error_generate_code(struct drm_i915_private *dev_priv,
if (engine_id)
*engine_id = i;
- return error->engine[i].ipehr ^ error->engine[i].instdone;
+ return error->engine[i].ipehr ^
+ error->engine[i].instdone.instdone;
}
}
@@ -879,6 +970,26 @@ static void i915_gem_record_fences(struct drm_i915_private *dev_priv,
}
}
+static inline u32
+gen8_engine_sync_index(struct intel_engine_cs *engine,
+ struct intel_engine_cs *other)
+{
+ int idx;
+
+ /*
+ * rcs -> 0 = vcs, 1 = bcs, 2 = vecs, 3 = vcs2;
+ * vcs -> 0 = bcs, 1 = vecs, 2 = vcs2, 3 = rcs;
+ * bcs -> 0 = vecs, 1 = vcs2, 2 = rcs, 3 = vcs;
+ * vecs -> 0 = vcs2, 1 = rcs, 2 = vcs, 3 = bcs;
+ * vcs2 -> 0 = rcs, 1 = vcs, 2 = bcs, 3 = vecs;
+ */
+
+ idx = (other - engine) - 1;
+ if (idx < 0)
+ idx += I915_NUM_ENGINES;
+
+ return idx;
+}
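/*
 * Worked example, not part of the patch: with engine ids rcs=0, vcs=1,
 * bcs=2, vecs=3, vcs2=4 and I915_NUM_ENGINES == 5, the pointer difference
 * above reduces to an id difference when the engines sit in one array.
 * A standalone equivalent:
 */
static unsigned int sync_index(int engine_id, int other_id)
{
	int idx = (other_id - engine_id) - 1;

	if (idx < 0)
		idx += 5;	/* I915_NUM_ENGINES */

	return idx;
}

/* e.g. sync_index(2, 0) == 2: from bcs, slot 2 signals rcs, per the table. */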
static void gen8_record_semaphore_state(struct drm_i915_error_state *error,
struct intel_engine_cs *engine,
@@ -891,7 +1002,7 @@ static void gen8_record_semaphore_state(struct drm_i915_error_state *error,
if (!error->semaphore)
return;
- for_each_engine_id(to, dev_priv, id) {
+ for_each_engine(to, dev_priv, id) {
int idx;
u16 signal_offset;
u32 *tmp;
@@ -902,10 +1013,9 @@ static void gen8_record_semaphore_state(struct drm_i915_error_state *error,
signal_offset =
(GEN8_SIGNAL_OFFSET(engine, id) & (PAGE_SIZE - 1)) / 4;
tmp = error->semaphore->pages[0];
- idx = intel_engine_sync_index(engine, to);
+ idx = gen8_engine_sync_index(engine, to);
ee->semaphore_mboxes[idx] = tmp[signal_offset];
- ee->semaphore_seqno[idx] = engine->semaphore.sync_seqno[idx];
}
}
@@ -916,14 +1026,9 @@ static void gen6_record_semaphore_state(struct intel_engine_cs *engine,
ee->semaphore_mboxes[0] = I915_READ(RING_SYNC_0(engine->mmio_base));
ee->semaphore_mboxes[1] = I915_READ(RING_SYNC_1(engine->mmio_base));
- ee->semaphore_seqno[0] = engine->semaphore.sync_seqno[0];
- ee->semaphore_seqno[1] = engine->semaphore.sync_seqno[1];
-
- if (HAS_VEBOX(dev_priv)) {
+ if (HAS_VEBOX(dev_priv))
ee->semaphore_mboxes[2] =
I915_READ(RING_SYNC_2(engine->mmio_base));
- ee->semaphore_seqno[2] = engine->semaphore.sync_seqno[2];
- }
}
static void error_record_engine_waiters(struct intel_engine_cs *engine,
@@ -940,7 +1045,7 @@ static void error_record_engine_waiters(struct intel_engine_cs *engine,
if (RB_EMPTY_ROOT(&b->waiters))
return;
- if (!spin_trylock(&b->lock)) {
+ if (!spin_trylock_irq(&b->lock)) {
ee->waiters = ERR_PTR(-EDEADLK);
return;
}
@@ -948,7 +1053,7 @@ static void error_record_engine_waiters(struct intel_engine_cs *engine,
count = 0;
for (rb = rb_first(&b->waiters); rb != NULL; rb = rb_next(rb))
count++;
- spin_unlock(&b->lock);
+ spin_unlock_irq(&b->lock);
waiter = NULL;
if (count)
@@ -958,7 +1063,7 @@ static void error_record_engine_waiters(struct intel_engine_cs *engine,
if (!waiter)
return;
- if (!spin_trylock(&b->lock)) {
+ if (!spin_trylock_irq(&b->lock)) {
kfree(waiter);
ee->waiters = ERR_PTR(-EDEADLK);
return;
@@ -976,7 +1081,7 @@ static void error_record_engine_waiters(struct intel_engine_cs *engine,
if (++ee->num_waiters == count)
break;
}
- spin_unlock(&b->lock);
+ spin_unlock_irq(&b->lock);
}
static void error_record_engine_registers(struct drm_i915_error_state *error,
@@ -998,7 +1103,6 @@ static void error_record_engine_registers(struct drm_i915_error_state *error,
ee->faddr = I915_READ(RING_DMA_FADD(engine->mmio_base));
ee->ipeir = I915_READ(RING_IPEIR(engine->mmio_base));
ee->ipehr = I915_READ(RING_IPEHR(engine->mmio_base));
- ee->instdone = I915_READ(RING_INSTDONE(engine->mmio_base));
ee->instps = I915_READ(RING_INSTPS(engine->mmio_base));
ee->bbaddr = I915_READ(RING_BBADDR(engine->mmio_base));
if (INTEL_GEN(dev_priv) >= 8) {
@@ -1010,14 +1114,15 @@ static void error_record_engine_registers(struct drm_i915_error_state *error,
ee->faddr = I915_READ(DMA_FADD_I8XX);
ee->ipeir = I915_READ(IPEIR);
ee->ipehr = I915_READ(IPEHR);
- ee->instdone = I915_READ(GEN2_INSTDONE);
}
+ intel_engine_get_instdone(engine, &ee->instdone);
+
ee->waiting = intel_engine_has_waiter(engine);
ee->instpm = I915_READ(RING_INSTPM(engine->mmio_base));
ee->acthd = intel_engine_get_active_head(engine);
ee->seqno = intel_engine_get_seqno(engine);
- ee->last_seqno = engine->last_submitted_seqno;
+ ee->last_seqno = intel_engine_last_submit(engine);
ee->start = I915_READ_START(engine);
ee->head = I915_READ_HEAD(engine);
ee->tail = I915_READ_TAIL(engine);
@@ -1079,6 +1184,20 @@ static void error_record_engine_registers(struct drm_i915_error_state *error,
}
}
+static void record_request(struct drm_i915_gem_request *request,
+ struct drm_i915_error_request *erq)
+{
+ erq->context = request->ctx->hw_id;
+ erq->seqno = request->global_seqno;
+ erq->jiffies = request->emitted_jiffies;
+ erq->head = request->head;
+ erq->tail = request->tail;
+
+ rcu_read_lock();
+ erq->pid = request->ctx->pid ? pid_nr(request->ctx->pid) : 0;
+ rcu_read_unlock();
+}
+
static void engine_record_requests(struct intel_engine_cs *engine,
struct drm_i915_gem_request *first,
struct drm_i915_error_engine *ee)
@@ -1088,7 +1207,7 @@ static void engine_record_requests(struct intel_engine_cs *engine,
count = 0;
request = first;
- list_for_each_entry_from(request, &engine->request_list, link)
+ list_for_each_entry_from(request, &engine->timeline->requests, link)
count++;
if (!count)
return;
@@ -1101,9 +1220,7 @@ static void engine_record_requests(struct intel_engine_cs *engine,
count = 0;
request = first;
- list_for_each_entry_from(request, &engine->request_list, link) {
- struct drm_i915_error_request *erq;
-
+ list_for_each_entry_from(request, &engine->timeline->requests, link) {
if (count >= ee->num_requests) {
/*
* If the ring request list was changed in
@@ -1123,19 +1240,22 @@ static void engine_record_requests(struct intel_engine_cs *engine,
break;
}
- erq = &ee->requests[count++];
- erq->seqno = request->fence.seqno;
- erq->jiffies = request->emitted_jiffies;
- erq->head = request->head;
- erq->tail = request->tail;
-
- rcu_read_lock();
- erq->pid = request->ctx->pid ? pid_nr(request->ctx->pid) : 0;
- rcu_read_unlock();
+ record_request(request, &ee->requests[count++]);
}
ee->num_requests = count;
}
+static void error_record_engine_execlists(struct intel_engine_cs *engine,
+ struct drm_i915_error_engine *ee)
+{
+ unsigned int n;
+
+ for (n = 0; n < ARRAY_SIZE(engine->execlist_port); n++)
+ if (engine->execlist_port[n].request)
+ record_request(engine->execlist_port[n].request,
+ &ee->execlist[n]);
+}
+
static void i915_gem_record_rings(struct drm_i915_private *dev_priv,
struct drm_i915_error_state *error)
{
@@ -1146,20 +1266,21 @@ static void i915_gem_record_rings(struct drm_i915_private *dev_priv,
i915_error_object_create(dev_priv, dev_priv->semaphore);
for (i = 0; i < I915_NUM_ENGINES; i++) {
- struct intel_engine_cs *engine = &dev_priv->engine[i];
+ struct intel_engine_cs *engine = dev_priv->engine[i];
struct drm_i915_error_engine *ee = &error->engine[i];
struct drm_i915_gem_request *request;
ee->pid = -1;
ee->engine_id = -1;
- if (!intel_engine_initialized(engine))
+ if (!engine)
continue;
ee->engine_id = i;
error_record_engine_registers(error, engine, ee);
error_record_engine_waiters(engine, ee);
+ error_record_engine_execlists(engine, ee);
request = i915_gem_find_active_request(engine);
if (request) {
@@ -1202,6 +1323,10 @@ static void i915_gem_record_rings(struct drm_i915_private *dev_priv,
error->simulated |=
request->ctx->flags & CONTEXT_NO_ERROR_CAPTURE;
+ ee->rq_head = request->head;
+ ee->rq_post = request->postfix;
+ ee->rq_tail = request->tail;
+
ring = request->ring;
ee->cpu_ring_head = ring->head;
ee->cpu_ring_tail = ring->tail;
@@ -1302,11 +1427,21 @@ static void i915_capture_pinned_buffers(struct drm_i915_private *dev_priv,
error->pinned_bo = bo;
}
+static void i915_gem_capture_guc_log_buffer(struct drm_i915_private *dev_priv,
+ struct drm_i915_error_state *error)
+{
+ /* Capturing log buf contents won't be useful if logging was disabled */
+ if (!dev_priv->guc.log.vma || (i915.guc_log_level < 0))
+ return;
+
+ error->guc_log = i915_error_object_create(dev_priv,
+ dev_priv->guc.log.vma);
+}
+
/* Capture all registers which don't fit into another category. */
static void i915_capture_reg_state(struct drm_i915_private *dev_priv,
struct drm_i915_error_state *error)
{
- struct drm_device *dev = &dev_priv->drm;
int i;
/* General organization
@@ -1318,62 +1453,60 @@ static void i915_capture_reg_state(struct drm_i915_private *dev_priv,
*/
/* 1: Registers specific to a single generation */
- if (IS_VALLEYVIEW(dev)) {
+ if (IS_VALLEYVIEW(dev_priv)) {
error->gtier[0] = I915_READ(GTIER);
error->ier = I915_READ(VLV_IER);
error->forcewake = I915_READ_FW(FORCEWAKE_VLV);
}
- if (IS_GEN7(dev))
+ if (IS_GEN7(dev_priv))
error->err_int = I915_READ(GEN7_ERR_INT);
- if (INTEL_INFO(dev)->gen >= 8) {
+ if (INTEL_GEN(dev_priv) >= 8) {
error->fault_data0 = I915_READ(GEN8_FAULT_TLB_DATA0);
error->fault_data1 = I915_READ(GEN8_FAULT_TLB_DATA1);
}
- if (IS_GEN6(dev)) {
+ if (IS_GEN6(dev_priv)) {
error->forcewake = I915_READ_FW(FORCEWAKE);
error->gab_ctl = I915_READ(GAB_CTL);
error->gfx_mode = I915_READ(GFX_MODE);
}
/* 2: Registers which belong to multiple generations */
- if (INTEL_INFO(dev)->gen >= 7)
+ if (INTEL_GEN(dev_priv) >= 7)
error->forcewake = I915_READ_FW(FORCEWAKE_MT);
- if (INTEL_INFO(dev)->gen >= 6) {
+ if (INTEL_GEN(dev_priv) >= 6) {
error->derrmr = I915_READ(DERRMR);
error->error = I915_READ(ERROR_GEN6);
error->done_reg = I915_READ(DONE_REG);
}
/* 3: Feature specific registers */
- if (IS_GEN6(dev) || IS_GEN7(dev)) {
+ if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv)) {
error->gam_ecochk = I915_READ(GAM_ECOCHK);
error->gac_eco = I915_READ(GAC_ECO_BITS);
}
/* 4: Everything else */
- if (HAS_HW_CONTEXTS(dev))
+ if (HAS_HW_CONTEXTS(dev_priv))
error->ccid = I915_READ(CCID);
- if (INTEL_INFO(dev)->gen >= 8) {
+ if (INTEL_GEN(dev_priv) >= 8) {
error->ier = I915_READ(GEN8_DE_MISC_IER);
for (i = 0; i < 4; i++)
error->gtier[i] = I915_READ(GEN8_GT_IER(i));
- } else if (HAS_PCH_SPLIT(dev)) {
+ } else if (HAS_PCH_SPLIT(dev_priv)) {
error->ier = I915_READ(DEIER);
error->gtier[0] = I915_READ(GTIER);
- } else if (IS_GEN2(dev)) {
+ } else if (IS_GEN2(dev_priv)) {
error->ier = I915_READ16(IER);
- } else if (!IS_VALLEYVIEW(dev)) {
+ } else if (!IS_VALLEYVIEW(dev_priv)) {
error->ier = I915_READ(IER);
}
error->eir = I915_READ(EIR);
error->pgtbl_er = I915_READ(PGTBL_ER);
-
- i915_get_extra_instdone(dev_priv, error->extra_instdone);
}
static void i915_error_capture_msg(struct drm_i915_private *dev_priv,
@@ -1418,6 +1551,32 @@ static void i915_capture_gen_state(struct drm_i915_private *dev_priv,
sizeof(error->device_info));
}
+static int capture(void *data)
+{
+ struct drm_i915_error_state *error = data;
+
+ i915_capture_gen_state(error->i915, error);
+ i915_capture_reg_state(error->i915, error);
+ i915_gem_record_fences(error->i915, error);
+ i915_gem_record_rings(error->i915, error);
+ i915_capture_active_buffers(error->i915, error);
+ i915_capture_pinned_buffers(error->i915, error);
+ i915_gem_capture_guc_log_buffer(error->i915, error);
+
+ do_gettimeofday(&error->time);
+ error->boottime = ktime_to_timeval(ktime_get_boottime());
+ error->uptime =
+ ktime_to_timeval(ktime_sub(ktime_get(),
+ error->i915->gt.last_init_time));
+
+ error->overlay = intel_overlay_capture_error_state(error->i915);
+ error->display = intel_display_capture_error_state(error->i915);
+
+ return 0;
+}
+
+#define DAY_AS_SECONDS(x) (24 * 60 * 60 * (x))
+
/**
* i915_capture_error_state - capture an error record for later analysis
* @dev: drm device
@@ -1435,6 +1594,9 @@ void i915_capture_error_state(struct drm_i915_private *dev_priv,
struct drm_i915_error_state *error;
unsigned long flags;
+ if (!i915.error_capture)
+ return;
+
if (READ_ONCE(dev_priv->gpu_error.first_error))
return;
@@ -1446,18 +1608,9 @@ void i915_capture_error_state(struct drm_i915_private *dev_priv,
}
kref_init(&error->ref);
+ error->i915 = dev_priv;
- i915_capture_gen_state(dev_priv, error);
- i915_capture_reg_state(dev_priv, error);
- i915_gem_record_fences(dev_priv, error);
- i915_gem_record_rings(dev_priv, error);
- i915_capture_active_buffers(dev_priv, error);
- i915_capture_pinned_buffers(dev_priv, error);
-
- do_gettimeofday(&error->time);
-
- error->overlay = intel_overlay_capture_error_state(dev_priv);
- error->display = intel_display_capture_error_state(dev_priv);
+ stop_machine(capture, error, NULL);
i915_error_capture_msg(dev_priv, error, engine_mask, error_msg);
DRM_INFO("%s\n", error->error_msg);
@@ -1476,7 +1629,8 @@ void i915_capture_error_state(struct drm_i915_private *dev_priv,
return;
}
- if (!warned) {
+ if (!warned &&
+ ktime_get_real_seconds() - DRIVER_TIMESTAMP < DAY_AS_SECONDS(180)) {
DRM_INFO("GPU hangs can indicate a bug anywhere in the entire gfx stack, including userspace.\n");
DRM_INFO("Please file a _new_ bug report on bugs.freedesktop.org against DRI -> DRM/Intel\n");
DRM_INFO("drm/i915 developers can then reassign to the right component if it's not a kernel issue.\n");
@@ -1497,7 +1651,6 @@ void i915_error_state_get(struct drm_device *dev,
if (error_priv->error)
kref_get(&error_priv->error->ref);
spin_unlock_irq(&dev_priv->gpu_error.lock);
-
}
void i915_error_state_put(struct i915_error_state_file_priv *error_priv)
@@ -1519,33 +1672,3 @@ void i915_destroy_error_state(struct drm_device *dev)
if (error)
kref_put(&error->ref, i915_error_state_free);
}
-
-const char *i915_cache_level_str(struct drm_i915_private *i915, int type)
-{
- switch (type) {
- case I915_CACHE_NONE: return " uncached";
- case I915_CACHE_LLC: return HAS_LLC(i915) ? " LLC" : " snooped";
- case I915_CACHE_L3_LLC: return " L3+LLC";
- case I915_CACHE_WT: return " WT";
- default: return "";
- }
-}
-
-/* NB: please notice the memset */
-void i915_get_extra_instdone(struct drm_i915_private *dev_priv,
- uint32_t *instdone)
-{
- memset(instdone, 0, sizeof(*instdone) * I915_NUM_INSTDONE_REG);
-
- if (IS_GEN2(dev_priv) || IS_GEN3(dev_priv))
- instdone[0] = I915_READ(GEN2_INSTDONE);
- else if (IS_GEN4(dev_priv) || IS_GEN5(dev_priv) || IS_GEN6(dev_priv)) {
- instdone[0] = I915_READ(RING_INSTDONE(RENDER_RING_BASE));
- instdone[1] = I915_READ(GEN4_INSTDONE1);
- } else if (INTEL_GEN(dev_priv) >= 7) {
- instdone[0] = I915_READ(RING_INSTDONE(RENDER_RING_BASE));
- instdone[1] = I915_READ(GEN7_SC_INSTDONE);
- instdone[2] = I915_READ(GEN7_SAMPLER_INSTDONE);
- instdone[3] = I915_READ(GEN7_ROW_INSTDONE);
- }
-}
diff --git a/drivers/gpu/drm/i915/i915_guc_submission.c b/drivers/gpu/drm/i915/i915_guc_submission.c
index 3106dcc06fe9..4462112725ef 100644
--- a/drivers/gpu/drm/i915/i915_guc_submission.c
+++ b/drivers/gpu/drm/i915/i915_guc_submission.c
@@ -23,6 +23,8 @@
*/
#include <linux/firmware.h>
#include <linux/circ_buf.h>
+#include <linux/debugfs.h>
+#include <linux/relay.h>
#include "i915_drv.h"
#include "intel_guc.h"
@@ -85,6 +87,7 @@ static int host2guc_action(struct intel_guc *guc, u32 *data, u32 len)
if (WARN_ON(len < 1 || len > 15))
return -EINVAL;
+ mutex_lock(&guc->action_lock);
intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
dev_priv->guc.action_count += 1;
@@ -123,6 +126,7 @@ static int host2guc_action(struct intel_guc *guc, u32 *data, u32 len)
dev_priv->guc.action_status = status;
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
+ mutex_unlock(&guc->action_lock);
return ret;
}
@@ -170,6 +174,35 @@ static int host2guc_sample_forcewake(struct intel_guc *guc,
return host2guc_action(guc, data, ARRAY_SIZE(data));
}
+static int host2guc_logbuffer_flush_complete(struct intel_guc *guc)
+{
+ u32 data[1];
+
+ data[0] = HOST2GUC_ACTION_LOG_BUFFER_FILE_FLUSH_COMPLETE;
+
+ return host2guc_action(guc, data, 1);
+}
+
+static int host2guc_force_logbuffer_flush(struct intel_guc *guc)
+{
+ u32 data[2];
+
+ data[0] = HOST2GUC_ACTION_FORCE_LOG_BUFFER_FLUSH;
+ data[1] = 0;
+
+ return host2guc_action(guc, data, 2);
+}
+
+static int host2guc_logging_control(struct intel_guc *guc, u32 control_val)
+{
+ u32 data[2];
+
+ data[0] = HOST2GUC_ACTION_UK_LOG_ENABLE_LOGGING;
+ data[1] = control_val;
+
+ return host2guc_action(guc, data, 2);
+}
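/*
 * Pattern note, not part of the patch: each host2guc wrapper above fills
 * data[0] with an action opcode and data[1..] with its parameters, then
 * hands the array to host2guc_action(), now serialized by guc->action_lock.
 * A hypothetical new action would follow the same shape (the opcode below
 * is a placeholder, not a real HOST2GUC_ACTION_* value):
 */
static int host2guc_example_action(struct intel_guc *guc, u32 param)
{
	u32 data[2];

	data[0] = 0;	/* placeholder HOST2GUC_ACTION_* opcode */
	data[1] = param;

	return host2guc_action(guc, data, 2);
}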
+
/*
* Initialise, update, or clear doorbell data shared with the GuC
*
@@ -187,7 +220,7 @@ static int guc_update_doorbell_id(struct intel_guc *guc,
struct guc_context_desc desc;
size_t len;
- doorbell = client->client_base + client->doorbell_offset;
+ doorbell = client->vaddr + client->doorbell_offset;
if (client->doorbell_id != GUC_INVALID_DOORBELL_ID &&
test_bit(client->doorbell_id, doorbell_bitmap)) {
@@ -293,7 +326,7 @@ static void guc_proc_desc_init(struct intel_guc *guc,
{
struct guc_process_desc *desc;
- desc = client->client_base + client->proc_desc_offset;
+ desc = client->vaddr + client->proc_desc_offset;
memset(desc, 0, sizeof(*desc));
@@ -380,8 +413,8 @@ static void guc_ctx_desc_init(struct intel_guc *guc,
gfx_addr = i915_ggtt_offset(client->vma);
desc.db_trigger_phy = sg_dma_address(client->vma->pages->sgl) +
client->doorbell_offset;
- desc.db_trigger_cpu = (uintptr_t)client->client_base +
- client->doorbell_offset;
+ desc.db_trigger_cpu =
+ (uintptr_t)client->vaddr + client->doorbell_offset;
desc.db_trigger_uk = gfx_addr + client->doorbell_offset;
desc.process_desc = gfx_addr + client->proc_desc_offset;
desc.wq_addr = gfx_addr + client->wq_offset;
@@ -432,7 +465,7 @@ int i915_guc_wq_reserve(struct drm_i915_gem_request *request)
{
const size_t wqi_size = sizeof(struct guc_wq_item);
struct i915_guc_client *gc = request->i915->guc.execbuf_client;
- struct guc_process_desc *desc = gc->client_base + gc->proc_desc_offset;
+ struct guc_process_desc *desc = gc->vaddr + gc->proc_desc_offset;
u32 freespace;
int ret;
@@ -473,10 +506,9 @@ static void guc_wq_item_append(struct i915_guc_client *gc,
struct intel_engine_cs *engine = rq->engine;
struct guc_process_desc *desc;
struct guc_wq_item *wqi;
- void *base;
- u32 freespace, tail, wq_off, wq_page;
+ u32 freespace, tail, wq_off;
- desc = gc->client_base + gc->proc_desc_offset;
+ desc = gc->vaddr + gc->proc_desc_offset;
/* Free space is guaranteed, see i915_guc_wq_reserve() above */
freespace = CIRC_SPACE(gc->wq_tail, desc->head, gc->wq_size);
@@ -506,10 +538,7 @@ static void guc_wq_item_append(struct i915_guc_client *gc,
gc->wq_rsvd -= wqi_size;
/* WQ starts from the page after doorbell / process_desc */
- wq_page = (wq_off + GUC_DB_SIZE) >> PAGE_SHIFT;
- wq_off &= PAGE_SIZE - 1;
- base = kmap_atomic(i915_gem_object_get_page(gc->vma->obj, wq_page));
- wqi = (struct guc_wq_item *)((char *)base + wq_off);
+ wqi = gc->vaddr + wq_off + GUC_DB_SIZE;
/* Now fill in the 4-word work queue item */
wqi->header = WQ_TYPE_INORDER |
@@ -521,9 +550,7 @@ static void guc_wq_item_append(struct i915_guc_client *gc,
wqi->context_desc = (u32)intel_lr_context_descriptor(rq->ctx, engine);
wqi->ring_tail = tail << WQ_RING_TAIL_SHIFT;
- wqi->fence_id = rq->fence.seqno;
-
- kunmap_atomic(base);
+ wqi->fence_id = rq->global_seqno;
}
static int guc_ring_doorbell(struct i915_guc_client *gc)
@@ -533,7 +560,7 @@ static int guc_ring_doorbell(struct i915_guc_client *gc)
union guc_doorbell_qw *db;
int attempt = 2, ret = -EAGAIN;
- desc = gc->client_base + gc->proc_desc_offset;
+ desc = gc->vaddr + gc->proc_desc_offset;
/* Update the tail so it is visible to GuC */
desc->tail = gc->wq_tail;
@@ -549,7 +576,7 @@ static int guc_ring_doorbell(struct i915_guc_client *gc)
db_exc.cookie = 1;
/* pointer of current doorbell cacheline */
- db = gc->client_base + gc->doorbell_offset;
+ db = gc->vaddr + gc->doorbell_offset;
while (attempt--) {
/* lets ring the doorbell */
@@ -601,13 +628,31 @@ static int guc_ring_doorbell(struct i915_guc_client *gc)
*/
static void i915_guc_submit(struct drm_i915_gem_request *rq)
{
- unsigned int engine_id = rq->engine->id;
+ struct drm_i915_private *dev_priv = rq->i915;
+ struct intel_engine_cs *engine = rq->engine;
+ unsigned int engine_id = engine->id;
struct intel_guc *guc = &rq->i915->guc;
struct i915_guc_client *client = guc->execbuf_client;
int b_ret;
+ /* We keep the previous context alive until we retire the following
+ * request. This ensures that the context object is still pinned
+ * for any residual writes the HW makes into it on the context switch
+ * into the next object following the breadcrumb. Otherwise, we may
+ * retire the context too early.
+ */
+ rq->previous_context = engine->last_context;
+ engine->last_context = rq->ctx;
+
+ i915_gem_request_submit(rq);
+
spin_lock(&client->wq_lock);
guc_wq_item_append(client, rq);
+
+ /* WA to flush out the pending GMADR writes to ring buffer. */
+ if (i915_vma_is_map_and_fenceable(rq->ring->vma))
+ POSTING_READ_FW(GUC_STATUS);
+
b_ret = guc_ring_doorbell(client);
client->submissions[engine_id] += 1;
@@ -616,7 +661,7 @@ static void i915_guc_submit(struct drm_i915_gem_request *rq)
client->b_fail += 1;
guc->submissions[engine_id] += 1;
- guc->last_seqno[engine_id] = rq->fence.seqno;
+ guc->last_seqno[engine_id] = rq->global_seqno;
spin_unlock(&client->wq_lock);
}
@@ -685,14 +730,14 @@ guc_client_free(struct drm_i915_private *dev_priv,
* Be sure to drop any locks
*/
- if (client->client_base) {
+ if (client->vaddr) {
/*
* If we got as far as setting up a doorbell, make sure we
* shut it down before unmapping & deallocating the memory.
*/
guc_disable_doorbell(guc, client);
- kunmap(kmap_to_page(client->client_base));
+ i915_gem_object_unpin_map(client->vma->obj);
}
i915_vma_unpin_and_release(&client->vma);
@@ -781,6 +826,7 @@ guc_client_alloc(struct drm_i915_private *dev_priv,
struct i915_guc_client *client;
struct intel_guc *guc = &dev_priv->guc;
struct i915_vma *vma;
+ void *vaddr;
uint16_t db_id;
client = kzalloc(sizeof(*client), GFP_KERNEL);
@@ -807,7 +853,12 @@ guc_client_alloc(struct drm_i915_private *dev_priv,
/* We'll keep just the first (doorbell/proc) page permanently kmap'd. */
client->vma = vma;
- client->client_base = kmap(i915_vma_first_page(vma));
+
+ vaddr = i915_gem_object_pin_map(vma->obj, I915_MAP_WB);
+ if (IS_ERR(vaddr))
+ goto err;
+
+ client->vaddr = vaddr;
spin_lock_init(&client->wq_lock);
client->wq_offset = GUC_DB_SIZE;
@@ -847,15 +898,411 @@ err:
return NULL;
}
+/*
+ * Sub buffer switch callback. Called whenever relay has to switch to a new
+ * sub buffer, relay stays on the same sub buffer if 0 is returned.
+ */
+static int subbuf_start_callback(struct rchan_buf *buf,
+ void *subbuf,
+ void *prev_subbuf,
+ size_t prev_padding)
+{
+ /* Use no-overwrite mode by default, where relay will stop accepting
+ * new data if there are no empty sub buffers left.
+ * There is no strict synchronization enforced by relay between Consumer
+ * and Producer. In overwrite mode, there is a possibility of getting
+ * inconsistent/garbled data: the producer could be writing onto the
+ * same sub buffer from which Consumer is reading. This can't be avoided
+ * unless Consumer is fast enough and can always run in tandem with
+ * Producer.
+ */
+ if (relay_buf_full(buf))
+ return 0;
+
+ return 1;
+}
+
+/*
+ * file_create() callback. Creates relay file in debugfs.
+ */
+static struct dentry *create_buf_file_callback(const char *filename,
+ struct dentry *parent,
+ umode_t mode,
+ struct rchan_buf *buf,
+ int *is_global)
+{
+ struct dentry *buf_file;
+
+ /* This is to enable the use of a single buffer for the relay channel and
+ * correspondingly have a single file exposed to User, through which
+ * it can collect the logs in order without any post-processing.
+ * Need to set 'is_global' even if parent is NULL for early logging.
+ */
+ *is_global = 1;
+
+ if (!parent)
+ return NULL;
+
+ /* Not using the channel filename passed as an argument, since for each
+ * channel relay appends the corresponding CPU number to the filename
+ * passed in relay_open(). This should be fine as relay just needs a
+ * dentry of the file associated with the channel buffer and that file's
+ * name need not be the same as the filename passed as an argument.
+ */
+ buf_file = debugfs_create_file("guc_log", mode,
+ parent, buf, &relay_file_operations);
+ return buf_file;
+}
+
+/*
+ * file_remove() default callback. Removes relay file in debugfs.
+ */
+static int remove_buf_file_callback(struct dentry *dentry)
+{
+ debugfs_remove(dentry);
+ return 0;
+}
+
+/* relay channel callbacks */
+static struct rchan_callbacks relay_callbacks = {
+ .subbuf_start = subbuf_start_callback,
+ .create_buf_file = create_buf_file_callback,
+ .remove_buf_file = remove_buf_file_callback,
+};
+
+static void guc_log_remove_relay_file(struct intel_guc *guc)
+{
+ relay_close(guc->log.relay_chan);
+}
+
+static int guc_log_create_relay_channel(struct intel_guc *guc)
+{
+ struct drm_i915_private *dev_priv = guc_to_i915(guc);
+ struct rchan *guc_log_relay_chan;
+ size_t n_subbufs, subbuf_size;
+
+ /* Keep the size of sub buffers same as shared log buffer */
+ subbuf_size = guc->log.vma->obj->base.size;
+
+ /* Store up to 8 snapshots, which is large enough to buffer sufficient
+ * boot time logs and provides enough leeway to User, in terms of
+ * latency, for consuming the logs from relay. Also doesn't take
+ * up too much memory.
+ */
+ n_subbufs = 8;
+
+ guc_log_relay_chan = relay_open(NULL, NULL, subbuf_size,
+ n_subbufs, &relay_callbacks, dev_priv);
+ if (!guc_log_relay_chan) {
+ DRM_ERROR("Couldn't create relay chan for GuC logging\n");
+ return -ENOMEM;
+ }
+
+ GEM_BUG_ON(guc_log_relay_chan->subbuf_size < subbuf_size);
+ guc->log.relay_chan = guc_log_relay_chan;
+ return 0;
+}
+
+static int guc_log_create_relay_file(struct intel_guc *guc)
+{
+ struct drm_i915_private *dev_priv = guc_to_i915(guc);
+ struct dentry *log_dir;
+ int ret;
+
+ /* For now create the log file in /sys/kernel/debug/dri/0 dir */
+ log_dir = dev_priv->drm.primary->debugfs_root;
+
+ /* If the /sys/kernel/debug/dri/0 location does not exist, then debugfs
+ * is not mounted and so we can't create the relay file.
+ * The relay API seems to fit well with debugfs only: there are 3
+ * requirements for using relay which only a debugfs file can meet in a
+ * straightforward/clean manner :-
+ * i) Need the associated dentry pointer of the file, while opening the
+ * relay channel.
+ * ii) Should be able to use 'relay_file_operations' fops for the file.
+ * iii) Set the 'i_private' field of file's inode to the pointer of
+ * relay channel buffer.
+ */
+ if (!log_dir) {
+ DRM_ERROR("Debugfs dir not available yet for GuC log file\n");
+ return -ENODEV;
+ }
+
+ ret = relay_late_setup_files(guc->log.relay_chan, "guc_log", log_dir);
+ if (ret) {
+ DRM_ERROR("Couldn't associate relay chan with file %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void guc_move_to_next_buf(struct intel_guc *guc)
+{
+ /* Make sure the updates made in the sub buffer are visible when
+ * Consumer sees the following update to offset inside the sub buffer.
+ */
+ smp_wmb();
+
+ /* All data has been written, so now move the offset of sub buffer. */
+ relay_reserve(guc->log.relay_chan, guc->log.vma->obj->base.size);
+
+ /* Switch to the next sub buffer */
+ relay_flush(guc->log.relay_chan);
+}
+
+static void *guc_get_write_buffer(struct intel_guc *guc)
+{
+ if (!guc->log.relay_chan)
+ return NULL;
+
+ /* Just get the base address of a new sub buffer and copy data into it
+ * ourselves. NULL will be returned in no-overwrite mode, if all sub
+ * buffers are full. Could have used relay_write() to indirectly
+ * copy the data, but that would have been a bit convoluted, as we
+ * need to write to only certain locations inside a sub buffer, which
+ * cannot be done without using relay_reserve() along with
+ * relay_write(). So it's better to use relay_reserve() alone.
+ */
+ return relay_reserve(guc->log.relay_chan, 0);
+}
+
+static bool
+guc_check_log_buf_overflow(struct intel_guc *guc,
+ enum guc_log_buffer_type type, unsigned int full_cnt)
+{
+ unsigned int prev_full_cnt = guc->log.prev_overflow_count[type];
+ bool overflow = false;
+
+ if (full_cnt != prev_full_cnt) {
+ overflow = true;
+
+ guc->log.prev_overflow_count[type] = full_cnt;
+ guc->log.total_overflow_count[type] += full_cnt - prev_full_cnt;
+
+ if (full_cnt < prev_full_cnt) {
+ /* buffer_full_cnt is a 4 bit counter */
+ guc->log.total_overflow_count[type] += 16;
+ }
+ DRM_ERROR_RATELIMITED("GuC log buffer overflow\n");
+ }
+
+ return overflow;
+}
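/*
 * Worked example, not part of the patch: buffer_full_cnt is a 4-bit field,
 * so a raw count that moved 15 -> 2 really overflowed (2 + 16) - 15 = 3
 * more times. The unsigned arithmetic below matches the two += statements
 * in the function above.
 */
static unsigned int overflow_delta(unsigned int prev, unsigned int cur)
{
	unsigned int delta = cur - prev;	/* wraps mod 2^32 */

	if (cur < prev)
		delta += 16;			/* 4-bit counter wrapped */

	return delta;	/* overflow_delta(15, 2) == 3 */
}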
+
+static unsigned int guc_get_log_buffer_size(enum guc_log_buffer_type type)
+{
+ switch (type) {
+ case GUC_ISR_LOG_BUFFER:
+ return (GUC_LOG_ISR_PAGES + 1) * PAGE_SIZE;
+ case GUC_DPC_LOG_BUFFER:
+ return (GUC_LOG_DPC_PAGES + 1) * PAGE_SIZE;
+ case GUC_CRASH_DUMP_LOG_BUFFER:
+ return (GUC_LOG_CRASH_PAGES + 1) * PAGE_SIZE;
+ default:
+ MISSING_CASE(type);
+ }
+
+ return 0;
+}
+
+static void guc_read_update_log_buffer(struct intel_guc *guc)
+{
+ unsigned int buffer_size, read_offset, write_offset, bytes_to_copy, full_cnt;
+ struct guc_log_buffer_state *log_buf_state, *log_buf_snapshot_state;
+ struct guc_log_buffer_state log_buf_state_local;
+ enum guc_log_buffer_type type;
+ void *src_data, *dst_data;
+ bool new_overflow;
+
+ if (WARN_ON(!guc->log.buf_addr))
+ return;
+
+ /* Get the pointer to shared GuC log buffer */
+ log_buf_state = src_data = guc->log.buf_addr;
+
+ /* Get the pointer to local buffer to store the logs */
+ log_buf_snapshot_state = dst_data = guc_get_write_buffer(guc);
+
+ /* Actual logs are present from the 2nd page */
+ src_data += PAGE_SIZE;
+ dst_data += PAGE_SIZE;
+
+ for (type = GUC_ISR_LOG_BUFFER; type < GUC_MAX_LOG_BUFFER; type++) {
+ /* Make a copy of the state structure, inside GuC log buffer
+ * (which is uncached mapped), on the stack to avoid reading
+ * from it multiple times.
+ */
+ memcpy(&log_buf_state_local, log_buf_state,
+ sizeof(struct guc_log_buffer_state));
+ buffer_size = guc_get_log_buffer_size(type);
+ read_offset = log_buf_state_local.read_ptr;
+ write_offset = log_buf_state_local.sampled_write_ptr;
+ full_cnt = log_buf_state_local.buffer_full_cnt;
+
+ /* Bookkeeping stuff */
+ guc->log.flush_count[type] += log_buf_state_local.flush_to_file;
+ new_overflow = guc_check_log_buf_overflow(guc, type, full_cnt);
+
+ /* Update the state of shared log buffer */
+ log_buf_state->read_ptr = write_offset;
+ log_buf_state->flush_to_file = 0;
+ log_buf_state++;
+
+ if (unlikely(!log_buf_snapshot_state))
+ continue;
+
+ /* First copy the state structure in snapshot buffer */
+ memcpy(log_buf_snapshot_state, &log_buf_state_local,
+ sizeof(struct guc_log_buffer_state));
+
+ /* The write pointer could have been updated by the GuC firmware,
+ * after sending the flush interrupt to Host; for consistency,
+ * set the write pointer to the same value as sampled_write_ptr
+ * in the snapshot buffer.
+ */
+ log_buf_snapshot_state->write_ptr = write_offset;
+ log_buf_snapshot_state++;
+
+ /* Now copy the actual logs. */
+ if (unlikely(new_overflow)) {
+ /* copy the whole buffer in case of overflow */
+ read_offset = 0;
+ write_offset = buffer_size;
+ } else if (unlikely((read_offset > buffer_size) ||
+ (write_offset > buffer_size))) {
+ DRM_ERROR("invalid log buffer state\n");
+ /* copy whole buffer as offsets are unreliable */
+ read_offset = 0;
+ write_offset = buffer_size;
+ }
+
+ /* Just copy the newly written data */
+ if (read_offset > write_offset) {
+ i915_memcpy_from_wc(dst_data, src_data, write_offset);
+ bytes_to_copy = buffer_size - read_offset;
+ } else {
+ bytes_to_copy = write_offset - read_offset;
+ }
+ i915_memcpy_from_wc(dst_data + read_offset,
+ src_data + read_offset, bytes_to_copy);
+
+ src_data += buffer_size;
+ dst_data += buffer_size;
+ }
+
+ if (log_buf_snapshot_state)
+ guc_move_to_next_buf(guc);
+ else {
+ /* Use a ratelimited message to avoid a deluge, as logs might be
+ * getting consumed by User at a slow rate.
+ */
+ DRM_ERROR_RATELIMITED("no sub-buffer to capture logs\n");
+ guc->log.capture_miss_count++;
+ }
+}
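/*
 * Sketch, not part of the patch, of the circular copy above: when the
 * write pointer has wrapped past the read pointer, the new data is the
 * pair of spans [0, write) and [read, size); otherwise it is the single
 * span [read, write). Assumes a plain memcpy-like routine (the driver
 * uses i915_memcpy_from_wc() for the WC-mapped source).
 */
#include <string.h>

static void copy_new_logs(char *dst, const char *src,
			  unsigned int read, unsigned int write,
			  unsigned int size)
{
	if (read > write) {
		memcpy(dst, src, write);			/* wrapped head */
		memcpy(dst + read, src + read, size - read);	/* old tail */
	} else {
		memcpy(dst + read, src + read, write - read);
	}
}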
+
+static void guc_capture_logs_work(struct work_struct *work)
+{
+ struct drm_i915_private *dev_priv =
+ container_of(work, struct drm_i915_private, guc.log.flush_work);
+
+ i915_guc_capture_logs(dev_priv);
+}
+
+static void guc_log_cleanup(struct intel_guc *guc)
+{
+ struct drm_i915_private *dev_priv = guc_to_i915(guc);
+
+ lockdep_assert_held(&dev_priv->drm.struct_mutex);
+
+ /* First disable the flush interrupt */
+ gen9_disable_guc_interrupts(dev_priv);
+
+ if (guc->log.flush_wq)
+ destroy_workqueue(guc->log.flush_wq);
+
+ guc->log.flush_wq = NULL;
+
+ if (guc->log.relay_chan)
+ guc_log_remove_relay_file(guc);
+
+ guc->log.relay_chan = NULL;
+
+ if (guc->log.buf_addr)
+ i915_gem_object_unpin_map(guc->log.vma->obj);
+
+ guc->log.buf_addr = NULL;
+}
+
+static int guc_log_create_extras(struct intel_guc *guc)
+{
+ struct drm_i915_private *dev_priv = guc_to_i915(guc);
+ void *vaddr;
+ int ret;
+
+ lockdep_assert_held(&dev_priv->drm.struct_mutex);
+
+ /* Nothing to do */
+ if (i915.guc_log_level < 0)
+ return 0;
+
+ if (!guc->log.buf_addr) {
+ /* Create a WC (Uncached for read) vmalloc mapping of log
+ * buffer pages, so that we can directly get the data
+ * (up-to-date) from memory.
+ */
+ vaddr = i915_gem_object_pin_map(guc->log.vma->obj, I915_MAP_WC);
+ if (IS_ERR(vaddr)) {
+ ret = PTR_ERR(vaddr);
+ DRM_ERROR("Couldn't map log buffer pages %d\n", ret);
+ return ret;
+ }
+
+ guc->log.buf_addr = vaddr;
+ }
+
+ if (!guc->log.relay_chan) {
+ /* Create a relay channel, so that we have buffers for storing
+ * the GuC firmware logs, the channel will be linked with a file
+ * later on when debugfs is registered.
+ */
+ ret = guc_log_create_relay_channel(guc);
+ if (ret)
+ return ret;
+ }
+
+ if (!guc->log.flush_wq) {
+ INIT_WORK(&guc->log.flush_work, guc_capture_logs_work);
+
+ /*
+ * GuC log buffer flush work item has to do register access to
+ * send the ack to GuC and this work item, if not synced before
+ * suspend, can potentially get executed after the GFX device is
+ * suspended.
+ * By marking the WQ as freezable, we don't have to bother about
+ * flushing of this work item from the suspend hooks, the pending
+ * work item if any will be either executed before the suspend
+ * or scheduled later on resume. This way the handling of work
+ * item can be kept same between system suspend & rpm suspend.
+ */
+ guc->log.flush_wq = alloc_ordered_workqueue("i915-guc_log",
+ WQ_HIGHPRI | WQ_FREEZABLE);
+ if (guc->log.flush_wq == NULL) {
+ DRM_ERROR("Couldn't allocate the wq for GuC logging\n");
+ return -ENOMEM;
+ }
+ }
+
+ return 0;
+}
+
static void guc_log_create(struct intel_guc *guc)
{
struct i915_vma *vma;
unsigned long offset;
uint32_t size, flags;
- if (i915.guc_log_level < GUC_LOG_VERBOSITY_MIN)
- return;
-
if (i915.guc_log_level > GUC_LOG_VERBOSITY_MAX)
i915.guc_log_level = GUC_LOG_VERBOSITY_MAX;
@@ -865,8 +1312,18 @@ static void guc_log_create(struct intel_guc *guc)
GUC_LOG_ISR_PAGES + 1 +
GUC_LOG_CRASH_PAGES + 1) << PAGE_SHIFT;
- vma = guc->log_vma;
+ vma = guc->log.vma;
if (!vma) {
+ /* We require SSE 4.1 for fast reads from the GuC log buffer and
+ * it should be present on the chipsets supporting GuC based
+ * submissions.
+ */
+ if (WARN_ON(!i915_memcpy_from_wc(NULL, NULL, 0))) {
+ /* logging will not be enabled */
+ i915.guc_log_level = -1;
+ return;
+ }
+
vma = guc_allocate_vma(guc, size);
if (IS_ERR(vma)) {
/* logging will be off */
@@ -874,7 +1331,14 @@ static void guc_log_create(struct intel_guc *guc)
return;
}
- guc->log_vma = vma;
+ guc->log.vma = vma;
+
+ if (guc_log_create_extras(guc)) {
+ guc_log_cleanup(guc);
+ i915_vma_unpin_and_release(&guc->log.vma);
+ i915.guc_log_level = -1;
+ return;
+ }
}
/* each allocated unit is a page */
@@ -884,7 +1348,37 @@ static void guc_log_create(struct intel_guc *guc)
(GUC_LOG_CRASH_PAGES << GUC_LOG_CRASH_SHIFT);
offset = i915_ggtt_offset(vma) >> PAGE_SHIFT; /* in pages */
- guc->log_flags = (offset << GUC_LOG_BUF_ADDR_SHIFT) | flags;
+ guc->log.flags = (offset << GUC_LOG_BUF_ADDR_SHIFT) | flags;
+}
+
+static int guc_log_late_setup(struct intel_guc *guc)
+{
+ struct drm_i915_private *dev_priv = guc_to_i915(guc);
+ int ret;
+
+ lockdep_assert_held(&dev_priv->drm.struct_mutex);
+
+ if (i915.guc_log_level < 0)
+ return -EINVAL;
+
+ /* If log_level was set as -1 at boot time, then the setup needed to
+ * handle log buffer flush interrupts would not have been done yet,
+ * so do that now.
+ */
+ ret = guc_log_create_extras(guc);
+ if (ret)
+ goto err;
+
+ ret = guc_log_create_relay_file(guc);
+ if (ret)
+ goto err;
+
+ return 0;
+err:
+ guc_log_cleanup(guc);
+ /* logging will remain off */
+ i915.guc_log_level = -1;
+ return ret;
}
static void guc_policies_init(struct guc_policies *policies)
@@ -917,6 +1411,7 @@ static void guc_addon_create(struct intel_guc *guc)
struct guc_policies *policies;
struct guc_mmio_reg_state *reg_state;
struct intel_engine_cs *engine;
+ enum intel_engine_id id;
struct page *page;
u32 size;
@@ -944,10 +1439,10 @@ static void guc_addon_create(struct intel_guc *guc)
* so its address won't change after we've told the GuC where
* to find it.
*/
- engine = &dev_priv->engine[RCS];
+ engine = dev_priv->engine[RCS];
ads->golden_context_lrca = engine->status_page.ggtt_offset;
- for_each_engine(engine, dev_priv)
+ for_each_engine(engine, dev_priv, id)
ads->eng_state_size[engine->guc_id] = intel_lr_context_size(engine);
/* GuC scheduling policies */
@@ -960,7 +1455,7 @@ static void guc_addon_create(struct intel_guc *guc)
/* MMIO reg state */
reg_state = (void *)policies + sizeof(struct guc_policies);
- for_each_engine(engine, dev_priv) {
+ for_each_engine(engine, dev_priv, id) {
reg_state->mmio_white_list[engine->guc_id].mmio_start =
engine->mmio_base + GUC_MMIO_WHITE_LIST_START;
@@ -1005,6 +1500,7 @@ int i915_guc_submission_init(struct drm_i915_private *dev_priv)
guc->ctx_pool_vma = vma;
ida_init(&guc->ctx_ids);
+ mutex_init(&guc->action_lock);
guc_log_create(guc);
guc_addon_create(guc);
@@ -1014,9 +1510,10 @@ int i915_guc_submission_init(struct drm_i915_private *dev_priv)
int i915_guc_submission_enable(struct drm_i915_private *dev_priv)
{
struct intel_guc *guc = &dev_priv->guc;
+ struct drm_i915_gem_request *request;
struct i915_guc_client *client;
struct intel_engine_cs *engine;
- struct drm_i915_gem_request *request;
+ enum intel_engine_id id;
/* client for execbuf submission */
client = guc_client_alloc(dev_priv,
@@ -1033,11 +1530,13 @@ int i915_guc_submission_enable(struct drm_i915_private *dev_priv)
guc_init_doorbell_hw(guc);
/* Take over from manual control of ELSP (execlists) */
- for_each_engine(engine, dev_priv) {
+ for_each_engine(engine, dev_priv, id) {
engine->submit_request = i915_guc_submit;
+ engine->schedule = NULL;
/* Replay the current set of previously submitted requests */
- list_for_each_entry(request, &engine->request_list, link) {
+ list_for_each_entry(request,
+ &engine->timeline->requests, link) {
client->wq_rsvd += sizeof(struct guc_wq_item);
if (i915_sw_fence_done(&request->submit))
i915_guc_submit(request);
@@ -1066,7 +1565,7 @@ void i915_guc_submission_fini(struct drm_i915_private *dev_priv)
struct intel_guc *guc = &dev_priv->guc;
i915_vma_unpin_and_release(&guc->ads_vma);
- i915_vma_unpin_and_release(&guc->log_vma);
+ i915_vma_unpin_and_release(&guc->log.vma);
if (guc->ctx_pool_vma)
ida_destroy(&guc->ctx_ids);
@@ -1087,6 +1586,8 @@ int intel_guc_suspend(struct drm_device *dev)
if (guc->guc_fw.guc_fw_load_status != GUC_FIRMWARE_SUCCESS)
return 0;
+ gen9_disable_guc_interrupts(dev_priv);
+
ctx = dev_priv->kernel_context;
data[0] = HOST2GUC_ACTION_ENTER_S_STATE;
@@ -1113,6 +1614,9 @@ int intel_guc_resume(struct drm_device *dev)
if (guc->guc_fw.guc_fw_load_status != GUC_FIRMWARE_SUCCESS)
return 0;
+ if (i915.guc_log_level >= 0)
+ gen9_enable_guc_interrupts(dev_priv);
+
ctx = dev_priv->kernel_context;
data[0] = HOST2GUC_ACTION_EXIT_S_STATE;
@@ -1122,3 +1626,104 @@ int intel_guc_resume(struct drm_device *dev)
return host2guc_action(guc, data, ARRAY_SIZE(data));
}
+
+void i915_guc_capture_logs(struct drm_i915_private *dev_priv)
+{
+ guc_read_update_log_buffer(&dev_priv->guc);
+
+ /* Generally device is expected to be active only at this
+ * time, so get/put should be really quick.
+ */
+ intel_runtime_pm_get(dev_priv);
+ host2guc_logbuffer_flush_complete(&dev_priv->guc);
+ intel_runtime_pm_put(dev_priv);
+}
+
+void i915_guc_flush_logs(struct drm_i915_private *dev_priv)
+{
+ if (!i915.enable_guc_submission || (i915.guc_log_level < 0))
+ return;
+
+ /* First disable the interrupts; they will be re-enabled afterwards */
+ gen9_disable_guc_interrupts(dev_priv);
+
+ /* Before initiating the forceful flush, wait for any pending/ongoing
+ * flush to complete; otherwise the forceful flush may not actually happen.
+ */
+ flush_work(&dev_priv->guc.log.flush_work);
+
+ /* Ask GuC to update the log buffer state */
+ host2guc_force_logbuffer_flush(&dev_priv->guc);
+
+ /* GuC would have updated the log buffer by now, so capture it */
+ i915_guc_capture_logs(dev_priv);
+}
+
+void i915_guc_unregister(struct drm_i915_private *dev_priv)
+{
+ if (!i915.enable_guc_submission)
+ return;
+
+ mutex_lock(&dev_priv->drm.struct_mutex);
+ guc_log_cleanup(&dev_priv->guc);
+ mutex_unlock(&dev_priv->drm.struct_mutex);
+}
+
+void i915_guc_register(struct drm_i915_private *dev_priv)
+{
+ if (!i915.enable_guc_submission)
+ return;
+
+ mutex_lock(&dev_priv->drm.struct_mutex);
+ guc_log_late_setup(&dev_priv->guc);
+ mutex_unlock(&dev_priv->drm.struct_mutex);
+}
+
+int i915_guc_log_control(struct drm_i915_private *dev_priv, u64 control_val)
+{
+ union guc_log_control log_param;
+ int ret;
+
+ log_param.value = control_val;
+
+ if (log_param.verbosity < GUC_LOG_VERBOSITY_MIN ||
+ log_param.verbosity > GUC_LOG_VERBOSITY_MAX)
+ return -EINVAL;
+
+ /* This combination doesn't make sense & won't have any effect */
+ if (!log_param.logging_enabled && (i915.guc_log_level < 0))
+ return 0;
+
+ ret = host2guc_logging_control(&dev_priv->guc, log_param.value);
+ if (ret < 0) {
+ DRM_DEBUG_DRIVER("host2guc action failed %d\n", ret);
+ return ret;
+ }
+
+ i915.guc_log_level = log_param.verbosity;
+
+ /* If log_level was set to -1 at boot time, then the relay channel file
+ * wouldn't have been created by now and interrupts also would not have
+ * been enabled.
+ */
+ if (!dev_priv->guc.log.relay_chan) {
+ ret = guc_log_late_setup(&dev_priv->guc);
+ if (!ret)
+ gen9_enable_guc_interrupts(dev_priv);
+ } else if (!log_param.logging_enabled) {
+ /* Once logging is disabled, GuC won't generate logs or send an
+ * interrupt. But there could be some data in the log buffer
+ * which is yet to be captured. So request GuC to update the log
+ * buffer state and then collect the left over logs.
+ */
+ i915_guc_flush_logs(dev_priv);
+
+ /* As logging is disabled, update log level to reflect that */
+ i915.guc_log_level = -1;
+ } else {
+ /* In case interrupts were disabled, enable them now */
+ gen9_enable_guc_interrupts(dev_priv);
+ }
+
+ return ret;
+}
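
For reference, the decode of control_val in i915_guc_log_control() above relies on union guc_log_control overlaying a raw value with bitfields. A minimal sketch of a layout consistent with the fields used above — the field names match the code, but the widths here are assumptions; the authoritative definition lives in the driver's GuC headers:

	/* Hypothetical layout; field names match i915_guc_log_control()
	 * above, bit widths are assumptions for illustration only.
	 */
	union guc_log_control {
		struct {
			u32 logging_enabled:1;	/* bit 0: logging on/off */
			u32 reserved:3;
			u32 verbosity:4;	/* GUC_LOG_VERBOSITY_MIN..MAX */
		};
		u32 value;
	};

With such a layout, writing e.g. 0x11 through the control interface would request logging enabled at verbosity 1.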
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 3fc286cd1157..07ca71cabb2b 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -170,6 +170,7 @@ static void gen5_assert_iir_is_zero(struct drm_i915_private *dev_priv,
} while (0)
static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir);
+static void gen9_guc_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir);
/* For display hotplug interrupt */
static inline void
@@ -303,18 +304,18 @@ static void snb_update_pm_irq(struct drm_i915_private *dev_priv,
assert_spin_locked(&dev_priv->irq_lock);
- new_val = dev_priv->pm_irq_mask;
+ new_val = dev_priv->pm_imr;
new_val &= ~interrupt_mask;
new_val |= (~enabled_irq_mask & interrupt_mask);
- if (new_val != dev_priv->pm_irq_mask) {
- dev_priv->pm_irq_mask = new_val;
- I915_WRITE(gen6_pm_imr(dev_priv), dev_priv->pm_irq_mask);
+ if (new_val != dev_priv->pm_imr) {
+ dev_priv->pm_imr = new_val;
+ I915_WRITE(gen6_pm_imr(dev_priv), dev_priv->pm_imr);
POSTING_READ(gen6_pm_imr(dev_priv));
}
}
-void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
+void gen6_unmask_pm_irq(struct drm_i915_private *dev_priv, u32 mask)
{
if (WARN_ON(!intel_irqs_enabled(dev_priv)))
return;
@@ -322,28 +323,54 @@ void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
snb_update_pm_irq(dev_priv, mask, mask);
}
-static void __gen6_disable_pm_irq(struct drm_i915_private *dev_priv,
- uint32_t mask)
+static void __gen6_mask_pm_irq(struct drm_i915_private *dev_priv, u32 mask)
{
snb_update_pm_irq(dev_priv, mask, 0);
}
-void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
+void gen6_mask_pm_irq(struct drm_i915_private *dev_priv, u32 mask)
{
if (WARN_ON(!intel_irqs_enabled(dev_priv)))
return;
- __gen6_disable_pm_irq(dev_priv, mask);
+ __gen6_mask_pm_irq(dev_priv, mask);
}
-void gen6_reset_rps_interrupts(struct drm_i915_private *dev_priv)
+void gen6_reset_pm_iir(struct drm_i915_private *dev_priv, u32 reset_mask)
{
i915_reg_t reg = gen6_pm_iir(dev_priv);
- spin_lock_irq(&dev_priv->irq_lock);
- I915_WRITE(reg, dev_priv->pm_rps_events);
- I915_WRITE(reg, dev_priv->pm_rps_events);
+ assert_spin_locked(&dev_priv->irq_lock);
+
+ I915_WRITE(reg, reset_mask);
+ I915_WRITE(reg, reset_mask);
POSTING_READ(reg);
+}
+
+void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, u32 enable_mask)
+{
+ assert_spin_locked(&dev_priv->irq_lock);
+
+ dev_priv->pm_ier |= enable_mask;
+ I915_WRITE(gen6_pm_ier(dev_priv), dev_priv->pm_ier);
+ gen6_unmask_pm_irq(dev_priv, enable_mask);
+ /* unmask_pm_irq provides an implicit barrier (POSTING_READ) */
+}
+
+void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, u32 disable_mask)
+{
+ assert_spin_locked(&dev_priv->irq_lock);
+
+ dev_priv->pm_ier &= ~disable_mask;
+ __gen6_mask_pm_irq(dev_priv, disable_mask);
+ I915_WRITE(gen6_pm_ier(dev_priv), dev_priv->pm_ier);
+ /* Though a barrier is missing here, we don't really need one */
+}
+
+void gen6_reset_rps_interrupts(struct drm_i915_private *dev_priv)
+{
+ spin_lock_irq(&dev_priv->irq_lock);
+ gen6_reset_pm_iir(dev_priv, dev_priv->pm_rps_events);
dev_priv->rps.pm_iir = 0;
spin_unlock_irq(&dev_priv->irq_lock);
}
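
The pm_irq_mask → pm_imr/pm_ier split above gives callers a fixed two-register protocol: enabling programs the enable register (IER) before unmasking the mask register (IMR), while disabling masks IMR before clearing IER, so no event can fire while only half the state has been written. A minimal caller sketch under the irq_lock, assuming only the helpers just introduced (this mirrors the GuC paths added later in this patch):

	/* Sketch: enabling then disabling a PM event class.
	 * gen6_enable_pm_irq() writes IER first, then unmasks IMR;
	 * gen6_disable_pm_irq() masks IMR first, then clears IER.
	 */
	spin_lock_irq(&dev_priv->irq_lock);
	gen6_enable_pm_irq(dev_priv, dev_priv->pm_guc_events);
	spin_unlock_irq(&dev_priv->irq_lock);

	spin_lock_irq(&dev_priv->irq_lock);
	gen6_disable_pm_irq(dev_priv, dev_priv->pm_guc_events);
	spin_unlock_irq(&dev_priv->irq_lock);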
@@ -357,8 +384,6 @@ void gen6_enable_rps_interrupts(struct drm_i915_private *dev_priv)
WARN_ON_ONCE(dev_priv->rps.pm_iir);
WARN_ON_ONCE(I915_READ(gen6_pm_iir(dev_priv)) & dev_priv->pm_rps_events);
dev_priv->rps.interrupts_enabled = true;
- I915_WRITE(gen6_pm_ier(dev_priv), I915_READ(gen6_pm_ier(dev_priv)) |
- dev_priv->pm_rps_events);
gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
spin_unlock_irq(&dev_priv->irq_lock);
@@ -379,9 +404,7 @@ void gen6_disable_rps_interrupts(struct drm_i915_private *dev_priv)
I915_WRITE(GEN6_PMINTRMSK, gen6_sanitize_rps_pm_mask(dev_priv, ~0u));
- __gen6_disable_pm_irq(dev_priv, dev_priv->pm_rps_events);
- I915_WRITE(gen6_pm_ier(dev_priv), I915_READ(gen6_pm_ier(dev_priv)) &
- ~dev_priv->pm_rps_events);
+ gen6_disable_pm_irq(dev_priv, dev_priv->pm_rps_events);
spin_unlock_irq(&dev_priv->irq_lock);
synchronize_irq(dev_priv->drm.irq);
@@ -395,6 +418,38 @@ void gen6_disable_rps_interrupts(struct drm_i915_private *dev_priv)
gen6_reset_rps_interrupts(dev_priv);
}
+void gen9_reset_guc_interrupts(struct drm_i915_private *dev_priv)
+{
+ spin_lock_irq(&dev_priv->irq_lock);
+ gen6_reset_pm_iir(dev_priv, dev_priv->pm_guc_events);
+ spin_unlock_irq(&dev_priv->irq_lock);
+}
+
+void gen9_enable_guc_interrupts(struct drm_i915_private *dev_priv)
+{
+ spin_lock_irq(&dev_priv->irq_lock);
+ if (!dev_priv->guc.interrupts_enabled) {
+ WARN_ON_ONCE(I915_READ(gen6_pm_iir(dev_priv)) &
+ dev_priv->pm_guc_events);
+ dev_priv->guc.interrupts_enabled = true;
+ gen6_enable_pm_irq(dev_priv, dev_priv->pm_guc_events);
+ }
+ spin_unlock_irq(&dev_priv->irq_lock);
+}
+
+void gen9_disable_guc_interrupts(struct drm_i915_private *dev_priv)
+{
+ spin_lock_irq(&dev_priv->irq_lock);
+ dev_priv->guc.interrupts_enabled = false;
+
+ gen6_disable_pm_irq(dev_priv, dev_priv->pm_guc_events);
+
+ spin_unlock_irq(&dev_priv->irq_lock);
+ synchronize_irq(dev_priv->drm.irq);
+
+ gen9_reset_guc_interrupts(dev_priv);
+}
+
/**
* bdw_update_port_irq - update DE port interrupt
* @dev_priv: driver private
@@ -670,8 +725,8 @@ static u32 i915_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
struct drm_i915_private *dev_priv = to_i915(dev);
i915_reg_t high_frame, low_frame;
u32 high1, high2, low, pixel, vbl_start, hsync_start, htotal;
- struct intel_crtc *intel_crtc =
- to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
+ struct intel_crtc *intel_crtc = intel_get_crtc_for_pipe(dev_priv,
+ pipe);
const struct drm_display_mode *mode = &intel_crtc->base.hwmode;
htotal = mode->crtc_htotal;
@@ -776,8 +831,8 @@ static int i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
const struct drm_display_mode *mode)
{
struct drm_i915_private *dev_priv = to_i915(dev);
- struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_crtc *intel_crtc = intel_get_crtc_for_pipe(dev_priv,
+ pipe);
int position;
int vbl_start, vbl_end, hsync_start, htotal, vtotal;
bool in_vbl = true;
@@ -912,21 +967,22 @@ static int i915_get_vblank_timestamp(struct drm_device *dev, unsigned int pipe,
struct timeval *vblank_time,
unsigned flags)
{
- struct drm_crtc *crtc;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_crtc *crtc;
- if (pipe >= INTEL_INFO(dev)->num_pipes) {
+ if (pipe >= INTEL_INFO(dev_priv)->num_pipes) {
DRM_ERROR("Invalid crtc %u\n", pipe);
return -EINVAL;
}
/* Get drm_crtc to timestamp: */
- crtc = intel_get_crtc_for_pipe(dev, pipe);
+ crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
if (crtc == NULL) {
DRM_ERROR("Invalid crtc %u\n", pipe);
return -EINVAL;
}
- if (!crtc->hwmode.crtc_clock) {
+ if (!crtc->base.hwmode.crtc_clock) {
DRM_DEBUG_KMS("crtc %u is disabled\n", pipe);
return -EBUSY;
}
@@ -934,7 +990,7 @@ static int i915_get_vblank_timestamp(struct drm_device *dev, unsigned int pipe,
/* Helper routine in DRM core does all the work: */
return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error,
vblank_time, flags,
- &crtc->hwmode);
+ &crtc->base.hwmode);
}
static void ironlake_rps_change_irq_handler(struct drm_i915_private *dev_priv)
@@ -1058,8 +1114,9 @@ static u32 vlv_wa_c0_ei(struct drm_i915_private *dev_priv, u32 pm_iir)
static bool any_waiters(struct drm_i915_private *dev_priv)
{
struct intel_engine_cs *engine;
+ enum intel_engine_id id;
- for_each_engine(engine, dev_priv)
+ for_each_engine(engine, dev_priv, id)
if (intel_engine_has_waiter(engine))
return true;
@@ -1084,7 +1141,7 @@ static void gen6_pm_rps_work(struct work_struct *work)
pm_iir = dev_priv->rps.pm_iir;
dev_priv->rps.pm_iir = 0;
/* Make sure not to corrupt PMIMR state used by ringbuffer on GEN6 */
- gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
+ gen6_unmask_pm_irq(dev_priv, dev_priv->pm_rps_events);
client_boost = dev_priv->rps.client_boost;
dev_priv->rps.client_boost = false;
spin_unlock_irq(&dev_priv->irq_lock);
@@ -1257,20 +1314,20 @@ static void ilk_gt_irq_handler(struct drm_i915_private *dev_priv,
u32 gt_iir)
{
if (gt_iir & GT_RENDER_USER_INTERRUPT)
- notify_ring(&dev_priv->engine[RCS]);
+ notify_ring(dev_priv->engine[RCS]);
if (gt_iir & ILK_BSD_USER_INTERRUPT)
- notify_ring(&dev_priv->engine[VCS]);
+ notify_ring(dev_priv->engine[VCS]);
}
static void snb_gt_irq_handler(struct drm_i915_private *dev_priv,
u32 gt_iir)
{
if (gt_iir & GT_RENDER_USER_INTERRUPT)
- notify_ring(&dev_priv->engine[RCS]);
+ notify_ring(dev_priv->engine[RCS]);
if (gt_iir & GT_BSD_USER_INTERRUPT)
- notify_ring(&dev_priv->engine[VCS]);
+ notify_ring(dev_priv->engine[VCS]);
if (gt_iir & GT_BLT_USER_INTERRUPT)
- notify_ring(&dev_priv->engine[BCS]);
+ notify_ring(dev_priv->engine[BCS]);
if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT |
GT_BSD_CS_ERROR_INTERRUPT |
@@ -1323,11 +1380,13 @@ static irqreturn_t gen8_gt_irq_ack(struct drm_i915_private *dev_priv,
DRM_ERROR("The master control interrupt lied (GT3)!\n");
}
- if (master_ctl & GEN8_GT_PM_IRQ) {
+ if (master_ctl & (GEN8_GT_PM_IRQ | GEN8_GT_GUC_IRQ)) {
gt_iir[2] = I915_READ_FW(GEN8_GT_IIR(2));
- if (gt_iir[2] & dev_priv->pm_rps_events) {
+ if (gt_iir[2] & (dev_priv->pm_rps_events |
+ dev_priv->pm_guc_events)) {
I915_WRITE_FW(GEN8_GT_IIR(2),
- gt_iir[2] & dev_priv->pm_rps_events);
+ gt_iir[2] & (dev_priv->pm_rps_events |
+ dev_priv->pm_guc_events));
ret = IRQ_HANDLED;
} else
DRM_ERROR("The master control interrupt lied (PM)!\n");
@@ -1340,25 +1399,28 @@ static void gen8_gt_irq_handler(struct drm_i915_private *dev_priv,
u32 gt_iir[4])
{
if (gt_iir[0]) {
- gen8_cs_irq_handler(&dev_priv->engine[RCS],
+ gen8_cs_irq_handler(dev_priv->engine[RCS],
gt_iir[0], GEN8_RCS_IRQ_SHIFT);
- gen8_cs_irq_handler(&dev_priv->engine[BCS],
+ gen8_cs_irq_handler(dev_priv->engine[BCS],
gt_iir[0], GEN8_BCS_IRQ_SHIFT);
}
if (gt_iir[1]) {
- gen8_cs_irq_handler(&dev_priv->engine[VCS],
+ gen8_cs_irq_handler(dev_priv->engine[VCS],
gt_iir[1], GEN8_VCS1_IRQ_SHIFT);
- gen8_cs_irq_handler(&dev_priv->engine[VCS2],
+ gen8_cs_irq_handler(dev_priv->engine[VCS2],
gt_iir[1], GEN8_VCS2_IRQ_SHIFT);
}
if (gt_iir[3])
- gen8_cs_irq_handler(&dev_priv->engine[VECS],
+ gen8_cs_irq_handler(dev_priv->engine[VECS],
gt_iir[3], GEN8_VECS_IRQ_SHIFT);
if (gt_iir[2] & dev_priv->pm_rps_events)
gen6_rps_irq_handler(dev_priv, gt_iir[2]);
+
+ if (gt_iir[2] & dev_priv->pm_guc_events)
+ gen9_guc_irq_handler(dev_priv, gt_iir[2]);
}
static bool bxt_port_hotplug_long_detect(enum port port, u32 val)
@@ -1585,7 +1647,7 @@ static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
{
if (pm_iir & dev_priv->pm_rps_events) {
spin_lock(&dev_priv->irq_lock);
- gen6_disable_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events);
+ gen6_mask_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events);
if (dev_priv->rps.interrupts_enabled) {
dev_priv->rps.pm_iir |= pm_iir & dev_priv->pm_rps_events;
schedule_work(&dev_priv->rps.work);
@@ -1598,13 +1660,48 @@ static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
if (HAS_VEBOX(dev_priv)) {
if (pm_iir & PM_VEBOX_USER_INTERRUPT)
- notify_ring(&dev_priv->engine[VECS]);
+ notify_ring(dev_priv->engine[VECS]);
if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT)
DRM_DEBUG("Command parser error, pm_iir 0x%08x\n", pm_iir);
}
}
+static void gen9_guc_irq_handler(struct drm_i915_private *dev_priv, u32 gt_iir)
+{
+ if (gt_iir & GEN9_GUC_TO_HOST_INT_EVENT) {
+ /* Sample the log-buffer flush related bits & clear them out right
+ * away from the message identity register to minimize the
+ * probability of losing a flush interrupt, when there are back
+ * to back flush interrupts.
+ * There can be a new flush interrupt, for different log buffer
+ * type (like for ISR), whilst Host is handling one (for DPC).
+ * Since same bit is used in message register for ISR & DPC, it
+ * could happen that GuC sets the bit for 2nd interrupt but Host
+ * clears out the bit on handling the 1st interrupt.
+ */
+ u32 msg, flush;
+
+ msg = I915_READ(SOFT_SCRATCH(15));
+ flush = msg & (GUC2HOST_MSG_CRASH_DUMP_POSTED |
+ GUC2HOST_MSG_FLUSH_LOG_BUFFER);
+ if (flush) {
+ /* Clear the message bits that are handled */
+ I915_WRITE(SOFT_SCRATCH(15), msg & ~flush);
+
+ /* Handle flush interrupt in bottom half */
+ queue_work(dev_priv->guc.log.flush_wq,
+ &dev_priv->guc.log.flush_work);
+
+ dev_priv->guc.log.flush_interrupt_count++;
+ } else {
+ /* Leaving unhandled event bits uncleared won't result in the
+ * interrupt being re-triggered.
+ */
+ }
+ }
+}
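
The work item queued above is the bottom half that actually drains the GuC log buffer. Its handler is assumed to be wired up in the submission code along these lines — a sketch with a hypothetical function name; the real INIT_WORK lives in i915_guc_submission.c, and i915_guc_capture_logs() is shown earlier in this patch:

	/* Hypothetical bottom-half handler name; the pairing with
	 * i915_guc_capture_logs() is what matters here.
	 */
	static void guc_log_flush_work_fn(struct work_struct *work)
	{
		struct intel_guc *guc =
			container_of(work, struct intel_guc, log.flush_work);
		struct drm_i915_private *dev_priv =
			container_of(guc, struct drm_i915_private, guc);

		/* Read out the log buffer, then ack the flush to GuC */
		i915_guc_capture_logs(dev_priv);
	}

	/* at init time: */
	INIT_WORK(&guc->log.flush_work, guc_log_flush_work_fn);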
+
static bool intel_pipe_handle_vblank(struct drm_i915_private *dev_priv,
enum pipe pipe)
{
@@ -2407,7 +2504,7 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
fault_errors &= GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
if (fault_errors)
- DRM_ERROR("Fault errors on pipe %c\n: 0x%08x",
+ DRM_ERROR("Fault errors on pipe %c: 0x%08x\n",
pipe_name(pipe),
fault_errors);
}
@@ -2551,92 +2648,52 @@ static void i915_reset_and_wakeup(struct drm_i915_private *dev_priv)
wake_up_all(&dev_priv->gpu_error.reset_queue);
}
-static void i915_report_and_clear_eir(struct drm_i915_private *dev_priv)
+static inline void
+i915_err_print_instdone(struct drm_i915_private *dev_priv,
+ struct intel_instdone *instdone)
{
- uint32_t instdone[I915_NUM_INSTDONE_REG];
- u32 eir = I915_READ(EIR);
- int pipe, i;
+ int slice;
+ int subslice;
+
+ pr_err(" INSTDONE: 0x%08x\n", instdone->instdone);
+
+ if (INTEL_GEN(dev_priv) <= 3)
+ return;
+
+ pr_err(" SC_INSTDONE: 0x%08x\n", instdone->slice_common);
- if (!eir)
+ if (INTEL_GEN(dev_priv) <= 6)
return;
- pr_err("render error detected, EIR: 0x%08x\n", eir);
+ for_each_instdone_slice_subslice(dev_priv, slice, subslice)
+ pr_err(" SAMPLER_INSTDONE[%d][%d]: 0x%08x\n",
+ slice, subslice, instdone->sampler[slice][subslice]);
- i915_get_extra_instdone(dev_priv, instdone);
+ for_each_instdone_slice_subslice(dev_priv, slice, subslice)
+ pr_err(" ROW_INSTDONE[%d][%d]: 0x%08x\n",
+ slice, subslice, instdone->row[slice][subslice]);
+}
- if (IS_G4X(dev_priv)) {
- if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) {
- u32 ipeir = I915_READ(IPEIR_I965);
-
- pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
- pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
- for (i = 0; i < ARRAY_SIZE(instdone); i++)
- pr_err(" INSTDONE_%d: 0x%08x\n", i, instdone[i]);
- pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS));
- pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
- I915_WRITE(IPEIR_I965, ipeir);
- POSTING_READ(IPEIR_I965);
- }
- if (eir & GM45_ERROR_PAGE_TABLE) {
- u32 pgtbl_err = I915_READ(PGTBL_ER);
- pr_err("page table error\n");
- pr_err(" PGTBL_ER: 0x%08x\n", pgtbl_err);
- I915_WRITE(PGTBL_ER, pgtbl_err);
- POSTING_READ(PGTBL_ER);
- }
- }
+static void i915_clear_error_registers(struct drm_i915_private *dev_priv)
+{
+ u32 eir;
- if (!IS_GEN2(dev_priv)) {
- if (eir & I915_ERROR_PAGE_TABLE) {
- u32 pgtbl_err = I915_READ(PGTBL_ER);
- pr_err("page table error\n");
- pr_err(" PGTBL_ER: 0x%08x\n", pgtbl_err);
- I915_WRITE(PGTBL_ER, pgtbl_err);
- POSTING_READ(PGTBL_ER);
- }
- }
+ if (!IS_GEN2(dev_priv))
+ I915_WRITE(PGTBL_ER, I915_READ(PGTBL_ER));
- if (eir & I915_ERROR_MEMORY_REFRESH) {
- pr_err("memory refresh error:\n");
- for_each_pipe(dev_priv, pipe)
- pr_err("pipe %c stat: 0x%08x\n",
- pipe_name(pipe), I915_READ(PIPESTAT(pipe)));
- /* pipestat has already been acked */
- }
- if (eir & I915_ERROR_INSTRUCTION) {
- pr_err("instruction error\n");
- pr_err(" INSTPM: 0x%08x\n", I915_READ(INSTPM));
- for (i = 0; i < ARRAY_SIZE(instdone); i++)
- pr_err(" INSTDONE_%d: 0x%08x\n", i, instdone[i]);
- if (INTEL_GEN(dev_priv) < 4) {
- u32 ipeir = I915_READ(IPEIR);
-
- pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR));
- pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR));
- pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD));
- I915_WRITE(IPEIR, ipeir);
- POSTING_READ(IPEIR);
- } else {
- u32 ipeir = I915_READ(IPEIR_I965);
-
- pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
- pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
- pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS));
- pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
- I915_WRITE(IPEIR_I965, ipeir);
- POSTING_READ(IPEIR_I965);
- }
- }
+ if (INTEL_GEN(dev_priv) < 4)
+ I915_WRITE(IPEIR, I915_READ(IPEIR));
+ else
+ I915_WRITE(IPEIR_I965, I915_READ(IPEIR_I965));
- I915_WRITE(EIR, eir);
- POSTING_READ(EIR);
+ I915_WRITE(EIR, I915_READ(EIR));
eir = I915_READ(EIR);
if (eir) {
/*
* some errors might have become stuck,
* mask them.
*/
- DRM_ERROR("EIR stuck: 0x%08x, masking\n", eir);
+ DRM_DEBUG_DRIVER("EIR stuck: 0x%08x, masking\n", eir);
I915_WRITE(EMR, I915_READ(EMR) | eir);
I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
}
@@ -2665,7 +2722,7 @@ void i915_handle_error(struct drm_i915_private *dev_priv,
va_end(args);
i915_capture_error_state(dev_priv, engine_mask, error_msg);
- i915_report_and_clear_eir(dev_priv);
+ i915_clear_error_registers(dev_priv);
if (!engine_mask)
return;
@@ -2694,45 +2751,40 @@ void i915_handle_error(struct drm_i915_private *dev_priv,
/* Called from drm generic code, passed 'crtc' which
* we use as a pipe index
*/
-static int i915_enable_vblank(struct drm_device *dev, unsigned int pipe)
+static int i8xx_enable_vblank(struct drm_device *dev, unsigned int pipe)
{
struct drm_i915_private *dev_priv = to_i915(dev);
unsigned long irqflags;
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
- if (INTEL_INFO(dev)->gen >= 4)
- i915_enable_pipestat(dev_priv, pipe,
- PIPE_START_VBLANK_INTERRUPT_STATUS);
- else
- i915_enable_pipestat(dev_priv, pipe,
- PIPE_VBLANK_INTERRUPT_STATUS);
+ i915_enable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_STATUS);
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
return 0;
}
-static int ironlake_enable_vblank(struct drm_device *dev, unsigned int pipe)
+static int i965_enable_vblank(struct drm_device *dev, unsigned int pipe)
{
struct drm_i915_private *dev_priv = to_i915(dev);
unsigned long irqflags;
- uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
- DE_PIPE_VBLANK(pipe);
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
- ilk_enable_display_irq(dev_priv, bit);
+ i915_enable_pipestat(dev_priv, pipe,
+ PIPE_START_VBLANK_INTERRUPT_STATUS);
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
return 0;
}
-static int valleyview_enable_vblank(struct drm_device *dev, unsigned int pipe)
+static int ironlake_enable_vblank(struct drm_device *dev, unsigned int pipe)
{
struct drm_i915_private *dev_priv = to_i915(dev);
unsigned long irqflags;
+ uint32_t bit = INTEL_GEN(dev_priv) >= 7 ?
+ DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe);
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
- i915_enable_pipestat(dev_priv, pipe,
- PIPE_START_VBLANK_INTERRUPT_STATUS);
+ ilk_enable_display_irq(dev_priv, bit);
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
return 0;
@@ -2753,38 +2805,36 @@ static int gen8_enable_vblank(struct drm_device *dev, unsigned int pipe)
/* Called from drm generic code, passed 'crtc' which
* we use as a pipe index
*/
-static void i915_disable_vblank(struct drm_device *dev, unsigned int pipe)
+static void i8xx_disable_vblank(struct drm_device *dev, unsigned int pipe)
{
struct drm_i915_private *dev_priv = to_i915(dev);
unsigned long irqflags;
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
- i915_disable_pipestat(dev_priv, pipe,
- PIPE_VBLANK_INTERRUPT_STATUS |
- PIPE_START_VBLANK_INTERRUPT_STATUS);
+ i915_disable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_STATUS);
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
-static void ironlake_disable_vblank(struct drm_device *dev, unsigned int pipe)
+static void i965_disable_vblank(struct drm_device *dev, unsigned int pipe)
{
struct drm_i915_private *dev_priv = to_i915(dev);
unsigned long irqflags;
- uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
- DE_PIPE_VBLANK(pipe);
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
- ilk_disable_display_irq(dev_priv, bit);
+ i915_disable_pipestat(dev_priv, pipe,
+ PIPE_START_VBLANK_INTERRUPT_STATUS);
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
-static void valleyview_disable_vblank(struct drm_device *dev, unsigned int pipe)
+static void ironlake_disable_vblank(struct drm_device *dev, unsigned int pipe)
{
struct drm_i915_private *dev_priv = to_i915(dev);
unsigned long irqflags;
+ uint32_t bit = INTEL_GEN(dev_priv) >= 7 ?
+ DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe);
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
- i915_disable_pipestat(dev_priv, pipe,
- PIPE_START_VBLANK_INTERRUPT_STATUS);
+ ilk_disable_display_irq(dev_priv, bit);
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
@@ -2798,411 +2848,14 @@ static void gen8_disable_vblank(struct drm_device *dev, unsigned int pipe)
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
-static bool
-ipehr_is_semaphore_wait(struct intel_engine_cs *engine, u32 ipehr)
-{
- if (INTEL_GEN(engine->i915) >= 8) {
- return (ipehr >> 23) == 0x1c;
- } else {
- ipehr &= ~MI_SEMAPHORE_SYNC_MASK;
- return ipehr == (MI_SEMAPHORE_MBOX | MI_SEMAPHORE_COMPARE |
- MI_SEMAPHORE_REGISTER);
- }
-}
-
-static struct intel_engine_cs *
-semaphore_wait_to_signaller_ring(struct intel_engine_cs *engine, u32 ipehr,
- u64 offset)
-{
- struct drm_i915_private *dev_priv = engine->i915;
- struct intel_engine_cs *signaller;
-
- if (INTEL_GEN(dev_priv) >= 8) {
- for_each_engine(signaller, dev_priv) {
- if (engine == signaller)
- continue;
-
- if (offset == signaller->semaphore.signal_ggtt[engine->hw_id])
- return signaller;
- }
- } else {
- u32 sync_bits = ipehr & MI_SEMAPHORE_SYNC_MASK;
-
- for_each_engine(signaller, dev_priv) {
- if(engine == signaller)
- continue;
-
- if (sync_bits == signaller->semaphore.mbox.wait[engine->hw_id])
- return signaller;
- }
- }
-
- DRM_DEBUG_DRIVER("No signaller ring found for %s, ipehr 0x%08x, offset 0x%016llx\n",
- engine->name, ipehr, offset);
-
- return ERR_PTR(-ENODEV);
-}
-
-static struct intel_engine_cs *
-semaphore_waits_for(struct intel_engine_cs *engine, u32 *seqno)
+static void ibx_irq_reset(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = engine->i915;
- void __iomem *vaddr;
- u32 cmd, ipehr, head;
- u64 offset = 0;
- int i, backwards;
-
- /*
- * This function does not support execlist mode - any attempt to
- * proceed further into this function will result in a kernel panic
- * when dereferencing ring->buffer, which is not set up in execlist
- * mode.
- *
- * The correct way of doing it would be to derive the currently
- * executing ring buffer from the current context, which is derived
- * from the currently running request. Unfortunately, to get the
- * current request we would have to grab the struct_mutex before doing
- * anything else, which would be ill-advised since some other thread
- * might have grabbed it already and managed to hang itself, causing
- * the hang checker to deadlock.
- *
- * Therefore, this function does not support execlist mode in its
- * current form. Just return NULL and move on.
- */
- if (engine->buffer == NULL)
- return NULL;
-
- ipehr = I915_READ(RING_IPEHR(engine->mmio_base));
- if (!ipehr_is_semaphore_wait(engine, ipehr))
- return NULL;
-
- /*
- * HEAD is likely pointing to the dword after the actual command,
- * so scan backwards until we find the MBOX. But limit it to just 3
- * or 4 dwords depending on the semaphore wait command size.
- * Note that we don't care about ACTHD here since that might
- * point at at batch, and semaphores are always emitted into the
- * ringbuffer itself.
- */
- head = I915_READ_HEAD(engine) & HEAD_ADDR;
- backwards = (INTEL_GEN(dev_priv) >= 8) ? 5 : 4;
- vaddr = (void __iomem *)engine->buffer->vaddr;
-
- for (i = backwards; i; --i) {
- /*
- * Be paranoid and presume the hw has gone off into the wild -
- * our ring is smaller than what the hardware (and hence
- * HEAD_ADDR) allows. Also handles wrap-around.
- */
- head &= engine->buffer->size - 1;
-
- /* This here seems to blow up */
- cmd = ioread32(vaddr + head);
- if (cmd == ipehr)
- break;
-
- head -= 4;
- }
-
- if (!i)
- return NULL;
-
- *seqno = ioread32(vaddr + head + 4) + 1;
- if (INTEL_GEN(dev_priv) >= 8) {
- offset = ioread32(vaddr + head + 12);
- offset <<= 32;
- offset |= ioread32(vaddr + head + 8);
- }
- return semaphore_wait_to_signaller_ring(engine, ipehr, offset);
-}
-
-static int semaphore_passed(struct intel_engine_cs *engine)
-{
- struct drm_i915_private *dev_priv = engine->i915;
- struct intel_engine_cs *signaller;
- u32 seqno;
-
- engine->hangcheck.deadlock++;
-
- signaller = semaphore_waits_for(engine, &seqno);
- if (signaller == NULL)
- return -1;
-
- if (IS_ERR(signaller))
- return 0;
-
- /* Prevent pathological recursion due to driver bugs */
- if (signaller->hangcheck.deadlock >= I915_NUM_ENGINES)
- return -1;
-
- if (i915_seqno_passed(intel_engine_get_seqno(signaller), seqno))
- return 1;
-
- /* cursory check for an unkickable deadlock */
- if (I915_READ_CTL(signaller) & RING_WAIT_SEMAPHORE &&
- semaphore_passed(signaller) < 0)
- return -1;
-
- return 0;
-}
-
-static void semaphore_clear_deadlocks(struct drm_i915_private *dev_priv)
-{
- struct intel_engine_cs *engine;
-
- for_each_engine(engine, dev_priv)
- engine->hangcheck.deadlock = 0;
-}
-
-static bool subunits_stuck(struct intel_engine_cs *engine)
-{
- u32 instdone[I915_NUM_INSTDONE_REG];
- bool stuck;
- int i;
-
- if (engine->id != RCS)
- return true;
-
- i915_get_extra_instdone(engine->i915, instdone);
-
- /* There might be unstable subunit states even when
- * actual head is not moving. Filter out the unstable ones by
- * accumulating the undone -> done transitions and only
- * consider those as progress.
- */
- stuck = true;
- for (i = 0; i < I915_NUM_INSTDONE_REG; i++) {
- const u32 tmp = instdone[i] | engine->hangcheck.instdone[i];
-
- if (tmp != engine->hangcheck.instdone[i])
- stuck = false;
-
- engine->hangcheck.instdone[i] |= tmp;
- }
-
- return stuck;
-}
-
-static enum intel_engine_hangcheck_action
-head_stuck(struct intel_engine_cs *engine, u64 acthd)
-{
- if (acthd != engine->hangcheck.acthd) {
-
- /* Clear subunit states on head movement */
- memset(engine->hangcheck.instdone, 0,
- sizeof(engine->hangcheck.instdone));
-
- return HANGCHECK_ACTIVE;
- }
-
- if (!subunits_stuck(engine))
- return HANGCHECK_ACTIVE;
-
- return HANGCHECK_HUNG;
-}
-
-static enum intel_engine_hangcheck_action
-engine_stuck(struct intel_engine_cs *engine, u64 acthd)
-{
- struct drm_i915_private *dev_priv = engine->i915;
- enum intel_engine_hangcheck_action ha;
- u32 tmp;
-
- ha = head_stuck(engine, acthd);
- if (ha != HANGCHECK_HUNG)
- return ha;
-
- if (IS_GEN2(dev_priv))
- return HANGCHECK_HUNG;
-
- /* Is the chip hanging on a WAIT_FOR_EVENT?
- * If so we can simply poke the RB_WAIT bit
- * and break the hang. This should work on
- * all but the second generation chipsets.
- */
- tmp = I915_READ_CTL(engine);
- if (tmp & RING_WAIT) {
- i915_handle_error(dev_priv, 0,
- "Kicking stuck wait on %s",
- engine->name);
- I915_WRITE_CTL(engine, tmp);
- return HANGCHECK_KICK;
- }
-
- if (INTEL_GEN(dev_priv) >= 6 && tmp & RING_WAIT_SEMAPHORE) {
- switch (semaphore_passed(engine)) {
- default:
- return HANGCHECK_HUNG;
- case 1:
- i915_handle_error(dev_priv, 0,
- "Kicking stuck semaphore on %s",
- engine->name);
- I915_WRITE_CTL(engine, tmp);
- return HANGCHECK_KICK;
- case 0:
- return HANGCHECK_WAIT;
- }
- }
-
- return HANGCHECK_HUNG;
-}
-
-/*
- * This is called when the chip hasn't reported back with completed
- * batchbuffers in a long time. We keep track per ring seqno progress and
- * if there are no progress, hangcheck score for that ring is increased.
- * Further, acthd is inspected to see if the ring is stuck. On stuck case
- * we kick the ring. If we see no progress on three subsequent calls
- * we assume chip is wedged and try to fix it by resetting the chip.
- */
-static void i915_hangcheck_elapsed(struct work_struct *work)
-{
- struct drm_i915_private *dev_priv =
- container_of(work, typeof(*dev_priv),
- gpu_error.hangcheck_work.work);
- struct intel_engine_cs *engine;
- unsigned int hung = 0, stuck = 0;
- int busy_count = 0;
-#define BUSY 1
-#define KICK 5
-#define HUNG 20
-#define ACTIVE_DECAY 15
-
- if (!i915.enable_hangcheck)
- return;
-
- if (!READ_ONCE(dev_priv->gt.awake))
- return;
-
- /* As enabling the GPU requires fairly extensive mmio access,
- * periodically arm the mmio checker to see if we are triggering
- * any invalid access.
- */
- intel_uncore_arm_unclaimed_mmio_detection(dev_priv);
-
- for_each_engine(engine, dev_priv) {
- bool busy = intel_engine_has_waiter(engine);
- u64 acthd;
- u32 seqno;
- u32 submit;
-
- semaphore_clear_deadlocks(dev_priv);
-
- /* We don't strictly need an irq-barrier here, as we are not
- * serving an interrupt request, be paranoid in case the
- * barrier has side-effects (such as preventing a broken
- * cacheline snoop) and so be sure that we can see the seqno
- * advance. If the seqno should stick, due to a stale
- * cacheline, we would erroneously declare the GPU hung.
- */
- if (engine->irq_seqno_barrier)
- engine->irq_seqno_barrier(engine);
-
- acthd = intel_engine_get_active_head(engine);
- seqno = intel_engine_get_seqno(engine);
- submit = READ_ONCE(engine->last_submitted_seqno);
-
- if (engine->hangcheck.seqno == seqno) {
- if (i915_seqno_passed(seqno, submit)) {
- engine->hangcheck.action = HANGCHECK_IDLE;
- } else {
- /* We always increment the hangcheck score
- * if the engine is busy and still processing
- * the same request, so that no single request
- * can run indefinitely (such as a chain of
- * batches). The only time we do not increment
- * the hangcheck score on this ring, if this
- * engine is in a legitimate wait for another
- * engine. In that case the waiting engine is a
- * victim and we want to be sure we catch the
- * right culprit. Then every time we do kick
- * the ring, add a small increment to the
- * score so that we can catch a batch that is
- * being repeatedly kicked and so responsible
- * for stalling the machine.
- */
- engine->hangcheck.action =
- engine_stuck(engine, acthd);
-
- switch (engine->hangcheck.action) {
- case HANGCHECK_IDLE:
- case HANGCHECK_WAIT:
- break;
- case HANGCHECK_ACTIVE:
- engine->hangcheck.score += BUSY;
- break;
- case HANGCHECK_KICK:
- engine->hangcheck.score += KICK;
- break;
- case HANGCHECK_HUNG:
- engine->hangcheck.score += HUNG;
- break;
- }
- }
-
- if (engine->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG) {
- hung |= intel_engine_flag(engine);
- if (engine->hangcheck.action != HANGCHECK_HUNG)
- stuck |= intel_engine_flag(engine);
- }
- } else {
- engine->hangcheck.action = HANGCHECK_ACTIVE;
-
- /* Gradually reduce the count so that we catch DoS
- * attempts across multiple batches.
- */
- if (engine->hangcheck.score > 0)
- engine->hangcheck.score -= ACTIVE_DECAY;
- if (engine->hangcheck.score < 0)
- engine->hangcheck.score = 0;
-
- /* Clear head and subunit states on seqno movement */
- acthd = 0;
-
- memset(engine->hangcheck.instdone, 0,
- sizeof(engine->hangcheck.instdone));
- }
-
- engine->hangcheck.seqno = seqno;
- engine->hangcheck.acthd = acthd;
- busy_count += busy;
- }
-
- if (hung) {
- char msg[80];
- unsigned int tmp;
- int len;
-
- /* If some rings hung but others were still busy, only
- * blame the hanging rings in the synopsis.
- */
- if (stuck != hung)
- hung &= ~stuck;
- len = scnprintf(msg, sizeof(msg),
- "%s on ", stuck == hung ? "No progress" : "Hang");
- for_each_engine_masked(engine, dev_priv, hung, tmp)
- len += scnprintf(msg + len, sizeof(msg) - len,
- "%s, ", engine->name);
- msg[len-2] = '\0';
-
- return i915_handle_error(dev_priv, hung, msg);
- }
-
- /* Reset timer in case GPU hangs without another request being added */
- if (busy_count)
- i915_queue_hangcheck(dev_priv);
-}
-
-static void ibx_irq_reset(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = to_i915(dev);
-
- if (HAS_PCH_NOP(dev))
+ if (HAS_PCH_NOP(dev_priv))
return;
GEN5_IRQ_RESET(SDE);
- if (HAS_PCH_CPT(dev) || HAS_PCH_LPT(dev))
+ if (HAS_PCH_CPT(dev_priv) || HAS_PCH_LPT(dev_priv))
I915_WRITE(SERR_INT, 0xffffffff);
}
@@ -3218,7 +2871,7 @@ static void ibx_irq_pre_postinstall(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
- if (HAS_PCH_NOP(dev))
+ if (HAS_PCH_NOP(dev_priv))
return;
WARN_ON(I915_READ(SDEIER) != 0);
@@ -3226,12 +2879,10 @@ static void ibx_irq_pre_postinstall(struct drm_device *dev)
POSTING_READ(SDEIER);
}
-static void gen5_gt_irq_reset(struct drm_device *dev)
+static void gen5_gt_irq_reset(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
-
GEN5_IRQ_RESET(GT);
- if (INTEL_INFO(dev)->gen >= 6)
+ if (INTEL_GEN(dev_priv) >= 6)
GEN5_IRQ_RESET(GEN6_PM);
}
@@ -3293,12 +2944,12 @@ static void ironlake_irq_reset(struct drm_device *dev)
I915_WRITE(HWSTAM, 0xffffffff);
GEN5_IRQ_RESET(DE);
- if (IS_GEN7(dev))
+ if (IS_GEN7(dev_priv))
I915_WRITE(GEN7_ERR_INT, 0xffffffff);
- gen5_gt_irq_reset(dev);
+ gen5_gt_irq_reset(dev_priv);
- ibx_irq_reset(dev);
+ ibx_irq_reset(dev_priv);
}
static void valleyview_irq_preinstall(struct drm_device *dev)
@@ -3308,7 +2959,7 @@ static void valleyview_irq_preinstall(struct drm_device *dev)
I915_WRITE(VLV_MASTER_IER, 0);
POSTING_READ(VLV_MASTER_IER);
- gen5_gt_irq_reset(dev);
+ gen5_gt_irq_reset(dev_priv);
spin_lock_irq(&dev_priv->irq_lock);
if (dev_priv->display_irqs_enabled)
@@ -3343,8 +2994,8 @@ static void gen8_irq_reset(struct drm_device *dev)
GEN5_IRQ_RESET(GEN8_DE_MISC_);
GEN5_IRQ_RESET(GEN8_PCU_);
- if (HAS_PCH_SPLIT(dev))
- ibx_irq_reset(dev);
+ if (HAS_PCH_SPLIT(dev_priv))
+ ibx_irq_reset(dev_priv);
}
void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv,
@@ -3532,10 +3183,10 @@ static void ibx_irq_postinstall(struct drm_device *dev)
struct drm_i915_private *dev_priv = to_i915(dev);
u32 mask;
- if (HAS_PCH_NOP(dev))
+ if (HAS_PCH_NOP(dev_priv))
return;
- if (HAS_PCH_IBX(dev))
+ if (HAS_PCH_IBX(dev_priv))
mask = SDE_GMBUS | SDE_AUX_MASK | SDE_POISON;
else
mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT;
@@ -3552,14 +3203,14 @@ static void gen5_gt_irq_postinstall(struct drm_device *dev)
pm_irqs = gt_irqs = 0;
dev_priv->gt_irq_mask = ~0;
- if (HAS_L3_DPF(dev)) {
+ if (HAS_L3_DPF(dev_priv)) {
/* L3 parity interrupt is always unmasked. */
- dev_priv->gt_irq_mask = ~GT_PARITY_ERROR(dev);
- gt_irqs |= GT_PARITY_ERROR(dev);
+ dev_priv->gt_irq_mask = ~GT_PARITY_ERROR(dev_priv);
+ gt_irqs |= GT_PARITY_ERROR(dev_priv);
}
gt_irqs |= GT_RENDER_USER_INTERRUPT;
- if (IS_GEN5(dev)) {
+ if (IS_GEN5(dev_priv)) {
gt_irqs |= ILK_BSD_USER_INTERRUPT;
} else {
gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT;
@@ -3567,16 +3218,18 @@ static void gen5_gt_irq_postinstall(struct drm_device *dev)
GEN5_IRQ_INIT(GT, dev_priv->gt_irq_mask, gt_irqs);
- if (INTEL_INFO(dev)->gen >= 6) {
+ if (INTEL_GEN(dev_priv) >= 6) {
/*
* RPS interrupts will get enabled/disabled on demand when RPS
* itself is enabled/disabled.
*/
- if (HAS_VEBOX(dev))
+ if (HAS_VEBOX(dev_priv)) {
pm_irqs |= PM_VEBOX_USER_INTERRUPT;
+ dev_priv->pm_ier |= PM_VEBOX_USER_INTERRUPT;
+ }
- dev_priv->pm_irq_mask = 0xffffffff;
- GEN5_IRQ_INIT(GEN6_PM, dev_priv->pm_irq_mask, pm_irqs);
+ dev_priv->pm_imr = 0xffffffff;
+ GEN5_IRQ_INIT(GEN6_PM, dev_priv->pm_imr, pm_irqs);
}
}
@@ -3585,7 +3238,7 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
struct drm_i915_private *dev_priv = to_i915(dev);
u32 display_mask, extra_mask;
- if (INTEL_INFO(dev)->gen >= 7) {
+ if (INTEL_GEN(dev_priv) >= 7) {
display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB |
DE_PCH_EVENT_IVB | DE_PLANEC_FLIP_DONE_IVB |
DE_PLANEB_FLIP_DONE_IVB |
@@ -3616,7 +3269,7 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
ibx_irq_postinstall(dev);
- if (IS_IRONLAKE_M(dev)) {
+ if (IS_IRONLAKE_M(dev_priv)) {
/* Enable PCU event interrupts
*
* spinlocking not required here for correctness since interrupt
@@ -3696,14 +3349,15 @@ static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv)
if (HAS_L3_DPF(dev_priv))
gt_interrupts[0] |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
- dev_priv->pm_irq_mask = 0xffffffff;
+ dev_priv->pm_ier = 0x0;
+ dev_priv->pm_imr = ~dev_priv->pm_ier;
GEN8_IRQ_INIT_NDX(GT, 0, ~gt_interrupts[0], gt_interrupts[0]);
GEN8_IRQ_INIT_NDX(GT, 1, ~gt_interrupts[1], gt_interrupts[1]);
/*
* RPS interrupts will get enabled/disabled on demand when RPS itself
- * is enabled/disabled.
+ * is enabled/disabled. The same will be the case for GuC interrupts.
*/
- GEN8_IRQ_INIT_NDX(GT, 2, dev_priv->pm_irq_mask, 0);
+ GEN8_IRQ_INIT_NDX(GT, 2, dev_priv->pm_imr, dev_priv->pm_ier);
GEN8_IRQ_INIT_NDX(GT, 3, ~gt_interrupts[3], gt_interrupts[3]);
}
@@ -3756,13 +3410,13 @@ static int gen8_irq_postinstall(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
- if (HAS_PCH_SPLIT(dev))
+ if (HAS_PCH_SPLIT(dev_priv))
ibx_irq_pre_postinstall(dev);
gen8_gt_irq_postinstall(dev_priv);
gen8_de_irq_postinstall(dev_priv);
- if (HAS_PCH_SPLIT(dev))
+ if (HAS_PCH_SPLIT(dev_priv))
ibx_irq_postinstall(dev);
I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
@@ -3808,7 +3462,7 @@ static void valleyview_irq_uninstall(struct drm_device *dev)
I915_WRITE(VLV_MASTER_IER, 0);
POSTING_READ(VLV_MASTER_IER);
- gen5_gt_irq_reset(dev);
+ gen5_gt_irq_reset(dev_priv);
I915_WRITE(HWSTAM, 0xffffffff);
@@ -3971,7 +3625,7 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg)
new_iir = I915_READ16(IIR); /* Flush posted writes */
if (iir & I915_USER_INTERRUPT)
- notify_ring(&dev_priv->engine[RCS]);
+ notify_ring(dev_priv->engine[RCS]);
for_each_pipe(dev_priv, pipe) {
int plane = pipe;
@@ -4020,7 +3674,7 @@ static void i915_irq_preinstall(struct drm_device * dev)
struct drm_i915_private *dev_priv = to_i915(dev);
int pipe;
- if (I915_HAS_HOTPLUG(dev)) {
+ if (I915_HAS_HOTPLUG(dev_priv)) {
i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
}
@@ -4054,7 +3708,7 @@ static int i915_irq_postinstall(struct drm_device *dev)
I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
I915_USER_INTERRUPT;
- if (I915_HAS_HOTPLUG(dev)) {
+ if (I915_HAS_HOTPLUG(dev_priv)) {
i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
POSTING_READ(PORT_HOTPLUG_EN);
@@ -4168,7 +3822,7 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
new_iir = I915_READ(IIR); /* Flush posted writes */
if (iir & I915_USER_INTERRUPT)
- notify_ring(&dev_priv->engine[RCS]);
+ notify_ring(dev_priv->engine[RCS]);
for_each_pipe(dev_priv, pipe) {
int plane = pipe;
@@ -4222,7 +3876,7 @@ static void i915_irq_uninstall(struct drm_device * dev)
struct drm_i915_private *dev_priv = to_i915(dev);
int pipe;
- if (I915_HAS_HOTPLUG(dev)) {
+ if (I915_HAS_HOTPLUG(dev_priv)) {
i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
}
@@ -4400,9 +4054,9 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
new_iir = I915_READ(IIR); /* Flush posted writes */
if (iir & I915_USER_INTERRUPT)
- notify_ring(&dev_priv->engine[RCS]);
+ notify_ring(dev_priv->engine[RCS]);
if (iir & I915_BSD_USER_INTERRUPT)
- notify_ring(&dev_priv->engine[VCS]);
+ notify_ring(dev_priv->engine[VCS]);
for_each_pipe(dev_priv, pipe) {
if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
@@ -4487,6 +4141,9 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work);
INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);
+ if (HAS_GUC_SCHED(dev_priv))
+ dev_priv->pm_guc_events = GEN9_GUC_TO_HOST_INT_EVENT;
+
/* Let's track the enabled rps events */
if (IS_VALLEYVIEW(dev_priv))
/* WaGsvRC0ResidencyMethod:vlv */
@@ -4508,9 +4165,6 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
if (INTEL_INFO(dev_priv)->gen >= 8)
dev_priv->rps.pm_intr_keep |= GEN8_PMINTR_REDIRECT_TO_GUC;
- INIT_DELAYED_WORK(&dev_priv->gpu_error.hangcheck_work,
- i915_hangcheck_elapsed);
-
if (IS_GEN2(dev_priv)) {
/* Gen2 doesn't have a hardware frame counter */
dev->max_vblank_count = 0;
@@ -4539,16 +4193,16 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
dev->driver->irq_preinstall = cherryview_irq_preinstall;
dev->driver->irq_postinstall = cherryview_irq_postinstall;
dev->driver->irq_uninstall = cherryview_irq_uninstall;
- dev->driver->enable_vblank = valleyview_enable_vblank;
- dev->driver->disable_vblank = valleyview_disable_vblank;
+ dev->driver->enable_vblank = i965_enable_vblank;
+ dev->driver->disable_vblank = i965_disable_vblank;
dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
} else if (IS_VALLEYVIEW(dev_priv)) {
dev->driver->irq_handler = valleyview_irq_handler;
dev->driver->irq_preinstall = valleyview_irq_preinstall;
dev->driver->irq_postinstall = valleyview_irq_postinstall;
dev->driver->irq_uninstall = valleyview_irq_uninstall;
- dev->driver->enable_vblank = valleyview_enable_vblank;
- dev->driver->disable_vblank = valleyview_disable_vblank;
+ dev->driver->enable_vblank = i965_enable_vblank;
+ dev->driver->disable_vblank = i965_disable_vblank;
dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
} else if (INTEL_INFO(dev_priv)->gen >= 8) {
dev->driver->irq_handler = gen8_irq_handler;
@@ -4557,13 +4211,13 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
dev->driver->irq_uninstall = gen8_irq_uninstall;
dev->driver->enable_vblank = gen8_enable_vblank;
dev->driver->disable_vblank = gen8_disable_vblank;
- if (IS_BROXTON(dev))
+ if (IS_BROXTON(dev_priv))
dev_priv->display.hpd_irq_setup = bxt_hpd_irq_setup;
- else if (HAS_PCH_SPT(dev) || HAS_PCH_KBP(dev))
+ else if (HAS_PCH_SPT(dev_priv) || HAS_PCH_KBP(dev_priv))
dev_priv->display.hpd_irq_setup = spt_hpd_irq_setup;
else
dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup;
- } else if (HAS_PCH_SPLIT(dev)) {
+ } else if (HAS_PCH_SPLIT(dev_priv)) {
dev->driver->irq_handler = ironlake_irq_handler;
dev->driver->irq_preinstall = ironlake_irq_reset;
dev->driver->irq_postinstall = ironlake_irq_postinstall;
@@ -4577,21 +4231,25 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
dev->driver->irq_postinstall = i8xx_irq_postinstall;
dev->driver->irq_handler = i8xx_irq_handler;
dev->driver->irq_uninstall = i8xx_irq_uninstall;
+ dev->driver->enable_vblank = i8xx_enable_vblank;
+ dev->driver->disable_vblank = i8xx_disable_vblank;
} else if (IS_GEN3(dev_priv)) {
dev->driver->irq_preinstall = i915_irq_preinstall;
dev->driver->irq_postinstall = i915_irq_postinstall;
dev->driver->irq_uninstall = i915_irq_uninstall;
dev->driver->irq_handler = i915_irq_handler;
+ dev->driver->enable_vblank = i8xx_enable_vblank;
+ dev->driver->disable_vblank = i8xx_disable_vblank;
} else {
dev->driver->irq_preinstall = i965_irq_preinstall;
dev->driver->irq_postinstall = i965_irq_postinstall;
dev->driver->irq_uninstall = i965_irq_uninstall;
dev->driver->irq_handler = i965_irq_handler;
+ dev->driver->enable_vblank = i965_enable_vblank;
+ dev->driver->disable_vblank = i965_disable_vblank;
}
if (I915_HAS_HOTPLUG(dev_priv))
dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
- dev->driver->enable_vblank = i915_enable_vblank;
- dev->driver->disable_vblank = i915_disable_vblank;
}
}
diff --git a/drivers/gpu/drm/i915/i915_params.c b/drivers/gpu/drm/i915/i915_params.c
index 768ad89d9cd4..d46ffe7086bc 100644
--- a/drivers/gpu/drm/i915/i915_params.c
+++ b/drivers/gpu/drm/i915/i915_params.c
@@ -39,7 +39,7 @@ struct i915_params i915 __read_mostly = {
.enable_hangcheck = true,
.enable_ppgtt = -1,
.enable_psr = -1,
- .preliminary_hw_support = IS_ENABLED(CONFIG_DRM_I915_PRELIMINARY_HW_SUPPORT),
+ .alpha_support = IS_ENABLED(CONFIG_DRM_I915_ALPHA_SUPPORT),
.disable_power_well = -1,
.enable_ips = 1,
.fastboot = 0,
@@ -47,6 +47,7 @@ struct i915_params i915 __read_mostly = {
.load_detect_test = 0,
.force_reset_modeset_test = 0,
.reset = true,
+ .error_capture = true,
.invert_brightness = 0,
.disable_display = 0,
.enable_cmd_parser = 1,
@@ -115,6 +116,14 @@ MODULE_PARM_DESC(vbt_sdvo_panel_type,
module_param_named_unsafe(reset, i915.reset, bool, 0600);
MODULE_PARM_DESC(reset, "Attempt GPU resets (default: true)");
+#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
+module_param_named(error_capture, i915.error_capture, bool, 0600);
+MODULE_PARM_DESC(error_capture,
+ "Record the GPU state following a hang. "
+ "This information in /sys/class/drm/card<N>/error is vital for "
+ "triaging and debugging hangs.");
+#endif
+
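As a usage note for the new knob: with error capture left enabled (the default above), the record written after a hang can be pulled from userspace with e.g. `cat /sys/class/drm/card0/error > gpu-hang.txt`, and writing anything to that node is expected to clear the stored state once the record has been saved (card number may vary per system).
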
module_param_named_unsafe(enable_hangcheck, i915.enable_hangcheck, bool, 0644);
MODULE_PARM_DESC(enable_hangcheck,
"Periodically check GPU activity for detecting hangs. "
@@ -136,9 +145,10 @@ MODULE_PARM_DESC(enable_psr, "Enable PSR "
"(0=disabled, 1=enabled - link mode chosen per-platform, 2=force link-standby mode, 3=force link-off mode) "
"Default: -1 (use per-chip default)");
-module_param_named_unsafe(preliminary_hw_support, i915.preliminary_hw_support, int, 0400);
-MODULE_PARM_DESC(preliminary_hw_support,
- "Enable preliminary hardware support.");
+module_param_named_unsafe(alpha_support, i915.alpha_support, int, 0400);
+MODULE_PARM_DESC(alpha_support,
+ "Enable alpha quality driver support for latest hardware. "
+ "See also CONFIG_DRM_I915_ALPHA_SUPPORT.");
module_param_named_unsafe(disable_power_well, i915.disable_power_well, int, 0400);
MODULE_PARM_DESC(disable_power_well,
diff --git a/drivers/gpu/drm/i915/i915_params.h b/drivers/gpu/drm/i915/i915_params.h
index 3a0dd78ddb38..817ad959941e 100644
--- a/drivers/gpu/drm/i915/i915_params.h
+++ b/drivers/gpu/drm/i915/i915_params.h
@@ -40,7 +40,7 @@ struct i915_params {
int enable_ppgtt;
int enable_execlists;
int enable_psr;
- unsigned int preliminary_hw_support;
+ unsigned int alpha_support;
int disable_power_well;
int enable_ips;
int invert_brightness;
@@ -59,6 +59,7 @@ struct i915_params {
bool load_detect_test;
bool force_reset_modeset_test;
bool reset;
+ bool error_capture;
bool disable_display;
bool verbose_state_checks;
bool nuclear_pageflip;
diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c
index 31e6edd08dd0..fce8e198bc76 100644
--- a/drivers/gpu/drm/i915/i915_pci.c
+++ b/drivers/gpu/drm/i915/i915_pci.c
@@ -288,7 +288,8 @@ static const struct intel_device_info intel_haswell_info = {
#define BDW_FEATURES \
HSW_FEATURES, \
BDW_COLORS, \
- .has_logical_ring_contexts = 1
+ .has_logical_ring_contexts = 1, \
+ .has_64bit_reloc = 1
static const struct intel_device_info intel_broadwell_info = {
BDW_FEATURES,
@@ -308,6 +309,7 @@ static const struct intel_device_info intel_cherryview_info = {
.has_hotplug = 1,
.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
.is_cherryview = 1,
+ .has_64bit_reloc = 1,
.has_psr = 1,
.has_runtime_pm = 1,
.has_resource_streamer = 1,
@@ -347,6 +349,7 @@ static const struct intel_device_info intel_broxton_info = {
.has_hotplug = 1,
.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
.num_pipes = 3,
+ .has_64bit_reloc = 1,
.has_ddi = 1,
.has_fpga_dbg = 1,
.has_fbc = 1,
@@ -360,6 +363,7 @@ static const struct intel_device_info intel_broxton_info = {
.has_hw_contexts = 1,
.has_logical_ring_contexts = 1,
.has_guc = 1,
+ .has_decoupled_mmio = 1,
.ddb_size = 512,
GEN_DEFAULT_PIPEOFFSETS,
IVB_CURSOR_OFFSETS,
@@ -436,9 +440,10 @@ static int i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
struct intel_device_info *intel_info =
(struct intel_device_info *) ent->driver_data;
- if (IS_PRELIMINARY_HW(intel_info) && !i915.preliminary_hw_support) {
- DRM_INFO("This hardware requires preliminary hardware support.\n"
- "See CONFIG_DRM_I915_PRELIMINARY_HW_SUPPORT, and/or modparam preliminary_hw_support\n");
+ if (IS_ALPHA_SUPPORT(intel_info) && !i915.alpha_support) {
+ DRM_INFO("The driver support for your hardware in this kernel version is alpha quality\n"
+ "See CONFIG_DRM_I915_ALPHA_SUPPORT or i915.alpha_support module parameter\n"
+ "to enable support in this kernel version, or check for kernel updates.\n");
return -ENODEV;
}
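
In practice this means pre-release hardware now needs an explicit opt-in: either building with CONFIG_DRM_I915_ALPHA_SUPPORT=y or booting/loading with i915.alpha_support=1. Without one of the two, probe fails with -ENODEV as shown above, instead of silently binding alpha-quality support.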
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 70d96162def6..c70c07a7b586 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -86,8 +86,7 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define DEVEN 0x54
#define DEVEN_MCHBAR_EN (1 << 28)
-#define BSM 0x5c
-#define BSM_MASK (0xFFFF << 20)
+/* BSM in include/drm/i915_drm.h */
#define HPLLCC 0xc0 /* 85x only */
#define GC_CLOCK_CONTROL_MASK (0x7 << 0)
@@ -831,96 +830,7 @@ enum skl_disp_power_wells {
#define CCK_FREQUENCY_STATUS_SHIFT 8
#define CCK_FREQUENCY_VALUES (0x1f << 0)
-/**
- * DOC: DPIO
- *
- * VLV, CHV and BXT have slightly peculiar display PHYs for driving DP/HDMI
- * ports. DPIO is the name given to such a display PHY. These PHYs
- * don't follow the standard programming model using direct MMIO
- * registers, and instead their registers must be accessed trough IOSF
- * sideband. VLV has one such PHY for driving ports B and C, and CHV
- * adds another PHY for driving port D. Each PHY responds to specific
- * IOSF-SB port.
- *
- * Each display PHY is made up of one or two channels. Each channel
- * houses a common lane part which contains the PLL and other common
- * logic. CH0 common lane also contains the IOSF-SB logic for the
- * Common Register Interface (CRI) ie. the DPIO registers. CRI clock
- * must be running when any DPIO registers are accessed.
- *
- * In addition to having their own registers, the PHYs are also
- * controlled through some dedicated signals from the display
- * controller. These include PLL reference clock enable, PLL enable,
- * and CRI clock selection, for example.
- *
- * Eeach channel also has two splines (also called data lanes), and
- * each spline is made up of one Physical Access Coding Sub-Layer
- * (PCS) block and two TX lanes. So each channel has two PCS blocks
- * and four TX lanes. The TX lanes are used as DP lanes or TMDS
- * data/clock pairs depending on the output type.
- *
- * Additionally the PHY also contains an AUX lane with AUX blocks
- * for each channel. This is used for DP AUX communication, but
- * this fact isn't really relevant for the driver since AUX is
- * controlled from the display controller side. No DPIO registers
- * need to be accessed during AUX communication,
- *
- * Generally on VLV/CHV the common lane corresponds to the pipe and
- * the spline (PCS/TX) corresponds to the port.
- *
- * For dual channel PHY (VLV/CHV):
- *
- * pipe A == CMN/PLL/REF CH0
- *
- * pipe B == CMN/PLL/REF CH1
- *
- * port B == PCS/TX CH0
- *
- * port C == PCS/TX CH1
- *
- * This is especially important when we cross the streams
- * ie. drive port B with pipe B, or port C with pipe A.
- *
- * For single channel PHY (CHV):
- *
- * pipe C == CMN/PLL/REF CH0
- *
- * port D == PCS/TX CH0
- *
- * On BXT the entire PHY channel corresponds to the port. That means
- * the PLL is also now associated with the port rather than the pipe,
- * and so the clock needs to be routed to the appropriate transcoder.
- * Port A PLL is directly connected to transcoder EDP and port B/C
- * PLLs can be routed to any transcoder A/B/C.
- *
- * Note: DDI0 is digital port B, DD1 is digital port C, and DDI2 is
- * digital port D (CHV) or port A (BXT). ::
- *
- *
- * Dual channel PHY (VLV/CHV/BXT)
- * ---------------------------------
- * | CH0 | CH1 |
- * | CMN/PLL/REF | CMN/PLL/REF |
- * |---------------|---------------| Display PHY
- * | PCS01 | PCS23 | PCS01 | PCS23 |
- * |-------|-------|-------|-------|
- * |TX0|TX1|TX2|TX3|TX0|TX1|TX2|TX3|
- * ---------------------------------
- * | DDI0 | DDI1 | DP/HDMI ports
- * ---------------------------------
- *
- * Single channel PHY (CHV/BXT)
- * -----------------
- * | CH0 |
- * | CMN/PLL/REF |
- * |---------------| Display PHY
- * | PCS01 | PCS23 |
- * |-------|-------|
- * |TX0|TX1|TX2|TX3|
- * -----------------
- * | DDI2 | DP/HDMI port
- * -----------------
- */
+/* DPIO registers */
#define DPIO_DEVFN 0
#define DPIO_CTL _MMIO(VLV_DISPLAY_BASE + 0x2110)
@@ -1276,7 +1186,19 @@ enum skl_disp_power_wells {
#define DPIO_UPAR_SHIFT 30
/* BXT PHY registers */
-#define _BXT_PHY(phy, a, b) _MMIO_PIPE((phy), (a), (b))
+#define _BXT_PHY0_BASE 0x6C000
+#define _BXT_PHY1_BASE 0x162000
+#define BXT_PHY_BASE(phy) _PIPE((phy), _BXT_PHY0_BASE, \
+ _BXT_PHY1_BASE)
+
+#define _BXT_PHY(phy, reg) \
+ _MMIO(BXT_PHY_BASE(phy) - _BXT_PHY0_BASE + (reg))
+
+#define _BXT_PHY_CH(phy, ch, reg_ch0, reg_ch1) \
+ (BXT_PHY_BASE(phy) + _PIPE((ch), (reg_ch0) - _BXT_PHY0_BASE, \
+ (reg_ch1) - _BXT_PHY0_BASE))
+#define _MMIO_BXT_PHY_CH(phy, ch, reg_ch0, reg_ch1) \
+ _MMIO(_BXT_PHY_CH(phy, ch, reg_ch0, reg_ch1))
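+
+/* A worked expansion shows the intent of the rebasing: channel offsets
+ * are stored PHY0-relative and rebased onto the right PHY block, so the
+ * old per-port "_A" addresses fall out naturally. Using the EBB_4 values
+ * defined further down, and assuming the usual _PIPE() selection
+ * (index 0 -> first argument, index 1 -> second):
+ *
+ *   BXT_PHY_BASE(1) = _BXT_PHY1_BASE = 0x162000
+ *   BXT_PORT_PLL_EBB_4(1, 0)
+ *     = _MMIO(0x162000 + (_PORT_PLL_EBB_4_B - _BXT_PHY0_BASE))
+ *     = _MMIO(0x162000 + (0x6C038 - 0x6C000)) = _MMIO(0x162038)
+ *     which is exactly _PORT_PLL_EBB_4_A, the old port A address.
+ *   BXT_PORT_PLL_EBB_4(0, 1)
+ *     = _MMIO(0x6C000 + (0x6C344 - 0x6C000)) = _MMIO(0x6C344)
+ *     i.e. _PORT_PLL_EBB_4_C.
+ */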
#define BXT_P_CR_GT_DISP_PWRON _MMIO(0x138090)
#define GT_DISPLAY_POWER_ON(phy) (1 << (phy))
@@ -1293,8 +1215,8 @@ enum skl_disp_power_wells {
#define _PHY_CTL_FAMILY_EDP 0x64C80
#define _PHY_CTL_FAMILY_DDI 0x64C90
#define COMMON_RESET_DIS (1 << 31)
-#define BXT_PHY_CTL_FAMILY(phy) _BXT_PHY((phy), _PHY_CTL_FAMILY_DDI, \
- _PHY_CTL_FAMILY_EDP)
+#define BXT_PHY_CTL_FAMILY(phy) _MMIO_PIPE((phy), _PHY_CTL_FAMILY_DDI, \
+ _PHY_CTL_FAMILY_EDP)
/* BXT PHY PLL registers */
#define _PORT_PLL_A 0x46074
@@ -1314,18 +1236,18 @@ enum skl_disp_power_wells {
#define PORT_PLL_P2_SHIFT 8
#define PORT_PLL_P2_MASK (0x1f << PORT_PLL_P2_SHIFT)
#define PORT_PLL_P2(x) ((x) << PORT_PLL_P2_SHIFT)
-#define BXT_PORT_PLL_EBB_0(port) _MMIO_PORT3(port, _PORT_PLL_EBB_0_A, \
- _PORT_PLL_EBB_0_B, \
- _PORT_PLL_EBB_0_C)
+#define BXT_PORT_PLL_EBB_0(phy, ch) _MMIO_BXT_PHY_CH(phy, ch, \
+ _PORT_PLL_EBB_0_B, \
+ _PORT_PLL_EBB_0_C)
#define _PORT_PLL_EBB_4_A 0x162038
#define _PORT_PLL_EBB_4_B 0x6C038
#define _PORT_PLL_EBB_4_C 0x6C344
#define PORT_PLL_10BIT_CLK_ENABLE (1 << 13)
#define PORT_PLL_RECALIBRATE (1 << 14)
-#define BXT_PORT_PLL_EBB_4(port) _MMIO_PORT3(port, _PORT_PLL_EBB_4_A, \
- _PORT_PLL_EBB_4_B, \
- _PORT_PLL_EBB_4_C)
+#define BXT_PORT_PLL_EBB_4(phy, ch) _MMIO_BXT_PHY_CH(phy, ch, \
+ _PORT_PLL_EBB_4_B, \
+ _PORT_PLL_EBB_4_C)
#define _PORT_PLL_0_A 0x162100
#define _PORT_PLL_0_B 0x6C100
@@ -1356,57 +1278,56 @@ enum skl_disp_power_wells {
#define PORT_PLL_DCO_AMP_DEFAULT 15
#define PORT_PLL_DCO_AMP_MASK 0x3c00
#define PORT_PLL_DCO_AMP(x) ((x)<<10)
-#define _PORT_PLL_BASE(port) _PORT3(port, _PORT_PLL_0_A, \
- _PORT_PLL_0_B, \
- _PORT_PLL_0_C)
-#define BXT_PORT_PLL(port, idx) _MMIO(_PORT_PLL_BASE(port) + (idx) * 4)
+#define _PORT_PLL_BASE(phy, ch) _BXT_PHY_CH(phy, ch, \
+ _PORT_PLL_0_B, \
+ _PORT_PLL_0_C)
+#define BXT_PORT_PLL(phy, ch, idx) _MMIO(_PORT_PLL_BASE(phy, ch) + \
+ (idx) * 4)
/* BXT PHY common lane registers */
#define _PORT_CL1CM_DW0_A 0x162000
#define _PORT_CL1CM_DW0_BC 0x6C000
#define PHY_POWER_GOOD (1 << 16)
#define PHY_RESERVED (1 << 7)
-#define BXT_PORT_CL1CM_DW0(phy) _BXT_PHY((phy), _PORT_CL1CM_DW0_BC, \
- _PORT_CL1CM_DW0_A)
+#define BXT_PORT_CL1CM_DW0(phy) _BXT_PHY((phy), _PORT_CL1CM_DW0_BC)
#define _PORT_CL1CM_DW9_A 0x162024
#define _PORT_CL1CM_DW9_BC 0x6C024
#define IREF0RC_OFFSET_SHIFT 8
#define IREF0RC_OFFSET_MASK (0xFF << IREF0RC_OFFSET_SHIFT)
-#define BXT_PORT_CL1CM_DW9(phy) _BXT_PHY((phy), _PORT_CL1CM_DW9_BC, \
- _PORT_CL1CM_DW9_A)
+#define BXT_PORT_CL1CM_DW9(phy) _BXT_PHY((phy), _PORT_CL1CM_DW9_BC)
#define _PORT_CL1CM_DW10_A 0x162028
#define _PORT_CL1CM_DW10_BC 0x6C028
#define IREF1RC_OFFSET_SHIFT 8
#define IREF1RC_OFFSET_MASK (0xFF << IREF1RC_OFFSET_SHIFT)
-#define BXT_PORT_CL1CM_DW10(phy) _BXT_PHY((phy), _PORT_CL1CM_DW10_BC, \
- _PORT_CL1CM_DW10_A)
+#define BXT_PORT_CL1CM_DW10(phy) _BXT_PHY((phy), _PORT_CL1CM_DW10_BC)
#define _PORT_CL1CM_DW28_A 0x162070
#define _PORT_CL1CM_DW28_BC 0x6C070
#define OCL1_POWER_DOWN_EN (1 << 23)
#define DW28_OLDO_DYN_PWR_DOWN_EN (1 << 22)
#define SUS_CLK_CONFIG 0x3
-#define BXT_PORT_CL1CM_DW28(phy) _BXT_PHY((phy), _PORT_CL1CM_DW28_BC, \
- _PORT_CL1CM_DW28_A)
+#define BXT_PORT_CL1CM_DW28(phy) _BXT_PHY((phy), _PORT_CL1CM_DW28_BC)
#define _PORT_CL1CM_DW30_A 0x162078
#define _PORT_CL1CM_DW30_BC 0x6C078
#define OCL2_LDOFUSE_PWR_DIS (1 << 6)
-#define BXT_PORT_CL1CM_DW30(phy) _BXT_PHY((phy), _PORT_CL1CM_DW30_BC, \
- _PORT_CL1CM_DW30_A)
+#define BXT_PORT_CL1CM_DW30(phy) _BXT_PHY((phy), _PORT_CL1CM_DW30_BC)
-/* Defined for PHY0 only */
-#define BXT_PORT_CL2CM_DW6_BC _MMIO(0x6C358)
+/* The spec defines this only for BXT PHY0, but let's assume that this
+ * would exist for PHY1 too if it had a second channel.
+ */
+#define _PORT_CL2CM_DW6_A 0x162358
+#define _PORT_CL2CM_DW6_BC 0x6C358
+#define BXT_PORT_CL2CM_DW6(phy) _BXT_PHY((phy), _PORT_CL2CM_DW6_BC)
#define DW6_OLDO_DYN_PWR_DOWN_EN (1 << 28)
/* BXT PHY Ref registers */
#define _PORT_REF_DW3_A 0x16218C
#define _PORT_REF_DW3_BC 0x6C18C
#define GRC_DONE (1 << 22)
-#define BXT_PORT_REF_DW3(phy) _BXT_PHY((phy), _PORT_REF_DW3_BC, \
- _PORT_REF_DW3_A)
+#define BXT_PORT_REF_DW3(phy) _BXT_PHY((phy), _PORT_REF_DW3_BC)
#define _PORT_REF_DW6_A 0x162198
#define _PORT_REF_DW6_BC 0x6C198
@@ -1417,15 +1338,13 @@ enum skl_disp_power_wells {
#define GRC_CODE_SLOW_SHIFT 8
#define GRC_CODE_SLOW_MASK (0xFF << GRC_CODE_SLOW_SHIFT)
#define GRC_CODE_NOM_MASK 0xFF
-#define BXT_PORT_REF_DW6(phy) _BXT_PHY((phy), _PORT_REF_DW6_BC, \
- _PORT_REF_DW6_A)
+#define BXT_PORT_REF_DW6(phy) _BXT_PHY((phy), _PORT_REF_DW6_BC)
#define _PORT_REF_DW8_A 0x1621A0
#define _PORT_REF_DW8_BC 0x6C1A0
#define GRC_DIS (1 << 15)
#define GRC_RDY_OVRD (1 << 1)
-#define BXT_PORT_REF_DW8(phy) _BXT_PHY((phy), _PORT_REF_DW8_BC, \
- _PORT_REF_DW8_A)
+#define BXT_PORT_REF_DW8(phy) _BXT_PHY((phy), _PORT_REF_DW8_BC)
/* BXT PHY PCS registers */
#define _PORT_PCS_DW10_LN01_A 0x162428
@@ -1434,12 +1353,13 @@ enum skl_disp_power_wells {
#define _PORT_PCS_DW10_GRP_A 0x162C28
#define _PORT_PCS_DW10_GRP_B 0x6CC28
#define _PORT_PCS_DW10_GRP_C 0x6CE28
-#define BXT_PORT_PCS_DW10_LN01(port) _MMIO_PORT3(port, _PORT_PCS_DW10_LN01_A, \
- _PORT_PCS_DW10_LN01_B, \
- _PORT_PCS_DW10_LN01_C)
-#define BXT_PORT_PCS_DW10_GRP(port) _MMIO_PORT3(port, _PORT_PCS_DW10_GRP_A, \
- _PORT_PCS_DW10_GRP_B, \
- _PORT_PCS_DW10_GRP_C)
+#define BXT_PORT_PCS_DW10_LN01(phy, ch) _MMIO_BXT_PHY_CH(phy, ch, \
+ _PORT_PCS_DW10_LN01_B, \
+ _PORT_PCS_DW10_LN01_C)
+#define BXT_PORT_PCS_DW10_GRP(phy, ch) _MMIO_BXT_PHY_CH(phy, ch, \
+ _PORT_PCS_DW10_GRP_B, \
+ _PORT_PCS_DW10_GRP_C)
+
#define TX2_SWING_CALC_INIT (1 << 31)
#define TX1_SWING_CALC_INIT (1 << 30)
@@ -1454,15 +1374,15 @@ enum skl_disp_power_wells {
#define _PORT_PCS_DW12_GRP_C 0x6CE30
#define LANESTAGGER_STRAP_OVRD (1 << 6)
#define LANE_STAGGER_MASK 0x1F
-#define BXT_PORT_PCS_DW12_LN01(port) _MMIO_PORT3(port, _PORT_PCS_DW12_LN01_A, \
- _PORT_PCS_DW12_LN01_B, \
- _PORT_PCS_DW12_LN01_C)
-#define BXT_PORT_PCS_DW12_LN23(port) _MMIO_PORT3(port, _PORT_PCS_DW12_LN23_A, \
- _PORT_PCS_DW12_LN23_B, \
- _PORT_PCS_DW12_LN23_C)
-#define BXT_PORT_PCS_DW12_GRP(port) _MMIO_PORT3(port, _PORT_PCS_DW12_GRP_A, \
- _PORT_PCS_DW12_GRP_B, \
- _PORT_PCS_DW12_GRP_C)
+#define BXT_PORT_PCS_DW12_LN01(phy, ch) _MMIO_BXT_PHY_CH(phy, ch, \
+ _PORT_PCS_DW12_LN01_B, \
+ _PORT_PCS_DW12_LN01_C)
+#define BXT_PORT_PCS_DW12_LN23(phy, ch) _MMIO_BXT_PHY_CH(phy, ch, \
+ _PORT_PCS_DW12_LN23_B, \
+ _PORT_PCS_DW12_LN23_C)
+#define BXT_PORT_PCS_DW12_GRP(phy, ch) _MMIO_BXT_PHY_CH(phy, ch, \
+ _PORT_PCS_DW12_GRP_B, \
+ _PORT_PCS_DW12_GRP_C)
/* BXT PHY TX registers */
#define _BXT_LANE_OFFSET(lane) (((lane) >> 1) * 0x200 + \
@@ -1474,12 +1394,12 @@ enum skl_disp_power_wells {
#define _PORT_TX_DW2_GRP_A 0x162D08
#define _PORT_TX_DW2_GRP_B 0x6CD08
#define _PORT_TX_DW2_GRP_C 0x6CF08
-#define BXT_PORT_TX_DW2_GRP(port) _MMIO_PORT3(port, _PORT_TX_DW2_GRP_A, \
- _PORT_TX_DW2_GRP_B, \
- _PORT_TX_DW2_GRP_C)
-#define BXT_PORT_TX_DW2_LN0(port) _MMIO_PORT3(port, _PORT_TX_DW2_LN0_A, \
- _PORT_TX_DW2_LN0_B, \
- _PORT_TX_DW2_LN0_C)
+#define BXT_PORT_TX_DW2_LN0(phy, ch) _MMIO_BXT_PHY_CH(phy, ch, \
+ _PORT_TX_DW2_LN0_B, \
+ _PORT_TX_DW2_LN0_C)
+#define BXT_PORT_TX_DW2_GRP(phy, ch) _MMIO_BXT_PHY_CH(phy, ch, \
+ _PORT_TX_DW2_GRP_B, \
+ _PORT_TX_DW2_GRP_C)
#define MARGIN_000_SHIFT 16
#define MARGIN_000 (0xFF << MARGIN_000_SHIFT)
#define UNIQ_TRANS_SCALE_SHIFT 8
@@ -1491,12 +1411,12 @@ enum skl_disp_power_wells {
#define _PORT_TX_DW3_GRP_A 0x162D0C
#define _PORT_TX_DW3_GRP_B 0x6CD0C
#define _PORT_TX_DW3_GRP_C 0x6CF0C
-#define BXT_PORT_TX_DW3_GRP(port) _MMIO_PORT3(port, _PORT_TX_DW3_GRP_A, \
- _PORT_TX_DW3_GRP_B, \
- _PORT_TX_DW3_GRP_C)
-#define BXT_PORT_TX_DW3_LN0(port) _MMIO_PORT3(port, _PORT_TX_DW3_LN0_A, \
- _PORT_TX_DW3_LN0_B, \
- _PORT_TX_DW3_LN0_C)
+#define BXT_PORT_TX_DW3_LN0(phy, ch) _MMIO_BXT_PHY_CH(phy, ch, \
+ _PORT_TX_DW3_LN0_B, \
+ _PORT_TX_DW3_LN0_C)
+#define BXT_PORT_TX_DW3_GRP(phy, ch) _MMIO_BXT_PHY_CH(phy, ch, \
+ _PORT_TX_DW3_GRP_B, \
+ _PORT_TX_DW3_GRP_C)
#define SCALE_DCOMP_METHOD (1 << 26)
#define UNIQUE_TRANGE_EN_METHOD (1 << 27)
@@ -1506,12 +1426,12 @@ enum skl_disp_power_wells {
#define _PORT_TX_DW4_GRP_A 0x162D10
#define _PORT_TX_DW4_GRP_B 0x6CD10
#define _PORT_TX_DW4_GRP_C 0x6CF10
-#define BXT_PORT_TX_DW4_LN0(port) _MMIO_PORT3(port, _PORT_TX_DW4_LN0_A, \
- _PORT_TX_DW4_LN0_B, \
- _PORT_TX_DW4_LN0_C)
-#define BXT_PORT_TX_DW4_GRP(port) _MMIO_PORT3(port, _PORT_TX_DW4_GRP_A, \
- _PORT_TX_DW4_GRP_B, \
- _PORT_TX_DW4_GRP_C)
+#define BXT_PORT_TX_DW4_LN0(phy, ch) _MMIO_BXT_PHY_CH(phy, ch, \
+ _PORT_TX_DW4_LN0_B, \
+ _PORT_TX_DW4_LN0_C)
+#define BXT_PORT_TX_DW4_GRP(phy, ch) _MMIO_BXT_PHY_CH(phy, ch, \
+ _PORT_TX_DW4_GRP_B, \
+ _PORT_TX_DW4_GRP_C)
#define DEEMPH_SHIFT 24
#define DE_EMPHASIS (0xFF << DEEMPH_SHIFT)
@@ -1520,10 +1440,10 @@ enum skl_disp_power_wells {
#define _PORT_TX_DW14_LN0_C 0x6C938
#define LATENCY_OPTIM_SHIFT 30
#define LATENCY_OPTIM (1 << LATENCY_OPTIM_SHIFT)
-#define BXT_PORT_TX_DW14_LN(port, lane) _MMIO(_PORT3((port), _PORT_TX_DW14_LN0_A, \
- _PORT_TX_DW14_LN0_B, \
- _PORT_TX_DW14_LN0_C) + \
- _BXT_LANE_OFFSET(lane))
+#define BXT_PORT_TX_DW14_LN(phy, ch, lane) \
+ _MMIO(_BXT_PHY_CH(phy, ch, _PORT_TX_DW14_LN0_B, \
+ _PORT_TX_DW14_LN0_C) + \
+ _BXT_LANE_OFFSET(lane))
/* UAIMI scratch pad register 1 */
#define UAIMI_SPR1 _MMIO(0x4F074)
@@ -1605,6 +1525,7 @@ enum skl_disp_power_wells {
#define RING_HEAD(base) _MMIO((base)+0x34)
#define RING_START(base) _MMIO((base)+0x38)
#define RING_CTL(base) _MMIO((base)+0x3c)
+#define RING_CTL_SIZE(size) ((size) - PAGE_SIZE) /* in bytes -> pages */
#define RING_SYNC_0(base) _MMIO((base)+0x40)
#define RING_SYNC_1(base) _MMIO((base)+0x44)
#define RING_SYNC_2(base) _MMIO((base)+0x48)
@@ -1708,7 +1629,11 @@ enum skl_disp_power_wells {
#define GEN7_SC_INSTDONE _MMIO(0x7100)
#define GEN7_SAMPLER_INSTDONE _MMIO(0xe160)
#define GEN7_ROW_INSTDONE _MMIO(0xe164)
-#define I915_NUM_INSTDONE_REG 4
+#define GEN8_MCR_SELECTOR _MMIO(0xfdc)
+#define GEN8_MCR_SLICE(slice) (((slice) & 3) << 26)
+#define GEN8_MCR_SLICE_MASK GEN8_MCR_SLICE(3)
+#define GEN8_MCR_SUBSLICE(subslice) (((subslice) & 3) << 24)
+#define GEN8_MCR_SUBSLICE_MASK GEN8_MCR_SUBSLICE(3)
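For orientation, a hedged sketch of how such a steering register is typically used; the read-modify-write pattern below is illustrative, not taken from this patch:

/* Illustrative: steer multicast register reads to slice 0 / subslice 0
 * before sampling a per-subslice register such as GEN7_ROW_INSTDONE. */
u32 mcr = I915_READ(GEN8_MCR_SELECTOR);
mcr &= ~(GEN8_MCR_SLICE_MASK | GEN8_MCR_SUBSLICE_MASK);
mcr |= GEN8_MCR_SLICE(0) | GEN8_MCR_SUBSLICE(0);
I915_WRITE(GEN8_MCR_SELECTOR, mcr);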
#define RING_IPEIR(base) _MMIO((base)+0x64)
#define RING_IPEHR(base) _MMIO((base)+0x68)
/*
@@ -2089,9 +2014,9 @@ enum skl_disp_power_wells {
#define PM_VEBOX_CS_ERROR_INTERRUPT (1 << 12) /* hsw+ */
#define PM_VEBOX_USER_INTERRUPT (1 << 10) /* hsw+ */
-#define GT_PARITY_ERROR(dev) \
+#define GT_PARITY_ERROR(dev_priv) \
(GT_RENDER_L3_PARITY_ERROR_INTERRUPT | \
- (IS_HASWELL(dev) ? GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1 : 0))
+ (IS_HASWELL(dev_priv) ? GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1 : 0))
/* These are all the "old" interrupts */
#define ILK_BSD_USER_INTERRUPT (1<<5)
@@ -2184,8 +2109,9 @@ enum skl_disp_power_wells {
#define FBC_FENCE_OFF _MMIO(0x3218) /* BSpec typo has 321Bh */
#define FBC_TAG(i) _MMIO(0x3300 + (i) * 4)
-#define FBC_STATUS2 _MMIO(0x43214)
-#define FBC_COMPRESSION_MASK 0x7ff
+#define FBC_STATUS2 _MMIO(0x43214)
+#define IVB_FBC_COMPRESSION_MASK 0x7ff
+#define BDW_FBC_COMPRESSION_MASK 0xfff
#define FBC_LL_SIZE (1536)
@@ -6011,6 +5937,7 @@ enum {
#define GEN8_DE_PIPE_A_IRQ (1<<16)
#define GEN8_DE_PIPE_IRQ(pipe) (1<<(16+(pipe)))
#define GEN8_GT_VECS_IRQ (1<<6)
+#define GEN8_GT_GUC_IRQ (1<<5)
#define GEN8_GT_PM_IRQ (1<<4)
#define GEN8_GT_VCS2_IRQ (1<<3)
#define GEN8_GT_VCS1_IRQ (1<<2)
@@ -6022,6 +5949,16 @@ enum {
#define GEN8_GT_IIR(which) _MMIO(0x44308 + (0x10 * (which)))
#define GEN8_GT_IER(which) _MMIO(0x4430c + (0x10 * (which)))
+#define GEN9_GUC_TO_HOST_INT_EVENT (1<<31)
+#define GEN9_GUC_EXEC_ERROR_EVENT (1<<30)
+#define GEN9_GUC_DISPLAY_EVENT (1<<29)
+#define GEN9_GUC_SEMA_SIGNAL_EVENT (1<<28)
+#define GEN9_GUC_IOMMU_MSG_EVENT (1<<27)
+#define GEN9_GUC_DB_RING_EVENT (1<<26)
+#define GEN9_GUC_DMA_DONE_EVENT (1<<25)
+#define GEN9_GUC_FATAL_ERROR_EVENT (1<<24)
+#define GEN9_GUC_NOTIFICATION_EVENT (1<<23)
+
#define GEN8_RCS_IRQ_SHIFT 0
#define GEN8_BCS_IRQ_SHIFT 16
#define GEN8_VCS1_IRQ_SHIFT 0
@@ -7327,6 +7264,10 @@ enum {
#define AUD_CONFIG_UPPER_N_MASK (0xff << 20)
#define AUD_CONFIG_LOWER_N_SHIFT 4
#define AUD_CONFIG_LOWER_N_MASK (0xfff << 4)
+#define AUD_CONFIG_N_MASK (AUD_CONFIG_UPPER_N_MASK | AUD_CONFIG_LOWER_N_MASK)
+#define AUD_CONFIG_N(n) \
+ (((((n) >> 12) & 0xff) << AUD_CONFIG_UPPER_N_SHIFT) | \
+ (((n) & 0xfff) << AUD_CONFIG_LOWER_N_SHIFT))
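A quick sanity check of the N field split (illustrative, not part of the patch):

/* Example: n = 5625 (0x15F9)
 *   upper 8 bits:  (0x15F9 >> 12) & 0xff == 0x001   -> bits 27:20
 *   lower 12 bits:  0x15F9 & 0xfff       == 0x5F9   -> bits 15:4
 * so AUD_CONFIG_N(5625) == (0x001 << 20) | (0x5F9 << 4), and
 * AUD_CONFIG_N_MASK covers exactly those two fields.
 */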
#define AUD_CONFIG_PIXEL_CLOCK_HDMI_SHIFT 16
#define AUD_CONFIG_PIXEL_CLOCK_HDMI_MASK (0xf << 16)
#define AUD_CONFIG_PIXEL_CLOCK_HDMI_25175 (0 << 16)
@@ -7350,6 +7291,13 @@ enum {
#define _HSW_AUD_MISC_CTRL_B 0x65110
#define HSW_AUD_MISC_CTRL(pipe) _MMIO_PIPE(pipe, _HSW_AUD_MISC_CTRL_A, _HSW_AUD_MISC_CTRL_B)
+#define _HSW_AUD_M_CTS_ENABLE_A 0x65028
+#define _HSW_AUD_M_CTS_ENABLE_B 0x65128
+#define HSW_AUD_M_CTS_ENABLE(pipe) _MMIO_PIPE(pipe, _HSW_AUD_M_CTS_ENABLE_A, _HSW_AUD_M_CTS_ENABLE_B)
+#define AUD_M_CTS_M_VALUE_INDEX (1 << 21)
+#define AUD_M_CTS_M_PROG_ENABLE (1 << 20)
+#define AUD_CONFIG_M_MASK 0xfffff
+
#define _HSW_AUD_DIP_ELD_CTRL_ST_A 0x650b4
#define _HSW_AUD_DIP_ELD_CTRL_ST_B 0x651b4
#define HSW_AUD_DIP_ELD_CTRL(pipe) _MMIO_PIPE(pipe, _HSW_AUD_DIP_ELD_CTRL_ST_A, _HSW_AUD_DIP_ELD_CTRL_ST_B)
@@ -7394,6 +7342,13 @@ enum {
#define SKL_FUSE_PG1_DIST_STATUS (1<<26)
#define SKL_FUSE_PG2_DIST_STATUS (1<<25)
+/* Decoupled MMIO register pair for kernel driver */
+#define GEN9_DECOUPLED_REG0_DW0 _MMIO(0xF00)
+#define GEN9_DECOUPLED_REG0_DW1 _MMIO(0xF04)
+#define GEN9_DECOUPLED_DW1_GO (1<<31)
+#define GEN9_DECOUPLED_PD_SHIFT 28
+#define GEN9_DECOUPLED_OP_SHIFT 24
+
/* Per-pipe DDI Function Control */
#define _TRANS_DDI_FUNC_CTL_A 0x60400
#define _TRANS_DDI_FUNC_CTL_B 0x61400
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index a0af170062b1..b0e1e7ca75da 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -29,35 +29,31 @@
#include "intel_drv.h"
#include "i915_reg.h"
-static void i915_save_display(struct drm_device *dev)
+static void i915_save_display(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
-
/* Display arbitration control */
- if (INTEL_INFO(dev)->gen <= 4)
+ if (INTEL_GEN(dev_priv) <= 4)
dev_priv->regfile.saveDSPARB = I915_READ(DSPARB);
/* save FBC interval */
- if (HAS_FBC(dev) && INTEL_INFO(dev)->gen <= 4 && !IS_G4X(dev))
+ if (HAS_FBC(dev_priv) && INTEL_GEN(dev_priv) <= 4 && !IS_G4X(dev_priv))
dev_priv->regfile.saveFBC_CONTROL = I915_READ(FBC_CONTROL);
}
-static void i915_restore_display(struct drm_device *dev)
+static void i915_restore_display(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
-
/* Display arbitration */
- if (INTEL_INFO(dev)->gen <= 4)
+ if (INTEL_GEN(dev_priv) <= 4)
I915_WRITE(DSPARB, dev_priv->regfile.saveDSPARB);
/* only restore FBC info on platforms that support FBC */
intel_fbc_global_disable(dev_priv);
/* restore FBC interval */
- if (HAS_FBC(dev) && INTEL_INFO(dev)->gen <= 4 && !IS_G4X(dev))
+ if (HAS_FBC(dev_priv) && INTEL_GEN(dev_priv) <= 4 && !IS_G4X(dev_priv))
I915_WRITE(FBC_CONTROL, dev_priv->regfile.saveFBC_CONTROL);
- i915_redisable_vga(dev);
+ i915_redisable_vga(dev_priv);
}
int i915_save_state(struct drm_device *dev)
@@ -68,14 +64,14 @@ int i915_save_state(struct drm_device *dev)
mutex_lock(&dev->struct_mutex);
- i915_save_display(dev);
+ i915_save_display(dev_priv);
- if (IS_GEN4(dev))
+ if (IS_GEN4(dev_priv))
pci_read_config_word(pdev, GCDGMBUS,
&dev_priv->regfile.saveGCDGMBUS);
/* Cache mode state */
- if (INTEL_INFO(dev)->gen < 7)
+ if (INTEL_GEN(dev_priv) < 7)
dev_priv->regfile.saveCACHE_MODE_0 = I915_READ(CACHE_MODE_0);
/* Memory Arbitration state */
@@ -114,15 +110,15 @@ int i915_restore_state(struct drm_device *dev)
mutex_lock(&dev->struct_mutex);
- i915_gem_restore_fences(dev);
+ i915_gem_restore_fences(dev_priv);
- if (IS_GEN4(dev))
+ if (IS_GEN4(dev_priv))
pci_write_config_word(pdev, GCDGMBUS,
dev_priv->regfile.saveGCDGMBUS);
- i915_restore_display(dev);
+ i915_restore_display(dev_priv);
/* Cache mode state */
- if (INTEL_INFO(dev)->gen < 7)
+ if (INTEL_GEN(dev_priv) < 7)
I915_WRITE(CACHE_MODE_0, dev_priv->regfile.saveCACHE_MODE_0 |
0xffff0000);
diff --git a/drivers/gpu/drm/i915/i915_sw_fence.c b/drivers/gpu/drm/i915/i915_sw_fence.c
index 1e5cbc585ca2..147420ccf49c 100644
--- a/drivers/gpu/drm/i915/i915_sw_fence.c
+++ b/drivers/gpu/drm/i915/i915_sw_fence.c
@@ -8,11 +8,13 @@
*/
#include <linux/slab.h>
-#include <linux/fence.h>
+#include <linux/dma-fence.h>
#include <linux/reservation.h>
#include "i915_sw_fence.h"
+#define I915_SW_FENCE_FLAG_ALLOC BIT(3) /* after WQ_FLAG_* for safety */
+
static DEFINE_SPINLOCK(i915_sw_fence_lock);
static int __i915_sw_fence_notify(struct i915_sw_fence *fence,
@@ -114,11 +116,14 @@ static void i915_sw_fence_await(struct i915_sw_fence *fence)
WARN_ON(atomic_inc_return(&fence->pending) <= 1);
}
-void i915_sw_fence_init(struct i915_sw_fence *fence, i915_sw_fence_notify_t fn)
+void __i915_sw_fence_init(struct i915_sw_fence *fence,
+ i915_sw_fence_notify_t fn,
+ const char *name,
+ struct lock_class_key *key)
{
BUG_ON((unsigned long)fn & ~I915_SW_FENCE_MASK);
- init_waitqueue_head(&fence->wait);
+ __init_waitqueue_head(&fence->wait, name, key);
kref_init(&fence->kref);
atomic_set(&fence->pending, 1);
fence->flags = (unsigned long)fn;
@@ -135,6 +140,8 @@ static int i915_sw_fence_wake(wait_queue_t *wq, unsigned mode, int flags, void *
list_del(&wq->task_list);
__i915_sw_fence_complete(wq->private, key);
i915_sw_fence_put(wq->private);
+ if (wq->flags & I915_SW_FENCE_FLAG_ALLOC)
+ kfree(wq);
return 0;
}
@@ -192,9 +199,9 @@ static bool i915_sw_fence_check_if_after(struct i915_sw_fence *fence,
return err;
}
-int i915_sw_fence_await_sw_fence(struct i915_sw_fence *fence,
- struct i915_sw_fence *signaler,
- wait_queue_t *wq)
+static int __i915_sw_fence_await_sw_fence(struct i915_sw_fence *fence,
+ struct i915_sw_fence *signaler,
+ wait_queue_t *wq, gfp_t gfp)
{
unsigned long flags;
int pending;
@@ -206,8 +213,22 @@ int i915_sw_fence_await_sw_fence(struct i915_sw_fence *fence,
if (unlikely(i915_sw_fence_check_if_after(fence, signaler)))
return -EINVAL;
+ pending = 0;
+ if (!wq) {
+ wq = kmalloc(sizeof(*wq), gfp);
+ if (!wq) {
+ if (!gfpflags_allow_blocking(gfp))
+ return -ENOMEM;
+
+ i915_sw_fence_wait(signaler);
+ return 0;
+ }
+
+ pending |= I915_SW_FENCE_FLAG_ALLOC;
+ }
+
INIT_LIST_HEAD(&wq->task_list);
- wq->flags = 0;
+ wq->flags = pending;
wq->func = i915_sw_fence_wake;
wq->private = i915_sw_fence_get(fence);
@@ -226,49 +247,64 @@ int i915_sw_fence_await_sw_fence(struct i915_sw_fence *fence,
return pending;
}
-struct dma_fence_cb {
- struct fence_cb base;
+int i915_sw_fence_await_sw_fence(struct i915_sw_fence *fence,
+ struct i915_sw_fence *signaler,
+ wait_queue_t *wq)
+{
+ return __i915_sw_fence_await_sw_fence(fence, signaler, wq, 0);
+}
+
+int i915_sw_fence_await_sw_fence_gfp(struct i915_sw_fence *fence,
+ struct i915_sw_fence *signaler,
+ gfp_t gfp)
+{
+ return __i915_sw_fence_await_sw_fence(fence, signaler, NULL, gfp);
+}
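A minimal usage sketch of the new _gfp variant (hypothetical caller; fences a and b are assumed to exist):

/* Hypothetical: make fence b wait upon fence a without the caller
 * providing a wait_queue_t; the entry is kmalloc'ed with GFP_KERNEL
 * and freed again by i915_sw_fence_wake() via I915_SW_FENCE_FLAG_ALLOC. */
int err = i915_sw_fence_await_sw_fence_gfp(&b, &a, GFP_KERNEL);
if (err < 0)
        return err;     /* -EINVAL on a cycle, -ENOMEM only if non-blocking */
i915_sw_fence_commit(&b);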
+
+struct i915_sw_dma_fence_cb {
+ struct dma_fence_cb base;
struct i915_sw_fence *fence;
- struct fence *dma;
+ struct dma_fence *dma;
struct timer_list timer;
};
static void timer_i915_sw_fence_wake(unsigned long data)
{
- struct dma_fence_cb *cb = (struct dma_fence_cb *)data;
+ struct i915_sw_dma_fence_cb *cb = (struct i915_sw_dma_fence_cb *)data;
printk(KERN_WARNING "asynchronous wait on fence %s:%s:%x timed out\n",
cb->dma->ops->get_driver_name(cb->dma),
cb->dma->ops->get_timeline_name(cb->dma),
cb->dma->seqno);
- fence_put(cb->dma);
+ dma_fence_put(cb->dma);
cb->dma = NULL;
i915_sw_fence_commit(cb->fence);
cb->timer.function = NULL;
}
-static void dma_i915_sw_fence_wake(struct fence *dma, struct fence_cb *data)
+static void dma_i915_sw_fence_wake(struct dma_fence *dma,
+ struct dma_fence_cb *data)
{
- struct dma_fence_cb *cb = container_of(data, typeof(*cb), base);
+ struct i915_sw_dma_fence_cb *cb = container_of(data, typeof(*cb), base);
del_timer_sync(&cb->timer);
if (cb->timer.function)
i915_sw_fence_commit(cb->fence);
- fence_put(cb->dma);
+ dma_fence_put(cb->dma);
kfree(cb);
}
int i915_sw_fence_await_dma_fence(struct i915_sw_fence *fence,
- struct fence *dma,
+ struct dma_fence *dma,
unsigned long timeout,
gfp_t gfp)
{
- struct dma_fence_cb *cb;
+ struct i915_sw_dma_fence_cb *cb;
int ret;
- if (fence_is_signaled(dma))
+ if (dma_fence_is_signaled(dma))
return 0;
cb = kmalloc(sizeof(*cb), gfp);
@@ -276,7 +312,7 @@ int i915_sw_fence_await_dma_fence(struct i915_sw_fence *fence,
if (!gfpflags_allow_blocking(gfp))
return -ENOMEM;
- return fence_wait(dma, false);
+ return dma_fence_wait(dma, false);
}
cb->fence = i915_sw_fence_get(fence);
@@ -287,11 +323,11 @@ int i915_sw_fence_await_dma_fence(struct i915_sw_fence *fence,
timer_i915_sw_fence_wake, (unsigned long)cb,
TIMER_IRQSAFE);
if (timeout) {
- cb->dma = fence_get(dma);
+ cb->dma = dma_fence_get(dma);
mod_timer(&cb->timer, round_jiffies_up(jiffies + timeout));
}
- ret = fence_add_callback(dma, &cb->base, dma_i915_sw_fence_wake);
+ ret = dma_fence_add_callback(dma, &cb->base, dma_i915_sw_fence_wake);
if (ret == 0) {
ret = 1;
} else {
@@ -305,16 +341,16 @@ int i915_sw_fence_await_dma_fence(struct i915_sw_fence *fence,
int i915_sw_fence_await_reservation(struct i915_sw_fence *fence,
struct reservation_object *resv,
- const struct fence_ops *exclude,
+ const struct dma_fence_ops *exclude,
bool write,
unsigned long timeout,
gfp_t gfp)
{
- struct fence *excl;
+ struct dma_fence *excl;
int ret = 0, pending;
if (write) {
- struct fence **shared;
+ struct dma_fence **shared;
unsigned int count, i;
ret = reservation_object_get_fences_rcu(resv,
@@ -339,7 +375,7 @@ int i915_sw_fence_await_reservation(struct i915_sw_fence *fence,
}
for (i = 0; i < count; i++)
- fence_put(shared[i]);
+ dma_fence_put(shared[i]);
kfree(shared);
} else {
excl = reservation_object_get_excl_rcu(resv);
@@ -356,7 +392,7 @@ int i915_sw_fence_await_reservation(struct i915_sw_fence *fence,
ret |= pending;
}
- fence_put(excl);
+ dma_fence_put(excl);
return ret;
}
diff --git a/drivers/gpu/drm/i915/i915_sw_fence.h b/drivers/gpu/drm/i915/i915_sw_fence.h
index 373141602ca4..0f3185ef7f4e 100644
--- a/drivers/gpu/drm/i915/i915_sw_fence.h
+++ b/drivers/gpu/drm/i915/i915_sw_fence.h
@@ -16,8 +16,8 @@
#include <linux/wait.h>
struct completion;
-struct fence;
-struct fence_ops;
+struct dma_fence;
+struct dma_fence_ops;
struct reservation_object;
struct i915_sw_fence {
@@ -40,26 +40,54 @@ typedef int (*i915_sw_fence_notify_t)(struct i915_sw_fence *,
enum i915_sw_fence_notify state);
#define __i915_sw_fence_call __aligned(4)
-void i915_sw_fence_init(struct i915_sw_fence *fence, i915_sw_fence_notify_t fn);
+void __i915_sw_fence_init(struct i915_sw_fence *fence,
+ i915_sw_fence_notify_t fn,
+ const char *name,
+ struct lock_class_key *key);
+#ifdef CONFIG_LOCKDEP
+#define i915_sw_fence_init(fence, fn) \
+do { \
+ static struct lock_class_key __key; \
+ \
+ __i915_sw_fence_init((fence), (fn), #fence, &__key); \
+} while (0)
+#else
+#define i915_sw_fence_init(fence, fn) \
+ __i915_sw_fence_init((fence), (fn), NULL, NULL)
+#endif
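The CONFIG_LOCKDEP variant mints one static lock_class_key per call site, so each fence's embedded waitqueue gets its own lock class (a sketch; the fence names are hypothetical):

/* Conceptually, two call sites expand to two distinct keys:
 *
 *   i915_sw_fence_init(&req->submit, submit_notify);
 *     -> __i915_sw_fence_init(..., "&req->submit", &__key_site_1);
 *   i915_sw_fence_init(&req->execute, execute_notify);
 *     -> __i915_sw_fence_init(..., "&req->execute", &__key_site_2);
 *
 * so lockdep can order waits between the two classes instead of
 * flagging every fence-on-fence wait as recursive locking.
 */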
+
void i915_sw_fence_commit(struct i915_sw_fence *fence);
int i915_sw_fence_await_sw_fence(struct i915_sw_fence *fence,
struct i915_sw_fence *after,
wait_queue_t *wq);
+int i915_sw_fence_await_sw_fence_gfp(struct i915_sw_fence *fence,
+ struct i915_sw_fence *after,
+ gfp_t gfp);
int i915_sw_fence_await_dma_fence(struct i915_sw_fence *fence,
- struct fence *dma,
+ struct dma_fence *dma,
unsigned long timeout,
gfp_t gfp);
int i915_sw_fence_await_reservation(struct i915_sw_fence *fence,
struct reservation_object *resv,
- const struct fence_ops *exclude,
+ const struct dma_fence_ops *exclude,
bool write,
unsigned long timeout,
gfp_t gfp);
+static inline bool i915_sw_fence_signaled(const struct i915_sw_fence *fence)
+{
+ return atomic_read(&fence->pending) <= 0;
+}
+
static inline bool i915_sw_fence_done(const struct i915_sw_fence *fence)
{
return atomic_read(&fence->pending) < 0;
}
+static inline void i915_sw_fence_wait(struct i915_sw_fence *fence)
+{
+ wait_event(fence->wait, i915_sw_fence_done(fence));
+}
+
#endif /* _I915_SW_FENCE_H_ */
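Putting the declarations together, a typical lifecycle looks like this (illustrative sketch; notify is an assumed i915_sw_fence_notify_t callback):

/*
 * Illustrative lifecycle built from the API above:
 *
 *   i915_sw_fence_init(&fence, notify);          pending == 1
 *   i915_sw_fence_await_dma_fence(&fence, dma, 0, GFP_KERNEL);
 *   i915_sw_fence_commit(&fence);                drop the init reference
 *   i915_sw_fence_wait(&fence);                  blocks until pending < 0
 */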
diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c
index 1012eeea1324..3df8d3dd31cd 100644
--- a/drivers/gpu/drm/i915/i915_sysfs.c
+++ b/drivers/gpu/drm/i915/i915_sysfs.c
@@ -460,7 +460,7 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev,
static DEVICE_ATTR(gt_act_freq_mhz, S_IRUGO, gt_act_freq_mhz_show, NULL);
static DEVICE_ATTR(gt_cur_freq_mhz, S_IRUGO, gt_cur_freq_mhz_show, NULL);
-static DEVICE_ATTR(gt_boost_freq_mhz, S_IRUGO, gt_boost_freq_mhz_show, gt_boost_freq_mhz_store);
+static DEVICE_ATTR(gt_boost_freq_mhz, S_IRUGO | S_IWUSR, gt_boost_freq_mhz_show, gt_boost_freq_mhz_store);
static DEVICE_ATTR(gt_max_freq_mhz, S_IRUGO | S_IWUSR, gt_max_freq_mhz_show, gt_max_freq_mhz_store);
static DEVICE_ATTR(gt_min_freq_mhz, S_IRUGO | S_IWUSR, gt_min_freq_mhz_show, gt_min_freq_mhz_store);
@@ -514,6 +514,8 @@ static const struct attribute *vlv_attrs[] = {
NULL,
};
+#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
+
static ssize_t error_state_read(struct file *filp, struct kobject *kobj,
struct bin_attribute *attr, char *buf,
loff_t off, size_t count)
@@ -571,6 +573,21 @@ static struct bin_attribute error_state_attr = {
.write = error_state_write,
};
+static void i915_setup_error_capture(struct device *kdev)
+{
+ if (sysfs_create_bin_file(&kdev->kobj, &error_state_attr))
+ DRM_ERROR("error_state sysfs setup failed\n");
+}
+
+static void i915_teardown_error_capture(struct device *kdev)
+{
+ sysfs_remove_bin_file(&kdev->kobj, &error_state_attr);
+}
+#else
+static void i915_setup_error_capture(struct device *kdev) {}
+static void i915_teardown_error_capture(struct device *kdev) {}
+#endif
+
void i915_setup_sysfs(struct drm_i915_private *dev_priv)
{
struct device *kdev = dev_priv->drm.primary->kdev;
@@ -617,17 +634,15 @@ void i915_setup_sysfs(struct drm_i915_private *dev_priv)
if (ret)
DRM_ERROR("RPS sysfs setup failed\n");
- ret = sysfs_create_bin_file(&kdev->kobj,
- &error_state_attr);
- if (ret)
- DRM_ERROR("error_state sysfs setup failed\n");
+ i915_setup_error_capture(kdev);
}
void i915_teardown_sysfs(struct drm_i915_private *dev_priv)
{
struct device *kdev = dev_priv->drm.primary->kdev;
- sysfs_remove_bin_file(&kdev->kobj, &error_state_attr);
+ i915_teardown_error_capture(kdev);
+
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
sysfs_remove_files(&kdev->kobj, vlv_attrs);
else
diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h
index 178798002a73..c5d210ebaa9a 100644
--- a/drivers/gpu/drm/i915/i915_trace.h
+++ b/drivers/gpu/drm/i915/i915_trace.h
@@ -466,7 +466,7 @@ TRACE_EVENT(i915_gem_ring_sync_to,
__entry->dev = from->i915->drm.primary->index;
__entry->sync_from = from->engine->id;
__entry->sync_to = to->engine->id;
- __entry->seqno = from->fence.seqno;
+ __entry->seqno = from->global_seqno;
),
TP_printk("dev=%u, sync-from=%u, sync-to=%u, seqno=%u",
@@ -489,9 +489,9 @@ TRACE_EVENT(i915_gem_ring_dispatch,
TP_fast_assign(
__entry->dev = req->i915->drm.primary->index;
__entry->ring = req->engine->id;
- __entry->seqno = req->fence.seqno;
+ __entry->seqno = req->global_seqno;
__entry->flags = flags;
- fence_enable_sw_signaling(&req->fence);
+ dma_fence_enable_sw_signaling(&req->fence);
),
TP_printk("dev=%u, ring=%u, seqno=%u, flags=%x",
@@ -534,7 +534,7 @@ DECLARE_EVENT_CLASS(i915_gem_request,
TP_fast_assign(
__entry->dev = req->i915->drm.primary->index;
__entry->ring = req->engine->id;
- __entry->seqno = req->fence.seqno;
+ __entry->seqno = req->global_seqno;
),
TP_printk("dev=%u, ring=%u, seqno=%u",
@@ -596,7 +596,7 @@ TRACE_EVENT(i915_gem_request_wait_begin,
TP_fast_assign(
__entry->dev = req->i915->drm.primary->index;
__entry->ring = req->engine->id;
- __entry->seqno = req->fence.seqno;
+ __entry->seqno = req->global_seqno;
__entry->blocking =
mutex_is_locked(&req->i915->drm.struct_mutex);
),
diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
new file mode 100644
index 000000000000..a792dcb902b5
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_vma.c
@@ -0,0 +1,638 @@
+/*
+ * Copyright © 2016 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#include "i915_vma.h"
+
+#include "i915_drv.h"
+#include "intel_ringbuffer.h"
+#include "intel_frontbuffer.h"
+
+#include <drm/drm_gem.h>
+
+static void
+i915_vma_retire(struct i915_gem_active *active,
+ struct drm_i915_gem_request *rq)
+{
+ const unsigned int idx = rq->engine->id;
+ struct i915_vma *vma =
+ container_of(active, struct i915_vma, last_read[idx]);
+ struct drm_i915_gem_object *obj = vma->obj;
+
+ GEM_BUG_ON(!i915_vma_has_active_engine(vma, idx));
+
+ i915_vma_clear_active(vma, idx);
+ if (i915_vma_is_active(vma))
+ return;
+
+ list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
+ if (unlikely(i915_vma_is_closed(vma) && !i915_vma_is_pinned(vma)))
+ WARN_ON(i915_vma_unbind(vma));
+
+ GEM_BUG_ON(!i915_gem_object_is_active(obj));
+ if (--obj->active_count)
+ return;
+
+ /* Bump our place on the bound list to keep it roughly in LRU order
+ * so that we don't steal from recently used but inactive objects
+ * (unless we are forced to, of course!)
+ */
+ if (obj->bind_count)
+ list_move_tail(&obj->global_link, &rq->i915->mm.bound_list);
+
+ obj->mm.dirty = true; /* be paranoid */
+
+ if (i915_gem_object_has_active_reference(obj)) {
+ i915_gem_object_clear_active_reference(obj);
+ i915_gem_object_put(obj);
+ }
+}
+
+static struct i915_vma *
+__i915_vma_create(struct drm_i915_gem_object *obj,
+ struct i915_address_space *vm,
+ const struct i915_ggtt_view *view)
+{
+ struct i915_vma *vma;
+ struct rb_node *rb, **p;
+ int i;
+
+ GEM_BUG_ON(vm->closed);
+
+ vma = kmem_cache_zalloc(to_i915(obj->base.dev)->vmas, GFP_KERNEL);
+ if (vma == NULL)
+ return ERR_PTR(-ENOMEM);
+
+ INIT_LIST_HEAD(&vma->exec_list);
+ for (i = 0; i < ARRAY_SIZE(vma->last_read); i++)
+ init_request_active(&vma->last_read[i], i915_vma_retire);
+ init_request_active(&vma->last_fence, NULL);
+ list_add(&vma->vm_link, &vm->unbound_list);
+ vma->vm = vm;
+ vma->obj = obj;
+ vma->size = obj->base.size;
+
+ if (view) {
+ vma->ggtt_view = *view;
+ if (view->type == I915_GGTT_VIEW_PARTIAL) {
+ vma->size = view->params.partial.size;
+ vma->size <<= PAGE_SHIFT;
+ } else if (view->type == I915_GGTT_VIEW_ROTATED) {
+ vma->size =
+ intel_rotation_info_size(&view->params.rotated);
+ vma->size <<= PAGE_SHIFT;
+ }
+ }
+
+ if (i915_is_ggtt(vm)) {
+ vma->flags |= I915_VMA_GGTT;
+ list_add(&vma->obj_link, &obj->vma_list);
+ } else {
+ i915_ppgtt_get(i915_vm_to_ppgtt(vm));
+ list_add_tail(&vma->obj_link, &obj->vma_list);
+ }
+
+ rb = NULL;
+ p = &obj->vma_tree.rb_node;
+ while (*p) {
+ struct i915_vma *pos;
+
+ rb = *p;
+ pos = rb_entry(rb, struct i915_vma, obj_node);
+ if (i915_vma_compare(pos, vm, view) < 0)
+ p = &rb->rb_right;
+ else
+ p = &rb->rb_left;
+ }
+ rb_link_node(&vma->obj_node, rb, p);
+ rb_insert_color(&vma->obj_node, &obj->vma_tree);
+
+ return vma;
+}
+
+struct i915_vma *
+i915_vma_create(struct drm_i915_gem_object *obj,
+ struct i915_address_space *vm,
+ const struct i915_ggtt_view *view)
+{
+ lockdep_assert_held(&obj->base.dev->struct_mutex);
+ GEM_BUG_ON(view && !i915_is_ggtt(vm));
+ GEM_BUG_ON(i915_gem_obj_to_vma(obj, vm, view));
+
+ return __i915_vma_create(obj, vm, view);
+}
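For reference, a sketch of the lookup side, which this file does not contain (hypothetical helper): the rb-tree built above is ordered by i915_vma_compare(), so a lookup descends the same way the insertion loop does.

/* Hypothetical lookup matching the insertion order used above. */
static struct i915_vma *
vma_lookup(struct drm_i915_gem_object *obj,
           struct i915_address_space *vm,
           const struct i915_ggtt_view *view)
{
        struct rb_node *rb = obj->vma_tree.rb_node;

        while (rb) {
                struct i915_vma *vma = rb_entry(rb, struct i915_vma, obj_node);
                long cmp = i915_vma_compare(vma, vm, view);

                if (cmp == 0)
                        return vma;
                rb = cmp < 0 ? rb->rb_right : rb->rb_left;
        }
        return NULL;
}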
+
+/**
+ * i915_vma_bind - Sets up PTEs for a VMA in its corresponding address space.
+ * @vma: VMA to map
+ * @cache_level: mapping cache level
+ * @flags: flags like global or local mapping
+ *
+ * DMA addresses are taken from the scatter-gather table of this object (or of
+ * this VMA in case of non-default GGTT views) and PTE entries set up.
+ * Note that DMA addresses are also the only part of the SG table we care about.
+ */
+int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
+ u32 flags)
+{
+ u32 bind_flags;
+ u32 vma_flags;
+ int ret;
+
+ if (WARN_ON(flags == 0))
+ return -EINVAL;
+
+ bind_flags = 0;
+ if (flags & PIN_GLOBAL)
+ bind_flags |= I915_VMA_GLOBAL_BIND;
+ if (flags & PIN_USER)
+ bind_flags |= I915_VMA_LOCAL_BIND;
+
+ vma_flags = vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND);
+ if (flags & PIN_UPDATE)
+ bind_flags |= vma_flags;
+ else
+ bind_flags &= ~vma_flags;
+ if (bind_flags == 0)
+ return 0;
+
+ if (vma_flags == 0 && vma->vm->allocate_va_range) {
+ trace_i915_va_alloc(vma);
+ ret = vma->vm->allocate_va_range(vma->vm,
+ vma->node.start,
+ vma->node.size);
+ if (ret)
+ return ret;
+ }
+
+ ret = vma->vm->bind_vma(vma, cache_level, bind_flags);
+ if (ret)
+ return ret;
+
+ vma->flags |= bind_flags;
+ return 0;
+}
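The bind_flags bookkeeping above reduces to: with PIN_UPDATE, rewrite everything already bound; otherwise bind only what is newly requested. Some illustrative cases (not part of the patch):

/*
 *   requested    already bound   PIN_UPDATE   resulting bind_flags
 *   PIN_GLOBAL   GLOBAL          no           0      (nothing to do)
 *   PIN_GLOBAL   LOCAL           no           GLOBAL (add the mapping)
 *   PIN_GLOBAL   GLOBAL          yes          GLOBAL (rewrite the PTEs)
 */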
+
+void __iomem *i915_vma_pin_iomap(struct i915_vma *vma)
+{
+ void __iomem *ptr;
+
+ /* Access through the GTT requires the device to be awake. */
+ assert_rpm_wakelock_held(to_i915(vma->vm->dev));
+
+ lockdep_assert_held(&vma->vm->dev->struct_mutex);
+ if (WARN_ON(!i915_vma_is_map_and_fenceable(vma)))
+ return IO_ERR_PTR(-ENODEV);
+
+ GEM_BUG_ON(!i915_vma_is_ggtt(vma));
+ GEM_BUG_ON((vma->flags & I915_VMA_GLOBAL_BIND) == 0);
+
+ ptr = vma->iomap;
+ if (ptr == NULL) {
+ ptr = io_mapping_map_wc(&i915_vm_to_ggtt(vma->vm)->mappable,
+ vma->node.start,
+ vma->node.size);
+ if (ptr == NULL)
+ return IO_ERR_PTR(-ENOMEM);
+
+ vma->iomap = ptr;
+ }
+
+ __i915_vma_pin(vma);
+ return ptr;
+}
+
+void i915_vma_unpin_and_release(struct i915_vma **p_vma)
+{
+ struct i915_vma *vma;
+ struct drm_i915_gem_object *obj;
+
+ vma = fetch_and_zero(p_vma);
+ if (!vma)
+ return;
+
+ obj = vma->obj;
+
+ i915_vma_unpin(vma);
+ i915_vma_close(vma);
+
+ __i915_gem_object_release_unless_active(obj);
+}
+
+bool
+i915_vma_misplaced(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
+{
+ if (!drm_mm_node_allocated(&vma->node))
+ return false;
+
+ if (vma->node.size < size)
+ return true;
+
+ if (alignment && vma->node.start & (alignment - 1))
+ return true;
+
+ if (flags & PIN_MAPPABLE && !i915_vma_is_map_and_fenceable(vma))
+ return true;
+
+ if (flags & PIN_OFFSET_BIAS &&
+ vma->node.start < (flags & PIN_OFFSET_MASK))
+ return true;
+
+ if (flags & PIN_OFFSET_FIXED &&
+ vma->node.start != (flags & PIN_OFFSET_MASK))
+ return true;
+
+ return false;
+}
+
+void __i915_vma_set_map_and_fenceable(struct i915_vma *vma)
+{
+ struct drm_i915_gem_object *obj = vma->obj;
+ struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
+ bool mappable, fenceable;
+ u32 fence_size, fence_alignment;
+
+ fence_size = i915_gem_get_ggtt_size(dev_priv,
+ vma->size,
+ i915_gem_object_get_tiling(obj));
+ fence_alignment = i915_gem_get_ggtt_alignment(dev_priv,
+ vma->size,
+ i915_gem_object_get_tiling(obj),
+ true);
+
+ fenceable = (vma->node.size == fence_size &&
+ (vma->node.start & (fence_alignment - 1)) == 0);
+
+ mappable = (vma->node.start + fence_size <=
+ dev_priv->ggtt.mappable_end);
+
+ /*
+ * Explicitly disable for rotated VMA since the display does not
+ * need the fence and the VMA is not accessible to other users.
+ */
+ if (mappable && fenceable &&
+ vma->ggtt_view.type != I915_GGTT_VIEW_ROTATED)
+ vma->flags |= I915_VMA_CAN_FENCE;
+ else
+ vma->flags &= ~I915_VMA_CAN_FENCE;
+}
+
+bool i915_gem_valid_gtt_space(struct i915_vma *vma,
+ unsigned long cache_level)
+{
+ struct drm_mm_node *gtt_space = &vma->node;
+ struct drm_mm_node *other;
+
+ /*
+ * On some machines we have to be careful when putting differing types
+ * of snoopable memory together to avoid the prefetcher crossing memory
+ * domains and dying. During vm initialisation, we decide whether or not
+ * these constraints apply and set the drm_mm.color_adjust
+ * appropriately.
+ */
+ if (vma->vm->mm.color_adjust == NULL)
+ return true;
+
+ if (!drm_mm_node_allocated(gtt_space))
+ return true;
+
+ if (list_empty(&gtt_space->node_list))
+ return true;
+
+ other = list_entry(gtt_space->node_list.prev, struct drm_mm_node, node_list);
+ if (other->allocated && !other->hole_follows && other->color != cache_level)
+ return false;
+
+ other = list_entry(gtt_space->node_list.next, struct drm_mm_node, node_list);
+ if (other->allocated && !gtt_space->hole_follows && other->color != cache_level)
+ return false;
+
+ return true;
+}
+
+/**
+ * i915_vma_insert - finds a slot for the vma in its address space
+ * @vma: the vma
+ * @size: requested size in bytes (can be larger than the VMA)
+ * @alignment: required alignment
+ * @flags: mask of PIN_* flags to use
+ *
+ * First we try to allocate some free space that meets the requirements for
+ * the VMA. Failing that, if the flags permit, it will evict an old VMA,
+ * preferably the oldest idle entry to make room for the new VMA.
+ *
+ * Returns:
+ * 0 on success, negative error code otherwise.
+ */
+static int
+i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
+{
+ struct drm_i915_private *dev_priv = to_i915(vma->vm->dev);
+ struct drm_i915_gem_object *obj = vma->obj;
+ u64 start, end;
+ int ret;
+
+ GEM_BUG_ON(vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));
+ GEM_BUG_ON(drm_mm_node_allocated(&vma->node));
+
+ size = max(size, vma->size);
+ if (flags & PIN_MAPPABLE)
+ size = i915_gem_get_ggtt_size(dev_priv, size,
+ i915_gem_object_get_tiling(obj));
+
+ alignment = max(max(alignment, vma->display_alignment),
+ i915_gem_get_ggtt_alignment(dev_priv, size,
+ i915_gem_object_get_tiling(obj),
+ flags & PIN_MAPPABLE));
+
+ start = flags & PIN_OFFSET_BIAS ? flags & PIN_OFFSET_MASK : 0;
+
+ end = vma->vm->total;
+ if (flags & PIN_MAPPABLE)
+ end = min_t(u64, end, dev_priv->ggtt.mappable_end);
+ if (flags & PIN_ZONE_4G)
+ end = min_t(u64, end, (1ULL << 32) - PAGE_SIZE);
+
+ /* If binding the object/GGTT view requires more space than the entire
+ * aperture has, reject it early before evicting everything in a vain
+ * attempt to find space.
+ */
+ if (size > end) {
+ DRM_DEBUG("Attempting to bind an object larger than the aperture: request=%llu [object=%zd] > %s aperture=%llu\n",
+ size, obj->base.size,
+ flags & PIN_MAPPABLE ? "mappable" : "total",
+ end);
+ return -E2BIG;
+ }
+
+ ret = i915_gem_object_pin_pages(obj);
+ if (ret)
+ return ret;
+
+ if (flags & PIN_OFFSET_FIXED) {
+ u64 offset = flags & PIN_OFFSET_MASK;
+ if (offset & (alignment - 1) || offset > end - size) {
+ ret = -EINVAL;
+ goto err_unpin;
+ }
+
+ vma->node.start = offset;
+ vma->node.size = size;
+ vma->node.color = obj->cache_level;
+ ret = drm_mm_reserve_node(&vma->vm->mm, &vma->node);
+ if (ret) {
+ ret = i915_gem_evict_for_vma(vma);
+ if (ret == 0)
+ ret = drm_mm_reserve_node(&vma->vm->mm, &vma->node);
+ if (ret)
+ goto err_unpin;
+ }
+ } else {
+ u32 search_flag, alloc_flag;
+
+ if (flags & PIN_HIGH) {
+ search_flag = DRM_MM_SEARCH_BELOW;
+ alloc_flag = DRM_MM_CREATE_TOP;
+ } else {
+ search_flag = DRM_MM_SEARCH_DEFAULT;
+ alloc_flag = DRM_MM_CREATE_DEFAULT;
+ }
+
+ /* We only allocate in PAGE_SIZE/GTT_PAGE_SIZE (4096) chunks,
+ * so we know that we always have a minimum alignment of 4096.
+ * The drm_mm range manager is optimised to return results
+ * with zero alignment, so where possible use the optimal
+ * path.
+ */
+ if (alignment <= 4096)
+ alignment = 0;
+
+search_free:
+ ret = drm_mm_insert_node_in_range_generic(&vma->vm->mm,
+ &vma->node,
+ size, alignment,
+ obj->cache_level,
+ start, end,
+ search_flag,
+ alloc_flag);
+ if (ret) {
+ ret = i915_gem_evict_something(vma->vm, size, alignment,
+ obj->cache_level,
+ start, end,
+ flags);
+ if (ret == 0)
+ goto search_free;
+
+ goto err_unpin;
+ }
+
+ GEM_BUG_ON(vma->node.start < start);
+ GEM_BUG_ON(vma->node.start + vma->node.size > end);
+ }
+ GEM_BUG_ON(!i915_gem_valid_gtt_space(vma, obj->cache_level));
+
+ list_move_tail(&obj->global_link, &dev_priv->mm.bound_list);
+ list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
+ obj->bind_count++;
+ GEM_BUG_ON(atomic_read(&obj->mm.pages_pin_count) < obj->bind_count);
+
+ return 0;
+
+err_unpin:
+ i915_gem_object_unpin_pages(obj);
+ return ret;
+}
+
+int __i915_vma_do_pin(struct i915_vma *vma,
+ u64 size, u64 alignment, u64 flags)
+{
+ unsigned int bound = vma->flags;
+ int ret;
+
+ lockdep_assert_held(&vma->vm->dev->struct_mutex);
+ GEM_BUG_ON((flags & (PIN_GLOBAL | PIN_USER)) == 0);
+ GEM_BUG_ON((flags & PIN_GLOBAL) && !i915_vma_is_ggtt(vma));
+
+ if (WARN_ON(bound & I915_VMA_PIN_OVERFLOW)) {
+ ret = -EBUSY;
+ goto err;
+ }
+
+ if ((bound & I915_VMA_BIND_MASK) == 0) {
+ ret = i915_vma_insert(vma, size, alignment, flags);
+ if (ret)
+ goto err;
+ }
+
+ ret = i915_vma_bind(vma, vma->obj->cache_level, flags);
+ if (ret)
+ goto err;
+
+ if ((bound ^ vma->flags) & I915_VMA_GLOBAL_BIND)
+ __i915_vma_set_map_and_fenceable(vma);
+
+ GEM_BUG_ON(i915_vma_misplaced(vma, size, alignment, flags));
+ return 0;
+
+err:
+ __i915_vma_unpin(vma);
+ return ret;
+}
+
+void i915_vma_destroy(struct i915_vma *vma)
+{
+ GEM_BUG_ON(vma->node.allocated);
+ GEM_BUG_ON(i915_vma_is_active(vma));
+ GEM_BUG_ON(!i915_vma_is_closed(vma));
+ GEM_BUG_ON(vma->fence);
+
+ list_del(&vma->vm_link);
+ if (!i915_vma_is_ggtt(vma))
+ i915_ppgtt_put(i915_vm_to_ppgtt(vma->vm));
+
+ kmem_cache_free(to_i915(vma->obj->base.dev)->vmas, vma);
+}
+
+void i915_vma_close(struct i915_vma *vma)
+{
+ GEM_BUG_ON(i915_vma_is_closed(vma));
+ vma->flags |= I915_VMA_CLOSED;
+
+ list_del(&vma->obj_link);
+ rb_erase(&vma->obj_node, &vma->obj->vma_tree);
+
+ if (!i915_vma_is_active(vma) && !i915_vma_is_pinned(vma))
+ WARN_ON(i915_vma_unbind(vma));
+}
+
+static void __i915_vma_iounmap(struct i915_vma *vma)
+{
+ GEM_BUG_ON(i915_vma_is_pinned(vma));
+
+ if (vma->iomap == NULL)
+ return;
+
+ io_mapping_unmap(vma->iomap);
+ vma->iomap = NULL;
+}
+
+int i915_vma_unbind(struct i915_vma *vma)
+{
+ struct drm_i915_gem_object *obj = vma->obj;
+ unsigned long active;
+ int ret;
+
+ lockdep_assert_held(&obj->base.dev->struct_mutex);
+
+ /* First wait upon any activity as retiring the request may
+ * have side-effects such as unpinning or even unbinding this vma.
+ */
+ active = i915_vma_get_active(vma);
+ if (active) {
+ int idx;
+
+ /* When a closed VMA is retired, it is unbound - eek.
+ * In order to prevent it from being recursively closed,
+ * take a pin on the vma so that the second unbind is
+ * aborted.
+ *
+ * Even more scary is that the retire callback may free
+ * the object (last active vma). To prevent the explosion
+ * we defer the actual object free to a worker that can
+ * only proceed once it acquires the struct_mutex (which
+ * we currently hold, therefore it cannot free this object
+ * before we are finished).
+ */
+ __i915_vma_pin(vma);
+
+ for_each_active(active, idx) {
+ ret = i915_gem_active_retire(&vma->last_read[idx],
+ &vma->vm->dev->struct_mutex);
+ if (ret)
+ break;
+ }
+
+ __i915_vma_unpin(vma);
+ if (ret)
+ return ret;
+
+ GEM_BUG_ON(i915_vma_is_active(vma));
+ }
+
+ if (i915_vma_is_pinned(vma))
+ return -EBUSY;
+
+ if (!drm_mm_node_allocated(&vma->node))
+ goto destroy;
+
+ GEM_BUG_ON(obj->bind_count == 0);
+ GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
+
+ if (i915_vma_is_map_and_fenceable(vma)) {
+ /* release the fence reg _after_ flushing */
+ ret = i915_vma_put_fence(vma);
+ if (ret)
+ return ret;
+
+ /* Force a pagefault for domain tracking on next user access */
+ i915_gem_release_mmap(obj);
+
+ __i915_vma_iounmap(vma);
+ vma->flags &= ~I915_VMA_CAN_FENCE;
+ }
+
+ if (likely(!vma->vm->closed)) {
+ trace_i915_vma_unbind(vma);
+ vma->vm->unbind_vma(vma);
+ }
+ vma->flags &= ~(I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND);
+
+ drm_mm_remove_node(&vma->node);
+ list_move_tail(&vma->vm_link, &vma->vm->unbound_list);
+
+ if (vma->pages != obj->mm.pages) {
+ GEM_BUG_ON(!vma->pages);
+ sg_free_table(vma->pages);
+ kfree(vma->pages);
+ }
+ vma->pages = NULL;
+
+ /* Since the unbound list is global, only move to that list if
+ * no more VMAs exist. */
+ if (--obj->bind_count == 0)
+ list_move_tail(&obj->global_link,
+ &to_i915(obj->base.dev)->mm.unbound_list);
+
+ /* And finally now the object is completely decoupled from this vma,
+ * we can drop its hold on the backing storage and allow it to be
+ * reaped by the shrinker.
+ */
+ i915_gem_object_unpin_pages(obj);
+
+destroy:
+ if (unlikely(i915_vma_is_closed(vma)))
+ i915_vma_destroy(vma);
+
+ return 0;
+}
+
diff --git a/drivers/gpu/drm/i915/i915_vma.h b/drivers/gpu/drm/i915/i915_vma.h
new file mode 100644
index 000000000000..85446f0b0b3f
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_vma.h
@@ -0,0 +1,341 @@
+/*
+ * Copyright © 2016 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __I915_VMA_H__
+#define __I915_VMA_H__
+
+#include <linux/io-mapping.h>
+
+#include <drm/drm_mm.h>
+
+#include "i915_gem_gtt.h"
+#include "i915_gem_fence_reg.h"
+#include "i915_gem_object.h"
+#include "i915_gem_request.h"
+
+
+enum i915_cache_level;
+
+/**
+ * A VMA represents a GEM BO that is bound into an address space. Therefore, a
+ * VMA's presence cannot be guaranteed before binding, or after unbinding the
+ * object into/from the address space.
+ *
+ * To make things as simple as possible (i.e. no refcounting), a VMA's lifetime
+ * will always be <= an object's lifetime. So object refcounting should cover us.
+ */
+struct i915_vma {
+ struct drm_mm_node node;
+ struct drm_i915_gem_object *obj;
+ struct i915_address_space *vm;
+ struct drm_i915_fence_reg *fence;
+ struct sg_table *pages;
+ void __iomem *iomap;
+ u64 size;
+ u64 display_alignment;
+
+ unsigned int flags;
+ /**
+ * How many users have pinned this object in GTT space. The following
+ * users can each hold at most one reference: pwrite/pread, execbuffer
+ * (objects are not allowed multiple times for the same batchbuffer),
+ * and the framebuffer code. When switching/pageflipping, the
+ * framebuffer code has at most two buffers pinned per crtc.
+ *
+ * In the worst case this is 1 + 1 + 1 + 2*2 = 7. That would fit into 3
+ * bits with absolutely no headroom. So use 4 bits.
+ */
+#define I915_VMA_PIN_MASK 0xf
+#define I915_VMA_PIN_OVERFLOW BIT(5)
+
+ /** Flags and address space this VMA is bound to */
+#define I915_VMA_GLOBAL_BIND BIT(6)
+#define I915_VMA_LOCAL_BIND BIT(7)
+#define I915_VMA_BIND_MASK (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND | I915_VMA_PIN_OVERFLOW)
+
+#define I915_VMA_GGTT BIT(8)
+#define I915_VMA_CAN_FENCE BIT(9)
+#define I915_VMA_CLOSED BIT(10)
+
+ unsigned int active;
+ struct i915_gem_active last_read[I915_NUM_ENGINES];
+ struct i915_gem_active last_fence;
+
+ /**
+ * Support different GGTT views into the same object.
+ * This means there can be multiple VMA mappings per object and per VM.
+ * i915_ggtt_view_type is used to distinguish between those entries.
+ * The default of zero (I915_GGTT_VIEW_NORMAL) is the one
+ * assumed in GEM functions which take no ggtt view parameter.
+ */
+ struct i915_ggtt_view ggtt_view;
+
+ /** This object's place on the active/inactive lists */
+ struct list_head vm_link;
+
+ struct list_head obj_link; /* Link in the object's VMA list */
+ struct rb_node obj_node;
+
+ /** This vma's place in the batchbuffer or on the eviction list */
+ struct list_head exec_list;
+
+ /**
+ * Used for performing relocations during execbuffer insertion.
+ */
+ struct hlist_node exec_node;
+ unsigned long exec_handle;
+ struct drm_i915_gem_exec_object2 *exec_entry;
+};
+
+struct i915_vma *
+i915_vma_create(struct drm_i915_gem_object *obj,
+ struct i915_address_space *vm,
+ const struct i915_ggtt_view *view);
+
+void i915_vma_unpin_and_release(struct i915_vma **p_vma);
+
+static inline bool i915_vma_is_ggtt(const struct i915_vma *vma)
+{
+ return vma->flags & I915_VMA_GGTT;
+}
+
+static inline bool i915_vma_is_map_and_fenceable(const struct i915_vma *vma)
+{
+ return vma->flags & I915_VMA_CAN_FENCE;
+}
+
+static inline bool i915_vma_is_closed(const struct i915_vma *vma)
+{
+ return vma->flags & I915_VMA_CLOSED;
+}
+
+static inline unsigned int i915_vma_get_active(const struct i915_vma *vma)
+{
+ return vma->active;
+}
+
+static inline bool i915_vma_is_active(const struct i915_vma *vma)
+{
+ return i915_vma_get_active(vma);
+}
+
+static inline void i915_vma_set_active(struct i915_vma *vma,
+ unsigned int engine)
+{
+ vma->active |= BIT(engine);
+}
+
+static inline void i915_vma_clear_active(struct i915_vma *vma,
+ unsigned int engine)
+{
+ vma->active &= ~BIT(engine);
+}
+
+static inline bool i915_vma_has_active_engine(const struct i915_vma *vma,
+ unsigned int engine)
+{
+ return vma->active & BIT(engine);
+}
+
+static inline u32 i915_ggtt_offset(const struct i915_vma *vma)
+{
+ GEM_BUG_ON(!i915_vma_is_ggtt(vma));
+ GEM_BUG_ON(!vma->node.allocated);
+ GEM_BUG_ON(upper_32_bits(vma->node.start));
+ GEM_BUG_ON(upper_32_bits(vma->node.start + vma->node.size - 1));
+ return lower_32_bits(vma->node.start);
+}
+
+static inline struct i915_vma *i915_vma_get(struct i915_vma *vma)
+{
+ i915_gem_object_get(vma->obj);
+ return vma;
+}
+
+static inline void i915_vma_put(struct i915_vma *vma)
+{
+ i915_gem_object_put(vma->obj);
+}
+
+static inline long
+i915_vma_compare(struct i915_vma *vma,
+ struct i915_address_space *vm,
+ const struct i915_ggtt_view *view)
+{
+ GEM_BUG_ON(view && !i915_is_ggtt(vm));
+
+ if (vma->vm != vm)
+ return vma->vm - vm;
+
+ if (!view)
+ return vma->ggtt_view.type;
+
+ if (vma->ggtt_view.type != view->type)
+ return vma->ggtt_view.type - view->type;
+
+ return memcmp(&vma->ggtt_view.params,
+ &view->params,
+ sizeof(view->params));
+}
+
+int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
+ u32 flags);
+bool i915_gem_valid_gtt_space(struct i915_vma *vma, unsigned long cache_level);
+bool
+i915_vma_misplaced(struct i915_vma *vma, u64 size, u64 alignment, u64 flags);
+void __i915_vma_set_map_and_fenceable(struct i915_vma *vma);
+int __must_check i915_vma_unbind(struct i915_vma *vma);
+void i915_vma_close(struct i915_vma *vma);
+void i915_vma_destroy(struct i915_vma *vma);
+
+int __i915_vma_do_pin(struct i915_vma *vma,
+ u64 size, u64 alignment, u64 flags);
+static inline int __must_check
+i915_vma_pin(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
+{
+ BUILD_BUG_ON(PIN_MBZ != I915_VMA_PIN_OVERFLOW);
+ BUILD_BUG_ON(PIN_GLOBAL != I915_VMA_GLOBAL_BIND);
+ BUILD_BUG_ON(PIN_USER != I915_VMA_LOCAL_BIND);
+
+ /* Pin early to prevent the shrinker/eviction logic from destroying
+ * our vma as we insert and bind.
+ */
+ if (likely(((++vma->flags ^ flags) & I915_VMA_BIND_MASK) == 0))
+ return 0;
+
+ return __i915_vma_do_pin(vma, size, alignment, flags);
+}
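The fast path above leans on the flags layout: the low bits are the pin count, so ++vma->flags both takes a pin and lets the XOR test whether the requested bind bits are already present. A worked illustration (not part of the patch):

/*
 *   vma->flags = I915_VMA_GLOBAL_BIND | 1    (globally bound, one pin)
 *   flags      = PIN_GLOBAL                  (== I915_VMA_GLOBAL_BIND)
 *
 *   ++vma->flags           -> I915_VMA_GLOBAL_BIND | 2   (pin taken)
 *   ^ flags                -> 2                          (bind bits cancel)
 *   & I915_VMA_BIND_MASK   -> 0                          (fast path, done)
 *
 * If a requested bind bit is not yet set, it survives the XOR/mask and
 * execution falls through to __i915_vma_do_pin().
 */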
+
+static inline int i915_vma_pin_count(const struct i915_vma *vma)
+{
+ return vma->flags & I915_VMA_PIN_MASK;
+}
+
+static inline bool i915_vma_is_pinned(const struct i915_vma *vma)
+{
+ return i915_vma_pin_count(vma);
+}
+
+static inline void __i915_vma_pin(struct i915_vma *vma)
+{
+ vma->flags++;
+ GEM_BUG_ON(vma->flags & I915_VMA_PIN_OVERFLOW);
+}
+
+static inline void __i915_vma_unpin(struct i915_vma *vma)
+{
+ GEM_BUG_ON(!i915_vma_is_pinned(vma));
+ vma->flags--;
+}
+
+static inline void i915_vma_unpin(struct i915_vma *vma)
+{
+ GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
+ __i915_vma_unpin(vma);
+}
+
+/**
+ * i915_vma_pin_iomap - calls ioremap_wc to map the GGTT VMA via the aperture
+ * @vma: VMA to iomap
+ *
+ * The passed in VMA has to be pinned in the global GTT mappable region.
+ * An extra pinning of the VMA is acquired for the return iomapping,
+ * the caller must call i915_vma_unpin_iomap to relinquish the pinning
+ * after the iomapping is no longer required.
+ *
+ * Callers must hold the struct_mutex.
+ *
+ * Returns a valid iomapped pointer or ERR_PTR.
+ */
+void __iomem *i915_vma_pin_iomap(struct i915_vma *vma);
+#define IO_ERR_PTR(x) ((void __iomem *)ERR_PTR(x))
+
+/**
+ * i915_vma_unpin_iomap - unpins the mapping returned from i915_vma_pin_iomap()
+ * @vma: VMA to unpin
+ *
+ * Unpins the previously iomapped VMA from i915_vma_pin_iomap().
+ *
+ * Callers must hold the struct_mutex. This function is only valid to be
+ * called on a VMA previously iomapped by the caller with i915_vma_pin_iomap().
+ */
+static inline void i915_vma_unpin_iomap(struct i915_vma *vma)
+{
+ lockdep_assert_held(&vma->vm->dev->struct_mutex);
+ GEM_BUG_ON(vma->iomap == NULL);
+ i915_vma_unpin(vma);
+}
+
+static inline struct page *i915_vma_first_page(struct i915_vma *vma)
+{
+ GEM_BUG_ON(!vma->pages);
+ return sg_page(vma->pages->sgl);
+}
+
+/**
+ * i915_vma_pin_fence - pin fencing state
+ * @vma: vma to pin fencing for
+ *
+ * This pins the fencing state (whether tiled or untiled) to make sure the
+ * vma (and its object) is ready to be used as a scanout target. Fencing
+ * status must be synchronized first by calling i915_vma_get_fence():
+ *
+ * The resulting fence pin reference must be released again with
+ * i915_vma_unpin_fence().
+ *
+ * Returns:
+ *
+ * True if the vma has a fence, false otherwise.
+ */
+static inline bool
+i915_vma_pin_fence(struct i915_vma *vma)
+{
+ lockdep_assert_held(&vma->vm->dev->struct_mutex);
+ if (vma->fence) {
+ vma->fence->pin_count++;
+ return true;
+ } else
+ return false;
+}
+
+/**
+ * i915_vma_unpin_fence - unpin fencing state
+ * @vma: vma to unpin fencing for
+ *
+ * This releases the fence pin reference acquired through
+ * i915_vma_pin_fence. It will handle both objects with and without an
+ * attached fence correctly, callers do not need to distinguish this.
+ */
+static inline void
+i915_vma_unpin_fence(struct i915_vma *vma)
+{
+ lockdep_assert_held(&vma->vm->dev->struct_mutex);
+ if (vma->fence) {
+ GEM_BUG_ON(vma->fence->pin_count <= 0);
+ vma->fence->pin_count--;
+ }
+}
+
+#endif
+
diff --git a/drivers/gpu/drm/i915/intel_atomic_plane.c b/drivers/gpu/drm/i915/intel_atomic_plane.c
index b82de3072d4f..dbe9fb41ae53 100644
--- a/drivers/gpu/drm/i915/intel_atomic_plane.c
+++ b/drivers/gpu/drm/i915/intel_atomic_plane.c
@@ -84,7 +84,6 @@ intel_plane_duplicate_state(struct drm_plane *plane)
state = &intel_state->base;
__drm_atomic_helper_plane_duplicate_state(plane, state);
- intel_state->wait_req = NULL;
return state;
}
@@ -101,13 +100,13 @@ void
intel_plane_destroy_state(struct drm_plane *plane,
struct drm_plane_state *state)
{
- WARN_ON(state && to_intel_plane_state(state)->wait_req);
drm_atomic_helper_plane_destroy_state(plane, state);
}
static int intel_plane_atomic_check(struct drm_plane *plane,
struct drm_plane_state *state)
{
+ struct drm_i915_private *dev_priv = to_i915(plane->dev);
struct drm_crtc *crtc = state->crtc;
struct intel_crtc *intel_crtc;
struct intel_crtc_state *crtc_state;
@@ -142,10 +141,11 @@ static int intel_plane_atomic_check(struct drm_plane *plane,
intel_state->clip.y2 =
crtc_state->base.enable ? crtc_state->pipe_src_h : 0;
- if (state->fb && intel_rotation_90_or_270(state->rotation)) {
- char *format_name;
- if (!(state->fb->modifier[0] == I915_FORMAT_MOD_Y_TILED ||
- state->fb->modifier[0] == I915_FORMAT_MOD_Yf_TILED)) {
+ if (state->fb && drm_rotation_90_or_270(state->rotation)) {
+ struct drm_format_name_buf format_name;
+
+ if (state->fb->modifier != I915_FORMAT_MOD_Y_TILED &&
+ state->fb->modifier != I915_FORMAT_MOD_Yf_TILED) {
DRM_DEBUG_KMS("Y/Yf tiling required for 90/270!\n");
return -EINVAL;
}
@@ -158,9 +158,9 @@ static int intel_plane_atomic_check(struct drm_plane *plane,
switch (state->fb->pixel_format) {
case DRM_FORMAT_C8:
case DRM_FORMAT_RGB565:
- format_name = drm_get_format_name(state->fb->pixel_format);
- DRM_DEBUG_KMS("Unsupported pixel format %s for 90/270!\n", format_name);
- kfree(format_name);
+ DRM_DEBUG_KMS("Unsupported pixel format %s for 90/270!\n",
+ drm_get_format_name(state->fb->pixel_format,
+ &format_name));
return -EINVAL;
default:
@@ -168,6 +168,14 @@ static int intel_plane_atomic_check(struct drm_plane *plane,
}
}
+ /* CHV ignores the mirror bit when the rotate bit is set :( */
+ if (IS_CHERRYVIEW(dev_priv) &&
+ state->rotation & DRM_ROTATE_180 &&
+ state->rotation & DRM_REFLECT_X) {
+ DRM_DEBUG_KMS("Cannot rotate and reflect at the same time\n");
+ return -EINVAL;
+ }
+
intel_state->base.visible = false;
ret = intel_plane->check_plane(plane, crtc_state, intel_state);
if (ret)
diff --git a/drivers/gpu/drm/i915/intel_audio.c b/drivers/gpu/drm/i915/intel_audio.c
index 6c70a5bfd7d8..49f10538d4aa 100644
--- a/drivers/gpu/drm/i915/intel_audio.c
+++ b/drivers/gpu/drm/i915/intel_audio.c
@@ -57,6 +57,63 @@
* struct &i915_audio_component_audio_ops @audio_ops is called from i915 driver.
*/
+/* DP N/M table */
+#define LC_540M 540000
+#define LC_270M 270000
+#define LC_162M 162000
+
+struct dp_aud_n_m {
+ int sample_rate;
+ int clock;
+ u16 m;
+ u16 n;
+};
+
+/* Values according to DP 1.4 Table 2-104 */
+static const struct dp_aud_n_m dp_aud_n_m[] = {
+ { 32000, LC_162M, 1024, 10125 },
+ { 44100, LC_162M, 784, 5625 },
+ { 48000, LC_162M, 512, 3375 },
+ { 64000, LC_162M, 2048, 10125 },
+ { 88200, LC_162M, 1568, 5625 },
+ { 96000, LC_162M, 1024, 3375 },
+ { 128000, LC_162M, 4096, 10125 },
+ { 176400, LC_162M, 3136, 5625 },
+ { 192000, LC_162M, 2048, 3375 },
+ { 32000, LC_270M, 1024, 16875 },
+ { 44100, LC_270M, 784, 9375 },
+ { 48000, LC_270M, 512, 5625 },
+ { 64000, LC_270M, 2048, 16875 },
+ { 88200, LC_270M, 1568, 9375 },
+ { 96000, LC_270M, 1024, 5625 },
+ { 128000, LC_270M, 4096, 16875 },
+ { 176400, LC_270M, 3136, 9375 },
+ { 192000, LC_270M, 2048, 5625 },
+ { 32000, LC_540M, 1024, 33750 },
+ { 44100, LC_540M, 784, 18750 },
+ { 48000, LC_540M, 512, 11250 },
+ { 64000, LC_540M, 2048, 33750 },
+ { 88200, LC_540M, 1568, 18750 },
+ { 96000, LC_540M, 1024, 11250 },
+ { 128000, LC_540M, 4096, 33750 },
+ { 176400, LC_540M, 3136, 18750 },
+ { 192000, LC_540M, 2048, 11250 },
+};
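The table rows satisfy the DP audio relation Maud / Naud = 512 * fs / f_LS_CLK; a spot check (illustrative, not part of the patch):

/* Spot check for the { 48000, LC_162M, 512, 3375 } row:
 *   512 * 48000 / 162000000 = 0.15170370...
 *   m / n = 512 / 3375      = 0.15170370...   -> consistent
 */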
+
+static const struct dp_aud_n_m *
+audio_config_dp_get_n_m(struct intel_crtc *intel_crtc, int rate)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(dp_aud_n_m); i++) {
+ if (rate == dp_aud_n_m[i].sample_rate &&
+ intel_crtc->config->port_clock == dp_aud_n_m[i].clock)
+ return &dp_aud_n_m[i];
+ }
+
+ return NULL;
+}
+
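/*
 * A minimal standalone check, not part of this patch: the table entries
 * above are the reduced fractions of the DP audio relation
 * Maud / Naud = 512 * fs / f_LS_Clk, where f_LS_Clk is the link symbol
 * clock (LC_162M -> 162 MHz for the 1.62 Gbps rate).
 */
#include <stdio.h>

int main(void)
{
	/* 32 kHz on the 1.62 Gbps link: { 32000, LC_162M, 1024, 10125 } */
	unsigned long long fs = 32000, ls_clk = 162000000;
	unsigned long long m = 1024, n = 10125;

	/* cross-multiplied so the comparison stays exact integer math */
	printf("%s\n", 512 * fs * n == m * ls_clk ? "match" : "mismatch");
	return 0;
}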
static const struct {
int clock;
u32 config;
@@ -81,7 +138,7 @@ static const struct {
int clock;
int n;
int cts;
-} aud_ncts[] = {
+} hdmi_aud_ncts[] = {
{ 44100, TMDS_296M, 4459, 234375 },
{ 44100, TMDS_297M, 4704, 247500 },
{ 48000, TMDS_296M, 5824, 281250 },
@@ -121,45 +178,20 @@ static u32 audio_config_hdmi_pixel_clock(const struct drm_display_mode *adjusted
return hdmi_audio_clock[i].config;
}
-static int audio_config_get_n(const struct drm_display_mode *mode, int rate)
+static int audio_config_hdmi_get_n(const struct drm_display_mode *adjusted_mode,
+ int rate)
{
int i;
- for (i = 0; i < ARRAY_SIZE(aud_ncts); i++) {
- if ((rate == aud_ncts[i].sample_rate) &&
- (mode->clock == aud_ncts[i].clock)) {
- return aud_ncts[i].n;
+ for (i = 0; i < ARRAY_SIZE(hdmi_aud_ncts); i++) {
+ if (rate == hdmi_aud_ncts[i].sample_rate &&
+ adjusted_mode->crtc_clock == hdmi_aud_ncts[i].clock) {
+ return hdmi_aud_ncts[i].n;
}
}
return 0;
}
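/*
 * A minimal standalone check, not part of this patch: the N/CTS pairs in
 * hdmi_aud_ncts follow the HDMI audio clock regeneration relation
 * CTS = f_TMDS * N / (128 * fs), exactly so for coherent TMDS clocks
 * such as 297 MHz.
 */
#include <stdio.h>

int main(void)
{
	/* 44.1 kHz at 297 MHz TMDS: { 44100, TMDS_297M, 4704, 247500 } */
	unsigned long long tmds = 297000000, fs = 44100;
	unsigned long long n = 4704, cts = 247500;

	/* cross-multiplied: f_TMDS * N == CTS * 128 * fs */
	printf("%s\n", tmds * n == cts * 128 * fs ? "exact" : "rounded");
	return 0;
}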
-static uint32_t audio_config_setup_n_reg(int n, uint32_t val)
-{
- int n_low, n_up;
- uint32_t tmp = val;
-
- n_low = n & 0xfff;
- n_up = (n >> 12) & 0xff;
- tmp &= ~(AUD_CONFIG_UPPER_N_MASK | AUD_CONFIG_LOWER_N_MASK);
- tmp |= ((n_up << AUD_CONFIG_UPPER_N_SHIFT) |
- (n_low << AUD_CONFIG_LOWER_N_SHIFT) |
- AUD_CONFIG_N_PROG_ENABLE);
- return tmp;
-}
-
-/* check whether N/CTS/M need be set manually */
-static bool audio_rate_need_prog(struct intel_crtc *crtc,
- const struct drm_display_mode *mode)
-{
- if (((mode->clock == TMDS_297M) ||
- (mode->clock == TMDS_296M)) &&
- intel_crtc_has_type(crtc->config, INTEL_OUTPUT_HDMI))
- return true;
- else
- return false;
-}
-
static bool intel_eld_uptodate(struct drm_connector *connector,
i915_reg_t reg_eldv, uint32_t bits_eldv,
i915_reg_t reg_elda, uint32_t bits_elda,
@@ -245,6 +277,100 @@ static void g4x_audio_codec_enable(struct drm_connector *connector,
I915_WRITE(G4X_AUD_CNTL_ST, tmp);
}
+static void
+hsw_dp_audio_config_update(struct intel_crtc *intel_crtc, enum port port,
+ const struct drm_display_mode *adjusted_mode)
+{
+ struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
+ struct i915_audio_component *acomp = dev_priv->audio_component;
+ int rate = acomp ? acomp->aud_sample_rate[port] : 0;
+ const struct dp_aud_n_m *nm = audio_config_dp_get_n_m(intel_crtc, rate);
+ enum pipe pipe = intel_crtc->pipe;
+ u32 tmp;
+
+ if (nm)
+ DRM_DEBUG_KMS("using Maud %u, Naud %u\n", nm->m, nm->n);
+ else
+ DRM_DEBUG_KMS("using automatic Maud, Naud\n");
+
+ tmp = I915_READ(HSW_AUD_CFG(pipe));
+ tmp &= ~AUD_CONFIG_N_VALUE_INDEX;
+ tmp &= ~AUD_CONFIG_PIXEL_CLOCK_HDMI_MASK;
+ tmp &= ~AUD_CONFIG_N_PROG_ENABLE;
+ tmp |= AUD_CONFIG_N_VALUE_INDEX;
+
+ if (nm) {
+ tmp &= ~AUD_CONFIG_N_MASK;
+ tmp |= AUD_CONFIG_N(nm->n);
+ tmp |= AUD_CONFIG_N_PROG_ENABLE;
+ }
+
+ I915_WRITE(HSW_AUD_CFG(pipe), tmp);
+
+ tmp = I915_READ(HSW_AUD_M_CTS_ENABLE(pipe));
+ tmp &= ~AUD_CONFIG_M_MASK;
+ tmp &= ~AUD_M_CTS_M_VALUE_INDEX;
+ tmp &= ~AUD_M_CTS_M_PROG_ENABLE;
+
+ if (nm) {
+ tmp |= nm->m;
+ tmp |= AUD_M_CTS_M_VALUE_INDEX;
+ tmp |= AUD_M_CTS_M_PROG_ENABLE;
+ }
+
+ I915_WRITE(HSW_AUD_M_CTS_ENABLE(pipe), tmp);
+}
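/*
 * Reading of the function above (from the code itself, not documentation):
 * AUD_CONFIG_N_VALUE_INDEX marks the stream as DP; on a table hit, N is
 * written with AUD_CONFIG_N_PROG_ENABLE and M with AUD_M_CTS_M_PROG_ENABLE
 * set, otherwise both enables are left clear and the hardware derives
 * Maud/Naud on its own.
 */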
+
+static void
+hsw_hdmi_audio_config_update(struct intel_crtc *intel_crtc, enum port port,
+ const struct drm_display_mode *adjusted_mode)
+{
+ struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
+ struct i915_audio_component *acomp = dev_priv->audio_component;
+ int rate = acomp ? acomp->aud_sample_rate[port] : 0;
+ enum pipe pipe = intel_crtc->pipe;
+ int n;
+ u32 tmp;
+
+ tmp = I915_READ(HSW_AUD_CFG(pipe));
+ tmp &= ~AUD_CONFIG_N_VALUE_INDEX;
+ tmp &= ~AUD_CONFIG_PIXEL_CLOCK_HDMI_MASK;
+ tmp &= ~AUD_CONFIG_N_PROG_ENABLE;
+ tmp |= audio_config_hdmi_pixel_clock(adjusted_mode);
+
+ n = audio_config_hdmi_get_n(adjusted_mode, rate);
+ if (n != 0) {
+ DRM_DEBUG_KMS("using N %d\n", n);
+
+ tmp &= ~AUD_CONFIG_N_MASK;
+ tmp |= AUD_CONFIG_N(n);
+ tmp |= AUD_CONFIG_N_PROG_ENABLE;
+ } else {
+ DRM_DEBUG_KMS("using automatic N\n");
+ }
+
+ I915_WRITE(HSW_AUD_CFG(pipe), tmp);
+
+	/*
+	 * Disable the "Enable CTS or M Prog" bit
+	 * and let the HW calculate the value
+	 */
+ tmp = I915_READ(HSW_AUD_M_CTS_ENABLE(pipe));
+ tmp &= ~AUD_M_CTS_M_PROG_ENABLE;
+ tmp &= ~AUD_M_CTS_M_VALUE_INDEX;
+ I915_WRITE(HSW_AUD_M_CTS_ENABLE(pipe), tmp);
+}
+
+static void
+hsw_audio_config_update(struct intel_crtc *intel_crtc, enum port port,
+ const struct drm_display_mode *adjusted_mode)
+{
+ if (intel_crtc_has_dp_encoder(intel_crtc->config))
+ hsw_dp_audio_config_update(intel_crtc, port, adjusted_mode);
+ else
+ hsw_hdmi_audio_config_update(intel_crtc, port, adjusted_mode);
+}
+
static void hsw_audio_codec_disable(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
@@ -276,20 +402,16 @@ static void hsw_audio_codec_disable(struct intel_encoder *encoder)
}
static void hsw_audio_codec_enable(struct drm_connector *connector,
- struct intel_encoder *encoder,
+ struct intel_encoder *intel_encoder,
const struct drm_display_mode *adjusted_mode)
{
struct drm_i915_private *dev_priv = to_i915(connector->dev);
- struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
+ struct intel_crtc *intel_crtc = to_intel_crtc(intel_encoder->base.crtc);
enum pipe pipe = intel_crtc->pipe;
- struct i915_audio_component *acomp = dev_priv->audio_component;
+ enum port port = intel_encoder->port;
const uint8_t *eld = connector->eld;
- struct intel_digital_port *intel_dig_port =
- enc_to_dig_port(&encoder->base);
- enum port port = intel_dig_port->port;
uint32_t tmp;
int len, i;
- int n, rate;
DRM_DEBUG_KMS("Enable audio codec on pipe %c, %u bytes ELD\n",
pipe_name(pipe), drm_eld_size(eld));
@@ -325,42 +447,17 @@ static void hsw_audio_codec_enable(struct drm_connector *connector,
I915_WRITE(HSW_AUD_PIN_ELD_CP_VLD, tmp);
/* Enable timestamps */
- tmp = I915_READ(HSW_AUD_CFG(pipe));
- tmp &= ~AUD_CONFIG_N_VALUE_INDEX;
- tmp &= ~AUD_CONFIG_PIXEL_CLOCK_HDMI_MASK;
- if (intel_crtc_has_dp_encoder(intel_crtc->config))
- tmp |= AUD_CONFIG_N_VALUE_INDEX;
- else
- tmp |= audio_config_hdmi_pixel_clock(adjusted_mode);
-
- tmp &= ~AUD_CONFIG_N_PROG_ENABLE;
- if (audio_rate_need_prog(intel_crtc, adjusted_mode)) {
- if (!acomp)
- rate = 0;
- else if (port >= PORT_A && port <= PORT_E)
- rate = acomp->aud_sample_rate[port];
- else {
- DRM_ERROR("invalid port: %d\n", port);
- rate = 0;
- }
- n = audio_config_get_n(adjusted_mode, rate);
- if (n != 0)
- tmp = audio_config_setup_n_reg(n, tmp);
- else
- DRM_DEBUG_KMS("no suitable N value is found\n");
- }
-
- I915_WRITE(HSW_AUD_CFG(pipe), tmp);
+ hsw_audio_config_update(intel_crtc, port, adjusted_mode);
mutex_unlock(&dev_priv->av_mutex);
}
-static void ilk_audio_codec_disable(struct intel_encoder *encoder)
+static void ilk_audio_codec_disable(struct intel_encoder *intel_encoder)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
- enum port port = enc_to_dig_port(&encoder->base)->port;
+ struct drm_i915_private *dev_priv = to_i915(intel_encoder->base.dev);
+ struct intel_crtc *intel_crtc = to_intel_crtc(intel_encoder->base.crtc);
enum pipe pipe = intel_crtc->pipe;
+ enum port port = intel_encoder->port;
uint32_t tmp, eldv;
i915_reg_t aud_config, aud_cntrl_st2;
@@ -400,13 +497,13 @@ static void ilk_audio_codec_disable(struct intel_encoder *encoder)
}
static void ilk_audio_codec_enable(struct drm_connector *connector,
- struct intel_encoder *encoder,
+ struct intel_encoder *intel_encoder,
const struct drm_display_mode *adjusted_mode)
{
struct drm_i915_private *dev_priv = to_i915(connector->dev);
- struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
- enum port port = enc_to_dig_port(&encoder->base)->port;
+ struct intel_crtc *intel_crtc = to_intel_crtc(intel_encoder->base.crtc);
enum pipe pipe = intel_crtc->pipe;
+ enum port port = intel_encoder->port;
uint8_t *eld = connector->eld;
uint32_t tmp, eldv;
int len, i;
@@ -425,13 +522,13 @@ static void ilk_audio_codec_enable(struct drm_connector *connector,
* infrastructure is not there yet.
*/
- if (HAS_PCH_IBX(connector->dev)) {
+ if (HAS_PCH_IBX(dev_priv)) {
hdmiw_hdmiedid = IBX_HDMIW_HDMIEDID(pipe);
aud_config = IBX_AUD_CFG(pipe);
aud_cntl_st = IBX_AUD_CNTL_ST(pipe);
aud_cntrl_st2 = IBX_AUD_CNTL_ST2;
- } else if (IS_VALLEYVIEW(connector->dev) ||
- IS_CHERRYVIEW(connector->dev)) {
+ } else if (IS_VALLEYVIEW(dev_priv) ||
+ IS_CHERRYVIEW(dev_priv)) {
hdmiw_hdmiedid = VLV_HDMIW_HDMIEDID(pipe);
aud_config = VLV_AUD_CFG(pipe);
aud_cntl_st = VLV_AUD_CNTL_ST(pipe);
@@ -480,24 +577,26 @@ static void ilk_audio_codec_enable(struct drm_connector *connector,
/**
* intel_audio_codec_enable - Enable the audio codec for HD audio
* @intel_encoder: encoder on which to enable audio
+ * @crtc_state: pointer to the current crtc state.
+ * @conn_state: pointer to the current connector state.
*
* The enable sequences may only be performed after enabling the transcoder and
 * port, and after link training has completed.
*/
-void intel_audio_codec_enable(struct intel_encoder *intel_encoder)
+void intel_audio_codec_enable(struct intel_encoder *intel_encoder,
+ const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state)
{
struct drm_encoder *encoder = &intel_encoder->base;
- struct intel_crtc *crtc = to_intel_crtc(encoder->crtc);
- const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
+ const struct drm_display_mode *adjusted_mode = &crtc_state->base.adjusted_mode;
struct drm_connector *connector;
- struct drm_device *dev = encoder->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(encoder->dev);
struct i915_audio_component *acomp = dev_priv->audio_component;
- struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
- enum port port = intel_dig_port->port;
+ enum port port = intel_encoder->port;
+ enum pipe pipe = to_intel_crtc(crtc_state->base.crtc)->pipe;
- connector = drm_select_eld(encoder);
- if (!connector)
+ connector = conn_state->connector;
+ if (!connector || !connector->eld[0])
return;
DRM_DEBUG_DRIVER("ELD on [CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
@@ -508,7 +607,7 @@ void intel_audio_codec_enable(struct intel_encoder *intel_encoder)
/* ELD Conn_Type */
connector->eld[5] &= ~(3 << 2);
- if (intel_crtc_has_dp_encoder(crtc->config))
+ if (intel_crtc_has_dp_encoder(crtc_state))
connector->eld[5] |= (1 << 2);
connector->eld[6] = drm_av_sync_delay(connector, adjusted_mode) / 2;
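/*
 * Background from the HDA ELD layout rather than this diff: ELD byte 5
 * bits [3:2] carry Conn_Type, 00b for HDMI and 01b for DisplayPort,
 * which is what the two writes above program before the ELD is handed
 * to the audio codec.
 */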
@@ -518,13 +617,19 @@ void intel_audio_codec_enable(struct intel_encoder *intel_encoder)
adjusted_mode);
mutex_lock(&dev_priv->av_mutex);
- intel_dig_port->audio_connector = connector;
+ intel_encoder->audio_connector = connector;
+
/* referred in audio callbacks */
- dev_priv->dig_port_map[port] = intel_encoder;
+ dev_priv->av_enc_map[pipe] = intel_encoder;
mutex_unlock(&dev_priv->av_mutex);
+	/* audio drivers expect pipe = -1 to indicate non-MST cases */
+ if (intel_encoder->type != INTEL_OUTPUT_DP_MST)
+ pipe = -1;
+
if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify)
- acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr, (int) port);
+ acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
+ (int) port, (int) pipe);
}
/**
@@ -537,22 +642,27 @@ void intel_audio_codec_enable(struct intel_encoder *intel_encoder)
void intel_audio_codec_disable(struct intel_encoder *intel_encoder)
{
struct drm_encoder *encoder = &intel_encoder->base;
- struct drm_device *dev = encoder->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(encoder->dev);
struct i915_audio_component *acomp = dev_priv->audio_component;
- struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
- enum port port = intel_dig_port->port;
+ enum port port = intel_encoder->port;
+ struct intel_crtc *crtc = to_intel_crtc(encoder->crtc);
+ enum pipe pipe = crtc->pipe;
if (dev_priv->display.audio_codec_disable)
dev_priv->display.audio_codec_disable(intel_encoder);
mutex_lock(&dev_priv->av_mutex);
- intel_dig_port->audio_connector = NULL;
- dev_priv->dig_port_map[port] = NULL;
+ intel_encoder->audio_connector = NULL;
+ dev_priv->av_enc_map[pipe] = NULL;
mutex_unlock(&dev_priv->av_mutex);
+	/* audio drivers expect pipe = -1 to indicate non-MST cases */
+ if (intel_encoder->type != INTEL_OUTPUT_DP_MST)
+ pipe = -1;
+
if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify)
- acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr, (int) port);
+ acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
+ (int) port, (int) pipe);
}
/**
@@ -627,74 +737,68 @@ static int i915_audio_component_get_cdclk_freq(struct device *kdev)
return dev_priv->cdclk_freq;
}
-static int i915_audio_component_sync_audio_rate(struct device *kdev,
- int port, int rate)
+static struct intel_encoder *get_saved_enc(struct drm_i915_private *dev_priv,
+ int port, int pipe)
+{
+
+ return NULL;
+
+ /* MST */
+ if (pipe >= 0)
+ return dev_priv->av_enc_map[pipe];
+
+ /* Non-MST */
+ for_each_pipe(dev_priv, pipe) {
+ struct intel_encoder *encoder;
+
+ encoder = dev_priv->av_enc_map[pipe];
+ if (encoder == NULL)
+ continue;
+
+ if (port == encoder->port)
+ return encoder;
+ }
+
+ return NULL;
+}
+
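/*
 * A minimal userspace model of the lookup convention above; names here
 * are hypothetical, only the pipe/port convention is taken from the
 * patch: pipe >= 0 addresses an MST stream directly, pipe == -1 means a
 * non-MST stream where the port uniquely identifies the encoder.
 */
#include <stdio.h>

#define MAX_PIPES 3

struct enc { int port; };

static struct enc *av_map[MAX_PIPES];	/* models dev_priv->av_enc_map */

static struct enc *lookup(int port, int pipe)
{
	int i;

	if (pipe >= MAX_PIPES)
		return NULL;
	if (pipe >= 0)			/* MST: pipe addresses the stream */
		return av_map[pipe];
	for (i = 0; i < MAX_PIPES; i++)	/* non-MST: port is unique */
		if (av_map[i] && av_map[i]->port == port)
			return av_map[i];
	return NULL;
}

int main(void)
{
	struct enc e = { .port = 2 };

	av_map[1] = &e;
	printf("%d %d\n", lookup(2, -1) == &e, lookup(2, 0) == NULL);
	return 0;
}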
+static int i915_audio_component_sync_audio_rate(struct device *kdev, int port,
+ int pipe, int rate)
{
struct drm_i915_private *dev_priv = kdev_to_i915(kdev);
struct intel_encoder *intel_encoder;
struct intel_crtc *crtc;
- struct drm_display_mode *mode;
+ struct drm_display_mode *adjusted_mode;
struct i915_audio_component *acomp = dev_priv->audio_component;
- enum pipe pipe = INVALID_PIPE;
- u32 tmp;
- int n;
int err = 0;
- /* HSW, BDW, SKL, KBL need this fix */
- if (!IS_SKYLAKE(dev_priv) &&
- !IS_KABYLAKE(dev_priv) &&
- !IS_BROADWELL(dev_priv) &&
- !IS_HASWELL(dev_priv))
+ if (!HAS_DDI(dev_priv))
return 0;
i915_audio_component_get_power(kdev);
mutex_lock(&dev_priv->av_mutex);
+
/* 1. get the pipe */
- intel_encoder = dev_priv->dig_port_map[port];
- /* intel_encoder might be NULL for DP MST */
+ intel_encoder = get_saved_enc(dev_priv, port, pipe);
if (!intel_encoder || !intel_encoder->base.crtc ||
- intel_encoder->type != INTEL_OUTPUT_HDMI) {
- DRM_DEBUG_KMS("no valid port %c\n", port_name(port));
+ (intel_encoder->type != INTEL_OUTPUT_HDMI &&
+ intel_encoder->type != INTEL_OUTPUT_DP)) {
+ DRM_DEBUG_KMS("Not valid for port %c\n", port_name(port));
err = -ENODEV;
goto unlock;
}
+
+	/* pipe passed from the audio driver will be -1 for the non-MST case */
crtc = to_intel_crtc(intel_encoder->base.crtc);
pipe = crtc->pipe;
- if (pipe == INVALID_PIPE) {
- DRM_DEBUG_KMS("no pipe for the port %c\n", port_name(port));
- err = -ENODEV;
- goto unlock;
- }
- DRM_DEBUG_KMS("pipe %c connects port %c\n",
- pipe_name(pipe), port_name(port));
- mode = &crtc->config->base.adjusted_mode;
+ adjusted_mode = &crtc->config->base.adjusted_mode;
/* port must be valid now, otherwise the pipe will be invalid */
acomp->aud_sample_rate[port] = rate;
- /* 2. check whether to set the N/CTS/M manually or not */
- if (!audio_rate_need_prog(crtc, mode)) {
- tmp = I915_READ(HSW_AUD_CFG(pipe));
- tmp &= ~AUD_CONFIG_N_PROG_ENABLE;
- I915_WRITE(HSW_AUD_CFG(pipe), tmp);
- goto unlock;
- }
-
- n = audio_config_get_n(mode, rate);
- if (n == 0) {
- DRM_DEBUG_KMS("Using automatic mode for N value on port %c\n",
- port_name(port));
- tmp = I915_READ(HSW_AUD_CFG(pipe));
- tmp &= ~AUD_CONFIG_N_PROG_ENABLE;
- I915_WRITE(HSW_AUD_CFG(pipe), tmp);
- goto unlock;
- }
-
- /* 3. set the N/CTS/M */
- tmp = I915_READ(HSW_AUD_CFG(pipe));
- tmp = audio_config_setup_n_reg(n, tmp);
- I915_WRITE(HSW_AUD_CFG(pipe), tmp);
+ hsw_audio_config_update(crtc, port, adjusted_mode);
unlock:
mutex_unlock(&dev_priv->av_mutex);
@@ -703,27 +807,29 @@ static int i915_audio_component_sync_audio_rate(struct device *kdev,
}
static int i915_audio_component_get_eld(struct device *kdev, int port,
- bool *enabled,
+ int pipe, bool *enabled,
unsigned char *buf, int max_bytes)
{
struct drm_i915_private *dev_priv = kdev_to_i915(kdev);
struct intel_encoder *intel_encoder;
- struct intel_digital_port *intel_dig_port;
const u8 *eld;
int ret = -EINVAL;
mutex_lock(&dev_priv->av_mutex);
- intel_encoder = dev_priv->dig_port_map[port];
- /* intel_encoder might be NULL for DP MST */
- if (intel_encoder) {
- ret = 0;
- intel_dig_port = enc_to_dig_port(&intel_encoder->base);
- *enabled = intel_dig_port->audio_connector != NULL;
- if (*enabled) {
- eld = intel_dig_port->audio_connector->eld;
- ret = drm_eld_size(eld);
- memcpy(buf, eld, min(max_bytes, ret));
- }
+
+ intel_encoder = get_saved_enc(dev_priv, port, pipe);
+ if (!intel_encoder) {
+ DRM_DEBUG_KMS("Not valid for port %c\n", port_name(port));
+ mutex_unlock(&dev_priv->av_mutex);
+ return ret;
+ }
+
+ ret = 0;
+ *enabled = intel_encoder->audio_connector != NULL;
+ if (*enabled) {
+ eld = intel_encoder->audio_connector->eld;
+ ret = drm_eld_size(eld);
+ memcpy(buf, eld, min(max_bytes, ret));
}
mutex_unlock(&dev_priv->av_mutex);
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index cf2560708e03..7ffab1abc518 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -996,6 +996,10 @@ parse_mipi_sequence(struct drm_i915_private *dev_priv,
goto err;
}
+	/* Log the presence of sequences we won't run. */
+ if (seq_id == MIPI_SEQ_TEAR_ON || seq_id == MIPI_SEQ_TEAR_OFF)
+ DRM_DEBUG_KMS("Unsupported sequence %u\n", seq_id);
+
dev_priv->vbt.dsi.sequence[seq_id] = data + index;
if (sequence->version >= 3)
@@ -1805,3 +1809,52 @@ intel_bios_is_port_hpd_inverted(struct drm_i915_private *dev_priv,
return false;
}
+
+/**
+ * intel_bios_is_lspcon_present - check if LSPCON is attached on @port
+ * @dev_priv: i915 device instance
+ * @port: port to check
+ *
+ * Return true if LSPCON is present on this port.
+ */
+bool
+intel_bios_is_lspcon_present(struct drm_i915_private *dev_priv,
+ enum port port)
+{
+ int i;
+
+ if (!HAS_LSPCON(dev_priv))
+ return false;
+
+ for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
+ if (!dev_priv->vbt.child_dev[i].common.lspcon)
+ continue;
+
+ switch (dev_priv->vbt.child_dev[i].common.dvo_port) {
+ case DVO_PORT_DPA:
+ case DVO_PORT_HDMIA:
+ if (port == PORT_A)
+ return true;
+ break;
+ case DVO_PORT_DPB:
+ case DVO_PORT_HDMIB:
+ if (port == PORT_B)
+ return true;
+ break;
+ case DVO_PORT_DPC:
+ case DVO_PORT_HDMIC:
+ if (port == PORT_C)
+ return true;
+ break;
+ case DVO_PORT_DPD:
+ case DVO_PORT_HDMID:
+ if (port == PORT_D)
+ return true;
+ break;
+ default:
+ break;
+ }
+ }
+
+ return false;
+}
diff --git a/drivers/gpu/drm/i915/intel_bios.h b/drivers/gpu/drm/i915/intel_bios.h
index 8405b5a367d7..7e3545f65257 100644
--- a/drivers/gpu/drm/i915/intel_bios.h
+++ b/drivers/gpu/drm/i915/intel_bios.h
@@ -46,14 +46,20 @@ struct edp_power_seq {
u16 t11_t12;
} __packed;
-/* MIPI Sequence Block definitions */
+/*
+ * MIPI Sequence Block definitions
+ *
+ * Note that the VBT spec has AssertReset / DeassertReset swapped from their
+ * usual naming; we use the proper names here to avoid confusion when
+ * reading the code.
+ */
enum mipi_seq {
MIPI_SEQ_END = 0,
- MIPI_SEQ_ASSERT_RESET,
+ MIPI_SEQ_DEASSERT_RESET, /* Spec says MipiAssertResetPin */
MIPI_SEQ_INIT_OTP,
MIPI_SEQ_DISPLAY_ON,
MIPI_SEQ_DISPLAY_OFF,
- MIPI_SEQ_DEASSERT_RESET,
+ MIPI_SEQ_ASSERT_RESET, /* Spec says MipiDeassertResetPin */
MIPI_SEQ_BACKLIGHT_ON, /* sequence block v2+ */
MIPI_SEQ_BACKLIGHT_OFF, /* sequence block v2+ */
MIPI_SEQ_TEAR_ON, /* sequence block v2+ */
diff --git a/drivers/gpu/drm/i915/intel_breadcrumbs.c b/drivers/gpu/drm/i915/intel_breadcrumbs.c
index 495611b7068d..c9c46a538edb 100644
--- a/drivers/gpu/drm/i915/intel_breadcrumbs.c
+++ b/drivers/gpu/drm/i915/intel_breadcrumbs.c
@@ -83,16 +83,18 @@ static void irq_enable(struct intel_engine_cs *engine)
*/
engine->breadcrumbs.irq_posted = true;
- spin_lock_irq(&engine->i915->irq_lock);
+ /* Caller disables interrupts */
+ spin_lock(&engine->i915->irq_lock);
engine->irq_enable(engine);
- spin_unlock_irq(&engine->i915->irq_lock);
+ spin_unlock(&engine->i915->irq_lock);
}
static void irq_disable(struct intel_engine_cs *engine)
{
- spin_lock_irq(&engine->i915->irq_lock);
+ /* Caller disables interrupts */
+ spin_lock(&engine->i915->irq_lock);
engine->irq_disable(engine);
- spin_unlock_irq(&engine->i915->irq_lock);
+ spin_unlock(&engine->i915->irq_lock);
engine->breadcrumbs.irq_posted = false;
}
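/*
 * The locking rule applied across these hunks, stated in generic spinlock
 * terms (nothing i915-specific): once a lock may be taken from interrupt
 * context, every acquisition must run with interrupts disabled, or an IRQ
 * arriving while the lock is held can try to take it again and deadlock.
 * Plain spin_lock() is correct in irq_enable()/irq_disable() only because
 * their callers have already disabled interrupts, as the added comments
 * note; the remaining b->lock users switch to spin_lock_irq().
 */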
@@ -293,9 +295,9 @@ bool intel_engine_add_wait(struct intel_engine_cs *engine,
struct intel_breadcrumbs *b = &engine->breadcrumbs;
bool first;
- spin_lock(&b->lock);
+ spin_lock_irq(&b->lock);
first = __intel_engine_add_wait(engine, wait);
- spin_unlock(&b->lock);
+ spin_unlock_irq(&b->lock);
return first;
}
@@ -326,7 +328,7 @@ void intel_engine_remove_wait(struct intel_engine_cs *engine,
if (RB_EMPTY_NODE(&wait->node))
return;
- spin_lock(&b->lock);
+ spin_lock_irq(&b->lock);
if (RB_EMPTY_NODE(&wait->node))
goto out_unlock;
@@ -400,7 +402,7 @@ out_unlock:
GEM_BUG_ON(rb_first(&b->waiters) !=
(b->first_wait ? &b->first_wait->node : NULL));
GEM_BUG_ON(!rcu_access_pointer(b->irq_seqno_bh) ^ RB_EMPTY_ROOT(&b->waiters));
- spin_unlock(&b->lock);
+ spin_unlock_irq(&b->lock);
}
static bool signal_complete(struct drm_i915_gem_request *request)
@@ -464,7 +466,7 @@ static int intel_breadcrumbs_signaler(void *arg)
&request->signaling.wait);
local_bh_disable();
- fence_signal(&request->fence);
+ dma_fence_signal(&request->fence);
local_bh_enable(); /* kick start the tasklets */
/* Find the next oldest signal. Note that as we have
@@ -473,14 +475,14 @@ static int intel_breadcrumbs_signaler(void *arg)
* we just completed - so double check we are still
* the oldest before picking the next one.
*/
- spin_lock(&b->lock);
+ spin_lock_irq(&b->lock);
if (request == b->first_signal) {
struct rb_node *rb =
rb_next(&request->signaling.node);
b->first_signal = rb ? to_signaler(rb) : NULL;
}
rb_erase(&request->signaling.node, &b->signals);
- spin_unlock(&b->lock);
+ spin_unlock_irq(&b->lock);
i915_gem_request_put(request);
} else {
@@ -502,11 +504,20 @@ void intel_engine_enable_signaling(struct drm_i915_gem_request *request)
struct rb_node *parent, **p;
bool first, wakeup;
- /* locked by fence_enable_sw_signaling() */
+ /* Note that we may be called from an interrupt handler on another
+ * device (e.g. nouveau signaling a fence completion causing us
+ * to submit a request, and so enable signaling). As such,
+ * we need to make sure that all other users of b->lock protect
+ * against interrupts, i.e. use spin_lock_irqsave.
+ */
+
+ /* locked by dma_fence_enable_sw_signaling() (irqsafe fence->lock) */
assert_spin_locked(&request->lock);
+ if (!request->global_seqno)
+ return;
request->signaling.wait.tsk = b->signaler;
- request->signaling.wait.seqno = request->fence.seqno;
+ request->signaling.wait.seqno = request->global_seqno;
i915_gem_request_get(request);
spin_lock(&b->lock);
@@ -530,8 +541,8 @@ void intel_engine_enable_signaling(struct drm_i915_gem_request *request)
p = &b->signals.rb_node;
while (*p) {
parent = *p;
- if (i915_seqno_passed(request->fence.seqno,
- to_signaler(parent)->fence.seqno)) {
+ if (i915_seqno_passed(request->global_seqno,
+ to_signaler(parent)->global_seqno)) {
p = &parent->rb_right;
first = false;
} else {
@@ -592,7 +603,7 @@ void intel_engine_reset_breadcrumbs(struct intel_engine_cs *engine)
struct intel_breadcrumbs *b = &engine->breadcrumbs;
cancel_fake_irq(engine);
- spin_lock(&b->lock);
+ spin_lock_irq(&b->lock);
__intel_breadcrumbs_disable_irq(b);
if (intel_engine_has_waiter(engine)) {
@@ -605,7 +616,7 @@ void intel_engine_reset_breadcrumbs(struct intel_engine_cs *engine)
irq_disable(engine);
}
- spin_unlock(&b->lock);
+ spin_unlock_irq(&b->lock);
}
void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine)
@@ -618,33 +629,28 @@ void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine)
cancel_fake_irq(engine);
}
-unsigned int intel_kick_waiters(struct drm_i915_private *i915)
+unsigned int intel_breadcrumbs_busy(struct drm_i915_private *i915)
{
struct intel_engine_cs *engine;
+ enum intel_engine_id id;
unsigned int mask = 0;
- /* To avoid the task_struct disappearing beneath us as we wake up
- * the process, we must first inspect the task_struct->state under the
- * RCU lock, i.e. as we call wake_up_process() we must be holding the
- * rcu_read_lock().
- */
- for_each_engine(engine, i915)
- if (unlikely(intel_engine_wakeup(engine)))
- mask |= intel_engine_flag(engine);
+ for_each_engine(engine, i915, id) {
+ struct intel_breadcrumbs *b = &engine->breadcrumbs;
- return mask;
-}
+ spin_lock_irq(&b->lock);
-unsigned int intel_kick_signalers(struct drm_i915_private *i915)
-{
- struct intel_engine_cs *engine;
- unsigned int mask = 0;
+ if (b->first_wait) {
+ wake_up_process(b->first_wait->tsk);
+ mask |= intel_engine_flag(engine);
+ }
- for_each_engine(engine, i915) {
- if (unlikely(READ_ONCE(engine->breadcrumbs.first_signal))) {
- wake_up_process(engine->breadcrumbs.signaler);
+ if (b->first_signal) {
+ wake_up_process(b->signaler);
mask |= intel_engine_flag(engine);
}
+
+ spin_unlock_irq(&b->lock);
}
return mask;
diff --git a/drivers/gpu/drm/i915/intel_color.c b/drivers/gpu/drm/i915/intel_color.c
index 95a72771eea6..d81232b79f00 100644
--- a/drivers/gpu/drm/i915/intel_color.c
+++ b/drivers/gpu/drm/i915/intel_color.c
@@ -95,8 +95,7 @@ static void ctm_mult_by_limited(uint64_t *result, int64_t *input)
static void i9xx_load_csc_matrix(struct drm_crtc_state *crtc_state)
{
struct drm_crtc *crtc = crtc_state->crtc;
- struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc->dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int i, pipe = intel_crtc->pipe;
uint16_t coeffs[9] = { 0, };
@@ -180,7 +179,7 @@ static void i9xx_load_csc_matrix(struct drm_crtc_state *crtc_state)
I915_WRITE(PIPE_CSC_PREOFF_ME(pipe), 0);
I915_WRITE(PIPE_CSC_PREOFF_LO(pipe), 0);
- if (INTEL_INFO(dev)->gen > 6) {
+ if (INTEL_GEN(dev_priv) > 6) {
uint16_t postoff = 0;
if (intel_crtc_state->limited_color_range)
@@ -273,7 +272,7 @@ static void i9xx_load_luts_internal(struct drm_crtc *crtc,
enum pipe pipe = intel_crtc->pipe;
int i;
- if (HAS_GMCH_DISPLAY(dev)) {
+ if (HAS_GMCH_DISPLAY(dev_priv)) {
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
assert_dsi_pll_enabled(dev_priv);
else
@@ -288,7 +287,7 @@ static void i9xx_load_luts_internal(struct drm_crtc *crtc,
(drm_color_lut_extract(lut[i].green, 8) << 8) |
drm_color_lut_extract(lut[i].blue, 8);
- if (HAS_GMCH_DISPLAY(dev))
+ if (HAS_GMCH_DISPLAY(dev_priv))
I915_WRITE(PALETTE(pipe, i), word);
else
I915_WRITE(LGC_PALETTE(pipe, i), word);
@@ -297,7 +296,7 @@ static void i9xx_load_luts_internal(struct drm_crtc *crtc,
for (i = 0; i < 256; i++) {
uint32_t word = (i << 16) | (i << 8) | i;
- if (HAS_GMCH_DISPLAY(dev))
+ if (HAS_GMCH_DISPLAY(dev_priv))
I915_WRITE(PALETTE(pipe, i), word);
else
I915_WRITE(LGC_PALETTE(pipe, i), word);
@@ -326,7 +325,7 @@ static void haswell_load_luts(struct drm_crtc_state *crtc_state)
* Workaround : Do not read or write the pipe palette/gamma data while
* GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled.
*/
- if (IS_HASWELL(dev) && intel_crtc_state->ips_enabled &&
+ if (IS_HASWELL(dev_priv) && intel_crtc_state->ips_enabled &&
(intel_crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT)) {
hsw_disable_ips(intel_crtc);
reenable_ips = true;
@@ -345,11 +344,10 @@ static void haswell_load_luts(struct drm_crtc_state *crtc_state)
static void broadwell_load_luts(struct drm_crtc_state *state)
{
struct drm_crtc *crtc = state->crtc;
- struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc->dev);
struct intel_crtc_state *intel_state = to_intel_crtc_state(state);
enum pipe pipe = to_intel_crtc(crtc)->pipe;
- uint32_t i, lut_size = INTEL_INFO(dev)->color.degamma_lut_size;
+ uint32_t i, lut_size = INTEL_INFO(dev_priv)->color.degamma_lut_size;
if (crtc_state_is_legacy(state)) {
haswell_load_luts(state);
@@ -428,8 +426,7 @@ static void broadwell_load_luts(struct drm_crtc_state *state)
static void cherryview_load_luts(struct drm_crtc_state *state)
{
struct drm_crtc *crtc = state->crtc;
- struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc->dev);
enum pipe pipe = to_intel_crtc(crtc)->pipe;
struct drm_color_lut *lut;
uint32_t i, lut_size;
@@ -446,7 +443,7 @@ static void cherryview_load_luts(struct drm_crtc_state *state)
if (state->degamma_lut) {
lut = (struct drm_color_lut *) state->degamma_lut->data;
- lut_size = INTEL_INFO(dev)->color.degamma_lut_size;
+ lut_size = INTEL_INFO(dev_priv)->color.degamma_lut_size;
for (i = 0; i < lut_size; i++) {
/* Write LUT in U0.14 format. */
word0 =
@@ -461,7 +458,7 @@ static void cherryview_load_luts(struct drm_crtc_state *state)
if (state->gamma_lut) {
lut = (struct drm_color_lut *) state->gamma_lut->data;
- lut_size = INTEL_INFO(dev)->color.gamma_lut_size;
+ lut_size = INTEL_INFO(dev_priv)->color.gamma_lut_size;
for (i = 0; i < lut_size; i++) {
/* Write LUT in U0.10 format. */
word0 =
@@ -497,12 +494,12 @@ void intel_color_load_luts(struct drm_crtc_state *crtc_state)
int intel_color_check(struct drm_crtc *crtc,
struct drm_crtc_state *crtc_state)
{
- struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = to_i915(crtc->dev);
size_t gamma_length, degamma_length;
- degamma_length = INTEL_INFO(dev)->color.degamma_lut_size *
+ degamma_length = INTEL_INFO(dev_priv)->color.degamma_lut_size *
sizeof(struct drm_color_lut);
- gamma_length = INTEL_INFO(dev)->color.gamma_lut_size *
+ gamma_length = INTEL_INFO(dev_priv)->color.gamma_lut_size *
sizeof(struct drm_color_lut);
/*
@@ -529,19 +526,18 @@ int intel_color_check(struct drm_crtc *crtc,
void intel_color_init(struct drm_crtc *crtc)
{
- struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc->dev);
drm_mode_crtc_set_gamma_size(crtc, 256);
- if (IS_CHERRYVIEW(dev)) {
+ if (IS_CHERRYVIEW(dev_priv)) {
dev_priv->display.load_csc_matrix = cherryview_load_csc_matrix;
dev_priv->display.load_luts = cherryview_load_luts;
- } else if (IS_HASWELL(dev)) {
+ } else if (IS_HASWELL(dev_priv)) {
dev_priv->display.load_csc_matrix = i9xx_load_csc_matrix;
dev_priv->display.load_luts = haswell_load_luts;
- } else if (IS_BROADWELL(dev) || IS_SKYLAKE(dev) ||
- IS_BROXTON(dev) || IS_KABYLAKE(dev)) {
+ } else if (IS_BROADWELL(dev_priv) || IS_SKYLAKE(dev_priv) ||
+ IS_BROXTON(dev_priv) || IS_KABYLAKE(dev_priv)) {
dev_priv->display.load_csc_matrix = i9xx_load_csc_matrix;
dev_priv->display.load_luts = broadwell_load_luts;
} else {
@@ -549,10 +545,10 @@ void intel_color_init(struct drm_crtc *crtc)
}
/* Enable color management support when we have degamma & gamma LUTs. */
- if (INTEL_INFO(dev)->color.degamma_lut_size != 0 &&
- INTEL_INFO(dev)->color.gamma_lut_size != 0)
+ if (INTEL_INFO(dev_priv)->color.degamma_lut_size != 0 &&
+ INTEL_INFO(dev_priv)->color.gamma_lut_size != 0)
drm_crtc_enable_color_mgmt(crtc,
- INTEL_INFO(dev)->color.degamma_lut_size,
- true,
- INTEL_INFO(dev)->color.gamma_lut_size);
+ INTEL_INFO(dev_priv)->color.degamma_lut_size,
+ true,
+ INTEL_INFO(dev_priv)->color.gamma_lut_size);
}
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index dfbcf16b41df..86ecec5601d4 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -84,7 +84,7 @@ static bool intel_crt_get_hw_state(struct intel_encoder *encoder,
if (!(tmp & ADPA_DAC_ENABLE))
goto out;
- if (HAS_PCH_CPT(dev))
+ if (HAS_PCH_CPT(dev_priv))
*pipe = PORT_TO_PIPE_CPT(tmp);
else
*pipe = PORT_TO_PIPE(tmp);
@@ -147,14 +147,13 @@ static void intel_crt_set_dpms(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state,
int mode)
{
- struct drm_device *dev = encoder->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_crt *crt = intel_encoder_to_crt(encoder);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
const struct drm_display_mode *adjusted_mode = &crtc_state->base.adjusted_mode;
u32 adpa;
- if (INTEL_INFO(dev)->gen >= 5)
+ if (INTEL_GEN(dev_priv) >= 5)
adpa = ADPA_HOTPLUG_BITS;
else
adpa = 0;
@@ -165,16 +164,16 @@ static void intel_crt_set_dpms(struct intel_encoder *encoder,
adpa |= ADPA_VSYNC_ACTIVE_HIGH;
/* For CPT allow 3 pipe config, for others just use A or B */
- if (HAS_PCH_LPT(dev))
+ if (HAS_PCH_LPT(dev_priv))
; /* Those bits don't exist here */
- else if (HAS_PCH_CPT(dev))
+ else if (HAS_PCH_CPT(dev_priv))
adpa |= PORT_TRANS_SEL_CPT(crtc->pipe);
else if (crtc->pipe == 0)
adpa |= ADPA_PIPE_A_SELECT;
else
adpa |= ADPA_PIPE_B_SELECT;
- if (!HAS_PCH_SPLIT(dev))
+ if (!HAS_PCH_SPLIT(dev_priv))
I915_WRITE(BCLRPAT(crtc->pipe), 0);
switch (mode) {
@@ -241,7 +240,8 @@ intel_crt_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
struct drm_device *dev = connector->dev;
- int max_dotclk = to_i915(dev)->max_dotclk_freq;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ int max_dotclk = dev_priv->max_dotclk_freq;
int max_clock;
if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
@@ -250,15 +250,15 @@ intel_crt_mode_valid(struct drm_connector *connector,
if (mode->clock < 25000)
return MODE_CLOCK_LOW;
- if (HAS_PCH_LPT(dev))
+ if (HAS_PCH_LPT(dev_priv))
max_clock = 180000;
- else if (IS_VALLEYVIEW(dev))
+ else if (IS_VALLEYVIEW(dev_priv))
/*
* 270 MHz due to current DPLL limits,
* DAC limit supposedly 355 MHz.
*/
max_clock = 270000;
- else if (IS_GEN3(dev) || IS_GEN4(dev))
+ else if (IS_GEN3(dev_priv) || IS_GEN4(dev_priv))
max_clock = 400000;
else
max_clock = 350000;
@@ -269,7 +269,7 @@ intel_crt_mode_valid(struct drm_connector *connector,
return MODE_CLOCK_HIGH;
/* The FDI receiver on LPT only supports 8bpc and only has 2 lanes. */
- if (HAS_PCH_LPT(dev) &&
+ if (HAS_PCH_LPT(dev_priv) &&
(ironlake_get_lanes_required(mode->clock, 270000, 24) > 2))
return MODE_CLOCK_HIGH;
@@ -280,13 +280,13 @@ static bool intel_crt_compute_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config,
struct drm_connector_state *conn_state)
{
- struct drm_device *dev = encoder->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- if (HAS_PCH_SPLIT(dev))
+ if (HAS_PCH_SPLIT(dev_priv))
pipe_config->has_pch_encoder = true;
/* LPT FDI RX only supports 8bpc. */
- if (HAS_PCH_LPT(dev)) {
+ if (HAS_PCH_LPT(dev_priv)) {
if (pipe_config->bw_constrained && pipe_config->pipe_bpp < 24) {
DRM_DEBUG_KMS("LPT only supports 24bpp\n");
return false;
@@ -296,7 +296,7 @@ static bool intel_crt_compute_config(struct intel_encoder *encoder,
}
/* FDI must always be 2.7 GHz */
- if (HAS_DDI(dev))
+ if (HAS_DDI(dev_priv))
pipe_config->port_clock = 135000 * 2;
return true;
@@ -312,7 +312,7 @@ static bool intel_ironlake_crt_detect_hotplug(struct drm_connector *connector)
/* The first time through, trigger an explicit detection cycle */
if (crt->force_hotplug_required) {
- bool turn_off_dac = HAS_PCH_SPLIT(dev);
+ bool turn_off_dac = HAS_PCH_SPLIT(dev_priv);
u32 save_adpa;
crt->force_hotplug_required = 0;
@@ -419,10 +419,10 @@ static bool intel_crt_detect_hotplug(struct drm_connector *connector)
bool ret = false;
int i, tries = 0;
- if (HAS_PCH_SPLIT(dev))
+ if (HAS_PCH_SPLIT(dev_priv))
return intel_ironlake_crt_detect_hotplug(connector);
- if (IS_VALLEYVIEW(dev))
+ if (IS_VALLEYVIEW(dev_priv))
return valleyview_crt_detect_hotplug(connector);
/*
@@ -430,7 +430,7 @@ static bool intel_crt_detect_hotplug(struct drm_connector *connector)
* to get a reliable result.
*/
- if (IS_G4X(dev) && !IS_GM45(dev))
+ if (IS_G4X(dev_priv) && !IS_GM45(dev_priv))
tries = 2;
else
tries = 1;
@@ -566,13 +566,13 @@ intel_crt_load_detect(struct intel_crt *crt, uint32_t pipe)
/* Set the border color to purple. */
I915_WRITE(bclrpat_reg, 0x500050);
- if (!IS_GEN2(dev)) {
+ if (!IS_GEN2(dev_priv)) {
uint32_t pipeconf = I915_READ(pipeconf_reg);
I915_WRITE(pipeconf_reg, pipeconf | PIPECONF_FORCE_BORDER);
POSTING_READ(pipeconf_reg);
 		/* Wait for the next vblank to substitute
 		 * border color for color info */
- intel_wait_for_vblank(dev, pipe);
+ intel_wait_for_vblank(dev_priv, pipe);
st00 = I915_READ8(_VGA_MSR_WRITE);
status = ((st00 & (1 << 4)) != 0) ?
connector_status_connected :
@@ -643,11 +643,36 @@ intel_crt_load_detect(struct intel_crt *crt, uint32_t pipe)
return status;
}
+static int intel_spurious_crt_detect_dmi_callback(const struct dmi_system_id *id)
+{
+ DRM_DEBUG_DRIVER("Skipping CRT detection for %s\n", id->ident);
+ return 1;
+}
+
+static const struct dmi_system_id intel_spurious_crt_detect[] = {
+ {
+ .callback = intel_spurious_crt_detect_dmi_callback,
+ .ident = "ACER ZGB",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ACER"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "ZGB"),
+ },
+ },
+ {
+ .callback = intel_spurious_crt_detect_dmi_callback,
+ .ident = "Intel DZ77BH-55K",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "Intel Corporation"),
+ DMI_MATCH(DMI_BOARD_NAME, "DZ77BH-55K"),
+ },
+ },
+ { }
+};
+
static enum drm_connector_status
intel_crt_detect(struct drm_connector *connector, bool force)
{
- struct drm_device *dev = connector->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(connector->dev);
struct intel_crt *crt = intel_attached_crt(connector);
struct intel_encoder *intel_encoder = &crt->base;
enum intel_display_power_domain power_domain;
@@ -659,10 +684,14 @@ intel_crt_detect(struct drm_connector *connector, bool force)
connector->base.id, connector->name,
force);
+ /* Skip machines without VGA that falsely report hotplug events */
+ if (dmi_check_system(intel_spurious_crt_detect))
+ return connector_status_disconnected;
+
power_domain = intel_display_port_power_domain(intel_encoder);
intel_display_power_get(dev_priv, power_domain);
- if (I915_HAS_HOTPLUG(dev)) {
+ if (I915_HAS_HOTPLUG(dev_priv)) {
 		/* We cannot rely on the HPD pin always being correctly wired
 		 * up; for example, many KVMs do not pass it through, so we
 		 * only trust an assertion that the monitor is connected.
@@ -684,7 +713,7 @@ intel_crt_detect(struct drm_connector *connector, bool force)
* broken monitor (without edid) to work behind a broken kvm (that fails
* to have the right resistors for HP detection) needs to fix this up.
* For now just bail out. */
- if (I915_HAS_HOTPLUG(dev) && !i915.load_detect_test) {
+ if (I915_HAS_HOTPLUG(dev_priv) && !i915.load_detect_test) {
status = connector_status_disconnected;
goto out;
}
@@ -700,7 +729,7 @@ intel_crt_detect(struct drm_connector *connector, bool force)
if (intel_get_load_detect_pipe(connector, NULL, &tmp, &ctx)) {
if (intel_crt_detect_ddc(connector))
status = connector_status_connected;
- else if (INTEL_INFO(dev)->gen < 4)
+ else if (INTEL_GEN(dev_priv) < 4)
status = intel_crt_load_detect(crt,
to_intel_crtc(connector->state->crtc)->pipe);
else if (i915.load_detect_test)
@@ -740,7 +769,7 @@ static int intel_crt_get_modes(struct drm_connector *connector)
i2c = intel_gmbus_get_adapter(dev_priv, dev_priv->vbt.crt_ddc_pin);
ret = intel_crt_ddc_get_modes(connector, i2c);
- if (ret || !IS_G4X(dev))
+ if (ret || !IS_G4X(dev_priv))
goto out;
/* Try to probe digital port for output in DVI-I -> VGA mode. */
@@ -762,11 +791,10 @@ static int intel_crt_set_property(struct drm_connector *connector,
void intel_crt_reset(struct drm_encoder *encoder)
{
- struct drm_device *dev = encoder->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(encoder->dev);
struct intel_crt *crt = intel_encoder_to_crt(to_intel_encoder(encoder));
- if (INTEL_INFO(dev)->gen >= 5) {
+ if (INTEL_GEN(dev_priv) >= 5) {
u32 adpa;
adpa = I915_READ(crt->adpa_reg);
@@ -808,32 +836,6 @@ static const struct drm_encoder_funcs intel_crt_enc_funcs = {
.destroy = intel_encoder_destroy,
};
-static int intel_no_crt_dmi_callback(const struct dmi_system_id *id)
-{
- DRM_INFO("Skipping CRT initialization for %s\n", id->ident);
- return 1;
-}
-
-static const struct dmi_system_id intel_no_crt[] = {
- {
- .callback = intel_no_crt_dmi_callback,
- .ident = "ACER ZGB",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "ACER"),
- DMI_MATCH(DMI_PRODUCT_NAME, "ZGB"),
- },
- },
- {
- .callback = intel_no_crt_dmi_callback,
- .ident = "DELL XPS 8700",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "XPS 8700"),
- },
- },
- { }
-};
-
void intel_crt_init(struct drm_device *dev)
{
struct drm_connector *connector;
@@ -843,13 +845,9 @@ void intel_crt_init(struct drm_device *dev)
i915_reg_t adpa_reg;
u32 adpa;
- /* Skip machines without VGA that falsely report hotplug events */
- if (dmi_check_system(intel_no_crt))
- return;
-
- if (HAS_PCH_SPLIT(dev))
+ if (HAS_PCH_SPLIT(dev_priv))
adpa_reg = PCH_ADPA;
- else if (IS_VALLEYVIEW(dev))
+ else if (IS_VALLEYVIEW(dev_priv))
adpa_reg = VLV_ADPA;
else
adpa_reg = ADPA;
@@ -893,12 +891,12 @@ void intel_crt_init(struct drm_device *dev)
crt->base.type = INTEL_OUTPUT_ANALOG;
crt->base.cloneable = (1 << INTEL_OUTPUT_DVO) | (1 << INTEL_OUTPUT_HDMI);
- if (IS_I830(dev))
+ if (IS_I830(dev_priv))
crt->base.crtc_mask = (1 << 0);
else
crt->base.crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
- if (IS_GEN2(dev))
+ if (IS_GEN2(dev_priv))
connector->interlace_allowed = 0;
else
connector->interlace_allowed = 1;
@@ -907,20 +905,23 @@ void intel_crt_init(struct drm_device *dev)
crt->adpa_reg = adpa_reg;
crt->base.compute_config = intel_crt_compute_config;
- if (HAS_PCH_SPLIT(dev)) {
+ if (HAS_PCH_SPLIT(dev_priv)) {
crt->base.disable = pch_disable_crt;
crt->base.post_disable = pch_post_disable_crt;
} else {
crt->base.disable = intel_disable_crt;
}
crt->base.enable = intel_enable_crt;
- if (I915_HAS_HOTPLUG(dev))
+ if (I915_HAS_HOTPLUG(dev_priv) &&
+ !dmi_check_system(intel_spurious_crt_detect))
crt->base.hpd_pin = HPD_CRT;
- if (HAS_DDI(dev)) {
+ if (HAS_DDI(dev_priv)) {
+ crt->base.port = PORT_E;
crt->base.get_config = hsw_crt_get_config;
crt->base.get_hw_state = intel_ddi_get_hw_state;
crt->base.post_disable = hsw_post_disable_crt;
} else {
+ crt->base.port = PORT_NONE;
crt->base.get_config = intel_crt_get_config;
crt->base.get_hw_state = intel_crt_get_hw_state;
}
@@ -928,7 +929,7 @@ void intel_crt_init(struct drm_device *dev)
drm_connector_helper_add(connector, &intel_crt_connector_helper_funcs);
- if (!I915_HAS_HOTPLUG(dev))
+ if (!I915_HAS_HOTPLUG(dev_priv))
intel_connector->polled = DRM_CONNECTOR_POLL_CONNECT;
/*
@@ -941,7 +942,7 @@ void intel_crt_init(struct drm_device *dev)
* polarity and link reversal bits or not, instead of relying on the
* BIOS.
*/
- if (HAS_PCH_LPT(dev)) {
+ if (HAS_PCH_LPT(dev_priv)) {
u32 fdi_config = FDI_RX_POLARITY_REVERSED_LPT |
FDI_RX_LINK_REVERSAL_OVERRIDE;
diff --git a/drivers/gpu/drm/i915/intel_csr.c b/drivers/gpu/drm/i915/intel_csr.c
index 1ea0e1f43397..d7a04bca8c28 100644
--- a/drivers/gpu/drm/i915/intel_csr.c
+++ b/drivers/gpu/drm/i915/intel_csr.c
@@ -168,12 +168,6 @@ struct stepping_info {
char substepping;
};
-static const struct stepping_info kbl_stepping_info[] = {
- {'A', '0'}, {'B', '0'}, {'C', '0'},
- {'D', '0'}, {'E', '0'}, {'F', '0'},
- {'G', '0'}, {'H', '0'}, {'I', '0'},
-};
-
static const struct stepping_info skl_stepping_info[] = {
{'A', '0'}, {'B', '0'}, {'C', '0'},
{'D', '0'}, {'E', '0'}, {'F', '0'},
@@ -194,10 +188,7 @@ intel_get_stepping_info(struct drm_i915_private *dev_priv)
const struct stepping_info *si;
unsigned int size;
- if (IS_KABYLAKE(dev_priv)) {
- size = ARRAY_SIZE(kbl_stepping_info);
- si = kbl_stepping_info;
- } else if (IS_SKYLAKE(dev_priv)) {
+ if (IS_SKYLAKE(dev_priv)) {
size = ARRAY_SIZE(skl_stepping_info);
si = skl_stepping_info;
} else if (IS_BROXTON(dev_priv)) {
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index 15d47c87def6..10ec9d4b7d45 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -167,8 +167,47 @@ static const struct ddi_buf_trans skl_y_ddi_translations_dp[] = {
{ 0x80005012, 0x000000C0, 0x3 },
};
+/* Kabylake H and S */
+static const struct ddi_buf_trans kbl_ddi_translations_dp[] = {
+ { 0x00002016, 0x000000A0, 0x0 },
+ { 0x00005012, 0x0000009B, 0x0 },
+ { 0x00007011, 0x00000088, 0x0 },
+ { 0x80009010, 0x000000C0, 0x1 },
+ { 0x00002016, 0x0000009B, 0x0 },
+ { 0x00005012, 0x00000088, 0x0 },
+ { 0x80007011, 0x000000C0, 0x1 },
+ { 0x00002016, 0x00000097, 0x0 },
+ { 0x80005012, 0x000000C0, 0x1 },
+};
+
+/* Kabylake U */
+static const struct ddi_buf_trans kbl_u_ddi_translations_dp[] = {
+ { 0x0000201B, 0x000000A1, 0x0 },
+ { 0x00005012, 0x00000088, 0x0 },
+ { 0x80007011, 0x000000CD, 0x3 },
+ { 0x80009010, 0x000000C0, 0x3 },
+ { 0x0000201B, 0x0000009D, 0x0 },
+ { 0x80005012, 0x000000C0, 0x3 },
+ { 0x80007011, 0x000000C0, 0x3 },
+ { 0x00002016, 0x0000004F, 0x0 },
+ { 0x80005012, 0x000000C0, 0x3 },
+};
+
+/* Kabylake Y */
+static const struct ddi_buf_trans kbl_y_ddi_translations_dp[] = {
+ { 0x00001017, 0x000000A1, 0x0 },
+ { 0x00005012, 0x00000088, 0x0 },
+ { 0x80007011, 0x000000CD, 0x3 },
+ { 0x8000800F, 0x000000C0, 0x3 },
+ { 0x00001017, 0x0000009D, 0x0 },
+ { 0x80005012, 0x000000C0, 0x3 },
+ { 0x80007011, 0x000000C0, 0x3 },
+ { 0x00001017, 0x0000004C, 0x0 },
+ { 0x80005012, 0x000000C0, 0x3 },
+};
+
/*
- * Skylake H and S
+ * Skylake/Kabylake H and S
* eDP 1.4 low vswing translation parameters
*/
static const struct ddi_buf_trans skl_ddi_translations_edp[] = {
@@ -185,7 +224,7 @@ static const struct ddi_buf_trans skl_ddi_translations_edp[] = {
};
/*
- * Skylake U
+ * Skylake/Kabylake U
* eDP 1.4 low vswing translation parameters
*/
static const struct ddi_buf_trans skl_u_ddi_translations_edp[] = {
@@ -202,7 +241,7 @@ static const struct ddi_buf_trans skl_u_ddi_translations_edp[] = {
};
/*
- * Skylake Y
+ * Skylake/Kabylake Y
* eDP 1.4 low vswing translation parameters
*/
static const struct ddi_buf_trans skl_y_ddi_translations_edp[] = {
@@ -218,7 +257,7 @@ static const struct ddi_buf_trans skl_y_ddi_translations_edp[] = {
{ 0x00000018, 0x0000008A, 0x0 },
};
-/* Skylake U, H and S */
+/* Skylake/Kabylake U, H and S */
static const struct ddi_buf_trans skl_ddi_translations_hdmi[] = {
{ 0x00000018, 0x000000AC, 0x0 },
{ 0x00005012, 0x0000009D, 0x0 },
@@ -233,7 +272,7 @@ static const struct ddi_buf_trans skl_ddi_translations_hdmi[] = {
{ 0x80000018, 0x000000C0, 0x1 },
};
-/* Skylake Y */
+/* Skylake/Kabylake Y */
static const struct ddi_buf_trans skl_y_ddi_translations_hdmi[] = {
{ 0x00000018, 0x000000A1, 0x0 },
{ 0x00005012, 0x000000DF, 0x0 },
@@ -334,10 +373,10 @@ bdw_get_buf_trans_edp(struct drm_i915_private *dev_priv, int *n_entries)
static const struct ddi_buf_trans *
skl_get_buf_trans_dp(struct drm_i915_private *dev_priv, int *n_entries)
{
- if (IS_SKL_ULX(dev_priv) || IS_KBL_ULX(dev_priv)) {
+ if (IS_SKL_ULX(dev_priv)) {
*n_entries = ARRAY_SIZE(skl_y_ddi_translations_dp);
return skl_y_ddi_translations_dp;
- } else if (IS_SKL_ULT(dev_priv) || IS_KBL_ULT(dev_priv)) {
+ } else if (IS_SKL_ULT(dev_priv)) {
*n_entries = ARRAY_SIZE(skl_u_ddi_translations_dp);
return skl_u_ddi_translations_dp;
} else {
@@ -347,6 +386,21 @@ skl_get_buf_trans_dp(struct drm_i915_private *dev_priv, int *n_entries)
}
static const struct ddi_buf_trans *
+kbl_get_buf_trans_dp(struct drm_i915_private *dev_priv, int *n_entries)
+{
+ if (IS_KBL_ULX(dev_priv)) {
+ *n_entries = ARRAY_SIZE(kbl_y_ddi_translations_dp);
+ return kbl_y_ddi_translations_dp;
+ } else if (IS_KBL_ULT(dev_priv)) {
+ *n_entries = ARRAY_SIZE(kbl_u_ddi_translations_dp);
+ return kbl_u_ddi_translations_dp;
+ } else {
+ *n_entries = ARRAY_SIZE(kbl_ddi_translations_dp);
+ return kbl_ddi_translations_dp;
+ }
+}
+
+static const struct ddi_buf_trans *
skl_get_buf_trans_edp(struct drm_i915_private *dev_priv, int *n_entries)
{
if (dev_priv->vbt.edp.low_vswing) {
@@ -362,7 +416,10 @@ skl_get_buf_trans_edp(struct drm_i915_private *dev_priv, int *n_entries)
}
}
- return skl_get_buf_trans_dp(dev_priv, n_entries);
+ if (IS_KABYLAKE(dev_priv))
+ return kbl_get_buf_trans_dp(dev_priv, n_entries);
+ else
+ return skl_get_buf_trans_dp(dev_priv, n_entries);
}
static const struct ddi_buf_trans *
@@ -430,21 +487,18 @@ void intel_prepare_dp_ddi_buffers(struct intel_encoder *encoder)
if (IS_BROXTON(dev_priv))
return;
- if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
+ if (IS_KABYLAKE(dev_priv)) {
+ ddi_translations_fdi = NULL;
+ ddi_translations_dp =
+ kbl_get_buf_trans_dp(dev_priv, &n_dp_entries);
+ ddi_translations_edp =
+ skl_get_buf_trans_edp(dev_priv, &n_edp_entries);
+ } else if (IS_SKYLAKE(dev_priv)) {
ddi_translations_fdi = NULL;
ddi_translations_dp =
skl_get_buf_trans_dp(dev_priv, &n_dp_entries);
ddi_translations_edp =
skl_get_buf_trans_edp(dev_priv, &n_edp_entries);
-
- /* If we're boosting the current, set bit 31 of trans1 */
- if (dev_priv->vbt.ddi_port_info[port].dp_boost_level)
- iboost_bit = DDI_BUF_BALANCE_LEG_ENABLE;
-
- if (WARN_ON(encoder->type == INTEL_OUTPUT_EDP &&
- port != PORT_A && port != PORT_E &&
- n_edp_entries > 9))
- n_edp_entries = 9;
} else if (IS_BROADWELL(dev_priv)) {
ddi_translations_fdi = bdw_ddi_translations_fdi;
ddi_translations_dp = bdw_ddi_translations_dp;
@@ -464,6 +518,17 @@ void intel_prepare_dp_ddi_buffers(struct intel_encoder *encoder)
n_dp_entries = ARRAY_SIZE(bdw_ddi_translations_dp);
}
+ if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
+ /* If we're boosting the current, set bit 31 of trans1 */
+ if (dev_priv->vbt.ddi_port_info[port].dp_boost_level)
+ iboost_bit = DDI_BUF_BALANCE_LEG_ENABLE;
+
+ if (WARN_ON(encoder->type == INTEL_OUTPUT_EDP &&
+ port != PORT_A && port != PORT_E &&
+ n_edp_entries > 9))
+ n_edp_entries = 9;
+ }
+
switch (encoder->type) {
case INTEL_OUTPUT_EDP:
ddi_translations = ddi_translations_edp;
@@ -1020,13 +1085,13 @@ static void bxt_ddi_clock_get(struct intel_encoder *encoder,
void intel_ddi_clock_get(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
{
- struct drm_device *dev = encoder->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- if (INTEL_INFO(dev)->gen <= 8)
+ if (INTEL_GEN(dev_priv) <= 8)
hsw_ddi_clock_get(encoder, pipe_config);
- else if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev))
+ else if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
skl_ddi_clock_get(encoder, pipe_config);
- else if (IS_BROXTON(dev))
+ else if (IS_BROXTON(dev_priv))
bxt_ddi_clock_get(encoder, pipe_config);
}
@@ -1081,14 +1146,14 @@ bxt_ddi_pll_select(struct intel_crtc *intel_crtc,
bool intel_ddi_pll_select(struct intel_crtc *intel_crtc,
struct intel_crtc_state *crtc_state)
{
- struct drm_device *dev = intel_crtc->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
struct intel_encoder *intel_encoder =
intel_ddi_get_crtc_new_encoder(crtc_state);
- if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev))
+ if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
return skl_ddi_pll_select(intel_crtc, crtc_state,
intel_encoder);
- else if (IS_BROXTON(dev))
+ else if (IS_BROXTON(dev_priv))
return bxt_ddi_pll_select(intel_crtc, crtc_state,
intel_encoder);
else
@@ -1189,7 +1254,7 @@ void intel_ddi_enable_transcoder_func(struct drm_crtc *crtc)
* eDP when not using the panel fitter, and when not
* using motion blur mitigation (which we don't
* support). */
- if (IS_HASWELL(dev) &&
+ if (IS_HASWELL(dev_priv) &&
(intel_crtc->config->pch_pfit.enabled ||
intel_crtc->config->pch_pfit.force_thru))
temp |= TRANS_DDI_EDP_INPUT_A_ONOFF;
@@ -1434,7 +1499,12 @@ static void skl_ddi_set_iboost(struct intel_encoder *encoder, u32 level)
if (dp_iboost) {
iboost = dp_iboost;
} else {
- ddi_translations = skl_get_buf_trans_dp(dev_priv, &n_entries);
+ if (IS_KABYLAKE(dev_priv))
+ ddi_translations = kbl_get_buf_trans_dp(dev_priv,
+ &n_entries);
+ else
+ ddi_translations = skl_get_buf_trans_dp(dev_priv,
+ &n_entries);
iboost = ddi_translations[level].i_boost;
}
} else if (type == INTEL_OUTPUT_EDP) {
@@ -1477,7 +1547,6 @@ static void bxt_ddi_vswing_sequence(struct drm_i915_private *dev_priv,
{
const struct bxt_ddi_buf_trans *ddi_translations;
u32 n_entries, i;
- uint32_t val;
if (type == INTEL_OUTPUT_EDP && dev_priv->vbt.edp.low_vswing) {
n_entries = ARRAY_SIZE(bxt_ddi_translations_edp);
@@ -1506,38 +1575,11 @@ static void bxt_ddi_vswing_sequence(struct drm_i915_private *dev_priv,
}
}
- /*
- * While we write to the group register to program all lanes at once we
- * can read only lane registers and we pick lanes 0/1 for that.
- */
- val = I915_READ(BXT_PORT_PCS_DW10_LN01(port));
- val &= ~(TX2_SWING_CALC_INIT | TX1_SWING_CALC_INIT);
- I915_WRITE(BXT_PORT_PCS_DW10_GRP(port), val);
-
- val = I915_READ(BXT_PORT_TX_DW2_LN0(port));
- val &= ~(MARGIN_000 | UNIQ_TRANS_SCALE);
- val |= ddi_translations[level].margin << MARGIN_000_SHIFT |
- ddi_translations[level].scale << UNIQ_TRANS_SCALE_SHIFT;
- I915_WRITE(BXT_PORT_TX_DW2_GRP(port), val);
-
- val = I915_READ(BXT_PORT_TX_DW3_LN0(port));
- val &= ~SCALE_DCOMP_METHOD;
- if (ddi_translations[level].enable)
- val |= SCALE_DCOMP_METHOD;
-
- if ((val & UNIQUE_TRANGE_EN_METHOD) && !(val & SCALE_DCOMP_METHOD))
- DRM_ERROR("Disabled scaling while ouniqetrangenmethod was set");
-
- I915_WRITE(BXT_PORT_TX_DW3_GRP(port), val);
-
- val = I915_READ(BXT_PORT_TX_DW4_LN0(port));
- val &= ~DE_EMPHASIS;
- val |= ddi_translations[level].deemphasis << DEEMPH_SHIFT;
- I915_WRITE(BXT_PORT_TX_DW4_GRP(port), val);
-
- val = I915_READ(BXT_PORT_PCS_DW10_LN01(port));
- val |= TX2_SWING_CALC_INIT | TX1_SWING_CALC_INIT;
- I915_WRITE(BXT_PORT_PCS_DW10_GRP(port), val);
+ bxt_ddi_phy_set_signal_level(dev_priv, port,
+ ddi_translations[level].margin,
+ ddi_translations[level].scale,
+ ddi_translations[level].enable,
+ ddi_translations[level].deemphasis);
}
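/*
 * The open-coded BXT PHY register sequence is replaced with a helper; a
 * plausible prototype, inferred purely from the call site above (the real
 * declaration lives elsewhere in this series), would be:
 *
 * void bxt_ddi_phy_set_signal_level(struct drm_i915_private *dev_priv,
 *                                   enum port port, u32 margin, u32 scale,
 *                                   u32 enable, u32 deemphasis);
 */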
static uint32_t translate_signal_level(int signal_levels)
@@ -1711,8 +1753,7 @@ static void intel_ddi_post_disable(struct intel_encoder *intel_encoder,
struct drm_connector_state *old_conn_state)
{
struct drm_encoder *encoder = &intel_encoder->base;
- struct drm_device *dev = encoder->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(encoder->dev);
enum port port = intel_ddi_get_encoder_port(intel_encoder);
int type = intel_encoder->type;
uint32_t val;
@@ -1742,10 +1783,10 @@ static void intel_ddi_post_disable(struct intel_encoder *intel_encoder,
intel_edp_panel_off(intel_dp);
}
- if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev))
+ if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
I915_WRITE(DPLL_CTRL2, (I915_READ(DPLL_CTRL2) |
DPLL_CTRL2_DDI_CLK_OFF(port)));
- else if (INTEL_INFO(dev)->gen < 9)
+ else if (INTEL_GEN(dev_priv) < 9)
I915_WRITE(PORT_CLK_SEL(port), PORT_CLK_SEL_NONE);
if (type == INTEL_OUTPUT_HDMI) {
@@ -1795,8 +1836,7 @@ static void intel_enable_ddi(struct intel_encoder *intel_encoder,
struct drm_encoder *encoder = &intel_encoder->base;
struct drm_crtc *crtc = encoder->crtc;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- struct drm_device *dev = encoder->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(encoder->dev);
enum port port = intel_ddi_get_encoder_port(intel_encoder);
int type = intel_encoder->type;
@@ -1814,7 +1854,7 @@ static void intel_enable_ddi(struct intel_encoder *intel_encoder,
} else if (type == INTEL_OUTPUT_EDP) {
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
- if (port == PORT_A && INTEL_INFO(dev)->gen < 9)
+ if (port == PORT_A && INTEL_GEN(dev_priv) < 9)
intel_dp_stop_link_train(intel_dp);
intel_edp_backlight_on(intel_dp);
@@ -1824,7 +1864,7 @@ static void intel_enable_ddi(struct intel_encoder *intel_encoder,
if (intel_crtc->config->has_audio) {
intel_display_power_get(dev_priv, POWER_DOMAIN_AUDIO);
- intel_audio_codec_enable(intel_encoder);
+ intel_audio_codec_enable(intel_encoder, pipe_config, conn_state);
}
}
@@ -1853,332 +1893,14 @@ static void intel_disable_ddi(struct intel_encoder *intel_encoder,
}
}
-bool bxt_ddi_phy_is_enabled(struct drm_i915_private *dev_priv,
- enum dpio_phy phy)
-{
- enum port port;
-
- if (!(I915_READ(BXT_P_CR_GT_DISP_PWRON) & GT_DISPLAY_POWER_ON(phy)))
- return false;
-
- if ((I915_READ(BXT_PORT_CL1CM_DW0(phy)) &
- (PHY_POWER_GOOD | PHY_RESERVED)) != PHY_POWER_GOOD) {
- DRM_DEBUG_DRIVER("DDI PHY %d powered, but power hasn't settled\n",
- phy);
-
- return false;
- }
-
- if (phy == DPIO_PHY1 &&
- !(I915_READ(BXT_PORT_REF_DW3(DPIO_PHY1)) & GRC_DONE)) {
- DRM_DEBUG_DRIVER("DDI PHY 1 powered, but GRC isn't done\n");
-
- return false;
- }
-
- if (!(I915_READ(BXT_PHY_CTL_FAMILY(phy)) & COMMON_RESET_DIS)) {
- DRM_DEBUG_DRIVER("DDI PHY %d powered, but still in reset\n",
- phy);
-
- return false;
- }
-
- for_each_port_masked(port,
- phy == DPIO_PHY0 ? BIT(PORT_B) | BIT(PORT_C) :
- BIT(PORT_A)) {
- u32 tmp = I915_READ(BXT_PHY_CTL(port));
-
- if (tmp & BXT_PHY_CMNLANE_POWERDOWN_ACK) {
- DRM_DEBUG_DRIVER("DDI PHY %d powered, but common lane "
- "for port %c powered down "
- "(PHY_CTL %08x)\n",
- phy, port_name(port), tmp);
-
- return false;
- }
- }
-
- return true;
-}
-
-static u32 bxt_get_grc(struct drm_i915_private *dev_priv, enum dpio_phy phy)
-{
- u32 val = I915_READ(BXT_PORT_REF_DW6(phy));
-
- return (val & GRC_CODE_MASK) >> GRC_CODE_SHIFT;
-}
-
-static void bxt_phy_wait_grc_done(struct drm_i915_private *dev_priv,
- enum dpio_phy phy)
-{
- if (intel_wait_for_register(dev_priv,
- BXT_PORT_REF_DW3(phy),
- GRC_DONE, GRC_DONE,
- 10))
- DRM_ERROR("timeout waiting for PHY%d GRC\n", phy);
-}
-
-void bxt_ddi_phy_init(struct drm_i915_private *dev_priv, enum dpio_phy phy)
-{
- u32 val;
-
- if (bxt_ddi_phy_is_enabled(dev_priv, phy)) {
- /* Still read out the GRC value for state verification */
- if (phy == DPIO_PHY0)
- dev_priv->bxt_phy_grc = bxt_get_grc(dev_priv, phy);
-
- if (bxt_ddi_phy_verify_state(dev_priv, phy)) {
- DRM_DEBUG_DRIVER("DDI PHY %d already enabled, "
- "won't reprogram it\n", phy);
-
- return;
- }
-
- DRM_DEBUG_DRIVER("DDI PHY %d enabled with invalid state, "
- "force reprogramming it\n", phy);
- }
-
- val = I915_READ(BXT_P_CR_GT_DISP_PWRON);
- val |= GT_DISPLAY_POWER_ON(phy);
- I915_WRITE(BXT_P_CR_GT_DISP_PWRON, val);
-
- /*
- * The PHY registers start out inaccessible and respond to reads with
- * all 1s. Eventually they become accessible as they power up, then
- * the reserved bit will give the default 0. Poll on the reserved bit
- * becoming 0 to find when the PHY is accessible.
- * HW team confirmed that the time to reach phypowergood status is
- * anywhere between 50 us and 100us.
- */
- if (wait_for_us(((I915_READ(BXT_PORT_CL1CM_DW0(phy)) &
- (PHY_RESERVED | PHY_POWER_GOOD)) == PHY_POWER_GOOD), 100)) {
- DRM_ERROR("timeout during PHY%d power on\n", phy);
- }
-
- /* Program PLL Rcomp code offset */
- val = I915_READ(BXT_PORT_CL1CM_DW9(phy));
- val &= ~IREF0RC_OFFSET_MASK;
- val |= 0xE4 << IREF0RC_OFFSET_SHIFT;
- I915_WRITE(BXT_PORT_CL1CM_DW9(phy), val);
-
- val = I915_READ(BXT_PORT_CL1CM_DW10(phy));
- val &= ~IREF1RC_OFFSET_MASK;
- val |= 0xE4 << IREF1RC_OFFSET_SHIFT;
- I915_WRITE(BXT_PORT_CL1CM_DW10(phy), val);
-
- /* Program power gating */
- val = I915_READ(BXT_PORT_CL1CM_DW28(phy));
- val |= OCL1_POWER_DOWN_EN | DW28_OLDO_DYN_PWR_DOWN_EN |
- SUS_CLK_CONFIG;
- I915_WRITE(BXT_PORT_CL1CM_DW28(phy), val);
-
- if (phy == DPIO_PHY0) {
- val = I915_READ(BXT_PORT_CL2CM_DW6_BC);
- val |= DW6_OLDO_DYN_PWR_DOWN_EN;
- I915_WRITE(BXT_PORT_CL2CM_DW6_BC, val);
- }
-
- val = I915_READ(BXT_PORT_CL1CM_DW30(phy));
- val &= ~OCL2_LDOFUSE_PWR_DIS;
- /*
- * On PHY1 disable power on the second channel, since no port is
- * connected there. On PHY0 both channels have a port, so leave it
- * enabled.
- * TODO: port C is only connected on BXT-P, so on BXT0/1 we should
- * power down the second channel on PHY0 as well.
- *
- * FIXME: Clarify programming of the following, the register is
- * read-only with bit 6 fixed at 0 at least in stepping A.
- */
- if (phy == DPIO_PHY1)
- val |= OCL2_LDOFUSE_PWR_DIS;
- I915_WRITE(BXT_PORT_CL1CM_DW30(phy), val);
-
- if (phy == DPIO_PHY0) {
- uint32_t grc_code;
- /*
- * PHY0 isn't connected to an RCOMP resistor so copy over
- * the corresponding calibrated value from PHY1, and disable
- * the automatic calibration on PHY0.
- */
- val = dev_priv->bxt_phy_grc = bxt_get_grc(dev_priv, DPIO_PHY1);
- grc_code = val << GRC_CODE_FAST_SHIFT |
- val << GRC_CODE_SLOW_SHIFT |
- val;
- I915_WRITE(BXT_PORT_REF_DW6(DPIO_PHY0), grc_code);
-
- val = I915_READ(BXT_PORT_REF_DW8(DPIO_PHY0));
- val |= GRC_DIS | GRC_RDY_OVRD;
- I915_WRITE(BXT_PORT_REF_DW8(DPIO_PHY0), val);
- }
-
- val = I915_READ(BXT_PHY_CTL_FAMILY(phy));
- val |= COMMON_RESET_DIS;
- I915_WRITE(BXT_PHY_CTL_FAMILY(phy), val);
-
- if (phy == DPIO_PHY1)
- bxt_phy_wait_grc_done(dev_priv, DPIO_PHY1);
-}
-
-void bxt_ddi_phy_uninit(struct drm_i915_private *dev_priv, enum dpio_phy phy)
-{
- uint32_t val;
-
- val = I915_READ(BXT_PHY_CTL_FAMILY(phy));
- val &= ~COMMON_RESET_DIS;
- I915_WRITE(BXT_PHY_CTL_FAMILY(phy), val);
-
- val = I915_READ(BXT_P_CR_GT_DISP_PWRON);
- val &= ~GT_DISPLAY_POWER_ON(phy);
- I915_WRITE(BXT_P_CR_GT_DISP_PWRON, val);
-}
-
-static bool __printf(6, 7)
-__phy_reg_verify_state(struct drm_i915_private *dev_priv, enum dpio_phy phy,
- i915_reg_t reg, u32 mask, u32 expected,
- const char *reg_fmt, ...)
-{
- struct va_format vaf;
- va_list args;
- u32 val;
-
- val = I915_READ(reg);
- if ((val & mask) == expected)
- return true;
-
- va_start(args, reg_fmt);
- vaf.fmt = reg_fmt;
- vaf.va = &args;
-
- DRM_DEBUG_DRIVER("DDI PHY %d reg %pV [%08x] state mismatch: "
- "current %08x, expected %08x (mask %08x)\n",
- phy, &vaf, reg.reg, val, (val & ~mask) | expected,
- mask);
-
- va_end(args);
-
- return false;
-}
-
-bool bxt_ddi_phy_verify_state(struct drm_i915_private *dev_priv,
- enum dpio_phy phy)
-{
- uint32_t mask;
- bool ok;
-
-#define _CHK(reg, mask, exp, fmt, ...) \
- __phy_reg_verify_state(dev_priv, phy, reg, mask, exp, fmt, \
- ## __VA_ARGS__)
-
- if (!bxt_ddi_phy_is_enabled(dev_priv, phy))
- return false;
-
- ok = true;
-
- /* PLL Rcomp code offset */
- ok &= _CHK(BXT_PORT_CL1CM_DW9(phy),
- IREF0RC_OFFSET_MASK, 0xe4 << IREF0RC_OFFSET_SHIFT,
- "BXT_PORT_CL1CM_DW9(%d)", phy);
- ok &= _CHK(BXT_PORT_CL1CM_DW10(phy),
- IREF1RC_OFFSET_MASK, 0xe4 << IREF1RC_OFFSET_SHIFT,
- "BXT_PORT_CL1CM_DW10(%d)", phy);
-
- /* Power gating */
- mask = OCL1_POWER_DOWN_EN | DW28_OLDO_DYN_PWR_DOWN_EN | SUS_CLK_CONFIG;
- ok &= _CHK(BXT_PORT_CL1CM_DW28(phy), mask, mask,
- "BXT_PORT_CL1CM_DW28(%d)", phy);
-
- if (phy == DPIO_PHY0)
- ok &= _CHK(BXT_PORT_CL2CM_DW6_BC,
- DW6_OLDO_DYN_PWR_DOWN_EN, DW6_OLDO_DYN_PWR_DOWN_EN,
- "BXT_PORT_CL2CM_DW6_BC");
-
- /*
- * TODO: Verify BXT_PORT_CL1CM_DW30 bit OCL2_LDOFUSE_PWR_DIS,
- * at least on stepping A this bit is read-only and fixed at 0.
- */
-
- if (phy == DPIO_PHY0) {
- u32 grc_code = dev_priv->bxt_phy_grc;
-
- grc_code = grc_code << GRC_CODE_FAST_SHIFT |
- grc_code << GRC_CODE_SLOW_SHIFT |
- grc_code;
- mask = GRC_CODE_FAST_MASK | GRC_CODE_SLOW_MASK |
- GRC_CODE_NOM_MASK;
- ok &= _CHK(BXT_PORT_REF_DW6(DPIO_PHY0), mask, grc_code,
- "BXT_PORT_REF_DW6(%d)", DPIO_PHY0);
-
- mask = GRC_DIS | GRC_RDY_OVRD;
- ok &= _CHK(BXT_PORT_REF_DW8(DPIO_PHY0), mask, mask,
- "BXT_PORT_REF_DW8(%d)", DPIO_PHY0);
- }
-
- return ok;
-#undef _CHK
-}
-
-static uint8_t
-bxt_ddi_phy_calc_lane_lat_optim_mask(struct intel_encoder *encoder,
- struct intel_crtc_state *pipe_config)
-{
- switch (pipe_config->lane_count) {
- case 1:
- return 0;
- case 2:
- return BIT(2) | BIT(0);
- case 4:
- return BIT(3) | BIT(2) | BIT(0);
- default:
- MISSING_CASE(pipe_config->lane_count);
-
- return 0;
- }
-}
-
static void bxt_ddi_pre_pll_enable(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config,
struct drm_connector_state *conn_state)
{
- struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
- struct drm_i915_private *dev_priv = to_i915(dport->base.base.dev);
- enum port port = dport->port;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
- int lane;
-
- for (lane = 0; lane < 4; lane++) {
- u32 val = I915_READ(BXT_PORT_TX_DW14_LN(port, lane));
-
- /*
- * Note that on CHV this flag is called UPAR, but has
- * the same function.
- */
- val &= ~LATENCY_OPTIM;
- if (intel_crtc->config->lane_lat_optim_mask & BIT(lane))
- val |= LATENCY_OPTIM;
-
- I915_WRITE(BXT_PORT_TX_DW14_LN(port, lane), val);
- }
-}
-
-static uint8_t
-bxt_ddi_phy_get_lane_lat_optim_mask(struct intel_encoder *encoder)
-{
- struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
- struct drm_i915_private *dev_priv = to_i915(dport->base.base.dev);
- enum port port = dport->port;
- int lane;
- uint8_t mask;
-
- mask = 0;
- for (lane = 0; lane < 4; lane++) {
- u32 val = I915_READ(BXT_PORT_TX_DW14_LN(port, lane));
-
- if (val & LATENCY_OPTIM)
- mask |= BIT(lane);
- }
+ uint8_t mask = intel_crtc->config->lane_lat_optim_mask;
- return mask;
+ bxt_ddi_phy_set_lane_optim_mask(encoder, mask);
}
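/*
 * Illustrative sketch, not part of the patch: the lane-count to
 * latency-optim mask mapping visible in the removed
 * bxt_ddi_phy_calc_lane_lat_optim_mask() above; a single lane never
 * sets any bit.
 */
static u8 example_lane_lat_optim_mask(int lane_count)
{
        switch (lane_count) {
        case 1:
                return 0;
        case 2:
                return BIT(2) | BIT(0);
        case 4:
                return BIT(3) | BIT(2) | BIT(0);
        default:
                return 0;
        }
}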
void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp)
@@ -2347,7 +2069,7 @@ static bool intel_ddi_compute_config(struct intel_encoder *encoder,
if (IS_BROXTON(dev_priv) && ret)
pipe_config->lane_lat_optim_mask =
bxt_ddi_phy_calc_lane_lat_optim_mask(encoder,
- pipe_config);
+ pipe_config->lane_count);
return ret;
@@ -2438,7 +2160,7 @@ void intel_ddi_init(struct drm_device *dev, enum port port)
struct intel_digital_port *intel_dig_port;
struct intel_encoder *intel_encoder;
struct drm_encoder *encoder;
- bool init_hdmi, init_dp;
+ bool init_hdmi, init_dp, init_lspcon = false;
int max_lanes;
if (I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES) {
@@ -2470,6 +2192,19 @@ void intel_ddi_init(struct drm_device *dev, enum port port)
init_hdmi = (dev_priv->vbt.ddi_port_info[port].supports_dvi ||
dev_priv->vbt.ddi_port_info[port].supports_hdmi);
init_dp = dev_priv->vbt.ddi_port_info[port].supports_dp;
+
+ if (intel_bios_is_lspcon_present(dev_priv, port)) {
+ /*
+ * An LSPCON device needs to be driven through a DP connector
+ * using a special detection sequence, so make sure DP
+ * is initialized before LSPCON.
+ */
+ init_dp = true;
+ init_lspcon = true;
+ init_hdmi = false;
+ DRM_DEBUG_KMS("VBT says port %c has lspcon\n", port_name(port));
+ }
+
if (!init_dp && !init_hdmi) {
DRM_DEBUG_KMS("VBT says port %c is not DVI/HDMI/DP compatible, respect it\n",
port_name(port));
@@ -2509,7 +2244,7 @@ void intel_ddi_init(struct drm_device *dev, enum port port)
* configuration so that we use the proper lane count for our
* calculations.
*/
- if (IS_BROXTON(dev) && port == PORT_A) {
+ if (IS_BROXTON(dev_priv) && port == PORT_A) {
if (!(intel_dig_port->saved_port_bits & DDI_A_4_LANES)) {
DRM_DEBUG_KMS("BXT BIOS forgot to set DDI_A_4_LANES for port A; fixing\n");
intel_dig_port->saved_port_bits |= DDI_A_4_LANES;
@@ -2520,6 +2255,7 @@ void intel_ddi_init(struct drm_device *dev, enum port port)
intel_dig_port->max_lanes = max_lanes;
intel_encoder->type = INTEL_OUTPUT_UNKNOWN;
+ intel_encoder->port = port;
intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
intel_encoder->cloneable = 0;
@@ -2532,7 +2268,7 @@ void intel_ddi_init(struct drm_device *dev, enum port port)
* On BXT A0/A1, sw needs to activate DDIA HPD logic and
* interrupts to check the external panel connection.
*/
- if (IS_BXT_REVID(dev, 0, BXT_REVID_A1) && port == PORT_B)
+ if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1) && port == PORT_B)
dev_priv->hotplug.irq_port[PORT_A] = intel_dig_port;
else
dev_priv->hotplug.irq_port[port] = intel_dig_port;
@@ -2545,6 +2281,20 @@ void intel_ddi_init(struct drm_device *dev, enum port port)
goto err;
}
+ if (init_lspcon) {
+ if (lspcon_init(intel_dig_port))
+ /* TODO: handle the HDMI infoframe part */
+ DRM_DEBUG_KMS("LSPCON init success on port %c\n",
+ port_name(port));
+ else
+ /*
+ * LSPCON init failed, but DP init succeeded, so
+ * let's try to drive it as a DP++ port.
+ */
+ DRM_ERROR("LSPCON init failed on port %c\n",
+ port_name(port));
+ }
+
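/*
 * Illustrative sketch, not part of the patch: the port-capability
 * policy applied above. An LSPCON port is driven as DP, never as
 * native HDMI, with HDMI reached through the adaptor. Names are
 * illustrative only.
 */
static void example_lspcon_policy(bool lspcon_present, bool *init_dp,
                                  bool *init_hdmi, bool *init_lspcon)
{
        if (lspcon_present) {
                *init_dp = true;        /* LSPCON sits behind a DP connector */
                *init_hdmi = false;     /* no native HDMI on this port */
                *init_lspcon = true;
        }
}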
return;
err:
diff --git a/drivers/gpu/drm/i915/intel_device_info.c b/drivers/gpu/drm/i915/intel_device_info.c
index 1b20e160bc1f..185e3bbc9ec9 100644
--- a/drivers/gpu/drm/i915/intel_device_info.c
+++ b/drivers/gpu/drm/i915/intel_device_info.c
@@ -28,20 +28,14 @@ void intel_device_info_dump(struct drm_i915_private *dev_priv)
{
const struct intel_device_info *info = &dev_priv->info;
-#define PRINT_S(name) "%s"
-#define SEP_EMPTY
-#define PRINT_FLAG(name) info->name ? #name "," : ""
-#define SEP_COMMA ,
- DRM_DEBUG_DRIVER("i915 device info: gen=%i, pciid=0x%04x rev=0x%02x flags="
- DEV_INFO_FOR_EACH_FLAG(PRINT_S, SEP_EMPTY),
+ DRM_DEBUG_DRIVER("i915 device info: gen=%i, pciid=0x%04x rev=0x%02x",
info->gen,
dev_priv->drm.pdev->device,
- dev_priv->drm.pdev->revision,
- DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG, SEP_COMMA));
-#undef PRINT_S
-#undef SEP_EMPTY
+ dev_priv->drm.pdev->revision);
+#define PRINT_FLAG(name) \
+ DRM_DEBUG_DRIVER("i915 device info: " #name ": %s", yesno(info->name))
+ DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG);
#undef PRINT_FLAG
-#undef SEP_COMMA
}
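/*
 * Illustrative sketch, not part of the patch: the X-macro pattern
 * behind DEV_INFO_FOR_EACH_FLAG above. The list macro applies a
 * caller-supplied macro to each flag name, so one list definition
 * yields one log line per flag. The flag list here is made up.
 */
#define EXAMPLE_FOR_EACH_FLAG(func) \
        func(has_csr); \
        func(has_ddi); \
        func(has_fbc);

struct example_info { bool has_csr, has_ddi, has_fbc; };

static void example_dump(const struct example_info *info)
{
#define PRINT_FLAG(name) \
        printk(KERN_DEBUG #name ": %s\n", info->name ? "yes" : "no")
        EXAMPLE_FOR_EACH_FLAG(PRINT_FLAG);
#undef PRINT_FLAG
}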
static void cherryview_sseu_info_init(struct drm_i915_private *dev_priv)
@@ -288,12 +282,13 @@ void intel_device_info_runtime_init(struct drm_i915_private *dev_priv)
info->num_sprites[PIPE_A] = 2;
info->num_sprites[PIPE_B] = 2;
info->num_sprites[PIPE_C] = 1;
- } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+ } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
for_each_pipe(dev_priv, pipe)
info->num_sprites[pipe] = 2;
- else
+ } else if (INTEL_GEN(dev_priv) >= 5) {
for_each_pipe(dev_priv, pipe)
info->num_sprites[pipe] = 1;
+ }
if (i915.disable_display) {
DRM_INFO("Display disabled (module parameter)\n");
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 3cb70d73239b..6daad8613760 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -37,7 +37,6 @@
#include "intel_frontbuffer.h"
#include <drm/i915_drm.h>
#include "i915_drv.h"
-#include "i915_gem_dmabuf.h"
#include "intel_dsi.h"
#include "i915_trace.h"
#include <drm/drm_atomic.h>
@@ -116,8 +115,9 @@ static void chv_prepare_pll(struct intel_crtc *crtc,
const struct intel_crtc_state *pipe_config);
static void intel_begin_crtc_commit(struct drm_crtc *, struct drm_crtc_state *);
static void intel_finish_crtc_commit(struct drm_crtc *, struct drm_crtc_state *);
-static void skl_init_scalers(struct drm_device *dev, struct intel_crtc *intel_crtc,
- struct intel_crtc_state *crtc_state);
+static void skl_init_scalers(struct drm_i915_private *dev_priv,
+ struct intel_crtc *crtc,
+ struct intel_crtc_state *crtc_state);
static void skylake_pfit_enable(struct intel_crtc *crtc);
static void ironlake_pfit_disable(struct intel_crtc *crtc, bool force);
static void ironlake_pfit_enable(struct intel_crtc *crtc);
@@ -600,7 +600,7 @@ int chv_calc_dpll_params(int refclk, struct dpll *clock)
* the given connectors.
*/
-static bool intel_PLL_is_valid(struct drm_device *dev,
+static bool intel_PLL_is_valid(struct drm_i915_private *dev_priv,
const struct intel_limit *limit,
const struct dpll *clock)
{
@@ -613,12 +613,13 @@ static bool intel_PLL_is_valid(struct drm_device *dev,
if (clock->m1 < limit->m1.min || limit->m1.max < clock->m1)
INTELPllInvalid("m1 out of range\n");
- if (!IS_PINEVIEW(dev) && !IS_VALLEYVIEW(dev) &&
- !IS_CHERRYVIEW(dev) && !IS_BROXTON(dev))
+ if (!IS_PINEVIEW(dev_priv) && !IS_VALLEYVIEW(dev_priv) &&
+ !IS_CHERRYVIEW(dev_priv) && !IS_BROXTON(dev_priv))
if (clock->m1 <= clock->m2)
INTELPllInvalid("m1 <= m2\n");
- if (!IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev) && !IS_BROXTON(dev)) {
+ if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv) &&
+ !IS_BROXTON(dev_priv)) {
if (clock->p < limit->p.min || limit->p.max < clock->p)
INTELPllInvalid("p out of range\n");
if (clock->m < limit->m.min || limit->m.max < clock->m)
@@ -698,7 +699,8 @@ i9xx_find_best_dpll(const struct intel_limit *limit,
int this_err;
i9xx_calc_dpll_params(refclk, &clock);
- if (!intel_PLL_is_valid(dev, limit,
+ if (!intel_PLL_is_valid(to_i915(dev),
+ limit,
&clock))
continue;
if (match_clock &&
@@ -753,7 +755,8 @@ pnv_find_best_dpll(const struct intel_limit *limit,
int this_err;
pnv_calc_dpll_params(refclk, &clock);
- if (!intel_PLL_is_valid(dev, limit,
+ if (!intel_PLL_is_valid(to_i915(dev),
+ limit,
&clock))
continue;
if (match_clock &&
@@ -813,7 +816,8 @@ g4x_find_best_dpll(const struct intel_limit *limit,
int this_err;
i9xx_calc_dpll_params(refclk, &clock);
- if (!intel_PLL_is_valid(dev, limit,
+ if (!intel_PLL_is_valid(to_i915(dev),
+ limit,
&clock))
continue;
@@ -845,7 +849,7 @@ static bool vlv_PLL_is_optimal(struct drm_device *dev, int target_freq,
* For CHV ignore the error and consider only the P value.
* Prefer a bigger P value based on HW requirements.
*/
- if (IS_CHERRYVIEW(dev)) {
+ if (IS_CHERRYVIEW(to_i915(dev))) {
*error_ppm = 0;
return calculated_clock->p > best_clock->p;
@@ -909,7 +913,8 @@ vlv_find_best_dpll(const struct intel_limit *limit,
vlv_calc_dpll_params(refclk, &clock);
- if (!intel_PLL_is_valid(dev, limit,
+ if (!intel_PLL_is_valid(to_i915(dev),
+ limit,
&clock))
continue;
@@ -977,7 +982,7 @@ chv_find_best_dpll(const struct intel_limit *limit,
chv_calc_dpll_params(refclk, &clock);
- if (!intel_PLL_is_valid(dev, limit, &clock))
+ if (!intel_PLL_is_valid(to_i915(dev), limit, &clock))
continue;
if (!vlv_PLL_is_optimal(dev, target, &clock, best_clock,
@@ -1003,10 +1008,8 @@ bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state, int target_clock,
target_clock, refclk, NULL, best_clock);
}
-bool intel_crtc_active(struct drm_crtc *crtc)
+bool intel_crtc_active(struct intel_crtc *crtc)
{
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-
/* Be paranoid as we can arrive here with only partial
* state retrieved from the hardware during setup.
*
@@ -1020,27 +1023,25 @@ bool intel_crtc_active(struct drm_crtc *crtc)
* crtc->state->active once we have proper CRTC states wired up
* for atomic.
*/
- return intel_crtc->active && crtc->primary->state->fb &&
- intel_crtc->config->base.adjusted_mode.crtc_clock;
+ return crtc->active && crtc->base.primary->state->fb &&
+ crtc->config->base.adjusted_mode.crtc_clock;
}
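/*
 * Illustrative sketch, not part of the patch: the embedded-base upcast
 * that to_intel_crtc()/to_i915() perform (via container_of() from
 * <linux/kernel.h>), which is what lets intel_crtc_active() above take
 * the derived type directly. Simplified types, for illustration only.
 */
struct example_base_crtc { int id; };

struct example_intel_crtc {
        struct example_base_crtc base;
        bool active;
};

static struct example_intel_crtc *
example_to_intel_crtc(struct example_base_crtc *crtc)
{
        return container_of(crtc, struct example_intel_crtc, base);
}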
enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
enum pipe pipe)
{
- struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
- return intel_crtc->config->cpu_transcoder;
+ return crtc->config->cpu_transcoder;
}
-static bool pipe_dsl_stopped(struct drm_device *dev, enum pipe pipe)
+static bool pipe_dsl_stopped(struct drm_i915_private *dev_priv, enum pipe pipe)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
i915_reg_t reg = PIPEDSL(pipe);
u32 line1, line2;
u32 line_mask;
- if (IS_GEN2(dev))
+ if (IS_GEN2(dev_priv))
line_mask = DSL_LINEMASK_GEN2;
else
line_mask = DSL_LINEMASK_GEN3;
@@ -1070,12 +1071,11 @@ static bool pipe_dsl_stopped(struct drm_device *dev, enum pipe pipe)
*/
static void intel_wait_for_pipe_off(struct intel_crtc *crtc)
{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;
enum pipe pipe = crtc->pipe;
- if (INTEL_INFO(dev)->gen >= 4) {
+ if (INTEL_GEN(dev_priv) >= 4) {
i915_reg_t reg = PIPECONF(cpu_transcoder);
/* Wait for the Pipe State to go off */
@@ -1085,7 +1085,7 @@ static void intel_wait_for_pipe_off(struct intel_crtc *crtc)
WARN(1, "pipe_off wait timed out\n");
} else {
/* Wait for the display line to settle */
- if (wait_for(pipe_dsl_stopped(dev, pipe), 100))
+ if (wait_for(pipe_dsl_stopped(dev_priv, pipe), 100))
WARN(1, "pipe_off wait timed out\n");
}
}
@@ -1187,19 +1187,17 @@ void assert_fdi_rx_pll(struct drm_i915_private *dev_priv,
onoff(state), onoff(cur_state));
}
-void assert_panel_unlocked(struct drm_i915_private *dev_priv,
- enum pipe pipe)
+void assert_panel_unlocked(struct drm_i915_private *dev_priv, enum pipe pipe)
{
- struct drm_device *dev = &dev_priv->drm;
i915_reg_t pp_reg;
u32 val;
enum pipe panel_pipe = PIPE_A;
bool locked = true;
- if (WARN_ON(HAS_DDI(dev)))
+ if (WARN_ON(HAS_DDI(dev_priv)))
return;
- if (HAS_PCH_SPLIT(dev)) {
+ if (HAS_PCH_SPLIT(dev_priv)) {
u32 port_sel;
pp_reg = PP_CONTROL(0);
@@ -1209,7 +1207,7 @@ void assert_panel_unlocked(struct drm_i915_private *dev_priv,
I915_READ(PCH_LVDS) & LVDS_PIPEB_SELECT)
panel_pipe = PIPE_B;
/* XXX: else fix for eDP */
- } else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
+ } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
/* presumably write lock depends on pipe, not port select */
pp_reg = PP_CONTROL(pipe);
panel_pipe = pipe;
@@ -1232,10 +1230,9 @@ void assert_panel_unlocked(struct drm_i915_private *dev_priv,
static void assert_cursor(struct drm_i915_private *dev_priv,
enum pipe pipe, bool state)
{
- struct drm_device *dev = &dev_priv->drm;
bool cur_state;
- if (IS_845G(dev) || IS_I865G(dev))
+ if (IS_845G(dev_priv) || IS_I865G(dev_priv))
cur_state = I915_READ(CURCNTR(PIPE_A)) & CURSOR_ENABLE;
else
cur_state = I915_READ(CURCNTR(pipe)) & CURSOR_MODE;
@@ -1294,11 +1291,10 @@ static void assert_plane(struct drm_i915_private *dev_priv,
static void assert_planes_disabled(struct drm_i915_private *dev_priv,
enum pipe pipe)
{
- struct drm_device *dev = &dev_priv->drm;
int i;
/* Primary planes are fixed to pipes on gen4+ */
- if (INTEL_INFO(dev)->gen >= 4) {
+ if (INTEL_GEN(dev_priv) >= 4) {
u32 val = I915_READ(DSPCNTR(pipe));
I915_STATE_WARN(val & DISPLAY_PLANE_ENABLE,
"plane %c assertion failure, should be disabled but not\n",
@@ -1320,29 +1316,28 @@ static void assert_planes_disabled(struct drm_i915_private *dev_priv,
static void assert_sprites_disabled(struct drm_i915_private *dev_priv,
enum pipe pipe)
{
- struct drm_device *dev = &dev_priv->drm;
int sprite;
- if (INTEL_INFO(dev)->gen >= 9) {
+ if (INTEL_GEN(dev_priv) >= 9) {
for_each_sprite(dev_priv, pipe, sprite) {
u32 val = I915_READ(PLANE_CTL(pipe, sprite));
I915_STATE_WARN(val & PLANE_CTL_ENABLE,
"plane %d assertion failure, should be off on pipe %c but is still active\n",
sprite, pipe_name(pipe));
}
- } else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
+ } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
for_each_sprite(dev_priv, pipe, sprite) {
u32 val = I915_READ(SPCNTR(pipe, sprite));
I915_STATE_WARN(val & SP_ENABLE,
"sprite %c assertion failure, should be off on pipe %c but is still active\n",
sprite_name(pipe, sprite), pipe_name(pipe));
}
- } else if (INTEL_INFO(dev)->gen >= 7) {
+ } else if (INTEL_GEN(dev_priv) >= 7) {
u32 val = I915_READ(SPRCTL(pipe));
I915_STATE_WARN(val & SPRITE_ENABLE,
"sprite %c assertion failure, should be off on pipe %c but is still active\n",
plane_name(pipe), pipe_name(pipe));
- } else if (INTEL_INFO(dev)->gen >= 5) {
+ } else if (INTEL_GEN(dev_priv) >= 5) {
u32 val = I915_READ(DVSCNTR(pipe));
I915_STATE_WARN(val & DVS_ENABLE,
"sprite %c assertion failure, should be off on pipe %c but is still active\n",
@@ -1596,12 +1591,12 @@ static void chv_enable_pll(struct intel_crtc *crtc,
}
}
-static int intel_num_dvo_pipes(struct drm_device *dev)
+static int intel_num_dvo_pipes(struct drm_i915_private *dev_priv)
{
struct intel_crtc *crtc;
int count = 0;
- for_each_intel_crtc(dev, crtc) {
+ for_each_intel_crtc(&dev_priv->drm, crtc) {
count += crtc->base.state->active &&
intel_crtc_has_type(crtc->config, INTEL_OUTPUT_DVO);
}
@@ -1611,19 +1606,18 @@ static int intel_num_dvo_pipes(struct drm_device *dev)
static void i9xx_enable_pll(struct intel_crtc *crtc)
{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
i915_reg_t reg = DPLL(crtc->pipe);
u32 dpll = crtc->config->dpll_hw_state.dpll;
assert_pipe_disabled(dev_priv, crtc->pipe);
/* PLL is protected by panel, make sure we can write it */
- if (IS_MOBILE(dev) && !IS_I830(dev))
+ if (IS_MOBILE(dev_priv) && !IS_I830(dev_priv))
assert_panel_unlocked(dev_priv, crtc->pipe);
/* Enable DVO 2x clock on both PLLs if necessary */
- if (IS_I830(dev) && intel_num_dvo_pipes(dev) > 0) {
+ if (IS_I830(dev_priv) && intel_num_dvo_pipes(dev_priv) > 0) {
/*
* It appears to be important that we don't enable this
* for the current pipe before otherwise configuring the
@@ -1648,7 +1642,7 @@ static void i9xx_enable_pll(struct intel_crtc *crtc)
POSTING_READ(reg);
udelay(150);
- if (INTEL_INFO(dev)->gen >= 4) {
+ if (INTEL_GEN(dev_priv) >= 4) {
I915_WRITE(DPLL_MD(crtc->pipe),
crtc->config->dpll_hw_state.dpll_md);
} else {
@@ -1683,14 +1677,13 @@ static void i9xx_enable_pll(struct intel_crtc *crtc)
*/
static void i9xx_disable_pll(struct intel_crtc *crtc)
{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
/* Disable DVO 2x clock on both PLLs if necessary */
- if (IS_I830(dev) &&
+ if (IS_I830(dev_priv) &&
intel_crtc_has_type(crtc->config, INTEL_OUTPUT_DVO) &&
- !intel_num_dvo_pipes(dev)) {
+ !intel_num_dvo_pipes(dev_priv)) {
I915_WRITE(DPLL(PIPE_B),
I915_READ(DPLL(PIPE_B)) & ~DPLL_DVO_2X_MODE);
I915_WRITE(DPLL(PIPE_A),
@@ -1786,9 +1779,8 @@ void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
static void ironlake_enable_pch_transcoder(struct drm_i915_private *dev_priv,
enum pipe pipe)
{
- struct drm_device *dev = &dev_priv->drm;
- struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_crtc *intel_crtc = intel_get_crtc_for_pipe(dev_priv,
+ pipe);
i915_reg_t reg;
uint32_t val, pipeconf_val;
@@ -1799,7 +1791,7 @@ static void ironlake_enable_pch_transcoder(struct drm_i915_private *dev_priv,
assert_fdi_tx_enabled(dev_priv, pipe);
assert_fdi_rx_enabled(dev_priv, pipe);
- if (HAS_PCH_CPT(dev)) {
+ if (HAS_PCH_CPT(dev_priv)) {
/* Workaround: Set the timing override bit before enabling the
* pch transcoder. */
reg = TRANS_CHICKEN2(pipe);
@@ -1877,7 +1869,6 @@ static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv,
static void ironlake_disable_pch_transcoder(struct drm_i915_private *dev_priv,
enum pipe pipe)
{
- struct drm_device *dev = &dev_priv->drm;
i915_reg_t reg;
uint32_t val;
@@ -1898,7 +1889,7 @@ static void ironlake_disable_pch_transcoder(struct drm_i915_private *dev_priv,
50))
DRM_ERROR("failed to disable transcoder %c\n", pipe_name(pipe));
- if (HAS_PCH_CPT(dev)) {
+ if (HAS_PCH_CPT(dev_priv)) {
/* Workaround: Clear the timing override chicken bit again. */
reg = TRANS_CHICKEN2(pipe);
val = I915_READ(reg);
@@ -1926,6 +1917,18 @@ void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv)
I915_WRITE(TRANS_CHICKEN2(PIPE_A), val);
}
+enum transcoder intel_crtc_pch_transcoder(struct intel_crtc *crtc)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+
+ WARN_ON(!crtc->config->has_pch_encoder);
+
+ if (HAS_PCH_LPT(dev_priv))
+ return TRANSCODER_A;
+ else
+ return (enum transcoder) crtc->pipe;
+}
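/*
 * Illustrative sketch, not part of the patch: a caller of the new
 * intel_crtc_pch_transcoder() helper above, mirroring the later hunk
 * in intel_enable_pipe(). On LPT the PCH has a single transcoder (A);
 * otherwise the PCH transcoder number matches the pipe.
 */
static void example_check_fdi(struct drm_i915_private *dev_priv,
                              struct intel_crtc *crtc)
{
        enum pipe pch_pipe = (enum pipe) intel_crtc_pch_transcoder(crtc);

        assert_fdi_rx_pll_enabled(dev_priv, pch_pipe);
}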
+
/**
* intel_enable_pipe - enable a pipe, asserting requirements
* @crtc: crtc responsible for the pipe
@@ -1939,7 +1942,6 @@ static void intel_enable_pipe(struct intel_crtc *crtc)
struct drm_i915_private *dev_priv = to_i915(dev);
enum pipe pipe = crtc->pipe;
enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;
- enum pipe pch_transcoder;
i915_reg_t reg;
u32 val;
@@ -1949,11 +1951,6 @@ static void intel_enable_pipe(struct intel_crtc *crtc)
assert_cursor_disabled(dev_priv, pipe);
assert_sprites_disabled(dev_priv, pipe);
- if (HAS_PCH_LPT(dev_priv))
- pch_transcoder = TRANSCODER_A;
- else
- pch_transcoder = pipe;
-
/*
* A pipe without a PLL won't actually be able to drive bits from
* a plane. On ILK+ the pipe PLLs are integrated, so we don't
@@ -1967,7 +1964,8 @@ static void intel_enable_pipe(struct intel_crtc *crtc)
} else {
if (crtc->config->has_pch_encoder) {
/* if driving the PCH, we need FDI enabled */
- assert_fdi_rx_pll_enabled(dev_priv, pch_transcoder);
+ assert_fdi_rx_pll_enabled(dev_priv,
+ (enum pipe) intel_crtc_pch_transcoder(crtc));
assert_fdi_tx_pll_enabled(dev_priv,
(enum pipe) cpu_transcoder);
}
@@ -2139,7 +2137,7 @@ intel_fill_fb_ggtt_view(struct i915_ggtt_view *view,
const struct drm_framebuffer *fb,
unsigned int rotation)
{
- if (intel_rotation_90_or_270(rotation)) {
+ if (drm_rotation_90_or_270(rotation)) {
*view = i915_ggtt_view_rotated;
view->params.rotated = to_intel_framebuffer(fb)->rot_info;
} else {
@@ -2191,7 +2189,7 @@ intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb, unsigned int rotation)
WARN_ON(!mutex_is_locked(&dev->struct_mutex));
- alignment = intel_surf_alignment(dev_priv, fb->modifier[0]);
+ alignment = intel_surf_alignment(dev_priv, fb->modifier);
intel_fill_fb_ggtt_view(&view, fb, rotation);
@@ -2260,7 +2258,7 @@ void intel_unpin_fb_obj(struct drm_framebuffer *fb, unsigned int rotation)
static int intel_fb_pitch(const struct drm_framebuffer *fb, int plane,
unsigned int rotation)
{
- if (intel_rotation_90_or_270(rotation))
+ if (drm_rotation_90_or_270(rotation))
return to_intel_framebuffer(fb)->rotated[plane].pitch;
else
return fb->pitches[plane];
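/*
 * Illustrative sketch, not part of the patch: what the
 * drm_rotation_90_or_270() test used above amounts to. The rotation
 * property is a bitmask, so the 90/270 check is a mask against the two
 * rotated bits.
 */
static inline bool example_rotation_90_or_270(unsigned int rotation)
{
        return rotation & (DRM_ROTATE_90 | DRM_ROTATE_270);
}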
@@ -2296,7 +2294,7 @@ void intel_add_fb_offsets(int *x, int *y,
const struct intel_framebuffer *intel_fb = to_intel_framebuffer(state->base.fb);
unsigned int rotation = state->base.rotation;
- if (intel_rotation_90_or_270(rotation)) {
+ if (drm_rotation_90_or_270(rotation)) {
*x += intel_fb->rotated[plane].x;
*y += intel_fb->rotated[plane].y;
} else {
@@ -2352,15 +2350,15 @@ static u32 intel_adjust_tile_offset(int *x, int *y,
WARN_ON(new_offset > old_offset);
- if (fb->modifier[plane] != DRM_FORMAT_MOD_NONE) {
+ if (fb->modifier != DRM_FORMAT_MOD_NONE) {
unsigned int tile_size, tile_width, tile_height;
unsigned int pitch_tiles;
tile_size = intel_tile_size(dev_priv);
intel_tile_dims(dev_priv, &tile_width, &tile_height,
- fb->modifier[plane], cpp);
+ fb->modifier, cpp);
- if (intel_rotation_90_or_270(rotation)) {
+ if (drm_rotation_90_or_270(rotation)) {
pitch_tiles = pitch / tile_height;
swap(tile_width, tile_height);
} else {
@@ -2401,7 +2399,7 @@ static u32 _intel_compute_tile_offset(const struct drm_i915_private *dev_priv,
unsigned int rotation,
u32 alignment)
{
- uint64_t fb_modifier = fb->modifier[plane];
+ uint64_t fb_modifier = fb->modifier;
unsigned int cpp = drm_format_plane_cpp(fb->pixel_format, plane);
u32 offset, offset_aligned;
@@ -2416,7 +2414,7 @@ static u32 _intel_compute_tile_offset(const struct drm_i915_private *dev_priv,
intel_tile_dims(dev_priv, &tile_width, &tile_height,
fb_modifier, cpp);
- if (intel_rotation_90_or_270(rotation)) {
+ if (drm_rotation_90_or_270(rotation)) {
pitch_tiles = pitch / tile_height;
swap(tile_width, tile_height);
} else {
@@ -2460,7 +2458,7 @@ u32 intel_compute_tile_offset(int *x, int *y,
if (fb->pixel_format == DRM_FORMAT_NV12 && plane == 1)
alignment = 4096;
else
- alignment = intel_surf_alignment(dev_priv, fb->modifier[plane]);
+ alignment = intel_surf_alignment(dev_priv, fb->modifier);
return _intel_compute_tile_offset(dev_priv, x, y, fb, plane, pitch,
rotation, alignment);
@@ -2542,13 +2540,13 @@ intel_fill_fb_info(struct drm_i915_private *dev_priv,
DRM_ROTATE_0, tile_size);
offset /= tile_size;
- if (fb->modifier[i] != DRM_FORMAT_MOD_NONE) {
+ if (fb->modifier != DRM_FORMAT_MOD_NONE) {
unsigned int tile_width, tile_height;
unsigned int pitch_tiles;
struct drm_rect r;
intel_tile_dims(dev_priv, &tile_width, &tile_height,
- fb->modifier[i], cpp);
+ fb->modifier, cpp);
rot_info->plane[i].offset = offset;
rot_info->plane[i].stride = DIV_ROUND_UP(fb->pitches[i], tile_width * cpp);
@@ -2707,7 +2705,7 @@ intel_alloc_initial_plane_obj(struct intel_crtc *crtc,
mode_cmd.width = fb->width;
mode_cmd.height = fb->height;
mode_cmd.pitches[0] = fb->pitches[0];
- mode_cmd.modifier[0] = fb->modifier[0];
+ mode_cmd.modifier[0] = fb->modifier;
mode_cmd.flags = DRM_MODE_FB_MODIFIERS;
if (intel_framebuffer_init(dev, to_intel_framebuffer(fb),
@@ -2817,14 +2815,8 @@ valid_fb:
plane_state->crtc_w = fb->width;
plane_state->crtc_h = fb->height;
- intel_state->base.src.x1 = plane_state->src_x;
- intel_state->base.src.y1 = plane_state->src_y;
- intel_state->base.src.x2 = plane_state->src_x + plane_state->src_w;
- intel_state->base.src.y2 = plane_state->src_y + plane_state->src_h;
- intel_state->base.dst.x1 = plane_state->crtc_x;
- intel_state->base.dst.y1 = plane_state->crtc_y;
- intel_state->base.dst.x2 = plane_state->crtc_x + plane_state->crtc_w;
- intel_state->base.dst.y2 = plane_state->crtc_y + plane_state->crtc_h;
+ intel_state->base.src = drm_plane_state_src(plane_state);
+ intel_state->base.dst = drm_plane_state_dest(plane_state);
obj = intel_fb_obj(fb);
if (i915_gem_object_is_tiled(obj))
@@ -2843,7 +2835,7 @@ static int skl_max_plane_width(const struct drm_framebuffer *fb, int plane,
{
int cpp = drm_format_plane_cpp(fb->pixel_format, plane);
- switch (fb->modifier[plane]) {
+ switch (fb->modifier) {
case DRM_FORMAT_MOD_NONE:
case I915_FORMAT_MOD_X_TILED:
switch (cpp) {
@@ -2874,7 +2866,7 @@ static int skl_max_plane_width(const struct drm_framebuffer *fb, int plane,
}
break;
default:
- MISSING_CASE(fb->modifier[plane]);
+ MISSING_CASE(fb->modifier);
}
return 2048;
@@ -2902,7 +2894,7 @@ static int skl_check_main_surface(struct intel_plane_state *plane_state)
intel_add_fb_offsets(&x, &y, plane_state, 0);
offset = intel_compute_tile_offset(&x, &y, plane_state, 0);
- alignment = intel_surf_alignment(dev_priv, fb->modifier[0]);
+ alignment = intel_surf_alignment(dev_priv, fb->modifier);
/*
* AUX surface offset is specified as the distance from the
@@ -2919,7 +2911,7 @@ static int skl_check_main_surface(struct intel_plane_state *plane_state)
*
* TODO: linear and Y-tiled seem fine, Yf untested,
*/
- if (fb->modifier[0] == I915_FORMAT_MOD_X_TILED) {
+ if (fb->modifier == I915_FORMAT_MOD_X_TILED) {
int cpp = drm_format_plane_cpp(fb->pixel_format, 0);
while ((x + w) * cpp > fb->pitches[0]) {
@@ -2976,7 +2968,7 @@ int skl_check_plane_surface(struct intel_plane_state *plane_state)
int ret;
/* Rotate src coordinates to match rotated GTT view */
- if (intel_rotation_90_or_270(rotation))
+ if (drm_rotation_90_or_270(rotation))
drm_rect_rotate(&plane_state->base.src,
fb->width << 16, fb->height << 16,
DRM_ROTATE_270);
@@ -3006,11 +2998,9 @@ static void i9xx_update_primary_plane(struct drm_plane *primary,
const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
{
- struct drm_device *dev = primary->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(primary->dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
struct drm_framebuffer *fb = plane_state->base.fb;
- struct drm_i915_gem_object *obj = intel_fb_obj(fb);
int plane = intel_crtc->plane;
u32 linear_offset;
u32 dspcntr;
@@ -3023,7 +3013,7 @@ static void i9xx_update_primary_plane(struct drm_plane *primary,
dspcntr |= DISPLAY_PLANE_ENABLE;
- if (INTEL_INFO(dev)->gen < 4) {
+ if (INTEL_GEN(dev_priv) < 4) {
if (intel_crtc->pipe == PIPE_B)
dspcntr |= DISPPLANE_SEL_PIPE_B;
@@ -3034,7 +3024,7 @@ static void i9xx_update_primary_plane(struct drm_plane *primary,
((crtc_state->pipe_src_h - 1) << 16) |
(crtc_state->pipe_src_w - 1));
I915_WRITE(DSPPOS(plane), 0);
- } else if (IS_CHERRYVIEW(dev) && plane == PLANE_B) {
+ } else if (IS_CHERRYVIEW(dev_priv) && plane == PLANE_B) {
I915_WRITE(PRIMSIZE(plane),
((crtc_state->pipe_src_h - 1) << 16) |
(crtc_state->pipe_src_w - 1));
@@ -3069,28 +3059,34 @@ static void i9xx_update_primary_plane(struct drm_plane *primary,
}
if (INTEL_GEN(dev_priv) >= 4 &&
- fb->modifier[0] == I915_FORMAT_MOD_X_TILED)
+ fb->modifier == I915_FORMAT_MOD_X_TILED)
dspcntr |= DISPPLANE_TILED;
- if (IS_G4X(dev))
+ if (rotation & DRM_ROTATE_180)
+ dspcntr |= DISPPLANE_ROTATE_180;
+
+ if (rotation & DRM_REFLECT_X)
+ dspcntr |= DISPPLANE_MIRROR;
+
+ if (IS_G4X(dev_priv))
dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;
intel_add_fb_offsets(&x, &y, plane_state, 0);
- if (INTEL_INFO(dev)->gen >= 4)
+ if (INTEL_GEN(dev_priv) >= 4)
intel_crtc->dspaddr_offset =
intel_compute_tile_offset(&x, &y, plane_state, 0);
- if (rotation == DRM_ROTATE_180) {
- dspcntr |= DISPPLANE_ROTATE_180;
-
- x += (crtc_state->pipe_src_w - 1);
- y += (crtc_state->pipe_src_h - 1);
+ if (rotation & DRM_ROTATE_180) {
+ x += crtc_state->pipe_src_w - 1;
+ y += crtc_state->pipe_src_h - 1;
+ } else if (rotation & DRM_REFLECT_X) {
+ x += crtc_state->pipe_src_w - 1;
}
linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0);
- if (INTEL_INFO(dev)->gen < 4)
+ if (INTEL_GEN(dev_priv) < 4)
intel_crtc->dspaddr_offset = linear_offset;
intel_crtc->adjusted_x = x;
@@ -3099,14 +3095,17 @@ static void i9xx_update_primary_plane(struct drm_plane *primary,
I915_WRITE(reg, dspcntr);
I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
- if (INTEL_INFO(dev)->gen >= 4) {
+ if (INTEL_GEN(dev_priv) >= 4) {
I915_WRITE(DSPSURF(plane),
intel_fb_gtt_offset(fb, rotation) +
intel_crtc->dspaddr_offset);
I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
I915_WRITE(DSPLINOFF(plane), linear_offset);
- } else
- I915_WRITE(DSPADDR(plane), i915_gem_object_ggtt_offset(obj, NULL) + linear_offset);
+ } else {
+ I915_WRITE(DSPADDR(plane),
+ intel_fb_gtt_offset(fb, rotation) +
+ intel_crtc->dspaddr_offset);
+ }
POSTING_READ(reg);
}
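/*
 * Illustrative sketch, not part of the patch: the scan-start
 * adjustment done above for the pre-gen4 path. A 180 degree rotation
 * scans out from the bottom-right corner, an X mirror from the
 * top-right. Parameter names are illustrative.
 */
static void example_adjust_scanout(unsigned int rotation,
                                   int src_w, int src_h, int *x, int *y)
{
        if (rotation & DRM_ROTATE_180) {
                *x += src_w - 1;
                *y += src_h - 1;
        } else if (rotation & DRM_REFLECT_X) {
                *x += src_w - 1;
        }
}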
@@ -3145,7 +3144,7 @@ static void ironlake_update_primary_plane(struct drm_plane *primary,
dspcntr = DISPPLANE_GAMMA_ENABLE;
dspcntr |= DISPLAY_PLANE_ENABLE;
- if (IS_HASWELL(dev) || IS_BROADWELL(dev))
+ if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
dspcntr |= DISPPLANE_PIPE_CSC_ENABLE;
switch (fb->pixel_format) {
@@ -3171,10 +3170,13 @@ static void ironlake_update_primary_plane(struct drm_plane *primary,
BUG();
}
- if (fb->modifier[0] == I915_FORMAT_MOD_X_TILED)
+ if (fb->modifier == I915_FORMAT_MOD_X_TILED)
dspcntr |= DISPPLANE_TILED;
- if (!IS_HASWELL(dev) && !IS_BROADWELL(dev))
+ if (rotation & DRM_ROTATE_180)
+ dspcntr |= DISPPLANE_ROTATE_180;
+
+ if (!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv))
dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;
intel_add_fb_offsets(&x, &y, plane_state, 0);
@@ -3182,13 +3184,11 @@ static void ironlake_update_primary_plane(struct drm_plane *primary,
intel_crtc->dspaddr_offset =
intel_compute_tile_offset(&x, &y, plane_state, 0);
- if (rotation == DRM_ROTATE_180) {
- dspcntr |= DISPPLANE_ROTATE_180;
-
- if (!IS_HASWELL(dev) && !IS_BROADWELL(dev)) {
- x += (crtc_state->pipe_src_w - 1);
- y += (crtc_state->pipe_src_h - 1);
- }
+ /* HSW+ does this automagically in hardware */
+ if (!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv) &&
+ rotation & DRM_ROTATE_180) {
+ x += crtc_state->pipe_src_w - 1;
+ y += crtc_state->pipe_src_h - 1;
}
linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0);
@@ -3202,7 +3202,7 @@ static void ironlake_update_primary_plane(struct drm_plane *primary,
I915_WRITE(DSPSURF(plane),
intel_fb_gtt_offset(fb, rotation) +
intel_crtc->dspaddr_offset);
- if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
+ if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
I915_WRITE(DSPOFFSET(plane), (y << 16) | x);
} else {
I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
@@ -3277,12 +3277,12 @@ u32 skl_plane_stride(const struct drm_framebuffer *fb, int plane,
* The stride is either expressed as a multiple of 64 bytes chunks for
* linear buffers or in number of tiles for tiled buffers.
*/
- if (intel_rotation_90_or_270(rotation)) {
+ if (drm_rotation_90_or_270(rotation)) {
int cpp = drm_format_plane_cpp(fb->pixel_format, plane);
- stride /= intel_tile_height(dev_priv, fb->modifier[0], cpp);
+ stride /= intel_tile_height(dev_priv, fb->modifier, cpp);
} else {
- stride /= intel_fb_stride_alignment(dev_priv, fb->modifier[0],
+ stride /= intel_fb_stride_alignment(dev_priv, fb->modifier,
fb->pixel_format);
}
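/*
 * Illustrative sketch, not part of the patch: the stride unit
 * conversion performed in skl_plane_stride() above. Rotated (90/270)
 * surfaces program the stride in tiles of the rotated layout; all
 * other surfaces use units fixed by the tiling format (64-byte chunks
 * for linear). Helper names mirror the ones used in the hunk.
 */
static u32 example_plane_stride(struct drm_i915_private *dev_priv,
                                const struct drm_framebuffer *fb,
                                unsigned int rotation, u32 stride, int cpp)
{
        if (drm_rotation_90_or_270(rotation))
                stride /= intel_tile_height(dev_priv, fb->modifier, cpp);
        else
                stride /= intel_fb_stride_alignment(dev_priv, fb->modifier,
                                                    fb->pixel_format);
        return stride;
}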
@@ -3378,7 +3378,6 @@ static void skylake_update_primary_plane(struct drm_plane *plane,
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
struct drm_framebuffer *fb = plane_state->base.fb;
- const struct skl_wm_values *wm = &dev_priv->wm.skl_results;
int pipe = intel_crtc->pipe;
u32 plane_ctl;
unsigned int rotation = plane_state->base.rotation;
@@ -3399,7 +3398,7 @@ static void skylake_update_primary_plane(struct drm_plane *plane,
PLANE_CTL_PIPE_CSC_ENABLE;
plane_ctl |= skl_plane_ctl_format(fb->pixel_format);
- plane_ctl |= skl_plane_ctl_tiling(fb->modifier[0]);
+ plane_ctl |= skl_plane_ctl_tiling(fb->modifier);
plane_ctl |= PLANE_CTL_PLANE_GAMMA_DISABLE;
plane_ctl |= skl_plane_ctl_rotation(rotation);
@@ -3414,9 +3413,6 @@ static void skylake_update_primary_plane(struct drm_plane *plane,
intel_crtc->adjusted_x = src_x;
intel_crtc->adjusted_y = src_y;
- if (wm->dirty_pipes & drm_crtc_mask(&intel_crtc->base))
- skl_write_plane_wm(intel_crtc, wm, 0);
-
I915_WRITE(PLANE_CTL(pipe, 0), plane_ctl);
I915_WRITE(PLANE_OFFSET(pipe, 0), (src_y << 16) | src_x);
I915_WRITE(PLANE_STRIDE(pipe, 0), stride);
@@ -3451,13 +3447,6 @@ static void skylake_disable_primary_plane(struct drm_plane *primary,
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_crtc->pipe;
- /*
- * We only populate skl_results on watermark updates, and if the
- * plane's visiblity isn't actually changing neither is its watermarks.
- */
- if (!crtc->primary->state->visible)
- skl_write_plane_wm(intel_crtc, &dev_priv->wm.skl_results, 0);
-
I915_WRITE(PLANE_CTL(pipe, 0), 0);
I915_WRITE(PLANE_SURF(pipe, 0), 0);
POSTING_READ(PLANE_SURF(pipe, 0));
@@ -3507,7 +3496,7 @@ __intel_display_resume(struct drm_device *dev,
int i, ret;
intel_modeset_setup_hw_state(dev);
- i915_redisable_vga(dev);
+ i915_redisable_vga(to_i915(dev));
if (!state)
return 0;
@@ -3585,7 +3574,7 @@ void intel_prepare_reset(struct drm_i915_private *dev_priv)
return;
err:
- drm_atomic_state_free(state);
+ drm_atomic_state_put(state);
}
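/*
 * Illustrative sketch, not part of the patch: the reference-counting
 * convention behind the drm_atomic_state_put() calls above, which
 * replace the old drm_atomic_state_free(). Holders take a reference
 * with drm_atomic_state_get() and drop it with drm_atomic_state_put();
 * the state is freed when the last reference goes away.
 */
static void example_keep_state(struct drm_atomic_state *state,
                               struct drm_atomic_state **saved)
{
        drm_atomic_state_get(state);    /* extra ref for the saved pointer */
        *saved = state;
}

static void example_release_state(struct drm_atomic_state **saved)
{
        if (*saved) {
                drm_atomic_state_put(*saved);
                *saved = NULL;
        }
}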
void intel_finish_reset(struct drm_i915_private *dev_priv)
@@ -3604,8 +3593,6 @@ void intel_finish_reset(struct drm_i915_private *dev_priv)
dev_priv->modeset_restore_state = NULL;
- dev_priv->modeset_restore_state = NULL;
-
/* reset doesn't touch the display */
if (!gpu_reset_clobbers_display(dev_priv)) {
if (!state) {
@@ -3647,6 +3634,8 @@ void intel_finish_reset(struct drm_i915_private *dev_priv)
intel_hpd_init(dev_priv);
}
+ if (state)
+ drm_atomic_state_put(state);
drm_modeset_drop_locks(ctx);
drm_modeset_acquire_fini(ctx);
mutex_unlock(&dev->mode_config.mutex);
@@ -3684,8 +3673,7 @@ static bool intel_crtc_has_pending_flip(struct drm_crtc *crtc)
static void intel_update_pipe_config(struct intel_crtc *crtc,
struct intel_crtc_state *old_crtc_state)
{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_crtc_state *pipe_config =
to_intel_crtc_state(crtc->base.state);
@@ -3710,12 +3698,12 @@ static void intel_update_pipe_config(struct intel_crtc *crtc,
(pipe_config->pipe_src_h - 1));
/* on skylake this is done by detaching scalers */
- if (INTEL_INFO(dev)->gen >= 9) {
+ if (INTEL_GEN(dev_priv) >= 9) {
skl_detach_scalers(crtc);
if (pipe_config->pch_pfit.enabled)
skylake_pfit_enable(crtc);
- } else if (HAS_PCH_SPLIT(dev)) {
+ } else if (HAS_PCH_SPLIT(dev_priv)) {
if (pipe_config->pch_pfit.enabled)
ironlake_pfit_enable(crtc);
else if (old_crtc_state->pch_pfit.enabled)
@@ -3735,7 +3723,7 @@ static void intel_fdi_normal_train(struct drm_crtc *crtc)
/* enable normal train */
reg = FDI_TX_CTL(pipe);
temp = I915_READ(reg);
- if (IS_IVYBRIDGE(dev)) {
+ if (IS_IVYBRIDGE(dev_priv)) {
temp &= ~FDI_LINK_TRAIN_NONE_IVB;
temp |= FDI_LINK_TRAIN_NONE_IVB | FDI_TX_ENHANCE_FRAME_ENABLE;
} else {
@@ -3746,7 +3734,7 @@ static void intel_fdi_normal_train(struct drm_crtc *crtc)
reg = FDI_RX_CTL(pipe);
temp = I915_READ(reg);
- if (HAS_PCH_CPT(dev)) {
+ if (HAS_PCH_CPT(dev_priv)) {
temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
temp |= FDI_LINK_TRAIN_NORMAL_CPT;
} else {
@@ -3760,7 +3748,7 @@ static void intel_fdi_normal_train(struct drm_crtc *crtc)
udelay(1000);
/* IVB wants error correction enabled */
- if (IS_IVYBRIDGE(dev))
+ if (IS_IVYBRIDGE(dev_priv))
I915_WRITE(reg, I915_READ(reg) | FDI_FS_ERRC_ENABLE |
FDI_FE_ERRC_ENABLE);
}
@@ -3904,7 +3892,7 @@ static void gen6_fdi_link_train(struct drm_crtc *crtc)
reg = FDI_RX_CTL(pipe);
temp = I915_READ(reg);
- if (HAS_PCH_CPT(dev)) {
+ if (HAS_PCH_CPT(dev_priv)) {
temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
} else {
@@ -3948,7 +3936,7 @@ static void gen6_fdi_link_train(struct drm_crtc *crtc)
temp = I915_READ(reg);
temp &= ~FDI_LINK_TRAIN_NONE;
temp |= FDI_LINK_TRAIN_PATTERN_2;
- if (IS_GEN6(dev)) {
+ if (IS_GEN6(dev_priv)) {
temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
/* SNB-B */
temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
@@ -3957,7 +3945,7 @@ static void gen6_fdi_link_train(struct drm_crtc *crtc)
reg = FDI_RX_CTL(pipe);
temp = I915_READ(reg);
- if (HAS_PCH_CPT(dev)) {
+ if (HAS_PCH_CPT(dev_priv)) {
temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
} else {
@@ -4211,7 +4199,7 @@ static void ironlake_fdi_disable(struct drm_crtc *crtc)
udelay(100);
/* Ironlake workaround, disable clock pointer after downing FDI */
- if (HAS_PCH_IBX(dev))
+ if (HAS_PCH_IBX(dev_priv))
I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
/* still set train pattern 1 */
@@ -4223,7 +4211,7 @@ static void ironlake_fdi_disable(struct drm_crtc *crtc)
reg = FDI_RX_CTL(pipe);
temp = I915_READ(reg);
- if (HAS_PCH_CPT(dev)) {
+ if (HAS_PCH_CPT(dev_priv)) {
temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
} else {
@@ -4241,6 +4229,7 @@ static void ironlake_fdi_disable(struct drm_crtc *crtc)
bool intel_has_pending_fb_unpin(struct drm_device *dev)
{
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *crtc;
/* Note that we don't need to be called with mode_config.lock here
@@ -4255,7 +4244,7 @@ bool intel_has_pending_fb_unpin(struct drm_device *dev)
continue;
if (crtc->flip_work)
- intel_wait_for_vblank(dev, crtc->pipe);
+ intel_wait_for_vblank(dev_priv, crtc->pipe);
return true;
}
@@ -4546,7 +4535,7 @@ static void ironlake_pch_enable(struct drm_crtc *crtc)
assert_pch_transcoder_disabled(dev_priv, pipe);
- if (IS_IVYBRIDGE(dev))
+ if (IS_IVYBRIDGE(dev_priv))
ivybridge_update_fdi_bc_bifurcation(intel_crtc);
/* Write the TU size bits before fdi link training, so that error
@@ -4559,7 +4548,7 @@ static void ironlake_pch_enable(struct drm_crtc *crtc)
/* We need to program the right clock selection before writing the pixel
* mutliplier into the DPLL. */
- if (HAS_PCH_CPT(dev)) {
+ if (HAS_PCH_CPT(dev_priv)) {
u32 sel;
temp = I915_READ(PCH_DPLL_SEL);
@@ -4589,7 +4578,8 @@ static void ironlake_pch_enable(struct drm_crtc *crtc)
intel_fdi_normal_train(crtc);
/* For PCH DP, enable TRANS_DP_CTL */
- if (HAS_PCH_CPT(dev) && intel_crtc_has_dp_encoder(intel_crtc->config)) {
+ if (HAS_PCH_CPT(dev_priv) &&
+ intel_crtc_has_dp_encoder(intel_crtc->config)) {
const struct drm_display_mode *adjusted_mode =
&intel_crtc->config->base.adjusted_mode;
u32 bpc = (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) >> 5;
@@ -4668,7 +4658,7 @@ skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
to_intel_crtc(crtc_state->base.crtc);
int need_scaling;
- need_scaling = intel_rotation_90_or_270(rotation) ?
+ need_scaling = drm_rotation_90_or_270(rotation) ?
(src_h != dst_w || src_w != dst_h):
(src_w != dst_w || src_h != dst_h);
@@ -4729,13 +4719,8 @@ skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
*/
int skl_update_scaler_crtc(struct intel_crtc_state *state)
{
- struct intel_crtc *intel_crtc = to_intel_crtc(state->base.crtc);
const struct drm_display_mode *adjusted_mode = &state->base.adjusted_mode;
- DRM_DEBUG_KMS("Updating scaler for [CRTC:%d:%s] scaler_user index %u.%u\n",
- intel_crtc->base.base.id, intel_crtc->base.name,
- intel_crtc->pipe, SKL_CRTC_INDEX);
-
return skl_update_scaler(state, !state->base.active, SKL_CRTC_INDEX,
&state->scaler_state.scaler_id, DRM_ROTATE_0,
state->pipe_src_w, state->pipe_src_h,
@@ -4756,7 +4741,6 @@ static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state,
struct intel_plane_state *plane_state)
{
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
struct intel_plane *intel_plane =
to_intel_plane(plane_state->base.plane);
struct drm_framebuffer *fb = plane_state->base.fb;
@@ -4764,10 +4748,6 @@ static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state,
bool force_detach = !fb || !plane_state->base.visible;
- DRM_DEBUG_KMS("Updating scaler for [PLANE:%d:%s] scaler_user index %u.%u\n",
- intel_plane->base.base.id, intel_plane->base.name,
- intel_crtc->pipe, drm_plane_index(&intel_plane->base));
-
ret = skl_update_scaler(crtc_state, force_detach,
drm_plane_index(&intel_plane->base),
&plane_state->scaler_id,
@@ -4859,7 +4839,7 @@ static void ironlake_pfit_enable(struct intel_crtc *crtc)
* as some pre-programmed values are broken,
* e.g. x201.
*/
- if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
+ if (IS_IVYBRIDGE(dev_priv) || IS_HASWELL(dev_priv))
I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3 |
PF_PIPE_SEL_IVB(pipe));
else
@@ -4884,7 +4864,7 @@ void hsw_enable_ips(struct intel_crtc *crtc)
*/
assert_plane_enabled(dev_priv, crtc->plane);
- if (IS_BROADWELL(dev)) {
+ if (IS_BROADWELL(dev_priv)) {
mutex_lock(&dev_priv->rps.hw_lock);
WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0xc0000000));
mutex_unlock(&dev_priv->rps.hw_lock);
@@ -4916,7 +4896,7 @@ void hsw_disable_ips(struct intel_crtc *crtc)
return;
assert_plane_enabled(dev_priv, crtc->plane);
- if (IS_BROADWELL(dev)) {
+ if (IS_BROADWELL(dev_priv)) {
mutex_lock(&dev_priv->rps.hw_lock);
WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0));
mutex_unlock(&dev_priv->rps.hw_lock);
@@ -4931,7 +4911,7 @@ void hsw_disable_ips(struct intel_crtc *crtc)
}
/* We need to wait for a vblank before we can disable the plane. */
- intel_wait_for_vblank(dev, crtc->pipe);
+ intel_wait_for_vblank(dev_priv, crtc->pipe);
}
static void intel_crtc_dpms_overlay_disable(struct intel_crtc *intel_crtc)
@@ -4985,7 +4965,7 @@ intel_post_enable_primary(struct drm_crtc *crtc)
* FIXME: Need to fix the logic to work when we turn off all planes
* but leave the pipe running.
*/
- if (IS_GEN2(dev))
+ if (IS_GEN2(dev_priv))
intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
/* Underruns don't always raise interrupts, so check manually. */
@@ -5008,7 +4988,7 @@ intel_pre_disable_primary(struct drm_crtc *crtc)
* FIXME: Need to fix the logic to work when we turn off all planes
* but leave the pipe running.
*/
- if (IS_GEN2(dev))
+ if (IS_GEN2(dev_priv))
intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
/*
@@ -5040,10 +5020,10 @@ intel_pre_disable_primary_noatomic(struct drm_crtc *crtc)
* event which is after the vblank start event, so we need to have a
* wait-for-vblank between disabling the plane and the pipe.
*/
- if (HAS_GMCH_DISPLAY(dev)) {
+ if (HAS_GMCH_DISPLAY(dev_priv)) {
intel_set_memory_cxsr(dev_priv, false);
dev_priv->wm.vlv.cxsr = false;
- intel_wait_for_vblank(dev, pipe);
+ intel_wait_for_vblank(dev_priv, pipe);
}
}
@@ -5062,7 +5042,7 @@ static void intel_post_plane_update(struct intel_crtc_state *old_crtc_state)
crtc->wm.cxsr_allowed = true;
if (pipe_config->update_wm_post && pipe_config->base.active)
- intel_update_watermarks(&crtc->base);
+ intel_update_watermarks(crtc);
if (old_pri_state) {
struct intel_plane_state *primary_state =
@@ -5091,6 +5071,8 @@ static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state)
struct drm_plane_state *old_pri_state =
drm_atomic_get_existing_plane_state(old_state, primary);
bool modeset = needs_modeset(&pipe_config->base);
+ struct intel_atomic_state *old_intel_state =
+ to_intel_atomic_state(old_state);
if (old_pri_state) {
struct intel_plane_state *primary_state =
@@ -5105,7 +5087,7 @@ static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state)
intel_pre_disable_primary(&crtc->base);
}
- if (pipe_config->disable_cxsr && HAS_GMCH_DISPLAY(dev)) {
+ if (pipe_config->disable_cxsr && HAS_GMCH_DISPLAY(dev_priv)) {
crtc->wm.cxsr_allowed = false;
/*
@@ -5120,7 +5102,7 @@ static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state)
if (old_crtc_state->base.active) {
intel_set_memory_cxsr(dev_priv, false);
dev_priv->wm.vlv.cxsr = false;
- intel_wait_for_vblank(dev, crtc->pipe);
+ intel_wait_for_vblank(dev_priv, crtc->pipe);
}
}
@@ -5133,7 +5115,7 @@ static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state)
*/
if (pipe_config->disable_lp_wm) {
ilk_disable_lp_wm(dev);
- intel_wait_for_vblank(dev, crtc->pipe);
+ intel_wait_for_vblank(dev_priv, crtc->pipe);
}
/*
@@ -5158,9 +5140,10 @@ static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state)
* us to.
*/
if (dev_priv->display.initial_watermarks != NULL)
- dev_priv->display.initial_watermarks(pipe_config);
+ dev_priv->display.initial_watermarks(old_intel_state,
+ pipe_config);
else if (pipe_config->update_wm_pre)
- intel_update_watermarks(&crtc->base);
+ intel_update_watermarks(crtc);
}
static void intel_crtc_disable_planes(struct drm_crtc *crtc, unsigned plane_mask)
@@ -5314,6 +5297,8 @@ static void ironlake_crtc_enable(struct intel_crtc_state *pipe_config,
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_crtc->pipe;
+ struct intel_atomic_state *old_intel_state =
+ to_intel_atomic_state(old_state);
if (WARN_ON(intel_crtc->active))
return;
@@ -5372,7 +5357,7 @@ static void ironlake_crtc_enable(struct intel_crtc_state *pipe_config,
intel_color_load_luts(&pipe_config->base);
if (dev_priv->display.initial_watermarks != NULL)
- dev_priv->display.initial_watermarks(intel_crtc->config);
+ dev_priv->display.initial_watermarks(old_intel_state, intel_crtc->config);
intel_enable_pipe(intel_crtc);
if (intel_crtc->config->has_pch_encoder)
@@ -5383,12 +5368,12 @@ static void ironlake_crtc_enable(struct intel_crtc_state *pipe_config,
intel_encoders_enable(crtc, pipe_config, old_state);
- if (HAS_PCH_CPT(dev))
+ if (HAS_PCH_CPT(dev_priv))
cpt_verify_modeset(dev, intel_crtc->pipe);
/* Must wait for vblank to avoid spurious PCH FIFO underruns */
if (intel_crtc->config->has_pch_encoder)
- intel_wait_for_vblank(dev, pipe);
+ intel_wait_for_vblank(dev_priv, pipe);
intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
}
@@ -5396,18 +5381,19 @@ static void ironlake_crtc_enable(struct intel_crtc_state *pipe_config,
/* IPS only exists on ULT machines and is tied to pipe A. */
static bool hsw_crtc_supports_ips(struct intel_crtc *crtc)
{
- return HAS_IPS(crtc->base.dev) && crtc->pipe == PIPE_A;
+ return HAS_IPS(to_i915(crtc->base.dev)) && crtc->pipe == PIPE_A;
}
static void haswell_crtc_enable(struct intel_crtc_state *pipe_config,
struct drm_atomic_state *old_state)
{
struct drm_crtc *crtc = pipe_config->base.crtc;
- struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc->dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_crtc->pipe, hsw_workaround_pipe;
enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
+ struct intel_atomic_state *old_intel_state =
+ to_intel_atomic_state(old_state);
if (WARN_ON(intel_crtc->active))
return;
@@ -5462,7 +5448,7 @@ static void haswell_crtc_enable(struct intel_crtc_state *pipe_config,
if (!transcoder_is_dsi(cpu_transcoder))
intel_ddi_enable_pipe_clock(intel_crtc);
- if (INTEL_INFO(dev)->gen >= 9)
+ if (INTEL_GEN(dev_priv) >= 9)
skylake_pfit_enable(intel_crtc);
else
ironlake_pfit_enable(intel_crtc);
@@ -5478,9 +5464,10 @@ static void haswell_crtc_enable(struct intel_crtc_state *pipe_config,
intel_ddi_enable_transcoder_func(crtc);
if (dev_priv->display.initial_watermarks != NULL)
- dev_priv->display.initial_watermarks(pipe_config);
+ dev_priv->display.initial_watermarks(old_intel_state,
+ pipe_config);
else
- intel_update_watermarks(crtc);
+ intel_update_watermarks(intel_crtc);
/* XXX: Do the pipe assertions at the right place for BXT DSI. */
if (!transcoder_is_dsi(cpu_transcoder))
@@ -5489,7 +5476,7 @@ static void haswell_crtc_enable(struct intel_crtc_state *pipe_config,
if (intel_crtc->config->has_pch_encoder)
lpt_pch_enable(crtc);
- if (intel_crtc->config->dp_encoder_is_mst)
+ if (intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_DP_MST))
intel_ddi_set_vc_payload_alloc(crtc, true);
assert_vblank_disabled(crtc);
@@ -5498,8 +5485,8 @@ static void haswell_crtc_enable(struct intel_crtc_state *pipe_config,
intel_encoders_enable(crtc, pipe_config, old_state);
if (intel_crtc->config->has_pch_encoder) {
- intel_wait_for_vblank(dev, pipe);
- intel_wait_for_vblank(dev, pipe);
+ intel_wait_for_vblank(dev_priv, pipe);
+ intel_wait_for_vblank(dev_priv, pipe);
intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A,
true);
@@ -5508,9 +5495,9 @@ static void haswell_crtc_enable(struct intel_crtc_state *pipe_config,
/* If we change the relative order between pipe/planes enabling, we need
* to change the workaround. */
hsw_workaround_pipe = pipe_config->hsw_workaround_pipe;
- if (IS_HASWELL(dev) && hsw_workaround_pipe != INVALID_PIPE) {
- intel_wait_for_vblank(dev, hsw_workaround_pipe);
- intel_wait_for_vblank(dev, hsw_workaround_pipe);
+ if (IS_HASWELL(dev_priv) && hsw_workaround_pipe != INVALID_PIPE) {
+ intel_wait_for_vblank(dev_priv, hsw_workaround_pipe);
+ intel_wait_for_vblank(dev_priv, hsw_workaround_pipe);
}
}
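
Nearly every hunk in this file follows the same conversion: the feature-test macros (IS_*(), HAS_*(), INTEL_GEN()) switch from taking a struct drm_device * to a struct drm_i915_private *, with to_i915() doing the lookup once at the top of each function. A minimal sketch of the pattern:

/* Sketch of the dev -> dev_priv conversion used throughout this diff. */
static void platform_check_sketch(struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);

	/* was: IS_HASWELL(dev), INTEL_INFO(dev)->gen >= 9 */
	if (IS_HASWELL(dev_priv) || INTEL_GEN(dev_priv) >= 9) {
		/* platform-specific path */
	}
}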
@@ -5565,7 +5552,7 @@ static void ironlake_crtc_disable(struct intel_crtc_state *old_crtc_state,
if (intel_crtc->config->has_pch_encoder) {
ironlake_disable_pch_transcoder(dev_priv, pipe);
- if (HAS_PCH_CPT(dev)) {
+ if (HAS_PCH_CPT(dev_priv)) {
i915_reg_t reg;
u32 temp;
@@ -5594,8 +5581,7 @@ static void haswell_crtc_disable(struct intel_crtc_state *old_crtc_state,
struct drm_atomic_state *old_state)
{
struct drm_crtc *crtc = old_crtc_state->base.crtc;
- struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc->dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
@@ -5612,13 +5598,13 @@ static void haswell_crtc_disable(struct intel_crtc_state *old_crtc_state,
if (!transcoder_is_dsi(cpu_transcoder))
intel_disable_pipe(intel_crtc);
- if (intel_crtc->config->dp_encoder_is_mst)
+ if (intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_DP_MST))
intel_ddi_set_vc_payload_alloc(crtc, false);
if (!transcoder_is_dsi(cpu_transcoder))
intel_ddi_disable_transcoder_func(dev_priv, cpu_transcoder);
- if (INTEL_INFO(dev)->gen >= 9)
+ if (INTEL_GEN(dev_priv) >= 9)
skylake_scaler_disable(intel_crtc);
else
ironlake_pfit_disable(intel_crtc, false);
@@ -5699,13 +5685,13 @@ static enum intel_display_power_domain port_to_aux_power_domain(enum port port)
enum intel_display_power_domain
intel_display_port_power_domain(struct intel_encoder *intel_encoder)
{
- struct drm_device *dev = intel_encoder->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(intel_encoder->base.dev);
struct intel_digital_port *intel_dig_port;
switch (intel_encoder->type) {
case INTEL_OUTPUT_UNKNOWN:
/* Only DDI platforms should ever use this output type */
- WARN_ON_ONCE(!HAS_DDI(dev));
+ WARN_ON_ONCE(!HAS_DDI(dev_priv));
case INTEL_OUTPUT_DP:
case INTEL_OUTPUT_HDMI:
case INTEL_OUTPUT_EDP:
@@ -5726,7 +5712,7 @@ intel_display_port_power_domain(struct intel_encoder *intel_encoder)
enum intel_display_power_domain
intel_display_port_aux_power_domain(struct intel_encoder *intel_encoder)
{
- struct drm_device *dev = intel_encoder->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(intel_encoder->base.dev);
struct intel_digital_port *intel_dig_port;
switch (intel_encoder->type) {
@@ -5739,7 +5725,7 @@ intel_display_port_aux_power_domain(struct intel_encoder *intel_encoder)
* what's the status of the given connectors, play safe and
* run the DP detection too.
*/
- WARN_ON_ONCE(!HAS_DDI(dev));
+ WARN_ON_ONCE(!HAS_DDI(dev_priv));
case INTEL_OUTPUT_DP:
case INTEL_OUTPUT_EDP:
intel_dig_port = enc_to_dig_port(&intel_encoder->base);
@@ -5831,11 +5817,9 @@ static int intel_compute_max_dotclk(struct drm_i915_private *dev_priv)
static int skl_calc_cdclk(int max_pixclk, int vco);
-static void intel_update_max_cdclk(struct drm_device *dev)
+static void intel_update_max_cdclk(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
-
- if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
+ if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
u32 limit = I915_READ(SKL_DFSM) & SKL_DFSM_CDCLK_LIMIT_MASK;
int max_cdclk, vco;
@@ -5857,9 +5841,9 @@ static void intel_update_max_cdclk(struct drm_device *dev)
max_cdclk = 308571;
dev_priv->max_cdclk_freq = skl_calc_cdclk(max_cdclk, vco);
- } else if (IS_BROXTON(dev)) {
+ } else if (IS_BROXTON(dev_priv)) {
dev_priv->max_cdclk_freq = 624000;
- } else if (IS_BROADWELL(dev)) {
+ } else if (IS_BROADWELL(dev_priv)) {
/*
* FIXME with extra cooling we can allow
* 540 MHz for ULX and 675 MHz for ULT.
@@ -5868,15 +5852,15 @@ static void intel_update_max_cdclk(struct drm_device *dev)
*/
if (I915_READ(FUSE_STRAP) & HSW_CDCLK_LIMIT)
dev_priv->max_cdclk_freq = 450000;
- else if (IS_BDW_ULX(dev))
+ else if (IS_BDW_ULX(dev_priv))
dev_priv->max_cdclk_freq = 450000;
- else if (IS_BDW_ULT(dev))
+ else if (IS_BDW_ULT(dev_priv))
dev_priv->max_cdclk_freq = 540000;
else
dev_priv->max_cdclk_freq = 675000;
- } else if (IS_CHERRYVIEW(dev)) {
+ } else if (IS_CHERRYVIEW(dev_priv)) {
dev_priv->max_cdclk_freq = 320000;
- } else if (IS_VALLEYVIEW(dev)) {
+ } else if (IS_VALLEYVIEW(dev_priv)) {
dev_priv->max_cdclk_freq = 400000;
} else {
/* otherwise assume cdclk is fixed */
@@ -5892,11 +5876,9 @@ static void intel_update_max_cdclk(struct drm_device *dev)
dev_priv->max_dotclk_freq);
}
-static void intel_update_cdclk(struct drm_device *dev)
+static void intel_update_cdclk(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
-
- dev_priv->cdclk_freq = dev_priv->display.get_display_clock_speed(dev);
+ dev_priv->cdclk_freq = dev_priv->display.get_display_clock_speed(dev_priv);
if (INTEL_GEN(dev_priv) >= 9)
DRM_DEBUG_DRIVER("Current CD clock rate: %d kHz, VCO: %d kHz, ref: %d kHz\n",
@@ -6057,14 +6039,14 @@ static void bxt_set_cdclk(struct drm_i915_private *dev_priv, int cdclk)
return;
}
- intel_update_cdclk(&dev_priv->drm);
+ intel_update_cdclk(dev_priv);
}
static void bxt_sanitize_cdclk(struct drm_i915_private *dev_priv)
{
u32 cdctl, expected;
- intel_update_cdclk(&dev_priv->drm);
+ intel_update_cdclk(dev_priv);
if (dev_priv->cdclk_pll.vco == 0 ||
dev_priv->cdclk_freq == dev_priv->cdclk_pll.ref)
@@ -6197,7 +6179,7 @@ void skl_set_preferred_cdclk_vco(struct drm_i915_private *dev_priv, int vco)
dev_priv->skl_preferred_vco_freq = vco;
if (changed)
- intel_update_max_cdclk(&dev_priv->drm);
+ intel_update_max_cdclk(dev_priv);
}
static void
@@ -6262,36 +6244,24 @@ skl_dpll0_disable(struct drm_i915_private *dev_priv)
dev_priv->cdclk_pll.vco = 0;
}
-static bool skl_cdclk_pcu_ready(struct drm_i915_private *dev_priv)
-{
- int ret;
- u32 val;
-
- /* inform PCU we want to change CDCLK */
- val = SKL_CDCLK_PREPARE_FOR_CHANGE;
- mutex_lock(&dev_priv->rps.hw_lock);
- ret = sandybridge_pcode_read(dev_priv, SKL_PCODE_CDCLK_CONTROL, &val);
- mutex_unlock(&dev_priv->rps.hw_lock);
-
- return ret == 0 && (val & SKL_CDCLK_READY_FOR_CHANGE);
-}
-
-static bool skl_cdclk_wait_for_pcu_ready(struct drm_i915_private *dev_priv)
-{
- return _wait_for(skl_cdclk_pcu_ready(dev_priv), 3000, 10) == 0;
-}
-
static void skl_set_cdclk(struct drm_i915_private *dev_priv, int cdclk, int vco)
{
- struct drm_device *dev = &dev_priv->drm;
u32 freq_select, pcu_ack;
+ int ret;
WARN_ON((cdclk == 24000) != (vco == 0));
DRM_DEBUG_DRIVER("Changing CDCLK to %d kHz (VCO %d kHz)\n", cdclk, vco);
- if (!skl_cdclk_wait_for_pcu_ready(dev_priv)) {
- DRM_ERROR("failed to inform PCU about cdclk change\n");
+ mutex_lock(&dev_priv->rps.hw_lock);
+ ret = skl_pcode_request(dev_priv, SKL_PCODE_CDCLK_CONTROL,
+ SKL_CDCLK_PREPARE_FOR_CHANGE,
+ SKL_CDCLK_READY_FOR_CHANGE,
+ SKL_CDCLK_READY_FOR_CHANGE, 3);
+ mutex_unlock(&dev_priv->rps.hw_lock);
+ if (ret) {
+ DRM_ERROR("Failed to inform PCU about cdclk change (%d)\n",
+ ret);
return;
}
@@ -6334,7 +6304,7 @@ static void skl_set_cdclk(struct drm_i915_private *dev_priv, int cdclk, int vco)
sandybridge_pcode_write(dev_priv, SKL_PCODE_CDCLK_CONTROL, pcu_ack);
mutex_unlock(&dev_priv->rps.hw_lock);
- intel_update_cdclk(dev);
+ intel_update_cdclk(dev_priv);
}
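
The two helpers removed above open-coded a PCU mailbox poll; skl_set_cdclk() now delegates to skl_pcode_request(), which issues the request and polls for the reply bits under a single timeout. What the removed code did, kept here as a sketch of the semantics the new helper absorbs:

/* Sketch: the poll that skl_pcode_request() now performs internally.
 * The old code wrapped this in _wait_for(..., 3000, 10), i.e. retry
 * for up to 3 ms in 10 us steps -- matching the timeout_base_ms = 3
 * passed to the new helper above. */
static bool pcu_ready_sketch(struct drm_i915_private *dev_priv)
{
	u32 val = SKL_CDCLK_PREPARE_FOR_CHANGE;
	int ret;

	mutex_lock(&dev_priv->rps.hw_lock);
	ret = sandybridge_pcode_read(dev_priv, SKL_PCODE_CDCLK_CONTROL, &val);
	mutex_unlock(&dev_priv->rps.hw_lock);

	return ret == 0 && (val & SKL_CDCLK_READY_FOR_CHANGE);
}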
static void skl_sanitize_cdclk(struct drm_i915_private *dev_priv);
@@ -6381,7 +6351,7 @@ static void skl_sanitize_cdclk(struct drm_i915_private *dev_priv)
if ((I915_READ(SWF_ILK(0x18)) & 0x00FFFFFF) == 0)
goto sanitize;
- intel_update_cdclk(&dev_priv->drm);
+ intel_update_cdclk(dev_priv);
/* Is PLL enabled and locked ? */
if (dev_priv->cdclk_pll.vco == 0 ||
dev_priv->cdclk_freq == dev_priv->cdclk_pll.ref)
@@ -6415,7 +6385,7 @@ static void valleyview_set_cdclk(struct drm_device *dev, int cdclk)
struct drm_i915_private *dev_priv = to_i915(dev);
u32 val, cmd;
- WARN_ON(dev_priv->display.get_display_clock_speed(dev)
+ WARN_ON(dev_priv->display.get_display_clock_speed(dev_priv)
!= dev_priv->cdclk_freq);
if (cdclk >= 320000) /* jump to highest voltage for 400MHz too */
@@ -6472,7 +6442,7 @@ static void valleyview_set_cdclk(struct drm_device *dev, int cdclk)
mutex_unlock(&dev_priv->sb_lock);
- intel_update_cdclk(dev);
+ intel_update_cdclk(dev_priv);
}
static void cherryview_set_cdclk(struct drm_device *dev, int cdclk)
@@ -6480,7 +6450,7 @@ static void cherryview_set_cdclk(struct drm_device *dev, int cdclk)
struct drm_i915_private *dev_priv = to_i915(dev);
u32 val, cmd;
- WARN_ON(dev_priv->display.get_display_clock_speed(dev)
+ WARN_ON(dev_priv->display.get_display_clock_speed(dev_priv)
!= dev_priv->cdclk_freq);
switch (cdclk) {
@@ -6513,7 +6483,7 @@ static void cherryview_set_cdclk(struct drm_device *dev, int cdclk)
}
mutex_unlock(&dev_priv->rps.hw_lock);
- intel_update_cdclk(dev);
+ intel_update_cdclk(dev_priv);
}
static int valleyview_calc_cdclk(struct drm_i915_private *dev_priv,
@@ -6676,7 +6646,7 @@ static void valleyview_modeset_commit_cdclk(struct drm_atomic_state *old_state)
*/
intel_display_power_get(dev_priv, POWER_DOMAIN_PIPE_A);
- if (IS_CHERRYVIEW(dev))
+ if (IS_CHERRYVIEW(dev_priv))
cherryview_set_cdclk(dev, req_cdclk);
else
valleyview_set_cdclk(dev, req_cdclk);
@@ -6704,7 +6674,7 @@ static void valleyview_crtc_enable(struct intel_crtc_state *pipe_config,
intel_set_pipe_timings(intel_crtc);
intel_set_pipe_src_size(intel_crtc);
- if (IS_CHERRYVIEW(dev) && pipe == PIPE_B) {
+ if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) {
struct drm_i915_private *dev_priv = to_i915(dev);
I915_WRITE(CHV_BLEND(pipe), CHV_BLEND_LEGACY);
@@ -6719,7 +6689,7 @@ static void valleyview_crtc_enable(struct intel_crtc_state *pipe_config,
intel_encoders_pre_pll_enable(crtc, pipe_config, old_state);
- if (IS_CHERRYVIEW(dev)) {
+ if (IS_CHERRYVIEW(dev_priv)) {
chv_prepare_pll(intel_crtc, intel_crtc->config);
chv_enable_pll(intel_crtc, intel_crtc->config);
} else {
@@ -6733,7 +6703,7 @@ static void valleyview_crtc_enable(struct intel_crtc_state *pipe_config,
intel_color_load_luts(&pipe_config->base);
- intel_update_watermarks(crtc);
+ intel_update_watermarks(intel_crtc);
intel_enable_pipe(intel_crtc);
assert_vblank_disabled(crtc);
@@ -6775,7 +6745,7 @@ static void i9xx_crtc_enable(struct intel_crtc_state *pipe_config,
intel_crtc->active = true;
- if (!IS_GEN2(dev))
+ if (!IS_GEN2(dev_priv))
intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
intel_encoders_pre_enable(crtc, pipe_config, old_state);
@@ -6786,7 +6756,7 @@ static void i9xx_crtc_enable(struct intel_crtc_state *pipe_config,
intel_color_load_luts(&pipe_config->base);
- intel_update_watermarks(crtc);
+ intel_update_watermarks(intel_crtc);
intel_enable_pipe(intel_crtc);
assert_vblank_disabled(crtc);
@@ -6823,8 +6793,8 @@ static void i9xx_crtc_disable(struct intel_crtc_state *old_crtc_state,
* On gen2 planes are double buffered but the pipe isn't, so we must
* wait for planes to fully turn off before disabling the pipe.
*/
- if (IS_GEN2(dev))
- intel_wait_for_vblank(dev, pipe);
+ if (IS_GEN2(dev_priv))
+ intel_wait_for_vblank(dev_priv, pipe);
intel_encoders_disable(crtc, old_crtc_state, old_state);
@@ -6838,9 +6808,9 @@ static void i9xx_crtc_disable(struct intel_crtc_state *old_crtc_state,
intel_encoders_post_disable(crtc, old_crtc_state, old_state);
if (!intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_DSI)) {
- if (IS_CHERRYVIEW(dev))
+ if (IS_CHERRYVIEW(dev_priv))
chv_disable_pll(dev_priv, pipe);
- else if (IS_VALLEYVIEW(dev))
+ else if (IS_VALLEYVIEW(dev_priv))
vlv_disable_pll(dev_priv, pipe);
else
i9xx_disable_pll(intel_crtc);
@@ -6848,7 +6818,7 @@ static void i9xx_crtc_disable(struct intel_crtc_state *old_crtc_state,
intel_encoders_post_pll_disable(crtc, old_crtc_state, old_state);
- if (!IS_GEN2(dev))
+ if (!IS_GEN2(dev_priv))
intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
}
@@ -6886,7 +6856,7 @@ static void intel_crtc_disable_noatomic(struct drm_crtc *crtc)
dev_priv->display.crtc_disable(crtc_state, state);
- drm_atomic_state_free(state);
+ drm_atomic_state_put(state);
DRM_DEBUG_KMS("[CRTC:%d:%s] hw state adjusted, was enabled, now disabled\n",
crtc->base.id, crtc->name);
@@ -6902,7 +6872,7 @@ static void intel_crtc_disable_noatomic(struct drm_crtc *crtc)
encoder->base.crtc = NULL;
intel_fbc_disable(intel_crtc);
- intel_update_watermarks(crtc);
+ intel_update_watermarks(intel_crtc);
intel_disable_shared_dpll(intel_crtc);
domains = intel_crtc->enabled_power_domains;
@@ -7028,6 +6998,7 @@ static int pipe_required_fdi_lanes(struct intel_crtc_state *crtc_state)
static int ironlake_check_fdi_lanes(struct drm_device *dev, enum pipe pipe,
struct intel_crtc_state *pipe_config)
{
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_atomic_state *state = pipe_config->base.state;
struct intel_crtc *other_crtc;
struct intel_crtc_state *other_crtc_state;
@@ -7040,7 +7011,7 @@ static int ironlake_check_fdi_lanes(struct drm_device *dev, enum pipe pipe,
return -EINVAL;
}
- if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
+ if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
if (pipe_config->fdi_lanes > 2) {
DRM_DEBUG_KMS("only 2 lanes on haswell, required: %i lanes\n",
pipe_config->fdi_lanes);
@@ -7050,7 +7021,7 @@ static int ironlake_check_fdi_lanes(struct drm_device *dev, enum pipe pipe,
}
}
- if (INTEL_INFO(dev)->num_pipes == 2)
+ if (INTEL_INFO(dev_priv)->num_pipes == 2)
return 0;
/* Ivybridge 3 pipe is really complicated */
@@ -7061,7 +7032,7 @@ static int ironlake_check_fdi_lanes(struct drm_device *dev, enum pipe pipe,
if (pipe_config->fdi_lanes <= 2)
return 0;
- other_crtc = to_intel_crtc(intel_get_crtc_for_pipe(dev, PIPE_C));
+ other_crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_C);
other_crtc_state =
intel_atomic_get_crtc_state(state, other_crtc);
if (IS_ERR(other_crtc_state))
@@ -7080,7 +7051,7 @@ static int ironlake_check_fdi_lanes(struct drm_device *dev, enum pipe pipe,
return -EINVAL;
}
- other_crtc = to_intel_crtc(intel_get_crtc_for_pipe(dev, PIPE_B));
+ other_crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_B);
other_crtc_state =
intel_atomic_get_crtc_state(state, other_crtc);
if (IS_ERR(other_crtc_state))
@@ -7191,7 +7162,7 @@ static int intel_crtc_compute_config(struct intel_crtc *crtc,
const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
int clock_limit = dev_priv->max_dotclk_freq;
- if (INTEL_INFO(dev)->gen < 4) {
+ if (INTEL_GEN(dev_priv) < 4) {
clock_limit = dev_priv->max_cdclk_freq * 9 / 10;
/*
@@ -7225,11 +7196,11 @@ static int intel_crtc_compute_config(struct intel_crtc *crtc,
/* Cantiga+ cannot handle modes with a hsync front porch of 0.
* WaPruneModeWithIncorrectHsyncOffset:ctg,elk,ilk,snb,ivb,vlv,hsw.
*/
- if ((INTEL_INFO(dev)->gen > 4 || IS_G4X(dev)) &&
+ if ((INTEL_GEN(dev_priv) > 4 || IS_G4X(dev_priv)) &&
adjusted_mode->crtc_hsync_start == adjusted_mode->crtc_hdisplay)
return -EINVAL;
- if (HAS_IPS(dev))
+ if (HAS_IPS(dev_priv))
hsw_compute_ips_config(crtc, pipe_config);
if (pipe_config->has_pch_encoder)
@@ -7238,10 +7209,9 @@ static int intel_crtc_compute_config(struct intel_crtc *crtc,
return 0;
}
-static int skylake_get_display_clock_speed(struct drm_device *dev)
+static int skylake_get_display_clock_speed(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
- uint32_t cdctl;
+ u32 cdctl;
skl_dpll0_update(dev_priv);
@@ -7300,9 +7270,8 @@ static void bxt_de_pll_update(struct drm_i915_private *dev_priv)
dev_priv->cdclk_pll.ref;
}
-static int broxton_get_display_clock_speed(struct drm_device *dev)
+static int broxton_get_display_clock_speed(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
u32 divider;
int div, vco;
@@ -7335,9 +7304,8 @@ static int broxton_get_display_clock_speed(struct drm_device *dev)
return DIV_ROUND_CLOSEST(vco, div);
}
-static int broadwell_get_display_clock_speed(struct drm_device *dev)
+static int broadwell_get_display_clock_speed(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
uint32_t lcpll = I915_READ(LCPLL_CTL);
uint32_t freq = lcpll & LCPLL_CLK_FREQ_MASK;
@@ -7355,9 +7323,8 @@ static int broadwell_get_display_clock_speed(struct drm_device *dev)
return 675000;
}
-static int haswell_get_display_clock_speed(struct drm_device *dev)
+static int haswell_get_display_clock_speed(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
uint32_t lcpll = I915_READ(LCPLL_CTL);
uint32_t freq = lcpll & LCPLL_CLK_FREQ_MASK;
@@ -7367,41 +7334,41 @@ static int haswell_get_display_clock_speed(struct drm_device *dev)
return 450000;
else if (freq == LCPLL_CLK_FREQ_450)
return 450000;
- else if (IS_HSW_ULT(dev))
+ else if (IS_HSW_ULT(dev_priv))
return 337500;
else
return 540000;
}
-static int valleyview_get_display_clock_speed(struct drm_device *dev)
+static int valleyview_get_display_clock_speed(struct drm_i915_private *dev_priv)
{
- return vlv_get_cck_clock_hpll(to_i915(dev), "cdclk",
+ return vlv_get_cck_clock_hpll(dev_priv, "cdclk",
CCK_DISPLAY_CLOCK_CONTROL);
}
-static int ilk_get_display_clock_speed(struct drm_device *dev)
+static int ilk_get_display_clock_speed(struct drm_i915_private *dev_priv)
{
return 450000;
}
-static int i945_get_display_clock_speed(struct drm_device *dev)
+static int i945_get_display_clock_speed(struct drm_i915_private *dev_priv)
{
return 400000;
}
-static int i915_get_display_clock_speed(struct drm_device *dev)
+static int i915_get_display_clock_speed(struct drm_i915_private *dev_priv)
{
return 333333;
}
-static int i9xx_misc_get_display_clock_speed(struct drm_device *dev)
+static int i9xx_misc_get_display_clock_speed(struct drm_i915_private *dev_priv)
{
return 200000;
}
-static int pnv_get_display_clock_speed(struct drm_device *dev)
+static int pnv_get_display_clock_speed(struct drm_i915_private *dev_priv)
{
- struct pci_dev *pdev = dev->pdev;
+ struct pci_dev *pdev = dev_priv->drm.pdev;
u16 gcfgc = 0;
pci_read_config_word(pdev, GCFGC, &gcfgc);
@@ -7424,9 +7391,9 @@ static int pnv_get_display_clock_speed(struct drm_device *dev)
}
}
-static int i915gm_get_display_clock_speed(struct drm_device *dev)
+static int i915gm_get_display_clock_speed(struct drm_i915_private *dev_priv)
{
- struct pci_dev *pdev = dev->pdev;
+ struct pci_dev *pdev = dev_priv->drm.pdev;
u16 gcfgc = 0;
pci_read_config_word(pdev, GCFGC, &gcfgc);
@@ -7444,14 +7411,14 @@ static int i915gm_get_display_clock_speed(struct drm_device *dev)
}
}
-static int i865_get_display_clock_speed(struct drm_device *dev)
+static int i865_get_display_clock_speed(struct drm_i915_private *dev_priv)
{
return 266667;
}
-static int i85x_get_display_clock_speed(struct drm_device *dev)
+static int i85x_get_display_clock_speed(struct drm_i915_private *dev_priv)
{
- struct pci_dev *pdev = dev->pdev;
+ struct pci_dev *pdev = dev_priv->drm.pdev;
u16 hpllcc = 0;
/*
@@ -7487,14 +7454,13 @@ static int i85x_get_display_clock_speed(struct drm_device *dev)
return 0;
}
-static int i830_get_display_clock_speed(struct drm_device *dev)
+static int i830_get_display_clock_speed(struct drm_i915_private *dev_priv)
{
return 133333;
}
-static unsigned int intel_hpll_vco(struct drm_device *dev)
+static unsigned int intel_hpll_vco(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
static const unsigned int blb_vco[8] = {
[0] = 3200000,
[1] = 4000000,
@@ -7537,20 +7503,20 @@ static unsigned int intel_hpll_vco(struct drm_device *dev)
uint8_t tmp = 0;
/* FIXME other chipsets? */
- if (IS_GM45(dev))
+ if (IS_GM45(dev_priv))
vco_table = ctg_vco;
- else if (IS_G4X(dev))
+ else if (IS_G4X(dev_priv))
vco_table = elk_vco;
- else if (IS_CRESTLINE(dev))
+ else if (IS_CRESTLINE(dev_priv))
vco_table = cl_vco;
- else if (IS_PINEVIEW(dev))
+ else if (IS_PINEVIEW(dev_priv))
vco_table = pnv_vco;
- else if (IS_G33(dev))
+ else if (IS_G33(dev_priv))
vco_table = blb_vco;
else
return 0;
- tmp = I915_READ(IS_MOBILE(dev) ? HPLLVCO_MOBILE : HPLLVCO);
+ tmp = I915_READ(IS_MOBILE(dev_priv) ? HPLLVCO_MOBILE : HPLLVCO);
vco = vco_table[tmp & 0x7];
if (vco == 0)
@@ -7561,10 +7527,10 @@ static unsigned int intel_hpll_vco(struct drm_device *dev)
return vco;
}
-static int gm45_get_display_clock_speed(struct drm_device *dev)
+static int gm45_get_display_clock_speed(struct drm_i915_private *dev_priv)
{
- struct pci_dev *pdev = dev->pdev;
- unsigned int cdclk_sel, vco = intel_hpll_vco(dev);
+ struct pci_dev *pdev = dev_priv->drm.pdev;
+ unsigned int cdclk_sel, vco = intel_hpll_vco(dev_priv);
uint16_t tmp = 0;
pci_read_config_word(pdev, GCFGC, &tmp);
@@ -7584,14 +7550,14 @@ static int gm45_get_display_clock_speed(struct drm_device *dev)
}
}
-static int i965gm_get_display_clock_speed(struct drm_device *dev)
+static int i965gm_get_display_clock_speed(struct drm_i915_private *dev_priv)
{
- struct pci_dev *pdev = dev->pdev;
+ struct pci_dev *pdev = dev_priv->drm.pdev;
static const uint8_t div_3200[] = { 16, 10, 8 };
static const uint8_t div_4000[] = { 20, 12, 10 };
static const uint8_t div_5333[] = { 24, 16, 14 };
const uint8_t *div_table;
- unsigned int cdclk_sel, vco = intel_hpll_vco(dev);
+ unsigned int cdclk_sel, vco = intel_hpll_vco(dev_priv);
uint16_t tmp = 0;
pci_read_config_word(pdev, GCFGC, &tmp);
@@ -7622,15 +7588,15 @@ fail:
return 200000;
}
-static int g33_get_display_clock_speed(struct drm_device *dev)
+static int g33_get_display_clock_speed(struct drm_i915_private *dev_priv)
{
- struct pci_dev *pdev = dev->pdev;
+ struct pci_dev *pdev = dev_priv->drm.pdev;
static const uint8_t div_3200[] = { 12, 10, 8, 7, 5, 16 };
static const uint8_t div_4000[] = { 14, 12, 10, 8, 6, 20 };
static const uint8_t div_4800[] = { 20, 14, 12, 10, 8, 24 };
static const uint8_t div_5333[] = { 20, 16, 12, 12, 8, 28 };
const uint8_t *div_table;
- unsigned int cdclk_sel, vco = intel_hpll_vco(dev);
+ unsigned int cdclk_sel, vco = intel_hpll_vco(dev_priv);
uint16_t tmp = 0;
pci_read_config_word(pdev, GCFGC, &tmp);
@@ -7719,10 +7685,10 @@ static void i9xx_update_pll_dividers(struct intel_crtc *crtc,
struct intel_crtc_state *crtc_state,
struct dpll *reduced_clock)
{
- struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
u32 fp, fp2 = 0;
- if (IS_PINEVIEW(dev)) {
+ if (IS_PINEVIEW(dev_priv)) {
fp = pnv_dpll_compute_fp(&crtc_state->dpll);
if (reduced_clock)
fp2 = pnv_dpll_compute_fp(reduced_clock);
@@ -7790,12 +7756,11 @@ static void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc,
struct intel_link_m_n *m_n,
struct intel_link_m_n *m2_n2)
{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
int pipe = crtc->pipe;
enum transcoder transcoder = crtc->config->cpu_transcoder;
- if (INTEL_INFO(dev)->gen >= 5) {
+ if (INTEL_GEN(dev_priv) >= 5) {
I915_WRITE(PIPE_DATA_M1(transcoder), TU_SIZE(m_n->tu) | m_n->gmch_m);
I915_WRITE(PIPE_DATA_N1(transcoder), m_n->gmch_n);
I915_WRITE(PIPE_LINK_M1(transcoder), m_n->link_m);
@@ -7804,8 +7769,8 @@ static void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc,
* for gen < 8) and if DRRS is supported (to make sure the
* registers are not unnecessarily accessed).
*/
- if (m2_n2 && (IS_CHERRYVIEW(dev) || INTEL_INFO(dev)->gen < 8) &&
- crtc->config->has_drrs) {
+ if (m2_n2 && (IS_CHERRYVIEW(dev_priv) ||
+ INTEL_GEN(dev_priv) < 8) && crtc->config->has_drrs) {
I915_WRITE(PIPE_DATA_M2(transcoder),
TU_SIZE(m2_n2->tu) | m2_n2->gmch_m);
I915_WRITE(PIPE_DATA_N2(transcoder), m2_n2->gmch_n);
@@ -8092,11 +8057,10 @@ static void chv_prepare_pll(struct intel_crtc *crtc,
* in cases where we need the PLL enabled even when @pipe is not going to
* be enabled.
*/
-int vlv_force_pll_on(struct drm_device *dev, enum pipe pipe,
+int vlv_force_pll_on(struct drm_i915_private *dev_priv, enum pipe pipe,
const struct dpll *dpll)
{
- struct intel_crtc *crtc =
- to_intel_crtc(intel_get_crtc_for_pipe(dev, pipe));
+ struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
struct intel_crtc_state *pipe_config;
pipe_config = kzalloc(sizeof(*pipe_config), GFP_KERNEL);
@@ -8107,7 +8071,7 @@ int vlv_force_pll_on(struct drm_device *dev, enum pipe pipe,
pipe_config->pixel_multiplier = 1;
pipe_config->dpll = *dpll;
- if (IS_CHERRYVIEW(dev)) {
+ if (IS_CHERRYVIEW(dev_priv)) {
chv_compute_dpll(crtc, pipe_config);
chv_prepare_pll(crtc, pipe_config);
chv_enable_pll(crtc, pipe_config);
@@ -8130,20 +8094,19 @@ int vlv_force_pll_on(struct drm_device *dev, enum pipe pipe,
* Disable the PLL for @pipe. To be used in cases where we need
* the PLL enabled even when @pipe is not going to be enabled.
*/
-void vlv_force_pll_off(struct drm_device *dev, enum pipe pipe)
+void vlv_force_pll_off(struct drm_i915_private *dev_priv, enum pipe pipe)
{
- if (IS_CHERRYVIEW(dev))
- chv_disable_pll(to_i915(dev), pipe);
+ if (IS_CHERRYVIEW(dev_priv))
+ chv_disable_pll(dev_priv, pipe);
else
- vlv_disable_pll(to_i915(dev), pipe);
+ vlv_disable_pll(dev_priv, pipe);
}
static void i9xx_compute_dpll(struct intel_crtc *crtc,
struct intel_crtc_state *crtc_state,
struct dpll *reduced_clock)
{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
u32 dpll;
struct dpll *clock = &crtc_state->dpll;
@@ -8156,7 +8119,7 @@ static void i9xx_compute_dpll(struct intel_crtc *crtc,
else
dpll |= DPLLB_MODE_DAC_SERIAL;
- if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) {
+ if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) || IS_G33(dev_priv)) {
dpll |= (crtc_state->pixel_multiplier - 1)
<< SDVO_MULTIPLIER_SHIFT_HIRES;
}
@@ -8169,11 +8132,11 @@ static void i9xx_compute_dpll(struct intel_crtc *crtc,
dpll |= DPLL_SDVO_HIGH_SPEED;
/* compute bitmask from p1 value */
- if (IS_PINEVIEW(dev))
+ if (IS_PINEVIEW(dev_priv))
dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW;
else {
dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
- if (IS_G4X(dev) && reduced_clock)
+ if (IS_G4X(dev_priv) && reduced_clock)
dpll |= (1 << (reduced_clock->p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
}
switch (clock->p2) {
@@ -8190,7 +8153,7 @@ static void i9xx_compute_dpll(struct intel_crtc *crtc,
dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
break;
}
- if (INTEL_INFO(dev)->gen >= 4)
+ if (INTEL_GEN(dev_priv) >= 4)
dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);
if (crtc_state->sdvo_tv_clock)
@@ -8204,7 +8167,7 @@ static void i9xx_compute_dpll(struct intel_crtc *crtc,
dpll |= DPLL_VCO_ENABLE;
crtc_state->dpll_hw_state.dpll = dpll;
- if (INTEL_INFO(dev)->gen >= 4) {
+ if (INTEL_GEN(dev_priv) >= 4) {
u32 dpll_md = (crtc_state->pixel_multiplier - 1)
<< DPLL_MD_UDI_MULTIPLIER_SHIFT;
crtc_state->dpll_hw_state.dpll_md = dpll_md;
@@ -8235,7 +8198,8 @@ static void i8xx_compute_dpll(struct intel_crtc *crtc,
dpll |= PLL_P2_DIVIDE_BY_4;
}
- if (!IS_I830(dev) && intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DVO))
+ if (!IS_I830(dev_priv) &&
+ intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DVO))
dpll |= DPLL_DVO_2X_MODE;
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
@@ -8250,8 +8214,7 @@ static void i8xx_compute_dpll(struct intel_crtc *crtc,
static void intel_set_pipe_timings(struct intel_crtc *intel_crtc)
{
- struct drm_device *dev = intel_crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
enum pipe pipe = intel_crtc->pipe;
enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
const struct drm_display_mode *adjusted_mode = &intel_crtc->config->base.adjusted_mode;
@@ -8277,7 +8240,7 @@ static void intel_set_pipe_timings(struct intel_crtc *intel_crtc)
vsyncshift += adjusted_mode->crtc_htotal;
}
- if (INTEL_INFO(dev)->gen > 3)
+ if (INTEL_GEN(dev_priv) > 3)
I915_WRITE(VSYNCSHIFT(cpu_transcoder), vsyncshift);
I915_WRITE(HTOTAL(cpu_transcoder),
@@ -8304,7 +8267,7 @@ static void intel_set_pipe_timings(struct intel_crtc *intel_crtc)
* programmed with the VTOTAL_EDP value. Same for VTOTAL_C. This is
* documented on the DDI_FUNC_CTL register description, EDP Input Select
* bits. */
- if (IS_HASWELL(dev) && cpu_transcoder == TRANSCODER_EDP &&
+ if (IS_HASWELL(dev_priv) && cpu_transcoder == TRANSCODER_EDP &&
(pipe == PIPE_B || pipe == PIPE_C))
I915_WRITE(VTOTAL(pipe), I915_READ(VTOTAL(cpu_transcoder)));
@@ -8400,8 +8363,7 @@ void intel_mode_from_pipe_config(struct drm_display_mode *mode,
static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc)
{
- struct drm_device *dev = intel_crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
uint32_t pipeconf;
pipeconf = 0;
@@ -8414,7 +8376,8 @@ static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc)
pipeconf |= PIPECONF_DOUBLE_WIDE;
/* only g4x and later have fancy bpc/dither controls */
- if (IS_G4X(dev) || IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
+ if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
+ IS_CHERRYVIEW(dev_priv)) {
/* Bspec claims that we can't use dithering for 30bpp pipes. */
if (intel_crtc->config->dither && intel_crtc->config->pipe_bpp != 30)
pipeconf |= PIPECONF_DITHER_EN |
@@ -8436,7 +8399,7 @@ static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc)
}
}
- if (HAS_PIPE_CXSR(dev)) {
+ if (HAS_PIPE_CXSR(dev_priv)) {
if (intel_crtc->lowfreq_avail) {
DRM_DEBUG_KMS("enabling CxSR downclocking\n");
pipeconf |= PIPECONF_CXSR_DOWNCLOCK;
@@ -8446,7 +8409,7 @@ static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc)
}
if (intel_crtc->config->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
- if (INTEL_INFO(dev)->gen < 4 ||
+ if (INTEL_GEN(dev_priv) < 4 ||
intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_SDVO))
pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
else
@@ -8454,7 +8417,7 @@ static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc)
} else
pipeconf |= PIPECONF_PROGRESSIVE;
- if ((IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) &&
+ if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
intel_crtc->config->limited_color_range)
pipeconf |= PIPECONF_COLOR_RANGE_SELECT;
@@ -8654,11 +8617,11 @@ static int vlv_crtc_compute_clock(struct intel_crtc *crtc,
static void i9xx_get_pfit_config(struct intel_crtc *crtc,
struct intel_crtc_state *pipe_config)
{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
uint32_t tmp;
- if (INTEL_INFO(dev)->gen <= 3 && (IS_I830(dev) || !IS_MOBILE(dev)))
+ if (INTEL_GEN(dev_priv) <= 3 &&
+ (IS_I830(dev_priv) || !IS_MOBILE(dev_priv)))
return;
tmp = I915_READ(PFIT_CONTROL);
@@ -8666,7 +8629,7 @@ static void i9xx_get_pfit_config(struct intel_crtc *crtc,
return;
/* Check whether the pfit is attached to our pipe. */
- if (INTEL_INFO(dev)->gen < 4) {
+ if (INTEL_GEN(dev_priv) < 4) {
if (crtc->pipe != PIPE_B)
return;
} else {
@@ -8730,10 +8693,10 @@ i9xx_get_initial_plane_config(struct intel_crtc *crtc,
fb = &intel_fb->base;
- if (INTEL_INFO(dev)->gen >= 4) {
+ if (INTEL_GEN(dev_priv) >= 4) {
if (val & DISPPLANE_TILED) {
plane_config->tiling = I915_TILING_X;
- fb->modifier[0] = I915_FORMAT_MOD_X_TILED;
+ fb->modifier = I915_FORMAT_MOD_X_TILED;
}
}
@@ -8742,7 +8705,7 @@ i9xx_get_initial_plane_config(struct intel_crtc *crtc,
fb->pixel_format = fourcc;
fb->bits_per_pixel = drm_format_plane_cpp(fourcc, 0) * 8;
- if (INTEL_INFO(dev)->gen >= 4) {
+ if (INTEL_GEN(dev_priv) >= 4) {
if (plane_config->tiling)
offset = I915_READ(DSPTILEOFF(plane));
else
@@ -8762,7 +8725,7 @@ i9xx_get_initial_plane_config(struct intel_crtc *crtc,
aligned_height = intel_fb_align_height(dev, fb->height,
fb->pixel_format,
- fb->modifier[0]);
+ fb->modifier);
plane_config->size = fb->pitches[0] * aligned_height;
@@ -8811,8 +8774,7 @@ static void chv_crtc_clock_get(struct intel_crtc *crtc,
static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
struct intel_crtc_state *pipe_config)
{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum intel_display_power_domain power_domain;
uint32_t tmp;
bool ret;
@@ -8830,7 +8792,8 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
if (!(tmp & PIPECONF_ENABLE))
goto out;
- if (IS_G4X(dev) || IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
+ if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
+ IS_CHERRYVIEW(dev_priv)) {
switch (tmp & PIPECONF_BPC_MASK) {
case PIPECONF_6BPC:
pipe_config->pipe_bpp = 18;
@@ -8846,11 +8809,11 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
}
}
- if ((IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) &&
+ if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
(tmp & PIPECONF_COLOR_RANGE_SELECT))
pipe_config->limited_color_range = true;
- if (INTEL_INFO(dev)->gen < 4)
+ if (INTEL_GEN(dev_priv) < 4)
pipe_config->double_wide = tmp & PIPECONF_DOUBLE_WIDE;
intel_get_pipe_timings(crtc, pipe_config);
@@ -8858,9 +8821,9 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
i9xx_get_pfit_config(crtc, pipe_config);
- if (INTEL_INFO(dev)->gen >= 4) {
+ if (INTEL_GEN(dev_priv) >= 4) {
/* No way to read it out on pipes B and C */
- if (IS_CHERRYVIEW(dev) && crtc->pipe != PIPE_A)
+ if (IS_CHERRYVIEW(dev_priv) && crtc->pipe != PIPE_A)
tmp = dev_priv->chv_dpll_md[crtc->pipe];
else
tmp = I915_READ(DPLL_MD(crtc->pipe));
@@ -8868,7 +8831,8 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
((tmp & DPLL_MD_UDI_MULTIPLIER_MASK)
>> DPLL_MD_UDI_MULTIPLIER_SHIFT) + 1;
pipe_config->dpll_hw_state.dpll_md = tmp;
- } else if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) {
+ } else if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) ||
+ IS_G33(dev_priv)) {
tmp = I915_READ(DPLL(crtc->pipe));
pipe_config->pixel_multiplier =
((tmp & SDVO_MULTIPLIER_MASK)
@@ -8880,13 +8844,13 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
pipe_config->pixel_multiplier = 1;
}
pipe_config->dpll_hw_state.dpll = I915_READ(DPLL(crtc->pipe));
- if (!IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev)) {
+ if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) {
/*
* DPLL_DVO_2X_MODE must be enabled for both DPLLs
* on 830. Filter it out here so that we don't
* report errors due to that.
*/
- if (IS_I830(dev))
+ if (IS_I830(dev_priv))
pipe_config->dpll_hw_state.dpll &= ~DPLL_DVO_2X_MODE;
pipe_config->dpll_hw_state.fp0 = I915_READ(FP0(crtc->pipe));
@@ -8898,9 +8862,9 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
DPLL_PORTB_READY_MASK);
}
- if (IS_CHERRYVIEW(dev))
+ if (IS_CHERRYVIEW(dev_priv))
chv_crtc_clock_get(crtc, pipe_config);
- else if (IS_VALLEYVIEW(dev))
+ else if (IS_VALLEYVIEW(dev_priv))
vlv_crtc_clock_get(crtc, pipe_config);
else
i9xx_crtc_clock_get(crtc, pipe_config);
@@ -8951,7 +8915,7 @@ static void ironlake_init_pch_refclk(struct drm_device *dev)
}
}
- if (HAS_PCH_IBX(dev)) {
+ if (HAS_PCH_IBX(dev_priv)) {
has_ck505 = dev_priv->vbt.display_clock_mode;
can_ssc = has_ck505;
} else {
@@ -9199,7 +9163,8 @@ static void lpt_enable_clkout_dp(struct drm_device *dev, bool with_spread,
if (WARN(with_fdi && !with_spread, "FDI requires downspread\n"))
with_spread = true;
- if (WARN(HAS_PCH_LPT_LP(dev) && with_fdi, "LP PCH doesn't have FDI\n"))
+ if (WARN(HAS_PCH_LPT_LP(dev_priv) &&
+ with_fdi, "LP PCH doesn't have FDI\n"))
with_fdi = false;
mutex_lock(&dev_priv->sb_lock);
@@ -9222,7 +9187,7 @@ static void lpt_enable_clkout_dp(struct drm_device *dev, bool with_spread,
}
}
- reg = HAS_PCH_LPT_LP(dev) ? SBI_GEN0 : SBI_DBUFF0;
+ reg = HAS_PCH_LPT_LP(dev_priv) ? SBI_GEN0 : SBI_DBUFF0;
tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
tmp |= SBI_GEN0_CFG_BUFFENABLE_DISABLE;
intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);
@@ -9238,7 +9203,7 @@ static void lpt_disable_clkout_dp(struct drm_device *dev)
mutex_lock(&dev_priv->sb_lock);
- reg = HAS_PCH_LPT_LP(dev) ? SBI_GEN0 : SBI_DBUFF0;
+ reg = HAS_PCH_LPT_LP(dev_priv) ? SBI_GEN0 : SBI_DBUFF0;
tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
tmp &= ~SBI_GEN0_CFG_BUFFENABLE_DISABLE;
intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);
@@ -9346,9 +9311,11 @@ static void lpt_init_pch_refclk(struct drm_device *dev)
*/
void intel_init_pch_refclk(struct drm_device *dev)
{
- if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
+ struct drm_i915_private *dev_priv = to_i915(dev);
+
+ if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv))
ironlake_init_pch_refclk(dev);
- else if (HAS_PCH_LPT(dev))
+ else if (HAS_PCH_LPT(dev_priv))
lpt_init_pch_refclk(dev);
}
@@ -9477,7 +9444,7 @@ static void ironlake_compute_dpll(struct intel_crtc *intel_crtc,
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
if ((intel_panel_use_ssc(dev_priv) &&
dev_priv->vbt.lvds_ssc_freq == 100000) ||
- (HAS_PCH_IBX(dev) && intel_is_dual_link_lvds(dev)))
+ (HAS_PCH_IBX(dev_priv) && intel_is_dual_link_lvds(dev)))
factor = 25;
} else if (crtc_state->sdvo_tv_clock)
factor = 20;
@@ -9651,11 +9618,10 @@ static void intel_cpu_transcoder_get_m_n(struct intel_crtc *crtc,
struct intel_link_m_n *m_n,
struct intel_link_m_n *m2_n2)
{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
- if (INTEL_INFO(dev)->gen >= 5) {
+ if (INTEL_GEN(dev_priv) >= 5) {
m_n->link_m = I915_READ(PIPE_LINK_M1(transcoder));
m_n->link_n = I915_READ(PIPE_LINK_N1(transcoder));
m_n->gmch_m = I915_READ(PIPE_DATA_M1(transcoder))
@@ -9667,7 +9633,7 @@ static void intel_cpu_transcoder_get_m_n(struct intel_crtc *crtc,
* gen < 8) and if DRRS is supported (to make sure the
* registers are not unnecessarily read).
*/
- if (m2_n2 && INTEL_INFO(dev)->gen < 8 &&
+ if (m2_n2 && INTEL_GEN(dev_priv) < 8 &&
crtc->config->has_drrs) {
m2_n2->link_m = I915_READ(PIPE_LINK_M2(transcoder));
m2_n2->link_n = I915_READ(PIPE_LINK_N2(transcoder));
@@ -9771,17 +9737,17 @@ skylake_get_initial_plane_config(struct intel_crtc *crtc,
tiling = val & PLANE_CTL_TILED_MASK;
switch (tiling) {
case PLANE_CTL_TILED_LINEAR:
- fb->modifier[0] = DRM_FORMAT_MOD_NONE;
+ fb->modifier = DRM_FORMAT_MOD_NONE;
break;
case PLANE_CTL_TILED_X:
plane_config->tiling = I915_TILING_X;
- fb->modifier[0] = I915_FORMAT_MOD_X_TILED;
+ fb->modifier = I915_FORMAT_MOD_X_TILED;
break;
case PLANE_CTL_TILED_Y:
- fb->modifier[0] = I915_FORMAT_MOD_Y_TILED;
+ fb->modifier = I915_FORMAT_MOD_Y_TILED;
break;
case PLANE_CTL_TILED_YF:
- fb->modifier[0] = I915_FORMAT_MOD_Yf_TILED;
+ fb->modifier = I915_FORMAT_MOD_Yf_TILED;
break;
default:
MISSING_CASE(tiling);
@@ -9798,13 +9764,13 @@ skylake_get_initial_plane_config(struct intel_crtc *crtc,
fb->width = ((val >> 0) & 0x1fff) + 1;
val = I915_READ(PLANE_STRIDE(pipe, 0));
- stride_mult = intel_fb_stride_alignment(dev_priv, fb->modifier[0],
+ stride_mult = intel_fb_stride_alignment(dev_priv, fb->modifier,
fb->pixel_format);
fb->pitches[0] = (val & 0x3ff) * stride_mult;
aligned_height = intel_fb_align_height(dev, fb->height,
fb->pixel_format,
- fb->modifier[0]);
+ fb->modifier);
plane_config->size = fb->pitches[0] * aligned_height;
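
This hunk tracks a core DRM change: struct drm_framebuffer used to carry a per-plane modifier[4] array even though all planes share one tiling layout, and it is collapsed to a single modifier field. A one-line sketch of the resulting usage:

/* Sketch: fb->modifier is now a single value, not an array. */
static bool fb_is_x_tiled_sketch(const struct drm_framebuffer *fb)
{
	return fb->modifier == I915_FORMAT_MOD_X_TILED;
}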
@@ -9837,7 +9803,7 @@ static void ironlake_get_pfit_config(struct intel_crtc *crtc,
/* We currently do not free assignments of panel fitters on
* ivb/hsw (since we don't use the higher upscaling modes which
* differentiate them) so just WARN about this case for now. */
- if (IS_GEN7(dev)) {
+ if (IS_GEN7(dev_priv)) {
WARN_ON((tmp & PF_PIPE_SEL_MASK_IVB) !=
PF_PIPE_SEL_IVB(crtc->pipe));
}
@@ -9869,10 +9835,10 @@ ironlake_get_initial_plane_config(struct intel_crtc *crtc,
fb = &intel_fb->base;
- if (INTEL_INFO(dev)->gen >= 4) {
+ if (INTEL_GEN(dev_priv) >= 4) {
if (val & DISPPLANE_TILED) {
plane_config->tiling = I915_TILING_X;
- fb->modifier[0] = I915_FORMAT_MOD_X_TILED;
+ fb->modifier = I915_FORMAT_MOD_X_TILED;
}
}
@@ -9882,7 +9848,7 @@ ironlake_get_initial_plane_config(struct intel_crtc *crtc,
fb->bits_per_pixel = drm_format_plane_cpp(fourcc, 0) * 8;
base = I915_READ(DSPSURF(pipe)) & 0xfffff000;
- if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
+ if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
offset = I915_READ(DSPOFFSET(pipe));
} else {
if (plane_config->tiling)
@@ -9901,7 +9867,7 @@ ironlake_get_initial_plane_config(struct intel_crtc *crtc,
aligned_height = intel_fb_align_height(dev, fb->height,
fb->pixel_format,
- fb->modifier[0]);
+ fb->modifier);
plane_config->size = fb->pitches[0] * aligned_height;
@@ -10026,7 +9992,7 @@ static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
I915_STATE_WARN(I915_READ(PP_STATUS(0)) & PP_ON, "Panel power on\n");
I915_STATE_WARN(I915_READ(BLC_PWM_CPU_CTL2) & BLM_PWM_ENABLE,
"CPU PWM1 enabled\n");
- if (IS_HASWELL(dev))
+ if (IS_HASWELL(dev_priv))
I915_STATE_WARN(I915_READ(HSW_BLC_PWM2_CTL) & BLM_PWM_ENABLE,
"CPU PWM2 enabled\n");
I915_STATE_WARN(I915_READ(BLC_PWM_PCH_CTL1) & BLM_PCH_PWM_ENABLE,
@@ -10046,9 +10012,7 @@ static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
static uint32_t hsw_read_dcomp(struct drm_i915_private *dev_priv)
{
- struct drm_device *dev = &dev_priv->drm;
-
- if (IS_HASWELL(dev))
+ if (IS_HASWELL(dev_priv))
return I915_READ(D_COMP_HSW);
else
return I915_READ(D_COMP_BDW);
@@ -10056,9 +10020,7 @@ static uint32_t hsw_read_dcomp(struct drm_i915_private *dev_priv)
static void hsw_write_dcomp(struct drm_i915_private *dev_priv, uint32_t val)
{
- struct drm_device *dev = &dev_priv->drm;
-
- if (IS_HASWELL(dev)) {
+ if (IS_HASWELL(dev_priv)) {
mutex_lock(&dev_priv->rps.hw_lock);
if (sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_D_COMP,
val))
@@ -10173,7 +10135,7 @@ static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
}
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
- intel_update_cdclk(&dev_priv->drm);
+ intel_update_cdclk(dev_priv);
}
/*
@@ -10206,7 +10168,7 @@ void hsw_enable_pc8(struct drm_i915_private *dev_priv)
DRM_DEBUG_KMS("Enabling package C8+\n");
- if (HAS_PCH_LPT_LP(dev)) {
+ if (HAS_PCH_LPT_LP(dev_priv)) {
val = I915_READ(SOUTH_DSPCLK_GATE_D);
val &= ~PCH_LP_PARTITION_LEVEL_DISABLE;
I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
@@ -10226,7 +10188,7 @@ void hsw_disable_pc8(struct drm_i915_private *dev_priv)
hsw_restore_lcpll(dev_priv);
lpt_init_pch_refclk(dev);
- if (HAS_PCH_LPT_LP(dev)) {
+ if (HAS_PCH_LPT_LP(dev_priv)) {
val = I915_READ(SOUTH_DSPCLK_GATE_D);
val |= PCH_LP_PARTITION_LEVEL_DISABLE;
I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
@@ -10376,7 +10338,7 @@ static void broadwell_set_cdclk(struct drm_device *dev, int cdclk)
I915_WRITE(CDCLK_FREQ, DIV_ROUND_CLOSEST(cdclk, 1000) - 1);
- intel_update_cdclk(dev);
+ intel_update_cdclk(dev_priv);
WARN(cdclk != dev_priv->cdclk_freq,
"cdclk requested %d kHz but got %d kHz\n",
@@ -10663,8 +10625,7 @@ static bool bxt_get_dsi_transcoder_state(struct intel_crtc *crtc,
static void haswell_get_ddi_port_state(struct intel_crtc *crtc,
struct intel_crtc_state *pipe_config)
{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_shared_dpll *pll;
enum port port;
uint32_t tmp;
@@ -10673,9 +10634,9 @@ static void haswell_get_ddi_port_state(struct intel_crtc *crtc,
port = (tmp & TRANS_DDI_PORT_MASK) >> TRANS_DDI_PORT_SHIFT;
- if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev))
+ if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
skylake_get_ddi_pll(dev_priv, port, pipe_config);
- else if (IS_BROXTON(dev))
+ else if (IS_BROXTON(dev_priv))
bxt_get_ddi_pll(dev_priv, port, pipe_config);
else
haswell_get_ddi_pll(dev_priv, port, pipe_config);
@@ -10691,7 +10652,7 @@ static void haswell_get_ddi_port_state(struct intel_crtc *crtc,
* DDI E. So just check whether this pipe is wired to DDI E and whether
* the PCH transcoder is on.
*/
- if (INTEL_INFO(dev)->gen < 9 &&
+ if (INTEL_GEN(dev_priv) < 9 &&
(port == PORT_E) && I915_READ(LPT_TRANSCONF) & TRANS_ENABLE) {
pipe_config->has_pch_encoder = true;
@@ -10706,8 +10667,7 @@ static void haswell_get_ddi_port_state(struct intel_crtc *crtc,
static bool haswell_get_pipe_config(struct intel_crtc *crtc,
struct intel_crtc_state *pipe_config)
{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum intel_display_power_domain power_domain;
unsigned long power_domain_mask;
bool active;
@@ -10740,11 +10700,9 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc,
pipe_config->gamma_mode =
I915_READ(GAMMA_MODE(crtc->pipe)) & GAMMA_MODE_MODE_MASK;
- if (INTEL_INFO(dev)->gen >= 9) {
- skl_init_scalers(dev, crtc, pipe_config);
- }
+ if (INTEL_GEN(dev_priv) >= 9) {
+ skl_init_scalers(dev_priv, crtc, pipe_config);
- if (INTEL_INFO(dev)->gen >= 9) {
pipe_config->scaler_state.scaler_id = -1;
pipe_config->scaler_state.scaler_users &= ~(1 << SKL_CRTC_INDEX);
}
@@ -10752,13 +10710,13 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc,
power_domain = POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe);
if (intel_display_power_get_if_enabled(dev_priv, power_domain)) {
power_domain_mask |= BIT(power_domain);
- if (INTEL_INFO(dev)->gen >= 9)
+ if (INTEL_GEN(dev_priv) >= 9)
skylake_get_pfit_config(crtc, pipe_config);
else
ironlake_get_pfit_config(crtc, pipe_config);
}
- if (IS_HASWELL(dev))
+ if (IS_HASWELL(dev_priv))
pipe_config->ips_enabled = hsw_crtc_supports_ips(crtc) &&
(I915_READ(IPS_CTL) & IPS_ENABLE);
@@ -10846,13 +10804,9 @@ static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base,
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- const struct skl_wm_values *wm = &dev_priv->wm.skl_results;
int pipe = intel_crtc->pipe;
uint32_t cntl = 0;
- if (INTEL_GEN(dev_priv) >= 9 && wm->dirty_pipes & drm_crtc_mask(crtc))
- skl_write_cursor_wm(intel_crtc, wm);
-
if (plane_state && plane_state->base.visible) {
cntl = MCURSOR_GAMMA_ENABLE;
switch (plane_state->base.crtc_w) {
@@ -10871,10 +10825,10 @@ static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base,
}
cntl |= pipe << 28; /* Connect to correct pipe */
- if (HAS_DDI(dev))
+ if (HAS_DDI(dev_priv))
cntl |= CURSOR_PIPE_CSC_ENABLE;
- if (plane_state->base.rotation == DRM_ROTATE_180)
+ if (plane_state->base.rotation & DRM_ROTATE_180)
cntl |= CURSOR_ROTATE_180;
}
@@ -10919,8 +10873,8 @@ static void intel_crtc_update_cursor(struct drm_crtc *crtc,
pos |= y << CURSOR_Y_SHIFT;
/* ILK+ do this automagically */
- if (HAS_GMCH_DISPLAY(dev) &&
- plane_state->base.rotation == DRM_ROTATE_180) {
+ if (HAS_GMCH_DISPLAY(dev_priv) &&
+ plane_state->base.rotation & DRM_ROTATE_180) {
base += (plane_state->base.crtc_h *
plane_state->base.crtc_w - 1) * 4;
}
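
The rotation checks in the two cursor hunks above change from == to &: the plane rotation property is a bitmask, so DRM_ROTATE_180 may be combined with reflection flags and must be tested as a bit. A minimal sketch:

/* Sketch: test the 180-degree bit, not the whole bitmask. */
static bool cursor_rotated_180_sketch(const struct drm_plane_state *state)
{
	return state->rotation & DRM_ROTATE_180;	/* was: == DRM_ROTATE_180 */
}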
@@ -10928,13 +10882,13 @@ static void intel_crtc_update_cursor(struct drm_crtc *crtc,
I915_WRITE(CURPOS(pipe), pos);
- if (IS_845G(dev) || IS_I865G(dev))
+ if (IS_845G(dev_priv) || IS_I865G(dev_priv))
i845_update_cursor(crtc, base, plane_state);
else
i9xx_update_cursor(crtc, base, plane_state);
}
-static bool cursor_size_ok(struct drm_device *dev,
+static bool cursor_size_ok(struct drm_i915_private *dev_priv,
uint32_t width, uint32_t height)
{
if (width == 0 || height == 0)
@@ -10946,11 +10900,11 @@ static bool cursor_size_ok(struct drm_device *dev,
* the precision of the register. Everything else requires
* square cursors, limited to a few power-of-two sizes.
*/
- if (IS_845G(dev) || IS_I865G(dev)) {
+ if (IS_845G(dev_priv) || IS_I865G(dev_priv)) {
if ((width & 63) != 0)
return false;
- if (width > (IS_845G(dev) ? 64 : 512))
+ if (width > (IS_845G(dev_priv) ? 64 : 512))
return false;
if (height > 1023)
@@ -10959,7 +10913,7 @@ static bool cursor_size_ok(struct drm_device *dev,
switch (width | height) {
case 256:
case 128:
- if (IS_GEN2(dev))
+ if (IS_GEN2(dev_priv))
return false;
case 64:
break;
@@ -11053,7 +11007,7 @@ intel_framebuffer_create_for_mode(struct drm_device *dev,
fb = intel_framebuffer_create(dev, &mode_cmd, obj);
if (IS_ERR(fb))
- i915_gem_object_put_unlocked(obj);
+ i915_gem_object_put(obj);
return fb;
}
@@ -11138,6 +11092,7 @@ bool intel_get_load_detect_pipe(struct drm_connector *connector,
struct drm_encoder *encoder = &intel_encoder->base;
struct drm_crtc *crtc = NULL;
struct drm_device *dev = encoder->dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_framebuffer *fb;
struct drm_mode_config *config = &dev->mode_config;
struct drm_atomic_state *state = NULL, *restore_state = NULL;
@@ -11290,13 +11245,18 @@ found:
old->restore_state = restore_state;
/* let the connector get through one full cycle before testing */
- intel_wait_for_vblank(dev, intel_crtc->pipe);
+ intel_wait_for_vblank(dev_priv, intel_crtc->pipe);
return true;
fail:
- drm_atomic_state_free(state);
- drm_atomic_state_free(restore_state);
- restore_state = state = NULL;
+ if (state) {
+ drm_atomic_state_put(state);
+ state = NULL;
+ }
+ if (restore_state) {
+ drm_atomic_state_put(restore_state);
+ restore_state = NULL;
+ }
if (ret == -EDEADLK) {
drm_modeset_backoff(ctx);
@@ -11324,10 +11284,9 @@ void intel_release_load_detect_pipe(struct drm_connector *connector,
return;
ret = drm_atomic_commit(state);
- if (ret) {
+ if (ret)
DRM_DEBUG_KMS("Couldn't release load detect pipe: %i\n", ret);
- drm_atomic_state_free(state);
- }
+ drm_atomic_state_put(state);
}
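
The load-detect hunks convert drm_atomic_state_free() to the refcounted drm_atomic_state_put(). Unlike the old free(), put() does not tolerate a NULL pointer, which is why the fail: path above gains explicit NULL checks. A minimal sketch of the lifecycle, assuming only the alloc/put API shown in these hunks:

/* Sketch: atomic state is refcounted; put() drops our reference. */
static int commit_sketch(struct drm_device *dev)
{
	struct drm_atomic_state *state = drm_atomic_state_alloc(dev);

	if (!state)
		return -ENOMEM;
	/* ... populate state, drm_atomic_commit(state) ... */
	drm_atomic_state_put(state);	/* commit holds its own reference */
	return 0;
}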
static int i9xx_pll_refclk(struct drm_device *dev,
@@ -11338,9 +11297,9 @@ static int i9xx_pll_refclk(struct drm_device *dev,
if ((dpll & PLL_REF_INPUT_MASK) == PLLB_REF_INPUT_SPREADSPECTRUMIN)
return dev_priv->vbt.lvds_ssc_freq;
- else if (HAS_PCH_SPLIT(dev))
+ else if (HAS_PCH_SPLIT(dev_priv))
return 120000;
- else if (!IS_GEN2(dev))
+ else if (!IS_GEN2(dev_priv))
return 96000;
else
return 48000;
@@ -11365,7 +11324,7 @@ static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
fp = pipe_config->dpll_hw_state.fp1;
clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
- if (IS_PINEVIEW(dev)) {
+ if (IS_PINEVIEW(dev_priv)) {
clock.n = ffs((fp & FP_N_PINEVIEW_DIV_MASK) >> FP_N_DIV_SHIFT) - 1;
clock.m2 = (fp & FP_M2_PINEVIEW_DIV_MASK) >> FP_M2_DIV_SHIFT;
} else {
@@ -11373,8 +11332,8 @@ static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
}
- if (!IS_GEN2(dev)) {
- if (IS_PINEVIEW(dev))
+ if (!IS_GEN2(dev_priv)) {
+ if (IS_PINEVIEW(dev_priv))
clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >>
DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW);
else
@@ -11396,12 +11355,12 @@ static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
return;
}
- if (IS_PINEVIEW(dev))
+ if (IS_PINEVIEW(dev_priv))
port_clock = pnv_calc_dpll_params(refclk, &clock);
else
port_clock = i9xx_calc_dpll_params(refclk, &clock);
} else {
- u32 lvds = IS_I830(dev) ? 0 : I915_READ(LVDS);
+ u32 lvds = IS_I830(dev_priv) ? 0 : I915_READ(LVDS);
bool is_lvds = (pipe == 1) && (lvds & LVDS_PORT_EN);
if (is_lvds) {
@@ -11602,7 +11561,7 @@ static bool __pageflip_finished_cs(struct intel_crtc *crtc,
* really needed there. But since ctg has the registers,
* include it in the check anyway.
*/
- if (INTEL_INFO(dev)->gen < 5 && !IS_G4X(dev))
+ if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
return true;
/*
@@ -11665,8 +11624,7 @@ static bool pageflip_finished(struct intel_crtc *crtc,
void intel_finish_page_flip_cs(struct drm_i915_private *dev_priv, int pipe)
{
struct drm_device *dev = &dev_priv->drm;
- struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
struct intel_flip_work *work;
unsigned long flags;
@@ -11679,12 +11637,12 @@ void intel_finish_page_flip_cs(struct drm_i915_private *dev_priv, int pipe)
* lost pageflips) so needs the full irqsave spinlocks.
*/
spin_lock_irqsave(&dev->event_lock, flags);
- work = intel_crtc->flip_work;
+ work = crtc->flip_work;
if (work != NULL &&
!is_mmio_work(work) &&
- pageflip_finished(intel_crtc, work))
- page_flip_completed(intel_crtc);
+ pageflip_finished(crtc, work))
+ page_flip_completed(crtc);
spin_unlock_irqrestore(&dev->event_lock, flags);
}
@@ -11692,8 +11650,7 @@ void intel_finish_page_flip_cs(struct drm_i915_private *dev_priv, int pipe)
void intel_finish_page_flip_mmio(struct drm_i915_private *dev_priv, int pipe)
{
struct drm_device *dev = &dev_priv->drm;
- struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
struct intel_flip_work *work;
unsigned long flags;
@@ -11706,12 +11663,12 @@ void intel_finish_page_flip_mmio(struct drm_i915_private *dev_priv, int pipe)
* lost pageflips) so needs the full irqsave spinlocks.
*/
spin_lock_irqsave(&dev->event_lock, flags);
- work = intel_crtc->flip_work;
+ work = crtc->flip_work;
if (work != NULL &&
is_mmio_work(work) &&
- pageflip_finished(intel_crtc, work))
- page_flip_completed(intel_crtc);
+ pageflip_finished(crtc, work))
+ page_flip_completed(crtc);
spin_unlock_irqrestore(&dev->event_lock, flags);
}
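
Both page-flip handlers drop the pipe_to_crtc_mapping[] lookup followed by a to_intel_crtc() cast; intel_get_crtc_for_pipe() now takes dev_priv and returns the struct intel_crtc * directly:

/* Sketch: one call replaces the mapping-array-plus-cast dance. */
static struct intel_crtc *crtc_lookup_sketch(struct drm_i915_private *dev_priv,
					     enum pipe pipe)
{
	return intel_get_crtc_for_pipe(dev_priv, pipe);
}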
@@ -11816,7 +11773,7 @@ static int intel_gen4_queue_flip(struct drm_device *dev,
MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
intel_ring_emit(ring, fb->pitches[0]);
intel_ring_emit(ring, intel_crtc->flip_work->gtt_offset |
- intel_fb_modifier_to_tiling(fb->modifier[0]));
+ intel_fb_modifier_to_tiling(fb->modifier));
/* XXX Enabling the panel-fitter across page-flip is so far
* untested on non-native modes, so ignore it for now.
@@ -11849,7 +11806,7 @@ static int intel_gen6_queue_flip(struct drm_device *dev,
intel_ring_emit(ring, MI_DISPLAY_FLIP |
MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
intel_ring_emit(ring, fb->pitches[0] |
- intel_fb_modifier_to_tiling(fb->modifier[0]));
+ intel_fb_modifier_to_tiling(fb->modifier));
intel_ring_emit(ring, intel_crtc->flip_work->gtt_offset);
/* Contrary to the suggestions in the documentation,
@@ -11872,6 +11829,7 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
struct drm_i915_gem_request *req,
uint32_t flags)
{
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_ring *ring = req->ring;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
uint32_t plane_bit = 0;
@@ -11900,7 +11858,7 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
* 48bits addresses, and we need a NOOP for the batch size to
* stay even.
*/
- if (IS_GEN8(dev))
+ if (IS_GEN8(dev_priv))
len += 2;
}
@@ -11937,7 +11895,7 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
intel_ring_emit(ring, ~(DERRMR_PIPEA_PRI_FLIP_DONE |
DERRMR_PIPEB_PRI_FLIP_DONE |
DERRMR_PIPEC_PRI_FLIP_DONE));
- if (IS_GEN8(dev))
+ if (IS_GEN8(dev_priv))
intel_ring_emit(ring, MI_STORE_REGISTER_MEM_GEN8 |
MI_SRM_LRM_GLOBAL_GTT);
else
@@ -11946,7 +11904,7 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
intel_ring_emit_reg(ring, DERRMR);
intel_ring_emit(ring,
i915_ggtt_offset(req->engine->scratch) + 256);
- if (IS_GEN8(dev)) {
+ if (IS_GEN8(dev_priv)) {
intel_ring_emit(ring, 0);
intel_ring_emit(ring, MI_NOOP);
}
@@ -11954,7 +11912,7 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | plane_bit);
intel_ring_emit(ring, fb->pitches[0] |
- intel_fb_modifier_to_tiling(fb->modifier[0]));
+ intel_fb_modifier_to_tiling(fb->modifier));
intel_ring_emit(ring, intel_crtc->flip_work->gtt_offset);
intel_ring_emit(ring, (MI_NOOP));
@@ -11964,8 +11922,6 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
static bool use_mmio_flip(struct intel_engine_cs *engine,
struct drm_i915_gem_object *obj)
{
- struct reservation_object *resv;
-
/*
* This is not being used for older platforms, because
* non-availability of flip done interrupt forces us to use
@@ -11987,12 +11943,7 @@ static bool use_mmio_flip(struct intel_engine_cs *engine,
else if (i915.enable_execlists)
return true;
- resv = i915_gem_object_get_dmabuf_resv(obj);
- if (resv && !reservation_object_test_signaled_rcu(resv, false))
- return true;
-
- return engine != i915_gem_active_get_engine(&obj->last_write,
- &obj->base.dev->struct_mutex);
+ return engine != i915_gem_object_last_write_engine(obj);
}
static void skl_do_mmio_flip(struct intel_crtc *intel_crtc,
@@ -12007,7 +11958,7 @@ static void skl_do_mmio_flip(struct intel_crtc *intel_crtc,
ctl = I915_READ(PLANE_CTL(pipe, 0));
ctl &= ~PLANE_CTL_TILED_MASK;
- switch (fb->modifier[0]) {
+ switch (fb->modifier) {
case DRM_FORMAT_MOD_NONE:
break;
case I915_FORMAT_MOD_X_TILED:
@@ -12020,7 +11971,7 @@ static void skl_do_mmio_flip(struct intel_crtc *intel_crtc,
ctl |= PLANE_CTL_TILED_YF;
break;
default:
- MISSING_CASE(fb->modifier[0]);
+ MISSING_CASE(fb->modifier);
}
/*
@@ -12045,7 +11996,7 @@ static void ilk_do_mmio_flip(struct intel_crtc *intel_crtc,
dspcntr = I915_READ(reg);
- if (fb->modifier[0] == I915_FORMAT_MOD_X_TILED)
+ if (fb->modifier == I915_FORMAT_MOD_X_TILED)
dspcntr |= DISPPLANE_TILED;
else
dspcntr &= ~DISPPLANE_TILED;
@@ -12065,17 +12016,8 @@ static void intel_mmio_flip_work_func(struct work_struct *w)
struct intel_framebuffer *intel_fb =
to_intel_framebuffer(crtc->base.primary->fb);
struct drm_i915_gem_object *obj = intel_fb->obj;
- struct reservation_object *resv;
- if (work->flip_queued_req)
- WARN_ON(i915_wait_request(work->flip_queued_req,
- 0, NULL, NO_WAITBOOST));
-
- /* For framebuffer backed by dmabuf, wait for fence */
- resv = i915_gem_object_get_dmabuf_resv(obj);
- if (resv)
- WARN_ON(reservation_object_wait_timeout_rcu(resv, false, false,
- MAX_SCHEDULE_TIMEOUT) < 0);
+ WARN_ON(i915_gem_object_wait(obj, 0, MAX_SCHEDULE_TIMEOUT, NULL) < 0);
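/*
 * The single wait above is assumed to subsume both removed waits: the
 * write fences tracked on the object cover the old flip_queued_req as
 * well as any dma-buf reservation the framebuffer is backed by.
 */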
intel_pipe_update_start(crtc);
@@ -12138,8 +12080,7 @@ static bool __pageflip_stall_check_cs(struct drm_i915_private *dev_priv,
void intel_check_page_flip(struct drm_i915_private *dev_priv, int pipe)
{
struct drm_device *dev = &dev_priv->drm;
- struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
struct intel_flip_work *work;
WARN_ON(!in_interrupt());
@@ -12148,19 +12089,19 @@ void intel_check_page_flip(struct drm_i915_private *dev_priv, int pipe)
return;
spin_lock(&dev->event_lock);
- work = intel_crtc->flip_work;
+ work = crtc->flip_work;
if (work != NULL && !is_mmio_work(work) &&
- __pageflip_stall_check_cs(dev_priv, intel_crtc, work)) {
+ __pageflip_stall_check_cs(dev_priv, crtc, work)) {
WARN_ONCE(1,
"Kicking stuck page flip: queued at %d, now %d\n",
- work->flip_queued_vblank, intel_crtc_get_vblank_counter(intel_crtc));
- page_flip_completed(intel_crtc);
+ work->flip_queued_vblank, intel_crtc_get_vblank_counter(crtc));
+ page_flip_completed(crtc);
work = NULL;
}
if (work != NULL && !is_mmio_work(work) &&
- intel_crtc_get_vblank_counter(intel_crtc) - work->flip_queued_vblank > 1)
+ intel_crtc_get_vblank_counter(crtc) - work->flip_queued_vblank > 1)
intel_queue_rps_boost_for_request(work->flip_queued_req);
spin_unlock(&dev->event_lock);
}
@@ -12200,7 +12141,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
* TILEOFF/LINOFF registers can't be changed via MI display flips.
* Note that pitch changes could also affect these registers.
*/
- if (INTEL_INFO(dev)->gen > 3 &&
+ if (INTEL_GEN(dev_priv) > 3 &&
(fb->offsets[0] != crtc->primary->fb->offsets[0] ||
fb->pitches[0] != crtc->primary->fb->pitches[0]))
return -EINVAL;
@@ -12265,23 +12206,22 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
atomic_inc(&intel_crtc->unpin_work_count);
- if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev))
+ if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
work->flip_count = I915_READ(PIPE_FLIPCOUNT_G4X(pipe)) + 1;
- if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
- engine = &dev_priv->engine[BCS];
- if (fb->modifier[0] != old_fb->modifier[0])
+ if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
+ engine = dev_priv->engine[BCS];
+ if (fb->modifier != old_fb->modifier)
/* vlv: DISPLAY_FLIP fails to change tiling */
engine = NULL;
- } else if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) {
- engine = &dev_priv->engine[BCS];
- } else if (INTEL_INFO(dev)->gen >= 7) {
- engine = i915_gem_active_get_engine(&obj->last_write,
- &obj->base.dev->struct_mutex);
+ } else if (IS_IVYBRIDGE(dev_priv) || IS_HASWELL(dev_priv)) {
+ engine = dev_priv->engine[BCS];
+ } else if (INTEL_GEN(dev_priv) >= 7) {
+ engine = i915_gem_object_last_write_engine(obj);
if (engine == NULL || engine->id != RCS)
- engine = &dev_priv->engine[BCS];
+ engine = dev_priv->engine[BCS];
} else {
- engine = &dev_priv->engine[RCS];
+ engine = dev_priv->engine[RCS];
}
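/*
 * Summarizing the engine choice above: BCS on VLV/CHV (dropped to the
 * MMIO path when the tiling modifier changes, since DISPLAY_FLIP can't
 * change tiling there), BCS on IVB/HSW, the object's last-write engine
 * (falling back to BCS when it isn't RCS) on gen7+, and RCS otherwise.
 */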
mmio_flip = use_mmio_flip(engine, obj);
@@ -12309,10 +12249,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
if (mmio_flip) {
INIT_WORK(&work->mmio_work, intel_mmio_flip_work_func);
-
- work->flip_queued_req = i915_gem_active_get(&obj->last_write,
- &obj->base.dev->struct_mutex);
- schedule_work(&work->mmio_work);
+ queue_work(system_unbound_wq, &work->mmio_work);
} else {
request = i915_gem_request_alloc(engine, engine->last_context);
if (IS_ERR(request)) {
@@ -12335,6 +12272,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
i915_add_request_no_flush(request);
}
+ i915_gem_object_wait_priority(obj, 0, I915_PRIORITY_DISPLAY);
i915_gem_track_fb(intel_fb_obj(old_fb), obj,
to_intel_plane(primary)->frontbuffer_bit);
mutex_unlock(&dev->struct_mutex);
@@ -12358,7 +12296,7 @@ cleanup:
crtc->primary->fb = old_fb;
update_state_fb(crtc->primary);
- i915_gem_object_put_unlocked(obj);
+ i915_gem_object_put(obj);
drm_framebuffer_unreference(work->old_fb);
spin_lock_irq(&dev->event_lock);
@@ -12396,8 +12334,7 @@ retry:
goto retry;
}
- if (ret)
- drm_atomic_state_free(state);
+ drm_atomic_state_put(state);
if (ret == 0 && event) {
spin_lock_irq(&dev->event_lock);
@@ -12432,7 +12369,7 @@ static bool intel_wm_need_update(struct drm_plane *plane,
if (!cur->base.fb || !new->base.fb)
return false;
- if (cur->base.fb->modifier[0] != new->base.fb->modifier[0] ||
+ if (cur->base.fb->modifier != new->base.fb->modifier ||
cur->base.rotation != new->base.rotation ||
drm_rect_width(&new->base.src) != drm_rect_width(&cur->base.src) ||
drm_rect_height(&new->base.src) != drm_rect_height(&cur->base.src) ||
@@ -12471,7 +12408,7 @@ int intel_plane_atomic_calc_changes(struct drm_crtc_state *crtc_state,
struct drm_framebuffer *fb = plane_state->fb;
int ret;
- if (INTEL_GEN(dev) >= 9 && plane->type != DRM_PLANE_TYPE_CURSOR) {
+ if (INTEL_GEN(dev_priv) >= 9 && plane->type != DRM_PLANE_TYPE_CURSOR) {
ret = skl_update_scaler_plane(
to_intel_crtc_state(crtc_state),
to_intel_plane_state(plane_state));
@@ -12538,7 +12475,7 @@ int intel_plane_atomic_calc_changes(struct drm_crtc_state *crtc_state,
/* Pre-gen9 platforms need two-step watermark updates */
if ((pipe_config->update_wm_pre || pipe_config->update_wm_post) &&
- INTEL_INFO(dev)->gen < 9 && dev_priv->display.optimize_watermarks)
+ INTEL_GEN(dev_priv) < 9 && dev_priv->display.optimize_watermarks)
to_intel_crtc_state(crtc_state)->wm.need_postvbl_update = true;
if (visible || was_visible)
@@ -12550,7 +12487,7 @@ int intel_plane_atomic_calc_changes(struct drm_crtc_state *crtc_state,
* cstate->update_wm was already set above, so this flag will
* take effect when we commit and program watermarks.
*/
- if (plane->type == DRM_PLANE_TYPE_OVERLAY && IS_IVYBRIDGE(dev) &&
+ if (plane->type == DRM_PLANE_TYPE_OVERLAY && IS_IVYBRIDGE(dev_priv) &&
needs_scaling(to_intel_plane_state(plane_state)) &&
!needs_scaling(old_plane_state))
pipe_config->disable_lp_wm = true;
@@ -12643,7 +12580,7 @@ static int intel_crtc_atomic_check(struct drm_crtc *crtc,
* old state and the new state. We can program these
* immediately.
*/
- ret = dev_priv->display.compute_intermediate_wm(crtc->dev,
+ ret = dev_priv->display.compute_intermediate_wm(dev,
intel_crtc,
pipe_config);
if (ret) {
@@ -12655,7 +12592,7 @@ static int intel_crtc_atomic_check(struct drm_crtc *crtc,
pipe_config->wm.ilk.intermediate = pipe_config->wm.ilk.optimal;
}
- if (INTEL_INFO(dev)->gen >= 9) {
+ if (INTEL_GEN(dev_priv) >= 9) {
if (mode_changed)
ret = skl_update_scaler_crtc(pipe_config);
@@ -12726,15 +12663,16 @@ static int
compute_baseline_pipe_bpp(struct intel_crtc *crtc,
struct intel_crtc_state *pipe_config)
{
- struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct drm_atomic_state *state;
struct drm_connector *connector;
struct drm_connector_state *connector_state;
int bpp, i;
- if ((IS_G4X(dev) || IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)))
+ if ((IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
+ IS_CHERRYVIEW(dev_priv)))
bpp = 10*3;
- else if (INTEL_INFO(dev)->gen >= 5)
+ else if (INTEL_GEN(dev_priv) >= 5)
bpp = 12*3;
else
bpp = 8*3;
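/* i.e. a baseline of 30, 36 or 24 bits per pixel, respectively */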
@@ -12767,73 +12705,81 @@ static void intel_dump_crtc_timings(const struct drm_display_mode *mode)
mode->crtc_vsync_end, mode->crtc_vtotal, mode->type, mode->flags);
}
+static inline void
+intel_dump_m_n_config(struct intel_crtc_state *pipe_config, char *id,
+ unsigned int lane_count, struct intel_link_m_n *m_n)
+{
+ DRM_DEBUG_KMS("%s: lanes: %i; gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n",
+ id, lane_count,
+ m_n->gmch_m, m_n->gmch_n,
+ m_n->link_m, m_n->link_n, m_n->tu);
+}
+
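/*
 * With drm.debug covering KMS, the helper above prints one line per
 * link, e.g. (values purely illustrative):
 *   dp m_n: lanes: 4; gmch_m: 5927165, gmch_n: 8388608, link_m: 740895, link_n: 1048576, tu: 64
 */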
static void intel_dump_pipe_config(struct intel_crtc *crtc,
struct intel_crtc_state *pipe_config,
const char *context)
{
struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_plane *plane;
struct intel_plane *intel_plane;
struct intel_plane_state *state;
struct drm_framebuffer *fb;
- DRM_DEBUG_KMS("[CRTC:%d:%s]%s config %p for pipe %c\n",
- crtc->base.base.id, crtc->base.name,
- context, pipe_config, pipe_name(crtc->pipe));
+ DRM_DEBUG_KMS("[CRTC:%d:%s]%s\n",
+ crtc->base.base.id, crtc->base.name, context);
- DRM_DEBUG_KMS("cpu_transcoder: %s\n", transcoder_name(pipe_config->cpu_transcoder));
- DRM_DEBUG_KMS("pipe bpp: %i, dithering: %i\n",
+ DRM_DEBUG_KMS("cpu_transcoder: %s, pipe bpp: %i, dithering: %i\n",
+ transcoder_name(pipe_config->cpu_transcoder),
pipe_config->pipe_bpp, pipe_config->dither);
- DRM_DEBUG_KMS("fdi/pch: %i, lanes: %i, gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n",
- pipe_config->has_pch_encoder,
- pipe_config->fdi_lanes,
- pipe_config->fdi_m_n.gmch_m, pipe_config->fdi_m_n.gmch_n,
- pipe_config->fdi_m_n.link_m, pipe_config->fdi_m_n.link_n,
- pipe_config->fdi_m_n.tu);
- DRM_DEBUG_KMS("dp: %i, lanes: %i, gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n",
- intel_crtc_has_dp_encoder(pipe_config),
- pipe_config->lane_count,
- pipe_config->dp_m_n.gmch_m, pipe_config->dp_m_n.gmch_n,
- pipe_config->dp_m_n.link_m, pipe_config->dp_m_n.link_n,
- pipe_config->dp_m_n.tu);
-
- DRM_DEBUG_KMS("dp: %i, lanes: %i, gmch_m2: %u, gmch_n2: %u, link_m2: %u, link_n2: %u, tu2: %u\n",
- intel_crtc_has_dp_encoder(pipe_config),
- pipe_config->lane_count,
- pipe_config->dp_m2_n2.gmch_m,
- pipe_config->dp_m2_n2.gmch_n,
- pipe_config->dp_m2_n2.link_m,
- pipe_config->dp_m2_n2.link_n,
- pipe_config->dp_m2_n2.tu);
+
+ if (pipe_config->has_pch_encoder)
+ intel_dump_m_n_config(pipe_config, "fdi",
+ pipe_config->fdi_lanes,
+ &pipe_config->fdi_m_n);
+
+ if (intel_crtc_has_dp_encoder(pipe_config)) {
+ intel_dump_m_n_config(pipe_config, "dp m_n",
+ pipe_config->lane_count, &pipe_config->dp_m_n);
+ if (pipe_config->has_drrs)
+ intel_dump_m_n_config(pipe_config, "dp m2_n2",
+ pipe_config->lane_count,
+ &pipe_config->dp_m2_n2);
+ }
DRM_DEBUG_KMS("audio: %i, infoframes: %i\n",
- pipe_config->has_audio,
- pipe_config->has_infoframe);
+ pipe_config->has_audio, pipe_config->has_infoframe);
DRM_DEBUG_KMS("requested mode:\n");
drm_mode_debug_printmodeline(&pipe_config->base.mode);
DRM_DEBUG_KMS("adjusted mode:\n");
drm_mode_debug_printmodeline(&pipe_config->base.adjusted_mode);
intel_dump_crtc_timings(&pipe_config->base.adjusted_mode);
- DRM_DEBUG_KMS("port clock: %d\n", pipe_config->port_clock);
- DRM_DEBUG_KMS("pipe src size: %dx%d\n",
+ DRM_DEBUG_KMS("port clock: %d, pipe src size: %dx%d\n",
+ pipe_config->port_clock,
pipe_config->pipe_src_w, pipe_config->pipe_src_h);
- DRM_DEBUG_KMS("num_scalers: %d, scaler_users: 0x%x, scaler_id: %d\n",
- crtc->num_scalers,
- pipe_config->scaler_state.scaler_users,
- pipe_config->scaler_state.scaler_id);
- DRM_DEBUG_KMS("gmch pfit: control: 0x%08x, ratios: 0x%08x, lvds border: 0x%08x\n",
- pipe_config->gmch_pfit.control,
- pipe_config->gmch_pfit.pgm_ratios,
- pipe_config->gmch_pfit.lvds_border_bits);
- DRM_DEBUG_KMS("pch pfit: pos: 0x%08x, size: 0x%08x, %s\n",
- pipe_config->pch_pfit.pos,
- pipe_config->pch_pfit.size,
- pipe_config->pch_pfit.enabled ? "enabled" : "disabled");
- DRM_DEBUG_KMS("ips: %i\n", pipe_config->ips_enabled);
- DRM_DEBUG_KMS("double wide: %i\n", pipe_config->double_wide);
-
- if (IS_BROXTON(dev)) {
+
+ if (INTEL_GEN(dev_priv) >= 9)
+ DRM_DEBUG_KMS("num_scalers: %d, scaler_users: 0x%x, scaler_id: %d\n",
+ crtc->num_scalers,
+ pipe_config->scaler_state.scaler_users,
+ pipe_config->scaler_state.scaler_id);
+
+ if (HAS_GMCH_DISPLAY(dev_priv))
+ DRM_DEBUG_KMS("gmch pfit: control: 0x%08x, ratios: 0x%08x, lvds border: 0x%08x\n",
+ pipe_config->gmch_pfit.control,
+ pipe_config->gmch_pfit.pgm_ratios,
+ pipe_config->gmch_pfit.lvds_border_bits);
+ else
+ DRM_DEBUG_KMS("pch pfit: pos: 0x%08x, size: 0x%08x, %s\n",
+ pipe_config->pch_pfit.pos,
+ pipe_config->pch_pfit.size,
+ enableddisabled(pipe_config->pch_pfit.enabled));
+
+ DRM_DEBUG_KMS("ips: %i, double wide: %i\n",
+ pipe_config->ips_enabled, pipe_config->double_wide);
+
+ if (IS_BROXTON(dev_priv)) {
DRM_DEBUG_KMS("dpll_hw_state: ebb0: 0x%x, ebb4: 0x%x,"
"pll0: 0x%x, pll1: 0x%x, pll2: 0x%x, pll3: 0x%x, "
"pll6: 0x%x, pll8: 0x%x, pll9: 0x%x, pll10: 0x%x, pcsdw12: 0x%x\n",
@@ -12848,13 +12794,13 @@ static void intel_dump_pipe_config(struct intel_crtc *crtc,
pipe_config->dpll_hw_state.pll9,
pipe_config->dpll_hw_state.pll10,
pipe_config->dpll_hw_state.pcsdw12);
- } else if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
+ } else if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
DRM_DEBUG_KMS("dpll_hw_state: "
"ctrl1: 0x%x, cfgcr1: 0x%x, cfgcr2: 0x%x\n",
pipe_config->dpll_hw_state.ctrl1,
pipe_config->dpll_hw_state.cfgcr1,
pipe_config->dpll_hw_state.cfgcr2);
- } else if (HAS_DDI(dev)) {
+ } else if (HAS_DDI(dev_priv)) {
DRM_DEBUG_KMS("dpll_hw_state: wrpll: 0x%x spll: 0x%x\n",
pipe_config->dpll_hw_state.wrpll,
pipe_config->dpll_hw_state.spll);
@@ -12869,7 +12815,7 @@ static void intel_dump_pipe_config(struct intel_crtc *crtc,
DRM_DEBUG_KMS("planes on this crtc\n");
list_for_each_entry(plane, &dev->mode_config.plane_list, head) {
- char *format_name;
+ struct drm_format_name_buf format_name;
intel_plane = to_intel_plane(plane);
if (intel_plane->pipe != crtc->pipe)
continue;
@@ -12882,23 +12828,20 @@ static void intel_dump_pipe_config(struct intel_crtc *crtc,
continue;
}
- format_name = drm_get_format_name(fb->pixel_format);
-
- DRM_DEBUG_KMS("[PLANE:%d:%s] enabled",
- plane->base.id, plane->name);
- DRM_DEBUG_KMS("\tFB:%d, fb = %ux%u format = %s",
- fb->base.id, fb->width, fb->height, format_name);
- DRM_DEBUG_KMS("\tscaler:%d src %dx%d+%d+%d dst %dx%d+%d+%d\n",
- state->scaler_id,
- state->base.src.x1 >> 16,
- state->base.src.y1 >> 16,
- drm_rect_width(&state->base.src) >> 16,
- drm_rect_height(&state->base.src) >> 16,
- state->base.dst.x1, state->base.dst.y1,
- drm_rect_width(&state->base.dst),
- drm_rect_height(&state->base.dst));
-
- kfree(format_name);
+ DRM_DEBUG_KMS("[PLANE:%d:%s] FB:%d, fb = %ux%u format = %s\n",
+ plane->base.id, plane->name,
+ fb->base.id, fb->width, fb->height,
+ drm_get_format_name(fb->pixel_format, &format_name));
+ if (INTEL_GEN(dev_priv) >= 9)
+ DRM_DEBUG_KMS("\tscaler:%d src %dx%d+%d+%d dst %dx%d+%d+%d\n",
+ state->scaler_id,
+ state->base.src.x1 >> 16,
+ state->base.src.y1 >> 16,
+ drm_rect_width(&state->base.src) >> 16,
+ drm_rect_height(&state->base.src) >> 16,
+ state->base.dst.x1, state->base.dst.y1,
+ drm_rect_width(&state->base.dst),
+ drm_rect_height(&state->base.dst));
}
}
@@ -12932,7 +12875,7 @@ static bool check_digital_port_conflicts(struct drm_atomic_state *state)
switch (encoder->type) {
unsigned int port_mask;
case INTEL_OUTPUT_UNKNOWN:
- if (WARN_ON(!HAS_DDI(dev)))
+ if (WARN_ON(!HAS_DDI(to_i915(dev))))
break;
case INTEL_OUTPUT_DP:
case INTEL_OUTPUT_HDMI:
@@ -13213,7 +13156,7 @@ intel_compare_link_m_n(const struct intel_link_m_n *m_n,
}
static bool
-intel_pipe_config_compare(struct drm_device *dev,
+intel_pipe_config_compare(struct drm_i915_private *dev_priv,
struct intel_crtc_state *current_config,
struct intel_crtc_state *pipe_config,
bool adjust)
@@ -13337,7 +13280,7 @@ intel_pipe_config_compare(struct drm_device *dev,
PIPE_CONF_CHECK_I(lane_count);
PIPE_CONF_CHECK_X(lane_lat_optim_mask);
- if (INTEL_INFO(dev)->gen < 8) {
+ if (INTEL_GEN(dev_priv) < 8) {
PIPE_CONF_CHECK_M_N(dp_m_n);
if (current_config->has_drrs)
@@ -13363,8 +13306,8 @@ intel_pipe_config_compare(struct drm_device *dev,
PIPE_CONF_CHECK_I(pixel_multiplier);
PIPE_CONF_CHECK_I(has_hdmi_sink);
- if ((INTEL_INFO(dev)->gen < 8 && !IS_HASWELL(dev)) ||
- IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
+ if ((INTEL_GEN(dev_priv) < 8 && !IS_HASWELL(dev_priv)) ||
+ IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
PIPE_CONF_CHECK_I(limited_color_range);
PIPE_CONF_CHECK_I(has_infoframe);
@@ -13386,7 +13329,7 @@ intel_pipe_config_compare(struct drm_device *dev,
PIPE_CONF_CHECK_X(gmch_pfit.control);
/* pfit ratios are autocomputed by the hw on gen4+ */
- if (INTEL_INFO(dev)->gen < 4)
+ if (INTEL_GEN(dev_priv) < 4)
PIPE_CONF_CHECK_X(gmch_pfit.pgm_ratios);
PIPE_CONF_CHECK_X(gmch_pfit.lvds_border_bits);
@@ -13404,7 +13347,7 @@ intel_pipe_config_compare(struct drm_device *dev,
}
/* BDW+ don't expose a synchronous way to read the state */
- if (IS_HASWELL(dev))
+ if (IS_HASWELL(dev_priv))
PIPE_CONF_CHECK_I(ips_enabled);
PIPE_CONF_CHECK_I(double_wide);
@@ -13423,7 +13366,7 @@ intel_pipe_config_compare(struct drm_device *dev,
PIPE_CONF_CHECK_X(dsi_pll.ctrl);
PIPE_CONF_CHECK_X(dsi_pll.div);
- if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5)
+ if (IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5)
PIPE_CONF_CHECK_I(pipe_bpp);
PIPE_CONF_CHECK_CLOCK_FUZZY(base.adjusted_mode.crtc_clock);
@@ -13461,33 +13404,67 @@ static void intel_pipe_config_sanity_check(struct drm_i915_private *dev_priv,
static void verify_wm_state(struct drm_crtc *crtc,
struct drm_crtc_state *new_state)
{
- struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc->dev);
struct skl_ddb_allocation hw_ddb, *sw_ddb;
- struct skl_ddb_entry *hw_entry, *sw_entry;
+ struct skl_pipe_wm hw_wm, *sw_wm;
+ struct skl_plane_wm *hw_plane_wm, *sw_plane_wm;
+ struct skl_ddb_entry *hw_ddb_entry, *sw_ddb_entry;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
const enum pipe pipe = intel_crtc->pipe;
- int plane;
+ int plane, level, max_level = ilk_wm_max_level(dev_priv);
- if (INTEL_INFO(dev)->gen < 9 || !new_state->active)
+ if (INTEL_GEN(dev_priv) < 9 || !new_state->active)
return;
+ skl_pipe_wm_get_hw_state(crtc, &hw_wm);
+ sw_wm = &to_intel_crtc_state(new_state)->wm.skl.optimal;
+
skl_ddb_get_hw_state(dev_priv, &hw_ddb);
sw_ddb = &dev_priv->wm.skl_hw.ddb;
/* planes */
- for_each_plane(dev_priv, pipe, plane) {
- hw_entry = &hw_ddb.plane[pipe][plane];
- sw_entry = &sw_ddb->plane[pipe][plane];
+ for_each_universal_plane(dev_priv, pipe, plane) {
+ hw_plane_wm = &hw_wm.planes[plane];
+ sw_plane_wm = &sw_wm->planes[plane];
+
+ /* Watermarks */
+ for (level = 0; level <= max_level; level++) {
+ if (skl_wm_level_equals(&hw_plane_wm->wm[level],
+ &sw_plane_wm->wm[level]))
+ continue;
- if (skl_ddb_entry_equal(hw_entry, sw_entry))
- continue;
+ DRM_ERROR("mismatch in WM pipe %c plane %d level %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
+ pipe_name(pipe), plane + 1, level,
+ sw_plane_wm->wm[level].plane_en,
+ sw_plane_wm->wm[level].plane_res_b,
+ sw_plane_wm->wm[level].plane_res_l,
+ hw_plane_wm->wm[level].plane_en,
+ hw_plane_wm->wm[level].plane_res_b,
+ hw_plane_wm->wm[level].plane_res_l);
+ }
+
+ if (!skl_wm_level_equals(&hw_plane_wm->trans_wm,
+ &sw_plane_wm->trans_wm)) {
+ DRM_ERROR("mismatch in trans WM pipe %c plane %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
+ pipe_name(pipe), plane + 1,
+ sw_plane_wm->trans_wm.plane_en,
+ sw_plane_wm->trans_wm.plane_res_b,
+ sw_plane_wm->trans_wm.plane_res_l,
+ hw_plane_wm->trans_wm.plane_en,
+ hw_plane_wm->trans_wm.plane_res_b,
+ hw_plane_wm->trans_wm.plane_res_l);
+ }
+
+ /* DDB */
+ hw_ddb_entry = &hw_ddb.plane[pipe][plane];
+ sw_ddb_entry = &sw_ddb->plane[pipe][plane];
- DRM_ERROR("mismatch in DDB state pipe %c plane %d "
- "(expected (%u,%u), found (%u,%u))\n",
- pipe_name(pipe), plane + 1,
- sw_entry->start, sw_entry->end,
- hw_entry->start, hw_entry->end);
+ if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) {
+ DRM_ERROR("mismatch in DDB state pipe %c plane %d (expected (%u,%u), found (%u,%u))\n",
+ pipe_name(pipe), plane + 1,
+ sw_ddb_entry->start, sw_ddb_entry->end,
+ hw_ddb_entry->start, hw_ddb_entry->end);
+ }
}
/*
@@ -13497,25 +13474,60 @@ static void verify_wm_state(struct drm_crtc *crtc,
* once the plane becomes visible, we can skip this check
*/
if (intel_crtc->cursor_addr) {
- hw_entry = &hw_ddb.plane[pipe][PLANE_CURSOR];
- sw_entry = &sw_ddb->plane[pipe][PLANE_CURSOR];
+ hw_plane_wm = &hw_wm.planes[PLANE_CURSOR];
+ sw_plane_wm = &sw_wm->planes[PLANE_CURSOR];
+
+ /* Watermarks */
+ for (level = 0; level <= max_level; level++) {
+ if (skl_wm_level_equals(&hw_plane_wm->wm[level],
+ &sw_plane_wm->wm[level]))
+ continue;
- if (!skl_ddb_entry_equal(hw_entry, sw_entry)) {
- DRM_ERROR("mismatch in DDB state pipe %c cursor "
- "(expected (%u,%u), found (%u,%u))\n",
+ DRM_ERROR("mismatch in WM pipe %c cursor level %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
+ pipe_name(pipe), level,
+ sw_plane_wm->wm[level].plane_en,
+ sw_plane_wm->wm[level].plane_res_b,
+ sw_plane_wm->wm[level].plane_res_l,
+ hw_plane_wm->wm[level].plane_en,
+ hw_plane_wm->wm[level].plane_res_b,
+ hw_plane_wm->wm[level].plane_res_l);
+ }
+
+ if (!skl_wm_level_equals(&hw_plane_wm->trans_wm,
+ &sw_plane_wm->trans_wm)) {
+ DRM_ERROR("mismatch in trans WM pipe %c cursor (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
pipe_name(pipe),
- sw_entry->start, sw_entry->end,
- hw_entry->start, hw_entry->end);
+ sw_plane_wm->trans_wm.plane_en,
+ sw_plane_wm->trans_wm.plane_res_b,
+ sw_plane_wm->trans_wm.plane_res_l,
+ hw_plane_wm->trans_wm.plane_en,
+ hw_plane_wm->trans_wm.plane_res_b,
+ hw_plane_wm->trans_wm.plane_res_l);
+ }
+
+ /* DDB */
+ hw_ddb_entry = &hw_ddb.plane[pipe][PLANE_CURSOR];
+ sw_ddb_entry = &sw_ddb->plane[pipe][PLANE_CURSOR];
+
+ if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) {
+ DRM_ERROR("mismatch in DDB state pipe %c cursor (expected (%u,%u), found (%u,%u))\n",
+ pipe_name(pipe),
+ sw_ddb_entry->start, sw_ddb_entry->end,
+ hw_ddb_entry->start, hw_ddb_entry->end);
}
}
}
static void
-verify_connector_state(struct drm_device *dev, struct drm_crtc *crtc)
+verify_connector_state(struct drm_device *dev,
+ struct drm_atomic_state *state,
+ struct drm_crtc *crtc)
{
struct drm_connector *connector;
+ struct drm_connector_state *old_conn_state;
+ int i;
- drm_for_each_connector(connector, dev) {
+ for_each_connector_in_state(state, connector, old_conn_state, i) {
struct drm_encoder *encoder = connector->encoder;
struct drm_connector_state *state = connector->state;
@@ -13630,7 +13642,7 @@ verify_crtc_state(struct drm_crtc *crtc,
intel_pipe_config_sanity_check(dev_priv, pipe_config);
sw_config = to_intel_crtc_state(crtc->state);
- if (!intel_pipe_config_compare(dev, sw_config,
+ if (!intel_pipe_config_compare(dev_priv, sw_config,
pipe_config, false)) {
I915_STATE_WARN(1, "pipe state doesn't match!\n");
intel_dump_pipe_config(intel_crtc, pipe_config,
@@ -13723,15 +13735,16 @@ verify_shared_dpll_state(struct drm_device *dev, struct drm_crtc *crtc,
static void
intel_modeset_verify_crtc(struct drm_crtc *crtc,
- struct drm_crtc_state *old_state,
- struct drm_crtc_state *new_state)
+ struct drm_atomic_state *state,
+ struct drm_crtc_state *old_state,
+ struct drm_crtc_state *new_state)
{
if (!needs_modeset(new_state) &&
!to_intel_crtc_state(new_state)->update_pipe)
return;
verify_wm_state(crtc, new_state);
- verify_connector_state(crtc->dev, crtc);
+ verify_connector_state(crtc->dev, state, crtc);
verify_crtc_state(crtc, old_state, new_state);
verify_shared_dpll_state(crtc->dev, crtc, old_state, new_state);
}
@@ -13747,16 +13760,17 @@ verify_disabled_dpll_state(struct drm_device *dev)
}
static void
-intel_modeset_verify_disabled(struct drm_device *dev)
+intel_modeset_verify_disabled(struct drm_device *dev,
+ struct drm_atomic_state *state)
{
verify_encoder_state(dev);
- verify_connector_state(dev, NULL);
+ verify_connector_state(dev, state, NULL);
verify_disabled_dpll_state(dev);
}
static void update_scanline_offset(struct intel_crtc *crtc)
{
- struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
/*
* The scanline counter increments at the leading edge of hsync.
@@ -13776,7 +13790,7 @@ static void update_scanline_offset(struct intel_crtc *crtc)
* there's an extra 1 line difference. So we need to add two instead of
* one to the value.
*/
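/*
 * Worked example with illustrative numbers: a gen2 interlaced mode
 * with crtc_vtotal = 525 gives vtotal = 525 / 2 = 262, and thus
 * scanline_offset = 262 - 1 = 261.
 */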
- if (IS_GEN2(dev)) {
+ if (IS_GEN2(dev_priv)) {
const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
int vtotal;
@@ -13785,7 +13799,7 @@ static void update_scanline_offset(struct intel_crtc *crtc)
vtotal /= 2;
crtc->scanline_offset = vtotal - 1;
- } else if (HAS_DDI(dev) &&
+ } else if (HAS_DDI(dev_priv) &&
intel_crtc_has_type(crtc->config, INTEL_OUTPUT_HDMI)) {
crtc->scanline_offset = 2;
} else
@@ -13970,8 +13984,9 @@ static int intel_modeset_checks(struct drm_atomic_state *state)
DRM_DEBUG_KMS("New cdclk calculated to be atomic %u, actual %u\n",
intel_state->cdclk, intel_state->dev_cdclk);
- } else
+ } else {
to_intel_atomic_state(state)->cdclk = dev_priv->atomic_cdclk_freq;
+ }
intel_modeset_clear_plls(state);
@@ -14048,7 +14063,7 @@ static int intel_atomic_check(struct drm_device *dev,
}
if (i915.fastboot &&
- intel_pipe_config_compare(dev,
+ intel_pipe_config_compare(dev_priv,
to_intel_crtc_state(crtc->state),
pipe_config, true)) {
crtc_state->mode_changed = false;
@@ -14072,8 +14087,9 @@ static int intel_atomic_check(struct drm_device *dev,
if (ret)
return ret;
- } else
- intel_state->cdclk = dev_priv->cdclk_freq;
+ } else {
+ intel_state->cdclk = dev_priv->atomic_cdclk_freq;
+ }
ret = drm_atomic_helper_check_planes(dev, state);
if (ret)
@@ -14084,13 +14100,10 @@ static int intel_atomic_check(struct drm_device *dev,
}
static int intel_atomic_prepare_commit(struct drm_device *dev,
- struct drm_atomic_state *state,
- bool nonblock)
+ struct drm_atomic_state *state)
{
struct drm_i915_private *dev_priv = to_i915(dev);
- struct drm_plane_state *plane_state;
struct drm_crtc_state *crtc_state;
- struct drm_plane *plane;
struct drm_crtc *crtc;
int i, ret;
@@ -14113,28 +14126,6 @@ static int intel_atomic_prepare_commit(struct drm_device *dev,
ret = drm_atomic_helper_prepare_planes(dev, state);
mutex_unlock(&dev->struct_mutex);
- if (!ret && !nonblock) {
- for_each_plane_in_state(state, plane, plane_state, i) {
- struct intel_plane_state *intel_plane_state =
- to_intel_plane_state(plane_state);
-
- if (!intel_plane_state->wait_req)
- continue;
-
- ret = i915_wait_request(intel_plane_state->wait_req,
- I915_WAIT_INTERRUPTIBLE,
- NULL, NULL);
- if (ret) {
- /* Any hang should be swallowed by the wait */
- WARN_ON(ret == -EIO);
- mutex_lock(&dev->struct_mutex);
- drm_atomic_helper_cleanup_planes(dev, state);
- mutex_unlock(&dev->struct_mutex);
- break;
- }
- }
- }
-
return ret;
}
@@ -14160,22 +14151,24 @@ static void intel_atomic_wait_for_vblanks(struct drm_device *dev,
return;
for_each_pipe(dev_priv, pipe) {
- struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
+ struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv,
+ pipe);
if (!((1 << pipe) & crtc_mask))
continue;
- ret = drm_crtc_vblank_get(crtc);
+ ret = drm_crtc_vblank_get(&crtc->base);
if (WARN_ON(ret != 0)) {
crtc_mask &= ~(1 << pipe);
continue;
}
- last_vblank_count[pipe] = drm_crtc_vblank_count(crtc);
+ last_vblank_count[pipe] = drm_crtc_vblank_count(&crtc->base);
}
for_each_pipe(dev_priv, pipe) {
- struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
+ struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv,
+ pipe);
long lret;
if (!((1 << pipe) & crtc_mask))
@@ -14183,12 +14176,12 @@ static void intel_atomic_wait_for_vblanks(struct drm_device *dev,
lret = wait_event_timeout(dev->vblank[pipe].queue,
last_vblank_count[pipe] !=
- drm_crtc_vblank_count(crtc),
+ drm_crtc_vblank_count(&crtc->base),
msecs_to_jiffies(50));
WARN(!lret, "pipe %c vblank wait timed out\n", pipe_name(pipe));
- drm_crtc_vblank_put(crtc);
+ drm_crtc_vblank_put(&crtc->base);
}
}
@@ -14262,16 +14255,23 @@ static void intel_update_crtcs(struct drm_atomic_state *state,
static void skl_update_crtcs(struct drm_atomic_state *state,
unsigned int *crtc_vblank_mask)
{
- struct drm_device *dev = state->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(state->dev);
struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
struct drm_crtc *crtc;
+ struct intel_crtc *intel_crtc;
struct drm_crtc_state *old_crtc_state;
- struct skl_ddb_allocation *new_ddb = &intel_state->wm_results.ddb;
- struct skl_ddb_allocation *cur_ddb = &dev_priv->wm.skl_hw.ddb;
+ struct intel_crtc_state *cstate;
unsigned int updated = 0;
bool progress;
enum pipe pipe;
+ int i;
+
+ const struct skl_ddb_entry *entries[I915_MAX_PIPES] = {};
+
+ for_each_crtc_in_state(state, crtc, old_crtc_state, i)
+ /* ignore allocations for crtcs that have been turned off. */
+ if (crtc->state->active)
+ entries[i] = &to_intel_crtc_state(old_crtc_state)->wm.skl.ddb;
/*
* Whenever the number of active pipes changes, we need to make sure we
@@ -14280,21 +14280,24 @@ static void skl_update_crtcs(struct drm_atomic_state *state,
* cause pipe underruns and other bad stuff.
*/
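/*
 * A minimal sketch of the collision test relied on below, assuming DDB
 * entries are half-open [start, end) block ranges (hypothetical helper,
 * not this patch's skl_ddb_allocation_overlaps()):
 */
static bool ddb_entries_overlap(const struct skl_ddb_entry *a,
				const struct skl_ddb_entry *b)
{
	/* the ranges intersect iff each one starts before the other ends */
	return a->start < b->end && b->start < a->end;
}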
do {
- int i;
progress = false;
for_each_crtc_in_state(state, crtc, old_crtc_state, i) {
bool vbl_wait = false;
unsigned int cmask = drm_crtc_mask(crtc);
- pipe = to_intel_crtc(crtc)->pipe;
- if (updated & cmask || !crtc->state->active)
+ intel_crtc = to_intel_crtc(crtc);
+ cstate = to_intel_crtc_state(crtc->state);
+ pipe = intel_crtc->pipe;
+
+ if (updated & cmask || !cstate->base.active)
continue;
- if (skl_ddb_allocation_overlaps(state, cur_ddb, new_ddb,
- pipe))
+
+ if (skl_ddb_allocation_overlaps(entries, &cstate->wm.skl.ddb, i))
continue;
updated |= cmask;
+ entries[i] = &cstate->wm.skl.ddb;
/*
* If this is an already active pipe, its DDB changed,
@@ -14302,7 +14305,8 @@ static void skl_update_crtcs(struct drm_atomic_state *state,
* then we need to wait for a vblank to pass for the
* new ddb allocation to take effect.
*/
- if (!skl_ddb_allocation_equals(cur_ddb, new_ddb, pipe) &&
+ if (!skl_ddb_entry_equal(&cstate->wm.skl.ddb,
+ &to_intel_crtc_state(old_crtc_state)->wm.skl.ddb) &&
!crtc->state->active_changed &&
intel_state->wm_results.dirty_pipes != updated)
vbl_wait = true;
@@ -14311,7 +14315,7 @@ static void skl_update_crtcs(struct drm_atomic_state *state,
crtc_vblank_mask);
if (vbl_wait)
- intel_wait_for_vblank(dev, pipe);
+ intel_wait_for_vblank(dev_priv, pipe);
progress = true;
}
@@ -14326,37 +14330,15 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
struct drm_crtc_state *old_crtc_state;
struct drm_crtc *crtc;
struct intel_crtc_state *intel_cstate;
- struct drm_plane *plane;
- struct drm_plane_state *plane_state;
bool hw_check = intel_state->modeset;
unsigned long put_domains[I915_MAX_PIPES] = {};
unsigned crtc_vblank_mask = 0;
- int i, ret;
-
- for_each_plane_in_state(state, plane, plane_state, i) {
- struct intel_plane_state *intel_plane_state =
- to_intel_plane_state(plane->state);
-
- if (!intel_plane_state->wait_req)
- continue;
-
- ret = i915_wait_request(intel_plane_state->wait_req,
- 0, NULL, NULL);
- /* EIO should be eaten, and we can't get interrupted in the
- * worker, and blocking commits have waited already. */
- WARN_ON(ret);
- }
+ int i;
drm_atomic_helper_wait_for_dependencies(state);
- if (intel_state->modeset) {
- memcpy(dev_priv->min_pixclk, intel_state->min_pixclk,
- sizeof(intel_state->min_pixclk));
- dev_priv->active_crtcs = intel_state->active_crtcs;
- dev_priv->atomic_cdclk_freq = intel_state->cdclk;
-
+ if (intel_state->modeset)
intel_display_power_get(dev_priv, POWER_DOMAIN_MODESET);
- }
for_each_crtc_in_state(state, crtc, old_crtc_state, i) {
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
@@ -14389,8 +14371,17 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
intel_check_cpu_fifo_underruns(dev_priv);
intel_check_pch_fifo_underruns(dev_priv);
- if (!crtc->state->active)
- intel_update_watermarks(crtc);
+ if (!crtc->state->active) {
+ /*
+ * Make sure we don't call initial_watermarks
+ * for ILK-style watermark updates.
+ */
+ if (dev_priv->display.atomic_update_watermarks)
+ dev_priv->display.initial_watermarks(intel_state,
+ to_intel_crtc_state(crtc->state));
+ else
+ intel_update_watermarks(intel_crtc);
+ }
}
}
@@ -14413,7 +14404,7 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
if (!intel_can_enable_sagv(state))
intel_disable_sagv(dev_priv);
- intel_modeset_verify_disabled(dev);
+ intel_modeset_verify_disabled(dev, state);
}
/* Complete the events for pipes that have now been disabled */
@@ -14456,7 +14447,8 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
intel_cstate = to_intel_crtc_state(crtc->state);
if (dev_priv->display.optimize_watermarks)
- dev_priv->display.optimize_watermarks(intel_cstate);
+ dev_priv->display.optimize_watermarks(intel_state,
+ intel_cstate);
}
for_each_crtc_in_state(state, crtc, old_crtc_state, i) {
@@ -14465,7 +14457,7 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
if (put_domains[i])
modeset_put_power_domains(dev_priv, put_domains[i]);
- intel_modeset_verify_crtc(crtc, old_crtc_state, crtc->state);
+ intel_modeset_verify_crtc(crtc, state, old_crtc_state, crtc->state);
}
if (intel_state->modeset && intel_can_enable_sagv(state))
@@ -14482,7 +14474,7 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
drm_atomic_helper_commit_cleanup_done(state);
- drm_atomic_state_free(state);
+ drm_atomic_state_put(state);
/* As one of the primary mmio accessors, KMS has a high likelihood
* of triggering bugs in unclaimed access. After we finish
@@ -14500,12 +14492,33 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
static void intel_atomic_commit_work(struct work_struct *work)
{
- struct drm_atomic_state *state = container_of(work,
- struct drm_atomic_state,
- commit_work);
+ struct drm_atomic_state *state =
+ container_of(work, struct drm_atomic_state, commit_work);
+
intel_atomic_commit_tail(state);
}
+static int __i915_sw_fence_call
+intel_atomic_commit_ready(struct i915_sw_fence *fence,
+ enum i915_sw_fence_notify notify)
+{
+ struct intel_atomic_state *state =
+ container_of(fence, struct intel_atomic_state, commit_ready);
+
+ switch (notify) {
+ case FENCE_COMPLETE:
+ if (state->base.commit_work.func)
+ queue_work(system_unbound_wq, &state->base.commit_work);
+ break;
+
+ case FENCE_FREE:
+ drm_atomic_state_put(&state->base);
+ break;
+ }
+
+ return NOTIFY_DONE;
+}
+
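/*
 * Reference flow assumed by the callback above: intel_atomic_commit()
 * takes one state reference for the i915_sw_fence (dropped here at
 * FENCE_FREE) and a second one for the commit work, which
 * intel_atomic_commit_tail() releases via drm_atomic_state_put();
 * FENCE_COMPLETE only queues the work for nonblocking commits, where
 * commit_work.func has been set.
 */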
static void intel_atomic_track_fbs(struct drm_atomic_state *state)
{
struct drm_plane_state *old_plane_state;
@@ -14527,10 +14540,6 @@ static void intel_atomic_track_fbs(struct drm_atomic_state *state)
* This function commits a top-level state object that has been validated
* with drm_atomic_helper_check().
*
- * FIXME: Atomic modeset support for i915 is not yet complete. At the moment
- * nonblocking commits are only safe for pure plane updates. Everything else
- * should work though.
- *
* RETURNS
* Zero for success or -errno.
*/
@@ -14542,33 +14551,42 @@ static int intel_atomic_commit(struct drm_device *dev,
struct drm_i915_private *dev_priv = to_i915(dev);
int ret = 0;
- if (intel_state->modeset && nonblock) {
- DRM_DEBUG_KMS("nonblocking commit for modeset not yet implemented.\n");
- return -EINVAL;
- }
-
ret = drm_atomic_helper_setup_commit(state, nonblock);
if (ret)
return ret;
- INIT_WORK(&state->commit_work, intel_atomic_commit_work);
+ drm_atomic_state_get(state);
+ i915_sw_fence_init(&intel_state->commit_ready,
+ intel_atomic_commit_ready);
- ret = intel_atomic_prepare_commit(dev, state, nonblock);
+ ret = intel_atomic_prepare_commit(dev, state);
if (ret) {
DRM_DEBUG_ATOMIC("Preparing state failed with %i\n", ret);
+ i915_sw_fence_commit(&intel_state->commit_ready);
return ret;
}
drm_atomic_helper_swap_state(state, true);
dev_priv->wm.distrust_bios_wm = false;
- dev_priv->wm.skl_results = intel_state->wm_results;
intel_shared_dpll_commit(state);
intel_atomic_track_fbs(state);
- if (nonblock)
- queue_work(system_unbound_wq, &state->commit_work);
- else
+ if (intel_state->modeset) {
+ memcpy(dev_priv->min_pixclk, intel_state->min_pixclk,
+ sizeof(intel_state->min_pixclk));
+ dev_priv->active_crtcs = intel_state->active_crtcs;
+ dev_priv->atomic_cdclk_freq = intel_state->cdclk;
+ }
+
+ drm_atomic_state_get(state);
+ INIT_WORK(&state->commit_work,
+ nonblock ? intel_atomic_commit_work : NULL);
+
+ i915_sw_fence_commit(&intel_state->commit_ready);
+ if (!nonblock) {
+ i915_sw_fence_wait(&intel_state->commit_ready);
intel_atomic_commit_tail(state);
+ }
return 0;
}
@@ -14606,9 +14624,8 @@ retry:
goto retry;
}
- if (ret)
out:
- drm_atomic_state_free(state);
+ drm_atomic_state_put(state);
}
/*
@@ -14681,19 +14698,21 @@ int
intel_prepare_plane_fb(struct drm_plane *plane,
struct drm_plane_state *new_state)
{
- struct drm_device *dev = plane->dev;
+ struct intel_atomic_state *intel_state =
+ to_intel_atomic_state(new_state->state);
+ struct drm_i915_private *dev_priv = to_i915(plane->dev);
struct drm_framebuffer *fb = new_state->fb;
struct drm_i915_gem_object *obj = intel_fb_obj(fb);
struct drm_i915_gem_object *old_obj = intel_fb_obj(plane->state->fb);
- struct reservation_object *resv;
- int ret = 0;
+ int ret;
if (!obj && !old_obj)
return 0;
if (old_obj) {
struct drm_crtc_state *crtc_state =
- drm_atomic_get_existing_crtc_state(new_state->state, plane->state->crtc);
+ drm_atomic_get_existing_crtc_state(new_state->state,
+ plane->state->crtc);
/* Big Hammer, we also need to ensure that any pending
* MI_WAIT_FOR_EVENT inside a user batch buffer on the
@@ -14706,52 +14725,58 @@ intel_prepare_plane_fb(struct drm_plane *plane,
* This should only fail upon a hung GPU, in which case we
* can safely continue.
*/
- if (needs_modeset(crtc_state))
- ret = i915_gem_object_wait_rendering(old_obj, true);
- if (ret) {
- /* GPU hangs should have been swallowed by the wait */
- WARN_ON(ret == -EIO);
- return ret;
+ if (needs_modeset(crtc_state)) {
+ ret = i915_sw_fence_await_reservation(&intel_state->commit_ready,
+ old_obj->resv, NULL,
+ false, 0,
+ GFP_KERNEL);
+ if (ret < 0)
+ return ret;
}
}
+ if (new_state->fence) { /* explicit fencing */
+ ret = i915_sw_fence_await_dma_fence(&intel_state->commit_ready,
+ new_state->fence,
+ I915_FENCE_TIMEOUT,
+ GFP_KERNEL);
+ if (ret < 0)
+ return ret;
+ }
+
if (!obj)
return 0;
- /* For framebuffer backed by dmabuf, wait for fence */
- resv = i915_gem_object_get_dmabuf_resv(obj);
- if (resv) {
- long lret;
-
- lret = reservation_object_wait_timeout_rcu(resv, false, true,
- MAX_SCHEDULE_TIMEOUT);
- if (lret == -ERESTARTSYS)
- return lret;
+ if (!new_state->fence) { /* implicit fencing */
+ ret = i915_sw_fence_await_reservation(&intel_state->commit_ready,
+ obj->resv, NULL,
+ false, I915_FENCE_TIMEOUT,
+ GFP_KERNEL);
+ if (ret < 0)
+ return ret;
- WARN(lret < 0, "waiting returns %li\n", lret);
+ i915_gem_object_wait_priority(obj, 0, I915_PRIORITY_DISPLAY);
}
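/*
 * Fencing policy in the two branches above: an explicit fence supplied
 * by userspace is awaited on its own, while the implicit case waits on
 * the fences already attached to the object's reservation and bumps
 * the pending work to display priority.
 */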
if (plane->type == DRM_PLANE_TYPE_CURSOR &&
- INTEL_INFO(dev)->cursor_needs_physical) {
- int align = IS_I830(dev) ? 16 * 1024 : 256;
+ INTEL_INFO(dev_priv)->cursor_needs_physical) {
+ int align = IS_I830(dev_priv) ? 16 * 1024 : 256;
ret = i915_gem_object_attach_phys(obj, align);
- if (ret)
+ if (ret) {
DRM_DEBUG_KMS("failed to attach phys object\n");
+ return ret;
+ }
} else {
struct i915_vma *vma;
vma = intel_pin_and_fence_fb_obj(fb, new_state->rotation);
- if (IS_ERR(vma))
- ret = PTR_ERR(vma);
- }
-
- if (ret == 0) {
- to_intel_plane_state(new_state)->wait_req =
- i915_gem_active_get(&obj->last_write,
- &obj->base.dev->struct_mutex);
+ if (IS_ERR(vma)) {
+ DRM_DEBUG_KMS("failed to pin object\n");
+ return PTR_ERR(vma);
+ }
}
- return ret;
+ return 0;
}
/**
@@ -14767,9 +14792,8 @@ void
intel_cleanup_plane_fb(struct drm_plane *plane,
struct drm_plane_state *old_state)
{
- struct drm_device *dev = plane->dev;
+ struct drm_i915_private *dev_priv = to_i915(plane->dev);
struct intel_plane_state *old_intel_state;
- struct intel_plane_state *intel_state = to_intel_plane_state(plane->state);
struct drm_i915_gem_object *old_obj = intel_fb_obj(old_state->fb);
struct drm_i915_gem_object *obj = intel_fb_obj(plane->state->fb);
@@ -14779,11 +14803,8 @@ intel_cleanup_plane_fb(struct drm_plane *plane,
return;
if (old_obj && (plane->type != DRM_PLANE_TYPE_CURSOR ||
- !INTEL_INFO(dev)->cursor_needs_physical))
+ !INTEL_INFO(dev_priv)->cursor_needs_physical))
intel_unpin_fb_obj(old_state->fb, old_state->rotation);
-
- i915_gem_request_assign(&intel_state->wait_req, NULL);
- i915_gem_request_assign(&old_intel_state->wait_req, NULL);
}
int
@@ -14858,30 +14879,34 @@ static void intel_begin_crtc_commit(struct drm_crtc *crtc,
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- struct intel_crtc_state *old_intel_state =
+ struct intel_crtc_state *intel_cstate =
+ to_intel_crtc_state(crtc->state);
+ struct intel_crtc_state *old_intel_cstate =
to_intel_crtc_state(old_crtc_state);
+ struct intel_atomic_state *old_intel_state =
+ to_intel_atomic_state(old_crtc_state->state);
bool modeset = needs_modeset(crtc->state);
- enum pipe pipe = intel_crtc->pipe;
/* Perform vblank evasion around commit operation */
intel_pipe_update_start(intel_crtc);
if (modeset)
- return;
+ goto out;
if (crtc->state->color_mgmt_changed || to_intel_crtc_state(crtc->state)->update_pipe) {
intel_color_set_csc(crtc->state);
intel_color_load_luts(crtc->state);
}
- if (to_intel_crtc_state(crtc->state)->update_pipe)
- intel_update_pipe_config(intel_crtc, old_intel_state);
- else if (INTEL_GEN(dev_priv) >= 9) {
+ if (intel_cstate->update_pipe)
+ intel_update_pipe_config(intel_crtc, old_intel_cstate);
+ else if (INTEL_GEN(dev_priv) >= 9)
skl_detach_scalers(intel_crtc);
- I915_WRITE(PIPE_WM_LINETIME(pipe),
- dev_priv->wm.skl_hw.wm_linetime[pipe]);
- }
+out:
+ if (dev_priv->display.atomic_update_watermarks)
+ dev_priv->display.atomic_update_watermarks(old_intel_state,
+ intel_cstate);
}
static void intel_finish_crtc_commit(struct drm_crtc *crtc,
@@ -14901,9 +14926,6 @@ static void intel_finish_crtc_commit(struct drm_crtc *crtc,
*/
void intel_plane_destroy(struct drm_plane *plane)
{
- if (!plane)
- return;
-
drm_plane_cleanup(plane);
kfree(to_intel_plane(plane));
}
@@ -14917,53 +14939,63 @@ const struct drm_plane_funcs intel_plane_funcs = {
.atomic_set_property = intel_plane_atomic_set_property,
.atomic_duplicate_state = intel_plane_duplicate_state,
.atomic_destroy_state = intel_plane_destroy_state,
-
};
-static struct drm_plane *intel_primary_plane_create(struct drm_device *dev,
- int pipe)
+static struct intel_plane *
+intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
{
struct intel_plane *primary = NULL;
struct intel_plane_state *state = NULL;
const uint32_t *intel_primary_formats;
+ unsigned int supported_rotations;
unsigned int num_formats;
int ret;
primary = kzalloc(sizeof(*primary), GFP_KERNEL);
- if (!primary)
+ if (!primary) {
+ ret = -ENOMEM;
goto fail;
+ }
state = intel_create_plane_state(&primary->base);
- if (!state)
+ if (!state) {
+ ret = -ENOMEM;
goto fail;
+ }
+
primary->base.state = &state->base;
primary->can_scale = false;
primary->max_downscale = 1;
- if (INTEL_INFO(dev)->gen >= 9) {
+ if (INTEL_GEN(dev_priv) >= 9) {
primary->can_scale = true;
state->scaler_id = -1;
}
primary->pipe = pipe;
- primary->plane = pipe;
+ /*
+ * On gen2/3 only plane A can do FBC, but the panel fitter and LVDS
+ * port are hooked to pipe B. Hence we want plane A feeding pipe B.
+ */
+ if (HAS_FBC(dev_priv) && INTEL_GEN(dev_priv) < 4)
+ primary->plane = (enum plane) !pipe;
+ else
+ primary->plane = (enum plane) pipe;
primary->frontbuffer_bit = INTEL_FRONTBUFFER_PRIMARY(pipe);
primary->check_plane = intel_check_primary_plane;
- if (HAS_FBC(dev) && INTEL_INFO(dev)->gen < 4)
- primary->plane = !pipe;
- if (INTEL_INFO(dev)->gen >= 9) {
+ if (INTEL_GEN(dev_priv) >= 9) {
intel_primary_formats = skl_primary_formats;
num_formats = ARRAY_SIZE(skl_primary_formats);
primary->update_plane = skylake_update_primary_plane;
primary->disable_plane = skylake_disable_primary_plane;
- } else if (HAS_PCH_SPLIT(dev)) {
+ } else if (HAS_PCH_SPLIT(dev_priv)) {
intel_primary_formats = i965_primary_formats;
num_formats = ARRAY_SIZE(i965_primary_formats);
primary->update_plane = ironlake_update_primary_plane;
primary->disable_plane = i9xx_disable_primary_plane;
- } else if (INTEL_INFO(dev)->gen >= 4) {
+ } else if (INTEL_GEN(dev_priv) >= 4) {
intel_primary_formats = i965_primary_formats;
num_formats = ARRAY_SIZE(i965_primary_formats);
@@ -14977,57 +15009,56 @@ static struct drm_plane *intel_primary_plane_create(struct drm_device *dev,
primary->disable_plane = i9xx_disable_primary_plane;
}
- if (INTEL_INFO(dev)->gen >= 9)
- ret = drm_universal_plane_init(dev, &primary->base, 0,
- &intel_plane_funcs,
+ if (INTEL_GEN(dev_priv) >= 9)
+ ret = drm_universal_plane_init(&dev_priv->drm, &primary->base,
+ 0, &intel_plane_funcs,
intel_primary_formats, num_formats,
DRM_PLANE_TYPE_PRIMARY,
"plane 1%c", pipe_name(pipe));
- else if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev))
- ret = drm_universal_plane_init(dev, &primary->base, 0,
- &intel_plane_funcs,
+ else if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
+ ret = drm_universal_plane_init(&dev_priv->drm, &primary->base,
+ 0, &intel_plane_funcs,
intel_primary_formats, num_formats,
DRM_PLANE_TYPE_PRIMARY,
"primary %c", pipe_name(pipe));
else
- ret = drm_universal_plane_init(dev, &primary->base, 0,
- &intel_plane_funcs,
+ ret = drm_universal_plane_init(&dev_priv->drm, &primary->base,
+ 0, &intel_plane_funcs,
intel_primary_formats, num_formats,
DRM_PLANE_TYPE_PRIMARY,
"plane %c", plane_name(primary->plane));
if (ret)
goto fail;
- if (INTEL_INFO(dev)->gen >= 4)
- intel_create_rotation_property(dev, primary);
+ if (INTEL_GEN(dev_priv) >= 9) {
+ supported_rotations =
+ DRM_ROTATE_0 | DRM_ROTATE_90 |
+ DRM_ROTATE_180 | DRM_ROTATE_270;
+ } else if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) {
+ supported_rotations =
+ DRM_ROTATE_0 | DRM_ROTATE_180 |
+ DRM_REFLECT_X;
+ } else if (INTEL_GEN(dev_priv) >= 4) {
+ supported_rotations =
+ DRM_ROTATE_0 | DRM_ROTATE_180;
+ } else {
+ supported_rotations = DRM_ROTATE_0;
+ }
+
+ if (INTEL_GEN(dev_priv) >= 4)
+ drm_plane_create_rotation_property(&primary->base,
+ DRM_ROTATE_0,
+ supported_rotations);
drm_plane_helper_add(&primary->base, &intel_plane_helper_funcs);
- return &primary->base;
+ return primary;
fail:
kfree(state);
kfree(primary);
- return NULL;
-}
-
-void intel_create_rotation_property(struct drm_device *dev, struct intel_plane *plane)
-{
- if (!dev->mode_config.rotation_property) {
- unsigned long flags = DRM_ROTATE_0 |
- DRM_ROTATE_180;
-
- if (INTEL_INFO(dev)->gen >= 9)
- flags |= DRM_ROTATE_90 | DRM_ROTATE_270;
-
- dev->mode_config.rotation_property =
- drm_mode_create_rotation_property(dev, flags);
- }
- if (dev->mode_config.rotation_property)
- drm_object_attach_property(&plane->base.base,
- dev->mode_config.rotation_property,
- plane->base.state->rotation);
+ return ERR_PTR(ret);
}
static int
@@ -15054,7 +15085,8 @@ intel_check_cursor_plane(struct drm_plane *plane,
return 0;
/* Check for which cursor types we support */
- if (!cursor_size_ok(plane->dev, state->base.crtc_w, state->base.crtc_h)) {
+ if (!cursor_size_ok(to_i915(plane->dev), state->base.crtc_w,
+ state->base.crtc_h)) {
DRM_DEBUG("Cursor dimension %dx%d not supported\n",
state->base.crtc_w, state->base.crtc_h);
return -EINVAL;
@@ -15066,7 +15098,7 @@ intel_check_cursor_plane(struct drm_plane *plane,
return -ENOMEM;
}
- if (fb->modifier[0] != DRM_FORMAT_MOD_NONE) {
+ if (fb->modifier != DRM_FORMAT_MOD_NONE) {
DRM_DEBUG_KMS("cursor cannot be tiled\n");
return -EINVAL;
}
@@ -15081,7 +15113,7 @@ intel_check_cursor_plane(struct drm_plane *plane,
* display power well must be turned off and on again.
* Refuse to put the cursor into that compromised position.
*/
- if (IS_CHERRYVIEW(plane->dev) && pipe == PIPE_C &&
+ if (IS_CHERRYVIEW(to_i915(plane->dev)) && pipe == PIPE_C &&
state->base.visible && state->base.crtc_x < 0) {
DRM_DEBUG_KMS("CHV cursor C not allowed to straddle the left screen edge\n");
return -EINVAL;
@@ -15107,13 +15139,13 @@ intel_update_cursor_plane(struct drm_plane *plane,
{
struct drm_crtc *crtc = crtc_state->base.crtc;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- struct drm_device *dev = plane->dev;
+ struct drm_i915_private *dev_priv = to_i915(plane->dev);
struct drm_i915_gem_object *obj = intel_fb_obj(state->base.fb);
uint32_t addr;
if (!obj)
addr = 0;
- else if (!INTEL_INFO(dev)->cursor_needs_physical)
+ else if (!INTEL_INFO(dev_priv)->cursor_needs_physical)
addr = i915_gem_object_ggtt_offset(obj, NULL);
else
addr = obj->phys_handle->busaddr;
@@ -15122,20 +15154,25 @@ intel_update_cursor_plane(struct drm_plane *plane,
intel_crtc_update_cursor(crtc, state);
}
-static struct drm_plane *intel_cursor_plane_create(struct drm_device *dev,
- int pipe)
+static struct intel_plane *
+intel_cursor_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
{
struct intel_plane *cursor = NULL;
struct intel_plane_state *state = NULL;
int ret;
cursor = kzalloc(sizeof(*cursor), GFP_KERNEL);
- if (!cursor)
+ if (!cursor) {
+ ret = -ENOMEM;
goto fail;
+ }
state = intel_create_plane_state(&cursor->base);
- if (!state)
+ if (!state) {
+ ret = -ENOMEM;
goto fail;
+ }
+
cursor->base.state = &state->base;
cursor->can_scale = false;
@@ -15147,8 +15184,8 @@ static struct drm_plane *intel_cursor_plane_create(struct drm_device *dev,
cursor->update_plane = intel_update_cursor_plane;
cursor->disable_plane = intel_disable_cursor_plane;
- ret = drm_universal_plane_init(dev, &cursor->base, 0,
- &intel_plane_funcs,
+ ret = drm_universal_plane_init(&dev_priv->drm, &cursor->base,
+ 0, &intel_plane_funcs,
intel_cursor_formats,
ARRAY_SIZE(intel_cursor_formats),
DRM_PLANE_TYPE_CURSOR,
@@ -15156,102 +15193,106 @@ static struct drm_plane *intel_cursor_plane_create(struct drm_device *dev,
if (ret)
goto fail;
- if (INTEL_INFO(dev)->gen >= 4) {
- if (!dev->mode_config.rotation_property)
- dev->mode_config.rotation_property =
- drm_mode_create_rotation_property(dev,
- DRM_ROTATE_0 |
- DRM_ROTATE_180);
- if (dev->mode_config.rotation_property)
- drm_object_attach_property(&cursor->base.base,
- dev->mode_config.rotation_property,
- state->base.rotation);
- }
+ if (INTEL_GEN(dev_priv) >= 4)
+ drm_plane_create_rotation_property(&cursor->base,
+ DRM_ROTATE_0,
+ DRM_ROTATE_0 |
+ DRM_ROTATE_180);
- if (INTEL_INFO(dev)->gen >=9)
+ if (INTEL_GEN(dev_priv) >= 9)
state->scaler_id = -1;
drm_plane_helper_add(&cursor->base, &intel_plane_helper_funcs);
- return &cursor->base;
+ return cursor;
fail:
kfree(state);
kfree(cursor);
- return NULL;
+ return ERR_PTR(ret);
}
-static void skl_init_scalers(struct drm_device *dev, struct intel_crtc *intel_crtc,
- struct intel_crtc_state *crtc_state)
+static void skl_init_scalers(struct drm_i915_private *dev_priv,
+ struct intel_crtc *crtc,
+ struct intel_crtc_state *crtc_state)
{
+ struct intel_crtc_scaler_state *scaler_state =
+ &crtc_state->scaler_state;
int i;
- struct intel_scaler *intel_scaler;
- struct intel_crtc_scaler_state *scaler_state = &crtc_state->scaler_state;
- for (i = 0; i < intel_crtc->num_scalers; i++) {
- intel_scaler = &scaler_state->scalers[i];
- intel_scaler->in_use = 0;
- intel_scaler->mode = PS_SCALER_MODE_DYN;
+ for (i = 0; i < crtc->num_scalers; i++) {
+ struct intel_scaler *scaler = &scaler_state->scalers[i];
+
+ scaler->in_use = 0;
+ scaler->mode = PS_SCALER_MODE_DYN;
}
scaler_state->scaler_id = -1;
}
-static void intel_crtc_init(struct drm_device *dev, int pipe)
+static int intel_crtc_init(struct drm_i915_private *dev_priv, enum pipe pipe)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *intel_crtc;
struct intel_crtc_state *crtc_state = NULL;
- struct drm_plane *primary = NULL;
- struct drm_plane *cursor = NULL;
- int ret;
+ struct intel_plane *primary = NULL;
+ struct intel_plane *cursor = NULL;
+ int sprite, ret;
intel_crtc = kzalloc(sizeof(*intel_crtc), GFP_KERNEL);
- if (intel_crtc == NULL)
- return;
+ if (!intel_crtc)
+ return -ENOMEM;
crtc_state = kzalloc(sizeof(*crtc_state), GFP_KERNEL);
- if (!crtc_state)
+ if (!crtc_state) {
+ ret = -ENOMEM;
goto fail;
+ }
intel_crtc->config = crtc_state;
intel_crtc->base.state = &crtc_state->base;
crtc_state->base.crtc = &intel_crtc->base;
/* initialize shared scalers */
- if (INTEL_INFO(dev)->gen >= 9) {
+ if (INTEL_GEN(dev_priv) >= 9) {
if (pipe == PIPE_C)
intel_crtc->num_scalers = 1;
else
intel_crtc->num_scalers = SKL_NUM_SCALERS;
- skl_init_scalers(dev, intel_crtc, crtc_state);
+ skl_init_scalers(dev_priv, intel_crtc, crtc_state);
}
- primary = intel_primary_plane_create(dev, pipe);
- if (!primary)
+ primary = intel_primary_plane_create(dev_priv, pipe);
+ if (IS_ERR(primary)) {
+ ret = PTR_ERR(primary);
goto fail;
+ }
- cursor = intel_cursor_plane_create(dev, pipe);
- if (!cursor)
+ for_each_sprite(dev_priv, pipe, sprite) {
+ struct intel_plane *plane;
+
+ plane = intel_sprite_plane_create(dev_priv, pipe, sprite);
+ if (IS_ERR(plane)) {
+ ret = PTR_ERR(plane);
+ goto fail;
+ }
+ }
+
+ cursor = intel_cursor_plane_create(dev_priv, pipe);
+ if (IS_ERR(cursor)) {
+ ret = PTR_ERR(cursor);
goto fail;
+ }
- ret = drm_crtc_init_with_planes(dev, &intel_crtc->base, primary,
- cursor, &intel_crtc_funcs,
+ ret = drm_crtc_init_with_planes(&dev_priv->drm, &intel_crtc->base,
+ &primary->base, &cursor->base,
+ &intel_crtc_funcs,
"pipe %c", pipe_name(pipe));
if (ret)
goto fail;
- /*
- * On gen2/3 only plane A can do fbc, but the panel fitter and lvds port
- * is hooked to pipe B. Hence we want plane A feeding pipe B.
- */
intel_crtc->pipe = pipe;
- intel_crtc->plane = pipe;
- if (HAS_FBC(dev) && INTEL_INFO(dev)->gen < 4) {
- DRM_DEBUG_KMS("swapping pipes & planes for FBC\n");
- intel_crtc->plane = !pipe;
- }
+ intel_crtc->plane = primary->plane;
intel_crtc->cursor_base = ~0;
intel_crtc->cursor_cntl = ~0;
@@ -15261,21 +15302,26 @@ static void intel_crtc_init(struct drm_device *dev, int pipe)
BUG_ON(pipe >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) ||
dev_priv->plane_to_crtc_mapping[intel_crtc->plane] != NULL);
- dev_priv->plane_to_crtc_mapping[intel_crtc->plane] = &intel_crtc->base;
- dev_priv->pipe_to_crtc_mapping[intel_crtc->pipe] = &intel_crtc->base;
+ dev_priv->plane_to_crtc_mapping[intel_crtc->plane] = intel_crtc;
+ dev_priv->pipe_to_crtc_mapping[intel_crtc->pipe] = intel_crtc;
drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs);
intel_color_init(&intel_crtc->base);
WARN_ON(drm_crtc_index(&intel_crtc->base) != intel_crtc->pipe);
- return;
+
+ return 0;
fail:
- intel_plane_destroy(primary);
- intel_plane_destroy(cursor);
+ /*
+ * drm_mode_config_cleanup() will free up any
+ * crtcs/planes already initialized.
+ */
kfree(crtc_state);
kfree(intel_crtc);
+
+ return ret;
}
enum pipe intel_get_pipe_from_connector(struct intel_connector *connector)
@@ -15325,40 +15371,37 @@ static int intel_encoder_clones(struct intel_encoder *encoder)
return index_mask;
}
-static bool has_edp_a(struct drm_device *dev)
+static bool has_edp_a(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
-
- if (!IS_MOBILE(dev))
+ if (!IS_MOBILE(dev_priv))
return false;
if ((I915_READ(DP_A) & DP_DETECTED) == 0)
return false;
- if (IS_GEN5(dev) && (I915_READ(FUSE_STRAP) & ILK_eDP_A_DISABLE))
+ if (IS_GEN5(dev_priv) && (I915_READ(FUSE_STRAP) & ILK_eDP_A_DISABLE))
return false;
return true;
}
-static bool intel_crt_present(struct drm_device *dev)
+static bool intel_crt_present(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
-
- if (INTEL_INFO(dev)->gen >= 9)
+ if (INTEL_GEN(dev_priv) >= 9)
return false;
- if (IS_HSW_ULT(dev) || IS_BDW_ULT(dev))
+ if (IS_HSW_ULT(dev_priv) || IS_BDW_ULT(dev_priv))
return false;
- if (IS_CHERRYVIEW(dev))
+ if (IS_CHERRYVIEW(dev_priv))
return false;
- if (HAS_PCH_LPT_H(dev) && I915_READ(SFUSE_STRAP) & SFUSE_STRAP_CRT_DISABLED)
+ if (HAS_PCH_LPT_H(dev_priv) &&
+ I915_READ(SFUSE_STRAP) & SFUSE_STRAP_CRT_DISABLED)
return false;
/* DDI E can't be used if DDI A requires 4 lanes */
- if (HAS_DDI(dev) && I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES)
+ if (HAS_DDI(dev_priv) && I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES)
return false;
if (!dev_priv->vbt.int_crt_support)
@@ -15418,10 +15461,10 @@ static void intel_setup_outputs(struct drm_device *dev)
*/
intel_lvds_init(dev);
- if (intel_crt_present(dev))
+ if (intel_crt_present(dev_priv))
intel_crt_init(dev);
- if (IS_BROXTON(dev)) {
+ if (IS_BROXTON(dev_priv)) {
/*
* FIXME: Broxton doesn't support port detection via the
* DDI_BUF_CTL_A or SFUSE_STRAP registers, find another way to
@@ -15432,7 +15475,7 @@ static void intel_setup_outputs(struct drm_device *dev)
intel_ddi_init(dev, PORT_C);
intel_dsi_init(dev);
- } else if (HAS_DDI(dev)) {
+ } else if (HAS_DDI(dev_priv)) {
int found;
/*
@@ -15442,7 +15485,7 @@ static void intel_setup_outputs(struct drm_device *dev)
*/
found = I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_INIT_DISPLAY_DETECTED;
/* WaIgnoreDDIAStrap: skl */
- if (found || IS_SKYLAKE(dev) || IS_KABYLAKE(dev))
+ if (found || IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
intel_ddi_init(dev, PORT_A);
/* DDI B, C and D detection is indicated by the SFUSE_STRAP
@@ -15458,17 +15501,17 @@ static void intel_setup_outputs(struct drm_device *dev)
/*
* On SKL we don't have a way to detect DDI-E so we rely on VBT.
*/
- if ((IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) &&
+ if ((IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) &&
(dev_priv->vbt.ddi_port_info[PORT_E].supports_dp ||
dev_priv->vbt.ddi_port_info[PORT_E].supports_dvi ||
dev_priv->vbt.ddi_port_info[PORT_E].supports_hdmi))
intel_ddi_init(dev, PORT_E);
- } else if (HAS_PCH_SPLIT(dev)) {
+ } else if (HAS_PCH_SPLIT(dev_priv)) {
int found;
- dpd_is_edp = intel_dp_is_edp(dev, PORT_D);
+ dpd_is_edp = intel_dp_is_edp(dev_priv, PORT_D);
- if (has_edp_a(dev))
+ if (has_edp_a(dev_priv))
intel_dp_init(dev, DP_A, PORT_A);
if (I915_READ(PCH_HDMIB) & SDVO_DETECTED) {
@@ -15491,7 +15534,7 @@ static void intel_setup_outputs(struct drm_device *dev)
if (I915_READ(PCH_DP_D) & DP_DETECTED)
intel_dp_init(dev, PCH_DP_D, PORT_D);
- } else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
+ } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
bool has_edp, has_port;
/*
@@ -15509,21 +15552,21 @@ static void intel_setup_outputs(struct drm_device *dev)
 * trust the port type the VBT declares, as we've seen at least
 * HDMI ports that the VBT claims are DP or eDP.
*/
- has_edp = intel_dp_is_edp(dev, PORT_B);
+ has_edp = intel_dp_is_edp(dev_priv, PORT_B);
has_port = intel_bios_is_port_present(dev_priv, PORT_B);
if (I915_READ(VLV_DP_B) & DP_DETECTED || has_port)
has_edp &= intel_dp_init(dev, VLV_DP_B, PORT_B);
if ((I915_READ(VLV_HDMIB) & SDVO_DETECTED || has_port) && !has_edp)
intel_hdmi_init(dev, VLV_HDMIB, PORT_B);
- has_edp = intel_dp_is_edp(dev, PORT_C);
+ has_edp = intel_dp_is_edp(dev_priv, PORT_C);
has_port = intel_bios_is_port_present(dev_priv, PORT_C);
if (I915_READ(VLV_DP_C) & DP_DETECTED || has_port)
has_edp &= intel_dp_init(dev, VLV_DP_C, PORT_C);
if ((I915_READ(VLV_HDMIC) & SDVO_DETECTED || has_port) && !has_edp)
intel_hdmi_init(dev, VLV_HDMIC, PORT_C);
- if (IS_CHERRYVIEW(dev)) {
+ if (IS_CHERRYVIEW(dev_priv)) {
/*
* eDP not supported on port D,
* so no need to worry about it
@@ -15536,18 +15579,18 @@ static void intel_setup_outputs(struct drm_device *dev)
}
intel_dsi_init(dev);
- } else if (!IS_GEN2(dev) && !IS_PINEVIEW(dev)) {
+ } else if (!IS_GEN2(dev_priv) && !IS_PINEVIEW(dev_priv)) {
bool found = false;
if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) {
DRM_DEBUG_KMS("probing SDVOB\n");
found = intel_sdvo_init(dev, GEN3_SDVOB, PORT_B);
- if (!found && IS_G4X(dev)) {
+ if (!found && IS_G4X(dev_priv)) {
DRM_DEBUG_KMS("probing HDMI on SDVOB\n");
intel_hdmi_init(dev, GEN4_HDMIB, PORT_B);
}
- if (!found && IS_G4X(dev))
+ if (!found && IS_G4X(dev_priv))
intel_dp_init(dev, DP_B, PORT_B);
}
@@ -15560,21 +15603,20 @@ static void intel_setup_outputs(struct drm_device *dev)
if (!found && (I915_READ(GEN3_SDVOC) & SDVO_DETECTED)) {
- if (IS_G4X(dev)) {
+ if (IS_G4X(dev_priv)) {
DRM_DEBUG_KMS("probing HDMI on SDVOC\n");
intel_hdmi_init(dev, GEN4_HDMIC, PORT_C);
}
- if (IS_G4X(dev))
+ if (IS_G4X(dev_priv))
intel_dp_init(dev, DP_C, PORT_C);
}
- if (IS_G4X(dev) &&
- (I915_READ(DP_D) & DP_DETECTED))
+ if (IS_G4X(dev_priv) && (I915_READ(DP_D) & DP_DETECTED))
intel_dp_init(dev, DP_D, PORT_D);
- } else if (IS_GEN2(dev))
+ } else if (IS_GEN2(dev_priv))
intel_dvo_init(dev);
- if (SUPPORTS_TV(dev))
+ if (SUPPORTS_TV(dev_priv))
intel_tv_init(dev);
intel_psr_init(dev);
@@ -15629,6 +15671,8 @@ static int intel_user_framebuffer_dirty(struct drm_framebuffer *fb,
struct drm_i915_gem_object *obj = intel_fb->obj;
mutex_lock(&dev->struct_mutex);
+ if (obj->pin_display && obj->cache_dirty)
+ i915_gem_clflush_object(obj, true);
intel_fb_obj_flush(obj, false, ORIGIN_DIRTYFB);
mutex_unlock(&dev->struct_mutex);
@@ -15642,10 +15686,10 @@ static const struct drm_framebuffer_funcs intel_fb_funcs = {
};
static
-u32 intel_fb_pitch_limit(struct drm_device *dev, uint64_t fb_modifier,
- uint32_t pixel_format)
+u32 intel_fb_pitch_limit(struct drm_i915_private *dev_priv,
+ uint64_t fb_modifier, uint32_t pixel_format)
{
- u32 gen = INTEL_INFO(dev)->gen;
+ u32 gen = INTEL_INFO(dev_priv)->gen;
if (gen >= 9) {
int cpp = drm_format_plane_cpp(pixel_format, 0);
@@ -15654,7 +15698,8 @@ u32 intel_fb_pitch_limit(struct drm_device *dev, uint64_t fb_modifier,
* pixels and 32K bytes."
*/
return min(8192 * cpp, 32768);
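/* Editorial note (worked example, not part of the patch): with the
 * Bspec limit of "8192 pixels and 32K bytes", the cap returned above
 * depends on the format's bytes per pixel:
 *   cpp == 4 (e.g. XRGB8888): min(8192 * 4, 32768) == 32768
 *   cpp == 2 (e.g. RGB565):   min(8192 * 2, 32768) == 16384
 *   cpp == 1 (e.g. C8):       min(8192 * 1, 32768) ==  8192
 */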
- } else if (gen >= 5 && !IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev)) {
+ } else if (gen >= 5 && !IS_VALLEYVIEW(dev_priv) &&
+ !IS_CHERRYVIEW(dev_priv)) {
return 32*1024;
} else if (gen >= 4) {
if (fb_modifier == I915_FORMAT_MOD_X_TILED)
@@ -15681,7 +15726,7 @@ static int intel_framebuffer_init(struct drm_device *dev,
unsigned int tiling = i915_gem_object_get_tiling(obj);
int ret;
u32 pitch_limit, stride_alignment;
- char *format_name;
+ struct drm_format_name_buf format_name;
WARN_ON(!mutex_is_locked(&dev->struct_mutex));
@@ -15708,7 +15753,7 @@ static int intel_framebuffer_init(struct drm_device *dev,
switch (mode_cmd->modifier[0]) {
case I915_FORMAT_MOD_Y_TILED:
case I915_FORMAT_MOD_Yf_TILED:
- if (INTEL_INFO(dev)->gen < 9) {
+ if (INTEL_GEN(dev_priv) < 9) {
DRM_DEBUG("Unsupported tiling 0x%llx!\n",
mode_cmd->modifier[0]);
return -EINVAL;
@@ -15741,7 +15786,7 @@ static int intel_framebuffer_init(struct drm_device *dev,
return -EINVAL;
}
- pitch_limit = intel_fb_pitch_limit(dev, mode_cmd->modifier[0],
+ pitch_limit = intel_fb_pitch_limit(dev_priv, mode_cmd->modifier[0],
mode_cmd->pixel_format);
if (mode_cmd->pitches[0] > pitch_limit) {
DRM_DEBUG("%s pitch (%u) must be at less than %d\n",
@@ -15771,37 +15816,33 @@ static int intel_framebuffer_init(struct drm_device *dev,
case DRM_FORMAT_ARGB8888:
break;
case DRM_FORMAT_XRGB1555:
- if (INTEL_INFO(dev)->gen > 3) {
- format_name = drm_get_format_name(mode_cmd->pixel_format);
- DRM_DEBUG("unsupported pixel format: %s\n", format_name);
- kfree(format_name);
+ if (INTEL_GEN(dev_priv) > 3) {
+ DRM_DEBUG("unsupported pixel format: %s\n",
+ drm_get_format_name(mode_cmd->pixel_format, &format_name));
return -EINVAL;
}
break;
case DRM_FORMAT_ABGR8888:
- if (!IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev) &&
- INTEL_INFO(dev)->gen < 9) {
- format_name = drm_get_format_name(mode_cmd->pixel_format);
- DRM_DEBUG("unsupported pixel format: %s\n", format_name);
- kfree(format_name);
+ if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv) &&
+ INTEL_GEN(dev_priv) < 9) {
+ DRM_DEBUG("unsupported pixel format: %s\n",
+ drm_get_format_name(mode_cmd->pixel_format, &format_name));
return -EINVAL;
}
break;
case DRM_FORMAT_XBGR8888:
case DRM_FORMAT_XRGB2101010:
case DRM_FORMAT_XBGR2101010:
- if (INTEL_INFO(dev)->gen < 4) {
- format_name = drm_get_format_name(mode_cmd->pixel_format);
- DRM_DEBUG("unsupported pixel format: %s\n", format_name);
- kfree(format_name);
+ if (INTEL_GEN(dev_priv) < 4) {
+ DRM_DEBUG("unsupported pixel format: %s\n",
+ drm_get_format_name(mode_cmd->pixel_format, &format_name));
return -EINVAL;
}
break;
case DRM_FORMAT_ABGR2101010:
- if (!IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev)) {
- format_name = drm_get_format_name(mode_cmd->pixel_format);
- DRM_DEBUG("unsupported pixel format: %s\n", format_name);
- kfree(format_name);
+ if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) {
+ DRM_DEBUG("unsupported pixel format: %s\n",
+ drm_get_format_name(mode_cmd->pixel_format, &format_name));
return -EINVAL;
}
break;
@@ -15809,17 +15850,15 @@ static int intel_framebuffer_init(struct drm_device *dev,
case DRM_FORMAT_UYVY:
case DRM_FORMAT_YVYU:
case DRM_FORMAT_VYUY:
- if (INTEL_INFO(dev)->gen < 5) {
- format_name = drm_get_format_name(mode_cmd->pixel_format);
- DRM_DEBUG("unsupported pixel format: %s\n", format_name);
- kfree(format_name);
+ if (INTEL_GEN(dev_priv) < 5) {
+ DRM_DEBUG("unsupported pixel format: %s\n",
+ drm_get_format_name(mode_cmd->pixel_format, &format_name));
return -EINVAL;
}
break;
default:
- format_name = drm_get_format_name(mode_cmd->pixel_format);
- DRM_DEBUG("unsupported pixel format: %s\n", format_name);
- kfree(format_name);
+ DRM_DEBUG("unsupported pixel format: %s\n",
+ drm_get_format_name(mode_cmd->pixel_format, &format_name));
return -EINVAL;
}
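
The repeated rewrites in this switch track an API change in drm_get_format_name(): the old variant returned a kmalloc'ed string the caller had to kfree() (easy to leak on the early returns above), while the new one formats into a caller-provided struct drm_format_name_buf. A before/after sketch, assuming the drm core signatures implied by the hunk:

/* Before: heap-allocated name, must be freed on every path. */
char *name = drm_get_format_name(fmt);
DRM_DEBUG("unsupported pixel format: %s\n", name);
kfree(name);

/* After: stack buffer owned by the caller, nothing to free. */
struct drm_format_name_buf buf;
DRM_DEBUG("unsupported pixel format: %s\n",
          drm_get_format_name(fmt, &buf));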
@@ -15860,17 +15899,11 @@ intel_user_framebuffer_create(struct drm_device *dev,
fb = intel_framebuffer_create(dev, &mode_cmd, obj);
if (IS_ERR(fb))
- i915_gem_object_put_unlocked(obj);
+ i915_gem_object_put(obj);
return fb;
}
-#ifndef CONFIG_DRM_FBDEV_EMULATION
-static inline void intel_fbdev_output_poll_changed(struct drm_device *dev)
-{
-}
-#endif
-
static const struct drm_mode_config_funcs intel_mode_funcs = {
.fb_create = intel_user_framebuffer_create,
.output_poll_changed = intel_fbdev_output_poll_changed,
@@ -16246,12 +16279,11 @@ static void intel_init_quirks(struct drm_device *dev)
}
/* Disable the VGA plane that we never use */
-static void i915_disable_vga(struct drm_device *dev)
+static void i915_disable_vga(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
struct pci_dev *pdev = dev_priv->drm.pdev;
u8 sr1;
- i915_reg_t vga_reg = i915_vgacntrl_reg(dev);
+ i915_reg_t vga_reg = i915_vgacntrl_reg(dev_priv);
/* WaEnableVGAAccessThroughIOPort:ctg,elk,ilk,snb,ivb,vlv,hsw */
vga_get_uninterruptible(pdev, VGA_RSRC_LEGACY_IO);
@@ -16269,11 +16301,11 @@ void intel_modeset_init_hw(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
- intel_update_cdclk(dev);
+ intel_update_cdclk(dev_priv);
dev_priv->atomic_cdclk_freq = dev_priv->cdclk_freq;
- intel_init_clock_gating(dev);
+ intel_init_clock_gating(dev_priv);
}
/*
@@ -16290,6 +16322,7 @@ static void sanitize_watermarks(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_atomic_state *state;
+ struct intel_atomic_state *intel_state;
struct drm_crtc *crtc;
struct drm_crtc_state *cstate;
struct drm_modeset_acquire_ctx ctx;
@@ -16318,12 +16351,14 @@ retry:
if (WARN_ON(IS_ERR(state)))
goto fail;
+ intel_state = to_intel_atomic_state(state);
+
/*
* Hardware readout is the only time we don't want to calculate
* intermediate watermarks (since we don't trust the current
* watermarks).
*/
- to_intel_atomic_state(state)->skip_intermediate_wm = true;
+ intel_state->skip_intermediate_wm = true;
ret = intel_atomic_check(dev, state);
if (ret) {
@@ -16339,7 +16374,7 @@ retry:
* BIOS-programmed watermarks untouched and hope for the best.
*/
WARN(true, "Could not determine valid watermarks for inherited state\n");
- goto fail;
+ goto put_state;
}
/* Write calculated watermark values back */
@@ -16347,20 +16382,20 @@ retry:
struct intel_crtc_state *cs = to_intel_crtc_state(cstate);
cs->wm.need_postvbl_update = true;
- dev_priv->display.optimize_watermarks(cs);
+ dev_priv->display.optimize_watermarks(intel_state, cs);
}
- drm_atomic_state_free(state);
+put_state:
+ drm_atomic_state_put(state);
fail:
drm_modeset_drop_locks(&ctx);
drm_modeset_acquire_fini(&ctx);
}
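
drm_atomic_state_free() becomes drm_atomic_state_put() because atomic states are now reference counted; the success and failure paths both drop the same reference rather than choosing between free and leak. A sketch of the assumed lifecycle:

struct drm_atomic_state *state = drm_atomic_state_alloc(dev); /* ref == 1 */

/* ... populate, check, maybe commit (commit may take its own refs) ... */

drm_atomic_state_put(state); /* drop ours; freed on the last put */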
-void intel_modeset_init(struct drm_device *dev)
+int intel_modeset_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
struct i915_ggtt *ggtt = &dev_priv->ggtt;
- int sprite, ret;
enum pipe pipe;
struct intel_crtc *crtc;
@@ -16378,10 +16413,10 @@ void intel_modeset_init(struct drm_device *dev)
intel_init_quirks(dev);
- intel_init_pm(dev);
+ intel_init_pm(dev_priv);
- if (INTEL_INFO(dev)->num_pipes == 0)
- return;
+ if (INTEL_INFO(dev_priv)->num_pipes == 0)
+ return 0;
/*
* There may be no VBT; and if the BIOS enabled SSC we can
@@ -16389,7 +16424,7 @@ void intel_modeset_init(struct drm_device *dev)
* BIOS isn't using it, don't assume it will work even if the VBT
* indicates as much.
*/
- if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
+ if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) {
bool bios_lvds_use_ssc = !!(I915_READ(PCH_DREF_CONTROL) &
DREF_SSC1_ENABLE);
@@ -16401,10 +16436,10 @@ void intel_modeset_init(struct drm_device *dev)
}
}
- if (IS_GEN2(dev)) {
+ if (IS_GEN2(dev_priv)) {
dev->mode_config.max_width = 2048;
dev->mode_config.max_height = 2048;
- } else if (IS_GEN3(dev)) {
+ } else if (IS_GEN3(dev_priv)) {
dev->mode_config.max_width = 4096;
dev->mode_config.max_height = 4096;
} else {
@@ -16412,10 +16447,10 @@ void intel_modeset_init(struct drm_device *dev)
dev->mode_config.max_height = 8192;
}
- if (IS_845G(dev) || IS_I865G(dev)) {
- dev->mode_config.cursor_width = IS_845G(dev) ? 64 : 512;
+ if (IS_845G(dev_priv) || IS_I865G(dev_priv)) {
+ dev->mode_config.cursor_width = IS_845G(dev_priv) ? 64 : 512;
dev->mode_config.cursor_height = 1023;
- } else if (IS_GEN2(dev)) {
+ } else if (IS_GEN2(dev_priv)) {
dev->mode_config.cursor_width = GEN2_CURSOR_WIDTH;
dev->mode_config.cursor_height = GEN2_CURSOR_HEIGHT;
} else {
@@ -16426,29 +16461,30 @@ void intel_modeset_init(struct drm_device *dev)
dev->mode_config.fb_base = ggtt->mappable_base;
DRM_DEBUG_KMS("%d display pipe%s available.\n",
- INTEL_INFO(dev)->num_pipes,
- INTEL_INFO(dev)->num_pipes > 1 ? "s" : "");
+ INTEL_INFO(dev_priv)->num_pipes,
+ INTEL_INFO(dev_priv)->num_pipes > 1 ? "s" : "");
for_each_pipe(dev_priv, pipe) {
- intel_crtc_init(dev, pipe);
- for_each_sprite(dev_priv, pipe, sprite) {
- ret = intel_plane_init(dev, pipe, sprite);
- if (ret)
- DRM_DEBUG_KMS("pipe %c sprite %c init failed: %d\n",
- pipe_name(pipe), sprite_name(pipe, sprite), ret);
+ int ret;
+
+ ret = intel_crtc_init(dev_priv, pipe);
+ if (ret) {
+ drm_mode_config_cleanup(dev);
+ return ret;
}
}
intel_update_czclk(dev_priv);
- intel_update_cdclk(dev);
+ intel_update_cdclk(dev_priv);
+ dev_priv->atomic_cdclk_freq = dev_priv->cdclk_freq;
intel_shared_dpll_init(dev);
if (dev_priv->max_cdclk_freq == 0)
- intel_update_max_cdclk(dev);
+ intel_update_max_cdclk(dev_priv);
/* Just disable it once at startup */
- i915_disable_vga(dev);
+ i915_disable_vga(dev_priv);
intel_setup_outputs(dev);
drm_modeset_lock_all(dev);
@@ -16484,6 +16520,8 @@ void intel_modeset_init(struct drm_device *dev)
* since the watermark calculation done here will use pstate->fb.
*/
sanitize_watermarks(dev);
+
+ return 0;
}
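
intel_modeset_init() now returns an error code instead of void, matching the intel_crtc_init() conversion above. A hypothetical caller (not shown in this diff) would propagate it during driver load, along these lines:

/* Hypothetical load-path caller; names are illustrative. */
ret = intel_modeset_init(dev);
if (ret)
        goto cleanup;   /* CRTC cleanup already ran via drm_mode_config_cleanup() */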
static void intel_enable_pipe_a(struct drm_device *dev)
@@ -16513,11 +16551,10 @@ static void intel_enable_pipe_a(struct drm_device *dev)
static bool
intel_check_plane_mapping(struct intel_crtc *crtc)
{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
u32 val;
- if (INTEL_INFO(dev)->num_pipes == 1)
+ if (INTEL_INFO(dev_priv)->num_pipes == 1)
return true;
val = I915_READ(DSPCNTR(!crtc->plane));
@@ -16591,7 +16628,7 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc)
/* We need to sanitize the plane -> pipe mapping first because this will
* disable the crtc (and hence change the state) if it is wrong. Note
* that gen4+ has a fixed plane -> pipe mapping. */
- if (INTEL_INFO(dev)->gen < 4 && !intel_check_plane_mapping(crtc)) {
+ if (INTEL_GEN(dev_priv) < 4 && !intel_check_plane_mapping(crtc)) {
bool plane;
DRM_DEBUG_KMS("[CRTC:%d:%s] wrong plane connection detected!\n",
@@ -16621,7 +16658,7 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc)
if (crtc->active && !intel_crtc_has_encoders(crtc))
intel_crtc_disable_noatomic(&crtc->base);
- if (crtc->active || HAS_GMCH_DISPLAY(dev)) {
+ if (crtc->active || HAS_GMCH_DISPLAY(dev_priv)) {
/*
* We start out with underrun reporting disabled to avoid races.
* For correct bookkeeping mark this on active crtcs.
@@ -16693,21 +16730,18 @@ static void intel_sanitize_encoder(struct intel_encoder *encoder)
* the crtc fixup. */
}
-void i915_redisable_vga_power_on(struct drm_device *dev)
+void i915_redisable_vga_power_on(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
- i915_reg_t vga_reg = i915_vgacntrl_reg(dev);
+ i915_reg_t vga_reg = i915_vgacntrl_reg(dev_priv);
if (!(I915_READ(vga_reg) & VGA_DISP_DISABLE)) {
DRM_DEBUG_KMS("Something enabled VGA plane, disabling it\n");
- i915_disable_vga(dev);
+ i915_disable_vga(dev_priv);
}
}
-void i915_redisable_vga(struct drm_device *dev)
+void i915_redisable_vga(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
-
/* This function can be called either from intel_modeset_setup_hw_state or
* at a very early point in our resume sequence, where the power well
* structures are not yet restored. Since this function is at a very
@@ -16718,7 +16752,7 @@ void i915_redisable_vga(struct drm_device *dev)
if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_VGA))
return;
- i915_redisable_vga_power_on(dev);
+ i915_redisable_vga_power_on(dev_priv);
intel_display_power_put(dev_priv, POWER_DOMAIN_VGA);
}
@@ -16790,7 +16824,7 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
DRM_DEBUG_KMS("[CRTC:%d:%s] hw state readout: %s\n",
crtc->base.base.id, crtc->base.name,
- crtc->active ? "enabled" : "disabled");
+ enableddisabled(crtc->active));
}
for (i = 0; i < dev_priv->num_shared_dpll; i++) {
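
enableddisabled() replaces the open-coded "enabled"/"disabled" ternaries in the readout messages here and below. Its definition is not part of this hunk; presumably it is a trivial helper along these lines:

/* Assumed helper, inferred from its use in the debug messages. */
static inline const char *enableddisabled(bool v)
{
        return v ? "enabled" : "disabled";
}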
@@ -16813,7 +16847,8 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
pipe = 0;
if (encoder->get_hw_state(encoder, &pipe)) {
- crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
+ crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
+
encoder->base.crtc = &crtc->base;
crtc->config->output_types |= 1 << encoder->type;
encoder->get_config(encoder, crtc->config);
@@ -16822,9 +16857,8 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
}
DRM_DEBUG_KMS("[ENCODER:%d:%s] hw state readout: %s, pipe %c\n",
- encoder->base.base.id,
- encoder->base.name,
- encoder->base.crtc ? "enabled" : "disabled",
+ encoder->base.base.id, encoder->base.name,
+ enableddisabled(encoder->base.crtc),
pipe_name(pipe));
}
@@ -16853,9 +16887,8 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
connector->base.encoder = NULL;
}
DRM_DEBUG_KMS("[CONNECTOR:%d:%s] hw state readout: %s\n",
- connector->base.base.id,
- connector->base.name,
- connector->base.encoder ? "enabled" : "disabled");
+ connector->base.base.id, connector->base.name,
+ enableddisabled(connector->base.encoder));
}
for_each_intel_crtc(dev, crtc) {
@@ -16914,7 +16947,8 @@ intel_modeset_setup_hw_state(struct drm_device *dev)
}
for_each_pipe(dev_priv, pipe) {
- crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
+ crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
+
intel_sanitize_crtc(crtc);
intel_dump_pipe_config(crtc, crtc->config,
"[setup_hw_state]");
@@ -16934,11 +16968,11 @@ intel_modeset_setup_hw_state(struct drm_device *dev)
pll->on = false;
}
- if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
+ if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
vlv_wm_get_hw_state(dev);
- else if (IS_GEN9(dev))
+ else if (IS_GEN9(dev_priv))
skl_wm_get_hw_state(dev);
- else if (HAS_PCH_SPLIT(dev))
+ else if (HAS_PCH_SPLIT(dev_priv))
ilk_wm_get_hw_state(dev);
for_each_intel_crtc(dev, crtc) {
@@ -16988,10 +17022,9 @@ void intel_display_resume(struct drm_device *dev)
drm_modeset_acquire_fini(&ctx);
mutex_unlock(&dev->mode_config.mutex);
- if (ret) {
+ if (ret)
DRM_ERROR("Restoring old state failed with %i\n", ret);
- drm_atomic_state_free(state);
- }
+ drm_atomic_state_put(state);
}
void intel_modeset_gem_init(struct drm_device *dev)
@@ -17103,10 +17136,9 @@ void intel_connector_attach_encoder(struct intel_connector *connector,
/*
* set vga decode state - true == enable VGA decode
*/
-int intel_modeset_vga_set_state(struct drm_device *dev, bool state)
+int intel_modeset_vga_set_state(struct drm_i915_private *dev_priv, bool state)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
- unsigned reg = INTEL_INFO(dev)->gen >= 6 ? SNB_GMCH_CTRL : INTEL_GMCH_CTRL;
+ unsigned reg = INTEL_GEN(dev_priv) >= 6 ? SNB_GMCH_CTRL : INTEL_GMCH_CTRL;
u16 gmch_ctrl;
if (pci_read_config_word(dev_priv->bridge_dev, reg, &gmch_ctrl)) {
@@ -17130,6 +17162,8 @@ int intel_modeset_vga_set_state(struct drm_device *dev, bool state)
return 0;
}
+#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
+
struct intel_display_error_state {
u32 power_well_driver;
@@ -17258,17 +17292,16 @@ intel_display_capture_error_state(struct drm_i915_private *dev_priv)
void
intel_display_print_error_state(struct drm_i915_error_state_buf *m,
- struct drm_device *dev,
+ struct drm_i915_private *dev_priv,
struct intel_display_error_state *error)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
int i;
if (!error)
return;
- err_printf(m, "Num Pipes: %d\n", INTEL_INFO(dev)->num_pipes);
- if (IS_HASWELL(dev) || IS_BROADWELL(dev))
+ err_printf(m, "Num Pipes: %d\n", INTEL_INFO(dev_priv)->num_pipes);
+ if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
err_printf(m, "PWR_WELL_CTL2: %08x\n",
error->power_well_driver);
for_each_pipe(dev_priv, i) {
@@ -17281,13 +17314,13 @@ intel_display_print_error_state(struct drm_i915_error_state_buf *m,
err_printf(m, "Plane [%d]:\n", i);
err_printf(m, " CNTR: %08x\n", error->plane[i].control);
err_printf(m, " STRIDE: %08x\n", error->plane[i].stride);
- if (INTEL_INFO(dev)->gen <= 3) {
+ if (INTEL_GEN(dev_priv) <= 3) {
err_printf(m, " SIZE: %08x\n", error->plane[i].size);
err_printf(m, " POS: %08x\n", error->plane[i].pos);
}
- if (INTEL_INFO(dev)->gen <= 7 && !IS_HASWELL(dev))
+ if (INTEL_GEN(dev_priv) <= 7 && !IS_HASWELL(dev_priv))
err_printf(m, " ADDR: %08x\n", error->plane[i].addr);
- if (INTEL_INFO(dev)->gen >= 4) {
+ if (INTEL_GEN(dev_priv) >= 4) {
err_printf(m, " SURF: %08x\n", error->plane[i].surface);
err_printf(m, " TILEOFF: %08x\n", error->plane[i].tile_offset);
}
@@ -17312,3 +17345,5 @@ intel_display_print_error_state(struct drm_i915_error_state_buf *m,
err_printf(m, " VSYNC: %08x\n", error->transcoder[i].vsync);
}
}
+
+#endif
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index bf344d08356a..d9bc19be855e 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -213,6 +213,81 @@ intel_dp_downstream_max_dotclock(struct intel_dp *intel_dp)
return max_dotclk;
}
+static int
+intel_dp_sink_rates(struct intel_dp *intel_dp, const int **sink_rates)
+{
+ if (intel_dp->num_sink_rates) {
+ *sink_rates = intel_dp->sink_rates;
+ return intel_dp->num_sink_rates;
+ }
+
+ *sink_rates = default_rates;
+
+ return (intel_dp_max_link_bw(intel_dp) >> 3) + 1;
+}
+
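+/*
+ * Editorial note (worked example, not part of the patch): when the sink
+ * reports no explicit rate table, (intel_dp_max_link_bw() >> 3) + 1 maps
+ * the standard DPCD link-bandwidth codes onto a count of default_rates
+ * entries:
+ *   DP_LINK_BW_1_62 == 0x06: (0x06 >> 3) + 1 == 1 -> {162000}
+ *   DP_LINK_BW_2_7  == 0x0a: (0x0a >> 3) + 1 == 2 -> {162000, 270000}
+ *   DP_LINK_BW_5_4  == 0x14: (0x14 >> 3) + 1 == 3 -> {162000, 270000, 540000}
+ */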
+static int
+intel_dp_source_rates(struct intel_dp *intel_dp, const int **source_rates)
+{
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
+ int size;
+
+ if (IS_BROXTON(dev_priv)) {
+ *source_rates = bxt_rates;
+ size = ARRAY_SIZE(bxt_rates);
+ } else if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
+ *source_rates = skl_rates;
+ size = ARRAY_SIZE(skl_rates);
+ } else {
+ *source_rates = default_rates;
+ size = ARRAY_SIZE(default_rates);
+ }
+
+ /* This depends on the fact that 5.4 is the last value in the array */
+ if (!intel_dp_source_supports_hbr2(intel_dp))
+ size--;
+
+ return size;
+}
+
+static int intersect_rates(const int *source_rates, int source_len,
+ const int *sink_rates, int sink_len,
+ int *common_rates)
+{
+ int i = 0, j = 0, k = 0;
+
+ while (i < source_len && j < sink_len) {
+ if (source_rates[i] == sink_rates[j]) {
+ if (WARN_ON(k >= DP_MAX_SUPPORTED_RATES))
+ return k;
+ common_rates[k] = source_rates[i];
+ ++k;
+ ++i;
+ ++j;
+ } else if (source_rates[i] < sink_rates[j]) {
+ ++i;
+ } else {
+ ++j;
+ }
+ }
+ return k;
+}
+
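
intersect_rates() is the standard two-pointer intersection of two ascending arrays, capped at DP_MAX_SUPPORTED_RATES. A minimal usage sketch (the rate values are illustrative):

const int src[] = { 162000, 216000, 270000, 324000, 432000, 540000 };
const int snk[] = { 162000, 270000, 540000 };
int common[DP_MAX_SUPPORTED_RATES];
int n = intersect_rates(src, ARRAY_SIZE(src),
                        snk, ARRAY_SIZE(snk), common);
/* n == 3; common == { 162000, 270000, 540000 } */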
+static int intel_dp_common_rates(struct intel_dp *intel_dp,
+ int *common_rates)
+{
+ const int *source_rates, *sink_rates;
+ int source_len, sink_len;
+
+ sink_len = intel_dp_sink_rates(intel_dp, &sink_rates);
+ source_len = intel_dp_source_rates(intel_dp, &source_rates);
+
+ return intersect_rates(source_rates, source_len,
+ sink_rates, sink_len,
+ common_rates);
+}
+
static enum drm_mode_status
intel_dp_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
@@ -320,8 +395,7 @@ static void
vlv_power_sequencer_kick(struct intel_dp *intel_dp)
{
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
- struct drm_device *dev = intel_dig_port->base.base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev);
enum pipe pipe = intel_dp->pps_pipe;
bool pll_enabled, release_cl_override = false;
enum dpio_phy phy = DPIO_PHY(pipe);
@@ -344,7 +418,7 @@ vlv_power_sequencer_kick(struct intel_dp *intel_dp)
DP |= DP_PORT_WIDTH(1);
DP |= DP_LINK_TRAIN_PAT_1;
- if (IS_CHERRYVIEW(dev))
+ if (IS_CHERRYVIEW(dev_priv))
DP |= DP_PIPE_SELECT_CHV(pipe);
else if (pipe == PIPE_B)
DP |= DP_PIPEB_SELECT;
@@ -356,10 +430,10 @@ vlv_power_sequencer_kick(struct intel_dp *intel_dp)
* So enable it temporarily if it's not already enabled.
*/
if (!pll_enabled) {
- release_cl_override = IS_CHERRYVIEW(dev) &&
+ release_cl_override = IS_CHERRYVIEW(dev_priv) &&
!chv_phy_powergate_ch(dev_priv, phy, ch, true);
- if (vlv_force_pll_on(dev, pipe, IS_CHERRYVIEW(dev) ?
+ if (vlv_force_pll_on(dev_priv, pipe, IS_CHERRYVIEW(dev_priv) ?
&chv_dpll[0].dpll : &vlv_dpll[0].dpll)) {
DRM_ERROR("Failed to force on pll for pipe %c!\n",
pipe_name(pipe));
@@ -383,7 +457,7 @@ vlv_power_sequencer_kick(struct intel_dp *intel_dp)
POSTING_READ(intel_dp->output_reg);
if (!pll_enabled) {
- vlv_force_pll_off(dev, pipe);
+ vlv_force_pll_off(dev_priv, pipe);
if (release_cl_override)
chv_phy_powergate_ch(dev_priv, phy, ch, false);
@@ -570,8 +644,8 @@ void intel_power_sequencer_reset(struct drm_i915_private *dev_priv)
struct drm_device *dev = &dev_priv->drm;
struct intel_encoder *encoder;
- if (WARN_ON(!IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev) &&
- !IS_BROXTON(dev)))
+ if (WARN_ON(!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv) &&
+ !IS_BROXTON(dev_priv)))
return;
/*
@@ -591,7 +665,7 @@ void intel_power_sequencer_reset(struct drm_i915_private *dev_priv)
continue;
intel_dp = enc_to_intel_dp(&encoder->base);
- if (IS_BROXTON(dev))
+ if (IS_BROXTON(dev_priv))
intel_dp->pps_reset = true;
else
intel_dp->pps_pipe = INVALID_PIPE;
@@ -664,7 +738,7 @@ static int edp_notify_handler(struct notifier_block *this, unsigned long code,
pps_lock(intel_dp);
- if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
+ if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
i915_reg_t pp_ctrl_reg, pp_div_reg;
u32 pp_div;
@@ -692,7 +766,7 @@ static bool edp_have_panel_power(struct intel_dp *intel_dp)
lockdep_assert_held(&dev_priv->pps_mutex);
- if ((IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) &&
+ if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
intel_dp->pps_pipe == INVALID_PIPE)
return false;
@@ -706,7 +780,7 @@ static bool edp_have_panel_vdd(struct intel_dp *intel_dp)
lockdep_assert_held(&dev_priv->pps_mutex);
- if ((IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) &&
+ if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
intel_dp->pps_pipe == INVALID_PIPE)
return false;
@@ -821,15 +895,16 @@ static uint32_t g4x_get_aux_send_ctl(struct intel_dp *intel_dp,
uint32_t aux_clock_divider)
{
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
- struct drm_device *dev = intel_dig_port->base.base.dev;
+ struct drm_i915_private *dev_priv =
+ to_i915(intel_dig_port->base.base.dev);
uint32_t precharge, timeout;
- if (IS_GEN6(dev))
+ if (IS_GEN6(dev_priv))
precharge = 3;
else
precharge = 5;
- if (IS_BROADWELL(dev) && intel_dig_port->port == PORT_A)
+ if (IS_BROADWELL(dev_priv) && intel_dig_port->port == PORT_A)
timeout = DP_AUX_CH_CTL_TIME_OUT_600us;
else
timeout = DP_AUX_CH_CTL_TIME_OUT_400us;
@@ -867,14 +942,14 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
uint8_t *recv, int recv_size)
{
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
- struct drm_device *dev = intel_dig_port->base.base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv =
+ to_i915(intel_dig_port->base.base.dev);
i915_reg_t ch_ctl = intel_dp->aux_ch_ctl_reg;
uint32_t aux_clock_divider;
int i, ret, recv_bytes;
uint32_t status;
int try, clock = 0;
- bool has_aux_irq = HAS_AUX_IRQ(dev);
+ bool has_aux_irq = HAS_AUX_IRQ(dev_priv);
bool vdd;
pps_lock(intel_dp);
@@ -1147,7 +1222,7 @@ static enum port intel_aux_port(struct drm_i915_private *dev_priv,
}
static i915_reg_t g4x_aux_ctl_reg(struct drm_i915_private *dev_priv,
- enum port port)
+ enum port port)
{
switch (port) {
case PORT_B:
@@ -1161,7 +1236,7 @@ static i915_reg_t g4x_aux_ctl_reg(struct drm_i915_private *dev_priv,
}
static i915_reg_t g4x_aux_data_reg(struct drm_i915_private *dev_priv,
- enum port port, int index)
+ enum port port, int index)
{
switch (port) {
case PORT_B:
@@ -1175,7 +1250,7 @@ static i915_reg_t g4x_aux_data_reg(struct drm_i915_private *dev_priv,
}
static i915_reg_t ilk_aux_ctl_reg(struct drm_i915_private *dev_priv,
- enum port port)
+ enum port port)
{
switch (port) {
case PORT_A:
@@ -1191,7 +1266,7 @@ static i915_reg_t ilk_aux_ctl_reg(struct drm_i915_private *dev_priv,
}
static i915_reg_t ilk_aux_data_reg(struct drm_i915_private *dev_priv,
- enum port port, int index)
+ enum port port, int index)
{
switch (port) {
case PORT_A:
@@ -1207,7 +1282,7 @@ static i915_reg_t ilk_aux_data_reg(struct drm_i915_private *dev_priv,
}
static i915_reg_t skl_aux_ctl_reg(struct drm_i915_private *dev_priv,
- enum port port)
+ enum port port)
{
switch (port) {
case PORT_A:
@@ -1222,7 +1297,7 @@ static i915_reg_t skl_aux_ctl_reg(struct drm_i915_private *dev_priv,
}
static i915_reg_t skl_aux_data_reg(struct drm_i915_private *dev_priv,
- enum port port, int index)
+ enum port port, int index)
{
switch (port) {
case PORT_A:
@@ -1237,7 +1312,7 @@ static i915_reg_t skl_aux_data_reg(struct drm_i915_private *dev_priv,
}
static i915_reg_t intel_aux_ctl_reg(struct drm_i915_private *dev_priv,
- enum port port)
+ enum port port)
{
if (INTEL_INFO(dev_priv)->gen >= 9)
return skl_aux_ctl_reg(dev_priv, port);
@@ -1248,7 +1323,7 @@ static i915_reg_t intel_aux_ctl_reg(struct drm_i915_private *dev_priv,
}
static i915_reg_t intel_aux_data_reg(struct drm_i915_private *dev_priv,
- enum port port, int index)
+ enum port port, int index)
{
if (INTEL_INFO(dev_priv)->gen >= 9)
return skl_aux_data_reg(dev_priv, port, index);
@@ -1290,78 +1365,37 @@ intel_dp_aux_init(struct intel_dp *intel_dp)
intel_dp->aux.transfer = intel_dp_aux_transfer;
}
-static int
-intel_dp_sink_rates(struct intel_dp *intel_dp, const int **sink_rates)
-{
- if (intel_dp->num_sink_rates) {
- *sink_rates = intel_dp->sink_rates;
- return intel_dp->num_sink_rates;
- }
-
- *sink_rates = default_rates;
-
- return (intel_dp_max_link_bw(intel_dp) >> 3) + 1;
-}
-
bool intel_dp_source_supports_hbr2(struct intel_dp *intel_dp)
{
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- struct drm_device *dev = dig_port->base.base.dev;
-
- /* WaDisableHBR2:skl */
- if (IS_SKL_REVID(dev, 0, SKL_REVID_B0))
- return false;
+ struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
- if ((IS_HASWELL(dev) && !IS_HSW_ULX(dev)) || IS_BROADWELL(dev) ||
- (INTEL_INFO(dev)->gen >= 9))
+ if ((IS_HASWELL(dev_priv) && !IS_HSW_ULX(dev_priv)) ||
+ IS_BROADWELL(dev_priv) || (INTEL_GEN(dev_priv) >= 9))
return true;
else
return false;
}
-static int
-intel_dp_source_rates(struct intel_dp *intel_dp, const int **source_rates)
-{
- struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- struct drm_device *dev = dig_port->base.base.dev;
- int size;
-
- if (IS_BROXTON(dev)) {
- *source_rates = bxt_rates;
- size = ARRAY_SIZE(bxt_rates);
- } else if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
- *source_rates = skl_rates;
- size = ARRAY_SIZE(skl_rates);
- } else {
- *source_rates = default_rates;
- size = ARRAY_SIZE(default_rates);
- }
-
- /* This depends on the fact that 5.4 is last value in the array */
- if (!intel_dp_source_supports_hbr2(intel_dp))
- size--;
-
- return size;
-}
-
static void
intel_dp_set_clock(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
{
struct drm_device *dev = encoder->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
const struct dp_link_dpll *divisor = NULL;
int i, count = 0;
- if (IS_G4X(dev)) {
+ if (IS_G4X(dev_priv)) {
divisor = gen4_dpll;
count = ARRAY_SIZE(gen4_dpll);
- } else if (HAS_PCH_SPLIT(dev)) {
+ } else if (HAS_PCH_SPLIT(dev_priv)) {
divisor = pch_dpll;
count = ARRAY_SIZE(pch_dpll);
- } else if (IS_CHERRYVIEW(dev)) {
+ } else if (IS_CHERRYVIEW(dev_priv)) {
divisor = chv_dpll;
count = ARRAY_SIZE(chv_dpll);
- } else if (IS_VALLEYVIEW(dev)) {
+ } else if (IS_VALLEYVIEW(dev_priv)) {
divisor = vlv_dpll;
count = ARRAY_SIZE(vlv_dpll);
}
@@ -1377,43 +1411,6 @@ intel_dp_set_clock(struct intel_encoder *encoder,
}
}
-static int intersect_rates(const int *source_rates, int source_len,
- const int *sink_rates, int sink_len,
- int *common_rates)
-{
- int i = 0, j = 0, k = 0;
-
- while (i < source_len && j < sink_len) {
- if (source_rates[i] == sink_rates[j]) {
- if (WARN_ON(k >= DP_MAX_SUPPORTED_RATES))
- return k;
- common_rates[k] = source_rates[i];
- ++k;
- ++i;
- ++j;
- } else if (source_rates[i] < sink_rates[j]) {
- ++i;
- } else {
- ++j;
- }
- }
- return k;
-}
-
-static int intel_dp_common_rates(struct intel_dp *intel_dp,
- int *common_rates)
-{
- const int *source_rates, *sink_rates;
- int source_len, sink_len;
-
- sink_len = intel_dp_sink_rates(intel_dp, &sink_rates);
- source_len = intel_dp_source_rates(intel_dp, &source_rates);
-
- return intersect_rates(source_rates, source_len,
- sink_rates, sink_len,
- common_rates);
-}
-
static void snprintf_int_array(char *str, size_t len,
const int *array, int nelem)
{
@@ -1453,42 +1450,35 @@ static void intel_dp_print_rates(struct intel_dp *intel_dp)
DRM_DEBUG_KMS("common rates: %s\n", str);
}
-static void intel_dp_print_hw_revision(struct intel_dp *intel_dp)
+bool
+__intel_dp_read_desc(struct intel_dp *intel_dp, struct intel_dp_desc *desc)
{
- uint8_t rev;
- int len;
-
- if ((drm_debug & DRM_UT_KMS) == 0)
- return;
-
- if (!(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
- DP_DWN_STRM_PORT_PRESENT))
- return;
-
- len = drm_dp_dpcd_read(&intel_dp->aux, DP_BRANCH_HW_REV, &rev, 1);
- if (len < 0)
- return;
+ u32 base = drm_dp_is_branch(intel_dp->dpcd) ? DP_BRANCH_OUI :
+ DP_SINK_OUI;
- DRM_DEBUG_KMS("sink hw revision: %d.%d\n", (rev & 0xf0) >> 4, rev & 0xf);
+ return drm_dp_dpcd_read(&intel_dp->aux, base, desc, sizeof(*desc)) ==
+ sizeof(*desc);
}
-static void intel_dp_print_sw_revision(struct intel_dp *intel_dp)
+bool intel_dp_read_desc(struct intel_dp *intel_dp)
{
- uint8_t rev[2];
- int len;
-
- if ((drm_debug & DRM_UT_KMS) == 0)
- return;
+ struct intel_dp_desc *desc = &intel_dp->desc;
+ bool oui_sup = intel_dp->dpcd[DP_DOWN_STREAM_PORT_COUNT] &
+ DP_OUI_SUPPORT;
+ int dev_id_len;
- if (!(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
- DP_DWN_STRM_PORT_PRESENT))
- return;
+ if (!__intel_dp_read_desc(intel_dp, desc))
+ return false;
- len = drm_dp_dpcd_read(&intel_dp->aux, DP_BRANCH_SW_REV, &rev, 2);
- if (len < 0)
- return;
+ dev_id_len = strnlen(desc->device_id, sizeof(desc->device_id));
+ DRM_DEBUG_KMS("DP %s: OUI %*phD%s dev-ID %*pE HW-rev %d.%d SW-rev %d.%d\n",
+ drm_dp_is_branch(intel_dp->dpcd) ? "branch" : "sink",
+ (int)sizeof(desc->oui), desc->oui, oui_sup ? "" : "(NS)",
+ dev_id_len, desc->device_id,
+ desc->hw_rev >> 4, desc->hw_rev & 0xf,
+ desc->sw_major_rev, desc->sw_minor_rev);
- DRM_DEBUG_KMS("sink sw revision: %d.%d\n", rev[0], rev[1]);
+ return true;
}
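
intel_dp_read_desc() pulls the whole OUI/device-ID/revision block in a single DPCD read. struct intel_dp_desc is not defined in this hunk; for the sizes and format specifiers above to line up, it presumably mirrors the DPCD layout, roughly:

/* Assumed layout (not shown in this diff), matching the block read
 * at DP_SINK_OUI / DP_BRANCH_OUI. */
struct intel_dp_desc {
        u8 oui[3];
        u8 device_id[6];
        u8 hw_rev;
        u8 sw_major_rev;
        u8 sw_minor_rev;
} __packed;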
static int rate_to_index(int find, const int *rates)
@@ -1552,8 +1542,7 @@ intel_dp_compute_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config,
struct drm_connector_state *conn_state)
{
- struct drm_device *dev = encoder->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
enum port port = dp_to_dig_port(intel_dp)->port;
@@ -1578,7 +1567,7 @@ intel_dp_compute_config(struct intel_encoder *encoder,
max_clock = common_len - 1;
- if (HAS_PCH_SPLIT(dev) && !HAS_DDI(dev) && port != PORT_A)
+ if (HAS_PCH_SPLIT(dev_priv) && !HAS_DDI(dev_priv) && port != PORT_A)
pipe_config->has_pch_encoder = true;
pipe_config->has_drrs = false;
@@ -1588,14 +1577,14 @@ intel_dp_compute_config(struct intel_encoder *encoder,
intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
adjusted_mode);
- if (INTEL_INFO(dev)->gen >= 9) {
+ if (INTEL_GEN(dev_priv) >= 9) {
int ret;
ret = skl_update_scaler_crtc(pipe_config);
if (ret)
return ret;
}
- if (HAS_GMCH_DISPLAY(dev))
+ if (HAS_GMCH_DISPLAY(dev_priv))
intel_gmch_panel_fitting(intel_crtc, pipe_config,
intel_connector->panel.fitting_mode);
else
@@ -1720,7 +1709,7 @@ found:
to_intel_atomic_state(pipe_config->base.state)->cdclk_pll_vco = vco;
}
- if (!HAS_DDI(dev))
+ if (!HAS_DDI(dev_priv))
intel_dp_set_clock(encoder, pipe_config);
return true;
@@ -1778,7 +1767,7 @@ static void intel_dp_prepare(struct intel_encoder *encoder,
/* Split out the IBX/CPU vs CPT settings */
- if (IS_GEN7(dev) && port == PORT_A) {
+ if (IS_GEN7(dev_priv) && port == PORT_A) {
if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
intel_dp->DP |= DP_SYNC_HS_HIGH;
if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
@@ -1789,7 +1778,7 @@ static void intel_dp_prepare(struct intel_encoder *encoder,
intel_dp->DP |= DP_ENHANCED_FRAMING;
intel_dp->DP |= crtc->pipe << 29;
- } else if (HAS_PCH_CPT(dev) && port != PORT_A) {
+ } else if (HAS_PCH_CPT(dev_priv) && port != PORT_A) {
u32 trans_dp;
intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
@@ -1801,8 +1790,7 @@ static void intel_dp_prepare(struct intel_encoder *encoder,
trans_dp &= ~TRANS_DP_ENH_FRAMING;
I915_WRITE(TRANS_DP_CTL(crtc->pipe), trans_dp);
} else {
- if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev) &&
- !IS_CHERRYVIEW(dev) && pipe_config->limited_color_range)
+ if (IS_G4X(dev_priv) && pipe_config->limited_color_range)
intel_dp->DP |= DP_COLOR_RANGE_16_235;
if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
@@ -1814,7 +1802,7 @@ static void intel_dp_prepare(struct intel_encoder *encoder,
if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
intel_dp->DP |= DP_ENHANCED_FRAMING;
- if (IS_CHERRYVIEW(dev))
+ if (IS_CHERRYVIEW(dev_priv))
intel_dp->DP |= DP_PIPE_SELECT_CHV(crtc->pipe);
else if (crtc->pipe == PIPE_B)
intel_dp->DP |= DP_PIPEB_SELECT;
@@ -2123,7 +2111,7 @@ static void edp_panel_on(struct intel_dp *intel_dp)
pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
pp = ironlake_get_pp_control(intel_dp);
- if (IS_GEN5(dev)) {
+ if (IS_GEN5(dev_priv)) {
/* ILK workaround: disable reset around power sequence */
pp &= ~PANEL_POWER_RESET;
I915_WRITE(pp_ctrl_reg, pp);
@@ -2131,7 +2119,7 @@ static void edp_panel_on(struct intel_dp *intel_dp)
}
pp |= PANEL_POWER_ON;
- if (!IS_GEN5(dev))
+ if (!IS_GEN5(dev_priv))
pp |= PANEL_POWER_RESET;
I915_WRITE(pp_ctrl_reg, pp);
@@ -2140,7 +2128,7 @@ static void edp_panel_on(struct intel_dp *intel_dp)
wait_panel_on(intel_dp);
intel_dp->last_power_on = jiffies;
- if (IS_GEN5(dev)) {
+ if (IS_GEN5(dev_priv)) {
pp |= PANEL_POWER_RESET; /* restore panel reset bit */
I915_WRITE(pp_ctrl_reg, pp);
POSTING_READ(pp_ctrl_reg);
@@ -2372,7 +2360,7 @@ static void ironlake_edp_pll_on(struct intel_dp *intel_dp,
* 2. Program DP PLL enable
*/
if (IS_GEN5(dev_priv))
- intel_wait_for_vblank_if_active(&dev_priv->drm, !crtc->pipe);
+ intel_wait_for_vblank_if_active(dev_priv, !crtc->pipe);
intel_dp->DP |= DP_PLL_ENABLE;
@@ -2453,9 +2441,9 @@ static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
if (!(tmp & DP_PORT_EN))
goto out;
- if (IS_GEN7(dev) && port == PORT_A) {
+ if (IS_GEN7(dev_priv) && port == PORT_A) {
*pipe = PORT_TO_PIPE_CPT(tmp);
- } else if (HAS_PCH_CPT(dev) && port != PORT_A) {
+ } else if (HAS_PCH_CPT(dev_priv) && port != PORT_A) {
enum pipe p;
for_each_pipe(dev_priv, p) {
@@ -2470,7 +2458,7 @@ static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
DRM_DEBUG_KMS("No pipe for dp port 0x%x found\n",
i915_mmio_reg_offset(intel_dp->output_reg));
- } else if (IS_CHERRYVIEW(dev)) {
+ } else if (IS_CHERRYVIEW(dev_priv)) {
*pipe = DP_PORT_TO_PIPE_CHV(tmp);
} else {
*pipe = PORT_TO_PIPE(tmp);
@@ -2498,7 +2486,7 @@ static void intel_dp_get_config(struct intel_encoder *encoder,
pipe_config->has_audio = tmp & DP_AUDIO_OUTPUT_ENABLE && port != PORT_A;
- if (HAS_PCH_CPT(dev) && port != PORT_A) {
+ if (HAS_PCH_CPT(dev_priv) && port != PORT_A) {
u32 trans_dp = I915_READ(TRANS_DP_CTL(crtc->pipe));
if (trans_dp & TRANS_DP_HSYNC_ACTIVE_HIGH)
@@ -2524,8 +2512,7 @@ static void intel_dp_get_config(struct intel_encoder *encoder,
pipe_config->base.adjusted_mode.flags |= flags;
- if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev) &&
- !IS_CHERRYVIEW(dev) && tmp & DP_COLOR_RANGE_16_235)
+ if (IS_G4X(dev_priv) && tmp & DP_COLOR_RANGE_16_235)
pipe_config->limited_color_range = true;
pipe_config->lane_count =
@@ -2645,7 +2632,7 @@ _intel_dp_set_link_train(struct intel_dp *intel_dp,
DRM_DEBUG_KMS("Using DP training pattern TPS%d\n",
dp_train_pat & DP_TRAINING_PATTERN_MASK);
- if (HAS_DDI(dev)) {
+ if (HAS_DDI(dev_priv)) {
uint32_t temp = I915_READ(DP_TP_CTL(port));
if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE)
@@ -2671,8 +2658,8 @@ _intel_dp_set_link_train(struct intel_dp *intel_dp,
}
I915_WRITE(DP_TP_CTL(port), temp);
- } else if ((IS_GEN7(dev) && port == PORT_A) ||
- (HAS_PCH_CPT(dev) && port != PORT_A)) {
+ } else if ((IS_GEN7(dev_priv) && port == PORT_A) ||
+ (HAS_PCH_CPT(dev_priv) && port != PORT_A)) {
*DP &= ~DP_LINK_TRAIN_MASK_CPT;
switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
@@ -2692,7 +2679,7 @@ _intel_dp_set_link_train(struct intel_dp *intel_dp,
}
} else {
- if (IS_CHERRYVIEW(dev))
+ if (IS_CHERRYVIEW(dev_priv))
*DP &= ~DP_LINK_TRAIN_MASK_CHV;
else
*DP &= ~DP_LINK_TRAIN_MASK;
@@ -2708,7 +2695,7 @@ _intel_dp_set_link_train(struct intel_dp *intel_dp,
*DP |= DP_LINK_TRAIN_PAT_2;
break;
case DP_TRAINING_PATTERN_3:
- if (IS_CHERRYVIEW(dev)) {
+ if (IS_CHERRYVIEW(dev_priv)) {
*DP |= DP_LINK_TRAIN_PAT_3_CHV;
} else {
DRM_DEBUG_KMS("TPS3 not supported, using TPS2 instead\n");
@@ -2744,7 +2731,8 @@ static void intel_dp_enable_port(struct intel_dp *intel_dp,
}
static void intel_enable_dp(struct intel_encoder *encoder,
- struct intel_crtc_state *pipe_config)
+ struct intel_crtc_state *pipe_config,
+ struct drm_connector_state *conn_state)
{
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
struct drm_device *dev = encoder->base.dev;
@@ -2758,7 +2746,7 @@ static void intel_enable_dp(struct intel_encoder *encoder,
pps_lock(intel_dp);
- if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
+ if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
vlv_init_panel_power_sequencer(intel_dp);
intel_dp_enable_port(intel_dp, pipe_config);
@@ -2769,10 +2757,10 @@ static void intel_enable_dp(struct intel_encoder *encoder,
pps_unlock(intel_dp);
- if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
+ if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
unsigned int lane_mask = 0x0;
- if (IS_CHERRYVIEW(dev))
+ if (IS_CHERRYVIEW(dev_priv))
lane_mask = intel_dp_unused_lane_mask(pipe_config->lane_count);
vlv_wait_port_ready(dev_priv, dp_to_dig_port(intel_dp),
@@ -2786,7 +2774,7 @@ static void intel_enable_dp(struct intel_encoder *encoder,
if (pipe_config->has_audio) {
DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n",
pipe_name(pipe));
- intel_audio_codec_enable(encoder);
+ intel_audio_codec_enable(encoder, pipe_config, conn_state);
}
}
@@ -2796,7 +2784,7 @@ static void g4x_enable_dp(struct intel_encoder *encoder,
{
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
- intel_enable_dp(encoder, pipe_config);
+ intel_enable_dp(encoder, pipe_config, conn_state);
intel_edp_backlight_on(intel_dp);
}
@@ -2933,7 +2921,7 @@ static void vlv_pre_enable_dp(struct intel_encoder *encoder,
{
vlv_phy_pre_encoder_enable(encoder);
- intel_enable_dp(encoder, pipe_config);
+ intel_enable_dp(encoder, pipe_config, conn_state);
}
static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder,
@@ -2951,7 +2939,7 @@ static void chv_pre_enable_dp(struct intel_encoder *encoder,
{
chv_phy_pre_encoder_enable(encoder);
- intel_enable_dp(encoder, pipe_config);
+ intel_enable_dp(encoder, pipe_config, conn_state);
/* Second common lane will stay alive on its own now */
chv_phy_release_cl2_override(encoder);
@@ -2988,21 +2976,20 @@ intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_
uint8_t
intel_dp_voltage_max(struct intel_dp *intel_dp)
{
- struct drm_device *dev = intel_dp_to_dev(intel_dp);
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
enum port port = dp_to_dig_port(intel_dp)->port;
- if (IS_BROXTON(dev))
+ if (IS_BROXTON(dev_priv))
return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
- else if (INTEL_INFO(dev)->gen >= 9) {
+ else if (INTEL_GEN(dev_priv) >= 9) {
if (dev_priv->vbt.edp.low_vswing && port == PORT_A)
return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
- } else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
+ } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
- else if (IS_GEN7(dev) && port == PORT_A)
+ else if (IS_GEN7(dev_priv) && port == PORT_A)
return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
- else if (HAS_PCH_CPT(dev) && port != PORT_A)
+ else if (HAS_PCH_CPT(dev_priv) && port != PORT_A)
return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
else
return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
@@ -3011,10 +2998,10 @@ intel_dp_voltage_max(struct intel_dp *intel_dp)
uint8_t
intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
{
- struct drm_device *dev = intel_dp_to_dev(intel_dp);
+ struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
enum port port = dp_to_dig_port(intel_dp)->port;
- if (INTEL_INFO(dev)->gen >= 9) {
+ if (INTEL_GEN(dev_priv) >= 9) {
switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
return DP_TRAIN_PRE_EMPH_LEVEL_3;
@@ -3027,7 +3014,7 @@ intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
default:
return DP_TRAIN_PRE_EMPH_LEVEL_0;
}
- } else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
+ } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
return DP_TRAIN_PRE_EMPH_LEVEL_3;
@@ -3039,7 +3026,7 @@ intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
default:
return DP_TRAIN_PRE_EMPH_LEVEL_0;
}
- } else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
+ } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
return DP_TRAIN_PRE_EMPH_LEVEL_3;
@@ -3051,7 +3038,7 @@ intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
default:
return DP_TRAIN_PRE_EMPH_LEVEL_0;
}
- } else if (IS_GEN7(dev) && port == PORT_A) {
+ } else if (IS_GEN7(dev_priv) && port == PORT_A) {
switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
return DP_TRAIN_PRE_EMPH_LEVEL_2;
@@ -3352,21 +3339,21 @@ intel_dp_set_signal_levels(struct intel_dp *intel_dp)
uint32_t signal_levels, mask = 0;
uint8_t train_set = intel_dp->train_set[0];
- if (HAS_DDI(dev)) {
+ if (HAS_DDI(dev_priv)) {
signal_levels = ddi_signal_levels(intel_dp);
- if (IS_BROXTON(dev))
+ if (IS_BROXTON(dev_priv))
signal_levels = 0;
else
mask = DDI_BUF_EMP_MASK;
- } else if (IS_CHERRYVIEW(dev)) {
+ } else if (IS_CHERRYVIEW(dev_priv)) {
signal_levels = chv_signal_levels(intel_dp);
- } else if (IS_VALLEYVIEW(dev)) {
+ } else if (IS_VALLEYVIEW(dev_priv)) {
signal_levels = vlv_signal_levels(intel_dp);
- } else if (IS_GEN7(dev) && port == PORT_A) {
+ } else if (IS_GEN7(dev_priv) && port == PORT_A) {
signal_levels = gen7_edp_signal_levels(train_set);
mask = EDP_LINK_TRAIN_VOL_EMP_MASK_IVB;
- } else if (IS_GEN6(dev) && port == PORT_A) {
+ } else if (IS_GEN6(dev_priv) && port == PORT_A) {
signal_levels = gen6_edp_signal_levels(train_set);
mask = EDP_LINK_TRAIN_VOL_EMP_MASK_SNB;
} else {
@@ -3411,7 +3398,7 @@ void intel_dp_set_idle_link_train(struct intel_dp *intel_dp)
enum port port = intel_dig_port->port;
uint32_t val;
- if (!HAS_DDI(dev))
+ if (!HAS_DDI(dev_priv))
return;
val = I915_READ(DP_TP_CTL(port));
@@ -3446,7 +3433,7 @@ intel_dp_link_down(struct intel_dp *intel_dp)
struct drm_i915_private *dev_priv = to_i915(dev);
uint32_t DP = intel_dp->DP;
- if (WARN_ON(HAS_DDI(dev)))
+ if (WARN_ON(HAS_DDI(dev_priv)))
return;
if (WARN_ON((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0))
@@ -3454,12 +3441,12 @@ intel_dp_link_down(struct intel_dp *intel_dp)
DRM_DEBUG_KMS("\n");
- if ((IS_GEN7(dev) && port == PORT_A) ||
- (HAS_PCH_CPT(dev) && port != PORT_A)) {
+ if ((IS_GEN7(dev_priv) && port == PORT_A) ||
+ (HAS_PCH_CPT(dev_priv) && port != PORT_A)) {
DP &= ~DP_LINK_TRAIN_MASK_CPT;
DP |= DP_LINK_TRAIN_PAT_IDLE_CPT;
} else {
- if (IS_CHERRYVIEW(dev))
+ if (IS_CHERRYVIEW(dev_priv))
DP &= ~DP_LINK_TRAIN_MASK_CHV;
else
DP &= ~DP_LINK_TRAIN_MASK;
@@ -3477,7 +3464,7 @@ intel_dp_link_down(struct intel_dp *intel_dp)
* to transcoder A after disabling it to allow the
* matching HDMI port to be enabled on transcoder A.
*/
- if (HAS_PCH_IBX(dev) && crtc->pipe == PIPE_B && port != PORT_A) {
+ if (HAS_PCH_IBX(dev_priv) && crtc->pipe == PIPE_B && port != PORT_A) {
/*
* We get CPU/PCH FIFO underruns on the other pipe when
* doing the workaround. Sweep them under the rug.
@@ -3495,7 +3482,7 @@ intel_dp_link_down(struct intel_dp *intel_dp)
I915_WRITE(intel_dp->output_reg, DP);
POSTING_READ(intel_dp->output_reg);
- intel_wait_for_vblank_if_active(&dev_priv->drm, PIPE_A);
+ intel_wait_for_vblank_if_active(dev_priv, PIPE_A);
intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, true);
intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, true);
}
@@ -3505,7 +3492,7 @@ intel_dp_link_down(struct intel_dp *intel_dp)
intel_dp->DP = DP;
}
-static bool
+bool
intel_dp_read_dpcd(struct intel_dp *intel_dp)
{
if (drm_dp_dpcd_read(&intel_dp->aux, 0x000, intel_dp->dpcd,
@@ -3529,6 +3516,8 @@ intel_edp_init_dpcd(struct intel_dp *intel_dp)
if (!intel_dp_read_dpcd(intel_dp))
return false;
+ intel_dp_read_desc(intel_dp);
+
if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11)
dev_priv->no_aux_handshake = intel_dp->dpcd[DP_MAX_DOWNSPREAD] &
DP_NO_AUX_HANDSHAKE_LINK_TRAINING;
@@ -3616,8 +3605,7 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp)
if (!is_edp(intel_dp) && !intel_dp->sink_count)
return false;
- if (!(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
- DP_DWN_STRM_PORT_PRESENT))
+ if (!drm_dp_is_branch(intel_dp->dpcd))
return true; /* native DP sink */
if (intel_dp->dpcd[DP_DPCD_REV] == 0x10)
@@ -3631,23 +3619,6 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp)
return true;
}
-static void
-intel_dp_probe_oui(struct intel_dp *intel_dp)
-{
- u8 buf[3];
-
- if (!(intel_dp->dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT))
- return;
-
- if (drm_dp_dpcd_read(&intel_dp->aux, DP_SINK_OUI, buf, 3) == 3)
- DRM_DEBUG_KMS("Sink OUI: %02hx%02hx%02hx\n",
- buf[0], buf[1], buf[2]);
-
- if (drm_dp_dpcd_read(&intel_dp->aux, DP_BRANCH_OUI, buf, 3) == 3)
- DRM_DEBUG_KMS("Branch OUI: %02hx%02hx%02hx\n",
- buf[0], buf[1], buf[2]);
-}
-
static bool
intel_dp_can_mst(struct intel_dp *intel_dp)
{
@@ -3691,7 +3662,7 @@ intel_dp_configure_mst(struct intel_dp *intel_dp)
static int intel_dp_sink_crc_stop(struct intel_dp *intel_dp)
{
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- struct drm_device *dev = dig_port->base.base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
u8 buf;
int ret = 0;
@@ -3712,7 +3683,7 @@ static int intel_dp_sink_crc_stop(struct intel_dp *intel_dp)
}
do {
- intel_wait_for_vblank(dev, intel_crtc->pipe);
+ intel_wait_for_vblank(dev_priv, intel_crtc->pipe);
if (drm_dp_dpcd_readb(&intel_dp->aux,
DP_TEST_SINK_MISC, &buf) < 0) {
@@ -3735,7 +3706,7 @@ static int intel_dp_sink_crc_stop(struct intel_dp *intel_dp)
static int intel_dp_sink_crc_start(struct intel_dp *intel_dp)
{
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- struct drm_device *dev = dig_port->base.base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
u8 buf;
int ret;
@@ -3763,14 +3734,14 @@ static int intel_dp_sink_crc_start(struct intel_dp *intel_dp)
return -EIO;
}
- intel_wait_for_vblank(dev, intel_crtc->pipe);
+ intel_wait_for_vblank(dev_priv, intel_crtc->pipe);
return 0;
}
int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc)
{
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- struct drm_device *dev = dig_port->base.base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
u8 buf;
int count, ret;
@@ -3781,7 +3752,7 @@ int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc)
return ret;
do {
- intel_wait_for_vblank(dev, intel_crtc->pipe);
+ intel_wait_for_vblank(dev_priv, intel_crtc->pipe);
if (drm_dp_dpcd_readb(&intel_dp->aux,
DP_TEST_SINK_MISC, &buf) < 0) {
@@ -3998,6 +3969,31 @@ go_again:
}
static void
+intel_dp_retrain_link(struct intel_dp *intel_dp)
+{
+ struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
+
+ /* Suppress underruns caused by re-training */
+ intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, false);
+ if (crtc->config->has_pch_encoder)
+ intel_set_pch_fifo_underrun_reporting(dev_priv,
+ intel_crtc_pch_transcoder(crtc), false);
+
+ intel_dp_start_link_train(intel_dp);
+ intel_dp_stop_link_train(intel_dp);
+
+ /* Keep underrun reporting disabled until things are stable */
+ intel_wait_for_vblank(dev_priv, crtc->pipe);
+
+ intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true);
+ if (crtc->config->has_pch_encoder)
+ intel_set_pch_fifo_underrun_reporting(dev_priv,
+ intel_crtc_pch_transcoder(crtc), true);
+}
+
+static void
intel_dp_check_link_status(struct intel_dp *intel_dp)
{
struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
@@ -4017,13 +4013,18 @@ intel_dp_check_link_status(struct intel_dp *intel_dp)
if (!to_intel_crtc(intel_encoder->base.crtc)->active)
return;
+ /* FIXME: we need to synchronize this sort of stuff with hardware
+ * readout. Currently fast link training doesn't work on boot-up. */
+ if (!intel_dp->lane_count)
+ return;
+
/* if link training is requested we should perform it always */
if ((intel_dp->compliance_test_type == DP_TEST_LINK_TRAINING) ||
(!drm_dp_channel_eq_ok(link_status, intel_dp->lane_count))) {
DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n",
intel_encoder->base.name);
- intel_dp_start_link_train(intel_dp);
- intel_dp_stop_link_train(intel_dp);
+
+ intel_dp_retrain_link(intel_dp);
}
}
@@ -4105,7 +4106,7 @@ intel_dp_detect_dpcd(struct intel_dp *intel_dp)
return connector_status_connected;
/* if there's no downstream port, we're done */
- if (!(dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT))
+ if (!drm_dp_is_branch(dpcd))
return connector_status_connected;
/* If we're HPD-aware, SINK_COUNT changes dynamically */
@@ -4396,10 +4397,7 @@ intel_dp_long_pulse(struct intel_connector *intel_connector)
intel_dp_print_rates(intel_dp);
- intel_dp_probe_oui(intel_dp);
-
- intel_dp_print_hw_revision(intel_dp);
- intel_dp_print_sw_revision(intel_dp);
+ intel_dp_read_desc(intel_dp);
intel_dp_configure_mst(intel_dp);
@@ -4755,11 +4753,16 @@ static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp)
void intel_dp_encoder_reset(struct drm_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->dev);
- struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+ struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
+ struct intel_lspcon *lspcon = &intel_dig_port->lspcon;
+ struct intel_dp *intel_dp = &intel_dig_port->dp;
if (!HAS_DDI(dev_priv))
intel_dp->DP = I915_READ(intel_dp->output_reg);
+ if (IS_GEN9(dev_priv) && lspcon->active)
+ lspcon_resume(lspcon);
+
if (to_intel_encoder(encoder)->type != INTEL_OUTPUT_EDP)
return;
@@ -4866,15 +4869,13 @@ put_power:
}
/* check the VBT to see whether the eDP is on another port */
-bool intel_dp_is_edp(struct drm_device *dev, enum port port)
+bool intel_dp_is_edp(struct drm_i915_private *dev_priv, enum port port)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
-
/*
* eDP not supported on g4x, so bail out early just
* for a bit of extra safety in case the VBT is bonkers.
*/
- if (INTEL_INFO(dev)->gen < 5)
+ if (INTEL_GEN(dev_priv) < 5)
return false;
if (port == PORT_A)
@@ -5073,7 +5074,7 @@ intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
(seq->t10 << PANEL_POWER_DOWN_DELAY_SHIFT);
/* Compute the divisor for the pp clock, simply match the Bspec
* formula. */
- if (IS_BROXTON(dev)) {
+ if (IS_BROXTON(dev_priv)) {
pp_div = I915_READ(regs.pp_ctrl);
pp_div &= ~BXT_POWER_CYCLE_DELAY_MASK;
pp_div |= (DIV_ROUND_UP((seq->t11_t12 + 1), 1000)
@@ -5086,9 +5087,9 @@ intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
/* Haswell doesn't have any port selection bits for the panel
* power sequencer any more. */
- if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
+ if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
port_sel = PANEL_PORT_SELECT_VLV(port);
- } else if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
+ } else if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) {
if (port == PORT_A)
port_sel = PANEL_PORT_SELECT_DPA;
else
@@ -5099,7 +5100,7 @@ intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
I915_WRITE(regs.pp_on, pp_on);
I915_WRITE(regs.pp_off, pp_off);
- if (IS_BROXTON(dev))
+ if (IS_BROXTON(dev_priv))
I915_WRITE(regs.pp_ctrl, pp_div);
else
I915_WRITE(regs.pp_div, pp_div);
@@ -5107,7 +5108,7 @@ intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
I915_READ(regs.pp_on),
I915_READ(regs.pp_off),
- IS_BROXTON(dev) ?
+ IS_BROXTON(dev_priv) ?
(I915_READ(regs.pp_ctrl) & BXT_POWER_CYCLE_DELAY_MASK) :
I915_READ(regs.pp_div));
}
@@ -5115,7 +5116,9 @@ intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
static void intel_dp_pps_init(struct drm_device *dev,
struct intel_dp *intel_dp)
{
- if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
+ struct drm_i915_private *dev_priv = to_i915(dev);
+
+ if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
vlv_initial_power_sequencer_setup(intel_dp);
} else {
intel_dp_init_panel_power_sequencer(dev, intel_dp);
@@ -5474,7 +5477,7 @@ intel_dp_drrs_init(struct intel_connector *intel_connector,
INIT_DELAYED_WORK(&dev_priv->drrs.work, intel_edp_drrs_downclock_work);
mutex_init(&dev_priv->drrs.mutex);
- if (INTEL_INFO(dev)->gen <= 6) {
+ if (INTEL_GEN(dev_priv) <= 6) {
DRM_DEBUG_KMS("DRRS supported for Gen7 and above\n");
return NULL;
}
@@ -5585,7 +5588,7 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
}
mutex_unlock(&dev->mode_config.mutex);
- if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
+ if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
intel_dp->edp_notifier.notifier_call = edp_notify_handler;
register_reboot_notifier(&intel_dp->edp_notifier);
@@ -5594,7 +5597,7 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
* If the current pipe isn't valid, try the PPS pipe, and if that
* fails just assume pipe A.
*/
- if (IS_CHERRYVIEW(dev))
+ if (IS_CHERRYVIEW(dev_priv))
pipe = DP_PORT_TO_PIPE_CHV(intel_dp->DP);
else
pipe = PORT_TO_PIPE(intel_dp->DP);
@@ -5648,28 +5651,28 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
intel_dp->pps_pipe = INVALID_PIPE;
/* intel_dp vfuncs */
- if (INTEL_INFO(dev)->gen >= 9)
+ if (INTEL_GEN(dev_priv) >= 9)
intel_dp->get_aux_clock_divider = skl_get_aux_clock_divider;
- else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
+ else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider;
- else if (HAS_PCH_SPLIT(dev))
+ else if (HAS_PCH_SPLIT(dev_priv))
intel_dp->get_aux_clock_divider = ilk_get_aux_clock_divider;
else
intel_dp->get_aux_clock_divider = g4x_get_aux_clock_divider;
- if (INTEL_INFO(dev)->gen >= 9)
+ if (INTEL_GEN(dev_priv) >= 9)
intel_dp->get_aux_send_ctl = skl_get_aux_send_ctl;
else
intel_dp->get_aux_send_ctl = g4x_get_aux_send_ctl;
- if (HAS_DDI(dev))
+ if (HAS_DDI(dev_priv))
intel_dp->prepare_link_retrain = intel_ddi_prepare_link_retrain;
/* Preserve the current hw state. */
intel_dp->DP = I915_READ(intel_dp->output_reg);
intel_dp->attached_connector = intel_connector;
- if (intel_dp_is_edp(dev, port))
+ if (intel_dp_is_edp(dev_priv, port))
type = DRM_MODE_CONNECTOR_eDP;
else
type = DRM_MODE_CONNECTOR_DisplayPort;
@@ -5683,7 +5686,7 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
intel_encoder->type = INTEL_OUTPUT_EDP;
/* eDP only on port B and/or C on vlv/chv */
- if (WARN_ON((IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) &&
+ if (WARN_ON((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
is_edp(intel_dp) && port != PORT_B && port != PORT_C))
return false;
@@ -5704,7 +5707,7 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
intel_connector_attach_encoder(intel_connector, intel_encoder);
- if (HAS_DDI(dev))
+ if (HAS_DDI(dev_priv))
intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
else
intel_connector->get_hw_state = intel_connector_get_hw_state;
@@ -5716,7 +5719,7 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
break;
case PORT_B:
intel_encoder->hpd_pin = HPD_PORT_B;
- if (IS_BXT_REVID(dev, 0, BXT_REVID_A1))
+ if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1))
intel_encoder->hpd_pin = HPD_PORT_A;
break;
case PORT_C:
@@ -5733,7 +5736,7 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
}
/* init MST on ports that can support it */
- if (HAS_DP_MST(dev) && !is_edp(intel_dp) &&
+ if (HAS_DP_MST(dev_priv) && !is_edp(intel_dp) &&
(port == PORT_B || port == PORT_C || port == PORT_D))
intel_dp_mst_encoder_init(intel_dig_port,
intel_connector->base.base.id);
@@ -5750,7 +5753,7 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
* 0xd. Failure to do so will result in spurious interrupts being
* generated on the port when a cable is not attached.
*/
- if (IS_G4X(dev) && !IS_GM45(dev)) {
+ if (IS_G4X(dev_priv) && !IS_GM45(dev_priv)) {
u32 temp = I915_READ(PEG_BAND_GAP_DATA);
I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
}
@@ -5793,13 +5796,13 @@ bool intel_dp_init(struct drm_device *dev,
intel_encoder->get_hw_state = intel_dp_get_hw_state;
intel_encoder->get_config = intel_dp_get_config;
intel_encoder->suspend = intel_dp_encoder_suspend;
- if (IS_CHERRYVIEW(dev)) {
+ if (IS_CHERRYVIEW(dev_priv)) {
intel_encoder->pre_pll_enable = chv_dp_pre_pll_enable;
intel_encoder->pre_enable = chv_pre_enable_dp;
intel_encoder->enable = vlv_enable_dp;
intel_encoder->post_disable = chv_post_disable_dp;
intel_encoder->post_pll_disable = chv_dp_post_pll_disable;
- } else if (IS_VALLEYVIEW(dev)) {
+ } else if (IS_VALLEYVIEW(dev_priv)) {
intel_encoder->pre_pll_enable = vlv_dp_pre_pll_enable;
intel_encoder->pre_enable = vlv_pre_enable_dp;
intel_encoder->enable = vlv_enable_dp;
@@ -5807,7 +5810,7 @@ bool intel_dp_init(struct drm_device *dev,
} else {
intel_encoder->pre_enable = g4x_pre_enable_dp;
intel_encoder->enable = g4x_enable_dp;
- if (INTEL_INFO(dev)->gen >= 5)
+ if (INTEL_GEN(dev_priv) >= 5)
intel_encoder->post_disable = ilk_post_disable_dp;
}
@@ -5816,7 +5819,7 @@ bool intel_dp_init(struct drm_device *dev,
intel_dig_port->max_lanes = 4;
intel_encoder->type = INTEL_OUTPUT_DP;
- if (IS_CHERRYVIEW(dev)) {
+ if (IS_CHERRYVIEW(dev_priv)) {
if (port == PORT_D)
intel_encoder->crtc_mask = 1 << 2;
else
@@ -5825,6 +5828,7 @@ bool intel_dp_init(struct drm_device *dev,
intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
}
intel_encoder->cloneable = 0;
+ intel_encoder->port = port;
intel_dig_port->hpd_pulse = intel_dp_hpd_pulse;
dev_priv->hotplug.irq_port[port] = intel_dig_port;
diff --git a/drivers/gpu/drm/i915/intel_dp_link_training.c b/drivers/gpu/drm/i915/intel_dp_link_training.c
index c438b02184cb..0048b520baf7 100644
--- a/drivers/gpu/drm/i915/intel_dp_link_training.c
+++ b/drivers/gpu/drm/i915/intel_dp_link_training.c
@@ -225,9 +225,6 @@ static u32 intel_dp_training_pattern(struct intel_dp *intel_dp)
* Intel platforms that support HBR2 also support TPS3. TPS3 support is
* also mandatory for downstream devices that support HBR2. However, not
* all sinks follow the spec.
- *
- * Due to WaDisableHBR2 SKL < B0 is the only exception where TPS3 is
- * supported in source but still not enabled.
*/
source_tps3 = intel_dp_source_supports_hbr2(intel_dp);
sink_tps3 = drm_dp_tps3_supported(intel_dp->dpcd);
diff --git a/drivers/gpu/drm/i915/intel_dp_mst.c b/drivers/gpu/drm/i915/intel_dp_mst.c
index 54a9d7610d8f..b029d1026a28 100644
--- a/drivers/gpu/drm/i915/intel_dp_mst.c
+++ b/drivers/gpu/drm/i915/intel_dp_mst.c
@@ -43,7 +43,6 @@ static bool intel_dp_mst_compute_config(struct intel_encoder *encoder,
const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
int mst_pbn;
- pipe_config->dp_encoder_is_mst = true;
pipe_config->has_pch_encoder = false;
bpp = 24;
/*
@@ -523,6 +522,7 @@ intel_dp_create_fake_mst_encoder(struct intel_digital_port *intel_dig_port, enum
DRM_MODE_ENCODER_DPMST, "DP-MST %c", pipe_name(pipe));
intel_encoder->type = INTEL_OUTPUT_DP_MST;
+ intel_encoder->port = intel_dig_port->port;
intel_encoder->crtc_mask = 0x7;
intel_encoder->cloneable = 0;
diff --git a/drivers/gpu/drm/i915/intel_dpio_phy.c b/drivers/gpu/drm/i915/intel_dpio_phy.c
index 047f48748944..7a8e82dabbf2 100644
--- a/drivers/gpu/drm/i915/intel_dpio_phy.c
+++ b/drivers/gpu/drm/i915/intel_dpio_phy.c
@@ -23,6 +23,565 @@
#include "intel_drv.h"
+/**
+ * DOC: DPIO
+ *
+ * VLV, CHV and BXT have slightly peculiar display PHYs for driving DP/HDMI
+ * ports. DPIO is the name given to such a display PHY. These PHYs
+ * don't follow the standard programming model using direct MMIO
+ * registers, and instead their registers must be accessed through IOSF
+ * sideband. VLV has one such PHY for driving ports B and C, and CHV
+ * adds another PHY for driving port D. Each PHY responds to specific
+ * IOSF-SB port.
+ *
+ * Each display PHY is made up of one or two channels. Each channel
+ * houses a common lane part which contains the PLL and other common
+ * logic. CH0 common lane also contains the IOSF-SB logic for the
+ * Common Register Interface (CRI), i.e. the DPIO registers. The CRI clock
+ * must be running when any DPIO registers are accessed.
+ *
+ * In addition to having their own registers, the PHYs are also
+ * controlled through some dedicated signals from the display
+ * controller. These include PLL reference clock enable, PLL enable,
+ * and CRI clock selection, for example.
+ *
+ * Each channel also has two splines (also called data lanes), and
+ * each spline is made up of one Physical Access Coding Sub-Layer
+ * (PCS) block and two TX lanes. So each channel has two PCS blocks
+ * and four TX lanes. The TX lanes are used as DP lanes or TMDS
+ * data/clock pairs depending on the output type.
+ *
+ * Additionally the PHY contains an AUX lane with AUX blocks
+ * for each channel. This is used for DP AUX communication, but
+ * this fact isn't really relevant for the driver since AUX is
+ * controlled from the display controller side. No DPIO registers
+ * need to be accessed during AUX communication.
+ *
+ * Generally on VLV/CHV the common lane corresponds to the pipe and
+ * the spline (PCS/TX) corresponds to the port.
+ *
+ * For dual channel PHY (VLV/CHV):
+ *
+ * pipe A == CMN/PLL/REF CH0
+ *
+ * pipe B == CMN/PLL/REF CH1
+ *
+ * port B == PCS/TX CH0
+ *
+ * port C == PCS/TX CH1
+ *
+ * This is especially important when we cross the streams
+ * i.e. drive port B with pipe B, or port C with pipe A.
+ *
+ * For single channel PHY (CHV):
+ *
+ * pipe C == CMN/PLL/REF CH0
+ *
+ * port D == PCS/TX CH0
+ *
+ * On BXT the entire PHY channel corresponds to the port. That means
+ * the PLL is also now associated with the port rather than the pipe,
+ * and so the clock needs to be routed to the appropriate transcoder.
+ * Port A PLL is directly connected to transcoder EDP and port B/C
+ * PLLs can be routed to any transcoder A/B/C.
+ *
+ * Note: DDI0 is digital port B, DDI1 is digital port C, and DDI2 is
+ * digital port D (CHV) or port A (BXT). ::
+ *
+ *
+ * Dual channel PHY (VLV/CHV/BXT)
+ * ---------------------------------
+ * | CH0 | CH1 |
+ * | CMN/PLL/REF | CMN/PLL/REF |
+ * |---------------|---------------| Display PHY
+ * | PCS01 | PCS23 | PCS01 | PCS23 |
+ * |-------|-------|-------|-------|
+ * |TX0|TX1|TX2|TX3|TX0|TX1|TX2|TX3|
+ * ---------------------------------
+ * | DDI0 | DDI1 | DP/HDMI ports
+ * ---------------------------------
+ *
+ * Single channel PHY (CHV/BXT)
+ * -----------------
+ * | CH0 |
+ * | CMN/PLL/REF |
+ * |---------------| Display PHY
+ * | PCS01 | PCS23 |
+ * |-------|-------|
+ * |TX0|TX1|TX2|TX3|
+ * -----------------
+ * | DDI2 | DP/HDMI port
+ * -----------------
+ */
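+
+/*
+ * A minimal sketch of the VLV/CHV mapping above, assuming helper names
+ * invented here for illustration (the driver keeps its own equivalents,
+ * e.g. vlv_pipe_to_channel(), in intel_drv.h):
+ *
+ * static inline enum dpio_channel vlv_port_to_channel(enum port port)
+ * {
+ * return port == PORT_B ? DPIO_CH0 : DPIO_CH1;
+ * }
+ *
+ * static inline enum dpio_channel vlv_pipe_to_channel(enum pipe pipe)
+ * {
+ * return pipe == PIPE_A ? DPIO_CH0 : DPIO_CH1;
+ * }
+ */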
+
+/**
+ * struct bxt_ddi_phy_info - Holds info for a Broxton DDI PHY
+ */
+struct bxt_ddi_phy_info {
+ /**
+ * @dual_channel: true if this phy has a second channel.
+ */
+ bool dual_channel;
+
+ /**
+ * @rcomp_phy: If -1, indicates this phy has its own rcomp resistor.
+ * Otherwise the GRC value will be copied from the phy indicated by
+ * this field.
+ */
+ enum dpio_phy rcomp_phy;
+
+ /**
+ * @channel: struct containing per channel information.
+ */
+ struct {
+ /**
+ * @port: which port maps to this channel.
+ */
+ enum port port;
+ } channel[2];
+};
+
+static const struct bxt_ddi_phy_info bxt_ddi_phy_info[] = {
+ [DPIO_PHY0] = {
+ .dual_channel = true,
+ .rcomp_phy = DPIO_PHY1,
+
+ .channel = {
+ [DPIO_CH0] = { .port = PORT_B },
+ [DPIO_CH1] = { .port = PORT_C },
+ }
+ },
+ [DPIO_PHY1] = {
+ .dual_channel = false,
+ .rcomp_phy = -1,
+
+ .channel = {
+ [DPIO_CH0] = { .port = PORT_A },
+ }
+ },
+};
+
+static u32 bxt_phy_port_mask(const struct bxt_ddi_phy_info *phy_info)
+{
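+ /* dual_channel is a bool: the multiply folds in CH1's port bit only for dual channel PHYs. */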
+ return (phy_info->dual_channel * BIT(phy_info->channel[DPIO_CH1].port)) |
+ BIT(phy_info->channel[DPIO_CH0].port);
+}
+
+void bxt_port_to_phy_channel(enum port port,
+ enum dpio_phy *phy, enum dpio_channel *ch)
+{
+ const struct bxt_ddi_phy_info *phy_info;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(bxt_ddi_phy_info); i++) {
+ phy_info = &bxt_ddi_phy_info[i];
+
+ if (port == phy_info->channel[DPIO_CH0].port) {
+ *phy = i;
+ *ch = DPIO_CH0;
+ return;
+ }
+
+ if (phy_info->dual_channel &&
+ port == phy_info->channel[DPIO_CH1].port) {
+ *phy = i;
+ *ch = DPIO_CH1;
+ return;
+ }
+ }
+
+ WARN(1, "PHY not found for PORT %c", port_name(port));
+ *phy = DPIO_PHY0;
+ *ch = DPIO_CH0;
+}
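+
+/*
+ * Usage sketch, assuming a caller on port B (bxt_ddi_pll_enable() below
+ * does exactly this before touching the per-channel PLL registers):
+ *
+ * enum dpio_phy phy;
+ * enum dpio_channel ch;
+ *
+ * bxt_port_to_phy_channel(PORT_B, &phy, &ch);
+ * -> phy == DPIO_PHY0, ch == DPIO_CH0, per bxt_ddi_phy_info[]
+ */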
+
+void bxt_ddi_phy_set_signal_level(struct drm_i915_private *dev_priv,
+ enum port port, u32 margin, u32 scale,
+ u32 enable, u32 deemphasis)
+{
+ u32 val;
+ enum dpio_phy phy;
+ enum dpio_channel ch;
+
+ bxt_port_to_phy_channel(port, &phy, &ch);
+
+ /*
+ * While we write to the group register to program all lanes at once, we
+ * can read only lane registers, and we pick lanes 0/1 for that.
+ */
+ val = I915_READ(BXT_PORT_PCS_DW10_LN01(phy, ch));
+ val &= ~(TX2_SWING_CALC_INIT | TX1_SWING_CALC_INIT);
+ I915_WRITE(BXT_PORT_PCS_DW10_GRP(phy, ch), val);
+
+ val = I915_READ(BXT_PORT_TX_DW2_LN0(phy, ch));
+ val &= ~(MARGIN_000 | UNIQ_TRANS_SCALE);
+ val |= margin << MARGIN_000_SHIFT | scale << UNIQ_TRANS_SCALE_SHIFT;
+ I915_WRITE(BXT_PORT_TX_DW2_GRP(phy, ch), val);
+
+ val = I915_READ(BXT_PORT_TX_DW3_LN0(phy, ch));
+ val &= ~SCALE_DCOMP_METHOD;
+ if (enable)
+ val |= SCALE_DCOMP_METHOD;
+
+ if ((val & UNIQUE_TRANGE_EN_METHOD) && !(val & SCALE_DCOMP_METHOD))
+ DRM_ERROR("Disabled scaling while ouniqetrangenmethod was set");
+
+ I915_WRITE(BXT_PORT_TX_DW3_GRP(phy, ch), val);
+
+ val = I915_READ(BXT_PORT_TX_DW4_LN0(phy, ch));
+ val &= ~DE_EMPHASIS;
+ val |= deemphasis << DEEMPH_SHIFT;
+ I915_WRITE(BXT_PORT_TX_DW4_GRP(phy, ch), val);
+
+ val = I915_READ(BXT_PORT_PCS_DW10_LN01(phy, ch));
+ val |= TX2_SWING_CALC_INIT | TX1_SWING_CALC_INIT;
+ I915_WRITE(BXT_PORT_PCS_DW10_GRP(phy, ch), val);
+}
+
+bool bxt_ddi_phy_is_enabled(struct drm_i915_private *dev_priv,
+ enum dpio_phy phy)
+{
+ const struct bxt_ddi_phy_info *phy_info = &bxt_ddi_phy_info[phy];
+ enum port port;
+
+ if (!(I915_READ(BXT_P_CR_GT_DISP_PWRON) & GT_DISPLAY_POWER_ON(phy)))
+ return false;
+
+ if ((I915_READ(BXT_PORT_CL1CM_DW0(phy)) &
+ (PHY_POWER_GOOD | PHY_RESERVED)) != PHY_POWER_GOOD) {
+ DRM_DEBUG_DRIVER("DDI PHY %d powered, but power hasn't settled\n",
+ phy);
+
+ return false;
+ }
+
+ if (phy_info->rcomp_phy == -1 &&
+ !(I915_READ(BXT_PORT_REF_DW3(phy)) & GRC_DONE)) {
+ DRM_DEBUG_DRIVER("DDI PHY %d powered, but GRC isn't done\n",
+ phy);
+
+ return false;
+ }
+
+ if (!(I915_READ(BXT_PHY_CTL_FAMILY(phy)) & COMMON_RESET_DIS)) {
+ DRM_DEBUG_DRIVER("DDI PHY %d powered, but still in reset\n",
+ phy);
+
+ return false;
+ }
+
+ for_each_port_masked(port, bxt_phy_port_mask(phy_info)) {
+ u32 tmp = I915_READ(BXT_PHY_CTL(port));
+
+ if (tmp & BXT_PHY_CMNLANE_POWERDOWN_ACK) {
+ DRM_DEBUG_DRIVER("DDI PHY %d powered, but common lane "
+ "for port %c powered down "
+ "(PHY_CTL %08x)\n",
+ phy, port_name(port), tmp);
+
+ return false;
+ }
+ }
+
+ return true;
+}
+
+static u32 bxt_get_grc(struct drm_i915_private *dev_priv, enum dpio_phy phy)
+{
+ u32 val = I915_READ(BXT_PORT_REF_DW6(phy));
+
+ return (val & GRC_CODE_MASK) >> GRC_CODE_SHIFT;
+}
+
+static void bxt_phy_wait_grc_done(struct drm_i915_private *dev_priv,
+ enum dpio_phy phy)
+{
+ if (intel_wait_for_register(dev_priv,
+ BXT_PORT_REF_DW3(phy),
+ GRC_DONE, GRC_DONE,
+ 10))
+ DRM_ERROR("timeout waiting for PHY%d GRC\n", phy);
+}
+
+static void _bxt_ddi_phy_init(struct drm_i915_private *dev_priv,
+ enum dpio_phy phy)
+{
+ const struct bxt_ddi_phy_info *phy_info = &bxt_ddi_phy_info[phy];
+ u32 val;
+
+ if (bxt_ddi_phy_is_enabled(dev_priv, phy)) {
+ /* Still read out the GRC value for state verification */
+ if (phy_info->rcomp_phy != -1)
+ dev_priv->bxt_phy_grc = bxt_get_grc(dev_priv, phy);
+
+ if (bxt_ddi_phy_verify_state(dev_priv, phy)) {
+ DRM_DEBUG_DRIVER("DDI PHY %d already enabled, "
+ "won't reprogram it\n", phy);
+
+ return;
+ }
+
+ DRM_DEBUG_DRIVER("DDI PHY %d enabled with invalid state, "
+ "force reprogramming it\n", phy);
+ }
+
+ val = I915_READ(BXT_P_CR_GT_DISP_PWRON);
+ val |= GT_DISPLAY_POWER_ON(phy);
+ I915_WRITE(BXT_P_CR_GT_DISP_PWRON, val);
+
+ /*
+ * The PHY registers start out inaccessible and respond to reads with
+ * all 1s. Eventually they become accessible as they power up, then
+ * the reserved bit will give the default 0. Poll on the reserved bit
+ * becoming 0 to find when the PHY is accessible.
+ * The HW team confirmed that the time to reach PHY power good status is
+ * anywhere between 50 us and 100 us.
+ */
+ if (wait_for_us(((I915_READ(BXT_PORT_CL1CM_DW0(phy)) &
+ (PHY_RESERVED | PHY_POWER_GOOD)) == PHY_POWER_GOOD), 100)) {
+ DRM_ERROR("timeout during PHY%d power on\n", phy);
+ }
+
+ /* Program PLL Rcomp code offset */
+ val = I915_READ(BXT_PORT_CL1CM_DW9(phy));
+ val &= ~IREF0RC_OFFSET_MASK;
+ val |= 0xE4 << IREF0RC_OFFSET_SHIFT;
+ I915_WRITE(BXT_PORT_CL1CM_DW9(phy), val);
+
+ val = I915_READ(BXT_PORT_CL1CM_DW10(phy));
+ val &= ~IREF1RC_OFFSET_MASK;
+ val |= 0xE4 << IREF1RC_OFFSET_SHIFT;
+ I915_WRITE(BXT_PORT_CL1CM_DW10(phy), val);
+
+ /* Program power gating */
+ val = I915_READ(BXT_PORT_CL1CM_DW28(phy));
+ val |= OCL1_POWER_DOWN_EN | DW28_OLDO_DYN_PWR_DOWN_EN |
+ SUS_CLK_CONFIG;
+ I915_WRITE(BXT_PORT_CL1CM_DW28(phy), val);
+
+ if (phy_info->dual_channel) {
+ val = I915_READ(BXT_PORT_CL2CM_DW6(phy));
+ val |= DW6_OLDO_DYN_PWR_DOWN_EN;
+ I915_WRITE(BXT_PORT_CL2CM_DW6(phy), val);
+ }
+
+ if (phy_info->rcomp_phy != -1) {
+ uint32_t grc_code;
+ /*
+ * PHY0 isn't connected to an RCOMP resistor, so copy over
+ * the corresponding calibrated value from PHY1, and disable
+ * the automatic calibration on PHY0.
+ */
+ val = dev_priv->bxt_phy_grc = bxt_get_grc(dev_priv,
+ phy_info->rcomp_phy);
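+ /* Replicate the read-back GRC value into the fast/slow/nominal code fields. */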
+ grc_code = val << GRC_CODE_FAST_SHIFT |
+ val << GRC_CODE_SLOW_SHIFT |
+ val;
+ I915_WRITE(BXT_PORT_REF_DW6(phy), grc_code);
+
+ val = I915_READ(BXT_PORT_REF_DW8(phy));
+ val |= GRC_DIS | GRC_RDY_OVRD;
+ I915_WRITE(BXT_PORT_REF_DW8(phy), val);
+ }
+
+ val = I915_READ(BXT_PHY_CTL_FAMILY(phy));
+ val |= COMMON_RESET_DIS;
+ I915_WRITE(BXT_PHY_CTL_FAMILY(phy), val);
+
+ if (phy_info->rcomp_phy == -1)
+ bxt_phy_wait_grc_done(dev_priv, phy);
+}
+
+void bxt_ddi_phy_uninit(struct drm_i915_private *dev_priv, enum dpio_phy phy)
+{
+ uint32_t val;
+
+ val = I915_READ(BXT_PHY_CTL_FAMILY(phy));
+ val &= ~COMMON_RESET_DIS;
+ I915_WRITE(BXT_PHY_CTL_FAMILY(phy), val);
+
+ val = I915_READ(BXT_P_CR_GT_DISP_PWRON);
+ val &= ~GT_DISPLAY_POWER_ON(phy);
+ I915_WRITE(BXT_P_CR_GT_DISP_PWRON, val);
+}
+
+void bxt_ddi_phy_init(struct drm_i915_private *dev_priv, enum dpio_phy phy)
+{
+ const struct bxt_ddi_phy_info *phy_info = &bxt_ddi_phy_info[phy];
+ enum dpio_phy rcomp_phy = phy_info->rcomp_phy;
+ bool was_enabled;
+
+ lockdep_assert_held(&dev_priv->power_domains.lock);
+
+ if (rcomp_phy != -1) {
+ was_enabled = bxt_ddi_phy_is_enabled(dev_priv, rcomp_phy);
+
+ /*
+ * We need to copy the GRC calibration value from rcomp_phy,
+ * so make sure it's powered up.
+ */
+ if (!was_enabled)
+ _bxt_ddi_phy_init(dev_priv, rcomp_phy);
+ }
+
+ _bxt_ddi_phy_init(dev_priv, phy);
+
+ if (rcomp_phy != -1 && !was_enabled)
+ bxt_ddi_phy_uninit(dev_priv, phy_info->rcomp_phy);
+}
+
+static bool __printf(6, 7)
+__phy_reg_verify_state(struct drm_i915_private *dev_priv, enum dpio_phy phy,
+ i915_reg_t reg, u32 mask, u32 expected,
+ const char *reg_fmt, ...)
+{
+ struct va_format vaf;
+ va_list args;
+ u32 val;
+
+ val = I915_READ(reg);
+ if ((val & mask) == expected)
+ return true;
+
+ va_start(args, reg_fmt);
+ vaf.fmt = reg_fmt;
+ vaf.va = &args;
+
+ DRM_DEBUG_DRIVER("DDI PHY %d reg %pV [%08x] state mismatch: "
+ "current %08x, expected %08x (mask %08x)\n",
+ phy, &vaf, reg.reg, val, (val & ~mask) | expected,
+ mask);
+
+ va_end(args);
+
+ return false;
+}
+
+bool bxt_ddi_phy_verify_state(struct drm_i915_private *dev_priv,
+ enum dpio_phy phy)
+{
+ const struct bxt_ddi_phy_info *phy_info = &bxt_ddi_phy_info[phy];
+ uint32_t mask;
+ bool ok;
+
+#define _CHK(reg, mask, exp, fmt, ...) \
+ __phy_reg_verify_state(dev_priv, phy, reg, mask, exp, fmt, \
+ ## __VA_ARGS__)
+
+ if (!bxt_ddi_phy_is_enabled(dev_priv, phy))
+ return false;
+
+ ok = true;
+
+ /* PLL Rcomp code offset */
+ ok &= _CHK(BXT_PORT_CL1CM_DW9(phy),
+ IREF0RC_OFFSET_MASK, 0xe4 << IREF0RC_OFFSET_SHIFT,
+ "BXT_PORT_CL1CM_DW9(%d)", phy);
+ ok &= _CHK(BXT_PORT_CL1CM_DW10(phy),
+ IREF1RC_OFFSET_MASK, 0xe4 << IREF1RC_OFFSET_SHIFT,
+ "BXT_PORT_CL1CM_DW10(%d)", phy);
+
+ /* Power gating */
+ mask = OCL1_POWER_DOWN_EN | DW28_OLDO_DYN_PWR_DOWN_EN | SUS_CLK_CONFIG;
+ ok &= _CHK(BXT_PORT_CL1CM_DW28(phy), mask, mask,
+ "BXT_PORT_CL1CM_DW28(%d)", phy);
+
+ if (phy_info->dual_channel)
+ ok &= _CHK(BXT_PORT_CL2CM_DW6(phy),
+ DW6_OLDO_DYN_PWR_DOWN_EN, DW6_OLDO_DYN_PWR_DOWN_EN,
+ "BXT_PORT_CL2CM_DW6(%d)", phy);
+
+ if (phy_info->rcomp_phy != -1) {
+ u32 grc_code = dev_priv->bxt_phy_grc;
+
+ grc_code = grc_code << GRC_CODE_FAST_SHIFT |
+ grc_code << GRC_CODE_SLOW_SHIFT |
+ grc_code;
+ mask = GRC_CODE_FAST_MASK | GRC_CODE_SLOW_MASK |
+ GRC_CODE_NOM_MASK;
+ ok &= _CHK(BXT_PORT_REF_DW6(phy), mask, grc_code,
+ "BXT_PORT_REF_DW6(%d)", phy);
+
+ mask = GRC_DIS | GRC_RDY_OVRD;
+ ok &= _CHK(BXT_PORT_REF_DW8(phy), mask, mask,
+ "BXT_PORT_REF_DW8(%d)", phy);
+ }
+
+ return ok;
+#undef _CHK
+}
+
+uint8_t
+bxt_ddi_phy_calc_lane_lat_optim_mask(struct intel_encoder *encoder,
+ uint8_t lane_count)
+{
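+ /* Mask bit i selects TX lane i; consumed by bxt_ddi_phy_set_lane_optim_mask() below. */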
+ switch (lane_count) {
+ case 1:
+ return 0;
+ case 2:
+ return BIT(2) | BIT(0);
+ case 4:
+ return BIT(3) | BIT(2) | BIT(0);
+ default:
+ MISSING_CASE(lane_count);
+
+ return 0;
+ }
+}
+
+void bxt_ddi_phy_set_lane_optim_mask(struct intel_encoder *encoder,
+ uint8_t lane_lat_optim_mask)
+{
+ struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
+ struct drm_i915_private *dev_priv = to_i915(dport->base.base.dev);
+ enum port port = dport->port;
+ enum dpio_phy phy;
+ enum dpio_channel ch;
+ int lane;
+
+ bxt_port_to_phy_channel(port, &phy, &ch);
+
+ for (lane = 0; lane < 4; lane++) {
+ u32 val = I915_READ(BXT_PORT_TX_DW14_LN(phy, ch, lane));
+
+ /*
+ * Note that on CHV this flag is called UPAR, but has
+ * the same function.
+ */
+ val &= ~LATENCY_OPTIM;
+ if (lane_lat_optim_mask & BIT(lane))
+ val |= LATENCY_OPTIM;
+
+ I915_WRITE(BXT_PORT_TX_DW14_LN(phy, ch, lane), val);
+ }
+}
+
+uint8_t
+bxt_ddi_phy_get_lane_lat_optim_mask(struct intel_encoder *encoder)
+{
+ struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
+ struct drm_i915_private *dev_priv = to_i915(dport->base.base.dev);
+ enum port port = dport->port;
+ enum dpio_phy phy;
+ enum dpio_channel ch;
+ int lane;
+ uint8_t mask;
+
+ bxt_port_to_phy_channel(port, &phy, &ch);
+
+ mask = 0;
+ for (lane = 0; lane < 4; lane++) {
+ u32 val = I915_READ(BXT_PORT_TX_DW14_LN(phy, ch, lane));
+
+ if (val & LATENCY_OPTIM)
+ mask |= BIT(lane);
+ }
+
+ return mask;
+}
+
void chv_set_phy_signal_level(struct intel_encoder *encoder,
u32 deemph_reg_value, u32 margin_reg_value,
bool uniq_trans_scale)
diff --git a/drivers/gpu/drm/i915/intel_dpll_mgr.c b/drivers/gpu/drm/i915/intel_dpll_mgr.c
index 1c59ca50c430..58a756f2f224 100644
--- a/drivers/gpu/drm/i915/intel_dpll_mgr.c
+++ b/drivers/gpu/drm/i915/intel_dpll_mgr.c
@@ -188,13 +188,12 @@ out:
void intel_disable_shared_dpll(struct intel_crtc *crtc)
{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_shared_dpll *pll = crtc->config->shared_dpll;
unsigned crtc_mask = 1 << drm_crtc_index(&crtc->base);
/* PCH only available on ILK+ */
- if (INTEL_INFO(dev)->gen < 5)
+ if (INTEL_GEN(dev_priv) < 5)
return;
if (pll == NULL)
@@ -1371,6 +1370,10 @@ static void bxt_ddi_pll_enable(struct drm_i915_private *dev_priv,
{
uint32_t temp;
enum port port = (enum port)pll->id; /* 1:1 port->PLL mapping */
+ enum dpio_phy phy;
+ enum dpio_channel ch;
+
+ bxt_port_to_phy_channel(port, &phy, &ch);
/* Non-SSC reference */
temp = I915_READ(BXT_PORT_PLL_ENABLE(port));
@@ -1378,72 +1381,72 @@ static void bxt_ddi_pll_enable(struct drm_i915_private *dev_priv,
I915_WRITE(BXT_PORT_PLL_ENABLE(port), temp);
/* Disable 10 bit clock */
- temp = I915_READ(BXT_PORT_PLL_EBB_4(port));
+ temp = I915_READ(BXT_PORT_PLL_EBB_4(phy, ch));
temp &= ~PORT_PLL_10BIT_CLK_ENABLE;
- I915_WRITE(BXT_PORT_PLL_EBB_4(port), temp);
+ I915_WRITE(BXT_PORT_PLL_EBB_4(phy, ch), temp);
/* Write P1 & P2 */
- temp = I915_READ(BXT_PORT_PLL_EBB_0(port));
+ temp = I915_READ(BXT_PORT_PLL_EBB_0(phy, ch));
temp &= ~(PORT_PLL_P1_MASK | PORT_PLL_P2_MASK);
temp |= pll->config.hw_state.ebb0;
- I915_WRITE(BXT_PORT_PLL_EBB_0(port), temp);
+ I915_WRITE(BXT_PORT_PLL_EBB_0(phy, ch), temp);
/* Write M2 integer */
- temp = I915_READ(BXT_PORT_PLL(port, 0));
+ temp = I915_READ(BXT_PORT_PLL(phy, ch, 0));
temp &= ~PORT_PLL_M2_MASK;
temp |= pll->config.hw_state.pll0;
- I915_WRITE(BXT_PORT_PLL(port, 0), temp);
+ I915_WRITE(BXT_PORT_PLL(phy, ch, 0), temp);
/* Write N */
- temp = I915_READ(BXT_PORT_PLL(port, 1));
+ temp = I915_READ(BXT_PORT_PLL(phy, ch, 1));
temp &= ~PORT_PLL_N_MASK;
temp |= pll->config.hw_state.pll1;
- I915_WRITE(BXT_PORT_PLL(port, 1), temp);
+ I915_WRITE(BXT_PORT_PLL(phy, ch, 1), temp);
/* Write M2 fraction */
- temp = I915_READ(BXT_PORT_PLL(port, 2));
+ temp = I915_READ(BXT_PORT_PLL(phy, ch, 2));
temp &= ~PORT_PLL_M2_FRAC_MASK;
temp |= pll->config.hw_state.pll2;
- I915_WRITE(BXT_PORT_PLL(port, 2), temp);
+ I915_WRITE(BXT_PORT_PLL(phy, ch, 2), temp);
/* Write M2 fraction enable */
- temp = I915_READ(BXT_PORT_PLL(port, 3));
+ temp = I915_READ(BXT_PORT_PLL(phy, ch, 3));
temp &= ~PORT_PLL_M2_FRAC_ENABLE;
temp |= pll->config.hw_state.pll3;
- I915_WRITE(BXT_PORT_PLL(port, 3), temp);
+ I915_WRITE(BXT_PORT_PLL(phy, ch, 3), temp);
/* Write coeff */
- temp = I915_READ(BXT_PORT_PLL(port, 6));
+ temp = I915_READ(BXT_PORT_PLL(phy, ch, 6));
temp &= ~PORT_PLL_PROP_COEFF_MASK;
temp &= ~PORT_PLL_INT_COEFF_MASK;
temp &= ~PORT_PLL_GAIN_CTL_MASK;
temp |= pll->config.hw_state.pll6;
- I915_WRITE(BXT_PORT_PLL(port, 6), temp);
+ I915_WRITE(BXT_PORT_PLL(phy, ch, 6), temp);
/* Write calibration val */
- temp = I915_READ(BXT_PORT_PLL(port, 8));
+ temp = I915_READ(BXT_PORT_PLL(phy, ch, 8));
temp &= ~PORT_PLL_TARGET_CNT_MASK;
temp |= pll->config.hw_state.pll8;
- I915_WRITE(BXT_PORT_PLL(port, 8), temp);
+ I915_WRITE(BXT_PORT_PLL(phy, ch, 8), temp);
- temp = I915_READ(BXT_PORT_PLL(port, 9));
+ temp = I915_READ(BXT_PORT_PLL(phy, ch, 9));
temp &= ~PORT_PLL_LOCK_THRESHOLD_MASK;
temp |= pll->config.hw_state.pll9;
- I915_WRITE(BXT_PORT_PLL(port, 9), temp);
+ I915_WRITE(BXT_PORT_PLL(phy, ch, 9), temp);
- temp = I915_READ(BXT_PORT_PLL(port, 10));
+ temp = I915_READ(BXT_PORT_PLL(phy, ch, 10));
temp &= ~PORT_PLL_DCO_AMP_OVR_EN_H;
temp &= ~PORT_PLL_DCO_AMP_MASK;
temp |= pll->config.hw_state.pll10;
- I915_WRITE(BXT_PORT_PLL(port, 10), temp);
+ I915_WRITE(BXT_PORT_PLL(phy, ch, 10), temp);
/* Recalibrate with new settings */
- temp = I915_READ(BXT_PORT_PLL_EBB_4(port));
+ temp = I915_READ(BXT_PORT_PLL_EBB_4(phy, ch));
temp |= PORT_PLL_RECALIBRATE;
- I915_WRITE(BXT_PORT_PLL_EBB_4(port), temp);
+ I915_WRITE(BXT_PORT_PLL_EBB_4(phy, ch), temp);
temp &= ~PORT_PLL_10BIT_CLK_ENABLE;
temp |= pll->config.hw_state.ebb4;
- I915_WRITE(BXT_PORT_PLL_EBB_4(port), temp);
+ I915_WRITE(BXT_PORT_PLL_EBB_4(phy, ch), temp);
/* Enable PLL */
temp = I915_READ(BXT_PORT_PLL_ENABLE(port));
@@ -1459,11 +1462,11 @@ static void bxt_ddi_pll_enable(struct drm_i915_private *dev_priv,
* While we write to the group register to program all lanes at once, we
* can read only lane registers, and we pick lanes 0/1 for that.
*/
- temp = I915_READ(BXT_PORT_PCS_DW12_LN01(port));
+ temp = I915_READ(BXT_PORT_PCS_DW12_LN01(phy, ch));
temp &= ~LANE_STAGGER_MASK;
temp &= ~LANESTAGGER_STRAP_OVRD;
temp |= pll->config.hw_state.pcsdw12;
- I915_WRITE(BXT_PORT_PCS_DW12_GRP(port), temp);
+ I915_WRITE(BXT_PORT_PCS_DW12_GRP(phy, ch), temp);
}
static void bxt_ddi_pll_disable(struct drm_i915_private *dev_priv,
@@ -1485,6 +1488,10 @@ static bool bxt_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
enum port port = (enum port)pll->id; /* 1:1 port->PLL mapping */
uint32_t val;
bool ret;
+ enum dpio_phy phy;
+ enum dpio_channel ch;
+
+ bxt_port_to_phy_channel(port, &phy, &ch);
if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_PLLS))
return false;
@@ -1495,36 +1502,36 @@ static bool bxt_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
if (!(val & PORT_PLL_ENABLE))
goto out;
- hw_state->ebb0 = I915_READ(BXT_PORT_PLL_EBB_0(port));
+ hw_state->ebb0 = I915_READ(BXT_PORT_PLL_EBB_0(phy, ch));
hw_state->ebb0 &= PORT_PLL_P1_MASK | PORT_PLL_P2_MASK;
- hw_state->ebb4 = I915_READ(BXT_PORT_PLL_EBB_4(port));
+ hw_state->ebb4 = I915_READ(BXT_PORT_PLL_EBB_4(phy, ch));
hw_state->ebb4 &= PORT_PLL_10BIT_CLK_ENABLE;
- hw_state->pll0 = I915_READ(BXT_PORT_PLL(port, 0));
+ hw_state->pll0 = I915_READ(BXT_PORT_PLL(phy, ch, 0));
hw_state->pll0 &= PORT_PLL_M2_MASK;
- hw_state->pll1 = I915_READ(BXT_PORT_PLL(port, 1));
+ hw_state->pll1 = I915_READ(BXT_PORT_PLL(phy, ch, 1));
hw_state->pll1 &= PORT_PLL_N_MASK;
- hw_state->pll2 = I915_READ(BXT_PORT_PLL(port, 2));
+ hw_state->pll2 = I915_READ(BXT_PORT_PLL(phy, ch, 2));
hw_state->pll2 &= PORT_PLL_M2_FRAC_MASK;
- hw_state->pll3 = I915_READ(BXT_PORT_PLL(port, 3));
+ hw_state->pll3 = I915_READ(BXT_PORT_PLL(phy, ch, 3));
hw_state->pll3 &= PORT_PLL_M2_FRAC_ENABLE;
- hw_state->pll6 = I915_READ(BXT_PORT_PLL(port, 6));
+ hw_state->pll6 = I915_READ(BXT_PORT_PLL(phy, ch, 6));
hw_state->pll6 &= PORT_PLL_PROP_COEFF_MASK |
PORT_PLL_INT_COEFF_MASK |
PORT_PLL_GAIN_CTL_MASK;
- hw_state->pll8 = I915_READ(BXT_PORT_PLL(port, 8));
+ hw_state->pll8 = I915_READ(BXT_PORT_PLL(phy, ch, 8));
hw_state->pll8 &= PORT_PLL_TARGET_CNT_MASK;
- hw_state->pll9 = I915_READ(BXT_PORT_PLL(port, 9));
+ hw_state->pll9 = I915_READ(BXT_PORT_PLL(phy, ch, 9));
hw_state->pll9 &= PORT_PLL_LOCK_THRESHOLD_MASK;
- hw_state->pll10 = I915_READ(BXT_PORT_PLL(port, 10));
+ hw_state->pll10 = I915_READ(BXT_PORT_PLL(phy, ch, 10));
hw_state->pll10 &= PORT_PLL_DCO_AMP_OVR_EN_H |
PORT_PLL_DCO_AMP_MASK;
@@ -1533,11 +1540,11 @@ static bool bxt_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
* can read only lane registers. We configure all lanes the same way, so
* here just read out lanes 0/1 and output a note if lanes 2/3 differ.
*/
- hw_state->pcsdw12 = I915_READ(BXT_PORT_PCS_DW12_LN01(port));
- if (I915_READ(BXT_PORT_PCS_DW12_LN23(port)) != hw_state->pcsdw12)
+ hw_state->pcsdw12 = I915_READ(BXT_PORT_PCS_DW12_LN01(phy, ch));
+ if (I915_READ(BXT_PORT_PCS_DW12_LN23(phy, ch)) != hw_state->pcsdw12)
DRM_DEBUG_DRIVER("lane stagger config different for lane 01 (%08x) and 23 (%08x)\n",
hw_state->pcsdw12,
- I915_READ(BXT_PORT_PCS_DW12_LN23(port)));
+ I915_READ(BXT_PORT_PCS_DW12_LN23(phy, ch)));
hw_state->pcsdw12 &= LANE_STAGGER_MASK | LANESTAGGER_STRAP_OVRD;
ret = true;
@@ -1851,13 +1858,13 @@ void intel_shared_dpll_init(struct drm_device *dev)
const struct dpll_info *dpll_info;
int i;
- if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev))
+ if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
dpll_mgr = &skl_pll_mgr;
- else if (IS_BROXTON(dev))
+ else if (IS_BROXTON(dev_priv))
dpll_mgr = &bxt_pll_mgr;
- else if (HAS_DDI(dev))
+ else if (HAS_DDI(dev_priv))
dpll_mgr = &hsw_pll_mgr;
- else if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
+ else if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv))
dpll_mgr = &pch_pll_mgr;
if (!dpll_mgr) {
@@ -1883,7 +1890,7 @@ void intel_shared_dpll_init(struct drm_device *dev)
BUG_ON(dev_priv->num_shared_dpll > I915_NUM_PLLS);
/* FIXME: Move this to a more suitable place */
- if (HAS_DDI(dev))
+ if (HAS_DDI(dev_priv))
intel_ddi_pll_init(dev);
}
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index a19ec06f9e42..cd132c216a67 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -206,6 +206,7 @@ struct intel_encoder {
struct drm_encoder base;
enum intel_output_type type;
+ enum port port;
unsigned int cloneable;
void (*hot_plug)(struct intel_encoder *);
bool (*compute_config)(struct intel_encoder *,
@@ -247,6 +248,8 @@ struct intel_encoder {
void (*suspend)(struct intel_encoder *);
int crtc_mask;
enum hpd_pin hpd_pin;
+ /* for communication with audio component; protected by av_mutex */
+ const struct drm_connector *audio_connector;
};
struct intel_panel {
@@ -291,6 +294,9 @@ struct intel_connector {
*/
struct intel_encoder *encoder;
+ /* ACPI device id for ACPI and driver cooperation */
+ u32 acpi_device_id;
+
/* Reads out the current hw, returning true if the connector is enabled
* and active (i.e. dpms ON state). */
bool (*get_hw_state)(struct intel_connector *);
@@ -362,6 +368,8 @@ struct intel_atomic_state {
/* Gen9+ only */
struct skl_wm_values wm_results;
+
+ struct i915_sw_fence commit_ready;
};
struct intel_plane_state {
@@ -398,9 +406,6 @@ struct intel_plane_state {
int scaler_id;
struct drm_intel_sprite_colorkey ckey;
-
- /* async flip related structures */
- struct drm_i915_gem_request *wait_req;
};
struct intel_initial_plane_config {
@@ -465,9 +470,13 @@ struct intel_pipe_wm {
bool sprites_scaled;
};
-struct skl_pipe_wm {
+struct skl_plane_wm {
struct skl_wm_level wm[8];
struct skl_wm_level trans_wm;
+};
+
+struct skl_pipe_wm {
+ struct skl_plane_wm planes[I915_MAX_PLANES];
uint32_t linetime;
};
@@ -493,14 +502,7 @@ struct intel_crtc_wm_state {
struct {
/* gen9+ only needs 1-step wm programming */
struct skl_pipe_wm optimal;
-
- /* cached plane data rate */
- unsigned plane_data_rate[I915_MAX_PLANES];
- unsigned plane_y_data_rate[I915_MAX_PLANES];
-
- /* minimum block allocation */
- uint16_t minimum_blocks[I915_MAX_PLANES];
- uint16_t minimum_y_blocks[I915_MAX_PLANES];
+ struct skl_ddb_entry ddb;
} skl;
};
@@ -653,7 +655,6 @@ struct intel_crtc_state {
bool double_wide;
- bool dp_encoder_is_mst;
int pbn;
struct intel_crtc_scaler_state scaler_state;
@@ -723,7 +724,6 @@ struct intel_crtc {
/* watermarks currently being used */
union {
struct intel_pipe_wm ilk;
- struct skl_pipe_wm skl;
} active;
/* allow CxSR on this pipe */
@@ -796,22 +796,22 @@ struct intel_plane {
};
struct intel_watermark_params {
- unsigned long fifo_size;
- unsigned long max_wm;
- unsigned long default_wm;
- unsigned long guard_size;
- unsigned long cacheline_size;
+ u16 fifo_size;
+ u16 max_wm;
+ u8 default_wm;
+ u8 guard_size;
+ u8 cacheline_size;
};
struct cxsr_latency {
- int is_desktop;
- int is_ddr3;
- unsigned long fsb_freq;
- unsigned long mem_freq;
- unsigned long display_sr;
- unsigned long display_hpll_disable;
- unsigned long cursor_sr;
- unsigned long cursor_hpll_disable;
+ bool is_desktop : 1;
+ bool is_ddr3 : 1;
+ u16 fsb_freq;
+ u16 mem_freq;
+ u16 display_sr;
+ u16 display_hpll_disable;
+ u16 cursor_sr;
+ u16 cursor_hpll_disable;
};
#define to_intel_atomic_state(x) container_of(x, struct intel_atomic_state, base)
@@ -872,6 +872,14 @@ enum link_m_n_set {
M2_N2
};
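+
+/* Mirrors the 12-byte DPCD sink/branch device identification layout
+ * (OUI, device id, hw/sw revision), hence __packed.
+ */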
+struct intel_dp_desc {
+ u8 oui[3];
+ u8 device_id[6];
+ u8 hw_rev;
+ u8 sw_major_rev;
+ u8 sw_minor_rev;
+} __packed;
+
struct intel_dp {
i915_reg_t output_reg;
i915_reg_t aux_ch_ctl_reg;
@@ -894,6 +902,8 @@ struct intel_dp {
/* sink rates as reported by DP_SUPPORTED_LINK_RATES */
uint8_t num_sink_rates;
int sink_rates[DP_MAX_SUPPORTED_RATES];
+ /* sink or branch descriptor */
+ struct intel_dp_desc desc;
struct drm_dp_aux aux;
uint8_t train_set[4];
int panel_power_up_delay;
@@ -950,17 +960,22 @@ struct intel_dp {
bool compliance_test_active;
};
+struct intel_lspcon {
+ bool active;
+ enum drm_lspcon_mode mode;
+ bool desc_valid;
+};
+
struct intel_digital_port {
struct intel_encoder base;
enum port port;
u32 saved_port_bits;
struct intel_dp dp;
struct intel_hdmi hdmi;
+ struct intel_lspcon lspcon;
enum irqreturn (*hpd_pulse)(struct intel_digital_port *, bool);
bool release_cl2_override;
uint8_t max_lanes;
- /* for communication with audio component; protected by av_mutex */
- const struct drm_connector *audio_connector;
};
struct intel_dp_mst_encoder {
@@ -1012,17 +1027,15 @@ vlv_pipe_to_channel(enum pipe pipe)
}
}
-static inline struct drm_crtc *
-intel_get_crtc_for_pipe(struct drm_device *dev, int pipe)
+static inline struct intel_crtc *
+intel_get_crtc_for_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
return dev_priv->pipe_to_crtc_mapping[pipe];
}
-static inline struct drm_crtc *
-intel_get_crtc_for_plane(struct drm_device *dev, int plane)
+static inline struct intel_crtc *
+intel_get_crtc_for_plane(struct drm_i915_private *dev_priv, enum plane plane)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
return dev_priv->plane_to_crtc_mapping[plane];
}
@@ -1082,15 +1095,6 @@ hdmi_to_dig_port(struct intel_hdmi *intel_hdmi)
return container_of(intel_hdmi, struct intel_digital_port, hdmi);
}
-/*
- * Returns the number of planes for this pipe, ie the number of sprites + 1
- * (primary plane). This doesn't count the cursor plane then.
- */
-static inline unsigned int intel_num_planes(struct intel_crtc *crtc)
-{
- return INTEL_INFO(crtc->base.dev)->num_sprites[crtc->pipe] + 1;
-}
-
/* intel_fifo_underrun.c */
bool intel_set_cpu_fifo_underrun_reporting(struct drm_i915_private *dev_priv,
enum pipe pipe, bool enable);
@@ -1107,6 +1111,9 @@ void intel_check_pch_fifo_underruns(struct drm_i915_private *dev_priv);
/* i915_irq.c */
void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask);
void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask);
+void gen6_reset_pm_iir(struct drm_i915_private *dev_priv, u32 mask);
+void gen6_mask_pm_irq(struct drm_i915_private *dev_priv, u32 mask);
+void gen6_unmask_pm_irq(struct drm_i915_private *dev_priv, u32 mask);
void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask);
void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask);
void gen6_reset_rps_interrupts(struct drm_i915_private *dev_priv);
@@ -1129,6 +1136,9 @@ void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv,
unsigned int pipe_mask);
void gen8_irq_power_well_pre_disable(struct drm_i915_private *dev_priv,
unsigned int pipe_mask);
+void gen9_reset_guc_interrupts(struct drm_i915_private *dev_priv);
+void gen9_enable_guc_interrupts(struct drm_i915_private *dev_priv);
+void gen9_disable_guc_interrupts(struct drm_i915_private *dev_priv);
/* intel_crt.c */
void intel_crt_init(struct drm_device *dev);
@@ -1176,12 +1186,15 @@ u32 intel_fb_stride_alignment(const struct drm_i915_private *dev_priv,
/* intel_audio.c */
void intel_init_audio_hooks(struct drm_i915_private *dev_priv);
-void intel_audio_codec_enable(struct intel_encoder *encoder);
+void intel_audio_codec_enable(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state);
void intel_audio_codec_disable(struct intel_encoder *encoder);
void i915_audio_component_init(struct drm_i915_private *dev_priv);
void i915_audio_component_cleanup(struct drm_i915_private *dev_priv);
/* intel_display.c */
+enum transcoder intel_crtc_pch_transcoder(struct intel_crtc *crtc);
void skl_set_preferred_cdclk_vco(struct drm_i915_private *dev_priv, int vco);
void intel_update_rawclk(struct drm_i915_private *dev_priv);
int vlv_get_cck_clock(struct drm_i915_private *dev_priv,
@@ -1230,18 +1243,17 @@ intel_crtc_has_dp_encoder(const struct intel_crtc_state *crtc_state)
(1 << INTEL_OUTPUT_EDP));
}
static inline void
-intel_wait_for_vblank(struct drm_device *dev, int pipe)
+intel_wait_for_vblank(struct drm_i915_private *dev_priv, enum pipe pipe)
{
- drm_wait_one_vblank(dev, pipe);
+ drm_wait_one_vblank(&dev_priv->drm, pipe);
}
static inline void
-intel_wait_for_vblank_if_active(struct drm_device *dev, int pipe)
+intel_wait_for_vblank_if_active(struct drm_i915_private *dev_priv, int pipe)
{
- const struct intel_crtc *crtc =
- to_intel_crtc(intel_get_crtc_for_pipe(dev, pipe));
+ const struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
if (crtc->active)
- intel_wait_for_vblank(dev, pipe);
+ intel_wait_for_vblank(dev_priv, pipe);
}
u32 intel_crtc_get_vblank_counter(struct intel_crtc *crtc);
@@ -1285,21 +1297,12 @@ int intel_plane_atomic_calc_changes(struct drm_crtc_state *crtc_state,
unsigned int intel_tile_height(const struct drm_i915_private *dev_priv,
uint64_t fb_modifier, unsigned int cpp);
-static inline bool
-intel_rotation_90_or_270(unsigned int rotation)
-{
- return rotation & (DRM_ROTATE_90 | DRM_ROTATE_270);
-}
-
-void intel_create_rotation_property(struct drm_device *dev,
- struct intel_plane *plane);
-
void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv,
enum pipe pipe);
-int vlv_force_pll_on(struct drm_device *dev, enum pipe pipe,
+int vlv_force_pll_on(struct drm_i915_private *dev_priv, enum pipe pipe,
const struct dpll *dpll);
-void vlv_force_pll_off(struct drm_device *dev, enum pipe pipe);
+void vlv_force_pll_off(struct drm_i915_private *dev_priv, enum pipe pipe);
int lpt_get_iclkip(struct drm_i915_private *dev_priv);
/* modesetting asserts */
@@ -1327,12 +1330,6 @@ void hsw_enable_pc8(struct drm_i915_private *dev_priv);
void hsw_disable_pc8(struct drm_i915_private *dev_priv);
void bxt_init_cdclk(struct drm_i915_private *dev_priv);
void bxt_uninit_cdclk(struct drm_i915_private *dev_priv);
-void bxt_ddi_phy_init(struct drm_i915_private *dev_priv, enum dpio_phy phy);
-void bxt_ddi_phy_uninit(struct drm_i915_private *dev_priv, enum dpio_phy phy);
-bool bxt_ddi_phy_is_enabled(struct drm_i915_private *dev_priv,
- enum dpio_phy phy);
-bool bxt_ddi_phy_verify_state(struct drm_i915_private *dev_priv,
- enum dpio_phy phy);
void gen9_sanitize_dc_state(struct drm_i915_private *dev_priv);
void bxt_enable_dc9(struct drm_i915_private *dev_priv);
void bxt_disable_dc9(struct drm_i915_private *dev_priv);
@@ -1350,7 +1347,7 @@ bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state, int target_clock,
struct dpll *best_clock);
int chv_calc_dpll_params(int refclk, struct dpll *pll_clock);
-bool intel_crtc_active(struct drm_crtc *crtc);
+bool intel_crtc_active(struct intel_crtc *crtc);
void hsw_enable_ips(struct intel_crtc *crtc);
void hsw_disable_ips(struct intel_crtc *crtc);
enum intel_display_power_domain
@@ -1396,7 +1393,7 @@ int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc);
bool intel_dp_compute_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config,
struct drm_connector_state *conn_state);
-bool intel_dp_is_edp(struct drm_device *dev, enum port port);
+bool intel_dp_is_edp(struct drm_i915_private *dev_priv, enum port port);
enum irqreturn intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port,
bool long_hpd);
void intel_edp_backlight_on(struct intel_dp *intel_dp);
@@ -1443,6 +1440,11 @@ static inline unsigned int intel_dp_unused_lane_mask(int lane_count)
return ~((1 << lane_count) - 1) & 0xf;
}
+bool intel_dp_read_dpcd(struct intel_dp *intel_dp);
+bool __intel_dp_read_desc(struct intel_dp *intel_dp,
+ struct intel_dp_desc *desc);
+bool intel_dp_read_desc(struct intel_dp *intel_dp);
+
/* intel_dp_aux_backlight.c */
int intel_dp_aux_init_backlight_funcs(struct intel_connector *intel_connector);
@@ -1487,6 +1489,10 @@ static inline void intel_fbdev_set_suspend(struct drm_device *dev, int state, bo
{
}
+static inline void intel_fbdev_output_poll_changed(struct drm_device *dev)
+{
+}
+
static inline void intel_fbdev_restore_mode(struct drm_device *dev)
{
}
@@ -1513,6 +1519,7 @@ void intel_fbc_invalidate(struct drm_i915_private *dev_priv,
void intel_fbc_flush(struct drm_i915_private *dev_priv,
unsigned int frontbuffer_bits, enum fb_op_origin origin);
void intel_fbc_cleanup_cfb(struct drm_i915_private *dev_priv);
+void intel_fbc_handle_fifo_underrun_irq(struct drm_i915_private *dev_priv);
/* intel_hdmi.c */
void intel_hdmi_init(struct drm_device *dev, i915_reg_t hdmi_reg, enum port port);
@@ -1642,23 +1649,6 @@ assert_rpm_wakelock_held(struct drm_i915_private *dev_priv)
DRM_DEBUG_DRIVER("RPM wakelock ref not held during HW access");
}
-static inline int
-assert_rpm_atomic_begin(struct drm_i915_private *dev_priv)
-{
- int seq = atomic_read(&dev_priv->pm.atomic_seq);
-
- assert_rpm_wakelock_held(dev_priv);
-
- return seq;
-}
-
-static inline void
-assert_rpm_atomic_end(struct drm_i915_private *dev_priv, int begin_seq)
-{
- WARN_ONCE(atomic_read(&dev_priv->pm.atomic_seq) != begin_seq,
- "HW access outside of RPM atomic section\n");
-}
-
/**
* disable_rpm_wakeref_asserts - disable the RPM assert checks
* @dev_priv: i915 device instance
@@ -1714,11 +1704,11 @@ bool chv_phy_powergate_ch(struct drm_i915_private *dev_priv, enum dpio_phy phy,
/* intel_pm.c */
-void intel_init_clock_gating(struct drm_device *dev);
-void intel_suspend_hw(struct drm_device *dev);
-int ilk_wm_max_level(const struct drm_device *dev);
-void intel_update_watermarks(struct drm_crtc *crtc);
-void intel_init_pm(struct drm_device *dev);
+void intel_init_clock_gating(struct drm_i915_private *dev_priv);
+void intel_suspend_hw(struct drm_i915_private *dev_priv);
+int ilk_wm_max_level(const struct drm_i915_private *dev_priv);
+void intel_update_watermarks(struct intel_crtc *crtc);
+void intel_init_pm(struct drm_i915_private *dev_priv);
void intel_init_clock_gating_hooks(struct drm_i915_private *dev_priv);
void intel_pm_setup(struct drm_device *dev);
void intel_gpu_ips_init(struct drm_i915_private *dev_priv);
@@ -1742,21 +1732,16 @@ void ilk_wm_get_hw_state(struct drm_device *dev);
void skl_wm_get_hw_state(struct drm_device *dev);
void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv,
struct skl_ddb_allocation *ddb /* out */);
+void skl_pipe_wm_get_hw_state(struct drm_crtc *crtc,
+ struct skl_pipe_wm *out);
bool intel_can_enable_sagv(struct drm_atomic_state *state);
int intel_enable_sagv(struct drm_i915_private *dev_priv);
int intel_disable_sagv(struct drm_i915_private *dev_priv);
-bool skl_ddb_allocation_equals(const struct skl_ddb_allocation *old,
- const struct skl_ddb_allocation *new,
- enum pipe pipe);
-bool skl_ddb_allocation_overlaps(struct drm_atomic_state *state,
- const struct skl_ddb_allocation *old,
- const struct skl_ddb_allocation *new,
- enum pipe pipe);
-void skl_write_cursor_wm(struct intel_crtc *intel_crtc,
- const struct skl_wm_values *wm);
-void skl_write_plane_wm(struct intel_crtc *intel_crtc,
- const struct skl_wm_values *wm,
- int plane);
+bool skl_wm_level_equals(const struct skl_wm_level *l1,
+ const struct skl_wm_level *l2);
+bool skl_ddb_allocation_overlaps(const struct skl_ddb_entry **entries,
+ const struct skl_ddb_entry *ddb,
+ int ignore);
uint32_t ilk_pipe_pixel_rate(const struct intel_crtc_state *pipe_config);
bool ilk_disable_lp_wm(struct drm_device *dev);
int sanitize_rc6_option(struct drm_i915_private *dev_priv, int enable_rc6);
@@ -1773,7 +1758,8 @@ bool intel_sdvo_init(struct drm_device *dev,
/* intel_sprite.c */
int intel_usecs_to_scanlines(const struct drm_display_mode *adjusted_mode,
int usecs);
-int intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane);
+struct intel_plane *intel_sprite_plane_create(struct drm_i915_private *dev_priv,
+ enum pipe pipe, int plane);
int intel_sprite_set_colorkey(struct drm_device *dev, void *data,
struct drm_file *file_priv);
void intel_pipe_update_start(struct intel_crtc *crtc);
@@ -1835,4 +1821,7 @@ int intel_color_check(struct drm_crtc *crtc, struct drm_crtc_state *state);
void intel_color_set_csc(struct drm_crtc_state *crtc_state);
void intel_color_load_luts(struct drm_crtc_state *crtc_state);
+/* intel_lspcon.c */
+bool lspcon_init(struct intel_digital_port *intel_dig_port);
+void lspcon_resume(struct intel_lspcon *lspcon);
#endif /* __INTEL_DRV_H__ */
diff --git a/drivers/gpu/drm/i915/intel_dsi.c b/drivers/gpu/drm/i915/intel_dsi.c
index b2e3d3a334f7..5b72c50d6f76 100644
--- a/drivers/gpu/drm/i915/intel_dsi.c
+++ b/drivers/gpu/drm/i915/intel_dsi.c
@@ -437,11 +437,11 @@ static void vlv_dsi_device_ready(struct intel_encoder *encoder)
static void intel_dsi_device_ready(struct intel_encoder *encoder)
{
- struct drm_device *dev = encoder->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
+ if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
vlv_dsi_device_ready(encoder);
- else if (IS_BROXTON(dev))
+ else if (IS_BROXTON(dev_priv))
bxt_dsi_device_ready(encoder);
}
@@ -464,7 +464,7 @@ static void intel_dsi_port_enable(struct intel_encoder *encoder)
}
for_each_dsi_port(port, intel_dsi->ports) {
- i915_reg_t port_ctrl = IS_BROXTON(dev) ?
+ i915_reg_t port_ctrl = IS_BROXTON(dev_priv) ?
BXT_MIPI_PORT_CTRL(port) : MIPI_PORT_CTRL(port);
u32 temp;
@@ -494,7 +494,7 @@ static void intel_dsi_port_disable(struct intel_encoder *encoder)
enum port port;
for_each_dsi_port(port, intel_dsi->ports) {
- i915_reg_t port_ctrl = IS_BROXTON(dev) ?
+ i915_reg_t port_ctrl = IS_BROXTON(dev_priv) ?
BXT_MIPI_PORT_CTRL(port) : MIPI_PORT_CTRL(port);
u32 temp;
@@ -656,7 +656,6 @@ static void intel_dsi_disable(struct intel_encoder *encoder)
static void intel_dsi_clear_device_ready(struct intel_encoder *encoder)
{
- struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
enum port port;
@@ -664,7 +663,7 @@ static void intel_dsi_clear_device_ready(struct intel_encoder *encoder)
DRM_DEBUG_KMS("\n");
for_each_dsi_port(port, intel_dsi->ports) {
/* Common bit for both MIPI Port A & MIPI Port C on VLV/CHV */
- i915_reg_t port_ctrl = IS_BROXTON(dev) ?
+ i915_reg_t port_ctrl = IS_BROXTON(dev_priv) ?
BXT_MIPI_PORT_CTRL(port) : MIPI_PORT_CTRL(PORT_A);
u32 val;
@@ -741,7 +740,6 @@ static bool intel_dsi_get_hw_state(struct intel_encoder *encoder,
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
- struct drm_device *dev = encoder->base.dev;
enum intel_display_power_domain power_domain;
enum port port;
bool active = false;
@@ -762,7 +760,7 @@ static bool intel_dsi_get_hw_state(struct intel_encoder *encoder,
/* XXX: this only works for one DSI output */
for_each_dsi_port(port, intel_dsi->ports) {
- i915_reg_t ctrl_reg = IS_BROXTON(dev) ?
+ i915_reg_t ctrl_reg = IS_BROXTON(dev_priv) ?
BXT_MIPI_PORT_CTRL(port) : MIPI_PORT_CTRL(port);
bool enabled = I915_READ(ctrl_reg) & DPI_ENABLE;
@@ -771,7 +769,8 @@ static bool intel_dsi_get_hw_state(struct intel_encoder *encoder,
* bit in port C control register does not get set. As a
* workaround, check pipe B conf instead.
*/
- if ((IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) && port == PORT_C)
+ if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
+ port == PORT_C)
enabled = I915_READ(PIPECONF(PIPE_B)) & PIPECONF_ENABLE;
/* Try command mode if video mode not enabled */
@@ -970,11 +969,11 @@ static void bxt_dsi_get_pipe_config(struct intel_encoder *encoder,
static void intel_dsi_get_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
{
- struct drm_device *dev = encoder->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
u32 pclk;
DRM_DEBUG_KMS("\n");
- if (IS_BROXTON(dev))
+ if (IS_BROXTON(dev_priv))
bxt_dsi_get_pipe_config(encoder, pipe_config);
pclk = intel_dsi_get_pclk(encoder, pipe_config->pipe_bpp,
@@ -1066,7 +1065,7 @@ static void set_dsi_timings(struct drm_encoder *encoder,
hbp = txbyteclkhs(hbp, bpp, lane_count, intel_dsi->burst_mode_ratio);
for_each_dsi_port(port, intel_dsi->ports) {
- if (IS_BROXTON(dev)) {
+ if (IS_BROXTON(dev_priv)) {
/*
* Program hdisplay and vdisplay on MIPI transcoder.
* This is different from calculated hactive and
@@ -1138,7 +1137,7 @@ static void intel_dsi_prepare(struct intel_encoder *intel_encoder,
}
for_each_dsi_port(port, intel_dsi->ports) {
- if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
+ if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
/*
* escape clock divider, 20MHz, shared for A and C.
* device ready must be off when doing this! txclkesc?
@@ -1153,7 +1152,7 @@ static void intel_dsi_prepare(struct intel_encoder *intel_encoder,
tmp &= ~READ_REQUEST_PRIORITY_MASK;
I915_WRITE(MIPI_CTRL(port), tmp |
READ_REQUEST_PRIORITY_HIGH);
- } else if (IS_BROXTON(dev)) {
+ } else if (IS_BROXTON(dev_priv)) {
enum pipe pipe = intel_crtc->pipe;
tmp = I915_READ(MIPI_CTRL(port));
@@ -1242,7 +1241,7 @@ static void intel_dsi_prepare(struct intel_encoder *intel_encoder,
I915_WRITE(MIPI_INIT_COUNT(port),
txclkesc(intel_dsi->escape_clk_div, 100));
- if (IS_BROXTON(dev) && (!intel_dsi->dual_link)) {
+ if (IS_BROXTON(dev_priv) && (!intel_dsi->dual_link)) {
/*
* BXT spec says write MIPI_INIT_COUNT for
* both the ports, even if only one is
@@ -1299,12 +1298,6 @@ static void intel_dsi_prepare(struct intel_encoder *intel_encoder,
}
}
-static enum drm_connector_status
-intel_dsi_detect(struct drm_connector *connector, bool force)
-{
- return connector_status_connected;
-}
-
static int intel_dsi_get_modes(struct drm_connector *connector)
{
struct intel_connector *intel_connector = to_intel_connector(connector);
@@ -1346,7 +1339,7 @@ static int intel_dsi_set_property(struct drm_connector *connector,
DRM_DEBUG_KMS("no scaling not supported\n");
return -EINVAL;
}
- if (HAS_GMCH_DISPLAY(dev) &&
+ if (HAS_GMCH_DISPLAY(to_i915(dev)) &&
val == DRM_MODE_SCALE_CENTER) {
DRM_DEBUG_KMS("centering not supported\n");
return -EINVAL;
@@ -1408,7 +1401,6 @@ static const struct drm_connector_helper_funcs intel_dsi_connector_helper_funcs
static const struct drm_connector_funcs intel_dsi_connector_funcs = {
.dpms = drm_atomic_helper_connector_dpms,
- .detect = intel_dsi_detect,
.late_register = intel_connector_register,
.early_unregister = intel_connector_unregister,
.destroy = intel_dsi_connector_destroy,
@@ -1450,9 +1442,9 @@ void intel_dsi_init(struct drm_device *dev)
if (!intel_bios_is_dsi_present(dev_priv, &port))
return;
- if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
+ if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
dev_priv->mipi_mmio_base = VLV_MIPI_BASE;
- } else if (IS_BROXTON(dev)) {
+ } else if (IS_BROXTON(dev_priv)) {
dev_priv->mipi_mmio_base = BXT_MIPI_BASE;
} else {
DRM_ERROR("Unsupported Mipi device to reg base");
@@ -1488,6 +1480,7 @@ void intel_dsi_init(struct drm_device *dev)
intel_connector->get_hw_state = intel_connector_get_hw_state;
+ intel_encoder->port = port;
/*
* On BYT/CHV, pipe A maps to MIPI DSI port A, pipe B maps to MIPI DSI
* port C. BXT isn't limited like this.
diff --git a/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c b/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c
index cd154ce6b6c1..47cd1b20fb3e 100644
--- a/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c
+++ b/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c
@@ -126,6 +126,8 @@ static const u8 *mipi_exec_send_packet(struct intel_dsi *intel_dsi,
u16 len;
enum port port;
+ DRM_DEBUG_KMS("\n");
+
flags = *data++;
type = *data++;
@@ -199,6 +201,8 @@ static const u8 *mipi_exec_delay(struct intel_dsi *intel_dsi, const u8 *data)
{
u32 delay = *((const u32 *) data);
+ DRM_DEBUG_KMS("\n");
+
usleep_range(delay, delay + 10);
data += 4;
@@ -296,7 +300,8 @@ static void chv_exec_gpio(struct drm_i915_private *dev_priv,
mutex_lock(&dev_priv->sb_lock);
vlv_iosf_sb_write(dev_priv, port, cfg1, 0);
vlv_iosf_sb_write(dev_priv, port, cfg0,
- CHV_GPIO_GPIOCFG_GPO | CHV_GPIO_GPIOTXSTATE(value));
+ CHV_GPIO_GPIOEN | CHV_GPIO_GPIOCFG_GPO |
+ CHV_GPIO_GPIOTXSTATE(value));
mutex_unlock(&dev_priv->sb_lock);
}
@@ -307,6 +312,8 @@ static const u8 *mipi_exec_gpio(struct intel_dsi *intel_dsi, const u8 *data)
u8 gpio_source, gpio_index;
bool value;
+ DRM_DEBUG_KMS("\n");
+
if (dev_priv->vbt.dsi.seq_version >= 3)
data++;
@@ -331,18 +338,36 @@ static const u8 *mipi_exec_gpio(struct intel_dsi *intel_dsi, const u8 *data)
return data;
}
-static const u8 *mipi_exec_i2c_skip(struct intel_dsi *intel_dsi, const u8 *data)
+static const u8 *mipi_exec_i2c(struct intel_dsi *intel_dsi, const u8 *data)
{
+ DRM_DEBUG_KMS("Skipping I2C element execution\n");
+
return data + *(data + 6) + 7;
}
+static const u8 *mipi_exec_spi(struct intel_dsi *intel_dsi, const u8 *data)
+{
+ DRM_DEBUG_KMS("Skipping SPI element execution\n");
+
+ return data + *(data + 5) + 6;
+}
+
+static const u8 *mipi_exec_pmic(struct intel_dsi *intel_dsi, const u8 *data)
+{
+ DRM_DEBUG_KMS("Skipping PMIC element execution\n");
+
+ return data + 15;
+}
+
typedef const u8 * (*fn_mipi_elem_exec)(struct intel_dsi *intel_dsi,
const u8 *data);
static const fn_mipi_elem_exec exec_elem[] = {
[MIPI_SEQ_ELEM_SEND_PKT] = mipi_exec_send_packet,
[MIPI_SEQ_ELEM_DELAY] = mipi_exec_delay,
[MIPI_SEQ_ELEM_GPIO] = mipi_exec_gpio,
- [MIPI_SEQ_ELEM_I2C] = mipi_exec_i2c_skip,
+ [MIPI_SEQ_ELEM_I2C] = mipi_exec_i2c,
+ [MIPI_SEQ_ELEM_SPI] = mipi_exec_spi,
+ [MIPI_SEQ_ELEM_PMIC] = mipi_exec_pmic,
};
/*
@@ -352,11 +377,11 @@ static const fn_mipi_elem_exec exec_elem[] = {
*/
static const char * const seq_name[] = {
- [MIPI_SEQ_ASSERT_RESET] = "MIPI_SEQ_ASSERT_RESET",
+ [MIPI_SEQ_DEASSERT_RESET] = "MIPI_SEQ_DEASSERT_RESET",
[MIPI_SEQ_INIT_OTP] = "MIPI_SEQ_INIT_OTP",
[MIPI_SEQ_DISPLAY_ON] = "MIPI_SEQ_DISPLAY_ON",
[MIPI_SEQ_DISPLAY_OFF] = "MIPI_SEQ_DISPLAY_OFF",
- [MIPI_SEQ_DEASSERT_RESET] = "MIPI_SEQ_DEASSERT_RESET",
+ [MIPI_SEQ_ASSERT_RESET] = "MIPI_SEQ_ASSERT_RESET",
[MIPI_SEQ_BACKLIGHT_ON] = "MIPI_SEQ_BACKLIGHT_ON",
[MIPI_SEQ_BACKLIGHT_OFF] = "MIPI_SEQ_BACKLIGHT_OFF",
[MIPI_SEQ_TEAR_ON] = "MIPI_SEQ_TEAR_ON",
@@ -385,11 +410,8 @@ static void generic_exec_sequence(struct drm_panel *panel, enum mipi_seq seq_id)
return;
data = dev_priv->vbt.dsi.sequence[seq_id];
- if (!data) {
- DRM_DEBUG_KMS("MIPI sequence %d - %s not available\n",
- seq_id, sequence_name(seq_id));
+ if (!data)
return;
- }
WARN_ON(*data != seq_id);
@@ -420,7 +442,15 @@ static void generic_exec_sequence(struct drm_panel *panel, enum mipi_seq seq_id)
operation_size = *data++;
if (mipi_elem_exec) {
+ const u8 *next = data + operation_size;
+
data = mipi_elem_exec(intel_dsi, data);
+
+ /* Consistency check, if we have a size. */
+ if (operation_size && data != next) {
+ DRM_ERROR("Inconsistent operation size\n");
+ return;
+ }
} else if (operation_size) {
/* We have size, skip. */
DRM_DEBUG_KMS("Unsupported MIPI operation byte %u\n",
@@ -438,6 +468,8 @@ static void generic_exec_sequence(struct drm_panel *panel, enum mipi_seq seq_id)
static int vbt_panel_prepare(struct drm_panel *panel)
{
generic_exec_sequence(panel, MIPI_SEQ_ASSERT_RESET);
+ generic_exec_sequence(panel, MIPI_SEQ_POWER_ON);
+ generic_exec_sequence(panel, MIPI_SEQ_DEASSERT_RESET);
generic_exec_sequence(panel, MIPI_SEQ_INIT_OTP);
return 0;
@@ -445,7 +477,8 @@ static int vbt_panel_prepare(struct drm_panel *panel)
static int vbt_panel_unprepare(struct drm_panel *panel)
{
- generic_exec_sequence(panel, MIPI_SEQ_DEASSERT_RESET);
+ generic_exec_sequence(panel, MIPI_SEQ_ASSERT_RESET);
+ generic_exec_sequence(panel, MIPI_SEQ_POWER_OFF);
return 0;
}
@@ -453,12 +486,14 @@ static int vbt_panel_unprepare(struct drm_panel *panel)
static int vbt_panel_enable(struct drm_panel *panel)
{
generic_exec_sequence(panel, MIPI_SEQ_DISPLAY_ON);
+ generic_exec_sequence(panel, MIPI_SEQ_BACKLIGHT_ON);
return 0;
}
static int vbt_panel_disable(struct drm_panel *panel)
{
+ generic_exec_sequence(panel, MIPI_SEQ_BACKLIGHT_OFF);
generic_exec_sequence(panel, MIPI_SEQ_DISPLAY_OFF);
return 0;
@@ -740,9 +775,8 @@ struct drm_panel *vbt_panel_init(struct intel_dsi *intel_dsi, u16 panel_id)
8);
intel_dsi->clk_hs_to_lp_count += extra_byte_count;
- DRM_DEBUG_KMS("Eot %s\n", intel_dsi->eotp_pkt ? "enabled" : "disabled");
- DRM_DEBUG_KMS("Clockstop %s\n", intel_dsi->clock_stop ?
- "disabled" : "enabled");
+ DRM_DEBUG_KMS("Eot %s\n", enableddisabled(intel_dsi->eotp_pkt));
+ DRM_DEBUG_KMS("Clockstop %s\n", enableddisabled(!intel_dsi->clock_stop));
DRM_DEBUG_KMS("Mode %s\n", intel_dsi->operation_mode ? "command" : "video");
if (intel_dsi->dual_link == DSI_DUAL_LINK_FRONT_BACK)
DRM_DEBUG_KMS("Dual link: DSI_DUAL_LINK_FRONT_BACK\n");
@@ -761,8 +795,7 @@ struct drm_panel *vbt_panel_init(struct intel_dsi *intel_dsi, u16 panel_id)
DRM_DEBUG_KMS("LP to HS Clock Count 0x%x\n", intel_dsi->clk_lp_to_hs_count);
DRM_DEBUG_KMS("HS to LP Clock Count 0x%x\n", intel_dsi->clk_hs_to_lp_count);
DRM_DEBUG_KMS("BTA %s\n",
- intel_dsi->video_frmt_cfg_bits & DISABLE_VIDEO_BTA ?
- "disabled" : "enabled");
+ enableddisabled(!(intel_dsi->video_frmt_cfg_bits & DISABLE_VIDEO_BTA)));
/* delays in VBT are in unit of 100us, so need to convert
* here in ms
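(Illustrative aside: the conversion the comment above refers to divides the VBT value by 10, since 100 us * 10 = 1 ms; a hedged sketch, with pps assumed to be the struct mipi_pps_data pointer used elsewhere in this file.)

	intel_dsi->panel_on_delay = pps->panel_on_delay / 10;
	intel_dsi->backlight_off_delay = pps->bl_disable_delay / 10;
	intel_dsi->panel_off_delay = pps->panel_off_delay / 10;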
diff --git a/drivers/gpu/drm/i915/intel_dsi_pll.c b/drivers/gpu/drm/i915/intel_dsi_pll.c
index 6ab58a01b18e..56eff6004bc0 100644
--- a/drivers/gpu/drm/i915/intel_dsi_pll.c
+++ b/drivers/gpu/drm/i915/intel_dsi_pll.c
@@ -351,7 +351,7 @@ static u32 bxt_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp,
u32 intel_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp,
struct intel_crtc_state *config)
{
- if (IS_BROXTON(encoder->base.dev))
+ if (IS_BROXTON(to_i915(encoder->base.dev)))
return bxt_dsi_get_pclk(encoder, pipe_bpp, config);
else
return vlv_dsi_get_pclk(encoder, pipe_bpp, config);
@@ -515,11 +515,11 @@ bool intel_dsi_pll_is_enabled(struct drm_i915_private *dev_priv)
int intel_compute_dsi_pll(struct intel_encoder *encoder,
struct intel_crtc_state *config)
{
- struct drm_device *dev = encoder->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
+ if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
return vlv_compute_dsi_pll(encoder, config);
- else if (IS_BROXTON(dev))
+ else if (IS_BROXTON(dev_priv))
return bxt_compute_dsi_pll(encoder, config);
return -ENODEV;
@@ -528,21 +528,21 @@ int intel_compute_dsi_pll(struct intel_encoder *encoder,
void intel_enable_dsi_pll(struct intel_encoder *encoder,
const struct intel_crtc_state *config)
{
- struct drm_device *dev = encoder->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
+ if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
vlv_enable_dsi_pll(encoder, config);
- else if (IS_BROXTON(dev))
+ else if (IS_BROXTON(dev_priv))
bxt_enable_dsi_pll(encoder, config);
}
void intel_disable_dsi_pll(struct intel_encoder *encoder)
{
- struct drm_device *dev = encoder->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
+ if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
vlv_disable_dsi_pll(encoder);
- else if (IS_BROXTON(dev))
+ else if (IS_BROXTON(dev_priv))
bxt_disable_dsi_pll(encoder);
}
@@ -564,10 +564,10 @@ static void bxt_dsi_reset_clocks(struct intel_encoder *encoder, enum port port)
void intel_dsi_reset_clocks(struct intel_encoder *encoder, enum port port)
{
- struct drm_device *dev = encoder->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- if (IS_BROXTON(dev))
+ if (IS_BROXTON(dev_priv))
bxt_dsi_reset_clocks(encoder, port);
- else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
+ else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
vlv_dsi_reset_clocks(encoder, port);
}
diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c
index 2e452c505e7e..708645443046 100644
--- a/drivers/gpu/drm/i915/intel_dvo.c
+++ b/drivers/gpu/drm/i915/intel_dvo.c
@@ -393,12 +393,12 @@ intel_dvo_get_current_mode(struct drm_connector *connector)
* its timings to get how the BIOS set up the panel.
*/
if (dvo_val & DVO_ENABLE) {
- struct drm_crtc *crtc;
+ struct intel_crtc *crtc;
int pipe = (dvo_val & DVO_PIPE_B_SELECT) ? 1 : 0;
- crtc = intel_get_crtc_for_pipe(dev, pipe);
+ crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
if (crtc) {
- mode = intel_crtc_mode_get(dev, crtc);
+ mode = intel_crtc_mode_get(dev, &crtc->base);
if (mode) {
mode->type |= DRM_MODE_TYPE_PREFERRED;
if (dvo_val & DVO_HSYNC_ACTIVE_HIGH)
@@ -412,16 +412,14 @@ intel_dvo_get_current_mode(struct drm_connector *connector)
return mode;
}
-static char intel_dvo_port_name(i915_reg_t dvo_reg)
+static enum port intel_dvo_port(i915_reg_t dvo_reg)
{
if (i915_mmio_reg_equal(dvo_reg, DVOA))
- return 'A';
+ return PORT_A;
else if (i915_mmio_reg_equal(dvo_reg, DVOB))
- return 'B';
- else if (i915_mmio_reg_equal(dvo_reg, DVOC))
- return 'C';
+ return PORT_B;
else
- return '?';
+ return PORT_C;
}
void intel_dvo_init(struct drm_device *dev)
@@ -464,6 +462,7 @@ void intel_dvo_init(struct drm_device *dev)
bool dvoinit;
enum pipe pipe;
uint32_t dpll[I915_MAX_PIPES];
+ enum port port;
/* Allow the I2C driver info to specify the GPIO to be used in
* special cases, but otherwise default to what's defined
@@ -511,12 +510,15 @@ void intel_dvo_init(struct drm_device *dev)
if (!dvoinit)
continue;
+ port = intel_dvo_port(dvo->dvo_reg);
drm_encoder_init(dev, &intel_encoder->base,
&intel_dvo_enc_funcs, encoder_type,
- "DVO %c", intel_dvo_port_name(dvo->dvo_reg));
+ "DVO %c", port_name(port));
intel_encoder->type = INTEL_OUTPUT_DVO;
+ intel_encoder->port = port;
intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
+
switch (dvo->type) {
case INTEL_DVO_CHIP_TMDS:
intel_encoder->cloneable = (1 << INTEL_OUTPUT_ANALOG) |
diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c
index 025e232a4205..3da4d466e332 100644
--- a/drivers/gpu/drm/i915/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/intel_engine_cs.c
@@ -82,12 +82,17 @@ static const struct engine_info {
},
};
-static struct intel_engine_cs *
+static int
intel_engine_setup(struct drm_i915_private *dev_priv,
enum intel_engine_id id)
{
const struct engine_info *info = &intel_engines[id];
- struct intel_engine_cs *engine = &dev_priv->engine[id];
+ struct intel_engine_cs *engine;
+
+ GEM_BUG_ON(dev_priv->engine[id]);
+ engine = kzalloc(sizeof(*engine), GFP_KERNEL);
+ if (!engine)
+ return -ENOMEM;
engine->id = id;
engine->i915 = dev_priv;
@@ -97,7 +102,11 @@ intel_engine_setup(struct drm_i915_private *dev_priv,
engine->mmio_base = info->mmio_base;
engine->irq_shift = info->irq_shift;
- return engine;
+ /* Nothing to do here, execute in order of dependencies */
+ engine->schedule = NULL;
+
+ dev_priv->engine[id] = engine;
+ return 0;
}
/**
@@ -110,13 +119,16 @@ int intel_engines_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_device_info *device_info = mkwrite_device_info(dev_priv);
+ unsigned int ring_mask = INTEL_INFO(dev_priv)->ring_mask;
unsigned int mask = 0;
int (*init)(struct intel_engine_cs *engine);
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
unsigned int i;
int ret;
- WARN_ON(INTEL_INFO(dev_priv)->ring_mask == 0);
- WARN_ON(INTEL_INFO(dev_priv)->ring_mask &
+ WARN_ON(ring_mask == 0);
+ WARN_ON(ring_mask &
GENMASK(sizeof(mask) * BITS_PER_BYTE - 1, I915_NUM_ENGINES));
for (i = 0; i < ARRAY_SIZE(intel_engines); i++) {
@@ -131,7 +143,11 @@ int intel_engines_init(struct drm_device *dev)
if (!init)
continue;
- ret = init(intel_engine_setup(dev_priv, i));
+ ret = intel_engine_setup(dev_priv, i);
+ if (ret)
+ goto cleanup;
+
+ ret = init(dev_priv->engine[i]);
if (ret)
goto cleanup;
@@ -143,7 +159,7 @@ int intel_engines_init(struct drm_device *dev)
* are added to the driver by a warning and disabling the forgotten
* engines.
*/
- if (WARN_ON(mask != INTEL_INFO(dev_priv)->ring_mask))
+ if (WARN_ON(mask != ring_mask))
device_info->ring_mask = mask;
device_info->num_rings = hweight32(mask);
@@ -151,17 +167,17 @@ int intel_engines_init(struct drm_device *dev)
return 0;
cleanup:
- for (i = 0; i < I915_NUM_ENGINES; i++) {
+ for_each_engine(engine, dev_priv, id) {
if (i915.enable_execlists)
- intel_logical_ring_cleanup(&dev_priv->engine[i]);
+ intel_logical_ring_cleanup(engine);
else
- intel_engine_cleanup(&dev_priv->engine[i]);
+ intel_engine_cleanup(engine);
}
return ret;
}
-void intel_engine_init_seqno(struct intel_engine_cs *engine, u32 seqno)
+void intel_engine_init_global_seqno(struct intel_engine_cs *engine, u32 seqno)
{
struct drm_i915_private *dev_priv = engine->i915;
@@ -191,13 +207,13 @@ void intel_engine_init_seqno(struct intel_engine_cs *engine, u32 seqno)
I915_NUM_ENGINES * gen8_semaphore_seqno_size);
kunmap(page);
}
- memset(engine->semaphore.sync_seqno, 0,
- sizeof(engine->semaphore.sync_seqno));
intel_write_status_page(engine, I915_GEM_HWS_INDEX, seqno);
if (engine->irq_seqno_barrier)
engine->irq_seqno_barrier(engine);
- engine->last_submitted_seqno = seqno;
+
+ GEM_BUG_ON(i915_gem_active_isset(&engine->timeline->last_request));
+ engine->timeline->last_submitted_seqno = seqno;
engine->hangcheck.seqno = seqno;
@@ -207,15 +223,9 @@ void intel_engine_init_seqno(struct intel_engine_cs *engine, u32 seqno)
intel_engine_wakeup(engine);
}
-void intel_engine_init_hangcheck(struct intel_engine_cs *engine)
-{
- memset(&engine->hangcheck, 0, sizeof(engine->hangcheck));
-}
-
-static void intel_engine_init_requests(struct intel_engine_cs *engine)
+static void intel_engine_init_timeline(struct intel_engine_cs *engine)
{
- init_request_active(&engine->last_request, NULL);
- INIT_LIST_HEAD(&engine->request_list);
+ engine->timeline = &engine->i915->gt.global_timeline.engine[engine->id];
}
/**
@@ -229,12 +239,10 @@ static void intel_engine_init_requests(struct intel_engine_cs *engine)
*/
void intel_engine_setup_common(struct intel_engine_cs *engine)
{
- INIT_LIST_HEAD(&engine->execlist_queue);
- spin_lock_init(&engine->execlist_lock);
-
- engine->fence_context = fence_context_alloc(1);
+ engine->execlist_queue = RB_ROOT;
+ engine->execlist_first = NULL;
- intel_engine_init_requests(engine);
+ intel_engine_init_timeline(engine);
intel_engine_init_hangcheck(engine);
i915_gem_batch_pool_init(engine, &engine->batch_pool);
@@ -251,7 +259,7 @@ int intel_engine_create_scratch(struct intel_engine_cs *engine, int size)
obj = i915_gem_object_create_stolen(&engine->i915->drm, size);
if (!obj)
- obj = i915_gem_object_create(&engine->i915->drm, size);
+ obj = i915_gem_object_create_internal(engine->i915, size);
if (IS_ERR(obj)) {
DRM_ERROR("Failed to allocate scratch page\n");
return PTR_ERR(obj);
@@ -301,6 +309,10 @@ int intel_engine_init_common(struct intel_engine_cs *engine)
if (ret)
return ret;
+ ret = i915_gem_render_state_init(engine);
+ if (ret)
+ return ret;
+
return 0;
}
@@ -315,7 +327,142 @@ void intel_engine_cleanup_common(struct intel_engine_cs *engine)
{
intel_engine_cleanup_scratch(engine);
+ i915_gem_render_state_fini(engine);
intel_engine_fini_breadcrumbs(engine);
intel_engine_cleanup_cmd_parser(engine);
i915_gem_batch_pool_fini(&engine->batch_pool);
}
+
+u64 intel_engine_get_active_head(struct intel_engine_cs *engine)
+{
+ struct drm_i915_private *dev_priv = engine->i915;
+ u64 acthd;
+
+ if (INTEL_GEN(dev_priv) >= 8)
+ acthd = I915_READ64_2x32(RING_ACTHD(engine->mmio_base),
+ RING_ACTHD_UDW(engine->mmio_base));
+ else if (INTEL_GEN(dev_priv) >= 4)
+ acthd = I915_READ(RING_ACTHD(engine->mmio_base));
+ else
+ acthd = I915_READ(ACTHD);
+
+ return acthd;
+}
+
+u64 intel_engine_get_last_batch_head(struct intel_engine_cs *engine)
+{
+ struct drm_i915_private *dev_priv = engine->i915;
+ u64 bbaddr;
+
+ if (INTEL_GEN(dev_priv) >= 8)
+ bbaddr = I915_READ64_2x32(RING_BBADDR(engine->mmio_base),
+ RING_BBADDR_UDW(engine->mmio_base));
+ else
+ bbaddr = I915_READ(RING_BBADDR(engine->mmio_base));
+
+ return bbaddr;
+}
+
+const char *i915_cache_level_str(struct drm_i915_private *i915, int type)
+{
+ switch (type) {
+ case I915_CACHE_NONE: return " uncached";
+ case I915_CACHE_LLC: return HAS_LLC(i915) ? " LLC" : " snooped";
+ case I915_CACHE_L3_LLC: return " L3+LLC";
+ case I915_CACHE_WT: return " WT";
+ default: return "";
+ }
+}
+
+static inline uint32_t
+read_subslice_reg(struct drm_i915_private *dev_priv, int slice,
+ int subslice, i915_reg_t reg)
+{
+ uint32_t mcr;
+ uint32_t ret;
+ enum forcewake_domains fw_domains;
+
+ fw_domains = intel_uncore_forcewake_for_reg(dev_priv, reg,
+ FW_REG_READ);
+ fw_domains |= intel_uncore_forcewake_for_reg(dev_priv,
+ GEN8_MCR_SELECTOR,
+ FW_REG_READ | FW_REG_WRITE);
+
+ spin_lock_irq(&dev_priv->uncore.lock);
+ intel_uncore_forcewake_get__locked(dev_priv, fw_domains);
+
+ mcr = I915_READ_FW(GEN8_MCR_SELECTOR);
+ /*
+ * The HW expects the slice and subslice selectors to be reset to 0
+ * after reading out the registers.
+ */
+ WARN_ON_ONCE(mcr & (GEN8_MCR_SLICE_MASK | GEN8_MCR_SUBSLICE_MASK));
+ mcr &= ~(GEN8_MCR_SLICE_MASK | GEN8_MCR_SUBSLICE_MASK);
+ mcr |= GEN8_MCR_SLICE(slice) | GEN8_MCR_SUBSLICE(subslice);
+ I915_WRITE_FW(GEN8_MCR_SELECTOR, mcr);
+
+ ret = I915_READ_FW(reg);
+
+ mcr &= ~(GEN8_MCR_SLICE_MASK | GEN8_MCR_SUBSLICE_MASK);
+ I915_WRITE_FW(GEN8_MCR_SELECTOR, mcr);
+
+ intel_uncore_forcewake_put__locked(dev_priv, fw_domains);
+ spin_unlock_irq(&dev_priv->uncore.lock);
+
+ return ret;
+}
+
+/* NB: the memset below zeroes the whole struct before it is filled in */
+void intel_engine_get_instdone(struct intel_engine_cs *engine,
+ struct intel_instdone *instdone)
+{
+ struct drm_i915_private *dev_priv = engine->i915;
+ u32 mmio_base = engine->mmio_base;
+ int slice;
+ int subslice;
+
+ memset(instdone, 0, sizeof(*instdone));
+
+ switch (INTEL_GEN(dev_priv)) {
+ default:
+ instdone->instdone = I915_READ(RING_INSTDONE(mmio_base));
+
+ if (engine->id != RCS)
+ break;
+
+ instdone->slice_common = I915_READ(GEN7_SC_INSTDONE);
+ for_each_instdone_slice_subslice(dev_priv, slice, subslice) {
+ instdone->sampler[slice][subslice] =
+ read_subslice_reg(dev_priv, slice, subslice,
+ GEN7_SAMPLER_INSTDONE);
+ instdone->row[slice][subslice] =
+ read_subslice_reg(dev_priv, slice, subslice,
+ GEN7_ROW_INSTDONE);
+ }
+ break;
+ case 7:
+ instdone->instdone = I915_READ(RING_INSTDONE(mmio_base));
+
+ if (engine->id != RCS)
+ break;
+
+ instdone->slice_common = I915_READ(GEN7_SC_INSTDONE);
+ instdone->sampler[0][0] = I915_READ(GEN7_SAMPLER_INSTDONE);
+ instdone->row[0][0] = I915_READ(GEN7_ROW_INSTDONE);
+
+ break;
+ case 6:
+ case 5:
+ case 4:
+ instdone->instdone = I915_READ(RING_INSTDONE(mmio_base));
+
+ if (engine->id == RCS)
+ /* HACK: Using the wrong struct member */
+ instdone->slice_common = I915_READ(GEN4_INSTDONE1);
+ break;
+ case 3:
+ case 2:
+ instdone->instdone = I915_READ(GEN2_INSTDONE);
+ break;
+ }
+}
diff --git a/drivers/gpu/drm/i915/intel_fbc.c b/drivers/gpu/drm/i915/intel_fbc.c
index c43dd9abce79..62f215b12eb5 100644
--- a/drivers/gpu/drm/i915/intel_fbc.c
+++ b/drivers/gpu/drm/i915/intel_fbc.c
@@ -48,17 +48,17 @@ static inline bool fbc_supported(struct drm_i915_private *dev_priv)
static inline bool fbc_on_pipe_a_only(struct drm_i915_private *dev_priv)
{
- return IS_HASWELL(dev_priv) || INTEL_INFO(dev_priv)->gen >= 8;
+ return IS_HASWELL(dev_priv) || INTEL_GEN(dev_priv) >= 8;
}
static inline bool fbc_on_plane_a_only(struct drm_i915_private *dev_priv)
{
- return INTEL_INFO(dev_priv)->gen < 4;
+ return INTEL_GEN(dev_priv) < 4;
}
static inline bool no_fbc_on_multiple_pipes(struct drm_i915_private *dev_priv)
{
- return INTEL_INFO(dev_priv)->gen <= 3;
+ return INTEL_GEN(dev_priv) <= 3;
}
/*
@@ -84,7 +84,7 @@ static void intel_fbc_get_plane_source_size(struct intel_fbc_state_cache *cache,
{
int w, h;
- if (intel_rotation_90_or_270(cache->plane.rotation)) {
+ if (drm_rotation_90_or_270(cache->plane.rotation)) {
w = cache->plane.src_h;
h = cache->plane.src_w;
} else {
@@ -351,7 +351,7 @@ static void gen7_fbc_activate(struct drm_i915_private *dev_priv)
static bool intel_fbc_hw_is_active(struct drm_i915_private *dev_priv)
{
- if (INTEL_INFO(dev_priv)->gen >= 5)
+ if (INTEL_GEN(dev_priv) >= 5)
return ilk_fbc_is_active(dev_priv);
else if (IS_GM45(dev_priv))
return g4x_fbc_is_active(dev_priv);
@@ -365,9 +365,9 @@ static void intel_fbc_hw_activate(struct drm_i915_private *dev_priv)
fbc->active = true;
- if (INTEL_INFO(dev_priv)->gen >= 7)
+ if (INTEL_GEN(dev_priv) >= 7)
gen7_fbc_activate(dev_priv);
- else if (INTEL_INFO(dev_priv)->gen >= 5)
+ else if (INTEL_GEN(dev_priv) >= 5)
ilk_fbc_activate(dev_priv);
else if (IS_GM45(dev_priv))
g4x_fbc_activate(dev_priv);
@@ -381,7 +381,7 @@ static void intel_fbc_hw_deactivate(struct drm_i915_private *dev_priv)
fbc->active = false;
- if (INTEL_INFO(dev_priv)->gen >= 5)
+ if (INTEL_GEN(dev_priv) >= 5)
ilk_fbc_deactivate(dev_priv);
else if (IS_GM45(dev_priv))
g4x_fbc_deactivate(dev_priv);
@@ -561,7 +561,7 @@ again:
ret = i915_gem_stolen_insert_node_in_range(dev_priv, node, size >>= 1,
4096, 0, end);
- if (ret && INTEL_INFO(dev_priv)->gen <= 4) {
+ if (ret && INTEL_GEN(dev_priv) <= 4) {
return 0;
} else if (ret) {
compression_threshold <<= 1;
@@ -594,7 +594,7 @@ static int intel_fbc_alloc_cfb(struct intel_crtc *crtc)
fbc->threshold = ret;
- if (INTEL_INFO(dev_priv)->gen >= 5)
+ if (INTEL_GEN(dev_priv) >= 5)
I915_WRITE(ILK_DPFC_CB_BASE, fbc->compressed_fb.start);
else if (IS_GM45(dev_priv)) {
I915_WRITE(DPFC_CB_BASE, fbc->compressed_fb.start);
@@ -708,10 +708,10 @@ static bool intel_fbc_hw_tracking_covers_screen(struct intel_crtc *crtc)
struct intel_fbc *fbc = &dev_priv->fbc;
unsigned int effective_w, effective_h, max_w, max_h;
- if (INTEL_INFO(dev_priv)->gen >= 8 || IS_HASWELL(dev_priv)) {
+ if (INTEL_GEN(dev_priv) >= 8 || IS_HASWELL(dev_priv)) {
max_w = 4096;
max_h = 4096;
- } else if (IS_G4X(dev_priv) || INTEL_INFO(dev_priv)->gen >= 5) {
+ } else if (IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5) {
max_w = 4096;
max_h = 2048;
} else {
@@ -776,6 +776,14 @@ static bool intel_fbc_can_activate(struct intel_crtc *crtc)
struct intel_fbc *fbc = &dev_priv->fbc;
struct intel_fbc_state_cache *cache = &fbc->state_cache;
+ /* We don't need to use a state cache here since this information is
+ * global for all CRTCs.
+ */
+ if (fbc->underrun_detected) {
+ fbc->no_fbc_reason = "underrun detected";
+ return false;
+ }
+
if (!cache->plane.visible) {
fbc->no_fbc_reason = "primary plane not visible";
return false;
@@ -804,7 +812,7 @@ static bool intel_fbc_can_activate(struct intel_crtc *crtc)
fbc->no_fbc_reason = "framebuffer not tiled or fenced";
return false;
}
- if (INTEL_INFO(dev_priv)->gen <= 4 && !IS_G4X(dev_priv) &&
+ if (INTEL_GEN(dev_priv) <= 4 && !IS_G4X(dev_priv) &&
cache->plane.rotation != DRM_ROTATE_0) {
fbc->no_fbc_reason = "rotation unsupported";
return false;
@@ -846,9 +854,8 @@ static bool intel_fbc_can_activate(struct intel_crtc *crtc)
return true;
}
-static bool intel_fbc_can_choose(struct intel_crtc *crtc)
+static bool intel_fbc_can_enable(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_fbc *fbc = &dev_priv->fbc;
if (intel_vgpu_active(dev_priv)) {
@@ -861,13 +868,8 @@ static bool intel_fbc_can_choose(struct intel_crtc *crtc)
return false;
}
- if (fbc_on_pipe_a_only(dev_priv) && crtc->pipe != PIPE_A) {
- fbc->no_fbc_reason = "no enabled pipes can have FBC";
- return false;
- }
-
- if (fbc_on_plane_a_only(dev_priv) && crtc->plane != PLANE_A) {
- fbc->no_fbc_reason = "no enabled planes can have FBC";
+ if (fbc->underrun_detected) {
+ fbc->no_fbc_reason = "underrun detected";
return false;
}
@@ -1053,23 +1055,19 @@ void intel_fbc_choose_crtc(struct drm_i915_private *dev_priv,
struct drm_atomic_state *state)
{
struct intel_fbc *fbc = &dev_priv->fbc;
- struct drm_crtc *crtc;
- struct drm_crtc_state *crtc_state;
struct drm_plane *plane;
struct drm_plane_state *plane_state;
- bool fbc_crtc_present = false;
- int i, j;
+ bool crtc_chosen = false;
+ int i;
mutex_lock(&fbc->lock);
- for_each_crtc_in_state(state, crtc, crtc_state, i) {
- if (fbc->crtc == to_intel_crtc(crtc)) {
- fbc_crtc_present = true;
- break;
- }
- }
- /* This atomic commit doesn't involve the CRTC currently tied to FBC. */
- if (!fbc_crtc_present && fbc->crtc != NULL)
+ /* Does this atomic commit involve the CRTC currently tied to FBC? */
+ if (fbc->crtc &&
+ !drm_atomic_get_existing_crtc_state(state, &fbc->crtc->base))
+ goto out;
+
+ if (!intel_fbc_can_enable(dev_priv))
goto out;
/* Simply choose the first CRTC that is compatible and has a visible
@@ -1079,25 +1077,29 @@ void intel_fbc_choose_crtc(struct drm_i915_private *dev_priv,
for_each_plane_in_state(state, plane, plane_state, i) {
struct intel_plane_state *intel_plane_state =
to_intel_plane_state(plane_state);
+ struct intel_crtc_state *intel_crtc_state;
+ struct intel_crtc *crtc = to_intel_crtc(plane_state->crtc);
if (!intel_plane_state->base.visible)
continue;
- for_each_crtc_in_state(state, crtc, crtc_state, j) {
- struct intel_crtc_state *intel_crtc_state =
- to_intel_crtc_state(crtc_state);
+ if (fbc_on_pipe_a_only(dev_priv) && crtc->pipe != PIPE_A)
+ continue;
- if (plane_state->crtc != crtc)
- continue;
+ if (fbc_on_plane_a_only(dev_priv) && crtc->plane != PLANE_A)
+ continue;
- if (!intel_fbc_can_choose(to_intel_crtc(crtc)))
- break;
+ intel_crtc_state = to_intel_crtc_state(
+ drm_atomic_get_existing_crtc_state(state, &crtc->base));
- intel_crtc_state->enable_fbc = true;
- goto out;
- }
+ intel_crtc_state->enable_fbc = true;
+ crtc_chosen = true;
+ break;
}
+ if (!crtc_chosen)
+ fbc->no_fbc_reason = "no suitable CRTC for FBC";
+
out:
mutex_unlock(&fbc->lock);
}
@@ -1223,6 +1225,59 @@ void intel_fbc_global_disable(struct drm_i915_private *dev_priv)
cancel_work_sync(&fbc->work.work);
}
+static void intel_fbc_underrun_work_fn(struct work_struct *work)
+{
+ struct drm_i915_private *dev_priv =
+ container_of(work, struct drm_i915_private, fbc.underrun_work);
+ struct intel_fbc *fbc = &dev_priv->fbc;
+
+ mutex_lock(&fbc->lock);
+
+ /* Maybe we were scheduled twice. */
+ if (fbc->underrun_detected)
+ goto out;
+
+ DRM_DEBUG_KMS("Disabling FBC due to FIFO underrun.\n");
+ fbc->underrun_detected = true;
+
+ intel_fbc_deactivate(dev_priv);
+out:
+ mutex_unlock(&fbc->lock);
+}
+
+/**
+ * intel_fbc_handle_fifo_underrun_irq - disable FBC when we get a FIFO underrun
+ * @dev_priv: i915 device instance
+ *
+ * Without FBC, most underruns are harmless and don't really cause too many
+ * problems, except for an annoying message on dmesg. With FBC, underruns can
+ * become black screens or even worse, especially when paired with bad
+ * watermarks. So, to be on the safe side, completely disable FBC if we
+ * ever detect a FIFO underrun on any pipe. An underrun on any pipe
+ * already suggests that watermarks may be bad, so try to be as safe as
+ * possible.
+ *
+ * This function is called from the IRQ handler.
+ */
+void intel_fbc_handle_fifo_underrun_irq(struct drm_i915_private *dev_priv)
+{
+ struct intel_fbc *fbc = &dev_priv->fbc;
+
+ if (!fbc_supported(dev_priv))
+ return;
+
+ /* There's no guarantee that underrun_detected won't be set to true
+ * right after this check and before the work is scheduled, but that's
+ * not a problem since we'll check it again in the work function
+ * while FBC is locked. This check here is just to prevent us from
+ * unnecessarily scheduling the work, and it relies on the fact that we
+ * never switch underrun_detected back to false after it's true. */
+ if (READ_ONCE(fbc->underrun_detected))
+ return;
+
+ schedule_work(&fbc->underrun_work);
+}
+
/**
* intel_fbc_init_pipe_state - initialize FBC's CRTC visibility tracking
* @dev_priv: i915 device instance
@@ -1240,7 +1295,7 @@ void intel_fbc_init_pipe_state(struct drm_i915_private *dev_priv)
return;
for_each_intel_crtc(&dev_priv->drm, crtc)
- if (intel_crtc_active(&crtc->base) &&
+ if (intel_crtc_active(crtc) &&
to_intel_plane_state(crtc->base.primary->state)->base.visible)
dev_priv->fbc.visible_pipes_mask |= (1 << crtc->pipe);
}
@@ -1294,6 +1349,7 @@ void intel_fbc_init(struct drm_i915_private *dev_priv)
enum pipe pipe;
INIT_WORK(&fbc->work.work, intel_fbc_work_fn);
+ INIT_WORK(&fbc->underrun_work, intel_fbc_underrun_work_fn);
mutex_init(&fbc->lock);
fbc->enabled = false;
fbc->active = false;
@@ -1319,7 +1375,7 @@ void intel_fbc_init(struct drm_i915_private *dev_priv)
}
/* This value was pulled out of someone's hat */
- if (INTEL_INFO(dev_priv)->gen <= 4 && !IS_GM45(dev_priv))
+ if (INTEL_GEN(dev_priv) <= 4 && !IS_GM45(dev_priv))
I915_WRITE(FBC_CONTROL, 500 << FBC_CTL_INTERVAL_SHIFT);
/* We still don't have any sort of hardware state readout for FBC, so
diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c
index b7098f98bb67..beb08982dc0b 100644
--- a/drivers/gpu/drm/i915/intel_fbdev.c
+++ b/drivers/gpu/drm/i915/intel_fbdev.c
@@ -102,16 +102,13 @@ static int intel_fbdev_pan_display(struct fb_var_screeninfo *var,
static struct fb_ops intelfb_ops = {
.owner = THIS_MODULE,
- .fb_check_var = drm_fb_helper_check_var,
+ DRM_FB_HELPER_DEFAULT_OPS,
.fb_set_par = intel_fbdev_set_par,
.fb_fillrect = drm_fb_helper_cfb_fillrect,
.fb_copyarea = drm_fb_helper_cfb_copyarea,
.fb_imageblit = drm_fb_helper_cfb_imageblit,
.fb_pan_display = intel_fbdev_pan_display,
.fb_blank = intel_fbdev_blank,
- .fb_setcmap = drm_fb_helper_setcmap,
- .fb_debug_enter = drm_fb_helper_debug_enter,
- .fb_debug_leave = drm_fb_helper_debug_leave,
};
static int intelfb_alloc(struct drm_fb_helper *helper,
@@ -359,7 +356,7 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper,
struct drm_fb_offset *offsets,
bool *enabled, int width, int height)
{
- struct drm_device *dev = fb_helper->dev;
+ struct drm_i915_private *dev_priv = to_i915(fb_helper->dev);
unsigned long conn_configured, mask;
unsigned int count = min(fb_helper->connector_count, BITS_PER_LONG);
int i, j;
@@ -512,7 +509,7 @@ retry:
* fbdev helper library.
*/
if (num_connectors_enabled != num_connectors_detected &&
- num_connectors_enabled < INTEL_INFO(dev)->num_pipes) {
+ num_connectors_enabled < INTEL_INFO(dev_priv)->num_pipes) {
DRM_DEBUG_KMS("fallback: Not all outputs enabled\n");
DRM_DEBUG_KMS("Enabled: %i, detected: %i\n", num_connectors_enabled,
num_connectors_detected);
@@ -636,7 +633,7 @@ static bool intel_fbdev_init_bios(struct drm_device *dev,
cur_size = intel_crtc->config->base.adjusted_mode.crtc_vdisplay;
cur_size = intel_fb_align_height(dev, cur_size,
fb->base.pixel_format,
- fb->base.modifier[0]);
+ fb->base.modifier);
cur_size *= fb->base.pitches[0];
DRM_DEBUG_KMS("pipe %c area: %dx%d, bpp: %d, size: %d\n",
pipe_name(intel_crtc->pipe),
@@ -700,11 +697,11 @@ static void intel_fbdev_suspend_worker(struct work_struct *work)
int intel_fbdev_init(struct drm_device *dev)
{
- struct intel_fbdev *ifbdev;
struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_fbdev *ifbdev;
int ret;
- if (WARN_ON(INTEL_INFO(dev)->num_pipes == 0))
+ if (WARN_ON(INTEL_INFO(dev_priv)->num_pipes == 0))
return -ENODEV;
ifbdev = kzalloc(sizeof(struct intel_fbdev), GFP_KERNEL);
@@ -717,7 +714,7 @@ int intel_fbdev_init(struct drm_device *dev)
ifbdev->preferred_bpp = 32;
ret = drm_fb_helper_init(dev, &ifbdev->helper,
- INTEL_INFO(dev)->num_pipes, 4);
+ INTEL_INFO(dev_priv)->num_pipes, 4);
if (ret) {
kfree(ifbdev);
return ret;
diff --git a/drivers/gpu/drm/i915/intel_fifo_underrun.c b/drivers/gpu/drm/i915/intel_fifo_underrun.c
index 2aa744081f09..e660d8b4bbc3 100644
--- a/drivers/gpu/drm/i915/intel_fifo_underrun.c
+++ b/drivers/gpu/drm/i915/intel_fifo_underrun.c
@@ -57,7 +57,7 @@ static bool ivb_can_enable_err_int(struct drm_device *dev)
assert_spin_locked(&dev_priv->irq_lock);
for_each_pipe(dev_priv, pipe) {
- crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
+ crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
if (crtc->cpu_fifo_underrun_disabled)
return false;
@@ -75,7 +75,7 @@ static bool cpt_can_enable_serr_int(struct drm_device *dev)
assert_spin_locked(&dev_priv->irq_lock);
for_each_pipe(dev_priv, pipe) {
- crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
+ crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
if (crtc->pch_fifo_underrun_disabled)
return false;
@@ -245,22 +245,21 @@ static bool __intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
enum pipe pipe, bool enable)
{
struct drm_i915_private *dev_priv = to_i915(dev);
- struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
bool old;
assert_spin_locked(&dev_priv->irq_lock);
- old = !intel_crtc->cpu_fifo_underrun_disabled;
- intel_crtc->cpu_fifo_underrun_disabled = !enable;
+ old = !crtc->cpu_fifo_underrun_disabled;
+ crtc->cpu_fifo_underrun_disabled = !enable;
- if (HAS_GMCH_DISPLAY(dev))
+ if (HAS_GMCH_DISPLAY(dev_priv))
i9xx_set_fifo_underrun_reporting(dev, pipe, enable, old);
- else if (IS_GEN5(dev) || IS_GEN6(dev))
+ else if (IS_GEN5(dev_priv) || IS_GEN6(dev_priv))
ironlake_set_fifo_underrun_reporting(dev, pipe, enable);
- else if (IS_GEN7(dev))
+ else if (IS_GEN7(dev_priv))
ivybridge_set_fifo_underrun_reporting(dev, pipe, enable, old);
- else if (IS_GEN8(dev) || IS_GEN9(dev))
+ else if (IS_GEN8(dev_priv) || IS_GEN9(dev_priv))
broadwell_set_fifo_underrun_reporting(dev, pipe, enable);
return old;
@@ -314,8 +313,8 @@ bool intel_set_pch_fifo_underrun_reporting(struct drm_i915_private *dev_priv,
enum transcoder pch_transcoder,
bool enable)
{
- struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pch_transcoder];
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_crtc *crtc =
+ intel_get_crtc_for_pipe(dev_priv, (enum pipe) pch_transcoder);
unsigned long flags;
bool old;
@@ -330,8 +329,8 @@ bool intel_set_pch_fifo_underrun_reporting(struct drm_i915_private *dev_priv,
spin_lock_irqsave(&dev_priv->irq_lock, flags);
- old = !intel_crtc->pch_fifo_underrun_disabled;
- intel_crtc->pch_fifo_underrun_disabled = !enable;
+ old = !crtc->pch_fifo_underrun_disabled;
+ crtc->pch_fifo_underrun_disabled = !enable;
if (HAS_PCH_IBX(dev_priv))
ibx_set_fifo_underrun_reporting(&dev_priv->drm,
@@ -358,7 +357,7 @@ bool intel_set_pch_fifo_underrun_reporting(struct drm_i915_private *dev_priv,
void intel_cpu_fifo_underrun_irq_handler(struct drm_i915_private *dev_priv,
enum pipe pipe)
{
- struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
+ struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
/* We may be called too early in init, thanks BIOS! */
if (crtc == NULL)
@@ -366,12 +365,14 @@ void intel_cpu_fifo_underrun_irq_handler(struct drm_i915_private *dev_priv,
/* GMCH can't disable fifo underruns, filter them. */
if (HAS_GMCH_DISPLAY(dev_priv) &&
- to_intel_crtc(crtc)->cpu_fifo_underrun_disabled)
+ crtc->cpu_fifo_underrun_disabled)
return;
if (intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false))
DRM_ERROR("CPU pipe %c FIFO underrun\n",
pipe_name(pipe));
+
+ intel_fbc_handle_fifo_underrun_irq(dev_priv);
}
/**
diff --git a/drivers/gpu/drm/i915/intel_frontbuffer.h b/drivers/gpu/drm/i915/intel_frontbuffer.h
index 76ceb539f9f0..7bab41218cf7 100644
--- a/drivers/gpu/drm/i915/intel_frontbuffer.h
+++ b/drivers/gpu/drm/i915/intel_frontbuffer.h
@@ -53,16 +53,17 @@ void __intel_fb_obj_flush(struct drm_i915_gem_object *obj,
* until the rendering completes or a flip on this frontbuffer plane is
* scheduled.
*/
-static inline void intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
+static inline bool intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
enum fb_op_origin origin)
{
unsigned int frontbuffer_bits;
frontbuffer_bits = atomic_read(&obj->frontbuffer_bits);
if (!frontbuffer_bits)
- return;
+ return false;
__intel_fb_obj_invalidate(obj, origin, frontbuffer_bits);
+ return true;
}
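(Illustrative aside: the switch from void to bool lets callers cheaply skip follow-up frontbuffer work when no bits are set; a hypothetical caller, not code from this patch.)

	if (!intel_fb_obj_invalidate(obj, ORIGIN_CPU))
		return; /* no frontbuffer bits, nothing to track */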
/**
diff --git a/drivers/gpu/drm/i915/intel_guc.h b/drivers/gpu/drm/i915/intel_guc.h
index 5cdf7aa75be5..0053258e03d3 100644
--- a/drivers/gpu/drm/i915/intel_guc.h
+++ b/drivers/gpu/drm/i915/intel_guc.h
@@ -64,7 +64,7 @@ struct drm_i915_gem_request;
*/
struct i915_guc_client {
struct i915_vma *vma;
- void *client_base; /* first page (only) of above */
+ void *vaddr;
struct i915_gem_context *owner;
struct intel_guc *guc;
@@ -123,10 +123,28 @@ struct intel_guc_fw {
uint32_t ucode_offset;
};
+struct intel_guc_log {
+ uint32_t flags;
+ struct i915_vma *vma;
+ void *buf_addr;
+ struct workqueue_struct *flush_wq;
+ struct work_struct flush_work;
+ struct rchan *relay_chan;
+
+ /* logging related stats */
+ u32 capture_miss_count;
+ u32 flush_interrupt_count;
+ u32 prev_overflow_count[GUC_MAX_LOG_BUFFER];
+ u32 total_overflow_count[GUC_MAX_LOG_BUFFER];
+ u32 flush_count[GUC_MAX_LOG_BUFFER];
+};
+
struct intel_guc {
struct intel_guc_fw guc_fw;
- uint32_t log_flags;
- struct i915_vma *log_vma;
+ struct intel_guc_log log;
+
+ /* GuC2Host interrupt related state */
+ bool interrupts_enabled;
struct i915_vma *ads_vma;
struct i915_vma *ctx_pool_vma;
@@ -146,6 +164,9 @@ struct intel_guc {
uint64_t submissions[I915_NUM_ENGINES];
uint32_t last_seqno[I915_NUM_ENGINES];
+
+ /* To serialize the Host2GuC actions */
+ struct mutex action_lock;
};
/* intel_guc_loader.c */
@@ -163,5 +184,10 @@ int i915_guc_wq_reserve(struct drm_i915_gem_request *rq);
void i915_guc_wq_unreserve(struct drm_i915_gem_request *request);
void i915_guc_submission_disable(struct drm_i915_private *dev_priv);
void i915_guc_submission_fini(struct drm_i915_private *dev_priv);
+void i915_guc_capture_logs(struct drm_i915_private *dev_priv);
+void i915_guc_flush_logs(struct drm_i915_private *dev_priv);
+void i915_guc_register(struct drm_i915_private *dev_priv);
+void i915_guc_unregister(struct drm_i915_private *dev_priv);
+int i915_guc_log_control(struct drm_i915_private *dev_priv, u64 control_val);
#endif
diff --git a/drivers/gpu/drm/i915/intel_guc_fwif.h b/drivers/gpu/drm/i915/intel_guc_fwif.h
index e40db2d2ae99..324ea902558b 100644
--- a/drivers/gpu/drm/i915/intel_guc_fwif.h
+++ b/drivers/gpu/drm/i915/intel_guc_fwif.h
@@ -104,9 +104,9 @@
#define GUC_LOG_ALLOC_IN_MEGABYTE (1 << 3)
#define GUC_LOG_CRASH_PAGES 1
#define GUC_LOG_CRASH_SHIFT 4
-#define GUC_LOG_DPC_PAGES 3
+#define GUC_LOG_DPC_PAGES 7
#define GUC_LOG_DPC_SHIFT 6
-#define GUC_LOG_ISR_PAGES 3
+#define GUC_LOG_ISR_PAGES 7
#define GUC_LOG_ISR_SHIFT 9
#define GUC_LOG_BUF_ADDR_SHIFT 12
@@ -419,15 +419,87 @@ struct guc_ads {
u32 reserved2[4];
} __packed;
+/* GuC logging structures */
+
+enum guc_log_buffer_type {
+ GUC_ISR_LOG_BUFFER,
+ GUC_DPC_LOG_BUFFER,
+ GUC_CRASH_DUMP_LOG_BUFFER,
+ GUC_MAX_LOG_BUFFER
+};
+
+/**
+ * DOC: GuC Log buffer Layout
+ *
+ * Page0 +-------------------------------+
+ * | ISR state header (32 bytes) |
+ * | DPC state header |
+ * | Crash dump state header |
+ * Page1 +-------------------------------+
+ * | ISR logs |
+ * Page9 +-------------------------------+
+ * | DPC logs |
+ * Page17 +-------------------------------+
+ * | Crash Dump logs |
+ * +-------------------------------+
+ *
+ * The state structure below is used to coordinate retrieval of GuC firmware
+ * logs. Separate state is maintained for each log buffer type.
+ * read_ptr points to the location i915 last read in the log buffer and is
+ * read-only for GuC firmware. write_ptr is incremented by GuC by the number
+ * of bytes written for each log entry and is read-only for i915.
+ * When any type of log buffer becomes half full, GuC sends a flush interrupt.
+ * GuC firmware expects that while it is writing to the second half of the
+ * buffer, the first half is consumed by the Host, which then sends a
+ * flush-completed acknowledgment, so that GuC never overwrites logs before
+ * they have been read. So when a buffer becomes half full and i915 has
+ * requested the interrupt, GuC sets the flush_to_file field, copies
+ * write_ptr into sampled_write_ptr and raises the interrupt.
+ * On receiving the interrupt i915 should read the buffer, clear the
+ * flush_to_file field and update read_ptr with the value of
+ * sampled_write_ptr, before sending an acknowledgment to GuC. The marker
+ * and version fields are for internal use by GuC and opaque to i915. The
+ * buffer_full_cnt field is incremented every time GuC detects a log buffer
+ * overflow.
+ */
+struct guc_log_buffer_state {
+ u32 marker[2];
+ u32 read_ptr;
+ u32 write_ptr;
+ u32 size;
+ u32 sampled_write_ptr;
+ union {
+ struct {
+ u32 flush_to_file:1;
+ u32 buffer_full_cnt:4;
+ u32 reserved:27;
+ };
+ u32 flags;
+ };
+ u32 version;
+} __packed;
+
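(Illustrative aside: a minimal sketch of the host-side consumer the protocol above implies; guc_consume_log_buffer() is a hypothetical helper, not the actual handler, which lives outside this hunk.)

static void guc_consume_log_buffer(struct guc_log_buffer_state *state,
				   void *log_buf, void *dst)
{
	u32 write = READ_ONCE(state->sampled_write_ptr);
	u32 read = state->read_ptr;

	/* Copy out [read, write), handling wrap-around of the circular buffer */
	if (write >= read) {
		memcpy(dst, log_buf + read, write - read);
	} else {
		memcpy(dst, log_buf + read, state->size - read);
		memcpy(dst + (state->size - read), log_buf, write);
	}

	/* Acknowledge per the protocol: advance read_ptr, clear flush_to_file */
	state->read_ptr = write;
	state->flush_to_file = 0;
}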
+union guc_log_control {
+ struct {
+ u32 logging_enabled:1;
+ u32 reserved1:3;
+ u32 verbosity:4;
+ u32 reserved2:24;
+ };
+ u32 value;
+} __packed;
+
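(Illustrative aside: how the control word might be assembled before being handed to i915_guc_log_control(), declared in intel_guc.h above; a hedged sketch, not the actual call site.)

	union guc_log_control log_param = {
		.logging_enabled = 1,
		.verbosity = 3,		/* e.g. from i915.guc_log_level */
	};

	ret = i915_guc_log_control(dev_priv, log_param.value);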
/* This Action will be programmed in C180 - SOFT_SCRATCH_O_REG */
enum host2guc_action {
HOST2GUC_ACTION_DEFAULT = 0x0,
HOST2GUC_ACTION_SAMPLE_FORCEWAKE = 0x6,
HOST2GUC_ACTION_ALLOCATE_DOORBELL = 0x10,
HOST2GUC_ACTION_DEALLOCATE_DOORBELL = 0x20,
+ HOST2GUC_ACTION_LOG_BUFFER_FILE_FLUSH_COMPLETE = 0x30,
+ HOST2GUC_ACTION_FORCE_LOG_BUFFER_FLUSH = 0x302,
HOST2GUC_ACTION_ENTER_S_STATE = 0x501,
HOST2GUC_ACTION_EXIT_S_STATE = 0x502,
HOST2GUC_ACTION_SLPC_REQUEST = 0x3003,
+ HOST2GUC_ACTION_UK_LOG_ENABLE_LOGGING = 0x0E000,
HOST2GUC_ACTION_LIMIT
};
@@ -449,4 +521,10 @@ enum guc2host_status {
GUC2HOST_STATUS_GENERIC_FAIL = GUC2HOST_STATUS(0x0000F000)
};
+/* This action will be programmed in C1BC - SOFT_SCRATCH_15_REG */
+enum guc2host_message {
+ GUC2HOST_MSG_CRASH_DUMP_POSTED = (1 << 1),
+ GUC2HOST_MSG_FLUSH_LOG_BUFFER = (1 << 3)
+};
+
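(Illustrative aside: a hedged sketch of decoding these flag bits from the GuC message register in an interrupt path; the handler name and msg plumbing are assumptions, only i915_guc_capture_logs() and the flag names come from this patch.)

static void guc2host_handle_message(struct drm_i915_private *dev_priv,
				    u32 msg)
{
	/* msg is the value GuC posted in SOFT_SCRATCH_15 */
	if (msg & GUC2HOST_MSG_FLUSH_LOG_BUFFER)
		i915_guc_capture_logs(dev_priv);

	if (msg & GUC2HOST_MSG_CRASH_DUMP_POSTED)
		DRM_DEBUG_DRIVER("GuC crash dump posted\n");
}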
#endif
diff --git a/drivers/gpu/drm/i915/intel_guc_loader.c b/drivers/gpu/drm/i915/intel_guc_loader.c
index 6fd39efb7894..34d6ad2cf7c1 100644
--- a/drivers/gpu/drm/i915/intel_guc_loader.c
+++ b/drivers/gpu/drm/i915/intel_guc_loader.c
@@ -100,12 +100,13 @@ const char *intel_guc_fw_status_repr(enum intel_guc_fw_status status)
static void guc_interrupts_release(struct drm_i915_private *dev_priv)
{
struct intel_engine_cs *engine;
+ enum intel_engine_id id;
int irqs;
/* tell all command streamers NOT to forward interrupts or vblank to GuC */
irqs = _MASKED_FIELD(GFX_FORWARD_VBLANK_MASK, GFX_FORWARD_VBLANK_NEVER);
irqs |= _MASKED_BIT_DISABLE(GFX_INTERRUPT_STEERING);
- for_each_engine(engine, dev_priv)
+ for_each_engine(engine, dev_priv, id)
I915_WRITE(RING_MODE_GEN7(engine), irqs);
/* route all GT interrupts to the host */
@@ -117,12 +118,13 @@ static void guc_interrupts_release(struct drm_i915_private *dev_priv)
static void guc_interrupts_capture(struct drm_i915_private *dev_priv)
{
struct intel_engine_cs *engine;
+ enum intel_engine_id id;
int irqs;
u32 tmp;
/* tell all command streamers to forward interrupts (but not vblank) to GuC */
irqs = _MASKED_BIT_ENABLE(GFX_INTERRUPT_STEERING);
- for_each_engine(engine, dev_priv)
+ for_each_engine(engine, dev_priv, id)
I915_WRITE(RING_MODE_GEN7(engine), irqs);
/* route USER_INTERRUPT to Host, all others are sent to GuC. */
@@ -209,11 +211,13 @@ static void guc_params_init(struct drm_i915_private *dev_priv)
params[GUC_CTL_FEATURE] |= GUC_CTL_DISABLE_SCHEDULER |
GUC_CTL_VCS2_ENABLED;
+ params[GUC_CTL_LOG_PARAMS] = guc->log.flags;
+
if (i915.guc_log_level >= 0) {
- params[GUC_CTL_LOG_PARAMS] = guc->log_flags;
params[GUC_CTL_DEBUG] =
i915.guc_log_level << GUC_LOG_VERBOSITY_SHIFT;
- }
+ } else
+ params[GUC_CTL_DEBUG] = GUC_LOG_DISABLED;
if (guc->ads_vma) {
u32 ads = i915_ggtt_offset(guc->ads_vma) >> PAGE_SHIFT;
@@ -347,7 +351,6 @@ static u32 guc_wopcm_size(struct drm_i915_private *dev_priv)
static int guc_ucode_xfer(struct drm_i915_private *dev_priv)
{
struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw;
- struct drm_device *dev = &dev_priv->drm;
struct i915_vma *vma;
int ret;
@@ -375,24 +378,22 @@ static int guc_ucode_xfer(struct drm_i915_private *dev_priv)
/* Enable MIA caching. GuC clock gating is disabled. */
I915_WRITE(GUC_SHIM_CONTROL, GUC_SHIM_CONTROL_VALUE);
- /* WaDisableMinuteIaClockGating:skl,bxt */
- if (IS_SKL_REVID(dev, 0, SKL_REVID_B0) ||
- IS_BXT_REVID(dev, 0, BXT_REVID_A1)) {
+ /* WaDisableMinuteIaClockGating:bxt */
+ if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) {
I915_WRITE(GUC_SHIM_CONTROL, (I915_READ(GUC_SHIM_CONTROL) &
~GUC_ENABLE_MIA_CLOCK_GATING));
}
- /* WaC6DisallowByGfxPause*/
- if (IS_SKL_REVID(dev, 0, SKL_REVID_C0) ||
- IS_BXT_REVID(dev, 0, BXT_REVID_B0))
+ /* WaC6DisallowByGfxPause:bxt */
+ if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_B0))
I915_WRITE(GEN6_GFXPAUSE, 0x30FFF);
- if (IS_BROXTON(dev))
+ if (IS_BROXTON(dev_priv))
I915_WRITE(GEN9LP_GT_PM_CONFIG, GT_DOORBELL_ENABLE);
else
I915_WRITE(GEN9_GT_PM_CONFIG, GT_DOORBELL_ENABLE);
- if (IS_GEN9(dev)) {
+ if (IS_GEN9(dev_priv)) {
/* DOP Clock Gating Enable for GuC clocks */
I915_WRITE(GEN7_MISCCPCTL, (GEN8_DOP_CLOCK_GATE_GUC_ENABLE |
I915_READ(GEN7_MISCCPCTL)));
@@ -484,6 +485,7 @@ int intel_guc_setup(struct drm_device *dev)
}
guc_interrupts_release(dev_priv);
+ gen9_reset_guc_interrupts(dev_priv);
guc_fw->guc_fw_load_status = GUC_FIRMWARE_PENDING;
@@ -528,6 +530,9 @@ int intel_guc_setup(struct drm_device *dev)
intel_guc_fw_status_repr(guc_fw->guc_fw_load_status));
if (i915.enable_guc_submission) {
+ if (i915.guc_log_level >= 0)
+ gen9_enable_guc_interrupts(dev_priv);
+
err = i915_guc_submission_enable(dev_priv);
if (err)
goto fail;
@@ -561,7 +566,7 @@ fail:
ret = 0;
}
- if (err == 0 && !HAS_GUC_UCODE(dev))
+ if (err == 0 && !HAS_GUC_UCODE(dev_priv))
; /* Don't mention the GuC! */
else if (err == 0)
DRM_INFO("GuC firmware load skipped\n");
@@ -720,23 +725,28 @@ void intel_guc_init(struct drm_device *dev)
struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw;
const char *fw_path;
- /* A negative value means "use platform default" */
- if (i915.enable_guc_loading < 0)
- i915.enable_guc_loading = HAS_GUC_UCODE(dev);
- if (i915.enable_guc_submission < 0)
- i915.enable_guc_submission = HAS_GUC_SCHED(dev);
+ if (!HAS_GUC(dev_priv)) {
+ i915.enable_guc_loading = 0;
+ i915.enable_guc_submission = 0;
+ } else {
+ /* A negative value means "use platform default" */
+ if (i915.enable_guc_loading < 0)
+ i915.enable_guc_loading = HAS_GUC_UCODE(dev_priv);
+ if (i915.enable_guc_submission < 0)
+ i915.enable_guc_submission = HAS_GUC_SCHED(dev_priv);
+ }
- if (!HAS_GUC_UCODE(dev)) {
+ if (!HAS_GUC_UCODE(dev_priv)) {
fw_path = NULL;
- } else if (IS_SKYLAKE(dev)) {
+ } else if (IS_SKYLAKE(dev_priv)) {
fw_path = I915_SKL_GUC_UCODE;
guc_fw->guc_fw_major_wanted = SKL_FW_MAJOR;
guc_fw->guc_fw_minor_wanted = SKL_FW_MINOR;
- } else if (IS_BROXTON(dev)) {
+ } else if (IS_BROXTON(dev_priv)) {
fw_path = I915_BXT_GUC_UCODE;
guc_fw->guc_fw_major_wanted = BXT_FW_MAJOR;
guc_fw->guc_fw_minor_wanted = BXT_FW_MINOR;
- } else if (IS_KABYLAKE(dev)) {
+ } else if (IS_KABYLAKE(dev_priv)) {
fw_path = I915_KBL_GUC_UCODE;
guc_fw->guc_fw_major_wanted = KBL_FW_MAJOR;
guc_fw->guc_fw_minor_wanted = KBL_FW_MINOR;
diff --git a/drivers/gpu/drm/i915/intel_gvt.c b/drivers/gpu/drm/i915/intel_gvt.c
index 434f4d5c553d..290384e86c63 100644
--- a/drivers/gpu/drm/i915/intel_gvt.c
+++ b/drivers/gpu/drm/i915/intel_gvt.c
@@ -31,14 +31,20 @@
* GPU among multiple virtual machines on a time-sharing basis. Each
* virtual machine is presented a virtual GPU (vGPU), which has equivalent
* features as the underlying physical GPU (pGPU), so i915 driver can run
- * seamlessly in a virtual machine. This file provides the englightments
- * of GVT and the necessary components used by GVT in i915 driver.
+ * seamlessly in a virtual machine.
+ *
+ * To virtualize GPU resources, the GVT-g driver depends on hypervisor
+ * technology, e.g. KVM/VFIO/mdev or Xen, to provide resource-access
+ * trapping, so that GPU resources can be virtualized within the GVT-g
+ * device module. More architectural design documentation is available
+ * at https://01.org/group/2230/documentation-list.
*/
static bool is_supported_device(struct drm_i915_private *dev_priv)
{
if (IS_BROADWELL(dev_priv))
return true;
+ if (IS_SKYLAKE(dev_priv))
+ return true;
return false;
}
diff --git a/drivers/gpu/drm/i915/intel_gvt.h b/drivers/gpu/drm/i915/intel_gvt.h
index 960211df74db..25df2d65b985 100644
--- a/drivers/gpu/drm/i915/intel_gvt.h
+++ b/drivers/gpu/drm/i915/intel_gvt.h
@@ -24,7 +24,7 @@
#ifndef _INTEL_GVT_H_
#define _INTEL_GVT_H_
-#include "gvt/gvt.h"
+struct intel_gvt;
#ifdef CONFIG_DRM_I915_GVT
int intel_gvt_init(struct drm_i915_private *dev_priv);
diff --git a/drivers/gpu/drm/i915/intel_hangcheck.c b/drivers/gpu/drm/i915/intel_hangcheck.c
new file mode 100644
index 000000000000..53df5b11bff4
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_hangcheck.c
@@ -0,0 +1,450 @@
+/*
+ * Copyright © 2016 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#include "i915_drv.h"
+
+static bool
+ipehr_is_semaphore_wait(struct intel_engine_cs *engine, u32 ipehr)
+{
+ if (INTEL_GEN(engine->i915) >= 8) {
+ return (ipehr >> 23) == 0x1c;
+ } else {
+ ipehr &= ~MI_SEMAPHORE_SYNC_MASK;
+ return ipehr == (MI_SEMAPHORE_MBOX | MI_SEMAPHORE_COMPARE |
+ MI_SEMAPHORE_REGISTER);
+ }
+}
+
+static struct intel_engine_cs *
+semaphore_wait_to_signaller_ring(struct intel_engine_cs *engine, u32 ipehr,
+ u64 offset)
+{
+ struct drm_i915_private *dev_priv = engine->i915;
+ struct intel_engine_cs *signaller;
+ enum intel_engine_id id;
+
+ if (INTEL_GEN(dev_priv) >= 8) {
+ for_each_engine(signaller, dev_priv, id) {
+ if (engine == signaller)
+ continue;
+
+ if (offset == signaller->semaphore.signal_ggtt[engine->hw_id])
+ return signaller;
+ }
+ } else {
+ u32 sync_bits = ipehr & MI_SEMAPHORE_SYNC_MASK;
+
+ for_each_engine(signaller, dev_priv, id) {
+ if (engine == signaller)
+ continue;
+
+ if (sync_bits == signaller->semaphore.mbox.wait[engine->hw_id])
+ return signaller;
+ }
+ }
+
+ DRM_DEBUG_DRIVER("No signaller ring found for %s, ipehr 0x%08x, offset 0x%016llx\n",
+ engine->name, ipehr, offset);
+
+ return ERR_PTR(-ENODEV);
+}
+
+static struct intel_engine_cs *
+semaphore_waits_for(struct intel_engine_cs *engine, u32 *seqno)
+{
+ struct drm_i915_private *dev_priv = engine->i915;
+ void __iomem *vaddr;
+ u32 cmd, ipehr, head;
+ u64 offset = 0;
+ int i, backwards;
+
+ /*
+ * This function does not support execlist mode - any attempt to
+ * proceed further into this function will result in a kernel panic
+ * when dereferencing ring->buffer, which is not set up in execlist
+ * mode.
+ *
+ * The correct way of doing it would be to derive the currently
+ * executing ring buffer from the current context, which is derived
+ * from the currently running request. Unfortunately, to get the
+ * current request we would have to grab the struct_mutex before doing
+ * anything else, which would be ill-advised since some other thread
+ * might have grabbed it already and managed to hang itself, causing
+ * the hang checker to deadlock.
+ *
+ * Therefore, this function does not support execlist mode in its
+ * current form. Just return NULL and move on.
+ */
+ if (engine->buffer == NULL)
+ return NULL;
+
+ ipehr = I915_READ(RING_IPEHR(engine->mmio_base));
+ if (!ipehr_is_semaphore_wait(engine, ipehr))
+ return NULL;
+
+ /*
+ * HEAD is likely pointing to the dword after the actual command,
+ * so scan backwards until we find the MBOX. But limit it to just 3
+ * or 4 dwords depending on the semaphore wait command size.
+ * Note that we don't care about ACTHD here since that might
+ * point at the batch, and semaphores are always emitted into the
+ * ringbuffer itself.
+ */
+ head = I915_READ_HEAD(engine) & HEAD_ADDR;
+ backwards = (INTEL_GEN(dev_priv) >= 8) ? 5 : 4;
+ vaddr = (void __iomem *)engine->buffer->vaddr;
+
+ for (i = backwards; i; --i) {
+ /*
+ * Be paranoid and presume the hw has gone off into the wild -
+ * our ring is smaller than what the hardware (and hence
+ * HEAD_ADDR) allows. Also handles wrap-around.
+ */
+ head &= engine->buffer->size - 1;
+
+ /* This here seems to blow up */
+ cmd = ioread32(vaddr + head);
+ if (cmd == ipehr)
+ break;
+
+ head -= 4;
+ }
+
+ if (!i)
+ return NULL;
+
+ *seqno = ioread32(vaddr + head + 4) + 1;
+ if (INTEL_GEN(dev_priv) >= 8) {
+ offset = ioread32(vaddr + head + 12);
+ offset <<= 32;
+ offset |= ioread32(vaddr + head + 8);
+ }
+ return semaphore_wait_to_signaller_ring(engine, ipehr, offset);
+}
+
+static int semaphore_passed(struct intel_engine_cs *engine)
+{
+ struct drm_i915_private *dev_priv = engine->i915;
+ struct intel_engine_cs *signaller;
+ u32 seqno;
+
+ engine->hangcheck.deadlock++;
+
+ signaller = semaphore_waits_for(engine, &seqno);
+ if (signaller == NULL)
+ return -1;
+
+ if (IS_ERR(signaller))
+ return 0;
+
+ /* Prevent pathological recursion due to driver bugs */
+ if (signaller->hangcheck.deadlock >= I915_NUM_ENGINES)
+ return -1;
+
+ if (i915_seqno_passed(intel_engine_get_seqno(signaller), seqno))
+ return 1;
+
+ /* cursory check for an unkickable deadlock */
+ if (I915_READ_CTL(signaller) & RING_WAIT_SEMAPHORE &&
+ semaphore_passed(signaller) < 0)
+ return -1;
+
+ return 0;
+}
+
+static void semaphore_clear_deadlocks(struct drm_i915_private *dev_priv)
+{
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+
+ for_each_engine(engine, dev_priv, id)
+ engine->hangcheck.deadlock = 0;
+}
+
+static bool instdone_unchanged(u32 current_instdone, u32 *old_instdone)
+{
+ u32 tmp = current_instdone | *old_instdone;
+ bool unchanged;
+
+ unchanged = tmp == *old_instdone;
+ *old_instdone |= tmp;
+
+ return unchanged;
+}
+
+static bool subunits_stuck(struct intel_engine_cs *engine)
+{
+ struct drm_i915_private *dev_priv = engine->i915;
+ struct intel_instdone instdone;
+ struct intel_instdone *accu_instdone = &engine->hangcheck.instdone;
+ bool stuck;
+ int slice;
+ int subslice;
+
+ if (engine->id != RCS)
+ return true;
+
+ intel_engine_get_instdone(engine, &instdone);
+
+ /* There might be unstable subunit states even when
+ * the actual head is not moving. Filter out the unstable ones by
+ * accumulating the undone -> done transitions and only
+ * consider those as progress.
+ */
+ stuck = instdone_unchanged(instdone.instdone,
+ &accu_instdone->instdone);
+ stuck &= instdone_unchanged(instdone.slice_common,
+ &accu_instdone->slice_common);
+
+ for_each_instdone_slice_subslice(dev_priv, slice, subslice) {
+ stuck &= instdone_unchanged(instdone.sampler[slice][subslice],
+ &accu_instdone->sampler[slice][subslice]);
+ stuck &= instdone_unchanged(instdone.row[slice][subslice],
+ &accu_instdone->row[slice][subslice]);
+ }
+
+ return stuck;
+}
+
+static enum intel_engine_hangcheck_action
+head_stuck(struct intel_engine_cs *engine, u64 acthd)
+{
+ if (acthd != engine->hangcheck.acthd) {
+
+ /* Clear subunit states on head movement */
+ memset(&engine->hangcheck.instdone, 0,
+ sizeof(engine->hangcheck.instdone));
+
+ return HANGCHECK_ACTIVE;
+ }
+
+ if (!subunits_stuck(engine))
+ return HANGCHECK_ACTIVE;
+
+ return HANGCHECK_HUNG;
+}
+
+static enum intel_engine_hangcheck_action
+engine_stuck(struct intel_engine_cs *engine, u64 acthd)
+{
+ struct drm_i915_private *dev_priv = engine->i915;
+ enum intel_engine_hangcheck_action ha;
+ u32 tmp;
+
+ ha = head_stuck(engine, acthd);
+ if (ha != HANGCHECK_HUNG)
+ return ha;
+
+ if (IS_GEN2(dev_priv))
+ return HANGCHECK_HUNG;
+
+ /* Is the chip hanging on a WAIT_FOR_EVENT?
+ * If so we can simply poke the RB_WAIT bit
+ * and break the hang. This should work on
+ * all but the second generation chipsets.
+ */
+ tmp = I915_READ_CTL(engine);
+ if (tmp & RING_WAIT) {
+ i915_handle_error(dev_priv, 0,
+ "Kicking stuck wait on %s",
+ engine->name);
+ I915_WRITE_CTL(engine, tmp);
+ return HANGCHECK_KICK;
+ }
+
+ if (INTEL_GEN(dev_priv) >= 6 && tmp & RING_WAIT_SEMAPHORE) {
+ switch (semaphore_passed(engine)) {
+ default:
+ return HANGCHECK_HUNG;
+ case 1:
+ i915_handle_error(dev_priv, 0,
+ "Kicking stuck semaphore on %s",
+ engine->name);
+ I915_WRITE_CTL(engine, tmp);
+ return HANGCHECK_KICK;
+ case 0:
+ return HANGCHECK_WAIT;
+ }
+ }
+
+ return HANGCHECK_HUNG;
+}
+
+/*
+ * This is called when the chip hasn't reported back with completed
+ * batchbuffers in a long time. We keep track of per-ring seqno progress and,
+ * if there is no progress, the hangcheck score for that ring is increased.
+ * Further, acthd is inspected to see if the ring is stuck. If it is stuck,
+ * we kick the ring. If we see no progress on three subsequent calls,
+ * we assume the chip is wedged and try to fix it by resetting the chip.
+ */
+static void i915_hangcheck_elapsed(struct work_struct *work)
+{
+ struct drm_i915_private *dev_priv =
+ container_of(work, typeof(*dev_priv),
+ gpu_error.hangcheck_work.work);
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ unsigned int hung = 0, stuck = 0;
+ int busy_count = 0;
+#define BUSY 1
+#define KICK 5
+#define HUNG 20
+#define ACTIVE_DECAY 15
+
+ if (!i915.enable_hangcheck)
+ return;
+
+ if (!READ_ONCE(dev_priv->gt.awake))
+ return;
+
+ /* As enabling the GPU requires fairly extensive mmio access,
+ * periodically arm the mmio checker to see if we are triggering
+ * any invalid access.
+ */
+ intel_uncore_arm_unclaimed_mmio_detection(dev_priv);
+
+ for_each_engine(engine, dev_priv, id) {
+ bool busy = intel_engine_has_waiter(engine);
+ u64 acthd;
+ u32 seqno;
+ u32 submit;
+
+ semaphore_clear_deadlocks(dev_priv);
+
+ /* We don't strictly need an irq-barrier here, as we are not
+ * serving an interrupt request, but be paranoid in case the
+ * barrier has side-effects (such as preventing a broken
+ * cacheline snoop) and so be sure that we can see the seqno
+ * advance. If the seqno should stick, due to a stale
+ * cacheline, we would erroneously declare the GPU hung.
+ */
+ if (engine->irq_seqno_barrier)
+ engine->irq_seqno_barrier(engine);
+
+ acthd = intel_engine_get_active_head(engine);
+ seqno = intel_engine_get_seqno(engine);
+ submit = intel_engine_last_submit(engine);
+
+ if (engine->hangcheck.seqno == seqno) {
+ if (i915_seqno_passed(seqno, submit)) {
+ engine->hangcheck.action = HANGCHECK_IDLE;
+ } else {
+ /* We always increment the hangcheck score
+ * if the engine is busy and still processing
+ * the same request, so that no single request
+ * can run indefinitely (such as a chain of
+ * batches). The only time we do not increment
+ * the hangcheck score on this ring is when this
+ * engine is in a legitimate wait for another
+ * engine. In that case the waiting engine is a
+ * victim and we want to be sure we catch the
+ * right culprit. Every time we do kick
+ * the ring, we add a small increment to the
+ * score so that we can catch a batch that is
+ * being repeatedly kicked and is therefore
+ * responsible for stalling the machine.
+ */
+ engine->hangcheck.action =
+ engine_stuck(engine, acthd);
+
+ switch (engine->hangcheck.action) {
+ case HANGCHECK_IDLE:
+ case HANGCHECK_WAIT:
+ break;
+ case HANGCHECK_ACTIVE:
+ engine->hangcheck.score += BUSY;
+ break;
+ case HANGCHECK_KICK:
+ engine->hangcheck.score += KICK;
+ break;
+ case HANGCHECK_HUNG:
+ engine->hangcheck.score += HUNG;
+ break;
+ }
+ }
+
+ if (engine->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG) {
+ hung |= intel_engine_flag(engine);
+ if (engine->hangcheck.action != HANGCHECK_HUNG)
+ stuck |= intel_engine_flag(engine);
+ }
+ } else {
+ engine->hangcheck.action = HANGCHECK_ACTIVE;
+
+ /* Gradually reduce the count so that we catch DoS
+ * attempts across multiple batches.
+ */
+ if (engine->hangcheck.score > 0)
+ engine->hangcheck.score -= ACTIVE_DECAY;
+ if (engine->hangcheck.score < 0)
+ engine->hangcheck.score = 0;
+
+ /* Clear head and subunit states on seqno movement */
+ acthd = 0;
+
+ memset(&engine->hangcheck.instdone, 0,
+ sizeof(engine->hangcheck.instdone));
+ }
+
+ engine->hangcheck.seqno = seqno;
+ engine->hangcheck.acthd = acthd;
+ busy_count += busy;
+ }
+
+ if (hung) {
+ char msg[80];
+ unsigned int tmp;
+ int len;
+
+ /* If some rings hung but others were still busy, only
+ * blame the hanging rings in the synopsis.
+ */
+ if (stuck != hung)
+ hung &= ~stuck;
+ len = scnprintf(msg, sizeof(msg),
+ "%s on ", stuck == hung ? "No progress" : "Hang");
+ for_each_engine_masked(engine, dev_priv, hung, tmp)
+ len += scnprintf(msg + len, sizeof(msg) - len,
+ "%s, ", engine->name);
+ msg[len-2] = '\0';
+
+ return i915_handle_error(dev_priv, hung, msg);
+ }
+
+ /* Reset timer in case GPU hangs without another request being added */
+ if (busy_count)
+ i915_queue_hangcheck(dev_priv);
+}
+
+void intel_engine_init_hangcheck(struct intel_engine_cs *engine)
+{
+ memset(&engine->hangcheck, 0, sizeof(engine->hangcheck));
+}
+
+void intel_hangcheck_init(struct drm_i915_private *i915)
+{
+ INIT_DELAYED_WORK(&i915->gpu_error.hangcheck_work,
+ i915_hangcheck_elapsed);
+}
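
To make the scoring in i915_hangcheck_elapsed() concrete, here is a standalone sketch of how a per-engine score evolves under the increments defined above (BUSY=1, KICK=5, HUNG=20, ACTIVE_DECAY=15 are taken from the patch); the firing threshold HANGCHECK_SCORE_RING_HUNG is assumed to be 31 for this illustration only:

	/* Standalone illustration of the score arithmetic; the threshold
	 * value is an assumption for the example.
	 */
	#include <assert.h>

	#define HUNG		20
	#define ACTIVE_DECAY	15

	int main(void)
	{
		int score = 0;
		const int threshold = 31;	/* assumed HANGCHECK_SCORE_RING_HUNG */

		score += HUNG;			/* tick 1: no progress, head stuck */
		assert(score < threshold);
		score += HUNG;			/* tick 2: still stuck */
		assert(score >= threshold);	/* 40 >= 31: declare the ring hung */

		score -= ACTIVE_DECAY;		/* a seqno advance decays the score */
		if (score < 0)
			score = 0;
		assert(score == 25);
		return 0;
	}
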
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index 13c306173f27..fb88e32e25a3 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -50,7 +50,7 @@ assert_hdmi_port_disabled(struct intel_hdmi *intel_hdmi)
struct drm_i915_private *dev_priv = to_i915(dev);
uint32_t enabled_bits;
- enabled_bits = HAS_DDI(dev) ? DDI_BUF_CTL_ENABLE : SDVO_ENABLE;
+ enabled_bits = HAS_DDI(dev_priv) ? DDI_BUF_CTL_ENABLE : SDVO_ENABLE;
WARN(I915_READ(intel_hdmi->hdmi_reg) & enabled_bits,
"HDMI port enabled, expecting disabled\n");
@@ -864,7 +864,7 @@ static void intel_hdmi_prepare(struct intel_encoder *encoder)
intel_dp_dual_mode_set_tmds_output(intel_hdmi, true);
hdmi_val = SDVO_ENCODING_HDMI;
- if (!HAS_PCH_SPLIT(dev) && crtc->config->limited_color_range)
+ if (!HAS_PCH_SPLIT(dev_priv) && crtc->config->limited_color_range)
hdmi_val |= HDMI_COLOR_RANGE_16_235;
if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
hdmi_val |= SDVO_VSYNC_ACTIVE_HIGH;
@@ -879,9 +879,9 @@ static void intel_hdmi_prepare(struct intel_encoder *encoder)
if (crtc->config->has_hdmi_sink)
hdmi_val |= HDMI_MODE_SELECT_HDMI;
- if (HAS_PCH_CPT(dev))
+ if (HAS_PCH_CPT(dev_priv))
hdmi_val |= SDVO_PIPE_SEL_CPT(crtc->pipe);
- else if (IS_CHERRYVIEW(dev))
+ else if (IS_CHERRYVIEW(dev_priv))
hdmi_val |= SDVO_PIPE_SEL_CHV(crtc->pipe);
else
hdmi_val |= SDVO_PIPE_SEL(crtc->pipe);
@@ -911,9 +911,9 @@ static bool intel_hdmi_get_hw_state(struct intel_encoder *encoder,
if (!(tmp & SDVO_ENABLE))
goto out;
- if (HAS_PCH_CPT(dev))
+ if (HAS_PCH_CPT(dev_priv))
*pipe = PORT_TO_PIPE_CPT(tmp);
- else if (IS_CHERRYVIEW(dev))
+ else if (IS_CHERRYVIEW(dev_priv))
*pipe = SDVO_PORT_TO_PIPE_CHV(tmp);
else
*pipe = PORT_TO_PIPE(tmp);
@@ -956,7 +956,7 @@ static void intel_hdmi_get_config(struct intel_encoder *encoder,
if (tmp & SDVO_AUDIO_ENABLE)
pipe_config->has_audio = true;
- if (!HAS_PCH_SPLIT(dev) &&
+ if (!HAS_PCH_SPLIT(dev_priv) &&
tmp & HDMI_COLOR_RANGE_16_235)
pipe_config->limited_color_range = true;
@@ -975,14 +975,16 @@ static void intel_hdmi_get_config(struct intel_encoder *encoder,
pipe_config->lane_count = 4;
}
-static void intel_enable_hdmi_audio(struct intel_encoder *encoder)
+static void intel_enable_hdmi_audio(struct intel_encoder *encoder,
+ struct intel_crtc_state *pipe_config,
+ struct drm_connector_state *conn_state)
{
struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
WARN_ON(!crtc->config->has_hdmi_sink);
DRM_DEBUG_DRIVER("Enabling HDMI audio on pipe %c\n",
pipe_name(crtc->pipe));
- intel_audio_codec_enable(encoder);
+ intel_audio_codec_enable(encoder, pipe_config, conn_state);
}
static void g4x_enable_hdmi(struct intel_encoder *encoder,
@@ -991,21 +993,20 @@ static void g4x_enable_hdmi(struct intel_encoder *encoder,
{
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
u32 temp;
temp = I915_READ(intel_hdmi->hdmi_reg);
temp |= SDVO_ENABLE;
- if (crtc->config->has_audio)
+ if (pipe_config->has_audio)
temp |= SDVO_AUDIO_ENABLE;
I915_WRITE(intel_hdmi->hdmi_reg, temp);
POSTING_READ(intel_hdmi->hdmi_reg);
- if (crtc->config->has_audio)
- intel_enable_hdmi_audio(encoder);
+ if (pipe_config->has_audio)
+ intel_enable_hdmi_audio(encoder, pipe_config, conn_state);
}
static void ibx_enable_hdmi(struct intel_encoder *encoder,
@@ -1040,8 +1041,8 @@ static void ibx_enable_hdmi(struct intel_encoder *encoder,
* FIXME: BSpec says this should be done at the end of
 * the modeset sequence, so not sure if this isn't too soon.
*/
- if (crtc->config->pipe_bpp > 24 &&
- crtc->config->pixel_multiplier > 1) {
+ if (pipe_config->pipe_bpp > 24 &&
+ pipe_config->pixel_multiplier > 1) {
I915_WRITE(intel_hdmi->hdmi_reg, temp & ~SDVO_ENABLE);
POSTING_READ(intel_hdmi->hdmi_reg);
@@ -1055,8 +1056,8 @@ static void ibx_enable_hdmi(struct intel_encoder *encoder,
POSTING_READ(intel_hdmi->hdmi_reg);
}
- if (crtc->config->has_audio)
- intel_enable_hdmi_audio(encoder);
+ if (pipe_config->has_audio)
+ intel_enable_hdmi_audio(encoder, pipe_config, conn_state);
}
static void cpt_enable_hdmi(struct intel_encoder *encoder,
@@ -1073,7 +1074,7 @@ static void cpt_enable_hdmi(struct intel_encoder *encoder,
temp = I915_READ(intel_hdmi->hdmi_reg);
temp |= SDVO_ENABLE;
- if (crtc->config->has_audio)
+ if (pipe_config->has_audio)
temp |= SDVO_AUDIO_ENABLE;
/*
@@ -1086,7 +1087,7 @@ static void cpt_enable_hdmi(struct intel_encoder *encoder,
* 4. enable HDMI clock gating
*/
- if (crtc->config->pipe_bpp > 24) {
+ if (pipe_config->pipe_bpp > 24) {
I915_WRITE(TRANS_CHICKEN1(pipe),
I915_READ(TRANS_CHICKEN1(pipe)) |
TRANS_CHICKEN1_HDMIUNIT_GC_DISABLE);
@@ -1098,7 +1099,7 @@ static void cpt_enable_hdmi(struct intel_encoder *encoder,
I915_WRITE(intel_hdmi->hdmi_reg, temp);
POSTING_READ(intel_hdmi->hdmi_reg);
- if (crtc->config->pipe_bpp > 24) {
+ if (pipe_config->pipe_bpp > 24) {
temp &= ~SDVO_COLOR_FORMAT_MASK;
temp |= HDMI_COLOR_FORMAT_12bpc;
@@ -1110,8 +1111,8 @@ static void cpt_enable_hdmi(struct intel_encoder *encoder,
~TRANS_CHICKEN1_HDMIUNIT_GC_DISABLE);
}
- if (crtc->config->has_audio)
- intel_enable_hdmi_audio(encoder);
+ if (pipe_config->has_audio)
+ intel_enable_hdmi_audio(encoder, pipe_config, conn_state);
}
static void vlv_enable_hdmi(struct intel_encoder *encoder,
@@ -1141,7 +1142,7 @@ static void intel_disable_hdmi(struct intel_encoder *encoder,
* to transcoder A after disabling it to allow the
* matching DP port to be enabled on transcoder A.
*/
- if (HAS_PCH_IBX(dev) && crtc->pipe == PIPE_B) {
+ if (HAS_PCH_IBX(dev_priv) && crtc->pipe == PIPE_B) {
/*
* We get CPU/PCH FIFO underruns on the other pipe when
* doing the workaround. Sweep them under the rug.
@@ -1164,7 +1165,7 @@ static void intel_disable_hdmi(struct intel_encoder *encoder,
I915_WRITE(intel_hdmi->hdmi_reg, temp);
POSTING_READ(intel_hdmi->hdmi_reg);
- intel_wait_for_vblank_if_active(&dev_priv->drm, PIPE_A);
+ intel_wait_for_vblank_if_active(dev_priv, PIPE_A);
intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, true);
intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, true);
}
@@ -1178,9 +1179,7 @@ static void g4x_disable_hdmi(struct intel_encoder *encoder,
struct intel_crtc_state *old_crtc_state,
struct drm_connector_state *old_conn_state)
{
- struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
-
- if (crtc->config->has_audio)
+ if (old_crtc_state->has_audio)
intel_audio_codec_disable(encoder);
intel_disable_hdmi(encoder, old_crtc_state, old_conn_state);
@@ -1190,9 +1189,7 @@ static void pch_disable_hdmi(struct intel_encoder *encoder,
struct intel_crtc_state *old_crtc_state,
struct drm_connector_state *old_conn_state)
{
- struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
-
- if (crtc->config->has_audio)
+ if (old_crtc_state->has_audio)
intel_audio_codec_disable(encoder);
}
@@ -1241,7 +1238,7 @@ static enum drm_mode_status
hdmi_port_clock_valid(struct intel_hdmi *hdmi,
int clock, bool respect_downstream_limits)
{
- struct drm_device *dev = intel_hdmi_to_dev(hdmi);
+ struct drm_i915_private *dev_priv = to_i915(intel_hdmi_to_dev(hdmi));
if (clock < 25000)
return MODE_CLOCK_LOW;
@@ -1249,11 +1246,11 @@ hdmi_port_clock_valid(struct intel_hdmi *hdmi,
return MODE_CLOCK_HIGH;
/* BXT DPLL can't generate 223-240 MHz */
- if (IS_BROXTON(dev) && clock > 223333 && clock < 240000)
+ if (IS_BROXTON(dev_priv) && clock > 223333 && clock < 240000)
return MODE_CLOCK_RANGE;
/* CHV DPLL can't generate 216-240 MHz */
- if (IS_CHERRYVIEW(dev) && clock > 216000 && clock < 240000)
+ if (IS_CHERRYVIEW(dev_priv) && clock > 216000 && clock < 240000)
return MODE_CLOCK_RANGE;
return MODE_OK;
@@ -1265,6 +1262,7 @@ intel_hdmi_mode_valid(struct drm_connector *connector,
{
struct intel_hdmi *hdmi = intel_attached_hdmi(connector);
struct drm_device *dev = intel_hdmi_to_dev(hdmi);
+ struct drm_i915_private *dev_priv = to_i915(dev);
enum drm_mode_status status;
int clock;
int max_dotclk = to_i915(connector->dev)->max_dotclk_freq;
@@ -1287,7 +1285,7 @@ intel_hdmi_mode_valid(struct drm_connector *connector,
status = hdmi_port_clock_valid(hdmi, clock, true);
/* if we can't do 8bpc we may still be able to do 12bpc */
- if (!HAS_GMCH_DISPLAY(dev) && status != MODE_OK)
+ if (!HAS_GMCH_DISPLAY(dev_priv) && status != MODE_OK)
status = hdmi_port_clock_valid(hdmi, clock * 3 / 2, true);
return status;
@@ -1297,7 +1295,7 @@ static bool hdmi_12bpc_possible(struct intel_crtc_state *crtc_state)
{
struct drm_device *dev = crtc_state->base.crtc->dev;
- if (HAS_GMCH_DISPLAY(dev))
+ if (HAS_GMCH_DISPLAY(to_i915(dev)))
return false;
/*
@@ -1312,7 +1310,7 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder,
struct drm_connector_state *conn_state)
{
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
- struct drm_device *dev = encoder->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
int clock_8bpc = pipe_config->base.adjusted_mode.crtc_clock;
int clock_12bpc = clock_8bpc * 3 / 2;
@@ -1339,7 +1337,7 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder,
clock_12bpc *= 2;
}
- if (HAS_PCH_SPLIT(dev) && !HAS_DDI(dev))
+ if (HAS_PCH_SPLIT(dev_priv) && !HAS_DDI(dev_priv))
pipe_config->has_pch_encoder = true;
if (pipe_config->has_hdmi_sink && intel_hdmi->has_audio)
@@ -1644,13 +1642,12 @@ static void intel_hdmi_pre_enable(struct intel_encoder *encoder,
struct drm_connector_state *conn_state)
{
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
- struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
- const struct drm_display_mode *adjusted_mode = &intel_crtc->config->base.adjusted_mode;
+ const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
intel_hdmi_prepare(encoder);
intel_hdmi->set_infoframes(&encoder->base,
- intel_crtc->config->has_hdmi_sink,
+ pipe_config->has_hdmi_sink,
adjusted_mode);
}
@@ -1662,9 +1659,7 @@ static void vlv_hdmi_pre_enable(struct intel_encoder *encoder,
struct intel_hdmi *intel_hdmi = &dport->hdmi;
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_crtc *intel_crtc =
- to_intel_crtc(encoder->base.crtc);
- const struct drm_display_mode *adjusted_mode = &intel_crtc->config->base.adjusted_mode;
+ const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
vlv_phy_pre_encoder_enable(encoder);
@@ -1673,7 +1668,7 @@ static void vlv_hdmi_pre_enable(struct intel_encoder *encoder,
0x2b247878);
intel_hdmi->set_infoframes(&encoder->base,
- intel_crtc->config->has_hdmi_sink,
+ pipe_config->has_hdmi_sink,
adjusted_mode);
g4x_enable_hdmi(encoder, pipe_config, conn_state);
@@ -1896,19 +1891,19 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
return;
}
- if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
+ if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
intel_hdmi->write_infoframe = vlv_write_infoframe;
intel_hdmi->set_infoframes = vlv_set_infoframes;
intel_hdmi->infoframe_enabled = vlv_infoframe_enabled;
- } else if (IS_G4X(dev)) {
+ } else if (IS_G4X(dev_priv)) {
intel_hdmi->write_infoframe = g4x_write_infoframe;
intel_hdmi->set_infoframes = g4x_set_infoframes;
intel_hdmi->infoframe_enabled = g4x_infoframe_enabled;
- } else if (HAS_DDI(dev)) {
+ } else if (HAS_DDI(dev_priv)) {
intel_hdmi->write_infoframe = hsw_write_infoframe;
intel_hdmi->set_infoframes = hsw_set_infoframes;
intel_hdmi->infoframe_enabled = hsw_infoframe_enabled;
- } else if (HAS_PCH_IBX(dev)) {
+ } else if (HAS_PCH_IBX(dev_priv)) {
intel_hdmi->write_infoframe = ibx_write_infoframe;
intel_hdmi->set_infoframes = ibx_set_infoframes;
intel_hdmi->infoframe_enabled = ibx_infoframe_enabled;
@@ -1918,7 +1913,7 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
intel_hdmi->infoframe_enabled = cpt_infoframe_enabled;
}
- if (HAS_DDI(dev))
+ if (HAS_DDI(dev_priv))
intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
else
intel_connector->get_hw_state = intel_connector_get_hw_state;
@@ -1932,7 +1927,7 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
* 0xd. Failure to do so will result in spurious interrupts being
* generated on the port when a cable is not attached.
*/
- if (IS_G4X(dev) && !IS_GM45(dev)) {
+ if (IS_G4X(dev_priv) && !IS_GM45(dev_priv)) {
u32 temp = I915_READ(PEG_BAND_GAP_DATA);
I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
}
@@ -1941,6 +1936,7 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
void intel_hdmi_init(struct drm_device *dev,
i915_reg_t hdmi_reg, enum port port)
{
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_digital_port *intel_dig_port;
struct intel_encoder *intel_encoder;
struct intel_connector *intel_connector;
@@ -1961,7 +1957,7 @@ void intel_hdmi_init(struct drm_device *dev,
DRM_MODE_ENCODER_TMDS, "HDMI %c", port_name(port));
intel_encoder->compute_config = intel_hdmi_compute_config;
- if (HAS_PCH_SPLIT(dev)) {
+ if (HAS_PCH_SPLIT(dev_priv)) {
intel_encoder->disable = pch_disable_hdmi;
intel_encoder->post_disable = pch_post_disable_hdmi;
} else {
@@ -1969,29 +1965,30 @@ void intel_hdmi_init(struct drm_device *dev,
}
intel_encoder->get_hw_state = intel_hdmi_get_hw_state;
intel_encoder->get_config = intel_hdmi_get_config;
- if (IS_CHERRYVIEW(dev)) {
+ if (IS_CHERRYVIEW(dev_priv)) {
intel_encoder->pre_pll_enable = chv_hdmi_pre_pll_enable;
intel_encoder->pre_enable = chv_hdmi_pre_enable;
intel_encoder->enable = vlv_enable_hdmi;
intel_encoder->post_disable = chv_hdmi_post_disable;
intel_encoder->post_pll_disable = chv_hdmi_post_pll_disable;
- } else if (IS_VALLEYVIEW(dev)) {
+ } else if (IS_VALLEYVIEW(dev_priv)) {
intel_encoder->pre_pll_enable = vlv_hdmi_pre_pll_enable;
intel_encoder->pre_enable = vlv_hdmi_pre_enable;
intel_encoder->enable = vlv_enable_hdmi;
intel_encoder->post_disable = vlv_hdmi_post_disable;
} else {
intel_encoder->pre_enable = intel_hdmi_pre_enable;
- if (HAS_PCH_CPT(dev))
+ if (HAS_PCH_CPT(dev_priv))
intel_encoder->enable = cpt_enable_hdmi;
- else if (HAS_PCH_IBX(dev))
+ else if (HAS_PCH_IBX(dev_priv))
intel_encoder->enable = ibx_enable_hdmi;
else
intel_encoder->enable = g4x_enable_hdmi;
}
intel_encoder->type = INTEL_OUTPUT_HDMI;
- if (IS_CHERRYVIEW(dev)) {
+ intel_encoder->port = port;
+ if (IS_CHERRYVIEW(dev_priv)) {
if (port == PORT_D)
intel_encoder->crtc_mask = 1 << 2;
else
@@ -2005,7 +2002,7 @@ void intel_hdmi_init(struct drm_device *dev,
* to work on real hardware. And since g4x can send infoframes to
* only one port anyway, nothing is lost by allowing it.
*/
- if (IS_G4X(dev))
+ if (IS_G4X(dev_priv))
intel_encoder->cloneable |= 1 << INTEL_OUTPUT_HDMI;
intel_dig_port->port = port;
diff --git a/drivers/gpu/drm/i915/intel_hotplug.c b/drivers/gpu/drm/i915/intel_hotplug.c
index 334d47b5811a..3d546c019de0 100644
--- a/drivers/gpu/drm/i915/intel_hotplug.c
+++ b/drivers/gpu/drm/i915/intel_hotplug.c
@@ -501,7 +501,7 @@ static void i915_hpd_poll_init_work(struct work_struct *work)
if (intel_connector->mst_port)
continue;
- if (!connector->polled && I915_HAS_HOTPLUG(dev) &&
+ if (!connector->polled && I915_HAS_HOTPLUG(dev_priv) &&
intel_connector->encoder->hpd_pin > HPD_NONE) {
connector->polled = enabled ?
DRM_CONNECTOR_POLL_CONNECT |
diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c
index 79aab9ad6faa..83f260bb4eef 100644
--- a/drivers/gpu/drm/i915/intel_i2c.c
+++ b/drivers/gpu/drm/i915/intel_i2c.c
@@ -138,11 +138,10 @@ static void intel_i2c_quirk_set(struct drm_i915_private *dev_priv, bool enable)
static u32 get_reserved(struct intel_gmbus *bus)
{
struct drm_i915_private *dev_priv = bus->dev_priv;
- struct drm_device *dev = &dev_priv->drm;
u32 reserved = 0;
/* On most chips, these bits must be preserved in software. */
- if (!IS_I830(dev) && !IS_845G(dev))
+ if (!IS_I830(dev_priv) && !IS_845G(dev_priv))
reserved = I915_READ_NOTRACE(bus->gpio_reg) &
(GPIO_DATA_PULLUP_DISABLE |
GPIO_CLOCK_PULLUP_DISABLE);
@@ -468,13 +467,9 @@ do_gmbus_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs, int num)
struct intel_gmbus,
adapter);
struct drm_i915_private *dev_priv = bus->dev_priv;
- const unsigned int fw =
- intel_uncore_forcewake_for_reg(dev_priv, GMBUS0,
- FW_REG_READ | FW_REG_WRITE);
int i = 0, inc, try = 0;
int ret = 0;
- intel_uncore_forcewake_get(dev_priv, fw);
retry:
I915_WRITE_FW(GMBUS0, bus->reg0);
@@ -576,7 +571,6 @@ timeout:
ret = -EAGAIN;
out:
- intel_uncore_forcewake_put(dev_priv, fw);
return ret;
}
@@ -633,10 +627,10 @@ int intel_setup_gmbus(struct drm_device *dev)
unsigned int pin;
int ret;
- if (HAS_PCH_NOP(dev))
+ if (HAS_PCH_NOP(dev_priv))
return 0;
- if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
+ if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
dev_priv->gpio_mmio_base = VLV_DISPLAY_BASE;
else if (!HAS_GMCH_DISPLAY(dev_priv))
dev_priv->gpio_mmio_base =
@@ -674,7 +668,7 @@ int intel_setup_gmbus(struct drm_device *dev)
bus->reg0 = pin | GMBUS_RATE_100KHZ;
/* gmbus seems to be broken on i830 */
- if (IS_I830(dev))
+ if (IS_I830(dev_priv))
bus->force_bit = 1;
intel_gpio_setup(bus, pin);
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 0adb879833ff..d4961fa20c73 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -275,8 +275,7 @@ logical_ring_init_platform_invariants(struct intel_engine_cs *engine)
struct drm_i915_private *dev_priv = engine->i915;
engine->disable_lite_restore_wa =
- (IS_SKL_REVID(dev_priv, 0, SKL_REVID_B0) ||
- IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) &&
+ IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1) &&
(engine->id == VCS || engine->id == VCS2);
engine->ctx_desc_template = GEN8_CTX_VALID;
@@ -366,7 +365,7 @@ static u64 execlists_update_context(struct drm_i915_gem_request *rq)
struct i915_hw_ppgtt *ppgtt = rq->ctx->ppgtt;
u32 *reg_state = ce->lrc_reg_state;
- reg_state[CTX_RING_TAIL+1] = intel_ring_offset(rq->ring, rq->tail);
+ reg_state[CTX_RING_TAIL+1] = rq->tail;
/* True 32b PPGTT with dynamic page allocation: update PDP
* registers and point the unallocated PDPs to scratch page.
@@ -433,15 +432,17 @@ static bool can_merge_ctx(const struct i915_gem_context *prev,
static void execlists_dequeue(struct intel_engine_cs *engine)
{
- struct drm_i915_gem_request *cursor, *last;
+ struct drm_i915_gem_request *last;
struct execlist_port *port = engine->execlist_port;
+ unsigned long flags;
+ struct rb_node *rb;
bool submit = false;
last = port->request;
if (last)
/* WaIdleLiteRestore:bdw,skl
* Apply the wa NOOPs to prevent ring:HEAD == req:TAIL
- * as we resubmit the request. See gen8_emit_request()
+ * as we resubmit the request. See gen8_emit_breadcrumb()
* for where we prepare the padding after the end of the
* request.
*/
@@ -470,8 +471,12 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
* and context switches) submission.
*/
- spin_lock(&engine->execlist_lock);
- list_for_each_entry(cursor, &engine->execlist_queue, execlist_link) {
+ spin_lock_irqsave(&engine->timeline->lock, flags);
+ rb = engine->execlist_first;
+ while (rb) {
+ struct drm_i915_gem_request *cursor =
+ rb_entry(rb, typeof(*cursor), priotree.node);
+
/* Can we combine this request with the current port? It has to
* be the same context/ringbuffer and not have any exceptions
* (e.g. GVT saying never to combine contexts).
@@ -494,7 +499,8 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
* context (even though a different request) to
* the second port.
*/
- if (ctx_single_port_submission(cursor->ctx))
+ if (ctx_single_port_submission(last->ctx) ||
+ ctx_single_port_submission(cursor->ctx))
break;
GEM_BUG_ON(last->ctx == cursor->ctx);
@@ -502,17 +508,30 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
i915_gem_request_assign(&port->request, last);
port++;
}
+
+ rb = rb_next(rb);
+ rb_erase(&cursor->priotree.node, &engine->execlist_queue);
+ RB_CLEAR_NODE(&cursor->priotree.node);
+ cursor->priotree.priority = INT_MAX;
+
+ /* We keep the previous context alive until we retire the
+ * following request. This ensures that the context object
+ * is still pinned for any residual writes the HW makes into it
+ * on the context switch into the next object following the
+ * breadcrumb. Otherwise, we may retire the context too early.
+ */
+ cursor->previous_context = engine->last_context;
+ engine->last_context = cursor->ctx;
+
+ __i915_gem_request_submit(cursor);
last = cursor;
submit = true;
}
if (submit) {
- /* Decouple all the requests submitted from the queue */
- engine->execlist_queue.next = &cursor->execlist_link;
- cursor->execlist_link.prev = &engine->execlist_queue;
-
i915_gem_request_assign(&port->request, last);
+ engine->execlist_first = rb;
}
- spin_unlock(&engine->execlist_lock);
+ spin_unlock_irqrestore(&engine->timeline->lock, flags);
if (submit)
execlists_submit_ports(engine);
@@ -523,6 +542,28 @@ static bool execlists_elsp_idle(struct intel_engine_cs *engine)
return !engine->execlist_port[0].request;
}
+/**
+ * intel_execlists_idle() - Determine if all engine submission ports are idle
+ * @dev_priv: i915 device private
+ *
+ * Return true if there are no requests pending on any of the submission ports
+ * of any engine.
+ */
+bool intel_execlists_idle(struct drm_i915_private *dev_priv)
+{
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+
+ if (!i915.enable_execlists)
+ return true;
+
+ for_each_engine(engine, dev_priv, id)
+ if (!execlists_elsp_idle(engine))
+ return false;
+
+ return true;
+}
+
static bool execlists_elsp_ready(struct intel_engine_cs *engine)
{
int port;
@@ -593,18 +634,147 @@ static void intel_lrc_irq_handler(unsigned long data)
intel_uncore_forcewake_put(dev_priv, engine->fw_domains);
}
+static bool insert_request(struct i915_priotree *pt, struct rb_root *root)
+{
+ struct rb_node **p, *rb;
+ bool first = true;
+
+ /* most positive priority is scheduled first; equal priorities are FIFO */
+ rb = NULL;
+ p = &root->rb_node;
+ while (*p) {
+ struct i915_priotree *pos;
+
+ rb = *p;
+ pos = rb_entry(rb, typeof(*pos), node);
+ if (pt->priority > pos->priority) {
+ p = &rb->rb_left;
+ } else {
+ p = &rb->rb_right;
+ first = false;
+ }
+ }
+ rb_link_node(&pt->node, rb, p);
+ rb_insert_color(&pt->node, root);
+
+ return first;
+}
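
For illustration, the comparison in insert_request() can be mirrored with a sorted list: a strictly greater priority sorts earlier, while an equal priority falls behind existing entries, which is what preserves FIFO order among equal-priority requests. A standalone sketch (not driver code, modelled with a linked list instead of an rbtree):

	/* Standalone illustration: ">=" in the walk keeps ties FIFO. */
	#include <assert.h>
	#include <stddef.h>

	struct node {
		int prio, seq;
		struct node *next;
	};

	static struct node *insert(struct node *head, struct node *n)
	{
		struct node **p = &head;

		while (*p && (*p)->prio >= n->prio)	/* >= keeps ties FIFO */
			p = &(*p)->next;
		n->next = *p;
		*p = n;
		return head;
	}

	int main(void)
	{
		struct node a = { 0, 1 }, b = { 0, 2 }, c = { 5, 3 };
		struct node *q = NULL;

		q = insert(q, &a);
		q = insert(q, &b);
		q = insert(q, &c);
		assert(q == &c);			/* highest priority first */
		assert(c.next == &a && a.next == &b);	/* equal priorities: FIFO */
		return 0;
	}
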
+
static void execlists_submit_request(struct drm_i915_gem_request *request)
{
struct intel_engine_cs *engine = request->engine;
unsigned long flags;
- spin_lock_irqsave(&engine->execlist_lock, flags);
+ /* Will be called from irq-context when using foreign fences. */
+ spin_lock_irqsave(&engine->timeline->lock, flags);
- list_add_tail(&request->execlist_link, &engine->execlist_queue);
+ if (insert_request(&request->priotree, &engine->execlist_queue))
+ engine->execlist_first = &request->priotree.node;
if (execlists_elsp_idle(engine))
tasklet_hi_schedule(&engine->irq_tasklet);
- spin_unlock_irqrestore(&engine->execlist_lock, flags);
+ spin_unlock_irqrestore(&engine->timeline->lock, flags);
+}
+
+static struct intel_engine_cs *
+pt_lock_engine(struct i915_priotree *pt, struct intel_engine_cs *locked)
+{
+ struct intel_engine_cs *engine;
+
+ engine = container_of(pt,
+ struct drm_i915_gem_request,
+ priotree)->engine;
+ if (engine != locked) {
+ if (locked)
+ spin_unlock_irq(&locked->timeline->lock);
+ spin_lock_irq(&engine->timeline->lock);
+ }
+
+ return engine;
+}
+
+static void execlists_schedule(struct drm_i915_gem_request *request, int prio)
+{
+ static DEFINE_MUTEX(lock);
+ struct intel_engine_cs *engine = NULL;
+ struct i915_dependency *dep, *p;
+ struct i915_dependency stack;
+ LIST_HEAD(dfs);
+
+ if (prio <= READ_ONCE(request->priotree.priority))
+ return;
+
+ /* Need global lock to use the temporary link inside i915_dependency */
+ mutex_lock(&lock);
+
+ stack.signaler = &request->priotree;
+ list_add(&stack.dfs_link, &dfs);
+
+ /* Recursively bump all dependent priorities to match the new request.
+ *
+ * A naive approach would be to use recursion:
+ * static void update_priorities(struct i915_priotree *pt, prio) {
+ * list_for_each_entry(dep, &pt->signalers_list, signal_link)
+ * update_priorities(dep->signal, prio)
+ * insert_request(pt);
+ * }
+ * but that may have unlimited recursion depth and so runs a very
+ * real risk of overrunning the kernel stack. Instead, we build
+ * a flat list of all dependencies starting with the current request.
+ * As we walk the list of dependencies, we add all of its dependencies
+ * to the end of the list (this may include an already visited
+ * request) and continue to walk onwards onto the new dependencies. The
+ * end result is a topological list of requests in reverse order; the
+ * last element in the list is the request we must execute first.
+ */
+ list_for_each_entry_safe(dep, p, &dfs, dfs_link) {
+ struct i915_priotree *pt = dep->signaler;
+
+ list_for_each_entry(p, &pt->signalers_list, signal_link)
+ if (prio > READ_ONCE(p->signaler->priority))
+ list_move_tail(&p->dfs_link, &dfs);
+
+ p = list_next_entry(dep, dfs_link);
+ if (!RB_EMPTY_NODE(&pt->node))
+ continue;
+
+ engine = pt_lock_engine(pt, engine);
+
+ /* If it is not already in the rbtree, we can update the
+ * priority in place and skip over it (and its dependencies)
+ * if it is referenced *again* as we descend the dfs.
+ */
+ if (prio > pt->priority && RB_EMPTY_NODE(&pt->node)) {
+ pt->priority = prio;
+ list_del_init(&dep->dfs_link);
+ }
+ }
+
+ /* FIFO and depth-first replacement ensure our deps execute before us */
+ list_for_each_entry_safe_reverse(dep, p, &dfs, dfs_link) {
+ struct i915_priotree *pt = dep->signaler;
+
+ INIT_LIST_HEAD(&dep->dfs_link);
+
+ engine = pt_lock_engine(pt, engine);
+
+ if (prio <= pt->priority)
+ continue;
+
+ GEM_BUG_ON(RB_EMPTY_NODE(&pt->node));
+
+ pt->priority = prio;
+ rb_erase(&pt->node, &engine->execlist_queue);
+ if (insert_request(pt, &engine->execlist_queue))
+ engine->execlist_first = &pt->node;
+ }
+
+ if (engine)
+ spin_unlock_irq(&engine->timeline->lock);
+
+ mutex_unlock(&lock);
+
+ /* XXX Do we need to preempt to make room for us and our deps? */
}
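
The comment in execlists_schedule() describes flattening the DFS by growing a list while walking it. A simplified standalone sketch of the same pattern follows; it uses a visited flag where the driver instead moves re-encountered entries to the tail of the list, so treat it as the shape of the technique rather than the driver's exact behaviour:

	/* Standalone illustration of the flattened DFS: the list grows at
	 * the tail while the head is being walked, and a backwards walk
	 * of the result stamps dependencies before their dependents.
	 */
	#include <assert.h>
	#include <stddef.h>

	struct dep {
		struct dep *child[2];
		int visited, order;
	};

	int main(void)
	{
		struct dep leaf = { { NULL, NULL } };
		struct dep mid = { { &leaf, NULL } };
		struct dep root = { { &mid, &leaf } };
		struct dep *list[8] = { &root };
		int head = 0, tail = 1, stamp = 0;
		int i;

		root.visited = 1;
		while (head < tail) {			/* list grows as we walk */
			struct dep *d = list[head++];

			for (i = 0; i < 2; i++) {
				if (d->child[i] && !d->child[i]->visited) {
					d->child[i]->visited = 1;
					list[tail++] = d->child[i];
				}
			}
		}
		while (tail--)				/* reverse: deps stamped first */
			list[tail]->order = stamp++;

		assert(leaf.order < mid.order && mid.order < root.order);
		return 0;
	}
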
int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request)
@@ -672,46 +842,6 @@ err_unpin:
return ret;
}
-/*
- * intel_logical_ring_advance() - advance the tail and prepare for submission
- * @request: Request to advance the logical ringbuffer of.
- *
- * The tail is updated in our logical ringbuffer struct, not in the actual context. What
- * really happens during submission is that the context and current tail will be placed
- * on a queue waiting for the ELSP to be ready to accept a new context submission. At that
- * point, the tail *inside* the context is updated and the ELSP written to.
- */
-static int
-intel_logical_ring_advance(struct drm_i915_gem_request *request)
-{
- struct intel_ring *ring = request->ring;
- struct intel_engine_cs *engine = request->engine;
-
- intel_ring_advance(ring);
- request->tail = ring->tail;
-
- /*
- * Here we add two extra NOOPs as padding to avoid
- * lite restore of a context with HEAD==TAIL.
- *
- * Caller must reserve WA_TAIL_DWORDS for us!
- */
- intel_ring_emit(ring, MI_NOOP);
- intel_ring_emit(ring, MI_NOOP);
- intel_ring_advance(ring);
- request->wa_tail = ring->tail;
-
- /* We keep the previous context alive until we retire the following
- * request. This ensures that any the context object is still pinned
- * for any residual writes the HW makes into it on the context switch
- * into the next object following the breadcrumb. Otherwise, we may
- * retire the context too early.
- */
- request->previous_context = engine->last_context;
- engine->last_context = request->ctx;
- return 0;
-}
-
static int intel_lr_context_pin(struct i915_gem_context *ctx,
struct intel_engine_cs *engine)
{
@@ -745,7 +875,7 @@ static int intel_lr_context_pin(struct i915_gem_context *ctx,
ce->lrc_reg_state[CTX_RING_BUFFER_START+1] =
i915_ggtt_offset(ce->ring->vma);
- ce->state->obj->dirty = true;
+ ce->state->obj->mm.dirty = true;
/* Invalidate GuC TLB. */
if (i915.enable_guc_submission) {
@@ -853,13 +983,12 @@ static inline int gen8_emit_flush_coherentl3_wa(struct intel_engine_cs *engine,
uint32_t l3sqc4_flush = (0x40400000 | GEN8_LQSC_FLUSH_COHERENT_LINES);
/*
- * WaDisableLSQCROPERFforOCL:skl,kbl
+ * WaDisableLSQCROPERFforOCL:kbl
* This WA is implemented in skl_init_clock_gating() but since
* this batch updates GEN8_L3SQCREG4 with default value we need to
* set this bit here to retain the WA during flush.
*/
- if (IS_SKL_REVID(dev_priv, 0, SKL_REVID_E0) ||
- IS_KBL_REVID(dev_priv, 0, KBL_REVID_E0))
+ if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_E0))
l3sqc4_flush |= GEN8_LQSC_RO_PERF_DIS;
wa_ctx_emit(batch, index, (MI_STORE_REGISTER_MEM_GEN8 |
@@ -1002,9 +1131,8 @@ static int gen9_init_indirectctx_bb(struct intel_engine_cs *engine,
struct drm_i915_private *dev_priv = engine->i915;
uint32_t index = wa_ctx_start(wa_ctx, *offset, CACHELINE_DWORDS);
- /* WaDisableCtxRestoreArbitration:skl,bxt */
- if (IS_SKL_REVID(dev_priv, 0, SKL_REVID_D0) ||
- IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1))
+ /* WaDisableCtxRestoreArbitration:bxt */
+ if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1))
wa_ctx_emit(batch, index, MI_ARB_ON_OFF | MI_ARB_DISABLE);
/* WaFlushCoherentL3CacheLinesAtContextSwitch:skl,bxt */
@@ -1075,9 +1203,8 @@ static int gen9_init_perctx_bb(struct intel_engine_cs *engine,
{
uint32_t index = wa_ctx_start(wa_ctx, *offset, CACHELINE_DWORDS);
- /* WaSetDisablePixMaskCammingAndRhwoInCommonSliceChicken:skl,bxt */
- if (IS_SKL_REVID(engine->i915, 0, SKL_REVID_B0) ||
- IS_BXT_REVID(engine->i915, 0, BXT_REVID_A1)) {
+ /* WaSetDisablePixMaskCammingAndRhwoInCommonSliceChicken:bxt */
+ if (IS_BXT_REVID(engine->i915, 0, BXT_REVID_A1)) {
wa_ctx_emit(batch, index, MI_LOAD_REGISTER_IMM(1));
wa_ctx_emit_reg(batch, index, GEN9_SLICE_COMMON_ECO_CHICKEN0);
wa_ctx_emit(batch, index,
@@ -1104,9 +1231,8 @@ static int gen9_init_perctx_bb(struct intel_engine_cs *engine,
wa_ctx_emit(batch, index, MI_NOOP);
}
- /* WaDisableCtxRestoreArbitration:skl,bxt */
- if (IS_SKL_REVID(engine->i915, 0, SKL_REVID_D0) ||
- IS_BXT_REVID(engine->i915, 0, BXT_REVID_A1))
+ /* WaDisableCtxRestoreArbitration:bxt */
+ if (IS_BXT_REVID(engine->i915, 0, BXT_REVID_A1))
wa_ctx_emit(batch, index, MI_ARB_ON_OFF | MI_ARB_ENABLE);
wa_ctx_emit(batch, index, MI_BATCH_BUFFER_END);
@@ -1250,8 +1376,12 @@ static int gen8_init_common_ring(struct intel_engine_cs *engine)
intel_engine_init_hangcheck(engine);
- if (!execlists_elsp_idle(engine))
+ /* After a GPU reset, we may have requests to replay */
+ if (!execlists_elsp_idle(engine)) {
+ engine->execlist_port[0].count = 0;
+ engine->execlist_port[1].count = 0;
execlists_submit_ports(engine);
+ }
return 0;
}
@@ -1326,10 +1456,7 @@ static void reset_common_ring(struct intel_engine_cs *engine,
memset(&port[1], 0, sizeof(port[1]));
}
- /* CS is stopped, and we will resubmit both ports on resume */
GEM_BUG_ON(request->ctx != port[0].request->ctx);
- port[0].count = 0;
- port[1].count = 0;
/* Reset WaIdleLiteRestore:bdw,skl as well */
request->tail = request->wa_tail - WA_TAIL_DWORDS * sizeof(u32);
@@ -1570,39 +1697,35 @@ static void bxt_a_seqno_barrier(struct intel_engine_cs *engine)
* used as a workaround for not being allowed to do lite
* restore with HEAD==TAIL (WaIdleLiteRestore).
*/
-
-static int gen8_emit_request(struct drm_i915_gem_request *request)
+static void gen8_emit_wa_tail(struct drm_i915_gem_request *request, u32 *out)
{
- struct intel_ring *ring = request->ring;
- int ret;
-
- ret = intel_ring_begin(request, 6 + WA_TAIL_DWORDS);
- if (ret)
- return ret;
+ *out++ = MI_NOOP;
+ *out++ = MI_NOOP;
+ request->wa_tail = intel_ring_offset(request->ring, out);
+}
+static void gen8_emit_breadcrumb(struct drm_i915_gem_request *request,
+ u32 *out)
+{
/* w/a: bit 5 needs to be zero for MI_FLUSH_DW address. */
BUILD_BUG_ON(I915_GEM_HWS_INDEX_ADDR & (1 << 5));
- intel_ring_emit(ring, (MI_FLUSH_DW + 1) | MI_FLUSH_DW_OP_STOREDW);
- intel_ring_emit(ring,
- intel_hws_seqno_address(request->engine) |
- MI_FLUSH_DW_USE_GTT);
- intel_ring_emit(ring, 0);
- intel_ring_emit(ring, request->fence.seqno);
- intel_ring_emit(ring, MI_USER_INTERRUPT);
- intel_ring_emit(ring, MI_NOOP);
- return intel_logical_ring_advance(request);
-}
+ *out++ = (MI_FLUSH_DW + 1) | MI_FLUSH_DW_OP_STOREDW;
+ *out++ = intel_hws_seqno_address(request->engine) | MI_FLUSH_DW_USE_GTT;
+ *out++ = 0;
+ *out++ = request->global_seqno;
+ *out++ = MI_USER_INTERRUPT;
+ *out++ = MI_NOOP;
+ request->tail = intel_ring_offset(request->ring, out);
-static int gen8_emit_request_render(struct drm_i915_gem_request *request)
-{
- struct intel_ring *ring = request->ring;
- int ret;
+ gen8_emit_wa_tail(request, out);
+}
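
As an aside on the new emission style above: commands are now written through a raw u32 cursor and the tail is derived from the final cursor position, instead of going through intel_ring_emit() per dword. A minimal userspace illustration of that cursor style; the opcode values and names here are invented for the example:

	/* Standalone illustration of cursor-style emission: the caller
	 * reserves a window of dwords, the emitter bumps a raw pointer,
	 * and the new tail falls out of the cursor position.
	 */
	#include <assert.h>
	#include <stdint.h>

	#define FAKE_FLUSH	0x10400002u
	#define FAKE_NOOP	0x0u

	static uint32_t emit_breadcrumb(const uint32_t *base, uint32_t *out,
					uint32_t seqno)
	{
		*out++ = FAKE_FLUSH;
		*out++ = seqno;
		*out++ = FAKE_NOOP;
		return (uint32_t)(out - base) * sizeof(uint32_t);	/* new tail */
	}

	int main(void)
	{
		uint32_t ring[16] = { 0 };
		uint32_t tail = emit_breadcrumb(ring, ring, 42);

		assert(tail == 12);
		assert(ring[1] == 42);
		return 0;
	}
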
- ret = intel_ring_begin(request, 8 + WA_TAIL_DWORDS);
- if (ret)
- return ret;
+static const int gen8_emit_breadcrumb_sz = 6 + WA_TAIL_DWORDS;
+static void gen8_emit_breadcrumb_render(struct drm_i915_gem_request *request,
+ u32 *out)
+{
/* We're using qword write, seqno should be aligned to 8 bytes. */
BUILD_BUG_ON(I915_GEM_HWS_INDEX & 1);
@@ -1610,21 +1733,24 @@ static int gen8_emit_request_render(struct drm_i915_gem_request *request)
* need a prior CS_STALL, which is emitted by the flush
* following the batch.
*/
- intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(6));
- intel_ring_emit(ring,
- (PIPE_CONTROL_GLOBAL_GTT_IVB |
- PIPE_CONTROL_CS_STALL |
- PIPE_CONTROL_QW_WRITE));
- intel_ring_emit(ring, intel_hws_seqno_address(request->engine));
- intel_ring_emit(ring, 0);
- intel_ring_emit(ring, i915_gem_request_get_seqno(request));
+ *out++ = GFX_OP_PIPE_CONTROL(6);
+ *out++ = (PIPE_CONTROL_GLOBAL_GTT_IVB |
+ PIPE_CONTROL_CS_STALL |
+ PIPE_CONTROL_QW_WRITE);
+ *out++ = intel_hws_seqno_address(request->engine);
+ *out++ = 0;
+ *out++ = request->global_seqno;
/* We're thrashing one dword of HWS. */
- intel_ring_emit(ring, 0);
- intel_ring_emit(ring, MI_USER_INTERRUPT);
- intel_ring_emit(ring, MI_NOOP);
- return intel_logical_ring_advance(request);
+ *out++ = 0;
+ *out++ = MI_USER_INTERRUPT;
+ *out++ = MI_NOOP;
+ request->tail = intel_ring_offset(request->ring, out);
+
+ gen8_emit_wa_tail(request, out);
}
+static const int gen8_emit_breadcrumb_render_sz = 8 + WA_TAIL_DWORDS;
+
static int gen8_init_rcs_context(struct drm_i915_gem_request *req)
{
int ret;
@@ -1641,7 +1767,7 @@ static int gen8_init_rcs_context(struct drm_i915_gem_request *req)
if (ret)
DRM_ERROR("MOCS failed to program: expect performance issues.\n");
- return i915_gem_render_state_init(req);
+ return i915_gem_render_state_emit(req);
}
/**
@@ -1652,9 +1778,6 @@ void intel_logical_ring_cleanup(struct intel_engine_cs *engine)
{
struct drm_i915_private *dev_priv;
- if (!intel_engine_initialized(engine))
- return;
-
/*
* Tasklet cannot be active at this point due intel_mark_active/idle
* so this is just for documentation.
@@ -1681,14 +1804,19 @@ void intel_logical_ring_cleanup(struct intel_engine_cs *engine)
lrc_destroy_wa_ctx_obj(engine);
engine->i915 = NULL;
+ dev_priv->engine[engine->id] = NULL;
+ kfree(engine);
}
void intel_execlists_enable_submission(struct drm_i915_private *dev_priv)
{
struct intel_engine_cs *engine;
+ enum intel_engine_id id;
- for_each_engine(engine, dev_priv)
+ for_each_engine(engine, dev_priv, id) {
engine->submit_request = execlists_submit_request;
+ engine->schedule = execlists_schedule;
+ }
}
static void
@@ -1698,8 +1826,10 @@ logical_ring_default_vfuncs(struct intel_engine_cs *engine)
engine->init_hw = gen8_init_common_ring;
engine->reset_hw = reset_common_ring;
engine->emit_flush = gen8_emit_flush;
- engine->emit_request = gen8_emit_request;
+ engine->emit_breadcrumb = gen8_emit_breadcrumb;
+ engine->emit_breadcrumb_sz = gen8_emit_breadcrumb_sz;
engine->submit_request = execlists_submit_request;
+ engine->schedule = execlists_schedule;
engine->irq_enable = gen8_logical_ring_enable_irq;
engine->irq_disable = gen8_logical_ring_disable_irq;
@@ -1820,7 +1950,8 @@ int logical_render_ring_init(struct intel_engine_cs *engine)
engine->init_hw = gen8_init_render_ring;
engine->init_context = gen8_init_rcs_context;
engine->emit_flush = gen8_emit_flush_render;
- engine->emit_request = gen8_emit_request_render;
+ engine->emit_breadcrumb = gen8_emit_breadcrumb_render;
+ engine->emit_breadcrumb_sz = gen8_emit_breadcrumb_render_sz;
ret = intel_engine_create_scratch(engine, 4096);
if (ret)
@@ -1837,12 +1968,7 @@ int logical_render_ring_init(struct intel_engine_cs *engine)
ret);
}
- ret = logical_ring_init(engine);
- if (ret) {
- lrc_destroy_wa_ctx_obj(engine);
- }
-
- return ret;
+ return logical_ring_init(engine);
}
int logical_xcs_ring_init(struct intel_engine_cs *engine)
@@ -1945,7 +2071,7 @@ static void execlists_init_reg_state(u32 *reg_state,
RING_START(engine->mmio_base), 0);
ASSIGN_CTX_REG(reg_state, CTX_RING_BUFFER_CONTROL,
RING_CTL(engine->mmio_base),
- ((ring->size - PAGE_SIZE) & RING_NR_PAGES) | RING_VALID);
+ RING_CTL_SIZE(ring->size) | RING_VALID);
ASSIGN_CTX_REG(reg_state, CTX_BB_HEAD_U,
RING_BBADDR_UDW(engine->mmio_base), 0);
ASSIGN_CTX_REG(reg_state, CTX_BB_HEAD_L,
@@ -2046,7 +2172,7 @@ populate_lr_context(struct i915_gem_context *ctx,
DRM_DEBUG_DRIVER("Could not map object pages! (%d)\n", ret);
return ret;
}
- ctx_obj->dirty = true;
+ ctx_obj->mm.dirty = true;
/* The second page of the context object contains some fields which must
* be set up prior to the first execution. */
@@ -2153,30 +2279,43 @@ error_deref_obj:
void intel_lr_context_resume(struct drm_i915_private *dev_priv)
{
- struct i915_gem_context *ctx = dev_priv->kernel_context;
struct intel_engine_cs *engine;
+ struct i915_gem_context *ctx;
+ enum intel_engine_id id;
+
+ /* Because we emit WA_TAIL_DWORDS, there may be a disparity
+ * between our bookkeeping in ce->ring->head and ce->ring->tail and
+ * that stored in the context image. As we only write new commands from
+ * ce->ring->tail onwards, everything before that is junk. If the GPU
+ * starts reading from its RING_HEAD from the context, it may try to
+ * execute that junk and die.
+ *
+ * So to avoid that we reset the context images upon resume. For
+ * simplicity, we just zero everything out.
+ */
+ list_for_each_entry(ctx, &dev_priv->context_list, link) {
+ for_each_engine(engine, dev_priv, id) {
+ struct intel_context *ce = &ctx->engine[engine->id];
+ u32 *reg;
- for_each_engine(engine, dev_priv) {
- struct intel_context *ce = &ctx->engine[engine->id];
- void *vaddr;
- uint32_t *reg_state;
-
- if (!ce->state)
- continue;
-
- vaddr = i915_gem_object_pin_map(ce->state->obj, I915_MAP_WB);
- if (WARN_ON(IS_ERR(vaddr)))
- continue;
+ if (!ce->state)
+ continue;
- reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE;
+ reg = i915_gem_object_pin_map(ce->state->obj,
+ I915_MAP_WB);
+ if (WARN_ON(IS_ERR(reg)))
+ continue;
- reg_state[CTX_RING_HEAD+1] = 0;
- reg_state[CTX_RING_TAIL+1] = 0;
+ reg += LRC_STATE_PN * PAGE_SIZE / sizeof(*reg);
+ reg[CTX_RING_HEAD+1] = 0;
+ reg[CTX_RING_TAIL+1] = 0;
- ce->state->obj->dirty = true;
- i915_gem_object_unpin_map(ce->state->obj);
+ ce->state->obj->mm.dirty = true;
+ i915_gem_object_unpin_map(ce->state->obj);
- ce->ring->head = 0;
- ce->ring->tail = 0;
+ ce->ring->head = ce->ring->tail = 0;
+ ce->ring->last_retired_head = -1;
+ intel_ring_update_space(ce->ring);
+ }
}
}
diff --git a/drivers/gpu/drm/i915/intel_lrc.h b/drivers/gpu/drm/i915/intel_lrc.h
index 4fed8165f98a..c1f546180ba2 100644
--- a/drivers/gpu/drm/i915/intel_lrc.h
+++ b/drivers/gpu/drm/i915/intel_lrc.h
@@ -95,5 +95,6 @@ uint64_t intel_lr_context_descriptor(struct i915_gem_context *ctx,
int intel_sanitize_enable_execlists(struct drm_i915_private *dev_priv,
int enable_execlists);
void intel_execlists_enable_submission(struct drm_i915_private *dev_priv);
+bool intel_execlists_idle(struct drm_i915_private *dev_priv);
#endif /* _INTEL_LRC_H_ */
diff --git a/drivers/gpu/drm/i915/intel_lspcon.c b/drivers/gpu/drm/i915/intel_lspcon.c
new file mode 100644
index 000000000000..daa523410953
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_lspcon.c
@@ -0,0 +1,185 @@
+/*
+ * Copyright © 2016 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ *
+ */
+#include <drm/drm_edid.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_dp_dual_mode_helper.h>
+#include "intel_drv.h"
+
+static struct intel_dp *lspcon_to_intel_dp(struct intel_lspcon *lspcon)
+{
+ struct intel_digital_port *dig_port =
+ container_of(lspcon, struct intel_digital_port, lspcon);
+
+ return &dig_port->dp;
+}
+
+static enum drm_lspcon_mode lspcon_get_current_mode(struct intel_lspcon *lspcon)
+{
+ enum drm_lspcon_mode current_mode = DRM_LSPCON_MODE_INVALID;
+ struct i2c_adapter *adapter = &lspcon_to_intel_dp(lspcon)->aux.ddc;
+
+ if (drm_lspcon_get_mode(adapter, &current_mode))
+ DRM_ERROR("Error reading LSPCON mode\n");
+ else
+ DRM_DEBUG_KMS("Current LSPCON mode %s\n",
+ current_mode == DRM_LSPCON_MODE_PCON ? "PCON" : "LS");
+ return current_mode;
+}
+
+static int lspcon_change_mode(struct intel_lspcon *lspcon,
+ enum drm_lspcon_mode mode, bool force)
+{
+ int err;
+ enum drm_lspcon_mode current_mode;
+ struct i2c_adapter *adapter = &lspcon_to_intel_dp(lspcon)->aux.ddc;
+
+ err = drm_lspcon_get_mode(adapter, &current_mode);
+ if (err) {
+ DRM_ERROR("Error reading LSPCON mode\n");
+ return err;
+ }
+
+ if (current_mode == mode) {
+ DRM_DEBUG_KMS("Current mode = desired LSPCON mode\n");
+ return 0;
+ }
+
+ err = drm_lspcon_set_mode(adapter, mode);
+ if (err < 0) {
+ DRM_ERROR("LSPCON mode change failed\n");
+ return err;
+ }
+
+ lspcon->mode = mode;
+ DRM_DEBUG_KMS("LSPCON mode changed done\n");
+ return 0;
+}
+
+static bool lspcon_probe(struct intel_lspcon *lspcon)
+{
+ enum drm_dp_dual_mode_type adaptor_type;
+ struct i2c_adapter *adapter = &lspcon_to_intel_dp(lspcon)->aux.ddc;
+
+ /* Let's probe the adaptor and check its type */
+ adaptor_type = drm_dp_dual_mode_detect(adapter);
+ if (adaptor_type != DRM_DP_DUAL_MODE_LSPCON) {
+ DRM_DEBUG_KMS("No LSPCON detected, found %s\n",
+ drm_dp_get_dual_mode_type_name(adaptor_type));
+ return false;
+ }
+
+ /* Yay ... got an LSPCON device */
+ DRM_DEBUG_KMS("LSPCON detected\n");
+ lspcon->mode = lspcon_get_current_mode(lspcon);
+ lspcon->active = true;
+ return true;
+}
+
+static void lspcon_resume_in_pcon_wa(struct intel_lspcon *lspcon)
+{
+ struct intel_dp *intel_dp = lspcon_to_intel_dp(lspcon);
+ unsigned long start = jiffies;
+
+ if (!lspcon->desc_valid)
+ return;
+
+ while (1) {
+ struct intel_dp_desc desc;
+
+ /*
+ * The w/a only applies in PCON mode and we don't expect any
+ * AUX errors.
+ */
+ if (!__intel_dp_read_desc(intel_dp, &desc))
+ return;
+
+ if (!memcmp(&intel_dp->desc, &desc, sizeof(desc))) {
+ DRM_DEBUG_KMS("LSPCON recovering in PCON mode after %u ms\n",
+ jiffies_to_msecs(jiffies - start));
+ return;
+ }
+
+ if (time_after(jiffies, start + msecs_to_jiffies(1000)))
+ break;
+
+ usleep_range(10000, 15000);
+ }
+
+ DRM_DEBUG_KMS("LSPCON DP descriptor mismatch after resume\n");
+}
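
The loop above is a bounded poll: re-read the descriptor, bail out early on success, and give up after one second with 10-15 ms sleeps in between. A standalone userspace sketch of the same shape, with CLOCK_MONOTONIC standing in for jiffies and a trivial readiness check standing in for the descriptor comparison:

	/* Standalone illustration of a bounded poll-with-timeout loop. */
	#include <stdbool.h>
	#include <time.h>

	static long elapsed_ms(const struct timespec *start)
	{
		struct timespec now;

		clock_gettime(CLOCK_MONOTONIC, &now);
		return (now.tv_sec - start->tv_sec) * 1000 +
		       (now.tv_nsec - start->tv_nsec) / 1000000;
	}

	static bool poll_until_ready(bool (*ready)(void), long timeout_ms)
	{
		struct timespec start;
		struct timespec delay = { 0, 10 * 1000 * 1000 };	/* ~10 ms */

		clock_gettime(CLOCK_MONOTONIC, &start);
		do {
			if (ready())
				return true;	/* success: stop polling early */
			nanosleep(&delay, NULL);
		} while (elapsed_ms(&start) < timeout_ms);
		return false;			/* caller reports the timeout */
	}

	static bool ready_now(void)
	{
		return true;
	}

	int main(void)
	{
		return poll_until_ready(ready_now, 1000) ? 0 : 1;
	}
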
+
+void lspcon_resume(struct intel_lspcon *lspcon)
+{
+ lspcon_resume_in_pcon_wa(lspcon);
+
+ if (lspcon_change_mode(lspcon, DRM_LSPCON_MODE_PCON, true))
+ DRM_ERROR("LSPCON resume failed\n");
+ else
+ DRM_DEBUG_KMS("LSPCON resume success\n");
+}
+
+bool lspcon_init(struct intel_digital_port *intel_dig_port)
+{
+ struct intel_dp *dp = &intel_dig_port->dp;
+ struct intel_lspcon *lspcon = &intel_dig_port->lspcon;
+ struct drm_device *dev = intel_dig_port->base.base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+
+ if (!IS_GEN9(dev_priv)) {
+ DRM_ERROR("LSPCON is supported on GEN9 only\n");
+ return false;
+ }
+
+ lspcon->active = false;
+ lspcon->mode = DRM_LSPCON_MODE_INVALID;
+
+ if (!lspcon_probe(lspcon)) {
+ DRM_ERROR("Failed to probe lspcon\n");
+ return false;
+ }
+
+ /*
+ * In the SW state machine, let's put LSPCON in PCON mode only.
+ * This way, it will work with both HDMI 1.4 and HDMI 2.0 sinks.
+ */
+ if (lspcon->active && lspcon->mode != DRM_LSPCON_MODE_PCON) {
+ if (lspcon_change_mode(lspcon, DRM_LSPCON_MODE_PCON,
+ true) < 0) {
+ DRM_ERROR("LSPCON mode change to PCON failed\n");
+ return false;
+ }
+ }
+
+ if (!intel_dp_read_dpcd(dp)) {
+ DRM_ERROR("LSPCON DPCD read failed\n");
+ return false;
+ }
+
+ lspcon->desc_valid = intel_dp_read_desc(dp);
+
+ DRM_DEBUG_KMS("Success: LSPCON init\n");
+ return true;
+}
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index e1d47d51ea47..d12ef0047d49 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -106,7 +106,7 @@ static bool intel_lvds_get_hw_state(struct intel_encoder *encoder,
if (!(tmp & LVDS_PORT_EN))
goto out;
- if (HAS_PCH_CPT(dev))
+ if (HAS_PCH_CPT(dev_priv))
*pipe = PORT_TO_PIPE_CPT(tmp);
else
*pipe = PORT_TO_PIPE(tmp);
@@ -122,8 +122,7 @@ out:
static void intel_lvds_get_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
{
- struct drm_device *dev = encoder->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
u32 tmp, flags = 0;
@@ -139,12 +138,12 @@ static void intel_lvds_get_config(struct intel_encoder *encoder,
pipe_config->base.adjusted_mode.flags |= flags;
- if (INTEL_INFO(dev)->gen < 5)
+ if (INTEL_GEN(dev_priv) < 5)
pipe_config->gmch_pfit.lvds_border_bits =
tmp & LVDS_BORDER_ENABLE;
/* gen2/3 store dither state in pfit control, needs to match */
- if (INTEL_INFO(dev)->gen < 4) {
+ if (INTEL_GEN(dev_priv) < 4) {
tmp = I915_READ(PFIT_CONTROL);
pipe_config->gmch_pfit.control |= tmp & PANEL_8TO6_DITHER_ENABLE;
@@ -396,7 +395,7 @@ static bool intel_lvds_compute_config(struct intel_encoder *intel_encoder,
struct intel_crtc_state *pipe_config,
struct drm_connector_state *conn_state)
{
- struct drm_device *dev = intel_encoder->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(intel_encoder->base.dev);
struct intel_lvds_encoder *lvds_encoder =
to_lvds_encoder(&intel_encoder->base);
struct intel_connector *intel_connector =
@@ -406,7 +405,7 @@ static bool intel_lvds_compute_config(struct intel_encoder *intel_encoder,
unsigned int lvds_bpp;
/* Should never happen!! */
- if (INTEL_INFO(dev)->gen < 4 && intel_crtc->pipe == 0) {
+ if (INTEL_GEN(dev_priv) < 4 && intel_crtc->pipe == 0) {
DRM_ERROR("Can't support LVDS on pipe A\n");
return false;
}
@@ -431,7 +430,7 @@ static bool intel_lvds_compute_config(struct intel_encoder *intel_encoder,
intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
adjusted_mode);
- if (HAS_PCH_SPLIT(dev)) {
+ if (HAS_PCH_SPLIT(dev_priv)) {
pipe_config->has_pch_encoder = true;
intel_pch_panel_fitting(intel_crtc, pipe_config,
@@ -566,7 +565,7 @@ static int intel_lid_notify(struct notifier_block *nb, unsigned long val,
* and as part of the cleanup in the hw state restore we also redisable
* the vga plane.
*/
- if (!HAS_PCH_SPLIT(dev))
+ if (!HAS_PCH_SPLIT(dev_priv))
intel_display_resume(dev);
dev_priv->modeset_restore = MODESET_DONE;
@@ -949,16 +948,17 @@ static bool compute_is_dual_link_lvds(struct intel_lvds_encoder *lvds_encoder)
return (val & LVDS_CLKB_POWER_MASK) == LVDS_CLKB_POWER_UP;
}
-static bool intel_lvds_supported(struct drm_device *dev)
+static bool intel_lvds_supported(struct drm_i915_private *dev_priv)
{
/* With the introduction of the PCH we gained a dedicated
* LVDS presence pin, use it. */
- if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
+ if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv))
return true;
/* Otherwise LVDS was only attached to mobile products,
* except for the inglorious 830gm */
- if (INTEL_INFO(dev)->gen <= 4 && IS_MOBILE(dev) && !IS_I830(dev))
+ if (INTEL_GEN(dev_priv) <= 4 &&
+ IS_MOBILE(dev_priv) && !IS_I830(dev_priv))
return true;
return false;
@@ -984,27 +984,27 @@ void intel_lvds_init(struct drm_device *dev)
struct drm_display_mode *fixed_mode = NULL;
struct drm_display_mode *downclock_mode = NULL;
struct edid *edid;
- struct drm_crtc *crtc;
+ struct intel_crtc *crtc;
i915_reg_t lvds_reg;
u32 lvds;
int pipe;
u8 pin;
- if (!intel_lvds_supported(dev))
+ if (!intel_lvds_supported(dev_priv))
return;
/* Skip init on machines we know falsely report LVDS */
if (dmi_check_system(intel_no_lvds))
return;
- if (HAS_PCH_SPLIT(dev))
+ if (HAS_PCH_SPLIT(dev_priv))
lvds_reg = PCH_LVDS;
else
lvds_reg = LVDS;
lvds = I915_READ(lvds_reg);
- if (HAS_PCH_SPLIT(dev)) {
+ if (HAS_PCH_SPLIT(dev_priv)) {
if ((lvds & LVDS_DETECTED) == 0)
return;
if (dev_priv->vbt.edp.support) {
@@ -1064,12 +1064,13 @@ void intel_lvds_init(struct drm_device *dev)
intel_connector->get_hw_state = intel_connector_get_hw_state;
intel_connector_attach_encoder(intel_connector, intel_encoder);
- intel_encoder->type = INTEL_OUTPUT_LVDS;
+ intel_encoder->type = INTEL_OUTPUT_LVDS;
+ intel_encoder->port = PORT_NONE;
intel_encoder->cloneable = 0;
- if (HAS_PCH_SPLIT(dev))
+ if (HAS_PCH_SPLIT(dev_priv))
intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
- else if (IS_GEN4(dev))
+ else if (IS_GEN4(dev_priv))
intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
else
intel_encoder->crtc_mask = (1 << 1);
@@ -1157,14 +1158,14 @@ void intel_lvds_init(struct drm_device *dev)
*/
 /* Ironlake: FIXME: if this still fails, don't try pipe mode for now */
- if (HAS_PCH_SPLIT(dev))
+ if (HAS_PCH_SPLIT(dev_priv))
goto failed;
pipe = (lvds & LVDS_PIPEB_SELECT) ? 1 : 0;
- crtc = intel_get_crtc_for_pipe(dev, pipe);
+ crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
if (crtc && (lvds & LVDS_PORT_EN)) {
- fixed_mode = intel_crtc_mode_get(dev, crtc);
+ fixed_mode = intel_crtc_mode_get(dev, &crtc->base);
if (fixed_mode) {
DRM_DEBUG_KMS("using current (BIOS) mode: ");
drm_mode_debug_printmodeline(fixed_mode);
diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c
index 7acbbbf97833..f4429f67a4e3 100644
--- a/drivers/gpu/drm/i915/intel_opregion.c
+++ b/drivers/gpu/drm/i915/intel_opregion.c
@@ -642,24 +642,6 @@ static struct notifier_block intel_opregion_notifier = {
* (version 3)
*/
-static u32 get_did(struct intel_opregion *opregion, int i)
-{
- u32 did;
-
- if (i < ARRAY_SIZE(opregion->acpi->didl)) {
- did = opregion->acpi->didl[i];
- } else {
- i -= ARRAY_SIZE(opregion->acpi->didl);
-
- if (WARN_ON(i >= ARRAY_SIZE(opregion->acpi->did2)))
- return 0;
-
- did = opregion->acpi->did2[i];
- }
-
- return did;
-}
-
static void set_did(struct intel_opregion *opregion, int i, u32 val)
{
if (i < ARRAY_SIZE(opregion->acpi->didl)) {
@@ -674,11 +656,11 @@ static void set_did(struct intel_opregion *opregion, int i, u32 val)
}
}
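set_did() above, like the removed get_did(), treats didl[] and did2[] as one logical array: indices past the end of didl spill over into did2. A condensed sketch of that indexing; the array lengths are parameters here because the real sizes come from the opregion mailbox layout:

/* Illustrative only: one logical DID array spanning didl[] and did2[]. */
static void set_did_sketch(u32 *didl, size_t ndidl,
			   u32 *did2, size_t ndid2,
			   size_t i, u32 val)
{
	if (i < ndidl) {
		didl[i] = val;
	} else {
		i -= ndidl;
		if (!WARN_ON(i >= ndid2))
			did2[i] = val;
	}
}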
-static u32 acpi_display_type(struct drm_connector *connector)
+static u32 acpi_display_type(struct intel_connector *connector)
{
u32 display_type;
- switch (connector->connector_type) {
+ switch (connector->base.connector_type) {
case DRM_MODE_CONNECTOR_VGA:
case DRM_MODE_CONNECTOR_DVIA:
display_type = ACPI_DISPLAY_TYPE_VGA;
@@ -707,7 +689,7 @@ static u32 acpi_display_type(struct drm_connector *connector)
display_type = ACPI_DISPLAY_TYPE_OTHER;
break;
default:
- MISSING_CASE(connector->connector_type);
+ MISSING_CASE(connector->base.connector_type);
display_type = ACPI_DISPLAY_TYPE_OTHER;
break;
}
@@ -718,34 +700,9 @@ static u32 acpi_display_type(struct drm_connector *connector)
static void intel_didl_outputs(struct drm_i915_private *dev_priv)
{
struct intel_opregion *opregion = &dev_priv->opregion;
- struct pci_dev *pdev = dev_priv->drm.pdev;
- struct drm_connector *connector;
- acpi_handle handle;
- struct acpi_device *acpi_dev, *acpi_cdev, *acpi_video_bus = NULL;
- unsigned long long device_id;
- acpi_status status;
- u32 temp, max_outputs;
- int i = 0;
-
- handle = ACPI_HANDLE(&pdev->dev);
- if (!handle || acpi_bus_get_device(handle, &acpi_dev))
- return;
-
- if (acpi_is_video_device(handle))
- acpi_video_bus = acpi_dev;
- else {
- list_for_each_entry(acpi_cdev, &acpi_dev->children, node) {
- if (acpi_is_video_device(acpi_cdev->handle)) {
- acpi_video_bus = acpi_cdev;
- break;
- }
- }
- }
-
- if (!acpi_video_bus) {
- DRM_DEBUG_KMS("No ACPI video bus found\n");
- return;
- }
+ struct intel_connector *connector;
+ int i = 0, max_outputs;
+ int display_index[16] = {};
/*
* In theory, did2, the extended didl, gets added at opregion version
@@ -757,64 +714,58 @@ static void intel_didl_outputs(struct drm_i915_private *dev_priv)
max_outputs = ARRAY_SIZE(opregion->acpi->didl) +
ARRAY_SIZE(opregion->acpi->did2);
- list_for_each_entry(acpi_cdev, &acpi_video_bus->children, node) {
- if (i >= max_outputs) {
- DRM_DEBUG_KMS("More than %u outputs detected via ACPI\n",
- max_outputs);
- return;
- }
- status = acpi_evaluate_integer(acpi_cdev->handle, "_ADR",
- NULL, &device_id);
- if (ACPI_SUCCESS(status)) {
- if (!device_id)
- goto blind_set;
- set_did(opregion, i++, (u32)(device_id & 0x0f0f));
- }
+ for_each_intel_connector(&dev_priv->drm, connector) {
+ u32 device_id, type;
+
+ device_id = acpi_display_type(connector);
+
+ /* Use display type specific display index. */
+ type = (device_id & ACPI_DISPLAY_TYPE_MASK)
+ >> ACPI_DISPLAY_TYPE_SHIFT;
+ device_id |= display_index[type]++ << ACPI_DISPLAY_INDEX_SHIFT;
+
+ connector->acpi_device_id = device_id;
+ if (i < max_outputs)
+ set_did(opregion, i, device_id);
+ i++;
}
-end:
DRM_DEBUG_KMS("%d outputs detected\n", i);
+ if (i > max_outputs)
+ DRM_ERROR("More than %d outputs in connector list\n",
+ max_outputs);
+
/* If fewer than max outputs, the list must be null terminated */
if (i < max_outputs)
set_did(opregion, i, 0);
- return;
-
-blind_set:
- i = 0;
- list_for_each_entry(connector,
- &dev_priv->drm.mode_config.connector_list, head) {
- int display_type = acpi_display_type(connector);
-
- if (i >= max_outputs) {
- DRM_DEBUG_KMS("More than %u outputs in connector list\n",
- max_outputs);
- return;
- }
-
- temp = get_did(opregion, i);
- set_did(opregion, i, temp | (1 << 31) | display_type | i);
- i++;
- }
- goto end;
}
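The rewritten loop derives each connector's ACPI device id from its display type plus a per-type running index. A standalone sketch of that encoding, assuming the bit layout from include/acpi/video.h (type in bits 11:8, index in the low bits; verify against the header before relying on it):

/* Illustrative sketch of the id composition in intel_didl_outputs(). */
#define SKETCH_TYPE_SHIFT	8
#define SKETCH_TYPE_MASK	(0xf << SKETCH_TYPE_SHIFT)
#define SKETCH_INDEX_SHIFT	0

static u32 compose_acpi_device_id(u32 base_type, int display_index[16])
{
	u32 type = (base_type & SKETCH_TYPE_MASK) >> SKETCH_TYPE_SHIFT;

	/* e.g. two panels of the same type get indices 0 and 1 */
	return base_type | (display_index[type]++ << SKETCH_INDEX_SHIFT);
}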
static void intel_setup_cadls(struct drm_i915_private *dev_priv)
{
struct intel_opregion *opregion = &dev_priv->opregion;
+ struct intel_connector *connector;
int i = 0;
- u32 disp_id;
-
- /* Initialize the CADL field by duplicating the DIDL values.
- * Technically, this is not always correct as display outputs may exist,
- * but not active. This initialization is necessary for some Clevo
- * laptops that check this field before processing the brightness and
- * display switching hotkeys. Just like DIDL, CADL is NULL-terminated if
- * there are less than eight devices. */
- do {
- disp_id = get_did(opregion, i);
- opregion->acpi->cadl[i] = disp_id;
- } while (++i < 8 && disp_id != 0);
+
+ /*
+ * Initialize the CADL field from the connector device ids. This is
+ * essentially the same as copying from the DIDL. Technically, this is
+ * not always correct, as display outputs may exist but not be active. This
+ * initialization is necessary for some Clevo laptops that check this
+ * field before processing the brightness and display switching hotkeys.
+ *
+ * Note that internal panels should be at the front of the connector
+ * list already, ensuring they're not left out.
+ */
+ for_each_intel_connector(&dev_priv->drm, connector) {
+ if (i >= ARRAY_SIZE(opregion->acpi->cadl))
+ break;
+ opregion->acpi->cadl[i++] = connector->acpi_device_id;
+ }
+
+ /* If fewer than 8 active devices, the list must be null terminated */
+ if (i < ARRAY_SIZE(opregion->acpi->cadl))
+ opregion->acpi->cadl[i] = 0;
}
void intel_opregion_register(struct drm_i915_private *dev_priv)
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index a24bc8c7889f..fd0e4dac7cc1 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -233,7 +233,7 @@ static int intel_overlay_do_wait_request(struct intel_overlay *overlay,
static struct drm_i915_gem_request *alloc_request(struct intel_overlay *overlay)
{
struct drm_i915_private *dev_priv = overlay->i915;
- struct intel_engine_cs *engine = &dev_priv->engine[RCS];
+ struct intel_engine_cs *engine = dev_priv->engine[RCS];
return i915_gem_request_alloc(engine, dev_priv->kernel_context);
}
@@ -1222,7 +1222,7 @@ int intel_overlay_put_image_ioctl(struct drm_device *dev, void *data,
out_unlock:
mutex_unlock(&dev->struct_mutex);
drm_modeset_unlock_all(dev);
- i915_gem_object_put_unlocked(new_bo);
+ i915_gem_object_put(new_bo);
out_free:
kfree(params);
@@ -1466,10 +1466,12 @@ void intel_cleanup_overlay(struct drm_i915_private *dev_priv)
* hardware should be off already */
WARN_ON(dev_priv->overlay->active);
- i915_gem_object_put_unlocked(dev_priv->overlay->reg_bo);
+ i915_gem_object_put(dev_priv->overlay->reg_bo);
kfree(dev_priv->overlay);
}
+#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
+
struct intel_overlay_error_state {
struct overlay_registers regs;
unsigned long base;
@@ -1587,3 +1589,5 @@ intel_overlay_print_error_state(struct drm_i915_error_state_buf *m,
P(UVSCALEV);
#undef P
}
+
+#endif
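The new CONFIG_DRM_I915_CAPTURE_ERROR guard compiles the overlay error-state code out entirely when error capture is disabled. The general shape of the pattern, with hypothetical names:

#if IS_ENABLED(CONFIG_EXAMPLE_FEATURE)

void example_capture_state(void)
{
	/* full implementation, built only when the option is y or m */
}

#else

static inline void example_capture_state(void)
{
	/* no-op stub keeps callers building without #ifdefs */
}

#endif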
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index be4b4d546fd9..08ab6d762ca4 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -304,7 +304,7 @@ void intel_gmch_panel_fitting(struct intel_crtc *intel_crtc,
struct intel_crtc_state *pipe_config,
int fitting_mode)
{
- struct drm_device *dev = intel_crtc->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
u32 pfit_control = 0, pfit_pgm_ratios = 0, border = 0;
struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
@@ -325,7 +325,7 @@ void intel_gmch_panel_fitting(struct intel_crtc *intel_crtc,
break;
case DRM_MODE_SCALE_ASPECT:
/* Scale but preserve the aspect ratio */
- if (INTEL_INFO(dev)->gen >= 4)
+ if (INTEL_GEN(dev_priv) >= 4)
i965_scale_aspect(pipe_config, &pfit_control);
else
i9xx_scale_aspect(pipe_config, &pfit_control,
@@ -339,7 +339,7 @@ void intel_gmch_panel_fitting(struct intel_crtc *intel_crtc,
if (pipe_config->pipe_src_h != adjusted_mode->crtc_vdisplay ||
pipe_config->pipe_src_w != adjusted_mode->crtc_hdisplay) {
pfit_control |= PFIT_ENABLE;
- if (INTEL_INFO(dev)->gen >= 4)
+ if (INTEL_GEN(dev_priv) >= 4)
pfit_control |= PFIT_SCALING_AUTO;
else
pfit_control |= (VERT_AUTO_SCALE |
@@ -355,7 +355,7 @@ void intel_gmch_panel_fitting(struct intel_crtc *intel_crtc,
/* 965+ wants fuzzy fitting */
/* FIXME: handle multiple panels by failing gracefully */
- if (INTEL_INFO(dev)->gen >= 4)
+ if (INTEL_GEN(dev_priv) >= 4)
pfit_control |= ((intel_crtc->pipe << PFIT_PIPE_SHIFT) |
PFIT_FILTER_FUZZY);
@@ -366,7 +366,7 @@ out:
}
/* Make sure pre-965 set dither correctly for 18bpp panels. */
- if (INTEL_INFO(dev)->gen < 4 && pipe_config->pipe_bpp == 18)
+ if (INTEL_GEN(dev_priv) < 4 && pipe_config->pipe_bpp == 18)
pfit_control |= PANEL_8TO6_DITHER_ENABLE;
pipe_config->gmch_pfit.control = pfit_control;
@@ -1722,7 +1722,7 @@ int intel_panel_setup_backlight(struct drm_connector *connector, enum pipe pipe)
DRM_DEBUG_KMS("Connector %s backlight initialized, %s, brightness %u/%u\n",
connector->name,
- panel->backlight.enabled ? "enabled" : "disabled",
+ enableddisabled(panel->backlight.enabled),
panel->backlight.level, panel->backlight.max);
return 0;
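enableddisabled() replaces the open-coded `enabled ? "enabled" : "disabled"` ternaries here and in intel_pm.c below. A sketch of such a helper; that it lives in i915_drv.h alongside yesno()/onoff() is an assumption about this series:

static inline const char *enableddisabled(bool v)
{
	return v ? "enabled" : "disabled";
}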
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index db24f898853c..ae2c0bb4b2e8 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -31,6 +31,7 @@
#include "intel_drv.h"
#include "../../../platform/x86/intel_ips.h"
#include <linux/module.h>
+#include <drm/drm_atomic_helper.h>
/**
* DOC: RC6
@@ -55,10 +56,8 @@
#define INTEL_RC6p_ENABLE (1<<1)
#define INTEL_RC6pp_ENABLE (1<<2)
-static void gen9_init_clock_gating(struct drm_device *dev)
+static void gen9_init_clock_gating(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
/* See Bspec note for PSR2_CTL bit 31, Wa#828:skl,bxt,kbl */
I915_WRITE(CHICKEN_PAR1_1,
I915_READ(CHICKEN_PAR1_1) | SKL_EDP_PSR_FIX_RDWRAP);
@@ -81,11 +80,9 @@ static void gen9_init_clock_gating(struct drm_device *dev)
ILK_DPFC_DISABLE_DUMMY0);
}
-static void bxt_init_clock_gating(struct drm_device *dev)
+static void bxt_init_clock_gating(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
-
- gen9_init_clock_gating(dev);
+ gen9_init_clock_gating(dev_priv);
/* WaDisableSDEUnitClockGating:bxt */
I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
@@ -107,9 +104,8 @@ static void bxt_init_clock_gating(struct drm_device *dev)
PWM1_GATING_DIS | PWM2_GATING_DIS);
}
-static void i915_pineview_get_mem_freq(struct drm_device *dev)
+static void i915_pineview_get_mem_freq(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
u32 tmp;
tmp = I915_READ(CLKCFG);
@@ -146,9 +142,8 @@ static void i915_pineview_get_mem_freq(struct drm_device *dev)
dev_priv->is_ddr3 = (tmp & CSHRDDR3CTL_DDR3) ? 1 : 0;
}
-static void i915_ironlake_get_mem_freq(struct drm_device *dev)
+static void i915_ironlake_get_mem_freq(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
u16 ddrpll, csipll;
ddrpll = I915_READ16(DDRMPLL1);
@@ -252,8 +247,8 @@ static const struct cxsr_latency cxsr_latency_table[] = {
{0, 1, 400, 800, 6042, 36042, 6584, 36584}, /* DDR3-800 SC */
};
-static const struct cxsr_latency *intel_get_cxsr_latency(int is_desktop,
- int is_ddr3,
+static const struct cxsr_latency *intel_get_cxsr_latency(bool is_desktop,
+ bool is_ddr3,
int fsb,
int mem)
{
@@ -319,27 +314,26 @@ static void chv_set_memory_pm5(struct drm_i915_private *dev_priv, bool enable)
void intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable)
{
- struct drm_device *dev = &dev_priv->drm;
u32 val;
- if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
+ if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
I915_WRITE(FW_BLC_SELF_VLV, enable ? FW_CSPWRDWNEN : 0);
POSTING_READ(FW_BLC_SELF_VLV);
dev_priv->wm.vlv.cxsr = enable;
- } else if (IS_G4X(dev) || IS_CRESTLINE(dev)) {
+ } else if (IS_G4X(dev_priv) || IS_CRESTLINE(dev_priv)) {
I915_WRITE(FW_BLC_SELF, enable ? FW_BLC_SELF_EN : 0);
POSTING_READ(FW_BLC_SELF);
- } else if (IS_PINEVIEW(dev)) {
+ } else if (IS_PINEVIEW(dev_priv)) {
val = I915_READ(DSPFW3) & ~PINEVIEW_SELF_REFRESH_EN;
val |= enable ? PINEVIEW_SELF_REFRESH_EN : 0;
I915_WRITE(DSPFW3, val);
POSTING_READ(DSPFW3);
- } else if (IS_I945G(dev) || IS_I945GM(dev)) {
+ } else if (IS_I945G(dev_priv) || IS_I945GM(dev_priv)) {
val = enable ? _MASKED_BIT_ENABLE(FW_BLC_SELF_EN) :
_MASKED_BIT_DISABLE(FW_BLC_SELF_EN);
I915_WRITE(FW_BLC_SELF, val);
POSTING_READ(FW_BLC_SELF);
- } else if (IS_I915GM(dev)) {
+ } else if (IS_I915GM(dev_priv)) {
/*
* FIXME: can't find a bit like this for 915G, and yet
* it does have the related watermark in
@@ -353,8 +347,7 @@ void intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable)
return;
}
- DRM_DEBUG_KMS("memory self-refresh is %s\n",
- enable ? "enabled" : "disabled");
+ DRM_DEBUG_KMS("memory self-refresh is %s\n", enableddisabled(enable));
}
@@ -377,10 +370,9 @@ static const int pessimal_latency_ns = 5000;
#define VLV_FIFO_START(dsparb, dsparb2, lo_shift, hi_shift) \
((((dsparb) >> (lo_shift)) & 0xff) | ((((dsparb2) >> (hi_shift)) & 0x1) << 8))
-static int vlv_get_fifo_size(struct drm_device *dev,
+static int vlv_get_fifo_size(struct drm_i915_private *dev_priv,
enum pipe pipe, int plane)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
int sprite0_start, sprite1_start, size;
switch (pipe) {
@@ -429,9 +421,8 @@ static int vlv_get_fifo_size(struct drm_device *dev,
return size;
}
-static int i9xx_get_fifo_size(struct drm_device *dev, int plane)
+static int i9xx_get_fifo_size(struct drm_i915_private *dev_priv, int plane)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
uint32_t dsparb = I915_READ(DSPARB);
int size;
@@ -445,9 +436,8 @@ static int i9xx_get_fifo_size(struct drm_device *dev, int plane)
return size;
}
-static int i830_get_fifo_size(struct drm_device *dev, int plane)
+static int i830_get_fifo_size(struct drm_i915_private *dev_priv, int plane)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
uint32_t dsparb = I915_READ(DSPARB);
int size;
@@ -462,9 +452,8 @@ static int i830_get_fifo_size(struct drm_device *dev, int plane)
return size;
}
-static int i845_get_fifo_size(struct drm_device *dev, int plane)
+static int i845_get_fifo_size(struct drm_i915_private *dev_priv, int plane)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
uint32_t dsparb = I915_READ(DSPARB);
int size;
@@ -624,11 +613,11 @@ static unsigned long intel_calculate_wm(unsigned long clock_in_khz,
return wm_size;
}
-static struct drm_crtc *single_enabled_crtc(struct drm_device *dev)
+static struct intel_crtc *single_enabled_crtc(struct drm_i915_private *dev_priv)
{
- struct drm_crtc *crtc, *enabled = NULL;
+ struct intel_crtc *crtc, *enabled = NULL;
- for_each_crtc(dev, crtc) {
+ for_each_intel_crtc(&dev_priv->drm, crtc) {
if (intel_crtc_active(crtc)) {
if (enabled)
return NULL;
@@ -639,27 +628,31 @@ static struct drm_crtc *single_enabled_crtc(struct drm_device *dev)
return enabled;
}
-static void pineview_update_wm(struct drm_crtc *unused_crtc)
+static void pineview_update_wm(struct intel_crtc *unused_crtc)
{
- struct drm_device *dev = unused_crtc->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct drm_crtc *crtc;
+ struct drm_i915_private *dev_priv = to_i915(unused_crtc->base.dev);
+ struct intel_crtc *crtc;
const struct cxsr_latency *latency;
u32 reg;
unsigned long wm;
- latency = intel_get_cxsr_latency(IS_PINEVIEW_G(dev), dev_priv->is_ddr3,
- dev_priv->fsb_freq, dev_priv->mem_freq);
+ latency = intel_get_cxsr_latency(IS_PINEVIEW_G(dev_priv),
+ dev_priv->is_ddr3,
+ dev_priv->fsb_freq,
+ dev_priv->mem_freq);
if (!latency) {
DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
intel_set_memory_cxsr(dev_priv, false);
return;
}
- crtc = single_enabled_crtc(dev);
+ crtc = single_enabled_crtc(dev_priv);
if (crtc) {
- const struct drm_display_mode *adjusted_mode = &to_intel_crtc(crtc)->config->base.adjusted_mode;
- int cpp = drm_format_plane_cpp(crtc->primary->state->fb->pixel_format, 0);
+ const struct drm_display_mode *adjusted_mode =
+ &crtc->config->base.adjusted_mode;
+ const struct drm_framebuffer *fb =
+ crtc->base.primary->state->fb;
+ int cpp = drm_format_plane_cpp(fb->pixel_format, 0);
int clock = adjusted_mode->crtc_clock;
/* Display SR */
@@ -706,7 +699,7 @@ static void pineview_update_wm(struct drm_crtc *unused_crtc)
}
}
-static bool g4x_compute_wm0(struct drm_device *dev,
+static bool g4x_compute_wm0(struct drm_i915_private *dev_priv,
int plane,
const struct intel_watermark_params *display,
int display_latency_ns,
@@ -715,24 +708,26 @@ static bool g4x_compute_wm0(struct drm_device *dev,
int *plane_wm,
int *cursor_wm)
{
- struct drm_crtc *crtc;
+ struct intel_crtc *crtc;
const struct drm_display_mode *adjusted_mode;
+ const struct drm_framebuffer *fb;
int htotal, hdisplay, clock, cpp;
int line_time_us, line_count;
int entries, tlb_miss;
- crtc = intel_get_crtc_for_plane(dev, plane);
+ crtc = intel_get_crtc_for_plane(dev_priv, plane);
if (!intel_crtc_active(crtc)) {
*cursor_wm = cursor->guard_size;
*plane_wm = display->guard_size;
return false;
}
- adjusted_mode = &to_intel_crtc(crtc)->config->base.adjusted_mode;
+ adjusted_mode = &crtc->config->base.adjusted_mode;
+ fb = crtc->base.primary->state->fb;
clock = adjusted_mode->crtc_clock;
htotal = adjusted_mode->crtc_htotal;
- hdisplay = to_intel_crtc(crtc)->config->pipe_src_w;
- cpp = drm_format_plane_cpp(crtc->primary->state->fb->pixel_format, 0);
+ hdisplay = crtc->config->pipe_src_w;
+ cpp = drm_format_plane_cpp(fb->pixel_format, 0);
/* Use the small buffer method to calculate plane watermark */
entries = ((clock * cpp / 1000) * display_latency_ns) / 1000;
@@ -747,7 +742,7 @@ static bool g4x_compute_wm0(struct drm_device *dev,
/* Use the large buffer method to calculate cursor watermark */
line_time_us = max(htotal * 1000 / clock, 1);
line_count = (cursor_latency_ns / line_time_us + 1000) / 1000;
- entries = line_count * crtc->cursor->state->crtc_w * cpp;
+ entries = line_count * crtc->base.cursor->state->crtc_w * cpp;
tlb_miss = cursor->fifo_size*cursor->cacheline_size - hdisplay * 8;
if (tlb_miss > 0)
entries += tlb_miss;
@@ -766,7 +761,7 @@ static bool g4x_compute_wm0(struct drm_device *dev,
* can be programmed into the associated watermark register, that watermark
* must be disabled.
*/
-static bool g4x_check_srwm(struct drm_device *dev,
+static bool g4x_check_srwm(struct drm_i915_private *dev_priv,
int display_wm, int cursor_wm,
const struct intel_watermark_params *display,
const struct intel_watermark_params *cursor)
@@ -775,13 +770,13 @@ static bool g4x_check_srwm(struct drm_device *dev,
display_wm, cursor_wm);
if (display_wm > display->max_wm) {
- DRM_DEBUG_KMS("display watermark is too large(%d/%ld), disabling\n",
+ DRM_DEBUG_KMS("display watermark is too large(%d/%u), disabling\n",
display_wm, display->max_wm);
return false;
}
if (cursor_wm > cursor->max_wm) {
- DRM_DEBUG_KMS("cursor watermark is too large(%d/%ld), disabling\n",
+ DRM_DEBUG_KMS("cursor watermark is too large(%d/%u), disabling\n",
cursor_wm, cursor->max_wm);
return false;
}
@@ -794,15 +789,16 @@ static bool g4x_check_srwm(struct drm_device *dev,
return true;
}
-static bool g4x_compute_srwm(struct drm_device *dev,
+static bool g4x_compute_srwm(struct drm_i915_private *dev_priv,
int plane,
int latency_ns,
const struct intel_watermark_params *display,
const struct intel_watermark_params *cursor,
int *display_wm, int *cursor_wm)
{
- struct drm_crtc *crtc;
+ struct intel_crtc *crtc;
const struct drm_display_mode *adjusted_mode;
+ const struct drm_framebuffer *fb;
int hdisplay, htotal, cpp, clock;
unsigned long line_time_us;
int line_count, line_size;
@@ -814,12 +810,13 @@ static bool g4x_compute_srwm(struct drm_device *dev,
return false;
}
- crtc = intel_get_crtc_for_plane(dev, plane);
- adjusted_mode = &to_intel_crtc(crtc)->config->base.adjusted_mode;
+ crtc = intel_get_crtc_for_plane(dev_priv, plane);
+ adjusted_mode = &crtc->config->base.adjusted_mode;
+ fb = crtc->base.primary->state->fb;
clock = adjusted_mode->crtc_clock;
htotal = adjusted_mode->crtc_htotal;
- hdisplay = to_intel_crtc(crtc)->config->pipe_src_w;
- cpp = drm_format_plane_cpp(crtc->primary->state->fb->pixel_format, 0);
+ hdisplay = crtc->config->pipe_src_w;
+ cpp = drm_format_plane_cpp(fb->pixel_format, 0);
line_time_us = max(htotal * 1000 / clock, 1);
line_count = (latency_ns / line_time_us + 1000) / 1000;
@@ -833,11 +830,11 @@ static bool g4x_compute_srwm(struct drm_device *dev,
*display_wm = entries + display->guard_size;
/* calculate the self-refresh watermark for display cursor */
- entries = line_count * cpp * crtc->cursor->state->crtc_w;
+ entries = line_count * cpp * crtc->base.cursor->state->crtc_w;
entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
*cursor_wm = entries + cursor->guard_size;
- return g4x_check_srwm(dev,
+ return g4x_check_srwm(dev_priv,
*display_wm, *cursor_wm,
display, cursor);
}
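The self-refresh watermark above comes straight from line timing: how many scanlines' worth of data must be buffered to ride out the wakeup latency. A simplified, self-contained restatement of the arithmetic shown in the hunks (clock in kHz, latency in ns; illustrative, not the driver's function):

static int srwm_entries_sketch(int clock, int htotal, int width, int cpp,
			       int latency_ns, int cacheline_size,
			       int guard_size)
{
	/* one scanline takes htotal/clock ms, i.e. this many us */
	int line_time_us = max(htotal * 1000 / clock, 1);
	/* scanlines fetched while waking from self-refresh, rounded up */
	int line_count = (latency_ns / line_time_us + 1000) / 1000;
	/* bytes fetched in that window */
	int entries = line_count * width * cpp;

	return DIV_ROUND_UP(entries, cacheline_size) + guard_size;
}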
@@ -937,10 +934,8 @@ static unsigned int vlv_wm_method2(unsigned int pixel_rate,
return ret;
}
-static void vlv_setup_wm_latency(struct drm_device *dev)
+static void vlv_setup_wm_latency(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
-
/* all latencies in usec */
dev_priv->wm.pri_latency[VLV_WM_LEVEL_PM2] = 3;
@@ -1065,7 +1060,8 @@ static void vlv_invert_wms(struct intel_crtc *crtc)
for (level = 0; level < wm_state->num_levels; level++) {
struct drm_device *dev = crtc->base.dev;
- const int sr_fifo_size = INTEL_INFO(dev)->num_pipes * 512 - 1;
+ const int sr_fifo_size =
+ INTEL_INFO(to_i915(dev))->num_pipes * 512 - 1;
struct intel_plane *plane;
wm_state->sr[level].plane = sr_fifo_size - wm_state->sr[level].plane;
@@ -1095,15 +1091,16 @@ static void vlv_invert_wms(struct intel_crtc *crtc)
static void vlv_compute_wm(struct intel_crtc *crtc)
{
struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct vlv_wm_state *wm_state = &crtc->wm_state;
struct intel_plane *plane;
- int sr_fifo_size = INTEL_INFO(dev)->num_pipes * 512 - 1;
+ int sr_fifo_size = INTEL_INFO(dev_priv)->num_pipes * 512 - 1;
int level;
memset(wm_state, 0, sizeof(*wm_state));
wm_state->cxsr = crtc->pipe != PIPE_C && crtc->wm.cxsr_allowed;
- wm_state->num_levels = to_i915(dev)->wm.max_level + 1;
+ wm_state->num_levels = dev_priv->wm.max_level + 1;
wm_state->num_active_planes = 0;
@@ -1183,7 +1180,7 @@ static void vlv_compute_wm(struct intel_crtc *crtc)
}
/* clear any (partially) filled invalid levels */
- for (level = wm_state->num_levels; level < to_i915(dev)->wm.max_level + 1; level++) {
+ for (level = wm_state->num_levels; level < dev_priv->wm.max_level + 1; level++) {
memset(&wm_state->wm[level], 0, sizeof(wm_state->wm[level]));
memset(&wm_state->sr[level], 0, sizeof(wm_state->sr[level]));
}
@@ -1327,20 +1324,19 @@ static void vlv_merge_wm(struct drm_device *dev,
}
}
-static void vlv_update_wm(struct drm_crtc *crtc)
+static void vlv_update_wm(struct intel_crtc *crtc)
{
- struct drm_device *dev = crtc->dev;
+ struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- enum pipe pipe = intel_crtc->pipe;
+ enum pipe pipe = crtc->pipe;
struct vlv_wm_values wm = {};
- vlv_compute_wm(intel_crtc);
+ vlv_compute_wm(crtc);
vlv_merge_wm(dev, &wm);
if (memcmp(&dev_priv->wm.vlv, &wm, sizeof(wm)) == 0) {
/* FIXME should be part of crtc atomic commit */
- vlv_pipe_set_fifo_size(intel_crtc);
+ vlv_pipe_set_fifo_size(crtc);
return;
}
@@ -1356,9 +1352,9 @@ static void vlv_update_wm(struct drm_crtc *crtc)
intel_set_memory_cxsr(dev_priv, false);
/* FIXME should be part of crtc atomic commit */
- vlv_pipe_set_fifo_size(intel_crtc);
+ vlv_pipe_set_fifo_size(crtc);
- vlv_write_wm_values(intel_crtc, &wm);
+ vlv_write_wm_values(crtc, &wm);
DRM_DEBUG_KMS("Setting FIFO watermarks - %c: plane=%d, cursor=%d, "
"sprite0=%d, sprite1=%d, SR: plane=%d, cursor=%d level=%d cxsr=%d\n",
@@ -1382,30 +1378,29 @@ static void vlv_update_wm(struct drm_crtc *crtc)
#define single_plane_enabled(mask) is_power_of_2(mask)
-static void g4x_update_wm(struct drm_crtc *crtc)
+static void g4x_update_wm(struct intel_crtc *crtc)
{
- struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
static const int sr_latency_ns = 12000;
- struct drm_i915_private *dev_priv = to_i915(dev);
int planea_wm, planeb_wm, cursora_wm, cursorb_wm;
int plane_sr, cursor_sr;
unsigned int enabled = 0;
bool cxsr_enabled;
- if (g4x_compute_wm0(dev, PIPE_A,
+ if (g4x_compute_wm0(dev_priv, PIPE_A,
&g4x_wm_info, pessimal_latency_ns,
&g4x_cursor_wm_info, pessimal_latency_ns,
&planea_wm, &cursora_wm))
enabled |= 1 << PIPE_A;
- if (g4x_compute_wm0(dev, PIPE_B,
+ if (g4x_compute_wm0(dev_priv, PIPE_B,
&g4x_wm_info, pessimal_latency_ns,
&g4x_cursor_wm_info, pessimal_latency_ns,
&planeb_wm, &cursorb_wm))
enabled |= 1 << PIPE_B;
if (single_plane_enabled(enabled) &&
- g4x_compute_srwm(dev, ffs(enabled) - 1,
+ g4x_compute_srwm(dev_priv, ffs(enabled) - 1,
sr_latency_ns,
&g4x_wm_info,
&g4x_cursor_wm_info,
@@ -1440,25 +1435,27 @@ static void g4x_update_wm(struct drm_crtc *crtc)
intel_set_memory_cxsr(dev_priv, true);
}
-static void i965_update_wm(struct drm_crtc *unused_crtc)
+static void i965_update_wm(struct intel_crtc *unused_crtc)
{
- struct drm_device *dev = unused_crtc->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct drm_crtc *crtc;
+ struct drm_i915_private *dev_priv = to_i915(unused_crtc->base.dev);
+ struct intel_crtc *crtc;
int srwm = 1;
int cursor_sr = 16;
bool cxsr_enabled;
/* Calc sr entries for one plane configs */
- crtc = single_enabled_crtc(dev);
+ crtc = single_enabled_crtc(dev_priv);
if (crtc) {
/* self-refresh has much higher latency */
static const int sr_latency_ns = 12000;
- const struct drm_display_mode *adjusted_mode = &to_intel_crtc(crtc)->config->base.adjusted_mode;
+ const struct drm_display_mode *adjusted_mode =
+ &crtc->config->base.adjusted_mode;
+ const struct drm_framebuffer *fb =
+ crtc->base.primary->state->fb;
int clock = adjusted_mode->crtc_clock;
int htotal = adjusted_mode->crtc_htotal;
- int hdisplay = to_intel_crtc(crtc)->config->pipe_src_w;
- int cpp = drm_format_plane_cpp(crtc->primary->state->fb->pixel_format, 0);
+ int hdisplay = crtc->config->pipe_src_w;
+ int cpp = drm_format_plane_cpp(fb->pixel_format, 0);
unsigned long line_time_us;
int entries;
@@ -1476,7 +1473,7 @@ static void i965_update_wm(struct drm_crtc *unused_crtc)
entries, srwm);
entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
- cpp * crtc->cursor->state->crtc_w;
+ cpp * crtc->base.cursor->state->crtc_w;
entries = DIV_ROUND_UP(entries,
i965_cursor_wm_info.cacheline_size);
cursor_sr = i965_cursor_wm_info.fifo_size -
@@ -1514,34 +1511,38 @@ static void i965_update_wm(struct drm_crtc *unused_crtc)
#undef FW_WM
-static void i9xx_update_wm(struct drm_crtc *unused_crtc)
+static void i9xx_update_wm(struct intel_crtc *unused_crtc)
{
- struct drm_device *dev = unused_crtc->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(unused_crtc->base.dev);
const struct intel_watermark_params *wm_info;
uint32_t fwater_lo;
uint32_t fwater_hi;
int cwm, srwm = 1;
int fifo_size;
int planea_wm, planeb_wm;
- struct drm_crtc *crtc, *enabled = NULL;
+ struct intel_crtc *crtc, *enabled = NULL;
- if (IS_I945GM(dev))
+ if (IS_I945GM(dev_priv))
wm_info = &i945_wm_info;
- else if (!IS_GEN2(dev))
+ else if (!IS_GEN2(dev_priv))
wm_info = &i915_wm_info;
else
wm_info = &i830_a_wm_info;
- fifo_size = dev_priv->display.get_fifo_size(dev, 0);
- crtc = intel_get_crtc_for_plane(dev, 0);
+ fifo_size = dev_priv->display.get_fifo_size(dev_priv, 0);
+ crtc = intel_get_crtc_for_plane(dev_priv, 0);
if (intel_crtc_active(crtc)) {
- const struct drm_display_mode *adjusted_mode;
- int cpp = drm_format_plane_cpp(crtc->primary->state->fb->pixel_format, 0);
- if (IS_GEN2(dev))
+ const struct drm_display_mode *adjusted_mode =
+ &crtc->config->base.adjusted_mode;
+ const struct drm_framebuffer *fb =
+ crtc->base.primary->state->fb;
+ int cpp;
+
+ if (IS_GEN2(dev_priv))
cpp = 4;
+ else
+ cpp = drm_format_plane_cpp(fb->pixel_format, 0);
- adjusted_mode = &to_intel_crtc(crtc)->config->base.adjusted_mode;
planea_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
wm_info, fifo_size, cpp,
pessimal_latency_ns);
@@ -1552,18 +1553,23 @@ static void i9xx_update_wm(struct drm_crtc *unused_crtc)
planea_wm = wm_info->max_wm;
}
- if (IS_GEN2(dev))
+ if (IS_GEN2(dev_priv))
wm_info = &i830_bc_wm_info;
- fifo_size = dev_priv->display.get_fifo_size(dev, 1);
- crtc = intel_get_crtc_for_plane(dev, 1);
+ fifo_size = dev_priv->display.get_fifo_size(dev_priv, 1);
+ crtc = intel_get_crtc_for_plane(dev_priv, 1);
if (intel_crtc_active(crtc)) {
- const struct drm_display_mode *adjusted_mode;
- int cpp = drm_format_plane_cpp(crtc->primary->state->fb->pixel_format, 0);
- if (IS_GEN2(dev))
+ const struct drm_display_mode *adjusted_mode =
+ &crtc->config->base.adjusted_mode;
+ const struct drm_framebuffer *fb =
+ crtc->base.primary->state->fb;
+ int cpp;
+
+ if (IS_GEN2(dev_priv))
cpp = 4;
+ else
+ cpp = drm_format_plane_cpp(fb->pixel_format, 0);
- adjusted_mode = &to_intel_crtc(crtc)->config->base.adjusted_mode;
planeb_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
wm_info, fifo_size, cpp,
pessimal_latency_ns);
@@ -1579,10 +1585,10 @@ static void i9xx_update_wm(struct drm_crtc *unused_crtc)
DRM_DEBUG_KMS("FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm);
- if (IS_I915GM(dev) && enabled) {
+ if (IS_I915GM(dev_priv) && enabled) {
struct drm_i915_gem_object *obj;
- obj = intel_fb_obj(enabled->primary->state->fb);
+ obj = intel_fb_obj(enabled->base.primary->state->fb);
/* self-refresh seems busted with untiled */
if (!i915_gem_object_is_tiled(obj))
@@ -1598,19 +1604,24 @@ static void i9xx_update_wm(struct drm_crtc *unused_crtc)
intel_set_memory_cxsr(dev_priv, false);
/* Calc sr entries for one plane configs */
- if (HAS_FW_BLC(dev) && enabled) {
+ if (HAS_FW_BLC(dev_priv) && enabled) {
/* self-refresh has much higher latency */
static const int sr_latency_ns = 6000;
- const struct drm_display_mode *adjusted_mode = &to_intel_crtc(enabled)->config->base.adjusted_mode;
+ const struct drm_display_mode *adjusted_mode =
+ &enabled->config->base.adjusted_mode;
+ const struct drm_framebuffer *fb =
+ enabled->base.primary->state->fb;
int clock = adjusted_mode->crtc_clock;
int htotal = adjusted_mode->crtc_htotal;
- int hdisplay = to_intel_crtc(enabled)->config->pipe_src_w;
- int cpp = drm_format_plane_cpp(enabled->primary->state->fb->pixel_format, 0);
+ int hdisplay = enabled->config->pipe_src_w;
+ int cpp;
unsigned long line_time_us;
int entries;
- if (IS_I915GM(dev) || IS_I945GM(dev))
+ if (IS_I915GM(dev_priv) || IS_I945GM(dev_priv))
cpp = 4;
+ else
+ cpp = drm_format_plane_cpp(fb->pixel_format, 0);
line_time_us = max(htotal * 1000 / clock, 1);
@@ -1623,7 +1634,7 @@ static void i9xx_update_wm(struct drm_crtc *unused_crtc)
if (srwm < 0)
srwm = 1;
- if (IS_I945G(dev) || IS_I945GM(dev))
+ if (IS_I945G(dev_priv) || IS_I945GM(dev_priv))
I915_WRITE(FW_BLC_SELF,
FW_BLC_SELF_FIFO_MASK | (srwm & 0xff));
else
@@ -1647,23 +1658,22 @@ static void i9xx_update_wm(struct drm_crtc *unused_crtc)
intel_set_memory_cxsr(dev_priv, true);
}
-static void i845_update_wm(struct drm_crtc *unused_crtc)
+static void i845_update_wm(struct intel_crtc *unused_crtc)
{
- struct drm_device *dev = unused_crtc->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct drm_crtc *crtc;
+ struct drm_i915_private *dev_priv = to_i915(unused_crtc->base.dev);
+ struct intel_crtc *crtc;
const struct drm_display_mode *adjusted_mode;
uint32_t fwater_lo;
int planea_wm;
- crtc = single_enabled_crtc(dev);
+ crtc = single_enabled_crtc(dev_priv);
if (crtc == NULL)
return;
- adjusted_mode = &to_intel_crtc(crtc)->config->base.adjusted_mode;
+ adjusted_mode = &crtc->config->base.adjusted_mode;
planea_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
&i845_wm_info,
- dev_priv->display.get_fifo_size(dev, 0),
+ dev_priv->display.get_fifo_size(dev_priv, 0),
4, pessimal_latency_ns);
fwater_lo = I915_READ(FW_BLC) & ~0xfff;
fwater_lo |= (3<<8) | planea_wm;
@@ -1852,23 +1862,25 @@ static uint32_t ilk_compute_fbc_wm(const struct intel_crtc_state *cstate,
return ilk_wm_fbc(pri_val, drm_rect_width(&pstate->base.dst), cpp);
}
-static unsigned int ilk_display_fifo_size(const struct drm_device *dev)
+static unsigned int
+ilk_display_fifo_size(const struct drm_i915_private *dev_priv)
{
- if (INTEL_INFO(dev)->gen >= 8)
+ if (INTEL_GEN(dev_priv) >= 8)
return 3072;
- else if (INTEL_INFO(dev)->gen >= 7)
+ else if (INTEL_GEN(dev_priv) >= 7)
return 768;
else
return 512;
}
-static unsigned int ilk_plane_wm_reg_max(const struct drm_device *dev,
- int level, bool is_sprite)
+static unsigned int
+ilk_plane_wm_reg_max(const struct drm_i915_private *dev_priv,
+ int level, bool is_sprite)
{
- if (INTEL_INFO(dev)->gen >= 8)
+ if (INTEL_GEN(dev_priv) >= 8)
/* BDW primary/sprite plane watermarks */
return level == 0 ? 255 : 2047;
- else if (INTEL_INFO(dev)->gen >= 7)
+ else if (INTEL_GEN(dev_priv) >= 7)
/* IVB/HSW primary/sprite plane watermarks */
return level == 0 ? 127 : 1023;
else if (!is_sprite)
@@ -1879,18 +1891,18 @@ static unsigned int ilk_plane_wm_reg_max(const struct drm_device *dev,
return level == 0 ? 63 : 255;
}
-static unsigned int ilk_cursor_wm_reg_max(const struct drm_device *dev,
- int level)
+static unsigned int
+ilk_cursor_wm_reg_max(const struct drm_i915_private *dev_priv, int level)
{
- if (INTEL_INFO(dev)->gen >= 7)
+ if (INTEL_GEN(dev_priv) >= 7)
return level == 0 ? 63 : 255;
else
return level == 0 ? 31 : 63;
}
-static unsigned int ilk_fbc_wm_reg_max(const struct drm_device *dev)
+static unsigned int ilk_fbc_wm_reg_max(const struct drm_i915_private *dev_priv)
{
- if (INTEL_INFO(dev)->gen >= 8)
+ if (INTEL_GEN(dev_priv) >= 8)
return 31;
else
return 15;
@@ -1903,7 +1915,8 @@ static unsigned int ilk_plane_wm_max(const struct drm_device *dev,
enum intel_ddb_partitioning ddb_partitioning,
bool is_sprite)
{
- unsigned int fifo_size = ilk_display_fifo_size(dev);
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ unsigned int fifo_size = ilk_display_fifo_size(dev_priv);
/* if sprites aren't enabled, sprites get nothing */
if (is_sprite && !config->sprites_enabled)
@@ -1911,14 +1924,14 @@ static unsigned int ilk_plane_wm_max(const struct drm_device *dev,
/* HSW allows LP1+ watermarks even with multiple pipes */
if (level == 0 || config->num_pipes_active > 1) {
- fifo_size /= INTEL_INFO(dev)->num_pipes;
+ fifo_size /= INTEL_INFO(dev_priv)->num_pipes;
/*
* For some reason the non self refresh
* FIFO size is only half of the self
* refresh FIFO size on ILK/SNB.
*/
- if (INTEL_INFO(dev)->gen <= 6)
+ if (INTEL_GEN(dev_priv) <= 6)
fifo_size /= 2;
}
@@ -1934,7 +1947,7 @@ static unsigned int ilk_plane_wm_max(const struct drm_device *dev,
}
/* clamp to max that the registers can hold */
- return min(fifo_size, ilk_plane_wm_reg_max(dev, level, is_sprite));
+ return min(fifo_size, ilk_plane_wm_reg_max(dev_priv, level, is_sprite));
}
/* Calculate the maximum cursor plane watermark */
@@ -1947,7 +1960,7 @@ static unsigned int ilk_cursor_wm_max(const struct drm_device *dev,
return 64;
/* otherwise just report max that registers can hold */
- return ilk_cursor_wm_reg_max(dev, level);
+ return ilk_cursor_wm_reg_max(to_i915(dev), level);
}
static void ilk_compute_wm_maximums(const struct drm_device *dev,
@@ -1959,17 +1972,17 @@ static void ilk_compute_wm_maximums(const struct drm_device *dev,
max->pri = ilk_plane_wm_max(dev, level, config, ddb_partitioning, false);
max->spr = ilk_plane_wm_max(dev, level, config, ddb_partitioning, true);
max->cur = ilk_cursor_wm_max(dev, level, config);
- max->fbc = ilk_fbc_wm_reg_max(dev);
+ max->fbc = ilk_fbc_wm_reg_max(to_i915(dev));
}
-static void ilk_compute_wm_reg_maximums(struct drm_device *dev,
+static void ilk_compute_wm_reg_maximums(const struct drm_i915_private *dev_priv,
int level,
struct ilk_wm_maximums *max)
{
- max->pri = ilk_plane_wm_reg_max(dev, level, false);
- max->spr = ilk_plane_wm_reg_max(dev, level, true);
- max->cur = ilk_cursor_wm_reg_max(dev, level);
- max->fbc = ilk_fbc_wm_reg_max(dev);
+ max->pri = ilk_plane_wm_reg_max(dev_priv, level, false);
+ max->spr = ilk_plane_wm_reg_max(dev_priv, level, true);
+ max->cur = ilk_cursor_wm_reg_max(dev_priv, level);
+ max->fbc = ilk_fbc_wm_reg_max(dev_priv);
}
static bool ilk_validate_wm_level(int level,
@@ -2076,14 +2089,13 @@ hsw_compute_linetime_wm(const struct intel_crtc_state *cstate)
PIPE_WM_LINETIME_TIME(linetime);
}
-static void intel_read_wm_latency(struct drm_device *dev, uint16_t wm[8])
+static void intel_read_wm_latency(struct drm_i915_private *dev_priv,
+ uint16_t wm[8])
{
- struct drm_i915_private *dev_priv = to_i915(dev);
-
- if (IS_GEN9(dev)) {
+ if (IS_GEN9(dev_priv)) {
uint32_t val;
int ret, i;
- int level, max_level = ilk_wm_max_level(dev);
+ int level, max_level = ilk_wm_max_level(dev_priv);
/* read the first set of memory latencies[0:3] */
val = 0; /* data0 to be programmed to 0 for first set */
@@ -2155,7 +2167,7 @@ static void intel_read_wm_latency(struct drm_device *dev, uint16_t wm[8])
}
}
- } else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
+ } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
uint64_t sskpd = I915_READ64(MCH_SSKPD);
wm[0] = (sskpd >> 56) & 0xFF;
@@ -2165,14 +2177,14 @@ static void intel_read_wm_latency(struct drm_device *dev, uint16_t wm[8])
wm[2] = (sskpd >> 12) & 0xFF;
wm[3] = (sskpd >> 20) & 0x1FF;
wm[4] = (sskpd >> 32) & 0x1FF;
- } else if (INTEL_INFO(dev)->gen >= 6) {
+ } else if (INTEL_GEN(dev_priv) >= 6) {
uint32_t sskpd = I915_READ(MCH_SSKPD);
wm[0] = (sskpd >> SSKPD_WM0_SHIFT) & SSKPD_WM_MASK;
wm[1] = (sskpd >> SSKPD_WM1_SHIFT) & SSKPD_WM_MASK;
wm[2] = (sskpd >> SSKPD_WM2_SHIFT) & SSKPD_WM_MASK;
wm[3] = (sskpd >> SSKPD_WM3_SHIFT) & SSKPD_WM_MASK;
- } else if (INTEL_INFO(dev)->gen >= 5) {
+ } else if (INTEL_GEN(dev_priv) >= 5) {
uint32_t mltr = I915_READ(MLTR_ILK);
/* ILK primary LP0 latency is 700 ns */
@@ -2182,42 +2194,44 @@ static void intel_read_wm_latency(struct drm_device *dev, uint16_t wm[8])
}
}
-static void intel_fixup_spr_wm_latency(struct drm_device *dev, uint16_t wm[5])
+static void intel_fixup_spr_wm_latency(struct drm_i915_private *dev_priv,
+ uint16_t wm[5])
{
/* ILK sprite LP0 latency is 1300 ns */
- if (IS_GEN5(dev))
+ if (IS_GEN5(dev_priv))
wm[0] = 13;
}
-static void intel_fixup_cur_wm_latency(struct drm_device *dev, uint16_t wm[5])
+static void intel_fixup_cur_wm_latency(struct drm_i915_private *dev_priv,
+ uint16_t wm[5])
{
/* ILK cursor LP0 latency is 1300 ns */
- if (IS_GEN5(dev))
+ if (IS_GEN5(dev_priv))
wm[0] = 13;
/* WaDoubleCursorLP3Latency:ivb */
- if (IS_IVYBRIDGE(dev))
+ if (IS_IVYBRIDGE(dev_priv))
wm[3] *= 2;
}
-int ilk_wm_max_level(const struct drm_device *dev)
+int ilk_wm_max_level(const struct drm_i915_private *dev_priv)
{
/* how many WM levels are we expecting */
- if (INTEL_INFO(dev)->gen >= 9)
+ if (INTEL_GEN(dev_priv) >= 9)
return 7;
- else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
+ else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
return 4;
- else if (INTEL_INFO(dev)->gen >= 6)
+ else if (INTEL_GEN(dev_priv) >= 6)
return 3;
else
return 2;
}
-static void intel_print_wm_latency(struct drm_device *dev,
+static void intel_print_wm_latency(struct drm_i915_private *dev_priv,
const char *name,
const uint16_t wm[8])
{
- int level, max_level = ilk_wm_max_level(dev);
+ int level, max_level = ilk_wm_max_level(dev_priv);
for (level = 0; level <= max_level; level++) {
unsigned int latency = wm[level];
@@ -2232,7 +2246,7 @@ static void intel_print_wm_latency(struct drm_device *dev,
* - latencies are in us on gen9.
* - before then, WM1+ latency values are in 0.5us units
*/
- if (IS_GEN9(dev))
+ if (IS_GEN9(dev_priv))
latency *= 10;
else if (level > 0)
latency *= 5;
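The *= 10 and *= 5 above normalize the latencies to tenths of a microsecond before printing: gen9 hardware reports whole microseconds, while pre-gen9 WM1+ values are in 0.5 µs units. A sketch of that normalization, under those unit assumptions:

/* Normalize a raw WM latency field to 0.1 us units for printing. */
static unsigned int wm_latency_tenths_us(bool gen9, int level,
					 unsigned int raw)
{
	if (gen9)
		return raw * 10;	/* gen9: whole microseconds */
	if (level > 0)
		return raw * 5;		/* WM1+: 0.5 us units */
	return raw;			/* WM0: left unscaled above */
}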
@@ -2246,7 +2260,7 @@ static void intel_print_wm_latency(struct drm_device *dev,
static bool ilk_increase_wm_latency(struct drm_i915_private *dev_priv,
uint16_t wm[5], uint16_t min)
{
- int level, max_level = ilk_wm_max_level(&dev_priv->drm);
+ int level, max_level = ilk_wm_max_level(dev_priv);
if (wm[0] >= min)
return false;
@@ -2258,9 +2272,8 @@ static bool ilk_increase_wm_latency(struct drm_i915_private *dev_priv,
return true;
}
-static void snb_wm_latency_quirk(struct drm_device *dev)
+static void snb_wm_latency_quirk(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
bool changed;
/*
@@ -2275,39 +2288,35 @@ static void snb_wm_latency_quirk(struct drm_device *dev)
return;
DRM_DEBUG_KMS("WM latency values increased to avoid potential underruns\n");
- intel_print_wm_latency(dev, "Primary", dev_priv->wm.pri_latency);
- intel_print_wm_latency(dev, "Sprite", dev_priv->wm.spr_latency);
- intel_print_wm_latency(dev, "Cursor", dev_priv->wm.cur_latency);
+ intel_print_wm_latency(dev_priv, "Primary", dev_priv->wm.pri_latency);
+ intel_print_wm_latency(dev_priv, "Sprite", dev_priv->wm.spr_latency);
+ intel_print_wm_latency(dev_priv, "Cursor", dev_priv->wm.cur_latency);
}
-static void ilk_setup_wm_latency(struct drm_device *dev)
+static void ilk_setup_wm_latency(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
-
- intel_read_wm_latency(dev, dev_priv->wm.pri_latency);
+ intel_read_wm_latency(dev_priv, dev_priv->wm.pri_latency);
memcpy(dev_priv->wm.spr_latency, dev_priv->wm.pri_latency,
sizeof(dev_priv->wm.pri_latency));
memcpy(dev_priv->wm.cur_latency, dev_priv->wm.pri_latency,
sizeof(dev_priv->wm.pri_latency));
- intel_fixup_spr_wm_latency(dev, dev_priv->wm.spr_latency);
- intel_fixup_cur_wm_latency(dev, dev_priv->wm.cur_latency);
+ intel_fixup_spr_wm_latency(dev_priv, dev_priv->wm.spr_latency);
+ intel_fixup_cur_wm_latency(dev_priv, dev_priv->wm.cur_latency);
- intel_print_wm_latency(dev, "Primary", dev_priv->wm.pri_latency);
- intel_print_wm_latency(dev, "Sprite", dev_priv->wm.spr_latency);
- intel_print_wm_latency(dev, "Cursor", dev_priv->wm.cur_latency);
+ intel_print_wm_latency(dev_priv, "Primary", dev_priv->wm.pri_latency);
+ intel_print_wm_latency(dev_priv, "Sprite", dev_priv->wm.spr_latency);
+ intel_print_wm_latency(dev_priv, "Cursor", dev_priv->wm.cur_latency);
- if (IS_GEN6(dev))
- snb_wm_latency_quirk(dev);
+ if (IS_GEN6(dev_priv))
+ snb_wm_latency_quirk(dev_priv);
}
-static void skl_setup_wm_latency(struct drm_device *dev)
+static void skl_setup_wm_latency(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
-
- intel_read_wm_latency(dev, dev_priv->wm.skl_latency);
- intel_print_wm_latency(dev, "Gen9 Plane", dev_priv->wm.skl_latency);
+ intel_read_wm_latency(dev_priv, dev_priv->wm.skl_latency);
+ intel_print_wm_latency(dev_priv, "Gen9 Plane", dev_priv->wm.skl_latency);
}
static bool ilk_validate_pipe_wm(struct drm_device *dev,
@@ -2345,7 +2354,7 @@ static int ilk_compute_pipe_wm(struct intel_crtc_state *cstate)
struct intel_plane_state *pristate = NULL;
struct intel_plane_state *sprstate = NULL;
struct intel_plane_state *curstate = NULL;
- int level, max_level = ilk_wm_max_level(dev), usable_level;
+ int level, max_level = ilk_wm_max_level(dev_priv), usable_level;
struct ilk_wm_maximums max;
pipe_wm = &cstate->wm.ilk.optimal;
@@ -2377,7 +2386,7 @@ static int ilk_compute_pipe_wm(struct intel_crtc_state *cstate)
usable_level = max_level;
/* ILK/SNB: LP2+ watermarks only w/o sprites */
- if (INTEL_INFO(dev)->gen <= 6 && pipe_wm->sprites_enabled)
+ if (INTEL_GEN(dev_priv) <= 6 && pipe_wm->sprites_enabled)
usable_level = 1;
/* ILK/SNB/IVB: LP1+ watermarks only w/o scaling */
@@ -2390,13 +2399,13 @@ static int ilk_compute_pipe_wm(struct intel_crtc_state *cstate)
memset(&pipe_wm->wm, 0, sizeof(pipe_wm->wm));
pipe_wm->wm[0] = pipe_wm->raw_wm[0];
- if (IS_HASWELL(dev) || IS_BROADWELL(dev))
+ if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
pipe_wm->linetime = hsw_compute_linetime_wm(cstate);
if (!ilk_validate_pipe_wm(dev, pipe_wm))
return -EINVAL;
- ilk_compute_wm_reg_maximums(dev, 1, &max);
+ ilk_compute_wm_reg_maximums(dev_priv, 1, &max);
for (level = 1; level <= max_level; level++) {
struct intel_wm_level *wm = &pipe_wm->raw_wm[level];
@@ -2432,7 +2441,7 @@ static int ilk_compute_intermediate_wm(struct drm_device *dev,
{
struct intel_pipe_wm *a = &newstate->wm.ilk.intermediate;
struct intel_pipe_wm *b = &intel_crtc->wm.active.ilk;
- int level, max_level = ilk_wm_max_level(dev);
+ int level, max_level = ilk_wm_max_level(to_i915(dev));
/*
* Start with the final, target watermarks, then combine with the
@@ -2516,16 +2525,16 @@ static void ilk_wm_merge(struct drm_device *dev,
struct intel_pipe_wm *merged)
{
struct drm_i915_private *dev_priv = to_i915(dev);
- int level, max_level = ilk_wm_max_level(dev);
+ int level, max_level = ilk_wm_max_level(dev_priv);
int last_enabled_level = max_level;
/* ILK/SNB/IVB: LP1+ watermarks only w/ single pipe */
- if ((INTEL_INFO(dev)->gen <= 6 || IS_IVYBRIDGE(dev)) &&
+ if ((INTEL_GEN(dev_priv) <= 6 || IS_IVYBRIDGE(dev_priv)) &&
config->num_pipes_active > 1)
last_enabled_level = 0;
/* ILK: FBC WM must be disabled always */
- merged->fbc_wm_enabled = INTEL_INFO(dev)->gen >= 6;
+ merged->fbc_wm_enabled = INTEL_GEN(dev_priv) >= 6;
/* merge each WM1+ level */
for (level = 1; level <= max_level; level++) {
@@ -2556,7 +2565,7 @@ static void ilk_wm_merge(struct drm_device *dev,
* What we should check here is whether FBC can be
* enabled sometime later.
*/
- if (IS_GEN5(dev) && !merged->fbc_wm_enabled &&
+ if (IS_GEN5(dev_priv) && !merged->fbc_wm_enabled &&
intel_fbc_is_active(dev_priv)) {
for (level = 2; level <= max_level; level++) {
struct intel_wm_level *wm = &merged->wm[level];
@@ -2577,7 +2586,7 @@ static unsigned int ilk_wm_lp_latency(struct drm_device *dev, int level)
{
struct drm_i915_private *dev_priv = to_i915(dev);
- if (IS_HASWELL(dev) || IS_BROADWELL(dev))
+ if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
return 2 * level;
else
return dev_priv->wm.pri_latency[level];
@@ -2588,6 +2597,7 @@ static void ilk_compute_wm_results(struct drm_device *dev,
enum intel_ddb_partitioning partitioning,
struct ilk_wm_values *results)
{
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *intel_crtc;
int level, wm_lp;
@@ -2614,7 +2624,7 @@ static void ilk_compute_wm_results(struct drm_device *dev,
if (r->enable)
results->wm_lp[wm_lp - 1] |= WM1_LP_SR_EN;
- if (INTEL_INFO(dev)->gen >= 8)
+ if (INTEL_GEN(dev_priv) >= 8)
results->wm_lp[wm_lp - 1] |=
r->fbc_val << WM1_LP_FBC_SHIFT_BDW;
else
@@ -2625,7 +2635,7 @@ static void ilk_compute_wm_results(struct drm_device *dev,
* Always set WM1S_LP_EN when spr_val != 0, even if the
* level is disabled. Doing otherwise could cause underruns.
*/
- if (INTEL_INFO(dev)->gen <= 6 && r->spr_val) {
+ if (INTEL_GEN(dev_priv) <= 6 && r->spr_val) {
WARN_ON(wm_lp != 1);
results->wm_lp_spr[wm_lp - 1] = WM1S_LP_EN | r->spr_val;
} else
@@ -2656,7 +2666,7 @@ static struct intel_pipe_wm *ilk_find_best_result(struct drm_device *dev,
struct intel_pipe_wm *r1,
struct intel_pipe_wm *r2)
{
- int level, max_level = ilk_wm_max_level(dev);
+ int level, max_level = ilk_wm_max_level(to_i915(dev));
int level1 = 0, level2 = 0;
for (level = 1; level <= max_level; level++) {
@@ -2775,7 +2785,6 @@ static bool _ilk_disable_lp_wm(struct drm_i915_private *dev_priv,
static void ilk_write_wm_values(struct drm_i915_private *dev_priv,
struct ilk_wm_values *results)
{
- struct drm_device *dev = &dev_priv->drm;
struct ilk_wm_values *previous = &dev_priv->wm.hw;
unsigned int dirty;
uint32_t val;
@@ -2801,7 +2810,7 @@ static void ilk_write_wm_values(struct drm_i915_private *dev_priv,
I915_WRITE(PIPE_WM_LINETIME(PIPE_C), results->wm_linetime[2]);
if (dirty & WM_DIRTY_DDB) {
- if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
+ if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
val = I915_READ(WM_MISC);
if (results->partitioning == INTEL_DDB_PART_1_2)
val &= ~WM_MISC_DATA_PARTITION_5_6;
@@ -2831,7 +2840,7 @@ static void ilk_write_wm_values(struct drm_i915_private *dev_priv,
previous->wm_lp_spr[0] != results->wm_lp_spr[0])
I915_WRITE(WM1S_LP_ILK, results->wm_lp_spr[0]);
- if (INTEL_INFO(dev)->gen >= 7) {
+ if (INTEL_GEN(dev_priv) >= 7) {
if (dirty & WM_DIRTY_LP(2) && previous->wm_lp_spr[1] != results->wm_lp_spr[1])
I915_WRITE(WM2S_LP_IVB, results->wm_lp_spr[1]);
if (dirty & WM_DIRTY_LP(3) && previous->wm_lp_spr[2] != results->wm_lp_spr[2])
@@ -2879,6 +2888,21 @@ skl_wm_plane_id(const struct intel_plane *plane)
}
}
+/*
+ * FIXME: We still don't have the proper code to detect if we need to apply the WA,
+ * so assume we'll always need it in order to avoid underruns.
+ */
+static bool skl_needs_memory_bw_wa(struct intel_atomic_state *state)
+{
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+
+ if (IS_SKYLAKE(dev_priv) || IS_BROXTON(dev_priv) ||
+ IS_KABYLAKE(dev_priv))
+ return true;
+
+ return false;
+}
+
static bool
intel_has_sagv(struct drm_i915_private *dev_priv)
{
@@ -2940,24 +2964,10 @@ intel_enable_sagv(struct drm_i915_private *dev_priv)
return 0;
}
-static int
-intel_do_sagv_disable(struct drm_i915_private *dev_priv)
-{
- int ret;
- uint32_t temp = GEN9_SAGV_DISABLE;
-
- ret = sandybridge_pcode_read(dev_priv, GEN9_PCODE_SAGV_CONTROL,
- &temp);
- if (ret)
- return ret;
- else
- return temp & GEN9_SAGV_IS_DISABLED;
-}
-
int
intel_disable_sagv(struct drm_i915_private *dev_priv)
{
- int ret, result;
+ int ret;
if (!intel_has_sagv(dev_priv))
return 0;
@@ -2969,25 +2979,23 @@ intel_disable_sagv(struct drm_i915_private *dev_priv)
mutex_lock(&dev_priv->rps.hw_lock);
/* bspec says to keep retrying for at least 1 ms */
- ret = wait_for(result = intel_do_sagv_disable(dev_priv), 1);
+ ret = skl_pcode_request(dev_priv, GEN9_PCODE_SAGV_CONTROL,
+ GEN9_SAGV_DISABLE,
+ GEN9_SAGV_IS_DISABLED, GEN9_SAGV_IS_DISABLED,
+ 1);
mutex_unlock(&dev_priv->rps.hw_lock);
- if (ret == -ETIMEDOUT) {
- DRM_ERROR("Request to disable SAGV timed out\n");
- return -ETIMEDOUT;
- }
-
/*
* Some skl systems, pre-release machines in particular,
* don't actually have an SAGV.
*/
- if (IS_SKYLAKE(dev_priv) && result == -ENXIO) {
+ if (IS_SKYLAKE(dev_priv) && ret == -ENXIO) {
DRM_DEBUG_DRIVER("No SAGV found on system, ignoring\n");
dev_priv->sagv_status = I915_SAGV_NOT_CONTROLLED;
return 0;
- } else if (result < 0) {
- DRM_ERROR("Failed to disable the SAGV\n");
- return result;
+ } else if (ret < 0) {
+ DRM_ERROR("Failed to disable the SAGV (%d)\n", ret);
+ return ret;
}
dev_priv->sagv_status = I915_SAGV_DISABLED;
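skl_pcode_request() replaces the open-coded wait_for(intel_do_sagv_disable()) loop: it issues the mailbox command and keeps polling until the reply, masked, matches the expected value, or the timeout expires. A sketch of that contract as used above; the real helper's internals are an assumption and the delay handling is simplified:

static int pcode_request_sketch(struct drm_i915_private *dev_priv,
				u32 mbox, u32 request,
				u32 reply_mask, u32 reply,
				int timeout_ms)
{
	int ret;

	do {
		u32 status = request;	/* the read also sends the request */

		ret = sandybridge_pcode_read(dev_priv, mbox, &status);
		if (ret)
			return ret;	/* e.g. -ENXIO: no pcode target */
		if ((status & reply_mask) == reply)
			return 0;	/* pcode acknowledged the request */
		udelay(1000);		/* 1 ms between retries (sketch) */
	} while (--timeout_ms);

	return -ETIMEDOUT;
}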
@@ -2999,9 +3007,12 @@ bool intel_can_enable_sagv(struct drm_atomic_state *state)
struct drm_device *dev = state->dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
- struct drm_crtc *crtc;
+ struct intel_crtc *crtc;
+ struct intel_plane *plane;
+ struct intel_crtc_state *cstate;
+ struct skl_plane_wm *wm;
enum pipe pipe;
- int level, plane;
+ int level, latency;
if (!intel_has_sagv(dev_priv))
return false;
@@ -3019,27 +3030,37 @@ bool intel_can_enable_sagv(struct drm_atomic_state *state)
/* Since we're now guaranteed to only have one active CRTC... */
pipe = ffs(intel_state->active_crtcs) - 1;
- crtc = dev_priv->pipe_to_crtc_mapping[pipe];
+ crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
+ cstate = to_intel_crtc_state(crtc->base.state);
- if (crtc->state->mode.flags & DRM_MODE_FLAG_INTERLACE)
+ if (crtc->base.state->adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
return false;
- for_each_plane(dev_priv, pipe, plane) {
+ for_each_intel_plane_on_crtc(dev, crtc, plane) {
+ wm = &cstate->wm.skl.optimal.planes[skl_wm_plane_id(plane)];
+
/* Skip this plane if it's not enabled */
- if (intel_state->wm_results.plane[pipe][plane][0] == 0)
+ if (!wm->wm[0].plane_en)
continue;
/* Find the highest enabled wm level for this plane */
- for (level = ilk_wm_max_level(dev);
- intel_state->wm_results.plane[pipe][plane][level] == 0; --level)
+ for (level = ilk_wm_max_level(dev_priv);
+ !wm->wm[level].plane_en; --level)
{ }
+ latency = dev_priv->wm.skl_latency[level];
+
+ if (skl_needs_memory_bw_wa(intel_state) &&
+ plane->base.state->fb->modifier ==
+ I915_FORMAT_MOD_X_TILED)
+ latency += 15;
+
/*
* If any of the planes on this pipe don't enable wm levels
* that incur memory latencies higher than 30µs, we can't enable
* the SAGV
*/
- if (dev_priv->wm.skl_latency[level] < SKL_SAGV_BLOCK_TIME)
+ if (latency < SKL_SAGV_BLOCK_TIME)
return false;
}
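The check above now pads the highest enabled watermark latency by 15 µs for X-tiled framebuffers when the bandwidth workaround applies, before comparing against SKL_SAGV_BLOCK_TIME (30 µs, per the comment). Condensed, with the constant treated as an assumption:

#define SAGV_BLOCK_TIME_US	30	/* assumed SKL_SAGV_BLOCK_TIME */

/* Can this plane's enabled WM level ride out SAGV memory blocking? */
static bool plane_tolerates_sagv_sketch(int level_latency_us,
					bool needs_bw_wa, bool x_tiled)
{
	int latency = level_latency_us;

	if (needs_bw_wa && x_tiled)
		latency += 15;	/* WA: X-tiled fetch latency penalty */

	return latency >= SAGV_BLOCK_TIME_US;
}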
@@ -3058,7 +3079,6 @@ skl_ddb_get_pipe_allocation_limits(struct drm_device *dev,
struct drm_crtc *for_crtc = cstate->base.crtc;
unsigned int pipe_size, ddb_size;
int nth_active_pipe;
- int pipe = to_intel_crtc(for_crtc)->pipe;
if (WARN_ON(!state) || !cstate->base.active) {
alloc->start = 0;
@@ -3086,7 +3106,11 @@ skl_ddb_get_pipe_allocation_limits(struct drm_device *dev,
* we currently hold.
*/
if (!intel_state->active_pipe_changes) {
- *alloc = dev_priv->wm.skl_hw.ddb.pipe[pipe];
+ /*
+ * alloc may be cleared by clear_intel_crtc_state(),
+ * so copy from the old state to be sure
+ */
+ *alloc = to_intel_crtc_state(for_crtc->state)->wm.skl.ddb;
return;
}
@@ -3129,7 +3153,7 @@ void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv,
if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
continue;
- for_each_plane(dev_priv, pipe, plane) {
+ for_each_universal_plane(dev_priv, pipe, plane) {
val = I915_READ(PLANE_BUF_CFG(pipe, plane));
skl_ddb_entry_init_from_hw(&ddb->plane[pipe][plane],
val);
@@ -3173,7 +3197,7 @@ skl_plane_downscale_amount(const struct intel_plane_state *pstate)
src_h = drm_rect_height(&pstate->base.src);
dst_w = drm_rect_width(&pstate->base.dst);
dst_h = drm_rect_height(&pstate->base.dst);
- if (intel_rotation_90_or_270(pstate->base.rotation))
+ if (drm_rotation_90_or_270(pstate->base.rotation))
swap(dst_w, dst_h);
downscale_h = max(src_h / dst_h, (uint32_t)DRM_PLANE_HELPER_NO_SCALING);
@@ -3204,7 +3228,7 @@ skl_plane_relative_data_rate(const struct intel_crtc_state *cstate,
width = drm_rect_width(&intel_pstate->base.src) >> 16;
height = drm_rect_height(&intel_pstate->base.src) >> 16;
- if (intel_rotation_90_or_270(pstate->rotation))
+ if (drm_rotation_90_or_270(pstate->rotation))
swap(width, height);
/* for planar format */
@@ -3231,49 +3255,39 @@ skl_plane_relative_data_rate(const struct intel_crtc_state *cstate,
* 3 * 4096 * 8192 * 4 < 2^32
*/
static unsigned int
-skl_get_total_relative_data_rate(struct intel_crtc_state *intel_cstate)
+skl_get_total_relative_data_rate(struct intel_crtc_state *intel_cstate,
+ unsigned *plane_data_rate,
+ unsigned *plane_y_data_rate)
{
struct drm_crtc_state *cstate = &intel_cstate->base;
struct drm_atomic_state *state = cstate->state;
- struct drm_crtc *crtc = cstate->crtc;
- struct drm_device *dev = crtc->dev;
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- const struct drm_plane *plane;
+ struct drm_plane *plane;
const struct intel_plane *intel_plane;
- struct drm_plane_state *pstate;
+ const struct drm_plane_state *pstate;
unsigned int rate, total_data_rate = 0;
int id;
- int i;
if (WARN_ON(!state))
return 0;
/* Calculate and cache data rate for each plane */
- for_each_plane_in_state(state, plane, pstate, i) {
+ drm_atomic_crtc_state_for_each_plane_state(plane, pstate, cstate) {
id = skl_wm_plane_id(to_intel_plane(plane));
intel_plane = to_intel_plane(plane);
- if (intel_plane->pipe != intel_crtc->pipe)
- continue;
-
/* packed/uv */
rate = skl_plane_relative_data_rate(intel_cstate,
pstate, 0);
- intel_cstate->wm.skl.plane_data_rate[id] = rate;
+ plane_data_rate[id] = rate;
+
+ total_data_rate += rate;
/* y-plane */
rate = skl_plane_relative_data_rate(intel_cstate,
pstate, 1);
- intel_cstate->wm.skl.plane_y_data_rate[id] = rate;
- }
+ plane_y_data_rate[id] = rate;
- /* Calculate CRTC's total data rate from cached values */
- for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
- int id = skl_wm_plane_id(intel_plane);
-
- /* packed/uv */
- total_data_rate += intel_cstate->wm.skl.plane_data_rate[id];
- total_data_rate += intel_cstate->wm.skl.plane_y_data_rate[id];
+ total_data_rate += rate;
}
return total_data_rate;
@@ -3297,14 +3311,14 @@ skl_ddb_min_alloc(const struct drm_plane_state *pstate,
return 0;
/* For Non Y-tile return 8-blocks */
- if (fb->modifier[0] != I915_FORMAT_MOD_Y_TILED &&
- fb->modifier[0] != I915_FORMAT_MOD_Yf_TILED)
+ if (fb->modifier != I915_FORMAT_MOD_Y_TILED &&
+ fb->modifier != I915_FORMAT_MOD_Yf_TILED)
return 8;
src_w = drm_rect_width(&intel_pstate->base.src) >> 16;
src_h = drm_rect_height(&intel_pstate->base.src) >> 16;
- if (intel_rotation_90_or_270(pstate->rotation))
+ if (drm_rotation_90_or_270(pstate->rotation))
swap(src_w, src_h);
/* Halve UV plane width and height for NV12 */
@@ -3318,7 +3332,7 @@ skl_ddb_min_alloc(const struct drm_plane_state *pstate,
else
plane_bpp = drm_format_plane_cpp(fb->pixel_format, 0);
- if (intel_rotation_90_or_270(pstate->rotation)) {
+ if (drm_rotation_90_or_270(pstate->rotation)) {
switch (plane_bpp) {
case 1:
min_scanlines = 32;
@@ -3342,6 +3356,30 @@ skl_ddb_min_alloc(const struct drm_plane_state *pstate,
return DIV_ROUND_UP((4 * src_w * plane_bpp), 512) * min_scanlines/4 + 3;
}
+static void
+skl_ddb_calc_min(const struct intel_crtc_state *cstate, int num_active,
+ uint16_t *minimum, uint16_t *y_minimum)
+{
+ const struct drm_plane_state *pstate;
+ struct drm_plane *plane;
+
+ drm_atomic_crtc_state_for_each_plane_state(plane, pstate, &cstate->base) {
+ struct intel_plane *intel_plane = to_intel_plane(plane);
+ int id = skl_wm_plane_id(intel_plane);
+
+ if (id == PLANE_CURSOR)
+ continue;
+
+ if (!pstate->visible)
+ continue;
+
+ minimum[id] = skl_ddb_min_alloc(pstate, 0);
+ y_minimum[id] = skl_ddb_min_alloc(pstate, 1);
+ }
+
+ minimum[PLANE_CURSOR] = skl_cursor_allocation(num_active);
+}
+
static int
skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
struct skl_ddb_allocation *ddb /* out */)
@@ -3350,17 +3388,16 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
struct drm_crtc *crtc = cstate->base.crtc;
struct drm_device *dev = crtc->dev;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- struct intel_plane *intel_plane;
- struct drm_plane *plane;
- struct drm_plane_state *pstate;
enum pipe pipe = intel_crtc->pipe;
- struct skl_ddb_entry *alloc = &ddb->pipe[pipe];
- uint16_t alloc_size, start, cursor_blocks;
- uint16_t *minimum = cstate->wm.skl.minimum_blocks;
- uint16_t *y_minimum = cstate->wm.skl.minimum_y_blocks;
+ struct skl_ddb_entry *alloc = &cstate->wm.skl.ddb;
+ uint16_t alloc_size, start;
+ uint16_t minimum[I915_MAX_PLANES] = {};
+ uint16_t y_minimum[I915_MAX_PLANES] = {};
unsigned int total_data_rate;
int num_active;
int id, i;
+ unsigned plane_data_rate[I915_MAX_PLANES] = {};
+ unsigned plane_y_data_rate[I915_MAX_PLANES] = {};
/* Clear the partitioning for disabled planes. */
memset(ddb->plane[pipe], 0, sizeof(ddb->plane[pipe]));
@@ -3370,7 +3407,7 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
return 0;
if (!cstate->base.active) {
- ddb->pipe[pipe].start = ddb->pipe[pipe].end = 0;
+ alloc->start = alloc->end = 0;
return 0;
}
@@ -3381,57 +3418,43 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
return 0;
}
- cursor_blocks = skl_cursor_allocation(num_active);
- ddb->plane[pipe][PLANE_CURSOR].start = alloc->end - cursor_blocks;
- ddb->plane[pipe][PLANE_CURSOR].end = alloc->end;
+ skl_ddb_calc_min(cstate, num_active, minimum, y_minimum);
- alloc_size -= cursor_blocks;
-
- /* 1. Allocate the mininum required blocks for each active plane */
- for_each_plane_in_state(state, plane, pstate, i) {
- intel_plane = to_intel_plane(plane);
- id = skl_wm_plane_id(intel_plane);
-
- if (intel_plane->pipe != pipe)
- continue;
-
- if (!to_intel_plane_state(pstate)->base.visible) {
- minimum[id] = 0;
- y_minimum[id] = 0;
- continue;
- }
- if (plane->type == DRM_PLANE_TYPE_CURSOR) {
- minimum[id] = 0;
- y_minimum[id] = 0;
- continue;
- }
-
- minimum[id] = skl_ddb_min_alloc(pstate, 0);
- y_minimum[id] = skl_ddb_min_alloc(pstate, 1);
- }
+ /*
+ * 1. Allocate the minimum required blocks for each active plane
+ * and allocate the cursor; it doesn't require an extra allocation
+ * proportional to the data rate.
+ */
- for (i = 0; i < PLANE_CURSOR; i++) {
+ for (i = 0; i < I915_MAX_PLANES; i++) {
alloc_size -= minimum[i];
alloc_size -= y_minimum[i];
}
+ ddb->plane[pipe][PLANE_CURSOR].start = alloc->end - minimum[PLANE_CURSOR];
+ ddb->plane[pipe][PLANE_CURSOR].end = alloc->end;
+
/*
* 2. Distribute the remaining space in proportion to the amount of
* data each plane needs to fetch from memory.
*
* FIXME: we may not allocate every single block here.
*/
- total_data_rate = skl_get_total_relative_data_rate(cstate);
+ total_data_rate = skl_get_total_relative_data_rate(cstate,
+ plane_data_rate,
+ plane_y_data_rate);
if (total_data_rate == 0)
return 0;
start = alloc->start;
- for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
+ for (id = 0; id < I915_MAX_PLANES; id++) {
unsigned int data_rate, y_data_rate;
uint16_t plane_blocks, y_plane_blocks = 0;
- int id = skl_wm_plane_id(intel_plane);
- data_rate = cstate->wm.skl.plane_data_rate[id];
+ if (id == PLANE_CURSOR)
+ continue;
+
+ data_rate = plane_data_rate[id];
/*
* allocation for (packed formats) or (uv-plane part of planar format):
@@ -3453,7 +3476,7 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
/*
* allocation for y_plane part of planar format:
*/
- y_data_rate = cstate->wm.skl.plane_y_data_rate[id];
+ y_data_rate = plane_y_data_rate[id];
y_plane_blocks = y_minimum[id];
y_plane_blocks += div_u64((uint64_t)alloc_size * y_data_rate,
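Step 2 hands out whatever is left after the minimums in proportion to each plane's relative data rate; the div_u64() above is the whole algorithm. A hedged, self-contained sketch with invented rates:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint16_t alloc_size = 400;              /* blocks left after minimums */
            unsigned int rate[] = { 3000, 1000 };   /* hypothetical plane data rates */
            unsigned int total = rate[0] + rate[1];

            for (int id = 0; id < 2; id++) {
                    /* Same proportional split as skl_allocate_pipe_ddb(). */
                    uint16_t extra = (uint16_t)((uint64_t)alloc_size * rate[id] / total);
                    printf("plane %d: +%u blocks\n", id, extra);  /* +300, +100 */
            }
            return 0;
    }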
@@ -3549,22 +3572,28 @@ static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
uint32_t width = 0, height = 0;
uint32_t plane_pixel_rate;
uint32_t y_tile_minimum, y_min_scanlines;
+ struct intel_atomic_state *state =
+ to_intel_atomic_state(cstate->base.state);
+ bool apply_memory_bw_wa = skl_needs_memory_bw_wa(state);
if (latency == 0 || !cstate->base.active || !intel_pstate->base.visible) {
*enabled = false;
return 0;
}
+ if (apply_memory_bw_wa && fb->modifier == I915_FORMAT_MOD_X_TILED)
+ latency += 15;
+
width = drm_rect_width(&intel_pstate->base.src) >> 16;
height = drm_rect_height(&intel_pstate->base.src) >> 16;
- if (intel_rotation_90_or_270(pstate->rotation))
+ if (drm_rotation_90_or_270(pstate->rotation))
swap(width, height);
cpp = drm_format_plane_cpp(fb->pixel_format, 0);
plane_pixel_rate = skl_adjusted_plane_pixel_rate(cstate, intel_pstate);
- if (intel_rotation_90_or_270(pstate->rotation)) {
+ if (drm_rotation_90_or_270(pstate->rotation)) {
int cpp = (fb->pixel_format == DRM_FORMAT_NV12) ?
drm_format_plane_cpp(fb->pixel_format, 1) :
drm_format_plane_cpp(fb->pixel_format, 0);
@@ -3576,23 +3605,27 @@ static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
case 2:
y_min_scanlines = 8;
break;
- default:
- WARN(1, "Unsupported pixel depth for rotation");
case 4:
y_min_scanlines = 4;
break;
+ default:
+ MISSING_CASE(cpp);
+ return -EINVAL;
}
} else {
y_min_scanlines = 4;
}
+ if (apply_memory_bw_wa)
+ y_min_scanlines *= 2;
+
plane_bytes_per_line = width * cpp;
- if (fb->modifier[0] == I915_FORMAT_MOD_Y_TILED ||
- fb->modifier[0] == I915_FORMAT_MOD_Yf_TILED) {
+ if (fb->modifier == I915_FORMAT_MOD_Y_TILED ||
+ fb->modifier == I915_FORMAT_MOD_Yf_TILED) {
plane_blocks_per_line =
DIV_ROUND_UP(plane_bytes_per_line * y_min_scanlines, 512);
plane_blocks_per_line /= y_min_scanlines;
- } else if (fb->modifier[0] == DRM_FORMAT_MOD_NONE) {
+ } else if (fb->modifier == DRM_FORMAT_MOD_NONE) {
plane_blocks_per_line = DIV_ROUND_UP(plane_bytes_per_line, 512)
+ 1;
} else {
@@ -3607,11 +3640,14 @@ static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
y_tile_minimum = plane_blocks_per_line * y_min_scanlines;
- if (fb->modifier[0] == I915_FORMAT_MOD_Y_TILED ||
- fb->modifier[0] == I915_FORMAT_MOD_Yf_TILED) {
+ if (fb->modifier == I915_FORMAT_MOD_Y_TILED ||
+ fb->modifier == I915_FORMAT_MOD_Yf_TILED) {
selected_result = max(method2, y_tile_minimum);
} else {
- if ((ddb_allocation / plane_blocks_per_line) >= 1)
+ if ((cpp * cstate->base.adjusted_mode.crtc_htotal / 512 < 1) &&
+ (plane_bytes_per_line / 512 < 1))
+ selected_result = method2;
+ else if ((ddb_allocation / plane_blocks_per_line) >= 1)
selected_result = min(method1, method2);
else
selected_result = method1;
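The new first branch prefers method2 when both integer divisions come out to zero, i.e. when one full line of the plane (and even cpp * htotal bytes) fits inside a single 512-byte DDB block; method1's blocks-per-line arithmetic degenerates there. A restated, self-contained version of just that condition (prefer_method2 is a hypothetical name and the sample values are invented):

    #include <stdbool.h>
    #include <stdio.h>

    static bool prefer_method2(unsigned int cpp, unsigned int htotal,
                               unsigned int plane_bytes_per_line)
    {
            /* True only when a whole line fits in one 512-byte block. */
            return (cpp * htotal / 512 < 1) && (plane_bytes_per_line / 512 < 1);
    }

    int main(void)
    {
            printf("%d\n", prefer_method2(4, 100, 64 * 4));     /* 1: tiny plane */
            printf("%d\n", prefer_method2(4, 2200, 1920 * 4));  /* 0: normal plane */
            return 0;
    }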
@@ -3621,8 +3657,8 @@ static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
res_lines = DIV_ROUND_UP(selected_result, plane_blocks_per_line);
if (level >= 1 && level <= 7) {
- if (fb->modifier[0] == I915_FORMAT_MOD_Y_TILED ||
- fb->modifier[0] == I915_FORMAT_MOD_Yf_TILED) {
+ if (fb->modifier == I915_FORMAT_MOD_Y_TILED ||
+ fb->modifier == I915_FORMAT_MOD_Yf_TILED) {
res_blocks += y_tile_minimum;
res_lines += y_min_scanlines;
} else {
@@ -3661,67 +3697,52 @@ static int
skl_compute_wm_level(const struct drm_i915_private *dev_priv,
struct skl_ddb_allocation *ddb,
struct intel_crtc_state *cstate,
+ struct intel_plane *intel_plane,
int level,
struct skl_wm_level *result)
{
struct drm_atomic_state *state = cstate->base.state;
struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc);
- struct drm_plane *plane;
- struct intel_plane *intel_plane;
- struct intel_plane_state *intel_pstate;
+ struct drm_plane *plane = &intel_plane->base;
+ struct intel_plane_state *intel_pstate = NULL;
uint16_t ddb_blocks;
enum pipe pipe = intel_crtc->pipe;
int ret;
+ int i = skl_wm_plane_id(intel_plane);
+
+ if (state)
+ intel_pstate =
+ intel_atomic_get_existing_plane_state(state,
+ intel_plane);
/*
- * We'll only calculate watermarks for planes that are actually
- * enabled, so make sure all other planes are set as disabled.
+ * Note: If we start supporting multiple pending atomic commits against
+ * the same planes/CRTC's in the future, plane->state will no longer be
+ * the correct pre-state to use for the calculations here and we'll
+ * need to change where we get the 'unchanged' plane data from.
+ *
+ * For now this is fine because we only allow one queued commit against
+ * a CRTC. Even if the plane isn't modified by this transaction and we
+ * don't have a plane lock, we still have the CRTC's lock, so we know
+ * that no other transactions are racing with us to update it.
*/
- memset(result, 0, sizeof(*result));
+ if (!intel_pstate)
+ intel_pstate = to_intel_plane_state(plane->state);
- for_each_intel_plane_mask(&dev_priv->drm,
- intel_plane,
- cstate->base.plane_mask) {
- int i = skl_wm_plane_id(intel_plane);
-
- plane = &intel_plane->base;
- intel_pstate = NULL;
- if (state)
- intel_pstate =
- intel_atomic_get_existing_plane_state(state,
- intel_plane);
-
- /*
- * Note: If we start supporting multiple pending atomic commits
- * against the same planes/CRTC's in the future, plane->state
- * will no longer be the correct pre-state to use for the
- * calculations here and we'll need to change where we get the
- * 'unchanged' plane data from.
- *
- * For now this is fine because we only allow one queued commit
- * against a CRTC. Even if the plane isn't modified by this
- * transaction and we don't have a plane lock, we still have
- * the CRTC's lock, so we know that no other transactions are
- * racing with us to update it.
- */
- if (!intel_pstate)
- intel_pstate = to_intel_plane_state(plane->state);
-
- WARN_ON(!intel_pstate->base.fb);
+ WARN_ON(!intel_pstate->base.fb);
- ddb_blocks = skl_ddb_entry_size(&ddb->plane[pipe][i]);
+ ddb_blocks = skl_ddb_entry_size(&ddb->plane[pipe][i]);
- ret = skl_compute_plane_wm(dev_priv,
- cstate,
- intel_pstate,
- ddb_blocks,
- level,
- &result->plane_res_b[i],
- &result->plane_res_l[i],
- &result->plane_en[i]);
- if (ret)
- return ret;
- }
+ ret = skl_compute_plane_wm(dev_priv,
+ cstate,
+ intel_pstate,
+ ddb_blocks,
+ level,
+ &result->plane_res_b,
+ &result->plane_res_l,
+ &result->plane_en);
+ if (ret)
+ return ret;
return 0;
}
@@ -3729,32 +3750,28 @@ skl_compute_wm_level(const struct drm_i915_private *dev_priv,
static uint32_t
skl_compute_linetime_wm(struct intel_crtc_state *cstate)
{
+ uint32_t pixel_rate;
+
if (!cstate->base.active)
return 0;
- if (WARN_ON(ilk_pipe_pixel_rate(cstate) == 0))
+ pixel_rate = ilk_pipe_pixel_rate(cstate);
+
+ if (WARN_ON(pixel_rate == 0))
return 0;
return DIV_ROUND_UP(8 * cstate->base.adjusted_mode.crtc_htotal * 1000,
- ilk_pipe_pixel_rate(cstate));
+ pixel_rate);
}
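The linetime value ends up in PIPE_WM_LINETIME and appears to be in 0.125 us units, which is where the factor of 8 comes from (the pixel rate is in kHz). A quick self-contained check with 1080p-ish timings:

    #include <stdio.h>

    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    int main(void)
    {
            unsigned int htotal = 2200;        /* example CEA 1080p total */
            unsigned int pixel_rate = 148500;  /* kHz */
            unsigned int linetime;

            linetime = DIV_ROUND_UP(8 * htotal * 1000, pixel_rate);
            printf("%u\n", linetime);  /* 119, i.e. ~14.9 us per line */
            return 0;
    }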
static void skl_compute_transition_wm(struct intel_crtc_state *cstate,
struct skl_wm_level *trans_wm /* out */)
{
- struct drm_crtc *crtc = cstate->base.crtc;
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- struct intel_plane *intel_plane;
-
if (!cstate->base.active)
return;
/* Until we know more, just disable transition WMs */
- for_each_intel_plane_on_crtc(crtc->dev, intel_crtc, intel_plane) {
- int i = skl_wm_plane_id(intel_plane);
-
- trans_wm->plane_en[i] = false;
- }
+ trans_wm->plane_en = false;
}
static int skl_build_pipe_wm(struct intel_crtc_state *cstate,
@@ -3763,77 +3780,34 @@ static int skl_build_pipe_wm(struct intel_crtc_state *cstate,
{
struct drm_device *dev = cstate->base.crtc->dev;
const struct drm_i915_private *dev_priv = to_i915(dev);
- int level, max_level = ilk_wm_max_level(dev);
+ struct intel_plane *intel_plane;
+ struct skl_plane_wm *wm;
+ int level, max_level = ilk_wm_max_level(dev_priv);
int ret;
- for (level = 0; level <= max_level; level++) {
- ret = skl_compute_wm_level(dev_priv, ddb, cstate,
- level, &pipe_wm->wm[level]);
- if (ret)
- return ret;
- }
- pipe_wm->linetime = skl_compute_linetime_wm(cstate);
-
- skl_compute_transition_wm(cstate, &pipe_wm->trans_wm);
-
- return 0;
-}
-
-static void skl_compute_wm_results(struct drm_device *dev,
- struct skl_pipe_wm *p_wm,
- struct skl_wm_values *r,
- struct intel_crtc *intel_crtc)
-{
- int level, max_level = ilk_wm_max_level(dev);
- enum pipe pipe = intel_crtc->pipe;
- uint32_t temp;
- int i;
-
- for (level = 0; level <= max_level; level++) {
- for (i = 0; i < intel_num_planes(intel_crtc); i++) {
- temp = 0;
-
- temp |= p_wm->wm[level].plane_res_l[i] <<
- PLANE_WM_LINES_SHIFT;
- temp |= p_wm->wm[level].plane_res_b[i];
- if (p_wm->wm[level].plane_en[i])
- temp |= PLANE_WM_EN;
+ /*
+ * We'll only calculate watermarks for planes that are actually
+ * enabled, so make sure all other planes are set as disabled.
+ */
+ memset(pipe_wm->planes, 0, sizeof(pipe_wm->planes));
- r->plane[pipe][i][level] = temp;
+ for_each_intel_plane_mask(&dev_priv->drm,
+ intel_plane,
+ cstate->base.plane_mask) {
+ wm = &pipe_wm->planes[skl_wm_plane_id(intel_plane)];
+
+ for (level = 0; level <= max_level; level++) {
+ ret = skl_compute_wm_level(dev_priv, ddb, cstate,
+ intel_plane, level,
+ &wm->wm[level]);
+ if (ret)
+ return ret;
}
-
- temp = 0;
-
- temp |= p_wm->wm[level].plane_res_l[PLANE_CURSOR] << PLANE_WM_LINES_SHIFT;
- temp |= p_wm->wm[level].plane_res_b[PLANE_CURSOR];
-
- if (p_wm->wm[level].plane_en[PLANE_CURSOR])
- temp |= PLANE_WM_EN;
-
- r->plane[pipe][PLANE_CURSOR][level] = temp;
-
- }
-
- /* transition WMs */
- for (i = 0; i < intel_num_planes(intel_crtc); i++) {
- temp = 0;
- temp |= p_wm->trans_wm.plane_res_l[i] << PLANE_WM_LINES_SHIFT;
- temp |= p_wm->trans_wm.plane_res_b[i];
- if (p_wm->trans_wm.plane_en[i])
- temp |= PLANE_WM_EN;
-
- r->plane_trans[pipe][i] = temp;
+ skl_compute_transition_wm(cstate, &wm->trans_wm);
}
+ pipe_wm->linetime = skl_compute_linetime_wm(cstate);
- temp = 0;
- temp |= p_wm->trans_wm.plane_res_l[PLANE_CURSOR] << PLANE_WM_LINES_SHIFT;
- temp |= p_wm->trans_wm.plane_res_b[PLANE_CURSOR];
- if (p_wm->trans_wm.plane_en[PLANE_CURSOR])
- temp |= PLANE_WM_EN;
-
- r->plane_trans[pipe][PLANE_CURSOR] = temp;
-
- r->wm_linetime[pipe] = p_wm->linetime;
+ return 0;
}
static void skl_ddb_entry_write(struct drm_i915_private *dev_priv,
@@ -3846,53 +3820,77 @@ static void skl_ddb_entry_write(struct drm_i915_private *dev_priv,
I915_WRITE(reg, 0);
}
-void skl_write_plane_wm(struct intel_crtc *intel_crtc,
- const struct skl_wm_values *wm,
- int plane)
+static void skl_write_wm_level(struct drm_i915_private *dev_priv,
+ i915_reg_t reg,
+ const struct skl_wm_level *level)
+{
+ uint32_t val = 0;
+
+ if (level->plane_en) {
+ val |= PLANE_WM_EN;
+ val |= level->plane_res_b;
+ val |= level->plane_res_l << PLANE_WM_LINES_SHIFT;
+ }
+
+ I915_WRITE(reg, val);
+}
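skl_write_wm_level() packs the enable bit, block count, and line count of one watermark level into a single register value; skl_wm_level_from_reg_val() further down is its inverse. A self-contained round-trip sketch (the shift/mask constants below are illustrative stand-ins for the i915_reg.h definitions):

    #include <assert.h>
    #include <stdint.h>

    #define PLANE_WM_EN             (1u << 31)  /* assumed field layout */
    #define PLANE_WM_LINES_SHIFT    14
    #define PLANE_WM_LINES_MASK     0x1f
    #define PLANE_WM_BLOCKS_MASK    0x3ff

    int main(void)
    {
            uint32_t res_b = 42, res_l = 3, val = 0;

            /* Encode, mirroring skl_write_wm_level(). */
            val |= PLANE_WM_EN;
            val |= res_b;
            val |= res_l << PLANE_WM_LINES_SHIFT;

            /* Decode, mirroring skl_wm_level_from_reg_val(). */
            assert((val & PLANE_WM_BLOCKS_MASK) == res_b);
            assert(((val >> PLANE_WM_LINES_SHIFT) & PLANE_WM_LINES_MASK) == res_l);
            return 0;
    }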
+
+static void skl_write_plane_wm(struct intel_crtc *intel_crtc,
+ const struct skl_plane_wm *wm,
+ const struct skl_ddb_allocation *ddb,
+ int plane)
{
struct drm_crtc *crtc = &intel_crtc->base;
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- int level, max_level = ilk_wm_max_level(dev);
+ int level, max_level = ilk_wm_max_level(dev_priv);
enum pipe pipe = intel_crtc->pipe;
for (level = 0; level <= max_level; level++) {
- I915_WRITE(PLANE_WM(pipe, plane, level),
- wm->plane[pipe][plane][level]);
+ skl_write_wm_level(dev_priv, PLANE_WM(pipe, plane, level),
+ &wm->wm[level]);
}
- I915_WRITE(PLANE_WM_TRANS(pipe, plane), wm->plane_trans[pipe][plane]);
+ skl_write_wm_level(dev_priv, PLANE_WM_TRANS(pipe, plane),
+ &wm->trans_wm);
skl_ddb_entry_write(dev_priv, PLANE_BUF_CFG(pipe, plane),
- &wm->ddb.plane[pipe][plane]);
+ &ddb->plane[pipe][plane]);
skl_ddb_entry_write(dev_priv, PLANE_NV12_BUF_CFG(pipe, plane),
- &wm->ddb.y_plane[pipe][plane]);
+ &ddb->y_plane[pipe][plane]);
}
-void skl_write_cursor_wm(struct intel_crtc *intel_crtc,
- const struct skl_wm_values *wm)
+static void skl_write_cursor_wm(struct intel_crtc *intel_crtc,
+ const struct skl_plane_wm *wm,
+ const struct skl_ddb_allocation *ddb)
{
struct drm_crtc *crtc = &intel_crtc->base;
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- int level, max_level = ilk_wm_max_level(dev);
+ int level, max_level = ilk_wm_max_level(dev_priv);
enum pipe pipe = intel_crtc->pipe;
for (level = 0; level <= max_level; level++) {
- I915_WRITE(CUR_WM(pipe, level),
- wm->plane[pipe][PLANE_CURSOR][level]);
+ skl_write_wm_level(dev_priv, CUR_WM(pipe, level),
+ &wm->wm[level]);
}
- I915_WRITE(CUR_WM_TRANS(pipe), wm->plane_trans[pipe][PLANE_CURSOR]);
+ skl_write_wm_level(dev_priv, CUR_WM_TRANS(pipe), &wm->trans_wm);
skl_ddb_entry_write(dev_priv, CUR_BUF_CFG(pipe),
- &wm->ddb.plane[pipe][PLANE_CURSOR]);
+ &ddb->plane[pipe][PLANE_CURSOR]);
}
-bool skl_ddb_allocation_equals(const struct skl_ddb_allocation *old,
- const struct skl_ddb_allocation *new,
- enum pipe pipe)
+bool skl_wm_level_equals(const struct skl_wm_level *l1,
+ const struct skl_wm_level *l2)
{
- return new->pipe[pipe].start == old->pipe[pipe].start &&
- new->pipe[pipe].end == old->pipe[pipe].end;
+ if (l1->plane_en != l2->plane_en)
+ return false;
+
+ /* If the plane is disabled in both levels, the rest doesn't matter */
+ if (!l1->plane_en)
+ return true;
+
+ return (l1->plane_res_l == l2->plane_res_l &&
+ l1->plane_res_b == l2->plane_res_b);
}
static inline bool skl_ddb_entries_overlap(const struct skl_ddb_entry *a,
@@ -3901,35 +3899,26 @@ static inline bool skl_ddb_entries_overlap(const struct skl_ddb_entry *a,
return a->start < b->end && b->start < a->end;
}
-bool skl_ddb_allocation_overlaps(struct drm_atomic_state *state,
- const struct skl_ddb_allocation *old,
- const struct skl_ddb_allocation *new,
- enum pipe pipe)
+bool skl_ddb_allocation_overlaps(const struct skl_ddb_entry **entries,
+ const struct skl_ddb_entry *ddb,
+ int ignore)
{
- struct drm_device *dev = state->dev;
- struct intel_crtc *intel_crtc;
- enum pipe otherp;
-
- for_each_intel_crtc(dev, intel_crtc) {
- otherp = intel_crtc->pipe;
-
- if (otherp == pipe)
- continue;
+ int i;
- if (skl_ddb_entries_overlap(&new->pipe[pipe],
- &old->pipe[otherp]))
+ for (i = 0; i < I915_MAX_PIPES; i++)
+ if (i != ignore && entries[i] &&
+ skl_ddb_entries_overlap(ddb, entries[i]))
return true;
- }
return false;
}
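The rewritten helper compares one pipe's proposed allocation against every other pipe's committed entry, skipping its own slot via @ignore. The underlying test is the usual half-open interval predicate, so allocations that merely touch do not count as overlapping. A self-contained sketch:

    #include <stdbool.h>
    #include <stdio.h>

    struct ddb_entry { unsigned int start, end; };  /* [start, end) */

    static bool entries_overlap(const struct ddb_entry *a,
                                const struct ddb_entry *b)
    {
            /* Same test as skl_ddb_entries_overlap(). */
            return a->start < b->end && b->start < a->end;
    }

    int main(void)
    {
            struct ddb_entry a = { 0, 200 }, b = { 200, 400 }, c = { 150, 250 };

            printf("%d %d\n", entries_overlap(&a, &b),   /* 0: touching only */
                              entries_overlap(&a, &c));  /* 1: real overlap */
            return 0;
    }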
static int skl_update_pipe_wm(struct drm_crtc_state *cstate,
- struct skl_ddb_allocation *ddb, /* out */
+ const struct skl_pipe_wm *old_pipe_wm,
struct skl_pipe_wm *pipe_wm, /* out */
+ struct skl_ddb_allocation *ddb, /* out */
bool *changed /* out */)
{
- struct intel_crtc *intel_crtc = to_intel_crtc(cstate->crtc);
struct intel_crtc_state *intel_cstate = to_intel_crtc_state(cstate);
int ret;
@@ -3937,7 +3926,7 @@ static int skl_update_pipe_wm(struct drm_crtc_state *cstate,
if (ret)
return ret;
- if (!memcmp(&intel_crtc->wm.active.skl, pipe_wm, sizeof(*pipe_wm)))
+ if (!memcmp(old_pipe_wm, pipe_wm, sizeof(*pipe_wm)))
*changed = false;
else
*changed = true;
@@ -3958,7 +3947,7 @@ pipes_modified(struct drm_atomic_state *state)
return ret;
}
-int
+static int
skl_ddb_add_affected_planes(struct intel_crtc_state *cstate)
{
struct drm_atomic_state *state = cstate->base.state;
@@ -3976,7 +3965,7 @@ skl_ddb_add_affected_planes(struct intel_crtc_state *cstate)
WARN_ON(!drm_atomic_get_existing_crtc_state(state, crtc));
- drm_for_each_plane_mask(plane, dev, crtc->state->plane_mask) {
+ drm_for_each_plane_mask(plane, dev, cstate->base.plane_mask) {
id = skl_wm_plane_id(to_intel_plane(plane));
if (skl_ddb_entry_equal(&cur_ddb->plane[pipe][id],
@@ -4076,19 +4065,50 @@ skl_copy_wm_for_pipe(struct skl_wm_values *dst,
struct skl_wm_values *src,
enum pipe pipe)
{
- dst->wm_linetime[pipe] = src->wm_linetime[pipe];
- memcpy(dst->plane[pipe], src->plane[pipe],
- sizeof(dst->plane[pipe]));
- memcpy(dst->plane_trans[pipe], src->plane_trans[pipe],
- sizeof(dst->plane_trans[pipe]));
-
- dst->ddb.pipe[pipe] = src->ddb.pipe[pipe];
memcpy(dst->ddb.y_plane[pipe], src->ddb.y_plane[pipe],
sizeof(dst->ddb.y_plane[pipe]));
memcpy(dst->ddb.plane[pipe], src->ddb.plane[pipe],
sizeof(dst->ddb.plane[pipe]));
}
+static void
+skl_print_wm_changes(const struct drm_atomic_state *state)
+{
+ const struct drm_device *dev = state->dev;
+ const struct drm_i915_private *dev_priv = to_i915(dev);
+ const struct intel_atomic_state *intel_state =
+ to_intel_atomic_state(state);
+ const struct drm_crtc *crtc;
+ const struct drm_crtc_state *cstate;
+ const struct intel_plane *intel_plane;
+ const struct skl_ddb_allocation *old_ddb = &dev_priv->wm.skl_hw.ddb;
+ const struct skl_ddb_allocation *new_ddb = &intel_state->wm_results.ddb;
+ int id;
+ int i;
+
+ for_each_crtc_in_state(state, crtc, cstate, i) {
+ const struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ enum pipe pipe = intel_crtc->pipe;
+
+ for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
+ const struct skl_ddb_entry *old, *new;
+
+ id = skl_wm_plane_id(intel_plane);
+ old = &old_ddb->plane[pipe][id];
+ new = &new_ddb->plane[pipe][id];
+
+ if (skl_ddb_entry_equal(old, new))
+ continue;
+
+ DRM_DEBUG_ATOMIC("[PLANE:%d:%s] ddb (%d - %d) -> (%d - %d)\n",
+ intel_plane->base.base.id,
+ intel_plane->base.name,
+ old->start, old->end,
+ new->start, new->end);
+ }
+ }
+}
+
static int
skl_compute_wm(struct drm_atomic_state *state)
{
@@ -4131,13 +4151,14 @@ skl_compute_wm(struct drm_atomic_state *state)
* no suitable watermark values can be found.
*/
for_each_crtc_in_state(state, crtc, cstate, i) {
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_crtc_state *intel_cstate =
to_intel_crtc_state(cstate);
+ const struct skl_pipe_wm *old_pipe_wm =
+ &to_intel_crtc_state(crtc->state)->wm.skl.optimal;
pipe_wm = &intel_cstate->wm.skl.optimal;
- ret = skl_update_pipe_wm(cstate, &results->ddb, pipe_wm,
- &changed);
+ ret = skl_update_pipe_wm(cstate, old_pipe_wm, pipe_wm,
+ &results->ddb, &changed);
if (ret)
return ret;
@@ -4149,44 +4170,51 @@ skl_compute_wm(struct drm_atomic_state *state)
continue;
intel_cstate->update_wm_pre = true;
- skl_compute_wm_results(crtc->dev, pipe_wm, results, intel_crtc);
}
+ skl_print_wm_changes(state);
+
return 0;
}
-static void skl_update_wm(struct drm_crtc *crtc)
+static void skl_atomic_update_crtc_wm(struct intel_atomic_state *state,
+ struct intel_crtc_state *cstate)
{
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct skl_wm_values *results = &dev_priv->wm.skl_results;
- struct skl_wm_values *hw_vals = &dev_priv->wm.skl_hw;
- struct intel_crtc_state *cstate = to_intel_crtc_state(crtc->state);
+ struct intel_crtc *crtc = to_intel_crtc(cstate->base.crtc);
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
struct skl_pipe_wm *pipe_wm = &cstate->wm.skl.optimal;
- enum pipe pipe = intel_crtc->pipe;
+ const struct skl_ddb_allocation *ddb = &state->wm_results.ddb;
+ enum pipe pipe = crtc->pipe;
+ int plane;
- if ((results->dirty_pipes & drm_crtc_mask(crtc)) == 0)
+ if (!(state->wm_results.dirty_pipes & drm_crtc_mask(&crtc->base)))
return;
- intel_crtc->wm.active.skl = *pipe_wm;
+ I915_WRITE(PIPE_WM_LINETIME(pipe), pipe_wm->linetime);
- mutex_lock(&dev_priv->wm.wm_mutex);
+ for_each_universal_plane(dev_priv, pipe, plane)
+ skl_write_plane_wm(crtc, &pipe_wm->planes[plane], ddb, plane);
- /*
- * If this pipe isn't active already, we're going to be enabling it
- * very soon. Since it's safe to update a pipe's ddb allocation while
- * the pipe's shut off, just do so here. Already active pipes will have
- * their watermarks updated once we update their planes.
- */
- if (crtc->state->active_changed) {
- int plane;
+ skl_write_cursor_wm(crtc, &pipe_wm->planes[PLANE_CURSOR], ddb);
+}
+
+static void skl_initial_wm(struct intel_atomic_state *state,
+ struct intel_crtc_state *cstate)
+{
+ struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc);
+ struct drm_device *dev = intel_crtc->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct skl_wm_values *results = &state->wm_results;
+ struct skl_wm_values *hw_vals = &dev_priv->wm.skl_hw;
+ enum pipe pipe = intel_crtc->pipe;
+
+ if ((results->dirty_pipes & drm_crtc_mask(&intel_crtc->base)) == 0)
+ return;
- for (plane = 0; plane < intel_num_planes(intel_crtc); plane++)
- skl_write_plane_wm(intel_crtc, results, plane);
+ mutex_lock(&dev_priv->wm.wm_mutex);
- skl_write_cursor_wm(intel_crtc, results);
- }
+ if (cstate->base.active_changed)
+ skl_atomic_update_crtc_wm(state, cstate);
skl_copy_wm_for_pipe(hw_vals, results, pipe);
@@ -4226,7 +4254,7 @@ static void ilk_program_watermarks(struct drm_i915_private *dev_priv)
ilk_wm_merge(dev, &config, &max, &lp_wm_1_2);
/* 5/6 split only in single pipe config on IVB+ */
- if (INTEL_INFO(dev)->gen >= 7 &&
+ if (INTEL_GEN(dev_priv) >= 7 &&
config.num_pipes_active == 1 && config.sprites_enabled) {
ilk_compute_wm_maximums(dev, 1, &config, INTEL_DDB_PART_5_6, &max);
ilk_wm_merge(dev, &config, &max, &lp_wm_5_6);
@@ -4244,7 +4272,8 @@ static void ilk_program_watermarks(struct drm_i915_private *dev_priv)
ilk_write_wm_values(dev_priv, &results);
}
-static void ilk_initial_watermarks(struct intel_crtc_state *cstate)
+static void ilk_initial_watermarks(struct intel_atomic_state *state,
+ struct intel_crtc_state *cstate)
{
struct drm_i915_private *dev_priv = to_i915(cstate->base.crtc->dev);
struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc);
@@ -4255,7 +4284,8 @@ static void ilk_initial_watermarks(struct intel_crtc_state *cstate)
mutex_unlock(&dev_priv->wm.wm_mutex);
}
-static void ilk_optimize_watermarks(struct intel_crtc_state *cstate)
+static void ilk_optimize_watermarks(struct intel_atomic_state *state,
+ struct intel_crtc_state *cstate)
{
struct drm_i915_private *dev_priv = to_i915(cstate->base.crtc->dev);
struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc);
@@ -4268,114 +4298,75 @@ static void ilk_optimize_watermarks(struct intel_crtc_state *cstate)
mutex_unlock(&dev_priv->wm.wm_mutex);
}
-static void skl_pipe_wm_active_state(uint32_t val,
- struct skl_pipe_wm *active,
- bool is_transwm,
- bool is_cursor,
- int i,
- int level)
+static inline void skl_wm_level_from_reg_val(uint32_t val,
+ struct skl_wm_level *level)
{
- bool is_enabled = (val & PLANE_WM_EN) != 0;
-
- if (!is_transwm) {
- if (!is_cursor) {
- active->wm[level].plane_en[i] = is_enabled;
- active->wm[level].plane_res_b[i] =
- val & PLANE_WM_BLOCKS_MASK;
- active->wm[level].plane_res_l[i] =
- (val >> PLANE_WM_LINES_SHIFT) &
- PLANE_WM_LINES_MASK;
- } else {
- active->wm[level].plane_en[PLANE_CURSOR] = is_enabled;
- active->wm[level].plane_res_b[PLANE_CURSOR] =
- val & PLANE_WM_BLOCKS_MASK;
- active->wm[level].plane_res_l[PLANE_CURSOR] =
- (val >> PLANE_WM_LINES_SHIFT) &
- PLANE_WM_LINES_MASK;
- }
- } else {
- if (!is_cursor) {
- active->trans_wm.plane_en[i] = is_enabled;
- active->trans_wm.plane_res_b[i] =
- val & PLANE_WM_BLOCKS_MASK;
- active->trans_wm.plane_res_l[i] =
- (val >> PLANE_WM_LINES_SHIFT) &
- PLANE_WM_LINES_MASK;
- } else {
- active->trans_wm.plane_en[PLANE_CURSOR] = is_enabled;
- active->trans_wm.plane_res_b[PLANE_CURSOR] =
- val & PLANE_WM_BLOCKS_MASK;
- active->trans_wm.plane_res_l[PLANE_CURSOR] =
- (val >> PLANE_WM_LINES_SHIFT) &
- PLANE_WM_LINES_MASK;
- }
- }
+ level->plane_en = val & PLANE_WM_EN;
+ level->plane_res_b = val & PLANE_WM_BLOCKS_MASK;
+ level->plane_res_l = (val >> PLANE_WM_LINES_SHIFT) &
+ PLANE_WM_LINES_MASK;
}
-static void skl_pipe_wm_get_hw_state(struct drm_crtc *crtc)
+void skl_pipe_wm_get_hw_state(struct drm_crtc *crtc,
+ struct skl_pipe_wm *out)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- struct skl_wm_values *hw = &dev_priv->wm.skl_hw;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- struct intel_crtc_state *cstate = to_intel_crtc_state(crtc->state);
- struct skl_pipe_wm *active = &cstate->wm.skl.optimal;
+ struct intel_plane *intel_plane;
+ struct skl_plane_wm *wm;
enum pipe pipe = intel_crtc->pipe;
- int level, i, max_level;
- uint32_t temp;
-
- max_level = ilk_wm_max_level(dev);
-
- hw->wm_linetime[pipe] = I915_READ(PIPE_WM_LINETIME(pipe));
-
- for (level = 0; level <= max_level; level++) {
- for (i = 0; i < intel_num_planes(intel_crtc); i++)
- hw->plane[pipe][i][level] =
- I915_READ(PLANE_WM(pipe, i, level));
- hw->plane[pipe][PLANE_CURSOR][level] = I915_READ(CUR_WM(pipe, level));
- }
+ int level, id, max_level;
+ uint32_t val;
- for (i = 0; i < intel_num_planes(intel_crtc); i++)
- hw->plane_trans[pipe][i] = I915_READ(PLANE_WM_TRANS(pipe, i));
- hw->plane_trans[pipe][PLANE_CURSOR] = I915_READ(CUR_WM_TRANS(pipe));
+ max_level = ilk_wm_max_level(dev_priv);
- if (!intel_crtc->active)
- return;
-
- hw->dirty_pipes |= drm_crtc_mask(crtc);
+ for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
+ id = skl_wm_plane_id(intel_plane);
+ wm = &out->planes[id];
- active->linetime = hw->wm_linetime[pipe];
+ for (level = 0; level <= max_level; level++) {
+ if (id != PLANE_CURSOR)
+ val = I915_READ(PLANE_WM(pipe, id, level));
+ else
+ val = I915_READ(CUR_WM(pipe, level));
- for (level = 0; level <= max_level; level++) {
- for (i = 0; i < intel_num_planes(intel_crtc); i++) {
- temp = hw->plane[pipe][i][level];
- skl_pipe_wm_active_state(temp, active, false,
- false, i, level);
+ skl_wm_level_from_reg_val(val, &wm->wm[level]);
}
- temp = hw->plane[pipe][PLANE_CURSOR][level];
- skl_pipe_wm_active_state(temp, active, false, true, i, level);
- }
- for (i = 0; i < intel_num_planes(intel_crtc); i++) {
- temp = hw->plane_trans[pipe][i];
- skl_pipe_wm_active_state(temp, active, true, false, i, 0);
+ if (id != PLANE_CURSOR)
+ val = I915_READ(PLANE_WM_TRANS(pipe, id));
+ else
+ val = I915_READ(CUR_WM_TRANS(pipe));
+
+ skl_wm_level_from_reg_val(val, &wm->trans_wm);
}
- temp = hw->plane_trans[pipe][PLANE_CURSOR];
- skl_pipe_wm_active_state(temp, active, true, true, i, 0);
+ if (!intel_crtc->active)
+ return;
- intel_crtc->wm.active.skl = *active;
+ out->linetime = I915_READ(PIPE_WM_LINETIME(pipe));
}
void skl_wm_get_hw_state(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
+ struct skl_wm_values *hw = &dev_priv->wm.skl_hw;
struct skl_ddb_allocation *ddb = &dev_priv->wm.skl_hw.ddb;
struct drm_crtc *crtc;
+ struct intel_crtc *intel_crtc;
+ struct intel_crtc_state *cstate;
skl_ddb_get_hw_state(dev_priv, ddb);
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
- skl_pipe_wm_get_hw_state(crtc);
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ intel_crtc = to_intel_crtc(crtc);
+ cstate = to_intel_crtc_state(crtc->state);
+
+ skl_pipe_wm_get_hw_state(crtc, &cstate->wm.skl.optimal);
+
+ if (intel_crtc->active)
+ hw->dirty_pipes |= drm_crtc_mask(crtc);
+ }
if (dev_priv->active_crtcs) {
/* Fully recompute DDB on first atomic commit */
@@ -4402,7 +4393,7 @@ static void ilk_pipe_wm_get_hw_state(struct drm_crtc *crtc)
};
hw->wm_pipe[pipe] = I915_READ(wm0_pipe_reg[pipe]);
- if (IS_HASWELL(dev) || IS_BROADWELL(dev))
+ if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
hw->wm_linetime[pipe] = I915_READ(PIPE_WM_LINETIME(pipe));
memset(active, 0, sizeof(*active));
@@ -4424,7 +4415,7 @@ static void ilk_pipe_wm_get_hw_state(struct drm_crtc *crtc)
active->wm[0].cur_val = tmp & WM0_PIPE_CURSOR_MASK;
active->linetime = hw->wm_linetime[pipe];
} else {
- int level, max_level = ilk_wm_max_level(dev);
+ int level, max_level = ilk_wm_max_level(dev_priv);
/*
* For inactive pipes, all watermark levels
@@ -4536,11 +4527,11 @@ void vlv_wm_get_hw_state(struct drm_device *dev)
plane->wm.fifo_size = 63;
break;
case DRM_PLANE_TYPE_PRIMARY:
- plane->wm.fifo_size = vlv_get_fifo_size(dev, plane->pipe, 0);
+ plane->wm.fifo_size = vlv_get_fifo_size(dev_priv, plane->pipe, 0);
break;
case DRM_PLANE_TYPE_OVERLAY:
sprite = plane->plane;
- plane->wm.fifo_size = vlv_get_fifo_size(dev, plane->pipe, sprite + 1);
+ plane->wm.fifo_size = vlv_get_fifo_size(dev_priv, plane->pipe, sprite + 1);
break;
}
}
@@ -4605,15 +4596,15 @@ void ilk_wm_get_hw_state(struct drm_device *dev)
hw->wm_lp[2] = I915_READ(WM3_LP_ILK);
hw->wm_lp_spr[0] = I915_READ(WM1S_LP_ILK);
- if (INTEL_INFO(dev)->gen >= 7) {
+ if (INTEL_GEN(dev_priv) >= 7) {
hw->wm_lp_spr[1] = I915_READ(WM2S_LP_IVB);
hw->wm_lp_spr[2] = I915_READ(WM3S_LP_IVB);
}
- if (IS_HASWELL(dev) || IS_BROADWELL(dev))
+ if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
hw->partitioning = (I915_READ(WM_MISC) & WM_MISC_DATA_PARTITION_5_6) ?
INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2;
- else if (IS_IVYBRIDGE(dev))
+ else if (IS_IVYBRIDGE(dev_priv))
hw->partitioning = (I915_READ(DISP_ARB_CTL2) & DISP_DATA_PARTITION_5_6) ?
INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2;
@@ -4653,9 +4644,9 @@ void ilk_wm_get_hw_state(struct drm_device *dev)
* We don't use the sprite, so we can ignore that. And on Crestline we have
* to set the non-SR watermarks to 8.
*/
-void intel_update_watermarks(struct drm_crtc *crtc)
+void intel_update_watermarks(struct intel_crtc *crtc)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
if (dev_priv->display.update_wm)
dev_priv->display.update_wm(crtc);
@@ -5357,6 +5348,7 @@ static void gen9_enable_rps(struct drm_i915_private *dev_priv)
static void gen9_enable_rc6(struct drm_i915_private *dev_priv)
{
struct intel_engine_cs *engine;
+ enum intel_engine_id id;
uint32_t rc6_mask = 0;
/* 1a: Software RC state - RC0 */
@@ -5378,7 +5370,7 @@ static void gen9_enable_rc6(struct drm_i915_private *dev_priv)
I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 54 << 16);
I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
- for_each_engine(engine, dev_priv)
+ for_each_engine(engine, dev_priv, id)
I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10);
if (HAS_GUC(dev_priv))
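All the RC6/RPS setup paths below gain an `enum intel_engine_id id` local because for_each_engine() now takes the index explicitly; the engine list became a sparse, id-indexed pointer array in this series (see the `dev_priv->engine[RCS]` hunk further down). A toy userspace mock of that shape, with the dev_priv argument dropped for brevity:

    #include <stdio.h>

    enum intel_engine_id { RCS, BCS, VCS, NUM_ENGINES };
    struct intel_engine_cs { const char *name; };

    static struct intel_engine_cs *engines[NUM_ENGINES];  /* sparse, NULLs allowed */

    /* Mock: yields both the engine pointer and its id, skipping holes. */
    #define for_each_engine(engine__, id__) \
            for ((id__) = 0; (id__) < NUM_ENGINES; (id__)++) \
                    if (((engine__) = engines[(id__)]) != NULL)

    int main(void)
    {
            struct intel_engine_cs rcs = { "rcs" }, vcs = { "vcs" };
            struct intel_engine_cs *engine;
            enum intel_engine_id id;

            engines[RCS] = &rcs;
            engines[VCS] = &vcs;
            for_each_engine(engine, id)
                    printf("%d: %s\n", id, engine->name);  /* 0: rcs, 2: vcs */
            return 0;
    }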
@@ -5394,9 +5386,8 @@ static void gen9_enable_rc6(struct drm_i915_private *dev_priv)
if (intel_enable_rc6() & INTEL_RC6_ENABLE)
rc6_mask = GEN6_RC_CTL_RC6_ENABLE;
DRM_INFO("RC6 %s\n", onoff(rc6_mask & GEN6_RC_CTL_RC6_ENABLE));
- /* WaRsUseTimeoutMode */
- if (IS_SKL_REVID(dev_priv, 0, SKL_REVID_D0) ||
- IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) {
+ /* WaRsUseTimeoutMode:bxt */
+ if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) {
I915_WRITE(GEN6_RC6_THRESHOLD, 625); /* 800us */
I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE |
GEN7_RC_CTL_TO_MODE |
@@ -5424,6 +5415,7 @@ static void gen9_enable_rc6(struct drm_i915_private *dev_priv)
static void gen8_enable_rps(struct drm_i915_private *dev_priv)
{
struct intel_engine_cs *engine;
+ enum intel_engine_id id;
uint32_t rc6_mask = 0;
/* 1a: Software RC state - RC0 */
@@ -5440,7 +5432,7 @@ static void gen8_enable_rps(struct drm_i915_private *dev_priv)
I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16);
I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
- for_each_engine(engine, dev_priv)
+ for_each_engine(engine, dev_priv, id)
I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10);
I915_WRITE(GEN6_RC_SLEEP, 0);
if (IS_BROADWELL(dev_priv))
@@ -5500,6 +5492,7 @@ static void gen8_enable_rps(struct drm_i915_private *dev_priv)
static void gen6_enable_rps(struct drm_i915_private *dev_priv)
{
struct intel_engine_cs *engine;
+ enum intel_engine_id id;
u32 rc6vids, rc6_mask = 0;
u32 gtfifodbg;
int rc6_mode;
@@ -5533,7 +5526,7 @@ static void gen6_enable_rps(struct drm_i915_private *dev_priv)
I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000);
I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25);
- for_each_engine(engine, dev_priv)
+ for_each_engine(engine, dev_priv, id)
I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10);
I915_WRITE(GEN6_RC_SLEEP, 0);
@@ -5570,10 +5563,6 @@ static void gen6_enable_rps(struct drm_i915_private *dev_priv)
I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 50000);
I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
- ret = sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_MIN_FREQ_TABLE, 0);
- if (ret)
- DRM_DEBUG_DRIVER("Failed to set the min frequency\n");
-
reset_rps(dev_priv, gen6_set_rps);
rc6vids = 0;
@@ -5863,7 +5852,7 @@ static void valleyview_cleanup_pctx(struct drm_i915_private *dev_priv)
if (WARN_ON(!dev_priv->vlv_pctx))
return;
- i915_gem_object_put_unlocked(dev_priv->vlv_pctx);
+ i915_gem_object_put(dev_priv->vlv_pctx);
dev_priv->vlv_pctx = NULL;
}
@@ -5982,6 +5971,7 @@ static void valleyview_cleanup_gt_powersave(struct drm_i915_private *dev_priv)
static void cherryview_enable_rps(struct drm_i915_private *dev_priv)
{
struct intel_engine_cs *engine;
+ enum intel_engine_id id;
u32 gtfifodbg, val, rc6_mode = 0, pcbr;
WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
@@ -6008,7 +5998,7 @@ static void cherryview_enable_rps(struct drm_i915_private *dev_priv)
I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
- for_each_engine(engine, dev_priv)
+ for_each_engine(engine, dev_priv, id)
I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10);
I915_WRITE(GEN6_RC_SLEEP, 0);
@@ -6070,6 +6060,7 @@ static void cherryview_enable_rps(struct drm_i915_private *dev_priv)
static void valleyview_enable_rps(struct drm_i915_private *dev_priv)
{
struct intel_engine_cs *engine;
+ enum intel_engine_id id;
u32 gtfifodbg, val, rc6_mode = 0;
WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
@@ -6109,7 +6100,7 @@ static void valleyview_enable_rps(struct drm_i915_private *dev_priv)
I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000);
I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25);
- for_each_engine(engine, dev_priv)
+ for_each_engine(engine, dev_priv, id)
I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10);
I915_WRITE(GEN6_RC6_THRESHOLD, 0x557);
@@ -6792,7 +6783,7 @@ static void __intel_autoenable_gt_powersave(struct work_struct *work)
if (READ_ONCE(dev_priv->rps.enabled))
goto out;
- rcs = &dev_priv->engine[RCS];
+ rcs = dev_priv->engine[RCS];
if (rcs->last_context)
goto out;
@@ -6845,10 +6836,8 @@ void intel_autoenable_gt_powersave(struct drm_i915_private *dev_priv)
}
}
-static void ibx_init_clock_gating(struct drm_device *dev)
+static void ibx_init_clock_gating(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
-
/*
* On Ibex Peak and Cougar Point, we need to disable clock
* gating for the panel power sequencer or it will fail to
@@ -6857,9 +6846,8 @@ static void ibx_init_clock_gating(struct drm_device *dev)
I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
}
-static void g4x_disable_trickle_feed(struct drm_device *dev)
+static void g4x_disable_trickle_feed(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
enum pipe pipe;
for_each_pipe(dev_priv, pipe) {
@@ -6872,10 +6860,8 @@ static void g4x_disable_trickle_feed(struct drm_device *dev)
}
}
-static void ilk_init_lp_watermarks(struct drm_device *dev)
+static void ilk_init_lp_watermarks(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
-
I915_WRITE(WM3_LP_ILK, I915_READ(WM3_LP_ILK) & ~WM1_LP_SR_EN);
I915_WRITE(WM2_LP_ILK, I915_READ(WM2_LP_ILK) & ~WM1_LP_SR_EN);
I915_WRITE(WM1_LP_ILK, I915_READ(WM1_LP_ILK) & ~WM1_LP_SR_EN);
@@ -6886,9 +6872,8 @@ static void ilk_init_lp_watermarks(struct drm_device *dev)
*/
}
-static void ironlake_init_clock_gating(struct drm_device *dev)
+static void ironlake_init_clock_gating(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
uint32_t dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE;
/*
@@ -6920,7 +6905,7 @@ static void ironlake_init_clock_gating(struct drm_device *dev)
(I915_READ(DISP_ARB_CTL) |
DISP_FBC_WM_DIS));
- ilk_init_lp_watermarks(dev);
+ ilk_init_lp_watermarks(dev_priv);
/*
* Based on the document from hardware guys the following bits
@@ -6929,7 +6914,7 @@ static void ironlake_init_clock_gating(struct drm_device *dev)
* The bit 22 of 0x42004
* The bit 7,8,9 of 0x42020.
*/
- if (IS_IRONLAKE_M(dev)) {
+ if (IS_IRONLAKE_M(dev_priv)) {
/* WaFbcAsynchFlipDisableFbcQueue:ilk */
I915_WRITE(ILK_DISPLAY_CHICKEN1,
I915_READ(ILK_DISPLAY_CHICKEN1) |
@@ -6955,14 +6940,13 @@ static void ironlake_init_clock_gating(struct drm_device *dev)
/* WaDisable_RenderCache_OperationalFlush:ilk */
I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
- g4x_disable_trickle_feed(dev);
+ g4x_disable_trickle_feed(dev_priv);
- ibx_init_clock_gating(dev);
+ ibx_init_clock_gating(dev_priv);
}
-static void cpt_init_clock_gating(struct drm_device *dev)
+static void cpt_init_clock_gating(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
int pipe;
uint32_t val;
@@ -6997,9 +6981,8 @@ static void cpt_init_clock_gating(struct drm_device *dev)
}
}
-static void gen6_check_mch_setup(struct drm_device *dev)
+static void gen6_check_mch_setup(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
uint32_t tmp;
tmp = I915_READ(MCH_SSKPD);
@@ -7008,9 +6991,8 @@ static void gen6_check_mch_setup(struct drm_device *dev)
tmp);
}
-static void gen6_init_clock_gating(struct drm_device *dev)
+static void gen6_init_clock_gating(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
uint32_t dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE;
I915_WRITE(ILK_DSPCLK_GATE_D, dspclk_gate);
@@ -7037,7 +7019,7 @@ static void gen6_init_clock_gating(struct drm_device *dev)
I915_WRITE(GEN6_GT_MODE,
_MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4));
- ilk_init_lp_watermarks(dev);
+ ilk_init_lp_watermarks(dev_priv);
I915_WRITE(CACHE_MODE_0,
_MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB));
@@ -7098,11 +7080,11 @@ static void gen6_init_clock_gating(struct drm_device *dev)
ILK_DPARBUNIT_CLOCK_GATE_ENABLE |
ILK_DPFDUNIT_CLOCK_GATE_ENABLE);
- g4x_disable_trickle_feed(dev);
+ g4x_disable_trickle_feed(dev_priv);
- cpt_init_clock_gating(dev);
+ cpt_init_clock_gating(dev_priv);
- gen6_check_mch_setup(dev);
+ gen6_check_mch_setup(dev_priv);
}
static void gen7_setup_fixed_func_scheduler(struct drm_i915_private *dev_priv)
@@ -7123,15 +7105,13 @@ static void gen7_setup_fixed_func_scheduler(struct drm_i915_private *dev_priv)
I915_WRITE(GEN7_FF_THREAD_MODE, reg);
}
-static void lpt_init_clock_gating(struct drm_device *dev)
+static void lpt_init_clock_gating(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
-
/*
* TODO: this bit should only be enabled when really needed, then
* disabled when not needed anymore in order to save power.
*/
- if (HAS_PCH_LPT_LP(dev))
+ if (HAS_PCH_LPT_LP(dev_priv))
I915_WRITE(SOUTH_DSPCLK_GATE_D,
I915_READ(SOUTH_DSPCLK_GATE_D) |
PCH_LP_PARTITION_LEVEL_DISABLE);
@@ -7142,11 +7122,9 @@ static void lpt_init_clock_gating(struct drm_device *dev)
TRANS_CHICKEN1_DP0UNIT_GC_DISABLE);
}
-static void lpt_suspend_hw(struct drm_device *dev)
+static void lpt_suspend_hw(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
-
- if (HAS_PCH_LPT_LP(dev)) {
+ if (HAS_PCH_LPT_LP(dev_priv)) {
uint32_t val = I915_READ(SOUTH_DSPCLK_GATE_D);
val &= ~PCH_LP_PARTITION_LEVEL_DISABLE;
@@ -7177,11 +7155,9 @@ static void gen8_set_l3sqc_credits(struct drm_i915_private *dev_priv,
I915_WRITE(GEN7_MISCCPCTL, misccpctl);
}
-static void kabylake_init_clock_gating(struct drm_device *dev)
+static void kabylake_init_clock_gating(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- gen9_init_clock_gating(dev);
+ gen9_init_clock_gating(dev_priv);
/* WaDisableSDEUnitClockGating:kbl */
if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_B0))
@@ -7198,11 +7174,9 @@ static void kabylake_init_clock_gating(struct drm_device *dev)
ILK_DPFC_NUKE_ON_ANY_MODIFICATION);
}
-static void skylake_init_clock_gating(struct drm_device *dev)
+static void skylake_init_clock_gating(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- gen9_init_clock_gating(dev);
+ gen9_init_clock_gating(dev_priv);
/* WAC6entrylatency:skl */
I915_WRITE(FBC_LLC_READ_CTRL, I915_READ(FBC_LLC_READ_CTRL) |
@@ -7213,12 +7187,11 @@ static void skylake_init_clock_gating(struct drm_device *dev)
ILK_DPFC_NUKE_ON_ANY_MODIFICATION);
}
-static void broadwell_init_clock_gating(struct drm_device *dev)
+static void broadwell_init_clock_gating(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
enum pipe pipe;
- ilk_init_lp_watermarks(dev);
+ ilk_init_lp_watermarks(dev_priv);
/* WaSwitchSolVfFArbitrationPriority:bdw */
I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL);
@@ -7261,14 +7234,12 @@ static void broadwell_init_clock_gating(struct drm_device *dev)
I915_WRITE(CHICKEN_PAR2_1, I915_READ(CHICKEN_PAR2_1)
| KVM_CONFIG_CHANGE_NOTIFICATION_SELECT);
- lpt_init_clock_gating(dev);
+ lpt_init_clock_gating(dev_priv);
}
-static void haswell_init_clock_gating(struct drm_device *dev)
+static void haswell_init_clock_gating(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
-
- ilk_init_lp_watermarks(dev);
+ ilk_init_lp_watermarks(dev_priv);
/* L3 caching of data atomics doesn't work -- disable it. */
I915_WRITE(HSW_SCRATCH1, HSW_SCRATCH1_L3_DATA_ATOMICS_DISABLE);
@@ -7317,15 +7288,14 @@ static void haswell_init_clock_gating(struct drm_device *dev)
I915_WRITE(CHICKEN_PAR1_1,
I915_READ(CHICKEN_PAR1_1) | FORCE_ARB_IDLE_PLANES);
- lpt_init_clock_gating(dev);
+ lpt_init_clock_gating(dev_priv);
}
-static void ivybridge_init_clock_gating(struct drm_device *dev)
+static void ivybridge_init_clock_gating(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
uint32_t snpcr;
- ilk_init_lp_watermarks(dev);
+ ilk_init_lp_watermarks(dev_priv);
I915_WRITE(ILK_DSPCLK_GATE_D, ILK_VRHUNIT_CLOCK_GATE_DISABLE);
@@ -7339,7 +7309,7 @@ static void ivybridge_init_clock_gating(struct drm_device *dev)
CHICKEN3_DGMG_DONE_FIX_DISABLE);
/* WaDisablePSDDualDispatchEnable:ivb */
- if (IS_IVB_GT1(dev))
+ if (IS_IVB_GT1(dev_priv))
I915_WRITE(GEN7_HALF_SLICE_CHICKEN1,
_MASKED_BIT_ENABLE(GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE));
@@ -7355,7 +7325,7 @@ static void ivybridge_init_clock_gating(struct drm_device *dev)
GEN7_WA_FOR_GEN7_L3_CONTROL);
I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER,
GEN7_WA_L3_CHICKEN_MODE);
- if (IS_IVB_GT1(dev))
+ if (IS_IVB_GT1(dev_priv))
I915_WRITE(GEN7_ROW_CHICKEN2,
_MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
else {
@@ -7382,7 +7352,7 @@ static void ivybridge_init_clock_gating(struct drm_device *dev)
I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);
- g4x_disable_trickle_feed(dev);
+ g4x_disable_trickle_feed(dev_priv);
gen7_setup_fixed_func_scheduler(dev_priv);
@@ -7412,16 +7382,14 @@ static void ivybridge_init_clock_gating(struct drm_device *dev)
snpcr |= GEN6_MBC_SNPCR_MED;
I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);
- if (!HAS_PCH_NOP(dev))
- cpt_init_clock_gating(dev);
+ if (!HAS_PCH_NOP(dev_priv))
+ cpt_init_clock_gating(dev_priv);
- gen6_check_mch_setup(dev);
+ gen6_check_mch_setup(dev_priv);
}
-static void valleyview_init_clock_gating(struct drm_device *dev)
+static void valleyview_init_clock_gating(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
-
/* WaDisableEarlyCull:vlv */
I915_WRITE(_3D_CHICKEN3,
_MASKED_BIT_ENABLE(_3D_CHICKEN_SF_DISABLE_OBJEND_CULL));
@@ -7500,10 +7468,8 @@ static void valleyview_init_clock_gating(struct drm_device *dev)
I915_WRITE(VLV_GUNIT_CLOCK_GATE, GCFG_DIS);
}
-static void cherryview_init_clock_gating(struct drm_device *dev)
+static void cherryview_init_clock_gating(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
-
/* WaVSRefCountFullforceMissDisable:chv */
/* WaDSRefCountFullforceMissDisable:chv */
I915_WRITE(GEN7_FF_THREAD_MODE,
@@ -7536,9 +7502,8 @@ static void cherryview_init_clock_gating(struct drm_device *dev)
I915_WRITE(HSW_GTT_CACHE_EN, GTT_CACHE_EN_ALL);
}
-static void g4x_init_clock_gating(struct drm_device *dev)
+static void g4x_init_clock_gating(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
uint32_t dspclk_gate;
I915_WRITE(RENCLK_GATE_D1, 0);
@@ -7549,7 +7514,7 @@ static void g4x_init_clock_gating(struct drm_device *dev)
dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE |
OVRUNIT_CLOCK_GATE_DISABLE |
OVCUNIT_CLOCK_GATE_DISABLE;
- if (IS_GM45(dev))
+ if (IS_GM45(dev_priv))
dspclk_gate |= DSSUNIT_CLOCK_GATE_DISABLE;
I915_WRITE(DSPCLK_GATE_D, dspclk_gate);
@@ -7560,13 +7525,11 @@ static void g4x_init_clock_gating(struct drm_device *dev)
/* WaDisable_RenderCache_OperationalFlush:g4x */
I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
- g4x_disable_trickle_feed(dev);
+ g4x_disable_trickle_feed(dev_priv);
}
-static void crestline_init_clock_gating(struct drm_device *dev)
+static void crestline_init_clock_gating(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
-
I915_WRITE(RENCLK_GATE_D1, I965_RCC_CLOCK_GATE_DISABLE);
I915_WRITE(RENCLK_GATE_D2, 0);
I915_WRITE(DSPCLK_GATE_D, 0);
@@ -7579,10 +7542,8 @@ static void crestline_init_clock_gating(struct drm_device *dev)
I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
}
-static void broadwater_init_clock_gating(struct drm_device *dev)
+static void broadwater_init_clock_gating(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
-
I915_WRITE(RENCLK_GATE_D1, I965_RCZ_CLOCK_GATE_DISABLE |
I965_RCC_CLOCK_GATE_DISABLE |
I965_RCPB_CLOCK_GATE_DISABLE |
@@ -7596,16 +7557,15 @@ static void broadwater_init_clock_gating(struct drm_device *dev)
I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
}
-static void gen3_init_clock_gating(struct drm_device *dev)
+static void gen3_init_clock_gating(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
u32 dstate = I915_READ(D_STATE);
dstate |= DSTATE_PLL_D3_OFF | DSTATE_GFX_CLOCK_GATING |
DSTATE_DOT_CLOCK_GATING;
I915_WRITE(D_STATE, dstate);
- if (IS_PINEVIEW(dev))
+ if (IS_PINEVIEW(dev_priv))
I915_WRITE(ECOSKPD, _MASKED_BIT_ENABLE(ECO_GATING_CX_ONLY));
/* IIR "flip pending" means done if this bit is set */
@@ -7621,10 +7581,8 @@ static void gen3_init_clock_gating(struct drm_device *dev)
_MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE));
}
-static void i85x_init_clock_gating(struct drm_device *dev)
+static void i85x_init_clock_gating(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
-
I915_WRITE(RENCLK_GATE_D1, SV_CLOCK_GATE_DISABLE);
/* interrupts should cause a wake up from C3 */
@@ -7635,10 +7593,8 @@ static void i85x_init_clock_gating(struct drm_device *dev)
_MASKED_BIT_ENABLE(MEM_DISPLAY_TRICKLE_FEED_DISABLE));
}
-static void i830_init_clock_gating(struct drm_device *dev)
+static void i830_init_clock_gating(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
-
I915_WRITE(DSPCLK_GATE_D, OVRUNIT_CLOCK_GATE_DISABLE);
I915_WRITE(MEM_MODE,
@@ -7646,20 +7602,18 @@ static void i830_init_clock_gating(struct drm_device *dev)
_MASKED_BIT_ENABLE(MEM_DISPLAY_B_TRICKLE_FEED_DISABLE));
}
-void intel_init_clock_gating(struct drm_device *dev)
+void intel_init_clock_gating(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
-
- dev_priv->display.init_clock_gating(dev);
+ dev_priv->display.init_clock_gating(dev_priv);
}
-void intel_suspend_hw(struct drm_device *dev)
+void intel_suspend_hw(struct drm_i915_private *dev_priv)
{
- if (HAS_PCH_LPT(dev))
- lpt_suspend_hw(dev);
+ if (HAS_PCH_LPT(dev_priv))
+ lpt_suspend_hw(dev_priv);
}
-static void nop_init_clock_gating(struct drm_device *dev)
+static void nop_init_clock_gating(struct drm_i915_private *dev_priv)
{
DRM_DEBUG_KMS("No clock gating settings or workarounds applied.\n");
}
@@ -7714,29 +7668,28 @@ void intel_init_clock_gating_hooks(struct drm_i915_private *dev_priv)
}
/* Set up chip specific power management-related functions */
-void intel_init_pm(struct drm_device *dev)
+void intel_init_pm(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
-
intel_fbc_init(dev_priv);
/* For cxsr */
- if (IS_PINEVIEW(dev))
- i915_pineview_get_mem_freq(dev);
- else if (IS_GEN5(dev))
- i915_ironlake_get_mem_freq(dev);
+ if (IS_PINEVIEW(dev_priv))
+ i915_pineview_get_mem_freq(dev_priv);
+ else if (IS_GEN5(dev_priv))
+ i915_ironlake_get_mem_freq(dev_priv);
/* For FIFO watermark updates */
- if (INTEL_INFO(dev)->gen >= 9) {
- skl_setup_wm_latency(dev);
- dev_priv->display.update_wm = skl_update_wm;
+ if (INTEL_GEN(dev_priv) >= 9) {
+ skl_setup_wm_latency(dev_priv);
+ dev_priv->display.initial_watermarks = skl_initial_wm;
+ dev_priv->display.atomic_update_watermarks = skl_atomic_update_crtc_wm;
dev_priv->display.compute_global_watermarks = skl_compute_wm;
- } else if (HAS_PCH_SPLIT(dev)) {
- ilk_setup_wm_latency(dev);
+ } else if (HAS_PCH_SPLIT(dev_priv)) {
+ ilk_setup_wm_latency(dev_priv);
- if ((IS_GEN5(dev) && dev_priv->wm.pri_latency[1] &&
+ if ((IS_GEN5(dev_priv) && dev_priv->wm.pri_latency[1] &&
dev_priv->wm.spr_latency[1] && dev_priv->wm.cur_latency[1]) ||
- (!IS_GEN5(dev) && dev_priv->wm.pri_latency[0] &&
+ (!IS_GEN5(dev_priv) && dev_priv->wm.pri_latency[0] &&
dev_priv->wm.spr_latency[0] && dev_priv->wm.cur_latency[0])) {
dev_priv->display.compute_pipe_wm = ilk_compute_pipe_wm;
dev_priv->display.compute_intermediate_wm =
@@ -7749,14 +7702,14 @@ void intel_init_pm(struct drm_device *dev)
DRM_DEBUG_KMS("Failed to read display plane latency. "
"Disable CxSR\n");
}
- } else if (IS_CHERRYVIEW(dev)) {
- vlv_setup_wm_latency(dev);
+ } else if (IS_CHERRYVIEW(dev_priv)) {
+ vlv_setup_wm_latency(dev_priv);
dev_priv->display.update_wm = vlv_update_wm;
- } else if (IS_VALLEYVIEW(dev)) {
- vlv_setup_wm_latency(dev);
+ } else if (IS_VALLEYVIEW(dev_priv)) {
+ vlv_setup_wm_latency(dev_priv);
dev_priv->display.update_wm = vlv_update_wm;
- } else if (IS_PINEVIEW(dev)) {
- if (!intel_get_cxsr_latency(IS_PINEVIEW_G(dev),
+ } else if (IS_PINEVIEW(dev_priv)) {
+ if (!intel_get_cxsr_latency(IS_PINEVIEW_G(dev_priv),
dev_priv->is_ddr3,
dev_priv->fsb_freq,
dev_priv->mem_freq)) {
@@ -7770,15 +7723,15 @@ void intel_init_pm(struct drm_device *dev)
dev_priv->display.update_wm = NULL;
} else
dev_priv->display.update_wm = pineview_update_wm;
- } else if (IS_G4X(dev)) {
+ } else if (IS_G4X(dev_priv)) {
dev_priv->display.update_wm = g4x_update_wm;
- } else if (IS_GEN4(dev)) {
+ } else if (IS_GEN4(dev_priv)) {
dev_priv->display.update_wm = i965_update_wm;
- } else if (IS_GEN3(dev)) {
+ } else if (IS_GEN3(dev_priv)) {
dev_priv->display.update_wm = i9xx_update_wm;
dev_priv->display.get_fifo_size = i9xx_get_fifo_size;
- } else if (IS_GEN2(dev)) {
- if (INTEL_INFO(dev)->num_pipes == 1) {
+ } else if (IS_GEN2(dev_priv)) {
+ if (INTEL_INFO(dev_priv)->num_pipes == 1) {
dev_priv->display.update_wm = i845_update_wm;
dev_priv->display.get_fifo_size = i845_get_fifo_size;
} else {
@@ -7921,6 +7874,81 @@ int sandybridge_pcode_write(struct drm_i915_private *dev_priv,
return 0;
}
+static bool skl_pcode_try_request(struct drm_i915_private *dev_priv, u32 mbox,
+ u32 request, u32 reply_mask, u32 reply,
+ u32 *status)
+{
+ u32 val = request;
+
+ *status = sandybridge_pcode_read(dev_priv, mbox, &val);
+
+ return *status || ((val & reply_mask) == reply);
+}
+
+/**
+ * skl_pcode_request - send PCODE request until acknowledgment
+ * @dev_priv: device private
+ * @mbox: PCODE mailbox ID the request is targeted for
+ * @request: request ID
+ * @reply_mask: mask used to check for request acknowledgment
+ * @reply: value used to check for request acknowledgment
+ * @timeout_base_ms: timeout for polling with preemption enabled
+ *
+ * Keep resending the @request to @mbox until PCODE acknowledges it, PCODE
+ * reports an error, or an overall timeout of @timeout_base_ms+10 ms expires.
+ * The request is acknowledged once the PCODE reply dword equals @reply after
+ * applying @reply_mask. Polling is first attempted with preemption enabled
+ * for @timeout_base_ms and, if this times out, for another 10 ms with
+ * preemption disabled.
+ *
+ * Returns 0 on success, %-ETIMEDOUT in case of a timeout, <0 in case of some
+ * other error as reported by PCODE.
+ */
+int skl_pcode_request(struct drm_i915_private *dev_priv, u32 mbox, u32 request,
+ u32 reply_mask, u32 reply, int timeout_base_ms)
+{
+ u32 status;
+ int ret;
+
+ WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
+
+#define COND skl_pcode_try_request(dev_priv, mbox, request, reply_mask, reply, \
+ &status)
+
+ /*
+ * Prime the PCODE by doing a request first. Normally it guarantees
+ * that a subsequent request, at most @timeout_base_ms later, succeeds.
+ * _wait_for() doesn't guarantee when the condition it is passed will first
+ * be evaluated, so send the first request explicitly.
+ */
+ if (COND) {
+ ret = 0;
+ goto out;
+ }
+ ret = _wait_for(COND, timeout_base_ms * 1000, 10);
+ if (!ret)
+ goto out;
+
+ /*
+ * The above can time out if the number of requests was low (2 in the
+ * worst case) _and_ PCODE was busy for some reason even after a
+ * (queued) request and @timeout_base_ms delay. As a workaround retry
+ * the poll with preemption disabled to maximize the number of
+ * requests. Increase the timeout from @timeout_base_ms to 10ms to
+ * account for interrupts that could reduce the number of these
+ * requests.
+ */
+ DRM_DEBUG_KMS("PCODE timeout, retrying with preemption disabled\n");
+ WARN_ON_ONCE(timeout_base_ms > 3);
+ preempt_disable();
+ ret = wait_for_atomic(COND, 10);
+ preempt_enable();
+
+out:
+ return ret ? ret : status;
+#undef COND
+}
+
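The two-phase wait above is a reusable pattern: prime the mailbox with one
explicit request, poll preemptibly for the nominal latency, then fall back to
a short tight poll to maximize the request rate. A minimal userspace C sketch
of that structure, where device_acked() is a made-up stand-in for the PCODE
mailbox read rather than driver code:

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

static int reads;

static bool device_acked(void)		/* fake PCODE: acks on the 5th read */
{
	return ++reads >= 5;
}

/* Poll cond() until it holds or timeout_ms elapses, sleeping sleep_ns
 * between attempts; a tiny sleep approximates the atomic retry phase. */
static bool poll_for(bool (*cond)(void), int timeout_ms, long sleep_ns)
{
	struct timespec ts = { 0, sleep_ns };
	long waited_ns = 0;

	do {
		if (cond())
			return true;
		nanosleep(&ts, NULL);
		waited_ns += sleep_ns;
	} while (waited_ns < timeout_ms * 1000000L);
	return false;
}

int main(void)
{
	if (device_acked() ||			/* prime with one request */
	    poll_for(device_acked, 3, 1000000) ||	/* ~timeout_base_ms */
	    poll_for(device_acked, 10, 1000))	/* last-ditch fast retry */
		printf("acked after %d reads\n", reads);
	else
		printf("timed out\n");
	return 0;
}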
static int byt_gpu_freq(struct drm_i915_private *dev_priv, int val)
{
/*
@@ -8026,5 +8054,4 @@ void intel_pm_setup(struct drm_device *dev)
dev_priv->pm.suspended = false;
atomic_set(&dev_priv->pm.wakeref_count, 0);
- atomic_set(&dev_priv->pm.atomic_seq, 0);
}
diff --git a/drivers/gpu/drm/i915/intel_psr.c b/drivers/gpu/drm/i915/intel_psr.c
index 108ba1e5d658..c6be70686b4a 100644
--- a/drivers/gpu/drm/i915/intel_psr.c
+++ b/drivers/gpu/drm/i915/intel_psr.c
@@ -268,7 +268,7 @@ static void hsw_psr_enable_source(struct intel_dp *intel_dp)
val |= max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT;
val |= idle_frames << EDP_PSR_IDLE_FRAME_SHIFT;
- if (IS_HASWELL(dev))
+ if (IS_HASWELL(dev_priv))
val |= EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES;
if (dev_priv->psr.link_standby)
@@ -344,7 +344,7 @@ static bool intel_psr_match_conditions(struct intel_dp *intel_dp)
* ones. Since by display design the EDP transcoder is tied to port A,
* we can safely escape based on port A.
*/
- if (HAS_DDI(dev) && dig_port->port != PORT_A) {
+ if (HAS_DDI(dev_priv) && dig_port->port != PORT_A) {
DRM_DEBUG_KMS("PSR condition failed: Port not supported\n");
return false;
}
@@ -354,20 +354,20 @@ static bool intel_psr_match_conditions(struct intel_dp *intel_dp)
return false;
}
- if ((IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) &&
+ if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
!dev_priv->psr.link_standby) {
DRM_ERROR("PSR condition failed: Link off requested but not supported on this platform\n");
return false;
}
- if (IS_HASWELL(dev) &&
+ if (IS_HASWELL(dev_priv) &&
I915_READ(HSW_STEREO_3D_CTL(intel_crtc->config->cpu_transcoder)) &
S3D_ENABLE) {
DRM_DEBUG_KMS("PSR condition failed: Stereo 3D is Enabled\n");
return false;
}
- if (IS_HASWELL(dev) &&
+ if (IS_HASWELL(dev_priv) &&
adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
DRM_DEBUG_KMS("PSR condition failed: Interlaced is Enabled\n");
return false;
@@ -402,7 +402,7 @@ static void intel_psr_activate(struct intel_dp *intel_dp)
lockdep_assert_held(&dev_priv->psr.lock);
/* Enable/Re-enable PSR on the host */
- if (HAS_DDI(dev))
+ if (HAS_DDI(dev_priv))
/* On HSW+, after we enable PSR on the source it will activate it
* as soon as it matches the configured idle_frame count. So
* we just enable it here at activation time.
@@ -427,7 +427,7 @@ void intel_psr_enable(struct intel_dp *intel_dp)
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc);
- if (!HAS_PSR(dev)) {
+ if (!HAS_PSR(dev_priv)) {
DRM_DEBUG_KMS("PSR not supported on this platform\n");
return;
}
@@ -448,7 +448,7 @@ void intel_psr_enable(struct intel_dp *intel_dp)
dev_priv->psr.busy_frontbuffer_bits = 0;
- if (HAS_DDI(dev)) {
+ if (HAS_DDI(dev_priv)) {
hsw_psr_setup_vsc(intel_dp);
if (dev_priv->psr.psr2_support) {
@@ -472,7 +472,7 @@ void intel_psr_enable(struct intel_dp *intel_dp)
/* Enable PSR on the panel */
hsw_psr_enable_sink(intel_dp);
- if (INTEL_INFO(dev)->gen >= 9)
+ if (INTEL_GEN(dev_priv) >= 9)
intel_psr_activate(intel_dp);
} else {
vlv_psr_setup_vsc(intel_dp);
@@ -498,7 +498,7 @@ void intel_psr_enable(struct intel_dp *intel_dp)
* - On HSW/BDW we get a recoverable frozen screen until next
* exit-activate sequence.
*/
- if (INTEL_INFO(dev)->gen < 9)
+ if (INTEL_GEN(dev_priv) < 9)
schedule_delayed_work(&dev_priv->psr.work,
msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5));
@@ -580,7 +580,7 @@ void intel_psr_disable(struct intel_dp *intel_dp)
}
/* Disable PSR on Source */
- if (HAS_DDI(dev))
+ if (HAS_DDI(dev_priv))
hsw_psr_disable(intel_dp);
else
vlv_psr_disable(intel_dp);
@@ -825,19 +825,15 @@ void intel_psr_init(struct drm_device *dev)
dev_priv->psr_mmio_base = IS_HASWELL(dev_priv) ?
HSW_EDP_PSR_BASE : BDW_EDP_PSR_BASE;
- /* Per platform default */
- if (i915.enable_psr == -1) {
- if (IS_HASWELL(dev) || IS_BROADWELL(dev))
- i915.enable_psr = 1;
- else
- i915.enable_psr = 0;
- }
+ /* Per platform default: all disabled. */
+ if (i915.enable_psr == -1)
+ i915.enable_psr = 0;
/* Set link_standby x link_off defaults */
- if (IS_HASWELL(dev) || IS_BROADWELL(dev))
+ if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
/* HSW and BDW require workarounds that we don't implement. */
dev_priv->psr.link_standby = false;
- else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
+ else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
/* On VLV and CHV only standby mode is supported. */
dev_priv->psr.link_standby = true;
else
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index ed9955dce156..aeb637dc1fdf 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -405,22 +405,6 @@ gen8_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
return gen8_emit_pipe_control(req, flags, scratch_addr);
}
-u64 intel_engine_get_active_head(struct intel_engine_cs *engine)
-{
- struct drm_i915_private *dev_priv = engine->i915;
- u64 acthd;
-
- if (INTEL_GEN(dev_priv) >= 8)
- acthd = I915_READ64_2x32(RING_ACTHD(engine->mmio_base),
- RING_ACTHD_UDW(engine->mmio_base));
- else if (INTEL_GEN(dev_priv) >= 4)
- acthd = I915_READ(RING_ACTHD(engine->mmio_base));
- else
- acthd = I915_READ(ACTHD);
-
- return acthd;
-}
-
static void ring_setup_phys_status_page(struct intel_engine_cs *engine)
{
struct drm_i915_private *dev_priv = engine->i915;
@@ -585,9 +569,7 @@ static int init_ring_common(struct intel_engine_cs *engine)
I915_WRITE_TAIL(engine, ring->tail);
(void)I915_READ_TAIL(engine);
- I915_WRITE_CTL(engine,
- ((ring->size - PAGE_SIZE) & RING_NR_PAGES)
- | RING_VALID);
+ I915_WRITE_CTL(engine, RING_CTL_SIZE(ring->size) | RING_VALID);
/* If the head is still not zero, the ring is dead */
if (intel_wait_for_register_fw(dev_priv, RING_CTL(engine->mmio_base),
@@ -666,7 +648,7 @@ static int intel_rcs_ctx_init(struct drm_i915_gem_request *req)
if (ret != 0)
return ret;
- ret = i915_gem_render_state_init(req);
+ ret = i915_gem_render_state_emit(req);
if (ret)
return ret;
@@ -851,15 +833,13 @@ static int gen9_init_workarounds(struct intel_engine_cs *engine)
WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
GEN9_DISABLE_OCL_OOB_SUPPRESS_LOGIC);
- /* WaDisableDgMirrorFixInHalfSliceChicken5:skl,bxt */
- if (IS_SKL_REVID(dev_priv, 0, SKL_REVID_B0) ||
- IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1))
+ /* WaDisableDgMirrorFixInHalfSliceChicken5:bxt */
+ if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1))
WA_CLR_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN5,
GEN9_DG_MIRROR_FIX_ENABLE);
- /* WaSetDisablePixMaskCammingAndRhwoInCommonSliceChicken:skl,bxt */
- if (IS_SKL_REVID(dev_priv, 0, SKL_REVID_B0) ||
- IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) {
+ /* WaSetDisablePixMaskCammingAndRhwoInCommonSliceChicken:bxt */
+ if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) {
WA_SET_BIT_MASKED(GEN7_COMMON_SLICE_CHICKEN1,
GEN9_RHWO_OPTIMIZATION_DISABLE);
/*
@@ -869,10 +849,8 @@ static int gen9_init_workarounds(struct intel_engine_cs *engine)
*/
}
- /* WaEnableYV12BugFixInHalfSliceChicken7:skl,bxt,kbl */
/* WaEnableSamplerGPGPUPreemptionSupport:skl,bxt,kbl */
WA_SET_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN7,
- GEN9_ENABLE_YV12_BUGFIX |
GEN9_ENABLE_GPGPU_PREEMPTION);
/* Wa4x4STCOptimizationDisable:skl,bxt,kbl */
@@ -884,9 +862,8 @@ static int gen9_init_workarounds(struct intel_engine_cs *engine)
WA_CLR_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN5,
GEN9_CCS_TLB_PREFETCH_ENABLE);
- /* WaDisableMaskBasedCammingInRCC:skl,bxt */
- if (IS_SKL_REVID(dev_priv, SKL_REVID_C0, SKL_REVID_C0) ||
- IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1))
+ /* WaDisableMaskBasedCammingInRCC:bxt */
+ if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1))
WA_SET_BIT_MASKED(SLICE_ECO_CHICKEN0,
PIXEL_MASK_CAMMING_DISABLE);
@@ -1003,47 +980,12 @@ static int skl_init_workarounds(struct intel_engine_cs *engine)
* until D0 which is the default case so this is equivalent to
* !WaDisablePerCtxtPreemptionGranularityControl:skl
*/
- if (IS_SKL_REVID(dev_priv, SKL_REVID_E0, REVID_FOREVER)) {
- I915_WRITE(GEN7_FF_SLICE_CS_CHICKEN1,
- _MASKED_BIT_ENABLE(GEN9_FFSC_PERCTX_PREEMPT_CTRL));
- }
-
- if (IS_SKL_REVID(dev_priv, 0, SKL_REVID_E0)) {
- /* WaDisableChickenBitTSGBarrierAckForFFSliceCS:skl */
- I915_WRITE(FF_SLICE_CS_CHICKEN2,
- _MASKED_BIT_ENABLE(GEN9_TSG_BARRIER_ACK_DISABLE));
- }
-
- /* GEN8_L3SQCREG4 has a dependency with WA batch so any new changes
- * involving this register should also be added to WA batch as required.
- */
- if (IS_SKL_REVID(dev_priv, 0, SKL_REVID_E0))
- /* WaDisableLSQCROPERFforOCL:skl */
- I915_WRITE(GEN8_L3SQCREG4, I915_READ(GEN8_L3SQCREG4) |
- GEN8_LQSC_RO_PERF_DIS);
+ I915_WRITE(GEN7_FF_SLICE_CS_CHICKEN1,
+ _MASKED_BIT_ENABLE(GEN9_FFSC_PERCTX_PREEMPT_CTRL));
/* WaEnableGapsTsvCreditFix:skl */
- if (IS_SKL_REVID(dev_priv, SKL_REVID_C0, REVID_FOREVER)) {
- I915_WRITE(GEN8_GARBCNTL, (I915_READ(GEN8_GARBCNTL) |
- GEN9_GAPS_TSV_CREDIT_DISABLE));
- }
-
- /* WaDisablePowerCompilerClockGating:skl */
- if (IS_SKL_REVID(dev_priv, SKL_REVID_B0, SKL_REVID_B0))
- WA_SET_BIT_MASKED(HIZ_CHICKEN,
- BDW_HIZ_POWER_COMPILER_CLOCK_GATING_DISABLE);
-
- /* WaBarrierPerformanceFixDisable:skl */
- if (IS_SKL_REVID(dev_priv, SKL_REVID_C0, SKL_REVID_D0))
- WA_SET_BIT_MASKED(HDC_CHICKEN0,
- HDC_FENCE_DEST_SLM_DISABLE |
- HDC_BARRIER_PERFORMANCE_DISABLE);
-
- /* WaDisableSbeCacheDispatchPortSharing:skl */
- if (IS_SKL_REVID(dev_priv, 0, SKL_REVID_F0))
- WA_SET_BIT_MASKED(
- GEN7_HALF_SLICE_CHICKEN1,
- GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
+ I915_WRITE(GEN8_GARBCNTL, (I915_READ(GEN8_GARBCNTL) |
+ GEN9_GAPS_TSV_CREDIT_DISABLE));
/* WaDisableGafsUnitClkGating:skl */
WA_SET_BIT(GEN7_UCGCTL4, GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);
@@ -1271,91 +1213,64 @@ static void render_ring_cleanup(struct intel_engine_cs *engine)
i915_vma_unpin_and_release(&dev_priv->semaphore);
}
-static int gen8_rcs_signal(struct drm_i915_gem_request *req)
+static u32 *gen8_rcs_signal(struct drm_i915_gem_request *req, u32 *out)
{
- struct intel_ring *ring = req->ring;
struct drm_i915_private *dev_priv = req->i915;
struct intel_engine_cs *waiter;
enum intel_engine_id id;
- int ret, num_rings;
-
- num_rings = INTEL_INFO(dev_priv)->num_rings;
- ret = intel_ring_begin(req, (num_rings-1) * 8);
- if (ret)
- return ret;
- for_each_engine_id(waiter, dev_priv, id) {
+ for_each_engine(waiter, dev_priv, id) {
u64 gtt_offset = req->engine->semaphore.signal_ggtt[id];
if (gtt_offset == MI_SEMAPHORE_SYNC_INVALID)
continue;
- intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(6));
- intel_ring_emit(ring,
- PIPE_CONTROL_GLOBAL_GTT_IVB |
- PIPE_CONTROL_QW_WRITE |
- PIPE_CONTROL_CS_STALL);
- intel_ring_emit(ring, lower_32_bits(gtt_offset));
- intel_ring_emit(ring, upper_32_bits(gtt_offset));
- intel_ring_emit(ring, req->fence.seqno);
- intel_ring_emit(ring, 0);
- intel_ring_emit(ring,
- MI_SEMAPHORE_SIGNAL |
- MI_SEMAPHORE_TARGET(waiter->hw_id));
- intel_ring_emit(ring, 0);
+ *out++ = GFX_OP_PIPE_CONTROL(6);
+ *out++ = (PIPE_CONTROL_GLOBAL_GTT_IVB |
+ PIPE_CONTROL_QW_WRITE |
+ PIPE_CONTROL_CS_STALL);
+ *out++ = lower_32_bits(gtt_offset);
+ *out++ = upper_32_bits(gtt_offset);
+ *out++ = req->global_seqno;
+ *out++ = 0;
+ *out++ = (MI_SEMAPHORE_SIGNAL |
+ MI_SEMAPHORE_TARGET(waiter->hw_id));
+ *out++ = 0;
}
- intel_ring_advance(ring);
- return 0;
+ return out;
}
-static int gen8_xcs_signal(struct drm_i915_gem_request *req)
+static u32 *gen8_xcs_signal(struct drm_i915_gem_request *req, u32 *out)
{
- struct intel_ring *ring = req->ring;
struct drm_i915_private *dev_priv = req->i915;
struct intel_engine_cs *waiter;
enum intel_engine_id id;
- int ret, num_rings;
- num_rings = INTEL_INFO(dev_priv)->num_rings;
- ret = intel_ring_begin(req, (num_rings-1) * 6);
- if (ret)
- return ret;
-
- for_each_engine_id(waiter, dev_priv, id) {
+ for_each_engine(waiter, dev_priv, id) {
u64 gtt_offset = req->engine->semaphore.signal_ggtt[id];
if (gtt_offset == MI_SEMAPHORE_SYNC_INVALID)
continue;
- intel_ring_emit(ring,
- (MI_FLUSH_DW + 1) | MI_FLUSH_DW_OP_STOREDW);
- intel_ring_emit(ring,
- lower_32_bits(gtt_offset) |
- MI_FLUSH_DW_USE_GTT);
- intel_ring_emit(ring, upper_32_bits(gtt_offset));
- intel_ring_emit(ring, req->fence.seqno);
- intel_ring_emit(ring,
- MI_SEMAPHORE_SIGNAL |
- MI_SEMAPHORE_TARGET(waiter->hw_id));
- intel_ring_emit(ring, 0);
+ *out++ = (MI_FLUSH_DW + 1) | MI_FLUSH_DW_OP_STOREDW;
+ *out++ = lower_32_bits(gtt_offset) | MI_FLUSH_DW_USE_GTT;
+ *out++ = upper_32_bits(gtt_offset);
+ *out++ = req->global_seqno;
+ *out++ = (MI_SEMAPHORE_SIGNAL |
+ MI_SEMAPHORE_TARGET(waiter->hw_id));
+ *out++ = 0;
}
- intel_ring_advance(ring);
- return 0;
+ return out;
}
-static int gen6_signal(struct drm_i915_gem_request *req)
+static u32 *gen6_signal(struct drm_i915_gem_request *req, u32 *out)
{
- struct intel_ring *ring = req->ring;
struct drm_i915_private *dev_priv = req->i915;
struct intel_engine_cs *engine;
- int ret, num_rings;
-
- num_rings = INTEL_INFO(dev_priv)->num_rings;
- ret = intel_ring_begin(req, round_up((num_rings-1) * 3, 2));
- if (ret)
- return ret;
+ enum intel_engine_id id;
+ int num_rings = 0;
- for_each_engine(engine, dev_priv) {
+ for_each_engine(engine, dev_priv, id) {
i915_reg_t mbox_reg;
if (!(BIT(engine->hw_id) & GEN6_SEMAPHORES_MASK))
@@ -1363,101 +1278,80 @@ static int gen6_signal(struct drm_i915_gem_request *req)
mbox_reg = req->engine->semaphore.mbox.signal[engine->hw_id];
if (i915_mmio_reg_valid(mbox_reg)) {
- intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
- intel_ring_emit_reg(ring, mbox_reg);
- intel_ring_emit(ring, req->fence.seqno);
+ *out++ = MI_LOAD_REGISTER_IMM(1);
+ *out++ = i915_mmio_reg_offset(mbox_reg);
+ *out++ = req->global_seqno;
+ num_rings++;
}
}
+ if (num_rings & 1)
+ *out++ = MI_NOOP;
- /* If num_dwords was rounded, make sure the tail pointer is correct */
- if (num_rings % 2 == 0)
- intel_ring_emit(ring, MI_NOOP);
- intel_ring_advance(ring);
-
- return 0;
+ return out;
}
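The rewritten helpers above emit commands through a bare u32 cursor and
return it advanced, rather than going through intel_ring_begin() and
intel_ring_emit() with per-call error handling. A standalone sketch of that
chained-cursor style; the packet values are made up for illustration:

#include <stdint.h>
#include <stdio.h>

static uint32_t *emit_flush(uint32_t *out)
{
	*out++ = 0x13000001;	/* fake flush header */
	*out++ = 0;
	return out;
}

static uint32_t *emit_breadcrumb(uint32_t *out, uint32_t seqno)
{
	*out++ = 0x10000000;	/* fake store-dword header */
	*out++ = seqno;
	*out++ = 0x01000000;	/* fake user-interrupt opcode */
	return out;
}

int main(void)
{
	uint32_t ring[8];
	uint32_t *out = ring;

	/* Helpers compose by passing the cursor along; no error paths. */
	out = emit_breadcrumb(emit_flush(out), 42);
	printf("emitted %d dwords\n", (int)(out - ring));
	return 0;
}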
static void i9xx_submit_request(struct drm_i915_gem_request *request)
{
struct drm_i915_private *dev_priv = request->i915;
- I915_WRITE_TAIL(request->engine,
- intel_ring_offset(request->ring, request->tail));
+ i915_gem_request_submit(request);
+
+ I915_WRITE_TAIL(request->engine, request->tail);
}
-static int i9xx_emit_request(struct drm_i915_gem_request *req)
+static void i9xx_emit_breadcrumb(struct drm_i915_gem_request *req,
+ u32 *out)
{
- struct intel_ring *ring = req->ring;
- int ret;
-
- ret = intel_ring_begin(req, 4);
- if (ret)
- return ret;
-
- intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
- intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
- intel_ring_emit(ring, req->fence.seqno);
- intel_ring_emit(ring, MI_USER_INTERRUPT);
- intel_ring_advance(ring);
+ *out++ = MI_STORE_DWORD_INDEX;
+ *out++ = I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT;
+ *out++ = req->global_seqno;
+ *out++ = MI_USER_INTERRUPT;
- req->tail = ring->tail;
-
- return 0;
+ req->tail = intel_ring_offset(req->ring, out);
}
+static const int i9xx_emit_breadcrumb_sz = 4;
+
/**
- * gen6_sema_emit_request - Update the semaphore mailbox registers
+ * gen6_sema_emit_breadcrumb - Update the semaphore mailbox registers
*
* @req - request to write to the ring
*
* Update the mailbox registers in the *other* rings with the current seqno.
* This acts like a signal in the canonical semaphore.
*/
-static int gen6_sema_emit_request(struct drm_i915_gem_request *req)
+static void gen6_sema_emit_breadcrumb(struct drm_i915_gem_request *req,
+ u32 *out)
{
- int ret;
-
- ret = req->engine->semaphore.signal(req);
- if (ret)
- return ret;
-
- return i9xx_emit_request(req);
+ return i9xx_emit_breadcrumb(req,
+ req->engine->semaphore.signal(req, out));
}
-static int gen8_render_emit_request(struct drm_i915_gem_request *req)
+static void gen8_render_emit_breadcrumb(struct drm_i915_gem_request *req,
+ u32 *out)
{
struct intel_engine_cs *engine = req->engine;
- struct intel_ring *ring = req->ring;
- int ret;
- if (engine->semaphore.signal) {
- ret = engine->semaphore.signal(req);
- if (ret)
- return ret;
- }
+ if (engine->semaphore.signal)
+ out = engine->semaphore.signal(req, out);
- ret = intel_ring_begin(req, 8);
- if (ret)
- return ret;
-
- intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(6));
- intel_ring_emit(ring, (PIPE_CONTROL_GLOBAL_GTT_IVB |
+ *out++ = GFX_OP_PIPE_CONTROL(6);
+ *out++ = (PIPE_CONTROL_GLOBAL_GTT_IVB |
PIPE_CONTROL_CS_STALL |
- PIPE_CONTROL_QW_WRITE));
- intel_ring_emit(ring, intel_hws_seqno_address(engine));
- intel_ring_emit(ring, 0);
- intel_ring_emit(ring, i915_gem_request_get_seqno(req));
+ PIPE_CONTROL_QW_WRITE);
+ *out++ = intel_hws_seqno_address(engine);
+ *out++ = 0;
+ *out++ = req->global_seqno;
/* We're thrashing one dword of HWS. */
- intel_ring_emit(ring, 0);
- intel_ring_emit(ring, MI_USER_INTERRUPT);
- intel_ring_emit(ring, MI_NOOP);
- intel_ring_advance(ring);
-
- req->tail = ring->tail;
+ *out++ = 0;
+ *out++ = MI_USER_INTERRUPT;
+ *out++ = MI_NOOP;
- return 0;
+ req->tail = intel_ring_offset(req->ring, out);
}
+static const int gen8_render_emit_breadcrumb_sz = 8;
+
/**
* intel_ring_sync - sync the waiter to the signaller on seqno
*
@@ -1484,7 +1378,7 @@ gen8_ring_sync_to(struct drm_i915_gem_request *req,
MI_SEMAPHORE_WAIT |
MI_SEMAPHORE_GLOBAL_GTT |
MI_SEMAPHORE_SAD_GTE_SDD);
- intel_ring_emit(ring, signal->fence.seqno);
+ intel_ring_emit(ring, signal->global_seqno);
intel_ring_emit(ring, lower_32_bits(offset));
intel_ring_emit(ring, upper_32_bits(offset));
intel_ring_advance(ring);
@@ -1522,7 +1416,7 @@ gen6_ring_sync_to(struct drm_i915_gem_request *req,
* seqno is >= the last seqno executed. However for hardware the
* comparison is strictly greater than.
*/
- intel_ring_emit(ring, signal->fence.seqno - 1);
+ intel_ring_emit(ring, signal->global_seqno - 1);
intel_ring_emit(ring, 0);
intel_ring_emit(ring, MI_NOOP);
intel_ring_advance(ring);
@@ -1665,7 +1559,7 @@ hsw_vebox_irq_enable(struct intel_engine_cs *engine)
struct drm_i915_private *dev_priv = engine->i915;
I915_WRITE_IMR(engine, ~engine->irq_enable_mask);
- gen6_enable_pm_irq(dev_priv, engine->irq_enable_mask);
+ gen6_unmask_pm_irq(dev_priv, engine->irq_enable_mask);
}
static void
@@ -1674,7 +1568,7 @@ hsw_vebox_irq_disable(struct intel_engine_cs *engine)
struct drm_i915_private *dev_priv = engine->i915;
I915_WRITE_IMR(engine, ~0);
- gen6_disable_pm_irq(dev_priv, engine->irq_enable_mask);
+ gen6_mask_pm_irq(dev_priv, engine->irq_enable_mask);
}
static void
@@ -1819,14 +1713,19 @@ static void cleanup_phys_status_page(struct intel_engine_cs *engine)
static void cleanup_status_page(struct intel_engine_cs *engine)
{
struct i915_vma *vma;
+ struct drm_i915_gem_object *obj;
vma = fetch_and_zero(&engine->status_page.vma);
if (!vma)
return;
+ obj = vma->obj;
+
i915_vma_unpin(vma);
- i915_gem_object_unpin_map(vma->obj);
- i915_vma_put(vma);
+ i915_vma_close(vma);
+
+ i915_gem_object_unpin_map(obj);
+ __i915_gem_object_release_unless_active(obj);
}
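fetch_and_zero() swaps the pointer out of the structure before anything is
released, so a second trip through the teardown path sees NULL and returns
early. A sketch of the idiom using a GNU C statement expression, as the
kernel macro does; the struct and helper here are illustrative:

#include <stdio.h>
#include <stdlib.h>

#define fetch_and_zero(ptr) ({			\
	__typeof__(*(ptr)) __val = *(ptr);	\
	*(ptr) = 0;				\
	__val;					\
})

struct engine { char *status_page; };

static void cleanup(struct engine *e)
{
	char *page = fetch_and_zero(&e->status_page);

	if (!page)
		return;		/* already torn down */
	free(page);
}

int main(void)
{
	struct engine e = { .status_page = malloc(16) };

	cleanup(&e);
	cleanup(&e);	/* benign second call */
	printf("ok\n");
	return 0;
}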
static int init_status_page(struct intel_engine_cs *engine)
@@ -1834,9 +1733,10 @@ static int init_status_page(struct intel_engine_cs *engine)
struct drm_i915_gem_object *obj;
struct i915_vma *vma;
unsigned int flags;
+ void *vaddr;
int ret;
- obj = i915_gem_object_create(&engine->i915->drm, 4096);
+ obj = i915_gem_object_create_internal(engine->i915, 4096);
if (IS_ERR(obj)) {
DRM_ERROR("Failed to allocate status page\n");
return PTR_ERR(obj);
@@ -1869,15 +1769,22 @@ static int init_status_page(struct intel_engine_cs *engine)
if (ret)
goto err;
+ vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
+ if (IS_ERR(vaddr)) {
+ ret = PTR_ERR(vaddr);
+ goto err_unpin;
+ }
+
engine->status_page.vma = vma;
engine->status_page.ggtt_offset = i915_ggtt_offset(vma);
- engine->status_page.page_addr =
- i915_gem_object_pin_map(obj, I915_MAP_WB);
+ engine->status_page.page_addr = memset(vaddr, 0, 4096);
DRM_DEBUG_DRIVER("%s hws offset: 0x%08x\n",
engine->name, i915_ggtt_offset(vma));
return 0;
+err_unpin:
+ i915_vma_unpin(vma);
err:
i915_gem_object_put(obj);
return ret;
@@ -1989,6 +1896,7 @@ intel_engine_create_ring(struct intel_engine_cs *engine, int size)
struct i915_vma *vma;
GEM_BUG_ON(!is_power_of_2(size));
+ GEM_BUG_ON(RING_CTL_SIZE(size) & ~RING_NR_PAGES);
ring = kzalloc(sizeof(*ring), GFP_KERNEL);
if (!ring)
@@ -2023,7 +1931,11 @@ intel_engine_create_ring(struct intel_engine_cs *engine, int size)
void
intel_ring_free(struct intel_ring *ring)
{
- i915_vma_put(ring->vma);
+ struct drm_i915_gem_object *obj = ring->vma->obj;
+
+ i915_vma_close(ring->vma);
+ __i915_gem_object_release_unless_active(obj);
+
kfree(ring);
}
@@ -2039,14 +1951,13 @@ static int intel_ring_context_pin(struct i915_gem_context *ctx,
return 0;
if (ce->state) {
- ret = i915_gem_object_set_to_gtt_domain(ce->state->obj, false);
- if (ret)
- goto error;
+ struct i915_vma *vma;
- ret = i915_vma_pin(ce->state, 0, ctx->ggtt_alignment,
- PIN_GLOBAL | PIN_HIGH);
- if (ret)
+ vma = i915_gem_context_pin_legacy(ctx, PIN_HIGH);
+ if (IS_ERR(vma)) {
+ ret = PTR_ERR(vma);
goto error;
+ }
}
/* The kernel context is only used as a placeholder for flushing the
@@ -2093,9 +2004,6 @@ static int intel_init_ring_buffer(struct intel_engine_cs *engine)
intel_engine_setup_common(engine);
- memset(engine->semaphore.sync_seqno, 0,
- sizeof(engine->semaphore.sync_seqno));
-
ret = intel_engine_init_common(engine);
if (ret)
goto error;
@@ -2146,9 +2054,6 @@ void intel_engine_cleanup(struct intel_engine_cs *engine)
{
struct drm_i915_private *dev_priv;
- if (!intel_engine_initialized(engine))
- return;
-
dev_priv = engine->i915;
if (engine->buffer) {
@@ -2175,13 +2080,16 @@ void intel_engine_cleanup(struct intel_engine_cs *engine)
intel_ring_context_unpin(dev_priv->kernel_context, engine);
engine->i915 = NULL;
+ dev_priv->engine[engine->id] = NULL;
+ kfree(engine);
}
void intel_legacy_submission_resume(struct drm_i915_private *dev_priv)
{
struct intel_engine_cs *engine;
+ enum intel_engine_id id;
- for_each_engine(engine, dev_priv) {
+ for_each_engine(engine, dev_priv, id) {
engine->buffer->head = engine->buffer->tail;
engine->buffer->last_retired_head = -1;
}
@@ -2211,7 +2119,9 @@ static int wait_for_space(struct drm_i915_gem_request *req, int bytes)
{
struct intel_ring *ring = req->ring;
struct drm_i915_gem_request *target;
- int ret;
+ long timeout;
+
+ lockdep_assert_held(&req->i915->drm.struct_mutex);
intel_ring_update_space(ring);
if (ring->space >= bytes)
@@ -2241,11 +2151,11 @@ static int wait_for_space(struct drm_i915_gem_request *req, int bytes)
if (WARN_ON(&target->ring_link == &ring->request_list))
return -ENOSPC;
- ret = i915_wait_request(target,
- I915_WAIT_INTERRUPTIBLE | I915_WAIT_LOCKED,
- NULL, NO_WAITBOOST);
- if (ret)
- return ret;
+ timeout = i915_wait_request(target,
+ I915_WAIT_INTERRUPTIBLE | I915_WAIT_LOCKED,
+ MAX_SCHEDULE_TIMEOUT);
+ if (timeout < 0)
+ return timeout;
i915_gem_request_retire_upto(target);
@@ -2674,9 +2584,22 @@ static void intel_ring_default_vfuncs(struct drm_i915_private *dev_priv,
engine->init_hw = init_ring_common;
engine->reset_hw = reset_ring_common;
- engine->emit_request = i9xx_emit_request;
- if (i915.semaphores)
- engine->emit_request = gen6_sema_emit_request;
+ engine->emit_breadcrumb = i9xx_emit_breadcrumb;
+ engine->emit_breadcrumb_sz = i9xx_emit_breadcrumb_sz;
+ if (i915.semaphores) {
+ int num_rings;
+
+ engine->emit_breadcrumb = gen6_sema_emit_breadcrumb;
+
+ num_rings = hweight32(INTEL_INFO(dev_priv)->ring_mask) - 1;
+ if (INTEL_GEN(dev_priv) >= 8) {
+ engine->emit_breadcrumb_sz += num_rings * 6;
+ } else {
+ engine->emit_breadcrumb_sz += num_rings * 3;
+ if (num_rings & 1)
+ engine->emit_breadcrumb_sz++;
+ }
+ }
engine->submit_request = i9xx_submit_request;
if (INTEL_GEN(dev_priv) >= 8)
@@ -2703,10 +2626,18 @@ int intel_init_render_ring_buffer(struct intel_engine_cs *engine)
if (INTEL_GEN(dev_priv) >= 8) {
engine->init_context = intel_rcs_ctx_init;
- engine->emit_request = gen8_render_emit_request;
+ engine->emit_breadcrumb = gen8_render_emit_breadcrumb;
+ engine->emit_breadcrumb_sz = gen8_render_emit_breadcrumb_sz;
engine->emit_flush = gen8_render_ring_flush;
- if (i915.semaphores)
+ if (i915.semaphores) {
+ int num_rings;
+
engine->semaphore.signal = gen8_rcs_signal;
+
+ num_rings =
+ hweight32(INTEL_INFO(dev_priv)->ring_mask) - 1;
+ engine->emit_breadcrumb_sz += num_rings * 6;
+ }
} else if (INTEL_GEN(dev_priv) >= 6) {
engine->init_context = intel_rcs_ctx_init;
engine->emit_flush = gen7_render_ring_flush;
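The emit_breadcrumb_sz bookkeeping above precomputes the exact dword budget
the breadcrumb needs, so the space can be reserved when the request is
allocated and the emit step itself can no longer fail. A sketch of that
sizing arithmetic, mirroring the numbers in this patch (4 base dwords, 6 per
extra ring on gen8+, 3 per extra ring plus an even-count pad on gen6/7):

#include <stdio.h>

static int breadcrumb_sz(int gen, int num_other_rings, int semaphores)
{
	int sz = 4;			/* i9xx_emit_breadcrumb_sz */

	if (!semaphores)
		return sz;
	if (gen >= 8) {
		sz += num_other_rings * 6;
	} else {
		sz += num_other_rings * 3;
		if (num_other_rings & 1)
			sz++;		/* MI_NOOP pad to an even count */
	}
	return sz;
}

int main(void)
{
	printf("gen8, 3 other rings: %d dwords\n", breadcrumb_sz(8, 3, 1));
	printf("gen6, 2 other rings: %d dwords\n", breadcrumb_sz(6, 2, 1));
	return 0;
}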
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index ec0b4a0c605d..3466b4e77e7c 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -4,6 +4,7 @@
#include <linux/hashtable.h>
#include "i915_gem_batch_pool.h"
#include "i915_gem_request.h"
+#include "i915_gem_timeline.h"
#define I915_CMD_HASH_ORDER 9
@@ -73,13 +74,40 @@ enum intel_engine_hangcheck_action {
#define HANGCHECK_SCORE_RING_HUNG 31
+#define I915_MAX_SLICES 3
+#define I915_MAX_SUBSLICES 3
+
+#define instdone_slice_mask(dev_priv__) \
+ (INTEL_GEN(dev_priv__) == 7 ? \
+ 1 : INTEL_INFO(dev_priv__)->sseu.slice_mask)
+
+#define instdone_subslice_mask(dev_priv__) \
+ (INTEL_GEN(dev_priv__) == 7 ? \
+ 1 : INTEL_INFO(dev_priv__)->sseu.subslice_mask)
+
+#define for_each_instdone_slice_subslice(dev_priv__, slice__, subslice__) \
+ for ((slice__) = 0, (subslice__) = 0; \
+ (slice__) < I915_MAX_SLICES; \
+ (subslice__) = ((subslice__) + 1) < I915_MAX_SUBSLICES ? (subslice__) + 1 : 0, \
+ (slice__) += ((subslice__) == 0)) \
+ for_each_if((BIT(slice__) & instdone_slice_mask(dev_priv__)) && \
+ (BIT(subslice__) & instdone_subslice_mask(dev_priv__)))
+
+struct intel_instdone {
+ u32 instdone;
+ /* The following exist only in the RCS engine */
+ u32 slice_common;
+ u32 sampler[I915_MAX_SLICES][I915_MAX_SUBSLICES];
+ u32 row[I915_MAX_SLICES][I915_MAX_SUBSLICES];
+};
+
struct intel_engine_hangcheck {
u64 acthd;
u32 seqno;
int score;
enum intel_engine_hangcheck_action action;
int deadlock;
- u32 instdone[I915_NUM_INSTDONE_REG];
+ struct intel_instdone instdone;
};
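for_each_instdone_slice_subslice() above folds a two-level slice/subslice
walk into a single for statement plus a filter. Written out as plain nested
loops, with illustrative masks rather than values read from the hardware,
it is equivalent to:

#include <stdio.h>

#define MAX_SLICES	3
#define MAX_SUBSLICES	3

int main(void)
{
	unsigned int slice_mask = 0x3;		/* slices 0 and 1 */
	unsigned int subslice_mask = 0x5;	/* subslices 0 and 2 */
	int s, ss;

	/* Visit every (slice, subslice) pair present in both masks. */
	for (s = 0; s < MAX_SLICES; s++)
		for (ss = 0; ss < MAX_SUBSLICES; ss++)
			if ((slice_mask & (1u << s)) &&
			    (subslice_mask & (1u << ss)))
				printf("slice %d subslice %d\n", s, ss);
	return 0;
}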
struct intel_ring {
@@ -130,6 +158,7 @@ struct i915_ctx_workarounds {
};
struct drm_i915_gem_request;
+struct intel_render_state;
struct intel_engine_cs {
struct drm_i915_private *i915;
@@ -141,7 +170,6 @@ struct intel_engine_cs {
VCS2, /* Keep instances of the same type engine together. */
VECS
} id;
-#define I915_NUM_ENGINES 5
#define _VCS(n) (VCS + (n))
unsigned int exec_id;
enum intel_engine_hw_id {
@@ -152,10 +180,12 @@ struct intel_engine_cs {
VCS2_HW
} hw_id;
enum intel_engine_hw_id guc_id; /* XXX same as hw_id? */
- u64 fence_context;
u32 mmio_base;
unsigned int irq_shift;
struct intel_ring *buffer;
+ struct intel_timeline *timeline;
+
+ struct intel_render_state *render_state;
/* Rather than have every client wait upon all user interrupts,
* with the herd waking after every interrupt and each doing the
@@ -177,7 +207,7 @@ struct intel_engine_cs {
struct task_struct __rcu *irq_seqno_bh; /* bh for interrupts */
bool irq_posted;
- spinlock_t lock; /* protects the lists of requests */
+ spinlock_t lock; /* protects the lists of requests; irqsafe */
struct rb_root waiters; /* sorted by retirement, priority */
struct rb_root signals; /* sorted by retirement */
struct intel_wait *first_wait; /* oldest waiter by retirement */
@@ -225,7 +255,9 @@ struct intel_engine_cs {
#define I915_DISPATCH_SECURE BIT(0)
#define I915_DISPATCH_PINNED BIT(1)
#define I915_DISPATCH_RS BIT(2)
- int (*emit_request)(struct drm_i915_gem_request *req);
+ void (*emit_breadcrumb)(struct drm_i915_gem_request *req,
+ u32 *out);
+ int emit_breadcrumb_sz;
/* Pass the request to the hardware queue (e.g. directly into
* the legacy ringbuffer or to the end of an execlist).
@@ -235,6 +267,15 @@ struct intel_engine_cs {
*/
void (*submit_request)(struct drm_i915_gem_request *req);
+ /* Call when the priority on a request has changed and it and its
+ * dependencies may need rescheduling. Note the request itself may
+ * not be ready to run!
+ *
+ * Called under the struct_mutex.
+ */
+ void (*schedule)(struct drm_i915_gem_request *request,
+ int priority);
+
/* Some chipsets are not quite as coherent as advertised and need
* an expensive kick to force a true read of the up-to-date seqno.
* However, the up-to-date seqno is not always required and the last
@@ -282,8 +323,6 @@ struct intel_engine_cs {
* ie. transpose of f(x, y)
*/
struct {
- u32 sync_seqno[I915_NUM_ENGINES-1];
-
union {
#define GEN6_SEMAPHORE_LAST VECS_HW
#define GEN6_NUM_SEMAPHORES (GEN6_SEMAPHORE_LAST + 1)
@@ -300,43 +339,22 @@ struct intel_engine_cs {
/* AKA wait() */
int (*sync_to)(struct drm_i915_gem_request *req,
struct drm_i915_gem_request *signal);
- int (*signal)(struct drm_i915_gem_request *req);
+ u32 *(*signal)(struct drm_i915_gem_request *req, u32 *out);
} semaphore;
/* Execlists */
struct tasklet_struct irq_tasklet;
- spinlock_t execlist_lock; /* used inside tasklet, use spin_lock_bh */
struct execlist_port {
struct drm_i915_gem_request *request;
unsigned int count;
} execlist_port[2];
- struct list_head execlist_queue;
+ struct rb_root execlist_queue;
+ struct rb_node *execlist_first;
unsigned int fw_domains;
bool disable_lite_restore_wa;
bool preempt_wa;
u32 ctx_desc_template;
- /**
- * List of breadcrumbs associated with GPU requests currently
- * outstanding.
- */
- struct list_head request_list;
-
- /**
- * Seqno of request most recently submitted to request_list.
- * Used exclusively by hang checker to avoid grabbing lock while
- * inspecting request list.
- */
- u32 last_submitted_seqno;
- u32 last_pending_seqno;
-
- /* An RCU guarded pointer to the last request. No reference is
- * held to the request, users must carefully acquire a reference to
- * the request using i915_gem_active_get_rcu(), or hold the
- * struct_mutex.
- */
- struct i915_gem_active last_request;
-
struct i915_gem_context *last_context;
struct intel_engine_hangcheck hangcheck;
@@ -368,39 +386,12 @@ struct intel_engine_cs {
u32 (*get_cmd_length_mask)(u32 cmd_header);
};
-static inline bool
-intel_engine_initialized(const struct intel_engine_cs *engine)
-{
- return engine->i915 != NULL;
-}
-
static inline unsigned
intel_engine_flag(const struct intel_engine_cs *engine)
{
return 1 << engine->id;
}
-static inline u32
-intel_engine_sync_index(struct intel_engine_cs *engine,
- struct intel_engine_cs *other)
-{
- int idx;
-
- /*
- * rcs -> 0 = vcs, 1 = bcs, 2 = vecs, 3 = vcs2;
- * vcs -> 0 = bcs, 1 = vecs, 2 = vcs2, 3 = rcs;
- * bcs -> 0 = vecs, 1 = vcs2. 2 = rcs, 3 = vcs;
- * vecs -> 0 = vcs2, 1 = rcs, 2 = vcs, 3 = bcs;
- * vcs2 -> 0 = rcs, 1 = vcs, 2 = bcs, 3 = vecs;
- */
-
- idx = (other - engine) - 1;
- if (idx < 0)
- idx += I915_NUM_ENGINES;
-
- return idx;
-}
-
static inline void
intel_flush_status_page(struct intel_engine_cs *engine, int reg)
{
@@ -483,30 +474,23 @@ static inline void intel_ring_advance(struct intel_ring *ring)
*/
}
-static inline u32 intel_ring_offset(struct intel_ring *ring, u32 value)
+static inline u32 intel_ring_offset(struct intel_ring *ring, void *addr)
{
/* Don't write ring->size (equivalent to 0) as that hangs some GPUs. */
- return value & (ring->size - 1);
+ u32 offset = addr - ring->vaddr;
+ return offset & (ring->size - 1);
}
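The mask works because ring sizes are powers of two (the GEM_BUG_ON added to
intel_engine_create_ring() enforces this), so the wrap can never produce the
forbidden value ring->size. A standalone sketch with illustrative sizes:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t ring_offset(uint32_t raw, uint32_t size)
{
	assert(size && (size & (size - 1)) == 0);	/* power of two */
	return raw & (size - 1);	/* wraps to 0, never to "size" */
}

int main(void)
{
	printf("%u\n", ring_offset(4096 + 12, 4096));	/* 12 */
	printf("%u\n", ring_offset(4096, 4096));	/* 0, not 4096 */
	return 0;
}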
int __intel_ring_space(int head, int tail, int size);
void intel_ring_update_space(struct intel_ring *ring);
-void intel_engine_init_seqno(struct intel_engine_cs *engine, u32 seqno);
+void intel_engine_init_global_seqno(struct intel_engine_cs *engine, u32 seqno);
void intel_engine_setup_common(struct intel_engine_cs *engine);
int intel_engine_init_common(struct intel_engine_cs *engine);
int intel_engine_create_scratch(struct intel_engine_cs *engine, int size);
void intel_engine_cleanup_common(struct intel_engine_cs *engine);
-static inline int intel_engine_idle(struct intel_engine_cs *engine,
- unsigned int flags)
-{
- /* Wait upon the last request to be completed */
- return i915_gem_active_wait_unlocked(&engine->last_request,
- flags, NULL, NULL);
-}
-
int intel_init_render_ring_buffer(struct intel_engine_cs *engine);
int intel_init_bsd_ring_buffer(struct intel_engine_cs *engine);
int intel_init_bsd2_ring_buffer(struct intel_engine_cs *engine);
@@ -514,13 +498,30 @@ int intel_init_blt_ring_buffer(struct intel_engine_cs *engine);
int intel_init_vebox_ring_buffer(struct intel_engine_cs *engine);
u64 intel_engine_get_active_head(struct intel_engine_cs *engine);
+u64 intel_engine_get_last_batch_head(struct intel_engine_cs *engine);
+
static inline u32 intel_engine_get_seqno(struct intel_engine_cs *engine)
{
return intel_read_status_page(engine, I915_GEM_HWS_INDEX);
}
+static inline u32 intel_engine_last_submit(struct intel_engine_cs *engine)
+{
+ /* We are only peeking at the tail of the submit queue (and not the
+ * queue itself) in order to gain a hint as to the current active
+ * state of the engine. Callers are not expected to be taking
+ * engine->timeline->lock, nor are they expected to be concerned
+ * with serialising this hint with anything, so document it as
+ * a hint and nothing more.
+ */
+ return READ_ONCE(engine->timeline->last_submitted_seqno);
+}
+
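READ_ONCE() above only forces a single, untorn load of a field another
context may be updating; it is a hint read, not a synchronisation point. A
userspace approximation of the idiom, assuming a volatile cast as a stand-in
for the kernel macro:

#include <stdint.h>
#include <stdio.h>

#define READ_ONCE_U32(x) (*(volatile const uint32_t *)&(x))

struct timeline { uint32_t last_submitted_seqno; };

/* Peek at the latest submitted seqno without taking any lock; the
 * value may already be stale by the time the caller inspects it. */
static uint32_t last_submit_hint(const struct timeline *tl)
{
	return READ_ONCE_U32(tl->last_submitted_seqno);
}

int main(void)
{
	struct timeline tl = { .last_submitted_seqno = 100 };

	printf("hint: %u\n", last_submit_hint(&tl));
	return 0;
}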
int init_workarounds_ring(struct intel_engine_cs *engine);
+void intel_engine_get_instdone(struct intel_engine_cs *engine,
+ struct intel_instdone *instdone);
+
/*
* Arbitrary size for largest possible 'add request' sequence. The code paths
* are complex and variable. Empirical measurement shows that the worst case
@@ -586,12 +587,6 @@ static inline bool intel_engine_wakeup(const struct intel_engine_cs *engine)
void intel_engine_reset_breadcrumbs(struct intel_engine_cs *engine);
void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine);
-unsigned int intel_kick_waiters(struct drm_i915_private *i915);
-unsigned int intel_kick_signalers(struct drm_i915_private *i915);
-
-static inline bool intel_engine_is_active(struct intel_engine_cs *engine)
-{
- return i915_gem_active_isset(&engine->last_request);
-}
+unsigned int intel_breadcrumbs_busy(struct drm_i915_private *i915);
#endif /* _INTEL_RINGBUFFER_H_ */
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c
index a38c2fefe85a..87b4af092d54 100644
--- a/drivers/gpu/drm/i915/intel_runtime_pm.c
+++ b/drivers/gpu/drm/i915/intel_runtime_pm.c
@@ -288,7 +288,6 @@ void intel_display_set_init_power(struct drm_i915_private *dev_priv,
static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv)
{
struct pci_dev *pdev = dev_priv->drm.pdev;
- struct drm_device *dev = &dev_priv->drm;
/*
* After we re-enable the power well, if we touch VGA register 0x3d5
@@ -304,7 +303,7 @@ static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv)
outb(inb(VGA_MSR_READ), VGA_MSR_WRITE);
vga_put(pdev, VGA_RSRC_LEGACY_IO);
- if (IS_BROADWELL(dev))
+ if (IS_BROADWELL(dev_priv))
gen8_irq_power_well_post_enable(dev_priv,
1 << PIPE_C | 1 << PIPE_B);
}
@@ -331,7 +330,7 @@ static void skl_power_well_post_enable(struct drm_i915_private *dev_priv,
* sure vgacon can keep working normally without triggering interrupts
* and error messages.
*/
- if (power_well->data == SKL_DISP_PW_2) {
+ if (power_well->id == SKL_DISP_PW_2) {
vga_get_uninterruptible(pdev, VGA_RSRC_LEGACY_IO);
outb(inb(VGA_MSR_READ), VGA_MSR_WRITE);
vga_put(pdev, VGA_RSRC_LEGACY_IO);
@@ -344,7 +343,7 @@ static void skl_power_well_post_enable(struct drm_i915_private *dev_priv,
static void skl_power_well_pre_disable(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
- if (power_well->data == SKL_DISP_PW_2)
+ if (power_well->id == SKL_DISP_PW_2)
gen8_irq_power_well_pre_disable(dev_priv,
1 << PIPE_C | 1 << PIPE_B);
}
@@ -659,7 +658,7 @@ static void
gen9_sanitize_power_well_requests(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
- enum skl_disp_power_wells power_well_id = power_well->data;
+ enum skl_disp_power_wells power_well_id = power_well->id;
u32 val;
u32 mask;
@@ -704,7 +703,7 @@ static void skl_set_power_well(struct drm_i915_private *dev_priv,
tmp = I915_READ(HSW_PWR_WELL_DRIVER);
fuse_status = I915_READ(SKL_FUSE_STATUS);
- switch (power_well->data) {
+ switch (power_well->id) {
case SKL_DISP_PW_1:
if (intel_wait_for_register(dev_priv,
SKL_FUSE_STATUS,
@@ -728,13 +727,13 @@ static void skl_set_power_well(struct drm_i915_private *dev_priv,
case SKL_DISP_PW_MISC_IO:
break;
default:
- WARN(1, "Unknown power well %lu\n", power_well->data);
+ WARN(1, "Unknown power well %lu\n", power_well->id);
return;
}
- req_mask = SKL_POWER_WELL_REQ(power_well->data);
+ req_mask = SKL_POWER_WELL_REQ(power_well->id);
enable_requested = tmp & req_mask;
- state_mask = SKL_POWER_WELL_STATE(power_well->data);
+ state_mask = SKL_POWER_WELL_STATE(power_well->id);
is_enabled = tmp & state_mask;
if (!enable && enable_requested)
@@ -770,14 +769,14 @@ static void skl_set_power_well(struct drm_i915_private *dev_priv,
power_well->name, enable ? "enable" : "disable");
if (check_fuse_status) {
- if (power_well->data == SKL_DISP_PW_1) {
+ if (power_well->id == SKL_DISP_PW_1) {
if (intel_wait_for_register(dev_priv,
SKL_FUSE_STATUS,
SKL_FUSE_PG1_DIST_STATUS,
SKL_FUSE_PG1_DIST_STATUS,
1))
DRM_ERROR("PG1 distributing status timeout\n");
- } else if (power_well->data == SKL_DISP_PW_2) {
+ } else if (power_well->id == SKL_DISP_PW_2) {
if (intel_wait_for_register(dev_priv,
SKL_FUSE_STATUS,
SKL_FUSE_PG2_DIST_STATUS,
@@ -819,8 +818,8 @@ static void hsw_power_well_disable(struct drm_i915_private *dev_priv,
static bool skl_power_well_enabled(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
- uint32_t mask = SKL_POWER_WELL_REQ(power_well->data) |
- SKL_POWER_WELL_STATE(power_well->data);
+ uint32_t mask = SKL_POWER_WELL_REQ(power_well->id) |
+ SKL_POWER_WELL_STATE(power_well->id);
return (I915_READ(HSW_PWR_WELL_DRIVER) & mask) == mask;
}
@@ -846,45 +845,22 @@ static void skl_power_well_disable(struct drm_i915_private *dev_priv,
skl_set_power_well(dev_priv, power_well, false);
}
-static enum dpio_phy bxt_power_well_to_phy(struct i915_power_well *power_well)
-{
- enum skl_disp_power_wells power_well_id = power_well->data;
-
- return power_well_id == BXT_DPIO_CMN_A ? DPIO_PHY1 : DPIO_PHY0;
-}
-
static void bxt_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
- enum skl_disp_power_wells power_well_id = power_well->data;
- struct i915_power_well *cmn_a_well = NULL;
-
- if (power_well_id == BXT_DPIO_CMN_BC) {
- /*
- * We need to copy the GRC calibration value from the eDP PHY,
- * so make sure it's powered up.
- */
- cmn_a_well = lookup_power_well(dev_priv, BXT_DPIO_CMN_A);
- intel_power_well_get(dev_priv, cmn_a_well);
- }
-
- bxt_ddi_phy_init(dev_priv, bxt_power_well_to_phy(power_well));
-
- if (cmn_a_well)
- intel_power_well_put(dev_priv, cmn_a_well);
+ bxt_ddi_phy_init(dev_priv, power_well->data);
}
static void bxt_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
- bxt_ddi_phy_uninit(dev_priv, bxt_power_well_to_phy(power_well));
+ bxt_ddi_phy_uninit(dev_priv, power_well->data);
}
static bool bxt_dpio_cmn_power_well_enabled(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
- return bxt_ddi_phy_is_enabled(dev_priv,
- bxt_power_well_to_phy(power_well));
+ return bxt_ddi_phy_is_enabled(dev_priv, power_well->data);
}
static void bxt_dpio_cmn_power_well_sync_hw(struct drm_i915_private *dev_priv,
@@ -903,13 +879,11 @@ static void bxt_verify_ddi_phy_power_wells(struct drm_i915_private *dev_priv)
power_well = lookup_power_well(dev_priv, BXT_DPIO_CMN_A);
if (power_well->count > 0)
- bxt_ddi_phy_verify_state(dev_priv,
- bxt_power_well_to_phy(power_well));
+ bxt_ddi_phy_verify_state(dev_priv, power_well->data);
power_well = lookup_power_well(dev_priv, BXT_DPIO_CMN_BC);
if (power_well->count > 0)
- bxt_ddi_phy_verify_state(dev_priv,
- bxt_power_well_to_phy(power_well));
+ bxt_ddi_phy_verify_state(dev_priv, power_well->data);
}
static bool gen9_dc_off_power_well_enabled(struct drm_i915_private *dev_priv,
@@ -933,7 +907,7 @@ static void gen9_dc_off_power_well_enable(struct drm_i915_private *dev_priv,
gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
WARN_ON(dev_priv->cdclk_freq !=
- dev_priv->display.get_display_clock_speed(&dev_priv->drm));
+ dev_priv->display.get_display_clock_speed(dev_priv));
gen9_assert_dbuf_enabled(dev_priv);
@@ -976,7 +950,7 @@ static bool i9xx_always_on_power_well_enabled(struct drm_i915_private *dev_priv,
static void vlv_set_power_well(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well, bool enable)
{
- enum punit_power_well power_well_id = power_well->data;
+ enum punit_power_well power_well_id = power_well->id;
u32 mask;
u32 state;
u32 ctrl;
@@ -1030,7 +1004,7 @@ static void vlv_power_well_disable(struct drm_i915_private *dev_priv,
static bool vlv_power_well_enabled(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
- int power_well_id = power_well->data;
+ int power_well_id = power_well->id;
bool enabled = false;
u32 mask;
u32 state;
@@ -1065,7 +1039,18 @@ static bool vlv_power_well_enabled(struct drm_i915_private *dev_priv,
static void vlv_init_display_clock_gating(struct drm_i915_private *dev_priv)
{
- I915_WRITE(DSPCLK_GATE_D, VRHUNIT_CLOCK_GATE_DISABLE);
+ u32 val;
+
+ /*
+ * On driver load, a pipe may be active and driving a DSI display.
+ * Preserve DPOUNIT_CLOCK_GATE_DISABLE to avoid the pipe getting stuck
+ * (and never recovering) in this case. intel_dsi_post_disable() will
+ * clear it when we turn off the display.
+ */
+ val = I915_READ(DSPCLK_GATE_D);
+ val &= DPOUNIT_CLOCK_GATE_DISABLE;
+ val |= VRHUNIT_CLOCK_GATE_DISABLE;
+ I915_WRITE(DSPCLK_GATE_D, val);
/*
* Disable trickle feed and enable pnd deadline calculation
@@ -1092,7 +1077,7 @@ static void vlv_display_power_well_init(struct drm_i915_private *dev_priv)
*
* CHV DPLL B/C have some issues if VGA mode is enabled.
*/
- for_each_pipe(&dev_priv->drm, pipe) {
+ for_each_pipe(dev_priv, pipe) {
u32 val = I915_READ(DPLL(pipe));
val |= DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
@@ -1123,7 +1108,7 @@ static void vlv_display_power_well_init(struct drm_i915_private *dev_priv)
intel_crt_reset(&encoder->base);
}
- i915_redisable_vga_power_on(&dev_priv->drm);
+ i915_redisable_vga_power_on(dev_priv);
intel_pps_unlock_regs_wa(dev_priv);
}
@@ -1147,7 +1132,7 @@ static void vlv_display_power_well_deinit(struct drm_i915_private *dev_priv)
static void vlv_display_power_well_enable(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
- WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DISP2D);
+ WARN_ON_ONCE(power_well->id != PUNIT_POWER_WELL_DISP2D);
vlv_set_power_well(dev_priv, power_well, true);
@@ -1157,7 +1142,7 @@ static void vlv_display_power_well_enable(struct drm_i915_private *dev_priv,
static void vlv_display_power_well_disable(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
- WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DISP2D);
+ WARN_ON_ONCE(power_well->id != PUNIT_POWER_WELL_DISP2D);
vlv_display_power_well_deinit(dev_priv);
@@ -1167,7 +1152,7 @@ static void vlv_display_power_well_disable(struct drm_i915_private *dev_priv,
static void vlv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
- WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC);
+ WARN_ON_ONCE(power_well->id != PUNIT_POWER_WELL_DPIO_CMN_BC);
/* since ref/cri clock was enabled */
udelay(1); /* >10ns for cmnreset, >0ns for sidereset */
@@ -1193,7 +1178,7 @@ static void vlv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
{
enum pipe pipe;
- WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC);
+ WARN_ON_ONCE(power_well->id != PUNIT_POWER_WELL_DPIO_CMN_BC);
for_each_pipe(dev_priv, pipe)
assert_pll_disabled(dev_priv, pipe);
@@ -1216,7 +1201,7 @@ static struct i915_power_well *lookup_power_well(struct drm_i915_private *dev_pr
struct i915_power_well *power_well;
power_well = &power_domains->power_wells[i];
- if (power_well->data == power_well_id)
+ if (power_well->id == power_well_id)
return power_well;
}
@@ -1340,10 +1325,10 @@ static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
enum pipe pipe;
uint32_t tmp;
- WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC &&
- power_well->data != PUNIT_POWER_WELL_DPIO_CMN_D);
+ WARN_ON_ONCE(power_well->id != PUNIT_POWER_WELL_DPIO_CMN_BC &&
+ power_well->id != PUNIT_POWER_WELL_DPIO_CMN_D);
- if (power_well->data == PUNIT_POWER_WELL_DPIO_CMN_BC) {
+ if (power_well->id == PUNIT_POWER_WELL_DPIO_CMN_BC) {
pipe = PIPE_A;
phy = DPIO_PHY0;
} else {
@@ -1371,7 +1356,7 @@ static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
DPIO_SUS_CLK_CONFIG_GATE_CLKREQ;
vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW28, tmp);
- if (power_well->data == PUNIT_POWER_WELL_DPIO_CMN_BC) {
+ if (power_well->id == PUNIT_POWER_WELL_DPIO_CMN_BC) {
tmp = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW6_CH1);
tmp |= DPIO_DYNPWRDOWNEN_CH1;
vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW6_CH1, tmp);
@@ -1402,10 +1387,10 @@ static void chv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
{
enum dpio_phy phy;
- WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC &&
- power_well->data != PUNIT_POWER_WELL_DPIO_CMN_D);
+ WARN_ON_ONCE(power_well->id != PUNIT_POWER_WELL_DPIO_CMN_BC &&
+ power_well->id != PUNIT_POWER_WELL_DPIO_CMN_D);
- if (power_well->data == PUNIT_POWER_WELL_DPIO_CMN_BC) {
+ if (power_well->id == PUNIT_POWER_WELL_DPIO_CMN_BC) {
phy = DPIO_PHY0;
assert_pll_disabled(dev_priv, PIPE_A);
assert_pll_disabled(dev_priv, PIPE_B);
@@ -1554,7 +1539,7 @@ void chv_phy_powergate_lanes(struct intel_encoder *encoder,
static bool chv_pipe_power_well_enabled(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
- enum pipe pipe = power_well->data;
+ enum pipe pipe = power_well->id;
bool enabled;
u32 state, ctrl;
@@ -1584,7 +1569,7 @@ static void chv_set_pipe_power_well(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well,
bool enable)
{
- enum pipe pipe = power_well->data;
+ enum pipe pipe = power_well->id;
u32 state;
u32 ctrl;
@@ -1617,7 +1602,7 @@ out:
static void chv_pipe_power_well_sync_hw(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
- WARN_ON_ONCE(power_well->data != PIPE_A);
+ WARN_ON_ONCE(power_well->id != PIPE_A);
chv_set_pipe_power_well(dev_priv, power_well, power_well->count > 0);
}
@@ -1625,7 +1610,7 @@ static void chv_pipe_power_well_sync_hw(struct drm_i915_private *dev_priv,
static void chv_pipe_power_well_enable(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
- WARN_ON_ONCE(power_well->data != PIPE_A);
+ WARN_ON_ONCE(power_well->id != PIPE_A);
chv_set_pipe_power_well(dev_priv, power_well, true);
@@ -1635,7 +1620,7 @@ static void chv_pipe_power_well_enable(struct drm_i915_private *dev_priv,
static void chv_pipe_power_well_disable(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
- WARN_ON_ONCE(power_well->data != PIPE_A);
+ WARN_ON_ONCE(power_well->id != PIPE_A);
vlv_display_power_well_deinit(dev_priv);
@@ -1979,12 +1964,12 @@ static struct i915_power_well vlv_power_wells[] = {
.always_on = 1,
.domains = POWER_DOMAIN_MASK,
.ops = &i9xx_always_on_power_well_ops,
- .data = PUNIT_POWER_WELL_ALWAYS_ON,
+ .id = PUNIT_POWER_WELL_ALWAYS_ON,
},
{
.name = "display",
.domains = VLV_DISPLAY_POWER_DOMAINS,
- .data = PUNIT_POWER_WELL_DISP2D,
+ .id = PUNIT_POWER_WELL_DISP2D,
.ops = &vlv_display_power_well_ops,
},
{
@@ -1994,7 +1979,7 @@ static struct i915_power_well vlv_power_wells[] = {
VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
.ops = &vlv_dpio_power_well_ops,
- .data = PUNIT_POWER_WELL_DPIO_TX_B_LANES_01,
+ .id = PUNIT_POWER_WELL_DPIO_TX_B_LANES_01,
},
{
.name = "dpio-tx-b-23",
@@ -2003,7 +1988,7 @@ static struct i915_power_well vlv_power_wells[] = {
VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
.ops = &vlv_dpio_power_well_ops,
- .data = PUNIT_POWER_WELL_DPIO_TX_B_LANES_23,
+ .id = PUNIT_POWER_WELL_DPIO_TX_B_LANES_23,
},
{
.name = "dpio-tx-c-01",
@@ -2012,7 +1997,7 @@ static struct i915_power_well vlv_power_wells[] = {
VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
.ops = &vlv_dpio_power_well_ops,
- .data = PUNIT_POWER_WELL_DPIO_TX_C_LANES_01,
+ .id = PUNIT_POWER_WELL_DPIO_TX_C_LANES_01,
},
{
.name = "dpio-tx-c-23",
@@ -2021,12 +2006,12 @@ static struct i915_power_well vlv_power_wells[] = {
VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
.ops = &vlv_dpio_power_well_ops,
- .data = PUNIT_POWER_WELL_DPIO_TX_C_LANES_23,
+ .id = PUNIT_POWER_WELL_DPIO_TX_C_LANES_23,
},
{
.name = "dpio-common",
.domains = VLV_DPIO_CMN_BC_POWER_DOMAINS,
- .data = PUNIT_POWER_WELL_DPIO_CMN_BC,
+ .id = PUNIT_POWER_WELL_DPIO_CMN_BC,
.ops = &vlv_dpio_cmn_power_well_ops,
},
};
@@ -2046,19 +2031,19 @@ static struct i915_power_well chv_power_wells[] = {
* required for any pipe to work.
*/
.domains = CHV_DISPLAY_POWER_DOMAINS,
- .data = PIPE_A,
+ .id = PIPE_A,
.ops = &chv_pipe_power_well_ops,
},
{
.name = "dpio-common-bc",
.domains = CHV_DPIO_CMN_BC_POWER_DOMAINS,
- .data = PUNIT_POWER_WELL_DPIO_CMN_BC,
+ .id = PUNIT_POWER_WELL_DPIO_CMN_BC,
.ops = &chv_dpio_cmn_power_well_ops,
},
{
.name = "dpio-common-d",
.domains = CHV_DPIO_CMN_D_POWER_DOMAINS,
- .data = PUNIT_POWER_WELL_DPIO_CMN_D,
+ .id = PUNIT_POWER_WELL_DPIO_CMN_D,
.ops = &chv_dpio_cmn_power_well_ops,
},
};
@@ -2081,57 +2066,57 @@ static struct i915_power_well skl_power_wells[] = {
.always_on = 1,
.domains = POWER_DOMAIN_MASK,
.ops = &i9xx_always_on_power_well_ops,
- .data = SKL_DISP_PW_ALWAYS_ON,
+ .id = SKL_DISP_PW_ALWAYS_ON,
},
{
.name = "power well 1",
/* Handled by the DMC firmware */
.domains = 0,
.ops = &skl_power_well_ops,
- .data = SKL_DISP_PW_1,
+ .id = SKL_DISP_PW_1,
},
{
.name = "MISC IO power well",
/* Handled by the DMC firmware */
.domains = 0,
.ops = &skl_power_well_ops,
- .data = SKL_DISP_PW_MISC_IO,
+ .id = SKL_DISP_PW_MISC_IO,
},
{
.name = "DC off",
.domains = SKL_DISPLAY_DC_OFF_POWER_DOMAINS,
.ops = &gen9_dc_off_power_well_ops,
- .data = SKL_DISP_PW_DC_OFF,
+ .id = SKL_DISP_PW_DC_OFF,
},
{
.name = "power well 2",
.domains = SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS,
.ops = &skl_power_well_ops,
- .data = SKL_DISP_PW_2,
+ .id = SKL_DISP_PW_2,
},
{
.name = "DDI A/E power well",
.domains = SKL_DISPLAY_DDI_A_E_POWER_DOMAINS,
.ops = &skl_power_well_ops,
- .data = SKL_DISP_PW_DDI_A_E,
+ .id = SKL_DISP_PW_DDI_A_E,
},
{
.name = "DDI B power well",
.domains = SKL_DISPLAY_DDI_B_POWER_DOMAINS,
.ops = &skl_power_well_ops,
- .data = SKL_DISP_PW_DDI_B,
+ .id = SKL_DISP_PW_DDI_B,
},
{
.name = "DDI C power well",
.domains = SKL_DISPLAY_DDI_C_POWER_DOMAINS,
.ops = &skl_power_well_ops,
- .data = SKL_DISP_PW_DDI_C,
+ .id = SKL_DISP_PW_DDI_C,
},
{
.name = "DDI D power well",
.domains = SKL_DISPLAY_DDI_D_POWER_DOMAINS,
.ops = &skl_power_well_ops,
- .data = SKL_DISP_PW_DDI_D,
+ .id = SKL_DISP_PW_DDI_D,
},
};
@@ -2146,31 +2131,33 @@ static struct i915_power_well bxt_power_wells[] = {
.name = "power well 1",
.domains = 0,
.ops = &skl_power_well_ops,
- .data = SKL_DISP_PW_1,
+ .id = SKL_DISP_PW_1,
},
{
.name = "DC off",
.domains = BXT_DISPLAY_DC_OFF_POWER_DOMAINS,
.ops = &gen9_dc_off_power_well_ops,
- .data = SKL_DISP_PW_DC_OFF,
+ .id = SKL_DISP_PW_DC_OFF,
},
{
.name = "power well 2",
.domains = BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS,
.ops = &skl_power_well_ops,
- .data = SKL_DISP_PW_2,
+ .id = SKL_DISP_PW_2,
},
{
.name = "dpio-common-a",
.domains = BXT_DPIO_CMN_A_POWER_DOMAINS,
.ops = &bxt_dpio_cmn_power_well_ops,
- .data = BXT_DPIO_CMN_A,
+ .id = BXT_DPIO_CMN_A,
+ .data = DPIO_PHY1,
},
{
.name = "dpio-common-bc",
.domains = BXT_DPIO_CMN_BC_POWER_DOMAINS,
.ops = &bxt_dpio_cmn_power_well_ops,
- .data = BXT_DPIO_CMN_BC,
+ .id = BXT_DPIO_CMN_BC,
+ .data = DPIO_PHY0,
},
};
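With the payload split out, .id stays a pure lookup key while .data carries
per-well platform state (for the BXT wells above, the DPIO phy), which is
what allows bxt_power_well_to_phy() to be deleted. A sketch of the
table-driven lookup; the names and values below are hypothetical:

#include <stddef.h>
#include <stdio.h>

struct power_well {
	const char *name;
	int id;		/* lookup key */
	long data;	/* opaque platform payload, e.g. a PHY index */
};

static const struct power_well wells[] = {
	{ "dpio-common-a",  10, 1 },	/* data: DPIO_PHY1 */
	{ "dpio-common-bc", 11, 0 },	/* data: DPIO_PHY0 */
};

static const struct power_well *lookup_power_well(int id)
{
	size_t i;

	for (i = 0; i < sizeof(wells) / sizeof(wells[0]); i++)
		if (wells[i].id == id)
			return &wells[i];
	return NULL;
}

int main(void)
{
	const struct power_well *pw = lookup_power_well(11);

	if (pw)
		printf("%s -> phy %ld\n", pw->name, pw->data);
	return 0;
}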
@@ -2592,20 +2579,19 @@ static void vlv_cmnlane_wa(struct drm_i915_private *dev_priv)
*/
void intel_power_domains_init_hw(struct drm_i915_private *dev_priv, bool resume)
{
- struct drm_device *dev = &dev_priv->drm;
struct i915_power_domains *power_domains = &dev_priv->power_domains;
power_domains->initializing = true;
- if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
+ if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
skl_display_core_init(dev_priv, resume);
- } else if (IS_BROXTON(dev)) {
+ } else if (IS_BROXTON(dev_priv)) {
bxt_display_core_init(dev_priv, resume);
- } else if (IS_CHERRYVIEW(dev)) {
+ } else if (IS_CHERRYVIEW(dev_priv)) {
mutex_lock(&power_domains->lock);
chv_phy_control_init(dev_priv);
mutex_unlock(&power_domains->lock);
- } else if (IS_VALLEYVIEW(dev)) {
+ } else if (IS_VALLEYVIEW(dev_priv)) {
mutex_lock(&power_domains->lock);
vlv_cmnlane_wa(dev_priv);
mutex_unlock(&power_domains->lock);
@@ -2740,8 +2726,7 @@ void intel_runtime_pm_put(struct drm_i915_private *dev_priv)
struct device *kdev = &pdev->dev;
assert_rpm_wakelock_held(dev_priv);
- if (atomic_dec_and_test(&dev_priv->pm.wakeref_count))
- atomic_inc(&dev_priv->pm.atomic_seq);
+ atomic_dec(&dev_priv->pm.wakeref_count);
pm_runtime_mark_last_busy(kdev);
pm_runtime_put_autosuspend(kdev);
@@ -2760,7 +2745,6 @@ void intel_runtime_pm_put(struct drm_i915_private *dev_priv)
void intel_runtime_pm_enable(struct drm_i915_private *dev_priv)
{
struct pci_dev *pdev = dev_priv->drm.pdev;
- struct drm_device *dev = &dev_priv->drm;
struct device *kdev = &pdev->dev;
pm_runtime_set_autosuspend_delay(kdev, 10000); /* 10s */
@@ -2772,7 +2756,7 @@ void intel_runtime_pm_enable(struct drm_i915_private *dev_priv)
* so the driver's own RPM reference tracking asserts also work on
* platforms without RPM support.
*/
- if (!HAS_RUNTIME_PM(dev)) {
+ if (!HAS_RUNTIME_PM(dev_priv)) {
pm_runtime_dont_use_autosuspend(kdev);
pm_runtime_get_sync(kdev);
} else {
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index c551024d4871..27808e91cb5a 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -251,7 +251,7 @@ static void intel_sdvo_write_sdvox(struct intel_sdvo *intel_sdvo, u32 val)
* HW workaround, need to write this twice for issue
* that may result in first write getting masked.
*/
- if (HAS_PCH_IBX(dev)) {
+ if (HAS_PCH_IBX(dev_priv)) {
I915_WRITE(intel_sdvo->sdvo_reg, val);
POSTING_READ(intel_sdvo->sdvo_reg);
}
@@ -307,7 +307,7 @@ static bool intel_sdvo_read_byte(struct intel_sdvo *intel_sdvo, u8 addr, u8 *ch)
static const struct _sdvo_cmd_name {
u8 cmd;
const char *name;
-} sdvo_cmd_names[] = {
+} __attribute__ ((packed)) sdvo_cmd_names[] = {
SDVO_CMD_NAME_ENTRY(SDVO_CMD_RESET),
SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_DEVICE_CAPS),
SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_FIRMWARE_REV),
@@ -1133,7 +1133,7 @@ static bool intel_sdvo_compute_config(struct intel_encoder *encoder,
DRM_DEBUG_KMS("forcing bpc to 8 for SDVO\n");
pipe_config->pipe_bpp = 8*3;
- if (HAS_PCH_SPLIT(encoder->base.dev))
+ if (HAS_PCH_SPLIT(to_i915(encoder->base.dev)))
pipe_config->has_pch_encoder = true;
/* We need to construct preferred input timings based on our
@@ -1195,8 +1195,7 @@ static void intel_sdvo_pre_enable(struct intel_encoder *intel_encoder,
struct intel_crtc_state *crtc_state,
struct drm_connector_state *conn_state)
{
- struct drm_device *dev = intel_encoder->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(intel_encoder->base.dev);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
const struct drm_display_mode *adjusted_mode = &crtc_state->base.adjusted_mode;
struct drm_display_mode *mode = &crtc_state->base.mode;
@@ -1269,13 +1268,13 @@ static void intel_sdvo_pre_enable(struct intel_encoder *intel_encoder,
return;
/* Set the SDVO control regs. */
- if (INTEL_INFO(dev)->gen >= 4) {
+ if (INTEL_GEN(dev_priv) >= 4) {
/* The real mode polarity is set by the SDVO commands, using
* struct intel_sdvo_dtd. */
sdvox = SDVO_VSYNC_ACTIVE_HIGH | SDVO_HSYNC_ACTIVE_HIGH;
- if (!HAS_PCH_SPLIT(dev) && crtc_state->limited_color_range)
+ if (!HAS_PCH_SPLIT(dev_priv) && crtc_state->limited_color_range)
sdvox |= HDMI_COLOR_RANGE_16_235;
- if (INTEL_INFO(dev)->gen < 5)
+ if (INTEL_GEN(dev_priv) < 5)
sdvox |= SDVO_BORDER_ENABLE;
} else {
sdvox = I915_READ(intel_sdvo->sdvo_reg);
@@ -1286,7 +1285,7 @@ static void intel_sdvo_pre_enable(struct intel_encoder *intel_encoder,
sdvox |= (9 << 19) | SDVO_BORDER_ENABLE;
}
- if (INTEL_PCH_TYPE(dev) >= PCH_CPT)
+ if (INTEL_PCH_TYPE(dev_priv) >= PCH_CPT)
sdvox |= SDVO_PIPE_SEL_CPT(crtc->pipe);
else
sdvox |= SDVO_PIPE_SEL(crtc->pipe);
@@ -1294,9 +1293,10 @@ static void intel_sdvo_pre_enable(struct intel_encoder *intel_encoder,
if (intel_sdvo->has_hdmi_audio)
sdvox |= SDVO_AUDIO_ENABLE;
- if (INTEL_INFO(dev)->gen >= 4) {
+ if (INTEL_GEN(dev_priv) >= 4) {
/* done in crtc_mode_set as the dpll_md reg must be written early */
- } else if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) {
+ } else if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) ||
+ IS_G33(dev_priv)) {
/* done in crtc_mode_set as it lives inside the dpll register */
} else {
sdvox |= (crtc_state->pixel_multiplier - 1)
@@ -1304,7 +1304,7 @@ static void intel_sdvo_pre_enable(struct intel_encoder *intel_encoder,
}
if (input_dtd.part2.sdvo_flags & SDVO_NEED_TO_STALL &&
- INTEL_INFO(dev)->gen < 5)
+ INTEL_GEN(dev_priv) < 5)
sdvox |= SDVO_STALL_SELECT;
intel_sdvo_write_sdvox(intel_sdvo, sdvox);
}
@@ -1339,7 +1339,7 @@ static bool intel_sdvo_get_hw_state(struct intel_encoder *encoder,
if (!(tmp & SDVO_ENABLE) && (active_outputs == 0))
return false;
- if (HAS_PCH_CPT(dev))
+ if (HAS_PCH_CPT(dev_priv))
*pipe = PORT_TO_PIPE_CPT(tmp);
else
*pipe = PORT_TO_PIPE(tmp);
@@ -1389,7 +1389,7 @@ static void intel_sdvo_get_config(struct intel_encoder *encoder,
	 * encoder->get_config, so we already have a valid pixel multiplier on all
	 * other platforms.
*/
- if (IS_I915G(dev) || IS_I915GM(dev)) {
+ if (IS_I915G(dev_priv) || IS_I915GM(dev_priv)) {
pipe_config->pixel_multiplier =
((sdvox & SDVO_PORT_MULTIPLY_MASK)
>> SDVO_PORT_MULTIPLY_SHIFT) + 1;
@@ -1471,7 +1471,7 @@ static void intel_disable_sdvo(struct intel_encoder *encoder,
temp &= ~SDVO_ENABLE;
intel_sdvo_write_sdvox(intel_sdvo, temp);
- intel_wait_for_vblank_if_active(&dev_priv->drm, PIPE_A);
+ intel_wait_for_vblank_if_active(dev_priv, PIPE_A);
intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, true);
intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, true);
}
@@ -1508,7 +1508,7 @@ static void intel_enable_sdvo(struct intel_encoder *encoder,
intel_sdvo_write_sdvox(intel_sdvo, temp);
for (i = 0; i < 2; i++)
- intel_wait_for_vblank(dev, intel_crtc->pipe);
+ intel_wait_for_vblank(dev_priv, intel_crtc->pipe);
success = intel_sdvo_get_trained_inputs(intel_sdvo, &input1, &input2);
/* Warn if the device reported failure to sync.
@@ -1595,15 +1595,15 @@ static bool intel_sdvo_get_capabilities(struct intel_sdvo *intel_sdvo, struct in
static uint16_t intel_sdvo_get_hotplug_support(struct intel_sdvo *intel_sdvo)
{
- struct drm_device *dev = intel_sdvo->base.base.dev;
+ struct drm_i915_private *dev_priv = to_i915(intel_sdvo->base.base.dev);
uint16_t hotplug;
- if (!I915_HAS_HOTPLUG(dev))
+ if (!I915_HAS_HOTPLUG(dev_priv))
return 0;
/* HW Erratum: SDVO Hotplug is broken on all i945G chips, there's noise
* on the line. */
- if (IS_I945G(dev) || IS_I945GM(dev))
+ if (IS_I945G(dev_priv) || IS_I945GM(dev_priv))
return 0;
if (!intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_HOT_PLUG_SUPPORT,
@@ -2410,10 +2410,10 @@ static void
intel_sdvo_add_hdmi_properties(struct intel_sdvo *intel_sdvo,
struct intel_sdvo_connector *connector)
{
- struct drm_device *dev = connector->base.base.dev;
+ struct drm_i915_private *dev_priv = to_i915(connector->base.base.dev);
intel_attach_force_audio_property(&connector->base.base);
- if (INTEL_INFO(dev)->gen >= 4 && IS_MOBILE(dev)) {
+ if (INTEL_GEN(dev_priv) >= 4 && IS_MOBILE(dev_priv)) {
intel_attach_broadcast_rgb_property(&connector->base.base);
intel_sdvo->color_range_auto = true;
}
@@ -2981,6 +2981,7 @@ bool intel_sdvo_init(struct drm_device *dev,
/* encoder type will be decided later */
intel_encoder = &intel_sdvo->base;
intel_encoder->type = INTEL_OUTPUT_SDVO;
+ intel_encoder->port = port;
drm_encoder_init(dev, &intel_encoder->base, &intel_sdvo_enc_funcs, 0,
"SDVO %c", port_name(port));
@@ -2996,7 +2997,7 @@ bool intel_sdvo_init(struct drm_device *dev,
}
intel_encoder->compute_config = intel_sdvo_compute_config;
- if (HAS_PCH_SPLIT(dev)) {
+ if (HAS_PCH_SPLIT(dev_priv)) {
intel_encoder->disable = pch_disable_sdvo;
intel_encoder->post_disable = pch_post_disable_sdvo;
} else {
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
index dbed12c484c9..8f131a08d440 100644
--- a/drivers/gpu/drm/i915/intel_sprite.c
+++ b/drivers/gpu/drm/i915/intel_sprite.c
@@ -203,9 +203,6 @@ skl_update_plane(struct drm_plane *drm_plane,
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_plane *intel_plane = to_intel_plane(drm_plane);
struct drm_framebuffer *fb = plane_state->base.fb;
- const struct skl_wm_values *wm = &dev_priv->wm.skl_results;
- struct drm_crtc *crtc = crtc_state->base.crtc;
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
const int pipe = intel_plane->pipe;
const int plane = intel_plane->plane + 1;
u32 plane_ctl;
@@ -227,13 +224,10 @@ skl_update_plane(struct drm_plane *drm_plane,
PLANE_CTL_PIPE_CSC_ENABLE;
plane_ctl |= skl_plane_ctl_format(fb->pixel_format);
- plane_ctl |= skl_plane_ctl_tiling(fb->modifier[0]);
+ plane_ctl |= skl_plane_ctl_tiling(fb->modifier);
plane_ctl |= skl_plane_ctl_rotation(rotation);
- if (wm->dirty_pipes & drm_crtc_mask(crtc))
- skl_write_plane_wm(intel_crtc, wm, plane);
-
if (key->flags) {
I915_WRITE(PLANE_KEYVAL(pipe, plane), key->min_value);
I915_WRITE(PLANE_KEYMAX(pipe, plane), key->max_value);
@@ -292,14 +286,6 @@ skl_disable_plane(struct drm_plane *dplane, struct drm_crtc *crtc)
const int pipe = intel_plane->pipe;
const int plane = intel_plane->plane + 1;
- /*
- * We only populate skl_results on watermark updates, and if the
- * plane's visiblity isn't actually changing neither is its watermarks.
- */
- if (!dplane->state->visible)
- skl_write_plane_wm(to_intel_crtc(crtc),
- &dev_priv->wm.skl_results, plane);
-
I915_WRITE(PLANE_CTL(pipe, plane), 0);
I915_WRITE(PLANE_SURF(pipe, plane), 0);
@@ -420,9 +406,15 @@ vlv_update_plane(struct drm_plane *dplane,
*/
sprctl |= SP_GAMMA_ENABLE;
- if (fb->modifier[0] == I915_FORMAT_MOD_X_TILED)
+ if (fb->modifier == I915_FORMAT_MOD_X_TILED)
sprctl |= SP_TILED;
+ if (rotation & DRM_ROTATE_180)
+ sprctl |= SP_ROTATE_180;
+
+ if (rotation & DRM_REFLECT_X)
+ sprctl |= SP_MIRROR;
+
/* Sizes are 0 based */
src_w--;
src_h--;
@@ -432,11 +424,11 @@ vlv_update_plane(struct drm_plane *dplane,
intel_add_fb_offsets(&x, &y, plane_state, 0);
sprsurf_offset = intel_compute_tile_offset(&x, &y, plane_state, 0);
- if (rotation == DRM_ROTATE_180) {
- sprctl |= SP_ROTATE_180;
-
+ if (rotation & DRM_ROTATE_180) {
x += src_w;
y += src_h;
+ } else if (rotation & DRM_REFLECT_X) {
+ x += src_w;
}
linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0);
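
The hunks above switch the rotation checks from '==' to '&' because the rotation property is now a bitmask that can combine an angle with a reflection, so an equality test would miss values like DRM_ROTATE_180 | DRM_REFLECT_X. A self-contained illustration; the flag values below are chosen for the example, not taken from the DRM headers.

#include <stdio.h>

#define ROTATE_0	(1u << 0)
#define ROTATE_180	(1u << 2)
#define REFLECT_X	(1u << 4)

int main(void)
{
	unsigned int rotation = ROTATE_180 | REFLECT_X;

	/* wrong once reflection bits can be set alongside the angle */
	printf("== ROTATE_180: %d\n", rotation == ROTATE_180);	/* 0 */
	/* correct for individual bitmask properties */
	printf("&  ROTATE_180: %d\n", !!(rotation & ROTATE_180));	/* 1 */
	printf("&  REFLECT_X:  %d\n", !!(rotation & REFLECT_X));	/* 1 */
	return 0;
}
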
@@ -450,13 +442,13 @@ vlv_update_plane(struct drm_plane *dplane,
if (key->flags & I915_SET_COLORKEY_SOURCE)
sprctl |= SP_SOURCE_KEY;
- if (IS_CHERRYVIEW(dev) && pipe == PIPE_B)
+ if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B)
chv_update_csc(intel_plane, fb->pixel_format);
I915_WRITE(SPSTRIDE(pipe, plane), fb->pitches[0]);
I915_WRITE(SPPOS(pipe, plane), (crtc_y << 16) | crtc_x);
- if (fb->modifier[0] == I915_FORMAT_MOD_X_TILED)
+ if (fb->modifier == I915_FORMAT_MOD_X_TILED)
I915_WRITE(SPTILEOFF(pipe, plane), (y << 16) | x);
else
I915_WRITE(SPLINOFF(pipe, plane), linear_offset);
@@ -539,15 +531,18 @@ ivb_update_plane(struct drm_plane *plane,
*/
sprctl |= SPRITE_GAMMA_ENABLE;
- if (fb->modifier[0] == I915_FORMAT_MOD_X_TILED)
+ if (fb->modifier == I915_FORMAT_MOD_X_TILED)
sprctl |= SPRITE_TILED;
- if (IS_HASWELL(dev) || IS_BROADWELL(dev))
+ if (rotation & DRM_ROTATE_180)
+ sprctl |= SPRITE_ROTATE_180;
+
+ if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
sprctl &= ~SPRITE_TRICKLE_FEED_DISABLE;
else
sprctl |= SPRITE_TRICKLE_FEED_DISABLE;
- if (IS_HASWELL(dev) || IS_BROADWELL(dev))
+ if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
sprctl |= SPRITE_PIPE_CSC_ENABLE;
/* Sizes are 0 based */
@@ -562,14 +557,11 @@ ivb_update_plane(struct drm_plane *plane,
intel_add_fb_offsets(&x, &y, plane_state, 0);
sprsurf_offset = intel_compute_tile_offset(&x, &y, plane_state, 0);
- if (rotation == DRM_ROTATE_180) {
- sprctl |= SPRITE_ROTATE_180;
-
- /* HSW and BDW does this automagically in hardware */
- if (!IS_HASWELL(dev) && !IS_BROADWELL(dev)) {
- x += src_w;
- y += src_h;
- }
+ /* HSW+ does this automagically in hardware */
+ if (!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv) &&
+ rotation & DRM_ROTATE_180) {
+ x += src_w;
+ y += src_h;
}
linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0);
@@ -590,9 +582,9 @@ ivb_update_plane(struct drm_plane *plane,
/* HSW consolidates SPRTILEOFF and SPRLINOFF into a single SPROFFSET
* register */
- if (IS_HASWELL(dev) || IS_BROADWELL(dev))
+ if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
I915_WRITE(SPROFFSET(pipe), (y << 16) | x);
- else if (fb->modifier[0] == I915_FORMAT_MOD_X_TILED)
+ else if (fb->modifier == I915_FORMAT_MOD_X_TILED)
I915_WRITE(SPRTILEOFF(pipe), (y << 16) | x);
else
I915_WRITE(SPRLINOFF(pipe), linear_offset);
@@ -677,10 +669,13 @@ ilk_update_plane(struct drm_plane *plane,
*/
dvscntr |= DVS_GAMMA_ENABLE;
- if (fb->modifier[0] == I915_FORMAT_MOD_X_TILED)
+ if (fb->modifier == I915_FORMAT_MOD_X_TILED)
dvscntr |= DVS_TILED;
- if (IS_GEN6(dev))
+ if (rotation & DRM_ROTATE_180)
+ dvscntr |= DVS_ROTATE_180;
+
+ if (IS_GEN6(dev_priv))
dvscntr |= DVS_TRICKLE_FEED_DISABLE; /* must disable */
/* Sizes are 0 based */
@@ -696,9 +691,7 @@ ilk_update_plane(struct drm_plane *plane,
intel_add_fb_offsets(&x, &y, plane_state, 0);
dvssurf_offset = intel_compute_tile_offset(&x, &y, plane_state, 0);
- if (rotation == DRM_ROTATE_180) {
- dvscntr |= DVS_ROTATE_180;
-
+ if (rotation & DRM_ROTATE_180) {
x += src_w;
y += src_h;
}
@@ -719,7 +712,7 @@ ilk_update_plane(struct drm_plane *plane,
I915_WRITE(DVSSTRIDE(pipe), fb->pitches[0]);
I915_WRITE(DVSPOS(pipe), (crtc_y << 16) | crtc_x);
- if (fb->modifier[0] == I915_FORMAT_MOD_X_TILED)
+ if (fb->modifier == I915_FORMAT_MOD_X_TILED)
I915_WRITE(DVSTILEOFF(pipe), (y << 16) | x);
else
I915_WRITE(DVSLINOFF(pipe), linear_offset);
@@ -753,7 +746,7 @@ intel_check_sprite_plane(struct drm_plane *plane,
struct intel_crtc_state *crtc_state,
struct intel_plane_state *state)
{
- struct drm_device *dev = plane->dev;
+ struct drm_i915_private *dev_priv = to_i915(plane->dev);
struct drm_crtc *crtc = state->base.crtc;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_plane *intel_plane = to_intel_plane(plane);
@@ -769,15 +762,8 @@ intel_check_sprite_plane(struct drm_plane *plane,
bool can_scale;
int ret;
- src->x1 = state->base.src_x;
- src->y1 = state->base.src_y;
- src->x2 = state->base.src_x + state->base.src_w;
- src->y2 = state->base.src_y + state->base.src_h;
-
- dst->x1 = state->base.crtc_x;
- dst->y1 = state->base.crtc_y;
- dst->x2 = state->base.crtc_x + state->base.crtc_w;
- dst->y2 = state->base.crtc_y + state->base.crtc_h;
+ *src = drm_plane_state_src(&state->base);
+ *dst = drm_plane_state_dest(&state->base);
if (!fb) {
state->base.visible = false;
@@ -797,7 +783,7 @@ intel_check_sprite_plane(struct drm_plane *plane,
}
/* setup can_scale, min_scale, max_scale */
- if (INTEL_INFO(dev)->gen >= 9) {
+ if (INTEL_GEN(dev_priv) >= 9) {
/* use scaler when colorkey is not required */
if (state->ckey.flags == I915_SET_COLORKEY_NONE) {
can_scale = 1;
@@ -913,7 +899,7 @@ intel_check_sprite_plane(struct drm_plane *plane,
width_bytes = ((src_x * cpp) & 63) + src_w * cpp;
- if (INTEL_INFO(dev)->gen < 9 && (src_w > 2048 || src_h > 2048 ||
+ if (INTEL_GEN(dev_priv) < 9 && (src_w > 2048 || src_h > 2048 ||
width_bytes > 4096 || fb->pitches[0] > 4096)) {
DRM_DEBUG_KMS("Source dimensions exceed hardware limits\n");
return -EINVAL;
@@ -932,7 +918,7 @@ intel_check_sprite_plane(struct drm_plane *plane,
dst->y1 = crtc_y;
dst->y2 = crtc_y + crtc_h;
- if (INTEL_GEN(dev) >= 9) {
+ if (INTEL_GEN(dev_priv) >= 9) {
ret = skl_check_plane_surface(state);
if (ret)
return ret;
@@ -944,6 +930,7 @@ intel_check_sprite_plane(struct drm_plane *plane,
int intel_sprite_set_colorkey(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_intel_sprite_colorkey *set = data;
struct drm_plane *plane;
struct drm_plane_state *plane_state;
@@ -955,7 +942,7 @@ int intel_sprite_set_colorkey(struct drm_device *dev, void *data,
if ((set->flags & (I915_SET_COLORKEY_DESTINATION | I915_SET_COLORKEY_SOURCE)) == (I915_SET_COLORKEY_DESTINATION | I915_SET_COLORKEY_SOURCE))
return -EINVAL;
- if ((IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) &&
+ if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
set->flags & I915_SET_COLORKEY_DESTINATION)
return -EINVAL;
@@ -987,9 +974,7 @@ int intel_sprite_set_colorkey(struct drm_device *dev, void *data,
drm_modeset_backoff(&ctx);
}
- if (ret)
- drm_atomic_state_free(state);
-
+ drm_atomic_state_put(state);
out:
drm_modeset_drop_locks(&ctx);
drm_modeset_acquire_fini(&ctx);
@@ -1039,19 +1024,18 @@ static uint32_t skl_plane_formats[] = {
DRM_FORMAT_VYUY,
};
-int
-intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane)
+struct intel_plane *
+intel_sprite_plane_create(struct drm_i915_private *dev_priv,
+ enum pipe pipe, int plane)
{
struct intel_plane *intel_plane = NULL;
struct intel_plane_state *state = NULL;
unsigned long possible_crtcs;
const uint32_t *plane_formats;
+ unsigned int supported_rotations;
int num_plane_formats;
int ret;
- if (INTEL_INFO(dev)->gen < 5)
- return -ENODEV;
-
intel_plane = kzalloc(sizeof(*intel_plane), GFP_KERNEL);
if (!intel_plane) {
ret = -ENOMEM;
@@ -1065,26 +1049,26 @@ intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane)
}
intel_plane->base.state = &state->base;
- switch (INTEL_INFO(dev)->gen) {
- case 5:
- case 6:
+ if (INTEL_GEN(dev_priv) >= 9) {
intel_plane->can_scale = true;
- intel_plane->max_downscale = 16;
- intel_plane->update_plane = ilk_update_plane;
- intel_plane->disable_plane = ilk_disable_plane;
+ state->scaler_id = -1;
- if (IS_GEN6(dev)) {
- plane_formats = snb_plane_formats;
- num_plane_formats = ARRAY_SIZE(snb_plane_formats);
- } else {
- plane_formats = ilk_plane_formats;
- num_plane_formats = ARRAY_SIZE(ilk_plane_formats);
- }
- break;
+ intel_plane->update_plane = skl_update_plane;
+ intel_plane->disable_plane = skl_disable_plane;
+
+ plane_formats = skl_plane_formats;
+ num_plane_formats = ARRAY_SIZE(skl_plane_formats);
+ } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
+ intel_plane->can_scale = false;
+ intel_plane->max_downscale = 1;
- case 7:
- case 8:
- if (IS_IVYBRIDGE(dev)) {
+ intel_plane->update_plane = vlv_update_plane;
+ intel_plane->disable_plane = vlv_disable_plane;
+
+ plane_formats = vlv_plane_formats;
+ num_plane_formats = ARRAY_SIZE(vlv_plane_formats);
+ } else if (INTEL_GEN(dev_priv) >= 7) {
+ if (IS_IVYBRIDGE(dev_priv)) {
intel_plane->can_scale = true;
intel_plane->max_downscale = 2;
} else {
@@ -1092,33 +1076,38 @@ intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane)
intel_plane->max_downscale = 1;
}
- if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
- intel_plane->update_plane = vlv_update_plane;
- intel_plane->disable_plane = vlv_disable_plane;
+ intel_plane->update_plane = ivb_update_plane;
+ intel_plane->disable_plane = ivb_disable_plane;
- plane_formats = vlv_plane_formats;
- num_plane_formats = ARRAY_SIZE(vlv_plane_formats);
- } else {
- intel_plane->update_plane = ivb_update_plane;
- intel_plane->disable_plane = ivb_disable_plane;
+ plane_formats = snb_plane_formats;
+ num_plane_formats = ARRAY_SIZE(snb_plane_formats);
+ } else {
+ intel_plane->can_scale = true;
+ intel_plane->max_downscale = 16;
+
+ intel_plane->update_plane = ilk_update_plane;
+ intel_plane->disable_plane = ilk_disable_plane;
+ if (IS_GEN6(dev_priv)) {
plane_formats = snb_plane_formats;
num_plane_formats = ARRAY_SIZE(snb_plane_formats);
+ } else {
+ plane_formats = ilk_plane_formats;
+ num_plane_formats = ARRAY_SIZE(ilk_plane_formats);
}
- break;
- case 9:
- intel_plane->can_scale = true;
- intel_plane->update_plane = skl_update_plane;
- intel_plane->disable_plane = skl_disable_plane;
- state->scaler_id = -1;
+ }
- plane_formats = skl_plane_formats;
- num_plane_formats = ARRAY_SIZE(skl_plane_formats);
- break;
- default:
- MISSING_CASE(INTEL_INFO(dev)->gen);
- ret = -ENODEV;
- goto fail;
+ if (INTEL_GEN(dev_priv) >= 9) {
+ supported_rotations =
+ DRM_ROTATE_0 | DRM_ROTATE_90 |
+ DRM_ROTATE_180 | DRM_ROTATE_270;
+ } else if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) {
+ supported_rotations =
+ DRM_ROTATE_0 | DRM_ROTATE_180 |
+ DRM_REFLECT_X;
+ } else {
+ supported_rotations =
+ DRM_ROTATE_0 | DRM_ROTATE_180;
}
intel_plane->pipe = pipe;
@@ -1128,30 +1117,32 @@ intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane)
possible_crtcs = (1 << pipe);
- if (INTEL_INFO(dev)->gen >= 9)
- ret = drm_universal_plane_init(dev, &intel_plane->base, possible_crtcs,
- &intel_plane_funcs,
+ if (INTEL_GEN(dev_priv) >= 9)
+ ret = drm_universal_plane_init(&dev_priv->drm, &intel_plane->base,
+ possible_crtcs, &intel_plane_funcs,
plane_formats, num_plane_formats,
DRM_PLANE_TYPE_OVERLAY,
"plane %d%c", plane + 2, pipe_name(pipe));
else
- ret = drm_universal_plane_init(dev, &intel_plane->base, possible_crtcs,
- &intel_plane_funcs,
+ ret = drm_universal_plane_init(&dev_priv->drm, &intel_plane->base,
+ possible_crtcs, &intel_plane_funcs,
plane_formats, num_plane_formats,
DRM_PLANE_TYPE_OVERLAY,
"sprite %c", sprite_name(pipe, plane));
if (ret)
goto fail;
- intel_create_rotation_property(dev, intel_plane);
+ drm_plane_create_rotation_property(&intel_plane->base,
+ DRM_ROTATE_0,
+ supported_rotations);
drm_plane_helper_add(&intel_plane->base, &intel_plane_helper_funcs);
- return 0;
+ return intel_plane;
fail:
kfree(state);
kfree(intel_plane);
- return ret;
+ return ERR_PTR(ret);
}
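
intel_sprite_plane_create() now returns the plane or ERR_PTR(ret) instead of an int plus out-parameter. A minimal sketch of that kernel idiom from linux/err.h; the widget type and its caller are illustrative stand-ins.

#include <linux/err.h>
#include <linux/errno.h>
#include <linux/slab.h>

struct widget {
	int id;
};

static struct widget *widget_create(int id)
{
	struct widget *w = kzalloc(sizeof(*w), GFP_KERNEL);

	if (!w)
		return ERR_PTR(-ENOMEM);	/* errno encoded in the pointer */
	w->id = id;
	return w;
}

static int widget_demo(void)
{
	struct widget *w = widget_create(1);

	if (IS_ERR(w))
		return PTR_ERR(w);	/* decode back to -ENOMEM etc. */
	kfree(w);
	return 0;
}
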
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index d960e4866595..78cdfc6833d6 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -86,7 +86,8 @@ struct intel_tv {
};
struct video_levels {
- int blank, black, burst;
+ u16 blank, black;
+ u8 burst;
};
struct color_conversion {
@@ -339,34 +340,43 @@ static const struct video_levels component_levels = {
struct tv_mode {
const char *name;
- int clock;
- int refresh; /* in millihertz (for precision) */
+
+ u32 clock;
+ u16 refresh; /* in millihertz (for precision) */
u32 oversample;
- int hsync_end, hblank_start, hblank_end, htotal;
- bool progressive, trilevel_sync, component_only;
- int vsync_start_f1, vsync_start_f2, vsync_len;
- bool veq_ena;
- int veq_start_f1, veq_start_f2, veq_len;
- int vi_end_f1, vi_end_f2, nbr_end;
- bool burst_ena;
- int hburst_start, hburst_len;
- int vburst_start_f1, vburst_end_f1;
- int vburst_start_f2, vburst_end_f2;
- int vburst_start_f3, vburst_end_f3;
- int vburst_start_f4, vburst_end_f4;
+ u8 hsync_end;
+ u16 hblank_start, hblank_end, htotal;
+ bool progressive : 1, trilevel_sync : 1, component_only : 1;
+ u8 vsync_start_f1, vsync_start_f2, vsync_len;
+ bool veq_ena : 1;
+ u8 veq_start_f1, veq_start_f2, veq_len;
+ u8 vi_end_f1, vi_end_f2;
+ u16 nbr_end;
+ bool burst_ena : 1;
+ u8 hburst_start, hburst_len;
+ u8 vburst_start_f1;
+ u16 vburst_end_f1;
+ u8 vburst_start_f2;
+ u16 vburst_end_f2;
+ u8 vburst_start_f3;
+ u16 vburst_end_f3;
+ u8 vburst_start_f4;
+ u16 vburst_end_f4;
/*
* subcarrier programming
*/
- int dda2_size, dda3_size, dda1_inc, dda2_inc, dda3_inc;
+ u16 dda2_size, dda3_size;
+ u8 dda1_inc;
+ u16 dda2_inc, dda3_inc;
u32 sc_reset;
- bool pal_burst;
+ bool pal_burst : 1;
/*
* blank/black levels
*/
const struct video_levels *composite_levels, *svideo_levels;
const struct color_conversion *composite_color, *svideo_color;
const u32 *filter_table;
- int max_srcw;
+ u16 max_srcw;
};
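
The struct tv_mode rework above shrinks the mode tables by narrowing field types to their actual value ranges and turning the flags into 1-bit bool bitfields. A self-contained illustration of the effect; exact sizes are ABI-dependent, so the numbers in the comments are only typical.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct wide {			/* before: everything is a full int/bool */
	int blank, black, burst;
	bool progressive, trilevel_sync;
};

struct narrow {			/* after: sized types plus 1-bit bools */
	uint16_t blank, black;
	uint8_t burst;
	bool progressive : 1, trilevel_sync : 1;
};

int main(void)
{
	printf("wide:   %zu bytes\n", sizeof(struct wide));	/* e.g. 16 */
	printf("narrow: %zu bytes\n", sizeof(struct narrow));	/* e.g. 6 */
	return 0;
}
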
@@ -846,7 +856,7 @@ intel_enable_tv(struct intel_encoder *encoder,
struct drm_i915_private *dev_priv = to_i915(dev);
/* Prevents vblank waits from timing out in intel_tv_detect_type() */
- intel_wait_for_vblank(encoder->base.dev,
+ intel_wait_for_vblank(dev_priv,
to_intel_crtc(encoder->base.crtc)->pipe);
I915_WRITE(TV_CTL, I915_READ(TV_CTL) | TV_ENC_ENABLE);
@@ -1019,8 +1029,7 @@ static void intel_tv_pre_enable(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config,
struct drm_connector_state *conn_state)
{
- struct drm_device *dev = encoder->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
struct intel_tv *intel_tv = enc_to_tv(encoder);
const struct tv_mode *tv_mode = intel_tv_mode_find(intel_tv);
@@ -1095,7 +1104,7 @@ static void intel_tv_pre_enable(struct intel_encoder *encoder,
tv_mode->dda3_inc << TV_SCDDA3_INC_SHIFT;
/* Enable two fixes for the chips that need them. */
- if (IS_I915GM(dev))
+ if (IS_I915GM(dev_priv))
tv_ctl |= TV_ENC_C0_FIX | TV_ENC_SDP_FIX;
set_tv_mode_timings(dev_priv, tv_mode, burst_ena);
@@ -1106,7 +1115,7 @@ static void intel_tv_pre_enable(struct intel_encoder *encoder,
set_color_conversion(dev_priv, color_conversion);
- if (INTEL_INFO(dev)->gen >= 4)
+ if (INTEL_GEN(dev_priv) >= 4)
I915_WRITE(TV_CLR_KNOBS, 0x00404000);
else
I915_WRITE(TV_CLR_KNOBS, 0x00606000);
@@ -1220,7 +1229,7 @@ intel_tv_detect_type(struct intel_tv *intel_tv,
	 * The TV sense state should be cleared to zero on the Cantiga platform.
	 * Otherwise the TV is misdetected. This is a hardware requirement.
*/
- if (IS_GM45(dev))
+ if (IS_GM45(dev_priv))
tv_dac &= ~(TVDAC_STATE_CHG_EN | TVDAC_A_SENSE_CTL |
TVDAC_B_SENSE_CTL | TVDAC_C_SENSE_CTL);
@@ -1228,7 +1237,7 @@ intel_tv_detect_type(struct intel_tv *intel_tv,
I915_WRITE(TV_DAC, tv_dac);
POSTING_READ(TV_DAC);
- intel_wait_for_vblank(dev, intel_crtc->pipe);
+ intel_wait_for_vblank(dev_priv, intel_crtc->pipe);
type = -1;
tv_dac = I915_READ(TV_DAC);
@@ -1258,7 +1267,7 @@ intel_tv_detect_type(struct intel_tv *intel_tv,
POSTING_READ(TV_CTL);
/* For unknown reasons the hw barfs if we don't do this vblank wait. */
- intel_wait_for_vblank(dev, intel_crtc->pipe);
+ intel_wait_for_vblank(dev_priv, intel_crtc->pipe);
/* Restore interrupt config */
if (connector->polled & DRM_CONNECTOR_POLL_HPD) {
@@ -1610,7 +1619,9 @@ intel_tv_init(struct drm_device *dev)
intel_connector->get_hw_state = intel_connector_get_hw_state;
intel_connector_attach_encoder(intel_connector, intel_encoder);
+
intel_encoder->type = INTEL_OUTPUT_TVOUT;
+ intel_encoder->port = PORT_NONE;
intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
intel_encoder->cloneable = 0;
intel_encoder->base.possible_crtcs = ((1 << 0) | (1 << 1));
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index ee2306a79747..d7be0d94ba4d 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -231,19 +231,21 @@ intel_uncore_fw_release_timer(struct hrtimer *timer)
{
struct intel_uncore_forcewake_domain *domain =
container_of(timer, struct intel_uncore_forcewake_domain, timer);
+ struct drm_i915_private *dev_priv = domain->i915;
unsigned long irqflags;
- assert_rpm_device_not_suspended(domain->i915);
+ assert_rpm_device_not_suspended(dev_priv);
- spin_lock_irqsave(&domain->i915->uncore.lock, irqflags);
+ spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
if (WARN_ON(domain->wake_count == 0))
domain->wake_count++;
- if (--domain->wake_count == 0)
- domain->i915->uncore.funcs.force_wake_put(domain->i915,
- 1 << domain->id);
+ if (--domain->wake_count == 0) {
+ dev_priv->uncore.funcs.force_wake_put(dev_priv, domain->mask);
+ dev_priv->uncore.fw_domains_active &= ~domain->mask;
+ }
- spin_unlock_irqrestore(&domain->i915->uncore.lock, irqflags);
+ spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
return HRTIMER_NORESTART;
}
@@ -254,7 +256,7 @@ void intel_uncore_forcewake_reset(struct drm_i915_private *dev_priv,
unsigned long irqflags;
struct intel_uncore_forcewake_domain *domain;
int retry_count = 100;
- enum forcewake_domains fw = 0, active_domains;
+ enum forcewake_domains fw, active_domains;
/* Hold uncore.lock across reset to prevent any register access
* with forcewake not set correctly. Wait until all pending
@@ -291,10 +293,7 @@ void intel_uncore_forcewake_reset(struct drm_i915_private *dev_priv,
WARN_ON(active_domains);
- for_each_fw_domain(domain, dev_priv)
- if (domain->wake_count)
- fw |= domain->mask;
-
+ fw = dev_priv->uncore.fw_domains_active;
if (fw)
dev_priv->uncore.funcs.force_wake_put(dev_priv, fw);
@@ -403,6 +402,8 @@ check_for_unclaimed_mmio(struct drm_i915_private *dev_priv)
static void __intel_uncore_early_sanitize(struct drm_i915_private *dev_priv,
bool restore_forcewake)
{
+ struct intel_device_info *info = mkwrite_device_info(dev_priv);
+
/* clear out unclaimed reg detection bit */
if (check_for_unclaimed_mmio(dev_priv))
DRM_DEBUG("unclaimed mmio detected on uncore init, clearing\n");
@@ -420,6 +421,10 @@ static void __intel_uncore_early_sanitize(struct drm_i915_private *dev_priv,
GT_FIFO_CTL_RC6_POLICY_STALL);
}
+ /* Enable Decoupled MMIO only on BXT C stepping onwards */
+ if (!IS_BXT_REVID(dev_priv, BXT_REVID_C0, REVID_FOREVER))
+ info->has_decoupled_mmio = false;
+
intel_uncore_forcewake_reset(dev_priv, restore_forcewake);
}
@@ -443,9 +448,6 @@ static void __intel_uncore_forcewake_get(struct drm_i915_private *dev_priv,
{
struct intel_uncore_forcewake_domain *domain;
- if (!dev_priv->uncore.funcs.force_wake_get)
- return;
-
fw_domains &= dev_priv->uncore.fw_domains;
for_each_fw_domain_masked(domain, fw_domains, dev_priv) {
@@ -453,8 +455,10 @@ static void __intel_uncore_forcewake_get(struct drm_i915_private *dev_priv,
fw_domains &= ~domain->mask;
}
- if (fw_domains)
+ if (fw_domains) {
dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_domains);
+ dev_priv->uncore.fw_domains_active |= fw_domains;
+ }
}
/**
@@ -509,9 +513,6 @@ static void __intel_uncore_forcewake_put(struct drm_i915_private *dev_priv,
{
struct intel_uncore_forcewake_domain *domain;
- if (!dev_priv->uncore.funcs.force_wake_put)
- return;
-
fw_domains &= dev_priv->uncore.fw_domains;
for_each_fw_domain_masked(domain, fw_domains, dev_priv) {
@@ -567,13 +568,10 @@ void intel_uncore_forcewake_put__locked(struct drm_i915_private *dev_priv,
void assert_forcewakes_inactive(struct drm_i915_private *dev_priv)
{
- struct intel_uncore_forcewake_domain *domain;
-
if (!dev_priv->uncore.funcs.force_wake_get)
return;
- for_each_fw_domain(domain, dev_priv)
- WARN_ON(domain->wake_count);
+ WARN_ON(dev_priv->uncore.fw_domains_active);
}
/* We give fast paths for the really cool registers */
@@ -589,49 +587,148 @@ void assert_forcewakes_inactive(struct drm_i915_private *dev_priv)
__fwd; \
})
-#define REG_RANGE(reg, start, end) ((reg) >= (start) && (reg) < (end))
+static int fw_range_cmp(u32 offset, const struct intel_forcewake_range *entry)
+{
+ if (offset < entry->start)
+ return -1;
+ else if (offset > entry->end)
+ return 1;
+ else
+ return 0;
+}
-#define FORCEWAKE_VLV_RENDER_RANGE_OFFSET(reg) \
- (REG_RANGE((reg), 0x2000, 0x4000) || \
- REG_RANGE((reg), 0x5000, 0x8000) || \
- REG_RANGE((reg), 0xB000, 0x12000) || \
- REG_RANGE((reg), 0x2E000, 0x30000))
+/* Copied and "macroized" from lib/bsearch.c */
+#define BSEARCH(key, base, num, cmp) ({ \
+ unsigned int start__ = 0, end__ = (num); \
+ typeof(base) result__ = NULL; \
+ while (start__ < end__) { \
+ unsigned int mid__ = start__ + (end__ - start__) / 2; \
+ int ret__ = (cmp)((key), (base) + mid__); \
+ if (ret__ < 0) { \
+ end__ = mid__; \
+ } else if (ret__ > 0) { \
+ start__ = mid__ + 1; \
+ } else { \
+ result__ = (base) + mid__; \
+ break; \
+ } \
+ } \
+ result__; \
+})
+
+static enum forcewake_domains
+find_fw_domain(struct drm_i915_private *dev_priv, u32 offset)
+{
+ const struct intel_forcewake_range *entry;
+
+ entry = BSEARCH(offset,
+ dev_priv->uncore.fw_domains_table,
+ dev_priv->uncore.fw_domains_table_entries,
+ fw_range_cmp);
-#define FORCEWAKE_VLV_MEDIA_RANGE_OFFSET(reg) \
- (REG_RANGE((reg), 0x12000, 0x14000) || \
- REG_RANGE((reg), 0x22000, 0x24000) || \
- REG_RANGE((reg), 0x30000, 0x40000))
+ return entry ? entry->domains : 0;
+}
-#define __vlv_reg_read_fw_domains(offset) \
+static void
+intel_fw_table_check(struct drm_i915_private *dev_priv)
+{
+ const struct intel_forcewake_range *ranges;
+ unsigned int num_ranges;
+ s32 prev;
+ unsigned int i;
+
+ if (!IS_ENABLED(CONFIG_DRM_I915_DEBUG))
+ return;
+
+ ranges = dev_priv->uncore.fw_domains_table;
+ if (!ranges)
+ return;
+
+ num_ranges = dev_priv->uncore.fw_domains_table_entries;
+
+ for (i = 0, prev = -1; i < num_ranges; i++, ranges++) {
+ WARN_ON_ONCE(IS_GEN9(dev_priv) &&
+ (prev + 1) != (s32)ranges->start);
+ WARN_ON_ONCE(prev >= (s32)ranges->start);
+ prev = ranges->start;
+ WARN_ON_ONCE(prev >= (s32)ranges->end);
+ prev = ranges->end;
+ }
+}
+
+#define GEN_FW_RANGE(s, e, d) \
+ { .start = (s), .end = (e), .domains = (d) }
+
+#define HAS_FWTABLE(dev_priv) \
+ (IS_GEN9(dev_priv) || \
+ IS_CHERRYVIEW(dev_priv) || \
+ IS_VALLEYVIEW(dev_priv))
+
+/* *Must* be sorted by offset ranges! See intel_fw_table_check(). */
+static const struct intel_forcewake_range __vlv_fw_ranges[] = {
+ GEN_FW_RANGE(0x2000, 0x3fff, FORCEWAKE_RENDER),
+ GEN_FW_RANGE(0x5000, 0x7fff, FORCEWAKE_RENDER),
+ GEN_FW_RANGE(0xb000, 0x11fff, FORCEWAKE_RENDER),
+ GEN_FW_RANGE(0x12000, 0x13fff, FORCEWAKE_MEDIA),
+ GEN_FW_RANGE(0x22000, 0x23fff, FORCEWAKE_MEDIA),
+ GEN_FW_RANGE(0x2e000, 0x2ffff, FORCEWAKE_RENDER),
+ GEN_FW_RANGE(0x30000, 0x3ffff, FORCEWAKE_MEDIA),
+};
+
+#define __fwtable_reg_read_fw_domains(offset) \
({ \
enum forcewake_domains __fwd = 0; \
- if (!NEEDS_FORCE_WAKE(offset)) \
- __fwd = 0; \
- else if (FORCEWAKE_VLV_RENDER_RANGE_OFFSET(offset)) \
- __fwd = FORCEWAKE_RENDER; \
- else if (FORCEWAKE_VLV_MEDIA_RANGE_OFFSET(offset)) \
- __fwd = FORCEWAKE_MEDIA; \
+ if (NEEDS_FORCE_WAKE((offset))) \
+ __fwd = find_fw_domain(dev_priv, offset); \
__fwd; \
})
+/* *Must* be sorted by offset! See intel_shadow_table_check(). */
static const i915_reg_t gen8_shadowed_regs[] = {
- GEN6_RPNSWREQ,
- GEN6_RC_VIDEO_FREQ,
- RING_TAIL(RENDER_RING_BASE),
- RING_TAIL(GEN6_BSD_RING_BASE),
- RING_TAIL(VEBOX_RING_BASE),
- RING_TAIL(BLT_RING_BASE),
+ RING_TAIL(RENDER_RING_BASE), /* 0x2000 (base) */
+ GEN6_RPNSWREQ, /* 0xA008 */
+ GEN6_RC_VIDEO_FREQ, /* 0xA00C */
+ RING_TAIL(GEN6_BSD_RING_BASE), /* 0x12000 (base) */
+ RING_TAIL(VEBOX_RING_BASE), /* 0x1a000 (base) */
+ RING_TAIL(BLT_RING_BASE), /* 0x22000 (base) */
/* TODO: Other registers are not yet used */
};
+static void intel_shadow_table_check(void)
+{
+ const i915_reg_t *reg = gen8_shadowed_regs;
+ s32 prev;
+ u32 offset;
+ unsigned int i;
+
+ if (!IS_ENABLED(CONFIG_DRM_I915_DEBUG))
+ return;
+
+ for (i = 0, prev = -1; i < ARRAY_SIZE(gen8_shadowed_regs); i++, reg++) {
+ offset = i915_mmio_reg_offset(*reg);
+ WARN_ON_ONCE(prev >= (s32)offset);
+ prev = offset;
+ }
+}
+
+static int mmio_reg_cmp(u32 key, const i915_reg_t *reg)
+{
+ u32 offset = i915_mmio_reg_offset(*reg);
+
+ if (key < offset)
+ return -1;
+ else if (key > offset)
+ return 1;
+ else
+ return 0;
+}
+
static bool is_gen8_shadowed(u32 offset)
{
- int i;
- for (i = 0; i < ARRAY_SIZE(gen8_shadowed_regs); i++)
- if (offset == gen8_shadowed_regs[i].reg)
- return true;
+ const i915_reg_t *regs = gen8_shadowed_regs;
- return false;
+ return BSEARCH(offset, regs, ARRAY_SIZE(gen8_shadowed_regs),
+ mmio_reg_cmp);
}
#define __gen8_reg_write_fw_domains(offset) \
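
Both find_fw_domain() and is_gen8_shadowed() now lean on the BSEARCH() macro above: the open-coded range checks become a binary search over tables sorted by offset, with intel_fw_table_check()/intel_shadow_table_check() verifying the sort order in debug builds. A minimal, self-contained sketch of that lookup; the table contents and domain values are stand-ins, not the driver's.

#include <stdio.h>

struct range {
	unsigned int start, end;	/* inclusive, sorted, non-overlapping */
	unsigned int domains;		/* payload: forcewake domain mask */
};

static const struct range table[] = {
	{ 0x2000, 0x3fff, 0x1 },
	{ 0x5000, 0x7fff, 0x1 },
	{ 0x12000, 0x13fff, 0x2 },
};

static unsigned int lookup(unsigned int offset)
{
	unsigned int lo = 0, hi = sizeof(table) / sizeof(table[0]);

	while (lo < hi) {
		unsigned int mid = lo + (hi - lo) / 2;

		if (offset < table[mid].start)
			hi = mid;
		else if (offset > table[mid].end)
			lo = mid + 1;
		else
			return table[mid].domains;
	}
	return 0;	/* gap between ranges: no forcewake needed */
}

int main(void)
{
	printf("0x6000 -> %#x\n", lookup(0x6000));	/* hits 0x5000-0x7fff */
	printf("0x4000 -> %#x\n", lookup(0x4000));	/* falls in a gap */
	return 0;
}
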
@@ -644,143 +741,70 @@ static bool is_gen8_shadowed(u32 offset)
__fwd; \
})
-#define FORCEWAKE_CHV_RENDER_RANGE_OFFSET(reg) \
- (REG_RANGE((reg), 0x2000, 0x4000) || \
- REG_RANGE((reg), 0x5200, 0x8000) || \
- REG_RANGE((reg), 0x8300, 0x8500) || \
- REG_RANGE((reg), 0xB000, 0xB480) || \
- REG_RANGE((reg), 0xE000, 0xE800))
-
-#define FORCEWAKE_CHV_MEDIA_RANGE_OFFSET(reg) \
- (REG_RANGE((reg), 0x8800, 0x8900) || \
- REG_RANGE((reg), 0xD000, 0xD800) || \
- REG_RANGE((reg), 0x12000, 0x14000) || \
- REG_RANGE((reg), 0x1A000, 0x1C000) || \
- REG_RANGE((reg), 0x1E800, 0x1EA00) || \
- REG_RANGE((reg), 0x30000, 0x38000))
-
-#define FORCEWAKE_CHV_COMMON_RANGE_OFFSET(reg) \
- (REG_RANGE((reg), 0x4000, 0x5000) || \
- REG_RANGE((reg), 0x8000, 0x8300) || \
- REG_RANGE((reg), 0x8500, 0x8600) || \
- REG_RANGE((reg), 0x9000, 0xB000) || \
- REG_RANGE((reg), 0xF000, 0x10000))
-
-#define __chv_reg_read_fw_domains(offset) \
-({ \
- enum forcewake_domains __fwd = 0; \
- if (!NEEDS_FORCE_WAKE(offset)) \
- __fwd = 0; \
- else if (FORCEWAKE_CHV_RENDER_RANGE_OFFSET(offset)) \
- __fwd = FORCEWAKE_RENDER; \
- else if (FORCEWAKE_CHV_MEDIA_RANGE_OFFSET(offset)) \
- __fwd = FORCEWAKE_MEDIA; \
- else if (FORCEWAKE_CHV_COMMON_RANGE_OFFSET(offset)) \
- __fwd = FORCEWAKE_RENDER | FORCEWAKE_MEDIA; \
- __fwd; \
-})
+/* *Must* be sorted by offset ranges! See intel_fw_table_check(). */
+static const struct intel_forcewake_range __chv_fw_ranges[] = {
+ GEN_FW_RANGE(0x2000, 0x3fff, FORCEWAKE_RENDER),
+ GEN_FW_RANGE(0x4000, 0x4fff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
+ GEN_FW_RANGE(0x5200, 0x7fff, FORCEWAKE_RENDER),
+ GEN_FW_RANGE(0x8000, 0x82ff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
+ GEN_FW_RANGE(0x8300, 0x84ff, FORCEWAKE_RENDER),
+ GEN_FW_RANGE(0x8500, 0x85ff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
+ GEN_FW_RANGE(0x8800, 0x88ff, FORCEWAKE_MEDIA),
+ GEN_FW_RANGE(0x9000, 0xafff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
+ GEN_FW_RANGE(0xb000, 0xb47f, FORCEWAKE_RENDER),
+ GEN_FW_RANGE(0xd000, 0xd7ff, FORCEWAKE_MEDIA),
+ GEN_FW_RANGE(0xe000, 0xe7ff, FORCEWAKE_RENDER),
+ GEN_FW_RANGE(0xf000, 0xffff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
+ GEN_FW_RANGE(0x12000, 0x13fff, FORCEWAKE_MEDIA),
+ GEN_FW_RANGE(0x1a000, 0x1bfff, FORCEWAKE_MEDIA),
+ GEN_FW_RANGE(0x1e800, 0x1e9ff, FORCEWAKE_MEDIA),
+ GEN_FW_RANGE(0x30000, 0x37fff, FORCEWAKE_MEDIA),
+};
-#define __chv_reg_write_fw_domains(offset) \
+#define __fwtable_reg_write_fw_domains(offset) \
({ \
enum forcewake_domains __fwd = 0; \
- if (!NEEDS_FORCE_WAKE(offset) || is_gen8_shadowed(offset)) \
- __fwd = 0; \
- else if (FORCEWAKE_CHV_RENDER_RANGE_OFFSET(offset)) \
- __fwd = FORCEWAKE_RENDER; \
- else if (FORCEWAKE_CHV_MEDIA_RANGE_OFFSET(offset)) \
- __fwd = FORCEWAKE_MEDIA; \
- else if (FORCEWAKE_CHV_COMMON_RANGE_OFFSET(offset)) \
- __fwd = FORCEWAKE_RENDER | FORCEWAKE_MEDIA; \
+ if (NEEDS_FORCE_WAKE((offset)) && !is_gen8_shadowed(offset)) \
+ __fwd = find_fw_domain(dev_priv, offset); \
__fwd; \
})
-#define FORCEWAKE_GEN9_UNCORE_RANGE_OFFSET(reg) \
- REG_RANGE((reg), 0xB00, 0x2000)
-
-#define FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(reg) \
- (REG_RANGE((reg), 0x2000, 0x2700) || \
- REG_RANGE((reg), 0x3000, 0x4000) || \
- REG_RANGE((reg), 0x5200, 0x8000) || \
- REG_RANGE((reg), 0x8140, 0x8160) || \
- REG_RANGE((reg), 0x8300, 0x8500) || \
- REG_RANGE((reg), 0x8C00, 0x8D00) || \
- REG_RANGE((reg), 0xB000, 0xB480) || \
- REG_RANGE((reg), 0xE000, 0xE900) || \
- REG_RANGE((reg), 0x24400, 0x24800))
-
-#define FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(reg) \
- (REG_RANGE((reg), 0x8130, 0x8140) || \
- REG_RANGE((reg), 0x8800, 0x8A00) || \
- REG_RANGE((reg), 0xD000, 0xD800) || \
- REG_RANGE((reg), 0x12000, 0x14000) || \
- REG_RANGE((reg), 0x1A000, 0x1EA00) || \
- REG_RANGE((reg), 0x30000, 0x40000))
-
-#define FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(reg) \
- REG_RANGE((reg), 0x9400, 0x9800)
-
-#define FORCEWAKE_GEN9_BLITTER_RANGE_OFFSET(reg) \
- ((reg) < 0x40000 && \
- !FORCEWAKE_GEN9_UNCORE_RANGE_OFFSET(reg) && \
- !FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(reg) && \
- !FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(reg) && \
- !FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(reg))
-
-#define SKL_NEEDS_FORCE_WAKE(reg) \
- ((reg) < 0x40000 && !FORCEWAKE_GEN9_UNCORE_RANGE_OFFSET(reg))
-
-#define __gen9_reg_read_fw_domains(offset) \
-({ \
- enum forcewake_domains __fwd; \
- if (!SKL_NEEDS_FORCE_WAKE(offset)) \
- __fwd = 0; \
- else if (FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(offset)) \
- __fwd = FORCEWAKE_RENDER; \
- else if (FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(offset)) \
- __fwd = FORCEWAKE_MEDIA; \
- else if (FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(offset)) \
- __fwd = FORCEWAKE_RENDER | FORCEWAKE_MEDIA; \
- else \
- __fwd = FORCEWAKE_BLITTER; \
- __fwd; \
-})
-
-static const i915_reg_t gen9_shadowed_regs[] = {
- RING_TAIL(RENDER_RING_BASE),
- RING_TAIL(GEN6_BSD_RING_BASE),
- RING_TAIL(VEBOX_RING_BASE),
- RING_TAIL(BLT_RING_BASE),
- GEN6_RPNSWREQ,
- GEN6_RC_VIDEO_FREQ,
- /* TODO: Other registers are not yet used */
+/* *Must* be sorted by offset ranges! See intel_fw_table_check(). */
+static const struct intel_forcewake_range __gen9_fw_ranges[] = {
+ GEN_FW_RANGE(0x0, 0xaff, FORCEWAKE_BLITTER),
+ GEN_FW_RANGE(0xb00, 0x1fff, 0), /* uncore range */
+ GEN_FW_RANGE(0x2000, 0x26ff, FORCEWAKE_RENDER),
+ GEN_FW_RANGE(0x2700, 0x2fff, FORCEWAKE_BLITTER),
+ GEN_FW_RANGE(0x3000, 0x3fff, FORCEWAKE_RENDER),
+ GEN_FW_RANGE(0x4000, 0x51ff, FORCEWAKE_BLITTER),
+ GEN_FW_RANGE(0x5200, 0x7fff, FORCEWAKE_RENDER),
+ GEN_FW_RANGE(0x8000, 0x812f, FORCEWAKE_BLITTER),
+ GEN_FW_RANGE(0x8130, 0x813f, FORCEWAKE_MEDIA),
+ GEN_FW_RANGE(0x8140, 0x815f, FORCEWAKE_RENDER),
+ GEN_FW_RANGE(0x8160, 0x82ff, FORCEWAKE_BLITTER),
+ GEN_FW_RANGE(0x8300, 0x84ff, FORCEWAKE_RENDER),
+ GEN_FW_RANGE(0x8500, 0x87ff, FORCEWAKE_BLITTER),
+ GEN_FW_RANGE(0x8800, 0x89ff, FORCEWAKE_MEDIA),
+ GEN_FW_RANGE(0x8a00, 0x8bff, FORCEWAKE_BLITTER),
+ GEN_FW_RANGE(0x8c00, 0x8cff, FORCEWAKE_RENDER),
+ GEN_FW_RANGE(0x8d00, 0x93ff, FORCEWAKE_BLITTER),
+ GEN_FW_RANGE(0x9400, 0x97ff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
+ GEN_FW_RANGE(0x9800, 0xafff, FORCEWAKE_BLITTER),
+ GEN_FW_RANGE(0xb000, 0xb47f, FORCEWAKE_RENDER),
+ GEN_FW_RANGE(0xb480, 0xcfff, FORCEWAKE_BLITTER),
+ GEN_FW_RANGE(0xd000, 0xd7ff, FORCEWAKE_MEDIA),
+ GEN_FW_RANGE(0xd800, 0xdfff, FORCEWAKE_BLITTER),
+ GEN_FW_RANGE(0xe000, 0xe8ff, FORCEWAKE_RENDER),
+ GEN_FW_RANGE(0xe900, 0x11fff, FORCEWAKE_BLITTER),
+ GEN_FW_RANGE(0x12000, 0x13fff, FORCEWAKE_MEDIA),
+ GEN_FW_RANGE(0x14000, 0x19fff, FORCEWAKE_BLITTER),
+ GEN_FW_RANGE(0x1a000, 0x1e9ff, FORCEWAKE_MEDIA),
+ GEN_FW_RANGE(0x1ea00, 0x243ff, FORCEWAKE_BLITTER),
+ GEN_FW_RANGE(0x24400, 0x247ff, FORCEWAKE_RENDER),
+ GEN_FW_RANGE(0x24800, 0x2ffff, FORCEWAKE_BLITTER),
+ GEN_FW_RANGE(0x30000, 0x3ffff, FORCEWAKE_MEDIA),
};
-static bool is_gen9_shadowed(u32 offset)
-{
- int i;
- for (i = 0; i < ARRAY_SIZE(gen9_shadowed_regs); i++)
- if (offset == gen9_shadowed_regs[i].reg)
- return true;
-
- return false;
-}
-
-#define __gen9_reg_write_fw_domains(offset) \
-({ \
- enum forcewake_domains __fwd; \
- if (!SKL_NEEDS_FORCE_WAKE(offset) || is_gen9_shadowed(offset)) \
- __fwd = 0; \
- else if (FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(offset)) \
- __fwd = FORCEWAKE_RENDER; \
- else if (FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(offset)) \
- __fwd = FORCEWAKE_MEDIA; \
- else if (FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(offset)) \
- __fwd = FORCEWAKE_RENDER | FORCEWAKE_MEDIA; \
- else \
- __fwd = FORCEWAKE_BLITTER; \
- __fwd; \
-})
-
static void
ilk_dummy_write(struct drm_i915_private *dev_priv)
{
@@ -815,6 +839,66 @@ unclaimed_reg_debug(struct drm_i915_private *dev_priv,
__unclaimed_reg_debug(dev_priv, reg, read, before);
}
+static const enum decoupled_power_domain fw2dpd_domain[] = {
+ GEN9_DECOUPLED_PD_RENDER,
+ GEN9_DECOUPLED_PD_BLITTER,
+ GEN9_DECOUPLED_PD_ALL,
+ GEN9_DECOUPLED_PD_MEDIA,
+ GEN9_DECOUPLED_PD_ALL,
+ GEN9_DECOUPLED_PD_ALL,
+ GEN9_DECOUPLED_PD_ALL
+};
+
+/*
+ * Decoupled MMIO access for only 1 DWORD
+ */
+static void __gen9_decoupled_mmio_access(struct drm_i915_private *dev_priv,
+ u32 reg,
+ enum forcewake_domains fw_domain,
+ enum decoupled_ops operation)
+{
+ enum decoupled_power_domain dp_domain;
+ u32 ctrl_reg_data = 0;
+
+ dp_domain = fw2dpd_domain[fw_domain - 1];
+
+ ctrl_reg_data |= reg;
+ ctrl_reg_data |= (operation << GEN9_DECOUPLED_OP_SHIFT);
+ ctrl_reg_data |= (dp_domain << GEN9_DECOUPLED_PD_SHIFT);
+ ctrl_reg_data |= GEN9_DECOUPLED_DW1_GO;
+ __raw_i915_write32(dev_priv, GEN9_DECOUPLED_REG0_DW1, ctrl_reg_data);
+
+ if (wait_for_atomic((__raw_i915_read32(dev_priv,
+ GEN9_DECOUPLED_REG0_DW1) &
+ GEN9_DECOUPLED_DW1_GO) == 0,
+ FORCEWAKE_ACK_TIMEOUT_MS))
+ DRM_ERROR("Decoupled MMIO wait timed out\n");
+}
+
+static inline u32
+__gen9_decoupled_mmio_read32(struct drm_i915_private *dev_priv,
+ u32 reg,
+ enum forcewake_domains fw_domain)
+{
+ __gen9_decoupled_mmio_access(dev_priv, reg, fw_domain,
+ GEN9_DECOUPLED_OP_READ);
+
+ return __raw_i915_read32(dev_priv, GEN9_DECOUPLED_REG0_DW0);
+}
+
+static inline void
+__gen9_decoupled_mmio_write(struct drm_i915_private *dev_priv,
+ u32 reg, u32 data,
+ enum forcewake_domains fw_domain)
+{
+
+ __raw_i915_write32(dev_priv, GEN9_DECOUPLED_REG0_DW0, data);
+
+ __gen9_decoupled_mmio_access(dev_priv, reg, fw_domain,
+ GEN9_DECOUPLED_OP_WRITE);
+}
+
+
#define GEN2_READ_HEADER(x) \
u##x val = 0; \
assert_rpm_wakelock_held(dev_priv);
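
The decoupled path above follows a common doorbell pattern: pack the register offset, operation, and power domain into one control dword, set a GO bit, and poll until hardware clears it. The sketch below shows only that pattern against a simulated register; the bit positions and register are placeholders, not the GEN9_DECOUPLED_* layout.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define DEMO_OP_SHIFT	16		/* placeholder bit layout */
#define DEMO_PD_SHIFT	20
#define DEMO_GO		(1u << 31)

static uint32_t fake_ctl;		/* simulated control register */

static void mmio_write32(uint32_t val)
{
	fake_ctl = val;
}

static uint32_t mmio_read32(void)
{
	fake_ctl &= ~DEMO_GO;		/* pretend hardware finished at once */
	return fake_ctl;
}

static bool decoupled_access(uint32_t offset, uint32_t op, uint32_t pd)
{
	uint32_t ctl = offset | (op << DEMO_OP_SHIFT) |
		       (pd << DEMO_PD_SHIFT) | DEMO_GO;
	unsigned int retries = 1000;

	mmio_write32(ctl);		/* kick off the access */

	while (mmio_read32() & DEMO_GO)	/* GO clears on completion */
		if (!retries--)
			return false;	/* timeout, as DRM_ERROR above */
	return true;
}

int main(void)
{
	printf("ok = %d\n", decoupled_access(0x2000, 1, 2));
	return 0;
}
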
@@ -869,26 +953,30 @@ __gen2_read(64)
trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
return val
-static inline void __force_wake_auto(struct drm_i915_private *dev_priv,
- enum forcewake_domains fw_domains)
+static noinline void ___force_wake_auto(struct drm_i915_private *dev_priv,
+ enum forcewake_domains fw_domains)
{
struct intel_uncore_forcewake_domain *domain;
+ for_each_fw_domain_masked(domain, fw_domains, dev_priv)
+ fw_domain_arm_timer(domain);
+
+ dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_domains);
+ dev_priv->uncore.fw_domains_active |= fw_domains;
+}
+
+static inline void __force_wake_auto(struct drm_i915_private *dev_priv,
+ enum forcewake_domains fw_domains)
+{
if (WARN_ON(!fw_domains))
return;
- /* Ideally GCC would be constant-fold and eliminate this loop */
- for_each_fw_domain_masked(domain, fw_domains, dev_priv) {
- if (domain->wake_count) {
- fw_domains &= ~domain->mask;
- continue;
- }
-
- fw_domain_arm_timer(domain);
- }
+ /* Turn on all requested but inactive supported forcewake domains. */
+ fw_domains &= dev_priv->uncore.fw_domains;
+ fw_domains &= ~dev_priv->uncore.fw_domains_active;
if (fw_domains)
- dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_domains);
+ ___force_wake_auto(dev_priv, fw_domains);
}
#define __gen6_read(x) \
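
__force_wake_auto() is now a small inline fast path that masks the request against cached state and only calls a noinline helper when work remains, which keeps the hot MMIO accessors compact. The same split in miniature; the state bits and "work" here are illustrative.

#include <stdio.h>

static unsigned int supported = 0x7;	/* domains this device has */
static unsigned int active;		/* domains already awake */

static __attribute__((noinline)) void slow_path(unsigned int want)
{
	/* the expensive per-domain work lives out of line */
	printf("waking domains %#x\n", want);
	active |= want;
}

static inline void fast_path(unsigned int want)
{
	want &= supported;	/* ignore domains the device lacks */
	want &= ~active;	/* already-active domains need nothing */
	if (want)
		slow_path(want);
}

int main(void)
{
	fast_path(0x3);		/* wakes 0x3 */
	fast_path(0x3);		/* no-op: everything already active */
	return 0;
}
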
@@ -903,62 +991,50 @@ gen6_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
GEN6_READ_FOOTER; \
}
-#define __vlv_read(x) \
+#define __fwtable_read(x) \
static u##x \
-vlv_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
+fwtable_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
enum forcewake_domains fw_engine; \
GEN6_READ_HEADER(x); \
- fw_engine = __vlv_reg_read_fw_domains(offset); \
+ fw_engine = __fwtable_reg_read_fw_domains(offset); \
if (fw_engine) \
__force_wake_auto(dev_priv, fw_engine); \
val = __raw_i915_read##x(dev_priv, reg); \
GEN6_READ_FOOTER; \
}
-#define __chv_read(x) \
+#define __gen9_decoupled_read(x) \
static u##x \
-chv_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
+gen9_decoupled_read##x(struct drm_i915_private *dev_priv, \
+ i915_reg_t reg, bool trace) { \
enum forcewake_domains fw_engine; \
GEN6_READ_HEADER(x); \
- fw_engine = __chv_reg_read_fw_domains(offset); \
- if (fw_engine) \
- __force_wake_auto(dev_priv, fw_engine); \
- val = __raw_i915_read##x(dev_priv, reg); \
- GEN6_READ_FOOTER; \
-}
-
-#define __gen9_read(x) \
-static u##x \
-gen9_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
- enum forcewake_domains fw_engine; \
- GEN6_READ_HEADER(x); \
- fw_engine = __gen9_reg_read_fw_domains(offset); \
- if (fw_engine) \
- __force_wake_auto(dev_priv, fw_engine); \
- val = __raw_i915_read##x(dev_priv, reg); \
+ fw_engine = __fwtable_reg_read_fw_domains(offset); \
+ if (fw_engine & ~dev_priv->uncore.fw_domains_active) { \
+ unsigned i; \
+ u32 *ptr_data = (u32 *) &val; \
+ for (i = 0; i < x/32; i++, offset += sizeof(u32), ptr_data++) \
+ *ptr_data = __gen9_decoupled_mmio_read32(dev_priv, \
+ offset, \
+ fw_engine); \
+ } else { \
+ val = __raw_i915_read##x(dev_priv, reg); \
+ } \
GEN6_READ_FOOTER; \
}
-__gen9_read(8)
-__gen9_read(16)
-__gen9_read(32)
-__gen9_read(64)
-__chv_read(8)
-__chv_read(16)
-__chv_read(32)
-__chv_read(64)
-__vlv_read(8)
-__vlv_read(16)
-__vlv_read(32)
-__vlv_read(64)
+__gen9_decoupled_read(32)
+__gen9_decoupled_read(64)
+__fwtable_read(8)
+__fwtable_read(16)
+__fwtable_read(32)
+__fwtable_read(64)
__gen6_read(8)
__gen6_read(16)
__gen6_read(32)
__gen6_read(64)
-#undef __gen9_read
-#undef __chv_read
-#undef __vlv_read
+#undef __fwtable_read
#undef __gen6_read
#undef GEN6_READ_FOOTER
#undef GEN6_READ_HEADER
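
The gen9_decoupled_read64 variant above assembles a 64-bit result from two 32-bit decoupled reads by stepping a u32 pointer across the value, one dword per iteration. A self-contained sketch of that composition; like the macro, it assumes a little-endian layout, and the read stub stands in for the MMIO access.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t read32_at(uint32_t offset)	/* stand-in for the MMIO read */
{
	return offset;				/* dummy data: echo the offset */
}

int main(void)
{
	uint64_t val = 0;
	uint32_t *ptr_data = (uint32_t *)&val;	/* as in the macro above */
	uint32_t offset = 0x2000;
	unsigned int i;

	/* one 32-bit read per dword; low dword first on little-endian */
	for (i = 0; i < 64 / 32; i++, offset += sizeof(uint32_t), ptr_data++)
		*ptr_data = read32_at(offset);

	printf("val = %#" PRIx64 "\n", val);	/* 0x200400002000 */
	return 0;
}
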
@@ -1054,21 +1130,6 @@ gen6_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool
GEN6_WRITE_FOOTER; \
}
-#define __hsw_write(x) \
-static void \
-hsw_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
- u32 __fifo_ret = 0; \
- GEN6_WRITE_HEADER; \
- if (NEEDS_FORCE_WAKE(offset)) { \
- __fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
- } \
- __raw_i915_write##x(dev_priv, reg, val); \
- if (unlikely(__fifo_ret)) { \
- gen6_gt_check_fifodbg(dev_priv); \
- } \
- GEN6_WRITE_FOOTER; \
-}
-
#define __gen8_write(x) \
static void \
gen8_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
@@ -1081,51 +1142,49 @@ gen8_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool
GEN6_WRITE_FOOTER; \
}
-#define __chv_write(x) \
+#define __fwtable_write(x) \
static void \
-chv_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
+fwtable_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
enum forcewake_domains fw_engine; \
GEN6_WRITE_HEADER; \
- fw_engine = __chv_reg_write_fw_domains(offset); \
+ fw_engine = __fwtable_reg_write_fw_domains(offset); \
if (fw_engine) \
__force_wake_auto(dev_priv, fw_engine); \
__raw_i915_write##x(dev_priv, reg, val); \
GEN6_WRITE_FOOTER; \
}
-#define __gen9_write(x) \
+#define __gen9_decoupled_write(x) \
static void \
-gen9_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, \
+gen9_decoupled_write##x(struct drm_i915_private *dev_priv, \
+ i915_reg_t reg, u##x val, \
bool trace) { \
enum forcewake_domains fw_engine; \
GEN6_WRITE_HEADER; \
- fw_engine = __gen9_reg_write_fw_domains(offset); \
- if (fw_engine) \
- __force_wake_auto(dev_priv, fw_engine); \
- __raw_i915_write##x(dev_priv, reg, val); \
+ fw_engine = __fwtable_reg_write_fw_domains(offset); \
+ if (fw_engine & ~dev_priv->uncore.fw_domains_active) \
+ __gen9_decoupled_mmio_write(dev_priv, \
+ offset, \
+ val, \
+ fw_engine); \
+ else \
+ __raw_i915_write##x(dev_priv, reg, val); \
GEN6_WRITE_FOOTER; \
}
-__gen9_write(8)
-__gen9_write(16)
-__gen9_write(32)
-__chv_write(8)
-__chv_write(16)
-__chv_write(32)
+__gen9_decoupled_write(32)
+__fwtable_write(8)
+__fwtable_write(16)
+__fwtable_write(32)
__gen8_write(8)
__gen8_write(16)
__gen8_write(32)
-__hsw_write(8)
-__hsw_write(16)
-__hsw_write(32)
__gen6_write(8)
__gen6_write(16)
__gen6_write(32)
-#undef __gen9_write
-#undef __chv_write
+#undef __fwtable_write
#undef __gen8_write
-#undef __hsw_write
#undef __gen6_write
#undef GEN6_WRITE_FOOTER
#undef GEN6_WRITE_HEADER
@@ -1314,6 +1373,13 @@ static void intel_uncore_fw_domains_init(struct drm_i915_private *dev_priv)
WARN_ON(dev_priv->uncore.fw_domains == 0);
}
+#define ASSIGN_FW_DOMAINS_TABLE(d) \
+{ \
+ dev_priv->uncore.fw_domains_table = \
+ (struct intel_forcewake_range *)(d); \
+ dev_priv->uncore.fw_domains_table_entries = ARRAY_SIZE((d)); \
+}
+
void intel_uncore_init(struct drm_i915_private *dev_priv)
{
i915_check_vgpu(dev_priv);
@@ -1327,13 +1393,23 @@ void intel_uncore_init(struct drm_i915_private *dev_priv)
switch (INTEL_INFO(dev_priv)->gen) {
default:
case 9:
- ASSIGN_WRITE_MMIO_VFUNCS(gen9);
- ASSIGN_READ_MMIO_VFUNCS(gen9);
+ ASSIGN_FW_DOMAINS_TABLE(__gen9_fw_ranges);
+ ASSIGN_WRITE_MMIO_VFUNCS(fwtable);
+ ASSIGN_READ_MMIO_VFUNCS(fwtable);
+ if (HAS_DECOUPLED_MMIO(dev_priv)) {
+ dev_priv->uncore.funcs.mmio_readl =
+ gen9_decoupled_read32;
+ dev_priv->uncore.funcs.mmio_readq =
+ gen9_decoupled_read64;
+ dev_priv->uncore.funcs.mmio_writel =
+ gen9_decoupled_write32;
+ }
break;
case 8:
if (IS_CHERRYVIEW(dev_priv)) {
- ASSIGN_WRITE_MMIO_VFUNCS(chv);
- ASSIGN_READ_MMIO_VFUNCS(chv);
+ ASSIGN_FW_DOMAINS_TABLE(__chv_fw_ranges);
+ ASSIGN_WRITE_MMIO_VFUNCS(fwtable);
+ ASSIGN_READ_MMIO_VFUNCS(fwtable);
} else {
ASSIGN_WRITE_MMIO_VFUNCS(gen8);
@@ -1342,14 +1418,11 @@ void intel_uncore_init(struct drm_i915_private *dev_priv)
break;
case 7:
case 6:
- if (IS_HASWELL(dev_priv)) {
- ASSIGN_WRITE_MMIO_VFUNCS(hsw);
- } else {
- ASSIGN_WRITE_MMIO_VFUNCS(gen6);
- }
+ ASSIGN_WRITE_MMIO_VFUNCS(gen6);
if (IS_VALLEYVIEW(dev_priv)) {
- ASSIGN_READ_MMIO_VFUNCS(vlv);
+ ASSIGN_FW_DOMAINS_TABLE(__vlv_fw_ranges);
+ ASSIGN_READ_MMIO_VFUNCS(fwtable);
} else {
ASSIGN_READ_MMIO_VFUNCS(gen6);
}
@@ -1366,6 +1439,10 @@ void intel_uncore_init(struct drm_i915_private *dev_priv)
break;
}
+ intel_fw_table_check(dev_priv);
+ if (INTEL_GEN(dev_priv) >= 8)
+ intel_shadow_table_check();
+
if (intel_vgpu_active(dev_priv)) {
ASSIGN_WRITE_MMIO_VFUNCS(vgpu);
ASSIGN_READ_MMIO_VFUNCS(vgpu);
@@ -1408,7 +1485,7 @@ int i915_reg_read_ioctl(struct drm_device *dev,
for (i = 0; i < ARRAY_SIZE(whitelist); i++, entry++) {
if (i915_mmio_reg_offset(entry->offset_ldw) == (reg->offset & -entry->size) &&
- (INTEL_INFO(dev)->gen_mask & entry->gen_bitmask))
+ (INTEL_INFO(dev_priv)->gen_mask & entry->gen_bitmask))
break;
}
@@ -1815,35 +1892,16 @@ static enum forcewake_domains
intel_uncore_forcewake_for_read(struct drm_i915_private *dev_priv,
i915_reg_t reg)
{
+ u32 offset = i915_mmio_reg_offset(reg);
enum forcewake_domains fw_domains;
- if (intel_vgpu_active(dev_priv))
- return 0;
-
- switch (INTEL_GEN(dev_priv)) {
- case 9:
- fw_domains = __gen9_reg_read_fw_domains(i915_mmio_reg_offset(reg));
- break;
- case 8:
- if (IS_CHERRYVIEW(dev_priv))
- fw_domains = __chv_reg_read_fw_domains(i915_mmio_reg_offset(reg));
- else
- fw_domains = __gen6_reg_read_fw_domains(i915_mmio_reg_offset(reg));
- break;
- case 7:
- case 6:
- if (IS_VALLEYVIEW(dev_priv))
- fw_domains = __vlv_reg_read_fw_domains(i915_mmio_reg_offset(reg));
- else
- fw_domains = __gen6_reg_read_fw_domains(i915_mmio_reg_offset(reg));
- break;
- default:
- MISSING_CASE(INTEL_INFO(dev_priv)->gen);
- case 5: /* forcewake was introduced with gen6 */
- case 4:
- case 3:
- case 2:
- return 0;
+ if (HAS_FWTABLE(dev_priv)) {
+ fw_domains = __fwtable_reg_read_fw_domains(offset);
+ } else if (INTEL_GEN(dev_priv) >= 6) {
+ fw_domains = __gen6_reg_read_fw_domains(offset);
+ } else {
+ WARN_ON(!IS_GEN(dev_priv, 2, 5));
+ fw_domains = 0;
}
WARN_ON(fw_domains & ~dev_priv->uncore.fw_domains);
@@ -1855,32 +1913,18 @@ static enum forcewake_domains
intel_uncore_forcewake_for_write(struct drm_i915_private *dev_priv,
i915_reg_t reg)
{
+ u32 offset = i915_mmio_reg_offset(reg);
enum forcewake_domains fw_domains;
- if (intel_vgpu_active(dev_priv))
- return 0;
-
- switch (INTEL_GEN(dev_priv)) {
- case 9:
- fw_domains = __gen9_reg_write_fw_domains(i915_mmio_reg_offset(reg));
- break;
- case 8:
- if (IS_CHERRYVIEW(dev_priv))
- fw_domains = __chv_reg_write_fw_domains(i915_mmio_reg_offset(reg));
- else
- fw_domains = __gen8_reg_write_fw_domains(i915_mmio_reg_offset(reg));
- break;
- case 7:
- case 6:
+ if (HAS_FWTABLE(dev_priv) && !IS_VALLEYVIEW(dev_priv)) {
+ fw_domains = __fwtable_reg_write_fw_domains(offset);
+ } else if (IS_GEN8(dev_priv)) {
+ fw_domains = __gen8_reg_write_fw_domains(offset);
+ } else if (IS_GEN(dev_priv, 6, 7)) {
fw_domains = FORCEWAKE_RENDER;
- break;
- default:
- MISSING_CASE(INTEL_INFO(dev_priv)->gen);
- case 5:
- case 4:
- case 3:
- case 2:
- return 0;
+ } else {
+ WARN_ON(!IS_GEN(dev_priv, 2, 5));
+ fw_domains = 0;
}
WARN_ON(fw_domains & ~dev_priv->uncore.fw_domains);
@@ -1910,6 +1954,9 @@ intel_uncore_forcewake_for_reg(struct drm_i915_private *dev_priv,
WARN_ON(!op);
+ if (intel_vgpu_active(dev_priv))
+ return 0;
+
if (op & FW_REG_READ)
fw_domains = intel_uncore_forcewake_for_read(dev_priv, reg);
diff --git a/drivers/gpu/drm/imx/imx-drm-core.c b/drivers/gpu/drm/imx/imx-drm-core.c
index 9672b579f950..33404295b447 100644
--- a/drivers/gpu/drm/imx/imx-drm-core.c
+++ b/drivers/gpu/drm/imx/imx-drm-core.c
@@ -18,7 +18,6 @@
#include <linux/dma-buf.h>
#include <linux/module.h>
#include <linux/platform_device.h>
-#include <linux/reservation.h>
#include <drm/drmP.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
@@ -151,38 +150,11 @@ static int imx_drm_atomic_check(struct drm_device *dev,
return ret;
}
-static int imx_drm_atomic_commit(struct drm_device *dev,
- struct drm_atomic_state *state,
- bool nonblock)
-{
- struct drm_plane_state *plane_state;
- struct drm_plane *plane;
- struct dma_buf *dma_buf;
- int i;
-
- /*
- * If the plane fb has an dma-buf attached, fish out the exclusive
- * fence for the atomic helper to wait on.
- */
- for_each_plane_in_state(state, plane, plane_state, i) {
- if ((plane->state->fb != plane_state->fb) && plane_state->fb) {
- dma_buf = drm_fb_cma_get_gem_obj(plane_state->fb,
- 0)->base.dma_buf;
- if (!dma_buf)
- continue;
- plane_state->fence =
- reservation_object_get_excl_rcu(dma_buf->resv);
- }
- }
-
- return drm_atomic_helper_commit(dev, state, nonblock);
-}
-
static const struct drm_mode_config_funcs imx_drm_mode_config_funcs = {
.fb_create = drm_fb_cma_create,
.output_poll_changed = imx_drm_output_poll_changed,
.atomic_check = imx_drm_atomic_check,
- .atomic_commit = imx_drm_atomic_commit,
+ .atomic_commit = drm_atomic_helper_commit,
};
static void imx_drm_atomic_commit_tail(struct drm_atomic_state *state)
diff --git a/drivers/gpu/drm/imx/imx-ldb.c b/drivers/gpu/drm/imx/imx-ldb.c
index 3ce391c239b0..516d06490465 100644
--- a/drivers/gpu/drm/imx/imx-ldb.c
+++ b/drivers/gpu/drm/imx/imx-ldb.c
@@ -101,12 +101,6 @@ struct imx_ldb {
const struct bus_mux *lvds_mux;
};
-static enum drm_connector_status imx_ldb_connector_detect(
- struct drm_connector *connector, bool force)
-{
- return connector_status_connected;
-}
-
static void imx_ldb_ch_set_bus_format(struct imx_ldb_channel *imx_ldb_ch,
u32 bus_format)
{
@@ -319,18 +313,6 @@ static void imx_ldb_encoder_disable(struct drm_encoder *encoder)
struct imx_ldb *ldb = imx_ldb_ch->ldb;
int mux, ret;
- /*
- * imx_ldb_encoder_disable is called by
- * drm_helper_disable_unused_functions without
- * the encoder being enabled before.
- */
- if (imx_ldb_ch == &ldb->channel[0] &&
- (ldb->ldb_ctrl & LDB_CH0_MODE_EN_MASK) == 0)
- return;
- else if (imx_ldb_ch == &ldb->channel[1] &&
- (ldb->ldb_ctrl & LDB_CH1_MODE_EN_MASK) == 0)
- return;
-
drm_panel_disable(imx_ldb_ch->panel);
if (imx_ldb_ch == &ldb->channel[0])
@@ -409,7 +391,6 @@ static int imx_ldb_encoder_atomic_check(struct drm_encoder *encoder,
static const struct drm_connector_funcs imx_ldb_connector_funcs = {
.dpms = drm_atomic_helper_connector_dpms,
.fill_modes = drm_helper_probe_single_connector_modes,
- .detect = imx_ldb_connector_detect,
.destroy = imx_drm_connector_destroy,
.reset = drm_atomic_helper_connector_reset,
.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
diff --git a/drivers/gpu/drm/imx/imx-tve.c b/drivers/gpu/drm/imx/imx-tve.c
index 8fc088843e55..3b602ee33c44 100644
--- a/drivers/gpu/drm/imx/imx-tve.c
+++ b/drivers/gpu/drm/imx/imx-tve.c
@@ -227,12 +227,6 @@ static int tve_setup_vga(struct imx_tve *tve)
TVE_TVDAC_TEST_MODE_MASK, 1);
}
-static enum drm_connector_status imx_tve_connector_detect(
- struct drm_connector *connector, bool force)
-{
- return connector_status_connected;
-}
-
static int imx_tve_connector_get_modes(struct drm_connector *connector)
{
struct imx_tve *tve = con_to_tve(connector);
@@ -352,7 +346,6 @@ static int imx_tve_atomic_check(struct drm_encoder *encoder,
static const struct drm_connector_funcs imx_tve_connector_funcs = {
.dpms = drm_atomic_helper_connector_dpms,
.fill_modes = drm_helper_probe_single_connector_modes,
- .detect = imx_tve_connector_detect,
.destroy = imx_drm_connector_destroy,
.reset = drm_atomic_helper_connector_reset,
.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
diff --git a/drivers/gpu/drm/imx/ipuv3-plane.c b/drivers/gpu/drm/imx/ipuv3-plane.c
index d5864ed4d772..e74a0ad52950 100644
--- a/drivers/gpu/drm/imx/ipuv3-plane.c
+++ b/drivers/gpu/drm/imx/ipuv3-plane.c
@@ -50,6 +50,12 @@ static const uint32_t ipu_plane_formats[] = {
DRM_FORMAT_YVYU,
DRM_FORMAT_YUV420,
DRM_FORMAT_YVU420,
+ DRM_FORMAT_YUV422,
+ DRM_FORMAT_YVU422,
+ DRM_FORMAT_YUV444,
+ DRM_FORMAT_YVU444,
+ DRM_FORMAT_NV12,
+ DRM_FORMAT_NV16,
DRM_FORMAT_RGB565,
};
@@ -64,13 +70,14 @@ drm_plane_state_to_eba(struct drm_plane_state *state)
{
struct drm_framebuffer *fb = state->fb;
struct drm_gem_cma_object *cma_obj;
+ int x = state->src_x >> 16;
+ int y = state->src_y >> 16;
cma_obj = drm_fb_cma_get_gem_obj(fb, 0);
BUG_ON(!cma_obj);
- return cma_obj->paddr + fb->offsets[0] +
- fb->pitches[0] * (state->src_y >> 16) +
- (fb->bits_per_pixel >> 3) * (state->src_x >> 16);
+ return cma_obj->paddr + fb->offsets[0] + fb->pitches[0] * y +
+ drm_format_plane_cpp(fb->pixel_format, 0) * x;
}
static inline unsigned long
@@ -79,13 +86,17 @@ drm_plane_state_to_ubo(struct drm_plane_state *state)
struct drm_framebuffer *fb = state->fb;
struct drm_gem_cma_object *cma_obj;
unsigned long eba = drm_plane_state_to_eba(state);
+ int x = state->src_x >> 16;
+ int y = state->src_y >> 16;
cma_obj = drm_fb_cma_get_gem_obj(fb, 1);
BUG_ON(!cma_obj);
- return cma_obj->paddr + fb->offsets[1] +
- fb->pitches[1] * (state->src_y >> 16) / 2 +
- (state->src_x >> 16) / 2 - eba;
+ x /= drm_format_horz_chroma_subsampling(fb->pixel_format);
+ y /= drm_format_vert_chroma_subsampling(fb->pixel_format);
+
+ return cma_obj->paddr + fb->offsets[1] + fb->pitches[1] * y +
+ drm_format_plane_cpp(fb->pixel_format, 1) * x - eba;
}
static inline unsigned long
@@ -94,69 +105,17 @@ drm_plane_state_to_vbo(struct drm_plane_state *state)
struct drm_framebuffer *fb = state->fb;
struct drm_gem_cma_object *cma_obj;
unsigned long eba = drm_plane_state_to_eba(state);
+ int x = state->src_x >> 16;
+ int y = state->src_y >> 16;
cma_obj = drm_fb_cma_get_gem_obj(fb, 2);
BUG_ON(!cma_obj);
- return cma_obj->paddr + fb->offsets[2] +
- fb->pitches[2] * (state->src_y >> 16) / 2 +
- (state->src_x >> 16) / 2 - eba;
-}
-
-static void ipu_plane_atomic_set_base(struct ipu_plane *ipu_plane)
-{
- struct drm_plane *plane = &ipu_plane->base;
- struct drm_plane_state *state = plane->state;
- struct drm_crtc_state *crtc_state = state->crtc->state;
- struct drm_framebuffer *fb = state->fb;
- unsigned long eba, ubo, vbo;
- int active;
-
- eba = drm_plane_state_to_eba(state);
-
- switch (fb->pixel_format) {
- case DRM_FORMAT_YUV420:
- case DRM_FORMAT_YVU420:
- if (!drm_atomic_crtc_needs_modeset(crtc_state))
- break;
+ x /= drm_format_horz_chroma_subsampling(fb->pixel_format);
+ y /= drm_format_vert_chroma_subsampling(fb->pixel_format);
- /*
- * Multiplanar formats have to meet the following restrictions:
- * - The (up to) three plane addresses are EBA, EBA+UBO, EBA+VBO
- * - EBA, UBO and VBO are a multiple of 8
- * - UBO and VBO are unsigned and not larger than 0xfffff8
- * - Only EBA may be changed while scanout is active
- * - The strides of U and V planes must be identical.
- */
- ubo = drm_plane_state_to_ubo(state);
- vbo = drm_plane_state_to_vbo(state);
-
- if (fb->pixel_format == DRM_FORMAT_YUV420)
- ipu_cpmem_set_yuv_planar_full(ipu_plane->ipu_ch,
- fb->pitches[1], ubo, vbo);
- else
- ipu_cpmem_set_yuv_planar_full(ipu_plane->ipu_ch,
- fb->pitches[1], vbo, ubo);
-
- dev_dbg(ipu_plane->base.dev->dev,
- "phy = %lu %lu %lu, x = %d, y = %d", eba, ubo, vbo,
- state->src_x >> 16, state->src_y >> 16);
- break;
- default:
- dev_dbg(ipu_plane->base.dev->dev, "phys = %lu, x = %d, y = %d",
- eba, state->src_x >> 16, state->src_y >> 16);
-
- break;
- }
-
- if (!drm_atomic_crtc_needs_modeset(crtc_state)) {
- active = ipu_idmac_get_current_buffer(ipu_plane->ipu_ch);
- ipu_cpmem_set_buffer(ipu_plane->ipu_ch, !active, eba);
- ipu_idmac_select_buffer(ipu_plane->ipu_ch, !active);
- } else {
- ipu_cpmem_set_buffer(ipu_plane->ipu_ch, 0, eba);
- ipu_cpmem_set_buffer(ipu_plane->ipu_ch, 1, eba);
- }
+ return cma_obj->paddr + fb->offsets[2] + fb->pitches[2] * y +
+ drm_format_plane_cpp(fb->pixel_format, 2) * x - eba;
}
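/*
 * Worked example (illustrative values, not part of the patch): a 1920x1080
 * YUV420 framebuffer in one contiguous buffer has pitches[0] = 1920,
 * pitches[1] = pitches[2] = 960, offsets[1] = 1920 * 1080 = 2073600 and
 * offsets[2] = offsets[1] + 960 * 540 = 2592000. With src_x = src_y = 0
 * this yields eba = paddr, ubo = 2073600 and vbo = 2592000; both offsets
 * are multiples of 8 and below the 0xfffff8 limit checked in atomic_check.
 * For non-zero src_x/src_y the chroma coordinates are first divided by the
 * format's subsampling factors: 2x2 for YUV420, 2x1 for YUV422 and 1x1 for
 * YUV444.
 */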
void ipu_plane_put_resources(struct ipu_plane *ipu_plane)
@@ -339,6 +298,10 @@ static int ipu_plane_atomic_check(struct drm_plane *plane,
switch (fb->pixel_format) {
case DRM_FORMAT_YUV420:
case DRM_FORMAT_YVU420:
+ case DRM_FORMAT_YUV422:
+ case DRM_FORMAT_YVU422:
+ case DRM_FORMAT_YUV444:
+ case DRM_FORMAT_YVU444:
/*
* Multiplanar formats have to meet the following restrictions:
* - The (up to) three plane addresses are EBA, EBA+UBO, EBA+VBO
@@ -347,27 +310,34 @@ static int ipu_plane_atomic_check(struct drm_plane *plane,
* - Only EBA may be changed while scanout is active
* - The strides of U and V planes must be identical.
*/
- ubo = drm_plane_state_to_ubo(state);
vbo = drm_plane_state_to_vbo(state);
- if ((ubo & 0x7) || (vbo & 0x7))
- return -EINVAL;
-
- if ((ubo > 0xfffff8) || (vbo > 0xfffff8))
+ if (vbo & 0x7 || vbo > 0xfffff8)
return -EINVAL;
- if (old_fb &&
- (old_fb->pixel_format == DRM_FORMAT_YUV420 ||
- old_fb->pixel_format == DRM_FORMAT_YVU420)) {
- old_ubo = drm_plane_state_to_ubo(old_state);
+ if (old_fb && (fb->pixel_format == old_fb->pixel_format)) {
old_vbo = drm_plane_state_to_vbo(old_state);
- if (ubo != old_ubo || vbo != old_vbo)
- return -EINVAL;
+ if (vbo != old_vbo)
+ crtc_state->mode_changed = true;
}
if (fb->pitches[1] != fb->pitches[2])
return -EINVAL;
+ /* fall-through */
+ case DRM_FORMAT_NV12:
+ case DRM_FORMAT_NV16:
+ ubo = drm_plane_state_to_ubo(state);
+
+ if (ubo & 0x7 || ubo > 0xfffff8)
+ return -EINVAL;
+
+ if (old_fb && (fb->pixel_format == old_fb->pixel_format)) {
+ old_ubo = drm_plane_state_to_ubo(old_state);
+ if (ubo != old_ubo)
+ crtc_state->mode_changed = true;
+ }
+
if (fb->pitches[1] < 1 || fb->pitches[1] > 16384)
return -EINVAL;
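/*
 * Sketch of the alignment test above: (ubo & 0x7) is non-zero for any
 * offset that is not a multiple of 8, and 0xfffff8 is the largest 8-byte
 * aligned value that fits the hardware's 24-bit offset fields, so e.g.
 * ubo = 0x1fa400 passes while ubo = 0x1fa401 is rejected.
 */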
@@ -399,15 +369,19 @@ static void ipu_plane_atomic_update(struct drm_plane *plane,
{
struct ipu_plane *ipu_plane = to_ipu_plane(plane);
struct drm_plane_state *state = plane->state;
+ struct drm_crtc_state *crtc_state = state->crtc->state;
+ struct drm_framebuffer *fb = state->fb;
+ unsigned long eba, ubo, vbo;
enum ipu_color_space ics;
+ int active;
- if (old_state->fb) {
- struct drm_crtc_state *crtc_state = state->crtc->state;
+ eba = drm_plane_state_to_eba(state);
- if (!drm_atomic_crtc_needs_modeset(crtc_state)) {
- ipu_plane_atomic_set_base(ipu_plane);
- return;
- }
+ if (old_state->fb && !drm_atomic_crtc_needs_modeset(crtc_state)) {
+ active = ipu_idmac_get_current_buffer(ipu_plane->ipu_ch);
+ ipu_cpmem_set_buffer(ipu_plane->ipu_ch, !active, eba);
+ ipu_idmac_select_buffer(ipu_plane->ipu_ch, !active);
+ return;
}
switch (ipu_plane->dp_flow) {
@@ -451,11 +425,50 @@ static void ipu_plane_atomic_update(struct drm_plane *plane,
ipu_cpmem_set_high_priority(ipu_plane->ipu_ch);
ipu_idmac_set_double_buffer(ipu_plane->ipu_ch, 1);
ipu_cpmem_set_stride(ipu_plane->ipu_ch, state->fb->pitches[0]);
- ipu_plane_atomic_set_base(ipu_plane);
+ switch (fb->pixel_format) {
+ case DRM_FORMAT_YUV420:
+ case DRM_FORMAT_YVU420:
+ case DRM_FORMAT_YUV422:
+ case DRM_FORMAT_YVU422:
+ case DRM_FORMAT_YUV444:
+ case DRM_FORMAT_YVU444:
+ ubo = drm_plane_state_to_ubo(state);
+ vbo = drm_plane_state_to_vbo(state);
+ if (fb->pixel_format == DRM_FORMAT_YVU420 ||
+ fb->pixel_format == DRM_FORMAT_YVU422 ||
+ fb->pixel_format == DRM_FORMAT_YVU444)
+ swap(ubo, vbo);
+
+ ipu_cpmem_set_yuv_planar_full(ipu_plane->ipu_ch,
+ fb->pitches[1], ubo, vbo);
+
+ dev_dbg(ipu_plane->base.dev->dev,
+ "phy = %lu %lu %lu, x = %d, y = %d", eba, ubo, vbo,
+ state->src_x >> 16, state->src_y >> 16);
+ break;
+ case DRM_FORMAT_NV12:
+ case DRM_FORMAT_NV16:
+ ubo = drm_plane_state_to_ubo(state);
+
+ ipu_cpmem_set_yuv_planar_full(ipu_plane->ipu_ch,
+ fb->pitches[1], ubo, ubo);
+
+ dev_dbg(ipu_plane->base.dev->dev,
+ "phy = %lu %lu, x = %d, y = %d", eba, ubo,
+ state->src_x >> 16, state->src_y >> 16);
+ break;
+ default:
+ dev_dbg(ipu_plane->base.dev->dev, "phys = %lu, x = %d, y = %d",
+ eba, state->src_x >> 16, state->src_y >> 16);
+ break;
+ }
+ ipu_cpmem_set_buffer(ipu_plane->ipu_ch, 0, eba);
+ ipu_cpmem_set_buffer(ipu_plane->ipu_ch, 1, eba);
ipu_plane_enable(ipu_plane);
}
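/*
 * With the fence handling moved out of the imx-drm core, the generic
 * drm_fb_cma_prepare_fb() helper referenced below attaches the
 * framebuffer's exclusive dma-buf fence to the plane state, so the atomic
 * helper waits for it before ipu_plane_atomic_update() touches the
 * hardware.
 */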
static const struct drm_plane_helper_funcs ipu_plane_helper_funcs = {
+ .prepare_fb = drm_fb_cma_prepare_fb,
.atomic_check = ipu_plane_atomic_check,
.atomic_disable = ipu_plane_atomic_disable,
.atomic_update = ipu_plane_atomic_update,
diff --git a/drivers/gpu/drm/imx/parallel-display.c b/drivers/gpu/drm/imx/parallel-display.c
index d796ada2a47a..8582a83c0d9b 100644
--- a/drivers/gpu/drm/imx/parallel-display.c
+++ b/drivers/gpu/drm/imx/parallel-display.c
@@ -49,12 +49,6 @@ static inline struct imx_parallel_display *enc_to_imxpd(struct drm_encoder *e)
return container_of(e, struct imx_parallel_display, encoder);
}
-static enum drm_connector_status imx_pd_connector_detect(
- struct drm_connector *connector, bool force)
-{
- return connector_status_connected;
-}
-
static int imx_pd_connector_get_modes(struct drm_connector *connector)
{
struct imx_parallel_display *imxpd = con_to_imxpd(connector);
@@ -143,7 +137,6 @@ static int imx_pd_encoder_atomic_check(struct drm_encoder *encoder,
static const struct drm_connector_funcs imx_pd_connector_funcs = {
.dpms = drm_atomic_helper_connector_dpms,
.fill_modes = drm_helper_probe_single_connector_modes,
- .detect = imx_pd_connector_detect,
.destroy = imx_drm_connector_destroy,
.reset = drm_atomic_helper_connector_reset,
.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.c b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
index cf83f6507ec8..4b7fe7eaec01 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_drv.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
@@ -18,6 +18,7 @@
#include <drm/drm_crtc_helper.h>
#include <drm/drm_gem.h>
#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_of.h>
#include <linux/component.h>
#include <linux/iommu.h>
#include <linux/of_address.h>
@@ -83,7 +84,7 @@ static void mtk_atomic_complete(struct mtk_drm_private *private,
drm_atomic_helper_wait_for_vblanks(drm, state);
drm_atomic_helper_cleanup_planes(drm, state);
- drm_atomic_state_free(state);
+ drm_atomic_state_put(state);
}
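/*
 * Note: struct drm_atomic_state is now reference counted; the commit path
 * takes an extra reference with drm_atomic_state_get() before a (possibly
 * asynchronous) commit is scheduled, and this completion path drops it
 * with drm_atomic_state_put() instead of freeing the state outright.
 */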
static void mtk_atomic_work(struct work_struct *work)
@@ -110,6 +111,7 @@ static int mtk_atomic_commit(struct drm_device *drm,
drm_atomic_helper_swap_state(state, true);
+ drm_atomic_state_get(state);
if (async)
mtk_atomic_schedule(private, state);
else
@@ -247,16 +249,14 @@ static const struct file_operations mtk_drm_fops = {
.mmap = mtk_drm_gem_mmap,
.poll = drm_poll,
.read = drm_read,
-#ifdef CONFIG_COMPAT
.compat_ioctl = drm_compat_ioctl,
-#endif
};
static struct drm_driver mtk_drm_driver = {
.driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME |
DRIVER_ATOMIC,
- .get_vblank_counter = drm_vblank_count,
+ .get_vblank_counter = drm_vblank_no_hw_counter,
.enable_vblank = mtk_drm_crtc_enable_vblank,
.disable_vblank = mtk_drm_crtc_disable_vblank,
@@ -415,7 +415,8 @@ static int mtk_drm_probe(struct platform_device *pdev)
comp_type == MTK_DPI) {
dev_info(dev, "Adding component match for %s\n",
node->full_name);
- component_match_add(dev, &match, compare_of, node);
+ drm_of_component_match_add(dev, &match, compare_of,
+ node);
} else {
struct mtk_ddp_comp *comp;
diff --git a/drivers/gpu/drm/mediatek/mtk_dsi.c b/drivers/gpu/drm/mediatek/mtk_dsi.c
index eaa5a2240c0c..2c42f90809d8 100644
--- a/drivers/gpu/drm/mediatek/mtk_dsi.c
+++ b/drivers/gpu/drm/mediatek/mtk_dsi.c
@@ -594,12 +594,6 @@ static void mtk_dsi_encoder_enable(struct drm_encoder *encoder)
mtk_output_dsi_enable(dsi);
}
-static enum drm_connector_status mtk_dsi_connector_detect(
- struct drm_connector *connector, bool force)
-{
- return connector_status_connected;
-}
-
static int mtk_dsi_connector_get_modes(struct drm_connector *connector)
{
struct mtk_dsi *dsi = connector_to_dsi(connector);
@@ -616,7 +610,6 @@ static const struct drm_encoder_helper_funcs mtk_dsi_encoder_helper_funcs = {
static const struct drm_connector_funcs mtk_dsi_connector_funcs = {
.dpms = drm_atomic_helper_connector_dpms,
- .detect = mtk_dsi_connector_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.destroy = drm_connector_cleanup,
.reset = drm_atomic_helper_connector_reset,
diff --git a/drivers/gpu/drm/meson/Kconfig b/drivers/gpu/drm/meson/Kconfig
new file mode 100644
index 000000000000..99719afcc77f
--- /dev/null
+++ b/drivers/gpu/drm/meson/Kconfig
@@ -0,0 +1,9 @@
+config DRM_MESON
+ tristate "DRM Support for Amlogic Meson Display Controller"
+ depends on DRM && OF && (ARM || ARM64)
+ depends on ARCH_MESON || COMPILE_TEST
+ select DRM_KMS_HELPER
+ select DRM_KMS_CMA_HELPER
+ select DRM_GEM_CMA_HELPER
+ select VIDEOMODE_HELPERS
+ select REGMAP_MMIO
diff --git a/drivers/gpu/drm/meson/Makefile b/drivers/gpu/drm/meson/Makefile
new file mode 100644
index 000000000000..2591978b8aad
--- /dev/null
+++ b/drivers/gpu/drm/meson/Makefile
@@ -0,0 +1,4 @@
+meson-y := meson_drv.o meson_plane.o meson_crtc.o meson_venc_cvbs.o
+meson-y += meson_viu.o meson_vpp.o meson_venc.o meson_vclk.o meson_canvas.o
+
+obj-$(CONFIG_DRM_MESON) += meson.o
diff --git a/drivers/gpu/drm/meson/meson_canvas.c b/drivers/gpu/drm/meson/meson_canvas.c
new file mode 100644
index 000000000000..4109e36c297f
--- /dev/null
+++ b/drivers/gpu/drm/meson/meson_canvas.c
@@ -0,0 +1,68 @@
+/*
+ * Copyright (C) 2016 BayLibre, SAS
+ * Author: Neil Armstrong <narmstrong@baylibre.com>
+ * Copyright (C) 2015 Amlogic, Inc. All rights reserved.
+ * Copyright (C) 2014 Endless Mobile
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include "meson_drv.h"
+#include "meson_canvas.h"
+#include "meson_registers.h"
+
+/*
+ * CANVAS is a memory zone where physical framebuffer information is
+ * stored for the VIU to scan out.
+ */
+
+/* DMC Registers */
+#define DMC_CAV_LUT_DATAL 0x48 /* 0x12 offset in data sheet */
+#define CANVAS_WIDTH_LBIT 29
+#define CANVAS_WIDTH_LWID 3
+#define DMC_CAV_LUT_DATAH 0x4c /* 0x13 offset in data sheet */
+#define CANVAS_WIDTH_HBIT 0
+#define CANVAS_HEIGHT_BIT 9
+#define CANVAS_BLKMODE_BIT 24
+#define DMC_CAV_LUT_ADDR 0x50 /* 0x14 offset in data sheet */
+#define CANVAS_LUT_WR_EN (0x2 << 8)
+#define CANVAS_LUT_RD_EN (0x1 << 8)
+
+void meson_canvas_setup(struct meson_drm *priv,
+ uint32_t canvas_index, uint32_t addr,
+ uint32_t stride, uint32_t height,
+ unsigned int wrap,
+ unsigned int blkmode)
+{
+ unsigned int val;
+
+ regmap_write(priv->dmc, DMC_CAV_LUT_DATAL,
+ (((addr + 7) >> 3)) |
+ (((stride + 7) >> 3) << CANVAS_WIDTH_LBIT));
+
+ regmap_write(priv->dmc, DMC_CAV_LUT_DATAH,
+ ((((stride + 7) >> 3) >> CANVAS_WIDTH_LWID) <<
+ CANVAS_WIDTH_HBIT) |
+ (height << CANVAS_HEIGHT_BIT) |
+ (wrap << 22) |
+ (blkmode << CANVAS_BLKMODE_BIT));
+
+ regmap_write(priv->dmc, DMC_CAV_LUT_ADDR,
+ CANVAS_LUT_WR_EN | canvas_index);
+
+ /* Force a read-back to make sure everything is flushed. */
+ regmap_read(priv->dmc, DMC_CAV_LUT_DATAH, &val);
+}
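+
+/*
+ * Worked example (illustrative numbers, not part of the patch): a linear
+ * 1920x1080 ARGB8888 OSD1 buffer has stride = 7680 bytes, so
+ * (stride + 7) >> 3 = 960. The low CANVAS_WIDTH_LWID = 3 bits of that
+ * value land in DMC_CAV_LUT_DATAL at bit 29 and the remaining
+ * 960 >> 3 = 120 go into DMC_CAV_LUT_DATAH at bit 0, followed by the
+ * height (1080) at bit 9 and the wrap/block mode fields. A call such as
+ *
+ * meson_canvas_setup(priv, MESON_CANVAS_ID_OSD1, paddr, 7680, 1080,
+ * MESON_CANVAS_WRAP_NONE, MESON_CANVAS_BLKMODE_LINEAR);
+ *
+ * therefore programs canvas 0x4e to describe that buffer for the VIU.
+ */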
diff --git a/drivers/gpu/drm/meson/meson_canvas.h b/drivers/gpu/drm/meson/meson_canvas.h
new file mode 100644
index 000000000000..af1759da4b27
--- /dev/null
+++ b/drivers/gpu/drm/meson/meson_canvas.h
@@ -0,0 +1,42 @@
+/*
+ * Copyright (C) 2016 BayLibre, SAS
+ * Author: Neil Armstrong <narmstrong@baylibre.com>
+ * Copyright (C) 2014 Endless Mobile
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+/* Canvas LUT Memory */
+
+#ifndef __MESON_CANVAS_H
+#define __MESON_CANVAS_H
+
+#define MESON_CANVAS_ID_OSD1 0x4e
+
+/* Canvas configuration. */
+#define MESON_CANVAS_WRAP_NONE 0x00
+#define MESON_CANVAS_WRAP_X 0x01
+#define MESON_CANVAS_WRAP_Y 0x02
+
+#define MESON_CANVAS_BLKMODE_LINEAR 0x00
+#define MESON_CANVAS_BLKMODE_32x32 0x01
+#define MESON_CANVAS_BLKMODE_64x64 0x02
+
+void meson_canvas_setup(struct meson_drm *priv,
+ uint32_t canvas_index, uint32_t addr,
+ uint32_t stride, uint32_t height,
+ unsigned int wrap,
+ unsigned int blkmode);
+
+#endif /* __MESON_CANVAS_H */
diff --git a/drivers/gpu/drm/meson/meson_crtc.c b/drivers/gpu/drm/meson/meson_crtc.c
new file mode 100644
index 000000000000..749770e5c65f
--- /dev/null
+++ b/drivers/gpu/drm/meson/meson_crtc.c
@@ -0,0 +1,208 @@
+/*
+ * Copyright (C) 2016 BayLibre, SAS
+ * Author: Neil Armstrong <narmstrong@baylibre.com>
+ * Copyright (C) 2015 Amlogic, Inc. All rights reserved.
+ * Copyright (C) 2014 Endless Mobile
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * Written by:
+ * Jasper St. Pierre <jstpierre@mecheye.net>
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/platform_device.h>
+#include <drm/drmP.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_flip_work.h>
+#include <drm/drm_crtc_helper.h>
+
+#include "meson_crtc.h"
+#include "meson_plane.h"
+#include "meson_vpp.h"
+#include "meson_viu.h"
+#include "meson_registers.h"
+
+/* CRTC definition */
+
+struct meson_crtc {
+ struct drm_crtc base;
+ struct drm_pending_vblank_event *event;
+ struct meson_drm *priv;
+};
+#define to_meson_crtc(x) container_of(x, struct meson_crtc, base)
+
+/* CRTC */
+
+static const struct drm_crtc_funcs meson_crtc_funcs = {
+ .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
+ .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
+ .destroy = drm_crtc_cleanup,
+ .page_flip = drm_atomic_helper_page_flip,
+ .reset = drm_atomic_helper_crtc_reset,
+ .set_config = drm_atomic_helper_set_config,
+};
+
+static void meson_crtc_enable(struct drm_crtc *crtc)
+{
+ struct meson_crtc *meson_crtc = to_meson_crtc(crtc);
+ struct drm_plane *plane = meson_crtc->priv->primary_plane;
+ struct meson_drm *priv = meson_crtc->priv;
+
+ /* Enable VPP Postblend */
+ writel(plane->state->crtc_w,
+ priv->io_base + _REG(VPP_POSTBLEND_H_SIZE));
+
+ writel_bits_relaxed(VPP_POSTBLEND_ENABLE, VPP_POSTBLEND_ENABLE,
+ priv->io_base + _REG(VPP_MISC));
+
+ priv->viu.osd1_enabled = true;
+}
+
+static void meson_crtc_disable(struct drm_crtc *crtc)
+{
+ struct meson_crtc *meson_crtc = to_meson_crtc(crtc);
+ struct meson_drm *priv = meson_crtc->priv;
+
+ priv->viu.osd1_enabled = false;
+
+ /* Disable VPP Postblend */
+ writel_bits_relaxed(VPP_POSTBLEND_ENABLE, 0,
+ priv->io_base + _REG(VPP_MISC));
+
+ if (crtc->state->event && !crtc->state->active) {
+ spin_lock_irq(&crtc->dev->event_lock);
+ drm_crtc_send_vblank_event(crtc, crtc->state->event);
+ spin_unlock_irq(&crtc->dev->event_lock);
+
+ crtc->state->event = NULL;
+ }
+}
+
+static void meson_crtc_atomic_begin(struct drm_crtc *crtc,
+ struct drm_crtc_state *state)
+{
+ struct meson_crtc *meson_crtc = to_meson_crtc(crtc);
+ unsigned long flags;
+
+ if (crtc->state->event) {
+ WARN_ON(drm_crtc_vblank_get(crtc) != 0);
+
+ spin_lock_irqsave(&crtc->dev->event_lock, flags);
+ meson_crtc->event = crtc->state->event;
+ spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
+ crtc->state->event = NULL;
+ }
+}
+
+static void meson_crtc_atomic_flush(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_crtc_state)
+{
+ struct meson_crtc *meson_crtc = to_meson_crtc(crtc);
+ struct meson_drm *priv = meson_crtc->priv;
+
+ if (priv->viu.osd1_enabled)
+ priv->viu.osd1_commit = true;
+}
+
+static const struct drm_crtc_helper_funcs meson_crtc_helper_funcs = {
+ .enable = meson_crtc_enable,
+ .disable = meson_crtc_disable,
+ .atomic_begin = meson_crtc_atomic_begin,
+ .atomic_flush = meson_crtc_atomic_flush,
+};
+
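+/*
+ * Commit model (summary of the code below): meson_crtc_atomic_flush()
+ * only raises the osd1_commit flag; the shadow copies of the OSD1
+ * registers kept in struct meson_drm are then written to the hardware
+ * from the vsync interrupt handler, so a new scanout configuration is
+ * always applied atomically during the vertical blanking period.
+ */
+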
+void meson_crtc_irq(struct meson_drm *priv)
+{
+ struct meson_crtc *meson_crtc = to_meson_crtc(priv->crtc);
+ unsigned long flags;
+
+ /* Update the OSD registers */
+ if (priv->viu.osd1_enabled && priv->viu.osd1_commit) {
+ writel_relaxed(priv->viu.osd1_ctrl_stat,
+ priv->io_base + _REG(VIU_OSD1_CTRL_STAT));
+ writel_relaxed(priv->viu.osd1_blk0_cfg[0],
+ priv->io_base + _REG(VIU_OSD1_BLK0_CFG_W0));
+ writel_relaxed(priv->viu.osd1_blk0_cfg[1],
+ priv->io_base + _REG(VIU_OSD1_BLK0_CFG_W1));
+ writel_relaxed(priv->viu.osd1_blk0_cfg[2],
+ priv->io_base + _REG(VIU_OSD1_BLK0_CFG_W2));
+ writel_relaxed(priv->viu.osd1_blk0_cfg[3],
+ priv->io_base + _REG(VIU_OSD1_BLK0_CFG_W3));
+ writel_relaxed(priv->viu.osd1_blk0_cfg[4],
+ priv->io_base + _REG(VIU_OSD1_BLK0_CFG_W4));
+
+ /* If the output is interlaced, make use of the scaler */
+ if (priv->viu.osd1_interlace) {
+ struct drm_plane *plane = priv->primary_plane;
+ struct drm_plane_state *state = plane->state;
+ struct drm_rect dest = {
+ .x1 = state->crtc_x,
+ .y1 = state->crtc_y,
+ .x2 = state->crtc_x + state->crtc_w,
+ .y2 = state->crtc_y + state->crtc_h,
+ };
+
+ meson_vpp_setup_interlace_vscaler_osd1(priv, &dest);
+ } else {
+ meson_vpp_disable_interlace_vscaler_osd1(priv);
+ }
+
+ /* Enable OSD1 */
+ writel_bits_relaxed(VPP_OSD1_POSTBLEND, VPP_OSD1_POSTBLEND,
+ priv->io_base + _REG(VPP_MISC));
+
+ priv->viu.osd1_commit = false;
+ }
+
+ drm_crtc_handle_vblank(priv->crtc);
+
+ spin_lock_irqsave(&priv->drm->event_lock, flags);
+ if (meson_crtc->event) {
+ drm_crtc_send_vblank_event(priv->crtc, meson_crtc->event);
+ drm_crtc_vblank_put(priv->crtc);
+ meson_crtc->event = NULL;
+ }
+ spin_unlock_irqrestore(&priv->drm->event_lock, flags);
+}
+
+int meson_crtc_create(struct meson_drm *priv)
+{
+ struct meson_crtc *meson_crtc;
+ struct drm_crtc *crtc;
+ int ret;
+
+ meson_crtc = devm_kzalloc(priv->drm->dev, sizeof(*meson_crtc),
+ GFP_KERNEL);
+ if (!meson_crtc)
+ return -ENOMEM;
+
+ meson_crtc->priv = priv;
+ crtc = &meson_crtc->base;
+ ret = drm_crtc_init_with_planes(priv->drm, crtc,
+ priv->primary_plane, NULL,
+ &meson_crtc_funcs, "meson_crtc");
+ if (ret) {
+ dev_err(priv->drm->dev, "Failed to init CRTC\n");
+ return ret;
+ }
+
+ drm_crtc_helper_add(crtc, &meson_crtc_helper_funcs);
+
+ priv->crtc = crtc;
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/meson/meson_crtc.h b/drivers/gpu/drm/meson/meson_crtc.h
new file mode 100644
index 000000000000..b62b9e51764d
--- /dev/null
+++ b/drivers/gpu/drm/meson/meson_crtc.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright (C) 2016 BayLibre, SAS
+ * Author: Neil Armstrong <narmstrong@baylibre.com>
+ * Copyright (C) 2014 Endless Mobile
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * Written by:
+ * Jasper St. Pierre <jstpierre@mecheye.net>
+ */
+
+#ifndef __MESON_CRTC_H
+#define __MESON_CRTC_H
+
+#include "meson_drv.h"
+
+int meson_crtc_create(struct meson_drm *priv);
+
+void meson_crtc_irq(struct meson_drm *priv);
+
+#endif /* __MESON_CRTC_H */
diff --git a/drivers/gpu/drm/meson/meson_drv.c b/drivers/gpu/drm/meson/meson_drv.c
new file mode 100644
index 000000000000..ff1f6019b97b
--- /dev/null
+++ b/drivers/gpu/drm/meson/meson_drv.c
@@ -0,0 +1,343 @@
+/*
+ * Copyright (C) 2016 BayLibre, SAS
+ * Author: Neil Armstrong <narmstrong@baylibre.com>
+ * Copyright (C) 2014 Endless Mobile
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * Written by:
+ * Jasper St. Pierre <jstpierre@mecheye.net>
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/platform_device.h>
+#include <linux/of_graph.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_flip_work.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_plane_helper.h>
+#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_rect.h>
+#include <drm/drm_fb_helper.h>
+
+#include "meson_drv.h"
+#include "meson_plane.h"
+#include "meson_crtc.h"
+#include "meson_venc_cvbs.h"
+
+#include "meson_vpp.h"
+#include "meson_viu.h"
+#include "meson_venc.h"
+#include "meson_canvas.h"
+#include "meson_registers.h"
+
+#define DRIVER_NAME "meson"
+#define DRIVER_DESC "Amlogic Meson DRM driver"
+
+/*
+ * Video Processing Unit
+ *
+ * The VPU handles the global video processing; it manages the clock
+ * gates, the block reset lines and the power domains.
+ *
+ * What is still missing:
+ * - Full reset of entire video processing HW blocks
+ * - Scaling and setup of the VPU clock
+ * - Bus clock gates
+ * - Powering up video processing HW blocks
+ * - Powering Up HDMI controller and PHY
+ */
+
+static void meson_fb_output_poll_changed(struct drm_device *dev)
+{
+ struct meson_drm *priv = dev->dev_private;
+
+ drm_fbdev_cma_hotplug_event(priv->fbdev);
+}
+
+static const struct drm_mode_config_funcs meson_mode_config_funcs = {
+ .output_poll_changed = meson_fb_output_poll_changed,
+ .atomic_check = drm_atomic_helper_check,
+ .atomic_commit = drm_atomic_helper_commit,
+ .fb_create = drm_fb_cma_create,
+};
+
+static int meson_enable_vblank(struct drm_device *dev, unsigned int crtc)
+{
+ struct meson_drm *priv = dev->dev_private;
+
+ meson_venc_enable_vsync(priv);
+
+ return 0;
+}
+
+static void meson_disable_vblank(struct drm_device *dev, unsigned int crtc)
+{
+ struct meson_drm *priv = dev->dev_private;
+
+ meson_venc_disable_vsync(priv);
+}
+
+static irqreturn_t meson_irq(int irq, void *arg)
+{
+ struct drm_device *dev = arg;
+ struct meson_drm *priv = dev->dev_private;
+
+ (void)readl_relaxed(priv->io_base + _REG(VENC_INTFLAG));
+
+ meson_crtc_irq(priv);
+
+ return IRQ_HANDLED;
+}
+
+static const struct file_operations fops = {
+ .owner = THIS_MODULE,
+ .open = drm_open,
+ .release = drm_release,
+ .unlocked_ioctl = drm_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = drm_compat_ioctl,
+#endif
+ .poll = drm_poll,
+ .read = drm_read,
+ .llseek = no_llseek,
+ .mmap = drm_gem_cma_mmap,
+};
+
+static struct drm_driver meson_driver = {
+ .driver_features = DRIVER_HAVE_IRQ | DRIVER_GEM |
+ DRIVER_MODESET | DRIVER_PRIME |
+ DRIVER_ATOMIC,
+
+ /* Vblank */
+ .enable_vblank = meson_enable_vblank,
+ .disable_vblank = meson_disable_vblank,
+ .get_vblank_counter = drm_vblank_no_hw_counter,
+
+ /* IRQ */
+ .irq_handler = meson_irq,
+
+ /* PRIME Ops */
+ .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
+ .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
+ .gem_prime_import = drm_gem_prime_import,
+ .gem_prime_export = drm_gem_prime_export,
+ .gem_prime_get_sg_table = drm_gem_cma_prime_get_sg_table,
+ .gem_prime_import_sg_table = drm_gem_cma_prime_import_sg_table,
+ .gem_prime_vmap = drm_gem_cma_prime_vmap,
+ .gem_prime_vunmap = drm_gem_cma_prime_vunmap,
+ .gem_prime_mmap = drm_gem_cma_prime_mmap,
+
+ /* GEM Ops */
+ .dumb_create = drm_gem_cma_dumb_create,
+ .dumb_destroy = drm_gem_dumb_destroy,
+ .dumb_map_offset = drm_gem_cma_dumb_map_offset,
+ .gem_free_object_unlocked = drm_gem_cma_free_object,
+ .gem_vm_ops = &drm_gem_cma_vm_ops,
+
+ /* Misc */
+ .fops = &fops,
+ .name = DRIVER_NAME,
+ .desc = DRIVER_DESC,
+ .date = "20161109",
+ .major = 1,
+ .minor = 0,
+};
+
+static bool meson_vpu_has_available_connectors(struct device *dev)
+{
+ struct device_node *ep, *remote;
+
+ /* Parse each endpoint and check whether a remote port exists */
+ for_each_endpoint_of_node(dev->of_node, ep) {
+ /* If a remote port exists, consider this output enabled */
+ remote = of_graph_get_remote_port(ep);
+ if (remote)
+ return true;
+ }
+
+ return false;
+}
+
+static struct regmap_config meson_regmap_config = {
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+ .max_register = 0x1000,
+};
+
+static int meson_drv_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct meson_drm *priv;
+ struct drm_device *drm;
+ struct resource *res;
+ void __iomem *regs;
+ int ret;
+
+ /* Check that at least one output connector is available */
+ if (!meson_vpu_has_available_connectors(dev)) {
+ dev_err(dev, "No output connector available\n");
+ return -ENODEV;
+ }
+
+ drm = drm_dev_alloc(&meson_driver, dev);
+ if (IS_ERR(drm))
+ return PTR_ERR(drm);
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv) {
+ ret = -ENOMEM;
+ goto free_drm;
+ }
+ drm->dev_private = priv;
+ priv->drm = drm;
+ priv->dev = dev;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "vpu");
+ regs = devm_ioremap_resource(dev, res);
+ if (IS_ERR(regs)) {
+ ret = PTR_ERR(regs);
+ goto free_drm;
+ }
+
+ priv->io_base = regs;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "hhi");
+ /* Simply ioremap since it may be a shared register zone */
+ regs = devm_ioremap(dev, res->start, resource_size(res));
+ if (!regs) {
+ ret = -EADDRNOTAVAIL;
+ goto free_drm;
+ }
+
+ priv->hhi = devm_regmap_init_mmio(dev, regs,
+ &meson_regmap_config);
+ if (IS_ERR(priv->hhi)) {
+ dev_err(&pdev->dev, "Couldn't create the HHI regmap\n");
+ ret = PTR_ERR(priv->hhi);
+ goto free_drm;
+ }
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dmc");
+ /* Simply ioremap since it may be a shared register zone */
+ regs = devm_ioremap(dev, res->start, resource_size(res));
+ if (!regs) {
+ ret = -EADDRNOTAVAIL;
+ goto free_drm;
+ }
+
+ priv->dmc = devm_regmap_init_mmio(dev, regs,
+ &meson_regmap_config);
+ if (IS_ERR(priv->dmc)) {
+ dev_err(&pdev->dev, "Couldn't create the DMC regmap\n");
+ ret = PTR_ERR(priv->dmc);
+ goto free_drm;
+ }
+
+ priv->vsync_irq = platform_get_irq(pdev, 0);
+
+ drm_vblank_init(drm, 1);
+ drm_mode_config_init(drm);
+
+ /* Encoder Initialization */
+
+ ret = meson_venc_cvbs_create(priv);
+ if (ret)
+ goto free_drm;
+
+ /* Hardware Initialization */
+
+ meson_venc_init(priv);
+ meson_vpp_init(priv);
+ meson_viu_init(priv);
+
+ ret = meson_plane_create(priv);
+ if (ret)
+ goto free_drm;
+
+ ret = meson_crtc_create(priv);
+ if (ret)
+ goto free_drm;
+
+ ret = drm_irq_install(drm, priv->vsync_irq);
+ if (ret)
+ goto free_drm;
+
+ drm_mode_config_reset(drm);
+ drm->mode_config.max_width = 8192;
+ drm->mode_config.max_height = 8192;
+ drm->mode_config.funcs = &meson_mode_config_funcs;
+
+ priv->fbdev = drm_fbdev_cma_init(drm, 32,
+ drm->mode_config.num_crtc,
+ drm->mode_config.num_connector);
+ if (IS_ERR(priv->fbdev)) {
+ ret = PTR_ERR(priv->fbdev);
+ goto free_drm;
+ }
+
+ drm_kms_helper_poll_init(drm);
+
+ platform_set_drvdata(pdev, priv);
+
+ ret = drm_dev_register(drm, 0);
+ if (ret)
+ goto free_drm;
+
+ return 0;
+
+free_drm:
+ drm_dev_unref(drm);
+
+ return ret;
+}
+
+static int meson_drv_remove(struct platform_device *pdev)
+{
+ struct meson_drm *priv = dev_get_drvdata(&pdev->dev);
+ struct drm_device *drm = priv->drm;
+
+ drm_dev_unregister(drm);
+ drm_kms_helper_poll_fini(drm);
+ drm_fbdev_cma_fini(priv->fbdev);
+ drm_mode_config_cleanup(drm);
+ drm_vblank_cleanup(drm);
+ drm_dev_unref(drm);
+
+ return 0;
+}
+
+static const struct of_device_id dt_match[] = {
+ { .compatible = "amlogic,meson-gxbb-vpu" },
+ { .compatible = "amlogic,meson-gxl-vpu" },
+ { .compatible = "amlogic,meson-gxm-vpu" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, dt_match);
+
+static struct platform_driver meson_drm_platform_driver = {
+ .probe = meson_drv_probe,
+ .remove = meson_drv_remove,
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = DRIVER_NAME,
+ .of_match_table = dt_match,
+ },
+};
+
+module_platform_driver(meson_drm_platform_driver);
+
+MODULE_AUTHOR("Jasper St. Pierre <jstpierre@mecheye.net>");
+MODULE_AUTHOR("Neil Armstrong <narmstrong@baylibre.com>");
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/meson/meson_drv.h b/drivers/gpu/drm/meson/meson_drv.h
new file mode 100644
index 000000000000..6195327c51ca
--- /dev/null
+++ b/drivers/gpu/drm/meson/meson_drv.h
@@ -0,0 +1,59 @@
+/*
+ * Copyright (C) 2016 BayLibre, SAS
+ * Author: Neil Armstrong <narmstrong@baylibre.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __MESON_DRV_H
+#define __MESON_DRV_H
+
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/of.h>
+#include <drm/drmP.h>
+
+struct meson_drm {
+ struct device *dev;
+ void __iomem *io_base;
+ struct regmap *hhi;
+ struct regmap *dmc;
+ int vsync_irq;
+
+ struct drm_device *drm;
+ struct drm_crtc *crtc;
+ struct drm_fbdev_cma *fbdev;
+ struct drm_plane *primary_plane;
+
+ /* Components Data */
+ struct {
+ bool osd1_enabled;
+ bool osd1_interlace;
+ bool osd1_commit;
+ uint32_t osd1_ctrl_stat;
+ uint32_t osd1_blk0_cfg[5];
+ } viu;
+
+ struct {
+ unsigned int current_mode;
+ } venc;
+};
+
+static inline int meson_vpu_is_compatible(struct meson_drm *priv,
+ const char *compat)
+{
+ return of_device_is_compatible(priv->dev->of_node, compat);
+}
+
+#endif /* __MESON_DRV_H */
diff --git a/drivers/gpu/drm/meson/meson_plane.c b/drivers/gpu/drm/meson/meson_plane.c
new file mode 100644
index 000000000000..4942ca090b46
--- /dev/null
+++ b/drivers/gpu/drm/meson/meson_plane.c
@@ -0,0 +1,230 @@
+/*
+ * Copyright (C) 2016 BayLibre, SAS
+ * Author: Neil Armstrong <narmstrong@baylibre.com>
+ * Copyright (C) 2015 Amlogic, Inc. All rights reserved.
+ * Copyright (C) 2014 Endless Mobile
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * Written by:
+ * Jasper St. Pierre <jstpierre@mecheye.net>
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/platform_device.h>
+#include <drm/drmP.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_plane_helper.h>
+#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_rect.h>
+
+#include "meson_plane.h"
+#include "meson_vpp.h"
+#include "meson_viu.h"
+#include "meson_canvas.h"
+#include "meson_registers.h"
+
+struct meson_plane {
+ struct drm_plane base;
+ struct meson_drm *priv;
+};
+#define to_meson_plane(x) container_of(x, struct meson_plane, base)
+
+static int meson_plane_atomic_check(struct drm_plane *plane,
+ struct drm_plane_state *state)
+{
+ struct drm_crtc_state *crtc_state;
+ struct drm_rect clip = { 0, };
+
+ crtc_state = drm_atomic_get_crtc_state(state->state, state->crtc);
+ if (IS_ERR(crtc_state))
+ return PTR_ERR(crtc_state);
+
+ clip.x2 = crtc_state->mode.hdisplay;
+ clip.y2 = crtc_state->mode.vdisplay;
+
+ return drm_plane_helper_check_state(state, &clip,
+ DRM_PLANE_HELPER_NO_SCALING,
+ DRM_PLANE_HELPER_NO_SCALING,
+ true, true);
+}
+
+/* Take a 16.16 fixed point number and convert it to an integer. */
+static inline int64_t fixed16_to_int(int64_t value)
+{
+ return value >> 16;
+}
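+
+/*
+ * Example: plane source coordinates are 16.16 fixed point, so a source
+ * width of 1920 pixels arrives as 1920 << 16 = 125829120, and
+ * fixed16_to_int() recovers the integer 1920.
+ */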
+
+static void meson_plane_atomic_update(struct drm_plane *plane,
+ struct drm_plane_state *old_state)
+{
+ struct meson_plane *meson_plane = to_meson_plane(plane);
+ struct drm_plane_state *state = plane->state;
+ struct drm_framebuffer *fb = state->fb;
+ struct meson_drm *priv = meson_plane->priv;
+ struct drm_gem_cma_object *gem;
+ struct drm_rect src = {
+ .x1 = (state->src_x),
+ .y1 = (state->src_y),
+ .x2 = (state->src_x + state->src_w),
+ .y2 = (state->src_y + state->src_h),
+ };
+ struct drm_rect dest = {
+ .x1 = state->crtc_x,
+ .y1 = state->crtc_y,
+ .x2 = state->crtc_x + state->crtc_w,
+ .y2 = state->crtc_y + state->crtc_h,
+ };
+ unsigned long flags;
+
+ /*
+ * Update Coordinates
+ * Update Formats
+ * Update Buffer
+ * Enable Plane
+ */
+ spin_lock_irqsave(&priv->drm->event_lock, flags);
+
+ /* Enable OSD and BLK0, set max global alpha */
+ priv->viu.osd1_ctrl_stat = OSD_ENABLE |
+ (0xFF << OSD_GLOBAL_ALPHA_SHIFT) |
+ OSD_BLK0_ENABLE;
+
+ /* Set up BLK0 to point to the right canvas */
+ priv->viu.osd1_blk0_cfg[0] = ((MESON_CANVAS_ID_OSD1 << OSD_CANVAS_SEL) |
+ OSD_ENDIANNESS_LE);
+
+ /* On GXBB, Use the old non-HDR RGB2YUV converter */
+ if (meson_vpu_is_compatible(priv, "amlogic,meson-gxbb-vpu"))
+ priv->viu.osd1_blk0_cfg[0] |= OSD_OUTPUT_COLOR_RGB;
+
+ switch (fb->pixel_format) {
+ case DRM_FORMAT_XRGB8888:
+ /* For XRGB, replace the pixel's alpha by 0xFF */
+ writel_bits_relaxed(OSD_REPLACE_EN, OSD_REPLACE_EN,
+ priv->io_base + _REG(VIU_OSD1_CTRL_STAT2));
+ priv->viu.osd1_blk0_cfg[0] |= OSD_BLK_MODE_32 |
+ OSD_COLOR_MATRIX_32_ARGB;
+ break;
+ case DRM_FORMAT_ARGB8888:
+ /* For ARGB, use the pixel's alpha */
+ writel_bits_relaxed(OSD_REPLACE_EN, 0,
+ priv->io_base + _REG(VIU_OSD1_CTRL_STAT2));
+ priv->viu.osd1_blk0_cfg[0] |= OSD_BLK_MODE_32 |
+ OSD_COLOR_MATRIX_32_ARGB;
+ break;
+ case DRM_FORMAT_RGB888:
+ priv->viu.osd1_blk0_cfg[0] |= OSD_BLK_MODE_24 |
+ OSD_COLOR_MATRIX_24_RGB;
+ break;
+ case DRM_FORMAT_RGB565:
+ priv->viu.osd1_blk0_cfg[0] |= OSD_BLK_MODE_16 |
+ OSD_COLOR_MATRIX_16_RGB565;
+ break;
+ }
+
+ if (state->crtc->mode.flags & DRM_MODE_FLAG_INTERLACE) {
+ priv->viu.osd1_interlace = true;
+
+ dest.y1 /= 2;
+ dest.y2 /= 2;
+ } else {
+ priv->viu.osd1_interlace = false;
+ }
+
+ /*
+ * These registers hold ((x2 - 1) << 16) | x1, i.e. the inclusive end
+ * coordinate in the upper half-word, where x2 is the exclusive end.
+ * e.g. a 1920 pixel wide area at +30 is ((30 + 1920 - 1) << 16) | 30.
+ */
+ priv->viu.osd1_blk0_cfg[1] = ((fixed16_to_int(src.x2) - 1) << 16) |
+ fixed16_to_int(src.x1);
+ priv->viu.osd1_blk0_cfg[2] = ((fixed16_to_int(src.y2) - 1) << 16) |
+ fixed16_to_int(src.y1);
+ priv->viu.osd1_blk0_cfg[3] = ((dest.x2 - 1) << 16) | dest.x1;
+ priv->viu.osd1_blk0_cfg[4] = ((dest.y2 - 1) << 16) | dest.y1;
+
+ /* Update Canvas with buffer address */
+ gem = drm_fb_cma_get_gem_obj(fb, 0);
+
+ meson_canvas_setup(priv, MESON_CANVAS_ID_OSD1,
+ gem->paddr, fb->pitches[0],
+ fb->height, MESON_CANVAS_WRAP_NONE,
+ MESON_CANVAS_BLKMODE_LINEAR);
+
+ spin_unlock_irqrestore(&priv->drm->event_lock, flags);
+}
+
+static void meson_plane_atomic_disable(struct drm_plane *plane,
+ struct drm_plane_state *old_state)
+{
+ struct meson_plane *meson_plane = to_meson_plane(plane);
+ struct meson_drm *priv = meson_plane->priv;
+
+ /* Disable OSD1 */
+ writel_bits_relaxed(VPP_OSD1_POSTBLEND, 0,
+ priv->io_base + _REG(VPP_MISC));
+}
+
+static const struct drm_plane_helper_funcs meson_plane_helper_funcs = {
+ .atomic_check = meson_plane_atomic_check,
+ .atomic_disable = meson_plane_atomic_disable,
+ .atomic_update = meson_plane_atomic_update,
+};
+
+static const struct drm_plane_funcs meson_plane_funcs = {
+ .update_plane = drm_atomic_helper_update_plane,
+ .disable_plane = drm_atomic_helper_disable_plane,
+ .destroy = drm_plane_cleanup,
+ .reset = drm_atomic_helper_plane_reset,
+ .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
+};
+
+static const uint32_t supported_drm_formats[] = {
+ DRM_FORMAT_ARGB8888,
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_RGB888,
+ DRM_FORMAT_RGB565,
+};
+
+int meson_plane_create(struct meson_drm *priv)
+{
+ struct meson_plane *meson_plane;
+ struct drm_plane *plane;
+
+ meson_plane = devm_kzalloc(priv->drm->dev, sizeof(*meson_plane),
+ GFP_KERNEL);
+ if (!meson_plane)
+ return -ENOMEM;
+
+ meson_plane->priv = priv;
+ plane = &meson_plane->base;
+
+ drm_universal_plane_init(priv->drm, plane, 0xFF,
+ &meson_plane_funcs,
+ supported_drm_formats,
+ ARRAY_SIZE(supported_drm_formats),
+ DRM_PLANE_TYPE_PRIMARY, "meson_primary_plane");
+
+ drm_plane_helper_add(plane, &meson_plane_helper_funcs);
+
+ priv->primary_plane = plane;
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/meson/meson_plane.h b/drivers/gpu/drm/meson/meson_plane.h
new file mode 100644
index 000000000000..e26b8b0aa1fa
--- /dev/null
+++ b/drivers/gpu/drm/meson/meson_plane.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright (C) 2016 BayLibre, SAS
+ * Author: Neil Armstrong <narmstrong@baylibre.com>
+ * Copyright (C) 2014 Endless Mobile
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * Written by:
+ * Jasper St. Pierre <jstpierre@mecheye.net>
+ */
+
+#ifndef __MESON_PLANE_H
+#define __MESON_PLANE_H
+
+#include "meson_drv.h"
+
+int meson_plane_create(struct meson_drm *priv);
+
+#endif /* __MESON_PLANE_H */
diff --git a/drivers/gpu/drm/meson/meson_registers.h b/drivers/gpu/drm/meson/meson_registers.h
new file mode 100644
index 000000000000..6adf9c13fafa
--- /dev/null
+++ b/drivers/gpu/drm/meson/meson_registers.h
@@ -0,0 +1,1395 @@
+/*
+ * Copyright (C) 2015 Amlogic, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ */
+
+#ifndef __MESON_REGISTERS_H
+#define __MESON_REGISTERS_H
+
+/* Shift all registers by 2 */
+#define _REG(reg) ((reg) << 2)
+
+#define writel_bits_relaxed(mask, val, addr) \
+ writel_relaxed((readl_relaxed(addr) & ~(mask)) | (val), addr)
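+
+/*
+ * The register offsets below are word indices from the datasheet; _REG()
+ * converts them to byte offsets for the MMIO accessors. For example,
+ * enabling the postblend stage read-modify-writes VPP_MISC (word 0x1d26,
+ * byte offset 0x7498):
+ *
+ * writel_bits_relaxed(VPP_POSTBLEND_ENABLE, VPP_POSTBLEND_ENABLE,
+ * priv->io_base + _REG(VPP_MISC));
+ */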
+
+/* vpp2 */
+#define VPP2_DUMMY_DATA 0x1900
+#define VPP2_LINE_IN_LENGTH 0x1901
+#define VPP2_PIC_IN_HEIGHT 0x1902
+#define VPP2_SCALE_COEF_IDX 0x1903
+#define VPP2_SCALE_COEF 0x1904
+#define VPP2_VSC_REGION12_STARTP 0x1905
+#define VPP2_VSC_REGION34_STARTP 0x1906
+#define VPP2_VSC_REGION4_ENDP 0x1907
+#define VPP2_VSC_START_PHASE_STEP 0x1908
+#define VPP2_VSC_REGION0_PHASE_SLOPE 0x1909
+#define VPP2_VSC_REGION1_PHASE_SLOPE 0x190a
+#define VPP2_VSC_REGION3_PHASE_SLOPE 0x190b
+#define VPP2_VSC_REGION4_PHASE_SLOPE 0x190c
+#define VPP2_VSC_PHASE_CTRL 0x190d
+#define VPP2_VSC_INI_PHASE 0x190e
+#define VPP2_HSC_REGION12_STARTP 0x1910
+#define VPP2_HSC_REGION34_STARTP 0x1911
+#define VPP2_HSC_REGION4_ENDP 0x1912
+#define VPP2_HSC_START_PHASE_STEP 0x1913
+#define VPP2_HSC_REGION0_PHASE_SLOPE 0x1914
+#define VPP2_HSC_REGION1_PHASE_SLOPE 0x1915
+#define VPP2_HSC_REGION3_PHASE_SLOPE 0x1916
+#define VPP2_HSC_REGION4_PHASE_SLOPE 0x1917
+#define VPP2_HSC_PHASE_CTRL 0x1918
+#define VPP2_SC_MISC 0x1919
+#define VPP2_PREBLEND_VD1_H_START_END 0x191a
+#define VPP2_PREBLEND_VD1_V_START_END 0x191b
+#define VPP2_POSTBLEND_VD1_H_START_END 0x191c
+#define VPP2_POSTBLEND_VD1_V_START_END 0x191d
+#define VPP2_PREBLEND_H_SIZE 0x1920
+#define VPP2_POSTBLEND_H_SIZE 0x1921
+#define VPP2_HOLD_LINES 0x1922
+#define VPP2_BLEND_ONECOLOR_CTRL 0x1923
+#define VPP2_PREBLEND_CURRENT_XY 0x1924
+#define VPP2_POSTBLEND_CURRENT_XY 0x1925
+#define VPP2_MISC 0x1926
+#define VPP2_OFIFO_SIZE 0x1927
+#define VPP2_FIFO_STATUS 0x1928
+#define VPP2_SMOKE_CTRL 0x1929
+#define VPP2_SMOKE1_VAL 0x192a
+#define VPP2_SMOKE2_VAL 0x192b
+#define VPP2_SMOKE1_H_START_END 0x192d
+#define VPP2_SMOKE1_V_START_END 0x192e
+#define VPP2_SMOKE2_H_START_END 0x192f
+#define VPP2_SMOKE2_V_START_END 0x1930
+#define VPP2_SCO_FIFO_CTRL 0x1933
+#define VPP2_HSC_PHASE_CTRL1 0x1934
+#define VPP2_HSC_INI_PAT_CTRL 0x1935
+#define VPP2_VADJ_CTRL 0x1940
+#define VPP2_VADJ1_Y 0x1941
+#define VPP2_VADJ1_MA_MB 0x1942
+#define VPP2_VADJ1_MC_MD 0x1943
+#define VPP2_VADJ2_Y 0x1944
+#define VPP2_VADJ2_MA_MB 0x1945
+#define VPP2_VADJ2_MC_MD 0x1946
+#define VPP2_MATRIX_PROBE_COLOR 0x195c
+#define VPP2_MATRIX_HL_COLOR 0x195d
+#define VPP2_MATRIX_PROBE_POS 0x195e
+#define VPP2_MATRIX_CTRL 0x195f
+#define VPP2_MATRIX_COEF00_01 0x1960
+#define VPP2_MATRIX_COEF02_10 0x1961
+#define VPP2_MATRIX_COEF11_12 0x1962
+#define VPP2_MATRIX_COEF20_21 0x1963
+#define VPP2_MATRIX_COEF22 0x1964
+#define VPP2_MATRIX_OFFSET0_1 0x1965
+#define VPP2_MATRIX_OFFSET2 0x1966
+#define VPP2_MATRIX_PRE_OFFSET0_1 0x1967
+#define VPP2_MATRIX_PRE_OFFSET2 0x1968
+#define VPP2_DUMMY_DATA1 0x1969
+#define VPP2_GAINOFF_CTRL0 0x196a
+#define VPP2_GAINOFF_CTRL1 0x196b
+#define VPP2_GAINOFF_CTRL2 0x196c
+#define VPP2_GAINOFF_CTRL3 0x196d
+#define VPP2_GAINOFF_CTRL4 0x196e
+#define VPP2_CHROMA_ADDR_PORT 0x1970
+#define VPP2_CHROMA_DATA_PORT 0x1971
+#define VPP2_GCLK_CTRL0 0x1972
+#define VPP2_GCLK_CTRL1 0x1973
+#define VPP2_SC_GCLK_CTRL 0x1974
+#define VPP2_MISC1 0x1976
+#define VPP2_DNLP_CTRL_00 0x1981
+#define VPP2_DNLP_CTRL_01 0x1982
+#define VPP2_DNLP_CTRL_02 0x1983
+#define VPP2_DNLP_CTRL_03 0x1984
+#define VPP2_DNLP_CTRL_04 0x1985
+#define VPP2_DNLP_CTRL_05 0x1986
+#define VPP2_DNLP_CTRL_06 0x1987
+#define VPP2_DNLP_CTRL_07 0x1988
+#define VPP2_DNLP_CTRL_08 0x1989
+#define VPP2_DNLP_CTRL_09 0x198a
+#define VPP2_DNLP_CTRL_10 0x198b
+#define VPP2_DNLP_CTRL_11 0x198c
+#define VPP2_DNLP_CTRL_12 0x198d
+#define VPP2_DNLP_CTRL_13 0x198e
+#define VPP2_DNLP_CTRL_14 0x198f
+#define VPP2_DNLP_CTRL_15 0x1990
+#define VPP2_VE_ENABLE_CTRL 0x19a1
+#define VPP2_VE_DEMO_LEFT_TOP_SCREEN_WIDTH 0x19a2
+#define VPP2_VE_DEMO_CENTER_BAR 0x19a3
+#define VPP2_VE_H_V_SIZE 0x19a4
+#define VPP2_VDO_MEAS_CTRL 0x19a8
+#define VPP2_VDO_MEAS_VS_COUNT_HI 0x19a9
+#define VPP2_VDO_MEAS_VS_COUNT_LO 0x19aa
+#define VPP2_OSD_VSC_PHASE_STEP 0x19c0
+#define VPP2_OSD_VSC_INI_PHASE 0x19c1
+#define VPP2_OSD_VSC_CTRL0 0x19c2
+#define VPP2_OSD_HSC_PHASE_STEP 0x19c3
+#define VPP2_OSD_HSC_INI_PHASE 0x19c4
+#define VPP2_OSD_HSC_CTRL0 0x19c5
+#define VPP2_OSD_HSC_INI_PAT_CTRL 0x19c6
+#define VPP2_OSD_SC_DUMMY_DATA 0x19c7
+#define VPP2_OSD_SC_CTRL0 0x19c8
+#define VPP2_OSD_SCI_WH_M1 0x19c9
+#define VPP2_OSD_SCO_H_START_END 0x19ca
+#define VPP2_OSD_SCO_V_START_END 0x19cb
+#define VPP2_OSD_SCALE_COEF_IDX 0x19cc
+#define VPP2_OSD_SCALE_COEF 0x19cd
+#define VPP2_INT_LINE_NUM 0x19ce
+
+/* viu */
+#define VIU_ADDR_START 0x1a00
+#define VIU_ADDR_END 0x1aff
+#define VIU_SW_RESET 0x1a01
+#define VIU_MISC_CTRL0 0x1a06
+#define VIU_MISC_CTRL1 0x1a07
+#define D2D3_INTF_LENGTH 0x1a08
+#define D2D3_INTF_CTRL0 0x1a09
+#define VIU_OSD1_CTRL_STAT 0x1a10
+#define VIU_OSD1_CTRL_STAT2 0x1a2d
+#define VIU_OSD1_COLOR_ADDR 0x1a11
+#define VIU_OSD1_COLOR 0x1a12
+#define VIU_OSD1_TCOLOR_AG0 0x1a17
+#define VIU_OSD1_TCOLOR_AG1 0x1a18
+#define VIU_OSD1_TCOLOR_AG2 0x1a19
+#define VIU_OSD1_TCOLOR_AG3 0x1a1a
+#define VIU_OSD1_BLK0_CFG_W0 0x1a1b
+#define VIU_OSD1_BLK1_CFG_W0 0x1a1f
+#define VIU_OSD1_BLK2_CFG_W0 0x1a23
+#define VIU_OSD1_BLK3_CFG_W0 0x1a27
+#define VIU_OSD1_BLK0_CFG_W1 0x1a1c
+#define VIU_OSD1_BLK1_CFG_W1 0x1a20
+#define VIU_OSD1_BLK2_CFG_W1 0x1a24
+#define VIU_OSD1_BLK3_CFG_W1 0x1a28
+#define VIU_OSD1_BLK0_CFG_W2 0x1a1d
+#define VIU_OSD1_BLK1_CFG_W2 0x1a21
+#define VIU_OSD1_BLK2_CFG_W2 0x1a25
+#define VIU_OSD1_BLK3_CFG_W2 0x1a29
+#define VIU_OSD1_BLK0_CFG_W3 0x1a1e
+#define VIU_OSD1_BLK1_CFG_W3 0x1a22
+#define VIU_OSD1_BLK2_CFG_W3 0x1a26
+#define VIU_OSD1_BLK3_CFG_W3 0x1a2a
+#define VIU_OSD1_BLK0_CFG_W4 0x1a13
+#define VIU_OSD1_BLK1_CFG_W4 0x1a14
+#define VIU_OSD1_BLK2_CFG_W4 0x1a15
+#define VIU_OSD1_BLK3_CFG_W4 0x1a16
+#define VIU_OSD1_FIFO_CTRL_STAT 0x1a2b
+#define VIU_OSD1_TEST_RDDATA 0x1a2c
+#define VIU_OSD1_PROT_CTRL 0x1a2e
+#define VIU_OSD2_CTRL_STAT 0x1a30
+#define VIU_OSD2_CTRL_STAT2 0x1a4d
+#define VIU_OSD2_COLOR_ADDR 0x1a31
+#define VIU_OSD2_COLOR 0x1a32
+#define VIU_OSD2_HL1_H_START_END 0x1a33
+#define VIU_OSD2_HL1_V_START_END 0x1a34
+#define VIU_OSD2_HL2_H_START_END 0x1a35
+#define VIU_OSD2_HL2_V_START_END 0x1a36
+#define VIU_OSD2_TCOLOR_AG0 0x1a37
+#define VIU_OSD2_TCOLOR_AG1 0x1a38
+#define VIU_OSD2_TCOLOR_AG2 0x1a39
+#define VIU_OSD2_TCOLOR_AG3 0x1a3a
+#define VIU_OSD2_BLK0_CFG_W0 0x1a3b
+#define VIU_OSD2_BLK1_CFG_W0 0x1a3f
+#define VIU_OSD2_BLK2_CFG_W0 0x1a43
+#define VIU_OSD2_BLK3_CFG_W0 0x1a47
+#define VIU_OSD2_BLK0_CFG_W1 0x1a3c
+#define VIU_OSD2_BLK1_CFG_W1 0x1a40
+#define VIU_OSD2_BLK2_CFG_W1 0x1a44
+#define VIU_OSD2_BLK3_CFG_W1 0x1a48
+#define VIU_OSD2_BLK0_CFG_W2 0x1a3d
+#define VIU_OSD2_BLK1_CFG_W2 0x1a41
+#define VIU_OSD2_BLK2_CFG_W2 0x1a45
+#define VIU_OSD2_BLK3_CFG_W2 0x1a49
+#define VIU_OSD2_BLK0_CFG_W3 0x1a3e
+#define VIU_OSD2_BLK1_CFG_W3 0x1a42
+#define VIU_OSD2_BLK2_CFG_W3 0x1a46
+#define VIU_OSD2_BLK3_CFG_W3 0x1a4a
+#define VIU_OSD2_BLK0_CFG_W4 0x1a64
+#define VIU_OSD2_BLK1_CFG_W4 0x1a65
+#define VIU_OSD2_BLK2_CFG_W4 0x1a66
+#define VIU_OSD2_BLK3_CFG_W4 0x1a67
+#define VIU_OSD2_FIFO_CTRL_STAT 0x1a4b
+#define VIU_OSD2_TEST_RDDATA 0x1a4c
+#define VIU_OSD2_PROT_CTRL 0x1a4e
+
+#define VD1_IF0_GEN_REG 0x1a50
+#define VD1_IF0_CANVAS0 0x1a51
+#define VD1_IF0_CANVAS1 0x1a52
+#define VD1_IF0_LUMA_X0 0x1a53
+#define VD1_IF0_LUMA_Y0 0x1a54
+#define VD1_IF0_CHROMA_X0 0x1a55
+#define VD1_IF0_CHROMA_Y0 0x1a56
+#define VD1_IF0_LUMA_X1 0x1a57
+#define VD1_IF0_LUMA_Y1 0x1a58
+#define VD1_IF0_CHROMA_X1 0x1a59
+#define VD1_IF0_CHROMA_Y1 0x1a5a
+#define VD1_IF0_RPT_LOOP 0x1a5b
+#define VD1_IF0_LUMA0_RPT_PAT 0x1a5c
+#define VD1_IF0_CHROMA0_RPT_PAT 0x1a5d
+#define VD1_IF0_LUMA1_RPT_PAT 0x1a5e
+#define VD1_IF0_CHROMA1_RPT_PAT 0x1a5f
+#define VD1_IF0_LUMA_PSEL 0x1a60
+#define VD1_IF0_CHROMA_PSEL 0x1a61
+#define VD1_IF0_DUMMY_PIXEL 0x1a62
+#define VD1_IF0_LUMA_FIFO_SIZE 0x1a63
+#define VD1_IF0_RANGE_MAP_Y 0x1a6a
+#define VD1_IF0_RANGE_MAP_CB 0x1a6b
+#define VD1_IF0_RANGE_MAP_CR 0x1a6c
+#define VD1_IF0_GEN_REG2 0x1a6d
+#define VD1_IF0_PROT_CNTL 0x1a6e
+#define VIU_VD1_FMT_CTRL 0x1a68
+#define VIU_VD1_FMT_W 0x1a69
+#define VD2_IF0_GEN_REG 0x1a70
+#define VD2_IF0_CANVAS0 0x1a71
+#define VD2_IF0_CANVAS1 0x1a72
+#define VD2_IF0_LUMA_X0 0x1a73
+#define VD2_IF0_LUMA_Y0 0x1a74
+#define VD2_IF0_CHROMA_X0 0x1a75
+#define VD2_IF0_CHROMA_Y0 0x1a76
+#define VD2_IF0_LUMA_X1 0x1a77
+#define VD2_IF0_LUMA_Y1 0x1a78
+#define VD2_IF0_CHROMA_X1 0x1a79
+#define VD2_IF0_CHROMA_Y1 0x1a7a
+#define VD2_IF0_RPT_LOOP 0x1a7b
+#define VD2_IF0_LUMA0_RPT_PAT 0x1a7c
+#define VD2_IF0_CHROMA0_RPT_PAT 0x1a7d
+#define VD2_IF0_LUMA1_RPT_PAT 0x1a7e
+#define VD2_IF0_CHROMA1_RPT_PAT 0x1a7f
+#define VD2_IF0_LUMA_PSEL 0x1a80
+#define VD2_IF0_CHROMA_PSEL 0x1a81
+#define VD2_IF0_DUMMY_PIXEL 0x1a82
+#define VD2_IF0_LUMA_FIFO_SIZE 0x1a83
+#define VD2_IF0_RANGE_MAP_Y 0x1a8a
+#define VD2_IF0_RANGE_MAP_CB 0x1a8b
+#define VD2_IF0_RANGE_MAP_CR 0x1a8c
+#define VD2_IF0_GEN_REG2 0x1a8d
+#define VD2_IF0_PROT_CNTL 0x1a8e
+#define VIU_VD2_FMT_CTRL 0x1a88
+#define VIU_VD2_FMT_W 0x1a89
+
+/* VIU Matrix Registers */
+#define VIU_OSD1_MATRIX_CTRL 0x1a90
+#define VIU_OSD1_MATRIX_COEF00_01 0x1a91
+#define VIU_OSD1_MATRIX_COEF02_10 0x1a92
+#define VIU_OSD1_MATRIX_COEF11_12 0x1a93
+#define VIU_OSD1_MATRIX_COEF20_21 0x1a94
+#define VIU_OSD1_MATRIX_COLMOD_COEF42 0x1a95
+#define VIU_OSD1_MATRIX_OFFSET0_1 0x1a96
+#define VIU_OSD1_MATRIX_OFFSET2 0x1a97
+#define VIU_OSD1_MATRIX_PRE_OFFSET0_1 0x1a98
+#define VIU_OSD1_MATRIX_PRE_OFFSET2 0x1a99
+#define VIU_OSD1_MATRIX_COEF22_30 0x1a9d
+#define VIU_OSD1_MATRIX_COEF31_32 0x1a9e
+#define VIU_OSD1_MATRIX_COEF40_41 0x1a9f
+#define VIU_OSD1_EOTF_CTL 0x1ad4
+#define VIU_OSD1_EOTF_COEF00_01 0x1ad5
+#define VIU_OSD1_EOTF_COEF02_10 0x1ad6
+#define VIU_OSD1_EOTF_COEF11_12 0x1ad7
+#define VIU_OSD1_EOTF_COEF20_21 0x1ad8
+#define VIU_OSD1_EOTF_COEF22_RS 0x1ad9
+#define VIU_OSD1_EOTF_LUT_ADDR_PORT 0x1ada
+#define VIU_OSD1_EOTF_LUT_DATA_PORT 0x1adb
+#define VIU_OSD1_OETF_CTL 0x1adc
+#define VIU_OSD1_OETF_LUT_ADDR_PORT 0x1add
+#define VIU_OSD1_OETF_LUT_DATA_PORT 0x1ade
+
+/* vpp */
+#define VPP_DUMMY_DATA 0x1d00
+#define VPP_LINE_IN_LENGTH 0x1d01
+#define VPP_PIC_IN_HEIGHT 0x1d02
+#define VPP_SCALE_COEF_IDX 0x1d03
+#define VPP_SCALE_COEF 0x1d04
+#define VPP_VSC_REGION12_STARTP 0x1d05
+#define VPP_VSC_REGION34_STARTP 0x1d06
+#define VPP_VSC_REGION4_ENDP 0x1d07
+#define VPP_VSC_START_PHASE_STEP 0x1d08
+#define VPP_VSC_REGION0_PHASE_SLOPE 0x1d09
+#define VPP_VSC_REGION1_PHASE_SLOPE 0x1d0a
+#define VPP_VSC_REGION3_PHASE_SLOPE 0x1d0b
+#define VPP_VSC_REGION4_PHASE_SLOPE 0x1d0c
+#define VPP_VSC_PHASE_CTRL 0x1d0d
+#define VPP_VSC_INI_PHASE 0x1d0e
+#define VPP_HSC_REGION12_STARTP 0x1d10
+#define VPP_HSC_REGION34_STARTP 0x1d11
+#define VPP_HSC_REGION4_ENDP 0x1d12
+#define VPP_HSC_START_PHASE_STEP 0x1d13
+#define VPP_HSC_REGION0_PHASE_SLOPE 0x1d14
+#define VPP_HSC_REGION1_PHASE_SLOPE 0x1d15
+#define VPP_HSC_REGION3_PHASE_SLOPE 0x1d16
+#define VPP_HSC_REGION4_PHASE_SLOPE 0x1d17
+#define VPP_HSC_PHASE_CTRL 0x1d18
+#define VPP_SC_MISC 0x1d19
+#define VPP_PREBLEND_VD1_H_START_END 0x1d1a
+#define VPP_PREBLEND_VD1_V_START_END 0x1d1b
+#define VPP_POSTBLEND_VD1_H_START_END 0x1d1c
+#define VPP_POSTBLEND_VD1_V_START_END 0x1d1d
+#define VPP_BLEND_VD2_H_START_END 0x1d1e
+#define VPP_BLEND_VD2_V_START_END 0x1d1f
+#define VPP_PREBLEND_H_SIZE 0x1d20
+#define VPP_POSTBLEND_H_SIZE 0x1d21
+#define VPP_HOLD_LINES 0x1d22
+#define VPP_BLEND_ONECOLOR_CTRL 0x1d23
+#define VPP_PREBLEND_CURRENT_XY 0x1d24
+#define VPP_POSTBLEND_CURRENT_XY 0x1d25
+#define VPP_MISC 0x1d26
+#define VPP_PREBLEND_ENABLE BIT(6)
+#define VPP_POSTBLEND_ENABLE BIT(7)
+#define VPP_OSD2_ALPHA_PREMULT BIT(8)
+#define VPP_OSD1_ALPHA_PREMULT BIT(9)
+#define VPP_VD1_POSTBLEND BIT(10)
+#define VPP_VD2_POSTBLEND BIT(11)
+#define VPP_OSD1_POSTBLEND BIT(12)
+#define VPP_OSD2_POSTBLEND BIT(13)
+#define VPP_VD1_PREBLEND BIT(14)
+#define VPP_VD2_PREBLEND BIT(15)
+#define VPP_OSD1_PREBLEND BIT(16)
+#define VPP_OSD2_PREBLEND BIT(17)
+#define VPP_OFIFO_SIZE 0x1d27
+#define VPP_FIFO_STATUS 0x1d28
+#define VPP_SMOKE_CTRL 0x1d29
+#define VPP_SMOKE1_VAL 0x1d2a
+#define VPP_SMOKE2_VAL 0x1d2b
+#define VPP_SMOKE3_VAL 0x1d2c
+#define VPP_SMOKE1_H_START_END 0x1d2d
+#define VPP_SMOKE1_V_START_END 0x1d2e
+#define VPP_SMOKE2_H_START_END 0x1d2f
+#define VPP_SMOKE2_V_START_END 0x1d30
+#define VPP_SMOKE3_H_START_END 0x1d31
+#define VPP_SMOKE3_V_START_END 0x1d32
+#define VPP_SCO_FIFO_CTRL 0x1d33
+#define VPP_HSC_PHASE_CTRL1 0x1d34
+#define VPP_HSC_INI_PAT_CTRL 0x1d35
+#define VPP_VADJ_CTRL 0x1d40
+#define VPP_VADJ1_Y 0x1d41
+#define VPP_VADJ1_MA_MB 0x1d42
+#define VPP_VADJ1_MC_MD 0x1d43
+#define VPP_VADJ2_Y 0x1d44
+#define VPP_VADJ2_MA_MB 0x1d45
+#define VPP_VADJ2_MC_MD 0x1d46
+#define VPP_HSHARP_CTRL 0x1d50
+#define VPP_HSHARP_LUMA_THRESH01 0x1d51
+#define VPP_HSHARP_LUMA_THRESH23 0x1d52
+#define VPP_HSHARP_CHROMA_THRESH01 0x1d53
+#define VPP_HSHARP_CHROMA_THRESH23 0x1d54
+#define VPP_HSHARP_LUMA_GAIN 0x1d55
+#define VPP_HSHARP_CHROMA_GAIN 0x1d56
+#define VPP_MATRIX_PROBE_COLOR 0x1d5c
+#define VPP_MATRIX_HL_COLOR 0x1d5d
+#define VPP_MATRIX_PROBE_POS 0x1d5e
+#define VPP_MATRIX_CTRL 0x1d5f
+#define VPP_MATRIX_COEF00_01 0x1d60
+#define VPP_MATRIX_COEF02_10 0x1d61
+#define VPP_MATRIX_COEF11_12 0x1d62
+#define VPP_MATRIX_COEF20_21 0x1d63
+#define VPP_MATRIX_COEF22 0x1d64
+#define VPP_MATRIX_OFFSET0_1 0x1d65
+#define VPP_MATRIX_OFFSET2 0x1d66
+#define VPP_MATRIX_PRE_OFFSET0_1 0x1d67
+#define VPP_MATRIX_PRE_OFFSET2 0x1d68
+#define VPP_DUMMY_DATA1 0x1d69
+#define VPP_GAINOFF_CTRL0 0x1d6a
+#define VPP_GAINOFF_CTRL1 0x1d6b
+#define VPP_GAINOFF_CTRL2 0x1d6c
+#define VPP_GAINOFF_CTRL3 0x1d6d
+#define VPP_GAINOFF_CTRL4 0x1d6e
+#define VPP_CHROMA_ADDR_PORT 0x1d70
+#define VPP_CHROMA_DATA_PORT 0x1d71
+#define VPP_GCLK_CTRL0 0x1d72
+#define VPP_GCLK_CTRL1 0x1d73
+#define VPP_SC_GCLK_CTRL 0x1d74
+#define VPP_MISC1 0x1d76
+#define VPP_BLACKEXT_CTRL 0x1d80
+#define VPP_DNLP_CTRL_00 0x1d81
+#define VPP_DNLP_CTRL_01 0x1d82
+#define VPP_DNLP_CTRL_02 0x1d83
+#define VPP_DNLP_CTRL_03 0x1d84
+#define VPP_DNLP_CTRL_04 0x1d85
+#define VPP_DNLP_CTRL_05 0x1d86
+#define VPP_DNLP_CTRL_06 0x1d87
+#define VPP_DNLP_CTRL_07 0x1d88
+#define VPP_DNLP_CTRL_08 0x1d89
+#define VPP_DNLP_CTRL_09 0x1d8a
+#define VPP_DNLP_CTRL_10 0x1d8b
+#define VPP_DNLP_CTRL_11 0x1d8c
+#define VPP_DNLP_CTRL_12 0x1d8d
+#define VPP_DNLP_CTRL_13 0x1d8e
+#define VPP_DNLP_CTRL_14 0x1d8f
+#define VPP_DNLP_CTRL_15 0x1d90
+#define VPP_PEAKING_HGAIN 0x1d91
+#define VPP_PEAKING_VGAIN 0x1d92
+#define VPP_PEAKING_NLP_1 0x1d93
+#define VPP_DOLBY_CTRL 0x1d93
+#define VPP_PEAKING_NLP_2 0x1d94
+#define VPP_PEAKING_NLP_3 0x1d95
+#define VPP_PEAKING_NLP_4 0x1d96
+#define VPP_PEAKING_NLP_5 0x1d97
+#define VPP_SHARP_LIMIT 0x1d98
+#define VPP_VLTI_CTRL 0x1d99
+#define VPP_HLTI_CTRL 0x1d9a
+#define VPP_CTI_CTRL 0x1d9b
+#define VPP_BLUE_STRETCH_1 0x1d9c
+#define VPP_BLUE_STRETCH_2 0x1d9d
+#define VPP_BLUE_STRETCH_3 0x1d9e
+#define VPP_CCORING_CTRL 0x1da0
+#define VPP_VE_ENABLE_CTRL 0x1da1
+#define VPP_VE_DEMO_LEFT_TOP_SCREEN_WIDTH 0x1da2
+#define VPP_VE_DEMO_CENTER_BAR 0x1da3
+#define VPP_VE_H_V_SIZE 0x1da4
+#define VPP_VDO_MEAS_CTRL 0x1da8
+#define VPP_VDO_MEAS_VS_COUNT_HI 0x1da9
+#define VPP_VDO_MEAS_VS_COUNT_LO 0x1daa
+#define VPP_INPUT_CTRL 0x1dab
+#define VPP_CTI_CTRL2 0x1dac
+#define VPP_PEAKING_SAT_THD1 0x1dad
+#define VPP_PEAKING_SAT_THD2 0x1dae
+#define VPP_PEAKING_SAT_THD3 0x1daf
+#define VPP_PEAKING_SAT_THD4 0x1db0
+#define VPP_PEAKING_SAT_THD5 0x1db1
+#define VPP_PEAKING_SAT_THD6 0x1db2
+#define VPP_PEAKING_SAT_THD7 0x1db3
+#define VPP_PEAKING_SAT_THD8 0x1db4
+#define VPP_PEAKING_SAT_THD9 0x1db5
+#define VPP_PEAKING_GAIN_ADD1 0x1db6
+#define VPP_PEAKING_GAIN_ADD2 0x1db7
+#define VPP_PEAKING_DNLP 0x1db8
+#define VPP_SHARP_DEMO_WIN_CTRL1 0x1db9
+#define VPP_SHARP_DEMO_WIN_CTRL2 0x1dba
+#define VPP_FRONT_HLTI_CTRL 0x1dbb
+#define VPP_FRONT_CTI_CTRL 0x1dbc
+#define VPP_FRONT_CTI_CTRL2 0x1dbd
+#define VPP_OSD_VSC_PHASE_STEP 0x1dc0
+#define VPP_OSD_VSC_INI_PHASE 0x1dc1
+#define VPP_OSD_VSC_CTRL0 0x1dc2
+#define VPP_OSD_HSC_PHASE_STEP 0x1dc3
+#define VPP_OSD_HSC_INI_PHASE 0x1dc4
+#define VPP_OSD_HSC_CTRL0 0x1dc5
+#define VPP_OSD_HSC_INI_PAT_CTRL 0x1dc6
+#define VPP_OSD_SC_DUMMY_DATA 0x1dc7
+#define VPP_OSD_SC_CTRL0 0x1dc8
+#define VPP_OSD_SCI_WH_M1 0x1dc9
+#define VPP_OSD_SCO_H_START_END 0x1dca
+#define VPP_OSD_SCO_V_START_END 0x1dcb
+#define VPP_OSD_SCALE_COEF_IDX 0x1dcc
+#define VPP_OSD_SCALE_COEF 0x1dcd
+#define VPP_INT_LINE_NUM 0x1dce
+
+/* viu2 */
+#define VIU2_ADDR_START 0x1e00
+#define VIU2_ADDR_END 0x1eff
+#define VIU2_SW_RESET 0x1e01
+#define VIU2_OSD1_CTRL_STAT 0x1e10
+#define VIU2_OSD1_CTRL_STAT2 0x1e2d
+#define VIU2_OSD1_COLOR_ADDR 0x1e11
+#define VIU2_OSD1_COLOR 0x1e12
+#define VIU2_OSD1_TCOLOR_AG0 0x1e17
+#define VIU2_OSD1_TCOLOR_AG1 0x1e18
+#define VIU2_OSD1_TCOLOR_AG2 0x1e19
+#define VIU2_OSD1_TCOLOR_AG3 0x1e1a
+#define VIU2_OSD1_BLK0_CFG_W0 0x1e1b
+#define VIU2_OSD1_BLK1_CFG_W0 0x1e1f
+#define VIU2_OSD1_BLK2_CFG_W0 0x1e23
+#define VIU2_OSD1_BLK3_CFG_W0 0x1e27
+#define VIU2_OSD1_BLK0_CFG_W1 0x1e1c
+#define VIU2_OSD1_BLK1_CFG_W1 0x1e20
+#define VIU2_OSD1_BLK2_CFG_W1 0x1e24
+#define VIU2_OSD1_BLK3_CFG_W1 0x1e28
+#define VIU2_OSD1_BLK0_CFG_W2 0x1e1d
+#define VIU2_OSD1_BLK1_CFG_W2 0x1e21
+#define VIU2_OSD1_BLK2_CFG_W2 0x1e25
+#define VIU2_OSD1_BLK3_CFG_W2 0x1e29
+#define VIU2_OSD1_BLK0_CFG_W3 0x1e1e
+#define VIU2_OSD1_BLK1_CFG_W3 0x1e22
+#define VIU2_OSD1_BLK2_CFG_W3 0x1e26
+#define VIU2_OSD1_BLK3_CFG_W3 0x1e2a
+#define VIU2_OSD1_BLK0_CFG_W4 0x1e13
+#define VIU2_OSD1_BLK1_CFG_W4 0x1e14
+#define VIU2_OSD1_BLK2_CFG_W4 0x1e15
+#define VIU2_OSD1_BLK3_CFG_W4 0x1e16
+#define VIU2_OSD1_FIFO_CTRL_STAT 0x1e2b
+#define VIU2_OSD1_TEST_RDDATA 0x1e2c
+#define VIU2_OSD1_PROT_CTRL 0x1e2e
+#define VIU2_OSD2_CTRL_STAT 0x1e30
+#define VIU2_OSD2_CTRL_STAT2 0x1e4d
+#define VIU2_OSD2_COLOR_ADDR 0x1e31
+#define VIU2_OSD2_COLOR 0x1e32
+#define VIU2_OSD2_HL1_H_START_END 0x1e33
+#define VIU2_OSD2_HL1_V_START_END 0x1e34
+#define VIU2_OSD2_HL2_H_START_END 0x1e35
+#define VIU2_OSD2_HL2_V_START_END 0x1e36
+#define VIU2_OSD2_TCOLOR_AG0 0x1e37
+#define VIU2_OSD2_TCOLOR_AG1 0x1e38
+#define VIU2_OSD2_TCOLOR_AG2 0x1e39
+#define VIU2_OSD2_TCOLOR_AG3 0x1e3a
+#define VIU2_OSD2_BLK0_CFG_W0 0x1e3b
+#define VIU2_OSD2_BLK1_CFG_W0 0x1e3f
+#define VIU2_OSD2_BLK2_CFG_W0 0x1e43
+#define VIU2_OSD2_BLK3_CFG_W0 0x1e47
+#define VIU2_OSD2_BLK0_CFG_W1 0x1e3c
+#define VIU2_OSD2_BLK1_CFG_W1 0x1e40
+#define VIU2_OSD2_BLK2_CFG_W1 0x1e44
+#define VIU2_OSD2_BLK3_CFG_W1 0x1e48
+#define VIU2_OSD2_BLK0_CFG_W2 0x1e3d
+#define VIU2_OSD2_BLK1_CFG_W2 0x1e41
+#define VIU2_OSD2_BLK2_CFG_W2 0x1e45
+#define VIU2_OSD2_BLK3_CFG_W2 0x1e49
+#define VIU2_OSD2_BLK0_CFG_W3 0x1e3e
+#define VIU2_OSD2_BLK1_CFG_W3 0x1e42
+#define VIU2_OSD2_BLK2_CFG_W3 0x1e46
+#define VIU2_OSD2_BLK3_CFG_W3 0x1e4a
+#define VIU2_OSD2_BLK0_CFG_W4 0x1e64
+#define VIU2_OSD2_BLK1_CFG_W4 0x1e65
+#define VIU2_OSD2_BLK2_CFG_W4 0x1e66
+#define VIU2_OSD2_BLK3_CFG_W4 0x1e67
+#define VIU2_OSD2_FIFO_CTRL_STAT 0x1e4b
+#define VIU2_OSD2_TEST_RDDATA 0x1e4c
+#define VIU2_OSD2_PROT_CTRL 0x1e4e
+#define VIU2_VD1_IF0_GEN_REG 0x1e50
+#define VIU2_VD1_IF0_CANVAS0 0x1e51
+#define VIU2_VD1_IF0_CANVAS1 0x1e52
+#define VIU2_VD1_IF0_LUMA_X0 0x1e53
+#define VIU2_VD1_IF0_LUMA_Y0 0x1e54
+#define VIU2_VD1_IF0_CHROMA_X0 0x1e55
+#define VIU2_VD1_IF0_CHROMA_Y0 0x1e56
+#define VIU2_VD1_IF0_LUMA_X1 0x1e57
+#define VIU2_VD1_IF0_LUMA_Y1 0x1e58
+#define VIU2_VD1_IF0_CHROMA_X1 0x1e59
+#define VIU2_VD1_IF0_CHROMA_Y1 0x1e5a
+#define VIU2_VD1_IF0_RPT_LOOP 0x1e5b
+#define VIU2_VD1_IF0_LUMA0_RPT_PAT 0x1e5c
+#define VIU2_VD1_IF0_CHROMA0_RPT_PAT 0x1e5d
+#define VIU2_VD1_IF0_LUMA1_RPT_PAT 0x1e5e
+#define VIU2_VD1_IF0_CHROMA1_RPT_PAT 0x1e5f
+#define VIU2_VD1_IF0_LUMA_PSEL 0x1e60
+#define VIU2_VD1_IF0_CHROMA_PSEL 0x1e61
+#define VIU2_VD1_IF0_DUMMY_PIXEL 0x1e62
+#define VIU2_VD1_IF0_LUMA_FIFO_SIZE 0x1e63
+#define VIU2_VD1_IF0_RANGE_MAP_Y 0x1e6a
+#define VIU2_VD1_IF0_RANGE_MAP_CB 0x1e6b
+#define VIU2_VD1_IF0_RANGE_MAP_CR 0x1e6c
+#define VIU2_VD1_IF0_GEN_REG2 0x1e6d
+#define VIU2_VD1_IF0_PROT_CNTL 0x1e6e
+#define VIU2_VD1_FMT_CTRL 0x1e68
+#define VIU2_VD1_FMT_W 0x1e69
+
+/* encode */
+#define ENCP_VFIFO2VD_CTL 0x1b58
+#define ENCP_VFIFO2VD_PIXEL_START 0x1b59
+#define ENCP_VFIFO2VD_PIXEL_END 0x1b5a
+#define ENCP_VFIFO2VD_LINE_TOP_START 0x1b5b
+#define ENCP_VFIFO2VD_LINE_TOP_END 0x1b5c
+#define ENCP_VFIFO2VD_LINE_BOT_START 0x1b5d
+#define ENCP_VFIFO2VD_LINE_BOT_END 0x1b5e
+#define VENC_SYNC_ROUTE 0x1b60
+#define VENC_VIDEO_EXSRC 0x1b61
+#define VENC_DVI_SETTING 0x1b62
+#define VENC_C656_CTRL 0x1b63
+#define VENC_UPSAMPLE_CTRL0 0x1b64
+#define VENC_UPSAMPLE_CTRL1 0x1b65
+#define VENC_UPSAMPLE_CTRL2 0x1b66
+#define TCON_INVERT_CTL 0x1b67
+#define VENC_VIDEO_PROG_MODE 0x1b68
+#define VENC_ENCI_LINE 0x1b69
+#define VENC_ENCI_PIXEL 0x1b6a
+#define VENC_ENCP_LINE 0x1b6b
+#define VENC_ENCP_PIXEL 0x1b6c
+#define VENC_STATA 0x1b6d
+#define VENC_INTCTRL 0x1b6e
+#define VENC_INTFLAG 0x1b6f
+#define VENC_VIDEO_TST_EN 0x1b70
+#define VENC_VIDEO_TST_MDSEL 0x1b71
+#define VENC_VIDEO_TST_Y 0x1b72
+#define VENC_VIDEO_TST_CB 0x1b73
+#define VENC_VIDEO_TST_CR 0x1b74
+#define VENC_VIDEO_TST_CLRBAR_STRT 0x1b75
+#define VENC_VIDEO_TST_CLRBAR_WIDTH 0x1b76
+#define VENC_VIDEO_TST_VDCNT_STSET 0x1b77
+#define VENC_VDAC_DACSEL0 0x1b78
+#define VENC_VDAC_DACSEL1 0x1b79
+#define VENC_VDAC_DACSEL2 0x1b7a
+#define VENC_VDAC_DACSEL3 0x1b7b
+#define VENC_VDAC_DACSEL4 0x1b7c
+#define VENC_VDAC_DACSEL5 0x1b7d
+#define VENC_VDAC_SETTING 0x1b7e
+#define VENC_VDAC_TST_VAL 0x1b7f
+#define VENC_VDAC_DAC0_GAINCTRL 0x1bf0
+#define VENC_VDAC_DAC0_OFFSET 0x1bf1
+#define VENC_VDAC_DAC1_GAINCTRL 0x1bf2
+#define VENC_VDAC_DAC1_OFFSET 0x1bf3
+#define VENC_VDAC_DAC2_GAINCTRL 0x1bf4
+#define VENC_VDAC_DAC2_OFFSET 0x1bf5
+#define VENC_VDAC_DAC3_GAINCTRL 0x1bf6
+#define VENC_VDAC_DAC3_OFFSET 0x1bf7
+#define VENC_VDAC_DAC4_GAINCTRL 0x1bf8
+#define VENC_VDAC_DAC4_OFFSET 0x1bf9
+#define VENC_VDAC_DAC5_GAINCTRL 0x1bfa
+#define VENC_VDAC_DAC5_OFFSET 0x1bfb
+#define VENC_VDAC_FIFO_CTRL 0x1bfc
+#define ENCL_TCON_INVERT_CTL 0x1bfd
+#define ENCP_VIDEO_EN 0x1b80
+#define ENCP_VIDEO_SYNC_MODE 0x1b81
+#define ENCP_MACV_EN 0x1b82
+#define ENCP_VIDEO_Y_SCL 0x1b83
+#define ENCP_VIDEO_PB_SCL 0x1b84
+#define ENCP_VIDEO_PR_SCL 0x1b85
+#define ENCP_VIDEO_SYNC_SCL 0x1b86
+#define ENCP_VIDEO_MACV_SCL 0x1b87
+#define ENCP_VIDEO_Y_OFFST 0x1b88
+#define ENCP_VIDEO_PB_OFFST 0x1b89
+#define ENCP_VIDEO_PR_OFFST 0x1b8a
+#define ENCP_VIDEO_SYNC_OFFST 0x1b8b
+#define ENCP_VIDEO_MACV_OFFST 0x1b8c
+#define ENCP_VIDEO_MODE 0x1b8d
+#define ENCP_VIDEO_MODE_ADV 0x1b8e
+#define ENCP_DBG_PX_RST 0x1b90
+#define ENCP_DBG_LN_RST 0x1b91
+#define ENCP_DBG_PX_INT 0x1b92
+#define ENCP_DBG_LN_INT 0x1b93
+#define ENCP_VIDEO_YFP1_HTIME 0x1b94
+#define ENCP_VIDEO_YFP2_HTIME 0x1b95
+#define ENCP_VIDEO_YC_DLY 0x1b96
+#define ENCP_VIDEO_MAX_PXCNT 0x1b97
+#define ENCP_VIDEO_HSPULS_BEGIN 0x1b98
+#define ENCP_VIDEO_HSPULS_END 0x1b99
+#define ENCP_VIDEO_HSPULS_SWITCH 0x1b9a
+#define ENCP_VIDEO_VSPULS_BEGIN 0x1b9b
+#define ENCP_VIDEO_VSPULS_END 0x1b9c
+#define ENCP_VIDEO_VSPULS_BLINE 0x1b9d
+#define ENCP_VIDEO_VSPULS_ELINE 0x1b9e
+#define ENCP_VIDEO_EQPULS_BEGIN 0x1b9f
+#define ENCP_VIDEO_EQPULS_END 0x1ba0
+#define ENCP_VIDEO_EQPULS_BLINE 0x1ba1
+#define ENCP_VIDEO_EQPULS_ELINE 0x1ba2
+#define ENCP_VIDEO_HAVON_END 0x1ba3
+#define ENCP_VIDEO_HAVON_BEGIN 0x1ba4
+#define ENCP_VIDEO_VAVON_ELINE 0x1baf
+#define ENCP_VIDEO_VAVON_BLINE 0x1ba6
+#define ENCP_VIDEO_HSO_BEGIN 0x1ba7
+#define ENCP_VIDEO_HSO_END 0x1ba8
+#define ENCP_VIDEO_VSO_BEGIN 0x1ba9
+#define ENCP_VIDEO_VSO_END 0x1baa
+#define ENCP_VIDEO_VSO_BLINE 0x1bab
+#define ENCP_VIDEO_VSO_ELINE 0x1bac
+#define ENCP_VIDEO_SYNC_WAVE_CURVE 0x1bad
+#define ENCP_VIDEO_MAX_LNCNT 0x1bae
+#define ENCP_VIDEO_SY_VAL 0x1bb0
+#define ENCP_VIDEO_SY2_VAL 0x1bb1
+#define ENCP_VIDEO_BLANKY_VAL 0x1bb2
+#define ENCP_VIDEO_BLANKPB_VAL 0x1bb3
+#define ENCP_VIDEO_BLANKPR_VAL 0x1bb4
+#define ENCP_VIDEO_HOFFST 0x1bb5
+#define ENCP_VIDEO_VOFFST 0x1bb6
+#define ENCP_VIDEO_RGB_CTRL 0x1bb7
+#define ENCP_VIDEO_FILT_CTRL 0x1bb8
+#define ENCP_VIDEO_OFLD_VPEQ_OFST 0x1bb9
+#define ENCP_VIDEO_OFLD_VOAV_OFST 0x1bba
+#define ENCP_VIDEO_MATRIX_CB 0x1bbb
+#define ENCP_VIDEO_MATRIX_CR 0x1bbc
+#define ENCP_VIDEO_RGBIN_CTRL 0x1bbd
+#define ENCP_MACV_BLANKY_VAL 0x1bc0
+#define ENCP_MACV_MAXY_VAL 0x1bc1
+#define ENCP_MACV_1ST_PSSYNC_STRT 0x1bc2
+#define ENCP_MACV_PSSYNC_STRT 0x1bc3
+#define ENCP_MACV_AGC_STRT 0x1bc4
+#define ENCP_MACV_AGC_END 0x1bc5
+#define ENCP_MACV_WAVE_END 0x1bc6
+#define ENCP_MACV_STRTLINE 0x1bc7
+#define ENCP_MACV_ENDLINE 0x1bc8
+#define ENCP_MACV_TS_CNT_MAX_L 0x1bc9
+#define ENCP_MACV_TS_CNT_MAX_H 0x1bca
+#define ENCP_MACV_TIME_DOWN 0x1bcb
+#define ENCP_MACV_TIME_LO 0x1bcc
+#define ENCP_MACV_TIME_UP 0x1bcd
+#define ENCP_MACV_TIME_RST 0x1bce
+#define ENCP_VBI_CTRL 0x1bd0
+#define ENCP_VBI_SETTING 0x1bd1
+#define ENCP_VBI_BEGIN 0x1bd2
+#define ENCP_VBI_WIDTH 0x1bd3
+#define ENCP_VBI_HVAL 0x1bd4
+#define ENCP_VBI_DATA0 0x1bd5
+#define ENCP_VBI_DATA1 0x1bd6
+#define C656_HS_ST 0x1be0
+#define C656_HS_ED 0x1be1
+#define C656_VS_LNST_E 0x1be2
+#define C656_VS_LNST_O 0x1be3
+#define C656_VS_LNED_E 0x1be4
+#define C656_VS_LNED_O 0x1be5
+#define C656_FS_LNST 0x1be6
+#define C656_FS_LNED 0x1be7
+#define ENCI_VIDEO_MODE 0x1b00
+#define ENCI_VIDEO_MODE_ADV 0x1b01
+#define ENCI_VIDEO_FSC_ADJ 0x1b02
+#define ENCI_VIDEO_BRIGHT 0x1b03
+#define ENCI_VIDEO_CONT 0x1b04
+#define ENCI_VIDEO_SAT 0x1b05
+#define ENCI_VIDEO_HUE 0x1b06
+#define ENCI_VIDEO_SCH 0x1b07
+#define ENCI_SYNC_MODE 0x1b08
+#define ENCI_SYNC_CTRL 0x1b09
+#define ENCI_SYNC_HSO_BEGIN 0x1b0a
+#define ENCI_SYNC_HSO_END 0x1b0b
+#define ENCI_SYNC_VSO_EVN 0x1b0c
+#define ENCI_SYNC_VSO_ODD 0x1b0d
+#define ENCI_SYNC_VSO_EVNLN 0x1b0e
+#define ENCI_SYNC_VSO_ODDLN 0x1b0f
+#define ENCI_SYNC_HOFFST 0x1b10
+#define ENCI_SYNC_VOFFST 0x1b11
+#define ENCI_SYNC_ADJ 0x1b12
+#define ENCI_RGB_SETTING 0x1b13
+#define ENCI_DE_H_BEGIN 0x1b16
+#define ENCI_DE_H_END 0x1b17
+#define ENCI_DE_V_BEGIN_EVEN 0x1b18
+#define ENCI_DE_V_END_EVEN 0x1b19
+#define ENCI_DE_V_BEGIN_ODD 0x1b1a
+#define ENCI_DE_V_END_ODD 0x1b1b
+#define ENCI_VBI_SETTING 0x1b20
+#define ENCI_VBI_CCDT_EVN 0x1b21
+#define ENCI_VBI_CCDT_ODD 0x1b22
+#define ENCI_VBI_CC525_LN 0x1b23
+#define ENCI_VBI_CC625_LN 0x1b24
+#define ENCI_VBI_WSSDT 0x1b25
+#define ENCI_VBI_WSS_LN 0x1b26
+#define ENCI_VBI_CGMSDT_L 0x1b27
+#define ENCI_VBI_CGMSDT_H 0x1b28
+#define ENCI_VBI_CGMS_LN 0x1b29
+#define ENCI_VBI_TTX_HTIME 0x1b2a
+#define ENCI_VBI_TTX_LN 0x1b2b
+#define ENCI_VBI_TTXDT0 0x1b2c
+#define ENCI_VBI_TTXDT1 0x1b2d
+#define ENCI_VBI_TTXDT2 0x1b2e
+#define ENCI_VBI_TTXDT3 0x1b2f
+#define ENCI_MACV_N0 0x1b30
+#define ENCI_MACV_N1 0x1b31
+#define ENCI_MACV_N2 0x1b32
+#define ENCI_MACV_N3 0x1b33
+#define ENCI_MACV_N4 0x1b34
+#define ENCI_MACV_N5 0x1b35
+#define ENCI_MACV_N6 0x1b36
+#define ENCI_MACV_N7 0x1b37
+#define ENCI_MACV_N8 0x1b38
+#define ENCI_MACV_N9 0x1b39
+#define ENCI_MACV_N10 0x1b3a
+#define ENCI_MACV_N11 0x1b3b
+#define ENCI_MACV_N12 0x1b3c
+#define ENCI_MACV_N13 0x1b3d
+#define ENCI_MACV_N14 0x1b3e
+#define ENCI_MACV_N15 0x1b3f
+#define ENCI_MACV_N16 0x1b40
+#define ENCI_MACV_N17 0x1b41
+#define ENCI_MACV_N18 0x1b42
+#define ENCI_MACV_N19 0x1b43
+#define ENCI_MACV_N20 0x1b44
+#define ENCI_MACV_N21 0x1b45
+#define ENCI_MACV_N22 0x1b46
+#define ENCI_DBG_PX_RST 0x1b48
+#define ENCI_DBG_FLDLN_RST 0x1b49
+#define ENCI_DBG_PX_INT 0x1b4a
+#define ENCI_DBG_FLDLN_INT 0x1b4b
+#define ENCI_DBG_MAXPX 0x1b4c
+#define ENCI_DBG_MAXLN 0x1b4d
+#define ENCI_MACV_MAX_AMP 0x1b50
+#define ENCI_MACV_PULSE_LO 0x1b51
+#define ENCI_MACV_PULSE_HI 0x1b52
+#define ENCI_MACV_BKP_MAX 0x1b53
+#define ENCI_CFILT_CTRL 0x1b54
+#define ENCI_CFILT7 0x1b55
+#define ENCI_YC_DELAY 0x1b56
+#define ENCI_VIDEO_EN 0x1b57
+#define ENCI_DVI_HSO_BEGIN 0x1c00
+#define ENCI_DVI_HSO_END 0x1c01
+#define ENCI_DVI_VSO_BLINE_EVN 0x1c02
+#define ENCI_DVI_VSO_BLINE_ODD 0x1c03
+#define ENCI_DVI_VSO_ELINE_EVN 0x1c04
+#define ENCI_DVI_VSO_ELINE_ODD 0x1c05
+#define ENCI_DVI_VSO_BEGIN_EVN 0x1c06
+#define ENCI_DVI_VSO_BEGIN_ODD 0x1c07
+#define ENCI_DVI_VSO_END_EVN 0x1c08
+#define ENCI_DVI_VSO_END_ODD 0x1c09
+#define ENCI_CFILT_CTRL2 0x1c0a
+#define ENCI_DACSEL_0 0x1c0b
+#define ENCI_DACSEL_1 0x1c0c
+#define ENCP_DACSEL_0 0x1c0d
+#define ENCP_DACSEL_1 0x1c0e
+#define ENCP_MAX_LINE_SWITCH_POINT 0x1c0f
+#define ENCI_TST_EN 0x1c10
+#define ENCI_TST_MDSEL 0x1c11
+#define ENCI_TST_Y 0x1c12
+#define ENCI_TST_CB 0x1c13
+#define ENCI_TST_CR 0x1c14
+#define ENCI_TST_CLRBAR_STRT 0x1c15
+#define ENCI_TST_CLRBAR_WIDTH 0x1c16
+#define ENCI_TST_VDCNT_STSET 0x1c17
+#define ENCI_VFIFO2VD_CTL 0x1c18
+#define ENCI_VFIFO2VD_PIXEL_START 0x1c19
+#define ENCI_VFIFO2VD_PIXEL_END 0x1c1a
+#define ENCI_VFIFO2VD_LINE_TOP_START 0x1c1b
+#define ENCI_VFIFO2VD_LINE_TOP_END 0x1c1c
+#define ENCI_VFIFO2VD_LINE_BOT_START 0x1c1d
+#define ENCI_VFIFO2VD_LINE_BOT_END 0x1c1e
+#define ENCI_VFIFO2VD_CTL2 0x1c1f
+#define ENCT_VFIFO2VD_CTL 0x1c20
+#define ENCT_VFIFO2VD_PIXEL_START 0x1c21
+#define ENCT_VFIFO2VD_PIXEL_END 0x1c22
+#define ENCT_VFIFO2VD_LINE_TOP_START 0x1c23
+#define ENCT_VFIFO2VD_LINE_TOP_END 0x1c24
+#define ENCT_VFIFO2VD_LINE_BOT_START 0x1c25
+#define ENCT_VFIFO2VD_LINE_BOT_END 0x1c26
+#define ENCT_VFIFO2VD_CTL2 0x1c27
+#define ENCT_TST_EN 0x1c28
+#define ENCT_TST_MDSEL 0x1c29
+#define ENCT_TST_Y 0x1c2a
+#define ENCT_TST_CB 0x1c2b
+#define ENCT_TST_CR 0x1c2c
+#define ENCT_TST_CLRBAR_STRT 0x1c2d
+#define ENCT_TST_CLRBAR_WIDTH 0x1c2e
+#define ENCT_TST_VDCNT_STSET 0x1c2f
+#define ENCP_DVI_HSO_BEGIN 0x1c30
+#define ENCP_DVI_HSO_END 0x1c31
+#define ENCP_DVI_VSO_BLINE_EVN 0x1c32
+#define ENCP_DVI_VSO_BLINE_ODD 0x1c33
+#define ENCP_DVI_VSO_ELINE_EVN 0x1c34
+#define ENCP_DVI_VSO_ELINE_ODD 0x1c35
+#define ENCP_DVI_VSO_BEGIN_EVN 0x1c36
+#define ENCP_DVI_VSO_BEGIN_ODD 0x1c37
+#define ENCP_DVI_VSO_END_EVN 0x1c38
+#define ENCP_DVI_VSO_END_ODD 0x1c39
+#define ENCP_DE_H_BEGIN 0x1c3a
+#define ENCP_DE_H_END 0x1c3b
+#define ENCP_DE_V_BEGIN_EVEN 0x1c3c
+#define ENCP_DE_V_END_EVEN 0x1c3d
+#define ENCP_DE_V_BEGIN_ODD 0x1c3e
+#define ENCP_DE_V_END_ODD 0x1c3f
+#define ENCI_SYNC_LINE_LENGTH 0x1c40
+#define ENCI_SYNC_PIXEL_EN 0x1c41
+#define ENCI_SYNC_TO_LINE_EN 0x1c42
+#define ENCI_SYNC_TO_PIXEL 0x1c43
+#define ENCP_SYNC_LINE_LENGTH 0x1c44
+#define ENCP_SYNC_PIXEL_EN 0x1c45
+#define ENCP_SYNC_TO_LINE_EN 0x1c46
+#define ENCP_SYNC_TO_PIXEL 0x1c47
+#define ENCT_SYNC_LINE_LENGTH 0x1c48
+#define ENCT_SYNC_PIXEL_EN 0x1c49
+#define ENCT_SYNC_TO_LINE_EN 0x1c4a
+#define ENCT_SYNC_TO_PIXEL 0x1c4b
+#define ENCL_SYNC_LINE_LENGTH 0x1c4c
+#define ENCL_SYNC_PIXEL_EN 0x1c4d
+#define ENCL_SYNC_TO_LINE_EN 0x1c4e
+#define ENCL_SYNC_TO_PIXEL 0x1c4f
+#define ENCP_VFIFO2VD_CTL2 0x1c50
+#define VENC_DVI_SETTING_MORE 0x1c51
+#define VENC_VDAC_DAC4_FILT_CTRL0 0x1c54
+#define VENC_VDAC_DAC4_FILT_CTRL1 0x1c55
+#define VENC_VDAC_DAC5_FILT_CTRL0 0x1c56
+#define VENC_VDAC_DAC5_FILT_CTRL1 0x1c57
+#define VENC_VDAC_DAC0_FILT_CTRL0 0x1c58
+#define VENC_VDAC_DAC0_FILT_CTRL1 0x1c59
+#define VENC_VDAC_DAC1_FILT_CTRL0 0x1c5a
+#define VENC_VDAC_DAC1_FILT_CTRL1 0x1c5b
+#define VENC_VDAC_DAC2_FILT_CTRL0 0x1c5c
+#define VENC_VDAC_DAC2_FILT_CTRL1 0x1c5d
+#define VENC_VDAC_DAC3_FILT_CTRL0 0x1c5e
+#define VENC_VDAC_DAC3_FILT_CTRL1 0x1c5f
+#define ENCT_VIDEO_EN 0x1c60
+#define ENCT_VIDEO_Y_SCL 0x1c61
+#define ENCT_VIDEO_PB_SCL 0x1c62
+#define ENCT_VIDEO_PR_SCL 0x1c63
+#define ENCT_VIDEO_Y_OFFST 0x1c64
+#define ENCT_VIDEO_PB_OFFST 0x1c65
+#define ENCT_VIDEO_PR_OFFST 0x1c66
+#define ENCT_VIDEO_MODE 0x1c67
+#define ENCT_VIDEO_MODE_ADV 0x1c68
+#define ENCT_DBG_PX_RST 0x1c69
+#define ENCT_DBG_LN_RST 0x1c6a
+#define ENCT_DBG_PX_INT 0x1c6b
+#define ENCT_DBG_LN_INT 0x1c6c
+#define ENCT_VIDEO_YFP1_HTIME 0x1c6d
+#define ENCT_VIDEO_YFP2_HTIME 0x1c6e
+#define ENCT_VIDEO_YC_DLY 0x1c6f
+#define ENCT_VIDEO_MAX_PXCNT 0x1c70
+#define ENCT_VIDEO_HAVON_END 0x1c71
+#define ENCT_VIDEO_HAVON_BEGIN 0x1c72
+#define ENCT_VIDEO_VAVON_ELINE 0x1c73
+#define ENCT_VIDEO_VAVON_BLINE 0x1c74
+#define ENCT_VIDEO_HSO_BEGIN 0x1c75
+#define ENCT_VIDEO_HSO_END 0x1c76
+#define ENCT_VIDEO_VSO_BEGIN 0x1c77
+#define ENCT_VIDEO_VSO_END 0x1c78
+#define ENCT_VIDEO_VSO_BLINE 0x1c79
+#define ENCT_VIDEO_VSO_ELINE 0x1c7a
+#define ENCT_VIDEO_MAX_LNCNT 0x1c7b
+#define ENCT_VIDEO_BLANKY_VAL 0x1c7c
+#define ENCT_VIDEO_BLANKPB_VAL 0x1c7d
+#define ENCT_VIDEO_BLANKPR_VAL 0x1c7e
+#define ENCT_VIDEO_HOFFST 0x1c7f
+#define ENCT_VIDEO_VOFFST 0x1c80
+#define ENCT_VIDEO_RGB_CTRL 0x1c81
+#define ENCT_VIDEO_FILT_CTRL 0x1c82
+#define ENCT_VIDEO_OFLD_VPEQ_OFST 0x1c83
+#define ENCT_VIDEO_OFLD_VOAV_OFST 0x1c84
+#define ENCT_VIDEO_MATRIX_CB 0x1c85
+#define ENCT_VIDEO_MATRIX_CR 0x1c86
+#define ENCT_VIDEO_RGBIN_CTRL 0x1c87
+#define ENCT_MAX_LINE_SWITCH_POINT 0x1c88
+#define ENCT_DACSEL_0 0x1c89
+#define ENCT_DACSEL_1 0x1c8a
+#define ENCL_VFIFO2VD_CTL 0x1c90
+#define ENCL_VFIFO2VD_PIXEL_START 0x1c91
+#define ENCL_VFIFO2VD_PIXEL_END 0x1c92
+#define ENCL_VFIFO2VD_LINE_TOP_START 0x1c93
+#define ENCL_VFIFO2VD_LINE_TOP_END 0x1c94
+#define ENCL_VFIFO2VD_LINE_BOT_START 0x1c95
+#define ENCL_VFIFO2VD_LINE_BOT_END 0x1c96
+#define ENCL_VFIFO2VD_CTL2 0x1c97
+#define ENCL_TST_EN 0x1c98
+#define ENCL_TST_MDSEL 0x1c99
+#define ENCL_TST_Y 0x1c9a
+#define ENCL_TST_CB 0x1c9b
+#define ENCL_TST_CR 0x1c9c
+#define ENCL_TST_CLRBAR_STRT 0x1c9d
+#define ENCL_TST_CLRBAR_WIDTH 0x1c9e
+#define ENCL_TST_VDCNT_STSET 0x1c9f
+#define ENCL_VIDEO_EN 0x1ca0
+#define ENCL_VIDEO_Y_SCL 0x1ca1
+#define ENCL_VIDEO_PB_SCL 0x1ca2
+#define ENCL_VIDEO_PR_SCL 0x1ca3
+#define ENCL_VIDEO_Y_OFFST 0x1ca4
+#define ENCL_VIDEO_PB_OFFST 0x1ca5
+#define ENCL_VIDEO_PR_OFFST 0x1ca6
+#define ENCL_VIDEO_MODE 0x1ca7
+#define ENCL_VIDEO_MODE_ADV 0x1ca8
+#define ENCL_DBG_PX_RST 0x1ca9
+#define ENCL_DBG_LN_RST 0x1caa
+#define ENCL_DBG_PX_INT 0x1cab
+#define ENCL_DBG_LN_INT 0x1cac
+#define ENCL_VIDEO_YFP1_HTIME 0x1cad
+#define ENCL_VIDEO_YFP2_HTIME 0x1cae
+#define ENCL_VIDEO_YC_DLY 0x1caf
+#define ENCL_VIDEO_MAX_PXCNT 0x1cb0
+#define ENCL_VIDEO_HAVON_END 0x1cb1
+#define ENCL_VIDEO_HAVON_BEGIN 0x1cb2
+#define ENCL_VIDEO_VAVON_ELINE 0x1cb3
+#define ENCL_VIDEO_VAVON_BLINE 0x1cb4
+#define ENCL_VIDEO_HSO_BEGIN 0x1cb5
+#define ENCL_VIDEO_HSO_END 0x1cb6
+#define ENCL_VIDEO_VSO_BEGIN 0x1cb7
+#define ENCL_VIDEO_VSO_END 0x1cb8
+#define ENCL_VIDEO_VSO_BLINE 0x1cb9
+#define ENCL_VIDEO_VSO_ELINE 0x1cba
+#define ENCL_VIDEO_MAX_LNCNT 0x1cbb
+#define ENCL_VIDEO_BLANKY_VAL 0x1cbc
+#define ENCL_VIDEO_BLANKPB_VAL 0x1cbd
+#define ENCL_VIDEO_BLANKPR_VAL 0x1cbe
+#define ENCL_VIDEO_HOFFST 0x1cbf
+#define ENCL_VIDEO_VOFFST 0x1cc0
+#define ENCL_VIDEO_RGB_CTRL 0x1cc1
+#define ENCL_VIDEO_FILT_CTRL 0x1cc2
+#define ENCL_VIDEO_OFLD_VPEQ_OFST 0x1cc3
+#define ENCL_VIDEO_OFLD_VOAV_OFST 0x1cc4
+#define ENCL_VIDEO_MATRIX_CB 0x1cc5
+#define ENCL_VIDEO_MATRIX_CR 0x1cc6
+#define ENCL_VIDEO_RGBIN_CTRL 0x1cc7
+#define ENCL_MAX_LINE_SWITCH_POINT 0x1cc8
+#define ENCL_DACSEL_0 0x1cc9
+#define ENCL_DACSEL_1 0x1cca
+#define RDMA_AHB_START_ADDR_MAN 0x1100
+#define RDMA_AHB_END_ADDR_MAN 0x1101
+#define RDMA_AHB_START_ADDR_1 0x1102
+#define RDMA_AHB_END_ADDR_1 0x1103
+#define RDMA_AHB_START_ADDR_2 0x1104
+#define RDMA_AHB_END_ADDR_2 0x1105
+#define RDMA_AHB_START_ADDR_3 0x1106
+#define RDMA_AHB_END_ADDR_3 0x1107
+#define RDMA_AHB_START_ADDR_4 0x1108
+#define RDMA_AHB_END_ADDR_4 0x1109
+#define RDMA_AHB_START_ADDR_5 0x110a
+#define RDMA_AHB_END_ADDR_5 0x110b
+#define RDMA_AHB_START_ADDR_6 0x110c
+#define RDMA_AHB_END_ADDR_6 0x110d
+#define RDMA_AHB_START_ADDR_7 0x110e
+#define RDMA_AHB_END_ADDR_7 0x110f
+#define RDMA_ACCESS_AUTO 0x1110
+#define RDMA_ACCESS_AUTO2 0x1111
+#define RDMA_ACCESS_AUTO3 0x1112
+#define RDMA_ACCESS_MAN 0x1113
+#define RDMA_CTRL 0x1114
+#define RDMA_STATUS 0x1115
+#define RDMA_STATUS2 0x1116
+#define RDMA_STATUS3 0x1117
+#define L_GAMMA_CNTL_PORT 0x1400
+#define L_GAMMA_DATA_PORT 0x1401
+#define L_GAMMA_ADDR_PORT 0x1402
+#define L_GAMMA_VCOM_HSWITCH_ADDR 0x1403
+#define L_RGB_BASE_ADDR 0x1405
+#define L_RGB_COEFF_ADDR 0x1406
+#define L_POL_CNTL_ADDR 0x1407
+#define L_DITH_CNTL_ADDR 0x1408
+#define L_GAMMA_PROBE_CTRL 0x1409
+#define L_GAMMA_PROBE_COLOR_L 0x140a
+#define L_GAMMA_PROBE_COLOR_H 0x140b
+#define L_GAMMA_PROBE_HL_COLOR 0x140c
+#define L_GAMMA_PROBE_POS_X 0x140d
+#define L_GAMMA_PROBE_POS_Y 0x140e
+#define L_STH1_HS_ADDR 0x1410
+#define L_STH1_HE_ADDR 0x1411
+#define L_STH1_VS_ADDR 0x1412
+#define L_STH1_VE_ADDR 0x1413
+#define L_STH2_HS_ADDR 0x1414
+#define L_STH2_HE_ADDR 0x1415
+#define L_STH2_VS_ADDR 0x1416
+#define L_STH2_VE_ADDR 0x1417
+#define L_OEH_HS_ADDR 0x1418
+#define L_OEH_HE_ADDR 0x1419
+#define L_OEH_VS_ADDR 0x141a
+#define L_OEH_VE_ADDR 0x141b
+#define L_VCOM_HSWITCH_ADDR 0x141c
+#define L_VCOM_VS_ADDR 0x141d
+#define L_VCOM_VE_ADDR 0x141e
+#define L_CPV1_HS_ADDR 0x141f
+#define L_CPV1_HE_ADDR 0x1420
+#define L_CPV1_VS_ADDR 0x1421
+#define L_CPV1_VE_ADDR 0x1422
+#define L_CPV2_HS_ADDR 0x1423
+#define L_CPV2_HE_ADDR 0x1424
+#define L_CPV2_VS_ADDR 0x1425
+#define L_CPV2_VE_ADDR 0x1426
+#define L_STV1_HS_ADDR 0x1427
+#define L_STV1_HE_ADDR 0x1428
+#define L_STV1_VS_ADDR 0x1429
+#define L_STV1_VE_ADDR 0x142a
+#define L_STV2_HS_ADDR 0x142b
+#define L_STV2_HE_ADDR 0x142c
+#define L_STV2_VS_ADDR 0x142d
+#define L_STV2_VE_ADDR 0x142e
+#define L_OEV1_HS_ADDR 0x142f
+#define L_OEV1_HE_ADDR 0x1430
+#define L_OEV1_VS_ADDR 0x1431
+#define L_OEV1_VE_ADDR 0x1432
+#define L_OEV2_HS_ADDR 0x1433
+#define L_OEV2_HE_ADDR 0x1434
+#define L_OEV2_VS_ADDR 0x1435
+#define L_OEV2_VE_ADDR 0x1436
+#define L_OEV3_HS_ADDR 0x1437
+#define L_OEV3_HE_ADDR 0x1438
+#define L_OEV3_VS_ADDR 0x1439
+#define L_OEV3_VE_ADDR 0x143a
+#define L_LCD_PWR_ADDR 0x143b
+#define L_LCD_PWM0_LO_ADDR 0x143c
+#define L_LCD_PWM0_HI_ADDR 0x143d
+#define L_LCD_PWM1_LO_ADDR 0x143e
+#define L_LCD_PWM1_HI_ADDR 0x143f
+#define L_INV_CNT_ADDR 0x1440
+#define L_TCON_MISC_SEL_ADDR 0x1441
+#define L_DUAL_PORT_CNTL_ADDR 0x1442
+#define MLVDS_CLK_CTL1_HI 0x1443
+#define MLVDS_CLK_CTL1_LO 0x1444
+#define L_TCON_DOUBLE_CTL 0x1449
+#define L_TCON_PATTERN_HI 0x144a
+#define L_TCON_PATTERN_LO 0x144b
+#define LDIM_BL_ADDR_PORT 0x144e
+#define LDIM_BL_DATA_PORT 0x144f
+#define L_DE_HS_ADDR 0x1451
+#define L_DE_HE_ADDR 0x1452
+#define L_DE_VS_ADDR 0x1453
+#define L_DE_VE_ADDR 0x1454
+#define L_HSYNC_HS_ADDR 0x1455
+#define L_HSYNC_HE_ADDR 0x1456
+#define L_HSYNC_VS_ADDR 0x1457
+#define L_HSYNC_VE_ADDR 0x1458
+#define L_VSYNC_HS_ADDR 0x1459
+#define L_VSYNC_HE_ADDR 0x145a
+#define L_VSYNC_VS_ADDR 0x145b
+#define L_VSYNC_VE_ADDR 0x145c
+#define L_LCD_MCU_CTL 0x145d
+#define DUAL_MLVDS_CTL 0x1460
+#define DUAL_MLVDS_LINE_START 0x1461
+#define DUAL_MLVDS_LINE_END 0x1462
+#define DUAL_MLVDS_PIXEL_W_START_L 0x1463
+#define DUAL_MLVDS_PIXEL_W_END_L 0x1464
+#define DUAL_MLVDS_PIXEL_W_START_R 0x1465
+#define DUAL_MLVDS_PIXEL_W_END_R 0x1466
+#define DUAL_MLVDS_PIXEL_R_START_L 0x1467
+#define DUAL_MLVDS_PIXEL_R_CNT_L 0x1468
+#define DUAL_MLVDS_PIXEL_R_START_R 0x1469
+#define DUAL_MLVDS_PIXEL_R_CNT_R 0x146a
+#define V_INVERSION_PIXEL 0x1470
+#define V_INVERSION_LINE 0x1471
+#define V_INVERSION_CONTROL 0x1472
+#define MLVDS2_CONTROL 0x1474
+#define MLVDS2_CONFIG_HI 0x1475
+#define MLVDS2_CONFIG_LO 0x1476
+#define MLVDS2_DUAL_GATE_WR_START 0x1477
+#define MLVDS2_DUAL_GATE_WR_END 0x1478
+#define MLVDS2_DUAL_GATE_RD_START 0x1479
+#define MLVDS2_DUAL_GATE_RD_END 0x147a
+#define MLVDS2_SECOND_RESET_CTL 0x147b
+#define MLVDS2_DUAL_GATE_CTL_HI 0x147c
+#define MLVDS2_DUAL_GATE_CTL_LO 0x147d
+#define MLVDS2_RESET_CONFIG_HI 0x147e
+#define MLVDS2_RESET_CONFIG_LO 0x147f
+#define GAMMA_CNTL_PORT 0x1480
+#define GAMMA_DATA_PORT 0x1481
+#define GAMMA_ADDR_PORT 0x1482
+#define GAMMA_VCOM_HSWITCH_ADDR 0x1483
+#define RGB_BASE_ADDR 0x1485
+#define RGB_COEFF_ADDR 0x1486
+#define POL_CNTL_ADDR 0x1487
+#define DITH_CNTL_ADDR 0x1488
+#define GAMMA_PROBE_CTRL 0x1489
+#define GAMMA_PROBE_COLOR_L 0x148a
+#define GAMMA_PROBE_COLOR_H 0x148b
+#define GAMMA_PROBE_HL_COLOR 0x148c
+#define GAMMA_PROBE_POS_X 0x148d
+#define GAMMA_PROBE_POS_Y 0x148e
+#define STH1_HS_ADDR 0x1490
+#define STH1_HE_ADDR 0x1491
+#define STH1_VS_ADDR 0x1492
+#define STH1_VE_ADDR 0x1493
+#define STH2_HS_ADDR 0x1494
+#define STH2_HE_ADDR 0x1495
+#define STH2_VS_ADDR 0x1496
+#define STH2_VE_ADDR 0x1497
+#define OEH_HS_ADDR 0x1498
+#define OEH_HE_ADDR 0x1499
+#define OEH_VS_ADDR 0x149a
+#define OEH_VE_ADDR 0x149b
+#define VCOM_HSWITCH_ADDR 0x149c
+#define VCOM_VS_ADDR 0x149d
+#define VCOM_VE_ADDR 0x149e
+#define CPV1_HS_ADDR 0x149f
+#define CPV1_HE_ADDR 0x14a0
+#define CPV1_VS_ADDR 0x14a1
+#define CPV1_VE_ADDR 0x14a2
+#define CPV2_HS_ADDR 0x14a3
+#define CPV2_HE_ADDR 0x14a4
+#define CPV2_VS_ADDR 0x14a5
+#define CPV2_VE_ADDR 0x14a6
+#define STV1_HS_ADDR 0x14a7
+#define STV1_HE_ADDR 0x14a8
+#define STV1_VS_ADDR 0x14a9
+#define STV1_VE_ADDR 0x14aa
+#define STV2_HS_ADDR 0x14ab
+#define STV2_HE_ADDR 0x14ac
+#define STV2_VS_ADDR 0x14ad
+#define STV2_VE_ADDR 0x14ae
+#define OEV1_HS_ADDR 0x14af
+#define OEV1_HE_ADDR 0x14b0
+#define OEV1_VS_ADDR 0x14b1
+#define OEV1_VE_ADDR 0x14b2
+#define OEV2_HS_ADDR 0x14b3
+#define OEV2_HE_ADDR 0x14b4
+#define OEV2_VS_ADDR 0x14b5
+#define OEV2_VE_ADDR 0x14b6
+#define OEV3_HS_ADDR 0x14b7
+#define OEV3_HE_ADDR 0x14b8
+#define OEV3_VS_ADDR 0x14b9
+#define OEV3_VE_ADDR 0x14ba
+#define LCD_PWR_ADDR 0x14bb
+#define LCD_PWM0_LO_ADDR 0x14bc
+#define LCD_PWM0_HI_ADDR 0x14bd
+#define LCD_PWM1_LO_ADDR 0x14be
+#define LCD_PWM1_HI_ADDR 0x14bf
+#define INV_CNT_ADDR 0x14c0
+#define TCON_MISC_SEL_ADDR 0x14c1
+#define DUAL_PORT_CNTL_ADDR 0x14c2
+#define MLVDS_CONTROL 0x14c3
+#define MLVDS_RESET_PATTERN_HI 0x14c4
+#define MLVDS_RESET_PATTERN_LO 0x14c5
+#define MLVDS_RESET_PATTERN_EXT 0x14c6
+#define MLVDS_CONFIG_HI 0x14c7
+#define MLVDS_CONFIG_LO 0x14c8
+#define TCON_DOUBLE_CTL 0x14c9
+#define TCON_PATTERN_HI 0x14ca
+#define TCON_PATTERN_LO 0x14cb
+#define TCON_CONTROL_HI 0x14cc
+#define TCON_CONTROL_LO 0x14cd
+#define LVDS_BLANK_DATA_HI 0x14ce
+#define LVDS_BLANK_DATA_LO 0x14cf
+#define LVDS_PACK_CNTL_ADDR 0x14d0
+#define DE_HS_ADDR 0x14d1
+#define DE_HE_ADDR 0x14d2
+#define DE_VS_ADDR 0x14d3
+#define DE_VE_ADDR 0x14d4
+#define HSYNC_HS_ADDR 0x14d5
+#define HSYNC_HE_ADDR 0x14d6
+#define HSYNC_VS_ADDR 0x14d7
+#define HSYNC_VE_ADDR 0x14d8
+#define VSYNC_HS_ADDR 0x14d9
+#define VSYNC_HE_ADDR 0x14da
+#define VSYNC_VS_ADDR 0x14db
+#define VSYNC_VE_ADDR 0x14dc
+#define LCD_MCU_CTL 0x14dd
+#define LCD_MCU_DATA_0 0x14de
+#define LCD_MCU_DATA_1 0x14df
+#define LVDS_GEN_CNTL 0x14e0
+#define LVDS_PHY_CNTL0 0x14e1
+#define LVDS_PHY_CNTL1 0x14e2
+#define LVDS_PHY_CNTL2 0x14e3
+#define LVDS_PHY_CNTL3 0x14e4
+#define LVDS_PHY_CNTL4 0x14e5
+#define LVDS_PHY_CNTL5 0x14e6
+#define LVDS_SRG_TEST 0x14e8
+#define LVDS_BIST_MUX0 0x14e9
+#define LVDS_BIST_MUX1 0x14ea
+#define LVDS_BIST_FIXED0 0x14eb
+#define LVDS_BIST_FIXED1 0x14ec
+#define LVDS_BIST_CNTL0 0x14ed
+#define LVDS_CLKB_CLKA 0x14ee
+#define LVDS_PHY_CLK_CNTL 0x14ef
+#define LVDS_SER_EN 0x14f0
+#define LVDS_PHY_CNTL6 0x14f1
+#define LVDS_PHY_CNTL7 0x14f2
+#define LVDS_PHY_CNTL8 0x14f3
+#define MLVDS_CLK_CTL0_HI 0x14f4
+#define MLVDS_CLK_CTL0_LO 0x14f5
+#define MLVDS_DUAL_GATE_WR_START 0x14f6
+#define MLVDS_DUAL_GATE_WR_END 0x14f7
+#define MLVDS_DUAL_GATE_RD_START 0x14f8
+#define MLVDS_DUAL_GATE_RD_END 0x14f9
+#define MLVDS_SECOND_RESET_CTL 0x14fa
+#define MLVDS_DUAL_GATE_CTL_HI 0x14fb
+#define MLVDS_DUAL_GATE_CTL_LO 0x14fc
+#define MLVDS_RESET_CONFIG_HI 0x14fd
+#define MLVDS_RESET_CONFIG_LO 0x14fe
+#define VPU_OSD1_MMC_CTRL 0x2701
+#define VPU_OSD2_MMC_CTRL 0x2702
+#define VPU_VD1_MMC_CTRL 0x2703
+#define VPU_VD2_MMC_CTRL 0x2704
+#define VPU_DI_IF1_MMC_CTRL 0x2705
+#define VPU_DI_MEM_MMC_CTRL 0x2706
+#define VPU_DI_INP_MMC_CTRL 0x2707
+#define VPU_DI_MTNRD_MMC_CTRL 0x2708
+#define VPU_DI_CHAN2_MMC_CTRL 0x2709
+#define VPU_DI_MTNWR_MMC_CTRL 0x270a
+#define VPU_DI_NRWR_MMC_CTRL 0x270b
+#define VPU_DI_DIWR_MMC_CTRL 0x270c
+#define VPU_VDIN0_MMC_CTRL 0x270d
+#define VPU_VDIN1_MMC_CTRL 0x270e
+#define VPU_BT656_MMC_CTRL 0x270f
+#define VPU_TVD3D_MMC_CTRL 0x2710
+#define VPU_TVDVBI_MMC_CTRL 0x2711
+#define VPU_TVDVBI_VSLATCH_ADDR 0x2712
+#define VPU_TVDVBI_WRRSP_ADDR 0x2713
+#define VPU_VDIN_PRE_ARB_CTRL 0x2714
+#define VPU_VDISP_PRE_ARB_CTRL 0x2715
+#define VPU_VPUARB2_PRE_ARB_CTRL 0x2716
+#define VPU_OSD3_MMC_CTRL 0x2717
+#define VPU_OSD4_MMC_CTRL 0x2718
+#define VPU_VD3_MMC_CTRL 0x2719
+#define VPU_VIU_VENC_MUX_CTRL 0x271a
+#define VIU1_SEL_VENC_MASK 0x3
+#define VIU1_SEL_VENC_ENCL 0
+#define VIU1_SEL_VENC_ENCI 1
+#define VIU1_SEL_VENC_ENCP 2
+#define VIU1_SEL_VENC_ENCT 3
+#define VIU2_SEL_VENC_MASK 0xc
+#define VIU2_SEL_VENC_ENCL 0
+#define VIU2_SEL_VENC_ENCI (1 << 2)
+#define VIU2_SEL_VENC_ENCP (2 << 2)
+#define VIU2_SEL_VENC_ENCT (3 << 2)
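+/*
+ * Example (a sketch, not part of this patch): routing VIU1 to the ENCI
+ * encoder is a read-modify-write of the mux field:
+ *
+ *	u32 val = readl_relaxed(priv->io_base + _REG(VPU_VIU_VENC_MUX_CTRL));
+ *
+ *	val &= ~VIU1_SEL_VENC_MASK;
+ *	writel_relaxed(val | VIU1_SEL_VENC_ENCI,
+ *		       priv->io_base + _REG(VPU_VIU_VENC_MUX_CTRL));
+ */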
+#define VPU_HDMI_SETTING 0x271b
+#define ENCI_INFO_READ 0x271c
+#define ENCP_INFO_READ 0x271d
+#define ENCT_INFO_READ 0x271e
+#define ENCL_INFO_READ 0x271f
+#define VPU_SW_RESET 0x2720
+#define VPU_D2D3_MMC_CTRL 0x2721
+#define VPU_CONT_MMC_CTRL 0x2722
+#define VPU_CLK_GATE 0x2723
+#define VPU_RDMA_MMC_CTRL 0x2724
+#define VPU_MEM_PD_REG0 0x2725
+#define VPU_MEM_PD_REG1 0x2726
+#define VPU_HDMI_DATA_OVR 0x2727
+#define VPU_PROT1_MMC_CTRL 0x2728
+#define VPU_PROT2_MMC_CTRL 0x2729
+#define VPU_PROT3_MMC_CTRL 0x272a
+#define VPU_ARB4_V1_MMC_CTRL 0x272b
+#define VPU_ARB4_V2_MMC_CTRL 0x272c
+#define VPU_VPU_PWM_V0 0x2730
+#define VPU_VPU_PWM_V1 0x2731
+#define VPU_VPU_PWM_V2 0x2732
+#define VPU_VPU_PWM_V3 0x2733
+#define VPU_VPU_PWM_H0 0x2734
+#define VPU_VPU_PWM_H1 0x2735
+#define VPU_VPU_PWM_H2 0x2736
+#define VPU_VPU_PWM_H3 0x2737
+#define VPU_MISC_CTRL 0x2740
+#define VPU_ISP_GCLK_CTRL0 0x2741
+#define VPU_ISP_GCLK_CTRL1 0x2742
+#define VPU_VDIN_ASYNC_HOLD_CTRL 0x2743
+#define VPU_VDISP_ASYNC_HOLD_CTRL 0x2744
+#define VPU_VPUARB2_ASYNC_HOLD_CTRL 0x2745
+
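+/* The VPU_PROT1/2/3 blocks below repeat the same register layout at a 0x10 stride */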
+#define VPU_PROT1_CLK_GATE 0x2750
+#define VPU_PROT1_GEN_CNTL 0x2751
+#define VPU_PROT1_X_START_END 0x2752
+#define VPU_PROT1_Y_START_END 0x2753
+#define VPU_PROT1_Y_LEN_STEP 0x2754
+#define VPU_PROT1_RPT_LOOP 0x2755
+#define VPU_PROT1_RPT_PAT 0x2756
+#define VPU_PROT1_DDR 0x2757
+#define VPU_PROT1_RBUF_ROOM 0x2758
+#define VPU_PROT1_STAT_0 0x2759
+#define VPU_PROT1_STAT_1 0x275a
+#define VPU_PROT1_STAT_2 0x275b
+#define VPU_PROT1_REQ_ONOFF 0x275c
+#define VPU_PROT2_CLK_GATE 0x2760
+#define VPU_PROT2_GEN_CNTL 0x2761
+#define VPU_PROT2_X_START_END 0x2762
+#define VPU_PROT2_Y_START_END 0x2763
+#define VPU_PROT2_Y_LEN_STEP 0x2764
+#define VPU_PROT2_RPT_LOOP 0x2765
+#define VPU_PROT2_RPT_PAT 0x2766
+#define VPU_PROT2_DDR 0x2767
+#define VPU_PROT2_RBUF_ROOM 0x2768
+#define VPU_PROT2_STAT_0 0x2769
+#define VPU_PROT2_STAT_1 0x276a
+#define VPU_PROT2_STAT_2 0x276b
+#define VPU_PROT2_REQ_ONOFF 0x276c
+#define VPU_PROT3_CLK_GATE 0x2770
+#define VPU_PROT3_GEN_CNTL 0x2771
+#define VPU_PROT3_X_START_END 0x2772
+#define VPU_PROT3_Y_START_END 0x2773
+#define VPU_PROT3_Y_LEN_STEP 0x2774
+#define VPU_PROT3_RPT_LOOP 0x2775
+#define VPU_PROT3_RPT_PAT 0x2776
+#define VPU_PROT3_DDR 0x2777
+#define VPU_PROT3_RBUF_ROOM 0x2778
+#define VPU_PROT3_STAT_0 0x2779
+#define VPU_PROT3_STAT_1 0x277a
+#define VPU_PROT3_STAT_2 0x277b
+#define VPU_PROT3_REQ_ONOFF 0x277c
+
+/* osd super scale */
+#define OSDSR_HV_SIZEIN 0x3130
+#define OSDSR_CTRL_MODE 0x3131
+#define OSDSR_ABIC_HCOEF 0x3132
+#define OSDSR_YBIC_HCOEF 0x3133
+#define OSDSR_CBIC_HCOEF 0x3134
+#define OSDSR_ABIC_VCOEF 0x3135
+#define OSDSR_YBIC_VCOEF 0x3136
+#define OSDSR_CBIC_VCOEF 0x3137
+#define OSDSR_VAR_PARA 0x3138
+#define OSDSR_CONST_PARA 0x3139
+#define OSDSR_RKE_EXTWIN 0x313a
+#define OSDSR_UK_GRAD2DDIAG_TH_RATE 0x313b
+#define OSDSR_UK_GRAD2DDIAG_LIMIT 0x313c
+#define OSDSR_UK_GRAD2DADJA_TH_RATE 0x313d
+#define OSDSR_UK_GRAD2DADJA_LIMIT 0x313e
+#define OSDSR_UK_BST_GAIN 0x313f
+#define OSDSR_HVBLEND_TH 0x3140
+#define OSDSR_DEMO_WIND_TB 0x3141
+#define OSDSR_DEMO_WIND_LR 0x3142
+#define OSDSR_INT_BLANK_NUM 0x3143
+#define OSDSR_FRM_END_STAT 0x3144
+#define OSDSR_ABIC_HCOEF0 0x3145
+#define OSDSR_YBIC_HCOEF0 0x3146
+#define OSDSR_CBIC_HCOEF0 0x3147
+#define OSDSR_ABIC_VCOEF0 0x3148
+#define OSDSR_YBIC_VCOEF0 0x3149
+#define OSDSR_CBIC_VCOEF0 0x314a
+
+#endif /* __MESON_REGISTERS_H */
diff --git a/drivers/gpu/drm/meson/meson_vclk.c b/drivers/gpu/drm/meson/meson_vclk.c
new file mode 100644
index 000000000000..252cfd4b19b1
--- /dev/null
+++ b/drivers/gpu/drm/meson/meson_vclk.c
@@ -0,0 +1,167 @@
+/*
+ * Copyright (C) 2016 BayLibre, SAS
+ * Author: Neil Armstrong <narmstrong@baylibre.com>
+ * Copyright (C) 2015 Amlogic, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <drm/drmP.h>
+#include "meson_drv.h"
+#include "meson_vclk.h"
+
+/*
+ * VCLK is the "Pixel Clock" frequency generator, fed from a dedicated PLL.
+ * We handle the following encodings:
+ * - CVBS 27MHz generation, routed through VCLK2 to the VENCI and VDAC blocks
+ *
+ * What is missing:
+ * - HDMI Pixel Clock generation
+ */
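+
+/*
+ * Resulting clock path, as programmed below: HDMI PLL at 1.485GHz,
+ * vid_pll divider in bypass, VCLK2 divider set to 55 to reach 27MHz,
+ * then gated out to the ENCI and VDAC blocks.
+ */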
+
+/* HHI Registers */
+#define HHI_VID_PLL_CLK_DIV 0x1a0 /* 0x68 offset in data sheet */
+#define VID_PLL_EN BIT(19)
+#define VID_PLL_BYPASS BIT(18)
+#define VID_PLL_PRESET BIT(15)
+#define HHI_VIID_CLK_DIV 0x128 /* 0x4a offset in data sheet */
+#define VCLK2_DIV_MASK 0xff
+#define VCLK2_DIV_EN BIT(16)
+#define VCLK2_DIV_RESET BIT(17)
+#define CTS_VDAC_SEL_MASK (0xf << 28)
+#define CTS_VDAC_SEL_SHIFT 28
+#define HHI_VIID_CLK_CNTL 0x12c /* 0x4b offset in data sheet */
+#define VCLK2_EN BIT(19)
+#define VCLK2_SEL_MASK (0x7 << 16)
+#define VCLK2_SEL_SHIFT 16
+#define VCLK2_SOFT_RESET BIT(15)
+#define VCLK2_DIV1_EN BIT(0)
+#define HHI_VID_CLK_DIV 0x164 /* 0x59 offset in data sheet */
+#define CTS_ENCI_SEL_MASK (0xf << 28)
+#define CTS_ENCI_SEL_SHIFT 28
+#define HHI_VID_CLK_CNTL2 0x194 /* 0x65 offset in data sheet */
+#define CTS_ENCI_EN BIT(0)
+#define CTS_VDAC_EN BIT(4)
+
+#define HHI_VDAC_CNTL0 0x2F4 /* 0xbd offset in data sheet */
+#define HHI_VDAC_CNTL1 0x2F8 /* 0xbe offset in data sheet */
+
+#define HHI_HDMI_PLL_CNTL 0x320 /* 0xc8 offset in data sheet */
+#define HHI_HDMI_PLL_CNTL2 0x324 /* 0xc9 offset in data sheet */
+#define HHI_HDMI_PLL_CNTL3 0x328 /* 0xca offset in data sheet */
+#define HHI_HDMI_PLL_CNTL4 0x32C /* 0xcb offset in data sheet */
+#define HHI_HDMI_PLL_CNTL5 0x330 /* 0xcc offset in data sheet */
+#define HHI_HDMI_PLL_CNTL6 0x334 /* 0xcd offset in data sheet */
+
+#define HDMI_PLL_RESET BIT(28)
+#define HDMI_PLL_LOCK BIT(31)
+
+/*
+ * Setup VCLK2 for 27MHz, and enable clocks for ENCI and VDAC
+ *
+ * TOFIX: Refactor into a table to also handle the HDMI frequencies and paths
+ */
+static void meson_venci_cvbs_clock_config(struct meson_drm *priv)
+{
+ unsigned int val;
+
+ /* Setup PLL to output 1.485GHz */
+ if (meson_vpu_is_compatible(priv, "amlogic,meson-gxbb-vpu")) {
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL, 0x5800023d);
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL2, 0x00404e00);
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL3, 0x0d5c5091);
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL4, 0x801da72c);
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL5, 0x71486980);
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL6, 0x00000e55);
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL, 0x4800023d);
+ } else if (meson_vpu_is_compatible(priv, "amlogic,meson-gxm-vpu") ||
+ meson_vpu_is_compatible(priv, "amlogic,meson-gxl-vpu")) {
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL, 0x4000027b);
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL2, 0x800cb300);
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL3, 0xa6212844);
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL4, 0x0c4d000c);
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL5, 0x001fa729);
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL6, 0x01a31500);
+
+ /* Reset PLL */
+ regmap_update_bits(priv->hhi, HHI_HDMI_PLL_CNTL,
+ HDMI_PLL_RESET, HDMI_PLL_RESET);
+ regmap_update_bits(priv->hhi, HHI_HDMI_PLL_CNTL,
+ HDMI_PLL_RESET, 0);
+ }
+
+ /* Poll for lock bit */
+ regmap_read_poll_timeout(priv->hhi, HHI_HDMI_PLL_CNTL, val,
+ (val & HDMI_PLL_LOCK), 10, 0);
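+ /* (poll interval is 10us; a timeout argument of 0 means wait indefinitely) */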
+
+ /* Disable VCLK2 */
+ regmap_update_bits(priv->hhi, HHI_VIID_CLK_CNTL, VCLK2_EN, 0);
+
+ /* Disable vid_pll output clock */
+ regmap_update_bits(priv->hhi, HHI_VID_PLL_CLK_DIV, VID_PLL_EN, 0);
+ regmap_update_bits(priv->hhi, HHI_VID_PLL_CLK_DIV, VID_PLL_PRESET, 0);
+ /* Enable vid_pll bypass to HDMI pll */
+ regmap_update_bits(priv->hhi, HHI_VID_PLL_CLK_DIV,
+ VID_PLL_BYPASS, VID_PLL_BYPASS);
+ /* Enable the vid_pll output clock */
+ regmap_update_bits(priv->hhi, HHI_VID_PLL_CLK_DIV,
+ VID_PLL_EN, VID_PLL_EN);
+
+ /* Setup the VCLK2 divider value to achieve 27MHz */
+ regmap_update_bits(priv->hhi, HHI_VIID_CLK_DIV,
+ VCLK2_DIV_MASK, (55 - 1));
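+ /* (the divider field holds N - 1: 54 selects /55, and 1485MHz / 55 = 27MHz) */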
+
+ /* select vid_pll for vclk2 */
+ regmap_update_bits(priv->hhi, HHI_VIID_CLK_CNTL,
+ VCLK2_SEL_MASK, (4 << VCLK2_SEL_SHIFT));
+ /* enable vclk2 gate */
+ regmap_update_bits(priv->hhi, HHI_VIID_CLK_CNTL, VCLK2_EN, VCLK2_EN);
+
+ /* select vclk_div1 for enci */
+ regmap_update_bits(priv->hhi, HHI_VID_CLK_DIV,
+ CTS_ENCI_SEL_MASK, (8 << CTS_ENCI_SEL_SHIFT));
+ /* select vclk_div1 for vdac */
+ regmap_update_bits(priv->hhi, HHI_VIID_CLK_DIV,
+ CTS_VDAC_SEL_MASK, (8 << CTS_VDAC_SEL_SHIFT));
+
+ /* release vclk2_div_reset and enable vclk2_div */
+ regmap_update_bits(priv->hhi, HHI_VIID_CLK_DIV,
+ VCLK2_DIV_EN | VCLK2_DIV_RESET, VCLK2_DIV_EN);
+
+ /* enable vclk2_div1 gate */
+ regmap_update_bits(priv->hhi, HHI_VIID_CLK_CNTL,
+ VCLK2_DIV1_EN, VCLK2_DIV1_EN);
+
+ /* reset vclk2 */
+ regmap_update_bits(priv->hhi, HHI_VIID_CLK_CNTL,
+ VCLK2_SOFT_RESET, VCLK2_SOFT_RESET);
+ regmap_update_bits(priv->hhi, HHI_VIID_CLK_CNTL,
+ VCLK2_SOFT_RESET, 0);
+
+ /* enable enci_clk */
+ regmap_update_bits(priv->hhi, HHI_VID_CLK_CNTL2,
+ CTS_ENCI_EN, CTS_ENCI_EN);
+ /* enable vdac_clk */
+ regmap_update_bits(priv->hhi, HHI_VID_CLK_CNTL2,
+ CTS_VDAC_EN, CTS_VDAC_EN);
+}
+
+void meson_vclk_setup(struct meson_drm *priv, unsigned int target,
+ unsigned int freq)
+{
+ if (target == MESON_VCLK_TARGET_CVBS && freq == MESON_VCLK_CVBS)
+ meson_venci_cvbs_clock_config(priv);
+}
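+
+/*
+ * Usage sketch (this mirrors the call made from meson_venc.c): the CVBS
+ * path requests the fixed 27MHz pixel clock with
+ *
+ *	meson_vclk_setup(priv, MESON_VCLK_TARGET_CVBS, MESON_VCLK_CVBS);
+ */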
diff --git a/drivers/gpu/drm/meson/meson_vclk.h b/drivers/gpu/drm/meson/meson_vclk.h
new file mode 100644
index 000000000000..ec62735996de
--- /dev/null
+++ b/drivers/gpu/drm/meson/meson_vclk.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright (C) 2016 BayLibre, SAS
+ * Author: Neil Armstrong <narmstrong@baylibre.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+/* Video Clock */
+
+#ifndef __MESON_VCLK_H
+#define __MESON_VCLK_H
+
+enum {
+ MESON_VCLK_TARGET_CVBS = 0,
+};
+
+/* 27MHz is the CVBS Pixel Clock */
+#define MESON_VCLK_CVBS 27000
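+/* (the value is in kHz, matching DRM mode clock units) */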
+
+void meson_vclk_setup(struct meson_drm *priv, unsigned int target,
+ unsigned int freq);
+
+#endif /* __MESON_VCLK_H */
diff --git a/drivers/gpu/drm/meson/meson_venc.c b/drivers/gpu/drm/meson/meson_venc.c
new file mode 100644
index 000000000000..d836b2274531
--- /dev/null
+++ b/drivers/gpu/drm/meson/meson_venc.c
@@ -0,0 +1,254 @@
+/*
+ * Copyright (C) 2016 BayLibre, SAS
+ * Author: Neil Armstrong <narmstrong@baylibre.com>
+ * Copyright (C) 2015 Amlogic, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <drm/drmP.h>
+#include "meson_drv.h"
+#include "meson_venc.h"
+#include "meson_vpp.h"
+#include "meson_vclk.h"
+#include "meson_registers.h"
+
+/*
+ * The VENC handles encoding of the pixels into the output formats.
+ * We handle the following encodings:
+ * - CVBS Encoding via the ENCI encoder and VDAC digital to analog converter
+ *
+ * What is missing:
+ * - TMDS/HDMI Encoding via ENCI_DIV and ENCP
+ * - Setup of more clock rates for HDMI modes
+ * - LCD Panel encoding via ENCL
+ * - TV Panel encoding via ENCT
+ */
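+
+/*
+ * The two tables below carry the ENCI timings for the 50Hz PAL and
+ * 60Hz NTSC CVBS modes; meson_venci_cvbs_mode_set() below applies the
+ * selected one to the encoder registers.
+ */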
+
+struct meson_cvbs_enci_mode meson_cvbs_enci_pal = {
+ .mode_tag = MESON_VENC_MODE_CVBS_PAL,
+ .hso_begin = 3,
+ .hso_end = 129,
+ .vso_even = 3,
+ .vso_odd = 260,
+ .macv_max_amp = 7,
+ .video_prog_mode = 0xff,
+ .video_mode = 0x13,
+ .sch_adjust = 0x28,
+ .yc_delay = 0x343,
+ .pixel_start = 251,
+ .pixel_end = 1691,
+ .top_field_line_start = 22,
+ .top_field_line_end = 310,
+ .bottom_field_line_start = 23,
+ .bottom_field_line_end = 311,
+ .video_saturation = 9,
+ .video_contrast = 0,
+ .video_brightness = 0,
+ .video_hue = 0,
+ .analog_sync_adj = 0x8080,
+};
+
+struct meson_cvbs_enci_mode meson_cvbs_enci_ntsc = {
+ .mode_tag = MESON_VENC_MODE_CVBS_NTSC,
+ .hso_begin = 5,
+ .hso_end = 129,
+ .vso_even = 3,
+ .vso_odd = 260,
+ .macv_max_amp = 0xb,
+ .video_prog_mode = 0xf0,
+ .video_mode = 0x8,
+ .sch_adjust = 0x20,
+ .yc_delay = 0x333,
+ .pixel_start = 227,
+ .pixel_end = 1667,
+ .top_field_line_start = 18,
+ .top_field_line_end = 258,
+ .bottom_field_line_start = 19,
+ .bottom_field_line_end = 259,
+ .video_saturation = 18,
+ .video_contrast = 3,
+ .video_brightness = 0,
+ .video_hue = 0,
+ .analog_sync_adj = 0x9c00,
+};
+
+void meson_venci_cvbs_mode_set(struct meson_drm *priv,
+ struct meson_cvbs_enci_mode *mode)
+{
+ if (mode->mode_tag == priv->venc.current_mode)
+ return;
+
+ /* CVBS Filter settings */
+ writel_relaxed(0x12, priv->io_base + _REG(ENCI_CFILT_CTRL));
+ writel_relaxed(0x12, priv->io_base + _REG(ENCI_CFILT_CTRL2));
+
+ /* Digital Video Select: Interlace, clk27 clock, external */
+ writel_relaxed(0, priv->io_base + _REG(VENC_DVI_SETTING));
+
+ /* Reset Video Mode */
+ writel_relaxed(0, priv->io_base + _REG(ENCI_VIDEO_MODE));
+ writel_relaxed(0, priv->io_base + _REG(ENCI_VIDEO_MODE_ADV));
+
+ /* Horizontal sync signal output */
+ writel_relaxed(mode->hso_begin,
+ priv->io_base + _REG(ENCI_SYNC_HSO_BEGIN));
+ writel_relaxed(mode->hso_end,
+ priv->io_base + _REG(ENCI_SYNC_HSO_END));
+
+ /* Vertical Sync lines */
+ writel_relaxed(mode->vso_even,
+ priv->io_base + _REG(ENCI_SYNC_VSO_EVNLN));
+ writel_relaxed(mode->vso_odd,
+ priv->io_base + _REG(ENCI_SYNC_VSO_ODDLN));
+
+ /* Macrovision max amplitude change */
+ writel_relaxed(0x8100 + mode->macv_max_amp,
+ priv->io_base + _REG(ENCI_MACV_MAX_AMP));
+
+ /* Video mode */
+ writel_relaxed(mode->video_prog_mode,
+ priv->io_base + _REG(VENC_VIDEO_PROG_MODE));
+ writel_relaxed(mode->video_mode,
+ priv->io_base + _REG(ENCI_VIDEO_MODE));
+
+ /* Advanced Video Mode:
+ * Demux shifting 0x2
+ * Blank line end at line17/22
+ * High bandwidth Luma Filter
+ * Low bandwidth Chroma Filter
+ * Bypass luma low pass filter
+ * No macrovision on CSYNC
+ */
+ writel_relaxed(0x26, priv->io_base + _REG(ENCI_VIDEO_MODE_ADV));
+
+ writel(mode->sch_adjust, priv->io_base + _REG(ENCI_VIDEO_SCH));
+
+ /* Sync mode: Master mode, free run, send HSO/VSO out */
+ writel_relaxed(0x07, priv->io_base + _REG(ENCI_SYNC_MODE));
+
+ /* 0x3 Y, C, and Component Y delay */
+ writel_relaxed(mode->yc_delay, priv->io_base + _REG(ENCI_YC_DELAY));
+
+ /* Timings */
+ writel_relaxed(mode->pixel_start,
+ priv->io_base + _REG(ENCI_VFIFO2VD_PIXEL_START));
+ writel_relaxed(mode->pixel_end,
+ priv->io_base + _REG(ENCI_VFIFO2VD_PIXEL_END));
+
+ writel_relaxed(mode->top_field_line_start,
+ priv->io_base + _REG(ENCI_VFIFO2VD_LINE_TOP_START));
+ writel_relaxed(mode->top_field_line_end,
+ priv->io_base + _REG(ENCI_VFIFO2VD_LINE_TOP_END));
+
+ writel_relaxed(mode->bottom_field_line_start,
+ priv->io_base + _REG(ENCI_VFIFO2VD_LINE_BOT_START));
+ writel_relaxed(mode->bottom_field_line_end,
+ priv->io_base + _REG(ENCI_VFIFO2VD_LINE_BOT_END));
+
+ /* Internal VENC, internal VIU sync, internal video encoder */
+ writel_relaxed(0, priv->io_base + _REG(VENC_SYNC_ROUTE));
+
+ /* Un-reset the Interlaced TV Encoder */
+ writel_relaxed(0, priv->io_base + _REG(ENCI_DBG_PX_RST));
+
+ /* Enable Vfifo2vd, Y_Cb_Y_Cr select */
+ writel_relaxed(0x4e01, priv->io_base + _REG(ENCI_VFIFO2VD_CTL));
+
+ /* Power up the DACs */
+ writel_relaxed(0, priv->io_base + _REG(VENC_VDAC_SETTING));
+
+ /* Video Upsampling */
+ writel_relaxed(0x0061, priv->io_base + _REG(VENC_UPSAMPLE_CTRL0));
+ writel_relaxed(0x4061, priv->io_base + _REG(VENC_UPSAMPLE_CTRL1));
+ writel_relaxed(0x5061, priv->io_base + _REG(VENC_UPSAMPLE_CTRL2));
+
+ /* Select Interlace Y DACs */
+ writel_relaxed(0, priv->io_base + _REG(VENC_VDAC_DACSEL0));
+ writel_relaxed(0, priv->io_base + _REG(VENC_VDAC_DACSEL1));
+ writel_relaxed(0, priv->io_base + _REG(VENC_VDAC_DACSEL2));
+ writel_relaxed(0, priv->io_base + _REG(VENC_VDAC_DACSEL3));
+ writel_relaxed(0, priv->io_base + _REG(VENC_VDAC_DACSEL4));
+ writel_relaxed(0, priv->io_base + _REG(VENC_VDAC_DACSEL5));
+
+ /* Select ENCI for VIU */
+ meson_vpp_setup_mux(priv, MESON_VIU_VPP_MUX_ENCI);
+
+ /* Enable ENCI FIFO */
+ writel_relaxed(0x2000, priv->io_base + _REG(VENC_VDAC_FIFO_CTRL));
+
+ /* Select ENCI DACs 0, 1, 4, and 5 */
+ writel_relaxed(0x11, priv->io_base + _REG(ENCI_DACSEL_0));
+ writel_relaxed(0x11, priv->io_base + _REG(ENCI_DACSEL_1));
+
+ /* Interlace video enable */
+ writel_relaxed(1, priv->io_base + _REG(ENCI_VIDEO_EN));
+
+ /* Configure Video Saturation / Contrast / Brightness / Hue */
+ writel_relaxed(mode->video_saturation,
+ priv->io_base + _REG(ENCI_VIDEO_SAT));
+ writel_relaxed(mode->video_contrast,
+ priv->io_base + _REG(ENCI_VIDEO_CONT));
+ writel_relaxed(mode->video_brightness,
+ priv->io_base + _REG(ENCI_VIDEO_BRIGHT));
+ writel_relaxed(mode->video_hue,
+ priv->io_base + _REG(ENCI_VIDEO_HUE));
+
+ /* Enable DAC0 Filter */
+ writel_relaxed(0x1, priv->io_base + _REG(VENC_VDAC_DAC0_FILT_CTRL0));
+ writel_relaxed(0xfc48, priv->io_base + _REG(VENC_VDAC_DAC0_FILT_CTRL1));
+
+ /* 0 in Macrovision register 0 */
+ writel_relaxed(0, priv->io_base + _REG(ENCI_MACV_N0));
+
+ /* Analog Synchronization and color burst value adjust */
+ writel_relaxed(mode->analog_sync_adj,
+ priv->io_base + _REG(ENCI_SYNC_ADJ));
+
+ /* Setup 27MHz vclk2 for ENCI and VDAC */
+ meson_vclk_setup(priv, MESON_VCLK_TARGET_CVBS, MESON_VCLK_CVBS);
+
+ priv->venc.current_mode = mode->mode_tag;
+}
+
+/* Returns the current ENCI field polarity */
+unsigned int meson_venci_get_field(struct meson_drm *priv)
+{
+ return readl_relaxed(priv->io_base + _REG(ENCI_INFO_READ)) & BIT(29);
+}
+
+void meson_venc_enable_vsync(struct meson_drm *priv)
+{
+ writel_relaxed(2, priv->io_base + _REG(VENC_INTCTRL));
+}
+
+void meson_venc_disable_vsync(struct meson_drm *priv)
+{
+ writel_relaxed(0, priv->io_base + _REG(VENC_INTCTRL));
+}
+
+void meson_venc_init(struct meson_drm *priv)
+{
+ /* Disable all encoders */
+ writel_relaxed(0, priv->io_base + _REG(ENCI_VIDEO_EN));
+ writel_relaxed(0, priv->io_base + _REG(ENCP_VIDEO_EN));
+ writel_relaxed(0, priv->io_base + _REG(ENCL_VIDEO_EN));
+
+ /* Disable VSync IRQ */
+ meson_venc_disable_vsync(priv);
+
+ priv->venc.current_mode = MESON_VENC_MODE_NONE;
+}
diff --git a/drivers/gpu/drm/meson/meson_venc.h b/drivers/gpu/drm/meson/meson_venc.h
new file mode 100644
index 000000000000..77d4a7d82c44
--- /dev/null
+++ b/drivers/gpu/drm/meson/meson_venc.h
@@ -0,0 +1,72 @@
+/*
+ * Copyright (C) 2016 BayLibre, SAS
+ * Author: Neil Armstrong <narmstrong@baylibre.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+/*
+ * Video Encoders
+ * - ENCI : Interlace Video Encoder
+ * - ENCI_DVI : Interlace Video Encoder for DVI/HDMI
+ * - ENCP : Progressive Video Encoder
+ */
+
+#ifndef __MESON_VENC_H
+#define __MESON_VENC_H
+
+enum {
+ MESON_VENC_MODE_NONE = 0,
+ MESON_VENC_MODE_CVBS_PAL,
+ MESON_VENC_MODE_CVBS_NTSC,
+};
+
+struct meson_cvbs_enci_mode {
+ unsigned int mode_tag;
+ unsigned int hso_begin; /* HSO begin position */
+ unsigned int hso_end; /* HSO end position */
+ unsigned int vso_even; /* VSO even line */
+ unsigned int vso_odd; /* VSO odd line */
+ unsigned int macv_max_amp; /* Macrovision max amplitude */
+ unsigned int video_prog_mode;
+ unsigned int video_mode;
+ unsigned int sch_adjust;
+ unsigned int yc_delay;
+ unsigned int pixel_start;
+ unsigned int pixel_end;
+ unsigned int top_field_line_start;
+ unsigned int top_field_line_end;
+ unsigned int bottom_field_line_start;
+ unsigned int bottom_field_line_end;
+ unsigned int video_saturation;
+ unsigned int video_contrast;
+ unsigned int video_brightness;
+ unsigned int video_hue;
+ unsigned int analog_sync_adj;
+};
+
+/* CVBS Timings and Parameters */
+extern struct meson_cvbs_enci_mode meson_cvbs_enci_pal;
+extern struct meson_cvbs_enci_mode meson_cvbs_enci_ntsc;
+
+void meson_venci_cvbs_mode_set(struct meson_drm *priv,
+ struct meson_cvbs_enci_mode *mode);
+unsigned int meson_venci_get_field(struct meson_drm *priv);
+
+void meson_venc_enable_vsync(struct meson_drm *priv);
+void meson_venc_disable_vsync(struct meson_drm *priv);
+
+void meson_venc_init(struct meson_drm *priv);
+
+#endif /* __MESON_VENC_H */
diff --git a/drivers/gpu/drm/meson/meson_venc_cvbs.c b/drivers/gpu/drm/meson/meson_venc_cvbs.c
new file mode 100644
index 000000000000..c809c085fd78
--- /dev/null
+++ b/drivers/gpu/drm/meson/meson_venc_cvbs.c
@@ -0,0 +1,293 @@
+/*
+ * Copyright (C) 2016 BayLibre, SAS
+ * Author: Neil Armstrong <narmstrong@baylibre.com>
+ * Copyright (C) 2015 Amlogic, Inc. All rights reserved.
+ * Copyright (C) 2014 Endless Mobile
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * Written by:
+ * Jasper St. Pierre <jstpierre@mecheye.net>
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_graph.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_edid.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_atomic_helper.h>
+
+#include "meson_venc_cvbs.h"
+#include "meson_venc.h"
+#include "meson_registers.h"
+
+/* HHI VDAC Registers */
+#define HHI_VDAC_CNTL0 0x2F4 /* 0xbd offset in data sheet */
+#define HHI_VDAC_CNTL1 0x2F8 /* 0xbe offset in data sheet */
+
+struct meson_venc_cvbs {
+ struct drm_encoder encoder;
+ struct drm_connector connector;
+ struct meson_drm *priv;
+};
+#define encoder_to_meson_venc_cvbs(x) \
+ container_of(x, struct meson_venc_cvbs, encoder)
+
+#define connector_to_meson_venc_cvbs(x) \
+ container_of(x, struct meson_venc_cvbs, connector)
+
+/* Supported Modes */
+
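+/*
+ * DRM_MODE() packs the timings as: name, type, clock (kHz), hdisplay,
+ * hsync_start, hsync_end, htotal, hskew, vdisplay, vsync_start,
+ * vsync_end, vtotal, vscan, flags.
+ */
+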
+struct meson_cvbs_mode meson_cvbs_modes[MESON_CVBS_MODES_COUNT] = {
+ { /* PAL */
+ .enci = &meson_cvbs_enci_pal,
+ .mode = {
+ DRM_MODE("720x576i", DRM_MODE_TYPE_DRIVER, 13500,
+ 720, 732, 795, 864, 0, 576, 580, 586, 625, 0,
+ DRM_MODE_FLAG_INTERLACE),
+ .vrefresh = 50,
+ .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3,
+ },
+ },
+ { /* NTSC */
+ .enci = &meson_cvbs_enci_ntsc,
+ .mode = {
+ DRM_MODE("720x480i", DRM_MODE_TYPE_DRIVER, 13500,
+ 720, 739, 801, 858, 0, 480, 488, 494, 525, 0,
+ DRM_MODE_FLAG_INTERLACE),
+ .vrefresh = 60,
+ .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3,
+ },
+ },
+};
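+
+/*
+ * A back-of-the-envelope check of the timings above, not used by the
+ * code: frame rate = clock / (htotal * vtotal), so
+ * PAL:  13500 kHz / (864 * 625)  = 25 frames/s -> 50 fields/s
+ * NTSC: 13500 kHz / (858 * 525) ~= 29.97 frames/s -> ~59.94 fields/s
+ * matching the .vrefresh values of 50 and 60 (NTSC rounded up).
+ */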
+
+/* Connector */
+
+static void meson_cvbs_connector_destroy(struct drm_connector *connector)
+{
+ drm_connector_cleanup(connector);
+}
+
+static enum drm_connector_status
+meson_cvbs_connector_detect(struct drm_connector *connector, bool force)
+{
+ /* FIXME: Add load-detect or jack-detect if possible */
+ return connector_status_connected;
+}
+
+static int meson_cvbs_connector_get_modes(struct drm_connector *connector)
+{
+ struct drm_device *dev = connector->dev;
+ struct drm_display_mode *mode;
+ int i;
+
+ for (i = 0; i < MESON_CVBS_MODES_COUNT; ++i) {
+ struct meson_cvbs_mode *meson_mode = &meson_cvbs_modes[i];
+
+ mode = drm_mode_duplicate(dev, &meson_mode->mode);
+ if (!mode) {
+ DRM_ERROR("Failed to create a new display mode\n");
+ return 0;
+ }
+
+ drm_mode_probed_add(connector, mode);
+ }
+
+ return i;
+}
+
+static int meson_cvbs_connector_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ /* Validate the modes added in get_modes */
+ return MODE_OK;
+}
+
+static const struct drm_connector_funcs meson_cvbs_connector_funcs = {
+ .dpms = drm_atomic_helper_connector_dpms,
+ .detect = meson_cvbs_connector_detect,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .destroy = meson_cvbs_connector_destroy,
+ .reset = drm_atomic_helper_connector_reset,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+};
+
+static const
+struct drm_connector_helper_funcs meson_cvbs_connector_helper_funcs = {
+ .get_modes = meson_cvbs_connector_get_modes,
+ .mode_valid = meson_cvbs_connector_mode_valid,
+};
+
+/* Encoder */
+
+static void meson_venc_cvbs_encoder_destroy(struct drm_encoder *encoder)
+{
+ drm_encoder_cleanup(encoder);
+}
+
+static const struct drm_encoder_funcs meson_venc_cvbs_encoder_funcs = {
+ .destroy = meson_venc_cvbs_encoder_destroy,
+};
+
+static int meson_venc_cvbs_encoder_atomic_check(struct drm_encoder *encoder,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
+{
+ int i;
+
+ for (i = 0; i < MESON_CVBS_MODES_COUNT; ++i) {
+ struct meson_cvbs_mode *meson_mode = &meson_cvbs_modes[i];
+
+ if (drm_mode_equal(&crtc_state->mode, &meson_mode->mode))
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static void meson_venc_cvbs_encoder_disable(struct drm_encoder *encoder)
+{
+ struct meson_venc_cvbs *meson_venc_cvbs =
+ encoder_to_meson_venc_cvbs(encoder);
+ struct meson_drm *priv = meson_venc_cvbs->priv;
+
+ /* Disable CVBS VDAC */
+ regmap_write(priv->hhi, HHI_VDAC_CNTL0, 0);
+ regmap_write(priv->hhi, HHI_VDAC_CNTL1, 0);
+}
+
+static void meson_venc_cvbs_encoder_enable(struct drm_encoder *encoder)
+{
+ struct meson_venc_cvbs *meson_venc_cvbs =
+ encoder_to_meson_venc_cvbs(encoder);
+ struct meson_drm *priv = meson_venc_cvbs->priv;
+
+ /* VDAC0 source is not from ATV */
+ writel_bits_relaxed(BIT(5), 0, priv->io_base + _REG(VENC_VDAC_DACSEL0));
+
+ if (meson_vpu_is_compatible(priv, "amlogic,meson-gxbb-vpu"))
+ regmap_write(priv->hhi, HHI_VDAC_CNTL0, 1);
+ else if (meson_vpu_is_compatible(priv, "amlogic,meson-gxm-vpu") ||
+ meson_vpu_is_compatible(priv, "amlogic,meson-gxl-vpu"))
+ regmap_write(priv->hhi, HHI_VDAC_CNTL0, 0xf0001);
+
+ regmap_write(priv->hhi, HHI_VDAC_CNTL1, 0);
+}
+
+static void meson_venc_cvbs_encoder_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct meson_venc_cvbs *meson_venc_cvbs =
+ encoder_to_meson_venc_cvbs(encoder);
+ int i;
+
+ for (i = 0; i < MESON_CVBS_MODES_COUNT; ++i) {
+ struct meson_cvbs_mode *meson_mode = &meson_cvbs_modes[i];
+
+ if (drm_mode_equal(mode, &meson_mode->mode)) {
+ meson_venci_cvbs_mode_set(meson_venc_cvbs->priv,
+ meson_mode->enci);
+ break;
+ }
+ }
+}
+
+static const struct drm_encoder_helper_funcs
+ meson_venc_cvbs_encoder_helper_funcs = {
+ .atomic_check = meson_venc_cvbs_encoder_atomic_check,
+ .disable = meson_venc_cvbs_encoder_disable,
+ .enable = meson_venc_cvbs_encoder_enable,
+ .mode_set = meson_venc_cvbs_encoder_mode_set,
+};
+
+static bool meson_venc_cvbs_connector_is_available(struct meson_drm *priv)
+{
+ struct device_node *ep, *remote;
+
+ /* CVBS VDAC output is on the first port, first endpoint */
+ ep = of_graph_get_endpoint_by_regs(priv->dev->of_node, 0, 0);
+ if (!ep)
+ return false;
+
+ /* If a remote port is connected, consider the output enabled */
+ remote = of_graph_get_remote_port(ep);
+ of_node_put(ep);
+ if (remote) {
+ of_node_put(remote);
+ return true;
+ }
+
+ return false;
+}
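+
+/*
+ * For illustration only: a hypothetical DT fragment that the helper
+ * above would accept, with the CVBS connector wired to port 0,
+ * endpoint 0 of the VPU node (node and label names here are examples,
+ * not taken from the bindings):
+ *
+ * &vpu {
+ *     port@0 {
+ *         reg = <0>;
+ *         cvbs_vdac_out: endpoint {
+ *             remote-endpoint = <&cvbs_connector_in>;
+ *         };
+ *     };
+ * };
+ */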
+
+int meson_venc_cvbs_create(struct meson_drm *priv)
+{
+ struct drm_device *drm = priv->drm;
+ struct meson_venc_cvbs *meson_venc_cvbs;
+ struct drm_connector *connector;
+ struct drm_encoder *encoder;
+ int ret;
+
+ if (!meson_venc_cvbs_connector_is_available(priv)) {
+ dev_info(drm->dev, "CVBS Output connector not available\n");
+ return -ENODEV;
+ }
+
+ meson_venc_cvbs = devm_kzalloc(priv->dev, sizeof(*meson_venc_cvbs),
+ GFP_KERNEL);
+ if (!meson_venc_cvbs)
+ return -ENOMEM;
+
+ meson_venc_cvbs->priv = priv;
+ encoder = &meson_venc_cvbs->encoder;
+ connector = &meson_venc_cvbs->connector;
+
+ /* Connector */
+
+ drm_connector_helper_add(connector,
+ &meson_cvbs_connector_helper_funcs);
+
+ ret = drm_connector_init(drm, connector, &meson_cvbs_connector_funcs,
+ DRM_MODE_CONNECTOR_Composite);
+ if (ret) {
+ dev_err(priv->dev, "Failed to init CVBS connector\n");
+ return ret;
+ }
+
+ connector->interlace_allowed = 1;
+
+ /* Encoder */
+
+ drm_encoder_helper_add(encoder, &meson_venc_cvbs_encoder_helper_funcs);
+
+ ret = drm_encoder_init(drm, encoder, &meson_venc_cvbs_encoder_funcs,
+ DRM_MODE_ENCODER_TVDAC, "meson_venc_cvbs");
+ if (ret) {
+ dev_err(priv->dev, "Failed to init CVBS encoder\n");
+ return ret;
+ }
+
+ encoder->possible_crtcs = BIT(0);
+
+ drm_mode_connector_attach_encoder(connector, encoder);
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/meson/meson_venc_cvbs.h b/drivers/gpu/drm/meson/meson_venc_cvbs.h
new file mode 100644
index 000000000000..9256ccf9d931
--- /dev/null
+++ b/drivers/gpu/drm/meson/meson_venc_cvbs.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright (C) 2016 BayLibre, SAS
+ * Author: Neil Armstrong <narmstrong@baylibre.com>
+ * Copyright (C) 2014 Endless Mobile
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * Written by:
+ * Jasper St. Pierre <jstpierre@mecheye.net>
+ */
+
+#ifndef __MESON_VENC_CVBS_H
+#define __MESON_VENC_CVBS_H
+
+#include "meson_drv.h"
+#include "meson_venc.h"
+
+struct meson_cvbs_mode {
+ struct meson_cvbs_enci_mode *enci;
+ struct drm_display_mode mode;
+};
+
+#define MESON_CVBS_MODES_COUNT 2
+
+/* Modes supported by the CVBS output */
+extern struct meson_cvbs_mode meson_cvbs_modes[MESON_CVBS_MODES_COUNT];
+
+int meson_venc_cvbs_create(struct meson_drm *priv);
+
+#endif /* __MESON_VENC_CVBS_H */
diff --git a/drivers/gpu/drm/meson/meson_viu.c b/drivers/gpu/drm/meson/meson_viu.c
new file mode 100644
index 000000000000..a6de8ba7af19
--- /dev/null
+++ b/drivers/gpu/drm/meson/meson_viu.c
@@ -0,0 +1,331 @@
+/*
+ * Copyright (C) 2016 BayLibre, SAS
+ * Author: Neil Armstrong <narmstrong@baylibre.com>
+ * Copyright (C) 2015 Amlogic, Inc. All rights reserved.
+ * Copyright (C) 2014 Endless Mobile
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <drm/drmP.h>
+#include "meson_drv.h"
+#include "meson_viu.h"
+#include "meson_vpp.h"
+#include "meson_venc.h"
+#include "meson_canvas.h"
+#include "meson_registers.h"
+
+/*
+ * The VIU handles pixel scanout and the basic colorspace conversions.
+ * We handle the following features:
+ * - OSD1 RGB565/RGB888/xRGB8888 scanout
+ * - RGB conversion to x/cb/cr
+ * - Progressive or interlaced buffer scanout
+ * - OSD1 commit on vsync
+ * - HDR OSD matrix for GXL/GXM
+ *
+ * What is missing:
+ * - BGR888/xBGR8888/BGRx8888 modes
+ * - YUV4:2:2 Y0CbY1Cr scanout
+ * - Conversion to YUV 4:4:4 from 4:2:2 input
+ * - Colorkey alpha matching
+ * - Big-endian scanout
+ * - X/Y reverse scanout
+ * - Global alpha setup
+ * - OSD2 support, which would need interlace switching on vsync
+ * - OSD1 full scaling to support TV overscan
+ */
+
+/* OSD csc defines */
+
+enum viu_matrix_sel_e {
+ VIU_MATRIX_OSD_EOTF = 0,
+ VIU_MATRIX_OSD,
+};
+
+enum viu_lut_sel_e {
+ VIU_LUT_OSD_EOTF = 0,
+ VIU_LUT_OSD_OETF,
+};
+
+#define COEFF_NORM(a) ((int)((((a) * 2048.0) + 1) / 2))
+#define MATRIX_5X3_COEF_SIZE 24
+
+#define EOTF_COEFF_NORM(a) ((int)((((a) * 4096.0) + 1) / 2))
+#define EOTF_COEFF_SIZE 10
+#define EOTF_COEFF_RIGHTSHIFT 1
+
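+/*
+ * Worked example of the fixed-point helpers above (hand-computed):
+ * COEFF_NORM(a) rounds a * 1024, e.g.
+ * COEFF_NORM(0.437500) = (int)((0.4375 * 2048.0 + 1) / 2) = 448;
+ * EOTF_COEFF_NORM(1.0) = (int)((1.0 * 4096.0 + 1) / 2) = 2048.
+ */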
+static int RGB709_to_YUV709l_coeff[MATRIX_5X3_COEF_SIZE] = {
+ 0, 0, 0, /* pre offset */
+ COEFF_NORM(0.181873), COEFF_NORM(0.611831), COEFF_NORM(0.061765),
+ COEFF_NORM(-0.100251), COEFF_NORM(-0.337249), COEFF_NORM(0.437500),
+ COEFF_NORM(0.437500), COEFF_NORM(-0.397384), COEFF_NORM(-0.040116),
+ 0, 0, 0, /* 10'/11'/12' */
+ 0, 0, 0, /* 20'/21'/22' */
+ 64, 512, 512, /* offset */
+ 0, 0, 0 /* mode, right_shift, clip_en */
+};
+
+/* eotf matrix: bypass */
+static int eotf_bypass_coeff[EOTF_COEFF_SIZE] = {
+ EOTF_COEFF_NORM(1.0), EOTF_COEFF_NORM(0.0), EOTF_COEFF_NORM(0.0),
+ EOTF_COEFF_NORM(0.0), EOTF_COEFF_NORM(1.0), EOTF_COEFF_NORM(0.0),
+ EOTF_COEFF_NORM(0.0), EOTF_COEFF_NORM(0.0), EOTF_COEFF_NORM(1.0),
+ EOTF_COEFF_RIGHTSHIFT /* right shift */
+};
+
+void meson_viu_set_osd_matrix(struct meson_drm *priv,
+ enum viu_matrix_sel_e m_select,
+ int *m, bool csc_on)
+{
+ if (m_select == VIU_MATRIX_OSD) {
+ /* osd matrix, VIU_MATRIX_0 */
+ writel(((m[0] & 0xfff) << 16) | (m[1] & 0xfff),
+ priv->io_base + _REG(VIU_OSD1_MATRIX_PRE_OFFSET0_1));
+ writel(m[2] & 0xfff,
+ priv->io_base + _REG(VIU_OSD1_MATRIX_PRE_OFFSET2));
+ writel(((m[3] & 0x1fff) << 16) | (m[4] & 0x1fff),
+ priv->io_base + _REG(VIU_OSD1_MATRIX_COEF00_01));
+ writel(((m[5] & 0x1fff) << 16) | (m[6] & 0x1fff),
+ priv->io_base + _REG(VIU_OSD1_MATRIX_COEF02_10));
+ writel(((m[7] & 0x1fff) << 16) | (m[8] & 0x1fff),
+ priv->io_base + _REG(VIU_OSD1_MATRIX_COEF11_12));
+ writel(((m[9] & 0x1fff) << 16) | (m[10] & 0x1fff),
+ priv->io_base + _REG(VIU_OSD1_MATRIX_COEF20_21));
+
+ if (m[21]) {
+ writel(((m[11] & 0x1fff) << 16) | (m[12] & 0x1fff),
+ priv->io_base +
+ _REG(VIU_OSD1_MATRIX_COEF22_30));
+ writel(((m[13] & 0x1fff) << 16) | (m[14] & 0x1fff),
+ priv->io_base +
+ _REG(VIU_OSD1_MATRIX_COEF31_32));
+ writel(((m[15] & 0x1fff) << 16) | (m[16] & 0x1fff),
+ priv->io_base +
+ _REG(VIU_OSD1_MATRIX_COEF40_41));
+ writel(m[17] & 0x1fff, priv->io_base +
+ _REG(VIU_OSD1_MATRIX_COLMOD_COEF42));
+ } else {
+ writel((m[11] & 0x1fff) << 16, priv->io_base +
+ _REG(VIU_OSD1_MATRIX_COEF22_30));
+ }
+
+ writel(((m[18] & 0xfff) << 16) | (m[19] & 0xfff),
+ priv->io_base + _REG(VIU_OSD1_MATRIX_OFFSET0_1));
+ writel(m[20] & 0xfff,
+ priv->io_base + _REG(VIU_OSD1_MATRIX_OFFSET2));
+
+ writel_bits_relaxed(3 << 30, m[21] << 30,
+ priv->io_base + _REG(VIU_OSD1_MATRIX_COLMOD_COEF42));
+ writel_bits_relaxed(7 << 16, m[22] << 16,
+ priv->io_base + _REG(VIU_OSD1_MATRIX_COLMOD_COEF42));
+
+ /* 23 reserved for clipping control */
+ writel_bits_relaxed(BIT(0), csc_on ? BIT(0) : 0,
+ priv->io_base + _REG(VIU_OSD1_MATRIX_CTRL));
+ writel_bits_relaxed(BIT(1), 0,
+ priv->io_base + _REG(VIU_OSD1_MATRIX_CTRL));
+ } else if (m_select == VIU_MATRIX_OSD_EOTF) {
+ int i;
+
+ /* osd eotf matrix, VIU_MATRIX_OSD_EOTF */
+ for (i = 0; i < 5; i++)
+ writel(((m[i * 2] & 0x1fff) << 16) |
+ (m[i * 2 + 1] & 0x1fff), priv->io_base +
+ _REG(VIU_OSD1_EOTF_CTL + i + 1));
+
+ writel_bits_relaxed(BIT(30), csc_on ? BIT(30) : 0,
+ priv->io_base + _REG(VIU_OSD1_EOTF_CTL));
+ writel_bits_relaxed(BIT(31), csc_on ? BIT(31) : 0,
+ priv->io_base + _REG(VIU_OSD1_EOTF_CTL));
+ }
+}
+
+#define OSD_EOTF_LUT_SIZE 33
+#define OSD_OETF_LUT_SIZE 41
+
+void meson_viu_set_osd_lut(struct meson_drm *priv, enum viu_lut_sel_e lut_sel,
+ unsigned int *r_map, unsigned int *g_map,
+ unsigned int *b_map,
+ bool csc_on)
+{
+ unsigned int addr_port;
+ unsigned int data_port;
+ unsigned int ctrl_port;
+ int i;
+
+ if (lut_sel == VIU_LUT_OSD_EOTF) {
+ addr_port = VIU_OSD1_EOTF_LUT_ADDR_PORT;
+ data_port = VIU_OSD1_EOTF_LUT_DATA_PORT;
+ ctrl_port = VIU_OSD1_EOTF_CTL;
+ } else if (lut_sel == VIU_LUT_OSD_OETF) {
+ addr_port = VIU_OSD1_OETF_LUT_ADDR_PORT;
+ data_port = VIU_OSD1_OETF_LUT_DATA_PORT;
+ ctrl_port = VIU_OSD1_OETF_CTL;
+ } else {
+ return;
+ }
+
+ if (lut_sel == VIU_LUT_OSD_OETF) {
+ writel(0, priv->io_base + _REG(addr_port));
+
+ for (i = 0; i < 20; i++)
+ writel(r_map[i * 2] | (r_map[i * 2 + 1] << 16),
+ priv->io_base + _REG(data_port));
+
+ writel(r_map[OSD_OETF_LUT_SIZE - 1] | (g_map[0] << 16),
+ priv->io_base + _REG(data_port));
+
+ for (i = 0; i < 20; i++)
+ writel(g_map[i * 2 + 1] | (g_map[i * 2 + 2] << 16),
+ priv->io_base + _REG(data_port));
+
+ for (i = 0; i < 20; i++)
+ writel(b_map[i * 2] | (b_map[i * 2 + 1] << 16),
+ priv->io_base + _REG(data_port));
+
+ writel(b_map[OSD_OETF_LUT_SIZE - 1],
+ priv->io_base + _REG(data_port));
+
+ if (csc_on)
+ writel_bits_relaxed(0x7 << 29, 7 << 29,
+ priv->io_base + _REG(ctrl_port));
+ else
+ writel_bits_relaxed(0x7 << 29, 0,
+ priv->io_base + _REG(ctrl_port));
+ } else if (lut_sel == VIU_LUT_OSD_EOTF) {
+ writel(0, priv->io_base + _REG(addr_port));
+
+ for (i = 0; i < 20; i++)
+ writel(r_map[i * 2] | (r_map[i * 2 + 1] << 16),
+ priv->io_base + _REG(data_port));
+
+ writel(r_map[OSD_EOTF_LUT_SIZE - 1] | (g_map[0] << 16),
+ priv->io_base + _REG(data_port));
+
+ for (i = 0; i < 20; i++)
+ writel(g_map[i * 2 + 1] | (g_map[i * 2 + 2] << 16),
+ priv->io_base + _REG(data_port));
+
+ for (i = 0; i < 20; i++)
+ writel(b_map[i * 2] | (b_map[i * 2 + 1] << 16),
+ priv->io_base + _REG(data_port));
+
+ writel(b_map[OSD_EOTF_LUT_SIZE - 1],
+ priv->io_base + _REG(data_port));
+
+ if (csc_on)
+ writel_bits_relaxed(7 << 27, 7 << 27,
+ priv->io_base + _REG(ctrl_port));
+ else
+ writel_bits_relaxed(7 << 27, 0,
+ priv->io_base + _REG(ctrl_port));
+
+ writel_bits_relaxed(BIT(31), BIT(31),
+ priv->io_base + _REG(ctrl_port));
+ }
+}
+
+/* eotf lut: linear */
+static unsigned int eotf_33_linear_mapping[OSD_EOTF_LUT_SIZE] = {
+ 0x0000, 0x0200, 0x0400, 0x0600,
+ 0x0800, 0x0a00, 0x0c00, 0x0e00,
+ 0x1000, 0x1200, 0x1400, 0x1600,
+ 0x1800, 0x1a00, 0x1c00, 0x1e00,
+ 0x2000, 0x2200, 0x2400, 0x2600,
+ 0x2800, 0x2a00, 0x2c00, 0x2e00,
+ 0x3000, 0x3200, 0x3400, 0x3600,
+ 0x3800, 0x3a00, 0x3c00, 0x3e00,
+ 0x4000
+};
+
+/* osd oetf lut: linear */
+static unsigned int oetf_41_linear_mapping[OSD_OETF_LUT_SIZE] = {
+ 0, 0, 0, 0,
+ 0, 32, 64, 96,
+ 128, 160, 192, 224,
+ 256, 288, 320, 352,
+ 384, 416, 448, 480,
+ 512, 544, 576, 608,
+ 640, 672, 704, 736,
+ 768, 800, 832, 864,
+ 896, 928, 960, 992,
+ 1023, 1023, 1023, 1023,
+ 1023
+};
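+
+/*
+ * Both tables above are identity ramps: each eotf_33_linear_mapping
+ * entry is simply i * 0x200 (0x0000..0x4000), and the OETF table
+ * ramps to 1023 with flat clamp regions at both ends, so these
+ * EOTF/OETF stages pass pixels through unchanged.
+ */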
+
+static void meson_viu_load_matrix(struct meson_drm *priv)
+{
+ /* eotf lut bypass */
+ meson_viu_set_osd_lut(priv, VIU_LUT_OSD_EOTF,
+ eotf_33_linear_mapping, /* R */
+ eotf_33_linear_mapping, /* G */
+ eotf_33_linear_mapping, /* B */
+ false);
+
+ /* eotf matrix bypass */
+ meson_viu_set_osd_matrix(priv, VIU_MATRIX_OSD_EOTF,
+ eotf_bypass_coeff,
+ false);
+
+ /* oetf lut bypass */
+ meson_viu_set_osd_lut(priv, VIU_LUT_OSD_OETF,
+ oetf_41_linear_mapping, /* R */
+ oetf_41_linear_mapping, /* G */
+ oetf_41_linear_mapping, /* B */
+ false);
+
+ /* osd matrix RGB709 to YUV709 limit */
+ meson_viu_set_osd_matrix(priv, VIU_MATRIX_OSD,
+ RGB709_to_YUV709l_coeff,
+ true);
+}
+
+void meson_viu_init(struct meson_drm *priv)
+{
+ uint32_t reg;
+
+ /* Disable OSDs */
+ writel_bits_relaxed(BIT(0) | BIT(21), 0,
+ priv->io_base + _REG(VIU_OSD1_CTRL_STAT));
+ writel_bits_relaxed(BIT(0) | BIT(21), 0,
+ priv->io_base + _REG(VIU_OSD2_CTRL_STAT));
+
+ /* On GXL/GXM, Use the 10bit HDR conversion matrix */
+ if (meson_vpu_is_compatible(priv, "amlogic,meson-gxm-vpu") ||
+ meson_vpu_is_compatible(priv, "amlogic,meson-gxl-vpu"))
+ meson_viu_load_matrix(priv);
+
+ /* Initialize OSD1 fifo control register */
+ reg = BIT(0) | /* Urgent DDR request priority */
+ (4 << 5) | /* hold_fifo_lines */
+ (3 << 10) | /* burst length 64 */
+ (32 << 12) | /* fifo_depth_val: 32*8=256 */
+ (2 << 22) | /* 4 words in 1 burst */
+ (2 << 24);
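+ /*
+ * For reference, hand-computed and not used by the code:
+ * reg = 0x1 | 0x80 | 0xc00 | 0x20000 | 0x800000 | 0x2000000
+ * = 0x02820c81
+ */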
+ writel_relaxed(reg, priv->io_base + _REG(VIU_OSD1_FIFO_CTRL_STAT));
+ writel_relaxed(reg, priv->io_base + _REG(VIU_OSD2_FIFO_CTRL_STAT));
+
+ /* Set OSD alpha replace value */
+ writel_bits_relaxed(0xff << OSD_REPLACE_SHIFT,
+ 0xff << OSD_REPLACE_SHIFT,
+ priv->io_base + _REG(VIU_OSD1_CTRL_STAT2));
+ writel_bits_relaxed(0xff << OSD_REPLACE_SHIFT,
+ 0xff << OSD_REPLACE_SHIFT,
+ priv->io_base + _REG(VIU_OSD2_CTRL_STAT2));
+
+ priv->viu.osd1_enabled = false;
+ priv->viu.osd1_commit = false;
+ priv->viu.osd1_interlace = false;
+}
diff --git a/drivers/gpu/drm/meson/meson_viu.h b/drivers/gpu/drm/meson/meson_viu.h
new file mode 100644
index 000000000000..073b1910bd1b
--- /dev/null
+++ b/drivers/gpu/drm/meson/meson_viu.h
@@ -0,0 +1,64 @@
+/*
+ * Copyright (C) 2016 BayLibre, SAS
+ * Author: Neil Armstrong <narmstrong@baylibre.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+/* Video Input Unit */
+
+#ifndef __MESON_VIU_H
+#define __MESON_VIU_H
+
+/* OSDx_BLKx_CFG */
+#define OSD_CANVAS_SEL 16
+
+#define OSD_ENDIANNESS_LE BIT(15)
+#define OSD_ENDIANNESS_BE (0)
+
+#define OSD_BLK_MODE_422 (0x03 << 8)
+#define OSD_BLK_MODE_16 (0x04 << 8)
+#define OSD_BLK_MODE_32 (0x05 << 8)
+#define OSD_BLK_MODE_24 (0x07 << 8)
+
+#define OSD_OUTPUT_COLOR_RGB BIT(7)
+#define OSD_OUTPUT_COLOR_YUV (0)
+
+#define OSD_COLOR_MATRIX_32_RGBA (0x00 << 2)
+#define OSD_COLOR_MATRIX_32_ARGB (0x01 << 2)
+#define OSD_COLOR_MATRIX_32_ABGR (0x02 << 2)
+#define OSD_COLOR_MATRIX_32_BGRA (0x03 << 2)
+
+#define OSD_COLOR_MATRIX_24_RGB (0x00 << 2)
+
+#define OSD_COLOR_MATRIX_16_RGB655 (0x00 << 2)
+#define OSD_COLOR_MATRIX_16_RGB565 (0x04 << 2)
+
+#define OSD_INTERLACE_ENABLED BIT(1)
+#define OSD_INTERLACE_ODD BIT(0)
+#define OSD_INTERLACE_EVEN (0)
+
+/* OSDx_CTRL_STAT */
+#define OSD_ENABLE BIT(21)
+#define OSD_BLK0_ENABLE BIT(0)
+
+#define OSD_GLOBAL_ALPHA_SHIFT 12
+
+/* OSDx_CTRL_STAT2 */
+#define OSD_REPLACE_EN BIT(14)
+#define OSD_REPLACE_SHIFT 6
+
+void meson_viu_init(struct meson_drm *priv);
+
+#endif /* __MESON_VIU_H */
diff --git a/drivers/gpu/drm/meson/meson_vpp.c b/drivers/gpu/drm/meson/meson_vpp.c
new file mode 100644
index 000000000000..671909d8672e
--- /dev/null
+++ b/drivers/gpu/drm/meson/meson_vpp.c
@@ -0,0 +1,162 @@
+/*
+ * Copyright (C) 2016 BayLibre, SAS
+ * Author: Neil Armstrong <narmstrong@baylibre.com>
+ * Copyright (C) 2015 Amlogic, Inc. All rights reserved.
+ * Copyright (C) 2014 Endless Mobile
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <drm/drmP.h>
+#include "meson_drv.h"
+#include "meson_vpp.h"
+#include "meson_registers.h"
+
+/*
+ * The VPP handles all the post-processing after scanout from the VIU.
+ * We handle the following post-processing steps:
+ * - Postblend: blends OSD1 only
+ * (OSD2, VS1, VS2 and the Preblend output are excluded)
+ * - Vertical OSD scaler for OSD1 only; we disable the vertical scaler
+ * and use it only for interlaced scanout
+ * - Intermediate FIFO with default Amlogic values
+ *
+ * What is missing:
+ * - Preblend for video overlay pre-scaling
+ * - OSD2 support for a cursor framebuffer
+ * - Video pre-scaling before postblend
+ * - Full vertical/horizontal OSD scaling to support TV overscan
+ * - HDR conversion
+ */
+
+void meson_vpp_setup_mux(struct meson_drm *priv, unsigned int mux)
+{
+ writel(mux, priv->io_base + _REG(VPU_VIU_VENC_MUX_CTRL));
+}
+
+/*
+ * When the output is interlaced, the OSD must switch between
+ * each field using the INTERLACE_SEL_ODD bit (0) of VIU_OSD1_BLK0_CFG_W0
+ * at each vsync.
+ * But the vertical scaler can provide such functionality if it
+ * is configured for 2:1 scaling with the interlace options enabled.
+ */
+void meson_vpp_setup_interlace_vscaler_osd1(struct meson_drm *priv,
+ struct drm_rect *input)
+{
+ writel_relaxed(BIT(3) /* Enable scaler */ |
+ BIT(2), /* Select OSD1 */
+ priv->io_base + _REG(VPP_OSD_SC_CTRL0));
+
+ writel_relaxed(((drm_rect_width(input) - 1) << 16) |
+ (drm_rect_height(input) - 1),
+ priv->io_base + _REG(VPP_OSD_SCI_WH_M1));
+ /* 2:1 scaling */
+ writel_relaxed(((input->x1) << 16) | (input->x2),
+ priv->io_base + _REG(VPP_OSD_SCO_H_START_END));
+ writel_relaxed(((input->y1 >> 1) << 16) | (input->y2 >> 1),
+ priv->io_base + _REG(VPP_OSD_SCO_V_START_END));
+
+ /* 2:1 scaling values */
+ writel_relaxed(BIT(16), priv->io_base + _REG(VPP_OSD_VSC_INI_PHASE));
+ writel_relaxed(BIT(25), priv->io_base + _REG(VPP_OSD_VSC_PHASE_STEP));
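+ /*
+ * Note: BIT(25) == 2 << 24, so assuming a 24-bit fractional
+ * phase step this programs a step of 2.0 source lines per
+ * output line, i.e. the 2:1 downscale that turns a full
+ * frame into one field.
+ */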
+
+ writel_relaxed(0, priv->io_base + _REG(VPP_OSD_HSC_CTRL0));
+
+ writel_relaxed((4 << 0) /* osd_vsc_bank_length */ |
+ (4 << 3) /* osd_vsc_top_ini_rcv_num0 */ |
+ (1 << 8) /* osd_vsc_top_rpt_p0_num0 */ |
+ (6 << 11) /* osd_vsc_bot_ini_rcv_num0 */ |
+ (2 << 16) /* osd_vsc_bot_rpt_p0_num0 */ |
+ BIT(23) /* osd_prog_interlace */ |
+ BIT(24), /* Enable vertical scaler */
+ priv->io_base + _REG(VPP_OSD_VSC_CTRL0));
+}
+
+void meson_vpp_disable_interlace_vscaler_osd1(struct meson_drm *priv)
+{
+ writel_relaxed(0, priv->io_base + _REG(VPP_OSD_SC_CTRL0));
+ writel_relaxed(0, priv->io_base + _REG(VPP_OSD_VSC_CTRL0));
+ writel_relaxed(0, priv->io_base + _REG(VPP_OSD_HSC_CTRL0));
+}
+
+static unsigned int vpp_filter_coefs_4point_bspline[] = {
+ 0x15561500, 0x14561600, 0x13561700, 0x12561800,
+ 0x11551a00, 0x11541b00, 0x10541c00, 0x0f541d00,
+ 0x0f531e00, 0x0e531f00, 0x0d522100, 0x0c522200,
+ 0x0b522300, 0x0b512400, 0x0a502600, 0x0a4f2700,
+ 0x094e2900, 0x084e2a00, 0x084d2b00, 0x074c2c01,
+ 0x074b2d01, 0x064a2f01, 0x06493001, 0x05483201,
+ 0x05473301, 0x05463401, 0x04453601, 0x04433702,
+ 0x04423802, 0x03413a02, 0x03403b02, 0x033f3c02,
+ 0x033d3d03
+};
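+
+/*
+ * Each entry above packs four 8-bit taps of one filter phase; the
+ * taps of every phase sum to 128 (the assumed normalization), e.g.
+ * 0x15561500 -> 0x15 + 0x56 + 0x15 + 0x00 = 21 + 86 + 21 + 0 = 128
+ * and 0x033d3d03 -> 3 + 61 + 61 + 3 = 128 for the center phase.
+ */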
+
+static void meson_vpp_write_scaling_filter_coefs(struct meson_drm *priv,
+ const unsigned int *coefs,
+ bool is_horizontal)
+{
+ int i;
+
+ writel_relaxed(is_horizontal ? BIT(8) : 0,
+ priv->io_base + _REG(VPP_OSD_SCALE_COEF_IDX));
+ for (i = 0; i < 33; i++)
+ writel_relaxed(coefs[i],
+ priv->io_base + _REG(VPP_OSD_SCALE_COEF));
+}
+
+void meson_vpp_init(struct meson_drm *priv)
+{
+ /* set dummy data default YUV black */
+ if (meson_vpu_is_compatible(priv, "amlogic,meson-gxl-vpu"))
+ writel_relaxed(0x108080, priv->io_base + _REG(VPP_DUMMY_DATA1));
+ else if (meson_vpu_is_compatible(priv, "amlogic,meson-gxm-vpu")) {
+ writel_bits_relaxed(0xff << 16, 0xff << 16,
+ priv->io_base + _REG(VIU_MISC_CTRL1));
+ writel_relaxed(0x20000, priv->io_base + _REG(VPP_DOLBY_CTRL));
+ writel_relaxed(0x1020080,
+ priv->io_base + _REG(VPP_DUMMY_DATA1));
+ }
+
+ /* Initialize vpu fifo control registers */
+ writel_relaxed(readl_relaxed(priv->io_base + _REG(VPP_OFIFO_SIZE)) |
+ 0x77f, priv->io_base + _REG(VPP_OFIFO_SIZE));
+ writel_relaxed(0x08080808, priv->io_base + _REG(VPP_HOLD_LINES));
+
+ /* Turn off preblend */
+ writel_bits_relaxed(VPP_PREBLEND_ENABLE, 0,
+ priv->io_base + _REG(VPP_MISC));
+
+ /* Turn off POSTBLEND */
+ writel_bits_relaxed(VPP_POSTBLEND_ENABLE, 0,
+ priv->io_base + _REG(VPP_MISC));
+
+ /* Force all planes off */
+ writel_bits_relaxed(VPP_OSD1_POSTBLEND | VPP_OSD2_POSTBLEND |
+ VPP_VD1_POSTBLEND | VPP_VD2_POSTBLEND, 0,
+ priv->io_base + _REG(VPP_MISC));
+
+ /* Disable Scalers */
+ writel_relaxed(0, priv->io_base + _REG(VPP_OSD_SC_CTRL0));
+ writel_relaxed(0, priv->io_base + _REG(VPP_OSD_VSC_CTRL0));
+ writel_relaxed(0, priv->io_base + _REG(VPP_OSD_HSC_CTRL0));
+
+ /* Write in the proper filter coefficients. */
+ meson_vpp_write_scaling_filter_coefs(priv,
+ vpp_filter_coefs_4point_bspline, false);
+ meson_vpp_write_scaling_filter_coefs(priv,
+ vpp_filter_coefs_4point_bspline, true);
+}
diff --git a/drivers/gpu/drm/meson/meson_vpp.h b/drivers/gpu/drm/meson/meson_vpp.h
new file mode 100644
index 000000000000..ede3b26e0f22
--- /dev/null
+++ b/drivers/gpu/drm/meson/meson_vpp.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright (C) 2016 BayLibre, SAS
+ * Author: Neil Armstrong <narmstrong@baylibre.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+/* Video Post Process */
+
+#ifndef __MESON_VPP_H
+#define __MESON_VPP_H
+
+/* Mux VIU/VPP to ENCI */
+#define MESON_VIU_VPP_MUX_ENCI 0x5
+
+void meson_vpp_setup_mux(struct meson_drm *priv, unsigned int mux);
+
+void meson_vpp_setup_interlace_vscaler_osd1(struct meson_drm *priv,
+ struct drm_rect *input);
+void meson_vpp_disable_interlace_vscaler_osd1(struct meson_drm *priv);
+
+void meson_vpp_init(struct meson_drm *priv);
+
+#endif /* __MESON_VPP_H */
diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.c b/drivers/gpu/drm/mgag200/mgag200_drv.c
index 1443b3a34775..b0b874264f9d 100644
--- a/drivers/gpu/drm/mgag200/mgag200_drv.c
+++ b/drivers/gpu/drm/mgag200/mgag200_drv.c
@@ -82,9 +82,7 @@ static const struct file_operations mgag200_driver_fops = {
.unlocked_ioctl = drm_ioctl,
.mmap = mgag200_mmap,
.poll = drm_poll,
-#ifdef CONFIG_COMPAT
.compat_ioctl = drm_compat_ioctl,
-#endif
.read = drm_read,
};
diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c
index 6b21cb27e1cc..3a03ac4045d8 100644
--- a/drivers/gpu/drm/mgag200/mgag200_mode.c
+++ b/drivers/gpu/drm/mgag200/mgag200_mode.c
@@ -1658,12 +1658,6 @@ static struct drm_encoder *mga_connector_best_encoder(struct drm_connector
return NULL;
}
-static enum drm_connector_status mga_vga_detect(struct drm_connector
- *connector, bool force)
-{
- return connector_status_connected;
-}
-
static void mga_connector_destroy(struct drm_connector *connector)
{
struct mga_connector *mga_connector = to_mga_connector(connector);
@@ -1680,7 +1674,6 @@ static const struct drm_connector_helper_funcs mga_vga_connector_helper_funcs =
static const struct drm_connector_funcs mga_vga_connector_funcs = {
.dpms = drm_helper_connector_dpms,
- .detect = mga_vga_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.destroy = mga_connector_destroy,
};
diff --git a/drivers/gpu/drm/mgag200/mgag200_ttm.c b/drivers/gpu/drm/mgag200/mgag200_ttm.c
index dcf7d11ac380..5e20220ef4c6 100644
--- a/drivers/gpu/drm/mgag200/mgag200_ttm.c
+++ b/drivers/gpu/drm/mgag200/mgag200_ttm.c
@@ -230,6 +230,7 @@ struct ttm_bo_driver mgag200_bo_driver = {
.ttm_tt_populate = mgag200_ttm_tt_populate,
.ttm_tt_unpopulate = mgag200_ttm_tt_unpopulate,
.init_mem_type = mgag200_bo_init_mem_type,
+ .eviction_valuable = ttm_bo_eviction_valuable,
.evict_flags = mgag200_bo_evict_flags,
.move = NULL,
.verify_access = mgag200_bo_verify_access,
diff --git a/drivers/gpu/drm/msm/Makefile b/drivers/gpu/drm/msm/Makefile
index 4e2806cf778c..028c24df2291 100644
--- a/drivers/gpu/drm/msm/Makefile
+++ b/drivers/gpu/drm/msm/Makefile
@@ -6,6 +6,8 @@ msm-y := \
adreno/adreno_gpu.o \
adreno/a3xx_gpu.o \
adreno/a4xx_gpu.o \
+ adreno/a5xx_gpu.o \
+ adreno/a5xx_power.o \
hdmi/hdmi.o \
hdmi/hdmi_audio.o \
hdmi/hdmi_bridge.o \
@@ -37,6 +39,7 @@ msm-y := \
mdp/mdp5/mdp5_irq.o \
mdp/mdp5/mdp5_mdss.o \
mdp/mdp5/mdp5_kms.o \
+ mdp/mdp5/mdp5_pipe.o \
mdp/mdp5/mdp5_plane.o \
mdp/mdp5/mdp5_smp.o \
msm_atomic.o \
@@ -48,6 +51,7 @@ msm-y := \
msm_gem_prime.o \
msm_gem_shrinker.o \
msm_gem_submit.o \
+ msm_gem_vma.o \
msm_gpu.o \
msm_iommu.o \
msm_perf.o \
diff --git a/drivers/gpu/drm/msm/adreno/a2xx.xml.h b/drivers/gpu/drm/msm/adreno/a2xx.xml.h
index fee24297fb92..4be092f911f9 100644
--- a/drivers/gpu/drm/msm/adreno/a2xx.xml.h
+++ b/drivers/gpu/drm/msm/adreno/a2xx.xml.h
@@ -8,16 +8,17 @@ http://github.com/freedreno/envytools/
git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 398 bytes, from 2015-09-24 17:25:31)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 431 bytes, from 2016-04-26 17:56:44)
- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2016-02-10 17:07:21)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 32901 bytes, from 2015-05-20 20:03:14)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 11518 bytes, from 2016-02-10 21:03:25)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 16166 bytes, from 2016-02-11 21:20:31)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 83967 bytes, from 2016-02-10 17:07:21)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 109916 bytes, from 2016-02-20 18:44:48)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 32907 bytes, from 2016-11-26 23:01:08)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 12025 bytes, from 2016-11-26 23:01:08)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 22544 bytes, from 2016-11-26 23:01:08)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 83840 bytes, from 2016-11-26 23:01:08)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 110765 bytes, from 2016-11-26 23:01:48)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a5xx.xml ( 90321 bytes, from 2016-11-28 16:50:05)
- /home/robclark/src/freedreno/envytools/rnndb/adreno/ocmem.xml ( 1773 bytes, from 2015-09-24 17:30:00)
-Copyright (C) 2013-2015 by the following authors:
+Copyright (C) 2013-2016 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
@@ -206,12 +207,12 @@ enum a2xx_rb_copy_sample_select {
};
enum a2xx_rb_blend_opcode {
- BLEND_DST_PLUS_SRC = 0,
- BLEND_SRC_MINUS_DST = 1,
- BLEND_MIN_DST_SRC = 2,
- BLEND_MAX_DST_SRC = 3,
- BLEND_DST_MINUS_SRC = 4,
- BLEND_DST_PLUS_SRC_BIAS = 5,
+ BLEND2_DST_PLUS_SRC = 0,
+ BLEND2_SRC_MINUS_DST = 1,
+ BLEND2_MIN_DST_SRC = 2,
+ BLEND2_MAX_DST_SRC = 3,
+ BLEND2_DST_MINUS_SRC = 4,
+ BLEND2_DST_PLUS_SRC_BIAS = 5,
};
enum adreno_mmu_clnt_beh {
diff --git a/drivers/gpu/drm/msm/adreno/a3xx.xml.h b/drivers/gpu/drm/msm/adreno/a3xx.xml.h
index 27dabd5e57fb..a066c8b9eccd 100644
--- a/drivers/gpu/drm/msm/adreno/a3xx.xml.h
+++ b/drivers/gpu/drm/msm/adreno/a3xx.xml.h
@@ -8,13 +8,14 @@ http://github.com/freedreno/envytools/
git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 398 bytes, from 2015-09-24 17:25:31)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 431 bytes, from 2016-04-26 17:56:44)
- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2016-02-10 17:07:21)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 32901 bytes, from 2015-05-20 20:03:14)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 11518 bytes, from 2016-02-10 21:03:25)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 16166 bytes, from 2016-02-11 21:20:31)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 83967 bytes, from 2016-02-10 17:07:21)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 109916 bytes, from 2016-02-20 18:44:48)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 32907 bytes, from 2016-11-26 23:01:08)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 12025 bytes, from 2016-11-26 23:01:08)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 22544 bytes, from 2016-11-26 23:01:08)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 83840 bytes, from 2016-11-26 23:01:08)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 110765 bytes, from 2016-11-26 23:01:48)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a5xx.xml ( 90321 bytes, from 2016-11-28 16:50:05)
- /home/robclark/src/freedreno/envytools/rnndb/adreno/ocmem.xml ( 1773 bytes, from 2015-09-24 17:30:00)
Copyright (C) 2013-2016 by the following authors:
@@ -129,10 +130,14 @@ enum a3xx_tex_fmt {
TFMT_Z16_UNORM = 9,
TFMT_X8Z24_UNORM = 10,
TFMT_Z32_FLOAT = 11,
- TFMT_NV12_UV_TILED = 17,
- TFMT_NV12_Y_TILED = 19,
- TFMT_NV12_UV = 21,
- TFMT_NV12_Y = 23,
+ TFMT_UV_64X32 = 16,
+ TFMT_VU_64X32 = 17,
+ TFMT_Y_64X32 = 18,
+ TFMT_NV12_64X32 = 19,
+ TFMT_UV_LINEAR = 20,
+ TFMT_VU_LINEAR = 21,
+ TFMT_Y_LINEAR = 22,
+ TFMT_NV12_LINEAR = 23,
TFMT_I420_Y = 24,
TFMT_I420_U = 26,
TFMT_I420_V = 27,
@@ -525,14 +530,6 @@ enum a3xx_uche_perfcounter_select {
UCHE_UCHEPERF_ACTIVE_CYCLES = 20,
};
-enum a3xx_rb_blend_opcode {
- BLEND_DST_PLUS_SRC = 0,
- BLEND_SRC_MINUS_DST = 1,
- BLEND_DST_MINUS_SRC = 2,
- BLEND_MIN_DST_SRC = 3,
- BLEND_MAX_DST_SRC = 4,
-};
-
enum a3xx_intp_mode {
SMOOTH = 0,
FLAT = 1,
@@ -1393,13 +1390,14 @@ static inline uint32_t A3XX_RB_COPY_CONTROL_MODE(enum adreno_rb_copy_control_mod
{
return ((val) << A3XX_RB_COPY_CONTROL_MODE__SHIFT) & A3XX_RB_COPY_CONTROL_MODE__MASK;
}
+#define A3XX_RB_COPY_CONTROL_MSAA_SRGB_DOWNSAMPLE 0x00000080
#define A3XX_RB_COPY_CONTROL_FASTCLEAR__MASK 0x00000f00
#define A3XX_RB_COPY_CONTROL_FASTCLEAR__SHIFT 8
static inline uint32_t A3XX_RB_COPY_CONTROL_FASTCLEAR(uint32_t val)
{
return ((val) << A3XX_RB_COPY_CONTROL_FASTCLEAR__SHIFT) & A3XX_RB_COPY_CONTROL_FASTCLEAR__MASK;
}
-#define A3XX_RB_COPY_CONTROL_UNK12 0x00001000
+#define A3XX_RB_COPY_CONTROL_DEPTH32_RESOLVE 0x00001000
#define A3XX_RB_COPY_CONTROL_GMEM_BASE__MASK 0xffffc000
#define A3XX_RB_COPY_CONTROL_GMEM_BASE__SHIFT 14
static inline uint32_t A3XX_RB_COPY_CONTROL_GMEM_BASE(uint32_t val)
@@ -1472,7 +1470,7 @@ static inline uint32_t A3XX_RB_DEPTH_CONTROL_ZFUNC(enum adreno_compare_func val)
{
return ((val) << A3XX_RB_DEPTH_CONTROL_ZFUNC__SHIFT) & A3XX_RB_DEPTH_CONTROL_ZFUNC__MASK;
}
-#define A3XX_RB_DEPTH_CONTROL_BF_ENABLE 0x00000080
+#define A3XX_RB_DEPTH_CONTROL_Z_CLAMP_ENABLE 0x00000080
#define A3XX_RB_DEPTH_CONTROL_Z_TEST_ENABLE 0x80000000
#define REG_A3XX_RB_DEPTH_CLEAR 0x00002101
diff --git a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
index fd266ed963b6..b999349b7d2d 100644
--- a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
@@ -41,7 +41,7 @@ extern bool hang_debug;
static void a3xx_dump(struct msm_gpu *gpu);
-static void a3xx_me_init(struct msm_gpu *gpu)
+static bool a3xx_me_init(struct msm_gpu *gpu)
{
struct msm_ringbuffer *ring = gpu->rb;
@@ -65,7 +65,7 @@ static void a3xx_me_init(struct msm_gpu *gpu)
OUT_RING(ring, 0x00000000);
gpu->funcs->flush(gpu);
- gpu->funcs->idle(gpu);
+ return gpu->funcs->idle(gpu);
}
static int a3xx_hw_init(struct msm_gpu *gpu)
@@ -294,15 +294,20 @@ static int a3xx_hw_init(struct msm_gpu *gpu)
/* clear ME_HALT to start micro engine */
gpu_write(gpu, REG_AXXX_CP_ME_CNTL, 0);
- a3xx_me_init(gpu);
-
- return 0;
+ return a3xx_me_init(gpu) ? 0 : -EINVAL;
}
static void a3xx_recover(struct msm_gpu *gpu)
{
+ int i;
+
adreno_dump_info(gpu);
+ for (i = 0; i < 8; i++) {
+ printk("CP_SCRATCH_REG%d: %u\n", i,
+ gpu_read(gpu, REG_AXXX_CP_SCRATCH_REG0 + i));
+ }
+
/* dump registers before resetting gpu, if enabled: */
if (hang_debug)
a3xx_dump(gpu);
@@ -330,17 +335,22 @@ static void a3xx_destroy(struct msm_gpu *gpu)
kfree(a3xx_gpu);
}
-static void a3xx_idle(struct msm_gpu *gpu)
+static bool a3xx_idle(struct msm_gpu *gpu)
{
/* wait for ringbuffer to drain: */
- adreno_idle(gpu);
+ if (!adreno_idle(gpu))
+ return false;
/* then wait for GPU to finish: */
if (spin_until(!(gpu_read(gpu, REG_A3XX_RBBM_STATUS) &
- A3XX_RBBM_STATUS_GPU_BUSY)))
+ A3XX_RBBM_STATUS_GPU_BUSY))) {
DRM_ERROR("%s: timeout waiting for GPU to idle!\n", gpu->name);
- /* TODO maybe we need to reset GPU here to recover from hang? */
+ /* TODO maybe we need to reset GPU here to recover from hang? */
+ return false;
+ }
+
+ return true;
}
static irqreturn_t a3xx_irq(struct msm_gpu *gpu)
@@ -419,91 +429,13 @@ static void a3xx_dump(struct msm_gpu *gpu)
}
/* Register offset defines for A3XX */
static const unsigned int a3xx_register_offsets[REG_ADRENO_REGISTER_MAX] = {
- REG_ADRENO_DEFINE(REG_ADRENO_CP_DEBUG, REG_AXXX_CP_DEBUG),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_ME_RAM_WADDR, REG_AXXX_CP_ME_RAM_WADDR),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_ME_RAM_DATA, REG_AXXX_CP_ME_RAM_DATA),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_PFP_UCODE_DATA,
- REG_A3XX_CP_PFP_UCODE_DATA),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_PFP_UCODE_ADDR,
- REG_A3XX_CP_PFP_UCODE_ADDR),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_WFI_PEND_CTR, REG_A3XX_CP_WFI_PEND_CTR),
REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_BASE, REG_AXXX_CP_RB_BASE),
+ REG_ADRENO_SKIP(REG_ADRENO_CP_RB_BASE_HI),
REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_RPTR_ADDR, REG_AXXX_CP_RB_RPTR_ADDR),
+ REG_ADRENO_SKIP(REG_ADRENO_CP_RB_RPTR_ADDR_HI),
REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_RPTR, REG_AXXX_CP_RB_RPTR),
REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_WPTR, REG_AXXX_CP_RB_WPTR),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_PROTECT_CTRL, REG_A3XX_CP_PROTECT_CTRL),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_ME_CNTL, REG_AXXX_CP_ME_CNTL),
REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_CNTL, REG_AXXX_CP_RB_CNTL),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_IB1_BASE, REG_AXXX_CP_IB1_BASE),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_IB1_BUFSZ, REG_AXXX_CP_IB1_BUFSZ),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_IB2_BASE, REG_AXXX_CP_IB2_BASE),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_IB2_BUFSZ, REG_AXXX_CP_IB2_BUFSZ),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_TIMESTAMP, REG_AXXX_CP_SCRATCH_REG0),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_ME_RAM_RADDR, REG_AXXX_CP_ME_RAM_RADDR),
- REG_ADRENO_DEFINE(REG_ADRENO_SCRATCH_ADDR, REG_AXXX_SCRATCH_ADDR),
- REG_ADRENO_DEFINE(REG_ADRENO_SCRATCH_UMSK, REG_AXXX_SCRATCH_UMSK),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_ROQ_ADDR, REG_A3XX_CP_ROQ_ADDR),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_ROQ_DATA, REG_A3XX_CP_ROQ_DATA),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_MERCIU_ADDR, REG_A3XX_CP_MERCIU_ADDR),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_MERCIU_DATA, REG_A3XX_CP_MERCIU_DATA),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_MERCIU_DATA2, REG_A3XX_CP_MERCIU_DATA2),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_MEQ_ADDR, REG_A3XX_CP_MEQ_ADDR),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_MEQ_DATA, REG_A3XX_CP_MEQ_DATA),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_HW_FAULT, REG_A3XX_CP_HW_FAULT),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_PROTECT_STATUS,
- REG_A3XX_CP_PROTECT_STATUS),
- REG_ADRENO_DEFINE(REG_ADRENO_RBBM_STATUS, REG_A3XX_RBBM_STATUS),
- REG_ADRENO_DEFINE(REG_ADRENO_RBBM_PERFCTR_CTL,
- REG_A3XX_RBBM_PERFCTR_CTL),
- REG_ADRENO_DEFINE(REG_ADRENO_RBBM_PERFCTR_LOAD_CMD0,
- REG_A3XX_RBBM_PERFCTR_LOAD_CMD0),
- REG_ADRENO_DEFINE(REG_ADRENO_RBBM_PERFCTR_LOAD_CMD1,
- REG_A3XX_RBBM_PERFCTR_LOAD_CMD1),
- REG_ADRENO_DEFINE(REG_ADRENO_RBBM_PERFCTR_PWR_1_LO,
- REG_A3XX_RBBM_PERFCTR_PWR_1_LO),
- REG_ADRENO_DEFINE(REG_ADRENO_RBBM_INT_0_MASK, REG_A3XX_RBBM_INT_0_MASK),
- REG_ADRENO_DEFINE(REG_ADRENO_RBBM_INT_0_STATUS,
- REG_A3XX_RBBM_INT_0_STATUS),
- REG_ADRENO_DEFINE(REG_ADRENO_RBBM_AHB_ERROR_STATUS,
- REG_A3XX_RBBM_AHB_ERROR_STATUS),
- REG_ADRENO_DEFINE(REG_ADRENO_RBBM_AHB_CMD, REG_A3XX_RBBM_AHB_CMD),
- REG_ADRENO_DEFINE(REG_ADRENO_RBBM_INT_CLEAR_CMD,
- REG_A3XX_RBBM_INT_CLEAR_CMD),
- REG_ADRENO_DEFINE(REG_ADRENO_RBBM_CLOCK_CTL, REG_A3XX_RBBM_CLOCK_CTL),
- REG_ADRENO_DEFINE(REG_ADRENO_VPC_DEBUG_RAM_SEL,
- REG_A3XX_VPC_VPC_DEBUG_RAM_SEL),
- REG_ADRENO_DEFINE(REG_ADRENO_VPC_DEBUG_RAM_READ,
- REG_A3XX_VPC_VPC_DEBUG_RAM_READ),
- REG_ADRENO_DEFINE(REG_ADRENO_VSC_SIZE_ADDRESS,
- REG_A3XX_VSC_SIZE_ADDRESS),
- REG_ADRENO_DEFINE(REG_ADRENO_VFD_CONTROL_0, REG_A3XX_VFD_CONTROL_0),
- REG_ADRENO_DEFINE(REG_ADRENO_VFD_INDEX_MAX, REG_A3XX_VFD_INDEX_MAX),
- REG_ADRENO_DEFINE(REG_ADRENO_SP_VS_PVT_MEM_ADDR_REG,
- REG_A3XX_SP_VS_PVT_MEM_ADDR_REG),
- REG_ADRENO_DEFINE(REG_ADRENO_SP_FS_PVT_MEM_ADDR_REG,
- REG_A3XX_SP_FS_PVT_MEM_ADDR_REG),
- REG_ADRENO_DEFINE(REG_ADRENO_SP_VS_OBJ_START_REG,
- REG_A3XX_SP_VS_OBJ_START_REG),
- REG_ADRENO_DEFINE(REG_ADRENO_SP_FS_OBJ_START_REG,
- REG_A3XX_SP_FS_OBJ_START_REG),
- REG_ADRENO_DEFINE(REG_ADRENO_PA_SC_AA_CONFIG, REG_A3XX_PA_SC_AA_CONFIG),
- REG_ADRENO_DEFINE(REG_ADRENO_RBBM_PM_OVERRIDE2,
- REG_A3XX_RBBM_PM_OVERRIDE2),
- REG_ADRENO_DEFINE(REG_ADRENO_SCRATCH_REG2, REG_AXXX_CP_SCRATCH_REG2),
- REG_ADRENO_DEFINE(REG_ADRENO_SQ_GPR_MANAGEMENT,
- REG_A3XX_SQ_GPR_MANAGEMENT),
- REG_ADRENO_DEFINE(REG_ADRENO_SQ_INST_STORE_MANAGMENT,
- REG_A3XX_SQ_INST_STORE_MANAGMENT),
- REG_ADRENO_DEFINE(REG_ADRENO_TP0_CHICKEN, REG_A3XX_TP0_CHICKEN),
- REG_ADRENO_DEFINE(REG_ADRENO_RBBM_RBBM_CTL, REG_A3XX_RBBM_RBBM_CTL),
- REG_ADRENO_DEFINE(REG_ADRENO_RBBM_SW_RESET_CMD,
- REG_A3XX_RBBM_SW_RESET_CMD),
- REG_ADRENO_DEFINE(REG_ADRENO_UCHE_INVALIDATE0,
- REG_A3XX_UCHE_CACHE_INVALIDATE0_REG),
- REG_ADRENO_DEFINE(REG_ADRENO_RBBM_PERFCTR_LOAD_VALUE_LO,
- REG_A3XX_RBBM_PERFCTR_LOAD_VALUE_LO),
- REG_ADRENO_DEFINE(REG_ADRENO_RBBM_PERFCTR_LOAD_VALUE_HI,
- REG_A3XX_RBBM_PERFCTR_LOAD_VALUE_HI),
};
static const struct adreno_gpu_funcs funcs = {
@@ -583,7 +515,7 @@ struct msm_gpu *a3xx_gpu_init(struct drm_device *dev)
#endif
}
- if (!gpu->mmu) {
+ if (!gpu->aspace) {
/* TODO we think it is possible to configure the GPU to
* restrict access to VRAM carveout. But the required
* registers are unknown. For now just bail out and
diff --git a/drivers/gpu/drm/msm/adreno/a4xx.xml.h b/drivers/gpu/drm/msm/adreno/a4xx.xml.h
index 3220b91f559a..4ce21b902779 100644
--- a/drivers/gpu/drm/msm/adreno/a4xx.xml.h
+++ b/drivers/gpu/drm/msm/adreno/a4xx.xml.h
@@ -8,13 +8,14 @@ http://github.com/freedreno/envytools/
git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 398 bytes, from 2015-09-24 17:25:31)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 431 bytes, from 2016-04-26 17:56:44)
- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2016-02-10 17:07:21)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 32901 bytes, from 2015-05-20 20:03:14)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 11518 bytes, from 2016-02-10 21:03:25)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 16166 bytes, from 2016-02-11 21:20:31)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 83967 bytes, from 2016-02-10 17:07:21)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 109916 bytes, from 2016-02-20 18:44:48)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 32907 bytes, from 2016-11-26 23:01:08)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 12025 bytes, from 2016-11-26 23:01:08)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 22544 bytes, from 2016-11-26 23:01:08)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 83840 bytes, from 2016-11-26 23:01:08)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 110765 bytes, from 2016-11-26 23:01:48)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a5xx.xml ( 90321 bytes, from 2016-11-28 16:50:05)
- /home/robclark/src/freedreno/envytools/rnndb/adreno/ocmem.xml ( 1773 bytes, from 2015-09-24 17:30:00)
Copyright (C) 2013-2016 by the following authors:
@@ -46,6 +47,9 @@ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
enum a4xx_color_fmt {
RB4_A8_UNORM = 1,
RB4_R8_UNORM = 2,
+ RB4_R8_SNORM = 3,
+ RB4_R8_UINT = 4,
+ RB4_R8_SINT = 5,
RB4_R4G4B4A4_UNORM = 8,
RB4_R5G5B5A1_UNORM = 10,
RB4_R5G6B5_UNORM = 14,
@@ -89,17 +93,10 @@ enum a4xx_color_fmt {
enum a4xx_tile_mode {
TILE4_LINEAR = 0,
+ TILE4_2 = 2,
TILE4_3 = 3,
};
-enum a4xx_rb_blend_opcode {
- BLEND_DST_PLUS_SRC = 0,
- BLEND_SRC_MINUS_DST = 1,
- BLEND_DST_MINUS_SRC = 2,
- BLEND_MIN_DST_SRC = 3,
- BLEND_MAX_DST_SRC = 4,
-};
-
enum a4xx_vtx_fmt {
VFMT4_32_FLOAT = 1,
VFMT4_32_32_FLOAT = 2,
@@ -940,6 +937,7 @@ static inline uint32_t A4XX_RB_MODE_CONTROL_HEIGHT(uint32_t val)
{
return ((val >> 5) << A4XX_RB_MODE_CONTROL_HEIGHT__SHIFT) & A4XX_RB_MODE_CONTROL_HEIGHT__MASK;
}
+#define A4XX_RB_MODE_CONTROL_ENABLE_GMEM 0x00010000
#define REG_A4XX_RB_RENDER_CONTROL 0x000020a1
#define A4XX_RB_RENDER_CONTROL_BINNING_PASS 0x00000001
@@ -1043,7 +1041,7 @@ static inline uint32_t A4XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR(enum adreno_rb_b
}
#define A4XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE__MASK 0x000000e0
#define A4XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE__SHIFT 5
-static inline uint32_t A4XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE(enum a4xx_rb_blend_opcode val)
+static inline uint32_t A4XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE(enum a3xx_rb_blend_opcode val)
{
return ((val) << A4XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE__SHIFT) & A4XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE__MASK;
}
@@ -1061,7 +1059,7 @@ static inline uint32_t A4XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR(enum adreno_rb
}
#define A4XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE__MASK 0x00e00000
#define A4XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE__SHIFT 21
-static inline uint32_t A4XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE(enum a4xx_rb_blend_opcode val)
+static inline uint32_t A4XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE(enum a3xx_rb_blend_opcode val)
{
return ((val) << A4XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE__SHIFT) & A4XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE__MASK;
}
@@ -1073,12 +1071,18 @@ static inline uint32_t A4XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR(enum adreno_r
}
#define REG_A4XX_RB_BLEND_RED 0x000020f0
-#define A4XX_RB_BLEND_RED_UINT__MASK 0x0000ffff
+#define A4XX_RB_BLEND_RED_UINT__MASK 0x000000ff
#define A4XX_RB_BLEND_RED_UINT__SHIFT 0
static inline uint32_t A4XX_RB_BLEND_RED_UINT(uint32_t val)
{
return ((val) << A4XX_RB_BLEND_RED_UINT__SHIFT) & A4XX_RB_BLEND_RED_UINT__MASK;
}
+#define A4XX_RB_BLEND_RED_SINT__MASK 0x0000ff00
+#define A4XX_RB_BLEND_RED_SINT__SHIFT 8
+static inline uint32_t A4XX_RB_BLEND_RED_SINT(uint32_t val)
+{
+ return ((val) << A4XX_RB_BLEND_RED_SINT__SHIFT) & A4XX_RB_BLEND_RED_SINT__MASK;
+}
#define A4XX_RB_BLEND_RED_FLOAT__MASK 0xffff0000
#define A4XX_RB_BLEND_RED_FLOAT__SHIFT 16
static inline uint32_t A4XX_RB_BLEND_RED_FLOAT(float val)
@@ -1095,12 +1099,18 @@ static inline uint32_t A4XX_RB_BLEND_RED_F32(float val)
}
#define REG_A4XX_RB_BLEND_GREEN 0x000020f2
-#define A4XX_RB_BLEND_GREEN_UINT__MASK 0x0000ffff
+#define A4XX_RB_BLEND_GREEN_UINT__MASK 0x000000ff
#define A4XX_RB_BLEND_GREEN_UINT__SHIFT 0
static inline uint32_t A4XX_RB_BLEND_GREEN_UINT(uint32_t val)
{
return ((val) << A4XX_RB_BLEND_GREEN_UINT__SHIFT) & A4XX_RB_BLEND_GREEN_UINT__MASK;
}
+#define A4XX_RB_BLEND_GREEN_SINT__MASK 0x0000ff00
+#define A4XX_RB_BLEND_GREEN_SINT__SHIFT 8
+static inline uint32_t A4XX_RB_BLEND_GREEN_SINT(uint32_t val)
+{
+ return ((val) << A4XX_RB_BLEND_GREEN_SINT__SHIFT) & A4XX_RB_BLEND_GREEN_SINT__MASK;
+}
#define A4XX_RB_BLEND_GREEN_FLOAT__MASK 0xffff0000
#define A4XX_RB_BLEND_GREEN_FLOAT__SHIFT 16
static inline uint32_t A4XX_RB_BLEND_GREEN_FLOAT(float val)
@@ -1117,12 +1127,18 @@ static inline uint32_t A4XX_RB_BLEND_GREEN_F32(float val)
}
#define REG_A4XX_RB_BLEND_BLUE 0x000020f4
-#define A4XX_RB_BLEND_BLUE_UINT__MASK 0x0000ffff
+#define A4XX_RB_BLEND_BLUE_UINT__MASK 0x000000ff
#define A4XX_RB_BLEND_BLUE_UINT__SHIFT 0
static inline uint32_t A4XX_RB_BLEND_BLUE_UINT(uint32_t val)
{
return ((val) << A4XX_RB_BLEND_BLUE_UINT__SHIFT) & A4XX_RB_BLEND_BLUE_UINT__MASK;
}
+#define A4XX_RB_BLEND_BLUE_SINT__MASK 0x0000ff00
+#define A4XX_RB_BLEND_BLUE_SINT__SHIFT 8
+static inline uint32_t A4XX_RB_BLEND_BLUE_SINT(uint32_t val)
+{
+ return ((val) << A4XX_RB_BLEND_BLUE_SINT__SHIFT) & A4XX_RB_BLEND_BLUE_SINT__MASK;
+}
#define A4XX_RB_BLEND_BLUE_FLOAT__MASK 0xffff0000
#define A4XX_RB_BLEND_BLUE_FLOAT__SHIFT 16
static inline uint32_t A4XX_RB_BLEND_BLUE_FLOAT(float val)
@@ -1139,12 +1155,18 @@ static inline uint32_t A4XX_RB_BLEND_BLUE_F32(float val)
}
#define REG_A4XX_RB_BLEND_ALPHA 0x000020f6
-#define A4XX_RB_BLEND_ALPHA_UINT__MASK 0x0000ffff
+#define A4XX_RB_BLEND_ALPHA_UINT__MASK 0x000000ff
#define A4XX_RB_BLEND_ALPHA_UINT__SHIFT 0
static inline uint32_t A4XX_RB_BLEND_ALPHA_UINT(uint32_t val)
{
return ((val) << A4XX_RB_BLEND_ALPHA_UINT__SHIFT) & A4XX_RB_BLEND_ALPHA_UINT__MASK;
}
+#define A4XX_RB_BLEND_ALPHA_SINT__MASK 0x0000ff00
+#define A4XX_RB_BLEND_ALPHA_SINT__SHIFT 8
+static inline uint32_t A4XX_RB_BLEND_ALPHA_SINT(uint32_t val)
+{
+ return ((val) << A4XX_RB_BLEND_ALPHA_SINT__SHIFT) & A4XX_RB_BLEND_ALPHA_SINT__MASK;
+}
#define A4XX_RB_BLEND_ALPHA_FLOAT__MASK 0xffff0000
#define A4XX_RB_BLEND_ALPHA_FLOAT__SHIFT 16
static inline uint32_t A4XX_RB_BLEND_ALPHA_FLOAT(float val)
@@ -1348,7 +1370,7 @@ static inline uint32_t A4XX_RB_DEPTH_CONTROL_ZFUNC(enum adreno_compare_func val)
{
return ((val) << A4XX_RB_DEPTH_CONTROL_ZFUNC__SHIFT) & A4XX_RB_DEPTH_CONTROL_ZFUNC__MASK;
}
-#define A4XX_RB_DEPTH_CONTROL_BF_ENABLE 0x00000080
+#define A4XX_RB_DEPTH_CONTROL_Z_CLAMP_ENABLE 0x00000080
#define A4XX_RB_DEPTH_CONTROL_EARLY_Z_DISABLE 0x00010000
#define A4XX_RB_DEPTH_CONTROL_FORCE_FRAGZ_TO_FS 0x00020000
#define A4XX_RB_DEPTH_CONTROL_Z_TEST_ENABLE 0x80000000
@@ -2177,11 +2199,23 @@ static inline uint32_t REG_A4XX_RBBM_CLOCK_DELAY_RB_MARB_CCU_L1_REG(uint32_t i0)
#define REG_A4XX_CP_DRAW_STATE_ADDR 0x00000232
-#define REG_A4XX_CP_PROTECT_REG_0 0x00000240
-
static inline uint32_t REG_A4XX_CP_PROTECT(uint32_t i0) { return 0x00000240 + 0x1*i0; }
static inline uint32_t REG_A4XX_CP_PROTECT_REG(uint32_t i0) { return 0x00000240 + 0x1*i0; }
+#define A4XX_CP_PROTECT_REG_BASE_ADDR__MASK 0x0001ffff
+#define A4XX_CP_PROTECT_REG_BASE_ADDR__SHIFT 0
+static inline uint32_t A4XX_CP_PROTECT_REG_BASE_ADDR(uint32_t val)
+{
+ return ((val) << A4XX_CP_PROTECT_REG_BASE_ADDR__SHIFT) & A4XX_CP_PROTECT_REG_BASE_ADDR__MASK;
+}
+#define A4XX_CP_PROTECT_REG_MASK_LEN__MASK 0x1f000000
+#define A4XX_CP_PROTECT_REG_MASK_LEN__SHIFT 24
+static inline uint32_t A4XX_CP_PROTECT_REG_MASK_LEN(uint32_t val)
+{
+ return ((val) << A4XX_CP_PROTECT_REG_MASK_LEN__SHIFT) & A4XX_CP_PROTECT_REG_MASK_LEN__MASK;
+}
+#define A4XX_CP_PROTECT_REG_TRAP_WRITE 0x20000000
+#define A4XX_CP_PROTECT_REG_TRAP_READ 0x40000000
#define REG_A4XX_CP_PROTECT_CTRL 0x00000250
@@ -2272,7 +2306,7 @@ static inline uint32_t A4XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT(uint32_t val)
{
return ((val) << A4XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT) & A4XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT__MASK;
}
-#define A4XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__MASK 0x0003fc00
+#define A4XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__MASK 0x0000fc00
#define A4XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT 10
static inline uint32_t A4XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT(uint32_t val)
{
@@ -2420,7 +2454,7 @@ static inline uint32_t A4XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT(uint32_t val)
{
return ((val) << A4XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT) & A4XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT__MASK;
}
-#define A4XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT__MASK 0x0003fc00
+#define A4XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT__MASK 0x0000fc00
#define A4XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT 10
static inline uint32_t A4XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT(uint32_t val)
{
@@ -3117,6 +3151,8 @@ static inline uint32_t A4XX_TPL1_TP_TEX_COUNT_GS(uint32_t val)
#define REG_A4XX_GRAS_CL_CLIP_CNTL 0x00002000
#define A4XX_GRAS_CL_CLIP_CNTL_CLIP_DISABLE 0x00008000
+#define A4XX_GRAS_CL_CLIP_CNTL_ZNEAR_CLIP_DISABLE 0x00010000
+#define A4XX_GRAS_CL_CLIP_CNTL_ZFAR_CLIP_DISABLE 0x00020000
#define A4XX_GRAS_CL_CLIP_CNTL_ZERO_GB_SCALE_Z 0x00400000
#define REG_A4XX_GRAS_CLEAR_CNTL 0x00002003
@@ -3253,6 +3289,7 @@ static inline uint32_t A4XX_GRAS_SU_MODE_CONTROL_LINEHALFWIDTH(float val)
return ((((int32_t)(val * 4.0))) << A4XX_GRAS_SU_MODE_CONTROL_LINEHALFWIDTH__SHIFT) & A4XX_GRAS_SU_MODE_CONTROL_LINEHALFWIDTH__MASK;
}
#define A4XX_GRAS_SU_MODE_CONTROL_POLY_OFFSET 0x00000800
+#define A4XX_GRAS_SU_MODE_CONTROL_MSAA_ENABLE 0x00002000
#define A4XX_GRAS_SU_MODE_CONTROL_RENDERING_PASS 0x00100000
#define REG_A4XX_GRAS_SC_CONTROL 0x0000207b
@@ -3670,6 +3707,8 @@ static inline uint32_t A4XX_HLSQ_GS_CONTROL_REG_INSTRLENGTH(uint32_t val)
#define REG_A4XX_PC_BINNING_COMMAND 0x00000d00
#define A4XX_PC_BINNING_COMMAND_BINNING_ENABLE 0x00000001
+#define REG_A4XX_PC_TESSFACTOR_ADDR 0x00000d08
+
#define REG_A4XX_PC_DRAWCALL_SETUP_OVERRIDE 0x00000d0c
#define REG_A4XX_PC_PERFCTR_PC_SEL_0 0x00000d10
@@ -3690,6 +3729,20 @@ static inline uint32_t A4XX_HLSQ_GS_CONTROL_REG_INSTRLENGTH(uint32_t val)
#define REG_A4XX_PC_BIN_BASE 0x000021c0
+#define REG_A4XX_PC_VSTREAM_CONTROL 0x000021c2
+#define A4XX_PC_VSTREAM_CONTROL_SIZE__MASK 0x003f0000
+#define A4XX_PC_VSTREAM_CONTROL_SIZE__SHIFT 16
+static inline uint32_t A4XX_PC_VSTREAM_CONTROL_SIZE(uint32_t val)
+{
+ return ((val) << A4XX_PC_VSTREAM_CONTROL_SIZE__SHIFT) & A4XX_PC_VSTREAM_CONTROL_SIZE__MASK;
+}
+#define A4XX_PC_VSTREAM_CONTROL_N__MASK 0x07c00000
+#define A4XX_PC_VSTREAM_CONTROL_N__SHIFT 22
+static inline uint32_t A4XX_PC_VSTREAM_CONTROL_N(uint32_t val)
+{
+ return ((val) << A4XX_PC_VSTREAM_CONTROL_N__SHIFT) & A4XX_PC_VSTREAM_CONTROL_N__MASK;
+}
+
#define REG_A4XX_PC_PRIM_VTX_CNTL 0x000021c4
#define A4XX_PC_PRIM_VTX_CNTL_VAROUT__MASK 0x0000000f
#define A4XX_PC_PRIM_VTX_CNTL_VAROUT__SHIFT 0
@@ -3752,12 +3805,8 @@ static inline uint32_t A4XX_PC_HS_PARAM_SPACING(enum a4xx_tess_spacing val)
{
return ((val) << A4XX_PC_HS_PARAM_SPACING__SHIFT) & A4XX_PC_HS_PARAM_SPACING__MASK;
}
-#define A4XX_PC_HS_PARAM_PRIMTYPE__MASK 0x01800000
-#define A4XX_PC_HS_PARAM_PRIMTYPE__SHIFT 23
-static inline uint32_t A4XX_PC_HS_PARAM_PRIMTYPE(enum adreno_pa_su_sc_draw val)
-{
- return ((val) << A4XX_PC_HS_PARAM_PRIMTYPE__SHIFT) & A4XX_PC_HS_PARAM_PRIMTYPE__MASK;
-}
+#define A4XX_PC_HS_PARAM_CW 0x00800000
+#define A4XX_PC_HS_PARAM_CONNECTED 0x01000000
#define REG_A4XX_VBIF_VERSION 0x00003000
diff --git a/drivers/gpu/drm/msm/adreno/a4xx_gpu.c b/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
index d0d3c7baa8fe..511bc855cc7f 100644
--- a/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
@@ -113,7 +113,7 @@ static void a4xx_enable_hwcg(struct msm_gpu *gpu)
}
-static void a4xx_me_init(struct msm_gpu *gpu)
+static bool a4xx_me_init(struct msm_gpu *gpu)
{
struct msm_ringbuffer *ring = gpu->rb;
@@ -137,7 +137,7 @@ static void a4xx_me_init(struct msm_gpu *gpu)
OUT_RING(ring, 0x00000000);
gpu->funcs->flush(gpu);
- gpu->funcs->idle(gpu);
+ return gpu->funcs->idle(gpu);
}
static int a4xx_hw_init(struct msm_gpu *gpu)
@@ -292,15 +292,20 @@ static int a4xx_hw_init(struct msm_gpu *gpu)
/* clear ME_HALT to start micro engine */
gpu_write(gpu, REG_A4XX_CP_ME_CNTL, 0);
- a4xx_me_init(gpu);
-
- return 0;
+ return a4xx_me_init(gpu) ? 0 : -EINVAL;
}
static void a4xx_recover(struct msm_gpu *gpu)
{
+ int i;
+
adreno_dump_info(gpu);
+ for (i = 0; i < 8; i++) {
+ printk("CP_SCRATCH_REG%d: %u\n", i,
+ gpu_read(gpu, REG_AXXX_CP_SCRATCH_REG0 + i));
+ }
+
/* dump registers before resetting gpu, if enabled: */
if (hang_debug)
a4xx_dump(gpu);
@@ -328,17 +333,21 @@ static void a4xx_destroy(struct msm_gpu *gpu)
kfree(a4xx_gpu);
}
-static void a4xx_idle(struct msm_gpu *gpu)
+static bool a4xx_idle(struct msm_gpu *gpu)
{
/* wait for ringbuffer to drain: */
- adreno_idle(gpu);
+ if (!adreno_idle(gpu))
+ return false;
/* then wait for GPU to finish: */
if (spin_until(!(gpu_read(gpu, REG_A4XX_RBBM_STATUS) &
- A4XX_RBBM_STATUS_GPU_BUSY)))
+ A4XX_RBBM_STATUS_GPU_BUSY))) {
DRM_ERROR("%s: timeout waiting for GPU to idle!\n", gpu->name);
+ /* TODO maybe we need to reset GPU here to recover from hang? */
+ return false;
+ }
- /* TODO maybe we need to reset GPU here to recover from hang? */
+ return true;
}
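These hunks thread an idle/hang status through the init path: a4xx_idle() now reports timeouts, a4xx_me_init() forwards the result, and a4xx_hw_init() converts a failure into -EINVAL. A hedged sketch of how a caller can now act on the status, where ->idle is the msm_gpu_funcs hook changed above (the helper name and the -ETIMEDOUT value are illustrative):

/* Sketch: acting on the idle status that now propagates up.
 * example_wait_for_gpu() and -ETIMEDOUT are illustrative only. */
static int example_wait_for_gpu(struct msm_gpu *gpu)
{
        if (!gpu->funcs->idle(gpu))
                return -ETIMEDOUT; /* ring never drained or GPU still busy */
        return 0;
}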
static irqreturn_t a4xx_irq(struct msm_gpu *gpu)
@@ -460,87 +469,13 @@ static void a4xx_show(struct msm_gpu *gpu, struct seq_file *m)
/* Register offset defines for A4XX, in order of enum adreno_regs */
static const unsigned int a4xx_register_offsets[REG_ADRENO_REGISTER_MAX] = {
- REG_ADRENO_DEFINE(REG_ADRENO_CP_DEBUG, REG_A4XX_CP_DEBUG),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_ME_RAM_WADDR, REG_A4XX_CP_ME_RAM_WADDR),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_ME_RAM_DATA, REG_A4XX_CP_ME_RAM_DATA),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_PFP_UCODE_DATA,
- REG_A4XX_CP_PFP_UCODE_DATA),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_PFP_UCODE_ADDR,
- REG_A4XX_CP_PFP_UCODE_ADDR),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_WFI_PEND_CTR, REG_A4XX_CP_WFI_PEND_CTR),
REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_BASE, REG_A4XX_CP_RB_BASE),
+ REG_ADRENO_SKIP(REG_ADRENO_CP_RB_BASE_HI),
REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_RPTR_ADDR, REG_A4XX_CP_RB_RPTR_ADDR),
+ REG_ADRENO_SKIP(REG_ADRENO_CP_RB_RPTR_ADDR_HI),
REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_RPTR, REG_A4XX_CP_RB_RPTR),
REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_WPTR, REG_A4XX_CP_RB_WPTR),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_PROTECT_CTRL, REG_A4XX_CP_PROTECT_CTRL),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_ME_CNTL, REG_A4XX_CP_ME_CNTL),
REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_CNTL, REG_A4XX_CP_RB_CNTL),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_IB1_BASE, REG_A4XX_CP_IB1_BASE),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_IB1_BUFSZ, REG_A4XX_CP_IB1_BUFSZ),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_IB2_BASE, REG_A4XX_CP_IB2_BASE),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_IB2_BUFSZ, REG_A4XX_CP_IB2_BUFSZ),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_TIMESTAMP, REG_AXXX_CP_SCRATCH_REG0),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_ME_RAM_RADDR, REG_A4XX_CP_ME_RAM_RADDR),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_ROQ_ADDR, REG_A4XX_CP_ROQ_ADDR),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_ROQ_DATA, REG_A4XX_CP_ROQ_DATA),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_MERCIU_ADDR, REG_A4XX_CP_MERCIU_ADDR),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_MERCIU_DATA, REG_A4XX_CP_MERCIU_DATA),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_MERCIU_DATA2, REG_A4XX_CP_MERCIU_DATA2),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_MEQ_ADDR, REG_A4XX_CP_MEQ_ADDR),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_MEQ_DATA, REG_A4XX_CP_MEQ_DATA),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_HW_FAULT, REG_A4XX_CP_HW_FAULT),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_PROTECT_STATUS,
- REG_A4XX_CP_PROTECT_STATUS),
- REG_ADRENO_DEFINE(REG_ADRENO_SCRATCH_ADDR, REG_A4XX_CP_SCRATCH_ADDR),
- REG_ADRENO_DEFINE(REG_ADRENO_SCRATCH_UMSK, REG_A4XX_CP_SCRATCH_UMASK),
- REG_ADRENO_DEFINE(REG_ADRENO_RBBM_STATUS, REG_A4XX_RBBM_STATUS),
- REG_ADRENO_DEFINE(REG_ADRENO_RBBM_PERFCTR_CTL,
- REG_A4XX_RBBM_PERFCTR_CTL),
- REG_ADRENO_DEFINE(REG_ADRENO_RBBM_PERFCTR_LOAD_CMD0,
- REG_A4XX_RBBM_PERFCTR_LOAD_CMD0),
- REG_ADRENO_DEFINE(REG_ADRENO_RBBM_PERFCTR_LOAD_CMD1,
- REG_A4XX_RBBM_PERFCTR_LOAD_CMD1),
- REG_ADRENO_DEFINE(REG_ADRENO_RBBM_PERFCTR_LOAD_CMD2,
- REG_A4XX_RBBM_PERFCTR_LOAD_CMD2),
- REG_ADRENO_DEFINE(REG_ADRENO_RBBM_PERFCTR_PWR_1_LO,
- REG_A4XX_RBBM_PERFCTR_PWR_1_LO),
- REG_ADRENO_DEFINE(REG_ADRENO_RBBM_INT_0_MASK, REG_A4XX_RBBM_INT_0_MASK),
- REG_ADRENO_DEFINE(REG_ADRENO_RBBM_INT_0_STATUS,
- REG_A4XX_RBBM_INT_0_STATUS),
- REG_ADRENO_DEFINE(REG_ADRENO_RBBM_AHB_ERROR_STATUS,
- REG_A4XX_RBBM_AHB_ERROR_STATUS),
- REG_ADRENO_DEFINE(REG_ADRENO_RBBM_AHB_CMD, REG_A4XX_RBBM_AHB_CMD),
- REG_ADRENO_DEFINE(REG_ADRENO_RBBM_CLOCK_CTL, REG_A4XX_RBBM_CLOCK_CTL),
- REG_ADRENO_DEFINE(REG_ADRENO_RBBM_AHB_ME_SPLIT_STATUS,
- REG_A4XX_RBBM_AHB_ME_SPLIT_STATUS),
- REG_ADRENO_DEFINE(REG_ADRENO_RBBM_AHB_PFP_SPLIT_STATUS,
- REG_A4XX_RBBM_AHB_PFP_SPLIT_STATUS),
- REG_ADRENO_DEFINE(REG_ADRENO_VPC_DEBUG_RAM_SEL,
- REG_A4XX_VPC_DEBUG_RAM_SEL),
- REG_ADRENO_DEFINE(REG_ADRENO_VPC_DEBUG_RAM_READ,
- REG_A4XX_VPC_DEBUG_RAM_READ),
- REG_ADRENO_DEFINE(REG_ADRENO_RBBM_INT_CLEAR_CMD,
- REG_A4XX_RBBM_INT_CLEAR_CMD),
- REG_ADRENO_DEFINE(REG_ADRENO_VSC_SIZE_ADDRESS,
- REG_A4XX_VSC_SIZE_ADDRESS),
- REG_ADRENO_DEFINE(REG_ADRENO_VFD_CONTROL_0, REG_A4XX_VFD_CONTROL_0),
- REG_ADRENO_DEFINE(REG_ADRENO_SP_VS_PVT_MEM_ADDR_REG,
- REG_A4XX_SP_VS_PVT_MEM_ADDR),
- REG_ADRENO_DEFINE(REG_ADRENO_SP_FS_PVT_MEM_ADDR_REG,
- REG_A4XX_SP_FS_PVT_MEM_ADDR),
- REG_ADRENO_DEFINE(REG_ADRENO_SP_VS_OBJ_START_REG,
- REG_A4XX_SP_VS_OBJ_START),
- REG_ADRENO_DEFINE(REG_ADRENO_SP_FS_OBJ_START_REG,
- REG_A4XX_SP_FS_OBJ_START),
- REG_ADRENO_DEFINE(REG_ADRENO_RBBM_RBBM_CTL, REG_A4XX_RBBM_RBBM_CTL),
- REG_ADRENO_DEFINE(REG_ADRENO_RBBM_SW_RESET_CMD,
- REG_A4XX_RBBM_SW_RESET_CMD),
- REG_ADRENO_DEFINE(REG_ADRENO_UCHE_INVALIDATE0,
- REG_A4XX_UCHE_INVALIDATE0),
- REG_ADRENO_DEFINE(REG_ADRENO_RBBM_PERFCTR_LOAD_VALUE_LO,
- REG_A4XX_RBBM_PERFCTR_LOAD_VALUE_LO),
- REG_ADRENO_DEFINE(REG_ADRENO_RBBM_PERFCTR_LOAD_VALUE_HI,
- REG_A4XX_RBBM_PERFCTR_LOAD_VALUE_HI),
};
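The offsets table is trimmed to the entries the shared adreno code still uses, and the new REG_ADRENO_SKIP() entries mark the *_HI halves of 64-bit register pairs that a 32-bit target like A4XX does not provide. The exact encoding behind REG_ADRENO_DEFINE()/REG_ADRENO_SKIP() lives in adreno_gpu.h and is not shown in this diff; the sketch below only illustrates the lookup idea, under the assumption that an absent slot can be detected before access:

/* Sketch: consuming a per-GPU offsets table like the one above.
 * The real valid/skip encoding is in adreno_gpu.h; here an absent
 * slot is assumed to read back as 0. Illustrative only. */
static uint32_t example_adreno_read(struct msm_gpu *gpu,
                                    const unsigned int *offsets,
                                    enum adreno_regs reg)
{
        unsigned int offset = offsets[reg];

        if (!offset)    /* slot not defined (or skipped) for this GPU */
                return 0;
        return gpu_read(gpu, offset);
}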
static void a4xx_dump(struct msm_gpu *gpu)
@@ -587,16 +522,8 @@ static int a4xx_pm_suspend(struct msm_gpu *gpu) {
static int a4xx_get_timestamp(struct msm_gpu *gpu, uint64_t *value)
{
- uint32_t hi, lo, tmp;
-
- tmp = gpu_read(gpu, REG_A4XX_RBBM_PERFCTR_CP_0_HI);
- do {
- hi = tmp;
- lo = gpu_read(gpu, REG_A4XX_RBBM_PERFCTR_CP_0_LO);
- tmp = gpu_read(gpu, REG_A4XX_RBBM_PERFCTR_CP_0_HI);
- } while (tmp != hi);
-
- *value = (((uint64_t)hi) << 32) | lo;
+ *value = gpu_read64(gpu, REG_A4XX_RBBM_PERFCTR_CP_0_LO,
+ REG_A4XX_RBBM_PERFCTR_CP_0_HI);
return 0;
}
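The open-coded hi/lo/hi retry loop is replaced by gpu_read64(), which combines the LO/HI halves of a 64-bit counter pair. Its real definition lives in msm_gpu.h and is not part of this hunk; a minimal sketch of the intended semantics:

/* Sketch of a gpu_read64()-style helper: read both 32-bit halves
 * and combine them. The actual msm_gpu.h helper may differ in
 * detail; this only illustrates the semantics. */
static inline uint64_t example_gpu_read64(struct msm_gpu *gpu,
                                          uint32_t lo, uint32_t hi)
{
        uint64_t val = gpu_read(gpu, lo);

        val |= ((uint64_t)gpu_read(gpu, hi)) << 32;
        return val;
}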
@@ -672,7 +599,7 @@ struct msm_gpu *a4xx_gpu_init(struct drm_device *dev)
#endif
}
- if (!gpu->mmu) {
+ if (!gpu->aspace) {
/* TODO we think it is possible to configure the GPU to
* restrict access to VRAM carveout. But the required
* registers are unknown. For now just bail out and
diff --git a/drivers/gpu/drm/msm/adreno/a5xx.xml.h b/drivers/gpu/drm/msm/adreno/a5xx.xml.h
new file mode 100644
index 000000000000..b6fe763ddf34
--- /dev/null
+++ b/drivers/gpu/drm/msm/adreno/a5xx.xml.h
@@ -0,0 +1,3757 @@
+#ifndef A5XX_XML
+#define A5XX_XML
+
+/* Autogenerated file, DO NOT EDIT manually!
+
+This file was generated by the rules-ng-ng headergen tool in this git repository:
+http://github.com/freedreno/envytools/
+git clone https://github.com/freedreno/envytools.git
+
+The rules-ng-ng source files this header was generated from are:
+- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 431 bytes, from 2016-04-26 17:56:44)
+- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2016-02-10 17:07:21)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 32907 bytes, from 2016-11-26 23:01:08)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 12025 bytes, from 2016-11-26 23:01:08)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 22544 bytes, from 2016-11-26 23:01:08)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 83840 bytes, from 2016-11-26 23:01:08)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 110765 bytes, from 2016-11-26 23:01:48)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a5xx.xml ( 90321 bytes, from 2016-11-28 16:50:05)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/ocmem.xml ( 1773 bytes, from 2015-09-24 17:30:00)
+
+Copyright (C) 2013-2016 by the following authors:
+- Rob Clark <robdclark@gmail.com> (robclark)
+- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice (including the
+next paragraph) shall be included in all copies or substantial
+portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/
+
+
+enum a5xx_color_fmt {
+ RB5_R8_UNORM = 3,
+ RB5_R4G4B4A4_UNORM = 8,
+ RB5_R5G5B5A1_UNORM = 10,
+ RB5_R5G6B5_UNORM = 14,
+ RB5_R16_FLOAT = 23,
+ RB5_R8G8B8A8_UNORM = 48,
+ RB5_R8G8B8_UNORM = 49,
+ RB5_R8G8B8A8_UINT = 51,
+ RB5_R10G10B10A2_UINT = 58,
+ RB5_R16G16_FLOAT = 69,
+ RB5_R32_FLOAT = 74,
+ RB5_R16G16B16A16_FLOAT = 98,
+ RB5_R32G32_FLOAT = 103,
+ RB5_R32G32B32A32_FLOAT = 130,
+};
+
+enum a5xx_tile_mode {
+ TILE5_LINEAR = 0,
+ TILE5_2 = 2,
+ TILE5_3 = 3,
+};
+
+enum a5xx_vtx_fmt {
+ VFMT5_8_UNORM = 3,
+ VFMT5_8_SNORM = 4,
+ VFMT5_8_UINT = 5,
+ VFMT5_8_SINT = 6,
+ VFMT5_8_8_UNORM = 15,
+ VFMT5_8_8_SNORM = 16,
+ VFMT5_8_8_UINT = 17,
+ VFMT5_8_8_SINT = 18,
+ VFMT5_16_UNORM = 21,
+ VFMT5_16_SNORM = 22,
+ VFMT5_16_FLOAT = 23,
+ VFMT5_16_UINT = 24,
+ VFMT5_16_SINT = 25,
+ VFMT5_8_8_8_UNORM = 33,
+ VFMT5_8_8_8_SNORM = 34,
+ VFMT5_8_8_8_UINT = 35,
+ VFMT5_8_8_8_SINT = 36,
+ VFMT5_8_8_8_8_UNORM = 48,
+ VFMT5_8_8_8_8_SNORM = 50,
+ VFMT5_8_8_8_8_UINT = 51,
+ VFMT5_8_8_8_8_SINT = 52,
+ VFMT5_16_16_UNORM = 67,
+ VFMT5_16_16_SNORM = 68,
+ VFMT5_16_16_FLOAT = 69,
+ VFMT5_16_16_UINT = 70,
+ VFMT5_16_16_SINT = 71,
+ VFMT5_32_UNORM = 72,
+ VFMT5_32_SNORM = 73,
+ VFMT5_32_FLOAT = 74,
+ VFMT5_32_UINT = 75,
+ VFMT5_32_SINT = 76,
+ VFMT5_32_FIXED = 77,
+ VFMT5_16_16_16_UNORM = 88,
+ VFMT5_16_16_16_SNORM = 89,
+ VFMT5_16_16_16_FLOAT = 90,
+ VFMT5_16_16_16_UINT = 91,
+ VFMT5_16_16_16_SINT = 92,
+ VFMT5_16_16_16_16_UNORM = 96,
+ VFMT5_16_16_16_16_SNORM = 97,
+ VFMT5_16_16_16_16_FLOAT = 98,
+ VFMT5_16_16_16_16_UINT = 99,
+ VFMT5_16_16_16_16_SINT = 100,
+ VFMT5_32_32_UNORM = 101,
+ VFMT5_32_32_SNORM = 102,
+ VFMT5_32_32_FLOAT = 103,
+ VFMT5_32_32_UINT = 104,
+ VFMT5_32_32_SINT = 105,
+ VFMT5_32_32_FIXED = 106,
+ VFMT5_32_32_32_UNORM = 112,
+ VFMT5_32_32_32_SNORM = 113,
+ VFMT5_32_32_32_UINT = 114,
+ VFMT5_32_32_32_SINT = 115,
+ VFMT5_32_32_32_FLOAT = 116,
+ VFMT5_32_32_32_FIXED = 117,
+ VFMT5_32_32_32_32_UNORM = 128,
+ VFMT5_32_32_32_32_SNORM = 129,
+ VFMT5_32_32_32_32_FLOAT = 130,
+ VFMT5_32_32_32_32_UINT = 131,
+ VFMT5_32_32_32_32_SINT = 132,
+ VFMT5_32_32_32_32_FIXED = 133,
+};
+
+enum a5xx_tex_fmt {
+ TFMT5_A8_UNORM = 2,
+ TFMT5_8_UNORM = 3,
+ TFMT5_4_4_4_4_UNORM = 8,
+ TFMT5_5_5_5_1_UNORM = 10,
+ TFMT5_5_6_5_UNORM = 14,
+ TFMT5_8_8_UNORM = 15,
+ TFMT5_8_8_SNORM = 16,
+ TFMT5_L8_A8_UNORM = 19,
+ TFMT5_16_FLOAT = 23,
+ TFMT5_8_8_8_8_UNORM = 48,
+ TFMT5_8_8_8_UNORM = 49,
+ TFMT5_8_8_8_SNORM = 50,
+ TFMT5_9_9_9_E5_FLOAT = 53,
+ TFMT5_10_10_10_2_UNORM = 54,
+ TFMT5_11_11_10_FLOAT = 66,
+ TFMT5_16_16_FLOAT = 69,
+ TFMT5_32_FLOAT = 74,
+ TFMT5_16_16_16_16_FLOAT = 98,
+ TFMT5_32_32_FLOAT = 103,
+ TFMT5_32_32_32_32_FLOAT = 130,
+ TFMT5_X8Z24_UNORM = 160,
+};
+
+enum a5xx_tex_fetchsize {
+ TFETCH5_1_BYTE = 0,
+ TFETCH5_2_BYTE = 1,
+ TFETCH5_4_BYTE = 2,
+ TFETCH5_8_BYTE = 3,
+ TFETCH5_16_BYTE = 4,
+};
+
+enum a5xx_depth_format {
+ DEPTH5_NONE = 0,
+ DEPTH5_16 = 1,
+ DEPTH5_24_8 = 2,
+ DEPTH5_32 = 4,
+};
+
+enum a5xx_blit_buf {
+ BLIT_MRT0 = 0,
+ BLIT_MRT1 = 1,
+ BLIT_MRT2 = 2,
+ BLIT_MRT3 = 3,
+ BLIT_MRT4 = 4,
+ BLIT_MRT5 = 5,
+ BLIT_MRT6 = 6,
+ BLIT_MRT7 = 7,
+ BLIT_ZS = 8,
+ BLIT_Z32 = 9,
+};
+
+enum a5xx_tex_filter {
+ A5XX_TEX_NEAREST = 0,
+ A5XX_TEX_LINEAR = 1,
+ A5XX_TEX_ANISO = 2,
+};
+
+enum a5xx_tex_clamp {
+ A5XX_TEX_REPEAT = 0,
+ A5XX_TEX_CLAMP_TO_EDGE = 1,
+ A5XX_TEX_MIRROR_REPEAT = 2,
+ A5XX_TEX_CLAMP_TO_BORDER = 3,
+ A5XX_TEX_MIRROR_CLAMP = 4,
+};
+
+enum a5xx_tex_aniso {
+ A5XX_TEX_ANISO_1 = 0,
+ A5XX_TEX_ANISO_2 = 1,
+ A5XX_TEX_ANISO_4 = 2,
+ A5XX_TEX_ANISO_8 = 3,
+ A5XX_TEX_ANISO_16 = 4,
+};
+
+enum a5xx_tex_swiz {
+ A5XX_TEX_X = 0,
+ A5XX_TEX_Y = 1,
+ A5XX_TEX_Z = 2,
+ A5XX_TEX_W = 3,
+ A5XX_TEX_ZERO = 4,
+ A5XX_TEX_ONE = 5,
+};
+
+enum a5xx_tex_type {
+ A5XX_TEX_1D = 0,
+ A5XX_TEX_2D = 1,
+ A5XX_TEX_CUBE = 2,
+ A5XX_TEX_3D = 3,
+};
+
+#define A5XX_INT0_RBBM_GPU_IDLE 0x00000001
+#define A5XX_INT0_RBBM_AHB_ERROR 0x00000002
+#define A5XX_INT0_RBBM_TRANSFER_TIMEOUT 0x00000004
+#define A5XX_INT0_RBBM_ME_MS_TIMEOUT 0x00000008
+#define A5XX_INT0_RBBM_PFP_MS_TIMEOUT 0x00000010
+#define A5XX_INT0_RBBM_ETS_MS_TIMEOUT 0x00000020
+#define A5XX_INT0_RBBM_ATB_ASYNC_OVERFLOW 0x00000040
+#define A5XX_INT0_RBBM_GPC_ERROR 0x00000080
+#define A5XX_INT0_CP_SW 0x00000100
+#define A5XX_INT0_CP_HW_ERROR 0x00000200
+#define A5XX_INT0_CP_CCU_FLUSH_DEPTH_TS 0x00000400
+#define A5XX_INT0_CP_CCU_FLUSH_COLOR_TS 0x00000800
+#define A5XX_INT0_CP_CCU_RESOLVE_TS 0x00001000
+#define A5XX_INT0_CP_IB2 0x00002000
+#define A5XX_INT0_CP_IB1 0x00004000
+#define A5XX_INT0_CP_RB 0x00008000
+#define A5XX_INT0_CP_UNUSED_1 0x00010000
+#define A5XX_INT0_CP_RB_DONE_TS 0x00020000
+#define A5XX_INT0_CP_WT_DONE_TS 0x00040000
+#define A5XX_INT0_UNKNOWN_1 0x00080000
+#define A5XX_INT0_CP_CACHE_FLUSH_TS 0x00100000
+#define A5XX_INT0_UNUSED_2 0x00200000
+#define A5XX_INT0_RBBM_ATB_BUS_OVERFLOW 0x00400000
+#define A5XX_INT0_MISC_HANG_DETECT 0x00800000
+#define A5XX_INT0_UCHE_OOB_ACCESS 0x01000000
+#define A5XX_INT0_UCHE_TRAP_INTR 0x02000000
+#define A5XX_INT0_DEBBUS_INTR_0 0x04000000
+#define A5XX_INT0_DEBBUS_INTR_1 0x08000000
+#define A5XX_INT0_GPMU_VOLTAGE_DROOP 0x10000000
+#define A5XX_INT0_GPMU_FIRMWARE 0x20000000
+#define A5XX_INT0_ISDB_CPU_IRQ 0x40000000
+#define A5XX_INT0_ISDB_UNDER_DEBUG 0x80000000
+#define A5XX_CP_INT_CP_OPCODE_ERROR 0x00000001
+#define A5XX_CP_INT_CP_RESERVED_BIT_ERROR 0x00000002
+#define A5XX_CP_INT_CP_HW_FAULT_ERROR 0x00000004
+#define A5XX_CP_INT_CP_DMA_ERROR 0x00000008
+#define A5XX_CP_INT_CP_REGISTER_PROTECTION_ERROR 0x00000010
+#define A5XX_CP_INT_CP_AHB_ERROR 0x00000020
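The A5XX_INT0_* values are single bits, so an interrupt mask is built by OR'ing the sources of interest and writing the result to RBBM_INT_0_MASK (defined further down in this file). Which bits the a5xx driver actually enables is not established by this header; the selection below is purely illustrative:

/* Sketch: building an RBBM_INT_0_MASK value from the bits above.
 * EXAMPLE_A5XX_INT_MASK and its bit selection are illustrative. */
#define EXAMPLE_A5XX_INT_MASK \
        (A5XX_INT0_RBBM_AHB_ERROR | \
         A5XX_INT0_CP_HW_ERROR | \
         A5XX_INT0_CP_CACHE_FLUSH_TS | \
         A5XX_INT0_MISC_HANG_DETECT | \
         A5XX_INT0_UCHE_OOB_ACCESS)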
+#define REG_A5XX_CP_RB_BASE 0x00000800
+
+#define REG_A5XX_CP_RB_BASE_HI 0x00000801
+
+#define REG_A5XX_CP_RB_CNTL 0x00000802
+
+#define REG_A5XX_CP_RB_RPTR_ADDR 0x00000804
+
+#define REG_A5XX_CP_RB_RPTR_ADDR_HI 0x00000805
+
+#define REG_A5XX_CP_RB_RPTR 0x00000806
+
+#define REG_A5XX_CP_RB_WPTR 0x00000807
+
+#define REG_A5XX_CP_PFP_STAT_ADDR 0x00000808
+
+#define REG_A5XX_CP_PFP_STAT_DATA 0x00000809
+
+#define REG_A5XX_CP_DRAW_STATE_ADDR 0x0000080b
+
+#define REG_A5XX_CP_DRAW_STATE_DATA 0x0000080c
+
+#define REG_A5XX_CP_CRASH_SCRIPT_BASE_LO 0x00000817
+
+#define REG_A5XX_CP_CRASH_SCRIPT_BASE_HI 0x00000818
+
+#define REG_A5XX_CP_CRASH_DUMP_CNTL 0x00000819
+
+#define REG_A5XX_CP_ME_STAT_ADDR 0x0000081a
+
+#define REG_A5XX_CP_ROQ_THRESHOLDS_1 0x0000081f
+
+#define REG_A5XX_CP_ROQ_THRESHOLDS_2 0x00000820
+
+#define REG_A5XX_CP_ROQ_DBG_ADDR 0x00000821
+
+#define REG_A5XX_CP_ROQ_DBG_DATA 0x00000822
+
+#define REG_A5XX_CP_MEQ_DBG_ADDR 0x00000823
+
+#define REG_A5XX_CP_MEQ_DBG_DATA 0x00000824
+
+#define REG_A5XX_CP_MEQ_THRESHOLDS 0x00000825
+
+#define REG_A5XX_CP_MERCIU_SIZE 0x00000826
+
+#define REG_A5XX_CP_MERCIU_DBG_ADDR 0x00000827
+
+#define REG_A5XX_CP_MERCIU_DBG_DATA_1 0x00000828
+
+#define REG_A5XX_CP_MERCIU_DBG_DATA_2 0x00000829
+
+#define REG_A5XX_CP_PFP_UCODE_DBG_ADDR 0x0000082a
+
+#define REG_A5XX_CP_PFP_UCODE_DBG_DATA 0x0000082b
+
+#define REG_A5XX_CP_ME_UCODE_DBG_ADDR 0x0000082f
+
+#define REG_A5XX_CP_ME_UCODE_DBG_DATA 0x00000830
+
+#define REG_A5XX_CP_CNTL 0x00000831
+
+#define REG_A5XX_CP_PFP_ME_CNTL 0x00000832
+
+#define REG_A5XX_CP_CHICKEN_DBG 0x00000833
+
+#define REG_A5XX_CP_PFP_INSTR_BASE_LO 0x00000835
+
+#define REG_A5XX_CP_PFP_INSTR_BASE_HI 0x00000836
+
+#define REG_A5XX_CP_ME_INSTR_BASE_LO 0x00000838
+
+#define REG_A5XX_CP_ME_INSTR_BASE_HI 0x00000839
+
+#define REG_A5XX_CP_CONTEXT_SWITCH_CNTL 0x0000083b
+
+#define REG_A5XX_CP_CONTEXT_SWITCH_RESTORE_ADDR_LO 0x0000083c
+
+#define REG_A5XX_CP_CONTEXT_SWITCH_RESTORE_ADDR_HI 0x0000083d
+
+#define REG_A5XX_CP_CONTEXT_SWITCH_SAVE_ADDR_LO 0x0000083e
+
+#define REG_A5XX_CP_CONTEXT_SWITCH_SAVE_ADDR_HI 0x0000083f
+
+#define REG_A5XX_CP_CONTEXT_SWITCH_SMMU_INFO_LO 0x00000840
+
+#define REG_A5XX_CP_CONTEXT_SWITCH_SMMU_INFO_HI 0x00000841
+
+#define REG_A5XX_CP_ADDR_MODE_CNTL 0x00000860
+
+#define REG_A5XX_CP_ME_STAT_DATA 0x00000b14
+
+#define REG_A5XX_CP_WFI_PEND_CTR 0x00000b15
+
+#define REG_A5XX_CP_INTERRUPT_STATUS 0x00000b18
+
+#define REG_A5XX_CP_HW_FAULT 0x00000b1a
+
+#define REG_A5XX_CP_PROTECT_STATUS 0x00000b1c
+
+#define REG_A5XX_CP_IB1_BASE 0x00000b1f
+
+#define REG_A5XX_CP_IB1_BASE_HI 0x00000b20
+
+#define REG_A5XX_CP_IB1_BUFSZ 0x00000b21
+
+#define REG_A5XX_CP_IB2_BASE 0x00000b22
+
+#define REG_A5XX_CP_IB2_BASE_HI 0x00000b23
+
+#define REG_A5XX_CP_IB2_BUFSZ 0x00000b24
+
+static inline uint32_t REG_A5XX_CP_SCRATCH(uint32_t i0) { return 0x00000b78 + 0x1*i0; }
+
+static inline uint32_t REG_A5XX_CP_SCRATCH_REG(uint32_t i0) { return 0x00000b78 + 0x1*i0; }
+
+static inline uint32_t REG_A5XX_CP_PROTECT(uint32_t i0) { return 0x00000880 + 0x1*i0; }
+
+static inline uint32_t REG_A5XX_CP_PROTECT_REG(uint32_t i0) { return 0x00000880 + 0x1*i0; }
+#define A5XX_CP_PROTECT_REG_BASE_ADDR__MASK 0x0001ffff
+#define A5XX_CP_PROTECT_REG_BASE_ADDR__SHIFT 0
+static inline uint32_t A5XX_CP_PROTECT_REG_BASE_ADDR(uint32_t val)
+{
+ return ((val) << A5XX_CP_PROTECT_REG_BASE_ADDR__SHIFT) & A5XX_CP_PROTECT_REG_BASE_ADDR__MASK;
+}
+#define A5XX_CP_PROTECT_REG_MASK_LEN__MASK 0x1f000000
+#define A5XX_CP_PROTECT_REG_MASK_LEN__SHIFT 24
+static inline uint32_t A5XX_CP_PROTECT_REG_MASK_LEN(uint32_t val)
+{
+ return ((val) << A5XX_CP_PROTECT_REG_MASK_LEN__SHIFT) & A5XX_CP_PROTECT_REG_MASK_LEN__MASK;
+}
+#define A5XX_CP_PROTECT_REG_TRAP_WRITE 0x20000000
+#define A5XX_CP_PROTECT_REG_TRAP_READ 0x40000000
+
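REG_A5XX_CP_PROTECT_REG(i) is an indexed-register helper: the generated inline returns the address of the i-th entry of a register array starting at 0x880. Together with the BASE_ADDR/MASK_LEN packers and the TRAP bits above, one protect entry can be programmed in a single write; a sketch assuming the msm gpu_write() helper, with illustrative parameters:

/* Sketch: programming one CP protect entry via the indexed helper
 * and field packers above. Function name and values illustrative. */
static void example_set_protect(struct msm_gpu *gpu, int i,
                                uint32_t base, uint32_t log2_len)
{
        gpu_write(gpu, REG_A5XX_CP_PROTECT_REG(i),
                  A5XX_CP_PROTECT_REG_BASE_ADDR(base) |
                  A5XX_CP_PROTECT_REG_MASK_LEN(log2_len) |
                  A5XX_CP_PROTECT_REG_TRAP_WRITE |
                  A5XX_CP_PROTECT_REG_TRAP_READ);
}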
+#define REG_A5XX_CP_PROTECT_CNTL 0x000008a0
+
+#define REG_A5XX_CP_AHB_FAULT 0x00000b1b
+
+#define REG_A5XX_CP_PERFCTR_CP_SEL_0 0x00000bb0
+
+#define REG_A5XX_CP_PERFCTR_CP_SEL_1 0x00000bb1
+
+#define REG_A5XX_CP_PERFCTR_CP_SEL_2 0x00000bb2
+
+#define REG_A5XX_CP_PERFCTR_CP_SEL_3 0x00000bb3
+
+#define REG_A5XX_CP_PERFCTR_CP_SEL_4 0x00000bb4
+
+#define REG_A5XX_CP_PERFCTR_CP_SEL_5 0x00000bb5
+
+#define REG_A5XX_CP_PERFCTR_CP_SEL_6 0x00000bb6
+
+#define REG_A5XX_CP_PERFCTR_CP_SEL_7 0x00000bb7
+
+#define REG_A5XX_VSC_ADDR_MODE_CNTL 0x00000bc1
+
+#define REG_A5XX_CP_POWERCTR_CP_SEL_0 0x00000bba
+
+#define REG_A5XX_CP_POWERCTR_CP_SEL_1 0x00000bbb
+
+#define REG_A5XX_CP_POWERCTR_CP_SEL_2 0x00000bbc
+
+#define REG_A5XX_CP_POWERCTR_CP_SEL_3 0x00000bbd
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_SEL_A 0x00000004
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_SEL_B 0x00000005
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_SEL_C 0x00000006
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_SEL_D 0x00000007
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_CNTLT 0x00000008
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_CNTLM 0x00000009
+
+#define REG_A5XX_RBBM_CFG_DEBBUS_CTLTM_ENABLE_SHIFT 0x00000018
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_OPL 0x0000000a
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_OPE 0x0000000b
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_IVTL_0 0x0000000c
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_IVTL_1 0x0000000d
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_IVTL_2 0x0000000e
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_IVTL_3 0x0000000f
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_MASKL_0 0x00000010
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_MASKL_1 0x00000011
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_MASKL_2 0x00000012
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_MASKL_3 0x00000013
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_BYTEL_0 0x00000014
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_BYTEL_1 0x00000015
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_IVTE_0 0x00000016
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_IVTE_1 0x00000017
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_IVTE_2 0x00000018
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_IVTE_3 0x00000019
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_MASKE_0 0x0000001a
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_MASKE_1 0x0000001b
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_MASKE_2 0x0000001c
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_MASKE_3 0x0000001d
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_NIBBLEE 0x0000001e
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_PTRC0 0x0000001f
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_PTRC1 0x00000020
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_LOADREG 0x00000021
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_IDX 0x00000022
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_CLRC 0x00000023
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_LOADIVT 0x00000024
+
+#define REG_A5XX_RBBM_INTERFACE_HANG_INT_CNTL 0x0000002f
+
+#define REG_A5XX_RBBM_INT_CLEAR_CMD 0x00000037
+
+#define REG_A5XX_RBBM_INT_0_MASK 0x00000038
+#define A5XX_RBBM_INT_0_MASK_RBBM_GPU_IDLE 0x00000001
+#define A5XX_RBBM_INT_0_MASK_RBBM_AHB_ERROR 0x00000002
+#define A5XX_RBBM_INT_0_MASK_RBBM_TRANSFER_TIMEOUT 0x00000004
+#define A5XX_RBBM_INT_0_MASK_RBBM_ME_MS_TIMEOUT 0x00000008
+#define A5XX_RBBM_INT_0_MASK_RBBM_PFP_MS_TIMEOUT 0x00000010
+#define A5XX_RBBM_INT_0_MASK_RBBM_ETS_MS_TIMEOUT 0x00000020
+#define A5XX_RBBM_INT_0_MASK_RBBM_ATB_ASYNC_OVERFLOW 0x00000040
+#define A5XX_RBBM_INT_0_MASK_RBBM_GPC_ERROR 0x00000080
+#define A5XX_RBBM_INT_0_MASK_CP_SW 0x00000100
+#define A5XX_RBBM_INT_0_MASK_CP_HW_ERROR 0x00000200
+#define A5XX_RBBM_INT_0_MASK_CP_CCU_FLUSH_DEPTH_TS 0x00000400
+#define A5XX_RBBM_INT_0_MASK_CP_CCU_FLUSH_COLOR_TS 0x00000800
+#define A5XX_RBBM_INT_0_MASK_CP_CCU_RESOLVE_TS 0x00001000
+#define A5XX_RBBM_INT_0_MASK_CP_IB2 0x00002000
+#define A5XX_RBBM_INT_0_MASK_CP_IB1 0x00004000
+#define A5XX_RBBM_INT_0_MASK_CP_RB 0x00008000
+#define A5XX_RBBM_INT_0_MASK_CP_RB_DONE_TS 0x00020000
+#define A5XX_RBBM_INT_0_MASK_CP_WT_DONE_TS 0x00040000
+#define A5XX_RBBM_INT_0_MASK_CP_CACHE_FLUSH_TS 0x00100000
+#define A5XX_RBBM_INT_0_MASK_RBBM_ATB_BUS_OVERFLOW 0x00400000
+#define A5XX_RBBM_INT_0_MASK_MISC_HANG_DETECT 0x00800000
+#define A5XX_RBBM_INT_0_MASK_UCHE_OOB_ACCESS 0x01000000
+#define A5XX_RBBM_INT_0_MASK_UCHE_TRAP_INTR 0x02000000
+#define A5XX_RBBM_INT_0_MASK_DEBBUS_INTR_0 0x04000000
+#define A5XX_RBBM_INT_0_MASK_DEBBUS_INTR_1 0x08000000
+#define A5XX_RBBM_INT_0_MASK_GPMU_VOLTAGE_DROOP 0x10000000
+#define A5XX_RBBM_INT_0_MASK_GPMU_FIRMWARE 0x20000000
+#define A5XX_RBBM_INT_0_MASK_ISDB_CPU_IRQ 0x40000000
+#define A5XX_RBBM_INT_0_MASK_ISDB_UNDER_DEBUG 0x80000000
+
+#define REG_A5XX_RBBM_AHB_DBG_CNTL 0x0000003f
+
+#define REG_A5XX_RBBM_EXT_VBIF_DBG_CNTL 0x00000041
+
+#define REG_A5XX_RBBM_SW_RESET_CMD 0x00000043
+
+#define REG_A5XX_RBBM_BLOCK_SW_RESET_CMD 0x00000045
+
+#define REG_A5XX_RBBM_BLOCK_SW_RESET_CMD2 0x00000046
+
+#define REG_A5XX_RBBM_DBG_LO_HI_GPIO 0x00000048
+
+#define REG_A5XX_RBBM_EXT_TRACE_BUS_CNTL 0x00000049
+
+#define REG_A5XX_RBBM_CLOCK_CNTL_TP0 0x0000004a
+
+#define REG_A5XX_RBBM_CLOCK_CNTL_TP1 0x0000004b
+
+#define REG_A5XX_RBBM_CLOCK_CNTL_TP2 0x0000004c
+
+#define REG_A5XX_RBBM_CLOCK_CNTL_TP3 0x0000004d
+
+#define REG_A5XX_RBBM_CLOCK_CNTL2_TP0 0x0000004e
+
+#define REG_A5XX_RBBM_CLOCK_CNTL2_TP1 0x0000004f
+
+#define REG_A5XX_RBBM_CLOCK_CNTL2_TP2 0x00000050
+
+#define REG_A5XX_RBBM_CLOCK_CNTL2_TP3 0x00000051
+
+#define REG_A5XX_RBBM_CLOCK_CNTL3_TP0 0x00000052
+
+#define REG_A5XX_RBBM_CLOCK_CNTL3_TP1 0x00000053
+
+#define REG_A5XX_RBBM_CLOCK_CNTL3_TP2 0x00000054
+
+#define REG_A5XX_RBBM_CLOCK_CNTL3_TP3 0x00000055
+
+#define REG_A5XX_RBBM_READ_AHB_THROUGH_DBG 0x00000059
+
+#define REG_A5XX_RBBM_CLOCK_CNTL_UCHE 0x0000005a
+
+#define REG_A5XX_RBBM_CLOCK_CNTL2_UCHE 0x0000005b
+
+#define REG_A5XX_RBBM_CLOCK_CNTL3_UCHE 0x0000005c
+
+#define REG_A5XX_RBBM_CLOCK_CNTL4_UCHE 0x0000005d
+
+#define REG_A5XX_RBBM_CLOCK_HYST_UCHE 0x0000005e
+
+#define REG_A5XX_RBBM_CLOCK_DELAY_UCHE 0x0000005f
+
+#define REG_A5XX_RBBM_CLOCK_MODE_GPC 0x00000060
+
+#define REG_A5XX_RBBM_CLOCK_DELAY_GPC 0x00000061
+
+#define REG_A5XX_RBBM_CLOCK_HYST_GPC 0x00000062
+
+#define REG_A5XX_RBBM_CLOCK_CNTL_TSE_RAS_RBBM 0x00000063
+
+#define REG_A5XX_RBBM_CLOCK_HYST_TSE_RAS_RBBM 0x00000064
+
+#define REG_A5XX_RBBM_CLOCK_DELAY_TSE_RAS_RBBM 0x00000065
+
+#define REG_A5XX_RBBM_CLOCK_DELAY_HLSQ 0x00000066
+
+#define REG_A5XX_RBBM_CLOCK_CNTL 0x00000067
+
+#define REG_A5XX_RBBM_CLOCK_CNTL_SP0 0x00000068
+
+#define REG_A5XX_RBBM_CLOCK_CNTL_SP1 0x00000069
+
+#define REG_A5XX_RBBM_CLOCK_CNTL_SP2 0x0000006a
+
+#define REG_A5XX_RBBM_CLOCK_CNTL_SP3 0x0000006b
+
+#define REG_A5XX_RBBM_CLOCK_CNTL2_SP0 0x0000006c
+
+#define REG_A5XX_RBBM_CLOCK_CNTL2_SP1 0x0000006d
+
+#define REG_A5XX_RBBM_CLOCK_CNTL2_SP2 0x0000006e
+
+#define REG_A5XX_RBBM_CLOCK_CNTL2_SP3 0x0000006f
+
+#define REG_A5XX_RBBM_CLOCK_HYST_SP0 0x00000070
+
+#define REG_A5XX_RBBM_CLOCK_HYST_SP1 0x00000071
+
+#define REG_A5XX_RBBM_CLOCK_HYST_SP2 0x00000072
+
+#define REG_A5XX_RBBM_CLOCK_HYST_SP3 0x00000073
+
+#define REG_A5XX_RBBM_CLOCK_DELAY_SP0 0x00000074
+
+#define REG_A5XX_RBBM_CLOCK_DELAY_SP1 0x00000075
+
+#define REG_A5XX_RBBM_CLOCK_DELAY_SP2 0x00000076
+
+#define REG_A5XX_RBBM_CLOCK_DELAY_SP3 0x00000077
+
+#define REG_A5XX_RBBM_CLOCK_CNTL_RB0 0x00000078
+
+#define REG_A5XX_RBBM_CLOCK_CNTL_RB1 0x00000079
+
+#define REG_A5XX_RBBM_CLOCK_CNTL_RB2 0x0000007a
+
+#define REG_A5XX_RBBM_CLOCK_CNTL_RB3 0x0000007b
+
+#define REG_A5XX_RBBM_CLOCK_CNTL2_RB0 0x0000007c
+
+#define REG_A5XX_RBBM_CLOCK_CNTL2_RB1 0x0000007d
+
+#define REG_A5XX_RBBM_CLOCK_CNTL2_RB2 0x0000007e
+
+#define REG_A5XX_RBBM_CLOCK_CNTL2_RB3 0x0000007f
+
+#define REG_A5XX_RBBM_CLOCK_HYST_RAC 0x00000080
+
+#define REG_A5XX_RBBM_CLOCK_DELAY_RAC 0x00000081
+
+#define REG_A5XX_RBBM_CLOCK_CNTL_CCU0 0x00000082
+
+#define REG_A5XX_RBBM_CLOCK_CNTL_CCU1 0x00000083
+
+#define REG_A5XX_RBBM_CLOCK_CNTL_CCU2 0x00000084
+
+#define REG_A5XX_RBBM_CLOCK_CNTL_CCU3 0x00000085
+
+#define REG_A5XX_RBBM_CLOCK_HYST_RB_CCU0 0x00000086
+
+#define REG_A5XX_RBBM_CLOCK_HYST_RB_CCU1 0x00000087
+
+#define REG_A5XX_RBBM_CLOCK_HYST_RB_CCU2 0x00000088
+
+#define REG_A5XX_RBBM_CLOCK_HYST_RB_CCU3 0x00000089
+
+#define REG_A5XX_RBBM_CLOCK_CNTL_RAC 0x0000008a
+
+#define REG_A5XX_RBBM_CLOCK_CNTL2_RAC 0x0000008b
+
+#define REG_A5XX_RBBM_CLOCK_DELAY_RB_CCU_L1_0 0x0000008c
+
+#define REG_A5XX_RBBM_CLOCK_DELAY_RB_CCU_L1_1 0x0000008d
+
+#define REG_A5XX_RBBM_CLOCK_DELAY_RB_CCU_L1_2 0x0000008e
+
+#define REG_A5XX_RBBM_CLOCK_DELAY_RB_CCU_L1_3 0x0000008f
+
+#define REG_A5XX_RBBM_CLOCK_HYST_VFD 0x00000090
+
+#define REG_A5XX_RBBM_CLOCK_MODE_VFD 0x00000091
+
+#define REG_A5XX_RBBM_CLOCK_DELAY_VFD 0x00000092
+
+#define REG_A5XX_RBBM_AHB_CNTL0 0x00000093
+
+#define REG_A5XX_RBBM_AHB_CNTL1 0x00000094
+
+#define REG_A5XX_RBBM_AHB_CNTL2 0x00000095
+
+#define REG_A5XX_RBBM_AHB_CMD 0x00000096
+
+#define REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL11 0x0000009c
+
+#define REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL12 0x0000009d
+
+#define REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL13 0x0000009e
+
+#define REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL14 0x0000009f
+
+#define REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL15 0x000000a0
+
+#define REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL16 0x000000a1
+
+#define REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL17 0x000000a2
+
+#define REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL18 0x000000a3
+
+#define REG_A5XX_RBBM_CLOCK_DELAY_TP0 0x000000a4
+
+#define REG_A5XX_RBBM_CLOCK_DELAY_TP1 0x000000a5
+
+#define REG_A5XX_RBBM_CLOCK_DELAY_TP2 0x000000a6
+
+#define REG_A5XX_RBBM_CLOCK_DELAY_TP3 0x000000a7
+
+#define REG_A5XX_RBBM_CLOCK_DELAY2_TP0 0x000000a8
+
+#define REG_A5XX_RBBM_CLOCK_DELAY2_TP1 0x000000a9
+
+#define REG_A5XX_RBBM_CLOCK_DELAY2_TP2 0x000000aa
+
+#define REG_A5XX_RBBM_CLOCK_DELAY2_TP3 0x000000ab
+
+#define REG_A5XX_RBBM_CLOCK_DELAY3_TP0 0x000000ac
+
+#define REG_A5XX_RBBM_CLOCK_DELAY3_TP1 0x000000ad
+
+#define REG_A5XX_RBBM_CLOCK_DELAY3_TP2 0x000000ae
+
+#define REG_A5XX_RBBM_CLOCK_DELAY3_TP3 0x000000af
+
+#define REG_A5XX_RBBM_CLOCK_HYST_TP0 0x000000b0
+
+#define REG_A5XX_RBBM_CLOCK_HYST_TP1 0x000000b1
+
+#define REG_A5XX_RBBM_CLOCK_HYST_TP2 0x000000b2
+
+#define REG_A5XX_RBBM_CLOCK_HYST_TP3 0x000000b3
+
+#define REG_A5XX_RBBM_CLOCK_HYST2_TP0 0x000000b4
+
+#define REG_A5XX_RBBM_CLOCK_HYST2_TP1 0x000000b5
+
+#define REG_A5XX_RBBM_CLOCK_HYST2_TP2 0x000000b6
+
+#define REG_A5XX_RBBM_CLOCK_HYST2_TP3 0x000000b7
+
+#define REG_A5XX_RBBM_CLOCK_HYST3_TP0 0x000000b8
+
+#define REG_A5XX_RBBM_CLOCK_HYST3_TP1 0x000000b9
+
+#define REG_A5XX_RBBM_CLOCK_HYST3_TP2 0x000000ba
+
+#define REG_A5XX_RBBM_CLOCK_HYST3_TP3 0x000000bb
+
+#define REG_A5XX_RBBM_CLOCK_CNTL_GPMU 0x000000c8
+
+#define REG_A5XX_RBBM_CLOCK_DELAY_GPMU 0x000000c9
+
+#define REG_A5XX_RBBM_CLOCK_HYST_GPMU 0x000000ca
+
+#define REG_A5XX_RBBM_PERFCTR_CP_0_LO 0x000003a0
+
+#define REG_A5XX_RBBM_PERFCTR_CP_0_HI 0x000003a1
+
+#define REG_A5XX_RBBM_PERFCTR_CP_1_LO 0x000003a2
+
+#define REG_A5XX_RBBM_PERFCTR_CP_1_HI 0x000003a3
+
+#define REG_A5XX_RBBM_PERFCTR_CP_2_LO 0x000003a4
+
+#define REG_A5XX_RBBM_PERFCTR_CP_2_HI 0x000003a5
+
+#define REG_A5XX_RBBM_PERFCTR_CP_3_LO 0x000003a6
+
+#define REG_A5XX_RBBM_PERFCTR_CP_3_HI 0x000003a7
+
+#define REG_A5XX_RBBM_PERFCTR_CP_4_LO 0x000003a8
+
+#define REG_A5XX_RBBM_PERFCTR_CP_4_HI 0x000003a9
+
+#define REG_A5XX_RBBM_PERFCTR_CP_5_LO 0x000003aa
+
+#define REG_A5XX_RBBM_PERFCTR_CP_5_HI 0x000003ab
+
+#define REG_A5XX_RBBM_PERFCTR_CP_6_LO 0x000003ac
+
+#define REG_A5XX_RBBM_PERFCTR_CP_6_HI 0x000003ad
+
+#define REG_A5XX_RBBM_PERFCTR_CP_7_LO 0x000003ae
+
+#define REG_A5XX_RBBM_PERFCTR_CP_7_HI 0x000003af
+
+#define REG_A5XX_RBBM_PERFCTR_RBBM_0_LO 0x000003b0
+
+#define REG_A5XX_RBBM_PERFCTR_RBBM_0_HI 0x000003b1
+
+#define REG_A5XX_RBBM_PERFCTR_RBBM_1_LO 0x000003b2
+
+#define REG_A5XX_RBBM_PERFCTR_RBBM_1_HI 0x000003b3
+
+#define REG_A5XX_RBBM_PERFCTR_RBBM_2_LO 0x000003b4
+
+#define REG_A5XX_RBBM_PERFCTR_RBBM_2_HI 0x000003b5
+
+#define REG_A5XX_RBBM_PERFCTR_RBBM_3_LO 0x000003b6
+
+#define REG_A5XX_RBBM_PERFCTR_RBBM_3_HI 0x000003b7
+
+#define REG_A5XX_RBBM_PERFCTR_PC_0_LO 0x000003b8
+
+#define REG_A5XX_RBBM_PERFCTR_PC_0_HI 0x000003b9
+
+#define REG_A5XX_RBBM_PERFCTR_PC_1_LO 0x000003ba
+
+#define REG_A5XX_RBBM_PERFCTR_PC_1_HI 0x000003bb
+
+#define REG_A5XX_RBBM_PERFCTR_PC_2_LO 0x000003bc
+
+#define REG_A5XX_RBBM_PERFCTR_PC_2_HI 0x000003bd
+
+#define REG_A5XX_RBBM_PERFCTR_PC_3_LO 0x000003be
+
+#define REG_A5XX_RBBM_PERFCTR_PC_3_HI 0x000003bf
+
+#define REG_A5XX_RBBM_PERFCTR_PC_4_LO 0x000003c0
+
+#define REG_A5XX_RBBM_PERFCTR_PC_4_HI 0x000003c1
+
+#define REG_A5XX_RBBM_PERFCTR_PC_5_LO 0x000003c2
+
+#define REG_A5XX_RBBM_PERFCTR_PC_5_HI 0x000003c3
+
+#define REG_A5XX_RBBM_PERFCTR_PC_6_LO 0x000003c4
+
+#define REG_A5XX_RBBM_PERFCTR_PC_6_HI 0x000003c5
+
+#define REG_A5XX_RBBM_PERFCTR_PC_7_LO 0x000003c6
+
+#define REG_A5XX_RBBM_PERFCTR_PC_7_HI 0x000003c7
+
+#define REG_A5XX_RBBM_PERFCTR_VFD_0_LO 0x000003c8
+
+#define REG_A5XX_RBBM_PERFCTR_VFD_0_HI 0x000003c9
+
+#define REG_A5XX_RBBM_PERFCTR_VFD_1_LO 0x000003ca
+
+#define REG_A5XX_RBBM_PERFCTR_VFD_1_HI 0x000003cb
+
+#define REG_A5XX_RBBM_PERFCTR_VFD_2_LO 0x000003cc
+
+#define REG_A5XX_RBBM_PERFCTR_VFD_2_HI 0x000003cd
+
+#define REG_A5XX_RBBM_PERFCTR_VFD_3_LO 0x000003ce
+
+#define REG_A5XX_RBBM_PERFCTR_VFD_3_HI 0x000003cf
+
+#define REG_A5XX_RBBM_PERFCTR_VFD_4_LO 0x000003d0
+
+#define REG_A5XX_RBBM_PERFCTR_VFD_4_HI 0x000003d1
+
+#define REG_A5XX_RBBM_PERFCTR_VFD_5_LO 0x000003d2
+
+#define REG_A5XX_RBBM_PERFCTR_VFD_5_HI 0x000003d3
+
+#define REG_A5XX_RBBM_PERFCTR_VFD_6_LO 0x000003d4
+
+#define REG_A5XX_RBBM_PERFCTR_VFD_6_HI 0x000003d5
+
+#define REG_A5XX_RBBM_PERFCTR_VFD_7_LO 0x000003d6
+
+#define REG_A5XX_RBBM_PERFCTR_VFD_7_HI 0x000003d7
+
+#define REG_A5XX_RBBM_PERFCTR_HLSQ_0_LO 0x000003d8
+
+#define REG_A5XX_RBBM_PERFCTR_HLSQ_0_HI 0x000003d9
+
+#define REG_A5XX_RBBM_PERFCTR_HLSQ_1_LO 0x000003da
+
+#define REG_A5XX_RBBM_PERFCTR_HLSQ_1_HI 0x000003db
+
+#define REG_A5XX_RBBM_PERFCTR_HLSQ_2_LO 0x000003dc
+
+#define REG_A5XX_RBBM_PERFCTR_HLSQ_2_HI 0x000003dd
+
+#define REG_A5XX_RBBM_PERFCTR_HLSQ_3_LO 0x000003de
+
+#define REG_A5XX_RBBM_PERFCTR_HLSQ_3_HI 0x000003df
+
+#define REG_A5XX_RBBM_PERFCTR_HLSQ_4_LO 0x000003e0
+
+#define REG_A5XX_RBBM_PERFCTR_HLSQ_4_HI 0x000003e1
+
+#define REG_A5XX_RBBM_PERFCTR_HLSQ_5_LO 0x000003e2
+
+#define REG_A5XX_RBBM_PERFCTR_HLSQ_5_HI 0x000003e3
+
+#define REG_A5XX_RBBM_PERFCTR_HLSQ_6_LO 0x000003e4
+
+#define REG_A5XX_RBBM_PERFCTR_HLSQ_6_HI 0x000003e5
+
+#define REG_A5XX_RBBM_PERFCTR_HLSQ_7_LO 0x000003e6
+
+#define REG_A5XX_RBBM_PERFCTR_HLSQ_7_HI 0x000003e7
+
+#define REG_A5XX_RBBM_PERFCTR_VPC_0_LO 0x000003e8
+
+#define REG_A5XX_RBBM_PERFCTR_VPC_0_HI 0x000003e9
+
+#define REG_A5XX_RBBM_PERFCTR_VPC_1_LO 0x000003ea
+
+#define REG_A5XX_RBBM_PERFCTR_VPC_1_HI 0x000003eb
+
+#define REG_A5XX_RBBM_PERFCTR_VPC_2_LO 0x000003ec
+
+#define REG_A5XX_RBBM_PERFCTR_VPC_2_HI 0x000003ed
+
+#define REG_A5XX_RBBM_PERFCTR_VPC_3_LO 0x000003ee
+
+#define REG_A5XX_RBBM_PERFCTR_VPC_3_HI 0x000003ef
+
+#define REG_A5XX_RBBM_PERFCTR_CCU_0_LO 0x000003f0
+
+#define REG_A5XX_RBBM_PERFCTR_CCU_0_HI 0x000003f1
+
+#define REG_A5XX_RBBM_PERFCTR_CCU_1_LO 0x000003f2
+
+#define REG_A5XX_RBBM_PERFCTR_CCU_1_HI 0x000003f3
+
+#define REG_A5XX_RBBM_PERFCTR_CCU_2_LO 0x000003f4
+
+#define REG_A5XX_RBBM_PERFCTR_CCU_2_HI 0x000003f5
+
+#define REG_A5XX_RBBM_PERFCTR_CCU_3_LO 0x000003f6
+
+#define REG_A5XX_RBBM_PERFCTR_CCU_3_HI 0x000003f7
+
+#define REG_A5XX_RBBM_PERFCTR_TSE_0_LO 0x000003f8
+
+#define REG_A5XX_RBBM_PERFCTR_TSE_0_HI 0x000003f9
+
+#define REG_A5XX_RBBM_PERFCTR_TSE_1_LO 0x000003fa
+
+#define REG_A5XX_RBBM_PERFCTR_TSE_1_HI 0x000003fb
+
+#define REG_A5XX_RBBM_PERFCTR_TSE_2_LO 0x000003fc
+
+#define REG_A5XX_RBBM_PERFCTR_TSE_2_HI 0x000003fd
+
+#define REG_A5XX_RBBM_PERFCTR_TSE_3_LO 0x000003fe
+
+#define REG_A5XX_RBBM_PERFCTR_TSE_3_HI 0x000003ff
+
+#define REG_A5XX_RBBM_PERFCTR_RAS_0_LO 0x00000400
+
+#define REG_A5XX_RBBM_PERFCTR_RAS_0_HI 0x00000401
+
+#define REG_A5XX_RBBM_PERFCTR_RAS_1_LO 0x00000402
+
+#define REG_A5XX_RBBM_PERFCTR_RAS_1_HI 0x00000403
+
+#define REG_A5XX_RBBM_PERFCTR_RAS_2_LO 0x00000404
+
+#define REG_A5XX_RBBM_PERFCTR_RAS_2_HI 0x00000405
+
+#define REG_A5XX_RBBM_PERFCTR_RAS_3_LO 0x00000406
+
+#define REG_A5XX_RBBM_PERFCTR_RAS_3_HI 0x00000407
+
+#define REG_A5XX_RBBM_PERFCTR_UCHE_0_LO 0x00000408
+
+#define REG_A5XX_RBBM_PERFCTR_UCHE_0_HI 0x00000409
+
+#define REG_A5XX_RBBM_PERFCTR_UCHE_1_LO 0x0000040a
+
+#define REG_A5XX_RBBM_PERFCTR_UCHE_1_HI 0x0000040b
+
+#define REG_A5XX_RBBM_PERFCTR_UCHE_2_LO 0x0000040c
+
+#define REG_A5XX_RBBM_PERFCTR_UCHE_2_HI 0x0000040d
+
+#define REG_A5XX_RBBM_PERFCTR_UCHE_3_LO 0x0000040e
+
+#define REG_A5XX_RBBM_PERFCTR_UCHE_3_HI 0x0000040f
+
+#define REG_A5XX_RBBM_PERFCTR_UCHE_4_LO 0x00000410
+
+#define REG_A5XX_RBBM_PERFCTR_UCHE_4_HI 0x00000411
+
+#define REG_A5XX_RBBM_PERFCTR_UCHE_5_LO 0x00000412
+
+#define REG_A5XX_RBBM_PERFCTR_UCHE_5_HI 0x00000413
+
+#define REG_A5XX_RBBM_PERFCTR_UCHE_6_LO 0x00000414
+
+#define REG_A5XX_RBBM_PERFCTR_UCHE_6_HI 0x00000415
+
+#define REG_A5XX_RBBM_PERFCTR_UCHE_7_LO 0x00000416
+
+#define REG_A5XX_RBBM_PERFCTR_UCHE_7_HI 0x00000417
+
+#define REG_A5XX_RBBM_PERFCTR_TP_0_LO 0x00000418
+
+#define REG_A5XX_RBBM_PERFCTR_TP_0_HI 0x00000419
+
+#define REG_A5XX_RBBM_PERFCTR_TP_1_LO 0x0000041a
+
+#define REG_A5XX_RBBM_PERFCTR_TP_1_HI 0x0000041b
+
+#define REG_A5XX_RBBM_PERFCTR_TP_2_LO 0x0000041c
+
+#define REG_A5XX_RBBM_PERFCTR_TP_2_HI 0x0000041d
+
+#define REG_A5XX_RBBM_PERFCTR_TP_3_LO 0x0000041e
+
+#define REG_A5XX_RBBM_PERFCTR_TP_3_HI 0x0000041f
+
+#define REG_A5XX_RBBM_PERFCTR_TP_4_LO 0x00000420
+
+#define REG_A5XX_RBBM_PERFCTR_TP_4_HI 0x00000421
+
+#define REG_A5XX_RBBM_PERFCTR_TP_5_LO 0x00000422
+
+#define REG_A5XX_RBBM_PERFCTR_TP_5_HI 0x00000423
+
+#define REG_A5XX_RBBM_PERFCTR_TP_6_LO 0x00000424
+
+#define REG_A5XX_RBBM_PERFCTR_TP_6_HI 0x00000425
+
+#define REG_A5XX_RBBM_PERFCTR_TP_7_LO 0x00000426
+
+#define REG_A5XX_RBBM_PERFCTR_TP_7_HI 0x00000427
+
+#define REG_A5XX_RBBM_PERFCTR_SP_0_LO 0x00000428
+
+#define REG_A5XX_RBBM_PERFCTR_SP_0_HI 0x00000429
+
+#define REG_A5XX_RBBM_PERFCTR_SP_1_LO 0x0000042a
+
+#define REG_A5XX_RBBM_PERFCTR_SP_1_HI 0x0000042b
+
+#define REG_A5XX_RBBM_PERFCTR_SP_2_LO 0x0000042c
+
+#define REG_A5XX_RBBM_PERFCTR_SP_2_HI 0x0000042d
+
+#define REG_A5XX_RBBM_PERFCTR_SP_3_LO 0x0000042e
+
+#define REG_A5XX_RBBM_PERFCTR_SP_3_HI 0x0000042f
+
+#define REG_A5XX_RBBM_PERFCTR_SP_4_LO 0x00000430
+
+#define REG_A5XX_RBBM_PERFCTR_SP_4_HI 0x00000431
+
+#define REG_A5XX_RBBM_PERFCTR_SP_5_LO 0x00000432
+
+#define REG_A5XX_RBBM_PERFCTR_SP_5_HI 0x00000433
+
+#define REG_A5XX_RBBM_PERFCTR_SP_6_LO 0x00000434
+
+#define REG_A5XX_RBBM_PERFCTR_SP_6_HI 0x00000435
+
+#define REG_A5XX_RBBM_PERFCTR_SP_7_LO 0x00000436
+
+#define REG_A5XX_RBBM_PERFCTR_SP_7_HI 0x00000437
+
+#define REG_A5XX_RBBM_PERFCTR_SP_8_LO 0x00000438
+
+#define REG_A5XX_RBBM_PERFCTR_SP_8_HI 0x00000439
+
+#define REG_A5XX_RBBM_PERFCTR_SP_9_LO 0x0000043a
+
+#define REG_A5XX_RBBM_PERFCTR_SP_9_HI 0x0000043b
+
+#define REG_A5XX_RBBM_PERFCTR_SP_10_LO 0x0000043c
+
+#define REG_A5XX_RBBM_PERFCTR_SP_10_HI 0x0000043d
+
+#define REG_A5XX_RBBM_PERFCTR_SP_11_LO 0x0000043e
+
+#define REG_A5XX_RBBM_PERFCTR_SP_11_HI 0x0000043f
+
+#define REG_A5XX_RBBM_PERFCTR_RB_0_LO 0x00000440
+
+#define REG_A5XX_RBBM_PERFCTR_RB_0_HI 0x00000441
+
+#define REG_A5XX_RBBM_PERFCTR_RB_1_LO 0x00000442
+
+#define REG_A5XX_RBBM_PERFCTR_RB_1_HI 0x00000443
+
+#define REG_A5XX_RBBM_PERFCTR_RB_2_LO 0x00000444
+
+#define REG_A5XX_RBBM_PERFCTR_RB_2_HI 0x00000445
+
+#define REG_A5XX_RBBM_PERFCTR_RB_3_LO 0x00000446
+
+#define REG_A5XX_RBBM_PERFCTR_RB_3_HI 0x00000447
+
+#define REG_A5XX_RBBM_PERFCTR_RB_4_LO 0x00000448
+
+#define REG_A5XX_RBBM_PERFCTR_RB_4_HI 0x00000449
+
+#define REG_A5XX_RBBM_PERFCTR_RB_5_LO 0x0000044a
+
+#define REG_A5XX_RBBM_PERFCTR_RB_5_HI 0x0000044b
+
+#define REG_A5XX_RBBM_PERFCTR_RB_6_LO 0x0000044c
+
+#define REG_A5XX_RBBM_PERFCTR_RB_6_HI 0x0000044d
+
+#define REG_A5XX_RBBM_PERFCTR_RB_7_LO 0x0000044e
+
+#define REG_A5XX_RBBM_PERFCTR_RB_7_HI 0x0000044f
+
+#define REG_A5XX_RBBM_PERFCTR_VSC_0_LO 0x00000450
+
+#define REG_A5XX_RBBM_PERFCTR_VSC_0_HI 0x00000451
+
+#define REG_A5XX_RBBM_PERFCTR_VSC_1_LO 0x00000452
+
+#define REG_A5XX_RBBM_PERFCTR_VSC_1_HI 0x00000453
+
+#define REG_A5XX_RBBM_PERFCTR_LRZ_0_LO 0x00000454
+
+#define REG_A5XX_RBBM_PERFCTR_LRZ_0_HI 0x00000455
+
+#define REG_A5XX_RBBM_PERFCTR_LRZ_1_LO 0x00000456
+
+#define REG_A5XX_RBBM_PERFCTR_LRZ_1_HI 0x00000457
+
+#define REG_A5XX_RBBM_PERFCTR_LRZ_2_LO 0x00000458
+
+#define REG_A5XX_RBBM_PERFCTR_LRZ_2_HI 0x00000459
+
+#define REG_A5XX_RBBM_PERFCTR_LRZ_3_LO 0x0000045a
+
+#define REG_A5XX_RBBM_PERFCTR_LRZ_3_HI 0x0000045b
+
+#define REG_A5XX_RBBM_PERFCTR_CMP_0_LO 0x0000045c
+
+#define REG_A5XX_RBBM_PERFCTR_CMP_0_HI 0x0000045d
+
+#define REG_A5XX_RBBM_PERFCTR_CMP_1_LO 0x0000045e
+
+#define REG_A5XX_RBBM_PERFCTR_CMP_1_HI 0x0000045f
+
+#define REG_A5XX_RBBM_PERFCTR_CMP_2_LO 0x00000460
+
+#define REG_A5XX_RBBM_PERFCTR_CMP_2_HI 0x00000461
+
+#define REG_A5XX_RBBM_PERFCTR_CMP_3_LO 0x00000462
+
+#define REG_A5XX_RBBM_PERFCTR_CMP_3_HI 0x00000463
+
+#define REG_A5XX_RBBM_ALWAYSON_COUNTER_LO 0x000004d2
+
+#define REG_A5XX_RBBM_ALWAYSON_COUNTER_HI 0x000004d3
+
+#define REG_A5XX_RBBM_STATUS 0x000004f5
+#define A5XX_RBBM_STATUS_GPU_BUSY_IGN_AHB 0x80000000
+#define A5XX_RBBM_STATUS_GPU_BUSY_IGN_AHB_CP 0x40000000
+#define A5XX_RBBM_STATUS_HLSQ_BUSY 0x20000000
+#define A5XX_RBBM_STATUS_VSC_BUSY 0x10000000
+#define A5XX_RBBM_STATUS_TPL1_BUSY 0x08000000
+#define A5XX_RBBM_STATUS_SP_BUSY 0x04000000
+#define A5XX_RBBM_STATUS_UCHE_BUSY 0x02000000
+#define A5XX_RBBM_STATUS_VPC_BUSY 0x01000000
+#define A5XX_RBBM_STATUS_VFDP_BUSY 0x00800000
+#define A5XX_RBBM_STATUS_VFD_BUSY 0x00400000
+#define A5XX_RBBM_STATUS_TESS_BUSY 0x00200000
+#define A5XX_RBBM_STATUS_PC_VSD_BUSY 0x00100000
+#define A5XX_RBBM_STATUS_PC_DCALL_BUSY 0x00080000
+#define A5XX_RBBM_STATUS_GPMU_SLAVE_BUSY 0x00040000
+#define A5XX_RBBM_STATUS_DCOM_BUSY 0x00020000
+#define A5XX_RBBM_STATUS_COM_BUSY 0x00010000
+#define A5XX_RBBM_STATUS_LRZ_BUZY 0x00008000
+#define A5XX_RBBM_STATUS_A2D_DSP_BUSY 0x00004000
+#define A5XX_RBBM_STATUS_CCUFCHE_BUSY 0x00002000
+#define A5XX_RBBM_STATUS_RB_BUSY 0x00001000
+#define A5XX_RBBM_STATUS_RAS_BUSY 0x00000800
+#define A5XX_RBBM_STATUS_TSE_BUSY 0x00000400
+#define A5XX_RBBM_STATUS_VBIF_BUSY 0x00000200
+#define A5XX_RBBM_STATUS_GPU_BUSY_IGN_AHB_HYST 0x00000100
+#define A5XX_RBBM_STATUS_CP_BUSY_IGN_HYST 0x00000080
+#define A5XX_RBBM_STATUS_CP_BUSY 0x00000040
+#define A5XX_RBBM_STATUS_GPMU_MASTER_BUSY 0x00000020
+#define A5XX_RBBM_STATUS_CP_CRASH_BUSY 0x00000010
+#define A5XX_RBBM_STATUS_CP_ETS_BUSY 0x00000008
+#define A5XX_RBBM_STATUS_CP_PFP_BUSY 0x00000004
+#define A5XX_RBBM_STATUS_CP_ME_BUSY 0x00000002
+#define A5XX_RBBM_STATUS_HI_BUSY 0x00000001
+
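These status bits support the same busy-poll idiom as a4xx_idle() earlier in this patch. Which subset a5xx waits on is not established by this header; the sketch below uses spin_until() from adreno_gpu.h with an illustrative bit selection:

/* Sketch: polling RBBM_STATUS until selected engines go idle,
 * mirroring a4xx_idle(). The bit selection is illustrative. */
static bool example_a5xx_wait_idle(struct msm_gpu *gpu)
{
        return !spin_until(!(gpu_read(gpu, REG_A5XX_RBBM_STATUS) &
                             (A5XX_RBBM_STATUS_CP_BUSY |
                              A5XX_RBBM_STATUS_RB_BUSY)));
}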
+#define REG_A5XX_RBBM_STATUS3 0x00000530
+
+#define REG_A5XX_RBBM_INT_0_STATUS 0x000004e1
+
+#define REG_A5XX_RBBM_AHB_ME_SPLIT_STATUS 0x000004f0
+
+#define REG_A5XX_RBBM_AHB_PFP_SPLIT_STATUS 0x000004f1
+
+#define REG_A5XX_RBBM_AHB_ETS_SPLIT_STATUS 0x000004f3
+
+#define REG_A5XX_RBBM_AHB_ERROR_STATUS 0x000004f4
+
+#define REG_A5XX_RBBM_PERFCTR_CNTL 0x00000464
+
+#define REG_A5XX_RBBM_PERFCTR_LOAD_CMD0 0x00000465
+
+#define REG_A5XX_RBBM_PERFCTR_LOAD_CMD1 0x00000466
+
+#define REG_A5XX_RBBM_PERFCTR_LOAD_CMD2 0x00000467
+
+#define REG_A5XX_RBBM_PERFCTR_LOAD_CMD3 0x00000468
+
+#define REG_A5XX_RBBM_PERFCTR_LOAD_VALUE_LO 0x00000469
+
+#define REG_A5XX_RBBM_PERFCTR_LOAD_VALUE_HI 0x0000046a
+
+#define REG_A5XX_RBBM_PERFCTR_RBBM_SEL_0 0x0000046b
+
+#define REG_A5XX_RBBM_PERFCTR_RBBM_SEL_1 0x0000046c
+
+#define REG_A5XX_RBBM_PERFCTR_RBBM_SEL_2 0x0000046d
+
+#define REG_A5XX_RBBM_PERFCTR_RBBM_SEL_3 0x0000046e
+
+#define REG_A5XX_RBBM_PERFCTR_GPU_BUSY_MASKED 0x0000046f
+
+#define REG_A5XX_RBBM_AHB_ERROR 0x000004ed
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_EVENT_LOGIC 0x00000504
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_OVER 0x00000505
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_COUNT0 0x00000506
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_COUNT1 0x00000507
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_COUNT2 0x00000508
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_COUNT3 0x00000509
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_COUNT4 0x0000050a
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_COUNT5 0x0000050b
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_TRACE_ADDR 0x0000050c
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_TRACE_BUF0 0x0000050d
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_TRACE_BUF1 0x0000050e
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_TRACE_BUF2 0x0000050f
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_TRACE_BUF3 0x00000510
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_TRACE_BUF4 0x00000511
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_MISR0 0x00000512
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_MISR1 0x00000513
+
+#define REG_A5XX_RBBM_ISDB_CNT 0x00000533
+
+#define REG_A5XX_RBBM_SECVID_TRUST_CONFIG 0x0000f000
+
+#define REG_A5XX_RBBM_SECVID_TRUST_CNTL 0x0000f400
+
+#define REG_A5XX_RBBM_SECVID_TSB_TRUSTED_BASE_LO 0x0000f800
+
+#define REG_A5XX_RBBM_SECVID_TSB_TRUSTED_BASE_HI 0x0000f801
+
+#define REG_A5XX_RBBM_SECVID_TSB_TRUSTED_SIZE 0x0000f802
+
+#define REG_A5XX_RBBM_SECVID_TSB_CNTL 0x0000f803
+
+#define REG_A5XX_RBBM_SECVID_TSB_COMP_STATUS_LO 0x0000f804
+
+#define REG_A5XX_RBBM_SECVID_TSB_COMP_STATUS_HI 0x0000f805
+
+#define REG_A5XX_RBBM_SECVID_TSB_UCHE_STATUS_LO 0x0000f806
+
+#define REG_A5XX_RBBM_SECVID_TSB_UCHE_STATUS_HI 0x0000f807
+
+#define REG_A5XX_RBBM_SECVID_TSB_ADDR_MODE_CNTL 0x0000f810
+
+#define REG_A5XX_VSC_PIPE_DATA_LENGTH_0 0x00000c00
+
+#define REG_A5XX_VSC_PERFCTR_VSC_SEL_0 0x00000c60
+
+#define REG_A5XX_VSC_PERFCTR_VSC_SEL_1 0x00000c61
+
+#define REG_A5XX_VSC_BIN_SIZE 0x00000cdd
+#define A5XX_VSC_BIN_SIZE_WINDOW_OFFSET_DISABLE 0x80000000
+#define A5XX_VSC_BIN_SIZE_X__MASK 0x00007fff
+#define A5XX_VSC_BIN_SIZE_X__SHIFT 0
+static inline uint32_t A5XX_VSC_BIN_SIZE_X(uint32_t val)
+{
+ return ((val) << A5XX_VSC_BIN_SIZE_X__SHIFT) & A5XX_VSC_BIN_SIZE_X__MASK;
+}
+#define A5XX_VSC_BIN_SIZE_Y__MASK 0x7fff0000
+#define A5XX_VSC_BIN_SIZE_Y__SHIFT 16
+static inline uint32_t A5XX_VSC_BIN_SIZE_Y(uint32_t val)
+{
+ return ((val) << A5XX_VSC_BIN_SIZE_Y__SHIFT) & A5XX_VSC_BIN_SIZE_Y__MASK;
+}
+
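The X and Y packers above occupy disjoint bit ranges (bits 0..14 and 16..30), so a bin size is simply the OR of the two packed fields; a sketch assuming gpu_write(), with illustrative dimensions:

/* Sketch: programming the visibility-stream bin size with the
 * packers above. Dimensions are illustrative. */
static void example_set_bin_size(struct msm_gpu *gpu,
                                 uint32_t w, uint32_t h)
{
        gpu_write(gpu, REG_A5XX_VSC_BIN_SIZE,
                  A5XX_VSC_BIN_SIZE_X(w) | A5XX_VSC_BIN_SIZE_Y(h));
}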
+#define REG_A5XX_GRAS_ADDR_MODE_CNTL 0x00000c81
+
+#define REG_A5XX_GRAS_PERFCTR_TSE_SEL_0 0x00000c90
+
+#define REG_A5XX_GRAS_PERFCTR_TSE_SEL_1 0x00000c91
+
+#define REG_A5XX_GRAS_PERFCTR_TSE_SEL_2 0x00000c92
+
+#define REG_A5XX_GRAS_PERFCTR_TSE_SEL_3 0x00000c93
+
+#define REG_A5XX_GRAS_PERFCTR_RAS_SEL_0 0x00000c94
+
+#define REG_A5XX_GRAS_PERFCTR_RAS_SEL_1 0x00000c95
+
+#define REG_A5XX_GRAS_PERFCTR_RAS_SEL_2 0x00000c96
+
+#define REG_A5XX_GRAS_PERFCTR_RAS_SEL_3 0x00000c97
+
+#define REG_A5XX_GRAS_PERFCTR_LRZ_SEL_0 0x00000c98
+
+#define REG_A5XX_GRAS_PERFCTR_LRZ_SEL_1 0x00000c99
+
+#define REG_A5XX_GRAS_PERFCTR_LRZ_SEL_2 0x00000c9a
+
+#define REG_A5XX_GRAS_PERFCTR_LRZ_SEL_3 0x00000c9b
+
+#define REG_A5XX_RB_DBG_ECO_CNTL 0x00000cc4
+
+#define REG_A5XX_RB_ADDR_MODE_CNTL 0x00000cc5
+
+#define REG_A5XX_RB_MODE_CNTL 0x00000cc6
+
+#define REG_A5XX_RB_CCU_CNTL 0x00000cc7
+
+#define REG_A5XX_RB_PERFCTR_RB_SEL_0 0x00000cd0
+
+#define REG_A5XX_RB_PERFCTR_RB_SEL_1 0x00000cd1
+
+#define REG_A5XX_RB_PERFCTR_RB_SEL_2 0x00000cd2
+
+#define REG_A5XX_RB_PERFCTR_RB_SEL_3 0x00000cd3
+
+#define REG_A5XX_RB_PERFCTR_RB_SEL_4 0x00000cd4
+
+#define REG_A5XX_RB_PERFCTR_RB_SEL_5 0x00000cd5
+
+#define REG_A5XX_RB_PERFCTR_RB_SEL_6 0x00000cd6
+
+#define REG_A5XX_RB_PERFCTR_RB_SEL_7 0x00000cd7
+
+#define REG_A5XX_RB_PERFCTR_CCU_SEL_0 0x00000cd8
+
+#define REG_A5XX_RB_PERFCTR_CCU_SEL_1 0x00000cd9
+
+#define REG_A5XX_RB_PERFCTR_CCU_SEL_2 0x00000cda
+
+#define REG_A5XX_RB_PERFCTR_CCU_SEL_3 0x00000cdb
+
+#define REG_A5XX_RB_POWERCTR_RB_SEL_0 0x00000ce0
+
+#define REG_A5XX_RB_POWERCTR_RB_SEL_1 0x00000ce1
+
+#define REG_A5XX_RB_POWERCTR_RB_SEL_2 0x00000ce2
+
+#define REG_A5XX_RB_POWERCTR_RB_SEL_3 0x00000ce3
+
+#define REG_A5XX_RB_POWERCTR_CCU_SEL_0 0x00000ce4
+
+#define REG_A5XX_RB_POWERCTR_CCU_SEL_1 0x00000ce5
+
+#define REG_A5XX_RB_PERFCTR_CMP_SEL_0 0x00000cec
+
+#define REG_A5XX_RB_PERFCTR_CMP_SEL_1 0x00000ced
+
+#define REG_A5XX_RB_PERFCTR_CMP_SEL_2 0x00000cee
+
+#define REG_A5XX_RB_PERFCTR_CMP_SEL_3 0x00000cef
+
+#define REG_A5XX_PC_DBG_ECO_CNTL 0x00000d00
+#define A5XX_PC_DBG_ECO_CNTL_TWOPASSUSEWFI 0x00000100
+
+#define REG_A5XX_PC_ADDR_MODE_CNTL 0x00000d01
+
+#define REG_A5XX_PC_MODE_CNTL 0x00000d02
+
+#define REG_A5XX_UNKNOWN_0D08 0x00000d08
+
+#define REG_A5XX_UNKNOWN_0D09 0x00000d09
+
+#define REG_A5XX_PC_PERFCTR_PC_SEL_0 0x00000d10
+
+#define REG_A5XX_PC_PERFCTR_PC_SEL_1 0x00000d11
+
+#define REG_A5XX_PC_PERFCTR_PC_SEL_2 0x00000d12
+
+#define REG_A5XX_PC_PERFCTR_PC_SEL_3 0x00000d13
+
+#define REG_A5XX_PC_PERFCTR_PC_SEL_4 0x00000d14
+
+#define REG_A5XX_PC_PERFCTR_PC_SEL_5 0x00000d15
+
+#define REG_A5XX_PC_PERFCTR_PC_SEL_6 0x00000d16
+
+#define REG_A5XX_PC_PERFCTR_PC_SEL_7 0x00000d17
+
+#define REG_A5XX_HLSQ_TIMEOUT_THRESHOLD_0 0x00000e00
+
+#define REG_A5XX_HLSQ_TIMEOUT_THRESHOLD_1 0x00000e01
+
+#define REG_A5XX_HLSQ_ADDR_MODE_CNTL 0x00000e05
+
+#define REG_A5XX_HLSQ_MODE_CNTL 0x00000e06
+
+#define REG_A5XX_HLSQ_PERFCTR_HLSQ_SEL_0 0x00000e10
+
+#define REG_A5XX_HLSQ_PERFCTR_HLSQ_SEL_1 0x00000e11
+
+#define REG_A5XX_HLSQ_PERFCTR_HLSQ_SEL_2 0x00000e12
+
+#define REG_A5XX_HLSQ_PERFCTR_HLSQ_SEL_3 0x00000e13
+
+#define REG_A5XX_HLSQ_PERFCTR_HLSQ_SEL_4 0x00000e14
+
+#define REG_A5XX_HLSQ_PERFCTR_HLSQ_SEL_5 0x00000e15
+
+#define REG_A5XX_HLSQ_PERFCTR_HLSQ_SEL_6 0x00000e16
+
+#define REG_A5XX_HLSQ_PERFCTR_HLSQ_SEL_7 0x00000e17
+
+#define REG_A5XX_HLSQ_SPTP_RDSEL 0x00000f08
+
+#define REG_A5XX_HLSQ_DBG_READ_SEL 0x0000bc00
+
+#define REG_A5XX_HLSQ_DBG_AHB_READ_APERTURE 0x0000a000
+
+#define REG_A5XX_VFD_ADDR_MODE_CNTL 0x00000e41
+
+#define REG_A5XX_VFD_MODE_CNTL 0x00000e42
+
+#define REG_A5XX_VFD_PERFCTR_VFD_SEL_0 0x00000e50
+
+#define REG_A5XX_VFD_PERFCTR_VFD_SEL_1 0x00000e51
+
+#define REG_A5XX_VFD_PERFCTR_VFD_SEL_2 0x00000e52
+
+#define REG_A5XX_VFD_PERFCTR_VFD_SEL_3 0x00000e53
+
+#define REG_A5XX_VFD_PERFCTR_VFD_SEL_4 0x00000e54
+
+#define REG_A5XX_VFD_PERFCTR_VFD_SEL_5 0x00000e55
+
+#define REG_A5XX_VFD_PERFCTR_VFD_SEL_6 0x00000e56
+
+#define REG_A5XX_VFD_PERFCTR_VFD_SEL_7 0x00000e57
+
+#define REG_A5XX_VPC_DBG_ECO_CNTL 0x00000e60
+
+#define REG_A5XX_VPC_ADDR_MODE_CNTL 0x00000e61
+
+#define REG_A5XX_VPC_MODE_CNTL 0x00000e62
+
+#define REG_A5XX_VPC_PERFCTR_VPC_SEL_0 0x00000e64
+
+#define REG_A5XX_VPC_PERFCTR_VPC_SEL_1 0x00000e65
+
+#define REG_A5XX_VPC_PERFCTR_VPC_SEL_2 0x00000e66
+
+#define REG_A5XX_VPC_PERFCTR_VPC_SEL_3 0x00000e67
+
+#define REG_A5XX_UCHE_ADDR_MODE_CNTL 0x00000e80
+
+#define REG_A5XX_UCHE_SVM_CNTL 0x00000e82
+
+#define REG_A5XX_UCHE_WRITE_THRU_BASE_LO 0x00000e87
+
+#define REG_A5XX_UCHE_WRITE_THRU_BASE_HI 0x00000e88
+
+#define REG_A5XX_UCHE_TRAP_BASE_LO 0x00000e89
+
+#define REG_A5XX_UCHE_TRAP_BASE_HI 0x00000e8a
+
+#define REG_A5XX_UCHE_GMEM_RANGE_MIN_LO 0x00000e8b
+
+#define REG_A5XX_UCHE_GMEM_RANGE_MIN_HI 0x00000e8c
+
+#define REG_A5XX_UCHE_GMEM_RANGE_MAX_LO 0x00000e8d
+
+#define REG_A5XX_UCHE_GMEM_RANGE_MAX_HI 0x00000e8e
+
+#define REG_A5XX_UCHE_DBG_ECO_CNTL_2 0x00000e8f
+
+#define REG_A5XX_UCHE_DBG_ECO_CNTL 0x00000e90
+
+#define REG_A5XX_UCHE_CACHE_INVALIDATE_MIN_LO 0x00000e91
+
+#define REG_A5XX_UCHE_CACHE_INVALIDATE_MIN_HI 0x00000e92
+
+#define REG_A5XX_UCHE_CACHE_INVALIDATE_MAX_LO 0x00000e93
+
+#define REG_A5XX_UCHE_CACHE_INVALIDATE_MAX_HI 0x00000e94
+
+#define REG_A5XX_UCHE_CACHE_INVALIDATE 0x00000e95
+
+#define REG_A5XX_UCHE_CACHE_WAYS 0x00000e96
+
+#define REG_A5XX_UCHE_PERFCTR_UCHE_SEL_0 0x00000ea0
+
+#define REG_A5XX_UCHE_PERFCTR_UCHE_SEL_1 0x00000ea1
+
+#define REG_A5XX_UCHE_PERFCTR_UCHE_SEL_2 0x00000ea2
+
+#define REG_A5XX_UCHE_PERFCTR_UCHE_SEL_3 0x00000ea3
+
+#define REG_A5XX_UCHE_PERFCTR_UCHE_SEL_4 0x00000ea4
+
+#define REG_A5XX_UCHE_PERFCTR_UCHE_SEL_5 0x00000ea5
+
+#define REG_A5XX_UCHE_PERFCTR_UCHE_SEL_6 0x00000ea6
+
+#define REG_A5XX_UCHE_PERFCTR_UCHE_SEL_7 0x00000ea7
+
+#define REG_A5XX_UCHE_POWERCTR_UCHE_SEL_0 0x00000ea8
+
+#define REG_A5XX_UCHE_POWERCTR_UCHE_SEL_1 0x00000ea9
+
+#define REG_A5XX_UCHE_POWERCTR_UCHE_SEL_2 0x00000eaa
+
+#define REG_A5XX_UCHE_POWERCTR_UCHE_SEL_3 0x00000eab
+
+#define REG_A5XX_UCHE_TRAP_LOG_LO 0x00000eb1
+
+#define REG_A5XX_UCHE_TRAP_LOG_HI 0x00000eb2
+
+#define REG_A5XX_SP_DBG_ECO_CNTL 0x00000ec0
+
+#define REG_A5XX_SP_ADDR_MODE_CNTL 0x00000ec1
+
+#define REG_A5XX_SP_MODE_CNTL 0x00000ec2
+
+#define REG_A5XX_SP_PERFCTR_SP_SEL_0 0x00000ed0
+
+#define REG_A5XX_SP_PERFCTR_SP_SEL_1 0x00000ed1
+
+#define REG_A5XX_SP_PERFCTR_SP_SEL_2 0x00000ed2
+
+#define REG_A5XX_SP_PERFCTR_SP_SEL_3 0x00000ed3
+
+#define REG_A5XX_SP_PERFCTR_SP_SEL_4 0x00000ed4
+
+#define REG_A5XX_SP_PERFCTR_SP_SEL_5 0x00000ed5
+
+#define REG_A5XX_SP_PERFCTR_SP_SEL_6 0x00000ed6
+
+#define REG_A5XX_SP_PERFCTR_SP_SEL_7 0x00000ed7
+
+#define REG_A5XX_SP_PERFCTR_SP_SEL_8 0x00000ed8
+
+#define REG_A5XX_SP_PERFCTR_SP_SEL_9 0x00000ed9
+
+#define REG_A5XX_SP_PERFCTR_SP_SEL_10 0x00000eda
+
+#define REG_A5XX_SP_PERFCTR_SP_SEL_11 0x00000edb
+
+#define REG_A5XX_SP_POWERCTR_SP_SEL_0 0x00000edc
+
+#define REG_A5XX_SP_POWERCTR_SP_SEL_1 0x00000edd
+
+#define REG_A5XX_SP_POWERCTR_SP_SEL_2 0x00000ede
+
+#define REG_A5XX_SP_POWERCTR_SP_SEL_3 0x00000edf
+
+#define REG_A5XX_TPL1_ADDR_MODE_CNTL 0x00000f01
+
+#define REG_A5XX_TPL1_MODE_CNTL 0x00000f02
+
+#define REG_A5XX_TPL1_PERFCTR_TP_SEL_0 0x00000f10
+
+#define REG_A5XX_TPL1_PERFCTR_TP_SEL_1 0x00000f11
+
+#define REG_A5XX_TPL1_PERFCTR_TP_SEL_2 0x00000f12
+
+#define REG_A5XX_TPL1_PERFCTR_TP_SEL_3 0x00000f13
+
+#define REG_A5XX_TPL1_PERFCTR_TP_SEL_4 0x00000f14
+
+#define REG_A5XX_TPL1_PERFCTR_TP_SEL_5 0x00000f15
+
+#define REG_A5XX_TPL1_PERFCTR_TP_SEL_6 0x00000f16
+
+#define REG_A5XX_TPL1_PERFCTR_TP_SEL_7 0x00000f17
+
+#define REG_A5XX_TPL1_POWERCTR_TP_SEL_0 0x00000f18
+
+#define REG_A5XX_TPL1_POWERCTR_TP_SEL_1 0x00000f19
+
+#define REG_A5XX_TPL1_POWERCTR_TP_SEL_2 0x00000f1a
+
+#define REG_A5XX_TPL1_POWERCTR_TP_SEL_3 0x00000f1b
+
+#define REG_A5XX_VBIF_VERSION 0x00003000
+
+#define REG_A5XX_VBIF_CLKON 0x00003001
+
+#define REG_A5XX_VBIF_ABIT_SORT 0x00003028
+
+#define REG_A5XX_VBIF_ABIT_SORT_CONF 0x00003029
+
+#define REG_A5XX_VBIF_ROUND_ROBIN_QOS_ARB 0x00003049
+
+#define REG_A5XX_VBIF_GATE_OFF_WRREQ_EN 0x0000302a
+
+#define REG_A5XX_VBIF_IN_RD_LIM_CONF0 0x0000302c
+
+#define REG_A5XX_VBIF_IN_RD_LIM_CONF1 0x0000302d
+
+#define REG_A5XX_VBIF_XIN_HALT_CTRL0 0x00003080
+
+#define REG_A5XX_VBIF_XIN_HALT_CTRL1 0x00003081
+
+#define REG_A5XX_VBIF_TEST_BUS_OUT_CTRL 0x00003084
+
+#define REG_A5XX_VBIF_TEST_BUS1_CTRL0 0x00003085
+
+#define REG_A5XX_VBIF_TEST_BUS1_CTRL1 0x00003086
+
+#define REG_A5XX_VBIF_TEST_BUS2_CTRL0 0x00003087
+
+#define REG_A5XX_VBIF_TEST_BUS2_CTRL1 0x00003088
+
+#define REG_A5XX_VBIF_TEST_BUS_OUT 0x0000308c
+
+#define REG_A5XX_VBIF_PERF_CNT_SEL0 0x000030d0
+
+#define REG_A5XX_VBIF_PERF_CNT_SEL1 0x000030d1
+
+#define REG_A5XX_VBIF_PERF_CNT_SEL2 0x000030d2
+
+#define REG_A5XX_VBIF_PERF_CNT_SEL3 0x000030d3
+
+#define REG_A5XX_VBIF_PERF_CNT_LOW0 0x000030d8
+
+#define REG_A5XX_VBIF_PERF_CNT_LOW1 0x000030d9
+
+#define REG_A5XX_VBIF_PERF_CNT_LOW2 0x000030da
+
+#define REG_A5XX_VBIF_PERF_CNT_LOW3 0x000030db
+
+#define REG_A5XX_VBIF_PERF_CNT_HIGH0 0x000030e0
+
+#define REG_A5XX_VBIF_PERF_CNT_HIGH1 0x000030e1
+
+#define REG_A5XX_VBIF_PERF_CNT_HIGH2 0x000030e2
+
+#define REG_A5XX_VBIF_PERF_CNT_HIGH3 0x000030e3
+
+#define REG_A5XX_VBIF_PERF_PWR_CNT_EN0 0x00003100
+
+#define REG_A5XX_VBIF_PERF_PWR_CNT_EN1 0x00003101
+
+#define REG_A5XX_VBIF_PERF_PWR_CNT_EN2 0x00003102
+
+#define REG_A5XX_VBIF_PERF_PWR_CNT_LOW0 0x00003110
+
+#define REG_A5XX_VBIF_PERF_PWR_CNT_LOW1 0x00003111
+
+#define REG_A5XX_VBIF_PERF_PWR_CNT_LOW2 0x00003112
+
+#define REG_A5XX_VBIF_PERF_PWR_CNT_HIGH0 0x00003118
+
+#define REG_A5XX_VBIF_PERF_PWR_CNT_HIGH1 0x00003119
+
+#define REG_A5XX_VBIF_PERF_PWR_CNT_HIGH2 0x0000311a
+
+#define REG_A5XX_GPMU_INST_RAM_BASE 0x00008800
+
+#define REG_A5XX_GPMU_DATA_RAM_BASE 0x00009800
+
+#define REG_A5XX_GPMU_SP_POWER_CNTL 0x0000a881
+
+#define REG_A5XX_GPMU_RBCCU_CLOCK_CNTL 0x0000a886
+
+#define REG_A5XX_GPMU_RBCCU_POWER_CNTL 0x0000a887
+
+#define REG_A5XX_GPMU_SP_PWR_CLK_STATUS 0x0000a88b
+#define A5XX_GPMU_SP_PWR_CLK_STATUS_PWR_ON 0x00100000
+
+#define REG_A5XX_GPMU_RBCCU_PWR_CLK_STATUS 0x0000a88d
+#define A5XX_GPMU_RBCCU_PWR_CLK_STATUS_PWR_ON 0x00100000
+
+#define REG_A5XX_GPMU_PWR_COL_STAGGER_DELAY 0x0000a891
+
+#define REG_A5XX_GPMU_PWR_COL_INTER_FRAME_CTRL 0x0000a892
+
+#define REG_A5XX_GPMU_PWR_COL_INTER_FRAME_HYST 0x0000a893
+
+#define REG_A5XX_GPMU_PWR_COL_BINNING_CTRL 0x0000a894
+
+#define REG_A5XX_GPMU_CLOCK_THROTTLE_CTRL 0x0000a8a3
+
+#define REG_A5XX_GPMU_WFI_CONFIG 0x0000a8c1
+
+#define REG_A5XX_GPMU_RBBM_INTR_INFO 0x0000a8d6
+
+#define REG_A5XX_GPMU_CM3_SYSRESET 0x0000a8d8
+
+#define REG_A5XX_GPMU_GENERAL_0 0x0000a8e0
+
+#define REG_A5XX_GPMU_GENERAL_1 0x0000a8e1
+
+#define REG_A5XX_SP_POWER_COUNTER_0_LO 0x0000a840
+
+#define REG_A5XX_SP_POWER_COUNTER_0_HI 0x0000a841
+
+#define REG_A5XX_SP_POWER_COUNTER_1_LO 0x0000a842
+
+#define REG_A5XX_SP_POWER_COUNTER_1_HI 0x0000a843
+
+#define REG_A5XX_SP_POWER_COUNTER_2_LO 0x0000a844
+
+#define REG_A5XX_SP_POWER_COUNTER_2_HI 0x0000a845
+
+#define REG_A5XX_SP_POWER_COUNTER_3_LO 0x0000a846
+
+#define REG_A5XX_SP_POWER_COUNTER_3_HI 0x0000a847
+
+#define REG_A5XX_TP_POWER_COUNTER_0_LO 0x0000a848
+
+#define REG_A5XX_TP_POWER_COUNTER_0_HI 0x0000a849
+
+#define REG_A5XX_TP_POWER_COUNTER_1_LO 0x0000a84a
+
+#define REG_A5XX_TP_POWER_COUNTER_1_HI 0x0000a84b
+
+#define REG_A5XX_TP_POWER_COUNTER_2_LO 0x0000a84c
+
+#define REG_A5XX_TP_POWER_COUNTER_2_HI 0x0000a84d
+
+#define REG_A5XX_TP_POWER_COUNTER_3_LO 0x0000a84e
+
+#define REG_A5XX_TP_POWER_COUNTER_3_HI 0x0000a84f
+
+#define REG_A5XX_RB_POWER_COUNTER_0_LO 0x0000a850
+
+#define REG_A5XX_RB_POWER_COUNTER_0_HI 0x0000a851
+
+#define REG_A5XX_RB_POWER_COUNTER_1_LO 0x0000a852
+
+#define REG_A5XX_RB_POWER_COUNTER_1_HI 0x0000a853
+
+#define REG_A5XX_RB_POWER_COUNTER_2_LO 0x0000a854
+
+#define REG_A5XX_RB_POWER_COUNTER_2_HI 0x0000a855
+
+#define REG_A5XX_RB_POWER_COUNTER_3_LO 0x0000a856
+
+#define REG_A5XX_RB_POWER_COUNTER_3_HI 0x0000a857
+
+#define REG_A5XX_CCU_POWER_COUNTER_0_LO 0x0000a858
+
+#define REG_A5XX_CCU_POWER_COUNTER_0_HI 0x0000a859
+
+#define REG_A5XX_CCU_POWER_COUNTER_1_LO 0x0000a85a
+
+#define REG_A5XX_CCU_POWER_COUNTER_1_HI 0x0000a85b
+
+#define REG_A5XX_UCHE_POWER_COUNTER_0_LO 0x0000a85c
+
+#define REG_A5XX_UCHE_POWER_COUNTER_0_HI 0x0000a85d
+
+#define REG_A5XX_UCHE_POWER_COUNTER_1_LO 0x0000a85e
+
+#define REG_A5XX_UCHE_POWER_COUNTER_1_HI 0x0000a85f
+
+#define REG_A5XX_UCHE_POWER_COUNTER_2_LO 0x0000a860
+
+#define REG_A5XX_UCHE_POWER_COUNTER_2_HI 0x0000a861
+
+#define REG_A5XX_UCHE_POWER_COUNTER_3_LO 0x0000a862
+
+#define REG_A5XX_UCHE_POWER_COUNTER_3_HI 0x0000a863
+
+#define REG_A5XX_CP_POWER_COUNTER_0_LO 0x0000a864
+
+#define REG_A5XX_CP_POWER_COUNTER_0_HI 0x0000a865
+
+#define REG_A5XX_CP_POWER_COUNTER_1_LO 0x0000a866
+
+#define REG_A5XX_CP_POWER_COUNTER_1_HI 0x0000a867
+
+#define REG_A5XX_CP_POWER_COUNTER_2_LO 0x0000a868
+
+#define REG_A5XX_CP_POWER_COUNTER_2_HI 0x0000a869
+
+#define REG_A5XX_CP_POWER_COUNTER_3_LO 0x0000a86a
+
+#define REG_A5XX_CP_POWER_COUNTER_3_HI 0x0000a86b
+
+#define REG_A5XX_GPMU_POWER_COUNTER_0_LO 0x0000a86c
+
+#define REG_A5XX_GPMU_POWER_COUNTER_0_HI 0x0000a86d
+
+#define REG_A5XX_GPMU_POWER_COUNTER_1_LO 0x0000a86e
+
+#define REG_A5XX_GPMU_POWER_COUNTER_1_HI 0x0000a86f
+
+#define REG_A5XX_GPMU_POWER_COUNTER_2_LO 0x0000a870
+
+#define REG_A5XX_GPMU_POWER_COUNTER_2_HI 0x0000a871
+
+#define REG_A5XX_GPMU_POWER_COUNTER_3_LO 0x0000a872
+
+#define REG_A5XX_GPMU_POWER_COUNTER_3_HI 0x0000a873
+
+#define REG_A5XX_GPMU_POWER_COUNTER_4_LO 0x0000a874
+
+#define REG_A5XX_GPMU_POWER_COUNTER_4_HI 0x0000a875
+
+#define REG_A5XX_GPMU_POWER_COUNTER_5_LO 0x0000a876
+
+#define REG_A5XX_GPMU_POWER_COUNTER_5_HI 0x0000a877
+
+#define REG_A5XX_GPMU_POWER_COUNTER_ENABLE 0x0000a878
+
+#define REG_A5XX_GPMU_ALWAYS_ON_COUNTER_LO 0x0000a879
+
+#define REG_A5XX_GPMU_ALWAYS_ON_COUNTER_HI 0x0000a87a
+
+#define REG_A5XX_GPMU_ALWAYS_ON_COUNTER_RESET 0x0000a87b
+
+#define REG_A5XX_GPMU_POWER_COUNTER_SELECT_0 0x0000a87c
+
+#define REG_A5XX_GPMU_POWER_COUNTER_SELECT_1 0x0000a87d
+
+#define REG_A5XX_GPMU_THROTTLE_UNMASK_FORCE_CTRL 0x0000a8a8
+
+#define REG_A5XX_GPMU_TEMP_SENSOR_ID 0x0000ac00
+
+#define REG_A5XX_GPMU_TEMP_SENSOR_CONFIG 0x0000ac01
+
+#define REG_A5XX_GPMU_TEMP_VAL 0x0000ac02
+
+#define REG_A5XX_GPMU_DELTA_TEMP_THRESHOLD 0x0000ac03
+
+#define REG_A5XX_GPMU_TEMP_THRESHOLD_INTR_STATUS 0x0000ac05
+
+#define REG_A5XX_GPMU_TEMP_THRESHOLD_INTR_EN_MASK 0x0000ac06
+
+#define REG_A5XX_GPMU_LEAKAGE_TEMP_COEFF_0_1 0x0000ac40
+
+#define REG_A5XX_GPMU_LEAKAGE_TEMP_COEFF_2_3 0x0000ac41
+
+#define REG_A5XX_GPMU_LEAKAGE_VTG_COEFF_0_1 0x0000ac42
+
+#define REG_A5XX_GPMU_LEAKAGE_VTG_COEFF_2_3 0x0000ac43
+
+#define REG_A5XX_GPMU_BASE_LEAKAGE 0x0000ac46
+
+#define REG_A5XX_GPMU_GPMU_VOLTAGE 0x0000ac60
+
+#define REG_A5XX_GPMU_GPMU_VOLTAGE_INTR_STATUS 0x0000ac61
+
+#define REG_A5XX_GPMU_GPMU_VOLTAGE_INTR_EN_MASK 0x0000ac62
+
+#define REG_A5XX_GPMU_GPMU_PWR_THRESHOLD 0x0000ac80
+
+#define REG_A5XX_GPMU_GPMU_LLM_GLM_SLEEP_CTRL 0x0000acc4
+
+#define REG_A5XX_GPMU_GPMU_LLM_GLM_SLEEP_STATUS 0x0000acc5
+
+#define REG_A5XX_GDPM_CONFIG1 0x0000b80c
+
+#define REG_A5XX_GDPM_CONFIG2 0x0000b80d
+
+#define REG_A5XX_GDPM_INT_EN 0x0000b80f
+
+#define REG_A5XX_GDPM_INT_MASK 0x0000b811
+
+#define REG_A5XX_GPMU_BEC_ENABLE 0x0000b9a0
+
+#define REG_A5XX_GPU_CS_SENSOR_GENERAL_STATUS 0x0000c41a
+
+#define REG_A5XX_GPU_CS_AMP_CALIBRATION_STATUS1_0 0x0000c41d
+
+#define REG_A5XX_GPU_CS_AMP_CALIBRATION_STATUS1_2 0x0000c41f
+
+#define REG_A5XX_GPU_CS_AMP_CALIBRATION_STATUS1_4 0x0000c421
+
+#define REG_A5XX_GPU_CS_ENABLE_REG 0x0000c520
+
+#define REG_A5XX_GPU_CS_AMP_CALIBRATION_CONTROL1 0x0000c557
+
+#define REG_A5XX_GRAS_CL_CNTL 0x0000e000
+
+#define REG_A5XX_UNKNOWN_E001 0x0000e001
+
+#define REG_A5XX_UNKNOWN_E004 0x0000e004
+
+#define REG_A5XX_GRAS_CNTL 0x0000e005
+#define A5XX_GRAS_CNTL_VARYING 0x00000001
+
+#define REG_A5XX_GRAS_CL_GUARDBAND_CLIP_ADJ 0x0000e006
+#define A5XX_GRAS_CL_GUARDBAND_CLIP_ADJ_HORZ__MASK 0x000003ff
+#define A5XX_GRAS_CL_GUARDBAND_CLIP_ADJ_HORZ__SHIFT 0
+static inline uint32_t A5XX_GRAS_CL_GUARDBAND_CLIP_ADJ_HORZ(uint32_t val)
+{
+ return ((val) << A5XX_GRAS_CL_GUARDBAND_CLIP_ADJ_HORZ__SHIFT) & A5XX_GRAS_CL_GUARDBAND_CLIP_ADJ_HORZ__MASK;
+}
+#define A5XX_GRAS_CL_GUARDBAND_CLIP_ADJ_VERT__MASK 0x000ffc00
+#define A5XX_GRAS_CL_GUARDBAND_CLIP_ADJ_VERT__SHIFT 10
+static inline uint32_t A5XX_GRAS_CL_GUARDBAND_CLIP_ADJ_VERT(uint32_t val)
+{
+ return ((val) << A5XX_GRAS_CL_GUARDBAND_CLIP_ADJ_VERT__SHIFT) & A5XX_GRAS_CL_GUARDBAND_CLIP_ADJ_VERT__MASK;
+}
+
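+/* The packing helpers below all follow the same generated pattern:
+ * shift the value into the field position and clamp with the field
+ * mask.  fui() is assumed to be the usual float-to-uint32 bitcast
+ * helper, so the viewport registers take raw IEEE-754 single-precision
+ * bit patterns rather than converted integers.
+ */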
+#define REG_A5XX_GRAS_CL_VPORT_XOFFSET_0 0x0000e010
+#define A5XX_GRAS_CL_VPORT_XOFFSET_0__MASK 0xffffffff
+#define A5XX_GRAS_CL_VPORT_XOFFSET_0__SHIFT 0
+static inline uint32_t A5XX_GRAS_CL_VPORT_XOFFSET_0(float val)
+{
+ return ((fui(val)) << A5XX_GRAS_CL_VPORT_XOFFSET_0__SHIFT) & A5XX_GRAS_CL_VPORT_XOFFSET_0__MASK;
+}
+
+#define REG_A5XX_GRAS_CL_VPORT_XSCALE_0 0x0000e011
+#define A5XX_GRAS_CL_VPORT_XSCALE_0__MASK 0xffffffff
+#define A5XX_GRAS_CL_VPORT_XSCALE_0__SHIFT 0
+static inline uint32_t A5XX_GRAS_CL_VPORT_XSCALE_0(float val)
+{
+ return ((fui(val)) << A5XX_GRAS_CL_VPORT_XSCALE_0__SHIFT) & A5XX_GRAS_CL_VPORT_XSCALE_0__MASK;
+}
+
+#define REG_A5XX_GRAS_CL_VPORT_YOFFSET_0 0x0000e012
+#define A5XX_GRAS_CL_VPORT_YOFFSET_0__MASK 0xffffffff
+#define A5XX_GRAS_CL_VPORT_YOFFSET_0__SHIFT 0
+static inline uint32_t A5XX_GRAS_CL_VPORT_YOFFSET_0(float val)
+{
+ return ((fui(val)) << A5XX_GRAS_CL_VPORT_YOFFSET_0__SHIFT) & A5XX_GRAS_CL_VPORT_YOFFSET_0__MASK;
+}
+
+#define REG_A5XX_GRAS_CL_VPORT_YSCALE_0 0x0000e013
+#define A5XX_GRAS_CL_VPORT_YSCALE_0__MASK 0xffffffff
+#define A5XX_GRAS_CL_VPORT_YSCALE_0__SHIFT 0
+static inline uint32_t A5XX_GRAS_CL_VPORT_YSCALE_0(float val)
+{
+ return ((fui(val)) << A5XX_GRAS_CL_VPORT_YSCALE_0__SHIFT) & A5XX_GRAS_CL_VPORT_YSCALE_0__MASK;
+}
+
+#define REG_A5XX_GRAS_CL_VPORT_ZOFFSET_0 0x0000e014
+#define A5XX_GRAS_CL_VPORT_ZOFFSET_0__MASK 0xffffffff
+#define A5XX_GRAS_CL_VPORT_ZOFFSET_0__SHIFT 0
+static inline uint32_t A5XX_GRAS_CL_VPORT_ZOFFSET_0(float val)
+{
+ return ((fui(val)) << A5XX_GRAS_CL_VPORT_ZOFFSET_0__SHIFT) & A5XX_GRAS_CL_VPORT_ZOFFSET_0__MASK;
+}
+
+#define REG_A5XX_GRAS_CL_VPORT_ZSCALE_0 0x0000e015
+#define A5XX_GRAS_CL_VPORT_ZSCALE_0__MASK 0xffffffff
+#define A5XX_GRAS_CL_VPORT_ZSCALE_0__SHIFT 0
+static inline uint32_t A5XX_GRAS_CL_VPORT_ZSCALE_0(float val)
+{
+ return ((fui(val)) << A5XX_GRAS_CL_VPORT_ZSCALE_0__SHIFT) & A5XX_GRAS_CL_VPORT_ZSCALE_0__MASK;
+}
+
+#define REG_A5XX_GRAS_SU_CNTL 0x0000e090
+#define A5XX_GRAS_SU_CNTL_FRONT_CW 0x00000004
+#define A5XX_GRAS_SU_CNTL_LINEHALFWIDTH__MASK 0x000007f8
+#define A5XX_GRAS_SU_CNTL_LINEHALFWIDTH__SHIFT 3
+static inline uint32_t A5XX_GRAS_SU_CNTL_LINEHALFWIDTH(float val)
+{
+ return ((((int32_t)(val * 4.0))) << A5XX_GRAS_SU_CNTL_LINEHALFWIDTH__SHIFT) & A5XX_GRAS_SU_CNTL_LINEHALFWIDTH__MASK;
+}
+#define A5XX_GRAS_SU_CNTL_POLY_OFFSET 0x00000800
+#define A5XX_GRAS_SU_CNTL_MSAA_ENABLE 0x00002000
+
+#define REG_A5XX_GRAS_SU_POINT_MINMAX 0x0000e091
+#define A5XX_GRAS_SU_POINT_MINMAX_MIN__MASK 0x0000ffff
+#define A5XX_GRAS_SU_POINT_MINMAX_MIN__SHIFT 0
+static inline uint32_t A5XX_GRAS_SU_POINT_MINMAX_MIN(float val)
+{
+ return ((((uint32_t)(val * 16.0))) << A5XX_GRAS_SU_POINT_MINMAX_MIN__SHIFT) & A5XX_GRAS_SU_POINT_MINMAX_MIN__MASK;
+}
+#define A5XX_GRAS_SU_POINT_MINMAX_MAX__MASK 0xffff0000
+#define A5XX_GRAS_SU_POINT_MINMAX_MAX__SHIFT 16
+static inline uint32_t A5XX_GRAS_SU_POINT_MINMAX_MAX(float val)
+{
+ return ((((uint32_t)(val * 16.0))) << A5XX_GRAS_SU_POINT_MINMAX_MAX__SHIFT) & A5XX_GRAS_SU_POINT_MINMAX_MAX__MASK;
+}
+
+#define REG_A5XX_GRAS_SU_POINT_SIZE 0x0000e092
+#define A5XX_GRAS_SU_POINT_SIZE__MASK 0xffffffff
+#define A5XX_GRAS_SU_POINT_SIZE__SHIFT 0
+static inline uint32_t A5XX_GRAS_SU_POINT_SIZE(float val)
+{
+ return ((((int32_t)(val * 16.0))) << A5XX_GRAS_SU_POINT_SIZE__SHIFT) & A5XX_GRAS_SU_POINT_SIZE__MASK;
+}
+
+#define REG_A5XX_UNKNOWN_E093 0x0000e093
+
+#define REG_A5XX_GRAS_SU_DEPTH_PLANE_CNTL 0x0000e094
+#define A5XX_GRAS_SU_DEPTH_PLANE_CNTL_ALPHA_TEST_ENABLE 0x00000001
+
+#define REG_A5XX_GRAS_SU_POLY_OFFSET_SCALE 0x0000e095
+#define A5XX_GRAS_SU_POLY_OFFSET_SCALE__MASK 0xffffffff
+#define A5XX_GRAS_SU_POLY_OFFSET_SCALE__SHIFT 0
+static inline uint32_t A5XX_GRAS_SU_POLY_OFFSET_SCALE(float val)
+{
+ return ((fui(val)) << A5XX_GRAS_SU_POLY_OFFSET_SCALE__SHIFT) & A5XX_GRAS_SU_POLY_OFFSET_SCALE__MASK;
+}
+
+#define REG_A5XX_GRAS_SU_POLY_OFFSET_OFFSET 0x0000e096
+#define A5XX_GRAS_SU_POLY_OFFSET_OFFSET__MASK 0xffffffff
+#define A5XX_GRAS_SU_POLY_OFFSET_OFFSET__SHIFT 0
+static inline uint32_t A5XX_GRAS_SU_POLY_OFFSET_OFFSET(float val)
+{
+ return ((fui(val)) << A5XX_GRAS_SU_POLY_OFFSET_OFFSET__SHIFT) & A5XX_GRAS_SU_POLY_OFFSET_OFFSET__MASK;
+}
+
+#define REG_A5XX_GRAS_SU_POLY_OFFSET_OFFSET_CLAMP 0x0000e097
+#define A5XX_GRAS_SU_POLY_OFFSET_OFFSET_CLAMP__MASK 0xffffffff
+#define A5XX_GRAS_SU_POLY_OFFSET_OFFSET_CLAMP__SHIFT 0
+static inline uint32_t A5XX_GRAS_SU_POLY_OFFSET_OFFSET_CLAMP(float val)
+{
+ return ((fui(val)) << A5XX_GRAS_SU_POLY_OFFSET_OFFSET_CLAMP__SHIFT) & A5XX_GRAS_SU_POLY_OFFSET_OFFSET_CLAMP__MASK;
+}
+
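+/* Several helpers below take enum arguments (a5xx_depth_format,
+ * a3xx_msaa_samples, a5xx_color_fmt, ...).  Those enums are presumably
+ * defined earlier in this generated header and already hold the raw
+ * hardware field values, so the helpers only shift and mask them.
+ */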
+#define REG_A5XX_GRAS_SU_DEPTH_BUFFER_INFO 0x0000e098
+#define A5XX_GRAS_SU_DEPTH_BUFFER_INFO_DEPTH_FORMAT__MASK 0x00000007
+#define A5XX_GRAS_SU_DEPTH_BUFFER_INFO_DEPTH_FORMAT__SHIFT 0
+static inline uint32_t A5XX_GRAS_SU_DEPTH_BUFFER_INFO_DEPTH_FORMAT(enum a5xx_depth_format val)
+{
+ return ((val) << A5XX_GRAS_SU_DEPTH_BUFFER_INFO_DEPTH_FORMAT__SHIFT) & A5XX_GRAS_SU_DEPTH_BUFFER_INFO_DEPTH_FORMAT__MASK;
+}
+
+#define REG_A5XX_GRAS_SU_CONSERVATIVE_RAS_CNTL 0x0000e099
+
+#define REG_A5XX_GRAS_SC_CNTL 0x0000e0a0
+#define A5XX_GRAS_SC_CNTL_SAMPLES_PASSED 0x00008000
+
+#define REG_A5XX_GRAS_SC_BIN_CNTL 0x0000e0a1
+
+#define REG_A5XX_GRAS_SC_RAS_MSAA_CNTL 0x0000e0a2
+#define A5XX_GRAS_SC_RAS_MSAA_CNTL_SAMPLES__MASK 0x00000003
+#define A5XX_GRAS_SC_RAS_MSAA_CNTL_SAMPLES__SHIFT 0
+static inline uint32_t A5XX_GRAS_SC_RAS_MSAA_CNTL_SAMPLES(enum a3xx_msaa_samples val)
+{
+ return ((val) << A5XX_GRAS_SC_RAS_MSAA_CNTL_SAMPLES__SHIFT) & A5XX_GRAS_SC_RAS_MSAA_CNTL_SAMPLES__MASK;
+}
+
+#define REG_A5XX_GRAS_SC_DEST_MSAA_CNTL 0x0000e0a3
+#define A5XX_GRAS_SC_DEST_MSAA_CNTL_SAMPLES__MASK 0x00000003
+#define A5XX_GRAS_SC_DEST_MSAA_CNTL_SAMPLES__SHIFT 0
+static inline uint32_t A5XX_GRAS_SC_DEST_MSAA_CNTL_SAMPLES(enum a3xx_msaa_samples val)
+{
+ return ((val) << A5XX_GRAS_SC_DEST_MSAA_CNTL_SAMPLES__SHIFT) & A5XX_GRAS_SC_DEST_MSAA_CNTL_SAMPLES__MASK;
+}
+#define A5XX_GRAS_SC_DEST_MSAA_CNTL_MSAA_DISABLE 0x00000004
+
+#define REG_A5XX_GRAS_SC_SCREEN_SCISSOR_CNTL 0x0000e0a4
+
+#define REG_A5XX_GRAS_SC_SCREEN_SCISSOR_TL_0 0x0000e0aa
+#define A5XX_GRAS_SC_SCREEN_SCISSOR_TL_0_WINDOW_OFFSET_DISABLE 0x80000000
+#define A5XX_GRAS_SC_SCREEN_SCISSOR_TL_0_X__MASK 0x00007fff
+#define A5XX_GRAS_SC_SCREEN_SCISSOR_TL_0_X__SHIFT 0
+static inline uint32_t A5XX_GRAS_SC_SCREEN_SCISSOR_TL_0_X(uint32_t val)
+{
+ return ((val) << A5XX_GRAS_SC_SCREEN_SCISSOR_TL_0_X__SHIFT) & A5XX_GRAS_SC_SCREEN_SCISSOR_TL_0_X__MASK;
+}
+#define A5XX_GRAS_SC_SCREEN_SCISSOR_TL_0_Y__MASK 0x7fff0000
+#define A5XX_GRAS_SC_SCREEN_SCISSOR_TL_0_Y__SHIFT 16
+static inline uint32_t A5XX_GRAS_SC_SCREEN_SCISSOR_TL_0_Y(uint32_t val)
+{
+ return ((val) << A5XX_GRAS_SC_SCREEN_SCISSOR_TL_0_Y__SHIFT) & A5XX_GRAS_SC_SCREEN_SCISSOR_TL_0_Y__MASK;
+}
+
+#define REG_A5XX_GRAS_SC_SCREEN_SCISSOR_BR_0 0x0000e0ab
+#define A5XX_GRAS_SC_SCREEN_SCISSOR_BR_0_WINDOW_OFFSET_DISABLE 0x80000000
+#define A5XX_GRAS_SC_SCREEN_SCISSOR_BR_0_X__MASK 0x00007fff
+#define A5XX_GRAS_SC_SCREEN_SCISSOR_BR_0_X__SHIFT 0
+static inline uint32_t A5XX_GRAS_SC_SCREEN_SCISSOR_BR_0_X(uint32_t val)
+{
+ return ((val) << A5XX_GRAS_SC_SCREEN_SCISSOR_BR_0_X__SHIFT) & A5XX_GRAS_SC_SCREEN_SCISSOR_BR_0_X__MASK;
+}
+#define A5XX_GRAS_SC_SCREEN_SCISSOR_BR_0_Y__MASK 0x7fff0000
+#define A5XX_GRAS_SC_SCREEN_SCISSOR_BR_0_Y__SHIFT 16
+static inline uint32_t A5XX_GRAS_SC_SCREEN_SCISSOR_BR_0_Y(uint32_t val)
+{
+ return ((val) << A5XX_GRAS_SC_SCREEN_SCISSOR_BR_0_Y__SHIFT) & A5XX_GRAS_SC_SCREEN_SCISSOR_BR_0_Y__MASK;
+}
+
+#define REG_A5XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0 0x0000e0ca
+#define A5XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0_WINDOW_OFFSET_DISABLE 0x80000000
+#define A5XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0_X__MASK 0x00007fff
+#define A5XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0_X__SHIFT 0
+static inline uint32_t A5XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0_X(uint32_t val)
+{
+ return ((val) << A5XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0_X__SHIFT) & A5XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0_X__MASK;
+}
+#define A5XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0_Y__MASK 0x7fff0000
+#define A5XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0_Y__SHIFT 16
+static inline uint32_t A5XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0_Y(uint32_t val)
+{
+ return ((val) << A5XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0_Y__SHIFT) & A5XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0_Y__MASK;
+}
+
+#define REG_A5XX_GRAS_SC_VIEWPORT_SCISSOR_BR_0 0x0000e0cb
+#define A5XX_GRAS_SC_VIEWPORT_SCISSOR_BR_0_WINDOW_OFFSET_DISABLE 0x80000000
+#define A5XX_GRAS_SC_VIEWPORT_SCISSOR_BR_0_X__MASK 0x00007fff
+#define A5XX_GRAS_SC_VIEWPORT_SCISSOR_BR_0_X__SHIFT 0
+static inline uint32_t A5XX_GRAS_SC_VIEWPORT_SCISSOR_BR_0_X(uint32_t val)
+{
+ return ((val) << A5XX_GRAS_SC_VIEWPORT_SCISSOR_BR_0_X__SHIFT) & A5XX_GRAS_SC_VIEWPORT_SCISSOR_BR_0_X__MASK;
+}
+#define A5XX_GRAS_SC_VIEWPORT_SCISSOR_BR_0_Y__MASK 0x7fff0000
+#define A5XX_GRAS_SC_VIEWPORT_SCISSOR_BR_0_Y__SHIFT 16
+static inline uint32_t A5XX_GRAS_SC_VIEWPORT_SCISSOR_BR_0_Y(uint32_t val)
+{
+ return ((val) << A5XX_GRAS_SC_VIEWPORT_SCISSOR_BR_0_Y__SHIFT) & A5XX_GRAS_SC_VIEWPORT_SCISSOR_BR_0_Y__MASK;
+}
+
+#define REG_A5XX_GRAS_SC_WINDOW_SCISSOR_TL 0x0000e0ea
+#define A5XX_GRAS_SC_WINDOW_SCISSOR_TL_WINDOW_OFFSET_DISABLE 0x80000000
+#define A5XX_GRAS_SC_WINDOW_SCISSOR_TL_X__MASK 0x00007fff
+#define A5XX_GRAS_SC_WINDOW_SCISSOR_TL_X__SHIFT 0
+static inline uint32_t A5XX_GRAS_SC_WINDOW_SCISSOR_TL_X(uint32_t val)
+{
+ return ((val) << A5XX_GRAS_SC_WINDOW_SCISSOR_TL_X__SHIFT) & A5XX_GRAS_SC_WINDOW_SCISSOR_TL_X__MASK;
+}
+#define A5XX_GRAS_SC_WINDOW_SCISSOR_TL_Y__MASK 0x7fff0000
+#define A5XX_GRAS_SC_WINDOW_SCISSOR_TL_Y__SHIFT 16
+static inline uint32_t A5XX_GRAS_SC_WINDOW_SCISSOR_TL_Y(uint32_t val)
+{
+ return ((val) << A5XX_GRAS_SC_WINDOW_SCISSOR_TL_Y__SHIFT) & A5XX_GRAS_SC_WINDOW_SCISSOR_TL_Y__MASK;
+}
+
+#define REG_A5XX_GRAS_SC_WINDOW_SCISSOR_BR 0x0000e0eb
+#define A5XX_GRAS_SC_WINDOW_SCISSOR_BR_WINDOW_OFFSET_DISABLE 0x80000000
+#define A5XX_GRAS_SC_WINDOW_SCISSOR_BR_X__MASK 0x00007fff
+#define A5XX_GRAS_SC_WINDOW_SCISSOR_BR_X__SHIFT 0
+static inline uint32_t A5XX_GRAS_SC_WINDOW_SCISSOR_BR_X(uint32_t val)
+{
+ return ((val) << A5XX_GRAS_SC_WINDOW_SCISSOR_BR_X__SHIFT) & A5XX_GRAS_SC_WINDOW_SCISSOR_BR_X__MASK;
+}
+#define A5XX_GRAS_SC_WINDOW_SCISSOR_BR_Y__MASK 0x7fff0000
+#define A5XX_GRAS_SC_WINDOW_SCISSOR_BR_Y__SHIFT 16
+static inline uint32_t A5XX_GRAS_SC_WINDOW_SCISSOR_BR_Y(uint32_t val)
+{
+ return ((val) << A5XX_GRAS_SC_WINDOW_SCISSOR_BR_Y__SHIFT) & A5XX_GRAS_SC_WINDOW_SCISSOR_BR_Y__MASK;
+}
+
+#define REG_A5XX_GRAS_LRZ_CNTL 0x0000e100
+
+#define REG_A5XX_GRAS_LRZ_BUFFER_BASE_LO 0x0000e101
+
+#define REG_A5XX_GRAS_LRZ_BUFFER_BASE_HI 0x0000e102
+
+#define REG_A5XX_GRAS_LRZ_BUFFER_PITCH 0x0000e103
+
+#define REG_A5XX_GRAS_LRZ_FAST_CLEAR_BUFFER_BASE_LO 0x0000e104
+
+#define REG_A5XX_GRAS_LRZ_FAST_CLEAR_BUFFER_BASE_HI 0x0000e105
+
+#define REG_A5XX_RB_CNTL 0x0000e140
+#define A5XX_RB_CNTL_WIDTH__MASK 0x000000ff
+#define A5XX_RB_CNTL_WIDTH__SHIFT 0
+static inline uint32_t A5XX_RB_CNTL_WIDTH(uint32_t val)
+{
+ return ((val >> 5) << A5XX_RB_CNTL_WIDTH__SHIFT) & A5XX_RB_CNTL_WIDTH__MASK;
+}
+#define A5XX_RB_CNTL_HEIGHT__MASK 0x0001fe00
+#define A5XX_RB_CNTL_HEIGHT__SHIFT 9
+static inline uint32_t A5XX_RB_CNTL_HEIGHT(uint32_t val)
+{
+ return ((val >> 5) << A5XX_RB_CNTL_HEIGHT__SHIFT) & A5XX_RB_CNTL_HEIGHT__MASK;
+}
+#define A5XX_RB_CNTL_BYPASS 0x00020000
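+/* A5XX_RB_CNTL_WIDTH/HEIGHT pre-shift the value by 5, i.e. the
+ * hardware fields appear to be in units of 32 pixels.  A hypothetical
+ * caller (sketch only) might program the bin size as:
+ *   gpu_write(gpu, REG_A5XX_RB_CNTL,
+ *             A5XX_RB_CNTL_WIDTH(w) | A5XX_RB_CNTL_HEIGHT(h));
+ * with w/h given in pixels.
+ */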
+
+#define REG_A5XX_RB_RENDER_CNTL 0x0000e141
+#define A5XX_RB_RENDER_CNTL_SAMPLES_PASSED 0x00000040
+#define A5XX_RB_RENDER_CNTL_FLAG_DEPTH 0x00004000
+#define A5XX_RB_RENDER_CNTL_FLAG_DEPTH2 0x00008000
+#define A5XX_RB_RENDER_CNTL_FLAG_MRTS__MASK 0x00ff0000
+#define A5XX_RB_RENDER_CNTL_FLAG_MRTS__SHIFT 16
+static inline uint32_t A5XX_RB_RENDER_CNTL_FLAG_MRTS(uint32_t val)
+{
+ return ((val) << A5XX_RB_RENDER_CNTL_FLAG_MRTS__SHIFT) & A5XX_RB_RENDER_CNTL_FLAG_MRTS__MASK;
+}
+#define A5XX_RB_RENDER_CNTL_FLAG_MRTS2__MASK 0xff000000
+#define A5XX_RB_RENDER_CNTL_FLAG_MRTS2__SHIFT 24
+static inline uint32_t A5XX_RB_RENDER_CNTL_FLAG_MRTS2(uint32_t val)
+{
+ return ((val) << A5XX_RB_RENDER_CNTL_FLAG_MRTS2__SHIFT) & A5XX_RB_RENDER_CNTL_FLAG_MRTS2__MASK;
+}
+
+#define REG_A5XX_RB_RAS_MSAA_CNTL 0x0000e142
+#define A5XX_RB_RAS_MSAA_CNTL_SAMPLES__MASK 0x00000003
+#define A5XX_RB_RAS_MSAA_CNTL_SAMPLES__SHIFT 0
+static inline uint32_t A5XX_RB_RAS_MSAA_CNTL_SAMPLES(enum a3xx_msaa_samples val)
+{
+ return ((val) << A5XX_RB_RAS_MSAA_CNTL_SAMPLES__SHIFT) & A5XX_RB_RAS_MSAA_CNTL_SAMPLES__MASK;
+}
+
+#define REG_A5XX_RB_DEST_MSAA_CNTL 0x0000e143
+#define A5XX_RB_DEST_MSAA_CNTL_SAMPLES__MASK 0x00000003
+#define A5XX_RB_DEST_MSAA_CNTL_SAMPLES__SHIFT 0
+static inline uint32_t A5XX_RB_DEST_MSAA_CNTL_SAMPLES(enum a3xx_msaa_samples val)
+{
+ return ((val) << A5XX_RB_DEST_MSAA_CNTL_SAMPLES__SHIFT) & A5XX_RB_DEST_MSAA_CNTL_SAMPLES__MASK;
+}
+#define A5XX_RB_DEST_MSAA_CNTL_MSAA_DISABLE 0x00000004
+
+#define REG_A5XX_RB_RENDER_CONTROL0 0x0000e144
+#define A5XX_RB_RENDER_CONTROL0_VARYING 0x00000001
+#define A5XX_RB_RENDER_CONTROL0_XCOORD 0x00000040
+#define A5XX_RB_RENDER_CONTROL0_YCOORD 0x00000080
+#define A5XX_RB_RENDER_CONTROL0_ZCOORD 0x00000100
+#define A5XX_RB_RENDER_CONTROL0_WCOORD 0x00000200
+
+#define REG_A5XX_RB_RENDER_CONTROL1 0x0000e145
+#define A5XX_RB_RENDER_CONTROL1_FACENESS 0x00000002
+
+#define REG_A5XX_RB_FS_OUTPUT_CNTL 0x0000e146
+#define A5XX_RB_FS_OUTPUT_CNTL_MRT__MASK 0x0000000f
+#define A5XX_RB_FS_OUTPUT_CNTL_MRT__SHIFT 0
+static inline uint32_t A5XX_RB_FS_OUTPUT_CNTL_MRT(uint32_t val)
+{
+ return ((val) << A5XX_RB_FS_OUTPUT_CNTL_MRT__SHIFT) & A5XX_RB_FS_OUTPUT_CNTL_MRT__MASK;
+}
+#define A5XX_RB_FS_OUTPUT_CNTL_FRAG_WRITES_Z 0x00000020
+
+#define REG_A5XX_RB_RENDER_COMPONENTS 0x0000e147
+#define A5XX_RB_RENDER_COMPONENTS_RT0__MASK 0x0000000f
+#define A5XX_RB_RENDER_COMPONENTS_RT0__SHIFT 0
+static inline uint32_t A5XX_RB_RENDER_COMPONENTS_RT0(uint32_t val)
+{
+ return ((val) << A5XX_RB_RENDER_COMPONENTS_RT0__SHIFT) & A5XX_RB_RENDER_COMPONENTS_RT0__MASK;
+}
+#define A5XX_RB_RENDER_COMPONENTS_RT1__MASK 0x000000f0
+#define A5XX_RB_RENDER_COMPONENTS_RT1__SHIFT 4
+static inline uint32_t A5XX_RB_RENDER_COMPONENTS_RT1(uint32_t val)
+{
+ return ((val) << A5XX_RB_RENDER_COMPONENTS_RT1__SHIFT) & A5XX_RB_RENDER_COMPONENTS_RT1__MASK;
+}
+#define A5XX_RB_RENDER_COMPONENTS_RT2__MASK 0x00000f00
+#define A5XX_RB_RENDER_COMPONENTS_RT2__SHIFT 8
+static inline uint32_t A5XX_RB_RENDER_COMPONENTS_RT2(uint32_t val)
+{
+ return ((val) << A5XX_RB_RENDER_COMPONENTS_RT2__SHIFT) & A5XX_RB_RENDER_COMPONENTS_RT2__MASK;
+}
+#define A5XX_RB_RENDER_COMPONENTS_RT3__MASK 0x0000f000
+#define A5XX_RB_RENDER_COMPONENTS_RT3__SHIFT 12
+static inline uint32_t A5XX_RB_RENDER_COMPONENTS_RT3(uint32_t val)
+{
+ return ((val) << A5XX_RB_RENDER_COMPONENTS_RT3__SHIFT) & A5XX_RB_RENDER_COMPONENTS_RT3__MASK;
+}
+#define A5XX_RB_RENDER_COMPONENTS_RT4__MASK 0x000f0000
+#define A5XX_RB_RENDER_COMPONENTS_RT4__SHIFT 16
+static inline uint32_t A5XX_RB_RENDER_COMPONENTS_RT4(uint32_t val)
+{
+ return ((val) << A5XX_RB_RENDER_COMPONENTS_RT4__SHIFT) & A5XX_RB_RENDER_COMPONENTS_RT4__MASK;
+}
+#define A5XX_RB_RENDER_COMPONENTS_RT5__MASK 0x00f00000
+#define A5XX_RB_RENDER_COMPONENTS_RT5__SHIFT 20
+static inline uint32_t A5XX_RB_RENDER_COMPONENTS_RT5(uint32_t val)
+{
+ return ((val) << A5XX_RB_RENDER_COMPONENTS_RT5__SHIFT) & A5XX_RB_RENDER_COMPONENTS_RT5__MASK;
+}
+#define A5XX_RB_RENDER_COMPONENTS_RT6__MASK 0x0f000000
+#define A5XX_RB_RENDER_COMPONENTS_RT6__SHIFT 24
+static inline uint32_t A5XX_RB_RENDER_COMPONENTS_RT6(uint32_t val)
+{
+ return ((val) << A5XX_RB_RENDER_COMPONENTS_RT6__SHIFT) & A5XX_RB_RENDER_COMPONENTS_RT6__MASK;
+}
+#define A5XX_RB_RENDER_COMPONENTS_RT7__MASK 0xf0000000
+#define A5XX_RB_RENDER_COMPONENTS_RT7__SHIFT 28
+static inline uint32_t A5XX_RB_RENDER_COMPONENTS_RT7(uint32_t val)
+{
+ return ((val) << A5XX_RB_RENDER_COMPONENTS_RT7__SHIFT) & A5XX_RB_RENDER_COMPONENTS_RT7__MASK;
+}
+
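+/* Per-MRT registers are exposed as indexed accessors: each render
+ * target occupies a seven-dword group starting at 0xe150, so
+ * REG_A5XX_RB_MRT_CONTROL(i) == 0xe150 + 0x7*i for MRT index i, with
+ * BLEND_CONTROL, BUF_INFO, PITCH, ARRAY_PITCH and BASE_LO/HI at fixed
+ * offsets within the group.
+ */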
+static inline uint32_t REG_A5XX_RB_MRT(uint32_t i0) { return 0x0000e150 + 0x7*i0; }
+
+static inline uint32_t REG_A5XX_RB_MRT_CONTROL(uint32_t i0) { return 0x0000e150 + 0x7*i0; }
+#define A5XX_RB_MRT_CONTROL_BLEND 0x00000001
+#define A5XX_RB_MRT_CONTROL_BLEND2 0x00000002
+#define A5XX_RB_MRT_CONTROL_COMPONENT_ENABLE__MASK 0x00000780
+#define A5XX_RB_MRT_CONTROL_COMPONENT_ENABLE__SHIFT 7
+static inline uint32_t A5XX_RB_MRT_CONTROL_COMPONENT_ENABLE(uint32_t val)
+{
+ return ((val) << A5XX_RB_MRT_CONTROL_COMPONENT_ENABLE__SHIFT) & A5XX_RB_MRT_CONTROL_COMPONENT_ENABLE__MASK;
+}
+
+static inline uint32_t REG_A5XX_RB_MRT_BLEND_CONTROL(uint32_t i0) { return 0x0000e151 + 0x7*i0; }
+#define A5XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR__MASK 0x0000001f
+#define A5XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR__SHIFT 0
+static inline uint32_t A5XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR(enum adreno_rb_blend_factor val)
+{
+ return ((val) << A5XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR__SHIFT) & A5XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR__MASK;
+}
+#define A5XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE__MASK 0x000000e0
+#define A5XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE__SHIFT 5
+static inline uint32_t A5XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE(enum a3xx_rb_blend_opcode val)
+{
+ return ((val) << A5XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE__SHIFT) & A5XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE__MASK;
+}
+#define A5XX_RB_MRT_BLEND_CONTROL_RGB_DEST_FACTOR__MASK 0x00001f00
+#define A5XX_RB_MRT_BLEND_CONTROL_RGB_DEST_FACTOR__SHIFT 8
+static inline uint32_t A5XX_RB_MRT_BLEND_CONTROL_RGB_DEST_FACTOR(enum adreno_rb_blend_factor val)
+{
+ return ((val) << A5XX_RB_MRT_BLEND_CONTROL_RGB_DEST_FACTOR__SHIFT) & A5XX_RB_MRT_BLEND_CONTROL_RGB_DEST_FACTOR__MASK;
+}
+#define A5XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR__MASK 0x001f0000
+#define A5XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR__SHIFT 16
+static inline uint32_t A5XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR(enum adreno_rb_blend_factor val)
+{
+ return ((val) << A5XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR__SHIFT) & A5XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR__MASK;
+}
+#define A5XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE__MASK 0x00e00000
+#define A5XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE__SHIFT 21
+static inline uint32_t A5XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE(enum a3xx_rb_blend_opcode val)
+{
+ return ((val) << A5XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE__SHIFT) & A5XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE__MASK;
+}
+#define A5XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR__MASK 0x1f000000
+#define A5XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR__SHIFT 24
+static inline uint32_t A5XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR(enum adreno_rb_blend_factor val)
+{
+ return ((val) << A5XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR__SHIFT) & A5XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR__MASK;
+}
+
+static inline uint32_t REG_A5XX_RB_MRT_BUF_INFO(uint32_t i0) { return 0x0000e152 + 0x7*i0; }
+#define A5XX_RB_MRT_BUF_INFO_COLOR_FORMAT__MASK 0x000000ff
+#define A5XX_RB_MRT_BUF_INFO_COLOR_FORMAT__SHIFT 0
+static inline uint32_t A5XX_RB_MRT_BUF_INFO_COLOR_FORMAT(enum a5xx_color_fmt val)
+{
+ return ((val) << A5XX_RB_MRT_BUF_INFO_COLOR_FORMAT__SHIFT) & A5XX_RB_MRT_BUF_INFO_COLOR_FORMAT__MASK;
+}
+#define A5XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE__MASK 0x00000300
+#define A5XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE__SHIFT 8
+static inline uint32_t A5XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE(enum a5xx_tile_mode val)
+{
+ return ((val) << A5XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE__SHIFT) & A5XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE__MASK;
+}
+#define A5XX_RB_MRT_BUF_INFO_COLOR_SWAP__MASK 0x00006000
+#define A5XX_RB_MRT_BUF_INFO_COLOR_SWAP__SHIFT 13
+static inline uint32_t A5XX_RB_MRT_BUF_INFO_COLOR_SWAP(enum a3xx_color_swap val)
+{
+ return ((val) << A5XX_RB_MRT_BUF_INFO_COLOR_SWAP__SHIFT) & A5XX_RB_MRT_BUF_INFO_COLOR_SWAP__MASK;
+}
+#define A5XX_RB_MRT_BUF_INFO_COLOR_SRGB 0x00008000
+
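+/* The MRT pitch helpers take a byte pitch and pre-shift it by 6, so
+ * the hardware fields are presumably in 64-byte units; callers pass
+ * the pitch in bytes and must keep it 64-byte aligned.
+ */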
+static inline uint32_t REG_A5XX_RB_MRT_PITCH(uint32_t i0) { return 0x0000e153 + 0x7*i0; }
+#define A5XX_RB_MRT_PITCH__MASK 0xffffffff
+#define A5XX_RB_MRT_PITCH__SHIFT 0
+static inline uint32_t A5XX_RB_MRT_PITCH(uint32_t val)
+{
+ return ((val >> 6) << A5XX_RB_MRT_PITCH__SHIFT) & A5XX_RB_MRT_PITCH__MASK;
+}
+
+static inline uint32_t REG_A5XX_RB_MRT_ARRAY_PITCH(uint32_t i0) { return 0x0000e154 + 0x7*i0; }
+#define A5XX_RB_MRT_ARRAY_PITCH__MASK 0xffffffff
+#define A5XX_RB_MRT_ARRAY_PITCH__SHIFT 0
+static inline uint32_t A5XX_RB_MRT_ARRAY_PITCH(uint32_t val)
+{
+ return ((val >> 6) << A5XX_RB_MRT_ARRAY_PITCH__SHIFT) & A5XX_RB_MRT_ARRAY_PITCH__MASK;
+}
+
+static inline uint32_t REG_A5XX_RB_MRT_BASE_LO(uint32_t i0) { return 0x0000e155 + 0x7*i0; }
+
+static inline uint32_t REG_A5XX_RB_MRT_BASE_HI(uint32_t i0) { return 0x0000e156 + 0x7*i0; }
+
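+/* The blend-color registers pack the same channel three ways: 8-bit
+ * UINT in the low byte, 8-bit SINT above it, and a 16-bit half float
+ * in the upper half via util_float_to_half(), assumed to be the usual
+ * float->fp16 conversion helper.  The *_F32 companion register carries
+ * the full 32-bit float bit pattern instead.
+ */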
+#define REG_A5XX_RB_BLEND_RED 0x0000e1a0
+#define A5XX_RB_BLEND_RED_UINT__MASK 0x000000ff
+#define A5XX_RB_BLEND_RED_UINT__SHIFT 0
+static inline uint32_t A5XX_RB_BLEND_RED_UINT(uint32_t val)
+{
+ return ((val) << A5XX_RB_BLEND_RED_UINT__SHIFT) & A5XX_RB_BLEND_RED_UINT__MASK;
+}
+#define A5XX_RB_BLEND_RED_SINT__MASK 0x0000ff00
+#define A5XX_RB_BLEND_RED_SINT__SHIFT 8
+static inline uint32_t A5XX_RB_BLEND_RED_SINT(uint32_t val)
+{
+ return ((val) << A5XX_RB_BLEND_RED_SINT__SHIFT) & A5XX_RB_BLEND_RED_SINT__MASK;
+}
+#define A5XX_RB_BLEND_RED_FLOAT__MASK 0xffff0000
+#define A5XX_RB_BLEND_RED_FLOAT__SHIFT 16
+static inline uint32_t A5XX_RB_BLEND_RED_FLOAT(float val)
+{
+ return ((util_float_to_half(val)) << A5XX_RB_BLEND_RED_FLOAT__SHIFT) & A5XX_RB_BLEND_RED_FLOAT__MASK;
+}
+
+#define REG_A5XX_RB_BLEND_RED_F32 0x0000e1a1
+#define A5XX_RB_BLEND_RED_F32__MASK 0xffffffff
+#define A5XX_RB_BLEND_RED_F32__SHIFT 0
+static inline uint32_t A5XX_RB_BLEND_RED_F32(float val)
+{
+ return ((fui(val)) << A5XX_RB_BLEND_RED_F32__SHIFT) & A5XX_RB_BLEND_RED_F32__MASK;
+}
+
+#define REG_A5XX_RB_BLEND_GREEN 0x0000e1a2
+#define A5XX_RB_BLEND_GREEN_UINT__MASK 0x000000ff
+#define A5XX_RB_BLEND_GREEN_UINT__SHIFT 0
+static inline uint32_t A5XX_RB_BLEND_GREEN_UINT(uint32_t val)
+{
+ return ((val) << A5XX_RB_BLEND_GREEN_UINT__SHIFT) & A5XX_RB_BLEND_GREEN_UINT__MASK;
+}
+#define A5XX_RB_BLEND_GREEN_SINT__MASK 0x0000ff00
+#define A5XX_RB_BLEND_GREEN_SINT__SHIFT 8
+static inline uint32_t A5XX_RB_BLEND_GREEN_SINT(uint32_t val)
+{
+ return ((val) << A5XX_RB_BLEND_GREEN_SINT__SHIFT) & A5XX_RB_BLEND_GREEN_SINT__MASK;
+}
+#define A5XX_RB_BLEND_GREEN_FLOAT__MASK 0xffff0000
+#define A5XX_RB_BLEND_GREEN_FLOAT__SHIFT 16
+static inline uint32_t A5XX_RB_BLEND_GREEN_FLOAT(float val)
+{
+ return ((util_float_to_half(val)) << A5XX_RB_BLEND_GREEN_FLOAT__SHIFT) & A5XX_RB_BLEND_GREEN_FLOAT__MASK;
+}
+
+#define REG_A5XX_RB_BLEND_GREEN_F32 0x0000e1a3
+#define A5XX_RB_BLEND_GREEN_F32__MASK 0xffffffff
+#define A5XX_RB_BLEND_GREEN_F32__SHIFT 0
+static inline uint32_t A5XX_RB_BLEND_GREEN_F32(float val)
+{
+ return ((fui(val)) << A5XX_RB_BLEND_GREEN_F32__SHIFT) & A5XX_RB_BLEND_GREEN_F32__MASK;
+}
+
+#define REG_A5XX_RB_BLEND_BLUE 0x0000e1a4
+#define A5XX_RB_BLEND_BLUE_UINT__MASK 0x000000ff
+#define A5XX_RB_BLEND_BLUE_UINT__SHIFT 0
+static inline uint32_t A5XX_RB_BLEND_BLUE_UINT(uint32_t val)
+{
+ return ((val) << A5XX_RB_BLEND_BLUE_UINT__SHIFT) & A5XX_RB_BLEND_BLUE_UINT__MASK;
+}
+#define A5XX_RB_BLEND_BLUE_SINT__MASK 0x0000ff00
+#define A5XX_RB_BLEND_BLUE_SINT__SHIFT 8
+static inline uint32_t A5XX_RB_BLEND_BLUE_SINT(uint32_t val)
+{
+ return ((val) << A5XX_RB_BLEND_BLUE_SINT__SHIFT) & A5XX_RB_BLEND_BLUE_SINT__MASK;
+}
+#define A5XX_RB_BLEND_BLUE_FLOAT__MASK 0xffff0000
+#define A5XX_RB_BLEND_BLUE_FLOAT__SHIFT 16
+static inline uint32_t A5XX_RB_BLEND_BLUE_FLOAT(float val)
+{
+ return ((util_float_to_half(val)) << A5XX_RB_BLEND_BLUE_FLOAT__SHIFT) & A5XX_RB_BLEND_BLUE_FLOAT__MASK;
+}
+
+#define REG_A5XX_RB_BLEND_BLUE_F32 0x0000e1a5
+#define A5XX_RB_BLEND_BLUE_F32__MASK 0xffffffff
+#define A5XX_RB_BLEND_BLUE_F32__SHIFT 0
+static inline uint32_t A5XX_RB_BLEND_BLUE_F32(float val)
+{
+ return ((fui(val)) << A5XX_RB_BLEND_BLUE_F32__SHIFT) & A5XX_RB_BLEND_BLUE_F32__MASK;
+}
+
+#define REG_A5XX_RB_BLEND_ALPHA 0x0000e1a6
+#define A5XX_RB_BLEND_ALPHA_UINT__MASK 0x000000ff
+#define A5XX_RB_BLEND_ALPHA_UINT__SHIFT 0
+static inline uint32_t A5XX_RB_BLEND_ALPHA_UINT(uint32_t val)
+{
+ return ((val) << A5XX_RB_BLEND_ALPHA_UINT__SHIFT) & A5XX_RB_BLEND_ALPHA_UINT__MASK;
+}
+#define A5XX_RB_BLEND_ALPHA_SINT__MASK 0x0000ff00
+#define A5XX_RB_BLEND_ALPHA_SINT__SHIFT 8
+static inline uint32_t A5XX_RB_BLEND_ALPHA_SINT(uint32_t val)
+{
+ return ((val) << A5XX_RB_BLEND_ALPHA_SINT__SHIFT) & A5XX_RB_BLEND_ALPHA_SINT__MASK;
+}
+#define A5XX_RB_BLEND_ALPHA_FLOAT__MASK 0xffff0000
+#define A5XX_RB_BLEND_ALPHA_FLOAT__SHIFT 16
+static inline uint32_t A5XX_RB_BLEND_ALPHA_FLOAT(float val)
+{
+ return ((util_float_to_half(val)) << A5XX_RB_BLEND_ALPHA_FLOAT__SHIFT) & A5XX_RB_BLEND_ALPHA_FLOAT__MASK;
+}
+
+#define REG_A5XX_RB_BLEND_ALPHA_F32 0x0000e1a7
+#define A5XX_RB_BLEND_ALPHA_F32__MASK 0xffffffff
+#define A5XX_RB_BLEND_ALPHA_F32__SHIFT 0
+static inline uint32_t A5XX_RB_BLEND_ALPHA_F32(float val)
+{
+ return ((fui(val)) << A5XX_RB_BLEND_ALPHA_F32__SHIFT) & A5XX_RB_BLEND_ALPHA_F32__MASK;
+}
+
+#define REG_A5XX_RB_ALPHA_CONTROL 0x0000e1a8
+#define A5XX_RB_ALPHA_CONTROL_ALPHA_REF__MASK 0x000000ff
+#define A5XX_RB_ALPHA_CONTROL_ALPHA_REF__SHIFT 0
+static inline uint32_t A5XX_RB_ALPHA_CONTROL_ALPHA_REF(uint32_t val)
+{
+ return ((val) << A5XX_RB_ALPHA_CONTROL_ALPHA_REF__SHIFT) & A5XX_RB_ALPHA_CONTROL_ALPHA_REF__MASK;
+}
+#define A5XX_RB_ALPHA_CONTROL_ALPHA_TEST 0x00000100
+#define A5XX_RB_ALPHA_CONTROL_ALPHA_TEST_FUNC__MASK 0x00000e00
+#define A5XX_RB_ALPHA_CONTROL_ALPHA_TEST_FUNC__SHIFT 9
+static inline uint32_t A5XX_RB_ALPHA_CONTROL_ALPHA_TEST_FUNC(enum adreno_compare_func val)
+{
+ return ((val) << A5XX_RB_ALPHA_CONTROL_ALPHA_TEST_FUNC__SHIFT) & A5XX_RB_ALPHA_CONTROL_ALPHA_TEST_FUNC__MASK;
+}
+
+#define REG_A5XX_RB_BLEND_CNTL 0x0000e1a9
+#define A5XX_RB_BLEND_CNTL_ENABLE_BLEND__MASK 0x000000ff
+#define A5XX_RB_BLEND_CNTL_ENABLE_BLEND__SHIFT 0
+static inline uint32_t A5XX_RB_BLEND_CNTL_ENABLE_BLEND(uint32_t val)
+{
+ return ((val) << A5XX_RB_BLEND_CNTL_ENABLE_BLEND__SHIFT) & A5XX_RB_BLEND_CNTL_ENABLE_BLEND__MASK;
+}
+#define A5XX_RB_BLEND_CNTL_INDEPENDENT_BLEND 0x00000100
+#define A5XX_RB_BLEND_CNTL_SAMPLE_MASK__MASK 0xffff0000
+#define A5XX_RB_BLEND_CNTL_SAMPLE_MASK__SHIFT 16
+static inline uint32_t A5XX_RB_BLEND_CNTL_SAMPLE_MASK(uint32_t val)
+{
+ return ((val) << A5XX_RB_BLEND_CNTL_SAMPLE_MASK__SHIFT) & A5XX_RB_BLEND_CNTL_SAMPLE_MASK__MASK;
+}
+
+#define REG_A5XX_RB_DEPTH_PLANE_CNTL 0x0000e1b0
+#define A5XX_RB_DEPTH_PLANE_CNTL_FRAG_WRITES_Z 0x00000001
+
+#define REG_A5XX_RB_DEPTH_CNTL 0x0000e1b1
+#define A5XX_RB_DEPTH_CNTL_Z_ENABLE 0x00000001
+#define A5XX_RB_DEPTH_CNTL_Z_WRITE_ENABLE 0x00000002
+#define A5XX_RB_DEPTH_CNTL_ZFUNC__MASK 0x0000001c
+#define A5XX_RB_DEPTH_CNTL_ZFUNC__SHIFT 2
+static inline uint32_t A5XX_RB_DEPTH_CNTL_ZFUNC(enum adreno_compare_func val)
+{
+ return ((val) << A5XX_RB_DEPTH_CNTL_ZFUNC__SHIFT) & A5XX_RB_DEPTH_CNTL_ZFUNC__MASK;
+}
+#define A5XX_RB_DEPTH_CNTL_Z_TEST_ENABLE 0x00000040
+
+#define REG_A5XX_RB_DEPTH_BUFFER_INFO 0x0000e1b2
+#define A5XX_RB_DEPTH_BUFFER_INFO_DEPTH_FORMAT__MASK 0x00000007
+#define A5XX_RB_DEPTH_BUFFER_INFO_DEPTH_FORMAT__SHIFT 0
+static inline uint32_t A5XX_RB_DEPTH_BUFFER_INFO_DEPTH_FORMAT(enum a5xx_depth_format val)
+{
+ return ((val) << A5XX_RB_DEPTH_BUFFER_INFO_DEPTH_FORMAT__SHIFT) & A5XX_RB_DEPTH_BUFFER_INFO_DEPTH_FORMAT__MASK;
+}
+
+#define REG_A5XX_RB_DEPTH_BUFFER_BASE_LO 0x0000e1b3
+
+#define REG_A5XX_RB_DEPTH_BUFFER_BASE_HI 0x0000e1b4
+
+#define REG_A5XX_RB_DEPTH_BUFFER_PITCH 0x0000e1b5
+#define A5XX_RB_DEPTH_BUFFER_PITCH__MASK 0xffffffff
+#define A5XX_RB_DEPTH_BUFFER_PITCH__SHIFT 0
+static inline uint32_t A5XX_RB_DEPTH_BUFFER_PITCH(uint32_t val)
+{
+ return ((val >> 5) << A5XX_RB_DEPTH_BUFFER_PITCH__SHIFT) & A5XX_RB_DEPTH_BUFFER_PITCH__MASK;
+}
+
+#define REG_A5XX_RB_DEPTH_BUFFER_ARRAY_PITCH 0x0000e1b6
+#define A5XX_RB_DEPTH_BUFFER_ARRAY_PITCH__MASK 0xffffffff
+#define A5XX_RB_DEPTH_BUFFER_ARRAY_PITCH__SHIFT 0
+static inline uint32_t A5XX_RB_DEPTH_BUFFER_ARRAY_PITCH(uint32_t val)
+{
+ return ((val >> 5) << A5XX_RB_DEPTH_BUFFER_ARRAY_PITCH__SHIFT) & A5XX_RB_DEPTH_BUFFER_ARRAY_PITCH__MASK;
+}
+
+#define REG_A5XX_RB_STENCIL_CONTROL 0x0000e1c0
+#define A5XX_RB_STENCIL_CONTROL_STENCIL_ENABLE 0x00000001
+#define A5XX_RB_STENCIL_CONTROL_STENCIL_ENABLE_BF 0x00000002
+#define A5XX_RB_STENCIL_CONTROL_STENCIL_READ 0x00000004
+#define A5XX_RB_STENCIL_CONTROL_FUNC__MASK 0x00000700
+#define A5XX_RB_STENCIL_CONTROL_FUNC__SHIFT 8
+static inline uint32_t A5XX_RB_STENCIL_CONTROL_FUNC(enum adreno_compare_func val)
+{
+ return ((val) << A5XX_RB_STENCIL_CONTROL_FUNC__SHIFT) & A5XX_RB_STENCIL_CONTROL_FUNC__MASK;
+}
+#define A5XX_RB_STENCIL_CONTROL_FAIL__MASK 0x00003800
+#define A5XX_RB_STENCIL_CONTROL_FAIL__SHIFT 11
+static inline uint32_t A5XX_RB_STENCIL_CONTROL_FAIL(enum adreno_stencil_op val)
+{
+ return ((val) << A5XX_RB_STENCIL_CONTROL_FAIL__SHIFT) & A5XX_RB_STENCIL_CONTROL_FAIL__MASK;
+}
+#define A5XX_RB_STENCIL_CONTROL_ZPASS__MASK 0x0001c000
+#define A5XX_RB_STENCIL_CONTROL_ZPASS__SHIFT 14
+static inline uint32_t A5XX_RB_STENCIL_CONTROL_ZPASS(enum adreno_stencil_op val)
+{
+ return ((val) << A5XX_RB_STENCIL_CONTROL_ZPASS__SHIFT) & A5XX_RB_STENCIL_CONTROL_ZPASS__MASK;
+}
+#define A5XX_RB_STENCIL_CONTROL_ZFAIL__MASK 0x000e0000
+#define A5XX_RB_STENCIL_CONTROL_ZFAIL__SHIFT 17
+static inline uint32_t A5XX_RB_STENCIL_CONTROL_ZFAIL(enum adreno_stencil_op val)
+{
+ return ((val) << A5XX_RB_STENCIL_CONTROL_ZFAIL__SHIFT) & A5XX_RB_STENCIL_CONTROL_ZFAIL__MASK;
+}
+#define A5XX_RB_STENCIL_CONTROL_FUNC_BF__MASK 0x00700000
+#define A5XX_RB_STENCIL_CONTROL_FUNC_BF__SHIFT 20
+static inline uint32_t A5XX_RB_STENCIL_CONTROL_FUNC_BF(enum adreno_compare_func val)
+{
+ return ((val) << A5XX_RB_STENCIL_CONTROL_FUNC_BF__SHIFT) & A5XX_RB_STENCIL_CONTROL_FUNC_BF__MASK;
+}
+#define A5XX_RB_STENCIL_CONTROL_FAIL_BF__MASK 0x03800000
+#define A5XX_RB_STENCIL_CONTROL_FAIL_BF__SHIFT 23
+static inline uint32_t A5XX_RB_STENCIL_CONTROL_FAIL_BF(enum adreno_stencil_op val)
+{
+ return ((val) << A5XX_RB_STENCIL_CONTROL_FAIL_BF__SHIFT) & A5XX_RB_STENCIL_CONTROL_FAIL_BF__MASK;
+}
+#define A5XX_RB_STENCIL_CONTROL_ZPASS_BF__MASK 0x1c000000
+#define A5XX_RB_STENCIL_CONTROL_ZPASS_BF__SHIFT 26
+static inline uint32_t A5XX_RB_STENCIL_CONTROL_ZPASS_BF(enum adreno_stencil_op val)
+{
+ return ((val) << A5XX_RB_STENCIL_CONTROL_ZPASS_BF__SHIFT) & A5XX_RB_STENCIL_CONTROL_ZPASS_BF__MASK;
+}
+#define A5XX_RB_STENCIL_CONTROL_ZFAIL_BF__MASK 0xe0000000
+#define A5XX_RB_STENCIL_CONTROL_ZFAIL_BF__SHIFT 29
+static inline uint32_t A5XX_RB_STENCIL_CONTROL_ZFAIL_BF(enum adreno_stencil_op val)
+{
+ return ((val) << A5XX_RB_STENCIL_CONTROL_ZFAIL_BF__SHIFT) & A5XX_RB_STENCIL_CONTROL_ZFAIL_BF__MASK;
+}
+
+#define REG_A5XX_RB_STENCIL_INFO 0x0000e1c1
+#define A5XX_RB_STENCIL_INFO_SEPARATE_STENCIL 0x00000001
+
+#define REG_A5XX_RB_STENCIL_BASE_LO 0x0000e1c2
+
+#define REG_A5XX_RB_STENCIL_BASE_HI 0x0000e1c3
+
+#define REG_A5XX_RB_STENCIL_PITCH 0x0000e1c4
+#define A5XX_RB_STENCIL_PITCH__MASK 0xffffffff
+#define A5XX_RB_STENCIL_PITCH__SHIFT 0
+static inline uint32_t A5XX_RB_STENCIL_PITCH(uint32_t val)
+{
+ return ((val >> 6) << A5XX_RB_STENCIL_PITCH__SHIFT) & A5XX_RB_STENCIL_PITCH__MASK;
+}
+
+#define REG_A5XX_RB_STENCIL_ARRAY_PITCH 0x0000e1c5
+#define A5XX_RB_STENCIL_ARRAY_PITCH__MASK 0xffffffff
+#define A5XX_RB_STENCIL_ARRAY_PITCH__SHIFT 0
+static inline uint32_t A5XX_RB_STENCIL_ARRAY_PITCH(uint32_t val)
+{
+ return ((val >> 6) << A5XX_RB_STENCIL_ARRAY_PITCH__SHIFT) & A5XX_RB_STENCIL_ARRAY_PITCH__MASK;
+}
+
+#define REG_A5XX_RB_STENCILREFMASK 0x0000e1c6
+#define A5XX_RB_STENCILREFMASK_STENCILREF__MASK 0x000000ff
+#define A5XX_RB_STENCILREFMASK_STENCILREF__SHIFT 0
+static inline uint32_t A5XX_RB_STENCILREFMASK_STENCILREF(uint32_t val)
+{
+ return ((val) << A5XX_RB_STENCILREFMASK_STENCILREF__SHIFT) & A5XX_RB_STENCILREFMASK_STENCILREF__MASK;
+}
+#define A5XX_RB_STENCILREFMASK_STENCILMASK__MASK 0x0000ff00
+#define A5XX_RB_STENCILREFMASK_STENCILMASK__SHIFT 8
+static inline uint32_t A5XX_RB_STENCILREFMASK_STENCILMASK(uint32_t val)
+{
+ return ((val) << A5XX_RB_STENCILREFMASK_STENCILMASK__SHIFT) & A5XX_RB_STENCILREFMASK_STENCILMASK__MASK;
+}
+#define A5XX_RB_STENCILREFMASK_STENCILWRITEMASK__MASK 0x00ff0000
+#define A5XX_RB_STENCILREFMASK_STENCILWRITEMASK__SHIFT 16
+static inline uint32_t A5XX_RB_STENCILREFMASK_STENCILWRITEMASK(uint32_t val)
+{
+ return ((val) << A5XX_RB_STENCILREFMASK_STENCILWRITEMASK__SHIFT) & A5XX_RB_STENCILREFMASK_STENCILWRITEMASK__MASK;
+}
+
+#define REG_A5XX_UNKNOWN_E1C7 0x0000e1c7
+
+#define REG_A5XX_RB_WINDOW_OFFSET 0x0000e1d0
+#define A5XX_RB_WINDOW_OFFSET_WINDOW_OFFSET_DISABLE 0x80000000
+#define A5XX_RB_WINDOW_OFFSET_X__MASK 0x00007fff
+#define A5XX_RB_WINDOW_OFFSET_X__SHIFT 0
+static inline uint32_t A5XX_RB_WINDOW_OFFSET_X(uint32_t val)
+{
+ return ((val) << A5XX_RB_WINDOW_OFFSET_X__SHIFT) & A5XX_RB_WINDOW_OFFSET_X__MASK;
+}
+#define A5XX_RB_WINDOW_OFFSET_Y__MASK 0x7fff0000
+#define A5XX_RB_WINDOW_OFFSET_Y__SHIFT 16
+static inline uint32_t A5XX_RB_WINDOW_OFFSET_Y(uint32_t val)
+{
+ return ((val) << A5XX_RB_WINDOW_OFFSET_Y__SHIFT) & A5XX_RB_WINDOW_OFFSET_Y__MASK;
+}
+
+#define REG_A5XX_RB_BLIT_CNTL 0x0000e210
+#define A5XX_RB_BLIT_CNTL_BUF__MASK 0x0000003f
+#define A5XX_RB_BLIT_CNTL_BUF__SHIFT 0
+static inline uint32_t A5XX_RB_BLIT_CNTL_BUF(enum a5xx_blit_buf val)
+{
+ return ((val) << A5XX_RB_BLIT_CNTL_BUF__SHIFT) & A5XX_RB_BLIT_CNTL_BUF__MASK;
+}
+
+#define REG_A5XX_RB_RESOLVE_CNTL_1 0x0000e211
+#define A5XX_RB_RESOLVE_CNTL_1_WINDOW_OFFSET_DISABLE 0x80000000
+#define A5XX_RB_RESOLVE_CNTL_1_X__MASK 0x00007fff
+#define A5XX_RB_RESOLVE_CNTL_1_X__SHIFT 0
+static inline uint32_t A5XX_RB_RESOLVE_CNTL_1_X(uint32_t val)
+{
+ return ((val) << A5XX_RB_RESOLVE_CNTL_1_X__SHIFT) & A5XX_RB_RESOLVE_CNTL_1_X__MASK;
+}
+#define A5XX_RB_RESOLVE_CNTL_1_Y__MASK 0x7fff0000
+#define A5XX_RB_RESOLVE_CNTL_1_Y__SHIFT 16
+static inline uint32_t A5XX_RB_RESOLVE_CNTL_1_Y(uint32_t val)
+{
+ return ((val) << A5XX_RB_RESOLVE_CNTL_1_Y__SHIFT) & A5XX_RB_RESOLVE_CNTL_1_Y__MASK;
+}
+
+#define REG_A5XX_RB_RESOLVE_CNTL_2 0x0000e212
+#define A5XX_RB_RESOLVE_CNTL_2_WINDOW_OFFSET_DISABLE 0x80000000
+#define A5XX_RB_RESOLVE_CNTL_2_X__MASK 0x00007fff
+#define A5XX_RB_RESOLVE_CNTL_2_X__SHIFT 0
+static inline uint32_t A5XX_RB_RESOLVE_CNTL_2_X(uint32_t val)
+{
+ return ((val) << A5XX_RB_RESOLVE_CNTL_2_X__SHIFT) & A5XX_RB_RESOLVE_CNTL_2_X__MASK;
+}
+#define A5XX_RB_RESOLVE_CNTL_2_Y__MASK 0x7fff0000
+#define A5XX_RB_RESOLVE_CNTL_2_Y__SHIFT 16
+static inline uint32_t A5XX_RB_RESOLVE_CNTL_2_Y(uint32_t val)
+{
+ return ((val) << A5XX_RB_RESOLVE_CNTL_2_Y__SHIFT) & A5XX_RB_RESOLVE_CNTL_2_Y__MASK;
+}
+
+#define REG_A5XX_RB_RESOLVE_CNTL_3 0x0000e213
+
+#define REG_A5XX_RB_BLIT_DST_LO 0x0000e214
+
+#define REG_A5XX_RB_BLIT_DST_HI 0x0000e215
+
+#define REG_A5XX_RB_BLIT_DST_PITCH 0x0000e216
+#define A5XX_RB_BLIT_DST_PITCH__MASK 0xffffffff
+#define A5XX_RB_BLIT_DST_PITCH__SHIFT 0
+static inline uint32_t A5XX_RB_BLIT_DST_PITCH(uint32_t val)
+{
+ return ((val >> 6) << A5XX_RB_BLIT_DST_PITCH__SHIFT) & A5XX_RB_BLIT_DST_PITCH__MASK;
+}
+
+#define REG_A5XX_RB_BLIT_DST_ARRAY_PITCH 0x0000e217
+#define A5XX_RB_BLIT_DST_ARRAY_PITCH__MASK 0xffffffff
+#define A5XX_RB_BLIT_DST_ARRAY_PITCH__SHIFT 0
+static inline uint32_t A5XX_RB_BLIT_DST_ARRAY_PITCH(uint32_t val)
+{
+ return ((val >> 6) << A5XX_RB_BLIT_DST_ARRAY_PITCH__SHIFT) & A5XX_RB_BLIT_DST_ARRAY_PITCH__MASK;
+}
+
+#define REG_A5XX_RB_CLEAR_COLOR_DW0 0x0000e218
+
+#define REG_A5XX_RB_CLEAR_COLOR_DW1 0x0000e219
+
+#define REG_A5XX_RB_CLEAR_COLOR_DW2 0x0000e21a
+
+#define REG_A5XX_RB_CLEAR_COLOR_DW3 0x0000e21b
+
+#define REG_A5XX_RB_CLEAR_CNTL 0x0000e21c
+#define A5XX_RB_CLEAR_CNTL_FAST_CLEAR 0x00000002
+#define A5XX_RB_CLEAR_CNTL_MASK__MASK 0x000000f0
+#define A5XX_RB_CLEAR_CNTL_MASK__SHIFT 4
+static inline uint32_t A5XX_RB_CLEAR_CNTL_MASK(uint32_t val)
+{
+ return ((val) << A5XX_RB_CLEAR_CNTL_MASK__SHIFT) & A5XX_RB_CLEAR_CNTL_MASK__MASK;
+}
+
+#define REG_A5XX_RB_DEPTH_FLAG_BUFFER_BASE_LO 0x0000e240
+
+#define REG_A5XX_RB_DEPTH_FLAG_BUFFER_BASE_HI 0x0000e241
+
+#define REG_A5XX_RB_DEPTH_FLAG_BUFFER_PITCH 0x0000e242
+
+static inline uint32_t REG_A5XX_RB_MRT_FLAG_BUFFER(uint32_t i0) { return 0x0000e243 + 0x4*i0; }
+
+static inline uint32_t REG_A5XX_RB_MRT_FLAG_BUFFER_ADDR_LO(uint32_t i0) { return 0x0000e243 + 0x4*i0; }
+
+static inline uint32_t REG_A5XX_RB_MRT_FLAG_BUFFER_ADDR_HI(uint32_t i0) { return 0x0000e244 + 0x4*i0; }
+
+static inline uint32_t REG_A5XX_RB_MRT_FLAG_BUFFER_PITCH(uint32_t i0) { return 0x0000e245 + 0x4*i0; }
+#define A5XX_RB_MRT_FLAG_BUFFER_PITCH__MASK 0xffffffff
+#define A5XX_RB_MRT_FLAG_BUFFER_PITCH__SHIFT 0
+static inline uint32_t A5XX_RB_MRT_FLAG_BUFFER_PITCH(uint32_t val)
+{
+ return ((val >> 6) << A5XX_RB_MRT_FLAG_BUFFER_PITCH__SHIFT) & A5XX_RB_MRT_FLAG_BUFFER_PITCH__MASK;
+}
+
+static inline uint32_t REG_A5XX_RB_MRT_FLAG_BUFFER_ARRAY_PITCH(uint32_t i0) { return 0x0000e246 + 0x4*i0; }
+#define A5XX_RB_MRT_FLAG_BUFFER_ARRAY_PITCH__MASK 0xffffffff
+#define A5XX_RB_MRT_FLAG_BUFFER_ARRAY_PITCH__SHIFT 0
+static inline uint32_t A5XX_RB_MRT_FLAG_BUFFER_ARRAY_PITCH(uint32_t val)
+{
+ return ((val >> 6) << A5XX_RB_MRT_FLAG_BUFFER_ARRAY_PITCH__SHIFT) & A5XX_RB_MRT_FLAG_BUFFER_ARRAY_PITCH__MASK;
+}
+
+#define REG_A5XX_RB_BLIT_FLAG_DST_LO 0x0000e263
+
+#define REG_A5XX_RB_BLIT_FLAG_DST_HI 0x0000e264
+
+#define REG_A5XX_RB_BLIT_FLAG_DST_PITCH 0x0000e265
+#define A5XX_RB_BLIT_FLAG_DST_PITCH__MASK 0xffffffff
+#define A5XX_RB_BLIT_FLAG_DST_PITCH__SHIFT 0
+static inline uint32_t A5XX_RB_BLIT_FLAG_DST_PITCH(uint32_t val)
+{
+ return ((val >> 6) << A5XX_RB_BLIT_FLAG_DST_PITCH__SHIFT) & A5XX_RB_BLIT_FLAG_DST_PITCH__MASK;
+}
+
+#define REG_A5XX_RB_BLIT_FLAG_DST_ARRAY_PITCH 0x0000e266
+#define A5XX_RB_BLIT_FLAG_DST_ARRAY_PITCH__MASK 0xffffffff
+#define A5XX_RB_BLIT_FLAG_DST_ARRAY_PITCH__SHIFT 0
+static inline uint32_t A5XX_RB_BLIT_FLAG_DST_ARRAY_PITCH(uint32_t val)
+{
+ return ((val >> 6) << A5XX_RB_BLIT_FLAG_DST_ARRAY_PITCH__SHIFT) & A5XX_RB_BLIT_FLAG_DST_ARRAY_PITCH__MASK;
+}
+
+#define REG_A5XX_VPC_CNTL_0 0x0000e280
+#define A5XX_VPC_CNTL_0_STRIDE_IN_VPC__MASK 0x0000007f
+#define A5XX_VPC_CNTL_0_STRIDE_IN_VPC__SHIFT 0
+static inline uint32_t A5XX_VPC_CNTL_0_STRIDE_IN_VPC(uint32_t val)
+{
+ return ((val) << A5XX_VPC_CNTL_0_STRIDE_IN_VPC__SHIFT) & A5XX_VPC_CNTL_0_STRIDE_IN_VPC__MASK;
+}
+#define A5XX_VPC_CNTL_0_VARYING 0x00000800
+
+static inline uint32_t REG_A5XX_VPC_VARYING_INTERP(uint32_t i0) { return 0x0000e282 + 0x1*i0; }
+
+static inline uint32_t REG_A5XX_VPC_VARYING_INTERP_MODE(uint32_t i0) { return 0x0000e282 + 0x1*i0; }
+
+static inline uint32_t REG_A5XX_VPC_VARYING_PS_REPL(uint32_t i0) { return 0x0000e28a + 0x1*i0; }
+
+static inline uint32_t REG_A5XX_VPC_VARYING_PS_REPL_MODE(uint32_t i0) { return 0x0000e28a + 0x1*i0; }
+
+#define REG_A5XX_UNKNOWN_E292 0x0000e292
+
+#define REG_A5XX_UNKNOWN_E293 0x0000e293
+
+static inline uint32_t REG_A5XX_VPC_VAR(uint32_t i0) { return 0x0000e294 + 0x1*i0; }
+
+static inline uint32_t REG_A5XX_VPC_VAR_DISABLE(uint32_t i0) { return 0x0000e294 + 0x1*i0; }
+
+#define REG_A5XX_VPC_GS_SIV_CNTL 0x0000e298
+
+#define REG_A5XX_UNKNOWN_E29A 0x0000e29a
+
+#define REG_A5XX_VPC_PACK 0x0000e29d
+#define A5XX_VPC_PACK_NUMNONPOSVAR__MASK 0x000000ff
+#define A5XX_VPC_PACK_NUMNONPOSVAR__SHIFT 0
+static inline uint32_t A5XX_VPC_PACK_NUMNONPOSVAR(uint32_t val)
+{
+ return ((val) << A5XX_VPC_PACK_NUMNONPOSVAR__SHIFT) & A5XX_VPC_PACK_NUMNONPOSVAR__MASK;
+}
+
+#define REG_A5XX_VPC_FS_PRIMITIVEID_CNTL 0x0000e2a0
+
+#define REG_A5XX_UNKNOWN_E2A1 0x0000e2a1
+
+#define REG_A5XX_VPC_SO_OVERRIDE 0x0000e2a2
+
+#define REG_A5XX_VPC_SO_BUFFER_BASE_LO_0 0x0000e2a7
+
+#define REG_A5XX_VPC_SO_BUFFER_BASE_HI_0 0x0000e2a8
+
+#define REG_A5XX_VPC_SO_BUFFER_SIZE_0 0x0000e2a9
+
+#define REG_A5XX_UNKNOWN_E2AB 0x0000e2ab
+
+#define REG_A5XX_VPC_SO_FLUSH_BASE_LO_0 0x0000e2ac
+
+#define REG_A5XX_VPC_SO_FLUSH_BASE_HI_0 0x0000e2ad
+
+#define REG_A5XX_UNKNOWN_E2AE 0x0000e2ae
+
+#define REG_A5XX_UNKNOWN_E2B2 0x0000e2b2
+
+#define REG_A5XX_UNKNOWN_E2B9 0x0000e2b9
+
+#define REG_A5XX_UNKNOWN_E2C0 0x0000e2c0
+
+#define REG_A5XX_PC_PRIMITIVE_CNTL 0x0000e384
+#define A5XX_PC_PRIMITIVE_CNTL_STRIDE_IN_VPC__MASK 0x0000007f
+#define A5XX_PC_PRIMITIVE_CNTL_STRIDE_IN_VPC__SHIFT 0
+static inline uint32_t A5XX_PC_PRIMITIVE_CNTL_STRIDE_IN_VPC(uint32_t val)
+{
+ return ((val) << A5XX_PC_PRIMITIVE_CNTL_STRIDE_IN_VPC__SHIFT) & A5XX_PC_PRIMITIVE_CNTL_STRIDE_IN_VPC__MASK;
+}
+
+#define REG_A5XX_PC_PRIM_VTX_CNTL 0x0000e385
+#define A5XX_PC_PRIM_VTX_CNTL_PSIZE 0x00000800
+
+#define REG_A5XX_PC_RASTER_CNTL 0x0000e388
+
+#define REG_A5XX_UNKNOWN_E389 0x0000e389
+
+#define REG_A5XX_PC_RESTART_INDEX 0x0000e38c
+
+#define REG_A5XX_UNKNOWN_E38D 0x0000e38d
+
+#define REG_A5XX_PC_GS_PARAM 0x0000e38e
+
+#define REG_A5XX_PC_HS_PARAM 0x0000e38f
+
+#define REG_A5XX_PC_POWER_CNTL 0x0000e3b0
+
+#define REG_A5XX_VFD_CONTROL_0 0x0000e400
+#define A5XX_VFD_CONTROL_0_VTXCNT__MASK 0x0000003f
+#define A5XX_VFD_CONTROL_0_VTXCNT__SHIFT 0
+static inline uint32_t A5XX_VFD_CONTROL_0_VTXCNT(uint32_t val)
+{
+ return ((val) << A5XX_VFD_CONTROL_0_VTXCNT__SHIFT) & A5XX_VFD_CONTROL_0_VTXCNT__MASK;
+}
+
+#define REG_A5XX_VFD_CONTROL_1 0x0000e401
+#define A5XX_VFD_CONTROL_1_REGID4INST__MASK 0x0000ff00
+#define A5XX_VFD_CONTROL_1_REGID4INST__SHIFT 8
+static inline uint32_t A5XX_VFD_CONTROL_1_REGID4INST(uint32_t val)
+{
+ return ((val) << A5XX_VFD_CONTROL_1_REGID4INST__SHIFT) & A5XX_VFD_CONTROL_1_REGID4INST__MASK;
+}
+#define A5XX_VFD_CONTROL_1_REGID4VTX__MASK 0x00ff0000
+#define A5XX_VFD_CONTROL_1_REGID4VTX__SHIFT 16
+static inline uint32_t A5XX_VFD_CONTROL_1_REGID4VTX(uint32_t val)
+{
+ return ((val) << A5XX_VFD_CONTROL_1_REGID4VTX__SHIFT) & A5XX_VFD_CONTROL_1_REGID4VTX__MASK;
+}
+
+#define REG_A5XX_VFD_CONTROL_2 0x0000e402
+
+#define REG_A5XX_VFD_CONTROL_3 0x0000e403
+
+#define REG_A5XX_VFD_CONTROL_4 0x0000e404
+
+#define REG_A5XX_VFD_CONTROL_5 0x0000e405
+
+#define REG_A5XX_VFD_INDEX_OFFSET 0x0000e408
+
+#define REG_A5XX_VFD_INSTANCE_START_OFFSET 0x0000e409
+
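+/* Vertex fetch state is split into two indexed groups: VFD_FETCH[i]
+ * (base lo/hi, size, stride; a four-dword group from 0xe40a) describes
+ * each vertex buffer, while VFD_DECODE_INSTR[i] (two-dword stride from
+ * 0xe48a) appears to describe each attribute's buffer index, format
+ * and component swap.
+ */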
+static inline uint32_t REG_A5XX_VFD_FETCH(uint32_t i0) { return 0x0000e40a + 0x4*i0; }
+
+static inline uint32_t REG_A5XX_VFD_FETCH_BASE_LO(uint32_t i0) { return 0x0000e40a + 0x4*i0; }
+
+static inline uint32_t REG_A5XX_VFD_FETCH_BASE_HI(uint32_t i0) { return 0x0000e40b + 0x4*i0; }
+
+static inline uint32_t REG_A5XX_VFD_FETCH_SIZE(uint32_t i0) { return 0x0000e40c + 0x4*i0; }
+
+static inline uint32_t REG_A5XX_VFD_FETCH_STRIDE(uint32_t i0) { return 0x0000e40d + 0x4*i0; }
+
+static inline uint32_t REG_A5XX_VFD_DECODE(uint32_t i0) { return 0x0000e48a + 0x2*i0; }
+
+static inline uint32_t REG_A5XX_VFD_DECODE_INSTR(uint32_t i0) { return 0x0000e48a + 0x2*i0; }
+#define A5XX_VFD_DECODE_INSTR_IDX__MASK 0x0000001f
+#define A5XX_VFD_DECODE_INSTR_IDX__SHIFT 0
+static inline uint32_t A5XX_VFD_DECODE_INSTR_IDX(uint32_t val)
+{
+ return ((val) << A5XX_VFD_DECODE_INSTR_IDX__SHIFT) & A5XX_VFD_DECODE_INSTR_IDX__MASK;
+}
+#define A5XX_VFD_DECODE_INSTR_FORMAT__MASK 0x3ff00000
+#define A5XX_VFD_DECODE_INSTR_FORMAT__SHIFT 20
+static inline uint32_t A5XX_VFD_DECODE_INSTR_FORMAT(enum a5xx_vtx_fmt val)
+{
+ return ((val) << A5XX_VFD_DECODE_INSTR_FORMAT__SHIFT) & A5XX_VFD_DECODE_INSTR_FORMAT__MASK;
+}
+#define A5XX_VFD_DECODE_INSTR_SWAP__MASK 0xc0000000
+#define A5XX_VFD_DECODE_INSTR_SWAP__SHIFT 30
+static inline uint32_t A5XX_VFD_DECODE_INSTR_SWAP(enum a3xx_color_swap val)
+{
+ return ((val) << A5XX_VFD_DECODE_INSTR_SWAP__SHIFT) & A5XX_VFD_DECODE_INSTR_SWAP__MASK;
+}
+
+static inline uint32_t REG_A5XX_VFD_DECODE_STEP_RATE(uint32_t i0) { return 0x0000e48b + 0x2*i0; }
+
+static inline uint32_t REG_A5XX_VFD_DEST_CNTL(uint32_t i0) { return 0x0000e4ca + 0x1*i0; }
+
+static inline uint32_t REG_A5XX_VFD_DEST_CNTL_INSTR(uint32_t i0) { return 0x0000e4ca + 0x1*i0; }
+#define A5XX_VFD_DEST_CNTL_INSTR_WRITEMASK__MASK 0x0000000f
+#define A5XX_VFD_DEST_CNTL_INSTR_WRITEMASK__SHIFT 0
+static inline uint32_t A5XX_VFD_DEST_CNTL_INSTR_WRITEMASK(uint32_t val)
+{
+ return ((val) << A5XX_VFD_DEST_CNTL_INSTR_WRITEMASK__SHIFT) & A5XX_VFD_DEST_CNTL_INSTR_WRITEMASK__MASK;
+}
+#define A5XX_VFD_DEST_CNTL_INSTR_REGID__MASK 0x00000ff0
+#define A5XX_VFD_DEST_CNTL_INSTR_REGID__SHIFT 4
+static inline uint32_t A5XX_VFD_DEST_CNTL_INSTR_REGID(uint32_t val)
+{
+ return ((val) << A5XX_VFD_DEST_CNTL_INSTR_REGID__SHIFT) & A5XX_VFD_DEST_CNTL_INSTR_REGID__MASK;
+}
+
+#define REG_A5XX_VFD_POWER_CNTL 0x0000e4f0
+
+#define REG_A5XX_SP_SP_CNTL 0x0000e580
+
+#define REG_A5XX_SP_VS_CONTROL_REG 0x0000e584
+#define A5XX_SP_VS_CONTROL_REG_ENABLED 0x00000001
+#define A5XX_SP_VS_CONTROL_REG_CONSTOBJECTOFFSET__MASK 0x000000fe
+#define A5XX_SP_VS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT 1
+static inline uint32_t A5XX_SP_VS_CONTROL_REG_CONSTOBJECTOFFSET(uint32_t val)
+{
+ return ((val) << A5XX_SP_VS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT) & A5XX_SP_VS_CONTROL_REG_CONSTOBJECTOFFSET__MASK;
+}
+#define A5XX_SP_VS_CONTROL_REG_SHADEROBJOFFSET__MASK 0x00007f00
+#define A5XX_SP_VS_CONTROL_REG_SHADEROBJOFFSET__SHIFT 8
+static inline uint32_t A5XX_SP_VS_CONTROL_REG_SHADEROBJOFFSET(uint32_t val)
+{
+ return ((val) << A5XX_SP_VS_CONTROL_REG_SHADEROBJOFFSET__SHIFT) & A5XX_SP_VS_CONTROL_REG_SHADEROBJOFFSET__MASK;
+}
+
+#define REG_A5XX_SP_FS_CONTROL_REG 0x0000e585
+#define A5XX_SP_FS_CONTROL_REG_ENABLED 0x00000001
+#define A5XX_SP_FS_CONTROL_REG_CONSTOBJECTOFFSET__MASK 0x000000fe
+#define A5XX_SP_FS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT 1
+static inline uint32_t A5XX_SP_FS_CONTROL_REG_CONSTOBJECTOFFSET(uint32_t val)
+{
+ return ((val) << A5XX_SP_FS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT) & A5XX_SP_FS_CONTROL_REG_CONSTOBJECTOFFSET__MASK;
+}
+#define A5XX_SP_FS_CONTROL_REG_SHADEROBJOFFSET__MASK 0x00007f00
+#define A5XX_SP_FS_CONTROL_REG_SHADEROBJOFFSET__SHIFT 8
+static inline uint32_t A5XX_SP_FS_CONTROL_REG_SHADEROBJOFFSET(uint32_t val)
+{
+ return ((val) << A5XX_SP_FS_CONTROL_REG_SHADEROBJOFFSET__SHIFT) & A5XX_SP_FS_CONTROL_REG_SHADEROBJOFFSET__MASK;
+}
+
+#define REG_A5XX_SP_HS_CONTROL_REG 0x0000e586
+#define A5XX_SP_HS_CONTROL_REG_ENABLED 0x00000001
+#define A5XX_SP_HS_CONTROL_REG_CONSTOBJECTOFFSET__MASK 0x000000fe
+#define A5XX_SP_HS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT 1
+static inline uint32_t A5XX_SP_HS_CONTROL_REG_CONSTOBJECTOFFSET(uint32_t val)
+{
+ return ((val) << A5XX_SP_HS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT) & A5XX_SP_HS_CONTROL_REG_CONSTOBJECTOFFSET__MASK;
+}
+#define A5XX_SP_HS_CONTROL_REG_SHADEROBJOFFSET__MASK 0x00007f00
+#define A5XX_SP_HS_CONTROL_REG_SHADEROBJOFFSET__SHIFT 8
+static inline uint32_t A5XX_SP_HS_CONTROL_REG_SHADEROBJOFFSET(uint32_t val)
+{
+ return ((val) << A5XX_SP_HS_CONTROL_REG_SHADEROBJOFFSET__SHIFT) & A5XX_SP_HS_CONTROL_REG_SHADEROBJOFFSET__MASK;
+}
+
+#define REG_A5XX_SP_DS_CONTROL_REG 0x0000e587
+#define A5XX_SP_DS_CONTROL_REG_ENABLED 0x00000001
+#define A5XX_SP_DS_CONTROL_REG_CONSTOBJECTOFFSET__MASK 0x000000fe
+#define A5XX_SP_DS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT 1
+static inline uint32_t A5XX_SP_DS_CONTROL_REG_CONSTOBJECTOFFSET(uint32_t val)
+{
+ return ((val) << A5XX_SP_DS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT) & A5XX_SP_DS_CONTROL_REG_CONSTOBJECTOFFSET__MASK;
+}
+#define A5XX_SP_DS_CONTROL_REG_SHADEROBJOFFSET__MASK 0x00007f00
+#define A5XX_SP_DS_CONTROL_REG_SHADEROBJOFFSET__SHIFT 8
+static inline uint32_t A5XX_SP_DS_CONTROL_REG_SHADEROBJOFFSET(uint32_t val)
+{
+ return ((val) << A5XX_SP_DS_CONTROL_REG_SHADEROBJOFFSET__SHIFT) & A5XX_SP_DS_CONTROL_REG_SHADEROBJOFFSET__MASK;
+}
+
+#define REG_A5XX_SP_GS_CONTROL_REG 0x0000e588
+#define A5XX_SP_GS_CONTROL_REG_ENABLED 0x00000001
+#define A5XX_SP_GS_CONTROL_REG_CONSTOBJECTOFFSET__MASK 0x000000fe
+#define A5XX_SP_GS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT 1
+static inline uint32_t A5XX_SP_GS_CONTROL_REG_CONSTOBJECTOFFSET(uint32_t val)
+{
+ return ((val) << A5XX_SP_GS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT) & A5XX_SP_GS_CONTROL_REG_CONSTOBJECTOFFSET__MASK;
+}
+#define A5XX_SP_GS_CONTROL_REG_SHADEROBJOFFSET__MASK 0x00007f00
+#define A5XX_SP_GS_CONTROL_REG_SHADEROBJOFFSET__SHIFT 8
+static inline uint32_t A5XX_SP_GS_CONTROL_REG_SHADEROBJOFFSET(uint32_t val)
+{
+ return ((val) << A5XX_SP_GS_CONTROL_REG_SHADEROBJOFFSET__SHIFT) & A5XX_SP_GS_CONTROL_REG_SHADEROBJOFFSET__MASK;
+}
+
+#define REG_A5XX_SP_CS_CONFIG 0x0000e589
+
+#define REG_A5XX_SP_VS_CONFIG_MAX_CONST 0x0000e58a
+
+#define REG_A5XX_SP_FS_CONFIG_MAX_CONST 0x0000e58b
+
+#define REG_A5XX_SP_VS_CTRL_REG0 0x0000e590
+#define A5XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT__MASK 0x000003f0
+#define A5XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT 4
+static inline uint32_t A5XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT(uint32_t val)
+{
+ return ((val) << A5XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT) & A5XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT__MASK;
+}
+#define A5XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__MASK 0x0000fc00
+#define A5XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT 10
+static inline uint32_t A5XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT(uint32_t val)
+{
+ return ((val) << A5XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT) & A5XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__MASK;
+}
+#define A5XX_SP_VS_CTRL_REG0_VARYING 0x00010000
+#define A5XX_SP_VS_CTRL_REG0_PIXLODENABLE 0x00100000
+
+#define REG_A5XX_SP_PRIMITIVE_CNTL 0x0000e592
+#define A5XX_SP_PRIMITIVE_CNTL_STRIDE_IN_VPC__MASK 0x0000001f
+#define A5XX_SP_PRIMITIVE_CNTL_STRIDE_IN_VPC__SHIFT 0
+static inline uint32_t A5XX_SP_PRIMITIVE_CNTL_STRIDE_IN_VPC(uint32_t val)
+{
+ return ((val >> 2) << A5XX_SP_PRIMITIVE_CNTL_STRIDE_IN_VPC__SHIFT) & A5XX_SP_PRIMITIVE_CNTL_STRIDE_IN_VPC__MASK;
+}
+
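+/* SP_VS_OUT_REG packs two varying outputs per register (A in the low
+ * half, B in the high half), each as a register id plus a 4-bit
+ * component writemask.
+ */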
+static inline uint32_t REG_A5XX_SP_VS_OUT(uint32_t i0) { return 0x0000e593 + 0x1*i0; }
+
+static inline uint32_t REG_A5XX_SP_VS_OUT_REG(uint32_t i0) { return 0x0000e593 + 0x1*i0; }
+#define A5XX_SP_VS_OUT_REG_A_REGID__MASK 0x000000ff
+#define A5XX_SP_VS_OUT_REG_A_REGID__SHIFT 0
+static inline uint32_t A5XX_SP_VS_OUT_REG_A_REGID(uint32_t val)
+{
+ return ((val) << A5XX_SP_VS_OUT_REG_A_REGID__SHIFT) & A5XX_SP_VS_OUT_REG_A_REGID__MASK;
+}
+#define A5XX_SP_VS_OUT_REG_A_COMPMASK__MASK 0x00000f00
+#define A5XX_SP_VS_OUT_REG_A_COMPMASK__SHIFT 8
+static inline uint32_t A5XX_SP_VS_OUT_REG_A_COMPMASK(uint32_t val)
+{
+ return ((val) << A5XX_SP_VS_OUT_REG_A_COMPMASK__SHIFT) & A5XX_SP_VS_OUT_REG_A_COMPMASK__MASK;
+}
+#define A5XX_SP_VS_OUT_REG_B_REGID__MASK 0x00ff0000
+#define A5XX_SP_VS_OUT_REG_B_REGID__SHIFT 16
+static inline uint32_t A5XX_SP_VS_OUT_REG_B_REGID(uint32_t val)
+{
+ return ((val) << A5XX_SP_VS_OUT_REG_B_REGID__SHIFT) & A5XX_SP_VS_OUT_REG_B_REGID__MASK;
+}
+#define A5XX_SP_VS_OUT_REG_B_COMPMASK__MASK 0x0f000000
+#define A5XX_SP_VS_OUT_REG_B_COMPMASK__SHIFT 24
+static inline uint32_t A5XX_SP_VS_OUT_REG_B_COMPMASK(uint32_t val)
+{
+ return ((val) << A5XX_SP_VS_OUT_REG_B_COMPMASK__SHIFT) & A5XX_SP_VS_OUT_REG_B_COMPMASK__MASK;
+}
+
+static inline uint32_t REG_A5XX_SP_VS_VPC_DST(uint32_t i0) { return 0x0000e5a3 + 0x1*i0; }
+
+static inline uint32_t REG_A5XX_SP_VS_VPC_DST_REG(uint32_t i0) { return 0x0000e5a3 + 0x1*i0; }
+#define A5XX_SP_VS_VPC_DST_REG_OUTLOC0__MASK 0x000000ff
+#define A5XX_SP_VS_VPC_DST_REG_OUTLOC0__SHIFT 0
+static inline uint32_t A5XX_SP_VS_VPC_DST_REG_OUTLOC0(uint32_t val)
+{
+ return ((val) << A5XX_SP_VS_VPC_DST_REG_OUTLOC0__SHIFT) & A5XX_SP_VS_VPC_DST_REG_OUTLOC0__MASK;
+}
+#define A5XX_SP_VS_VPC_DST_REG_OUTLOC1__MASK 0x0000ff00
+#define A5XX_SP_VS_VPC_DST_REG_OUTLOC1__SHIFT 8
+static inline uint32_t A5XX_SP_VS_VPC_DST_REG_OUTLOC1(uint32_t val)
+{
+ return ((val) << A5XX_SP_VS_VPC_DST_REG_OUTLOC1__SHIFT) & A5XX_SP_VS_VPC_DST_REG_OUTLOC1__MASK;
+}
+#define A5XX_SP_VS_VPC_DST_REG_OUTLOC2__MASK 0x00ff0000
+#define A5XX_SP_VS_VPC_DST_REG_OUTLOC2__SHIFT 16
+static inline uint32_t A5XX_SP_VS_VPC_DST_REG_OUTLOC2(uint32_t val)
+{
+ return ((val) << A5XX_SP_VS_VPC_DST_REG_OUTLOC2__SHIFT) & A5XX_SP_VS_VPC_DST_REG_OUTLOC2__MASK;
+}
+#define A5XX_SP_VS_VPC_DST_REG_OUTLOC3__MASK 0xff000000
+#define A5XX_SP_VS_VPC_DST_REG_OUTLOC3__SHIFT 24
+static inline uint32_t A5XX_SP_VS_VPC_DST_REG_OUTLOC3(uint32_t val)
+{
+ return ((val) << A5XX_SP_VS_VPC_DST_REG_OUTLOC3__SHIFT) & A5XX_SP_VS_VPC_DST_REG_OUTLOC3__MASK;
+}
+
+#define REG_A5XX_UNKNOWN_E5AB 0x0000e5ab
+
+#define REG_A5XX_SP_VS_OBJ_START_LO 0x0000e5ac
+
+#define REG_A5XX_SP_VS_OBJ_START_HI 0x0000e5ad
+
+#define REG_A5XX_SP_FS_CTRL_REG0 0x0000e5c0
+#define A5XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT__MASK 0x000003f0
+#define A5XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT 4
+static inline uint32_t A5XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT(uint32_t val)
+{
+ return ((val) << A5XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT) & A5XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT__MASK;
+}
+#define A5XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT__MASK 0x0000fc00
+#define A5XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT 10
+static inline uint32_t A5XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT(uint32_t val)
+{
+ return ((val) << A5XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT) & A5XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT__MASK;
+}
+#define A5XX_SP_FS_CTRL_REG0_VARYING 0x00010000
+#define A5XX_SP_FS_CTRL_REG0_PIXLODENABLE 0x00100000
+
+#define REG_A5XX_UNKNOWN_E5C2 0x0000e5c2
+
+#define REG_A5XX_SP_FS_OBJ_START_LO 0x0000e5c3
+
+#define REG_A5XX_SP_FS_OBJ_START_HI 0x0000e5c4
+
+#define REG_A5XX_SP_BLEND_CNTL 0x0000e5c9
+
+#define REG_A5XX_SP_FS_OUTPUT_CNTL 0x0000e5ca
+#define A5XX_SP_FS_OUTPUT_CNTL_MRT__MASK 0x0000000f
+#define A5XX_SP_FS_OUTPUT_CNTL_MRT__SHIFT 0
+static inline uint32_t A5XX_SP_FS_OUTPUT_CNTL_MRT(uint32_t val)
+{
+ return ((val) << A5XX_SP_FS_OUTPUT_CNTL_MRT__SHIFT) & A5XX_SP_FS_OUTPUT_CNTL_MRT__MASK;
+}
+#define A5XX_SP_FS_OUTPUT_CNTL_DEPTH_REGID__MASK 0x00001fe0
+#define A5XX_SP_FS_OUTPUT_CNTL_DEPTH_REGID__SHIFT 5
+static inline uint32_t A5XX_SP_FS_OUTPUT_CNTL_DEPTH_REGID(uint32_t val)
+{
+ return ((val) << A5XX_SP_FS_OUTPUT_CNTL_DEPTH_REGID__SHIFT) & A5XX_SP_FS_OUTPUT_CNTL_DEPTH_REGID__MASK;
+}
+#define A5XX_SP_FS_OUTPUT_CNTL_SAMPLEMASK_REGID__MASK 0x001fe000
+#define A5XX_SP_FS_OUTPUT_CNTL_SAMPLEMASK_REGID__SHIFT 13
+static inline uint32_t A5XX_SP_FS_OUTPUT_CNTL_SAMPLEMASK_REGID(uint32_t val)
+{
+ return ((val) << A5XX_SP_FS_OUTPUT_CNTL_SAMPLEMASK_REGID__SHIFT) & A5XX_SP_FS_OUTPUT_CNTL_SAMPLEMASK_REGID__MASK;
+}
+
+static inline uint32_t REG_A5XX_SP_FS_OUTPUT(uint32_t i0) { return 0x0000e5cb + 0x1*i0; }
+
+static inline uint32_t REG_A5XX_SP_FS_OUTPUT_REG(uint32_t i0) { return 0x0000e5cb + 0x1*i0; }
+#define A5XX_SP_FS_OUTPUT_REG_REGID__MASK 0x000000ff
+#define A5XX_SP_FS_OUTPUT_REG_REGID__SHIFT 0
+static inline uint32_t A5XX_SP_FS_OUTPUT_REG_REGID(uint32_t val)
+{
+ return ((val) << A5XX_SP_FS_OUTPUT_REG_REGID__SHIFT) & A5XX_SP_FS_OUTPUT_REG_REGID__MASK;
+}
+#define A5XX_SP_FS_OUTPUT_REG_HALF_PRECISION 0x00000100
+
+static inline uint32_t REG_A5XX_SP_FS_MRT(uint32_t i0) { return 0x0000e5d3 + 0x1*i0; }
+
+static inline uint32_t REG_A5XX_SP_FS_MRT_REG(uint32_t i0) { return 0x0000e5d3 + 0x1*i0; }
+#define A5XX_SP_FS_MRT_REG_COLOR_FORMAT__MASK 0x000000ff
+#define A5XX_SP_FS_MRT_REG_COLOR_FORMAT__SHIFT 0
+static inline uint32_t A5XX_SP_FS_MRT_REG_COLOR_FORMAT(enum a5xx_color_fmt val)
+{
+ return ((val) << A5XX_SP_FS_MRT_REG_COLOR_FORMAT__SHIFT) & A5XX_SP_FS_MRT_REG_COLOR_FORMAT__MASK;
+}
+
+#define REG_A5XX_UNKNOWN_E5DB 0x0000e5db
+
+#define REG_A5XX_SP_CS_CNTL_0 0x0000e5f0
+
+#define REG_A5XX_UNKNOWN_E600 0x0000e600
+
+#define REG_A5XX_UNKNOWN_E640 0x0000e640
+
+#define REG_A5XX_TPL1_TP_RAS_MSAA_CNTL 0x0000e704
+#define A5XX_TPL1_TP_RAS_MSAA_CNTL_SAMPLES__MASK 0x00000003
+#define A5XX_TPL1_TP_RAS_MSAA_CNTL_SAMPLES__SHIFT 0
+static inline uint32_t A5XX_TPL1_TP_RAS_MSAA_CNTL_SAMPLES(enum a3xx_msaa_samples val)
+{
+ return ((val) << A5XX_TPL1_TP_RAS_MSAA_CNTL_SAMPLES__SHIFT) & A5XX_TPL1_TP_RAS_MSAA_CNTL_SAMPLES__MASK;
+}
+
+#define REG_A5XX_TPL1_TP_DEST_MSAA_CNTL 0x0000e705
+#define A5XX_TPL1_TP_DEST_MSAA_CNTL_SAMPLES__MASK 0x00000003
+#define A5XX_TPL1_TP_DEST_MSAA_CNTL_SAMPLES__SHIFT 0
+static inline uint32_t A5XX_TPL1_TP_DEST_MSAA_CNTL_SAMPLES(enum a3xx_msaa_samples val)
+{
+ return ((val) << A5XX_TPL1_TP_DEST_MSAA_CNTL_SAMPLES__SHIFT) & A5XX_TPL1_TP_DEST_MSAA_CNTL_SAMPLES__MASK;
+}
+#define A5XX_TPL1_TP_DEST_MSAA_CNTL_MSAA_DISABLE 0x00000004
+
+#define REG_A5XX_TPL1_VS_TEX_COUNT 0x0000e700
+
+#define REG_A5XX_TPL1_VS_TEX_SAMP_LO 0x0000e722
+
+#define REG_A5XX_TPL1_VS_TEX_SAMP_HI 0x0000e723
+
+#define REG_A5XX_TPL1_VS_TEX_CONST_LO 0x0000e72a
+
+#define REG_A5XX_TPL1_VS_TEX_CONST_HI 0x0000e72b
+
+#define REG_A5XX_TPL1_FS_TEX_COUNT 0x0000e750
+
+#define REG_A5XX_TPL1_FS_TEX_SAMP_LO 0x0000e75a
+
+#define REG_A5XX_TPL1_FS_TEX_SAMP_HI 0x0000e75b
+
+#define REG_A5XX_TPL1_FS_TEX_CONST_LO 0x0000e75e
+
+#define REG_A5XX_TPL1_FS_TEX_CONST_HI 0x0000e75f
+
+#define REG_A5XX_TPL1_TP_FS_ROTATION_CNTL 0x0000e764
+
+#define REG_A5XX_HLSQ_CONTROL_0_REG 0x0000e784
+
+#define REG_A5XX_HLSQ_CONTROL_1_REG 0x0000e785
+#define A5XX_HLSQ_CONTROL_1_REG_PRIMALLOCTHRESHOLD__MASK 0x0000003f
+#define A5XX_HLSQ_CONTROL_1_REG_PRIMALLOCTHRESHOLD__SHIFT 0
+static inline uint32_t A5XX_HLSQ_CONTROL_1_REG_PRIMALLOCTHRESHOLD(uint32_t val)
+{
+ return ((val) << A5XX_HLSQ_CONTROL_1_REG_PRIMALLOCTHRESHOLD__SHIFT) & A5XX_HLSQ_CONTROL_1_REG_PRIMALLOCTHRESHOLD__MASK;
+}
+
+#define REG_A5XX_HLSQ_CONTROL_2_REG 0x0000e786
+#define A5XX_HLSQ_CONTROL_2_REG_FACEREGID__MASK 0x000000ff
+#define A5XX_HLSQ_CONTROL_2_REG_FACEREGID__SHIFT 0
+static inline uint32_t A5XX_HLSQ_CONTROL_2_REG_FACEREGID(uint32_t val)
+{
+ return ((val) << A5XX_HLSQ_CONTROL_2_REG_FACEREGID__SHIFT) & A5XX_HLSQ_CONTROL_2_REG_FACEREGID__MASK;
+}
+
+#define REG_A5XX_HLSQ_CONTROL_3_REG 0x0000e787
+#define A5XX_HLSQ_CONTROL_3_REG_FRAGCOORDXYREGID__MASK 0x000000ff
+#define A5XX_HLSQ_CONTROL_3_REG_FRAGCOORDXYREGID__SHIFT 0
+static inline uint32_t A5XX_HLSQ_CONTROL_3_REG_FRAGCOORDXYREGID(uint32_t val)
+{
+ return ((val) << A5XX_HLSQ_CONTROL_3_REG_FRAGCOORDXYREGID__SHIFT) & A5XX_HLSQ_CONTROL_3_REG_FRAGCOORDXYREGID__MASK;
+}
+
+#define REG_A5XX_HLSQ_CONTROL_4_REG 0x0000e788
+#define A5XX_HLSQ_CONTROL_4_REG_XYCOORDREGID__MASK 0x00ff0000
+#define A5XX_HLSQ_CONTROL_4_REG_XYCOORDREGID__SHIFT 16
+static inline uint32_t A5XX_HLSQ_CONTROL_4_REG_XYCOORDREGID(uint32_t val)
+{
+ return ((val) << A5XX_HLSQ_CONTROL_4_REG_XYCOORDREGID__SHIFT) & A5XX_HLSQ_CONTROL_4_REG_XYCOORDREGID__MASK;
+}
+#define A5XX_HLSQ_CONTROL_4_REG_ZWCOORDREGID__MASK 0xff000000
+#define A5XX_HLSQ_CONTROL_4_REG_ZWCOORDREGID__SHIFT 24
+static inline uint32_t A5XX_HLSQ_CONTROL_4_REG_ZWCOORDREGID(uint32_t val)
+{
+ return ((val) << A5XX_HLSQ_CONTROL_4_REG_ZWCOORDREGID__SHIFT) & A5XX_HLSQ_CONTROL_4_REG_ZWCOORDREGID__MASK;
+}
+
+#define REG_A5XX_HLSQ_UPDATE_CNTL 0x0000e78a
+
+#define REG_A5XX_HLSQ_VS_CONTROL_REG 0x0000e78b
+#define A5XX_HLSQ_VS_CONTROL_REG_ENABLED 0x00000001
+#define A5XX_HLSQ_VS_CONTROL_REG_CONSTOBJECTOFFSET__MASK 0x000000fe
+#define A5XX_HLSQ_VS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT 1
+static inline uint32_t A5XX_HLSQ_VS_CONTROL_REG_CONSTOBJECTOFFSET(uint32_t val)
+{
+ return ((val) << A5XX_HLSQ_VS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT) & A5XX_HLSQ_VS_CONTROL_REG_CONSTOBJECTOFFSET__MASK;
+}
+#define A5XX_HLSQ_VS_CONTROL_REG_SHADEROBJOFFSET__MASK 0x00007f00
+#define A5XX_HLSQ_VS_CONTROL_REG_SHADEROBJOFFSET__SHIFT 8
+static inline uint32_t A5XX_HLSQ_VS_CONTROL_REG_SHADEROBJOFFSET(uint32_t val)
+{
+ return ((val) << A5XX_HLSQ_VS_CONTROL_REG_SHADEROBJOFFSET__SHIFT) & A5XX_HLSQ_VS_CONTROL_REG_SHADEROBJOFFSET__MASK;
+}
+
+#define REG_A5XX_HLSQ_FS_CONTROL_REG 0x0000e78c
+#define A5XX_HLSQ_FS_CONTROL_REG_ENABLED 0x00000001
+#define A5XX_HLSQ_FS_CONTROL_REG_CONSTOBJECTOFFSET__MASK 0x000000fe
+#define A5XX_HLSQ_FS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT 1
+static inline uint32_t A5XX_HLSQ_FS_CONTROL_REG_CONSTOBJECTOFFSET(uint32_t val)
+{
+ return ((val) << A5XX_HLSQ_FS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT) & A5XX_HLSQ_FS_CONTROL_REG_CONSTOBJECTOFFSET__MASK;
+}
+#define A5XX_HLSQ_FS_CONTROL_REG_SHADEROBJOFFSET__MASK 0x00007f00
+#define A5XX_HLSQ_FS_CONTROL_REG_SHADEROBJOFFSET__SHIFT 8
+static inline uint32_t A5XX_HLSQ_FS_CONTROL_REG_SHADEROBJOFFSET(uint32_t val)
+{
+ return ((val) << A5XX_HLSQ_FS_CONTROL_REG_SHADEROBJOFFSET__SHIFT) & A5XX_HLSQ_FS_CONTROL_REG_SHADEROBJOFFSET__MASK;
+}
+
+#define REG_A5XX_HLSQ_HS_CONTROL_REG 0x0000e78d
+#define A5XX_HLSQ_HS_CONTROL_REG_ENABLED 0x00000001
+#define A5XX_HLSQ_HS_CONTROL_REG_CONSTOBJECTOFFSET__MASK 0x000000fe
+#define A5XX_HLSQ_HS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT 1
+static inline uint32_t A5XX_HLSQ_HS_CONTROL_REG_CONSTOBJECTOFFSET(uint32_t val)
+{
+ return ((val) << A5XX_HLSQ_HS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT) & A5XX_HLSQ_HS_CONTROL_REG_CONSTOBJECTOFFSET__MASK;
+}
+#define A5XX_HLSQ_HS_CONTROL_REG_SHADEROBJOFFSET__MASK 0x00007f00
+#define A5XX_HLSQ_HS_CONTROL_REG_SHADEROBJOFFSET__SHIFT 8
+static inline uint32_t A5XX_HLSQ_HS_CONTROL_REG_SHADEROBJOFFSET(uint32_t val)
+{
+ return ((val) << A5XX_HLSQ_HS_CONTROL_REG_SHADEROBJOFFSET__SHIFT) & A5XX_HLSQ_HS_CONTROL_REG_SHADEROBJOFFSET__MASK;
+}
+
+#define REG_A5XX_HLSQ_DS_CONTROL_REG 0x0000e78e
+#define A5XX_HLSQ_DS_CONTROL_REG_ENABLED 0x00000001
+#define A5XX_HLSQ_DS_CONTROL_REG_CONSTOBJECTOFFSET__MASK 0x000000fe
+#define A5XX_HLSQ_DS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT 1
+static inline uint32_t A5XX_HLSQ_DS_CONTROL_REG_CONSTOBJECTOFFSET(uint32_t val)
+{
+ return ((val) << A5XX_HLSQ_DS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT) & A5XX_HLSQ_DS_CONTROL_REG_CONSTOBJECTOFFSET__MASK;
+}
+#define A5XX_HLSQ_DS_CONTROL_REG_SHADEROBJOFFSET__MASK 0x00007f00
+#define A5XX_HLSQ_DS_CONTROL_REG_SHADEROBJOFFSET__SHIFT 8
+static inline uint32_t A5XX_HLSQ_DS_CONTROL_REG_SHADEROBJOFFSET(uint32_t val)
+{
+ return ((val) << A5XX_HLSQ_DS_CONTROL_REG_SHADEROBJOFFSET__SHIFT) & A5XX_HLSQ_DS_CONTROL_REG_SHADEROBJOFFSET__MASK;
+}
+
+#define REG_A5XX_HLSQ_GS_CONTROL_REG 0x0000e78f
+#define A5XX_HLSQ_GS_CONTROL_REG_ENABLED 0x00000001
+#define A5XX_HLSQ_GS_CONTROL_REG_CONSTOBJECTOFFSET__MASK 0x000000fe
+#define A5XX_HLSQ_GS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT 1
+static inline uint32_t A5XX_HLSQ_GS_CONTROL_REG_CONSTOBJECTOFFSET(uint32_t val)
+{
+ return ((val) << A5XX_HLSQ_GS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT) & A5XX_HLSQ_GS_CONTROL_REG_CONSTOBJECTOFFSET__MASK;
+}
+#define A5XX_HLSQ_GS_CONTROL_REG_SHADEROBJOFFSET__MASK 0x00007f00
+#define A5XX_HLSQ_GS_CONTROL_REG_SHADEROBJOFFSET__SHIFT 8
+static inline uint32_t A5XX_HLSQ_GS_CONTROL_REG_SHADEROBJOFFSET(uint32_t val)
+{
+ return ((val) << A5XX_HLSQ_GS_CONTROL_REG_SHADEROBJOFFSET__SHIFT) & A5XX_HLSQ_GS_CONTROL_REG_SHADEROBJOFFSET__MASK;
+}
+
+#define REG_A5XX_HLSQ_CS_CONFIG 0x0000e790
+
+#define REG_A5XX_HLSQ_VS_CNTL 0x0000e791
+#define A5XX_HLSQ_VS_CNTL_INSTRLEN__MASK 0xfffffffe
+#define A5XX_HLSQ_VS_CNTL_INSTRLEN__SHIFT 1
+static inline uint32_t A5XX_HLSQ_VS_CNTL_INSTRLEN(uint32_t val)
+{
+ return ((val) << A5XX_HLSQ_VS_CNTL_INSTRLEN__SHIFT) & A5XX_HLSQ_VS_CNTL_INSTRLEN__MASK;
+}
+
+#define REG_A5XX_HLSQ_FS_CNTL 0x0000e792
+#define A5XX_HLSQ_FS_CNTL_INSTRLEN__MASK 0xfffffffe
+#define A5XX_HLSQ_FS_CNTL_INSTRLEN__SHIFT 1
+static inline uint32_t A5XX_HLSQ_FS_CNTL_INSTRLEN(uint32_t val)
+{
+ return ((val) << A5XX_HLSQ_FS_CNTL_INSTRLEN__SHIFT) & A5XX_HLSQ_FS_CNTL_INSTRLEN__MASK;
+}
+
+#define REG_A5XX_HLSQ_HS_CNTL 0x0000e793
+#define A5XX_HLSQ_HS_CNTL_INSTRLEN__MASK 0xfffffffe
+#define A5XX_HLSQ_HS_CNTL_INSTRLEN__SHIFT 1
+static inline uint32_t A5XX_HLSQ_HS_CNTL_INSTRLEN(uint32_t val)
+{
+ return ((val) << A5XX_HLSQ_HS_CNTL_INSTRLEN__SHIFT) & A5XX_HLSQ_HS_CNTL_INSTRLEN__MASK;
+}
+
+#define REG_A5XX_HLSQ_DS_CNTL 0x0000e794
+#define A5XX_HLSQ_DS_CNTL_INSTRLEN__MASK 0xfffffffe
+#define A5XX_HLSQ_DS_CNTL_INSTRLEN__SHIFT 1
+static inline uint32_t A5XX_HLSQ_DS_CNTL_INSTRLEN(uint32_t val)
+{
+ return ((val) << A5XX_HLSQ_DS_CNTL_INSTRLEN__SHIFT) & A5XX_HLSQ_DS_CNTL_INSTRLEN__MASK;
+}
+
+#define REG_A5XX_HLSQ_GS_CNTL 0x0000e795
+#define A5XX_HLSQ_GS_CNTL_INSTRLEN__MASK 0xfffffffe
+#define A5XX_HLSQ_GS_CNTL_INSTRLEN__SHIFT 1
+static inline uint32_t A5XX_HLSQ_GS_CNTL_INSTRLEN(uint32_t val)
+{
+ return ((val) << A5XX_HLSQ_GS_CNTL_INSTRLEN__SHIFT) & A5XX_HLSQ_GS_CNTL_INSTRLEN__MASK;
+}
+
+#define REG_A5XX_HLSQ_CS_CNTL 0x0000e796
+#define A5XX_HLSQ_CS_CNTL_INSTRLEN__MASK 0xfffffffe
+#define A5XX_HLSQ_CS_CNTL_INSTRLEN__SHIFT 1
+static inline uint32_t A5XX_HLSQ_CS_CNTL_INSTRLEN(uint32_t val)
+{
+ return ((val) << A5XX_HLSQ_CS_CNTL_INSTRLEN__SHIFT) & A5XX_HLSQ_CS_CNTL_INSTRLEN__MASK;
+}
+
+#define REG_A5XX_HLSQ_CS_KERNEL_GROUP_X 0x0000e7b9
+
+#define REG_A5XX_HLSQ_CS_KERNEL_GROUP_Y 0x0000e7ba
+
+#define REG_A5XX_HLSQ_CS_KERNEL_GROUP_Z 0x0000e7bb
+
+#define REG_A5XX_HLSQ_CS_NDRANGE_0 0x0000e7b0
+
+#define REG_A5XX_HLSQ_CS_NDRANGE_1 0x0000e7b1
+
+#define REG_A5XX_HLSQ_CS_NDRANGE_2 0x0000e7b2
+
+#define REG_A5XX_HLSQ_CS_NDRANGE_3 0x0000e7b3
+
+#define REG_A5XX_HLSQ_CS_NDRANGE_4 0x0000e7b4
+
+#define REG_A5XX_HLSQ_CS_NDRANGE_5 0x0000e7b5
+
+#define REG_A5XX_HLSQ_CS_NDRANGE_6 0x0000e7b6
+
+#define REG_A5XX_HLSQ_CS_CNTL_0 0x0000e7b7
+
+#define REG_A5XX_HLSQ_CS_CNTL_1 0x0000e7b8
+
+#define REG_A5XX_UNKNOWN_E7C0 0x0000e7c0
+
+#define REG_A5XX_HLSQ_VS_CONSTLEN 0x0000e7c3
+
+#define REG_A5XX_HLSQ_VS_INSTRLEN 0x0000e7c4
+
+#define REG_A5XX_UNKNOWN_E7C5 0x0000e7c5
+
+#define REG_A5XX_UNKNOWN_E7CA 0x0000e7ca
+
+#define REG_A5XX_HLSQ_FS_CONSTLEN 0x0000e7d7
+
+#define REG_A5XX_HLSQ_FS_INSTRLEN 0x0000e7d8
+
+#define REG_A5XX_HLSQ_HS_CONSTLEN 0x0000e7c8
+
+#define REG_A5XX_HLSQ_HS_INSTRLEN 0x0000e7c9
+
+#define REG_A5XX_HLSQ_DS_CONSTLEN 0x0000e7cd
+
+#define REG_A5XX_HLSQ_DS_INSTRLEN 0x0000e7ce
+
+#define REG_A5XX_UNKNOWN_E7CF 0x0000e7cf
+
+#define REG_A5XX_HLSQ_GS_CONSTLEN 0x0000e7d2
+
+#define REG_A5XX_HLSQ_GS_INSTRLEN 0x0000e7d3
+
+#define REG_A5XX_UNKNOWN_E7D4 0x0000e7d4
+
+#define REG_A5XX_UNKNOWN_E7D9 0x0000e7d9
+
+#define REG_A5XX_HLSQ_CONTEXT_SWITCH_CS_SW_3 0x0000e7dc
+
+#define REG_A5XX_HLSQ_CONTEXT_SWITCH_CS_SW_4 0x0000e7dd
+
+#define REG_A5XX_RB_2D_DST_FILL 0x00002101
+
+#define REG_A5XX_RB_2D_SRC_INFO 0x00002107
+#define A5XX_RB_2D_SRC_INFO_COLOR_FORMAT__MASK 0x000000ff
+#define A5XX_RB_2D_SRC_INFO_COLOR_FORMAT__SHIFT 0
+static inline uint32_t A5XX_RB_2D_SRC_INFO_COLOR_FORMAT(enum a5xx_color_fmt val)
+{
+ return ((val) << A5XX_RB_2D_SRC_INFO_COLOR_FORMAT__SHIFT) & A5XX_RB_2D_SRC_INFO_COLOR_FORMAT__MASK;
+}
+#define A5XX_RB_2D_SRC_INFO_COLOR_SWAP__MASK 0x00000c00
+#define A5XX_RB_2D_SRC_INFO_COLOR_SWAP__SHIFT 10
+static inline uint32_t A5XX_RB_2D_SRC_INFO_COLOR_SWAP(enum a3xx_color_swap val)
+{
+ return ((val) << A5XX_RB_2D_SRC_INFO_COLOR_SWAP__SHIFT) & A5XX_RB_2D_SRC_INFO_COLOR_SWAP__MASK;
+}
+
+#define REG_A5XX_RB_2D_SRC_LO 0x00002108
+
+#define REG_A5XX_RB_2D_SRC_HI 0x00002109
+
+#define REG_A5XX_RB_2D_DST_INFO 0x00002110
+#define A5XX_RB_2D_DST_INFO_COLOR_FORMAT__MASK 0x000000ff
+#define A5XX_RB_2D_DST_INFO_COLOR_FORMAT__SHIFT 0
+static inline uint32_t A5XX_RB_2D_DST_INFO_COLOR_FORMAT(enum a5xx_color_fmt val)
+{
+ return ((val) << A5XX_RB_2D_DST_INFO_COLOR_FORMAT__SHIFT) & A5XX_RB_2D_DST_INFO_COLOR_FORMAT__MASK;
+}
+#define A5XX_RB_2D_DST_INFO_COLOR_SWAP__MASK 0x00000c00
+#define A5XX_RB_2D_DST_INFO_COLOR_SWAP__SHIFT 10
+static inline uint32_t A5XX_RB_2D_DST_INFO_COLOR_SWAP(enum a3xx_color_swap val)
+{
+ return ((val) << A5XX_RB_2D_DST_INFO_COLOR_SWAP__SHIFT) & A5XX_RB_2D_DST_INFO_COLOR_SWAP__MASK;
+}
+
+#define REG_A5XX_RB_2D_SRC_FLAGS_LO 0x00002140
+
+#define REG_A5XX_RB_2D_SRC_FLAGS_HI 0x00002141
+
+#define REG_A5XX_RB_2D_DST_LO 0x00002111
+
+#define REG_A5XX_RB_2D_DST_HI 0x00002112
+
+#define REG_A5XX_RB_2D_DST_FLAGS_LO 0x00002143
+
+#define REG_A5XX_RB_2D_DST_FLAGS_HI 0x00002144
+
+#define REG_A5XX_GRAS_2D_SRC_INFO 0x00002181
+#define A5XX_GRAS_2D_SRC_INFO_COLOR_FORMAT__MASK 0x000000ff
+#define A5XX_GRAS_2D_SRC_INFO_COLOR_FORMAT__SHIFT 0
+static inline uint32_t A5XX_GRAS_2D_SRC_INFO_COLOR_FORMAT(enum a5xx_color_fmt val)
+{
+ return ((val) << A5XX_GRAS_2D_SRC_INFO_COLOR_FORMAT__SHIFT) & A5XX_GRAS_2D_SRC_INFO_COLOR_FORMAT__MASK;
+}
+#define A5XX_GRAS_2D_SRC_INFO_COLOR_SWAP__MASK 0x00000c00
+#define A5XX_GRAS_2D_SRC_INFO_COLOR_SWAP__SHIFT 10
+static inline uint32_t A5XX_GRAS_2D_SRC_INFO_COLOR_SWAP(enum a3xx_color_swap val)
+{
+ return ((val) << A5XX_GRAS_2D_SRC_INFO_COLOR_SWAP__SHIFT) & A5XX_GRAS_2D_SRC_INFO_COLOR_SWAP__MASK;
+}
+
+#define REG_A5XX_GRAS_2D_DST_INFO 0x00002182
+#define A5XX_GRAS_2D_DST_INFO_COLOR_FORMAT__MASK 0x000000ff
+#define A5XX_GRAS_2D_DST_INFO_COLOR_FORMAT__SHIFT 0
+static inline uint32_t A5XX_GRAS_2D_DST_INFO_COLOR_FORMAT(enum a5xx_color_fmt val)
+{
+ return ((val) << A5XX_GRAS_2D_DST_INFO_COLOR_FORMAT__SHIFT) & A5XX_GRAS_2D_DST_INFO_COLOR_FORMAT__MASK;
+}
+#define A5XX_GRAS_2D_DST_INFO_COLOR_SWAP__MASK 0x00000c00
+#define A5XX_GRAS_2D_DST_INFO_COLOR_SWAP__SHIFT 10
+static inline uint32_t A5XX_GRAS_2D_DST_INFO_COLOR_SWAP(enum a3xx_color_swap val)
+{
+ return ((val) << A5XX_GRAS_2D_DST_INFO_COLOR_SWAP__SHIFT) & A5XX_GRAS_2D_DST_INFO_COLOR_SWAP__MASK;
+}
+
+#define REG_A5XX_TEX_SAMP_0 0x00000000
+#define A5XX_TEX_SAMP_0_MIPFILTER_LINEAR_NEAR 0x00000001
+#define A5XX_TEX_SAMP_0_XY_MAG__MASK 0x00000006
+#define A5XX_TEX_SAMP_0_XY_MAG__SHIFT 1
+static inline uint32_t A5XX_TEX_SAMP_0_XY_MAG(enum a5xx_tex_filter val)
+{
+ return ((val) << A5XX_TEX_SAMP_0_XY_MAG__SHIFT) & A5XX_TEX_SAMP_0_XY_MAG__MASK;
+}
+#define A5XX_TEX_SAMP_0_XY_MIN__MASK 0x00000018
+#define A5XX_TEX_SAMP_0_XY_MIN__SHIFT 3
+static inline uint32_t A5XX_TEX_SAMP_0_XY_MIN(enum a5xx_tex_filter val)
+{
+ return ((val) << A5XX_TEX_SAMP_0_XY_MIN__SHIFT) & A5XX_TEX_SAMP_0_XY_MIN__MASK;
+}
+#define A5XX_TEX_SAMP_0_WRAP_S__MASK 0x000000e0
+#define A5XX_TEX_SAMP_0_WRAP_S__SHIFT 5
+static inline uint32_t A5XX_TEX_SAMP_0_WRAP_S(enum a5xx_tex_clamp val)
+{
+ return ((val) << A5XX_TEX_SAMP_0_WRAP_S__SHIFT) & A5XX_TEX_SAMP_0_WRAP_S__MASK;
+}
+#define A5XX_TEX_SAMP_0_WRAP_T__MASK 0x00000700
+#define A5XX_TEX_SAMP_0_WRAP_T__SHIFT 8
+static inline uint32_t A5XX_TEX_SAMP_0_WRAP_T(enum a5xx_tex_clamp val)
+{
+ return ((val) << A5XX_TEX_SAMP_0_WRAP_T__SHIFT) & A5XX_TEX_SAMP_0_WRAP_T__MASK;
+}
+#define A5XX_TEX_SAMP_0_WRAP_R__MASK 0x00003800
+#define A5XX_TEX_SAMP_0_WRAP_R__SHIFT 11
+static inline uint32_t A5XX_TEX_SAMP_0_WRAP_R(enum a5xx_tex_clamp val)
+{
+ return ((val) << A5XX_TEX_SAMP_0_WRAP_R__SHIFT) & A5XX_TEX_SAMP_0_WRAP_R__MASK;
+}
+#define A5XX_TEX_SAMP_0_ANISO__MASK 0x0001c000
+#define A5XX_TEX_SAMP_0_ANISO__SHIFT 14
+static inline uint32_t A5XX_TEX_SAMP_0_ANISO(enum a5xx_tex_aniso val)
+{
+ return ((val) << A5XX_TEX_SAMP_0_ANISO__SHIFT) & A5XX_TEX_SAMP_0_ANISO__MASK;
+}
+#define A5XX_TEX_SAMP_0_LOD_BIAS__MASK 0xfff80000
+#define A5XX_TEX_SAMP_0_LOD_BIAS__SHIFT 19
+static inline uint32_t A5XX_TEX_SAMP_0_LOD_BIAS(float val)
+{
+ return ((((int32_t)(val * 256.0))) << A5XX_TEX_SAMP_0_LOD_BIAS__SHIFT) & A5XX_TEX_SAMP_0_LOD_BIAS__MASK;
+}
+
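+/*
+ * Illustrative note: LOD_BIAS is packed as a signed fixed-point value with
+ * eight fractional bits, so e.g. a bias of -0.5 becomes
+ * (int32_t)(-0.5 * 256.0) = -128 before being shifted into the 13-bit
+ * field at bits [31:19] and masked.
+ */
+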
+#define REG_A5XX_TEX_SAMP_1 0x00000001
+#define A5XX_TEX_SAMP_1_COMPARE_FUNC__MASK 0x0000000e
+#define A5XX_TEX_SAMP_1_COMPARE_FUNC__SHIFT 1
+static inline uint32_t A5XX_TEX_SAMP_1_COMPARE_FUNC(enum adreno_compare_func val)
+{
+ return ((val) << A5XX_TEX_SAMP_1_COMPARE_FUNC__SHIFT) & A5XX_TEX_SAMP_1_COMPARE_FUNC__MASK;
+}
+#define A5XX_TEX_SAMP_1_CUBEMAPSEAMLESSFILTOFF 0x00000010
+#define A5XX_TEX_SAMP_1_UNNORM_COORDS 0x00000020
+#define A5XX_TEX_SAMP_1_MIPFILTER_LINEAR_FAR 0x00000040
+#define A5XX_TEX_SAMP_1_MAX_LOD__MASK 0x000fff00
+#define A5XX_TEX_SAMP_1_MAX_LOD__SHIFT 8
+static inline uint32_t A5XX_TEX_SAMP_1_MAX_LOD(float val)
+{
+ return ((((uint32_t)(val * 256.0))) << A5XX_TEX_SAMP_1_MAX_LOD__SHIFT) & A5XX_TEX_SAMP_1_MAX_LOD__MASK;
+}
+#define A5XX_TEX_SAMP_1_MIN_LOD__MASK 0xfff00000
+#define A5XX_TEX_SAMP_1_MIN_LOD__SHIFT 20
+static inline uint32_t A5XX_TEX_SAMP_1_MIN_LOD(float val)
+{
+ return ((((uint32_t)(val * 256.0))) << A5XX_TEX_SAMP_1_MIN_LOD__SHIFT) & A5XX_TEX_SAMP_1_MIN_LOD__MASK;
+}
+
+#define REG_A5XX_TEX_SAMP_2 0x00000002
+
+#define REG_A5XX_TEX_SAMP_3 0x00000003
+
+#define REG_A5XX_TEX_CONST_0 0x00000000
+#define A5XX_TEX_CONST_0_TILE_MODE__MASK 0x00000003
+#define A5XX_TEX_CONST_0_TILE_MODE__SHIFT 0
+static inline uint32_t A5XX_TEX_CONST_0_TILE_MODE(enum a5xx_tile_mode val)
+{
+ return ((val) << A5XX_TEX_CONST_0_TILE_MODE__SHIFT) & A5XX_TEX_CONST_0_TILE_MODE__MASK;
+}
+#define A5XX_TEX_CONST_0_SRGB 0x00000004
+#define A5XX_TEX_CONST_0_SWIZ_X__MASK 0x00000070
+#define A5XX_TEX_CONST_0_SWIZ_X__SHIFT 4
+static inline uint32_t A5XX_TEX_CONST_0_SWIZ_X(enum a5xx_tex_swiz val)
+{
+ return ((val) << A5XX_TEX_CONST_0_SWIZ_X__SHIFT) & A5XX_TEX_CONST_0_SWIZ_X__MASK;
+}
+#define A5XX_TEX_CONST_0_SWIZ_Y__MASK 0x00000380
+#define A5XX_TEX_CONST_0_SWIZ_Y__SHIFT 7
+static inline uint32_t A5XX_TEX_CONST_0_SWIZ_Y(enum a5xx_tex_swiz val)
+{
+ return ((val) << A5XX_TEX_CONST_0_SWIZ_Y__SHIFT) & A5XX_TEX_CONST_0_SWIZ_Y__MASK;
+}
+#define A5XX_TEX_CONST_0_SWIZ_Z__MASK 0x00001c00
+#define A5XX_TEX_CONST_0_SWIZ_Z__SHIFT 10
+static inline uint32_t A5XX_TEX_CONST_0_SWIZ_Z(enum a5xx_tex_swiz val)
+{
+ return ((val) << A5XX_TEX_CONST_0_SWIZ_Z__SHIFT) & A5XX_TEX_CONST_0_SWIZ_Z__MASK;
+}
+#define A5XX_TEX_CONST_0_SWIZ_W__MASK 0x0000e000
+#define A5XX_TEX_CONST_0_SWIZ_W__SHIFT 13
+static inline uint32_t A5XX_TEX_CONST_0_SWIZ_W(enum a5xx_tex_swiz val)
+{
+ return ((val) << A5XX_TEX_CONST_0_SWIZ_W__SHIFT) & A5XX_TEX_CONST_0_SWIZ_W__MASK;
+}
+#define A5XX_TEX_CONST_0_FMT__MASK 0x3fc00000
+#define A5XX_TEX_CONST_0_FMT__SHIFT 22
+static inline uint32_t A5XX_TEX_CONST_0_FMT(enum a5xx_tex_fmt val)
+{
+ return ((val) << A5XX_TEX_CONST_0_FMT__SHIFT) & A5XX_TEX_CONST_0_FMT__MASK;
+}
+#define A5XX_TEX_CONST_0_SWAP__MASK 0xc0000000
+#define A5XX_TEX_CONST_0_SWAP__SHIFT 30
+static inline uint32_t A5XX_TEX_CONST_0_SWAP(enum a3xx_color_swap val)
+{
+ return ((val) << A5XX_TEX_CONST_0_SWAP__SHIFT) & A5XX_TEX_CONST_0_SWAP__MASK;
+}
+
+#define REG_A5XX_TEX_CONST_1 0x00000001
+#define A5XX_TEX_CONST_1_WIDTH__MASK 0x00007fff
+#define A5XX_TEX_CONST_1_WIDTH__SHIFT 0
+static inline uint32_t A5XX_TEX_CONST_1_WIDTH(uint32_t val)
+{
+ return ((val) << A5XX_TEX_CONST_1_WIDTH__SHIFT) & A5XX_TEX_CONST_1_WIDTH__MASK;
+}
+#define A5XX_TEX_CONST_1_HEIGHT__MASK 0x3fff8000
+#define A5XX_TEX_CONST_1_HEIGHT__SHIFT 15
+static inline uint32_t A5XX_TEX_CONST_1_HEIGHT(uint32_t val)
+{
+ return ((val) << A5XX_TEX_CONST_1_HEIGHT__SHIFT) & A5XX_TEX_CONST_1_HEIGHT__MASK;
+}
+
+#define REG_A5XX_TEX_CONST_2 0x00000002
+#define A5XX_TEX_CONST_2_FETCHSIZE__MASK 0x0000000f
+#define A5XX_TEX_CONST_2_FETCHSIZE__SHIFT 0
+static inline uint32_t A5XX_TEX_CONST_2_FETCHSIZE(enum a5xx_tex_fetchsize val)
+{
+ return ((val) << A5XX_TEX_CONST_2_FETCHSIZE__SHIFT) & A5XX_TEX_CONST_2_FETCHSIZE__MASK;
+}
+#define A5XX_TEX_CONST_2_PITCH__MASK 0x1fffff80
+#define A5XX_TEX_CONST_2_PITCH__SHIFT 7
+static inline uint32_t A5XX_TEX_CONST_2_PITCH(uint32_t val)
+{
+ return ((val) << A5XX_TEX_CONST_2_PITCH__SHIFT) & A5XX_TEX_CONST_2_PITCH__MASK;
+}
+#define A5XX_TEX_CONST_2_TYPE__MASK 0x60000000
+#define A5XX_TEX_CONST_2_TYPE__SHIFT 29
+static inline uint32_t A5XX_TEX_CONST_2_TYPE(enum a5xx_tex_type val)
+{
+ return ((val) << A5XX_TEX_CONST_2_TYPE__SHIFT) & A5XX_TEX_CONST_2_TYPE__MASK;
+}
+
+#define REG_A5XX_TEX_CONST_3 0x00000003
+#define A5XX_TEX_CONST_3_ARRAY_PITCH__MASK 0x00003fff
+#define A5XX_TEX_CONST_3_ARRAY_PITCH__SHIFT 0
+/* note: the value is divided by 4096 before being packed (4KiB granularity) */
+static inline uint32_t A5XX_TEX_CONST_3_ARRAY_PITCH(uint32_t val)
+{
+ return ((val >> 12) << A5XX_TEX_CONST_3_ARRAY_PITCH__SHIFT) & A5XX_TEX_CONST_3_ARRAY_PITCH__MASK;
+}
+#define A5XX_TEX_CONST_3_FLAG 0x10000000
+
+#define REG_A5XX_TEX_CONST_4 0x00000004
+#define A5XX_TEX_CONST_4_BASE_LO__MASK 0xffffffe0
+#define A5XX_TEX_CONST_4_BASE_LO__SHIFT 5
+/* note: the low five address bits are dropped, so the base must be 32-byte aligned */
+static inline uint32_t A5XX_TEX_CONST_4_BASE_LO(uint32_t val)
+{
+ return ((val >> 5) << A5XX_TEX_CONST_4_BASE_LO__SHIFT) & A5XX_TEX_CONST_4_BASE_LO__MASK;
+}
+
+#define REG_A5XX_TEX_CONST_5 0x00000005
+#define A5XX_TEX_CONST_5_BASE_HI__MASK 0x0001ffff
+#define A5XX_TEX_CONST_5_BASE_HI__SHIFT 0
+static inline uint32_t A5XX_TEX_CONST_5_BASE_HI(uint32_t val)
+{
+ return ((val) << A5XX_TEX_CONST_5_BASE_HI__SHIFT) & A5XX_TEX_CONST_5_BASE_HI__MASK;
+}
+#define A5XX_TEX_CONST_5_DEPTH__MASK 0x3ffe0000
+#define A5XX_TEX_CONST_5_DEPTH__SHIFT 17
+static inline uint32_t A5XX_TEX_CONST_5_DEPTH(uint32_t val)
+{
+ return ((val) << A5XX_TEX_CONST_5_DEPTH__SHIFT) & A5XX_TEX_CONST_5_DEPTH__MASK;
+}
+
+#define REG_A5XX_TEX_CONST_6 0x00000006
+
+#define REG_A5XX_TEX_CONST_7 0x00000007
+
+#define REG_A5XX_TEX_CONST_8 0x00000008
+
+#define REG_A5XX_TEX_CONST_9 0x00000009
+
+#define REG_A5XX_TEX_CONST_10 0x0000000a
+
+#define REG_A5XX_TEX_CONST_11 0x0000000b
+
+
+#endif /* A5XX_XML */
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
new file mode 100644
index 000000000000..b8647198c11c
--- /dev/null
+++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
@@ -0,0 +1,888 @@
+/* Copyright (c) 2016 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include "msm_gem.h"
+#include "a5xx_gpu.h"
+
+extern bool hang_debug;
+static void a5xx_dump(struct msm_gpu *gpu);
+
+static void a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
+ struct msm_file_private *ctx)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct msm_drm_private *priv = gpu->dev->dev_private;
+ struct msm_ringbuffer *ring = gpu->rb;
+ unsigned int i, ibs = 0;
+
+ for (i = 0; i < submit->nr_cmds; i++) {
+ switch (submit->cmd[i].type) {
+ case MSM_SUBMIT_CMD_IB_TARGET_BUF:
+ break;
+ case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
+ if (priv->lastctx == ctx)
+ break;
+ /* fall through - emit the buffer if the context changed */
+ case MSM_SUBMIT_CMD_BUF:
+ OUT_PKT7(ring, CP_INDIRECT_BUFFER_PFE, 3);
+ OUT_RING(ring, lower_32_bits(submit->cmd[i].iova));
+ OUT_RING(ring, upper_32_bits(submit->cmd[i].iova));
+ OUT_RING(ring, submit->cmd[i].size);
+ ibs++;
+ break;
+ }
+ }
+
+ /* Record the fence seqno in a CP scratch register */
+ OUT_PKT4(ring, REG_A5XX_CP_SCRATCH_REG(2), 1);
+ OUT_RING(ring, submit->fence->seqno);
+
+ /*
+ * Have the CP write the fence seqno to memory after the cache flush;
+ * bit 31 requests an interrupt on completion so finished submits can
+ * be retired
+ */
+ OUT_PKT7(ring, CP_EVENT_WRITE, 4);
+ OUT_RING(ring, CACHE_FLUSH_TS | (1 << 31));
+ OUT_RING(ring, lower_32_bits(rbmemptr(adreno_gpu, fence)));
+ OUT_RING(ring, upper_32_bits(rbmemptr(adreno_gpu, fence)));
+ OUT_RING(ring, submit->fence->seqno);
+
+ gpu->funcs->flush(gpu);
+}
+
+struct a5xx_hwcg {
+ u32 offset;
+ u32 value;
+};
+
+static const struct a5xx_hwcg a530_hwcg[] = {
+ {REG_A5XX_RBBM_CLOCK_CNTL_SP0, 0x02222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL_SP1, 0x02222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL_SP2, 0x02222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL_SP3, 0x02222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL2_SP0, 0x02222220},
+ {REG_A5XX_RBBM_CLOCK_CNTL2_SP1, 0x02222220},
+ {REG_A5XX_RBBM_CLOCK_CNTL2_SP2, 0x02222220},
+ {REG_A5XX_RBBM_CLOCK_CNTL2_SP3, 0x02222220},
+ {REG_A5XX_RBBM_CLOCK_HYST_SP0, 0x0000F3CF},
+ {REG_A5XX_RBBM_CLOCK_HYST_SP1, 0x0000F3CF},
+ {REG_A5XX_RBBM_CLOCK_HYST_SP2, 0x0000F3CF},
+ {REG_A5XX_RBBM_CLOCK_HYST_SP3, 0x0000F3CF},
+ {REG_A5XX_RBBM_CLOCK_DELAY_SP0, 0x00000080},
+ {REG_A5XX_RBBM_CLOCK_DELAY_SP1, 0x00000080},
+ {REG_A5XX_RBBM_CLOCK_DELAY_SP2, 0x00000080},
+ {REG_A5XX_RBBM_CLOCK_DELAY_SP3, 0x00000080},
+ {REG_A5XX_RBBM_CLOCK_CNTL_TP0, 0x22222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL_TP1, 0x22222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL_TP2, 0x22222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL_TP3, 0x22222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL2_TP0, 0x22222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL2_TP1, 0x22222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL2_TP2, 0x22222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL2_TP3, 0x22222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL3_TP0, 0x00002222},
+ {REG_A5XX_RBBM_CLOCK_CNTL3_TP1, 0x00002222},
+ {REG_A5XX_RBBM_CLOCK_CNTL3_TP2, 0x00002222},
+ {REG_A5XX_RBBM_CLOCK_CNTL3_TP3, 0x00002222},
+ {REG_A5XX_RBBM_CLOCK_HYST_TP0, 0x77777777},
+ {REG_A5XX_RBBM_CLOCK_HYST_TP1, 0x77777777},
+ {REG_A5XX_RBBM_CLOCK_HYST_TP2, 0x77777777},
+ {REG_A5XX_RBBM_CLOCK_HYST_TP3, 0x77777777},
+ {REG_A5XX_RBBM_CLOCK_HYST2_TP0, 0x77777777},
+ {REG_A5XX_RBBM_CLOCK_HYST2_TP1, 0x77777777},
+ {REG_A5XX_RBBM_CLOCK_HYST2_TP2, 0x77777777},
+ {REG_A5XX_RBBM_CLOCK_HYST2_TP3, 0x77777777},
+ {REG_A5XX_RBBM_CLOCK_HYST3_TP0, 0x00007777},
+ {REG_A5XX_RBBM_CLOCK_HYST3_TP1, 0x00007777},
+ {REG_A5XX_RBBM_CLOCK_HYST3_TP2, 0x00007777},
+ {REG_A5XX_RBBM_CLOCK_HYST3_TP3, 0x00007777},
+ {REG_A5XX_RBBM_CLOCK_DELAY_TP0, 0x11111111},
+ {REG_A5XX_RBBM_CLOCK_DELAY_TP1, 0x11111111},
+ {REG_A5XX_RBBM_CLOCK_DELAY_TP2, 0x11111111},
+ {REG_A5XX_RBBM_CLOCK_DELAY_TP3, 0x11111111},
+ {REG_A5XX_RBBM_CLOCK_DELAY2_TP0, 0x11111111},
+ {REG_A5XX_RBBM_CLOCK_DELAY2_TP1, 0x11111111},
+ {REG_A5XX_RBBM_CLOCK_DELAY2_TP2, 0x11111111},
+ {REG_A5XX_RBBM_CLOCK_DELAY2_TP3, 0x11111111},
+ {REG_A5XX_RBBM_CLOCK_DELAY3_TP0, 0x00001111},
+ {REG_A5XX_RBBM_CLOCK_DELAY3_TP1, 0x00001111},
+ {REG_A5XX_RBBM_CLOCK_DELAY3_TP2, 0x00001111},
+ {REG_A5XX_RBBM_CLOCK_DELAY3_TP3, 0x00001111},
+ {REG_A5XX_RBBM_CLOCK_CNTL_UCHE, 0x22222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL2_UCHE, 0x22222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL3_UCHE, 0x22222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL4_UCHE, 0x00222222},
+ {REG_A5XX_RBBM_CLOCK_HYST_UCHE, 0x00444444},
+ {REG_A5XX_RBBM_CLOCK_DELAY_UCHE, 0x00000002},
+ {REG_A5XX_RBBM_CLOCK_CNTL_RB0, 0x22222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL_RB1, 0x22222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL_RB2, 0x22222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL_RB3, 0x22222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL2_RB0, 0x00222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL2_RB1, 0x00222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL2_RB2, 0x00222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL2_RB3, 0x00222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL_CCU0, 0x00022220},
+ {REG_A5XX_RBBM_CLOCK_CNTL_CCU1, 0x00022220},
+ {REG_A5XX_RBBM_CLOCK_CNTL_CCU2, 0x00022220},
+ {REG_A5XX_RBBM_CLOCK_CNTL_CCU3, 0x00022220},
+ {REG_A5XX_RBBM_CLOCK_CNTL_RAC, 0x05522222},
+ {REG_A5XX_RBBM_CLOCK_CNTL2_RAC, 0x00505555},
+ {REG_A5XX_RBBM_CLOCK_HYST_RB_CCU0, 0x04040404},
+ {REG_A5XX_RBBM_CLOCK_HYST_RB_CCU1, 0x04040404},
+ {REG_A5XX_RBBM_CLOCK_HYST_RB_CCU2, 0x04040404},
+ {REG_A5XX_RBBM_CLOCK_HYST_RB_CCU3, 0x04040404},
+ {REG_A5XX_RBBM_CLOCK_HYST_RAC, 0x07444044},
+ {REG_A5XX_RBBM_CLOCK_DELAY_RB_CCU_L1_0, 0x00000002},
+ {REG_A5XX_RBBM_CLOCK_DELAY_RB_CCU_L1_1, 0x00000002},
+ {REG_A5XX_RBBM_CLOCK_DELAY_RB_CCU_L1_2, 0x00000002},
+ {REG_A5XX_RBBM_CLOCK_DELAY_RB_CCU_L1_3, 0x00000002},
+ {REG_A5XX_RBBM_CLOCK_DELAY_RAC, 0x00010011},
+ {REG_A5XX_RBBM_CLOCK_CNTL_TSE_RAS_RBBM, 0x04222222},
+ {REG_A5XX_RBBM_CLOCK_MODE_GPC, 0x02222222},
+ {REG_A5XX_RBBM_CLOCK_MODE_VFD, 0x00002222},
+ {REG_A5XX_RBBM_CLOCK_HYST_TSE_RAS_RBBM, 0x00000000},
+ {REG_A5XX_RBBM_CLOCK_HYST_GPC, 0x04104004},
+ {REG_A5XX_RBBM_CLOCK_HYST_VFD, 0x00000000},
+ {REG_A5XX_RBBM_CLOCK_DELAY_HLSQ, 0x00000000},
+ {REG_A5XX_RBBM_CLOCK_DELAY_TSE_RAS_RBBM, 0x00004000},
+ {REG_A5XX_RBBM_CLOCK_DELAY_GPC, 0x00000200},
+ {REG_A5XX_RBBM_CLOCK_DELAY_VFD, 0x00002222}
+};
+
+static const struct {
+ int (*test)(struct adreno_gpu *gpu);
+ const struct a5xx_hwcg *regs;
+ unsigned int count;
+} a5xx_hwcg_regs[] = {
+ { adreno_is_a530, a530_hwcg, ARRAY_SIZE(a530_hwcg), },
+};
+
+static void _a5xx_enable_hwcg(struct msm_gpu *gpu,
+ const struct a5xx_hwcg *regs, unsigned int count)
+{
+ unsigned int i;
+
+ for (i = 0; i < count; i++)
+ gpu_write(gpu, regs[i].offset, regs[i].value);
+
+ gpu_write(gpu, REG_A5XX_RBBM_CLOCK_CNTL, 0xAAA8AA00);
+ gpu_write(gpu, REG_A5XX_RBBM_ISDB_CNT, 0x182);
+}
+
+static void a5xx_enable_hwcg(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(a5xx_hwcg_regs); i++) {
+ if (a5xx_hwcg_regs[i].test(adreno_gpu)) {
+ _a5xx_enable_hwcg(gpu, a5xx_hwcg_regs[i].regs,
+ a5xx_hwcg_regs[i].count);
+ return;
+ }
+ }
+}
+
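+/*
+ * Illustrative sketch: a5xx_hwcg_regs[] is a table-driven dispatch, so a
+ * hypothetical future variant would only need one more entry, e.g.:
+ *
+ *   { adreno_is_a540, a540_hwcg, ARRAY_SIZE(a540_hwcg), },
+ *
+ * (adreno_is_a540 and a540_hwcg are assumed names, not part of this patch).
+ */
+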
+static int a5xx_me_init(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct msm_ringbuffer *ring = gpu->rb;
+
+ OUT_PKT7(ring, CP_ME_INIT, 8);
+
+ OUT_RING(ring, 0x0000002F);
+
+ /* Enable multiple hardware contexts */
+ OUT_RING(ring, 0x00000003);
+
+ /* Enable error detection */
+ OUT_RING(ring, 0x20000000);
+
+ /* Don't enable header dump */
+ OUT_RING(ring, 0x00000000);
+ OUT_RING(ring, 0x00000000);
+
+ /* Specify workarounds for various microcode issues */
+ if (adreno_is_a530(adreno_gpu)) {
+ /* Workaround for token end syncs
+ * Force a WFI after every direct-render 3D mode draw and every
+ * 2D mode 3 draw
+ */
+ OUT_RING(ring, 0x0000000B);
+ } else {
+ /* No workarounds enabled */
+ OUT_RING(ring, 0x00000000);
+ }
+
+ OUT_RING(ring, 0x00000000);
+ OUT_RING(ring, 0x00000000);
+
+ gpu->funcs->flush(gpu);
+
+ return gpu->funcs->idle(gpu) ? 0 : -EINVAL;
+}
+
+static struct drm_gem_object *a5xx_ucode_load_bo(struct msm_gpu *gpu,
+ const struct firmware *fw, u64 *iova)
+{
+ struct drm_device *drm = gpu->dev;
+ struct drm_gem_object *bo;
+ void *ptr;
+
+ mutex_lock(&drm->struct_mutex);
+ /* The microcode proper starts after the first dword of the image */
+ bo = msm_gem_new(drm, fw->size - 4, MSM_BO_UNCACHED);
+ mutex_unlock(&drm->struct_mutex);
+
+ if (IS_ERR(bo))
+ return bo;
+
+ ptr = msm_gem_get_vaddr(bo);
+ if (!ptr) {
+ drm_gem_object_unreference_unlocked(bo);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ if (iova) {
+ int ret = msm_gem_get_iova(bo, gpu->id, iova);
+
+ if (ret) {
+ drm_gem_object_unreference_unlocked(bo);
+ return ERR_PTR(ret);
+ }
+ }
+
+ memcpy(ptr, &fw->data[4], fw->size - 4);
+
+ msm_gem_put_vaddr(bo);
+ return bo;
+}
+
+static int a5xx_ucode_init(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
+ int ret;
+
+ if (!a5xx_gpu->pm4_bo) {
+ a5xx_gpu->pm4_bo = a5xx_ucode_load_bo(gpu, adreno_gpu->pm4,
+ &a5xx_gpu->pm4_iova);
+
+ if (IS_ERR(a5xx_gpu->pm4_bo)) {
+ ret = PTR_ERR(a5xx_gpu->pm4_bo);
+ a5xx_gpu->pm4_bo = NULL;
+ dev_err(gpu->dev->dev, "could not allocate PM4: %d\n",
+ ret);
+ return ret;
+ }
+ }
+
+ if (!a5xx_gpu->pfp_bo) {
+ a5xx_gpu->pfp_bo = a5xx_ucode_load_bo(gpu, adreno_gpu->pfp,
+ &a5xx_gpu->pfp_iova);
+
+ if (IS_ERR(a5xx_gpu->pfp_bo)) {
+ ret = PTR_ERR(a5xx_gpu->pfp_bo);
+ a5xx_gpu->pfp_bo = NULL;
+ dev_err(gpu->dev->dev, "could not allocate PFP: %d\n",
+ ret);
+ return ret;
+ }
+ }
+
+ gpu_write64(gpu, REG_A5XX_CP_ME_INSTR_BASE_LO,
+ REG_A5XX_CP_ME_INSTR_BASE_HI, a5xx_gpu->pm4_iova);
+
+ gpu_write64(gpu, REG_A5XX_CP_PFP_INSTR_BASE_LO,
+ REG_A5XX_CP_PFP_INSTR_BASE_HI, a5xx_gpu->pfp_iova);
+
+ return 0;
+}
+
+#define A5XX_INT_MASK (A5XX_RBBM_INT_0_MASK_RBBM_AHB_ERROR | \
+ A5XX_RBBM_INT_0_MASK_RBBM_TRANSFER_TIMEOUT | \
+ A5XX_RBBM_INT_0_MASK_RBBM_ME_MS_TIMEOUT | \
+ A5XX_RBBM_INT_0_MASK_RBBM_PFP_MS_TIMEOUT | \
+ A5XX_RBBM_INT_0_MASK_RBBM_ETS_MS_TIMEOUT | \
+ A5XX_RBBM_INT_0_MASK_RBBM_ATB_ASYNC_OVERFLOW | \
+ A5XX_RBBM_INT_0_MASK_CP_HW_ERROR | \
+ A5XX_RBBM_INT_0_MASK_CP_CACHE_FLUSH_TS | \
+ A5XX_RBBM_INT_0_MASK_UCHE_OOB_ACCESS | \
+ A5XX_RBBM_INT_0_MASK_GPMU_VOLTAGE_DROOP)
+
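+/*
+ * Illustrative note: this mask is written to REG_A5XX_RBBM_INT_0_MASK in
+ * a5xx_hw_init() below, and each enabled source has a matching handler in
+ * a5xx_irq().
+ */
+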
+static int a5xx_hw_init(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ int ret;
+
+ gpu_write(gpu, REG_A5XX_VBIF_ROUND_ROBIN_QOS_ARB, 0x00000003);
+
+ /* Make all blocks contribute to the GPU BUSY perf counter */
+ gpu_write(gpu, REG_A5XX_RBBM_PERFCTR_GPU_BUSY_MASKED, 0xFFFFFFFF);
+
+ /* Enable RBBM error reporting bits */
+ gpu_write(gpu, REG_A5XX_RBBM_AHB_CNTL0, 0x00000001);
+
+ if (adreno_gpu->quirks & ADRENO_QUIRK_FAULT_DETECT_MASK) {
+ /*
+ * Mask out the activity signals from RB1-3 to avoid false
+ * positives
+ */
+
+ gpu_write(gpu, REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL11,
+ 0xF0000000);
+ gpu_write(gpu, REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL12,
+ 0xFFFFFFFF);
+ gpu_write(gpu, REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL13,
+ 0xFFFFFFFF);
+ gpu_write(gpu, REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL14,
+ 0xFFFFFFFF);
+ gpu_write(gpu, REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL15,
+ 0xFFFFFFFF);
+ gpu_write(gpu, REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL16,
+ 0xFFFFFFFF);
+ gpu_write(gpu, REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL17,
+ 0xFFFFFFFF);
+ gpu_write(gpu, REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL18,
+ 0xFFFFFFFF);
+ }
+
+ /* Enable fault detection */
+ gpu_write(gpu, REG_A5XX_RBBM_INTERFACE_HANG_INT_CNTL,
+ (1 << 30) | 0xFFFF);
+
+ /* Turn on performance counters */
+ gpu_write(gpu, REG_A5XX_RBBM_PERFCTR_CNTL, 0x01);
+
+ /* Increase VFD cache access so LRZ and other data gets evicted less */
+ gpu_write(gpu, REG_A5XX_UCHE_CACHE_WAYS, 0x02);
+
+ /* Disable L2 bypass in the UCHE */
+ gpu_write(gpu, REG_A5XX_UCHE_TRAP_BASE_LO, 0xFFFF0000);
+ gpu_write(gpu, REG_A5XX_UCHE_TRAP_BASE_HI, 0x0001FFFF);
+ gpu_write(gpu, REG_A5XX_UCHE_WRITE_THRU_BASE_LO, 0xFFFF0000);
+ gpu_write(gpu, REG_A5XX_UCHE_WRITE_THRU_BASE_HI, 0x0001FFFF);
+
+ /* Set the GMEM VA range (0x100000 to 0x100000 + gpu->gmem - 1) */
+ gpu_write(gpu, REG_A5XX_UCHE_GMEM_RANGE_MIN_LO, 0x00100000);
+ gpu_write(gpu, REG_A5XX_UCHE_GMEM_RANGE_MIN_HI, 0x00000000);
+ gpu_write(gpu, REG_A5XX_UCHE_GMEM_RANGE_MAX_LO,
+ 0x00100000 + adreno_gpu->gmem - 1);
+ gpu_write(gpu, REG_A5XX_UCHE_GMEM_RANGE_MAX_HI, 0x00000000);
+
+ gpu_write(gpu, REG_A5XX_CP_MEQ_THRESHOLDS, 0x40);
+ gpu_write(gpu, REG_A5XX_CP_MERCIU_SIZE, 0x40);
+ gpu_write(gpu, REG_A5XX_CP_ROQ_THRESHOLDS_2, 0x80000060);
+ gpu_write(gpu, REG_A5XX_CP_ROQ_THRESHOLDS_1, 0x40201B16);
+
+ gpu_write(gpu, REG_A5XX_PC_DBG_ECO_CNTL, (0x400 << 11 | 0x300 << 22));
+
+ if (adreno_gpu->quirks & ADRENO_QUIRK_TWO_PASS_USE_WFI)
+ gpu_rmw(gpu, REG_A5XX_PC_DBG_ECO_CNTL, 0, (1 << 8));
+
+ gpu_write(gpu, REG_A5XX_PC_DBG_ECO_CNTL, 0xc0200100);
+
+ /* Enable USE_RETENTION_FLOPS */
+ gpu_write(gpu, REG_A5XX_CP_CHICKEN_DBG, 0x02000000);
+
+ /* Enable ME/PFP split notification */
+ gpu_write(gpu, REG_A5XX_RBBM_AHB_CNTL1, 0xA6FFFFFF);
+
+ /* Enable HWCG */
+ a5xx_enable_hwcg(gpu);
+
+ gpu_write(gpu, REG_A5XX_RBBM_AHB_CNTL2, 0x0000003F);
+
+ /* Set the highest bank bit */
+ gpu_write(gpu, REG_A5XX_TPL1_MODE_CNTL, 2 << 7);
+ gpu_write(gpu, REG_A5XX_RB_MODE_CNTL, 2 << 1);
+
+ /* Protect registers from the CP */
+ gpu_write(gpu, REG_A5XX_CP_PROTECT_CNTL, 0x00000007);
+
+ /* RBBM */
+ gpu_write(gpu, REG_A5XX_CP_PROTECT(0), ADRENO_PROTECT_RW(0x04, 4));
+ gpu_write(gpu, REG_A5XX_CP_PROTECT(1), ADRENO_PROTECT_RW(0x08, 8));
+ gpu_write(gpu, REG_A5XX_CP_PROTECT(2), ADRENO_PROTECT_RW(0x10, 16));
+ gpu_write(gpu, REG_A5XX_CP_PROTECT(3), ADRENO_PROTECT_RW(0x20, 32));
+ gpu_write(gpu, REG_A5XX_CP_PROTECT(4), ADRENO_PROTECT_RW(0x40, 64));
+ gpu_write(gpu, REG_A5XX_CP_PROTECT(5), ADRENO_PROTECT_RW(0x80, 64));
+
+ /* Content protect */
+ gpu_write(gpu, REG_A5XX_CP_PROTECT(6),
+ ADRENO_PROTECT_RW(REG_A5XX_RBBM_SECVID_TSB_TRUSTED_BASE_LO,
+ 16));
+ gpu_write(gpu, REG_A5XX_CP_PROTECT(7),
+ ADRENO_PROTECT_RW(REG_A5XX_RBBM_SECVID_TRUST_CNTL, 2));
+
+ /* CP */
+ gpu_write(gpu, REG_A5XX_CP_PROTECT(8), ADRENO_PROTECT_RW(0x800, 64));
+ gpu_write(gpu, REG_A5XX_CP_PROTECT(9), ADRENO_PROTECT_RW(0x840, 8));
+ gpu_write(gpu, REG_A5XX_CP_PROTECT(10), ADRENO_PROTECT_RW(0x880, 32));
+ gpu_write(gpu, REG_A5XX_CP_PROTECT(11), ADRENO_PROTECT_RW(0xAA0, 1));
+
+ /* RB */
+ gpu_write(gpu, REG_A5XX_CP_PROTECT(12), ADRENO_PROTECT_RW(0xCC0, 1));
+ gpu_write(gpu, REG_A5XX_CP_PROTECT(13), ADRENO_PROTECT_RW(0xCF0, 2));
+
+ /* VPC */
+ gpu_write(gpu, REG_A5XX_CP_PROTECT(14), ADRENO_PROTECT_RW(0xE68, 8));
+ gpu_write(gpu, REG_A5XX_CP_PROTECT(15), ADRENO_PROTECT_RW(0xE70, 4));
+
+ /* UCHE */
+ gpu_write(gpu, REG_A5XX_CP_PROTECT(16), ADRENO_PROTECT_RW(0xE80, 16));
+
+ if (adreno_is_a530(adreno_gpu))
+ gpu_write(gpu, REG_A5XX_CP_PROTECT(17),
+ ADRENO_PROTECT_RW(0x10000, 0x8000));
+
+ gpu_write(gpu, REG_A5XX_RBBM_SECVID_TSB_CNTL, 0);
+ /*
+ * Disable the trusted memory range - we don't actually support secure
+ * memory rendering at this point in time and we don't want to block
+ * off part of the virtual memory space.
+ */
+ gpu_write64(gpu, REG_A5XX_RBBM_SECVID_TSB_TRUSTED_BASE_LO,
+ REG_A5XX_RBBM_SECVID_TSB_TRUSTED_BASE_HI, 0x00000000);
+ gpu_write(gpu, REG_A5XX_RBBM_SECVID_TSB_TRUSTED_SIZE, 0x00000000);
+
+ /* Load the GPMU firmware before starting the HW init */
+ a5xx_gpmu_ucode_init(gpu);
+
+ ret = adreno_hw_init(gpu);
+ if (ret)
+ return ret;
+
+ ret = a5xx_ucode_init(gpu);
+ if (ret)
+ return ret;
+
+ /* Enable the interrupt sources that the driver handles */
+ gpu_write(gpu, REG_A5XX_RBBM_INT_0_MASK, A5XX_INT_MASK);
+
+ /* Clear ME_HALT to start the micro engine */
+ gpu_write(gpu, REG_A5XX_CP_PFP_ME_CNTL, 0);
+ ret = a5xx_me_init(gpu);
+ if (ret)
+ return ret;
+
+ ret = a5xx_power_init(gpu);
+ if (ret)
+ return ret;
+
+ /*
+ * Send a pipeline event stat to get misbehaving counters to start
+ * ticking correctly
+ */
+ if (adreno_is_a530(adreno_gpu)) {
+ OUT_PKT7(gpu->rb, CP_EVENT_WRITE, 1);
+ OUT_RING(gpu->rb, 0x0F);
+
+ gpu->funcs->flush(gpu);
+ if (!gpu->funcs->idle(gpu))
+ return -EINVAL;
+ }
+
+ /* Put the GPU into unsecure mode */
+ gpu_write(gpu, REG_A5XX_RBBM_SECVID_TRUST_CNTL, 0x0);
+
+ return 0;
+}
+
+static void a5xx_recover(struct msm_gpu *gpu)
+{
+ int i;
+
+ adreno_dump_info(gpu);
+
+ for (i = 0; i < 8; i++) {
+ printk("CP_SCRATCH_REG%d: %u\n", i,
+ gpu_read(gpu, REG_A5XX_CP_SCRATCH_REG(i)));
+ }
+
+ if (hang_debug)
+ a5xx_dump(gpu);
+
+ gpu_write(gpu, REG_A5XX_RBBM_SW_RESET_CMD, 1);
+ gpu_read(gpu, REG_A5XX_RBBM_SW_RESET_CMD);
+ gpu_write(gpu, REG_A5XX_RBBM_SW_RESET_CMD, 0);
+ adreno_recover(gpu);
+}
+
+static void a5xx_destroy(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
+
+ DBG("%s", gpu->name);
+
+ if (a5xx_gpu->pm4_bo) {
+ if (a5xx_gpu->pm4_iova)
+ msm_gem_put_iova(a5xx_gpu->pm4_bo, gpu->id);
+ drm_gem_object_unreference_unlocked(a5xx_gpu->pm4_bo);
+ }
+
+ if (a5xx_gpu->pfp_bo) {
+ if (a5xx_gpu->pfp_iova)
+ msm_gem_put_iova(a5xx_gpu->pfp_bo, gpu->id);
+ drm_gem_object_unreference_unlocked(a5xx_gpu->pfp_bo);
+ }
+
+ if (a5xx_gpu->gpmu_bo) {
+ if (a5xx_gpu->gpmu_iova)
+ msm_gem_put_iova(a5xx_gpu->gpmu_bo, gpu->id);
+ drm_gem_object_unreference_unlocked(a5xx_gpu->gpmu_bo);
+ }
+
+ adreno_gpu_cleanup(adreno_gpu);
+ kfree(a5xx_gpu);
+}
+
+static inline bool _a5xx_check_idle(struct msm_gpu *gpu)
+{
+ if (gpu_read(gpu, REG_A5XX_RBBM_STATUS) & ~A5XX_RBBM_STATUS_HI_BUSY)
+ return false;
+
+ /*
+ * Nearly every abnormality ends up pausing the GPU and triggering a
+ * fault, so we can safely just watch for this one interrupt to fire
+ */
+ return !(gpu_read(gpu, REG_A5XX_RBBM_INT_0_STATUS) &
+ A5XX_RBBM_INT_0_MASK_MISC_HANG_DETECT);
+}
+
+static bool a5xx_idle(struct msm_gpu *gpu)
+{
+ /* wait for CP to drain ringbuffer: */
+ if (!adreno_idle(gpu))
+ return false;
+
+ if (spin_until(_a5xx_check_idle(gpu))) {
+ DRM_ERROR("%s: %ps: timeout waiting for GPU to idle: status %8.8X irq %8.8X\n",
+ gpu->name, __builtin_return_address(0),
+ gpu_read(gpu, REG_A5XX_RBBM_STATUS),
+ gpu_read(gpu, REG_A5XX_RBBM_INT_0_STATUS));
+
+ return false;
+ }
+
+ return true;
+}
+
+static void a5xx_cp_err_irq(struct msm_gpu *gpu)
+{
+ u32 status = gpu_read(gpu, REG_A5XX_CP_INTERRUPT_STATUS);
+
+ if (status & A5XX_CP_INT_CP_OPCODE_ERROR) {
+ u32 val;
+
+ gpu_write(gpu, REG_A5XX_CP_PFP_STAT_ADDR, 0);
+
+ /*
+ * REG_A5XX_CP_PFP_STAT_DATA is indexed, and we want index 1 so
+ * read it twice
+ */
+
+ gpu_read(gpu, REG_A5XX_CP_PFP_STAT_DATA);
+ val = gpu_read(gpu, REG_A5XX_CP_PFP_STAT_DATA);
+
+ dev_err_ratelimited(gpu->dev->dev, "CP | opcode error | possible opcode=0x%8.8X\n",
+ val);
+ }
+
+ if (status & A5XX_CP_INT_CP_HW_FAULT_ERROR)
+ dev_err_ratelimited(gpu->dev->dev, "CP | HW fault | status=0x%8.8X\n",
+ gpu_read(gpu, REG_A5XX_CP_HW_FAULT));
+
+ if (status & A5XX_CP_INT_CP_DMA_ERROR)
+ dev_err_ratelimited(gpu->dev->dev, "CP | DMA error\n");
+
+ if (status & A5XX_CP_INT_CP_REGISTER_PROTECTION_ERROR) {
+ u32 val = gpu_read(gpu, REG_A5XX_CP_PROTECT_STATUS);
+
+ dev_err_ratelimited(gpu->dev->dev,
+ "CP | protected mode error | %s | addr=0x%8.8X | status=0x%8.8X\n",
+ val & (1 << 24) ? "WRITE" : "READ",
+ (val & 0xFFFFF) >> 2, val);
+ }
+
+ if (status & A5XX_CP_INT_CP_AHB_ERROR) {
+ u32 status = gpu_read(gpu, REG_A5XX_CP_AHB_FAULT);
+ const char *access[16] = { "reserved", "reserved",
+ "timestamp lo", "timestamp hi", "pfp read", "pfp write",
+ "", "", "me read", "me write", "", "", "crashdump read",
+ "crashdump write", "", "" };
+
+ dev_err_ratelimited(gpu->dev->dev,
+ "CP | AHB error | addr=%X access=%s error=%d | status=0x%8.8X\n",
+ status & 0xFFFFF, access[(status >> 24) & 0xF],
+ (status & (1 << 31)), status);
+ }
+}
+
+static void a5xx_rbbm_err_irq(struct msm_gpu *gpu)
+{
+ u32 status = gpu_read(gpu, REG_A5XX_RBBM_INT_0_STATUS);
+
+ if (status & A5XX_RBBM_INT_0_MASK_RBBM_AHB_ERROR) {
+ u32 val = gpu_read(gpu, REG_A5XX_RBBM_AHB_ERROR_STATUS);
+
+ dev_err_ratelimited(gpu->dev->dev,
+ "RBBM | AHB bus error | %s | addr=0x%X | ports=0x%X:0x%X\n",
+ val & (1 << 28) ? "WRITE" : "READ",
+ (val & 0xFFFFF) >> 2, (val >> 20) & 0x3,
+ (val >> 24) & 0xF);
+
+ /* Clear the error */
+ gpu_write(gpu, REG_A5XX_RBBM_AHB_CMD, (1 << 4));
+ }
+
+ if (status & A5XX_RBBM_INT_0_MASK_RBBM_TRANSFER_TIMEOUT)
+ dev_err_ratelimited(gpu->dev->dev, "RBBM | AHB transfer timeout\n");
+
+ if (status & A5XX_RBBM_INT_0_MASK_RBBM_ME_MS_TIMEOUT)
+ dev_err_ratelimited(gpu->dev->dev, "RBBM | ME master split | status=0x%X\n",
+ gpu_read(gpu, REG_A5XX_RBBM_AHB_ME_SPLIT_STATUS));
+
+ if (status & A5XX_RBBM_INT_0_MASK_RBBM_PFP_MS_TIMEOUT)
+ dev_err_ratelimited(gpu->dev->dev, "RBBM | PFP master split | status=0x%X\n",
+ gpu_read(gpu, REG_A5XX_RBBM_AHB_PFP_SPLIT_STATUS));
+
+ if (status & A5XX_RBBM_INT_0_MASK_RBBM_ETS_MS_TIMEOUT)
+ dev_err_ratelimited(gpu->dev->dev, "RBBM | ETS master split | status=0x%X\n",
+ gpu_read(gpu, REG_A5XX_RBBM_AHB_ETS_SPLIT_STATUS));
+
+ if (status & A5XX_RBBM_INT_0_MASK_RBBM_ATB_ASYNC_OVERFLOW)
+ dev_err_ratelimited(gpu->dev->dev, "RBBM | ATB ASYNC overflow\n");
+
+ if (status & A5XX_RBBM_INT_0_MASK_RBBM_ATB_BUS_OVERFLOW)
+ dev_err_ratelimited(gpu->dev->dev, "RBBM | ATB bus overflow\n");
+}
+
+static void a5xx_uche_err_irq(struct msm_gpu *gpu)
+{
+ uint64_t addr = (uint64_t) gpu_read(gpu, REG_A5XX_UCHE_TRAP_LOG_HI) << 32;
+
+ addr |= gpu_read(gpu, REG_A5XX_UCHE_TRAP_LOG_LO);
+
+ dev_err_ratelimited(gpu->dev->dev, "UCHE | Out of bounds access | addr=0x%llX\n",
+ addr);
+}
+
+static void a5xx_gpmu_err_irq(struct msm_gpu *gpu)
+{
+ dev_err_ratelimited(gpu->dev->dev, "GPMU | voltage droop\n");
+}
+
+#define RBBM_ERROR_MASK \
+ (A5XX_RBBM_INT_0_MASK_RBBM_AHB_ERROR | \
+ A5XX_RBBM_INT_0_MASK_RBBM_TRANSFER_TIMEOUT | \
+ A5XX_RBBM_INT_0_MASK_RBBM_ME_MS_TIMEOUT | \
+ A5XX_RBBM_INT_0_MASK_RBBM_PFP_MS_TIMEOUT | \
+ A5XX_RBBM_INT_0_MASK_RBBM_ETS_MS_TIMEOUT | \
+ A5XX_RBBM_INT_0_MASK_RBBM_ATB_ASYNC_OVERFLOW)
+
+static irqreturn_t a5xx_irq(struct msm_gpu *gpu)
+{
+ u32 status = gpu_read(gpu, REG_A5XX_RBBM_INT_0_STATUS);
+
+ gpu_write(gpu, REG_A5XX_RBBM_INT_CLEAR_CMD, status);
+
+ if (status & RBBM_ERROR_MASK)
+ a5xx_rbbm_err_irq(gpu);
+
+ if (status & A5XX_RBBM_INT_0_MASK_CP_HW_ERROR)
+ a5xx_cp_err_irq(gpu);
+
+ if (status & A5XX_RBBM_INT_0_MASK_UCHE_OOB_ACCESS)
+ a5xx_uche_err_irq(gpu);
+
+ if (status & A5XX_RBBM_INT_0_MASK_GPMU_VOLTAGE_DROOP)
+ a5xx_gpmu_err_irq(gpu);
+
+ if (status & A5XX_RBBM_INT_0_MASK_CP_CACHE_FLUSH_TS)
+ msm_gpu_retire(gpu);
+
+ return IRQ_HANDLED;
+}
+
+static const u32 a5xx_register_offsets[REG_ADRENO_REGISTER_MAX] = {
+ REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_BASE, REG_A5XX_CP_RB_BASE),
+ REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_BASE_HI, REG_A5XX_CP_RB_BASE_HI),
+ REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_RPTR_ADDR, REG_A5XX_CP_RB_RPTR_ADDR),
+ REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_RPTR_ADDR_HI,
+ REG_A5XX_CP_RB_RPTR_ADDR_HI),
+ REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_RPTR, REG_A5XX_CP_RB_RPTR),
+ REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_WPTR, REG_A5XX_CP_RB_WPTR),
+ REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_CNTL, REG_A5XX_CP_RB_CNTL),
+};
+
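+/*
+ * Illustrative note: this table maps the generic REG_ADRENO_* indices used
+ * by the shared adreno code onto their A5XX register offsets.
+ */
+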
+static const u32 a5xx_registers[] = {
+ 0x0000, 0x0002, 0x0004, 0x0020, 0x0022, 0x0026, 0x0029, 0x002B,
+ 0x002E, 0x0035, 0x0038, 0x0042, 0x0044, 0x0044, 0x0047, 0x0095,
+ 0x0097, 0x00BB, 0x03A0, 0x0464, 0x0469, 0x046F, 0x04D2, 0x04D3,
+ 0x04E0, 0x0533, 0x0540, 0x0555, 0xF400, 0xF400, 0xF800, 0xF807,
+ 0x0800, 0x081A, 0x081F, 0x0841, 0x0860, 0x0860, 0x0880, 0x08A0,
+ 0x0B00, 0x0B12, 0x0B15, 0x0B28, 0x0B78, 0x0B7F, 0x0BB0, 0x0BBD,
+ 0x0BC0, 0x0BC6, 0x0BD0, 0x0C53, 0x0C60, 0x0C61, 0x0C80, 0x0C82,
+ 0x0C84, 0x0C85, 0x0C90, 0x0C98, 0x0CA0, 0x0CA0, 0x0CB0, 0x0CB2,
+ 0x2180, 0x2185, 0x2580, 0x2585, 0x0CC1, 0x0CC1, 0x0CC4, 0x0CC7,
+ 0x0CCC, 0x0CCC, 0x0CD0, 0x0CD8, 0x0CE0, 0x0CE5, 0x0CE8, 0x0CE8,
+ 0x0CEC, 0x0CF1, 0x0CFB, 0x0D0E, 0x2100, 0x211E, 0x2140, 0x2145,
+ 0x2500, 0x251E, 0x2540, 0x2545, 0x0D10, 0x0D17, 0x0D20, 0x0D23,
+ 0x0D30, 0x0D30, 0x20C0, 0x20C0, 0x24C0, 0x24C0, 0x0E40, 0x0E43,
+ 0x0E4A, 0x0E4A, 0x0E50, 0x0E57, 0x0E60, 0x0E7C, 0x0E80, 0x0E8E,
+ 0x0E90, 0x0E96, 0x0EA0, 0x0EA8, 0x0EB0, 0x0EB2, 0xE140, 0xE147,
+ 0xE150, 0xE187, 0xE1A0, 0xE1A9, 0xE1B0, 0xE1B6, 0xE1C0, 0xE1C7,
+ 0xE1D0, 0xE1D1, 0xE200, 0xE201, 0xE210, 0xE21C, 0xE240, 0xE268,
+ 0xE000, 0xE006, 0xE010, 0xE09A, 0xE0A0, 0xE0A4, 0xE0AA, 0xE0EB,
+ 0xE100, 0xE105, 0xE380, 0xE38F, 0xE3B0, 0xE3B0, 0xE400, 0xE405,
+ 0xE408, 0xE4E9, 0xE4F0, 0xE4F0, 0xE280, 0xE280, 0xE282, 0xE2A3,
+ 0xE2A5, 0xE2C2, 0xE940, 0xE947, 0xE950, 0xE987, 0xE9A0, 0xE9A9,
+ 0xE9B0, 0xE9B6, 0xE9C0, 0xE9C7, 0xE9D0, 0xE9D1, 0xEA00, 0xEA01,
+ 0xEA10, 0xEA1C, 0xEA40, 0xEA68, 0xE800, 0xE806, 0xE810, 0xE89A,
+ 0xE8A0, 0xE8A4, 0xE8AA, 0xE8EB, 0xE900, 0xE905, 0xEB80, 0xEB8F,
+ 0xEBB0, 0xEBB0, 0xEC00, 0xEC05, 0xEC08, 0xECE9, 0xECF0, 0xECF0,
+ 0xEA80, 0xEA80, 0xEA82, 0xEAA3, 0xEAA5, 0xEAC2, 0xA800, 0xA8FF,
+ 0xAC60, 0xAC60, 0xB000, 0xB97F, 0xB9A0, 0xB9BF,
+ ~0
+};
+
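+/*
+ * Illustrative note: a5xx_registers[] is a list of inclusive {start, end}
+ * register ranges, terminated by ~0, that the shared adreno dump/show code
+ * walks when snapshotting state.
+ */
+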
+static void a5xx_dump(struct msm_gpu *gpu)
+{
+ dev_info(gpu->dev->dev, "status: %08x\n",
+ gpu_read(gpu, REG_A5XX_RBBM_STATUS));
+ adreno_dump(gpu);
+}
+
+static int a5xx_pm_resume(struct msm_gpu *gpu)
+{
+ int ret;
+
+ /* Turn on the core power */
+ ret = msm_gpu_pm_resume(gpu);
+ if (ret)
+ return ret;
+
+ /* Turn on the RBCCU power domain first to limit the chances of voltage droop */
+ gpu_write(gpu, REG_A5XX_GPMU_RBCCU_POWER_CNTL, 0x778000);
+
+ /* Wait 3 usecs before polling */
+ udelay(3);
+
+ ret = spin_usecs(gpu, 20, REG_A5XX_GPMU_RBCCU_PWR_CLK_STATUS,
+ (1 << 20), (1 << 20));
+ if (ret) {
+ DRM_ERROR("%s: timeout waiting for RBCCU GDSC enable: %X\n",
+ gpu->name,
+ gpu_read(gpu, REG_A5XX_GPMU_RBCCU_PWR_CLK_STATUS));
+ return ret;
+ }
+
+ /* Turn on the SP domain */
+ gpu_write(gpu, REG_A5XX_GPMU_SP_POWER_CNTL, 0x778000);
+ ret = spin_usecs(gpu, 20, REG_A5XX_GPMU_SP_PWR_CLK_STATUS,
+ (1 << 20), (1 << 20));
+ if (ret)
+ DRM_ERROR("%s: timeout waiting for SP GDSC enable\n",
+ gpu->name);
+
+ return ret;
+}
+
+static int a5xx_pm_suspend(struct msm_gpu *gpu)
+{
+ /* Clear the VBIF pipe before shutting down */
+ gpu_write(gpu, REG_A5XX_VBIF_XIN_HALT_CTRL0, 0xF);
+ spin_until((gpu_read(gpu, REG_A5XX_VBIF_XIN_HALT_CTRL1) & 0xF) == 0xF);
+
+ gpu_write(gpu, REG_A5XX_VBIF_XIN_HALT_CTRL0, 0);
+
+ /*
+ * Reset the VBIF before power collapse to avoid issues with FIFO
+ * entries
+ */
+ gpu_write(gpu, REG_A5XX_RBBM_BLOCK_SW_RESET_CMD, 0x003C0000);
+ gpu_write(gpu, REG_A5XX_RBBM_BLOCK_SW_RESET_CMD, 0x00000000);
+
+ return msm_gpu_pm_suspend(gpu);
+}
+
+static int a5xx_get_timestamp(struct msm_gpu *gpu, uint64_t *value)
+{
+ *value = gpu_read64(gpu, REG_A5XX_RBBM_PERFCTR_CP_0_LO,
+ REG_A5XX_RBBM_PERFCTR_CP_0_HI);
+
+ return 0;
+}
+
+#ifdef CONFIG_DEBUG_FS
+static void a5xx_show(struct msm_gpu *gpu, struct seq_file *m)
+{
+ gpu->funcs->pm_resume(gpu);
+
+ seq_printf(m, "status: %08x\n",
+ gpu_read(gpu, REG_A5XX_RBBM_STATUS));
+ gpu->funcs->pm_suspend(gpu);
+
+ adreno_show(gpu, m);
+}
+#endif
+
+static const struct adreno_gpu_funcs funcs = {
+ .base = {
+ .get_param = adreno_get_param,
+ .hw_init = a5xx_hw_init,
+ .pm_suspend = a5xx_pm_suspend,
+ .pm_resume = a5xx_pm_resume,
+ .recover = a5xx_recover,
+ .last_fence = adreno_last_fence,
+ .submit = a5xx_submit,
+ .flush = adreno_flush,
+ .idle = a5xx_idle,
+ .irq = a5xx_irq,
+ .destroy = a5xx_destroy,
+ .show = a5xx_show,
+ },
+ .get_timestamp = a5xx_get_timestamp,
+};
+
+struct msm_gpu *a5xx_gpu_init(struct drm_device *dev)
+{
+ struct msm_drm_private *priv = dev->dev_private;
+ struct platform_device *pdev = priv->gpu_pdev;
+ struct a5xx_gpu *a5xx_gpu = NULL;
+ struct adreno_gpu *adreno_gpu;
+ struct msm_gpu *gpu;
+ int ret;
+
+ if (!pdev) {
+ dev_err(dev->dev, "No A5XX device is defined\n");
+ return ERR_PTR(-ENXIO);
+ }
+
+ a5xx_gpu = kzalloc(sizeof(*a5xx_gpu), GFP_KERNEL);
+ if (!a5xx_gpu)
+ return ERR_PTR(-ENOMEM);
+
+ adreno_gpu = &a5xx_gpu->base;
+ gpu = &adreno_gpu->base;
+
+ a5xx_gpu->pdev = pdev;
+ adreno_gpu->registers = a5xx_registers;
+ adreno_gpu->reg_offsets = a5xx_register_offsets;
+
+ a5xx_gpu->lm_leakage = 0x4E001A;
+
+ ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs);
+ if (ret) {
+ a5xx_destroy(&(a5xx_gpu->base.base));
+ return ERR_PTR(ret);
+ }
+
+ return gpu;
+}
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.h b/drivers/gpu/drm/msm/adreno/a5xx_gpu.h
new file mode 100644
index 000000000000..1590f845d554
--- /dev/null
+++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.h
@@ -0,0 +1,60 @@
+/* Copyright (c) 2016 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef __A5XX_GPU_H__
+#define __A5XX_GPU_H__
+
+#include "adreno_gpu.h"
+
+/* Bringing over the hack from the previous targets */
+#undef ROP_COPY
+#undef ROP_XOR
+
+#include "a5xx.xml.h"
+
+struct a5xx_gpu {
+ struct adreno_gpu base;
+ struct platform_device *pdev;
+
+ struct drm_gem_object *pm4_bo;
+ uint64_t pm4_iova;
+
+ struct drm_gem_object *pfp_bo;
+ uint64_t pfp_iova;
+
+ struct drm_gem_object *gpmu_bo;
+ uint64_t gpmu_iova;
+ uint32_t gpmu_dwords;
+
+ uint32_t lm_leakage;
+};
+
+#define to_a5xx_gpu(x) container_of(x, struct a5xx_gpu, base)
+
+int a5xx_power_init(struct msm_gpu *gpu);
+void a5xx_gpmu_ucode_init(struct msm_gpu *gpu);
+
+static inline int spin_usecs(struct msm_gpu *gpu, uint32_t usecs,
+ uint32_t reg, uint32_t mask, uint32_t value)
+{
+ while (usecs--) {
+ udelay(1);
+ if ((gpu_read(gpu, reg) & mask) == value)
+ return 0;
+ cpu_relax();
+ }
+
+ return -ETIMEDOUT;
+}
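+
+/*
+ * Illustrative usage: spin_usecs() polls until (reg & mask) == value or the
+ * timeout expires, e.g. waiting up to 20 usecs for bit 20 of the RBCCU
+ * power status to latch:
+ *
+ *   ret = spin_usecs(gpu, 20, REG_A5XX_GPMU_RBCCU_PWR_CLK_STATUS,
+ *                    (1 << 20), (1 << 20));
+ */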
+
+#endif /* __A5XX_GPU_H__ */
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_power.c b/drivers/gpu/drm/msm/adreno/a5xx_power.c
new file mode 100644
index 000000000000..72d52c71f769
--- /dev/null
+++ b/drivers/gpu/drm/msm/adreno/a5xx_power.c
@@ -0,0 +1,344 @@
+/* Copyright (c) 2016 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/pm_opp.h>
+#include "a5xx_gpu.h"
+
+/*
+ * The GPMU data block is a block of shared registers that can be used to
+ * communicate back and forth with the GPMU. These "registers" are defined
+ * by convention with the GPMU firmware and are not bound to any specific
+ * hardware design
+ */
+
+#define AGC_INIT_BASE REG_A5XX_GPMU_DATA_RAM_BASE
+#define AGC_INIT_MSG_MAGIC (AGC_INIT_BASE + 5)
+#define AGC_MSG_BASE (AGC_INIT_BASE + 7)
+
+#define AGC_MSG_STATE (AGC_MSG_BASE + 0)
+#define AGC_MSG_COMMAND (AGC_MSG_BASE + 1)
+#define AGC_MSG_PAYLOAD_SIZE (AGC_MSG_BASE + 3)
+#define AGC_MSG_PAYLOAD(_o) ((AGC_MSG_BASE + 5) + (_o))
+
+#define AGC_POWER_CONFIG_PRODUCTION_ID 1
+#define AGC_INIT_MSG_VALUE 0xBABEFACE
+
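+/*
+ * Illustrative note: an AGC message is laid out in the shared data RAM as a
+ * state word, a command word, a payload size (in bytes) and the payload
+ * itself, with the init magic written last - see a5xx_lm_setup() below for
+ * the actual sequence.
+ */
+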
+static struct {
+ uint32_t reg;
+ uint32_t value;
+} a5xx_sequence_regs[] = {
+ { 0xB9A1, 0x00010303 },
+ { 0xB9A2, 0x13000000 },
+ { 0xB9A3, 0x00460020 },
+ { 0xB9A4, 0x10000000 },
+ { 0xB9A5, 0x040A1707 },
+ { 0xB9A6, 0x00010000 },
+ { 0xB9A7, 0x0E000904 },
+ { 0xB9A8, 0x10000000 },
+ { 0xB9A9, 0x01165000 },
+ { 0xB9AA, 0x000E0002 },
+ { 0xB9AB, 0x03884141 },
+ { 0xB9AC, 0x10000840 },
+ { 0xB9AD, 0x572A5000 },
+ { 0xB9AE, 0x00000003 },
+ { 0xB9AF, 0x00000000 },
+ { 0xB9B0, 0x10000000 },
+ { 0xB828, 0x6C204010 },
+ { 0xB829, 0x6C204011 },
+ { 0xB82A, 0x6C204012 },
+ { 0xB82B, 0x6C204013 },
+ { 0xB82C, 0x6C204014 },
+ { 0xB90F, 0x00000004 },
+ { 0xB910, 0x00000002 },
+ { 0xB911, 0x00000002 },
+ { 0xB912, 0x00000002 },
+ { 0xB913, 0x00000002 },
+ { 0xB92F, 0x00000004 },
+ { 0xB930, 0x00000005 },
+ { 0xB931, 0x00000005 },
+ { 0xB932, 0x00000005 },
+ { 0xB933, 0x00000005 },
+ { 0xB96F, 0x00000001 },
+ { 0xB970, 0x00000003 },
+ { 0xB94F, 0x00000004 },
+ { 0xB950, 0x0000000B },
+ { 0xB951, 0x0000000B },
+ { 0xB952, 0x0000000B },
+ { 0xB953, 0x0000000B },
+ { 0xB907, 0x00000019 },
+ { 0xB927, 0x00000019 },
+ { 0xB947, 0x00000019 },
+ { 0xB967, 0x00000019 },
+ { 0xB987, 0x00000019 },
+ { 0xB906, 0x00220001 },
+ { 0xB926, 0x00220001 },
+ { 0xB946, 0x00220001 },
+ { 0xB966, 0x00220001 },
+ { 0xB986, 0x00300000 },
+ { 0xAC40, 0x0340FF41 },
+ { 0xAC41, 0x03BEFED0 },
+ { 0xAC42, 0x00331FED },
+ { 0xAC43, 0x021FFDD3 },
+ { 0xAC44, 0x5555AAAA },
+ { 0xAC45, 0x5555AAAA },
+ { 0xB9BA, 0x00000008 },
+};
+
+/*
+ * Get the actual voltage value for the operating point at the specified
+ * frequency
+ */
+static inline uint32_t _get_mvolts(struct msm_gpu *gpu, uint32_t freq)
+{
+ struct drm_device *dev = gpu->dev;
+ struct msm_drm_private *priv = dev->dev_private;
+ struct platform_device *pdev = priv->gpu_pdev;
+ struct dev_pm_opp *opp;
+
+ opp = dev_pm_opp_find_freq_exact(&pdev->dev, freq, true);
+
+ return (!IS_ERR(opp)) ? dev_pm_opp_get_voltage(opp) / 1000 : 0;
+}
+
+/* Set up thermal limit management */
+static void a5xx_lm_setup(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
+ unsigned int i;
+
+ /* Write the block of sequence registers */
+ for (i = 0; i < ARRAY_SIZE(a5xx_sequence_regs); i++)
+ gpu_write(gpu, a5xx_sequence_regs[i].reg,
+ a5xx_sequence_regs[i].value);
+
+ /* Hard code the A530 GPU thermal sensor ID for the GPMU */
+ gpu_write(gpu, REG_A5XX_GPMU_TEMP_SENSOR_ID, 0x60007);
+ gpu_write(gpu, REG_A5XX_GPMU_DELTA_TEMP_THRESHOLD, 0x01);
+ gpu_write(gpu, REG_A5XX_GPMU_TEMP_SENSOR_CONFIG, 0x01);
+
+ /* Until we get clock scaling, 0 is always the active power level */
+ gpu_write(gpu, REG_A5XX_GPMU_GPMU_VOLTAGE, 0x80000000 | 0);
+
+ gpu_write(gpu, REG_A5XX_GPMU_BASE_LEAKAGE, a5xx_gpu->lm_leakage);
+
+ /* The threshold is fixed at 6000 for A530 */
+ gpu_write(gpu, REG_A5XX_GPMU_GPMU_PWR_THRESHOLD, 0x80000000 | 6000);
+
+ gpu_write(gpu, REG_A5XX_GPMU_BEC_ENABLE, 0x10001FFF);
+ gpu_write(gpu, REG_A5XX_GDPM_CONFIG1, 0x00201FF1);
+
+ /* Write the voltage table */
+ gpu_write(gpu, REG_A5XX_GPMU_BEC_ENABLE, 0x10001FFF);
+ gpu_write(gpu, REG_A5XX_GDPM_CONFIG1, 0x201FF1);
+
+ gpu_write(gpu, AGC_MSG_STATE, 1);
+ gpu_write(gpu, AGC_MSG_COMMAND, AGC_POWER_CONFIG_PRODUCTION_ID);
+
+ /* Write the max power - hard coded to 5448 for A530 */
+ gpu_write(gpu, AGC_MSG_PAYLOAD(0), 5448);
+ gpu_write(gpu, AGC_MSG_PAYLOAD(1), 1);
+
+ /*
+ * For now just write the one voltage level - we will do more when we
+ * can do scaling
+ */
+ gpu_write(gpu, AGC_MSG_PAYLOAD(2), _get_mvolts(gpu, gpu->fast_rate));
+ gpu_write(gpu, AGC_MSG_PAYLOAD(3), gpu->fast_rate / 1000000);
+
+ gpu_write(gpu, AGC_MSG_PAYLOAD_SIZE, 4 * sizeof(uint32_t));
+ gpu_write(gpu, AGC_INIT_MSG_MAGIC, AGC_INIT_MSG_VALUE);
+}
+
+/* Enable SP/TP power collapse */
+static void a5xx_pc_init(struct msm_gpu *gpu)
+{
+ gpu_write(gpu, REG_A5XX_GPMU_PWR_COL_INTER_FRAME_CTRL, 0x7F);
+ gpu_write(gpu, REG_A5XX_GPMU_PWR_COL_BINNING_CTRL, 0);
+ gpu_write(gpu, REG_A5XX_GPMU_PWR_COL_INTER_FRAME_HYST, 0xA0080);
+ gpu_write(gpu, REG_A5XX_GPMU_PWR_COL_STAGGER_DELAY, 0x600040);
+}
+
+/* Enable the GPMU microcontroller */
+static int a5xx_gpmu_init(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
+ struct msm_ringbuffer *ring = gpu->rb;
+
+ if (!a5xx_gpu->gpmu_dwords)
+ return 0;
+
+ /* Turn off protected mode for this operation */
+ OUT_PKT7(ring, CP_SET_PROTECTED_MODE, 1);
+ OUT_RING(ring, 0);
+
+ /* Kick off the IB to load the GPMU microcode */
+ OUT_PKT7(ring, CP_INDIRECT_BUFFER_PFE, 3);
+ OUT_RING(ring, lower_32_bits(a5xx_gpu->gpmu_iova));
+ OUT_RING(ring, upper_32_bits(a5xx_gpu->gpmu_iova));
+ OUT_RING(ring, a5xx_gpu->gpmu_dwords);
+
+ /* Turn back on protected mode */
+ OUT_PKT7(ring, CP_SET_PROTECTED_MODE, 1);
+ OUT_RING(ring, 1);
+
+ gpu->funcs->flush(gpu);
+
+ if (!gpu->funcs->idle(gpu)) {
+ DRM_ERROR("%s: Unable to load GPMU firmware. GPMU will not be active\n",
+ gpu->name);
+ return -EINVAL;
+ }
+
+ gpu_write(gpu, REG_A5XX_GPMU_WFI_CONFIG, 0x4014);
+
+ /* Kick off the GPMU */
+ gpu_write(gpu, REG_A5XX_GPMU_CM3_SYSRESET, 0x0);
+
+ /*
+ * Wait for the GPMU to respond. It isn't fatal if it doesn't; we just
+ * won't have advanced power collapse.
+ */
+ if (spin_usecs(gpu, 25, REG_A5XX_GPMU_GENERAL_0, 0xFFFFFFFF,
+ 0xBABEFACE))
+ DRM_ERROR("%s: GPMU firmware initialization timed out\n",
+ gpu->name);
+
+ return 0;
+}
+
+/* Enable limits management */
+static void a5xx_lm_enable(struct msm_gpu *gpu)
+{
+ gpu_write(gpu, REG_A5XX_GDPM_INT_MASK, 0x0);
+ gpu_write(gpu, REG_A5XX_GDPM_INT_EN, 0x0A);
+ gpu_write(gpu, REG_A5XX_GPMU_GPMU_VOLTAGE_INTR_EN_MASK, 0x01);
+ gpu_write(gpu, REG_A5XX_GPMU_TEMP_THRESHOLD_INTR_EN_MASK, 0x50000);
+ gpu_write(gpu, REG_A5XX_GPMU_THROTTLE_UNMASK_FORCE_CTRL, 0x30000);
+
+ gpu_write(gpu, REG_A5XX_GPMU_CLOCK_THROTTLE_CTRL, 0x011);
+}
+
+int a5xx_power_init(struct msm_gpu *gpu)
+{
+ int ret;
+
+ /* Set up the limits management */
+ a5xx_lm_setup(gpu);
+
+ /* Set up SP/TP power collapse */
+ a5xx_pc_init(gpu);
+
+ /* Start the GPMU */
+ ret = a5xx_gpmu_init(gpu);
+ if (ret)
+ return ret;
+
+ /* Start the limits management */
+ a5xx_lm_enable(gpu);
+
+ return 0;
+}
+
+void a5xx_gpmu_ucode_init(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
+ struct drm_device *drm = gpu->dev;
+ const struct firmware *fw;
+ uint32_t dwords = 0, offset = 0, bosize;
+ unsigned int *data, *ptr, *cmds;
+ unsigned int cmds_size;
+
+ if (a5xx_gpu->gpmu_bo)
+ return;
+
+ /* Get the firmware */
+ if (request_firmware(&fw, adreno_gpu->info->gpmufw, drm->dev)) {
+ DRM_ERROR("%s: Could not get GPMU firmware. GPMU will not be active\n",
+ gpu->name);
+ return;
+ }
+
+ data = (unsigned int *) fw->data;
+
+ /*
+ * The first dword is the size of the remaining data in dwords. Use it
+ * as a checksum of sorts and make sure it matches the actual size of
+ * the firmware that we read
+ */
+
+ if (fw->size < 8 || (data[0] < 2) || (data[0] >= (fw->size >> 2)))
+ goto out;
+
+ /* The second dword is an ID - look for 2 (GPMU_FIRMWARE_ID) */
+ if (data[1] != 2)
+ goto out;
+
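+ /*
+ * The command stream sits after the three header dwords (size, ID and
+ * data[2]) plus data[2] dwords of additional header data; whatever is
+ * left, data[0] - data[2] - 2 dwords, is command payload.
+ */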
+ cmds = data + data[2] + 3;
+ cmds_size = data[0] - data[2] - 2;
+
+ /*
+ * A single type4 opcode can only have so many values attached, so add
+ * enough opcodes (one per TYPE4_MAX_PAYLOAD-dword chunk) to load all of
+ * the commands
+ */
+ bosize = (cmds_size + (cmds_size / TYPE4_MAX_PAYLOAD) + 1) << 2;
+
+ mutex_lock(&drm->struct_mutex);
+ a5xx_gpu->gpmu_bo = msm_gem_new(drm, bosize, MSM_BO_UNCACHED);
+ mutex_unlock(&drm->struct_mutex);
+
+ if (IS_ERR(a5xx_gpu->gpmu_bo))
+ goto err;
+
+ if (msm_gem_get_iova(a5xx_gpu->gpmu_bo, gpu->id, &a5xx_gpu->gpmu_iova))
+ goto err;
+
+ ptr = msm_gem_get_vaddr(a5xx_gpu->gpmu_bo);
+ if (!ptr)
+ goto err;
+
+ while (cmds_size > 0) {
+ int i;
+ uint32_t _size = cmds_size > TYPE4_MAX_PAYLOAD ?
+ TYPE4_MAX_PAYLOAD : cmds_size;
+
+ ptr[dwords++] = PKT4(REG_A5XX_GPMU_INST_RAM_BASE + offset,
+ _size);
+
+ for (i = 0; i < _size; i++)
+ ptr[dwords++] = *cmds++;
+
+ offset += _size;
+ cmds_size -= _size;
+ }
+
+ msm_gem_put_vaddr(a5xx_gpu->gpmu_bo);
+ a5xx_gpu->gpmu_dwords = dwords;
+
+ goto out;
+
+err:
+ if (a5xx_gpu->gpmu_iova)
+ msm_gem_put_iova(a5xx_gpu->gpmu_bo, gpu->id);
+ if (a5xx_gpu->gpmu_bo)
+ drm_gem_object_unreference_unlocked(a5xx_gpu->gpmu_bo);
+
+ a5xx_gpu->gpmu_bo = NULL;
+ a5xx_gpu->gpmu_iova = 0;
+ a5xx_gpu->gpmu_dwords = 0;
+
+out:
+ /* No need to keep that firmware lying around anymore */
+ release_firmware(fw);
+}
diff --git a/drivers/gpu/drm/msm/adreno/adreno_common.xml.h b/drivers/gpu/drm/msm/adreno/adreno_common.xml.h
index e81481d1b7df..4a33ba6f1244 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_common.xml.h
+++ b/drivers/gpu/drm/msm/adreno/adreno_common.xml.h
@@ -8,13 +8,14 @@ http://github.com/freedreno/envytools/
git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 398 bytes, from 2015-09-24 17:25:31)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 431 bytes, from 2016-04-26 17:56:44)
- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2016-02-10 17:07:21)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 32901 bytes, from 2015-05-20 20:03:14)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 11518 bytes, from 2016-02-10 21:03:25)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 16166 bytes, from 2016-02-11 21:20:31)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 83967 bytes, from 2016-02-10 17:07:21)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 109916 bytes, from 2016-02-20 18:44:48)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 32907 bytes, from 2016-11-26 23:01:08)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 12025 bytes, from 2016-11-26 23:01:08)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 22544 bytes, from 2016-11-26 23:01:08)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 83840 bytes, from 2016-11-26 23:01:08)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 110765 bytes, from 2016-11-26 23:01:48)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a5xx.xml ( 90321 bytes, from 2016-11-28 16:50:05)
- /home/robclark/src/freedreno/envytools/rnndb/adreno/ocmem.xml ( 1773 bytes, from 2015-09-24 17:30:00)
Copyright (C) 2013-2016 by the following authors:
@@ -172,6 +173,14 @@ enum a3xx_color_swap {
XYZW = 3,
};
+enum a3xx_rb_blend_opcode {
+ BLEND_DST_PLUS_SRC = 0,
+ BLEND_SRC_MINUS_DST = 1,
+ BLEND_DST_MINUS_SRC = 2,
+ BLEND_MIN_DST_SRC = 3,
+ BLEND_MAX_DST_SRC = 4,
+};
+
#define REG_AXXX_CP_RB_BASE 0x000001c0
#define REG_AXXX_CP_RB_CNTL 0x000001c1
diff --git a/drivers/gpu/drm/msm/adreno/adreno_device.c b/drivers/gpu/drm/msm/adreno/adreno_device.c
index 5127b75dbf40..893eb2b2531b 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_device.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_device.c
@@ -25,9 +25,6 @@ bool hang_debug = false;
MODULE_PARM_DESC(hang_debug, "Dump registers when hang is detected (can be slow!)");
module_param_named(hang_debug, hang_debug, bool, 0600);
-struct msm_gpu *a3xx_gpu_init(struct drm_device *dev);
-struct msm_gpu *a4xx_gpu_init(struct drm_device *dev);
-
static const struct adreno_info gpulist[] = {
{
.rev = ADRENO_REV(3, 0, 5, ANY_ID),
@@ -77,6 +74,15 @@ static const struct adreno_info gpulist[] = {
.pfpfw = "a420_pfp.fw",
.gmem = (SZ_1M + SZ_512K),
.init = a4xx_gpu_init,
+ }, {
+ .rev = ADRENO_REV(5, 3, 0, ANY_ID),
+ .revn = 530,
+ .name = "A530",
+ .pm4fw = "a530_pm4.fw",
+ .pfpfw = "a530_pfp.fw",
+ .gmem = SZ_1M,
+ .init = a5xx_gpu_init,
+ .gpmufw = "a530v3_gpmu.fw2",
},
};
@@ -86,6 +92,8 @@ MODULE_FIRMWARE("a330_pm4.fw");
MODULE_FIRMWARE("a330_pfp.fw");
MODULE_FIRMWARE("a420_pm4.fw");
MODULE_FIRMWARE("a420_pfp.fw");
+MODULE_FIRMWARE("a530_fm4.fw");
+MODULE_FIRMWARE("a530_pfp.fw");
static inline bool _rev_match(uint8_t entry, uint8_t id)
{
@@ -148,12 +156,16 @@ struct msm_gpu *adreno_load_gpu(struct drm_device *dev)
mutex_lock(&dev->struct_mutex);
gpu->funcs->pm_resume(gpu);
mutex_unlock(&dev->struct_mutex);
+
+ disable_irq(gpu->irq);
+
ret = gpu->funcs->hw_init(gpu);
if (ret) {
dev_err(dev->dev, "gpu hw init failed: %d\n", ret);
gpu->funcs->destroy(gpu);
gpu = NULL;
} else {
+ enable_irq(gpu->irq);
/* give inactive pm a chance to kick in: */
msm_gpu_retire(gpu);
}
@@ -169,12 +181,20 @@ static void set_gpu_pdev(struct drm_device *dev,
priv->gpu_pdev = pdev;
}
+static const struct {
+ const char *str;
+ uint32_t flag;
+} quirks[] = {
+ { "qcom,gpu-quirk-two-pass-use-wfi", ADRENO_QUIRK_TWO_PASS_USE_WFI },
+ { "qcom,gpu-quirk-fault-detect-mask", ADRENO_QUIRK_FAULT_DETECT_MASK },
+};
+
static int adreno_bind(struct device *dev, struct device *master, void *data)
{
static struct adreno_platform_config config = {};
struct device_node *child, *node = dev->of_node;
u32 val;
- int ret;
+ int ret, i;
ret = of_property_read_u32(node, "qcom,chipid", &val);
if (ret) {
@@ -208,6 +228,10 @@ static int adreno_bind(struct device *dev, struct device *master, void *data)
return -ENXIO;
}
+ for (i = 0; i < ARRAY_SIZE(quirks); i++)
+ if (of_property_read_bool(node, quirks[i].str))
+ config.quirks |= quirks[i].flag;
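+
+ /*
+ * An (illustrative) DT fragment that would set both quirks; the node
+ * name and unit address are hypothetical:
+ *
+ *	gpu@fdb00000 {
+ *		...
+ *		qcom,gpu-quirk-two-pass-use-wfi;
+ *		qcom,gpu-quirk-fault-detect-mask;
+ *	};
+ */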
+
dev->platform_data = &config;
set_gpu_pdev(dev_get_drvdata(master), to_platform_device(dev));
return 0;
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
index f386f463278d..a18126150e11 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
@@ -22,7 +22,7 @@
#include "msm_mmu.h"
#define RB_SIZE SZ_32K
-#define RB_BLKSIZE 16
+#define RB_BLKSIZE 32
int adreno_get_param(struct msm_gpu *gpu, uint32_t param, uint64_t *value)
{
@@ -54,9 +54,6 @@ int adreno_get_param(struct msm_gpu *gpu, uint32_t param, uint64_t *value)
}
}
-#define rbmemptr(adreno_gpu, member) \
- ((adreno_gpu)->memptrs_iova + offsetof(struct adreno_rbmemptrs, member))
-
int adreno_hw_init(struct msm_gpu *gpu)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
@@ -79,11 +76,14 @@ int adreno_hw_init(struct msm_gpu *gpu)
(adreno_is_a430(adreno_gpu) ? AXXX_CP_RB_CNTL_NO_UPDATE : 0));
/* Setup ringbuffer address: */
- adreno_gpu_write(adreno_gpu, REG_ADRENO_CP_RB_BASE, gpu->rb_iova);
+ adreno_gpu_write64(adreno_gpu, REG_ADRENO_CP_RB_BASE,
+ REG_ADRENO_CP_RB_BASE_HI, gpu->rb_iova);
- if (!adreno_is_a430(adreno_gpu))
- adreno_gpu_write(adreno_gpu, REG_ADRENO_CP_RB_RPTR_ADDR,
- rbmemptr(adreno_gpu, rptr));
+ if (!adreno_is_a430(adreno_gpu)) {
+ adreno_gpu_write64(adreno_gpu, REG_ADRENO_CP_RB_RPTR_ADDR,
+ REG_ADRENO_CP_RB_RPTR_ADDR_HI,
+ rbmemptr(adreno_gpu, rptr));
+ }
return 0;
}
@@ -126,11 +126,14 @@ void adreno_recover(struct msm_gpu *gpu)
adreno_gpu->memptrs->wptr = 0;
gpu->funcs->pm_resume(gpu);
+
+ disable_irq(gpu->irq);
ret = gpu->funcs->hw_init(gpu);
if (ret) {
dev_err(dev->dev, "gpu hw init failed: %d\n", ret);
/* hmm, oh well? */
}
+ enable_irq(gpu->irq);
}
void adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
@@ -218,19 +221,18 @@ void adreno_flush(struct msm_gpu *gpu)
adreno_gpu_write(adreno_gpu, REG_ADRENO_CP_RB_WPTR, wptr);
}
-void adreno_idle(struct msm_gpu *gpu)
+bool adreno_idle(struct msm_gpu *gpu)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
uint32_t wptr = get_wptr(gpu->rb);
- int ret;
/* wait for CP to drain ringbuffer: */
- ret = spin_until(get_rptr(adreno_gpu) == wptr);
-
- if (ret)
- DRM_ERROR("%s: timeout waiting to drain ringbuffer!\n", gpu->name);
+ if (!spin_until(get_rptr(adreno_gpu) == wptr))
+ return true;
/* TODO maybe we need to reset GPU here to recover from hang? */
+ DRM_ERROR("%s: timeout waiting to drain ringbuffer!\n", gpu->name);
+ return false;
}
#ifdef CONFIG_DEBUG_FS
@@ -278,7 +280,6 @@ void adreno_show(struct msm_gpu *gpu, struct seq_file *m)
void adreno_dump_info(struct msm_gpu *gpu)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
- int i;
printk("revision: %d (%d.%d.%d.%d)\n",
adreno_gpu->info->revn, adreno_gpu->rev.core,
@@ -290,11 +291,6 @@ void adreno_dump_info(struct msm_gpu *gpu)
printk("rptr: %d\n", get_rptr(adreno_gpu));
printk("wptr: %d\n", adreno_gpu->memptrs->wptr);
printk("rb wptr: %d\n", get_wptr(gpu->rb));
-
- for (i = 0; i < 8; i++) {
- printk("CP_SCRATCH_REG%d: %u\n", i,
- gpu_read(gpu, REG_AXXX_CP_SCRATCH_REG0 + i));
- }
}
/* would be nice to not have to duplicate the _show() stuff with printk(): */
@@ -350,6 +346,7 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
adreno_gpu->gmem = adreno_gpu->info->gmem;
adreno_gpu->revn = adreno_gpu->info->revn;
adreno_gpu->rev = config->rev;
+ adreno_gpu->quirks = config->quirks;
gpu->fast_rate = config->fast_rate;
gpu->slow_rate = config->slow_rate;
@@ -381,7 +378,7 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
return ret;
}
- mmu = gpu->mmu;
+ mmu = gpu->aspace->mmu;
if (mmu) {
ret = mmu->funcs->attach(mmu, iommu_ports,
ARRAY_SIZE(iommu_ports));
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.h b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
index a54f6e036b4a..e8d55b0306ed 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.h
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
@@ -28,6 +28,9 @@
#include "adreno_pm4.xml.h"
#define REG_ADRENO_DEFINE(_offset, _reg) [_offset] = (_reg) + 1
+#define REG_SKIP ~0
+#define REG_ADRENO_SKIP(_offset) [_offset] = REG_SKIP
+
/**
* adreno_regs: List of registers that are used in across all
* 3D devices. Each device type has different offset value for the same
@@ -35,73 +38,21 @@
* and are indexed by the enumeration values defined in this enum
*/
enum adreno_regs {
- REG_ADRENO_CP_DEBUG,
- REG_ADRENO_CP_ME_RAM_WADDR,
- REG_ADRENO_CP_ME_RAM_DATA,
- REG_ADRENO_CP_PFP_UCODE_DATA,
- REG_ADRENO_CP_PFP_UCODE_ADDR,
- REG_ADRENO_CP_WFI_PEND_CTR,
REG_ADRENO_CP_RB_BASE,
+ REG_ADRENO_CP_RB_BASE_HI,
REG_ADRENO_CP_RB_RPTR_ADDR,
+ REG_ADRENO_CP_RB_RPTR_ADDR_HI,
REG_ADRENO_CP_RB_RPTR,
REG_ADRENO_CP_RB_WPTR,
- REG_ADRENO_CP_PROTECT_CTRL,
- REG_ADRENO_CP_ME_CNTL,
REG_ADRENO_CP_RB_CNTL,
- REG_ADRENO_CP_IB1_BASE,
- REG_ADRENO_CP_IB1_BUFSZ,
- REG_ADRENO_CP_IB2_BASE,
- REG_ADRENO_CP_IB2_BUFSZ,
- REG_ADRENO_CP_TIMESTAMP,
- REG_ADRENO_CP_ME_RAM_RADDR,
- REG_ADRENO_CP_ROQ_ADDR,
- REG_ADRENO_CP_ROQ_DATA,
- REG_ADRENO_CP_MERCIU_ADDR,
- REG_ADRENO_CP_MERCIU_DATA,
- REG_ADRENO_CP_MERCIU_DATA2,
- REG_ADRENO_CP_MEQ_ADDR,
- REG_ADRENO_CP_MEQ_DATA,
- REG_ADRENO_CP_HW_FAULT,
- REG_ADRENO_CP_PROTECT_STATUS,
- REG_ADRENO_SCRATCH_ADDR,
- REG_ADRENO_SCRATCH_UMSK,
- REG_ADRENO_SCRATCH_REG2,
- REG_ADRENO_RBBM_STATUS,
- REG_ADRENO_RBBM_PERFCTR_CTL,
- REG_ADRENO_RBBM_PERFCTR_LOAD_CMD0,
- REG_ADRENO_RBBM_PERFCTR_LOAD_CMD1,
- REG_ADRENO_RBBM_PERFCTR_LOAD_CMD2,
- REG_ADRENO_RBBM_PERFCTR_PWR_1_LO,
- REG_ADRENO_RBBM_INT_0_MASK,
- REG_ADRENO_RBBM_INT_0_STATUS,
- REG_ADRENO_RBBM_AHB_ERROR_STATUS,
- REG_ADRENO_RBBM_PM_OVERRIDE2,
- REG_ADRENO_RBBM_AHB_CMD,
- REG_ADRENO_RBBM_INT_CLEAR_CMD,
- REG_ADRENO_RBBM_SW_RESET_CMD,
- REG_ADRENO_RBBM_CLOCK_CTL,
- REG_ADRENO_RBBM_AHB_ME_SPLIT_STATUS,
- REG_ADRENO_RBBM_AHB_PFP_SPLIT_STATUS,
- REG_ADRENO_VPC_DEBUG_RAM_SEL,
- REG_ADRENO_VPC_DEBUG_RAM_READ,
- REG_ADRENO_VSC_SIZE_ADDRESS,
- REG_ADRENO_VFD_CONTROL_0,
- REG_ADRENO_VFD_INDEX_MAX,
- REG_ADRENO_SP_VS_PVT_MEM_ADDR_REG,
- REG_ADRENO_SP_FS_PVT_MEM_ADDR_REG,
- REG_ADRENO_SP_VS_OBJ_START_REG,
- REG_ADRENO_SP_FS_OBJ_START_REG,
- REG_ADRENO_PA_SC_AA_CONFIG,
- REG_ADRENO_SQ_GPR_MANAGEMENT,
- REG_ADRENO_SQ_INST_STORE_MANAGMENT,
- REG_ADRENO_TP0_CHICKEN,
- REG_ADRENO_RBBM_RBBM_CTL,
- REG_ADRENO_UCHE_INVALIDATE0,
- REG_ADRENO_RBBM_PERFCTR_LOAD_VALUE_LO,
- REG_ADRENO_RBBM_PERFCTR_LOAD_VALUE_HI,
REG_ADRENO_REGISTER_MAX,
};
+enum adreno_quirks {
+ ADRENO_QUIRK_TWO_PASS_USE_WFI = 1,
+ ADRENO_QUIRK_FAULT_DETECT_MASK = 2,
+};
+
struct adreno_rev {
uint8_t core;
uint8_t major;
@@ -122,12 +73,16 @@ struct adreno_info {
uint32_t revn;
const char *name;
const char *pm4fw, *pfpfw;
+ const char *gpmufw;
uint32_t gmem;
struct msm_gpu *(*init)(struct drm_device *dev);
};
const struct adreno_info *adreno_info(struct adreno_rev rev);
+#define rbmemptr(adreno_gpu, member) \
+ ((adreno_gpu)->memptrs_iova + offsetof(struct adreno_rbmemptrs, member))
+
struct adreno_rbmemptrs {
volatile uint32_t rptr;
volatile uint32_t wptr;
@@ -153,7 +108,7 @@ struct adreno_gpu {
// different for z180..
struct adreno_rbmemptrs *memptrs;
struct drm_gem_object *memptrs_bo;
- uint32_t memptrs_iova;
+ uint64_t memptrs_iova;
/*
* Register offsets are different between some GPUs.
@@ -161,6 +116,8 @@ struct adreno_gpu {
* code (a3xx_gpu.c) and stored in this common location.
*/
const unsigned int *reg_offsets;
+
+ uint32_t quirks;
};
#define to_adreno_gpu(x) container_of(x, struct adreno_gpu, base)
@@ -171,6 +128,7 @@ struct adreno_platform_config {
#ifdef DOWNSTREAM_CONFIG_MSM_BUS_SCALING
struct msm_bus_scale_pdata *bus_scale_table;
#endif
+ uint32_t quirks;
};
#define ADRENO_IDLE_TIMEOUT msecs_to_jiffies(1000)
@@ -234,6 +192,11 @@ static inline int adreno_is_a430(struct adreno_gpu *gpu)
return gpu->revn == 430;
}
+static inline int adreno_is_a530(struct adreno_gpu *gpu)
+{
+ return gpu->revn == 530;
+}
+
int adreno_get_param(struct msm_gpu *gpu, uint32_t param, uint64_t *value);
int adreno_hw_init(struct msm_gpu *gpu);
uint32_t adreno_last_fence(struct msm_gpu *gpu);
@@ -241,7 +204,7 @@ void adreno_recover(struct msm_gpu *gpu);
void adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
struct msm_file_private *ctx);
void adreno_flush(struct msm_gpu *gpu);
-void adreno_idle(struct msm_gpu *gpu);
+bool adreno_idle(struct msm_gpu *gpu);
#ifdef CONFIG_DEBUG_FS
void adreno_show(struct msm_gpu *gpu, struct seq_file *m);
#endif
@@ -278,8 +241,38 @@ OUT_PKT3(struct msm_ringbuffer *ring, uint8_t opcode, uint16_t cnt)
OUT_RING(ring, CP_TYPE3_PKT | ((cnt-1) << 16) | ((opcode & 0xFF) << 8));
}
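+/*
+ * Compute the parity bit for a PM4 packet field: XOR-fold the 32 bit value
+ * down to a nibble and look it up in the bit table 0x9669. The returned bit
+ * keeps the overall population count of the field, parity bit included, odd.
+ */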
+static inline u32 PM4_PARITY(u32 val)
+{
+ return (0x9669 >> (0xF & (val ^
+ (val >> 4) ^ (val >> 8) ^ (val >> 12) ^
+ (val >> 16) ^ ((val) >> 20) ^ (val >> 24) ^
+ (val >> 28)))) & 1;
+}
+
+/* Maximum number of values that can be executed for one opcode */
+#define TYPE4_MAX_PAYLOAD 127
+
+#define PKT4(_reg, _cnt) \
+ (CP_TYPE4_PKT | ((_cnt) << 0) | (PM4_PARITY((_cnt)) << 7) | \
+ (((_reg) & 0x3FFFF) << 8) | (PM4_PARITY((_reg)) << 27))
+
+static inline void
+OUT_PKT4(struct msm_ringbuffer *ring, uint16_t regindx, uint16_t cnt)
+{
+ adreno_wait_ring(ring->gpu, cnt + 1);
+ OUT_RING(ring, PKT4(regindx, cnt));
+}
+
+static inline void
+OUT_PKT7(struct msm_ringbuffer *ring, uint8_t opcode, uint16_t cnt)
+{
+ adreno_wait_ring(ring->gpu, cnt + 1);
+ OUT_RING(ring, CP_TYPE7_PKT | (cnt << 0) | (PM4_PARITY(cnt) << 15) |
+ ((opcode & 0x7F) << 16) | (PM4_PARITY(opcode) << 23));
+}
+
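+/*
+ * Example (illustrative): emitting a single register write as a type4
+ * packet followed by an empty type7 NOP; the register/value pairing is
+ * borrowed from a5xx_power.c and only meant to show the calling pattern:
+ *
+ *	OUT_PKT4(ring, REG_A5XX_GPMU_WFI_CONFIG, 1);
+ *	OUT_RING(ring, 0x4014);
+ *
+ *	OUT_PKT7(ring, CP_NOP, 0);
+ */
+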
/*
- * adreno_checkreg_off() - Checks the validity of a register enum
+ * adreno_reg_check() - Checks the validity of a register enum
* @gpu: Pointer to struct adreno_gpu
* @offset_name: The register enum that is checked
*/
@@ -290,6 +283,16 @@ static inline bool adreno_reg_check(struct adreno_gpu *gpu,
!gpu->reg_offsets[offset_name]) {
BUG();
}
+
+ /*
+ * REG_SKIP is a special value that tells us that the register in
+ * question isn't implemented on the target, but shouldn't trigger a BUG(). This
+ * is used to cleanly implement adreno_gpu_write64() and
+ * adreno_gpu_read64() in a generic fashion
+ */
+ if (gpu->reg_offsets[offset_name] == REG_SKIP)
+ return false;
+
return true;
}
@@ -311,4 +314,39 @@ static inline void adreno_gpu_write(struct adreno_gpu *gpu,
gpu_write(&gpu->base, reg - 1, data);
}
+struct msm_gpu *a3xx_gpu_init(struct drm_device *dev);
+struct msm_gpu *a4xx_gpu_init(struct drm_device *dev);
+struct msm_gpu *a5xx_gpu_init(struct drm_device *dev);
+
+static inline void adreno_gpu_write64(struct adreno_gpu *gpu,
+ enum adreno_regs lo, enum adreno_regs hi, u64 data)
+{
+ adreno_gpu_write(gpu, lo, lower_32_bits(data));
+ adreno_gpu_write(gpu, hi, upper_32_bits(data));
+}
+
+/*
+ * Given a register and a count, return a value to program into
+ * REG_CP_PROTECT_REG(n) - this will block both reads and writes for _len
+ * registers starting at _reg.
+ *
+ * The register base needs to be a multiple of the length. If it is not, the
+ * hardware will quietly mask off the bits for you and shift the size. For
+ * example, if you intend the protection to start at 0x07 for a length of 4
+ * (0x07-0x0A), the hardware will actually protect (0x04-0x07), which might
+ * expose registers you intended to protect!
+ */
+#define ADRENO_PROTECT_RW(_reg, _len) \
+ ((1 << 30) | (1 << 29) | \
+ ((ilog2((_len)) & 0x1F) << 24) | (((_reg) << 2) & 0xFFFFF))
+
+/*
+ * Same as above, but allow reads over the range. For areas of mixed use (such
+ * as performance counters) this allows us to protect a much larger range with a
+ * single register
+ */
+#define ADRENO_PROTECT_RDONLY(_reg, _len) \
+ ((1 << 29) \
+ ((ilog2((_len)) & 0x1F) << 24) | (((_reg) << 2) & 0xFFFFF))
+
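+/*
+ * Example (illustrative, hypothetical range): ADRENO_PROTECT_RW(0x880, 32)
+ * blocks both reads and writes of the 32 registers starting at 0x880; the
+ * base is a multiple of the length, so the hardware masks nothing off.
+ */
+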
#endif /* __ADRENO_GPU_H__ */
diff --git a/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h b/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h
index d7477ff867c9..6a2930e75503 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h
+++ b/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h
@@ -8,13 +8,14 @@ http://github.com/freedreno/envytools/
git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 398 bytes, from 2015-09-24 17:25:31)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 431 bytes, from 2016-04-26 17:56:44)
- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2016-02-10 17:07:21)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 32901 bytes, from 2015-05-20 20:03:14)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 11518 bytes, from 2016-02-10 21:03:25)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 16166 bytes, from 2016-02-11 21:20:31)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 83967 bytes, from 2016-02-10 17:07:21)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 109916 bytes, from 2016-02-20 18:44:48)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 32907 bytes, from 2016-11-26 23:01:08)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 12025 bytes, from 2016-11-26 23:01:08)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 22544 bytes, from 2016-11-26 23:01:08)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 83840 bytes, from 2016-11-26 23:01:08)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 110765 bytes, from 2016-11-26 23:01:48)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a5xx.xml ( 90321 bytes, from 2016-11-28 16:50:05)
- /home/robclark/src/freedreno/envytools/rnndb/adreno/ocmem.xml ( 1773 bytes, from 2015-09-24 17:30:00)
Copyright (C) 2013-2016 by the following authors:
@@ -58,6 +59,7 @@ enum vgt_event_type {
RST_PIX_CNT = 13,
RST_VTX_CNT = 14,
TILE_FLUSH = 15,
+ STAT_EVENT = 16,
CACHE_FLUSH_AND_INV_TS_EVENT = 20,
ZPASS_DONE = 21,
CACHE_FLUSH_AND_INV_EVENT = 22,
@@ -65,6 +67,10 @@ enum vgt_event_type {
PERFCOUNTER_STOP = 24,
VS_FETCH_DONE = 27,
FACENESS_FLUSH = 28,
+ UNK_1C = 28,
+ UNK_1D = 29,
+ BLIT = 30,
+ UNK_26 = 38,
};
enum pc_di_primtype {
@@ -82,7 +88,6 @@ enum pc_di_primtype {
DI_PT_LINESTRIP_ADJ = 11,
DI_PT_TRI_ADJ = 12,
DI_PT_TRISTRIP_ADJ = 13,
- DI_PT_PATCHES = 34,
};
enum pc_di_src_sel {
@@ -110,11 +115,15 @@ enum adreno_pm4_packet_type {
CP_TYPE1_PKT = 0x40000000,
CP_TYPE2_PKT = 0x80000000,
CP_TYPE3_PKT = 0xc0000000,
+ CP_TYPE4_PKT = 0x40000000,
+ CP_TYPE7_PKT = 0x70000000,
};
enum adreno_pm4_type3_packets {
CP_ME_INIT = 72,
CP_NOP = 16,
+ CP_PREEMPT_ENABLE = 28,
+ CP_PREEMPT_TOKEN = 30,
CP_INDIRECT_BUFFER = 63,
CP_INDIRECT_BUFFER_PFD = 55,
CP_WAIT_FOR_IDLE = 38,
@@ -163,6 +172,7 @@ enum adreno_pm4_type3_packets {
CP_TEST_TWO_MEMS = 113,
CP_REG_WR_NO_CTXT = 120,
CP_RECORD_PFP_TIMESTAMP = 17,
+ CP_SET_SECURE_MODE = 102,
CP_WAIT_FOR_ME = 19,
CP_SET_DRAW_STATE = 67,
CP_DRAW_INDX_OFFSET = 56,
@@ -178,6 +188,22 @@ enum adreno_pm4_type3_packets {
CP_WAIT_MEM_WRITES = 18,
CP_COND_REG_EXEC = 71,
CP_MEM_TO_REG = 66,
+ CP_EXEC_CS = 51,
+ CP_PERFCOUNTER_ACTION = 80,
+ CP_SMMU_TABLE_UPDATE = 83,
+ CP_CONTEXT_REG_BUNCH = 92,
+ CP_YIELD_ENABLE = 28,
+ CP_SKIP_IB2_ENABLE_GLOBAL = 29,
+ CP_SKIP_IB2_ENABLE_LOCAL = 35,
+ CP_SET_SUBDRAW_SIZE = 53,
+ CP_SET_VISIBILITY_OVERRIDE = 100,
+ CP_PREEMPT_ENABLE_GLOBAL = 105,
+ CP_PREEMPT_ENABLE_LOCAL = 106,
+ CP_CONTEXT_SWITCH_YIELD = 107,
+ CP_SET_RENDER_MODE = 108,
+ CP_COMPUTE_CHECKPOINT = 110,
+ CP_MEM_TO_MEM = 115,
+ CP_BLIT = 44,
IN_IB_PREFETCH_END = 23,
IN_SUBBLK_PREFETCH = 31,
IN_INSTR_PREFETCH = 32,
@@ -196,6 +222,7 @@ enum adreno_state_block {
SB_VERT_SHADER = 4,
SB_GEOM_SHADER = 5,
SB_FRAG_SHADER = 6,
+ SB_COMPUTE_SHADER = 7,
};
enum adreno_state_type {
@@ -218,6 +245,17 @@ enum a4xx_index_size {
INDEX4_SIZE_32_BIT = 2,
};
+enum render_mode_cmd {
+ BYPASS = 1,
+ GMEM = 3,
+ BLIT2D = 5,
+};
+
+enum cp_blit_cmd {
+ BLIT_OP_FILL = 0,
+ BLIT_OP_BLIT = 1,
+};
+
#define REG_CP_LOAD_STATE_0 0x00000000
#define CP_LOAD_STATE_0_DST_OFF__MASK 0x0000ffff
#define CP_LOAD_STATE_0_DST_OFF__SHIFT 0
@@ -258,6 +296,14 @@ static inline uint32_t CP_LOAD_STATE_1_EXT_SRC_ADDR(uint32_t val)
return ((val >> 2) << CP_LOAD_STATE_1_EXT_SRC_ADDR__SHIFT) & CP_LOAD_STATE_1_EXT_SRC_ADDR__MASK;
}
+#define REG_CP_LOAD_STATE_2 0x00000002
+#define CP_LOAD_STATE_2_EXT_SRC_ADDR_HI__MASK 0xffffffff
+#define CP_LOAD_STATE_2_EXT_SRC_ADDR_HI__SHIFT 0
+static inline uint32_t CP_LOAD_STATE_2_EXT_SRC_ADDR_HI(uint32_t val)
+{
+ return ((val) << CP_LOAD_STATE_2_EXT_SRC_ADDR_HI__SHIFT) & CP_LOAD_STATE_2_EXT_SRC_ADDR_HI__MASK;
+}
+
#define REG_CP_DRAW_INDX_0 0x00000000
#define CP_DRAW_INDX_0_VIZ_QUERY__MASK 0xffffffff
#define CP_DRAW_INDX_0_VIZ_QUERY__SHIFT 0
@@ -389,7 +435,12 @@ static inline uint32_t CP_DRAW_INDX_OFFSET_0_SOURCE_SELECT(enum pc_di_src_sel va
{
return ((val) << CP_DRAW_INDX_OFFSET_0_SOURCE_SELECT__SHIFT) & CP_DRAW_INDX_OFFSET_0_SOURCE_SELECT__MASK;
}
-#define CP_DRAW_INDX_OFFSET_0_TESSELLATE 0x00000100
+#define CP_DRAW_INDX_OFFSET_0_VIS_CULL__MASK 0x00000300
+#define CP_DRAW_INDX_OFFSET_0_VIS_CULL__SHIFT 8
+static inline uint32_t CP_DRAW_INDX_OFFSET_0_VIS_CULL(enum pc_di_vis_cull_mode val)
+{
+ return ((val) << CP_DRAW_INDX_OFFSET_0_VIS_CULL__SHIFT) & CP_DRAW_INDX_OFFSET_0_VIS_CULL__MASK;
+}
#define CP_DRAW_INDX_OFFSET_0_INDEX_SIZE__MASK 0x00000c00
#define CP_DRAW_INDX_OFFSET_0_INDEX_SIZE__SHIFT 10
static inline uint32_t CP_DRAW_INDX_OFFSET_0_INDEX_SIZE(enum a4xx_index_size val)
@@ -437,30 +488,40 @@ static inline uint32_t CP_DRAW_INDX_OFFSET_5_INDX_SIZE(uint32_t val)
return ((val) << CP_DRAW_INDX_OFFSET_5_INDX_SIZE__SHIFT) & CP_DRAW_INDX_OFFSET_5_INDX_SIZE__MASK;
}
-#define REG_CP_SET_DRAW_STATE_0 0x00000000
-#define CP_SET_DRAW_STATE_0_COUNT__MASK 0x0000ffff
-#define CP_SET_DRAW_STATE_0_COUNT__SHIFT 0
-static inline uint32_t CP_SET_DRAW_STATE_0_COUNT(uint32_t val)
+static inline uint32_t REG_CP_SET_DRAW_STATE_(uint32_t i0) { return 0x00000000 + 0x3*i0; }
+
+static inline uint32_t REG_CP_SET_DRAW_STATE__0(uint32_t i0) { return 0x00000000 + 0x3*i0; }
+#define CP_SET_DRAW_STATE__0_COUNT__MASK 0x0000ffff
+#define CP_SET_DRAW_STATE__0_COUNT__SHIFT 0
+static inline uint32_t CP_SET_DRAW_STATE__0_COUNT(uint32_t val)
{
- return ((val) << CP_SET_DRAW_STATE_0_COUNT__SHIFT) & CP_SET_DRAW_STATE_0_COUNT__MASK;
+ return ((val) << CP_SET_DRAW_STATE__0_COUNT__SHIFT) & CP_SET_DRAW_STATE__0_COUNT__MASK;
}
-#define CP_SET_DRAW_STATE_0_DIRTY 0x00010000
-#define CP_SET_DRAW_STATE_0_DISABLE 0x00020000
-#define CP_SET_DRAW_STATE_0_DISABLE_ALL_GROUPS 0x00040000
-#define CP_SET_DRAW_STATE_0_LOAD_IMMED 0x00080000
-#define CP_SET_DRAW_STATE_0_GROUP_ID__MASK 0x1f000000
-#define CP_SET_DRAW_STATE_0_GROUP_ID__SHIFT 24
-static inline uint32_t CP_SET_DRAW_STATE_0_GROUP_ID(uint32_t val)
+#define CP_SET_DRAW_STATE__0_DIRTY 0x00010000
+#define CP_SET_DRAW_STATE__0_DISABLE 0x00020000
+#define CP_SET_DRAW_STATE__0_DISABLE_ALL_GROUPS 0x00040000
+#define CP_SET_DRAW_STATE__0_LOAD_IMMED 0x00080000
+#define CP_SET_DRAW_STATE__0_GROUP_ID__MASK 0x1f000000
+#define CP_SET_DRAW_STATE__0_GROUP_ID__SHIFT 24
+static inline uint32_t CP_SET_DRAW_STATE__0_GROUP_ID(uint32_t val)
{
- return ((val) << CP_SET_DRAW_STATE_0_GROUP_ID__SHIFT) & CP_SET_DRAW_STATE_0_GROUP_ID__MASK;
+ return ((val) << CP_SET_DRAW_STATE__0_GROUP_ID__SHIFT) & CP_SET_DRAW_STATE__0_GROUP_ID__MASK;
}
-#define REG_CP_SET_DRAW_STATE_1 0x00000001
-#define CP_SET_DRAW_STATE_1_ADDR__MASK 0xffffffff
-#define CP_SET_DRAW_STATE_1_ADDR__SHIFT 0
-static inline uint32_t CP_SET_DRAW_STATE_1_ADDR(uint32_t val)
+static inline uint32_t REG_CP_SET_DRAW_STATE__1(uint32_t i0) { return 0x00000001 + 0x3*i0; }
+#define CP_SET_DRAW_STATE__1_ADDR_LO__MASK 0xffffffff
+#define CP_SET_DRAW_STATE__1_ADDR_LO__SHIFT 0
+static inline uint32_t CP_SET_DRAW_STATE__1_ADDR_LO(uint32_t val)
{
- return ((val) << CP_SET_DRAW_STATE_1_ADDR__SHIFT) & CP_SET_DRAW_STATE_1_ADDR__MASK;
+ return ((val) << CP_SET_DRAW_STATE__1_ADDR_LO__SHIFT) & CP_SET_DRAW_STATE__1_ADDR_LO__MASK;
+}
+
+static inline uint32_t REG_CP_SET_DRAW_STATE__2(uint32_t i0) { return 0x00000002 + 0x3*i0; }
+#define CP_SET_DRAW_STATE__2_ADDR_HI__MASK 0xffffffff
+#define CP_SET_DRAW_STATE__2_ADDR_HI__SHIFT 0
+static inline uint32_t CP_SET_DRAW_STATE__2_ADDR_HI(uint32_t val)
+{
+ return ((val) << CP_SET_DRAW_STATE__2_ADDR_HI__SHIFT) & CP_SET_DRAW_STATE__2_ADDR_HI__MASK;
}
#define REG_CP_SET_BIN_0 0x00000000
@@ -533,5 +594,192 @@ static inline uint32_t CP_REG_TO_MEM_1_DEST(uint32_t val)
return ((val) << CP_REG_TO_MEM_1_DEST__SHIFT) & CP_REG_TO_MEM_1_DEST__MASK;
}
+#define REG_CP_DISPATCH_COMPUTE_0 0x00000000
+
+#define REG_CP_DISPATCH_COMPUTE_1 0x00000001
+#define CP_DISPATCH_COMPUTE_1_X__MASK 0xffffffff
+#define CP_DISPATCH_COMPUTE_1_X__SHIFT 0
+static inline uint32_t CP_DISPATCH_COMPUTE_1_X(uint32_t val)
+{
+ return ((val) << CP_DISPATCH_COMPUTE_1_X__SHIFT) & CP_DISPATCH_COMPUTE_1_X__MASK;
+}
+
+#define REG_CP_DISPATCH_COMPUTE_2 0x00000002
+#define CP_DISPATCH_COMPUTE_2_Y__MASK 0xffffffff
+#define CP_DISPATCH_COMPUTE_2_Y__SHIFT 0
+static inline uint32_t CP_DISPATCH_COMPUTE_2_Y(uint32_t val)
+{
+ return ((val) << CP_DISPATCH_COMPUTE_2_Y__SHIFT) & CP_DISPATCH_COMPUTE_2_Y__MASK;
+}
+
+#define REG_CP_DISPATCH_COMPUTE_3 0x00000003
+#define CP_DISPATCH_COMPUTE_3_Z__MASK 0xffffffff
+#define CP_DISPATCH_COMPUTE_3_Z__SHIFT 0
+static inline uint32_t CP_DISPATCH_COMPUTE_3_Z(uint32_t val)
+{
+ return ((val) << CP_DISPATCH_COMPUTE_3_Z__SHIFT) & CP_DISPATCH_COMPUTE_3_Z__MASK;
+}
+
+#define REG_CP_SET_RENDER_MODE_0 0x00000000
+#define CP_SET_RENDER_MODE_0_MODE__MASK 0x000001ff
+#define CP_SET_RENDER_MODE_0_MODE__SHIFT 0
+static inline uint32_t CP_SET_RENDER_MODE_0_MODE(enum render_mode_cmd val)
+{
+ return ((val) << CP_SET_RENDER_MODE_0_MODE__SHIFT) & CP_SET_RENDER_MODE_0_MODE__MASK;
+}
+
+#define REG_CP_SET_RENDER_MODE_1 0x00000001
+#define CP_SET_RENDER_MODE_1_ADDR_0_LO__MASK 0xffffffff
+#define CP_SET_RENDER_MODE_1_ADDR_0_LO__SHIFT 0
+static inline uint32_t CP_SET_RENDER_MODE_1_ADDR_0_LO(uint32_t val)
+{
+ return ((val) << CP_SET_RENDER_MODE_1_ADDR_0_LO__SHIFT) & CP_SET_RENDER_MODE_1_ADDR_0_LO__MASK;
+}
+
+#define REG_CP_SET_RENDER_MODE_2 0x00000002
+#define CP_SET_RENDER_MODE_2_ADDR_0_HI__MASK 0xffffffff
+#define CP_SET_RENDER_MODE_2_ADDR_0_HI__SHIFT 0
+static inline uint32_t CP_SET_RENDER_MODE_2_ADDR_0_HI(uint32_t val)
+{
+ return ((val) << CP_SET_RENDER_MODE_2_ADDR_0_HI__SHIFT) & CP_SET_RENDER_MODE_2_ADDR_0_HI__MASK;
+}
+
+#define REG_CP_SET_RENDER_MODE_3 0x00000003
+#define CP_SET_RENDER_MODE_3_GMEM_ENABLE 0x00000010
+
+#define REG_CP_SET_RENDER_MODE_4 0x00000004
+
+#define REG_CP_SET_RENDER_MODE_5 0x00000005
+#define CP_SET_RENDER_MODE_5_ADDR_1_LEN__MASK 0xffffffff
+#define CP_SET_RENDER_MODE_5_ADDR_1_LEN__SHIFT 0
+static inline uint32_t CP_SET_RENDER_MODE_5_ADDR_1_LEN(uint32_t val)
+{
+ return ((val) << CP_SET_RENDER_MODE_5_ADDR_1_LEN__SHIFT) & CP_SET_RENDER_MODE_5_ADDR_1_LEN__MASK;
+}
+
+#define REG_CP_SET_RENDER_MODE_6 0x00000006
+#define CP_SET_RENDER_MODE_6_ADDR_1_LO__MASK 0xffffffff
+#define CP_SET_RENDER_MODE_6_ADDR_1_LO__SHIFT 0
+static inline uint32_t CP_SET_RENDER_MODE_6_ADDR_1_LO(uint32_t val)
+{
+ return ((val) << CP_SET_RENDER_MODE_6_ADDR_1_LO__SHIFT) & CP_SET_RENDER_MODE_6_ADDR_1_LO__MASK;
+}
+
+#define REG_CP_SET_RENDER_MODE_7 0x00000007
+#define CP_SET_RENDER_MODE_7_ADDR_1_HI__MASK 0xffffffff
+#define CP_SET_RENDER_MODE_7_ADDR_1_HI__SHIFT 0
+static inline uint32_t CP_SET_RENDER_MODE_7_ADDR_1_HI(uint32_t val)
+{
+ return ((val) << CP_SET_RENDER_MODE_7_ADDR_1_HI__SHIFT) & CP_SET_RENDER_MODE_7_ADDR_1_HI__MASK;
+}
+
+#define REG_CP_PERFCOUNTER_ACTION_0 0x00000000
+
+#define REG_CP_PERFCOUNTER_ACTION_1 0x00000001
+#define CP_PERFCOUNTER_ACTION_1_ADDR_0_LO__MASK 0xffffffff
+#define CP_PERFCOUNTER_ACTION_1_ADDR_0_LO__SHIFT 0
+static inline uint32_t CP_PERFCOUNTER_ACTION_1_ADDR_0_LO(uint32_t val)
+{
+ return ((val) << CP_PERFCOUNTER_ACTION_1_ADDR_0_LO__SHIFT) & CP_PERFCOUNTER_ACTION_1_ADDR_0_LO__MASK;
+}
+
+#define REG_CP_PERFCOUNTER_ACTION_2 0x00000002
+#define CP_PERFCOUNTER_ACTION_2_ADDR_0_HI__MASK 0xffffffff
+#define CP_PERFCOUNTER_ACTION_2_ADDR_0_HI__SHIFT 0
+static inline uint32_t CP_PERFCOUNTER_ACTION_2_ADDR_0_HI(uint32_t val)
+{
+ return ((val) << CP_PERFCOUNTER_ACTION_2_ADDR_0_HI__SHIFT) & CP_PERFCOUNTER_ACTION_2_ADDR_0_HI__MASK;
+}
+
+#define REG_CP_EVENT_WRITE_0 0x00000000
+#define CP_EVENT_WRITE_0_EVENT__MASK 0x000000ff
+#define CP_EVENT_WRITE_0_EVENT__SHIFT 0
+static inline uint32_t CP_EVENT_WRITE_0_EVENT(enum vgt_event_type val)
+{
+ return ((val) << CP_EVENT_WRITE_0_EVENT__SHIFT) & CP_EVENT_WRITE_0_EVENT__MASK;
+}
+
+#define REG_CP_EVENT_WRITE_1 0x00000001
+#define CP_EVENT_WRITE_1_ADDR_0_LO__MASK 0xffffffff
+#define CP_EVENT_WRITE_1_ADDR_0_LO__SHIFT 0
+static inline uint32_t CP_EVENT_WRITE_1_ADDR_0_LO(uint32_t val)
+{
+ return ((val) << CP_EVENT_WRITE_1_ADDR_0_LO__SHIFT) & CP_EVENT_WRITE_1_ADDR_0_LO__MASK;
+}
+
+#define REG_CP_EVENT_WRITE_2 0x00000002
+#define CP_EVENT_WRITE_2_ADDR_0_HI__MASK 0xffffffff
+#define CP_EVENT_WRITE_2_ADDR_0_HI__SHIFT 0
+static inline uint32_t CP_EVENT_WRITE_2_ADDR_0_HI(uint32_t val)
+{
+ return ((val) << CP_EVENT_WRITE_2_ADDR_0_HI__SHIFT) & CP_EVENT_WRITE_2_ADDR_0_HI__MASK;
+}
+
+#define REG_CP_EVENT_WRITE_3 0x00000003
+
+#define REG_CP_BLIT_0 0x00000000
+#define CP_BLIT_0_OP__MASK 0x0000000f
+#define CP_BLIT_0_OP__SHIFT 0
+static inline uint32_t CP_BLIT_0_OP(enum cp_blit_cmd val)
+{
+ return ((val) << CP_BLIT_0_OP__SHIFT) & CP_BLIT_0_OP__MASK;
+}
+
+#define REG_CP_BLIT_1 0x00000001
+#define CP_BLIT_1_SRC_X1__MASK 0x0000ffff
+#define CP_BLIT_1_SRC_X1__SHIFT 0
+static inline uint32_t CP_BLIT_1_SRC_X1(uint32_t val)
+{
+ return ((val) << CP_BLIT_1_SRC_X1__SHIFT) & CP_BLIT_1_SRC_X1__MASK;
+}
+#define CP_BLIT_1_SRC_Y1__MASK 0xffff0000
+#define CP_BLIT_1_SRC_Y1__SHIFT 16
+static inline uint32_t CP_BLIT_1_SRC_Y1(uint32_t val)
+{
+ return ((val) << CP_BLIT_1_SRC_Y1__SHIFT) & CP_BLIT_1_SRC_Y1__MASK;
+}
+
+#define REG_CP_BLIT_2 0x00000002
+#define CP_BLIT_2_SRC_X2__MASK 0x0000ffff
+#define CP_BLIT_2_SRC_X2__SHIFT 0
+static inline uint32_t CP_BLIT_2_SRC_X2(uint32_t val)
+{
+ return ((val) << CP_BLIT_2_SRC_X2__SHIFT) & CP_BLIT_2_SRC_X2__MASK;
+}
+#define CP_BLIT_2_SRC_Y2__MASK 0xffff0000
+#define CP_BLIT_2_SRC_Y2__SHIFT 16
+static inline uint32_t CP_BLIT_2_SRC_Y2(uint32_t val)
+{
+ return ((val) << CP_BLIT_2_SRC_Y2__SHIFT) & CP_BLIT_2_SRC_Y2__MASK;
+}
+
+#define REG_CP_BLIT_3 0x00000003
+#define CP_BLIT_3_DST_X1__MASK 0x0000ffff
+#define CP_BLIT_3_DST_X1__SHIFT 0
+static inline uint32_t CP_BLIT_3_DST_X1(uint32_t val)
+{
+ return ((val) << CP_BLIT_3_DST_X1__SHIFT) & CP_BLIT_3_DST_X1__MASK;
+}
+#define CP_BLIT_3_DST_Y1__MASK 0xffff0000
+#define CP_BLIT_3_DST_Y1__SHIFT 16
+static inline uint32_t CP_BLIT_3_DST_Y1(uint32_t val)
+{
+ return ((val) << CP_BLIT_3_DST_Y1__SHIFT) & CP_BLIT_3_DST_Y1__MASK;
+}
+
+#define REG_CP_BLIT_4 0x00000004
+#define CP_BLIT_4_DST_X2__MASK 0x0000ffff
+#define CP_BLIT_4_DST_X2__SHIFT 0
+static inline uint32_t CP_BLIT_4_DST_X2(uint32_t val)
+{
+ return ((val) << CP_BLIT_4_DST_X2__SHIFT) & CP_BLIT_4_DST_X2__MASK;
+}
+#define CP_BLIT_4_DST_Y2__MASK 0xffff0000
+#define CP_BLIT_4_DST_Y2__SHIFT 16
+static inline uint32_t CP_BLIT_4_DST_Y2(uint32_t val)
+{
+ return ((val) << CP_BLIT_4_DST_Y2__SHIFT) & CP_BLIT_4_DST_Y2__MASK;
+}
+
#endif /* ADRENO_PM4_XML */
diff --git a/drivers/gpu/drm/msm/dsi/dsi.xml.h b/drivers/gpu/drm/msm/dsi/dsi.xml.h
index 4958594d5266..39dff7d5e89b 100644
--- a/drivers/gpu/drm/msm/dsi/dsi.xml.h
+++ b/drivers/gpu/drm/msm/dsi/dsi.xml.h
@@ -12,7 +12,7 @@ The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2016-02-10 17:07:21)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2015-05-20 20:03:14)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2849 bytes, from 2015-09-18 12:07:28)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 37194 bytes, from 2015-09-18 12:07:28)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 36965 bytes, from 2016-11-26 23:01:08)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 27887 bytes, from 2015-10-22 16:34:52)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 602 bytes, from 2015-10-22 16:35:02)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2015-05-20 20:03:14)
diff --git a/drivers/gpu/drm/msm/dsi/dsi_host.c b/drivers/gpu/drm/msm/dsi/dsi_host.c
index 6f240021705b..3819fdefcae2 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_host.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_host.c
@@ -982,7 +982,7 @@ static int dsi_tx_buf_alloc(struct msm_dsi_host *msm_host, int size)
struct drm_device *dev = msm_host->dev;
const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
int ret;
- u32 iova;
+ uint64_t iova;
if (cfg_hnd->major == MSM_DSI_VER_MAJOR_6G) {
mutex_lock(&dev->struct_mutex);
@@ -1147,7 +1147,7 @@ static int dsi_cmd_dma_tx(struct msm_dsi_host *msm_host, int len)
{
const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
int ret;
- u32 dma_base;
+ uint64_t dma_base;
bool triggered;
if (cfg_hnd->major == MSM_DSI_VER_MAJOR_6G) {
diff --git a/drivers/gpu/drm/msm/dsi/mmss_cc.xml.h b/drivers/gpu/drm/msm/dsi/mmss_cc.xml.h
index 2d999494cdea..8b9f3ebaeba7 100644
--- a/drivers/gpu/drm/msm/dsi/mmss_cc.xml.h
+++ b/drivers/gpu/drm/msm/dsi/mmss_cc.xml.h
@@ -12,7 +12,7 @@ The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2016-02-10 17:07:21)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2015-05-20 20:03:14)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2849 bytes, from 2015-09-18 12:07:28)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 37194 bytes, from 2015-09-18 12:07:28)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 36965 bytes, from 2016-11-26 23:01:08)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 27887 bytes, from 2015-10-22 16:34:52)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 602 bytes, from 2015-10-22 16:35:02)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2015-05-20 20:03:14)
diff --git a/drivers/gpu/drm/msm/dsi/sfpb.xml.h b/drivers/gpu/drm/msm/dsi/sfpb.xml.h
index 506434fac993..3fcbb30dc241 100644
--- a/drivers/gpu/drm/msm/dsi/sfpb.xml.h
+++ b/drivers/gpu/drm/msm/dsi/sfpb.xml.h
@@ -12,7 +12,7 @@ The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2016-02-10 17:07:21)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2015-05-20 20:03:14)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2849 bytes, from 2015-09-18 12:07:28)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 37194 bytes, from 2015-09-18 12:07:28)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 36965 bytes, from 2016-11-26 23:01:08)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 27887 bytes, from 2015-10-22 16:34:52)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 602 bytes, from 2015-10-22 16:35:02)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2015-05-20 20:03:14)
diff --git a/drivers/gpu/drm/msm/edp/edp.xml.h b/drivers/gpu/drm/msm/edp/edp.xml.h
index f1072c18c81e..d7bf3232dc88 100644
--- a/drivers/gpu/drm/msm/edp/edp.xml.h
+++ b/drivers/gpu/drm/msm/edp/edp.xml.h
@@ -12,7 +12,7 @@ The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2016-02-10 17:07:21)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2015-05-20 20:03:14)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2849 bytes, from 2015-09-18 12:07:28)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 37194 bytes, from 2015-09-18 12:07:28)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 36965 bytes, from 2016-11-26 23:01:08)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 27887 bytes, from 2015-10-22 16:34:52)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 602 bytes, from 2015-10-22 16:35:02)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2015-05-20 20:03:14)
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.xml.h b/drivers/gpu/drm/msm/hdmi/hdmi.xml.h
index 34c7df6549c1..0a97ff75ed6f 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi.xml.h
+++ b/drivers/gpu/drm/msm/hdmi/hdmi.xml.h
@@ -12,7 +12,7 @@ The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2016-02-10 17:07:21)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2015-05-20 20:03:14)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2849 bytes, from 2015-09-18 12:07:28)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 37194 bytes, from 2015-09-18 12:07:28)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 36965 bytes, from 2016-11-26 23:01:08)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 27887 bytes, from 2015-10-22 16:34:52)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 602 bytes, from 2015-10-22 16:35:02)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2015-05-20 20:03:14)
diff --git a/drivers/gpu/drm/msm/hdmi/qfprom.xml.h b/drivers/gpu/drm/msm/hdmi/qfprom.xml.h
index 6eab7d0cf6b5..1b996ede7a65 100644
--- a/drivers/gpu/drm/msm/hdmi/qfprom.xml.h
+++ b/drivers/gpu/drm/msm/hdmi/qfprom.xml.h
@@ -12,7 +12,7 @@ The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2016-02-10 17:07:21)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2015-05-20 20:03:14)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2849 bytes, from 2015-09-18 12:07:28)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 37194 bytes, from 2015-09-18 12:07:28)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 36965 bytes, from 2016-11-26 23:01:08)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 27887 bytes, from 2015-10-22 16:34:52)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 602 bytes, from 2015-10-22 16:35:02)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2015-05-20 20:03:14)
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4.xml.h b/drivers/gpu/drm/msm/mdp/mdp4/mdp4.xml.h
index 6688e79cc88e..88037889589b 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4.xml.h
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4.xml.h
@@ -12,7 +12,7 @@ The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2016-02-10 17:07:21)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2015-05-20 20:03:14)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2849 bytes, from 2015-09-18 12:07:28)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 37194 bytes, from 2015-09-18 12:07:28)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 36965 bytes, from 2016-11-26 23:01:08)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 27887 bytes, from 2015-10-22 16:34:52)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 602 bytes, from 2015-10-22 16:35:02)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2015-05-20 20:03:14)
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c
index 9527dafc3e69..1c29618f4ddb 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c
@@ -373,7 +373,7 @@ static void update_cursor(struct drm_crtc *crtc)
if (mdp4_crtc->cursor.stale) {
struct drm_gem_object *next_bo = mdp4_crtc->cursor.next_bo;
struct drm_gem_object *prev_bo = mdp4_crtc->cursor.scanout_bo;
- uint32_t iova = mdp4_crtc->cursor.next_iova;
+ uint64_t iova = mdp4_crtc->cursor.next_iova;
if (next_bo) {
/* take a obj ref + iova ref when we start scanning out: */
@@ -418,7 +418,7 @@ static int mdp4_crtc_cursor_set(struct drm_crtc *crtc,
struct drm_device *dev = crtc->dev;
struct drm_gem_object *cursor_bo, *old_bo;
unsigned long flags;
- uint32_t iova;
+ uint64_t iova;
int ret;
if ((width > CURSOR_WIDTH) || (height > CURSOR_HEIGHT)) {
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_irq.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_irq.c
index a521207db8a1..b764d7f10312 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_irq.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_irq.c
@@ -15,6 +15,7 @@
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
+#include <drm/drm_print.h>
#include "msm_drv.h"
#include "mdp4_kms.h"
@@ -29,7 +30,16 @@ void mdp4_set_irqmask(struct mdp_kms *mdp_kms, uint32_t irqmask,
static void mdp4_irq_error_handler(struct mdp_irq *irq, uint32_t irqstatus)
{
+ struct mdp4_kms *mdp4_kms = container_of(irq, struct mdp4_kms, error_handler);
+ static DEFINE_RATELIMIT_STATE(rs, 5*HZ, 1);
+ extern bool dumpstate;
+
DRM_ERROR_RATELIMITED("errors: %08x\n", irqstatus);
+
+ if (dumpstate && __ratelimit(&rs)) {
+ struct drm_printer p = drm_info_printer(mdp4_kms->dev->dev);
+ drm_state_dump(mdp4_kms->dev, &p);
+ }
}
void mdp4_irq_preinstall(struct msm_kms *kms)
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c
index 571a91ee9607..b782efd4b95f 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c
@@ -17,6 +17,7 @@
#include "msm_drv.h"
+#include "msm_gem.h"
#include "msm_mmu.h"
#include "mdp4_kms.h"
@@ -159,17 +160,18 @@ static void mdp4_destroy(struct msm_kms *kms)
{
struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms));
struct device *dev = mdp4_kms->dev->dev;
- struct msm_mmu *mmu = mdp4_kms->mmu;
-
- if (mmu) {
- mmu->funcs->detach(mmu, iommu_ports, ARRAY_SIZE(iommu_ports));
- mmu->funcs->destroy(mmu);
- }
+ struct msm_gem_address_space *aspace = mdp4_kms->aspace;
if (mdp4_kms->blank_cursor_iova)
msm_gem_put_iova(mdp4_kms->blank_cursor_bo, mdp4_kms->id);
drm_gem_object_unreference_unlocked(mdp4_kms->blank_cursor_bo);
+ if (aspace) {
+ aspace->mmu->funcs->detach(aspace->mmu,
+ iommu_ports, ARRAY_SIZE(iommu_ports));
+ msm_gem_address_space_destroy(aspace);
+ }
+
if (mdp4_kms->rpm_enabled)
pm_runtime_disable(dev);
@@ -440,7 +442,7 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev)
struct mdp4_platform_config *config = mdp4_get_config(pdev);
struct mdp4_kms *mdp4_kms;
struct msm_kms *kms = NULL;
- struct msm_mmu *mmu;
+ struct msm_gem_address_space *aspace;
int irq, ret;
mdp4_kms = kzalloc(sizeof(*mdp4_kms), GFP_KERNEL);
@@ -531,24 +533,26 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev)
mdelay(16);
if (config->iommu) {
- mmu = msm_iommu_new(&pdev->dev, config->iommu);
- if (IS_ERR(mmu)) {
- ret = PTR_ERR(mmu);
+ aspace = msm_gem_address_space_create(&pdev->dev,
+ config->iommu, "mdp4");
+ if (IS_ERR(aspace)) {
+ ret = PTR_ERR(aspace);
goto fail;
}
- ret = mmu->funcs->attach(mmu, iommu_ports,
+
+ mdp4_kms->aspace = aspace;
+
+ ret = aspace->mmu->funcs->attach(aspace->mmu, iommu_ports,
ARRAY_SIZE(iommu_ports));
if (ret)
goto fail;
-
- mdp4_kms->mmu = mmu;
} else {
dev_info(dev->dev, "no iommu, fallback to phys "
"contig buffers for scanout\n");
- mmu = NULL;
+ aspace = NULL;
}
- mdp4_kms->id = msm_register_mmu(dev, mmu);
+ mdp4_kms->id = msm_register_address_space(dev, aspace);
if (mdp4_kms->id < 0) {
ret = mdp4_kms->id;
dev_err(dev->dev, "failed to register mdp4 iommu: %d\n", ret);
@@ -598,6 +602,10 @@ static struct mdp4_platform_config *mdp4_get_config(struct platform_device *dev)
/* TODO: Chips that aren't apq8064 have a 200 Mhz max_clk */
config.max_clk = 266667000;
config.iommu = iommu_domain_alloc(&platform_bus_type);
+ if (config.iommu) {
+ config.iommu->geometry.aperture_start = 0x1000;
+ config.iommu->geometry.aperture_end = 0xffffffff;
+ }
return &config;
}
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h
index 25fb83997119..62712ca164ee 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h
@@ -43,7 +43,7 @@ struct mdp4_kms {
struct clk *pclk;
struct clk *lut_clk;
struct clk *axi_clk;
- struct msm_mmu *mmu;
+ struct msm_gem_address_space *aspace;
struct mdp_irq error_handler;
@@ -51,7 +51,7 @@ struct mdp4_kms {
/* empty/blank cursor bo to use when cursor is "disabled" */
struct drm_gem_object *blank_cursor_bo;
- uint32_t blank_cursor_iova;
+ uint64_t blank_cursor_iova;
};
#define to_mdp4_kms(x) container_of(x, struct mdp4_kms, base)
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c
index 3903dbcda763..911e4690d36a 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c
@@ -40,7 +40,7 @@ enum mdp4_frame_format mdp4_get_frame_format(struct drm_framebuffer *fb)
{
bool is_tile = false;
- if (fb->modifier[1] == DRM_FORMAT_MOD_SAMSUNG_64_32_TILE)
+ if (fb->modifier == DRM_FORMAT_MOD_SAMSUNG_64_32_TILE)
is_tile = true;
if (fb->pixel_format == DRM_FORMAT_NV12 && is_tile)
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h
index ca6ca30650a0..27d5371acee0 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h
@@ -8,9 +8,17 @@ http://github.com/freedreno/envytools/
git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
-- /local/mnt/workspace/source_trees/envytools/rnndb/../rnndb/mdp/mdp5.xml ( 36965 bytes, from 2016-05-10 05:06:30)
-- /local/mnt/workspace/source_trees/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2016-05-09 06:32:54)
-- /local/mnt/workspace/source_trees/envytools/rnndb/mdp/mdp_common.xml ( 2849 bytes, from 2016-01-07 08:45:55)
+- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2015-05-20 20:03:14)
+- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2016-02-10 17:07:21)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2015-05-20 20:03:14)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2849 bytes, from 2015-09-18 12:07:28)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 36965 bytes, from 2016-11-26 23:01:08)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 27887 bytes, from 2015-10-22 16:34:52)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 602 bytes, from 2015-10-22 16:35:02)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2015-05-20 20:03:14)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2015-05-20 20:03:07)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 41472 bytes, from 2016-01-22 18:18:18)
+- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2015-05-20 20:03:14)
Copyright (C) 2013-2016 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c
index 8b4e3004f451..618b2ffed9b4 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c
@@ -550,6 +550,10 @@ static struct mdp5_cfg_platform *mdp5_get_config(struct platform_device *dev)
static struct mdp5_cfg_platform config = {};
config.iommu = iommu_domain_alloc(&platform_bus_type);
+ if (config.iommu) {
+ config.iommu->geometry.aperture_start = 0x1000;
+ config.iommu->geometry.aperture_end = 0xffffffff;
+ }
return &config;
}
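
The hunk above clamps the freshly allocated IOMMU domain's aperture so that iova 0 stays unmapped (NULL-like iovas then fault instead of scanning out garbage) and all addresses fit in 32 bits. A minimal sketch of the same idea, assuming only the generic IOMMU API; alloc_clamped_domain is an illustrative name, not from the patch:

	#include <linux/iommu.h>

	/* sketch: allocate a domain and clamp its aperture so iova 0 is
	 * never handed out and mappings stay below 4 GiB */
	static struct iommu_domain *alloc_clamped_domain(void)
	{
		struct iommu_domain *domain = iommu_domain_alloc(&platform_bus_type);

		if (!domain)
			return NULL;

		domain->geometry.aperture_start = 0x1000;
		domain->geometry.aperture_end = 0xffffffff;
		return domain;
	}
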
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
index c205c360e16d..1ce8a01a5a28 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
@@ -27,11 +27,8 @@
#define CURSOR_WIDTH 64
#define CURSOR_HEIGHT 64
-#define SSPP_MAX (SSPP_RGB3 + 1) /* TODO: Add SSPP_MAX in mdp5.xml.h */
-
struct mdp5_crtc {
struct drm_crtc base;
- char name[8];
int id;
bool enabled;
@@ -102,7 +99,7 @@ static u32 crtc_flush(struct drm_crtc *crtc, u32 flush_mask)
{
struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
- DBG("%s: flush=%08x", mdp5_crtc->name, flush_mask);
+ DBG("%s: flush=%08x", crtc->name, flush_mask);
return mdp5_ctl_commit(mdp5_crtc->ctl, flush_mask);
}
@@ -136,7 +133,6 @@ static void complete_flip(struct drm_crtc *crtc, struct drm_file *file)
struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
struct drm_device *dev = crtc->dev;
struct drm_pending_vblank_event *event;
- struct drm_plane *plane;
unsigned long flags;
spin_lock_irqsave(&dev->event_lock, flags);
@@ -148,16 +144,12 @@ static void complete_flip(struct drm_crtc *crtc, struct drm_file *file)
*/
if (!file || (event->base.file_priv == file)) {
mdp5_crtc->event = NULL;
- DBG("%s: send event: %p", mdp5_crtc->name, event);
+ DBG("%s: send event: %p", crtc->name, event);
drm_crtc_send_vblank_event(crtc, event);
}
}
spin_unlock_irqrestore(&dev->event_lock, flags);
- drm_atomic_crtc_for_each_plane(plane, crtc) {
- mdp5_plane_complete_flip(plane);
- }
-
if (mdp5_crtc->ctl && !crtc->state->enable) {
/* set STAGE_UNUSED for all layers */
mdp5_ctl_blend(mdp5_crtc->ctl, NULL, 0, 0);
@@ -295,7 +287,7 @@ static void mdp5_crtc_mode_set_nofb(struct drm_crtc *crtc)
mode = &crtc->state->adjusted_mode;
DBG("%s: set mode: %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x",
- mdp5_crtc->name, mode->base.id, mode->name,
+ crtc->name, mode->base.id, mode->name,
mode->vrefresh, mode->clock,
mode->hdisplay, mode->hsync_start,
mode->hsync_end, mode->htotal,
@@ -315,7 +307,7 @@ static void mdp5_crtc_disable(struct drm_crtc *crtc)
struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
struct mdp5_kms *mdp5_kms = get_kms(crtc);
- DBG("%s", mdp5_crtc->name);
+ DBG("%s", crtc->name);
if (WARN_ON(!mdp5_crtc->enabled))
return;
@@ -334,7 +326,7 @@ static void mdp5_crtc_enable(struct drm_crtc *crtc)
struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
struct mdp5_kms *mdp5_kms = get_kms(crtc);
- DBG("%s", mdp5_crtc->name);
+ DBG("%s", crtc->name);
if (WARN_ON(mdp5_crtc->enabled))
return;
@@ -372,7 +364,6 @@ static bool is_fullscreen(struct drm_crtc_state *cstate,
static int mdp5_crtc_atomic_check(struct drm_crtc *crtc,
struct drm_crtc_state *state)
{
- struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
struct mdp5_kms *mdp5_kms = get_kms(crtc);
struct drm_plane *plane;
struct drm_device *dev = crtc->dev;
@@ -381,7 +372,7 @@ static int mdp5_crtc_atomic_check(struct drm_crtc *crtc,
const struct drm_plane_state *pstate;
int cnt = 0, base = 0, i;
- DBG("%s: check", mdp5_crtc->name);
+ DBG("%s: check", crtc->name);
drm_atomic_crtc_state_for_each_plane_state(plane, pstate, state) {
pstates[cnt].plane = plane;
@@ -405,14 +396,14 @@ static int mdp5_crtc_atomic_check(struct drm_crtc *crtc,
hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg);
if ((cnt + base) >= hw_cfg->lm.nb_stages) {
- dev_err(dev->dev, "too many planes!\n");
+ dev_err(dev->dev, "too many planes! cnt=%d, base=%d\n", cnt, base);
return -EINVAL;
}
for (i = 0; i < cnt; i++) {
pstates[i].state->stage = STAGE_BASE + i + base;
- DBG("%s: assign pipe %s on stage=%d", mdp5_crtc->name,
- pipe2name(mdp5_plane_pipe(pstates[i].plane)),
+ DBG("%s: assign pipe %s on stage=%d", crtc->name,
+ pstates[i].plane->name,
pstates[i].state->stage);
}
@@ -422,8 +413,7 @@ static int mdp5_crtc_atomic_check(struct drm_crtc *crtc,
static void mdp5_crtc_atomic_begin(struct drm_crtc *crtc,
struct drm_crtc_state *old_crtc_state)
{
- struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
- DBG("%s: begin", mdp5_crtc->name);
+ DBG("%s: begin", crtc->name);
}
static void mdp5_crtc_atomic_flush(struct drm_crtc *crtc,
@@ -433,7 +423,7 @@ static void mdp5_crtc_atomic_flush(struct drm_crtc *crtc,
struct drm_device *dev = crtc->dev;
unsigned long flags;
- DBG("%s: event: %p", mdp5_crtc->name, crtc->state->event);
+ DBG("%s: event: %p", crtc->name, crtc->state->event);
WARN_ON(mdp5_crtc->event);
@@ -499,7 +489,8 @@ static int mdp5_crtc_cursor_set(struct drm_crtc *crtc,
struct drm_device *dev = crtc->dev;
struct mdp5_kms *mdp5_kms = get_kms(crtc);
struct drm_gem_object *cursor_bo, *old_bo = NULL;
- uint32_t blendcfg, cursor_addr, stride;
+ uint32_t blendcfg, stride;
+ uint64_t cursor_addr;
int ret, lm;
enum mdp5_cursor_alpha cur_alpha = CURSOR_ALPHA_PER_PIXEL;
uint32_t flush_mask = mdp_ctl_flush_mask_cursor(0);
@@ -653,7 +644,7 @@ static void mdp5_crtc_err_irq(struct mdp_irq *irq, uint32_t irqstatus)
{
struct mdp5_crtc *mdp5_crtc = container_of(irq, struct mdp5_crtc, err);
- DBG("%s: error: %08x", mdp5_crtc->name, irqstatus);
+ DBG("%s: error: %08x", mdp5_crtc->base.name, irqstatus);
}
static void mdp5_crtc_pp_done_irq(struct mdp_irq *irq, uint32_t irqstatus)
@@ -775,9 +766,6 @@ struct drm_crtc *mdp5_crtc_init(struct drm_device *dev,
mdp5_crtc->vblank.irq = mdp5_crtc_vblank_irq;
mdp5_crtc->err.irq = mdp5_crtc_err_irq;
- snprintf(mdp5_crtc->name, sizeof(mdp5_crtc->name), "%s:%d",
- pipe2name(mdp5_plane_pipe(plane)), id);
-
drm_crtc_init_with_planes(dev, crtc, plane, NULL, &mdp5_crtc_funcs,
NULL);
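
Dropping the driver-private name field works because drm_crtc_init_with_planes() now assigns a core-managed crtc->name, so every DBG() site can use it directly. A reduced sketch of the pattern, with my_crtc/log_crtc as illustrative names:

	#include <drm/drm_crtc.h>

	/* subclass keeps only driver state; the name lives in the base */
	struct my_crtc {
		struct drm_crtc base;
		int id;
	};
	#define to_my_crtc(x) container_of(x, struct my_crtc, base)

	static void log_crtc(struct drm_crtc *crtc)
	{
		pr_debug("%s: id=%d\n", crtc->name, to_my_crtc(crtc)->id);
	}
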
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c
index d53e5510fd7c..3ce8b9dec9c1 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c
@@ -17,6 +17,8 @@
#include <linux/irq.h>
+#include <drm/drm_print.h>
+
#include "msm_drv.h"
#include "mdp5_kms.h"
@@ -30,7 +32,18 @@ void mdp5_set_irqmask(struct mdp_kms *mdp_kms, uint32_t irqmask,
static void mdp5_irq_error_handler(struct mdp_irq *irq, uint32_t irqstatus)
{
+ struct mdp5_kms *mdp5_kms = container_of(irq, struct mdp5_kms, error_handler);
+ static DEFINE_RATELIMIT_STATE(rs, 5*HZ, 1);
+ extern bool dumpstate;
+
DRM_ERROR_RATELIMITED("errors: %08x\n", irqstatus);
+
+ if (dumpstate && __ratelimit(&rs)) {
+ struct drm_printer p = drm_info_printer(mdp5_kms->dev->dev);
+ drm_state_dump(mdp5_kms->dev, &p);
+ if (mdp5_kms->smp)
+ mdp5_smp_dump(mdp5_kms->smp, &p);
+ }
}
void mdp5_irq_preinstall(struct msm_kms *kms)
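
The error handler above pairs DRM's ratelimited error print with a second, slower ratelimit gating the expensive full state dump. The ratelimit half as a standalone sketch, assuming only <linux/ratelimit.h>; handle_error_irq is an illustrative name:

	#include <linux/ratelimit.h>

	static void handle_error_irq(u32 status)
	{
		/* at most one print per 5 seconds, however often errors fire */
		static DEFINE_RATELIMIT_STATE(rs, 5 * HZ, 1);

		if (__ratelimit(&rs))
			pr_err("hw error status: %08x\n", status);
	}
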
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
index ed7143d35b25..5f6cd8745dbc 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
@@ -19,6 +19,7 @@
#include <linux/of_irq.h>
#include "msm_drv.h"
+#include "msm_gem.h"
#include "msm_mmu.h"
#include "mdp5_kms.h"
@@ -71,10 +72,49 @@ static int mdp5_hw_init(struct msm_kms *kms)
return 0;
}
+struct mdp5_state *mdp5_get_state(struct drm_atomic_state *s)
+{
+ struct msm_drm_private *priv = s->dev->dev_private;
+ struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(priv->kms));
+ struct msm_kms_state *state = to_kms_state(s);
+ struct mdp5_state *new_state;
+ int ret;
+
+ if (state->state)
+ return state->state;
+
+ ret = drm_modeset_lock(&mdp5_kms->state_lock, s->acquire_ctx);
+ if (ret)
+ return ERR_PTR(ret);
+
+ new_state = kmalloc(sizeof(*mdp5_kms->state), GFP_KERNEL);
+ if (!new_state)
+ return ERR_PTR(-ENOMEM);
+
+ /* Copy state: */
+ new_state->hwpipe = mdp5_kms->state->hwpipe;
+ if (mdp5_kms->smp)
+ new_state->smp = mdp5_kms->state->smp;
+
+ state->state = new_state;
+
+ return new_state;
+}
+
+static void mdp5_swap_state(struct msm_kms *kms, struct drm_atomic_state *state)
+{
+ struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
+ swap(to_kms_state(state)->state, mdp5_kms->state);
+}
+
static void mdp5_prepare_commit(struct msm_kms *kms, struct drm_atomic_state *state)
{
struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
+
mdp5_enable(mdp5_kms);
+
+ if (mdp5_kms->smp)
+ mdp5_smp_prepare_commit(mdp5_kms->smp, &mdp5_kms->state->smp);
}
static void mdp5_complete_commit(struct msm_kms *kms, struct drm_atomic_state *state)
@@ -87,6 +127,9 @@ static void mdp5_complete_commit(struct msm_kms *kms, struct drm_atomic_state *s
for_each_plane_in_state(state, plane, plane_state, i)
mdp5_plane_complete_commit(plane, plane_state);
+ if (mdp5_kms->smp)
+ mdp5_smp_complete_commit(mdp5_kms->smp, &mdp5_kms->state->smp);
+
mdp5_disable(mdp5_kms);
}
@@ -117,14 +160,66 @@ static int mdp5_set_split_display(struct msm_kms *kms,
static void mdp5_kms_destroy(struct msm_kms *kms)
{
struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
- struct msm_mmu *mmu = mdp5_kms->mmu;
+ struct msm_gem_address_space *aspace = mdp5_kms->aspace;
+ int i;
+
+ for (i = 0; i < mdp5_kms->num_hwpipes; i++)
+ mdp5_pipe_destroy(mdp5_kms->hwpipes[i]);
- if (mmu) {
- mmu->funcs->detach(mmu, iommu_ports, ARRAY_SIZE(iommu_ports));
- mmu->funcs->destroy(mmu);
+ if (aspace) {
+ aspace->mmu->funcs->detach(aspace->mmu,
+ iommu_ports, ARRAY_SIZE(iommu_ports));
+ msm_gem_address_space_destroy(aspace);
}
}
+#ifdef CONFIG_DEBUG_FS
+static int smp_show(struct seq_file *m, void *arg)
+{
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_device *dev = node->minor->dev;
+ struct msm_drm_private *priv = dev->dev_private;
+ struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(priv->kms));
+ struct drm_printer p = drm_seq_file_printer(m);
+
+ if (!mdp5_kms->smp) {
+ drm_printf(&p, "no SMP pool\n");
+ return 0;
+ }
+
+ mdp5_smp_dump(mdp5_kms->smp, &p);
+
+ return 0;
+}
+
+static struct drm_info_list mdp5_debugfs_list[] = {
+ {"smp", smp_show },
+};
+
+static int mdp5_kms_debugfs_init(struct msm_kms *kms, struct drm_minor *minor)
+{
+ struct drm_device *dev = minor->dev;
+ int ret;
+
+ ret = drm_debugfs_create_files(mdp5_debugfs_list,
+ ARRAY_SIZE(mdp5_debugfs_list),
+ minor->debugfs_root, minor);
+
+ if (ret) {
+ dev_err(dev->dev, "could not install mdp5_debugfs_list\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static void mdp5_kms_debugfs_cleanup(struct msm_kms *kms, struct drm_minor *minor)
+{
+ drm_debugfs_remove_files(mdp5_debugfs_list,
+ ARRAY_SIZE(mdp5_debugfs_list), minor);
+}
+#endif
+
static const struct mdp_kms_funcs kms_funcs = {
.base = {
.hw_init = mdp5_hw_init,
@@ -134,6 +229,7 @@ static const struct mdp_kms_funcs kms_funcs = {
.irq = mdp5_irq,
.enable_vblank = mdp5_enable_vblank,
.disable_vblank = mdp5_disable_vblank,
+ .swap_state = mdp5_swap_state,
.prepare_commit = mdp5_prepare_commit,
.complete_commit = mdp5_complete_commit,
.wait_for_crtc_commit_done = mdp5_wait_for_crtc_commit_done,
@@ -141,6 +237,10 @@ static const struct mdp_kms_funcs kms_funcs = {
.round_pixclk = mdp5_round_pixclk,
.set_split_display = mdp5_set_split_display,
.destroy = mdp5_kms_destroy,
+#ifdef CONFIG_DEBUG_FS
+ .debugfs_init = mdp5_kms_debugfs_init,
+ .debugfs_cleanup = mdp5_kms_debugfs_cleanup,
+#endif
},
.set_irqmask = mdp5_set_irqmask,
};
@@ -321,15 +421,6 @@ static int modeset_init_intf(struct mdp5_kms *mdp5_kms, int intf_num)
static int modeset_init(struct mdp5_kms *mdp5_kms)
{
- static const enum mdp5_pipe crtcs[] = {
- SSPP_RGB0, SSPP_RGB1, SSPP_RGB2, SSPP_RGB3,
- };
- static const enum mdp5_pipe vig_planes[] = {
- SSPP_VIG0, SSPP_VIG1, SSPP_VIG2, SSPP_VIG3,
- };
- static const enum mdp5_pipe dma_planes[] = {
- SSPP_DMA0, SSPP_DMA1,
- };
struct drm_device *dev = mdp5_kms->dev;
struct msm_drm_private *priv = dev->dev_private;
const struct mdp5_cfg_hw *hw_cfg;
@@ -337,58 +428,35 @@ static int modeset_init(struct mdp5_kms *mdp5_kms)
hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg);
- /* construct CRTCs and their private planes: */
- for (i = 0; i < hw_cfg->pipe_rgb.count; i++) {
+ /* Construct planes equaling the number of hw pipes, and CRTCs
+ * for the N layer-mixers (LM). The first N planes become primary
+ * planes for the CRTCs, with the remainder as overlay planes:
+ */
+ for (i = 0; i < mdp5_kms->num_hwpipes; i++) {
+ bool primary = i < mdp5_cfg->lm.count;
struct drm_plane *plane;
struct drm_crtc *crtc;
- plane = mdp5_plane_init(dev, crtcs[i], true,
- hw_cfg->pipe_rgb.base[i], hw_cfg->pipe_rgb.caps);
+ plane = mdp5_plane_init(dev, primary);
if (IS_ERR(plane)) {
ret = PTR_ERR(plane);
- dev_err(dev->dev, "failed to construct plane for %s (%d)\n",
- pipe2name(crtcs[i]), ret);
+ dev_err(dev->dev, "failed to construct plane %d (%d)\n", i, ret);
goto fail;
}
+ priv->planes[priv->num_planes++] = plane;
+
+ if (!primary)
+ continue;
crtc = mdp5_crtc_init(dev, plane, i);
if (IS_ERR(crtc)) {
ret = PTR_ERR(crtc);
- dev_err(dev->dev, "failed to construct crtc for %s (%d)\n",
- pipe2name(crtcs[i]), ret);
+ dev_err(dev->dev, "failed to construct crtc %d (%d)\n", i, ret);
goto fail;
}
priv->crtcs[priv->num_crtcs++] = crtc;
}
- /* Construct video planes: */
- for (i = 0; i < hw_cfg->pipe_vig.count; i++) {
- struct drm_plane *plane;
-
- plane = mdp5_plane_init(dev, vig_planes[i], false,
- hw_cfg->pipe_vig.base[i], hw_cfg->pipe_vig.caps);
- if (IS_ERR(plane)) {
- ret = PTR_ERR(plane);
- dev_err(dev->dev, "failed to construct %s plane: %d\n",
- pipe2name(vig_planes[i]), ret);
- goto fail;
- }
- }
-
- /* DMA planes */
- for (i = 0; i < hw_cfg->pipe_dma.count; i++) {
- struct drm_plane *plane;
-
- plane = mdp5_plane_init(dev, dma_planes[i], false,
- hw_cfg->pipe_dma.base[i], hw_cfg->pipe_dma.caps);
- if (IS_ERR(plane)) {
- ret = PTR_ERR(plane);
- dev_err(dev->dev, "failed to construct %s plane: %d\n",
- pipe2name(dma_planes[i]), ret);
- goto fail;
- }
- }
-
/* Construct encoders and modeset initialize connector devices
* for each external display interface.
*/
@@ -564,7 +632,7 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev)
struct mdp5_kms *mdp5_kms;
struct mdp5_cfg *config;
struct msm_kms *kms;
- struct msm_mmu *mmu;
+ struct msm_gem_address_space *aspace;
int irq, i, ret;
/* priv->kms would have been populated by the MDP5 driver */
@@ -606,30 +674,29 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev)
mdelay(16);
if (config->platform.iommu) {
- mmu = msm_iommu_new(&pdev->dev, config->platform.iommu);
- if (IS_ERR(mmu)) {
- ret = PTR_ERR(mmu);
- dev_err(&pdev->dev, "failed to init iommu: %d\n", ret);
- iommu_domain_free(config->platform.iommu);
+ aspace = msm_gem_address_space_create(&pdev->dev,
+ config->platform.iommu, "mdp5");
+ if (IS_ERR(aspace)) {
+ ret = PTR_ERR(aspace);
goto fail;
}
- ret = mmu->funcs->attach(mmu, iommu_ports,
+ mdp5_kms->aspace = aspace;
+
+ ret = aspace->mmu->funcs->attach(aspace->mmu, iommu_ports,
ARRAY_SIZE(iommu_ports));
if (ret) {
dev_err(&pdev->dev, "failed to attach iommu: %d\n",
ret);
- mmu->funcs->destroy(mmu);
goto fail;
}
} else {
dev_info(&pdev->dev,
"no iommu, fallback to phys contig buffers for scanout\n");
- mmu = NULL;
+ aspace = NULL;
}
- mdp5_kms->mmu = mmu;
- mdp5_kms->id = msm_register_mmu(dev, mmu);
+ mdp5_kms->id = msm_register_address_space(dev, aspace);
if (mdp5_kms->id < 0) {
ret = mdp5_kms->id;
dev_err(&pdev->dev, "failed to register mdp5 iommu: %d\n", ret);
@@ -644,8 +711,8 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev)
dev->mode_config.min_width = 0;
dev->mode_config.min_height = 0;
- dev->mode_config.max_width = config->hw->lm.max_width;
- dev->mode_config.max_height = config->hw->lm.max_height;
+ dev->mode_config.max_width = 0xffff;
+ dev->mode_config.max_height = 0xffff;
dev->driver->get_vblank_timestamp = mdp5_get_vblank_timestamp;
dev->driver->get_scanout_position = mdp5_get_scanoutpos;
@@ -673,6 +740,69 @@ static void mdp5_destroy(struct platform_device *pdev)
if (mdp5_kms->rpm_enabled)
pm_runtime_disable(&pdev->dev);
+
+ kfree(mdp5_kms->state);
+}
+
+static int construct_pipes(struct mdp5_kms *mdp5_kms, int cnt,
+ const enum mdp5_pipe *pipes, const uint32_t *offsets,
+ uint32_t caps)
+{
+ struct drm_device *dev = mdp5_kms->dev;
+ int i, ret;
+
+ for (i = 0; i < cnt; i++) {
+ struct mdp5_hw_pipe *hwpipe;
+
+ hwpipe = mdp5_pipe_init(pipes[i], offsets[i], caps);
+ if (IS_ERR(hwpipe)) {
+ ret = PTR_ERR(hwpipe);
+ dev_err(dev->dev, "failed to construct pipe for %s (%d)\n",
+ pipe2name(pipes[i]), ret);
+ return ret;
+ }
+ hwpipe->idx = mdp5_kms->num_hwpipes;
+ mdp5_kms->hwpipes[mdp5_kms->num_hwpipes++] = hwpipe;
+ }
+
+ return 0;
+}
+
+static int hwpipe_init(struct mdp5_kms *mdp5_kms)
+{
+ static const enum mdp5_pipe rgb_planes[] = {
+ SSPP_RGB0, SSPP_RGB1, SSPP_RGB2, SSPP_RGB3,
+ };
+ static const enum mdp5_pipe vig_planes[] = {
+ SSPP_VIG0, SSPP_VIG1, SSPP_VIG2, SSPP_VIG3,
+ };
+ static const enum mdp5_pipe dma_planes[] = {
+ SSPP_DMA0, SSPP_DMA1,
+ };
+ const struct mdp5_cfg_hw *hw_cfg;
+ int ret;
+
+ hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg);
+
+ /* Construct RGB pipes: */
+ ret = construct_pipes(mdp5_kms, hw_cfg->pipe_rgb.count, rgb_planes,
+ hw_cfg->pipe_rgb.base, hw_cfg->pipe_rgb.caps);
+ if (ret)
+ return ret;
+
+ /* Construct video (VIG) pipes: */
+ ret = construct_pipes(mdp5_kms, hw_cfg->pipe_vig.count, vig_planes,
+ hw_cfg->pipe_vig.base, hw_cfg->pipe_vig.caps);
+ if (ret)
+ return ret;
+
+ /* Construct DMA pipes: */
+ ret = construct_pipes(mdp5_kms, hw_cfg->pipe_dma.count, dma_planes,
+ hw_cfg->pipe_dma.base, hw_cfg->pipe_dma.caps);
+ if (ret)
+ return ret;
+
+ return 0;
}
static int mdp5_init(struct platform_device *pdev, struct drm_device *dev)
@@ -696,6 +826,13 @@ static int mdp5_init(struct platform_device *pdev, struct drm_device *dev)
mdp5_kms->dev = dev;
mdp5_kms->pdev = pdev;
+ drm_modeset_lock_init(&mdp5_kms->state_lock);
+ mdp5_kms->state = kzalloc(sizeof(*mdp5_kms->state), GFP_KERNEL);
+ if (!mdp5_kms->state) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
mdp5_kms->mmio = msm_ioremap(pdev, "mdp_phys", "MDP5");
if (IS_ERR(mdp5_kms->mmio)) {
ret = PTR_ERR(mdp5_kms->mmio);
@@ -749,7 +886,7 @@ static int mdp5_init(struct platform_device *pdev, struct drm_device *dev)
* this section initializes the SMP:
*/
if (mdp5_kms->caps & MDP_CAP_SMP) {
- mdp5_kms->smp = mdp5_smp_init(mdp5_kms->dev, &config->hw->smp);
+ mdp5_kms->smp = mdp5_smp_init(mdp5_kms, &config->hw->smp);
if (IS_ERR(mdp5_kms->smp)) {
ret = PTR_ERR(mdp5_kms->smp);
mdp5_kms->smp = NULL;
@@ -764,6 +901,10 @@ static int mdp5_init(struct platform_device *pdev, struct drm_device *dev)
goto fail;
}
+ ret = hwpipe_init(mdp5_kms);
+ if (ret)
+ goto fail;
+
/* set uninit-ed kms */
priv->kms = &mdp5_kms->base.base;
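
mdp5_get_state()/mdp5_swap_state() above implement a clone-modify-swap scheme for global state: check paths mutate a copy taken under state_lock, and only a successful commit swaps the copy in, so test-only or failed updates are discarded for free. A reduced sketch of the scheme under those assumptions; global_state, dup_state and commit_swap are hypothetical names:

	#include <linux/kernel.h>
	#include <linux/slab.h>

	struct global_state { int shared_resource; };

	/* clone under the caller's lock; a failed update just frees the copy */
	static struct global_state *dup_state(const struct global_state *cur)
	{
		struct global_state *copy = kmalloc(sizeof(*copy), GFP_KERNEL);

		if (copy)
			*copy = *cur;
		return copy;
	}

	/* on successful commit the staged copy becomes current; the old
	 * current is freed along with the atomic state it now hangs off of */
	static void commit_swap(struct global_state **cur, struct global_state **staged)
	{
		swap(*cur, *staged);
	}
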
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
index 03738927be10..17b0cc101171 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
@@ -24,8 +24,11 @@
#include "mdp5_cfg.h" /* must be included before mdp5.xml.h */
#include "mdp5.xml.h"
#include "mdp5_ctl.h"
+#include "mdp5_pipe.h"
#include "mdp5_smp.h"
+struct mdp5_state;
+
struct mdp5_kms {
struct mdp_kms base;
@@ -33,13 +36,21 @@ struct mdp5_kms {
struct platform_device *pdev;
+ unsigned num_hwpipes;
+ struct mdp5_hw_pipe *hwpipes[SSPP_MAX];
+
struct mdp5_cfg_handler *cfg;
uint32_t caps; /* MDP capabilities (MDP_CAP_XXX bits) */
+ /**
+ * Global atomic state. Do not access directly, use mdp5_get_state()
+ */
+ struct mdp5_state *state;
+ struct drm_modeset_lock state_lock;
/* mapper-id used to request GEM buffer mapped for scanout: */
int id;
- struct msm_mmu *mmu;
+ struct msm_gem_address_space *aspace;
struct mdp5_smp *smp;
struct mdp5_ctl_manager *ctlm;
@@ -65,9 +76,27 @@ struct mdp5_kms {
};
#define to_mdp5_kms(x) container_of(x, struct mdp5_kms, base)
+/* Global atomic state for tracking resources that are shared across
+ * multiple kms objects (planes/crtcs/etc).
+ *
+ * For atomic updates which require modifying global state, the state
+ * is first cloned via mdp5_get_state() (which takes state_lock), the
+ * copy is modified, and mdp5_swap_state() installs it at commit time.
+ */
+struct mdp5_state {
+ struct mdp5_hw_pipe_state hwpipe;
+ struct mdp5_smp_state smp;
+};
+
+struct mdp5_state *__must_check
+mdp5_get_state(struct drm_atomic_state *s);
+
+/* Atomic plane state. Subclasses the base drm_plane_state in order to
+ * track assigned hwpipe and hw specific state.
+ */
struct mdp5_plane_state {
struct drm_plane_state base;
+ struct mdp5_hw_pipe *hwpipe;
+
/* aligned with property */
uint8_t premultiplied;
uint8_t zpos;
@@ -76,11 +105,6 @@ struct mdp5_plane_state {
/* assigned by crtc blender */
enum mdp_mixer_stage_id stage;
- /* some additional transactional status to help us know in the
- * apply path whether we need to update SMP allocation, and
- * whether current update is still pending:
- */
- bool mode_changed : 1;
bool pending : 1;
};
#define to_mdp5_plane_state(x) \
@@ -114,6 +138,18 @@ static inline u32 mdp5_read(struct mdp5_kms *mdp5_kms, u32 reg)
return msm_readl(mdp5_kms->mmio + reg);
}
+static inline const char *stage2name(enum mdp_mixer_stage_id stage)
+{
+ static const char *names[] = {
+#define NAME(n) [n] = #n
+ NAME(STAGE_UNUSED), NAME(STAGE_BASE),
+ NAME(STAGE0), NAME(STAGE1), NAME(STAGE2),
+ NAME(STAGE3), NAME(STAGE4), NAME(STAGE6),
+#undef NAME
+ };
+ return names[stage];
+}
+
static inline const char *pipe2name(enum mdp5_pipe pipe)
{
static const char *names[] = {
@@ -196,13 +232,10 @@ int mdp5_irq_domain_init(struct mdp5_kms *mdp5_kms);
void mdp5_irq_domain_fini(struct mdp5_kms *mdp5_kms);
uint32_t mdp5_plane_get_flush(struct drm_plane *plane);
-void mdp5_plane_complete_flip(struct drm_plane *plane);
void mdp5_plane_complete_commit(struct drm_plane *plane,
struct drm_plane_state *state);
enum mdp5_pipe mdp5_plane_pipe(struct drm_plane *plane);
-struct drm_plane *mdp5_plane_init(struct drm_device *dev,
- enum mdp5_pipe pipe, bool private_plane,
- uint32_t reg_offset, uint32_t caps);
+struct drm_plane *mdp5_plane_init(struct drm_device *dev, bool primary);
uint32_t mdp5_crtc_vblank(struct drm_crtc *crtc);
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_pipe.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_pipe.c
new file mode 100644
index 000000000000..1ae9dc8d260d
--- /dev/null
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_pipe.c
@@ -0,0 +1,133 @@
+/*
+ * Copyright (C) 2016 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "mdp5_kms.h"
+
+struct mdp5_hw_pipe *mdp5_pipe_assign(struct drm_atomic_state *s,
+ struct drm_plane *plane, uint32_t caps, uint32_t blkcfg)
+{
+ struct msm_drm_private *priv = s->dev->dev_private;
+ struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(priv->kms));
+ struct mdp5_state *state;
+ struct mdp5_hw_pipe_state *old_state, *new_state;
+ struct mdp5_hw_pipe *hwpipe = NULL;
+ int i;
+
+ state = mdp5_get_state(s);
+ if (IS_ERR(state))
+ return ERR_CAST(state);
+
+ /* grab old_state after mdp5_get_state(), since now we hold lock: */
+ old_state = &mdp5_kms->state->hwpipe;
+ new_state = &state->hwpipe;
+
+ for (i = 0; i < mdp5_kms->num_hwpipes; i++) {
+ struct mdp5_hw_pipe *cur = mdp5_kms->hwpipes[i];
+
+ /* skip if already in-use.. check both new and old state,
+ * since we cannot immediately re-use a pipe that is
+ * released in the current update in some cases:
+ * (1) mdp5 can have SMP (non-double-buffered)
+ * (2) hw pipe previously assigned to different CRTC
+ * (vblanks might not be aligned)
+ */
+ if (new_state->hwpipe_to_plane[cur->idx] ||
+ old_state->hwpipe_to_plane[cur->idx])
+ continue;
+
+ /* skip if doesn't support some required caps: */
+ if (caps & ~cur->caps)
+ continue;
+
+ /* possible candidate, take the one with the
+ * fewest unneeded caps bits set:
+ */
+ if (!hwpipe || (hweight_long(cur->caps & ~caps) <
+ hweight_long(hwpipe->caps & ~caps)))
+ hwpipe = cur;
+ }
+
+ if (!hwpipe)
+ return ERR_PTR(-ENOMEM);
+
+ if (mdp5_kms->smp) {
+ int ret;
+
+ DBG("%s: alloc SMP blocks", hwpipe->name);
+ ret = mdp5_smp_assign(mdp5_kms->smp, &state->smp,
+ hwpipe->pipe, blkcfg);
+ if (ret)
+ return ERR_PTR(-ENOMEM);
+
+ hwpipe->blkcfg = blkcfg;
+ }
+
+ DBG("%s: assign to plane %s for caps %x",
+ hwpipe->name, plane->name, caps);
+ new_state->hwpipe_to_plane[hwpipe->idx] = plane;
+
+ return hwpipe;
+}
+
+void mdp5_pipe_release(struct drm_atomic_state *s, struct mdp5_hw_pipe *hwpipe)
+{
+ struct msm_drm_private *priv = s->dev->dev_private;
+ struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(priv->kms));
+ struct mdp5_state *state = mdp5_get_state(s);
+ struct mdp5_hw_pipe_state *new_state = &state->hwpipe;
+
+ if (!hwpipe)
+ return;
+
+ if (WARN_ON(!new_state->hwpipe_to_plane[hwpipe->idx]))
+ return;
+
+ DBG("%s: release from plane %s", hwpipe->name,
+ new_state->hwpipe_to_plane[hwpipe->idx]->name);
+
+ if (mdp5_kms->smp) {
+ DBG("%s: free SMP blocks", hwpipe->name);
+ mdp5_smp_release(mdp5_kms->smp, &state->smp, hwpipe->pipe);
+ }
+
+ new_state->hwpipe_to_plane[hwpipe->idx] = NULL;
+}
+
+void mdp5_pipe_destroy(struct mdp5_hw_pipe *hwpipe)
+{
+ kfree(hwpipe);
+}
+
+struct mdp5_hw_pipe *mdp5_pipe_init(enum mdp5_pipe pipe,
+ uint32_t reg_offset, uint32_t caps)
+{
+ struct mdp5_hw_pipe *hwpipe;
+
+ hwpipe = kzalloc(sizeof(*hwpipe), GFP_KERNEL);
+ if (!hwpipe)
+ return ERR_PTR(-ENOMEM);
+
+ hwpipe->name = pipe2name(pipe);
+ hwpipe->pipe = pipe;
+ hwpipe->reg_offset = reg_offset;
+ hwpipe->caps = caps;
+ hwpipe->flush_mask = mdp_ctl_flush_mask_pipe(pipe);
+
+ spin_lock_init(&hwpipe->pipe_lock);
+
+ return hwpipe;
+}
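
mdp5_pipe_assign() above picks, among free pipes whose caps superset the request, the one with the fewest surplus capability bits (the hweight_long() comparison). The same best-fit rule as a self-contained function, with __builtin_popcount standing in for hweight_long; best_fit is an illustrative name:

	#include <stdint.h>

	/* returns index of the cheapest pipe satisfying 'want', or -1 */
	static int best_fit(const uint32_t *pipe_caps, int npipes, uint32_t want)
	{
		int i, best = -1;

		for (i = 0; i < npipes; i++) {
			if (want & ~pipe_caps[i])
				continue;	/* missing a required cap */
			if (best < 0 ||
			    __builtin_popcount(pipe_caps[i] & ~want) <
			    __builtin_popcount(pipe_caps[best] & ~want))
				best = i;
		}
		return best;
	}
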
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_pipe.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_pipe.h
new file mode 100644
index 000000000000..611da7a660c9
--- /dev/null
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_pipe.h
@@ -0,0 +1,56 @@
+/*
+ * Copyright (C) 2016 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __MDP5_PIPE_H__
+#define __MDP5_PIPE_H__
+
+#define SSPP_MAX (SSPP_RGB3 + 1) /* TODO: Add SSPP_MAX in mdp5.xml.h */
+
+/* represents a hw pipe, which is dynamically assigned to a plane */
+struct mdp5_hw_pipe {
+ int idx;
+
+ const char *name;
+ enum mdp5_pipe pipe;
+
+ spinlock_t pipe_lock; /* protect REG_MDP5_PIPE_* registers */
+ uint32_t reg_offset;
+ uint32_t caps;
+
+ uint32_t flush_mask; /* used to commit pipe registers */
+
+ /* number of smp blocks per plane, ie:
+ * nblks_y | (nblks_u << 8) | (nblks_v << 16)
+ */
+ uint32_t blkcfg;
+};
+
+/* global atomic state of assignment between pipes and planes: */
+struct mdp5_hw_pipe_state {
+ struct drm_plane *hwpipe_to_plane[SSPP_MAX];
+};
+
+struct mdp5_hw_pipe *__must_check
+mdp5_pipe_assign(struct drm_atomic_state *s, struct drm_plane *plane,
+ uint32_t caps, uint32_t blkcfg);
+void mdp5_pipe_release(struct drm_atomic_state *s, struct mdp5_hw_pipe *hwpipe);
+
+struct mdp5_hw_pipe *mdp5_pipe_init(enum mdp5_pipe pipe,
+ uint32_t reg_offset, uint32_t caps);
+void mdp5_pipe_destroy(struct mdp5_hw_pipe *hwpipe);
+
+#endif /* __MDP5_PIPE_H__ */
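
The blkcfg word documented in mdp5_hw_pipe packs one byte of SMP block count per color component, and mdp5_smp_assign() consumes it one byte at a time (blkcfg & 0xff, then blkcfg >>= 8). Hedged helpers that make the encoding explicit; pack_blkcfg/blkcfg_nblks are illustrative names, not part of the patch:

	#include <stdint.h>

	/* nblks_y | (nblks_u << 8) | (nblks_v << 16), as documented above */
	static inline uint32_t pack_blkcfg(unsigned y, unsigned u, unsigned v)
	{
		return (y & 0xff) | ((u & 0xff) << 8) | ((v & 0xff) << 16);
	}

	static inline unsigned blkcfg_nblks(uint32_t blkcfg, int plane)
	{
		return (blkcfg >> (8 * plane)) & 0xff;
	}
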
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
index 83bf997dda03..c099da7bc212 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
@@ -16,19 +16,11 @@
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
+#include <drm/drm_print.h>
#include "mdp5_kms.h"
struct mdp5_plane {
struct drm_plane base;
- const char *name;
-
- enum mdp5_pipe pipe;
-
- spinlock_t pipe_lock; /* protect REG_MDP5_PIPE_* registers */
- uint32_t reg_offset;
- uint32_t caps;
-
- uint32_t flush_mask; /* used to commit pipe registers */
uint32_t nformats;
uint32_t formats[32];
@@ -69,21 +61,12 @@ static void mdp5_plane_destroy(struct drm_plane *plane)
static void mdp5_plane_install_rotation_property(struct drm_device *dev,
struct drm_plane *plane)
{
- struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
-
- if (!(mdp5_plane->caps & MDP_PIPE_CAP_HFLIP) &&
- !(mdp5_plane->caps & MDP_PIPE_CAP_VFLIP))
- return;
-
- if (!dev->mode_config.rotation_property)
- dev->mode_config.rotation_property =
- drm_mode_create_rotation_property(dev,
- DRM_ROTATE_0 | DRM_REFLECT_X | DRM_REFLECT_Y);
-
- if (dev->mode_config.rotation_property)
- drm_object_attach_property(&plane->base,
- dev->mode_config.rotation_property,
- DRM_ROTATE_0);
+ drm_plane_create_rotation_property(plane,
+ DRM_ROTATE_0,
+ DRM_ROTATE_0 |
+ DRM_ROTATE_180 |
+ DRM_REFLECT_X |
+ DRM_REFLECT_Y);
}
/* helper to install properties which are common to planes and crtcs */
@@ -184,6 +167,21 @@ done:
#undef SET_PROPERTY
}
+static void
+mdp5_plane_atomic_print_state(struct drm_printer *p,
+ const struct drm_plane_state *state)
+{
+ struct mdp5_plane_state *pstate = to_mdp5_plane_state(state);
+
+ drm_printf(p, "\thwpipe=%s\n", pstate->hwpipe ?
+ pstate->hwpipe->name : "(null)");
+ drm_printf(p, "\tpremultiplied=%u\n", pstate->premultiplied);
+ drm_printf(p, "\tzpos=%u\n", pstate->zpos);
+ drm_printf(p, "\talpha=%u\n", pstate->alpha);
+ drm_printf(p, "\tstage=%s\n", stage2name(pstate->stage));
+ drm_printf(p, "\tpending=%u\n", pstate->pending);
+}
+
static void mdp5_plane_reset(struct drm_plane *plane)
{
struct mdp5_plane_state *mdp5_state;
@@ -222,7 +220,6 @@ mdp5_plane_duplicate_state(struct drm_plane *plane)
if (mdp5_state && mdp5_state->base.fb)
drm_framebuffer_reference(mdp5_state->base.fb);
- mdp5_state->mode_changed = false;
mdp5_state->pending = false;
return &mdp5_state->base;
@@ -231,10 +228,12 @@ mdp5_plane_duplicate_state(struct drm_plane *plane)
static void mdp5_plane_destroy_state(struct drm_plane *plane,
struct drm_plane_state *state)
{
+ struct mdp5_plane_state *pstate = to_mdp5_plane_state(state);
+
if (state->fb)
drm_framebuffer_unreference(state->fb);
- kfree(to_mdp5_plane_state(state));
+ kfree(pstate);
}
static const struct drm_plane_funcs mdp5_plane_funcs = {
@@ -247,99 +246,121 @@ static const struct drm_plane_funcs mdp5_plane_funcs = {
.reset = mdp5_plane_reset,
.atomic_duplicate_state = mdp5_plane_duplicate_state,
.atomic_destroy_state = mdp5_plane_destroy_state,
+ .atomic_print_state = mdp5_plane_atomic_print_state,
};
static int mdp5_plane_prepare_fb(struct drm_plane *plane,
struct drm_plane_state *new_state)
{
- struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
struct mdp5_kms *mdp5_kms = get_kms(plane);
struct drm_framebuffer *fb = new_state->fb;
if (!new_state->fb)
return 0;
- DBG("%s: prepare: FB[%u]", mdp5_plane->name, fb->base.id);
+ DBG("%s: prepare: FB[%u]", plane->name, fb->base.id);
return msm_framebuffer_prepare(fb, mdp5_kms->id);
}
static void mdp5_plane_cleanup_fb(struct drm_plane *plane,
struct drm_plane_state *old_state)
{
- struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
struct mdp5_kms *mdp5_kms = get_kms(plane);
struct drm_framebuffer *fb = old_state->fb;
if (!fb)
return;
- DBG("%s: cleanup: FB[%u]", mdp5_plane->name, fb->base.id);
+ DBG("%s: cleanup: FB[%u]", plane->name, fb->base.id);
msm_framebuffer_cleanup(fb, mdp5_kms->id);
}
static int mdp5_plane_atomic_check(struct drm_plane *plane,
struct drm_plane_state *state)
{
- struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
+ struct mdp5_plane_state *mdp5_state = to_mdp5_plane_state(state);
struct drm_plane_state *old_state = plane->state;
- const struct mdp_format *format;
- bool vflip, hflip;
+ struct mdp5_cfg *config = mdp5_cfg_get_config(get_kms(plane)->cfg);
+ bool new_hwpipe = false;
+ uint32_t max_width, max_height;
+ uint32_t caps = 0;
- DBG("%s: check (%d -> %d)", mdp5_plane->name,
+ DBG("%s: check (%d -> %d)", plane->name,
plane_enabled(old_state), plane_enabled(state));
+ /* We don't allow faster-than-vblank updates.. if we did add this
+ * some day, we would need to disallow in cases where hwpipe
+ * changes
+ */
+ if (WARN_ON(to_mdp5_plane_state(old_state)->pending))
+ return -EBUSY;
+
+ max_width = config->hw->lm.max_width << 16;
+ max_height = config->hw->lm.max_height << 16;
+
+ /* Make sure source dimensions are within bounds. */
+ if ((state->src_w > max_width) || (state->src_h > max_height)) {
+ struct drm_rect src = drm_plane_state_src(state);
+ DBG("Invalid source size "DRM_RECT_FP_FMT,
+ DRM_RECT_FP_ARG(&src));
+ return -ERANGE;
+ }
+
if (plane_enabled(state)) {
+ unsigned int rotation;
+ const struct mdp_format *format;
+ struct mdp5_kms *mdp5_kms = get_kms(plane);
+ uint32_t blkcfg = 0;
+
format = to_mdp_format(msm_framebuffer_format(state->fb));
- if (MDP_FORMAT_IS_YUV(format) &&
- !pipe_supports_yuv(mdp5_plane->caps)) {
- DBG("Pipe doesn't support YUV\n");
+ if (MDP_FORMAT_IS_YUV(format))
+ caps |= MDP_PIPE_CAP_SCALE | MDP_PIPE_CAP_CSC;
- return -EINVAL;
- }
+ if (((state->src_w >> 16) != state->crtc_w) ||
+ ((state->src_h >> 16) != state->crtc_h))
+ caps |= MDP_PIPE_CAP_SCALE;
- if (!(mdp5_plane->caps & MDP_PIPE_CAP_SCALE) &&
- (((state->src_w >> 16) != state->crtc_w) ||
- ((state->src_h >> 16) != state->crtc_h))) {
- DBG("Pipe doesn't support scaling (%dx%d -> %dx%d)\n",
- state->src_w >> 16, state->src_h >> 16,
- state->crtc_w, state->crtc_h);
+ rotation = drm_rotation_simplify(state->rotation,
+ DRM_ROTATE_0 |
+ DRM_REFLECT_X |
+ DRM_REFLECT_Y);
- return -EINVAL;
- }
+ if (rotation & DRM_REFLECT_X)
+ caps |= MDP_PIPE_CAP_HFLIP;
- hflip = !!(state->rotation & DRM_REFLECT_X);
- vflip = !!(state->rotation & DRM_REFLECT_Y);
- if ((vflip && !(mdp5_plane->caps & MDP_PIPE_CAP_VFLIP)) ||
- (hflip && !(mdp5_plane->caps & MDP_PIPE_CAP_HFLIP))) {
- DBG("Pipe doesn't support flip\n");
+ if (rotation & DRM_REFLECT_Y)
+ caps |= MDP_PIPE_CAP_VFLIP;
- return -EINVAL;
- }
- }
+ /* (re)allocate hw pipe if we don't have one or caps-mismatch: */
+ if (!mdp5_state->hwpipe || (caps & ~mdp5_state->hwpipe->caps))
+ new_hwpipe = true;
- if (plane_enabled(state) && plane_enabled(old_state)) {
- /* we cannot change SMP block configuration during scanout: */
- bool full_modeset = false;
- if (state->fb->pixel_format != old_state->fb->pixel_format) {
- DBG("%s: pixel_format change!", mdp5_plane->name);
- full_modeset = true;
- }
- if (state->src_w != old_state->src_w) {
- DBG("%s: src_w change!", mdp5_plane->name);
- full_modeset = true;
- }
- if (to_mdp5_plane_state(old_state)->pending) {
- DBG("%s: still pending!", mdp5_plane->name);
- full_modeset = true;
+ if (mdp5_kms->smp) {
+ const struct mdp_format *format =
+ to_mdp_format(msm_framebuffer_format(state->fb));
+
+ blkcfg = mdp5_smp_calculate(mdp5_kms->smp, format,
+ state->src_w >> 16, false);
+
+ if (mdp5_state->hwpipe && (mdp5_state->hwpipe->blkcfg != blkcfg))
+ new_hwpipe = true;
}
- if (full_modeset) {
- struct drm_crtc_state *crtc_state =
- drm_atomic_get_crtc_state(state->state, state->crtc);
- crtc_state->mode_changed = true;
- to_mdp5_plane_state(state)->mode_changed = true;
+
+ /* (re)assign hwpipe if needed, otherwise keep old one: */
+ if (new_hwpipe) {
+ /* TODO maybe we want to re-assign hwpipe sometimes
+ * in cases when we no-longer need some caps to make
+ * it available for other planes?
+ */
+ struct mdp5_hw_pipe *old_hwpipe = mdp5_state->hwpipe;
+ mdp5_state->hwpipe = mdp5_pipe_assign(state->state,
+ plane, caps, blkcfg);
+ if (IS_ERR(mdp5_state->hwpipe)) {
+ DBG("%s: failed to assign hwpipe!", plane->name);
+ return PTR_ERR(mdp5_state->hwpipe);
+ }
+ mdp5_pipe_release(state->state, old_hwpipe);
}
- } else {
- to_mdp5_plane_state(state)->mode_changed = true;
}
return 0;
@@ -348,16 +369,16 @@ static int mdp5_plane_atomic_check(struct drm_plane *plane,
static void mdp5_plane_atomic_update(struct drm_plane *plane,
struct drm_plane_state *old_state)
{
- struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
struct drm_plane_state *state = plane->state;
+ struct mdp5_plane_state *mdp5_state = to_mdp5_plane_state(state);
- DBG("%s: update", mdp5_plane->name);
+ DBG("%s: update", plane->name);
- if (!plane_enabled(state)) {
- to_mdp5_plane_state(state)->pending = true;
- } else if (to_mdp5_plane_state(state)->mode_changed) {
+ mdp5_state->pending = true;
+
+ if (plane_enabled(state)) {
int ret;
- to_mdp5_plane_state(state)->pending = true;
+
ret = mdp5_plane_mode_set(plane,
state->crtc, state->fb,
state->crtc_x, state->crtc_y,
@@ -366,11 +387,6 @@ static void mdp5_plane_atomic_update(struct drm_plane *plane,
state->src_w, state->src_h);
/* atomic_check should have ensured that this doesn't fail */
WARN_ON(ret < 0);
- } else {
- unsigned long flags;
- spin_lock_irqsave(&mdp5_plane->pipe_lock, flags);
- set_scanout_locked(plane, state->fb);
- spin_unlock_irqrestore(&mdp5_plane->pipe_lock, flags);
}
}
@@ -384,9 +400,9 @@ static const struct drm_plane_helper_funcs mdp5_plane_helper_funcs = {
static void set_scanout_locked(struct drm_plane *plane,
struct drm_framebuffer *fb)
{
- struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
struct mdp5_kms *mdp5_kms = get_kms(plane);
- enum mdp5_pipe pipe = mdp5_plane->pipe;
+ struct mdp5_hw_pipe *hwpipe = to_mdp5_plane_state(plane->state)->hwpipe;
+ enum mdp5_pipe pipe = hwpipe->pipe;
mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_STRIDE_A(pipe),
MDP5_PIPE_SRC_STRIDE_A_P0(fb->pitches[0]) |
@@ -666,18 +682,19 @@ static int mdp5_plane_mode_set(struct drm_plane *plane,
uint32_t src_x, uint32_t src_y,
uint32_t src_w, uint32_t src_h)
{
- struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
struct drm_plane_state *pstate = plane->state;
+ struct mdp5_hw_pipe *hwpipe = to_mdp5_plane_state(pstate)->hwpipe;
struct mdp5_kms *mdp5_kms = get_kms(plane);
- enum mdp5_pipe pipe = mdp5_plane->pipe;
+ enum mdp5_pipe pipe = hwpipe->pipe;
const struct mdp_format *format;
uint32_t nplanes, config = 0;
uint32_t phasex_step[COMP_MAX] = {0,}, phasey_step[COMP_MAX] = {0,};
- bool pe = mdp5_plane->caps & MDP_PIPE_CAP_SW_PIX_EXT;
+ bool pe = hwpipe->caps & MDP_PIPE_CAP_SW_PIX_EXT;
int pe_left[COMP_MAX], pe_right[COMP_MAX];
int pe_top[COMP_MAX], pe_bottom[COMP_MAX];
uint32_t hdecm = 0, vdecm = 0;
uint32_t pix_format;
+ unsigned int rotation;
bool vflip, hflip;
unsigned long flags;
int ret;
@@ -697,27 +714,10 @@ static int mdp5_plane_mode_set(struct drm_plane *plane,
src_w = src_w >> 16;
src_h = src_h >> 16;
- DBG("%s: FB[%u] %u,%u,%u,%u -> CRTC[%u] %d,%d,%u,%u", mdp5_plane->name,
+ DBG("%s: FB[%u] %u,%u,%u,%u -> CRTC[%u] %d,%d,%u,%u", plane->name,
fb->base.id, src_x, src_y, src_w, src_h,
crtc->base.id, crtc_x, crtc_y, crtc_w, crtc_h);
- /* Request some memory from the SMP: */
- if (mdp5_kms->smp) {
- ret = mdp5_smp_request(mdp5_kms->smp,
- mdp5_plane->pipe, format, src_w, false);
- if (ret)
- return ret;
- }
-
- /*
- * Currently we update the hw for allocations/requests immediately,
- * but once atomic modeset/pageflip is in place, the allocation
- * would move into atomic->check_plane_state(), while updating the
- * hw would remain here:
- */
- if (mdp5_kms->smp)
- mdp5_smp_configure(mdp5_kms->smp, pipe);
-
ret = calc_scalex_steps(plane, pix_format, src_w, crtc_w, phasex_step);
if (ret)
return ret;
@@ -726,7 +726,7 @@ static int mdp5_plane_mode_set(struct drm_plane *plane,
if (ret)
return ret;
- if (mdp5_plane->caps & MDP_PIPE_CAP_SW_PIX_EXT) {
+ if (hwpipe->caps & MDP_PIPE_CAP_SW_PIX_EXT) {
calc_pixel_ext(format, src_w, crtc_w, phasex_step,
pe_left, pe_right, true);
calc_pixel_ext(format, src_h, crtc_h, phasey_step,
@@ -740,14 +740,18 @@ static int mdp5_plane_mode_set(struct drm_plane *plane,
config |= get_scale_config(format, src_h, crtc_h, false);
DBG("scale config = %x", config);
- hflip = !!(pstate->rotation & DRM_REFLECT_X);
- vflip = !!(pstate->rotation & DRM_REFLECT_Y);
+ rotation = drm_rotation_simplify(pstate->rotation,
+ DRM_ROTATE_0 |
+ DRM_REFLECT_X |
+ DRM_REFLECT_Y);
+ hflip = !!(rotation & DRM_REFLECT_X);
+ vflip = !!(rotation & DRM_REFLECT_Y);
- spin_lock_irqsave(&mdp5_plane->pipe_lock, flags);
+ spin_lock_irqsave(&hwpipe->pipe_lock, flags);
mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_IMG_SIZE(pipe),
- MDP5_PIPE_SRC_IMG_SIZE_WIDTH(fb->width) |
- MDP5_PIPE_SRC_IMG_SIZE_HEIGHT(fb->height));
+ MDP5_PIPE_SRC_IMG_SIZE_WIDTH(min(fb->width, src_w)) |
+ MDP5_PIPE_SRC_IMG_SIZE_HEIGHT(min(fb->height, src_h)));
mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_SIZE(pipe),
MDP5_PIPE_SRC_SIZE_WIDTH(src_w) |
@@ -792,12 +796,12 @@ static int mdp5_plane_mode_set(struct drm_plane *plane,
/* not using secure mode: */
mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_ADDR_SW_STATUS(pipe), 0);
- if (mdp5_plane->caps & MDP_PIPE_CAP_SW_PIX_EXT)
+ if (hwpipe->caps & MDP_PIPE_CAP_SW_PIX_EXT)
mdp5_write_pixel_ext(mdp5_kms, pipe, format,
src_w, pe_left, pe_right,
src_h, pe_top, pe_bottom);
- if (mdp5_plane->caps & MDP_PIPE_CAP_SCALE) {
+ if (hwpipe->caps & MDP_PIPE_CAP_SCALE) {
mdp5_write(mdp5_kms, REG_MDP5_PIPE_SCALE_PHASE_STEP_X(pipe),
phasex_step[COMP_0]);
mdp5_write(mdp5_kms, REG_MDP5_PIPE_SCALE_PHASE_STEP_Y(pipe),
@@ -812,7 +816,7 @@ static int mdp5_plane_mode_set(struct drm_plane *plane,
mdp5_write(mdp5_kms, REG_MDP5_PIPE_SCALE_CONFIG(pipe), config);
}
- if (mdp5_plane->caps & MDP_PIPE_CAP_CSC) {
+ if (hwpipe->caps & MDP_PIPE_CAP_CSC) {
if (MDP_FORMAT_IS_YUV(format))
csc_enable(mdp5_kms, pipe,
mdp_get_default_csc_cfg(CSC_YUV2RGB));
@@ -822,56 +826,42 @@ static int mdp5_plane_mode_set(struct drm_plane *plane,
set_scanout_locked(plane, fb);
- spin_unlock_irqrestore(&mdp5_plane->pipe_lock, flags);
+ spin_unlock_irqrestore(&hwpipe->pipe_lock, flags);
return ret;
}
-void mdp5_plane_complete_flip(struct drm_plane *plane)
+enum mdp5_pipe mdp5_plane_pipe(struct drm_plane *plane)
{
- struct mdp5_kms *mdp5_kms = get_kms(plane);
- struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
- enum mdp5_pipe pipe = mdp5_plane->pipe;
+ struct mdp5_plane_state *pstate = to_mdp5_plane_state(plane->state);
- DBG("%s: complete flip", mdp5_plane->name);
-
- if (mdp5_kms->smp)
- mdp5_smp_commit(mdp5_kms->smp, pipe);
-
- to_mdp5_plane_state(plane->state)->pending = false;
-}
+ if (WARN_ON(!pstate->hwpipe))
+ return 0;
-enum mdp5_pipe mdp5_plane_pipe(struct drm_plane *plane)
-{
- struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
- return mdp5_plane->pipe;
+ return pstate->hwpipe->pipe;
}
uint32_t mdp5_plane_get_flush(struct drm_plane *plane)
{
- struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
+ struct mdp5_plane_state *pstate = to_mdp5_plane_state(plane->state);
- return mdp5_plane->flush_mask;
+ if (WARN_ON(!pstate->hwpipe))
+ return 0;
+
+ return pstate->hwpipe->flush_mask;
}
/* called after vsync in thread context */
void mdp5_plane_complete_commit(struct drm_plane *plane,
struct drm_plane_state *state)
{
- struct mdp5_kms *mdp5_kms = get_kms(plane);
- struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
- enum mdp5_pipe pipe = mdp5_plane->pipe;
+ struct mdp5_plane_state *pstate = to_mdp5_plane_state(plane->state);
- if (!plane_enabled(plane->state) && mdp5_kms->smp) {
- DBG("%s: free SMP", mdp5_plane->name);
- mdp5_smp_release(mdp5_kms->smp, pipe);
- }
+ pstate->pending = false;
}
/* initialize plane */
-struct drm_plane *mdp5_plane_init(struct drm_device *dev,
- enum mdp5_pipe pipe, bool private_plane, uint32_t reg_offset,
- uint32_t caps)
+struct drm_plane *mdp5_plane_init(struct drm_device *dev, bool primary)
{
struct drm_plane *plane = NULL;
struct mdp5_plane *mdp5_plane;
@@ -886,19 +876,10 @@ struct drm_plane *mdp5_plane_init(struct drm_device *dev,
plane = &mdp5_plane->base;
- mdp5_plane->pipe = pipe;
- mdp5_plane->name = pipe2name(pipe);
- mdp5_plane->caps = caps;
-
mdp5_plane->nformats = mdp_get_formats(mdp5_plane->formats,
- ARRAY_SIZE(mdp5_plane->formats),
- !pipe_supports_yuv(mdp5_plane->caps));
-
- mdp5_plane->flush_mask = mdp_ctl_flush_mask_pipe(pipe);
- mdp5_plane->reg_offset = reg_offset;
- spin_lock_init(&mdp5_plane->pipe_lock);
+ ARRAY_SIZE(mdp5_plane->formats), false);
- type = private_plane ? DRM_PLANE_TYPE_PRIMARY : DRM_PLANE_TYPE_OVERLAY;
+ type = primary ? DRM_PLANE_TYPE_PRIMARY : DRM_PLANE_TYPE_OVERLAY;
ret = drm_universal_plane_init(dev, plane, 0xff, &mdp5_plane_funcs,
mdp5_plane->formats, mdp5_plane->nformats,
type, NULL);
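
atomic_check now derives the required capability set from the requested state rather than rejecting states a fixed pipe cannot handle: YUV implies CSC and scaling, a src/dst size mismatch implies scaling, and reflections imply the flip caps. A condensed sketch of the rotation part, assuming the mdp5_kms.h context is in scope; rotation_to_caps is an illustrative name and DRM_ROTATE_*/DRM_REFLECT_* are this tree's pre-rename flags:

	static uint32_t rotation_to_caps(unsigned int rotation)
	{
		uint32_t caps = 0;

		/* reduce arbitrary rotation to the subset the hw can do */
		rotation = drm_rotation_simplify(rotation,
						 DRM_ROTATE_0 |
						 DRM_REFLECT_X |
						 DRM_REFLECT_Y);
		if (rotation & DRM_REFLECT_X)
			caps |= MDP_PIPE_CAP_HFLIP;
		if (rotation & DRM_REFLECT_Y)
			caps |= MDP_PIPE_CAP_VFLIP;
		return caps;
	}
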
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c
index 27d7b55b52c9..58f712d37e7f 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c
@@ -21,72 +21,6 @@
#include "mdp5_smp.h"
-/* SMP - Shared Memory Pool
- *
- * These are shared between all the clients, where each plane in a
- * scanout buffer is a SMP client. Ie. scanout of 3 plane I420 on
- * pipe VIG0 => 3 clients: VIG0_Y, VIG0_CB, VIG0_CR.
- *
- * Based on the size of the attached scanout buffer, a certain # of
- * blocks must be allocated to that client out of the shared pool.
- *
- * In some hw, some blocks are statically allocated for certain pipes
- * and CANNOT be re-allocated (eg: MMB0 and MMB1 both tied to RGB0).
- *
- * For each block that can be dynamically allocated, it can be either
- * free:
- * The block is free.
- *
- * pending:
- * The block is allocated to some client and not free.
- *
- * configured:
- * The block is allocated to some client, and assigned to that
- * client in MDP5_SMP_ALLOC registers.
- *
- * inuse:
- * The block is being actively used by a client.
- *
- * The updates happen in the following steps:
- *
- * 1) mdp5_smp_request():
- * When plane scanout is setup, calculate required number of
- * blocks needed per client, and request. Blocks neither inuse nor
- * configured nor pending by any other client are added to client's
- * pending set.
- * For shrinking, blocks in pending but not in configured can be freed
- * directly, but those already in configured will be freed later by
- * mdp5_smp_commit.
- *
- * 2) mdp5_smp_configure():
- * As hw is programmed, before FLUSH, MDP5_SMP_ALLOC registers
- * are configured for the union(pending, inuse)
- * Current pending is copied to configured.
- * It is assumed that mdp5_smp_request and mdp5_smp_configure not run
- * concurrently for the same pipe.
- *
- * 3) mdp5_smp_commit():
- * After next vblank, copy configured -> inuse. Optionally update
- * MDP5_SMP_ALLOC registers if there are newly unused blocks
- *
- * 4) mdp5_smp_release():
- * Must be called after the pipe is disabled and no longer uses any SMB
- *
- * On the next vblank after changes have been committed to hw, the
- * client's pending blocks become it's in-use blocks (and no-longer
- * in-use blocks become available to other clients).
- *
- * btw, hurray for confusing overloaded acronyms! :-/
- *
- * NOTE: for atomic modeset/pageflip NONBLOCK operations, step #1
- * should happen at (or before)? atomic->check(). And we'd need
- * an API to discard previous requests if update is aborted or
- * (test-only).
- *
- * TODO would perhaps be nice to have debugfs to dump out kernel
- * inuse and pending state of all clients..
- */
-
struct mdp5_smp {
struct drm_device *dev;
@@ -94,16 +28,8 @@ struct mdp5_smp {
int blk_cnt;
int blk_size;
-
- spinlock_t state_lock;
- mdp5_smp_state_t state; /* to track smp allocation amongst pipes: */
-
- struct mdp5_client_smp_state client_state[MAX_CLIENTS];
};
-static void update_smp_state(struct mdp5_smp *smp,
- u32 cid, mdp5_smp_state_t *assigned);
-
static inline
struct mdp5_kms *get_kms(struct mdp5_smp *smp)
{
@@ -134,57 +60,38 @@ static inline u32 pipe2client(enum mdp5_pipe pipe, int plane)
return mdp5_cfg->smp.clients[pipe] + plane;
}
-/* step #1: update # of blocks pending for the client: */
+/* allocate blocks for the specified request: */
static int smp_request_block(struct mdp5_smp *smp,
+ struct mdp5_smp_state *state,
u32 cid, int nblks)
{
- struct mdp5_kms *mdp5_kms = get_kms(smp);
- struct mdp5_client_smp_state *ps = &smp->client_state[cid];
- int i, ret, avail, cur_nblks, cnt = smp->blk_cnt;
+ void *cs = state->client_state[cid];
+ int i, avail, cnt = smp->blk_cnt;
uint8_t reserved;
- unsigned long flags;
- reserved = smp->reserved[cid];
+ /* we shouldn't be requesting blocks for an in-use client: */
+ WARN_ON(bitmap_weight(cs, cnt) > 0);
- spin_lock_irqsave(&smp->state_lock, flags);
+ reserved = smp->reserved[cid];
if (reserved) {
nblks = max(0, nblks - reserved);
DBG("%d MMBs allocated (%d reserved)", nblks, reserved);
}
- avail = cnt - bitmap_weight(smp->state, cnt);
+ avail = cnt - bitmap_weight(state->state, cnt);
if (nblks > avail) {
- dev_err(mdp5_kms->dev->dev, "out of blks (req=%d > avail=%d)\n",
+ dev_err(smp->dev->dev, "out of blks (req=%d > avail=%d)\n",
nblks, avail);
- ret = -ENOSPC;
- goto fail;
+ return -ENOSPC;
}
- cur_nblks = bitmap_weight(ps->pending, cnt);
- if (nblks > cur_nblks) {
- /* grow the existing pending reservation: */
- for (i = cur_nblks; i < nblks; i++) {
- int blk = find_first_zero_bit(smp->state, cnt);
- set_bit(blk, ps->pending);
- set_bit(blk, smp->state);
- }
- } else {
- /* shrink the existing pending reservation: */
- for (i = cur_nblks; i > nblks; i--) {
- int blk = find_first_bit(ps->pending, cnt);
- clear_bit(blk, ps->pending);
-
- /* clear in global smp_state if not in configured
- * otherwise until _commit()
- */
- if (!test_bit(blk, ps->configured))
- clear_bit(blk, smp->state);
- }
+ for (i = 0; i < nblks; i++) {
+ int blk = find_first_zero_bit(state->state, cnt);
+ set_bit(blk, cs);
+ set_bit(blk, state->state);
}
-fail:
- spin_unlock_irqrestore(&smp->state_lock, flags);
return 0;
}
@@ -209,14 +116,15 @@ static void set_fifo_thresholds(struct mdp5_smp *smp,
* decimated width. Ie. SMP buffering sits downstream of decimation (which
* presumably happens during the dma from scanout buffer).
*/
-int mdp5_smp_request(struct mdp5_smp *smp, enum mdp5_pipe pipe,
- const struct mdp_format *format, u32 width, bool hdecim)
+uint32_t mdp5_smp_calculate(struct mdp5_smp *smp,
+ const struct mdp_format *format,
+ u32 width, bool hdecim)
{
struct mdp5_kms *mdp5_kms = get_kms(smp);
- struct drm_device *dev = mdp5_kms->dev;
int rev = mdp5_cfg_get_hw_rev(mdp5_kms->cfg);
- int i, hsub, nplanes, nlines, nblks, ret;
+ int i, hsub, nplanes, nlines;
u32 fmt = format->base.pixel_format;
+ uint32_t blkcfg = 0;
nplanes = drm_format_num_planes(fmt);
hsub = drm_format_horz_chroma_subsampling(fmt);
@@ -239,7 +147,7 @@ int mdp5_smp_request(struct mdp5_smp *smp, enum mdp5_pipe pipe,
hsub = 1;
}
- for (i = 0, nblks = 0; i < nplanes; i++) {
+ for (i = 0; i < nplanes; i++) {
int n, fetch_stride, cpp;
cpp = drm_format_plane_cpp(fmt, i);
@@ -251,60 +159,72 @@ int mdp5_smp_request(struct mdp5_smp *smp, enum mdp5_pipe pipe,
if (rev == 0)
n = roundup_pow_of_two(n);
+ blkcfg |= (n << (8 * i));
+ }
+
+ return blkcfg;
+}
+
+int mdp5_smp_assign(struct mdp5_smp *smp, struct mdp5_smp_state *state,
+ enum mdp5_pipe pipe, uint32_t blkcfg)
+{
+ struct mdp5_kms *mdp5_kms = get_kms(smp);
+ struct drm_device *dev = mdp5_kms->dev;
+ int i, ret;
+
+ for (i = 0; i < pipe2nclients(pipe); i++) {
+ u32 cid = pipe2client(pipe, i);
+ int n = blkcfg & 0xff;
+
+ if (!n)
+ continue;
+
DBG("%s[%d]: request %d SMP blocks", pipe2name(pipe), i, n);
- ret = smp_request_block(smp, pipe2client(pipe, i), n);
+ ret = smp_request_block(smp, state, cid, n);
if (ret) {
dev_err(dev->dev, "Cannot allocate %d SMP blocks: %d\n",
n, ret);
return ret;
}
- nblks += n;
+ blkcfg >>= 8;
}
- set_fifo_thresholds(smp, pipe, nblks);
+ state->assigned |= (1 << pipe);
return 0;
}
/* Release SMP blocks for all clients of the pipe */
-void mdp5_smp_release(struct mdp5_smp *smp, enum mdp5_pipe pipe)
+void mdp5_smp_release(struct mdp5_smp *smp, struct mdp5_smp_state *state,
+ enum mdp5_pipe pipe)
{
int i;
- unsigned long flags;
int cnt = smp->blk_cnt;
for (i = 0; i < pipe2nclients(pipe); i++) {
- mdp5_smp_state_t assigned;
u32 cid = pipe2client(pipe, i);
- struct mdp5_client_smp_state *ps = &smp->client_state[cid];
-
- spin_lock_irqsave(&smp->state_lock, flags);
-
- /* clear hw assignment */
- bitmap_or(assigned, ps->inuse, ps->configured, cnt);
- update_smp_state(smp, CID_UNUSED, &assigned);
-
- /* free to global pool */
- bitmap_andnot(smp->state, smp->state, ps->pending, cnt);
- bitmap_andnot(smp->state, smp->state, assigned, cnt);
+ void *cs = state->client_state[cid];
- /* clear client's infor */
- bitmap_zero(ps->pending, cnt);
- bitmap_zero(ps->configured, cnt);
- bitmap_zero(ps->inuse, cnt);
+ /* update global state: */
+ bitmap_andnot(state->state, state->state, cs, cnt);
- spin_unlock_irqrestore(&smp->state_lock, flags);
+ /* clear client's state */
+ bitmap_zero(cs, cnt);
}
- set_fifo_thresholds(smp, pipe, 0);
+ state->released |= (1 << pipe);
}
-static void update_smp_state(struct mdp5_smp *smp,
+/* NOTE: SMP_ALLOC_* regs are *not* double buffered, so release has to
+ * happen after scanout completes.
+ */
+static unsigned update_smp_state(struct mdp5_smp *smp,
u32 cid, mdp5_smp_state_t *assigned)
{
struct mdp5_kms *mdp5_kms = get_kms(smp);
int cnt = smp->blk_cnt;
+ unsigned nblks = 0;
u32 blk, val;
for_each_set_bit(blk, *assigned, cnt) {
@@ -330,62 +250,88 @@ static void update_smp_state(struct mdp5_smp *smp,
mdp5_write(mdp5_kms, REG_MDP5_SMP_ALLOC_W_REG(idx), val);
mdp5_write(mdp5_kms, REG_MDP5_SMP_ALLOC_R_REG(idx), val);
+
+ nblks++;
}
+
+ return nblks;
}
-/* step #2: configure hw for union(pending, inuse): */
-void mdp5_smp_configure(struct mdp5_smp *smp, enum mdp5_pipe pipe)
+void mdp5_smp_prepare_commit(struct mdp5_smp *smp, struct mdp5_smp_state *state)
{
- int cnt = smp->blk_cnt;
- mdp5_smp_state_t assigned;
- int i;
+ enum mdp5_pipe pipe;
- for (i = 0; i < pipe2nclients(pipe); i++) {
- u32 cid = pipe2client(pipe, i);
- struct mdp5_client_smp_state *ps = &smp->client_state[cid];
+ for_each_set_bit(pipe, &state->assigned, sizeof(state->assigned) * 8) {
+ unsigned i, nblks = 0;
- /*
- * if vblank has not happened since last smp_configure
- * skip the configure for now
- */
- if (!bitmap_equal(ps->inuse, ps->configured, cnt))
- continue;
+ for (i = 0; i < pipe2nclients(pipe); i++) {
+ u32 cid = pipe2client(pipe, i);
+ void *cs = state->client_state[cid];
- bitmap_copy(ps->configured, ps->pending, cnt);
- bitmap_or(assigned, ps->inuse, ps->configured, cnt);
- update_smp_state(smp, cid, &assigned);
+ nblks += update_smp_state(smp, cid, cs);
+
+ DBG("assign %s:%u, %u blks",
+ pipe2name(pipe), i, nblks);
+ }
+
+ set_fifo_thresholds(smp, pipe, nblks);
}
+
+ state->assigned = 0;
}
-/* step #3: after vblank, copy configured -> inuse: */
-void mdp5_smp_commit(struct mdp5_smp *smp, enum mdp5_pipe pipe)
+void mdp5_smp_complete_commit(struct mdp5_smp *smp, struct mdp5_smp_state *state)
{
- int cnt = smp->blk_cnt;
- mdp5_smp_state_t released;
- int i;
-
- for (i = 0; i < pipe2nclients(pipe); i++) {
- u32 cid = pipe2client(pipe, i);
- struct mdp5_client_smp_state *ps = &smp->client_state[cid];
+ enum mdp5_pipe pipe;
- /*
- * Figure out if there are any blocks we where previously
- * using, which can be released and made available to other
- * clients:
- */
- if (bitmap_andnot(released, ps->inuse, ps->configured, cnt)) {
- unsigned long flags;
+ for_each_set_bit(pipe, &state->released, sizeof(state->released) * 8) {
+ DBG("release %s", pipe2name(pipe));
+ set_fifo_thresholds(smp, pipe, 0);
+ }
- spin_lock_irqsave(&smp->state_lock, flags);
- /* clear released blocks: */
- bitmap_andnot(smp->state, smp->state, released, cnt);
- spin_unlock_irqrestore(&smp->state_lock, flags);
+ state->released = 0;
+}
- update_smp_state(smp, CID_UNUSED, &released);
+void mdp5_smp_dump(struct mdp5_smp *smp, struct drm_printer *p)
+{
+ struct mdp5_kms *mdp5_kms = get_kms(smp);
+ struct mdp5_hw_pipe_state *hwpstate;
+ struct mdp5_smp_state *state;
+ int total = 0, i, j;
+
+ drm_printf(p, "name\tinuse\tplane\n");
+ drm_printf(p, "----\t-----\t-----\n");
+
+ if (drm_can_sleep())
+ drm_modeset_lock(&mdp5_kms->state_lock, NULL);
+
+ /* grab these *after* we hold the state_lock */
+ hwpstate = &mdp5_kms->state->hwpipe;
+ state = &mdp5_kms->state->smp;
+
+ for (i = 0; i < mdp5_kms->num_hwpipes; i++) {
+ struct mdp5_hw_pipe *hwpipe = mdp5_kms->hwpipes[i];
+ struct drm_plane *plane = hwpstate->hwpipe_to_plane[hwpipe->idx];
+ enum mdp5_pipe pipe = hwpipe->pipe;
+ for (j = 0; j < pipe2nclients(pipe); j++) {
+ u32 cid = pipe2client(pipe, j);
+ void *cs = state->client_state[cid];
+ int inuse = bitmap_weight(cs, smp->blk_cnt);
+
+ drm_printf(p, "%s:%d\t%d\t%s\n",
+ pipe2name(pipe), j, inuse,
+ plane ? plane->name : NULL);
+
+ total += inuse;
}
-
- bitmap_copy(ps->inuse, ps->configured, cnt);
}
+
+ drm_printf(p, "TOTAL:\t%d\t(of %d)\n", total, smp->blk_cnt);
+ drm_printf(p, "AVAIL:\t%d\n", smp->blk_cnt -
+ bitmap_weight(state->state, smp->blk_cnt));
+
+ if (drm_can_sleep())
+ drm_modeset_unlock(&mdp5_kms->state_lock);
}
void mdp5_smp_destroy(struct mdp5_smp *smp)
@@ -393,8 +339,9 @@ void mdp5_smp_destroy(struct mdp5_smp *smp)
kfree(smp);
}
-struct mdp5_smp *mdp5_smp_init(struct drm_device *dev, const struct mdp5_smp_block *cfg)
+struct mdp5_smp *mdp5_smp_init(struct mdp5_kms *mdp5_kms, const struct mdp5_smp_block *cfg)
{
+ struct mdp5_smp_state *state = &mdp5_kms->state->smp;
struct mdp5_smp *smp = NULL;
int ret;
@@ -404,14 +351,13 @@ struct mdp5_smp *mdp5_smp_init(struct drm_device *dev, const struct mdp5_smp_blo
goto fail;
}
- smp->dev = dev;
+ smp->dev = mdp5_kms->dev;
smp->blk_cnt = cfg->mmb_count;
smp->blk_size = cfg->mmb_size;
/* statically tied MMBs cannot be re-allocated: */
- bitmap_copy(smp->state, cfg->reserved_state, smp->blk_cnt);
+ bitmap_copy(state->state, cfg->reserved_state, smp->blk_cnt);
memcpy(smp->reserved, cfg->reserved, sizeof(smp->reserved));
- spin_lock_init(&smp->state_lock);
return smp;
fail:
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.h
index 20b87e800ea3..b41d0448fbe8 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.h
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.h
@@ -19,12 +19,53 @@
#ifndef __MDP5_SMP_H__
#define __MDP5_SMP_H__
+#include <drm/drm_print.h>
+
#include "msm_drv.h"
-struct mdp5_client_smp_state {
- mdp5_smp_state_t inuse;
- mdp5_smp_state_t configured;
- mdp5_smp_state_t pending;
+/*
+ * SMP - Shared Memory Pool:
+ *
+ * SMP blocks are shared between all the clients, where each plane in
+ * a scanout buffer is an SMP client. Ie. scanout of 3-plane I420 on
+ * pipe VIG0 => 3 clients: VIG0_Y, VIG0_CB, VIG0_CR.
+ *
+ * Based on the size of the attached scanout buffer, a certain # of
+ * blocks must be allocated to that client out of the shared pool.
+ *
+ * In some hw, some blocks are statically allocated for certain pipes
+ * and CANNOT be re-allocated (eg: MMB0 and MMB1 both tied to RGB0).
+ *
+ *
+ * Atomic SMP State:
+ *
+ * On atomic updates that modify SMP configuration, the state is cloned
+ * (copied) and modified. For test-only commits, or in cases where
+ * the atomic update fails (or we hit the ww_mutex deadlock/backoff
+ * condition), the new state is simply thrown away.
+ *
+ * Because the SMP registers are not double buffered, updates are a
+ * two step process:
+ *
+ * 1) in _prepare_commit() we configure things (via read-modify-write)
+ * for the newly assigned pipes, so we don't take away blocks
+ * assigned to pipes that are still scanning out
+ * 2) in _complete_commit(), after vblank/etc, we clear things for the
+ * released clients, since at that point old pipes are no longer
+ * scanning out.
+ */
+struct mdp5_smp_state {
+ /* global state of what blocks are in use: */
+ mdp5_smp_state_t state;
+
+ /* per client state of what blocks they are using: */
+ mdp5_smp_state_t client_state[MAX_CLIENTS];
+
+ /* assigned pipes (hw updated at _prepare_commit()): */
+ unsigned long assigned;
+
+ /* released pipes (hw updated at _complete_commit()): */
+ unsigned long released;
};
struct mdp5_kms;
@@ -36,13 +77,22 @@ struct mdp5_smp;
* which is then used to call the other mdp5_smp_*(handler, ...) functions.
*/
-struct mdp5_smp *mdp5_smp_init(struct drm_device *dev, const struct mdp5_smp_block *cfg);
+struct mdp5_smp *mdp5_smp_init(struct mdp5_kms *mdp5_kms,
+ const struct mdp5_smp_block *cfg);
void mdp5_smp_destroy(struct mdp5_smp *smp);
-int mdp5_smp_request(struct mdp5_smp *smp, enum mdp5_pipe pipe,
- const struct mdp_format *format, u32 width, bool hdecim);
-void mdp5_smp_configure(struct mdp5_smp *smp, enum mdp5_pipe pipe);
-void mdp5_smp_commit(struct mdp5_smp *smp, enum mdp5_pipe pipe);
-void mdp5_smp_release(struct mdp5_smp *smp, enum mdp5_pipe pipe);
+void mdp5_smp_dump(struct mdp5_smp *smp, struct drm_printer *p);
+
+uint32_t mdp5_smp_calculate(struct mdp5_smp *smp,
+ const struct mdp_format *format,
+ u32 width, bool hdecim);
+
+int mdp5_smp_assign(struct mdp5_smp *smp, struct mdp5_smp_state *state,
+ enum mdp5_pipe pipe, uint32_t blkcfg);
+void mdp5_smp_release(struct mdp5_smp *smp, struct mdp5_smp_state *state,
+ enum mdp5_pipe pipe);
+
+void mdp5_smp_prepare_commit(struct mdp5_smp *smp, struct mdp5_smp_state *state);
+void mdp5_smp_complete_commit(struct mdp5_smp *smp, struct mdp5_smp_state *state);
#endif /* __MDP5_SMP_H__ */
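To make the two-step sequence described in the comment block above concrete, here is a small stand-alone C model of the accounting (illustration only, not driver code; the 8-bit pool and function names are invented): step 1 programs the union of the old and new allocations so still-scanning pipes keep their blocks, and step 2 drops the released blocks after vblank.

#include <stdint.h>
#include <stdio.h>

static uint8_t hw_alloc; /* model of what the SMP hardware currently holds */

static void prepare_commit(uint8_t old_alloc, uint8_t new_alloc)
{
	/* step #1: program union(old, new) so pipes that are still
	 * scanning out do not lose their blocks before vblank */
	hw_alloc = old_alloc | new_alloc;
}

static void complete_commit(uint8_t new_alloc)
{
	/* step #2: after vblank the released pipes are idle, so keep
	 * only the new allocation */
	hw_alloc = new_alloc;
}

int main(void)
{
	prepare_commit(0x0f, 0x3c);
	printf("after prepare:  %02x\n", hw_alloc); /* 3f = the union */
	complete_commit(0x3c);
	printf("after complete: %02x\n", hw_alloc); /* 3c = new only */
	return 0;
}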
diff --git a/drivers/gpu/drm/msm/mdp/mdp_common.xml.h b/drivers/gpu/drm/msm/mdp/mdp_common.xml.h
index 452e3518f98b..8994c365e218 100644
--- a/drivers/gpu/drm/msm/mdp/mdp_common.xml.h
+++ b/drivers/gpu/drm/msm/mdp/mdp_common.xml.h
@@ -12,7 +12,7 @@ The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2016-02-10 17:07:21)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2015-05-20 20:03:14)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2849 bytes, from 2015-09-18 12:07:28)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 37194 bytes, from 2015-09-18 12:07:28)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 36965 bytes, from 2016-11-26 23:01:08)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 27887 bytes, from 2015-10-22 16:34:52)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 602 bytes, from 2015-10-22 16:35:02)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2015-05-20 20:03:14)
diff --git a/drivers/gpu/drm/msm/msm_atomic.c b/drivers/gpu/drm/msm/msm_atomic.c
index 73bae382eac3..30b5d23e53b4 100644
--- a/drivers/gpu/drm/msm/msm_atomic.c
+++ b/drivers/gpu/drm/msm/msm_atomic.c
@@ -141,7 +141,7 @@ static void complete_commit(struct msm_commit *c, bool async)
kms->funcs->complete_commit(kms, state);
- drm_atomic_state_free(state);
+ drm_atomic_state_put(state);
commit_destroy(c);
}
@@ -217,8 +217,9 @@ int msm_atomic_commit(struct drm_device *dev,
if ((plane->state->fb != plane_state->fb) && plane_state->fb) {
struct drm_gem_object *obj = msm_framebuffer_bo(plane_state->fb, 0);
struct msm_gem_object *msm_obj = to_msm_bo(obj);
+ struct dma_fence *fence = reservation_object_get_excl_rcu(msm_obj->resv);
- plane_state->fence = reservation_object_get_excl_rcu(msm_obj->resv);
+ drm_atomic_set_fence_for_plane(plane_state, fence);
}
}
@@ -240,6 +241,10 @@ int msm_atomic_commit(struct drm_device *dev,
drm_atomic_helper_swap_state(state, true);
+ /* swap driver private state while still holding state_lock */
+ if (to_kms_state(state)->state)
+ priv->kms->funcs->swap_state(priv->kms, state);
+
/*
* Everything below can be run asynchronously without the need to grab
 * any modeset locks at all under one condition: It must be guaranteed
@@ -256,6 +261,7 @@ int msm_atomic_commit(struct drm_device *dev,
* current layout.
*/
+ drm_atomic_state_get(state);
if (nonblock) {
queue_work(priv->atomic_wq, &c->work);
return 0;
@@ -269,3 +275,30 @@ error:
drm_atomic_helper_cleanup_planes(dev, state);
return ret;
}
+
+struct drm_atomic_state *msm_atomic_state_alloc(struct drm_device *dev)
+{
+ struct msm_kms_state *state = kzalloc(sizeof(*state), GFP_KERNEL);
+
+ if (!state || drm_atomic_state_init(dev, &state->base) < 0) {
+ kfree(state);
+ return NULL;
+ }
+
+ return &state->base;
+}
+
+void msm_atomic_state_clear(struct drm_atomic_state *s)
+{
+ struct msm_kms_state *state = to_kms_state(s);
+ drm_atomic_state_default_clear(&state->base);
+ kfree(state->state);
+ state->state = NULL;
+}
+
+void msm_atomic_state_free(struct drm_atomic_state *state)
+{
+ kfree(to_kms_state(state)->state);
+ drm_atomic_state_default_release(state);
+ kfree(state);
+}
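The three helpers above follow the usual pattern for subclassing drm_atomic_state: the base struct is embedded first so to_kms_state() can recover the subclass, and the default init/clear/release helpers are bracketed by management of the driver-private pointer. A stand-alone sketch of that embedding trick (the struct names here are invented stand-ins, not the real drm types):

#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

struct base_state { int refcount; };                  /* stand-in for drm_atomic_state */
struct kms_state { struct base_state base; void *priv; };

/* same container_of-style recovery as to_kms_state() */
#define to_kms_state(s) \
	((struct kms_state *)((char *)(s) - offsetof(struct kms_state, base)))

int main(void)
{
	struct kms_state *ks = calloc(1, sizeof(*ks));
	struct base_state *core;

	if (!ks)
		return 1;

	core = &ks->base; /* what core code passes around */
	printf("%d\n", to_kms_state(core) == ks); /* prints 1 */

	free(to_kms_state(core));
	return 0;
}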
diff --git a/drivers/gpu/drm/msm/msm_debugfs.c b/drivers/gpu/drm/msm/msm_debugfs.c
index 663f2b6ef091..c1b40f5adb60 100644
--- a/drivers/gpu/drm/msm/msm_debugfs.c
+++ b/drivers/gpu/drm/msm/msm_debugfs.c
@@ -18,6 +18,8 @@
#ifdef CONFIG_DEBUG_FS
#include "msm_drv.h"
#include "msm_gpu.h"
+#include "msm_kms.h"
+#include "msm_debugfs.h"
static int msm_gpu_show(struct drm_device *dev, struct seq_file *m)
{
@@ -141,6 +143,7 @@ int msm_debugfs_late_init(struct drm_device *dev)
int msm_debugfs_init(struct drm_minor *minor)
{
struct drm_device *dev = minor->dev;
+ struct msm_drm_private *priv = dev->dev_private;
int ret;
ret = drm_debugfs_create_files(msm_debugfs_list,
@@ -152,15 +155,25 @@ int msm_debugfs_init(struct drm_minor *minor)
return ret;
}
- return 0;
+ if (priv->kms->funcs->debugfs_init)
+ ret = priv->kms->funcs->debugfs_init(priv->kms, minor);
+
+ return ret;
}
void msm_debugfs_cleanup(struct drm_minor *minor)
{
+ struct drm_device *dev = minor->dev;
+ struct msm_drm_private *priv = dev->dev_private;
+
drm_debugfs_remove_files(msm_debugfs_list,
ARRAY_SIZE(msm_debugfs_list), minor);
- if (!minor->dev->dev_private)
+ if (!priv)
return;
+
+ if (priv->kms->funcs->debugfs_cleanup)
+ priv->kms->funcs->debugfs_cleanup(priv->kms, minor);
+
msm_rd_debugfs_cleanup(minor);
msm_perf_debugfs_cleanup(minor);
}
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index 46568fc80848..e29bb66f55b1 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -15,6 +15,8 @@
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
+#include <drm/drm_of.h>
+
#include "msm_drv.h"
#include "msm_debugfs.h"
#include "msm_fence.h"
@@ -44,17 +46,21 @@ static const struct drm_mode_config_funcs mode_config_funcs = {
.output_poll_changed = msm_fb_output_poll_changed,
.atomic_check = msm_atomic_check,
.atomic_commit = msm_atomic_commit,
+ .atomic_state_alloc = msm_atomic_state_alloc,
+ .atomic_state_clear = msm_atomic_state_clear,
+ .atomic_state_free = msm_atomic_state_free,
};
-int msm_register_mmu(struct drm_device *dev, struct msm_mmu *mmu)
+int msm_register_address_space(struct drm_device *dev,
+ struct msm_gem_address_space *aspace)
{
struct msm_drm_private *priv = dev->dev_private;
- int idx = priv->num_mmus++;
+ int idx = priv->num_aspaces++;
- if (WARN_ON(idx >= ARRAY_SIZE(priv->mmus)))
+ if (WARN_ON(idx >= ARRAY_SIZE(priv->aspace)))
return -EINVAL;
- priv->mmus[idx] = mmu;
+ priv->aspace[idx] = aspace;
return idx;
}
@@ -77,6 +83,10 @@ static char *vram = "16m";
MODULE_PARM_DESC(vram, "Configure VRAM size (for devices without IOMMU/GPUMMU)");
module_param(vram, charp, 0);
+bool dumpstate = false;
+MODULE_PARM_DESC(dumpstate, "Dump KMS state on errors");
+module_param(dumpstate, bool, 0600);
+
/*
* Util/helpers:
*/
@@ -766,9 +776,7 @@ static const struct file_operations fops = {
.open = drm_open,
.release = drm_release,
.unlocked_ioctl = drm_ioctl,
-#ifdef CONFIG_COMPAT
.compat_ioctl = drm_compat_ioctl,
-#endif
.poll = drm_poll,
.read = drm_read,
.llseek = no_llseek,
@@ -903,10 +911,8 @@ static int add_components_mdp(struct device *mdp_dev,
* remote-endpoint isn't a component that we need to add
*/
if (of_device_is_compatible(np, "qcom,mdp4") &&
- ep.port == 0) {
- of_node_put(ep_node);
+ ep.port == 0)
continue;
- }
/*
* It's okay if some of the ports don't have a remote endpoint
@@ -914,15 +920,12 @@ static int add_components_mdp(struct device *mdp_dev,
* any external interface.
*/
intf = of_graph_get_remote_port_parent(ep_node);
- if (!intf) {
- of_node_put(ep_node);
+ if (!intf)
continue;
- }
-
- component_match_add(master_dev, matchptr, compare_of, intf);
+ drm_of_component_match_add(master_dev, matchptr, compare_of,
+ intf);
of_node_put(intf);
- of_node_put(ep_node);
}
return 0;
@@ -962,8 +965,8 @@ static int add_display_components(struct device *dev,
put_device(mdp_dev);
/* add the MDP component itself */
- component_match_add(dev, matchptr, compare_of,
- mdp_dev->of_node);
+ drm_of_component_match_add(dev, matchptr, compare_of,
+ mdp_dev->of_node);
} else {
/* MDP4 */
mdp_dev = dev;
@@ -996,7 +999,7 @@ static int add_gpu_components(struct device *dev,
if (!np)
return 0;
- component_match_add(dev, matchptr, compare_of, np);
+ drm_of_component_match_add(dev, matchptr, compare_of, np);
of_node_put(np);
@@ -1035,7 +1038,13 @@ static int msm_pdev_probe(struct platform_device *pdev)
if (ret)
return ret;
- pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
+ /* On all devices that I am aware of, IOMMUs which can map
+ * any address the CPU can see are used:
+ */
+ ret = dma_set_mask_and_coherent(&pdev->dev, ~0);
+ if (ret)
+ return ret;
+
return component_master_add_with_match(&pdev->dev, &msm_drm_ops, match);
}
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index d0da52f2a806..ed4dad3ca133 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -52,6 +52,8 @@ struct msm_perf_state;
struct msm_gem_submit;
struct msm_fence_context;
struct msm_fence_cb;
+struct msm_gem_address_space;
+struct msm_gem_vma;
#define NUM_DOMAINS 2 /* one for KMS, then one per gpu core (?) */
@@ -121,12 +123,16 @@ struct msm_drm_private {
uint32_t pending_crtcs;
wait_queue_head_t pending_crtcs_event;
- /* registered MMUs: */
- unsigned int num_mmus;
- struct msm_mmu *mmus[NUM_DOMAINS];
+ /* Registered address spaces.. currently this is fixed per # of
+ * IOMMUs. Ie. one for the display block and one for the gpu block.
+ * Eventually, to do per-process gpu pagetables, we'll want one
+ * of these per-process.
+ */
+ unsigned int num_aspaces;
+ struct msm_gem_address_space *aspace[NUM_DOMAINS];
unsigned int num_planes;
- struct drm_plane *planes[8];
+ struct drm_plane *planes[16];
unsigned int num_crtcs;
struct drm_crtc *crtcs[8];
@@ -173,8 +179,22 @@ int msm_atomic_check(struct drm_device *dev,
struct drm_atomic_state *state);
int msm_atomic_commit(struct drm_device *dev,
struct drm_atomic_state *state, bool nonblock);
+struct drm_atomic_state *msm_atomic_state_alloc(struct drm_device *dev);
+void msm_atomic_state_clear(struct drm_atomic_state *state);
+void msm_atomic_state_free(struct drm_atomic_state *state);
+
+int msm_register_address_space(struct drm_device *dev,
+ struct msm_gem_address_space *aspace);
+
+void msm_gem_unmap_vma(struct msm_gem_address_space *aspace,
+ struct msm_gem_vma *vma, struct sg_table *sgt);
+int msm_gem_map_vma(struct msm_gem_address_space *aspace,
+ struct msm_gem_vma *vma, struct sg_table *sgt, int npages);
-int msm_register_mmu(struct drm_device *dev, struct msm_mmu *mmu);
+void msm_gem_address_space_destroy(struct msm_gem_address_space *aspace);
+struct msm_gem_address_space *
+msm_gem_address_space_create(struct device *dev, struct iommu_domain *domain,
+ const char *name);
void msm_gem_submit_free(struct msm_gem_submit *submit);
int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
@@ -189,9 +209,9 @@ int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma);
int msm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj);
int msm_gem_get_iova_locked(struct drm_gem_object *obj, int id,
- uint32_t *iova);
-int msm_gem_get_iova(struct drm_gem_object *obj, int id, uint32_t *iova);
-uint32_t msm_gem_iova(struct drm_gem_object *obj, int id);
+ uint64_t *iova);
+int msm_gem_get_iova(struct drm_gem_object *obj, int id, uint64_t *iova);
+uint64_t msm_gem_iova(struct drm_gem_object *obj, int id);
struct page **msm_gem_get_pages(struct drm_gem_object *obj);
void msm_gem_put_pages(struct drm_gem_object *obj);
void msm_gem_put_iova(struct drm_gem_object *obj, int id);
@@ -217,7 +237,7 @@ void msm_gem_vunmap(struct drm_gem_object *obj);
int msm_gem_sync_object(struct drm_gem_object *obj,
struct msm_fence_context *fctx, bool exclusive);
void msm_gem_move_to_active(struct drm_gem_object *obj,
- struct msm_gpu *gpu, bool exclusive, struct fence *fence);
+ struct msm_gpu *gpu, bool exclusive, struct dma_fence *fence);
void msm_gem_move_to_inactive(struct drm_gem_object *obj);
int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout);
int msm_gem_cpu_fini(struct drm_gem_object *obj);
@@ -303,8 +323,8 @@ void __iomem *msm_ioremap(struct platform_device *pdev, const char *name,
void msm_writel(u32 data, void __iomem *addr);
u32 msm_readl(const void __iomem *addr);
-#define DBG(fmt, ...) DRM_DEBUG(fmt"\n", ##__VA_ARGS__)
-#define VERB(fmt, ...) if (0) DRM_DEBUG(fmt"\n", ##__VA_ARGS__)
+#define DBG(fmt, ...) DRM_DEBUG_DRIVER(fmt"\n", ##__VA_ARGS__)
+#define VERB(fmt, ...) if (0) DRM_DEBUG_DRIVER(fmt"\n", ##__VA_ARGS__)
static inline int align_pitch(int width, int bpp)
{
diff --git a/drivers/gpu/drm/msm/msm_fb.c b/drivers/gpu/drm/msm/msm_fb.c
index 95cf8fe72ee5..9acf544e7a8f 100644
--- a/drivers/gpu/drm/msm/msm_fb.c
+++ b/drivers/gpu/drm/msm/msm_fb.c
@@ -88,11 +88,11 @@ int msm_framebuffer_prepare(struct drm_framebuffer *fb, int id)
{
struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb);
int ret, i, n = drm_format_num_planes(fb->pixel_format);
- uint32_t iova;
+ uint64_t iova;
for (i = 0; i < n; i++) {
ret = msm_gem_get_iova(msm_fb->planes[i], id, &iova);
- DBG("FB[%u]: iova[%d]: %08x (%d)", fb->base.id, i, iova, ret);
+ DBG("FB[%u]: iova[%d]: %08llx (%d)", fb->base.id, i, iova, ret);
if (ret)
return ret;
}
diff --git a/drivers/gpu/drm/msm/msm_fbdev.c b/drivers/gpu/drm/msm/msm_fbdev.c
index ffd4a338ca12..bffe93498512 100644
--- a/drivers/gpu/drm/msm/msm_fbdev.c
+++ b/drivers/gpu/drm/msm/msm_fbdev.c
@@ -39,6 +39,7 @@ struct msm_fbdev {
static struct fb_ops msm_fb_ops = {
.owner = THIS_MODULE,
+ DRM_FB_HELPER_DEFAULT_OPS,
/* Note: to properly handle manual update displays, we wrap the
* basic fbdev ops which write to the framebuffer
@@ -49,12 +50,6 @@ static struct fb_ops msm_fb_ops = {
.fb_copyarea = drm_fb_helper_sys_copyarea,
.fb_imageblit = drm_fb_helper_sys_imageblit,
.fb_mmap = msm_fbdev_mmap,
-
- .fb_check_var = drm_fb_helper_check_var,
- .fb_set_par = drm_fb_helper_set_par,
- .fb_pan_display = drm_fb_helper_pan_display,
- .fb_blank = drm_fb_helper_blank,
- .fb_setcmap = drm_fb_helper_setcmap,
};
static int msm_fbdev_mmap(struct fb_info *info, struct vm_area_struct *vma)
@@ -81,7 +76,7 @@ static int msm_fbdev_create(struct drm_fb_helper *helper,
struct drm_framebuffer *fb = NULL;
struct fb_info *fbi = NULL;
struct drm_mode_fb_cmd2 mode_cmd = {0};
- uint32_t paddr;
+ uint64_t paddr;
int ret, size;
DBG("create fbdev: %dx%d@%d (%dx%d)", sizes->surface_width,
diff --git a/drivers/gpu/drm/msm/msm_fence.c b/drivers/gpu/drm/msm/msm_fence.c
index a9b9b1c95a2e..3f299c537b77 100644
--- a/drivers/gpu/drm/msm/msm_fence.c
+++ b/drivers/gpu/drm/msm/msm_fence.c
@@ -15,7 +15,7 @@
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
-#include <linux/fence.h>
+#include <linux/dma-fence.h>
#include "msm_drv.h"
#include "msm_fence.h"
@@ -32,7 +32,7 @@ msm_fence_context_alloc(struct drm_device *dev, const char *name)
fctx->dev = dev;
fctx->name = name;
- fctx->context = fence_context_alloc(1);
+ fctx->context = dma_fence_context_alloc(1);
init_waitqueue_head(&fctx->event);
spin_lock_init(&fctx->spinlock);
@@ -100,52 +100,52 @@ void msm_update_fence(struct msm_fence_context *fctx, uint32_t fence)
struct msm_fence {
struct msm_fence_context *fctx;
- struct fence base;
+ struct dma_fence base;
};
-static inline struct msm_fence *to_msm_fence(struct fence *fence)
+static inline struct msm_fence *to_msm_fence(struct dma_fence *fence)
{
return container_of(fence, struct msm_fence, base);
}
-static const char *msm_fence_get_driver_name(struct fence *fence)
+static const char *msm_fence_get_driver_name(struct dma_fence *fence)
{
return "msm";
}
-static const char *msm_fence_get_timeline_name(struct fence *fence)
+static const char *msm_fence_get_timeline_name(struct dma_fence *fence)
{
struct msm_fence *f = to_msm_fence(fence);
return f->fctx->name;
}
-static bool msm_fence_enable_signaling(struct fence *fence)
+static bool msm_fence_enable_signaling(struct dma_fence *fence)
{
return true;
}
-static bool msm_fence_signaled(struct fence *fence)
+static bool msm_fence_signaled(struct dma_fence *fence)
{
struct msm_fence *f = to_msm_fence(fence);
return fence_completed(f->fctx, f->base.seqno);
}
-static void msm_fence_release(struct fence *fence)
+static void msm_fence_release(struct dma_fence *fence)
{
struct msm_fence *f = to_msm_fence(fence);
kfree_rcu(f, base.rcu);
}
-static const struct fence_ops msm_fence_ops = {
+static const struct dma_fence_ops msm_fence_ops = {
.get_driver_name = msm_fence_get_driver_name,
.get_timeline_name = msm_fence_get_timeline_name,
.enable_signaling = msm_fence_enable_signaling,
.signaled = msm_fence_signaled,
- .wait = fence_default_wait,
+ .wait = dma_fence_default_wait,
.release = msm_fence_release,
};
-struct fence *
+struct dma_fence *
msm_fence_alloc(struct msm_fence_context *fctx)
{
struct msm_fence *f;
@@ -156,8 +156,8 @@ msm_fence_alloc(struct msm_fence_context *fctx)
f->fctx = fctx;
- fence_init(&f->base, &msm_fence_ops, &fctx->spinlock,
- fctx->context, ++fctx->last_fence);
+ dma_fence_init(&f->base, &msm_fence_ops, &fctx->spinlock,
+ fctx->context, ++fctx->last_fence);
return &f->base;
}
diff --git a/drivers/gpu/drm/msm/msm_fence.h b/drivers/gpu/drm/msm/msm_fence.h
index ceb5b3d314b4..56061aa1959d 100644
--- a/drivers/gpu/drm/msm/msm_fence.h
+++ b/drivers/gpu/drm/msm/msm_fence.h
@@ -41,6 +41,6 @@ int msm_queue_fence_cb(struct msm_fence_context *fctx,
struct msm_fence_cb *cb, uint32_t fence);
void msm_update_fence(struct msm_fence_context *fctx, uint32_t fence);
-struct fence * msm_fence_alloc(struct msm_fence_context *fctx);
+struct dma_fence * msm_fence_alloc(struct msm_fence_context *fctx);
#endif
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index b6ac27e31929..d8bc59c7e261 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -225,16 +225,14 @@ int msm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
}
/* We don't use vmf->pgoff since that has the fake offset: */
- pgoff = ((unsigned long)vmf->virtual_address -
- vma->vm_start) >> PAGE_SHIFT;
+ pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
pfn = page_to_pfn(pages[pgoff]);
- VERB("Inserting %p pfn %lx, pa %lx", vmf->virtual_address,
+ VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
pfn, pfn << PAGE_SHIFT);
- ret = vm_insert_mixed(vma, (unsigned long)vmf->virtual_address,
- __pfn_to_pfn_t(pfn, PFN_DEV));
+ ret = vm_insert_mixed(vma, vmf->address, __pfn_to_pfn_t(pfn, PFN_DEV));
out_unlock:
mutex_unlock(&dev->struct_mutex);
@@ -296,12 +294,8 @@ put_iova(struct drm_gem_object *obj)
WARN_ON(!mutex_is_locked(&dev->struct_mutex));
for (id = 0; id < ARRAY_SIZE(msm_obj->domain); id++) {
- struct msm_mmu *mmu = priv->mmus[id];
- if (mmu && msm_obj->domain[id].iova) {
- uint32_t offset = msm_obj->domain[id].iova;
- mmu->funcs->unmap(mmu, offset, msm_obj->sgt, obj->size);
- msm_obj->domain[id].iova = 0;
- }
+ msm_gem_unmap_vma(priv->aspace[id],
+ &msm_obj->domain[id], msm_obj->sgt);
}
}
@@ -313,7 +307,7 @@ put_iova(struct drm_gem_object *obj)
* the refcnt counter needs to be atomic_t.
*/
int msm_gem_get_iova_locked(struct drm_gem_object *obj, int id,
- uint32_t *iova)
+ uint64_t *iova)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
int ret = 0;
@@ -326,16 +320,8 @@ int msm_gem_get_iova_locked(struct drm_gem_object *obj, int id,
return PTR_ERR(pages);
if (iommu_present(&platform_bus_type)) {
- struct msm_mmu *mmu = priv->mmus[id];
- uint32_t offset;
-
- if (WARN_ON(!mmu))
- return -EINVAL;
-
- offset = (uint32_t)mmap_offset(obj);
- ret = mmu->funcs->map(mmu, offset, msm_obj->sgt,
- obj->size, IOMMU_READ | IOMMU_WRITE);
- msm_obj->domain[id].iova = offset;
+ ret = msm_gem_map_vma(priv->aspace[id], &msm_obj->domain[id],
+ msm_obj->sgt, obj->size >> PAGE_SHIFT);
} else {
msm_obj->domain[id].iova = physaddr(obj);
}
@@ -348,7 +334,7 @@ int msm_gem_get_iova_locked(struct drm_gem_object *obj, int id,
}
/* get iova, taking a reference. Should have a matching put */
-int msm_gem_get_iova(struct drm_gem_object *obj, int id, uint32_t *iova)
+int msm_gem_get_iova(struct drm_gem_object *obj, int id, uint64_t *iova)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
int ret;
@@ -370,7 +356,7 @@ int msm_gem_get_iova(struct drm_gem_object *obj, int id, uint32_t *iova)
/* get iova without taking a reference, used in places where you have
* already done a 'msm_gem_get_iova()'.
*/
-uint32_t msm_gem_iova(struct drm_gem_object *obj, int id)
+uint64_t msm_gem_iova(struct drm_gem_object *obj, int id)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
WARN_ON(!msm_obj->domain[id].iova);
@@ -521,7 +507,7 @@ int msm_gem_sync_object(struct drm_gem_object *obj,
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
struct reservation_object_list *fobj;
- struct fence *fence;
+ struct dma_fence *fence;
int i, ret;
if (!exclusive) {
@@ -540,7 +526,7 @@ int msm_gem_sync_object(struct drm_gem_object *obj,
fence = reservation_object_get_excl(msm_obj->resv);
/* don't need to wait on our own fences, since ring is fifo */
if (fence && (fence->context != fctx->context)) {
- ret = fence_wait(fence, true);
+ ret = dma_fence_wait(fence, true);
if (ret)
return ret;
}
@@ -553,7 +539,7 @@ int msm_gem_sync_object(struct drm_gem_object *obj,
fence = rcu_dereference_protected(fobj->shared[i],
reservation_object_held(msm_obj->resv));
if (fence->context != fctx->context) {
- ret = fence_wait(fence, true);
+ ret = dma_fence_wait(fence, true);
if (ret)
return ret;
}
@@ -563,7 +549,7 @@ int msm_gem_sync_object(struct drm_gem_object *obj,
}
void msm_gem_move_to_active(struct drm_gem_object *obj,
- struct msm_gpu *gpu, bool exclusive, struct fence *fence)
+ struct msm_gpu *gpu, bool exclusive, struct dma_fence *fence)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED);
@@ -616,10 +602,10 @@ int msm_gem_cpu_fini(struct drm_gem_object *obj)
}
#ifdef CONFIG_DEBUG_FS
-static void describe_fence(struct fence *fence, const char *type,
+static void describe_fence(struct dma_fence *fence, const char *type,
struct seq_file *m)
{
- if (!fence_is_signaled(fence))
+ if (!dma_fence_is_signaled(fence))
seq_printf(m, "\t%9s: %s %s seq %u\n", type,
fence->ops->get_driver_name(fence),
fence->ops->get_timeline_name(fence),
@@ -631,9 +617,11 @@ void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
struct msm_gem_object *msm_obj = to_msm_bo(obj);
struct reservation_object *robj = msm_obj->resv;
struct reservation_object_list *fobj;
- struct fence *fence;
+ struct msm_drm_private *priv = obj->dev->dev_private;
+ struct dma_fence *fence;
uint64_t off = drm_vma_node_start(&obj->vma_node);
const char *madv;
+ unsigned id;
WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
@@ -650,10 +638,15 @@ void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
break;
}
- seq_printf(m, "%08x: %c %2d (%2d) %08llx %p %zu%s\n",
+ seq_printf(m, "%08x: %c %2d (%2d) %08llx %p\t",
msm_obj->flags, is_active(msm_obj) ? 'A' : 'I',
obj->name, obj->refcount.refcount.counter,
- off, msm_obj->vaddr, obj->size, madv);
+ off, msm_obj->vaddr);
+
+ for (id = 0; id < priv->num_aspaces; id++)
+ seq_printf(m, " %08llx", msm_obj->domain[id].iova);
+
+ seq_printf(m, " %zu%s\n", obj->size, madv);
rcu_read_lock();
fobj = rcu_dereference(robj->fence);
@@ -761,7 +754,6 @@ static int msm_gem_new_impl(struct drm_device *dev,
{
struct msm_drm_private *priv = dev->dev_private;
struct msm_gem_object *msm_obj;
- unsigned sz;
bool use_vram = false;
switch (flags & MSM_BO_CACHE_MASK) {
@@ -783,16 +775,12 @@ static int msm_gem_new_impl(struct drm_device *dev,
if (WARN_ON(use_vram && !priv->vram.size))
return -EINVAL;
- sz = sizeof(*msm_obj);
- if (use_vram)
- sz += sizeof(struct drm_mm_node);
-
- msm_obj = kzalloc(sz, GFP_KERNEL);
+ msm_obj = kzalloc(sizeof(*msm_obj), GFP_KERNEL);
if (!msm_obj)
return -ENOMEM;
if (use_vram)
- msm_obj->vram_node = (void *)&msm_obj[1];
+ msm_obj->vram_node = &msm_obj->domain[0].node;
msm_obj->flags = flags;
msm_obj->madv = MSM_MADV_WILLNEED;
diff --git a/drivers/gpu/drm/msm/msm_gem.h b/drivers/gpu/drm/msm/msm_gem.h
index b2f13cfe945e..7d529516b332 100644
--- a/drivers/gpu/drm/msm/msm_gem.h
+++ b/drivers/gpu/drm/msm/msm_gem.h
@@ -24,6 +24,20 @@
/* Additional internal-use only BO flags: */
#define MSM_BO_STOLEN 0x10000000 /* try to use stolen/splash memory */
+struct msm_gem_address_space {
+ const char *name;
+ /* NOTE: mm managed at the page level, size is in # of pages
+ * and position mm_node->start is in # of pages:
+ */
+ struct drm_mm mm;
+ struct msm_mmu *mmu;
+};
+
+struct msm_gem_vma {
+ struct drm_mm_node node;
+ uint64_t iova;
+};
+
struct msm_gem_object {
struct drm_gem_object base;
@@ -61,10 +75,7 @@ struct msm_gem_object {
struct sg_table *sgt;
void *vaddr;
- struct {
- // XXX
- uint32_t iova;
- } domain[NUM_DOMAINS];
+ struct msm_gem_vma domain[NUM_DOMAINS];
/* normally (resv == &_resv) except for imported bo's */
struct reservation_object *resv;
@@ -104,7 +115,7 @@ struct msm_gem_submit {
struct list_head node; /* node in gpu submit_list */
struct list_head bo_list;
struct ww_acquire_ctx ticket;
- struct fence *fence;
+ struct dma_fence *fence;
struct pid *pid; /* submitting process */
bool valid; /* true if no cmdstream patching needed */
unsigned int nr_cmds;
@@ -112,13 +123,13 @@ struct msm_gem_submit {
struct {
uint32_t type;
uint32_t size; /* in dwords */
- uint32_t iova;
+ uint64_t iova;
uint32_t idx; /* cmdstream buffer idx in bos[] */
} *cmd; /* array of size nr_cmds */
struct {
uint32_t flags;
struct msm_gem_object *obj;
- uint32_t iova;
+ uint64_t iova;
} bos[0];
};
diff --git a/drivers/gpu/drm/msm/msm_gem_shrinker.c b/drivers/gpu/drm/msm/msm_gem_shrinker.c
index 192b2d3a79cb..ab1dd020eb04 100644
--- a/drivers/gpu/drm/msm/msm_gem_shrinker.c
+++ b/drivers/gpu/drm/msm/msm_gem_shrinker.c
@@ -18,33 +18,24 @@
#include "msm_drv.h"
#include "msm_gem.h"
-static bool mutex_is_locked_by(struct mutex *mutex, struct task_struct *task)
+static bool msm_gem_shrinker_lock(struct drm_device *dev, bool *unlock)
{
- if (!mutex_is_locked(mutex))
+ switch (mutex_trylock_recursive(&dev->struct_mutex)) {
+ case MUTEX_TRYLOCK_FAILED:
return false;
-#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_MUTEXES)
- return mutex->owner == task;
-#else
- /* Since UP may be pre-empted, we cannot assume that we own the lock */
- return false;
-#endif
-}
+ case MUTEX_TRYLOCK_SUCCESS:
+ *unlock = true;
+ return true;
-static bool msm_gem_shrinker_lock(struct drm_device *dev, bool *unlock)
-{
- if (!mutex_trylock(&dev->struct_mutex)) {
- if (!mutex_is_locked_by(&dev->struct_mutex, current))
- return false;
+ case MUTEX_TRYLOCK_RECURSIVE:
*unlock = false;
- } else {
- *unlock = true;
+ return true;
}
- return true;
+ BUG();
}
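The tri-state result of mutex_trylock_recursive() maps onto the two legal shrinker situations: either we took struct_mutex ourselves and must unlock it, or we were invoked from a path that already holds it and must not. A hedged sketch of how a callback is expected to consume the unlock flag (body elided; only names from this file are used):

static unsigned long
example_shrinker_count(struct drm_device *dev)
{
	unsigned long count = 0;
	bool unlock;

	if (!msm_gem_shrinker_lock(dev, &unlock))
		return 0; /* another task holds struct_mutex; skip this pass */

	/* ... walk the inactive list and count purgeable pages ... */

	if (unlock)
		mutex_unlock(&dev->struct_mutex);

	return count;
}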
-
static unsigned long
msm_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
{
diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c
index b6a0f37a65f3..166e84e4f0d4 100644
--- a/drivers/gpu/drm/msm/msm_gem_submit.c
+++ b/drivers/gpu/drm/msm/msm_gem_submit.c
@@ -60,7 +60,7 @@ static struct msm_gem_submit *submit_create(struct drm_device *dev,
void msm_gem_submit_free(struct msm_gem_submit *submit)
{
- fence_put(submit->fence);
+ dma_fence_put(submit->fence);
list_del(&submit->node);
put_pid(submit->pid);
kfree(submit);
@@ -241,7 +241,7 @@ static int submit_pin_objects(struct msm_gem_submit *submit)
for (i = 0; i < submit->nr_bos; i++) {
struct msm_gem_object *msm_obj = submit->bos[i].obj;
- uint32_t iova;
+ uint64_t iova;
/* if locking succeeded, pin bo: */
ret = msm_gem_get_iova_locked(&msm_obj->base,
@@ -266,7 +266,7 @@ static int submit_pin_objects(struct msm_gem_submit *submit)
}
static int submit_bo(struct msm_gem_submit *submit, uint32_t idx,
- struct msm_gem_object **obj, uint32_t *iova, bool *valid)
+ struct msm_gem_object **obj, uint64_t *iova, bool *valid)
{
if (idx >= submit->nr_bos) {
DRM_ERROR("invalid buffer index: %u (out of %u)\n",
@@ -312,7 +312,8 @@ static int submit_reloc(struct msm_gem_submit *submit, struct msm_gem_object *ob
struct drm_msm_gem_submit_reloc submit_reloc;
void __user *userptr =
u64_to_user_ptr(relocs + (i * sizeof(submit_reloc)));
- uint32_t iova, off;
+ uint32_t off;
+ uint64_t iova;
bool valid;
ret = copy_from_user(&submit_reloc, userptr, sizeof(submit_reloc));
@@ -380,7 +381,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
struct msm_file_private *ctx = file->driver_priv;
struct msm_gem_submit *submit;
struct msm_gpu *gpu = priv->gpu;
- struct fence *in_fence = NULL;
+ struct dma_fence *in_fence = NULL;
struct sync_file *sync_file = NULL;
int out_fence_fd = -1;
unsigned i;
@@ -439,7 +440,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
*/
if (in_fence->context != gpu->fctx->context) {
- ret = fence_wait(in_fence, true);
+ ret = dma_fence_wait(in_fence, true);
if (ret)
goto out;
}
@@ -461,7 +462,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
void __user *userptr =
u64_to_user_ptr(args->cmds + (i * sizeof(submit_cmd)));
struct msm_gem_object *msm_obj;
- uint32_t iova;
+ uint64_t iova;
ret = copy_from_user(&submit_cmd, userptr, sizeof(submit_cmd));
if (ret) {
@@ -542,7 +543,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
out:
if (in_fence)
- fence_put(in_fence);
+ dma_fence_put(in_fence);
submit_cleanup(submit);
if (ret)
msm_gem_submit_free(submit);
diff --git a/drivers/gpu/drm/msm/msm_gem_vma.c b/drivers/gpu/drm/msm/msm_gem_vma.c
new file mode 100644
index 000000000000..a311d26ccb21
--- /dev/null
+++ b/drivers/gpu/drm/msm/msm_gem_vma.c
@@ -0,0 +1,90 @@
+/*
+ * Copyright (C) 2016 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "msm_drv.h"
+#include "msm_gem.h"
+#include "msm_mmu.h"
+
+void
+msm_gem_unmap_vma(struct msm_gem_address_space *aspace,
+ struct msm_gem_vma *vma, struct sg_table *sgt)
+{
+ if (!vma->iova)
+ return;
+
+ if (aspace->mmu) {
+ unsigned size = vma->node.size << PAGE_SHIFT;
+ aspace->mmu->funcs->unmap(aspace->mmu, vma->iova, sgt, size);
+ }
+
+ drm_mm_remove_node(&vma->node);
+
+ vma->iova = 0;
+}
+
+int
+msm_gem_map_vma(struct msm_gem_address_space *aspace,
+ struct msm_gem_vma *vma, struct sg_table *sgt, int npages)
+{
+ int ret;
+
+ if (WARN_ON(drm_mm_node_allocated(&vma->node)))
+ return 0;
+
+ ret = drm_mm_insert_node(&aspace->mm, &vma->node, npages,
+ 0, DRM_MM_SEARCH_DEFAULT);
+ if (ret)
+ return ret;
+
+ vma->iova = vma->node.start << PAGE_SHIFT;
+
+ if (aspace->mmu) {
+ unsigned size = npages << PAGE_SHIFT;
+ ret = aspace->mmu->funcs->map(aspace->mmu, vma->iova, sgt,
+ size, IOMMU_READ | IOMMU_WRITE);
+ }
+
+ return ret;
+}
+
+void
+msm_gem_address_space_destroy(struct msm_gem_address_space *aspace)
+{
+ drm_mm_takedown(&aspace->mm);
+ if (aspace->mmu)
+ aspace->mmu->funcs->destroy(aspace->mmu);
+ kfree(aspace);
+}
+
+struct msm_gem_address_space *
+msm_gem_address_space_create(struct device *dev, struct iommu_domain *domain,
+ const char *name)
+{
+ struct msm_gem_address_space *aspace;
+
+ aspace = kzalloc(sizeof(*aspace), GFP_KERNEL);
+ if (!aspace)
+ return ERR_PTR(-ENOMEM);
+
+ aspace->name = name;
+ aspace->mmu = msm_iommu_new(dev, domain);
+
+ drm_mm_init(&aspace->mm, (domain->geometry.aperture_start >> PAGE_SHIFT),
+ (domain->geometry.aperture_end >> PAGE_SHIFT) - 1);
+
+ return aspace;
+}
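A hedged sketch of the intended lifecycle, using only the four functions defined above (dev, domain, sgt, npages and program_hw are placeholders assumed to come from the caller; error handling abbreviated):

struct msm_gem_address_space *aspace;
struct msm_gem_vma vma = {0};
int ret;

aspace = msm_gem_address_space_create(dev, domain, "example");
if (IS_ERR(aspace))
	return PTR_ERR(aspace);

ret = msm_gem_map_vma(aspace, &vma, sgt, npages);
if (ret == 0)
	program_hw(vma.iova); /* placeholder: the device-visible address */

msm_gem_unmap_vma(aspace, &vma, sgt); /* also removes the mm node */
msm_gem_address_space_destroy(aspace);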
diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c
index 5bb09838b5ae..b28527a65d09 100644
--- a/drivers/gpu/drm/msm/msm_gpu.c
+++ b/drivers/gpu/drm/msm/msm_gpu.c
@@ -91,21 +91,20 @@ static int disable_pwrrail(struct msm_gpu *gpu)
static int enable_clk(struct msm_gpu *gpu)
{
- struct clk *rate_clk = NULL;
int i;
- /* NOTE: kgsl_pwrctrl_clk() ignores grp_clks[0].. */
- for (i = ARRAY_SIZE(gpu->grp_clks) - 1; i > 0; i--) {
- if (gpu->grp_clks[i]) {
- clk_prepare(gpu->grp_clks[i]);
- rate_clk = gpu->grp_clks[i];
- }
- }
+ if (gpu->grp_clks[0] && gpu->fast_rate)
+ clk_set_rate(gpu->grp_clks[0], gpu->fast_rate);
- if (rate_clk && gpu->fast_rate)
- clk_set_rate(rate_clk, gpu->fast_rate);
+ /* Set the RBBM timer rate to 19.2 MHz */
+ if (gpu->grp_clks[2])
+ clk_set_rate(gpu->grp_clks[2], 19200000);
- for (i = ARRAY_SIZE(gpu->grp_clks) - 1; i > 0; i--)
+ for (i = ARRAY_SIZE(gpu->grp_clks) - 1; i >= 0; i--)
+ if (gpu->grp_clks[i])
+ clk_prepare(gpu->grp_clks[i]);
+
+ for (i = ARRAY_SIZE(gpu->grp_clks) - 1; i >= 0; i--)
if (gpu->grp_clks[i])
clk_enable(gpu->grp_clks[i]);
@@ -114,24 +113,22 @@ static int enable_clk(struct msm_gpu *gpu)
static int disable_clk(struct msm_gpu *gpu)
{
- struct clk *rate_clk = NULL;
int i;
- /* NOTE: kgsl_pwrctrl_clk() ignores grp_clks[0].. */
- for (i = ARRAY_SIZE(gpu->grp_clks) - 1; i > 0; i--) {
- if (gpu->grp_clks[i]) {
+ for (i = ARRAY_SIZE(gpu->grp_clks) - 1; i >= 0; i--)
+ if (gpu->grp_clks[i])
clk_disable(gpu->grp_clks[i]);
- rate_clk = gpu->grp_clks[i];
- }
- }
-
- if (rate_clk && gpu->slow_rate)
- clk_set_rate(rate_clk, gpu->slow_rate);
- for (i = ARRAY_SIZE(gpu->grp_clks) - 1; i > 0; i--)
+ for (i = ARRAY_SIZE(gpu->grp_clks) - 1; i >= 0; i--)
if (gpu->grp_clks[i])
clk_unprepare(gpu->grp_clks[i]);
+ if (gpu->grp_clks[0] && gpu->slow_rate)
+ clk_set_rate(gpu->grp_clks[0], gpu->slow_rate);
+
+ if (gpu->grp_clks[2])
+ clk_set_rate(gpu->grp_clks[2], 0);
+
return 0;
}
@@ -476,7 +473,7 @@ static void retire_submits(struct msm_gpu *gpu)
submit = list_first_entry(&gpu->submit_list,
struct msm_gem_submit, node);
- if (fence_is_signaled(submit->fence)) {
+ if (dma_fence_is_signaled(submit->fence)) {
retire_submit(gpu, submit);
} else {
break;
@@ -528,7 +525,7 @@ void msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
for (i = 0; i < submit->nr_bos; i++) {
struct msm_gem_object *msm_obj = submit->bos[i].obj;
- uint32_t iova;
+ uint64_t iova;
/* can't happen yet.. but when we add 2d support we'll have
* to deal w/ cross-ring synchronization:
@@ -563,8 +560,8 @@ static irqreturn_t irq_handler(int irq, void *data)
}
static const char *clk_names[] = {
- "src_clk", "core_clk", "iface_clk", "mem_clk", "mem_iface_clk",
- "alt_mem_iface_clk",
+ "core_clk", "iface_clk", "rbbmtimer_clk", "mem_clk",
+ "mem_iface_clk", "alt_mem_iface_clk",
};
int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
@@ -656,12 +653,17 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
*/
iommu = iommu_domain_alloc(&platform_bus_type);
if (iommu) {
+ /* TODO 32b vs 64b address space.. */
+ iommu->geometry.aperture_start = SZ_16M;
+ iommu->geometry.aperture_end = 0xffffffff;
+
dev_info(drm->dev, "%s: using IOMMU\n", name);
- gpu->mmu = msm_iommu_new(&pdev->dev, iommu);
- if (IS_ERR(gpu->mmu)) {
- ret = PTR_ERR(gpu->mmu);
+ gpu->aspace = msm_gem_address_space_create(&pdev->dev,
+ iommu, "gpu");
+ if (IS_ERR(gpu->aspace)) {
+ ret = PTR_ERR(gpu->aspace);
dev_err(drm->dev, "failed to init iommu: %d\n", ret);
- gpu->mmu = NULL;
+ gpu->aspace = NULL;
iommu_domain_free(iommu);
goto fail;
}
@@ -669,7 +671,7 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
} else {
dev_info(drm->dev, "%s: no IOMMU, fallback to VRAM carveout!\n", name);
}
- gpu->id = msm_register_mmu(drm, gpu->mmu);
+ gpu->id = msm_register_address_space(drm, gpu->aspace);
/* Create ringbuffer: */
@@ -705,8 +707,8 @@ void msm_gpu_cleanup(struct msm_gpu *gpu)
msm_ringbuffer_destroy(gpu->rb);
}
- if (gpu->mmu)
- gpu->mmu->funcs->destroy(gpu->mmu);
+ if (gpu->aspace)
+ msm_gem_address_space_destroy(gpu->aspace);
if (gpu->fctx)
msm_fence_context_free(gpu->fctx);
diff --git a/drivers/gpu/drm/msm/msm_gpu.h b/drivers/gpu/drm/msm/msm_gpu.h
index d61d98a6e047..c4c39d3272c7 100644
--- a/drivers/gpu/drm/msm/msm_gpu.h
+++ b/drivers/gpu/drm/msm/msm_gpu.h
@@ -50,7 +50,7 @@ struct msm_gpu_funcs {
void (*submit)(struct msm_gpu *gpu, struct msm_gem_submit *submit,
struct msm_file_private *ctx);
void (*flush)(struct msm_gpu *gpu);
- void (*idle)(struct msm_gpu *gpu);
+ bool (*idle)(struct msm_gpu *gpu);
irqreturn_t (*irq)(struct msm_gpu *irq);
uint32_t (*last_fence)(struct msm_gpu *gpu);
void (*recover)(struct msm_gpu *gpu);
@@ -80,7 +80,7 @@ struct msm_gpu {
/* ringbuffer: */
struct msm_ringbuffer *rb;
- uint32_t rb_iova;
+ uint64_t rb_iova;
/* list of GEM active objects: */
struct list_head active_list;
@@ -98,7 +98,7 @@ struct msm_gpu {
void __iomem *mmio;
int irq;
- struct msm_mmu *mmu;
+ struct msm_gem_address_space *aspace;
int id;
/* Power Control: */
@@ -154,6 +154,45 @@ static inline u32 gpu_read(struct msm_gpu *gpu, u32 reg)
return msm_readl(gpu->mmio + (reg << 2));
}
+static inline void gpu_rmw(struct msm_gpu *gpu, u32 reg, u32 mask, u32 or)
+{
+ uint32_t val = gpu_read(gpu, reg);
+
+ val &= ~mask;
+ gpu_write(gpu, reg, val | or);
+}
+
+static inline u64 gpu_read64(struct msm_gpu *gpu, u32 lo, u32 hi)
+{
+ u64 val;
+
+ /*
+ * Why not a readq here? Two reasons: 1) many of the LO registers are
+ * not quad word aligned and 2) the GPU hardware designers have a bit
+ * of a history of putting registers where they fit, especially in
+ * spins. The longer a GPU family goes the higher the chance that
+ * we'll get burned. We could do a series of validity checks if we
+ * wanted to, but really is a readq() that much better? Nah.
+ */
+
+ /*
+ * For some lo/hi registers (like perfcounters), the hi value is latched
+ * when the lo is read, so make sure to read the lo first to trigger
+ * that.
+ */
+ val = (u64) msm_readl(gpu->mmio + (lo << 2));
+ val |= ((u64) msm_readl(gpu->mmio + (hi << 2)) << 32);
+
+ return val;
+}
+
+static inline void gpu_write64(struct msm_gpu *gpu, u32 lo, u32 hi, u64 val)
+{
+ /* Why not a writeq here? Read the screed above */
+ msm_writel(lower_32_bits(val), gpu->mmio + (lo << 2));
+ msm_writel(upper_32_bits(val), gpu->mmio + (hi << 2));
+}
+
int msm_gpu_pm_suspend(struct msm_gpu *gpu);
int msm_gpu_pm_resume(struct msm_gpu *gpu);
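The lo-before-hi ordering in gpu_read64() matters because, per the comment above, some counters latch the hi half when lo is read. A stand-alone model of composing the 64-bit value from two 32-bit halves (register access replaced by plain parameters; values invented):

#include <stdint.h>
#include <stdio.h>

static uint64_t read64_model(uint32_t lo, uint32_t hi)
{
	uint64_t val = (uint64_t)lo; /* lo must be sampled first */

	val |= (uint64_t)hi << 32;
	return val;
}

int main(void)
{
	/* prints 1deadbeef */
	printf("%llx\n", (unsigned long long)read64_model(0xdeadbeef, 0x1));
	return 0;
}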
diff --git a/drivers/gpu/drm/msm/msm_iommu.c b/drivers/gpu/drm/msm/msm_iommu.c
index 3a294d0da3a0..61aaaa1de6eb 100644
--- a/drivers/gpu/drm/msm/msm_iommu.c
+++ b/drivers/gpu/drm/msm/msm_iommu.c
@@ -45,13 +45,13 @@ static void msm_iommu_detach(struct msm_mmu *mmu, const char * const *names,
iommu_detach_device(iommu->domain, mmu->dev);
}
-static int msm_iommu_map(struct msm_mmu *mmu, uint32_t iova,
+static int msm_iommu_map(struct msm_mmu *mmu, uint64_t iova,
struct sg_table *sgt, unsigned len, int prot)
{
struct msm_iommu *iommu = to_msm_iommu(mmu);
struct iommu_domain *domain = iommu->domain;
struct scatterlist *sg;
- unsigned int da = iova;
+ unsigned long da = iova;
unsigned int i, j;
int ret;
@@ -62,7 +62,7 @@ static int msm_iommu_map(struct msm_mmu *mmu, uint32_t iova,
dma_addr_t pa = sg_phys(sg) - sg->offset;
size_t bytes = sg->length + sg->offset;
- VERB("map[%d]: %08x %08lx(%zx)", i, da, (unsigned long)pa, bytes);
+ VERB("map[%d]: %08lx %08lx(%zx)", i, da, (unsigned long)pa, bytes);
ret = iommu_map(domain, da, pa, bytes, prot);
if (ret)
@@ -84,13 +84,13 @@ fail:
return ret;
}
-static int msm_iommu_unmap(struct msm_mmu *mmu, uint32_t iova,
+static int msm_iommu_unmap(struct msm_mmu *mmu, uint64_t iova,
struct sg_table *sgt, unsigned len)
{
struct msm_iommu *iommu = to_msm_iommu(mmu);
struct iommu_domain *domain = iommu->domain;
struct scatterlist *sg;
- unsigned int da = iova;
+ unsigned long da = iova;
int i;
for_each_sg(sgt->sgl, sg, sgt->nents, i) {
@@ -101,7 +101,7 @@ static int msm_iommu_unmap(struct msm_mmu *mmu, uint32_t iova,
if (unmapped < bytes)
return unmapped;
- VERB("unmap[%d]: %08x(%zx)", i, da, bytes);
+ VERB("unmap[%d]: %08lx(%zx)", i, da, bytes);
BUG_ON(!PAGE_ALIGNED(bytes));
diff --git a/drivers/gpu/drm/msm/msm_kms.h b/drivers/gpu/drm/msm/msm_kms.h
index 40e41e5cdbc6..e470f4cf8f76 100644
--- a/drivers/gpu/drm/msm/msm_kms.h
+++ b/drivers/gpu/drm/msm/msm_kms.h
@@ -40,6 +40,8 @@ struct msm_kms_funcs {
irqreturn_t (*irq)(struct msm_kms *kms);
int (*enable_vblank)(struct msm_kms *kms, struct drm_crtc *crtc);
void (*disable_vblank)(struct msm_kms *kms, struct drm_crtc *crtc);
+ /* swap global atomic state: */
+ void (*swap_state)(struct msm_kms *kms, struct drm_atomic_state *state);
/* modeset, bracketing atomic_commit(): */
void (*prepare_commit)(struct msm_kms *kms, struct drm_atomic_state *state);
void (*complete_commit)(struct msm_kms *kms, struct drm_atomic_state *state);
@@ -56,6 +58,11 @@ struct msm_kms_funcs {
bool is_cmd_mode);
/* cleanup: */
void (*destroy)(struct msm_kms *kms);
+#ifdef CONFIG_DEBUG_FS
+ /* debugfs: */
+ int (*debugfs_init)(struct msm_kms *kms, struct drm_minor *minor);
+ void (*debugfs_cleanup)(struct msm_kms *kms, struct drm_minor *minor);
+#endif
};
struct msm_kms {
@@ -65,6 +72,18 @@ struct msm_kms {
int irq;
};
+/**
+ * Subclass of drm_atomic_state, to allow kms backend to have driver
+ * private global state. The kms backend can do whatever it wants
+ * with the ->state ptr. On ->atomic_state_clear() the ->state ptr
+ * is kfree'd and set back to NULL.
+ */
+struct msm_kms_state {
+ struct drm_atomic_state base;
+ void *state;
+};
+#define to_kms_state(x) container_of(x, struct msm_kms_state, base)
+
static inline void msm_kms_init(struct msm_kms *kms,
const struct msm_kms_funcs *funcs)
{
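A backend plugs its private state in through ->swap_state(), which msm_atomic_commit() calls while state_lock is still held. A hedged sketch of such an implementation (struct my_state and the current-state accessors are invented placeholders, not real driver API):

static void example_swap_state(struct msm_kms *kms,
		struct drm_atomic_state *state)
{
	struct my_state *tmp = get_current_state(kms); /* placeholder */

	/* the duplicated state becomes live; the old live state rides
	 * along in the atomic state and is kfree'd by atomic_state_free */
	set_current_state(kms, to_kms_state(state)->state);
	to_kms_state(state)->state = tmp;
}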
diff --git a/drivers/gpu/drm/msm/msm_mmu.h b/drivers/gpu/drm/msm/msm_mmu.h
index b8ca9a0e9170..f85c879e68d2 100644
--- a/drivers/gpu/drm/msm/msm_mmu.h
+++ b/drivers/gpu/drm/msm/msm_mmu.h
@@ -23,9 +23,9 @@
struct msm_mmu_funcs {
int (*attach)(struct msm_mmu *mmu, const char * const *names, int cnt);
void (*detach)(struct msm_mmu *mmu, const char * const *names, int cnt);
- int (*map)(struct msm_mmu *mmu, uint32_t iova, struct sg_table *sgt,
+ int (*map)(struct msm_mmu *mmu, uint64_t iova, struct sg_table *sgt,
unsigned len, int prot);
- int (*unmap)(struct msm_mmu *mmu, uint32_t iova, struct sg_table *sgt,
+ int (*unmap)(struct msm_mmu *mmu, uint64_t iova, struct sg_table *sgt,
unsigned len);
void (*destroy)(struct msm_mmu *mmu);
};
diff --git a/drivers/gpu/drm/msm/msm_rd.c b/drivers/gpu/drm/msm/msm_rd.c
index 8487f461f05f..6607456dc626 100644
--- a/drivers/gpu/drm/msm/msm_rd.c
+++ b/drivers/gpu/drm/msm/msm_rd.c
@@ -289,7 +289,7 @@ void msm_rd_debugfs_cleanup(struct drm_minor *minor)
static void snapshot_buf(struct msm_rd_state *rd,
struct msm_gem_submit *submit, int idx,
- uint32_t iova, uint32_t size)
+ uint64_t iova, uint32_t size)
{
struct msm_gem_object *obj = submit->bos[idx].obj;
const char *buf;
@@ -306,7 +306,7 @@ static void snapshot_buf(struct msm_rd_state *rd,
}
rd_write_section(rd, RD_GPUADDR,
- (uint32_t[2]){ iova, size }, 8);
+ (uint32_t[3]){ iova, size, iova >> 32 }, 12);
rd_write_section(rd, RD_BUFFER_CONTENTS, buf, size);
msm_gem_put_vaddr_locked(&obj->base);
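With iova widened to 64 bits, the RD_GPUADDR section grows from two dwords to three, appending the high half last so an old parser still finds { lo, size } where it expects them. A stand-alone sketch of decoding the new layout (sample values invented):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* layout written by snapshot_buf(): { iova_lo, size, iova_hi } */
	uint32_t section[3] = { 0xbeef0000, 4096, 0x1 };

	uint64_t iova = ((uint64_t)section[2] << 32) | section[0];
	uint32_t size = section[1];

	printf("iova=%llx size=%u\n", (unsigned long long)iova, size);
	return 0;
}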
diff --git a/drivers/gpu/drm/mxsfb/Kconfig b/drivers/gpu/drm/mxsfb/Kconfig
new file mode 100644
index 000000000000..e9a8d90e6723
--- /dev/null
+++ b/drivers/gpu/drm/mxsfb/Kconfig
@@ -0,0 +1,19 @@
+config DRM_MXS
+ bool
+ help
+ Choose this option to select drivers for MXS FB devices
+
+config DRM_MXSFB
+ tristate "i.MX23/i.MX28/i.MX6SX MXSFB LCD controller"
+ depends on DRM && OF
+ depends on COMMON_CLK
+ select DRM_MXS
+ select DRM_KMS_HELPER
+ select DRM_KMS_FB_HELPER
+ select DRM_KMS_CMA_HELPER
+ select DRM_PANEL
+ help
+ Choose this option if you have an i.MX23/i.MX28/i.MX6SX MXSFB
+ LCD controller.
+
+ If M is selected the module will be called mxsfb.
diff --git a/drivers/gpu/drm/mxsfb/Makefile b/drivers/gpu/drm/mxsfb/Makefile
new file mode 100644
index 000000000000..857f3a4545ff
--- /dev/null
+++ b/drivers/gpu/drm/mxsfb/Makefile
@@ -0,0 +1,2 @@
+mxsfb-y := mxsfb_drv.o mxsfb_crtc.o mxsfb_out.o
+obj-$(CONFIG_DRM_MXSFB) += mxsfb.o
diff --git a/drivers/gpu/drm/mxsfb/mxsfb_crtc.c b/drivers/gpu/drm/mxsfb/mxsfb_crtc.c
new file mode 100644
index 000000000000..081890336ce7
--- /dev/null
+++ b/drivers/gpu/drm/mxsfb/mxsfb_crtc.c
@@ -0,0 +1,241 @@
+/*
+ * Copyright (C) 2016 Marek Vasut <marex@denx.de>
+ *
+ * This code is based on drivers/video/fbdev/mxsfb.c :
+ * Copyright (C) 2010 Juergen Beisert, Pengutronix
+ * Copyright (C) 2008-2009 Freescale Semiconductor, Inc. All Rights Reserved.
+ * Copyright (C) 2008 Embedded Alley Solutions, Inc All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_fb_helper.h>
+#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_of.h>
+#include <drm/drm_plane_helper.h>
+#include <drm/drm_simple_kms_helper.h>
+#include <linux/clk.h>
+#include <linux/iopoll.h>
+#include <linux/of_graph.h>
+#include <linux/platform_data/simplefb.h>
+#include <video/videomode.h>
+
+#include "mxsfb_drv.h"
+#include "mxsfb_regs.h"
+
+static u32 set_hsync_pulse_width(struct mxsfb_drm_private *mxsfb, u32 val)
+{
+ return (val & mxsfb->devdata->hs_wdth_mask) <<
+ mxsfb->devdata->hs_wdth_shift;
+}
+
+/* Setup the MXSFB registers for decoding the pixels out of the framebuffer */
+static int mxsfb_set_pixel_fmt(struct mxsfb_drm_private *mxsfb)
+{
+ struct drm_crtc *crtc = &mxsfb->pipe.crtc;
+ struct drm_device *drm = crtc->dev;
+ const u32 format = crtc->primary->state->fb->pixel_format;
+ u32 ctrl, ctrl1;
+
+ ctrl = CTRL_BYPASS_COUNT | CTRL_MASTER;
+
+ /*
+ * WARNING: The bus width, CTRL_SET_BUS_WIDTH(), is configured to
+ * match the selected mode here. This differs from the original
+ * MXSFB driver, which had the option to configure the bus width
+ * to an arbitrary value. This limitation should not pose an issue.
+ */
+
+ /* CTRL1 contains IRQ config and status bits, preserve those. */
+ ctrl1 = readl(mxsfb->base + LCDC_CTRL1);
+ ctrl1 &= CTRL1_CUR_FRAME_DONE_IRQ_EN | CTRL1_CUR_FRAME_DONE_IRQ;
+
+ switch (format) {
+ case DRM_FORMAT_RGB565:
+ dev_dbg(drm->dev, "Setting up RGB565 mode\n");
+ ctrl |= CTRL_SET_BUS_WIDTH(STMLCDIF_16BIT);
+ ctrl |= CTRL_SET_WORD_LENGTH(0);
+ ctrl1 |= CTRL1_SET_BYTE_PACKAGING(0xf);
+ break;
+ case DRM_FORMAT_XRGB8888:
+ dev_dbg(drm->dev, "Setting up XRGB8888 mode\n");
+ ctrl |= CTRL_SET_BUS_WIDTH(STMLCDIF_24BIT);
+ ctrl |= CTRL_SET_WORD_LENGTH(3);
+ /* Do not use packed pixels; use one pixel per word instead. */
+ ctrl1 |= CTRL1_SET_BYTE_PACKAGING(0x7);
+ break;
+ default:
+ dev_err(drm->dev, "Unhandled pixel format %08x\n", format);
+ return -EINVAL;
+ }
+
+ writel(ctrl1, mxsfb->base + LCDC_CTRL1);
+ writel(ctrl, mxsfb->base + LCDC_CTRL);
+
+ return 0;
+}
+
+static void mxsfb_enable_controller(struct mxsfb_drm_private *mxsfb)
+{
+ u32 reg;
+
+ if (mxsfb->clk_disp_axi)
+ clk_prepare_enable(mxsfb->clk_disp_axi);
+ clk_prepare_enable(mxsfb->clk);
+ mxsfb_enable_axi_clk(mxsfb);
+
+ /* If it was disabled, re-enable the mode again */
+ writel(CTRL_DOTCLK_MODE, mxsfb->base + LCDC_CTRL + REG_SET);
+
+ /* Enable the SYNC signals first, then the DMA engine */
+ reg = readl(mxsfb->base + LCDC_VDCTRL4);
+ reg |= VDCTRL4_SYNC_SIGNALS_ON;
+ writel(reg, mxsfb->base + LCDC_VDCTRL4);
+
+ writel(CTRL_RUN, mxsfb->base + LCDC_CTRL + REG_SET);
+}
+
+static void mxsfb_disable_controller(struct mxsfb_drm_private *mxsfb)
+{
+ u32 reg;
+
+ /*
+ * Even if we disable the controller here, it will still keep
+ * running until its FIFOs have run out of data.
+ */
+ writel(CTRL_DOTCLK_MODE, mxsfb->base + LCDC_CTRL + REG_CLR);
+
+ readl_poll_timeout(mxsfb->base + LCDC_CTRL, reg, !(reg & CTRL_RUN),
+ 0, 1000);
+
+ reg = readl(mxsfb->base + LCDC_VDCTRL4);
+ reg &= ~VDCTRL4_SYNC_SIGNALS_ON;
+ writel(reg, mxsfb->base + LCDC_VDCTRL4);
+
+ mxsfb_disable_axi_clk(mxsfb);
+
+ clk_disable_unprepare(mxsfb->clk);
+ if (mxsfb->clk_disp_axi)
+ clk_disable_unprepare(mxsfb->clk_disp_axi);
+}
+
+static void mxsfb_crtc_mode_set_nofb(struct mxsfb_drm_private *mxsfb)
+{
+ struct drm_display_mode *m = &mxsfb->pipe.crtc.state->adjusted_mode;
+ const u32 bus_flags = mxsfb->connector.display_info.bus_flags;
+ u32 vdctrl0, vsync_pulse_len, hsync_pulse_len;
+ int err;
+
+ /*
+ * It seems you can't re-program the controller while it is still
+ * running. This may lead to shifted pictures (FIFO issue?), so
+ * first stop the controller and drain its FIFOs.
+ */
+ mxsfb_enable_axi_clk(mxsfb);
+
+ /* Clear the FIFOs */
+ writel(CTRL1_FIFO_CLEAR, mxsfb->base + LCDC_CTRL1 + REG_SET);
+
+ err = mxsfb_set_pixel_fmt(mxsfb);
+ if (err)
+ return;
+
+ clk_set_rate(mxsfb->clk, m->crtc_clock * 1000);
+
+ writel(TRANSFER_COUNT_SET_VCOUNT(m->crtc_vdisplay) |
+ TRANSFER_COUNT_SET_HCOUNT(m->crtc_hdisplay),
+ mxsfb->base + mxsfb->devdata->transfer_count);
+
+ vsync_pulse_len = m->crtc_vsync_end - m->crtc_vsync_start;
+
+ vdctrl0 = VDCTRL0_ENABLE_PRESENT | /* Always in DOTCLOCK mode */
+ VDCTRL0_VSYNC_PERIOD_UNIT |
+ VDCTRL0_VSYNC_PULSE_WIDTH_UNIT |
+ VDCTRL0_SET_VSYNC_PULSE_WIDTH(vsync_pulse_len);
+ if (m->flags & DRM_MODE_FLAG_PHSYNC)
+ vdctrl0 |= VDCTRL0_HSYNC_ACT_HIGH;
+ if (m->flags & DRM_MODE_FLAG_PVSYNC)
+ vdctrl0 |= VDCTRL0_VSYNC_ACT_HIGH;
+ if (bus_flags & DRM_BUS_FLAG_DE_HIGH)
+ vdctrl0 |= VDCTRL0_ENABLE_ACT_HIGH;
+ if (bus_flags & DRM_BUS_FLAG_PIXDATA_NEGEDGE)
+ vdctrl0 |= VDCTRL0_DOTCLK_ACT_FALLING;
+
+ writel(vdctrl0, mxsfb->base + LCDC_VDCTRL0);
+
+ /* Frame length in lines. */
+ writel(m->crtc_vtotal, mxsfb->base + LCDC_VDCTRL1);
+
+ /* Line length in units of clocks or pixels. */
+ hsync_pulse_len = m->crtc_hsync_end - m->crtc_hsync_start;
+ writel(set_hsync_pulse_width(mxsfb, hsync_pulse_len) |
+ VDCTRL2_SET_HSYNC_PERIOD(m->crtc_htotal),
+ mxsfb->base + LCDC_VDCTRL2);
+
+ writel(SET_HOR_WAIT_CNT(m->crtc_hblank_end - m->crtc_hsync_end) |
+ SET_VERT_WAIT_CNT(m->crtc_vblank_end - m->crtc_vsync_end),
+ mxsfb->base + LCDC_VDCTRL3);
+
+ writel(SET_DOTCLK_H_VALID_DATA_CNT(m->hdisplay),
+ mxsfb->base + LCDC_VDCTRL4);
+
+ mxsfb_disable_axi_clk(mxsfb);
+}
+
+void mxsfb_crtc_enable(struct mxsfb_drm_private *mxsfb)
+{
+ mxsfb_crtc_mode_set_nofb(mxsfb);
+ mxsfb_enable_controller(mxsfb);
+}
+
+void mxsfb_crtc_disable(struct mxsfb_drm_private *mxsfb)
+{
+ mxsfb_disable_controller(mxsfb);
+}
+
+void mxsfb_plane_atomic_update(struct mxsfb_drm_private *mxsfb,
+ struct drm_plane_state *state)
+{
+ struct drm_simple_display_pipe *pipe = &mxsfb->pipe;
+ struct drm_crtc *crtc = &pipe->crtc;
+ struct drm_framebuffer *fb = pipe->plane.state->fb;
+ struct drm_pending_vblank_event *event;
+ struct drm_gem_cma_object *gem;
+
+ if (!crtc)
+ return;
+
+ spin_lock_irq(&crtc->dev->event_lock);
+ event = crtc->state->event;
+ if (event) {
+ crtc->state->event = NULL;
+
+ if (drm_crtc_vblank_get(crtc) == 0) {
+ drm_crtc_arm_vblank_event(crtc, event);
+ } else {
+ drm_crtc_send_vblank_event(crtc, event);
+ }
+ }
+ spin_unlock_irq(&crtc->dev->event_lock);
+
+ if (!fb)
+ return;
+
+ gem = drm_fb_cma_get_gem_obj(fb, 0);
+
+ mxsfb_enable_axi_clk(mxsfb);
+ writel(gem->paddr, mxsfb->base + mxsfb->devdata->next_buf);
+ mxsfb_disable_axi_clk(mxsfb);
+}
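The VDCTRL programming in mxsfb_crtc_mode_set_nofb() derives everything from the standard DRM mode timings. A stand-alone model of that arithmetic for a hypothetical 640x480 mode (values illustrative only; for typical modes crtc_hblank_end equals htotal, so the horizontal wait count reduces to the back porch):

#include <stdio.h>

int main(void)
{
	/* hypothetical mode timings */
	int hdisplay = 640, hsync_start = 656, hsync_end = 752, htotal = 800;
	int hblank_end = htotal; /* assumption: typical mode */

	int hsync_pulse_len = hsync_end - hsync_start; /* -> VDCTRL2 */
	int hor_wait_cnt = hblank_end - hsync_end;     /* -> VDCTRL3 */

	printf("pulse=%d wait=%d period=%d\n",
	       hsync_pulse_len, hor_wait_cnt, htotal);
	return 0;
}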
diff --git a/drivers/gpu/drm/mxsfb/mxsfb_drv.c b/drivers/gpu/drm/mxsfb/mxsfb_drv.c
new file mode 100644
index 000000000000..79a18bf48b54
--- /dev/null
+++ b/drivers/gpu/drm/mxsfb/mxsfb_drv.c
@@ -0,0 +1,444 @@
+/*
+ * Copyright (C) 2016 Marek Vasut <marex@denx.de>
+ *
+ * This code is based on drivers/video/fbdev/mxsfb.c :
+ * Copyright (C) 2010 Juergen Beisert, Pengutronix
+ * Copyright (C) 2008-2009 Freescale Semiconductor, Inc. All Rights Reserved.
+ * Copyright (C) 2008 Embedded Alley Solutions, Inc All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/spinlock.h>
+#include <linux/clk.h>
+#include <linux/component.h>
+#include <linux/list.h>
+#include <linux/of_device.h>
+#include <linux/of_graph.h>
+#include <linux/of_reserved_mem.h>
+#include <linux/pm_runtime.h>
+#include <linux/reservation.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_fb_helper.h>
+#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_of.h>
+#include <drm/drm_panel.h>
+#include <drm/drm_simple_kms_helper.h>
+
+#include "mxsfb_drv.h"
+#include "mxsfb_regs.h"
+
+enum mxsfb_devtype {
+ MXSFB_V3,
+ MXSFB_V4,
+};
+
+static const struct mxsfb_devdata mxsfb_devdata[] = {
+ [MXSFB_V3] = {
+ .transfer_count = LCDC_V3_TRANSFER_COUNT,
+ .cur_buf = LCDC_V3_CUR_BUF,
+ .next_buf = LCDC_V3_NEXT_BUF,
+ .debug0 = LCDC_V3_DEBUG0,
+ .hs_wdth_mask = 0xff,
+ .hs_wdth_shift = 24,
+ .ipversion = 3,
+ },
+ [MXSFB_V4] = {
+ .transfer_count = LCDC_V4_TRANSFER_COUNT,
+ .cur_buf = LCDC_V4_CUR_BUF,
+ .next_buf = LCDC_V4_NEXT_BUF,
+ .debug0 = LCDC_V4_DEBUG0,
+ .hs_wdth_mask = 0x3fff,
+ .hs_wdth_shift = 18,
+ .ipversion = 4,
+ },
+};
+
+static const uint32_t mxsfb_formats[] = {
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_RGB565
+};
+
+static struct mxsfb_drm_private *
+drm_pipe_to_mxsfb_drm_private(struct drm_simple_display_pipe *pipe)
+{
+ return container_of(pipe, struct mxsfb_drm_private, pipe);
+}
+
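+/* The AXI clock is optional; these helpers are no-ops when it is absent. */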
+void mxsfb_enable_axi_clk(struct mxsfb_drm_private *mxsfb)
+{
+ if (mxsfb->clk_axi)
+ clk_prepare_enable(mxsfb->clk_axi);
+}
+
+void mxsfb_disable_axi_clk(struct mxsfb_drm_private *mxsfb)
+{
+ if (mxsfb->clk_axi)
+ clk_disable_unprepare(mxsfb->clk_axi);
+}
+
+static const struct drm_mode_config_funcs mxsfb_mode_config_funcs = {
+ .fb_create = drm_fb_cma_create,
+ .atomic_check = drm_atomic_helper_check,
+ .atomic_commit = drm_atomic_helper_commit,
+};
+
+static void mxsfb_pipe_enable(struct drm_simple_display_pipe *pipe,
+ struct drm_crtc_state *crtc_state)
+{
+ struct mxsfb_drm_private *mxsfb = drm_pipe_to_mxsfb_drm_private(pipe);
+
+ mxsfb_crtc_enable(mxsfb);
+}
+
+static void mxsfb_pipe_disable(struct drm_simple_display_pipe *pipe)
+{
+ struct mxsfb_drm_private *mxsfb = drm_pipe_to_mxsfb_drm_private(pipe);
+
+ mxsfb_crtc_disable(mxsfb);
+}
+
+static void mxsfb_pipe_update(struct drm_simple_display_pipe *pipe,
+ struct drm_plane_state *plane_state)
+{
+ struct mxsfb_drm_private *mxsfb = drm_pipe_to_mxsfb_drm_private(pipe);
+
+ mxsfb_plane_atomic_update(mxsfb, plane_state);
+}
+
+static int mxsfb_pipe_prepare_fb(struct drm_simple_display_pipe *pipe,
+ struct drm_plane_state *plane_state)
+{
+ return drm_fb_cma_prepare_fb(&pipe->plane, plane_state);
+}
+
+static struct drm_simple_display_pipe_funcs mxsfb_funcs = {
+ .enable = mxsfb_pipe_enable,
+ .disable = mxsfb_pipe_disable,
+ .update = mxsfb_pipe_update,
+ .prepare_fb = mxsfb_pipe_prepare_fb,
+};
+
+static int mxsfb_load(struct drm_device *drm, unsigned long flags)
+{
+ struct platform_device *pdev = to_platform_device(drm->dev);
+ struct mxsfb_drm_private *mxsfb;
+ struct resource *res;
+ int ret;
+
+ mxsfb = devm_kzalloc(&pdev->dev, sizeof(*mxsfb), GFP_KERNEL);
+ if (!mxsfb)
+ return -ENOMEM;
+
+ drm->dev_private = mxsfb;
+ mxsfb->devdata = &mxsfb_devdata[pdev->id_entry->driver_data];
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ mxsfb->base = devm_ioremap_resource(drm->dev, res);
+ if (IS_ERR(mxsfb->base))
+ return PTR_ERR(mxsfb->base);
+
+ mxsfb->clk = devm_clk_get(drm->dev, NULL);
+ if (IS_ERR(mxsfb->clk))
+ return PTR_ERR(mxsfb->clk);
+
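+	/* The AXI bus clocks are optional, so a failed lookup is not fatal. */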
+ mxsfb->clk_axi = devm_clk_get(drm->dev, "axi");
+ if (IS_ERR(mxsfb->clk_axi))
+ mxsfb->clk_axi = NULL;
+
+ mxsfb->clk_disp_axi = devm_clk_get(drm->dev, "disp_axi");
+ if (IS_ERR(mxsfb->clk_disp_axi))
+ mxsfb->clk_disp_axi = NULL;
+
+ ret = dma_set_mask_and_coherent(drm->dev, DMA_BIT_MASK(32));
+ if (ret)
+ return ret;
+
+ pm_runtime_enable(drm->dev);
+
+ ret = drm_vblank_init(drm, drm->mode_config.num_crtc);
+ if (ret < 0) {
+ dev_err(drm->dev, "Failed to initialise vblank\n");
+ goto err_vblank;
+ }
+
+ /* Modeset init */
+ drm_mode_config_init(drm);
+
+ ret = mxsfb_create_output(drm);
+ if (ret < 0) {
+ dev_err(drm->dev, "Failed to create outputs\n");
+ goto err_vblank;
+ }
+
+ ret = drm_simple_display_pipe_init(drm, &mxsfb->pipe, &mxsfb_funcs,
+ mxsfb_formats, ARRAY_SIZE(mxsfb_formats),
+ &mxsfb->connector);
+ if (ret < 0) {
+ dev_err(drm->dev, "Cannot setup simple display pipe\n");
+ goto err_vblank;
+ }
+
+ ret = drm_panel_attach(mxsfb->panel, &mxsfb->connector);
+ if (ret) {
+ dev_err(drm->dev, "Cannot connect panel\n");
+ goto err_vblank;
+ }
+
+ drm->mode_config.min_width = MXSFB_MIN_XRES;
+ drm->mode_config.min_height = MXSFB_MIN_YRES;
+ drm->mode_config.max_width = MXSFB_MAX_XRES;
+ drm->mode_config.max_height = MXSFB_MAX_YRES;
+ drm->mode_config.funcs = &mxsfb_mode_config_funcs;
+
+ drm_mode_config_reset(drm);
+
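+	/* Installing the IRQ handler touches registers, so power up first. */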
+ pm_runtime_get_sync(drm->dev);
+ ret = drm_irq_install(drm, platform_get_irq(pdev, 0));
+ pm_runtime_put_sync(drm->dev);
+
+ if (ret < 0) {
+ dev_err(drm->dev, "Failed to install IRQ handler\n");
+ goto err_irq;
+ }
+
+ drm_kms_helper_poll_init(drm);
+
+ mxsfb->fbdev = drm_fbdev_cma_init(drm, 32, drm->mode_config.num_crtc,
+ drm->mode_config.num_connector);
+ if (IS_ERR(mxsfb->fbdev)) {
+ mxsfb->fbdev = NULL;
+ dev_err(drm->dev, "Failed to init FB CMA area\n");
+ goto err_cma;
+ }
+
+ platform_set_drvdata(pdev, drm);
+
+ drm_helper_hpd_irq_event(drm);
+
+ return 0;
+
+err_cma:
+ drm_irq_uninstall(drm);
+err_irq:
+ drm_panel_detach(mxsfb->panel);
+err_vblank:
+ pm_runtime_disable(drm->dev);
+
+ return ret;
+}
+
+static void mxsfb_unload(struct drm_device *drm)
+{
+ struct mxsfb_drm_private *mxsfb = drm->dev_private;
+
+ if (mxsfb->fbdev)
+ drm_fbdev_cma_fini(mxsfb->fbdev);
+
+ drm_kms_helper_poll_fini(drm);
+ drm_mode_config_cleanup(drm);
+ drm_vblank_cleanup(drm);
+
+ pm_runtime_get_sync(drm->dev);
+ drm_irq_uninstall(drm);
+ pm_runtime_put_sync(drm->dev);
+
+ drm->dev_private = NULL;
+
+ pm_runtime_disable(drm->dev);
+}
+
+static void mxsfb_lastclose(struct drm_device *drm)
+{
+ struct mxsfb_drm_private *mxsfb = drm->dev_private;
+
+ drm_fbdev_cma_restore_mode(mxsfb->fbdev);
+}
+
+static int mxsfb_enable_vblank(struct drm_device *drm, unsigned int crtc)
+{
+ struct mxsfb_drm_private *mxsfb = drm->dev_private;
+
+ /* Clear and enable VBLANK IRQ */
+ mxsfb_enable_axi_clk(mxsfb);
+ writel(CTRL1_CUR_FRAME_DONE_IRQ, mxsfb->base + LCDC_CTRL1 + REG_CLR);
+ writel(CTRL1_CUR_FRAME_DONE_IRQ_EN, mxsfb->base + LCDC_CTRL1 + REG_SET);
+ mxsfb_disable_axi_clk(mxsfb);
+
+ return 0;
+}
+
+static void mxsfb_disable_vblank(struct drm_device *drm, unsigned int crtc)
+{
+ struct mxsfb_drm_private *mxsfb = drm->dev_private;
+
+ /* Disable and clear VBLANK IRQ */
+ mxsfb_enable_axi_clk(mxsfb);
+ writel(CTRL1_CUR_FRAME_DONE_IRQ_EN, mxsfb->base + LCDC_CTRL1 + REG_CLR);
+ writel(CTRL1_CUR_FRAME_DONE_IRQ, mxsfb->base + LCDC_CTRL1 + REG_CLR);
+ mxsfb_disable_axi_clk(mxsfb);
+}
+
+static void mxsfb_irq_preinstall(struct drm_device *drm)
+{
+ mxsfb_disable_vblank(drm, 0);
+}
+
+static irqreturn_t mxsfb_irq_handler(int irq, void *data)
+{
+ struct drm_device *drm = data;
+ struct mxsfb_drm_private *mxsfb = drm->dev_private;
+ u32 reg;
+
+ mxsfb_enable_axi_clk(mxsfb);
+
+ reg = readl(mxsfb->base + LCDC_CTRL1);
+
+ if (reg & CTRL1_CUR_FRAME_DONE_IRQ)
+ drm_crtc_handle_vblank(&mxsfb->pipe.crtc);
+
+ writel(CTRL1_CUR_FRAME_DONE_IRQ, mxsfb->base + LCDC_CTRL1 + REG_CLR);
+
+ mxsfb_disable_axi_clk(mxsfb);
+
+ return IRQ_HANDLED;
+}
+
+static const struct file_operations fops = {
+ .owner = THIS_MODULE,
+ .open = drm_open,
+ .release = drm_release,
+ .unlocked_ioctl = drm_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = drm_compat_ioctl,
+#endif
+ .poll = drm_poll,
+ .read = drm_read,
+ .llseek = noop_llseek,
+ .mmap = drm_gem_cma_mmap,
+};
+
+static struct drm_driver mxsfb_driver = {
+ .driver_features = DRIVER_GEM | DRIVER_MODESET |
+ DRIVER_PRIME | DRIVER_ATOMIC |
+ DRIVER_HAVE_IRQ,
+ .lastclose = mxsfb_lastclose,
+ .irq_handler = mxsfb_irq_handler,
+ .irq_preinstall = mxsfb_irq_preinstall,
+ .irq_uninstall = mxsfb_irq_preinstall,
+ .get_vblank_counter = drm_vblank_no_hw_counter,
+ .enable_vblank = mxsfb_enable_vblank,
+ .disable_vblank = mxsfb_disable_vblank,
+ .gem_free_object = drm_gem_cma_free_object,
+ .gem_vm_ops = &drm_gem_cma_vm_ops,
+ .dumb_create = drm_gem_cma_dumb_create,
+ .dumb_map_offset = drm_gem_cma_dumb_map_offset,
+ .dumb_destroy = drm_gem_dumb_destroy,
+ .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
+ .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
+ .gem_prime_export = drm_gem_prime_export,
+ .gem_prime_import = drm_gem_prime_import,
+ .gem_prime_get_sg_table = drm_gem_cma_prime_get_sg_table,
+ .gem_prime_import_sg_table = drm_gem_cma_prime_import_sg_table,
+ .gem_prime_vmap = drm_gem_cma_prime_vmap,
+ .gem_prime_vunmap = drm_gem_cma_prime_vunmap,
+ .gem_prime_mmap = drm_gem_cma_prime_mmap,
+ .fops = &fops,
+ .name = "mxsfb-drm",
+ .desc = "MXSFB Controller DRM",
+ .date = "20160824",
+ .major = 1,
+ .minor = 0,
+};
+
+static const struct platform_device_id mxsfb_devtype[] = {
+ { .name = "imx23-fb", .driver_data = MXSFB_V3, },
+ { .name = "imx28-fb", .driver_data = MXSFB_V4, },
+ { .name = "imx6sx-fb", .driver_data = MXSFB_V4, },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(platform, mxsfb_devtype);
+
+static const struct of_device_id mxsfb_dt_ids[] = {
+ { .compatible = "fsl,imx23-lcdif", .data = &mxsfb_devtype[0], },
+ { .compatible = "fsl,imx28-lcdif", .data = &mxsfb_devtype[1], },
+ { .compatible = "fsl,imx6sx-lcdif", .data = &mxsfb_devtype[2], },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, mxsfb_dt_ids);
+
+static int mxsfb_probe(struct platform_device *pdev)
+{
+ struct drm_device *drm;
+ const struct of_device_id *of_id =
+ of_match_device(mxsfb_dt_ids, &pdev->dev);
+ int ret;
+
+ if (!pdev->dev.of_node)
+ return -ENODEV;
+
+ if (of_id)
+ pdev->id_entry = of_id->data;
+
+ drm = drm_dev_alloc(&mxsfb_driver, &pdev->dev);
+	if (IS_ERR(drm))
+		return PTR_ERR(drm);
+
+ ret = mxsfb_load(drm, 0);
+ if (ret)
+ goto err_free;
+
+ ret = drm_dev_register(drm, 0);
+ if (ret)
+ goto err_unload;
+
+ return 0;
+
+err_unload:
+ mxsfb_unload(drm);
+err_free:
+ drm_dev_unref(drm);
+
+ return ret;
+}
+
+static int mxsfb_remove(struct platform_device *pdev)
+{
+ struct drm_device *drm = platform_get_drvdata(pdev);
+
+ drm_dev_unregister(drm);
+ mxsfb_unload(drm);
+ drm_dev_unref(drm);
+
+ return 0;
+}
+
+static struct platform_driver mxsfb_platform_driver = {
+ .probe = mxsfb_probe,
+ .remove = mxsfb_remove,
+ .id_table = mxsfb_devtype,
+ .driver = {
+ .name = "mxsfb",
+ .of_match_table = mxsfb_dt_ids,
+ },
+};
+
+module_platform_driver(mxsfb_platform_driver);
+
+MODULE_AUTHOR("Marek Vasut <marex@denx.de>");
+MODULE_DESCRIPTION("Freescale MXS DRM/KMS driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/mxsfb/mxsfb_drv.h b/drivers/gpu/drm/mxsfb/mxsfb_drv.h
new file mode 100644
index 000000000000..5d0883fc805b
--- /dev/null
+++ b/drivers/gpu/drm/mxsfb/mxsfb_drv.h
@@ -0,0 +1,54 @@
+/*
+ * Copyright (C) 2016 Marek Vasut <marex@denx.de>
+ *
+ * i.MX23/i.MX28/i.MX6SX MXSFB LCD controller driver.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MXSFB_DRV_H__
+#define __MXSFB_DRV_H__
+
+struct mxsfb_devdata {
+ unsigned int transfer_count;
+ unsigned int cur_buf;
+ unsigned int next_buf;
+ unsigned int debug0;
+ unsigned int hs_wdth_mask;
+ unsigned int hs_wdth_shift;
+ unsigned int ipversion;
+};
+
+struct mxsfb_drm_private {
+ const struct mxsfb_devdata *devdata;
+
+ void __iomem *base; /* registers */
+ struct clk *clk;
+ struct clk *clk_axi;
+ struct clk *clk_disp_axi;
+
+ struct drm_simple_display_pipe pipe;
+ struct drm_connector connector;
+ struct drm_panel *panel;
+ struct drm_fbdev_cma *fbdev;
+};
+
+int mxsfb_setup_crtc(struct drm_device *dev);
+int mxsfb_create_output(struct drm_device *dev);
+
+void mxsfb_enable_axi_clk(struct mxsfb_drm_private *mxsfb);
+void mxsfb_disable_axi_clk(struct mxsfb_drm_private *mxsfb);
+
+void mxsfb_crtc_enable(struct mxsfb_drm_private *mxsfb);
+void mxsfb_crtc_disable(struct mxsfb_drm_private *mxsfb);
+void mxsfb_plane_atomic_update(struct mxsfb_drm_private *mxsfb,
+ struct drm_plane_state *state);
+
+#endif /* __MXSFB_DRV_H__ */
diff --git a/drivers/gpu/drm/mxsfb/mxsfb_out.c b/drivers/gpu/drm/mxsfb/mxsfb_out.c
new file mode 100644
index 000000000000..fa8d17399407
--- /dev/null
+++ b/drivers/gpu/drm/mxsfb/mxsfb_out.c
@@ -0,0 +1,131 @@
+/*
+ * Copyright (C) 2016 Marek Vasut <marex@denx.de>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/of_graph.h>
+
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_panel.h>
+#include <drm/drm_plane_helper.h>
+#include <drm/drm_simple_kms_helper.h>
+#include <drm/drmP.h>
+
+#include "mxsfb_drv.h"
+
+static struct mxsfb_drm_private *
+drm_connector_to_mxsfb_drm_private(struct drm_connector *connector)
+{
+ return container_of(connector, struct mxsfb_drm_private, connector);
+}
+
+static int mxsfb_panel_get_modes(struct drm_connector *connector)
+{
+ struct mxsfb_drm_private *mxsfb =
+ drm_connector_to_mxsfb_drm_private(connector);
+
+ if (mxsfb->panel)
+ return mxsfb->panel->funcs->get_modes(mxsfb->panel);
+
+ return 0;
+}
+
+static const struct
+drm_connector_helper_funcs mxsfb_panel_connector_helper_funcs = {
+ .get_modes = mxsfb_panel_get_modes,
+};
+
+static enum drm_connector_status
+mxsfb_panel_connector_detect(struct drm_connector *connector, bool force)
+{
+ struct mxsfb_drm_private *mxsfb =
+ drm_connector_to_mxsfb_drm_private(connector);
+
+ if (mxsfb->panel)
+ return connector_status_connected;
+
+ return connector_status_disconnected;
+}
+
+static void mxsfb_panel_connector_destroy(struct drm_connector *connector)
+{
+ struct mxsfb_drm_private *mxsfb =
+ drm_connector_to_mxsfb_drm_private(connector);
+
+ if (mxsfb->panel)
+ drm_panel_detach(mxsfb->panel);
+
+ drm_connector_unregister(connector);
+ drm_connector_cleanup(connector);
+}
+
+static const struct drm_connector_funcs mxsfb_panel_connector_funcs = {
+ .dpms = drm_atomic_helper_connector_dpms,
+ .detect = mxsfb_panel_connector_detect,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .destroy = mxsfb_panel_connector_destroy,
+ .reset = drm_atomic_helper_connector_reset,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+};
+
+static int mxsfb_attach_endpoint(struct drm_device *drm,
+ const struct of_endpoint *ep)
+{
+ struct mxsfb_drm_private *mxsfb = drm->dev_private;
+ struct device_node *np;
+ struct drm_panel *panel;
+ int ret = -EPROBE_DEFER;
+
+ np = of_graph_get_remote_port_parent(ep->local_node);
+ panel = of_drm_find_panel(np);
+ of_node_put(np);
+
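+	/* The panel may not have been probed yet; ask to be retried later. */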
+ if (!panel)
+ return -EPROBE_DEFER;
+
+ mxsfb->connector.dpms = DRM_MODE_DPMS_OFF;
+ mxsfb->connector.polled = 0;
+ drm_connector_helper_add(&mxsfb->connector,
+ &mxsfb_panel_connector_helper_funcs);
+ ret = drm_connector_init(drm, &mxsfb->connector,
+ &mxsfb_panel_connector_funcs,
+ DRM_MODE_CONNECTOR_Unknown);
+ if (!ret)
+ mxsfb->panel = panel;
+
+ return ret;
+}
+
+int mxsfb_create_output(struct drm_device *drm)
+{
+ struct device_node *ep_np = NULL;
+ struct of_endpoint ep;
+ int ret;
+
+ for_each_endpoint_of_node(drm->dev->of_node, ep_np) {
+ ret = of_graph_parse_endpoint(ep_np, &ep);
+ if (!ret)
+ ret = mxsfb_attach_endpoint(drm, &ep);
+
+ if (ret) {
+ of_node_put(ep_np);
+ return ret;
+ }
+ }
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/mxsfb/mxsfb_regs.h b/drivers/gpu/drm/mxsfb/mxsfb_regs.h
new file mode 100644
index 000000000000..31d62cd0d3d7
--- /dev/null
+++ b/drivers/gpu/drm/mxsfb/mxsfb_regs.h
@@ -0,0 +1,114 @@
+/*
+ * Copyright (C) 2010 Juergen Beisert, Pengutronix
+ * Copyright (C) 2016 Marek Vasut <marex@denx.de>
+ *
+ * i.MX23/i.MX28/i.MX6SX MXSFB LCD controller driver.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MXSFB_REGS_H__
+#define __MXSFB_REGS_H__
+
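+/*
+ * Most LCDIF registers have write-1-to-set and write-1-to-clear aliases
+ * at these fixed offsets from the base register address.
+ */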
+#define REG_SET 4
+#define REG_CLR 8
+
+#define LCDC_CTRL 0x00
+#define LCDC_CTRL1 0x10
+#define LCDC_V3_TRANSFER_COUNT 0x20
+#define LCDC_V4_TRANSFER_COUNT 0x30
+#define LCDC_V4_CUR_BUF 0x40
+#define LCDC_V4_NEXT_BUF 0x50
+#define LCDC_V3_CUR_BUF 0x30
+#define LCDC_V3_NEXT_BUF 0x40
+#define LCDC_VDCTRL0 0x70
+#define LCDC_VDCTRL1 0x80
+#define LCDC_VDCTRL2 0x90
+#define LCDC_VDCTRL3 0xa0
+#define LCDC_VDCTRL4 0xb0
+#define LCDC_V4_DEBUG0 0x1d0
+#define LCDC_V3_DEBUG0 0x1f0
+
+#define CTRL_SFTRST (1 << 31)
+#define CTRL_CLKGATE (1 << 30)
+#define CTRL_BYPASS_COUNT (1 << 19)
+#define CTRL_VSYNC_MODE (1 << 18)
+#define CTRL_DOTCLK_MODE (1 << 17)
+#define CTRL_DATA_SELECT (1 << 16)
+#define CTRL_SET_BUS_WIDTH(x) (((x) & 0x3) << 10)
+#define CTRL_GET_BUS_WIDTH(x) (((x) >> 10) & 0x3)
+#define CTRL_SET_WORD_LENGTH(x) (((x) & 0x3) << 8)
+#define CTRL_GET_WORD_LENGTH(x) (((x) >> 8) & 0x3)
+#define CTRL_MASTER (1 << 5)
+#define CTRL_DF16 (1 << 3)
+#define CTRL_DF18 (1 << 2)
+#define CTRL_DF24 (1 << 1)
+#define CTRL_RUN (1 << 0)
+
+#define CTRL1_FIFO_CLEAR (1 << 21)
+#define CTRL1_SET_BYTE_PACKAGING(x) (((x) & 0xf) << 16)
+#define CTRL1_GET_BYTE_PACKAGING(x) (((x) >> 16) & 0xf)
+#define CTRL1_CUR_FRAME_DONE_IRQ_EN (1 << 13)
+#define CTRL1_CUR_FRAME_DONE_IRQ (1 << 9)
+
+#define TRANSFER_COUNT_SET_VCOUNT(x) (((x) & 0xffff) << 16)
+#define TRANSFER_COUNT_GET_VCOUNT(x) (((x) >> 16) & 0xffff)
+#define TRANSFER_COUNT_SET_HCOUNT(x) ((x) & 0xffff)
+#define TRANSFER_COUNT_GET_HCOUNT(x) ((x) & 0xffff)
+
+#define VDCTRL0_ENABLE_PRESENT (1 << 28)
+#define VDCTRL0_VSYNC_ACT_HIGH (1 << 27)
+#define VDCTRL0_HSYNC_ACT_HIGH (1 << 26)
+#define VDCTRL0_DOTCLK_ACT_FALLING (1 << 25)
+#define VDCTRL0_ENABLE_ACT_HIGH (1 << 24)
+#define VDCTRL0_VSYNC_PERIOD_UNIT (1 << 21)
+#define VDCTRL0_VSYNC_PULSE_WIDTH_UNIT (1 << 20)
+#define VDCTRL0_HALF_LINE (1 << 19)
+#define VDCTRL0_HALF_LINE_MODE (1 << 18)
+#define VDCTRL0_SET_VSYNC_PULSE_WIDTH(x) ((x) & 0x3ffff)
+#define VDCTRL0_GET_VSYNC_PULSE_WIDTH(x) ((x) & 0x3ffff)
+
+#define VDCTRL2_SET_HSYNC_PERIOD(x) ((x) & 0x3ffff)
+#define VDCTRL2_GET_HSYNC_PERIOD(x) ((x) & 0x3ffff)
+
+#define VDCTRL3_MUX_SYNC_SIGNALS (1 << 29)
+#define VDCTRL3_VSYNC_ONLY (1 << 28)
+#define SET_HOR_WAIT_CNT(x) (((x) & 0xfff) << 16)
+#define GET_HOR_WAIT_CNT(x) (((x) >> 16) & 0xfff)
+#define SET_VERT_WAIT_CNT(x) ((x) & 0xffff)
+#define GET_VERT_WAIT_CNT(x) ((x) & 0xffff)
+
+#define VDCTRL4_SET_DOTCLK_DLY(x) (((x) & 0x7) << 29) /* v4 only */
+#define VDCTRL4_GET_DOTCLK_DLY(x) (((x) >> 29) & 0x7) /* v4 only */
+#define VDCTRL4_SYNC_SIGNALS_ON (1 << 18)
+#define SET_DOTCLK_H_VALID_DATA_CNT(x) ((x) & 0x3ffff)
+
+#define DEBUG0_HSYNC (1 << 26)
+#define DEBUG0_VSYNC (1 << 25)
+
+#define MXSFB_MIN_XRES 120
+#define MXSFB_MIN_YRES 120
+#define MXSFB_MAX_XRES 0xffff
+#define MXSFB_MAX_YRES 0xffff
+
+#define RED 0
+#define GREEN 1
+#define BLUE 2
+#define TRANSP 3
+
+#define STMLCDIF_8BIT 1 /* pixel data bus to the display is of 8 bit width */
+#define STMLCDIF_16BIT 0 /* pixel data bus to the display is of 16 bit width */
+#define STMLCDIF_18BIT 2 /* pixel data bus to the display is of 18 bit width */
+#define STMLCDIF_24BIT 3 /* pixel data bus to the display is of 24 bit width */
+
+#define MXSFB_SYNC_DATA_ENABLE_HIGH_ACT (1 << 6)
+#define MXSFB_SYNC_DOTCLK_FALLING_ACT (1 << 7) /* negative edge sampling */
+
+#endif /* __MXSFB_REGS_H__ */
diff --git a/drivers/gpu/drm/nouveau/Kbuild b/drivers/gpu/drm/nouveau/Kbuild
index 2527bf4ca5d9..fde6e3656636 100644
--- a/drivers/gpu/drm/nouveau/Kbuild
+++ b/drivers/gpu/drm/nouveau/Kbuild
@@ -22,6 +22,7 @@ nouveau-$(CONFIG_DEBUG_FS) += nouveau_debugfs.o
nouveau-y += nouveau_drm.o
nouveau-y += nouveau_hwmon.o
nouveau-$(CONFIG_COMPAT) += nouveau_ioc32.o
+nouveau-$(CONFIG_LEDS_CLASS) += nouveau_led.o
nouveau-y += nouveau_nvif.o
nouveau-$(CONFIG_NOUVEAU_PLATFORM_DRIVER) += nouveau_platform.o
nouveau-y += nouveau_usif.o # userspace <-> nvif
diff --git a/drivers/gpu/drm/nouveau/dispnv04/crtc.c b/drivers/gpu/drm/nouveau/dispnv04/crtc.c
index 0cb7a18cde26..59d1d1c5de5f 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/crtc.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/crtc.c
@@ -702,7 +702,7 @@ static void nv_crtc_prepare(struct drm_crtc *crtc)
if (nv_two_heads(dev))
NVSetOwner(dev, nv_crtc->index);
- drm_vblank_pre_modeset(dev, nv_crtc->index);
+ drm_crtc_vblank_off(crtc);
funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
NVBlankScreen(dev, nv_crtc->index, true);
@@ -734,7 +734,7 @@ static void nv_crtc_commit(struct drm_crtc *crtc)
#endif
funcs->dpms(crtc, DRM_MODE_DPMS_ON);
- drm_vblank_post_modeset(dev, nv_crtc->index);
+ drm_crtc_vblank_on(crtc);
}
static void nv_crtc_destroy(struct drm_crtc *crtc)
diff --git a/drivers/gpu/drm/nouveau/dispnv04/overlay.c b/drivers/gpu/drm/nouveau/dispnv04/overlay.c
index ec444eac6258..a79514d440b3 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/overlay.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/overlay.c
@@ -33,7 +33,7 @@
#include "nouveau_connector.h"
#include "nouveau_display.h"
#include "nvreg.h"
-
+#include "disp.h"
struct nouveau_plane {
struct drm_plane base;
diff --git a/drivers/gpu/drm/nouveau/include/nvif/cl5070.h b/drivers/gpu/drm/nouveau/include/nvif/cl5070.h
index d15c296b5f33..ae49dfd1f97b 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/cl5070.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/cl5070.h
@@ -34,6 +34,8 @@ struct nv50_disp_mthd_v1 {
#define NV50_DISP_MTHD_V1_SOR_HDMI_PWR 0x22
#define NV50_DISP_MTHD_V1_SOR_LVDS_SCRIPT 0x23
#define NV50_DISP_MTHD_V1_SOR_DP_PWR 0x24
+#define NV50_DISP_MTHD_V1_SOR_DP_MST_LINK 0x25
+#define NV50_DISP_MTHD_V1_SOR_DP_MST_VCPI 0x26
#define NV50_DISP_MTHD_V1_PIOR_PWR 0x30
__u8 method;
__u16 hasht;
@@ -90,6 +92,21 @@ struct nv50_disp_sor_dp_pwr_v0 {
__u8 pad02[6];
};
+struct nv50_disp_sor_dp_mst_link_v0 {
+ __u8 version;
+ __u8 state;
+ __u8 pad02[6];
+};
+
+struct nv50_disp_sor_dp_mst_vcpi_v0 {
+ __u8 version;
+ __u8 pad01[1];
+ __u8 start_slot;
+ __u8 num_slots;
+ __u16 pbn;
+ __u16 aligned_pbn;
+};
+
struct nv50_disp_pior_pwr_v0 {
__u8 version;
__u8 state;
diff --git a/drivers/gpu/drm/nouveau/include/nvif/class.h b/drivers/gpu/drm/nouveau/include/nvif/class.h
index e6e9537537cf..82235f30277c 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/class.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/class.h
@@ -52,7 +52,7 @@
#define GM107_DISP /* cl5070.h */ 0x00009470
#define GM200_DISP /* cl5070.h */ 0x00009570
#define GP100_DISP /* cl5070.h */ 0x00009770
-#define GP104_DISP /* cl5070.h */ 0x00009870
+#define GP102_DISP /* cl5070.h */ 0x00009870
#define NV31_MPEG 0x00003174
#define G82_MPEG 0x00008274
@@ -90,7 +90,7 @@
#define GM107_DISP_CORE_CHANNEL_DMA /* cl507d.h */ 0x0000947d
#define GM200_DISP_CORE_CHANNEL_DMA /* cl507d.h */ 0x0000957d
#define GP100_DISP_CORE_CHANNEL_DMA /* cl507d.h */ 0x0000977d
-#define GP104_DISP_CORE_CHANNEL_DMA /* cl507d.h */ 0x0000987d
+#define GP102_DISP_CORE_CHANNEL_DMA /* cl507d.h */ 0x0000987d
#define NV50_DISP_OVERLAY_CHANNEL_DMA /* cl507e.h */ 0x0000507e
#define G82_DISP_OVERLAY_CHANNEL_DMA /* cl507e.h */ 0x0000827e
diff --git a/drivers/gpu/drm/nouveau/include/nvif/object.h b/drivers/gpu/drm/nouveau/include/nvif/object.h
index 8d815967767f..9e58b305b020 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/object.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/object.h
@@ -66,6 +66,35 @@ void nvif_object_unmap(struct nvif_object *);
#define nvif_mthd(a,b,c,d) nvif_object_mthd((a), (b), (c), (d))
+struct nvif_mclass {
+ s32 oclass;
+ int version;
+};
+
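+/*
+ * Evaluates to the index of the first (oclass, version) pair in 'm' that
+ * the object supports, or -ENODEV if none of them match.
+ */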
+#define nvif_mclass(o,m) ({ \
+ struct nvif_object *object = (o); \
+ struct nvif_sclass *sclass; \
+ const typeof(m[0]) *mclass = (m); \
+ int ret = -ENODEV; \
+ int cnt, i, j; \
+ \
+ cnt = nvif_object_sclass_get(object, &sclass); \
+ if (cnt >= 0) { \
+ for (i = 0; ret < 0 && mclass[i].oclass; i++) { \
+ for (j = 0; j < cnt; j++) { \
+ if (mclass[i].oclass == sclass[j].oclass && \
+ mclass[i].version >= sclass[j].minver && \
+ mclass[i].version <= sclass[j].maxver) { \
+ ret = i; \
+ break; \
+ } \
+ } \
+ } \
+ nvif_object_sclass_put(&sclass); \
+ } \
+ ret; \
+})
+
/*XXX*/
#include <core/object.h>
#define nvxx_object(a) ({ \
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/ce.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/ce.h
index d3d26a1e215d..b93f4c1a95e5 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/ce.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/ce.h
@@ -8,5 +8,5 @@ int gk104_ce_new(struct nvkm_device *, int, struct nvkm_engine **);
int gm107_ce_new(struct nvkm_device *, int, struct nvkm_engine **);
int gm200_ce_new(struct nvkm_device *, int, struct nvkm_engine **);
int gp100_ce_new(struct nvkm_device *, int, struct nvkm_engine **);
-int gp104_ce_new(struct nvkm_device *, int, struct nvkm_engine **);
+int gp102_ce_new(struct nvkm_device *, int, struct nvkm_engine **);
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/disp.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/disp.h
index e82049667ce4..970ae753968a 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/disp.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/disp.h
@@ -33,5 +33,5 @@ int gk110_disp_new(struct nvkm_device *, int, struct nvkm_disp **);
int gm107_disp_new(struct nvkm_device *, int, struct nvkm_disp **);
int gm200_disp_new(struct nvkm_device *, int, struct nvkm_disp **);
int gp100_disp_new(struct nvkm_device *, int, struct nvkm_disp **);
-int gp104_disp_new(struct nvkm_device *, int, struct nvkm_disp **);
+int gp102_disp_new(struct nvkm_device *, int, struct nvkm_disp **);
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/boost.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/boost.h
index 934b0ae5521d..2ff64a20c0ec 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/boost.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/boost.h
@@ -1,6 +1,6 @@
#ifndef __NVBIOS_BOOST_H__
#define __NVBIOS_BOOST_H__
-u16 nvbios_boostTe(struct nvkm_bios *, u8 *, u8 *, u8 *, u8 *, u8 *, u8 *);
+u32 nvbios_boostTe(struct nvkm_bios *, u8 *, u8 *, u8 *, u8 *, u8 *, u8 *);
struct nvbios_boostE {
u8 pstate;
@@ -8,10 +8,10 @@ struct nvbios_boostE {
u32 max;
};
-u16 nvbios_boostEe(struct nvkm_bios *, int idx, u8 *, u8 *, u8 *, u8 *);
-u16 nvbios_boostEp(struct nvkm_bios *, int idx, u8 *, u8 *, u8 *, u8 *,
+u32 nvbios_boostEe(struct nvkm_bios *, int idx, u8 *, u8 *, u8 *, u8 *);
+u32 nvbios_boostEp(struct nvkm_bios *, int idx, u8 *, u8 *, u8 *, u8 *,
struct nvbios_boostE *);
-u16 nvbios_boostEm(struct nvkm_bios *, u8, u8 *, u8 *, u8 *, u8 *,
+u32 nvbios_boostEm(struct nvkm_bios *, u8, u8 *, u8 *, u8 *, u8 *,
struct nvbios_boostE *);
struct nvbios_boostS {
@@ -21,7 +21,7 @@ struct nvbios_boostS {
u32 max;
};
-u16 nvbios_boostSe(struct nvkm_bios *, int, u16, u8 *, u8 *, u8, u8);
-u16 nvbios_boostSp(struct nvkm_bios *, int, u16, u8 *, u8 *, u8, u8,
+u32 nvbios_boostSe(struct nvkm_bios *, int, u32, u8 *, u8 *, u8, u8);
+u32 nvbios_boostSp(struct nvkm_bios *, int, u32, u8 *, u8 *, u8, u8,
struct nvbios_boostS *);
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/cstep.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/cstep.h
index 2f0e0c8e83be..76fe7d50a1ce 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/cstep.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/cstep.h
@@ -1,6 +1,6 @@
#ifndef __NVBIOS_CSTEP_H__
#define __NVBIOS_CSTEP_H__
-u16 nvbios_cstepTe(struct nvkm_bios *,
+u32 nvbios_cstepTe(struct nvkm_bios *,
u8 *ver, u8 *hdr, u8 *cnt, u8 *len, u8 *xnr, u8 *xsz);
struct nvbios_cstepE {
@@ -8,10 +8,10 @@ struct nvbios_cstepE {
u8 index;
};
-u16 nvbios_cstepEe(struct nvkm_bios *, int idx, u8 *ver, u8 *hdr);
-u16 nvbios_cstepEp(struct nvkm_bios *, int idx, u8 *ver, u8 *hdr,
+u32 nvbios_cstepEe(struct nvkm_bios *, int idx, u8 *ver, u8 *hdr);
+u32 nvbios_cstepEp(struct nvkm_bios *, int idx, u8 *ver, u8 *hdr,
struct nvbios_cstepE *);
-u16 nvbios_cstepEm(struct nvkm_bios *, u8 pstate, u8 *ver, u8 *hdr,
+u32 nvbios_cstepEm(struct nvkm_bios *, u8 pstate, u8 *ver, u8 *hdr,
struct nvbios_cstepE *);
struct nvbios_cstepX {
@@ -20,7 +20,7 @@ struct nvbios_cstepX {
u8 voltage;
};
-u16 nvbios_cstepXe(struct nvkm_bios *, int idx, u8 *ver, u8 *hdr);
-u16 nvbios_cstepXp(struct nvkm_bios *, int idx, u8 *ver, u8 *hdr,
+u32 nvbios_cstepXe(struct nvkm_bios *, int idx, u8 *ver, u8 *hdr);
+u32 nvbios_cstepXp(struct nvkm_bios *, int idx, u8 *ver, u8 *hdr,
struct nvbios_cstepX *);
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/fan.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/fan.h
index 693ea7d9ec43..a7513e8406a3 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/fan.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/fan.h
@@ -2,5 +2,5 @@
#define __NVBIOS_FAN_H__
#include <subdev/bios/therm.h>
-u16 nvbios_fan_parse(struct nvkm_bios *bios, struct nvbios_therm_fan *fan);
+u32 nvbios_fan_parse(struct nvkm_bios *bios, struct nvbios_therm_fan *fan);
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/gpio.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/gpio.h
index a47d46dda704..b7a54e605469 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/gpio.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/gpio.h
@@ -6,6 +6,7 @@ enum dcb_gpio_func_name {
DCB_GPIO_TVDAC1 = 0x2d,
DCB_GPIO_FAN = 0x09,
DCB_GPIO_FAN_SENSE = 0x3d,
+ DCB_GPIO_LOGO_LED_PWM = 0x84,
DCB_GPIO_UNUSED = 0xff,
DCB_GPIO_VID0 = 0x04,
DCB_GPIO_VID1 = 0x05,
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/iccsense.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/iccsense.h
index 9cb97477248b..e933d3eede70 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/iccsense.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/iccsense.h
@@ -1,10 +1,16 @@
#ifndef __NVBIOS_ICCSENSE_H__
#define __NVBIOS_ICCSENSE_H__
+struct pwr_rail_resistor_t {
+ u8 mohm;
+ bool enabled;
+};
+
struct pwr_rail_t {
u8 mode;
u8 extdev_id;
- u8 resistor_mohm;
- u8 rail;
+ u8 resistor_count;
+ struct pwr_rail_resistor_t resistors[3];
+ u16 config;
};
struct nvbios_iccsense {
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/perf.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/perf.h
index d3bd250103d5..478b1c0d2089 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/perf.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/perf.h
@@ -1,6 +1,6 @@
#ifndef __NVBIOS_PERF_H__
#define __NVBIOS_PERF_H__
-u16 nvbios_perf_table(struct nvkm_bios *, u8 *ver, u8 *hdr,
+u32 nvbios_perf_table(struct nvkm_bios *, u8 *ver, u8 *hdr,
u8 *cnt, u8 *len, u8 *snr, u8 *ssz);
struct nvbios_perfE {
@@ -17,9 +17,9 @@ struct nvbios_perfE {
u8 pcie_width;
};
-u16 nvbios_perf_entry(struct nvkm_bios *, int idx,
+u32 nvbios_perf_entry(struct nvkm_bios *, int idx,
u8 *ver, u8 *hdr, u8 *cnt, u8 *len);
-u16 nvbios_perfEp(struct nvkm_bios *, int idx,
+u32 nvbios_perfEp(struct nvkm_bios *, int idx,
u8 *ver, u8 *hdr, u8 *cnt, u8 *len, struct nvbios_perfE *);
struct nvbios_perfS {
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/timing.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/timing.h
index 339a826aa176..38188d4c9ab5 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/timing.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/timing.h
@@ -2,10 +2,10 @@
#define __NVBIOS_TIMING_H__
#include <subdev/bios/ramcfg.h>
-u16 nvbios_timingTe(struct nvkm_bios *,
+u32 nvbios_timingTe(struct nvkm_bios *,
u8 *ver, u8 *hdr, u8 *cnt, u8 *len, u8 *snr, u8 *ssz);
-u16 nvbios_timingEe(struct nvkm_bios *, int idx,
+u32 nvbios_timingEe(struct nvkm_bios *, int idx,
u8 *ver, u8 *hdr, u8 *cnt, u8 *len);
-u16 nvbios_timingEp(struct nvkm_bios *, int idx,
+u32 nvbios_timingEp(struct nvkm_bios *, int idx,
u8 *ver, u8 *hdr, u8 *cnt, u8 *len, struct nvbios_ramcfg *);
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/vmap.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/vmap.h
index 6633c6db9281..bea31cdd1dd1 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/vmap.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/vmap.h
@@ -1,21 +1,24 @@
#ifndef __NVBIOS_VMAP_H__
#define __NVBIOS_VMAP_H__
struct nvbios_vmap {
+ u8 max0;
+ u8 max1;
+ u8 max2;
};
-u16 nvbios_vmap_table(struct nvkm_bios *, u8 *ver, u8 *hdr, u8 *cnt, u8 *len);
-u16 nvbios_vmap_parse(struct nvkm_bios *, u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
+u32 nvbios_vmap_table(struct nvkm_bios *, u8 *ver, u8 *hdr, u8 *cnt, u8 *len);
+u32 nvbios_vmap_parse(struct nvkm_bios *, u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
struct nvbios_vmap *);
struct nvbios_vmap_entry {
- u8 unk0;
+ u8 mode;
u8 link;
u32 min;
u32 max;
s32 arg[6];
};
-u16 nvbios_vmap_entry(struct nvkm_bios *, int idx, u8 *ver, u8 *len);
-u16 nvbios_vmap_entry_parse(struct nvkm_bios *, int idx, u8 *ver, u8 *len,
+u32 nvbios_vmap_entry(struct nvkm_bios *, int idx, u8 *ver, u8 *len);
+u32 nvbios_vmap_entry_parse(struct nvkm_bios *, int idx, u8 *ver, u8 *len,
struct nvbios_vmap_entry *);
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/volt.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/volt.h
index b0df610cec2b..f0baa2c7de09 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/volt.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/volt.h
@@ -13,16 +13,17 @@ struct nvbios_volt {
u32 base;
/* GPIO mode */
- u8 vidmask;
- s16 step;
+ bool ranged;
+ u8 vidmask;
+ s16 step;
/* PWM mode */
u32 pwm_freq;
u32 pwm_range;
};
-u16 nvbios_volt_table(struct nvkm_bios *, u8 *ver, u8 *hdr, u8 *cnt, u8 *len);
-u16 nvbios_volt_parse(struct nvkm_bios *, u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
+u32 nvbios_volt_table(struct nvkm_bios *, u8 *ver, u8 *hdr, u8 *cnt, u8 *len);
+u32 nvbios_volt_parse(struct nvkm_bios *, u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
struct nvbios_volt *);
struct nvbios_volt_entry {
@@ -30,7 +31,7 @@ struct nvbios_volt_entry {
u8 vid;
};
-u16 nvbios_volt_entry(struct nvkm_bios *, int idx, u8 *ver, u8 *len);
-u16 nvbios_volt_entry_parse(struct nvkm_bios *, int idx, u8 *ver, u8 *len,
+u32 nvbios_volt_entry(struct nvkm_bios *, int idx, u8 *ver, u8 *len);
+u32 nvbios_volt_entry_parse(struct nvkm_bios *, int idx, u8 *ver, u8 *len,
struct nvbios_volt_entry *);
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/vpstate.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/vpstate.h
new file mode 100644
index 000000000000..87f804fc3a88
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/vpstate.h
@@ -0,0 +1,24 @@
+#ifndef __NVBIOS_VPSTATE_H__
+#define __NVBIOS_VPSTATE_H__
+struct nvbios_vpstate_header {
+ u32 offset;
+
+ u8 version;
+ u8 hlen;
+ u8 ecount;
+ u8 elen;
+ u8 scount;
+ u8 slen;
+
+ u8 base_id;
+ u8 boost_id;
+ u8 tdp_id;
+};
+struct nvbios_vpstate_entry {
+ u8 pstate;
+ u16 clock_mhz;
+};
+int nvbios_vpstate_parse(struct nvkm_bios *, struct nvbios_vpstate_header *);
+int nvbios_vpstate_entry(struct nvkm_bios *, struct nvbios_vpstate_header *,
+ u8 idx, struct nvbios_vpstate_entry *);
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/clk.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/clk.h
index fb54417bc458..e5275f742977 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/clk.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/clk.h
@@ -6,6 +6,10 @@
struct nvbios_pll;
struct nvkm_pll_vals;
+#define NVKM_CLK_CSTATE_DEFAULT -1 /* POSTed default */
+#define NVKM_CLK_CSTATE_BASE -2 /* pstate base */
+#define NVKM_CLK_CSTATE_HIGHEST -3 /* highest possible */
+
enum nv_clk_src {
nv_clk_src_crystal,
nv_clk_src_href,
@@ -52,6 +56,7 @@ struct nvkm_cstate {
struct list_head head;
u8 voltage;
u32 domain[nv_clk_src_max];
+ u8 id;
};
struct nvkm_pstate {
@@ -67,7 +72,8 @@ struct nvkm_pstate {
struct nvkm_domain {
enum nv_clk_src name;
u8 bios; /* 0xff for none */
-#define NVKM_CLK_DOM_FLAG_CORE 0x01
+#define NVKM_CLK_DOM_FLAG_CORE 0x01
+#define NVKM_CLK_DOM_FLAG_VPSTATE 0x02
u8 flags;
const char *mname;
int mdiv;
@@ -93,10 +99,16 @@ struct nvkm_clk {
int ustate_ac; /* user-requested (-1 disabled, -2 perfmon) */
int ustate_dc; /* user-requested (-1 disabled, -2 perfmon) */
int astate; /* perfmon adjustment (base) */
- int tstate; /* thermal adjustment (max-) */
int dstate; /* display adjustment (min+) */
+ u8 temp;
bool allow_reclock;
+#define NVKM_CLK_BOOST_NONE 0x0
+#define NVKM_CLK_BOOST_BIOS 0x1
+#define NVKM_CLK_BOOST_FULL 0x2
+ u8 boost_mode;
+ u32 base_khz;
+ u32 boost_khz;
/*XXX: die, these are here *only* to support the completely
* bat-shit insane what-was-nouveau_hw.c code
@@ -110,7 +122,7 @@ int nvkm_clk_read(struct nvkm_clk *, enum nv_clk_src);
int nvkm_clk_ustate(struct nvkm_clk *, int req, int pwr);
int nvkm_clk_astate(struct nvkm_clk *, int req, int rel, bool wait);
int nvkm_clk_dstate(struct nvkm_clk *, int req, int rel);
-int nvkm_clk_tstate(struct nvkm_clk *, int req, int rel);
+int nvkm_clk_tstate(struct nvkm_clk *, u8 temperature);
int nv04_clk_new(struct nvkm_device *, int, struct nvkm_clk **);
int nv40_clk_new(struct nvkm_device *, int, struct nvkm_clk **);
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h
index 3a410275fa71..794e432578b2 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h
@@ -93,8 +93,9 @@ int gk104_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
int gk20a_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
int gm107_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
int gm200_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
+int gm20b_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
int gp100_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
-int gp104_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
+int gp102_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
#include <subdev/bios.h>
#include <subdev/bios/ramcfg.h>
@@ -156,4 +157,6 @@ struct nvkm_ram_func {
int (*prog)(struct nvkm_ram *);
void (*tidy)(struct nvkm_ram *);
};
+
+extern const u8 gf100_pte_storage_type_map[256];
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/pmu.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/pmu.h
index e61923d5e49c..f37538eb1fe5 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/pmu.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/pmu.h
@@ -35,6 +35,8 @@ int gk110_pmu_new(struct nvkm_device *, int, struct nvkm_pmu **);
int gk208_pmu_new(struct nvkm_device *, int, struct nvkm_pmu **);
int gk20a_pmu_new(struct nvkm_device *, int, struct nvkm_pmu **);
int gm107_pmu_new(struct nvkm_device *, int, struct nvkm_pmu **);
+int gp100_pmu_new(struct nvkm_device *, int, struct nvkm_pmu **);
+int gp102_pmu_new(struct nvkm_device *, int, struct nvkm_pmu **);
/* interface to MEMX process running on PMU */
struct nvkm_memx;
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/volt.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/volt.h
index b765f4ffcde6..08ef9983c643 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/volt.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/volt.h
@@ -15,12 +15,28 @@ struct nvkm_volt {
u32 max_uv;
u32 min_uv;
+
+ /*
+ * These are fully functional map entries creating a sw ceiling for
+ * the voltage. These all can describe different kind of curves, so
+ * that for any given temperature a different one can return the lowest
+ * value of all three.
+ */
+ u8 max0_id;
+ u8 max1_id;
+ u8 max2_id;
+
+ int speedo;
};
+int nvkm_volt_map(struct nvkm_volt *volt, u8 id, u8 temperature);
+int nvkm_volt_map_min(struct nvkm_volt *volt, u8 id);
int nvkm_volt_get(struct nvkm_volt *);
-int nvkm_volt_set_id(struct nvkm_volt *, u8 id, int condition);
+int nvkm_volt_set_id(struct nvkm_volt *, u8 id, u8 min_id, u8 temp,
+ int condition);
int nv40_volt_new(struct nvkm_device *, int, struct nvkm_volt **);
+int gf100_volt_new(struct nvkm_device *, int, struct nvkm_volt **);
int gk104_volt_new(struct nvkm_device *, int, struct nvkm_volt **);
int gk20a_volt_new(struct nvkm_device *, int, struct nvkm_volt **);
int gm20b_volt_new(struct nvkm_device *, int, struct nvkm_volt **);
diff --git a/drivers/gpu/drm/nouveau/nouveau_backlight.c b/drivers/gpu/drm/nouveau/nouveau_backlight.c
index f5101be806cb..8b1ca4add2ed 100644
--- a/drivers/gpu/drm/nouveau/nouveau_backlight.c
+++ b/drivers/gpu/drm/nouveau/nouveau_backlight.c
@@ -30,12 +30,37 @@
* Register locations derived from NVClock by Roderick Colenbrander
*/
+#include <linux/apple-gmux.h>
#include <linux/backlight.h>
+#include <linux/idr.h>
#include "nouveau_drv.h"
#include "nouveau_reg.h"
#include "nouveau_encoder.h"
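+/* An IDA allocator hands out unique suffixes for the sysfs device names. */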
+static struct ida bl_ida;
+#define BL_NAME_SIZE 15 /* 12 for name + 2 for digits + 1 for '\0' */
+
+struct backlight_connector {
+ struct list_head head;
+ int id;
+};
+
+static bool
+nouveau_get_backlight_name(char backlight_name[BL_NAME_SIZE], struct backlight_connector
+ *connector)
+{
+ const int nb = ida_simple_get(&bl_ida, 0, 0, GFP_KERNEL);
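+	/* Two digits at most, as more would overflow BL_NAME_SIZE. */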
+ if (nb < 0 || nb >= 100)
+ return false;
+ if (nb > 0)
+ snprintf(backlight_name, BL_NAME_SIZE, "nv_backlight%d", nb);
+ else
+ snprintf(backlight_name, BL_NAME_SIZE, "nv_backlight");
+ connector->id = nb;
+ return true;
+}
+
static int
nv40_get_intensity(struct backlight_device *bd)
{
@@ -74,6 +99,8 @@ nv40_backlight_init(struct drm_connector *connector)
struct nvif_object *device = &drm->device.object;
struct backlight_properties props;
struct backlight_device *bd;
+ struct backlight_connector bl_connector;
+ char backlight_name[BL_NAME_SIZE];
if (!(nvif_rd32(device, NV40_PMC_BACKLIGHT) & NV40_PMC_BACKLIGHT_MASK))
return 0;
@@ -81,10 +108,19 @@ nv40_backlight_init(struct drm_connector *connector)
memset(&props, 0, sizeof(struct backlight_properties));
props.type = BACKLIGHT_RAW;
props.max_brightness = 31;
- bd = backlight_device_register("nv_backlight", connector->kdev, drm,
+ if (!nouveau_get_backlight_name(backlight_name, &bl_connector)) {
+ NV_ERROR(drm, "Failed to retrieve a unique name for the backlight interface\n");
+ return 0;
+ }
+	bd = backlight_device_register(backlight_name, connector->kdev, drm,
&nv40_bl_ops, &props);
- if (IS_ERR(bd))
+
+ if (IS_ERR(bd)) {
+ if (bl_connector.id > 0)
+ ida_simple_remove(&bl_ida, bl_connector.id);
return PTR_ERR(bd);
+ }
+ list_add(&bl_connector.head, &drm->bl_connectors);
drm->backlight = bd;
bd->props.brightness = nv40_get_intensity(bd);
backlight_update_status(bd);
@@ -182,6 +218,8 @@ nv50_backlight_init(struct drm_connector *connector)
struct backlight_properties props;
struct backlight_device *bd;
const struct backlight_ops *ops;
+ struct backlight_connector bl_connector;
+ char backlight_name[BL_NAME_SIZE];
nv_encoder = find_encoder(connector, DCB_OUTPUT_LVDS);
if (!nv_encoder) {
@@ -203,11 +241,20 @@ nv50_backlight_init(struct drm_connector *connector)
memset(&props, 0, sizeof(struct backlight_properties));
props.type = BACKLIGHT_RAW;
props.max_brightness = 100;
- bd = backlight_device_register("nv_backlight", connector->kdev,
+ if (!nouveau_get_backlight_name(backlight_name, &bl_connector)) {
+ NV_ERROR(drm, "Failed to retrieve a unique name for the backlight interface\n");
+ return 0;
+ }
+	bd = backlight_device_register(backlight_name, connector->kdev,
nv_encoder, ops, &props);
- if (IS_ERR(bd))
+
+ if (IS_ERR(bd)) {
+ if (bl_connector.id > 0)
+ ida_simple_remove(&bl_ida, bl_connector.id);
return PTR_ERR(bd);
+ }
+ list_add(&bl_connector.head, &drm->bl_connectors);
drm->backlight = bd;
bd->props.brightness = bd->ops->get_brightness(bd);
backlight_update_status(bd);
@@ -221,6 +268,13 @@ nouveau_backlight_init(struct drm_device *dev)
struct nvif_device *device = &drm->device;
struct drm_connector *connector;
+ if (apple_gmux_present()) {
+ NV_INFO(drm, "Apple GMUX detected: not registering Nouveau backlight interface\n");
+ return 0;
+ }
+
+ INIT_LIST_HEAD(&drm->bl_connectors);
+
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
if (connector->connector_type != DRM_MODE_CONNECTOR_LVDS &&
connector->connector_type != DRM_MODE_CONNECTOR_eDP)
@@ -232,6 +286,7 @@ nouveau_backlight_init(struct drm_device *dev)
case NV_DEVICE_INFO_V0_TESLA:
case NV_DEVICE_INFO_V0_FERMI:
case NV_DEVICE_INFO_V0_KEPLER:
+ case NV_DEVICE_INFO_V0_MAXWELL:
return nv50_backlight_init(connector);
default:
break;
@@ -246,9 +301,27 @@ void
nouveau_backlight_exit(struct drm_device *dev)
{
struct nouveau_drm *drm = nouveau_drm(dev);
+ struct backlight_connector *connector;
+
+ list_for_each_entry(connector, &drm->bl_connectors, head) {
+ if (connector->id >= 0)
+ ida_simple_remove(&bl_ida, connector->id);
+ }
if (drm->backlight) {
backlight_device_unregister(drm->backlight);
drm->backlight = NULL;
}
}
+
+void
+nouveau_backlight_ctor(void)
+{
+ ida_init(&bl_ida);
+}
+
+void
+nouveau_backlight_dtor(void)
+{
+ ida_destroy(&bl_ida);
+}
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
index a1570b109434..23ffe8571a99 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
@@ -333,6 +333,9 @@ get_fp_strap(struct drm_device *dev, struct nvbios *bios)
if (bios->major_version < 5 && bios->data[0x48] & 0x4)
return NVReadVgaCrtc5758(dev, 0, 0xf) & 0xf;
+ if (drm->device.info.family >= NV_DEVICE_INFO_V0_MAXWELL)
+ return nvif_rd32(device, 0x001800) & 0x0000000f;
+ else
if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA)
return (nvif_rd32(device, NV_PEXTDEV_BOOT_0) >> 24) & 0xf;
else
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.h b/drivers/gpu/drm/nouveau/nouveau_bios.h
index 0067586eb015..18eb061ccafb 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.h
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.h
@@ -31,10 +31,8 @@
#define DCB_LOC_ON_CHIP 0
-#define ROM16(x) le16_to_cpu(*(u16 *)&(x))
-#define ROM32(x) le32_to_cpu(*(u32 *)&(x))
-#define ROM48(x) ({ u8 *p = &(x); (u64)ROM16(p[4]) << 32 | ROM32(p[0]); })
-#define ROM64(x) le64_to_cpu(*(u64 *)&(x))
+#define ROM16(x) get_unaligned_le16(&(x))
+#define ROM32(x) get_unaligned_le32(&(x))
#define ROMPTR(d,x) ({ \
struct nouveau_drm *drm = nouveau_drm((d)); \
ROM16(x) ? &drm->vbios.data[ROM16(x)] : NULL; \
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 343b8659472c..dd07ca140d12 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -83,13 +83,13 @@ nv10_bo_get_tile_region(struct drm_device *dev, int i)
static void
nv10_bo_put_tile_region(struct drm_device *dev, struct nouveau_drm_tile *tile,
- struct fence *fence)
+ struct dma_fence *fence)
{
struct nouveau_drm *drm = nouveau_drm(dev);
if (tile) {
spin_lock(&drm->tile.lock);
- tile->fence = (struct nouveau_fence *)fence_get(fence);
+ tile->fence = (struct nouveau_fence *)dma_fence_get(fence);
tile->used = false;
spin_unlock(&drm->tile.lock);
}
@@ -1209,6 +1209,7 @@ nouveau_bo_move_ntfy(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem)
nvbo->page_shift != vma->vm->mmu->lpg_shift)) {
nvkm_vm_map(vma, new_mem->mm_node);
} else {
+ WARN_ON(ttm_bo_wait(bo, false, false));
nvkm_vm_unmap(vma);
}
}
@@ -1243,7 +1244,7 @@ nouveau_bo_vm_cleanup(struct ttm_buffer_object *bo,
{
struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
struct drm_device *dev = drm->dev;
- struct fence *fence = reservation_object_get_excl(bo->resv);
+ struct dma_fence *fence = reservation_object_get_excl(bo->resv);
nv10_bo_put_tile_region(dev, *old_tile, fence);
*old_tile = new_tile;
@@ -1561,6 +1562,7 @@ struct ttm_bo_driver nouveau_bo_driver = {
.ttm_tt_unpopulate = &nouveau_ttm_tt_unpopulate,
.invalidate_caches = nouveau_bo_invalidate_caches,
.init_mem_type = nouveau_bo_init_mem_type,
+ .eviction_valuable = ttm_bo_eviction_valuable,
.evict_flags = nouveau_bo_evict_flags,
.move_notify = nouveau_bo_move_ntfy,
.move = nouveau_bo_move,
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index c1084088f9e4..947c200655b4 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -30,6 +30,7 @@
#include <linux/vga_switcheroo.h>
#include <drm/drmP.h>
+#include <drm/drm_atomic_helper.h>
#include <drm/drm_edid.h>
#include <drm/drm_crtc_helper.h>
@@ -47,6 +48,301 @@
#include <nvif/cl0046.h>
#include <nvif/event.h>
+struct drm_display_mode *
+nouveau_conn_native_mode(struct drm_connector *connector)
+{
+ const struct drm_connector_helper_funcs *helper = connector->helper_private;
+ struct nouveau_drm *drm = nouveau_drm(connector->dev);
+ struct drm_device *dev = connector->dev;
+ struct drm_display_mode *mode, *largest = NULL;
+ int high_w = 0, high_h = 0, high_v = 0;
+
+ list_for_each_entry(mode, &connector->probed_modes, head) {
+ mode->vrefresh = drm_mode_vrefresh(mode);
+ if (helper->mode_valid(connector, mode) != MODE_OK ||
+ (mode->flags & DRM_MODE_FLAG_INTERLACE))
+ continue;
+
+		/* Use the preferred mode if there is one. */
+ if (mode->type & DRM_MODE_TYPE_PREFERRED) {
+ NV_DEBUG(drm, "native mode from preferred\n");
+ return drm_mode_duplicate(dev, mode);
+ }
+
+ /* Otherwise, take the resolution with the largest width, then
+ * height, then vertical refresh
+ */
+ if (mode->hdisplay < high_w)
+ continue;
+
+ if (mode->hdisplay == high_w && mode->vdisplay < high_h)
+ continue;
+
+ if (mode->hdisplay == high_w && mode->vdisplay == high_h &&
+ mode->vrefresh < high_v)
+ continue;
+
+ high_w = mode->hdisplay;
+ high_h = mode->vdisplay;
+ high_v = mode->vrefresh;
+ largest = mode;
+ }
+
+ NV_DEBUG(drm, "native mode from largest: %dx%d@%d\n",
+ high_w, high_h, high_v);
+ return largest ? drm_mode_duplicate(dev, largest) : NULL;
+}
+
+int
+nouveau_conn_atomic_get_property(struct drm_connector *connector,
+ const struct drm_connector_state *state,
+ struct drm_property *property, u64 *val)
+{
+ struct nouveau_conn_atom *asyc = nouveau_conn_atom(state);
+ struct nouveau_display *disp = nouveau_display(connector->dev);
+ struct drm_device *dev = connector->dev;
+
+ if (property == dev->mode_config.scaling_mode_property)
+ *val = asyc->scaler.mode;
+ else if (property == disp->underscan_property)
+ *val = asyc->scaler.underscan.mode;
+ else if (property == disp->underscan_hborder_property)
+ *val = asyc->scaler.underscan.hborder;
+ else if (property == disp->underscan_vborder_property)
+ *val = asyc->scaler.underscan.vborder;
+ else if (property == disp->dithering_mode)
+ *val = asyc->dither.mode;
+ else if (property == disp->dithering_depth)
+ *val = asyc->dither.depth;
+ else if (property == disp->vibrant_hue_property)
+ *val = asyc->procamp.vibrant_hue;
+ else if (property == disp->color_vibrance_property)
+ *val = asyc->procamp.color_vibrance;
+ else
+ return -EINVAL;
+
+ return 0;
+}
+
+int
+nouveau_conn_atomic_set_property(struct drm_connector *connector,
+ struct drm_connector_state *state,
+ struct drm_property *property, u64 val)
+{
+ struct drm_device *dev = connector->dev;
+ struct nouveau_conn_atom *asyc = nouveau_conn_atom(state);
+ struct nouveau_display *disp = nouveau_display(dev);
+
+ if (property == dev->mode_config.scaling_mode_property) {
+ switch (val) {
+ case DRM_MODE_SCALE_NONE:
+ /* We allow 'None' for EDID modes, even on a fixed
+ * panel (some exist with support for lower refresh
+ * rates, which people might want to use for power-
+ * saving purposes).
+ *
+ * Non-EDID modes will force the use of GPU scaling
+ * to the native mode regardless of this setting.
+ */
+ switch (connector->connector_type) {
+ case DRM_MODE_CONNECTOR_LVDS:
+ case DRM_MODE_CONNECTOR_eDP:
+ /* ... except prior to G80, where the code
+ * doesn't support such things.
+ */
+ if (disp->disp.oclass < NV50_DISP)
+ return -EINVAL;
+ break;
+ default:
+ break;
+ }
+			/* fall through */
+		case DRM_MODE_SCALE_FULLSCREEN:
+ case DRM_MODE_SCALE_CENTER:
+ case DRM_MODE_SCALE_ASPECT:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (asyc->scaler.mode != val) {
+ asyc->scaler.mode = val;
+ asyc->set.scaler = true;
+ }
+ } else
+ if (property == disp->underscan_property) {
+ if (asyc->scaler.underscan.mode != val) {
+ asyc->scaler.underscan.mode = val;
+ asyc->set.scaler = true;
+ }
+ } else
+ if (property == disp->underscan_hborder_property) {
+ if (asyc->scaler.underscan.hborder != val) {
+ asyc->scaler.underscan.hborder = val;
+ asyc->set.scaler = true;
+ }
+ } else
+ if (property == disp->underscan_vborder_property) {
+ if (asyc->scaler.underscan.vborder != val) {
+ asyc->scaler.underscan.vborder = val;
+ asyc->set.scaler = true;
+ }
+ } else
+ if (property == disp->dithering_mode) {
+ if (asyc->dither.mode != val) {
+ asyc->dither.mode = val;
+ asyc->set.dither = true;
+ }
+ } else
+ if (property == disp->dithering_depth) {
+		if (asyc->dither.depth != val) {
+ asyc->dither.depth = val;
+ asyc->set.dither = true;
+ }
+ } else
+ if (property == disp->vibrant_hue_property) {
+ if (asyc->procamp.vibrant_hue != val) {
+ asyc->procamp.vibrant_hue = val;
+ asyc->set.procamp = true;
+ }
+ } else
+ if (property == disp->color_vibrance_property) {
+ if (asyc->procamp.color_vibrance != val) {
+ asyc->procamp.color_vibrance = val;
+ asyc->set.procamp = true;
+ }
+ } else {
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+void
+nouveau_conn_atomic_destroy_state(struct drm_connector *connector,
+ struct drm_connector_state *state)
+{
+ struct nouveau_conn_atom *asyc = nouveau_conn_atom(state);
+ __drm_atomic_helper_connector_destroy_state(&asyc->state);
+ kfree(asyc);
+}
+
+struct drm_connector_state *
+nouveau_conn_atomic_duplicate_state(struct drm_connector *connector)
+{
+ struct nouveau_conn_atom *armc = nouveau_conn_atom(connector->state);
+ struct nouveau_conn_atom *asyc;
+ if (!(asyc = kmalloc(sizeof(*asyc), GFP_KERNEL)))
+ return NULL;
+ __drm_atomic_helper_connector_duplicate_state(connector, &asyc->state);
+ asyc->dither = armc->dither;
+ asyc->scaler = armc->scaler;
+ asyc->procamp = armc->procamp;
+ asyc->set.mask = 0;
+ return &asyc->state;
+}
+
+void
+nouveau_conn_reset(struct drm_connector *connector)
+{
+ struct nouveau_conn_atom *asyc;
+
+ if (WARN_ON(!(asyc = kzalloc(sizeof(*asyc), GFP_KERNEL))))
+ return;
+
+ if (connector->state)
+ __drm_atomic_helper_connector_destroy_state(connector->state);
+ __drm_atomic_helper_connector_reset(connector, &asyc->state);
+ asyc->dither.mode = DITHERING_MODE_AUTO;
+ asyc->dither.depth = DITHERING_DEPTH_AUTO;
+ asyc->scaler.mode = DRM_MODE_SCALE_NONE;
+ asyc->scaler.underscan.mode = UNDERSCAN_OFF;
+ asyc->procamp.color_vibrance = 150;
+ asyc->procamp.vibrant_hue = 90;
+
+ if (nouveau_display(connector->dev)->disp.oclass < NV50_DISP) {
+ switch (connector->connector_type) {
+ case DRM_MODE_CONNECTOR_LVDS:
+ /* See note in nouveau_conn_atomic_set_property(). */
+ asyc->scaler.mode = DRM_MODE_SCALE_FULLSCREEN;
+ break;
+ default:
+ break;
+ }
+ }
+}
+
+void
+nouveau_conn_attach_properties(struct drm_connector *connector)
+{
+ struct drm_device *dev = connector->dev;
+ struct nouveau_conn_atom *armc = nouveau_conn_atom(connector->state);
+ struct nouveau_display *disp = nouveau_display(dev);
+
+ /* Init DVI-I specific properties. */
+ if (connector->connector_type == DRM_MODE_CONNECTOR_DVII)
+ drm_object_attach_property(&connector->base, dev->mode_config.
+ dvi_i_subconnector_property, 0);
+
+ /* Add overscan compensation options to digital outputs. */
+ if (disp->underscan_property &&
+ (connector->connector_type == DRM_MODE_CONNECTOR_DVID ||
+ connector->connector_type == DRM_MODE_CONNECTOR_DVII ||
+ connector->connector_type == DRM_MODE_CONNECTOR_HDMIA ||
+ connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort)) {
+ drm_object_attach_property(&connector->base,
+ disp->underscan_property,
+ UNDERSCAN_OFF);
+ drm_object_attach_property(&connector->base,
+ disp->underscan_hborder_property, 0);
+ drm_object_attach_property(&connector->base,
+ disp->underscan_vborder_property, 0);
+ }
+
+ /* Add hue and saturation options. */
+ if (disp->vibrant_hue_property)
+ drm_object_attach_property(&connector->base,
+ disp->vibrant_hue_property,
+ armc->procamp.vibrant_hue);
+ if (disp->color_vibrance_property)
+ drm_object_attach_property(&connector->base,
+ disp->color_vibrance_property,
+ armc->procamp.color_vibrance);
+
+ /* Scaling mode property. */
+ switch (connector->connector_type) {
+ case DRM_MODE_CONNECTOR_TV:
+ break;
+ case DRM_MODE_CONNECTOR_VGA:
+ if (disp->disp.oclass < NV50_DISP)
+ break; /* Can only scale on DFPs. */
+ /* Fall-through. */
+ default:
+ drm_object_attach_property(&connector->base, dev->mode_config.
+ scaling_mode_property,
+ armc->scaler.mode);
+ break;
+ }
+
+ /* Dithering properties. */
+ switch (connector->connector_type) {
+ case DRM_MODE_CONNECTOR_TV:
+ case DRM_MODE_CONNECTOR_VGA:
+ break;
+ default:
+ if (disp->dithering_mode) {
+ drm_object_attach_property(&connector->base,
+ disp->dithering_mode,
+ armc->dither.mode);
+ }
+ if (disp->dithering_depth) {
+ drm_object_attach_property(&connector->base,
+ disp->dithering_depth,
+ armc->dither.depth);
+ }
+ break;
+ }
+}
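
Note the ordering dependency here: nouveau_conn_attach_properties() reads its default property values out of connector->state, so the connector must have been reset first. The call site added later in this patch honours that:

	connector->funcs->reset(connector);	   /* allocate state, set defaults */
	nouveau_conn_attach_properties(connector); /* read those defaults back */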
+
MODULE_PARM_DESC(tv_disable, "Disable TV-out detection");
int nouveau_tv_disable = 0;
module_param_named(tv_disable, nouveau_tv_disable, int, 0400);
@@ -151,7 +447,9 @@ nouveau_connector_ddc_detect(struct drm_connector *connector)
if (nv_encoder->dcb->type == DCB_OUTPUT_DP) {
int ret = nouveau_dp_detect(nv_encoder);
- if (ret == 0)
+ if (ret == NOUVEAU_DP_MST)
+ return NULL;
+ if (ret == NOUVEAU_DP_SST)
break;
} else
if ((vga_switcheroo_handler_flags() &
@@ -465,199 +763,39 @@ static int
nouveau_connector_set_property(struct drm_connector *connector,
struct drm_property *property, uint64_t value)
{
- struct nouveau_display *disp = nouveau_display(connector->dev);
+ struct nouveau_conn_atom *asyc = nouveau_conn_atom(connector->state);
struct nouveau_connector *nv_connector = nouveau_connector(connector);
struct nouveau_encoder *nv_encoder = nv_connector->detected_encoder;
struct drm_encoder *encoder = to_drm_encoder(nv_encoder);
- struct drm_device *dev = connector->dev;
- struct nouveau_crtc *nv_crtc;
int ret;
- nv_crtc = NULL;
- if (connector->encoder && connector->encoder->crtc)
- nv_crtc = nouveau_crtc(connector->encoder->crtc);
-
- /* Scaling mode */
- if (property == dev->mode_config.scaling_mode_property) {
- bool modeset = false;
-
- switch (value) {
- case DRM_MODE_SCALE_NONE:
- /* We allow 'None' for EDID modes, even on a fixed
- * panel (some exist with support for lower refresh
- * rates, which people might want to use for power
- * saving purposes).
- *
- * Non-EDID modes will force the use of GPU scaling
- * to the native mode regardless of this setting.
- */
- switch (nv_connector->type) {
- case DCB_CONNECTOR_LVDS:
- case DCB_CONNECTOR_LVDS_SPWG:
- case DCB_CONNECTOR_eDP:
- /* ... except prior to G80, where the code
- * doesn't support such things.
- */
- if (disp->disp.oclass < NV50_DISP)
- return -EINVAL;
- break;
- default:
- break;
- }
- break;
- case DRM_MODE_SCALE_FULLSCREEN:
- case DRM_MODE_SCALE_CENTER:
- case DRM_MODE_SCALE_ASPECT:
- break;
- default:
- return -EINVAL;
- }
-
- /* Changing between GPU and panel scaling requires a full
- * modeset
- */
- if ((nv_connector->scaling_mode == DRM_MODE_SCALE_NONE) ||
- (value == DRM_MODE_SCALE_NONE))
- modeset = true;
- nv_connector->scaling_mode = value;
-
- if (!nv_crtc)
- return 0;
-
- if (modeset || !nv_crtc->set_scale) {
- ret = drm_crtc_helper_set_mode(&nv_crtc->base,
- &nv_crtc->base.mode,
- nv_crtc->base.x,
- nv_crtc->base.y, NULL);
- if (!ret)
- return -EINVAL;
- } else {
- ret = nv_crtc->set_scale(nv_crtc, true);
- if (ret)
- return ret;
- }
-
- return 0;
- }
-
- /* Underscan */
- if (property == disp->underscan_property) {
- if (nv_connector->underscan != value) {
- nv_connector->underscan = value;
- if (!nv_crtc || !nv_crtc->set_scale)
- return 0;
-
- return nv_crtc->set_scale(nv_crtc, true);
- }
-
- return 0;
- }
-
- if (property == disp->underscan_hborder_property) {
- if (nv_connector->underscan_hborder != value) {
- nv_connector->underscan_hborder = value;
- if (!nv_crtc || !nv_crtc->set_scale)
- return 0;
-
- return nv_crtc->set_scale(nv_crtc, true);
- }
-
- return 0;
- }
-
- if (property == disp->underscan_vborder_property) {
- if (nv_connector->underscan_vborder != value) {
- nv_connector->underscan_vborder = value;
- if (!nv_crtc || !nv_crtc->set_scale)
- return 0;
-
- return nv_crtc->set_scale(nv_crtc, true);
- }
-
- return 0;
- }
+ if (connector->dev->mode_config.funcs->atomic_commit)
+ return drm_atomic_helper_connector_set_property(connector, property, value);
- /* Dithering */
- if (property == disp->dithering_mode) {
- nv_connector->dithering_mode = value;
- if (!nv_crtc || !nv_crtc->set_dither)
- return 0;
-
- return nv_crtc->set_dither(nv_crtc, true);
- }
-
- if (property == disp->dithering_depth) {
- nv_connector->dithering_depth = value;
- if (!nv_crtc || !nv_crtc->set_dither)
- return 0;
-
- return nv_crtc->set_dither(nv_crtc, true);
- }
-
- if (nv_crtc && nv_crtc->set_color_vibrance) {
- /* Hue */
- if (property == disp->vibrant_hue_property) {
- nv_crtc->vibrant_hue = value - 90;
- return nv_crtc->set_color_vibrance(nv_crtc, true);
- }
- /* Saturation */
- if (property == disp->color_vibrance_property) {
- nv_crtc->color_vibrance = value - 100;
- return nv_crtc->set_color_vibrance(nv_crtc, true);
- }
+ ret = connector->funcs->atomic_set_property(&nv_connector->base,
+ &asyc->state,
+ property, value);
+ if (ret) {
+ if (nv_encoder && nv_encoder->dcb->type == DCB_OUTPUT_TV)
+ return get_slave_funcs(encoder)->set_property(
+ encoder, connector, property, value);
+ return ret;
}
- if (nv_encoder && nv_encoder->dcb->type == DCB_OUTPUT_TV)
- return get_slave_funcs(encoder)->set_property(
- encoder, connector, property, value);
-
- return -EINVAL;
-}
-
-static struct drm_display_mode *
-nouveau_connector_native_mode(struct drm_connector *connector)
-{
- const struct drm_connector_helper_funcs *helper = connector->helper_private;
- struct nouveau_drm *drm = nouveau_drm(connector->dev);
- struct nouveau_connector *nv_connector = nouveau_connector(connector);
- struct drm_device *dev = connector->dev;
- struct drm_display_mode *mode, *largest = NULL;
- int high_w = 0, high_h = 0, high_v = 0;
+ nv_connector->scaling_mode = asyc->scaler.mode;
+ nv_connector->dithering_mode = asyc->dither.mode;
- list_for_each_entry(mode, &nv_connector->base.probed_modes, head) {
- mode->vrefresh = drm_mode_vrefresh(mode);
- if (helper->mode_valid(connector, mode) != MODE_OK ||
- (mode->flags & DRM_MODE_FLAG_INTERLACE))
- continue;
-
- /* Use preferred mode if there is one.. */
- if (mode->type & DRM_MODE_TYPE_PREFERRED) {
- NV_DEBUG(drm, "native mode from preferred\n");
- return drm_mode_duplicate(dev, mode);
- }
-
- /* Otherwise, take the resolution with the largest width, then
- * height, then vertical refresh
- */
- if (mode->hdisplay < high_w)
- continue;
-
- if (mode->hdisplay == high_w && mode->vdisplay < high_h)
- continue;
-
- if (mode->hdisplay == high_w && mode->vdisplay == high_h &&
- mode->vrefresh < high_v)
- continue;
-
- high_w = mode->hdisplay;
- high_h = mode->vdisplay;
- high_v = mode->vrefresh;
- largest = mode;
+ if (connector->encoder && connector->encoder->crtc) {
+ ret = drm_crtc_helper_set_mode(connector->encoder->crtc,
+ &connector->encoder->crtc->mode,
+ connector->encoder->crtc->x,
+ connector->encoder->crtc->y,
+ NULL);
+ if (!ret)
+ return -EINVAL;
}
- NV_DEBUG(drm, "native mode from largest: %dx%d@%d\n",
- high_w, high_h, high_v);
- return largest ? drm_mode_duplicate(dev, largest) : NULL;
+ return 0;
}
struct moderec {
@@ -805,8 +943,7 @@ nouveau_connector_get_modes(struct drm_connector *connector)
* the list of modes.
*/
if (!nv_connector->native_mode)
- nv_connector->native_mode =
- nouveau_connector_native_mode(connector);
+ nv_connector->native_mode = nouveau_conn_native_mode(connector);
if (ret == 0 && nv_connector->native_mode) {
struct drm_display_mode *mode;
@@ -934,56 +1071,42 @@ nouveau_connector_helper_funcs = {
.best_encoder = nouveau_connector_best_encoder,
};
+static int
+nouveau_connector_dpms(struct drm_connector *connector, int mode)
+{
+ if (connector->dev->mode_config.funcs->atomic_commit)
+ return drm_atomic_helper_connector_dpms(connector, mode);
+ return drm_helper_connector_dpms(connector, mode);
+}
+
static const struct drm_connector_funcs
nouveau_connector_funcs = {
- .dpms = drm_helper_connector_dpms,
+ .dpms = nouveau_connector_dpms,
+ .reset = nouveau_conn_reset,
.detect = nouveau_connector_detect,
- .destroy = nouveau_connector_destroy,
+ .force = nouveau_connector_force,
.fill_modes = drm_helper_probe_single_connector_modes,
.set_property = nouveau_connector_set_property,
- .force = nouveau_connector_force
+ .destroy = nouveau_connector_destroy,
+ .atomic_duplicate_state = nouveau_conn_atomic_duplicate_state,
+ .atomic_destroy_state = nouveau_conn_atomic_destroy_state,
+ .atomic_set_property = nouveau_conn_atomic_set_property,
+ .atomic_get_property = nouveau_conn_atomic_get_property,
};
static const struct drm_connector_funcs
nouveau_connector_funcs_lvds = {
- .dpms = drm_helper_connector_dpms,
+ .dpms = nouveau_connector_dpms,
+ .reset = nouveau_conn_reset,
.detect = nouveau_connector_detect_lvds,
- .destroy = nouveau_connector_destroy,
+ .force = nouveau_connector_force,
.fill_modes = drm_helper_probe_single_connector_modes,
.set_property = nouveau_connector_set_property,
- .force = nouveau_connector_force
-};
-
-static int
-nouveau_connector_dp_dpms(struct drm_connector *connector, int mode)
-{
- struct nouveau_encoder *nv_encoder = NULL;
-
- if (connector->encoder)
- nv_encoder = nouveau_encoder(connector->encoder);
- if (nv_encoder && nv_encoder->dcb &&
- nv_encoder->dcb->type == DCB_OUTPUT_DP) {
- if (mode == DRM_MODE_DPMS_ON) {
- u8 data = DP_SET_POWER_D0;
- nvkm_wraux(nv_encoder->aux, DP_SET_POWER, &data, 1);
- usleep_range(1000, 2000);
- } else {
- u8 data = DP_SET_POWER_D3;
- nvkm_wraux(nv_encoder->aux, DP_SET_POWER, &data, 1);
- }
- }
-
- return drm_helper_connector_dpms(connector, mode);
-}
-
-static const struct drm_connector_funcs
-nouveau_connector_funcs_dp = {
- .dpms = nouveau_connector_dp_dpms,
- .detect = nouveau_connector_detect,
.destroy = nouveau_connector_destroy,
- .fill_modes = drm_helper_probe_single_connector_modes,
- .set_property = nouveau_connector_set_property,
- .force = nouveau_connector_force
+ .atomic_duplicate_state = nouveau_conn_atomic_duplicate_state,
+ .atomic_destroy_state = nouveau_conn_atomic_destroy_state,
+ .atomic_set_property = nouveau_conn_atomic_set_property,
+ .atomic_get_property = nouveau_conn_atomic_get_property,
};
static int
@@ -995,19 +1118,20 @@ nouveau_connector_hotplug(struct nvif_notify *notify)
struct nouveau_drm *drm = nouveau_drm(connector->dev);
const struct nvif_notify_conn_rep_v0 *rep = notify->data;
const char *name = connector->name;
+ struct nouveau_encoder *nv_encoder;
if (rep->mask & NVIF_NOTIFY_CONN_V0_IRQ) {
+ NV_DEBUG(drm, "service %s\n", name);
+ if ((nv_encoder = find_encoder(connector, DCB_OUTPUT_DP)))
+ nv50_mstm_service(nv_encoder->dp.mstm);
} else {
bool plugged = (rep->mask != NVIF_NOTIFY_CONN_V0_UNPLUG);
NV_DEBUG(drm, "%splugged %s\n", plugged ? "" : "un", name);
-
- mutex_lock(&drm->dev->mode_config.mutex);
- if (plugged)
- drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
- else
- drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
- mutex_unlock(&drm->dev->mode_config.mutex);
+ if ((nv_encoder = find_encoder(connector, DCB_OUTPUT_DP))) {
+ if (!plugged)
+ nv50_mstm_remove(nv_encoder->dp.mstm);
+ }
drm_helper_hpd_irq_event(connector->dev);
}
@@ -1188,7 +1312,7 @@ nouveau_connector_create(struct drm_device *dev, int index)
return ERR_PTR(ret);
}
- funcs = &nouveau_connector_funcs_dp;
+ funcs = &nouveau_connector_funcs;
break;
default:
funcs = &nouveau_connector_funcs;
@@ -1202,38 +1326,10 @@ nouveau_connector_create(struct drm_device *dev, int index)
drm_connector_init(dev, connector, funcs, type);
drm_connector_helper_add(connector, &nouveau_connector_helper_funcs);
- /* Init DVI-I specific properties */
- if (nv_connector->type == DCB_CONNECTOR_DVI_I)
- drm_object_attach_property(&connector->base, dev->mode_config.dvi_i_subconnector_property, 0);
+ connector->funcs->reset(connector);
+ nouveau_conn_attach_properties(connector);
- /* Add overscan compensation options to digital outputs */
- if (disp->underscan_property &&
- (type == DRM_MODE_CONNECTOR_DVID ||
- type == DRM_MODE_CONNECTOR_DVII ||
- type == DRM_MODE_CONNECTOR_HDMIA ||
- type == DRM_MODE_CONNECTOR_DisplayPort)) {
- drm_object_attach_property(&connector->base,
- disp->underscan_property,
- UNDERSCAN_OFF);
- drm_object_attach_property(&connector->base,
- disp->underscan_hborder_property,
- 0);
- drm_object_attach_property(&connector->base,
- disp->underscan_vborder_property,
- 0);
- }
-
- /* Add hue and saturation options */
- if (disp->vibrant_hue_property)
- drm_object_attach_property(&connector->base,
- disp->vibrant_hue_property,
- 90);
- if (disp->color_vibrance_property)
- drm_object_attach_property(&connector->base,
- disp->color_vibrance_property,
- 150);
-
- /* default scaling mode */
+ /* Default scaling mode */
switch (nv_connector->type) {
case DCB_CONNECTOR_LVDS:
case DCB_CONNECTOR_LVDS_SPWG:
@@ -1250,23 +1346,6 @@ nouveau_connector_create(struct drm_device *dev, int index)
break;
}
- /* scaling mode property */
- switch (nv_connector->type) {
- case DCB_CONNECTOR_TV_0:
- case DCB_CONNECTOR_TV_1:
- case DCB_CONNECTOR_TV_3:
- break;
- case DCB_CONNECTOR_VGA:
- if (disp->disp.oclass < NV50_DISP)
- break; /* can only scale on DFPs */
- /* fall-through */
- default:
- drm_object_attach_property(&connector->base, dev->mode_config.
- scaling_mode_property,
- nv_connector->scaling_mode);
- break;
- }
-
/* dithering properties */
switch (nv_connector->type) {
case DCB_CONNECTOR_TV_0:
@@ -1275,20 +1354,7 @@ nouveau_connector_create(struct drm_device *dev, int index)
case DCB_CONNECTOR_VGA:
break;
default:
- if (disp->dithering_mode) {
- nv_connector->dithering_mode = DITHERING_MODE_AUTO;
- drm_object_attach_property(&connector->base,
- disp->dithering_mode,
- nv_connector->
- dithering_mode);
- }
- if (disp->dithering_depth) {
- nv_connector->dithering_depth = DITHERING_DEPTH_AUTO;
- drm_object_attach_property(&connector->base,
- disp->dithering_depth,
- nv_connector->
- dithering_depth);
- }
+ nv_connector->dithering_mode = DITHERING_MODE_AUTO;
break;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.h b/drivers/gpu/drm/nouveau/nouveau_connector.h
index 7446ee66ea04..096983c42a1f 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.h
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.h
@@ -35,30 +35,6 @@
struct nvkm_i2c_port;
-enum nouveau_underscan_type {
- UNDERSCAN_OFF,
- UNDERSCAN_ON,
- UNDERSCAN_AUTO,
-};
-
-/* the enum values specifically defined here match nv50/nvd0 hw values, and
- * the code relies on this
- */
-enum nouveau_dithering_mode {
- DITHERING_MODE_OFF = 0x00,
- DITHERING_MODE_ON = 0x01,
- DITHERING_MODE_DYNAMIC2X2 = 0x10 | DITHERING_MODE_ON,
- DITHERING_MODE_STATIC2X2 = 0x18 | DITHERING_MODE_ON,
- DITHERING_MODE_TEMPORAL = 0x20 | DITHERING_MODE_ON,
- DITHERING_MODE_AUTO
-};
-
-enum nouveau_dithering_depth {
- DITHERING_DEPTH_6BPC = 0x00,
- DITHERING_DEPTH_8BPC = 0x02,
- DITHERING_DEPTH_AUTO
-};
-
struct nouveau_connector {
struct drm_connector base;
enum dcb_connector_type type;
@@ -70,12 +46,7 @@ struct nouveau_connector {
struct drm_dp_aux aux;
int dithering_mode;
- int dithering_depth;
int scaling_mode;
- bool scaling_full;
- enum nouveau_underscan_type underscan;
- u32 underscan_hborder;
- u32 underscan_vborder;
struct nouveau_encoder *detected_encoder;
struct edid *edid;
@@ -109,5 +80,74 @@ nouveau_connector_create(struct drm_device *, int index);
extern int nouveau_tv_disable;
extern int nouveau_ignorelid;
extern int nouveau_duallink;
+extern int nouveau_hdmimhz;
+
+#include <drm/drm_crtc.h>
+#define nouveau_conn_atom(p) \
+ container_of((p), struct nouveau_conn_atom, state)
+
+struct nouveau_conn_atom {
+ struct drm_connector_state state;
+
+ struct {
+ /* The enum values specifically defined here match nv50/gf119
+ * hw values, and the code relies on this.
+ */
+ enum {
+ DITHERING_MODE_OFF = 0x00,
+ DITHERING_MODE_ON = 0x01,
+ DITHERING_MODE_DYNAMIC2X2 = 0x10 | DITHERING_MODE_ON,
+ DITHERING_MODE_STATIC2X2 = 0x18 | DITHERING_MODE_ON,
+ DITHERING_MODE_TEMPORAL = 0x20 | DITHERING_MODE_ON,
+ DITHERING_MODE_AUTO
+ } mode;
+ enum {
+ DITHERING_DEPTH_6BPC = 0x00,
+ DITHERING_DEPTH_8BPC = 0x02,
+ DITHERING_DEPTH_AUTO
+ } depth;
+ } dither;
+
+ struct {
+ int mode; /* DRM_MODE_SCALE_* */
+ struct {
+ enum {
+ UNDERSCAN_OFF,
+ UNDERSCAN_ON,
+ UNDERSCAN_AUTO,
+ } mode;
+ u32 hborder;
+ u32 vborder;
+ } underscan;
+ bool full;
+ } scaler;
+
+ struct {
+ int color_vibrance;
+ int vibrant_hue;
+ } procamp;
+
+ union {
+ struct {
+ bool dither:1;
+ bool scaler:1;
+ bool procamp:1;
+ };
+ u8 mask;
+ } set;
+};
+void nouveau_conn_attach_properties(struct drm_connector *);
+void nouveau_conn_reset(struct drm_connector *);
+struct drm_connector_state *
+nouveau_conn_atomic_duplicate_state(struct drm_connector *);
+void nouveau_conn_atomic_destroy_state(struct drm_connector *,
+ struct drm_connector_state *);
+int nouveau_conn_atomic_set_property(struct drm_connector *,
+ struct drm_connector_state *,
+ struct drm_property *, u64);
+int nouveau_conn_atomic_get_property(struct drm_connector *,
+ const struct drm_connector_state *,
+ struct drm_property *, u64 *);
+struct drm_display_mode *nouveau_conn_native_mode(struct drm_connector *);
#endif /* __NOUVEAU_CONNECTOR_H__ */
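
The nouveau_conn_atom() macro above is the standard container_of() upcast for subclassed atomic state: struct drm_connector_state is embedded in struct nouveau_conn_atom, so any state pointer the core hands back can be converted to its wrapper. A minimal sketch of the idiom (illustrative only; valid solely for states allocated as nouveau_conn_atom):

	static struct nouveau_conn_atom *
	example_upcast(struct drm_connector_state *state)
	{
		return container_of(state, struct nouveau_conn_atom, state);
	}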
diff --git a/drivers/gpu/drm/nouveau/nouveau_crtc.h b/drivers/gpu/drm/nouveau/nouveau_crtc.h
index 863f10b8d818..050fcf30a0d2 100644
--- a/drivers/gpu/drm/nouveau/nouveau_crtc.h
+++ b/drivers/gpu/drm/nouveau/nouveau_crtc.h
@@ -38,8 +38,6 @@ struct nouveau_crtc {
uint32_t dpms_saved_fp_control;
uint32_t fp_users;
int saturation;
- int color_vibrance;
- int vibrant_hue;
int sharpness;
int last_dpms;
@@ -54,7 +52,6 @@ struct nouveau_crtc {
struct {
struct nouveau_bo *nvbo;
- bool visible;
uint32_t offset;
void (*set_offset)(struct nouveau_crtc *, uint32_t offset);
void (*set_pos)(struct nouveau_crtc *, int x, int y);
@@ -70,10 +67,6 @@ struct nouveau_crtc {
int depth;
} lut;
- int (*set_dither)(struct nouveau_crtc *crtc, bool update);
- int (*set_scale)(struct nouveau_crtc *crtc, bool update);
- int (*set_color_vibrance)(struct nouveau_crtc *crtc, bool update);
-
void (*save)(struct drm_crtc *crtc);
void (*restore)(struct drm_crtc *crtc);
};
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index afbf557b23d4..cef08da1da4e 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -24,7 +24,10 @@
*
*/
+#include <acpi/video.h>
#include <drm/drmP.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc_helper.h>
#include <nvif/class.h>
@@ -92,7 +95,7 @@ calc(int blanks, int blanke, int total, int line)
return line;
}
-int
+static int
nouveau_display_scanoutpos_head(struct drm_crtc *crtc, int *vpos, int *hpos,
ktime_t *stime, ktime_t *etime)
{
@@ -158,9 +161,13 @@ nouveau_display_vblstamp(struct drm_device *dev, unsigned int pipe,
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
if (nouveau_crtc(crtc)->index == pipe) {
+ struct drm_display_mode *mode;
+ if (dev->mode_config.funcs->atomic_commit)
+ mode = &crtc->state->adjusted_mode;
+ else
+ mode = &crtc->hwmode;
return drm_calc_vbltimestamp_from_scanoutpos(dev,
- pipe, max_error, time, flags,
- &crtc->hwmode);
+ pipe, max_error, time, flags, mode);
}
}
@@ -217,10 +224,6 @@ static void
nouveau_user_framebuffer_destroy(struct drm_framebuffer *drm_fb)
{
struct nouveau_framebuffer *fb = nouveau_framebuffer(drm_fb);
- struct nouveau_display *disp = nouveau_display(drm_fb->dev);
-
- if (disp->fb_dtor)
- disp->fb_dtor(drm_fb);
if (fb->nvbo)
drm_gem_object_unreference_unlocked(&fb->nvbo->gem);
@@ -245,57 +248,45 @@ static const struct drm_framebuffer_funcs nouveau_framebuffer_funcs = {
};
int
-nouveau_framebuffer_init(struct drm_device *dev,
- struct nouveau_framebuffer *nv_fb,
- const struct drm_mode_fb_cmd2 *mode_cmd,
- struct nouveau_bo *nvbo)
+nouveau_framebuffer_new(struct drm_device *dev,
+ const struct drm_mode_fb_cmd2 *mode_cmd,
+ struct nouveau_bo *nvbo,
+ struct nouveau_framebuffer **pfb)
{
- struct nouveau_display *disp = nouveau_display(dev);
- struct drm_framebuffer *fb = &nv_fb->base;
+ struct nouveau_framebuffer *fb;
int ret;
- drm_helper_mode_fill_fb_struct(fb, mode_cmd);
- nv_fb->nvbo = nvbo;
-
- ret = drm_framebuffer_init(dev, fb, &nouveau_framebuffer_funcs);
- if (ret)
- return ret;
+ if (!(fb = *pfb = kzalloc(sizeof(*fb), GFP_KERNEL)))
+ return -ENOMEM;
- if (disp->fb_ctor) {
- ret = disp->fb_ctor(fb);
- if (ret)
- disp->fb_dtor(fb);
- }
+ drm_helper_mode_fill_fb_struct(&fb->base, mode_cmd);
+ fb->nvbo = nvbo;
+ ret = drm_framebuffer_init(dev, &fb->base, &nouveau_framebuffer_funcs);
+ if (ret)
+ kfree(fb);
return ret;
}
-static struct drm_framebuffer *
+struct drm_framebuffer *
nouveau_user_framebuffer_create(struct drm_device *dev,
struct drm_file *file_priv,
const struct drm_mode_fb_cmd2 *mode_cmd)
{
- struct nouveau_framebuffer *nouveau_fb;
+ struct nouveau_framebuffer *fb;
+ struct nouveau_bo *nvbo;
struct drm_gem_object *gem;
- int ret = -ENOMEM;
+ int ret;
gem = drm_gem_object_lookup(file_priv, mode_cmd->handles[0]);
if (!gem)
return ERR_PTR(-ENOENT);
+ nvbo = nouveau_gem_object(gem);
- nouveau_fb = kzalloc(sizeof(struct nouveau_framebuffer), GFP_KERNEL);
- if (!nouveau_fb)
- goto err_unref;
-
- ret = nouveau_framebuffer_init(dev, nouveau_fb, mode_cmd, nouveau_gem_object(gem));
- if (ret)
- goto err;
+ ret = nouveau_framebuffer_new(dev, mode_cmd, nvbo, &fb);
+ if (ret == 0)
+ return &fb->base;
- return &nouveau_fb->base;
-
-err:
- kfree(nouveau_fb);
-err_unref:
drm_gem_object_unreference_unlocked(gem);
return ERR_PTR(ret);
}
@@ -358,6 +349,55 @@ static struct nouveau_drm_prop_enum_list dither_depth[] = {
} \
} while(0)
+static void
+nouveau_display_hpd_work(struct work_struct *work)
+{
+ struct nouveau_drm *drm = container_of(work, typeof(*drm), hpd_work);
+
+ pm_runtime_get_sync(drm->dev->dev);
+
+ drm_helper_hpd_irq_event(drm->dev);
+
+ pm_runtime_mark_last_busy(drm->dev->dev);
+ pm_runtime_put_sync(drm->dev->dev);
+}
+
+#ifdef CONFIG_ACPI
+
+/*
+ * Hans de Goede: This define belongs in acpi/video.h, I've submitted a patch
+ * to the acpi subsys to move it there from drivers/acpi/acpi_video.c.
+ * This should be dropped once that is merged.
+ */
+#ifndef ACPI_VIDEO_NOTIFY_PROBE
+#define ACPI_VIDEO_NOTIFY_PROBE 0x81
+#endif
+
+static int
+nouveau_display_acpi_ntfy(struct notifier_block *nb, unsigned long val,
+ void *data)
+{
+ struct nouveau_drm *drm = container_of(nb, typeof(*drm), acpi_nb);
+ struct acpi_bus_event *info = data;
+
+ if (!strcmp(info->device_class, ACPI_VIDEO_CLASS)) {
+ if (info->type == ACPI_VIDEO_NOTIFY_PROBE) {
+ /*
+ * This may be the only indication we receive of a
+ * connector hotplug on a runtime-suspended GPU, so
+ * schedule hpd_work to check.
+ */
+ schedule_work(&drm->hpd_work);
+
+ /* acpi-video should not generate keypresses for this */
+ return NOTIFY_BAD;
+ }
+ }
+
+ return NOTIFY_DONE;
+}
+#endif
+
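Both this notifier and the PM resume paths later in the patch funnel into the same hpd_work item. schedule_work() is safe from these contexts and is a no-op if the work is already pending, so duplicate probe events simply coalesce. The pairing, in outline:

	INIT_WORK(&drm->hpd_work, nouveau_display_hpd_work); /* once, at init */
	...
	schedule_work(&drm->hpd_work); /* from notifier/resume; idempotent */
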
int
nouveau_display_init(struct drm_device *dev)
{
@@ -385,16 +425,19 @@ nouveau_display_init(struct drm_device *dev)
}
void
-nouveau_display_fini(struct drm_device *dev)
+nouveau_display_fini(struct drm_device *dev, bool suspend)
{
struct nouveau_display *disp = nouveau_display(dev);
struct nouveau_drm *drm = nouveau_drm(dev);
struct drm_connector *connector;
- int head;
+ struct drm_crtc *crtc;
+
+ if (!suspend)
+ drm_crtc_force_disable_all(dev);
/* Make sure that drm and hw vblank irqs get properly disabled. */
- for (head = 0; head < dev->mode_config.num_crtc; head++)
- drm_vblank_off(dev, head);
+ drm_for_each_crtc(crtc, dev)
+ drm_crtc_vblank_off(crtc);
/* disable flip completion events */
nvif_notify_put(&drm->flip);
@@ -495,7 +538,7 @@ nouveau_display_create(struct drm_device *dev)
if (nouveau_modeset != 2 && drm->vbios.dcb.entries) {
static const u16 oclass[] = {
- GP104_DISP,
+ GP102_DISP,
GP100_DISP,
GM200_DISP,
GM107_DISP,
@@ -530,6 +573,8 @@ nouveau_display_create(struct drm_device *dev)
if (ret)
goto disp_create_err;
+ drm_mode_config_reset(dev);
+
if (dev->mode_config.num_crtc) {
ret = nouveau_display_vblank_init(dev);
if (ret)
@@ -537,6 +582,12 @@ nouveau_display_create(struct drm_device *dev)
}
nouveau_backlight_init(dev);
+ INIT_WORK(&drm->hpd_work, nouveau_display_hpd_work);
+#ifdef CONFIG_ACPI
+ drm->acpi_nb.notifier_call = nouveau_display_acpi_ntfy;
+ register_acpi_notifier(&drm->acpi_nb);
+#endif
+
return 0;
vblank_err:
@@ -552,11 +603,13 @@ nouveau_display_destroy(struct drm_device *dev)
{
struct nouveau_display *disp = nouveau_display(dev);
+#ifdef CONFIG_ACPI
+ unregister_acpi_notifier(&nouveau_drm(dev)->acpi_nb);
+#endif
nouveau_backlight_exit(dev);
nouveau_display_vblank_fini(dev);
drm_kms_helper_poll_fini(dev);
- drm_crtc_force_disable_all(dev);
drm_mode_config_cleanup(dev);
if (disp->dtor)
@@ -568,12 +621,138 @@ nouveau_display_destroy(struct drm_device *dev)
kfree(disp);
}
+static int
+nouveau_atomic_disable_connector(struct drm_atomic_state *state,
+ struct drm_connector *connector)
+{
+ struct drm_connector_state *connector_state;
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *crtc_state;
+ struct drm_plane_state *plane_state;
+ struct drm_plane *plane;
+ int ret;
+
+ if (!(crtc = connector->state->crtc))
+ return 0;
+
+ connector_state = drm_atomic_get_connector_state(state, connector);
+ if (IS_ERR(connector_state))
+ return PTR_ERR(connector_state);
+
+ ret = drm_atomic_set_crtc_for_connector(connector_state, NULL);
+ if (ret)
+ return ret;
+
+ crtc_state = drm_atomic_get_crtc_state(state, crtc);
+ if (IS_ERR(crtc_state))
+ return PTR_ERR(crtc_state);
+
+ ret = drm_atomic_set_mode_for_crtc(crtc_state, NULL);
+ if (ret)
+ return ret;
+
+ crtc_state->active = false;
+
+ drm_for_each_plane_mask(plane, connector->dev, crtc_state->plane_mask) {
+ plane_state = drm_atomic_get_plane_state(state, plane);
+ if (IS_ERR(plane_state))
+ return PTR_ERR(plane_state);
+
+ ret = drm_atomic_set_crtc_for_plane(plane_state, NULL);
+ if (ret)
+ return ret;
+
+ drm_atomic_set_fb_for_plane(plane_state, NULL);
+ }
+
+ return 0;
+}
+
+static int
+nouveau_atomic_disable(struct drm_device *dev,
+ struct drm_modeset_acquire_ctx *ctx)
+{
+ struct drm_atomic_state *state;
+ struct drm_connector *connector;
+ int ret = 0; /* stays 0 if there are no connectors to disable */
+
+ state = drm_atomic_state_alloc(dev);
+ if (!state)
+ return -ENOMEM;
+
+ state->acquire_ctx = ctx;
+
+ drm_for_each_connector(connector, dev) {
+ ret = nouveau_atomic_disable_connector(state, connector);
+ if (ret)
+ break;
+ }
+
+ if (ret == 0)
+ ret = drm_atomic_commit(state);
+ drm_atomic_state_put(state);
+ return ret;
+}
+
+static struct drm_atomic_state *
+nouveau_atomic_suspend(struct drm_device *dev)
+{
+ struct drm_modeset_acquire_ctx ctx;
+ struct drm_atomic_state *state;
+ int ret;
+
+ drm_modeset_acquire_init(&ctx, 0);
+
+retry:
+ ret = drm_modeset_lock_all_ctx(dev, &ctx);
+ if (ret < 0) {
+ state = ERR_PTR(ret);
+ goto unlock;
+ }
+
+ state = drm_atomic_helper_duplicate_state(dev, &ctx);
+ if (IS_ERR(state))
+ goto unlock;
+
+ ret = nouveau_atomic_disable(dev, &ctx);
+ if (ret < 0) {
+ drm_atomic_state_put(state);
+ state = ERR_PTR(ret);
+ goto unlock;
+ }
+
+unlock:
+ if (PTR_ERR(state) == -EDEADLK) {
+ drm_modeset_backoff(&ctx);
+ goto retry;
+ }
+
+ drm_modeset_drop_locks(&ctx);
+ drm_modeset_acquire_fini(&ctx);
+ return state;
+}
+
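nouveau_atomic_suspend() follows the standard DRM modeset-lock retry idiom: any -EDEADLK from the ww-mutex machinery means drop everything, back off, and retry with the same acquire context. Reduced to its skeleton (a sketch with the locked work elided, not code from this patch):

	/* needs <drm/drm_modeset_lock.h> */
	static int
	example_locked_op(struct drm_device *dev)
	{
		struct drm_modeset_acquire_ctx ctx;
		int ret;

		drm_modeset_acquire_init(&ctx, 0);
	retry:
		ret = drm_modeset_lock_all_ctx(dev, &ctx);
		if (ret == -EDEADLK) {
			drm_modeset_backoff(&ctx);
			goto retry;
		}
		if (ret == 0) {
			/* ... work under the modeset locks ... */
		}
		drm_modeset_drop_locks(&ctx);
		drm_modeset_acquire_fini(&ctx);
		return ret;
	}
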
int
nouveau_display_suspend(struct drm_device *dev, bool runtime)
{
+ struct nouveau_display *disp = nouveau_display(dev);
struct drm_crtc *crtc;
- nouveau_display_fini(dev);
+ if (dev->mode_config.funcs->atomic_commit) {
+ if (!runtime) {
+ disp->suspend = nouveau_atomic_suspend(dev);
+ if (IS_ERR(disp->suspend)) {
+ int ret = PTR_ERR(disp->suspend);
+ disp->suspend = NULL;
+ return ret;
+ }
+ }
+
+ nouveau_display_fini(dev, true);
+ return 0;
+ }
+
+ nouveau_display_fini(dev, true);
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
struct nouveau_framebuffer *nouveau_fb;
@@ -600,9 +779,19 @@ nouveau_display_suspend(struct drm_device *dev, bool runtime)
void
nouveau_display_resume(struct drm_device *dev, bool runtime)
{
+ struct nouveau_display *disp = nouveau_display(dev);
struct nouveau_drm *drm = nouveau_drm(dev);
struct drm_crtc *crtc;
- int ret, head;
+ int ret;
+
+ if (dev->mode_config.funcs->atomic_commit) {
+ nouveau_display_init(dev);
+ if (disp->suspend) {
+ drm_atomic_helper_resume(dev, disp->suspend);
+ disp->suspend = NULL;
+ }
+ return;
+ }
/* re-pin fb/cursors */
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
@@ -647,10 +836,6 @@ nouveau_display_resume(struct drm_device *dev, bool runtime)
drm_helper_resume_force_mode(dev);
- /* Make sure that drm and hw vblank irqs get resumed if needed. */
- for (head = 0; head < dev->mode_config.num_crtc; head++)
- drm_vblank_on(dev, head);
-
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
@@ -692,10 +877,7 @@ nouveau_page_flip_emit(struct nouveau_channel *chan,
if (ret)
goto fail;
- if (drm->device.info.family < NV_DEVICE_INFO_V0_FERMI)
- BEGIN_NV04(chan, NvSubSw, NV_SW_PAGE_FLIP, 1);
- else
- BEGIN_NVC0(chan, FermiSw, NV_SW_PAGE_FLIP, 1);
+ BEGIN_NV04(chan, NvSubSw, NV_SW_PAGE_FLIP, 1);
OUT_RING (chan, 0x00000000);
FIRE_RING (chan);
@@ -724,6 +906,8 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
struct nouveau_channel *chan;
struct nouveau_cli *cli;
struct nouveau_fence *fence;
+ struct nv04_display *dispnv04 = nv04_display(dev);
+ int head = nouveau_crtc(crtc)->index;
int ret;
chan = drm->channel;
@@ -770,32 +954,23 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
drm_crtc_vblank_get(crtc);
/* Emit a page flip */
- if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
- ret = nv50_display_flip_next(crtc, fb, chan, swap_interval);
+ if (swap_interval) {
+ ret = RING_SPACE(chan, 8);
if (ret)
goto fail_unreserve;
- } else {
- struct nv04_display *dispnv04 = nv04_display(dev);
- int head = nouveau_crtc(crtc)->index;
-
- if (swap_interval) {
- ret = RING_SPACE(chan, 8);
- if (ret)
- goto fail_unreserve;
-
- BEGIN_NV04(chan, NvSubImageBlit, 0x012c, 1);
- OUT_RING (chan, 0);
- BEGIN_NV04(chan, NvSubImageBlit, 0x0134, 1);
- OUT_RING (chan, head);
- BEGIN_NV04(chan, NvSubImageBlit, 0x0100, 1);
- OUT_RING (chan, 0);
- BEGIN_NV04(chan, NvSubImageBlit, 0x0130, 1);
- OUT_RING (chan, 0);
- }
- nouveau_bo_ref(new_bo, &dispnv04->image[head]);
+ BEGIN_NV04(chan, NvSubImageBlit, 0x012c, 1);
+ OUT_RING (chan, 0);
+ BEGIN_NV04(chan, NvSubImageBlit, 0x0134, 1);
+ OUT_RING (chan, head);
+ BEGIN_NV04(chan, NvSubImageBlit, 0x0100, 1);
+ OUT_RING (chan, 0);
+ BEGIN_NV04(chan, NvSubImageBlit, 0x0130, 1);
+ OUT_RING (chan, 0);
}
+ nouveau_bo_ref(new_bo, &dispnv04->image[head]);
+
ret = nouveau_page_flip_emit(chan, old_bo, new_bo, s, &fence);
if (ret)
goto fail_unreserve;
@@ -843,16 +1018,8 @@ nouveau_finish_page_flip(struct nouveau_channel *chan,
s = list_first_entry(&fctx->flip, struct nouveau_page_flip_state, head);
if (s->event) {
- if (drm->device.info.family < NV_DEVICE_INFO_V0_TESLA) {
- drm_crtc_arm_vblank_event(s->crtc, s->event);
- } else {
- drm_crtc_send_vblank_event(s->crtc, s->event);
-
- /* Give up ownership of vblank for page-flipped crtc */
- drm_crtc_vblank_put(s->crtc);
- }
- }
- else {
+ drm_crtc_arm_vblank_event(s->crtc, s->event);
+ } else {
/* Give up ownership of vblank for page-flipped crtc */
drm_crtc_vblank_put(s->crtc);
}
@@ -874,12 +1041,10 @@ nouveau_flip_complete(struct nvif_notify *notify)
struct nouveau_page_flip_state state;
if (!nouveau_finish_page_flip(chan, &state)) {
- if (drm->device.info.family < NV_DEVICE_INFO_V0_TESLA) {
- nv_set_crtc_base(drm->dev, drm_crtc_index(state.crtc),
- state.offset + state.crtc->y *
- state.pitch + state.crtc->x *
- state.bpp / 8);
- }
+ nv_set_crtc_base(drm->dev, drm_crtc_index(state.crtc),
+ state.offset + state.crtc->y *
+ state.pitch + state.crtc->x *
+ state.bpp / 8);
}
return NVIF_NOTIFY_KEEP;
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.h b/drivers/gpu/drm/nouveau/nouveau_display.h
index 0420ee861ea4..4a75df06c139 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.h
+++ b/drivers/gpu/drm/nouveau/nouveau_display.h
@@ -22,8 +22,9 @@ nouveau_framebuffer(struct drm_framebuffer *fb)
return container_of(fb, struct nouveau_framebuffer, base);
}
-int nouveau_framebuffer_init(struct drm_device *, struct nouveau_framebuffer *,
- const struct drm_mode_fb_cmd2 *, struct nouveau_bo *);
+int nouveau_framebuffer_new(struct drm_device *,
+ const struct drm_mode_fb_cmd2 *,
+ struct nouveau_bo *, struct nouveau_framebuffer **);
struct nouveau_page_flip_state {
struct list_head head;
@@ -39,9 +40,6 @@ struct nouveau_display {
int (*init)(struct drm_device *);
void (*fini)(struct drm_device *);
- int (*fb_ctor)(struct drm_framebuffer *);
- void (*fb_dtor)(struct drm_framebuffer *);
-
struct nvif_object disp;
struct drm_property *dithering_mode;
@@ -52,6 +50,8 @@ struct nouveau_display {
/* not really hue and saturation: */
struct drm_property *vibrant_hue_property;
struct drm_property *color_vibrance_property;
+
+ struct drm_atomic_state *suspend;
};
static inline struct nouveau_display *
@@ -63,7 +63,7 @@ nouveau_display(struct drm_device *dev)
int nouveau_display_create(struct drm_device *dev);
void nouveau_display_destroy(struct drm_device *dev);
int nouveau_display_init(struct drm_device *dev);
-void nouveau_display_fini(struct drm_device *dev);
+void nouveau_display_fini(struct drm_device *dev, bool suspend);
int nouveau_display_suspend(struct drm_device *dev, bool runtime);
void nouveau_display_resume(struct drm_device *dev, bool runtime);
int nouveau_display_vblank_enable(struct drm_device *, unsigned int);
@@ -91,6 +91,8 @@ int nouveau_crtc_set_config(struct drm_mode_set *set);
#ifdef CONFIG_DRM_NOUVEAU_BACKLIGHT
extern int nouveau_backlight_init(struct drm_device *);
extern void nouveau_backlight_exit(struct drm_device *);
+extern void nouveau_backlight_ctor(void);
+extern void nouveau_backlight_dtor(void);
#else
static inline int
nouveau_backlight_init(struct drm_device *dev)
@@ -101,6 +103,17 @@ nouveau_backlight_init(struct drm_device *dev)
static inline void
nouveau_backlight_exit(struct drm_device *dev) {
}
+
+static inline void
+nouveau_backlight_ctor(void) {
+}
+
+static inline void
+nouveau_backlight_dtor(void) {
+}
#endif
+struct drm_framebuffer *
+nouveau_user_framebuffer_create(struct drm_device *, struct drm_file *,
+ const struct drm_mode_fb_cmd2 *);
#endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_dp.c b/drivers/gpu/drm/nouveau/nouveau_dp.c
index 87d52d36f4fc..0d052e1660f8 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dp.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dp.c
@@ -30,6 +30,13 @@
#include "nouveau_encoder.h"
#include "nouveau_crtc.h"
+#include <nvif/class.h>
+#include <nvif/cl5070.h>
+
+MODULE_PARM_DESC(mst, "Enable DisplayPort multi-stream (default: enabled)");
+static int nouveau_mst = 1;
+module_param_named(mst, nouveau_mst, int, 0400);
+
static void
nouveau_dp_probe_oui(struct drm_device *dev, struct nvkm_i2c_aux *aux, u8 *dpcd)
{
@@ -55,14 +62,14 @@ nouveau_dp_detect(struct nouveau_encoder *nv_encoder)
struct drm_device *dev = nv_encoder->base.base.dev;
struct nouveau_drm *drm = nouveau_drm(dev);
struct nvkm_i2c_aux *aux;
- u8 *dpcd = nv_encoder->dp.dpcd;
+ u8 dpcd[8];
int ret;
aux = nv_encoder->aux;
if (!aux)
return -ENODEV;
- ret = nvkm_rdaux(aux, DP_DPCD_REV, dpcd, 8);
+ ret = nvkm_rdaux(aux, DP_DPCD_REV, dpcd, sizeof(dpcd));
if (ret)
return ret;
@@ -84,5 +91,11 @@ nouveau_dp_detect(struct nouveau_encoder *nv_encoder)
nv_encoder->dp.link_nr, nv_encoder->dp.link_bw);
nouveau_dp_probe_oui(dev, aux, dpcd);
- return 0;
+
+ ret = nv50_mstm_detect(nv_encoder->dp.mstm, dpcd, nouveau_mst);
+ if (ret == 1)
+ return NOUVEAU_DP_MST;
+ if (ret == 0)
+ return NOUVEAU_DP_SST;
+ return ret;
}
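
nouveau_dp_detect() now returns a tri-state: a negative errno on failure, NOUVEAU_DP_MST when the topology manager claims the sink, or NOUVEAU_DP_SST for an ordinary single-stream link. The caller-side contract mirrors the nouveau_connector_ddc_detect() change earlier in this patch:

	ret = nouveau_dp_detect(nv_encoder);
	if (ret == NOUVEAU_DP_MST)
		return NULL;	/* MST: per-port connectors take over */
	if (ret == NOUVEAU_DP_SST)
		break;		/* SST: use this encoder directly */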
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index 3100fd88a015..59348fc41c77 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -47,6 +47,7 @@
#include "nouveau_ttm.h"
#include "nouveau_gem.h"
#include "nouveau_vga.h"
+#include "nouveau_led.h"
#include "nouveau_hwmon.h"
#include "nouveau_acpi.h"
#include "nouveau_bios.h"
@@ -475,6 +476,7 @@ nouveau_drm_load(struct drm_device *dev, unsigned long flags)
nouveau_hwmon_init(dev);
nouveau_accel_init(drm);
nouveau_fbcon_init(dev);
+ nouveau_led_init(dev);
if (nouveau_runtime_pm != 0) {
pm_runtime_use_autosuspend(dev->dev);
@@ -510,13 +512,14 @@ nouveau_drm_unload(struct drm_device *dev)
pm_runtime_forbid(dev->dev);
}
+ nouveau_led_fini(dev);
nouveau_fbcon_fini(dev);
nouveau_accel_fini(drm);
nouveau_hwmon_fini(dev);
nouveau_debugfs_fini(drm);
if (dev->mode_config.num_crtc)
- nouveau_display_fini(dev);
+ nouveau_display_fini(dev, false);
nouveau_display_destroy(dev);
nouveau_bios_takedown(dev);
@@ -561,6 +564,8 @@ nouveau_do_suspend(struct drm_device *dev, bool runtime)
struct nouveau_cli *cli;
int ret;
+ nouveau_led_suspend(dev);
+
if (dev->mode_config.num_crtc) {
NV_INFO(drm, "suspending console...\n");
nouveau_fbcon_set_suspend(dev, 1);
@@ -649,6 +654,8 @@ nouveau_do_resume(struct drm_device *dev, bool runtime)
nouveau_fbcon_set_suspend(dev, 0);
}
+ nouveau_led_resume(dev);
+
return 0;
}
@@ -692,7 +699,12 @@ nouveau_pmops_resume(struct device *dev)
return ret;
pci_set_master(pdev);
- return nouveau_do_resume(drm_dev, false);
+ ret = nouveau_do_resume(drm_dev, false);
+
+ /* Monitors may have been connected / disconnected during suspend */
+ schedule_work(&nouveau_drm(drm_dev)->hpd_work);
+
+ return ret;
}
static int
@@ -766,6 +778,10 @@ nouveau_pmops_runtime_resume(struct device *dev)
nvif_mask(&device->object, 0x088488, (1 << 25), (1 << 25));
vga_switcheroo_set_dynamic_switch(pdev, VGA_SWITCHEROO_ON);
drm_dev->switch_power_state = DRM_SWITCH_POWER_ON;
+
+ /* Monitors may have been connected / disconnected during suspend */
+ schedule_work(&nouveau_drm(drm_dev)->hpd_work);
+
return ret;
}
@@ -1030,6 +1046,7 @@ static void nouveau_display_options(void)
DRM_DEBUG_DRIVER("... modeset : %d\n", nouveau_modeset);
DRM_DEBUG_DRIVER("... runpm : %d\n", nouveau_runtime_pm);
DRM_DEBUG_DRIVER("... vram_pushbuf : %d\n", nouveau_vram_pushbuf);
+ DRM_DEBUG_DRIVER("... hdmimhz : %d\n", nouveau_hdmimhz);
}
static const struct dev_pm_ops nouveau_pm_ops = {
@@ -1105,6 +1122,7 @@ nouveau_drm_init(void)
#endif
nouveau_register_dsm_handler();
+ nouveau_backlight_ctor();
return drm_pci_init(&driver_pci, &nouveau_drm_pci_driver);
}
@@ -1115,6 +1133,7 @@ nouveau_drm_exit(void)
return;
drm_pci_exit(&driver_pci, &nouveau_drm_pci_driver);
+ nouveau_backlight_dtor();
nouveau_unregister_dsm_handler();
#ifdef CONFIG_NOUVEAU_PLATFORM_DRIVER
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index 822a0212cd48..8d5ed5bfdacb 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -37,6 +37,8 @@
* - implemented limited ABI16/NVIF interop
*/
+#include <linux/notifier.h>
+
#include <nvif/client.h>
#include <nvif/device.h>
#include <nvif/ioctl.h>
@@ -161,11 +163,19 @@ struct nouveau_drm {
struct nvbios vbios;
struct nouveau_display *display;
struct backlight_device *backlight;
+ struct list_head bl_connectors;
+ struct work_struct hpd_work;
+#ifdef CONFIG_ACPI
+ struct notifier_block acpi_nb;
+#endif
/* power management */
struct nouveau_hwmon *hwmon;
struct nouveau_debugfs *debugfs;
+ /* led management */
+ struct nouveau_led *led;
+
/* display power reference */
bool have_disp_power_ref;
@@ -201,6 +211,10 @@ void nouveau_drm_device_remove(struct drm_device *dev);
if (unlikely(drm_debug & DRM_UT_DRIVER)) \
NV_PRINTK(info, &(drm)->client, f, ##a); \
} while(0)
+#define NV_ATOMIC(drm,f,a...) do { \
+ if (unlikely(drm_debug & DRM_UT_ATOMIC)) \
+ NV_PRINTK(info, &(drm)->client, f, ##a); \
+} while(0)
extern int nouveau_modeset;
diff --git a/drivers/gpu/drm/nouveau/nouveau_encoder.h b/drivers/gpu/drm/nouveau/nouveau_encoder.h
index ee6a6d3fc80f..198e5f27682f 100644
--- a/drivers/gpu/drm/nouveau/nouveau_encoder.h
+++ b/drivers/gpu/drm/nouveau/nouveau_encoder.h
@@ -30,6 +30,7 @@
#include <subdev/bios/dcb.h>
#include <drm/drm_encoder_slave.h>
+#include <drm/drm_dp_mst_helper.h>
#include "dispnv04/disp.h"
#define NV_DPMS_CLEARED 0x80
@@ -57,15 +58,16 @@ struct nouveau_encoder {
union {
struct {
- u8 dpcd[8];
+ struct nv50_mstm *mstm;
int link_nr;
int link_bw;
- u32 datarate;
} dp;
};
void (*enc_save)(struct drm_encoder *encoder);
void (*enc_restore)(struct drm_encoder *encoder);
+ void (*update)(struct nouveau_encoder *, u8 head,
+ struct drm_display_mode *, u8 proto, u8 depth);
};
struct nouveau_encoder *
@@ -90,9 +92,17 @@ get_slave_funcs(struct drm_encoder *enc)
}
/* nouveau_dp.c */
+enum nouveau_dp_status {
+ NOUVEAU_DP_SST,
+ NOUVEAU_DP_MST,
+};
+
int nouveau_dp_detect(struct nouveau_encoder *);
struct nouveau_connector *
nouveau_encoder_connector_get(struct nouveau_encoder *encoder);
+int nv50_mstm_detect(struct nv50_mstm *, u8 dpcd[8], int allow);
+void nv50_mstm_remove(struct nv50_mstm *);
+void nv50_mstm_service(struct nv50_mstm *);
#endif /* __NOUVEAU_ENCODER_H__ */
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
index 9f5692726c16..2f2a3dcd4ad7 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
@@ -58,7 +58,7 @@ static void
nouveau_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
{
struct nouveau_fbdev *fbcon = info->par;
- struct nouveau_drm *drm = nouveau_drm(fbcon->dev);
+ struct nouveau_drm *drm = nouveau_drm(fbcon->helper.dev);
struct nvif_device *device = &drm->device;
int ret;
@@ -90,7 +90,7 @@ static void
nouveau_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *image)
{
struct nouveau_fbdev *fbcon = info->par;
- struct nouveau_drm *drm = nouveau_drm(fbcon->dev);
+ struct nouveau_drm *drm = nouveau_drm(fbcon->helper.dev);
struct nvif_device *device = &drm->device;
int ret;
@@ -122,7 +122,7 @@ static void
nouveau_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
{
struct nouveau_fbdev *fbcon = info->par;
- struct nouveau_drm *drm = nouveau_drm(fbcon->dev);
+ struct nouveau_drm *drm = nouveau_drm(fbcon->helper.dev);
struct nvif_device *device = &drm->device;
int ret;
@@ -154,7 +154,7 @@ static int
nouveau_fbcon_sync(struct fb_info *info)
{
struct nouveau_fbdev *fbcon = info->par;
- struct nouveau_drm *drm = nouveau_drm(fbcon->dev);
+ struct nouveau_drm *drm = nouveau_drm(fbcon->helper.dev);
struct nouveau_channel *chan = drm->channel;
int ret;
@@ -181,7 +181,7 @@ static int
nouveau_fbcon_open(struct fb_info *info, int user)
{
struct nouveau_fbdev *fbcon = info->par;
- struct nouveau_drm *drm = nouveau_drm(fbcon->dev);
+ struct nouveau_drm *drm = nouveau_drm(fbcon->helper.dev);
int ret = pm_runtime_get_sync(drm->dev->dev);
if (ret < 0 && ret != -EACCES)
return ret;
@@ -192,42 +192,30 @@ static int
nouveau_fbcon_release(struct fb_info *info, int user)
{
struct nouveau_fbdev *fbcon = info->par;
- struct nouveau_drm *drm = nouveau_drm(fbcon->dev);
+ struct nouveau_drm *drm = nouveau_drm(fbcon->helper.dev);
pm_runtime_put(drm->dev->dev);
return 0;
}
static struct fb_ops nouveau_fbcon_ops = {
.owner = THIS_MODULE,
+ DRM_FB_HELPER_DEFAULT_OPS,
.fb_open = nouveau_fbcon_open,
.fb_release = nouveau_fbcon_release,
- .fb_check_var = drm_fb_helper_check_var,
- .fb_set_par = drm_fb_helper_set_par,
.fb_fillrect = nouveau_fbcon_fillrect,
.fb_copyarea = nouveau_fbcon_copyarea,
.fb_imageblit = nouveau_fbcon_imageblit,
.fb_sync = nouveau_fbcon_sync,
- .fb_pan_display = drm_fb_helper_pan_display,
- .fb_blank = drm_fb_helper_blank,
- .fb_setcmap = drm_fb_helper_setcmap,
- .fb_debug_enter = drm_fb_helper_debug_enter,
- .fb_debug_leave = drm_fb_helper_debug_leave,
};
static struct fb_ops nouveau_fbcon_sw_ops = {
.owner = THIS_MODULE,
+ DRM_FB_HELPER_DEFAULT_OPS,
.fb_open = nouveau_fbcon_open,
.fb_release = nouveau_fbcon_release,
- .fb_check_var = drm_fb_helper_check_var,
- .fb_set_par = drm_fb_helper_set_par,
.fb_fillrect = drm_fb_helper_cfb_fillrect,
.fb_copyarea = drm_fb_helper_cfb_copyarea,
.fb_imageblit = drm_fb_helper_cfb_imageblit,
- .fb_pan_display = drm_fb_helper_pan_display,
- .fb_blank = drm_fb_helper_blank,
- .fb_setcmap = drm_fb_helper_setcmap,
- .fb_debug_enter = drm_fb_helper_debug_enter,
- .fb_debug_leave = drm_fb_helper_debug_leave,
};
void
@@ -333,16 +321,15 @@ nouveau_fbcon_create(struct drm_fb_helper *helper,
{
struct nouveau_fbdev *fbcon =
container_of(helper, struct nouveau_fbdev, helper);
- struct drm_device *dev = fbcon->dev;
+ struct drm_device *dev = fbcon->helper.dev;
struct nouveau_drm *drm = nouveau_drm(dev);
struct nvif_device *device = &drm->device;
struct fb_info *info;
- struct drm_framebuffer *fb;
- struct nouveau_framebuffer *nouveau_fb;
+ struct nouveau_framebuffer *fb;
struct nouveau_channel *chan;
struct nouveau_bo *nvbo;
struct drm_mode_fb_cmd2 mode_cmd;
- int size, ret;
+ int ret;
mode_cmd.width = sizes->surface_width;
mode_cmd.height = sizes->surface_height;
@@ -353,16 +340,17 @@ nouveau_fbcon_create(struct drm_fb_helper *helper,
mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
sizes->surface_depth);
- size = mode_cmd.pitches[0] * mode_cmd.height;
- size = roundup(size, PAGE_SIZE);
-
- ret = nouveau_gem_new(dev, size, 0, NOUVEAU_GEM_DOMAIN_VRAM,
- 0, 0x0000, &nvbo);
+ ret = nouveau_gem_new(dev, mode_cmd.pitches[0] * mode_cmd.height,
+ 0, NOUVEAU_GEM_DOMAIN_VRAM, 0, 0x0000, &nvbo);
if (ret) {
NV_ERROR(drm, "failed to allocate framebuffer\n");
goto out;
}
+ ret = nouveau_framebuffer_new(dev, &mode_cmd, nvbo, &fb);
+ if (ret)
+ goto out_unref;
+
ret = nouveau_bo_pin(nvbo, TTM_PL_FLAG_VRAM, false);
if (ret) {
NV_ERROR(drm, "failed to pin fb: %d\n", ret);
@@ -377,8 +365,7 @@ nouveau_fbcon_create(struct drm_fb_helper *helper,
chan = nouveau_nofbaccel ? NULL : drm->channel;
if (chan && device->info.family >= NV_DEVICE_INFO_V0_TESLA) {
- ret = nouveau_bo_vma_add(nvbo, drm->client.vm,
- &fbcon->nouveau_fb.vma);
+ ret = nouveau_bo_vma_add(nvbo, drm->client.vm, &fb->vma);
if (ret) {
NV_ERROR(drm, "failed to map fb into chan: %d\n", ret);
chan = NULL;
@@ -394,13 +381,8 @@ nouveau_fbcon_create(struct drm_fb_helper *helper,
info->par = fbcon;
- nouveau_framebuffer_init(dev, &fbcon->nouveau_fb, &mode_cmd, nvbo);
-
- nouveau_fb = &fbcon->nouveau_fb;
- fb = &nouveau_fb->base;
-
/* setup helper */
- fbcon->helper.fb = fb;
+ fbcon->helper.fb = &fb->base;
strcpy(info->fix.id, "nouveaufb");
if (!chan)
@@ -411,14 +393,14 @@ nouveau_fbcon_create(struct drm_fb_helper *helper,
FBINFO_HWACCEL_IMAGEBLIT;
info->flags |= FBINFO_CAN_FORCE_OUTPUT;
info->fbops = &nouveau_fbcon_sw_ops;
- info->fix.smem_start = nvbo->bo.mem.bus.base +
- nvbo->bo.mem.bus.offset;
- info->fix.smem_len = size;
+ info->fix.smem_start = fb->nvbo->bo.mem.bus.base +
+ fb->nvbo->bo.mem.bus.offset;
+ info->fix.smem_len = fb->nvbo->bo.mem.num_pages << PAGE_SHIFT;
- info->screen_base = nvbo_kmap_obj_iovirtual(nouveau_fb->nvbo);
- info->screen_size = size;
+ info->screen_base = nvbo_kmap_obj_iovirtual(fb->nvbo);
+ info->screen_size = fb->nvbo->bo.mem.num_pages << PAGE_SHIFT;
- drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth);
+ drm_fb_helper_fill_fix(info, fb->base.pitches[0], fb->base.depth);
drm_fb_helper_fill_var(info, &fbcon->helper, sizes->fb_width, sizes->fb_height);
/* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */
@@ -429,20 +411,19 @@ nouveau_fbcon_create(struct drm_fb_helper *helper,
/* To allow resizing without swapping buffers */
NV_INFO(drm, "allocated %dx%d fb: 0x%llx, bo %p\n",
- nouveau_fb->base.width, nouveau_fb->base.height,
- nvbo->bo.offset, nvbo);
+ fb->base.width, fb->base.height, fb->nvbo->bo.offset, nvbo);
vga_switcheroo_client_fb_set(dev->pdev, info);
return 0;
out_unlock:
if (chan)
- nouveau_bo_vma_del(nvbo, &fbcon->nouveau_fb.vma);
- nouveau_bo_unmap(nvbo);
+ nouveau_bo_vma_del(fb->nvbo, &fb->vma);
+ nouveau_bo_unmap(fb->nvbo);
out_unpin:
- nouveau_bo_unpin(nvbo);
+ nouveau_bo_unpin(fb->nvbo);
out_unref:
- nouveau_bo_ref(NULL, &nvbo);
+ nouveau_bo_ref(NULL, &fb->nvbo);
out:
return ret;
}
@@ -458,28 +439,26 @@ nouveau_fbcon_output_poll_changed(struct drm_device *dev)
static int
nouveau_fbcon_destroy(struct drm_device *dev, struct nouveau_fbdev *fbcon)
{
- struct nouveau_framebuffer *nouveau_fb = &fbcon->nouveau_fb;
+ struct nouveau_framebuffer *nouveau_fb = nouveau_framebuffer(fbcon->helper.fb);
drm_fb_helper_unregister_fbi(&fbcon->helper);
drm_fb_helper_release_fbi(&fbcon->helper);
+ drm_fb_helper_fini(&fbcon->helper);
if (nouveau_fb->nvbo) {
- nouveau_bo_unmap(nouveau_fb->nvbo);
nouveau_bo_vma_del(nouveau_fb->nvbo, &nouveau_fb->vma);
+ nouveau_bo_unmap(nouveau_fb->nvbo);
nouveau_bo_unpin(nouveau_fb->nvbo);
- drm_gem_object_unreference_unlocked(&nouveau_fb->nvbo->gem);
- nouveau_fb->nvbo = NULL;
+ drm_framebuffer_unreference(&nouveau_fb->base);
}
- drm_fb_helper_fini(&fbcon->helper);
- drm_framebuffer_unregister_private(&nouveau_fb->base);
- drm_framebuffer_cleanup(&nouveau_fb->base);
+
return 0;
}
void nouveau_fbcon_gpu_lockup(struct fb_info *info)
{
struct nouveau_fbdev *fbcon = info->par;
- struct nouveau_drm *drm = nouveau_drm(fbcon->dev);
+ struct nouveau_drm *drm = nouveau_drm(fbcon->helper.dev);
NV_ERROR(drm, "GPU lockup - switching to software fbcon\n");
info->flags |= FBINFO_HWACCEL_DISABLED;
@@ -522,7 +501,6 @@ nouveau_fbcon_init(struct drm_device *dev)
if (!fbcon)
return -ENOMEM;
- fbcon->dev = dev;
drm->fbcon = fbcon;
drm_fb_helper_prepare(dev, &fbcon->helper, &nouveau_fbcon_helper_funcs);
@@ -545,7 +523,8 @@ nouveau_fbcon_init(struct drm_device *dev)
preferred_bpp = 32;
/* disable all the possible outputs/crtcs before entering KMS mode */
- drm_helper_disable_unused_functions(dev);
+ if (!dev->mode_config.funcs->atomic_commit)
+ drm_helper_disable_unused_functions(dev);
ret = drm_fb_helper_initial_config(&fbcon->helper, preferred_bpp);
if (ret)
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.h b/drivers/gpu/drm/nouveau/nouveau_fbcon.h
index ca77ad001978..e2bca729721e 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.h
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.h
@@ -33,8 +33,6 @@
struct nouveau_fbdev {
struct drm_fb_helper helper;
- struct nouveau_framebuffer nouveau_fb;
- struct drm_device *dev;
unsigned int saved_flags;
struct nvif_object surf2d;
struct nvif_object clip;
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
index 4bb9ab892ae1..f2f348f0160c 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
@@ -28,7 +28,7 @@
#include <linux/ktime.h>
#include <linux/hrtimer.h>
-#include <trace/events/fence.h>
+#include <trace/events/dma_fence.h>
#include <nvif/cl826e.h>
#include <nvif/notify.h>
@@ -38,11 +38,11 @@
#include "nouveau_dma.h"
#include "nouveau_fence.h"
-static const struct fence_ops nouveau_fence_ops_uevent;
-static const struct fence_ops nouveau_fence_ops_legacy;
+static const struct dma_fence_ops nouveau_fence_ops_uevent;
+static const struct dma_fence_ops nouveau_fence_ops_legacy;
static inline struct nouveau_fence *
-from_fence(struct fence *fence)
+from_fence(struct dma_fence *fence)
{
return container_of(fence, struct nouveau_fence, base);
}
@@ -58,23 +58,23 @@ nouveau_fence_signal(struct nouveau_fence *fence)
{
int drop = 0;
- fence_signal_locked(&fence->base);
+ dma_fence_signal_locked(&fence->base);
list_del(&fence->head);
rcu_assign_pointer(fence->channel, NULL);
- if (test_bit(FENCE_FLAG_USER_BITS, &fence->base.flags)) {
+ if (test_bit(DMA_FENCE_FLAG_USER_BITS, &fence->base.flags)) {
struct nouveau_fence_chan *fctx = nouveau_fctx(fence);
if (!--fctx->notify_ref)
drop = 1;
}
- fence_put(&fence->base);
+ dma_fence_put(&fence->base);
return drop;
}
static struct nouveau_fence *
-nouveau_local_fence(struct fence *fence, struct nouveau_drm *drm) {
+nouveau_local_fence(struct dma_fence *fence, struct nouveau_drm *drm) {
struct nouveau_fence_priv *priv = (void*)drm->fence;
if (fence->ops != &nouveau_fence_ops_legacy &&
@@ -201,7 +201,7 @@ nouveau_fence_context_new(struct nouveau_channel *chan, struct nouveau_fence_cha
struct nouveau_fence_work {
struct work_struct work;
- struct fence_cb cb;
+ struct dma_fence_cb cb;
void (*func)(void *);
void *data;
};
@@ -214,7 +214,7 @@ nouveau_fence_work_handler(struct work_struct *kwork)
kfree(work);
}
-static void nouveau_fence_work_cb(struct fence *fence, struct fence_cb *cb)
+static void nouveau_fence_work_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
{
struct nouveau_fence_work *work = container_of(cb, typeof(*work), cb);
@@ -222,12 +222,12 @@ static void nouveau_fence_work_cb(struct fence *fence, struct fence_cb *cb)
}
void
-nouveau_fence_work(struct fence *fence,
+nouveau_fence_work(struct dma_fence *fence,
void (*func)(void *), void *data)
{
struct nouveau_fence_work *work;
- if (fence_is_signaled(fence))
+ if (dma_fence_is_signaled(fence))
goto err;
work = kmalloc(sizeof(*work), GFP_KERNEL);
@@ -245,7 +245,7 @@ nouveau_fence_work(struct fence *fence,
work->func = func;
work->data = data;
- if (fence_add_callback(fence, &work->cb, nouveau_fence_work_cb) < 0)
+ if (dma_fence_add_callback(fence, &work->cb, nouveau_fence_work_cb) < 0)
goto err_free;
return;
@@ -266,17 +266,17 @@ nouveau_fence_emit(struct nouveau_fence *fence, struct nouveau_channel *chan)
fence->timeout = jiffies + (15 * HZ);
if (priv->uevent)
- fence_init(&fence->base, &nouveau_fence_ops_uevent,
- &fctx->lock, fctx->context, ++fctx->sequence);
+ dma_fence_init(&fence->base, &nouveau_fence_ops_uevent,
+ &fctx->lock, fctx->context, ++fctx->sequence);
else
- fence_init(&fence->base, &nouveau_fence_ops_legacy,
- &fctx->lock, fctx->context, ++fctx->sequence);
+ dma_fence_init(&fence->base, &nouveau_fence_ops_legacy,
+ &fctx->lock, fctx->context, ++fctx->sequence);
kref_get(&fctx->fence_ref);
- trace_fence_emit(&fence->base);
+ trace_dma_fence_emit(&fence->base);
ret = fctx->emit(fence);
if (!ret) {
- fence_get(&fence->base);
+ dma_fence_get(&fence->base);
spin_lock_irq(&fctx->lock);
if (nouveau_fence_update(chan, fctx))
@@ -298,7 +298,7 @@ nouveau_fence_done(struct nouveau_fence *fence)
struct nouveau_channel *chan;
unsigned long flags;
- if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->base.flags))
+ if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->base.flags))
return true;
spin_lock_irqsave(&fctx->lock, flags);
@@ -307,11 +307,11 @@ nouveau_fence_done(struct nouveau_fence *fence)
nvif_notify_put(&fctx->notify);
spin_unlock_irqrestore(&fctx->lock, flags);
}
- return fence_is_signaled(&fence->base);
+ return dma_fence_is_signaled(&fence->base);
}
static long
-nouveau_fence_wait_legacy(struct fence *f, bool intr, long wait)
+nouveau_fence_wait_legacy(struct dma_fence *f, bool intr, long wait)
{
struct nouveau_fence *fence = from_fence(f);
unsigned long sleep_time = NSEC_PER_MSEC / 1000;
@@ -378,7 +378,7 @@ nouveau_fence_wait(struct nouveau_fence *fence, bool lazy, bool intr)
if (!lazy)
return nouveau_fence_wait_busy(fence, intr);
- ret = fence_wait_timeout(&fence->base, intr, 15 * HZ);
+ ret = dma_fence_wait_timeout(&fence->base, intr, 15 * HZ);
if (ret < 0)
return ret;
else if (!ret)
@@ -391,7 +391,7 @@ int
nouveau_fence_sync(struct nouveau_bo *nvbo, struct nouveau_channel *chan, bool exclusive, bool intr)
{
struct nouveau_fence_chan *fctx = chan->fence;
- struct fence *fence;
+ struct dma_fence *fence;
struct reservation_object *resv = nvbo->bo.resv;
struct reservation_object_list *fobj;
struct nouveau_fence *f;
@@ -421,7 +421,7 @@ nouveau_fence_sync(struct nouveau_bo *nvbo, struct nouveau_channel *chan, bool e
}
if (must_wait)
- ret = fence_wait(fence, intr);
+ ret = dma_fence_wait(fence, intr);
return ret;
}
@@ -446,7 +446,7 @@ nouveau_fence_sync(struct nouveau_bo *nvbo, struct nouveau_channel *chan, bool e
}
if (must_wait)
- ret = fence_wait(fence, intr);
+ ret = dma_fence_wait(fence, intr);
}
return ret;
@@ -456,7 +456,7 @@ void
nouveau_fence_unref(struct nouveau_fence **pfence)
{
if (*pfence)
- fence_put(&(*pfence)->base);
+ dma_fence_put(&(*pfence)->base);
*pfence = NULL;
}
@@ -484,12 +484,12 @@ nouveau_fence_new(struct nouveau_channel *chan, bool sysmem,
return ret;
}
-static const char *nouveau_fence_get_get_driver_name(struct fence *fence)
+static const char *nouveau_fence_get_get_driver_name(struct dma_fence *fence)
{
return "nouveau";
}
-static const char *nouveau_fence_get_timeline_name(struct fence *f)
+static const char *nouveau_fence_get_timeline_name(struct dma_fence *f)
{
struct nouveau_fence *fence = from_fence(f);
struct nouveau_fence_chan *fctx = nouveau_fctx(fence);
@@ -503,7 +503,7 @@ static const char *nouveau_fence_get_timeline_name(struct fence *f)
* result. The drm node should still be there, so we can derive the index from
* the fence context.
*/
-static bool nouveau_fence_is_signaled(struct fence *f)
+static bool nouveau_fence_is_signaled(struct dma_fence *f)
{
struct nouveau_fence *fence = from_fence(f);
struct nouveau_fence_chan *fctx = nouveau_fctx(fence);
@@ -519,7 +519,7 @@ static bool nouveau_fence_is_signaled(struct fence *f)
return ret;
}
-static bool nouveau_fence_no_signaling(struct fence *f)
+static bool nouveau_fence_no_signaling(struct dma_fence *f)
{
struct nouveau_fence *fence = from_fence(f);
@@ -530,30 +530,30 @@ static bool nouveau_fence_no_signaling(struct fence *f)
WARN_ON(atomic_read(&fence->base.refcount.refcount) <= 1);
/*
- * This needs uevents to work correctly, but fence_add_callback relies on
+ * This needs uevents to work correctly, but dma_fence_add_callback relies on
* being able to enable signaling. It will still get signaled eventually,
* just not right away.
*/
if (nouveau_fence_is_signaled(f)) {
list_del(&fence->head);
- fence_put(&fence->base);
+ dma_fence_put(&fence->base);
return false;
}
return true;
}
-static void nouveau_fence_release(struct fence *f)
+static void nouveau_fence_release(struct dma_fence *f)
{
struct nouveau_fence *fence = from_fence(f);
struct nouveau_fence_chan *fctx = nouveau_fctx(fence);
kref_put(&fctx->fence_ref, nouveau_fence_context_put);
- fence_free(&fence->base);
+ dma_fence_free(&fence->base);
}
-static const struct fence_ops nouveau_fence_ops_legacy = {
+static const struct dma_fence_ops nouveau_fence_ops_legacy = {
.get_driver_name = nouveau_fence_get_get_driver_name,
.get_timeline_name = nouveau_fence_get_timeline_name,
.enable_signaling = nouveau_fence_no_signaling,
@@ -562,7 +562,7 @@ static const struct fence_ops nouveau_fence_ops_legacy = {
.release = nouveau_fence_release
};
-static bool nouveau_fence_enable_signaling(struct fence *f)
+static bool nouveau_fence_enable_signaling(struct dma_fence *f)
{
struct nouveau_fence *fence = from_fence(f);
struct nouveau_fence_chan *fctx = nouveau_fctx(fence);
@@ -573,18 +573,18 @@ static bool nouveau_fence_enable_signaling(struct fence *f)
ret = nouveau_fence_no_signaling(f);
if (ret)
- set_bit(FENCE_FLAG_USER_BITS, &fence->base.flags);
+ set_bit(DMA_FENCE_FLAG_USER_BITS, &fence->base.flags);
else if (!--fctx->notify_ref)
nvif_notify_put(&fctx->notify);
return ret;
}
-static const struct fence_ops nouveau_fence_ops_uevent = {
+static const struct dma_fence_ops nouveau_fence_ops_uevent = {
.get_driver_name = nouveau_fence_get_get_driver_name,
.get_timeline_name = nouveau_fence_get_timeline_name,
.enable_signaling = nouveau_fence_enable_signaling,
.signaled = nouveau_fence_is_signaled,
- .wait = fence_default_wait,
- .release = NULL
+ .wait = dma_fence_default_wait,
+ .release = nouveau_fence_release
};
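The rename is mechanical throughout: struct fence becomes struct dma_fence, every fence_*() helper becomes dma_fence_*(), and the header moves from <linux/fence.h> to <linux/dma-fence.h>. Below is a condensed sketch of the callback pattern that nouveau_fence_work() implements with the renamed API; the helper names are hypothetical, and the fallback paths stand in for the err/err_free labels elided from the hunks above:

	#include <linux/dma-fence.h>
	#include <linux/slab.h>
	#include <linux/workqueue.h>

	struct fence_work {			/* hypothetical stand-in */
		struct work_struct base;
		struct dma_fence_cb cb;
		void (*func)(void *);
		void *data;
	};

	static void fence_work_handler(struct work_struct *kwork)
	{
		struct fence_work *work = container_of(kwork, typeof(*work), base);

		work->func(work->data);
		kfree(work);
	}

	static void fence_work_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
	{
		struct fence_work *work = container_of(cb, typeof(*work), cb);

		/* called from the fence's signalling context: defer to a workqueue */
		schedule_work(&work->base);
	}

	static void fence_work_queue(struct dma_fence *fence,
				     void (*func)(void *), void *data)
	{
		struct fence_work *work;

		if (dma_fence_is_signaled(fence))
			goto sync;

		work = kmalloc(sizeof(*work), GFP_KERNEL);
		if (!work) {
			dma_fence_wait(fence, false);	/* no memory: wait in place */
			goto sync;
		}

		INIT_WORK(&work->base, fence_work_handler);
		work->func = func;
		work->data = data;

		/* -ENOENT means the fence signalled before the callback armed */
		if (dma_fence_add_callback(fence, &work->cb, fence_work_cb) < 0) {
			kfree(work);
			goto sync;
		}
		return;

	sync:
		func(data);	/* already signalled (or OOM): run synchronously */
	}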
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.h b/drivers/gpu/drm/nouveau/nouveau_fence.h
index 64c4ce7115ad..ccdce1b4eec4 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.h
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.h
@@ -1,14 +1,14 @@
#ifndef __NOUVEAU_FENCE_H__
#define __NOUVEAU_FENCE_H__
-#include <linux/fence.h>
+#include <linux/dma-fence.h>
#include <nvif/notify.h>
struct nouveau_drm;
struct nouveau_bo;
struct nouveau_fence {
- struct fence base;
+ struct dma_fence base;
struct list_head head;
@@ -24,7 +24,7 @@ void nouveau_fence_unref(struct nouveau_fence **);
int nouveau_fence_emit(struct nouveau_fence *, struct nouveau_channel *);
bool nouveau_fence_done(struct nouveau_fence *);
-void nouveau_fence_work(struct fence *, void (*)(void *), void *);
+void nouveau_fence_work(struct dma_fence *, void (*)(void *), void *);
int nouveau_fence_wait(struct nouveau_fence *, bool lazy, bool intr);
int nouveau_fence_sync(struct nouveau_bo *, struct nouveau_channel *, bool exclusive, bool intr);
@@ -92,7 +92,6 @@ struct nv84_fence_chan {
struct nouveau_fence_chan base;
struct nvkm_vma vma;
struct nvkm_vma vma_gart;
- struct nvkm_vma dispc_vma[4];
};
struct nv84_fence_priv {
@@ -102,7 +101,6 @@ struct nv84_fence_priv {
u32 *suspend;
};
-u64 nv84_fence_crtc(struct nouveau_channel *, int);
int nv84_fence_context_new(struct nouveau_channel *);
#endif
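The structural change in the header is that struct nouveau_fence now embeds a struct dma_fence as its base member. The ops callbacks in nouveau_fence.c recover the driver object with container_of(), which is all its from_fence() helper does:

	static inline struct nouveau_fence *
	from_fence(struct dma_fence *fence)
	{
		/* valid because 'base' is the embedded dma_fence member */
		return container_of(fence, struct nouveau_fence, base);
	}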
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index 72e2399bce39..201b52b750dd 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -119,7 +119,7 @@ nouveau_gem_object_unmap(struct nouveau_bo *nvbo, struct nvkm_vma *vma)
const bool mapped = nvbo->bo.mem.mem_type != TTM_PL_SYSTEM;
struct reservation_object *resv = nvbo->bo.resv;
struct reservation_object_list *fobj;
- struct fence *fence = NULL;
+ struct dma_fence *fence = NULL;
fobj = reservation_object_get_list(resv);
@@ -369,7 +369,7 @@ validate_init(struct nouveau_channel *chan, struct drm_file *file_priv,
{
struct nouveau_cli *cli = nouveau_cli(file_priv);
int trycnt = 0;
- int ret, i;
+ int ret = -EINVAL, i;
struct nouveau_bo *res_bo = NULL;
LIST_HEAD(gart_list);
LIST_HEAD(vram_list);
@@ -861,6 +861,7 @@ nouveau_gem_ioctl_cpu_prep(struct drm_device *dev, void *data,
struct nouveau_bo *nvbo;
bool no_wait = !!(req->flags & NOUVEAU_GEM_CPU_PREP_NOWAIT);
bool write = !!(req->flags & NOUVEAU_GEM_CPU_PREP_WRITE);
+ long lret;
int ret;
gem = drm_gem_object_lookup(file_priv, req->handle);
@@ -868,19 +869,15 @@ nouveau_gem_ioctl_cpu_prep(struct drm_device *dev, void *data,
return -ENOENT;
nvbo = nouveau_gem_object(gem);
- if (no_wait)
- ret = reservation_object_test_signaled_rcu(nvbo->bo.resv, write) ? 0 : -EBUSY;
- else {
- long lret;
+ lret = reservation_object_wait_timeout_rcu(nvbo->bo.resv, write, true,
+ no_wait ? 0 : 30 * HZ);
+ if (!lret)
+ ret = -EBUSY;
+ else if (lret > 0)
+ ret = 0;
+ else
+ ret = lret;
- lret = reservation_object_wait_timeout_rcu(nvbo->bo.resv, write, true, 30 * HZ);
- if (!lret)
- ret = -EBUSY;
- else if (lret > 0)
- ret = 0;
- else
- ret = lret;
- }
nouveau_bo_sync_for_cpu(nvbo);
drm_gem_object_unreference_unlocked(gem);
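The cpu_prep rework folds the no_wait special case into a single reservation_object_wait_timeout_rcu() call: a timeout of 0 turns the wait into a non-blocking poll. The surviving lret-to-ret mapping follows that function's return convention; a hypothetical helper spelling it out:

	/* hypothetical helper mirroring the mapping done inline above */
	static int wait_lret_to_errno(long lret)
	{
		if (lret < 0)
			return lret;	/* error, e.g. -ERESTARTSYS */
		if (lret == 0)
			return -EBUSY;	/* timed out, or busy when polled with 0 */
		return 0;		/* signalled; lret is the time remaining */
	}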
diff --git a/drivers/gpu/drm/nouveau/nouveau_led.c b/drivers/gpu/drm/nouveau/nouveau_led.c
new file mode 100644
index 000000000000..3e2f1b6cd4df
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_led.c
@@ -0,0 +1,139 @@
+/*
+ * Copyright (C) 2016 Martin Peres
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+/*
+ * Authors:
+ * Martin Peres <martin.peres@free.fr>
+ */
+
+#include <linux/leds.h>
+
+#include "nouveau_led.h"
+#include <nvkm/subdev/gpio.h>
+
+static enum led_brightness
+nouveau_led_get_brightness(struct led_classdev *led)
+{
+ struct drm_device *drm_dev = container_of(led, struct nouveau_led, led)->dev;
+ struct nouveau_drm *drm = nouveau_drm(drm_dev);
+ struct nvif_object *device = &drm->device.object;
+ u32 div, duty;
+
+ div = nvif_rd32(device, 0x61c880) & 0x00ffffff;
+ duty = nvif_rd32(device, 0x61c884) & 0x00ffffff;
+
+ if (div > 0)
+ return duty * LED_FULL / div;
+ else
+ return 0;
+}
+
+static void
+nouveau_led_set_brightness(struct led_classdev *led, enum led_brightness value)
+{
+ struct drm_device *drm_dev = container_of(led, struct nouveau_led, led)->dev;
+ struct nouveau_drm *drm = nouveau_drm(drm_dev);
+ struct nvif_object *device = &drm->device.object;
+
+ u32 input_clk = 27e6; /* PDISPLAY.SOR[1].PWM is connected to the crystal */
+ u32 freq = 100; /* this is what NVIDIA uses and it should be good enough */
+ u32 div, duty;
+
+ div = input_clk / freq;
+ duty = value * div / LED_FULL;
+
+ /* For now, it is safe to poke these registers directly because:
+ * - A: NVIDIA never wires the logo LED to any PWM controller other
+ *      than PDISPLAY.SOR[1].PWM;
+ * - B: nouveau does not touch these registers anywhere else.
+ */
+ nvif_wr32(device, 0x61c880, div);
+ nvif_wr32(device, 0x61c884, 0xc0000000 | duty);
+}
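+
+/* Worked example with the 27 MHz crystal: div = 27,000,000 / 100 =
+ * 270,000, and a mid-scale brightness of 128 gives duty =
+ * 128 * 270,000 / 255 = 135,529, i.e. roughly a 50% duty cycle at
+ * 100 Hz.
+ */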
+
+
+int
+nouveau_led_init(struct drm_device *dev)
+{
+ struct nouveau_drm *drm = nouveau_drm(dev);
+ struct nvkm_gpio *gpio = nvxx_gpio(&drm->device);
+ struct dcb_gpio_func logo_led;
+ int ret;
+
+ if (!gpio)
+ return 0;
+
+ /* check that there is a GPIO controlling the logo LED */
+ if (nvkm_gpio_find(gpio, 0, DCB_GPIO_LOGO_LED_PWM, 0xff, &logo_led))
+ return 0;
+
+ drm->led = kzalloc(sizeof(*drm->led), GFP_KERNEL);
+ if (!drm->led)
+ return -ENOMEM;
+ drm->led->dev = dev;
+
+ drm->led->led.name = "nvidia-logo";
+ drm->led->led.max_brightness = 255;
+ drm->led->led.brightness_get = nouveau_led_get_brightness;
+ drm->led->led.brightness_set = nouveau_led_set_brightness;
+
+ ret = led_classdev_register(dev->dev, &drm->led->led);
+ if (ret) {
+ kfree(drm->led);
+ return ret;
+ }
+
+ return 0;
+}
+
+void
+nouveau_led_suspend(struct drm_device *dev)
+{
+ struct nouveau_drm *drm = nouveau_drm(dev);
+
+ if (drm->led)
+ led_classdev_suspend(&drm->led->led);
+}
+
+void
+nouveau_led_resume(struct drm_device *dev)
+{
+ struct nouveau_drm *drm = nouveau_drm(dev);
+
+ if (drm->led)
+ led_classdev_resume(&drm->led->led);
+}
+
+void
+nouveau_led_fini(struct drm_device *dev)
+{
+ struct nouveau_drm *drm = nouveau_drm(dev);
+
+ if (drm->led) {
+ led_classdev_unregister(&drm->led->led);
+ kfree(drm->led);
+ drm->led = NULL;
+ }
+}
diff --git a/drivers/gpu/drm/nouveau/nouveau_led.h b/drivers/gpu/drm/nouveau/nouveau_led.h
new file mode 100644
index 000000000000..187ecdb82002
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_led.h
@@ -0,0 +1,57 @@
+/*
+ * Copyright 2015 Martin Peres
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Martin Peres <martin.peres@free.fr>
+ */
+
+#ifndef __NOUVEAU_LED_H__
+#define __NOUVEAU_LED_H__
+
+#include "nouveau_drv.h"
+
+struct led_classdev;
+
+struct nouveau_led {
+ struct drm_device *dev;
+
+ struct led_classdev led;
+};
+
+static inline struct nouveau_led *
+nouveau_led(struct drm_device *dev)
+{
+ return nouveau_drm(dev)->led;
+}
+
+/* nouveau_led.c */
+#if IS_ENABLED(CONFIG_LEDS_CLASS)
+int nouveau_led_init(struct drm_device *dev);
+void nouveau_led_suspend(struct drm_device *dev);
+void nouveau_led_resume(struct drm_device *dev);
+void nouveau_led_fini(struct drm_device *dev);
+#else
+static inline int nouveau_led_init(struct drm_device *dev) { return 0; }
+static inline void nouveau_led_suspend(struct drm_device *dev) { }
+static inline void nouveau_led_resume(struct drm_device *dev) { }
+static inline void nouveau_led_fini(struct drm_device *dev) { }
+#endif
+
+#endif
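The #else stubs let callers invoke the LED hooks unconditionally; when LED class support is compiled out they collapse to nothing. A minimal standalone illustration of the same conditional-stub idiom (CONFIG_FOO and foo_init() are hypothetical):

	#if IS_ENABLED(CONFIG_FOO)
	int foo_init(struct device *dev);	/* real implementation in foo.c */
	#else
	static inline int foo_init(struct device *dev) { return 0; }
	#endif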
diff --git a/drivers/gpu/drm/nouveau/nv04_fbcon.c b/drivers/gpu/drm/nouveau/nv04_fbcon.c
index da8fd5ff9d0f..6a2b187e3c3b 100644
--- a/drivers/gpu/drm/nouveau/nv04_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nv04_fbcon.c
@@ -30,7 +30,7 @@ int
nv04_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region)
{
struct nouveau_fbdev *nfbdev = info->par;
- struct nouveau_drm *drm = nouveau_drm(nfbdev->dev);
+ struct nouveau_drm *drm = nouveau_drm(nfbdev->helper.dev);
struct nouveau_channel *chan = drm->channel;
int ret;
@@ -50,7 +50,7 @@ int
nv04_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
{
struct nouveau_fbdev *nfbdev = info->par;
- struct nouveau_drm *drm = nouveau_drm(nfbdev->dev);
+ struct nouveau_drm *drm = nouveau_drm(nfbdev->helper.dev);
struct nouveau_channel *chan = drm->channel;
int ret;
@@ -77,7 +77,7 @@ int
nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
{
struct nouveau_fbdev *nfbdev = info->par;
- struct nouveau_drm *drm = nouveau_drm(nfbdev->dev);
+ struct nouveau_drm *drm = nouveau_drm(nfbdev->helper.dev);
struct nouveau_channel *chan = drm->channel;
uint32_t fg;
uint32_t bg;
@@ -133,7 +133,7 @@ int
nv04_fbcon_accel_init(struct fb_info *info)
{
struct nouveau_fbdev *nfbdev = info->par;
- struct drm_device *dev = nfbdev->dev;
+ struct drm_device *dev = nfbdev->helper.dev;
struct nouveau_drm *drm = nouveau_drm(dev);
struct nouveau_channel *chan = drm->channel;
struct nvif_device *device = &drm->device;
diff --git a/drivers/gpu/drm/nouveau/nv04_fence.c b/drivers/gpu/drm/nouveau/nv04_fence.c
index 1915b7b82a59..fa8f2375c398 100644
--- a/drivers/gpu/drm/nouveau/nv04_fence.c
+++ b/drivers/gpu/drm/nouveau/nv04_fence.c
@@ -110,6 +110,6 @@ nv04_fence_create(struct nouveau_drm *drm)
priv->base.context_new = nv04_fence_context_new;
priv->base.context_del = nv04_fence_context_del;
priv->base.contexts = 15;
- priv->base.context_base = fence_context_alloc(priv->base.contexts);
+ priv->base.context_base = dma_fence_context_alloc(priv->base.contexts);
return 0;
}
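dma_fence_context_alloc() draws from a global atomic counter, so the block of context IDs it returns is unique across all fence users in the kernel. nouveau stores the result in priv->base.context_base and hands one context per channel out of the block, as nouveau_fence_context_new() does; a hypothetical standalone version of that split:

	static u64 context_base;

	static void fence_priv_init(unsigned int nr_channels)
	{
		/* global counter => the block is unique kernel-wide */
		context_base = dma_fence_context_alloc(nr_channels);
	}

	static u64 channel_fence_context(int chid)
	{
		return context_base + chid;	/* one context per channel */
	}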
diff --git a/drivers/gpu/drm/nouveau/nv10_fence.c b/drivers/gpu/drm/nouveau/nv10_fence.c
index 4e3de34ff6f4..2998bde29211 100644
--- a/drivers/gpu/drm/nouveau/nv10_fence.c
+++ b/drivers/gpu/drm/nouveau/nv10_fence.c
@@ -57,16 +57,13 @@ void
nv10_fence_context_del(struct nouveau_channel *chan)
{
struct nv10_fence_chan *fctx = chan->fence;
- int i;
nouveau_fence_context_del(&fctx->base);
- for (i = 0; i < ARRAY_SIZE(fctx->head); i++)
- nvif_object_fini(&fctx->head[i]);
nvif_object_fini(&fctx->sema);
chan->fence = NULL;
nouveau_fence_context_free(&fctx->base);
}
-int
+static int
nv10_fence_context_new(struct nouveau_channel *chan)
{
struct nv10_fence_chan *fctx;
@@ -107,7 +104,7 @@ nv10_fence_create(struct nouveau_drm *drm)
priv->base.context_new = nv10_fence_context_new;
priv->base.context_del = nv10_fence_context_del;
priv->base.contexts = 31;
- priv->base.context_base = fence_context_alloc(priv->base.contexts);
+ priv->base.context_base = dma_fence_context_alloc(priv->base.contexts);
spin_lock_init(&priv->lock);
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nv10_fence.h b/drivers/gpu/drm/nouveau/nv10_fence.h
index a87259f3983a..b7a508585304 100644
--- a/drivers/gpu/drm/nouveau/nv10_fence.h
+++ b/drivers/gpu/drm/nouveau/nv10_fence.h
@@ -7,7 +7,6 @@
struct nv10_fence_chan {
struct nouveau_fence_chan base;
struct nvif_object sema;
- struct nvif_object head[4];
};
struct nv10_fence_priv {
diff --git a/drivers/gpu/drm/nouveau/nv17_fence.c b/drivers/gpu/drm/nouveau/nv17_fence.c
index 7d5e562a55c5..79bc01111351 100644
--- a/drivers/gpu/drm/nouveau/nv17_fence.c
+++ b/drivers/gpu/drm/nouveau/nv17_fence.c
@@ -126,7 +126,7 @@ nv17_fence_create(struct nouveau_drm *drm)
priv->base.context_new = nv17_fence_context_new;
priv->base.context_del = nv10_fence_context_del;
priv->base.contexts = 31;
- priv->base.context_base = fence_context_alloc(priv->base.contexts);
+ priv->base.context_base = dma_fence_context_alloc(priv->base.contexts);
spin_lock_init(&priv->lock);
ret = nouveau_bo_new(drm->dev, 4096, 0x1000, TTM_PL_FLAG_VRAM,
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
index 7d0edcbcfca7..2c2c64507661 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.c
+++ b/drivers/gpu/drm/nouveau/nv50_display.c
@@ -25,10 +25,12 @@
#include <linux/dma-mapping.h>
#include <drm/drmP.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc_helper.h>
-#include <drm/drm_plane_helper.h>
#include <drm/drm_dp_helper.h>
#include <drm/drm_fb_helper.h>
+#include <drm/drm_plane_helper.h>
#include <nvif/class.h>
#include <nvif/cl0002.h>
@@ -38,6 +40,7 @@
#include <nvif/cl507c.h>
#include <nvif/cl507d.h>
#include <nvif/cl507e.h>
+#include <nvif/event.h>
#include "nouveau_drv.h"
#include "nouveau_dma.h"
@@ -46,6 +49,7 @@
#include "nouveau_encoder.h"
#include "nouveau_crtc.h"
#include "nouveau_fence.h"
+#include "nouveau_fbcon.h"
#include "nv50_display.h"
#define EVO_DMA_NR 9
@@ -61,6 +65,227 @@
#define EVO_MAST_NTFY EVO_SYNC( 0, 0x00)
#define EVO_FLIP_SEM0(c) EVO_SYNC((c) + 1, 0x00)
#define EVO_FLIP_SEM1(c) EVO_SYNC((c) + 1, 0x10)
+#define EVO_FLIP_NTFY0(c) EVO_SYNC((c) + 1, 0x20)
+#define EVO_FLIP_NTFY1(c) EVO_SYNC((c) + 1, 0x30)
+
+/******************************************************************************
+ * Atomic state
+ *****************************************************************************/
+#define nv50_atom(p) container_of((p), struct nv50_atom, state)
+
+struct nv50_atom {
+ struct drm_atomic_state state;
+
+ struct list_head outp;
+ bool lock_core;
+ bool flush_disable;
+};
+
+struct nv50_outp_atom {
+ struct list_head head;
+
+ struct drm_encoder *encoder;
+ bool flush_disable;
+
+ union {
+ struct {
+ bool ctrl:1;
+ };
+ u8 mask;
+ } clr;
+
+ union {
+ struct {
+ bool ctrl:1;
+ };
+ u8 mask;
+ } set;
+};
+
+#define nv50_head_atom(p) container_of((p), struct nv50_head_atom, state)
+
+struct nv50_head_atom {
+ struct drm_crtc_state state;
+
+ struct {
+ u16 iW;
+ u16 iH;
+ u16 oW;
+ u16 oH;
+ } view;
+
+ struct nv50_head_mode {
+ bool interlace;
+ u32 clock;
+ struct {
+ u16 active;
+ u16 synce;
+ u16 blanke;
+ u16 blanks;
+ } h;
+ struct {
+ u32 active;
+ u16 synce;
+ u16 blanke;
+ u16 blanks;
+ u16 blank2s;
+ u16 blank2e;
+ u16 blankus;
+ } v;
+ } mode;
+
+ struct {
+ u32 handle;
+ u64 offset:40;
+ } lut;
+
+ struct {
+ bool visible;
+ u32 handle;
+ u64 offset:40;
+ u8 format;
+ u8 kind:7;
+ u8 layout:1;
+ u8 block:4;
+ u32 pitch:20;
+ u16 x;
+ u16 y;
+ u16 w;
+ u16 h;
+ } core;
+
+ struct {
+ bool visible;
+ u32 handle;
+ u64 offset:40;
+ u8 layout:1;
+ u8 format:1;
+ } curs;
+
+ struct {
+ u8 depth;
+ u8 cpp;
+ u16 x;
+ u16 y;
+ u16 w;
+ u16 h;
+ } base;
+
+ struct {
+ u8 cpp;
+ } ovly;
+
+ struct {
+ bool enable:1;
+ u8 bits:2;
+ u8 mode:4;
+ } dither;
+
+ struct {
+ struct {
+ u16 cos:12;
+ u16 sin:12;
+ } sat;
+ } procamp;
+
+ union {
+ struct {
+ bool core:1;
+ bool curs:1;
+ };
+ u8 mask;
+ } clr;
+
+ union {
+ struct {
+ bool core:1;
+ bool curs:1;
+ bool view:1;
+ bool mode:1;
+ bool base:1;
+ bool ovly:1;
+ bool dither:1;
+ bool procamp:1;
+ };
+ u16 mask;
+ } set;
+};
+
+static inline struct nv50_head_atom *
+nv50_head_atom_get(struct drm_atomic_state *state, struct drm_crtc *crtc)
+{
+ struct drm_crtc_state *statec = drm_atomic_get_crtc_state(state, crtc);
+ if (IS_ERR(statec))
+ return (void *)statec;
+ return nv50_head_atom(statec);
+}
+
+#define nv50_wndw_atom(p) container_of((p), struct nv50_wndw_atom, state)
+
+struct nv50_wndw_atom {
+ struct drm_plane_state state;
+ u8 interval;
+
+ struct drm_rect clip;
+
+ struct {
+ u32 handle;
+ u16 offset:12;
+ bool awaken:1;
+ } ntfy;
+
+ struct {
+ u32 handle;
+ u16 offset:12;
+ u32 acquire;
+ u32 release;
+ } sema;
+
+ struct {
+ u8 enable:2;
+ } lut;
+
+ struct {
+ u8 mode:2;
+ u8 interval:4;
+
+ u8 format;
+ u8 kind:7;
+ u8 layout:1;
+ u8 block:4;
+ u32 pitch:20;
+ u16 w;
+ u16 h;
+
+ u32 handle;
+ u64 offset;
+ } image;
+
+ struct {
+ u16 x;
+ u16 y;
+ } point;
+
+ union {
+ struct {
+ bool ntfy:1;
+ bool sema:1;
+ bool image:1;
+ };
+ u8 mask;
+ } clr;
+
+ union {
+ struct {
+ bool ntfy:1;
+ bool sema:1;
+ bool image:1;
+ bool lut:1;
+ bool point:1;
+ };
+ u8 mask;
+ } set;
+};
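+
+/* The clr/set unions above let each named bool bitfield alias one bit of
+ * .mask: code raises flags individually by name (e.g. asyw->set.image =
+ * true) and later tests or clears the whole group at once through the
+ * u8/u16 mask. The anonymous struct/union overlay relies on the
+ * compiler's bitfield ordering, as similar tricks elsewhere in the
+ * kernel do.
+ */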
/******************************************************************************
* EVO channel
@@ -133,34 +358,6 @@ nv50_pioc_create(struct nvif_device *device, struct nvif_object *disp,
}
/******************************************************************************
- * Cursor Immediate
- *****************************************************************************/
-
-struct nv50_curs {
- struct nv50_pioc base;
-};
-
-static int
-nv50_curs_create(struct nvif_device *device, struct nvif_object *disp,
- int head, struct nv50_curs *curs)
-{
- struct nv50_disp_cursor_v0 args = {
- .head = head,
- };
- static const s32 oclass[] = {
- GK104_DISP_CURSOR,
- GF110_DISP_CURSOR,
- GT214_DISP_CURSOR,
- G82_DISP_CURSOR,
- NV50_DISP_CURSOR,
- 0
- };
-
- return nv50_pioc_create(device, disp, oclass, head, &args, sizeof(args),
- &curs->base);
-}
-
-/******************************************************************************
* Overlay Immediate
*****************************************************************************/
@@ -192,6 +389,11 @@ nv50_oimm_create(struct nvif_device *device, struct nvif_object *disp,
* DMA EVO channel
*****************************************************************************/
+struct nv50_dmac_ctxdma {
+ struct list_head head;
+ struct nvif_object object;
+};
+
struct nv50_dmac {
struct nv50_chan base;
dma_addr_t handle;
@@ -199,6 +401,7 @@ struct nv50_dmac {
struct nvif_object sync;
struct nvif_object vram;
+ struct list_head ctxdma;
/* Protects against concurrent pushbuf access to this channel, lock is
* grabbed by evo_wait (if the pushbuf reservation is successful) and
@@ -207,9 +410,82 @@ struct nv50_dmac {
};
static void
+nv50_dmac_ctxdma_del(struct nv50_dmac_ctxdma *ctxdma)
+{
+ nvif_object_fini(&ctxdma->object);
+ list_del(&ctxdma->head);
+ kfree(ctxdma);
+}
+
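+/* ctxdmas are cached on the channel and keyed by handle; because the
+ * handle encodes the memory kind (0xfb000000 | kind), framebuffers that
+ * share a tiling kind reuse one DMA object instead of allocating a new
+ * one per framebuffer.
+ */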
+static struct nv50_dmac_ctxdma *
+nv50_dmac_ctxdma_new(struct nv50_dmac *dmac, struct nouveau_framebuffer *fb)
+{
+ struct nouveau_drm *drm = nouveau_drm(fb->base.dev);
+ struct nv50_dmac_ctxdma *ctxdma;
+ const u8 kind = (fb->nvbo->tile_flags & 0x0000ff00) >> 8;
+ const u32 handle = 0xfb000000 | kind;
+ struct {
+ struct nv_dma_v0 base;
+ union {
+ struct nv50_dma_v0 nv50;
+ struct gf100_dma_v0 gf100;
+ struct gf119_dma_v0 gf119;
+ };
+ } args = {};
+ u32 argc = sizeof(args.base);
+ int ret;
+
+ list_for_each_entry(ctxdma, &dmac->ctxdma, head) {
+ if (ctxdma->object.handle == handle)
+ return ctxdma;
+ }
+
+ if (!(ctxdma = kzalloc(sizeof(*ctxdma), GFP_KERNEL)))
+ return ERR_PTR(-ENOMEM);
+ list_add(&ctxdma->head, &dmac->ctxdma);
+
+ args.base.target = NV_DMA_V0_TARGET_VRAM;
+ args.base.access = NV_DMA_V0_ACCESS_RDWR;
+ args.base.start = 0;
+ args.base.limit = drm->device.info.ram_user - 1;
+
+ if (drm->device.info.chipset < 0x80) {
+ args.nv50.part = NV50_DMA_V0_PART_256;
+ argc += sizeof(args.nv50);
+ } else
+ if (drm->device.info.chipset < 0xc0) {
+ args.nv50.part = NV50_DMA_V0_PART_256;
+ args.nv50.kind = kind;
+ argc += sizeof(args.nv50);
+ } else
+ if (drm->device.info.chipset < 0xd0) {
+ args.gf100.kind = kind;
+ argc += sizeof(args.gf100);
+ } else {
+ args.gf119.page = GF119_DMA_V0_PAGE_LP;
+ args.gf119.kind = kind;
+ argc += sizeof(args.gf119);
+ }
+
+ ret = nvif_object_init(&dmac->base.user, handle, NV_DMA_IN_MEMORY,
+ &args, argc, &ctxdma->object);
+ if (ret) {
+ nv50_dmac_ctxdma_del(ctxdma);
+ return ERR_PTR(ret);
+ }
+
+ return ctxdma;
+}
+
+static void
nv50_dmac_destroy(struct nv50_dmac *dmac, struct nvif_object *disp)
{
struct nvif_device *device = dmac->base.device;
+ struct nv50_dmac_ctxdma *ctxdma, *ctxtmp;
+
+ list_for_each_entry_safe(ctxdma, ctxtmp, &dmac->ctxdma, head) {
+ nv50_dmac_ctxdma_del(ctxdma);
+ }
nvif_object_fini(&dmac->vram);
nvif_object_fini(&dmac->sync);
@@ -278,6 +554,7 @@ nv50_dmac_create(struct nvif_device *device, struct nvif_object *disp,
if (ret)
return ret;
+ INIT_LIST_HEAD(&dmac->ctxdma);
return ret;
}
@@ -297,7 +574,7 @@ nv50_core_create(struct nvif_device *device, struct nvif_object *disp,
.pushbuf = 0xb0007d00,
};
static const s32 oclass[] = {
- GP104_DISP_CORE_CHANNEL_DMA,
+ GP102_DISP_CORE_CHANNEL_DMA,
GP100_DISP_CORE_CHANNEL_DMA,
GM200_DISP_CORE_CHANNEL_DMA,
GM107_DISP_CORE_CHANNEL_DMA,
@@ -381,34 +658,23 @@ nv50_ovly_create(struct nvif_device *device, struct nvif_object *disp,
struct nv50_head {
struct nouveau_crtc base;
- struct nouveau_bo *image;
- struct nv50_curs curs;
- struct nv50_sync sync;
struct nv50_ovly ovly;
struct nv50_oimm oimm;
};
#define nv50_head(c) ((struct nv50_head *)nouveau_crtc(c))
-#define nv50_curs(c) (&nv50_head(c)->curs)
-#define nv50_sync(c) (&nv50_head(c)->sync)
#define nv50_ovly(c) (&nv50_head(c)->ovly)
#define nv50_oimm(c) (&nv50_head(c)->oimm)
#define nv50_chan(c) (&(c)->base.base)
#define nv50_vers(c) nv50_chan(c)->user.oclass
-struct nv50_fbdma {
- struct list_head head;
- struct nvif_object core;
- struct nvif_object base[4];
-};
-
struct nv50_disp {
struct nvif_object *disp;
struct nv50_mast mast;
- struct list_head fbdma;
-
struct nouveau_bo *sync;
+
+ struct mutex mutex;
};
static struct nv50_disp *
@@ -419,12 +685,6 @@ nv50_disp(struct drm_device *dev)
#define nv50_mast(d) (&nv50_disp(d)->mast)
-static struct drm_crtc *
-nv50_display_crtc_get(struct drm_encoder *encoder)
-{
- return nouveau_encoder(encoder)->crtc;
-}
-
/******************************************************************************
* EVO channel helpers
*****************************************************************************/
@@ -463,812 +723,1465 @@ evo_kick(u32 *push, void *evoc)
mutex_unlock(&dmac->lock);
}
-#if 1
-#define evo_mthd(p,m,s) *((p)++) = (((s) << 18) | (m))
-#define evo_data(p,d) *((p)++) = (d)
-#else
#define evo_mthd(p,m,s) do { \
const u32 _m = (m), _s = (s); \
- printk(KERN_ERR "%04x %d %s\n", _m, _s, __func__); \
+ if (drm_debug & DRM_UT_KMS) \
+ printk(KERN_ERR "%04x %d %s\n", _m, _s, __func__); \
*((p)++) = ((_s << 18) | _m); \
} while(0)
+
#define evo_data(p,d) do { \
const u32 _d = (d); \
- printk(KERN_ERR "\t%08x\n", _d); \
+ if (drm_debug & DRM_UT_KMS) \
+ printk(KERN_ERR "\t%08x\n", _d); \
*((p)++) = _d; \
} while(0)
-#endif
-static bool
-evo_sync_wait(void *data)
+/******************************************************************************
+ * Plane
+ *****************************************************************************/
+#define nv50_wndw(p) container_of((p), struct nv50_wndw, plane)
+
+struct nv50_wndw {
+ const struct nv50_wndw_func *func;
+ struct nv50_dmac *dmac;
+
+ struct drm_plane plane;
+
+ struct nvif_notify notify;
+ u16 ntfy;
+ u16 sema;
+ u32 data;
+};
+
+struct nv50_wndw_func {
+ void *(*dtor)(struct nv50_wndw *);
+ int (*acquire)(struct nv50_wndw *, struct nv50_wndw_atom *asyw,
+ struct nv50_head_atom *asyh);
+ void (*release)(struct nv50_wndw *, struct nv50_wndw_atom *asyw,
+ struct nv50_head_atom *asyh);
+ void (*prepare)(struct nv50_wndw *, struct nv50_head_atom *asyh,
+ struct nv50_wndw_atom *asyw);
+
+ void (*sema_set)(struct nv50_wndw *, struct nv50_wndw_atom *);
+ void (*sema_clr)(struct nv50_wndw *);
+ void (*ntfy_set)(struct nv50_wndw *, struct nv50_wndw_atom *);
+ void (*ntfy_clr)(struct nv50_wndw *);
+ int (*ntfy_wait_begun)(struct nv50_wndw *, struct nv50_wndw_atom *);
+ void (*image_set)(struct nv50_wndw *, struct nv50_wndw_atom *);
+ void (*image_clr)(struct nv50_wndw *);
+ void (*lut)(struct nv50_wndw *, struct nv50_wndw_atom *);
+ void (*point)(struct nv50_wndw *, struct nv50_wndw_atom *);
+
+ u32 (*update)(struct nv50_wndw *, u32 interlock);
+};
+
+static int
+nv50_wndw_wait_armed(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
{
- if (nouveau_bo_rd32(data, EVO_MAST_NTFY) != 0x00000000)
- return true;
- usleep_range(1, 2);
- return false;
+ if (asyw->set.ntfy)
+ return wndw->func->ntfy_wait_begun(wndw, asyw);
+ return 0;
}
-static int
-evo_sync(struct drm_device *dev)
+static u32
+nv50_wndw_flush_clr(struct nv50_wndw *wndw, u32 interlock, bool flush,
+ struct nv50_wndw_atom *asyw)
{
- struct nvif_device *device = &nouveau_drm(dev)->device;
- struct nv50_disp *disp = nv50_disp(dev);
- struct nv50_mast *mast = nv50_mast(dev);
- u32 *push = evo_wait(mast, 8);
- if (push) {
- nouveau_bo_wr32(disp->sync, EVO_MAST_NTFY, 0x00000000);
- evo_mthd(push, 0x0084, 1);
- evo_data(push, 0x80000000 | EVO_MAST_NTFY);
- evo_mthd(push, 0x0080, 2);
- evo_data(push, 0x00000000);
- evo_data(push, 0x00000000);
- evo_kick(push, mast);
- if (nvif_msec(device, 2000,
- if (evo_sync_wait(disp->sync))
- break;
- ) >= 0)
- return 0;
+ if (asyw->clr.sema && (!asyw->set.sema || flush))
+ wndw->func->sema_clr(wndw);
+ if (asyw->clr.ntfy && (!asyw->set.ntfy || flush))
+ wndw->func->ntfy_clr(wndw);
+ if (asyw->clr.image && (!asyw->set.image || flush))
+ wndw->func->image_clr(wndw);
+
+ return flush ? wndw->func->update(wndw, interlock) : 0;
+}
+
+static u32
+nv50_wndw_flush_set(struct nv50_wndw *wndw, u32 interlock,
+ struct nv50_wndw_atom *asyw)
+{
+ if (interlock) {
+ asyw->image.mode = 0;
+ asyw->image.interval = 1;
}
- return -EBUSY;
+ if (asyw->set.sema ) wndw->func->sema_set (wndw, asyw);
+ if (asyw->set.ntfy ) wndw->func->ntfy_set (wndw, asyw);
+ if (asyw->set.image) wndw->func->image_set(wndw, asyw);
+ if (asyw->set.lut ) wndw->func->lut (wndw, asyw);
+ if (asyw->set.point) wndw->func->point (wndw, asyw);
+
+ return wndw->func->update(wndw, interlock);
}
-/******************************************************************************
- * Page flipping channel
- *****************************************************************************/
-struct nouveau_bo *
-nv50_display_crtc_sema(struct drm_device *dev, int crtc)
+static void
+nv50_wndw_atomic_check_release(struct nv50_wndw *wndw,
+ struct nv50_wndw_atom *asyw,
+ struct nv50_head_atom *asyh)
{
- return nv50_disp(dev)->sync;
+ struct nouveau_drm *drm = nouveau_drm(wndw->plane.dev);
+ NV_ATOMIC(drm, "%s release\n", wndw->plane.name);
+ wndw->func->release(wndw, asyw, asyh);
+ asyw->ntfy.handle = 0;
+ asyw->sema.handle = 0;
}
-struct nv50_display_flip {
- struct nv50_disp *disp;
- struct nv50_sync *chan;
-};
-
-static bool
-nv50_display_flip_wait(void *data)
+static int
+nv50_wndw_atomic_check_acquire(struct nv50_wndw *wndw,
+ struct nv50_wndw_atom *asyw,
+ struct nv50_head_atom *asyh)
{
- struct nv50_display_flip *flip = data;
- if (nouveau_bo_rd32(flip->disp->sync, flip->chan->addr / 4) ==
- flip->chan->data)
- return true;
- usleep_range(1, 2);
- return false;
+ struct nouveau_framebuffer *fb = nouveau_framebuffer(asyw->state.fb);
+ struct nouveau_drm *drm = nouveau_drm(wndw->plane.dev);
+ int ret;
+
+ NV_ATOMIC(drm, "%s acquire\n", wndw->plane.name);
+ asyw->clip.x1 = 0;
+ asyw->clip.y1 = 0;
+ asyw->clip.x2 = asyh->state.mode.hdisplay;
+ asyw->clip.y2 = asyh->state.mode.vdisplay;
+
+ asyw->image.w = fb->base.width;
+ asyw->image.h = fb->base.height;
+ asyw->image.kind = (fb->nvbo->tile_flags & 0x0000ff00) >> 8;
+ if (asyw->image.kind) {
+ asyw->image.layout = 0;
+ if (drm->device.info.chipset >= 0xc0)
+ asyw->image.block = fb->nvbo->tile_mode >> 4;
+ else
+ asyw->image.block = fb->nvbo->tile_mode;
+ asyw->image.pitch = (fb->base.pitches[0] / 4) << 4;
+ } else {
+ asyw->image.layout = 1;
+ asyw->image.block = 0;
+ asyw->image.pitch = fb->base.pitches[0];
+ }
+
+ ret = wndw->func->acquire(wndw, asyw, asyh);
+ if (ret)
+ return ret;
+
+ if (asyw->set.image) {
+ if (!(asyw->image.mode = asyw->interval ? 0 : 1))
+ asyw->image.interval = asyw->interval;
+ else
+ asyw->image.interval = 0;
+ }
+
+ return 0;
}
-void
-nv50_display_flip_stop(struct drm_crtc *crtc)
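+/* State-naming convention in the atomic code below: "arm" is the state
+ * currently armed on the hardware, "asy" the asynchronous (pending) state
+ * being checked; the final letter selects the object, so armw/asyw are
+ * window (plane) states and harm/asyh are head (CRTC) states.
+ */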
+static int
+nv50_wndw_atomic_check(struct drm_plane *plane, struct drm_plane_state *state)
{
- struct nvif_device *device = &nouveau_drm(crtc->dev)->device;
- struct nv50_display_flip flip = {
- .disp = nv50_disp(crtc->dev),
- .chan = nv50_sync(crtc),
- };
- u32 *push;
+ struct nouveau_drm *drm = nouveau_drm(plane->dev);
+ struct nv50_wndw *wndw = nv50_wndw(plane);
+ struct nv50_wndw_atom *armw = nv50_wndw_atom(wndw->plane.state);
+ struct nv50_wndw_atom *asyw = nv50_wndw_atom(state);
+ struct nv50_head_atom *harm = NULL, *asyh = NULL;
+ bool varm = false, asyv = false, asym = false;
+ int ret;
- push = evo_wait(flip.chan, 8);
- if (push) {
- evo_mthd(push, 0x0084, 1);
- evo_data(push, 0x00000000);
- evo_mthd(push, 0x0094, 1);
- evo_data(push, 0x00000000);
- evo_mthd(push, 0x00c0, 1);
- evo_data(push, 0x00000000);
- evo_mthd(push, 0x0080, 1);
- evo_data(push, 0x00000000);
- evo_kick(push, flip.chan);
+ NV_ATOMIC(drm, "%s atomic_check\n", plane->name);
+ if (asyw->state.crtc) {
+ asyh = nv50_head_atom_get(asyw->state.state, asyw->state.crtc);
+ if (IS_ERR(asyh))
+ return PTR_ERR(asyh);
+ asym = drm_atomic_crtc_needs_modeset(&asyh->state);
+ asyv = asyh->state.active;
}
- nvif_msec(device, 2000,
- if (nv50_display_flip_wait(&flip))
- break;
- );
+ if (armw->state.crtc) {
+ harm = nv50_head_atom_get(asyw->state.state, armw->state.crtc);
+ if (IS_ERR(harm))
+ return PTR_ERR(harm);
+ varm = harm->state.crtc->state->active;
+ }
+
+ if (asyv) {
+ asyw->point.x = asyw->state.crtc_x;
+ asyw->point.y = asyw->state.crtc_y;
+ if (memcmp(&armw->point, &asyw->point, sizeof(asyw->point)))
+ asyw->set.point = true;
+
+ if (!varm || asym || armw->state.fb != asyw->state.fb) {
+ ret = nv50_wndw_atomic_check_acquire(wndw, asyw, asyh);
+ if (ret)
+ return ret;
+ }
+ } else
+ if (varm) {
+ nv50_wndw_atomic_check_release(wndw, asyw, harm);
+ } else {
+ return 0;
+ }
+
+ if (!asyv || asym) {
+ asyw->clr.ntfy = armw->ntfy.handle != 0;
+ asyw->clr.sema = armw->sema.handle != 0;
+ if (wndw->func->image_clr)
+ asyw->clr.image = armw->image.handle != 0;
+ asyw->set.lut = wndw->func->lut && asyv;
+ }
+
+ return 0;
}
-int
-nv50_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb,
- struct nouveau_channel *chan, u32 swap_interval)
+static void
+nv50_wndw_cleanup_fb(struct drm_plane *plane, struct drm_plane_state *old_state)
{
- struct nouveau_framebuffer *nv_fb = nouveau_framebuffer(fb);
- struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
- struct nv50_head *head = nv50_head(crtc);
- struct nv50_sync *sync = nv50_sync(crtc);
- u32 *push;
- int ret;
+ struct nouveau_framebuffer *fb = nouveau_framebuffer(old_state->fb);
+ struct nouveau_drm *drm = nouveau_drm(plane->dev);
- if (crtc->primary->fb->width != fb->width ||
- crtc->primary->fb->height != fb->height)
- return -EINVAL;
+ NV_ATOMIC(drm, "%s cleanup: %p\n", plane->name, old_state->fb);
+ if (!old_state->fb)
+ return;
+
+ nouveau_bo_unpin(fb->nvbo);
+}
- swap_interval <<= 4;
- if (swap_interval == 0)
- swap_interval |= 0x100;
- if (chan == NULL)
- evo_sync(crtc->dev);
+static int
+nv50_wndw_prepare_fb(struct drm_plane *plane, struct drm_plane_state *state)
+{
+ struct nouveau_framebuffer *fb = nouveau_framebuffer(state->fb);
+ struct nouveau_drm *drm = nouveau_drm(plane->dev);
+ struct nv50_wndw *wndw = nv50_wndw(plane);
+ struct nv50_wndw_atom *asyw = nv50_wndw_atom(state);
+ struct nv50_head_atom *asyh;
+ struct nv50_dmac_ctxdma *ctxdma;
+ int ret;
- push = evo_wait(sync, 128);
- if (unlikely(push == NULL))
- return -EBUSY;
+ NV_ATOMIC(drm, "%s prepare: %p\n", plane->name, state->fb);
+ if (!asyw->state.fb)
+ return 0;
- if (chan && chan->user.oclass < G82_CHANNEL_GPFIFO) {
- ret = RING_SPACE(chan, 8);
- if (ret)
- return ret;
+ ret = nouveau_bo_pin(fb->nvbo, TTM_PL_FLAG_VRAM, true);
+ if (ret)
+ return ret;
- BEGIN_NV04(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 2);
- OUT_RING (chan, NvEvoSema0 + nv_crtc->index);
- OUT_RING (chan, sync->addr ^ 0x10);
- BEGIN_NV04(chan, 0, NV11_SUBCHAN_SEMAPHORE_RELEASE, 1);
- OUT_RING (chan, sync->data + 1);
- BEGIN_NV04(chan, 0, NV11_SUBCHAN_SEMAPHORE_OFFSET, 2);
- OUT_RING (chan, sync->addr);
- OUT_RING (chan, sync->data);
- } else
- if (chan && chan->user.oclass < FERMI_CHANNEL_GPFIFO) {
- u64 addr = nv84_fence_crtc(chan, nv_crtc->index) + sync->addr;
- ret = RING_SPACE(chan, 12);
- if (ret)
- return ret;
+ ctxdma = nv50_dmac_ctxdma_new(wndw->dmac, fb);
+ if (IS_ERR(ctxdma)) {
+ nouveau_bo_unpin(fb->nvbo);
+ return PTR_ERR(ctxdma);
+ }
- BEGIN_NV04(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 1);
- OUT_RING (chan, chan->vram.handle);
- BEGIN_NV04(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
- OUT_RING (chan, upper_32_bits(addr ^ 0x10));
- OUT_RING (chan, lower_32_bits(addr ^ 0x10));
- OUT_RING (chan, sync->data + 1);
- OUT_RING (chan, NV84_SUBCHAN_SEMAPHORE_TRIGGER_WRITE_LONG);
- BEGIN_NV04(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
- OUT_RING (chan, upper_32_bits(addr));
- OUT_RING (chan, lower_32_bits(addr));
- OUT_RING (chan, sync->data);
- OUT_RING (chan, NV84_SUBCHAN_SEMAPHORE_TRIGGER_ACQUIRE_EQUAL);
- } else
- if (chan) {
- u64 addr = nv84_fence_crtc(chan, nv_crtc->index) + sync->addr;
- ret = RING_SPACE(chan, 10);
- if (ret)
- return ret;
+ asyw->state.fence = reservation_object_get_excl_rcu(fb->nvbo->bo.resv);
+ asyw->image.handle = ctxdma->object.handle;
+ asyw->image.offset = fb->nvbo->bo.offset;
- BEGIN_NVC0(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
- OUT_RING (chan, upper_32_bits(addr ^ 0x10));
- OUT_RING (chan, lower_32_bits(addr ^ 0x10));
- OUT_RING (chan, sync->data + 1);
- OUT_RING (chan, NV84_SUBCHAN_SEMAPHORE_TRIGGER_WRITE_LONG |
- NVC0_SUBCHAN_SEMAPHORE_TRIGGER_YIELD);
- BEGIN_NVC0(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
- OUT_RING (chan, upper_32_bits(addr));
- OUT_RING (chan, lower_32_bits(addr));
- OUT_RING (chan, sync->data);
- OUT_RING (chan, NV84_SUBCHAN_SEMAPHORE_TRIGGER_ACQUIRE_EQUAL |
- NVC0_SUBCHAN_SEMAPHORE_TRIGGER_YIELD);
- }
-
- if (chan) {
- sync->addr ^= 0x10;
- sync->data++;
- FIRE_RING (chan);
- }
-
- /* queue the flip */
- evo_mthd(push, 0x0100, 1);
- evo_data(push, 0xfffe0000);
- evo_mthd(push, 0x0084, 1);
- evo_data(push, swap_interval);
- if (!(swap_interval & 0x00000100)) {
- evo_mthd(push, 0x00e0, 1);
- evo_data(push, 0x40000000);
- }
- evo_mthd(push, 0x0088, 4);
- evo_data(push, sync->addr);
- evo_data(push, sync->data++);
- evo_data(push, sync->data);
- evo_data(push, sync->base.sync.handle);
- evo_mthd(push, 0x00a0, 2);
- evo_data(push, 0x00000000);
- evo_data(push, 0x00000000);
- evo_mthd(push, 0x00c0, 1);
- evo_data(push, nv_fb->r_handle);
- evo_mthd(push, 0x0110, 2);
- evo_data(push, 0x00000000);
- evo_data(push, 0x00000000);
- if (nv50_vers(sync) < GF110_DISP_BASE_CHANNEL_DMA) {
- evo_mthd(push, 0x0800, 5);
- evo_data(push, nv_fb->nvbo->bo.offset >> 8);
- evo_data(push, 0);
- evo_data(push, (fb->height << 16) | fb->width);
- evo_data(push, nv_fb->r_pitch);
- evo_data(push, nv_fb->r_format);
- } else {
- evo_mthd(push, 0x0400, 5);
- evo_data(push, nv_fb->nvbo->bo.offset >> 8);
- evo_data(push, 0);
- evo_data(push, (fb->height << 16) | fb->width);
- evo_data(push, nv_fb->r_pitch);
- evo_data(push, nv_fb->r_format);
+ if (wndw->func->prepare) {
+ asyh = nv50_head_atom_get(asyw->state.state, asyw->state.crtc);
+ if (IS_ERR(asyh))
+ return PTR_ERR(asyh);
+
+ wndw->func->prepare(wndw, asyh, asyw);
}
- evo_mthd(push, 0x0080, 1);
- evo_data(push, 0x00000000);
- evo_kick(push, sync);
- nouveau_bo_ref(nv_fb->nvbo, &head->image);
+ return 0;
+}
+
+static const struct drm_plane_helper_funcs
+nv50_wndw_helper = {
+ .prepare_fb = nv50_wndw_prepare_fb,
+ .cleanup_fb = nv50_wndw_cleanup_fb,
+ .atomic_check = nv50_wndw_atomic_check,
+};
+
+static void
+nv50_wndw_atomic_destroy_state(struct drm_plane *plane,
+ struct drm_plane_state *state)
+{
+ struct nv50_wndw_atom *asyw = nv50_wndw_atom(state);
+ __drm_atomic_helper_plane_destroy_state(&asyw->state);
+ dma_fence_put(asyw->state.fence);
+ kfree(asyw);
+}
+
+static struct drm_plane_state *
+nv50_wndw_atomic_duplicate_state(struct drm_plane *plane)
+{
+ struct nv50_wndw_atom *armw = nv50_wndw_atom(plane->state);
+ struct nv50_wndw_atom *asyw;
+ if (!(asyw = kmalloc(sizeof(*asyw), GFP_KERNEL)))
+ return NULL;
+ __drm_atomic_helper_plane_duplicate_state(plane, &asyw->state);
+ asyw->state.fence = NULL;
+ asyw->interval = 1;
+ asyw->sema = armw->sema;
+ asyw->ntfy = armw->ntfy;
+ asyw->image = armw->image;
+ asyw->point = armw->point;
+ asyw->lut = armw->lut;
+ asyw->clr.mask = 0;
+ asyw->set.mask = 0;
+ return &asyw->state;
+}
+
+static void
+nv50_wndw_reset(struct drm_plane *plane)
+{
+ struct nv50_wndw_atom *asyw;
+
+ if (WARN_ON(!(asyw = kzalloc(sizeof(*asyw), GFP_KERNEL))))
+ return;
+
+ if (plane->state)
+ plane->funcs->atomic_destroy_state(plane, plane->state);
+ plane->state = &asyw->state;
+ plane->state->plane = plane;
+ plane->state->rotation = DRM_ROTATE_0;
+}
+
+static void
+nv50_wndw_destroy(struct drm_plane *plane)
+{
+ struct nv50_wndw *wndw = nv50_wndw(plane);
+ void *data;
+ nvif_notify_fini(&wndw->notify);
+ data = wndw->func->dtor(wndw);
+ drm_plane_cleanup(&wndw->plane);
+ kfree(data);
+}
+
+static const struct drm_plane_funcs
+nv50_wndw = {
+ .update_plane = drm_atomic_helper_update_plane,
+ .disable_plane = drm_atomic_helper_disable_plane,
+ .destroy = nv50_wndw_destroy,
+ .reset = nv50_wndw_reset,
+ .set_property = drm_atomic_helper_plane_set_property,
+ .atomic_duplicate_state = nv50_wndw_atomic_duplicate_state,
+ .atomic_destroy_state = nv50_wndw_atomic_destroy_state,
+};
+
+static void
+nv50_wndw_fini(struct nv50_wndw *wndw)
+{
+ nvif_notify_put(&wndw->notify);
+}
+
+static void
+nv50_wndw_init(struct nv50_wndw *wndw)
+{
+ nvif_notify_get(&wndw->notify);
+}
+
+static int
+nv50_wndw_ctor(const struct nv50_wndw_func *func, struct drm_device *dev,
+ enum drm_plane_type type, const char *name, int index,
+ struct nv50_dmac *dmac, const u32 *format, int nformat,
+ struct nv50_wndw *wndw)
+{
+ int ret;
+
+ wndw->func = func;
+ wndw->dmac = dmac;
+
+ ret = drm_universal_plane_init(dev, &wndw->plane, 0, &nv50_wndw, format,
+ nformat, type, "%s-%d", name, index);
+ if (ret)
+ return ret;
+
+ drm_plane_helper_add(&wndw->plane, &nv50_wndw_helper);
return 0;
}
/******************************************************************************
- * CRTC
+ * Cursor plane
*****************************************************************************/
+#define nv50_curs(p) container_of((p), struct nv50_curs, wndw)
+
+struct nv50_curs {
+ struct nv50_wndw wndw;
+ struct nvif_object chan;
+};
+
+static u32
+nv50_curs_update(struct nv50_wndw *wndw, u32 interlock)
+{
+ struct nv50_curs *curs = nv50_curs(wndw);
+ nvif_wr32(&curs->chan, 0x0080, 0x00000000);
+ return 0;
+}
+
+static void
+nv50_curs_point(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
+{
+ struct nv50_curs *curs = nv50_curs(wndw);
+ nvif_wr32(&curs->chan, 0x0084, (asyw->point.y << 16) | asyw->point.x);
+}
+
+static void
+nv50_curs_prepare(struct nv50_wndw *wndw, struct nv50_head_atom *asyh,
+ struct nv50_wndw_atom *asyw)
+{
+ asyh->curs.handle = nv50_disp(wndw->plane.dev)->mast.base.vram.handle;
+ asyh->curs.offset = asyw->image.offset;
+ asyh->set.curs = asyh->curs.visible;
+}
+
+static void
+nv50_curs_release(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw,
+ struct nv50_head_atom *asyh)
+{
+ asyh->curs.visible = false;
+}
+
static int
-nv50_crtc_set_dither(struct nouveau_crtc *nv_crtc, bool update)
+nv50_curs_acquire(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw,
+ struct nv50_head_atom *asyh)
{
- struct nv50_mast *mast = nv50_mast(nv_crtc->base.dev);
- struct nouveau_connector *nv_connector;
- struct drm_connector *connector;
- u32 *push, mode = 0x00;
+ int ret;
- nv_connector = nouveau_crtc_connector_get(nv_crtc);
- connector = &nv_connector->base;
- if (nv_connector->dithering_mode == DITHERING_MODE_AUTO) {
- if (nv_crtc->base.primary->fb->depth > connector->display_info.bpc * 3)
- mode = DITHERING_MODE_DYNAMIC2X2;
- } else {
- mode = nv_connector->dithering_mode;
- }
+ ret = drm_plane_helper_check_state(&asyw->state, &asyw->clip,
+ DRM_PLANE_HELPER_NO_SCALING,
+ DRM_PLANE_HELPER_NO_SCALING,
+ true, true);
+ asyh->curs.visible = asyw->state.visible;
+ if (ret || !asyh->curs.visible)
+ return ret;
- if (nv_connector->dithering_depth == DITHERING_DEPTH_AUTO) {
- if (connector->display_info.bpc >= 8)
- mode |= DITHERING_DEPTH_8BPC;
- } else {
- mode |= nv_connector->dithering_depth;
+ switch (asyw->state.fb->width) {
+ case 32: asyh->curs.layout = 0; break;
+ case 64: asyh->curs.layout = 1; break;
+ default:
+ return -EINVAL;
}
- push = evo_wait(mast, 4);
- if (push) {
- if (nv50_vers(mast) < GF110_DISP_CORE_CHANNEL_DMA) {
- evo_mthd(push, 0x08a0 + (nv_crtc->index * 0x0400), 1);
- evo_data(push, mode);
- } else
- if (nv50_vers(mast) < GK104_DISP_CORE_CHANNEL_DMA) {
- evo_mthd(push, 0x0490 + (nv_crtc->index * 0x0300), 1);
- evo_data(push, mode);
- } else {
- evo_mthd(push, 0x04a0 + (nv_crtc->index * 0x0300), 1);
- evo_data(push, mode);
- }
+ if (asyw->state.fb->width != asyw->state.fb->height)
+ return -EINVAL;
- if (update) {
- evo_mthd(push, 0x0080, 1);
- evo_data(push, 0x00000000);
- }
- evo_kick(push, mast);
+ switch (asyw->state.fb->pixel_format) {
+ case DRM_FORMAT_ARGB8888: asyh->curs.format = 1; break;
+ default:
+ WARN_ON(1);
+ return -EINVAL;
}
return 0;
}
+static void *
+nv50_curs_dtor(struct nv50_wndw *wndw)
+{
+ struct nv50_curs *curs = nv50_curs(wndw);
+ nvif_object_fini(&curs->chan);
+ return curs;
+}
+
+static const u32
+nv50_curs_format[] = {
+ DRM_FORMAT_ARGB8888,
+};
+
+static const struct nv50_wndw_func
+nv50_curs = {
+ .dtor = nv50_curs_dtor,
+ .acquire = nv50_curs_acquire,
+ .release = nv50_curs_release,
+ .prepare = nv50_curs_prepare,
+ .point = nv50_curs_point,
+ .update = nv50_curs_update,
+};
+
static int
-nv50_crtc_set_scale(struct nouveau_crtc *nv_crtc, bool update)
+nv50_curs_new(struct nouveau_drm *drm, struct nv50_head *head,
+ struct nv50_curs **pcurs)
{
- struct nv50_mast *mast = nv50_mast(nv_crtc->base.dev);
- struct drm_display_mode *omode, *umode = &nv_crtc->base.mode;
- struct drm_crtc *crtc = &nv_crtc->base;
- struct nouveau_connector *nv_connector;
- int mode = DRM_MODE_SCALE_NONE;
- u32 oX, oY, *push;
+ static const struct nvif_mclass curses[] = {
+ { GK104_DISP_CURSOR, 0 },
+ { GF110_DISP_CURSOR, 0 },
+ { GT214_DISP_CURSOR, 0 },
+ { G82_DISP_CURSOR, 0 },
+ { NV50_DISP_CURSOR, 0 },
+ {}
+ };
+ struct nv50_disp_cursor_v0 args = {
+ .head = head->base.index,
+ };
+ struct nv50_disp *disp = nv50_disp(drm->dev);
+ struct nv50_curs *curs;
+ int cid, ret;
+
+ cid = nvif_mclass(disp->disp, curses);
+ if (cid < 0) {
+ NV_ERROR(drm, "No supported cursor immediate class\n");
+ return cid;
+ }
- /* start off at the resolution we programmed the crtc for, this
- * effectively handles NONE/FULL scaling
- */
- nv_connector = nouveau_crtc_connector_get(nv_crtc);
- if (nv_connector && nv_connector->native_mode) {
- mode = nv_connector->scaling_mode;
- if (nv_connector->scaling_full) /* non-EDID LVDS/eDP mode */
- mode = DRM_MODE_SCALE_FULLSCREEN;
+ if (!(curs = *pcurs = kzalloc(sizeof(*curs), GFP_KERNEL)))
+ return -ENOMEM;
+
+ ret = nv50_wndw_ctor(&nv50_curs, drm->dev, DRM_PLANE_TYPE_CURSOR,
+ "curs", head->base.index, &disp->mast.base,
+ nv50_curs_format, ARRAY_SIZE(nv50_curs_format),
+ &curs->wndw);
+ if (ret) {
+ kfree(curs);
+ return ret;
}
- if (mode != DRM_MODE_SCALE_NONE)
- omode = nv_connector->native_mode;
- else
- omode = umode;
+ ret = nvif_object_init(disp->disp, 0, curses[cid].oclass, &args,
+ sizeof(args), &curs->chan);
+ if (ret) {
+ NV_ERROR(drm, "curs%04x allocation failed: %d\n",
+ curses[cid].oclass, ret);
+ return ret;
+ }
- oX = omode->hdisplay;
- oY = omode->vdisplay;
- if (omode->flags & DRM_MODE_FLAG_DBLSCAN)
- oY *= 2;
+ return 0;
+}
- /* add overscan compensation if necessary, will keep the aspect
- * ratio the same as the backend mode unless overridden by the
- * user setting both hborder and vborder properties.
- */
- if (nv_connector && ( nv_connector->underscan == UNDERSCAN_ON ||
- (nv_connector->underscan == UNDERSCAN_AUTO &&
- drm_detect_hdmi_monitor(nv_connector->edid)))) {
- u32 bX = nv_connector->underscan_hborder;
- u32 bY = nv_connector->underscan_vborder;
- u32 aspect = (oY << 19) / oX;
+/******************************************************************************
+ * Primary plane
+ *****************************************************************************/
+#define nv50_base(p) container_of((p), struct nv50_base, wndw)
- if (bX) {
- oX -= (bX * 2);
- if (bY) oY -= (bY * 2);
- else oY = ((oX * aspect) + (aspect / 2)) >> 19;
- } else {
- oX -= (oX >> 4) + 32;
- if (bY) oY -= (bY * 2);
- else oY = ((oX * aspect) + (aspect / 2)) >> 19;
- }
+struct nv50_base {
+ struct nv50_wndw wndw;
+ struct nv50_sync chan;
+ int id;
+};
+
+static int
+nv50_base_notify(struct nvif_notify *notify)
+{
+ return NVIF_NOTIFY_KEEP;
+}
+
+static void
+nv50_base_lut(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
+{
+ struct nv50_base *base = nv50_base(wndw);
+ u32 *push;
+ if ((push = evo_wait(&base->chan, 2))) {
+ evo_mthd(push, 0x00e0, 1);
+ evo_data(push, asyw->lut.enable << 30);
+ evo_kick(push, &base->chan);
}
+}
- /* handle CENTER/ASPECT scaling, taking into account the areas
- * removed already for overscan compensation
- */
- switch (mode) {
- case DRM_MODE_SCALE_CENTER:
- oX = min((u32)umode->hdisplay, oX);
- oY = min((u32)umode->vdisplay, oY);
- /* fall-through */
- case DRM_MODE_SCALE_ASPECT:
- if (oY < oX) {
- u32 aspect = (umode->hdisplay << 19) / umode->vdisplay;
- oX = ((oY * aspect) + (aspect / 2)) >> 19;
- } else {
- u32 aspect = (umode->vdisplay << 19) / umode->hdisplay;
- oY = ((oX * aspect) + (aspect / 2)) >> 19;
- }
- break;
- default:
- break;
+static void
+nv50_base_image_clr(struct nv50_wndw *wndw)
+{
+ struct nv50_base *base = nv50_base(wndw);
+ u32 *push;
+ if ((push = evo_wait(&base->chan, 4))) {
+ evo_mthd(push, 0x0084, 1);
+ evo_data(push, 0x00000000);
+ evo_mthd(push, 0x00c0, 1);
+ evo_data(push, 0x00000000);
+ evo_kick(push, &base->chan);
}
+}
- push = evo_wait(mast, 8);
- if (push) {
- if (nv50_vers(mast) < GF110_DISP_CORE_CHANNEL_DMA) {
- /*XXX: SCALE_CTRL_ACTIVE??? */
- evo_mthd(push, 0x08d8 + (nv_crtc->index * 0x400), 2);
- evo_data(push, (oY << 16) | oX);
- evo_data(push, (oY << 16) | oX);
- evo_mthd(push, 0x08a4 + (nv_crtc->index * 0x400), 1);
+static void
+nv50_base_image_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
+{
+ struct nv50_base *base = nv50_base(wndw);
+ const s32 oclass = base->chan.base.base.user.oclass;
+ u32 *push;
+ if ((push = evo_wait(&base->chan, 10))) {
+ evo_mthd(push, 0x0084, 1);
+ evo_data(push, (asyw->image.mode << 8) |
+ (asyw->image.interval << 4));
+ evo_mthd(push, 0x00c0, 1);
+ evo_data(push, asyw->image.handle);
+ if (oclass < G82_DISP_BASE_CHANNEL_DMA) {
+ evo_mthd(push, 0x0800, 5);
+ evo_data(push, asyw->image.offset >> 8);
+ evo_data(push, 0x00000000);
+ evo_data(push, (asyw->image.h << 16) | asyw->image.w);
+ evo_data(push, (asyw->image.layout << 20) |
+ asyw->image.pitch |
+ asyw->image.block);
+ evo_data(push, (asyw->image.kind << 16) |
+ (asyw->image.format << 8));
+ } else
+ if (oclass < GF110_DISP_BASE_CHANNEL_DMA) {
+ evo_mthd(push, 0x0800, 5);
+ evo_data(push, asyw->image.offset >> 8);
evo_data(push, 0x00000000);
- evo_mthd(push, 0x08c8 + (nv_crtc->index * 0x400), 1);
- evo_data(push, umode->vdisplay << 16 | umode->hdisplay);
+ evo_data(push, (asyw->image.h << 16) | asyw->image.w);
+ evo_data(push, (asyw->image.layout << 20) |
+ asyw->image.pitch |
+ asyw->image.block);
+ evo_data(push, asyw->image.format << 8);
} else {
- evo_mthd(push, 0x04c0 + (nv_crtc->index * 0x300), 3);
- evo_data(push, (oY << 16) | oX);
- evo_data(push, (oY << 16) | oX);
- evo_data(push, (oY << 16) | oX);
- evo_mthd(push, 0x0494 + (nv_crtc->index * 0x300), 1);
+ evo_mthd(push, 0x0400, 5);
+ evo_data(push, asyw->image.offset >> 8);
evo_data(push, 0x00000000);
- evo_mthd(push, 0x04b8 + (nv_crtc->index * 0x300), 1);
- evo_data(push, umode->vdisplay << 16 | umode->hdisplay);
+ evo_data(push, (asyw->image.h << 16) | asyw->image.w);
+ evo_data(push, (asyw->image.layout << 24) |
+ asyw->image.pitch |
+ asyw->image.block);
+ evo_data(push, asyw->image.format << 8);
}
+ evo_kick(push, &base->chan);
+ }
+}
- evo_kick(push, mast);
+static void
+nv50_base_ntfy_clr(struct nv50_wndw *wndw)
+{
+ struct nv50_base *base = nv50_base(wndw);
+ u32 *push;
+ if ((push = evo_wait(&base->chan, 2))) {
+ evo_mthd(push, 0x00a4, 1);
+ evo_data(push, 0x00000000);
+ evo_kick(push, &base->chan);
+ }
+}
- if (update) {
- nv50_display_flip_stop(crtc);
- nv50_display_flip_next(crtc, crtc->primary->fb,
- NULL, 1);
- }
+static void
+nv50_base_ntfy_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
+{
+ struct nv50_base *base = nv50_base(wndw);
+ u32 *push;
+ if ((push = evo_wait(&base->chan, 3))) {
+ evo_mthd(push, 0x00a0, 2);
+ evo_data(push, (asyw->ntfy.awaken << 30) | asyw->ntfy.offset);
+ evo_data(push, asyw->ntfy.handle);
+ evo_kick(push, &base->chan);
}
+}
- return 0;
+static void
+nv50_base_sema_clr(struct nv50_wndw *wndw)
+{
+ struct nv50_base *base = nv50_base(wndw);
+ u32 *push;
+ if ((push = evo_wait(&base->chan, 2))) {
+ evo_mthd(push, 0x0094, 1);
+ evo_data(push, 0x00000000);
+ evo_kick(push, &base->chan);
+ }
}
-static int
-nv50_crtc_set_raster_vblank_dmi(struct nouveau_crtc *nv_crtc, u32 usec)
+static void
+nv50_base_sema_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
{
- struct nv50_mast *mast = nv50_mast(nv_crtc->base.dev);
+ struct nv50_base *base = nv50_base(wndw);
u32 *push;
+ if ((push = evo_wait(&base->chan, 5))) {
+ evo_mthd(push, 0x0088, 4);
+ evo_data(push, asyw->sema.offset);
+ evo_data(push, asyw->sema.acquire);
+ evo_data(push, asyw->sema.release);
+ evo_data(push, asyw->sema.handle);
+ evo_kick(push, &base->chan);
+ }
+}
- push = evo_wait(mast, 8);
- if (!push)
- return -ENOMEM;
+static u32
+nv50_base_update(struct nv50_wndw *wndw, u32 interlock)
+{
+ struct nv50_base *base = nv50_base(wndw);
+ u32 *push;
- evo_mthd(push, 0x0828 + (nv_crtc->index * 0x400), 1);
- evo_data(push, usec);
- evo_kick(push, mast);
+ if (!(push = evo_wait(&base->chan, 2)))
+ return 0;
+ evo_mthd(push, 0x0080, 1);
+ evo_data(push, interlock);
+ evo_kick(push, &base->chan);
+
+ if (base->chan.base.base.user.oclass < GF110_DISP_BASE_CHANNEL_DMA)
+ return interlock ? 2 << (base->id * 8) : 0;
+ return interlock ? 2 << (base->id * 4) : 0;
+}
+
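+/* nvif_msec() expands its statement body into a timed poll loop: here it
+ * spins for up to 2000ms of device time waiting for the notifier's top
+ * status bits to read 0x40000000 (flip begun), returning a negative
+ * value on timeout.
+ */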
+static int
+nv50_base_ntfy_wait_begun(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
+{
+ struct nouveau_drm *drm = nouveau_drm(wndw->plane.dev);
+ struct nv50_disp *disp = nv50_disp(wndw->plane.dev);
+ if (nvif_msec(&drm->device, 2000ULL,
+ u32 data = nouveau_bo_rd32(disp->sync, asyw->ntfy.offset / 4);
+ if ((data & 0xc0000000) == 0x40000000)
+ break;
+ usleep_range(1, 2);
+ ) < 0)
+ return -ETIMEDOUT;
return 0;
}
+static void
+nv50_base_release(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw,
+ struct nv50_head_atom *asyh)
+{
+ asyh->base.cpp = 0;
+}
+
static int
-nv50_crtc_set_color_vibrance(struct nouveau_crtc *nv_crtc, bool update)
+nv50_base_acquire(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw,
+ struct nv50_head_atom *asyh)
{
- struct nv50_mast *mast = nv50_mast(nv_crtc->base.dev);
- u32 *push, hue, vib;
- int adj;
+ const u32 format = asyw->state.fb->pixel_format;
+ const struct drm_format_info *info;
+ int ret;
- adj = (nv_crtc->color_vibrance > 0) ? 50 : 0;
- vib = ((nv_crtc->color_vibrance * 2047 + adj) / 100) & 0xfff;
- hue = ((nv_crtc->vibrant_hue * 2047) / 100) & 0xfff;
+ info = drm_format_info(format);
+ if (!info || !info->depth)
+ return -EINVAL;
- push = evo_wait(mast, 16);
- if (push) {
- if (nv50_vers(mast) < GF110_DISP_CORE_CHANNEL_DMA) {
- evo_mthd(push, 0x08a8 + (nv_crtc->index * 0x400), 1);
- evo_data(push, (hue << 20) | (vib << 8));
- } else {
- evo_mthd(push, 0x0498 + (nv_crtc->index * 0x300), 1);
- evo_data(push, (hue << 20) | (vib << 8));
- }
+ ret = drm_plane_helper_check_state(&asyw->state, &asyw->clip,
+ DRM_PLANE_HELPER_NO_SCALING,
+ DRM_PLANE_HELPER_NO_SCALING,
+ false, true);
+ if (ret)
+ return ret;
- if (update) {
- evo_mthd(push, 0x0080, 1);
- evo_data(push, 0x00000000);
- }
- evo_kick(push, mast);
+ asyh->base.depth = info->depth;
+ asyh->base.cpp = info->cpp[0];
+ asyh->base.x = asyw->state.src.x1 >> 16;
+ asyh->base.y = asyw->state.src.y1 >> 16;
+ asyh->base.w = asyw->state.fb->width;
+ asyh->base.h = asyw->state.fb->height;
+
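+ /* Translate the DRM fourcc to the hardware surface format code; the
+ * hex values below appear to come from the EVO surface-format enum.
+ */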
+ switch (format) {
+ case DRM_FORMAT_C8 : asyw->image.format = 0x1e; break;
+ case DRM_FORMAT_RGB565 : asyw->image.format = 0xe8; break;
+ case DRM_FORMAT_XRGB1555 :
+ case DRM_FORMAT_ARGB1555 : asyw->image.format = 0xe9; break;
+ case DRM_FORMAT_XRGB8888 :
+ case DRM_FORMAT_ARGB8888 : asyw->image.format = 0xcf; break;
+ case DRM_FORMAT_XBGR2101010:
+ case DRM_FORMAT_ABGR2101010: asyw->image.format = 0xd1; break;
+ case DRM_FORMAT_XBGR8888 :
+ case DRM_FORMAT_ABGR8888 : asyw->image.format = 0xd5; break;
+ default:
+ WARN_ON(1);
+ return -EINVAL;
}
+ asyw->lut.enable = 1;
+ asyw->set.image = true;
return 0;
}
+static void *
+nv50_base_dtor(struct nv50_wndw *wndw)
+{
+ struct nv50_disp *disp = nv50_disp(wndw->plane.dev);
+ struct nv50_base *base = nv50_base(wndw);
+ nv50_dmac_destroy(&base->chan.base, disp->disp);
+ return base;
+}
+
+static const u32
+nv50_base_format[] = {
+ DRM_FORMAT_C8,
+ DRM_FORMAT_RGB565,
+ DRM_FORMAT_XRGB1555,
+ DRM_FORMAT_ARGB1555,
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_ARGB8888,
+ DRM_FORMAT_XBGR2101010,
+ DRM_FORMAT_ABGR2101010,
+ DRM_FORMAT_XBGR8888,
+ DRM_FORMAT_ABGR8888,
+};
+
+static const struct nv50_wndw_func
+nv50_base = {
+ .dtor = nv50_base_dtor,
+ .acquire = nv50_base_acquire,
+ .release = nv50_base_release,
+ .sema_set = nv50_base_sema_set,
+ .sema_clr = nv50_base_sema_clr,
+ .ntfy_set = nv50_base_ntfy_set,
+ .ntfy_clr = nv50_base_ntfy_clr,
+ .ntfy_wait_begun = nv50_base_ntfy_wait_begun,
+ .image_set = nv50_base_image_set,
+ .image_clr = nv50_base_image_clr,
+ .lut = nv50_base_lut,
+ .update = nv50_base_update,
+};
+
static int
-nv50_crtc_set_image(struct nouveau_crtc *nv_crtc, struct drm_framebuffer *fb,
- int x, int y, bool update)
+nv50_base_new(struct nouveau_drm *drm, struct nv50_head *head,
+ struct nv50_base **pbase)
+{
+ struct nv50_disp *disp = nv50_disp(drm->dev);
+ struct nv50_base *base;
+ int ret;
+
+ if (!(base = *pbase = kzalloc(sizeof(*base), GFP_KERNEL)))
+ return -ENOMEM;
+ base->id = head->base.index;
+ base->wndw.ntfy = EVO_FLIP_NTFY0(base->id);
+ base->wndw.sema = EVO_FLIP_SEM0(base->id);
+ base->wndw.data = 0x00000000;
+
+ ret = nv50_wndw_ctor(&nv50_base, drm->dev, DRM_PLANE_TYPE_PRIMARY,
+ "base", base->id, &base->chan.base,
+ nv50_base_format, ARRAY_SIZE(nv50_base_format),
+ &base->wndw);
+ if (ret) {
+ kfree(base);
+ return ret;
+ }
+
+ ret = nv50_base_create(&drm->device, disp->disp, base->id,
+ disp->sync->bo.offset, &base->chan);
+ if (ret)
+ return ret;
+
+ return nvif_notify_init(&base->chan.base.base.user, nv50_base_notify,
+ false,
+ NV50_DISP_BASE_CHANNEL_DMA_V0_NTFY_UEVENT,
+ &(struct nvif_notify_uevent_req) {},
+ sizeof(struct nvif_notify_uevent_req),
+ sizeof(struct nvif_notify_uevent_rep),
+ &base->wndw.notify);
+}
+
+/******************************************************************************
+ * Head
+ *****************************************************************************/
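+/* Each nv50_head_*() helper below pushes one piece of nv50_head_atom
+ * state (procamp, dither, overlay/base bounds, cursor, core surface,
+ * LUT, mode, viewport) to the core EVO channel, picking method offsets
+ * by channel class; nv50_head_flush_set()/_clr() decide which of them
+ * run for a given commit.
+ */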
+static void
+nv50_head_procamp(struct nv50_head *head, struct nv50_head_atom *asyh)
{
- struct nouveau_framebuffer *nvfb = nouveau_framebuffer(fb);
- struct nv50_mast *mast = nv50_mast(nv_crtc->base.dev);
+ struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->mast.base;
u32 *push;
+ if ((push = evo_wait(core, 2))) {
+ if (core->base.user.oclass < GF110_DISP_CORE_CHANNEL_DMA)
+ evo_mthd(push, 0x08a8 + (head->base.index * 0x400), 1);
+ else
+ evo_mthd(push, 0x0498 + (head->base.index * 0x300), 1);
+ evo_data(push, (asyh->procamp.sat.sin << 20) |
+ (asyh->procamp.sat.cos << 8));
+ evo_kick(push, core);
+ }
+}
- push = evo_wait(mast, 16);
- if (push) {
- if (nv50_vers(mast) < GF110_DISP_CORE_CHANNEL_DMA) {
- evo_mthd(push, 0x0860 + (nv_crtc->index * 0x400), 1);
- evo_data(push, nvfb->nvbo->bo.offset >> 8);
- evo_mthd(push, 0x0868 + (nv_crtc->index * 0x400), 3);
- evo_data(push, (fb->height << 16) | fb->width);
- evo_data(push, nvfb->r_pitch);
- evo_data(push, nvfb->r_format);
- evo_mthd(push, 0x08c0 + (nv_crtc->index * 0x400), 1);
- evo_data(push, (y << 16) | x);
- if (nv50_vers(mast) > NV50_DISP_CORE_CHANNEL_DMA) {
- evo_mthd(push, 0x0874 + (nv_crtc->index * 0x400), 1);
- evo_data(push, nvfb->r_handle);
- }
- } else {
- evo_mthd(push, 0x0460 + (nv_crtc->index * 0x300), 1);
- evo_data(push, nvfb->nvbo->bo.offset >> 8);
- evo_mthd(push, 0x0468 + (nv_crtc->index * 0x300), 4);
- evo_data(push, (fb->height << 16) | fb->width);
- evo_data(push, nvfb->r_pitch);
- evo_data(push, nvfb->r_format);
- evo_data(push, nvfb->r_handle);
- evo_mthd(push, 0x04b0 + (nv_crtc->index * 0x300), 1);
- evo_data(push, (y << 16) | x);
- }
+static void
+nv50_head_dither(struct nv50_head *head, struct nv50_head_atom *asyh)
+{
+ struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->mast.base;
+ u32 *push;
+ if ((push = evo_wait(core, 2))) {
+ if (core->base.user.oclass < GF110_DISP_CORE_CHANNEL_DMA)
+ evo_mthd(push, 0x08a0 + (head->base.index * 0x0400), 1);
+ else
+ if (core->base.user.oclass < GK104_DISP_CORE_CHANNEL_DMA)
+ evo_mthd(push, 0x0490 + (head->base.index * 0x0300), 1);
+ else
+ evo_mthd(push, 0x04a0 + (head->base.index * 0x0300), 1);
+ evo_data(push, (asyh->dither.mode << 3) |
+ (asyh->dither.bits << 1) |
+ asyh->dither.enable);
+ evo_kick(push, core);
+ }
+}
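+/* The "bounds" methods written by nv50_head_base()/nv50_head_ovly()
+ * appear to declare the maximum pixel depth the base/overlay channel may
+ * use: the switch maps bytes-per-pixel into bits 8..10, and bit 0 acts
+ * as an enable flag.
+ */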
- if (update) {
- evo_mthd(push, 0x0080, 1);
- evo_data(push, 0x00000000);
+static void
+nv50_head_ovly(struct nv50_head *head, struct nv50_head_atom *asyh)
+{
+ struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->mast.base;
+ u32 bounds = 0;
+ u32 *push;
+
+ if (asyh->base.cpp) {
+ switch (asyh->base.cpp) {
+ case 8: bounds |= 0x00000500; break;
+ case 4: bounds |= 0x00000300; break;
+ case 2: bounds |= 0x00000100; break;
+ default:
+ WARN_ON(1);
+ break;
}
- evo_kick(push, mast);
+ bounds |= 0x00000001;
}
- nv_crtc->fb.handle = nvfb->r_handle;
- return 0;
+ if ((push = evo_wait(core, 2))) {
+ if (core->base.user.oclass < GF110_DISP_CORE_CHANNEL_DMA)
+ evo_mthd(push, 0x0904 + head->base.index * 0x400, 1);
+ else
+ evo_mthd(push, 0x04d4 + head->base.index * 0x300, 1);
+ evo_data(push, bounds);
+ evo_kick(push, core);
+ }
}
static void
-nv50_crtc_cursor_show(struct nouveau_crtc *nv_crtc)
+nv50_head_base(struct nv50_head *head, struct nv50_head_atom *asyh)
{
- struct nv50_mast *mast = nv50_mast(nv_crtc->base.dev);
- u32 *push = evo_wait(mast, 16);
- if (push) {
- if (nv50_vers(mast) < G82_DISP_CORE_CHANNEL_DMA) {
- evo_mthd(push, 0x0880 + (nv_crtc->index * 0x400), 2);
- evo_data(push, 0x85000000);
- evo_data(push, nv_crtc->cursor.nvbo->bo.offset >> 8);
- } else
- if (nv50_vers(mast) < GF110_DISP_CORE_CHANNEL_DMA) {
- evo_mthd(push, 0x0880 + (nv_crtc->index * 0x400), 2);
- evo_data(push, 0x85000000);
- evo_data(push, nv_crtc->cursor.nvbo->bo.offset >> 8);
- evo_mthd(push, 0x089c + (nv_crtc->index * 0x400), 1);
- evo_data(push, mast->base.vram.handle);
- } else {
- evo_mthd(push, 0x0480 + (nv_crtc->index * 0x300), 2);
- evo_data(push, 0x85000000);
- evo_data(push, nv_crtc->cursor.nvbo->bo.offset >> 8);
- evo_mthd(push, 0x048c + (nv_crtc->index * 0x300), 1);
- evo_data(push, mast->base.vram.handle);
+ struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->mast.base;
+ u32 bounds = 0;
+ u32 *push;
+
+ if (asyh->base.cpp) {
+ switch (asyh->base.cpp) {
+ case 8: bounds |= 0x00000500; break;
+ case 4: bounds |= 0x00000300; break;
+ case 2: bounds |= 0x00000100; break;
+ case 1: bounds |= 0x00000000; break;
+ default:
+ WARN_ON(1);
+ break;
}
- evo_kick(push, mast);
+ bounds |= 0x00000001;
+ }
+
+ if ((push = evo_wait(core, 2))) {
+ if (core->base.user.oclass < GF110_DISP_CORE_CHANNEL_DMA)
+ evo_mthd(push, 0x0900 + head->base.index * 0x400, 1);
+ else
+ evo_mthd(push, 0x04d0 + head->base.index * 0x300, 1);
+ evo_data(push, bounds);
+ evo_kick(push, core);
}
- nv_crtc->cursor.visible = true;
}
static void
-nv50_crtc_cursor_hide(struct nouveau_crtc *nv_crtc)
+nv50_head_curs_clr(struct nv50_head *head)
{
- struct nv50_mast *mast = nv50_mast(nv_crtc->base.dev);
- u32 *push = evo_wait(mast, 16);
- if (push) {
- if (nv50_vers(mast) < G82_DISP_CORE_CHANNEL_DMA) {
- evo_mthd(push, 0x0880 + (nv_crtc->index * 0x400), 1);
+ struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->mast.base;
+ u32 *push;
+ if ((push = evo_wait(core, 4))) {
+ if (core->base.user.oclass < G82_DISP_CORE_CHANNEL_DMA) {
+ evo_mthd(push, 0x0880 + head->base.index * 0x400, 1);
evo_data(push, 0x05000000);
} else
- if (nv50_vers(mast) < GF110_DISP_CORE_CHANNEL_DMA) {
- evo_mthd(push, 0x0880 + (nv_crtc->index * 0x400), 1);
+ if (core->base.user.oclass < GF110_DISP_CORE_CHANNEL_DMA) {
+ evo_mthd(push, 0x0880 + head->base.index * 0x400, 1);
evo_data(push, 0x05000000);
- evo_mthd(push, 0x089c + (nv_crtc->index * 0x400), 1);
+ evo_mthd(push, 0x089c + head->base.index * 0x400, 1);
evo_data(push, 0x00000000);
} else {
- evo_mthd(push, 0x0480 + (nv_crtc->index * 0x300), 1);
+ evo_mthd(push, 0x0480 + head->base.index * 0x300, 1);
evo_data(push, 0x05000000);
- evo_mthd(push, 0x048c + (nv_crtc->index * 0x300), 1);
+ evo_mthd(push, 0x048c + head->base.index * 0x300, 1);
evo_data(push, 0x00000000);
}
- evo_kick(push, mast);
+ evo_kick(push, core);
}
- nv_crtc->cursor.visible = false;
}
static void
-nv50_crtc_cursor_show_hide(struct nouveau_crtc *nv_crtc, bool show, bool update)
+nv50_head_curs_set(struct nv50_head *head, struct nv50_head_atom *asyh)
{
- struct nv50_mast *mast = nv50_mast(nv_crtc->base.dev);
-
- if (show && nv_crtc->cursor.nvbo && nv_crtc->base.enabled)
- nv50_crtc_cursor_show(nv_crtc);
- else
- nv50_crtc_cursor_hide(nv_crtc);
-
- if (update) {
- u32 *push = evo_wait(mast, 2);
- if (push) {
- evo_mthd(push, 0x0080, 1);
- evo_data(push, 0x00000000);
- evo_kick(push, mast);
+ struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->mast.base;
+ u32 *push;
+ if ((push = evo_wait(core, 5))) {
+ if (core->base.user.oclass < G82_DISP_BASE_CHANNEL_DMA) {
+ evo_mthd(push, 0x0880 + head->base.index * 0x400, 2);
+ evo_data(push, 0x80000000 | (asyh->curs.layout << 26) |
+ (asyh->curs.format << 24));
+ evo_data(push, asyh->curs.offset >> 8);
+ } else
+ if (core->base.user.oclass < GF110_DISP_BASE_CHANNEL_DMA) {
+ evo_mthd(push, 0x0880 + head->base.index * 0x400, 2);
+ evo_data(push, 0x80000000 | (asyh->curs.layout << 26) |
+ (asyh->curs.format << 24));
+ evo_data(push, asyh->curs.offset >> 8);
+ evo_mthd(push, 0x089c + head->base.index * 0x400, 1);
+ evo_data(push, asyh->curs.handle);
+ } else {
+ evo_mthd(push, 0x0480 + head->base.index * 0x300, 2);
+ evo_data(push, 0x80000000 | (asyh->curs.layout << 26) |
+ (asyh->curs.format << 24));
+ evo_data(push, asyh->curs.offset >> 8);
+ evo_mthd(push, 0x048c + head->base.index * 0x300, 1);
+ evo_data(push, asyh->curs.handle);
}
+ evo_kick(push, core);
}
}
static void
-nv50_crtc_dpms(struct drm_crtc *crtc, int mode)
+nv50_head_core_clr(struct nv50_head *head)
{
+ struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->mast.base;
+ u32 *push;
+ if ((push = evo_wait(core, 2))) {
+ if (core->base.user.oclass < GF110_DISP_CORE_CHANNEL_DMA)
+ evo_mthd(push, 0x0874 + head->base.index * 0x400, 1);
+ else
+ evo_mthd(push, 0x0474 + head->base.index * 0x300, 1);
+ evo_data(push, 0x00000000);
+ evo_kick(push, core);
+ }
}
static void
-nv50_crtc_prepare(struct drm_crtc *crtc)
+nv50_head_core_set(struct nv50_head *head, struct nv50_head_atom *asyh)
{
- struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
- struct nv50_mast *mast = nv50_mast(crtc->dev);
+ struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->mast.base;
u32 *push;
+ if ((push = evo_wait(core, 9))) {
+ if (core->base.user.oclass < G82_DISP_CORE_CHANNEL_DMA) {
+ evo_mthd(push, 0x0860 + head->base.index * 0x400, 1);
+ evo_data(push, asyh->core.offset >> 8);
+ evo_mthd(push, 0x0868 + head->base.index * 0x400, 4);
+ evo_data(push, (asyh->core.h << 16) | asyh->core.w);
+ evo_data(push, asyh->core.layout << 20 |
+ (asyh->core.pitch >> 8) << 8 |
+ asyh->core.block);
+ evo_data(push, asyh->core.kind << 16 |
+ asyh->core.format << 8);
+ evo_data(push, asyh->core.handle);
+ evo_mthd(push, 0x08c0 + head->base.index * 0x400, 1);
+ evo_data(push, (asyh->core.y << 16) | asyh->core.x);
+ /* EVO will complain with INVALID_STATE if we have an
+ * active cursor and (re)specify HeadSetContextDmaIso
+ * without also updating HeadSetOffsetCursor.
+ */
+ asyh->set.curs = asyh->curs.visible;
+ } else
+ if (core->base.user.oclass < GF110_DISP_CORE_CHANNEL_DMA) {
+ evo_mthd(push, 0x0860 + head->base.index * 0x400, 1);
+ evo_data(push, asyh->core.offset >> 8);
+ evo_mthd(push, 0x0868 + head->base.index * 0x400, 4);
+ evo_data(push, (asyh->core.h << 16) | asyh->core.w);
+ evo_data(push, asyh->core.layout << 20 |
+ (asyh->core.pitch >> 8) << 8 |
+ asyh->core.block);
+ evo_data(push, asyh->core.format << 8);
+ evo_data(push, asyh->core.handle);
+ evo_mthd(push, 0x08c0 + head->base.index * 0x400, 1);
+ evo_data(push, (asyh->core.y << 16) | asyh->core.x);
+ } else {
+ evo_mthd(push, 0x0460 + head->base.index * 0x300, 1);
+ evo_data(push, asyh->core.offset >> 8);
+ evo_mthd(push, 0x0468 + head->base.index * 0x300, 4);
+ evo_data(push, (asyh->core.h << 16) | asyh->core.w);
+ evo_data(push, asyh->core.layout << 24 |
+ (asyh->core.pitch >> 8) << 8 |
+ asyh->core.block);
+ evo_data(push, asyh->core.format << 8);
+ evo_data(push, asyh->core.handle);
+ evo_mthd(push, 0x04b0 + head->base.index * 0x300, 1);
+ evo_data(push, (asyh->core.y << 16) | asyh->core.x);
+ }
+ evo_kick(push, core);
+ }
+}
- nv50_display_flip_stop(crtc);
-
- push = evo_wait(mast, 6);
- if (push) {
- if (nv50_vers(mast) < G82_DISP_CORE_CHANNEL_DMA) {
- evo_mthd(push, 0x0874 + (nv_crtc->index * 0x400), 1);
- evo_data(push, 0x00000000);
- evo_mthd(push, 0x0840 + (nv_crtc->index * 0x400), 1);
+static void
+nv50_head_lut_clr(struct nv50_head *head)
+{
+ struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->mast.base;
+ u32 *push;
+ if ((push = evo_wait(core, 4))) {
+ if (core->base.user.oclass < G82_DISP_CORE_CHANNEL_DMA) {
+ evo_mthd(push, 0x0840 + (head->base.index * 0x400), 1);
evo_data(push, 0x40000000);
} else
- if (nv50_vers(mast) < GF110_DISP_CORE_CHANNEL_DMA) {
- evo_mthd(push, 0x0874 + (nv_crtc->index * 0x400), 1);
- evo_data(push, 0x00000000);
- evo_mthd(push, 0x0840 + (nv_crtc->index * 0x400), 1);
+ if (core->base.user.oclass < GF110_DISP_CORE_CHANNEL_DMA) {
+ evo_mthd(push, 0x0840 + (head->base.index * 0x400), 1);
evo_data(push, 0x40000000);
- evo_mthd(push, 0x085c + (nv_crtc->index * 0x400), 1);
+ evo_mthd(push, 0x085c + (head->base.index * 0x400), 1);
evo_data(push, 0x00000000);
} else {
- evo_mthd(push, 0x0474 + (nv_crtc->index * 0x300), 1);
- evo_data(push, 0x00000000);
- evo_mthd(push, 0x0440 + (nv_crtc->index * 0x300), 1);
+ evo_mthd(push, 0x0440 + (head->base.index * 0x300), 1);
evo_data(push, 0x03000000);
- evo_mthd(push, 0x045c + (nv_crtc->index * 0x300), 1);
+ evo_mthd(push, 0x045c + (head->base.index * 0x300), 1);
evo_data(push, 0x00000000);
}
-
- evo_kick(push, mast);
+ evo_kick(push, core);
}
-
- nv50_crtc_cursor_show_hide(nv_crtc, false, false);
}
static void
-nv50_crtc_commit(struct drm_crtc *crtc)
+nv50_head_lut_set(struct nv50_head *head, struct nv50_head_atom *asyh)
{
- struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
- struct nv50_mast *mast = nv50_mast(crtc->dev);
+ struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->mast.base;
u32 *push;
-
- push = evo_wait(mast, 32);
- if (push) {
- if (nv50_vers(mast) < G82_DISP_CORE_CHANNEL_DMA) {
- evo_mthd(push, 0x0874 + (nv_crtc->index * 0x400), 1);
- evo_data(push, nv_crtc->fb.handle);
- evo_mthd(push, 0x0840 + (nv_crtc->index * 0x400), 2);
+ if ((push = evo_wait(core, 7))) {
+ if (core->base.user.oclass < G82_DISP_CORE_CHANNEL_DMA) {
+ evo_mthd(push, 0x0840 + (head->base.index * 0x400), 2);
evo_data(push, 0xc0000000);
- evo_data(push, nv_crtc->lut.nvbo->bo.offset >> 8);
+ evo_data(push, asyh->lut.offset >> 8);
} else
- if (nv50_vers(mast) < GF110_DISP_CORE_CHANNEL_DMA) {
- evo_mthd(push, 0x0874 + (nv_crtc->index * 0x400), 1);
- evo_data(push, nv_crtc->fb.handle);
- evo_mthd(push, 0x0840 + (nv_crtc->index * 0x400), 2);
+ if (core->base.user.oclass < GF110_DISP_CORE_CHANNEL_DMA) {
+ evo_mthd(push, 0x0840 + (head->base.index * 0x400), 2);
evo_data(push, 0xc0000000);
- evo_data(push, nv_crtc->lut.nvbo->bo.offset >> 8);
- evo_mthd(push, 0x085c + (nv_crtc->index * 0x400), 1);
- evo_data(push, mast->base.vram.handle);
+ evo_data(push, asyh->lut.offset >> 8);
+ evo_mthd(push, 0x085c + (head->base.index * 0x400), 1);
+ evo_data(push, asyh->lut.handle);
} else {
- evo_mthd(push, 0x0474 + (nv_crtc->index * 0x300), 1);
- evo_data(push, nv_crtc->fb.handle);
- evo_mthd(push, 0x0440 + (nv_crtc->index * 0x300), 4);
+ evo_mthd(push, 0x0440 + (head->base.index * 0x300), 4);
evo_data(push, 0x83000000);
- evo_data(push, nv_crtc->lut.nvbo->bo.offset >> 8);
+ evo_data(push, asyh->lut.offset >> 8);
evo_data(push, 0x00000000);
evo_data(push, 0x00000000);
- evo_mthd(push, 0x045c + (nv_crtc->index * 0x300), 1);
- evo_data(push, mast->base.vram.handle);
- evo_mthd(push, 0x0430 + (nv_crtc->index * 0x300), 1);
+ evo_mthd(push, 0x045c + (head->base.index * 0x300), 1);
+ evo_data(push, asyh->lut.handle);
+ }
+ evo_kick(push, core);
+ }
+}
+
+static void
+nv50_head_mode(struct nv50_head *head, struct nv50_head_atom *asyh)
+{
+ struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->mast.base;
+ struct nv50_head_mode *m = &asyh->mode;
+ u32 *push;
+ if ((push = evo_wait(core, 14))) {
+ if (core->base.user.oclass < GF110_DISP_CORE_CHANNEL_DMA) {
+ evo_mthd(push, 0x0804 + (head->base.index * 0x400), 2);
+ evo_data(push, 0x00800000 | m->clock);
+ evo_data(push, m->interlace ? 0x00000002 : 0x00000000);
+ evo_mthd(push, 0x0810 + (head->base.index * 0x400), 7);
+ evo_data(push, 0x00000000);
+ evo_data(push, (m->v.active << 16) | m->h.active );
+ evo_data(push, (m->v.synce << 16) | m->h.synce );
+ evo_data(push, (m->v.blanke << 16) | m->h.blanke );
+ evo_data(push, (m->v.blanks << 16) | m->h.blanks );
+ evo_data(push, (m->v.blank2e << 16) | m->v.blank2s);
+ evo_data(push, asyh->mode.v.blankus);
+ evo_mthd(push, 0x082c + (head->base.index * 0x400), 1);
+ evo_data(push, 0x00000000);
+ } else {
+ evo_mthd(push, 0x0410 + (head->base.index * 0x300), 6);
+ evo_data(push, 0x00000000);
+ evo_data(push, (m->v.active << 16) | m->h.active );
+ evo_data(push, (m->v.synce << 16) | m->h.synce );
+ evo_data(push, (m->v.blanke << 16) | m->h.blanke );
+ evo_data(push, (m->v.blanks << 16) | m->h.blanks );
+ evo_data(push, (m->v.blank2e << 16) | m->v.blank2s);
+ evo_mthd(push, 0x042c + (head->base.index * 0x300), 2);
+ evo_data(push, 0x00000000); /* ??? */
evo_data(push, 0xffffff00);
+ evo_mthd(push, 0x0450 + (head->base.index * 0x300), 3);
+ evo_data(push, m->clock * 1000);
+ evo_data(push, 0x00200000); /* ??? */
+ evo_data(push, m->clock * 1000);
}
+ evo_kick(push, core);
+ }
+}
- evo_kick(push, mast);
+static void
+nv50_head_view(struct nv50_head *head, struct nv50_head_atom *asyh)
+{
+ struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->mast.base;
+ u32 *push;
+ if ((push = evo_wait(core, 10))) {
+ if (core->base.user.oclass < GF110_DISP_CORE_CHANNEL_DMA) {
+ evo_mthd(push, 0x08a4 + (head->base.index * 0x400), 1);
+ evo_data(push, 0x00000000);
+ evo_mthd(push, 0x08c8 + (head->base.index * 0x400), 1);
+ evo_data(push, (asyh->view.iH << 16) | asyh->view.iW);
+ evo_mthd(push, 0x08d8 + (head->base.index * 0x400), 2);
+ evo_data(push, (asyh->view.oH << 16) | asyh->view.oW);
+ evo_data(push, (asyh->view.oH << 16) | asyh->view.oW);
+ } else {
+ evo_mthd(push, 0x0494 + (head->base.index * 0x300), 1);
+ evo_data(push, 0x00000000);
+ evo_mthd(push, 0x04b8 + (head->base.index * 0x300), 1);
+ evo_data(push, (asyh->view.iH << 16) | asyh->view.iW);
+ evo_mthd(push, 0x04c0 + (head->base.index * 0x300), 3);
+ evo_data(push, (asyh->view.oH << 16) | asyh->view.oW);
+ evo_data(push, (asyh->view.oH << 16) | asyh->view.oW);
+ evo_data(push, (asyh->view.oH << 16) | asyh->view.oW);
+ }
+ evo_kick(push, core);
}
+}
- nv50_crtc_cursor_show_hide(nv_crtc, true, true);
- nv50_display_flip_next(crtc, crtc->primary->fb, NULL, 1);
+static void
+nv50_head_flush_clr(struct nv50_head *head, struct nv50_head_atom *asyh, bool y)
+{
+ if (asyh->clr.core && (!asyh->set.core || y))
+ nv50_head_lut_clr(head);
+ if (asyh->clr.core && (!asyh->set.core || y))
+ nv50_head_core_clr(head);
+ if (asyh->clr.curs && (!asyh->set.curs || y))
+ nv50_head_curs_clr(head);
}
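+/* flush_clr() runs first to disable layers being removed; a clear is
+ * skipped when the same state is about to be re-set, unless y forces it
+ * (the LUT has no flag of its own and is keyed off clr.core).
+ * flush_set() then programs every piece of state flagged in asyh->set.
+ */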
-static bool
-nv50_crtc_mode_fixup(struct drm_crtc *crtc, const struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
+static void
+nv50_head_flush_set(struct nv50_head *head, struct nv50_head_atom *asyh)
{
- drm_mode_set_crtcinfo(adjusted_mode, CRTC_INTERLACE_HALVE_V);
- return true;
+ if (asyh->set.view ) nv50_head_view (head, asyh);
+ if (asyh->set.mode ) nv50_head_mode (head, asyh);
+ if (asyh->set.core ) nv50_head_lut_set (head, asyh);
+ if (asyh->set.core ) nv50_head_core_set(head, asyh);
+ if (asyh->set.curs ) nv50_head_curs_set(head, asyh);
+ if (asyh->set.base ) nv50_head_base (head, asyh);
+ if (asyh->set.ovly ) nv50_head_ovly (head, asyh);
+ if (asyh->set.dither ) nv50_head_dither (head, asyh);
+ if (asyh->set.procamp) nv50_head_procamp (head, asyh);
}
-static int
-nv50_crtc_swap_fbs(struct drm_crtc *crtc, struct drm_framebuffer *old_fb)
+static void
+nv50_head_atomic_check_procamp(struct nv50_head_atom *armh,
+ struct nv50_head_atom *asyh,
+ struct nouveau_conn_atom *asyc)
{
- struct nouveau_framebuffer *nvfb = nouveau_framebuffer(crtc->primary->fb);
- struct nv50_head *head = nv50_head(crtc);
- int ret;
+ const int vib = asyc->procamp.color_vibrance - 100;
+ const int hue = asyc->procamp.vibrant_hue - 90;
+ const int adj = (vib > 0) ? 50 : 0;
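+ /* e.g. color_vibrance = 150 gives vib = 50, so sat.cos becomes
+ * ((50 * 2047 + 50) / 100) & 0xfff = 1024; vibrant_hue = 90 is
+ * neutral (hue = 0, sat.sin = 0).
+ */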
+ asyh->procamp.sat.cos = ((vib * 2047 + adj) / 100) & 0xfff;
+ asyh->procamp.sat.sin = ((hue * 2047) / 100) & 0xfff;
+ asyh->set.procamp = true;
+}
+
+static void
+nv50_head_atomic_check_dither(struct nv50_head_atom *armh,
+ struct nv50_head_atom *asyh,
+ struct nouveau_conn_atom *asyc)
+{
+ struct drm_connector *connector = asyc->state.connector;
+ u32 mode = 0x00;
+
+ if (asyc->dither.mode == DITHERING_MODE_AUTO) {
+ if (asyh->base.depth > connector->display_info.bpc * 3)
+ mode = DITHERING_MODE_DYNAMIC2X2;
+ } else {
+ mode = asyc->dither.mode;
+ }
- ret = nouveau_bo_pin(nvfb->nvbo, TTM_PL_FLAG_VRAM, true);
- if (ret == 0) {
- if (head->image)
- nouveau_bo_unpin(head->image);
- nouveau_bo_ref(nvfb->nvbo, &head->image);
+ if (asyc->dither.depth == DITHERING_DEPTH_AUTO) {
+ if (connector->display_info.bpc >= 8)
+ mode |= DITHERING_DEPTH_8BPC;
+ } else {
+ mode |= asyc->dither.depth;
}
- return ret;
+ asyh->dither.enable = mode;
+ asyh->dither.bits = mode >> 1;
+ asyh->dither.mode = mode >> 3;
+ asyh->set.dither = true;
}
-static int
-nv50_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *umode,
- struct drm_display_mode *mode, int x, int y,
- struct drm_framebuffer *old_fb)
+static void
+nv50_head_atomic_check_view(struct nv50_head_atom *armh,
+ struct nv50_head_atom *asyh,
+ struct nouveau_conn_atom *asyc)
{
- struct nv50_mast *mast = nv50_mast(crtc->dev);
- struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
- struct nouveau_connector *nv_connector;
- u32 ilace = (mode->flags & DRM_MODE_FLAG_INTERLACE) ? 2 : 1;
- u32 vscan = (mode->flags & DRM_MODE_FLAG_DBLSCAN) ? 2 : 1;
- u32 hactive, hsynce, hbackp, hfrontp, hblanke, hblanks;
- u32 vactive, vsynce, vbackp, vfrontp, vblanke, vblanks;
- u32 vblan2e = 0, vblan2s = 1, vblankus = 0;
- u32 *push;
- int ret;
-
- hactive = mode->htotal;
- hsynce = mode->hsync_end - mode->hsync_start - 1;
- hbackp = mode->htotal - mode->hsync_end;
- hblanke = hsynce + hbackp;
- hfrontp = mode->hsync_start - mode->hdisplay;
- hblanks = mode->htotal - hfrontp - 1;
-
- vactive = mode->vtotal * vscan / ilace;
- vsynce = ((mode->vsync_end - mode->vsync_start) * vscan / ilace) - 1;
- vbackp = (mode->vtotal - mode->vsync_end) * vscan / ilace;
- vblanke = vsynce + vbackp;
- vfrontp = (mode->vsync_start - mode->vdisplay) * vscan / ilace;
- vblanks = vactive - vfrontp - 1;
- /* XXX: Safe underestimate, even "0" works */
- vblankus = (vactive - mode->vdisplay - 2) * hactive;
- vblankus *= 1000;
- vblankus /= mode->clock;
+ struct drm_connector *connector = asyc->state.connector;
+ struct drm_display_mode *omode = &asyh->state.adjusted_mode;
+ struct drm_display_mode *umode = &asyh->state.mode;
+ int mode = asyc->scaler.mode;
+ struct edid *edid;
+
+ if (connector->edid_blob_ptr)
+ edid = (struct edid *)connector->edid_blob_ptr->data;
+ else
+ edid = NULL;
- if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
- vblan2e = vactive + vsynce + vbackp;
- vblan2s = vblan2e + (mode->vdisplay * vscan / ilace);
- vactive = (vactive * 2) + 1;
+ if (!asyc->scaler.full) {
+ if (mode == DRM_MODE_SCALE_NONE)
+ omode = umode;
+ } else {
+ /* Non-EDID LVDS/eDP mode. */
+ mode = DRM_MODE_SCALE_FULLSCREEN;
}
- ret = nv50_crtc_swap_fbs(crtc, old_fb);
- if (ret)
- return ret;
+ asyh->view.iW = umode->hdisplay;
+ asyh->view.iH = umode->vdisplay;
+ asyh->view.oW = omode->hdisplay;
+ asyh->view.oH = omode->vdisplay;
+ if (omode->flags & DRM_MODE_FLAG_DBLSCAN)
+ asyh->view.oH *= 2;
- push = evo_wait(mast, 64);
- if (push) {
- if (nv50_vers(mast) < GF110_DISP_CORE_CHANNEL_DMA) {
- evo_mthd(push, 0x0804 + (nv_crtc->index * 0x400), 2);
- evo_data(push, 0x00800000 | mode->clock);
- evo_data(push, (ilace == 2) ? 2 : 0);
- evo_mthd(push, 0x0810 + (nv_crtc->index * 0x400), 6);
- evo_data(push, 0x00000000);
- evo_data(push, (vactive << 16) | hactive);
- evo_data(push, ( vsynce << 16) | hsynce);
- evo_data(push, (vblanke << 16) | hblanke);
- evo_data(push, (vblanks << 16) | hblanks);
- evo_data(push, (vblan2e << 16) | vblan2s);
- evo_mthd(push, 0x082c + (nv_crtc->index * 0x400), 1);
- evo_data(push, 0x00000000);
- evo_mthd(push, 0x0900 + (nv_crtc->index * 0x400), 2);
- evo_data(push, 0x00000311);
- evo_data(push, 0x00000100);
+ /* Add overscan compensation if necessary; this keeps the aspect
+ * ratio the same as the backend mode unless the user overrides it
+ * by setting both the hborder and vborder properties.
+ */
+ if ((asyc->scaler.underscan.mode == UNDERSCAN_ON ||
+ (asyc->scaler.underscan.mode == UNDERSCAN_AUTO &&
+ drm_detect_hdmi_monitor(edid)))) {
+ u32 bX = asyc->scaler.underscan.hborder;
+ u32 bY = asyc->scaler.underscan.vborder;
+ u32 r = (asyh->view.oH << 19) / asyh->view.oW;
+
+ if (bX) {
+ asyh->view.oW -= (bX * 2);
+ if (bY) asyh->view.oH -= (bY * 2);
+ else asyh->view.oH = ((asyh->view.oW * r) + (r / 2)) >> 19;
} else {
- evo_mthd(push, 0x0410 + (nv_crtc->index * 0x300), 6);
- evo_data(push, 0x00000000);
- evo_data(push, (vactive << 16) | hactive);
- evo_data(push, ( vsynce << 16) | hsynce);
- evo_data(push, (vblanke << 16) | hblanke);
- evo_data(push, (vblanks << 16) | hblanks);
- evo_data(push, (vblan2e << 16) | vblan2s);
- evo_mthd(push, 0x042c + (nv_crtc->index * 0x300), 1);
- evo_data(push, 0x00000000); /* ??? */
- evo_mthd(push, 0x0450 + (nv_crtc->index * 0x300), 3);
- evo_data(push, mode->clock * 1000);
- evo_data(push, 0x00200000); /* ??? */
- evo_data(push, mode->clock * 1000);
- evo_mthd(push, 0x04d0 + (nv_crtc->index * 0x300), 2);
- evo_data(push, 0x00000311);
- evo_data(push, 0x00000100);
+ asyh->view.oW -= (asyh->view.oW >> 4) + 32;
+ if (bY) asyh->view.oH -= (bY * 2);
+ else asyh->view.oH = ((asyh->view.oW * r) + (r / 2)) >> 19;
}
+ }
- evo_kick(push, mast);
+ /* Handle CENTER/ASPECT scaling, taking into account the areas
+ * removed already for overscan compensation.
+ */
+ switch (mode) {
+ case DRM_MODE_SCALE_CENTER:
+ asyh->view.oW = min((u16)umode->hdisplay, asyh->view.oW);
+ asyh->view.oH = min((u16)umode->vdisplay, asyh->view.oH);
+ /* fall-through */
+ case DRM_MODE_SCALE_ASPECT:
+ if (asyh->view.oH < asyh->view.oW) {
+ u32 r = (asyh->view.iW << 19) / asyh->view.iH;
+ asyh->view.oW = ((asyh->view.oH * r) + (r / 2)) >> 19;
+ } else {
+ u32 r = (asyh->view.iH << 19) / asyh->view.iW;
+ asyh->view.oH = ((asyh->view.oW * r) + (r / 2)) >> 19;
+ }
+ break;
+ default:
+ break;
}
- nv_connector = nouveau_crtc_connector_get(nv_crtc);
- nv50_crtc_set_dither(nv_crtc, false);
- nv50_crtc_set_scale(nv_crtc, false);
+ asyh->set.view = true;
+}
- /* G94 only accepts this after setting scale */
- if (nv50_vers(mast) < GF110_DISP_CORE_CHANNEL_DMA)
- nv50_crtc_set_raster_vblank_dmi(nv_crtc, vblankus);
+static void
+nv50_head_atomic_check_mode(struct nv50_head *head, struct nv50_head_atom *asyh)
+{
+ struct drm_display_mode *mode = &asyh->state.adjusted_mode;
+ u32 ilace = (mode->flags & DRM_MODE_FLAG_INTERLACE) ? 2 : 1;
+ u32 vscan = (mode->flags & DRM_MODE_FLAG_DBLSCAN) ? 2 : 1;
+ u32 hbackp = mode->htotal - mode->hsync_end;
+ u32 vbackp = (mode->vtotal - mode->vsync_end) * vscan / ilace;
+ u32 hfrontp = mode->hsync_start - mode->hdisplay;
+ u32 vfrontp = (mode->vsync_start - mode->vdisplay) * vscan / ilace;
+ struct nv50_head_mode *m = &asyh->mode;
+
+ m->h.active = mode->htotal;
+ m->h.synce = mode->hsync_end - mode->hsync_start - 1;
+ m->h.blanke = m->h.synce + hbackp;
+ m->h.blanks = mode->htotal - hfrontp - 1;
+
+ m->v.active = mode->vtotal * vscan / ilace;
+ m->v.synce = ((mode->vsync_end - mode->vsync_start) * vscan / ilace) - 1;
+ m->v.blanke = m->v.synce + vbackp;
+ m->v.blanks = m->v.active - vfrontp - 1;
+
+ /*XXX: Safe underestimate, even "0" works */
+ m->v.blankus = (m->v.active - mode->vdisplay - 2) * m->h.active;
+ m->v.blankus *= 1000;
+ m->v.blankus /= mode->clock;
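+ /* e.g. a CEA 1080p60 mode (2200x1125 total, 148500kHz pixel clock)
+ * gives (1125 - 1080 - 2) * 2200 * 1000 / 148500 ~= 637us of usable
+ * vblank time.
+ */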
- nv50_crtc_set_color_vibrance(nv_crtc, false);
- nv50_crtc_set_image(nv_crtc, crtc->primary->fb, x, y, false);
- return 0;
+ if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
+ m->v.blank2e = m->v.active + m->v.synce + vbackp;
+ m->v.blank2s = m->v.blank2e + (mode->vdisplay * vscan / ilace);
+ m->v.active = (m->v.active * 2) + 1;
+ m->interlace = true;
+ } else {
+ m->v.blank2e = 0;
+ m->v.blank2s = 1;
+ m->interlace = false;
+ }
+ m->clock = mode->clock;
+
+ drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V);
+ asyh->set.mode = true;
}
static int
-nv50_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
- struct drm_framebuffer *old_fb)
+nv50_head_atomic_check(struct drm_crtc *crtc, struct drm_crtc_state *state)
{
struct nouveau_drm *drm = nouveau_drm(crtc->dev);
- struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
- int ret;
+ struct nv50_disp *disp = nv50_disp(crtc->dev);
+ struct nv50_head *head = nv50_head(crtc);
+ struct nv50_head_atom *armh = nv50_head_atom(crtc->state);
+ struct nv50_head_atom *asyh = nv50_head_atom(state);
+ struct nouveau_conn_atom *asyc = NULL;
+ struct drm_connector_state *conns;
+ struct drm_connector *conn;
+ int i;
- if (!crtc->primary->fb) {
- NV_DEBUG(drm, "No FB bound\n");
- return 0;
+ NV_ATOMIC(drm, "%s atomic_check %d\n", crtc->name, asyh->state.active);
+ if (asyh->state.active) {
+ for_each_connector_in_state(asyh->state.state, conn, conns, i) {
+ if (conns->crtc == crtc) {
+ asyc = nouveau_conn_atom(conns);
+ break;
+ }
+ }
+
+ if (armh->state.active) {
+ if (asyc) {
+ if (asyh->state.mode_changed)
+ asyc->set.scaler = true;
+ if (armh->base.depth != asyh->base.depth)
+ asyc->set.dither = true;
+ }
+ } else {
+ if (asyc)
+ asyc->set.mask = ~0;
+ asyh->set.mask = ~0;
+ }
+
+ if (asyh->state.mode_changed)
+ nv50_head_atomic_check_mode(head, asyh);
+
+ if (asyc) {
+ if (asyc->set.scaler)
+ nv50_head_atomic_check_view(armh, asyh, asyc);
+ if (asyc->set.dither)
+ nv50_head_atomic_check_dither(armh, asyh, asyc);
+ if (asyc->set.procamp)
+ nv50_head_atomic_check_procamp(armh, asyh, asyc);
+ }
+
+ if ((asyh->core.visible = (asyh->base.cpp != 0))) {
+ asyh->core.x = asyh->base.x;
+ asyh->core.y = asyh->base.y;
+ asyh->core.w = asyh->base.w;
+ asyh->core.h = asyh->base.h;
+ } else
+ if ((asyh->core.visible = asyh->curs.visible)) {
+ /*XXX: We need to either find some way of having the
+ * primary base layer appear black, while still
+ * being able to display the other layers, or we
+ * need to allocate a dummy black surface here.
+ */
+ asyh->core.x = 0;
+ asyh->core.y = 0;
+ asyh->core.w = asyh->state.mode.hdisplay;
+ asyh->core.h = asyh->state.mode.vdisplay;
+ }
+ asyh->core.handle = disp->mast.base.vram.handle;
+ asyh->core.offset = 0;
+ asyh->core.format = 0xcf;
+ asyh->core.kind = 0;
+ asyh->core.layout = 1;
+ asyh->core.block = 0;
+ asyh->core.pitch = ALIGN(asyh->core.w, 64) * 4;
+ asyh->lut.handle = disp->mast.base.vram.handle;
+ asyh->lut.offset = head->base.lut.nvbo->bo.offset;
+ asyh->set.base = armh->base.cpp != asyh->base.cpp;
+ asyh->set.ovly = armh->ovly.cpp != asyh->ovly.cpp;
+ } else {
+ asyh->core.visible = false;
+ asyh->curs.visible = false;
+ asyh->base.cpp = 0;
+ asyh->ovly.cpp = 0;
}
- ret = nv50_crtc_swap_fbs(crtc, old_fb);
- if (ret)
- return ret;
+ if (!drm_atomic_crtc_needs_modeset(&asyh->state)) {
+ if (asyh->core.visible) {
+ if (memcmp(&armh->core, &asyh->core, sizeof(asyh->core)))
+ asyh->set.core = true;
+ } else
+ if (armh->core.visible) {
+ asyh->clr.core = true;
+ }
- nv50_display_flip_stop(crtc);
- nv50_crtc_set_image(nv_crtc, crtc->primary->fb, x, y, true);
- nv50_display_flip_next(crtc, crtc->primary->fb, NULL, 1);
- return 0;
-}
+ if (asyh->curs.visible) {
+ if (memcmp(&armh->curs, &asyh->curs, sizeof(asyh->curs)))
+ asyh->set.curs = true;
+ } else
+ if (armh->curs.visible) {
+ asyh->clr.curs = true;
+ }
+ } else {
+ asyh->clr.core = armh->core.visible;
+ asyh->clr.curs = armh->curs.visible;
+ asyh->set.core = asyh->core.visible;
+ asyh->set.curs = asyh->curs.visible;
+ }
-static int
-nv50_crtc_mode_set_base_atomic(struct drm_crtc *crtc,
- struct drm_framebuffer *fb, int x, int y,
- enum mode_set_atomic state)
-{
- struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
- nv50_display_flip_stop(crtc);
- nv50_crtc_set_image(nv_crtc, fb, x, y, true);
+ if (asyh->clr.mask || asyh->set.mask)
+ nv50_atom(asyh->state.state)->lock_core = true;
return 0;
}
static void
-nv50_crtc_lut_load(struct drm_crtc *crtc)
+nv50_head_lut_load(struct drm_crtc *crtc)
{
struct nv50_disp *disp = nv50_disp(crtc->dev);
struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
@@ -1292,64 +2205,95 @@ nv50_crtc_lut_load(struct drm_crtc *crtc)
}
}
-static void
-nv50_crtc_disable(struct drm_crtc *crtc)
+static int
+nv50_head_mode_set_base_atomic(struct drm_crtc *crtc,
+ struct drm_framebuffer *fb, int x, int y,
+ enum mode_set_atomic state)
{
- struct nv50_head *head = nv50_head(crtc);
- evo_sync(crtc->dev);
- if (head->image)
- nouveau_bo_unpin(head->image);
- nouveau_bo_ref(NULL, &head->image);
+ WARN_ON(1);
+ return 0;
}
+static const struct drm_crtc_helper_funcs
+nv50_head_help = {
+ .mode_set_base_atomic = nv50_head_mode_set_base_atomic,
+ .load_lut = nv50_head_lut_load,
+ .atomic_check = nv50_head_atomic_check,
+};
+
+/* This is identical to the version in the atomic helpers, except that
+ * it supports non-vblanked ("async") page flips.
+ */
static int
-nv50_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
- uint32_t handle, uint32_t width, uint32_t height)
+nv50_head_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
+ struct drm_pending_vblank_event *event, u32 flags)
{
- struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
- struct drm_gem_object *gem = NULL;
- struct nouveau_bo *nvbo = NULL;
+ struct drm_plane *plane = crtc->primary;
+ struct drm_atomic_state *state;
+ struct drm_plane_state *plane_state;
+ struct drm_crtc_state *crtc_state;
int ret = 0;
- if (handle) {
- if (width != 64 || height != 64)
- return -EINVAL;
+ state = drm_atomic_state_alloc(plane->dev);
+ if (!state)
+ return -ENOMEM;
- gem = drm_gem_object_lookup(file_priv, handle);
- if (unlikely(!gem))
- return -ENOENT;
- nvbo = nouveau_gem_object(gem);
+ state->acquire_ctx = drm_modeset_legacy_acquire_ctx(crtc);
+retry:
+ crtc_state = drm_atomic_get_crtc_state(state, crtc);
+ if (IS_ERR(crtc_state)) {
+ ret = PTR_ERR(crtc_state);
+ goto fail;
+ }
+ crtc_state->event = event;
- ret = nouveau_bo_pin(nvbo, TTM_PL_FLAG_VRAM, true);
+ plane_state = drm_atomic_get_plane_state(state, plane);
+ if (IS_ERR(plane_state)) {
+ ret = PTR_ERR(plane_state);
+ goto fail;
}
- if (ret == 0) {
- if (nv_crtc->cursor.nvbo)
- nouveau_bo_unpin(nv_crtc->cursor.nvbo);
- nouveau_bo_ref(nvbo, &nv_crtc->cursor.nvbo);
+ ret = drm_atomic_set_crtc_for_plane(plane_state, crtc);
+ if (ret != 0)
+ goto fail;
+ drm_atomic_set_fb_for_plane(plane_state, fb);
+
+ /* Make sure we don't accidentally do a full modeset. */
+ state->allow_modeset = false;
+ if (!crtc_state->active) {
+ DRM_DEBUG_ATOMIC("[CRTC:%d] disabled, rejecting legacy flip\n",
+ crtc->base.id);
+ ret = -EINVAL;
+ goto fail;
}
- drm_gem_object_unreference_unlocked(gem);
- nv50_crtc_cursor_show_hide(nv_crtc, true, true);
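+ /* An interval of 0 appears to request an immediate flip instead of
+ * waiting for the next vblank.
+ */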
+ if (flags & DRM_MODE_PAGE_FLIP_ASYNC)
+ nv50_wndw_atom(plane_state)->interval = 0;
+
+ ret = drm_atomic_nonblocking_commit(state);
+fail:
+ if (ret == -EDEADLK)
+ goto backoff;
+
+ drm_atomic_state_put(state);
return ret;
-}
-static int
-nv50_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
-{
- struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
- struct nv50_curs *curs = nv50_curs(crtc);
- struct nv50_chan *chan = nv50_chan(curs);
- nvif_wr32(&chan->user, 0x0084, (y << 16) | (x & 0xffff));
- nvif_wr32(&chan->user, 0x0080, 0x00000000);
+backoff:
+ drm_atomic_state_clear(state);
+ drm_atomic_legacy_backoff(state);
- nv_crtc->cursor_saved_x = x;
- nv_crtc->cursor_saved_y = y;
- return 0;
+ /*
+ * Someone might have exchanged the framebuffer while we dropped locks
+ * in the backoff code. We need to fix up the fb refcount tracking the
+ * core does for us.
+ */
+ plane->old_fb = plane->fb;
+
+ goto retry;
}
static int
-nv50_crtc_gamma_set(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b,
+nv50_head_gamma_set(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b,
uint32_t size)
{
struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
@@ -1361,47 +2305,71 @@ nv50_crtc_gamma_set(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b,
nv_crtc->lut.b[i] = b[i];
}
- nv50_crtc_lut_load(crtc);
-
+ nv50_head_lut_load(crtc);
return 0;
}
static void
-nv50_crtc_cursor_restore(struct nouveau_crtc *nv_crtc, int x, int y)
+nv50_head_atomic_destroy_state(struct drm_crtc *crtc,
+ struct drm_crtc_state *state)
{
- nv50_crtc_cursor_move(&nv_crtc->base, x, y);
+ struct nv50_head_atom *asyh = nv50_head_atom(state);
+ __drm_atomic_helper_crtc_destroy_state(&asyh->state);
+ kfree(asyh);
+}
- nv50_crtc_cursor_show_hide(nv_crtc, true, true);
+static struct drm_crtc_state *
+nv50_head_atomic_duplicate_state(struct drm_crtc *crtc)
+{
+ struct nv50_head_atom *armh = nv50_head_atom(crtc->state);
+ struct nv50_head_atom *asyh;
+ if (!(asyh = kmalloc(sizeof(*asyh), GFP_KERNEL)))
+ return NULL;
+ __drm_atomic_helper_crtc_duplicate_state(crtc, &asyh->state);
+ asyh->view = armh->view;
+ asyh->mode = armh->mode;
+ asyh->lut = armh->lut;
+ asyh->core = armh->core;
+ asyh->curs = armh->curs;
+ asyh->base = armh->base;
+ asyh->ovly = armh->ovly;
+ asyh->dither = armh->dither;
+ asyh->procamp = armh->procamp;
+ asyh->clr.mask = 0;
+ asyh->set.mask = 0;
+ return &asyh->state;
+}
+
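+/* Local helper following the DRM atomic reset pattern: free any existing
+ * CRTC state and install the freshly allocated one.
+ */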
+static void
+__drm_atomic_helper_crtc_reset(struct drm_crtc *crtc,
+ struct drm_crtc_state *state)
+{
+ if (crtc->state)
+ crtc->funcs->atomic_destroy_state(crtc, crtc->state);
+ crtc->state = state;
+ crtc->state->crtc = crtc;
}
static void
-nv50_crtc_destroy(struct drm_crtc *crtc)
+nv50_head_reset(struct drm_crtc *crtc)
+{
+ struct nv50_head_atom *asyh;
+
+ if (WARN_ON(!(asyh = kzalloc(sizeof(*asyh), GFP_KERNEL))))
+ return;
+
+ __drm_atomic_helper_crtc_reset(crtc, &asyh->state);
+}
+
+static void
+nv50_head_destroy(struct drm_crtc *crtc)
{
struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
struct nv50_disp *disp = nv50_disp(crtc->dev);
struct nv50_head *head = nv50_head(crtc);
- struct nv50_fbdma *fbdma;
-
- list_for_each_entry(fbdma, &disp->fbdma, head) {
- nvif_object_fini(&fbdma->base[nv_crtc->index]);
- }
nv50_dmac_destroy(&head->ovly.base, disp->disp);
nv50_pioc_destroy(&head->oimm.base);
- nv50_dmac_destroy(&head->sync.base, disp->disp);
- nv50_pioc_destroy(&head->curs.base);
-
- /*XXX: this shouldn't be necessary, but the core doesn't call
- * disconnect() during the cleanup paths
- */
- if (head->image)
- nouveau_bo_unpin(head->image);
- nouveau_bo_ref(NULL, &head->image);
-
- /*XXX: ditto */
- if (nv_crtc->cursor.nvbo)
- nouveau_bo_unpin(nv_crtc->cursor.nvbo);
- nouveau_bo_ref(NULL, &nv_crtc->cursor.nvbo);
nouveau_bo_unmap(nv_crtc->lut.nvbo);
if (nv_crtc->lut.nvbo)
@@ -1412,34 +2380,27 @@ nv50_crtc_destroy(struct drm_crtc *crtc)
kfree(crtc);
}
-static const struct drm_crtc_helper_funcs nv50_crtc_hfunc = {
- .dpms = nv50_crtc_dpms,
- .prepare = nv50_crtc_prepare,
- .commit = nv50_crtc_commit,
- .mode_fixup = nv50_crtc_mode_fixup,
- .mode_set = nv50_crtc_mode_set,
- .mode_set_base = nv50_crtc_mode_set_base,
- .mode_set_base_atomic = nv50_crtc_mode_set_base_atomic,
- .load_lut = nv50_crtc_lut_load,
- .disable = nv50_crtc_disable,
-};
-
-static const struct drm_crtc_funcs nv50_crtc_func = {
- .cursor_set = nv50_crtc_cursor_set,
- .cursor_move = nv50_crtc_cursor_move,
- .gamma_set = nv50_crtc_gamma_set,
- .set_config = nouveau_crtc_set_config,
- .destroy = nv50_crtc_destroy,
- .page_flip = nouveau_crtc_page_flip,
+static const struct drm_crtc_funcs
+nv50_head_func = {
+ .reset = nv50_head_reset,
+ .gamma_set = nv50_head_gamma_set,
+ .destroy = nv50_head_destroy,
+ .set_config = drm_atomic_helper_set_config,
+ .page_flip = nv50_head_page_flip,
+ .set_property = drm_atomic_helper_crtc_set_property,
+ .atomic_duplicate_state = nv50_head_atomic_duplicate_state,
+ .atomic_destroy_state = nv50_head_atomic_destroy_state,
};
static int
-nv50_crtc_create(struct drm_device *dev, int index)
+nv50_head_create(struct drm_device *dev, int index)
{
struct nouveau_drm *drm = nouveau_drm(dev);
struct nvif_device *device = &drm->device;
struct nv50_disp *disp = nv50_disp(dev);
struct nv50_head *head;
+ struct nv50_base *base;
+ struct nv50_curs *curs;
struct drm_crtc *crtc;
int ret, i;
@@ -1448,21 +2409,25 @@ nv50_crtc_create(struct drm_device *dev, int index)
return -ENOMEM;
head->base.index = index;
- head->base.set_dither = nv50_crtc_set_dither;
- head->base.set_scale = nv50_crtc_set_scale;
- head->base.set_color_vibrance = nv50_crtc_set_color_vibrance;
- head->base.color_vibrance = 50;
- head->base.vibrant_hue = 0;
- head->base.cursor.set_pos = nv50_crtc_cursor_restore;
for (i = 0; i < 256; i++) {
head->base.lut.r[i] = i << 8;
head->base.lut.g[i] = i << 8;
head->base.lut.b[i] = i << 8;
}
+ ret = nv50_base_new(drm, head, &base);
+ if (ret == 0)
+ ret = nv50_curs_new(drm, head, &curs);
+ if (ret) {
+ kfree(head);
+ return ret;
+ }
+
crtc = &head->base.base;
- drm_crtc_init(dev, crtc, &nv50_crtc_func);
- drm_crtc_helper_add(crtc, &nv50_crtc_hfunc);
+ drm_crtc_init_with_planes(dev, crtc, &base->wndw.plane,
+ &curs->wndw.plane, &nv50_head_func,
+ "head-%d", head->base.index);
+ drm_crtc_helper_add(crtc, &nv50_head_help);
drm_mode_crtc_set_gamma_size(crtc, 256);
ret = nouveau_bo_new(dev, 8192, 0x100, TTM_PL_FLAG_VRAM,
@@ -1481,20 +2446,6 @@ nv50_crtc_create(struct drm_device *dev, int index)
if (ret)
goto out;
- /* allocate cursor resources */
- ret = nv50_curs_create(device, disp->disp, index, &head->curs);
- if (ret)
- goto out;
-
- /* allocate page flip / sync resources */
- ret = nv50_base_create(device, disp->disp, index, disp->sync->bo.offset,
- &head->sync);
- if (ret)
- goto out;
-
- head->sync.addr = EVO_FLIP_SEM0(index);
- head->sync.data = 0x00000000;
-
/* allocate overlay resources */
ret = nv50_oimm_create(device, disp->disp, index, &head->oimm);
if (ret)
@@ -1507,43 +2458,64 @@ nv50_crtc_create(struct drm_device *dev, int index)
out:
if (ret)
- nv50_crtc_destroy(crtc);
+ nv50_head_destroy(crtc);
return ret;
}
/******************************************************************************
- * Encoder helpers
+ * Output path helpers
*****************************************************************************/
-static bool
-nv50_encoder_mode_fixup(struct drm_encoder *encoder,
- const struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
+static int
+nv50_outp_atomic_check_view(struct drm_encoder *encoder,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state,
+ struct drm_display_mode *native_mode)
{
- struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
- struct nouveau_connector *nv_connector;
+ struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
+ struct drm_display_mode *mode = &crtc_state->mode;
+ struct drm_connector *connector = conn_state->connector;
+ struct nouveau_conn_atom *asyc = nouveau_conn_atom(conn_state);
+ struct nouveau_drm *drm = nouveau_drm(encoder->dev);
+
+ NV_ATOMIC(drm, "%s atomic_check\n", encoder->name);
+ asyc->scaler.full = false;
+ if (!native_mode)
+ return 0;
- nv_connector = nouveau_encoder_connector_get(nv_encoder);
- if (nv_connector && nv_connector->native_mode) {
- nv_connector->scaling_full = false;
- if (nv_connector->scaling_mode == DRM_MODE_SCALE_NONE) {
- switch (nv_connector->type) {
- case DCB_CONNECTOR_LVDS:
- case DCB_CONNECTOR_LVDS_SPWG:
- case DCB_CONNECTOR_eDP:
- /* force use of scaler for non-edid modes */
- if (adjusted_mode->type & DRM_MODE_TYPE_DRIVER)
- return true;
- nv_connector->scaling_full = true;
+ if (asyc->scaler.mode == DRM_MODE_SCALE_NONE) {
+ switch (connector->connector_type) {
+ case DRM_MODE_CONNECTOR_LVDS:
+ case DRM_MODE_CONNECTOR_eDP:
+ /* Force use of scaler for non-EDID modes. */
+ if (adjusted_mode->type & DRM_MODE_TYPE_DRIVER)
break;
- default:
- return true;
- }
+ mode = native_mode;
+ asyc->scaler.full = true;
+ break;
+ default:
+ break;
}
+ } else {
+ mode = native_mode;
+ }
- drm_mode_copy(adjusted_mode, nv_connector->native_mode);
+ if (!drm_mode_equal(adjusted_mode, mode)) {
+ drm_mode_copy(adjusted_mode, mode);
+ crtc_state->mode_changed = true;
}
- return true;
+ return 0;
+}
+
+static int
+nv50_outp_atomic_check(struct drm_encoder *encoder,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
+{
+ struct nouveau_connector *nv_connector =
+ nouveau_connector(conn_state->connector);
+ return nv50_outp_atomic_check_view(encoder, crtc_state, conn_state,
+ nv_connector->native_mode);
}
/******************************************************************************
@@ -1574,21 +2546,39 @@ nv50_dac_dpms(struct drm_encoder *encoder, int mode)
}
static void
-nv50_dac_commit(struct drm_encoder *encoder)
+nv50_dac_disable(struct drm_encoder *encoder)
{
+ struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+ struct nv50_mast *mast = nv50_mast(encoder->dev);
+ const int or = nv_encoder->or;
+ u32 *push;
+
+ if (nv_encoder->crtc) {
+ push = evo_wait(mast, 4);
+ if (push) {
+ if (nv50_vers(mast) < GF110_DISP_CORE_CHANNEL_DMA) {
+ evo_mthd(push, 0x0400 + (or * 0x080), 1);
+ evo_data(push, 0x00000000);
+ } else {
+ evo_mthd(push, 0x0180 + (or * 0x020), 1);
+ evo_data(push, 0x00000000);
+ }
+ evo_kick(push, mast);
+ }
+ }
+
+ nv_encoder->crtc = NULL;
}
static void
-nv50_dac_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
+nv50_dac_enable(struct drm_encoder *encoder)
{
struct nv50_mast *mast = nv50_mast(encoder->dev);
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
+ struct drm_display_mode *mode = &nv_crtc->base.state->adjusted_mode;
u32 *push;
- nv50_dac_dpms(encoder, DRM_MODE_DPMS_ON);
-
push = evo_wait(mast, 8);
if (push) {
if (nv50_vers(mast) < GF110_DISP_CORE_CHANNEL_DMA) {
@@ -1627,33 +2617,6 @@ nv50_dac_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
nv_encoder->crtc = encoder->crtc;
}
-static void
-nv50_dac_disconnect(struct drm_encoder *encoder)
-{
- struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
- struct nv50_mast *mast = nv50_mast(encoder->dev);
- const int or = nv_encoder->or;
- u32 *push;
-
- if (nv_encoder->crtc) {
- nv50_crtc_prepare(nv_encoder->crtc);
-
- push = evo_wait(mast, 4);
- if (push) {
- if (nv50_vers(mast) < GF110_DISP_CORE_CHANNEL_DMA) {
- evo_mthd(push, 0x0400 + (or * 0x080), 1);
- evo_data(push, 0x00000000);
- } else {
- evo_mthd(push, 0x0180 + (or * 0x020), 1);
- evo_data(push, 0x00000000);
- }
- evo_kick(push, mast);
- }
- }
-
- nv_encoder->crtc = NULL;
-}
-
static enum drm_connector_status
nv50_dac_detect(struct drm_encoder *encoder, struct drm_connector *connector)
{
@@ -1681,6 +2644,15 @@ nv50_dac_detect(struct drm_encoder *encoder, struct drm_connector *connector)
return connector_status_connected;
}
+static const struct drm_encoder_helper_funcs
+nv50_dac_help = {
+ .dpms = nv50_dac_dpms,
+ .atomic_check = nv50_outp_atomic_check,
+ .enable = nv50_dac_enable,
+ .disable = nv50_dac_disable,
+ .detect = nv50_dac_detect
+};
+
static void
nv50_dac_destroy(struct drm_encoder *encoder)
{
@@ -1688,18 +2660,8 @@ nv50_dac_destroy(struct drm_encoder *encoder)
kfree(encoder);
}
-static const struct drm_encoder_helper_funcs nv50_dac_hfunc = {
- .dpms = nv50_dac_dpms,
- .mode_fixup = nv50_encoder_mode_fixup,
- .prepare = nv50_dac_disconnect,
- .commit = nv50_dac_commit,
- .mode_set = nv50_dac_mode_set,
- .disable = nv50_dac_disconnect,
- .get_crtc = nv50_display_crtc_get,
- .detect = nv50_dac_detect
-};
-
-static const struct drm_encoder_funcs nv50_dac_func = {
+static const struct drm_encoder_funcs
+nv50_dac_func = {
.destroy = nv50_dac_destroy,
};
@@ -1726,8 +2688,9 @@ nv50_dac_create(struct drm_connector *connector, struct dcb_output *dcbe)
encoder = to_drm_encoder(nv_encoder);
encoder->possible_crtcs = dcbe->heads;
encoder->possible_clones = 0;
- drm_encoder_init(connector->dev, encoder, &nv50_dac_func, type, NULL);
- drm_encoder_helper_add(encoder, &nv50_dac_hfunc);
+ drm_encoder_init(connector->dev, encoder, &nv50_dac_func, type,
+ "dac-%04x-%04x", dcbe->hasht, dcbe->hashm);
+ drm_encoder_helper_add(encoder, &nv50_dac_help);
drm_mode_connector_attach_encoder(connector, encoder);
return 0;
@@ -1737,7 +2700,26 @@ nv50_dac_create(struct drm_connector *connector, struct dcb_output *dcbe)
* Audio
*****************************************************************************/
static void
-nv50_audio_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode)
+nv50_audio_disable(struct drm_encoder *encoder, struct nouveau_crtc *nv_crtc)
+{
+ struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+ struct nv50_disp *disp = nv50_disp(encoder->dev);
+ struct {
+ struct nv50_disp_mthd_v1 base;
+ struct nv50_disp_sor_hda_eld_v0 eld;
+ } args = {
+ .base.version = 1,
+ .base.method = NV50_DISP_MTHD_V1_SOR_HDA_ELD,
+ .base.hasht = nv_encoder->dcb->hasht,
+ .base.hashm = (0xf0ff & nv_encoder->dcb->hashm) |
+ (0x0100 << nv_crtc->index),
+ };
+
+ nvif_mthd(disp->disp, 0, &args, sizeof(args));
+}
+
+static void
+nv50_audio_enable(struct drm_encoder *encoder, struct drm_display_mode *mode)
{
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
@@ -1768,30 +2750,30 @@ nv50_audio_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode)
sizeof(args.base) + drm_eld_size(args.data));
}
+/******************************************************************************
+ * HDMI
+ *****************************************************************************/
static void
-nv50_audio_disconnect(struct drm_encoder *encoder, struct nouveau_crtc *nv_crtc)
+nv50_hdmi_disable(struct drm_encoder *encoder, struct nouveau_crtc *nv_crtc)
{
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
struct nv50_disp *disp = nv50_disp(encoder->dev);
struct {
struct nv50_disp_mthd_v1 base;
- struct nv50_disp_sor_hda_eld_v0 eld;
+ struct nv50_disp_sor_hdmi_pwr_v0 pwr;
} args = {
.base.version = 1,
- .base.method = NV50_DISP_MTHD_V1_SOR_HDA_ELD,
- .base.hasht = nv_encoder->dcb->hasht,
- .base.hashm = (0xf0ff & nv_encoder->dcb->hashm) |
- (0x0100 << nv_crtc->index),
+ .base.method = NV50_DISP_MTHD_V1_SOR_HDMI_PWR,
+ .base.hasht = nv_encoder->dcb->hasht,
+ .base.hashm = (0xf0ff & nv_encoder->dcb->hashm) |
+ (0x0100 << nv_crtc->index),
};
nvif_mthd(disp->disp, 0, &args, sizeof(args));
}
-/******************************************************************************
- * HDMI
- *****************************************************************************/
static void
-nv50_hdmi_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode)
+nv50_hdmi_enable(struct drm_encoder *encoder, struct drm_display_mode *mode)
{
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
@@ -1821,26 +2803,635 @@ nv50_hdmi_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode)
args.pwr.max_ac_packet = max_ac_packet / 32;
nvif_mthd(disp->disp, 0, &args, sizeof(args));
- nv50_audio_mode_set(encoder, mode);
+ nv50_audio_enable(encoder, mode);
+}
+
+/******************************************************************************
+ * MST
+ *****************************************************************************/
+#define nv50_mstm(p) container_of((p), struct nv50_mstm, mgr)
+#define nv50_mstc(p) container_of((p), struct nv50_mstc, connector)
+#define nv50_msto(p) container_of((p), struct nv50_msto, encoder)
+
+struct nv50_mstm {
+ struct nouveau_encoder *outp;
+
+ struct drm_dp_mst_topology_mgr mgr;
+ struct nv50_msto *msto[4];
+
+ bool modified;
+};
+
+struct nv50_mstc {
+ struct nv50_mstm *mstm;
+ struct drm_dp_mst_port *port;
+ struct drm_connector connector;
+
+ struct drm_display_mode *native;
+ struct edid *edid;
+
+ int pbn;
+};
+
+struct nv50_msto {
+ struct drm_encoder encoder;
+
+ struct nv50_head *head;
+ struct nv50_mstc *mstc;
+ bool disabled;
+};
+
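+/* Rough object model, as suggested by the structures above: one nv50_mstm
+ * (topology manager) per physical DP output, one nv50_mstc (connector)
+ * per MST port discovered behind it, and one nv50_msto (encoder) per head
+ * that can drive a stream.
+ */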
+static struct drm_dp_payload *
+nv50_msto_payload(struct nv50_msto *msto)
+{
+ struct nouveau_drm *drm = nouveau_drm(msto->encoder.dev);
+ struct nv50_mstc *mstc = msto->mstc;
+ struct nv50_mstm *mstm = mstc->mstm;
+ int vcpi = mstc->port->vcpi.vcpi, i;
+
+ NV_ATOMIC(drm, "%s: vcpi %d\n", msto->encoder.name, vcpi);
+ for (i = 0; i < mstm->mgr.max_payloads; i++) {
+ struct drm_dp_payload *payload = &mstm->mgr.payloads[i];
+ NV_ATOMIC(drm, "%s: %d: vcpi %d start 0x%02x slots 0x%02x\n",
+ mstm->outp->base.base.name, i, payload->vcpi,
+ payload->start_slot, payload->num_slots);
+ }
+
+ for (i = 0; i < mstm->mgr.max_payloads; i++) {
+ struct drm_dp_payload *payload = &mstm->mgr.payloads[i];
+ if (payload->vcpi == vcpi)
+ return payload;
+ }
+
+ return NULL;
}
static void
-nv50_hdmi_disconnect(struct drm_encoder *encoder, struct nouveau_crtc *nv_crtc)
+nv50_msto_cleanup(struct nv50_msto *msto)
{
- struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
- struct nv50_disp *disp = nv50_disp(encoder->dev);
+ struct nouveau_drm *drm = nouveau_drm(msto->encoder.dev);
+ struct nv50_mstc *mstc = msto->mstc;
+ struct nv50_mstm *mstm = mstc->mstm;
+
+ NV_ATOMIC(drm, "%s: msto cleanup\n", msto->encoder.name);
+ if (mstc->port && mstc->port->vcpi.vcpi > 0 && !nv50_msto_payload(msto))
+ drm_dp_mst_deallocate_vcpi(&mstm->mgr, mstc->port);
+ if (msto->disabled) {
+ msto->mstc = NULL;
+ msto->head = NULL;
+ msto->disabled = false;
+ }
+}
+
+static void
+nv50_msto_prepare(struct nv50_msto *msto)
+{
+ struct nouveau_drm *drm = nouveau_drm(msto->encoder.dev);
+ struct nv50_mstc *mstc = msto->mstc;
+ struct nv50_mstm *mstm = mstc->mstm;
struct {
struct nv50_disp_mthd_v1 base;
- struct nv50_disp_sor_hdmi_pwr_v0 pwr;
+ struct nv50_disp_sor_dp_mst_vcpi_v0 vcpi;
} args = {
.base.version = 1,
- .base.method = NV50_DISP_MTHD_V1_SOR_HDMI_PWR,
- .base.hasht = nv_encoder->dcb->hasht,
- .base.hashm = (0xf0ff & nv_encoder->dcb->hashm) |
- (0x0100 << nv_crtc->index),
+ .base.method = NV50_DISP_MTHD_V1_SOR_DP_MST_VCPI,
+ .base.hasht = mstm->outp->dcb->hasht,
+ .base.hashm = (0xf0ff & mstm->outp->dcb->hashm) |
+ (0x0100 << msto->head->base.index),
};
- nvif_mthd(disp->disp, 0, &args, sizeof(args));
+ NV_ATOMIC(drm, "%s: msto prepare\n", msto->encoder.name);
+ if (mstc->port && mstc->port->vcpi.vcpi > 0) {
+ struct drm_dp_payload *payload = nv50_msto_payload(msto);
+ if (payload) {
+ args.vcpi.start_slot = payload->start_slot;
+ args.vcpi.num_slots = payload->num_slots;
+ args.vcpi.pbn = mstc->port->vcpi.pbn;
+ args.vcpi.aligned_pbn = mstc->port->vcpi.aligned_pbn;
+ }
+ }
+
+ NV_ATOMIC(drm, "%s: %s: %02x %02x %04x %04x\n",
+ msto->encoder.name, msto->head->base.base.name,
+ args.vcpi.start_slot, args.vcpi.num_slots,
+ args.vcpi.pbn, args.vcpi.aligned_pbn);
+ nvif_mthd(&drm->display->disp, 0, &args, sizeof(args));
+}
+
+static int
+nv50_msto_atomic_check(struct drm_encoder *encoder,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
+{
+ struct nv50_mstc *mstc = nv50_mstc(conn_state->connector);
+ struct nv50_mstm *mstm = mstc->mstm;
+ int bpp = conn_state->connector->display_info.bpc * 3;
+ int slots;
+
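+ /* Convert the mode's clock and bpp to a PBN value, then verify enough VCPI slots remain on the link. */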
+ mstc->pbn = drm_dp_calc_pbn_mode(crtc_state->adjusted_mode.clock, bpp);
+
+ slots = drm_dp_find_vcpi_slots(&mstm->mgr, mstc->pbn);
+ if (slots < 0)
+ return slots;
+
+ return nv50_outp_atomic_check_view(encoder, crtc_state, conn_state,
+ mstc->native);
+}
+
+static void
+nv50_msto_enable(struct drm_encoder *encoder)
+{
+ struct nv50_head *head = nv50_head(encoder->crtc);
+ struct nv50_msto *msto = nv50_msto(encoder);
+ struct nv50_mstc *mstc = NULL;
+ struct nv50_mstm *mstm = NULL;
+ struct drm_connector *connector;
+ u8 proto, depth;
+ int slots;
+ bool r;
+
+ drm_for_each_connector(connector, encoder->dev) {
+ if (connector->state->best_encoder == &msto->encoder) {
+ mstc = nv50_mstc(connector);
+ mstm = mstc->mstm;
+ break;
+ }
+ }
+
+ if (WARN_ON(!mstc))
+ return;
+
+ r = drm_dp_mst_allocate_vcpi(&mstm->mgr, mstc->port, mstc->pbn, &slots);
+ WARN_ON(!r);
+
+ if (mstm->outp->dcb->sorconf.link & 1)
+ proto = 0x8;
+ else
+ proto = 0x9;
+
+ switch (mstc->connector.display_info.bpc) {
+ case 6: depth = 0x2; break;
+ case 8: depth = 0x5; break;
+ case 10:
+ default: depth = 0x6; break;
+ }
+
+ mstm->outp->update(mstm->outp, head->base.index,
+ &head->base.base.state->adjusted_mode, proto, depth);
+
+ msto->head = head;
+ msto->mstc = mstc;
+ mstm->modified = true;
+}
+
+static void
+nv50_msto_disable(struct drm_encoder *encoder)
+{
+ struct nv50_msto *msto = nv50_msto(encoder);
+ struct nv50_mstc *mstc = msto->mstc;
+ struct nv50_mstm *mstm = mstc->mstm;
+
+ if (mstc->port)
+ drm_dp_mst_reset_vcpi_slots(&mstm->mgr, mstc->port);
+
+ mstm->outp->update(mstm->outp, msto->head->base.index, NULL, 0, 0);
+ mstm->modified = true;
+ msto->disabled = true;
+}
+
+static const struct drm_encoder_helper_funcs
+nv50_msto_help = {
+ .disable = nv50_msto_disable,
+ .enable = nv50_msto_enable,
+ .atomic_check = nv50_msto_atomic_check,
+};
+
+static void
+nv50_msto_destroy(struct drm_encoder *encoder)
+{
+ struct nv50_msto *msto = nv50_msto(encoder);
+ drm_encoder_cleanup(&msto->encoder);
+ kfree(msto);
+}
+
+static const struct drm_encoder_funcs
+nv50_msto = {
+ .destroy = nv50_msto_destroy,
+};
+
+static int
+nv50_msto_new(struct drm_device *dev, u32 heads, const char *name, int id,
+ struct nv50_msto **pmsto)
+{
+ struct nv50_msto *msto;
+ int ret;
+
+ if (!(msto = *pmsto = kzalloc(sizeof(*msto), GFP_KERNEL)))
+ return -ENOMEM;
+
+ ret = drm_encoder_init(dev, &msto->encoder, &nv50_msto,
+ DRM_MODE_ENCODER_DPMST, "%s-mst-%d", name, id);
+ if (ret) {
+ kfree(*pmsto);
+ *pmsto = NULL;
+ return ret;
+ }
+
+ drm_encoder_helper_add(&msto->encoder, &nv50_msto_help);
+ msto->encoder.possible_crtcs = heads;
+ return 0;
+}
+
+static struct drm_encoder *
+nv50_mstc_atomic_best_encoder(struct drm_connector *connector,
+ struct drm_connector_state *connector_state)
+{
+ struct nv50_head *head = nv50_head(connector_state->crtc);
+ struct nv50_mstc *mstc = nv50_mstc(connector);
+ if (mstc->port) {
+ struct nv50_mstm *mstm = mstc->mstm;
+ return &mstm->msto[head->base.index]->encoder;
+ }
+ return NULL;
+}
+
+static struct drm_encoder *
+nv50_mstc_best_encoder(struct drm_connector *connector)
+{
+ struct nv50_mstc *mstc = nv50_mstc(connector);
+ if (mstc->port) {
+ struct nv50_mstm *mstm = mstc->mstm;
+ return &mstm->msto[0]->encoder;
+ }
+ return NULL;
+}
+
+static enum drm_mode_status
+nv50_mstc_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ return MODE_OK;
+}
+
+static int
+nv50_mstc_get_modes(struct drm_connector *connector)
+{
+ struct nv50_mstc *mstc = nv50_mstc(connector);
+ int ret = 0;
+
+ mstc->edid = drm_dp_mst_get_edid(&mstc->connector, mstc->port->mgr, mstc->port);
+ drm_mode_connector_update_edid_property(&mstc->connector, mstc->edid);
+ if (mstc->edid) {
+ ret = drm_add_edid_modes(&mstc->connector, mstc->edid);
+ drm_edid_to_eld(&mstc->connector, mstc->edid);
+ }
+
+ if (!mstc->connector.display_info.bpc)
+ mstc->connector.display_info.bpc = 8;
+
+ if (mstc->native)
+ drm_mode_destroy(mstc->connector.dev, mstc->native);
+ mstc->native = nouveau_conn_native_mode(&mstc->connector);
+ return ret;
+}
+
+static const struct drm_connector_helper_funcs
+nv50_mstc_help = {
+ .get_modes = nv50_mstc_get_modes,
+ .mode_valid = nv50_mstc_mode_valid,
+ .best_encoder = nv50_mstc_best_encoder,
+ .atomic_best_encoder = nv50_mstc_atomic_best_encoder,
+};
+
+static enum drm_connector_status
+nv50_mstc_detect(struct drm_connector *connector, bool force)
+{
+ struct nv50_mstc *mstc = nv50_mstc(connector);
+ if (!mstc->port)
+ return connector_status_disconnected;
+ return drm_dp_mst_detect_port(connector, mstc->port->mgr, mstc->port);
+}
+
+static void
+nv50_mstc_destroy(struct drm_connector *connector)
+{
+ struct nv50_mstc *mstc = nv50_mstc(connector);
+ drm_connector_cleanup(&mstc->connector);
+ kfree(mstc);
+}
+
+static const struct drm_connector_funcs
+nv50_mstc = {
+ .dpms = drm_atomic_helper_connector_dpms,
+ .reset = nouveau_conn_reset,
+ .detect = nv50_mstc_detect,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .set_property = drm_atomic_helper_connector_set_property,
+ .destroy = nv50_mstc_destroy,
+ .atomic_duplicate_state = nouveau_conn_atomic_duplicate_state,
+ .atomic_destroy_state = nouveau_conn_atomic_destroy_state,
+ .atomic_set_property = nouveau_conn_atomic_set_property,
+ .atomic_get_property = nouveau_conn_atomic_get_property,
+};
+
+static int
+nv50_mstc_new(struct nv50_mstm *mstm, struct drm_dp_mst_port *port,
+ const char *path, struct nv50_mstc **pmstc)
+{
+ struct drm_device *dev = mstm->outp->base.base.dev;
+ struct nv50_mstc *mstc;
+ int ret, i;
+
+ if (!(mstc = *pmstc = kzalloc(sizeof(*mstc), GFP_KERNEL)))
+ return -ENOMEM;
+ mstc->mstm = mstm;
+ mstc->port = port;
+
+ ret = drm_connector_init(dev, &mstc->connector, &nv50_mstc,
+ DRM_MODE_CONNECTOR_DisplayPort);
+ if (ret) {
+ kfree(*pmstc);
+ *pmstc = NULL;
+ return ret;
+ }
+
+ drm_connector_helper_add(&mstc->connector, &nv50_mstc_help);
+
+ mstc->connector.funcs->reset(&mstc->connector);
+ nouveau_conn_attach_properties(&mstc->connector);
+
+ for (i = 0; i < ARRAY_SIZE(mstm->msto) && mstm->msto[i]; i++)
+ drm_mode_connector_attach_encoder(&mstc->connector, &mstm->msto[i]->encoder);
+
+ drm_object_attach_property(&mstc->connector.base, dev->mode_config.path_property, 0);
+ drm_object_attach_property(&mstc->connector.base, dev->mode_config.tile_property, 0);
+ drm_mode_connector_set_path_property(&mstc->connector, path);
+ return 0;
+}
+
+static void
+nv50_mstm_cleanup(struct nv50_mstm *mstm)
+{
+ struct nouveau_drm *drm = nouveau_drm(mstm->outp->base.base.dev);
+ struct drm_encoder *encoder;
+ int ret;
+
+ NV_ATOMIC(drm, "%s: mstm cleanup\n", mstm->outp->base.base.name);
+ ret = drm_dp_check_act_status(&mstm->mgr);
+
+ ret = drm_dp_update_payload_part2(&mstm->mgr);
+
+ drm_for_each_encoder(encoder, mstm->outp->base.base.dev) {
+ if (encoder->encoder_type == DRM_MODE_ENCODER_DPMST) {
+ struct nv50_msto *msto = nv50_msto(encoder);
+ struct nv50_mstc *mstc = msto->mstc;
+ if (mstc && mstc->mstm == mstm)
+ nv50_msto_cleanup(msto);
+ }
+ }
+
+ mstm->modified = false;
+}
+
+static void
+nv50_mstm_prepare(struct nv50_mstm *mstm)
+{
+ struct nouveau_drm *drm = nouveau_drm(mstm->outp->base.base.dev);
+ struct drm_encoder *encoder;
+ int ret;
+
+ NV_ATOMIC(drm, "%s: mstm prepare\n", mstm->outp->base.base.name);
+ ret = drm_dp_update_payload_part1(&mstm->mgr);
+
+ drm_for_each_encoder(encoder, mstm->outp->base.base.dev) {
+ if (encoder->encoder_type == DRM_MODE_ENCODER_DPMST) {
+ struct nv50_msto *msto = nv50_msto(encoder);
+ struct nv50_mstc *mstc = msto->mstc;
+ if (mstc && mstc->mstm == mstm)
+ nv50_msto_prepare(msto);
+ }
+ }
+}
+
+static void
+nv50_mstm_hotplug(struct drm_dp_mst_topology_mgr *mgr)
+{
+ struct nv50_mstm *mstm = nv50_mstm(mgr);
+ drm_kms_helper_hotplug_event(mstm->outp->base.base.dev);
+}
+
+static void
+nv50_mstm_destroy_connector(struct drm_dp_mst_topology_mgr *mgr,
+ struct drm_connector *connector)
+{
+ struct nouveau_drm *drm = nouveau_drm(connector->dev);
+ struct nv50_mstc *mstc = nv50_mstc(connector);
+
+ drm_connector_unregister(&mstc->connector);
+
+ drm_modeset_lock_all(drm->dev);
+ drm_fb_helper_remove_one_connector(&drm->fbcon->helper, &mstc->connector);
+ mstc->port = NULL;
+ drm_modeset_unlock_all(drm->dev);
+
+ drm_connector_unreference(&mstc->connector);
+}
+
+static void
+nv50_mstm_register_connector(struct drm_connector *connector)
+{
+ struct nouveau_drm *drm = nouveau_drm(connector->dev);
+
+ drm_modeset_lock_all(drm->dev);
+ drm_fb_helper_add_one_connector(&drm->fbcon->helper, connector);
+ drm_modeset_unlock_all(drm->dev);
+
+ drm_connector_register(connector);
+}
+
+static struct drm_connector *
+nv50_mstm_add_connector(struct drm_dp_mst_topology_mgr *mgr,
+ struct drm_dp_mst_port *port, const char *path)
+{
+ struct nv50_mstm *mstm = nv50_mstm(mgr);
+ struct nv50_mstc *mstc;
+ int ret;
+
+ ret = nv50_mstc_new(mstm, port, path, &mstc);
+ if (ret) {
+ if (mstc)
+ mstc->connector.funcs->destroy(&mstc->connector);
+ return NULL;
+ }
+
+ return &mstc->connector;
+}
+
+static const struct drm_dp_mst_topology_cbs
+nv50_mstm = {
+ .add_connector = nv50_mstm_add_connector,
+ .register_connector = nv50_mstm_register_connector,
+ .destroy_connector = nv50_mstm_destroy_connector,
+ .hotplug = nv50_mstm_hotplug,
+};
+
+void
+nv50_mstm_service(struct nv50_mstm *mstm)
+{
+ struct drm_dp_aux *aux = mstm->mgr.aux;
+ bool handled = true;
+ int ret;
+ u8 esi[8] = {};
+
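+ /* Keep servicing sink event notifications until nothing is left pending. */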
+ while (handled) {
+ ret = drm_dp_dpcd_read(aux, DP_SINK_COUNT_ESI, esi, 8);
+ if (ret != 8) {
+ drm_dp_mst_topology_mgr_set_mst(&mstm->mgr, false);
+ return;
+ }
+
+ drm_dp_mst_hpd_irq(&mstm->mgr, esi, &handled);
+ if (!handled)
+ break;
+
+ drm_dp_dpcd_write(aux, DP_SINK_COUNT_ESI + 1, &esi[1], 3);
+ }
+}
+
+void
+nv50_mstm_remove(struct nv50_mstm *mstm)
+{
+ if (mstm)
+ drm_dp_mst_topology_mgr_set_mst(&mstm->mgr, false);
+}
+
+static int
+nv50_mstm_enable(struct nv50_mstm *mstm, u8 dpcd, int state)
+{
+ struct nouveau_encoder *outp = mstm->outp;
+ struct {
+ struct nv50_disp_mthd_v1 base;
+ struct nv50_disp_sor_dp_mst_link_v0 mst;
+ } args = {
+ .base.version = 1,
+ .base.method = NV50_DISP_MTHD_V1_SOR_DP_MST_LINK,
+ .base.hasht = outp->dcb->hasht,
+ .base.hashm = outp->dcb->hashm,
+ .mst.state = state,
+ };
+ struct nouveau_drm *drm = nouveau_drm(outp->base.base.dev);
+ struct nvif_object *disp = &drm->display->disp;
+ int ret;
+
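+ /* For DPCD 1.2+ sinks, toggle the MST_EN bit in DP_MSTM_CTRL to match the requested state. */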
+ if (dpcd >= 0x12) {
+ ret = drm_dp_dpcd_readb(mstm->mgr.aux, DP_MSTM_CTRL, &dpcd);
+ if (ret < 0)
+ return ret;
+
+ dpcd &= ~DP_MST_EN;
+ if (state)
+ dpcd |= DP_MST_EN;
+
+ ret = drm_dp_dpcd_writeb(mstm->mgr.aux, DP_MSTM_CTRL, dpcd);
+ if (ret < 0)
+ return ret;
+ }
+
+ return nvif_mthd(disp, 0, &args, sizeof(args));
+}
+
+int
+nv50_mstm_detect(struct nv50_mstm *mstm, u8 dpcd[8], int allow)
+{
+ int ret, state = 0;
+
+ if (!mstm)
+ return 0;
+
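+ /* Only DPCD 1.2+ sinks can support MST; treat anything else as SST. */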
+ if (dpcd[0] >= 0x12) {
+ ret = drm_dp_dpcd_readb(mstm->mgr.aux, DP_MSTM_CAP, &dpcd[1]);
+ if (ret < 0)
+ return ret;
+
+ if (!(dpcd[1] & DP_MST_CAP))
+ dpcd[0] = 0x11;
+ else
+ state = allow;
+ }
+
+ ret = nv50_mstm_enable(mstm, dpcd[0], state);
+ if (ret)
+ return ret;
+
+ ret = drm_dp_mst_topology_mgr_set_mst(&mstm->mgr, state);
+ if (ret)
+ return nv50_mstm_enable(mstm, dpcd[0], 0);
+
+ return mstm->mgr.mst_state;
+}
+
+static void
+nv50_mstm_fini(struct nv50_mstm *mstm)
+{
+ if (mstm && mstm->mgr.mst_state)
+ drm_dp_mst_topology_mgr_suspend(&mstm->mgr);
+}
+
+static void
+nv50_mstm_init(struct nv50_mstm *mstm)
+{
+ if (mstm && mstm->mgr.mst_state)
+ drm_dp_mst_topology_mgr_resume(&mstm->mgr);
+}
+
+static void
+nv50_mstm_del(struct nv50_mstm **pmstm)
+{
+ struct nv50_mstm *mstm = *pmstm;
+ if (mstm) {
+ kfree(*pmstm);
+ *pmstm = NULL;
+ }
+}
+
+static int
+nv50_mstm_new(struct nouveau_encoder *outp, struct drm_dp_aux *aux, int aux_max,
+ int conn_base_id, struct nv50_mstm **pmstm)
+{
+ const int max_payloads = hweight8(outp->dcb->heads);
+ struct drm_device *dev = outp->base.base.dev;
+ struct nv50_mstm *mstm;
+ int ret, i;
+ u8 dpcd;
+
+ /* This is a workaround for some monitors not functioning
+ * correctly in MST mode on initial module load. I think
+ * some bad interaction with the VBIOS may be responsible.
+ *
+ * A good ol' off and on again seems to work here ;)
+ */
+ ret = drm_dp_dpcd_readb(aux, DP_DPCD_REV, &dpcd);
+ if (ret >= 0 && dpcd >= 0x12)
+ drm_dp_dpcd_writeb(aux, DP_MSTM_CTRL, 0);
+
+ if (!(mstm = *pmstm = kzalloc(sizeof(*mstm), GFP_KERNEL)))
+ return -ENOMEM;
+ mstm->outp = outp;
+ mstm->mgr.cbs = &nv50_mstm;
+
+ ret = drm_dp_mst_topology_mgr_init(&mstm->mgr, dev->dev, aux, aux_max,
+ max_payloads, conn_base_id);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < max_payloads; i++) {
+ ret = nv50_msto_new(dev, outp->dcb->heads, outp->base.base.name,
+ i, &mstm->msto[i]);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
}
/******************************************************************************
@@ -1861,89 +3452,91 @@ nv50_sor_dpms(struct drm_encoder *encoder, int mode)
.base.hashm = nv_encoder->dcb->hashm,
.pwr.state = mode == DRM_MODE_DPMS_ON,
};
- struct {
- struct nv50_disp_mthd_v1 base;
- struct nv50_disp_sor_dp_pwr_v0 pwr;
- } link = {
- .base.version = 1,
- .base.method = NV50_DISP_MTHD_V1_SOR_DP_PWR,
- .base.hasht = nv_encoder->dcb->hasht,
- .base.hashm = nv_encoder->dcb->hashm,
- .pwr.state = mode == DRM_MODE_DPMS_ON,
- };
- struct drm_device *dev = encoder->dev;
- struct drm_encoder *partner;
- nv_encoder->last_dpms = mode;
-
- list_for_each_entry(partner, &dev->mode_config.encoder_list, head) {
- struct nouveau_encoder *nv_partner = nouveau_encoder(partner);
-
- if (partner->encoder_type != DRM_MODE_ENCODER_TMDS)
- continue;
+ nvif_mthd(disp->disp, 0, &args, sizeof(args));
+}
- if (nv_partner != nv_encoder &&
- nv_partner->dcb->or == nv_encoder->dcb->or) {
- if (nv_partner->last_dpms == DRM_MODE_DPMS_ON)
- return;
- break;
- }
- }
+static void
+nv50_sor_update(struct nouveau_encoder *nv_encoder, u8 head,
+ struct drm_display_mode *mode, u8 proto, u8 depth)
+{
+ struct nv50_dmac *core = &nv50_mast(nv_encoder->base.base.dev)->base;
+ u32 *push;
- if (nv_encoder->dcb->type == DCB_OUTPUT_DP) {
- args.pwr.state = 1;
- nvif_mthd(disp->disp, 0, &args, sizeof(args));
- nvif_mthd(disp->disp, 0, &link, sizeof(link));
+ if (!mode) {
+ nv_encoder->ctrl &= ~BIT(head);
+ if (!(nv_encoder->ctrl & 0x0000000f))
+ nv_encoder->ctrl = 0;
} else {
- nvif_mthd(disp->disp, 0, &args, sizeof(args));
+ nv_encoder->ctrl |= proto << 8;
+ nv_encoder->ctrl |= BIT(head);
}
-}
-static void
-nv50_sor_ctrl(struct nouveau_encoder *nv_encoder, u32 mask, u32 data)
-{
- struct nv50_mast *mast = nv50_mast(nv_encoder->base.base.dev);
- u32 temp = (nv_encoder->ctrl & ~mask) | (data & mask), *push;
- if (temp != nv_encoder->ctrl && (push = evo_wait(mast, 2))) {
- if (nv50_vers(mast) < GF110_DISP_CORE_CHANNEL_DMA) {
+ if ((push = evo_wait(core, 6))) {
+ if (core->base.user.oclass < GF110_DISP_CORE_CHANNEL_DMA) {
+ if (mode) {
+ if (mode->flags & DRM_MODE_FLAG_NHSYNC)
+ nv_encoder->ctrl |= 0x00001000;
+ if (mode->flags & DRM_MODE_FLAG_NVSYNC)
+ nv_encoder->ctrl |= 0x00002000;
+ nv_encoder->ctrl |= depth << 16;
+ }
evo_mthd(push, 0x0600 + (nv_encoder->or * 0x40), 1);
- evo_data(push, (nv_encoder->ctrl = temp));
} else {
+ if (mode) {
+ u32 magic = 0x31ec6000 | (head << 25);
+ u32 syncs = 0x00000001;
+ if (mode->flags & DRM_MODE_FLAG_NHSYNC)
+ syncs |= 0x00000008;
+ if (mode->flags & DRM_MODE_FLAG_NVSYNC)
+ syncs |= 0x00000010;
+ if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+ magic |= 0x00000001;
+
+ evo_mthd(push, 0x0404 + (head * 0x300), 2);
+ evo_data(push, syncs | (depth << 6));
+ evo_data(push, magic);
+ }
evo_mthd(push, 0x0200 + (nv_encoder->or * 0x20), 1);
- evo_data(push, (nv_encoder->ctrl = temp));
}
- evo_kick(push, mast);
+ evo_data(push, nv_encoder->ctrl);
+ evo_kick(push, core);
}
}
static void
-nv50_sor_disconnect(struct drm_encoder *encoder)
+nv50_sor_disable(struct drm_encoder *encoder)
{
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
struct nouveau_crtc *nv_crtc = nouveau_crtc(nv_encoder->crtc);
- nv_encoder->last_dpms = DRM_MODE_DPMS_OFF;
nv_encoder->crtc = NULL;
if (nv_crtc) {
- nv50_crtc_prepare(&nv_crtc->base);
- nv50_sor_ctrl(nv_encoder, 1 << nv_crtc->index, 0);
- nv50_audio_disconnect(encoder, nv_crtc);
- nv50_hdmi_disconnect(&nv_encoder->base.base, nv_crtc);
- }
-}
+ struct nvkm_i2c_aux *aux = nv_encoder->aux;
+ u8 pwr;
-static void
-nv50_sor_commit(struct drm_encoder *encoder)
-{
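+ /* Put a DP sink into the D3 power state before shutting down the OR. */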
+ if (aux) {
+ int ret = nvkm_rdaux(aux, DP_SET_POWER, &pwr, 1);
+ if (ret == 0) {
+ pwr &= ~DP_SET_POWER_MASK;
+ pwr |= DP_SET_POWER_D3;
+ nvkm_wraux(aux, DP_SET_POWER, &pwr, 1);
+ }
+ }
+
+ nv_encoder->update(nv_encoder, nv_crtc->index, NULL, 0, 0);
+ nv50_audio_disable(encoder, nv_crtc);
+ nv50_hdmi_disable(&nv_encoder->base.base, nv_crtc);
+ }
}
static void
-nv50_sor_mode_set(struct drm_encoder *encoder, struct drm_display_mode *umode,
- struct drm_display_mode *mode)
+nv50_sor_enable(struct drm_encoder *encoder)
{
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
+ struct drm_display_mode *mode = &nv_crtc->base.state->adjusted_mode;
struct {
struct nv50_disp_mthd_v1 base;
struct nv50_disp_sor_lvds_script_v0 lvds;
@@ -1954,13 +3547,10 @@ nv50_sor_mode_set(struct drm_encoder *encoder, struct drm_display_mode *umode,
.base.hashm = nv_encoder->dcb->hashm,
};
struct nv50_disp *disp = nv50_disp(encoder->dev);
- struct nv50_mast *mast = nv50_mast(encoder->dev);
struct drm_device *dev = encoder->dev;
struct nouveau_drm *drm = nouveau_drm(dev);
struct nouveau_connector *nv_connector;
struct nvbios *bios = &drm->vbios;
- u32 mask, ctrl;
- u8 owner = 1 << nv_crtc->index;
u8 proto = 0xf;
u8 depth = 0x0;
@@ -1985,7 +3575,7 @@ nv50_sor_mode_set(struct drm_encoder *encoder, struct drm_display_mode *umode,
proto = 0x2;
}
- nv50_hdmi_mode_set(&nv_encoder->base.base, mode);
+ nv50_hdmi_enable(&nv_encoder->base.base, mode);
break;
case DCB_OUTPUT_LVDS:
proto = 0x0;
@@ -2019,94 +3609,60 @@ nv50_sor_mode_set(struct drm_encoder *encoder, struct drm_display_mode *umode,
nvif_mthd(disp->disp, 0, &lvds, sizeof(lvds));
break;
case DCB_OUTPUT_DP:
- if (nv_connector->base.display_info.bpc == 6) {
- nv_encoder->dp.datarate = mode->clock * 18 / 8;
+ if (nv_connector->base.display_info.bpc == 6)
depth = 0x2;
- } else
- if (nv_connector->base.display_info.bpc == 8) {
- nv_encoder->dp.datarate = mode->clock * 24 / 8;
+ else
+ if (nv_connector->base.display_info.bpc == 8)
depth = 0x5;
- } else {
- nv_encoder->dp.datarate = mode->clock * 30 / 8;
+ else
depth = 0x6;
- }
if (nv_encoder->dcb->sorconf.link & 1)
proto = 0x8;
else
proto = 0x9;
- nv50_audio_mode_set(encoder, mode);
+
+ nv50_audio_enable(encoder, mode);
break;
default:
BUG_ON(1);
break;
}
- nv50_sor_dpms(&nv_encoder->base.base, DRM_MODE_DPMS_ON);
-
- if (nv50_vers(mast) >= GF110_DISP) {
- u32 *push = evo_wait(mast, 3);
- if (push) {
- u32 magic = 0x31ec6000 | (nv_crtc->index << 25);
- u32 syncs = 0x00000001;
-
- if (mode->flags & DRM_MODE_FLAG_NHSYNC)
- syncs |= 0x00000008;
- if (mode->flags & DRM_MODE_FLAG_NVSYNC)
- syncs |= 0x00000010;
-
- if (mode->flags & DRM_MODE_FLAG_INTERLACE)
- magic |= 0x00000001;
-
- evo_mthd(push, 0x0404 + (nv_crtc->index * 0x300), 2);
- evo_data(push, syncs | (depth << 6));
- evo_data(push, magic);
- evo_kick(push, mast);
- }
-
- ctrl = proto << 8;
- mask = 0x00000f00;
- } else {
- ctrl = (depth << 16) | (proto << 8);
- if (mode->flags & DRM_MODE_FLAG_NHSYNC)
- ctrl |= 0x00001000;
- if (mode->flags & DRM_MODE_FLAG_NVSYNC)
- ctrl |= 0x00002000;
- mask = 0x000f3f00;
- }
-
- nv50_sor_ctrl(nv_encoder, mask | owner, ctrl | owner);
+ nv_encoder->update(nv_encoder, nv_crtc->index, mode, proto, depth);
}
+static const struct drm_encoder_helper_funcs
+nv50_sor_help = {
+ .dpms = nv50_sor_dpms,
+ .atomic_check = nv50_outp_atomic_check,
+ .enable = nv50_sor_enable,
+ .disable = nv50_sor_disable,
+};
+
static void
nv50_sor_destroy(struct drm_encoder *encoder)
{
+ struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+ nv50_mstm_del(&nv_encoder->dp.mstm);
drm_encoder_cleanup(encoder);
kfree(encoder);
}
-static const struct drm_encoder_helper_funcs nv50_sor_hfunc = {
- .dpms = nv50_sor_dpms,
- .mode_fixup = nv50_encoder_mode_fixup,
- .prepare = nv50_sor_disconnect,
- .commit = nv50_sor_commit,
- .mode_set = nv50_sor_mode_set,
- .disable = nv50_sor_disconnect,
- .get_crtc = nv50_display_crtc_get,
-};
-
-static const struct drm_encoder_funcs nv50_sor_func = {
+static const struct drm_encoder_funcs
+nv50_sor_func = {
.destroy = nv50_sor_destroy,
};
static int
nv50_sor_create(struct drm_connector *connector, struct dcb_output *dcbe)
{
+ struct nouveau_connector *nv_connector = nouveau_connector(connector);
struct nouveau_drm *drm = nouveau_drm(connector->dev);
struct nvkm_i2c *i2c = nvxx_i2c(&drm->device);
struct nouveau_encoder *nv_encoder;
struct drm_encoder *encoder;
- int type;
+ int type, ret;
switch (dcbe->type) {
case DCB_OUTPUT_LVDS: type = DRM_MODE_ENCODER_LVDS; break;
@@ -2122,7 +3678,16 @@ nv50_sor_create(struct drm_connector *connector, struct dcb_output *dcbe)
return -ENOMEM;
nv_encoder->dcb = dcbe;
nv_encoder->or = ffs(dcbe->or) - 1;
- nv_encoder->last_dpms = DRM_MODE_DPMS_OFF;
+ nv_encoder->update = nv50_sor_update;
+
+ encoder = to_drm_encoder(nv_encoder);
+ encoder->possible_crtcs = dcbe->heads;
+ encoder->possible_clones = 0;
+ drm_encoder_init(connector->dev, encoder, &nv50_sor_func, type,
+ "sor-%04x-%04x", dcbe->hasht, dcbe->hashm);
+ drm_encoder_helper_add(encoder, &nv50_sor_help);
+
+ drm_mode_connector_attach_encoder(connector, encoder);
if (dcbe->type == DCB_OUTPUT_DP) {
struct nvkm_i2c_aux *aux =
@@ -2131,6 +3696,15 @@ nv50_sor_create(struct drm_connector *connector, struct dcb_output *dcbe)
nv_encoder->i2c = &aux->i2c;
nv_encoder->aux = aux;
}
+
+ /*TODO: Use DP Info Table to check for support. */
+ if (nv50_disp(encoder->dev)->disp->oclass >= GF110_DISP) {
+ ret = nv50_mstm_new(nv_encoder, &nv_connector->aux, 16,
+ nv_connector->base.base.id,
+ &nv_encoder->dp.mstm);
+ if (ret)
+ return ret;
+ }
} else {
struct nvkm_i2c_bus *bus =
nvkm_i2c_bus_find(i2c, dcbe->i2c_index);
@@ -2138,20 +3712,12 @@ nv50_sor_create(struct drm_connector *connector, struct dcb_output *dcbe)
nv_encoder->i2c = &bus->i2c;
}
- encoder = to_drm_encoder(nv_encoder);
- encoder->possible_crtcs = dcbe->heads;
- encoder->possible_clones = 0;
- drm_encoder_init(connector->dev, encoder, &nv50_sor_func, type, NULL);
- drm_encoder_helper_add(encoder, &nv50_sor_hfunc);
-
- drm_mode_connector_attach_encoder(connector, encoder);
return 0;
}
/******************************************************************************
* PIOR
*****************************************************************************/
-
static void
nv50_pior_dpms(struct drm_encoder *encoder, int mode)
{
@@ -2172,30 +3738,48 @@ nv50_pior_dpms(struct drm_encoder *encoder, int mode)
nvif_mthd(disp->disp, 0, &args, sizeof(args));
}
-static bool
-nv50_pior_mode_fixup(struct drm_encoder *encoder,
- const struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
+static int
+nv50_pior_atomic_check(struct drm_encoder *encoder,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
{
- if (!nv50_encoder_mode_fixup(encoder, mode, adjusted_mode))
- return false;
- adjusted_mode->clock *= 2;
- return true;
+ int ret = nv50_outp_atomic_check(encoder, crtc_state, conn_state);
+ if (ret)
+ return ret;
+ crtc_state->adjusted_mode.clock *= 2;
+ return 0;
}
static void
-nv50_pior_commit(struct drm_encoder *encoder)
+nv50_pior_disable(struct drm_encoder *encoder)
{
+ struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+ struct nv50_mast *mast = nv50_mast(encoder->dev);
+ const int or = nv_encoder->or;
+ u32 *push;
+
+ if (nv_encoder->crtc) {
+ push = evo_wait(mast, 4);
+ if (push) {
+ if (nv50_vers(mast) < GF110_DISP_CORE_CHANNEL_DMA) {
+ evo_mthd(push, 0x0700 + (or * 0x040), 1);
+ evo_data(push, 0x00000000);
+ }
+ evo_kick(push, mast);
+ }
+ }
+
+ nv_encoder->crtc = NULL;
}
static void
-nv50_pior_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
+nv50_pior_enable(struct drm_encoder *encoder)
{
struct nv50_mast *mast = nv50_mast(encoder->dev);
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
struct nouveau_connector *nv_connector;
+ struct drm_display_mode *mode = &nv_crtc->base.state->adjusted_mode;
u8 owner = 1 << nv_crtc->index;
u8 proto, depth;
u32 *push;
@@ -2218,8 +3802,6 @@ nv50_pior_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
break;
}
- nv50_pior_dpms(encoder, DRM_MODE_DPMS_ON);
-
push = evo_wait(mast, 8);
if (push) {
if (nv50_vers(mast) < GF110_DISP_CORE_CHANNEL_DMA) {
@@ -2238,29 +3820,13 @@ nv50_pior_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
nv_encoder->crtc = encoder->crtc;
}
-static void
-nv50_pior_disconnect(struct drm_encoder *encoder)
-{
- struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
- struct nv50_mast *mast = nv50_mast(encoder->dev);
- const int or = nv_encoder->or;
- u32 *push;
-
- if (nv_encoder->crtc) {
- nv50_crtc_prepare(nv_encoder->crtc);
-
- push = evo_wait(mast, 4);
- if (push) {
- if (nv50_vers(mast) < GF110_DISP_CORE_CHANNEL_DMA) {
- evo_mthd(push, 0x0700 + (or * 0x040), 1);
- evo_data(push, 0x00000000);
- }
- evo_kick(push, mast);
- }
- }
-
- nv_encoder->crtc = NULL;
-}
+static const struct drm_encoder_helper_funcs
+nv50_pior_help = {
+ .dpms = nv50_pior_dpms,
+ .atomic_check = nv50_pior_atomic_check,
+ .enable = nv50_pior_enable,
+ .disable = nv50_pior_disable,
+};
static void
nv50_pior_destroy(struct drm_encoder *encoder)
@@ -2269,17 +3835,8 @@ nv50_pior_destroy(struct drm_encoder *encoder)
kfree(encoder);
}
-static const struct drm_encoder_helper_funcs nv50_pior_hfunc = {
- .dpms = nv50_pior_dpms,
- .mode_fixup = nv50_pior_mode_fixup,
- .prepare = nv50_pior_disconnect,
- .commit = nv50_pior_commit,
- .mode_set = nv50_pior_mode_set,
- .disable = nv50_pior_disconnect,
- .get_crtc = nv50_display_crtc_get,
-};
-
-static const struct drm_encoder_funcs nv50_pior_func = {
+static const struct drm_encoder_funcs
+nv50_pior_func = {
.destroy = nv50_pior_destroy,
};
@@ -2321,149 +3878,464 @@ nv50_pior_create(struct drm_connector *connector, struct dcb_output *dcbe)
encoder = to_drm_encoder(nv_encoder);
encoder->possible_crtcs = dcbe->heads;
encoder->possible_clones = 0;
- drm_encoder_init(connector->dev, encoder, &nv50_pior_func, type, NULL);
- drm_encoder_helper_add(encoder, &nv50_pior_hfunc);
+ drm_encoder_init(connector->dev, encoder, &nv50_pior_func, type,
+ "pior-%04x-%04x", dcbe->hasht, dcbe->hashm);
+ drm_encoder_helper_add(encoder, &nv50_pior_help);
drm_mode_connector_attach_encoder(connector, encoder);
return 0;
}
/******************************************************************************
- * Framebuffer
+ * Atomic
*****************************************************************************/
static void
-nv50_fbdma_fini(struct nv50_fbdma *fbdma)
+nv50_disp_atomic_commit_core(struct nouveau_drm *drm, u32 interlock)
{
- int i;
- for (i = 0; i < ARRAY_SIZE(fbdma->base); i++)
- nvif_object_fini(&fbdma->base[i]);
- nvif_object_fini(&fbdma->core);
- list_del(&fbdma->head);
- kfree(fbdma);
+ struct nv50_disp *disp = nv50_disp(drm->dev);
+ struct nv50_dmac *core = &disp->mast.base;
+ struct nv50_mstm *mstm;
+ struct drm_encoder *encoder;
+ u32 *push;
+
+ NV_ATOMIC(drm, "commit core %08x\n", interlock);
+
+ drm_for_each_encoder(encoder, drm->dev) {
+ if (encoder->encoder_type != DRM_MODE_ENCODER_DPMST) {
+ mstm = nouveau_encoder(encoder)->dp.mstm;
+ if (mstm && mstm->modified)
+ nv50_mstm_prepare(mstm);
+ }
+ }
+
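+ /* Submit the core channel update, then poll the sync notifier for completion. */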
+ if ((push = evo_wait(core, 5))) {
+ evo_mthd(push, 0x0084, 1);
+ evo_data(push, 0x80000000);
+ evo_mthd(push, 0x0080, 2);
+ evo_data(push, interlock);
+ evo_data(push, 0x00000000);
+ nouveau_bo_wr32(disp->sync, 0, 0x00000000);
+ evo_kick(push, core);
+ if (nvif_msec(&drm->device, 2000ULL,
+ if (nouveau_bo_rd32(disp->sync, 0))
+ break;
+ usleep_range(1, 2);
+ ) < 0)
+ NV_ERROR(drm, "EVO timeout\n");
+ }
+
+ drm_for_each_encoder(encoder, drm->dev) {
+ if (encoder->encoder_type != DRM_MODE_ENCODER_DPMST) {
+ mstm = nouveau_encoder(encoder)->dp.mstm;
+ if (mstm && mstm->modified)
+ nv50_mstm_cleanup(mstm);
+ }
+ }
}
-static int
-nv50_fbdma_init(struct drm_device *dev, u32 name, u64 offset, u64 length, u8 kind)
+static void
+nv50_disp_atomic_commit_tail(struct drm_atomic_state *state)
{
+ struct drm_device *dev = state->dev;
+ struct drm_crtc_state *crtc_state;
+ struct drm_crtc *crtc;
+ struct drm_plane_state *plane_state;
+ struct drm_plane *plane;
struct nouveau_drm *drm = nouveau_drm(dev);
struct nv50_disp *disp = nv50_disp(dev);
- struct nv50_mast *mast = nv50_mast(dev);
- struct __attribute__ ((packed)) {
- struct nv_dma_v0 base;
- union {
- struct nv50_dma_v0 nv50;
- struct gf100_dma_v0 gf100;
- struct gf119_dma_v0 gf119;
- };
- } args = {};
- struct nv50_fbdma *fbdma;
- struct drm_crtc *crtc;
- u32 size = sizeof(args.base);
- int ret;
+ struct nv50_atom *atom = nv50_atom(state);
+ struct nv50_outp_atom *outp, *outt;
+ u32 interlock_core = 0;
+ u32 interlock_chan = 0;
+ int i;
+
+ NV_ATOMIC(drm, "commit %d %d\n", atom->lock_core, atom->flush_disable);
+ drm_atomic_helper_wait_for_fences(dev, state, false);
+ drm_atomic_helper_wait_for_dependencies(state);
+ drm_atomic_helper_update_legacy_modeset_state(dev, state);
- list_for_each_entry(fbdma, &disp->fbdma, head) {
- if (fbdma->core.handle == name)
- return 0;
+ if (atom->lock_core)
+ mutex_lock(&disp->mutex);
+
+ /* Disable head(s). */
+ for_each_crtc_in_state(state, crtc, crtc_state, i) {
+ struct nv50_head_atom *asyh = nv50_head_atom(crtc->state);
+ struct nv50_head *head = nv50_head(crtc);
+
+ NV_ATOMIC(drm, "%s: clr %04x (set %04x)\n", crtc->name,
+ asyh->clr.mask, asyh->set.mask);
+
+ if (asyh->clr.mask) {
+ nv50_head_flush_clr(head, asyh, atom->flush_disable);
+ interlock_core |= 1;
+ }
}
- fbdma = kzalloc(sizeof(*fbdma), GFP_KERNEL);
- if (!fbdma)
- return -ENOMEM;
- list_add(&fbdma->head, &disp->fbdma);
+ /* Disable plane(s). */
+ for_each_plane_in_state(state, plane, plane_state, i) {
+ struct nv50_wndw_atom *asyw = nv50_wndw_atom(plane->state);
+ struct nv50_wndw *wndw = nv50_wndw(plane);
- args.base.target = NV_DMA_V0_TARGET_VRAM;
- args.base.access = NV_DMA_V0_ACCESS_RDWR;
- args.base.start = offset;
- args.base.limit = offset + length - 1;
+ NV_ATOMIC(drm, "%s: clr %02x (set %02x)\n", plane->name,
+ asyw->clr.mask, asyw->set.mask);
+ if (!asyw->clr.mask)
+ continue;
- if (drm->device.info.chipset < 0x80) {
- args.nv50.part = NV50_DMA_V0_PART_256;
- size += sizeof(args.nv50);
- } else
- if (drm->device.info.chipset < 0xc0) {
- args.nv50.part = NV50_DMA_V0_PART_256;
- args.nv50.kind = kind;
- size += sizeof(args.nv50);
- } else
- if (drm->device.info.chipset < 0xd0) {
- args.gf100.kind = kind;
- size += sizeof(args.gf100);
- } else {
- args.gf119.page = GF119_DMA_V0_PAGE_LP;
- args.gf119.kind = kind;
- size += sizeof(args.gf119);
+ interlock_chan |= nv50_wndw_flush_clr(wndw, interlock_core,
+ atom->flush_disable,
+ asyw);
+ }
+
+ /* Disable output path(s). */
+ list_for_each_entry(outp, &atom->outp, head) {
+ const struct drm_encoder_helper_funcs *help;
+ struct drm_encoder *encoder;
+
+ encoder = outp->encoder;
+ help = encoder->helper_private;
+
+ NV_ATOMIC(drm, "%s: clr %02x (set %02x)\n", encoder->name,
+ outp->clr.mask, outp->set.mask);
+
+ if (outp->clr.mask) {
+ help->disable(encoder);
+ interlock_core |= 1;
+ if (outp->flush_disable) {
+ nv50_disp_atomic_commit_core(drm, interlock_chan);
+ interlock_core = 0;
+ interlock_chan = 0;
+ }
+ }
+ }
+
+ /* Flush disable. */
+ if (interlock_core) {
+ if (atom->flush_disable) {
+ nv50_disp_atomic_commit_core(drm, interlock_chan);
+ interlock_core = 0;
+ interlock_chan = 0;
+ }
}
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ /* Update output path(s). */
+ list_for_each_entry_safe(outp, outt, &atom->outp, head) {
+ const struct drm_encoder_helper_funcs *help;
+ struct drm_encoder *encoder;
+
+ encoder = outp->encoder;
+ help = encoder->helper_private;
+
+ NV_ATOMIC(drm, "%s: set %02x (clr %02x)\n", encoder->name,
+ outp->set.mask, outp->clr.mask);
+
+ if (outp->set.mask) {
+ help->enable(encoder);
+ interlock_core = 1;
+ }
+
+ list_del(&outp->head);
+ kfree(outp);
+ }
+
+ /* Update head(s). */
+ for_each_crtc_in_state(state, crtc, crtc_state, i) {
+ struct nv50_head_atom *asyh = nv50_head_atom(crtc->state);
struct nv50_head *head = nv50_head(crtc);
- int ret = nvif_object_init(&head->sync.base.base.user, name,
- NV_DMA_IN_MEMORY, &args, size,
- &fbdma->base[head->base.index]);
- if (ret) {
- nv50_fbdma_fini(fbdma);
- return ret;
+
+ NV_ATOMIC(drm, "%s: set %04x (clr %04x)\n", crtc->name,
+ asyh->set.mask, asyh->clr.mask);
+
+ if (asyh->set.mask) {
+ nv50_head_flush_set(head, asyh);
+ interlock_core = 1;
}
}
- ret = nvif_object_init(&mast->base.base.user, name, NV_DMA_IN_MEMORY,
- &args, size, &fbdma->core);
- if (ret) {
- nv50_fbdma_fini(fbdma);
+ /* Update plane(s). */
+ for_each_plane_in_state(state, plane, plane_state, i) {
+ struct nv50_wndw_atom *asyw = nv50_wndw_atom(plane->state);
+ struct nv50_wndw *wndw = nv50_wndw(plane);
+
+ NV_ATOMIC(drm, "%s: set %02x (clr %02x)\n", plane->name,
+ asyw->set.mask, asyw->clr.mask);
+ if ( !asyw->set.mask &&
+ (!asyw->clr.mask || atom->flush_disable))
+ continue;
+
+ interlock_chan |= nv50_wndw_flush_set(wndw, interlock_core, asyw);
+ }
+
+ /* Flush update. */
+ if (interlock_core) {
+ if (!interlock_chan && atom->state.legacy_cursor_update) {
+ u32 *push = evo_wait(&disp->mast, 2);
+ if (push) {
+ evo_mthd(push, 0x0080, 1);
+ evo_data(push, 0x00000000);
+ evo_kick(push, &disp->mast);
+ }
+ } else {
+ nv50_disp_atomic_commit_core(drm, interlock_chan);
+ }
+ }
+
+ if (atom->lock_core)
+ mutex_unlock(&disp->mutex);
+
+ /* Wait for HW to signal completion. */
+ for_each_plane_in_state(state, plane, plane_state, i) {
+ struct nv50_wndw_atom *asyw = nv50_wndw_atom(plane->state);
+ struct nv50_wndw *wndw = nv50_wndw(plane);
+ int ret = nv50_wndw_wait_armed(wndw, asyw);
+ if (ret)
+ NV_ERROR(drm, "%s: timeout\n", plane->name);
+ }
+
+ for_each_crtc_in_state(state, crtc, crtc_state, i) {
+ if (crtc->state->event) {
+ unsigned long flags;
+ /* Get correct count/ts if racing with vblank irq */
+ drm_accurate_vblank_count(crtc);
+ spin_lock_irqsave(&crtc->dev->event_lock, flags);
+ drm_crtc_send_vblank_event(crtc, crtc->state->event);
+ spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
+ crtc->state->event = NULL;
+ }
+ }
+
+ drm_atomic_helper_commit_hw_done(state);
+ drm_atomic_helper_cleanup_planes(dev, state);
+ drm_atomic_helper_commit_cleanup_done(state);
+ drm_atomic_state_put(state);
+}
+
+static void
+nv50_disp_atomic_commit_work(struct work_struct *work)
+{
+ struct drm_atomic_state *state =
+ container_of(work, typeof(*state), commit_work);
+ nv50_disp_atomic_commit_tail(state);
+}
+
+static int
+nv50_disp_atomic_commit(struct drm_device *dev,
+ struct drm_atomic_state *state, bool nonblock)
+{
+ struct nouveau_drm *drm = nouveau_drm(dev);
+ struct nv50_disp *disp = nv50_disp(dev);
+ struct drm_plane_state *plane_state;
+ struct drm_plane *plane;
+ struct drm_crtc *crtc;
+ bool active = false;
+ int ret, i;
+
+ ret = pm_runtime_get_sync(dev->dev);
+ if (ret < 0 && ret != -EACCES)
return ret;
+
+ ret = drm_atomic_helper_setup_commit(state, nonblock);
+ if (ret)
+ goto done;
+
+ INIT_WORK(&state->commit_work, nv50_disp_atomic_commit_work);
+
+ ret = drm_atomic_helper_prepare_planes(dev, state);
+ if (ret)
+ goto done;
+
+ if (!nonblock) {
+ ret = drm_atomic_helper_wait_for_fences(dev, state, true);
+ if (ret)
+ goto done;
+ }
+
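+ /* Arm a completion notifier for each window with a new image, alternating between two notifier slots. */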
+ for_each_plane_in_state(state, plane, plane_state, i) {
+ struct nv50_wndw_atom *asyw = nv50_wndw_atom(plane_state);
+ struct nv50_wndw *wndw = nv50_wndw(plane);
+ if (asyw->set.image) {
+ asyw->ntfy.handle = wndw->dmac->sync.handle;
+ asyw->ntfy.offset = wndw->ntfy;
+ asyw->ntfy.awaken = false;
+ asyw->set.ntfy = true;
+ nouveau_bo_wr32(disp->sync, wndw->ntfy / 4, 0x00000000);
+ wndw->ntfy ^= 0x10;
+ }
+ }
+
+ drm_atomic_helper_swap_state(state, true);
+ drm_atomic_state_get(state);
+
+ if (nonblock)
+ queue_work(system_unbound_wq, &state->commit_work);
+ else
+ nv50_disp_atomic_commit_tail(state);
+
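+ /* Keep a runtime PM reference for as long as any head remains active. */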
+ drm_for_each_crtc(crtc, dev) {
+ if (crtc->state->enable) {
+ if (!drm->have_disp_power_ref) {
+ drm->have_disp_power_ref = true;
+ return ret;
+ }
+ active = true;
+ break;
+ }
+ }
+
+ if (!active && drm->have_disp_power_ref) {
+ pm_runtime_put_autosuspend(dev->dev);
+ drm->have_disp_power_ref = false;
+ }
+
+done:
+ pm_runtime_put_autosuspend(dev->dev);
+ return ret;
+}
+
+static struct nv50_outp_atom *
+nv50_disp_outp_atomic_add(struct nv50_atom *atom, struct drm_encoder *encoder)
+{
+ struct nv50_outp_atom *outp;
+
+ list_for_each_entry(outp, &atom->outp, head) {
+ if (outp->encoder == encoder)
+ return outp;
+ }
+
+ outp = kzalloc(sizeof(*outp), GFP_KERNEL);
+ if (!outp)
+ return ERR_PTR(-ENOMEM);
+
+ list_add(&outp->head, &atom->outp);
+ outp->encoder = encoder;
+ return outp;
+}
+
+static int
+nv50_disp_outp_atomic_check_clr(struct nv50_atom *atom,
+ struct drm_connector *connector)
+{
+ struct drm_encoder *encoder = connector->state->best_encoder;
+ struct drm_crtc_state *crtc_state;
+ struct drm_crtc *crtc;
+ struct nv50_outp_atom *outp;
+
+ if (!(crtc = connector->state->crtc))
+ return 0;
+
+ crtc_state = drm_atomic_get_existing_crtc_state(&atom->state, crtc);
+ if (crtc->state->active && drm_atomic_crtc_needs_modeset(crtc_state)) {
+ outp = nv50_disp_outp_atomic_add(atom, encoder);
+ if (IS_ERR(outp))
+ return PTR_ERR(outp);
+
+ if (outp->encoder->encoder_type == DRM_MODE_ENCODER_DPMST) {
+ outp->flush_disable = true;
+ atom->flush_disable = true;
+ }
+ outp->clr.ctrl = true;
+ atom->lock_core = true;
}
return 0;
}
-static void
-nv50_fb_dtor(struct drm_framebuffer *fb)
+static int
+nv50_disp_outp_atomic_check_set(struct nv50_atom *atom,
+ struct drm_connector_state *connector_state)
{
+ struct drm_encoder *encoder = connector_state->best_encoder;
+ struct drm_crtc_state *crtc_state;
+ struct drm_crtc *crtc;
+ struct nv50_outp_atom *outp;
+
+ if (!(crtc = connector_state->crtc))
+ return 0;
+
+ crtc_state = drm_atomic_get_existing_crtc_state(&atom->state, crtc);
+ if (crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state)) {
+ outp = nv50_disp_outp_atomic_add(atom, encoder);
+ if (IS_ERR(outp))
+ return PTR_ERR(outp);
+
+ outp->set.ctrl = true;
+ atom->lock_core = true;
+ }
+
+ return 0;
}
static int
-nv50_fb_ctor(struct drm_framebuffer *fb)
-{
- struct nouveau_framebuffer *nv_fb = nouveau_framebuffer(fb);
- struct nouveau_drm *drm = nouveau_drm(fb->dev);
- struct nouveau_bo *nvbo = nv_fb->nvbo;
- struct nv50_disp *disp = nv50_disp(fb->dev);
- u8 kind = nouveau_bo_tile_layout(nvbo) >> 8;
- u8 tile = nvbo->tile_mode;
-
- if (drm->device.info.chipset >= 0xc0)
- tile >>= 4; /* yep.. */
-
- switch (fb->depth) {
- case 8: nv_fb->r_format = 0x1e00; break;
- case 15: nv_fb->r_format = 0xe900; break;
- case 16: nv_fb->r_format = 0xe800; break;
- case 24:
- case 32: nv_fb->r_format = 0xcf00; break;
- case 30: nv_fb->r_format = 0xd100; break;
- default:
- NV_ERROR(drm, "unknown depth %d\n", fb->depth);
- return -EINVAL;
+nv50_disp_atomic_check(struct drm_device *dev, struct drm_atomic_state *state)
+{
+ struct nv50_atom *atom = nv50_atom(state);
+ struct drm_connector_state *connector_state;
+ struct drm_connector *connector;
+ int ret, i;
+
+ ret = drm_atomic_helper_check(dev, state);
+ if (ret)
+ return ret;
+
+ for_each_connector_in_state(state, connector, connector_state, i) {
+ ret = nv50_disp_outp_atomic_check_clr(atom, connector);
+ if (ret)
+ return ret;
+
+ ret = nv50_disp_outp_atomic_check_set(atom, connector_state);
+ if (ret)
+ return ret;
}
- if (disp->disp->oclass < G82_DISP) {
- nv_fb->r_pitch = kind ? (((fb->pitches[0] / 4) << 4) | tile) :
- (fb->pitches[0] | 0x00100000);
- nv_fb->r_format |= kind << 16;
- } else
- if (disp->disp->oclass < GF110_DISP) {
- nv_fb->r_pitch = kind ? (((fb->pitches[0] / 4) << 4) | tile) :
- (fb->pitches[0] | 0x00100000);
- } else {
- nv_fb->r_pitch = kind ? (((fb->pitches[0] / 4) << 4) | tile) :
- (fb->pitches[0] | 0x01000000);
+ return 0;
+}
+
+static void
+nv50_disp_atomic_state_clear(struct drm_atomic_state *state)
+{
+ struct nv50_atom *atom = nv50_atom(state);
+ struct nv50_outp_atom *outp, *outt;
+
+ list_for_each_entry_safe(outp, outt, &atom->outp, head) {
+ list_del(&outp->head);
+ kfree(outp);
}
- nv_fb->r_handle = 0xffff0000 | kind;
- return nv50_fbdma_init(fb->dev, nv_fb->r_handle, 0,
- drm->device.info.ram_user, kind);
+ drm_atomic_state_default_clear(state);
+}
+
+static void
+nv50_disp_atomic_state_free(struct drm_atomic_state *state)
+{
+ struct nv50_atom *atom = nv50_atom(state);
+ drm_atomic_state_default_release(&atom->state);
+ kfree(atom);
}
+static struct drm_atomic_state *
+nv50_disp_atomic_state_alloc(struct drm_device *dev)
+{
+ struct nv50_atom *atom;
+ if (!(atom = kzalloc(sizeof(*atom), GFP_KERNEL)) ||
+ drm_atomic_state_init(dev, &atom->state) < 0) {
+ kfree(atom);
+ return NULL;
+ }
+ INIT_LIST_HEAD(&atom->outp);
+ return &atom->state;
+}
+
+static const struct drm_mode_config_funcs
+nv50_disp_func = {
+ .fb_create = nouveau_user_framebuffer_create,
+ .output_poll_changed = nouveau_fbcon_output_poll_changed,
+ .atomic_check = nv50_disp_atomic_check,
+ .atomic_commit = nv50_disp_atomic_commit,
+ .atomic_state_alloc = nv50_disp_atomic_state_alloc,
+ .atomic_state_clear = nv50_disp_atomic_state_clear,
+ .atomic_state_free = nv50_disp_atomic_state_free,
+};
+
/******************************************************************************
* Init
*****************************************************************************/
@@ -2471,12 +4343,30 @@ nv50_fb_ctor(struct drm_framebuffer *fb)
void
nv50_display_fini(struct drm_device *dev)
{
+ struct nouveau_encoder *nv_encoder;
+ struct drm_encoder *encoder;
+ struct drm_plane *plane;
+
+ drm_for_each_plane(plane, dev) {
+ struct nv50_wndw *wndw = nv50_wndw(plane);
+ if (plane->funcs != &nv50_wndw)
+ continue;
+ nv50_wndw_fini(wndw);
+ }
+
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+ if (encoder->encoder_type != DRM_MODE_ENCODER_DPMST) {
+ nv_encoder = nouveau_encoder(encoder);
+ nv50_mstm_fini(nv_encoder->dp.mstm);
+ }
+ }
}
int
nv50_display_init(struct drm_device *dev)
{
- struct nv50_disp *disp = nv50_disp(dev);
+ struct drm_encoder *encoder;
+ struct drm_plane *plane;
struct drm_crtc *crtc;
u32 *push;
@@ -2484,16 +4374,35 @@ nv50_display_init(struct drm_device *dev)
if (!push)
return -EBUSY;
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
- struct nv50_sync *sync = nv50_sync(crtc);
-
- nv50_crtc_lut_load(crtc);
- nouveau_bo_wr32(disp->sync, sync->addr / 4, sync->data);
- }
-
evo_mthd(push, 0x0088, 1);
evo_data(push, nv50_mast(dev)->base.sync.handle);
evo_kick(push, nv50_mast(dev));
+
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+ if (encoder->encoder_type != DRM_MODE_ENCODER_DPMST) {
+ const struct drm_encoder_helper_funcs *help;
+ struct nouveau_encoder *nv_encoder;
+
+ nv_encoder = nouveau_encoder(encoder);
+ help = encoder->helper_private;
+ if (help && help->dpms)
+ help->dpms(encoder, DRM_MODE_DPMS_ON);
+
+ nv50_mstm_init(nv_encoder->dp.mstm);
+ }
+ }
+
+ drm_for_each_crtc(crtc, dev) {
+ nv50_head_lut_load(crtc);
+ }
+
+ drm_for_each_plane(plane, dev) {
+ struct nv50_wndw *wndw = nv50_wndw(plane);
+ if (plane->funcs != &nv50_wndw)
+ continue;
+ nv50_wndw_init(wndw);
+ }
+
return 0;
}
@@ -2501,11 +4410,6 @@ void
nv50_display_destroy(struct drm_device *dev)
{
struct nv50_disp *disp = nv50_disp(dev);
- struct nv50_fbdma *fbdma, *fbtmp;
-
- list_for_each_entry_safe(fbdma, fbtmp, &disp->fbdma, head) {
- nv50_fbdma_fini(fbdma);
- }
nv50_dmac_destroy(&disp->mast.base, disp->disp);
@@ -2518,6 +4422,10 @@ nv50_display_destroy(struct drm_device *dev)
kfree(disp);
}
+MODULE_PARM_DESC(atomic, "Expose atomic ioctl (default: disabled)");
+static int nouveau_atomic = 0;
+module_param_named(atomic, nouveau_atomic, int, 0400);
+
int
nv50_display_create(struct drm_device *dev)
{
@@ -2532,15 +4440,17 @@ nv50_display_create(struct drm_device *dev)
disp = kzalloc(sizeof(*disp), GFP_KERNEL);
if (!disp)
return -ENOMEM;
- INIT_LIST_HEAD(&disp->fbdma);
+
+ mutex_init(&disp->mutex);
nouveau_display(dev)->priv = disp;
nouveau_display(dev)->dtor = nv50_display_destroy;
nouveau_display(dev)->init = nv50_display_init;
nouveau_display(dev)->fini = nv50_display_fini;
- nouveau_display(dev)->fb_ctor = nv50_fb_ctor;
- nouveau_display(dev)->fb_dtor = nv50_fb_dtor;
disp->disp = &nouveau_display(dev)->disp;
+ dev->mode_config.funcs = &nv50_disp_func;
+ if (nouveau_atomic)
+ dev->driver->driver_features |= DRIVER_ATOMIC;
/* small shared memory area we use for notifiers and semaphores */
ret = nouveau_bo_new(dev, 4096, 0x1000, TTM_PL_FLAG_VRAM,
@@ -2572,7 +4482,7 @@ nv50_display_create(struct drm_device *dev)
crtcs = 2;
for (i = 0; i < crtcs; i++) {
- ret = nv50_crtc_create(dev, i);
+ ret = nv50_head_create(dev, i);
if (ret)
goto out;
}
diff --git a/drivers/gpu/drm/nouveau/nv50_display.h b/drivers/gpu/drm/nouveau/nv50_display.h
index 70da347aa8c5..918187cee84b 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.h
+++ b/drivers/gpu/drm/nouveau/nv50_display.h
@@ -35,11 +35,4 @@ int nv50_display_create(struct drm_device *);
void nv50_display_destroy(struct drm_device *);
int nv50_display_init(struct drm_device *);
void nv50_display_fini(struct drm_device *);
-
-void nv50_display_flip_stop(struct drm_crtc *);
-int nv50_display_flip_next(struct drm_crtc *, struct drm_framebuffer *,
- struct nouveau_channel *, u32 swap_interval);
-
-struct nouveau_bo *nv50_display_crtc_sema(struct drm_device *, int head);
-
#endif /* __NV50_DISPLAY_H__ */
diff --git a/drivers/gpu/drm/nouveau/nv50_fbcon.c b/drivers/gpu/drm/nouveau/nv50_fbcon.c
index af3d3c49411a..327dcd7901ed 100644
--- a/drivers/gpu/drm/nouveau/nv50_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nv50_fbcon.c
@@ -30,7 +30,7 @@ int
nv50_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
{
struct nouveau_fbdev *nfbdev = info->par;
- struct nouveau_drm *drm = nouveau_drm(nfbdev->dev);
+ struct nouveau_drm *drm = nouveau_drm(nfbdev->helper.dev);
struct nouveau_channel *chan = drm->channel;
int ret;
@@ -65,7 +65,7 @@ int
nv50_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region)
{
struct nouveau_fbdev *nfbdev = info->par;
- struct nouveau_drm *drm = nouveau_drm(nfbdev->dev);
+ struct nouveau_drm *drm = nouveau_drm(nfbdev->helper.dev);
struct nouveau_channel *chan = drm->channel;
int ret;
@@ -93,7 +93,7 @@ int
nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
{
struct nouveau_fbdev *nfbdev = info->par;
- struct nouveau_drm *drm = nouveau_drm(nfbdev->dev);
+ struct nouveau_drm *drm = nouveau_drm(nfbdev->helper.dev);
struct nouveau_channel *chan = drm->channel;
uint32_t dwords, *data = (uint32_t *)image->data;
uint32_t mask = ~(~0 >> (32 - info->var.bits_per_pixel));
@@ -148,8 +148,8 @@ int
nv50_fbcon_accel_init(struct fb_info *info)
{
struct nouveau_fbdev *nfbdev = info->par;
- struct nouveau_framebuffer *fb = &nfbdev->nouveau_fb;
- struct drm_device *dev = nfbdev->dev;
+ struct nouveau_framebuffer *fb = nouveau_framebuffer(nfbdev->helper.fb);
+ struct drm_device *dev = nfbdev->helper.dev;
struct nouveau_drm *drm = nouveau_drm(dev);
struct nouveau_channel *chan = drm->channel;
int ret, format;
diff --git a/drivers/gpu/drm/nouveau/nv50_fence.c b/drivers/gpu/drm/nouveau/nv50_fence.c
index 4d6f202b7770..f68c7054fd53 100644
--- a/drivers/gpu/drm/nouveau/nv50_fence.c
+++ b/drivers/gpu/drm/nouveau/nv50_fence.c
@@ -35,13 +35,12 @@
static int
nv50_fence_context_new(struct nouveau_channel *chan)
{
- struct drm_device *dev = chan->drm->dev;
struct nv10_fence_priv *priv = chan->drm->fence;
struct nv10_fence_chan *fctx;
struct ttm_mem_reg *mem = &priv->bo->bo.mem;
u32 start = mem->start * PAGE_SIZE;
u32 limit = start + mem->size - 1;
- int ret, i;
+ int ret;
fctx = chan->fence = kzalloc(sizeof(*fctx), GFP_KERNEL);
if (!fctx)
@@ -60,23 +59,6 @@ nv50_fence_context_new(struct nouveau_channel *chan)
.limit = limit,
}, sizeof(struct nv_dma_v0),
&fctx->sema);
-
- /* dma objects for display sync channel semaphore blocks */
- for (i = 0; !ret && i < dev->mode_config.num_crtc; i++) {
- struct nouveau_bo *bo = nv50_display_crtc_sema(dev, i);
- u32 start = bo->bo.mem.start * PAGE_SIZE;
- u32 limit = start + bo->bo.mem.size - 1;
-
- ret = nvif_object_init(&chan->user, NvEvoSema0 + i,
- NV_DMA_IN_MEMORY, &(struct nv_dma_v0) {
- .target = NV_DMA_V0_TARGET_VRAM,
- .access = NV_DMA_V0_ACCESS_RDWR,
- .start = start,
- .limit = limit,
- }, sizeof(struct nv_dma_v0),
- &fctx->head[i]);
- }
-
if (ret)
nv10_fence_context_del(chan);
return ret;
@@ -97,7 +79,7 @@ nv50_fence_create(struct nouveau_drm *drm)
priv->base.context_new = nv50_fence_context_new;
priv->base.context_del = nv10_fence_context_del;
priv->base.contexts = 127;
- priv->base.context_base = fence_context_alloc(priv->base.contexts);
+ priv->base.context_base = dma_fence_context_alloc(priv->base.contexts);
spin_lock_init(&priv->lock);
ret = nouveau_bo_new(drm->dev, 4096, 0x1000, TTM_PL_FLAG_VRAM,
diff --git a/drivers/gpu/drm/nouveau/nv84_fence.c b/drivers/gpu/drm/nouveau/nv84_fence.c
index 18bde9d8e6d6..52b87ae83e7b 100644
--- a/drivers/gpu/drm/nouveau/nv84_fence.c
+++ b/drivers/gpu/drm/nouveau/nv84_fence.c
@@ -28,13 +28,6 @@
#include "nv50_display.h"
-u64
-nv84_fence_crtc(struct nouveau_channel *chan, int crtc)
-{
- struct nv84_fence_chan *fctx = chan->fence;
- return fctx->dispc_vma[crtc].offset;
-}
-
static int
nv84_fence_emit32(struct nouveau_channel *chan, u64 virtual, u32 sequence)
{
@@ -110,15 +103,8 @@ nv84_fence_read(struct nouveau_channel *chan)
static void
nv84_fence_context_del(struct nouveau_channel *chan)
{
- struct drm_device *dev = chan->drm->dev;
struct nv84_fence_priv *priv = chan->drm->fence;
struct nv84_fence_chan *fctx = chan->fence;
- int i;
-
- for (i = 0; i < dev->mode_config.num_crtc; i++) {
- struct nouveau_bo *bo = nv50_display_crtc_sema(dev, i);
- nouveau_bo_vma_del(bo, &fctx->dispc_vma[i]);
- }
nouveau_bo_wr32(priv->bo, chan->chid * 16 / 4, fctx->base.sequence);
nouveau_bo_vma_del(priv->bo, &fctx->vma_gart);
@@ -134,7 +120,7 @@ nv84_fence_context_new(struct nouveau_channel *chan)
struct nouveau_cli *cli = (void *)chan->user.client;
struct nv84_fence_priv *priv = chan->drm->fence;
struct nv84_fence_chan *fctx;
- int ret, i;
+ int ret;
fctx = chan->fence = kzalloc(sizeof(*fctx), GFP_KERNEL);
if (!fctx)
@@ -154,12 +140,6 @@ nv84_fence_context_new(struct nouveau_channel *chan)
&fctx->vma_gart);
}
- /* map display semaphore buffers into channel's vm */
- for (i = 0; !ret && i < chan->drm->dev->mode_config.num_crtc; i++) {
- struct nouveau_bo *bo = nv50_display_crtc_sema(chan->drm->dev, i);
- ret = nouveau_bo_vma_add(bo, cli->vm, &fctx->dispc_vma[i]);
- }
-
if (ret)
nv84_fence_context_del(chan);
return ret;
@@ -229,7 +209,7 @@ nv84_fence_create(struct nouveau_drm *drm)
priv->base.context_del = nv84_fence_context_del;
priv->base.contexts = fifo->nr;
- priv->base.context_base = fence_context_alloc(priv->base.contexts);
+ priv->base.context_base = dma_fence_context_alloc(priv->base.contexts);
priv->base.uevent = true;
/* Use VRAM if there is any ; otherwise fallback to system memory */
diff --git a/drivers/gpu/drm/nouveau/nvc0_fbcon.c b/drivers/gpu/drm/nouveau/nvc0_fbcon.c
index 054b6a056d99..90f27bfa381f 100644
--- a/drivers/gpu/drm/nouveau/nvc0_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nvc0_fbcon.c
@@ -30,7 +30,7 @@ int
nvc0_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
{
struct nouveau_fbdev *nfbdev = info->par;
- struct nouveau_drm *drm = nouveau_drm(nfbdev->dev);
+ struct nouveau_drm *drm = nouveau_drm(nfbdev->helper.dev);
struct nouveau_channel *chan = drm->channel;
int ret;
@@ -65,7 +65,7 @@ int
nvc0_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region)
{
struct nouveau_fbdev *nfbdev = info->par;
- struct nouveau_drm *drm = nouveau_drm(nfbdev->dev);
+ struct nouveau_drm *drm = nouveau_drm(nfbdev->helper.dev);
struct nouveau_channel *chan = drm->channel;
int ret;
@@ -93,7 +93,7 @@ int
nvc0_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
{
struct nouveau_fbdev *nfbdev = info->par;
- struct nouveau_drm *drm = nouveau_drm(nfbdev->dev);
+ struct nouveau_drm *drm = nouveau_drm(nfbdev->helper.dev);
struct nouveau_channel *chan = drm->channel;
uint32_t dwords, *data = (uint32_t *)image->data;
uint32_t mask = ~(~0 >> (32 - info->var.bits_per_pixel));
@@ -148,8 +148,8 @@ int
nvc0_fbcon_accel_init(struct fb_info *info)
{
struct nouveau_fbdev *nfbdev = info->par;
- struct drm_device *dev = nfbdev->dev;
- struct nouveau_framebuffer *fb = &nfbdev->nouveau_fb;
+ struct drm_device *dev = nfbdev->helper.dev;
+ struct nouveau_framebuffer *fb = nouveau_framebuffer(nfbdev->helper.fb);
struct nouveau_drm *drm = nouveau_drm(dev);
struct nouveau_channel *chan = drm->channel;
int ret, format;
diff --git a/drivers/gpu/drm/nouveau/nvif/client.c b/drivers/gpu/drm/nouveau/nvif/client.c
index 1ee9294eca2e..29c20dfd894d 100644
--- a/drivers/gpu/drm/nouveau/nvif/client.c
+++ b/drivers/gpu/drm/nouveau/nvif/client.c
@@ -55,7 +55,7 @@ nvif_client_fini(struct nvif_client *client)
}
}
-const struct nvif_driver *
+static const struct nvif_driver *
nvif_drivers[] = {
#ifdef __KERNEL__
&nvif_driver_nvkm,
diff --git a/drivers/gpu/drm/nouveau/nvif/notify.c b/drivers/gpu/drm/nouveau/nvif/notify.c
index b0787ff833ef..278b3933dc96 100644
--- a/drivers/gpu/drm/nouveau/nvif/notify.c
+++ b/drivers/gpu/drm/nouveau/nvif/notify.c
@@ -155,10 +155,8 @@ nvif_notify_fini(struct nvif_notify *notify)
int ret = nvif_notify_put(notify);
if (ret >= 0 && object) {
ret = nvif_object_ioctl(object, &args, sizeof(args), NULL);
- if (ret == 0) {
- notify->object = NULL;
- kfree((void *)notify->data);
- }
+ notify->object = NULL;
+ kfree((void *)notify->data);
}
return ret;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/firmware.c b/drivers/gpu/drm/nouveau/nvkm/core/firmware.c
index 34ecd4a7e0c1..058ff46b5f16 100644
--- a/drivers/gpu/drm/nouveau/nvkm/core/firmware.c
+++ b/drivers/gpu/drm/nouveau/nvkm/core/firmware.c
@@ -20,6 +20,7 @@
* DEALINGS IN THE SOFTWARE.
*/
#include <core/device.h>
+#include <core/firmware.h>
/**
* nvkm_firmware_get - load firmware from the official nvidia/chip/ directory
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ce/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/ce/Kbuild
index a4458a8eb30a..255d81ccf916 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/ce/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/ce/Kbuild
@@ -4,4 +4,4 @@ nvkm-y += nvkm/engine/ce/gk104.o
nvkm-y += nvkm/engine/ce/gm107.o
nvkm-y += nvkm/engine/ce/gm200.o
nvkm-y += nvkm/engine/ce/gp100.o
-nvkm-y += nvkm/engine/ce/gp104.o
+nvkm-y += nvkm/engine/ce/gp102.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ce/fuc/gf100.fuc3.h b/drivers/gpu/drm/nouveau/nvkm/engine/ce/fuc/gf100.fuc3.h
index 05bb65608dfe..d9ca9636a3e3 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/ce/fuc/gf100.fuc3.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/ce/fuc/gf100.fuc3.h
@@ -1,4 +1,4 @@
-uint32_t gf100_ce_data[] = {
+static uint32_t gf100_ce_data[] = {
/* 0x0000: ctx_object */
0x00000000,
/* 0x0004: ctx_query_address_high */
@@ -171,7 +171,7 @@ uint32_t gf100_ce_data[] = {
0x00000800,
};
-uint32_t gf100_ce_code[] = {
+static uint32_t gf100_ce_code[] = {
/* 0x0000: main */
0x04fe04bd,
0x3517f000,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ce/fuc/gt215.fuc3.h b/drivers/gpu/drm/nouveau/nvkm/engine/ce/fuc/gt215.fuc3.h
index 972281d10f38..f0a1cf31c7ca 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/ce/fuc/gt215.fuc3.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/ce/fuc/gt215.fuc3.h
@@ -1,4 +1,4 @@
-uint32_t gt215_ce_data[] = {
+static uint32_t gt215_ce_data[] = {
/* 0x0000: ctx_object */
0x00000000,
/* 0x0004: ctx_dma */
@@ -183,7 +183,7 @@ uint32_t gt215_ce_data[] = {
0x00000800,
};
-uint32_t gt215_ce_code[] = {
+static uint32_t gt215_ce_code[] = {
/* 0x0000: main */
0x04fe04bd,
0x3517f000,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ce/gp104.c b/drivers/gpu/drm/nouveau/nvkm/engine/ce/gp102.c
index 20e019788a53..985c8f653874 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/ce/gp104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/ce/gp102.c
@@ -27,7 +27,7 @@
#include <nvif/class.h>
static const struct nvkm_engine_func
-gp104_ce = {
+gp102_ce = {
.intr = gp100_ce_intr,
.sclass = {
{ -1, -1, PASCAL_DMA_COPY_B },
@@ -37,8 +37,8 @@ gp104_ce = {
};
int
-gp104_ce_new(struct nvkm_device *device, int index,
+gp102_ce_new(struct nvkm_device *device, int index,
struct nvkm_engine **pengine)
{
- return nvkm_engine_new_(&gp104_ce, device, index, true, pengine);
+ return nvkm_engine_new_(&gp102_ce, device, index, true, pengine);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
index 7218a067a6c5..cceda959b47c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
@@ -1357,7 +1357,7 @@ nvc0_chipset = {
.pmu = gf100_pmu_new,
.therm = gt215_therm_new,
.timer = nv41_timer_new,
- .volt = nv40_volt_new,
+ .volt = gf100_volt_new,
.ce[0] = gf100_ce_new,
.ce[1] = gf100_ce_new,
.disp = gt215_disp_new,
@@ -1394,7 +1394,7 @@ nvc1_chipset = {
.pmu = gf100_pmu_new,
.therm = gt215_therm_new,
.timer = nv41_timer_new,
- .volt = nv40_volt_new,
+ .volt = gf100_volt_new,
.ce[0] = gf100_ce_new,
.disp = gt215_disp_new,
.dma = gf100_dma_new,
@@ -1430,7 +1430,7 @@ nvc3_chipset = {
.pmu = gf100_pmu_new,
.therm = gt215_therm_new,
.timer = nv41_timer_new,
- .volt = nv40_volt_new,
+ .volt = gf100_volt_new,
.ce[0] = gf100_ce_new,
.disp = gt215_disp_new,
.dma = gf100_dma_new,
@@ -1466,7 +1466,7 @@ nvc4_chipset = {
.pmu = gf100_pmu_new,
.therm = gt215_therm_new,
.timer = nv41_timer_new,
- .volt = nv40_volt_new,
+ .volt = gf100_volt_new,
.ce[0] = gf100_ce_new,
.ce[1] = gf100_ce_new,
.disp = gt215_disp_new,
@@ -1503,7 +1503,7 @@ nvc8_chipset = {
.pmu = gf100_pmu_new,
.therm = gt215_therm_new,
.timer = nv41_timer_new,
- .volt = nv40_volt_new,
+ .volt = gf100_volt_new,
.ce[0] = gf100_ce_new,
.ce[1] = gf100_ce_new,
.disp = gt215_disp_new,
@@ -1540,7 +1540,7 @@ nvce_chipset = {
.pmu = gf100_pmu_new,
.therm = gt215_therm_new,
.timer = nv41_timer_new,
- .volt = nv40_volt_new,
+ .volt = gf100_volt_new,
.ce[0] = gf100_ce_new,
.ce[1] = gf100_ce_new,
.disp = gt215_disp_new,
@@ -1577,7 +1577,7 @@ nvcf_chipset = {
.pmu = gf100_pmu_new,
.therm = gt215_therm_new,
.timer = nv41_timer_new,
- .volt = nv40_volt_new,
+ .volt = gf100_volt_new,
.ce[0] = gf100_ce_new,
.disp = gt215_disp_new,
.dma = gf100_dma_new,
@@ -1612,6 +1612,7 @@ nvd7_chipset = {
.pci = gf106_pci_new,
.therm = gf119_therm_new,
.timer = nv41_timer_new,
+ .volt = gf100_volt_new,
.ce[0] = gf100_ce_new,
.disp = gf119_disp_new,
.dma = gf119_dma_new,
@@ -1647,7 +1648,7 @@ nvd9_chipset = {
.pmu = gf119_pmu_new,
.therm = gf119_therm_new,
.timer = nv41_timer_new,
- .volt = nv40_volt_new,
+ .volt = gf100_volt_new,
.ce[0] = gf100_ce_new,
.disp = gf119_disp_new,
.dma = gf119_dma_new,
@@ -1851,7 +1852,7 @@ nvf1_chipset = {
.fb = gk104_fb_new,
.fuse = gf100_fuse_new,
.gpio = gk104_gpio_new,
- .i2c = gf119_i2c_new,
+ .i2c = gk104_i2c_new,
.ibus = gk104_ibus_new,
.iccsense = gf100_iccsense_new,
.imem = nv50_instmem_new,
@@ -1965,7 +1966,7 @@ nv117_chipset = {
.fb = gm107_fb_new,
.fuse = gm107_fuse_new,
.gpio = gk104_gpio_new,
- .i2c = gf119_i2c_new,
+ .i2c = gk104_i2c_new,
.ibus = gk104_ibus_new,
.iccsense = gf100_iccsense_new,
.imem = nv50_instmem_new,
@@ -1999,7 +2000,7 @@ nv118_chipset = {
.fb = gm107_fb_new,
.fuse = gm107_fuse_new,
.gpio = gk104_gpio_new,
- .i2c = gf119_i2c_new,
+ .i2c = gk104_i2c_new,
.ibus = gk104_ibus_new,
.iccsense = gf100_iccsense_new,
.imem = nv50_instmem_new,
@@ -2130,7 +2131,7 @@ nv12b_chipset = {
.bar = gk20a_bar_new,
.bus = gf100_bus_new,
.clk = gm20b_clk_new,
- .fb = gk20a_fb_new,
+ .fb = gm20b_fb_new,
.fuse = gm107_fuse_new,
.ibus = gk20a_ibus_new,
.imem = gk20a_instmem_new,
@@ -2166,6 +2167,7 @@ nv130_chipset = {
.mmu = gf100_mmu_new,
.secboot = gm200_secboot_new,
.pci = gp100_pci_new,
+ .pmu = gp100_pmu_new,
.timer = gk20a_timer_new,
.top = gk104_top_new,
.ce[0] = gp100_ce_new,
@@ -2182,13 +2184,71 @@ nv130_chipset = {
};
static const struct nvkm_device_chip
+nv132_chipset = {
+ .name = "GP102",
+ .bar = gf100_bar_new,
+ .bios = nvkm_bios_new,
+ .bus = gf100_bus_new,
+ .devinit = gm200_devinit_new,
+ .fb = gp102_fb_new,
+ .fuse = gm107_fuse_new,
+ .gpio = gk104_gpio_new,
+ .i2c = gm200_i2c_new,
+ .ibus = gm200_ibus_new,
+ .imem = nv50_instmem_new,
+ .ltc = gp100_ltc_new,
+ .mc = gp100_mc_new,
+ .mmu = gf100_mmu_new,
+ .pci = gp100_pci_new,
+ .pmu = gp102_pmu_new,
+ .timer = gk20a_timer_new,
+ .top = gk104_top_new,
+ .ce[0] = gp102_ce_new,
+ .ce[1] = gp102_ce_new,
+ .ce[2] = gp102_ce_new,
+ .ce[3] = gp102_ce_new,
+ .disp = gp102_disp_new,
+ .dma = gf119_dma_new,
+ .fifo = gp100_fifo_new,
+};
+
+static const struct nvkm_device_chip
nv134_chipset = {
.name = "GP104",
.bar = gf100_bar_new,
.bios = nvkm_bios_new,
.bus = gf100_bus_new,
.devinit = gm200_devinit_new,
- .fb = gp104_fb_new,
+ .fb = gp102_fb_new,
+ .fuse = gm107_fuse_new,
+ .gpio = gk104_gpio_new,
+ .i2c = gm200_i2c_new,
+ .ibus = gm200_ibus_new,
+ .imem = nv50_instmem_new,
+ .ltc = gp100_ltc_new,
+ .mc = gp100_mc_new,
+ .mmu = gf100_mmu_new,
+ .pci = gp100_pci_new,
+ .pmu = gp102_pmu_new,
+ .timer = gk20a_timer_new,
+ .top = gk104_top_new,
+ .ce[0] = gp102_ce_new,
+ .ce[1] = gp102_ce_new,
+ .ce[2] = gp102_ce_new,
+ .ce[3] = gp102_ce_new,
+ .disp = gp102_disp_new,
+ .dma = gf119_dma_new,
+ .fifo = gp100_fifo_new,
+};
+
+static const struct nvkm_device_chip
+nv136_chipset = {
+ .name = "GP106",
+ .bar = gf100_bar_new,
+ .bios = nvkm_bios_new,
+ .bus = gf100_bus_new,
+ .devinit = gm200_devinit_new,
+ .fb = gp102_fb_new,
.fuse = gm107_fuse_new,
.gpio = gk104_gpio_new,
.i2c = gm200_i2c_new,
@@ -2198,13 +2258,14 @@ nv134_chipset = {
.mc = gp100_mc_new,
.mmu = gf100_mmu_new,
.pci = gp100_pci_new,
+ .pmu = gp102_pmu_new,
.timer = gk20a_timer_new,
.top = gk104_top_new,
- .ce[0] = gp104_ce_new,
- .ce[1] = gp104_ce_new,
- .ce[2] = gp104_ce_new,
- .ce[3] = gp104_ce_new,
- .disp = gp104_disp_new,
+ .ce[0] = gp102_ce_new,
+ .ce[1] = gp102_ce_new,
+ .ce[2] = gp102_ce_new,
+ .ce[3] = gp102_ce_new,
+ .disp = gp102_disp_new,
.dma = gf119_dma_new,
.fifo = gp100_fifo_new,
};
@@ -2643,7 +2704,9 @@ nvkm_device_ctor(const struct nvkm_device_func *func,
case 0x126: device->chip = &nv126_chipset; break;
case 0x12b: device->chip = &nv12b_chipset; break;
case 0x130: device->chip = &nv130_chipset; break;
+ case 0x132: device->chip = &nv132_chipset; break;
case 0x134: device->chip = &nv134_chipset; break;
+ case 0x136: device->chip = &nv136_chipset; break;
default:
nvdev_error(device, "unknown chipset (%08x)\n", boot0);
goto done;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c
index 62ad0300cfa5..74a1ffa425f7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c
@@ -1665,14 +1665,31 @@ nvkm_device_pci_new(struct pci_dev *pci_dev, const char *cfg, const char *dbg,
*pdevice = &pdev->device;
pdev->pdev = pci_dev;
- return nvkm_device_ctor(&nvkm_device_pci_func, quirk, &pci_dev->dev,
- pci_is_pcie(pci_dev) ? NVKM_DEVICE_PCIE :
- pci_find_capability(pci_dev, PCI_CAP_ID_AGP) ?
- NVKM_DEVICE_AGP : NVKM_DEVICE_PCI,
- (u64)pci_domain_nr(pci_dev->bus) << 32 |
- pci_dev->bus->number << 16 |
- PCI_SLOT(pci_dev->devfn) << 8 |
- PCI_FUNC(pci_dev->devfn), name,
- cfg, dbg, detect, mmio, subdev_mask,
- &pdev->device);
+ ret = nvkm_device_ctor(&nvkm_device_pci_func, quirk, &pci_dev->dev,
+ pci_is_pcie(pci_dev) ? NVKM_DEVICE_PCIE :
+ pci_find_capability(pci_dev, PCI_CAP_ID_AGP) ?
+ NVKM_DEVICE_AGP : NVKM_DEVICE_PCI,
+ (u64)pci_domain_nr(pci_dev->bus) << 32 |
+ pci_dev->bus->number << 16 |
+ PCI_SLOT(pci_dev->devfn) << 8 |
+ PCI_FUNC(pci_dev->devfn), name,
+ cfg, dbg, detect, mmio, subdev_mask,
+ &pdev->device);
+
+ if (ret)
+ return ret;
+
+ /*
+ * Set a preliminary DMA mask based on the .dma_bits member of the
+ * MMU subdevice. This allows other subdevices to create DMA mappings
+ * in their init() or oneinit() methods, which may be called before the
+ * TTM layer sets the DMA mask definitively.
+ * This is necessary for platforms where the default DMA mask of 32 bits
+ * does not cover any system memory, i.e., when all RAM is above 4 GB.
+ */
+ if (pdev->device.mmu)
+ dma_set_mask_and_coherent(&pci_dev->dev,
+ DMA_BIT_MASK(pdev->device.mmu->dma_bits));
+
+ return 0;
}
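
For context on the mask arithmetic above: DMA_BIT_MASK(n) evaluates to an n-bit all-ones value, so a device limited to a 32-bit mask cannot address any page above 4 GiB. A standalone userspace sketch (the DMA_BIT_MASK definition mirrors the kernel's; the program itself is purely illustrative):

/* Illustrative userspace program, not kernel code. */
#include <stdint.h>
#include <stdio.h>

#define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))

int main(void)
{
	uint64_t addr = 1ULL << 32;	/* first byte above 4 GiB */

	printf("32-bit mask: 0x%llx\n", (unsigned long long)DMA_BIT_MASK(32));
	printf("40-bit mask: 0x%llx\n", (unsigned long long)DMA_BIT_MASK(40));
	printf("0x%llx reachable with a 32-bit mask? %s\n",
	       (unsigned long long)addr,
	       (addr & ~DMA_BIT_MASK(32)) ? "no" : "yes");
	return 0;
}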
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c
index 9b638bd905ff..f2bc0b7d9b93 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c
@@ -102,7 +102,7 @@ nvkm_device_tegra_probe_iommu(struct nvkm_device_tegra *tdev)
if (iommu_present(&platform_bus_type)) {
tdev->iommu.domain = iommu_domain_alloc(&platform_bus_type);
- if (IS_ERR(tdev->iommu.domain))
+ if (!tdev->iommu.domain)
goto error;
/*
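
The one-line fix above follows from the allocator's contract: iommu_domain_alloc() returns NULL on failure rather than an ERR_PTR()-encoded errno, so the old IS_ERR() check could never fire. A standalone sketch of the two error conventions (IS_ERR is reimplemented here for illustration only, not taken from the kernel headers):

/* Illustrative userspace program, not kernel code. */
#include <stdio.h>

#define MAX_ERRNO	4095
#define IS_ERR(p)	((unsigned long)(p) >= (unsigned long)-MAX_ERRNO)

int main(void)
{
	void *null_style = NULL;		/* iommu_domain_alloc() failure */
	void *errptr_style = (void *)-12L;	/* ERR_PTR(-ENOMEM) style failure */

	printf("IS_ERR(NULL)    = %d\n", IS_ERR(null_style));	/* 0: check never fires */
	printf("IS_ERR(ERR_PTR) = %d\n", IS_ERR(errptr_style));	/* 1 */
	return 0;
}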
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/user.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/user.c
index 79a8f71cf788..513ee6b79553 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/user.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/user.c
@@ -326,7 +326,7 @@ nvkm_udevice = {
.sclass = nvkm_udevice_child_get,
};
-int
+static int
nvkm_udevice_new(const struct nvkm_oclass *oclass, void *data, u32 size,
struct nvkm_object **pobject)
{
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild
index 77a52b54a31e..fa05d16ae948 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild
@@ -11,7 +11,7 @@ nvkm-y += nvkm/engine/disp/gk110.o
nvkm-y += nvkm/engine/disp/gm107.o
nvkm-y += nvkm/engine/disp/gm200.o
nvkm-y += nvkm/engine/disp/gp100.o
-nvkm-y += nvkm/engine/disp/gp104.o
+nvkm-y += nvkm/engine/disp/gp102.o
nvkm-y += nvkm/engine/disp/outp.o
nvkm-y += nvkm/engine/disp/outpdp.o
@@ -48,14 +48,14 @@ nvkm-y += nvkm/engine/disp/rootgk110.o
nvkm-y += nvkm/engine/disp/rootgm107.o
nvkm-y += nvkm/engine/disp/rootgm200.o
nvkm-y += nvkm/engine/disp/rootgp100.o
-nvkm-y += nvkm/engine/disp/rootgp104.o
+nvkm-y += nvkm/engine/disp/rootgp102.o
nvkm-y += nvkm/engine/disp/channv50.o
nvkm-y += nvkm/engine/disp/changf119.o
nvkm-y += nvkm/engine/disp/dmacnv50.o
nvkm-y += nvkm/engine/disp/dmacgf119.o
-nvkm-y += nvkm/engine/disp/dmacgp104.o
+nvkm-y += nvkm/engine/disp/dmacgp102.o
nvkm-y += nvkm/engine/disp/basenv50.o
nvkm-y += nvkm/engine/disp/baseg84.o
@@ -64,7 +64,7 @@ nvkm-y += nvkm/engine/disp/basegt215.o
nvkm-y += nvkm/engine/disp/basegf119.o
nvkm-y += nvkm/engine/disp/basegk104.o
nvkm-y += nvkm/engine/disp/basegk110.o
-nvkm-y += nvkm/engine/disp/basegp104.o
+nvkm-y += nvkm/engine/disp/basegp102.o
nvkm-y += nvkm/engine/disp/corenv50.o
nvkm-y += nvkm/engine/disp/coreg84.o
@@ -77,7 +77,7 @@ nvkm-y += nvkm/engine/disp/coregk110.o
nvkm-y += nvkm/engine/disp/coregm107.o
nvkm-y += nvkm/engine/disp/coregm200.o
nvkm-y += nvkm/engine/disp/coregp100.o
-nvkm-y += nvkm/engine/disp/coregp104.o
+nvkm-y += nvkm/engine/disp/coregp102.o
nvkm-y += nvkm/engine/disp/ovlynv50.o
nvkm-y += nvkm/engine/disp/ovlyg84.o
@@ -85,7 +85,7 @@ nvkm-y += nvkm/engine/disp/ovlygt200.o
nvkm-y += nvkm/engine/disp/ovlygt215.o
nvkm-y += nvkm/engine/disp/ovlygf119.o
nvkm-y += nvkm/engine/disp/ovlygk104.o
-nvkm-y += nvkm/engine/disp/ovlygp104.o
+nvkm-y += nvkm/engine/disp/ovlygp102.o
nvkm-y += nvkm/engine/disp/piocnv50.o
nvkm-y += nvkm/engine/disp/piocgf119.o
@@ -95,9 +95,11 @@ nvkm-y += nvkm/engine/disp/cursg84.o
nvkm-y += nvkm/engine/disp/cursgt215.o
nvkm-y += nvkm/engine/disp/cursgf119.o
nvkm-y += nvkm/engine/disp/cursgk104.o
+nvkm-y += nvkm/engine/disp/cursgp102.o
nvkm-y += nvkm/engine/disp/oimmnv50.o
nvkm-y += nvkm/engine/disp/oimmg84.o
nvkm-y += nvkm/engine/disp/oimmgt215.o
nvkm-y += nvkm/engine/disp/oimmgf119.o
nvkm-y += nvkm/engine/disp/oimmgk104.o
+nvkm-y += nvkm/engine/disp/oimmgp102.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/basegp104.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/basegp102.c
index 51688e37c54e..8a3cdeef8d2c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/basegp104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/basegp102.c
@@ -27,12 +27,12 @@
#include <nvif/class.h>
const struct nv50_disp_dmac_oclass
-gp104_disp_base_oclass = {
+gp102_disp_base_oclass = {
.base.oclass = GK110_DISP_BASE_CHANNEL_DMA,
.base.minver = 0,
.base.maxver = 0,
.ctor = nv50_disp_base_new,
- .func = &gp104_disp_dmac_func,
+ .func = &gp102_disp_dmac_func,
.mthd = &gf119_disp_base_chan_mthd,
.chid = 1,
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.c
index dd2953bc9264..524a24eae1a0 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.c
@@ -82,7 +82,7 @@ nv50_disp_chan_mthd(struct nv50_disp_chan *chan, int debug)
if (mthd->addr) {
snprintf(cname_, sizeof(cname_), "%s %d",
- mthd->name, chan->chid);
+ mthd->name, chan->chid.user);
cname = cname_;
}
@@ -139,7 +139,7 @@ nv50_disp_chan_uevent_ctor(struct nvkm_object *object, void *data, u32 size,
if (!(ret = nvif_unvers(ret, &data, &size, args->none))) {
notify->size = sizeof(struct nvif_notify_uevent_rep);
notify->types = 1;
- notify->index = chan->chid;
+ notify->index = chan->chid.user;
return 0;
}
@@ -153,27 +153,27 @@ nv50_disp_chan_uevent = {
.fini = nv50_disp_chan_uevent_fini,
};
-int
+static int
nv50_disp_chan_rd32(struct nvkm_object *object, u64 addr, u32 *data)
{
struct nv50_disp_chan *chan = nv50_disp_chan(object);
struct nv50_disp *disp = chan->root->disp;
struct nvkm_device *device = disp->base.engine.subdev.device;
- *data = nvkm_rd32(device, 0x640000 + (chan->chid * 0x1000) + addr);
+ *data = nvkm_rd32(device, 0x640000 + (chan->chid.user * 0x1000) + addr);
return 0;
}
-int
+static int
nv50_disp_chan_wr32(struct nvkm_object *object, u64 addr, u32 data)
{
struct nv50_disp_chan *chan = nv50_disp_chan(object);
struct nv50_disp *disp = chan->root->disp;
struct nvkm_device *device = disp->base.engine.subdev.device;
- nvkm_wr32(device, 0x640000 + (chan->chid * 0x1000) + addr, data);
+ nvkm_wr32(device, 0x640000 + (chan->chid.user * 0x1000) + addr, data);
return 0;
}
-int
+static int
nv50_disp_chan_ntfy(struct nvkm_object *object, u32 type,
struct nvkm_event **pevent)
{
@@ -189,14 +189,14 @@ nv50_disp_chan_ntfy(struct nvkm_object *object, u32 type,
return -EINVAL;
}
-int
+static int
nv50_disp_chan_map(struct nvkm_object *object, u64 *addr, u32 *size)
{
struct nv50_disp_chan *chan = nv50_disp_chan(object);
struct nv50_disp *disp = chan->root->disp;
struct nvkm_device *device = disp->base.engine.subdev.device;
*addr = device->func->resource_addr(device, 0) +
- 0x640000 + (chan->chid * 0x1000);
+ 0x640000 + (chan->chid.user * 0x1000);
*size = 0x001000;
return 0;
}
@@ -243,8 +243,8 @@ nv50_disp_chan_dtor(struct nvkm_object *object)
{
struct nv50_disp_chan *chan = nv50_disp_chan(object);
struct nv50_disp *disp = chan->root->disp;
- if (chan->chid >= 0)
- disp->chan[chan->chid] = NULL;
+ if (chan->chid.user >= 0)
+ disp->chan[chan->chid.user] = NULL;
return chan->func->dtor ? chan->func->dtor(chan) : chan;
}
@@ -263,7 +263,7 @@ nv50_disp_chan = {
int
nv50_disp_chan_ctor(const struct nv50_disp_chan_func *func,
const struct nv50_disp_chan_mthd *mthd,
- struct nv50_disp_root *root, int chid, int head,
+ struct nv50_disp_root *root, int ctrl, int user, int head,
const struct nvkm_oclass *oclass,
struct nv50_disp_chan *chan)
{
@@ -273,21 +273,22 @@ nv50_disp_chan_ctor(const struct nv50_disp_chan_func *func,
chan->func = func;
chan->mthd = mthd;
chan->root = root;
- chan->chid = chid;
+ chan->chid.ctrl = ctrl;
+ chan->chid.user = user;
chan->head = head;
- if (disp->chan[chan->chid]) {
- chan->chid = -1;
+ if (disp->chan[chan->chid.user]) {
+ chan->chid.user = -1;
return -EBUSY;
}
- disp->chan[chan->chid] = chan;
+ disp->chan[chan->chid.user] = chan;
return 0;
}
int
nv50_disp_chan_new_(const struct nv50_disp_chan_func *func,
const struct nv50_disp_chan_mthd *mthd,
- struct nv50_disp_root *root, int chid, int head,
+ struct nv50_disp_root *root, int ctrl, int user, int head,
const struct nvkm_oclass *oclass,
struct nvkm_object **pobject)
{
@@ -297,5 +298,6 @@ nv50_disp_chan_new_(const struct nv50_disp_chan_func *func,
return -ENOMEM;
*pobject = &chan->object;
- return nv50_disp_chan_ctor(func, mthd, root, chid, head, oclass, chan);
+ return nv50_disp_chan_ctor(func, mthd, root, ctrl, user,
+ head, oclass, chan);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.h
index f5f683d9fd20..737b38f6fbd2 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.h
@@ -7,7 +7,11 @@ struct nv50_disp_chan {
const struct nv50_disp_chan_func *func;
const struct nv50_disp_chan_mthd *mthd;
struct nv50_disp_root *root;
- int chid;
+
+ struct {
+ int ctrl;
+ int user;
+ } chid;
int head;
struct nvkm_object object;
@@ -25,11 +29,11 @@ struct nv50_disp_chan_func {
int nv50_disp_chan_ctor(const struct nv50_disp_chan_func *,
const struct nv50_disp_chan_mthd *,
- struct nv50_disp_root *, int chid, int head,
+ struct nv50_disp_root *, int ctrl, int user, int head,
const struct nvkm_oclass *, struct nv50_disp_chan *);
int nv50_disp_chan_new_(const struct nv50_disp_chan_func *,
const struct nv50_disp_chan_mthd *,
- struct nv50_disp_root *, int chid, int head,
+ struct nv50_disp_root *, int ctrl, int user, int head,
const struct nvkm_oclass *, struct nvkm_object **);
extern const struct nv50_disp_chan_func nv50_disp_pioc_func;
@@ -90,13 +94,16 @@ extern const struct nv50_disp_chan_mthd gk104_disp_ovly_chan_mthd;
struct nv50_disp_pioc_oclass {
int (*ctor)(const struct nv50_disp_chan_func *,
const struct nv50_disp_chan_mthd *,
- struct nv50_disp_root *, int chid,
+ struct nv50_disp_root *, int ctrl, int user,
const struct nvkm_oclass *, void *data, u32 size,
struct nvkm_object **);
struct nvkm_sclass base;
const struct nv50_disp_chan_func *func;
const struct nv50_disp_chan_mthd *mthd;
- int chid;
+ struct {
+ int ctrl;
+ int user;
+ } chid;
};
extern const struct nv50_disp_pioc_oclass nv50_disp_oimm_oclass;
@@ -114,15 +121,17 @@ extern const struct nv50_disp_pioc_oclass gf119_disp_curs_oclass;
extern const struct nv50_disp_pioc_oclass gk104_disp_oimm_oclass;
extern const struct nv50_disp_pioc_oclass gk104_disp_curs_oclass;
+extern const struct nv50_disp_pioc_oclass gp102_disp_oimm_oclass;
+extern const struct nv50_disp_pioc_oclass gp102_disp_curs_oclass;
int nv50_disp_curs_new(const struct nv50_disp_chan_func *,
const struct nv50_disp_chan_mthd *,
- struct nv50_disp_root *, int chid,
+ struct nv50_disp_root *, int ctrl, int user,
const struct nvkm_oclass *, void *data, u32 size,
struct nvkm_object **);
int nv50_disp_oimm_new(const struct nv50_disp_chan_func *,
const struct nv50_disp_chan_mthd *,
- struct nv50_disp_root *, int chid,
+ struct nv50_disp_root *, int ctrl, int user,
const struct nvkm_oclass *, void *data, u32 size,
struct nvkm_object **);
#endif
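
To summarise the interface change above: the single chid is split into a control index and a user index because, starting with GP102/GP104, the user-visible channel numbering (see the { 13, 17 } cursor and { 9, 13 } overlay-immediate classes elsewhere in this patch) no longer matches the index used to address the per-channel control registers. An illustrative userspace sketch of how the two indices are consumed, with the register offsets taken from the gf119 PIOC paths in this patch:

/* Illustrative userspace program, not kernel code. */
#include <stdio.h>

struct chid {
	int ctrl;	/* indexes the 0x610490-based control registers */
	int user;	/* indexes interrupt-mask bits and the user area */
};

int main(void)
{
	struct chid curs = { .ctrl = 13, .user = 17 };	/* gp102_disp_curs_oclass */

	printf("control register:  0x%06x\n", 0x610490 + curs.ctrl * 0x10);
	printf("error-intr enable: bit %d of 0x6100a0\n", curs.user);
	printf("user area:         0x%06x\n", 0x640000 + curs.user * 0x1000);
	return 0;
}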
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/coreg94.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/coreg94.c
index 019379a3a01c..c65c9f3ff69f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/coreg94.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/coreg94.c
@@ -26,7 +26,7 @@
#include <nvif/class.h>
-const struct nv50_disp_mthd_list
+static const struct nv50_disp_mthd_list
g94_disp_core_mthd_sor = {
.mthd = 0x0040,
.addr = 0x000008,
@@ -43,8 +43,8 @@ g94_disp_core_chan_mthd = {
.prev = 0x000004,
.data = {
{ "Global", 1, &nv50_disp_core_mthd_base },
- { "DAC", 3, &g84_disp_core_mthd_dac },
- { "SOR", 4, &g94_disp_core_mthd_sor },
+ { "DAC", 3, &g84_disp_core_mthd_dac },
+ { "SOR", 4, &g94_disp_core_mthd_sor },
{ "PIOR", 3, &nv50_disp_core_mthd_pior },
{ "HEAD", 2, &g84_disp_core_mthd_head },
{}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregp104.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregp102.c
index 6922f4007b61..b0df4b752b8c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregp104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregp102.c
@@ -29,7 +29,7 @@
#include <nvif/class.h>
static int
-gp104_disp_core_init(struct nv50_disp_dmac *chan)
+gp102_disp_core_init(struct nv50_disp_dmac *chan)
{
struct nv50_disp *disp = chan->base.root->disp;
struct nvkm_subdev *subdev = &disp->base.engine.subdev;
@@ -59,20 +59,20 @@ gp104_disp_core_init(struct nv50_disp_dmac *chan)
return 0;
}
-const struct nv50_disp_dmac_func
-gp104_disp_core_func = {
- .init = gp104_disp_core_init,
+static const struct nv50_disp_dmac_func
+gp102_disp_core_func = {
+ .init = gp102_disp_core_init,
.fini = gf119_disp_core_fini,
.bind = gf119_disp_dmac_bind,
};
const struct nv50_disp_dmac_oclass
-gp104_disp_core_oclass = {
- .base.oclass = GP104_DISP_CORE_CHANNEL_DMA,
+gp102_disp_core_oclass = {
+ .base.oclass = GP102_DISP_CORE_CHANNEL_DMA,
.base.minver = 0,
.base.maxver = 0,
.ctor = nv50_disp_core_new,
- .func = &gp104_disp_core_func,
+ .func = &gp102_disp_core_func,
.mthd = &gk104_disp_core_chan_mthd,
.chid = 0,
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursg84.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursg84.c
index dd99fc7060b1..fa781b5a7e07 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursg84.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursg84.c
@@ -33,5 +33,5 @@ g84_disp_curs_oclass = {
.base.maxver = 0,
.ctor = nv50_disp_curs_new,
.func = &nv50_disp_pioc_func,
- .chid = 7,
+ .chid = { 7, 7 },
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursgf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursgf119.c
index 2a1574e06ad6..2be6fb052c65 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursgf119.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursgf119.c
@@ -33,5 +33,5 @@ gf119_disp_curs_oclass = {
.base.maxver = 0,
.ctor = nv50_disp_curs_new,
.func = &gf119_disp_pioc_func,
- .chid = 13,
+ .chid = { 13, 13 },
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursgk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursgk104.c
index 28e8f06c9472..2a99db4bf8f8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursgk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursgk104.c
@@ -33,5 +33,5 @@ gk104_disp_curs_oclass = {
.base.maxver = 0,
.ctor = nv50_disp_curs_new,
.func = &gf119_disp_pioc_func,
- .chid = 13,
+ .chid = { 13, 13 },
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursgp102.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursgp102.c
new file mode 100644
index 000000000000..e958210d8105
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursgp102.c
@@ -0,0 +1,37 @@
+/*
+ * Copyright 2016 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#include "channv50.h"
+#include "rootnv50.h"
+
+#include <nvif/class.h>
+
+const struct nv50_disp_pioc_oclass
+gp102_disp_curs_oclass = {
+ .base.oclass = GK104_DISP_CURSOR,
+ .base.minver = 0,
+ .base.maxver = 0,
+ .ctor = nv50_disp_curs_new,
+ .func = &gf119_disp_pioc_func,
+ .chid = { 13, 17 },
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursgt215.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursgt215.c
index d8a4b9ca139c..00a7f3564450 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursgt215.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursgt215.c
@@ -33,5 +33,5 @@ gt215_disp_curs_oclass = {
.base.maxver = 0,
.ctor = nv50_disp_curs_new,
.func = &nv50_disp_pioc_func,
- .chid = 7,
+ .chid = { 7, 7 },
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursnv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursnv50.c
index 8b1320499a0f..82ff82d8c1ab 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursnv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursnv50.c
@@ -33,7 +33,7 @@
int
nv50_disp_curs_new(const struct nv50_disp_chan_func *func,
const struct nv50_disp_chan_mthd *mthd,
- struct nv50_disp_root *root, int chid,
+ struct nv50_disp_root *root, int ctrl, int user,
const struct nvkm_oclass *oclass, void *data, u32 size,
struct nvkm_object **pobject)
{
@@ -54,7 +54,7 @@ nv50_disp_curs_new(const struct nv50_disp_chan_func *func,
} else
return ret;
- return nv50_disp_chan_new_(func, mthd, root, chid + head,
+ return nv50_disp_chan_new_(func, mthd, root, ctrl + head, user + head,
head, oclass, pobject);
}
@@ -65,5 +65,5 @@ nv50_disp_curs_oclass = {
.base.maxver = 0,
.ctor = nv50_disp_curs_new,
.func = &nv50_disp_pioc_func,
- .chid = 7,
+ .chid = { 7, 7 },
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacgf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacgf119.c
index a57f7cef307a..ce7cd74fbd5d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacgf119.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacgf119.c
@@ -32,8 +32,8 @@ gf119_disp_dmac_bind(struct nv50_disp_dmac *chan,
struct nvkm_object *object, u32 handle)
{
return nvkm_ramht_insert(chan->base.root->ramht, object,
- chan->base.chid, -9, handle,
- chan->base.chid << 27 | 0x00000001);
+ chan->base.chid.user, -9, handle,
+ chan->base.chid.user << 27 | 0x00000001);
}
void
@@ -42,22 +42,23 @@ gf119_disp_dmac_fini(struct nv50_disp_dmac *chan)
struct nv50_disp *disp = chan->base.root->disp;
struct nvkm_subdev *subdev = &disp->base.engine.subdev;
struct nvkm_device *device = subdev->device;
- int chid = chan->base.chid;
+ int ctrl = chan->base.chid.ctrl;
+ int user = chan->base.chid.user;
/* deactivate channel */
- nvkm_mask(device, 0x610490 + (chid * 0x0010), 0x00001010, 0x00001000);
- nvkm_mask(device, 0x610490 + (chid * 0x0010), 0x00000003, 0x00000000);
+ nvkm_mask(device, 0x610490 + (ctrl * 0x0010), 0x00001010, 0x00001000);
+ nvkm_mask(device, 0x610490 + (ctrl * 0x0010), 0x00000003, 0x00000000);
if (nvkm_msec(device, 2000,
- if (!(nvkm_rd32(device, 0x610490 + (chid * 0x10)) & 0x001e0000))
+ if (!(nvkm_rd32(device, 0x610490 + (ctrl * 0x10)) & 0x001e0000))
break;
) < 0) {
- nvkm_error(subdev, "ch %d fini: %08x\n", chid,
- nvkm_rd32(device, 0x610490 + (chid * 0x10)));
+ nvkm_error(subdev, "ch %d fini: %08x\n", user,
+ nvkm_rd32(device, 0x610490 + (ctrl * 0x10)));
}
/* disable error reporting and completion notification */
- nvkm_mask(device, 0x610090, 0x00000001 << chid, 0x00000000);
- nvkm_mask(device, 0x6100a0, 0x00000001 << chid, 0x00000000);
+ nvkm_mask(device, 0x610090, 0x00000001 << user, 0x00000000);
+ nvkm_mask(device, 0x6100a0, 0x00000001 << user, 0x00000000);
}
static int
@@ -66,26 +67,27 @@ gf119_disp_dmac_init(struct nv50_disp_dmac *chan)
struct nv50_disp *disp = chan->base.root->disp;
struct nvkm_subdev *subdev = &disp->base.engine.subdev;
struct nvkm_device *device = subdev->device;
- int chid = chan->base.chid;
+ int ctrl = chan->base.chid.ctrl;
+ int user = chan->base.chid.user;
/* enable error reporting */
- nvkm_mask(device, 0x6100a0, 0x00000001 << chid, 0x00000001 << chid);
+ nvkm_mask(device, 0x6100a0, 0x00000001 << user, 0x00000001 << user);
/* initialise channel for dma command submission */
- nvkm_wr32(device, 0x610494 + (chid * 0x0010), chan->push);
- nvkm_wr32(device, 0x610498 + (chid * 0x0010), 0x00010000);
- nvkm_wr32(device, 0x61049c + (chid * 0x0010), 0x00000001);
- nvkm_mask(device, 0x610490 + (chid * 0x0010), 0x00000010, 0x00000010);
- nvkm_wr32(device, 0x640000 + (chid * 0x1000), 0x00000000);
- nvkm_wr32(device, 0x610490 + (chid * 0x0010), 0x00000013);
+ nvkm_wr32(device, 0x610494 + (ctrl * 0x0010), chan->push);
+ nvkm_wr32(device, 0x610498 + (ctrl * 0x0010), 0x00010000);
+ nvkm_wr32(device, 0x61049c + (ctrl * 0x0010), 0x00000001);
+ nvkm_mask(device, 0x610490 + (ctrl * 0x0010), 0x00000010, 0x00000010);
+ nvkm_wr32(device, 0x640000 + (ctrl * 0x1000), 0x00000000);
+ nvkm_wr32(device, 0x610490 + (ctrl * 0x0010), 0x00000013);
/* wait for it to go inactive */
if (nvkm_msec(device, 2000,
- if (!(nvkm_rd32(device, 0x610490 + (chid * 0x10)) & 0x80000000))
+ if (!(nvkm_rd32(device, 0x610490 + (ctrl * 0x10)) & 0x80000000))
break;
) < 0) {
- nvkm_error(subdev, "ch %d init: %08x\n", chid,
- nvkm_rd32(device, 0x610490 + (chid * 0x10)));
+ nvkm_error(subdev, "ch %d init: %08x\n", user,
+ nvkm_rd32(device, 0x610490 + (ctrl * 0x10)));
return -EBUSY;
}
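
The nvkm_msec(...) construct used throughout these init/fini paths is a poll-with-timeout macro: its body is re-evaluated until it executes break or the 2000 ms budget expires, and the whole expression is negative on timeout, hence the ") < 0)" checks. A rough userspace analogue (hypothetical helper, not the kernel macro):

/* Illustrative userspace program, not kernel code. */
#include <stdbool.h>
#include <time.h>

static long long now_ms(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ts.tv_sec * 1000LL + ts.tv_nsec / 1000000;
}

/* Returns 0 once cond(arg) holds, or -1 if budget_ms elapses first. */
static int poll_msec(int budget_ms, bool (*cond)(void *), void *arg)
{
	long long deadline = now_ms() + budget_ms;

	do {
		if (cond(arg))
			return 0;
	} while (now_ms() < deadline);
	return -1;
}

static bool always_true(void *arg)
{
	(void)arg;
	return true;
}

int main(void)
{
	return poll_msec(2000, always_true, (void *)0) == 0 ? 0 : 1;
}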
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacgp104.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacgp102.c
index ad24c2c57696..cdead9500343 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacgp104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacgp102.c
@@ -27,31 +27,32 @@
#include <subdev/timer.h>
static int
-gp104_disp_dmac_init(struct nv50_disp_dmac *chan)
+gp102_disp_dmac_init(struct nv50_disp_dmac *chan)
{
struct nv50_disp *disp = chan->base.root->disp;
struct nvkm_subdev *subdev = &disp->base.engine.subdev;
struct nvkm_device *device = subdev->device;
- int chid = chan->base.chid;
+ int ctrl = chan->base.chid.ctrl;
+ int user = chan->base.chid.user;
/* enable error reporting */
- nvkm_mask(device, 0x6100a0, 0x00000001 << chid, 0x00000001 << chid);
+ nvkm_mask(device, 0x6100a0, 0x00000001 << user, 0x00000001 << user);
/* initialise channel for dma command submission */
- nvkm_wr32(device, 0x611494 + (chid * 0x0010), chan->push);
- nvkm_wr32(device, 0x611498 + (chid * 0x0010), 0x00010000);
- nvkm_wr32(device, 0x61149c + (chid * 0x0010), 0x00000001);
- nvkm_mask(device, 0x610490 + (chid * 0x0010), 0x00000010, 0x00000010);
- nvkm_wr32(device, 0x640000 + (chid * 0x1000), 0x00000000);
- nvkm_wr32(device, 0x610490 + (chid * 0x0010), 0x00000013);
+ nvkm_wr32(device, 0x611494 + (ctrl * 0x0010), chan->push);
+ nvkm_wr32(device, 0x611498 + (ctrl * 0x0010), 0x00010000);
+ nvkm_wr32(device, 0x61149c + (ctrl * 0x0010), 0x00000001);
+ nvkm_mask(device, 0x610490 + (ctrl * 0x0010), 0x00000010, 0x00000010);
+ nvkm_wr32(device, 0x640000 + (ctrl * 0x1000), 0x00000000);
+ nvkm_wr32(device, 0x610490 + (ctrl * 0x0010), 0x00000013);
/* wait for it to go inactive */
if (nvkm_msec(device, 2000,
- if (!(nvkm_rd32(device, 0x610490 + (chid * 0x10)) & 0x80000000))
+ if (!(nvkm_rd32(device, 0x610490 + (ctrl * 0x10)) & 0x80000000))
break;
) < 0) {
- nvkm_error(subdev, "ch %d init: %08x\n", chid,
- nvkm_rd32(device, 0x610490 + (chid * 0x10)));
+ nvkm_error(subdev, "ch %d init: %08x\n", user,
+ nvkm_rd32(device, 0x610490 + (ctrl * 0x10)));
return -EBUSY;
}
@@ -59,8 +60,8 @@ gp104_disp_dmac_init(struct nv50_disp_dmac *chan)
}
const struct nv50_disp_dmac_func
-gp104_disp_dmac_func = {
- .init = gp104_disp_dmac_init,
+gp102_disp_dmac_func = {
+ .init = gp102_disp_dmac_init,
.fini = gf119_disp_dmac_fini,
.bind = gf119_disp_dmac_bind,
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacnv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacnv50.c
index 9c6645a357b9..0a1381a84552 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacnv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacnv50.c
@@ -149,7 +149,7 @@ nv50_disp_dmac_new_(const struct nv50_disp_dmac_func *func,
chan->func = func;
ret = nv50_disp_chan_ctor(&nv50_disp_dmac_func_, mthd, root,
- chid, head, oclass, &chan->base);
+ chid, chid, head, oclass, &chan->base);
if (ret)
return ret;
@@ -179,9 +179,9 @@ nv50_disp_dmac_bind(struct nv50_disp_dmac *chan,
struct nvkm_object *object, u32 handle)
{
return nvkm_ramht_insert(chan->base.root->ramht, object,
- chan->base.chid, -10, handle,
- chan->base.chid << 28 |
- chan->base.chid);
+ chan->base.chid.user, -10, handle,
+ chan->base.chid.user << 28 |
+ chan->base.chid.user);
}
static void
@@ -190,21 +190,22 @@ nv50_disp_dmac_fini(struct nv50_disp_dmac *chan)
struct nv50_disp *disp = chan->base.root->disp;
struct nvkm_subdev *subdev = &disp->base.engine.subdev;
struct nvkm_device *device = subdev->device;
- int chid = chan->base.chid;
+ int ctrl = chan->base.chid.ctrl;
+ int user = chan->base.chid.user;
/* deactivate channel */
- nvkm_mask(device, 0x610200 + (chid * 0x0010), 0x00001010, 0x00001000);
- nvkm_mask(device, 0x610200 + (chid * 0x0010), 0x00000003, 0x00000000);
+ nvkm_mask(device, 0x610200 + (ctrl * 0x0010), 0x00001010, 0x00001000);
+ nvkm_mask(device, 0x610200 + (ctrl * 0x0010), 0x00000003, 0x00000000);
if (nvkm_msec(device, 2000,
- if (!(nvkm_rd32(device, 0x610200 + (chid * 0x10)) & 0x001e0000))
+ if (!(nvkm_rd32(device, 0x610200 + (ctrl * 0x10)) & 0x001e0000))
break;
) < 0) {
- nvkm_error(subdev, "ch %d fini timeout, %08x\n", chid,
- nvkm_rd32(device, 0x610200 + (chid * 0x10)));
+ nvkm_error(subdev, "ch %d fini timeout, %08x\n", user,
+ nvkm_rd32(device, 0x610200 + (ctrl * 0x10)));
}
/* disable error reporting and completion notifications */
- nvkm_mask(device, 0x610028, 0x00010001 << chid, 0x00000000 << chid);
+ nvkm_mask(device, 0x610028, 0x00010001 << user, 0x00000000 << user);
}
static int
@@ -213,26 +214,27 @@ nv50_disp_dmac_init(struct nv50_disp_dmac *chan)
struct nv50_disp *disp = chan->base.root->disp;
struct nvkm_subdev *subdev = &disp->base.engine.subdev;
struct nvkm_device *device = subdev->device;
- int chid = chan->base.chid;
+ int ctrl = chan->base.chid.ctrl;
+ int user = chan->base.chid.user;
/* enable error reporting */
- nvkm_mask(device, 0x610028, 0x00010000 << chid, 0x00010000 << chid);
+ nvkm_mask(device, 0x610028, 0x00010000 << user, 0x00010000 << user);
/* initialise channel for dma command submission */
- nvkm_wr32(device, 0x610204 + (chid * 0x0010), chan->push);
- nvkm_wr32(device, 0x610208 + (chid * 0x0010), 0x00010000);
- nvkm_wr32(device, 0x61020c + (chid * 0x0010), chid);
- nvkm_mask(device, 0x610200 + (chid * 0x0010), 0x00000010, 0x00000010);
- nvkm_wr32(device, 0x640000 + (chid * 0x1000), 0x00000000);
- nvkm_wr32(device, 0x610200 + (chid * 0x0010), 0x00000013);
+ nvkm_wr32(device, 0x610204 + (ctrl * 0x0010), chan->push);
+ nvkm_wr32(device, 0x610208 + (ctrl * 0x0010), 0x00010000);
+ nvkm_wr32(device, 0x61020c + (ctrl * 0x0010), ctrl);
+ nvkm_mask(device, 0x610200 + (ctrl * 0x0010), 0x00000010, 0x00000010);
+ nvkm_wr32(device, 0x640000 + (ctrl * 0x1000), 0x00000000);
+ nvkm_wr32(device, 0x610200 + (ctrl * 0x0010), 0x00000013);
/* wait for it to go inactive */
if (nvkm_msec(device, 2000,
- if (!(nvkm_rd32(device, 0x610200 + (chid * 0x10)) & 0x80000000))
+ if (!(nvkm_rd32(device, 0x610200 + (ctrl * 0x10)) & 0x80000000))
break;
) < 0) {
- nvkm_error(subdev, "ch %d init timeout, %08x\n", chid,
- nvkm_rd32(device, 0x610200 + (chid * 0x10)));
+ nvkm_error(subdev, "ch %d init timeout, %08x\n", user,
+ nvkm_rd32(device, 0x610200 + (ctrl * 0x10)));
return -EBUSY;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacnv50.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacnv50.h
index 43ac05857853..ea4a0d062e31 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacnv50.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacnv50.h
@@ -30,7 +30,7 @@ int gf119_disp_dmac_bind(struct nv50_disp_dmac *, struct nvkm_object *, u32);
extern const struct nv50_disp_dmac_func gf119_disp_core_func;
void gf119_disp_core_fini(struct nv50_disp_dmac *);
-extern const struct nv50_disp_dmac_func gp104_disp_dmac_func;
+extern const struct nv50_disp_dmac_func gp102_disp_dmac_func;
struct nv50_disp_dmac_oclass {
int (*ctor)(const struct nv50_disp_dmac_func *,
@@ -95,7 +95,7 @@ extern const struct nv50_disp_dmac_oclass gm200_disp_core_oclass;
extern const struct nv50_disp_dmac_oclass gp100_disp_core_oclass;
-extern const struct nv50_disp_dmac_oclass gp104_disp_core_oclass;
-extern const struct nv50_disp_dmac_oclass gp104_disp_base_oclass;
-extern const struct nv50_disp_dmac_oclass gp104_disp_ovly_oclass;
+extern const struct nv50_disp_dmac_oclass gp102_disp_core_oclass;
+extern const struct nv50_disp_dmac_oclass gp102_disp_base_oclass;
+extern const struct nv50_disp_dmac_oclass gp102_disp_ovly_oclass;
#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dport.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dport.c
index 9688970eca47..4a93ceb850ac 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dport.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dport.c
@@ -319,9 +319,8 @@ static const struct dp_rates {
};
void
-nvkm_dp_train(struct work_struct *w)
+nvkm_dp_train(struct nvkm_output_dp *outp)
{
- struct nvkm_output_dp *outp = container_of(w, typeof(*outp), lt.work);
struct nv50_disp *disp = nv50_disp(outp->base.disp);
const struct dp_rates *cfg = nvkm_dp_rates;
struct dp_state _dp = {
@@ -353,9 +352,6 @@ nvkm_dp_train(struct work_struct *w)
}
cfg--;
- /* disable link interrupt handling during link training */
- nvkm_notify_put(&outp->irq);
-
/* ensure sink is not in a low-power state */
if (!nvkm_rdaux(outp->aux, DPCD_SC00, &pwr, 1)) {
if ((pwr & DPCD_SC00_SET_POWER) != DPCD_SC00_SET_POWER_D0) {
@@ -400,9 +396,6 @@ nvkm_dp_train(struct work_struct *w)
dp_link_train_fini(dp);
- /* signal completion and enable link interrupt handling */
OUTP_DBG(&outp->base, "training complete");
atomic_set(&outp->lt.done, 1);
- wake_up(&outp->lt.wait);
- nvkm_notify_get(&outp->irq);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dport.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dport.h
index 6e10c5e0ef11..baf1dd9ff975 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dport.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dport.h
@@ -1,6 +1,6 @@
#ifndef __NVKM_DISP_DPORT_H__
#define __NVKM_DISP_DPORT_H__
-#include <core/os.h>
+struct nvkm_output_dp;
/* DPCD Receiver Capabilities */
#define DPCD_RC00_DPCD_REV 0x00000
@@ -77,5 +77,5 @@
#define DPCD_SC00_SET_POWER_D0 0x01
#define DPCD_SC00_SET_POWER_D3 0x03
-void nvkm_dp_train(struct work_struct *);
+void nvkm_dp_train(struct nvkm_output_dp *);
#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gf119.c
index 29e84b241cca..7b346ccc38b7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gf119.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gf119.c
@@ -203,17 +203,20 @@ gf119_disp_intr_unk2_0(struct nv50_disp *disp, int head)
/* see note in nv50_disp_intr_unk20_0() */
if (outp && outp->info.type == DCB_OUTPUT_DP) {
struct nvkm_output_dp *outpdp = nvkm_output_dp(outp);
- struct nvbios_init init = {
- .subdev = subdev,
- .bios = subdev->device->bios,
- .outp = &outp->info,
- .crtc = head,
- .offset = outpdp->info.script[4],
- .execute = 1,
- };
+ if (!outpdp->lt.mst) {
+ struct nvbios_init init = {
+ .subdev = subdev,
+ .bios = subdev->device->bios,
+ .outp = &outp->info,
+ .crtc = head,
+ .offset = outpdp->info.script[4],
+ .execute = 1,
+ };
- nvbios_exec(&init);
- atomic_set(&outpdp->lt.done, 0);
+ nvkm_notify_put(&outpdp->irq);
+ nvbios_exec(&init);
+ atomic_set(&outpdp->lt.done, 0);
+ }
}
}
@@ -314,7 +317,7 @@ gf119_disp_intr_unk2_2(struct nv50_disp *disp, int head)
break;
}
- if (nvkm_output_dp_train(outp, pclk, true))
+ if (nvkm_output_dp_train(outp, pclk))
OUTP_ERR(outp, "link not trained before attach");
} else {
if (disp->func->sor.magic)
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gp104.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gp102.c
index 3bf3380336e4..f5d613f82709 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gp104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gp102.c
@@ -25,7 +25,7 @@
#include "rootnv50.h"
static void
-gp104_disp_intr_error(struct nv50_disp *disp, int chid)
+gp102_disp_intr_error(struct nv50_disp *disp, int chid)
{
struct nvkm_subdev *subdev = &disp->base.engine.subdev;
struct nvkm_device *device = subdev->device;
@@ -51,12 +51,12 @@ gp104_disp_intr_error(struct nv50_disp *disp, int chid)
}
static const struct nv50_disp_func
-gp104_disp = {
+gp102_disp = {
.intr = gf119_disp_intr,
- .intr_error = gp104_disp_intr_error,
+ .intr_error = gp102_disp_intr_error,
.uevent = &gf119_disp_chan_uevent,
.super = gf119_disp_intr_supervisor,
- .root = &gp104_disp_root_oclass,
+ .root = &gp102_disp_root_oclass,
.head.vblank_init = gf119_disp_vblank_init,
.head.vblank_fini = gf119_disp_vblank_fini,
.head.scanoutpos = gf119_disp_root_scanoutpos,
@@ -75,7 +75,7 @@ gp104_disp = {
};
int
-gp104_disp_new(struct nvkm_device *device, int index, struct nvkm_disp **pdisp)
+gp102_disp_new(struct nvkm_device *device, int index, struct nvkm_disp **pdisp)
{
- return gf119_disp_new_(&gp104_disp, device, index, pdisp);
+ return gf119_disp_new_(&gp102_disp, device, index, pdisp);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c
index fbb8c7dc18fd..567466f93cd5 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c
@@ -590,6 +590,7 @@ nv50_disp_intr_unk20_0(struct nv50_disp *disp, int head)
.execute = 1,
};
+ nvkm_notify_put(&outpdp->irq);
nvbios_exec(&init);
atomic_set(&outpdp->lt.done, 0);
}
@@ -779,7 +780,7 @@ nv50_disp_intr_unk20_2(struct nv50_disp *disp, int head)
break;
}
- if (nvkm_output_dp_train(outp, datarate / soff, true))
+ if (nvkm_output_dp_train(outp, datarate / soff))
OUTP_ERR(outp, "link not trained before attach");
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmg84.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmg84.c
index 54a4ae8d66c6..5ad5d0f5db05 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmg84.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmg84.c
@@ -33,5 +33,5 @@ g84_disp_oimm_oclass = {
.base.maxver = 0,
.ctor = nv50_disp_oimm_new,
.func = &nv50_disp_pioc_func,
- .chid = 5,
+ .chid = { 5, 5 },
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmgf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmgf119.c
index c658db54afc5..1f9fd3403f07 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmgf119.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmgf119.c
@@ -33,5 +33,5 @@ gf119_disp_oimm_oclass = {
.base.maxver = 0,
.ctor = nv50_disp_oimm_new,
.func = &gf119_disp_pioc_func,
- .chid = 9,
+ .chid = { 9, 9 },
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmgk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmgk104.c
index b1fde8c125d6..0c09fe85e952 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmgk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmgk104.c
@@ -33,5 +33,5 @@ gk104_disp_oimm_oclass = {
.base.maxver = 0,
.ctor = nv50_disp_oimm_new,
.func = &gf119_disp_pioc_func,
- .chid = 9,
+ .chid = { 9, 9 },
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmgp102.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmgp102.c
new file mode 100644
index 000000000000..abf82365c671
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmgp102.c
@@ -0,0 +1,37 @@
+/*
+ * Copyright 2016 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#include "channv50.h"
+#include "rootnv50.h"
+
+#include <nvif/class.h>
+
+const struct nv50_disp_pioc_oclass
+gp102_disp_oimm_oclass = {
+ .base.oclass = GK104_DISP_OVERLAY,
+ .base.minver = 0,
+ .base.maxver = 0,
+ .ctor = nv50_disp_oimm_new,
+ .func = &gf119_disp_pioc_func,
+ .chid = { 9, 13 },
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmgt215.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmgt215.c
index f4e7eb3d1177..1281db28aebd 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmgt215.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmgt215.c
@@ -33,5 +33,5 @@ gt215_disp_oimm_oclass = {
.base.maxver = 0,
.ctor = nv50_disp_oimm_new,
.func = &nv50_disp_pioc_func,
- .chid = 5,
+ .chid = { 5, 5 },
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmnv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmnv50.c
index 3940b9c966ec..07540f3d32dc 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmnv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmnv50.c
@@ -33,7 +33,7 @@
int
nv50_disp_oimm_new(const struct nv50_disp_chan_func *func,
const struct nv50_disp_chan_mthd *mthd,
- struct nv50_disp_root *root, int chid,
+ struct nv50_disp_root *root, int ctrl, int user,
const struct nvkm_oclass *oclass, void *data, u32 size,
struct nvkm_object **pobject)
{
@@ -54,7 +54,7 @@ nv50_disp_oimm_new(const struct nv50_disp_chan_func *func,
} else
return ret;
- return nv50_disp_chan_new_(func, mthd, root, chid + head,
+ return nv50_disp_chan_new_(func, mthd, root, ctrl + head, user + head,
head, oclass, pobject);
}
@@ -65,5 +65,5 @@ nv50_disp_oimm_oclass = {
.base.maxver = 0,
.ctor = nv50_disp_oimm_new,
.func = &nv50_disp_pioc_func,
- .chid = 5,
+ .chid = { 5, 5 },
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/outpdp.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/outpdp.c
index 3b7a9e7a1ea8..de36f73b14dc 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/outpdp.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/outpdp.c
@@ -31,7 +31,7 @@
#include <nvif/event.h>
int
-nvkm_output_dp_train(struct nvkm_output *base, u32 datarate, bool wait)
+nvkm_output_dp_train(struct nvkm_output *base, u32 datarate)
{
struct nvkm_output_dp *outp = nvkm_output_dp(base);
bool retrain = true;
@@ -39,6 +39,8 @@ nvkm_output_dp_train(struct nvkm_output *base, u32 datarate, bool wait)
u32 linkrate;
int ret, i;
+ mutex_lock(&outp->mutex);
+
/* check that the link is trained at a high enough rate */
ret = nvkm_rdaux(outp->aux, DPCD_LC00_LINK_BW_SET, link, 2);
if (ret) {
@@ -88,19 +90,10 @@ done:
outp->dpcd[DPCD_RC02] =
outp->base.info.dpconf.link_nr;
}
- atomic_set(&outp->lt.done, 0);
- schedule_work(&outp->lt.work);
- } else {
- nvkm_notify_get(&outp->irq);
- }
-
- if (wait) {
- if (!wait_event_timeout(outp->lt.wait,
- atomic_read(&outp->lt.done),
- msecs_to_jiffies(2000)))
- ret = -ETIMEDOUT;
+ nvkm_dp_train(outp);
}
+ mutex_unlock(&outp->mutex);
return ret;
}
@@ -118,7 +111,7 @@ nvkm_output_dp_enable(struct nvkm_output_dp *outp, bool enable)
if (!nvkm_rdaux(aux, DPCD_RC00_DPCD_REV, outp->dpcd,
sizeof(outp->dpcd))) {
- nvkm_output_dp_train(&outp->base, 0, true);
+ nvkm_output_dp_train(&outp->base, 0);
return;
}
}
@@ -165,10 +158,10 @@ nvkm_output_dp_irq(struct nvkm_notify *notify)
};
OUTP_DBG(&outp->base, "IRQ: %d", line->mask);
- nvkm_output_dp_train(&outp->base, 0, true);
+ nvkm_output_dp_train(&outp->base, 0);
nvkm_event_send(&disp->hpd, rep.mask, conn->index, &rep, sizeof(rep));
- return NVKM_NOTIFY_DROP;
+ return NVKM_NOTIFY_KEEP;
}
static void
@@ -177,7 +170,6 @@ nvkm_output_dp_fini(struct nvkm_output *base)
struct nvkm_output_dp *outp = nvkm_output_dp(base);
nvkm_notify_put(&outp->hpd);
nvkm_notify_put(&outp->irq);
- flush_work(&outp->lt.work);
nvkm_output_dp_enable(outp, false);
}
@@ -187,6 +179,7 @@ nvkm_output_dp_init(struct nvkm_output *base)
struct nvkm_output_dp *outp = nvkm_output_dp(base);
nvkm_notify_put(&outp->base.conn->hpd);
nvkm_output_dp_enable(outp, true);
+ nvkm_notify_get(&outp->irq);
nvkm_notify_get(&outp->hpd);
}
@@ -238,11 +231,6 @@ nvkm_output_dp_ctor(const struct nvkm_output_dp_func *func,
OUTP_DBG(&outp->base, "bios dp %02x %02x %02x %02x",
outp->version, hdr, cnt, len);
- /* link training */
- INIT_WORK(&outp->lt.work, nvkm_dp_train);
- init_waitqueue_head(&outp->lt.wait);
- atomic_set(&outp->lt.done, 0);
-
/* link maintenance */
ret = nvkm_notify_init(NULL, &i2c->event, nvkm_output_dp_irq, true,
&(struct nvkm_i2c_ntfy_req) {
@@ -257,6 +245,9 @@ nvkm_output_dp_ctor(const struct nvkm_output_dp_func *func,
return ret;
}
+ mutex_init(&outp->mutex);
+ atomic_set(&outp->lt.done, 0);
+
/* hotplug detect, replaces gpio-based mechanism with aux events */
ret = nvkm_notify_init(NULL, &i2c->event, nvkm_output_dp_hpd, true,
&(struct nvkm_i2c_ntfy_req) {
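
The outpdp.c changes above replace the workqueue-based link training (lt.work plus a waitqueue) with a synchronous nvkm_dp_train() call serialized by a new per-output mutex, and the IRQ handler now returns NVKM_NOTIFY_KEEP so it stays armed across retrains. A minimal userspace sketch of the resulting locking shape (hypothetical names; pthreads standing in for the kernel mutex):

/* Illustrative userspace program, not kernel code. */
#include <pthread.h>
#include <stdio.h>

struct output {
	pthread_mutex_t mutex;	/* serializes all (re)train requests */
	int trained;
};

static void train_link(struct output *o)
{
	o->trained = 1;		/* stands in for the real training sequence */
}

/* Callers train inline under the lock; nobody waits on a completion. */
static void output_train(struct output *o, int force)
{
	pthread_mutex_lock(&o->mutex);
	if (force || !o->trained)
		train_link(o);
	pthread_mutex_unlock(&o->mutex);
}

int main(void)
{
	struct output o = { PTHREAD_MUTEX_INITIALIZER, 0 };

	output_train(&o, 0);	/* e.g., from the IRQ/hotplug path */
	printf("trained: %d\n", o.trained);
	return 0;
}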
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/outpdp.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/outpdp.h
index 4e983f6d7032..3c83a561cd88 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/outpdp.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/outpdp.h
@@ -29,10 +29,10 @@ struct nvkm_output_dp {
bool present;
u8 dpcd[16];
+ struct mutex mutex;
struct {
- struct work_struct work;
- wait_queue_head_t wait;
atomic_t done;
+ bool mst;
} lt;
};
@@ -41,9 +41,11 @@ struct nvkm_output_dp_func {
int (*lnk_pwr)(struct nvkm_output_dp *, int nr);
int (*lnk_ctl)(struct nvkm_output_dp *, int nr, int bw, bool ef);
int (*drv_ctl)(struct nvkm_output_dp *, int ln, int vs, int pe, int pc);
+ void (*vcpi)(struct nvkm_output_dp *, int head, u8 start_slot,
+ u8 num_slots, u16 pbn, u16 aligned_pbn);
};
-int nvkm_output_dp_train(struct nvkm_output *, u32 rate, bool wait);
+int nvkm_output_dp_train(struct nvkm_output *, u32 rate);
int nvkm_output_dp_ctor(const struct nvkm_output_dp_func *, struct nvkm_disp *,
int index, struct dcb_output *, struct nvkm_i2c_aux *,
@@ -63,6 +65,7 @@ int gf119_sor_dp_new(struct nvkm_disp *, int, struct dcb_output *,
struct nvkm_output **);
int gf119_sor_dp_lnk_ctl(struct nvkm_output_dp *, int, int, bool);
int gf119_sor_dp_drv_ctl(struct nvkm_output_dp *, int, int, int, int);
+void gf119_sor_dp_vcpi(struct nvkm_output_dp *, int, u8, u8, u16, u16);
int gm107_sor_dp_new(struct nvkm_disp *, int, struct dcb_output *,
struct nvkm_output **);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlygp104.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlygp102.c
index 97e2dd2d908e..589bd2f12b41 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlygp104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlygp102.c
@@ -27,12 +27,12 @@
#include <nvif/class.h>
const struct nv50_disp_dmac_oclass
-gp104_disp_ovly_oclass = {
+gp102_disp_ovly_oclass = {
.base.oclass = GK104_DISP_OVERLAY_CONTROL_DMA,
.base.minver = 0,
.base.maxver = 0,
.ctor = nv50_disp_ovly_new,
- .func = &gp104_disp_dmac_func,
+ .func = &gp102_disp_dmac_func,
.mthd = &gk104_disp_ovly_chan_mthd,
.chid = 5,
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/piocgf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/piocgf119.c
index a625a9876e34..0abaa6431943 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/piocgf119.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/piocgf119.c
@@ -32,20 +32,21 @@ gf119_disp_pioc_fini(struct nv50_disp_chan *chan)
struct nv50_disp *disp = chan->root->disp;
struct nvkm_subdev *subdev = &disp->base.engine.subdev;
struct nvkm_device *device = subdev->device;
- int chid = chan->chid;
+ int ctrl = chan->chid.ctrl;
+ int user = chan->chid.user;
- nvkm_mask(device, 0x610490 + (chid * 0x10), 0x00000001, 0x00000000);
+ nvkm_mask(device, 0x610490 + (ctrl * 0x10), 0x00000001, 0x00000000);
if (nvkm_msec(device, 2000,
- if (!(nvkm_rd32(device, 0x610490 + (chid * 0x10)) & 0x00030000))
+ if (!(nvkm_rd32(device, 0x610490 + (ctrl * 0x10)) & 0x00030000))
break;
) < 0) {
- nvkm_error(subdev, "ch %d fini: %08x\n", chid,
- nvkm_rd32(device, 0x610490 + (chid * 0x10)));
+ nvkm_error(subdev, "ch %d fini: %08x\n", user,
+ nvkm_rd32(device, 0x610490 + (ctrl * 0x10)));
}
/* disable error reporting and completion notification */
- nvkm_mask(device, 0x610090, 0x00000001 << chid, 0x00000000);
- nvkm_mask(device, 0x6100a0, 0x00000001 << chid, 0x00000000);
+ nvkm_mask(device, 0x610090, 0x00000001 << user, 0x00000000);
+ nvkm_mask(device, 0x6100a0, 0x00000001 << user, 0x00000000);
}
static int
@@ -54,20 +55,21 @@ gf119_disp_pioc_init(struct nv50_disp_chan *chan)
struct nv50_disp *disp = chan->root->disp;
struct nvkm_subdev *subdev = &disp->base.engine.subdev;
struct nvkm_device *device = subdev->device;
- int chid = chan->chid;
+ int ctrl = chan->chid.ctrl;
+ int user = chan->chid.user;
/* enable error reporting */
- nvkm_mask(device, 0x6100a0, 0x00000001 << chid, 0x00000001 << chid);
+ nvkm_mask(device, 0x6100a0, 0x00000001 << user, 0x00000001 << user);
/* activate channel */
- nvkm_wr32(device, 0x610490 + (chid * 0x10), 0x00000001);
+ nvkm_wr32(device, 0x610490 + (ctrl * 0x10), 0x00000001);
if (nvkm_msec(device, 2000,
- u32 tmp = nvkm_rd32(device, 0x610490 + (chid * 0x10));
+ u32 tmp = nvkm_rd32(device, 0x610490 + (ctrl * 0x10));
if ((tmp & 0x00030000) == 0x00010000)
break;
) < 0) {
- nvkm_error(subdev, "ch %d init: %08x\n", chid,
- nvkm_rd32(device, 0x610490 + (chid * 0x10)));
+ nvkm_error(subdev, "ch %d init: %08x\n", user,
+ nvkm_rd32(device, 0x610490 + (ctrl * 0x10)));
return -EBUSY;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/piocnv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/piocnv50.c
index 9d2618dacf20..0211e0e8a35f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/piocnv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/piocnv50.c
@@ -32,15 +32,16 @@ nv50_disp_pioc_fini(struct nv50_disp_chan *chan)
struct nv50_disp *disp = chan->root->disp;
struct nvkm_subdev *subdev = &disp->base.engine.subdev;
struct nvkm_device *device = subdev->device;
- int chid = chan->chid;
+ int ctrl = chan->chid.ctrl;
+ int user = chan->chid.user;
- nvkm_mask(device, 0x610200 + (chid * 0x10), 0x00000001, 0x00000000);
+ nvkm_mask(device, 0x610200 + (ctrl * 0x10), 0x00000001, 0x00000000);
if (nvkm_msec(device, 2000,
- if (!(nvkm_rd32(device, 0x610200 + (chid * 0x10)) & 0x00030000))
+ if (!(nvkm_rd32(device, 0x610200 + (ctrl * 0x10)) & 0x00030000))
break;
) < 0) {
- nvkm_error(subdev, "ch %d timeout: %08x\n", chid,
- nvkm_rd32(device, 0x610200 + (chid * 0x10)));
+ nvkm_error(subdev, "ch %d timeout: %08x\n", user,
+ nvkm_rd32(device, 0x610200 + (ctrl * 0x10)));
}
}
@@ -50,26 +51,27 @@ nv50_disp_pioc_init(struct nv50_disp_chan *chan)
struct nv50_disp *disp = chan->root->disp;
struct nvkm_subdev *subdev = &disp->base.engine.subdev;
struct nvkm_device *device = subdev->device;
- int chid = chan->chid;
+ int ctrl = chan->chid.ctrl;
+ int user = chan->chid.user;
- nvkm_wr32(device, 0x610200 + (chid * 0x10), 0x00002000);
+ nvkm_wr32(device, 0x610200 + (ctrl * 0x10), 0x00002000);
if (nvkm_msec(device, 2000,
- if (!(nvkm_rd32(device, 0x610200 + (chid * 0x10)) & 0x00030000))
+ if (!(nvkm_rd32(device, 0x610200 + (ctrl * 0x10)) & 0x00030000))
break;
) < 0) {
- nvkm_error(subdev, "ch %d timeout0: %08x\n", chid,
- nvkm_rd32(device, 0x610200 + (chid * 0x10)));
+ nvkm_error(subdev, "ch %d timeout0: %08x\n", user,
+ nvkm_rd32(device, 0x610200 + (ctrl * 0x10)));
return -EBUSY;
}
- nvkm_wr32(device, 0x610200 + (chid * 0x10), 0x00000001);
+ nvkm_wr32(device, 0x610200 + (ctrl * 0x10), 0x00000001);
if (nvkm_msec(device, 2000,
- u32 tmp = nvkm_rd32(device, 0x610200 + (chid * 0x10));
+ u32 tmp = nvkm_rd32(device, 0x610200 + (ctrl * 0x10));
if ((tmp & 0x00030000) == 0x00010000)
break;
) < 0) {
- nvkm_error(subdev, "ch %d timeout1: %08x\n", chid,
- nvkm_rd32(device, 0x610200 + (chid * 0x10)));
+ nvkm_error(subdev, "ch %d timeout1: %08x\n", user,
+ nvkm_rd32(device, 0x610200 + (ctrl * 0x10)));
return -EBUSY;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgp104.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgp102.c
index 8443e04dc626..37122ca579ad 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgp104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgp102.c
@@ -27,32 +27,32 @@
#include <nvif/class.h>
static const struct nv50_disp_root_func
-gp104_disp_root = {
+gp102_disp_root = {
.init = gf119_disp_root_init,
.fini = gf119_disp_root_fini,
.dmac = {
- &gp104_disp_core_oclass,
- &gp104_disp_base_oclass,
- &gp104_disp_ovly_oclass,
+ &gp102_disp_core_oclass,
+ &gp102_disp_base_oclass,
+ &gp102_disp_ovly_oclass,
},
.pioc = {
- &gk104_disp_oimm_oclass,
- &gk104_disp_curs_oclass,
+ &gp102_disp_oimm_oclass,
+ &gp102_disp_curs_oclass,
},
};
static int
-gp104_disp_root_new(struct nvkm_disp *disp, const struct nvkm_oclass *oclass,
+gp102_disp_root_new(struct nvkm_disp *disp, const struct nvkm_oclass *oclass,
void *data, u32 size, struct nvkm_object **pobject)
{
- return nv50_disp_root_new_(&gp104_disp_root, disp, oclass,
+ return nv50_disp_root_new_(&gp102_disp_root, disp, oclass,
data, size, pobject);
}
const struct nvkm_disp_oclass
-gp104_disp_root_oclass = {
- .base.oclass = GP104_DISP,
+gp102_disp_root_oclass = {
+ .base.oclass = GP102_DISP,
.base.minver = -1,
.base.maxver = -1,
- .ctor = gp104_disp_root_new,
+ .ctor = gp102_disp_root_new,
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.c
index 2f9cecd81d04..e70dc6a9ff7d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.c
@@ -66,7 +66,7 @@ nv50_disp_root_scanoutpos(NV50_DISP_MTHD_V0)
return 0;
}
-int
+static int
nv50_disp_root_mthd_(struct nvkm_object *object, u32 mthd, void *data, u32 size)
{
union {
@@ -173,13 +173,56 @@ nv50_disp_root_mthd_(struct nvkm_object *object, u32 mthd, void *data, u32 size)
return 0;
} else
if (args->v0.state != 0) {
- nvkm_output_dp_train(&outpdp->base, 0, true);
+ nvkm_output_dp_train(&outpdp->base, 0);
return 0;
}
} else
return ret;
}
break;
+ case NV50_DISP_MTHD_V1_SOR_DP_MST_LINK: {
+ struct nvkm_output_dp *outpdp = nvkm_output_dp(outp);
+ union {
+ struct nv50_disp_sor_dp_mst_link_v0 v0;
+ } *args = data;
+ int ret = -ENOSYS;
+ nvif_ioctl(object, "disp sor dp mst link size %d\n", size);
+ if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))) {
+ nvif_ioctl(object, "disp sor dp mst link vers %d state %d\n",
+ args->v0.version, args->v0.state);
+ if (outpdp->lt.mst != !!args->v0.state) {
+ outpdp->lt.mst = !!args->v0.state;
+ atomic_set(&outpdp->lt.done, 0);
+ nvkm_output_dp_train(&outpdp->base, 0);
+ }
+ return 0;
+ } else
+ return ret;
+ }
+ break;
+ case NV50_DISP_MTHD_V1_SOR_DP_MST_VCPI: {
+ struct nvkm_output_dp *outpdp = nvkm_output_dp(outp);
+ union {
+ struct nv50_disp_sor_dp_mst_vcpi_v0 v0;
+ } *args = data;
+ int ret = -ENOSYS;
+ nvif_ioctl(object, "disp sor dp mst vcpi size %d\n", size);
+ if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))) {
+ nvif_ioctl(object, "disp sor dp mst vcpi vers %d "
+ "slot %02x/%02x pbn %04x/%04x\n",
+ args->v0.version, args->v0.start_slot,
+ args->v0.num_slots, args->v0.pbn,
+ args->v0.aligned_pbn);
+ if (!outpdp->func->vcpi)
+ return -ENODEV;
+ outpdp->func->vcpi(outpdp, head, args->v0.start_slot,
+ args->v0.num_slots, args->v0.pbn,
+ args->v0.aligned_pbn);
+ return 0;
+ } else
+ return ret;
+ }
+ break;
case NV50_DISP_MTHD_V1_PIOR_PWR:
if (!func->pior.power)
return -ENODEV;
@@ -207,8 +250,8 @@ nv50_disp_root_pioc_new_(const struct nvkm_oclass *oclass,
{
const struct nv50_disp_pioc_oclass *sclass = oclass->priv;
struct nv50_disp_root *root = nv50_disp_root(oclass->parent);
- return sclass->ctor(sclass->func, sclass->mthd, root, sclass->chid,
- oclass, data, size, pobject);
+ return sclass->ctor(sclass->func, sclass->mthd, root, sclass->chid.ctrl,
+ sclass->chid.user, oclass, data, size, pobject);
}
static int
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.h
index ad00f1724b72..b147cf5b3518 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.h
@@ -41,5 +41,5 @@ extern const struct nvkm_disp_oclass gk110_disp_root_oclass;
extern const struct nvkm_disp_oclass gm107_disp_root_oclass;
extern const struct nvkm_disp_oclass gm200_disp_root_oclass;
extern const struct nvkm_disp_oclass gp100_disp_root_oclass;
-extern const struct nvkm_disp_oclass gp104_disp_root_oclass;
+extern const struct nvkm_disp_oclass gp102_disp_root_oclass;
#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorg94.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorg94.c
index 1bb9d661e9b3..4510cb6e10a8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorg94.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorg94.c
@@ -45,14 +45,6 @@ static const struct nvkm_output_func
g94_sor_output_func = {
};
-int
-g94_sor_output_new(struct nvkm_disp *disp, int index,
- struct dcb_output *dcbE, struct nvkm_output **poutp)
-{
- return nvkm_output_new_(&g94_sor_output_func, disp,
- index, dcbE, poutp);
-}
-
/*******************************************************************************
* DisplayPort
******************************************************************************/
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgf119.c
index 49bd5da194e1..6ffdaa65aa77 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgf119.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgf119.c
@@ -56,11 +56,13 @@ gf119_sor_dp_lnk_ctl(struct nvkm_output_dp *outp, int nr, int bw, bool ef)
clksor |= bw << 18;
dpctrl |= ((1 << nr) - 1) << 16;
+ if (outp->lt.mst)
+ dpctrl |= 0x40000000;
if (ef)
dpctrl |= 0x00004000;
nvkm_mask(device, 0x612300 + soff, 0x007c0000, clksor);
- nvkm_mask(device, 0x61c10c + loff, 0x001f4000, dpctrl);
+ nvkm_mask(device, 0x61c10c + loff, 0x401f4000, dpctrl);
return 0;
}
@@ -101,12 +103,24 @@ gf119_sor_dp_drv_ctl(struct nvkm_output_dp *outp,
return 0;
}
+void
+gf119_sor_dp_vcpi(struct nvkm_output_dp *outp, int head, u8 slot,
+ u8 slot_nr, u16 pbn, u16 aligned)
+{
+ struct nvkm_device *device = outp->base.disp->engine.subdev.device;
+ const u32 hoff = head * 0x800;
+
+ nvkm_mask(device, 0x616588 + hoff, 0x00003f3f, (slot_nr << 8) | slot);
+ nvkm_mask(device, 0x61658c + hoff, 0xffffffff, (aligned << 16) | pbn);
+}
+
static const struct nvkm_output_dp_func
gf119_sor_dp_func = {
.pattern = gf119_sor_dp_pattern,
.lnk_pwr = g94_sor_dp_lnk_pwr,
.lnk_ctl = gf119_sor_dp_lnk_ctl,
.drv_ctl = gf119_sor_dp_drv_ctl,
+ .vcpi = gf119_sor_dp_vcpi,
};
int
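gf119_sor_dp_lnk_ctl above gains an MST enable bit (0x40000000) and gf119_sor_dp_vcpi packs the timeslot range into 0x616588 and the PBN values into 0x61658c. Just the bit packing, as a hedged standalone sketch (register semantics taken from the hunk; the helper names are illustrative):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* slot/slot_nr as masked into 0x616588 (0x00003f3f) */
static uint32_t vcpi_slots(uint8_t slot, uint8_t slot_nr)
{
	return ((uint32_t)slot_nr << 8) | slot;
}

/* pbn/aligned_pbn as written to 0x61658c */
static uint32_t vcpi_pbn(uint16_t pbn, uint16_t aligned)
{
	return ((uint32_t)aligned << 16) | pbn;
}

int main(void)
{
	printf("%08" PRIx32 " %08" PRIx32 "\n",
	       vcpi_slots(2, 5), vcpi_pbn(0x02d0, 0x0300));
	return 0;
}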
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgm107.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgm107.c
index 37790b2617c5..4cf8ad4d18ab 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgm107.c
@@ -43,6 +43,7 @@ gm107_sor_dp_func = {
.lnk_pwr = g94_sor_dp_lnk_pwr,
.lnk_ctl = gf119_sor_dp_lnk_ctl,
.drv_ctl = gf119_sor_dp_drv_ctl,
+ .vcpi = gf119_sor_dp_vcpi,
};
int
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgm200.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgm200.c
index c44fa7ea672a..81b788fa61be 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgm200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgm200.c
@@ -120,6 +120,7 @@ gm200_sor_dp_func = {
.lnk_pwr = gm200_sor_dp_lnk_pwr,
.lnk_ctl = gf119_sor_dp_lnk_ctl,
.drv_ctl = gm200_sor_dp_drv_ctl,
+ .vcpi = gf119_sor_dp_vcpi,
};
int
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chang84.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chang84.c
index aeb3387a3fb0..15a992b3580a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chang84.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chang84.c
@@ -129,7 +129,7 @@ g84_fifo_chan_engine_fini(struct nvkm_fifo_chan *base,
}
-int
+static int
g84_fifo_chan_engine_init(struct nvkm_fifo_chan *base,
struct nvkm_engine *engine)
{
@@ -170,7 +170,7 @@ g84_fifo_chan_engine_ctor(struct nvkm_fifo_chan *base,
return nvkm_object_bind(object, NULL, 0, &chan->engn[engn]);
}
-int
+static int
g84_fifo_chan_object_ctor(struct nvkm_fifo_chan *base,
struct nvkm_object *object)
{
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.c
index 352a0baec84d..ec68ea9747d5 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.c
@@ -180,7 +180,8 @@ gf100_fifo_recover(struct gf100_fifo *fifo, struct nvkm_engine *engine,
list_del_init(&chan->head);
chan->killed = true;
- fifo->recover.mask |= 1ULL << engine->subdev.index;
+ if (engine != &fifo->base.engine)
+ fifo->recover.mask |= 1ULL << engine->subdev.index;
schedule_work(&fifo->recover.work);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c
index 103c0afaaa6d..38c0910722c0 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c
@@ -743,14 +743,14 @@ gk104_fifo_fault_engine[] = {
{ 0x04, "BAR1", NULL, NVKM_SUBDEV_BAR },
{ 0x05, "BAR2", NULL, NVKM_SUBDEV_INSTMEM },
{ 0x06, "SCHED" },
- { 0x07, "HOST0" },
- { 0x08, "HOST1" },
- { 0x09, "HOST2" },
- { 0x0a, "HOST3" },
- { 0x0b, "HOST4" },
- { 0x0c, "HOST5" },
- { 0x0d, "HOST6" },
- { 0x0e, "HOST7" },
+ { 0x07, "HOST0", NULL, NVKM_ENGINE_FIFO },
+ { 0x08, "HOST1", NULL, NVKM_ENGINE_FIFO },
+ { 0x09, "HOST2", NULL, NVKM_ENGINE_FIFO },
+ { 0x0a, "HOST3", NULL, NVKM_ENGINE_FIFO },
+ { 0x0b, "HOST4", NULL, NVKM_ENGINE_FIFO },
+ { 0x0c, "HOST5", NULL, NVKM_ENGINE_FIFO },
+ { 0x0d, "HOST6", NULL, NVKM_ENGINE_FIFO },
+ { 0x0e, "HOST7", NULL, NVKM_ENGINE_FIFO },
{ 0x0f, "HOSTSR" },
{ 0x10, "MSVLD", NULL, NVKM_ENGINE_MSVLD },
{ 0x11, "MSPPP", NULL, NVKM_ENGINE_MSPPP },
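The HOST0..HOST7 fault entries in the gk104/gm107/gp100 tables gain an NVKM_ENGINE_FIFO mapping, so faults raised by the host units can be attributed to the fifo engine during recovery instead of being unidentifiable. An illustrative lookup over a table of the same shape (types hypothetical, not the nvkm ones):

#include <stdio.h>

enum engine { ENGINE_NONE, ENGINE_FIFO };

struct fault_entry {
	unsigned char id;
	const char *name;
	enum engine engine;	/* ENGINE_NONE when the fault has no owner */
};

static const struct fault_entry fault_engine[] = {
	{ 0x06, "SCHED", ENGINE_NONE },
	{ 0x07, "HOST0", ENGINE_FIFO },
	{ 0x08, "HOST1", ENGINE_FIFO },
	{ 0 }
};

static const struct fault_entry *fault_find(unsigned char id)
{
	const struct fault_entry *e;

	for (e = fault_engine; e->name; e++)
		if (e->id == id)
			return e;
	return NULL;
}

int main(void)
{
	const struct fault_entry *e = fault_find(0x07);

	if (e)
		printf("fault %02x -> %s (engine %d)\n",
		       (unsigned)e->id, e->name, (int)e->engine);
	return 0;
}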
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm107.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm107.c
index bd1ff877aa06..29c080683b32 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm107.c
@@ -32,14 +32,14 @@ gm107_fifo_fault_engine[] = {
{ 0x04, "BAR1", NULL, NVKM_SUBDEV_BAR },
{ 0x05, "BAR2", NULL, NVKM_SUBDEV_INSTMEM },
{ 0x06, "SCHED" },
- { 0x07, "HOST0" },
- { 0x08, "HOST1" },
- { 0x09, "HOST2" },
- { 0x0a, "HOST3" },
- { 0x0b, "HOST4" },
- { 0x0c, "HOST5" },
- { 0x0d, "HOST6" },
- { 0x0e, "HOST7" },
+ { 0x07, "HOST0", NULL, NVKM_ENGINE_FIFO },
+ { 0x08, "HOST1", NULL, NVKM_ENGINE_FIFO },
+ { 0x09, "HOST2", NULL, NVKM_ENGINE_FIFO },
+ { 0x0a, "HOST3", NULL, NVKM_ENGINE_FIFO },
+ { 0x0b, "HOST4", NULL, NVKM_ENGINE_FIFO },
+ { 0x0c, "HOST5", NULL, NVKM_ENGINE_FIFO },
+ { 0x0d, "HOST6", NULL, NVKM_ENGINE_FIFO },
+ { 0x0e, "HOST7", NULL, NVKM_ENGINE_FIFO },
{ 0x0f, "HOSTSR" },
{ 0x13, "PERF" },
{ 0x17, "PMU" },
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gp100.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gp100.c
index eff83f7fb705..b2635aea9f6e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gp100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gp100.c
@@ -30,17 +30,17 @@ gp100_fifo_fault_engine[] = {
{ 0x03, "IFB", NULL, NVKM_ENGINE_IFB },
{ 0x04, "BAR1", NULL, NVKM_SUBDEV_BAR },
{ 0x05, "BAR2", NULL, NVKM_SUBDEV_INSTMEM },
- { 0x06, "HOST0" },
- { 0x07, "HOST1" },
- { 0x08, "HOST2" },
- { 0x09, "HOST3" },
- { 0x0a, "HOST4" },
- { 0x0b, "HOST5" },
- { 0x0c, "HOST6" },
- { 0x0d, "HOST7" },
- { 0x0e, "HOST8" },
- { 0x0f, "HOST9" },
- { 0x10, "HOST10" },
+ { 0x06, "HOST0", NULL, NVKM_ENGINE_FIFO },
+ { 0x07, "HOST1", NULL, NVKM_ENGINE_FIFO },
+ { 0x08, "HOST2", NULL, NVKM_ENGINE_FIFO },
+ { 0x09, "HOST3", NULL, NVKM_ENGINE_FIFO },
+ { 0x0a, "HOST4", NULL, NVKM_ENGINE_FIFO },
+ { 0x0b, "HOST5", NULL, NVKM_ENGINE_FIFO },
+ { 0x0c, "HOST6", NULL, NVKM_ENGINE_FIFO },
+ { 0x0d, "HOST7", NULL, NVKM_ENGINE_FIFO },
+ { 0x0e, "HOST8", NULL, NVKM_ENGINE_FIFO },
+ { 0x0f, "HOST9", NULL, NVKM_ENGINE_FIFO },
+ { 0x10, "HOST10", NULL, NVKM_ENGINE_FIFO },
{ 0x13, "PERF" },
{ 0x17, "PMU" },
{ 0x18, "PTP" },
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogf100.c
index cbc67f262322..12d964260a29 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogf100.c
@@ -60,6 +60,7 @@ gf100_fifo_gpfifo_engine_fini(struct nvkm_fifo_chan *base,
struct nvkm_gpuobj *inst = chan->base.inst;
int ret = 0;
+ mutex_lock(&subdev->mutex);
nvkm_wr32(device, 0x002634, chan->base.chid);
if (nvkm_msec(device, 2000,
if (nvkm_rd32(device, 0x002634) == chan->base.chid)
@@ -67,10 +68,12 @@ gf100_fifo_gpfifo_engine_fini(struct nvkm_fifo_chan *base,
) < 0) {
nvkm_error(subdev, "channel %d [%s] kick timeout\n",
chan->base.chid, chan->base.object.client->name);
- ret = -EBUSY;
- if (suspend)
- return ret;
+ ret = -ETIMEDOUT;
}
+ mutex_unlock(&subdev->mutex);
+
+ if (ret && suspend)
+ return ret;
if (offset) {
nvkm_kmap(inst);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogk104.c
index ed4351032ed6..a2df4f3e7763 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogk104.c
@@ -40,7 +40,9 @@ gk104_fifo_gpfifo_kick(struct gk104_fifo_chan *chan)
struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
struct nvkm_device *device = subdev->device;
struct nvkm_client *client = chan->base.object.client;
+ int ret = 0;
+ mutex_lock(&subdev->mutex);
nvkm_wr32(device, 0x002634, chan->base.chid);
if (nvkm_msec(device, 2000,
if (!(nvkm_rd32(device, 0x002634) & 0x00100000))
@@ -48,10 +50,10 @@ gk104_fifo_gpfifo_kick(struct gk104_fifo_chan *chan)
) < 0) {
nvkm_error(subdev, "channel %d [%s] kick timeout\n",
chan->base.chid, client->name);
- return -EBUSY;
+ ret = -ETIMEDOUT;
}
-
- return 0;
+ mutex_unlock(&subdev->mutex);
+ return ret;
}
static u32
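Both gpfifo hunks serialize the channel kick: subdev->mutex is held around the 0x002634 write-and-poll so concurrent kicks cannot interleave on the single kick register, and the failure code becomes -ETIMEDOUT. The locking shape modeled with pthreads (every name below is a stand-in for the nvkm calls, and the fake poll always succeeds):

#include <errno.h>
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t kick_mutex = PTHREAD_MUTEX_INITIALIZER;

/* stand-in for the 0x002634 write + 2ms poll; returns 0 on timeout */
static int kick_hw(int chid)
{
	(void)chid;
	return 1;
}

static int chan_kick(int chid)
{
	int ret = 0;

	pthread_mutex_lock(&kick_mutex);
	if (!kick_hw(chid))
		ret = -ETIMEDOUT;	/* was -EBUSY before this change */
	pthread_mutex_unlock(&kick_mutex);
	return ret;
}

int main(void)
{
	printf("kick: %d\n", chan_kick(3));
	return 0;
}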
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf117.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf117.c
index c925ade5880e..74a64e3fd59a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf117.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf117.c
@@ -218,7 +218,7 @@ gf117_grctx_generate_attrib(struct gf100_grctx *info)
}
}
-void
+static void
gf117_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
{
struct nvkm_device *device = gr->base.engine.subdev.device;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm107.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm107.c
index 6d3c5011e18c..4c4b5ab6e46d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm107.c
@@ -933,7 +933,7 @@ gm107_grctx_generate_attrib(struct gf100_grctx *info)
}
}
-void
+static void
gm107_grctx_generate_tpcid(struct gf100_gr *gr)
{
struct nvkm_device *device = gr->base.engine.subdev.device;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxnv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxnv50.c
index 1e13278cf306..c8bb9191f9a2 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxnv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxnv50.c
@@ -106,6 +106,7 @@
#define CP_SEEK_2 0x00c800ff
#include "ctxnv40.h"
+#include "nv50.h"
#include <subdev/fb.h>
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgf100.fuc3.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgf100.fuc3.h
index 8cb240b65ec2..12a703fe355d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgf100.fuc3.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgf100.fuc3.h
@@ -1,4 +1,4 @@
-uint32_t gf100_grgpc_data[] = {
+static uint32_t gf100_grgpc_data[] = {
/* 0x0000: gpc_mmio_list_head */
0x00000064,
/* 0x0004: gpc_mmio_list_tail */
@@ -36,7 +36,7 @@ uint32_t gf100_grgpc_data[] = {
0x00000000,
};
-uint32_t gf100_grgpc_code[] = {
+static uint32_t gf100_grgpc_code[] = {
0x03a10ef5,
/* 0x0004: queue_put */
0x9800d898,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgf117.fuc3.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgf117.fuc3.h
index 550d6ba0933b..ffbfc51200f1 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgf117.fuc3.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgf117.fuc3.h
@@ -1,4 +1,4 @@
-uint32_t gf117_grgpc_data[] = {
+static uint32_t gf117_grgpc_data[] = {
/* 0x0000: gpc_mmio_list_head */
0x0000006c,
/* 0x0004: gpc_mmio_list_tail */
@@ -40,7 +40,7 @@ uint32_t gf117_grgpc_data[] = {
0x00000000,
};
-uint32_t gf117_grgpc_code[] = {
+static uint32_t gf117_grgpc_code[] = {
0x03a10ef5,
/* 0x0004: queue_put */
0x9800d898,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk104.fuc3.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk104.fuc3.h
index 271b59d365e5..357f662de571 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk104.fuc3.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk104.fuc3.h
@@ -1,4 +1,4 @@
-uint32_t gk104_grgpc_data[] = {
+static uint32_t gk104_grgpc_data[] = {
/* 0x0000: gpc_mmio_list_head */
0x0000006c,
/* 0x0004: gpc_mmio_list_tail */
@@ -40,7 +40,7 @@ uint32_t gk104_grgpc_data[] = {
0x00000000,
};
-uint32_t gk104_grgpc_code[] = {
+static uint32_t gk104_grgpc_code[] = {
0x03a10ef5,
/* 0x0004: queue_put */
0x9800d898,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk110.fuc3.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk110.fuc3.h
index 73b4a32c5d29..4ffc8212a85c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk110.fuc3.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk110.fuc3.h
@@ -1,4 +1,4 @@
-uint32_t gk110_grgpc_data[] = {
+static uint32_t gk110_grgpc_data[] = {
/* 0x0000: gpc_mmio_list_head */
0x0000006c,
/* 0x0004: gpc_mmio_list_tail */
@@ -40,7 +40,7 @@ uint32_t gk110_grgpc_data[] = {
0x00000000,
};
-uint32_t gk110_grgpc_code[] = {
+static uint32_t gk110_grgpc_code[] = {
0x03a10ef5,
/* 0x0004: queue_put */
0x9800d898,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk208.fuc5.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk208.fuc5.h
index 018169818317..09196206c9bc 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk208.fuc5.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk208.fuc5.h
@@ -1,4 +1,4 @@
-uint32_t gk208_grgpc_data[] = {
+static uint32_t gk208_grgpc_data[] = {
/* 0x0000: gpc_mmio_list_head */
0x0000006c,
/* 0x0004: gpc_mmio_list_tail */
@@ -40,7 +40,7 @@ uint32_t gk208_grgpc_data[] = {
0x00000000,
};
-uint32_t gk208_grgpc_code[] = {
+static uint32_t gk208_grgpc_code[] = {
0x03140ef5,
/* 0x0004: queue_put */
0x9800d898,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgm107.fuc5.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgm107.fuc5.h
index eca007f03fa9..6d7d004363d9 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgm107.fuc5.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgm107.fuc5.h
@@ -1,4 +1,4 @@
-uint32_t gm107_grgpc_data[] = {
+static uint32_t gm107_grgpc_data[] = {
/* 0x0000: gpc_mmio_list_head */
0x0000006c,
/* 0x0004: gpc_mmio_list_tail */
@@ -40,7 +40,7 @@ uint32_t gm107_grgpc_data[] = {
0x00000000,
};
-uint32_t gm107_grgpc_code[] = {
+static uint32_t gm107_grgpc_code[] = {
0x03410ef5,
/* 0x0004: queue_put */
0x9800d898,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgf100.fuc3.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgf100.fuc3.h
index 8015b40a61d6..7538404b8b13 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgf100.fuc3.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgf100.fuc3.h
@@ -1,4 +1,4 @@
-uint32_t gf100_grhub_data[] = {
+static uint32_t gf100_grhub_data[] = {
/* 0x0000: hub_mmio_list_head */
0x00000300,
/* 0x0004: hub_mmio_list_tail */
@@ -205,7 +205,7 @@ uint32_t gf100_grhub_data[] = {
0x0417e91c,
};
-uint32_t gf100_grhub_code[] = {
+static uint32_t gf100_grhub_code[] = {
0x039b0ef5,
/* 0x0004: queue_put */
0x9800d898,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgf117.fuc3.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgf117.fuc3.h
index 2af90ec6852a..ce000a47ec6d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgf117.fuc3.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgf117.fuc3.h
@@ -1,4 +1,4 @@
-uint32_t gf117_grhub_data[] = {
+static uint32_t gf117_grhub_data[] = {
/* 0x0000: hub_mmio_list_head */
0x00000300,
/* 0x0004: hub_mmio_list_tail */
@@ -205,7 +205,7 @@ uint32_t gf117_grhub_data[] = {
0x0417e91c,
};
-uint32_t gf117_grhub_code[] = {
+static uint32_t gf117_grhub_code[] = {
0x039b0ef5,
/* 0x0004: queue_put */
0x9800d898,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgk104.fuc3.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgk104.fuc3.h
index e8b8c1c94700..1f26cb6a233c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgk104.fuc3.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgk104.fuc3.h
@@ -1,4 +1,4 @@
-uint32_t gk104_grhub_data[] = {
+static uint32_t gk104_grhub_data[] = {
/* 0x0000: hub_mmio_list_head */
0x00000300,
/* 0x0004: hub_mmio_list_tail */
@@ -205,7 +205,7 @@ uint32_t gk104_grhub_data[] = {
0x0417e91c,
};
-uint32_t gk104_grhub_code[] = {
+static uint32_t gk104_grhub_code[] = {
0x039b0ef5,
/* 0x0004: queue_put */
0x9800d898,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgk110.fuc3.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgk110.fuc3.h
index f4ed2fb6f714..70436d93efe3 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgk110.fuc3.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgk110.fuc3.h
@@ -1,4 +1,4 @@
-uint32_t gk110_grhub_data[] = {
+static uint32_t gk110_grhub_data[] = {
/* 0x0000: hub_mmio_list_head */
0x00000300,
/* 0x0004: hub_mmio_list_tail */
@@ -205,7 +205,7 @@ uint32_t gk110_grhub_data[] = {
0x0417e91c,
};
-uint32_t gk110_grhub_code[] = {
+static uint32_t gk110_grhub_code[] = {
0x039b0ef5,
/* 0x0004: queue_put */
0x9800d898,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgk208.fuc5.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgk208.fuc5.h
index ed488973c117..e0933a07426a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgk208.fuc5.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgk208.fuc5.h
@@ -1,4 +1,4 @@
-uint32_t gk208_grhub_data[] = {
+static uint32_t gk208_grhub_data[] = {
/* 0x0000: hub_mmio_list_head */
0x00000300,
/* 0x0004: hub_mmio_list_tail */
@@ -205,7 +205,7 @@ uint32_t gk208_grhub_data[] = {
0x0417e91c,
};
-uint32_t gk208_grhub_code[] = {
+static uint32_t gk208_grhub_code[] = {
0x030e0ef5,
/* 0x0004: queue_put */
0x9800d898,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgm107.fuc5.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgm107.fuc5.h
index 5c9051839557..9b432823bcbe 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgm107.fuc5.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgm107.fuc5.h
@@ -1,4 +1,4 @@
-uint32_t gm107_grhub_data[] = {
+static uint32_t gm107_grhub_data[] = {
/* 0x0000: hub_mmio_list_head */
0x00000300,
/* 0x0004: hub_mmio_list_tail */
@@ -205,7 +205,7 @@ uint32_t gm107_grhub_data[] = {
0x0417e91c,
};
-uint32_t gm107_grhub_code[] = {
+static uint32_t gm107_grhub_code[] = {
0x030e0ef5,
/* 0x0004: queue_put */
0x9800d898,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
index 157919c788e6..f65a5b0a1a4d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
@@ -1041,6 +1041,13 @@ gf100_gr_trap_tpc(struct gf100_gr *gr, int gpc, int tpc)
stat &= ~0x00000008;
}
+ if (stat & 0x00000010) {
+ u32 trap = nvkm_rd32(device, TPC_UNIT(gpc, tpc, 0x0430));
+ nvkm_error(subdev, "GPC%d/TPC%d/MPC: %08x\n", gpc, tpc, trap);
+ nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x0430), 0xc0000000);
+ stat &= ~0x00000010;
+ }
+
if (stat) {
nvkm_error(subdev, "GPC%d/TPC%d/%08x: unknown\n", gpc, tpc, stat);
}
@@ -1258,7 +1265,7 @@ gf100_gr_ctxctl_isr(struct gf100_gr *gr)
struct nvkm_device *device = subdev->device;
u32 stat = nvkm_rd32(device, 0x409c18);
- if (stat & 0x00000001) {
+ if (!gr->firmware && (stat & 0x00000001)) {
u32 code = nvkm_rd32(device, 0x409814);
if (code == E_BAD_FWMTHD) {
u32 class = nvkm_rd32(device, 0x409808);
@@ -1270,15 +1277,14 @@ gf100_gr_ctxctl_isr(struct gf100_gr *gr)
nvkm_error(subdev, "FECS MTHD subc %d class %04x "
"mthd %04x data %08x\n",
subc, class, mthd, data);
-
- nvkm_wr32(device, 0x409c20, 0x00000001);
- stat &= ~0x00000001;
} else {
nvkm_error(subdev, "FECS ucode error %d\n", code);
}
+ nvkm_wr32(device, 0x409c20, 0x00000001);
+ stat &= ~0x00000001;
}
- if (stat & 0x00080000) {
+ if (!gr->firmware && (stat & 0x00080000)) {
nvkm_error(subdev, "FECS watchdog timeout\n");
gf100_gr_ctxctl_debug(gr);
nvkm_wr32(device, 0x409c20, 0x00080000);
@@ -1384,7 +1390,7 @@ gf100_gr_intr(struct nvkm_gr *base)
nvkm_fifo_chan_put(device->fifo, flags, &chan);
}
-void
+static void
gf100_gr_init_fw(struct gf100_gr *gr, u32 fuc_base,
struct gf100_gr_fuc *code, struct gf100_gr_fuc *data)
{
@@ -1701,7 +1707,7 @@ gf100_gr_oneinit(struct nvkm_gr *base)
return 0;
}
-int
+static int
gf100_gr_init_(struct nvkm_gr *base)
{
struct gf100_gr *gr = gf100_gr(base);
@@ -1756,6 +1762,50 @@ gf100_gr_ = {
};
int
+gf100_gr_ctor_fw_legacy(struct gf100_gr *gr, const char *fwname,
+ struct gf100_gr_fuc *fuc, int ret)
+{
+ struct nvkm_subdev *subdev = &gr->base.engine.subdev;
+ struct nvkm_device *device = subdev->device;
+ const struct firmware *fw;
+ char f[32];
+
+ /* see if this firmware has a legacy path */
+ if (!strcmp(fwname, "fecs_inst"))
+ fwname = "fuc409c";
+ else if (!strcmp(fwname, "fecs_data"))
+ fwname = "fuc409d";
+ else if (!strcmp(fwname, "gpccs_inst"))
+ fwname = "fuc41ac";
+ else if (!strcmp(fwname, "gpccs_data"))
+ fwname = "fuc41ad";
+ else {
+ /* nope, let's just return the error we got */
+ nvkm_error(subdev, "failed to load %s\n", fwname);
+ return ret;
+ }
+
+ /* yes, try to load from the legacy path */
+ nvkm_debug(subdev, "%s: falling back to legacy path\n", fwname);
+
+ snprintf(f, sizeof(f), "nouveau/nv%02x_%s", device->chipset, fwname);
+ ret = request_firmware(&fw, f, device->dev);
+ if (ret) {
+ snprintf(f, sizeof(f), "nouveau/%s", fwname);
+ ret = request_firmware(&fw, f, device->dev);
+ if (ret) {
+ nvkm_error(subdev, "failed to load %s\n", fwname);
+ return ret;
+ }
+ }
+
+ fuc->size = fw->size;
+ fuc->data = kmemdup(fw->data, fuc->size, GFP_KERNEL);
+ release_firmware(fw);
+ return (fuc->data != NULL) ? 0 : -ENOMEM;
+}
+
+int
gf100_gr_ctor_fw(struct gf100_gr *gr, const char *fwname,
struct gf100_gr_fuc *fuc)
{
@@ -1765,10 +1815,8 @@ gf100_gr_ctor_fw(struct gf100_gr *gr, const char *fwname,
int ret;
ret = nvkm_firmware_get(device, fwname, &fw);
- if (ret) {
- nvkm_error(subdev, "failed to load %s\n", fwname);
- return ret;
- }
+ if (ret)
+ return gf100_gr_ctor_fw_legacy(gr, fwname, fuc, ret);
fuc->size = fw->size;
fuc->data = kmemdup(fw->data, fuc->size, GFP_KERNEL);
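gf100_gr_ctor_fw now falls back to gf100_gr_ctor_fw_legacy when nvkm_firmware_get fails: the new names are mapped back to the historical fuc409c/fuc409d/fuc41ac/fuc41ad files and both the nouveau/nv%02x_%s and plain nouveau/%s paths are tried. The name mapping on its own, as a runnable sketch (chipset value hypothetical):

#include <stdio.h>
#include <string.h>

/* mirrors the fwname -> legacy fuc name mapping in the hunk above */
static const char *legacy_fw_name(const char *fwname)
{
	if (!strcmp(fwname, "fecs_inst"))
		return "fuc409c";
	if (!strcmp(fwname, "fecs_data"))
		return "fuc409d";
	if (!strcmp(fwname, "gpccs_inst"))
		return "fuc41ac";
	if (!strcmp(fwname, "gpccs_data"))
		return "fuc41ad";
	return NULL;	/* no legacy path for this firmware */
}

int main(void)
{
	const char *legacy = legacy_fw_name("fecs_inst");
	char path[32];

	if (legacy) {
		/* first candidate; "nouveau/%s" is tried if this one fails */
		snprintf(path, sizeof(path), "nouveau/nv%02x_%s", 0xc1, legacy);
		puts(path);
	}
	return 0;
}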
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf117.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf117.c
index 70335f65c51e..0124e468086e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf117.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf117.c
@@ -102,7 +102,7 @@ gf117_gr_pack_mmio[] = {
#include "fuc/hubgf117.fuc3.h"
-struct gf100_gr_ucode
+static struct gf100_gr_ucode
gf117_gr_fecs_ucode = {
.code.data = gf117_grhub_code,
.code.size = sizeof(gf117_grhub_code),
@@ -112,7 +112,7 @@ gf117_gr_fecs_ucode = {
#include "fuc/gpcgf117.fuc3.h"
-struct gf100_gr_ucode
+static struct gf100_gr_ucode
gf117_gr_gpccs_ucode = {
.code.data = gf117_grgpc_code,
.code.size = sizeof(gf117_grgpc_code),
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm107.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm107.c
index 45f965f608a7..2c67fac576d1 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm107.c
@@ -308,7 +308,7 @@ gm107_gr_init_bios(struct gf100_gr *gr)
}
}
-int
+static int
gm107_gr_init(struct gf100_gr *gr)
{
struct nvkm_device *device = gr->base.engine.subdev.device;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv30.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv30.c
index f1e15a4d4f64..b4e3c50badc7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv30.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv30.c
@@ -187,6 +187,7 @@ nv30_gr = {
{ -1, -1, 0x038a, &nv04_gr_object }, /* ifc (nv30) */
{ -1, -1, 0x039e, &nv04_gr_object }, /* swzsurf (nv30) */
{ -1, -1, 0x0397, &nv04_gr_object }, /* rankine */
+ { -1, -1, 0x0597, &nv04_gr_object }, /* kelvin */
{}
}
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv34.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv34.c
index 300f5ed5de0b..e7ed04b935cd 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv34.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv34.c
@@ -123,6 +123,7 @@ nv34_gr = {
{ -1, -1, 0x0389, &nv04_gr_object }, /* sifm (nv30) */
{ -1, -1, 0x038a, &nv04_gr_object }, /* ifc (nv30) */
{ -1, -1, 0x039e, &nv04_gr_object }, /* swzsurf (nv30) */
+ { -1, -1, 0x0597, &nv04_gr_object }, /* kelvin */
{ -1, -1, 0x0697, &nv04_gr_object }, /* rankine */
{}
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv35.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv35.c
index 740df0f52c38..5e8abacbacc6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv35.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv35.c
@@ -124,6 +124,7 @@ nv35_gr = {
{ -1, -1, 0x038a, &nv04_gr_object }, /* ifc (nv30) */
{ -1, -1, 0x039e, &nv04_gr_object }, /* swzsurf (nv30) */
{ -1, -1, 0x0497, &nv04_gr_object }, /* rankine */
+ { -1, -1, 0x0597, &nv04_gr_object }, /* kelvin */
{}
}
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/pm/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/pm/base.c
index 8616636ad7b4..dde89a4a0f5b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/pm/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/pm/base.c
@@ -71,7 +71,7 @@ nvkm_perfdom_find(struct nvkm_pm *pm, int di)
return NULL;
}
-struct nvkm_perfsig *
+static struct nvkm_perfsig *
nvkm_perfsig_find(struct nvkm_pm *pm, u8 di, u8 si, struct nvkm_perfdom **pdom)
{
struct nvkm_perfdom *dom = *pdom;
@@ -699,7 +699,7 @@ nvkm_pm_oclass_get(struct nvkm_oclass *oclass, int index,
return 1;
}
-int
+static int
nvkm_perfsrc_new(struct nvkm_pm *pm, struct nvkm_perfsig *sig,
const struct nvkm_specsrc *spec)
{
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/pm/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/pm/gf100.c
index d2901e9a7808..fe2532ee4145 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/pm/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/pm/gf100.c
@@ -102,7 +102,7 @@ gf100_pm_gpc[] = {
{}
};
-const struct nvkm_specdom
+static const struct nvkm_specdom
gf100_pm_part[] = {
{ 0xe0, (const struct nvkm_specsig[]) {
{ 0x0f, "part00_pbfb_00", gf100_pbfb_sources },
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/sec/fuc/g98.fuc0s.h b/drivers/gpu/drm/nouveau/nvkm/engine/sec/fuc/g98.fuc0s.h
index eca62221f299..4b57f8814560 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/sec/fuc/g98.fuc0s.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/sec/fuc/g98.fuc0s.h
@@ -1,4 +1,4 @@
-uint32_t g98_sec_data[] = {
+static uint32_t g98_sec_data[] = {
/* 0x0000: ctx_dma */
/* 0x0000: ctx_dma_query */
0x00000000,
@@ -150,7 +150,7 @@ uint32_t g98_sec_data[] = {
0x00000000,
};
-uint32_t g98_sec_code[] = {
+static uint32_t g98_sec_code[] = {
0x17f004bd,
0x0010fe35,
0xf10004fe,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/nv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/nv50.c
index 370dcd8ff7b5..6eff637ac301 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/nv50.c
@@ -84,7 +84,7 @@ nv50_bar_oneinit(struct nvkm_bar *base)
start = 0x0100000000ULL;
limit = start + device->func->resource_size(device, 3);
- ret = nvkm_vm_new(device, start, limit, start, &bar3_lock, &vm);
+ ret = nvkm_vm_new(device, start, limit - start, start, &bar3_lock, &vm);
if (ret)
return ret;
@@ -117,7 +117,7 @@ nv50_bar_oneinit(struct nvkm_bar *base)
start = 0x0000000000ULL;
limit = start + device->func->resource_size(device, 1);
- ret = nvkm_vm_new(device, start, limit--, start, &bar1_lock, &vm);
+ ret = nvkm_vm_new(device, start, limit-- - start, start, &bar1_lock, &vm);
if (ret)
return ret;
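Both nv50 BAR hunks fix the same bug: nvkm_vm_new takes a length, but the code was passing the end address (start + size), so the BAR3 VM at start 0x0100000000 came out far larger than the BAR. A minimal arithmetic check (BAR size hypothetical):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t start = 0x0100000000ULL;
	uint64_t size  = 0x0008000000ULL;	/* hypothetical BAR3 size */
	uint64_t limit = start + size;

	/* the VM constructor wants a length, not an end address */
	printf("buggy length: %" PRIx64 "\n", limit);
	printf("fixed length: %" PRIx64 "\n", limit - start);
	return 0;
}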
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/Kbuild
index dbcb0ef21587..be57220a2e01 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/Kbuild
@@ -31,6 +31,7 @@ nvkm-y += nvkm/subdev/bios/timing.o
nvkm-y += nvkm/subdev/bios/therm.o
nvkm-y += nvkm/subdev/bios/vmap.o
nvkm-y += nvkm/subdev/bios/volt.o
+nvkm-y += nvkm/subdev/bios/vpstate.o
nvkm-y += nvkm/subdev/bios/xpio.o
nvkm-y += nvkm/subdev/bios/M0203.o
nvkm-y += nvkm/subdev/bios/M0205.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/boost.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/boost.c
index 3756ec91a88d..eaf74eb72983 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/boost.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/boost.c
@@ -25,16 +25,16 @@
#include <subdev/bios/bit.h>
#include <subdev/bios/boost.h>
-u16
+u32
nvbios_boostTe(struct nvkm_bios *bios,
u8 *ver, u8 *hdr, u8 *cnt, u8 *len, u8 *snr, u8 *ssz)
{
struct bit_entry bit_P;
- u16 boost = 0x0000;
+ u32 boost = 0;
if (!bit_entry(bios, 'P', &bit_P)) {
if (bit_P.version == 2)
- boost = nvbios_rd16(bios, bit_P.offset + 0x30);
+ boost = nvbios_rd32(bios, bit_P.offset + 0x30);
if (boost) {
*ver = nvbios_rd08(bios, boost + 0);
@@ -52,15 +52,15 @@ nvbios_boostTe(struct nvkm_bios *bios,
}
}
- return 0x0000;
+ return 0;
}
-u16
+u32
nvbios_boostEe(struct nvkm_bios *bios, int idx,
u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
{
u8 snr, ssz;
- u16 data = nvbios_boostTe(bios, ver, hdr, cnt, len, &snr, &ssz);
+ u32 data = nvbios_boostTe(bios, ver, hdr, cnt, len, &snr, &ssz);
if (data && idx < *cnt) {
data = data + *hdr + (idx * (*len + (snr * ssz)));
*hdr = *len;
@@ -68,14 +68,14 @@ nvbios_boostEe(struct nvkm_bios *bios, int idx,
*len = ssz;
return data;
}
- return 0x0000;
+ return 0;
}
-u16
+u32
nvbios_boostEp(struct nvkm_bios *bios, int idx,
u8 *ver, u8 *hdr, u8 *cnt, u8 *len, struct nvbios_boostE *info)
{
- u16 data = nvbios_boostEe(bios, idx, ver, hdr, cnt, len);
+ u32 data = nvbios_boostEe(bios, idx, ver, hdr, cnt, len);
memset(info, 0x00, sizeof(*info));
if (data) {
info->pstate = (nvbios_rd16(bios, data + 0x00) & 0x01e0) >> 5;
@@ -85,7 +85,7 @@ nvbios_boostEp(struct nvkm_bios *bios, int idx,
return data;
}
-u16
+u32
nvbios_boostEm(struct nvkm_bios *bios, u8 pstate,
u8 *ver, u8 *hdr, u8 *cnt, u8 *len, struct nvbios_boostE *info)
{
@@ -97,21 +97,21 @@ nvbios_boostEm(struct nvkm_bios *bios, u8 pstate,
return data;
}
-u16
+u32
nvbios_boostSe(struct nvkm_bios *bios, int idx,
- u16 data, u8 *ver, u8 *hdr, u8 cnt, u8 len)
+ u32 data, u8 *ver, u8 *hdr, u8 cnt, u8 len)
{
if (data && idx < cnt) {
data = data + *hdr + (idx * len);
*hdr = len;
return data;
}
- return 0x0000;
+ return 0;
}
-u16
+u32
nvbios_boostSp(struct nvkm_bios *bios, int idx,
- u16 data, u8 *ver, u8 *hdr, u8 cnt, u8 len,
+ u32 data, u8 *ver, u8 *hdr, u8 cnt, u8 len,
struct nvbios_boostS *info)
{
data = nvbios_boostSe(bios, idx, data, ver, hdr, cnt, len);
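boost.c above, and the cstep/fan/iccsense/perf/therm/timing/vmap/volt changes that follow, all make the same fix: BIT 'P' table pointers are widened from u16 to u32 and read with nvbios_rd32, so tables located above 64KiB into the BIOS image remain reachable; the entry arithmetic itself is unchanged. The common stride computation as a standalone sketch (example values hypothetical):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* data + hdr + idx * (len + snr * ssz), as in nvbios_boostEe above */
static uint32_t table_entry(uint32_t data, uint8_t hdr, int idx,
			    uint8_t len, uint8_t snr, uint8_t ssz)
{
	return data + hdr + (uint32_t)idx * (len + snr * ssz);
}

int main(void)
{
	/* 8-byte header, 4-byte entries, each with 2 subentries of 3 bytes */
	printf("entry 2 at %08" PRIx32 "\n", table_entry(0x12000, 8, 2, 4, 2, 3));
	return 0;
}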
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/cstep.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/cstep.c
index 32e01624a162..5063382d8a6c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/cstep.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/cstep.c
@@ -25,16 +25,16 @@
#include <subdev/bios/bit.h>
#include <subdev/bios/cstep.h>
-u16
+u32
nvbios_cstepTe(struct nvkm_bios *bios,
u8 *ver, u8 *hdr, u8 *cnt, u8 *len, u8 *xnr, u8 *xsz)
{
struct bit_entry bit_P;
- u16 cstep = 0x0000;
+ u32 cstep = 0;
if (!bit_entry(bios, 'P', &bit_P)) {
if (bit_P.version == 2)
- cstep = nvbios_rd16(bios, bit_P.offset + 0x34);
+ cstep = nvbios_rd32(bios, bit_P.offset + 0x34);
if (cstep) {
*ver = nvbios_rd08(bios, cstep + 0);
@@ -52,27 +52,27 @@ nvbios_cstepTe(struct nvkm_bios *bios,
}
}
- return 0x0000;
+ return 0;
}
-u16
+u32
nvbios_cstepEe(struct nvkm_bios *bios, int idx, u8 *ver, u8 *hdr)
{
u8 cnt, len, xnr, xsz;
- u16 data = nvbios_cstepTe(bios, ver, hdr, &cnt, &len, &xnr, &xsz);
+ u32 data = nvbios_cstepTe(bios, ver, hdr, &cnt, &len, &xnr, &xsz);
if (data && idx < cnt) {
data = data + *hdr + (idx * len);
*hdr = len;
return data;
}
- return 0x0000;
+ return 0;
}
-u16
+u32
nvbios_cstepEp(struct nvkm_bios *bios, int idx, u8 *ver, u8 *hdr,
struct nvbios_cstepE *info)
{
- u16 data = nvbios_cstepEe(bios, idx, ver, hdr);
+ u32 data = nvbios_cstepEe(bios, idx, ver, hdr);
memset(info, 0x00, sizeof(*info));
if (data) {
info->pstate = (nvbios_rd16(bios, data + 0x00) & 0x01e0) >> 5;
@@ -81,7 +81,7 @@ nvbios_cstepEp(struct nvkm_bios *bios, int idx, u8 *ver, u8 *hdr,
return data;
}
-u16
+u32
nvbios_cstepEm(struct nvkm_bios *bios, u8 pstate, u8 *ver, u8 *hdr,
struct nvbios_cstepE *info)
{
@@ -93,24 +93,24 @@ nvbios_cstepEm(struct nvkm_bios *bios, u8 pstate, u8 *ver, u8 *hdr,
return data;
}
-u16
+u32
nvbios_cstepXe(struct nvkm_bios *bios, int idx, u8 *ver, u8 *hdr)
{
u8 cnt, len, xnr, xsz;
- u16 data = nvbios_cstepTe(bios, ver, hdr, &cnt, &len, &xnr, &xsz);
+ u32 data = nvbios_cstepTe(bios, ver, hdr, &cnt, &len, &xnr, &xsz);
if (data && idx < xnr) {
data = data + *hdr + (cnt * len) + (idx * xsz);
*hdr = xsz;
return data;
}
- return 0x0000;
+ return 0;
}
-u16
+u32
nvbios_cstepXp(struct nvkm_bios *bios, int idx, u8 *ver, u8 *hdr,
struct nvbios_cstepX *info)
{
- u16 data = nvbios_cstepXe(bios, idx, ver, hdr);
+ u32 data = nvbios_cstepXe(bios, idx, ver, hdr);
memset(info, 0x00, sizeof(*info));
if (data) {
info->freq = nvbios_rd16(bios, data + 0x00) * 1000;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/dp.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/dp.c
index d89e78c4e689..972370ed36f0 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/dp.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/dp.c
@@ -207,8 +207,11 @@ nvbios_dpcfg_match(struct nvkm_bios *bios, u16 outp, u8 pc, u8 vs, u8 pe,
if (*ver >= 0x30) {
const u8 vsoff[] = { 0, 4, 7, 9 };
idx = (pc * 10) + vsoff[vs] + pe;
- if (*ver >= 0x40 && *hdr >= 0x12)
+ if (*ver >= 0x40 && *ver <= 0x41 && *hdr >= 0x12)
idx += nvbios_rd08(bios, outp + 0x11) * 40;
+ else
+ if (*ver >= 0x42)
+ idx += nvbios_rd08(bios, outp + 0x11) * 10;
} else {
while ((data = nvbios_dpcfg_entry(bios, outp, ++idx,
ver, hdr, cnt, len))) {
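The dpcfg change splits the version 0x40 handling: tables up to version 0x41 keep a 40-entry stride per entry-specific group, while 0x42+ uses a 10-entry stride. Index computation only, as a hedged sketch that ignores the header-length guard from the hunk:

#include <stdio.h>

static int dpcfg_index(int ver, int pc, int vs, int pe, int group)
{
	static const int vsoff[] = { 0, 4, 7, 9 };
	int idx = (pc * 10) + vsoff[vs] + pe;

	if (ver >= 0x40 && ver <= 0x41)
		idx += group * 40;
	else if (ver >= 0x42)
		idx += group * 10;
	return idx;
}

int main(void)
{
	printf("v0x41: %d  v0x42: %d\n",
	       dpcfg_index(0x41, 1, 2, 1, 1), dpcfg_index(0x42, 1, 2, 1, 1));
	return 0;
}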
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/fan.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/fan.c
index 80fed7e78dcb..456f9ea920dc 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/fan.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/fan.c
@@ -25,15 +25,15 @@
#include <subdev/bios/bit.h>
#include <subdev/bios/fan.h>
-u16
+static u32
nvbios_fan_table(struct nvkm_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
{
struct bit_entry bit_P;
- u16 fan = 0x0000;
+ u32 fan = 0;
if (!bit_entry(bios, 'P', &bit_P)) {
if (bit_P.version == 2 && bit_P.length >= 0x5a)
- fan = nvbios_rd16(bios, bit_P.offset + 0x58);
+ fan = nvbios_rd32(bios, bit_P.offset + 0x58);
if (fan) {
*ver = nvbios_rd08(bios, fan + 0);
@@ -49,25 +49,25 @@ nvbios_fan_table(struct nvkm_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
}
}
- return 0x0000;
+ return 0;
}
-u16
+static u32
nvbios_fan_entry(struct nvkm_bios *bios, int idx, u8 *ver, u8 *hdr,
u8 *cnt, u8 *len)
{
- u16 data = nvbios_fan_table(bios, ver, hdr, cnt, len);
+ u32 data = nvbios_fan_table(bios, ver, hdr, cnt, len);
if (data && idx < *cnt)
return data + *hdr + (idx * (*len));
- return 0x0000;
+ return 0;
}
-u16
+u32
nvbios_fan_parse(struct nvkm_bios *bios, struct nvbios_therm_fan *fan)
{
u8 ver, hdr, cnt, len;
- u16 data = nvbios_fan_entry(bios, 0, &ver, &hdr, &cnt, &len);
+ u32 data = nvbios_fan_entry(bios, 0, &ver, &hdr, &cnt, &len);
if (data) {
u8 type = nvbios_rd08(bios, data + 0x00);
switch (type) {
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/iccsense.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/iccsense.c
index 084328028af1..3953d11844ea 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/iccsense.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/iccsense.c
@@ -23,20 +23,21 @@
*/
#include <subdev/bios.h>
#include <subdev/bios/bit.h>
+#include <subdev/bios/extdev.h>
#include <subdev/bios/iccsense.h>
-static u16
+static u32
nvbios_iccsense_table(struct nvkm_bios *bios, u8 *ver, u8 *hdr, u8 *cnt,
u8 *len)
{
struct bit_entry bit_P;
- u16 iccsense;
+ u32 iccsense;
if (bit_entry(bios, 'P', &bit_P) || bit_P.version != 2 ||
bit_P.length < 0x2c)
return 0;
- iccsense = nvbios_rd16(bios, bit_P.offset + 0x28);
+ iccsense = nvbios_rd32(bios, bit_P.offset + 0x28);
if (!iccsense)
return 0;
@@ -60,7 +61,7 @@ nvbios_iccsense_parse(struct nvkm_bios *bios, struct nvbios_iccsense *iccsense)
{
struct nvkm_subdev *subdev = &bios->subdev;
u8 ver, hdr, cnt, len, i;
- u16 table, entry;
+ u32 table, entry;
table = nvbios_iccsense_table(bios, &ver, &hdr, &cnt, &len);
if (!table || !cnt)
@@ -77,23 +78,47 @@ nvbios_iccsense_parse(struct nvkm_bios *bios, struct nvbios_iccsense *iccsense)
return -ENOMEM;
for (i = 0; i < cnt; ++i) {
+ struct nvbios_extdev_func extdev;
struct pwr_rail_t *rail = &iccsense->rail[i];
+ u8 res_start = 0;
+ int r;
+
entry = table + hdr + i * len;
switch(ver) {
case 0x10:
rail->mode = nvbios_rd08(bios, entry + 0x1);
rail->extdev_id = nvbios_rd08(bios, entry + 0x2);
- rail->resistor_mohm = nvbios_rd08(bios, entry + 0x3);
- rail->rail = nvbios_rd08(bios, entry + 0x4);
+ res_start = 0x3;
break;
case 0x20:
rail->mode = nvbios_rd08(bios, entry);
rail->extdev_id = nvbios_rd08(bios, entry + 0x1);
- rail->resistor_mohm = nvbios_rd08(bios, entry + 0x5);
- rail->rail = nvbios_rd08(bios, entry + 0x6);
+ res_start = 0x5;
+ break;
+ };
+
+ if (nvbios_extdev_parse(bios, rail->extdev_id, &extdev))
+ continue;
+
+ switch (extdev.type) {
+ case NVBIOS_EXTDEV_INA209:
+ case NVBIOS_EXTDEV_INA219:
+ rail->resistor_count = 1;
+ break;
+ case NVBIOS_EXTDEV_INA3221:
+ rail->resistor_count = 3;
+ break;
+ default:
+ rail->resistor_count = 0;
break;
};
+
+ for (r = 0; r < rail->resistor_count; ++r) {
+ rail->resistors[r].mohm = nvbios_rd08(bios, entry + res_start + r * 2);
+ rail->resistors[r].enabled = !(nvbios_rd08(bios, entry + res_start + r * 2 + 1) & 0x40);
+ }
+ rail->config = nvbios_rd16(bios, entry + res_start + rail->resistor_count * 2);
}
return 0;
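iccsense now derives the shunt-resistor count from the external device type (one for INA209/INA219, three for INA3221) and reads each resistor as a (milliohm, flags) byte pair starting at res_start, with bit 0x40 of the flags byte meaning disabled. The per-entry layout over a plain byte buffer (data values hypothetical):

#include <stdint.h>
#include <stdio.h>

struct resistor { int mohm; int enabled; };

/* parse 'count' (mohm, flags) pairs starting at entry[res_start] */
static void parse_resistors(const uint8_t *entry, int res_start,
			    int count, struct resistor *out)
{
	for (int r = 0; r < count; r++) {
		out[r].mohm    = entry[res_start + r * 2];
		out[r].enabled = !(entry[res_start + r * 2 + 1] & 0x40);
	}
}

int main(void)
{
	/* fake v0x20 entry: mode, extdev_id, padding, then resistor pairs */
	const uint8_t entry[] = { 1, 0, 0, 0, 0, 5, 0x00, 5, 0x40, 5, 0x00 };
	struct resistor res[3];

	parse_resistors(entry, 5, 3, res);	/* INA3221-style: 3 shunts */
	for (int r = 0; r < 3; r++)
		printf("R%d: %d mOhm, %s\n", r, res[r].mohm,
		       res[r].enabled ? "enabled" : "disabled");
	return 0;
}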
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/mxm.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/mxm.c
index 3ddf0939ded3..994cc2d7759b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/mxm.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/mxm.c
@@ -81,7 +81,7 @@ mxm_sor_map(struct nvkm_bios *bios, u8 conn)
u16 map = nvbios_rd16(bios, mxm + 4);
if (map) {
ver = nvbios_rd08(bios, map);
- if (ver == 0x10) {
+ if (ver == 0x10 || ver == 0x11) {
if (conn < nvbios_rd08(bios, map + 3)) {
map += nvbios_rd08(bios, map + 1);
map += conn;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/perf.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/perf.c
index 636bfb665bb9..c3068358f695 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/perf.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/perf.c
@@ -26,16 +26,16 @@
#include <subdev/bios/perf.h>
#include <subdev/pci.h>
-u16
+u32
nvbios_perf_table(struct nvkm_bios *bios, u8 *ver, u8 *hdr,
u8 *cnt, u8 *len, u8 *snr, u8 *ssz)
{
struct bit_entry bit_P;
- u16 perf = 0x0000;
+ u32 perf = 0;
if (!bit_entry(bios, 'P', &bit_P)) {
if (bit_P.version <= 2) {
- perf = nvbios_rd16(bios, bit_P.offset + 0);
+ perf = nvbios_rd32(bios, bit_P.offset + 0);
if (perf) {
*ver = nvbios_rd08(bios, perf + 0);
*hdr = nvbios_rd08(bios, perf + 1);
@@ -72,15 +72,15 @@ nvbios_perf_table(struct nvkm_bios *bios, u8 *ver, u8 *hdr,
}
}
- return 0x0000;
+ return 0;
}
-u16
+u32
nvbios_perf_entry(struct nvkm_bios *bios, int idx,
u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
{
u8 snr, ssz;
- u16 perf = nvbios_perf_table(bios, ver, hdr, cnt, len, &snr, &ssz);
+ u32 perf = nvbios_perf_table(bios, ver, hdr, cnt, len, &snr, &ssz);
if (perf && idx < *cnt) {
perf = perf + *hdr + (idx * (*len + (snr * ssz)));
*hdr = *len;
@@ -88,14 +88,14 @@ nvbios_perf_entry(struct nvkm_bios *bios, int idx,
*len = ssz;
return perf;
}
- return 0x0000;
+ return 0;
}
-u16
+u32
nvbios_perfEp(struct nvkm_bios *bios, int idx,
u8 *ver, u8 *hdr, u8 *cnt, u8 *len, struct nvbios_perfE *info)
{
- u16 perf = nvbios_perf_entry(bios, idx, ver, hdr, cnt, len);
+ u32 perf = nvbios_perf_entry(bios, idx, ver, hdr, cnt, len);
memset(info, 0x00, sizeof(*info));
info->pstate = nvbios_rd08(bios, perf + 0x00);
switch (!!perf * *ver) {
@@ -163,7 +163,7 @@ nvbios_perfEp(struct nvkm_bios *bios, int idx,
info->pcie_width = 0xff;
break;
default:
- return 0x0000;
+ return 0;
}
return perf;
}
@@ -202,7 +202,7 @@ nvbios_perf_fan_parse(struct nvkm_bios *bios,
struct nvbios_perf_fan *fan)
{
u8 ver, hdr, cnt, len, snr, ssz;
- u16 perf = nvbios_perf_table(bios, &ver, &hdr, &cnt, &len, &snr, &ssz);
+ u32 perf = nvbios_perf_table(bios, &ver, &hdr, &cnt, &len, &snr, &ssz);
if (!perf)
return -ENODEV;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/priv.h
index 212800ecdce9..7d1d3c6b4b72 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/priv.h
@@ -12,6 +12,7 @@ struct nvbios_source {
bool rw;
bool ignore_checksum;
bool no_pcir;
+ bool require_checksum;
};
int nvbios_extend(struct nvkm_bios *, u32 length);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadow.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadow.c
index b2557e87afdd..7deb81b6dbac 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadow.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadow.c
@@ -86,9 +86,12 @@ shadow_image(struct nvkm_bios *bios, int idx, u32 offset, struct shadow *mthd)
nvbios_checksum(&bios->data[image.base], image.size)) {
nvkm_debug(subdev, "%08x: checksum failed\n",
image.base);
- if (mthd->func->rw)
+ if (!mthd->func->require_checksum) {
+ if (mthd->func->rw)
+ score += 1;
score += 1;
- score += 1;
+ } else
+ return 0;
} else {
score += 3;
}
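shadow_image gains a require_checksum source flag (set on the "fast" ACPI method below): an image with a bad checksum is now rejected outright for such sources instead of merely scoring lower. The scoring logic in isolation:

#include <stdbool.h>
#include <stdio.h>

static int score_image(bool checksum_ok, bool rw, bool require_checksum)
{
	int score = 0;

	if (!checksum_ok) {
		if (require_checksum)
			return 0;	/* discard the image entirely */
		if (rw)
			score += 1;
		score += 1;
	} else {
		score += 3;
	}
	return score;
}

int main(void)
{
	printf("acpi_fast, bad checksum: %d\n", score_image(false, false, true));
	printf("rw source, bad checksum: %d\n", score_image(false, true, false));
	return 0;
}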
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowacpi.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowacpi.c
index 8fecb5ff22a0..06572f8ce914 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowacpi.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowacpi.c
@@ -99,6 +99,7 @@ nvbios_acpi_fast = {
.init = acpi_init,
.read = acpi_read_fast,
.rw = false,
+ .require_checksum = true,
};
const struct nvbios_source
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/therm.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/therm.c
index a54cfec0550d..5babc5a7c7d5 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/therm.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/therm.c
@@ -25,17 +25,17 @@
#include <subdev/bios/bit.h>
#include <subdev/bios/therm.h>
-static u16
+static u32
therm_table(struct nvkm_bios *bios, u8 *ver, u8 *hdr, u8 *len, u8 *cnt)
{
struct bit_entry bit_P;
- u16 therm = 0;
+ u32 therm = 0;
if (!bit_entry(bios, 'P', &bit_P)) {
if (bit_P.version == 1)
- therm = nvbios_rd16(bios, bit_P.offset + 12);
+ therm = nvbios_rd32(bios, bit_P.offset + 12);
else if (bit_P.version == 2)
- therm = nvbios_rd16(bios, bit_P.offset + 16);
+ therm = nvbios_rd32(bios, bit_P.offset + 16);
else
nvkm_error(&bios->subdev,
"unknown offset for thermal in BIT P %d\n",
@@ -44,7 +44,7 @@ therm_table(struct nvkm_bios *bios, u8 *ver, u8 *hdr, u8 *len, u8 *cnt)
/* exit now if we haven't found the thermal table */
if (!therm)
- return 0x0000;
+ return 0;
*ver = nvbios_rd08(bios, therm + 0);
*hdr = nvbios_rd08(bios, therm + 1);
@@ -53,14 +53,14 @@ therm_table(struct nvkm_bios *bios, u8 *ver, u8 *hdr, u8 *len, u8 *cnt)
return therm + nvbios_rd08(bios, therm + 1);
}
-static u16
+static u32
nvbios_therm_entry(struct nvkm_bios *bios, int idx, u8 *ver, u8 *len)
{
u8 hdr, cnt;
- u16 therm = therm_table(bios, ver, &hdr, len, &cnt);
+ u32 therm = therm_table(bios, ver, &hdr, len, &cnt);
if (therm && idx < cnt)
return therm + idx * *len;
- return 0x0000;
+ return 0;
}
int
@@ -70,7 +70,7 @@ nvbios_therm_sensor_parse(struct nvkm_bios *bios,
{
s8 thrs_section, sensor_section, offset;
u8 ver, len, i;
- u16 entry;
+ u32 entry;
/* we only support the core domain for now */
if (domain != NVBIOS_THERM_DOMAIN_CORE)
@@ -154,7 +154,7 @@ nvbios_therm_fan_parse(struct nvkm_bios *bios, struct nvbios_therm_fan *fan)
{
struct nvbios_therm_trip_point *cur_trip = NULL;
u8 ver, len, i;
- u16 entry;
+ u32 entry;
uint8_t duty_lut[] = { 0, 0, 25, 0, 40, 0, 50, 0,
75, 0, 85, 0, 100, 0, 100, 0 };
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/timing.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/timing.c
index 99f6432ac0af..7e83c3985020 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/timing.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/timing.c
@@ -25,19 +25,19 @@
#include <subdev/bios/bit.h>
#include <subdev/bios/timing.h>
-u16
+u32
nvbios_timingTe(struct nvkm_bios *bios,
u8 *ver, u8 *hdr, u8 *cnt, u8 *len, u8 *snr, u8 *ssz)
{
struct bit_entry bit_P;
- u16 timing = 0x0000;
+ u32 timing = 0;
if (!bit_entry(bios, 'P', &bit_P)) {
if (bit_P.version == 1)
- timing = nvbios_rd16(bios, bit_P.offset + 4);
+ timing = nvbios_rd32(bios, bit_P.offset + 4);
else
if (bit_P.version == 2)
- timing = nvbios_rd16(bios, bit_P.offset + 8);
+ timing = nvbios_rd32(bios, bit_P.offset + 8);
if (timing) {
*ver = nvbios_rd08(bios, timing + 0);
@@ -62,15 +62,15 @@ nvbios_timingTe(struct nvkm_bios *bios,
}
}
- return 0x0000;
+ return 0;
}
-u16
+u32
nvbios_timingEe(struct nvkm_bios *bios, int idx,
u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
{
u8 snr, ssz;
- u16 timing = nvbios_timingTe(bios, ver, hdr, cnt, len, &snr, &ssz);
+ u32 timing = nvbios_timingTe(bios, ver, hdr, cnt, len, &snr, &ssz);
if (timing && idx < *cnt) {
timing += *hdr + idx * (*len + (snr * ssz));
*hdr = *len;
@@ -78,14 +78,14 @@ nvbios_timingEe(struct nvkm_bios *bios, int idx,
*len = ssz;
return timing;
}
- return 0x0000;
+ return 0;
}
-u16
+u32
nvbios_timingEp(struct nvkm_bios *bios, int idx,
u8 *ver, u8 *hdr, u8 *cnt, u8 *len, struct nvbios_ramcfg *p)
{
- u16 data = nvbios_timingEe(bios, idx, ver, hdr, cnt, len), temp;
+ u32 data = nvbios_timingEe(bios, idx, ver, hdr, cnt, len), temp;
p->timing_ver = *ver;
p->timing_hdr = *hdr;
switch (!!data * *ver) {
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/vmap.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/vmap.c
index 2f13db745948..c228ca15fa3b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/vmap.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/vmap.c
@@ -25,15 +25,15 @@
#include <subdev/bios/bit.h>
#include <subdev/bios/vmap.h>
-u16
+u32
nvbios_vmap_table(struct nvkm_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
{
struct bit_entry bit_P;
- u16 vmap = 0x0000;
+ u32 vmap = 0;
if (!bit_entry(bios, 'P', &bit_P)) {
if (bit_P.version == 2) {
- vmap = nvbios_rd16(bios, bit_P.offset + 0x20);
+ vmap = nvbios_rd32(bios, bit_P.offset + 0x20);
if (vmap) {
*ver = nvbios_rd08(bios, vmap + 0);
switch (*ver) {
@@ -50,40 +50,50 @@ nvbios_vmap_table(struct nvkm_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
}
}
- return 0x0000;
+ return 0;
}
-u16
+u32
nvbios_vmap_parse(struct nvkm_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
struct nvbios_vmap *info)
{
- u16 vmap = nvbios_vmap_table(bios, ver, hdr, cnt, len);
+ u32 vmap = nvbios_vmap_table(bios, ver, hdr, cnt, len);
memset(info, 0x00, sizeof(*info));
switch (!!vmap * *ver) {
case 0x10:
+ info->max0 = 0xff;
+ info->max1 = 0xff;
+ info->max2 = 0xff;
+ break;
case 0x20:
+ info->max0 = nvbios_rd08(bios, vmap + 0x7);
+ info->max1 = nvbios_rd08(bios, vmap + 0x8);
+ if (*len >= 0xc)
+ info->max2 = nvbios_rd08(bios, vmap + 0xc);
+ else
+ info->max2 = 0xff;
break;
}
return vmap;
}
-u16
+u32
nvbios_vmap_entry(struct nvkm_bios *bios, int idx, u8 *ver, u8 *len)
{
u8 hdr, cnt;
- u16 vmap = nvbios_vmap_table(bios, ver, &hdr, &cnt, len);
+ u32 vmap = nvbios_vmap_table(bios, ver, &hdr, &cnt, len);
if (vmap && idx < cnt) {
vmap = vmap + hdr + (idx * *len);
return vmap;
}
- return 0x0000;
+ return 0;
}
-u16
+u32
nvbios_vmap_entry_parse(struct nvkm_bios *bios, int idx, u8 *ver, u8 *len,
struct nvbios_vmap_entry *info)
{
- u16 vmap = nvbios_vmap_entry(bios, idx, ver, len);
+ u32 vmap = nvbios_vmap_entry(bios, idx, ver, len);
memset(info, 0x00, sizeof(*info));
switch (!!vmap * *ver) {
case 0x10:
@@ -95,7 +105,7 @@ nvbios_vmap_entry_parse(struct nvkm_bios *bios, int idx, u8 *ver, u8 *len,
info->arg[2] = nvbios_rd32(bios, vmap + 0x10);
break;
case 0x20:
- info->unk0 = nvbios_rd08(bios, vmap + 0x00);
+ info->mode = nvbios_rd08(bios, vmap + 0x00);
info->link = nvbios_rd08(bios, vmap + 0x01);
info->min = nvbios_rd32(bios, vmap + 0x02);
info->max = nvbios_rd32(bios, vmap + 0x06);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/volt.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/volt.c
index 6e0a33648be9..a7797a9e9cbc 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/volt.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/volt.c
@@ -25,18 +25,18 @@
#include <subdev/bios/bit.h>
#include <subdev/bios/volt.h>
-u16
+u32
nvbios_volt_table(struct nvkm_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
{
struct bit_entry bit_P;
- u16 volt = 0x0000;
+ u32 volt = 0;
if (!bit_entry(bios, 'P', &bit_P)) {
if (bit_P.version == 2)
- volt = nvbios_rd16(bios, bit_P.offset + 0x0c);
+ volt = nvbios_rd32(bios, bit_P.offset + 0x0c);
else
if (bit_P.version == 1)
- volt = nvbios_rd16(bios, bit_P.offset + 0x10);
+ volt = nvbios_rd32(bios, bit_P.offset + 0x10);
if (volt) {
*ver = nvbios_rd08(bios, volt + 0);
@@ -62,33 +62,37 @@ nvbios_volt_table(struct nvkm_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
}
}
- return 0x0000;
+ return 0;
}
-u16
+u32
nvbios_volt_parse(struct nvkm_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
struct nvbios_volt *info)
{
- u16 volt = nvbios_volt_table(bios, ver, hdr, cnt, len);
+ u32 volt = nvbios_volt_table(bios, ver, hdr, cnt, len);
memset(info, 0x00, sizeof(*info));
switch (!!volt * *ver) {
case 0x12:
info->type = NVBIOS_VOLT_GPIO;
info->vidmask = nvbios_rd08(bios, volt + 0x04);
+ info->ranged = false;
break;
case 0x20:
info->type = NVBIOS_VOLT_GPIO;
info->vidmask = nvbios_rd08(bios, volt + 0x05);
+ info->ranged = false;
break;
case 0x30:
info->type = NVBIOS_VOLT_GPIO;
info->vidmask = nvbios_rd08(bios, volt + 0x04);
+ info->ranged = false;
break;
case 0x40:
info->type = NVBIOS_VOLT_GPIO;
info->base = nvbios_rd32(bios, volt + 0x04);
info->step = nvbios_rd16(bios, volt + 0x08);
info->vidmask = nvbios_rd08(bios, volt + 0x0b);
+ info->ranged = true; /* XXX: find the flag byte */
/*XXX*/
info->min = 0;
info->max = info->base;
@@ -104,32 +108,34 @@ nvbios_volt_parse(struct nvkm_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
info->pwm_freq = nvbios_rd32(bios, volt + 0x5) / 1000;
info->pwm_range = nvbios_rd32(bios, volt + 0x16);
} else {
- info->type = NVBIOS_VOLT_GPIO;
- info->vidmask = nvbios_rd08(bios, volt + 0x06);
- info->step = nvbios_rd16(bios, volt + 0x16);
+ info->type = NVBIOS_VOLT_GPIO;
+ info->vidmask = nvbios_rd08(bios, volt + 0x06);
+ info->step = nvbios_rd16(bios, volt + 0x16);
+ info->ranged =
+ !!(nvbios_rd08(bios, volt + 0x4) & 0x2);
}
break;
}
return volt;
}
-u16
+u32
nvbios_volt_entry(struct nvkm_bios *bios, int idx, u8 *ver, u8 *len)
{
u8 hdr, cnt;
- u16 volt = nvbios_volt_table(bios, ver, &hdr, &cnt, len);
+ u32 volt = nvbios_volt_table(bios, ver, &hdr, &cnt, len);
if (volt && idx < cnt) {
volt = volt + hdr + (idx * *len);
return volt;
}
- return 0x0000;
+ return 0;
}
-u16
+u32
nvbios_volt_entry_parse(struct nvkm_bios *bios, int idx, u8 *ver, u8 *len,
struct nvbios_volt_entry *info)
{
- u16 volt = nvbios_volt_entry(bios, idx, ver, len);
+ u32 volt = nvbios_volt_entry(bios, idx, ver, len);
memset(info, 0x00, sizeof(*info));
switch (!!volt * *ver) {
case 0x12:
@@ -142,7 +148,10 @@ nvbios_volt_entry_parse(struct nvkm_bios *bios, int idx, u8 *ver, u8 *len,
info->vid = nvbios_rd08(bios, volt + 0x01) >> 2;
break;
case 0x40:
+ break;
case 0x50:
+ info->voltage = nvbios_rd32(bios, volt) & 0x001fffff;
+ info->vid = (nvbios_rd32(bios, volt) >> 23) & 0xff;
break;
}
return volt;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/vpstate.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/vpstate.c
new file mode 100644
index 000000000000..f199270163d2
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/vpstate.c
@@ -0,0 +1,82 @@
+/*
+ * Copyright 2016 Karol Herbst
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Karol Herbst
+ */
+#include <subdev/bios.h>
+#include <subdev/bios/bit.h>
+#include <subdev/bios/vpstate.h>
+
+static u32
+nvbios_vpstate_offset(struct nvkm_bios *b)
+{
+ struct bit_entry bit_P;
+
+ if (!bit_entry(b, 'P', &bit_P)) {
+ if (bit_P.version == 2)
+ return nvbios_rd32(b, bit_P.offset + 0x38);
+ }
+
+	return 0;
+}
+
+int
+nvbios_vpstate_parse(struct nvkm_bios *b, struct nvbios_vpstate_header *h)
+{
+ if (!h)
+ return -EINVAL;
+
+ h->offset = nvbios_vpstate_offset(b);
+ if (!h->offset)
+ return -ENODEV;
+
+ h->version = nvbios_rd08(b, h->offset);
+ switch (h->version) {
+ case 0x10:
+ h->hlen = nvbios_rd08(b, h->offset + 0x1);
+ h->elen = nvbios_rd08(b, h->offset + 0x2);
+ h->slen = nvbios_rd08(b, h->offset + 0x3);
+ h->scount = nvbios_rd08(b, h->offset + 0x4);
+ h->ecount = nvbios_rd08(b, h->offset + 0x5);
+
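+		/* vpstate entry indices for the base, boost and TDP
+		 * clock levels (assumed meaning) */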
+ h->base_id = nvbios_rd08(b, h->offset + 0x0f);
+ h->boost_id = nvbios_rd08(b, h->offset + 0x10);
+ h->tdp_id = nvbios_rd08(b, h->offset + 0x11);
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
+int
+nvbios_vpstate_entry(struct nvkm_bios *b, struct nvbios_vpstate_header *h,
+ u8 idx, struct nvbios_vpstate_entry *e)
+{
+ u32 offset;
+
+	if (!e || !h || idx >= h->ecount)
+ return -EINVAL;
+
+ offset = h->offset + h->hlen + idx * (h->elen + (h->slen * h->scount));
+ e->pstate = nvbios_rd08(b, offset);
+ e->clock_mhz = nvbios_rd16(b, offset + 0x5);
+ return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/base.c
index 7102c25320fc..e4c8d310d870 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/base.c
@@ -27,6 +27,7 @@
#include <subdev/bios/boost.h>
#include <subdev/bios/cstep.h>
#include <subdev/bios/perf.h>
+#include <subdev/bios/vpstate.h>
#include <subdev/fb.h>
#include <subdev/therm.h>
#include <subdev/volt.h>
@@ -43,13 +44,13 @@ nvkm_clk_adjust(struct nvkm_clk *clk, bool adjust,
struct nvkm_bios *bios = clk->subdev.device->bios;
struct nvbios_boostE boostE;
u8 ver, hdr, cnt, len;
- u16 data;
+ u32 data;
data = nvbios_boostEm(bios, pstate, &ver, &hdr, &cnt, &len, &boostE);
if (data) {
struct nvbios_boostS boostS;
u8 idx = 0, sver, shdr;
- u16 subd;
+ u32 subd;
input = max(boostE.min, input);
input = min(boostE.max, input);
@@ -74,6 +75,88 @@ nvkm_clk_adjust(struct nvkm_clk *clk, bool adjust,
/******************************************************************************
* C-States
*****************************************************************************/
+static bool
+nvkm_cstate_valid(struct nvkm_clk *clk, struct nvkm_cstate *cstate,
+ u32 max_volt, int temp)
+{
+ const struct nvkm_domain *domain = clk->domains;
+ struct nvkm_volt *volt = clk->subdev.device->volt;
+ int voltage;
+
+ while (domain && domain->name != nv_clk_src_max) {
+ if (domain->flags & NVKM_CLK_DOM_FLAG_VPSTATE) {
+ u32 freq = cstate->domain[domain->name];
+ switch (clk->boost_mode) {
+ case NVKM_CLK_BOOST_NONE:
+ if (clk->base_khz && freq > clk->base_khz)
+ return false;
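+				/* fall through - the base-clock cap implies
+				 * the boost-clock cap as well */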
+ case NVKM_CLK_BOOST_BIOS:
+ if (clk->boost_khz && freq > clk->boost_khz)
+ return false;
+ }
+ }
+ domain++;
+ }
+
+ if (!volt)
+ return true;
+
+ voltage = nvkm_volt_map(volt, cstate->voltage, temp);
+ if (voltage < 0)
+ return false;
+ return voltage <= min(max_volt, volt->max_uv);
+}
+
+static struct nvkm_cstate *
+nvkm_cstate_find_best(struct nvkm_clk *clk, struct nvkm_pstate *pstate,
+ struct nvkm_cstate *start)
+{
+ struct nvkm_device *device = clk->subdev.device;
+ struct nvkm_volt *volt = device->volt;
+ struct nvkm_cstate *cstate;
+ int max_volt;
+
+ if (!pstate || !start)
+ return NULL;
+
+ if (!volt)
+ return start;
+
+ max_volt = volt->max_uv;
+ if (volt->max0_id != 0xff)
+ max_volt = min(max_volt,
+ nvkm_volt_map(volt, volt->max0_id, clk->temp));
+ if (volt->max1_id != 0xff)
+ max_volt = min(max_volt,
+ nvkm_volt_map(volt, volt->max1_id, clk->temp));
+ if (volt->max2_id != 0xff)
+ max_volt = min(max_volt,
+ nvkm_volt_map(volt, volt->max2_id, clk->temp));
+
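+	/* step down from the requested cstate until one passes
+	 * nvkm_cstate_valid() */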
+ for (cstate = start; &cstate->head != &pstate->list;
+ cstate = list_entry(cstate->head.prev, typeof(*cstate), head)) {
+ if (nvkm_cstate_valid(clk, cstate, max_volt, clk->temp))
+ break;
+ }
+
+ return cstate;
+}
+
+static struct nvkm_cstate *
+nvkm_cstate_get(struct nvkm_clk *clk, struct nvkm_pstate *pstate, int cstatei)
+{
+ struct nvkm_cstate *cstate;
+ if (cstatei == NVKM_CLK_CSTATE_HIGHEST)
+ return list_last_entry(&pstate->list, typeof(*cstate), head);
+ else {
+ list_for_each_entry(cstate, &pstate->list, head) {
+ if (cstate->id == cstatei)
+ return cstate;
+ }
+ }
+ return NULL;
+}
+
static int
nvkm_cstate_prog(struct nvkm_clk *clk, struct nvkm_pstate *pstate, int cstatei)
{
@@ -85,7 +168,8 @@ nvkm_cstate_prog(struct nvkm_clk *clk, struct nvkm_pstate *pstate, int cstatei)
int ret;
if (!list_empty(&pstate->list)) {
- cstate = list_entry(pstate->list.prev, typeof(*cstate), head);
+ cstate = nvkm_cstate_get(clk, pstate, cstatei);
+ cstate = nvkm_cstate_find_best(clk, pstate, cstate);
} else {
cstate = &pstate->base;
}
@@ -99,7 +183,8 @@ nvkm_cstate_prog(struct nvkm_clk *clk, struct nvkm_pstate *pstate, int cstatei)
}
if (volt) {
- ret = nvkm_volt_set_id(volt, cstate->voltage, +1);
+ ret = nvkm_volt_set_id(volt, cstate->voltage,
+ pstate->base.voltage, clk->temp, +1);
if (ret && ret != -ENODEV) {
nvkm_error(subdev, "failed to raise voltage: %d\n", ret);
return ret;
@@ -113,7 +198,8 @@ nvkm_cstate_prog(struct nvkm_clk *clk, struct nvkm_pstate *pstate, int cstatei)
}
if (volt) {
- ret = nvkm_volt_set_id(volt, cstate->voltage, -1);
+ ret = nvkm_volt_set_id(volt, cstate->voltage,
+ pstate->base.voltage, clk->temp, -1);
if (ret && ret != -ENODEV)
nvkm_error(subdev, "failed to lower voltage: %d\n", ret);
}
@@ -138,22 +224,27 @@ static int
nvkm_cstate_new(struct nvkm_clk *clk, int idx, struct nvkm_pstate *pstate)
{
struct nvkm_bios *bios = clk->subdev.device->bios;
+ struct nvkm_volt *volt = clk->subdev.device->volt;
const struct nvkm_domain *domain = clk->domains;
struct nvkm_cstate *cstate = NULL;
struct nvbios_cstepX cstepX;
u8 ver, hdr;
- u16 data;
+ u32 data;
data = nvbios_cstepXp(bios, idx, &ver, &hdr, &cstepX);
if (!data)
return -ENOENT;
+ if (volt && nvkm_volt_map_min(volt, cstepX.voltage) > volt->max_uv)
+ return -EINVAL;
+
cstate = kzalloc(sizeof(*cstate), GFP_KERNEL);
if (!cstate)
return -ENOMEM;
*cstate = pstate->base;
cstate->voltage = cstepX.voltage;
+ cstate->id = idx;
while (domain && domain->name != nv_clk_src_max) {
if (domain->flags & NVKM_CLK_DOM_FLAG_CORE) {
@@ -175,7 +266,7 @@ static int
nvkm_pstate_prog(struct nvkm_clk *clk, int pstatei)
{
struct nvkm_subdev *subdev = &clk->subdev;
- struct nvkm_ram *ram = subdev->device->fb->ram;
+ struct nvkm_fb *fb = subdev->device->fb;
struct nvkm_pci *pci = subdev->device->pci;
struct nvkm_pstate *pstate;
int ret, idx = 0;
@@ -190,7 +281,8 @@ nvkm_pstate_prog(struct nvkm_clk *clk, int pstatei)
nvkm_pcie_set_link(pci, pstate->pcie_speed, pstate->pcie_width);
- if (ram && ram->func->calc) {
+ if (fb && fb->ram && fb->ram->func->calc) {
+ struct nvkm_ram *ram = fb->ram;
int khz = pstate->base.domain[nv_clk_src_mem];
do {
ret = ram->func->calc(ram, khz);
@@ -200,7 +292,7 @@ nvkm_pstate_prog(struct nvkm_clk *clk, int pstatei)
ram->func->tidy(ram);
}
- return nvkm_cstate_prog(clk, pstate, 0);
+ return nvkm_cstate_prog(clk, pstate, NVKM_CLK_CSTATE_HIGHEST);
}
static void
@@ -214,14 +306,14 @@ nvkm_pstate_work(struct work_struct *work)
return;
clk->pwrsrc = power_supply_is_system_supplied();
- nvkm_trace(subdev, "P %d PWR %d U(AC) %d U(DC) %d A %d T %d D %d\n",
+ nvkm_trace(subdev, "P %d PWR %d U(AC) %d U(DC) %d A %d T %d°C D %d\n",
clk->pstate, clk->pwrsrc, clk->ustate_ac, clk->ustate_dc,
- clk->astate, clk->tstate, clk->dstate);
+ clk->astate, clk->temp, clk->dstate);
pstate = clk->pwrsrc ? clk->ustate_ac : clk->ustate_dc;
if (clk->state_nr && pstate != -1) {
pstate = (pstate < 0) ? clk->astate : pstate;
- pstate = min(pstate, clk->state_nr - 1 + clk->tstate);
+ pstate = min(pstate, clk->state_nr - 1);
pstate = max(pstate, clk->dstate);
} else {
pstate = clk->pstate = -1;
@@ -316,7 +408,7 @@ nvkm_pstate_new(struct nvkm_clk *clk, int idx)
struct nvbios_cstepE cstepE;
struct nvbios_perfE perfE;
u8 ver, hdr, cnt, len;
- u16 data;
+ u32 data;
data = nvbios_perfEp(bios, idx, &ver, &hdr, &cnt, &len, &perfE);
if (!data)
@@ -448,13 +540,12 @@ nvkm_clk_astate(struct nvkm_clk *clk, int req, int rel, bool wait)
}
int
-nvkm_clk_tstate(struct nvkm_clk *clk, int req, int rel)
+nvkm_clk_tstate(struct nvkm_clk *clk, u8 temp)
{
- if (!rel) clk->tstate = req;
- if ( rel) clk->tstate += rel;
- clk->tstate = min(clk->tstate, 0);
- clk->tstate = max(clk->tstate, -(clk->state_nr - 1));
- return nvkm_pstate_calc(clk, true);
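+	/* tstate now carries a temperature (°C) from therm rather than
+	 * a throttle offset */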
+ if (clk->temp == temp)
+ return 0;
+ clk->temp = temp;
+ return nvkm_pstate_calc(clk, false);
}
int
@@ -524,9 +615,9 @@ nvkm_clk_init(struct nvkm_subdev *subdev)
return clk->func->init(clk);
clk->astate = clk->state_nr - 1;
- clk->tstate = 0;
clk->dstate = 0;
clk->pstate = -1;
+ clk->temp = 90; /* reasonable default value */
nvkm_pstate_calc(clk, true);
return 0;
}
@@ -561,10 +652,22 @@ int
nvkm_clk_ctor(const struct nvkm_clk_func *func, struct nvkm_device *device,
int index, bool allow_reclock, struct nvkm_clk *clk)
{
+ struct nvkm_subdev *subdev = &clk->subdev;
+ struct nvkm_bios *bios = device->bios;
int ret, idx, arglen;
const char *mode;
+ struct nvbios_vpstate_header h;
+
+ nvkm_subdev_ctor(&nvkm_clk, device, index, subdev);
+
+ if (bios && !nvbios_vpstate_parse(bios, &h)) {
+ struct nvbios_vpstate_entry base, boost;
+ if (!nvbios_vpstate_entry(bios, &h, h.boost_id, &boost))
+ clk->boost_khz = boost.clock_mhz * 1000;
+ if (!nvbios_vpstate_entry(bios, &h, h.base_id, &base))
+ clk->base_khz = base.clock_mhz * 1000;
+ }
- nvkm_subdev_ctor(&nvkm_clk, device, index, &clk->subdev);
clk->func = func;
INIT_LIST_HEAD(&clk->states);
clk->domains = func->domains;
@@ -607,6 +710,8 @@ nvkm_clk_ctor(const struct nvkm_clk_func *func, struct nvkm_device *device,
if (mode)
clk->ustate_dc = nvkm_clk_nstate(clk, mode, arglen);
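+	/* NvBoost=0 caps clocks at the base entry, 1 allows BIOS boost
+	 * clocks, other values lift the cap (assumed mapping) */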
+ clk->boost_mode = nvkm_longopt(device->cfgopt, "NvBoost",
+ NVKM_CLK_BOOST_NONE);
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gf100.c
index 89d5543118cf..7f67f9f5a550 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gf100.c
@@ -457,7 +457,7 @@ gf100_clk = {
{ nv_clk_src_hubk06 , 0x00 },
{ nv_clk_src_hubk01 , 0x01 },
{ nv_clk_src_copy , 0x02 },
- { nv_clk_src_gpc , 0x03, 0, "core", 2000 },
+ { nv_clk_src_gpc , 0x03, NVKM_CLK_DOM_FLAG_VPSTATE, "core", 2000 },
{ nv_clk_src_rop , 0x04 },
{ nv_clk_src_mem , 0x05, 0, "memory", 1000 },
{ nv_clk_src_vdec , 0x06 },
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk104.c
index 06bc0d2d6ae1..0b37e3da7feb 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk104.c
@@ -491,7 +491,7 @@ gk104_clk = {
.domains = {
{ nv_clk_src_crystal, 0xff },
{ nv_clk_src_href , 0xff },
- { nv_clk_src_gpc , 0x00, NVKM_CLK_DOM_FLAG_CORE, "core", 2000 },
+ { nv_clk_src_gpc , 0x00, NVKM_CLK_DOM_FLAG_CORE | NVKM_CLK_DOM_FLAG_VPSTATE, "core", 2000 },
{ nv_clk_src_hubk07 , 0x01, NVKM_CLK_DOM_FLAG_CORE },
{ nv_clk_src_rop , 0x02, NVKM_CLK_DOM_FLAG_CORE },
{ nv_clk_src_mem , 0x03, 0, "memory", 500 },
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gt215.c b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gt215.c
index 056702ef69aa..96e0941c8edd 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gt215.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gt215.c
@@ -180,7 +180,7 @@ gt215_clk_read(struct nvkm_clk *base, enum nv_clk_src src)
return 0;
}
-int
+static int
gt215_clk_info(struct nvkm_clk *base, int idx, u32 khz,
struct gt215_clk_info *info)
{
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm200.c b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm200.c
index a410c0db8a08..1730371933df 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm200.c
@@ -26,6 +26,7 @@
#include <subdev/bios.h>
#include <subdev/bios/bit.h>
#include <subdev/bios/pmu.h>
+#include <subdev/timer.h>
static void
pmu_code(struct nv50_devinit *init, u32 pmu, u32 img, u32 len, bool sec)
@@ -123,21 +124,13 @@ gm200_devinit_post(struct nvkm_devinit *base, bool post)
return -EINVAL;
}
- /* reset PMU and load init table parser ucode */
- if (post) {
- nvkm_mask(device, 0x000200, 0x00002000, 0x00000000);
- nvkm_mask(device, 0x000200, 0x00002000, 0x00002000);
- nvkm_rd32(device, 0x000200);
- while (nvkm_rd32(device, 0x10a10c) & 0x00000006) {
- }
- }
-
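+	/* the PMU reset and IMEM/DMEM scrub wait now happen in the PMU
+	 * subdev's preinit (see pmu/base.c) */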
ret = pmu_load(init, 0x04, post, &exec, &args);
if (ret)
return ret;
/* upload first chunk of init data */
if (post) {
+		/* devinit tables */
u32 pmu = pmu_args(init, args + 0x08, 0x08);
u32 img = nvbios_rd16(bios, bit_I.offset + 0x14);
u32 len = nvbios_rd16(bios, bit_I.offset + 0x16);
@@ -146,6 +139,7 @@ gm200_devinit_post(struct nvkm_devinit *base, bool post)
/* upload second chunk of init data */
if (post) {
+		/* devinit boot scripts */
u32 pmu = pmu_args(init, args + 0x08, 0x10);
u32 img = nvbios_rd16(bios, bit_I.offset + 0x18);
u32 len = nvbios_rd16(bios, bit_I.offset + 0x1a);
@@ -156,8 +150,11 @@ gm200_devinit_post(struct nvkm_devinit *base, bool post)
if (post) {
nvkm_wr32(device, 0x10a040, 0x00005000);
pmu_exec(init, exec);
- while (!(nvkm_rd32(device, 0x10a040) & 0x00002000)) {
- }
+ if (nvkm_msec(device, 2000,
+ if (nvkm_rd32(device, 0x10a040) & 0x00002000)
+ break;
+ ) < 0)
+ return -ETIMEDOUT;
}
/* load and execute some other ucode image (bios therm?) */
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/Kbuild
index edcc157e6ac8..63566ba12fbb 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/Kbuild
@@ -24,8 +24,9 @@ nvkm-y += nvkm/subdev/fb/gk104.o
nvkm-y += nvkm/subdev/fb/gk20a.o
nvkm-y += nvkm/subdev/fb/gm107.o
nvkm-y += nvkm/subdev/fb/gm200.o
+nvkm-y += nvkm/subdev/fb/gm20b.o
nvkm-y += nvkm/subdev/fb/gp100.o
-nvkm-y += nvkm/subdev/fb/gp104.o
+nvkm-y += nvkm/subdev/fb/gp102.o
nvkm-y += nvkm/subdev/fb/ram.o
nvkm-y += nvkm/subdev/fb/ramnv04.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.c
index 76433cc66fff..3841ad6be99e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.c
@@ -50,24 +50,33 @@ gf100_fb_intr(struct nvkm_fb *base)
}
int
-gf100_fb_oneinit(struct nvkm_fb *fb)
+gf100_fb_oneinit(struct nvkm_fb *base)
{
- struct nvkm_device *device = fb->subdev.device;
+ struct gf100_fb *fb = gf100_fb(base);
+ struct nvkm_device *device = fb->base.subdev.device;
int ret, size = 0x1000;
size = nvkm_longopt(device->cfgopt, "MmuDebugBufferSize", size);
size = min(size, 0x1000);
ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, size, 0x1000,
- false, &fb->mmu_rd);
+ false, &fb->base.mmu_rd);
if (ret)
return ret;
ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, size, 0x1000,
- false, &fb->mmu_wr);
+ false, &fb->base.mmu_wr);
if (ret)
return ret;
+ fb->r100c10_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
+ if (fb->r100c10_page) {
+ fb->r100c10 = dma_map_page(device->dev, fb->r100c10_page, 0,
+ PAGE_SIZE, DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(device->dev, fb->r100c10))
+ return -EFAULT;
+ }
+
return 0;
}
@@ -123,14 +132,6 @@ gf100_fb_new_(const struct nvkm_fb_func *func, struct nvkm_device *device,
nvkm_fb_ctor(func, device, index, &fb->base);
*pfb = &fb->base;
- fb->r100c10_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
- if (fb->r100c10_page) {
- fb->r100c10 = dma_map_page(device->dev, fb->r100c10_page, 0,
- PAGE_SIZE, DMA_BIDIRECTIONAL);
- if (dma_mapping_error(device->dev, fb->r100c10))
- return -EFAULT;
- }
-
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.h b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.h
index 449f431644b3..412eb89834e8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.h
@@ -16,4 +16,8 @@ void gf100_fb_init(struct nvkm_fb *);
void gf100_fb_intr(struct nvkm_fb *);
void gp100_fb_init(struct nvkm_fb *);
+
+void gm200_fb_init_page(struct nvkm_fb *fb);
+void gm200_fb_init(struct nvkm_fb *base);
+
#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk20a.c
index f815fe2bbf08..5d34d6136616 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk20a.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2014-2016, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -20,27 +20,21 @@
* DEALINGS IN THE SOFTWARE.
*/
#include "priv.h"
+#include "gf100.h"
-#include <core/memory.h>
-
-static void
-gk20a_fb_init(struct nvkm_fb *fb)
-{
- struct nvkm_device *device = fb->subdev.device;
- nvkm_wr32(device, 0x100cc8, nvkm_memory_addr(fb->mmu_wr) >> 8);
- nvkm_wr32(device, 0x100ccc, nvkm_memory_addr(fb->mmu_rd) >> 8);
-}
-
+/* GK20A's FB is similar to GF100's, but without the ability to allocate VRAM */
static const struct nvkm_fb_func
gk20a_fb = {
+ .dtor = gf100_fb_dtor,
.oneinit = gf100_fb_oneinit,
- .init = gk20a_fb_init,
+ .init = gf100_fb_init,
.init_page = gf100_fb_init_page,
+ .intr = gf100_fb_intr,
.memtype_valid = gf100_fb_memtype_valid,
};
int
gk20a_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
{
- return nvkm_fb_new_(&gk20a_fb, device, index, pfb);
+ return gf100_fb_new_(&gk20a_fb, device, index, pfb);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gm200.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gm200.c
index 62f653240be3..fe5886013ac0 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gm200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gm200.c
@@ -44,7 +44,7 @@ gm200_fb_init_page(struct nvkm_fb *fb)
}
}
-static void
+void
gm200_fb_init(struct nvkm_fb *base)
{
struct gf100_fb *fb = gf100_fb(base);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gm20b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gm20b.c
new file mode 100644
index 000000000000..b87c233bcd6d
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gm20b.c
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+#include "priv.h"
+#include "gf100.h"
+
+/* GM20B's FB is similar to GM200's, but without the ability to allocate VRAM */
+static const struct nvkm_fb_func
+gm20b_fb = {
+ .dtor = gf100_fb_dtor,
+ .oneinit = gf100_fb_oneinit,
+ .init = gm200_fb_init,
+ .init_page = gm200_fb_init_page,
+ .intr = gf100_fb_intr,
+ .memtype_valid = gf100_fb_memtype_valid,
+};
+
+int
+gm20b_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
+{
+ return gf100_fb_new_(&gm20b_fb, device, index, pfb);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp102.c
index 92cb71861bec..73b4ae1c73dc 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp102.c
@@ -27,7 +27,7 @@
#include <core/memory.h>
static const struct nvkm_fb_func
-gp104_fb = {
+gp102_fb = {
.dtor = gf100_fb_dtor,
.oneinit = gf100_fb_oneinit,
.init = gp100_fb_init,
@@ -37,7 +37,7 @@ gp104_fb = {
};
int
-gp104_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
+gp102_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
{
- return gf100_fb_new_(&gp104_fb, device, index, pfb);
+ return gf100_fb_new_(&gp102_fb, device, index, pfb);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv50.c
index 1b5fb02eab2a..0595e0722bfc 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv50.c
@@ -210,6 +210,23 @@ nv50_fb_intr(struct nvkm_fb *base)
nvkm_fifo_chan_put(fifo, flags, &chan);
}
+static int
+nv50_fb_oneinit(struct nvkm_fb *base)
+{
+ struct nv50_fb *fb = nv50_fb(base);
+ struct nvkm_device *device = fb->base.subdev.device;
+
+ fb->r100c08_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
+ if (fb->r100c08_page) {
+ fb->r100c08 = dma_map_page(device->dev, fb->r100c08_page, 0,
+ PAGE_SIZE, DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(device->dev, fb->r100c08))
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
static void
nv50_fb_init(struct nvkm_fb *base)
{
@@ -245,6 +262,7 @@ nv50_fb_dtor(struct nvkm_fb *base)
static const struct nvkm_fb_func
nv50_fb_ = {
.dtor = nv50_fb_dtor,
+ .oneinit = nv50_fb_oneinit,
.init = nv50_fb_init,
.intr = nv50_fb_intr,
.ram_new = nv50_fb_ram_new,
@@ -263,16 +281,6 @@ nv50_fb_new_(const struct nv50_fb_func *func, struct nvkm_device *device,
fb->func = func;
*pfb = &fb->base;
- fb->r100c08_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
- if (fb->r100c08_page) {
- fb->r100c08 = dma_map_page(device->dev, fb->r100c08_page, 0,
- PAGE_SIZE, DMA_BIDIRECTIONAL);
- if (dma_mapping_error(device->dev, fb->r100c08))
- return -EFAULT;
- } else {
- nvkm_warn(&fb->base.subdev, "failed 100c08 page alloc\n");
- }
-
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.h b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.h
index b9ec0ae6723a..b60068b7d8f9 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.h
@@ -24,6 +24,7 @@ int gf100_ram_ctor(const struct nvkm_ram_func *, struct nvkm_fb *,
int gf100_ram_get(struct nvkm_ram *, u64, u32, u32, u32, struct nvkm_mem **);
void gf100_ram_put(struct nvkm_ram *, struct nvkm_mem **);
+int gk104_ram_ctor(struct nvkm_fb *, struct nvkm_ram **, u32);
int gk104_ram_init(struct nvkm_ram *ram);
/* RAM type-specific MR calculation routines */
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgf100.c
index 772425ca5a9e..093223d1df4f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgf100.c
@@ -420,8 +420,6 @@ gf100_ram_tidy(struct nvkm_ram *base)
ram_exec(&ram->fuc, false);
}
-extern const u8 gf100_pte_storage_type_map[256];
-
void
gf100_ram_put(struct nvkm_ram *ram, struct nvkm_mem **pmem)
{
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgk104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgk104.c
index 1fa3ade468ae..7904fa41acef 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgk104.c
@@ -259,7 +259,9 @@ gk104_ram_calc_gddr5(struct gk104_ram *ram, u32 freq)
ram_mask(fuc, 0x10f808, 0x40000000, 0x40000000);
ram_block(fuc);
- ram_wr32(fuc, 0x62c000, 0x0f0f0000);
+
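+	/* 0x62c000 touches display state; skip the write when no display
+	 * engine is present */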
+ if (nvkm_device_engine(ram->base.fb->subdev.device, NVKM_ENGINE_DISP))
+ ram_wr32(fuc, 0x62c000, 0x0f0f0000);
/* MR1: turn termination on early, for some reason.. */
if ((ram->base.mr[1] & 0x03c) != 0x030) {
@@ -658,7 +660,9 @@ gk104_ram_calc_gddr5(struct gk104_ram *ram, u32 freq)
gk104_ram_train(fuc, 0x80020000, 0x01000000);
ram_unblock(fuc);
- ram_wr32(fuc, 0x62c000, 0x0f0f0f00);
+
+ if (nvkm_device_engine(ram->base.fb->subdev.device, NVKM_ENGINE_DISP))
+ ram_wr32(fuc, 0x62c000, 0x0f0f0f00);
if (next->bios.rammap_11_08_01)
data = 0x00000800;
@@ -706,7 +710,9 @@ gk104_ram_calc_sddr3(struct gk104_ram *ram, u32 freq)
ram_mask(fuc, 0x10f808, 0x40000000, 0x40000000);
ram_block(fuc);
- ram_wr32(fuc, 0x62c000, 0x0f0f0000);
+
+ if (nvkm_device_engine(ram->base.fb->subdev.device, NVKM_ENGINE_DISP))
+ ram_wr32(fuc, 0x62c000, 0x0f0f0000);
if (vc == 1 && ram_have(fuc, gpio2E)) {
u32 temp = ram_mask(fuc, gpio2E, 0x3000, fuc->r_func2E[1]);
@@ -936,7 +942,9 @@ gk104_ram_calc_sddr3(struct gk104_ram *ram, u32 freq)
ram_nsec(fuc, 1000);
ram_unblock(fuc);
- ram_wr32(fuc, 0x62c000, 0x0f0f0f00);
+
+ if (nvkm_device_engine(ram->base.fb->subdev.device, NVKM_ENGINE_DISP))
+ ram_wr32(fuc, 0x62c000, 0x0f0f0f00);
if (next->bios.rammap_11_08_01)
data = 0x00000800;
@@ -1530,6 +1538,12 @@ gk104_ram_func = {
int
gk104_ram_new(struct nvkm_fb *fb, struct nvkm_ram **pram)
{
+ return gk104_ram_ctor(fb, pram, 0x022554);
+}
+
+int
+gk104_ram_ctor(struct nvkm_fb *fb, struct nvkm_ram **pram, u32 maskaddr)
+{
struct nvkm_subdev *subdev = &fb->subdev;
struct nvkm_device *device = subdev->device;
struct nvkm_bios *bios = device->bios;
@@ -1544,7 +1558,7 @@ gk104_ram_new(struct nvkm_fb *fb, struct nvkm_ram **pram)
return -ENOMEM;
*pram = &ram->base;
- ret = gf100_ram_ctor(&gk104_ram_func, fb, 0x022554, &ram->base);
+ ret = gf100_ram_ctor(&gk104_ram_func, fb, maskaddr, &ram->base);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgm107.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgm107.c
index 43d807f6ca71..ac862d1d77bd 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgm107.c
@@ -23,18 +23,8 @@
*/
#include "ram.h"
-static const struct nvkm_ram_func
-gm107_ram_func = {
- .init = gk104_ram_init,
- .get = gf100_ram_get,
- .put = gf100_ram_put,
-};
-
int
gm107_ram_new(struct nvkm_fb *fb, struct nvkm_ram **pram)
{
- if (!(*pram = kzalloc(sizeof(**pram), GFP_KERNEL)))
- return -ENOMEM;
-
- return gf100_ram_ctor(&gm107_ram_func, fb, 0x021c14, *pram);
+ return gk104_ram_ctor(fb, pram, 0x021c14);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgp100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgp100.c
index f3be408b5e5e..405faabe8dcd 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgp100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgp100.c
@@ -92,13 +92,13 @@ gp100_ram_new(struct nvkm_fb *fb, struct nvkm_ram **pram)
enum nvkm_ram_type type = nvkm_fb_bios_memtype(device->bios);
const u32 rsvd_head = ( 256 * 1024); /* vga memory */
const u32 rsvd_tail = (1024 * 1024); /* vbios etc */
- u32 fbpa_num = nvkm_rd32(device, 0x022438), fbpa;
+ u32 fbpa_num = nvkm_rd32(device, 0x02243c), fbpa;
u32 fbio_opt = nvkm_rd32(device, 0x021c14);
u64 part, size = 0, comm = ~0ULL;
bool mixed = false;
int ret;
- nvkm_debug(subdev, "022438: %08x\n", fbpa_num);
+ nvkm_debug(subdev, "02243c: %08x\n", fbpa_num);
nvkm_debug(subdev, "021c14: %08x\n", fbio_opt);
for (fbpa = 0; fbpa < fbpa_num; fbpa++) {
if (!(fbio_opt & (1 << fbpa))) {
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgt215.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgt215.c
index d15ea886df27..f10664372161 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgt215.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgt215.c
@@ -95,7 +95,7 @@ struct gt215_ram {
struct gt215_ltrain ltrain;
};
-void
+static void
gt215_link_train_calc(u32 *vals, struct gt215_ltrain *train)
{
int i, lo, hi;
@@ -149,7 +149,7 @@ gt215_link_train_calc(u32 *vals, struct gt215_ltrain *train)
/*
* Link training for (at least) DDR3
*/
-int
+static int
gt215_link_train(struct gt215_ram *ram)
{
struct gt215_ltrain *train = &ram->ltrain;
@@ -267,7 +267,7 @@ out:
return ret;
}
-int
+static int
gt215_link_train_init(struct gt215_ram *ram)
{
static const u32 pattern[16] = {
@@ -333,7 +333,7 @@ gt215_link_train_init(struct gt215_ram *ram)
return 0;
}
-void
+static void
gt215_link_train_fini(struct gt215_ram *ram)
{
if (ram->ltrain.mem)
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/sddr2.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/sddr2.c
index b9f1ffdfc602..4dcd8742f2da 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/sddr2.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/sddr2.c
@@ -23,6 +23,7 @@
* Ben Skeggs
*/
#include "priv.h"
+#include "ram.h"
struct ramxlat {
int id;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/sddr3.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/sddr3.c
index 26900333b1d6..eca8a445eab3 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/sddr3.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/sddr3.c
@@ -23,6 +23,7 @@
* Roy Spliet <rspliet@eclipso.eu>
*/
#include "priv.h"
+#include "ram.h"
struct ramxlat {
int id;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/gk104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/gk104.c
index 3f45afd17d5a..2ead515b8530 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/gk104.c
@@ -37,7 +37,7 @@ gk104_gpio_intr_stat(struct nvkm_gpio *gpio, u32 *hi, u32 *lo)
nvkm_wr32(device, 0x00dc80, intr1);
}
-void
+static void
gk104_gpio_intr_mask(struct nvkm_gpio *gpio, u32 type, u32 mask, u32 data)
{
struct nvkm_device *device = gpio->subdev.device;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.c
index f0851d57df2f..01d5c5a56e2e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.c
@@ -74,7 +74,7 @@ nvkm_i2c_aux_i2c_func(struct i2c_adapter *adap)
return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
}
-const struct i2c_algorithm
+static const struct i2c_algorithm
nvkm_i2c_aux_i2c_algo = {
.master_xfer = nvkm_i2c_aux_i2c_xfer,
.functionality = nvkm_i2c_aux_i2c_func
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxg94.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxg94.c
index 954f5b76bfcf..b80236a4eeac 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxg94.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxg94.c
@@ -79,7 +79,7 @@ g94_i2c_aux_xfer(struct nvkm_i2c_aux *obj, bool retry,
struct g94_i2c_aux *aux = g94_i2c_aux(obj);
struct nvkm_device *device = aux->base.pad->i2c->subdev.device;
const u32 base = aux->ch * 0x50;
- u32 ctrl, stat, timeout, retries;
+ u32 ctrl, stat, timeout, retries = 0;
u32 xbuf[4] = {};
int ret, i;
@@ -111,7 +111,7 @@ g94_i2c_aux_xfer(struct nvkm_i2c_aux *obj, bool retry,
nvkm_wr32(device, 0x00e4e0 + base, addr);
/* (maybe) retry transaction a number of times on failure... */
- for (retries = 0; !ret && retries < 32; retries++) {
+ do {
/* reset, and delay a while if this is a retry */
nvkm_wr32(device, 0x00e4e4 + base, 0x80000000 | ctrl);
nvkm_wr32(device, 0x00e4e4 + base, 0x00000000 | ctrl);
@@ -131,20 +131,20 @@ g94_i2c_aux_xfer(struct nvkm_i2c_aux *obj, bool retry,
goto out;
}
} while (ctrl & 0x00010000);
- ret = 1;
+ ret = 0;
/* read status, and check if transaction completed ok */
stat = nvkm_mask(device, 0x00e4e8 + base, 0, 0);
if ((stat & 0x000f0000) == 0x00080000 ||
(stat & 0x000f0000) == 0x00020000)
- ret = retry ? 0 : 1;
+ ret = 1;
if ((stat & 0x00000100))
ret = -ETIMEDOUT;
if ((stat & 0x00000e00))
ret = -EIO;
AUX_TRACE(&aux->base, "%02d %08x %08x", retries, ctrl, stat);
- }
+ } while (ret && retry && retries++ < 32);
if (type & 1) {
for (i = 0; i < 16; i += 4) {
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgm200.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgm200.c
index 61d729b82c69..ed458c7f056b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgm200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgm200.c
@@ -79,7 +79,7 @@ gm200_i2c_aux_xfer(struct nvkm_i2c_aux *obj, bool retry,
struct gm200_i2c_aux *aux = gm200_i2c_aux(obj);
struct nvkm_device *device = aux->base.pad->i2c->subdev.device;
const u32 base = aux->ch * 0x50;
- u32 ctrl, stat, timeout, retries;
+ u32 ctrl, stat, timeout, retries = 0;
u32 xbuf[4] = {};
int ret, i;
@@ -111,7 +111,7 @@ gm200_i2c_aux_xfer(struct nvkm_i2c_aux *obj, bool retry,
nvkm_wr32(device, 0x00d950 + base, addr);
/* (maybe) retry transaction a number of times on failure... */
- for (retries = 0; !ret && retries < 32; retries++) {
+ do {
/* reset, and delay a while if this is a retry */
nvkm_wr32(device, 0x00d954 + base, 0x80000000 | ctrl);
nvkm_wr32(device, 0x00d954 + base, 0x00000000 | ctrl);
@@ -131,20 +131,20 @@ gm200_i2c_aux_xfer(struct nvkm_i2c_aux *obj, bool retry,
goto out;
}
} while (ctrl & 0x00010000);
- ret = 1;
+ ret = 0;
/* read status, and check if transaction completed ok */
stat = nvkm_mask(device, 0x00d958 + base, 0, 0);
if ((stat & 0x000f0000) == 0x00080000 ||
(stat & 0x000f0000) == 0x00020000)
- ret = retry ? 0 : 1;
+ ret = 1;
if ((stat & 0x00000100))
ret = -ETIMEDOUT;
if ((stat & 0x00000e00))
ret = -EIO;
AUX_TRACE(&aux->base, "%02d %08x %08x", retries, ctrl, stat);
- }
+ } while (ret && retry && retries++ < 32);
if (type & 1) {
for (i = 0; i < 16; i += 4) {
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gk20a.c
index b7159b338fac..1a4ab825852c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gk20a.c
@@ -29,7 +29,7 @@ gk20a_ibus_init_ibus_ring(struct nvkm_subdev *ibus)
nvkm_mask(device, 0x137250, 0x3f, 0);
nvkm_mask(device, 0x000200, 0x20, 0);
- usleep_range(20, 30);
+ udelay(20);
nvkm_mask(device, 0x000200, 0x20, 0x20);
nvkm_wr32(device, 0x12004c, 0x4);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/iccsense/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/iccsense/base.c
index 41bd5d0f7692..f0af2a381eea 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/iccsense/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/iccsense/base.c
@@ -96,60 +96,12 @@ nvkm_iccsense_ina3221_read(struct nvkm_iccsense *iccsense,
}
static void
-nvkm_iccsense_ina209_config(struct nvkm_iccsense *iccsense,
- struct nvkm_iccsense_sensor *sensor)
-{
- struct nvkm_subdev *subdev = &iccsense->subdev;
- /* configuration:
- * 0x0007: 0x0007 shunt and bus continous
- * 0x0078: 0x0078 128 samples shunt
- * 0x0780: 0x0780 128 samples bus
- * 0x1800: 0x0000 +-40 mV shunt range
- * 0x2000: 0x0000 16V FSR
- */
- u16 value = 0x07ff;
- nvkm_debug(subdev, "config for sensor id %i: 0x%x\n", sensor->id, value);
- nv_wr16i2cr(sensor->i2c, sensor->addr, 0x00, value);
-}
-
-static void
-nvkm_iccsense_ina3221_config(struct nvkm_iccsense *iccsense,
- struct nvkm_iccsense_sensor *sensor)
-{
- struct nvkm_subdev *subdev = &iccsense->subdev;
- /* configuration:
- * 0x0007: 0x0007 shunt and bus continous
- * 0x0031: 0x0000 140 us conversion time shunt
- * 0x01c0: 0x0000 140 us conversion time bus
- * 0x0f00: 0x0f00 1024 samples
- * 0x7000: 0x?000 channels
- */
- u16 value = 0x0e07;
- if (sensor->rail_mask & 0x1)
- value |= 0x1 << 14;
- if (sensor->rail_mask & 0x2)
- value |= 0x1 << 13;
- if (sensor->rail_mask & 0x4)
- value |= 0x1 << 12;
- nvkm_debug(subdev, "config for sensor id %i: 0x%x\n", sensor->id, value);
- nv_wr16i2cr(sensor->i2c, sensor->addr, 0x00, value);
-}
-
-static void
nvkm_iccsense_sensor_config(struct nvkm_iccsense *iccsense,
struct nvkm_iccsense_sensor *sensor)
{
- switch (sensor->type) {
- case NVBIOS_EXTDEV_INA209:
- case NVBIOS_EXTDEV_INA219:
- nvkm_iccsense_ina209_config(iccsense, sensor);
- break;
- case NVBIOS_EXTDEV_INA3221:
- nvkm_iccsense_ina3221_config(iccsense, sensor);
- break;
- default:
- break;
- }
+ struct nvkm_subdev *subdev = &iccsense->subdev;
+	nvkm_trace(subdev, "write config of extdev %i: 0x%04x\n",
+		   sensor->id, sensor->config);
+ nv_wr16i2cr(sensor->i2c, sensor->addr, 0x00, sensor->config);
}
int
@@ -196,7 +148,6 @@ nvkm_iccsense_dtor(struct nvkm_subdev *subdev)
static struct nvkm_iccsense_sensor*
nvkm_iccsense_create_sensor(struct nvkm_iccsense *iccsense, u8 id)
{
-
struct nvkm_subdev *subdev = &iccsense->subdev;
struct nvkm_bios *bios = subdev->device->bios;
struct nvkm_i2c *i2c = subdev->device->i2c;
@@ -245,7 +196,7 @@ nvkm_iccsense_create_sensor(struct nvkm_iccsense *iccsense, u8 id)
sensor->type = extdev.type;
sensor->i2c = &i2c_bus->i2c;
sensor->addr = addr;
- sensor->rail_mask = 0x0;
+ sensor->config = 0x0;
return sensor;
}
@@ -273,48 +224,56 @@ nvkm_iccsense_oneinit(struct nvkm_subdev *subdev)
iccsense->data_valid = true;
for (i = 0; i < stbl.nr_entry; ++i) {
- struct pwr_rail_t *r = &stbl.rail[i];
- struct nvkm_iccsense_rail *rail;
+ struct pwr_rail_t *pwr_rail = &stbl.rail[i];
struct nvkm_iccsense_sensor *sensor;
- int (*read)(struct nvkm_iccsense *,
- struct nvkm_iccsense_rail *);
+ int r;
- if (!r->mode || r->resistor_mohm == 0)
+ if (pwr_rail->mode != 1 || !pwr_rail->resistor_count)
continue;
- sensor = nvkm_iccsense_get_sensor(iccsense, r->extdev_id);
+ sensor = nvkm_iccsense_get_sensor(iccsense, pwr_rail->extdev_id);
if (!sensor)
continue;
- switch (sensor->type) {
- case NVBIOS_EXTDEV_INA209:
- if (r->rail != 0)
- continue;
- read = nvkm_iccsense_ina209_read;
- break;
- case NVBIOS_EXTDEV_INA219:
- if (r->rail != 0)
+ if (!sensor->config)
+ sensor->config = pwr_rail->config;
+ else if (sensor->config != pwr_rail->config)
+			nvkm_error(subdev, "config mismatch found for extdev %i\n",
+				   pwr_rail->extdev_id);
+
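+		/* create one rail per enabled shunt resistor on this
+		 * power rail */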
+ for (r = 0; r < pwr_rail->resistor_count; ++r) {
+ struct nvkm_iccsense_rail *rail;
+ struct pwr_rail_resistor_t *res = &pwr_rail->resistors[r];
+ int (*read)(struct nvkm_iccsense *,
+ struct nvkm_iccsense_rail *);
+
+ if (!res->mohm || !res->enabled)
continue;
- read = nvkm_iccsense_ina219_read;
- break;
- case NVBIOS_EXTDEV_INA3221:
- if (r->rail >= 3)
+
+ switch (sensor->type) {
+ case NVBIOS_EXTDEV_INA209:
+ read = nvkm_iccsense_ina209_read;
+ break;
+ case NVBIOS_EXTDEV_INA219:
+ read = nvkm_iccsense_ina219_read;
+ break;
+ case NVBIOS_EXTDEV_INA3221:
+ read = nvkm_iccsense_ina3221_read;
+ break;
+ default:
continue;
- read = nvkm_iccsense_ina3221_read;
- break;
- default:
- continue;
+ }
+
+ rail = kmalloc(sizeof(*rail), GFP_KERNEL);
+ if (!rail)
+ return -ENOMEM;
+
+ rail->read = read;
+ rail->sensor = sensor;
+ rail->idx = r;
+ rail->mohm = res->mohm;
+			nvkm_debug(subdev, "create rail for extdev %i: { idx: %i, mohm: %i }\n",
+				   pwr_rail->extdev_id, r, rail->mohm);
+ list_add_tail(&rail->head, &iccsense->rails);
}
-
- rail = kmalloc(sizeof(*rail), GFP_KERNEL);
- if (!rail)
- return -ENOMEM;
- sensor->rail_mask |= 1 << r->rail;
- rail->read = read;
- rail->sensor = sensor;
- rail->idx = r->rail;
- rail->mohm = r->resistor_mohm;
- list_add_tail(&rail->head, &iccsense->rails);
}
return 0;
}
@@ -329,7 +288,8 @@ nvkm_iccsense_init(struct nvkm_subdev *subdev)
return 0;
}
-struct nvkm_subdev_func iccsense_func = {
+static const struct nvkm_subdev_func
+iccsense_func = {
.oneinit = nvkm_iccsense_oneinit,
.init = nvkm_iccsense_init,
.dtor = nvkm_iccsense_dtor,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/iccsense/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/iccsense/priv.h
index b72c31d2f908..e90e0f6ed008 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/iccsense/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/iccsense/priv.h
@@ -10,7 +10,7 @@ struct nvkm_iccsense_sensor {
enum nvbios_extdev_type type;
struct i2c_adapter *i2c;
u8 addr;
- u8 rail_mask;
+ u16 config;
};
struct nvkm_iccsense_rail {
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/base.c
index 8ed8f65ff664..10c987a654ec 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/base.c
@@ -104,7 +104,7 @@ nvkm_instobj_dtor(struct nvkm_memory *memory)
return iobj;
}
-const struct nvkm_memory_func
+static const struct nvkm_memory_func
nvkm_instobj_func = {
.dtor = nvkm_instobj_dtor,
.target = nvkm_instobj_target,
@@ -156,7 +156,7 @@ nvkm_instobj_wr32_slow(struct nvkm_memory *memory, u64 offset, u32 data)
return nvkm_wo32(iobj->parent, offset, data);
}
-const struct nvkm_memory_func
+static const struct nvkm_memory_func
nvkm_instobj_func_slow = {
.dtor = nvkm_instobj_dtor,
.target = nvkm_instobj_target,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/base.c
index 39c2a38e54f7..0c7ef250dcaf 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/base.c
@@ -47,8 +47,10 @@ nvkm_ltc_tags_clear(struct nvkm_ltc *ltc, u32 first, u32 count)
BUG_ON((first > limit) || (limit >= ltc->num_tags));
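+	/* pair cbc_clear with cbc_wait under the mutex so concurrent
+	 * callers cannot interleave CBC operations */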
+ mutex_lock(&ltc->subdev.mutex);
ltc->func->cbc_clear(ltc, first, limit);
ltc->func->cbc_wait(ltc);
+ mutex_unlock(&ltc->subdev.mutex);
}
int
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/g84.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/g84.c
index c3d66ef5dc12..430a61c3df44 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/g84.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/g84.c
@@ -34,7 +34,7 @@ g84_mc_reset[] = {
{}
};
-const struct nvkm_mc_map
+static const struct nvkm_mc_map
g84_mc_intr[] = {
{ 0x04000000, NVKM_ENGINE_DISP },
{ 0x00020000, NVKM_ENGINE_VP },
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/base.c
index 21b65ee254e4..e3e2f5e83815 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/base.c
@@ -250,6 +250,10 @@ nvkm_mxm_new_(struct nvkm_device *device, int index, struct nvkm_mxm **pmxm)
}
nvkm_info(&mxm->subdev, "BIOS version %d.%d\n", ver >> 4, ver & 0x0f);
+ nvkm_debug(&mxm->subdev, "module flags: %02x\n",
+ nvbios_rd08(bios, data + 0x01));
+ nvkm_debug(&mxm->subdev, "config flags: %02x\n",
+ nvbios_rd08(bios, data + 0x02));
if (mxm_shadow(mxm, ver)) {
nvkm_warn(&mxm->subdev, "failed to locate valid SIS\n");
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/mxms.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/mxms.c
index 45a2f8e784f9..9abfa5e2fe9f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/mxms.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/mxms.c
@@ -23,8 +23,8 @@
*/
#include "mxms.h"
-#define ROM16(x) le16_to_cpu(*(u16 *)&(x))
-#define ROM32(x) le32_to_cpu(*(u32 *)&(x))
+#define ROM16(x) get_unaligned_le16(&(x))
+#define ROM32(x) get_unaligned_le32(&(x))
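+/* get_unaligned_le{16,32} come from <asm/unaligned.h> (assumed to be
+ * reachable via mxms.h) */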
static u8 *
mxms_data(struct nvkm_mxm *mxm)
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/nv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/nv50.c
index db14fad2ddfc..844971e5e874 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/nv50.c
@@ -190,8 +190,8 @@ mxm_dcb_sanitise(struct nvkm_mxm *mxm)
struct nvkm_bios *bios = subdev->device->bios;
u8 ver, hdr, cnt, len;
u16 dcb = dcb_table(bios, &ver, &hdr, &cnt, &len);
- if (dcb == 0x0000 || ver != 0x40) {
- nvkm_debug(subdev, "unsupported DCB version\n");
+ if (dcb == 0x0000 || (ver != 0x40 && ver != 0x41)) {
+ nvkm_warn(subdev, "unsupported DCB version\n");
return;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/Kbuild
index 88b643b8664e..51fb4bf94a44 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/Kbuild
@@ -8,3 +8,5 @@ nvkm-y += nvkm/subdev/pmu/gk110.o
nvkm-y += nvkm/subdev/pmu/gk208.o
nvkm-y += nvkm/subdev/pmu/gk20a.o
nvkm-y += nvkm/subdev/pmu/gm107.o
+nvkm-y += nvkm/subdev/pmu/gp100.o
+nvkm-y += nvkm/subdev/pmu/gp102.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/base.c
index 8dd164d13043..e611ce80f8ef 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/base.c
@@ -32,225 +32,85 @@ nvkm_pmu_pgob(struct nvkm_pmu *pmu, bool enable)
pmu->func->pgob(pmu, enable);
}
-int
-nvkm_pmu_send(struct nvkm_pmu *pmu, u32 reply[2],
- u32 process, u32 message, u32 data0, u32 data1)
-{
- struct nvkm_subdev *subdev = &pmu->subdev;
- struct nvkm_device *device = subdev->device;
- u32 addr;
-
- mutex_lock(&subdev->mutex);
- /* wait for a free slot in the fifo */
- addr = nvkm_rd32(device, 0x10a4a0);
- if (nvkm_msec(device, 2000,
- u32 tmp = nvkm_rd32(device, 0x10a4b0);
- if (tmp != (addr ^ 8))
- break;
- ) < 0) {
- mutex_unlock(&subdev->mutex);
- return -EBUSY;
- }
-
- /* we currently only support a single process at a time waiting
- * on a synchronous reply, take the PMU mutex and tell the
- * receive handler what we're waiting for
- */
- if (reply) {
- pmu->recv.message = message;
- pmu->recv.process = process;
- }
-
- /* acquire data segment access */
- do {
- nvkm_wr32(device, 0x10a580, 0x00000001);
- } while (nvkm_rd32(device, 0x10a580) != 0x00000001);
-
- /* write the packet */
- nvkm_wr32(device, 0x10a1c0, 0x01000000 | (((addr & 0x07) << 4) +
- pmu->send.base));
- nvkm_wr32(device, 0x10a1c4, process);
- nvkm_wr32(device, 0x10a1c4, message);
- nvkm_wr32(device, 0x10a1c4, data0);
- nvkm_wr32(device, 0x10a1c4, data1);
- nvkm_wr32(device, 0x10a4a0, (addr + 1) & 0x0f);
-
- /* release data segment access */
- nvkm_wr32(device, 0x10a580, 0x00000000);
-
- /* wait for reply, if requested */
- if (reply) {
- wait_event(pmu->recv.wait, (pmu->recv.process == 0));
- reply[0] = pmu->recv.data[0];
- reply[1] = pmu->recv.data[1];
- }
-
- mutex_unlock(&subdev->mutex);
- return 0;
-}
-
static void
nvkm_pmu_recv(struct work_struct *work)
{
- struct nvkm_pmu *pmu = container_of(work, struct nvkm_pmu, recv.work);
- struct nvkm_subdev *subdev = &pmu->subdev;
- struct nvkm_device *device = subdev->device;
- u32 process, message, data0, data1;
-
- /* nothing to do if GET == PUT */
- u32 addr = nvkm_rd32(device, 0x10a4cc);
- if (addr == nvkm_rd32(device, 0x10a4c8))
- return;
-
- /* acquire data segment access */
- do {
- nvkm_wr32(device, 0x10a580, 0x00000002);
- } while (nvkm_rd32(device, 0x10a580) != 0x00000002);
-
- /* read the packet */
- nvkm_wr32(device, 0x10a1c0, 0x02000000 | (((addr & 0x07) << 4) +
- pmu->recv.base));
- process = nvkm_rd32(device, 0x10a1c4);
- message = nvkm_rd32(device, 0x10a1c4);
- data0 = nvkm_rd32(device, 0x10a1c4);
- data1 = nvkm_rd32(device, 0x10a1c4);
- nvkm_wr32(device, 0x10a4cc, (addr + 1) & 0x0f);
-
- /* release data segment access */
- nvkm_wr32(device, 0x10a580, 0x00000000);
-
- /* wake process if it's waiting on a synchronous reply */
- if (pmu->recv.process) {
- if (process == pmu->recv.process &&
- message == pmu->recv.message) {
- pmu->recv.data[0] = data0;
- pmu->recv.data[1] = data1;
- pmu->recv.process = 0;
- wake_up(&pmu->recv.wait);
- return;
- }
- }
+ struct nvkm_pmu *pmu = container_of(work, typeof(*pmu), recv.work);
+	pmu->func->recv(pmu);
+}
- /* right now there's no other expected responses from the engine,
- * so assume that any unexpected message is an error.
- */
- nvkm_warn(subdev, "%c%c%c%c %08x %08x %08x %08x\n",
- (char)((process & 0x000000ff) >> 0),
- (char)((process & 0x0000ff00) >> 8),
- (char)((process & 0x00ff0000) >> 16),
- (char)((process & 0xff000000) >> 24),
- process, message, data0, data1);
+int
+nvkm_pmu_send(struct nvkm_pmu *pmu, u32 reply[2],
+ u32 process, u32 message, u32 data0, u32 data1)
+{
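+	/* chipsets that lack a host<->PMU message interface leave .send
+	 * unset and get -ENODEV here */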
+ if (!pmu || !pmu->func->send)
+ return -ENODEV;
+ return pmu->func->send(pmu, reply, process, message, data0, data1);
}
static void
nvkm_pmu_intr(struct nvkm_subdev *subdev)
{
struct nvkm_pmu *pmu = nvkm_pmu(subdev);
- struct nvkm_device *device = pmu->subdev.device;
- u32 disp = nvkm_rd32(device, 0x10a01c);
- u32 intr = nvkm_rd32(device, 0x10a008) & disp & ~(disp >> 16);
-
- if (intr & 0x00000020) {
- u32 stat = nvkm_rd32(device, 0x10a16c);
- if (stat & 0x80000000) {
- nvkm_error(subdev, "UAS fault at %06x addr %08x\n",
- stat & 0x00ffffff,
- nvkm_rd32(device, 0x10a168));
- nvkm_wr32(device, 0x10a16c, 0x00000000);
- intr &= ~0x00000020;
- }
- }
-
- if (intr & 0x00000040) {
- schedule_work(&pmu->recv.work);
- nvkm_wr32(device, 0x10a004, 0x00000040);
- intr &= ~0x00000040;
- }
-
- if (intr & 0x00000080) {
- nvkm_info(subdev, "wr32 %06x %08x\n",
- nvkm_rd32(device, 0x10a7a0),
- nvkm_rd32(device, 0x10a7a4));
- nvkm_wr32(device, 0x10a004, 0x00000080);
- intr &= ~0x00000080;
- }
-
- if (intr) {
- nvkm_error(subdev, "intr %08x\n", intr);
- nvkm_wr32(device, 0x10a004, intr);
- }
+ if (!pmu->func->intr)
+ return;
+ pmu->func->intr(pmu);
}
static int
nvkm_pmu_fini(struct nvkm_subdev *subdev, bool suspend)
{
struct nvkm_pmu *pmu = nvkm_pmu(subdev);
- struct nvkm_device *device = pmu->subdev.device;
- nvkm_wr32(device, 0x10a014, 0x00000060);
+ if (pmu->func->fini)
+ pmu->func->fini(pmu);
+
flush_work(&pmu->recv.work);
return 0;
}
static int
-nvkm_pmu_init(struct nvkm_subdev *subdev)
+nvkm_pmu_reset(struct nvkm_pmu *pmu)
{
- struct nvkm_pmu *pmu = nvkm_pmu(subdev);
struct nvkm_device *device = pmu->subdev.device;
- int i;
- /* prevent previous ucode from running, wait for idle, reset */
- nvkm_wr32(device, 0x10a014, 0x0000ffff); /* INTR_EN_CLR = ALL */
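+	/* skip if the PMU is disabled in PMC_ENABLE (bit 13 - register
+	 * naming assumed) */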
+ if (!(nvkm_rd32(device, 0x000200) & 0x00002000))
+ return 0;
+
+ /* Inhibit interrupts, and wait for idle. */
+ nvkm_wr32(device, 0x10a014, 0x0000ffff);
nvkm_msec(device, 2000,
if (!nvkm_rd32(device, 0x10a04c))
break;
);
- nvkm_mask(device, 0x000200, 0x00002000, 0x00000000);
- nvkm_mask(device, 0x000200, 0x00002000, 0x00002000);
- nvkm_rd32(device, 0x000200);
+
+ /* Reset. */
+ pmu->func->reset(pmu);
+
+ /* Wait for IMEM/DMEM scrubbing to be complete. */
nvkm_msec(device, 2000,
if (!(nvkm_rd32(device, 0x10a10c) & 0x00000006))
break;
);
- /* upload data segment */
- nvkm_wr32(device, 0x10a1c0, 0x01000000);
- for (i = 0; i < pmu->func->data.size / 4; i++)
- nvkm_wr32(device, 0x10a1c4, pmu->func->data.data[i]);
-
- /* upload code segment */
- nvkm_wr32(device, 0x10a180, 0x01000000);
- for (i = 0; i < pmu->func->code.size / 4; i++) {
- if ((i & 0x3f) == 0)
- nvkm_wr32(device, 0x10a188, i >> 6);
- nvkm_wr32(device, 0x10a184, pmu->func->code.data[i]);
- }
-
- /* start it running */
- nvkm_wr32(device, 0x10a10c, 0x00000000);
- nvkm_wr32(device, 0x10a104, 0x00000000);
- nvkm_wr32(device, 0x10a100, 0x00000002);
-
- /* wait for valid host->pmu ring configuration */
- if (nvkm_msec(device, 2000,
- if (nvkm_rd32(device, 0x10a4d0))
- break;
- ) < 0)
- return -EBUSY;
- pmu->send.base = nvkm_rd32(device, 0x10a4d0) & 0x0000ffff;
- pmu->send.size = nvkm_rd32(device, 0x10a4d0) >> 16;
+ return 0;
+}
- /* wait for valid pmu->host ring configuration */
- if (nvkm_msec(device, 2000,
- if (nvkm_rd32(device, 0x10a4dc))
- break;
- ) < 0)
- return -EBUSY;
- pmu->recv.base = nvkm_rd32(device, 0x10a4dc) & 0x0000ffff;
- pmu->recv.size = nvkm_rd32(device, 0x10a4dc) >> 16;
+static int
+nvkm_pmu_preinit(struct nvkm_subdev *subdev)
+{
+ struct nvkm_pmu *pmu = nvkm_pmu(subdev);
+ return nvkm_pmu_reset(pmu);
+}
- nvkm_wr32(device, 0x10a010, 0x000000e0);
- return 0;
+static int
+nvkm_pmu_init(struct nvkm_subdev *subdev)
+{
+ struct nvkm_pmu *pmu = nvkm_pmu(subdev);
+ int ret = nvkm_pmu_reset(pmu);
+ if (ret == 0 && pmu->func->init)
+ ret = pmu->func->init(pmu);
+ return ret;
}
static void *
@@ -262,6 +122,7 @@ nvkm_pmu_dtor(struct nvkm_subdev *subdev)
static const struct nvkm_subdev_func
nvkm_pmu = {
.dtor = nvkm_pmu_dtor,
+ .preinit = nvkm_pmu_preinit,
.init = nvkm_pmu_init,
.fini = nvkm_pmu_fini,
.intr = nvkm_pmu_intr,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gf100.fuc3.h b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gf100.fuc3.h
index e2faccffee6f..0bcf0b307a61 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gf100.fuc3.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gf100.fuc3.h
@@ -1,4 +1,4 @@
-uint32_t gf100_pmu_data[] = {
+static uint32_t gf100_pmu_data[] = {
/* 0x0000: proc_kern */
0x52544e49,
0x00000000,
@@ -916,7 +916,7 @@ uint32_t gf100_pmu_data[] = {
0x00000000,
};
-uint32_t gf100_pmu_code[] = {
+static uint32_t gf100_pmu_code[] = {
0x03920ef5,
/* 0x0004: rd32 */
0x07a007f1,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gf119.fuc4.h b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gf119.fuc4.h
index 2d5bdc539697..fe8905666c67 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gf119.fuc4.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gf119.fuc4.h
@@ -1,4 +1,4 @@
-uint32_t gf119_pmu_data[] = {
+static uint32_t gf119_pmu_data[] = {
/* 0x0000: proc_kern */
0x52544e49,
0x00000000,
@@ -915,7 +915,7 @@ uint32_t gf119_pmu_data[] = {
0x00000000,
};
-uint32_t gf119_pmu_code[] = {
+static uint32_t gf119_pmu_code[] = {
0x03410ef5,
/* 0x0004: rd32 */
0x07a007f1,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gk208.fuc5.h b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gk208.fuc5.h
index 3c731ff12871..9cf4e6fc724e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gk208.fuc5.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gk208.fuc5.h
@@ -1,4 +1,4 @@
-uint32_t gk208_pmu_data[] = {
+static uint32_t gk208_pmu_data[] = {
/* 0x0000: proc_kern */
0x52544e49,
0x00000000,
@@ -915,7 +915,7 @@ uint32_t gk208_pmu_data[] = {
0x00000000,
};
-uint32_t gk208_pmu_code[] = {
+static uint32_t gk208_pmu_code[] = {
0x02f90ef5,
/* 0x0004: rd32 */
0xf607a040,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gt215.fuc3.h b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gt215.fuc3.h
index e83341815ec6..5d692425b190 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gt215.fuc3.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gt215.fuc3.h
@@ -1,4 +1,4 @@
-uint32_t gt215_pmu_data[] = {
+static uint32_t gt215_pmu_data[] = {
/* 0x0000: proc_kern */
0x52544e49,
0x00000000,
@@ -916,7 +916,7 @@ uint32_t gt215_pmu_data[] = {
0x00000000,
};
-uint32_t gt215_pmu_code[] = {
+static uint32_t gt215_pmu_code[] = {
0x03920ef5,
/* 0x0004: rd32 */
0x07a007f1,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gf100.c
index aeb8ccd891fc..0e36d4cb7201 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gf100.c
@@ -30,6 +30,12 @@ gf100_pmu = {
.code.size = sizeof(gf100_pmu_code),
.data.data = gf100_pmu_data,
.data.size = sizeof(gf100_pmu_data),
+ .reset = gt215_pmu_reset,
+ .init = gt215_pmu_init,
+ .fini = gt215_pmu_fini,
+ .intr = gt215_pmu_intr,
+ .send = gt215_pmu_send,
+ .recv = gt215_pmu_recv,
};
int
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gf119.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gf119.c
index fbc88d8ecd4d..0e4ba4248b15 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gf119.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gf119.c
@@ -30,6 +30,12 @@ gf119_pmu = {
.code.size = sizeof(gf119_pmu_code),
.data.data = gf119_pmu_data,
.data.size = sizeof(gf119_pmu_data),
+ .reset = gt215_pmu_reset,
+ .init = gt215_pmu_init,
+ .fini = gt215_pmu_fini,
+ .intr = gt215_pmu_intr,
+ .send = gt215_pmu_send,
+ .recv = gt215_pmu_recv,
};
int
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk104.c
index 86f9f3b13f71..2ad858d825ac 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk104.c
@@ -109,6 +109,12 @@ gk104_pmu = {
.code.size = sizeof(gk104_pmu_code),
.data.data = gk104_pmu_data,
.data.size = sizeof(gk104_pmu_data),
+ .reset = gt215_pmu_reset,
+ .init = gt215_pmu_init,
+ .fini = gt215_pmu_fini,
+ .intr = gt215_pmu_intr,
+ .send = gt215_pmu_send,
+ .recv = gt215_pmu_recv,
.pgob = gk104_pmu_pgob,
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk110.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk110.c
index ae255247c9d1..fc4b8ecfdaeb 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk110.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk110.c
@@ -88,6 +88,12 @@ gk110_pmu = {
.code.size = sizeof(gk110_pmu_code),
.data.data = gk110_pmu_data,
.data.size = sizeof(gk110_pmu_data),
+ .reset = gt215_pmu_reset,
+ .init = gt215_pmu_init,
+ .fini = gt215_pmu_fini,
+ .intr = gt215_pmu_intr,
+ .send = gt215_pmu_send,
+ .recv = gt215_pmu_recv,
.pgob = gk110_pmu_pgob,
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk208.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk208.c
index 3b4917637902..e9a91277683a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk208.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk208.c
@@ -30,6 +30,12 @@ gk208_pmu = {
.code.size = sizeof(gk208_pmu_code),
.data.data = gk208_pmu_data,
.data.size = sizeof(gk208_pmu_data),
+ .reset = gt215_pmu_reset,
+ .init = gt215_pmu_init,
+ .fini = gt215_pmu_fini,
+ .intr = gt215_pmu_intr,
+ .send = gt215_pmu_send,
+ .recv = gt215_pmu_recv,
.pgob = gk110_pmu_pgob,
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm107.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm107.c
index 31b8692b4641..9a248ed75f09 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm107.c
@@ -32,6 +32,12 @@ gm107_pmu = {
.code.size = sizeof(gm107_pmu_code),
.data.data = gm107_pmu_data,
.data.size = sizeof(gm107_pmu_data),
+ .reset = gt215_pmu_reset,
+ .init = gt215_pmu_init,
+ .fini = gt215_pmu_fini,
+ .intr = gt215_pmu_intr,
+ .send = gt215_pmu_send,
+ .recv = gt215_pmu_recv,
};
int
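From gt215 through gm107, every PMU now spells out the shared gt215 hooks instead of relying on behaviour hard-wired into the core. The payoff is visible just below: gp100 and gp102 can populate only .reset and skip the message-queue interface entirely. A minimal sketch of how a core dispatcher would use the optional hooks (the wrapper name is hypothetical; the real core code is not in this diff):

/* hypothetical dispatcher; only .reset is assumed to always exist */
static int
nvkm_pmu_example_init(struct nvkm_pmu *pmu)
{
	if (pmu->func->reset)
		pmu->func->reset(pmu);
	if (pmu->func->init)
		return pmu->func->init(pmu);
	return 0;	/* chips without host-loaded ucode skip the upload */
}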
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp100.c
new file mode 100644
index 000000000000..6c41c20c85a7
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp100.c
@@ -0,0 +1,35 @@
+/*
+ * Copyright 2016 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#include "priv.h"
+
+static const struct nvkm_pmu_func
+gp100_pmu = {
+ .reset = gt215_pmu_reset,
+};
+
+int
+gp100_pmu_new(struct nvkm_device *device, int index, struct nvkm_pmu **ppmu)
+{
+ return nvkm_pmu_new_(&gp100_pmu, device, index, ppmu);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp102.c
new file mode 100644
index 000000000000..f017352206c9
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp102.c
@@ -0,0 +1,43 @@
+/*
+ * Copyright 2016 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#include "priv.h"
+
+static void
+gp102_pmu_reset(struct nvkm_pmu *pmu)
+{
+ struct nvkm_device *device = pmu->subdev.device;
+ nvkm_mask(device, 0x10a3c0, 0x00000001, 0x00000001);
+ nvkm_mask(device, 0x10a3c0, 0x00000001, 0x00000000);
+}
+
+static const struct nvkm_pmu_func
+gp102_pmu = {
+ .reset = gp102_pmu_reset,
+};
+
+int
+gp102_pmu_new(struct nvkm_device *device, int index, struct nvkm_pmu **ppmu)
+{
+ return nvkm_pmu_new_(&gp102_pmu, device, index, ppmu);
+}
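Unlike gp100, which keeps the gt215-style reset, gp102 asserts and then clears a falcon-local reset bit at 0x10a3c0 rather than toggling the PMU enable in the master 0x000200 register. A hedged sketch of that assert/deassert idiom (helper name illustrative, register meaning inferred from the code above):

/* pulse a reset bit: assert, then deassert */
static void
pmu_pulse_reset(struct nvkm_device *device, u32 addr, u32 bit)
{
	nvkm_mask(device, addr, bit, bit);
	nvkm_mask(device, addr, bit, 0x00000000);
}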
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gt215.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gt215.c
index 8ba7fa4ca75b..90d428b3be97 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gt215.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gt215.c
@@ -24,21 +24,229 @@
#include "priv.h"
#include "fuc/gt215.fuc3.h"
-static void
+#include <subdev/timer.h>
+
+int
+gt215_pmu_send(struct nvkm_pmu *pmu, u32 reply[2],
+ u32 process, u32 message, u32 data0, u32 data1)
+{
+ struct nvkm_subdev *subdev = &pmu->subdev;
+ struct nvkm_device *device = subdev->device;
+ u32 addr;
+
+ mutex_lock(&subdev->mutex);
+ /* wait for a free slot in the fifo */
+ addr = nvkm_rd32(device, 0x10a4a0);
+ if (nvkm_msec(device, 2000,
+ u32 tmp = nvkm_rd32(device, 0x10a4b0);
+ if (tmp != (addr ^ 8))
+ break;
+ ) < 0) {
+ mutex_unlock(&subdev->mutex);
+ return -EBUSY;
+ }
+
+ /* we currently only support a single process at a time waiting
+ * on a synchronous reply; take the PMU mutex and tell the
+ * receive handler what we're waiting for
+ */
+ if (reply) {
+ pmu->recv.message = message;
+ pmu->recv.process = process;
+ }
+
+ /* acquire data segment access */
+ do {
+ nvkm_wr32(device, 0x10a580, 0x00000001);
+ } while (nvkm_rd32(device, 0x10a580) != 0x00000001);
+
+ /* write the packet */
+ nvkm_wr32(device, 0x10a1c0, 0x01000000 | (((addr & 0x07) << 4) +
+ pmu->send.base));
+ nvkm_wr32(device, 0x10a1c4, process);
+ nvkm_wr32(device, 0x10a1c4, message);
+ nvkm_wr32(device, 0x10a1c4, data0);
+ nvkm_wr32(device, 0x10a1c4, data1);
+ nvkm_wr32(device, 0x10a4a0, (addr + 1) & 0x0f);
+
+ /* release data segment access */
+ nvkm_wr32(device, 0x10a580, 0x00000000);
+
+ /* wait for reply, if requested */
+ if (reply) {
+ wait_event(pmu->recv.wait, (pmu->recv.process == 0));
+ reply[0] = pmu->recv.data[0];
+ reply[1] = pmu->recv.data[1];
+ }
+
+ mutex_unlock(&subdev->mutex);
+ return 0;
+}
+
+void
+gt215_pmu_recv(struct nvkm_pmu *pmu)
+{
+ struct nvkm_subdev *subdev = &pmu->subdev;
+ struct nvkm_device *device = subdev->device;
+ u32 process, message, data0, data1;
+
+ /* nothing to do if GET == PUT */
+ u32 addr = nvkm_rd32(device, 0x10a4cc);
+ if (addr == nvkm_rd32(device, 0x10a4c8))
+ return;
+
+ /* acquire data segment access */
+ do {
+ nvkm_wr32(device, 0x10a580, 0x00000002);
+ } while (nvkm_rd32(device, 0x10a580) != 0x00000002);
+
+ /* read the packet */
+ nvkm_wr32(device, 0x10a1c0, 0x02000000 | (((addr & 0x07) << 4) +
+ pmu->recv.base));
+ process = nvkm_rd32(device, 0x10a1c4);
+ message = nvkm_rd32(device, 0x10a1c4);
+ data0 = nvkm_rd32(device, 0x10a1c4);
+ data1 = nvkm_rd32(device, 0x10a1c4);
+ nvkm_wr32(device, 0x10a4cc, (addr + 1) & 0x0f);
+
+ /* release data segment access */
+ nvkm_wr32(device, 0x10a580, 0x00000000);
+
+ /* wake process if it's waiting on a synchronous reply */
+ if (pmu->recv.process) {
+ if (process == pmu->recv.process &&
+ message == pmu->recv.message) {
+ pmu->recv.data[0] = data0;
+ pmu->recv.data[1] = data1;
+ pmu->recv.process = 0;
+ wake_up(&pmu->recv.wait);
+ return;
+ }
+ }
+
+ /* right now there are no other expected responses from the engine,
+ * so assume that any unexpected message is an error.
+ */
+ nvkm_warn(subdev, "%c%c%c%c %08x %08x %08x %08x\n",
+ (char)((process & 0x000000ff) >> 0),
+ (char)((process & 0x0000ff00) >> 8),
+ (char)((process & 0x00ff0000) >> 16),
+ (char)((process & 0xff000000) >> 24),
+ process, message, data0, data1);
+}
+
+void
+gt215_pmu_intr(struct nvkm_pmu *pmu)
+{
+ struct nvkm_subdev *subdev = &pmu->subdev;
+ struct nvkm_device *device = subdev->device;
+ u32 disp = nvkm_rd32(device, 0x10a01c);
+ u32 intr = nvkm_rd32(device, 0x10a008) & disp & ~(disp >> 16);
+
+ if (intr & 0x00000020) {
+ u32 stat = nvkm_rd32(device, 0x10a16c);
+ if (stat & 0x80000000) {
+ nvkm_error(subdev, "UAS fault at %06x addr %08x\n",
+ stat & 0x00ffffff,
+ nvkm_rd32(device, 0x10a168));
+ nvkm_wr32(device, 0x10a16c, 0x00000000);
+ intr &= ~0x00000020;
+ }
+ }
+
+ if (intr & 0x00000040) {
+ schedule_work(&pmu->recv.work);
+ nvkm_wr32(device, 0x10a004, 0x00000040);
+ intr &= ~0x00000040;
+ }
+
+ if (intr & 0x00000080) {
+ nvkm_info(subdev, "wr32 %06x %08x\n",
+ nvkm_rd32(device, 0x10a7a0),
+ nvkm_rd32(device, 0x10a7a4));
+ nvkm_wr32(device, 0x10a004, 0x00000080);
+ intr &= ~0x00000080;
+ }
+
+ if (intr) {
+ nvkm_error(subdev, "intr %08x\n", intr);
+ nvkm_wr32(device, 0x10a004, intr);
+ }
+}
+
+void
+gt215_pmu_fini(struct nvkm_pmu *pmu)
+{
+ nvkm_wr32(pmu->subdev.device, 0x10a014, 0x00000060);
+}
+
+void
gt215_pmu_reset(struct nvkm_pmu *pmu)
{
struct nvkm_device *device = pmu->subdev.device;
- nvkm_mask(device, 0x022210, 0x00000001, 0x00000000);
- nvkm_mask(device, 0x022210, 0x00000001, 0x00000001);
+ nvkm_mask(device, 0x000200, 0x00002000, 0x00000000);
+ nvkm_mask(device, 0x000200, 0x00002000, 0x00002000);
+ nvkm_rd32(device, 0x000200);
+}
+
+int
+gt215_pmu_init(struct nvkm_pmu *pmu)
+{
+ struct nvkm_device *device = pmu->subdev.device;
+ int i;
+
+ /* upload data segment */
+ nvkm_wr32(device, 0x10a1c0, 0x01000000);
+ for (i = 0; i < pmu->func->data.size / 4; i++)
+ nvkm_wr32(device, 0x10a1c4, pmu->func->data.data[i]);
+
+ /* upload code segment */
+ nvkm_wr32(device, 0x10a180, 0x01000000);
+ for (i = 0; i < pmu->func->code.size / 4; i++) {
+ if ((i & 0x3f) == 0)
+ nvkm_wr32(device, 0x10a188, i >> 6);
+ nvkm_wr32(device, 0x10a184, pmu->func->code.data[i]);
+ }
+
+ /* start it running */
+ nvkm_wr32(device, 0x10a10c, 0x00000000);
+ nvkm_wr32(device, 0x10a104, 0x00000000);
+ nvkm_wr32(device, 0x10a100, 0x00000002);
+
+ /* wait for valid host->pmu ring configuration */
+ if (nvkm_msec(device, 2000,
+ if (nvkm_rd32(device, 0x10a4d0))
+ break;
+ ) < 0)
+ return -EBUSY;
+ pmu->send.base = nvkm_rd32(device, 0x10a4d0) & 0x0000ffff;
+ pmu->send.size = nvkm_rd32(device, 0x10a4d0) >> 16;
+
+ /* wait for valid pmu->host ring configuration */
+ if (nvkm_msec(device, 2000,
+ if (nvkm_rd32(device, 0x10a4dc))
+ break;
+ ) < 0)
+ return -EBUSY;
+ pmu->recv.base = nvkm_rd32(device, 0x10a4dc) & 0x0000ffff;
+ pmu->recv.size = nvkm_rd32(device, 0x10a4dc) >> 16;
+
+ nvkm_wr32(device, 0x10a010, 0x000000e0);
+ return 0;
}
static const struct nvkm_pmu_func
gt215_pmu = {
- .reset = gt215_pmu_reset,
.code.data = gt215_pmu_code,
.code.size = sizeof(gt215_pmu_code),
.data.data = gt215_pmu_data,
.data.size = sizeof(gt215_pmu_data),
+ .reset = gt215_pmu_reset,
+ .init = gt215_pmu_init,
+ .fini = gt215_pmu_fini,
+ .intr = gt215_pmu_intr,
+ .send = gt215_pmu_send,
+ .recv = gt215_pmu_recv,
};
int
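The new queue code is the former nouveau PMU messaging path made reusable: gt215_pmu_send() serializes on the subdev mutex, waits for a free slot in the 16-entry host-to-PMU ring (PUT at 0x10a4a0, GET at 0x10a4b0), copies the four-word packet through the shared data segment behind the 0x10a580 lock, and optionally sleeps until gt215_pmu_recv() matches the reply by process and message ID. A minimal caller sketch; EXAMPLE_PROC and EXAMPLE_MSG are placeholder IDs, not real firmware ABI:

u32 reply[2];
int ret;

ret = gt215_pmu_send(pmu, reply, EXAMPLE_PROC, EXAMPLE_MSG,
		     0x00000000, 0x00000000);
if (ret == 0)
	/* reply[] carries data0/data1 from the matching response */
	nvkm_debug(&pmu->subdev, "reply %08x %08x\n", reply[0], reply[1]);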
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/priv.h
index f38c88fae3d6..2e2179a4ad17 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/priv.h
@@ -8,8 +8,6 @@ int nvkm_pmu_new_(const struct nvkm_pmu_func *, struct nvkm_device *,
int index, struct nvkm_pmu **);
struct nvkm_pmu_func {
- void (*reset)(struct nvkm_pmu *);
-
struct {
u32 *data;
u32 size;
@@ -20,8 +18,22 @@ struct nvkm_pmu_func {
u32 size;
} data;
+ void (*reset)(struct nvkm_pmu *);
+ int (*init)(struct nvkm_pmu *);
+ void (*fini)(struct nvkm_pmu *);
+ void (*intr)(struct nvkm_pmu *);
+ int (*send)(struct nvkm_pmu *, u32 reply[2], u32 process,
+ u32 message, u32 data0, u32 data1);
+ void (*recv)(struct nvkm_pmu *);
void (*pgob)(struct nvkm_pmu *, bool);
};
+void gt215_pmu_reset(struct nvkm_pmu *);
+int gt215_pmu_init(struct nvkm_pmu *);
+void gt215_pmu_fini(struct nvkm_pmu *);
+void gt215_pmu_intr(struct nvkm_pmu *);
+void gt215_pmu_recv(struct nvkm_pmu *);
+int gt215_pmu_send(struct nvkm_pmu *, u32[2], u32, u32, u32, u32);
+
void gk110_pmu_pgob(struct nvkm_pmu *, bool);
#endif
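With the hooks both declared here and routed through nvkm_pmu_func, adding a chipset that reuses the queue but needs its own reset becomes a table-fill exercise. A hypothetical example (the gxxx names are invented):

/* hypothetical chipset glue; everything but .reset comes from gt215 */
static const struct nvkm_pmu_func
gxxx_pmu = {
	.reset = gxxx_pmu_reset,
	.init = gt215_pmu_init,
	.fini = gt215_pmu_fini,
	.intr = gt215_pmu_intr,
	.send = gt215_pmu_send,
	.recv = gt215_pmu_recv,
};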
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm200.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm200.c
index f1e2dc914366..ec48e4ace37a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm200.c
@@ -1364,7 +1364,7 @@ gm200_secboot_init(struct nvkm_secboot *sb)
return 0;
}
-int
+static int
gm200_secboot_fini(struct nvkm_secboot *sb, bool suspend)
{
struct gm200_secboot *gsb = gm200_secboot(sb);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/Kbuild
index c34076223b7b..bcd179ba11d0 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/Kbuild
@@ -1,6 +1,7 @@
nvkm-y += nvkm/subdev/volt/base.o
nvkm-y += nvkm/subdev/volt/gpio.o
nvkm-y += nvkm/subdev/volt/nv40.o
+nvkm-y += nvkm/subdev/volt/gf100.o
nvkm-y += nvkm/subdev/volt/gk104.o
nvkm-y += nvkm/subdev/volt/gk20a.o
nvkm-y += nvkm/subdev/volt/gm20b.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/base.c
index 1c3d23b0e84a..e344901cfdc7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/base.c
@@ -26,6 +26,7 @@
#include <subdev/bios.h>
#include <subdev/bios/vmap.h>
#include <subdev/bios/volt.h>
+#include <subdev/therm.h>
int
nvkm_volt_get(struct nvkm_volt *volt)
@@ -50,33 +51,45 @@ static int
nvkm_volt_set(struct nvkm_volt *volt, u32 uv)
{
struct nvkm_subdev *subdev = &volt->subdev;
- int i, ret = -EINVAL;
+ int i, ret = -EINVAL, best_err = volt->max_uv, best = -1;
if (volt->func->volt_set)
return volt->func->volt_set(volt, uv);
for (i = 0; i < volt->vid_nr; i++) {
- if (volt->vid[i].uv == uv) {
- ret = volt->func->vid_set(volt, volt->vid[i].vid);
- nvkm_debug(subdev, "set %duv: %d\n", uv, ret);
+ int err = volt->vid[i].uv - uv;
+ if (err < 0 || err > best_err)
+ continue;
+
+ best_err = err;
+ best = i;
+ if (best_err == 0)
break;
- }
}
+
+ if (best == -1) {
+ nvkm_error(subdev, "couldn't set %iuv\n", uv);
+ return ret;
+ }
+
+ ret = volt->func->vid_set(volt, volt->vid[best].vid);
+ nvkm_debug(subdev, "set req %duv to %duv: %d\n", uv,
+ volt->vid[best].uv, ret);
return ret;
}
-static int
-nvkm_volt_map(struct nvkm_volt *volt, u8 id)
+int
+nvkm_volt_map_min(struct nvkm_volt *volt, u8 id)
{
struct nvkm_bios *bios = volt->subdev.device->bios;
struct nvbios_vmap_entry info;
u8 ver, len;
- u16 vmap;
+ u32 vmap;
vmap = nvbios_vmap_entry_parse(bios, id, &ver, &len, &info);
if (vmap) {
if (info.link != 0xff) {
- int ret = nvkm_volt_map(volt, info.link);
+ int ret = nvkm_volt_map_min(volt, info.link);
if (ret < 0)
return ret;
info.min += ret;
@@ -88,19 +101,79 @@ nvkm_volt_map(struct nvkm_volt *volt, u8 id)
}
int
-nvkm_volt_set_id(struct nvkm_volt *volt, u8 id, int condition)
+nvkm_volt_map(struct nvkm_volt *volt, u8 id, u8 temp)
+{
+ struct nvkm_bios *bios = volt->subdev.device->bios;
+ struct nvbios_vmap_entry info;
+ u8 ver, len;
+ u32 vmap;
+
+ vmap = nvbios_vmap_entry_parse(bios, id, &ver, &len, &info);
+ if (vmap) {
+ s64 result;
+
+ if (volt->speedo < 0)
+ return volt->speedo;
+
+ if (ver == 0x10 || (ver == 0x20 && info.mode == 0)) {
+ result = div64_s64((s64)info.arg[0], 10);
+ result += div64_s64((s64)info.arg[1] * volt->speedo, 10);
+ result += div64_s64((s64)info.arg[2] * volt->speedo * volt->speedo, 100000);
+ } else if (ver == 0x20) {
+ switch (info.mode) {
+ /* 0x0 handled above! */
+ case 0x1:
+ result = ((s64)info.arg[0] * 15625) >> 18;
+ result += ((s64)info.arg[1] * volt->speedo * 15625) >> 18;
+ result += ((s64)info.arg[2] * temp * 15625) >> 10;
+ result += ((s64)info.arg[3] * volt->speedo * temp * 15625) >> 18;
+ result += ((s64)info.arg[4] * volt->speedo * volt->speedo * 15625) >> 30;
+ result += ((s64)info.arg[5] * temp * temp * 15625) >> 18;
+ break;
+ case 0x3:
+ result = (info.min + info.max) / 2;
+ break;
+ case 0x2:
+ default:
+ result = info.min;
+ break;
+ }
+ } else {
+ return -ENODEV;
+ }
+
+ result = min(max(result, (s64)info.min), (s64)info.max);
+
+ if (info.link != 0xff) {
+ int ret = nvkm_volt_map(volt, info.link, temp);
+ if (ret < 0)
+ return ret;
+ result += ret;
+ }
+ return result;
+ }
+
+ return id ? id * 10000 : -ENODEV;
+}
+
+int
+nvkm_volt_set_id(struct nvkm_volt *volt, u8 id, u8 min_id, u8 temp,
+ int condition)
{
int ret;
if (volt->func->set_id)
return volt->func->set_id(volt, id, condition);
- ret = nvkm_volt_map(volt, id);
+ ret = nvkm_volt_map(volt, id, temp);
if (ret >= 0) {
int prev = nvkm_volt_get(volt);
if (!condition || prev < 0 ||
(condition < 0 && ret < prev) ||
(condition > 0 && ret > prev)) {
+ int min = nvkm_volt_map(volt, min_id, temp);
+ if (min >= 0)
+ ret = max(min, ret);
ret = nvkm_volt_set(volt, ret);
} else {
ret = 0;
@@ -112,14 +185,16 @@ nvkm_volt_set_id(struct nvkm_volt *volt, u8 id, int condition)
static void
nvkm_volt_parse_bios(struct nvkm_bios *bios, struct nvkm_volt *volt)
{
+ struct nvkm_subdev *subdev = &bios->subdev;
struct nvbios_volt_entry ivid;
struct nvbios_volt info;
u8 ver, hdr, cnt, len;
- u16 data;
+ u32 data;
int i;
data = nvbios_volt_parse(bios, &ver, &hdr, &cnt, &len, &info);
- if (data && info.vidmask && info.base && info.step) {
+ if (data && info.vidmask && info.base && info.step && info.ranged) {
+ nvkm_debug(subdev, "found ranged based VIDs\n");
volt->min_uv = info.min;
volt->max_uv = info.max;
for (i = 0; i < info.vidmask + 1; i++) {
@@ -132,7 +207,8 @@ nvkm_volt_parse_bios(struct nvkm_bios *bios, struct nvkm_volt *volt)
info.base += info.step;
}
volt->vid_mask = info.vidmask;
- } else if (data && info.vidmask) {
+ } else if (data && info.vidmask && !info.ranged) {
+ nvkm_debug(subdev, "found entry based VIDs\n");
volt->min_uv = 0xffffffff;
volt->max_uv = 0;
for (i = 0; i < cnt; i++) {
@@ -154,6 +230,14 @@ nvkm_volt_parse_bios(struct nvkm_bios *bios, struct nvkm_volt *volt)
}
static int
+nvkm_volt_speedo_read(struct nvkm_volt *volt)
+{
+ if (volt->func->speedo_read)
+ return volt->func->speedo_read(volt);
+ return -EINVAL;
+}
+
+static int
nvkm_volt_init(struct nvkm_subdev *subdev)
{
struct nvkm_volt *volt = nvkm_volt(subdev);
@@ -167,6 +251,21 @@ nvkm_volt_init(struct nvkm_subdev *subdev)
return 0;
}
+static int
+nvkm_volt_oneinit(struct nvkm_subdev *subdev)
+{
+ struct nvkm_volt *volt = nvkm_volt(subdev);
+
+ volt->speedo = nvkm_volt_speedo_read(volt);
+ if (volt->speedo > 0)
+ nvkm_debug(&volt->subdev, "speedo %x\n", volt->speedo);
+
+ if (volt->func->oneinit)
+ return volt->func->oneinit(volt);
+
+ return 0;
+}
+
static void *
nvkm_volt_dtor(struct nvkm_subdev *subdev)
{
@@ -177,6 +276,7 @@ static const struct nvkm_subdev_func
nvkm_volt = {
.dtor = nvkm_volt_dtor,
.init = nvkm_volt_init,
+ .oneinit = nvkm_volt_oneinit,
};
void
@@ -191,9 +291,22 @@ nvkm_volt_ctor(const struct nvkm_volt_func *func, struct nvkm_device *device,
/* Assuming the non-bios device should build the voltage table later */
if (bios) {
+ u8 ver, hdr, cnt, len;
+ struct nvbios_vmap vmap;
+
nvkm_volt_parse_bios(bios, volt);
nvkm_debug(&volt->subdev, "min: %iuv max: %iuv\n",
volt->min_uv, volt->max_uv);
+
+ if (nvbios_vmap_parse(bios, &ver, &hdr, &cnt, &len, &vmap)) {
+ volt->max0_id = vmap.max0;
+ volt->max1_id = vmap.max1;
+ volt->max2_id = vmap.max2;
+ } else {
+ volt->max0_id = 0xff;
+ volt->max1_id = 0xff;
+ volt->max2_id = 0xff;
+ }
}
if (volt->vid_nr) {
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gf100.c
new file mode 100644
index 000000000000..d9ed6925ca64
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gf100.c
@@ -0,0 +1,70 @@
+/*
+ * Copyright 2016 Karol Herbst
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Karol Herbst
+ */
+#include "priv.h"
+
+#include <subdev/fuse.h>
+
+static int
+gf100_volt_speedo_read(struct nvkm_volt *volt)
+{
+ struct nvkm_device *device = volt->subdev.device;
+ struct nvkm_fuse *fuse = device->fuse;
+
+ if (!fuse)
+ return -EINVAL;
+
+ return nvkm_fuse_read(fuse, 0x1cc);
+}
+
+int
+gf100_volt_oneinit(struct nvkm_volt *volt)
+{
+ struct nvkm_subdev *subdev = &volt->subdev;
+ if (volt->speedo <= 0)
+ nvkm_error(subdev, "couldn't find speedo value, volting not "
+ "possible\n");
+ return 0;
+}
+
+static const struct nvkm_volt_func
+gf100_volt = {
+ .oneinit = gf100_volt_oneinit,
+ .vid_get = nvkm_voltgpio_get,
+ .vid_set = nvkm_voltgpio_set,
+ .speedo_read = gf100_volt_speedo_read,
+};
+
+int
+gf100_volt_new(struct nvkm_device *device, int index, struct nvkm_volt **pvolt)
+{
+ struct nvkm_volt *volt;
+ int ret;
+
+ ret = nvkm_volt_new_(&gf100_volt, device, index, &volt);
+ *pvolt = volt;
+ if (ret)
+ return ret;
+
+ return nvkm_voltgpio_init(volt);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gk104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gk104.c
index 420bd84d8483..1c744e029454 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gk104.c
@@ -27,6 +27,7 @@
#include <subdev/gpio.h>
#include <subdev/bios.h>
#include <subdev/bios/volt.h>
+#include <subdev/fuse.h>
#define gk104_volt(p) container_of((p), struct gk104_volt, base)
struct gk104_volt {
@@ -34,7 +35,7 @@ struct gk104_volt {
struct nvbios_volt bios;
};
-int
+static int
gk104_volt_get(struct nvkm_volt *base)
{
struct nvbios_volt *bios = &gk104_volt(base)->bios;
@@ -47,7 +48,7 @@ gk104_volt_get(struct nvkm_volt *base)
return bios->base + bios->pwm_range * duty / div;
}
-int
+static int
gk104_volt_set(struct nvkm_volt *base, u32 uv)
{
struct nvbios_volt *bios = &gk104_volt(base)->bios;
@@ -64,13 +65,33 @@ gk104_volt_set(struct nvkm_volt *base, u32 uv)
return 0;
}
+static int
+gk104_volt_speedo_read(struct nvkm_volt *volt)
+{
+ struct nvkm_device *device = volt->subdev.device;
+ struct nvkm_fuse *fuse = device->fuse;
+ int ret;
+
+ if (!fuse)
+ return -EINVAL;
+
+ nvkm_wr32(device, 0x122634, 0x0);
+ ret = nvkm_fuse_read(fuse, 0x3a8);
+ nvkm_wr32(device, 0x122634, 0x41);
+ return ret;
+}
+
static const struct nvkm_volt_func
gk104_volt_pwm = {
+ .oneinit = gf100_volt_oneinit,
.volt_get = gk104_volt_get,
.volt_set = gk104_volt_set,
+ .speedo_read = gk104_volt_speedo_read,
}, gk104_volt_gpio = {
+ .oneinit = gf100_volt_oneinit,
.vid_get = nvkm_voltgpio_get,
.vid_set = nvkm_voltgpio_set,
+ .speedo_read = gk104_volt_speedo_read,
};
int
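Both speedo_read hooks pull the chip's binning value out of the fuse block: gf100 reads offset 0x1cc directly, while gk104 must ungate fuse access at 0x122634 around the read of 0x3a8 and then restore the gate. The value is consumed exactly once, at oneinit time; a sketch of that consumption, taken from the base.c hunk above:

volt->speedo = nvkm_volt_speedo_read(volt);	/* negative = no fuse */
if (volt->speedo > 0)
	nvkm_debug(&volt->subdev, "speedo %x\n", volt->speedo);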
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gm20b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gm20b.c
index 74db4d28930f..2925b9cae681 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gm20b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gm20b.c
@@ -25,7 +25,7 @@
#include <core/tegra.h>
-const struct cvb_coef gm20b_cvb_coef[] = {
+static const struct cvb_coef gm20b_cvb_coef[] = {
/* KHz, c0, c1, c2 */
/* 76800 */ { 1786666, -85625, 1632 },
/* 153600 */ { 1846729, -87525, 1632 },
@@ -58,7 +58,7 @@ static const struct cvb_coef gm20b_na_cvb_coef[] = {
/* 998400 */ { 1316991, 8144, -940, 808, -21583, 226 },
};
-const u32 speedo_to_vmin[] = {
+static const u32 speedo_to_vmin[] = {
/* 0, 1, 2, 3, 4, */
950000, 840000, 818750, 840000, 810000,
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gpio.c b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gpio.c
index d2bac1d77819..443c031b966b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gpio.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gpio.c
@@ -25,6 +25,7 @@
#include <subdev/bios.h>
#include <subdev/bios/gpio.h>
#include <subdev/gpio.h>
+#include "priv.h"
static const u8 tags[] = {
DCB_GPIO_VID0, DCB_GPIO_VID1, DCB_GPIO_VID2, DCB_GPIO_VID3,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/priv.h
index d5140d991161..354bafe4b4e2 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/priv.h
@@ -9,11 +9,13 @@ int nvkm_volt_new_(const struct nvkm_volt_func *, struct nvkm_device *,
int index, struct nvkm_volt **);
struct nvkm_volt_func {
+ int (*oneinit)(struct nvkm_volt *);
int (*volt_get)(struct nvkm_volt *);
int (*volt_set)(struct nvkm_volt *, u32 uv);
int (*vid_get)(struct nvkm_volt *);
int (*vid_set)(struct nvkm_volt *, u8 vid);
int (*set_id)(struct nvkm_volt *, u8 id, int condition);
+ int (*speedo_read)(struct nvkm_volt *);
};
int nvkm_voltgpio_init(struct nvkm_volt *);
@@ -23,4 +25,6 @@ int nvkm_voltgpio_set(struct nvkm_volt *, u8);
int nvkm_voltpwm_init(struct nvkm_volt *volt);
int nvkm_voltpwm_get(struct nvkm_volt *volt);
int nvkm_voltpwm_set(struct nvkm_volt *volt, u32 uv);
+
+int gf100_volt_oneinit(struct nvkm_volt *);
#endif
diff --git a/drivers/gpu/drm/omapdrm/displays/connector-analog-tv.c b/drivers/gpu/drm/omapdrm/displays/connector-analog-tv.c
index 3485d1ecd655..aaa8a58390f1 100644
--- a/drivers/gpu/drm/omapdrm/displays/connector-analog-tv.c
+++ b/drivers/gpu/drm/omapdrm/displays/connector-analog-tv.c
@@ -24,23 +24,24 @@ struct panel_drv_data {
struct device *dev;
- struct omap_video_timings timings;
+ struct videomode vm;
bool invert_polarity;
};
-static const struct omap_video_timings tvc_pal_timings = {
- .x_res = 720,
- .y_res = 574,
+static const struct videomode tvc_pal_vm = {
+ .hactive = 720,
+ .vactive = 574,
.pixelclock = 13500000,
- .hsw = 64,
- .hfp = 12,
- .hbp = 68,
- .vsw = 5,
- .vfp = 5,
- .vbp = 41,
-
- .interlace = true,
+ .hsync_len = 64,
+ .hfront_porch = 12,
+ .hback_porch = 68,
+ .vsync_len = 5,
+ .vfront_porch = 5,
+ .vback_porch = 41,
+
+ .flags = DISPLAY_FLAGS_INTERLACED | DISPLAY_FLAGS_HSYNC_LOW |
+ DISPLAY_FLAGS_VSYNC_LOW,
};
static const struct of_device_id tvc_of_match[];
@@ -92,7 +93,7 @@ static int tvc_enable(struct omap_dss_device *dssdev)
if (omapdss_device_is_enabled(dssdev))
return 0;
- in->ops.atv->set_timings(in, &ddata->timings);
+ in->ops.atv->set_timings(in, &ddata->vm);
if (!ddata->dev->of_node) {
in->ops.atv->set_type(in, OMAP_DSS_VENC_TYPE_COMPOSITE);
@@ -126,32 +127,32 @@ static void tvc_disable(struct omap_dss_device *dssdev)
}
static void tvc_set_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
struct omap_dss_device *in = ddata->in;
- ddata->timings = *timings;
- dssdev->panel.timings = *timings;
+ ddata->vm = *vm;
+ dssdev->panel.vm = *vm;
- in->ops.atv->set_timings(in, timings);
+ in->ops.atv->set_timings(in, vm);
}
static void tvc_get_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
- *timings = ddata->timings;
+ *vm = ddata->vm;
}
static int tvc_check_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
struct omap_dss_device *in = ddata->in;
- return in->ops.atv->check_timings(in, timings);
+ return in->ops.atv->check_timings(in, vm);
}
static u32 tvc_get_wss(struct omap_dss_device *dssdev)
@@ -253,14 +254,14 @@ static int tvc_probe(struct platform_device *pdev)
return -ENODEV;
}
- ddata->timings = tvc_pal_timings;
+ ddata->vm = tvc_pal_vm;
dssdev = &ddata->dssdev;
dssdev->driver = &tvc_driver;
dssdev->dev = &pdev->dev;
dssdev->type = OMAP_DISPLAY_TYPE_VENC;
dssdev->owner = THIS_MODULE;
- dssdev->panel.timings = tvc_pal_timings;
+ dssdev->panel.vm = tvc_pal_vm;
r = omapdss_register_display(dssdev);
if (r) {
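This connector is the first of many omapdrm conversions below, and they are all the same mechanical translation: omap_video_timings fields map one-to-one onto the generic struct videomode, and the custom level/edge enums collapse into DISPLAY_FLAGS_* bits. A sketch of the correspondence (the helper is illustrative; these patches convert each initializer by hand):

/* illustrative field mapping, not a helper from the tree */
static void
omap_timings_to_vm(const struct omap_video_timings *t, struct videomode *vm)
{
	vm->hactive      = t->x_res;
	vm->vactive      = t->y_res;
	vm->pixelclock   = t->pixelclock;
	vm->hsync_len    = t->hsw;
	vm->hfront_porch = t->hfp;
	vm->hback_porch  = t->hbp;
	vm->vsync_len    = t->vsw;
	vm->vfront_porch = t->vfp;
	vm->vback_porch  = t->vbp;
	vm->flags = 0;	/* sync/DE levels and pclk edges -> DISPLAY_FLAGS_* */
	if (t->interlace)
		vm->flags |= DISPLAY_FLAGS_INTERLACED;
}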
diff --git a/drivers/gpu/drm/omapdrm/displays/connector-dvi.c b/drivers/gpu/drm/omapdrm/displays/connector-dvi.c
index 684b7aeda411..d6875d9fcefa 100644
--- a/drivers/gpu/drm/omapdrm/displays/connector-dvi.c
+++ b/drivers/gpu/drm/omapdrm/displays/connector-dvi.c
@@ -19,32 +19,30 @@
#include "../dss/omapdss.h"
-static const struct omap_video_timings dvic_default_timings = {
- .x_res = 640,
- .y_res = 480,
+static const struct videomode dvic_default_vm = {
+ .hactive = 640,
+ .vactive = 480,
.pixelclock = 23500000,
- .hfp = 48,
- .hsw = 32,
- .hbp = 80,
+ .hfront_porch = 48,
+ .hsync_len = 32,
+ .hback_porch = 80,
- .vfp = 3,
- .vsw = 4,
- .vbp = 7,
+ .vfront_porch = 3,
+ .vsync_len = 4,
+ .vback_porch = 7,
- .vsync_level = OMAPDSS_SIG_ACTIVE_HIGH,
- .hsync_level = OMAPDSS_SIG_ACTIVE_HIGH,
- .data_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE,
- .de_level = OMAPDSS_SIG_ACTIVE_HIGH,
- .sync_pclk_edge = OMAPDSS_DRIVE_SIG_FALLING_EDGE,
+ .flags = DISPLAY_FLAGS_HSYNC_HIGH | DISPLAY_FLAGS_VSYNC_HIGH |
+ DISPLAY_FLAGS_SYNC_NEGEDGE | DISPLAY_FLAGS_DE_HIGH |
+ DISPLAY_FLAGS_PIXDATA_POSEDGE,
};
struct panel_drv_data {
struct omap_dss_device dssdev;
struct omap_dss_device *in;
- struct omap_video_timings timings;
+ struct videomode vm;
struct i2c_adapter *i2c_adapter;
};
@@ -90,7 +88,7 @@ static int dvic_enable(struct omap_dss_device *dssdev)
if (omapdss_device_is_enabled(dssdev))
return 0;
- in->ops.dvi->set_timings(in, &ddata->timings);
+ in->ops.dvi->set_timings(in, &ddata->vm);
r = in->ops.dvi->enable(in);
if (r)
@@ -115,32 +113,32 @@ static void dvic_disable(struct omap_dss_device *dssdev)
}
static void dvic_set_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
struct omap_dss_device *in = ddata->in;
- ddata->timings = *timings;
- dssdev->panel.timings = *timings;
+ ddata->vm = *vm;
+ dssdev->panel.vm = *vm;
- in->ops.dvi->set_timings(in, timings);
+ in->ops.dvi->set_timings(in, vm);
}
static void dvic_get_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
- *timings = ddata->timings;
+ *vm = ddata->vm;
}
static int dvic_check_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
struct omap_dss_device *in = ddata->in;
- return in->ops.dvi->check_timings(in, timings);
+ return in->ops.dvi->check_timings(in, vm);
}
static int dvic_ddc_read(struct i2c_adapter *adapter,
@@ -287,14 +285,14 @@ static int dvic_probe(struct platform_device *pdev)
if (r)
return r;
- ddata->timings = dvic_default_timings;
+ ddata->vm = dvic_default_vm;
dssdev = &ddata->dssdev;
dssdev->driver = &dvic_driver;
dssdev->dev = &pdev->dev;
dssdev->type = OMAP_DISPLAY_TYPE_DVI;
dssdev->owner = THIS_MODULE;
- dssdev->panel.timings = dvic_default_timings;
+ dssdev->panel.vm = dvic_default_vm;
r = omapdss_register_display(dssdev);
if (r) {
diff --git a/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c b/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c
index 7bdf83af9797..1ef130641bae 100644
--- a/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c
+++ b/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c
@@ -21,21 +21,18 @@
#include "../dss/omapdss.h"
-static const struct omap_video_timings hdmic_default_timings = {
- .x_res = 640,
- .y_res = 480,
+static const struct videomode hdmic_default_vm = {
+ .hactive = 640,
+ .vactive = 480,
.pixelclock = 25175000,
- .hsw = 96,
- .hfp = 16,
- .hbp = 48,
- .vsw = 2,
- .vfp = 11,
- .vbp = 31,
-
- .vsync_level = OMAPDSS_SIG_ACTIVE_LOW,
- .hsync_level = OMAPDSS_SIG_ACTIVE_LOW,
-
- .interlace = false,
+ .hsync_len = 96,
+ .hfront_porch = 16,
+ .hback_porch = 48,
+ .vsync_len = 2,
+ .vfront_porch = 11,
+ .vback_porch = 31,
+
+ .flags = DISPLAY_FLAGS_HSYNC_LOW | DISPLAY_FLAGS_VSYNC_LOW,
};
struct panel_drv_data {
@@ -44,7 +41,7 @@ struct panel_drv_data {
struct device *dev;
- struct omap_video_timings timings;
+ struct videomode vm;
int hpd_gpio;
};
@@ -96,7 +93,7 @@ static int hdmic_enable(struct omap_dss_device *dssdev)
if (omapdss_device_is_enabled(dssdev))
return 0;
- in->ops.hdmi->set_timings(in, &ddata->timings);
+ in->ops.hdmi->set_timings(in, &ddata->vm);
r = in->ops.hdmi->enable(in);
if (r)
@@ -123,32 +120,32 @@ static void hdmic_disable(struct omap_dss_device *dssdev)
}
static void hdmic_set_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
struct omap_dss_device *in = ddata->in;
- ddata->timings = *timings;
- dssdev->panel.timings = *timings;
+ ddata->vm = *vm;
+ dssdev->panel.vm = *vm;
- in->ops.hdmi->set_timings(in, timings);
+ in->ops.hdmi->set_timings(in, vm);
}
static void hdmic_get_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
- *timings = ddata->timings;
+ *vm = ddata->vm;
}
static int hdmic_check_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
struct omap_dss_device *in = ddata->in;
- return in->ops.hdmi->check_timings(in, timings);
+ return in->ops.hdmi->check_timings(in, vm);
}
static int hdmic_read_edid(struct omap_dss_device *dssdev,
@@ -259,14 +256,14 @@ static int hdmic_probe(struct platform_device *pdev)
goto err_reg;
}
- ddata->timings = hdmic_default_timings;
+ ddata->vm = hdmic_default_vm;
dssdev = &ddata->dssdev;
dssdev->driver = &hdmic_driver;
dssdev->dev = &pdev->dev;
dssdev->type = OMAP_DISPLAY_TYPE_HDMI;
dssdev->owner = THIS_MODULE;
- dssdev->panel.timings = hdmic_default_timings;
+ dssdev->panel.vm = hdmic_default_vm;
r = omapdss_register_display(dssdev);
if (r) {
diff --git a/drivers/gpu/drm/omapdrm/displays/encoder-opa362.c b/drivers/gpu/drm/omapdrm/displays/encoder-opa362.c
index fe4e7ec3bab0..f7a5731492d0 100644
--- a/drivers/gpu/drm/omapdrm/displays/encoder-opa362.c
+++ b/drivers/gpu/drm/omapdrm/displays/encoder-opa362.c
@@ -27,7 +27,7 @@ struct panel_drv_data {
struct gpio_desc *enable_gpio;
- struct omap_video_timings timings;
+ struct videomode vm;
};
#define to_panel_data(x) container_of(x, struct panel_drv_data, dssdev)
@@ -90,7 +90,7 @@ static int opa362_enable(struct omap_dss_device *dssdev)
if (omapdss_device_is_enabled(dssdev))
return 0;
- in->ops.atv->set_timings(in, &ddata->timings);
+ in->ops.atv->set_timings(in, &ddata->vm);
r = in->ops.atv->enable(in);
if (r)
@@ -123,38 +123,38 @@ static void opa362_disable(struct omap_dss_device *dssdev)
}
static void opa362_set_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
struct omap_dss_device *in = ddata->in;
dev_dbg(dssdev->dev, "set_timings\n");
- ddata->timings = *timings;
- dssdev->panel.timings = *timings;
+ ddata->vm = *vm;
+ dssdev->panel.vm = *vm;
- in->ops.atv->set_timings(in, timings);
+ in->ops.atv->set_timings(in, vm);
}
static void opa362_get_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
dev_dbg(dssdev->dev, "get_timings\n");
- *timings = ddata->timings;
+ *vm = ddata->vm;
}
static int opa362_check_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
struct omap_dss_device *in = ddata->in;
dev_dbg(dssdev->dev, "check_timings\n");
- return in->ops.atv->check_timings(in, timings);
+ return in->ops.atv->check_timings(in, vm);
}
static void opa362_set_type(struct omap_dss_device *dssdev,
diff --git a/drivers/gpu/drm/omapdrm/displays/encoder-tfp410.c b/drivers/gpu/drm/omapdrm/displays/encoder-tfp410.c
index d768217cefe0..13e32d02c884 100644
--- a/drivers/gpu/drm/omapdrm/displays/encoder-tfp410.c
+++ b/drivers/gpu/drm/omapdrm/displays/encoder-tfp410.c
@@ -24,7 +24,7 @@ struct panel_drv_data {
int pd_gpio;
int data_lines;
- struct omap_video_timings timings;
+ struct videomode vm;
};
#define to_panel_data(x) container_of(x, struct panel_drv_data, dssdev)
@@ -81,7 +81,7 @@ static int tfp410_enable(struct omap_dss_device *dssdev)
if (omapdss_device_is_enabled(dssdev))
return 0;
- in->ops.dpi->set_timings(in, &ddata->timings);
+ in->ops.dpi->set_timings(in, &ddata->vm);
if (ddata->data_lines)
in->ops.dpi->set_data_lines(in, ddata->data_lines);
@@ -113,44 +113,43 @@ static void tfp410_disable(struct omap_dss_device *dssdev)
dssdev->state = OMAP_DSS_DISPLAY_DISABLED;
}
-static void tfp410_fix_timings(struct omap_video_timings *timings)
+static void tfp410_fix_timings(struct videomode *vm)
{
- timings->data_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE;
- timings->sync_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE;
- timings->de_level = OMAPDSS_SIG_ACTIVE_HIGH;
+ vm->flags |= DISPLAY_FLAGS_DE_HIGH | DISPLAY_FLAGS_PIXDATA_POSEDGE |
+ DISPLAY_FLAGS_SYNC_POSEDGE;
}
static void tfp410_set_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
struct omap_dss_device *in = ddata->in;
- tfp410_fix_timings(timings);
+ tfp410_fix_timings(vm);
- ddata->timings = *timings;
- dssdev->panel.timings = *timings;
+ ddata->vm = *vm;
+ dssdev->panel.vm = *vm;
- in->ops.dpi->set_timings(in, timings);
+ in->ops.dpi->set_timings(in, vm);
}
static void tfp410_get_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
- *timings = ddata->timings;
+ *vm = ddata->vm;
}
static int tfp410_check_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
struct omap_dss_device *in = ddata->in;
- tfp410_fix_timings(timings);
+ tfp410_fix_timings(vm);
- return in->ops.dpi->check_timings(in, timings);
+ return in->ops.dpi->check_timings(in, vm);
}
static const struct omapdss_dvi_ops tfp410_dvi_ops = {
diff --git a/drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c b/drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c
index 46855c8f5cbf..58276a48112e 100644
--- a/drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c
+++ b/drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c
@@ -26,7 +26,7 @@ struct panel_drv_data {
struct gpio_desc *ls_oe_gpio;
struct gpio_desc *hpd_gpio;
- struct omap_video_timings timings;
+ struct videomode vm;
};
#define to_panel_data(x) container_of(x, struct panel_drv_data, dssdev)
@@ -80,7 +80,7 @@ static int tpd_enable(struct omap_dss_device *dssdev)
if (dssdev->state == OMAP_DSS_DISPLAY_ACTIVE)
return 0;
- in->ops.hdmi->set_timings(in, &ddata->timings);
+ in->ops.hdmi->set_timings(in, &ddata->vm);
r = in->ops.hdmi->enable(in);
if (r)
@@ -105,33 +105,33 @@ static void tpd_disable(struct omap_dss_device *dssdev)
}
static void tpd_set_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
struct omap_dss_device *in = ddata->in;
- ddata->timings = *timings;
- dssdev->panel.timings = *timings;
+ ddata->vm = *vm;
+ dssdev->panel.vm = *vm;
- in->ops.hdmi->set_timings(in, timings);
+ in->ops.hdmi->set_timings(in, vm);
}
static void tpd_get_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
- *timings = ddata->timings;
+ *vm = ddata->vm;
}
static int tpd_check_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
struct omap_dss_device *in = ddata->in;
int r;
- r = in->ops.hdmi->check_timings(in, timings);
+ r = in->ops.hdmi->check_timings(in, vm);
return r;
}
@@ -234,25 +234,30 @@ static int tpd_probe(struct platform_device *pdev)
if (r)
return r;
-
gpio = devm_gpiod_get_index_optional(&pdev->dev, NULL, 0,
GPIOD_OUT_LOW);
- if (IS_ERR(gpio))
+ if (IS_ERR(gpio)) {
+ r = PTR_ERR(gpio);
goto err_gpio;
+ }
ddata->ct_cp_hpd_gpio = gpio;
gpio = devm_gpiod_get_index_optional(&pdev->dev, NULL, 1,
GPIOD_OUT_LOW);
- if (IS_ERR(gpio))
+ if (IS_ERR(gpio)) {
+ r = PTR_ERR(gpio);
goto err_gpio;
+ }
ddata->ls_oe_gpio = gpio;
gpio = devm_gpiod_get_index(&pdev->dev, NULL, 2,
GPIOD_IN);
- if (IS_ERR(gpio))
+ if (IS_ERR(gpio)) {
+ r = PTR_ERR(gpio);
goto err_gpio;
+ }
ddata->hpd_gpio = gpio;
diff --git a/drivers/gpu/drm/omapdrm/displays/panel-dpi.c b/drivers/gpu/drm/omapdrm/displays/panel-dpi.c
index 7f16f985ab22..38003208d9ca 100644
--- a/drivers/gpu/drm/omapdrm/displays/panel-dpi.c
+++ b/drivers/gpu/drm/omapdrm/displays/panel-dpi.c
@@ -28,7 +28,7 @@ struct panel_drv_data {
int data_lines;
- struct omap_video_timings videomode;
+ struct videomode vm;
/* used for non-DT boot, to be removed */
int backlight_gpio;
@@ -80,7 +80,7 @@ static int panel_dpi_enable(struct omap_dss_device *dssdev)
if (ddata->data_lines)
in->ops.dpi->set_data_lines(in, ddata->data_lines);
- in->ops.dpi->set_timings(in, &ddata->videomode);
+ in->ops.dpi->set_timings(in, &ddata->vm);
r = in->ops.dpi->enable(in);
if (r)
@@ -122,32 +122,32 @@ static void panel_dpi_disable(struct omap_dss_device *dssdev)
}
static void panel_dpi_set_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
struct omap_dss_device *in = ddata->in;
- ddata->videomode = *timings;
- dssdev->panel.timings = *timings;
+ ddata->vm = *vm;
+ dssdev->panel.vm = *vm;
- in->ops.dpi->set_timings(in, timings);
+ in->ops.dpi->set_timings(in, vm);
}
static void panel_dpi_get_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
- *timings = ddata->videomode;
+ *vm = ddata->vm;
}
static int panel_dpi_check_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
struct omap_dss_device *in = ddata->in;
- return in->ops.dpi->check_timings(in, timings);
+ return in->ops.dpi->check_timings(in, vm);
}
static struct omap_dss_driver panel_dpi_ops = {
@@ -169,7 +169,6 @@ static int panel_dpi_probe_pdata(struct platform_device *pdev)
const struct panel_dpi_platform_data *pdata;
struct panel_drv_data *ddata = platform_get_drvdata(pdev);
struct omap_dss_device *dssdev, *in;
- struct videomode vm;
int r;
pdata = dev_get_platdata(&pdev->dev);
@@ -185,8 +184,7 @@ static int panel_dpi_probe_pdata(struct platform_device *pdev)
ddata->data_lines = pdata->data_lines;
- videomode_from_timing(pdata->display_timing, &vm);
- videomode_to_omap_video_timings(&vm, &ddata->videomode);
+ videomode_from_timing(pdata->display_timing, &ddata->vm);
dssdev = &ddata->dssdev;
dssdev->name = pdata->name;
@@ -214,7 +212,6 @@ static int panel_dpi_probe_of(struct platform_device *pdev)
struct omap_dss_device *in;
int r;
struct display_timing timing;
- struct videomode vm;
struct gpio_desc *gpio;
gpio = devm_gpiod_get_optional(&pdev->dev, "enable", GPIOD_OUT_LOW);
@@ -245,8 +242,7 @@ static int panel_dpi_probe_of(struct platform_device *pdev)
return r;
}
- videomode_from_timing(&timing, &vm);
- videomode_to_omap_video_timings(&vm, &ddata->videomode);
+ videomode_from_timing(&timing, &ddata->vm);
in = omapdss_of_find_source_for_first_ep(node);
if (IS_ERR(in)) {
@@ -295,7 +291,7 @@ static int panel_dpi_probe(struct platform_device *pdev)
dssdev->driver = &panel_dpi_ops;
dssdev->type = OMAP_DISPLAY_TYPE_DPI;
dssdev->owner = THIS_MODULE;
- dssdev->panel.timings = ddata->videomode;
+ dssdev->panel.vm = ddata->vm;
dssdev->phy.dpi.data_lines = ddata->data_lines;
r = omapdss_register_display(dssdev);
diff --git a/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c b/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c
index b1f3b818edf4..dc026a843712 100644
--- a/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c
+++ b/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c
@@ -42,7 +42,7 @@ struct panel_drv_data {
struct omap_dss_device dssdev;
struct omap_dss_device *in;
- struct omap_video_timings timings;
+ struct videomode vm;
struct platform_device *pdev;
@@ -382,8 +382,8 @@ static const struct backlight_ops dsicm_bl_ops = {
static void dsicm_get_resolution(struct omap_dss_device *dssdev,
u16 *xres, u16 *yres)
{
- *xres = dssdev->panel.timings.x_res;
- *yres = dssdev->panel.timings.y_res;
+ *xres = dssdev->panel.vm.hactive;
+ *yres = dssdev->panel.vm.vactive;
}
static ssize_t dsicm_num_errors_show(struct device *dev,
@@ -589,7 +589,7 @@ static int dsicm_power_on(struct panel_drv_data *ddata)
struct omap_dss_dsi_config dsi_config = {
.mode = OMAP_DSS_DSI_CMD_MODE,
.pixel_format = OMAP_DSS_DSI_FMT_RGB888,
- .timings = &ddata->timings,
+ .vm = &ddata->vm,
.hs_clk_min = 150000000,
.hs_clk_max = 300000000,
.lp_clk_min = 7000000,
@@ -892,8 +892,8 @@ static int dsicm_update(struct omap_dss_device *dssdev,
/* XXX no need to send this every frame, but dsi break if not done */
r = dsicm_set_update_window(ddata, 0, 0,
- dssdev->panel.timings.x_res,
- dssdev->panel.timings.y_res);
+ dssdev->panel.vm.hactive,
+ dssdev->panel.vm.vactive);
if (r)
goto err;
@@ -1023,9 +1023,8 @@ static int dsicm_memory_read(struct omap_dss_device *dssdev,
goto err1;
}
- size = min(w * h * 3,
- dssdev->panel.timings.x_res *
- dssdev->panel.timings.y_res * 3);
+ size = min((u32)w * h * 3,
+ dssdev->panel.vm.hactive * dssdev->panel.vm.vactive * 3);
in->ops.dsi->bus_lock(in);
@@ -1186,14 +1185,14 @@ static int dsicm_probe(struct platform_device *pdev)
if (r)
return r;
- ddata->timings.x_res = 864;
- ddata->timings.y_res = 480;
- ddata->timings.pixelclock = 864 * 480 * 60;
+ ddata->vm.hactive = 864;
+ ddata->vm.vactive = 480;
+ ddata->vm.pixelclock = 864 * 480 * 60;
dssdev = &ddata->dssdev;
dssdev->dev = dev;
dssdev->driver = &dsicm_ops;
- dssdev->panel.timings = ddata->timings;
+ dssdev->panel.vm = ddata->vm;
dssdev->type = OMAP_DISPLAY_TYPE_DSI;
dssdev->owner = THIS_MODULE;
diff --git a/drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c b/drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c
index 6dfb96cea293..43d21edb51f5 100644
--- a/drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c
+++ b/drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c
@@ -19,25 +19,28 @@
#include "../dss/omapdss.h"
-static struct omap_video_timings lb035q02_timings = {
- .x_res = 320,
- .y_res = 240,
+static struct videomode lb035q02_vm = {
+ .hactive = 320,
+ .vactive = 240,
.pixelclock = 6500000,
- .hsw = 2,
- .hfp = 20,
- .hbp = 68,
-
- .vsw = 2,
- .vfp = 4,
- .vbp = 18,
-
- .vsync_level = OMAPDSS_SIG_ACTIVE_LOW,
- .hsync_level = OMAPDSS_SIG_ACTIVE_LOW,
- .data_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE,
- .de_level = OMAPDSS_SIG_ACTIVE_HIGH,
- .sync_pclk_edge = OMAPDSS_DRIVE_SIG_FALLING_EDGE,
+ .hsync_len = 2,
+ .hfront_porch = 20,
+ .hback_porch = 68,
+
+ .vsync_len = 2,
+ .vfront_porch = 4,
+ .vback_porch = 18,
+
+ .flags = DISPLAY_FLAGS_HSYNC_LOW | DISPLAY_FLAGS_VSYNC_LOW |
+ DISPLAY_FLAGS_DE_HIGH | DISPLAY_FLAGS_SYNC_NEGEDGE |
+ DISPLAY_FLAGS_PIXDATA_POSEDGE,
+ /*
+ * Note: According to the panel documentation:
+ * DE is active LOW
+ * DATA needs to be driven on the FALLING edge
+ */
};
struct panel_drv_data {
@@ -48,7 +51,7 @@ struct panel_drv_data {
int data_lines;
- struct omap_video_timings videomode;
+ struct videomode vm;
struct gpio_desc *enable_gpio;
};
@@ -158,7 +161,7 @@ static int lb035q02_enable(struct omap_dss_device *dssdev)
if (ddata->data_lines)
in->ops.dpi->set_data_lines(in, ddata->data_lines);
- in->ops.dpi->set_timings(in, &ddata->videomode);
+ in->ops.dpi->set_timings(in, &ddata->vm);
r = in->ops.dpi->enable(in);
if (r)
@@ -189,32 +192,32 @@ static void lb035q02_disable(struct omap_dss_device *dssdev)
}
static void lb035q02_set_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
struct omap_dss_device *in = ddata->in;
- ddata->videomode = *timings;
- dssdev->panel.timings = *timings;
+ ddata->vm = *vm;
+ dssdev->panel.vm = *vm;
- in->ops.dpi->set_timings(in, timings);
+ in->ops.dpi->set_timings(in, vm);
}
static void lb035q02_get_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
- *timings = ddata->videomode;
+ *vm = ddata->vm;
}
static int lb035q02_check_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
struct omap_dss_device *in = ddata->in;
- return in->ops.dpi->check_timings(in, timings);
+ return in->ops.dpi->check_timings(in, vm);
}
static struct omap_dss_driver lb035q02_ops = {
@@ -278,14 +281,14 @@ static int lb035q02_panel_spi_probe(struct spi_device *spi)
if (r)
return r;
- ddata->videomode = lb035q02_timings;
+ ddata->vm = lb035q02_vm;
dssdev = &ddata->dssdev;
dssdev->dev = &spi->dev;
dssdev->driver = &lb035q02_ops;
dssdev->type = OMAP_DISPLAY_TYPE_DPI;
dssdev->owner = THIS_MODULE;
- dssdev->panel.timings = ddata->videomode;
+ dssdev->panel.vm = ddata->vm;
dssdev->phy.dpi.data_lines = ddata->data_lines;
r = omapdss_register_display(dssdev);
diff --git a/drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c b/drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c
index 9f3d6f48f3e1..2de27ba01552 100644
--- a/drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c
+++ b/drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c
@@ -23,7 +23,7 @@ struct panel_drv_data {
struct omap_dss_device dssdev;
struct omap_dss_device *in;
- struct omap_video_timings videomode;
+ struct videomode vm;
int data_lines;
@@ -65,22 +65,20 @@ static const struct {
{ 156, 0x00 }, { 157, 0x00 }, { 2, 0x00 },
};
-static const struct omap_video_timings nec_8048_panel_timings = {
- .x_res = LCD_XRES,
- .y_res = LCD_YRES,
+static const struct videomode nec_8048_panel_vm = {
+ .hactive = LCD_XRES,
+ .vactive = LCD_YRES,
.pixelclock = LCD_PIXEL_CLOCK,
- .hfp = 6,
- .hsw = 1,
- .hbp = 4,
- .vfp = 3,
- .vsw = 1,
- .vbp = 4,
-
- .vsync_level = OMAPDSS_SIG_ACTIVE_LOW,
- .hsync_level = OMAPDSS_SIG_ACTIVE_LOW,
- .data_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE,
- .de_level = OMAPDSS_SIG_ACTIVE_HIGH,
- .sync_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE,
+ .hfront_porch = 6,
+ .hsync_len = 1,
+ .hback_porch = 4,
+ .vfront_porch = 3,
+ .vsync_len = 1,
+ .vback_porch = 4,
+
+ .flags = DISPLAY_FLAGS_HSYNC_LOW | DISPLAY_FLAGS_VSYNC_LOW |
+ DISPLAY_FLAGS_DE_HIGH | DISPLAY_FLAGS_SYNC_POSEDGE |
+ DISPLAY_FLAGS_PIXDATA_POSEDGE,
};
#define to_panel_data(p) container_of(p, struct panel_drv_data, dssdev)
@@ -157,7 +155,7 @@ static int nec_8048_enable(struct omap_dss_device *dssdev)
if (ddata->data_lines)
in->ops.dpi->set_data_lines(in, ddata->data_lines);
- in->ops.dpi->set_timings(in, &ddata->videomode);
+ in->ops.dpi->set_timings(in, &ddata->vm);
r = in->ops.dpi->enable(in);
if (r)
@@ -188,32 +186,32 @@ static void nec_8048_disable(struct omap_dss_device *dssdev)
}
static void nec_8048_set_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
struct omap_dss_device *in = ddata->in;
- ddata->videomode = *timings;
- dssdev->panel.timings = *timings;
+ ddata->vm = *vm;
+ dssdev->panel.vm = *vm;
- in->ops.dpi->set_timings(in, timings);
+ in->ops.dpi->set_timings(in, vm);
}
static void nec_8048_get_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
- *timings = ddata->videomode;
+ *vm = ddata->vm;
}
static int nec_8048_check_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
struct omap_dss_device *in = ddata->in;
- return in->ops.dpi->check_timings(in, timings);
+ return in->ops.dpi->check_timings(in, vm);
}
static struct omap_dss_driver nec_8048_ops = {
@@ -306,14 +304,14 @@ static int nec_8048_probe(struct spi_device *spi)
goto err_gpio;
}
- ddata->videomode = nec_8048_panel_timings;
+ ddata->vm = nec_8048_panel_vm;
dssdev = &ddata->dssdev;
dssdev->dev = &spi->dev;
dssdev->driver = &nec_8048_ops;
dssdev->type = OMAP_DISPLAY_TYPE_DPI;
dssdev->owner = THIS_MODULE;
- dssdev->panel.timings = ddata->videomode;
+ dssdev->panel.vm = ddata->vm;
r = omapdss_register_display(dssdev);
if (r) {
diff --git a/drivers/gpu/drm/omapdrm/displays/panel-sharp-ls037v7dw01.c b/drivers/gpu/drm/omapdrm/displays/panel-sharp-ls037v7dw01.c
index 3d3efc561ea9..04fe235b7cac 100644
--- a/drivers/gpu/drm/omapdrm/displays/panel-sharp-ls037v7dw01.c
+++ b/drivers/gpu/drm/omapdrm/displays/panel-sharp-ls037v7dw01.c
@@ -26,7 +26,7 @@ struct panel_drv_data {
int data_lines;
- struct omap_video_timings videomode;
+ struct videomode vm;
struct gpio_desc *resb_gpio; /* low = reset active min 20 us */
struct gpio_desc *ini_gpio; /* high = power on */
@@ -35,25 +35,27 @@ struct panel_drv_data {
struct gpio_desc *ud_gpio; /* high = conventional vertical scanning */
};
-static const struct omap_video_timings sharp_ls_timings = {
- .x_res = 480,
- .y_res = 640,
+static const struct videomode sharp_ls_vm = {
+ .hactive = 480,
+ .vactive = 640,
.pixelclock = 19200000,
- .hsw = 2,
- .hfp = 1,
- .hbp = 28,
-
- .vsw = 1,
- .vfp = 1,
- .vbp = 1,
-
- .vsync_level = OMAPDSS_SIG_ACTIVE_LOW,
- .hsync_level = OMAPDSS_SIG_ACTIVE_LOW,
- .data_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE,
- .de_level = OMAPDSS_SIG_ACTIVE_HIGH,
- .sync_pclk_edge = OMAPDSS_DRIVE_SIG_FALLING_EDGE,
+ .hsync_len = 2,
+ .hfront_porch = 1,
+ .hback_porch = 28,
+
+ .vsync_len = 1,
+ .vfront_porch = 1,
+ .vback_porch = 1,
+
+ .flags = DISPLAY_FLAGS_HSYNC_LOW | DISPLAY_FLAGS_VSYNC_LOW |
+ DISPLAY_FLAGS_DE_HIGH | DISPLAY_FLAGS_SYNC_NEGEDGE |
+ DISPLAY_FLAGS_PIXDATA_POSEDGE,
+ /*
+ * Note: According to the panel documentation:
+ * DATA needs to be driven on the FALLING edge
+ */
};
#define to_panel_data(p) container_of(p, struct panel_drv_data, dssdev)
@@ -99,7 +101,7 @@ static int sharp_ls_enable(struct omap_dss_device *dssdev)
if (ddata->data_lines)
in->ops.dpi->set_data_lines(in, ddata->data_lines);
- in->ops.dpi->set_timings(in, &ddata->videomode);
+ in->ops.dpi->set_timings(in, &ddata->vm);
if (ddata->vcc) {
r = regulator_enable(ddata->vcc);
@@ -154,32 +156,32 @@ static void sharp_ls_disable(struct omap_dss_device *dssdev)
}
static void sharp_ls_set_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
struct omap_dss_device *in = ddata->in;
- ddata->videomode = *timings;
- dssdev->panel.timings = *timings;
+ ddata->vm = *vm;
+ dssdev->panel.vm = *vm;
- in->ops.dpi->set_timings(in, timings);
+ in->ops.dpi->set_timings(in, vm);
}
static void sharp_ls_get_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
- *timings = ddata->videomode;
+ *vm = ddata->vm;
}
static int sharp_ls_check_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
struct omap_dss_device *in = ddata->in;
- return in->ops.dpi->check_timings(in, timings);
+ return in->ops.dpi->check_timings(in, vm);
}
static struct omap_dss_driver sharp_ls_ops = {
@@ -279,14 +281,14 @@ static int sharp_ls_probe(struct platform_device *pdev)
if (r)
return r;
- ddata->videomode = sharp_ls_timings;
+ ddata->vm = sharp_ls_vm;
dssdev = &ddata->dssdev;
dssdev->dev = &pdev->dev;
dssdev->driver = &sharp_ls_ops;
dssdev->type = OMAP_DISPLAY_TYPE_DPI;
dssdev->owner = THIS_MODULE;
- dssdev->panel.timings = ddata->videomode;
+ dssdev->panel.vm = ddata->vm;
dssdev->phy.dpi.data_lines = ddata->data_lines;
r = omapdss_register_display(dssdev);
diff --git a/drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c b/drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c
index 3557a4c7dd7b..746cb8d9cba1 100644
--- a/drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c
+++ b/drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c
@@ -71,7 +71,7 @@ struct panel_drv_data {
int reset_gpio;
int datapairs;
- struct omap_video_timings videomode;
+ struct videomode vm;
char *name;
int enabled;
@@ -92,23 +92,20 @@ struct panel_drv_data {
struct backlight_device *bl_dev;
};
-static const struct omap_video_timings acx565akm_panel_timings = {
- .x_res = 800,
- .y_res = 480,
+static const struct videomode acx565akm_panel_vm = {
+ .hactive = 800,
+ .vactive = 480,
.pixelclock = 24000000,
- .hfp = 28,
- .hsw = 4,
- .hbp = 24,
- .vfp = 3,
- .vsw = 3,
- .vbp = 4,
-
- .vsync_level = OMAPDSS_SIG_ACTIVE_LOW,
- .hsync_level = OMAPDSS_SIG_ACTIVE_LOW,
-
- .data_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE,
- .de_level = OMAPDSS_SIG_ACTIVE_HIGH,
- .sync_pclk_edge = OMAPDSS_DRIVE_SIG_FALLING_EDGE,
+ .hfront_porch = 28,
+ .hsync_len = 4,
+ .hback_porch = 24,
+ .vfront_porch = 3,
+ .vsync_len = 3,
+ .vback_porch = 4,
+
+ .flags = DISPLAY_FLAGS_HSYNC_LOW | DISPLAY_FLAGS_VSYNC_LOW |
+ DISPLAY_FLAGS_DE_HIGH | DISPLAY_FLAGS_SYNC_NEGEDGE |
+ DISPLAY_FLAGS_PIXDATA_POSEDGE,
};
#define to_panel_data(p) container_of(p, struct panel_drv_data, dssdev)
@@ -548,7 +545,7 @@ static int acx565akm_panel_power_on(struct omap_dss_device *dssdev)
dev_dbg(&ddata->spi->dev, "%s\n", __func__);
- in->ops.sdi->set_timings(in, &ddata->videomode);
+ in->ops.sdi->set_timings(in, &ddata->vm);
if (ddata->datapairs > 0)
in->ops.sdi->set_datapairs(in, ddata->datapairs);
@@ -662,32 +659,32 @@ static void acx565akm_disable(struct omap_dss_device *dssdev)
}
static void acx565akm_set_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
struct omap_dss_device *in = ddata->in;
- ddata->videomode = *timings;
- dssdev->panel.timings = *timings;
+ ddata->vm = *vm;
+ dssdev->panel.vm = *vm;
- in->ops.sdi->set_timings(in, timings);
+ in->ops.sdi->set_timings(in, vm);
}
static void acx565akm_get_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
- *timings = ddata->videomode;
+ *vm = ddata->vm;
}
static int acx565akm_check_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
struct omap_dss_device *in = ddata->in;
- return in->ops.sdi->check_timings(in, timings);
+ return in->ops.sdi->check_timings(in, vm);
}
static struct omap_dss_driver acx565akm_ops = {
@@ -845,14 +842,14 @@ static int acx565akm_probe(struct spi_device *spi)
acx565akm_bl_update_status(bldev);
- ddata->videomode = acx565akm_panel_timings;
+ ddata->vm = acx565akm_panel_vm;
dssdev = &ddata->dssdev;
dssdev->dev = &spi->dev;
dssdev->driver = &acx565akm_ops;
dssdev->type = OMAP_DISPLAY_TYPE_SDI;
dssdev->owner = THIS_MODULE;
- dssdev->panel.timings = ddata->videomode;
+ dssdev->panel.vm = ddata->vm;
r = omapdss_register_display(dssdev);
if (r) {
diff --git a/drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c b/drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c
index e859b3f893f7..f313dbfcbacb 100644
--- a/drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c
+++ b/drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c
@@ -37,28 +37,29 @@ struct panel_drv_data {
int data_lines;
- struct omap_video_timings videomode;
+ struct videomode vm;
struct spi_device *spi_dev;
};
-static struct omap_video_timings td028ttec1_panel_timings = {
- .x_res = 480,
- .y_res = 640,
+static struct videomode td028ttec1_panel_vm = {
+ .hactive = 480,
+ .vactive = 640,
.pixelclock = 22153000,
- .hfp = 24,
- .hsw = 8,
- .hbp = 8,
- .vfp = 4,
- .vsw = 2,
- .vbp = 2,
-
- .vsync_level = OMAPDSS_SIG_ACTIVE_LOW,
- .hsync_level = OMAPDSS_SIG_ACTIVE_LOW,
-
- .data_pclk_edge = OMAPDSS_DRIVE_SIG_FALLING_EDGE,
- .de_level = OMAPDSS_SIG_ACTIVE_HIGH,
- .sync_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE,
+ .hfront_porch = 24,
+ .hsync_len = 8,
+ .hback_porch = 8,
+ .vfront_porch = 4,
+ .vsync_len = 2,
+ .vback_porch = 2,
+
+ .flags = DISPLAY_FLAGS_HSYNC_LOW | DISPLAY_FLAGS_VSYNC_LOW |
+ DISPLAY_FLAGS_DE_HIGH | DISPLAY_FLAGS_SYNC_POSEDGE |
+ DISPLAY_FLAGS_PIXDATA_NEGEDGE,
+ /*
+ * Note: According to the panel documentation:
+ * SYNC needs to be driven on the FALLING edge
+ */
};
#define JBT_COMMAND 0x000
@@ -208,7 +209,7 @@ static int td028ttec1_panel_enable(struct omap_dss_device *dssdev)
if (ddata->data_lines)
in->ops.dpi->set_data_lines(in, ddata->data_lines);
- in->ops.dpi->set_timings(in, &ddata->videomode);
+ in->ops.dpi->set_timings(in, &ddata->vm);
r = in->ops.dpi->enable(in);
if (r)
@@ -325,32 +326,32 @@ static void td028ttec1_panel_disable(struct omap_dss_device *dssdev)
}
static void td028ttec1_panel_set_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
struct omap_dss_device *in = ddata->in;
- ddata->videomode = *timings;
- dssdev->panel.timings = *timings;
+ ddata->vm = *vm;
+ dssdev->panel.vm = *vm;
- in->ops.dpi->set_timings(in, timings);
+ in->ops.dpi->set_timings(in, vm);
}
static void td028ttec1_panel_get_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
- *timings = ddata->videomode;
+ *vm = ddata->vm;
}
static int td028ttec1_panel_check_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
struct omap_dss_device *in = ddata->in;
- return in->ops.dpi->check_timings(in, timings);
+ return in->ops.dpi->check_timings(in, vm);
}
static struct omap_dss_driver td028ttec1_ops = {
@@ -414,14 +415,14 @@ static int td028ttec1_panel_probe(struct spi_device *spi)
if (r)
return r;
- ddata->videomode = td028ttec1_panel_timings;
+ ddata->vm = td028ttec1_panel_vm;
dssdev = &ddata->dssdev;
dssdev->dev = &spi->dev;
dssdev->driver = &td028ttec1_ops;
dssdev->type = OMAP_DISPLAY_TYPE_DPI;
dssdev->owner = THIS_MODULE;
- dssdev->panel.timings = ddata->videomode;
+ dssdev->panel.vm = ddata->vm;
dssdev->phy.dpi.data_lines = ddata->data_lines;
r = omapdss_register_display(dssdev);
diff --git a/drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c b/drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c
index 66c6bbe6472b..0787dba44faa 100644
--- a/drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c
+++ b/drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c
@@ -56,7 +56,7 @@ struct panel_drv_data {
struct omap_dss_device dssdev;
struct omap_dss_device *in;
- struct omap_video_timings videomode;
+ struct videomode vm;
int data_lines;
@@ -72,25 +72,27 @@ struct panel_drv_data {
u32 power_on_resume:1;
};
-static const struct omap_video_timings tpo_td043_timings = {
- .x_res = 800,
- .y_res = 480,
+static const struct videomode tpo_td043_vm = {
+ .hactive = 800,
+ .vactive = 480,
.pixelclock = 36000000,
- .hsw = 1,
- .hfp = 68,
- .hbp = 214,
+ .hsync_len = 1,
+ .hfront_porch = 68,
+ .hback_porch = 214,
- .vsw = 1,
- .vfp = 39,
- .vbp = 34,
+ .vsync_len = 1,
+ .vfront_porch = 39,
+ .vback_porch = 34,
- .vsync_level = OMAPDSS_SIG_ACTIVE_LOW,
- .hsync_level = OMAPDSS_SIG_ACTIVE_LOW,
- .data_pclk_edge = OMAPDSS_DRIVE_SIG_FALLING_EDGE,
- .de_level = OMAPDSS_SIG_ACTIVE_HIGH,
- .sync_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE,
+ .flags = DISPLAY_FLAGS_HSYNC_LOW | DISPLAY_FLAGS_VSYNC_LOW |
+ DISPLAY_FLAGS_DE_HIGH | DISPLAY_FLAGS_SYNC_POSEDGE |
+ DISPLAY_FLAGS_PIXDATA_NEGEDGE,
+ /*
+ * Note: According to the panel documentation:
+ * SYNC needs to be driven on the FALLING edge
+ */
};
#define to_panel_data(p) container_of(p, struct panel_drv_data, dssdev)
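The converted fields compose exactly as the old ones did: htotal and vtotal are the active size plus the three blanking terms. A minimal standalone sketch (illustrative, not part of the patch) that sanity-checks the refresh rate of tpo_td043_vm above:

#include <stdio.h>

/* simplified stand-in for the kernel's struct videomode */
struct vm {
	unsigned long pixelclock;
	unsigned int hactive, hfront_porch, hsync_len, hback_porch;
	unsigned int vactive, vfront_porch, vsync_len, vback_porch;
};

int main(void)
{
	/* values copied from tpo_td043_vm above */
	struct vm vm = {
		.pixelclock = 36000000,
		.hactive = 800, .hfront_porch = 68, .hsync_len = 1, .hback_porch = 214,
		.vactive = 480, .vfront_porch = 39, .vsync_len = 1, .vback_porch = 34,
	};
	unsigned int htot = vm.hactive + vm.hfront_porch + vm.hsync_len + vm.hback_porch;
	unsigned int vtot = vm.vactive + vm.vfront_porch + vm.vsync_len + vm.vback_porch;

	/* 36000000 / (1083 * 554) ~= 60 Hz */
	printf("htot=%u vtot=%u refresh=%lu Hz\n", htot, vtot,
	       vm.pixelclock / (htot * vtot));
	return 0;
}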
@@ -378,7 +380,7 @@ static int tpo_td043_enable(struct omap_dss_device *dssdev)
if (ddata->data_lines)
in->ops.dpi->set_data_lines(in, ddata->data_lines);
- in->ops.dpi->set_timings(in, &ddata->videomode);
+ in->ops.dpi->set_timings(in, &ddata->vm);
r = in->ops.dpi->enable(in);
if (r)
@@ -418,32 +420,32 @@ static void tpo_td043_disable(struct omap_dss_device *dssdev)
}
static void tpo_td043_set_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
struct omap_dss_device *in = ddata->in;
- ddata->videomode = *timings;
- dssdev->panel.timings = *timings;
+ ddata->vm = *vm;
+ dssdev->panel.vm = *vm;
- in->ops.dpi->set_timings(in, timings);
+ in->ops.dpi->set_timings(in, vm);
}
static void tpo_td043_get_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
- *timings = ddata->videomode;
+ *vm = ddata->vm;
}
static int tpo_td043_check_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
struct omap_dss_device *in = ddata->in;
- return in->ops.dpi->check_timings(in, timings);
+ return in->ops.dpi->check_timings(in, vm);
}
static struct omap_dss_driver tpo_td043_ops = {
@@ -546,14 +548,14 @@ static int tpo_td043_probe(struct spi_device *spi)
goto err_sysfs;
}
- ddata->videomode = tpo_td043_timings;
+ ddata->vm = tpo_td043_vm;
dssdev = &ddata->dssdev;
dssdev->dev = &spi->dev;
dssdev->driver = &tpo_td043_ops;
dssdev->type = OMAP_DISPLAY_TYPE_DPI;
dssdev->owner = THIS_MODULE;
- dssdev->panel.timings = ddata->videomode;
+ dssdev->panel.vm = ddata->vm;
r = omapdss_register_display(dssdev);
if (r) {
diff --git a/drivers/gpu/drm/omapdrm/dss/dispc.c b/drivers/gpu/drm/omapdrm/dss/dispc.c
index 535240fba671..c839f6456db2 100644
--- a/drivers/gpu/drm/omapdrm/dss/dispc.c
+++ b/drivers/gpu/drm/omapdrm/dss/dispc.c
@@ -75,7 +75,7 @@ struct dispc_features {
unsigned long max_lcd_pclk;
unsigned long max_tv_pclk;
int (*calc_scaling) (unsigned long pclk, unsigned long lclk,
- const struct omap_video_timings *mgr_timings,
+ const struct videomode *vm,
u16 width, u16 height, u16 out_width, u16 out_height,
enum omap_color_mode color_mode, bool *five_taps,
int *x_predecim, int *y_predecim, int *decim_x, int *decim_y,
@@ -1679,7 +1679,7 @@ static void dispc_ovl_set_scaling_uv(enum omap_plane plane,
{
int scale_x = out_width != orig_width;
int scale_y = out_height != orig_height;
- bool chroma_upscale = plane != OMAP_DSS_WB ? true : false;
+ bool chroma_upscale = plane != OMAP_DSS_WB;
if (!dss_has_feature(FEAT_HANDLE_UV_SEPARATE))
return;
@@ -2179,7 +2179,7 @@ static void calc_tiler_rotation_offset(u16 screen_width, u16 width,
* undocumented horizontal position and timing related limitations.
*/
static int check_horiz_timing_omap3(unsigned long pclk, unsigned long lclk,
- const struct omap_video_timings *t, u16 pos_x,
+ const struct videomode *vm, u16 pos_x,
u16 width, u16 height, u16 out_width, u16 out_height,
bool five_taps)
{
@@ -2189,14 +2189,16 @@ static int check_horiz_timing_omap3(unsigned long pclk, unsigned long lclk,
u64 val, blank;
int i;
- nonactive = t->x_res + t->hfp + t->hsw + t->hbp - out_width;
+ nonactive = vm->hactive + vm->hfront_porch + vm->hsync_len +
+ vm->hback_porch - out_width;
i = 0;
if (out_height < height)
i++;
if (out_width < width)
i++;
- blank = div_u64((u64)(t->hbp + t->hsw + t->hfp) * lclk, pclk);
+ blank = div_u64((u64)(vm->hback_porch + vm->hsync_len + vm->hfront_porch) *
+ lclk, pclk);
DSSDBG("blanking period + ppl = %llu (limit = %u)\n", blank, limits[i]);
if (blank <= limits[i])
return -EINVAL;
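The check above scales the horizontal blanking width from pixel clocks into functional (lclk) cycles before comparing it against the scaling-dependent minimum, and rejects the configuration when the blanking is too short. Illustrative arithmetic (values assumed, borrowed from the sharp_ls porches with lclk == pclk):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* assumed inputs: sharp_ls horizontal blanking, lclk == pclk */
	uint64_t hback_porch = 28, hsync_len = 2, hfront_porch = 1;
	uint64_t lclk = 19200000, pclk = 19200000;

	/* same scaling as div_u64((u64)(hbp + hsw + hfp) * lclk, pclk) */
	printf("blank = %llu lclk cycles\n", (unsigned long long)
	       ((hback_porch + hsync_len + hfront_porch) * lclk / pclk));
	return 0;
}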
@@ -2231,7 +2233,7 @@ static int check_horiz_timing_omap3(unsigned long pclk, unsigned long lclk,
}
static unsigned long calc_core_clk_five_taps(unsigned long pclk,
- const struct omap_video_timings *mgr_timings, u16 width,
+ const struct videomode *vm, u16 width,
u16 height, u16 out_width, u16 out_height,
enum omap_color_mode color_mode)
{
@@ -2242,7 +2244,7 @@ static unsigned long calc_core_clk_five_taps(unsigned long pclk,
return (unsigned long) pclk;
if (height > out_height) {
- unsigned int ppl = mgr_timings->x_res;
+ unsigned int ppl = vm->hactive;
tmp = (u64)pclk * height * out_width;
do_div(tmp, 2 * out_height * ppl);
@@ -2324,7 +2326,7 @@ static unsigned long calc_core_clk_44xx(unsigned long pclk, u16 width,
}
static int dispc_ovl_calc_scaling_24xx(unsigned long pclk, unsigned long lclk,
- const struct omap_video_timings *mgr_timings,
+ const struct videomode *vm,
u16 width, u16 height, u16 out_width, u16 out_height,
enum omap_color_mode color_mode, bool *five_taps,
int *x_predecim, int *y_predecim, int *decim_x, int *decim_y,
@@ -2370,7 +2372,7 @@ static int dispc_ovl_calc_scaling_24xx(unsigned long pclk, unsigned long lclk,
}
static int dispc_ovl_calc_scaling_34xx(unsigned long pclk, unsigned long lclk,
- const struct omap_video_timings *mgr_timings,
+ const struct videomode *vm,
u16 width, u16 height, u16 out_width, u16 out_height,
enum omap_color_mode color_mode, bool *five_taps,
int *x_predecim, int *y_predecim, int *decim_x, int *decim_y,
@@ -2392,7 +2394,7 @@ static int dispc_ovl_calc_scaling_34xx(unsigned long pclk, unsigned long lclk,
*five_taps = false;
again:
if (*five_taps)
- *core_clk = calc_core_clk_five_taps(pclk, mgr_timings,
+ *core_clk = calc_core_clk_five_taps(pclk, vm,
in_width, in_height, out_width,
out_height, color_mode);
else
@@ -2400,7 +2402,7 @@ again:
in_height, out_width, out_height,
mem_to_mem);
- error = check_horiz_timing_omap3(pclk, lclk, mgr_timings,
+ error = check_horiz_timing_omap3(pclk, lclk, vm,
pos_x, in_width, in_height, out_width,
out_height, *five_taps);
if (error && *five_taps) {
@@ -2435,7 +2437,7 @@ again:
return -EINVAL;
}
- if (check_horiz_timing_omap3(pclk, lclk, mgr_timings, pos_x, in_width,
+ if (check_horiz_timing_omap3(pclk, lclk, vm, pos_x, in_width,
in_height, out_width, out_height, *five_taps)) {
DSSERR("horizontal timing too tight\n");
return -EINVAL;
@@ -2455,7 +2457,7 @@ again:
}
static int dispc_ovl_calc_scaling_44xx(unsigned long pclk, unsigned long lclk,
- const struct omap_video_timings *mgr_timings,
+ const struct videomode *vm,
u16 width, u16 height, u16 out_width, u16 out_height,
enum omap_color_mode color_mode, bool *five_taps,
int *x_predecim, int *y_predecim, int *decim_x, int *decim_y,
@@ -2501,7 +2503,7 @@ static int dispc_ovl_calc_scaling_44xx(unsigned long pclk, unsigned long lclk,
static int dispc_ovl_calc_scaling(unsigned long pclk, unsigned long lclk,
enum omap_overlay_caps caps,
- const struct omap_video_timings *mgr_timings,
+ const struct videomode *vm,
u16 width, u16 height, u16 out_width, u16 out_height,
enum omap_color_mode color_mode, bool *five_taps,
int *x_predecim, int *y_predecim, u16 pos_x,
@@ -2515,7 +2517,7 @@ static int dispc_ovl_calc_scaling(unsigned long pclk, unsigned long lclk,
if (width == out_width && height == out_height)
return 0;
- if (!mem_to_mem && (pclk == 0 || mgr_timings->pixelclock == 0)) {
+ if (!mem_to_mem && (pclk == 0 || vm->pixelclock == 0)) {
DSSERR("cannot calculate scaling settings: pclk is zero\n");
return -EINVAL;
}
@@ -2551,7 +2553,7 @@ static int dispc_ovl_calc_scaling(unsigned long pclk, unsigned long lclk,
if (decim_y > *y_predecim || out_height > height * 8)
return -EINVAL;
- ret = dispc.feat->calc_scaling(pclk, lclk, mgr_timings, width, height,
+ ret = dispc.feat->calc_scaling(pclk, lclk, vm, width, height,
out_width, out_height, color_mode, five_taps,
x_predecim, y_predecim, &decim_x, &decim_y, pos_x, &core_clk,
mem_to_mem);
@@ -2591,7 +2593,7 @@ static int dispc_ovl_setup_common(enum omap_plane plane,
u16 out_width, u16 out_height, enum omap_color_mode color_mode,
u8 rotation, bool mirror, u8 zorder, u8 pre_mult_alpha,
u8 global_alpha, enum omap_dss_rotation_type rotation_type,
- bool replication, const struct omap_video_timings *mgr_timings,
+ bool replication, const struct videomode *vm,
bool mem_to_mem)
{
bool five_taps = true;
@@ -2605,7 +2607,7 @@ static int dispc_ovl_setup_common(enum omap_plane plane,
u16 in_height = height;
u16 in_width = width;
int x_predecim = 1, y_predecim = 1;
- bool ilace = mgr_timings->interlace;
+ bool ilace = !!(vm->flags & DISPLAY_FLAGS_INTERLACED);
unsigned long pclk = dispc_plane_pclk_rate(plane);
unsigned long lclk = dispc_plane_lclk_rate(plane);
@@ -2647,7 +2649,7 @@ static int dispc_ovl_setup_common(enum omap_plane plane,
if (!dss_feat_color_mode_supported(plane, color_mode))
return -EINVAL;
- r = dispc_ovl_calc_scaling(pclk, lclk, caps, mgr_timings, in_width,
+ r = dispc_ovl_calc_scaling(pclk, lclk, caps, vm, in_width,
in_height, out_width, out_height, color_mode,
&five_taps, &x_predecim, &y_predecim, pos_x,
rotation_type, mem_to_mem);
@@ -2784,7 +2786,7 @@ static int dispc_ovl_setup_common(enum omap_plane plane,
}
int dispc_ovl_setup(enum omap_plane plane, const struct omap_overlay_info *oi,
- bool replication, const struct omap_video_timings *mgr_timings,
+ bool replication, const struct videomode *vm,
bool mem_to_mem)
{
int r;
@@ -2803,14 +2805,14 @@ int dispc_ovl_setup(enum omap_plane plane, const struct omap_overlay_info *oi,
oi->screen_width, oi->pos_x, oi->pos_y, oi->width, oi->height,
oi->out_width, oi->out_height, oi->color_mode, oi->rotation,
oi->mirror, oi->zorder, oi->pre_mult_alpha, oi->global_alpha,
- oi->rotation_type, replication, mgr_timings, mem_to_mem);
+ oi->rotation_type, replication, vm, mem_to_mem);
return r;
}
EXPORT_SYMBOL(dispc_ovl_setup);
int dispc_wb_setup(const struct omap_dss_writeback_info *wi,
- bool mem_to_mem, const struct omap_video_timings *mgr_timings)
+ bool mem_to_mem, const struct videomode *vm)
{
int r;
u32 l;
@@ -2819,8 +2821,8 @@ int dispc_wb_setup(const struct omap_dss_writeback_info *wi,
const u8 zorder = 0, global_alpha = 0;
const bool replication = false;
bool truncation;
- int in_width = mgr_timings->x_res;
- int in_height = mgr_timings->y_res;
+ int in_width = vm->hactive;
+ int in_height = vm->vactive;
enum omap_overlay_caps caps =
OMAP_DSS_OVL_CAP_SCALE | OMAP_DSS_OVL_CAP_PRE_MULT_ALPHA;
@@ -2833,7 +2835,7 @@ int dispc_wb_setup(const struct omap_dss_writeback_info *wi,
wi->buf_width, pos_x, pos_y, in_width, in_height, wi->width,
wi->height, wi->color_mode, wi->rotation, wi->mirror, zorder,
wi->pre_mult_alpha, global_alpha, wi->rotation_type,
- replication, mgr_timings, mem_to_mem);
+ replication, vm, mem_to_mem);
switch (wi->color_mode) {
case OMAP_DSS_COLOR_RGB16:
@@ -2867,8 +2869,8 @@ int dispc_wb_setup(const struct omap_dss_writeback_info *wi,
} else {
int wbdelay;
- wbdelay = min(mgr_timings->vfp + mgr_timings->vsw +
- mgr_timings->vbp, 255);
+ wbdelay = min(vm->vfront_porch +
+ vm->vsync_len + vm->vback_porch, (u32)255);
/* WBDELAYCOUNT */
REG_FLD_MOD(DISPC_OVL_ATTRIBUTES2(plane), wbdelay, 7, 0);
@@ -3093,10 +3095,10 @@ static bool _dispc_mgr_size_ok(u16 width, u16 height)
height <= dispc.feat->mgr_height_max;
}
-static bool _dispc_lcd_timings_ok(int hsw, int hfp, int hbp,
+static bool _dispc_lcd_timings_ok(int hsync_len, int hfp, int hbp,
int vsw, int vfp, int vbp)
{
- if (hsw < 1 || hsw > dispc.feat->sw_max ||
+ if (hsync_len < 1 || hsync_len > dispc.feat->sw_max ||
hfp < 1 || hfp > dispc.feat->hp_max ||
hbp < 1 || hbp > dispc.feat->hp_max ||
vsw < 1 || vsw > dispc.feat->sw_max ||
@@ -3110,113 +3112,77 @@ static bool _dispc_mgr_pclk_ok(enum omap_channel channel,
unsigned long pclk)
{
if (dss_mgr_is_lcd(channel))
- return pclk <= dispc.feat->max_lcd_pclk ? true : false;
+ return pclk <= dispc.feat->max_lcd_pclk;
else
- return pclk <= dispc.feat->max_tv_pclk ? true : false;
+ return pclk <= dispc.feat->max_tv_pclk;
}
-bool dispc_mgr_timings_ok(enum omap_channel channel,
- const struct omap_video_timings *timings)
+bool dispc_mgr_timings_ok(enum omap_channel channel, const struct videomode *vm)
{
- if (!_dispc_mgr_size_ok(timings->x_res, timings->y_res))
+ if (!_dispc_mgr_size_ok(vm->hactive, vm->vactive))
return false;
- if (!_dispc_mgr_pclk_ok(channel, timings->pixelclock))
+ if (!_dispc_mgr_pclk_ok(channel, vm->pixelclock))
return false;
if (dss_mgr_is_lcd(channel)) {
/* TODO: OMAP4+ supports interlace for LCD outputs */
- if (timings->interlace)
+ if (vm->flags & DISPLAY_FLAGS_INTERLACED)
return false;
- if (!_dispc_lcd_timings_ok(timings->hsw, timings->hfp,
- timings->hbp, timings->vsw, timings->vfp,
- timings->vbp))
+ if (!_dispc_lcd_timings_ok(vm->hsync_len,
+ vm->hfront_porch, vm->hback_porch,
+ vm->vsync_len, vm->vfront_porch,
+ vm->vback_porch))
return false;
}
return true;
}
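One subtlety in the rewritten polarity handling below: DISPLAY_FLAGS_*_LOW and *_HIGH are independent bits, so the code tests only the *_HIGH bit and treats everything else, including an unset polarity, as active-low. A minimal kernel-context sketch of the same idiom (helper name is hypothetical):

#include <linux/types.h>
#include <video/display_timing.h>

/* sketch: vsync invert bit as derived in _dispc_mgr_set_lcd_timings() */
static bool dispc_vsync_inverted(enum display_flags flags)
{
	/* only the *_HIGH bit is tested; absent polarity means active-low */
	return !(flags & DISPLAY_FLAGS_VSYNC_HIGH);
}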
-static void _dispc_mgr_set_lcd_timings(enum omap_channel channel, int hsw,
- int hfp, int hbp, int vsw, int vfp, int vbp,
- enum omap_dss_signal_level vsync_level,
- enum omap_dss_signal_level hsync_level,
- enum omap_dss_signal_edge data_pclk_edge,
- enum omap_dss_signal_level de_level,
- enum omap_dss_signal_edge sync_pclk_edge)
-
+static void _dispc_mgr_set_lcd_timings(enum omap_channel channel,
+ const struct videomode *vm)
{
u32 timing_h, timing_v, l;
bool onoff, rf, ipc, vs, hs, de;
- timing_h = FLD_VAL(hsw-1, dispc.feat->sw_start, 0) |
- FLD_VAL(hfp-1, dispc.feat->fp_start, 8) |
- FLD_VAL(hbp-1, dispc.feat->bp_start, 20);
- timing_v = FLD_VAL(vsw-1, dispc.feat->sw_start, 0) |
- FLD_VAL(vfp, dispc.feat->fp_start, 8) |
- FLD_VAL(vbp, dispc.feat->bp_start, 20);
+ timing_h = FLD_VAL(vm->hsync_len - 1, dispc.feat->sw_start, 0) |
+ FLD_VAL(vm->hfront_porch - 1, dispc.feat->fp_start, 8) |
+ FLD_VAL(vm->hback_porch - 1, dispc.feat->bp_start, 20);
+ timing_v = FLD_VAL(vm->vsync_len - 1, dispc.feat->sw_start, 0) |
+ FLD_VAL(vm->vfront_porch, dispc.feat->fp_start, 8) |
+ FLD_VAL(vm->vback_porch, dispc.feat->bp_start, 20);
dispc_write_reg(DISPC_TIMING_H(channel), timing_h);
dispc_write_reg(DISPC_TIMING_V(channel), timing_v);
- switch (vsync_level) {
- case OMAPDSS_SIG_ACTIVE_LOW:
- vs = true;
- break;
- case OMAPDSS_SIG_ACTIVE_HIGH:
+ if (vm->flags & DISPLAY_FLAGS_VSYNC_HIGH)
vs = false;
- break;
- default:
- BUG();
- }
+ else
+ vs = true;
- switch (hsync_level) {
- case OMAPDSS_SIG_ACTIVE_LOW:
- hs = true;
- break;
- case OMAPDSS_SIG_ACTIVE_HIGH:
+ if (vm->flags & DISPLAY_FLAGS_HSYNC_HIGH)
hs = false;
- break;
- default:
- BUG();
- }
+ else
+ hs = true;
- switch (de_level) {
- case OMAPDSS_SIG_ACTIVE_LOW:
- de = true;
- break;
- case OMAPDSS_SIG_ACTIVE_HIGH:
+ if (vm->flags & DISPLAY_FLAGS_DE_HIGH)
de = false;
- break;
- default:
- BUG();
- }
+ else
+ de = true;
- switch (data_pclk_edge) {
- case OMAPDSS_DRIVE_SIG_RISING_EDGE:
+ if (vm->flags & DISPLAY_FLAGS_PIXDATA_POSEDGE)
ipc = false;
- break;
- case OMAPDSS_DRIVE_SIG_FALLING_EDGE:
+ else
ipc = true;
- break;
- default:
- BUG();
- }
/* always use the 'rf' setting */
onoff = true;
- switch (sync_pclk_edge) {
- case OMAPDSS_DRIVE_SIG_FALLING_EDGE:
- rf = false;
- break;
- case OMAPDSS_DRIVE_SIG_RISING_EDGE:
+ if (vm->flags & DISPLAY_FLAGS_SYNC_POSEDGE)
rf = true;
- break;
- default:
- BUG();
- }
+ else
+ rf = false;
l = FLD_VAL(onoff, 17, 17) |
FLD_VAL(rf, 16, 16) |
@@ -3253,13 +3219,13 @@ static void _dispc_mgr_set_lcd_timings(enum omap_channel channel, int hsw,
/* change name to mode? */
void dispc_mgr_set_timings(enum omap_channel channel,
- const struct omap_video_timings *timings)
+ const struct videomode *vm)
{
unsigned xtot, ytot;
unsigned long ht, vt;
- struct omap_video_timings t = *timings;
+ struct videomode t = *vm;
- DSSDBG("channel %d xres %u yres %u\n", channel, t.x_res, t.y_res);
+ DSSDBG("channel %d xres %u yres %u\n", channel, t.hactive, t.vactive);
if (!dispc_mgr_timings_ok(channel, &t)) {
BUG();
@@ -3267,34 +3233,37 @@ void dispc_mgr_set_timings(enum omap_channel channel,
}
if (dss_mgr_is_lcd(channel)) {
- _dispc_mgr_set_lcd_timings(channel, t.hsw, t.hfp, t.hbp, t.vsw,
- t.vfp, t.vbp, t.vsync_level, t.hsync_level,
- t.data_pclk_edge, t.de_level, t.sync_pclk_edge);
+ _dispc_mgr_set_lcd_timings(channel, &t);
- xtot = t.x_res + t.hfp + t.hsw + t.hbp;
- ytot = t.y_res + t.vfp + t.vsw + t.vbp;
+ xtot = t.hactive + t.hfront_porch + t.hsync_len + t.hback_porch;
+ ytot = t.vactive + t.vfront_porch + t.vsync_len + t.vback_porch;
- ht = timings->pixelclock / xtot;
- vt = timings->pixelclock / xtot / ytot;
+ ht = vm->pixelclock / xtot;
+ vt = vm->pixelclock / xtot / ytot;
- DSSDBG("pck %u\n", timings->pixelclock);
- DSSDBG("hsw %d hfp %d hbp %d vsw %d vfp %d vbp %d\n",
- t.hsw, t.hfp, t.hbp, t.vsw, t.vfp, t.vbp);
+ DSSDBG("pck %lu\n", vm->pixelclock);
+ DSSDBG("hsync_len %d hfp %d hbp %d vsw %d vfp %d vbp %d\n",
+ t.hsync_len, t.hfront_porch, t.hback_porch,
+ t.vsync_len, t.vfront_porch, t.vback_porch);
DSSDBG("vsync_level %d hsync_level %d data_pclk_edge %d de_level %d sync_pclk_edge %d\n",
- t.vsync_level, t.hsync_level, t.data_pclk_edge,
- t.de_level, t.sync_pclk_edge);
+ !!(t.flags & DISPLAY_FLAGS_VSYNC_HIGH),
+ !!(t.flags & DISPLAY_FLAGS_HSYNC_HIGH),
+ !!(t.flags & DISPLAY_FLAGS_PIXDATA_POSEDGE),
+ !!(t.flags & DISPLAY_FLAGS_DE_HIGH),
+ !!(t.flags & DISPLAY_FLAGS_SYNC_POSEDGE));
DSSDBG("hsync %luHz, vsync %luHz\n", ht, vt);
} else {
- if (t.interlace)
- t.y_res /= 2;
+ if (t.flags & DISPLAY_FLAGS_INTERLACED)
+ t.vactive /= 2;
if (dispc.feat->supports_double_pixel)
- REG_FLD_MOD(DISPC_CONTROL, t.double_pixel ? 1 : 0,
- 19, 17);
+ REG_FLD_MOD(DISPC_CONTROL,
+ !!(t.flags & DISPLAY_FLAGS_DOUBLECLK),
+ 19, 17);
}
- dispc_mgr_set_size(channel, t.x_res, t.y_res);
+ dispc_mgr_set_size(channel, t.hactive, t.vactive);
}
EXPORT_SYMBOL(dispc_mgr_set_timings);
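On the TV path the function halves vactive for interlaced modes, since the manager is programmed per field, and drives the double-pixel control bit straight from DISPLAY_FLAGS_DOUBLECLK. A minimal sketch of the field-height derivation (helper name is hypothetical):

#include <linux/types.h>
#include <video/videomode.h>

/* sketch: per-field height as computed in dispc_mgr_set_timings() */
static u16 dispc_field_height(const struct videomode *vm)
{
	u32 vactive = vm->vactive;

	/* interlaced modes are programmed per field, i.e. at half height */
	if (vm->flags & DISPLAY_FLAGS_INTERLACED)
		vactive /= 2;

	return vactive;
}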
@@ -4214,23 +4183,20 @@ EXPORT_SYMBOL(dispc_free_irq);
*/
static const struct dispc_errata_i734_data {
- struct omap_video_timings timings;
+ struct videomode vm;
struct omap_overlay_info ovli;
struct omap_overlay_manager_info mgri;
struct dss_lcd_mgr_config lcd_conf;
} i734 = {
- .timings = {
- .x_res = 8, .y_res = 1,
+ .vm = {
+ .hactive = 8, .vactive = 1,
.pixelclock = 16000000,
- .hsw = 8, .hfp = 4, .hbp = 4,
- .vsw = 1, .vfp = 1, .vbp = 1,
- .vsync_level = OMAPDSS_SIG_ACTIVE_LOW,
- .hsync_level = OMAPDSS_SIG_ACTIVE_LOW,
- .interlace = false,
- .data_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE,
- .de_level = OMAPDSS_SIG_ACTIVE_HIGH,
- .sync_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE,
- .double_pixel = false,
+ .hsync_len = 8, .hfront_porch = 4, .hback_porch = 4,
+ .vsync_len = 1, .vfront_porch = 1, .vback_porch = 1,
+
+ .flags = DISPLAY_FLAGS_HSYNC_LOW | DISPLAY_FLAGS_VSYNC_LOW |
+ DISPLAY_FLAGS_DE_HIGH | DISPLAY_FLAGS_SYNC_POSEDGE |
+ DISPLAY_FLAGS_PIXDATA_POSEDGE,
},
.ovli = {
.screen_width = 1,
@@ -4320,7 +4286,7 @@ static void dispc_errata_i734_wa(void)
/* Setup and enable GFX plane */
dispc_ovl_set_channel_out(OMAP_DSS_GFX, OMAP_DSS_CHANNEL_LCD);
- dispc_ovl_setup(OMAP_DSS_GFX, &ovli, false, &i734.timings, false);
+ dispc_ovl_setup(OMAP_DSS_GFX, &ovli, false, &i734.vm, false);
dispc_ovl_enable(OMAP_DSS_GFX, true);
/* Set up and enable display manager for LCD1 */
@@ -4328,7 +4294,7 @@ static void dispc_errata_i734_wa(void)
dispc_calc_clock_rates(dss_get_dispc_clk_rate(),
&lcd_conf.clock_info);
dispc_mgr_set_lcd_config(OMAP_DSS_CHANNEL_LCD, &lcd_conf);
- dispc_mgr_set_timings(OMAP_DSS_CHANNEL_LCD, &i734.timings);
+ dispc_mgr_set_timings(OMAP_DSS_CHANNEL_LCD, &i734.vm);
dispc_clear_irqstatus(framedone_irq);
diff --git a/drivers/gpu/drm/omapdrm/dss/display.c b/drivers/gpu/drm/omapdrm/dss/display.c
index 8dcdd7cf9937..425a5a8dff8b 100644
--- a/drivers/gpu/drm/omapdrm/dss/display.c
+++ b/drivers/gpu/drm/omapdrm/dss/display.c
@@ -35,8 +35,8 @@
void omapdss_default_get_resolution(struct omap_dss_device *dssdev,
u16 *xres, u16 *yres)
{
- *xres = dssdev->panel.timings.x_res;
- *yres = dssdev->panel.timings.y_res;
+ *xres = dssdev->panel.vm.hactive;
+ *yres = dssdev->panel.vm.vactive;
}
EXPORT_SYMBOL(omapdss_default_get_resolution);
@@ -72,9 +72,9 @@ int omapdss_default_get_recommended_bpp(struct omap_dss_device *dssdev)
EXPORT_SYMBOL(omapdss_default_get_recommended_bpp);
void omapdss_default_get_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
- *timings = dssdev->panel.timings;
+ *vm = dssdev->panel.vm;
}
EXPORT_SYMBOL(omapdss_default_get_timings);
@@ -217,73 +217,3 @@ struct omap_dss_device *omap_dss_find_device(void *data,
return NULL;
}
EXPORT_SYMBOL(omap_dss_find_device);
-
-void videomode_to_omap_video_timings(const struct videomode *vm,
- struct omap_video_timings *ovt)
-{
- memset(ovt, 0, sizeof(*ovt));
-
- ovt->pixelclock = vm->pixelclock;
- ovt->x_res = vm->hactive;
- ovt->hbp = vm->hback_porch;
- ovt->hfp = vm->hfront_porch;
- ovt->hsw = vm->hsync_len;
- ovt->y_res = vm->vactive;
- ovt->vbp = vm->vback_porch;
- ovt->vfp = vm->vfront_porch;
- ovt->vsw = vm->vsync_len;
-
- ovt->vsync_level = vm->flags & DISPLAY_FLAGS_VSYNC_HIGH ?
- OMAPDSS_SIG_ACTIVE_HIGH :
- OMAPDSS_SIG_ACTIVE_LOW;
- ovt->hsync_level = vm->flags & DISPLAY_FLAGS_HSYNC_HIGH ?
- OMAPDSS_SIG_ACTIVE_HIGH :
- OMAPDSS_SIG_ACTIVE_LOW;
- ovt->de_level = vm->flags & DISPLAY_FLAGS_DE_HIGH ?
- OMAPDSS_SIG_ACTIVE_HIGH :
- OMAPDSS_SIG_ACTIVE_LOW;
- ovt->data_pclk_edge = vm->flags & DISPLAY_FLAGS_PIXDATA_POSEDGE ?
- OMAPDSS_DRIVE_SIG_RISING_EDGE :
- OMAPDSS_DRIVE_SIG_FALLING_EDGE;
-
- ovt->sync_pclk_edge = ovt->data_pclk_edge;
-}
-EXPORT_SYMBOL(videomode_to_omap_video_timings);
-
-void omap_video_timings_to_videomode(const struct omap_video_timings *ovt,
- struct videomode *vm)
-{
- memset(vm, 0, sizeof(*vm));
-
- vm->pixelclock = ovt->pixelclock;
-
- vm->hactive = ovt->x_res;
- vm->hback_porch = ovt->hbp;
- vm->hfront_porch = ovt->hfp;
- vm->hsync_len = ovt->hsw;
- vm->vactive = ovt->y_res;
- vm->vback_porch = ovt->vbp;
- vm->vfront_porch = ovt->vfp;
- vm->vsync_len = ovt->vsw;
-
- if (ovt->hsync_level == OMAPDSS_SIG_ACTIVE_HIGH)
- vm->flags |= DISPLAY_FLAGS_HSYNC_HIGH;
- else
- vm->flags |= DISPLAY_FLAGS_HSYNC_LOW;
-
- if (ovt->vsync_level == OMAPDSS_SIG_ACTIVE_HIGH)
- vm->flags |= DISPLAY_FLAGS_VSYNC_HIGH;
- else
- vm->flags |= DISPLAY_FLAGS_VSYNC_LOW;
-
- if (ovt->de_level == OMAPDSS_SIG_ACTIVE_HIGH)
- vm->flags |= DISPLAY_FLAGS_DE_HIGH;
- else
- vm->flags |= DISPLAY_FLAGS_DE_LOW;
-
- if (ovt->data_pclk_edge == OMAPDSS_DRIVE_SIG_RISING_EDGE)
- vm->flags |= DISPLAY_FLAGS_PIXDATA_POSEDGE;
- else
- vm->flags |= DISPLAY_FLAGS_PIXDATA_NEGEDGE;
-}
-EXPORT_SYMBOL(omap_video_timings_to_videomode);
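With every consumer now carrying a struct videomode natively, both conversion helpers can simply be deleted; the level-to-flag correspondence they encoded survives only where registers are programmed. A condensed sketch of that correspondence (hypothetical helper, not part of the patch):

#include <linux/types.h>
#include <video/display_timing.h>

/* sketch: sync polarity expressed as display_flags bits */
static enum display_flags sync_flags(bool hsync_active_high, bool vsync_active_high)
{
	enum display_flags flags = 0;

	flags |= hsync_active_high ? DISPLAY_FLAGS_HSYNC_HIGH : DISPLAY_FLAGS_HSYNC_LOW;
	flags |= vsync_active_high ? DISPLAY_FLAGS_VSYNC_HIGH : DISPLAY_FLAGS_VSYNC_LOW;

	return flags;
}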
diff --git a/drivers/gpu/drm/omapdrm/dss/dpi.c b/drivers/gpu/drm/omapdrm/dss/dpi.c
index b268295b76cf..e75162d26ac0 100644
--- a/drivers/gpu/drm/omapdrm/dss/dpi.c
+++ b/drivers/gpu/drm/omapdrm/dss/dpi.c
@@ -47,7 +47,7 @@ struct dpi_data {
struct mutex lock;
- struct omap_video_timings timings;
+ struct videomode vm;
struct dss_lcd_mgr_config mgr_config;
int data_lines;
@@ -333,31 +333,31 @@ static int dpi_set_mode(struct dpi_data *dpi)
{
struct omap_dss_device *out = &dpi->output;
enum omap_channel channel = out->dispc_channel;
- struct omap_video_timings *t = &dpi->timings;
+ struct videomode *vm = &dpi->vm;
int lck_div = 0, pck_div = 0;
unsigned long fck = 0;
unsigned long pck;
int r = 0;
if (dpi->pll)
- r = dpi_set_pll_clk(dpi, channel, t->pixelclock, &fck,
+ r = dpi_set_pll_clk(dpi, channel, vm->pixelclock, &fck,
&lck_div, &pck_div);
else
- r = dpi_set_dispc_clk(dpi, t->pixelclock, &fck,
+ r = dpi_set_dispc_clk(dpi, vm->pixelclock, &fck,
&lck_div, &pck_div);
if (r)
return r;
pck = fck / lck_div / pck_div;
- if (pck != t->pixelclock) {
- DSSWARN("Could not find exact pixel clock. Requested %d Hz, got %lu Hz\n",
- t->pixelclock, pck);
+ if (pck != vm->pixelclock) {
+ DSSWARN("Could not find exact pixel clock. Requested %lu Hz, got %lu Hz\n",
+ vm->pixelclock, pck);
- t->pixelclock = pck;
+ vm->pixelclock = pck;
}
- dss_mgr_set_timings(channel, t);
+ dss_mgr_set_timings(channel, vm);
return 0;
}
@@ -476,7 +476,7 @@ static void dpi_display_disable(struct omap_dss_device *dssdev)
}
static void dpi_set_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
struct dpi_data *dpi = dpi_get_data_from_dssdev(dssdev);
@@ -484,25 +484,25 @@ static void dpi_set_timings(struct omap_dss_device *dssdev,
mutex_lock(&dpi->lock);
- dpi->timings = *timings;
+ dpi->vm = *vm;
mutex_unlock(&dpi->lock);
}
static void dpi_get_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
struct dpi_data *dpi = dpi_get_data_from_dssdev(dssdev);
mutex_lock(&dpi->lock);
- *timings = dpi->timings;
+ *vm = dpi->vm;
mutex_unlock(&dpi->lock);
}
static int dpi_check_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
struct dpi_data *dpi = dpi_get_data_from_dssdev(dssdev);
enum omap_channel channel = dpi->output.dispc_channel;
@@ -512,23 +512,23 @@ static int dpi_check_timings(struct omap_dss_device *dssdev,
struct dpi_clk_calc_ctx ctx;
bool ok;
- if (timings->x_res % 8 != 0)
+ if (vm->hactive % 8 != 0)
return -EINVAL;
- if (!dispc_mgr_timings_ok(channel, timings))
+ if (!dispc_mgr_timings_ok(channel, vm))
return -EINVAL;
- if (timings->pixelclock == 0)
+ if (vm->pixelclock == 0)
return -EINVAL;
if (dpi->pll) {
- ok = dpi_pll_clk_calc(dpi, timings->pixelclock, &ctx);
+ ok = dpi_pll_clk_calc(dpi, vm->pixelclock, &ctx);
if (!ok)
return -EINVAL;
fck = ctx.pll_cinfo.clkout[ctx.clkout_idx];
} else {
- ok = dpi_dss_clk_calc(timings->pixelclock, &ctx);
+ ok = dpi_dss_clk_calc(vm->pixelclock, &ctx);
if (!ok)
return -EINVAL;
@@ -540,7 +540,7 @@ static int dpi_check_timings(struct omap_dss_device *dssdev,
pck = fck / lck_div / pck_div;
- timings->pixelclock = pck;
+ vm->pixelclock = pck;
return 0;
}
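Note that dpi_check_timings() is not a pure predicate: on success it may have rounded vm->pixelclock to the closest rate the clock tree can actually produce. A hedged usage sketch of the expected caller pattern (apply_mode is hypothetical):

#include "omapdss.h"

/* sketch: callers are expected to use the possibly-adjusted mode */
static int apply_mode(struct omap_dss_device *dssdev, struct videomode *vm)
{
	int r;

	r = dssdev->driver->check_timings(dssdev, vm);
	if (r)
		return r;

	/* vm->pixelclock now holds the achievable, possibly rounded rate */
	dssdev->driver->set_timings(dssdev, vm);

	return 0;
}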
diff --git a/drivers/gpu/drm/omapdrm/dss/dsi.c b/drivers/gpu/drm/omapdrm/dss/dsi.c
index e1be5e795cd8..f060bda31235 100644
--- a/drivers/gpu/drm/omapdrm/dss/dsi.c
+++ b/drivers/gpu/drm/omapdrm/dss/dsi.c
@@ -289,7 +289,7 @@ struct dsi_clk_calc_ctx {
struct dss_pll_clock_info dsi_cinfo;
struct dispc_clock_info dispc_cinfo;
- struct omap_video_timings dispc_vm;
+ struct videomode vm;
struct omap_dss_dsi_videomode_timings dsi_vm;
};
@@ -383,7 +383,7 @@ struct dsi_data {
unsigned scp_clk_refcount;
struct dss_lcd_mgr_config mgr_config;
- struct omap_video_timings timings;
+ struct videomode vm;
enum omap_dss_dsi_pixel_format pix_fmt;
enum omap_dss_dsi_mode mode;
struct omap_dss_dsi_videomode_timings vm_timings;
@@ -3321,12 +3321,12 @@ static void dsi_config_vp_num_line_buffers(struct platform_device *dsidev)
if (dsi->mode == OMAP_DSS_DSI_VIDEO_MODE) {
int bpp = dsi_get_pixel_size(dsi->pix_fmt);
- struct omap_video_timings *timings = &dsi->timings;
+ struct videomode *vm = &dsi->vm;
/*
* Don't use line buffers if width is greater than the video
* port's line buffer size
*/
- if (dsi->line_buffer_size <= timings->x_res * bpp / 8)
+ if (dsi->line_buffer_size <= vm->hactive * bpp / 8)
num_line_buffers = 0;
else
num_line_buffers = 2;
@@ -3453,7 +3453,7 @@ static void dsi_config_cmd_mode_interleaving(struct platform_device *dsidev)
int ddr_clk_pre, ddr_clk_post, enter_hs_mode_lat, exit_hs_mode_lat;
int tclk_trail, ths_exit, exiths_clk;
bool ddr_alwon;
- struct omap_video_timings *timings = &dsi->timings;
+ struct videomode *vm = &dsi->vm;
int bpp = dsi_get_pixel_size(dsi->pix_fmt);
int ndl = dsi->num_lanes_used - 1;
int dsi_fclk_hsdiv = dsi->user_dsi_cinfo.mX[HSDIV_DSI] + 1;
@@ -3494,7 +3494,7 @@ static void dsi_config_cmd_mode_interleaving(struct platform_device *dsidev)
exiths_clk = ths_exit + tclk_trail;
- width_bytes = DIV_ROUND_UP(timings->x_res * bpp, 8);
+ width_bytes = DIV_ROUND_UP(vm->hactive * bpp, 8);
bllp = hbp + hfp + hsa + DIV_ROUND_UP(width_bytes + 6, ndl);
if (!hsa_blanking_mode) {
@@ -3705,7 +3705,7 @@ static void dsi_proto_timings(struct platform_device *dsidev)
int vbp = dsi->vm_timings.vbp;
int window_sync = dsi->vm_timings.window_sync;
bool hsync_end;
- struct omap_video_timings *timings = &dsi->timings;
+ struct videomode *vm = &dsi->vm;
int bpp = dsi_get_pixel_size(dsi->pix_fmt);
int tl, t_he, width_bytes;
@@ -3713,7 +3713,7 @@ static void dsi_proto_timings(struct platform_device *dsidev)
t_he = hsync_end ?
((hsa == 0 && ndl == 3) ? 1 : DIV_ROUND_UP(4, ndl)) : 0;
- width_bytes = DIV_ROUND_UP(timings->x_res * bpp, 8);
+ width_bytes = DIV_ROUND_UP(vm->hactive * bpp, 8);
/* TL = t_HS + HSA + t_HE + HFP + ceil((WC + 6) / NDL) + HBP */
tl = DIV_ROUND_UP(4, ndl) + (hsync_end ? hsa : 0) + t_he + hfp +
@@ -3722,7 +3722,7 @@ static void dsi_proto_timings(struct platform_device *dsidev)
DSSDBG("HBP: %d, HFP: %d, HSA: %d, TL: %d TXBYTECLKHS\n", hbp,
hfp, hsync_end ? hsa : 0, tl);
DSSDBG("VBP: %d, VFP: %d, VSA: %d, VACT: %d lines\n", vbp, vfp,
- vsa, timings->y_res);
+ vsa, vm->vactive);
r = dsi_read_reg(dsidev, DSI_VM_TIMING1);
r = FLD_MOD(r, hbp, 11, 0); /* HBP */
@@ -3738,7 +3738,7 @@ static void dsi_proto_timings(struct platform_device *dsidev)
dsi_write_reg(dsidev, DSI_VM_TIMING2, r);
r = dsi_read_reg(dsidev, DSI_VM_TIMING3);
- r = FLD_MOD(r, timings->y_res, 14, 0); /* VACT */
+ r = FLD_MOD(r, vm->vactive, 14, 0); /* VACT */
r = FLD_MOD(r, tl, 31, 16); /* TL */
dsi_write_reg(dsidev, DSI_VM_TIMING3, r);
}
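The word count in the long packet header is simply the line payload in bytes, which the TL formula then spreads across the active lanes. Illustrative arithmetic (inputs assumed, not taken from the patch):

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	/* assumed inputs: 864-pixel line, 24 bpp, 3 data lanes */
	int hactive = 864, bpp = 24, ndl = 3;

	int width_bytes = DIV_ROUND_UP(hactive * bpp, 8);		/* 2592 */
	int active_byteclks = DIV_ROUND_UP(width_bytes + 6, ndl);	/* 866 */

	printf("WC=%d bytes, active=%d byteclks per lane\n",
	       width_bytes, active_byteclks);
	return 0;
}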
@@ -3856,7 +3856,7 @@ static int dsi_enable_video_output(struct omap_dss_device *dssdev, int channel)
/* MODE, 1 = video mode */
REG_FLD_MOD(dsidev, DSI_VC_CTRL(channel), 1, 4, 4);
- word_count = DIV_ROUND_UP(dsi->timings.x_res * bpp, 8);
+ word_count = DIV_ROUND_UP(dsi->vm.hactive * bpp, 8);
dsi_vc_write_long_header(dsidev, channel, data_type,
word_count, 0);
@@ -3918,8 +3918,8 @@ static void dsi_update_screen_dispc(struct platform_device *dsidev)
int r;
const unsigned channel = dsi->update_channel;
const unsigned line_buf_size = dsi->line_buffer_size;
- u16 w = dsi->timings.x_res;
- u16 h = dsi->timings.y_res;
+ u16 w = dsi->vm.hactive;
+ u16 h = dsi->vm.vactive;
DSSDBG("dsi_update_screen_dispc(%dx%d)\n", w, h);
@@ -3969,7 +3969,7 @@ static void dsi_update_screen_dispc(struct platform_device *dsidev)
msecs_to_jiffies(250));
BUG_ON(r == 0);
- dss_mgr_set_timings(dispc_channel, &dsi->timings);
+ dss_mgr_set_timings(dispc_channel, &dsi->vm);
dss_mgr_start_update(dispc_channel);
@@ -4056,8 +4056,8 @@ static int dsi_update(struct omap_dss_device *dssdev, int channel,
dsi->framedone_callback = callback;
dsi->framedone_data = data;
- dw = dsi->timings.x_res;
- dh = dsi->timings.y_res;
+ dw = dsi->vm.hactive;
+ dh = dsi->vm.vactive;
#ifdef DSI_PERF_MEASURE
dsi->update_bytes = dw * dh *
@@ -4120,16 +4120,21 @@ static int dsi_display_init_dispc(struct platform_device *dsidev,
/*
* override interlace, logic level and edge related parameters in
- * omap_video_timings with default values
+ * videomode with default values
*/
- dsi->timings.interlace = false;
- dsi->timings.hsync_level = OMAPDSS_SIG_ACTIVE_HIGH;
- dsi->timings.vsync_level = OMAPDSS_SIG_ACTIVE_HIGH;
- dsi->timings.data_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE;
- dsi->timings.de_level = OMAPDSS_SIG_ACTIVE_HIGH;
- dsi->timings.sync_pclk_edge = OMAPDSS_DRIVE_SIG_FALLING_EDGE;
-
- dss_mgr_set_timings(channel, &dsi->timings);
+ dsi->vm.flags &= ~DISPLAY_FLAGS_INTERLACED;
+ dsi->vm.flags &= ~DISPLAY_FLAGS_HSYNC_LOW;
+ dsi->vm.flags |= DISPLAY_FLAGS_HSYNC_HIGH;
+ dsi->vm.flags &= ~DISPLAY_FLAGS_VSYNC_LOW;
+ dsi->vm.flags |= DISPLAY_FLAGS_VSYNC_HIGH;
+ dsi->vm.flags &= ~DISPLAY_FLAGS_PIXDATA_NEGEDGE;
+ dsi->vm.flags |= DISPLAY_FLAGS_PIXDATA_POSEDGE;
+ dsi->vm.flags &= ~DISPLAY_FLAGS_DE_LOW;
+ dsi->vm.flags |= DISPLAY_FLAGS_DE_HIGH;
+ dsi->vm.flags &= ~DISPLAY_FLAGS_SYNC_POSEDGE;
+ dsi->vm.flags |= DISPLAY_FLAGS_SYNC_NEGEDGE;
+
+ dss_mgr_set_timings(channel, &dsi->vm);
r = dsi_configure_dispc_clocks(dsidev);
if (r)
@@ -4356,13 +4361,13 @@ static void print_dsi_vm(const char *str,
#undef TO_DSI_T
}
-static void print_dispc_vm(const char *str, const struct omap_video_timings *t)
+static void print_dispc_vm(const char *str, const struct videomode *vm)
{
- unsigned long pck = t->pixelclock;
+ unsigned long pck = vm->pixelclock;
int hact, bl, tot;
- hact = t->x_res;
- bl = t->hsw + t->hbp + t->hfp;
+ hact = vm->hactive;
+ bl = vm->hsync_len + vm->hback_porch + vm->hfront_porch;
tot = hact + bl;
#define TO_DISPC_T(x) ((u32)div64_u64((u64)x * 1000000000llu, pck))
@@ -4371,12 +4376,12 @@ static void print_dispc_vm(const char *str, const struct omap_video_timings *t)
"%u/%u/%u/%u = %u + %u = %u\n",
str,
pck,
- t->hsw, t->hbp, hact, t->hfp,
+ vm->hsync_len, vm->hback_porch, hact, vm->hfront_porch,
bl, hact, tot,
- TO_DISPC_T(t->hsw),
- TO_DISPC_T(t->hbp),
+ TO_DISPC_T(vm->hsync_len),
+ TO_DISPC_T(vm->hback_porch),
TO_DISPC_T(hact),
- TO_DISPC_T(t->hfp),
+ TO_DISPC_T(vm->hfront_porch),
TO_DISPC_T(bl),
TO_DISPC_T(hact),
TO_DISPC_T(tot));
@@ -4387,7 +4392,7 @@ static void print_dispc_vm(const char *str, const struct omap_video_timings *t)
static void print_dsi_dispc_vm(const char *str,
const struct omap_dss_dsi_videomode_timings *t)
{
- struct omap_video_timings vm = { 0 };
+ struct videomode vm = { 0 };
unsigned long byteclk = t->hsclk / 4;
unsigned long pck;
u64 dsi_tput;
@@ -4396,13 +4401,13 @@ static void print_dsi_dispc_vm(const char *str,
dsi_tput = (u64)byteclk * t->ndl * 8;
pck = (u32)div64_u64(dsi_tput, t->bitspp);
dsi_hact = DIV_ROUND_UP(DIV_ROUND_UP(t->hact * t->bitspp, 8) + 6, t->ndl);
dsi_htot = t->hss + t->hsa + t->hse + t->hbp + dsi_hact + t->hfp;
vm.pixelclock = pck;
- vm.hsw = div64_u64((u64)(t->hsa + t->hse) * pck, byteclk);
+ vm.hsync_len = div64_u64((u64)(t->hsa + t->hse) * pck, byteclk);
- vm.hbp = div64_u64((u64)t->hbp * pck, byteclk);
+ vm.hback_porch = div64_u64((u64)t->hbp * pck, byteclk);
- vm.hfp = div64_u64((u64)t->hfp * pck, byteclk);
- vm.x_res = t->hact;
+ vm.hfront_porch = div64_u64((u64)t->hfp * pck, byteclk);
+ vm.hactive = t->hact;
print_dispc_vm(str, &vm);
}
@@ -4412,19 +4417,19 @@ static bool dsi_cm_calc_dispc_cb(int lckd, int pckd, unsigned long lck,
unsigned long pck, void *data)
{
struct dsi_clk_calc_ctx *ctx = data;
- struct omap_video_timings *t = &ctx->dispc_vm;
+ struct videomode *vm = &ctx->vm;
ctx->dispc_cinfo.lck_div = lckd;
ctx->dispc_cinfo.pck_div = pckd;
ctx->dispc_cinfo.lck = lck;
ctx->dispc_cinfo.pck = pck;
- *t = *ctx->config->timings;
- t->pixelclock = pck;
- t->x_res = ctx->config->timings->x_res;
- t->y_res = ctx->config->timings->y_res;
- t->hsw = t->hfp = t->hbp = t->vsw = 1;
- t->vfp = t->vbp = 0;
+ *vm = *ctx->config->vm;
+ vm->pixelclock = pck;
+ vm->hactive = ctx->config->vm->hactive;
+ vm->vactive = ctx->config->vm->vactive;
+ vm->hsync_len = vm->hfront_porch = vm->hback_porch = vm->vsync_len = 1;
+ vm->vfront_porch = vm->vback_porch = 0;
return true;
}
@@ -4475,7 +4480,7 @@ static bool dsi_cm_calc(struct dsi_data *dsi,
* especially as we go to LP between each pixel packet due to HW
* "feature". So let's just estimate very roughly and multiply by 1.5.
*/
- pck = cfg->timings->pixelclock;
+ pck = cfg->vm->pixelclock;
pck = pck * 3 / 2;
txbyteclk = pck * bitspp / 8 / ndl;
@@ -4510,14 +4515,14 @@ static bool dsi_vm_calc_blanking(struct dsi_clk_calc_ctx *ctx)
int dispc_htot, dispc_hbl; /* pixels */
int dsi_htot, dsi_hact, dsi_hbl, hss, hse; /* byteclks */
int hfp, hsa, hbp;
- const struct omap_video_timings *req_vm;
- struct omap_video_timings *dispc_vm;
+ const struct videomode *req_vm;
+ struct videomode *dispc_vm;
struct omap_dss_dsi_videomode_timings *dsi_vm;
u64 dsi_tput, dispc_tput;
dsi_tput = (u64)byteclk * ndl * 8;
- req_vm = cfg->timings;
+ req_vm = cfg->vm;
req_pck_min = ctx->req_pck_min;
req_pck_max = ctx->req_pck_max;
req_pck_nom = ctx->req_pck_nom;
@@ -4525,9 +4530,10 @@ static bool dsi_vm_calc_blanking(struct dsi_clk_calc_ctx *ctx)
dispc_pck = ctx->dispc_cinfo.pck;
dispc_tput = (u64)dispc_pck * bitspp;
- xres = req_vm->x_res;
+ xres = req_vm->hactive;
- panel_hbl = req_vm->hfp + req_vm->hbp + req_vm->hsw;
+ panel_hbl = req_vm->hfront_porch + req_vm->hback_porch +
+ req_vm->hsync_len;
panel_htot = xres + panel_hbl;
dsi_hact = DIV_ROUND_UP(DIV_ROUND_UP(xres * bitspp, 8) + 6, ndl);
@@ -4557,7 +4563,7 @@ static bool dsi_vm_calc_blanking(struct dsi_clk_calc_ctx *ctx)
hss = DIV_ROUND_UP(4, ndl);
if (cfg->trans_mode == OMAP_DSS_DSI_PULSE_MODE) {
- if (ndl == 3 && req_vm->hsw == 0)
+ if (ndl == 3 && req_vm->hsync_len == 0)
hse = 1;
else
hse = DIV_ROUND_UP(4, ndl);
@@ -4596,14 +4602,14 @@ static bool dsi_vm_calc_blanking(struct dsi_clk_calc_ctx *ctx)
if (cfg->trans_mode != OMAP_DSS_DSI_PULSE_MODE) {
hsa = 0;
- } else if (ndl == 3 && req_vm->hsw == 0) {
+ } else if (ndl == 3 && req_vm->hsync_len == 0) {
hsa = 0;
} else {
- hsa = div64_u64((u64)req_vm->hsw * byteclk, req_pck_nom);
+ hsa = div64_u64((u64)req_vm->hsync_len * byteclk, req_pck_nom);
hsa = max(hsa - hse, 1);
}
- hbp = div64_u64((u64)req_vm->hbp * byteclk, req_pck_nom);
+ hbp = div64_u64((u64)req_vm->hback_porch * byteclk, req_pck_nom);
hbp = max(hbp, 1);
hfp = dsi_hbl - (hss + hsa + hse + hbp);
@@ -4633,10 +4639,10 @@ static bool dsi_vm_calc_blanking(struct dsi_clk_calc_ctx *ctx)
dsi_vm->hact = xres;
dsi_vm->hfp = hfp;
- dsi_vm->vsa = req_vm->vsw;
- dsi_vm->vbp = req_vm->vbp;
- dsi_vm->vact = req_vm->y_res;
- dsi_vm->vfp = req_vm->vfp;
+ dsi_vm->vsa = req_vm->vsync_len;
+ dsi_vm->vbp = req_vm->vback_porch;
+ dsi_vm->vact = req_vm->vactive;
+ dsi_vm->vfp = req_vm->vfront_porch;
dsi_vm->trans_mode = cfg->trans_mode;
@@ -4650,19 +4656,19 @@ static bool dsi_vm_calc_blanking(struct dsi_clk_calc_ctx *ctx)
/* setup DISPC videomode */
- dispc_vm = &ctx->dispc_vm;
+ dispc_vm = &ctx->vm;
*dispc_vm = *req_vm;
dispc_vm->pixelclock = dispc_pck;
if (cfg->trans_mode == OMAP_DSS_DSI_PULSE_MODE) {
- hsa = div64_u64((u64)req_vm->hsw * dispc_pck,
+ hsa = div64_u64((u64)req_vm->hsync_len * dispc_pck,
req_pck_nom);
hsa = max(hsa, 1);
} else {
hsa = 1;
}
- hbp = div64_u64((u64)req_vm->hbp * dispc_pck, req_pck_nom);
+ hbp = div64_u64((u64)req_vm->hback_porch * dispc_pck, req_pck_nom);
hbp = max(hbp, 1);
hfp = dispc_hbl - hsa - hbp;
@@ -4685,9 +4691,9 @@ static bool dsi_vm_calc_blanking(struct dsi_clk_calc_ctx *ctx)
if (hfp < 1)
return false;
- dispc_vm->hfp = hfp;
- dispc_vm->hsw = hsa;
- dispc_vm->hbp = hbp;
+ dispc_vm->hfront_porch = hfp;
+ dispc_vm->hsync_len = hsa;
+ dispc_vm->hback_porch = hbp;
return true;
}
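Every horizontal conversion in this function follows one rule: an interval of N pixel clocks lasts N * byteclk / pck_nom byte clocks on the DSI bus. Illustrative arithmetic (figures assumed, not from the patch):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* assumed figures: 112.5 MHz byte clock, 36 MHz nominal pixel clock */
	uint64_t byteclk = 112500000, pck_nom = 36000000;
	uint64_t hbp_pixels = 214;

	/* same conversion as div64_u64((u64)req_vm->hback_porch * byteclk, req_pck_nom) */
	printf("hbp = %llu byteclks\n",
	       (unsigned long long)(hbp_pixels * byteclk / pck_nom));
	return 0;
}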
@@ -4707,9 +4713,9 @@ static bool dsi_vm_calc_dispc_cb(int lckd, int pckd, unsigned long lck,
return false;
#ifdef PRINT_VERBOSE_VM_TIMINGS
- print_dispc_vm("dispc", &ctx->dispc_vm);
+ print_dispc_vm("dispc", &ctx->vm);
print_dsi_vm("dsi ", &ctx->dsi_vm);
- print_dispc_vm("req ", ctx->config->timings);
+ print_dispc_vm("req ", ctx->config->vm);
print_dsi_dispc_vm("act ", &ctx->dsi_vm);
#endif
@@ -4758,7 +4764,7 @@ static bool dsi_vm_calc(struct dsi_data *dsi,
const struct omap_dss_dsi_config *cfg,
struct dsi_clk_calc_ctx *ctx)
{
- const struct omap_video_timings *t = cfg->timings;
+ const struct videomode *vm = cfg->vm;
unsigned long clkin;
unsigned long pll_min;
unsigned long pll_max;
@@ -4774,9 +4780,9 @@ static bool dsi_vm_calc(struct dsi_data *dsi,
ctx->config = cfg;
/* these limits should come from the panel driver */
- ctx->req_pck_min = t->pixelclock - 1000;
- ctx->req_pck_nom = t->pixelclock;
- ctx->req_pck_max = t->pixelclock + 1000;
+ ctx->req_pck_min = vm->pixelclock - 1000;
+ ctx->req_pck_nom = vm->pixelclock;
+ ctx->req_pck_max = vm->pixelclock + 1000;
byteclk_min = div64_u64((u64)ctx->req_pck_min * bitspp, ndl * 8);
pll_min = max(cfg->hs_clk_min * 4, byteclk_min * 4 * 4);
@@ -4833,7 +4839,7 @@ static int dsi_set_config(struct omap_dss_device *dssdev,
dsi->user_dsi_cinfo = ctx.dsi_cinfo;
dsi->user_dispc_cinfo = ctx.dispc_cinfo;
- dsi->timings = ctx.dispc_vm;
+ dsi->vm = ctx.vm;
dsi->vm_timings = ctx.dsi_vm;
mutex_unlock(&dsi->lock);
@@ -5342,7 +5348,7 @@ static int dsi_bind(struct device *dev, struct device *master, void *data)
dsi->phy_base = devm_ioremap(&dsidev->dev, res->start,
resource_size(res));
- if (!dsi->proto_base) {
+ if (!dsi->phy_base) {
DSSERR("can't ioremap DSI PHY\n");
return -ENOMEM;
}
@@ -5362,7 +5368,7 @@ static int dsi_bind(struct device *dev, struct device *master, void *data)
dsi->pll_base = devm_ioremap(&dsidev->dev, res->start,
resource_size(res));
- if (!dsi->proto_base) {
+ if (!dsi->pll_base) {
DSSERR("can't ioremap DSI PLL\n");
return -ENOMEM;
}
diff --git a/drivers/gpu/drm/omapdrm/dss/dss.h b/drivers/gpu/drm/omapdrm/dss/dss.h
index 4fd06dc41cb3..56493b290731 100644
--- a/drivers/gpu/drm/omapdrm/dss/dss.h
+++ b/drivers/gpu/drm/omapdrm/dss/dss.h
@@ -366,8 +366,7 @@ bool dispc_div_calc(unsigned long dispc,
unsigned long pck_min, unsigned long pck_max,
dispc_div_calc_func func, void *data);
-bool dispc_mgr_timings_ok(enum omap_channel channel,
- const struct omap_video_timings *timings);
+bool dispc_mgr_timings_ok(enum omap_channel channel, const struct videomode *vm);
int dispc_calc_clock_rates(unsigned long dispc_fclk_rate,
struct dispc_clock_info *cinfo);
@@ -390,7 +389,7 @@ void dispc_wb_enable(bool enable);
bool dispc_wb_is_enabled(void);
void dispc_wb_set_channel_in(enum dss_writeback_channel channel);
int dispc_wb_setup(const struct omap_dss_writeback_info *wi,
- bool mem_to_mem, const struct omap_video_timings *timings);
+ bool mem_to_mem, const struct videomode *vm);
/* VENC */
int venc_init_platform_driver(void) __init;
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi.h b/drivers/gpu/drm/omapdrm/dss/hdmi.h
index 63e711545865..fb6cccd02374 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi.h
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi.h
@@ -181,7 +181,7 @@ struct hdmi_video_format {
};
struct hdmi_config {
- struct omap_video_timings timings;
+ struct videomode vm;
struct hdmi_avi_infoframe infoframe;
enum hdmi_core_hdmi_dvi hdmi_dvi_mode;
};
@@ -298,11 +298,11 @@ int hdmi_wp_set_pll_pwr(struct hdmi_wp_data *wp, enum hdmi_pll_pwr val);
void hdmi_wp_video_config_format(struct hdmi_wp_data *wp,
struct hdmi_video_format *video_fmt);
void hdmi_wp_video_config_interface(struct hdmi_wp_data *wp,
- struct omap_video_timings *timings);
+ struct videomode *vm);
void hdmi_wp_video_config_timing(struct hdmi_wp_data *wp,
- struct omap_video_timings *timings);
+ struct videomode *vm);
void hdmi_wp_init_vid_fmt_timings(struct hdmi_video_format *video_fmt,
- struct omap_video_timings *timings, struct hdmi_config *param);
+ struct videomode *vm, struct hdmi_config *param);
int hdmi_wp_init(struct platform_device *pdev, struct hdmi_wp_data *wp);
phys_addr_t hdmi_wp_get_audio_dma_addr(struct hdmi_wp_data *wp);
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi4.c b/drivers/gpu/drm/omapdrm/dss/hdmi4.c
index cbd28dfdb86a..e7162c16de2e 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi4.c
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi4.c
@@ -155,7 +155,7 @@ static void hdmi_power_off_core(struct omap_dss_device *dssdev)
static int hdmi_power_on_full(struct omap_dss_device *dssdev)
{
int r;
- struct omap_video_timings *p;
+ struct videomode *vm;
enum omap_channel channel = dssdev->dispc_channel;
struct hdmi_wp_data *wp = &hdmi.wp;
struct dss_pll_clock_info hdmi_cinfo = { 0 };
@@ -169,12 +169,13 @@ static int hdmi_power_on_full(struct omap_dss_device *dssdev)
hdmi_wp_clear_irqenable(wp, 0xffffffff);
hdmi_wp_set_irqstatus(wp, 0xffffffff);
- p = &hdmi.cfg.timings;
+ vm = &hdmi.cfg.vm;
- DSSDBG("hdmi_power_on x_res= %d y_res = %d\n", p->x_res, p->y_res);
+ DSSDBG("hdmi_power_on hactive= %d vactive = %d\n", vm->hactive,
+ vm->vactive);
- pc = p->pixelclock;
- if (p->double_pixel)
+ pc = vm->pixelclock;
+ if (vm->flags & DISPLAY_FLAGS_DOUBLECLK)
pc *= 2;
/* DSS_HDMI_TCLK is bitclk / 10 */
@@ -209,7 +210,7 @@ static int hdmi_power_on_full(struct omap_dss_device *dssdev)
hdmi4_configure(&hdmi.core, &hdmi.wp, &hdmi.cfg);
/* tv size */
- dss_mgr_set_timings(channel, p);
+ dss_mgr_set_timings(channel, vm);
r = dss_mgr_enable(channel);
if (r)
@@ -255,30 +256,30 @@ static void hdmi_power_off_full(struct omap_dss_device *dssdev)
}
static int hdmi_display_check_timing(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
- if (!dispc_mgr_timings_ok(dssdev->dispc_channel, timings))
+ if (!dispc_mgr_timings_ok(dssdev->dispc_channel, vm))
return -EINVAL;
return 0;
}
static void hdmi_display_set_timing(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
mutex_lock(&hdmi.lock);
- hdmi.cfg.timings = *timings;
+ hdmi.cfg.vm = *vm;
- dispc_set_tv_pclk(timings->pixelclock);
+ dispc_set_tv_pclk(vm->pixelclock);
mutex_unlock(&hdmi.lock);
}
static void hdmi_display_get_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
- *timings = hdmi.cfg.timings;
+ *vm = hdmi.cfg.vm;
}
static void hdmi_dump_regs(struct seq_file *s)
@@ -352,7 +353,7 @@ static int hdmi_display_enable(struct omap_dss_device *dssdev)
if (hdmi.audio_configured) {
r = hdmi4_audio_config(&hdmi.core, &hdmi.wp, &hdmi.audio_config,
- hdmi.cfg.timings.pixelclock);
+ hdmi.cfg.vm.pixelclock);
if (r) {
DSSERR("Error restoring audio configuration: %d", r);
hdmi.audio_abort_cb(&hdmi.pdev->dev);
@@ -643,7 +644,7 @@ static int hdmi_audio_config(struct device *dev,
}
ret = hdmi4_audio_config(&hd->core, &hd->wp, dss_audio,
- hd->cfg.timings.pixelclock);
+ hd->cfg.vm.pixelclock);
if (!ret) {
hd->audio_configured = true;
hd->audio_config = *dss_audio;
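hdmi_power_on_full() now derives pixel repetition from DISPLAY_FLAGS_DOUBLECLK rather than the old double_pixel field; a 13.5 MHz 480i/576i-class mode, for instance, ends up clocked at 27 MHz on the link. A minimal sketch of the derivation (helper name is hypothetical):

#include <linux/types.h>
#include <video/videomode.h>

/* sketch: link pixel clock as derived in hdmi_power_on_full() */
static unsigned long hdmi_link_pclk(const struct videomode *vm)
{
	unsigned long pc = vm->pixelclock;

	/* pixel-repetition modes clock every pixel out twice */
	if (vm->flags & DISPLAY_FLAGS_DOUBLECLK)
		pc *= 2;

	return pc;
}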
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi4_core.c b/drivers/gpu/drm/omapdrm/dss/hdmi4_core.c
index ef3afe99e487..e05b7ac4f7dd 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi4_core.c
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi4_core.c
@@ -310,7 +310,7 @@ void hdmi4_configure(struct hdmi_core_data *core,
struct hdmi_wp_data *wp, struct hdmi_config *cfg)
{
/* HDMI */
- struct omap_video_timings video_timing;
+ struct videomode vm;
struct hdmi_video_format video_format;
/* HDMI core */
struct hdmi_core_video_config v_core_cfg;
@@ -318,16 +318,16 @@ void hdmi4_configure(struct hdmi_core_data *core,
hdmi_core_init(&v_core_cfg);
- hdmi_wp_init_vid_fmt_timings(&video_format, &video_timing, cfg);
+ hdmi_wp_init_vid_fmt_timings(&video_format, &vm, cfg);
- hdmi_wp_video_config_timing(wp, &video_timing);
+ hdmi_wp_video_config_timing(wp, &vm);
/* video config */
video_format.packing_mode = HDMI_PACK_24b_RGB_YUV444_YUV422;
hdmi_wp_video_config_format(wp, &video_format);
- hdmi_wp_video_config_interface(wp, &video_timing);
+ hdmi_wp_video_config_interface(wp, &vm);
/*
* configure core video part
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi5.c b/drivers/gpu/drm/omapdrm/dss/hdmi5.c
index 0c0a5139a301..678dfb02764a 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi5.c
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi5.c
@@ -172,7 +172,7 @@ static void hdmi_power_off_core(struct omap_dss_device *dssdev)
static int hdmi_power_on_full(struct omap_dss_device *dssdev)
{
int r;
- struct omap_video_timings *p;
+ struct videomode *vm;
enum omap_channel channel = dssdev->dispc_channel;
struct dss_pll_clock_info hdmi_cinfo = { 0 };
unsigned pc;
@@ -181,12 +181,13 @@ static int hdmi_power_on_full(struct omap_dss_device *dssdev)
if (r)
return r;
- p = &hdmi.cfg.timings;
+ vm = &hdmi.cfg.vm;
- DSSDBG("hdmi_power_on x_res= %d y_res = %d\n", p->x_res, p->y_res);
+ DSSDBG("hdmi_power_on hactive= %d vactive = %d\n", vm->hactive,
+ vm->vactive);
- pc = p->pixelclock;
- if (p->double_pixel)
+ pc = vm->pixelclock;
+ if (vm->flags & DISPLAY_FLAGS_DOUBLECLK)
pc *= 2;
/* DSS_HDMI_TCLK is bitclk / 10 */
@@ -226,7 +227,7 @@ static int hdmi_power_on_full(struct omap_dss_device *dssdev)
hdmi5_configure(&hdmi.core, &hdmi.wp, &hdmi.cfg);
/* tv size */
- dss_mgr_set_timings(channel, p);
+ dss_mgr_set_timings(channel, vm);
r = dss_mgr_enable(channel);
if (r)
@@ -272,30 +273,30 @@ static void hdmi_power_off_full(struct omap_dss_device *dssdev)
}
static int hdmi_display_check_timing(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
- if (!dispc_mgr_timings_ok(dssdev->dispc_channel, timings))
+ if (!dispc_mgr_timings_ok(dssdev->dispc_channel, vm))
return -EINVAL;
return 0;
}
static void hdmi_display_set_timing(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
mutex_lock(&hdmi.lock);
- hdmi.cfg.timings = *timings;
+ hdmi.cfg.vm = *vm;
- dispc_set_tv_pclk(timings->pixelclock);
+ dispc_set_tv_pclk(vm->pixelclock);
mutex_unlock(&hdmi.lock);
}
static void hdmi_display_get_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
- *timings = hdmi.cfg.timings;
+ *vm = hdmi.cfg.vm;
}
static void hdmi_dump_regs(struct seq_file *s)
@@ -378,7 +379,7 @@ static int hdmi_display_enable(struct omap_dss_device *dssdev)
if (hdmi.audio_configured) {
r = hdmi5_audio_config(&hdmi.core, &hdmi.wp, &hdmi.audio_config,
- hdmi.cfg.timings.pixelclock);
+ hdmi.cfg.vm.pixelclock);
if (r) {
DSSERR("Error restoring audio configuration: %d", r);
hdmi.audio_abort_cb(&hdmi.pdev->dev);
@@ -669,7 +670,7 @@ static int hdmi_audio_config(struct device *dev,
}
ret = hdmi5_audio_config(&hd->core, &hd->wp, dss_audio,
- hd->cfg.timings.pixelclock);
+ hd->cfg.vm.pixelclock);
if (!ret) {
hd->audio_configured = true;
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi5_core.c b/drivers/gpu/drm/omapdrm/dss/hdmi5_core.c
index 8ab2093daa12..8de1d7b2ae55 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi5_core.c
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi5_core.c
@@ -292,35 +292,35 @@ static void hdmi_core_init(struct hdmi_core_vid_config *video_cfg,
{
DSSDBG("hdmi_core_init\n");
- video_cfg->v_fc_config.timings = cfg->timings;
+ video_cfg->v_fc_config.vm = cfg->vm;
/* video core */
video_cfg->data_enable_pol = 1; /* It is always 1 */
- video_cfg->hblank = cfg->timings.hfp +
- cfg->timings.hbp + cfg->timings.hsw;
+ video_cfg->hblank = cfg->vm.hfront_porch +
+ cfg->vm.hback_porch + cfg->vm.hsync_len;
video_cfg->vblank_osc = 0;
- video_cfg->vblank = cfg->timings.vsw +
- cfg->timings.vfp + cfg->timings.vbp;
+ video_cfg->vblank = cfg->vm.vsync_len + cfg->vm.vfront_porch +
+ cfg->vm.vback_porch;
video_cfg->v_fc_config.hdmi_dvi_mode = cfg->hdmi_dvi_mode;
- if (cfg->timings.interlace) {
+ if (cfg->vm.flags & DISPLAY_FLAGS_INTERLACED) {
/* set vblank_osc if vblank is fractional */
if (video_cfg->vblank % 2 != 0)
video_cfg->vblank_osc = 1;
- video_cfg->v_fc_config.timings.y_res /= 2;
+ video_cfg->v_fc_config.vm.vactive /= 2;
video_cfg->vblank /= 2;
- video_cfg->v_fc_config.timings.vfp /= 2;
- video_cfg->v_fc_config.timings.vsw /= 2;
- video_cfg->v_fc_config.timings.vbp /= 2;
+ video_cfg->v_fc_config.vm.vfront_porch /= 2;
+ video_cfg->v_fc_config.vm.vsync_len /= 2;
+ video_cfg->v_fc_config.vm.vback_porch /= 2;
}
- if (cfg->timings.double_pixel) {
- video_cfg->v_fc_config.timings.x_res *= 2;
+ if (cfg->vm.flags & DISPLAY_FLAGS_DOUBLECLK) {
+ video_cfg->v_fc_config.vm.hactive *= 2;
video_cfg->hblank *= 2;
- video_cfg->v_fc_config.timings.hfp *= 2;
- video_cfg->v_fc_config.timings.hsw *= 2;
- video_cfg->v_fc_config.timings.hbp *= 2;
+ video_cfg->v_fc_config.vm.hfront_porch *= 2;
+ video_cfg->v_fc_config.vm.hsync_len *= 2;
+ video_cfg->v_fc_config.vm.hback_porch *= 2;
}
}
@@ -329,13 +329,12 @@ static void hdmi_core_video_config(struct hdmi_core_data *core,
struct hdmi_core_vid_config *cfg)
{
void __iomem *base = core->base;
+ struct videomode *vm = &cfg->v_fc_config.vm;
unsigned char r = 0;
bool vsync_pol, hsync_pol;
- vsync_pol =
- cfg->v_fc_config.timings.vsync_level == OMAPDSS_SIG_ACTIVE_HIGH;
- hsync_pol =
- cfg->v_fc_config.timings.hsync_level == OMAPDSS_SIG_ACTIVE_HIGH;
+ vsync_pol = !!(vm->flags & DISPLAY_FLAGS_VSYNC_HIGH);
+ hsync_pol = !!(vm->flags & DISPLAY_FLAGS_HSYNC_HIGH);
/* Set hsync, vsync and data-enable polarity */
r = hdmi_read_reg(base, HDMI_CORE_FC_INVIDCONF);
@@ -343,20 +342,16 @@ static void hdmi_core_video_config(struct hdmi_core_data *core,
r = FLD_MOD(r, hsync_pol, 5, 5);
r = FLD_MOD(r, cfg->data_enable_pol, 4, 4);
r = FLD_MOD(r, cfg->vblank_osc, 1, 1);
- r = FLD_MOD(r, cfg->v_fc_config.timings.interlace, 0, 0);
+ r = FLD_MOD(r, !!(vm->flags & DISPLAY_FLAGS_INTERLACED), 0, 0);
hdmi_write_reg(base, HDMI_CORE_FC_INVIDCONF, r);
/* set x resolution */
- REG_FLD_MOD(base, HDMI_CORE_FC_INHACTIV1,
- cfg->v_fc_config.timings.x_res >> 8, 4, 0);
- REG_FLD_MOD(base, HDMI_CORE_FC_INHACTIV0,
- cfg->v_fc_config.timings.x_res & 0xFF, 7, 0);
+ REG_FLD_MOD(base, HDMI_CORE_FC_INHACTIV1, vm->hactive >> 8, 4, 0);
+ REG_FLD_MOD(base, HDMI_CORE_FC_INHACTIV0, vm->hactive & 0xFF, 7, 0);
/* set y resolution */
- REG_FLD_MOD(base, HDMI_CORE_FC_INVACTIV1,
- cfg->v_fc_config.timings.y_res >> 8, 4, 0);
- REG_FLD_MOD(base, HDMI_CORE_FC_INVACTIV0,
- cfg->v_fc_config.timings.y_res & 0xFF, 7, 0);
+ REG_FLD_MOD(base, HDMI_CORE_FC_INVACTIV1, vm->vactive >> 8, 4, 0);
+ REG_FLD_MOD(base, HDMI_CORE_FC_INVACTIV0, vm->vactive & 0xFF, 7, 0);
/* set horizontal blanking pixels */
REG_FLD_MOD(base, HDMI_CORE_FC_INHBLANK1, cfg->hblank >> 8, 4, 0);
@@ -366,30 +361,28 @@ static void hdmi_core_video_config(struct hdmi_core_data *core,
REG_FLD_MOD(base, HDMI_CORE_FC_INVBLANK, cfg->vblank, 7, 0);
/* set horizontal sync offset */
- REG_FLD_MOD(base, HDMI_CORE_FC_HSYNCINDELAY1,
- cfg->v_fc_config.timings.hfp >> 8, 4, 0);
- REG_FLD_MOD(base, HDMI_CORE_FC_HSYNCINDELAY0,
- cfg->v_fc_config.timings.hfp & 0xFF, 7, 0);
+ REG_FLD_MOD(base, HDMI_CORE_FC_HSYNCINDELAY1, vm->hfront_porch >> 8,
+ 4, 0);
+ REG_FLD_MOD(base, HDMI_CORE_FC_HSYNCINDELAY0, vm->hfront_porch & 0xFF,
+ 7, 0);
/* set vertical sync offset */
- REG_FLD_MOD(base, HDMI_CORE_FC_VSYNCINDELAY,
- cfg->v_fc_config.timings.vfp, 7, 0);
+ REG_FLD_MOD(base, HDMI_CORE_FC_VSYNCINDELAY, vm->vfront_porch, 7, 0);
/* set horizontal sync pulse width */
- REG_FLD_MOD(base, HDMI_CORE_FC_HSYNCINWIDTH1,
- (cfg->v_fc_config.timings.hsw >> 8), 1, 0);
- REG_FLD_MOD(base, HDMI_CORE_FC_HSYNCINWIDTH0,
- cfg->v_fc_config.timings.hsw & 0xFF, 7, 0);
+ REG_FLD_MOD(base, HDMI_CORE_FC_HSYNCINWIDTH1, (vm->hsync_len >> 8),
+ 1, 0);
+ REG_FLD_MOD(base, HDMI_CORE_FC_HSYNCINWIDTH0, vm->hsync_len & 0xFF,
+ 7, 0);
/* set vertical sync pulse width */
- REG_FLD_MOD(base, HDMI_CORE_FC_VSYNCINWIDTH,
- cfg->v_fc_config.timings.vsw, 5, 0);
+ REG_FLD_MOD(base, HDMI_CORE_FC_VSYNCINWIDTH, vm->vsync_len, 5, 0);
/* select DVI mode */
REG_FLD_MOD(base, HDMI_CORE_FC_INVIDCONF,
- cfg->v_fc_config.hdmi_dvi_mode, 3, 3);
+ cfg->v_fc_config.hdmi_dvi_mode, 3, 3);
- if (cfg->v_fc_config.timings.double_pixel)
+ if (vm->flags & DISPLAY_FLAGS_DOUBLECLK)
REG_FLD_MOD(base, HDMI_CORE_FC_PRCONF, 2, 7, 4);
else
REG_FLD_MOD(base, HDMI_CORE_FC_PRCONF, 1, 7, 4);
@@ -616,7 +609,7 @@ int hdmi5_core_handle_irqs(struct hdmi_core_data *core)
void hdmi5_configure(struct hdmi_core_data *core, struct hdmi_wp_data *wp,
struct hdmi_config *cfg)
{
- struct omap_video_timings video_timing;
+ struct videomode vm;
struct hdmi_video_format video_format;
struct hdmi_core_vid_config v_core_cfg;
@@ -624,16 +617,16 @@ void hdmi5_configure(struct hdmi_core_data *core, struct hdmi_wp_data *wp,
hdmi_core_init(&v_core_cfg, cfg);
- hdmi_wp_init_vid_fmt_timings(&video_format, &video_timing, cfg);
+ hdmi_wp_init_vid_fmt_timings(&video_format, &vm, cfg);
- hdmi_wp_video_config_timing(wp, &video_timing);
+ hdmi_wp_video_config_timing(wp, &vm);
/* video config */
video_format.packing_mode = HDMI_PACK_24b_RGB_YUV444_YUV422;
hdmi_wp_video_config_format(wp, &video_format);
- hdmi_wp_video_config_interface(wp, &video_timing);
+ hdmi_wp_video_config_interface(wp, &vm);
/* support limited range with 24 bit color depth for now */
hdmi_core_configure_range(core);
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi_wp.c b/drivers/gpu/drm/omapdrm/dss/hdmi_wp.c
index 203694a52d18..b783d5a0750e 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi_wp.c
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi_wp.c
@@ -144,87 +144,84 @@ void hdmi_wp_video_config_format(struct hdmi_wp_data *wp,
}
void hdmi_wp_video_config_interface(struct hdmi_wp_data *wp,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
u32 r;
bool vsync_pol, hsync_pol;
DSSDBG("Enter hdmi_wp_video_config_interface\n");
- vsync_pol = timings->vsync_level == OMAPDSS_SIG_ACTIVE_HIGH;
- hsync_pol = timings->hsync_level == OMAPDSS_SIG_ACTIVE_HIGH;
+ vsync_pol = !!(vm->flags & DISPLAY_FLAGS_VSYNC_HIGH);
+ hsync_pol = !!(vm->flags & DISPLAY_FLAGS_HSYNC_HIGH);
r = hdmi_read_reg(wp->base, HDMI_WP_VIDEO_CFG);
r = FLD_MOD(r, vsync_pol, 7, 7);
r = FLD_MOD(r, hsync_pol, 6, 6);
- r = FLD_MOD(r, timings->interlace, 3, 3);
+ r = FLD_MOD(r, !!(vm->flags & DISPLAY_FLAGS_INTERLACED), 3, 3);
r = FLD_MOD(r, 1, 1, 0); /* HDMI_TIMING_MASTER_24BIT */
hdmi_write_reg(wp->base, HDMI_WP_VIDEO_CFG, r);
}
void hdmi_wp_video_config_timing(struct hdmi_wp_data *wp,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
u32 timing_h = 0;
u32 timing_v = 0;
- unsigned hsw_offset = 1;
+ unsigned hsync_len_offset = 1;
DSSDBG("Enter hdmi_wp_video_config_timing\n");
/*
* On OMAP4 and OMAP5 ES1 the HSW field is programmed as is. On OMAP5
- * ES2+ (including DRA7/AM5 SoCs) HSW field is programmed to hsw-1.
+ * ES2+ (including DRA7/AM5 SoCs) HSW field is programmed to hsync_len-1.
* However, we don't support OMAP5 ES1 at all, so we can just check for
* OMAP4 here.
*/
if (omapdss_get_version() == OMAPDSS_VER_OMAP4430_ES1 ||
omapdss_get_version() == OMAPDSS_VER_OMAP4430_ES2 ||
omapdss_get_version() == OMAPDSS_VER_OMAP4)
- hsw_offset = 0;
+ hsync_len_offset = 0;
- timing_h |= FLD_VAL(timings->hbp, 31, 20);
- timing_h |= FLD_VAL(timings->hfp, 19, 8);
- timing_h |= FLD_VAL(timings->hsw - hsw_offset, 7, 0);
+ timing_h |= FLD_VAL(vm->hback_porch, 31, 20);
+ timing_h |= FLD_VAL(vm->hfront_porch, 19, 8);
+ timing_h |= FLD_VAL(vm->hsync_len - hsync_len_offset, 7, 0);
hdmi_write_reg(wp->base, HDMI_WP_VIDEO_TIMING_H, timing_h);
- timing_v |= FLD_VAL(timings->vbp, 31, 20);
- timing_v |= FLD_VAL(timings->vfp, 19, 8);
- timing_v |= FLD_VAL(timings->vsw, 7, 0);
+ timing_v |= FLD_VAL(vm->vback_porch, 31, 20);
+ timing_v |= FLD_VAL(vm->vfront_porch, 19, 8);
+ timing_v |= FLD_VAL(vm->vsync_len, 7, 0);
hdmi_write_reg(wp->base, HDMI_WP_VIDEO_TIMING_V, timing_v);
}
void hdmi_wp_init_vid_fmt_timings(struct hdmi_video_format *video_fmt,
- struct omap_video_timings *timings, struct hdmi_config *param)
+ struct videomode *vm, struct hdmi_config *param)
{
DSSDBG("Enter hdmi_wp_video_init_format\n");
video_fmt->packing_mode = HDMI_PACK_10b_RGB_YUV444;
- video_fmt->y_res = param->timings.y_res;
- video_fmt->x_res = param->timings.x_res;
-
- timings->hbp = param->timings.hbp;
- timings->hfp = param->timings.hfp;
- timings->hsw = param->timings.hsw;
- timings->vbp = param->timings.vbp;
- timings->vfp = param->timings.vfp;
- timings->vsw = param->timings.vsw;
-
- timings->vsync_level = param->timings.vsync_level;
- timings->hsync_level = param->timings.hsync_level;
- timings->interlace = param->timings.interlace;
- timings->double_pixel = param->timings.double_pixel;
-
- if (param->timings.interlace) {
+ video_fmt->y_res = param->vm.vactive;
+ video_fmt->x_res = param->vm.hactive;
+
+ vm->hback_porch = param->vm.hback_porch;
+ vm->hfront_porch = param->vm.hfront_porch;
+ vm->hsync_len = param->vm.hsync_len;
+ vm->vback_porch = param->vm.vback_porch;
+ vm->vfront_porch = param->vm.vfront_porch;
+ vm->vsync_len = param->vm.vsync_len;
+
+ vm->flags = param->vm.flags;
+
+ if (param->vm.flags & DISPLAY_FLAGS_INTERLACED) {
video_fmt->y_res /= 2;
- timings->vbp /= 2;
- timings->vfp /= 2;
- timings->vsw /= 2;
+ vm->vback_porch /= 2;
+ vm->vfront_porch /= 2;
+ vm->vsync_len /= 2;
}
- if (param->timings.double_pixel) {
+ if (param->vm.flags & DISPLAY_FLAGS_DOUBLECLK) {
video_fmt->x_res *= 2;
- timings->hfp *= 2;
- timings->hsw *= 2;
- timings->hbp *= 2;
+ vm->hfront_porch *= 2;
+ vm->hsync_len *= 2;
+ vm->hback_porch *= 2;
}
}
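Two idioms recur in the hdmi_wp and hdmi5_core hunks above. Sync polarity is now a flag test rather than an enum comparison, and the double negation forces the multi-bit mask result down to 0 or 1 before it is written into a one-bit register field. A short sketch reusing the driver's FLD_MOD() macro:

/* Sketch: !! collapses the mask test to 0/1; without it FLD_MOD() would
 * be handed the raw DISPLAY_FLAGS_HSYNC_HIGH bit value. */
bool hsync_pol = !!(vm->flags & DISPLAY_FLAGS_HSYNC_HIGH);

r = FLD_MOD(r, hsync_pol, 6, 6);	/* HDMI_WP_VIDEO_CFG bit 6 */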
diff --git a/drivers/gpu/drm/omapdrm/dss/omapdss.h b/drivers/gpu/drm/omapdrm/dss/omapdss.h
index 6eaf1adbd606..b420dde8c0fb 100644
--- a/drivers/gpu/drm/omapdrm/dss/omapdss.h
+++ b/drivers/gpu/drm/omapdrm/dss/omapdss.h
@@ -290,7 +290,7 @@ struct omap_dss_dsi_videomode_timings {
struct omap_dss_dsi_config {
enum omap_dss_dsi_mode mode;
enum omap_dss_dsi_pixel_format pixel_format;
- const struct omap_video_timings *timings;
+ const struct videomode *vm;
unsigned long hs_clk_min, hs_clk_max;
unsigned long lp_clk_min, lp_clk_max;
@@ -299,48 +299,12 @@ struct omap_dss_dsi_config {
enum omap_dss_dsi_trans_mode trans_mode;
};
-struct omap_video_timings {
- /* Unit: pixels */
- u16 x_res;
- /* Unit: pixels */
- u16 y_res;
- /* Unit: Hz */
- u32 pixelclock;
- /* Unit: pixel clocks */
- u16 hsw; /* Horizontal synchronization pulse width */
- /* Unit: pixel clocks */
- u16 hfp; /* Horizontal front porch */
- /* Unit: pixel clocks */
- u16 hbp; /* Horizontal back porch */
- /* Unit: line clocks */
- u16 vsw; /* Vertical synchronization pulse width */
- /* Unit: line clocks */
- u16 vfp; /* Vertical front porch */
- /* Unit: line clocks */
- u16 vbp; /* Vertical back porch */
-
- /* Vsync logic level */
- enum omap_dss_signal_level vsync_level;
- /* Hsync logic level */
- enum omap_dss_signal_level hsync_level;
- /* Interlaced or Progressive timings */
- bool interlace;
- /* Pixel clock edge to drive LCD data */
- enum omap_dss_signal_edge data_pclk_edge;
- /* Data enable logic level */
- enum omap_dss_signal_level de_level;
- /* Pixel clock edges to drive HSYNC and VSYNC signals */
- enum omap_dss_signal_edge sync_pclk_edge;
-
- bool double_pixel;
-};
-
-/* Hardcoded timings for tv modes. Venc only uses these to
+/* Hardcoded videomodes for tv. Venc only uses these to
* identify the mode, and does not actually use the configs
* itself. However, the configs should be something that
* a normal monitor can also show */
-extern const struct omap_video_timings omap_dss_pal_timings;
-extern const struct omap_video_timings omap_dss_ntsc_timings;
+extern const struct videomode omap_dss_pal_vm;
+extern const struct videomode omap_dss_ntsc_vm;
struct omap_dss_cpr_coefs {
s16 rr, rg, rb;
@@ -502,11 +466,11 @@ struct omapdss_dpi_ops {
void (*disable)(struct omap_dss_device *dssdev);
int (*check_timings)(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings);
+ struct videomode *vm);
void (*set_timings)(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings);
+ struct videomode *vm);
void (*get_timings)(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings);
+ struct videomode *vm);
void (*set_data_lines)(struct omap_dss_device *dssdev, int data_lines);
};
@@ -521,11 +485,11 @@ struct omapdss_sdi_ops {
void (*disable)(struct omap_dss_device *dssdev);
int (*check_timings)(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings);
+ struct videomode *vm);
void (*set_timings)(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings);
+ struct videomode *vm);
void (*get_timings)(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings);
+ struct videomode *vm);
void (*set_datapairs)(struct omap_dss_device *dssdev, int datapairs);
};
@@ -540,11 +504,11 @@ struct omapdss_dvi_ops {
void (*disable)(struct omap_dss_device *dssdev);
int (*check_timings)(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings);
+ struct videomode *vm);
void (*set_timings)(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings);
+ struct videomode *vm);
void (*get_timings)(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings);
+ struct videomode *vm);
};
struct omapdss_atv_ops {
@@ -557,11 +521,11 @@ struct omapdss_atv_ops {
void (*disable)(struct omap_dss_device *dssdev);
int (*check_timings)(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings);
+ struct videomode *vm);
void (*set_timings)(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings);
+ struct videomode *vm);
void (*get_timings)(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings);
+ struct videomode *vm);
void (*set_type)(struct omap_dss_device *dssdev,
enum omap_dss_venc_type type);
@@ -582,11 +546,11 @@ struct omapdss_hdmi_ops {
void (*disable)(struct omap_dss_device *dssdev);
int (*check_timings)(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings);
+ struct videomode *vm);
void (*set_timings)(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings);
+ struct videomode *vm);
void (*get_timings)(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings);
+ struct videomode *vm);
int (*read_edid)(struct omap_dss_device *dssdev, u8 *buf, int len);
bool (*detect)(struct omap_dss_device *dssdev);
@@ -692,7 +656,7 @@ struct omap_dss_device {
} phy;
struct {
- struct omap_video_timings timings;
+ struct videomode vm;
enum omap_dss_dsi_pixel_format dsi_pix_fmt;
enum omap_dss_dsi_mode dsi_mode;
@@ -785,11 +749,11 @@ struct omap_dss_driver {
int (*get_recommended_bpp)(struct omap_dss_device *dssdev);
int (*check_timings)(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings);
+ struct videomode *vm);
void (*set_timings)(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings);
+ struct videomode *vm);
void (*get_timings)(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings);
+ struct videomode *vm);
int (*set_wss)(struct omap_dss_device *dssdev, u32 wss);
u32 (*get_wss)(struct omap_dss_device *dssdev);
@@ -819,11 +783,6 @@ struct omap_dss_device *omap_dss_find_device(void *data,
int (*match)(struct omap_dss_device *dssdev, void *data));
const char *omapdss_get_default_display_name(void);
-void videomode_to_omap_video_timings(const struct videomode *vm,
- struct omap_video_timings *ovt);
-void omap_video_timings_to_videomode(const struct omap_video_timings *ovt,
- struct videomode *vm);
-
int dss_feat_get_num_mgrs(void);
int dss_feat_get_num_ovls(void);
enum omap_color_mode dss_feat_get_supported_color_modes(enum omap_plane plane);
@@ -852,7 +811,7 @@ void omapdss_default_get_resolution(struct omap_dss_device *dssdev,
u16 *xres, u16 *yres);
int omapdss_default_get_recommended_bpp(struct omap_dss_device *dssdev);
void omapdss_default_get_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings);
+ struct videomode *vm);
typedef void (*omap_dispc_isr_t) (void *arg, u32 mask);
int omap_dispc_register_isr(omap_dispc_isr_t isr, void *arg, u32 mask);
@@ -906,7 +865,7 @@ void dispc_mgr_go(enum omap_channel channel);
void dispc_mgr_set_lcd_config(enum omap_channel channel,
const struct dss_lcd_mgr_config *config);
void dispc_mgr_set_timings(enum omap_channel channel,
- const struct omap_video_timings *timings);
+ const struct videomode *vm);
void dispc_mgr_setup(enum omap_channel channel,
const struct omap_overlay_manager_info *info);
u32 dispc_mgr_gamma_size(enum omap_channel channel);
@@ -919,8 +878,7 @@ bool dispc_ovl_enabled(enum omap_plane plane);
void dispc_ovl_set_channel_out(enum omap_plane plane,
enum omap_channel channel);
int dispc_ovl_setup(enum omap_plane plane, const struct omap_overlay_info *oi,
- bool replication, const struct omap_video_timings *mgr_timings,
- bool mem_to_mem);
+ bool replication, const struct videomode *vm, bool mem_to_mem);
enum omap_dss_output_id dispc_mgr_get_supported_outputs(enum omap_channel channel);
@@ -934,7 +892,7 @@ struct dss_mgr_ops {
int (*enable)(enum omap_channel channel);
void (*disable)(enum omap_channel channel);
void (*set_timings)(enum omap_channel channel,
- const struct omap_video_timings *timings);
+ const struct videomode *vm);
void (*set_lcd_config)(enum omap_channel channel,
const struct dss_lcd_mgr_config *config);
int (*register_framedone_handler)(enum omap_channel channel,
@@ -951,7 +909,7 @@ int dss_mgr_connect(enum omap_channel channel,
void dss_mgr_disconnect(enum omap_channel channel,
struct omap_dss_device *dst);
void dss_mgr_set_timings(enum omap_channel channel,
- const struct omap_video_timings *timings);
+ const struct videomode *vm);
void dss_mgr_set_lcd_config(enum omap_channel channel,
const struct dss_lcd_mgr_config *config);
int dss_mgr_enable(enum omap_channel channel);
diff --git a/drivers/gpu/drm/omapdrm/dss/output.c b/drivers/gpu/drm/omapdrm/dss/output.c
index 24f859488201..a901af5a9bc3 100644
--- a/drivers/gpu/drm/omapdrm/dss/output.c
+++ b/drivers/gpu/drm/omapdrm/dss/output.c
@@ -201,10 +201,9 @@ void dss_mgr_disconnect(enum omap_channel channel,
}
EXPORT_SYMBOL(dss_mgr_disconnect);
-void dss_mgr_set_timings(enum omap_channel channel,
- const struct omap_video_timings *timings)
+void dss_mgr_set_timings(enum omap_channel channel, const struct videomode *vm)
{
- dss_mgr_ops->set_timings(channel, timings);
+ dss_mgr_ops->set_timings(channel, vm);
}
EXPORT_SYMBOL(dss_mgr_set_timings);
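dss_mgr_set_timings() stays a thin exported wrapper: it dispatches through dss_mgr_ops, which omapdrm installs at init time so that manager calls land in its CRTC code. A hedged sketch of that wiring, with the handler list abridged:

/* Sketch: omapdrm routes manager operations to its own CRTC handlers,
 * so dss_mgr_set_timings(channel, vm) ends up in
 * omap_crtc_dss_set_timings(), shown further below. */
static const struct dss_mgr_ops mgr_ops = {
	.set_timings = omap_crtc_dss_set_timings,
	/* ...remaining handlers elided... */
};

dss_install_mgr_ops(&mgr_ops);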
diff --git a/drivers/gpu/drm/omapdrm/dss/rfbi.c b/drivers/gpu/drm/omapdrm/dss/rfbi.c
index cd53566d75eb..09724757366a 100644
--- a/drivers/gpu/drm/omapdrm/dss/rfbi.c
+++ b/drivers/gpu/drm/omapdrm/dss/rfbi.c
@@ -113,7 +113,7 @@ static struct {
struct semaphore bus_lock;
- struct omap_video_timings timings;
+ struct videomode vm;
int pixel_size;
int data_lines;
struct rfbi_timings intf_timings;
@@ -308,15 +308,15 @@ static int rfbi_transfer_area(struct omap_dss_device *dssdev,
u32 l;
int r;
struct omap_overlay_manager *mgr = rfbi.output.manager;
- u16 width = rfbi.timings.x_res;
- u16 height = rfbi.timings.y_res;
+ u16 width = rfbi.vm.hactive;
+ u16 height = rfbi.vm.vactive;
/*BUG_ON(callback == 0);*/
BUG_ON(rfbi.framedone_callback != NULL);
DSSDBG("rfbi_transfer_area %dx%d\n", width, height);
- dss_mgr_set_timings(mgr, &rfbi.timings);
+ dss_mgr_set_timings(mgr, &rfbi.vm);
r = dss_mgr_enable(mgr);
if (r)
@@ -777,8 +777,8 @@ static int rfbi_update(struct omap_dss_device *dssdev, void (*callback)(void *),
static void rfbi_set_size(struct omap_dss_device *dssdev, u16 w, u16 h)
{
- rfbi.timings.x_res = w;
- rfbi.timings.y_res = h;
+ rfbi.vm.hactive = w;
+ rfbi.vm.vactive = h;
}
static void rfbi_set_pixel_size(struct omap_dss_device *dssdev, int pixel_size)
@@ -854,25 +854,30 @@ static void rfbi_config_lcd_manager(struct omap_dss_device *dssdev)
dss_mgr_set_lcd_config(mgr, &mgr_config);
/*
- * Set rfbi.timings with default values, the x_res and y_res fields
+ * Set rfbi.vm with default values, the hactive and vactive fields
* are expected to be already configured by the panel driver via
* omapdss_rfbi_set_size()
*/
- rfbi.timings.hsw = 1;
- rfbi.timings.hfp = 1;
- rfbi.timings.hbp = 1;
- rfbi.timings.vsw = 1;
- rfbi.timings.vfp = 0;
- rfbi.timings.vbp = 0;
-
- rfbi.timings.interlace = false;
- rfbi.timings.hsync_level = OMAPDSS_SIG_ACTIVE_HIGH;
- rfbi.timings.vsync_level = OMAPDSS_SIG_ACTIVE_HIGH;
- rfbi.timings.data_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE;
- rfbi.timings.de_level = OMAPDSS_SIG_ACTIVE_HIGH;
- rfbi.timings.sync_pclk_edge = OMAPDSS_DRIVE_SIG_FALLING_EDGE;
-
- dss_mgr_set_timings(mgr, &rfbi.timings);
+ rfbi.vm.hsync_len = 1;
+ rfbi.vm.hfront_porch = 1;
+ rfbi.vm.hback_porch = 1;
+ rfbi.vm.vsync_len = 1;
+ rfbi.vm.vfront_porch = 0;
+ rfbi.vm.vback_porch = 0;
+
+ rfbi.vm.flags &= ~DISPLAY_FLAGS_INTERLACED;
+ rfbi.vm.flags &= ~DISPLAY_FLAGS_HSYNC_LOW;
+ rfbi.vm.flags |= DISPLAY_FLAGS_HSYNC_HIGH;
+ rfbi.vm.flags &= ~DISPLAY_FLAGS_VSYNC_LOW;
+ rfbi.vm.flags |= DISPLAY_FLAGS_VSYNC_HIGH;
+ rfbi.vm.flags &= ~DISPLAY_FLAGS_PIXDATA_NEGEDGE;
+ rfbi.vm.flags |= DISPLAY_FLAGS_PIXDATA_POSEDGE;
+ rfbi.vm.flags &= ~DISPLAY_FLAGS_DE_LOW;
+ rfbi.vm.flags |= DISPLAY_FLAGS_DE_HIGH;
+ rfbi.vm.flags &= ~DISPLAY_FLAGS_SYNC_POSEDGE;
+ rfbi.vm.flags |= DISPLAY_FLAGS_SYNC_NEGEDGE;
+
+ dss_mgr_set_timings(mgr, &rfbi.vm);
}
static int rfbi_display_enable(struct omap_dss_device *dssdev)
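Each display_flags polarity comes as a complementary pair (DISPLAY_FLAGS_HSYNC_LOW/HIGH, DE_LOW/HIGH, and so on), so overriding a default means clearing the opposite bit before setting the wanted one, which is what the rfbi block above does pairwise. A sketch of a helper capturing the pattern; vm_set_flag_pair() is hypothetical and not part of the patch:

/* Hypothetical helper: select one flag of a complementary pair by
 * clearing its opposite first. */
static void vm_set_flag_pair(struct videomode *vm,
			     enum display_flags set,
			     enum display_flags clear)
{
	vm->flags &= ~clear;
	vm->flags |= set;
}

/* equivalent to two lines of the rfbi block above: */
vm_set_flag_pair(&rfbi.vm, DISPLAY_FLAGS_DE_HIGH, DISPLAY_FLAGS_DE_LOW);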
diff --git a/drivers/gpu/drm/omapdrm/dss/sdi.c b/drivers/gpu/drm/omapdrm/dss/sdi.c
index 0a96c321ce62..b3bda2d3c08d 100644
--- a/drivers/gpu/drm/omapdrm/dss/sdi.c
+++ b/drivers/gpu/drm/omapdrm/dss/sdi.c
@@ -39,7 +39,7 @@ static struct {
struct regulator *vdds_sdi_reg;
struct dss_lcd_mgr_config mgr_config;
- struct omap_video_timings timings;
+ struct videomode vm;
int datapairs;
struct omap_dss_device output;
@@ -131,7 +131,7 @@ static int sdi_display_enable(struct omap_dss_device *dssdev)
{
struct omap_dss_device *out = &sdi.output;
enum omap_channel channel = dssdev->dispc_channel;
- struct omap_video_timings *t = &sdi.timings;
+ struct videomode *vm = &sdi.vm;
unsigned long fck;
struct dispc_clock_info dispc_cinfo;
unsigned long pck;
@@ -151,10 +151,9 @@ static int sdi_display_enable(struct omap_dss_device *dssdev)
goto err_get_dispc;
/* 15.5.9.1.2 */
- t->data_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE;
- t->sync_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE;
+ vm->flags |= DISPLAY_FLAGS_PIXDATA_POSEDGE | DISPLAY_FLAGS_SYNC_POSEDGE;
- r = sdi_calc_clock_div(t->pixelclock, &fck, &dispc_cinfo);
+ r = sdi_calc_clock_div(vm->pixelclock, &fck, &dispc_cinfo);
if (r)
goto err_calc_clock_div;
@@ -162,15 +161,15 @@ static int sdi_display_enable(struct omap_dss_device *dssdev)
pck = fck / dispc_cinfo.lck_div / dispc_cinfo.pck_div;
- if (pck != t->pixelclock) {
- DSSWARN("Could not find exact pixel clock. Requested %d Hz, got %lu Hz\n",
- t->pixelclock, pck);
+ if (pck != vm->pixelclock) {
+ DSSWARN("Could not find exact pixel clock. Requested %lu Hz, got %lu Hz\n",
+ vm->pixelclock, pck);
- t->pixelclock = pck;
+ vm->pixelclock = pck;
}
- dss_mgr_set_timings(channel, t);
+ dss_mgr_set_timings(channel, vm);
r = dss_set_fck_rate(fck);
if (r)
@@ -229,26 +228,26 @@ static void sdi_display_disable(struct omap_dss_device *dssdev)
}
static void sdi_set_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
- sdi.timings = *timings;
+ sdi.vm = *vm;
}
static void sdi_get_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
- *timings = sdi.timings;
+ *vm = sdi.vm;
}
static int sdi_check_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
enum omap_channel channel = dssdev->dispc_channel;
- if (!dispc_mgr_timings_ok(channel, timings))
+ if (!dispc_mgr_timings_ok(channel, vm))
return -EINVAL;
- if (timings->pixelclock == 0)
+ if (vm->pixelclock == 0)
return -EINVAL;
return 0;
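The %d to %lu change in the DSSWARN above follows from the new structure's types: struct videomode stores the pixel clock as an unsigned long in Hz, where omap_video_timings used a u32. For reference, the definition from <video/videomode.h>:

struct videomode {
	unsigned long pixelclock;	/* pixelclock in Hz */

	u32 hactive;
	u32 hfront_porch;
	u32 hback_porch;
	u32 hsync_len;

	u32 vactive;
	u32 vfront_porch;
	u32 vback_porch;
	u32 vsync_len;

	enum display_flags flags;	/* display flags */
};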
diff --git a/drivers/gpu/drm/omapdrm/dss/venc.c b/drivers/gpu/drm/omapdrm/dss/venc.c
index 6eedf2118708..d74f7fcc2e46 100644
--- a/drivers/gpu/drm/omapdrm/dss/venc.c
+++ b/drivers/gpu/drm/omapdrm/dss/venc.c
@@ -262,47 +262,41 @@ static const struct venc_config venc_config_pal_bdghi = {
.fid_ext_start_y__fid_ext_offset_y = 0x01380005,
};
-const struct omap_video_timings omap_dss_pal_timings = {
- .x_res = 720,
- .y_res = 574,
+const struct videomode omap_dss_pal_vm = {
+ .hactive = 720,
+ .vactive = 574,
.pixelclock = 13500000,
- .hsw = 64,
- .hfp = 12,
- .hbp = 68,
- .vsw = 5,
- .vfp = 5,
- .vbp = 41,
-
- .interlace = true,
-
- .hsync_level = OMAPDSS_SIG_ACTIVE_LOW,
- .vsync_level = OMAPDSS_SIG_ACTIVE_LOW,
- .data_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE,
- .de_level = OMAPDSS_SIG_ACTIVE_HIGH,
- .sync_pclk_edge = OMAPDSS_DRIVE_SIG_FALLING_EDGE,
+ .hsync_len = 64,
+ .hfront_porch = 12,
+ .hback_porch = 68,
+ .vsync_len = 5,
+ .vfront_porch = 5,
+ .vback_porch = 41,
+
+ .flags = DISPLAY_FLAGS_INTERLACED | DISPLAY_FLAGS_HSYNC_LOW |
+ DISPLAY_FLAGS_VSYNC_LOW | DISPLAY_FLAGS_DE_HIGH |
+ DISPLAY_FLAGS_PIXDATA_POSEDGE |
+ DISPLAY_FLAGS_SYNC_NEGEDGE,
};
-EXPORT_SYMBOL(omap_dss_pal_timings);
+EXPORT_SYMBOL(omap_dss_pal_vm);
-const struct omap_video_timings omap_dss_ntsc_timings = {
- .x_res = 720,
- .y_res = 482,
+const struct videomode omap_dss_ntsc_vm = {
+ .hactive = 720,
+ .vactive = 482,
.pixelclock = 13500000,
- .hsw = 64,
- .hfp = 16,
- .hbp = 58,
- .vsw = 6,
- .vfp = 6,
- .vbp = 31,
-
- .interlace = true,
-
- .hsync_level = OMAPDSS_SIG_ACTIVE_LOW,
- .vsync_level = OMAPDSS_SIG_ACTIVE_LOW,
- .data_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE,
- .de_level = OMAPDSS_SIG_ACTIVE_HIGH,
- .sync_pclk_edge = OMAPDSS_DRIVE_SIG_FALLING_EDGE,
+ .hsync_len = 64,
+ .hfront_porch = 16,
+ .hback_porch = 58,
+ .vsync_len = 6,
+ .vfront_porch = 6,
+ .vback_porch = 31,
+
+ .flags = DISPLAY_FLAGS_INTERLACED | DISPLAY_FLAGS_HSYNC_LOW |
+ DISPLAY_FLAGS_VSYNC_LOW | DISPLAY_FLAGS_DE_HIGH |
+ DISPLAY_FLAGS_PIXDATA_POSEDGE |
+ DISPLAY_FLAGS_SYNC_NEGEDGE,
};
-EXPORT_SYMBOL(omap_dss_ntsc_timings);
+EXPORT_SYMBOL(omap_dss_ntsc_vm);
static struct {
struct platform_device *pdev;
@@ -313,7 +307,7 @@ static struct {
struct clk *tv_dac_clk;
- struct omap_video_timings timings;
+ struct videomode vm;
enum omap_dss_venc_type type;
bool invert_polarity;
@@ -427,13 +421,12 @@ static void venc_runtime_put(void)
WARN_ON(r < 0 && r != -ENOSYS);
}
-static const struct venc_config *venc_timings_to_config(
- struct omap_video_timings *timings)
+static const struct venc_config *venc_timings_to_config(struct videomode *vm)
{
- if (memcmp(&omap_dss_pal_timings, timings, sizeof(*timings)) == 0)
+ if (memcmp(&omap_dss_pal_vm, vm, sizeof(*vm)) == 0)
return &venc_config_pal_trm;
- if (memcmp(&omap_dss_ntsc_timings, timings, sizeof(*timings)) == 0)
+ if (memcmp(&omap_dss_ntsc_vm, vm, sizeof(*vm)) == 0)
return &venc_config_ntsc_trm;
BUG();
@@ -451,7 +444,7 @@ static int venc_power_on(struct omap_dss_device *dssdev)
goto err0;
venc_reset();
- venc_write_config(venc_timings_to_config(&venc.timings));
+ venc_write_config(venc_timings_to_config(&venc.vm));
dss_set_venc_output(venc.type);
dss_set_dac_pwrdn_bgz(1);
@@ -468,7 +461,7 @@ static int venc_power_on(struct omap_dss_device *dssdev)
venc_write_reg(VENC_OUTPUT_CONTROL, l);
- dss_mgr_set_timings(channel, &venc.timings);
+ dss_mgr_set_timings(channel, &venc.vm);
r = regulator_enable(venc.vdda_dac_reg);
if (r)
@@ -546,17 +539,17 @@ static void venc_display_disable(struct omap_dss_device *dssdev)
}
static void venc_set_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
DSSDBG("venc_set_timings\n");
mutex_lock(&venc.venc_lock);
/* Reset WSS data when the TV standard changes. */
- if (memcmp(&venc.timings, timings, sizeof(*timings)))
+ if (memcmp(&venc.vm, vm, sizeof(*vm)))
venc.wss_data = 0;
- venc.timings = *timings;
+ venc.vm = *vm;
dispc_set_tv_pclk(13500000);
@@ -564,25 +557,25 @@ static void venc_set_timings(struct omap_dss_device *dssdev,
}
static int venc_check_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
DSSDBG("venc_check_timings\n");
- if (memcmp(&omap_dss_pal_timings, timings, sizeof(*timings)) == 0)
+ if (memcmp(&omap_dss_pal_vm, vm, sizeof(*vm)) == 0)
return 0;
- if (memcmp(&omap_dss_ntsc_timings, timings, sizeof(*timings)) == 0)
+ if (memcmp(&omap_dss_ntsc_vm, vm, sizeof(*vm)) == 0)
return 0;
return -EINVAL;
}
static void venc_get_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
mutex_lock(&venc.venc_lock);
- *timings = venc.timings;
+ *vm = venc.vm;
mutex_unlock(&venc.venc_lock);
}
@@ -602,7 +595,7 @@ static int venc_set_wss(struct omap_dss_device *dssdev, u32 wss)
mutex_lock(&venc.venc_lock);
- config = venc_timings_to_config(&venc.timings);
+ config = venc_timings_to_config(&venc.vm);
/* Invert due to VENC_L21_WC_CTL:INV=1 */
venc.wss_data = (wss ^ 0xfffff) << 8;
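venc_timings_to_config() still identifies the TV standard with a whole-struct memcmp(), so a caller must hand in a struct videomode that is byte-identical to omap_dss_pal_vm or omap_dss_ntsc_vm, flags included; anything else hits the BUG(). A hedged usage sketch through the driver ops:

/* Sketch: only the two exported fixed modes pass venc_check_timings(),
 * since matching is bytewise against them. */
struct videomode vm = omap_dss_pal_vm;		/* copy, flags and all */

if (dssdrv->check_timings(dssdev, &vm) == 0)
	dssdrv->set_timings(dssdev, &vm);	/* selects venc_config_pal_trm */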
diff --git a/drivers/gpu/drm/omapdrm/omap_connector.c b/drivers/gpu/drm/omapdrm/omap_connector.c
index 137fe690a0da..2580e8673908 100644
--- a/drivers/gpu/drm/omapdrm/omap_connector.c
+++ b/drivers/gpu/drm/omapdrm/omap_connector.c
@@ -42,73 +42,6 @@ bool omap_connector_get_hdmi_mode(struct drm_connector *connector)
return omap_connector->hdmi_mode;
}
-void copy_timings_omap_to_drm(struct drm_display_mode *mode,
- struct omap_video_timings *timings)
-{
- mode->clock = timings->pixelclock / 1000;
-
- mode->hdisplay = timings->x_res;
- mode->hsync_start = mode->hdisplay + timings->hfp;
- mode->hsync_end = mode->hsync_start + timings->hsw;
- mode->htotal = mode->hsync_end + timings->hbp;
-
- mode->vdisplay = timings->y_res;
- mode->vsync_start = mode->vdisplay + timings->vfp;
- mode->vsync_end = mode->vsync_start + timings->vsw;
- mode->vtotal = mode->vsync_end + timings->vbp;
-
- mode->flags = 0;
-
- if (timings->interlace)
- mode->flags |= DRM_MODE_FLAG_INTERLACE;
-
- if (timings->double_pixel)
- mode->flags |= DRM_MODE_FLAG_DBLCLK;
-
- if (timings->hsync_level == OMAPDSS_SIG_ACTIVE_HIGH)
- mode->flags |= DRM_MODE_FLAG_PHSYNC;
- else
- mode->flags |= DRM_MODE_FLAG_NHSYNC;
-
- if (timings->vsync_level == OMAPDSS_SIG_ACTIVE_HIGH)
- mode->flags |= DRM_MODE_FLAG_PVSYNC;
- else
- mode->flags |= DRM_MODE_FLAG_NVSYNC;
-}
-
-void copy_timings_drm_to_omap(struct omap_video_timings *timings,
- struct drm_display_mode *mode)
-{
- timings->pixelclock = mode->clock * 1000;
-
- timings->x_res = mode->hdisplay;
- timings->hfp = mode->hsync_start - mode->hdisplay;
- timings->hsw = mode->hsync_end - mode->hsync_start;
- timings->hbp = mode->htotal - mode->hsync_end;
-
- timings->y_res = mode->vdisplay;
- timings->vfp = mode->vsync_start - mode->vdisplay;
- timings->vsw = mode->vsync_end - mode->vsync_start;
- timings->vbp = mode->vtotal - mode->vsync_end;
-
- timings->interlace = !!(mode->flags & DRM_MODE_FLAG_INTERLACE);
- timings->double_pixel = !!(mode->flags & DRM_MODE_FLAG_DBLCLK);
-
- if (mode->flags & DRM_MODE_FLAG_PHSYNC)
- timings->hsync_level = OMAPDSS_SIG_ACTIVE_HIGH;
- else
- timings->hsync_level = OMAPDSS_SIG_ACTIVE_LOW;
-
- if (mode->flags & DRM_MODE_FLAG_PVSYNC)
- timings->vsync_level = OMAPDSS_SIG_ACTIVE_HIGH;
- else
- timings->vsync_level = OMAPDSS_SIG_ACTIVE_LOW;
-
- timings->data_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE;
- timings->de_level = OMAPDSS_SIG_ACTIVE_HIGH;
- timings->sync_pclk_edge = OMAPDSS_DRIVE_SIG_FALLING_EDGE;
-}
-
static enum drm_connector_status omap_connector_detect(
struct drm_connector *connector, bool force)
{
@@ -185,11 +118,11 @@ static int omap_connector_get_modes(struct drm_connector *connector)
kfree(edid);
} else {
struct drm_display_mode *mode = drm_mode_create(dev);
- struct omap_video_timings timings = {0};
+ struct videomode vm = {0};
- dssdrv->get_timings(dssdev, &timings);
+ dssdrv->get_timings(dssdev, &vm);
- copy_timings_omap_to_drm(mode, &timings);
+ drm_display_mode_from_videomode(&vm, mode);
mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
drm_mode_set_name(mode);
@@ -207,12 +140,14 @@ static int omap_connector_mode_valid(struct drm_connector *connector,
struct omap_connector *omap_connector = to_omap_connector(connector);
struct omap_dss_device *dssdev = omap_connector->dssdev;
struct omap_dss_driver *dssdrv = dssdev->driver;
- struct omap_video_timings timings = {0};
+ struct videomode vm = {0};
struct drm_device *dev = connector->dev;
struct drm_display_mode *new_mode;
int r, ret = MODE_BAD;
- copy_timings_drm_to_omap(&timings, mode);
+ drm_display_mode_to_videomode(mode, &vm);
+ vm.flags |= DISPLAY_FLAGS_DE_HIGH | DISPLAY_FLAGS_PIXDATA_POSEDGE |
+ DISPLAY_FLAGS_SYNC_NEGEDGE;
mode->vrefresh = drm_mode_vrefresh(mode);
/*
@@ -221,13 +156,13 @@ static int omap_connector_mode_valid(struct drm_connector *connector,
* panel's timings
*/
if (dssdrv->check_timings) {
- r = dssdrv->check_timings(dssdev, &timings);
+ r = dssdrv->check_timings(dssdev, &vm);
} else {
- struct omap_video_timings t = {0};
+ struct videomode t = {0};
dssdrv->get_timings(dssdev, &t);
- if (memcmp(&timings, &t, sizeof(struct omap_video_timings)))
+ if (memcmp(&vm, &t, sizeof(struct videomode)))
r = -EINVAL;
else
r = 0;
@@ -236,7 +171,7 @@ static int omap_connector_mode_valid(struct drm_connector *connector,
if (!r) {
/* check if vrefresh is still valid */
new_mode = drm_mode_duplicate(dev, mode);
- new_mode->clock = timings.pixelclock / 1000;
+ new_mode->clock = vm.pixelclock / 1000;
new_mode->vrefresh = 0;
if (mode->vrefresh == drm_mode_vrefresh(new_mode))
ret = MODE_OK;
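The hand-rolled copy_timings_omap_to_drm()/copy_timings_drm_to_omap() pair is replaced by the generic helpers from drm_modes.c. drm_display_mode_to_videomode() only translates what a drm_display_mode can express (geometry, sync polarities, interlace, doubleclk), so the DE-level and clock-edge flags must still be ORed in explicitly, as the connector code above now does:

#include <drm/drm_modes.h>
#include <video/videomode.h>

struct videomode vm;

drm_display_mode_to_videomode(mode, &vm);
/* DE level and pixel-data/sync edges have no drm_display_mode
 * equivalent, so they are fixed up by hand: */
vm.flags |= DISPLAY_FLAGS_DE_HIGH | DISPLAY_FLAGS_PIXDATA_POSEDGE |
	    DISPLAY_FLAGS_SYNC_NEGEDGE;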
diff --git a/drivers/gpu/drm/omapdrm/omap_crtc.c b/drivers/gpu/drm/omapdrm/omap_crtc.c
index 180f644e861e..8dea89030e66 100644
--- a/drivers/gpu/drm/omapdrm/omap_crtc.c
+++ b/drivers/gpu/drm/omapdrm/omap_crtc.c
@@ -34,7 +34,7 @@ struct omap_crtc {
const char *name;
enum omap_channel channel;
- struct omap_video_timings timings;
+ struct videomode vm;
struct omap_drm_irq vblank_irq;
struct omap_drm_irq error_irq;
@@ -56,10 +56,10 @@ uint32_t pipe2vbl(struct drm_crtc *crtc)
return dispc_mgr_get_vsync_irq(omap_crtc->channel);
}
-struct omap_video_timings *omap_crtc_timings(struct drm_crtc *crtc)
+struct videomode *omap_crtc_timings(struct drm_crtc *crtc)
{
struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
- return &omap_crtc->timings;
+ return &omap_crtc->vm;
}
enum omap_channel omap_crtc_channel(struct drm_crtc *crtc)
@@ -201,7 +201,7 @@ static int omap_crtc_dss_enable(enum omap_channel channel)
dispc_mgr_setup(omap_crtc->channel, &info);
dispc_mgr_set_timings(omap_crtc->channel,
- &omap_crtc->timings);
+ &omap_crtc->vm);
omap_crtc_set_enabled(&omap_crtc->base, true);
return 0;
@@ -215,11 +215,11 @@ static void omap_crtc_dss_disable(enum omap_channel channel)
}
static void omap_crtc_dss_set_timings(enum omap_channel channel,
- const struct omap_video_timings *timings)
+ const struct videomode *vm)
{
struct omap_crtc *omap_crtc = omap_crtcs[channel];
DBG("%s", omap_crtc->name);
- omap_crtc->timings = *timings;
+ omap_crtc->vm = *vm;
}
static void omap_crtc_dss_set_lcd_config(enum omap_channel channel,
@@ -369,7 +369,10 @@ static void omap_crtc_mode_set_nofb(struct drm_crtc *crtc)
mode->vdisplay, mode->vsync_start, mode->vsync_end, mode->vtotal,
mode->type, mode->flags);
- copy_timings_drm_to_omap(&omap_crtc->timings, mode);
+ drm_display_mode_to_videomode(mode, &omap_crtc->vm);
+ omap_crtc->vm.flags |= DISPLAY_FLAGS_DE_HIGH |
+ DISPLAY_FLAGS_PIXDATA_POSEDGE |
+ DISPLAY_FLAGS_SYNC_NEGEDGE;
}
static int omap_crtc_atomic_check(struct drm_crtc *crtc,
@@ -411,19 +414,6 @@ static void omap_crtc_atomic_flush(struct drm_crtc *crtc,
dispc_mgr_set_gamma(omap_crtc->channel, lut, length);
}
- if (crtc->state->color_mgmt_changed) {
- struct drm_color_lut *lut = NULL;
- uint length = 0;
-
- if (crtc->state->gamma_lut) {
- lut = (struct drm_color_lut *)
- crtc->state->gamma_lut->data;
- length = crtc->state->gamma_lut->length /
- sizeof(*lut);
- }
- dispc_mgr_set_gamma(omap_crtc->channel, lut, length);
- }
-
if (dispc_mgr_is_enabled(omap_crtc->channel)) {
DBG("%s: GO", omap_crtc->name);
@@ -438,13 +428,14 @@ static void omap_crtc_atomic_flush(struct drm_crtc *crtc,
}
}
-static bool omap_crtc_is_plane_prop(struct drm_device *dev,
+static bool omap_crtc_is_plane_prop(struct drm_crtc *crtc,
struct drm_property *property)
{
+ struct drm_device *dev = crtc->dev;
struct omap_drm_private *priv = dev->dev_private;
return property == priv->zorder_prop ||
- property == dev->mode_config.rotation_property;
+ property == crtc->primary->rotation_property;
}
static int omap_crtc_atomic_set_property(struct drm_crtc *crtc,
@@ -452,9 +443,7 @@ static int omap_crtc_atomic_set_property(struct drm_crtc *crtc,
struct drm_property *property,
uint64_t val)
{
- struct drm_device *dev = crtc->dev;
-
- if (omap_crtc_is_plane_prop(dev, property)) {
+ if (omap_crtc_is_plane_prop(crtc, property)) {
struct drm_plane_state *plane_state;
struct drm_plane *plane = crtc->primary;
@@ -479,9 +468,7 @@ static int omap_crtc_atomic_get_property(struct drm_crtc *crtc,
struct drm_property *property,
uint64_t *val)
{
- struct drm_device *dev = crtc->dev;
-
- if (omap_crtc_is_plane_prop(dev, property)) {
+ if (omap_crtc_is_plane_prop(crtc, property)) {
/*
* Delegate property get to the primary plane. The
* drm_atomic_plane_get_property() function isn't exported, but
diff --git a/drivers/gpu/drm/omapdrm/omap_drv.c b/drivers/gpu/drm/omapdrm/omap_drv.c
index e1cfba51cff6..fdc83cbcde61 100644
--- a/drivers/gpu/drm/omapdrm/omap_drv.c
+++ b/drivers/gpu/drm/omapdrm/omap_drv.c
@@ -105,7 +105,7 @@ static void omap_atomic_complete(struct omap_atomic_state_commit *commit)
dispc_runtime_put();
- drm_atomic_state_free(old_state);
+ drm_atomic_state_put(old_state);
/* Complete the commit, wake up any waiter. */
spin_lock(&priv->commit.lock);
@@ -176,6 +176,7 @@ static int omap_atomic_commit(struct drm_device *dev,
/* Swap the state, this is the point of no return. */
drm_atomic_helper_swap_state(state, true);
+ drm_atomic_state_get(state);
if (nonblock)
schedule_work(&commit->work);
else
@@ -266,13 +267,15 @@ cleanup:
}
static int omap_modeset_create_crtc(struct drm_device *dev, int id,
- enum omap_channel channel)
+ enum omap_channel channel,
+ u32 possible_crtcs)
{
struct omap_drm_private *priv = dev->dev_private;
struct drm_plane *plane;
struct drm_crtc *crtc;
- plane = omap_plane_init(dev, id, DRM_PLANE_TYPE_PRIMARY);
+ plane = omap_plane_init(dev, id, DRM_PLANE_TYPE_PRIMARY,
+ possible_crtcs);
if (IS_ERR(plane))
return PTR_ERR(plane);
@@ -292,16 +295,6 @@ static int omap_modeset_init_properties(struct drm_device *dev)
{
struct omap_drm_private *priv = dev->dev_private;
- if (priv->has_dmm) {
- dev->mode_config.rotation_property =
- drm_mode_create_rotation_property(dev,
- DRM_ROTATE_0 | DRM_ROTATE_90 |
- DRM_ROTATE_180 | DRM_ROTATE_270 |
- DRM_REFLECT_X | DRM_REFLECT_Y);
- if (!dev->mode_config.rotation_property)
- return -ENOMEM;
- }
-
priv->zorder_prop = drm_property_create_range(dev, 0, "zorder", 0, 3);
if (!priv->zorder_prop)
return -ENOMEM;
@@ -318,6 +311,7 @@ static int omap_modeset_init(struct drm_device *dev)
int num_crtcs;
int i, id = 0;
int ret;
+ u32 possible_crtcs;
drm_mode_config_init(dev);
@@ -334,6 +328,7 @@ static int omap_modeset_init(struct drm_device *dev)
* We use the num_crtc argument to limit the number of crtcs we create.
*/
num_crtcs = min3(num_crtc, num_mgrs, num_ovls);
+ possible_crtcs = (1 << num_crtcs) - 1;
dssdev = NULL;
@@ -397,7 +392,8 @@ static int omap_modeset_init(struct drm_device *dev)
* allocated crtc, we create a new crtc for it
*/
if (!channel_used(dev, channel)) {
- ret = omap_modeset_create_crtc(dev, id, channel);
+ ret = omap_modeset_create_crtc(dev, id, channel,
+ possible_crtcs);
if (ret < 0) {
dev_err(dev->dev,
"could not create CRTC (channel %u)\n",
@@ -427,7 +423,8 @@ static int omap_modeset_init(struct drm_device *dev)
return -ENOMEM;
}
- ret = omap_modeset_create_crtc(dev, id, i);
+ ret = omap_modeset_create_crtc(dev, id, i,
+ possible_crtcs);
if (ret < 0) {
dev_err(dev->dev,
"could not create CRTC (channel %u)\n", i);
@@ -441,7 +438,8 @@ static int omap_modeset_init(struct drm_device *dev)
for (; id < num_ovls; id++) {
struct drm_plane *plane;
- plane = omap_plane_init(dev, id, DRM_PLANE_TYPE_OVERLAY);
+ plane = omap_plane_init(dev, id, DRM_PLANE_TYPE_OVERLAY,
+ possible_crtcs);
if (IS_ERR(plane))
return PTR_ERR(plane);
@@ -752,22 +750,32 @@ static void dev_lastclose(struct drm_device *dev)
DBG("lastclose: dev=%p", dev);
- if (dev->mode_config.rotation_property) {
- /* need to restore default rotation state.. not sure
- * if there is a cleaner way to restore properties to
- * default state? Maybe a flag that properties should
- * automatically be restored to default state on
- * lastclose?
- */
- for (i = 0; i < priv->num_crtcs; i++) {
- drm_object_property_set_value(&priv->crtcs[i]->base,
- dev->mode_config.rotation_property, 0);
- }
+ /* need to restore default rotation state.. not sure
+ * if there is a cleaner way to restore properties to
+ * default state? Maybe a flag that properties should
+ * automatically be restored to default state on
+ * lastclose?
+ */
+ for (i = 0; i < priv->num_crtcs; i++) {
+ struct drm_crtc *crtc = priv->crtcs[i];
- for (i = 0; i < priv->num_planes; i++) {
- drm_object_property_set_value(&priv->planes[i]->base,
- dev->mode_config.rotation_property, 0);
- }
+ if (!crtc->primary->rotation_property)
+ continue;
+
+ drm_object_property_set_value(&crtc->base,
+ crtc->primary->rotation_property,
+ DRM_ROTATE_0);
+ }
+
+ for (i = 0; i < priv->num_planes; i++) {
+ struct drm_plane *plane = priv->planes[i];
+
+ if (!plane->rotation_property)
+ continue;
+
+ drm_object_property_set_value(&plane->base,
+ plane->rotation_property,
+ DRM_ROTATE_0);
}
if (priv->fbdev) {
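The drm_atomic_state_get()/drm_atomic_state_put() calls above adapt omapdrm to reference-counted atomic state: drm_atomic_state_free() no longer exists, and a commit that outlives the ioctl (the nonblocking worker here) must hold its own reference. A minimal sketch of the new lifetime rule:

/* After the swap, the commit path takes its own reference... */
drm_atomic_helper_swap_state(state, true);
drm_atomic_state_get(state);
schedule_work(&commit->work);

/* ...and drops it once the commit has completed: */
drm_atomic_state_put(old_state);	/* replaces drm_atomic_state_free() */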
diff --git a/drivers/gpu/drm/omapdrm/omap_drv.h b/drivers/gpu/drm/omapdrm/omap_drv.h
index dcc30a98b9d4..7d9dd5400cef 100644
--- a/drivers/gpu/drm/omapdrm/omap_drv.h
+++ b/drivers/gpu/drm/omapdrm/omap_drv.h
@@ -148,7 +148,7 @@ static inline void omap_fbdev_free(struct drm_device *dev)
}
#endif
-struct omap_video_timings *omap_crtc_timings(struct drm_crtc *crtc);
+struct videomode *omap_crtc_timings(struct drm_crtc *crtc);
enum omap_channel omap_crtc_channel(struct drm_crtc *crtc);
void omap_crtc_pre_init(void);
void omap_crtc_pre_uninit(void);
@@ -157,7 +157,8 @@ struct drm_crtc *omap_crtc_init(struct drm_device *dev,
int omap_crtc_wait_pending(struct drm_crtc *crtc);
struct drm_plane *omap_plane_init(struct drm_device *dev,
- int id, enum drm_plane_type type);
+ int id, enum drm_plane_type type,
+ u32 possible_crtcs);
void omap_plane_install_properties(struct drm_plane *plane,
struct drm_mode_object *obj);
@@ -171,11 +172,6 @@ struct drm_encoder *omap_connector_attached_encoder(
struct drm_connector *connector);
bool omap_connector_get_hdmi_mode(struct drm_connector *connector);
-void copy_timings_omap_to_drm(struct drm_display_mode *mode,
- struct omap_video_timings *timings);
-void copy_timings_drm_to_omap(struct omap_video_timings *timings,
- struct drm_display_mode *mode);
-
uint32_t omap_framebuffer_get_formats(uint32_t *pixel_formats,
uint32_t max_formats, enum omap_color_mode supported_modes);
struct drm_framebuffer *omap_framebuffer_create(struct drm_device *dev,
diff --git a/drivers/gpu/drm/omapdrm/omap_encoder.c b/drivers/gpu/drm/omapdrm/omap_encoder.c
index 0bbb9c59622e..a20f30039aee 100644
--- a/drivers/gpu/drm/omapdrm/omap_encoder.c
+++ b/drivers/gpu/drm/omapdrm/omap_encoder.c
@@ -102,7 +102,7 @@ static void omap_encoder_disable(struct drm_encoder *encoder)
static int omap_encoder_update(struct drm_encoder *encoder,
enum omap_channel channel,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
struct drm_device *dev = encoder->dev;
struct omap_encoder *omap_encoder = to_omap_encoder(encoder);
@@ -111,13 +111,13 @@ static int omap_encoder_update(struct drm_encoder *encoder,
int ret;
if (dssdrv->check_timings) {
- ret = dssdrv->check_timings(dssdev, timings);
+ ret = dssdrv->check_timings(dssdev, vm);
} else {
- struct omap_video_timings t = {0};
+ struct videomode t = {0};
dssdrv->get_timings(dssdev, &t);
- if (memcmp(timings, &t, sizeof(struct omap_video_timings)))
+ if (memcmp(vm, &t, sizeof(struct videomode)))
ret = -EINVAL;
else
ret = 0;
@@ -129,7 +129,7 @@ static int omap_encoder_update(struct drm_encoder *encoder,
}
if (dssdrv->set_timings)
- dssdrv->set_timings(dssdev, timings);
+ dssdrv->set_timings(dssdev, vm);
return 0;
}
diff --git a/drivers/gpu/drm/omapdrm/omap_fbdev.c b/drivers/gpu/drm/omapdrm/omap_fbdev.c
index adb10fbe918d..8d8ac173f55d 100644
--- a/drivers/gpu/drm/omapdrm/omap_fbdev.c
+++ b/drivers/gpu/drm/omapdrm/omap_fbdev.c
@@ -82,6 +82,7 @@ fallback:
static struct fb_ops omap_fb_ops = {
.owner = THIS_MODULE,
+ DRM_FB_HELPER_DEFAULT_OPS,
/* Note: to properly handle manual update displays, we wrap the
* basic fbdev ops which write to the framebuffer
@@ -92,11 +93,7 @@ static struct fb_ops omap_fb_ops = {
.fb_copyarea = drm_fb_helper_sys_copyarea,
.fb_imageblit = drm_fb_helper_sys_imageblit,
- .fb_check_var = drm_fb_helper_check_var,
- .fb_set_par = drm_fb_helper_set_par,
.fb_pan_display = omap_fbdev_pan_display,
- .fb_blank = drm_fb_helper_blank,
- .fb_setcmap = drm_fb_helper_setcmap,
};
static int omap_fbdev_create(struct drm_fb_helper *helper,
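DRM_FB_HELPER_DEFAULT_OPS expands to the stock drm_fb_helper_* entries (fb_check_var, fb_set_par, fb_blank, fb_setcmap, fb_pan_display and the debug hooks), which is why the explicit assignments can be dropped above. The driver still overrides .fb_pan_display afterwards, relying on the C rule that a repeated designated initializer is legal and the last one wins:

/* Sketch: the macro supplies a default .fb_pan_display; the later
 * explicit initializer overrides it (last designated initializer wins). */
static struct fb_ops example_fb_ops = {
	.owner = THIS_MODULE,
	DRM_FB_HELPER_DEFAULT_OPS,
	.fb_pan_display = omap_fbdev_pan_display, /* manual-update wrapper */
};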
diff --git a/drivers/gpu/drm/omapdrm/omap_gem.c b/drivers/gpu/drm/omapdrm/omap_gem.c
index 505dee0db973..4a90c690f09e 100644
--- a/drivers/gpu/drm/omapdrm/omap_gem.c
+++ b/drivers/gpu/drm/omapdrm/omap_gem.c
@@ -336,8 +336,10 @@ static void omap_gem_detach_pages(struct drm_gem_object *obj)
if (omap_obj->flags & (OMAP_BO_WC|OMAP_BO_UNCACHED)) {
int i, npages = obj->size >> PAGE_SHIFT;
for (i = 0; i < npages; i++) {
- dma_unmap_page(obj->dev->dev, omap_obj->addrs[i],
- PAGE_SIZE, DMA_BIDIRECTIONAL);
+ if (omap_obj->addrs[i])
+ dma_unmap_page(obj->dev->dev,
+ omap_obj->addrs[i],
+ PAGE_SIZE, DMA_BIDIRECTIONAL);
}
}
@@ -396,8 +398,7 @@ static int fault_1d(struct drm_gem_object *obj,
pgoff_t pgoff;
/* We don't use vmf->pgoff since that has the fake offset: */
- pgoff = ((unsigned long)vmf->virtual_address -
- vma->vm_start) >> PAGE_SHIFT;
+ pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
if (omap_obj->pages) {
omap_gem_cpu_sync(obj, pgoff);
@@ -407,11 +408,10 @@ static int fault_1d(struct drm_gem_object *obj,
pfn = (omap_obj->paddr >> PAGE_SHIFT) + pgoff;
}
- VERB("Inserting %p pfn %lx, pa %lx", vmf->virtual_address,
+ VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
pfn, pfn << PAGE_SHIFT);
- return vm_insert_mixed(vma, (unsigned long)vmf->virtual_address,
- __pfn_to_pfn_t(pfn, PFN_DEV));
+ return vm_insert_mixed(vma, vmf->address, __pfn_to_pfn_t(pfn, PFN_DEV));
}
/* Special handling for the case of faulting in 2d tiled buffers */
@@ -425,7 +425,7 @@ static int fault_2d(struct drm_gem_object *obj,
struct page *pages[64]; /* XXX is this too much to have on stack? */
unsigned long pfn;
pgoff_t pgoff, base_pgoff;
- void __user *vaddr;
+ unsigned long vaddr;
int i, ret, slots;
/*
@@ -445,8 +445,7 @@ static int fault_2d(struct drm_gem_object *obj,
const int m = 1 + ((omap_obj->width << fmt) / PAGE_SIZE);
/* We don't use vmf->pgoff since that has the fake offset: */
- pgoff = ((unsigned long)vmf->virtual_address -
- vma->vm_start) >> PAGE_SHIFT;
+ pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
/*
* Actual address we start mapping at is rounded down to previous slot
@@ -457,7 +456,7 @@ static int fault_2d(struct drm_gem_object *obj,
/* figure out buffer width in slots */
slots = omap_obj->width >> priv->usergart[fmt].slot_shift;
- vaddr = vmf->virtual_address - ((pgoff - base_pgoff) << PAGE_SHIFT);
+ vaddr = vmf->address - ((pgoff - base_pgoff) << PAGE_SHIFT);
entry = &priv->usergart[fmt].entry[priv->usergart[fmt].last];
@@ -501,12 +500,11 @@ static int fault_2d(struct drm_gem_object *obj,
pfn = entry->paddr >> PAGE_SHIFT;
- VERB("Inserting %p pfn %lx, pa %lx", vmf->virtual_address,
+ VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
pfn, pfn << PAGE_SHIFT);
for (i = n; i > 0; i--) {
- vm_insert_mixed(vma, (unsigned long)vaddr,
- __pfn_to_pfn_t(pfn, PFN_DEV));
+ vm_insert_mixed(vma, vaddr, __pfn_to_pfn_t(pfn, PFN_DEV));
pfn += priv->usergart[fmt].stride_pfn;
vaddr += PAGE_SIZE * m;
}
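The fault-handler edits track a core mm change: struct vm_fault now carries the faulting address as an unsigned long address instead of a void *virtual_address, which removes the casts in fault_1d() and fault_2d(). The offset computation reduces to:

/* Page offset of the fault within the mapping, new-style: */
pgoff_t pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;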
diff --git a/drivers/gpu/drm/omapdrm/omap_plane.c b/drivers/gpu/drm/omapdrm/omap_plane.c
index 66ac8c40db26..82b2c23d6769 100644
--- a/drivers/gpu/drm/omapdrm/omap_plane.c
+++ b/drivers/gpu/drm/omapdrm/omap_plane.c
@@ -108,16 +108,12 @@ static void omap_plane_atomic_update(struct drm_plane *plane,
win.src_x = state->src_x >> 16;
win.src_y = state->src_y >> 16;
- switch (state->rotation & DRM_ROTATE_MASK) {
- case DRM_ROTATE_90:
- case DRM_ROTATE_270:
+ if (drm_rotation_90_or_270(state->rotation)) {
win.src_w = state->src_h >> 16;
win.src_h = state->src_w >> 16;
- break;
- default:
+ } else {
win.src_w = state->src_w >> 16;
win.src_h = state->src_h >> 16;
- break;
}
/* update scanout: */
@@ -135,7 +131,9 @@ static void omap_plane_atomic_update(struct drm_plane *plane,
/* and finally, update omapdss: */
ret = dispc_ovl_setup(omap_plane->id, &info, false,
omap_crtc_timings(state->crtc), false);
- if (WARN_ON(ret)) {
+ if (ret) {
+ dev_err(plane->dev->dev, "Failed to setup plane %s\n",
+ omap_plane->name);
dispc_ovl_enable(omap_plane->id, false);
return;
}
@@ -161,12 +159,20 @@ static int omap_plane_atomic_check(struct drm_plane *plane,
{
struct drm_crtc_state *crtc_state;
- if (!state->crtc)
+ if (!state->fb)
return 0;
- crtc_state = drm_atomic_get_crtc_state(state->state, state->crtc);
- if (IS_ERR(crtc_state))
- return PTR_ERR(crtc_state);
+ /* crtc should only be NULL when disabling (i.e., !state->fb) */
+ if (WARN_ON(!state->crtc))
+ return 0;
+
+ crtc_state = drm_atomic_get_existing_crtc_state(state->state, state->crtc);
+ /* we should have a crtc state if the plane is attached to a crtc */
+ if (WARN_ON(!crtc_state))
+ return 0;
+
+ if (!crtc_state->enable)
+ return 0;
if (state->crtc_x < 0 || state->crtc_y < 0)
return -EINVAL;
@@ -177,11 +183,9 @@ static int omap_plane_atomic_check(struct drm_plane *plane,
if (state->crtc_y + state->crtc_h > crtc_state->adjusted_mode.vdisplay)
return -EINVAL;
- if (state->fb) {
- if (state->rotation != DRM_ROTATE_0 &&
- !omap_framebuffer_supports_rotation(state->fb))
- return -EINVAL;
- }
+ if (state->rotation != DRM_ROTATE_0 &&
+ !omap_framebuffer_supports_rotation(state->fb))
+ return -EINVAL;
return 0;
}
@@ -215,9 +219,17 @@ void omap_plane_install_properties(struct drm_plane *plane,
struct omap_drm_private *priv = dev->dev_private;
if (priv->has_dmm) {
- struct drm_property *prop = dev->mode_config.rotation_property;
-
- drm_object_attach_property(obj, prop, 0);
+ if (!plane->rotation_property)
+ drm_plane_create_rotation_property(plane,
+ DRM_ROTATE_0,
+ DRM_ROTATE_0 | DRM_ROTATE_90 |
+ DRM_ROTATE_180 | DRM_ROTATE_270 |
+ DRM_REFLECT_X | DRM_REFLECT_Y);
+
+ /* Attach the rotation property also to the crtc object */
+ if (plane->rotation_property && obj != &plane->base)
+ drm_object_attach_property(obj, plane->rotation_property,
+ DRM_ROTATE_0);
}
drm_object_attach_property(obj, priv->zorder_prop, 0);
@@ -344,9 +356,9 @@ static const uint32_t error_irqs[] = {
/* initialize plane */
struct drm_plane *omap_plane_init(struct drm_device *dev,
- int id, enum drm_plane_type type)
+ int id, enum drm_plane_type type,
+ u32 possible_crtcs)
{
- struct omap_drm_private *priv = dev->dev_private;
struct drm_plane *plane;
struct omap_plane *omap_plane;
int ret;
@@ -369,7 +381,7 @@ struct drm_plane *omap_plane_init(struct drm_device *dev,
omap_plane->error_irq.irq = omap_plane_error_irq;
omap_irq_register(dev, &omap_plane->error_irq);
- ret = drm_universal_plane_init(dev, plane, (1 << priv->num_crtcs) - 1,
+ ret = drm_universal_plane_init(dev, plane, possible_crtcs,
&omap_plane_funcs, omap_plane->formats,
omap_plane->nformats, type, NULL);
if (ret < 0)
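Rotation moves from the single device-wide dev->mode_config.rotation_property to per-plane properties created with drm_plane_create_rotation_property(); the committed value can then be classified with drm_rotation_90_or_270(), as omap_plane_atomic_update() does above. A hedged sketch of both halves:

#include <drm/drm_blend.h>

/* Per-plane rotation property (replaces the shared mode_config one): */
drm_plane_create_rotation_property(plane, DRM_ROTATE_0,
				   DRM_ROTATE_0 | DRM_ROTATE_90 |
				   DRM_ROTATE_180 | DRM_ROTATE_270 |
				   DRM_REFLECT_X | DRM_REFLECT_Y);

/* In atomic update, 90/270 degree rotations swap the source w/h: */
if (drm_rotation_90_or_270(state->rotation)) {
	win.src_w = state->src_h >> 16;
	win.src_h = state->src_w >> 16;
}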
diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c
index 113db3c4a633..06aaf79de8c8 100644
--- a/drivers/gpu/drm/panel/panel-simple.c
+++ b/drivers/gpu/drm/panel/panel-simple.c
@@ -120,7 +120,7 @@ static int panel_simple_get_fixed_modes(struct panel_simple *panel)
mode->type |= DRM_MODE_TYPE_DRIVER;
- if (panel->desc->num_modes == 1)
+ if (panel->desc->num_timings == 1)
mode->type |= DRM_MODE_TYPE_PREFERRED;
drm_mode_probed_add(connector, mode);
@@ -555,6 +555,91 @@ static const struct panel_desc auo_b133htn01 = {
},
};
+static const struct display_timing auo_g133han01_timings = {
+ .pixelclock = { 134000000, 141200000, 149000000 },
+ .hactive = { 1920, 1920, 1920 },
+ .hfront_porch = { 39, 58, 77 },
+ .hback_porch = { 59, 88, 117 },
+ .hsync_len = { 28, 42, 56 },
+ .vactive = { 1080, 1080, 1080 },
+ .vfront_porch = { 3, 8, 11 },
+ .vback_porch = { 5, 14, 19 },
+ .vsync_len = { 4, 14, 19 },
+};
+
+static const struct panel_desc auo_g133han01 = {
+ .timings = &auo_g133han01_timings,
+ .num_timings = 1,
+ .bpc = 8,
+ .size = {
+ .width = 293,
+ .height = 165,
+ },
+ .delay = {
+ .prepare = 200,
+ .enable = 50,
+ .disable = 50,
+ .unprepare = 1000,
+ },
+ .bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_JEIDA,
+};
+
+static const struct display_timing auo_g185han01_timings = {
+ .pixelclock = { 120000000, 144000000, 175000000 },
+ .hactive = { 1920, 1920, 1920 },
+ .hfront_porch = { 18, 60, 74 },
+ .hback_porch = { 12, 44, 54 },
+ .hsync_len = { 10, 24, 32 },
+ .vactive = { 1080, 1080, 1080 },
+ .vfront_porch = { 6, 10, 40 },
+ .vback_porch = { 2, 5, 20 },
+ .vsync_len = { 2, 5, 20 },
+};
+
+static const struct panel_desc auo_g185han01 = {
+ .timings = &auo_g185han01_timings,
+ .num_timings = 1,
+ .bpc = 8,
+ .size = {
+ .width = 409,
+ .height = 230,
+ },
+ .delay = {
+ .prepare = 50,
+ .enable = 200,
+ .disable = 110,
+ .unprepare = 1000,
+ },
+ .bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
+};
+
+static const struct drm_display_mode auo_t215hvn01_mode = {
+ .clock = 148800,
+ .hdisplay = 1920,
+ .hsync_start = 1920 + 88,
+ .hsync_end = 1920 + 88 + 44,
+ .htotal = 1920 + 88 + 44 + 148,
+ .vdisplay = 1080,
+ .vsync_start = 1080 + 4,
+ .vsync_end = 1080 + 4 + 5,
+ .vtotal = 1080 + 4 + 5 + 36,
+ .vrefresh = 60,
+};
+
+static const struct panel_desc auo_t215hvn01 = {
+ .modes = &auo_t215hvn01_mode,
+ .num_modes = 1,
+ .bpc = 8,
+ .size = {
+ .width = 430,
+ .height = 270,
+ },
+ .delay = {
+ .disable = 5,
+ .unprepare = 1000,
+ }
+};
+
static const struct drm_display_mode avic_tm070ddh03_mode = {
.clock = 51200,
.hdisplay = 1024,
@@ -583,6 +668,30 @@ static const struct panel_desc avic_tm070ddh03 = {
},
};
+static const struct drm_display_mode chunghwa_claa070wp03xg_mode = {
+ .clock = 66770,
+ .hdisplay = 800,
+ .hsync_start = 800 + 49,
+ .hsync_end = 800 + 49 + 33,
+ .htotal = 800 + 49 + 33 + 17,
+ .vdisplay = 1280,
+ .vsync_start = 1280 + 1,
+ .vsync_end = 1280 + 1 + 7,
+ .vtotal = 1280 + 1 + 7 + 15,
+ .vrefresh = 60,
+ .flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC,
+};
+
+static const struct panel_desc chunghwa_claa070wp03xg = {
+ .modes = &chunghwa_claa070wp03xg_mode,
+ .num_modes = 1,
+ .bpc = 6,
+ .size = {
+ .width = 94,
+ .height = 150,
+ },
+};
+
static const struct drm_display_mode chunghwa_claa101wa01a_mode = {
.clock = 72070,
.hdisplay = 1366,
@@ -877,27 +986,31 @@ static const struct panel_desc innolux_g101ice_l01 = {
.bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
};
-static const struct drm_display_mode innolux_g121i1_l01_mode = {
- .clock = 71000,
- .hdisplay = 1280,
- .hsync_start = 1280 + 64,
- .hsync_end = 1280 + 64 + 32,
- .htotal = 1280 + 64 + 32 + 64,
- .vdisplay = 800,
- .vsync_start = 800 + 9,
- .vsync_end = 800 + 9 + 6,
- .vtotal = 800 + 9 + 6 + 9,
- .vrefresh = 60,
+static const struct display_timing innolux_g121i1_l01_timing = {
+ .pixelclock = { 67450000, 71000000, 74550000 },
+ .hactive = { 1280, 1280, 1280 },
+ .hfront_porch = { 40, 80, 160 },
+ .hback_porch = { 39, 79, 159 },
+ .hsync_len = { 1, 1, 1 },
+ .vactive = { 800, 800, 800 },
+ .vfront_porch = { 5, 11, 100 },
+ .vback_porch = { 4, 11, 99 },
+ .vsync_len = { 1, 1, 1 },
};
static const struct panel_desc innolux_g121i1_l01 = {
- .modes = &innolux_g121i1_l01_mode,
- .num_modes = 1,
+ .timings = &innolux_g121i1_l01_timing,
+ .num_timings = 1,
.bpc = 6,
.size = {
.width = 261,
.height = 163,
},
+ .delay = {
+ .enable = 200,
+ .disable = 20,
+ },
+ .bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
};
static const struct drm_display_mode innolux_g121x1_l03_mode = {
@@ -1164,6 +1277,29 @@ static const struct panel_desc nec_nl4827hc19_05b = {
.bus_flags = DRM_BUS_FLAG_PIXDATA_POSEDGE,
};
+static const struct drm_display_mode nvd_9128_mode = {
+ .clock = 29500,
+ .hdisplay = 800,
+ .hsync_start = 800 + 130,
+ .hsync_end = 800 + 130 + 98,
+ .htotal = 800 + 0 + 130 + 98,
+ .vdisplay = 480,
+ .vsync_start = 480 + 10,
+ .vsync_end = 480 + 10 + 50,
+ .vtotal = 480 + 0 + 10 + 50,
+};
+
+static const struct panel_desc nvd_9128 = {
+ .modes = &nvd_9128_mode,
+ .num_modes = 1,
+ .bpc = 8,
+ .size = {
+ .width = 156,
+ .height = 88,
+ },
+ .bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
+};
+
static const struct display_timing okaya_rs800480t_7x0gp_timing = {
.pixelclock = { 30000000, 30000000, 40000000 },
.hactive = { 800, 800, 800 },
@@ -1409,6 +1545,7 @@ static const struct drm_display_mode sharp_lq123p1jx31_mode = {
static const struct panel_desc sharp_lq123p1jx31 = {
.modes = &sharp_lq123p1jx31_mode,
.num_modes = 1,
+ .bpc = 8,
.size = {
.width = 259,
.height = 173,
@@ -1420,6 +1557,30 @@ static const struct panel_desc sharp_lq123p1jx31 = {
},
};
+static const struct drm_display_mode sharp_lq150x1lg11_mode = {
+ .clock = 71100,
+ .hdisplay = 1024,
+ .hsync_start = 1024 + 168,
+ .hsync_end = 1024 + 168 + 64,
+ .htotal = 1024 + 168 + 64 + 88,
+ .vdisplay = 768,
+ .vsync_start = 768 + 37,
+ .vsync_end = 768 + 37 + 2,
+ .vtotal = 768 + 37 + 2 + 8,
+ .vrefresh = 60,
+};
+
+static const struct panel_desc sharp_lq150x1lg11 = {
+ .modes = &sharp_lq150x1lg11_mode,
+ .num_modes = 1,
+ .bpc = 6,
+ .size = {
+ .width = 304,
+ .height = 228,
+ },
+ .bus_format = MEDIA_BUS_FMT_RGB565_1X16,
+};
+
static const struct drm_display_mode shelly_sca07010_bfn_lnn_mode = {
.clock = 33300,
.hdisplay = 800,
@@ -1575,9 +1736,21 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "auo,b133xtn01",
.data = &auo_b133xtn01,
}, {
+ .compatible = "auo,g133han01",
+ .data = &auo_g133han01,
+ }, {
+ .compatible = "auo,g185han01",
+ .data = &auo_g185han01,
+ }, {
+ .compatible = "auo,t215hvn01",
+ .data = &auo_t215hvn01,
+ }, {
.compatible = "avic,tm070ddh03",
.data = &avic_tm070ddh03,
}, {
+ .compatible = "chunghwa,claa070wp03xg",
+ .data = &chunghwa_claa070wp03xg,
+ }, {
.compatible = "chunghwa,claa101wa01a",
.data = &chunghwa_claa101wa01a
}, {
@@ -1653,6 +1826,9 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "nec,nl4827hc19-05b",
.data = &nec_nl4827hc19_05b,
}, {
+ .compatible = "nvd,9128",
+ .data = &nvd_9128,
+ }, {
.compatible = "okaya,rs800480t-7x0gp",
.data = &okaya_rs800480t_7x0gp,
}, {
@@ -1683,6 +1859,9 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "sharp,lq123p1jx31",
.data = &sharp_lq123p1jx31,
}, {
+ .compatible = "sharp,lq150x1lg11",
+ .data = &sharp_lq150x1lg11,
+ }, {
.compatible = "shelly,sca07010-bfn-lnn",
.data = &shelly_sca07010_bfn_lnn,
}, {
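
Several panels added above are described with struct display_timing, which carries {min, typical, max} triplets for the pixel clock and each blanking interval rather than one fixed mode, letting the driver pick values within the panel's tolerances. A standalone sanity check (plain C) using the typical column of the auo_g133han01 timing shows it still resolves to roughly 60 Hz:

#include <stdio.h>

int main(void)
{
        unsigned long pixelclock = 141200000UL;      /* typ, Hz */
        unsigned int htotal = 1920 + 58 + 88 + 42;   /* active+fp+bp+sync */
        unsigned int vtotal = 1080 + 8 + 14 + 14;

        printf("refresh ~= %lu Hz\n", pixelclock / (htotal * vtotal));
        return 0;
}
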
diff --git a/drivers/gpu/drm/qxl/qxl_cmd.c b/drivers/gpu/drm/qxl/qxl_cmd.c
index 04270f5d110c..74fc9362ecf9 100644
--- a/drivers/gpu/drm/qxl/qxl_cmd.c
+++ b/drivers/gpu/drm/qxl/qxl_cmd.c
@@ -578,7 +578,7 @@ int qxl_hw_surface_dealloc(struct qxl_device *qdev,
return 0;
}
-int qxl_update_surface(struct qxl_device *qdev, struct qxl_bo *surf)
+static int qxl_update_surface(struct qxl_device *qdev, struct qxl_bo *surf)
{
struct qxl_rect rect;
int ret;
diff --git a/drivers/gpu/drm/qxl/qxl_debugfs.c b/drivers/gpu/drm/qxl/qxl_debugfs.c
index 6911b8c44492..241af9131dc8 100644
--- a/drivers/gpu/drm/qxl/qxl_debugfs.c
+++ b/drivers/gpu/drm/qxl/qxl_debugfs.c
@@ -123,9 +123,6 @@ int qxl_debugfs_add_files(struct qxl_device *qdev,
qdev->debugfs_count = i;
#if defined(CONFIG_DEBUG_FS)
drm_debugfs_create_files(files, nfiles,
- qdev->ddev->control->debugfs_root,
- qdev->ddev->control);
- drm_debugfs_create_files(files, nfiles,
qdev->ddev->primary->debugfs_root,
qdev->ddev->primary);
#endif
@@ -140,9 +137,6 @@ void qxl_debugfs_remove_files(struct qxl_device *qdev)
for (i = 0; i < qdev->debugfs_count; i++) {
drm_debugfs_remove_files(qdev->debugfs[i].files,
qdev->debugfs[i].num_files,
- qdev->ddev->control);
- drm_debugfs_remove_files(qdev->debugfs[i].files,
- qdev->debugfs[i].num_files,
qdev->ddev->primary);
}
#endif
diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c
index a61c0d460ec2..4b5eab8a47b3 100644
--- a/drivers/gpu/drm/qxl/qxl_display.c
+++ b/drivers/gpu/drm/qxl/qxl_display.c
@@ -36,7 +36,7 @@ static bool qxl_head_enabled(struct qxl_head *head)
return head->width && head->height;
}
-void qxl_alloc_client_monitors_config(struct qxl_device *qdev, unsigned count)
+static void qxl_alloc_client_monitors_config(struct qxl_device *qdev, unsigned count)
{
if (qdev->client_monitors_config &&
count > qdev->client_monitors_config->count) {
@@ -57,11 +57,18 @@ void qxl_alloc_client_monitors_config(struct qxl_device *qdev, unsigned count)
qdev->client_monitors_config->count = count;
}
+enum {
+ MONITORS_CONFIG_MODIFIED,
+ MONITORS_CONFIG_UNCHANGED,
+ MONITORS_CONFIG_BAD_CRC,
+};
+
static int qxl_display_copy_rom_client_monitors_config(struct qxl_device *qdev)
{
int i;
int num_monitors;
uint32_t crc;
+ int status = MONITORS_CONFIG_UNCHANGED;
num_monitors = qdev->rom->client_monitors_config.count;
crc = crc32(0, (const uint8_t *)&qdev->rom->client_monitors_config,
@@ -70,7 +77,7 @@ static int qxl_display_copy_rom_client_monitors_config(struct qxl_device *qdev)
qxl_io_log(qdev, "crc mismatch: have %X (%zd) != %X\n", crc,
sizeof(qdev->rom->client_monitors_config),
qdev->rom->client_monitors_config_crc);
- return 1;
+ return MONITORS_CONFIG_BAD_CRC;
}
if (num_monitors > qdev->monitors_config->max_allowed) {
DRM_DEBUG_KMS("client monitors list will be truncated: %d < %d\n",
@@ -79,6 +86,10 @@ static int qxl_display_copy_rom_client_monitors_config(struct qxl_device *qdev)
} else {
num_monitors = qdev->rom->client_monitors_config.count;
}
+ if (qdev->client_monitors_config
+ && (num_monitors != qdev->client_monitors_config->count)) {
+ status = MONITORS_CONFIG_MODIFIED;
+ }
qxl_alloc_client_monitors_config(qdev, num_monitors);
/* we copy max from the client but it isn't used */
qdev->client_monitors_config->max_allowed =
@@ -88,17 +99,39 @@ static int qxl_display_copy_rom_client_monitors_config(struct qxl_device *qdev)
&qdev->rom->client_monitors_config.heads[i];
struct qxl_head *client_head =
&qdev->client_monitors_config->heads[i];
- client_head->x = c_rect->left;
- client_head->y = c_rect->top;
- client_head->width = c_rect->right - c_rect->left;
- client_head->height = c_rect->bottom - c_rect->top;
- client_head->surface_id = 0;
- client_head->id = i;
- client_head->flags = 0;
+ if (client_head->x != c_rect->left) {
+ client_head->x = c_rect->left;
+ status = MONITORS_CONFIG_MODIFIED;
+ }
+ if (client_head->y != c_rect->top) {
+ client_head->y = c_rect->top;
+ status = MONITORS_CONFIG_MODIFIED;
+ }
+ if (client_head->width != c_rect->right - c_rect->left) {
+ client_head->width = c_rect->right - c_rect->left;
+ status = MONITORS_CONFIG_MODIFIED;
+ }
+ if (client_head->height != c_rect->bottom - c_rect->top) {
+ client_head->height = c_rect->bottom - c_rect->top;
+ status = MONITORS_CONFIG_MODIFIED;
+ }
+ if (client_head->surface_id != 0) {
+ client_head->surface_id = 0;
+ status = MONITORS_CONFIG_MODIFIED;
+ }
+ if (client_head->id != i) {
+ client_head->id = i;
+ status = MONITORS_CONFIG_MODIFIED;
+ }
+ if (client_head->flags != 0) {
+ client_head->flags = 0;
+ status = MONITORS_CONFIG_MODIFIED;
+ }
DRM_DEBUG_KMS("read %dx%d+%d+%d\n", client_head->width, client_head->height,
client_head->x, client_head->y);
}
- return 0;
+
+ return status;
}
static void qxl_update_offset_props(struct qxl_device *qdev)
@@ -124,9 +157,18 @@ void qxl_display_read_client_monitors_config(struct qxl_device *qdev)
{
struct drm_device *dev = qdev->ddev;
- while (qxl_display_copy_rom_client_monitors_config(qdev)) {
+ int status;
+
+ status = qxl_display_copy_rom_client_monitors_config(qdev);
+ while (status == MONITORS_CONFIG_BAD_CRC) {
qxl_io_log(qdev, "failed crc check for client_monitors_config,"
" retrying\n");
+ status = qxl_display_copy_rom_client_monitors_config(qdev);
+ }
+ if (status == MONITORS_CONFIG_UNCHANGED) {
+ qxl_io_log(qdev, "config unchanged\n");
+ DRM_DEBUG("ignoring unchanged client monitors config");
+ return;
}
drm_modeset_lock_all(dev);
@@ -157,6 +199,9 @@ static int qxl_add_monitors_config_modes(struct drm_connector *connector,
mode = drm_cvt_mode(dev, head->width, head->height, 60, false, false,
false);
mode->type |= DRM_MODE_TYPE_PREFERRED;
+ mode->hdisplay = head->width;
+ mode->vdisplay = head->height;
+ drm_mode_set_name(mode);
*pwidth = head->width;
*pheight = head->height;
drm_mode_probed_add(connector, mode);
@@ -607,7 +652,7 @@ static bool qxl_crtc_mode_fixup(struct drm_crtc *crtc,
return true;
}
-void
+static void
qxl_send_monitors_config(struct qxl_device *qdev)
{
int i;
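
The tri-state return value introduced above lets qxl_display_read_client_monitors_config() keep retrying on CRC failures but skip the whole modeset when the configuration did not actually change. A standalone sketch of that control flow (plain C; copy_config() is a hypothetical stand-in for qxl_display_copy_rom_client_monitors_config()):

#include <stdio.h>

enum {
        MONITORS_CONFIG_MODIFIED,
        MONITORS_CONFIG_UNCHANGED,
        MONITORS_CONFIG_BAD_CRC,
};

/* Pretend the first two reads race with the host updating the rom. */
static int copy_config(int attempt)
{
        return attempt < 2 ? MONITORS_CONFIG_BAD_CRC
                           : MONITORS_CONFIG_UNCHANGED;
}

int main(void)
{
        int attempt = 0;
        int status = copy_config(attempt++);

        while (status == MONITORS_CONFIG_BAD_CRC)       /* retry on bad CRC */
                status = copy_config(attempt++);

        if (status == MONITORS_CONFIG_UNCHANGED) {      /* nothing to do */
                printf("config unchanged, skipping modeset\n");
                return 0;
        }
        printf("applying modified config\n");
        return 0;
}
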
diff --git a/drivers/gpu/drm/qxl/qxl_drv.h b/drivers/gpu/drm/qxl/qxl_drv.h
index 5f3e5ad99de7..785aad42e9bb 100644
--- a/drivers/gpu/drm/qxl/qxl_drv.h
+++ b/drivers/gpu/drm/qxl/qxl_drv.h
@@ -31,7 +31,7 @@
* Definitions taken from spice-protocol, plus kernel driver specific bits.
*/
-#include <linux/fence.h>
+#include <linux/dma-fence.h>
#include <linux/workqueue.h>
#include <linux/firmware.h>
#include <linux/platform_device.h>
@@ -190,7 +190,7 @@ enum {
* spice-protocol/qxl_dev.h */
#define QXL_MAX_RES 96
struct qxl_release {
- struct fence base;
+ struct dma_fence base;
int id;
int type;
@@ -395,16 +395,11 @@ qxl_framebuffer_init(struct drm_device *dev,
struct drm_gem_object *obj,
const struct drm_framebuffer_funcs *funcs);
void qxl_display_read_client_monitors_config(struct qxl_device *qdev);
-void qxl_send_monitors_config(struct qxl_device *qdev);
int qxl_create_monitors_object(struct qxl_device *qdev);
int qxl_destroy_monitors_object(struct qxl_device *qdev);
-/* used by qxl_debugfs only */
-void qxl_crtc_set_from_monitors_config(struct qxl_device *qdev);
-void qxl_alloc_client_monitors_config(struct qxl_device *qdev, unsigned count);
-
/* qxl_gem.c */
-int qxl_gem_init(struct qxl_device *qdev);
+void qxl_gem_init(struct qxl_device *qdev);
void qxl_gem_fini(struct qxl_device *qdev);
int qxl_gem_object_create(struct qxl_device *qdev, int size,
int alignment, int initial_domain,
@@ -574,6 +569,5 @@ int qxl_bo_check_id(struct qxl_device *qdev, struct qxl_bo *bo);
struct qxl_drv_surface *
qxl_surface_lookup(struct drm_device *dev, int surface_id);
void qxl_surface_evict(struct qxl_device *qdev, struct qxl_bo *surf, bool freeing);
-int qxl_update_surface(struct qxl_device *qdev, struct qxl_bo *surf);
#endif
diff --git a/drivers/gpu/drm/qxl/qxl_fb.c b/drivers/gpu/drm/qxl/qxl_fb.c
index 2cd879a4ae15..fd7e5e94be5b 100644
--- a/drivers/gpu/drm/qxl/qxl_fb.c
+++ b/drivers/gpu/drm/qxl/qxl_fb.c
@@ -81,16 +81,10 @@ static struct fb_deferred_io qxl_defio = {
static struct fb_ops qxlfb_ops = {
.owner = THIS_MODULE,
- .fb_check_var = drm_fb_helper_check_var,
- .fb_set_par = drm_fb_helper_set_par, /* TODO: copy vmwgfx */
+ DRM_FB_HELPER_DEFAULT_OPS,
.fb_fillrect = drm_fb_helper_sys_fillrect,
.fb_copyarea = drm_fb_helper_sys_copyarea,
.fb_imageblit = drm_fb_helper_sys_imageblit,
- .fb_pan_display = drm_fb_helper_pan_display,
- .fb_blank = drm_fb_helper_blank,
- .fb_setcmap = drm_fb_helper_setcmap,
- .fb_debug_enter = drm_fb_helper_debug_enter,
- .fb_debug_leave = drm_fb_helper_debug_leave,
};
static void qxlfb_destroy_pinned_object(struct drm_gem_object *gobj)
@@ -197,7 +191,7 @@ static int qxlfb_framebuffer_dirty(struct drm_framebuffer *fb,
/*
* we are using a shadow draw buffer, at qdev->surface0_shadow
*/
- qxl_io_log(qdev, "dirty x[%d, %d], y[%d, %d]", clips->x1, clips->x2,
+ qxl_io_log(qdev, "dirty x[%d, %d], y[%d, %d]\n", clips->x1, clips->x2,
clips->y1, clips->y2);
image->dx = clips->x1;
image->dy = clips->y1;
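
DRM_FB_HELPER_DEFAULT_OPS, used above to replace the hand-listed fb_* callbacks, is a macro expanding to a block of designated initializers, so it can sit inside the brace list next to driver-specific entries. A simplified standalone illustration of the pattern (plain C; the struct, macro, and function names are stand-ins, not the kernel definitions):

#include <stdio.h>

struct ops {
        void (*check)(void);
        void (*set)(void);
        void (*blit)(void);
};

static void generic_check(void) { puts("generic check"); }
static void generic_set(void)   { puts("generic set"); }
static void my_blit(void)       { puts("driver blit"); }

/* Expands to designated initializers for the common callbacks. */
#define DEFAULT_OPS \
        .check = generic_check, \
        .set = generic_set

static const struct ops fb_ops = {
        DEFAULT_OPS,
        .blit = my_blit,        /* driver-specific hook */
};

int main(void)
{
        fb_ops.check();
        fb_ops.set();
        fb_ops.blit();
        return 0;
}
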
diff --git a/drivers/gpu/drm/qxl/qxl_gem.c b/drivers/gpu/drm/qxl/qxl_gem.c
index d9746e904ef1..3f185c4da5b7 100644
--- a/drivers/gpu/drm/qxl/qxl_gem.c
+++ b/drivers/gpu/drm/qxl/qxl_gem.c
@@ -111,10 +111,9 @@ void qxl_gem_object_close(struct drm_gem_object *obj,
{
}
-int qxl_gem_init(struct qxl_device *qdev)
+void qxl_gem_init(struct qxl_device *qdev)
{
INIT_LIST_HEAD(&qdev->gem.objects);
- return 0;
}
void qxl_gem_fini(struct qxl_device *qdev)
diff --git a/drivers/gpu/drm/qxl/qxl_kms.c b/drivers/gpu/drm/qxl/qxl_kms.c
index e642242728c0..af685f1d91f8 100644
--- a/drivers/gpu/drm/qxl/qxl_kms.c
+++ b/drivers/gpu/drm/qxl/qxl_kms.c
@@ -131,7 +131,7 @@ static int qxl_device_init(struct qxl_device *qdev,
mutex_init(&qdev->update_area_mutex);
mutex_init(&qdev->release_mutex);
mutex_init(&qdev->surf_evict_mutex);
- INIT_LIST_HEAD(&qdev->gem.objects);
+ qxl_gem_init(qdev);
qdev->rom_base = pci_resource_start(pdev, 2);
qdev->rom_size = pci_resource_len(pdev, 2);
@@ -273,6 +273,7 @@ static void qxl_device_fini(struct qxl_device *qdev)
qxl_ring_free(qdev->command_ring);
qxl_ring_free(qdev->cursor_ring);
qxl_ring_free(qdev->release_ring);
+ qxl_gem_fini(qdev);
qxl_bo_fini(qdev);
io_mapping_free(qdev->surface_mapping);
io_mapping_free(qdev->vram_mapping);
diff --git a/drivers/gpu/drm/qxl/qxl_release.c b/drivers/gpu/drm/qxl/qxl_release.c
index cd83f050cf3e..e6ec845b5be0 100644
--- a/drivers/gpu/drm/qxl/qxl_release.c
+++ b/drivers/gpu/drm/qxl/qxl_release.c
@@ -21,7 +21,7 @@
*/
#include "qxl_drv.h"
#include "qxl_object.h"
-#include <trace/events/fence.h>
+#include <trace/events/dma_fence.h>
/*
* drawable cmd cache - allocate a bunch of VRAM pages, suballocate
@@ -40,23 +40,24 @@
static const int release_size_per_bo[] = { RELEASE_SIZE, SURFACE_RELEASE_SIZE, RELEASE_SIZE };
static const int releases_per_bo[] = { RELEASES_PER_BO, SURFACE_RELEASES_PER_BO, RELEASES_PER_BO };
-static const char *qxl_get_driver_name(struct fence *fence)
+static const char *qxl_get_driver_name(struct dma_fence *fence)
{
return "qxl";
}
-static const char *qxl_get_timeline_name(struct fence *fence)
+static const char *qxl_get_timeline_name(struct dma_fence *fence)
{
return "release";
}
-static bool qxl_nop_signaling(struct fence *fence)
+static bool qxl_nop_signaling(struct dma_fence *fence)
{
/* fences are always automatically signaled, so just pretend we did this.. */
return true;
}
-static long qxl_fence_wait(struct fence *fence, bool intr, signed long timeout)
+static long qxl_fence_wait(struct dma_fence *fence, bool intr,
+ signed long timeout)
{
struct qxl_device *qdev;
struct qxl_release *release;
@@ -71,7 +72,7 @@ static long qxl_fence_wait(struct fence *fence, bool intr, signed long timeout)
retry:
sc++;
- if (fence_is_signaled(fence))
+ if (dma_fence_is_signaled(fence))
goto signaled;
qxl_io_notify_oom(qdev);
@@ -80,11 +81,11 @@ retry:
if (!qxl_queue_garbage_collect(qdev, true))
break;
- if (fence_is_signaled(fence))
+ if (dma_fence_is_signaled(fence))
goto signaled;
}
- if (fence_is_signaled(fence))
+ if (dma_fence_is_signaled(fence))
goto signaled;
if (have_drawable_releases || sc < 4) {
@@ -96,9 +97,9 @@ retry:
return 0;
if (have_drawable_releases && sc > 300) {
- FENCE_WARN(fence, "failed to wait on release %llu "
- "after spincount %d\n",
- fence->context & ~0xf0000000, sc);
+ DMA_FENCE_WARN(fence, "failed to wait on release %llu "
+ "after spincount %d\n",
+ fence->context & ~0xf0000000, sc);
goto signaled;
}
goto retry;
@@ -115,7 +116,7 @@ signaled:
return end - cur;
}
-static const struct fence_ops qxl_fence_ops = {
+static const struct dma_fence_ops qxl_fence_ops = {
.get_driver_name = qxl_get_driver_name,
.get_timeline_name = qxl_get_timeline_name,
.enable_signaling = qxl_nop_signaling,
@@ -133,7 +134,7 @@ qxl_release_alloc(struct qxl_device *qdev, int type,
release = kmalloc(size, GFP_KERNEL);
if (!release) {
DRM_ERROR("Out of memory\n");
- return 0;
+ return -ENOMEM;
}
release->base.ops = NULL;
release->type = type;
@@ -192,8 +193,8 @@ qxl_release_free(struct qxl_device *qdev,
WARN_ON(list_empty(&release->bos));
qxl_release_free_list(release);
- fence_signal(&release->base);
- fence_put(&release->base);
+ dma_fence_signal(&release->base);
+ dma_fence_put(&release->base);
} else {
qxl_release_free_list(release);
kfree(release);
@@ -453,9 +454,9 @@ void qxl_release_fence_buffer_objects(struct qxl_release *release)
* Since we never really allocated a context and we don't want to conflict,
* set the highest bits. This will break if we really allow exporting of dma-bufs.
*/
- fence_init(&release->base, &qxl_fence_ops, &qdev->release_lock,
- release->id | 0xf0000000, release->base.seqno);
- trace_fence_emit(&release->base);
+ dma_fence_init(&release->base, &qxl_fence_ops, &qdev->release_lock,
+ release->id | 0xf0000000, release->base.seqno);
+ trace_dma_fence_emit(&release->base);
driver = bdev->driver;
glob = bo->glob;
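
The qxl changes above are part of the tree-wide struct fence -> struct dma_fence rename: the ops table and the init, signal, wait, and put calls keep the same shape and only gain the dma_ prefix. A minimal kernel-context sketch of the renamed API (not a standalone program; the demo_* names are hypothetical, and the error handling real drivers need is omitted):

#include <linux/dma-fence.h>
#include <linux/spinlock.h>

static const char *demo_get_driver_name(struct dma_fence *f)
{
        return "demo";
}

static const char *demo_get_timeline_name(struct dma_fence *f)
{
        return "demo-timeline";
}

static bool demo_enable_signaling(struct dma_fence *f)
{
        return true;    /* signaled directly from demo_emit() below */
}

static const struct dma_fence_ops demo_fence_ops = {
        .get_driver_name = demo_get_driver_name,
        .get_timeline_name = demo_get_timeline_name,
        .enable_signaling = demo_enable_signaling,
        .wait = dma_fence_default_wait,
};

static DEFINE_SPINLOCK(demo_lock);

static void demo_emit(struct dma_fence *f, u64 context, unsigned int seqno)
{
        dma_fence_init(f, &demo_fence_ops, &demo_lock, context, seqno);
        /* ... submit work; once the hardware completes it: */
        dma_fence_signal(f);
        dma_fence_put(f);       /* drop the reference dma_fence_init() took */
}
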
diff --git a/drivers/gpu/drm/qxl/qxl_ttm.c b/drivers/gpu/drm/qxl/qxl_ttm.c
index e26c82db948b..11761330a6b8 100644
--- a/drivers/gpu/drm/qxl/qxl_ttm.c
+++ b/drivers/gpu/drm/qxl/qxl_ttm.c
@@ -387,6 +387,7 @@ static struct ttm_bo_driver qxl_bo_driver = {
.ttm_tt_unpopulate = &qxl_ttm_tt_unpopulate,
.invalidate_caches = &qxl_invalidate_caches,
.init_mem_type = &qxl_init_mem_type,
+ .eviction_valuable = ttm_bo_eviction_valuable,
.evict_flags = &qxl_evict_flags,
.move = &qxl_bo_move,
.verify_access = &qxl_verify_access,
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index 74f99bac08b1..05f4ebe31ce2 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -1156,7 +1156,7 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc,
u32 tmp, viewport_w, viewport_h;
int r;
bool bypass_lut = false;
- char *format_name;
+ struct drm_format_name_buf format_name;
/* no fb bound */
if (!atomic && !crtc->primary->fb) {
@@ -1260,9 +1260,8 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc,
bypass_lut = true;
break;
default:
- format_name = drm_get_format_name(target_fb->pixel_format);
- DRM_ERROR("Unsupported screen format %s\n", format_name);
- kfree(format_name);
+ DRM_ERROR("Unsupported screen format %s\n",
+ drm_get_format_name(target_fb->pixel_format, &format_name));
return -EINVAL;
}
@@ -1473,7 +1472,7 @@ static int avivo_crtc_do_set_base(struct drm_crtc *crtc,
u32 viewport_w, viewport_h;
int r;
bool bypass_lut = false;
- char *format_name;
+ struct drm_format_name_buf format_name;
/* no fb bound */
if (!atomic && !crtc->primary->fb) {
@@ -1563,9 +1562,8 @@ static int avivo_crtc_do_set_base(struct drm_crtc *crtc,
bypass_lut = true;
break;
default:
- format_name = drm_get_format_name(target_fb->pixel_format);
- DRM_ERROR("Unsupported screen format %s\n", format_name);
- kfree(format_name);
+ DRM_ERROR("Unsupported screen format %s\n",
+ drm_get_format_name(target_fb->pixel_format, &format_name));
return -EINVAL;
}
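
drm_get_format_name() changed from returning a kmalloc()ed string, which every caller had to remember to kfree() (as the removed lines above did), to filling a caller-provided struct drm_format_name_buf, so the call can sit directly inside the log statement. Kernel-context sketch of the new convention (report_format() is a hypothetical helper, not kernel code):

#include <drm/drmP.h>
#include <drm/drm_fourcc.h>

static void report_format(u32 pixel_format)
{
        struct drm_format_name_buf buf;

        /* buf lives on the stack: nothing to free after the call */
        DRM_ERROR("Unsupported screen format %s\n",
                  drm_get_format_name(pixel_format, &buf));
}
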
diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
index 56bb758f4e33..fa4f8f008e4d 100644
--- a/drivers/gpu/drm/radeon/atombios_encoders.c
+++ b/drivers/gpu/drm/radeon/atombios_encoders.c
@@ -28,6 +28,7 @@
#include <drm/radeon_drm.h>
#include "radeon.h"
#include "radeon_audio.h"
+#include "radeon_asic.h"
#include "atom.h"
#include <linux/backlight.h>
diff --git a/drivers/gpu/drm/radeon/evergreen_cs.c b/drivers/gpu/drm/radeon/evergreen_cs.c
index d960d3915408..f8b05090232a 100644
--- a/drivers/gpu/drm/radeon/evergreen_cs.c
+++ b/drivers/gpu/drm/radeon/evergreen_cs.c
@@ -27,6 +27,7 @@
*/
#include <drm/drmP.h>
#include "radeon.h"
+#include "radeon_asic.h"
#include "evergreend.h"
#include "evergreen_reg_safe.h"
#include "cayman_reg_safe.h"
diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c
index b69c8de35bd3..595a19736458 100644
--- a/drivers/gpu/drm/radeon/r600_cs.c
+++ b/drivers/gpu/drm/radeon/r600_cs.c
@@ -28,6 +28,7 @@
#include <linux/kernel.h>
#include <drm/drmP.h>
#include "radeon.h"
+#include "radeon_asic.h"
#include "r600d.h"
#include "r600_reg_safe.h"
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 1b0dcad916b0..44e0c5ed6418 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -66,7 +66,7 @@
#include <linux/kref.h>
#include <linux/interval_tree.h>
#include <linux/hashtable.h>
-#include <linux/fence.h>
+#include <linux/dma-fence.h>
#include <ttm/ttm_bo_api.h>
#include <ttm/ttm_bo_driver.h>
@@ -367,7 +367,7 @@ struct radeon_fence_driver {
};
struct radeon_fence {
- struct fence base;
+ struct dma_fence base;
struct radeon_device *rdev;
uint64_t seq;
@@ -746,7 +746,7 @@ struct radeon_flip_work {
uint64_t base;
struct drm_pending_vblank_event *event;
struct radeon_bo *old_rbo;
- struct fence *fence;
+ struct dma_fence *fence;
bool async;
};
@@ -2514,9 +2514,9 @@ void cik_mm_wdoorbell(struct radeon_device *rdev, u32 index, u32 v);
/*
* Cast helper
*/
-extern const struct fence_ops radeon_fence_ops;
+extern const struct dma_fence_ops radeon_fence_ops;
-static inline struct radeon_fence *to_radeon_fence(struct fence *f)
+static inline struct radeon_fence *to_radeon_fence(struct dma_fence *f)
{
struct radeon_fence *__f = container_of(f, struct radeon_fence, base);
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index 5df3ec73021b..4134759a6823 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -29,6 +29,7 @@
#include "atom.h"
#include "atom-bits.h"
+#include "radeon_asic.h"
extern void
radeon_add_atom_encoder(struct drm_device *dev, uint32_t encoder_enum,
diff --git a/drivers/gpu/drm/radeon/radeon_bios.c b/drivers/gpu/drm/radeon/radeon_bios.c
index 21b6732425c5..c829cfb02fc4 100644
--- a/drivers/gpu/drm/radeon/radeon_bios.c
+++ b/drivers/gpu/drm/radeon/radeon_bios.c
@@ -603,8 +603,9 @@ static bool radeon_acpi_vfct_bios(struct radeon_device *rdev)
GOP_VBIOS_CONTENT *vbios;
VFCT_IMAGE_HEADER *vhdr;
- if (!ACPI_SUCCESS(acpi_get_table_with_size("VFCT", 1, &hdr, &tbl_size)))
+ if (!ACPI_SUCCESS(acpi_get_table("VFCT", 1, &hdr)))
return false;
+ tbl_size = hdr->length;
if (tbl_size < sizeof(UEFI_ACPI_VFCT)) {
DRM_ERROR("ACPI VFCT table present but broken (too short #1)\n");
goto out_unmap;
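
The VFCT lookup above tracks the removal of acpi_get_table_with_size(): plain acpi_get_table() returns only the table header, and the caller now reads the size from the header's standard length field. Kernel-context sketch (vfct_table_big_enough() is a hypothetical helper):

#include <linux/acpi.h>

static bool vfct_table_big_enough(size_t min_size)
{
        struct acpi_table_header *hdr;

        if (!ACPI_SUCCESS(acpi_get_table("VFCT", 1, &hdr)))
                return false;

        return hdr->length >= min_size; /* size comes from the header */
}
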
diff --git a/drivers/gpu/drm/radeon/radeon_clocks.c b/drivers/gpu/drm/radeon/radeon_clocks.c
index 38e396dae0a9..c1135feb93c1 100644
--- a/drivers/gpu/drm/radeon/radeon_clocks.c
+++ b/drivers/gpu/drm/radeon/radeon_clocks.c
@@ -29,6 +29,7 @@
#include <drm/radeon_drm.h>
#include "radeon_reg.h"
#include "radeon.h"
+#include "radeon_asic.h"
#include "atom.h"
/* 10 khz */
diff --git a/drivers/gpu/drm/radeon/radeon_cursor.c b/drivers/gpu/drm/radeon/radeon_cursor.c
index 2a10e24b34b1..fb16070b266e 100644
--- a/drivers/gpu/drm/radeon/radeon_cursor.c
+++ b/drivers/gpu/drm/radeon/radeon_cursor.c
@@ -90,6 +90,9 @@ static void radeon_show_cursor(struct drm_crtc *crtc)
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
struct radeon_device *rdev = crtc->dev->dev_private;
+ if (radeon_crtc->cursor_out_of_bounds)
+ return;
+
if (ASIC_IS_DCE4(rdev)) {
WREG32(EVERGREEN_CUR_SURFACE_ADDRESS_HIGH + radeon_crtc->crtc_offset,
upper_32_bits(radeon_crtc->cursor_addr));
@@ -143,21 +146,25 @@ static int radeon_cursor_move_locked(struct drm_crtc *crtc, int x, int y)
int xorigin = 0, yorigin = 0;
int w = radeon_crtc->cursor_width;
+ radeon_crtc->cursor_x = x;
+ radeon_crtc->cursor_y = y;
+
if (ASIC_IS_AVIVO(rdev)) {
/* avivo cursor are offset into the total surface */
x += crtc->x;
y += crtc->y;
}
- DRM_DEBUG("x %d y %d c->x %d c->y %d\n", x, y, crtc->x, crtc->y);
- if (x < 0) {
+ if (x < 0)
xorigin = min(-x, radeon_crtc->max_cursor_width - 1);
- x = 0;
- }
- if (y < 0) {
+ if (y < 0)
yorigin = min(-y, radeon_crtc->max_cursor_height - 1);
- y = 0;
+
+ if (!ASIC_IS_AVIVO(rdev)) {
+ x += crtc->x;
+ y += crtc->y;
}
+ DRM_DEBUG("x %d y %d c->x %d c->y %d\n", x, y, crtc->x, crtc->y);
/* fixed on DCE6 and newer */
if (ASIC_IS_AVIVO(rdev) && !ASIC_IS_DCE6(rdev)) {
@@ -180,27 +187,31 @@ static int radeon_cursor_move_locked(struct drm_crtc *crtc, int x, int y)
if (i > 1) {
int cursor_end, frame_end;
- cursor_end = x - xorigin + w;
+ cursor_end = x + w;
frame_end = crtc->x + crtc->mode.crtc_hdisplay;
if (cursor_end >= frame_end) {
w = w - (cursor_end - frame_end);
if (!(frame_end & 0x7f))
w--;
- } else {
- if (!(cursor_end & 0x7f))
- w--;
+ } else if (cursor_end <= 0) {
+ goto out_of_bounds;
+ } else if (!(cursor_end & 0x7f)) {
+ w--;
}
if (w <= 0) {
- w = 1;
- cursor_end = x - xorigin + w;
- if (!(cursor_end & 0x7f)) {
- x--;
- WARN_ON_ONCE(x < 0);
- }
+ goto out_of_bounds;
}
}
}
+ if (x <= (crtc->x - w) || y <= (crtc->y - radeon_crtc->cursor_height) ||
+ x >= (crtc->x + crtc->mode.crtc_hdisplay) ||
+ y >= (crtc->y + crtc->mode.crtc_vdisplay))
+ goto out_of_bounds;
+
+ x += xorigin;
+ y += yorigin;
+
if (ASIC_IS_DCE4(rdev)) {
WREG32(EVERGREEN_CUR_POSITION + radeon_crtc->crtc_offset, (x << 16) | y);
WREG32(EVERGREEN_CUR_HOT_SPOT + radeon_crtc->crtc_offset, (xorigin << 16) | yorigin);
@@ -212,6 +223,9 @@ static int radeon_cursor_move_locked(struct drm_crtc *crtc, int x, int y)
WREG32(AVIVO_D1CUR_SIZE + radeon_crtc->crtc_offset,
((w - 1) << 16) | (radeon_crtc->cursor_height - 1));
} else {
+ x -= crtc->x;
+ y -= crtc->y;
+
if (crtc->mode.flags & DRM_MODE_FLAG_DBLSCAN)
y *= 2;
@@ -229,10 +243,20 @@ static int radeon_cursor_move_locked(struct drm_crtc *crtc, int x, int y)
yorigin * 256);
}
- radeon_crtc->cursor_x = x;
- radeon_crtc->cursor_y = y;
+ if (radeon_crtc->cursor_out_of_bounds) {
+ radeon_crtc->cursor_out_of_bounds = false;
+ if (radeon_crtc->cursor_bo)
+ radeon_show_cursor(crtc);
+ }
return 0;
+
+ out_of_bounds:
+ if (!radeon_crtc->cursor_out_of_bounds) {
+ radeon_hide_cursor(crtc);
+ radeon_crtc->cursor_out_of_bounds = true;
+ }
+ return 0;
}
int radeon_crtc_cursor_move(struct drm_crtc *crtc,
@@ -297,22 +321,23 @@ int radeon_crtc_cursor_set2(struct drm_crtc *crtc,
return ret;
}
- radeon_crtc->cursor_width = width;
- radeon_crtc->cursor_height = height;
-
radeon_lock_cursor(crtc, true);
- if (hot_x != radeon_crtc->cursor_hot_x ||
+ if (width != radeon_crtc->cursor_width ||
+ height != radeon_crtc->cursor_height ||
+ hot_x != radeon_crtc->cursor_hot_x ||
hot_y != radeon_crtc->cursor_hot_y) {
int x, y;
x = radeon_crtc->cursor_x + radeon_crtc->cursor_hot_x - hot_x;
y = radeon_crtc->cursor_y + radeon_crtc->cursor_hot_y - hot_y;
- radeon_cursor_move_locked(crtc, x, y);
-
+ radeon_crtc->cursor_width = width;
+ radeon_crtc->cursor_height = height;
radeon_crtc->cursor_hot_x = hot_x;
radeon_crtc->cursor_hot_y = hot_y;
+
+ radeon_cursor_move_locked(crtc, x, y);
}
radeon_show_cursor(crtc);
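
The cursor rework above stops clamping an off-screen cursor to a one-pixel sliver; instead it hides the cursor once it lies entirely outside the CRTC's viewport and records that in cursor_out_of_bounds so it can be re-shown when it comes back. A standalone rendering of the bounds test (plain C; the kernel version reads the same quantities from crtc and radeon_crtc):

#include <stdbool.h>
#include <stdio.h>

static bool cursor_out_of_bounds(int x, int y, int w, int h,
                                 int crtc_x, int crtc_y,
                                 int hdisplay, int vdisplay)
{
        return x <= crtc_x - w || y <= crtc_y - h ||
               x >= crtc_x + hdisplay || y >= crtc_y + vdisplay;
}

int main(void)
{
        /* A 64x64 cursor fully left of a 1920x1080 viewport at (0,0). */
        printf("%d\n", cursor_out_of_bounds(-64, 100, 64, 64,
                                            0, 0, 1920, 1080)); /* 1 */
        return 0;
}
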
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 621af069a3d2..8a1df2a1afbd 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -1333,7 +1333,7 @@ int radeon_device_init(struct radeon_device *rdev,
for (i = 0; i < RADEON_NUM_RINGS; i++) {
rdev->ring[i].idx = i;
}
- rdev->fence_context = fence_context_alloc(RADEON_NUM_RINGS);
+ rdev->fence_context = dma_fence_context_alloc(RADEON_NUM_RINGS);
DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X 0x%02X).\n",
radeon_family_name[rdev->family], pdev->vendor, pdev->device,
@@ -1664,7 +1664,10 @@ int radeon_suspend_kms(struct drm_device *dev, bool suspend,
radeon_suspend(rdev);
radeon_hpd_fini(rdev);
- /* evict remaining vram memory */
+ /* evict remaining vram memory
+ * This second call to evict vram is to evict the gart page table
+ * using the CPU.
+ */
radeon_bo_evict_vram(rdev);
radeon_agp_suspend(rdev);
@@ -1946,9 +1949,6 @@ int radeon_debugfs_add_files(struct radeon_device *rdev,
rdev->debugfs_count = i;
#if defined(CONFIG_DEBUG_FS)
drm_debugfs_create_files(files, nfiles,
- rdev->ddev->control->debugfs_root,
- rdev->ddev->control);
- drm_debugfs_create_files(files, nfiles,
rdev->ddev->primary->debugfs_root,
rdev->ddev->primary);
#endif
@@ -1963,9 +1963,6 @@ static void radeon_debugfs_remove_files(struct radeon_device *rdev)
for (i = 0; i < rdev->debugfs_count; i++) {
drm_debugfs_remove_files(rdev->debugfs[i].files,
rdev->debugfs[i].num_files,
- rdev->ddev->control);
- drm_debugfs_remove_files(rdev->debugfs[i].files,
- rdev->debugfs[i].num_files,
rdev->ddev->primary);
}
#endif
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index cdb8cb568c15..e7409e8a9f87 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -437,7 +437,7 @@ static void radeon_flip_work_func(struct work_struct *__work)
down_read(&rdev->exclusive_lock);
}
} else
- r = fence_wait(work->fence, false);
+ r = dma_fence_wait(work->fence, false);
if (r)
DRM_ERROR("failed to wait on page flip fence (%d)!\n", r);
@@ -447,7 +447,7 @@ static void radeon_flip_work_func(struct work_struct *__work)
* confused about which BO the CRTC is scanning out
*/
- fence_put(work->fence);
+ dma_fence_put(work->fence);
work->fence = NULL;
}
@@ -542,7 +542,7 @@ static int radeon_crtc_page_flip_target(struct drm_crtc *crtc,
DRM_ERROR("failed to pin new rbo buffer before flip\n");
goto cleanup;
}
- work->fence = fence_get(reservation_object_get_excl(new_rbo->tbo.resv));
+ work->fence = dma_fence_get(reservation_object_get_excl(new_rbo->tbo.resv));
radeon_bo_get_tiling_flags(new_rbo, &tiling_flags, NULL);
radeon_bo_unreserve(new_rbo);
@@ -617,7 +617,7 @@ pflip_cleanup:
cleanup:
drm_gem_object_unreference_unlocked(&work->old_rbo->gem_base);
- fence_put(work->fence);
+ dma_fence_put(work->fence);
kfree(work);
return r;
}
diff --git a/drivers/gpu/drm/radeon/radeon_dp_mst.c b/drivers/gpu/drm/radeon/radeon_dp_mst.c
index de504ea29c06..6d1237d6e1b8 100644
--- a/drivers/gpu/drm/radeon/radeon_dp_mst.c
+++ b/drivers/gpu/drm/radeon/radeon_dp_mst.c
@@ -223,7 +223,8 @@ radeon_dp_mst_mode_valid(struct drm_connector *connector,
return MODE_OK;
}
-struct drm_encoder *radeon_mst_best_encoder(struct drm_connector *connector)
+static struct
+drm_encoder *radeon_mst_best_encoder(struct drm_connector *connector)
{
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
@@ -341,7 +342,8 @@ const struct drm_dp_mst_topology_cbs mst_cbs = {
.hotplug = radeon_dp_mst_hotplug,
};
-struct radeon_connector *radeon_mst_find_connector(struct drm_encoder *encoder)
+static struct
+radeon_connector *radeon_mst_find_connector(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
struct drm_connector *connector;
@@ -597,7 +599,7 @@ static const struct drm_encoder_helper_funcs radeon_mst_helper_funcs = {
.commit = radeon_mst_encoder_commit,
};
-void radeon_dp_mst_encoder_destroy(struct drm_encoder *encoder)
+static void radeon_dp_mst_encoder_destroy(struct drm_encoder *encoder)
{
drm_encoder_cleanup(encoder);
kfree(encoder);
diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c
index 0daad446d2c7..899b6a1644bd 100644
--- a/drivers/gpu/drm/radeon/radeon_fb.c
+++ b/drivers/gpu/drm/radeon/radeon_fb.c
@@ -74,28 +74,22 @@ radeonfb_release(struct fb_info *info, int user)
static struct fb_ops radeonfb_ops = {
.owner = THIS_MODULE,
+ DRM_FB_HELPER_DEFAULT_OPS,
.fb_open = radeonfb_open,
.fb_release = radeonfb_release,
- .fb_check_var = drm_fb_helper_check_var,
- .fb_set_par = drm_fb_helper_set_par,
.fb_fillrect = drm_fb_helper_cfb_fillrect,
.fb_copyarea = drm_fb_helper_cfb_copyarea,
.fb_imageblit = drm_fb_helper_cfb_imageblit,
- .fb_pan_display = drm_fb_helper_pan_display,
- .fb_blank = drm_fb_helper_blank,
- .fb_setcmap = drm_fb_helper_setcmap,
- .fb_debug_enter = drm_fb_helper_debug_enter,
- .fb_debug_leave = drm_fb_helper_debug_leave,
};
-int radeon_align_pitch(struct radeon_device *rdev, int width, int bpp, bool tiled)
+int radeon_align_pitch(struct radeon_device *rdev, int width, int cpp, bool tiled)
{
int aligned = width;
int align_large = (ASIC_IS_AVIVO(rdev)) || tiled;
int pitch_mask = 0;
- switch (bpp / 8) {
+ switch (cpp) {
case 1:
pitch_mask = align_large ? 255 : 127;
break;
@@ -110,7 +104,7 @@ int radeon_align_pitch(struct radeon_device *rdev, int width, int cpp, bool tiled)
aligned += pitch_mask;
aligned &= ~pitch_mask;
- return aligned;
+ return aligned * cpp;
}
static void radeonfb_destroy_pinned_object(struct drm_gem_object *gobj)
@@ -139,13 +133,13 @@ static int radeonfb_create_pinned_object(struct radeon_fbdev *rfbdev,
int ret;
int aligned_size, size;
int height = mode_cmd->height;
- u32 bpp, depth;
+ u32 cpp;
- drm_fb_get_bpp_depth(mode_cmd->pixel_format, &depth, &bpp);
+ cpp = drm_format_plane_cpp(mode_cmd->pixel_format, 0);
/* need to align pitch with crtc limits */
- mode_cmd->pitches[0] = radeon_align_pitch(rdev, mode_cmd->width, bpp,
- fb_tiled) * ((bpp + 1) / 8);
+ mode_cmd->pitches[0] = radeon_align_pitch(rdev, mode_cmd->width, cpp,
+ fb_tiled);
if (rdev->family >= CHIP_R600)
height = ALIGN(mode_cmd->height, 8);
@@ -165,11 +159,11 @@ static int radeonfb_create_pinned_object(struct radeon_fbdev *rfbdev,
tiling_flags = RADEON_TILING_MACRO;
#ifdef __BIG_ENDIAN
- switch (bpp) {
- case 32:
+ switch (cpp) {
+ case 4:
tiling_flags |= RADEON_TILING_SWAP_32BIT;
break;
- case 16:
+ case 2:
tiling_flags |= RADEON_TILING_SWAP_16BIT;
default:
break;
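
radeon_align_pitch() now takes a byte-per-pixel count (cpp) and returns the pitch in bytes, absorbing the ((bpp + 1) / 8) multiply its callers used to do; radeon_mode_dumb_create() below gets the matching DIV_ROUND_UP(bpp, 8) conversion. A standalone sketch of the arithmetic (plain C; pitch_mask 63 corresponds to the 64-pixel alignment case above):

#include <stdio.h>

static int align_pitch(int width, int cpp, int pitch_mask)
{
        int aligned = (width + pitch_mask) & ~pitch_mask;

        return aligned * cpp;   /* pitch in bytes, not pixels */
}

int main(void)
{
        /* 1366 px wide, 4 bytes/px, width aligned up to 1408 px. */
        printf("pitch = %d bytes\n", align_pitch(1366, 4, 63)); /* 5632 */
        return 0;
}
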
diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
index 7ef075acde9c..ef09f0a63754 100644
--- a/drivers/gpu/drm/radeon/radeon_fence.c
+++ b/drivers/gpu/drm/radeon/radeon_fence.c
@@ -141,8 +141,10 @@ int radeon_fence_emit(struct radeon_device *rdev,
(*fence)->seq = seq = ++rdev->fence_drv[ring].sync_seq[ring];
(*fence)->ring = ring;
(*fence)->is_vm_update = false;
- fence_init(&(*fence)->base, &radeon_fence_ops,
- &rdev->fence_queue.lock, rdev->fence_context + ring, seq);
+ dma_fence_init(&(*fence)->base, &radeon_fence_ops,
+ &rdev->fence_queue.lock,
+ rdev->fence_context + ring,
+ seq);
radeon_fence_ring_emit(rdev, ring, *fence);
trace_radeon_fence_emit(rdev->ddev, ring, (*fence)->seq);
radeon_fence_schedule_check(rdev, ring);
@@ -169,18 +171,18 @@ static int radeon_fence_check_signaled(wait_queue_t *wait, unsigned mode, int flags, void *key)
*/
seq = atomic64_read(&fence->rdev->fence_drv[fence->ring].last_seq);
if (seq >= fence->seq) {
- int ret = fence_signal_locked(&fence->base);
+ int ret = dma_fence_signal_locked(&fence->base);
if (!ret)
- FENCE_TRACE(&fence->base, "signaled from irq context\n");
+ DMA_FENCE_TRACE(&fence->base, "signaled from irq context\n");
else
- FENCE_TRACE(&fence->base, "was already signaled\n");
+ DMA_FENCE_TRACE(&fence->base, "was already signaled\n");
radeon_irq_kms_sw_irq_put(fence->rdev, fence->ring);
__remove_wait_queue(&fence->rdev->fence_queue, &fence->fence_wake);
- fence_put(&fence->base);
+ dma_fence_put(&fence->base);
} else
- FENCE_TRACE(&fence->base, "pending\n");
+ DMA_FENCE_TRACE(&fence->base, "pending\n");
return 0;
}
@@ -351,7 +353,7 @@ static bool radeon_fence_seq_signaled(struct radeon_device *rdev,
return false;
}
-static bool radeon_fence_is_signaled(struct fence *f)
+static bool radeon_fence_is_signaled(struct dma_fence *f)
{
struct radeon_fence *fence = to_radeon_fence(f);
struct radeon_device *rdev = fence->rdev;
@@ -381,7 +383,7 @@ static bool radeon_fence_is_signaled(struct fence *f)
* to fence_queue that checks if this fence is signaled, and if so it
* signals the fence and removes itself.
*/
-static bool radeon_fence_enable_signaling(struct fence *f)
+static bool radeon_fence_enable_signaling(struct dma_fence *f)
{
struct radeon_fence *fence = to_radeon_fence(f);
struct radeon_device *rdev = fence->rdev;
@@ -414,9 +416,9 @@ static bool radeon_fence_enable_signaling(struct fence *f)
fence->fence_wake.private = NULL;
fence->fence_wake.func = radeon_fence_check_signaled;
__add_wait_queue(&rdev->fence_queue, &fence->fence_wake);
- fence_get(f);
+ dma_fence_get(f);
- FENCE_TRACE(&fence->base, "armed on ring %i!\n", fence->ring);
+ DMA_FENCE_TRACE(&fence->base, "armed on ring %i!\n", fence->ring);
return true;
}
@@ -436,9 +438,9 @@ bool radeon_fence_signaled(struct radeon_fence *fence)
if (radeon_fence_seq_signaled(fence->rdev, fence->seq, fence->ring)) {
int ret;
- ret = fence_signal(&fence->base);
+ ret = dma_fence_signal(&fence->base);
if (!ret)
- FENCE_TRACE(&fence->base, "signaled from radeon_fence_signaled\n");
+ DMA_FENCE_TRACE(&fence->base, "signaled from radeon_fence_signaled\n");
return true;
}
return false;
@@ -552,7 +554,7 @@ long radeon_fence_wait_timeout(struct radeon_fence *fence, bool intr, long timeout)
* exclusive_lock is not held in that case.
*/
if (WARN_ON_ONCE(!to_radeon_fence(&fence->base)))
- return fence_wait(&fence->base, intr);
+ return dma_fence_wait(&fence->base, intr);
seq[fence->ring] = fence->seq;
r = radeon_fence_wait_seq_timeout(fence->rdev, seq, intr, timeout);
@@ -560,9 +562,9 @@ long radeon_fence_wait_timeout(struct radeon_fence *fence, bool intr, long timeout)
return r;
}
- r_sig = fence_signal(&fence->base);
+ r_sig = dma_fence_signal(&fence->base);
if (!r_sig)
- FENCE_TRACE(&fence->base, "signaled from fence_wait\n");
+ DMA_FENCE_TRACE(&fence->base, "signaled from fence_wait\n");
return r;
}
@@ -697,7 +699,7 @@ int radeon_fence_wait_empty(struct radeon_device *rdev, int ring)
*/
struct radeon_fence *radeon_fence_ref(struct radeon_fence *fence)
{
- fence_get(&fence->base);
+ dma_fence_get(&fence->base);
return fence;
}
@@ -714,7 +716,7 @@ void radeon_fence_unref(struct radeon_fence **fence)
*fence = NULL;
if (tmp) {
- fence_put(&tmp->base);
+ dma_fence_put(&tmp->base);
}
}
@@ -1028,12 +1030,12 @@ int radeon_debugfs_fence_init(struct radeon_device *rdev)
#endif
}
-static const char *radeon_fence_get_driver_name(struct fence *fence)
+static const char *radeon_fence_get_driver_name(struct dma_fence *fence)
{
return "radeon";
}
-static const char *radeon_fence_get_timeline_name(struct fence *f)
+static const char *radeon_fence_get_timeline_name(struct dma_fence *f)
{
struct radeon_fence *fence = to_radeon_fence(f);
switch (fence->ring) {
@@ -1051,16 +1053,16 @@ static const char *radeon_fence_get_timeline_name(struct fence *f)
static inline bool radeon_test_signaled(struct radeon_fence *fence)
{
- return test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->base.flags);
+ return test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->base.flags);
}
struct radeon_wait_cb {
- struct fence_cb base;
+ struct dma_fence_cb base;
struct task_struct *task;
};
static void
-radeon_fence_wait_cb(struct fence *fence, struct fence_cb *cb)
+radeon_fence_wait_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
{
struct radeon_wait_cb *wait =
container_of(cb, struct radeon_wait_cb, base);
@@ -1068,7 +1070,7 @@ radeon_fence_wait_cb(struct fence *fence, struct fence_cb *cb)
wake_up_process(wait->task);
}
-static signed long radeon_fence_default_wait(struct fence *f, bool intr,
+static signed long radeon_fence_default_wait(struct dma_fence *f, bool intr,
signed long t)
{
struct radeon_fence *fence = to_radeon_fence(f);
@@ -1077,7 +1079,7 @@ static signed long radeon_fence_default_wait(struct fence *f, bool intr,
cb.task = current;
- if (fence_add_callback(f, &cb.base, radeon_fence_wait_cb))
+ if (dma_fence_add_callback(f, &cb.base, radeon_fence_wait_cb))
return t;
while (t > 0) {
@@ -1105,12 +1107,12 @@ static signed long radeon_fence_default_wait(struct fence *f, bool intr,
}
__set_current_state(TASK_RUNNING);
- fence_remove_callback(f, &cb.base);
+ dma_fence_remove_callback(f, &cb.base);
return t;
}
-const struct fence_ops radeon_fence_ops = {
+const struct dma_fence_ops radeon_fence_ops = {
.get_driver_name = radeon_fence_get_driver_name,
.get_timeline_name = radeon_fence_get_timeline_name,
.enable_signaling = radeon_fence_enable_signaling,
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index deb9511725c9..0bcffd8a7bd3 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -745,7 +745,8 @@ int radeon_mode_dumb_create(struct drm_file *file_priv,
uint32_t handle;
int r;
- args->pitch = radeon_align_pitch(rdev, args->width, args->bpp, 0) * ((args->bpp + 1) / 8);
+ args->pitch = radeon_align_pitch(rdev, args->width,
+ DIV_ROUND_UP(args->bpp, 8), 0);
args->size = args->pitch * args->height;
args->size = ALIGN(args->size, PAGE_SIZE);
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
index 868c3ba2efaa..222a1fa41d7c 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
@@ -27,6 +27,7 @@
#include <drm/drm_crtc_helper.h>
#include <drm/radeon_drm.h>
#include "radeon.h"
+#include "radeon_asic.h"
#include "atom.h"
#include <linux/backlight.h>
#ifdef CONFIG_PMAC_BACKLIGHT
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
index bb75201a24ba..f1da484864a9 100644
--- a/drivers/gpu/drm/radeon/radeon_mode.h
+++ b/drivers/gpu/drm/radeon/radeon_mode.h
@@ -330,6 +330,7 @@ struct radeon_crtc {
u16 lut_r[256], lut_g[256], lut_b[256];
bool enabled;
bool can_tile;
+ bool cursor_out_of_bounds;
uint32_t crtc_offset;
struct drm_gem_object *cursor_bo;
uint64_t cursor_addr;
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index 4b6542538ff9..326ad068c15a 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -47,6 +47,7 @@ static bool radeon_pm_in_vbl(struct radeon_device *rdev);
static bool radeon_pm_debug_check_in_vbl(struct radeon_device *rdev, bool finish);
static void radeon_pm_update_profile(struct radeon_device *rdev);
static void radeon_pm_set_clocks(struct radeon_device *rdev);
+static void radeon_pm_compute_clocks_dpm(struct radeon_device *rdev);
int radeon_pm_get_type_index(struct radeon_device *rdev,
enum radeon_pm_state_type ps_type,
@@ -79,6 +80,8 @@ void radeon_pm_acpi_event_handler(struct radeon_device *rdev)
radeon_dpm_enable_bapm(rdev, rdev->pm.dpm.ac_power);
}
mutex_unlock(&rdev->pm.mutex);
+ /* allow new DPM state to be picked */
+ radeon_pm_compute_clocks_dpm(rdev);
} else if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
if (rdev->pm.profile == PM_PROFILE_AUTO) {
mutex_lock(&rdev->pm.mutex);
@@ -882,7 +885,8 @@ static struct radeon_ps *radeon_dpm_pick_power_state(struct radeon_device *rdev,
dpm_state = POWER_STATE_TYPE_INTERNAL_3DPERF;
/* balanced states don't exist at the moment */
if (dpm_state == POWER_STATE_TYPE_BALANCED)
- dpm_state = POWER_STATE_TYPE_PERFORMANCE;
+ dpm_state = rdev->pm.dpm.ac_power ?
+ POWER_STATE_TYPE_PERFORMANCE : POWER_STATE_TYPE_BATTERY;
restart_search:
/* Pick the best power state based on current conditions */
diff --git a/drivers/gpu/drm/radeon/radeon_sync.c b/drivers/gpu/drm/radeon/radeon_sync.c
index 02ac8a1de4ff..be5d7a38d3aa 100644
--- a/drivers/gpu/drm/radeon/radeon_sync.c
+++ b/drivers/gpu/drm/radeon/radeon_sync.c
@@ -92,7 +92,7 @@ int radeon_sync_resv(struct radeon_device *rdev,
bool shared)
{
struct reservation_object_list *flist;
- struct fence *f;
+ struct dma_fence *f;
struct radeon_fence *fence;
unsigned i;
int r = 0;
@@ -103,7 +103,7 @@ int radeon_sync_resv(struct radeon_device *rdev,
if (fence && fence->rdev == rdev)
radeon_sync_fence(sync, fence);
else if (f)
- r = fence_wait(f, true);
+ r = dma_fence_wait(f, true);
flist = reservation_object_get_list(resv);
if (shared || !flist || r)
@@ -116,7 +116,7 @@ int radeon_sync_resv(struct radeon_device *rdev,
if (fence && fence->rdev == rdev)
radeon_sync_fence(sync, fence);
else
- r = fence_wait(f, true);
+ r = dma_fence_wait(f, true);
if (r)
break;
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index 3de5e6e21662..0cf03ccbf0a7 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -863,6 +863,7 @@ static struct ttm_bo_driver radeon_bo_driver = {
.ttm_tt_unpopulate = &radeon_ttm_tt_unpopulate,
.invalidate_caches = &radeon_invalidate_caches,
.init_mem_type = &radeon_init_mem_type,
+ .eviction_valuable = ttm_bo_eviction_valuable,
.evict_flags = &radeon_evict_flags,
.move = &radeon_bo_move,
.verify_access = &radeon_verify_access,
diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c
index 0cd0e7bdee55..d34d1cf33895 100644
--- a/drivers/gpu/drm/radeon/radeon_uvd.c
+++ b/drivers/gpu/drm/radeon/radeon_uvd.c
@@ -467,7 +467,7 @@ static int radeon_uvd_cs_msg(struct radeon_cs_parser *p, struct radeon_bo *bo,
{
int32_t *msg, msg_type, handle;
unsigned img_size = 0;
- struct fence *f;
+ struct dma_fence *f;
void *ptr;
int i, r;
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index e402be8821c4..ad4d7b8b8322 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -1714,6 +1714,7 @@ static int si_init_microcode(struct radeon_device *rdev)
(rdev->pdev->revision == 0x80) ||
(rdev->pdev->revision == 0x81) ||
(rdev->pdev->revision == 0x83) ||
+ (rdev->pdev->revision == 0x87) ||
(rdev->pdev->device == 0x6604) ||
(rdev->pdev->device == 0x6605))
new_smc = true;
@@ -7858,7 +7859,7 @@ static void si_program_aspm(struct radeon_device *rdev)
}
}
-int si_vce_send_vcepll_ctlreq(struct radeon_device *rdev)
+static int si_vce_send_vcepll_ctlreq(struct radeon_device *rdev)
{
unsigned i;
diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c
index c49934527a87..8b5e697f2549 100644
--- a/drivers/gpu/drm/radeon/si_dpm.c
+++ b/drivers/gpu/drm/radeon/si_dpm.c
@@ -3026,6 +3026,7 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev,
(rdev->pdev->revision == 0x80) ||
(rdev->pdev->revision == 0x81) ||
(rdev->pdev->revision == 0x83) ||
+ (rdev->pdev->revision == 0x87) ||
(rdev->pdev->device == 0x6604) ||
(rdev->pdev->device == 0x6605)) {
max_sclk = 75000;
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
index 7316fc7fa0bd..a2ec6d8796a0 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
@@ -149,8 +149,8 @@ static void rcar_du_crtc_set_display_timing(struct rcar_du_crtc *rcrtc)
rcar_du_group_write(rcrtc->group, rcrtc->index % 2 ? OTAR2 : OTAR, 0);
/* Signal polarities */
- value = ((mode->flags & DRM_MODE_FLAG_PVSYNC) ? 0 : DSMR_VSL)
- | ((mode->flags & DRM_MODE_FLAG_PHSYNC) ? 0 : DSMR_HSL)
+ value = ((mode->flags & DRM_MODE_FLAG_PVSYNC) ? DSMR_VSL : 0)
+ | ((mode->flags & DRM_MODE_FLAG_PHSYNC) ? DSMR_HSL : 0)
| DSMR_DIPM_DISP | DSMR_CSPM;
rcar_du_crtc_write(rcrtc, DSMR, value);
@@ -172,7 +172,7 @@ static void rcar_du_crtc_set_display_timing(struct rcar_du_crtc *rcrtc)
mode->crtc_vsync_start - 1);
rcar_du_crtc_write(rcrtc, VCR, mode->crtc_vtotal - 1);
- rcar_du_crtc_write(rcrtc, DESR, mode->htotal - mode->hsync_start);
+ rcar_du_crtc_write(rcrtc, DESR, mode->htotal - mode->hsync_start - 1);
rcar_du_crtc_write(rcrtc, DEWR, mode->hdisplay);
}
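
The rcar-du polarity fix above inverts the mapping: DSMR.VSL/HSL are now set when the mode requests positive sync polarity rather than negative, and the DESR change is a separate off-by-one in the display enable position. A standalone rendering of the corrected bit mapping (plain C; the FLAG_*/DSMR_* values are illustrative stand-ins, not the hardware encoding):

#include <stdio.h>

#define FLAG_PHSYNC     (1u << 0)
#define FLAG_PVSYNC     (1u << 1)
#define DSMR_HSL        (1u << 2)
#define DSMR_VSL        (1u << 3)

static unsigned int dsmr_polarity(unsigned int mode_flags)
{
        /* Set the polarity bit exactly when the mode asks for it. */
        return ((mode_flags & FLAG_PVSYNC) ? DSMR_VSL : 0) |
               ((mode_flags & FLAG_PHSYNC) ? DSMR_HSL : 0);
}

int main(void)
{
        printf("0x%x\n", dsmr_polarity(FLAG_PVSYNC));   /* 0x8 */
        return 0;
}
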
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_drv.c b/drivers/gpu/drm/rcar-du/rcar_du_drv.c
index 73c971e39b1c..c05e00872778 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_drv.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_drv.c
@@ -110,6 +110,27 @@ static const struct rcar_du_device_info rcar_du_r8a7791_info = {
.num_lvds = 1,
};
+static const struct rcar_du_device_info rcar_du_r8a7792_info = {
+ .gen = 2,
+ .features = RCAR_DU_FEATURE_CRTC_IRQ_CLOCK
+ | RCAR_DU_FEATURE_EXT_CTRL_REGS,
+ .num_crtcs = 2,
+ .routes = {
+ /* R8A7792 has two RGB outputs. */
+ [RCAR_DU_OUTPUT_DPAD0] = {
+ .possible_crtcs = BIT(0),
+ .encoder_type = DRM_MODE_ENCODER_NONE,
+ .port = 0,
+ },
+ [RCAR_DU_OUTPUT_DPAD1] = {
+ .possible_crtcs = BIT(1),
+ .encoder_type = DRM_MODE_ENCODER_NONE,
+ .port = 1,
+ },
+ },
+ .num_lvds = 0,
+};
+
static const struct rcar_du_device_info rcar_du_r8a7794_info = {
.gen = 2,
.features = RCAR_DU_FEATURE_CRTC_IRQ_CLOCK
@@ -157,13 +178,39 @@ static const struct rcar_du_device_info rcar_du_r8a7795_info = {
.num_lvds = 1,
};
+static const struct rcar_du_device_info rcar_du_r8a7796_info = {
+ .gen = 3,
+ .features = RCAR_DU_FEATURE_CRTC_IRQ_CLOCK
+ | RCAR_DU_FEATURE_EXT_CTRL_REGS
+ | RCAR_DU_FEATURE_VSP1_SOURCE,
+ .num_crtcs = 3,
+ .routes = {
+ /* R8A7796 has one RGB output, one LVDS output and one
+ * (currently unsupported) HDMI output.
+ */
+ [RCAR_DU_OUTPUT_DPAD0] = {
+ .possible_crtcs = BIT(2),
+ .encoder_type = DRM_MODE_ENCODER_NONE,
+ .port = 0,
+ },
+ [RCAR_DU_OUTPUT_LVDS0] = {
+ .possible_crtcs = BIT(0),
+ .encoder_type = DRM_MODE_ENCODER_LVDS,
+ .port = 2,
+ },
+ },
+ .num_lvds = 1,
+};
+
static const struct of_device_id rcar_du_of_table[] = {
{ .compatible = "renesas,du-r8a7779", .data = &rcar_du_r8a7779_info },
{ .compatible = "renesas,du-r8a7790", .data = &rcar_du_r8a7790_info },
{ .compatible = "renesas,du-r8a7791", .data = &rcar_du_r8a7791_info },
+ { .compatible = "renesas,du-r8a7792", .data = &rcar_du_r8a7792_info },
{ .compatible = "renesas,du-r8a7793", .data = &rcar_du_r8a7791_info },
{ .compatible = "renesas,du-r8a7794", .data = &rcar_du_r8a7794_info },
{ .compatible = "renesas,du-r8a7795", .data = &rcar_du_r8a7795_info },
+ { .compatible = "renesas,du-r8a7796", .data = &rcar_du_r8a7796_info },
{ }
};
@@ -201,9 +248,7 @@ static const struct file_operations rcar_du_fops = {
.open = drm_open,
.release = drm_release,
.unlocked_ioctl = drm_ioctl,
-#ifdef CONFIG_COMPAT
.compat_ioctl = drm_compat_ioctl,
-#endif
.poll = drm_poll,
.read = drm_read,
.llseek = no_llseek,
@@ -285,7 +330,6 @@ static int rcar_du_remove(struct platform_device *pdev)
drm_kms_helper_poll_fini(ddev);
drm_mode_config_cleanup(ddev);
- drm_vblank_cleanup(ddev);
drm_dev_unref(ddev);
@@ -294,18 +338,12 @@ static int rcar_du_remove(struct platform_device *pdev)
static int rcar_du_probe(struct platform_device *pdev)
{
- struct device_node *np = pdev->dev.of_node;
struct rcar_du_device *rcdu;
struct drm_device *ddev;
struct resource *mem;
int ret;
- if (np == NULL) {
- dev_err(&pdev->dev, "no device tree node\n");
- return -ENODEV;
- }
-
- /* Allocate and initialize the DRM and R-Car device structures. */
+ /* Allocate and initialize the R-Car device structure. */
rcdu = devm_kzalloc(&pdev->dev, sizeof(*rcdu), GFP_KERNEL);
if (rcdu == NULL)
return -ENOMEM;
@@ -315,31 +353,22 @@ static int rcar_du_probe(struct platform_device *pdev)
rcdu->dev = &pdev->dev;
rcdu->info = of_match_device(rcar_du_of_table, rcdu->dev)->data;
- ddev = drm_dev_alloc(&rcar_du_driver, &pdev->dev);
- if (IS_ERR(ddev))
- return PTR_ERR(ddev);
-
- rcdu->ddev = ddev;
- ddev->dev_private = rcdu;
-
platform_set_drvdata(pdev, rcdu);
/* I/O resources */
mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
rcdu->mmio = devm_ioremap_resource(&pdev->dev, mem);
- if (IS_ERR(rcdu->mmio)) {
- ret = PTR_ERR(rcdu->mmio);
- goto error;
- }
-
- /* Initialize vertical blanking interrupts handling. Start with vblank
- * disabled for all CRTCs.
- */
- ret = drm_vblank_init(ddev, (1 << rcdu->info->num_crtcs) - 1);
- if (ret < 0)
- goto error;
+ if (IS_ERR(rcdu->mmio))
+ return PTR_ERR(rcdu->mmio);
/* DRM/KMS objects */
+ ddev = drm_dev_alloc(&rcar_du_driver, &pdev->dev);
+ if (IS_ERR(ddev))
+ return PTR_ERR(ddev);
+
+ rcdu->ddev = ddev;
+ ddev->dev_private = rcdu;
+
ret = rcar_du_modeset_init(rcdu);
if (ret < 0) {
if (ret != -EPROBE_DEFER)
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_group.c b/drivers/gpu/drm/rcar-du/rcar_du_group.c
index 33b2fc53da3e..64738fca96d0 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_group.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_group.c
@@ -105,16 +105,20 @@ static void rcar_du_group_setup(struct rcar_du_group *rgrp)
if (rcar_du_has(rgrp->dev, RCAR_DU_FEATURE_EXT_CTRL_REGS)) {
rcar_du_group_setup_defr8(rgrp);
- /* Configure input dot clock routing. We currently hardcode the
- * configuration to routing DOTCLKINn to DUn.
+ /*
+ * Configure input dot clock routing. We currently hardcode the
+ * configuration to routing DOTCLKINn to DUn. Register fields
+ * depend on the DU generation, but the resulting value is 0 in
+ * all cases.
+ *
+ * On Gen2 a single register in the first group controls dot
+ * clock selection for all channels, while on Gen3 dot clocks
+ * are setup through per-group registers, only available when
+ * the group has two channels.
*/
- rcar_du_group_write(rgrp, DIDSR, DIDSR_CODE |
- DIDSR_LCDS_DCLKIN(2) |
- DIDSR_LCDS_DCLKIN(1) |
- DIDSR_LCDS_DCLKIN(0) |
- DIDSR_PDCS_CLK(2, 0) |
- DIDSR_PDCS_CLK(1, 0) |
- DIDSR_PDCS_CLK(0, 0));
+ if ((rcdu->info->gen < 3 && rgrp->index == 0) ||
+ (rcdu->info->gen == 3 && rgrp->num_crtcs > 1))
+ rcar_du_group_write(rgrp, DIDSR, DIDSR_CODE);
}
if (rcdu->info->gen >= 3)
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_hdmienc.c b/drivers/gpu/drm/rcar-du/rcar_du_hdmienc.c
index e03004f4588d..f9515f53cc5b 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_hdmienc.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_hdmienc.c
@@ -108,7 +108,7 @@ int rcar_du_hdmienc_init(struct rcar_du_device *rcdu,
if (hdmienc == NULL)
return -ENOMEM;
- /* Locate drm bridge from the hdmi encoder DT node */
+ /* Locate the DRM bridge from the HDMI encoder DT node. */
bridge = of_drm_find_bridge(np);
if (!bridge)
return -EPROBE_DEFER;
@@ -123,7 +123,7 @@ int rcar_du_hdmienc_init(struct rcar_du_device *rcdu,
renc->hdmi = hdmienc;
hdmienc->renc = renc;
- /* Link drm_bridge to encoder */
+ /* Link the bridge to the encoder. */
bridge->encoder = encoder;
encoder->bridge = bridge;
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_kms.c b/drivers/gpu/drm/rcar-du/rcar_du_kms.c
index 392c7e6de042..b5d3f16cfa12 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_kms.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_kms.c
@@ -272,7 +272,7 @@ static void rcar_du_atomic_complete(struct rcar_du_commit *commit)
drm_atomic_helper_cleanup_planes(dev, old_state);
- drm_atomic_state_free(old_state);
+ drm_atomic_state_put(old_state);
/* Complete the commit, wake up any waiter. */
spin_lock(&rcdu->commit.wait.lock);
@@ -338,6 +338,7 @@ static int rcar_du_atomic_commit(struct drm_device *dev,
/* Swap the state, this is the point of no return. */
drm_atomic_helper_swap_state(state, true);
+ drm_atomic_state_get(state);
if (nonblock)
schedule_work(&commit->work);
else
@@ -453,13 +454,13 @@ static int rcar_du_encoders_init_one(struct rcar_du_device *rcdu,
}
ret = rcar_du_encoder_init(rcdu, enc_type, output, encoder, connector);
- of_node_put(encoder);
- of_node_put(connector);
-
if (ret && ret != -EPROBE_DEFER)
dev_warn(rcdu->dev,
- "failed to initialize encoder %s (%d), skipping\n",
- encoder->full_name, ret);
+ "failed to initialize encoder %s on output %u (%d), skipping\n",
+ of_node_full_name(encoder), output, ret);
+
+ of_node_put(encoder);
+ of_node_put(connector);
return ret;
}
@@ -567,6 +568,13 @@ int rcar_du_modeset_init(struct rcar_du_device *rcdu)
if (ret < 0)
return ret;
+ /* Initialize vertical blanking interrupts handling. Start with vblank
+ * disabled for all CRTCs.
+ */
+ ret = drm_vblank_init(dev, (1 << rcdu->info->num_crtcs) - 1);
+ if (ret < 0)
+ return ret;
+
/* Initialize the groups. */
num_groups = DIV_ROUND_UP(rcdu->num_crtcs, 2);
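
The rcar_du_kms.c changes above track the switch of struct drm_atomic_state from drm_atomic_state_free() to reference counting: the commit path takes a reference before the state is handed to the (possibly asynchronous) completion work, and the completion drops it. Kernel-context sketch of the pattern (commit()/commit_tail() are simplified stand-ins for the driver's functions):

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>

static void commit_tail(struct drm_atomic_state *state)
{
        /* ... program the hardware, clean up planes ... */
        drm_atomic_state_put(state);    /* drop the commit's reference */
}

static void commit(struct drm_atomic_state *state)
{
        /* Point of no return: the new state is swapped in. */
        drm_atomic_helper_swap_state(state, true);
        drm_atomic_state_get(state);    /* keep it alive for commit_tail() */
        commit_tail(state);
}
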
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_lvdscon.c b/drivers/gpu/drm/rcar-du/rcar_du_lvdscon.c
index 6afd0af312ba..3bcfd161c53f 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_lvdscon.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_lvdscon.c
@@ -61,16 +61,9 @@ static const struct drm_connector_helper_funcs connector_helper_funcs = {
.get_modes = rcar_du_lvds_connector_get_modes,
};
-static enum drm_connector_status
-rcar_du_lvds_connector_detect(struct drm_connector *connector, bool force)
-{
- return connector_status_connected;
-}
-
static const struct drm_connector_funcs connector_funcs = {
.dpms = drm_atomic_helper_connector_dpms,
.reset = drm_atomic_helper_connector_reset,
- .detect = rcar_du_lvds_connector_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.destroy = drm_connector_cleanup,
.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
@@ -79,7 +72,7 @@ static const struct drm_connector_funcs connector_funcs = {
int rcar_du_lvds_connector_init(struct rcar_du_device *rcdu,
struct rcar_du_encoder *renc,
- /* TODO const */ struct device_node *np)
+ const struct device_node *np)
{
struct drm_encoder *encoder = rcar_encoder_to_drm_encoder(renc);
struct rcar_du_lvds_connector *lvdscon;
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_lvdscon.h b/drivers/gpu/drm/rcar-du/rcar_du_lvdscon.h
index d4881ee0be7e..639071dd235c 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_lvdscon.h
+++ b/drivers/gpu/drm/rcar-du/rcar_du_lvdscon.h
@@ -19,6 +19,6 @@ struct rcar_du_encoder;
int rcar_du_lvds_connector_init(struct rcar_du_device *rcdu,
struct rcar_du_encoder *renc,
- struct device_node *np);
+ const struct device_node *np);
#endif /* __RCAR_DU_LVDSCON_H__ */
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.c b/drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.c
index ef3a50321ecc..e3a4985f6f3f 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.c
@@ -104,7 +104,14 @@ static void rcar_du_lvdsenc_start_gen3(struct rcar_du_lvdsenc *lvds,
rcar_lvds_write(lvds, LVDPLLCR, pllcr);
- /* Turn the PLL on, set it to LVDS normal mode, wait for the startup
+ /* Turn all the channels on. */
+ rcar_lvds_write(lvds, LVDCR1,
+ LVDCR1_CHSTBY_GEN3(3) | LVDCR1_CHSTBY_GEN3(2) |
+ LVDCR1_CHSTBY_GEN3(1) | LVDCR1_CHSTBY_GEN3(0) |
+ LVDCR1_CLKSTBY_GEN3);
+
+ /*
+ * Turn the PLL on, set it to LVDS normal mode, wait for the startup
* delay and turn the output on.
*/
lvdcr0 = LVDCR0_PLLON;
@@ -117,12 +124,6 @@ static void rcar_du_lvdsenc_start_gen3(struct rcar_du_lvdsenc *lvds,
lvdcr0 |= LVDCR0_LVRES;
rcar_lvds_write(lvds, LVDCR0, lvdcr0);
-
- /* Turn all the channels on. */
- rcar_lvds_write(lvds, LVDCR1,
- LVDCR1_CHSTBY_GEN3(3) | LVDCR1_CHSTBY_GEN3(2) |
- LVDCR1_CHSTBY_GEN3(1) | LVDCR1_CHSTBY_GEN3(0) |
- LVDCR1_CLKSTBY_GEN3);
}
static int rcar_du_lvdsenc_start(struct rcar_du_lvdsenc *lvds,
@@ -241,10 +242,8 @@ int rcar_du_lvdsenc_init(struct rcar_du_device *rcdu)
for (i = 0; i < rcdu->info->num_lvds; ++i) {
lvds = devm_kzalloc(&pdev->dev, sizeof(*lvds), GFP_KERNEL);
- if (lvds == NULL) {
- dev_err(&pdev->dev, "failed to allocate private data\n");
+ if (lvds == NULL)
return -ENOMEM;
- }
lvds->dev = rcdu;
lvds->index = i;
diff --git a/drivers/gpu/drm/rockchip/Kconfig b/drivers/gpu/drm/rockchip/Kconfig
index 3c58669a06ce..6f7f9c59f05b 100644
--- a/drivers/gpu/drm/rockchip/Kconfig
+++ b/drivers/gpu/drm/rockchip/Kconfig
@@ -1,7 +1,6 @@
config DRM_ROCKCHIP
tristate "DRM Support for Rockchip"
depends on DRM && ROCKCHIP_IOMMU
- depends on RESET_CONTROLLER
select DRM_GEM_CMA_HELPER
select DRM_KMS_HELPER
select DRM_PANEL
diff --git a/drivers/gpu/drm/rockchip/dw-mipi-dsi.c b/drivers/gpu/drm/rockchip/dw-mipi-dsi.c
index ca22e5ee89ca..d9aa382bb629 100644
--- a/drivers/gpu/drm/rockchip/dw-mipi-dsi.c
+++ b/drivers/gpu/drm/rockchip/dw-mipi-dsi.c
@@ -969,12 +969,6 @@ static struct drm_connector_helper_funcs dw_mipi_dsi_connector_helper_funcs = {
.mode_valid = dw_mipi_dsi_mode_valid,
};
-static enum drm_connector_status
-dw_mipi_dsi_detect(struct drm_connector *connector, bool force)
-{
- return connector_status_connected;
-}
-
static void dw_mipi_dsi_drm_connector_destroy(struct drm_connector *connector)
{
drm_connector_unregister(connector);
@@ -984,7 +978,6 @@ static void dw_mipi_dsi_drm_connector_destroy(struct drm_connector *connector)
static struct drm_connector_funcs dw_mipi_dsi_atomic_connector_funcs = {
.dpms = drm_atomic_helper_connector_dpms,
.fill_modes = drm_helper_probe_single_connector_modes,
- .detect = dw_mipi_dsi_detect,
.destroy = dw_mipi_dsi_drm_connector_destroy,
.reset = drm_atomic_helper_connector_reset,
.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
index 8c8cbe837e61..2390c8577617 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
@@ -20,6 +20,7 @@
#include <drm/drm_crtc_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_of.h>
#include <linux/dma-mapping.h>
#include <linux/pm_runtime.h>
#include <linux/module.h>
@@ -274,9 +275,7 @@ static const struct file_operations rockchip_drm_driver_fops = {
.poll = drm_poll,
.read = drm_read,
.unlocked_ioctl = drm_ioctl,
-#ifdef CONFIG_COMPAT
.compat_ioctl = drm_compat_ioctl,
-#endif
.release = drm_release,
};
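These #ifdef CONFIG_COMPAT removals around .compat_ioctl (here and in the savage, shmobile, sis, sti, sun4i, tdfx and tegra fops tables below) only work if drm_compat_ioctl is usable unconditionally. A plausible header-side arrangement, with the authoritative definition living in the DRM core headers:

	#ifdef CONFIG_COMPAT
	long drm_compat_ioctl(struct file *filp, unsigned int cmd,
			      unsigned long arg);
	#else
	/* Assumed fallback: with COMPAT disabled the name resolves to NULL,
	 * leaving the .compat_ioctl slot unset exactly as the #ifdef did. */
	#define drm_compat_ioctl NULL
	#endif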
@@ -388,7 +387,7 @@ static void rockchip_add_endpoints(struct device *dev,
continue;
}
- component_match_add(dev, match, compare_of, remote);
+ drm_of_component_match_add(dev, match, compare_of, remote);
of_node_put(remote);
}
}
@@ -437,7 +436,8 @@ static int rockchip_drm_platform_probe(struct platform_device *pdev)
}
of_node_put(iommu);
- component_match_add(dev, &match, compare_of, port->parent);
+ drm_of_component_match_add(dev, &match, compare_of,
+ port->parent);
of_node_put(port);
}
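The switch from component_match_add() to drm_of_component_match_add() is about device-node lifetimes. A sketch of the helper's likely shape (the authoritative version lives in drivers/gpu/drm/drm_of.c): it pins the node for as long as the match table needs it and releases it on teardown, which is why callers keep their local of_node_put() calls.

	#include <linux/component.h>
	#include <linux/of.h>

	static void example_release_of(struct device *dev, void *data)
	{
		of_node_put(data);	/* drop the match table's reference */
	}

	static void example_of_component_match_add(struct device *master,
				struct component_match **matchptr,
				int (*compare)(struct device *, void *),
				struct device_node *node)
	{
		of_node_get(node);	/* pin the node for the match table */
		component_match_add_release(master, matchptr,
					    example_release_of, compare, node);
	}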
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c b/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c
index a16c69f96ed5..8f639c8597a5 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c
@@ -37,15 +37,11 @@ static int rockchip_fbdev_mmap(struct fb_info *info,
static struct fb_ops rockchip_drm_fbdev_ops = {
.owner = THIS_MODULE,
+ DRM_FB_HELPER_DEFAULT_OPS,
.fb_mmap = rockchip_fbdev_mmap,
.fb_fillrect = drm_fb_helper_cfb_fillrect,
.fb_copyarea = drm_fb_helper_cfb_copyarea,
.fb_imageblit = drm_fb_helper_cfb_imageblit,
- .fb_check_var = drm_fb_helper_check_var,
- .fb_set_par = drm_fb_helper_set_par,
- .fb_blank = drm_fb_helper_blank,
- .fb_pan_display = drm_fb_helper_pan_display,
- .fb_setcmap = drm_fb_helper_setcmap,
};
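Judging by the five initializers this hunk deletes, DRM_FB_HELPER_DEFAULT_OPS presumably expands to the same defaults; see include/drm/drm_fb_helper.h for the authoritative definition. Roughly:

	#define DRM_FB_HELPER_DEFAULT_OPS \
		.fb_check_var	= drm_fb_helper_check_var, \
		.fb_set_par	= drm_fb_helper_set_par, \
		.fb_blank	= drm_fb_helper_blank, \
		.fb_pan_display	= drm_fb_helper_pan_display, \
		.fb_setcmap	= drm_fb_helper_setcmap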
static int rockchip_drm_fbdev_create(struct drm_fb_helper *helper,
diff --git a/drivers/gpu/drm/savage/savage_drv.c b/drivers/gpu/drm/savage/savage_drv.c
index 3b807135a5cd..78c6d8e9b42c 100644
--- a/drivers/gpu/drm/savage/savage_drv.c
+++ b/drivers/gpu/drm/savage/savage_drv.c
@@ -42,9 +42,7 @@ static const struct file_operations savage_driver_fops = {
.unlocked_ioctl = drm_ioctl,
.mmap = drm_legacy_mmap,
.poll = drm_poll,
-#ifdef CONFIG_COMPAT
.compat_ioctl = drm_compat_ioctl,
-#endif
.llseek = noop_llseek,
};
diff --git a/drivers/gpu/drm/savage/savage_state.c b/drivers/gpu/drm/savage/savage_state.c
index 3dc0d8ff95ec..2db89bed52e8 100644
--- a/drivers/gpu/drm/savage/savage_state.c
+++ b/drivers/gpu/drm/savage/savage_state.c
@@ -1004,6 +1004,7 @@ int savage_bci_cmdbuf(struct drm_device *dev, void *data, struct drm_file *file_
kvb_addr = memdup_user(cmdbuf->vb_addr, cmdbuf->vb_size);
if (IS_ERR(kvb_addr)) {
ret = PTR_ERR(kvb_addr);
+ kvb_addr = NULL;
goto done;
}
cmdbuf->vb_addr = kvb_addr;
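The added kvb_addr = NULL matters because, assuming the done label kfree()s kvb_addr, jumping there with the ERR_PTR returned by memdup_user() would hand a bogus pointer to kfree(). A standalone sketch of the pattern; example_copy_cmdbuf() is a hypothetical helper:

	#include <linux/err.h>
	#include <linux/slab.h>
	#include <linux/string.h>

	static int example_copy_cmdbuf(const void __user *user_ptr, size_t size)
	{
		void *buf;
		int ret = 0;

		buf = memdup_user(user_ptr, size);
		if (IS_ERR(buf)) {
			ret = PTR_ERR(buf);
			buf = NULL;	/* keep the shared kfree() below safe */
			goto done;
		}

		/* ... consume buf ... */
	done:
		kfree(buf);		/* kfree(NULL) is a harmless no-op */
		return ret;
	}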
diff --git a/drivers/gpu/drm/shmobile/shmob_drm_crtc.c b/drivers/gpu/drm/shmobile/shmob_drm_crtc.c
index 6547b1db460a..dddbdd62bed0 100644
--- a/drivers/gpu/drm/shmobile/shmob_drm_crtc.c
+++ b/drivers/gpu/drm/shmobile/shmob_drm_crtc.c
@@ -669,15 +669,8 @@ static void shmob_drm_connector_destroy(struct drm_connector *connector)
drm_connector_cleanup(connector);
}
-static enum drm_connector_status
-shmob_drm_connector_detect(struct drm_connector *connector, bool force)
-{
- return connector_status_connected;
-}
-
static const struct drm_connector_funcs connector_funcs = {
.dpms = drm_helper_connector_dpms,
- .detect = shmob_drm_connector_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.destroy = shmob_drm_connector_destroy,
};
diff --git a/drivers/gpu/drm/shmobile/shmob_drm_drv.c b/drivers/gpu/drm/shmobile/shmob_drm_drv.c
index f0492603ea88..38dd55f4af81 100644
--- a/drivers/gpu/drm/shmobile/shmob_drm_drv.c
+++ b/drivers/gpu/drm/shmobile/shmob_drm_drv.c
@@ -245,9 +245,7 @@ static const struct file_operations shmob_drm_fops = {
.open = drm_open,
.release = drm_release,
.unlocked_ioctl = drm_ioctl,
-#ifdef CONFIG_COMPAT
.compat_ioctl = drm_compat_ioctl,
-#endif
.poll = drm_poll,
.read = drm_read,
.llseek = no_llseek,
diff --git a/drivers/gpu/drm/sis/sis_drv.c b/drivers/gpu/drm/sis/sis_drv.c
index ae9839886c4d..a836451920f0 100644
--- a/drivers/gpu/drm/sis/sis_drv.c
+++ b/drivers/gpu/drm/sis/sis_drv.c
@@ -72,9 +72,7 @@ static const struct file_operations sis_driver_fops = {
.unlocked_ioctl = drm_ioctl,
.mmap = drm_legacy_mmap,
.poll = drm_poll,
-#ifdef CONFIG_COMPAT
.compat_ioctl = drm_compat_ioctl,
-#endif
.llseek = noop_llseek,
};
diff --git a/drivers/gpu/drm/sti/sti_drv.c b/drivers/gpu/drm/sti/sti_drv.c
index 9df308565f6c..ff71e25ab5bf 100644
--- a/drivers/gpu/drm/sti/sti_drv.c
+++ b/drivers/gpu/drm/sti/sti_drv.c
@@ -17,6 +17,7 @@
#include <drm/drm_crtc_helper.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_of.h>
#include "sti_crtc.h"
#include "sti_drv.h"
@@ -184,7 +185,7 @@ static void sti_atomic_complete(struct sti_private *private,
drm_atomic_helper_wait_for_vblanks(drm, state);
drm_atomic_helper_cleanup_planes(drm, state);
- drm_atomic_state_free(state);
+ drm_atomic_state_put(state);
}
static void sti_atomic_work(struct work_struct *work)
@@ -237,6 +238,7 @@ static int sti_atomic_commit(struct drm_device *drm,
drm_atomic_helper_swap_state(state, true);
+ drm_atomic_state_get(state);
if (nonblock)
sti_atomic_schedule(private, state);
else
@@ -295,9 +297,7 @@ static const struct file_operations sti_driver_fops = {
.poll = drm_poll,
.read = drm_read,
.unlocked_ioctl = drm_ioctl,
-#ifdef CONFIG_COMPAT
.compat_ioctl = drm_compat_ioctl,
-#endif
.release = drm_release,
};
@@ -443,8 +443,8 @@ static int sti_platform_probe(struct platform_device *pdev)
child_np = of_get_next_available_child(node, NULL);
while (child_np) {
- component_match_add(dev, &match, compare_of, child_np);
- of_node_put(child_np);
+ drm_of_component_match_add(dev, &match, compare_of,
+ child_np);
child_np = of_get_next_available_child(node, child_np);
}
diff --git a/drivers/gpu/drm/sti/sti_hda.c b/drivers/gpu/drm/sti/sti_hda.c
index e7c243f70870..96f336dd0e29 100644
--- a/drivers/gpu/drm/sti/sti_hda.c
+++ b/drivers/gpu/drm/sti/sti_hda.c
@@ -642,12 +642,6 @@ struct drm_connector_helper_funcs sti_hda_connector_helper_funcs = {
.mode_valid = sti_hda_connector_mode_valid,
};
-static enum drm_connector_status
-sti_hda_connector_detect(struct drm_connector *connector, bool force)
-{
- return connector_status_connected;
-}
-
static int sti_hda_late_register(struct drm_connector *connector)
{
struct sti_hda_connector *hda_connector
@@ -665,7 +659,6 @@ static int sti_hda_late_register(struct drm_connector *connector)
static const struct drm_connector_funcs sti_hda_connector_funcs = {
.dpms = drm_atomic_helper_connector_dpms,
.fill_modes = drm_helper_probe_single_connector_modes,
- .detect = sti_hda_connector_detect,
.destroy = drm_connector_cleanup,
.reset = drm_atomic_helper_connector_reset,
.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
diff --git a/drivers/gpu/drm/sun4i/sun4i_backend.c b/drivers/gpu/drm/sun4i/sun4i_backend.c
index 32c0584e3c35..2e08f969bb64 100644
--- a/drivers/gpu/drm/sun4i/sun4i_backend.c
+++ b/drivers/gpu/drm/sun4i/sun4i_backend.c
@@ -95,6 +95,22 @@ static int sun4i_backend_drm_format_to_layer(struct drm_plane *plane,
*mode = SUN4I_BACKEND_LAY_FBFMT_ARGB8888;
break;
+ case DRM_FORMAT_ARGB4444:
+ *mode = SUN4I_BACKEND_LAY_FBFMT_ARGB4444;
+ break;
+
+ case DRM_FORMAT_ARGB1555:
+ *mode = SUN4I_BACKEND_LAY_FBFMT_ARGB1555;
+ break;
+
+ case DRM_FORMAT_RGBA5551:
+ *mode = SUN4I_BACKEND_LAY_FBFMT_RGBA5551;
+ break;
+
+ case DRM_FORMAT_RGBA4444:
+ *mode = SUN4I_BACKEND_LAY_FBFMT_RGBA4444;
+ break;
+
case DRM_FORMAT_XRGB8888:
*mode = SUN4I_BACKEND_LAY_FBFMT_XRGB8888;
break;
@@ -103,6 +119,10 @@ static int sun4i_backend_drm_format_to_layer(struct drm_plane *plane,
*mode = SUN4I_BACKEND_LAY_FBFMT_RGB888;
break;
+ case DRM_FORMAT_RGB565:
+ *mode = SUN4I_BACKEND_LAY_FBFMT_RGB565;
+ break;
+
default:
return -EINVAL;
}
@@ -389,7 +409,7 @@ static void sun4i_backend_unbind(struct device *dev, struct device *master,
reset_control_assert(backend->reset);
}
-static struct component_ops sun4i_backend_ops = {
+static const struct component_ops sun4i_backend_ops = {
.bind = sun4i_backend_bind,
.unbind = sun4i_backend_unbind,
};
@@ -408,6 +428,7 @@ static int sun4i_backend_remove(struct platform_device *pdev)
static const struct of_device_id sun4i_backend_of_table[] = {
{ .compatible = "allwinner,sun5i-a13-display-backend" },
+ { .compatible = "allwinner,sun6i-a31-display-backend" },
{ .compatible = "allwinner,sun8i-a33-display-backend" },
{ }
};
diff --git a/drivers/gpu/drm/sun4i/sun4i_drv.c b/drivers/gpu/drm/sun4i/sun4i_drv.c
index 70e9fd59c5a2..4ce665349f6b 100644
--- a/drivers/gpu/drm/sun4i/sun4i_drv.c
+++ b/drivers/gpu/drm/sun4i/sun4i_drv.c
@@ -18,6 +18,7 @@
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_fb_helper.h>
+#include <drm/drm_of.h>
#include "sun4i_crtc.h"
#include "sun4i_drv.h"
@@ -52,9 +53,7 @@ static const struct file_operations sun4i_drv_fops = {
.open = drm_open,
.release = drm_release,
.unlocked_ioctl = drm_ioctl,
-#ifdef CONFIG_COMPAT
.compat_ioctl = drm_compat_ioctl,
-#endif
.poll = drm_poll,
.read = drm_read,
.llseek = no_llseek,
@@ -201,12 +200,15 @@ static const struct component_master_ops sun4i_drv_master_ops = {
static bool sun4i_drv_node_is_frontend(struct device_node *node)
{
return of_device_is_compatible(node, "allwinner,sun5i-a13-display-frontend") ||
+ of_device_is_compatible(node, "allwinner,sun6i-a31-display-frontend") ||
of_device_is_compatible(node, "allwinner,sun8i-a33-display-frontend");
}
static bool sun4i_drv_node_is_tcon(struct device_node *node)
{
return of_device_is_compatible(node, "allwinner,sun5i-a13-tcon") ||
+ of_device_is_compatible(node, "allwinner,sun6i-a31-tcon") ||
+ of_device_is_compatible(node, "allwinner,sun6i-a31s-tcon") ||
of_device_is_compatible(node, "allwinner,sun8i-a33-tcon");
}
@@ -239,7 +241,7 @@ static int sun4i_drv_add_endpoints(struct device *dev,
/* Add current component */
DRM_DEBUG_DRIVER("Adding component %s\n",
of_node_full_name(node));
- component_match_add(dev, match, compare_of, node);
+ drm_of_component_match_add(dev, match, compare_of, node);
count++;
}
@@ -322,6 +324,8 @@ static int sun4i_drv_remove(struct platform_device *pdev)
static const struct of_device_id sun4i_drv_of_table[] = {
{ .compatible = "allwinner,sun5i-a13-display-engine" },
+ { .compatible = "allwinner,sun6i-a31-display-engine" },
+ { .compatible = "allwinner,sun6i-a31s-display-engine" },
{ .compatible = "allwinner,sun8i-a33-display-engine" },
{ }
};
diff --git a/drivers/gpu/drm/sun4i/sun4i_layer.c b/drivers/gpu/drm/sun4i/sun4i_layer.c
index f0035bf5efea..5d53c977bca5 100644
--- a/drivers/gpu/drm/sun4i/sun4i_layer.c
+++ b/drivers/gpu/drm/sun4i/sun4i_layer.c
@@ -73,12 +73,18 @@ static const struct drm_plane_funcs sun4i_backend_layer_funcs = {
static const uint32_t sun4i_backend_layer_formats_primary[] = {
DRM_FORMAT_ARGB8888,
DRM_FORMAT_RGB888,
+ DRM_FORMAT_RGB565,
DRM_FORMAT_XRGB8888,
};
static const uint32_t sun4i_backend_layer_formats_overlay[] = {
DRM_FORMAT_ARGB8888,
+ DRM_FORMAT_ARGB4444,
+ DRM_FORMAT_ARGB1555,
+ DRM_FORMAT_RGBA5551,
+ DRM_FORMAT_RGBA4444,
DRM_FORMAT_RGB888,
+ DRM_FORMAT_RGB565,
DRM_FORMAT_XRGB8888,
};
diff --git a/drivers/gpu/drm/sun4i/sun4i_rgb.c b/drivers/gpu/drm/sun4i/sun4i_rgb.c
index d198ad7e5323..f5e86fe7750e 100644
--- a/drivers/gpu/drm/sun4i/sun4i_rgb.c
+++ b/drivers/gpu/drm/sun4i/sun4i_rgb.c
@@ -110,12 +110,6 @@ static struct drm_connector_helper_funcs sun4i_rgb_con_helper_funcs = {
.mode_valid = sun4i_rgb_mode_valid,
};
-static enum drm_connector_status
-sun4i_rgb_connector_detect(struct drm_connector *connector, bool force)
-{
- return connector_status_connected;
-}
-
static void
sun4i_rgb_connector_destroy(struct drm_connector *connector)
{
@@ -129,7 +123,6 @@ sun4i_rgb_connector_destroy(struct drm_connector *connector)
static struct drm_connector_funcs sun4i_rgb_con_funcs = {
.dpms = drm_atomic_helper_connector_dpms,
- .detect = sun4i_rgb_connector_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.destroy = sun4i_rgb_connector_destroy,
.reset = drm_atomic_helper_connector_reset,
diff --git a/drivers/gpu/drm/sun4i/sun4i_tcon.c b/drivers/gpu/drm/sun4i/sun4i_tcon.c
index cadacb517f95..ea2906f87cb9 100644
--- a/drivers/gpu/drm/sun4i/sun4i_tcon.c
+++ b/drivers/gpu/drm/sun4i/sun4i_tcon.c
@@ -20,6 +20,7 @@
#include <linux/component.h>
#include <linux/ioport.h>
#include <linux/of_address.h>
+#include <linux/of_device.h>
#include <linux/of_graph.h>
#include <linux/of_irq.h>
#include <linux/regmap.h>
@@ -62,7 +63,7 @@ void sun4i_tcon_channel_disable(struct sun4i_tcon *tcon, int channel)
return;
}
- WARN_ON(!tcon->has_channel_1);
+ WARN_ON(!tcon->quirks->has_channel_1);
regmap_update_bits(tcon->regs, SUN4I_TCON1_CTL_REG,
SUN4I_TCON1_CTL_TCON_ENABLE, 0);
clk_disable_unprepare(tcon->sclk1);
@@ -80,7 +81,7 @@ void sun4i_tcon_channel_enable(struct sun4i_tcon *tcon, int channel)
return;
}
- WARN_ON(!tcon->has_channel_1);
+ WARN_ON(!tcon->quirks->has_channel_1);
regmap_update_bits(tcon->regs, SUN4I_TCON1_CTL_REG,
SUN4I_TCON1_CTL_TCON_ENABLE,
SUN4I_TCON1_CTL_TCON_ENABLE);
@@ -202,7 +203,7 @@ void sun4i_tcon1_mode_set(struct sun4i_tcon *tcon,
u8 clk_delay;
u32 val;
- WARN_ON(!tcon->has_channel_1);
+ WARN_ON(!tcon->quirks->has_channel_1);
/* Adjust clock delay */
clk_delay = sun4i_tcon_get_clk_delay(mode, 1);
@@ -266,7 +267,7 @@ void sun4i_tcon1_mode_set(struct sun4i_tcon *tcon,
/*
* FIXME: Undocumented bits
*/
- if (tcon->has_mux)
+ if (tcon->quirks->has_unknown_mux)
regmap_write(tcon->regs, SUN4I_TCON_MUX_CTRL_REG, 1);
}
EXPORT_SYMBOL(sun4i_tcon1_mode_set);
@@ -327,7 +328,7 @@ static int sun4i_tcon_init_clocks(struct device *dev,
return PTR_ERR(tcon->sclk0);
}
- if (tcon->has_channel_1) {
+ if (tcon->quirks->has_channel_1) {
tcon->sclk1 = devm_clk_get(dev, "tcon-ch1");
if (IS_ERR(tcon->sclk1)) {
dev_err(dev, "Couldn't get the TCON channel 1 clock\n");
@@ -487,14 +488,7 @@ static int sun4i_tcon_bind(struct device *dev, struct device *master,
drv->tcon = tcon;
tcon->drm = drm;
tcon->dev = dev;
-
- if (of_device_is_compatible(dev->of_node, "allwinner,sun5i-a13-tcon")) {
- tcon->has_mux = true;
- tcon->has_channel_1 = true;
- } else {
- tcon->has_mux = false;
- tcon->has_channel_1 = false;
- }
+ tcon->quirks = of_device_get_match_data(dev);
tcon->lcd_rst = devm_reset_control_get(dev, "lcd");
if (IS_ERR(tcon->lcd_rst)) {
@@ -551,7 +545,7 @@ static void sun4i_tcon_unbind(struct device *dev, struct device *master,
sun4i_tcon_free_clocks(tcon);
}
-static struct component_ops sun4i_tcon_ops = {
+static const struct component_ops sun4i_tcon_ops = {
.bind = sun4i_tcon_bind,
.unbind = sun4i_tcon_unbind,
};
@@ -588,9 +582,28 @@ static int sun4i_tcon_remove(struct platform_device *pdev)
return 0;
}
+static const struct sun4i_tcon_quirks sun5i_a13_quirks = {
+ .has_unknown_mux = true,
+ .has_channel_1 = true,
+};
+
+static const struct sun4i_tcon_quirks sun6i_a31_quirks = {
+ .has_channel_1 = true,
+};
+
+static const struct sun4i_tcon_quirks sun6i_a31s_quirks = {
+ .has_channel_1 = true,
+};
+
+static const struct sun4i_tcon_quirks sun8i_a33_quirks = {
+ /* nothing is supported */
+};
+
static const struct of_device_id sun4i_tcon_of_table[] = {
- { .compatible = "allwinner,sun5i-a13-tcon" },
- { .compatible = "allwinner,sun8i-a33-tcon" },
+ { .compatible = "allwinner,sun5i-a13-tcon", .data = &sun5i_a13_quirks },
+ { .compatible = "allwinner,sun6i-a31-tcon", .data = &sun6i_a31_quirks },
+ { .compatible = "allwinner,sun6i-a31s-tcon", .data = &sun6i_a31s_quirks },
+ { .compatible = "allwinner,sun8i-a33-tcon", .data = &sun8i_a33_quirks },
{ }
};
MODULE_DEVICE_TABLE(of, sun4i_tcon_of_table);
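A minimal sketch of the match-data lookup the quirks conversion relies on, assuming <linux/of_device.h>: of_device_get_match_data() hands back the .data pointer of whichever compatible entry matched, so per-SoC differences become one assignment instead of chained of_device_is_compatible() tests. example_bind() is hypothetical:

	#include <linux/of_device.h>

	static int example_bind(struct device *dev)
	{
		const struct sun4i_tcon_quirks *quirks;

		quirks = of_device_get_match_data(dev);
		if (!quirks)
			return -EINVAL;	/* bound without a matching entry */

		if (quirks->has_channel_1) {
			/* acquire the channel 1 clock, etc. */
		}

		return 0;
	}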
diff --git a/drivers/gpu/drm/sun4i/sun4i_tcon.h b/drivers/gpu/drm/sun4i/sun4i_tcon.h
index 12bd48925f4d..166064bafe2e 100644
--- a/drivers/gpu/drm/sun4i/sun4i_tcon.h
+++ b/drivers/gpu/drm/sun4i/sun4i_tcon.h
@@ -142,6 +142,11 @@
#define SUN4I_TCON_MAX_CHANNELS 2
+struct sun4i_tcon_quirks {
+ bool has_unknown_mux; /* sun5i has undocumented mux */
+ bool has_channel_1; /* a33 does not have channel 1 */
+};
+
struct sun4i_tcon {
struct device *dev;
struct drm_device *drm;
@@ -160,12 +165,10 @@ struct sun4i_tcon {
/* Reset control */
struct reset_control *lcd_rst;
- /* Platform adjustments */
- bool has_mux;
-
struct drm_panel *panel;
- bool has_channel_1;
+ /* Platform adjustments */
+ const struct sun4i_tcon_quirks *quirks;
};
struct drm_bridge *sun4i_tcon_find_bridge(struct device_node *node);
diff --git a/drivers/gpu/drm/sun4i/sun4i_tv.c b/drivers/gpu/drm/sun4i/sun4i_tv.c
index 1dd3d9eabf2e..c6f47222e8fc 100644
--- a/drivers/gpu/drm/sun4i/sun4i_tv.c
+++ b/drivers/gpu/drm/sun4i/sun4i_tv.c
@@ -537,12 +537,6 @@ static struct drm_connector_helper_funcs sun4i_tv_comp_connector_helper_funcs =
.mode_valid = sun4i_tv_comp_mode_valid,
};
-static enum drm_connector_status
-sun4i_tv_comp_connector_detect(struct drm_connector *connector, bool force)
-{
- return connector_status_connected;
-}
-
static void
sun4i_tv_comp_connector_destroy(struct drm_connector *connector)
{
@@ -551,7 +545,6 @@ sun4i_tv_comp_connector_destroy(struct drm_connector *connector)
static struct drm_connector_funcs sun4i_tv_comp_connector_funcs = {
.dpms = drm_atomic_helper_connector_dpms,
- .detect = sun4i_tv_comp_connector_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.destroy = sun4i_tv_comp_connector_destroy,
.reset = drm_atomic_helper_connector_reset,
@@ -667,7 +660,7 @@ static void sun4i_tv_unbind(struct device *dev, struct device *master,
clk_disable_unprepare(tv->clk);
}
-static struct component_ops sun4i_tv_ops = {
+static const struct component_ops sun4i_tv_ops = {
.bind = sun4i_tv_bind,
.unbind = sun4i_tv_unbind,
};
diff --git a/drivers/gpu/drm/sun4i/sun6i_drc.c b/drivers/gpu/drm/sun4i/sun6i_drc.c
index bf6d846d8132..09bba853e2a4 100644
--- a/drivers/gpu/drm/sun4i/sun6i_drc.c
+++ b/drivers/gpu/drm/sun4i/sun6i_drc.c
@@ -80,7 +80,7 @@ static void sun6i_drc_unbind(struct device *dev, struct device *master,
reset_control_assert(drc->reset);
}
-static struct component_ops sun6i_drc_ops = {
+static const struct component_ops sun6i_drc_ops = {
.bind = sun6i_drc_bind,
.unbind = sun6i_drc_unbind,
};
@@ -98,6 +98,8 @@ static int sun6i_drc_remove(struct platform_device *pdev)
}
static const struct of_device_id sun6i_drc_of_table[] = {
+ { .compatible = "allwinner,sun6i-a31-drc" },
+ { .compatible = "allwinner,sun6i-a31s-drc" },
{ .compatible = "allwinner,sun8i-a33-drc" },
{ }
};
diff --git a/drivers/gpu/drm/tdfx/tdfx_drv.c b/drivers/gpu/drm/tdfx/tdfx_drv.c
index f418892b0c71..c54138c3a376 100644
--- a/drivers/gpu/drm/tdfx/tdfx_drv.c
+++ b/drivers/gpu/drm/tdfx/tdfx_drv.c
@@ -49,9 +49,7 @@ static const struct file_operations tdfx_driver_fops = {
.unlocked_ioctl = drm_ioctl,
.mmap = drm_legacy_mmap,
.poll = drm_poll,
-#ifdef CONFIG_COMPAT
.compat_ioctl = drm_compat_ioctl,
-#endif
.llseek = noop_llseek,
};
diff --git a/drivers/gpu/drm/tegra/Kconfig b/drivers/gpu/drm/tegra/Kconfig
index 63ebb154b9b5..bbf5a4b7e0b6 100644
--- a/drivers/gpu/drm/tegra/Kconfig
+++ b/drivers/gpu/drm/tegra/Kconfig
@@ -3,7 +3,6 @@ config DRM_TEGRA
depends on ARCH_TEGRA || (ARM && COMPILE_TEST)
depends on COMMON_CLK
depends on DRM
- depends on RESET_CONTROLLER
select DRM_KMS_HELPER
select DRM_MIPI_DSI
select DRM_PANEL
diff --git a/drivers/gpu/drm/tegra/dpaux.c b/drivers/gpu/drm/tegra/dpaux.c
index 059f409556d5..2fde44c3a1b3 100644
--- a/drivers/gpu/drm/tegra/dpaux.c
+++ b/drivers/gpu/drm/tegra/dpaux.c
@@ -539,9 +539,9 @@ static int tegra_dpaux_probe(struct platform_device *pdev)
dpaux->desc.owner = THIS_MODULE;
dpaux->pinctrl = devm_pinctrl_register(&pdev->dev, &dpaux->desc, dpaux);
- if (!dpaux->pinctrl) {
+ if (IS_ERR(dpaux->pinctrl)) {
dev_err(&pdev->dev, "failed to register pincontrol\n");
- return -ENODEV;
+ return PTR_ERR(dpaux->pinctrl);
}
#endif
/* enable and clear all interrupts */
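The dpaux fix corrects a classic ERR_PTR mix-up: devm_pinctrl_register() reports failure through an error pointer, never NULL, so the old !dpaux->pinctrl test accepted error values and would oops later. A sketch of the idiom; example_register_pinctrl() is hypothetical:

	#include <linux/err.h>
	#include <linux/pinctrl/pinctrl.h>

	static int example_register_pinctrl(struct device *dev,
					    struct pinctrl_desc *desc,
					    void *priv)
	{
		struct pinctrl_dev *pctl;

		pctl = devm_pinctrl_register(dev, desc, priv);
		if (IS_ERR(pctl))	/* never NULL on failure */
			return PTR_ERR(pctl);

		return 0;
	}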
diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c
index 8ab47b502d83..b8be3ee4d3b8 100644
--- a/drivers/gpu/drm/tegra/drm.c
+++ b/drivers/gpu/drm/tegra/drm.c
@@ -63,7 +63,7 @@ static void tegra_atomic_complete(struct tegra_drm *tegra,
drm_atomic_helper_wait_for_vblanks(drm, state);
drm_atomic_helper_cleanup_planes(drm, state);
- drm_atomic_state_free(state);
+ drm_atomic_state_put(state);
}
static void tegra_atomic_work(struct work_struct *work)
@@ -96,6 +96,7 @@ static int tegra_atomic_commit(struct drm_device *drm,
drm_atomic_helper_swap_state(state, true);
+ drm_atomic_state_get(state);
if (nonblock)
tegra_atomic_schedule(tegra, state);
else
@@ -801,9 +802,7 @@ static const struct file_operations tegra_drm_fops = {
.mmap = tegra_drm_mmap,
.poll = drm_poll,
.read = drm_read,
-#ifdef CONFIG_COMPAT
.compat_ioctl = drm_compat_ioctl,
-#endif
.llseek = noop_llseek,
};
diff --git a/drivers/gpu/drm/tegra/fb.c b/drivers/gpu/drm/tegra/fb.c
index e6d71fa4028e..e4a5ab0a9677 100644
--- a/drivers/gpu/drm/tegra/fb.c
+++ b/drivers/gpu/drm/tegra/fb.c
@@ -186,14 +186,10 @@ unreference:
#ifdef CONFIG_DRM_FBDEV_EMULATION
static struct fb_ops tegra_fb_ops = {
.owner = THIS_MODULE,
+ DRM_FB_HELPER_DEFAULT_OPS,
.fb_fillrect = drm_fb_helper_sys_fillrect,
.fb_copyarea = drm_fb_helper_sys_copyarea,
.fb_imageblit = drm_fb_helper_sys_imageblit,
- .fb_check_var = drm_fb_helper_check_var,
- .fb_set_par = drm_fb_helper_set_par,
- .fb_blank = drm_fb_helper_blank,
- .fb_pan_display = drm_fb_helper_pan_display,
- .fb_setcmap = drm_fb_helper_setcmap,
};
static int tegra_fbdev_probe(struct drm_fb_helper *helper,
diff --git a/drivers/gpu/drm/tegra/gem.c b/drivers/gpu/drm/tegra/gem.c
index 95e622e31931..7d853e6b5ff0 100644
--- a/drivers/gpu/drm/tegra/gem.c
+++ b/drivers/gpu/drm/tegra/gem.c
@@ -2,7 +2,7 @@
* NVIDIA Tegra DRM GEM helper functions
*
* Copyright (C) 2012 Sascha Hauer, Pengutronix
- * Copyright (C) 2013 NVIDIA CORPORATION, All rights reserved.
+ * Copyright (C) 2013-2015 NVIDIA CORPORATION, All rights reserved.
*
* Based on the GEM/CMA helpers
*
@@ -36,6 +36,8 @@ static dma_addr_t tegra_bo_pin(struct host1x_bo *bo, struct sg_table **sgt)
{
struct tegra_bo *obj = host1x_to_tegra_bo(bo);
+ *sgt = obj->sgt;
+
return obj->paddr;
}
@@ -47,23 +49,51 @@ static void *tegra_bo_mmap(struct host1x_bo *bo)
{
struct tegra_bo *obj = host1x_to_tegra_bo(bo);
- return obj->vaddr;
+ if (obj->vaddr)
+ return obj->vaddr;
+ else if (obj->gem.import_attach)
+ return dma_buf_vmap(obj->gem.import_attach->dmabuf);
+ else
+ return vmap(obj->pages, obj->num_pages, VM_MAP,
+ pgprot_writecombine(PAGE_KERNEL));
}
static void tegra_bo_munmap(struct host1x_bo *bo, void *addr)
{
+ struct tegra_bo *obj = host1x_to_tegra_bo(bo);
+
+ if (obj->vaddr)
+ return;
+ else if (obj->gem.import_attach)
+ dma_buf_vunmap(obj->gem.import_attach->dmabuf, addr);
+ else
+ vunmap(addr);
}
static void *tegra_bo_kmap(struct host1x_bo *bo, unsigned int page)
{
struct tegra_bo *obj = host1x_to_tegra_bo(bo);
- return obj->vaddr + page * PAGE_SIZE;
+ if (obj->vaddr)
+ return obj->vaddr + page * PAGE_SIZE;
+ else if (obj->gem.import_attach)
+ return dma_buf_kmap(obj->gem.import_attach->dmabuf, page);
+ else
+ return vmap(obj->pages + page, 1, VM_MAP,
+ pgprot_writecombine(PAGE_KERNEL));
}
static void tegra_bo_kunmap(struct host1x_bo *bo, unsigned int page,
void *addr)
{
+ struct tegra_bo *obj = host1x_to_tegra_bo(bo);
+
+ if (obj->vaddr)
+ return;
+ else if (obj->gem.import_attach)
+ dma_buf_kunmap(obj->gem.import_attach->dmabuf, page, addr);
+ else
+ vunmap(addr);
}
static struct host1x_bo *tegra_bo_get(struct host1x_bo *bo)
@@ -318,11 +348,6 @@ static struct tegra_bo *tegra_bo_import(struct drm_device *drm,
get_dma_buf(buf);
bo->sgt = dma_buf_map_attachment(attach, DMA_TO_DEVICE);
- if (!bo->sgt) {
- err = -ENOMEM;
- goto detach;
- }
-
if (IS_ERR(bo->sgt)) {
err = PTR_ERR(bo->sgt);
goto detach;
@@ -427,10 +452,10 @@ static int tegra_bo_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
if (!bo->pages)
return VM_FAULT_SIGBUS;
- offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >> PAGE_SHIFT;
+ offset = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
page = bo->pages[offset];
- err = vm_insert_page(vma, (unsigned long)vmf->virtual_address, page);
+ err = vm_insert_page(vma, vmf->address, page);
switch (err) {
case -EAGAIN:
case 0:
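The vmf->address hunks track an mm API change: as best understood here, struct vm_fault's void __user *virtual_address became an unsigned long address, so fault handlers drop their casts. A simplified sketch (the real handler maps vm_insert_page() errors case by case, and example_pages is a hypothetical backing store):

	extern struct page *example_pages[];	/* hypothetical */

	static int example_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
	{
		/* vmf->address is an unsigned long now; no cast needed. */
		unsigned long offset =
			(vmf->address - vma->vm_start) >> PAGE_SHIFT;

		return vm_insert_page(vma, vmf->address, example_pages[offset]) ?
			VM_FAULT_SIGBUS : 0;
	}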
diff --git a/drivers/gpu/drm/tegra/gr3d.c b/drivers/gpu/drm/tegra/gr3d.c
index 0b3f2b977ba0..13f0d1b7cd98 100644
--- a/drivers/gpu/drm/tegra/gr3d.c
+++ b/drivers/gpu/drm/tegra/gr3d.c
@@ -268,9 +268,9 @@ static int gr3d_probe(struct platform_device *pdev)
if (of_device_is_compatible(np, "nvidia,tegra30-gr3d")) {
gr3d->clk_secondary = devm_clk_get(&pdev->dev, "3d2");
- if (IS_ERR(gr3d->clk)) {
+ if (IS_ERR(gr3d->clk_secondary)) {
dev_err(&pdev->dev, "cannot get secondary clock\n");
- return PTR_ERR(gr3d->clk);
+ return PTR_ERR(gr3d->clk_secondary);
}
gr3d->rst_secondary = devm_reset_control_get(&pdev->dev,
diff --git a/drivers/gpu/drm/tegra/sor.c b/drivers/gpu/drm/tegra/sor.c
index 74d0540b8d4c..a8f528925009 100644
--- a/drivers/gpu/drm/tegra/sor.c
+++ b/drivers/gpu/drm/tegra/sor.c
@@ -349,8 +349,6 @@ static struct clk *tegra_clk_sor_brick_register(struct tegra_sor *sor,
brick->hw.init = &init;
clk = devm_clk_register(sor->dev, &brick->hw);
- if (IS_ERR(clk))
- kfree(brick);
return clk;
}
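Dropping kfree(brick) is correct if, as the surrounding devm calls suggest, brick itself comes from devm_kzalloc(): an explicit kfree() on the error path would then double-free when the devres list unwinds. Sketch of the assumed lifetime:

	brick = devm_kzalloc(sor->dev, sizeof(*brick), GFP_KERNEL);
	if (!brick)
		return ERR_PTR(-ENOMEM);

	/* ... fill in brick->hw ... */

	clk = devm_clk_register(sor->dev, &brick->hw);
	/* No kfree(brick) on IS_ERR(clk): devres owns the allocation. */
	return clk;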
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_crtc.c b/drivers/gpu/drm/tilcdc/tilcdc_crtc.c
index 52ebe8fc1784..9942b0577d6e 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_crtc.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_crtc.c
@@ -21,11 +21,15 @@
#include <drm/drm_flip_work.h>
#include <drm/drm_plane_helper.h>
#include <linux/workqueue.h>
+#include <linux/completion.h>
+#include <linux/dma-mapping.h>
#include "tilcdc_drv.h"
#include "tilcdc_regs.h"
-#define TILCDC_VBLANK_SAFETY_THRESHOLD_US 1000
+#define TILCDC_VBLANK_SAFETY_THRESHOLD_US 1000
+#define TILCDC_PALETTE_SIZE 32
+#define TILCDC_PALETTE_FIRST_ENTRY 0x4000
struct tilcdc_crtc {
struct drm_crtc base;
@@ -33,7 +37,9 @@ struct tilcdc_crtc {
struct drm_plane primary;
const struct tilcdc_panel_info *info;
struct drm_pending_vblank_event *event;
+ struct mutex enable_lock;
bool enabled;
+ bool shutdown;
wait_queue_head_t frame_done_wq;
bool frame_done;
spinlock_t irq_lock;
@@ -53,6 +59,11 @@ struct tilcdc_crtc {
int sync_lost_count;
bool frame_intact;
+ struct work_struct recover_work;
+
+ dma_addr_t palette_dma_handle;
+ u16 *palette_base;
+ struct completion palette_loaded;
};
#define to_tilcdc_crtc(x) container_of(x, struct tilcdc_crtc, base)
@@ -71,17 +82,16 @@ static void set_scanout(struct drm_crtc *crtc, struct drm_framebuffer *fb)
{
struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
struct drm_device *dev = crtc->dev;
+ struct tilcdc_drm_private *priv = dev->dev_private;
struct drm_gem_cma_object *gem;
- unsigned int depth, bpp;
dma_addr_t start, end;
u64 dma_base_and_ceiling;
- drm_fb_get_bpp_depth(fb->pixel_format, &depth, &bpp);
gem = drm_fb_cma_get_gem_obj(fb, 0);
start = gem->paddr + fb->offsets[0] +
crtc->y * fb->pitches[0] +
- crtc->x * bpp / 8;
+ crtc->x * drm_format_plane_cpp(fb->pixel_format, 0);
end = start + (crtc->mode.vdisplay * fb->pitches[0]);
@@ -90,7 +100,10 @@ static void set_scanout(struct drm_crtc *crtc, struct drm_framebuffer *fb)
* unlikely that LCDC would fetch the DMA addresses in the middle of
* an update.
*/
- dma_base_and_ceiling = (u64)(end - 1) << 32 | start;
+ if (priv->rev == 1)
+ end -= 1;
+
+ dma_base_and_ceiling = (u64)end << 32 | start;
tilcdc_write64(dev, LCDC_DMA_FB_BASE_ADDR_0_REG, dma_base_and_ceiling);
if (tilcdc_crtc->curr_fb)
@@ -100,6 +113,56 @@ static void set_scanout(struct drm_crtc *crtc, struct drm_framebuffer *fb)
tilcdc_crtc->curr_fb = fb;
}
+/*
+ * The driver currently supports only true color formats. For
+ * true color the palette block is bypassed, but a 32-byte palette
+ * should still be loaded. The first 16-bit entry must be 0x4000 while
+ * all other entries must be zeroed.
+ */
+static void tilcdc_crtc_load_palette(struct drm_crtc *crtc)
+{
+ struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
+ struct drm_device *dev = crtc->dev;
+ struct tilcdc_drm_private *priv = dev->dev_private;
+ int ret;
+
+ reinit_completion(&tilcdc_crtc->palette_loaded);
+
+ /* Tell the LCDC where the palette is located. */
+ tilcdc_write(dev, LCDC_DMA_FB_BASE_ADDR_0_REG,
+ tilcdc_crtc->palette_dma_handle);
+ tilcdc_write(dev, LCDC_DMA_FB_CEILING_ADDR_0_REG,
+ (u32) tilcdc_crtc->palette_dma_handle +
+ TILCDC_PALETTE_SIZE - 1);
+
+ /* Set dma load mode for palette loading only. */
+ tilcdc_write_mask(dev, LCDC_RASTER_CTRL_REG,
+ LCDC_PALETTE_LOAD_MODE(PALETTE_ONLY),
+ LCDC_PALETTE_LOAD_MODE_MASK);
+
+ /* Enable DMA Palette Loaded Interrupt */
+ if (priv->rev == 1)
+ tilcdc_set(dev, LCDC_RASTER_CTRL_REG, LCDC_V1_PL_INT_ENA);
+ else
+ tilcdc_write(dev, LCDC_INT_ENABLE_SET_REG, LCDC_V2_PL_INT_ENA);
+
+ /* Enable LCDC DMA and wait for palette to be loaded. */
+ tilcdc_clear_irqstatus(dev, 0xffffffff);
+ tilcdc_set(dev, LCDC_RASTER_CTRL_REG, LCDC_RASTER_ENABLE);
+
+ ret = wait_for_completion_timeout(&tilcdc_crtc->palette_loaded,
+ msecs_to_jiffies(50));
+ if (ret == 0)
+ dev_err(dev->dev, "%s: Palette loading timeout", __func__);
+
+ /* Disable LCDC DMA and DMA Palette Loaded Interrupt. */
+ tilcdc_clear(dev, LCDC_RASTER_CTRL_REG, LCDC_RASTER_ENABLE);
+ if (priv->rev == 1)
+ tilcdc_clear(dev, LCDC_RASTER_CTRL_REG, LCDC_V1_PL_INT_ENA);
+ else
+ tilcdc_write(dev, LCDC_INT_ENABLE_CLR_REG, LCDC_V2_PL_INT_ENA);
+}
+
static void tilcdc_crtc_enable_irqs(struct drm_device *dev)
{
struct tilcdc_drm_private *priv = dev->dev_private;
@@ -108,6 +171,7 @@ static void tilcdc_crtc_enable_irqs(struct drm_device *dev)
if (priv->rev == 1) {
tilcdc_set(dev, LCDC_RASTER_CTRL_REG,
+ LCDC_V1_SYNC_LOST_INT_ENA | LCDC_V1_FRAME_DONE_INT_ENA |
LCDC_V1_UNDERFLOW_INT_ENA);
tilcdc_set(dev, LCDC_DMA_CTRL_REG,
LCDC_V1_END_OF_FRAME_INT_ENA);
@@ -126,6 +190,7 @@ static void tilcdc_crtc_disable_irqs(struct drm_device *dev)
/* disable irqs that we might have enabled: */
if (priv->rev == 1) {
tilcdc_clear(dev, LCDC_RASTER_CTRL_REG,
+ LCDC_V1_SYNC_LOST_INT_ENA | LCDC_V1_FRAME_DONE_INT_ENA |
LCDC_V1_UNDERFLOW_INT_ENA | LCDC_V1_PL_INT_ENA);
tilcdc_clear(dev, LCDC_DMA_CTRL_REG,
LCDC_V1_END_OF_FRAME_INT_ENA);
@@ -150,193 +215,68 @@ static void reset(struct drm_crtc *crtc)
tilcdc_clear(dev, LCDC_CLK_RESET_REG, LCDC_CLK_MAIN_RESET);
}
-static void tilcdc_crtc_enable(struct drm_crtc *crtc)
+/*
+ * Calculate the percentage difference between the requested pixel clock rate
+ * and the effective rate resulting from calculating the clock divider value.
+ */
+static unsigned int tilcdc_pclk_diff(unsigned long rate,
+ unsigned long real_rate)
{
- struct drm_device *dev = crtc->dev;
- struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
-
- WARN_ON(!drm_modeset_is_locked(&crtc->mutex));
-
- if (tilcdc_crtc->enabled)
- return;
-
- pm_runtime_get_sync(dev->dev);
-
- reset(crtc);
-
- tilcdc_crtc_enable_irqs(dev);
-
- tilcdc_clear(dev, LCDC_DMA_CTRL_REG, LCDC_DUAL_FRAME_BUFFER_ENABLE);
- tilcdc_set(dev, LCDC_RASTER_CTRL_REG, LCDC_PALETTE_LOAD_MODE(DATA_ONLY));
- tilcdc_set(dev, LCDC_RASTER_CTRL_REG, LCDC_RASTER_ENABLE);
-
- drm_crtc_vblank_on(crtc);
+ int r = rate / 100, rr = real_rate / 100;
- tilcdc_crtc->enabled = true;
+ return (unsigned int)(abs(((rr - r) * 100) / r));
}
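A worked example for tilcdc_pclk_diff(), with illustrative numbers: rate = 74250000 and real_rate = 72000000 give r = 742500 and rr = 720000, so the function returns |(720000 - 742500) * 100 / 742500| = 3, a 3% deviation. The up-front division by 100 keeps the later multiplication by 100 inside 32-bit range for pixel clocks in the hundreds of MHz.

	unsigned int diff = tilcdc_pclk_diff(74250000, 72000000);	/* -> 3 */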
-void tilcdc_crtc_disable(struct drm_crtc *crtc)
+static void tilcdc_crtc_set_clk(struct drm_crtc *crtc)
{
- struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
struct drm_device *dev = crtc->dev;
struct tilcdc_drm_private *priv = dev->dev_private;
-
- WARN_ON(!drm_modeset_is_locked(&crtc->mutex));
-
- if (!tilcdc_crtc->enabled)
- return;
-
- tilcdc_crtc->frame_done = false;
- tilcdc_clear(dev, LCDC_RASTER_CTRL_REG, LCDC_RASTER_ENABLE);
-
- /*
- * if necessary wait for framedone irq which will still come
- * before putting things to sleep..
- */
- if (priv->rev == 2) {
- int ret = wait_event_timeout(tilcdc_crtc->frame_done_wq,
- tilcdc_crtc->frame_done,
- msecs_to_jiffies(500));
- if (ret == 0)
- dev_err(dev->dev, "%s: timeout waiting for framedone\n",
- __func__);
- }
-
- drm_crtc_vblank_off(crtc);
-
- tilcdc_crtc_disable_irqs(dev);
-
- pm_runtime_put_sync(dev->dev);
-
- if (tilcdc_crtc->next_fb) {
- drm_flip_work_queue(&tilcdc_crtc->unref_work,
- tilcdc_crtc->next_fb);
- tilcdc_crtc->next_fb = NULL;
- }
-
- if (tilcdc_crtc->curr_fb) {
- drm_flip_work_queue(&tilcdc_crtc->unref_work,
- tilcdc_crtc->curr_fb);
- tilcdc_crtc->curr_fb = NULL;
- }
-
- drm_flip_work_commit(&tilcdc_crtc->unref_work, priv->wq);
- tilcdc_crtc->last_vblank = ktime_set(0, 0);
-
- tilcdc_crtc->enabled = false;
-}
-
-static bool tilcdc_crtc_is_on(struct drm_crtc *crtc)
-{
- return crtc->state && crtc->state->enable && crtc->state->active;
-}
-
-static void tilcdc_crtc_destroy(struct drm_crtc *crtc)
-{
- struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
- struct tilcdc_drm_private *priv = crtc->dev->dev_private;
-
- drm_modeset_lock_crtc(crtc, NULL);
- tilcdc_crtc_disable(crtc);
- drm_modeset_unlock_crtc(crtc);
-
- flush_workqueue(priv->wq);
-
- of_node_put(crtc->port);
- drm_crtc_cleanup(crtc);
- drm_flip_work_cleanup(&tilcdc_crtc->unref_work);
-}
-
-int tilcdc_crtc_update_fb(struct drm_crtc *crtc,
- struct drm_framebuffer *fb,
- struct drm_pending_vblank_event *event)
-{
- struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
- struct drm_device *dev = crtc->dev;
- unsigned long flags;
-
- WARN_ON(!drm_modeset_is_locked(&crtc->mutex));
-
- if (tilcdc_crtc->event) {
- dev_err(dev->dev, "already pending page flip!\n");
- return -EBUSY;
- }
-
- drm_framebuffer_reference(fb);
-
- crtc->primary->fb = fb;
-
- spin_lock_irqsave(&tilcdc_crtc->irq_lock, flags);
-
- if (crtc->hwmode.vrefresh && ktime_to_ns(tilcdc_crtc->last_vblank)) {
- ktime_t next_vblank;
- s64 tdiff;
-
- next_vblank = ktime_add_us(tilcdc_crtc->last_vblank,
- 1000000 / crtc->hwmode.vrefresh);
-
- tdiff = ktime_to_us(ktime_sub(next_vblank, ktime_get()));
-
- if (tdiff < TILCDC_VBLANK_SAFETY_THRESHOLD_US)
- tilcdc_crtc->next_fb = fb;
- }
-
- if (tilcdc_crtc->next_fb != fb)
- set_scanout(crtc, fb);
-
- tilcdc_crtc->event = event;
-
- spin_unlock_irqrestore(&tilcdc_crtc->irq_lock, flags);
-
- return 0;
-}
-
-static bool tilcdc_crtc_mode_fixup(struct drm_crtc *crtc,
- const struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
-{
struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
+ unsigned long clk_rate, real_rate, req_rate;
+ unsigned int clkdiv;
+ int ret;
- if (!tilcdc_crtc->simulate_vesa_sync)
- return true;
+ clkdiv = 2; /* first try using a standard divider of 2 */
- /*
- * tilcdc does not generate VESA-compliant sync but aligns
- * VS on the second edge of HS instead of first edge.
- * We use adjusted_mode, to fixup sync by aligning both rising
- * edges and add HSKEW offset to fix the sync.
- */
- adjusted_mode->hskew = mode->hsync_end - mode->hsync_start;
- adjusted_mode->flags |= DRM_MODE_FLAG_HSKEW;
+ /* mode.clock is in KHz, set_rate wants parameter in Hz */
+ req_rate = crtc->mode.clock * 1000;
- if (mode->flags & DRM_MODE_FLAG_NHSYNC) {
- adjusted_mode->flags |= DRM_MODE_FLAG_PHSYNC;
- adjusted_mode->flags &= ~DRM_MODE_FLAG_NHSYNC;
- } else {
- adjusted_mode->flags |= DRM_MODE_FLAG_NHSYNC;
- adjusted_mode->flags &= ~DRM_MODE_FLAG_PHSYNC;
- }
+ ret = clk_set_rate(priv->clk, req_rate * clkdiv);
+ clk_rate = clk_get_rate(priv->clk);
+ if (ret < 0) {
+ /*
+ * If we fail to set the clock rate (some architectures don't
+ * use the common clock framework yet and may not implement
+ * all the clk API calls for every clock), try the next best
+ * thing: adjusting the clock divider, unless clk_get_rate()
+ * failed as well.
+ */
+ if (!clk_rate) {
+ /* Nothing more we can do. Just bail out. */
+ dev_err(dev->dev,
+ "failed to set the pixel clock - unable to read current lcdc clock rate\n");
+ return;
+ }
- return true;
-}
+ clkdiv = DIV_ROUND_CLOSEST(clk_rate, req_rate);
-static void tilcdc_crtc_set_clk(struct drm_crtc *crtc)
-{
- struct drm_device *dev = crtc->dev;
- struct tilcdc_drm_private *priv = dev->dev_private;
- struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
- const unsigned clkdiv = 2; /* using a fixed divider of 2 */
- int ret;
+ /*
+ * Emit a warning if the real clock rate resulting from the
+ * calculated divider differs much from the requested rate.
+ *
+ * 5% is an arbitrary value - LCDs are usually quite tolerant
+ * about pixel clock rates.
+ */
+ real_rate = clkdiv * req_rate;
- /* mode.clock is in KHz, set_rate wants parameter in Hz */
- ret = clk_set_rate(priv->clk, crtc->mode.clock * 1000 * clkdiv);
- if (ret < 0) {
- dev_err(dev->dev, "failed to set display clock rate to: %d\n",
- crtc->mode.clock);
- return;
+ if (tilcdc_pclk_diff(clk_rate, real_rate) > 5) {
+ dev_warn(dev->dev,
+ "effective pixel clock rate (%luHz) differs from the calculated rate (%luHz)\n",
+ clk_rate, real_rate);
+ }
}
- tilcdc_crtc->lcd_fck_rate = clk_get_rate(priv->clk);
+ tilcdc_crtc->lcd_fck_rate = clk_rate;
DBG("lcd_clk=%u, mode clock=%d, div=%u",
tilcdc_crtc->lcd_fck_rate, crtc->mode.clock, clkdiv);
@@ -351,7 +291,7 @@ static void tilcdc_crtc_set_clk(struct drm_crtc *crtc)
LCDC_V2_CORE_CLK_EN);
}
-static void tilcdc_crtc_mode_set_nofb(struct drm_crtc *crtc)
+static void tilcdc_crtc_set_mode(struct drm_crtc *crtc)
{
struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
struct drm_device *dev = crtc->dev;
@@ -361,8 +301,6 @@ static void tilcdc_crtc_mode_set_nofb(struct drm_crtc *crtc)
struct drm_display_mode *mode = &crtc->state->adjusted_mode;
struct drm_framebuffer *fb = crtc->primary->state->fb;
- WARN_ON(!drm_modeset_is_locked(&crtc->mutex));
-
if (WARN_ON(!info))
return;
@@ -461,16 +399,16 @@ static void tilcdc_crtc_mode_set_nofb(struct drm_crtc *crtc)
if (info->tft_alt_mode)
reg |= LCDC_TFT_ALT_ENABLE;
if (priv->rev == 2) {
- unsigned int depth, bpp;
-
- drm_fb_get_bpp_depth(fb->pixel_format, &depth, &bpp);
- switch (bpp) {
- case 16:
+ switch (fb->pixel_format) {
+ case DRM_FORMAT_BGR565:
+ case DRM_FORMAT_RGB565:
break;
- case 32:
+ case DRM_FORMAT_XBGR8888:
+ case DRM_FORMAT_XRGB8888:
reg |= LCDC_V2_TFT_24BPP_UNPACK;
/* fallthrough */
- case 24:
+ case DRM_FORMAT_BGR888:
+ case DRM_FORMAT_RGB888:
reg |= LCDC_V2_TFT_24BPP_MODE;
break;
default:
@@ -511,15 +449,226 @@ static void tilcdc_crtc_mode_set_nofb(struct drm_crtc *crtc)
else
tilcdc_clear(dev, LCDC_RASTER_CTRL_REG, LCDC_RASTER_ORDER);
- drm_framebuffer_reference(fb);
+ tilcdc_crtc_set_clk(crtc);
+
+ tilcdc_crtc_load_palette(crtc);
set_scanout(crtc, fb);
- tilcdc_crtc_set_clk(crtc);
+ drm_framebuffer_reference(fb);
crtc->hwmode = crtc->state->adjusted_mode;
}
+static void tilcdc_crtc_enable(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
+
+ WARN_ON(!drm_modeset_is_locked(&crtc->mutex));
+ mutex_lock(&tilcdc_crtc->enable_lock);
+ if (tilcdc_crtc->enabled || tilcdc_crtc->shutdown) {
+ mutex_unlock(&tilcdc_crtc->enable_lock);
+ return;
+ }
+
+ pm_runtime_get_sync(dev->dev);
+
+ reset(crtc);
+
+ tilcdc_crtc_set_mode(crtc);
+
+ tilcdc_crtc_enable_irqs(dev);
+
+ tilcdc_clear(dev, LCDC_DMA_CTRL_REG, LCDC_DUAL_FRAME_BUFFER_ENABLE);
+ tilcdc_write_mask(dev, LCDC_RASTER_CTRL_REG,
+ LCDC_PALETTE_LOAD_MODE(DATA_ONLY),
+ LCDC_PALETTE_LOAD_MODE_MASK);
+ tilcdc_set(dev, LCDC_RASTER_CTRL_REG, LCDC_RASTER_ENABLE);
+
+ drm_crtc_vblank_on(crtc);
+
+ tilcdc_crtc->enabled = true;
+ mutex_unlock(&tilcdc_crtc->enable_lock);
+}
+
+static void tilcdc_crtc_off(struct drm_crtc *crtc, bool shutdown)
+{
+ struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
+ struct drm_device *dev = crtc->dev;
+ struct tilcdc_drm_private *priv = dev->dev_private;
+ int ret;
+
+ mutex_lock(&tilcdc_crtc->enable_lock);
+ if (shutdown)
+ tilcdc_crtc->shutdown = true;
+ if (!tilcdc_crtc->enabled) {
+ mutex_unlock(&tilcdc_crtc->enable_lock);
+ return;
+ }
+ tilcdc_crtc->frame_done = false;
+ tilcdc_clear(dev, LCDC_RASTER_CTRL_REG, LCDC_RASTER_ENABLE);
+
+ /*
+ * Wait for framedone irq which will still come before putting
+ * things to sleep..
+ */
+ ret = wait_event_timeout(tilcdc_crtc->frame_done_wq,
+ tilcdc_crtc->frame_done,
+ msecs_to_jiffies(500));
+ if (ret == 0)
+ dev_err(dev->dev, "%s: timeout waiting for framedone\n",
+ __func__);
+
+ drm_crtc_vblank_off(crtc);
+
+ tilcdc_crtc_disable_irqs(dev);
+
+ pm_runtime_put_sync(dev->dev);
+
+ if (tilcdc_crtc->next_fb) {
+ drm_flip_work_queue(&tilcdc_crtc->unref_work,
+ tilcdc_crtc->next_fb);
+ tilcdc_crtc->next_fb = NULL;
+ }
+
+ if (tilcdc_crtc->curr_fb) {
+ drm_flip_work_queue(&tilcdc_crtc->unref_work,
+ tilcdc_crtc->curr_fb);
+ tilcdc_crtc->curr_fb = NULL;
+ }
+
+ drm_flip_work_commit(&tilcdc_crtc->unref_work, priv->wq);
+ tilcdc_crtc->last_vblank = ktime_set(0, 0);
+
+ tilcdc_crtc->enabled = false;
+ mutex_unlock(&tilcdc_crtc->enable_lock);
+}
+
+static void tilcdc_crtc_disable(struct drm_crtc *crtc)
+{
+ WARN_ON(!drm_modeset_is_locked(&crtc->mutex));
+ tilcdc_crtc_off(crtc, false);
+}
+
+void tilcdc_crtc_shutdown(struct drm_crtc *crtc)
+{
+ tilcdc_crtc_off(crtc, true);
+}
+
+static bool tilcdc_crtc_is_on(struct drm_crtc *crtc)
+{
+ return crtc->state && crtc->state->enable && crtc->state->active;
+}
+
+static void tilcdc_crtc_recover_work(struct work_struct *work)
+{
+ struct tilcdc_crtc *tilcdc_crtc =
+ container_of(work, struct tilcdc_crtc, recover_work);
+ struct drm_crtc *crtc = &tilcdc_crtc->base;
+
+ dev_info(crtc->dev->dev, "%s: Reset CRTC", __func__);
+
+ drm_modeset_lock_crtc(crtc, NULL);
+
+ if (!tilcdc_crtc_is_on(crtc))
+ goto out;
+
+ tilcdc_crtc_disable(crtc);
+ tilcdc_crtc_enable(crtc);
+out:
+ drm_modeset_unlock_crtc(crtc);
+}
+
+static void tilcdc_crtc_destroy(struct drm_crtc *crtc)
+{
+ struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
+ struct tilcdc_drm_private *priv = crtc->dev->dev_private;
+
+ drm_modeset_lock_crtc(crtc, NULL);
+ tilcdc_crtc_disable(crtc);
+ drm_modeset_unlock_crtc(crtc);
+
+ flush_workqueue(priv->wq);
+
+ of_node_put(crtc->port);
+ drm_crtc_cleanup(crtc);
+ drm_flip_work_cleanup(&tilcdc_crtc->unref_work);
+}
+
+int tilcdc_crtc_update_fb(struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ struct drm_pending_vblank_event *event)
+{
+ struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
+ struct drm_device *dev = crtc->dev;
+ unsigned long flags;
+
+ WARN_ON(!drm_modeset_is_locked(&crtc->mutex));
+
+ if (tilcdc_crtc->event) {
+ dev_err(dev->dev, "already pending page flip!\n");
+ return -EBUSY;
+ }
+
+ drm_framebuffer_reference(fb);
+
+ crtc->primary->fb = fb;
+
+ spin_lock_irqsave(&tilcdc_crtc->irq_lock, flags);
+
+ if (crtc->hwmode.vrefresh && ktime_to_ns(tilcdc_crtc->last_vblank)) {
+ ktime_t next_vblank;
+ s64 tdiff;
+
+ next_vblank = ktime_add_us(tilcdc_crtc->last_vblank,
+ 1000000 / crtc->hwmode.vrefresh);
+
+ tdiff = ktime_to_us(ktime_sub(next_vblank, ktime_get()));
+
+ if (tdiff < TILCDC_VBLANK_SAFETY_THRESHOLD_US)
+ tilcdc_crtc->next_fb = fb;
+ }
+
+ if (tilcdc_crtc->next_fb != fb)
+ set_scanout(crtc, fb);
+
+ tilcdc_crtc->event = event;
+
+ spin_unlock_irqrestore(&tilcdc_crtc->irq_lock, flags);
+
+ return 0;
+}
+
+static bool tilcdc_crtc_mode_fixup(struct drm_crtc *crtc,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
+
+ if (!tilcdc_crtc->simulate_vesa_sync)
+ return true;
+
+ /*
+ * tilcdc does not generate VESA-compliant sync but aligns
+ * VS on the second edge of HS instead of first edge.
+ * We use adjusted_mode, to fixup sync by aligning both rising
+ * edges and add HSKEW offset to fix the sync.
+ */
+ adjusted_mode->hskew = mode->hsync_end - mode->hsync_start;
+ adjusted_mode->flags |= DRM_MODE_FLAG_HSKEW;
+
+ if (mode->flags & DRM_MODE_FLAG_NHSYNC) {
+ adjusted_mode->flags |= DRM_MODE_FLAG_PHSYNC;
+ adjusted_mode->flags &= ~DRM_MODE_FLAG_NHSYNC;
+ } else {
+ adjusted_mode->flags |= DRM_MODE_FLAG_NHSYNC;
+ adjusted_mode->flags &= ~DRM_MODE_FLAG_PHSYNC;
+ }
+
+ return true;
+}
+
static int tilcdc_crtc_atomic_check(struct drm_crtc *crtc,
struct drm_crtc_state *state)
{
@@ -560,7 +709,6 @@ static const struct drm_crtc_helper_funcs tilcdc_crtc_helper_funcs = {
.enable = tilcdc_crtc_enable,
.disable = tilcdc_crtc_disable,
.atomic_check = tilcdc_crtc_atomic_check,
- .mode_set_nofb = tilcdc_crtc_mode_set_nofb,
};
int tilcdc_crtc_max_width(struct drm_crtc *crtc)
@@ -756,28 +904,48 @@ irqreturn_t tilcdc_crtc_irq(struct drm_crtc *crtc)
}
if (stat & LCDC_FIFO_UNDERFLOW)
- dev_err_ratelimited(dev->dev, "%s(0x%08x): FIFO underfow",
+ dev_err_ratelimited(dev->dev, "%s(0x%08x): FIFO underflow",
__func__, stat);
- /* For revision 2 only */
- if (priv->rev == 2) {
- if (stat & LCDC_FRAME_DONE) {
- tilcdc_crtc->frame_done = true;
- wake_up(&tilcdc_crtc->frame_done_wq);
- }
+ if (stat & LCDC_PL_LOAD_DONE) {
+ complete(&tilcdc_crtc->palette_loaded);
+ if (priv->rev == 1)
+ tilcdc_clear(dev, LCDC_RASTER_CTRL_REG,
+ LCDC_V1_PL_INT_ENA);
+ else
+ tilcdc_write(dev, LCDC_INT_ENABLE_CLR_REG,
+ LCDC_V2_PL_INT_ENA);
+ }
- if (stat & LCDC_SYNC_LOST) {
- dev_err_ratelimited(dev->dev, "%s(0x%08x): Sync lost",
- __func__, stat);
- tilcdc_crtc->frame_intact = false;
- if (tilcdc_crtc->sync_lost_count++ >
- SYNC_LOST_COUNT_LIMIT) {
- dev_err(dev->dev, "%s(0x%08x): Sync lost flood detected, disabling the interrupt", __func__, stat);
+ if (stat & LCDC_SYNC_LOST) {
+ dev_err_ratelimited(dev->dev, "%s(0x%08x): Sync lost",
+ __func__, stat);
+ tilcdc_crtc->frame_intact = false;
+ if (tilcdc_crtc->sync_lost_count++ >
+ SYNC_LOST_COUNT_LIMIT) {
+ dev_err(dev->dev, "%s(0x%08x): Sync lost flood detected, recovering", __func__, stat);
+ queue_work(system_wq, &tilcdc_crtc->recover_work);
+ if (priv->rev == 1)
+ tilcdc_clear(dev, LCDC_RASTER_CTRL_REG,
+ LCDC_V1_SYNC_LOST_INT_ENA);
+ else
tilcdc_write(dev, LCDC_INT_ENABLE_CLR_REG,
LCDC_SYNC_LOST);
- }
+ tilcdc_crtc->sync_lost_count = 0;
}
+ }
+
+ if (stat & LCDC_FRAME_DONE) {
+ tilcdc_crtc->frame_done = true;
+ wake_up(&tilcdc_crtc->frame_done_wq);
+ /* rev 1 lcdc appears to hang if irq is not disabled here */
+ if (priv->rev == 1)
+ tilcdc_clear(dev, LCDC_RASTER_CTRL_REG,
+ LCDC_V1_FRAME_DONE_INT_ENA);
+ }
+ /* For revision 2 only */
+ if (priv->rev == 2) {
/* Indicate to LCDC that the interrupt service routine has
* completed, see 13.3.6.1.6 in AM335x TRM.
*/
@@ -787,7 +955,7 @@ irqreturn_t tilcdc_crtc_irq(struct drm_crtc *crtc)
return IRQ_HANDLED;
}
-struct drm_crtc *tilcdc_crtc_create(struct drm_device *dev)
+int tilcdc_crtc_create(struct drm_device *dev)
{
struct tilcdc_drm_private *priv = dev->dev_private;
struct tilcdc_crtc *tilcdc_crtc;
@@ -797,21 +965,33 @@ struct drm_crtc *tilcdc_crtc_create(struct drm_device *dev)
tilcdc_crtc = devm_kzalloc(dev->dev, sizeof(*tilcdc_crtc), GFP_KERNEL);
if (!tilcdc_crtc) {
dev_err(dev->dev, "allocation failed\n");
- return NULL;
+ return -ENOMEM;
}
+ init_completion(&tilcdc_crtc->palette_loaded);
+ tilcdc_crtc->palette_base = dmam_alloc_coherent(dev->dev,
+ TILCDC_PALETTE_SIZE,
+ &tilcdc_crtc->palette_dma_handle,
+ GFP_KERNEL | __GFP_ZERO);
+ if (!tilcdc_crtc->palette_base)
+ return -ENOMEM;
+ *tilcdc_crtc->palette_base = TILCDC_PALETTE_FIRST_ENTRY;
+
crtc = &tilcdc_crtc->base;
ret = tilcdc_plane_init(dev, &tilcdc_crtc->primary);
if (ret < 0)
goto fail;
+ mutex_init(&tilcdc_crtc->enable_lock);
+
init_waitqueue_head(&tilcdc_crtc->frame_done_wq);
drm_flip_work_init(&tilcdc_crtc->unref_work,
"unref", unref_worker);
spin_lock_init(&tilcdc_crtc->irq_lock);
+ INIT_WORK(&tilcdc_crtc->recover_work, tilcdc_crtc_recover_work);
ret = drm_crtc_init_with_planes(dev, crtc,
&tilcdc_crtc->primary,
@@ -837,13 +1017,15 @@ struct drm_crtc *tilcdc_crtc_create(struct drm_device *dev)
if (!crtc->port) { /* This should never happen */
dev_err(dev->dev, "Port node not found in %s\n",
dev->dev->of_node->full_name);
+ ret = -EINVAL;
goto fail;
}
}
- return crtc;
+ priv->crtc = crtc;
+ return 0;
fail:
tilcdc_crtc_destroy(crtc);
- return NULL;
+ return ret;
}
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_drv.c b/drivers/gpu/drm/tilcdc/tilcdc_drv.c
index a694977c32f4..bd0a3bd07167 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_drv.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_drv.c
@@ -127,24 +127,16 @@ static int tilcdc_commit(struct drm_device *dev,
* current layout.
*/
- /* Keep HW on while we commit the state. */
- pm_runtime_get_sync(dev->dev);
-
drm_atomic_helper_commit_modeset_disables(dev, state);
drm_atomic_helper_commit_planes(dev, state, 0);
drm_atomic_helper_commit_modeset_enables(dev, state);
- /* Now HW should remain on if need becase the crtc is enabled */
- pm_runtime_put_sync(dev->dev);
-
drm_atomic_helper_wait_for_vblanks(dev, state);
drm_atomic_helper_cleanup_planes(dev, state);
- drm_atomic_state_free(state);
-
return 0;
}
@@ -155,15 +147,11 @@ static const struct drm_mode_config_funcs mode_config_funcs = {
.atomic_commit = tilcdc_commit,
};
-static int modeset_init(struct drm_device *dev)
+static void modeset_init(struct drm_device *dev)
{
struct tilcdc_drm_private *priv = dev->dev_private;
struct tilcdc_module *mod;
- drm_mode_config_init(dev);
-
- priv->crtc = tilcdc_crtc_create(dev);
-
list_for_each_entry(mod, &module_list, list) {
DBG("loading module: %s", mod->name);
mod->funcs->modeset_init(mod, dev);
@@ -174,8 +162,6 @@ static int modeset_init(struct drm_device *dev)
dev->mode_config.max_width = tilcdc_crtc_max_width(priv->crtc);
dev->mode_config.max_height = 2048;
dev->mode_config.funcs = &mode_config_funcs;
-
- return 0;
}
#ifdef CONFIG_CPU_FREQ
@@ -196,22 +182,29 @@ static int cpufreq_transition(struct notifier_block *nb,
* DRM operations:
*/
-static int tilcdc_unload(struct drm_device *dev)
+static void tilcdc_fini(struct drm_device *dev)
{
struct tilcdc_drm_private *priv = dev->dev_private;
- tilcdc_remove_external_encoders(dev);
+ if (priv->crtc)
+ tilcdc_crtc_shutdown(priv->crtc);
+
+ if (priv->is_registered)
+ drm_dev_unregister(dev);
- drm_fbdev_cma_fini(priv->fbdev);
drm_kms_helper_poll_fini(dev);
- drm_mode_config_cleanup(dev);
- drm_vblank_cleanup(dev);
+
+ if (priv->fbdev)
+ drm_fbdev_cma_fini(priv->fbdev);
drm_irq_uninstall(dev);
+ drm_mode_config_cleanup(dev);
+ tilcdc_remove_external_device(dev);
#ifdef CONFIG_CPU_FREQ
- cpufreq_unregister_notifier(&priv->freq_transition,
- CPUFREQ_TRANSITION_NOTIFIER);
+ if (priv->freq_transition.notifier_call)
+ cpufreq_unregister_notifier(&priv->freq_transition,
+ CPUFREQ_TRANSITION_NOTIFIER);
#endif
if (priv->clk)
@@ -220,61 +213,71 @@ static int tilcdc_unload(struct drm_device *dev)
if (priv->mmio)
iounmap(priv->mmio);
- flush_workqueue(priv->wq);
- destroy_workqueue(priv->wq);
+ if (priv->wq) {
+ flush_workqueue(priv->wq);
+ destroy_workqueue(priv->wq);
+ }
dev->dev_private = NULL;
pm_runtime_disable(dev->dev);
- return 0;
+ drm_dev_unref(dev);
}
-static int tilcdc_load(struct drm_device *dev, unsigned long flags)
+static int tilcdc_init(struct drm_driver *ddrv, struct device *dev)
{
- struct platform_device *pdev = dev->platformdev;
- struct device_node *node = pdev->dev.of_node;
+ struct drm_device *ddev;
+ struct platform_device *pdev = to_platform_device(dev);
+ struct device_node *node = dev->of_node;
struct tilcdc_drm_private *priv;
struct resource *res;
u32 bpp = 0;
int ret;
- priv = devm_kzalloc(dev->dev, sizeof(*priv), GFP_KERNEL);
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv) {
- dev_err(dev->dev, "failed to allocate private data\n");
+ dev_err(dev, "failed to allocate private data\n");
return -ENOMEM;
}
- dev->dev_private = priv;
+ ddev = drm_dev_alloc(ddrv, dev);
+ if (IS_ERR(ddev))
+ return PTR_ERR(ddev);
+
+ ddev->platformdev = pdev;
+ ddev->dev_private = priv;
+ platform_set_drvdata(pdev, ddev);
+ drm_mode_config_init(ddev);
priv->is_componentized =
- tilcdc_get_external_components(dev->dev, NULL) > 0;
+ tilcdc_get_external_components(dev, NULL) > 0;
priv->wq = alloc_ordered_workqueue("tilcdc", 0);
if (!priv->wq) {
ret = -ENOMEM;
- goto fail_unset_priv;
+ goto init_failed;
}
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res) {
- dev_err(dev->dev, "failed to get memory resource\n");
+ dev_err(dev, "failed to get memory resource\n");
ret = -EINVAL;
- goto fail_free_wq;
+ goto init_failed;
}
priv->mmio = ioremap_nocache(res->start, resource_size(res));
if (!priv->mmio) {
- dev_err(dev->dev, "failed to ioremap\n");
+ dev_err(dev, "failed to ioremap\n");
ret = -ENOMEM;
- goto fail_free_wq;
+ goto init_failed;
}
- priv->clk = clk_get(dev->dev, "fck");
+ priv->clk = clk_get(dev, "fck");
if (IS_ERR(priv->clk)) {
- dev_err(dev->dev, "failed to get functional clock\n");
+ dev_err(dev, "failed to get functional clock\n");
ret = -ENODEV;
- goto fail_iounmap;
+ goto init_failed;
}
#ifdef CONFIG_CPU_FREQ
@@ -282,8 +285,9 @@ static int tilcdc_load(struct drm_device *dev, unsigned long flags)
ret = cpufreq_register_notifier(&priv->freq_transition,
CPUFREQ_TRANSITION_NOTIFIER);
if (ret) {
- dev_err(dev->dev, "failed to register cpufreq notifier\n");
- goto fail_put_clk;
+ dev_err(dev, "failed to register cpufreq notifier\n");
+ priv->freq_transition.notifier_call = NULL;
+ goto init_failed;
}
#endif
@@ -292,22 +296,22 @@ static int tilcdc_load(struct drm_device *dev, unsigned long flags)
DBG("Maximum Bandwidth Value %d", priv->max_bandwidth);
- if (of_property_read_u32(node, "ti,max-width", &priv->max_width))
+ if (of_property_read_u32(node, "max-width", &priv->max_width))
priv->max_width = TILCDC_DEFAULT_MAX_WIDTH;
DBG("Maximum Horizontal Pixel Width Value %dpixels", priv->max_width);
- if (of_property_read_u32(node, "ti,max-pixelclock",
+ if (of_property_read_u32(node, "max-pixelclock",
&priv->max_pixelclock))
priv->max_pixelclock = TILCDC_DEFAULT_MAX_PIXELCLOCK;
DBG("Maximum Pixel Clock Value %dKHz", priv->max_pixelclock);
- pm_runtime_enable(dev->dev);
+ pm_runtime_enable(dev);
/* Determine LCD IP Version */
- pm_runtime_get_sync(dev->dev);
- switch (tilcdc_read(dev, LCDC_PID_REG)) {
+ pm_runtime_get_sync(dev);
+ switch (tilcdc_read(ddev, LCDC_PID_REG)) {
case 0x4c100102:
priv->rev = 1;
break;
@@ -316,14 +320,14 @@ static int tilcdc_load(struct drm_device *dev, unsigned long flags)
priv->rev = 2;
break;
default:
- dev_warn(dev->dev, "Unknown PID Reg value 0x%08x, "
- "defaulting to LCD revision 1\n",
- tilcdc_read(dev, LCDC_PID_REG));
+ dev_warn(dev, "Unknown PID Reg value 0x%08x, "
+ "defaulting to LCD revision 1\n",
+ tilcdc_read(ddev, LCDC_PID_REG));
priv->rev = 1;
break;
}
- pm_runtime_put_sync(dev->dev);
+ pm_runtime_put_sync(dev);
if (priv->rev == 1) {
DBG("Revision 1 LCDC supports only RGB565 format");
@@ -356,91 +360,67 @@ static int tilcdc_load(struct drm_device *dev, unsigned long flags)
}
}
- ret = modeset_init(dev);
+ ret = tilcdc_crtc_create(ddev);
if (ret < 0) {
- dev_err(dev->dev, "failed to initialize mode setting\n");
- goto fail_cpufreq_unregister;
+ dev_err(dev, "failed to create crtc\n");
+ goto init_failed;
}
-
- platform_set_drvdata(pdev, dev);
+ modeset_init(ddev);
if (priv->is_componentized) {
- ret = component_bind_all(dev->dev, dev);
+ ret = component_bind_all(dev, ddev);
if (ret < 0)
- goto fail_mode_config_cleanup;
+ goto init_failed;
- ret = tilcdc_add_external_encoders(dev);
+ ret = tilcdc_add_component_encoder(ddev);
if (ret < 0)
- goto fail_component_cleanup;
+ goto init_failed;
+ } else {
+ ret = tilcdc_attach_external_device(ddev);
+ if (ret)
+ goto init_failed;
}
- if ((priv->num_encoders == 0) || (priv->num_connectors == 0)) {
- dev_err(dev->dev, "no encoders/connectors found\n");
+ if (!priv->external_connector &&
+ ((priv->num_encoders == 0) || (priv->num_connectors == 0))) {
+ dev_err(dev, "no encoders/connectors found\n");
ret = -ENXIO;
- goto fail_external_cleanup;
+ goto init_failed;
}
- ret = drm_vblank_init(dev, 1);
+ ret = drm_vblank_init(ddev, 1);
if (ret < 0) {
- dev_err(dev->dev, "failed to initialize vblank\n");
- goto fail_external_cleanup;
+ dev_err(dev, "failed to initialize vblank\n");
+ goto init_failed;
}
- ret = drm_irq_install(dev, platform_get_irq(dev->platformdev, 0));
+ ret = drm_irq_install(ddev, platform_get_irq(pdev, 0));
if (ret < 0) {
- dev_err(dev->dev, "failed to install IRQ handler\n");
- goto fail_vblank_cleanup;
+ dev_err(dev, "failed to install IRQ handler\n");
+ goto init_failed;
}
- drm_mode_config_reset(dev);
+ drm_mode_config_reset(ddev);
- priv->fbdev = drm_fbdev_cma_init(dev, bpp,
- dev->mode_config.num_crtc,
- dev->mode_config.num_connector);
+ priv->fbdev = drm_fbdev_cma_init(ddev, bpp,
+ ddev->mode_config.num_crtc,
+ ddev->mode_config.num_connector);
if (IS_ERR(priv->fbdev)) {
ret = PTR_ERR(priv->fbdev);
- goto fail_irq_uninstall;
+ goto init_failed;
}
- drm_kms_helper_poll_init(dev);
+ drm_kms_helper_poll_init(ddev);
- return 0;
-
-fail_irq_uninstall:
- drm_irq_uninstall(dev);
-
-fail_vblank_cleanup:
- drm_vblank_cleanup(dev);
-
-fail_component_cleanup:
- if (priv->is_componentized)
- component_unbind_all(dev->dev, dev);
-
-fail_mode_config_cleanup:
- drm_mode_config_cleanup(dev);
-
-fail_external_cleanup:
- tilcdc_remove_external_encoders(dev);
-
-fail_cpufreq_unregister:
- pm_runtime_disable(dev->dev);
-#ifdef CONFIG_CPU_FREQ
- cpufreq_unregister_notifier(&priv->freq_transition,
- CPUFREQ_TRANSITION_NOTIFIER);
-
-fail_put_clk:
-#endif
- clk_put(priv->clk);
-
-fail_iounmap:
- iounmap(priv->mmio);
+ ret = drm_dev_register(ddev, 0);
+ if (ret)
+ goto init_failed;
-fail_free_wq:
- flush_workqueue(priv->wq);
- destroy_workqueue(priv->wq);
+ priv->is_registered = true;
+ return 0;
-fail_unset_priv:
- dev->dev_private = NULL;
+init_failed:
+ tilcdc_fini(ddev);
return ret;
}
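
This replaces the legacy drm .load/.unload hooks with explicit device creation and a single error label; because tilcdc_fini() checks each resource before freeing it, every failure can funnel through one goto. The same shape in miniature (foo_setup_hw() and foo_fini() are hypothetical stand-ins):

static int foo_init(struct drm_driver *ddrv, struct device *dev)
{
	struct drm_device *ddev;
	int ret;

	ddev = drm_dev_alloc(ddrv, dev);
	if (IS_ERR(ddev))
		return PTR_ERR(ddev);

	ret = foo_setup_hw(ddev);	/* any intermediate step may fail */
	if (ret)
		goto err;

	ret = drm_dev_register(ddev, 0);
	if (ret)
		goto err;

	return 0;

err:
	foo_fini(ddev);			/* guarded teardown, see above */
	return ret;
}
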
@@ -575,9 +555,7 @@ static const struct file_operations fops = {
.open = drm_open,
.release = drm_release,
.unlocked_ioctl = drm_ioctl,
-#ifdef CONFIG_COMPAT
.compat_ioctl = drm_compat_ioctl,
-#endif
.poll = drm_poll,
.read = drm_read,
.llseek = no_llseek,
@@ -587,8 +565,6 @@ static const struct file_operations fops = {
static struct drm_driver tilcdc_driver = {
.driver_features = (DRIVER_HAVE_IRQ | DRIVER_GEM | DRIVER_MODESET |
DRIVER_PRIME | DRIVER_ATOMIC),
- .load = tilcdc_load,
- .unload = tilcdc_unload,
.lastclose = tilcdc_lastclose,
.irq_handler = tilcdc_irq,
.get_vblank_counter = drm_vblank_no_hw_counter,
@@ -662,10 +638,9 @@ static const struct dev_pm_ops tilcdc_pm_ops = {
/*
* Platform driver:
*/
-
static int tilcdc_bind(struct device *dev)
{
- return drm_platform_init(&tilcdc_driver, to_platform_device(dev));
+ return tilcdc_init(&tilcdc_driver, dev);
}
static void tilcdc_unbind(struct device *dev)
@@ -676,7 +651,7 @@ static void tilcdc_unbind(struct device *dev)
if (!ddev->dev_private)
return;
- drm_put_dev(dev_get_drvdata(dev));
+ tilcdc_fini(dev_get_drvdata(dev));
}
static const struct component_master_ops tilcdc_comp_ops = {
@@ -699,7 +674,7 @@ static int tilcdc_pdev_probe(struct platform_device *pdev)
if (ret < 0)
return ret;
else if (ret == 0)
- return drm_platform_init(&tilcdc_driver, pdev);
+ return tilcdc_init(&tilcdc_driver, &pdev->dev);
else
return component_master_add_with_match(&pdev->dev,
&tilcdc_comp_ops,
@@ -714,7 +689,7 @@ static int tilcdc_pdev_remove(struct platform_device *pdev)
if (ret < 0)
return ret;
else if (ret == 0)
- drm_put_dev(platform_get_drvdata(pdev));
+ tilcdc_fini(platform_get_drvdata(pdev));
else
component_master_del(&pdev->dev, &tilcdc_comp_ops);
@@ -723,6 +698,7 @@ static int tilcdc_pdev_remove(struct platform_device *pdev)
static struct of_device_id tilcdc_of_match[] = {
{ .compatible = "ti,am33xx-tilcdc", },
+ { .compatible = "ti,da850-tilcdc", },
{ },
};
MODULE_DEVICE_TABLE(of, tilcdc_of_match);
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_drv.h b/drivers/gpu/drm/tilcdc/tilcdc_drv.h
index 9780c37ec4cd..0e71daf5b5cb 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_drv.h
+++ b/drivers/gpu/drm/tilcdc/tilcdc_drv.h
@@ -33,6 +33,7 @@
#include <drm/drm_crtc_helper.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_bridge.h>
/* Defaulting to pixel clock defined on AM335x */
#define TILCDC_DEFAULT_MAX_PIXELCLOCK 126000
@@ -87,8 +88,12 @@ struct tilcdc_drm_private {
unsigned int num_connectors;
struct drm_connector *connectors[8];
- const struct drm_connector_helper_funcs *connector_funcs[8];
+ struct drm_encoder *external_encoder;
+ struct drm_connector *external_connector;
+ const struct drm_connector_helper_funcs *connector_funcs;
+
+ bool is_registered;
bool is_componentized;
};
@@ -163,7 +168,7 @@ struct tilcdc_panel_info {
#define DBG(fmt, ...) DRM_DEBUG(fmt"\n", ##__VA_ARGS__)
-struct drm_crtc *tilcdc_crtc_create(struct drm_device *dev);
+int tilcdc_crtc_create(struct drm_device *dev);
irqreturn_t tilcdc_crtc_irq(struct drm_crtc *crtc);
void tilcdc_crtc_update_clk(struct drm_crtc *crtc);
void tilcdc_crtc_set_panel_info(struct drm_crtc *crtc,
@@ -172,7 +177,7 @@ void tilcdc_crtc_set_simulate_vesa_sync(struct drm_crtc *crtc,
bool simulate_vesa_sync);
int tilcdc_crtc_mode_valid(struct drm_crtc *crtc, struct drm_display_mode *mode);
int tilcdc_crtc_max_width(struct drm_crtc *crtc);
-void tilcdc_crtc_disable(struct drm_crtc *crtc);
+void tilcdc_crtc_shutdown(struct drm_crtc *crtc);
int tilcdc_crtc_update_fb(struct drm_crtc *crtc,
struct drm_framebuffer *fb,
struct drm_pending_vblank_event *event);
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_external.c b/drivers/gpu/drm/tilcdc/tilcdc_external.c
index 68e895021005..c67d7cd7d57e 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_external.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_external.c
@@ -10,6 +10,7 @@
#include <linux/component.h>
#include <linux/of_graph.h>
+#include <drm/drm_of.h>
#include "tilcdc_drv.h"
#include "tilcdc_external.h"
@@ -27,44 +28,50 @@ static const struct tilcdc_panel_info panel_info_tda998x = {
.raster_order = 0,
};
+static const struct tilcdc_panel_info panel_info_default = {
+ .ac_bias = 255,
+ .ac_bias_intrpt = 0,
+ .dma_burst_sz = 16,
+ .bpp = 16,
+ .fdd = 0x80,
+ .tft_alt_mode = 0,
+ .sync_edge = 0,
+ .sync_ctrl = 1,
+ .raster_order = 0,
+};
+
static int tilcdc_external_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
struct tilcdc_drm_private *priv = connector->dev->dev_private;
- int ret, i;
+ int ret;
ret = tilcdc_crtc_mode_valid(priv->crtc, mode);
if (ret != MODE_OK)
return ret;
- for (i = 0; i < priv->num_connectors &&
- priv->connectors[i] != connector; i++)
- ;
-
- BUG_ON(priv->connectors[i] != connector);
- BUG_ON(!priv->connector_funcs[i]);
+ BUG_ON(priv->external_connector != connector);
+ BUG_ON(!priv->connector_funcs);
/* If the connector has its own mode_valid call it. */
- if (!IS_ERR(priv->connector_funcs[i]) &&
- priv->connector_funcs[i]->mode_valid)
- return priv->connector_funcs[i]->mode_valid(connector, mode);
+ if (!IS_ERR(priv->connector_funcs) &&
+ priv->connector_funcs->mode_valid)
+ return priv->connector_funcs->mode_valid(connector, mode);
return MODE_OK;
}
-static int tilcdc_add_external_encoder(struct drm_device *dev,
- struct drm_connector *connector)
+static int tilcdc_add_external_connector(struct drm_device *dev,
+ struct drm_connector *connector)
{
struct tilcdc_drm_private *priv = dev->dev_private;
struct drm_connector_helper_funcs *connector_funcs;
- priv->connectors[priv->num_connectors] = connector;
- priv->encoders[priv->num_encoders++] = connector->encoder;
-
- /* Only tda998x is supported at the moment. */
- tilcdc_crtc_set_simulate_vesa_sync(priv->crtc, true);
- tilcdc_crtc_set_panel_info(priv->crtc, &panel_info_tda998x);
+ /* There should never be more than one connector */
+ if (WARN_ON(priv->external_connector))
+ return -EINVAL;
+ priv->external_connector = connector;
connector_funcs = devm_kzalloc(dev->dev, sizeof(*connector_funcs),
GFP_KERNEL);
if (!connector_funcs)
@@ -77,56 +84,177 @@ static int tilcdc_add_external_encoder(struct drm_device *dev,
* everything else but use our own mode_valid() (above).
*/
if (connector->helper_private) {
- priv->connector_funcs[priv->num_connectors] =
- connector->helper_private;
- *connector_funcs = *priv->connector_funcs[priv->num_connectors];
+ priv->connector_funcs = connector->helper_private;
+ *connector_funcs = *priv->connector_funcs;
} else {
- priv->connector_funcs[priv->num_connectors] = ERR_PTR(-ENOENT);
+ priv->connector_funcs = ERR_PTR(-ENOENT);
}
connector_funcs->mode_valid = tilcdc_external_mode_valid;
drm_connector_helper_add(connector, connector_funcs);
- priv->num_connectors++;
- dev_dbg(dev->dev, "External encoder '%s' connected\n",
- connector->encoder->name);
+ dev_dbg(dev->dev, "External connector '%s' connected\n",
+ connector->name);
return 0;
}
-int tilcdc_add_external_encoders(struct drm_device *dev)
+static
+struct drm_connector *tilcdc_encoder_find_connector(struct drm_device *ddev,
+ struct drm_encoder *encoder)
{
- struct tilcdc_drm_private *priv = dev->dev_private;
struct drm_connector *connector;
- int num_internal_connectors = priv->num_connectors;
-
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
- bool found = false;
- int i, ret;
-
- for (i = 0; i < num_internal_connectors; i++)
- if (connector == priv->connectors[i])
- found = true;
- if (!found) {
- ret = tilcdc_add_external_encoder(dev, connector);
- if (ret)
- return ret;
- }
+ int i;
+
+ list_for_each_entry(connector, &ddev->mode_config.connector_list, head)
+ for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++)
+ if (connector->encoder_ids[i] == encoder->base.id)
+ return connector;
+
+ dev_err(ddev->dev, "No connector found for %s encoder (id %d)\n",
+ encoder->name, encoder->base.id);
+
+ return NULL;
+}
+
+int tilcdc_add_component_encoder(struct drm_device *ddev)
+{
+ struct tilcdc_drm_private *priv = ddev->dev_private;
+ struct drm_connector *connector;
+ struct drm_encoder *encoder = NULL, *iter;
+
+ list_for_each_entry(iter, &ddev->mode_config.encoder_list, head) {
+ if (iter->possible_crtcs & (1 << priv->crtc->index)) {
+ encoder = iter;
+ break;
+ }
+ }
+
+ if (!encoder) {
+ dev_err(ddev->dev, "%s: No suitable encoder found\n", __func__);
+ return -ENODEV;
}
- return 0;
+
+ connector = tilcdc_encoder_find_connector(ddev, encoder);
+
+ if (!connector)
+ return -ENODEV;
+
+ /* Only tda998x is supported at the moment. */
+ tilcdc_crtc_set_simulate_vesa_sync(priv->crtc, true);
+ tilcdc_crtc_set_panel_info(priv->crtc, &panel_info_tda998x);
+
+ return tilcdc_add_external_connector(ddev, connector);
}
-void tilcdc_remove_external_encoders(struct drm_device *dev)
+void tilcdc_remove_external_device(struct drm_device *dev)
{
struct tilcdc_drm_private *priv = dev->dev_private;
- int i;
/* Restore the original helper functions, if any. */
- for (i = 0; i < priv->num_connectors; i++)
- if (IS_ERR(priv->connector_funcs[i]))
- drm_connector_helper_add(priv->connectors[i], NULL);
- else if (priv->connector_funcs[i])
- drm_connector_helper_add(priv->connectors[i],
- priv->connector_funcs[i]);
+ if (IS_ERR(priv->connector_funcs))
+ drm_connector_helper_add(priv->external_connector, NULL);
+ else if (priv->connector_funcs)
+ drm_connector_helper_add(priv->external_connector,
+ priv->connector_funcs);
+}
+
+static const struct drm_encoder_funcs tilcdc_external_encoder_funcs = {
+ .destroy = drm_encoder_cleanup,
+};
+
+static
+int tilcdc_attach_bridge(struct drm_device *ddev, struct drm_bridge *bridge)
+{
+ struct tilcdc_drm_private *priv = ddev->dev_private;
+ struct drm_connector *connector;
+ int ret;
+
+ priv->external_encoder->possible_crtcs = BIT(0);
+ priv->external_encoder->bridge = bridge;
+ bridge->encoder = priv->external_encoder;
+
+ ret = drm_bridge_attach(ddev, bridge);
+ if (ret) {
+ dev_err(ddev->dev, "drm_bridge_attach() failed %d\n", ret);
+ return ret;
+ }
+
+ tilcdc_crtc_set_panel_info(priv->crtc, &panel_info_default);
+
+ connector = tilcdc_encoder_find_connector(ddev, priv->external_encoder);
+ if (!connector)
+ return -ENODEV;
+
+ ret = tilcdc_add_external_connector(ddev, connector);
+
+ return ret;
+}
+
+static int tilcdc_node_has_port(struct device_node *dev_node)
+{
+ struct device_node *node;
+
+ node = of_get_child_by_name(dev_node, "ports");
+ if (!node)
+ node = of_get_child_by_name(dev_node, "port");
+ if (!node)
+ return 0;
+ of_node_put(node);
+
+ return 1;
+}
+
+static
+struct device_node *tilcdc_get_remote_node(struct device_node *node)
+{
+ struct device_node *ep;
+ struct device_node *parent;
+
+ if (!tilcdc_node_has_port(node))
+ return NULL;
+
+ ep = of_graph_get_next_endpoint(node, NULL);
+ if (!ep)
+ return NULL;
+
+ parent = of_graph_get_remote_port_parent(ep);
+ of_node_put(ep);
+
+ return parent;
+}
+
+int tilcdc_attach_external_device(struct drm_device *ddev)
+{
+ struct tilcdc_drm_private *priv = ddev->dev_private;
+ struct device_node *remote_node;
+ struct drm_bridge *bridge;
+ int ret;
+
+ remote_node = tilcdc_get_remote_node(ddev->dev->of_node);
+ if (!remote_node)
+ return 0;
+
+ bridge = of_drm_find_bridge(remote_node);
+ of_node_put(remote_node);
+ if (!bridge)
+ return -EPROBE_DEFER;
+
+ priv->external_encoder = devm_kzalloc(ddev->dev,
+ sizeof(*priv->external_encoder),
+ GFP_KERNEL);
+ if (!priv->external_encoder)
+ return -ENOMEM;
+
+ ret = drm_encoder_init(ddev, priv->external_encoder,
+ &tilcdc_external_encoder_funcs,
+ DRM_MODE_ENCODER_NONE, NULL);
+ if (ret) {
+ dev_err(ddev->dev, "drm_encoder_init() failed %d\n", ret);
+ return ret;
+ }
+
+ ret = tilcdc_attach_bridge(ddev, bridge);
+ if (ret)
+ drm_encoder_cleanup(priv->external_encoder);
+
+ return ret;
}
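
One detail worth noting in the function above: the encoder's storage is devm-managed, but the DRM core registration still needs an explicit drm_encoder_cleanup() if anything after drm_encoder_init() fails. A condensed sketch of that ownership split (enc, funcs and attach_stage() are placeholder names):

enc = devm_kzalloc(ddev->dev, sizeof(*enc), GFP_KERNEL);
if (!enc)
	return -ENOMEM;			/* nothing registered yet */

ret = drm_encoder_init(ddev, enc, &funcs, DRM_MODE_ENCODER_NONE, NULL);
if (ret)
	return ret;			/* devm frees enc on probe failure */

ret = attach_stage(ddev, enc);		/* e.g. the bridge attach above */
if (ret)
	drm_encoder_cleanup(enc);	/* undo only drm_encoder_init() */
return ret;
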
static int dev_match_of(struct device *dev, void *data)
@@ -140,16 +268,10 @@ int tilcdc_get_external_components(struct device *dev,
struct device_node *node;
struct device_node *ep = NULL;
int count = 0;
+ int ret = 0;
- /* Avoid error print by of_graph_get_next_endpoint() if there
- * is no ports present.
- */
- node = of_get_child_by_name(dev->of_node, "ports");
- if (!node)
- node = of_get_child_by_name(dev->of_node, "port");
- if (!node)
+ if (!tilcdc_node_has_port(dev->of_node))
return 0;
- of_node_put(node);
while ((ep = of_graph_get_next_endpoint(dev->of_node, ep))) {
node = of_graph_get_remote_port_parent(ep);
@@ -159,16 +281,20 @@ int tilcdc_get_external_components(struct device *dev,
}
dev_dbg(dev, "Subdevice node '%s' found\n", node->name);
- if (match)
- component_match_add(dev, match, dev_match_of, node);
- of_node_put(node);
- count++;
- }
- if (count > 1) {
- dev_err(dev, "Only one external encoder is supported\n");
- return -EINVAL;
+ if (of_device_is_compatible(node, "nxp,tda998x")) {
+ if (match)
+ drm_of_component_match_add(dev, match,
+ dev_match_of, node);
+ ret = 1;
+ }
+
+ of_node_put(node);
+ if (count++ > 1) {
+ dev_err(dev, "Only one port is supported\n");
+ return -EINVAL;
+ }
}
- return count;
+ return ret;
}
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_external.h b/drivers/gpu/drm/tilcdc/tilcdc_external.h
index c700e0c1623e..763d18f006c7 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_external.h
+++ b/drivers/gpu/drm/tilcdc/tilcdc_external.h
@@ -18,8 +18,9 @@
#ifndef __TILCDC_EXTERNAL_H__
#define __TILCDC_EXTERNAL_H__
-int tilcdc_add_external_encoders(struct drm_device *dev);
-void tilcdc_remove_external_encoders(struct drm_device *dev);
+int tilcdc_add_component_encoder(struct drm_device *dev);
+void tilcdc_remove_external_device(struct drm_device *dev);
int tilcdc_get_external_components(struct device *dev,
struct component_match **match);
+int tilcdc_attach_external_device(struct drm_device *ddev);
#endif /* __TILCDC_EXTERNAL_H__ */
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_panel.c b/drivers/gpu/drm/tilcdc/tilcdc_panel.c
index 2134bb20fbe9..28c3e2f44f64 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_panel.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_panel.c
@@ -144,13 +144,6 @@ static void panel_connector_destroy(struct drm_connector *connector)
drm_connector_cleanup(connector);
}
-static enum drm_connector_status panel_connector_detect(
- struct drm_connector *connector,
- bool force)
-{
- return connector_status_connected;
-}
-
static int panel_connector_get_modes(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
@@ -197,7 +190,6 @@ static struct drm_encoder *panel_connector_best_encoder(
static const struct drm_connector_funcs panel_connector_funcs = {
.destroy = panel_connector_destroy,
.dpms = drm_atomic_helper_connector_dpms,
- .detect = panel_connector_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.reset = drm_atomic_helper_connector_reset,
.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
@@ -240,8 +232,6 @@ static struct drm_connector *panel_connector_create(struct drm_device *dev,
if (ret)
goto fail;
- drm_connector_register(connector);
-
return connector;
fail:
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_plane.c b/drivers/gpu/drm/tilcdc/tilcdc_plane.c
index 74c65fa859b2..8a6a50d74aff 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_plane.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_plane.c
@@ -39,7 +39,7 @@ static int tilcdc_plane_atomic_check(struct drm_plane *plane,
{
struct drm_crtc_state *crtc_state;
struct drm_plane_state *old_state = plane->state;
- unsigned int depth, bpp;
+ unsigned int pitch;
if (!state->crtc)
return 0;
@@ -68,8 +68,9 @@ static int tilcdc_plane_atomic_check(struct drm_plane *plane,
return -EINVAL;
}
- drm_fb_get_bpp_depth(state->fb->pixel_format, &depth, &bpp);
- if (state->fb->pitches[0] != crtc_state->mode.hdisplay * bpp / 8) {
+ pitch = crtc_state->mode.hdisplay *
+ drm_format_plane_cpp(state->fb->pixel_format, 0);
+ if (state->fb->pitches[0] != pitch) {
dev_err(plane->dev->dev,
"Invalid pitch: fb and crtc widths must be the same");
return -EINVAL;
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_regs.h b/drivers/gpu/drm/tilcdc/tilcdc_regs.h
index f57c0d62c76a..9d528c0a67a4 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_regs.h
+++ b/drivers/gpu/drm/tilcdc/tilcdc_regs.h
@@ -34,11 +34,14 @@
/* LCDC DMA Control Register */
#define LCDC_DMA_BURST_SIZE(x) ((x) << 4)
+#define LCDC_DMA_BURST_SIZE_MASK ((0x7) << 4)
#define LCDC_DMA_BURST_1 0x0
#define LCDC_DMA_BURST_2 0x1
#define LCDC_DMA_BURST_4 0x2
#define LCDC_DMA_BURST_8 0x3
#define LCDC_DMA_BURST_16 0x4
+#define LCDC_DMA_FIFO_THRESHOLD(x) ((x) << 8)
+#define LCDC_DMA_FIFO_THRESHOLD_MASK ((0x3) << 8)
#define LCDC_V1_END_OF_FRAME_INT_ENA BIT(2)
#define LCDC_V2_END_OF_FRAME0_INT_ENA BIT(8)
#define LCDC_V2_END_OF_FRAME1_INT_ENA BIT(9)
@@ -46,10 +49,12 @@
/* LCDC Control Register */
#define LCDC_CLK_DIVISOR(x) ((x) << 8)
+#define LCDC_CLK_DIVISOR_MASK ((0xFF) << 8)
#define LCDC_RASTER_MODE 0x01
/* LCDC Raster Control Register */
#define LCDC_PALETTE_LOAD_MODE(x) ((x) << 20)
+#define LCDC_PALETTE_LOAD_MODE_MASK ((0x3) << 20)
#define PALETTE_AND_DATA 0x00
#define PALETTE_ONLY 0x01
#define DATA_ONLY 0x02
@@ -61,6 +66,8 @@
#define LCDC_V2_UNDERFLOW_INT_ENA BIT(5)
#define LCDC_V1_PL_INT_ENA BIT(4)
#define LCDC_V2_PL_INT_ENA BIT(6)
+#define LCDC_V1_SYNC_LOST_INT_ENA BIT(5)
+#define LCDC_V1_FRAME_DONE_INT_ENA BIT(3)
#define LCDC_MONOCHROME_MODE BIT(1)
#define LCDC_RASTER_ENABLE BIT(0)
#define LCDC_TFT_ALT_ENABLE BIT(23)
@@ -74,7 +81,9 @@
/* LCDC Raster Timing 2 Register */
#define LCDC_AC_BIAS_TRANSITIONS_PER_INT(x) ((x) << 16)
+#define LCDC_AC_BIAS_TRANSITIONS_PER_INT_MASK ((0xF) << 16)
#define LCDC_AC_BIAS_FREQUENCY(x) ((x) << 8)
+#define LCDC_AC_BIAS_FREQUENCY_MASK ((0xFF) << 8)
#define LCDC_SYNC_CTRL BIT(25)
#define LCDC_SYNC_EDGE BIT(24)
#define LCDC_INVERT_PIXEL_CLOCK BIT(22)
@@ -139,6 +148,12 @@ static inline u32 tilcdc_read(struct drm_device *dev, u32 reg)
return ioread32(priv->mmio + reg);
}
+static inline void tilcdc_write_mask(struct drm_device *dev, u32 reg,
+ u32 val, u32 mask)
+{
+ tilcdc_write(dev, reg, (tilcdc_read(dev, reg) & ~mask) | (val & mask));
+}
+
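
The new read-modify-write helper pairs with the *_MASK defines added above; for example, updating just the clock-divisor field of the control register (div here is a hypothetical value, and LCDC_CTRL_REG is the existing control-register offset from this header):

tilcdc_write_mask(dev, LCDC_CTRL_REG,
		  LCDC_CLK_DIVISOR(div), LCDC_CLK_DIVISOR_MASK);
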
static inline void tilcdc_set(struct drm_device *dev, u32 reg, u32 mask)
{
tilcdc_write(dev, reg, tilcdc_read(dev, reg) | mask);
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_tfp410.c b/drivers/gpu/drm/tilcdc/tilcdc_tfp410.c
index 458043a53995..aabfad882e23 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_tfp410.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_tfp410.c
@@ -249,8 +249,6 @@ static struct drm_connector *tfp410_connector_create(struct drm_device *dev,
if (ret)
goto fail;
- drm_connector_register(connector);
-
return connector;
fail:
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index fc6217dfe401..d5063618efa7 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -148,7 +148,7 @@ static void ttm_bo_release_list(struct kref *list_kref)
BUG_ON(!list_empty(&bo->ddestroy));
ttm_tt_destroy(bo->ttm);
atomic_dec(&bo->glob->bo_count);
- fence_put(bo->moving);
+ dma_fence_put(bo->moving);
if (bo->resv == &bo->ttm_resv)
reservation_object_fini(&bo->ttm_resv);
mutex_destroy(&bo->wu_mutex);
@@ -426,20 +426,20 @@ static void ttm_bo_cleanup_memtype_use(struct ttm_buffer_object *bo)
static void ttm_bo_flush_all_fences(struct ttm_buffer_object *bo)
{
struct reservation_object_list *fobj;
- struct fence *fence;
+ struct dma_fence *fence;
int i;
fobj = reservation_object_get_list(bo->resv);
fence = reservation_object_get_excl(bo->resv);
if (fence && !fence->ops->signaled)
- fence_enable_sw_signaling(fence);
+ dma_fence_enable_sw_signaling(fence);
for (i = 0; fobj && i < fobj->shared_count; ++i) {
fence = rcu_dereference_protected(fobj->shared[i],
reservation_object_held(bo->resv));
if (!fence->ops->signaled)
- fence_enable_sw_signaling(fence);
+ dma_fence_enable_sw_signaling(fence);
}
}
@@ -717,6 +717,20 @@ out:
return ret;
}
+bool ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
+ const struct ttm_place *place)
+{
+ /* Don't evict this BO if it's outside of the
+ * requested placement range
+ */
+ if (place->fpfn >= (bo->mem.start + bo->mem.size) ||
+ (place->lpfn && place->lpfn <= bo->mem.start))
+ return false;
+
+ return true;
+}
+EXPORT_SYMBOL(ttm_bo_eviction_valuable);
+
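
Factoring the range check out into an exported default lets drivers override eviction policy per-BO. A sketch of a driver-side hook that adds its own test and falls back to the default (the foo_* names are hypothetical):

static bool foo_eviction_valuable(struct ttm_buffer_object *bo,
				  const struct ttm_place *place)
{
	if (foo_bo_is_pinned(bo))	/* hypothetical driver-side state */
		return false;

	return ttm_bo_eviction_valuable(bo, place);
}

static struct ttm_bo_driver foo_bo_driver = {
	/* other callbacks elided */
	.eviction_valuable = foo_eviction_valuable,
};
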
static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
uint32_t mem_type,
const struct ttm_place *place,
@@ -731,21 +745,16 @@ static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
spin_lock(&glob->lru_lock);
list_for_each_entry(bo, &man->lru, lru) {
ret = __ttm_bo_reserve(bo, false, true, NULL);
- if (!ret) {
- if (place && (place->fpfn || place->lpfn)) {
- /* Don't evict this BO if it's outside of the
- * requested placement range
- */
- if (place->fpfn >= (bo->mem.start + bo->mem.size) ||
- (place->lpfn && place->lpfn <= bo->mem.start)) {
- __ttm_bo_unreserve(bo);
- ret = -EBUSY;
- continue;
- }
- }
+ if (ret)
+ continue;
- break;
+ if (place && !bdev->driver->eviction_valuable(bo, place)) {
+ __ttm_bo_unreserve(bo);
+ ret = -EBUSY;
+ continue;
}
+
+ break;
}
if (ret) {
@@ -792,11 +801,11 @@ static int ttm_bo_add_move_fence(struct ttm_buffer_object *bo,
struct ttm_mem_type_manager *man,
struct ttm_mem_reg *mem)
{
- struct fence *fence;
+ struct dma_fence *fence;
int ret;
spin_lock(&man->move_lock);
- fence = fence_get(man->move);
+ fence = dma_fence_get(man->move);
spin_unlock(&man->move_lock);
if (fence) {
@@ -806,7 +815,7 @@ static int ttm_bo_add_move_fence(struct ttm_buffer_object *bo,
if (unlikely(ret))
return ret;
- fence_put(bo->moving);
+ dma_fence_put(bo->moving);
bo->moving = fence;
}
@@ -1286,7 +1295,7 @@ static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
{
struct ttm_mem_type_manager *man = &bdev->man[mem_type];
struct ttm_bo_global *glob = bdev->glob;
- struct fence *fence;
+ struct dma_fence *fence;
int ret;
/*
@@ -1309,12 +1318,12 @@ static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
spin_unlock(&glob->lru_lock);
spin_lock(&man->move_lock);
- fence = fence_get(man->move);
+ fence = dma_fence_get(man->move);
spin_unlock(&man->move_lock);
if (fence) {
- ret = fence_wait(fence, false);
- fence_put(fence);
+ ret = dma_fence_wait(fence, false);
+ dma_fence_put(fence);
if (ret) {
if (allow_errors) {
return ret;
@@ -1343,7 +1352,7 @@ int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type)
mem_type);
return ret;
}
- fence_put(man->move);
+ dma_fence_put(man->move);
man->use_type = false;
man->has_type = false;
@@ -1602,7 +1611,14 @@ EXPORT_SYMBOL(ttm_bo_unmap_virtual);
int ttm_bo_wait(struct ttm_buffer_object *bo,
bool interruptible, bool no_wait)
{
- long timeout = no_wait ? 0 : 15 * HZ;
+ long timeout = 15 * HZ;
+
+ if (no_wait) {
+ if (reservation_object_test_signaled_rcu(bo->resv, true))
+ return 0;
+ else
+ return -EBUSY;
+ }
timeout = reservation_object_wait_timeout_rcu(bo->resv, true,
interruptible, timeout);
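
With this change ttm_bo_wait() can double as a non-blocking idle poll; roughly how a caller would use it (bo assumed already reserved):

ret = ttm_bo_wait(bo, false, true);	/* no_wait: returns immediately */
if (ret == -EBUSY)
	return ret;			/* fences still outstanding */
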
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index bf6e21655c57..d0459b392e5e 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -644,7 +644,7 @@ void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
EXPORT_SYMBOL(ttm_bo_kunmap);
int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
- struct fence *fence,
+ struct dma_fence *fence,
bool evict,
struct ttm_mem_reg *new_mem)
{
@@ -674,8 +674,8 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
* operation has completed.
*/
- fence_put(bo->moving);
- bo->moving = fence_get(fence);
+ dma_fence_put(bo->moving);
+ bo->moving = dma_fence_get(fence);
ret = ttm_buffer_object_transfer(bo, &ghost_obj);
if (ret)
@@ -706,7 +706,7 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
EXPORT_SYMBOL(ttm_bo_move_accel_cleanup);
int ttm_bo_pipeline_move(struct ttm_buffer_object *bo,
- struct fence *fence, bool evict,
+ struct dma_fence *fence, bool evict,
struct ttm_mem_reg *new_mem)
{
struct ttm_bo_device *bdev = bo->bdev;
@@ -730,8 +730,8 @@ int ttm_bo_pipeline_move(struct ttm_buffer_object *bo,
* operation has completed.
*/
- fence_put(bo->moving);
- bo->moving = fence_get(fence);
+ dma_fence_put(bo->moving);
+ bo->moving = dma_fence_get(fence);
ret = ttm_buffer_object_transfer(bo, &ghost_obj);
if (ret)
@@ -761,16 +761,16 @@ int ttm_bo_pipeline_move(struct ttm_buffer_object *bo,
*/
spin_lock(&from->move_lock);
- if (!from->move || fence_is_later(fence, from->move)) {
- fence_put(from->move);
- from->move = fence_get(fence);
+ if (!from->move || dma_fence_is_later(fence, from->move)) {
+ dma_fence_put(from->move);
+ from->move = dma_fence_get(fence);
}
spin_unlock(&from->move_lock);
ttm_bo_free_old_node(bo);
- fence_put(bo->moving);
- bo->moving = fence_get(fence);
+ dma_fence_put(bo->moving);
+ bo->moving = dma_fence_get(fence);
} else {
/**
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index a6ed9d5e5167..68ef993ab431 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -54,7 +54,7 @@ static int ttm_bo_vm_fault_idle(struct ttm_buffer_object *bo,
/*
* Quick non-stalling check for idle.
*/
- if (fence_is_signaled(bo->moving))
+ if (dma_fence_is_signaled(bo->moving))
goto out_clear;
/*
@@ -67,14 +67,14 @@ static int ttm_bo_vm_fault_idle(struct ttm_buffer_object *bo,
goto out_unlock;
up_read(&vma->vm_mm->mmap_sem);
- (void) fence_wait(bo->moving, true);
+ (void) dma_fence_wait(bo->moving, true);
goto out_unlock;
}
/*
* Ordinary wait.
*/
- ret = fence_wait(bo->moving, true);
+ ret = dma_fence_wait(bo->moving, true);
if (unlikely(ret != 0)) {
ret = (ret != -ERESTARTSYS) ? VM_FAULT_SIGBUS :
VM_FAULT_NOPAGE;
@@ -82,7 +82,7 @@ static int ttm_bo_vm_fault_idle(struct ttm_buffer_object *bo,
}
out_clear:
- fence_put(bo->moving);
+ dma_fence_put(bo->moving);
bo->moving = NULL;
out_unlock:
@@ -101,7 +101,7 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
struct page *page;
int ret;
int i;
- unsigned long address = (unsigned long)vmf->virtual_address;
+ unsigned long address = vmf->address;
int retval = VM_FAULT_NOPAGE;
struct ttm_mem_type_manager *man =
&bdev->man[bo->mem.mem_type];
diff --git a/drivers/gpu/drm/ttm/ttm_execbuf_util.c b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
index a80717b35dc6..d35bc491e8de 100644
--- a/drivers/gpu/drm/ttm/ttm_execbuf_util.c
+++ b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
@@ -179,7 +179,8 @@ int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
EXPORT_SYMBOL(ttm_eu_reserve_buffers);
void ttm_eu_fence_buffer_objects(struct ww_acquire_ctx *ticket,
- struct list_head *list, struct fence *fence)
+ struct list_head *list,
+ struct dma_fence *fence)
{
struct ttm_validate_buffer *entry;
struct ttm_buffer_object *bo;
diff --git a/drivers/gpu/drm/udl/udl_drv.c b/drivers/gpu/drm/udl/udl_drv.c
index cc45d98f9bb5..cd8b01727734 100644
--- a/drivers/gpu/drm/udl/udl_drv.c
+++ b/drivers/gpu/drm/udl/udl_drv.c
@@ -44,9 +44,7 @@ static const struct file_operations udl_driver_fops = {
.read = drm_read,
.unlocked_ioctl = drm_ioctl,
.release = drm_release,
-#ifdef CONFIG_COMPAT
.compat_ioctl = drm_compat_ioctl,
-#endif
.llseek = noop_llseek,
};
diff --git a/drivers/gpu/drm/udl/udl_fb.c b/drivers/gpu/drm/udl/udl_fb.c
index 611b6b9bb3cb..167f42c67c7c 100644
--- a/drivers/gpu/drm/udl/udl_fb.c
+++ b/drivers/gpu/drm/udl/udl_fb.c
@@ -254,16 +254,10 @@ static int udl_fb_release(struct fb_info *info, int user)
static struct fb_ops udlfb_ops = {
.owner = THIS_MODULE,
- .fb_check_var = drm_fb_helper_check_var,
- .fb_set_par = drm_fb_helper_set_par,
+ DRM_FB_HELPER_DEFAULT_OPS,
.fb_fillrect = drm_fb_helper_sys_fillrect,
.fb_copyarea = drm_fb_helper_sys_copyarea,
.fb_imageblit = drm_fb_helper_sys_imageblit,
- .fb_pan_display = drm_fb_helper_pan_display,
- .fb_blank = drm_fb_helper_blank,
- .fb_setcmap = drm_fb_helper_setcmap,
- .fb_debug_enter = drm_fb_helper_debug_enter,
- .fb_debug_leave = drm_fb_helper_debug_leave,
.fb_mmap = udl_fb_mmap,
.fb_open = udl_fb_open,
.fb_release = udl_fb_release,
diff --git a/drivers/gpu/drm/udl/udl_gem.c b/drivers/gpu/drm/udl/udl_gem.c
index 818e70712b18..3c0c4bd3f750 100644
--- a/drivers/gpu/drm/udl/udl_gem.c
+++ b/drivers/gpu/drm/udl/udl_gem.c
@@ -107,14 +107,13 @@ int udl_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
unsigned int page_offset;
int ret = 0;
- page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >>
- PAGE_SHIFT;
+ page_offset = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
if (!obj->pages)
return VM_FAULT_SIGBUS;
page = obj->pages[page_offset];
- ret = vm_insert_page(vma, (unsigned long)vmf->virtual_address, page);
+ ret = vm_insert_page(vma, vmf->address, page);
switch (ret) {
case -EAGAIN:
case 0:
diff --git a/drivers/gpu/drm/vc4/Makefile b/drivers/gpu/drm/vc4/Makefile
index fb77db755e0a..7757f69a8a77 100644
--- a/drivers/gpu/drm/vc4/Makefile
+++ b/drivers/gpu/drm/vc4/Makefile
@@ -11,6 +11,7 @@ vc4-y := \
vc4_kms.o \
vc4_gem.o \
vc4_hdmi.o \
+ vc4_vec.o \
vc4_hvs.o \
vc4_irq.o \
vc4_plane.o \
diff --git a/drivers/gpu/drm/vc4/vc4_crtc.c b/drivers/gpu/drm/vc4/vc4_crtc.c
index 7f08d681a74b..a0fd3e66bc4b 100644
--- a/drivers/gpu/drm/vc4/vc4_crtc.c
+++ b/drivers/gpu/drm/vc4/vc4_crtc.c
@@ -83,8 +83,7 @@ struct vc4_crtc_data {
/* Which channel of the HVS this pixelvalve sources from. */
int hvs_channel;
- enum vc4_encoder_type encoder0_type;
- enum vc4_encoder_type encoder1_type;
+ enum vc4_encoder_type encoder_types[4];
};
#define CRTC_WRITE(offset, val) writel(val, vc4_crtc->regs + (offset))
@@ -669,6 +668,14 @@ void vc4_disable_vblank(struct drm_device *dev, unsigned int crtc_id)
CRTC_WRITE(PV_INTEN, 0);
}
+/* Must be called with the event lock held */
+bool vc4_event_pending(struct drm_crtc *crtc)
+{
+ struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
+
+ return !!vc4_crtc->event;
+}
+
static void vc4_crtc_handle_page_flip(struct vc4_crtc *vc4_crtc)
{
struct drm_crtc *crtc = &vc4_crtc->base;
@@ -859,20 +866,26 @@ static const struct drm_crtc_helper_funcs vc4_crtc_helper_funcs = {
static const struct vc4_crtc_data pv0_data = {
.hvs_channel = 0,
- .encoder0_type = VC4_ENCODER_TYPE_DSI0,
- .encoder1_type = VC4_ENCODER_TYPE_DPI,
+ .encoder_types = {
+ [PV_CONTROL_CLK_SELECT_DSI] = VC4_ENCODER_TYPE_DSI0,
+ [PV_CONTROL_CLK_SELECT_DPI_SMI_HDMI] = VC4_ENCODER_TYPE_DPI,
+ },
};
static const struct vc4_crtc_data pv1_data = {
.hvs_channel = 2,
- .encoder0_type = VC4_ENCODER_TYPE_DSI1,
- .encoder1_type = VC4_ENCODER_TYPE_SMI,
+ .encoder_types = {
+ [PV_CONTROL_CLK_SELECT_DSI] = VC4_ENCODER_TYPE_DSI1,
+ [PV_CONTROL_CLK_SELECT_DPI_SMI_HDMI] = VC4_ENCODER_TYPE_SMI,
+ },
};
static const struct vc4_crtc_data pv2_data = {
.hvs_channel = 1,
- .encoder0_type = VC4_ENCODER_TYPE_VEC,
- .encoder1_type = VC4_ENCODER_TYPE_HDMI,
+ .encoder_types = {
+ [PV_CONTROL_CLK_SELECT_DPI_SMI_HDMI] = VC4_ENCODER_TYPE_HDMI,
+ [PV_CONTROL_CLK_SELECT_VEC] = VC4_ENCODER_TYPE_VEC,
+ },
};
static const struct of_device_id vc4_crtc_dt_match[] = {
@@ -886,17 +899,20 @@ static void vc4_set_crtc_possible_masks(struct drm_device *drm,
struct drm_crtc *crtc)
{
struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
+ const struct vc4_crtc_data *crtc_data = vc4_crtc->data;
+ const enum vc4_encoder_type *encoder_types = crtc_data->encoder_types;
struct drm_encoder *encoder;
drm_for_each_encoder(encoder, drm) {
struct vc4_encoder *vc4_encoder = to_vc4_encoder(encoder);
-
- if (vc4_encoder->type == vc4_crtc->data->encoder0_type) {
- vc4_encoder->clock_select = 0;
- encoder->possible_crtcs |= drm_crtc_mask(crtc);
- } else if (vc4_encoder->type == vc4_crtc->data->encoder1_type) {
- vc4_encoder->clock_select = 1;
- encoder->possible_crtcs |= drm_crtc_mask(crtc);
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(crtc_data->encoder_types); i++) {
+ if (vc4_encoder->type == encoder_types[i]) {
+ vc4_encoder->clock_select = i;
+ encoder->possible_crtcs |= drm_crtc_mask(crtc);
+ break;
+ }
}
}
}
diff --git a/drivers/gpu/drm/vc4/vc4_debugfs.c b/drivers/gpu/drm/vc4/vc4_debugfs.c
index 245115d49c46..caf817bac885 100644
--- a/drivers/gpu/drm/vc4/vc4_debugfs.c
+++ b/drivers/gpu/drm/vc4/vc4_debugfs.c
@@ -19,6 +19,7 @@ static const struct drm_info_list vc4_debugfs_list[] = {
{"bo_stats", vc4_bo_stats_debugfs, 0},
{"dpi_regs", vc4_dpi_debugfs_regs, 0},
{"hdmi_regs", vc4_hdmi_debugfs_regs, 0},
+ {"vec_regs", vc4_vec_debugfs_regs, 0},
{"hvs_regs", vc4_hvs_debugfs_regs, 0},
{"crtc0_regs", vc4_crtc_debugfs_regs, 0, (void *)(uintptr_t)0},
{"crtc1_regs", vc4_crtc_debugfs_regs, 0, (void *)(uintptr_t)1},
diff --git a/drivers/gpu/drm/vc4/vc4_drv.c b/drivers/gpu/drm/vc4/vc4_drv.c
index 8703f56b7947..ac09ca7ff430 100644
--- a/drivers/gpu/drm/vc4/vc4_drv.c
+++ b/drivers/gpu/drm/vc4/vc4_drv.c
@@ -61,23 +61,28 @@ static int vc4_get_param_ioctl(struct drm_device *dev, void *data,
if (ret < 0)
return ret;
args->value = V3D_READ(V3D_IDENT0);
- pm_runtime_put(&vc4->v3d->pdev->dev);
+ pm_runtime_mark_last_busy(&vc4->v3d->pdev->dev);
+ pm_runtime_put_autosuspend(&vc4->v3d->pdev->dev);
break;
case DRM_VC4_PARAM_V3D_IDENT1:
ret = pm_runtime_get_sync(&vc4->v3d->pdev->dev);
if (ret < 0)
return ret;
args->value = V3D_READ(V3D_IDENT1);
- pm_runtime_put(&vc4->v3d->pdev->dev);
+ pm_runtime_mark_last_busy(&vc4->v3d->pdev->dev);
+ pm_runtime_put_autosuspend(&vc4->v3d->pdev->dev);
break;
case DRM_VC4_PARAM_V3D_IDENT2:
ret = pm_runtime_get_sync(&vc4->v3d->pdev->dev);
if (ret < 0)
return ret;
args->value = V3D_READ(V3D_IDENT2);
- pm_runtime_put(&vc4->v3d->pdev->dev);
+ pm_runtime_mark_last_busy(&vc4->v3d->pdev->dev);
+ pm_runtime_put_autosuspend(&vc4->v3d->pdev->dev);
break;
case DRM_VC4_PARAM_SUPPORTS_BRANCHES:
+ case DRM_VC4_PARAM_SUPPORTS_ETC1:
+ case DRM_VC4_PARAM_SUPPORTS_THREADED_FS:
args->value = true;
break;
default:
@@ -103,9 +108,7 @@ static const struct file_operations vc4_drm_fops = {
.mmap = vc4_mmap,
.poll = drm_poll,
.read = drm_read,
-#ifdef CONFIG_COMPAT
.compat_ioctl = drm_compat_ioctl,
-#endif
.llseek = noop_llseek,
};
@@ -274,12 +277,14 @@ static void vc4_drm_unbind(struct device *dev)
struct drm_device *drm = platform_get_drvdata(pdev);
struct vc4_dev *vc4 = to_vc4_dev(drm);
+ drm_dev_unregister(drm);
+
if (vc4->fbdev)
drm_fbdev_cma_fini(vc4->fbdev);
drm_mode_config_cleanup(drm);
- drm_put_dev(drm);
+ drm_dev_unref(drm);
}
static const struct component_master_ops vc4_drm_ops = {
@@ -289,6 +294,7 @@ static const struct component_master_ops vc4_drm_ops = {
static struct platform_driver *const component_drivers[] = {
&vc4_hdmi_driver,
+ &vc4_vec_driver,
&vc4_dpi_driver,
&vc4_hvs_driver,
&vc4_crtc_driver,
diff --git a/drivers/gpu/drm/vc4/vc4_drv.h b/drivers/gpu/drm/vc4/vc4_drv.h
index 7c1e4d97486f..b5c4bb14d0d1 100644
--- a/drivers/gpu/drm/vc4/vc4_drv.h
+++ b/drivers/gpu/drm/vc4/vc4_drv.h
@@ -17,6 +17,7 @@ struct vc4_dev {
struct vc4_crtc *crtc[3];
struct vc4_v3d *v3d;
struct vc4_dpi *dpi;
+ struct vc4_vec *vec;
struct drm_fbdev_cma *fbdev;
@@ -194,6 +195,7 @@ to_vc4_plane(struct drm_plane *plane)
}
enum vc4_encoder_type {
+ VC4_ENCODER_TYPE_NONE,
VC4_ENCODER_TYPE_HDMI,
VC4_ENCODER_TYPE_VEC,
VC4_ENCODER_TYPE_DSI0,
@@ -381,6 +383,8 @@ struct vc4_validated_shader_info {
uint32_t num_uniform_addr_offsets;
uint32_t *uniform_addr_offsets;
+
+ bool is_threaded;
};
/**
@@ -440,6 +444,7 @@ int vc4_bo_stats_debugfs(struct seq_file *m, void *arg);
extern struct platform_driver vc4_crtc_driver;
int vc4_enable_vblank(struct drm_device *dev, unsigned int crtc_id);
void vc4_disable_vblank(struct drm_device *dev, unsigned int crtc_id);
+bool vc4_event_pending(struct drm_crtc *crtc);
int vc4_crtc_debugfs_regs(struct seq_file *m, void *arg);
int vc4_crtc_get_scanoutpos(struct drm_device *dev, unsigned int crtc_id,
unsigned int flags, int *vpos, int *hpos,
@@ -483,6 +488,10 @@ int vc4_queue_seqno_cb(struct drm_device *dev,
extern struct platform_driver vc4_hdmi_driver;
int vc4_hdmi_debugfs_regs(struct seq_file *m, void *unused);
+/* vc4_vec.c */
+extern struct platform_driver vc4_vec_driver;
+int vc4_vec_debugfs_regs(struct seq_file *m, void *unused);
+
/* vc4_irq.c */
irqreturn_t vc4_irq(int irq, void *arg);
void vc4_irq_preinstall(struct drm_device *dev);
diff --git a/drivers/gpu/drm/vc4/vc4_gem.c b/drivers/gpu/drm/vc4/vc4_gem.c
index 47a095f392f8..db920771bfb5 100644
--- a/drivers/gpu/drm/vc4/vc4_gem.c
+++ b/drivers/gpu/drm/vc4/vc4_gem.c
@@ -544,14 +544,15 @@ vc4_cl_lookup_bos(struct drm_device *dev,
handles = drm_malloc_ab(exec->bo_count, sizeof(uint32_t));
if (!handles) {
+ ret = -ENOMEM;
DRM_ERROR("Failed to allocate incoming GEM handles\n");
goto fail;
}
- ret = copy_from_user(handles,
- (void __user *)(uintptr_t)args->bo_handles,
- exec->bo_count * sizeof(uint32_t));
- if (ret) {
+ if (copy_from_user(handles,
+ (void __user *)(uintptr_t)args->bo_handles,
+ exec->bo_count * sizeof(uint32_t))) {
+ ret = -EFAULT;
DRM_ERROR("Failed to copy in GEM handles\n");
goto fail;
}
@@ -708,8 +709,10 @@ vc4_complete_exec(struct drm_device *dev, struct vc4_exec_info *exec)
}
mutex_lock(&vc4->power_lock);
- if (--vc4->power_refcount == 0)
- pm_runtime_put(&vc4->v3d->pdev->dev);
+ if (--vc4->power_refcount == 0) {
+ pm_runtime_mark_last_busy(&vc4->v3d->pdev->dev);
+ pm_runtime_put_autosuspend(&vc4->v3d->pdev->dev);
+ }
mutex_unlock(&vc4->power_lock);
kfree(exec);
diff --git a/drivers/gpu/drm/vc4/vc4_kms.c b/drivers/gpu/drm/vc4/vc4_kms.c
index c1f65c6c8e60..be8dd8262f27 100644
--- a/drivers/gpu/drm/vc4/vc4_kms.c
+++ b/drivers/gpu/drm/vc4/vc4_kms.c
@@ -61,7 +61,7 @@ vc4_atomic_complete_commit(struct vc4_commit *c)
drm_atomic_helper_cleanup_planes(dev, state);
- drm_atomic_state_free(state);
+ drm_atomic_state_put(state);
up(&vc4->async_modeset);
@@ -119,17 +119,34 @@ static int vc4_atomic_commit(struct drm_device *dev,
/* Make sure that any outstanding modesets have finished. */
if (nonblock) {
- ret = down_trylock(&vc4->async_modeset);
- if (ret) {
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *crtc_state;
+ unsigned long flags;
+ bool busy = false;
+
+ /*
+ * If there's an undispatched event to send then we're
+ * obviously still busy. If there isn't, then we can
+ * unconditionally wait for the semaphore because it
+ * shouldn't be contended (for long).
+ *
+ * This is to prevent a race where queuing a new flip
+ * from userspace immediately on receipt of an event
+ * beats our clean-up and returns EBUSY.
+ */
+ spin_lock_irqsave(&dev->event_lock, flags);
+ for_each_crtc_in_state(state, crtc, crtc_state, i)
+ busy |= vc4_event_pending(crtc);
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+ if (busy) {
kfree(c);
return -EBUSY;
}
- } else {
- ret = down_interruptible(&vc4->async_modeset);
- if (ret) {
- kfree(c);
- return ret;
- }
+ }
+ ret = down_interruptible(&vc4->async_modeset);
+ if (ret) {
+ kfree(c);
+ return ret;
}
ret = drm_atomic_helper_prepare_planes(dev, state);
@@ -173,6 +190,7 @@ static int vc4_atomic_commit(struct drm_device *dev,
* current layout.
*/
+ drm_atomic_state_get(state);
if (nonblock) {
vc4_queue_seqno_cb(dev, &c->cb, wait_seqno,
vc4_atomic_complete_commit_seqno_cb);
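
vc4 now participates in the refcounted drm_atomic_state lifetime: a reference is taken before the state is handed to the asynchronous completion, and the completion path drops it with drm_atomic_state_put() instead of the removed drm_atomic_state_free(). In outline (queue_async_commit() is a hypothetical stand-in for the seqno callback used above):

drm_atomic_state_get(state);	/* keep state alive across the async commit */
queue_async_commit(state);	/* completion calls drm_atomic_state_put() */
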
diff --git a/drivers/gpu/drm/vc4/vc4_regs.h b/drivers/gpu/drm/vc4/vc4_regs.h
index 1aa44c2db556..39f6886b2410 100644
--- a/drivers/gpu/drm/vc4/vc4_regs.h
+++ b/drivers/gpu/drm/vc4/vc4_regs.h
@@ -177,8 +177,9 @@
# define PV_CONTROL_WAIT_HSTART BIT(12)
# define PV_CONTROL_PIXEL_REP_MASK VC4_MASK(5, 4)
# define PV_CONTROL_PIXEL_REP_SHIFT 4
-# define PV_CONTROL_CLK_SELECT_DSI_VEC 0
+# define PV_CONTROL_CLK_SELECT_DSI 0
# define PV_CONTROL_CLK_SELECT_DPI_SMI_HDMI 1
+# define PV_CONTROL_CLK_SELECT_VEC 2
# define PV_CONTROL_CLK_SELECT_MASK VC4_MASK(3, 2)
# define PV_CONTROL_CLK_SELECT_SHIFT 2
# define PV_CONTROL_FIFO_CLR BIT(1)
diff --git a/drivers/gpu/drm/vc4/vc4_v3d.c b/drivers/gpu/drm/vc4/vc4_v3d.c
index e6d3c6028341..7cc346ad9b0b 100644
--- a/drivers/gpu/drm/vc4/vc4_v3d.c
+++ b/drivers/gpu/drm/vc4/vc4_v3d.c
@@ -222,6 +222,8 @@ static int vc4_v3d_bind(struct device *dev, struct device *master, void *data)
return ret;
}
+ pm_runtime_use_autosuspend(dev);
+ pm_runtime_set_autosuspend_delay(dev, 40); /* a little over 2 frames. */
pm_runtime_enable(dev);
return 0;
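
Enabling autosuspend only helps if the put side cooperates; every pm_runtime_put() in the driver is therefore converted to the mark-busy/autosuspend pair, so the 40 ms timer restarts on each access. The canonical shape (do_hw_access() is hypothetical):

ret = pm_runtime_get_sync(dev);
if (ret < 0)
	return ret;

do_hw_access(dev);

pm_runtime_mark_last_busy(dev);		/* restart the autosuspend timer */
pm_runtime_put_autosuspend(dev);	/* power down 40 ms after last use */
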
diff --git a/drivers/gpu/drm/vc4/vc4_validate.c b/drivers/gpu/drm/vc4/vc4_validate.c
index 26503e307438..9fd171c361c2 100644
--- a/drivers/gpu/drm/vc4/vc4_validate.c
+++ b/drivers/gpu/drm/vc4/vc4_validate.c
@@ -644,6 +644,13 @@ reloc_tex(struct vc4_exec_info *exec,
cpp = 1;
break;
case VC4_TEXTURE_TYPE_ETC1:
+ /* ETC1 is arranged as 64-bit blocks, where each block is 4x4
+ * pixels.
+ */
+ cpp = 8;
+ width = (width + 3) >> 2;
+ height = (height + 3) >> 2;
+ break;
case VC4_TEXTURE_TYPE_BW1:
case VC4_TEXTURE_TYPE_A4:
case VC4_TEXTURE_TYPE_A1:
@@ -782,11 +789,6 @@ validate_gl_shader_rec(struct drm_device *dev,
exec->shader_rec_v += roundup(packet_size, 16);
exec->shader_rec_size -= packet_size;
- if (!(*(uint16_t *)pkt_u & VC4_SHADER_FLAG_FS_SINGLE_THREAD)) {
- DRM_ERROR("Multi-threaded fragment shaders not supported.\n");
- return -EINVAL;
- }
-
for (i = 0; i < shader_reloc_count; i++) {
if (src_handles[i] > exec->bo_count) {
DRM_ERROR("Shader handle %d too big\n", src_handles[i]);
@@ -803,6 +805,18 @@ validate_gl_shader_rec(struct drm_device *dev,
return -EINVAL;
}
+ if (((*(uint16_t *)pkt_u & VC4_SHADER_FLAG_FS_SINGLE_THREAD) == 0) !=
+ to_vc4_bo(&bo[0]->base)->validated_shader->is_threaded) {
+ DRM_ERROR("Thread mode of CL and FS do not match\n");
+ return -EINVAL;
+ }
+
+ if (to_vc4_bo(&bo[1]->base)->validated_shader->is_threaded ||
+ to_vc4_bo(&bo[2]->base)->validated_shader->is_threaded) {
+ DRM_ERROR("cs and vs cannot be threaded\n");
+ return -EINVAL;
+ }
+
for (i = 0; i < shader_reloc_count; i++) {
struct vc4_validated_shader_info *validated_shader;
uint32_t o = shader_reloc_offsets[i];
diff --git a/drivers/gpu/drm/vc4/vc4_validate_shaders.c b/drivers/gpu/drm/vc4/vc4_validate_shaders.c
index 2543cf5b8b51..5dba13dd1e9b 100644
--- a/drivers/gpu/drm/vc4/vc4_validate_shaders.c
+++ b/drivers/gpu/drm/vc4/vc4_validate_shaders.c
@@ -83,6 +83,13 @@ struct vc4_shader_validation_state {
* basic blocks.
*/
bool needs_uniform_address_for_loop;
+
+ /* Set when we find an instruction writing the top half of the
+ * register files. If a threaded shader were allowed to write the
+ * unusable regs, the clamp validation of the other shader running
+ * on our QPU would be invalid.
+ */
+ bool all_registers_used;
};
static uint32_t
@@ -119,6 +126,13 @@ raddr_add_a_to_live_reg_index(uint64_t inst)
}
static bool
+live_reg_is_upper_half(uint32_t lri)
+{
+ return (lri >= 16 && lri < 32) ||
+ (lri >= 32 + 16 && lri < 32 + 32);
+}
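
The helper above encodes the live-register layout this validator appears to use, with indices 0..31 mapping to regfile A rows and 32..63 to regfile B rows. For example:

live_reg_is_upper_half(16);		/* true: upper half of regfile A */
live_reg_is_upper_half(32 + 16);	/* true: upper half of regfile B */
live_reg_is_upper_half(15);		/* false: still usable when threaded */
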
+
+static bool
is_tmu_submit(uint32_t waddr)
{
return (waddr == QPU_W_TMU0_S ||
@@ -390,6 +404,9 @@ check_reg_write(struct vc4_validated_shader_info *validated_shader,
} else {
validation_state->live_immediates[lri] = ~0;
}
+
+ if (live_reg_is_upper_half(lri))
+ validation_state->all_registers_used = true;
}
switch (waddr) {
@@ -598,6 +615,11 @@ check_instruction_reads(struct vc4_validated_shader_info *validated_shader,
}
}
+ if ((raddr_a >= 16 && raddr_a < 32) ||
+ (raddr_b >= 16 && raddr_b < 32 && sig != QPU_SIG_SMALL_IMM)) {
+ validation_state->all_registers_used = true;
+ }
+
return true;
}
@@ -608,9 +630,7 @@ static bool
vc4_validate_branches(struct vc4_shader_validation_state *validation_state)
{
uint32_t max_branch_target = 0;
- bool found_shader_end = false;
int ip;
- int shader_end_ip = 0;
int last_branch = -2;
for (ip = 0; ip < validation_state->max_ip; ip++) {
@@ -621,8 +641,13 @@ vc4_validate_branches(struct vc4_shader_validation_state *validation_state)
uint32_t branch_target_ip;
if (sig == QPU_SIG_PROG_END) {
- shader_end_ip = ip;
- found_shader_end = true;
+ /* There are two delay slots after program end is
+ * signaled that are still executed, then we're
+ * finished. validation_state->max_ip is the
+ * instruction after the last valid instruction in the
+ * program.
+ */
+ validation_state->max_ip = ip + 3;
continue;
}
@@ -676,15 +701,9 @@ vc4_validate_branches(struct vc4_shader_validation_state *validation_state)
}
set_bit(after_delay_ip, validation_state->branch_targets);
max_branch_target = max(max_branch_target, after_delay_ip);
-
- /* There are two delay slots after program end is signaled
- * that are still executed, then we're finished.
- */
- if (found_shader_end && ip == shader_end_ip + 2)
- break;
}
- if (max_branch_target > shader_end_ip) {
+ if (max_branch_target > validation_state->max_ip - 3) {
DRM_ERROR("Branch landed after QPU_SIG_PROG_END");
return false;
}
@@ -756,6 +775,7 @@ vc4_validate_shader(struct drm_gem_cma_object *shader_obj)
{
bool found_shader_end = false;
int shader_end_ip = 0;
+ uint32_t last_thread_switch_ip = -3;
uint32_t ip;
struct vc4_validated_shader_info *validated_shader = NULL;
struct vc4_shader_validation_state validation_state;
@@ -788,6 +808,17 @@ vc4_validate_shader(struct drm_gem_cma_object *shader_obj)
if (!vc4_handle_branch_target(&validation_state))
goto fail;
+ if (ip == last_thread_switch_ip + 3) {
+ /* Reset r0-r3 live clamp data */
+ int i;
+
+ for (i = 64; i < LIVE_REG_COUNT; i++) {
+ validation_state.live_min_clamp_offsets[i] = ~0;
+ validation_state.live_max_clamp_regs[i] = false;
+ validation_state.live_immediates[i] = ~0;
+ }
+ }
+
switch (sig) {
case QPU_SIG_NONE:
case QPU_SIG_WAIT_FOR_SCOREBOARD:
@@ -797,6 +828,8 @@ vc4_validate_shader(struct drm_gem_cma_object *shader_obj)
case QPU_SIG_LOAD_TMU1:
case QPU_SIG_PROG_END:
case QPU_SIG_SMALL_IMM:
+ case QPU_SIG_THREAD_SWITCH:
+ case QPU_SIG_LAST_THREAD_SWITCH:
if (!check_instruction_writes(validated_shader,
&validation_state)) {
DRM_ERROR("Bad write at ip %d\n", ip);
@@ -812,6 +845,18 @@ vc4_validate_shader(struct drm_gem_cma_object *shader_obj)
shader_end_ip = ip;
}
+ if (sig == QPU_SIG_THREAD_SWITCH ||
+ sig == QPU_SIG_LAST_THREAD_SWITCH) {
+ validated_shader->is_threaded = true;
+
+ if (ip < last_thread_switch_ip + 3) {
+ DRM_ERROR("Thread switch too soon after "
+ "last switch at ip %d\n", ip);
+ goto fail;
+ }
+ last_thread_switch_ip = ip;
+ }
+
break;
case QPU_SIG_LOAD_IMM:
@@ -826,6 +871,13 @@ vc4_validate_shader(struct drm_gem_cma_object *shader_obj)
if (!check_branch(inst, validated_shader,
&validation_state, ip))
goto fail;
+
+ if (ip < last_thread_switch_ip + 3) {
+ DRM_ERROR("Branch in thread switch at ip %d",
+ ip);
+ goto fail;
+ }
+
break;
default:
DRM_ERROR("Unsupported QPU signal %d at "
@@ -847,6 +899,14 @@ vc4_validate_shader(struct drm_gem_cma_object *shader_obj)
goto fail;
}
+ /* Might corrupt other thread */
+ if (validated_shader->is_threaded &&
+ validation_state.all_registers_used) {
+ DRM_ERROR("Shader uses threading, but uses the upper "
+ "half of the registers, too\n");
+ goto fail;
+ }
+
/* If we did a backwards branch and we haven't emitted a uniforms
* reset since then, we still need the uniforms stream to have the
* uniforms address available so that the backwards branch can do its
diff --git a/drivers/gpu/drm/vc4/vc4_vec.c b/drivers/gpu/drm/vc4/vc4_vec.c
new file mode 100644
index 000000000000..32bb8ef985fb
--- /dev/null
+++ b/drivers/gpu/drm/vc4/vc4_vec.c
@@ -0,0 +1,657 @@
+/*
+ * Copyright (C) 2016 Broadcom
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+/**
+ * DOC: VC4 SDTV module
+ */
+
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_edid.h>
+#include <drm/drm_panel.h>
+#include <linux/clk.h>
+#include <linux/component.h>
+#include <linux/of_graph.h>
+#include <linux/of_platform.h>
+#include <linux/pm_runtime.h>
+
+#include "vc4_drv.h"
+#include "vc4_regs.h"
+
+/* WSE Registers */
+#define VEC_WSE_RESET 0xc0
+
+#define VEC_WSE_CONTROL 0xc4
+#define VEC_WSE_WSS_ENABLE BIT(7)
+
+#define VEC_WSE_WSS_DATA 0xc8
+#define VEC_WSE_VPS_DATA1 0xcc
+#define VEC_WSE_VPS_CONTROL 0xd0
+
+/* VEC Registers */
+#define VEC_REVID 0x100
+
+#define VEC_CONFIG0 0x104
+#define VEC_CONFIG0_YDEL_MASK GENMASK(28, 26)
+#define VEC_CONFIG0_YDEL(x) ((x) << 26)
+#define VEC_CONFIG0_CDEL_MASK GENMASK(25, 24)
+#define VEC_CONFIG0_CDEL(x) ((x) << 24)
+#define VEC_CONFIG0_PBPR_FIL BIT(18)
+#define VEC_CONFIG0_CHROMA_GAIN_MASK GENMASK(17, 16)
+#define VEC_CONFIG0_CHROMA_GAIN_UNITY (0 << 16)
+#define VEC_CONFIG0_CHROMA_GAIN_1_32 (1 << 16)
+#define VEC_CONFIG0_CHROMA_GAIN_1_16 (2 << 16)
+#define VEC_CONFIG0_CHROMA_GAIN_1_8 (3 << 16)
+#define VEC_CONFIG0_CBURST_GAIN_MASK GENMASK(14, 13)
+#define VEC_CONFIG0_CBURST_GAIN_UNITY (0 << 13)
+#define VEC_CONFIG0_CBURST_GAIN_1_128 (1 << 13)
+#define VEC_CONFIG0_CBURST_GAIN_1_64 (2 << 13)
+#define VEC_CONFIG0_CBURST_GAIN_1_32 (3 << 13)
+#define VEC_CONFIG0_CHRBW1 BIT(11)
+#define VEC_CONFIG0_CHRBW0 BIT(10)
+#define VEC_CONFIG0_SYNCDIS BIT(9)
+#define VEC_CONFIG0_BURDIS BIT(8)
+#define VEC_CONFIG0_CHRDIS BIT(7)
+#define VEC_CONFIG0_PDEN BIT(6)
+#define VEC_CONFIG0_YCDELAY BIT(4)
+#define VEC_CONFIG0_RAMPEN BIT(2)
+#define VEC_CONFIG0_YCDIS BIT(2)
+#define VEC_CONFIG0_STD_MASK GENMASK(1, 0)
+#define VEC_CONFIG0_NTSC_STD 0
+#define VEC_CONFIG0_PAL_BDGHI_STD 1
+#define VEC_CONFIG0_PAL_N_STD 3
+
+#define VEC_SCHPH 0x108
+#define VEC_SOFT_RESET 0x10c
+#define VEC_CLMP0_START 0x144
+#define VEC_CLMP0_END 0x148
+#define VEC_FREQ3_2 0x180
+#define VEC_FREQ1_0 0x184
+
+#define VEC_CONFIG1 0x188
+#define VEC_CONFIG_VEC_RESYNC_OFF BIT(18)
+#define VEC_CONFIG_RGB219 BIT(17)
+#define VEC_CONFIG_CBAR_EN BIT(16)
+#define VEC_CONFIG_TC_OBB BIT(15)
+#define VEC_CONFIG1_OUTPUT_MODE_MASK GENMASK(12, 10)
+#define VEC_CONFIG1_C_Y_CVBS (0 << 10)
+#define VEC_CONFIG1_CVBS_Y_C (1 << 10)
+#define VEC_CONFIG1_PR_Y_PB (2 << 10)
+#define VEC_CONFIG1_RGB (4 << 10)
+#define VEC_CONFIG1_Y_C_CVBS (5 << 10)
+#define VEC_CONFIG1_C_CVBS_Y (6 << 10)
+#define VEC_CONFIG1_C_CVBS_CVBS (7 << 10)
+#define VEC_CONFIG1_DIS_CHR BIT(9)
+#define VEC_CONFIG1_DIS_LUMA BIT(8)
+#define VEC_CONFIG1_YCBCR_IN BIT(6)
+#define VEC_CONFIG1_DITHER_TYPE_LFSR 0
+#define VEC_CONFIG1_DITHER_TYPE_COUNTER BIT(5)
+#define VEC_CONFIG1_DITHER_EN BIT(4)
+#define VEC_CONFIG1_CYDELAY BIT(3)
+#define VEC_CONFIG1_LUMADIS BIT(2)
+#define VEC_CONFIG1_COMPDIS BIT(1)
+#define VEC_CONFIG1_CUSTOM_FREQ BIT(0)
+
+#define VEC_CONFIG2 0x18c
+#define VEC_CONFIG2_PROG_SCAN BIT(15)
+#define VEC_CONFIG2_SYNC_ADJ_MASK GENMASK(14, 12)
+#define VEC_CONFIG2_SYNC_ADJ(x) (((x) / 2) << 12)
+#define VEC_CONFIG2_PBPR_EN BIT(10)
+#define VEC_CONFIG2_UV_DIG_DIS BIT(6)
+#define VEC_CONFIG2_RGB_DIG_DIS BIT(5)
+#define VEC_CONFIG2_TMUX_MASK GENMASK(3, 2)
+#define VEC_CONFIG2_TMUX_DRIVE0 (0 << 2)
+#define VEC_CONFIG2_TMUX_RG_COMP (1 << 2)
+#define VEC_CONFIG2_TMUX_UV_YC (2 << 2)
+#define VEC_CONFIG2_TMUX_SYNC_YC (3 << 2)
+
+#define VEC_INTERRUPT_CONTROL 0x190
+#define VEC_INTERRUPT_STATUS 0x194
+#define VEC_FCW_SECAM_B 0x198
+#define VEC_SECAM_GAIN_VAL 0x19c
+
+#define VEC_CONFIG3 0x1a0
+#define VEC_CONFIG3_HORIZ_LEN_STD (0 << 0)
+#define VEC_CONFIG3_HORIZ_LEN_MPEG1_SIF (1 << 0)
+#define VEC_CONFIG3_SHAPE_NON_LINEAR BIT(1)
+
+#define VEC_STATUS0 0x200
+#define VEC_MASK0 0x204
+
+#define VEC_CFG 0x208
+#define VEC_CFG_SG_MODE_MASK GENMASK(6, 5)
+#define VEC_CFG_SG_MODE(x) ((x) << 5)
+#define VEC_CFG_SG_EN BIT(4)
+#define VEC_CFG_VEC_EN BIT(3)
+#define VEC_CFG_MB_EN BIT(2)
+#define VEC_CFG_ENABLE BIT(1)
+#define VEC_CFG_TB_EN BIT(0)
+
+#define VEC_DAC_TEST 0x20c
+
+#define VEC_DAC_CONFIG 0x210
+#define VEC_DAC_CONFIG_LDO_BIAS_CTRL(x) ((x) << 24)
+#define VEC_DAC_CONFIG_DRIVER_CTRL(x) ((x) << 16)
+#define VEC_DAC_CONFIG_DAC_CTRL(x) (x)
+
+#define VEC_DAC_MISC 0x214
+#define VEC_DAC_MISC_VCD_CTRL_MASK GENMASK(31, 16)
+#define VEC_DAC_MISC_VCD_CTRL(x) ((x) << 16)
+#define VEC_DAC_MISC_VID_ACT BIT(8)
+#define VEC_DAC_MISC_VCD_PWRDN BIT(6)
+#define VEC_DAC_MISC_BIAS_PWRDN BIT(5)
+#define VEC_DAC_MISC_DAC_PWRDN BIT(2)
+#define VEC_DAC_MISC_LDO_PWRDN BIT(1)
+#define VEC_DAC_MISC_DAC_RST_N BIT(0)
+
+/* General VEC hardware state. */
+struct vc4_vec {
+ struct platform_device *pdev;
+
+ struct drm_encoder *encoder;
+ struct drm_connector *connector;
+
+ void __iomem *regs;
+
+ struct clk *clock;
+
+ const struct vc4_vec_tv_mode *tv_mode;
+};
+
+#define VEC_READ(offset) readl(vec->regs + (offset))
+#define VEC_WRITE(offset, val) writel(val, vec->regs + (offset))
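+
+/*
+ * Note: both helpers assume a local "struct vc4_vec *vec" is in scope.
+ * Example read-modify-write:
+ *
+ *	VEC_WRITE(VEC_CONFIG0, VEC_READ(VEC_CONFIG0) | VEC_CONFIG0_PDEN);
+ */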
+
+/* VC4 VEC encoder KMS struct */
+struct vc4_vec_encoder {
+ struct vc4_encoder base;
+ struct vc4_vec *vec;
+};
+
+static inline struct vc4_vec_encoder *
+to_vc4_vec_encoder(struct drm_encoder *encoder)
+{
+ return container_of(encoder, struct vc4_vec_encoder, base.base);
+}
+
+/* VC4 VEC connector KMS struct */
+struct vc4_vec_connector {
+ struct drm_connector base;
+ struct vc4_vec *vec;
+
+	/* Since the connector is attached to just the one encoder,
+	 * keep a reference to it here so we can implement the
+	 * best_encoder() hook.
+	 */
+ struct drm_encoder *encoder;
+};
+
+static inline struct vc4_vec_connector *
+to_vc4_vec_connector(struct drm_connector *connector)
+{
+ return container_of(connector, struct vc4_vec_connector, base);
+}
+
+enum vc4_vec_tv_mode_id {
+ VC4_VEC_TV_MODE_NTSC,
+ VC4_VEC_TV_MODE_NTSC_J,
+ VC4_VEC_TV_MODE_PAL,
+ VC4_VEC_TV_MODE_PAL_M,
+};
+
+struct vc4_vec_tv_mode {
+ const struct drm_display_mode *mode;
+ void (*mode_set)(struct vc4_vec *vec);
+};
+
+#define VEC_REG(reg) { reg, #reg }
+static const struct {
+ u32 reg;
+ const char *name;
+} vec_regs[] = {
+ VEC_REG(VEC_WSE_CONTROL),
+ VEC_REG(VEC_WSE_WSS_DATA),
+ VEC_REG(VEC_WSE_VPS_DATA1),
+ VEC_REG(VEC_WSE_VPS_CONTROL),
+ VEC_REG(VEC_REVID),
+ VEC_REG(VEC_CONFIG0),
+ VEC_REG(VEC_SCHPH),
+ VEC_REG(VEC_CLMP0_START),
+ VEC_REG(VEC_CLMP0_END),
+ VEC_REG(VEC_FREQ3_2),
+ VEC_REG(VEC_FREQ1_0),
+ VEC_REG(VEC_CONFIG1),
+ VEC_REG(VEC_CONFIG2),
+ VEC_REG(VEC_INTERRUPT_CONTROL),
+ VEC_REG(VEC_INTERRUPT_STATUS),
+ VEC_REG(VEC_FCW_SECAM_B),
+ VEC_REG(VEC_SECAM_GAIN_VAL),
+ VEC_REG(VEC_CONFIG3),
+ VEC_REG(VEC_STATUS0),
+ VEC_REG(VEC_MASK0),
+ VEC_REG(VEC_CFG),
+ VEC_REG(VEC_DAC_TEST),
+ VEC_REG(VEC_DAC_CONFIG),
+ VEC_REG(VEC_DAC_MISC),
+};
+
+#ifdef CONFIG_DEBUG_FS
+int vc4_vec_debugfs_regs(struct seq_file *m, void *unused)
+{
+ struct drm_info_node *node = (struct drm_info_node *)m->private;
+ struct drm_device *dev = node->minor->dev;
+ struct vc4_dev *vc4 = to_vc4_dev(dev);
+ struct vc4_vec *vec = vc4->vec;
+ int i;
+
+ if (!vec)
+ return 0;
+
+ for (i = 0; i < ARRAY_SIZE(vec_regs); i++) {
+ seq_printf(m, "%s (0x%04x): 0x%08x\n",
+ vec_regs[i].name, vec_regs[i].reg,
+ VEC_READ(vec_regs[i].reg));
+ }
+
+ return 0;
+}
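+
+/*
+ * Example of the resulting debugfs output (the register values below
+ * are made up for illustration):
+ *
+ *   VEC_REVID (0x0100): 0x00000040
+ *   VEC_CONFIG0 (0x0104): 0x000000c1
+ */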
+#endif
+
+static void vc4_vec_ntsc_mode_set(struct vc4_vec *vec)
+{
+ VEC_WRITE(VEC_CONFIG0, VEC_CONFIG0_NTSC_STD | VEC_CONFIG0_PDEN);
+ VEC_WRITE(VEC_CONFIG1, VEC_CONFIG1_C_CVBS_CVBS);
+}
+
+static void vc4_vec_ntsc_j_mode_set(struct vc4_vec *vec)
+{
+ VEC_WRITE(VEC_CONFIG0, VEC_CONFIG0_NTSC_STD);
+ VEC_WRITE(VEC_CONFIG1, VEC_CONFIG1_C_CVBS_CVBS);
+}
+
+static const struct drm_display_mode ntsc_mode = {
+ DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 13500,
+ 720, 720 + 14, 720 + 14 + 64, 720 + 14 + 64 + 60, 0,
+ 480, 480 + 3, 480 + 3 + 3, 480 + 3 + 3 + 16, 0,
+ DRM_MODE_FLAG_INTERLACE)
+};
+
+static void vc4_vec_pal_mode_set(struct vc4_vec *vec)
+{
+ VEC_WRITE(VEC_CONFIG0, VEC_CONFIG0_PAL_BDGHI_STD);
+ VEC_WRITE(VEC_CONFIG1, VEC_CONFIG1_C_CVBS_CVBS);
+}
+
+static void vc4_vec_pal_m_mode_set(struct vc4_vec *vec)
+{
+ VEC_WRITE(VEC_CONFIG0, VEC_CONFIG0_PAL_BDGHI_STD);
+ VEC_WRITE(VEC_CONFIG1,
+ VEC_CONFIG1_C_CVBS_CVBS | VEC_CONFIG1_CUSTOM_FREQ);
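+	/*
+	 * VEC_FREQ3_2/VEC_FREQ1_0 appear to hold the upper and lower
+	 * halves of a single 32-bit frequency control word (0x223b61d1
+	 * here), presumably moving the color subcarrier to the PAL-M
+	 * 3.57561149 MHz instead of the PAL-B/G/H default.
+	 */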
+ VEC_WRITE(VEC_FREQ3_2, 0x223b);
+ VEC_WRITE(VEC_FREQ1_0, 0x61d1);
+}
+
+static const struct drm_display_mode pal_mode = {
+ DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 13500,
+ 720, 720 + 20, 720 + 20 + 64, 720 + 20 + 64 + 60, 0,
+ 576, 576 + 2, 576 + 2 + 3, 576 + 2 + 3 + 20, 0,
+ DRM_MODE_FLAG_INTERLACE)
+};
+
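+/*
+ * Note: this table is indexed by the connector's tv.mode property, so
+ * the entry order must stay in sync with the tv_mode_names[] array
+ * registered through drm_mode_create_tv_properties() in vc4_vec_bind().
+ */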
+static const struct vc4_vec_tv_mode vc4_vec_tv_modes[] = {
+ [VC4_VEC_TV_MODE_NTSC] = {
+ .mode = &ntsc_mode,
+ .mode_set = vc4_vec_ntsc_mode_set,
+ },
+ [VC4_VEC_TV_MODE_NTSC_J] = {
+ .mode = &ntsc_mode,
+ .mode_set = vc4_vec_ntsc_j_mode_set,
+ },
+ [VC4_VEC_TV_MODE_PAL] = {
+ .mode = &pal_mode,
+ .mode_set = vc4_vec_pal_mode_set,
+ },
+ [VC4_VEC_TV_MODE_PAL_M] = {
+ .mode = &pal_mode,
+ .mode_set = vc4_vec_pal_m_mode_set,
+ },
+};
+
+static enum drm_connector_status
+vc4_vec_connector_detect(struct drm_connector *connector, bool force)
+{
+ return connector_status_unknown;
+}
+
+static void vc4_vec_connector_destroy(struct drm_connector *connector)
+{
+ drm_connector_unregister(connector);
+ drm_connector_cleanup(connector);
+}
+
+static int vc4_vec_connector_get_modes(struct drm_connector *connector)
+{
+ struct drm_connector_state *state = connector->state;
+ struct drm_display_mode *mode;
+
+ mode = drm_mode_duplicate(connector->dev,
+ vc4_vec_tv_modes[state->tv.mode].mode);
+ if (!mode) {
+ DRM_ERROR("Failed to create a new display mode\n");
+ return -ENOMEM;
+ }
+
+ drm_mode_probed_add(connector, mode);
+
+ return 1;
+}
+
+static const struct drm_connector_funcs vc4_vec_connector_funcs = {
+ .dpms = drm_atomic_helper_connector_dpms,
+ .detect = vc4_vec_connector_detect,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .set_property = drm_atomic_helper_connector_set_property,
+ .destroy = vc4_vec_connector_destroy,
+ .reset = drm_atomic_helper_connector_reset,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+};
+
+static const struct drm_connector_helper_funcs vc4_vec_connector_helper_funcs = {
+ .get_modes = vc4_vec_connector_get_modes,
+};
+
+static struct drm_connector *vc4_vec_connector_init(struct drm_device *dev,
+ struct vc4_vec *vec)
+{
+ struct drm_connector *connector = NULL;
+ struct vc4_vec_connector *vec_connector;
+
+ vec_connector = devm_kzalloc(dev->dev, sizeof(*vec_connector),
+ GFP_KERNEL);
+ if (!vec_connector)
+ return ERR_PTR(-ENOMEM);
+
+ connector = &vec_connector->base;
+ connector->interlace_allowed = true;
+
+ vec_connector->encoder = vec->encoder;
+ vec_connector->vec = vec;
+
+ drm_connector_init(dev, connector, &vc4_vec_connector_funcs,
+ DRM_MODE_CONNECTOR_Composite);
+ drm_connector_helper_add(connector, &vc4_vec_connector_helper_funcs);
+
+ drm_object_attach_property(&connector->base,
+ dev->mode_config.tv_mode_property,
+ VC4_VEC_TV_MODE_NTSC);
+ vec->tv_mode = &vc4_vec_tv_modes[VC4_VEC_TV_MODE_NTSC];
+
+ drm_mode_connector_attach_encoder(connector, vec->encoder);
+
+ return connector;
+}
+
+static const struct drm_encoder_funcs vc4_vec_encoder_funcs = {
+ .destroy = drm_encoder_cleanup,
+};
+
+static void vc4_vec_encoder_disable(struct drm_encoder *encoder)
+{
+ struct vc4_vec_encoder *vc4_vec_encoder = to_vc4_vec_encoder(encoder);
+ struct vc4_vec *vec = vc4_vec_encoder->vec;
+ int ret;
+
+ VEC_WRITE(VEC_CFG, 0);
+ VEC_WRITE(VEC_DAC_MISC,
+ VEC_DAC_MISC_VCD_PWRDN |
+ VEC_DAC_MISC_BIAS_PWRDN |
+ VEC_DAC_MISC_DAC_PWRDN |
+ VEC_DAC_MISC_LDO_PWRDN);
+
+ clk_disable_unprepare(vec->clock);
+
+ ret = pm_runtime_put(&vec->pdev->dev);
+ if (ret < 0) {
+ DRM_ERROR("Failed to release power domain: %d\n", ret);
+ return;
+ }
+}
+
+static void vc4_vec_encoder_enable(struct drm_encoder *encoder)
+{
+ struct vc4_vec_encoder *vc4_vec_encoder = to_vc4_vec_encoder(encoder);
+ struct vc4_vec *vec = vc4_vec_encoder->vec;
+ int ret;
+
+ ret = pm_runtime_get_sync(&vec->pdev->dev);
+ if (ret < 0) {
+ DRM_ERROR("Failed to retain power domain: %d\n", ret);
+ return;
+ }
+
+	/*
+	 * We need to set the clock rate each time we enable the encoder,
+	 * because we may share a parent clock with the HDMI encoder, and
+	 * the two drivers request different rates. The good news is that
+	 * these two encoders can never be enabled at the same time, which
+	 * prevents incompatible rate requests.
+	 */
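+	/*
+	 * Note: 108 MHz is 8x the 13.5 MHz pixel clock of the 480i/576i
+	 * modes this encoder drives; that relation is an observation,
+	 * not a documented constraint.
+	 */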
+ ret = clk_set_rate(vec->clock, 108000000);
+ if (ret) {
+ DRM_ERROR("Failed to set clock rate: %d\n", ret);
+ return;
+ }
+
+ ret = clk_prepare_enable(vec->clock);
+ if (ret) {
+ DRM_ERROR("Failed to turn on core clock: %d\n", ret);
+ return;
+ }
+
+ /* Reset the different blocks */
+ VEC_WRITE(VEC_WSE_RESET, 1);
+ VEC_WRITE(VEC_SOFT_RESET, 1);
+
+ /* Disable the CGSM-A and WSE blocks */
+ VEC_WRITE(VEC_WSE_CONTROL, 0);
+
+ /* Write config common to all modes. */
+
+ /*
+ * Color subcarrier phase: phase = 360 * SCHPH / 256.
+	 * 0x28 (= 40) <=> 56.25 deg.
+ */
+ VEC_WRITE(VEC_SCHPH, 0x28);
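+	/*
+	 * Worked example for the formula above: a 39.375 deg phase would
+	 * instead need SCHPH = 39.375 * 256 / 360 = 28 = 0x1c, i.e.
+	 *
+	 *	VEC_WRITE(VEC_SCHPH, 0x1c);
+	 */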
+
+ /*
+ * Reset to default values.
+ */
+ VEC_WRITE(VEC_CLMP0_START, 0xac);
+ VEC_WRITE(VEC_CLMP0_END, 0xec);
+ VEC_WRITE(VEC_CONFIG2,
+ VEC_CONFIG2_UV_DIG_DIS | VEC_CONFIG2_RGB_DIG_DIS);
+ VEC_WRITE(VEC_CONFIG3, VEC_CONFIG3_HORIZ_LEN_STD);
+ VEC_WRITE(VEC_DAC_CONFIG,
+ VEC_DAC_CONFIG_DAC_CTRL(0xc) |
+ VEC_DAC_CONFIG_DRIVER_CTRL(0xc) |
+ VEC_DAC_CONFIG_LDO_BIAS_CTRL(0x46));
+
+ /* Mask all interrupts. */
+ VEC_WRITE(VEC_MASK0, 0);
+
+ vec->tv_mode->mode_set(vec);
+
+ VEC_WRITE(VEC_DAC_MISC,
+ VEC_DAC_MISC_VID_ACT | VEC_DAC_MISC_DAC_RST_N);
+ VEC_WRITE(VEC_CFG, VEC_CFG_VEC_EN);
+}
+
+static bool vc4_vec_encoder_mode_fixup(struct drm_encoder *encoder,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ return true;
+}
+
+static void vc4_vec_encoder_atomic_mode_set(struct drm_encoder *encoder,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
+{
+ struct vc4_vec_encoder *vc4_vec_encoder = to_vc4_vec_encoder(encoder);
+ struct vc4_vec *vec = vc4_vec_encoder->vec;
+
+ vec->tv_mode = &vc4_vec_tv_modes[conn_state->tv.mode];
+}
+
+static int vc4_vec_encoder_atomic_check(struct drm_encoder *encoder,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
+{
+ const struct vc4_vec_tv_mode *vec_mode;
+
+ vec_mode = &vc4_vec_tv_modes[conn_state->tv.mode];
+
+ if (conn_state->crtc &&
+ !drm_mode_equal(vec_mode->mode, &crtc_state->adjusted_mode))
+ return -EINVAL;
+
+ return 0;
+}
+
+static const struct drm_encoder_helper_funcs vc4_vec_encoder_helper_funcs = {
+ .disable = vc4_vec_encoder_disable,
+ .enable = vc4_vec_encoder_enable,
+ .mode_fixup = vc4_vec_encoder_mode_fixup,
+ .atomic_check = vc4_vec_encoder_atomic_check,
+ .atomic_mode_set = vc4_vec_encoder_atomic_mode_set,
+};
+
+static const struct of_device_id vc4_vec_dt_match[] = {
+ { .compatible = "brcm,bcm2835-vec", .data = NULL },
+ { /* sentinel */ },
+};
+
+static const char * const tv_mode_names[] = {
+ [VC4_VEC_TV_MODE_NTSC] = "NTSC",
+ [VC4_VEC_TV_MODE_NTSC_J] = "NTSC-J",
+ [VC4_VEC_TV_MODE_PAL] = "PAL",
+ [VC4_VEC_TV_MODE_PAL_M] = "PAL-M",
+};
+
+static int vc4_vec_bind(struct device *dev, struct device *master, void *data)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct drm_device *drm = dev_get_drvdata(master);
+ struct vc4_dev *vc4 = to_vc4_dev(drm);
+ struct vc4_vec *vec;
+ struct vc4_vec_encoder *vc4_vec_encoder;
+ int ret;
+
+ ret = drm_mode_create_tv_properties(drm, ARRAY_SIZE(tv_mode_names),
+ tv_mode_names);
+ if (ret)
+ return ret;
+
+ vec = devm_kzalloc(dev, sizeof(*vec), GFP_KERNEL);
+ if (!vec)
+ return -ENOMEM;
+
+ vc4_vec_encoder = devm_kzalloc(dev, sizeof(*vc4_vec_encoder),
+ GFP_KERNEL);
+ if (!vc4_vec_encoder)
+ return -ENOMEM;
+ vc4_vec_encoder->base.type = VC4_ENCODER_TYPE_VEC;
+ vc4_vec_encoder->vec = vec;
+ vec->encoder = &vc4_vec_encoder->base.base;
+
+ vec->pdev = pdev;
+ vec->regs = vc4_ioremap_regs(pdev, 0);
+ if (IS_ERR(vec->regs))
+ return PTR_ERR(vec->regs);
+
+ vec->clock = devm_clk_get(dev, NULL);
+ if (IS_ERR(vec->clock)) {
+ ret = PTR_ERR(vec->clock);
+ if (ret != -EPROBE_DEFER)
+ DRM_ERROR("Failed to get clock: %d\n", ret);
+ return ret;
+ }
+
+ pm_runtime_enable(dev);
+
+ drm_encoder_init(drm, vec->encoder, &vc4_vec_encoder_funcs,
+ DRM_MODE_ENCODER_TVDAC, NULL);
+ drm_encoder_helper_add(vec->encoder, &vc4_vec_encoder_helper_funcs);
+
+ vec->connector = vc4_vec_connector_init(drm, vec);
+ if (IS_ERR(vec->connector)) {
+ ret = PTR_ERR(vec->connector);
+ goto err_destroy_encoder;
+ }
+
+ dev_set_drvdata(dev, vec);
+
+ vc4->vec = vec;
+
+ return 0;
+
+err_destroy_encoder:
+ drm_encoder_cleanup(vec->encoder);
+ pm_runtime_disable(dev);
+
+ return ret;
+}
+
+static void vc4_vec_unbind(struct device *dev, struct device *master,
+ void *data)
+{
+ struct drm_device *drm = dev_get_drvdata(master);
+ struct vc4_dev *vc4 = to_vc4_dev(drm);
+ struct vc4_vec *vec = dev_get_drvdata(dev);
+
+ vc4_vec_connector_destroy(vec->connector);
+ drm_encoder_cleanup(vec->encoder);
+ pm_runtime_disable(dev);
+
+ vc4->vec = NULL;
+}
+
+static const struct component_ops vc4_vec_ops = {
+ .bind = vc4_vec_bind,
+ .unbind = vc4_vec_unbind,
+};
+
+static int vc4_vec_dev_probe(struct platform_device *pdev)
+{
+ return component_add(&pdev->dev, &vc4_vec_ops);
+}
+
+static int vc4_vec_dev_remove(struct platform_device *pdev)
+{
+ component_del(&pdev->dev, &vc4_vec_ops);
+ return 0;
+}
+
+struct platform_driver vc4_vec_driver = {
+ .probe = vc4_vec_dev_probe,
+ .remove = vc4_vec_dev_remove,
+ .driver = {
+ .name = "vc4_vec",
+ .of_match_table = vc4_vec_dt_match,
+ },
+};
diff --git a/drivers/gpu/drm/vgem/vgem_drv.c b/drivers/gpu/drm/vgem/vgem_drv.c
index f36c14729b55..477e07f0ecb6 100644
--- a/drivers/gpu/drm/vgem/vgem_drv.c
+++ b/drivers/gpu/drm/vgem/vgem_drv.c
@@ -54,7 +54,7 @@ static int vgem_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
struct drm_vgem_gem_object *obj = vma->vm_private_data;
/* We don't use vmf->pgoff since that has the fake offset */
- unsigned long vaddr = (unsigned long)vmf->virtual_address;
+ unsigned long vaddr = vmf->address;
struct page *page;
page = shmem_read_mapping_page(file_inode(obj->base.filp)->i_mapping,
diff --git a/drivers/gpu/drm/vgem/vgem_fence.c b/drivers/gpu/drm/vgem/vgem_fence.c
index 5c57c1ffa1f9..da25dfe7b80e 100644
--- a/drivers/gpu/drm/vgem/vgem_fence.c
+++ b/drivers/gpu/drm/vgem/vgem_fence.c
@@ -28,56 +28,57 @@
#define VGEM_FENCE_TIMEOUT (10*HZ)
struct vgem_fence {
- struct fence base;
+ struct dma_fence base;
struct spinlock lock;
struct timer_list timer;
};
-static const char *vgem_fence_get_driver_name(struct fence *fence)
+static const char *vgem_fence_get_driver_name(struct dma_fence *fence)
{
return "vgem";
}
-static const char *vgem_fence_get_timeline_name(struct fence *fence)
+static const char *vgem_fence_get_timeline_name(struct dma_fence *fence)
{
return "unbound";
}
-static bool vgem_fence_signaled(struct fence *fence)
+static bool vgem_fence_signaled(struct dma_fence *fence)
{
return false;
}
-static bool vgem_fence_enable_signaling(struct fence *fence)
+static bool vgem_fence_enable_signaling(struct dma_fence *fence)
{
return true;
}
-static void vgem_fence_release(struct fence *base)
+static void vgem_fence_release(struct dma_fence *base)
{
struct vgem_fence *fence = container_of(base, typeof(*fence), base);
del_timer_sync(&fence->timer);
- fence_free(&fence->base);
+ dma_fence_free(&fence->base);
}
-static void vgem_fence_value_str(struct fence *fence, char *str, int size)
+static void vgem_fence_value_str(struct dma_fence *fence, char *str, int size)
{
snprintf(str, size, "%u", fence->seqno);
}
-static void vgem_fence_timeline_value_str(struct fence *fence, char *str,
+static void vgem_fence_timeline_value_str(struct dma_fence *fence, char *str,
int size)
{
- snprintf(str, size, "%u", fence_is_signaled(fence) ? fence->seqno : 0);
+ snprintf(str, size, "%u",
+ dma_fence_is_signaled(fence) ? fence->seqno : 0);
}
-static const struct fence_ops vgem_fence_ops = {
+static const struct dma_fence_ops vgem_fence_ops = {
.get_driver_name = vgem_fence_get_driver_name,
.get_timeline_name = vgem_fence_get_timeline_name,
.enable_signaling = vgem_fence_enable_signaling,
.signaled = vgem_fence_signaled,
- .wait = fence_default_wait,
+ .wait = dma_fence_default_wait,
.release = vgem_fence_release,
.fence_value_str = vgem_fence_value_str,
@@ -88,11 +89,11 @@ static void vgem_fence_timeout(unsigned long data)
{
struct vgem_fence *fence = (struct vgem_fence *)data;
- fence_signal(&fence->base);
+ dma_fence_signal(&fence->base);
}
-static struct fence *vgem_fence_create(struct vgem_file *vfile,
- unsigned int flags)
+static struct dma_fence *vgem_fence_create(struct vgem_file *vfile,
+ unsigned int flags)
{
struct vgem_fence *fence;
@@ -101,8 +102,8 @@ static struct fence *vgem_fence_create(struct vgem_file *vfile,
return NULL;
spin_lock_init(&fence->lock);
- fence_init(&fence->base, &vgem_fence_ops, &fence->lock,
- fence_context_alloc(1), 1);
+ dma_fence_init(&fence->base, &vgem_fence_ops, &fence->lock,
+ dma_fence_context_alloc(1), 1);
setup_timer(&fence->timer, vgem_fence_timeout, (unsigned long)fence);
@@ -125,7 +126,6 @@ static int attach_dmabuf(struct drm_device *dev,
return PTR_ERR(dmabuf);
obj->dma_buf = dmabuf;
- drm_gem_object_reference(obj);
return 0;
}
@@ -157,7 +157,7 @@ int vgem_fence_attach_ioctl(struct drm_device *dev,
struct vgem_file *vfile = file->driver_priv;
struct reservation_object *resv;
struct drm_gem_object *obj;
- struct fence *fence;
+ struct dma_fence *fence;
int ret;
if (arg->flags & ~VGEM_FENCE_WRITE)
@@ -190,12 +190,12 @@ int vgem_fence_attach_ioctl(struct drm_device *dev,
/* Expose the fence via the dma-buf */
ret = 0;
- mutex_lock(&resv->lock.base);
+ ww_mutex_lock(&resv->lock, NULL);
if (arg->flags & VGEM_FENCE_WRITE)
reservation_object_add_excl_fence(resv, fence);
else if ((ret = reservation_object_reserve_shared(resv)) == 0)
reservation_object_add_shared_fence(resv, fence);
- mutex_unlock(&resv->lock.base);
+ ww_mutex_unlock(&resv->lock);
/* Record the fence in our idr for later signaling */
if (ret == 0) {
@@ -209,8 +209,8 @@ int vgem_fence_attach_ioctl(struct drm_device *dev,
}
err_fence:
if (ret) {
- fence_signal(fence);
- fence_put(fence);
+ dma_fence_signal(fence);
+ dma_fence_put(fence);
}
err:
drm_gem_object_unreference_unlocked(obj);
@@ -239,7 +239,7 @@ int vgem_fence_signal_ioctl(struct drm_device *dev,
{
struct vgem_file *vfile = file->driver_priv;
struct drm_vgem_fence_signal *arg = data;
- struct fence *fence;
+ struct dma_fence *fence;
int ret = 0;
if (arg->flags)
@@ -253,11 +253,11 @@ int vgem_fence_signal_ioctl(struct drm_device *dev,
if (IS_ERR(fence))
return PTR_ERR(fence);
- if (fence_is_signaled(fence))
+ if (dma_fence_is_signaled(fence))
ret = -ETIMEDOUT;
- fence_signal(fence);
- fence_put(fence);
+ dma_fence_signal(fence);
+ dma_fence_put(fence);
return ret;
}
@@ -271,8 +271,8 @@ int vgem_fence_open(struct vgem_file *vfile)
static int __vgem_fence_idr_fini(int id, void *p, void *data)
{
- fence_signal(p);
- fence_put(p);
+ dma_fence_signal(p);
+ dma_fence_put(p);
return 0;
}
diff --git a/drivers/gpu/drm/via/via_drv.c b/drivers/gpu/drm/via/via_drv.c
index e5582bab7e3c..9e0e5392b6ec 100644
--- a/drivers/gpu/drm/via/via_drv.c
+++ b/drivers/gpu/drm/via/via_drv.c
@@ -64,9 +64,7 @@ static const struct file_operations via_driver_fops = {
.unlocked_ioctl = drm_ioctl,
.mmap = drm_legacy_mmap,
.poll = drm_poll,
-#ifdef CONFIG_COMPAT
.compat_ioctl = drm_compat_ioctl,
-#endif
.llseek = noop_llseek,
};
diff --git a/drivers/gpu/drm/virtio/Kconfig b/drivers/gpu/drm/virtio/Kconfig
index e1afc3d3f8d9..81d1807ac228 100644
--- a/drivers/gpu/drm/virtio/Kconfig
+++ b/drivers/gpu/drm/virtio/Kconfig
@@ -1,10 +1,10 @@
config DRM_VIRTIO_GPU
tristate "Virtio GPU driver"
depends on DRM && VIRTIO
- select DRM_KMS_HELPER
- select DRM_TTM
+ select DRM_KMS_HELPER
+ select DRM_TTM
help
This is the virtual GPU driver for virtio. It can be used with
- QEMU based VMMs (like KVM or Xen).
+ QEMU based VMMs (like KVM or Xen).
If unsure say M.
diff --git a/drivers/gpu/drm/virtio/virtgpu_drm_bus.c b/drivers/gpu/drm/virtio/virtgpu_drm_bus.c
index 49e5996cb9f2..3b97d50fd392 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drm_bus.c
+++ b/drivers/gpu/drm/virtio/virtgpu_drm_bus.c
@@ -28,16 +28,6 @@
#include "virtgpu_drv.h"
-int drm_virtio_set_busid(struct drm_device *dev, struct drm_master *master)
-{
- struct pci_dev *pdev = dev->pdev;
-
- if (pdev) {
- return drm_pci_set_busid(dev, master);
- }
- return 0;
-}
-
static void virtio_pci_kick_out_firmware_fb(struct pci_dev *pci_dev)
{
struct apertures_struct *ap;
@@ -71,13 +61,22 @@ int drm_virtio_init(struct drm_driver *driver, struct virtio_device *vdev)
if (strcmp(vdev->dev.parent->bus->name, "pci") == 0) {
struct pci_dev *pdev = to_pci_dev(vdev->dev.parent);
+ const char *pname = dev_name(&pdev->dev);
bool vga = (pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA;
+ char unique[20];
- DRM_INFO("pci: %s detected\n",
- vga ? "virtio-vga" : "virtio-gpu-pci");
+ DRM_INFO("pci: %s detected at %s\n",
+ vga ? "virtio-vga" : "virtio-gpu-pci",
+ pname);
dev->pdev = pdev;
if (vga)
virtio_pci_kick_out_firmware_fb(pdev);
+
+ snprintf(unique, sizeof(unique), "pci:%s", pname);
+ ret = drm_dev_set_unique(dev, unique);
+ if (ret)
+ goto err_free;
+
}
ret = drm_dev_register(dev, 0);
diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.c b/drivers/gpu/drm/virtio/virtgpu_drv.c
index 5820b7020ae5..d82489815096 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drv.c
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.c
@@ -108,16 +108,13 @@ static const struct file_operations virtio_gpu_driver_fops = {
.read = drm_read,
.unlocked_ioctl = drm_ioctl,
.release = drm_release,
-#ifdef CONFIG_COMPAT
.compat_ioctl = drm_compat_ioctl,
-#endif
.llseek = noop_llseek,
};
static struct drm_driver driver = {
.driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME | DRIVER_RENDER | DRIVER_ATOMIC,
- .set_busid = drm_virtio_set_busid,
.load = virtio_gpu_driver_load,
.unload = virtio_gpu_driver_unload,
.open = virtio_gpu_driver_open,
diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.h b/drivers/gpu/drm/virtio/virtgpu_drv.h
index ae59080d63d1..08906c8ce3fa 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drv.h
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.h
@@ -49,7 +49,6 @@
#define DRIVER_PATCHLEVEL 1
/* virtgpu_drm_bus.c */
-int drm_virtio_set_busid(struct drm_device *dev, struct drm_master *master);
int drm_virtio_init(struct drm_driver *driver, struct virtio_device *vdev);
struct virtio_gpu_object {
@@ -82,7 +81,7 @@ struct virtio_gpu_fence_driver {
};
struct virtio_gpu_fence {
- struct fence f;
+ struct dma_fence f;
struct virtio_gpu_fence_driver *drv;
struct list_head node;
uint64_t seq;
diff --git a/drivers/gpu/drm/virtio/virtgpu_fb.c b/drivers/gpu/drm/virtio/virtgpu_fb.c
index 2242a80866a9..dd21f950e129 100644
--- a/drivers/gpu/drm/virtio/virtgpu_fb.c
+++ b/drivers/gpu/drm/virtio/virtgpu_fb.c
@@ -200,16 +200,10 @@ static void virtio_gpu_3d_imageblit(struct fb_info *info,
static struct fb_ops virtio_gpufb_ops = {
.owner = THIS_MODULE,
- .fb_check_var = drm_fb_helper_check_var,
- .fb_set_par = drm_fb_helper_set_par, /* TODO: copy vmwgfx */
+ DRM_FB_HELPER_DEFAULT_OPS,
.fb_fillrect = virtio_gpu_3d_fillrect,
.fb_copyarea = virtio_gpu_3d_copyarea,
.fb_imageblit = virtio_gpu_3d_imageblit,
- .fb_pan_display = drm_fb_helper_pan_display,
- .fb_blank = drm_fb_helper_blank,
- .fb_setcmap = drm_fb_helper_setcmap,
- .fb_debug_enter = drm_fb_helper_debug_enter,
- .fb_debug_leave = drm_fb_helper_debug_leave,
};
static int virtio_gpu_vmap_fb(struct virtio_gpu_device *vgdev,
diff --git a/drivers/gpu/drm/virtio/virtgpu_fence.c b/drivers/gpu/drm/virtio/virtgpu_fence.c
index f3f70fa8a4c7..23353521f903 100644
--- a/drivers/gpu/drm/virtio/virtgpu_fence.c
+++ b/drivers/gpu/drm/virtio/virtgpu_fence.c
@@ -26,22 +26,22 @@
#include <drm/drmP.h>
#include "virtgpu_drv.h"
-static const char *virtio_get_driver_name(struct fence *f)
+static const char *virtio_get_driver_name(struct dma_fence *f)
{
return "virtio_gpu";
}
-static const char *virtio_get_timeline_name(struct fence *f)
+static const char *virtio_get_timeline_name(struct dma_fence *f)
{
return "controlq";
}
-static bool virtio_enable_signaling(struct fence *f)
+static bool virtio_enable_signaling(struct dma_fence *f)
{
return true;
}
-static bool virtio_signaled(struct fence *f)
+static bool virtio_signaled(struct dma_fence *f)
{
struct virtio_gpu_fence *fence = to_virtio_fence(f);
@@ -50,26 +50,26 @@ static bool virtio_signaled(struct fence *f)
return false;
}
-static void virtio_fence_value_str(struct fence *f, char *str, int size)
+static void virtio_fence_value_str(struct dma_fence *f, char *str, int size)
{
struct virtio_gpu_fence *fence = to_virtio_fence(f);
snprintf(str, size, "%llu", fence->seq);
}
-static void virtio_timeline_value_str(struct fence *f, char *str, int size)
+static void virtio_timeline_value_str(struct dma_fence *f, char *str, int size)
{
struct virtio_gpu_fence *fence = to_virtio_fence(f);
snprintf(str, size, "%llu", (u64)atomic64_read(&fence->drv->last_seq));
}
-static const struct fence_ops virtio_fence_ops = {
+static const struct dma_fence_ops virtio_fence_ops = {
.get_driver_name = virtio_get_driver_name,
.get_timeline_name = virtio_get_timeline_name,
.enable_signaling = virtio_enable_signaling,
.signaled = virtio_signaled,
- .wait = fence_default_wait,
+ .wait = dma_fence_default_wait,
.fence_value_str = virtio_fence_value_str,
.timeline_value_str = virtio_timeline_value_str,
};
@@ -88,9 +88,9 @@ int virtio_gpu_fence_emit(struct virtio_gpu_device *vgdev,
spin_lock_irqsave(&drv->lock, irq_flags);
(*fence)->drv = drv;
(*fence)->seq = ++drv->sync_seq;
- fence_init(&(*fence)->f, &virtio_fence_ops, &drv->lock,
- drv->context, (*fence)->seq);
- fence_get(&(*fence)->f);
+ dma_fence_init(&(*fence)->f, &virtio_fence_ops, &drv->lock,
+ drv->context, (*fence)->seq);
+ dma_fence_get(&(*fence)->f);
list_add_tail(&(*fence)->node, &drv->fences);
spin_unlock_irqrestore(&drv->lock, irq_flags);
@@ -111,9 +111,9 @@ void virtio_gpu_fence_event_process(struct virtio_gpu_device *vgdev,
list_for_each_entry_safe(fence, tmp, &drv->fences, node) {
if (last_seq < fence->seq)
continue;
- fence_signal_locked(&fence->f);
+ dma_fence_signal_locked(&fence->f);
list_del(&fence->node);
- fence_put(&fence->f);
+ dma_fence_put(&fence->f);
}
spin_unlock_irqrestore(&drv->lock, irq_flags);
}
diff --git a/drivers/gpu/drm/virtio/virtgpu_ioctl.c b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
index 818478b4c4f0..61f3a963af95 100644
--- a/drivers/gpu/drm/virtio/virtgpu_ioctl.c
+++ b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
@@ -172,7 +172,7 @@ static int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data,
/* fence the command bo */
virtio_gpu_unref_list(&validate_list);
drm_free_large(buflist);
- fence_put(&fence->f);
+ dma_fence_put(&fence->f);
return 0;
out_unresv:
@@ -298,7 +298,7 @@ static int virtio_gpu_resource_create_ioctl(struct drm_device *dev, void *data,
drm_gem_object_release(obj);
if (vgdev->has_virgl_3d) {
virtio_gpu_unref_list(&validate_list);
- fence_put(&fence->f);
+ dma_fence_put(&fence->f);
}
return ret;
}
@@ -309,13 +309,13 @@ static int virtio_gpu_resource_create_ioctl(struct drm_device *dev, void *data,
if (vgdev->has_virgl_3d) {
virtio_gpu_unref_list(&validate_list);
- fence_put(&fence->f);
+ dma_fence_put(&fence->f);
}
return 0;
fail_unref:
if (vgdev->has_virgl_3d) {
virtio_gpu_unref_list(&validate_list);
- fence_put(&fence->f);
+ dma_fence_put(&fence->f);
}
//fail_obj:
// drm_gem_object_handle_unreference_unlocked(obj);
@@ -383,7 +383,7 @@ static int virtio_gpu_transfer_from_host_ioctl(struct drm_device *dev,
reservation_object_add_excl_fence(qobj->tbo.resv,
&fence->f);
- fence_put(&fence->f);
+ dma_fence_put(&fence->f);
out_unres:
virtio_gpu_object_unreserve(qobj);
out:
@@ -431,7 +431,7 @@ static int virtio_gpu_transfer_to_host_ioctl(struct drm_device *dev, void *data,
args->level, &box, &fence);
reservation_object_add_excl_fence(qobj->tbo.resv,
&fence->f);
- fence_put(&fence->f);
+ dma_fence_put(&fence->f);
}
out_unres:
diff --git a/drivers/gpu/drm/virtio/virtgpu_kms.c b/drivers/gpu/drm/virtio/virtgpu_kms.c
index 036b0fbae0fb..1235519853f4 100644
--- a/drivers/gpu/drm/virtio/virtgpu_kms.c
+++ b/drivers/gpu/drm/virtio/virtgpu_kms.c
@@ -159,7 +159,7 @@ int virtio_gpu_driver_load(struct drm_device *dev, unsigned long flags)
virtio_gpu_init_vq(&vgdev->ctrlq, virtio_gpu_dequeue_ctrl_func);
virtio_gpu_init_vq(&vgdev->cursorq, virtio_gpu_dequeue_cursor_func);
- vgdev->fence_drv.context = fence_context_alloc(1);
+ vgdev->fence_drv.context = dma_fence_context_alloc(1);
spin_lock_init(&vgdev->fence_drv.lock);
INIT_LIST_HEAD(&vgdev->fence_drv.fences);
INIT_LIST_HEAD(&vgdev->cap_cache);
diff --git a/drivers/gpu/drm/virtio/virtgpu_plane.c b/drivers/gpu/drm/virtio/virtgpu_plane.c
index ba28c0f6f28a..11288ffa4af6 100644
--- a/drivers/gpu/drm/virtio/virtgpu_plane.c
+++ b/drivers/gpu/drm/virtio/virtgpu_plane.c
@@ -88,8 +88,8 @@ static void virtio_gpu_primary_plane_update(struct drm_plane *plane,
(vgdev, handle, 0,
cpu_to_le32(plane->state->src_w >> 16),
cpu_to_le32(plane->state->src_h >> 16),
- plane->state->src_x >> 16,
- plane->state->src_y >> 16, NULL);
+ cpu_to_le32(plane->state->src_x >> 16),
+ cpu_to_le32(plane->state->src_y >> 16), NULL);
}
} else {
handle = 0;
@@ -152,7 +152,7 @@ static void virtio_gpu_cursor_plane_update(struct drm_plane *plane,
if (!ret) {
reservation_object_add_excl_fence(bo->tbo.resv,
&fence->f);
- fence_put(&fence->f);
+ dma_fence_put(&fence->f);
fence = NULL;
virtio_gpu_object_unreserve(bo);
virtio_gpu_object_wait(bo, false);
diff --git a/drivers/gpu/drm/virtio/virtgpu_ttm.c b/drivers/gpu/drm/virtio/virtgpu_ttm.c
index 80482ac5f95d..4a1de9f81193 100644
--- a/drivers/gpu/drm/virtio/virtgpu_ttm.c
+++ b/drivers/gpu/drm/virtio/virtgpu_ttm.c
@@ -425,6 +425,7 @@ static struct ttm_bo_driver virtio_gpu_bo_driver = {
.ttm_tt_unpopulate = &virtio_gpu_ttm_tt_unpopulate,
.invalidate_caches = &virtio_gpu_invalidate_caches,
.init_mem_type = &virtio_gpu_init_mem_type,
+ .eviction_valuable = ttm_bo_eviction_valuable,
.evict_flags = &virtio_gpu_evict_flags,
.move = &virtio_gpu_bo_move,
.verify_access = &virtio_gpu_verify_access,
diff --git a/drivers/gpu/drm/virtio/virtgpu_vq.c b/drivers/gpu/drm/virtio/virtgpu_vq.c
index 5a0f8a745b9d..43ea0dc957d2 100644
--- a/drivers/gpu/drm/virtio/virtgpu_vq.c
+++ b/drivers/gpu/drm/virtio/virtgpu_vq.c
@@ -75,7 +75,7 @@ void virtio_gpu_cursor_ack(struct virtqueue *vq)
int virtio_gpu_alloc_vbufs(struct virtio_gpu_device *vgdev)
{
struct virtio_gpu_vbuffer *vbuf;
- int i, size, count = 0;
+ int i, size, count = 16;
void *ptr;
INIT_LIST_HEAD(&vgdev->free_vbufs);
@@ -109,8 +109,10 @@ void virtio_gpu_free_vbufs(struct virtio_gpu_device *vgdev)
spin_lock(&vgdev->free_vbufs_lock);
for (i = 0; i < count; i++) {
- if (WARN_ON(list_empty(&vgdev->free_vbufs)))
+ if (WARN_ON(list_empty(&vgdev->free_vbufs))) {
+ spin_unlock(&vgdev->free_vbufs_lock);
return;
+ }
vbuf = list_first_entry(&vgdev->free_vbufs,
struct virtio_gpu_vbuffer, list);
list_del(&vbuf->list);
@@ -295,6 +297,8 @@ void virtio_gpu_dequeue_cursor_func(struct work_struct *work)
static int virtio_gpu_queue_ctrl_buffer_locked(struct virtio_gpu_device *vgdev,
struct virtio_gpu_vbuffer *vbuf)
+ __releases(&vgdev->ctrlq.qlock)
+ __acquires(&vgdev->ctrlq.qlock)
{
struct virtqueue *vq = vgdev->ctrlq.vq;
struct scatterlist *sgs[3], vcmd, vout, vresp;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
index 78b75ee3c931..c894a48a74a6 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
@@ -849,6 +849,7 @@ struct ttm_bo_driver vmw_bo_driver = {
.ttm_tt_unpopulate = &vmw_ttm_unpopulate,
.invalidate_caches = vmw_invalidate_caches,
.init_mem_type = vmw_init_mem_type,
+ .eviction_valuable = ttm_bo_eviction_valuable,
.evict_flags = vmw_evict_flags,
.move = NULL,
.verify_access = vmw_verify_access,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
index d2d93959b119..723fd763da8e 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
@@ -465,33 +465,34 @@ static int vmw_fb_kms_detach(struct vmw_fb_par *par,
static int vmw_fb_kms_framebuffer(struct fb_info *info)
{
- struct drm_mode_fb_cmd mode_cmd;
+ struct drm_mode_fb_cmd2 mode_cmd;
struct vmw_fb_par *par = info->par;
struct fb_var_screeninfo *var = &info->var;
struct drm_framebuffer *cur_fb;
struct vmw_framebuffer *vfb;
- int ret = 0;
+ int ret = 0, depth;
size_t new_bo_size;
- ret = vmw_fb_compute_depth(var, &mode_cmd.depth);
+ ret = vmw_fb_compute_depth(var, &depth);
if (ret)
return ret;
mode_cmd.width = var->xres;
mode_cmd.height = var->yres;
- mode_cmd.bpp = var->bits_per_pixel;
- mode_cmd.pitch = ((mode_cmd.bpp + 7) / 8) * mode_cmd.width;
+ mode_cmd.pitches[0] = ((var->bits_per_pixel + 7) / 8) * mode_cmd.width;
+	mode_cmd.pixel_format =
+		drm_mode_legacy_fb_format(var->bits_per_pixel,
+					  depth);
cur_fb = par->set_fb;
if (cur_fb && cur_fb->width == mode_cmd.width &&
cur_fb->height == mode_cmd.height &&
- cur_fb->bits_per_pixel == mode_cmd.bpp &&
- cur_fb->depth == mode_cmd.depth &&
- cur_fb->pitches[0] == mode_cmd.pitch)
+ cur_fb->pixel_format == mode_cmd.pixel_format &&
+ cur_fb->pitches[0] == mode_cmd.pitches[0])
return 0;
/* Need new buffer object ? */
- new_bo_size = (size_t) mode_cmd.pitch * (size_t) mode_cmd.height;
+ new_bo_size = (size_t) mode_cmd.pitches[0] * (size_t) mode_cmd.height;
ret = vmw_fb_kms_detach(par,
par->bo_size < new_bo_size ||
par->bo_size > 2*new_bo_size,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
index 26ac8e80a478..6541dd8b82dc 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
@@ -108,7 +108,7 @@ fman_from_fence(struct vmw_fence_obj *fence)
* objects with actions attached to them.
*/
-static void vmw_fence_obj_destroy(struct fence *f)
+static void vmw_fence_obj_destroy(struct dma_fence *f)
{
struct vmw_fence_obj *fence =
container_of(f, struct vmw_fence_obj, base);
@@ -123,17 +123,17 @@ static void vmw_fence_obj_destroy(struct fence *f)
fence->destroy(fence);
}
-static const char *vmw_fence_get_driver_name(struct fence *f)
+static const char *vmw_fence_get_driver_name(struct dma_fence *f)
{
return "vmwgfx";
}
-static const char *vmw_fence_get_timeline_name(struct fence *f)
+static const char *vmw_fence_get_timeline_name(struct dma_fence *f)
{
return "svga";
}
-static bool vmw_fence_enable_signaling(struct fence *f)
+static bool vmw_fence_enable_signaling(struct dma_fence *f)
{
struct vmw_fence_obj *fence =
container_of(f, struct vmw_fence_obj, base);
@@ -152,12 +152,12 @@ static bool vmw_fence_enable_signaling(struct fence *f)
}
struct vmwgfx_wait_cb {
- struct fence_cb base;
+ struct dma_fence_cb base;
struct task_struct *task;
};
static void
-vmwgfx_wait_cb(struct fence *fence, struct fence_cb *cb)
+vmwgfx_wait_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
{
struct vmwgfx_wait_cb *wait =
container_of(cb, struct vmwgfx_wait_cb, base);
@@ -167,7 +167,7 @@ vmwgfx_wait_cb(struct fence *fence, struct fence_cb *cb)
static void __vmw_fences_update(struct vmw_fence_manager *fman);
-static long vmw_fence_wait(struct fence *f, bool intr, signed long timeout)
+static long vmw_fence_wait(struct dma_fence *f, bool intr, signed long timeout)
{
struct vmw_fence_obj *fence =
container_of(f, struct vmw_fence_obj, base);
@@ -197,7 +197,7 @@ static long vmw_fence_wait(struct fence *f, bool intr, signed long timeout)
while (ret > 0) {
__vmw_fences_update(fman);
- if (test_bit(FENCE_FLAG_SIGNALED_BIT, &f->flags))
+ if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &f->flags))
break;
if (intr)
@@ -225,7 +225,7 @@ out:
return ret;
}
-static struct fence_ops vmw_fence_ops = {
+static struct dma_fence_ops vmw_fence_ops = {
.get_driver_name = vmw_fence_get_driver_name,
.get_timeline_name = vmw_fence_get_timeline_name,
.enable_signaling = vmw_fence_enable_signaling,
@@ -298,7 +298,7 @@ struct vmw_fence_manager *vmw_fence_manager_init(struct vmw_private *dev_priv)
fman->event_fence_action_size =
ttm_round_pot(sizeof(struct vmw_event_fence_action));
mutex_init(&fman->goal_irq_mutex);
- fman->ctx = fence_context_alloc(1);
+ fman->ctx = dma_fence_context_alloc(1);
return fman;
}
@@ -326,8 +326,8 @@ static int vmw_fence_obj_init(struct vmw_fence_manager *fman,
unsigned long irq_flags;
int ret = 0;
- fence_init(&fence->base, &vmw_fence_ops, &fman->lock,
- fman->ctx, seqno);
+ dma_fence_init(&fence->base, &vmw_fence_ops, &fman->lock,
+ fman->ctx, seqno);
INIT_LIST_HEAD(&fence->seq_passed_actions);
fence->destroy = destroy;
@@ -431,7 +431,7 @@ static bool vmw_fence_goal_check_locked(struct vmw_fence_obj *fence)
u32 goal_seqno;
u32 *fifo_mem;
- if (fence_is_signaled_locked(&fence->base))
+ if (dma_fence_is_signaled_locked(&fence->base))
return false;
fifo_mem = fman->dev_priv->mmio_virt;
@@ -459,7 +459,7 @@ rerun:
list_for_each_entry_safe(fence, next_fence, &fman->fence_list, head) {
if (seqno - fence->base.seqno < VMW_FENCE_WRAP) {
list_del_init(&fence->head);
- fence_signal_locked(&fence->base);
+ dma_fence_signal_locked(&fence->base);
INIT_LIST_HEAD(&action_list);
list_splice_init(&fence->seq_passed_actions,
&action_list);
@@ -500,18 +500,18 @@ bool vmw_fence_obj_signaled(struct vmw_fence_obj *fence)
{
struct vmw_fence_manager *fman = fman_from_fence(fence);
- if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->base.flags))
+ if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->base.flags))
return 1;
vmw_fences_update(fman);
- return fence_is_signaled(&fence->base);
+ return dma_fence_is_signaled(&fence->base);
}
int vmw_fence_obj_wait(struct vmw_fence_obj *fence, bool lazy,
bool interruptible, unsigned long timeout)
{
- long ret = fence_wait_timeout(&fence->base, interruptible, timeout);
+ long ret = dma_fence_wait_timeout(&fence->base, interruptible, timeout);
if (likely(ret > 0))
return 0;
@@ -530,7 +530,7 @@ void vmw_fence_obj_flush(struct vmw_fence_obj *fence)
static void vmw_fence_destroy(struct vmw_fence_obj *fence)
{
- fence_free(&fence->base);
+ dma_fence_free(&fence->base);
}
int vmw_fence_create(struct vmw_fence_manager *fman,
@@ -669,7 +669,7 @@ void vmw_fence_fifo_down(struct vmw_fence_manager *fman)
struct vmw_fence_obj *fence =
list_entry(fman->fence_list.prev, struct vmw_fence_obj,
head);
- fence_get(&fence->base);
+ dma_fence_get(&fence->base);
spin_unlock_irq(&fman->lock);
ret = vmw_fence_obj_wait(fence, false, false,
@@ -677,7 +677,7 @@ void vmw_fence_fifo_down(struct vmw_fence_manager *fman)
if (unlikely(ret != 0)) {
list_del_init(&fence->head);
- fence_signal(&fence->base);
+ dma_fence_signal(&fence->base);
INIT_LIST_HEAD(&action_list);
list_splice_init(&fence->seq_passed_actions,
&action_list);
@@ -685,7 +685,7 @@ void vmw_fence_fifo_down(struct vmw_fence_manager *fman)
}
BUG_ON(!list_empty(&fence->head));
- fence_put(&fence->base);
+ dma_fence_put(&fence->base);
spin_lock_irq(&fman->lock);
}
spin_unlock_irq(&fman->lock);
@@ -884,7 +884,7 @@ static void vmw_fence_obj_add_action(struct vmw_fence_obj *fence,
spin_lock_irqsave(&fman->lock, irq_flags);
fman->pending_actions[action->type]++;
- if (fence_is_signaled_locked(&fence->base)) {
+ if (dma_fence_is_signaled_locked(&fence->base)) {
struct list_head action_list;
INIT_LIST_HEAD(&action_list);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.h b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.h
index 83ae301ee141..d9d85aa6ed20 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.h
@@ -27,7 +27,7 @@
#ifndef _VMWGFX_FENCE_H_
-#include <linux/fence.h>
+#include <linux/dma-fence.h>
#define VMW_FENCE_WAIT_TIMEOUT (5*HZ)
@@ -52,7 +52,7 @@ struct vmw_fence_action {
};
struct vmw_fence_obj {
- struct fence base;
+ struct dma_fence base;
struct list_head head;
struct list_head seq_passed_actions;
@@ -71,14 +71,14 @@ vmw_fence_obj_unreference(struct vmw_fence_obj **fence_p)
*fence_p = NULL;
if (fence)
- fence_put(&fence->base);
+ dma_fence_put(&fence->base);
}
static inline struct vmw_fence_obj *
vmw_fence_obj_reference(struct vmw_fence_obj *fence)
{
if (fence)
- fence_get(&fence->base);
+ dma_fence_get(&fence->base);
return fence;
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index bf28ccc150df..e7daf59bac80 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -516,7 +516,7 @@ static const struct drm_framebuffer_funcs vmw_framebuffer_surface_funcs = {
static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv,
struct vmw_surface *surface,
struct vmw_framebuffer **out,
- const struct drm_mode_fb_cmd
+ const struct drm_mode_fb_cmd2
*mode_cmd,
bool is_dmabuf_proxy)
@@ -525,6 +525,7 @@ static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv,
struct vmw_framebuffer_surface *vfbs;
enum SVGA3dSurfaceFormat format;
int ret;
+ struct drm_format_name_buf format_name;
/* 3D is only supported on HWv8 and newer hosts */
if (dev_priv->active_display_unit == vmw_du_legacy)
@@ -548,21 +549,22 @@ static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv,
return -EINVAL;
}
- switch (mode_cmd->depth) {
- case 32:
+ switch (mode_cmd->pixel_format) {
+ case DRM_FORMAT_ARGB8888:
format = SVGA3D_A8R8G8B8;
break;
- case 24:
+ case DRM_FORMAT_XRGB8888:
format = SVGA3D_X8R8G8B8;
break;
- case 16:
+ case DRM_FORMAT_RGB565:
format = SVGA3D_R5G6B5;
break;
- case 15:
+ case DRM_FORMAT_XRGB1555:
format = SVGA3D_A1R5G5B5;
break;
default:
- DRM_ERROR("Invalid color depth: %d\n", mode_cmd->depth);
+ DRM_ERROR("Invalid pixel format: %s\n",
+ drm_get_format_name(mode_cmd->pixel_format, &format_name));
return -EINVAL;
}
@@ -581,14 +583,9 @@ static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv,
goto out_err1;
}
- /* XXX get the first 3 from the surface info */
- vfbs->base.base.bits_per_pixel = mode_cmd->bpp;
- vfbs->base.base.pitches[0] = mode_cmd->pitch;
- vfbs->base.base.depth = mode_cmd->depth;
- vfbs->base.base.width = mode_cmd->width;
- vfbs->base.base.height = mode_cmd->height;
+ drm_helper_mode_fill_fb_struct(&vfbs->base.base, mode_cmd);
vfbs->surface = vmw_surface_reference(surface);
- vfbs->base.user_handle = mode_cmd->handle;
+ vfbs->base.user_handle = mode_cmd->handles[0];
vfbs->is_dmabuf_proxy = is_dmabuf_proxy;
*out = &vfbs->base;
@@ -755,7 +752,7 @@ static int vmw_framebuffer_unpin(struct vmw_framebuffer *vfb)
* 0 on success, error code otherwise
*/
static int vmw_create_dmabuf_proxy(struct drm_device *dev,
- const struct drm_mode_fb_cmd *mode_cmd,
+ const struct drm_mode_fb_cmd2 *mode_cmd,
struct vmw_dma_buffer *dmabuf_mob,
struct vmw_surface **srf_out)
{
@@ -763,17 +760,18 @@ static int vmw_create_dmabuf_proxy(struct drm_device *dev,
struct drm_vmw_size content_base_size;
struct vmw_resource *res;
unsigned int bytes_pp;
+ struct drm_format_name_buf format_name;
int ret;
- switch (mode_cmd->depth) {
- case 32:
- case 24:
+ switch (mode_cmd->pixel_format) {
+ case DRM_FORMAT_ARGB8888:
+ case DRM_FORMAT_XRGB8888:
format = SVGA3D_X8R8G8B8;
bytes_pp = 4;
break;
- case 16:
- case 15:
+ case DRM_FORMAT_RGB565:
+ case DRM_FORMAT_XRGB1555:
format = SVGA3D_R5G6B5;
bytes_pp = 2;
break;
@@ -784,11 +782,12 @@ static int vmw_create_dmabuf_proxy(struct drm_device *dev,
break;
default:
- DRM_ERROR("Invalid framebuffer format %d\n", mode_cmd->depth);
+ DRM_ERROR("Invalid framebuffer format %s\n",
+ drm_get_format_name(mode_cmd->pixel_format, &format_name));
return -EINVAL;
}
- content_base_size.width = mode_cmd->pitch / bytes_pp;
+ content_base_size.width = mode_cmd->pitches[0] / bytes_pp;
content_base_size.height = mode_cmd->height;
content_base_size.depth = 1;
@@ -826,16 +825,17 @@ static int vmw_create_dmabuf_proxy(struct drm_device *dev,
static int vmw_kms_new_framebuffer_dmabuf(struct vmw_private *dev_priv,
struct vmw_dma_buffer *dmabuf,
struct vmw_framebuffer **out,
- const struct drm_mode_fb_cmd
+ const struct drm_mode_fb_cmd2
*mode_cmd)
{
struct drm_device *dev = dev_priv->dev;
struct vmw_framebuffer_dmabuf *vfbd;
unsigned int requested_size;
+ struct drm_format_name_buf format_name;
int ret;
- requested_size = mode_cmd->height * mode_cmd->pitch;
+ requested_size = mode_cmd->height * mode_cmd->pitches[0];
if (unlikely(requested_size > dmabuf->base.num_pages * PAGE_SIZE)) {
DRM_ERROR("Screen buffer object size is too small "
"for requested mode.\n");
@@ -844,27 +844,16 @@ static int vmw_kms_new_framebuffer_dmabuf(struct vmw_private *dev_priv,
/* Limited framebuffer color depth support for screen objects */
if (dev_priv->active_display_unit == vmw_du_screen_object) {
- switch (mode_cmd->depth) {
- case 32:
- case 24:
- /* Only support 32 bpp for 32 and 24 depth fbs */
- if (mode_cmd->bpp == 32)
- break;
-
- DRM_ERROR("Invalid color depth/bbp: %d %d\n",
- mode_cmd->depth, mode_cmd->bpp);
- return -EINVAL;
- case 16:
- case 15:
- /* Only support 16 bpp for 16 and 15 depth fbs */
- if (mode_cmd->bpp == 16)
- break;
-
- DRM_ERROR("Invalid color depth/bbp: %d %d\n",
- mode_cmd->depth, mode_cmd->bpp);
- return -EINVAL;
+ switch (mode_cmd->pixel_format) {
+ case DRM_FORMAT_XRGB8888:
+ case DRM_FORMAT_ARGB8888:
+ break;
+ case DRM_FORMAT_XRGB1555:
+ case DRM_FORMAT_RGB565:
+ break;
default:
- DRM_ERROR("Invalid color depth: %d\n", mode_cmd->depth);
+ DRM_ERROR("Invalid pixel format: %s\n",
+ drm_get_format_name(mode_cmd->pixel_format, &format_name));
return -EINVAL;
}
}
@@ -875,14 +864,10 @@ static int vmw_kms_new_framebuffer_dmabuf(struct vmw_private *dev_priv,
goto out_err1;
}
- vfbd->base.base.bits_per_pixel = mode_cmd->bpp;
- vfbd->base.base.pitches[0] = mode_cmd->pitch;
- vfbd->base.base.depth = mode_cmd->depth;
- vfbd->base.base.width = mode_cmd->width;
- vfbd->base.base.height = mode_cmd->height;
+ drm_helper_mode_fill_fb_struct(&vfbd->base.base, mode_cmd);
vfbd->base.dmabuf = true;
vfbd->buffer = vmw_dmabuf_reference(dmabuf);
- vfbd->base.user_handle = mode_cmd->handle;
+ vfbd->base.user_handle = mode_cmd->handles[0];
*out = &vfbd->base;
ret = drm_framebuffer_init(dev, &vfbd->base.base,
@@ -916,7 +901,7 @@ vmw_kms_new_framebuffer(struct vmw_private *dev_priv,
struct vmw_dma_buffer *dmabuf,
struct vmw_surface *surface,
bool only_2d,
- const struct drm_mode_fb_cmd *mode_cmd)
+ const struct drm_mode_fb_cmd2 *mode_cmd)
{
struct vmw_framebuffer *vfb = NULL;
bool is_dmabuf_proxy = false;
@@ -971,7 +956,7 @@ vmw_kms_new_framebuffer(struct vmw_private *dev_priv,
static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
struct drm_file *file_priv,
- const struct drm_mode_fb_cmd2 *mode_cmd2)
+ const struct drm_mode_fb_cmd2 *mode_cmd)
{
struct vmw_private *dev_priv = vmw_priv(dev);
struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
@@ -979,16 +964,8 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
struct vmw_surface *surface = NULL;
struct vmw_dma_buffer *bo = NULL;
struct ttm_base_object *user_obj;
- struct drm_mode_fb_cmd mode_cmd;
int ret;
- mode_cmd.width = mode_cmd2->width;
- mode_cmd.height = mode_cmd2->height;
- mode_cmd.pitch = mode_cmd2->pitches[0];
- mode_cmd.handle = mode_cmd2->handles[0];
- drm_fb_get_bpp_depth(mode_cmd2->pixel_format, &mode_cmd.depth,
- &mode_cmd.bpp);
-
/**
* This code should be conditioned on Screen Objects not being used.
* If screen objects are used, we can allocate a GMR to hold the
@@ -996,8 +973,8 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
*/
if (!vmw_kms_validate_mode_vram(dev_priv,
- mode_cmd.pitch,
- mode_cmd.height)) {
+ mode_cmd->pitches[0],
+ mode_cmd->height)) {
DRM_ERROR("Requested mode exceed bounding box limit.\n");
return ERR_PTR(-ENOMEM);
}
@@ -1011,7 +988,7 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
* command stream using user-space handles.
*/
- user_obj = ttm_base_object_lookup(tfile, mode_cmd.handle);
+ user_obj = ttm_base_object_lookup(tfile, mode_cmd->handles[0]);
if (unlikely(user_obj == NULL)) {
DRM_ERROR("Could not locate requested kms frame buffer.\n");
return ERR_PTR(-ENOENT);
@@ -1023,14 +1000,14 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
/* returns either a dmabuf or surface */
ret = vmw_user_lookup_handle(dev_priv, tfile,
- mode_cmd.handle,
+ mode_cmd->handles[0],
&surface, &bo);
if (ret)
goto err_out;
vfb = vmw_kms_new_framebuffer(dev_priv, bo, surface,
!(dev_priv->capabilities & SVGA_CAP_3D),
- &mode_cmd);
+ mode_cmd);
if (IS_ERR(vfb)) {
ret = PTR_ERR(vfb);
goto err_out;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
index ff4803c107bc..f42ce9a1c3ac 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
@@ -248,7 +248,7 @@ vmw_kms_new_framebuffer(struct vmw_private *dev_priv,
struct vmw_dma_buffer *dmabuf,
struct vmw_surface *surface,
bool only_2d,
- const struct drm_mode_fb_cmd *mode_cmd);
+ const struct drm_mode_fb_cmd2 *mode_cmd);
int vmw_kms_fbdev_init_data(struct vmw_private *dev_priv,
unsigned unit,
u32 max_width,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index 52ca1c9d070e..8e86d6d4141b 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -575,7 +575,7 @@ static int vmw_user_dmabuf_synccpu_grab(struct vmw_user_dma_buffer *user_bo,
long lret;
lret = reservation_object_wait_timeout_rcu(bo->resv, true, true,
- nonblock ? 0 : MAX_SCHEDULE_TIMEOUT);
+ nonblock ? 0 : MAX_SCHEDULE_TIMEOUT);
if (!lret)
return -EBUSY;
else if (lret < 0)
@@ -1454,7 +1454,7 @@ void vmw_fence_single_bo(struct ttm_buffer_object *bo,
if (fence == NULL) {
vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
reservation_object_add_excl_fence(bo->resv, &fence->base);
- fence_put(&fence->base);
+ dma_fence_put(&fence->base);
} else
reservation_object_add_excl_fence(bo->resv, &fence->base);
}
diff --git a/drivers/gpu/drm/zte/Kconfig b/drivers/gpu/drm/zte/Kconfig
new file mode 100644
index 000000000000..4065b2840f1c
--- /dev/null
+++ b/drivers/gpu/drm/zte/Kconfig
@@ -0,0 +1,8 @@
+config DRM_ZTE
+ tristate "DRM Support for ZTE SoCs"
+ depends on DRM && ARCH_ZX
+ select DRM_KMS_CMA_HELPER
+ select DRM_KMS_FB_HELPER
+ select DRM_KMS_HELPER
+ help
+ Choose this option to enable DRM on ZTE ZX SoCs.
diff --git a/drivers/gpu/drm/zte/Makefile b/drivers/gpu/drm/zte/Makefile
new file mode 100644
index 000000000000..699180bfd57c
--- /dev/null
+++ b/drivers/gpu/drm/zte/Makefile
@@ -0,0 +1,7 @@
+zxdrm-y := \
+ zx_drm_drv.o \
+ zx_hdmi.o \
+ zx_plane.o \
+ zx_vou.o
+
+obj-$(CONFIG_DRM_ZTE) += zxdrm.o
diff --git a/drivers/gpu/drm/zte/zx_drm_drv.c b/drivers/gpu/drm/zte/zx_drm_drv.c
new file mode 100644
index 000000000000..3e76f72c92ff
--- /dev/null
+++ b/drivers/gpu/drm/zte/zx_drm_drv.c
@@ -0,0 +1,267 @@
+/*
+ * Copyright 2016 Linaro Ltd.
+ * Copyright 2016 ZTE Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/clk.h>
+#include <linux/component.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/of_graph.h>
+#include <linux/of_platform.h>
+#include <linux/spinlock.h>
+
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_fb_helper.h>
+#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_of.h>
+#include <drm/drmP.h>
+
+#include "zx_drm_drv.h"
+#include "zx_vou.h"
+
+struct zx_drm_private {
+ struct drm_fbdev_cma *fbdev;
+};
+
+static void zx_drm_fb_output_poll_changed(struct drm_device *drm)
+{
+ struct zx_drm_private *priv = drm->dev_private;
+
+ drm_fbdev_cma_hotplug_event(priv->fbdev);
+}
+
+static const struct drm_mode_config_funcs zx_drm_mode_config_funcs = {
+ .fb_create = drm_fb_cma_create,
+ .output_poll_changed = zx_drm_fb_output_poll_changed,
+ .atomic_check = drm_atomic_helper_check,
+ .atomic_commit = drm_atomic_helper_commit,
+};
+
+static void zx_drm_lastclose(struct drm_device *drm)
+{
+ struct zx_drm_private *priv = drm->dev_private;
+
+ drm_fbdev_cma_restore_mode(priv->fbdev);
+}
+
+static const struct file_operations zx_drm_fops = {
+ .owner = THIS_MODULE,
+ .open = drm_open,
+ .release = drm_release,
+ .unlocked_ioctl = drm_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = drm_compat_ioctl,
+#endif
+ .poll = drm_poll,
+ .read = drm_read,
+ .llseek = noop_llseek,
+ .mmap = drm_gem_cma_mmap,
+};
+
+static struct drm_driver zx_drm_driver = {
+ .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME |
+ DRIVER_ATOMIC,
+ .lastclose = zx_drm_lastclose,
+ .get_vblank_counter = drm_vblank_no_hw_counter,
+ .enable_vblank = zx_vou_enable_vblank,
+ .disable_vblank = zx_vou_disable_vblank,
+ .gem_free_object = drm_gem_cma_free_object,
+ .gem_vm_ops = &drm_gem_cma_vm_ops,
+ .dumb_create = drm_gem_cma_dumb_create,
+ .dumb_map_offset = drm_gem_cma_dumb_map_offset,
+ .dumb_destroy = drm_gem_dumb_destroy,
+ .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
+ .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
+ .gem_prime_export = drm_gem_prime_export,
+ .gem_prime_import = drm_gem_prime_import,
+ .gem_prime_get_sg_table = drm_gem_cma_prime_get_sg_table,
+ .gem_prime_import_sg_table = drm_gem_cma_prime_import_sg_table,
+ .gem_prime_vmap = drm_gem_cma_prime_vmap,
+ .gem_prime_vunmap = drm_gem_cma_prime_vunmap,
+ .gem_prime_mmap = drm_gem_cma_prime_mmap,
+ .fops = &zx_drm_fops,
+ .name = "zx-vou",
+ .desc = "ZTE VOU Controller DRM",
+ .date = "20160811",
+ .major = 1,
+ .minor = 0,
+};
+
+static int zx_drm_bind(struct device *dev)
+{
+ struct drm_device *drm;
+ struct zx_drm_private *priv;
+ int ret;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ drm = drm_dev_alloc(&zx_drm_driver, dev);
+ if (IS_ERR(drm))
+ return PTR_ERR(drm);
+
+ drm->dev_private = priv;
+ dev_set_drvdata(dev, drm);
+
+ drm_mode_config_init(drm);
+ drm->mode_config.min_width = 16;
+ drm->mode_config.min_height = 16;
+ drm->mode_config.max_width = 4096;
+ drm->mode_config.max_height = 4096;
+ drm->mode_config.funcs = &zx_drm_mode_config_funcs;
+
+ ret = component_bind_all(dev, drm);
+ if (ret) {
+ DRM_DEV_ERROR(dev, "failed to bind all components: %d\n", ret);
+ goto out_unregister;
+ }
+
+ ret = drm_vblank_init(drm, drm->mode_config.num_crtc);
+ if (ret < 0) {
+ DRM_DEV_ERROR(dev, "failed to init vblank: %d\n", ret);
+ goto out_unbind;
+ }
+
+	/*
+	 * We will manage the irq handler on our own. In this case,
+	 * irq_enabled needs to be true for the vblank core support
+	 * to work.
+	 */
+ drm->irq_enabled = true;
+
+ drm_mode_config_reset(drm);
+ drm_kms_helper_poll_init(drm);
+
+ priv->fbdev = drm_fbdev_cma_init(drm, 32, drm->mode_config.num_crtc,
+ drm->mode_config.num_connector);
+ if (IS_ERR(priv->fbdev)) {
+ ret = PTR_ERR(priv->fbdev);
+ DRM_DEV_ERROR(dev, "failed to init cma fbdev: %d\n", ret);
+ priv->fbdev = NULL;
+ goto out_poll_fini;
+ }
+
+ ret = drm_dev_register(drm, 0);
+ if (ret)
+ goto out_fbdev_fini;
+
+ return 0;
+
+out_fbdev_fini:
+ if (priv->fbdev) {
+ drm_fbdev_cma_fini(priv->fbdev);
+ priv->fbdev = NULL;
+ }
+out_poll_fini:
+ drm_kms_helper_poll_fini(drm);
+ drm_mode_config_cleanup(drm);
+ drm_vblank_cleanup(drm);
+out_unbind:
+ component_unbind_all(dev, drm);
+out_unregister:
+ dev_set_drvdata(dev, NULL);
+ drm->dev_private = NULL;
+ drm_dev_unref(drm);
+ return ret;
+}
+
+static void zx_drm_unbind(struct device *dev)
+{
+ struct drm_device *drm = dev_get_drvdata(dev);
+ struct zx_drm_private *priv = drm->dev_private;
+
+ drm_dev_unregister(drm);
+ if (priv->fbdev) {
+ drm_fbdev_cma_fini(priv->fbdev);
+ priv->fbdev = NULL;
+ }
+ drm_kms_helper_poll_fini(drm);
+ drm_mode_config_cleanup(drm);
+ drm_vblank_cleanup(drm);
+ component_unbind_all(dev, drm);
+ dev_set_drvdata(dev, NULL);
+ drm->dev_private = NULL;
+ drm_dev_unref(drm);
+}
+
+static const struct component_master_ops zx_drm_master_ops = {
+ .bind = zx_drm_bind,
+ .unbind = zx_drm_unbind,
+};
+
+static int compare_of(struct device *dev, void *data)
+{
+ return dev->of_node == data;
+}
+
+static int zx_drm_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *parent = dev->of_node;
+ struct device_node *child;
+ struct component_match *match = NULL;
+ int ret;
+
+ ret = of_platform_populate(parent, NULL, NULL, dev);
+ if (ret)
+ return ret;
+
+ /*
+ * No of_node_put() here: the iterator manages the child node
+ * reference, and compare_of() only compares the pointer.
+ */
+ for_each_available_child_of_node(parent, child)
+ component_match_add(dev, &match, compare_of, child);
+
+ return component_master_add_with_match(dev, &zx_drm_master_ops, match);
+}
+
+static int zx_drm_remove(struct platform_device *pdev)
+{
+ component_master_del(&pdev->dev, &zx_drm_master_ops);
+ return 0;
+}
+
+static const struct of_device_id zx_drm_of_match[] = {
+ { .compatible = "zte,zx296718-vou", },
+ { /* end */ },
+};
+MODULE_DEVICE_TABLE(of, zx_drm_of_match);
+
+static struct platform_driver zx_drm_platform_driver = {
+ .probe = zx_drm_probe,
+ .remove = zx_drm_remove,
+ .driver = {
+ .name = "zx-drm",
+ .of_match_table = zx_drm_of_match,
+ },
+};
+
+static struct platform_driver *drivers[] = {
+ &zx_crtc_driver,
+ &zx_hdmi_driver,
+ &zx_drm_platform_driver,
+};
+
+static int zx_drm_init(void)
+{
+ return platform_register_drivers(drivers, ARRAY_SIZE(drivers));
+}
+module_init(zx_drm_init);
+
+static void zx_drm_exit(void)
+{
+ platform_unregister_drivers(drivers, ARRAY_SIZE(drivers));
+}
+module_exit(zx_drm_exit);
+
+MODULE_AUTHOR("Shawn Guo <shawn.guo@linaro.org>");
+MODULE_DESCRIPTION("ZTE ZX VOU DRM driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/zte/zx_drm_drv.h b/drivers/gpu/drm/zte/zx_drm_drv.h
new file mode 100644
index 000000000000..e65cd18a6cba
--- /dev/null
+++ b/drivers/gpu/drm/zte/zx_drm_drv.h
@@ -0,0 +1,36 @@
+/*
+ * Copyright 2016 Linaro Ltd.
+ * Copyright 2016 ZTE Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#ifndef __ZX_DRM_DRV_H__
+#define __ZX_DRM_DRV_H__
+
+extern struct platform_driver zx_crtc_driver;
+extern struct platform_driver zx_hdmi_driver;
+
+static inline u32 zx_readl(void __iomem *reg)
+{
+ return readl_relaxed(reg);
+}
+
+static inline void zx_writel(void __iomem *reg, u32 val)
+{
+ writel_relaxed(val, reg);
+}
+
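+/*
+ * Read-modify-write helper: only the bits set in mask are updated with
+ * the corresponding bits of val, e.g. for some hypothetical register
+ * offset REG:
+ *
+ *	zx_writel_mask(base + REG, BIT(3), BIT(3));	/" set bit 3 "/
+ *	zx_writel_mask(base + REG, BIT(3), 0);		/" clear bit 3 "/
+ */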
+static inline void zx_writel_mask(void __iomem *reg, u32 mask, u32 val)
+{
+ u32 tmp;
+
+ tmp = zx_readl(reg);
+ tmp = (tmp & ~mask) | (val & mask);
+ zx_writel(reg, tmp);
+}
+
+#endif /* __ZX_DRM_DRV_H__ */
diff --git a/drivers/gpu/drm/zte/zx_hdmi.c b/drivers/gpu/drm/zte/zx_hdmi.c
new file mode 100644
index 000000000000..6bf6c364811e
--- /dev/null
+++ b/drivers/gpu/drm/zte/zx_hdmi.c
@@ -0,0 +1,624 @@
+/*
+ * Copyright 2016 Linaro Ltd.
+ * Copyright 2016 ZTE Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/clk.h>
+#include <linux/component.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/hdmi.h>
+#include <linux/irq.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of_device.h>
+
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_edid.h>
+#include <drm/drm_of.h>
+#include <drm/drmP.h>
+
+#include "zx_hdmi_regs.h"
+#include "zx_vou.h"
+
+#define ZX_HDMI_INFOFRAME_SIZE 31
+#define DDC_SEGMENT_ADDR 0x30
+
+struct zx_hdmi_i2c {
+ struct i2c_adapter adap;
+ struct mutex lock;
+};
+
+struct zx_hdmi {
+ struct drm_connector connector;
+ struct drm_encoder encoder;
+ struct zx_hdmi_i2c *ddc;
+ struct device *dev;
+ struct drm_device *drm;
+ void __iomem *mmio;
+ struct clk *cec_clk;
+ struct clk *osc_clk;
+ struct clk *xclk;
+ bool sink_is_hdmi;
+ bool sink_has_audio;
+ const struct vou_inf *inf;
+};
+
+#define to_zx_hdmi(x) container_of(x, struct zx_hdmi, x)
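+/*
+ * Note that the macro argument doubles as the struct member name, so it
+ * only works when the caller's local variable is literally named
+ * 'connector' or 'encoder', matching the fields of struct zx_hdmi above.
+ */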
+
+static const struct vou_inf vou_inf_hdmi = {
+ .id = VOU_HDMI,
+ .data_sel = VOU_YUV444,
+ .clocks_en_bits = BIT(24) | BIT(18) | BIT(6),
+ .clocks_sel_bits = BIT(13) | BIT(2),
+};
+
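+/*
+ * Each byte-wide HDMI register occupies a full 32-bit word, so the
+ * accessors below scale the register offsets from zx_hdmi_regs.h by 4;
+ * this presumably mirrors the ZTE BSP register map.
+ */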
+static inline u8 hdmi_readb(struct zx_hdmi *hdmi, u16 offset)
+{
+ return readl_relaxed(hdmi->mmio + offset * 4);
+}
+
+static inline void hdmi_writeb(struct zx_hdmi *hdmi, u16 offset, u8 val)
+{
+ writel_relaxed(val, hdmi->mmio + offset * 4);
+}
+
+static inline void hdmi_writeb_mask(struct zx_hdmi *hdmi, u16 offset,
+ u8 mask, u8 val)
+{
+ u8 tmp;
+
+ tmp = hdmi_readb(hdmi, offset);
+ tmp = (tmp & ~mask) | (val & mask);
+ hdmi_writeb(hdmi, offset, tmp);
+}
+
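+/*
+ * Transmit one infoframe: select the frame slot via TPI_INFO_FSEL, write
+ * the packed bytes into the TPI_INFO_B0.. window, and set the repeat and
+ * enable bits.  The repeat bit presumably makes the hardware resend the
+ * frame periodically, since the driver never retransmits it by hand.
+ */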
+static int zx_hdmi_infoframe_trans(struct zx_hdmi *hdmi,
+ union hdmi_infoframe *frame, u8 fsel)
+{
+ u8 buffer[ZX_HDMI_INFOFRAME_SIZE];
+ int num;
+ int i;
+
+ hdmi_writeb(hdmi, TPI_INFO_FSEL, fsel);
+
+ num = hdmi_infoframe_pack(frame, buffer, ZX_HDMI_INFOFRAME_SIZE);
+ if (num < 0) {
+ DRM_DEV_ERROR(hdmi->dev, "failed to pack infoframe: %d\n", num);
+ return num;
+ }
+
+ for (i = 0; i < num; i++)
+ hdmi_writeb(hdmi, TPI_INFO_B0 + i, buffer[i]);
+
+ hdmi_writeb_mask(hdmi, TPI_INFO_EN, TPI_INFO_TRANS_RPT,
+ TPI_INFO_TRANS_RPT);
+ hdmi_writeb_mask(hdmi, TPI_INFO_EN, TPI_INFO_TRANS_EN,
+ TPI_INFO_TRANS_EN);
+
+ return num;
+}
+
+static int zx_hdmi_config_video_vsi(struct zx_hdmi *hdmi,
+ struct drm_display_mode *mode)
+{
+ union hdmi_infoframe frame;
+ int ret;
+
+ ret = drm_hdmi_vendor_infoframe_from_display_mode(&frame.vendor.hdmi,
+ mode);
+ if (ret) {
+ DRM_DEV_ERROR(hdmi->dev, "failed to get vendor infoframe: %d\n",
+ ret);
+ return ret;
+ }
+
+ return zx_hdmi_infoframe_trans(hdmi, &frame, FSEL_VSIF);
+}
+
+static int zx_hdmi_config_video_avi(struct zx_hdmi *hdmi,
+ struct drm_display_mode *mode)
+{
+ union hdmi_infoframe frame;
+ int ret;
+
+ ret = drm_hdmi_avi_infoframe_from_display_mode(&frame.avi, mode);
+ if (ret) {
+ DRM_DEV_ERROR(hdmi->dev, "failed to get avi infoframe: %d\n",
+ ret);
+ return ret;
+ }
+
+ /* We always use YUV444 for HDMI output. */
+ frame.avi.colorspace = HDMI_COLORSPACE_YUV444;
+
+ return zx_hdmi_infoframe_trans(hdmi, &frame, FSEL_AVI);
+}
+
+static void zx_hdmi_encoder_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adj_mode)
+{
+ struct zx_hdmi *hdmi = to_zx_hdmi(encoder);
+
+ if (hdmi->sink_is_hdmi) {
+ zx_hdmi_config_video_avi(hdmi, mode);
+ zx_hdmi_config_video_vsi(hdmi, mode);
+ }
+}
+
+static void zx_hdmi_phy_start(struct zx_hdmi *hdmi)
+{
+ /* Copied from the ZTE BSP code; these raw PHY values are undocumented */
+ hdmi_writeb(hdmi, 0x222, 0x0);
+ hdmi_writeb(hdmi, 0x224, 0x4);
+ hdmi_writeb(hdmi, 0x909, 0x0);
+ hdmi_writeb(hdmi, 0x7b0, 0x90);
+ hdmi_writeb(hdmi, 0x7b1, 0x00);
+ hdmi_writeb(hdmi, 0x7b2, 0xa7);
+ hdmi_writeb(hdmi, 0x7b8, 0xaa);
+ hdmi_writeb(hdmi, 0x7b2, 0xa7);
+ hdmi_writeb(hdmi, 0x7b3, 0x0f);
+ hdmi_writeb(hdmi, 0x7b4, 0x0f);
+ hdmi_writeb(hdmi, 0x7b5, 0x55);
+ hdmi_writeb(hdmi, 0x7b7, 0x03);
+ hdmi_writeb(hdmi, 0x7b9, 0x12);
+ hdmi_writeb(hdmi, 0x7ba, 0x32);
+ hdmi_writeb(hdmi, 0x7bc, 0x68);
+ hdmi_writeb(hdmi, 0x7be, 0x40);
+ hdmi_writeb(hdmi, 0x7bf, 0x84);
+ hdmi_writeb(hdmi, 0x7c1, 0x0f);
+ hdmi_writeb(hdmi, 0x7c8, 0x02);
+ hdmi_writeb(hdmi, 0x7c9, 0x03);
+ hdmi_writeb(hdmi, 0x7ca, 0x40);
+ hdmi_writeb(hdmi, 0x7dc, 0x31);
+ hdmi_writeb(hdmi, 0x7e2, 0x04);
+ hdmi_writeb(hdmi, 0x7e0, 0x06);
+ hdmi_writeb(hdmi, 0x7cb, 0x68);
+ hdmi_writeb(hdmi, 0x7f9, 0x02);
+ hdmi_writeb(hdmi, 0x7b6, 0x02);
+ hdmi_writeb(hdmi, 0x7f3, 0x0);
+}
+
+static void zx_hdmi_hw_enable(struct zx_hdmi *hdmi)
+{
+ /* Enable pclk */
+ hdmi_writeb_mask(hdmi, CLKPWD, CLKPWD_PDIDCK, CLKPWD_PDIDCK);
+
+ /* Enable HDMI for TX */
+ hdmi_writeb_mask(hdmi, FUNC_SEL, FUNC_HDMI_EN, FUNC_HDMI_EN);
+
+ /* Enable deep color packet */
+ hdmi_writeb_mask(hdmi, P2T_CTRL, P2T_DC_PKT_EN, P2T_DC_PKT_EN);
+
+ /* Enable HDMI/MHL mode for output */
+ hdmi_writeb_mask(hdmi, TEST_TXCTRL, TEST_TXCTRL_HDMI_MODE,
+ TEST_TXCTRL_HDMI_MODE);
+
+ /* Configure reg_qc_sel */
+ hdmi_writeb(hdmi, HDMICTL4, 0x3);
+
+ /* Enable interrupt */
+ hdmi_writeb_mask(hdmi, INTR1_MASK, INTR1_MONITOR_DETECT,
+ INTR1_MONITOR_DETECT);
+
+ /* Start up phy */
+ zx_hdmi_phy_start(hdmi);
+}
+
+static void zx_hdmi_hw_disable(struct zx_hdmi *hdmi)
+{
+ /* Disable interrupt */
+ hdmi_writeb_mask(hdmi, INTR1_MASK, INTR1_MONITOR_DETECT, 0);
+
+ /* Disable deep color packet */
+ hdmi_writeb_mask(hdmi, P2T_CTRL, P2T_DC_PKT_EN, P2T_DC_PKT_EN);
+
+ /* Disable HDMI for TX */
+ hdmi_writeb_mask(hdmi, FUNC_SEL, FUNC_HDMI_EN, 0);
+
+ /* Disable pclk */
+ hdmi_writeb_mask(hdmi, CLKPWD, CLKPWD_PDIDCK, 0);
+}
+
+static void zx_hdmi_encoder_enable(struct drm_encoder *encoder)
+{
+ struct zx_hdmi *hdmi = to_zx_hdmi(encoder);
+
+ clk_prepare_enable(hdmi->cec_clk);
+ clk_prepare_enable(hdmi->osc_clk);
+ clk_prepare_enable(hdmi->xclk);
+
+ zx_hdmi_hw_enable(hdmi);
+
+ vou_inf_enable(hdmi->inf, encoder->crtc);
+}
+
+static void zx_hdmi_encoder_disable(struct drm_encoder *encoder)
+{
+ struct zx_hdmi *hdmi = to_zx_hdmi(encoder);
+
+ vou_inf_disable(hdmi->inf, encoder->crtc);
+
+ zx_hdmi_hw_disable(hdmi);
+
+ clk_disable_unprepare(hdmi->xclk);
+ clk_disable_unprepare(hdmi->osc_clk);
+ clk_disable_unprepare(hdmi->cec_clk);
+}
+
+static const struct drm_encoder_helper_funcs zx_hdmi_encoder_helper_funcs = {
+ .enable = zx_hdmi_encoder_enable,
+ .disable = zx_hdmi_encoder_disable,
+ .mode_set = zx_hdmi_encoder_mode_set,
+};
+
+static const struct drm_encoder_funcs zx_hdmi_encoder_funcs = {
+ .destroy = drm_encoder_cleanup,
+};
+
+static int zx_hdmi_connector_get_modes(struct drm_connector *connector)
+{
+ struct zx_hdmi *hdmi = to_zx_hdmi(connector);
+ struct edid *edid;
+ int ret;
+
+ edid = drm_get_edid(connector, &hdmi->ddc->adap);
+ if (!edid)
+ return 0;
+
+ hdmi->sink_is_hdmi = drm_detect_hdmi_monitor(edid);
+ hdmi->sink_has_audio = drm_detect_monitor_audio(edid);
+ drm_mode_connector_update_edid_property(connector, edid);
+ ret = drm_add_edid_modes(connector, edid);
+ kfree(edid);
+
+ return ret;
+}
+
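+/* No mode constraints are modelled here, so every mode is accepted. */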
+static enum drm_mode_status
+zx_hdmi_connector_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ return MODE_OK;
+}
+
+static struct drm_connector_helper_funcs zx_hdmi_connector_helper_funcs = {
+ .get_modes = zx_hdmi_connector_get_modes,
+ .mode_valid = zx_hdmi_connector_mode_valid,
+};
+
+static enum drm_connector_status
+zx_hdmi_connector_detect(struct drm_connector *connector, bool force)
+{
+ struct zx_hdmi *hdmi = to_zx_hdmi(connector);
+
+ return (hdmi_readb(hdmi, TPI_HPD_RSEN) & TPI_HPD_CONNECTION) ?
+ connector_status_connected : connector_status_disconnected;
+}
+
+static const struct drm_connector_funcs zx_hdmi_connector_funcs = {
+ .dpms = drm_atomic_helper_connector_dpms,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .detect = zx_hdmi_connector_detect,
+ .destroy = drm_connector_cleanup,
+ .reset = drm_atomic_helper_connector_reset,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+};
+
+static int zx_hdmi_register(struct drm_device *drm, struct zx_hdmi *hdmi)
+{
+ struct drm_encoder *encoder = &hdmi->encoder;
+
+ encoder->possible_crtcs = VOU_CRTC_MASK;
+
+ drm_encoder_init(drm, encoder, &zx_hdmi_encoder_funcs,
+ DRM_MODE_ENCODER_TMDS, NULL);
+ drm_encoder_helper_add(encoder, &zx_hdmi_encoder_helper_funcs);
+
+ hdmi->connector.polled = DRM_CONNECTOR_POLL_HPD;
+
+ drm_connector_init(drm, &hdmi->connector, &zx_hdmi_connector_funcs,
+ DRM_MODE_CONNECTOR_HDMIA);
+ drm_connector_helper_add(&hdmi->connector,
+ &zx_hdmi_connector_helper_funcs);
+
+ drm_mode_connector_attach_encoder(&hdmi->connector, encoder);
+
+ return 0;
+}
+
+static irqreturn_t zx_hdmi_irq_thread(int irq, void *dev_id)
+{
+ struct zx_hdmi *hdmi = dev_id;
+
+ drm_helper_hpd_irq_event(hdmi->connector.dev);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t zx_hdmi_irq_handler(int irq, void *dev_id)
+{
+ struct zx_hdmi *hdmi = dev_id;
+ u8 lstat;
+
+ lstat = hdmi_readb(hdmi, L1_INTR_STAT);
+
+ /* Monitor detect/HPD interrupt */
+ if (lstat & L1_INTR_STAT_INTR1) {
+ u8 stat;
+
+ stat = hdmi_readb(hdmi, INTR1_STAT);
+ hdmi_writeb(hdmi, INTR1_STAT, stat);
+
+ if (stat & INTR1_MONITOR_DETECT)
+ return IRQ_WAKE_THREAD;
+ }
+
+ return IRQ_NONE;
+}
+
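+/*
+ * DDC read flow: program the transfer length into the two count
+ * registers, clear the FIFO, kick off a sequential read, then drain the
+ * FIFO in chunks while polling ZX_DDC_DOUT_CNT for available bytes.
+ * The retry limit (5 empty polls of 0.5-1ms each) is a heuristic
+ * timeout, not a documented constraint.
+ */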
+static int zx_hdmi_i2c_read(struct zx_hdmi *hdmi, struct i2c_msg *msg)
+{
+ int len = msg->len;
+ u8 *buf = msg->buf;
+ int retry = 0;
+ int ret = 0;
+
+ /* Bits [9:8] of the byte count */
+ hdmi_writeb(hdmi, ZX_DDC_DIN_CNT2, (len >> 8) & 0xff);
+ /* Bits [7:0] of the byte count */
+ hdmi_writeb(hdmi, ZX_DDC_DIN_CNT1, len & 0xff);
+
+ /* Clear FIFO */
+ hdmi_writeb_mask(hdmi, ZX_DDC_CMD, DDC_CMD_MASK, DDC_CMD_CLEAR_FIFO);
+
+ /* Kick off the read */
+ hdmi_writeb_mask(hdmi, ZX_DDC_CMD, DDC_CMD_MASK,
+ DDC_CMD_SEQUENTIAL_READ);
+
+ while (len > 0) {
+ int cnt, i;
+
+ /* FIFO needs some time to get ready */
+ usleep_range(500, 1000);
+
+ cnt = hdmi_readb(hdmi, ZX_DDC_DOUT_CNT) & DDC_DOUT_CNT_MASK;
+ if (cnt == 0) {
+ if (++retry > 5) {
+ DRM_DEV_ERROR(hdmi->dev,
+ "DDC FIFO read timed out!");
+ return -ETIMEDOUT;
+ }
+ continue;
+ }
+
+ for (i = 0; i < cnt; i++)
+ *buf++ = hdmi_readb(hdmi, ZX_DDC_DATA);
+ len -= cnt;
+ }
+
+ return ret;
+}
+
+static int zx_hdmi_i2c_write(struct zx_hdmi *hdmi, struct i2c_msg *msg)
+{
+ /*
+ * The DDC I2C adapter is only for reading EDID data, so we assume
+ * that the write to this adapter must be the EDID data offset.
+ */
+ if ((msg->len != 1) ||
+ ((msg->addr != DDC_ADDR) && (msg->addr != DDC_SEGMENT_ADDR)))
+ return -EINVAL;
+
+ if (msg->addr == DDC_SEGMENT_ADDR)
+ hdmi_writeb(hdmi, ZX_DDC_SEGM, msg->addr << 1);
+ else if (msg->addr == DDC_ADDR)
+ hdmi_writeb(hdmi, ZX_DDC_ADDR, msg->addr << 1);
+
+ hdmi_writeb(hdmi, ZX_DDC_OFFSET, msg->buf[0]);
+
+ return 0;
+}
+
+static int zx_hdmi_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs,
+ int num)
+{
+ struct zx_hdmi *hdmi = i2c_get_adapdata(adap);
+ struct zx_hdmi_i2c *ddc = hdmi->ddc;
+ int i, ret = 0;
+
+ mutex_lock(&ddc->lock);
+
+ /* Enable DDC master access */
+ hdmi_writeb_mask(hdmi, TPI_DDC_MASTER_EN, HW_DDC_MASTER, HW_DDC_MASTER);
+
+ for (i = 0; i < num; i++) {
+ DRM_DEV_DEBUG(hdmi->dev,
+ "xfer: num: %d/%d, len: %d, flags: %#x\n",
+ i + 1, num, msgs[i].len, msgs[i].flags);
+
+ if (msgs[i].flags & I2C_M_RD)
+ ret = zx_hdmi_i2c_read(hdmi, &msgs[i]);
+ else
+ ret = zx_hdmi_i2c_write(hdmi, &msgs[i]);
+
+ if (ret < 0)
+ break;
+ }
+
+ if (!ret)
+ ret = num;
+
+ /* Disable DDC master access */
+ hdmi_writeb_mask(hdmi, TPI_DDC_MASTER_EN, HW_DDC_MASTER, 0);
+
+ mutex_unlock(&ddc->lock);
+
+ return ret;
+}
+
+static u32 zx_hdmi_i2c_func(struct i2c_adapter *adapter)
+{
+ return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
+}
+
+static const struct i2c_algorithm zx_hdmi_algorithm = {
+ .master_xfer = zx_hdmi_i2c_xfer,
+ .functionality = zx_hdmi_i2c_func,
+};
+
+static int zx_hdmi_ddc_register(struct zx_hdmi *hdmi)
+{
+ struct i2c_adapter *adap;
+ struct zx_hdmi_i2c *ddc;
+ int ret;
+
+ ddc = devm_kzalloc(hdmi->dev, sizeof(*ddc), GFP_KERNEL);
+ if (!ddc)
+ return -ENOMEM;
+
+ hdmi->ddc = ddc;
+ mutex_init(&ddc->lock);
+
+ adap = &ddc->adap;
+ adap->owner = THIS_MODULE;
+ adap->class = I2C_CLASS_DDC;
+ adap->dev.parent = hdmi->dev;
+ adap->algo = &zx_hdmi_algorithm;
+ snprintf(adap->name, sizeof(adap->name), "zx hdmi i2c");
+
+ ret = i2c_add_adapter(adap);
+ if (ret) {
+ DRM_DEV_ERROR(hdmi->dev, "failed to add I2C adapter: %d\n",
+ ret);
+ return ret;
+ }
+
+ i2c_set_adapdata(adap, hdmi);
+
+ return 0;
+}
+
+static int zx_hdmi_bind(struct device *dev, struct device *master, void *data)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct drm_device *drm = data;
+ struct resource *res;
+ struct zx_hdmi *hdmi;
+ int irq;
+ int ret;
+
+ hdmi = devm_kzalloc(dev, sizeof(*hdmi), GFP_KERNEL);
+ if (!hdmi)
+ return -ENOMEM;
+
+ hdmi->dev = dev;
+ hdmi->drm = drm;
+ hdmi->inf = &vou_inf_hdmi;
+
+ dev_set_drvdata(dev, hdmi);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ hdmi->mmio = devm_ioremap_resource(dev, res);
+ if (IS_ERR(hdmi->mmio)) {
+ ret = PTR_ERR(hdmi->mmio);
+ DRM_DEV_ERROR(dev, "failed to remap hdmi region: %d\n", ret);
+ return ret;
+ }
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ hdmi->cec_clk = devm_clk_get(hdmi->dev, "osc_cec");
+ if (IS_ERR(hdmi->cec_clk)) {
+ ret = PTR_ERR(hdmi->cec_clk);
+ DRM_DEV_ERROR(dev, "failed to get cec_clk: %d\n", ret);
+ return ret;
+ }
+
+ hdmi->osc_clk = devm_clk_get(hdmi->dev, "osc_clk");
+ if (IS_ERR(hdmi->osc_clk)) {
+ ret = PTR_ERR(hdmi->osc_clk);
+ DRM_DEV_ERROR(dev, "failed to get osc_clk: %d\n", ret);
+ return ret;
+ }
+
+ hdmi->xclk = devm_clk_get(hdmi->dev, "xclk");
+ if (IS_ERR(hdmi->xclk)) {
+ ret = PTR_ERR(hdmi->xclk);
+ DRM_DEV_ERROR(dev, "failed to get xclk: %d\n", ret);
+ return ret;
+ }
+
+ ret = zx_hdmi_ddc_register(hdmi);
+ if (ret) {
+ DRM_DEV_ERROR(dev, "failed to register ddc: %d\n", ret);
+ return ret;
+ }
+
+ ret = zx_hdmi_register(drm, hdmi);
+ if (ret) {
+ DRM_DEV_ERROR(dev, "failed to register hdmi: %d\n", ret);
+ return ret;
+ }
+
+ ret = devm_request_threaded_irq(dev, irq, zx_hdmi_irq_handler,
+ zx_hdmi_irq_thread, IRQF_SHARED,
+ dev_name(dev), hdmi);
+ if (ret) {
+ DRM_DEV_ERROR(dev, "failed to request threaded irq: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void zx_hdmi_unbind(struct device *dev, struct device *master,
+ void *data)
+{
+ struct zx_hdmi *hdmi = dev_get_drvdata(dev);
+
+ hdmi->connector.funcs->destroy(&hdmi->connector);
+ hdmi->encoder.funcs->destroy(&hdmi->encoder);
+}
+
+static const struct component_ops zx_hdmi_component_ops = {
+ .bind = zx_hdmi_bind,
+ .unbind = zx_hdmi_unbind,
+};
+
+static int zx_hdmi_probe(struct platform_device *pdev)
+{
+ return component_add(&pdev->dev, &zx_hdmi_component_ops);
+}
+
+static int zx_hdmi_remove(struct platform_device *pdev)
+{
+ component_del(&pdev->dev, &zx_hdmi_component_ops);
+ return 0;
+}
+
+static const struct of_device_id zx_hdmi_of_match[] = {
+ { .compatible = "zte,zx296718-hdmi", },
+ { /* end */ },
+};
+MODULE_DEVICE_TABLE(of, zx_hdmi_of_match);
+
+struct platform_driver zx_hdmi_driver = {
+ .probe = zx_hdmi_probe,
+ .remove = zx_hdmi_remove,
+ .driver = {
+ .name = "zx-hdmi",
+ .of_match_table = zx_hdmi_of_match,
+ },
+};
diff --git a/drivers/gpu/drm/zte/zx_hdmi_regs.h b/drivers/gpu/drm/zte/zx_hdmi_regs.h
new file mode 100644
index 000000000000..de911f66b658
--- /dev/null
+++ b/drivers/gpu/drm/zte/zx_hdmi_regs.h
@@ -0,0 +1,56 @@
+/*
+ * Copyright 2016 Linaro Ltd.
+ * Copyright 2016 ZTE Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#ifndef __ZX_HDMI_REGS_H__
+#define __ZX_HDMI_REGS_H__
+
+#define FUNC_SEL 0x000b
+#define FUNC_HDMI_EN BIT(0)
+#define CLKPWD 0x000d
+#define CLKPWD_PDIDCK BIT(2)
+#define P2T_CTRL 0x0066
+#define P2T_DC_PKT_EN BIT(7)
+#define L1_INTR_STAT 0x007e
+#define L1_INTR_STAT_INTR1 BIT(0)
+#define INTR1_STAT 0x008f
+#define INTR1_MASK 0x0095
+#define INTR1_MONITOR_DETECT (BIT(5) | BIT(6))
+#define ZX_DDC_ADDR 0x00ed
+#define ZX_DDC_SEGM 0x00ee
+#define ZX_DDC_OFFSET 0x00ef
+#define ZX_DDC_DIN_CNT1 0x00f0
+#define ZX_DDC_DIN_CNT2 0x00f1
+#define ZX_DDC_CMD 0x00f3
+#define DDC_CMD_MASK 0xf
+#define DDC_CMD_CLEAR_FIFO 0x9
+#define DDC_CMD_SEQUENTIAL_READ 0x2
+#define ZX_DDC_DATA 0x00f4
+#define ZX_DDC_DOUT_CNT 0x00f5
+#define DDC_DOUT_CNT_MASK 0x1f
+#define TEST_TXCTRL 0x00f7
+#define TEST_TXCTRL_HDMI_MODE BIT(1)
+#define HDMICTL4 0x0235
+#define TPI_HPD_RSEN 0x063b
+#define TPI_HPD_CONNECTION (BIT(1) | BIT(2))
+#define TPI_INFO_FSEL 0x06bf
+#define FSEL_AVI 0
+#define FSEL_GBD 1
+#define FSEL_AUDIO 2
+#define FSEL_SPD 3
+#define FSEL_MPEG 4
+#define FSEL_VSIF 5
+#define TPI_INFO_B0 0x06c0
+#define TPI_INFO_EN 0x06df
+#define TPI_INFO_TRANS_EN BIT(7)
+#define TPI_INFO_TRANS_RPT BIT(6)
+#define TPI_DDC_MASTER_EN 0x06f8
+#define HW_DDC_MASTER BIT(7)
+
+#endif /* __ZX_HDMI_REGS_H__ */
diff --git a/drivers/gpu/drm/zte/zx_plane.c b/drivers/gpu/drm/zte/zx_plane.c
new file mode 100644
index 000000000000..546eb92a94e8
--- /dev/null
+++ b/drivers/gpu/drm/zte/zx_plane.c
@@ -0,0 +1,299 @@
+/*
+ * Copyright 2016 Linaro Ltd.
+ * Copyright 2016 ZTE Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_modeset_helper_vtables.h>
+#include <drm/drm_plane_helper.h>
+#include <drm/drmP.h>
+
+#include "zx_drm_drv.h"
+#include "zx_plane.h"
+#include "zx_plane_regs.h"
+#include "zx_vou.h"
+
+struct zx_plane {
+ struct drm_plane plane;
+ void __iomem *layer;
+ void __iomem *csc;
+ void __iomem *hbsc;
+ void __iomem *rsz;
+};
+
+#define to_zx_plane(plane) container_of(plane, struct zx_plane, plane)
+
+static const uint32_t gl_formats[] = {
+ DRM_FORMAT_ARGB8888,
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_RGB888,
+ DRM_FORMAT_RGB565,
+ DRM_FORMAT_ARGB1555,
+ DRM_FORMAT_ARGB4444,
+};
+
+static int zx_gl_plane_atomic_check(struct drm_plane *plane,
+ struct drm_plane_state *plane_state)
+{
+ struct drm_framebuffer *fb = plane_state->fb;
+ struct drm_crtc *crtc = plane_state->crtc;
+ struct drm_crtc_state *crtc_state;
+ struct drm_rect clip;
+
+ if (!crtc || !fb)
+ return 0;
+
+ crtc_state = drm_atomic_get_existing_crtc_state(plane_state->state,
+ crtc);
+ if (WARN_ON(!crtc_state))
+ return -EINVAL;
+
+ /* nothing to check when disabling or disabled */
+ if (!crtc_state->enable)
+ return 0;
+
+ /* plane must be enabled */
+ if (!plane_state->crtc)
+ return -EINVAL;
+
+ clip.x1 = 0;
+ clip.y1 = 0;
+ clip.x2 = crtc_state->adjusted_mode.hdisplay;
+ clip.y2 = crtc_state->adjusted_mode.vdisplay;
+
+ return drm_plane_helper_check_state(plane_state, &clip,
+ DRM_PLANE_HELPER_NO_SCALING,
+ DRM_PLANE_HELPER_NO_SCALING,
+ false, true);
+}
+
+static int zx_gl_get_fmt(uint32_t format)
+{
+ switch (format) {
+ case DRM_FORMAT_ARGB8888:
+ case DRM_FORMAT_XRGB8888:
+ return GL_FMT_ARGB8888;
+ case DRM_FORMAT_RGB888:
+ return GL_FMT_RGB888;
+ case DRM_FORMAT_RGB565:
+ return GL_FMT_RGB565;
+ case DRM_FORMAT_ARGB1555:
+ return GL_FMT_ARGB1555;
+ case DRM_FORMAT_ARGB4444:
+ return GL_FMT_ARGB4444;
+ default:
+ WARN_ONCE(1, "invalid pixel format %d\n", format);
+ return -EINVAL;
+ }
+}
+
+static inline void zx_gl_set_update(struct zx_plane *zplane)
+{
+ void __iomem *layer = zplane->layer;
+
+ zx_writel_mask(layer + GL_CTRL0, GL_UPDATE, GL_UPDATE);
+}
+
+static inline void zx_gl_rsz_set_update(struct zx_plane *zplane)
+{
+ zx_writel(zplane->rsz + RSZ_ENABLE_CFG, 1);
+}
+
+void zx_plane_set_update(struct drm_plane *plane)
+{
+ struct zx_plane *zplane = to_zx_plane(plane);
+
+ zx_gl_rsz_set_update(zplane);
+ zx_gl_set_update(zplane);
+}
+
+static void zx_gl_rsz_setup(struct zx_plane *zplane, u32 src_w, u32 src_h,
+ u32 dst_w, u32 dst_h)
+{
+ void __iomem *rsz = zplane->rsz;
+
+ zx_writel(rsz + RSZ_SRC_CFG, RSZ_VER(src_h - 1) | RSZ_HOR(src_w - 1));
+ zx_writel(rsz + RSZ_DEST_CFG, RSZ_VER(dst_h - 1) | RSZ_HOR(dst_w - 1));
+
+ zx_gl_rsz_set_update(zplane);
+}
+
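+/*
+ * Graphic layer update: compute the scan-out address from the CMA
+ * backing object, program size/position/stride/format, pick a CSC mode
+ * (BT.709 for modes taller than 720 lines, BT.601 otherwise), set up
+ * the resizer, and finally latch everything with the update bits.
+ */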
+static void zx_gl_plane_atomic_update(struct drm_plane *plane,
+ struct drm_plane_state *old_state)
+{
+ struct zx_plane *zplane = to_zx_plane(plane);
+ struct drm_framebuffer *fb = plane->state->fb;
+ struct drm_gem_cma_object *cma_obj;
+ void __iomem *layer = zplane->layer;
+ void __iomem *csc = zplane->csc;
+ void __iomem *hbsc = zplane->hbsc;
+ u32 src_x, src_y, src_w, src_h;
+ u32 dst_x, dst_y, dst_w, dst_h;
+ unsigned int bpp;
+ uint32_t format;
+ dma_addr_t paddr;
+ u32 stride;
+ int fmt;
+
+ if (!fb)
+ return;
+
+ format = fb->pixel_format;
+ stride = fb->pitches[0];
+
+ src_x = plane->state->src_x >> 16;
+ src_y = plane->state->src_y >> 16;
+ src_w = plane->state->src_w >> 16;
+ src_h = plane->state->src_h >> 16;
+
+ dst_x = plane->state->crtc_x;
+ dst_y = plane->state->crtc_y;
+ dst_w = plane->state->crtc_w;
+ dst_h = plane->state->crtc_h;
+
+ bpp = drm_format_plane_cpp(format, 0);
+
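+ /*
+ * Scan-out address = GEM buffer start + plane offset + the byte offset
+ * of the (src_x, src_y) origin within the framebuffer.
+ */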
+ cma_obj = drm_fb_cma_get_gem_obj(fb, 0);
+ paddr = cma_obj->paddr + fb->offsets[0];
+ paddr += src_y * stride + src_x * bpp / 8;
+ zx_writel(layer + GL_ADDR, paddr);
+
+ /* Set up source height/width register */
+ zx_writel(layer + GL_SRC_SIZE, GL_SRC_W(src_w) | GL_SRC_H(src_h));
+
+ /* Set up start position register */
+ zx_writel(layer + GL_POS_START, GL_POS_X(dst_x) | GL_POS_Y(dst_y));
+
+ /* Set up end position register */
+ zx_writel(layer + GL_POS_END,
+ GL_POS_X(dst_x + dst_w) | GL_POS_Y(dst_y + dst_h));
+
+ /* Set up stride register */
+ zx_writel(layer + GL_STRIDE, stride & 0xffff);
+
+ /* Set up graphic layer data format */
+ fmt = zx_gl_get_fmt(format);
+ if (fmt >= 0)
+ zx_writel_mask(layer + GL_CTRL1, GL_DATA_FMT_MASK,
+ fmt << GL_DATA_FMT_SHIFT);
+
+ /* Initialize global alpha with a sane value */
+ zx_writel_mask(layer + GL_CTRL2, GL_GLOBAL_ALPHA_MASK,
+ 0xff << GL_GLOBAL_ALPHA_SHIFT);
+
+ /* Setup CSC for the GL */
+ if (dst_h > 720)
+ zx_writel_mask(csc + CSC_CTRL0, CSC_COV_MODE_MASK,
+ CSC_BT709_IMAGE_RGB2YCBCR << CSC_COV_MODE_SHIFT);
+ else
+ zx_writel_mask(csc + CSC_CTRL0, CSC_COV_MODE_MASK,
+ CSC_BT601_IMAGE_RGB2YCBCR << CSC_COV_MODE_SHIFT);
+ zx_writel_mask(csc + CSC_CTRL0, CSC_WORK_ENABLE, CSC_WORK_ENABLE);
+
+ /*
+ * The scaler is always present, so always use it; setting this bit
+ * means "do not bypass".
+ */
+ zx_writel_mask(layer + GL_CTRL3, GL_SCALER_BYPASS_MODE,
+ GL_SCALER_BYPASS_MODE);
+
+ zx_gl_rsz_setup(zplane, src_w, src_h, dst_w, dst_h);
+
+ /* Enable HBSC block */
+ zx_writel_mask(hbsc + HBSC_CTRL0, HBSC_CTRL_EN, HBSC_CTRL_EN);
+
+ zx_gl_set_update(zplane);
+}
+
+static const struct drm_plane_helper_funcs zx_gl_plane_helper_funcs = {
+ .atomic_check = zx_gl_plane_atomic_check,
+ .atomic_update = zx_gl_plane_atomic_update,
+};
+
+static void zx_plane_destroy(struct drm_plane *plane)
+{
+ drm_plane_helper_disable(plane);
+ drm_plane_cleanup(plane);
+}
+
+static const struct drm_plane_funcs zx_plane_funcs = {
+ .update_plane = drm_atomic_helper_update_plane,
+ .disable_plane = drm_atomic_helper_disable_plane,
+ .destroy = zx_plane_destroy,
+ .reset = drm_atomic_helper_plane_reset,
+ .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
+};
+
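+/*
+ * 0x200 for saturation/contrast and 0x0 for hue/brightness look like
+ * the neutral points of a fixed-point scale, though the exact format
+ * is not documented here.
+ */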
+static void zx_plane_hbsc_init(struct zx_plane *zplane)
+{
+ void __iomem *hbsc = zplane->hbsc;
+
+ /*
+ * Initialize the HBSC block with a sane configuration, per the
+ * recommendation from the ZTE BSP code.
+ */
+ zx_writel(hbsc + HBSC_SATURATION, 0x200);
+ zx_writel(hbsc + HBSC_HUE, 0x0);
+ zx_writel(hbsc + HBSC_BRIGHT, 0x0);
+ zx_writel(hbsc + HBSC_CONTRAST, 0x200);
+
+ zx_writel(hbsc + HBSC_THRESHOLD_COL1, (0x3ac << 16) | 0x40);
+ zx_writel(hbsc + HBSC_THRESHOLD_COL2, (0x3c0 << 16) | 0x40);
+ zx_writel(hbsc + HBSC_THRESHOLD_COL3, (0x3c0 << 16) | 0x40);
+}
+
+struct drm_plane *zx_plane_init(struct drm_device *drm, struct device *dev,
+ struct zx_layer_data *data,
+ enum drm_plane_type type)
+{
+ const struct drm_plane_helper_funcs *helper;
+ struct zx_plane *zplane;
+ struct drm_plane *plane;
+ const uint32_t *formats;
+ unsigned int format_count;
+ int ret;
+
+ zplane = devm_kzalloc(dev, sizeof(*zplane), GFP_KERNEL);
+ if (!zplane)
+ return ERR_PTR(-ENOMEM);
+
+ plane = &zplane->plane;
+
+ zplane->layer = data->layer;
+ zplane->hbsc = data->hbsc;
+ zplane->csc = data->csc;
+ zplane->rsz = data->rsz;
+
+ zx_plane_hbsc_init(zplane);
+
+ switch (type) {
+ case DRM_PLANE_TYPE_PRIMARY:
+ helper = &zx_gl_plane_helper_funcs;
+ formats = gl_formats;
+ format_count = ARRAY_SIZE(gl_formats);
+ break;
+ case DRM_PLANE_TYPE_OVERLAY:
+ /* TODO: add video layer (vl) support */
+ break;
+ default:
+ return ERR_PTR(-ENODEV);
+ }
+
+ ret = drm_universal_plane_init(drm, plane, VOU_CRTC_MASK,
+ &zx_plane_funcs, formats, format_count,
+ type, NULL);
+ if (ret) {
+ DRM_DEV_ERROR(dev, "failed to init universal plane: %d\n", ret);
+ return ERR_PTR(ret);
+ }
+
+ drm_plane_helper_add(plane, helper);
+
+ return plane;
+}
diff --git a/drivers/gpu/drm/zte/zx_plane.h b/drivers/gpu/drm/zte/zx_plane.h
new file mode 100644
index 000000000000..2b82cd558d9d
--- /dev/null
+++ b/drivers/gpu/drm/zte/zx_plane.h
@@ -0,0 +1,26 @@
+/*
+ * Copyright 2016 Linaro Ltd.
+ * Copyright 2016 ZTE Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#ifndef __ZX_PLANE_H__
+#define __ZX_PLANE_H__
+
+struct zx_layer_data {
+ void __iomem *layer;
+ void __iomem *csc;
+ void __iomem *hbsc;
+ void __iomem *rsz;
+};
+
+struct drm_plane *zx_plane_init(struct drm_device *drm, struct device *dev,
+ struct zx_layer_data *data,
+ enum drm_plane_type type);
+void zx_plane_set_update(struct drm_plane *plane);
+
+#endif /* __ZX_PLANE_H__ */
diff --git a/drivers/gpu/drm/zte/zx_plane_regs.h b/drivers/gpu/drm/zte/zx_plane_regs.h
new file mode 100644
index 000000000000..3dde6716a558
--- /dev/null
+++ b/drivers/gpu/drm/zte/zx_plane_regs.h
@@ -0,0 +1,91 @@
+/*
+ * Copyright 2016 Linaro Ltd.
+ * Copyright 2016 ZTE Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#ifndef __ZX_PLANE_REGS_H__
+#define __ZX_PLANE_REGS_H__
+
+/* GL registers */
+#define GL_CTRL0 0x00
+#define GL_UPDATE BIT(5)
+#define GL_CTRL1 0x04
+#define GL_DATA_FMT_SHIFT 0
+#define GL_DATA_FMT_MASK (0xf << GL_DATA_FMT_SHIFT)
+#define GL_FMT_ARGB8888 0
+#define GL_FMT_RGB888 1
+#define GL_FMT_RGB565 2
+#define GL_FMT_ARGB1555 3
+#define GL_FMT_ARGB4444 4
+#define GL_CTRL2 0x08
+#define GL_GLOBAL_ALPHA_SHIFT 8
+#define GL_GLOBAL_ALPHA_MASK (0xff << GL_GLOBAL_ALPHA_SHIFT)
+#define GL_CTRL3 0x0c
+#define GL_SCALER_BYPASS_MODE BIT(0)
+#define GL_STRIDE 0x18
+#define GL_ADDR 0x1c
+#define GL_SRC_SIZE 0x38
+#define GL_SRC_W_SHIFT 16
+#define GL_SRC_W_MASK (0x3fff << GL_SRC_W_SHIFT)
+#define GL_SRC_H_SHIFT 0
+#define GL_SRC_H_MASK (0x3fff << GL_SRC_H_SHIFT)
+#define GL_POS_START 0x9c
+#define GL_POS_END 0xa0
+#define GL_POS_X_SHIFT 16
+#define GL_POS_X_MASK (0x1fff << GL_POS_X_SHIFT)
+#define GL_POS_Y_SHIFT 0
+#define GL_POS_Y_MASK (0x1fff << GL_POS_Y_SHIFT)
+
+#define GL_SRC_W(x) (((x) << GL_SRC_W_SHIFT) & GL_SRC_W_MASK)
+#define GL_SRC_H(x) (((x) << GL_SRC_H_SHIFT) & GL_SRC_H_MASK)
+#define GL_POS_X(x) (((x) << GL_POS_X_SHIFT) & GL_POS_X_MASK)
+#define GL_POS_Y(x) (((x) << GL_POS_Y_SHIFT) & GL_POS_Y_MASK)
+
+/* CSC registers */
+#define CSC_CTRL0 0x30
+#define CSC_COV_MODE_SHIFT 16
+#define CSC_COV_MODE_MASK (0xffff << CSC_COV_MODE_SHIFT)
+#define CSC_BT601_IMAGE_RGB2YCBCR 0
+#define CSC_BT601_IMAGE_YCBCR2RGB 1
+#define CSC_BT601_VIDEO_RGB2YCBCR 2
+#define CSC_BT601_VIDEO_YCBCR2RGB 3
+#define CSC_BT709_IMAGE_RGB2YCBCR 4
+#define CSC_BT709_IMAGE_YCBCR2RGB 5
+#define CSC_BT709_VIDEO_RGB2YCBCR 6
+#define CSC_BT709_VIDEO_YCBCR2RGB 7
+#define CSC_BT2020_IMAGE_RGB2YCBCR 8
+#define CSC_BT2020_IMAGE_YCBCR2RGB 9
+#define CSC_BT2020_VIDEO_RGB2YCBCR 10
+#define CSC_BT2020_VIDEO_YCBCR2RGB 11
+#define CSC_WORK_ENABLE BIT(0)
+
+/* RSZ registers */
+#define RSZ_SRC_CFG 0x00
+#define RSZ_DEST_CFG 0x04
+#define RSZ_ENABLE_CFG 0x14
+
+#define RSZ_VER_SHIFT 16
+#define RSZ_VER_MASK (0xffff << RSZ_VER_SHIFT)
+#define RSZ_HOR_SHIFT 0
+#define RSZ_HOR_MASK (0xffff << RSZ_HOR_SHIFT)
+
+#define RSZ_VER(x) (((x) << RSZ_VER_SHIFT) & RSZ_VER_MASK)
+#define RSZ_HOR(x) (((x) << RSZ_HOR_SHIFT) & RSZ_HOR_MASK)
+
+/* HBSC registers */
+#define HBSC_SATURATION 0x00
+#define HBSC_HUE 0x04
+#define HBSC_BRIGHT 0x08
+#define HBSC_CONTRAST 0x0c
+#define HBSC_THRESHOLD_COL1 0x10
+#define HBSC_THRESHOLD_COL2 0x14
+#define HBSC_THRESHOLD_COL3 0x18
+#define HBSC_CTRL0 0x28
+#define HBSC_CTRL_EN BIT(2)
+
+#endif /* __ZX_PLANE_REGS_H__ */
diff --git a/drivers/gpu/drm/zte/zx_vou.c b/drivers/gpu/drm/zte/zx_vou.c
new file mode 100644
index 000000000000..73fe15c17c32
--- /dev/null
+++ b/drivers/gpu/drm/zte/zx_vou.c
@@ -0,0 +1,661 @@
+/*
+ * Copyright 2016 Linaro Ltd.
+ * Copyright 2016 ZTE Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/clk.h>
+#include <linux/component.h>
+#include <linux/of_address.h>
+#include <video/videomode.h>
+
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_fb_helper.h>
+#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_of.h>
+#include <drm/drm_plane_helper.h>
+#include <drm/drmP.h>
+
+#include "zx_drm_drv.h"
+#include "zx_plane.h"
+#include "zx_vou.h"
+#include "zx_vou_regs.h"
+
+#define GL_NUM 2
+#define VL_NUM 3
+
+enum vou_chn_type {
+ VOU_CHN_MAIN,
+ VOU_CHN_AUX,
+};
+
+struct zx_crtc_regs {
+ u32 fir_active;
+ u32 fir_htiming;
+ u32 fir_vtiming;
+ u32 timing_shift;
+ u32 timing_pi_shift;
+};
+
+static const struct zx_crtc_regs main_crtc_regs = {
+ .fir_active = FIR_MAIN_ACTIVE,
+ .fir_htiming = FIR_MAIN_H_TIMING,
+ .fir_vtiming = FIR_MAIN_V_TIMING,
+ .timing_shift = TIMING_MAIN_SHIFT,
+ .timing_pi_shift = TIMING_MAIN_PI_SHIFT,
+};
+
+static const struct zx_crtc_regs aux_crtc_regs = {
+ .fir_active = FIR_AUX_ACTIVE,
+ .fir_htiming = FIR_AUX_H_TIMING,
+ .fir_vtiming = FIR_AUX_V_TIMING,
+ .timing_shift = TIMING_AUX_SHIFT,
+ .timing_pi_shift = TIMING_AUX_PI_SHIFT,
+};
+
+struct zx_crtc_bits {
+ u32 polarity_mask;
+ u32 polarity_shift;
+ u32 int_frame_mask;
+ u32 tc_enable;
+ u32 gl_enable;
+};
+
+static const struct zx_crtc_bits main_crtc_bits = {
+ .polarity_mask = MAIN_POL_MASK,
+ .polarity_shift = MAIN_POL_SHIFT,
+ .int_frame_mask = TIMING_INT_MAIN_FRAME,
+ .tc_enable = MAIN_TC_EN,
+ .gl_enable = OSD_CTRL0_GL0_EN,
+};
+
+static const struct zx_crtc_bits aux_crtc_bits = {
+ .polarity_mask = AUX_POL_MASK,
+ .polarity_shift = AUX_POL_SHIFT,
+ .int_frame_mask = TIMING_INT_AUX_FRAME,
+ .tc_enable = AUX_TC_EN,
+ .gl_enable = OSD_CTRL0_GL1_EN,
+};
+
+struct zx_crtc {
+ struct drm_crtc crtc;
+ struct drm_plane *primary;
+ struct zx_vou_hw *vou;
+ void __iomem *chnreg;
+ const struct zx_crtc_regs *regs;
+ const struct zx_crtc_bits *bits;
+ enum vou_chn_type chn_type;
+ struct clk *pixclk;
+};
+
+#define to_zx_crtc(x) container_of(x, struct zx_crtc, crtc)
+
+struct zx_vou_hw {
+ struct device *dev;
+ void __iomem *osd;
+ void __iomem *timing;
+ void __iomem *vouctl;
+ void __iomem *otfppu;
+ void __iomem *dtrc;
+ struct clk *axi_clk;
+ struct clk *ppu_clk;
+ struct clk *main_clk;
+ struct clk *aux_clk;
+ struct zx_crtc *main_crtc;
+ struct zx_crtc *aux_crtc;
+};
+
+static inline struct zx_vou_hw *crtc_to_vou(struct drm_crtc *crtc)
+{
+ struct zx_crtc *zcrtc = to_zx_crtc(crtc);
+
+ return zcrtc->vou;
+}
+
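+/*
+ * Each output interface occupies a 2-bit data-format field in
+ * VOU_INF_DATA_SEL (hence the 'inf->id << 1' shift) and a single
+ * channel-select bit in VOU_INF_CH_SEL, so the same registers serve all
+ * interfaces listed in enum vou_inf_id.
+ */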
+void vou_inf_enable(const struct vou_inf *inf, struct drm_crtc *crtc)
+{
+ struct zx_crtc *zcrtc = to_zx_crtc(crtc);
+ struct zx_vou_hw *vou = zcrtc->vou;
+ bool is_main = zcrtc->chn_type == VOU_CHN_MAIN;
+ u32 data_sel_shift = inf->id << 1;
+
+ /* Select data format */
+ zx_writel_mask(vou->vouctl + VOU_INF_DATA_SEL, 0x3 << data_sel_shift,
+ inf->data_sel << data_sel_shift);
+
+ /* Select channel */
+ zx_writel_mask(vou->vouctl + VOU_INF_CH_SEL, 0x1 << inf->id,
+ zcrtc->chn_type << inf->id);
+
+ /* Select interface clocks */
+ zx_writel_mask(vou->vouctl + VOU_CLK_SEL, inf->clocks_sel_bits,
+ is_main ? 0 : inf->clocks_sel_bits);
+
+ /* Enable interface clocks */
+ zx_writel_mask(vou->vouctl + VOU_CLK_EN, inf->clocks_en_bits,
+ inf->clocks_en_bits);
+
+ /* Enable the device */
+ zx_writel_mask(vou->vouctl + VOU_INF_EN, 1 << inf->id, 1 << inf->id);
+}
+
+void vou_inf_disable(const struct vou_inf *inf, struct drm_crtc *crtc)
+{
+ struct zx_vou_hw *vou = crtc_to_vou(crtc);
+
+ /* Disable the device */
+ zx_writel_mask(vou->vouctl + VOU_INF_EN, 1 << inf->id, 0);
+
+ /* Disable interface clocks */
+ zx_writel_mask(vou->vouctl + VOU_CLK_EN, inf->clocks_en_bits, 0);
+}
+
+static inline void vou_chn_set_update(struct zx_crtc *zcrtc)
+{
+ zx_writel(zcrtc->chnreg + CHN_UPDATE, 1);
+}
+
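+/*
+ * CRTC enable sequence: program active size and h/v timings (registers
+ * take 'value - 1'), set sync polarities, enable the timing generator
+ * and channel, turn on the graphic layer, and only then enable vblank
+ * and the pixel clock.
+ */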
+static void zx_crtc_enable(struct drm_crtc *crtc)
+{
+ struct drm_display_mode *mode = &crtc->state->adjusted_mode;
+ struct zx_crtc *zcrtc = to_zx_crtc(crtc);
+ struct zx_vou_hw *vou = zcrtc->vou;
+ const struct zx_crtc_regs *regs = zcrtc->regs;
+ const struct zx_crtc_bits *bits = zcrtc->bits;
+ struct videomode vm;
+ u32 pol = 0;
+ u32 val;
+ int ret;
+
+ drm_display_mode_to_videomode(mode, &vm);
+
+ /* Set up timing parameters */
+ val = V_ACTIVE(vm.vactive - 1);
+ val |= H_ACTIVE(vm.hactive - 1);
+ zx_writel(vou->timing + regs->fir_active, val);
+
+ val = SYNC_WIDE(vm.hsync_len - 1);
+ val |= BACK_PORCH(vm.hback_porch - 1);
+ val |= FRONT_PORCH(vm.hfront_porch - 1);
+ zx_writel(vou->timing + regs->fir_htiming, val);
+
+ val = SYNC_WIDE(vm.vsync_len - 1);
+ val |= BACK_PORCH(vm.vback_porch - 1);
+ val |= FRONT_PORCH(vm.vfront_porch - 1);
+ zx_writel(vou->timing + regs->fir_vtiming, val);
+
+ /* Set up polarities */
+ if (vm.flags & DISPLAY_FLAGS_VSYNC_LOW)
+ pol |= 1 << POL_VSYNC_SHIFT;
+ if (vm.flags & DISPLAY_FLAGS_HSYNC_LOW)
+ pol |= 1 << POL_HSYNC_SHIFT;
+
+ zx_writel_mask(vou->timing + TIMING_CTRL, bits->polarity_mask,
+ pol << bits->polarity_shift);
+
+ /* Set up the SHIFT registers following what the ZTE BSP does */
+ zx_writel(vou->timing + regs->timing_shift, H_SHIFT_VAL);
+ zx_writel(vou->timing + regs->timing_pi_shift, H_PI_SHIFT_VAL);
+
+ /* Enable TIMING_CTRL */
+ zx_writel_mask(vou->timing + TIMING_TC_ENABLE, bits->tc_enable,
+ bits->tc_enable);
+
+ /* Configure channel screen size */
+ zx_writel_mask(zcrtc->chnreg + CHN_CTRL1, CHN_SCREEN_W_MASK,
+ vm.hactive << CHN_SCREEN_W_SHIFT);
+ zx_writel_mask(zcrtc->chnreg + CHN_CTRL1, CHN_SCREEN_H_MASK,
+ vm.vactive << CHN_SCREEN_H_SHIFT);
+
+ /* Update channel */
+ vou_chn_set_update(zcrtc);
+
+ /* Enable channel */
+ zx_writel_mask(zcrtc->chnreg + CHN_CTRL0, CHN_ENABLE, CHN_ENABLE);
+
+ /* Enable Graphic Layer */
+ zx_writel_mask(vou->osd + OSD_CTRL0, bits->gl_enable,
+ bits->gl_enable);
+
+ drm_crtc_vblank_on(crtc);
+
+ ret = clk_set_rate(zcrtc->pixclk, mode->clock * 1000);
+ if (ret) {
+ DRM_DEV_ERROR(vou->dev, "failed to set pixclk rate: %d\n", ret);
+ return;
+ }
+
+ ret = clk_prepare_enable(zcrtc->pixclk);
+ if (ret)
+ DRM_DEV_ERROR(vou->dev, "failed to enable pixclk: %d\n", ret);
+}
+
+static void zx_crtc_disable(struct drm_crtc *crtc)
+{
+ struct zx_crtc *zcrtc = to_zx_crtc(crtc);
+ const struct zx_crtc_bits *bits = zcrtc->bits;
+ struct zx_vou_hw *vou = zcrtc->vou;
+
+ clk_disable_unprepare(zcrtc->pixclk);
+
+ drm_crtc_vblank_off(crtc);
+
+ /* Disable Graphic Layer */
+ zx_writel_mask(vou->osd + OSD_CTRL0, bits->gl_enable, 0);
+
+ /* Disable channel */
+ zx_writel_mask(zcrtc->chnreg + CHN_CTRL0, CHN_ENABLE, 0);
+
+ /* Disable TIMING_CTRL */
+ zx_writel_mask(vou->timing + TIMING_TC_ENABLE, bits->tc_enable, 0);
+}
+
+static void zx_crtc_atomic_flush(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_state)
+{
+ struct drm_pending_vblank_event *event = crtc->state->event;
+
+ if (!event)
+ return;
+
+ crtc->state->event = NULL;
+
+ spin_lock_irq(&crtc->dev->event_lock);
+ if (drm_crtc_vblank_get(crtc) == 0)
+ drm_crtc_arm_vblank_event(crtc, event);
+ else
+ drm_crtc_send_vblank_event(crtc, event);
+ spin_unlock_irq(&crtc->dev->event_lock);
+}
+
+static const struct drm_crtc_helper_funcs zx_crtc_helper_funcs = {
+ .enable = zx_crtc_enable,
+ .disable = zx_crtc_disable,
+ .atomic_flush = zx_crtc_atomic_flush,
+};
+
+static const struct drm_crtc_funcs zx_crtc_funcs = {
+ .destroy = drm_crtc_cleanup,
+ .set_config = drm_atomic_helper_set_config,
+ .page_flip = drm_atomic_helper_page_flip,
+ .reset = drm_atomic_helper_crtc_reset,
+ .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
+};
+
+static int zx_crtc_init(struct drm_device *drm, struct zx_vou_hw *vou,
+ enum vou_chn_type chn_type)
+{
+ struct device *dev = vou->dev;
+ struct zx_layer_data data;
+ struct zx_crtc *zcrtc;
+ int ret;
+
+ zcrtc = devm_kzalloc(dev, sizeof(*zcrtc), GFP_KERNEL);
+ if (!zcrtc)
+ return -ENOMEM;
+
+ zcrtc->vou = vou;
+ zcrtc->chn_type = chn_type;
+
+ if (chn_type == VOU_CHN_MAIN) {
+ data.layer = vou->osd + MAIN_GL_OFFSET;
+ data.csc = vou->osd + MAIN_CSC_OFFSET;
+ data.hbsc = vou->osd + MAIN_HBSC_OFFSET;
+ data.rsz = vou->otfppu + MAIN_RSZ_OFFSET;
+ zcrtc->chnreg = vou->osd + OSD_MAIN_CHN;
+ zcrtc->regs = &main_crtc_regs;
+ zcrtc->bits = &main_crtc_bits;
+ } else {
+ data.layer = vou->osd + AUX_GL_OFFSET;
+ data.csc = vou->osd + AUX_CSC_OFFSET;
+ data.hbsc = vou->osd + AUX_HBSC_OFFSET;
+ data.rsz = vou->otfppu + AUX_RSZ_OFFSET;
+ zcrtc->chnreg = vou->osd + OSD_AUX_CHN;
+ zcrtc->regs = &aux_crtc_regs;
+ zcrtc->bits = &aux_crtc_bits;
+ }
+
+ zcrtc->pixclk = devm_clk_get(dev, (chn_type == VOU_CHN_MAIN) ?
+ "main_wclk" : "aux_wclk");
+ if (IS_ERR(zcrtc->pixclk)) {
+ ret = PTR_ERR(zcrtc->pixclk);
+ DRM_DEV_ERROR(dev, "failed to get pix clk: %d\n", ret);
+ return ret;
+ }
+
+ zcrtc->primary = zx_plane_init(drm, dev, &data, DRM_PLANE_TYPE_PRIMARY);
+ if (IS_ERR(zcrtc->primary)) {
+ ret = PTR_ERR(zcrtc->primary);
+ DRM_DEV_ERROR(dev, "failed to init primary plane: %d\n", ret);
+ return ret;
+ }
+
+ ret = drm_crtc_init_with_planes(drm, &zcrtc->crtc, zcrtc->primary, NULL,
+ &zx_crtc_funcs, NULL);
+ if (ret) {
+ DRM_DEV_ERROR(dev, "failed to init drm crtc: %d\n", ret);
+ return ret;
+ }
+
+ drm_crtc_helper_add(&zcrtc->crtc, &zx_crtc_helper_funcs);
+
+ if (chn_type == VOU_CHN_MAIN)
+ vou->main_crtc = zcrtc;
+ else
+ vou->aux_crtc = zcrtc;
+
+ return 0;
+}
+
+static inline struct drm_crtc *zx_find_crtc(struct drm_device *drm, int pipe)
+{
+ struct drm_crtc *crtc;
+
+ list_for_each_entry(crtc, &drm->mode_config.crtc_list, head)
+ if (crtc->index == pipe)
+ return crtc;
+
+ return NULL;
+}
+
+int zx_vou_enable_vblank(struct drm_device *drm, unsigned int pipe)
+{
+ struct drm_crtc *crtc;
+ struct zx_crtc *zcrtc;
+ struct zx_vou_hw *vou;
+ u32 int_frame_mask;
+
+ crtc = zx_find_crtc(drm, pipe);
+ if (!crtc)
+ return 0;
+
+ vou = crtc_to_vou(crtc);
+ zcrtc = to_zx_crtc(crtc);
+ int_frame_mask = zcrtc->bits->int_frame_mask;
+
+ zx_writel_mask(vou->timing + TIMING_INT_CTRL, int_frame_mask,
+ int_frame_mask);
+
+ return 0;
+}
+
+void zx_vou_disable_vblank(struct drm_device *drm, unsigned int pipe)
+{
+ struct drm_crtc *crtc;
+ struct zx_crtc *zcrtc;
+ struct zx_vou_hw *vou;
+
+ crtc = zx_find_crtc(drm, pipe);
+ if (!crtc)
+ return;
+
+ vou = crtc_to_vou(crtc);
+ zcrtc = to_zx_crtc(crtc);
+
+ zx_writel_mask(vou->timing + TIMING_INT_CTRL,
+ zcrtc->bits->int_frame_mask, 0);
+}
+
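+/*
+ * A single IRQ line covers both register blocks: TIMING_CTRL frame
+ * interrupts feed the DRM vblank machinery, while OSD update interrupts
+ * re-arm the per-frame channel/layer update bits (see the RST_PER_FRAME
+ * setup in vou_hw_init()).
+ */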
+static irqreturn_t vou_irq_handler(int irq, void *dev_id)
+{
+ struct zx_vou_hw *vou = dev_id;
+ u32 state;
+
+ /* Handle TIMING_CTRL frame interrupts */
+ state = zx_readl(vou->timing + TIMING_INT_STATE);
+ zx_writel(vou->timing + TIMING_INT_STATE, state);
+
+ if (state & TIMING_INT_MAIN_FRAME)
+ drm_crtc_handle_vblank(&vou->main_crtc->crtc);
+
+ if (state & TIMING_INT_AUX_FRAME)
+ drm_crtc_handle_vblank(&vou->aux_crtc->crtc);
+
+ /* Handle OSD interrupts */
+ state = zx_readl(vou->osd + OSD_INT_STA);
+ zx_writel(vou->osd + OSD_INT_CLRSTA, state);
+
+ if (state & OSD_INT_MAIN_UPT) {
+ vou_chn_set_update(vou->main_crtc);
+ zx_plane_set_update(vou->main_crtc->primary);
+ }
+
+ if (state & OSD_INT_AUX_UPT) {
+ vou_chn_set_update(vou->aux_crtc);
+ zx_plane_set_update(vou->aux_crtc->primary);
+ }
+
+ if (state & OSD_INT_ERROR)
+ DRM_DEV_ERROR(vou->dev, "OSD ERROR: 0x%08x!\n", state);
+
+ return IRQ_HANDLED;
+}
+
+static void vou_dtrc_init(struct zx_vou_hw *vou)
+{
+ /* Clear bit for bypass by ID */
+ zx_writel_mask(vou->dtrc + DTRC_DETILE_CTRL,
+ TILE2RASTESCAN_BYPASS_MODE, 0);
+
+ /* Select ARIDR mode */
+ zx_writel_mask(vou->dtrc + DTRC_DETILE_CTRL, DETILE_ARIDR_MODE_MASK,
+ DETILE_ARID_IN_ARIDR);
+
+ /* Bypass decompression for both frames */
+ zx_writel_mask(vou->dtrc + DTRC_F0_CTRL, DTRC_DECOMPRESS_BYPASS,
+ DTRC_DECOMPRESS_BYPASS);
+ zx_writel_mask(vou->dtrc + DTRC_F1_CTRL, DTRC_DECOMPRESS_BYPASS,
+ DTRC_DECOMPRESS_BYPASS);
+
+ /* Set up ARID register */
+ zx_writel(vou->dtrc + DTRC_ARID, DTRC_ARID3(0xf) | DTRC_ARID2(0xe) |
+ DTRC_ARID1(0xf) | DTRC_ARID0(0xe));
+}
+
+static void vou_hw_init(struct zx_vou_hw *vou)
+{
+ /* Set GL0 to main channel and GL1 to aux channel */
+ zx_writel_mask(vou->osd + OSD_CTRL0, OSD_CTRL0_GL0_SEL, 0);
+ zx_writel_mask(vou->osd + OSD_CTRL0, OSD_CTRL0_GL1_SEL,
+ OSD_CTRL0_GL1_SEL);
+
+ /* Release reset for all VOU modules */
+ zx_writel(vou->vouctl + VOU_SOFT_RST, ~0);
+
+ /* Select main clock for GL0 and aux clock for GL1 module */
+ zx_writel_mask(vou->vouctl + VOU_CLK_SEL, VOU_CLK_GL0_SEL, 0);
+ zx_writel_mask(vou->vouctl + VOU_CLK_SEL, VOU_CLK_GL1_SEL,
+ VOU_CLK_GL1_SEL);
+
+ /* Enable clock auto-gating for all VOU modules */
+ zx_writel(vou->vouctl + VOU_CLK_REQEN, ~0);
+
+ /* Enable all VOU module clocks */
+ zx_writel(vou->vouctl + VOU_CLK_EN, ~0);
+
+ /* Clear both OSD and TIMING_CTRL interrupt state */
+ zx_writel(vou->osd + OSD_INT_CLRSTA, ~0);
+ zx_writel(vou->timing + TIMING_INT_STATE, ~0);
+
+ /* Enable OSD and TIMING_CTRL interrupts */
+ zx_writel(vou->osd + OSD_INT_MSK, OSD_INT_ENABLE);
+ zx_writel(vou->timing + TIMING_INT_CTRL, TIMING_INT_ENABLE);
+
+ /* Select GPC as the input to the gl/vl scalers as a sane default */
+ zx_writel(vou->otfppu + OTFPPU_RSZ_DATA_SOURCE, 0x2a);
+
+ /*
+ * The channel and layer logic needs to be reset at the start of each
+ * frame for the VOU to work properly.
+ */
+ zx_writel_mask(vou->osd + OSD_RST_CLR, RST_PER_FRAME, RST_PER_FRAME);
+
+ vou_dtrc_init(vou);
+}
+
+static int zx_crtc_bind(struct device *dev, struct device *master, void *data)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct drm_device *drm = data;
+ struct zx_vou_hw *vou;
+ struct resource *res;
+ int irq;
+ int ret;
+
+ vou = devm_kzalloc(dev, sizeof(*vou), GFP_KERNEL);
+ if (!vou)
+ return -ENOMEM;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "osd");
+ vou->osd = devm_ioremap_resource(dev, res);
+ if (IS_ERR(vou->osd)) {
+ ret = PTR_ERR(vou->osd);
+ DRM_DEV_ERROR(dev, "failed to remap osd region: %d\n", ret);
+ return ret;
+ }
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "timing_ctrl");
+ vou->timing = devm_ioremap_resource(dev, res);
+ if (IS_ERR(vou->timing)) {
+ ret = PTR_ERR(vou->timing);
+ DRM_DEV_ERROR(dev, "failed to remap timing_ctrl region: %d\n",
+ ret);
+ return ret;
+ }
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dtrc");
+ vou->dtrc = devm_ioremap_resource(dev, res);
+ if (IS_ERR(vou->dtrc)) {
+ ret = PTR_ERR(vou->dtrc);
+ DRM_DEV_ERROR(dev, "failed to remap dtrc region: %d\n", ret);
+ return ret;
+ }
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "vou_ctrl");
+ vou->vouctl = devm_ioremap_resource(dev, res);
+ if (IS_ERR(vou->vouctl)) {
+ ret = PTR_ERR(vou->vouctl);
+ DRM_DEV_ERROR(dev, "failed to remap vou_ctrl region: %d\n",
+ ret);
+ return ret;
+ }
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "otfppu");
+ vou->otfppu = devm_ioremap_resource(dev, res);
+ if (IS_ERR(vou->otfppu)) {
+ ret = PTR_ERR(vou->otfppu);
+ DRM_DEV_ERROR(dev, "failed to remap otfppu region: %d\n", ret);
+ return ret;
+ }
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ vou->axi_clk = devm_clk_get(dev, "aclk");
+ if (IS_ERR(vou->axi_clk)) {
+ ret = PTR_ERR(vou->axi_clk);
+ DRM_DEV_ERROR(dev, "failed to get axi_clk: %d\n", ret);
+ return ret;
+ }
+
+ vou->ppu_clk = devm_clk_get(dev, "ppu_wclk");
+ if (IS_ERR(vou->ppu_clk)) {
+ ret = PTR_ERR(vou->ppu_clk);
+ DRM_DEV_ERROR(dev, "failed to get ppu_clk: %d\n", ret);
+ return ret;
+ }
+
+ ret = clk_prepare_enable(vou->axi_clk);
+ if (ret) {
+ DRM_DEV_ERROR(dev, "failed to enable axi_clk: %d\n", ret);
+ return ret;
+ }
+
+ ret = clk_prepare_enable(vou->ppu_clk);
+ if (ret) {
+ DRM_DEV_ERROR(dev, "failed to enable ppu_clk: %d\n", ret);
+ goto disable_axi_clk;
+ }
+
+ vou->dev = dev;
+ dev_set_drvdata(dev, vou);
+
+ vou_hw_init(vou);
+
+ ret = devm_request_irq(dev, irq, vou_irq_handler, 0, "zx_vou", vou);
+ if (ret < 0) {
+ DRM_DEV_ERROR(dev, "failed to request vou irq: %d\n", ret);
+ goto disable_ppu_clk;
+ }
+
+ ret = zx_crtc_init(drm, vou, VOU_CHN_MAIN);
+ if (ret) {
+ DRM_DEV_ERROR(dev, "failed to init main channel crtc: %d\n",
+ ret);
+ goto disable_ppu_clk;
+ }
+
+ ret = zx_crtc_init(drm, vou, VOU_CHN_AUX);
+ if (ret) {
+ DRM_DEV_ERROR(dev, "failed to init aux channel crtc: %d\n",
+ ret);
+ goto disable_ppu_clk;
+ }
+
+ return 0;
+
+disable_ppu_clk:
+ clk_disable_unprepare(vou->ppu_clk);
+disable_axi_clk:
+ clk_disable_unprepare(vou->axi_clk);
+ return ret;
+}
+
+static void zx_crtc_unbind(struct device *dev, struct device *master,
+ void *data)
+{
+ struct zx_vou_hw *vou = dev_get_drvdata(dev);
+
+ clk_disable_unprepare(vou->axi_clk);
+ clk_disable_unprepare(vou->ppu_clk);
+}
+
+static const struct component_ops zx_crtc_component_ops = {
+ .bind = zx_crtc_bind,
+ .unbind = zx_crtc_unbind,
+};
+
+static int zx_crtc_probe(struct platform_device *pdev)
+{
+ return component_add(&pdev->dev, &zx_crtc_component_ops);
+}
+
+static int zx_crtc_remove(struct platform_device *pdev)
+{
+ component_del(&pdev->dev, &zx_crtc_component_ops);
+ return 0;
+}
+
+static const struct of_device_id zx_crtc_of_match[] = {
+ { .compatible = "zte,zx296718-dpc", },
+ { /* end */ },
+};
+MODULE_DEVICE_TABLE(of, zx_crtc_of_match);
+
+struct platform_driver zx_crtc_driver = {
+ .probe = zx_crtc_probe,
+ .remove = zx_crtc_remove,
+ .driver = {
+ .name = "zx-crtc",
+ .of_match_table = zx_crtc_of_match,
+ },
+};
diff --git a/drivers/gpu/drm/zte/zx_vou.h b/drivers/gpu/drm/zte/zx_vou.h
new file mode 100644
index 000000000000..349e06cd86f4
--- /dev/null
+++ b/drivers/gpu/drm/zte/zx_vou.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright 2016 Linaro Ltd.
+ * Copyright 2016 ZTE Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#ifndef __ZX_VOU_H__
+#define __ZX_VOU_H__
+
+#define VOU_CRTC_MASK 0x3
+
+/* VOU output interfaces */
+enum vou_inf_id {
+ VOU_HDMI = 0,
+ VOU_RGB_LCD = 1,
+ VOU_TV_ENC = 2,
+ VOU_MIPI_DSI = 3,
+ VOU_LVDS = 4,
+ VOU_VGA = 5,
+};
+
+enum vou_inf_data_sel {
+ VOU_YUV444 = 0,
+ VOU_RGB_101010 = 1,
+ VOU_RGB_888 = 2,
+ VOU_RGB_666 = 3,
+};
+
+struct vou_inf {
+ enum vou_inf_id id;
+ enum vou_inf_data_sel data_sel;
+ u32 clocks_en_bits;
+ u32 clocks_sel_bits;
+};
+
+void vou_inf_enable(const struct vou_inf *inf, struct drm_crtc *crtc);
+void vou_inf_disable(const struct vou_inf *inf, struct drm_crtc *crtc);
+
+int zx_vou_enable_vblank(struct drm_device *drm, unsigned int pipe);
+void zx_vou_disable_vblank(struct drm_device *drm, unsigned int pipe);
+
+#endif /* __ZX_VOU_H__ */
diff --git a/drivers/gpu/drm/zte/zx_vou_regs.h b/drivers/gpu/drm/zte/zx_vou_regs.h
new file mode 100644
index 000000000000..f44e7a4ae441
--- /dev/null
+++ b/drivers/gpu/drm/zte/zx_vou_regs.h
@@ -0,0 +1,157 @@
+/*
+ * Copyright 2016 Linaro Ltd.
+ * Copyright 2016 ZTE Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#ifndef __ZX_VOU_REGS_H__
+#define __ZX_VOU_REGS_H__
+
+/* Sub-module offset */
+#define MAIN_GL_OFFSET 0x130
+#define MAIN_CSC_OFFSET 0x580
+#define MAIN_HBSC_OFFSET 0x820
+#define MAIN_RSZ_OFFSET 0x600 /* OTFPPU sub-module */
+
+#define AUX_GL_OFFSET 0x200
+#define AUX_CSC_OFFSET 0x5d0
+#define AUX_HBSC_OFFSET 0x860
+#define AUX_RSZ_OFFSET 0x800
+
+/* OSD (GPC_GLOBAL) registers */
+#define OSD_INT_STA 0x04
+#define OSD_INT_CLRSTA 0x08
+#define OSD_INT_MSK 0x0c
+#define OSD_INT_AUX_UPT BIT(14)
+#define OSD_INT_MAIN_UPT BIT(13)
+#define OSD_INT_GL1_LBW BIT(10)
+#define OSD_INT_GL0_LBW BIT(9)
+#define OSD_INT_VL2_LBW BIT(8)
+#define OSD_INT_VL1_LBW BIT(7)
+#define OSD_INT_VL0_LBW BIT(6)
+#define OSD_INT_BUS_ERR BIT(3)
+#define OSD_INT_CFG_ERR BIT(2)
+#define OSD_INT_ERROR (\
+ OSD_INT_GL1_LBW | OSD_INT_GL0_LBW | \
+ OSD_INT_VL2_LBW | OSD_INT_VL1_LBW | OSD_INT_VL0_LBW | \
+ OSD_INT_BUS_ERR | OSD_INT_CFG_ERR \
+)
+#define OSD_INT_ENABLE (OSD_INT_ERROR | OSD_INT_AUX_UPT | OSD_INT_MAIN_UPT)
+#define OSD_CTRL0 0x10
+#define OSD_CTRL0_GL0_EN BIT(7)
+#define OSD_CTRL0_GL0_SEL BIT(6)
+#define OSD_CTRL0_GL1_EN BIT(5)
+#define OSD_CTRL0_GL1_SEL BIT(4)
+#define OSD_RST_CLR 0x1c
+#define RST_PER_FRAME BIT(19)
+
+/* Main/Aux channel registers */
+#define OSD_MAIN_CHN 0x470
+#define OSD_AUX_CHN 0x4d0
+#define CHN_CTRL0 0x00
+#define CHN_ENABLE BIT(0)
+#define CHN_CTRL1 0x04
+#define CHN_SCREEN_W_SHIFT 18
+#define CHN_SCREEN_W_MASK (0x1fff << CHN_SCREEN_W_SHIFT)
+#define CHN_SCREEN_H_SHIFT 5
+#define CHN_SCREEN_H_MASK (0x1fff << CHN_SCREEN_H_SHIFT)
+#define CHN_UPDATE 0x08
+
+/* TIMING_CTRL registers */
+#define TIMING_TC_ENABLE 0x04
+#define AUX_TC_EN BIT(1)
+#define MAIN_TC_EN BIT(0)
+#define FIR_MAIN_ACTIVE 0x08
+#define FIR_AUX_ACTIVE 0x0c
+#define V_ACTIVE_SHIFT 16
+#define V_ACTIVE_MASK (0xffff << V_ACTIVE_SHIFT)
+#define H_ACTIVE_SHIFT 0
+#define H_ACTIVE_MASK (0xffff << H_ACTIVE_SHIFT)
+#define FIR_MAIN_H_TIMING 0x10
+#define FIR_MAIN_V_TIMING 0x14
+#define FIR_AUX_H_TIMING 0x18
+#define FIR_AUX_V_TIMING 0x1c
+#define SYNC_WIDE_SHIFT 22
+#define SYNC_WIDE_MASK (0x3ff << SYNC_WIDE_SHIFT)
+#define BACK_PORCH_SHIFT 11
+#define BACK_PORCH_MASK (0x7ff << BACK_PORCH_SHIFT)
+#define FRONT_PORCH_SHIFT 0
+#define FRONT_PORCH_MASK (0x7ff << FRONT_PORCH_SHIFT)
+#define TIMING_CTRL 0x20
+#define AUX_POL_SHIFT 3
+#define AUX_POL_MASK (0x7 << AUX_POL_SHIFT)
+#define MAIN_POL_SHIFT 0
+#define MAIN_POL_MASK (0x7 << MAIN_POL_SHIFT)
+#define POL_DE_SHIFT 2
+#define POL_VSYNC_SHIFT 1
+#define POL_HSYNC_SHIFT 0
+#define TIMING_INT_CTRL 0x24
+#define TIMING_INT_STATE 0x28
+#define TIMING_INT_AUX_FRAME BIT(3)
+#define TIMING_INT_MAIN_FRAME BIT(1)
+#define TIMING_INT_AUX_FRAME_SEL_VSW (0x2 << 10)
+#define TIMING_INT_MAIN_FRAME_SEL_VSW (0x2 << 6)
+#define TIMING_INT_ENABLE (\
+ TIMING_INT_MAIN_FRAME_SEL_VSW | TIMING_INT_AUX_FRAME_SEL_VSW | \
+ TIMING_INT_MAIN_FRAME | TIMING_INT_AUX_FRAME \
+)
+#define TIMING_MAIN_SHIFT 0x2c
+#define TIMING_AUX_SHIFT 0x30
+#define H_SHIFT_VAL 0x0048
+#define TIMING_MAIN_PI_SHIFT 0x68
+#define TIMING_AUX_PI_SHIFT 0x6c
+#define H_PI_SHIFT_VAL 0x000f
+
+#define V_ACTIVE(x) (((x) << V_ACTIVE_SHIFT) & V_ACTIVE_MASK)
+#define H_ACTIVE(x) (((x) << H_ACTIVE_SHIFT) & H_ACTIVE_MASK)
+
+#define SYNC_WIDE(x) (((x) << SYNC_WIDE_SHIFT) & SYNC_WIDE_MASK)
+#define BACK_PORCH(x) (((x) << BACK_PORCH_SHIFT) & BACK_PORCH_MASK)
+#define FRONT_PORCH(x) (((x) << FRONT_PORCH_SHIFT) & FRONT_PORCH_MASK)
+
+/* DTRC registers */
+#define DTRC_F0_CTRL 0x2c
+#define DTRC_F1_CTRL 0x5c
+#define DTRC_DECOMPRESS_BYPASS BIT(17)
+#define DTRC_DETILE_CTRL 0x68
+#define TILE2RASTESCAN_BYPASS_MODE BIT(30)
+#define DETILE_ARIDR_MODE_MASK (0x3 << 0)
+#define DETILE_ARID_ALL 0
+#define DETILE_ARID_IN_ARIDR 1
+#define DETILE_ARID_BYP_BUT_ARIDR 2
+#define DETILE_ARID_IN_ARIDR2 3
+#define DTRC_ARID 0x6c
+#define DTRC_ARID3_SHIFT 24
+#define DTRC_ARID3_MASK (0xff << DTRC_ARID3_SHIFT)
+#define DTRC_ARID2_SHIFT 16
+#define DTRC_ARID2_MASK (0xff << DTRC_ARID2_SHIFT)
+#define DTRC_ARID1_SHIFT 8
+#define DTRC_ARID1_MASK (0xff << DTRC_ARID1_SHIFT)
+#define DTRC_ARID0_SHIFT 0
+#define DTRC_ARID0_MASK (0xff << DTRC_ARID0_SHIFT)
+#define DTRC_DEC2DDR_ARID 0x70
+
+#define DTRC_ARID3(x) (((x) << DTRC_ARID3_SHIFT) & DTRC_ARID3_MASK)
+#define DTRC_ARID2(x) (((x) << DTRC_ARID2_SHIFT) & DTRC_ARID2_MASK)
+#define DTRC_ARID1(x) (((x) << DTRC_ARID1_SHIFT) & DTRC_ARID1_MASK)
+#define DTRC_ARID0(x) (((x) << DTRC_ARID0_SHIFT) & DTRC_ARID0_MASK)
+
+/* VOU_CTRL registers */
+#define VOU_INF_EN 0x00
+#define VOU_INF_CH_SEL 0x04
+#define VOU_INF_DATA_SEL 0x08
+#define VOU_SOFT_RST 0x14
+#define VOU_CLK_SEL 0x18
+#define VOU_CLK_GL1_SEL BIT(5)
+#define VOU_CLK_GL0_SEL BIT(4)
+#define VOU_CLK_REQEN 0x20
+#define VOU_CLK_EN 0x24
+
+/* OTFPPU_CTRL registers */
+#define OTFPPU_RSZ_DATA_SOURCE 0x04
+
+#endif /* __ZX_VOU_REGS_H__ */
diff --git a/drivers/gpu/host1x/dev.h b/drivers/gpu/host1x/dev.h
index 5220510f39da..06dd4f85125f 100644
--- a/drivers/gpu/host1x/dev.h
+++ b/drivers/gpu/host1x/dev.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2012-2013, NVIDIA Corporation.
+ * Copyright (c) 2012-2015, NVIDIA Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -120,6 +120,7 @@ struct host1x {
struct host1x_syncpt *nop_sp;
+ struct mutex syncpt_mutex;
struct mutex chlist_mutex;
struct host1x_channel chlist;
unsigned long allocated_channels;
diff --git a/drivers/gpu/host1x/job.c b/drivers/gpu/host1x/job.c
index a91b7c4a6110..92c3df933303 100644
--- a/drivers/gpu/host1x/job.c
+++ b/drivers/gpu/host1x/job.c
@@ -1,7 +1,7 @@
/*
* Tegra host1x Job
*
- * Copyright (c) 2010-2013, NVIDIA Corporation.
+ * Copyright (c) 2010-2015, NVIDIA Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -539,9 +539,12 @@ int host1x_job_pin(struct host1x_job *job, struct device *dev)
g->base = job->gather_addr_phys[i];
- for (j = i + 1; j < job->num_gathers; j++)
- if (job->gathers[j].bo == g->bo)
+ for (j = i + 1; j < job->num_gathers; j++) {
+ if (job->gathers[j].bo == g->bo) {
job->gathers[j].handled = true;
+ job->gathers[j].base = g->base;
+ }
+ }
err = do_relocs(job, g->bo);
if (err)
diff --git a/drivers/gpu/host1x/syncpt.c b/drivers/gpu/host1x/syncpt.c
index 95589328ad52..25c11a85050b 100644
--- a/drivers/gpu/host1x/syncpt.c
+++ b/drivers/gpu/host1x/syncpt.c
@@ -1,7 +1,7 @@
/*
* Tegra host1x Syncpoints
*
- * Copyright (c) 2010-2013, NVIDIA Corporation.
+ * Copyright (c) 2010-2015, NVIDIA Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -61,22 +61,24 @@ static struct host1x_syncpt *host1x_syncpt_alloc(struct host1x *host,
struct host1x_syncpt *sp = host->syncpt;
char *name;
+ mutex_lock(&host->syncpt_mutex);
+
for (i = 0; i < host->info->nb_pts && sp->name; i++, sp++)
;
if (i >= host->info->nb_pts)
- return NULL;
+ goto unlock;
if (flags & HOST1X_SYNCPT_HAS_BASE) {
sp->base = host1x_syncpt_base_request(host);
if (!sp->base)
- return NULL;
+ goto unlock;
}
name = kasprintf(GFP_KERNEL, "%02u-%s", sp->id,
dev ? dev_name(dev) : NULL);
if (!name)
- return NULL;
+ goto free_base;
sp->dev = dev;
sp->name = name;
@@ -86,7 +88,15 @@ static struct host1x_syncpt *host1x_syncpt_alloc(struct host1x *host,
else
sp->client_managed = false;
+ mutex_unlock(&host->syncpt_mutex);
return sp;
+
+free_base:
+ host1x_syncpt_base_free(sp->base);
+ sp->base = NULL;
+unlock:
+ mutex_unlock(&host->syncpt_mutex);
+ return NULL;
}
u32 host1x_syncpt_id(struct host1x_syncpt *sp)
@@ -378,6 +388,7 @@ int host1x_syncpt_init(struct host1x *host)
for (i = 0; i < host->info->nb_bases; i++)
bases[i].id = i;
+ mutex_init(&host->syncpt_mutex);
host->syncpt = syncpt;
host->bases = bases;
@@ -405,12 +416,16 @@ void host1x_syncpt_free(struct host1x_syncpt *sp)
if (!sp)
return;
+ mutex_lock(&sp->host->syncpt_mutex);
+
host1x_syncpt_base_free(sp->base);
kfree(sp->name);
sp->base = NULL;
sp->dev = NULL;
sp->name = NULL;
sp->client_managed = false;
+
+ mutex_unlock(&sp->host->syncpt_mutex);
}
EXPORT_SYMBOL(host1x_syncpt_free);
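The new syncpt_mutex closes a race in host1x_syncpt_alloc(): the free-slot search and the claim (setting sp->name) now happen under one lock, and host1x_syncpt_free() takes the same lock before clearing the entry. A hypothetical helper showing the locking rule (not part of the patch):

static struct host1x_syncpt *find_free_syncpt(struct host1x *host)
{
	struct host1x_syncpt *sp = host->syncpt;
	unsigned int i;

	/* Caller must hold syncpt_mutex so lookup and claim are atomic. */
	lockdep_assert_held(&host->syncpt_mutex);

	for (i = 0; i < host->info->nb_pts; i++, sp++)
		if (!sp->name)	/* a NULL name marks a free entry */
			return sp;

	return NULL;
}

Note also the unwind order on failure: free_base releases the requested base before unlock drops the mutex, mirroring the acquisition order in reverse.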
diff --git a/drivers/gpu/ipu-v3/Kconfig b/drivers/gpu/ipu-v3/Kconfig
index aefdff95356d..08766c6e7856 100644
--- a/drivers/gpu/ipu-v3/Kconfig
+++ b/drivers/gpu/ipu-v3/Kconfig
@@ -1,7 +1,6 @@
config IMX_IPUV3_CORE
tristate "IPUv3 core support"
depends on SOC_IMX5 || SOC_IMX6Q || ARCH_MULTIPLATFORM
- depends on RESET_CONTROLLER
select GENERIC_IRQ_CHIP
help
Choose this if you have a i.MX5/6 system and want to use the Image
diff --git a/drivers/gpu/ipu-v3/ipu-common.c b/drivers/gpu/ipu-v3/ipu-common.c
index b9539f7c5e9a..97218af4fe75 100644
--- a/drivers/gpu/ipu-v3/ipu-common.c
+++ b/drivers/gpu/ipu-v3/ipu-common.c
@@ -88,6 +88,8 @@ enum ipu_color_space ipu_drm_fourcc_to_colorspace(u32 drm_fourcc)
case DRM_FORMAT_YVU420:
case DRM_FORMAT_YUV422:
case DRM_FORMAT_YVU422:
+ case DRM_FORMAT_YUV444:
+ case DRM_FORMAT_YVU444:
case DRM_FORMAT_NV12:
case DRM_FORMAT_NV21:
case DRM_FORMAT_NV16:
@@ -1284,8 +1286,11 @@ static int ipu_irq_init(struct ipu_soc *ipu)
return ret;
}
- for (i = 0; i < IPU_NUM_IRQS; i += 32)
+ /* Mask and clear all interrupts */
+ for (i = 0; i < IPU_NUM_IRQS; i += 32) {
ipu_cm_write(ipu, 0, IPU_INT_CTRL(i / 32));
+ ipu_cm_write(ipu, ~unused[i / 32], IPU_INT_STAT(i / 32));
+ }
for (i = 0; i < IPU_NUM_IRQS; i += 32) {
gc = irq_get_domain_generic_chip(ipu->domain, i);
diff --git a/drivers/gpu/ipu-v3/ipu-cpmem.c b/drivers/gpu/ipu-v3/ipu-cpmem.c
index fcb7dc86167b..4b2b67113d92 100644
--- a/drivers/gpu/ipu-v3/ipu-cpmem.c
+++ b/drivers/gpu/ipu-v3/ipu-cpmem.c
@@ -417,42 +417,6 @@ void ipu_cpmem_set_yuv_planar_full(struct ipuv3_channel *ch,
}
EXPORT_SYMBOL_GPL(ipu_cpmem_set_yuv_planar_full);
-void ipu_cpmem_set_yuv_planar(struct ipuv3_channel *ch,
- u32 pixel_format, int stride, int height)
-{
- int fourcc, u_offset, v_offset;
- int uv_stride = 0;
-
- fourcc = v4l2_pix_fmt_to_drm_fourcc(pixel_format);
- switch (fourcc) {
- case DRM_FORMAT_YUV420:
- uv_stride = stride / 2;
- u_offset = stride * height;
- v_offset = u_offset + (uv_stride * height / 2);
- break;
- case DRM_FORMAT_YVU420:
- uv_stride = stride / 2;
- v_offset = stride * height;
- u_offset = v_offset + (uv_stride * height / 2);
- break;
- case DRM_FORMAT_YUV422:
- uv_stride = stride / 2;
- u_offset = stride * height;
- v_offset = u_offset + (uv_stride * height);
- break;
- case DRM_FORMAT_NV12:
- case DRM_FORMAT_NV16:
- uv_stride = stride;
- u_offset = stride * height;
- v_offset = 0;
- break;
- default:
- return;
- }
- ipu_cpmem_set_yuv_planar_full(ch, uv_stride, u_offset, v_offset);
-}
-EXPORT_SYMBOL_GPL(ipu_cpmem_set_yuv_planar);
-
static const struct ipu_rgb def_xrgb_32 = {
.red = { .offset = 16, .length = 8, },
.green = { .offset = 8, .length = 8, },
@@ -590,6 +554,13 @@ int ipu_cpmem_set_fmt(struct ipuv3_channel *ch, u32 drm_fourcc)
/* burst size */
ipu_ch_param_write_field(ch, IPU_FIELD_NPB, 31);
break;
+ case DRM_FORMAT_YUV444:
+ case DRM_FORMAT_YVU444:
+ /* pix format */
+ ipu_ch_param_write_field(ch, IPU_FIELD_PFS, 0);
+ /* burst size */
+ ipu_ch_param_write_field(ch, IPU_FIELD_NPB, 31);
+ break;
case DRM_FORMAT_NV12:
/* pix format */
ipu_ch_param_write_field(ch, IPU_FIELD_PFS, 4);
diff --git a/drivers/gpu/ipu-v3/ipu-csi.c b/drivers/gpu/ipu-v3/ipu-csi.c
index d6e5ded24418..63c7292f427a 100644
--- a/drivers/gpu/ipu-v3/ipu-csi.c
+++ b/drivers/gpu/ipu-v3/ipu-csi.c
@@ -529,6 +529,22 @@ void ipu_csi_set_window(struct ipu_csi *csi, struct v4l2_rect *w)
}
EXPORT_SYMBOL_GPL(ipu_csi_set_window);
+void ipu_csi_set_downsize(struct ipu_csi *csi, bool horiz, bool vert)
+{
+ unsigned long flags;
+ u32 reg;
+
+ spin_lock_irqsave(&csi->lock, flags);
+
+ reg = ipu_csi_read(csi, CSI_OUT_FRM_CTRL);
+ reg &= ~(CSI_HORI_DOWNSIZE_EN | CSI_VERT_DOWNSIZE_EN);
+ reg |= (horiz ? CSI_HORI_DOWNSIZE_EN : 0) |
+ (vert ? CSI_VERT_DOWNSIZE_EN : 0);
+ ipu_csi_write(csi, reg, CSI_OUT_FRM_CTRL);
+
+ spin_unlock_irqrestore(&csi->lock, flags);
+}
+
void ipu_csi_set_test_generator(struct ipu_csi *csi, bool active,
u32 r_value, u32 g_value, u32 b_value,
u32 pix_clk)
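ipu_csi_set_downsize() is a textbook IRQ-safe read-modify-write: take the CSI spinlock, clear both downsize bits, set the requested ones, write back. The same shape generalized over an arbitrary bit (a sketch, not driver code; the feature_bit parameter is hypothetical):

static void example_set_feature(struct ipu_csi *csi, u32 feature_bit,
				bool enable)
{
	unsigned long flags;
	u32 reg;

	spin_lock_irqsave(&csi->lock, flags);	/* safe from IRQ context */
	reg = ipu_csi_read(csi, CSI_OUT_FRM_CTRL);
	if (enable)
		reg |= feature_bit;
	else
		reg &= ~feature_bit;
	ipu_csi_write(csi, reg, CSI_OUT_FRM_CTRL);
	spin_unlock_irqrestore(&csi->lock, flags);
}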
diff --git a/drivers/gpu/ipu-v3/ipu-di.c b/drivers/gpu/ipu-v3/ipu-di.c
index a8d87ddd8a17..d2f1bd9d3deb 100644
--- a/drivers/gpu/ipu-v3/ipu-di.c
+++ b/drivers/gpu/ipu-v3/ipu-di.c
@@ -535,7 +535,7 @@ int ipu_di_adjust_videomode(struct ipu_di *di, struct videomode *mode)
return -EINVAL;
}
- dev_warn(di->ipu->dev, "videomode adapted for IPU restrictions\n");
+ dev_dbg(di->ipu->dev, "videomode adapted for IPU restrictions\n");
return 0;
}
EXPORT_SYMBOL_GPL(ipu_di_adjust_videomode);
diff --git a/drivers/gpu/vga/vgaarb.c b/drivers/gpu/vga/vgaarb.c
index 1887f199ccb7..0f5b2dd24507 100644
--- a/drivers/gpu/vga/vgaarb.c
+++ b/drivers/gpu/vga/vgaarb.c
@@ -31,6 +31,10 @@
#define pr_fmt(fmt) "vgaarb: " fmt
+#define vgaarb_dbg(dev, fmt, arg...) dev_dbg(dev, "vgaarb: " fmt, ##arg)
+#define vgaarb_info(dev, fmt, arg...) dev_info(dev, "vgaarb: " fmt, ##arg)
+#define vgaarb_err(dev, fmt, arg...) dev_err(dev, "vgaarb: " fmt, ##arg)
+
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/pci.h>
@@ -188,6 +192,7 @@ static void vga_check_first_use(void)
static struct vga_device *__vga_tryget(struct vga_device *vgadev,
unsigned int rsrc)
{
+ struct device *dev = &vgadev->pdev->dev;
unsigned int wants, legacy_wants, match;
struct vga_device *conflict;
unsigned int pci_bits;
@@ -203,8 +208,8 @@ static struct vga_device *__vga_tryget(struct vga_device *vgadev,
(vgadev->decodes & VGA_RSRC_LEGACY_MEM))
rsrc |= VGA_RSRC_LEGACY_MEM;
- pr_debug("%s: %d\n", __func__, rsrc);
- pr_debug("%s: owns: %d\n", __func__, vgadev->owns);
+ vgaarb_dbg(dev, "%s: %d\n", __func__, rsrc);
+ vgaarb_dbg(dev, "%s: owns: %d\n", __func__, vgadev->owns);
/* Check what resources we need to acquire */
wants = rsrc & ~vgadev->owns;
@@ -336,9 +341,10 @@ lock_them:
static void __vga_put(struct vga_device *vgadev, unsigned int rsrc)
{
+ struct device *dev = &vgadev->pdev->dev;
unsigned int old_locks = vgadev->locks;
- pr_debug("%s\n", __func__);
+ vgaarb_dbg(dev, "%s\n", __func__);
/* Update our counters, and account for equivalent legacy resources
* if we decode them
@@ -611,7 +617,7 @@ static bool vga_arbiter_add_pci_device(struct pci_dev *pdev)
/* Allocate structure */
vgadev = kzalloc(sizeof(struct vga_device), GFP_KERNEL);
if (vgadev == NULL) {
- pr_err("failed to allocate pci device\n");
+ vgaarb_err(&pdev->dev, "failed to allocate VGA arbiter data\n");
/*
* What to do on allocation failure? For now, let's just do
* nothing, I'm not sure there is anything saner to be done.
@@ -663,7 +669,7 @@ static bool vga_arbiter_add_pci_device(struct pci_dev *pdev)
*/
if (vga_default == NULL &&
((vgadev->owns & VGA_RSRC_LEGACY_MASK) == VGA_RSRC_LEGACY_MASK)) {
- pr_info("setting as boot device: PCI:%s\n", pci_name(pdev));
+ vgaarb_info(&pdev->dev, "setting as boot VGA device\n");
vga_set_default_device(pdev);
}
@@ -672,8 +678,7 @@ static bool vga_arbiter_add_pci_device(struct pci_dev *pdev)
/* Add to the list */
list_add(&vgadev->list, &vga_list);
vga_count++;
- pr_info("device added: PCI:%s,decodes=%s,owns=%s,locks=%s\n",
- pci_name(pdev),
+ vgaarb_info(&pdev->dev, "VGA device added: decodes=%s,owns=%s,locks=%s\n",
vga_iostate_to_str(vgadev->decodes),
vga_iostate_to_str(vgadev->owns),
vga_iostate_to_str(vgadev->locks));
@@ -725,6 +730,7 @@ bail:
static inline void vga_update_device_decodes(struct vga_device *vgadev,
int new_decodes)
{
+ struct device *dev = &vgadev->pdev->dev;
int old_decodes, decodes_removed, decodes_unlocked;
old_decodes = vgadev->decodes;
@@ -732,8 +738,7 @@ static inline void vga_update_device_decodes(struct vga_device *vgadev,
decodes_unlocked = vgadev->locks & decodes_removed;
vgadev->decodes = new_decodes;
- pr_info("device changed decodes: PCI:%s,olddecodes=%s,decodes=%s:owns=%s\n",
- pci_name(vgadev->pdev),
+ vgaarb_info(dev, "changed VGA decodes: olddecodes=%s,decodes=%s:owns=%s\n",
vga_iostate_to_str(old_decodes),
vga_iostate_to_str(vgadev->decodes),
vga_iostate_to_str(vgadev->owns));
@@ -754,7 +759,7 @@ static inline void vga_update_device_decodes(struct vga_device *vgadev,
if (!(old_decodes & VGA_RSRC_LEGACY_MASK) &&
new_decodes & VGA_RSRC_LEGACY_MASK)
vga_decode_count++;
- pr_debug("decoding count now is: %d\n", vga_decode_count);
+ vgaarb_dbg(dev, "decoding count now is: %d\n", vga_decode_count);
}
static void __vga_set_legacy_decoding(struct pci_dev *pdev,
@@ -1022,21 +1027,16 @@ static ssize_t vga_arb_write(struct file *file, const char __user *buf,
unsigned int io_state;
- char *kbuf, *curr_pos;
+ char kbuf[64], *curr_pos;
size_t remaining = count;
int ret_val;
int i;
-
- kbuf = kmalloc(count + 1, GFP_KERNEL);
- if (!kbuf)
- return -ENOMEM;
-
- if (copy_from_user(kbuf, buf, count)) {
- kfree(kbuf);
+ if (count >= sizeof(kbuf))
+ return -EINVAL;
+ if (copy_from_user(kbuf, buf, count))
return -EFAULT;
- }
curr_pos = kbuf;
kbuf[count] = '\0'; /* Just to make sure... */
@@ -1189,24 +1189,25 @@ static ssize_t vga_arb_write(struct file *file, const char __user *buf,
ret_val = -EPROTO;
goto done;
}
- pr_debug("%s ==> %x:%x:%x.%x\n", curr_pos,
- domain, bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
-
pdev = pci_get_domain_bus_and_slot(domain, bus, devfn);
- pr_debug("pdev %p\n", pdev);
if (!pdev) {
- pr_err("invalid PCI address %x:%x:%x\n",
- domain, bus, devfn);
+ pr_debug("invalid PCI address %04x:%02x:%02x.%x\n",
+ domain, bus, PCI_SLOT(devfn),
+ PCI_FUNC(devfn));
ret_val = -ENODEV;
goto done;
}
+
+ pr_debug("%s ==> %04x:%02x:%02x.%x pdev %p\n", curr_pos,
+ domain, bus, PCI_SLOT(devfn), PCI_FUNC(devfn),
+ pdev);
}
vgadev = vgadev_find(pdev);
pr_debug("vgadev %p\n", vgadev);
if (vgadev == NULL) {
if (pdev) {
- pr_err("this pci device is not a vga device\n");
+ vgaarb_dbg(&pdev->dev, "not a VGA device\n");
pci_dev_put(pdev);
}
@@ -1226,7 +1227,7 @@ static ssize_t vga_arb_write(struct file *file, const char __user *buf,
}
}
if (i == MAX_USER_CARDS) {
- pr_err("maximum user cards (%d) number reached!\n",
+ vgaarb_dbg(&pdev->dev, "maximum user cards (%d) number reached, ignoring this one!\n",
MAX_USER_CARDS);
pci_dev_put(pdev);
/* XXX: which value to return? */
@@ -1259,11 +1260,9 @@ static ssize_t vga_arb_write(struct file *file, const char __user *buf,
goto done;
}
/* If we got here, the message written is not part of the protocol! */
- kfree(kbuf);
return -EPROTO;
done:
- kfree(kbuf);
return ret_val;
}
@@ -1317,8 +1316,8 @@ static int vga_arb_release(struct inode *inode, struct file *file)
uc = &priv->cards[i];
if (uc->pdev == NULL)
continue;
- pr_debug("uc->io_cnt == %d, uc->mem_cnt == %d\n",
- uc->io_cnt, uc->mem_cnt);
+ vgaarb_dbg(&uc->pdev->dev, "uc->io_cnt == %d, uc->mem_cnt == %d\n",
+ uc->io_cnt, uc->mem_cnt);
while (uc->io_cnt--)
vga_put(uc->pdev, VGA_RSRC_LEGACY_IO);
while (uc->mem_cnt--)
@@ -1371,7 +1370,7 @@ static int pci_notify(struct notifier_block *nb, unsigned long action,
struct pci_dev *pdev = to_pci_dev(dev);
bool notify = false;
- pr_debug("%s\n", __func__);
+ vgaarb_dbg(dev, "%s\n", __func__);
/* For now we're only interested in devices added and removed. I didn't
* test this thing here, so someone needs to double check for the
@@ -1423,9 +1422,8 @@ static int __init vga_arb_device_init(void)
PCI_ANY_ID, pdev)) != NULL)
vga_arbiter_add_pci_device(pdev);
- pr_info("loaded\n");
-
list_for_each_entry(vgadev, &vga_list, list) {
+ struct device *dev = &vgadev->pdev->dev;
#if defined(CONFIG_X86) || defined(CONFIG_IA64)
/*
* Override vga_arbiter_add_pci_device()'s I/O based detection
@@ -1458,21 +1456,19 @@ static int __init vga_arb_device_init(void)
continue;
if (!vga_default_device())
- pr_info("setting as boot device: PCI:%s\n",
- pci_name(vgadev->pdev));
+ vgaarb_info(dev, "setting as boot device\n");
else if (vgadev->pdev != vga_default_device())
- pr_info("overriding boot device: PCI:%s\n",
- pci_name(vgadev->pdev));
+ vgaarb_info(dev, "overriding boot device\n");
vga_set_default_device(vgadev->pdev);
}
#endif
if (vgadev->bridge_has_one_vga)
- pr_info("bridge control possible %s\n",
- pci_name(vgadev->pdev));
+ vgaarb_info(dev, "bridge control possible\n");
else
- pr_info("no bridge control possible %s\n",
- pci_name(vgadev->pdev));
+ vgaarb_info(dev, "no bridge control possible\n");
}
+
+ pr_info("loaded\n");
return rc;
}
subsys_initcall(vga_arb_device_init);
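The three vgaarb_* wrappers introduced at the top of this patch are thin shims over dev_dbg()/dev_info()/dev_err() that keep the historical "vgaarb: " prefix while gaining the standard device-name prefix, which is what makes the remaining pr_* conversions above one-line changes. Usage sketch (the log text is illustrative):

static void example_log(struct pci_dev *pdev)
{
	/* Emits e.g. "pci 0000:01:00.0: vgaarb: VGA device added" */
	vgaarb_info(&pdev->dev, "VGA device added\n");
}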
diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig
index cd4599c0523b..4070b7386e9d 100644
--- a/drivers/hid/Kconfig
+++ b/drivers/hid/Kconfig
@@ -138,7 +138,7 @@ config HID_ASUS
tristate "Asus"
depends on I2C_HID
---help---
- Support for Asus notebook built-in keyboard via i2c.
+ Support for Asus notebook built-in keyboard and touchpad via i2c.
Supported devices:
- EeeBook X205TA
@@ -214,7 +214,7 @@ config HID_CMEDIA
config HID_CP2112
tristate "Silicon Labs CP2112 HID USB-to-SMBus Bridge support"
- depends on USB_HID && I2C && GPIOLIB
+ depends on USB_HID && I2C && GPIOLIB && GPIOLIB_IRQCHIP
---help---
Support for Silicon Labs CP2112 HID USB to SMBus Master Bridge.
This is a HID device driver which registers as an i2c adapter
@@ -512,6 +512,14 @@ config HID_MAGICMOUSE
Say Y here if you want support for the multi-touch features of the
Apple Wireless "Magic" Mouse and the Apple Wireless "Magic" Trackpad.
+config HID_MAYFLASH
+ tristate "Mayflash game controller adapter force feedback"
+ depends on HID
+ select INPUT_FF_MEMLESS
+ ---help---
+ Say Y here if you have HJZ Mayflash PS3 game controller adapters
+ and want to enable force feedback support.
+
config HID_MICROSOFT
tristate "Microsoft non-fully HID-compliant devices"
depends on HID
@@ -861,6 +869,13 @@ config THRUSTMASTER_FF
a THRUSTMASTER Dual Trigger 3-in-1 or a THRUSTMASTER Ferrari GT
Rumble Force or Force Feedback Wheel.
+config HID_UDRAW_PS3
+ tristate "THQ PS3 uDraw tablet"
+ depends on HID
+ ---help---
+ Say Y here if you want to use the THQ uDraw gaming tablet for
+ the PS3.
+
config HID_WACOM
tristate "Wacom Intuos/Graphire tablet support (USB)"
depends on HID
diff --git a/drivers/hid/Makefile b/drivers/hid/Makefile
index 86b2b5785fd2..4d111f23e801 100644
--- a/drivers/hid/Makefile
+++ b/drivers/hid/Makefile
@@ -58,6 +58,7 @@ obj-$(CONFIG_HID_LOGITECH) += hid-logitech.o
obj-$(CONFIG_HID_LOGITECH_DJ) += hid-logitech-dj.o
obj-$(CONFIG_HID_LOGITECH_HIDPP) += hid-logitech-hidpp.o
obj-$(CONFIG_HID_MAGICMOUSE) += hid-magicmouse.o
+obj-$(CONFIG_HID_MAYFLASH) += hid-mf.o
obj-$(CONFIG_HID_MICROSOFT) += hid-microsoft.o
obj-$(CONFIG_HID_MONTEREY) += hid-monterey.o
obj-$(CONFIG_HID_MULTITOUCH) += hid-multitouch.o
@@ -96,6 +97,7 @@ obj-$(CONFIG_HID_TIVO) += hid-tivo.o
obj-$(CONFIG_HID_TOPSEED) += hid-topseed.o
obj-$(CONFIG_HID_TWINHAN) += hid-twinhan.o
obj-$(CONFIG_HID_UCLOGIC) += hid-uclogic.o
+obj-$(CONFIG_HID_UDRAW_PS3) += hid-udraw-ps3.o
obj-$(CONFIG_HID_LED) += hid-led.o
obj-$(CONFIG_HID_XINMO) += hid-xinmo.o
obj-$(CONFIG_HID_ZEROPLUS) += hid-zpff.o
diff --git a/drivers/hid/hid-asus.c b/drivers/hid/hid-asus.c
index 7a811ec4f2e1..d40ed9fdf68d 100644
--- a/drivers/hid/hid-asus.c
+++ b/drivers/hid/hid-asus.c
@@ -11,6 +11,12 @@
* This module is based on hid-ortek by
* Copyright (c) 2010 Johnathon Harris <jmharris@gmail.com>
* Copyright (c) 2011 Jiri Kosina
+ *
+ * This module has been updated to add support for the Asus i2c touchpad.
+ *
+ * Copyright (c) 2016 Brendan McGrath <redmcg@redmandi.dyndns.org>
+ * Copyright (c) 2016 Victor Vlasenko <victor.vlasenko@sysgears.com>
+ * Copyright (c) 2016 Frederik Wenigwieser <frederik.wenigwieser@gmail.com>
*/
/*
@@ -20,16 +26,287 @@
* any later version.
*/
-#include <linux/device.h>
#include <linux/hid.h>
#include <linux/module.h>
+#include <linux/input/mt.h>
#include "hid-ids.h"
+MODULE_AUTHOR("Yusuke Fujimaki <usk.fujimaki@gmail.com>");
+MODULE_AUTHOR("Brendan McGrath <redmcg@redmandi.dyndns.org>");
+MODULE_AUTHOR("Victor Vlasenko <victor.vlasenko@sysgears.com>");
+MODULE_AUTHOR("Frederik Wenigwieser <frederik.wenigwieser@gmail.com>");
+MODULE_DESCRIPTION("Asus HID Keyboard and TouchPad");
+
+#define FEATURE_REPORT_ID 0x0d
+#define INPUT_REPORT_ID 0x5d
+
+#define INPUT_REPORT_SIZE 28
+
+#define MAX_CONTACTS 5
+
+#define MAX_X 2794
+#define MAX_Y 1758
+#define MAX_TOUCH_MAJOR 8
+#define MAX_PRESSURE 128
+
+#define CONTACT_DATA_SIZE 5
+
+#define BTN_LEFT_MASK 0x01
+#define CONTACT_TOOL_TYPE_MASK 0x80
+#define CONTACT_X_MSB_MASK 0xf0
+#define CONTACT_Y_MSB_MASK 0x0f
+#define CONTACT_TOUCH_MAJOR_MASK 0x07
+#define CONTACT_PRESSURE_MASK 0x7f
+
+#define QUIRK_FIX_NOTEBOOK_REPORT BIT(0)
+#define QUIRK_NO_INIT_REPORTS BIT(1)
+#define QUIRK_SKIP_INPUT_MAPPING BIT(2)
+#define QUIRK_IS_MULTITOUCH BIT(3)
+
+#define NOTEBOOK_QUIRKS QUIRK_FIX_NOTEBOOK_REPORT
+#define TOUCHPAD_QUIRKS (QUIRK_NO_INIT_REPORTS | \
+ QUIRK_SKIP_INPUT_MAPPING | \
+ QUIRK_IS_MULTITOUCH)
+
+#define TRKID_SGN ((TRKID_MAX + 1) >> 1)
+
+struct asus_drvdata {
+ unsigned long quirks;
+ struct input_dev *input;
+};
+
+static void asus_report_contact_down(struct input_dev *input,
+ int toolType, u8 *data)
+{
+ int touch_major, pressure;
+ int x = (data[0] & CONTACT_X_MSB_MASK) << 4 | data[1];
+ int y = MAX_Y - ((data[0] & CONTACT_Y_MSB_MASK) << 8 | data[2]);
+
+ if (toolType == MT_TOOL_PALM) {
+ touch_major = MAX_TOUCH_MAJOR;
+ pressure = MAX_PRESSURE;
+ } else {
+ touch_major = (data[3] >> 4) & CONTACT_TOUCH_MAJOR_MASK;
+ pressure = data[4] & CONTACT_PRESSURE_MASK;
+ }
+
+ input_report_abs(input, ABS_MT_POSITION_X, x);
+ input_report_abs(input, ABS_MT_POSITION_Y, y);
+ input_report_abs(input, ABS_MT_TOUCH_MAJOR, touch_major);
+ input_report_abs(input, ABS_MT_PRESSURE, pressure);
+}
+
+/* Required for Synaptics Palm Detection */
+static void asus_report_tool_width(struct input_dev *input)
+{
+ struct input_mt *mt = input->mt;
+ struct input_mt_slot *oldest;
+ int oldid, count, i;
+
+ oldest = NULL;
+ oldid = mt->trkid;
+ count = 0;
+
+ for (i = 0; i < mt->num_slots; ++i) {
+ struct input_mt_slot *ps = &mt->slots[i];
+ int id = input_mt_get_value(ps, ABS_MT_TRACKING_ID);
+
+ if (id < 0)
+ continue;
+ if ((id - oldid) & TRKID_SGN) {
+ oldest = ps;
+ oldid = id;
+ }
+ count++;
+ }
+
+ if (oldest) {
+ input_report_abs(input, ABS_TOOL_WIDTH,
+ input_mt_get_value(oldest, ABS_MT_TOUCH_MAJOR));
+ }
+}
+
+static void asus_report_input(struct input_dev *input, u8 *data)
+{
+ int i;
+ u8 *contactData = data + 2;
+
+ for (i = 0; i < MAX_CONTACTS; i++) {
+ bool down = !!(data[1] & BIT(i+3));
+ int toolType = contactData[3] & CONTACT_TOOL_TYPE_MASK ?
+ MT_TOOL_PALM : MT_TOOL_FINGER;
+
+ input_mt_slot(input, i);
+ input_mt_report_slot_state(input, toolType, down);
+
+ if (down) {
+ asus_report_contact_down(input, toolType, contactData);
+ contactData += CONTACT_DATA_SIZE;
+ }
+ }
+
+ input_report_key(input, BTN_LEFT, data[1] & BTN_LEFT_MASK);
+ asus_report_tool_width(input);
+
+ input_mt_sync_frame(input);
+ input_sync(input);
+}
+
+static int asus_raw_event(struct hid_device *hdev,
+ struct hid_report *report, u8 *data, int size)
+{
+ struct asus_drvdata *drvdata = hid_get_drvdata(hdev);
+
+ if (drvdata->quirks & QUIRK_IS_MULTITOUCH &&
+ data[0] == INPUT_REPORT_ID &&
+ size == INPUT_REPORT_SIZE) {
+ asus_report_input(drvdata->input, data);
+ return 1;
+ }
+
+ return 0;
+}
+
+static int asus_input_configured(struct hid_device *hdev, struct hid_input *hi)
+{
+ struct asus_drvdata *drvdata = hid_get_drvdata(hdev);
+
+ if (drvdata->quirks & QUIRK_IS_MULTITOUCH) {
+ int ret;
+ struct input_dev *input = hi->input;
+
+ input_set_abs_params(input, ABS_MT_POSITION_X, 0, MAX_X, 0, 0);
+ input_set_abs_params(input, ABS_MT_POSITION_Y, 0, MAX_Y, 0, 0);
+ input_set_abs_params(input, ABS_TOOL_WIDTH, 0, MAX_TOUCH_MAJOR, 0, 0);
+ input_set_abs_params(input, ABS_MT_TOUCH_MAJOR, 0, MAX_TOUCH_MAJOR, 0, 0);
+ input_set_abs_params(input, ABS_MT_PRESSURE, 0, MAX_PRESSURE, 0, 0);
+
+ __set_bit(BTN_LEFT, input->keybit);
+ __set_bit(INPUT_PROP_BUTTONPAD, input->propbit);
+
+ ret = input_mt_init_slots(input, MAX_CONTACTS, INPUT_MT_POINTER);
+
+ if (ret) {
+ hid_err(hdev, "Asus input mt init slots failed: %d\n", ret);
+ return ret;
+ }
+
+ drvdata->input = input;
+ }
+
+ return 0;
+}
+
+static int asus_input_mapping(struct hid_device *hdev,
+ struct hid_input *hi, struct hid_field *field,
+ struct hid_usage *usage, unsigned long **bit,
+ int *max)
+{
+ struct asus_drvdata *drvdata = hid_get_drvdata(hdev);
+
+ if (drvdata->quirks & QUIRK_SKIP_INPUT_MAPPING) {
+ /* Don't map anything from the HID report.
+ * We do it all manually in asus_input_configured
+ */
+ return -1;
+ }
+
+ return 0;
+}
+
+static int asus_start_multitouch(struct hid_device *hdev)
+{
+ int ret;
+ const unsigned char buf[] = { FEATURE_REPORT_ID, 0x00, 0x03, 0x01, 0x00 };
+ unsigned char *dmabuf = kmemdup(buf, sizeof(buf), GFP_KERNEL);
+
+ if (!dmabuf) {
+ ret = -ENOMEM;
+ hid_err(hdev, "Asus failed to alloc dma buf: %d\n", ret);
+ return ret;
+ }
+
+ ret = hid_hw_raw_request(hdev, dmabuf[0], dmabuf, sizeof(buf),
+ HID_FEATURE_REPORT, HID_REQ_SET_REPORT);
+
+ kfree(dmabuf);
+
+ if (ret != sizeof(buf)) {
+ hid_err(hdev, "Asus failed to start multitouch: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int __maybe_unused asus_reset_resume(struct hid_device *hdev)
+{
+ struct asus_drvdata *drvdata = hid_get_drvdata(hdev);
+
+ if (drvdata->quirks & QUIRK_IS_MULTITOUCH)
+ return asus_start_multitouch(hdev);
+
+ return 0;
+}
+
+static int asus_probe(struct hid_device *hdev, const struct hid_device_id *id)
+{
+ int ret;
+ struct asus_drvdata *drvdata;
+
+ drvdata = devm_kzalloc(&hdev->dev, sizeof(*drvdata), GFP_KERNEL);
+ if (drvdata == NULL) {
+ hid_err(hdev, "Can't alloc Asus descriptor\n");
+ return -ENOMEM;
+ }
+
+ hid_set_drvdata(hdev, drvdata);
+
+ drvdata->quirks = id->driver_data;
+
+ if (drvdata->quirks & QUIRK_NO_INIT_REPORTS)
+ hdev->quirks |= HID_QUIRK_NO_INIT_REPORTS;
+
+ ret = hid_parse(hdev);
+ if (ret) {
+ hid_err(hdev, "Asus hid parse failed: %d\n", ret);
+ return ret;
+ }
+
+ ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
+ if (ret) {
+ hid_err(hdev, "Asus hw start failed: %d\n", ret);
+ return ret;
+ }
+
+ if (!drvdata->input) {
+ hid_err(hdev, "Asus input not registered\n");
+ ret = -ENOMEM;
+ goto err_stop_hw;
+ }
+
+ drvdata->input->name = "Asus TouchPad";
+
+ if (drvdata->quirks & QUIRK_IS_MULTITOUCH) {
+ ret = asus_start_multitouch(hdev);
+ if (ret)
+ goto err_stop_hw;
+ }
+
+ return 0;
+err_stop_hw:
+ hid_hw_stop(hdev);
+ return ret;
+}
+
static __u8 *asus_report_fixup(struct hid_device *hdev, __u8 *rdesc,
unsigned int *rsize)
{
- if (*rsize >= 56 && rdesc[54] == 0x25 && rdesc[55] == 0x65) {
+ struct asus_drvdata *drvdata = hid_get_drvdata(hdev);
+
+ if (drvdata->quirks & QUIRK_FIX_NOTEBOOK_REPORT &&
+ *rsize >= 56 && rdesc[54] == 0x25 && rdesc[55] == 0x65) {
hid_info(hdev, "Fixing up Asus notebook report descriptor\n");
rdesc[55] = 0xdd;
}
@@ -37,15 +314,25 @@ static __u8 *asus_report_fixup(struct hid_device *hdev, __u8 *rdesc,
}
static const struct hid_device_id asus_devices[] = {
- { HID_I2C_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_NOTEBOOK_KEYBOARD) },
+ { HID_I2C_DEVICE(USB_VENDOR_ID_ASUSTEK,
+ USB_DEVICE_ID_ASUSTEK_NOTEBOOK_KEYBOARD), NOTEBOOK_QUIRKS},
+ { HID_I2C_DEVICE(USB_VENDOR_ID_ASUSTEK,
+ USB_DEVICE_ID_ASUSTEK_TOUCHPAD), TOUCHPAD_QUIRKS },
{ }
};
MODULE_DEVICE_TABLE(hid, asus_devices);
static struct hid_driver asus_driver = {
- .name = "asus",
- .id_table = asus_devices,
- .report_fixup = asus_report_fixup
+ .name = "asus",
+ .id_table = asus_devices,
+ .report_fixup = asus_report_fixup,
+ .probe = asus_probe,
+ .input_mapping = asus_input_mapping,
+ .input_configured = asus_input_configured,
+#ifdef CONFIG_PM
+ .reset_resume = asus_reset_resume,
+#endif
+ .raw_event = asus_raw_event
};
module_hid_driver(asus_driver);
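Each touchpad contact occupies five bytes: data[0] carries the X and Y most-significant nibbles, data[1]/data[2] the low bytes, data[3] the touch-major nibble plus the tool-type flag, and data[4] the pressure. A worked decode with made-up sample bytes {0xa5, 0x3c, 0x7b, 0x40, 0x55}: x = 0xa00 | 0x3c = 2620, y = 1758 - (0x500 | 0x7b) = 355, touch_major = 4, pressure = 0x55. The coordinate extraction as a sketch:

static void example_decode_contact(const u8 *data, int *x, int *y)
{
	/* High nibble of data[0] supplies X bits 11..8, data[1] bits 7..0. */
	*x = (data[0] & CONTACT_X_MSB_MASK) << 4 | data[1];
	/* Low nibble of data[0] supplies Y bits 11..8; the raw value is
	 * then subtracted from MAX_Y to invert the axis. */
	*y = MAX_Y - ((data[0] & CONTACT_Y_MSB_MASK) << 8 | data[2]);
}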
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index 2b89c701076f..cff060b56da9 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -727,8 +727,9 @@ static void hid_scan_collection(struct hid_parser *parser, unsigned type)
(hid->product == USB_DEVICE_ID_MS_TYPE_COVER_PRO_3 ||
hid->product == USB_DEVICE_ID_MS_TYPE_COVER_PRO_3_2 ||
hid->product == USB_DEVICE_ID_MS_TYPE_COVER_PRO_3_JP ||
+ hid->product == USB_DEVICE_ID_MS_TYPE_COVER_PRO_4 ||
+ hid->product == USB_DEVICE_ID_MS_TYPE_COVER_PRO_4_2 ||
hid->product == USB_DEVICE_ID_MS_TYPE_COVER_PRO_4_JP ||
- hid->product == USB_DEVICE_ID_MS_TYPE_COVER_3 ||
hid->product == USB_DEVICE_ID_MS_POWER_COVER) &&
hid->group == HID_GROUP_MULTITOUCH)
hid->group = HID_GROUP_GENERIC;
@@ -1857,6 +1858,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY) },
{ HID_I2C_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_NOTEBOOK_KEYBOARD) },
+ { HID_I2C_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_TOUCHPAD) },
{ HID_USB_DEVICE(USB_VENDOR_ID_AUREAL, USB_DEVICE_ID_AUREAL_W01RN) },
{ HID_USB_DEVICE(USB_VENDOR_ID_BELKIN, USB_DEVICE_ID_FLIP_KVM) },
{ HID_USB_DEVICE(USB_VENDOR_ID_BETOP_2185BFM, 0x2208) },
@@ -1883,6 +1885,9 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_DELCOM, USB_DEVICE_ID_DELCOM_VISUAL_IND) },
{ HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, 0x0006) },
{ HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, 0x0011) },
+#if IS_ENABLED(CONFIG_HID_MAYFLASH)
+ { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_PS3) },
+#endif
{ HID_USB_DEVICE(USB_VENDOR_ID_DREAM_CHEEKY, USB_DEVICE_ID_DREAM_CHEEKY_WN) },
{ HID_USB_DEVICE(USB_VENDOR_ID_DREAM_CHEEKY, USB_DEVICE_ID_DREAM_CHEEKY_FA) },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_BM084) },
@@ -1983,8 +1988,9 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_PRO_3) },
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_PRO_3_2) },
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_PRO_3_JP) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_PRO_4) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_PRO_4_2) },
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_PRO_4_JP) },
- { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_3) },
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_DIGITAL_MEDIA_7K) },
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_DIGITAL_MEDIA_600) },
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_DIGITAL_MEDIA_3KV1) },
@@ -2059,6 +2065,9 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS3_CONTROLLER) },
{ HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS4_CONTROLLER) },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS4_CONTROLLER) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS4_CONTROLLER_2) },
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS4_CONTROLLER_2) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS4_CONTROLLER_DONGLE) },
{ HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_VAIO_VGX_MOUSE) },
{ HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_VAIO_VGP_MOUSE) },
{ HID_USB_DEVICE(USB_VENDOR_ID_SINO_LITE, USB_DEVICE_ID_SINO_LITE_CONTROLLER) },
@@ -2086,6 +2095,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_WP1062) },
{ HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_WIRELESS_TABLET_TWHL850) },
{ HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_TWHA60) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_THQ, USB_DEVICE_ID_THQ_PS3_UDRAW) },
{ HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_YIYNOVA_TABLET) },
{ HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UGEE_TABLET_81) },
{ HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UGEE_TABLET_45) },
diff --git a/drivers/hid/hid-cp2112.c b/drivers/hid/hid-cp2112.c
index 60d30203a5fa..f31a778b0851 100644
--- a/drivers/hid/hid-cp2112.c
+++ b/drivers/hid/hid-cp2112.c
@@ -24,6 +24,7 @@
* http://www.silabs.com/Support%20Documents/TechnicalDocs/AN495.pdf
*/
+#include <linux/gpio.h>
#include <linux/gpio/driver.h>
#include <linux/hid.h>
#include <linux/i2c.h>
@@ -168,6 +169,12 @@ struct cp2112_device {
struct gpio_chip gc;
u8 *in_out_buffer;
spinlock_t lock;
+
+ struct gpio_desc *desc[8];
+ bool gpio_poll;
+ struct delayed_work gpio_poll_worker;
+ unsigned long irq_mask;
+ u8 gpio_prev_state;
};
static int gpio_push_pull = 0xFF;
@@ -233,7 +240,7 @@ static void cp2112_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
spin_unlock_irqrestore(&dev->lock, flags);
}
-static int cp2112_gpio_get(struct gpio_chip *chip, unsigned offset)
+static int cp2112_gpio_get_all(struct gpio_chip *chip)
{
struct cp2112_device *dev = gpiochip_get_data(chip);
struct hid_device *hdev = dev->hdev;
@@ -252,7 +259,7 @@ static int cp2112_gpio_get(struct gpio_chip *chip, unsigned offset)
goto exit;
}
- ret = (buf[1] >> offset) & 1;
+ ret = buf[1];
exit:
spin_unlock_irqrestore(&dev->lock, flags);
@@ -260,6 +267,17 @@ exit:
return ret;
}
+static int cp2112_gpio_get(struct gpio_chip *chip, unsigned int offset)
+{
+ int ret;
+
+ ret = cp2112_gpio_get_all(chip);
+ if (ret < 0)
+ return ret;
+
+ return (ret >> offset) & 1;
+}
+
static int cp2112_gpio_direction_output(struct gpio_chip *chip,
unsigned offset, int value)
{
@@ -1041,6 +1059,166 @@ static void chmod_sysfs_attrs(struct hid_device *hdev)
}
}
+static void cp2112_gpio_irq_ack(struct irq_data *d)
+{
+}
+
+static void cp2112_gpio_irq_mask(struct irq_data *d)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct cp2112_device *dev = gpiochip_get_data(gc);
+
+ __clear_bit(d->hwirq, &dev->irq_mask);
+}
+
+static void cp2112_gpio_irq_unmask(struct irq_data *d)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct cp2112_device *dev = gpiochip_get_data(gc);
+
+ __set_bit(d->hwirq, &dev->irq_mask);
+}
+
+static void cp2112_gpio_poll_callback(struct work_struct *work)
+{
+ struct cp2112_device *dev = container_of(work, struct cp2112_device,
+ gpio_poll_worker.work);
+ struct irq_data *d;
+ u8 gpio_mask;
+ u8 virqs = (u8)dev->irq_mask;
+ u32 irq_type;
+ int irq, virq, ret;
+
+ ret = cp2112_gpio_get_all(&dev->gc);
+ if (ret == -ENODEV) /* the hardware has been disconnected */
+ return;
+ if (ret < 0)
+ goto exit;
+
+ gpio_mask = ret;
+
+ while (virqs) {
+ virq = ffs(virqs) - 1;
+ virqs &= ~BIT(virq);
+
+ if (!dev->gc.to_irq)
+ break;
+
+ irq = dev->gc.to_irq(&dev->gc, virq);
+
+ d = irq_get_irq_data(irq);
+ if (!d)
+ continue;
+
+ irq_type = irqd_get_trigger_type(d);
+
+ if (gpio_mask & BIT(virq)) {
+ /* Level High */
+
+ if (irq_type & IRQ_TYPE_LEVEL_HIGH)
+ handle_nested_irq(irq);
+
+ if ((irq_type & IRQ_TYPE_EDGE_RISING) &&
+ !(dev->gpio_prev_state & BIT(virq)))
+ handle_nested_irq(irq);
+ } else {
+ /* Level Low */
+
+ if (irq_type & IRQ_TYPE_LEVEL_LOW)
+ handle_nested_irq(irq);
+
+ if ((irq_type & IRQ_TYPE_EDGE_FALLING) &&
+ (dev->gpio_prev_state & BIT(virq)))
+ handle_nested_irq(irq);
+ }
+ }
+
+ dev->gpio_prev_state = gpio_mask;
+
+exit:
+ if (dev->gpio_poll)
+ schedule_delayed_work(&dev->gpio_poll_worker, 10);
+}
+
+
+static unsigned int cp2112_gpio_irq_startup(struct irq_data *d)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct cp2112_device *dev = gpiochip_get_data(gc);
+
+ INIT_DELAYED_WORK(&dev->gpio_poll_worker, cp2112_gpio_poll_callback);
+
+ cp2112_gpio_direction_input(gc, d->hwirq);
+
+ if (!dev->gpio_poll) {
+ dev->gpio_poll = true;
+ schedule_delayed_work(&dev->gpio_poll_worker, 0);
+ }
+
+ cp2112_gpio_irq_unmask(d);
+ return 0;
+}
+
+static void cp2112_gpio_irq_shutdown(struct irq_data *d)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct cp2112_device *dev = gpiochip_get_data(gc);
+
+ cancel_delayed_work_sync(&dev->gpio_poll_worker);
+}
+
+static int cp2112_gpio_irq_type(struct irq_data *d, unsigned int type)
+{
+ return 0;
+}
+
+static struct irq_chip cp2112_gpio_irqchip = {
+ .name = "cp2112-gpio",
+ .irq_startup = cp2112_gpio_irq_startup,
+ .irq_shutdown = cp2112_gpio_irq_shutdown,
+ .irq_ack = cp2112_gpio_irq_ack,
+ .irq_mask = cp2112_gpio_irq_mask,
+ .irq_unmask = cp2112_gpio_irq_unmask,
+ .irq_set_type = cp2112_gpio_irq_type,
+};
+
+static int __maybe_unused cp2112_allocate_irq(struct cp2112_device *dev,
+ int pin)
+{
+ int ret;
+
+ if (dev->desc[pin])
+ return -EINVAL;
+
+ dev->desc[pin] = gpiochip_request_own_desc(&dev->gc, pin,
+ "HID/I2C:Event");
+ if (IS_ERR(dev->desc[pin])) {
+ dev_err(dev->gc.parent, "Failed to request GPIO\n");
+ return PTR_ERR(dev->desc[pin]);
+ }
+
+ ret = gpiochip_lock_as_irq(&dev->gc, pin);
+ if (ret) {
+ dev_err(dev->gc.parent, "Failed to lock GPIO as interrupt\n");
+ goto err_desc;
+ }
+
+ ret = gpiod_to_irq(dev->desc[pin]);
+ if (ret < 0) {
+ dev_err(dev->gc.parent, "Failed to translate GPIO to IRQ\n");
+ goto err_lock;
+ }
+
+ return ret;
+
+err_lock:
+ gpiochip_unlock_as_irq(&dev->gc, pin);
+err_desc:
+ gpiochip_free_own_desc(dev->desc[pin]);
+ dev->desc[pin] = NULL;
+ return ret;
+}
+
static int cp2112_probe(struct hid_device *hdev, const struct hid_device_id *id)
{
struct cp2112_device *dev;
@@ -1163,8 +1341,17 @@ static int cp2112_probe(struct hid_device *hdev, const struct hid_device_id *id)
chmod_sysfs_attrs(hdev);
hid_hw_power(hdev, PM_HINT_NORMAL);
+ ret = gpiochip_irqchip_add(&dev->gc, &cp2112_gpio_irqchip, 0,
+ handle_simple_irq, IRQ_TYPE_NONE);
+ if (ret) {
+ dev_err(dev->gc.parent, "failed to add IRQ chip\n");
+ goto err_sysfs_remove;
+ }
+
return ret;
+err_sysfs_remove:
+ sysfs_remove_group(&hdev->dev.kobj, &cp2112_attr_group);
err_gpiochip_remove:
gpiochip_remove(&dev->gc);
err_free_i2c:
@@ -1181,10 +1368,22 @@ err_hid_stop:
static void cp2112_remove(struct hid_device *hdev)
{
struct cp2112_device *dev = hid_get_drvdata(hdev);
+ int i;
sysfs_remove_group(&hdev->dev.kobj, &cp2112_attr_group);
- gpiochip_remove(&dev->gc);
i2c_del_adapter(&dev->adap);
+
+ if (dev->gpio_poll) {
+ dev->gpio_poll = false;
+ cancel_delayed_work_sync(&dev->gpio_poll_worker);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(dev->desc); i++) {
+ gpiochip_unlock_as_irq(&dev->gc, i);
+ gpiochip_free_own_desc(dev->desc[i]);
+ }
+
+ gpiochip_remove(&dev->gc);
/* i2c_del_adapter has finished removing all i2c devices from our
* adapter. Well behaved devices should no longer call our cp2112_xfer
* and should have waited for any pending calls to finish. It has also
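Since the CP2112 exposes no interrupt line, the driver emulates GPIO IRQs by polling every 10 jiffies: each pass reads all eight pins at once, then compares the snapshot against gpio_prev_state to synthesize level and edge triggers via handle_nested_irq(). The edge tests reduce to two bit comparisons (sketch):

/* Rising edge on pin n: high in this poll, low in the previous one. */
static bool is_rising_edge(u8 now, u8 prev, unsigned int n)
{
	return (now & BIT(n)) && !(prev & BIT(n));
}

/* Falling edge: low now, high before. */
static bool is_falling_edge(u8 now, u8 prev, unsigned int n)
{
	return !(now & BIT(n)) && (prev & BIT(n));
}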
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index 575aa65436d1..ec277b96eaa1 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -171,6 +171,7 @@
#define USB_DEVICE_ID_ASUSTEK_LCM 0x1726
#define USB_DEVICE_ID_ASUSTEK_LCM2 0x175b
#define USB_DEVICE_ID_ASUSTEK_NOTEBOOK_KEYBOARD 0x8585
+#define USB_DEVICE_ID_ASUSTEK_TOUCHPAD 0x0101
#define USB_VENDOR_ID_ATEN 0x0557
#define USB_DEVICE_ID_ATEN_UC100KM 0x2004
@@ -315,8 +316,10 @@
#define USB_VENDOR_ID_DMI 0x0c0b
#define USB_DEVICE_ID_DMI_ENC 0x5fab
-#define USB_VENDOR_ID_DRAGONRISE 0x0079
-#define USB_DEVICE_ID_DRAGONRISE_WIIU 0x1800
+#define USB_VENDOR_ID_DRAGONRISE 0x0079
+#define USB_DEVICE_ID_DRAGONRISE_WIIU 0x1800
+#define USB_DEVICE_ID_DRAGONRISE_PS3 0x1801
+#define USB_DEVICE_ID_DRAGONRISE_GAMECUBE 0x1843
#define USB_VENDOR_ID_DWAV 0x0eef
#define USB_DEVICE_ID_EGALAX_TOUCHCONTROLLER 0x0001
@@ -718,8 +721,9 @@
#define USB_DEVICE_ID_MS_TYPE_COVER_PRO_3 0x07dc
#define USB_DEVICE_ID_MS_TYPE_COVER_PRO_3_2 0x07e2
#define USB_DEVICE_ID_MS_TYPE_COVER_PRO_3_JP 0x07dd
+#define USB_DEVICE_ID_MS_TYPE_COVER_PRO_4 0x07e4
+#define USB_DEVICE_ID_MS_TYPE_COVER_PRO_4_2 0x07e8
#define USB_DEVICE_ID_MS_TYPE_COVER_PRO_4_JP 0x07e9
-#define USB_DEVICE_ID_MS_TYPE_COVER_3 0x07de
#define USB_DEVICE_ID_MS_POWER_COVER 0x07da
#define USB_VENDOR_ID_MOJO 0x8282
@@ -903,6 +907,8 @@
#define USB_DEVICE_ID_SONY_PS3_BDREMOTE 0x0306
#define USB_DEVICE_ID_SONY_PS3_CONTROLLER 0x0268
#define USB_DEVICE_ID_SONY_PS4_CONTROLLER 0x05c4
+#define USB_DEVICE_ID_SONY_PS4_CONTROLLER_2 0x09cc
+#define USB_DEVICE_ID_SONY_PS4_CONTROLLER_DONGLE 0x0ba0
#define USB_DEVICE_ID_SONY_MOTION_CONTROLLER 0x03d5
#define USB_DEVICE_ID_SONY_NAVIGATION_CONTROLLER 0x042f
#define USB_DEVICE_ID_SONY_BUZZ_CONTROLLER 0x0002
@@ -959,6 +965,9 @@
#define USB_VENDOR_ID_THINGM 0x27b8
#define USB_DEVICE_ID_BLINK1 0x01ed
+#define USB_VENDOR_ID_THQ 0x20d6
+#define USB_DEVICE_ID_THQ_PS3_UDRAW 0xcb17
+
#define USB_VENDOR_ID_THRUSTMASTER 0x044f
#define USB_VENDOR_ID_TIVO 0x150a
@@ -1034,6 +1043,10 @@
#define USB_DEVICE_ID_WALTOP_MEDIA_TABLET_14_1_INCH 0x0500
#define USB_DEVICE_ID_WALTOP_SIRIUS_BATTERY_FREE_TABLET 0x0502
+#define USB_VENDOR_ID_WEIDA 0x2575
+#define USB_DEVICE_ID_WEIDA_8752 0xC300
+#define USB_DEVICE_ID_WEIDA_8755 0xC301
+
#define USB_VENDOR_ID_WISEGROUP 0x0925
#define USB_DEVICE_ID_SMARTJOY_PLUS 0x0005
#define USB_DEVICE_ID_SUPER_JOY_BOX_3 0x8888
diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
index fb9ace1cef8b..d05f903c7614 100644
--- a/drivers/hid/hid-input.c
+++ b/drivers/hid/hid-input.c
@@ -253,6 +253,7 @@ __s32 hidinput_calc_abs_res(const struct hid_field *field, __u16 code)
case ABS_RX:
case ABS_RY:
case ABS_RZ:
+ case ABS_WHEEL:
case ABS_TILT_X:
case ABS_TILT_Y:
if (field->unit == 0x14) { /* If degrees */
@@ -1468,6 +1469,31 @@ static void hidinput_cleanup_hidinput(struct hid_device *hid,
kfree(hidinput);
}
+static struct hid_input *hidinput_match(struct hid_report *report)
+{
+ struct hid_device *hid = report->device;
+ struct hid_input *hidinput;
+
+ list_for_each_entry(hidinput, &hid->inputs, list) {
+ if (hidinput->report &&
+ hidinput->report->id == report->id)
+ return hidinput;
+ }
+
+ return NULL;
+}
+
+static inline void hidinput_configure_usages(struct hid_input *hidinput,
+ struct hid_report *report)
+{
+ int i, j;
+
+ for (i = 0; i < report->maxfield; i++)
+ for (j = 0; j < report->field[i]->maxusage; j++)
+ hidinput_configure_usage(hidinput, report->field[i],
+ report->field[i]->usage + j);
+}
+
/*
* Register the input device; print a message.
* Configure the input layer interface
@@ -1478,8 +1504,8 @@ int hidinput_connect(struct hid_device *hid, unsigned int force)
{
struct hid_driver *drv = hid->driver;
struct hid_report *report;
- struct hid_input *hidinput = NULL;
- int i, j, k;
+ struct hid_input *next, *hidinput = NULL;
+ int i, k;
INIT_LIST_HEAD(&hid->inputs);
INIT_WORK(&hid->led_work, hidinput_led_worker);
@@ -1509,43 +1535,40 @@ int hidinput_connect(struct hid_device *hid, unsigned int force)
if (!report->maxfield)
continue;
+ /*
+ * Find the previous hidinput report attached
+ * to this report id.
+ */
+ if (hid->quirks & HID_QUIRK_MULTI_INPUT)
+ hidinput = hidinput_match(report);
+
if (!hidinput) {
hidinput = hidinput_allocate(hid);
if (!hidinput)
goto out_unwind;
}
- for (i = 0; i < report->maxfield; i++)
- for (j = 0; j < report->field[i]->maxusage; j++)
- hidinput_configure_usage(hidinput, report->field[i],
- report->field[i]->usage + j);
-
- if ((hid->quirks & HID_QUIRK_NO_EMPTY_INPUT) &&
- !hidinput_has_been_populated(hidinput))
- continue;
+ hidinput_configure_usages(hidinput, report);
- if (hid->quirks & HID_QUIRK_MULTI_INPUT) {
- /* This will leave hidinput NULL, so that it
- * allocates another one if we have more inputs on
- * the same interface. Some devices (e.g. Happ's
- * UGCI) cram a lot of unrelated inputs into the
- * same interface. */
+ if (hid->quirks & HID_QUIRK_MULTI_INPUT)
hidinput->report = report;
- if (drv->input_configured &&
- drv->input_configured(hid, hidinput))
- goto out_cleanup;
- if (input_register_device(hidinput->input))
- goto out_cleanup;
- hidinput = NULL;
- }
}
}
- if (hidinput && (hid->quirks & HID_QUIRK_NO_EMPTY_INPUT) &&
- !hidinput_has_been_populated(hidinput)) {
- /* no need to register an input device not populated */
- hidinput_cleanup_hidinput(hid, hidinput);
- hidinput = NULL;
+ list_for_each_entry_safe(hidinput, next, &hid->inputs, list) {
+ if ((hid->quirks & HID_QUIRK_NO_EMPTY_INPUT) &&
+ !hidinput_has_been_populated(hidinput)) {
+ /* no need to register an input device that was never populated */
+ hidinput_cleanup_hidinput(hid, hidinput);
+ continue;
+ }
+
+ if (drv->input_configured &&
+ drv->input_configured(hid, hidinput))
+ goto out_unwind;
+ if (input_register_device(hidinput->input))
+ goto out_unwind;
+ hidinput->registered = true;
}
if (list_empty(&hid->inputs)) {
@@ -1553,20 +1576,8 @@ int hidinput_connect(struct hid_device *hid, unsigned int force)
goto out_unwind;
}
- if (hidinput) {
- if (drv->input_configured &&
- drv->input_configured(hid, hidinput))
- goto out_cleanup;
- if (input_register_device(hidinput->input))
- goto out_cleanup;
- }
-
return 0;
-out_cleanup:
- list_del(&hidinput->list);
- input_free_device(hidinput->input);
- kfree(hidinput);
out_unwind:
/* unwind the ones we already registered */
hidinput_disconnect(hid);
@@ -1583,7 +1594,10 @@ void hidinput_disconnect(struct hid_device *hid)
list_for_each_entry_safe(hidinput, next, &hid->inputs, list) {
list_del(&hidinput->list);
- input_unregister_device(hidinput->input);
+ if (hidinput->registered)
+ input_unregister_device(hidinput->input);
+ else
+ input_free_device(hidinput->input);
kfree(hidinput);
}
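The new hidinput->registered flag matters because the input core has two disjoint teardown paths: a device that was successfully registered must be torn down with input_unregister_device() (which also drops the final reference), while a never-registered device must be released with input_free_device(); mixing them up leaks or double-frees. The general rule, as a sketch:

static int example_register(struct input_dev *dev)
{
	int err = input_register_device(dev);

	if (err) {
		input_free_device(dev);	/* never registered: plain free */
		return err;
	}
	/* From here on, the only valid teardown is
	 * input_unregister_device(dev). */
	return 0;
}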
diff --git a/drivers/hid/hid-mf.c b/drivers/hid/hid-mf.c
new file mode 100644
index 000000000000..d9090765a6e5
--- /dev/null
+++ b/drivers/hid/hid-mf.c
@@ -0,0 +1,166 @@
+/*
+ * Force feedback support for Mayflash game controller adapters.
+ *
+ * These devices are manufactured by Mayflash but identify themselves
+ * using the vendor ID of DragonRise Inc.
+ *
+ * Tested with:
+ * 0079:1801 "DragonRise Inc. Mayflash PS3 Game Controller Adapter"
+ *
+ * The following adapters probably work too, but need to be tested:
+ * 0079:1800 "DragonRise Inc. Mayflash WIIU Game Controller Adapter"
+ * 0079:1843 "DragonRise Inc. Mayflash GameCube Game Controller Adapter"
+ *
+ * Copyright (c) 2016 Marcel Hasler <mahasler@gmail.com>
+ */
+
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/input.h>
+#include <linux/slab.h>
+#include <linux/hid.h>
+#include <linux/module.h>
+
+#include "hid-ids.h"
+
+struct mf_device {
+ struct hid_report *report;
+};
+
+static int mf_play(struct input_dev *dev, void *data, struct ff_effect *effect)
+{
+ struct hid_device *hid = input_get_drvdata(dev);
+ struct mf_device *mf = data;
+ int strong, weak;
+
+ strong = effect->u.rumble.strong_magnitude;
+ weak = effect->u.rumble.weak_magnitude;
+
+ dbg_hid("Called with 0x%04x 0x%04x.\n", strong, weak);
+
+ strong = strong * 0xff / 0xffff;
+ weak = weak * 0xff / 0xffff;
+
+ dbg_hid("Running with 0x%02x 0x%02x.\n", strong, weak);
+
+ mf->report->field[0]->value[0] = weak;
+ mf->report->field[0]->value[1] = strong;
+ hid_hw_request(hid, mf->report, HID_REQ_SET_REPORT);
+
+ return 0;
+}
+
+static int mf_init(struct hid_device *hid)
+{
+ struct mf_device *mf;
+
+ struct list_head *report_list =
+ &hid->report_enum[HID_OUTPUT_REPORT].report_list;
+
+ struct list_head *report_ptr;
+ struct hid_report *report;
+
+ struct list_head *input_ptr = &hid->inputs;
+ struct hid_input *input;
+
+ struct input_dev *dev;
+
+ int error;
+
+ /* Setup each of the four inputs */
+ list_for_each(report_ptr, report_list) {
+ report = list_entry(report_ptr, struct hid_report, list);
+
+ if (report->maxfield < 1 || report->field[0]->report_count < 2) {
+ hid_err(hid, "Invalid report, this should never happen!\n");
+ return -ENODEV;
+ }
+
+ if (list_is_last(input_ptr, &hid->inputs)) {
+ hid_err(hid, "Missing input, this should never happen!\n");
+ return -ENODEV;
+ }
+
+ input_ptr = input_ptr->next;
+ input = list_entry(input_ptr, struct hid_input, list);
+
+ mf = kzalloc(sizeof(struct mf_device), GFP_KERNEL);
+ if (!mf)
+ return -ENOMEM;
+
+ dev = input->input;
+ set_bit(FF_RUMBLE, dev->ffbit);
+
+ error = input_ff_create_memless(dev, mf, mf_play);
+ if (error) {
+ kfree(mf);
+ return error;
+ }
+
+ mf->report = report;
+ mf->report->field[0]->value[0] = 0x00;
+ mf->report->field[0]->value[1] = 0x00;
+ hid_hw_request(hid, mf->report, HID_REQ_SET_REPORT);
+ }
+
+ hid_info(hid, "Force feedback for HJZ Mayflash game controller "
+ "adapters by Marcel Hasler <mahasler@gmail.com>\n");
+
+ return 0;
+}
+
+static int mf_probe(struct hid_device *hid, const struct hid_device_id *id)
+{
+ int error;
+
+ dev_dbg(&hid->dev, "Mayflash HID hardware probe...\n");
+
+ /* Split device into four inputs */
+ hid->quirks |= HID_QUIRK_MULTI_INPUT;
+
+ error = hid_parse(hid);
+ if (error) {
+ hid_err(hid, "HID parse failed.\n");
+ return error;
+ }
+
+ error = hid_hw_start(hid, HID_CONNECT_DEFAULT & ~HID_CONNECT_FF);
+ if (error) {
+ hid_err(hid, "HID hw start failed\n");
+ return error;
+ }
+
+ error = mf_init(hid);
+ if (error) {
+ hid_err(hid, "Force feedback init failed.\n");
+ hid_hw_stop(hid);
+ return error;
+ }
+
+ return 0;
+}
+
+static const struct hid_device_id mf_devices[] = {
+ { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_PS3), },
+ { }
+};
+MODULE_DEVICE_TABLE(hid, mf_devices);
+
+static struct hid_driver mf_driver = {
+ .name = "hid_mf",
+ .id_table = mf_devices,
+ .probe = mf_probe,
+};
+module_hid_driver(mf_driver);
+
+MODULE_LICENSE("GPL");
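mf_play() rescales the 16-bit memless force-feedback magnitudes (0..0xffff) to the single byte the adapter's output report expects; x * 0xff / 0xffff maps 0 to 0 and 0xffff to 0xff with uniform spacing. In isolation:

/* 16-bit to 8-bit rumble scaling as used in mf_play() above. */
static u8 scale_magnitude(u16 m)
{
	return m * 0xff / 0xffff;	/* e.g. 0x8000 -> 0x7f */
}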
diff --git a/drivers/hid/hid-microsoft.c b/drivers/hid/hid-microsoft.c
index c6cd392e9f99..74b7b84a0420 100644
--- a/drivers/hid/hid-microsoft.c
+++ b/drivers/hid/hid-microsoft.c
@@ -280,9 +280,11 @@ static const struct hid_device_id ms_devices[] = {
.driver_data = MS_HIDINPUT },
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_PRO_3_JP),
.driver_data = MS_HIDINPUT },
- { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_PRO_4_JP),
+ { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_PRO_4),
+ .driver_data = MS_HIDINPUT },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_PRO_4_2),
.driver_data = MS_HIDINPUT },
- { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_3),
+ { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_PRO_4_JP),
.driver_data = MS_HIDINPUT },
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_POWER_COVER),
.driver_data = MS_HIDINPUT },
diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
index fb6f1f447279..6dca66806844 100644
--- a/drivers/hid/hid-multitouch.c
+++ b/drivers/hid/hid-multitouch.c
@@ -108,6 +108,7 @@ struct mt_device {
int cc_value_index; /* contact count value index in the field */
unsigned last_slot_field; /* the last field of a slot */
unsigned mt_report_id; /* the report ID of the multitouch device */
+ unsigned long initial_quirks; /* initial quirks state */
__s16 inputmode; /* InputMode HID feature, -1 if non-existent */
__s16 inputmode_index; /* InputMode HID feature index in the report */
__s16 maxcontact_report_id; /* Maximum Contact Number HID feature,
@@ -318,13 +319,10 @@ static void mt_get_feature(struct hid_device *hdev, struct hid_report *report)
u8 *buf;
/*
- * Only fetch the feature report if initial reports are not already
- * been retrieved. Currently this is only done for Windows 8 touch
- * devices.
+ * Do not fetch the feature report if the device has been explicitly
+ * marked as non-capable.
*/
- if (!(hdev->quirks & HID_QUIRK_NO_INIT_REPORTS))
- return;
- if (td->mtclass.name != MT_CLS_WIN_8)
+ if (td->initial_quirks & HID_QUIRK_NO_INIT_REPORTS)
return;
buf = hid_alloc_report_buf(report, GFP_KERNEL);
@@ -567,6 +565,14 @@ static int mt_touch_input_mapping(struct hid_device *hdev, struct hid_input *hi,
case HID_UP_BUTTON:
code = BTN_MOUSE + ((usage->hid - 1) & HID_USAGE);
+ /*
+ * MS PTP spec says that external buttons left and right have
+ * usages 2 and 3.
+ */
+ if (cls->name == MT_CLS_WIN_8 &&
+ field->application == HID_DG_TOUCHPAD &&
+ (usage->hid & HID_USAGE) > 1)
+ code--;
hid_map_usage(hi, usage, bit, max, EV_KEY, code);
input_set_capability(hi->input, EV_KEY, code);
return 1;
@@ -842,7 +848,9 @@ static int mt_input_mapping(struct hid_device *hdev, struct hid_input *hi,
if (!td->mtclass.export_all_inputs &&
field->application != HID_DG_TOUCHSCREEN &&
field->application != HID_DG_PEN &&
- field->application != HID_DG_TOUCHPAD)
+ field->application != HID_DG_TOUCHPAD &&
+ field->application != HID_GD_KEYBOARD &&
+ field->application != HID_CP_CONSUMER_CONTROL)
return -1;
/*
@@ -1083,36 +1091,6 @@ static int mt_probe(struct hid_device *hdev, const struct hid_device_id *id)
}
}
- /* This allows the driver to correctly support devices
- * that emit events over several HID messages.
- */
- hdev->quirks |= HID_QUIRK_NO_INPUT_SYNC;
-
- /*
- * This allows the driver to handle different input sensors
- * that emits events through different reports on the same HID
- * device.
- */
- hdev->quirks |= HID_QUIRK_MULTI_INPUT;
- hdev->quirks |= HID_QUIRK_NO_EMPTY_INPUT;
-
- /*
- * Handle special quirks for Windows 8 certified devices.
- */
- if (id->group == HID_GROUP_MULTITOUCH_WIN_8)
- /*
- * Some multitouch screens do not like to be polled for input
- * reports. Fortunately, the Win8 spec says that all touches
- * should be sent during each report, making the initialization
- * of input reports unnecessary.
- *
- * In addition some touchpads do not behave well if we read
- * all feature reports from them. Instead we prevent
- * initial report fetching and then selectively fetch each
- * report we are interested in.
- */
- hdev->quirks |= HID_QUIRK_NO_INIT_REPORTS;
-
td = devm_kzalloc(&hdev->dev, sizeof(struct mt_device), GFP_KERNEL);
if (!td) {
dev_err(&hdev->dev, "cannot allocate multitouch data\n");
@@ -1136,6 +1114,39 @@ static int mt_probe(struct hid_device *hdev, const struct hid_device_id *id)
if (id->vendor == HID_ANY_ID && id->product == HID_ANY_ID)
td->serial_maybe = true;
+ /*
+ * Store the initial quirk state
+ */
+ td->initial_quirks = hdev->quirks;
+
+ /* This allows the driver to correctly support devices
+ * that emit events over several HID messages.
+ */
+ hdev->quirks |= HID_QUIRK_NO_INPUT_SYNC;
+
+ /*
+ * This allows the driver to handle different input sensors
+ * that emit events through different reports on the same HID
+ * device.
+ */
+ hdev->quirks |= HID_QUIRK_MULTI_INPUT;
+ hdev->quirks |= HID_QUIRK_NO_EMPTY_INPUT;
+
+ /*
+ * Some multitouch screens do not like to be polled for input
+ * reports. Fortunately, the Win8 spec says that all touches
+ * should be sent during each report, making the initialization
+ * of input reports unnecessary. For Win7 devices, well, let's hope
+ * they will still be happy (this is only a problem if a touch
+ * was already there while probing the device).
+ *
+ * In addition some touchpads do not behave well if we read
+ * all feature reports from them. Instead we prevent
+ * initial report fetching and then selectively fetch each
+ * report we are interested in.
+ */
+ hdev->quirks |= HID_QUIRK_NO_INIT_REPORTS;
+
ret = hid_parse(hdev);
if (ret != 0)
return ret;
@@ -1204,8 +1215,11 @@ static int mt_resume(struct hid_device *hdev)
static void mt_remove(struct hid_device *hdev)
{
+ struct mt_device *td = hid_get_drvdata(hdev);
+
sysfs_remove_group(&hdev->dev.kobj, &mt_attribute_group);
hid_hw_stop(hdev);
+ hdev->quirks = td->initial_quirks;
}
/*
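mt_probe() now snapshots hdev->quirks into td->initial_quirks before OR-ing in its own NO_INPUT_SYNC, MULTI_INPUT, NO_EMPTY_INPUT and NO_INIT_REPORTS bits, and mt_remove() restores the snapshot, so a later rebind (or handoff to another driver) starts from the device's true quirk state instead of inheriting multitouch's additions. The save-modify-restore shape in miniature (hypothetical function names):

static void example_bind(struct hid_device *hdev, struct mt_device *td)
{
	td->initial_quirks = hdev->quirks;	/* snapshot before changes */
	hdev->quirks |= HID_QUIRK_MULTI_INPUT;	/* driver-local additions */
}

static void example_unbind(struct hid_device *hdev, struct mt_device *td)
{
	hdev->quirks = td->initial_quirks;	/* undo on unbind */
}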
diff --git a/drivers/hid/hid-sensor-hub.c b/drivers/hid/hid-sensor-hub.c
index 60875625cbdf..5c925228847c 100644
--- a/drivers/hid/hid-sensor-hub.c
+++ b/drivers/hid/hid-sensor-hub.c
@@ -795,6 +795,12 @@ static const struct hid_device_id sensor_hub_devices[] = {
{ HID_DEVICE(HID_BUS_ANY, HID_GROUP_SENSOR_HUB, USB_VENDOR_ID_MICROSOFT,
USB_DEVICE_ID_MS_TYPE_COVER_2),
.driver_data = HID_SENSOR_HUB_ENUM_QUIRK},
+ { HID_DEVICE(HID_BUS_ANY, HID_GROUP_SENSOR_HUB, USB_VENDOR_ID_MICROSOFT,
+ 0x07bd), /* Microsoft Surface 3 */
+ .driver_data = HID_SENSOR_HUB_ENUM_QUIRK},
+ { HID_DEVICE(HID_BUS_ANY, HID_GROUP_SENSOR_HUB, USB_VENDOR_ID_MICROCHIP,
+ 0x0f01), /* MM7150 */
+ .driver_data = HID_SENSOR_HUB_ENUM_QUIRK},
{ HID_DEVICE(HID_BUS_ANY, HID_GROUP_SENSOR_HUB, USB_VENDOR_ID_STM_0,
USB_DEVICE_ID_STM_HID_SENSOR),
.driver_data = HID_SENSOR_HUB_ENUM_QUIRK},
diff --git a/drivers/hid/hid-sony.c b/drivers/hid/hid-sony.c
index b0bb99a821bd..7687c0875395 100644
--- a/drivers/hid/hid-sony.c
+++ b/drivers/hid/hid-sony.c
@@ -36,6 +36,8 @@
#include <linux/list.h>
#include <linux/idr.h>
#include <linux/input/mt.h>
+#include <linux/crc32.h>
+#include <asm/unaligned.h>
#include "hid-ids.h"
@@ -374,7 +376,7 @@ static u8 dualshock4_usb_rdesc[] = {
0x65, 0x00, /* Unit, */
0x05, 0x09, /* Usage Page (Button), */
0x19, 0x01, /* Usage Minimum (01h), */
- 0x29, 0x0E, /* Usage Maximum (0Eh), */
+ 0x29, 0x0D, /* Usage Maximum (0Dh), */
0x15, 0x00, /* Logical Minimum (0), */
0x25, 0x01, /* Logical Maximum (1), */
0x75, 0x01, /* Report Size (1), */
@@ -403,14 +405,14 @@ static u8 dualshock4_usb_rdesc[] = {
0x19, 0x40, /* Usage Minimum (40h), */
0x29, 0x42, /* Usage Maximum (42h), */
0x16, 0x00, 0x80, /* Logical Minimum (-32768), */
- 0x26, 0x00, 0x7F, /* Logical Maximum (32767), */
+ 0x26, 0xFF, 0x7F, /* Logical Maximum (32767), */
0x75, 0x10, /* Report Size (16), */
0x95, 0x03, /* Report Count (3), */
0x81, 0x02, /* Input (Variable), */
0x19, 0x43, /* Usage Minimum (43h), */
0x29, 0x45, /* Usage Maximum (45h), */
- 0x16, 0x00, 0xE0, /* Logical Minimum (-8192), */
- 0x26, 0xFF, 0x1F, /* Logical Maximum (8191), */
+ 0x16, 0x00, 0x80, /* Logical Minimum (-32768), */
+ 0x26, 0xFF, 0x7F, /* Logical Maximum (32767), */
0x95, 0x03, /* Report Count (3), */
0x81, 0x02, /* Input (Variable), */
0x06, 0x00, 0xFF, /* Usage Page (FF00h), */
@@ -687,7 +689,7 @@ static u8 dualshock4_bt_rdesc[] = {
0x81, 0x42, /* Input (Variable, Null State), */
0x05, 0x09, /* Usage Page (Button), */
0x19, 0x01, /* Usage Minimum (01h), */
- 0x29, 0x0E, /* Usage Maximum (0Eh), */
+ 0x29, 0x0D, /* Usage Maximum (0Dh), */
0x15, 0x00, /* Logical Minimum (0), */
0x25, 0x01, /* Logical Maximum (1), */
0x75, 0x01, /* Report Size (1), */
@@ -712,14 +714,14 @@ static u8 dualshock4_bt_rdesc[] = {
0x19, 0x40, /* Usage Minimum (40h), */
0x29, 0x42, /* Usage Maximum (42h), */
0x16, 0x00, 0x80, /* Logical Minimum (-32768), */
- 0x26, 0x00, 0x7F, /* Logical Maximum (32767), */
+ 0x26, 0xFF, 0x7F, /* Logical Maximum (32767), */
0x75, 0x10, /* Report Size (16), */
0x95, 0x03, /* Report Count (3), */
0x81, 0x02, /* Input (Variable), */
0x19, 0x43, /* Usage Minimum (43h), */
0x29, 0x45, /* Usage Maximum (45h), */
- 0x16, 0x00, 0xE0, /* Logical Minimum (-8192), */
- 0x26, 0xFF, 0x1F, /* Logical Maximum (8191), */
+ 0x16, 0x00, 0x80, /* Logical Minimum (-32768), */
+ 0x26, 0xFF, 0x7F, /* Logical Maximum (32767), */
0x95, 0x03, /* Report Count (3), */
0x81, 0x02, /* Input (Variable), */
0x06, 0x00, 0xFF, /* Usage Page (FF00h), */
@@ -975,6 +977,32 @@ static const unsigned int buzz_keymap[] = {
[20] = BTN_TRIGGER_HAPPY20,
};
+static const unsigned int ds4_absmap[] = {
+ [0x30] = ABS_X,
+ [0x31] = ABS_Y,
+ [0x32] = ABS_RX, /* right stick X */
+ [0x33] = ABS_Z, /* L2 */
+ [0x34] = ABS_RZ, /* R2 */
+ [0x35] = ABS_RY, /* right stick Y */
+};
+
+static const unsigned int ds4_keymap[] = {
+ [0x1] = BTN_WEST, /* Square */
+ [0x2] = BTN_SOUTH, /* Cross */
+ [0x3] = BTN_EAST, /* Circle */
+ [0x4] = BTN_NORTH, /* Triangle */
+ [0x5] = BTN_TL, /* L1 */
+ [0x6] = BTN_TR, /* R1 */
+ [0x7] = BTN_TL2, /* L2 */
+ [0x8] = BTN_TR2, /* R2 */
+ [0x9] = BTN_SELECT, /* Share */
+ [0xa] = BTN_START, /* Options */
+ [0xb] = BTN_THUMBL, /* L3 */
+ [0xc] = BTN_THUMBR, /* R3 */
+ [0xd] = BTN_MODE, /* PS */
+};
+
static enum power_supply_property sony_battery_props[] = {
POWER_SUPPLY_PROP_PRESENT,
POWER_SUPPLY_PROP_CAPACITY,
@@ -1019,14 +1047,24 @@ struct motion_output_report_02 {
u8 rumble;
};
-#define DS4_REPORT_0x02_SIZE 37
-#define DS4_REPORT_0x05_SIZE 32
-#define DS4_REPORT_0x11_SIZE 78
-#define DS4_REPORT_0x81_SIZE 7
+#define DS4_FEATURE_REPORT_0x02_SIZE 37
+#define DS4_FEATURE_REPORT_0x81_SIZE 7
+#define DS4_INPUT_REPORT_0x11_SIZE 78
+#define DS4_OUTPUT_REPORT_0x05_SIZE 32
+#define DS4_OUTPUT_REPORT_0x11_SIZE 78
#define SIXAXIS_REPORT_0xF2_SIZE 17
#define SIXAXIS_REPORT_0xF5_SIZE 8
#define MOTION_REPORT_0x02_SIZE 49
+/* Offsets relative to USB input report (0x1). Bluetooth (0x11) requires an
+ * additional +2.
+ */
+#define DS4_INPUT_REPORT_BUTTON_OFFSET 5
+#define DS4_INPUT_REPORT_BATTERY_OFFSET 30
+#define DS4_INPUT_REPORT_TOUCHPAD_OFFSET 33
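+/* e.g. on Bluetooth the battery byte sits at 2 + 30 = 32 in report 0x11 */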
+
+#define DS4_TOUCHPAD_SUFFIX " Touchpad"
+
static DEFINE_SPINLOCK(sony_dev_list_lock);
static LIST_HEAD(sony_device_list);
static DEFINE_IDA(sony_device_id_allocator);
@@ -1035,6 +1073,7 @@ struct sony_sc {
spinlock_t lock;
struct list_head list_node;
struct hid_device *hdev;
+ struct input_dev *touchpad;
struct led_classdev *leds[MAX_LEDS];
unsigned long quirks;
struct work_struct state_worker;
@@ -1130,6 +1169,37 @@ static int ps3remote_mapping(struct hid_device *hdev, struct hid_input *hi,
return 1;
}
+static int ds4_mapping(struct hid_device *hdev, struct hid_input *hi,
+ struct hid_field *field, struct hid_usage *usage,
+ unsigned long **bit, int *max)
+{
+ if ((usage->hid & HID_USAGE_PAGE) == HID_UP_BUTTON) {
+ unsigned int key = usage->hid & HID_USAGE;
+
+ if (key >= ARRAY_SIZE(ds4_keymap))
+ return -1;
+
+ key = ds4_keymap[key];
+ hid_map_usage_clear(hi, usage, bit, max, EV_KEY, key);
+ return 1;
+ } else if ((usage->hid & HID_USAGE_PAGE) == HID_UP_GENDESK) {
+ unsigned int abs = usage->hid & HID_USAGE;
+
+ /* Let the HID parser deal with the HAT. */
+ if (usage->hid == HID_GD_HATSWITCH)
+ return 0;
+
+ if (abs >= ARRAY_SIZE(ds4_absmap))
+ return -1;
+
+ abs = ds4_absmap[abs];
+ hid_map_usage_clear(hi, usage, bit, max, EV_ABS, abs);
+ return 1;
+ }
+
+ return 0;
+}
+
static u8 *sony_report_fixup(struct hid_device *hdev, u8 *rdesc,
unsigned int *rsize)
{
@@ -1219,23 +1289,22 @@ static void sixaxis_parse_report(struct sony_sc *sc, u8 *rd, int size)
static void dualshock4_parse_report(struct sony_sc *sc, u8 *rd, int size)
{
- struct hid_input *hidinput = list_entry(sc->hdev->inputs.next,
- struct hid_input, list);
- struct input_dev *input_dev = hidinput->input;
unsigned long flags;
- int n, offset;
+ int n, m, offset, num_touch_data, max_touch_data;
u8 cable_state, battery_capacity, battery_charging;
- /*
- * Battery and touchpad data starts at byte 30 in the USB report and
- * 32 in Bluetooth report.
- */
- offset = (sc->quirks & DUALSHOCK4_CONTROLLER_USB) ? 30 : 32;
+ /* When using Bluetooth the header is 2 bytes longer, so skip these. */
+ int data_offset = (sc->quirks & DUALSHOCK4_CONTROLLER_USB) ? 0 : 2;
+
+ /* Second bit of third button byte is for the touchpad button. */
+ offset = data_offset + DS4_INPUT_REPORT_BUTTON_OFFSET;
+ input_report_key(sc->touchpad, BTN_LEFT, rd[offset+2] & 0x2);
/*
- * The lower 4 bits of byte 30 contain the battery level
+ * The lower 4 bits of byte 30 (or 32 for BT) contain the battery level
* and the 5th bit contains the USB cable state.
*/
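+ /* e.g. a hypothetical rd[offset] of 0x1A: cable bit set, raw level 0x0A */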
+ offset = data_offset + DS4_INPUT_REPORT_BATTERY_OFFSET;
cable_state = (rd[offset] >> 4) & 0x01;
battery_capacity = rd[offset] & 0x0F;
@@ -1262,30 +1331,52 @@ static void dualshock4_parse_report(struct sony_sc *sc, u8 *rd, int size)
sc->battery_charging = battery_charging;
spin_unlock_irqrestore(&sc->lock, flags);
- offset += 5;
-
/*
- * The Dualshock 4 multi-touch trackpad data starts at offset 35 on USB
- * and 37 on Bluetooth.
- * The first 7 bits of the first byte is a counter and bit 8 is a touch
- * indicator that is 0 when pressed and 1 when not pressed.
- * The next 3 bytes are two 12 bit touch coordinates, X and Y.
- * The data for the second touch is in the same format and immediatly
- * follows the data for the first.
+ * The Dualshock 4 multi-touch trackpad data starts at offset 33 on USB
+ * and 35 on Bluetooth.
+ * The first byte indicates the number of touch-data blocks in the report.
+ * Trackpad data starts 2 bytes later (e.g. 35 for USB).
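+ * Each touch-data block is 1 timestamp byte followed by two 4-byte
+ * touch records, which is exactly what the nested loops below walk.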
*/
- for (n = 0; n < 2; n++) {
- u16 x, y;
+ offset = data_offset + DS4_INPUT_REPORT_TOUCHPAD_OFFSET;
+ max_touch_data = (sc->quirks & DUALSHOCK4_CONTROLLER_USB) ? 3 : 4;
+ if (rd[offset] > 0 && rd[offset] <= max_touch_data)
+ num_touch_data = rd[offset];
+ else
+ num_touch_data = 1;
+ offset += 1;
+
+ for (m = 0; m < num_touch_data; m++) {
+ /* Skip past timestamp */
+ offset += 1;
+
+ /*
+ * The first 7 bits of the first byte is a counter and bit 8 is
+ * a touch indicator that is 0 when pressed and 1 when not
+ * pressed.
+ * The next 3 bytes are two 12 bit touch coordinates, X and Y.
+ * The data for the second touch is in the same format and
+ * immediately follows the data for the first.
+ */
+ for (n = 0; n < 2; n++) {
+ u16 x, y;
+ bool active;
- x = rd[offset+1] | ((rd[offset+2] & 0xF) << 8);
- y = ((rd[offset+2] & 0xF0) >> 4) | (rd[offset+3] << 4);
+ x = rd[offset+1] | ((rd[offset+2] & 0xF) << 8);
+ y = ((rd[offset+2] & 0xF0) >> 4) | (rd[offset+3] << 4);
- input_mt_slot(input_dev, n);
- input_mt_report_slot_state(input_dev, MT_TOOL_FINGER,
- !(rd[offset] >> 7));
- input_report_abs(input_dev, ABS_MT_POSITION_X, x);
- input_report_abs(input_dev, ABS_MT_POSITION_Y, y);
+ active = !(rd[offset] >> 7);
+ input_mt_slot(sc->touchpad, n);
+ input_mt_report_slot_state(sc->touchpad, MT_TOOL_FINGER, active);
- offset += 4;
+ if (active) {
+ input_report_abs(sc->touchpad, ABS_MT_POSITION_X, x);
+ input_report_abs(sc->touchpad, ABS_MT_POSITION_Y, y);
+ }
+
+ offset += 4;
+ }
+ input_mt_sync_frame(sc->touchpad);
+ input_sync(sc->touchpad);
}
}
@@ -1324,6 +1415,21 @@ static int sony_raw_event(struct hid_device *hdev, struct hid_report *report,
} else if (((sc->quirks & DUALSHOCK4_CONTROLLER_USB) && rd[0] == 0x01 &&
size == 64) || ((sc->quirks & DUALSHOCK4_CONTROLLER_BT)
&& rd[0] == 0x11 && size == 78)) {
+ if (sc->quirks & DUALSHOCK4_CONTROLLER_BT) {
+ /* CRC check */
+ u8 bthdr = 0xA1;
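+ /* 0xA1 is the Bluetooth HIDP header byte (DATA | input report) */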
+ u32 crc;
+ u32 report_crc;
+
+ crc = crc32_le(0xFFFFFFFF, &bthdr, 1);
+ crc = ~crc32_le(crc, rd, DS4_INPUT_REPORT_0x11_SIZE-4);
+ report_crc = get_unaligned_le32(&rd[DS4_INPUT_REPORT_0x11_SIZE-4]);
+ if (crc != report_crc) {
+ hid_dbg(sc->hdev, "DualShock 4 input report's CRC check failed, received crc 0x%x != 0x%x\n",
+ report_crc, crc);
+ return -EILSEQ;
+ }
+ }
dualshock4_parse_report(sc, rd, size);
}
@@ -1367,47 +1473,84 @@ static int sony_mapping(struct hid_device *hdev, struct hid_input *hi,
if (sc->quirks & PS3REMOTE)
return ps3remote_mapping(hdev, hi, field, usage, bit, max);
+
+ if (sc->quirks & DUALSHOCK4_CONTROLLER)
+ return ds4_mapping(hdev, hi, field, usage, bit, max);
+
/* Let hid-core decide for the others */
return 0;
}
-static int sony_register_touchpad(struct hid_input *hi, int touch_count,
+static int sony_register_touchpad(struct sony_sc *sc, int touch_count,
int w, int h)
{
- struct input_dev *input_dev = hi->input;
+ size_t name_sz;
+ char *name;
int ret;
- ret = input_mt_init_slots(input_dev, touch_count, 0);
+ sc->touchpad = input_allocate_device();
+ if (!sc->touchpad)
+ return -ENOMEM;
+
+ input_set_drvdata(sc->touchpad, sc);
+ sc->touchpad->dev.parent = &sc->hdev->dev;
+ sc->touchpad->phys = sc->hdev->phys;
+ sc->touchpad->uniq = sc->hdev->uniq;
+ sc->touchpad->id.bustype = sc->hdev->bus;
+ sc->touchpad->id.vendor = sc->hdev->vendor;
+ sc->touchpad->id.product = sc->hdev->product;
+ sc->touchpad->id.version = sc->hdev->version;
+
+ /* Append a suffix to the controller name as there are various
+ * DS4-compatible non-Sony devices with different names.
+ */
+ name_sz = strlen(sc->hdev->name) + sizeof(DS4_TOUCHPAD_SUFFIX);
+ name = kzalloc(name_sz, GFP_KERNEL);
+ if (!name) {
+ ret = -ENOMEM;
+ goto err;
+ }
+ snprintf(name, name_sz, "%s" DS4_TOUCHPAD_SUFFIX, sc->hdev->name);
+ sc->touchpad->name = name;
+
+ ret = input_mt_init_slots(sc->touchpad, touch_count, 0);
if (ret < 0)
- return ret;
+ goto err;
+
+ /* We map the button underneath the touchpad to BTN_LEFT. */
+ __set_bit(EV_KEY, sc->touchpad->evbit);
+ __set_bit(BTN_LEFT, sc->touchpad->keybit);
+ __set_bit(INPUT_PROP_BUTTONPAD, sc->touchpad->propbit);
- input_set_abs_params(input_dev, ABS_MT_POSITION_X, 0, w, 0, 0);
- input_set_abs_params(input_dev, ABS_MT_POSITION_Y, 0, h, 0, 0);
+ input_set_abs_params(sc->touchpad, ABS_MT_POSITION_X, 0, w, 0, 0);
+ input_set_abs_params(sc->touchpad, ABS_MT_POSITION_Y, 0, h, 0, 0);
+
+ ret = input_register_device(sc->touchpad);
+ if (ret < 0)
+ goto err;
return 0;
+
+err:
+ kfree(sc->touchpad->name);
+ sc->touchpad->name = NULL;
+
+ input_free_device(sc->touchpad);
+ sc->touchpad = NULL;
+
+ return ret;
}
-static int sony_input_configured(struct hid_device *hdev,
- struct hid_input *hidinput)
+static void sony_unregister_touchpad(struct sony_sc *sc)
{
- struct sony_sc *sc = hid_get_drvdata(hdev);
- int ret;
+ if (!sc->touchpad)
+ return;
- /*
- * The Dualshock 4 touchpad supports 2 touches and has a
- * resolution of 1920x942 (44.86 dots/mm).
- */
- if (sc->quirks & DUALSHOCK4_CONTROLLER) {
- ret = sony_register_touchpad(hidinput, 2, 1920, 942);
- if (ret) {
- hid_err(sc->hdev,
- "Unable to initialize multi-touch slots: %d\n",
- ret);
- return ret;
- }
- }
+ kfree(sc->touchpad->name);
+ sc->touchpad->name = NULL;
- return 0;
+ input_unregister_device(sc->touchpad);
+ sc->touchpad = NULL;
}
/*
@@ -1483,11 +1626,11 @@ static int dualshock4_set_operational_bt(struct hid_device *hdev)
u8 *buf;
int ret;
- buf = kmalloc(DS4_REPORT_0x02_SIZE, GFP_KERNEL);
+ buf = kmalloc(DS4_FEATURE_REPORT_0x02_SIZE, GFP_KERNEL);
if (!buf)
return -ENOMEM;
- ret = hid_hw_raw_request(hdev, 0x02, buf, DS4_REPORT_0x02_SIZE,
+ ret = hid_hw_raw_request(hdev, 0x02, buf, DS4_FEATURE_REPORT_0x02_SIZE,
HID_FEATURE_REPORT, HID_REQ_GET_REPORT);
kfree(buf);
@@ -1892,14 +2035,14 @@ static void dualshock4_send_output_report(struct sony_sc *sc)
* 0xD0 - 66hz
*/
if (sc->quirks & DUALSHOCK4_CONTROLLER_USB) {
- memset(buf, 0, DS4_REPORT_0x05_SIZE);
+ memset(buf, 0, DS4_OUTPUT_REPORT_0x05_SIZE);
buf[0] = 0x05;
buf[1] = 0xFF;
offset = 4;
} else {
- memset(buf, 0, DS4_REPORT_0x11_SIZE);
+ memset(buf, 0, DS4_OUTPUT_REPORT_0x11_SIZE);
buf[0] = 0x11;
- buf[1] = 0x80;
+ buf[1] = 0xC0; /* HID + CRC */
buf[3] = 0x0F;
offset = 6;
}
@@ -1925,10 +2068,17 @@ static void dualshock4_send_output_report(struct sony_sc *sc)
buf[offset++] = sc->led_delay_off[3];
if (sc->quirks & DUALSHOCK4_CONTROLLER_USB)
- hid_hw_output_report(hdev, buf, DS4_REPORT_0x05_SIZE);
- else
- hid_hw_raw_request(hdev, 0x11, buf, DS4_REPORT_0x11_SIZE,
- HID_OUTPUT_REPORT, HID_REQ_SET_REPORT);
+ hid_hw_output_report(hdev, buf, DS4_OUTPUT_REPORT_0x05_SIZE);
+ else {
+ /* CRC generation */
+ u8 bthdr = 0xA2;
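+ /* 0xA2 is the Bluetooth HIDP header byte (DATA | output report) */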
+ u32 crc;
+
+ crc = crc32_le(0xFFFFFFFF, &bthdr, 1);
+ crc = ~crc32_le(crc, buf, DS4_OUTPUT_REPORT_0x11_SIZE-4);
+ put_unaligned_le32(crc, &buf[74]);
+ hid_hw_output_report(hdev, buf, DS4_OUTPUT_REPORT_0x11_SIZE);
+ }
}
static void motion_send_output_report(struct sony_sc *sc)
@@ -1972,10 +2122,10 @@ static int sony_allocate_output_report(struct sony_sc *sc)
kmalloc(sizeof(union sixaxis_output_report_01),
GFP_KERNEL);
else if (sc->quirks & DUALSHOCK4_CONTROLLER_BT)
- sc->output_report_dmabuf = kmalloc(DS4_REPORT_0x11_SIZE,
+ sc->output_report_dmabuf = kmalloc(DS4_OUTPUT_REPORT_0x11_SIZE,
GFP_KERNEL);
else if (sc->quirks & DUALSHOCK4_CONTROLLER_USB)
- sc->output_report_dmabuf = kmalloc(DS4_REPORT_0x05_SIZE,
+ sc->output_report_dmabuf = kmalloc(DS4_OUTPUT_REPORT_0x05_SIZE,
GFP_KERNEL);
else if (sc->quirks & MOTION_CONTROLLER)
sc->output_report_dmabuf = kmalloc(MOTION_REPORT_0x02_SIZE,
@@ -2220,7 +2370,7 @@ static int sony_check_add(struct sony_sc *sc)
return 0;
}
} else if (sc->quirks & DUALSHOCK4_CONTROLLER_USB) {
- buf = kmalloc(DS4_REPORT_0x81_SIZE, GFP_KERNEL);
+ buf = kmalloc(DS4_FEATURE_REPORT_0x81_SIZE, GFP_KERNEL);
if (!buf)
return -ENOMEM;
@@ -2230,10 +2380,10 @@ static int sony_check_add(struct sony_sc *sc)
* offset 1.
*/
ret = hid_hw_raw_request(sc->hdev, 0x81, buf,
- DS4_REPORT_0x81_SIZE, HID_FEATURE_REPORT,
+ DS4_FEATURE_REPORT_0x81_SIZE, HID_FEATURE_REPORT,
HID_REQ_GET_REPORT);
- if (ret != DS4_REPORT_0x81_SIZE) {
+ if (ret != DS4_FEATURE_REPORT_0x81_SIZE) {
hid_err(sc->hdev, "failed to retrieve feature report 0x81 with the DualShock 4 MAC address\n");
ret = ret < 0 ? ret : -EINVAL;
goto out_free;
@@ -2329,45 +2479,12 @@ static inline void sony_cancel_work_sync(struct sony_sc *sc)
cancel_work_sync(&sc->state_worker);
}
-static int sony_probe(struct hid_device *hdev, const struct hid_device_id *id)
+static int sony_input_configured(struct hid_device *hdev,
+ struct hid_input *hidinput)
{
- int ret;
+ struct sony_sc *sc = hid_get_drvdata(hdev);
int append_dev_id;
- unsigned long quirks = id->driver_data;
- struct sony_sc *sc;
- unsigned int connect_mask = HID_CONNECT_DEFAULT;
-
- if (!strcmp(hdev->name, "FutureMax Dance Mat"))
- quirks |= FUTUREMAX_DANCE_MAT;
-
- sc = devm_kzalloc(&hdev->dev, sizeof(*sc), GFP_KERNEL);
- if (sc == NULL) {
- hid_err(hdev, "can't alloc sony descriptor\n");
- return -ENOMEM;
- }
-
- spin_lock_init(&sc->lock);
-
- sc->quirks = quirks;
- hid_set_drvdata(hdev, sc);
- sc->hdev = hdev;
-
- ret = hid_parse(hdev);
- if (ret) {
- hid_err(hdev, "parse failed\n");
- return ret;
- }
-
- if (sc->quirks & VAIO_RDESC_CONSTANT)
- connect_mask |= HID_CONNECT_HIDDEV_FORCE;
- else if (sc->quirks & SIXAXIS_CONTROLLER)
- connect_mask |= HID_CONNECT_HIDDEV_FORCE;
-
- ret = hid_hw_start(hdev, connect_mask);
- if (ret) {
- hid_err(hdev, "hw start failed\n");
- return ret;
- }
+ int ret;
ret = sony_set_device_id(sc);
if (ret < 0) {
@@ -2415,11 +2532,6 @@ static int sony_probe(struct hid_device *hdev, const struct hid_device_id *id)
sony_init_output_report(sc, sixaxis_send_output_report);
} else if (sc->quirks & DUALSHOCK4_CONTROLLER) {
if (sc->quirks & DUALSHOCK4_CONTROLLER_BT) {
- /*
- * The DualShock 4 wants output reports sent on the ctrl
- * endpoint when connected via Bluetooth.
- */
- hdev->quirks |= HID_QUIRK_NO_OUTPUT_REPORTS_ON_INTR_EP;
ret = dualshock4_set_operational_bt(hdev);
if (ret < 0) {
hid_err(hdev, "failed to set the Dualshock 4 operational mode\n");
@@ -2427,6 +2539,18 @@ static int sony_probe(struct hid_device *hdev, const struct hid_device_id *id)
}
}
+ /*
+ * The Dualshock 4 touchpad supports 2 touches and has a
+ * resolution of 1920x942 (44.86 dots/mm).
+ */
+ ret = sony_register_touchpad(sc, 2, 1920, 942);
+ if (ret) {
+ hid_err(sc->hdev,
+ "Unable to initialize multi-touch slots: %d\n",
+ ret);
+ return ret;
+ }
+
sony_init_output_report(sc, dualshock4_send_output_report);
} else if (sc->quirks & MOTION_CONTROLLER) {
sony_init_output_report(sc, motion_send_output_report);
@@ -2482,17 +2606,84 @@ err_stop:
return ret;
}
+static int sony_probe(struct hid_device *hdev, const struct hid_device_id *id)
+{
+ int ret;
+ unsigned long quirks = id->driver_data;
+ struct sony_sc *sc;
+ unsigned int connect_mask = HID_CONNECT_DEFAULT;
+
+ if (!strcmp(hdev->name, "FutureMax Dance Mat"))
+ quirks |= FUTUREMAX_DANCE_MAT;
+
+ sc = devm_kzalloc(&hdev->dev, sizeof(*sc), GFP_KERNEL);
+ if (sc == NULL) {
+ hid_err(hdev, "can't alloc sony descriptor\n");
+ return -ENOMEM;
+ }
+
+ spin_lock_init(&sc->lock);
+
+ sc->quirks = quirks;
+ hid_set_drvdata(hdev, sc);
+ sc->hdev = hdev;
+
+ ret = hid_parse(hdev);
+ if (ret) {
+ hid_err(hdev, "parse failed\n");
+ return ret;
+ }
+
+ if (sc->quirks & VAIO_RDESC_CONSTANT)
+ connect_mask |= HID_CONNECT_HIDDEV_FORCE;
+ else if (sc->quirks & SIXAXIS_CONTROLLER)
+ connect_mask |= HID_CONNECT_HIDDEV_FORCE;
+
+ /* Patch the hw version on DS4-compatible devices, so applications can
+ * distinguish between the default HID mappings and the mappings defined
+ * by the Linux game controller spec. This is important for the SDL2
+ * library, whose game controller database uses device ids in
+ * combination with the version as a key.
+ */
+ if (sc->quirks & DUALSHOCK4_CONTROLLER)
+ hdev->version |= 0x8000;
+
+ ret = hid_hw_start(hdev, connect_mask);
+ if (ret) {
+ hid_err(hdev, "hw start failed\n");
+ return ret;
+ }
+
+ /* sony_input_configured() can fail, but that does not cause
+ * hid_hw_start() to fail (by design). Instead, check whether the
+ * HID layer claimed the device, and fail if it did not.
+ * We can't know the actual reason for the failure; most likely it
+ * is EEXIST in case of a double connection over USB and Bluetooth,
+ * but it could also be ENOMEM or something else.
+ */
+ if (!(hdev->claimed & HID_CLAIMED_INPUT)) {
+ hid_err(hdev, "failed to claim input\n");
+ return -ENODEV;
+ }
+
+ return ret;
+}
+
static void sony_remove(struct hid_device *hdev)
{
struct sony_sc *sc = hid_get_drvdata(hdev);
+ hid_hw_close(hdev);
+
if (sc->quirks & SONY_LED_SUPPORT)
sony_leds_remove(sc);
- if (sc->quirks & SONY_BATTERY_SUPPORT) {
- hid_hw_close(hdev);
+ if (sc->quirks & SONY_BATTERY_SUPPORT)
sony_battery_remove(sc);
- }
+
+ if (sc->touchpad)
+ sony_unregister_touchpad(sc);
sony_cancel_work_sync(sc);
@@ -2596,6 +2787,12 @@ static const struct hid_device_id sony_devices[] = {
.driver_data = DUALSHOCK4_CONTROLLER_USB },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS4_CONTROLLER),
.driver_data = DUALSHOCK4_CONTROLLER_BT },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS4_CONTROLLER_2),
+ .driver_data = DUALSHOCK4_CONTROLLER_USB },
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS4_CONTROLLER_2),
+ .driver_data = DUALSHOCK4_CONTROLLER_BT },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS4_CONTROLLER_DONGLE),
+ .driver_data = DUALSHOCK4_CONTROLLER_USB },
/* Nyko Core Controller for PS3 */
{ HID_USB_DEVICE(USB_VENDOR_ID_SINO_LITE, USB_DEVICE_ID_SINO_LITE_CONTROLLER),
.driver_data = SIXAXIS_CONTROLLER_USB | SINO_LITE_CONTROLLER },
diff --git a/drivers/hid/hid-udraw-ps3.c b/drivers/hid/hid-udraw-ps3.c
new file mode 100644
index 000000000000..88ea390c10ad
--- /dev/null
+++ b/drivers/hid/hid-udraw-ps3.c
@@ -0,0 +1,474 @@
+/*
+ * HID driver for THQ PS3 uDraw tablet
+ *
+ * Copyright (C) 2016 Red Hat Inc. All Rights Reserved
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/device.h>
+#include <linux/hid.h>
+#include <linux/module.h>
+#include "hid-ids.h"
+
+MODULE_AUTHOR("Bastien Nocera <hadess@hadess.net>");
+MODULE_DESCRIPTION("PS3 uDraw tablet driver");
+MODULE_LICENSE("GPL");
+
+/*
+ * Protocol information from:
+ * http://brandonw.net/udraw/
+ * and the source code of:
+ * https://vvvv.org/contribution/udraw-hid
+ */
+
+/*
+ * The device is set up with multiple input devices:
+ * - the touch area which works as a touchpad
+ * - the tablet area which works as a touchpad/drawing tablet
+ * - a joypad with a d-pad, and 7 buttons
+ * - an accelerometer device
+ */
+
+enum {
+ TOUCH_NONE,
+ TOUCH_PEN,
+ TOUCH_FINGER,
+ TOUCH_TWOFINGER
+};
+
+enum {
+ AXIS_X,
+ AXIS_Y,
+ AXIS_Z
+};
+
+/*
+ * Accelerometer min/max values
+ * in order, X, Y and Z
+ */
+static struct {
+ int min;
+ int max;
+} accel_limits[] = {
+ [AXIS_X] = { 490, 534 },
+ [AXIS_Y] = { 490, 534 },
+ [AXIS_Z] = { 492, 536 }
+};
+
+#define DEVICE_NAME "THQ uDraw Game Tablet for PS3"
+/* resolution in pixels */
+#define RES_X 1920
+#define RES_Y 1080
+/* size in mm */
+#define WIDTH 160
+#define HEIGHT 90
+#define PRESSURE_OFFSET 113
+#define MAX_PRESSURE (255 - PRESSURE_OFFSET)
+
+struct udraw {
+ struct input_dev *joy_input_dev;
+ struct input_dev *touch_input_dev;
+ struct input_dev *pen_input_dev;
+ struct input_dev *accel_input_dev;
+ struct hid_device *hdev;
+
+ /*
+ * The device's two-finger support is pretty unreliable: it may
+ * report a single touch when the two fingers are too close
+ * together, and the distance between fingers, even though
+ * reported, is not in the same unit as the touch coordinates.
+ *
+ * We'll make do without it, and try to report the first touch
+ * as reliably as possible.
+ */
+ int last_one_finger_x;
+ int last_one_finger_y;
+ int last_two_finger_x;
+ int last_two_finger_y;
+};
+
+static int clamp_accel(int axis, int offset)
+{
+ axis = clamp(axis,
+ accel_limits[offset].min,
+ accel_limits[offset].max);
+ /* scale the clamped raw value into the 0..255 range */
+ axis = (axis - accel_limits[offset].min) * 0xFF /
+ (accel_limits[offset].max -
+ accel_limits[offset].min);
+ return axis;
+}
+
+static int udraw_raw_event(struct hid_device *hdev, struct hid_report *report,
+ u8 *data, int len)
+{
+ struct udraw *udraw = hid_get_drvdata(hdev);
+ int touch;
+ int x, y, z;
+
+ if (len != 27)
+ return 0;
+
+ if (data[11] == 0x00)
+ touch = TOUCH_NONE;
+ else if (data[11] == 0x40)
+ touch = TOUCH_PEN;
+ else if (data[11] == 0x80)
+ touch = TOUCH_FINGER;
+ else
+ touch = TOUCH_TWOFINGER;
+
+ /* joypad */
+ input_report_key(udraw->joy_input_dev, BTN_WEST, data[0] & 1);
+ input_report_key(udraw->joy_input_dev, BTN_SOUTH, !!(data[0] & 2));
+ input_report_key(udraw->joy_input_dev, BTN_EAST, !!(data[0] & 4));
+ input_report_key(udraw->joy_input_dev, BTN_NORTH, !!(data[0] & 8));
+
+ input_report_key(udraw->joy_input_dev, BTN_SELECT, !!(data[1] & 1));
+ input_report_key(udraw->joy_input_dev, BTN_START, !!(data[1] & 2));
+ input_report_key(udraw->joy_input_dev, BTN_MODE, !!(data[1] & 16));
+
+ x = y = 0;
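+ /* data[2] encodes the d-pad as 8 clockwise steps starting at north */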
+ switch (data[2]) {
+ case 0x0:
+ y = -127;
+ break;
+ case 0x1:
+ y = -127;
+ x = 127;
+ break;
+ case 0x2:
+ x = 127;
+ break;
+ case 0x3:
+ y = 127;
+ x = 127;
+ break;
+ case 0x4:
+ y = 127;
+ break;
+ case 0x5:
+ y = 127;
+ x = -127;
+ break;
+ case 0x6:
+ x = -127;
+ break;
+ case 0x7:
+ y = -127;
+ x = -127;
+ break;
+ default:
+ break;
+ }
+
+ input_report_abs(udraw->joy_input_dev, ABS_X, x);
+ input_report_abs(udraw->joy_input_dev, ABS_Y, y);
+
+ input_sync(udraw->joy_input_dev);
+
+ /* For pen and touchpad */
+ x = y = 0;
+ if (touch != TOUCH_NONE) {
+ if (data[15] != 0x0F)
+ x = data[15] * 256 + data[17];
+ if (data[16] != 0x0F)
+ y = data[16] * 256 + data[18];
+ }
+
+ if (touch == TOUCH_FINGER) {
+ /* Save the last one-finger touch */
+ udraw->last_one_finger_x = x;
+ udraw->last_one_finger_y = y;
+ udraw->last_two_finger_x = -1;
+ udraw->last_two_finger_y = -1;
+ } else if (touch == TOUCH_TWOFINGER) {
+ /*
+ * We have a problem here: x/y now belong to the second
+ * finger, but we want to keep reporting the first finger
+ * to user-space, otherwise the pointer looks as if it jumped.
+ *
+ * See the udraw struct definition for why this was
+ * implemented this way.
+ */
+ if (udraw->last_two_finger_x == -1) {
+ /* Save the position of the 2nd finger */
+ udraw->last_two_finger_x = x;
+ udraw->last_two_finger_y = y;
+
+ x = udraw->last_one_finger_x;
+ y = udraw->last_one_finger_y;
+ } else {
+ /*
+ * Offset the 2-finger coords using the
+ * saved data from the first finger
+ */
+ x = x - (udraw->last_two_finger_x
+ - udraw->last_one_finger_x);
+ y = y - (udraw->last_two_finger_y
+ - udraw->last_one_finger_y);
+ }
+ }
+
+ /* touchpad */
+ if (touch == TOUCH_FINGER || touch == TOUCH_TWOFINGER) {
+ input_report_key(udraw->touch_input_dev, BTN_TOUCH, 1);
+ input_report_key(udraw->touch_input_dev, BTN_TOOL_FINGER,
+ touch == TOUCH_FINGER);
+ input_report_key(udraw->touch_input_dev, BTN_TOOL_DOUBLETAP,
+ touch == TOUCH_TWOFINGER);
+
+ input_report_abs(udraw->touch_input_dev, ABS_X, x);
+ input_report_abs(udraw->touch_input_dev, ABS_Y, y);
+ } else {
+ input_report_key(udraw->touch_input_dev, BTN_TOUCH, 0);
+ input_report_key(udraw->touch_input_dev, BTN_TOOL_FINGER, 0);
+ input_report_key(udraw->touch_input_dev, BTN_TOOL_DOUBLETAP, 0);
+ }
+ input_sync(udraw->touch_input_dev);
+
+ /* pen */
+ if (touch == TOUCH_PEN) {
+ int level;
+
+ level = clamp(data[13] - PRESSURE_OFFSET,
+ 0, MAX_PRESSURE);
+
+ input_report_key(udraw->pen_input_dev, BTN_TOUCH, (level != 0));
+ input_report_key(udraw->pen_input_dev, BTN_TOOL_PEN, 1);
+ input_report_abs(udraw->pen_input_dev, ABS_PRESSURE, level);
+ input_report_abs(udraw->pen_input_dev, ABS_X, x);
+ input_report_abs(udraw->pen_input_dev, ABS_Y, y);
+ } else {
+ input_report_key(udraw->pen_input_dev, BTN_TOUCH, 0);
+ input_report_key(udraw->pen_input_dev, BTN_TOOL_PEN, 0);
+ input_report_abs(udraw->pen_input_dev, ABS_PRESSURE, 0);
+ }
+ input_sync(udraw->pen_input_dev);
+
+ /* accel */
+ x = (data[19] + (data[20] << 8));
+ x = clamp_accel(x, AXIS_X);
+ y = (data[21] + (data[22] << 8));
+ y = clamp_accel(y, AXIS_Y);
+ z = (data[23] + (data[24] << 8));
+ z = clamp_accel(z, AXIS_Z);
+ input_report_abs(udraw->accel_input_dev, ABS_X, x);
+ input_report_abs(udraw->accel_input_dev, ABS_Y, y);
+ input_report_abs(udraw->accel_input_dev, ABS_Z, z);
+ input_sync(udraw->accel_input_dev);
+
+ /* let hidraw and hiddev handle the report */
+ return 0;
+}
+
+static int udraw_open(struct input_dev *dev)
+{
+ struct udraw *udraw = input_get_drvdata(dev);
+
+ return hid_hw_open(udraw->hdev);
+}
+
+static void udraw_close(struct input_dev *dev)
+{
+ struct udraw *udraw = input_get_drvdata(dev);
+
+ hid_hw_close(udraw->hdev);
+}
+
+static struct input_dev *allocate_and_setup(struct hid_device *hdev,
+ const char *name)
+{
+ struct input_dev *input_dev;
+
+ input_dev = devm_input_allocate_device(&hdev->dev);
+ if (!input_dev)
+ return NULL;
+
+ input_dev->name = name;
+ input_dev->phys = hdev->phys;
+ input_dev->dev.parent = &hdev->dev;
+ input_dev->open = udraw_open;
+ input_dev->close = udraw_close;
+ input_dev->uniq = hdev->uniq;
+ input_dev->id.bustype = hdev->bus;
+ input_dev->id.vendor = hdev->vendor;
+ input_dev->id.product = hdev->product;
+ input_dev->id.version = hdev->version;
+ input_set_drvdata(input_dev, hid_get_drvdata(hdev));
+
+ return input_dev;
+}
+
+static bool udraw_setup_touch(struct udraw *udraw,
+ struct hid_device *hdev)
+{
+ struct input_dev *input_dev;
+
+ input_dev = allocate_and_setup(hdev, DEVICE_NAME " Touchpad");
+ if (!input_dev)
+ return false;
+
+ input_dev->evbit[0] = BIT(EV_ABS) | BIT(EV_KEY);
+
+ input_set_abs_params(input_dev, ABS_X, 0, RES_X, 1, 0);
+ input_abs_set_res(input_dev, ABS_X, RES_X / WIDTH);
+ input_set_abs_params(input_dev, ABS_Y, 0, RES_Y, 1, 0);
+ input_abs_set_res(input_dev, ABS_Y, RES_Y / HEIGHT);
+
+ set_bit(BTN_TOUCH, input_dev->keybit);
+ set_bit(BTN_TOOL_FINGER, input_dev->keybit);
+ set_bit(BTN_TOOL_DOUBLETAP, input_dev->keybit);
+
+ set_bit(INPUT_PROP_POINTER, input_dev->propbit);
+
+ udraw->touch_input_dev = input_dev;
+
+ return true;
+}
+
+static bool udraw_setup_pen(struct udraw *udraw,
+ struct hid_device *hdev)
+{
+ struct input_dev *input_dev;
+
+ input_dev = allocate_and_setup(hdev, DEVICE_NAME " Pen");
+ if (!input_dev)
+ return false;
+
+ input_dev->evbit[0] = BIT(EV_ABS) | BIT(EV_KEY);
+
+ input_set_abs_params(input_dev, ABS_X, 0, RES_X, 1, 0);
+ input_abs_set_res(input_dev, ABS_X, RES_X / WIDTH);
+ input_set_abs_params(input_dev, ABS_Y, 0, RES_Y, 1, 0);
+ input_abs_set_res(input_dev, ABS_Y, RES_Y / HEIGHT);
+ input_set_abs_params(input_dev, ABS_PRESSURE,
+ 0, MAX_PRESSURE, 0, 0);
+
+ set_bit(BTN_TOUCH, input_dev->keybit);
+ set_bit(BTN_TOOL_PEN, input_dev->keybit);
+
+ set_bit(INPUT_PROP_POINTER, input_dev->propbit);
+
+ udraw->pen_input_dev = input_dev;
+
+ return true;
+}
+
+static bool udraw_setup_accel(struct udraw *udraw,
+ struct hid_device *hdev)
+{
+ struct input_dev *input_dev;
+
+ input_dev = allocate_and_setup(hdev, DEVICE_NAME " Accelerometer");
+ if (!input_dev)
+ return false;
+
+ input_dev->evbit[0] = BIT(EV_ABS);
+
+ /* 1G accel is reported as ~256, so clamp to 2G */
+ input_set_abs_params(input_dev, ABS_X, -512, 512, 0, 0);
+ input_set_abs_params(input_dev, ABS_Y, -512, 512, 0, 0);
+ input_set_abs_params(input_dev, ABS_Z, -512, 512, 0, 0);
+
+ set_bit(INPUT_PROP_ACCELEROMETER, input_dev->propbit);
+
+ udraw->accel_input_dev = input_dev;
+
+ return true;
+}
+
+static bool udraw_setup_joypad(struct udraw *udraw,
+ struct hid_device *hdev)
+{
+ struct input_dev *input_dev;
+
+ input_dev = allocate_and_setup(hdev, DEVICE_NAME " Joypad");
+ if (!input_dev)
+ return false;
+
+ input_dev->evbit[0] = BIT(EV_KEY) | BIT(EV_ABS);
+
+ set_bit(BTN_SOUTH, input_dev->keybit);
+ set_bit(BTN_NORTH, input_dev->keybit);
+ set_bit(BTN_EAST, input_dev->keybit);
+ set_bit(BTN_WEST, input_dev->keybit);
+ set_bit(BTN_SELECT, input_dev->keybit);
+ set_bit(BTN_START, input_dev->keybit);
+ set_bit(BTN_MODE, input_dev->keybit);
+
+ input_set_abs_params(input_dev, ABS_X, -127, 127, 0, 0);
+ input_set_abs_params(input_dev, ABS_Y, -127, 127, 0, 0);
+
+ udraw->joy_input_dev = input_dev;
+
+ return true;
+}
+
+static int udraw_probe(struct hid_device *hdev, const struct hid_device_id *id)
+{
+ struct udraw *udraw;
+ int ret;
+
+ udraw = devm_kzalloc(&hdev->dev, sizeof(struct udraw), GFP_KERNEL);
+ if (!udraw)
+ return -ENOMEM;
+
+ udraw->hdev = hdev;
+ udraw->last_two_finger_x = -1;
+ udraw->last_two_finger_y = -1;
+
+ hid_set_drvdata(hdev, udraw);
+
+ ret = hid_parse(hdev);
+ if (ret) {
+ hid_err(hdev, "parse failed\n");
+ return ret;
+ }
+
+ if (!udraw_setup_joypad(udraw, hdev) ||
+ !udraw_setup_touch(udraw, hdev) ||
+ !udraw_setup_pen(udraw, hdev) ||
+ !udraw_setup_accel(udraw, hdev)) {
+ hid_err(hdev, "could not allocate interfaces\n");
+ return -ENOMEM;
+ }
+
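+ /* the || chain stops registering at the first failing device */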
+ ret = input_register_device(udraw->joy_input_dev) ||
+ input_register_device(udraw->touch_input_dev) ||
+ input_register_device(udraw->pen_input_dev) ||
+ input_register_device(udraw->accel_input_dev);
+ if (ret) {
+ hid_err(hdev, "failed to register interfaces\n");
+ return ret;
+ }
+
+ ret = hid_hw_start(hdev, HID_CONNECT_HIDRAW | HID_CONNECT_DRIVER);
+ if (ret) {
+ hid_err(hdev, "hw start failed\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct hid_device_id udraw_devices[] = {
+ { HID_USB_DEVICE(USB_VENDOR_ID_THQ, USB_DEVICE_ID_THQ_PS3_UDRAW) },
+ { }
+};
+MODULE_DEVICE_TABLE(hid, udraw_devices);
+
+static struct hid_driver udraw_driver = {
+ .name = "hid-udraw",
+ .id_table = udraw_devices,
+ .raw_event = udraw_raw_event,
+ .probe = udraw_probe,
+};
+module_hid_driver(udraw_driver);
diff --git a/drivers/hid/i2c-hid/i2c-hid.c b/drivers/hid/i2c-hid/i2c-hid.c
index b3ec4f2de875..78fb32a7b103 100644
--- a/drivers/hid/i2c-hid/i2c-hid.c
+++ b/drivers/hid/i2c-hid/i2c-hid.c
@@ -22,6 +22,7 @@
#include <linux/i2c.h>
#include <linux/interrupt.h>
#include <linux/input.h>
+#include <linux/irq.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/pm.h>
@@ -37,10 +38,15 @@
#include <linux/mutex.h>
#include <linux/acpi.h>
#include <linux/of.h>
-#include <linux/gpio/consumer.h>
+#include <linux/regulator/consumer.h>
#include <linux/i2c/i2c-hid.h>
+#include "../hid-ids.h"
+
+/* quirks to control the device */
+#define I2C_HID_QUIRK_SET_PWR_WAKEUP_DEV BIT(0)
+
/* flags */
#define I2C_HID_STARTED 0
#define I2C_HID_RESET_PENDING 1
@@ -143,10 +149,9 @@ struct i2c_hid {
char *argsbuf; /* Command arguments buffer */
unsigned long flags; /* device flags */
+ unsigned long quirks; /* Various quirks */
wait_queue_head_t wait; /* For waiting the interrupt */
- struct gpio_desc *desc;
- int irq;
struct i2c_hid_platform_data pdata;
@@ -154,6 +159,39 @@ struct i2c_hid {
struct mutex reset_lock;
};
+static const struct i2c_hid_quirks {
+ __u16 idVendor;
+ __u16 idProduct;
+ __u32 quirks;
+} i2c_hid_quirks[] = {
+ { USB_VENDOR_ID_WEIDA, USB_DEVICE_ID_WEIDA_8752,
+ I2C_HID_QUIRK_SET_PWR_WAKEUP_DEV },
+ { USB_VENDOR_ID_WEIDA, USB_DEVICE_ID_WEIDA_8755,
+ I2C_HID_QUIRK_SET_PWR_WAKEUP_DEV },
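+ /* the all-zero entry below terminates the lookup loop */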
+ { 0, 0 }
+};
+
+/*
+ * i2c_hid_lookup_quirk: return any quirks associated with an I2C HID device
+ * @idVendor: the 16-bit vendor ID
+ * @idProduct: the 16-bit product ID
+ *
+ * Returns: a u32 quirks value.
+ */
+static u32 i2c_hid_lookup_quirk(const u16 idVendor, const u16 idProduct)
+{
+ u32 quirks = 0;
+ int n;
+
+ for (n = 0; i2c_hid_quirks[n].idVendor; n++)
+ if (i2c_hid_quirks[n].idVendor == idVendor &&
+ (i2c_hid_quirks[n].idProduct == (__u16)HID_ANY_ID ||
+ i2c_hid_quirks[n].idProduct == idProduct))
+ quirks = i2c_hid_quirks[n].quirks;
+
+ return quirks;
+}
+
static int __i2c_hid_command(struct i2c_client *client,
const struct i2c_hid_cmd *command, u8 reportID,
u8 reportType, u8 *args, int args_len,
@@ -346,11 +384,27 @@ static int i2c_hid_set_power(struct i2c_client *client, int power_state)
i2c_hid_dbg(ihid, "%s\n", __func__);
+ /*
+ * Some devices require a wakeup command to be sent before powering
+ * on. The call may return an error (-EREMOTEIO), but the device is
+ * woken and activated anyway; after that it behaves normally.
+ */
+ if (power_state == I2C_HID_PWR_ON &&
+ ihid->quirks & I2C_HID_QUIRK_SET_PWR_WAKEUP_DEV) {
+ ret = i2c_hid_command(client, &hid_set_power_cmd, NULL, 0);
+
+ /* Device was already activated */
+ if (!ret)
+ goto set_pwr_exit;
+ }
+
ret = __i2c_hid_command(client, &hid_set_power_cmd, power_state,
0, NULL, 0, NULL, 0);
+
if (ret)
dev_err(&client->dev, "failed to change power setting.\n");
+set_pwr_exit:
return ret;
}
@@ -716,9 +770,11 @@ static int i2c_hid_start(struct hid_device *hid)
i2c_hid_find_max_report(hid, HID_FEATURE_REPORT, &bufsize);
if (bufsize > ihid->bufsize) {
+ disable_irq(client->irq);
i2c_hid_free_buffers(ihid);
ret = i2c_hid_alloc_buffers(ihid, bufsize);
+ enable_irq(client->irq);
if (ret)
return ret;
@@ -806,18 +862,21 @@ static struct hid_ll_driver i2c_hid_ll_driver = {
static int i2c_hid_init_irq(struct i2c_client *client)
{
struct i2c_hid *ihid = i2c_get_clientdata(client);
+ unsigned long irqflags = 0;
int ret;
- dev_dbg(&client->dev, "Requesting IRQ: %d\n", ihid->irq);
+ dev_dbg(&client->dev, "Requesting IRQ: %d\n", client->irq);
- ret = request_threaded_irq(ihid->irq, NULL, i2c_hid_irq,
- IRQF_TRIGGER_LOW | IRQF_ONESHOT,
- client->name, ihid);
+ if (!irq_get_trigger_type(client->irq))
+ irqflags = IRQF_TRIGGER_LOW;
+
+ ret = request_threaded_irq(client->irq, NULL, i2c_hid_irq,
+ irqflags | IRQF_ONESHOT, client->name, ihid);
if (ret < 0) {
dev_warn(&client->dev,
"Could not register for %s interrupt, irq = %d,"
" ret = %d\n",
- client->name, ihid->irq, ret);
+ client->name, client->irq, ret);
return ret;
}
@@ -864,14 +923,6 @@ static int i2c_hid_fetch_hid_descriptor(struct i2c_hid *ihid)
}
#ifdef CONFIG_ACPI
-
-/* Default GPIO mapping */
-static const struct acpi_gpio_params i2c_hid_irq_gpio = { 0, 0, true };
-static const struct acpi_gpio_mapping i2c_hid_acpi_gpios[] = {
- { "gpios", &i2c_hid_irq_gpio, 1 },
- { },
-};
-
static int i2c_hid_acpi_pdata(struct i2c_client *client,
struct i2c_hid_platform_data *pdata)
{
@@ -882,7 +933,6 @@ static int i2c_hid_acpi_pdata(struct i2c_client *client,
union acpi_object *obj;
struct acpi_device *adev;
acpi_handle handle;
- int ret;
handle = ACPI_HANDLE(&client->dev);
if (!handle || acpi_bus_get_device(handle, &adev))
@@ -898,9 +948,7 @@ static int i2c_hid_acpi_pdata(struct i2c_client *client,
pdata->hid_descriptor_address = obj->integer.value;
ACPI_FREE(obj);
- /* GPIOs are optional */
- ret = acpi_dev_add_driver_gpios(adev, i2c_hid_acpi_gpios);
- return ret < 0 && ret != -ENXIO ? ret : 0;
+ return 0;
}
static const struct acpi_device_id i2c_hid_acpi_match[] = {
@@ -964,6 +1012,19 @@ static int i2c_hid_probe(struct i2c_client *client,
dbg_hid("HID probe called for i2c 0x%02x\n", client->addr);
+ if (!client->irq) {
+ dev_err(&client->dev,
+ "HID over i2c has not been provided an Int IRQ\n");
+ return -EINVAL;
+ }
+
+ if (client->irq < 0) {
+ if (client->irq != -EPROBE_DEFER)
+ dev_err(&client->dev,
+ "HID over i2c doesn't have a valid IRQ\n");
+ return client->irq;
+ }
+
ihid = kzalloc(sizeof(struct i2c_hid), GFP_KERNEL);
if (!ihid)
return -ENOMEM;
@@ -983,23 +1044,6 @@ static int i2c_hid_probe(struct i2c_client *client,
ihid->pdata = *platform_data;
}
- if (client->irq > 0) {
- ihid->irq = client->irq;
- } else if (ACPI_COMPANION(&client->dev)) {
- ihid->desc = gpiod_get(&client->dev, NULL, GPIOD_IN);
- if (IS_ERR(ihid->desc)) {
- dev_err(&client->dev, "Failed to get GPIO interrupt\n");
- return PTR_ERR(ihid->desc);
- }
-
- ihid->irq = gpiod_to_irq(ihid->desc);
- if (ihid->irq < 0) {
- gpiod_put(ihid->desc);
- dev_err(&client->dev, "Failed to convert GPIO to IRQ\n");
- return ihid->irq;
- }
- }
-
i2c_set_clientdata(client, ihid);
ihid->client = client;
@@ -1050,6 +1094,8 @@ static int i2c_hid_probe(struct i2c_client *client,
client->name, hid->vendor, hid->product);
strlcpy(hid->phys, dev_name(&client->dev), sizeof(hid->phys));
+ ihid->quirks = i2c_hid_lookup_quirk(hid->vendor, hid->product);
+
ret = hid_add_device(hid);
if (ret) {
if (ret != -ENODEV)
@@ -1064,16 +1110,13 @@ err_mem_free:
hid_destroy_device(hid);
err_irq:
- free_irq(ihid->irq, ihid);
+ free_irq(client->irq, ihid);
err_pm:
pm_runtime_put_noidle(&client->dev);
pm_runtime_disable(&client->dev);
err:
- if (ihid->desc)
- gpiod_put(ihid->desc);
-
i2c_hid_free_buffers(ihid);
kfree(ihid);
return ret;
@@ -1092,18 +1135,13 @@ static int i2c_hid_remove(struct i2c_client *client)
hid = ihid->hid;
hid_destroy_device(hid);
- free_irq(ihid->irq, ihid);
+ free_irq(client->irq, ihid);
if (ihid->bufsize)
i2c_hid_free_buffers(ihid);
- if (ihid->desc)
- gpiod_put(ihid->desc);
-
kfree(ihid);
- acpi_dev_remove_driver_gpios(ACPI_COMPANION(&client->dev));
-
return 0;
}
@@ -1142,11 +1180,11 @@ static int i2c_hid_suspend(struct device *dev)
/* Save some power */
i2c_hid_set_power(client, I2C_HID_PWR_SLEEP);
- disable_irq(ihid->irq);
+ disable_irq(client->irq);
}
if (device_may_wakeup(&client->dev)) {
- wake_status = enable_irq_wake(ihid->irq);
+ wake_status = enable_irq_wake(client->irq);
if (!wake_status)
ihid->irq_wake_enabled = true;
else
@@ -1166,7 +1204,7 @@ static int i2c_hid_resume(struct device *dev)
int wake_status;
if (device_may_wakeup(&client->dev) && ihid->irq_wake_enabled) {
- wake_status = disable_irq_wake(ihid->irq);
+ wake_status = disable_irq_wake(client->irq);
if (!wake_status)
ihid->irq_wake_enabled = false;
else
@@ -1179,7 +1217,7 @@ static int i2c_hid_resume(struct device *dev)
pm_runtime_set_active(dev);
pm_runtime_enable(dev);
- enable_irq(ihid->irq);
+ enable_irq(client->irq);
ret = i2c_hid_hwreset(client);
if (ret)
return ret;
@@ -1197,19 +1235,17 @@ static int i2c_hid_resume(struct device *dev)
static int i2c_hid_runtime_suspend(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
- struct i2c_hid *ihid = i2c_get_clientdata(client);
i2c_hid_set_power(client, I2C_HID_PWR_SLEEP);
- disable_irq(ihid->irq);
+ disable_irq(client->irq);
return 0;
}
static int i2c_hid_runtime_resume(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
- struct i2c_hid *ihid = i2c_get_clientdata(client);
- enable_irq(ihid->irq);
+ enable_irq(client->irq);
i2c_hid_set_power(client, I2C_HID_PWR_ON);
return 0;
}
diff --git a/drivers/hid/intel-ish-hid/ipc/ipc.c b/drivers/hid/intel-ish-hid/ipc/ipc.c
index 0c9ac4d5d850..842d8416a7a6 100644
--- a/drivers/hid/intel-ish-hid/ipc/ipc.c
+++ b/drivers/hid/intel-ish-hid/ipc/ipc.c
@@ -19,7 +19,6 @@
#include <linux/jiffies.h>
#include "client.h"
#include "hw-ish.h"
-#include "utils.h"
#include "hbm.h"
/* For FW reset flow */
@@ -310,6 +309,7 @@ static int write_ipc_from_queue(struct ishtp_device *dev)
((uint32_t)tv_utc.tv_usec);
ts_format.ts1_source = HOST_SYSTEM_TIME_USEC;
ts_format.ts2_source = HOST_UTC_TIME_USEC;
+ ts_format.reserved = 0;
time_update.primary_host_time = usec_system;
time_update.secondary_host_time = usec_utc;
@@ -427,6 +427,59 @@ static int ipc_send_mng_msg(struct ishtp_device *dev, uint32_t msg_code,
sizeof(uint32_t) + size);
}
+#define WAIT_FOR_FW_RDY 0x1
+#define WAIT_FOR_INPUT_RDY 0x2
+
+/**
+ * timed_wait_for_timeout() - wait for an event with a timeout
+ * @dev: ISHTP device pointer
+ * @condition: indicate the condition for waiting
+ * @timeinc: time slice for every wait cycle, in ms
+ * @timeout: time in ms for timeout
+ *
+ * This function polls for the given condition in a loop; the poll
+ * period is specified in timeinc. Running past the timeout is a failure.
+ *
+ * Return: 0 on success, a negative error code otherwise
+ */
+static int timed_wait_for_timeout(struct ishtp_device *dev, int condition,
+ unsigned int timeinc, unsigned int timeout)
+{
+ bool complete = false;
+ int ret;
+
+ do {
+ if (condition == WAIT_FOR_FW_RDY) {
+ complete = ishtp_fw_is_ready(dev);
+ } else if (condition == WAIT_FOR_INPUT_RDY) {
+ complete = ish_is_input_ready(dev);
+ } else {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (!complete) {
+ unsigned long left_time;
+
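+ /*
+ * msleep_interruptible() returns the number of ms left when a
+ * signal cuts the sleep short, so only the time actually slept
+ * is charged against the timeout.
+ */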
+ left_time = msleep_interruptible(timeinc);
+ timeout -= (timeinc - left_time);
+ }
+ } while (!complete && timeout > 0);
+
+ if (complete)
+ ret = 0;
+ else
+ ret = -EBUSY;
+
+out:
+ return ret;
+}
+
+#define TIME_SLICE_FOR_FW_RDY_MS 100
+#define TIME_SLICE_FOR_INPUT_RDY_MS 100
+#define TIMEOUT_FOR_FW_RDY_MS 2000
+#define TIMEOUT_FOR_INPUT_RDY_MS 2000
+
/**
* ish_fw_reset_handler() - FW reset handler
* @dev: ishtp device pointer
@@ -456,8 +509,8 @@ static int ish_fw_reset_handler(struct ishtp_device *dev)
ishtp_reset_handler(dev);
if (!ish_is_input_ready(dev))
- timed_wait_for_timeout(WAIT_FOR_SEND_SLICE,
- ish_is_input_ready(dev), (2 * HZ));
+ timed_wait_for_timeout(dev, WAIT_FOR_INPUT_RDY,
+ TIME_SLICE_FOR_INPUT_RDY_MS, TIMEOUT_FOR_INPUT_RDY_MS);
/* ISH FW is dead */
if (!ish_is_input_ready(dev))
@@ -472,8 +525,8 @@ static int ish_fw_reset_handler(struct ishtp_device *dev)
sizeof(uint32_t));
/* Wait for ISH FW'es ILUP and ISHTP_READY */
- timed_wait_for_timeout(WAIT_FOR_SEND_SLICE, ishtp_fw_is_ready(dev),
- (2 * HZ));
+ timed_wait_for_timeout(dev, WAIT_FOR_FW_RDY,
+ TIME_SLICE_FOR_FW_RDY_MS, TIMEOUT_FOR_FW_RDY_MS);
if (!ishtp_fw_is_ready(dev)) {
/* ISH FW is dead */
uint32_t ish_status;
@@ -487,6 +540,8 @@ static int ish_fw_reset_handler(struct ishtp_device *dev)
return 0;
}
+#define TIMEOUT_FOR_HW_RDY_MS 300
+
/**
* ish_fw_reset_work_fn() - FW reset worker function
* @unused: not used
@@ -500,7 +555,7 @@ static void fw_reset_work_fn(struct work_struct *unused)
rv = ish_fw_reset_handler(ishtp_dev);
if (!rv) {
/* ISH is ILUP & ISHTP-ready. Restart ISHTP */
- schedule_timeout(HZ / 3);
+ msleep_interruptible(TIMEOUT_FOR_HW_RDY_MS);
ishtp_dev->recvd_hw_ready = 1;
wake_up_interruptible(&ishtp_dev->wait_hw_ready);
diff --git a/drivers/hid/intel-ish-hid/ipc/utils.h b/drivers/hid/intel-ish-hid/ipc/utils.h
deleted file mode 100644
index 5a82123dc7b4..000000000000
--- a/drivers/hid/intel-ish-hid/ipc/utils.h
+++ /dev/null
@@ -1,64 +0,0 @@
-/*
- * Utility macros of ISH
- *
- * Copyright (c) 2014-2016, Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- */
-#ifndef UTILS__H
-#define UTILS__H
-
-#define WAIT_FOR_SEND_SLICE (HZ / 10)
-#define WAIT_FOR_CONNECT_SLICE (HZ / 10)
-
-/*
- * Waits for specified event when a thread that triggers event can't signal
- * Also, waits *at_least* `timeinc` after condition is satisfied
- */
-#define timed_wait_for(timeinc, condition) \
- do { \
- int completed = 0; \
- do { \
- unsigned long j; \
- int done = 0; \
- \
- completed = (condition); \
- for (j = jiffies, done = 0; !done; ) { \
- schedule_timeout(timeinc); \
- if (time_is_before_eq_jiffies(j + timeinc)) \
- done = 1; \
- } \
- } while (!(completed)); \
- } while (0)
-
-
-/*
- * Waits for specified event when a thread that triggers event
- * can't signal with timeout (use whenever we may hang)
- */
-#define timed_wait_for_timeout(timeinc, condition, timeout) \
- do { \
- int t = timeout; \
- do { \
- unsigned long j; \
- int done = 0; \
- \
- for (j = jiffies, done = 0; !done; ) { \
- schedule_timeout(timeinc); \
- if (time_is_before_eq_jiffies(j + timeinc)) \
- done = 1; \
- } \
- t -= timeinc; \
- if (t <= 0) \
- break; \
- } while (!(condition)); \
- } while (0)
-
-#endif /* UTILS__H */
diff --git a/drivers/hid/intel-ish-hid/ishtp/bus.c b/drivers/hid/intel-ish-hid/ishtp/bus.c
index 256521509d20..f4cbc744e657 100644
--- a/drivers/hid/intel-ish-hid/ishtp/bus.c
+++ b/drivers/hid/intel-ish-hid/ishtp/bus.c
@@ -585,14 +585,7 @@ int ishtp_bus_new_client(struct ishtp_device *dev)
*/
i = dev->fw_client_presentation_num - 1;
device_uuid = dev->fw_clients[i].props.protocol_name;
- dev_name = kasprintf(GFP_KERNEL,
- "{%02X%02X%02X%02X-%02X%02X-%02X%02X-%02X%02X-%02X%02X%02X%02X%02X%02X}",
- device_uuid.b[3], device_uuid.b[2], device_uuid.b[1],
- device_uuid.b[0], device_uuid.b[5], device_uuid.b[4],
- device_uuid.b[7], device_uuid.b[6], device_uuid.b[8],
- device_uuid.b[9], device_uuid.b[10], device_uuid.b[11],
- device_uuid.b[12], device_uuid.b[13], device_uuid.b[14],
- device_uuid.b[15]);
+ dev_name = kasprintf(GFP_KERNEL, "{%pUL}", device_uuid.b);
if (!dev_name)
return -ENOMEM;
diff --git a/drivers/hid/intel-ish-hid/ishtp/hbm.c b/drivers/hid/intel-ish-hid/ishtp/hbm.c
index 74bffee60774..59460b66e689 100644
--- a/drivers/hid/intel-ish-hid/ishtp/hbm.c
+++ b/drivers/hid/intel-ish-hid/ishtp/hbm.c
@@ -378,11 +378,10 @@ static void ishtp_hbm_cl_disconnect_res(struct ishtp_device *dev,
list_for_each_entry(cl, &dev->cl_list, link) {
if (!rs->status && ishtp_hbm_cl_addr_equal(cl, rs)) {
cl->state = ISHTP_CL_DISCONNECTED;
+ wake_up_interruptible(&cl->wait_ctrl_res);
break;
}
}
- if (cl)
- wake_up_interruptible(&cl->wait_ctrl_res);
spin_unlock_irqrestore(&dev->cl_list_lock, flags);
}
@@ -431,11 +430,10 @@ static void ishtp_hbm_cl_connect_res(struct ishtp_device *dev,
cl->state = ISHTP_CL_DISCONNECTED;
cl->status = -ENODEV;
}
+ wake_up_interruptible(&cl->wait_ctrl_res);
break;
}
}
- if (cl)
- wake_up_interruptible(&cl->wait_ctrl_res);
spin_unlock_irqrestore(&dev->cl_list_lock, flags);
}
diff --git a/drivers/hid/usbhid/hid-core.c b/drivers/hid/usbhid/hid-core.c
index ae83af649a60..333108ef18cf 100644
--- a/drivers/hid/usbhid/hid-core.c
+++ b/drivers/hid/usbhid/hid-core.c
@@ -1459,7 +1459,7 @@ static int hid_post_reset(struct usb_interface *intf)
rdesc = kmalloc(hid->dev_rsize, GFP_KERNEL);
if (!rdesc) {
dbg_hid("couldn't allocate rdesc memory (post_reset)\n");
- return 1;
+ return -ENOMEM;
}
status = hid_get_class_descriptor(dev,
interface->desc.bInterfaceNumber,
@@ -1467,13 +1467,13 @@ static int hid_post_reset(struct usb_interface *intf)
if (status < 0) {
dbg_hid("reading report descriptor failed (post_reset)\n");
kfree(rdesc);
- return 1;
+ return status;
}
status = memcmp(rdesc, hid->dev_rdesc, hid->dev_rsize);
kfree(rdesc);
if (status != 0) {
dbg_hid("report descriptor changed\n");
- return 1;
+ return -EPERM;
}
/* No need to do another reset or clear a halted endpoint */
diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
index e6cfd323babc..b3e01c82af05 100644
--- a/drivers/hid/usbhid/hid-quirks.c
+++ b/drivers/hid/usbhid/hid-quirks.c
@@ -82,6 +82,8 @@ static const struct hid_blacklist {
{ USB_VENDOR_ID_CREATIVELABS, USB_DEVICE_ID_CREATIVE_SB_OMNI_SURROUND_51, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_DMI, USB_DEVICE_ID_DMI_ENC, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_WIIU, HID_QUIRK_MULTI_INPUT },
+ { USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_PS3, HID_QUIRK_MULTI_INPUT },
+ { USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_GAMECUBE, HID_QUIRK_MULTI_INPUT },
{ USB_VENDOR_ID_ELAN, HID_ANY_ID, HID_QUIRK_ALWAYS_POLL },
{ USB_VENDOR_ID_ELO, USB_DEVICE_ID_ELO_TS2700, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_FORMOSA, USB_DEVICE_ID_FORMOSA_IR_RECEIVER, HID_QUIRK_NO_INIT_REPORTS },
@@ -101,8 +103,9 @@ static const struct hid_blacklist {
{ USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_PRO_3, HID_QUIRK_NO_INIT_REPORTS },
{ USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_PRO_3_2, HID_QUIRK_NO_INIT_REPORTS },
{ USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_PRO_3_JP, HID_QUIRK_NO_INIT_REPORTS },
+ { USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_PRO_4, HID_QUIRK_NO_INIT_REPORTS },
+ { USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_PRO_4_2, HID_QUIRK_NO_INIT_REPORTS },
{ USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_PRO_4_JP, HID_QUIRK_NO_INIT_REPORTS },
- { USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_3, HID_QUIRK_NO_INIT_REPORTS },
{ USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_POWER_COVER, HID_QUIRK_NO_INIT_REPORTS },
{ USB_VENDOR_ID_MSI, USB_DEVICE_ID_MSI_GT683R_LED_PANEL, HID_QUIRK_NO_INIT_REPORTS },
{ USB_VENDOR_ID_NEXIO, USB_DEVICE_ID_NEXIO_MULTITOUCH_PTI0750, HID_QUIRK_NO_INIT_REPORTS },
diff --git a/drivers/hid/wacom.h b/drivers/hid/wacom.h
index b4800ea891cb..d303e413306d 100644
--- a/drivers/hid/wacom.h
+++ b/drivers/hid/wacom.h
@@ -210,7 +210,7 @@ int wacom_setup_pad_input_capabilities(struct input_dev *input_dev,
struct wacom_wac *wacom_wac);
void wacom_wac_usage_mapping(struct hid_device *hdev,
struct hid_field *field, struct hid_usage *usage);
-int wacom_wac_event(struct hid_device *hdev, struct hid_field *field,
+void wacom_wac_event(struct hid_device *hdev, struct hid_field *field,
struct hid_usage *usage, __s32 value);
void wacom_wac_report(struct hid_device *hdev, struct hid_report *report);
void wacom_battery_work(struct work_struct *work);
diff --git a/drivers/hid/wacom_sys.c b/drivers/hid/wacom_sys.c
index 5e7a5648e708..b9779bcbd140 100644
--- a/drivers/hid/wacom_sys.c
+++ b/drivers/hid/wacom_sys.c
@@ -122,6 +122,7 @@ static void wacom_feature_mapping(struct hid_device *hdev,
struct hid_data *hid_data = &wacom->wacom_wac.hid_data;
u8 *data;
int ret;
+ int n;
switch (usage->hid) {
case HID_DG_CONTACTMAX:
@@ -159,22 +160,48 @@ static void wacom_feature_mapping(struct hid_device *hdev,
case HID_UP_DIGITIZER:
if (field->report->id == 0x0B &&
- (field->application == WACOM_G9_DIGITIZER ||
- field->application == WACOM_G11_DIGITIZER)) {
+ (field->application == WACOM_HID_G9_PEN ||
+ field->application == WACOM_HID_G11_PEN)) {
wacom->wacom_wac.mode_report = field->report->id;
wacom->wacom_wac.mode_value = 0;
}
break;
- case WACOM_G9_PAGE:
- case WACOM_G11_PAGE:
+ case WACOM_HID_WD_DATAMODE:
+ wacom->wacom_wac.mode_report = field->report->id;
+ wacom->wacom_wac.mode_value = 2;
+ break;
+
+ case WACOM_HID_UP_G9:
+ case WACOM_HID_UP_G11:
if (field->report->id == 0x03 &&
- (field->application == WACOM_G9_TOUCHSCREEN ||
- field->application == WACOM_G11_TOUCHSCREEN)) {
+ (field->application == WACOM_HID_G9_TOUCHSCREEN ||
+ field->application == WACOM_HID_G11_TOUCHSCREEN)) {
wacom->wacom_wac.mode_report = field->report->id;
wacom->wacom_wac.mode_value = 0;
}
break;
+ case WACOM_HID_WD_OFFSETLEFT:
+ case WACOM_HID_WD_OFFSETTOP:
+ case WACOM_HID_WD_OFFSETRIGHT:
+ case WACOM_HID_WD_OFFSETBOTTOM:
+ /* read manually */
+ n = hid_report_len(field->report);
+ data = hid_alloc_report_buf(field->report, GFP_KERNEL);
+ if (!data)
+ break;
+ data[0] = field->report->id;
+ ret = wacom_get_report(hdev, HID_FEATURE_REPORT,
+ data, n, WAC_CMD_RETRIES);
+ if (ret == n) {
+ ret = hid_report_raw_event(hdev, HID_FEATURE_REPORT,
+ data, n, 0);
+ } else {
+ hid_warn(hdev, "%s: could not retrieve sensor offsets\n",
+ __func__);
+ }
+ kfree(data);
+ break;
}
}
@@ -240,6 +267,30 @@ static void wacom_usage_mapping(struct hid_device *hdev,
features->touch_max = 1;
}
+ /*
+ * ISDv4 devices which predate HID's adoption of the
+ * HID_DG_BARRELSWITCH2 usage use 0x000D0000 in its
+ * position instead. We can accurately detect if a
+ * usage with that value should be HID_DG_BARRELSWITCH2
+ * based on the surrounding usages, which have remained
+ * constant across generations.
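+ * (0x000D0000 decodes to the Digitizer usage page, 0x0D, with
+ * usage ID 0x0000, i.e. an undefined usage.)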
+ */
+ if (features->type == HID_GENERIC &&
+ usage->hid == 0x000D0000 &&
+ field->application == HID_DG_PEN &&
+ field->physical == HID_DG_STYLUS) {
+ int i = usage->usage_index;
+
+ if (i-4 >= 0 && i+1 < field->maxusage &&
+ field->usage[i-4].hid == HID_DG_TIPSWITCH &&
+ field->usage[i-3].hid == HID_DG_BARRELSWITCH &&
+ field->usage[i-2].hid == HID_DG_ERASER &&
+ field->usage[i-1].hid == HID_DG_INVERT &&
+ field->usage[i+1].hid == HID_DG_INRANGE) {
+ usage->hid = HID_DG_BARRELSWITCH2;
+ }
+ }
+
switch (usage->hid) {
case HID_GD_X:
features->x_max = field->logical_maximum;
@@ -689,11 +740,6 @@ static int wacom_add_shared_data(struct hid_device *hdev)
return retval;
}
- if (wacom_wac->features.device_type & WACOM_DEVICETYPE_TOUCH)
- wacom_wac->shared->touch = hdev;
- else if (wacom_wac->features.device_type & WACOM_DEVICETYPE_PEN)
- wacom_wac->shared->pen = hdev;
-
out:
mutex_unlock(&wacom_udev_list_lock);
return retval;
@@ -1916,6 +1962,19 @@ static void wacom_update_name(struct wacom *wacom, const char *suffix)
/* shift everything including the terminator */
memmove(gap, gap+1, strlen(gap));
}
+
+ /* strip off excessive prefixing */
+ if (strstr(name, "Wacom Co.,Ltd. Wacom ") == name) {
+ int n = strlen(name);
+ int x = strlen("Wacom Co.,Ltd. ");
+ memmove(name, name+x, n-x+1);
+ }
+ if (strstr(name, "Wacom Co., Ltd. Wacom ") == name) {
+ int n = strlen(name);
+ int x = strlen("Wacom Co., Ltd. ");
+ memmove(name, name+x, n-x+1);
+ }
+
/* get rid of trailing whitespace */
if (name[strlen(name)-1] == ' ')
name[strlen(name)-1] = '\0';
@@ -1977,6 +2036,10 @@ static int wacom_parse_and_register(struct wacom *wacom, bool wireless)
if (error)
goto fail;
+ error = wacom_add_shared_data(hdev);
+ if (error)
+ goto fail;
+
/*
* Bamboo Pad has a generic hid handling for the Pen, and we switch it
* into debug mode for the touch part.
@@ -2017,9 +2080,10 @@ static int wacom_parse_and_register(struct wacom *wacom, bool wireless)
wacom_update_name(wacom, wireless ? " (WL)" : "");
- error = wacom_add_shared_data(hdev);
- if (error)
- goto fail;
+ if (wacom_wac->features.device_type & WACOM_DEVICETYPE_TOUCH)
+ wacom_wac->shared->touch = hdev;
+ else if (wacom_wac->features.device_type & WACOM_DEVICETYPE_PEN)
+ wacom_wac->shared->pen = hdev;
if (!(features->device_type & WACOM_DEVICETYPE_WL_MONITOR) &&
(features->quirks & WACOM_QUIRK_BATTERY)) {
diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
index 1cb79925730d..b1a9a3ca6d56 100644
--- a/drivers/hid/wacom_wac.c
+++ b/drivers/hid/wacom_wac.c
@@ -41,6 +41,8 @@ MODULE_PARM_DESC(touch_arbitration, " on (Y) off (N)");
static void wacom_report_numbered_buttons(struct input_dev *input_dev,
int button_count, int mask);
+static int wacom_numbered_button_to_key(int n);
+
/*
* Percent of battery capacity for Graphire.
* 8th value means AC online and show 100% capacity.
@@ -588,6 +590,11 @@ static int wacom_intuos_pad(struct wacom_wac *wacom)
return 1;
}
+static int wacom_intuos_id_mangle(int tool_id)
+{
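+ /*
+ * Re-insert the nibble dropped by the new id packing so userspace
+ * keeps seeing the historical tool ids (e.g. 0x12802 -> 0x120802).
+ */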
+ return (tool_id & ~0xFFF) << 4 | (tool_id & 0xFFF);
+}
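A worked example of the mangle, reusing an ID from the switch below:

	/*
	 * wacom_intuos_id_mangle(0x12802):
	 *   (0x12802 & ~0xFFF) << 4  ==  0x12000 << 4  ==  0x120000
	 *   (0x12802 &  0xFFF)       ==  0x802
	 *   0x120000 | 0x802         ==  0x120802
	 *
	 * i.e. the HID-reported 0x12802 (Intuos4/5 Inking Pen) widens back
	 * to the legacy 0x120802 value that ABS_MISC used to carry.
	 */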
+
static int wacom_intuos_get_tool_type(int tool_id)
{
int tool_type;
@@ -595,7 +602,7 @@ static int wacom_intuos_get_tool_type(int tool_id)
switch (tool_id) {
case 0x812: /* Inking pen */
case 0x801: /* Intuos3 Inking pen */
- case 0x120802: /* Intuos4/5 Inking Pen */
+ case 0x12802: /* Intuos4/5 Inking Pen */
case 0x012:
tool_type = BTN_TOOL_PENCIL;
break;
@@ -610,11 +617,11 @@ static int wacom_intuos_get_tool_type(int tool_id)
case 0x804: /* Intuos4/5 13HD/24HD Marker Pen */
case 0x8e2: /* IntuosHT2 pen */
case 0x022:
- case 0x100804: /* Intuos4/5 13HD/24HD Art Pen */
- case 0x140802: /* Intuos4/5 13HD/24HD Classic Pen */
- case 0x160802: /* Cintiq 13HD Pro Pen */
- case 0x180802: /* DTH2242 Pen */
- case 0x100802: /* Intuos4/5 13HD/24HD General Pen */
+ case 0x10804: /* Intuos4/5 13HD/24HD Art Pen */
+ case 0x14802: /* Intuos4/5 13HD/24HD Classic Pen */
+ case 0x16802: /* Cintiq 13HD Pro Pen */
+ case 0x18802: /* DTH2242 Pen */
+ case 0x10802: /* Intuos4/5 13HD/24HD General Pen */
tool_type = BTN_TOOL_PEN;
break;
@@ -638,6 +645,7 @@ static int wacom_intuos_get_tool_type(int tool_id)
break;
case 0x82a: /* Eraser */
+ case 0x84a:
case 0x85a:
case 0x91a:
case 0xd1a:
@@ -648,12 +656,12 @@ static int wacom_intuos_get_tool_type(int tool_id)
case 0x80c: /* Intuos4/5 13HD/24HD Marker Pen Eraser */
case 0x80a: /* Intuos4/5 13HD/24HD General Pen Eraser */
case 0x90a: /* Intuos4/5 13HD/24HD Airbrush Eraser */
- case 0x14080a: /* Intuos4/5 13HD/24HD Classic Pen Eraser */
- case 0x10090a: /* Intuos4/5 13HD/24HD Airbrush Eraser */
- case 0x10080c: /* Intuos4/5 13HD/24HD Art Pen Eraser */
- case 0x16080a: /* Cintiq 13HD Pro Pen Eraser */
- case 0x18080a: /* DTH2242 Eraser */
- case 0x10080a: /* Intuos4/5 13HD/24HD General Pen Eraser */
+ case 0x1480a: /* Intuos4/5 13HD/24HD Classic Pen Eraser */
+ case 0x1090a: /* Intuos4/5 13HD/24HD Airbrush Eraser */
+ case 0x1080c: /* Intuos4/5 13HD/24HD Art Pen Eraser */
+ case 0x1680a: /* Cintiq 13HD Pro Pen Eraser */
+ case 0x1880a: /* DTH2242 Eraser */
+ case 0x1080a: /* Intuos4/5 13HD/24HD General Pen Eraser */
tool_type = BTN_TOOL_RUBBER;
break;
@@ -662,7 +670,7 @@ static int wacom_intuos_get_tool_type(int tool_id)
case 0x112:
case 0x913: /* Intuos3 Airbrush */
case 0x902: /* Intuos4/5 13HD/24HD Airbrush */
- case 0x100902: /* Intuos4/5 13HD/24HD Airbrush */
+ case 0x10902: /* Intuos4/5 13HD/24HD Airbrush */
tool_type = BTN_TOOL_AIRBRUSH;
break;
@@ -693,7 +701,7 @@ static int wacom_intuos_inout(struct wacom_wac *wacom)
(data[6] << 4) + (data[7] >> 4);
wacom->id[idx] = (data[2] << 4) | (data[3] >> 4) |
- ((data[7] & 0x0f) << 20) | ((data[8] & 0xf0) << 12);
+ ((data[7] & 0x0f) << 16) | ((data[8] & 0xf0) << 8);
wacom->tool[idx] = wacom_intuos_get_tool_type(wacom->id[idx]);
@@ -923,7 +931,7 @@ static int wacom_intuos_general(struct wacom_wac *wacom)
* don't report events for invalid data
*/
/* older I4 styli don't work with new Cintiqs */
- if ((!((wacom->id[idx] >> 20) & 0x01) &&
+ if ((!((wacom->id[idx] >> 16) & 0x01) &&
(features->type == WACOM_21UX2)) ||
/* Only large Intuos tablets support the Lens Cursor */
(wacom->tool[idx] == BTN_TOOL_LENS &&
@@ -1059,7 +1067,8 @@ static int wacom_intuos_general(struct wacom_wac *wacom)
break;
}
- input_report_abs(input, ABS_MISC, wacom->id[idx]); /* report tool id */
+ input_report_abs(input, ABS_MISC,
+ wacom_intuos_id_mangle(wacom->id[idx])); /* report tool id */
input_report_key(input, wacom->tool[idx], 1);
input_event(input, EV_MSC, MSC_SERIAL, wacom->serial[idx]);
wacom->reporting_data = true;
@@ -1435,11 +1444,59 @@ static int wacom_tpc_irq(struct wacom_wac *wacom, size_t len)
return 0;
}
+static int wacom_equivalent_usage(int usage)
+{
+ if ((usage & HID_USAGE_PAGE) == WACOM_HID_UP_WACOMDIGITIZER) {
+ int subpage = (usage & 0xFF00) << 8;
+ int subusage = (usage & 0xFF);
+
+ if (subpage == WACOM_HID_SP_PAD ||
+ subpage == WACOM_HID_SP_BUTTON ||
+ subpage == WACOM_HID_SP_DIGITIZER ||
+ subpage == WACOM_HID_SP_DIGITIZERINFO ||
+ usage == WACOM_HID_WD_SENSE ||
+ usage == WACOM_HID_WD_SERIALHI ||
+ usage == WACOM_HID_WD_TOOLTYPE ||
+ usage == WACOM_HID_WD_DISTANCE ||
+ usage == WACOM_HID_WD_TOUCHSTRIP ||
+ usage == WACOM_HID_WD_TOUCHSTRIP2 ||
+ usage == WACOM_HID_WD_TOUCHRING ||
+ usage == WACOM_HID_WD_TOUCHRINGSTATUS) {
+ return usage;
+ }
+
+ if (subpage == HID_UP_UNDEFINED)
+ subpage = HID_UP_DIGITIZER;
+
+ return subpage | subusage;
+ }
+
+ return usage;
+}
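Two sketched lookups make the remapping concrete (constants as defined in wacom_wac.h and hid.h):

	/*
	 * wacom_equivalent_usage(0xff0d0030):
	 *   subpage == 0x00000000 == HID_UP_UNDEFINED, so the result is
	 *   HID_UP_DIGITIZER | 0x30 == HID_DG_TIPPRESSURE.
	 *
	 * wacom_equivalent_usage(0xff0d0d30), i.e. WACOM_HID_WD_OFFSETLEFT:
	 *   subpage == 0x000d0000 == WACOM_HID_SP_DIGITIZER, so the
	 *   vendor-defined usage passes through unchanged.
	 */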
+
static void wacom_map_usage(struct input_dev *input, struct hid_usage *usage,
struct hid_field *field, __u8 type, __u16 code, int fuzz)
{
+ struct wacom *wacom = input_get_drvdata(input);
+ struct wacom_wac *wacom_wac = &wacom->wacom_wac;
+ struct wacom_features *features = &wacom_wac->features;
int fmin = field->logical_minimum;
int fmax = field->logical_maximum;
+ unsigned int equivalent_usage = wacom_equivalent_usage(usage->hid);
+ int resolution_code = code;
+
+ if (equivalent_usage == HID_DG_TWIST) {
+ resolution_code = ABS_RZ;
+ }
+
+ if (equivalent_usage == HID_GD_X) {
+ fmin += features->offset_left;
+ fmax -= features->offset_right;
+ }
+ if (equivalent_usage == HID_GD_Y) {
+ fmin += features->offset_top;
+ fmax -= features->offset_bottom;
+ }
usage->type = type;
usage->code = code;
@@ -1450,7 +1507,7 @@ static void wacom_map_usage(struct input_dev *input, struct hid_usage *usage,
case EV_ABS:
input_set_abs_params(input, code, fmin, fmax, fuzz, 0);
input_abs_set_res(input, code,
- hidinput_calc_abs_res(field, code));
+ hidinput_calc_abs_res(field, resolution_code));
break;
case EV_KEY:
input_set_capability(input, EV_KEY, code);
@@ -1458,6 +1515,172 @@ static void wacom_map_usage(struct input_dev *input, struct hid_usage *usage,
case EV_MSC:
input_set_capability(input, EV_MSC, code);
break;
+ case EV_SW:
+ input_set_capability(input, EV_SW, code);
+ break;
+ }
+}
+
+static void wacom_wac_pad_usage_mapping(struct hid_device *hdev,
+ struct hid_field *field, struct hid_usage *usage)
+{
+ struct wacom *wacom = hid_get_drvdata(hdev);
+ struct wacom_wac *wacom_wac = &wacom->wacom_wac;
+ struct wacom_features *features = &wacom_wac->features;
+ struct input_dev *input = wacom_wac->pad_input;
+ unsigned equivalent_usage = wacom_equivalent_usage(usage->hid);
+
+ switch (equivalent_usage) {
+ case WACOM_HID_WD_BATTERY_LEVEL:
+ case WACOM_HID_WD_BATTERY_CHARGING:
+ features->quirks |= WACOM_QUIRK_BATTERY;
+ break;
+ case WACOM_HID_WD_ACCELEROMETER_X:
+ __set_bit(INPUT_PROP_ACCELEROMETER, input->propbit);
+ wacom_map_usage(input, usage, field, EV_ABS, ABS_X, 0);
+ features->device_type |= WACOM_DEVICETYPE_PAD;
+ break;
+ case WACOM_HID_WD_ACCELEROMETER_Y:
+ __set_bit(INPUT_PROP_ACCELEROMETER, input->propbit);
+ wacom_map_usage(input, usage, field, EV_ABS, ABS_Y, 0);
+ features->device_type |= WACOM_DEVICETYPE_PAD;
+ break;
+ case WACOM_HID_WD_ACCELEROMETER_Z:
+ __set_bit(INPUT_PROP_ACCELEROMETER, input->propbit);
+ wacom_map_usage(input, usage, field, EV_ABS, ABS_Z, 0);
+ features->device_type |= WACOM_DEVICETYPE_PAD;
+ break;
+ case WACOM_HID_WD_BUTTONHOME:
+ case WACOM_HID_WD_BUTTONUP:
+ case WACOM_HID_WD_BUTTONDOWN:
+ case WACOM_HID_WD_BUTTONLEFT:
+ case WACOM_HID_WD_BUTTONRIGHT:
+ case WACOM_HID_WD_BUTTONCENTER:
+ wacom_map_usage(input, usage, field, EV_KEY,
+ wacom_numbered_button_to_key(features->numbered_buttons),
+ 0);
+ features->numbered_buttons++;
+ features->device_type |= WACOM_DEVICETYPE_PAD;
+ break;
+ case WACOM_HID_WD_TOUCHONOFF:
+ wacom_map_usage(input, usage, field, EV_SW, SW_MUTE_DEVICE, 0);
+ features->device_type |= WACOM_DEVICETYPE_PAD;
+ break;
+ case WACOM_HID_WD_TOUCHSTRIP:
+ wacom_map_usage(input, usage, field, EV_ABS, ABS_RX, 0);
+ features->device_type |= WACOM_DEVICETYPE_PAD;
+ break;
+ case WACOM_HID_WD_TOUCHSTRIP2:
+ wacom_map_usage(input, usage, field, EV_ABS, ABS_RY, 0);
+ features->device_type |= WACOM_DEVICETYPE_PAD;
+ break;
+ case WACOM_HID_WD_TOUCHRING:
+ wacom_map_usage(input, usage, field, EV_ABS, ABS_WHEEL, 0);
+ features->device_type |= WACOM_DEVICETYPE_PAD;
+ break;
+ }
+
+ switch (equivalent_usage & 0xfffffff0) {
+ case WACOM_HID_WD_EXPRESSKEY00:
+ wacom_map_usage(input, usage, field, EV_KEY,
+ wacom_numbered_button_to_key(features->numbered_buttons),
+ 0);
+ features->numbered_buttons++;
+ features->device_type |= WACOM_DEVICETYPE_PAD;
+ break;
+ }
+}
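The low-nibble mask in the second switch folds a run of sixteen usages onto a single case; a sketch of the effect (values per wacom_wac.h):

	/*
	 * 0xff0d0910 .. 0xff0d091f all mask to WACOM_HID_WD_EXPRESSKEY00,
	 * so each ExpressKey in the run claims the next numbered button
	 * from wacom_numbered_button_to_key() in descriptor order.
	 */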
+
+static void wacom_wac_pad_battery_event(struct hid_device *hdev, struct hid_field *field,
+ struct hid_usage *usage, __s32 value)
+{
+ struct wacom *wacom = hid_get_drvdata(hdev);
+ struct wacom_wac *wacom_wac = &wacom->wacom_wac;
+ unsigned equivalent_usage = wacom_equivalent_usage(usage->hid);
+
+ switch (equivalent_usage) {
+ case WACOM_HID_WD_BATTERY_LEVEL:
+ wacom_wac->hid_data.battery_capacity = value;
+ wacom_wac->hid_data.bat_connected = 1;
+ break;
+
+ case WACOM_HID_WD_BATTERY_CHARGING:
+ wacom_wac->hid_data.bat_charging = value;
+ wacom_wac->hid_data.ps_connected = value;
+ wacom_wac->hid_data.bat_connected = 1;
+ break;
+ }
+}
+
+static void wacom_wac_pad_event(struct hid_device *hdev, struct hid_field *field,
+ struct hid_usage *usage, __s32 value)
+{
+ struct wacom *wacom = hid_get_drvdata(hdev);
+ struct wacom_wac *wacom_wac = &wacom->wacom_wac;
+ struct input_dev *input = wacom_wac->pad_input;
+ struct wacom_features *features = &wacom_wac->features;
+ unsigned equivalent_usage = wacom_equivalent_usage(usage->hid);
+
+ if (wacom_equivalent_usage(field->physical) == HID_DG_TABLETFUNCTIONKEY) {
+ wacom_wac->hid_data.inrange_state |= value;
+ }
+
+ switch (equivalent_usage) {
+ case WACOM_HID_WD_TOUCHRINGSTATUS:
+ break;
+
+ default:
+ features->input_event_flag = true;
+ input_event(input, usage->type, usage->code, value);
+ break;
+ }
+}
+
+static void wacom_wac_pad_pre_report(struct hid_device *hdev,
+ struct hid_report *report)
+{
+ struct wacom *wacom = hid_get_drvdata(hdev);
+ struct wacom_wac *wacom_wac = &wacom->wacom_wac;
+
+ wacom_wac->hid_data.inrange_state = 0;
+}
+
+static void wacom_wac_pad_battery_report(struct hid_device *hdev,
+ struct hid_report *report)
+{
+ struct wacom *wacom = hid_get_drvdata(hdev);
+ struct wacom_wac *wacom_wac = &wacom->wacom_wac;
+ struct wacom_features *features = &wacom_wac->features;
+
+ if (features->quirks & WACOM_QUIRK_BATTERY) {
+ int capacity = wacom_wac->hid_data.battery_capacity;
+ bool charging = wacom_wac->hid_data.bat_charging;
+ bool connected = wacom_wac->hid_data.bat_connected;
+ bool powered = wacom_wac->hid_data.ps_connected;
+
+ wacom_notify_battery(wacom_wac, capacity, charging,
+ connected, powered);
+ }
+}
+
+static void wacom_wac_pad_report(struct hid_device *hdev,
+ struct hid_report *report)
+{
+ struct wacom *wacom = hid_get_drvdata(hdev);
+ struct wacom_wac *wacom_wac = &wacom->wacom_wac;
+ struct wacom_features *features = &wacom_wac->features;
+ struct input_dev *input = wacom_wac->pad_input;
+ bool active = wacom_wac->hid_data.inrange_state != 0;
+
+ /* report prox for expresskey events */
+ if (wacom_equivalent_usage(report->field[0]->physical) == HID_DG_TABLETFUNCTIONKEY) {
+ features->input_event_flag = true;
+ input_event(input, EV_ABS, ABS_MISC, active ? PAD_DEVICE_ID : 0);
+ }
+
+ if (features->input_event_flag) {
+ features->input_event_flag = false;
+ input_sync(input);
}
}
@@ -1466,25 +1689,43 @@ static void wacom_wac_pen_usage_mapping(struct hid_device *hdev,
{
struct wacom *wacom = hid_get_drvdata(hdev);
struct wacom_wac *wacom_wac = &wacom->wacom_wac;
+ struct wacom_features *features = &wacom_wac->features;
struct input_dev *input = wacom_wac->pen_input;
+ unsigned equivalent_usage = wacom_equivalent_usage(usage->hid);
- switch (usage->hid) {
+ switch (equivalent_usage) {
case HID_GD_X:
wacom_map_usage(input, usage, field, EV_ABS, ABS_X, 4);
break;
case HID_GD_Y:
wacom_map_usage(input, usage, field, EV_ABS, ABS_Y, 4);
break;
+ case WACOM_HID_WD_DISTANCE:
+ case HID_GD_Z:
+ wacom_map_usage(input, usage, field, EV_ABS, ABS_DISTANCE, 0);
+ break;
case HID_DG_TIPPRESSURE:
wacom_map_usage(input, usage, field, EV_ABS, ABS_PRESSURE, 0);
break;
case HID_DG_INRANGE:
wacom_map_usage(input, usage, field, EV_KEY, BTN_TOOL_PEN, 0);
break;
+ case HID_DG_BATTERYSTRENGTH:
+ features->quirks |= WACOM_QUIRK_BATTERY;
+ break;
case HID_DG_INVERT:
wacom_map_usage(input, usage, field, EV_KEY,
BTN_TOOL_RUBBER, 0);
break;
+ case HID_DG_TILT_X:
+ wacom_map_usage(input, usage, field, EV_ABS, ABS_TILT_X, 0);
+ break;
+ case HID_DG_TILT_Y:
+ wacom_map_usage(input, usage, field, EV_ABS, ABS_TILT_Y, 0);
+ break;
+ case HID_DG_TWIST:
+ wacom_map_usage(input, usage, field, EV_ABS, ABS_Z, 0);
+ break;
case HID_DG_ERASER:
case HID_DG_TIPSWITCH:
wacom_map_usage(input, usage, field, EV_KEY, BTN_TOUCH, 0);
@@ -1498,39 +1739,131 @@ static void wacom_wac_pen_usage_mapping(struct hid_device *hdev,
case HID_DG_TOOLSERIALNUMBER:
wacom_map_usage(input, usage, field, EV_MSC, MSC_SERIAL, 0);
break;
+ case WACOM_HID_WD_SENSE:
+ features->quirks |= WACOM_QUIRK_SENSE;
+ wacom_map_usage(input, usage, field, EV_KEY, BTN_TOOL_PEN, 0);
+ break;
+ case WACOM_HID_WD_SERIALHI:
+ wacom_map_usage(input, usage, field, EV_ABS, ABS_MISC, 0);
+ set_bit(EV_KEY, input->evbit);
+ input_set_capability(input, EV_KEY, BTN_TOOL_PEN);
+ input_set_capability(input, EV_KEY, BTN_TOOL_RUBBER);
+ input_set_capability(input, EV_KEY, BTN_TOOL_BRUSH);
+ input_set_capability(input, EV_KEY, BTN_TOOL_PENCIL);
+ input_set_capability(input, EV_KEY, BTN_TOOL_AIRBRUSH);
+ input_set_capability(input, EV_KEY, BTN_TOOL_MOUSE);
+ input_set_capability(input, EV_KEY, BTN_TOOL_LENS);
+ break;
+ case WACOM_HID_WD_FINGERWHEEL:
+ wacom_map_usage(input, usage, field, EV_ABS, ABS_WHEEL, 0);
+ break;
}
}
-static int wacom_wac_pen_event(struct hid_device *hdev, struct hid_field *field,
+static void wacom_wac_pen_event(struct hid_device *hdev, struct hid_field *field,
struct hid_usage *usage, __s32 value)
{
struct wacom *wacom = hid_get_drvdata(hdev);
struct wacom_wac *wacom_wac = &wacom->wacom_wac;
+ struct wacom_features *features = &wacom_wac->features;
struct input_dev *input = wacom_wac->pen_input;
+ unsigned equivalent_usage = wacom_equivalent_usage(usage->hid);
- /* checking which Tool / tip switch to send */
- switch (usage->hid) {
+ switch (equivalent_usage) {
+ case HID_GD_Z:
+ /*
+ * HID_GD_Z "should increase as the control's position is
+ * moved from high to low", while ABS_DISTANCE instead
+ * increases in value as the tool moves from low to high.
+ */
+ value = field->logical_maximum - value;
+ break;
case HID_DG_INRANGE:
wacom_wac->hid_data.inrange_state = value;
- return 0;
+ if (!(features->quirks & WACOM_QUIRK_SENSE))
+ wacom_wac->hid_data.sense_state = value;
+ return;
+ case HID_DG_BATTERYSTRENGTH:
+ wacom_wac->hid_data.battery_capacity = value;
+ wacom_wac->hid_data.bat_connected = 1;
+ break;
case HID_DG_INVERT:
wacom_wac->hid_data.invert_state = value;
- return 0;
+ return;
case HID_DG_ERASER:
case HID_DG_TIPSWITCH:
wacom_wac->hid_data.tipswitch |= value;
- return 0;
+ return;
+ case HID_DG_TOOLSERIALNUMBER:
+ wacom_wac->serial[0] = (wacom_wac->serial[0] & ~0xFFFFFFFFULL);
+ wacom_wac->serial[0] |= value;
+ return;
+ case WACOM_HID_WD_SENSE:
+ wacom_wac->hid_data.sense_state = value;
+ return;
+ case WACOM_HID_WD_SERIALHI:
+ wacom_wac->serial[0] = (wacom_wac->serial[0] & 0xFFFFFFFF);
+ wacom_wac->serial[0] |= ((__u64)value) << 32;
+ /*
+ * Non-USI EMR devices may contain additional tool type
+ * information here. See WACOM_HID_WD_TOOLTYPE case for
+ * more details.
+ */
+ if (value >> 20 == 1) {
+ wacom_wac->id[0] |= value & 0xFFFFF;
+ }
+ return;
+ case WACOM_HID_WD_TOOLTYPE:
+ /*
+ * Some devices (MobileStudio Pro, and possibly later
+ * devices as well) do not return the complete tool
+ * type in their WACOM_HID_WD_TOOLTYPE usage. Use a
+ * bitwise OR so the complete value can be built
+ * up over time :(
+ */
+ wacom_wac->id[0] |= value;
+ return;
+ case WACOM_HID_WD_OFFSETLEFT:
+ if (features->offset_left && value != features->offset_left)
+ hid_warn(hdev, "%s: overriding existing left offset "
+ "%d -> %d\n", __func__,
+ features->offset_left, value);
+ features->offset_left = value;
+ return;
+ case WACOM_HID_WD_OFFSETRIGHT:
+ if (features->offset_right && value != features->offset_right)
+ hid_warn(hdev, "%s: overriding existing right offset "
+ "%d -> %d\n", __func__,
+ features->offset_right, value);
+ features->offset_right = value;
+ return;
+ case WACOM_HID_WD_OFFSETTOP:
+ if (features->offset_top && value != features->offset_top)
+ hid_warn(hdev, "%s: overriding existing top offset "
+ "%d -> %d\n", __func__,
+ features->offset_top, value);
+ features->offset_top = value;
+ return;
+ case WACOM_HID_WD_OFFSETBOTTOM:
+ if (features->offset_bottom && value != features->offset_bottom)
+ hid_warn(hdev, "%s: overriding existing bottom offset "
+ "%d -> %d\n", __func__,
+ features->offset_bottom, value);
+ features->offset_bottom = value;
+ return;
}
/* send pen events only when touch is up or forced out
* or touch arbitration is off
*/
if (!usage->type || delay_pen_events(wacom_wac))
- return 0;
+ return;
- input_event(input, usage->type, usage->code, value);
+ /* send pen events only when the pen is in/entering/leaving proximity */
+ if (!wacom_wac->hid_data.inrange_state && !wacom_wac->tool[0])
+ return;
- return 0;
+ input_event(input, usage->type, usage->code, value);
}
static void wacom_wac_pen_pre_report(struct hid_device *hdev,
@@ -1546,24 +1879,53 @@ static void wacom_wac_pen_report(struct hid_device *hdev,
struct wacom_wac *wacom_wac = &wacom->wacom_wac;
struct input_dev *input = wacom_wac->pen_input;
bool prox = wacom_wac->hid_data.inrange_state;
+ bool range = wacom_wac->hid_data.sense_state;
- if (!wacom_wac->shared->stylus_in_proximity) /* first in prox */
+ if (!wacom_wac->tool[0] && prox) { /* first in prox */
/* Going into proximity select tool */
- wacom_wac->tool[0] = wacom_wac->hid_data.invert_state ?
- BTN_TOOL_RUBBER : BTN_TOOL_PEN;
+ if (wacom_wac->hid_data.invert_state)
+ wacom_wac->tool[0] = BTN_TOOL_RUBBER;
+ else if (wacom_wac->id[0])
+ wacom_wac->tool[0] = wacom_intuos_get_tool_type(wacom_wac->id[0]);
+ else
+ wacom_wac->tool[0] = BTN_TOOL_PEN;
+ }
/* keep pen state for touch events */
- wacom_wac->shared->stylus_in_proximity = prox;
+ wacom_wac->shared->stylus_in_proximity = range;
- if (!delay_pen_events(wacom_wac)) {
+ if (!delay_pen_events(wacom_wac) && wacom_wac->tool[0]) {
+ int id = wacom_wac->id[0];
+
+ /*
+ * Non-USI EMR tools should have their IDs mangled to
+ * match the legacy behavior of wacom_intuos_general
+ */
+ if (wacom_wac->serial[0] >> 52 == 1)
+ id = wacom_intuos_id_mangle(id);
+
+ /*
+ * To ensure compatibility with xf86-input-wacom, we should
+ * report the BTN_TOOL_* event prior to the ABS_MISC or
+ * MSC_SERIAL events.
+ */
input_report_key(input, BTN_TOUCH,
wacom_wac->hid_data.tipswitch);
input_report_key(input, wacom_wac->tool[0], prox);
+ if (wacom_wac->serial[0]) {
+ input_event(input, EV_MSC, MSC_SERIAL, wacom_wac->serial[0]);
+ input_report_abs(input, ABS_MISC, id);
+ }
wacom_wac->hid_data.tipswitch = false;
input_sync(input);
}
+
+ if (!prox) {
+ wacom_wac->tool[0] = 0;
+ wacom_wac->id[0] = 0;
+ }
}
static void wacom_wac_finger_usage_mapping(struct hid_device *hdev,
@@ -1573,8 +1935,9 @@ static void wacom_wac_finger_usage_mapping(struct hid_device *hdev,
struct wacom_wac *wacom_wac = &wacom->wacom_wac;
struct input_dev *input = wacom_wac->touch_input;
unsigned touch_max = wacom_wac->features.touch_max;
+ unsigned equivalent_usage = wacom_equivalent_usage(usage->hid);
- switch (usage->hid) {
+ switch (equivalent_usage) {
case HID_GD_X:
if (touch_max == 1)
wacom_map_usage(input, usage, field, EV_ABS, ABS_X, 4);
@@ -1644,13 +2007,14 @@ static void wacom_wac_finger_slot(struct wacom_wac *wacom_wac,
}
}
-static int wacom_wac_finger_event(struct hid_device *hdev,
+static void wacom_wac_finger_event(struct hid_device *hdev,
struct hid_field *field, struct hid_usage *usage, __s32 value)
{
struct wacom *wacom = hid_get_drvdata(hdev);
struct wacom_wac *wacom_wac = &wacom->wacom_wac;
+ unsigned equivalent_usage = wacom_equivalent_usage(usage->hid);
- switch (usage->hid) {
+ switch (equivalent_usage) {
case HID_GD_X:
wacom_wac->hid_data.x = value;
break;
@@ -1673,11 +2037,9 @@ static int wacom_wac_finger_event(struct hid_device *hdev,
if (usage->usage_index + 1 == field->report_count) {
- if (usage->hid == wacom_wac->hid_data.last_slot_field)
+ if (equivalent_usage == wacom_wac->hid_data.last_slot_field)
wacom_wac_finger_slot(wacom_wac, wacom_wac->touch_input);
}
-
- return 0;
}
static void wacom_wac_finger_pre_report(struct hid_device *hdev,
@@ -1762,28 +2124,30 @@ void wacom_wac_usage_mapping(struct hid_device *hdev,
/* currently, only direct devices have proper hid report descriptors */
features->device_type |= WACOM_DEVICETYPE_DIRECT;
- if (WACOM_PEN_FIELD(field))
- return wacom_wac_pen_usage_mapping(hdev, field, usage);
-
- if (WACOM_FINGER_FIELD(field))
- return wacom_wac_finger_usage_mapping(hdev, field, usage);
+ if (WACOM_PAD_FIELD(field))
+ wacom_wac_pad_usage_mapping(hdev, field, usage);
+ else if (WACOM_PEN_FIELD(field))
+ wacom_wac_pen_usage_mapping(hdev, field, usage);
+ else if (WACOM_FINGER_FIELD(field))
+ wacom_wac_finger_usage_mapping(hdev, field, usage);
}
-int wacom_wac_event(struct hid_device *hdev, struct hid_field *field,
+void wacom_wac_event(struct hid_device *hdev, struct hid_field *field,
struct hid_usage *usage, __s32 value)
{
struct wacom *wacom = hid_get_drvdata(hdev);
if (wacom->wacom_wac.features.type != HID_GENERIC)
- return 0;
-
- if (WACOM_PEN_FIELD(field))
- return wacom_wac_pen_event(hdev, field, usage, value);
-
- if (WACOM_FINGER_FIELD(field))
- return wacom_wac_finger_event(hdev, field, usage, value);
+ return;
- return 0;
+ if (WACOM_PAD_FIELD(field)) {
+ wacom_wac_pad_battery_event(hdev, field, usage, value);
+ if (wacom->wacom_wac.pad_input)
+ wacom_wac_pad_event(hdev, field, usage, value);
+ } else if (WACOM_PEN_FIELD(field) && wacom->wacom_wac.pen_input)
+ wacom_wac_pen_event(hdev, field, usage, value);
+ else if (WACOM_FINGER_FIELD(field) && wacom->wacom_wac.touch_input)
+ wacom_wac_finger_event(hdev, field, usage, value);
}
static void wacom_report_events(struct hid_device *hdev, struct hid_report *report)
@@ -1814,19 +2178,23 @@ void wacom_wac_report(struct hid_device *hdev, struct hid_report *report)
if (wacom_wac->features.type != HID_GENERIC)
return;
- if (WACOM_PEN_FIELD(field))
+ if (WACOM_PAD_FIELD(field) && wacom->wacom_wac.pad_input)
+ wacom_wac_pad_pre_report(hdev, report);
+ else if (WACOM_PEN_FIELD(field) && wacom->wacom_wac.pen_input)
wacom_wac_pen_pre_report(hdev, report);
-
- if (WACOM_FINGER_FIELD(field))
+ else if (WACOM_FINGER_FIELD(field) && wacom->wacom_wac.touch_input)
wacom_wac_finger_pre_report(hdev, report);
wacom_report_events(hdev, report);
- if (WACOM_PEN_FIELD(field))
- return wacom_wac_pen_report(hdev, report);
-
- if (WACOM_FINGER_FIELD(field))
- return wacom_wac_finger_report(hdev, report);
+ if (WACOM_PAD_FIELD(field)) {
+ wacom_wac_pad_battery_report(hdev, report);
+ if (wacom->wacom_wac.pad_input)
+ wacom_wac_pad_report(hdev, report);
+ } else if (WACOM_PEN_FIELD(field) && wacom->wacom_wac.pen_input)
+ wacom_wac_pen_report(hdev, report);
+ else if (WACOM_FINGER_FIELD(field) && wacom->wacom_wac.touch_input)
+ wacom_wac_finger_report(hdev, report);
}
static int wacom_bpt_touch(struct wacom_wac *wacom)
@@ -2399,6 +2767,8 @@ void wacom_setup_device_quirks(struct wacom *wacom)
struct wacom_features *features = &wacom->wacom_wac.features;
/* The pen and pad share the same interface on most devices */
+ if (features->numbered_buttons > 0)
+ features->device_type |= WACOM_DEVICETYPE_PAD;
if (features->type == GRAPHIRE_BT || features->type == WACOM_G4 ||
features->type == DTUS ||
(features->type >= INTUOS3S && features->type <= WACOM_MO)) {
@@ -2448,7 +2818,7 @@ void wacom_setup_device_quirks(struct wacom *wacom)
/*
* Raw Wacom-mode pen and touch events both come from interface
* 0, whose HID descriptor has an application usage of 0xFF0D
- * (i.e., WACOM_VENDORDEFINED_PEN). We route pen packets back
+ * (i.e., WACOM_HID_WD_DIGITIZER). We route pen packets back
* out through the HID_GENERIC device created for interface 1,
* so rewrite this one to be of type WACOM_DEVICETYPE_TOUCH.
*/
@@ -2530,10 +2900,12 @@ int wacom_setup_pen_input_capabilities(struct input_dev *input_dev,
__set_bit(BTN_TOUCH, input_dev->keybit);
__set_bit(ABS_MISC, input_dev->absbit);
- input_set_abs_params(input_dev, ABS_X, features->x_min,
- features->x_max, features->x_fuzz, 0);
- input_set_abs_params(input_dev, ABS_Y, features->y_min,
- features->y_max, features->y_fuzz, 0);
+ input_set_abs_params(input_dev, ABS_X, 0 + features->offset_left,
+ features->x_max - features->offset_right,
+ features->x_fuzz, 0);
+ input_set_abs_params(input_dev, ABS_Y, 0 + features->offset_top,
+ features->y_max - features->offset_bottom,
+ features->y_fuzz, 0);
input_set_abs_params(input_dev, ABS_PRESSURE, 0,
features->pressure_max, features->pressure_fuzz, 0);
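The offsets shrink the advertised range on both ends; a sketch with hypothetical numbers (real values come from the feature table or the WACOM_HID_WD_OFFSET* usages):

	/*
	 * x_max == 104480, offset_left == offset_right == 400:
	 *   ABS_X is advertised as 400 .. 104080, trimming what is
	 *   typically an inactive border region of the sensor.
	 */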
@@ -2769,17 +3141,29 @@ int wacom_setup_touch_input_capabilities(struct input_dev *input_dev,
return 0;
}
+static int wacom_numbered_button_to_key(int n)
+{
+ if (n < 10)
+ return BTN_0 + n;
+ else if (n < 16)
+ return BTN_A + (n-10);
+ else if (n < 18)
+ return BTN_BASE + (n-16);
+ else
+ return 0;
+}
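The helper yields the following mapping (key codes from input-event-codes.h; BTN_A through BTN_Z and BTN_BASE/BTN_BASE2 are consecutive codes):

	/*
	 * n =  0..9   ->  BTN_0 .. BTN_9
	 * n = 10..15  ->  BTN_A, BTN_B, BTN_C, BTN_X, BTN_Y, BTN_Z
	 * n = 16..17  ->  BTN_BASE, BTN_BASE2
	 * n >= 18     ->  0 (no key; the button is dropped)
	 */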
+
static void wacom_setup_numbered_buttons(struct input_dev *input_dev,
int button_count)
{
int i;
- for (i = 0; i < button_count && i < 10; i++)
- __set_bit(BTN_0 + i, input_dev->keybit);
- for (i = 10; i < button_count && i < 16; i++)
- __set_bit(BTN_A + (i-10), input_dev->keybit);
- for (i = 16; i < button_count && i < 18; i++)
- __set_bit(BTN_BASE + (i-16), input_dev->keybit);
+ for (i = 0; i < button_count; i++) {
+ int key = wacom_numbered_button_to_key(i);
+
+ if (key)
+ __set_bit(key, input_dev->keybit);
+ }
}
static void wacom_24hd_update_leds(struct wacom *wacom, int mask, int group)
@@ -2881,12 +3265,12 @@ static void wacom_report_numbered_buttons(struct input_dev *input_dev,
for (i = 0; i < wacom->led.count; i++)
wacom_update_led(wacom, button_count, mask, i);
- for (i = 0; i < button_count && i < 10; i++)
- input_report_key(input_dev, BTN_0 + i, mask & (1 << i));
- for (i = 10; i < button_count && i < 16; i++)
- input_report_key(input_dev, BTN_A + (i-10), mask & (1 << i));
- for (i = 16; i < button_count && i < 18; i++)
- input_report_key(input_dev, BTN_BASE + (i-16), mask & (1 << i));
+ for (i = 0; i < button_count; i++) {
+ int key = wacom_numbered_button_to_key(i);
+
+ if (key)
+ input_report_key(input_dev, key, mask & (1 << i));
+ }
}
int wacom_setup_pad_input_capabilities(struct input_dev *input_dev,
@@ -2906,8 +3290,12 @@ int wacom_setup_pad_input_capabilities(struct input_dev *input_dev,
__set_bit(ABS_MISC, input_dev->absbit);
/* kept so that legacy xf86-input-wacom accepts the pad */
- input_set_abs_params(input_dev, ABS_X, 0, 1, 0, 0);
- input_set_abs_params(input_dev, ABS_Y, 0, 1, 0, 0);
+ if (!(input_dev->absinfo && (input_dev->absinfo[ABS_X].minimum ||
+ input_dev->absinfo[ABS_X].maximum)))
+ input_set_abs_params(input_dev, ABS_X, 0, 1, 0, 0);
+ if (!(input_dev->absinfo && (input_dev->absinfo[ABS_Y].minimum ||
+ input_dev->absinfo[ABS_Y].maximum)))
+ input_set_abs_params(input_dev, ABS_Y, 0, 1, 0, 0);
/* kept so that udev and libwacom accept the pad */
__set_bit(BTN_STYLUS, input_dev->keybit);
@@ -3027,6 +3415,9 @@ int wacom_setup_pad_input_capabilities(struct input_dev *input_dev,
input_set_abs_params(input_dev, ABS_WHEEL, 0, 71, 0, 0);
break;
+ case HID_GENERIC:
+ break;
+
default:
/* no pad supported */
return -ENODEV;
@@ -3233,26 +3624,30 @@ static const struct wacom_features wacom_features_0x317 =
INTUOSPL, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 9, .touch_max = 16,
.check_for_hid_type = true, .hid_type = HID_TYPE_USBNONE };
static const struct wacom_features wacom_features_0xF4 =
- { "Wacom Cintiq 24HD", 104080, 65200, 2047, 63,
+ { "Wacom Cintiq 24HD", 104480, 65600, 2047, 63,
WACOM_24HD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 16,
+ WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET,
WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET };
static const struct wacom_features wacom_features_0xF8 =
- { "Wacom Cintiq 24HD touch", 104080, 65200, 2047, 63, /* Pen */
+ { "Wacom Cintiq 24HD touch", 104480, 65600, 2047, 63, /* Pen */
WACOM_24HD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 16,
WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET,
+ WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET,
.oVid = USB_VENDOR_ID_WACOM, .oPid = 0xf6 };
static const struct wacom_features wacom_features_0xF6 =
{ "Wacom Cintiq 24HD touch", .type = WACOM_24HDT, /* Touch */
.oVid = USB_VENDOR_ID_WACOM, .oPid = 0xf8, .touch_max = 10,
.check_for_hid_type = true, .hid_type = HID_TYPE_USBNONE };
static const struct wacom_features wacom_features_0x32A =
- { "Wacom Cintiq 27QHD", 119740, 67520, 2047, 63,
+ { "Wacom Cintiq 27QHD", 120140, 67920, 2047, 63,
WACOM_27QHD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 0,
+ WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET,
WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET };
static const struct wacom_features wacom_features_0x32B =
- { "Wacom Cintiq 27QHD touch", 119740, 67520, 2047, 63,
+ { "Wacom Cintiq 27QHD touch", 120140, 67920, 2047, 63,
WACOM_27QHD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 0,
WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET,
+ WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET,
.oVid = USB_VENDOR_ID_WACOM, .oPid = 0x32C };
static const struct wacom_features wacom_features_0x32C =
{ "Wacom Cintiq 27QHD touch", .type = WACOM_27QHDT,
@@ -3267,13 +3662,15 @@ static const struct wacom_features wacom_features_0xC6 =
{ "Wacom Cintiq 12WX", 53020, 33440, 1023, 63,
WACOM_BEE, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 10 };
static const struct wacom_features wacom_features_0x304 =
- { "Wacom Cintiq 13HD", 59152, 33448, 1023, 63,
+ { "Wacom Cintiq 13HD", 59552, 33848, 1023, 63,
WACOM_13HD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 9,
+ WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET,
WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET };
static const struct wacom_features wacom_features_0x333 =
- { "Wacom Cintiq 13HD touch", 59152, 33448, 2047, 63,
+ { "Wacom Cintiq 13HD touch", 59552, 33848, 2047, 63,
WACOM_13HD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 9,
WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET,
+ WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET,
.oVid = USB_VENDOR_ID_WACOM, .oPid = 0x335 };
static const struct wacom_features wacom_features_0x335 =
{ "Wacom Cintiq 13HD touch", .type = WACOM_24HDT, /* Touch */
@@ -3290,42 +3687,50 @@ static const struct wacom_features wacom_features_0xF0 =
{ "Wacom DTU1631", 34623, 19553, 511, 0,
DTU, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
static const struct wacom_features wacom_features_0xFB =
- { "Wacom DTU1031", 21896, 13760, 511, 0,
+ { "Wacom DTU1031", 22096, 13960, 511, 0,
DTUS, WACOM_INTUOS_RES, WACOM_INTUOS_RES, 4,
+ WACOM_DTU_OFFSET, WACOM_DTU_OFFSET,
WACOM_DTU_OFFSET, WACOM_DTU_OFFSET };
static const struct wacom_features wacom_features_0x32F =
- { "Wacom DTU1031X", 22472, 12728, 511, 0,
+ { "Wacom DTU1031X", 22672, 12928, 511, 0,
DTUSX, WACOM_INTUOS_RES, WACOM_INTUOS_RES, 0,
+ WACOM_DTU_OFFSET, WACOM_DTU_OFFSET,
WACOM_DTU_OFFSET, WACOM_DTU_OFFSET };
static const struct wacom_features wacom_features_0x336 =
- { "Wacom DTU1141", 23472, 13203, 1023, 0,
+ { "Wacom DTU1141", 23672, 13403, 1023, 0,
DTUS, WACOM_INTUOS_RES, WACOM_INTUOS_RES, 4,
+ WACOM_DTU_OFFSET, WACOM_DTU_OFFSET,
WACOM_DTU_OFFSET, WACOM_DTU_OFFSET };
static const struct wacom_features wacom_features_0x57 =
- { "Wacom DTK2241", 95640, 54060, 2047, 63,
+ { "Wacom DTK2241", 95840, 54260, 2047, 63,
DTK, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 6,
+ WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET,
WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET };
static const struct wacom_features wacom_features_0x59 = /* Pen */
- { "Wacom DTH2242", 95640, 54060, 2047, 63,
+ { "Wacom DTH2242", 95840, 54260, 2047, 63,
DTK, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 6,
WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET,
+ WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET,
.oVid = USB_VENDOR_ID_WACOM, .oPid = 0x5D };
static const struct wacom_features wacom_features_0x5D = /* Touch */
{ "Wacom DTH2242", .type = WACOM_24HDT,
.oVid = USB_VENDOR_ID_WACOM, .oPid = 0x59, .touch_max = 10,
.check_for_hid_type = true, .hid_type = HID_TYPE_USBNONE };
static const struct wacom_features wacom_features_0xCC =
- { "Wacom Cintiq 21UX2", 86800, 65200, 2047, 63,
+ { "Wacom Cintiq 21UX2", 87200, 65600, 2047, 63,
WACOM_21UX2, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 18,
+ WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET,
WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET };
static const struct wacom_features wacom_features_0xFA =
- { "Wacom Cintiq 22HD", 95440, 53860, 2047, 63,
+ { "Wacom Cintiq 22HD", 95840, 54260, 2047, 63,
WACOM_22HD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 18,
+ WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET,
WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET };
static const struct wacom_features wacom_features_0x5B =
- { "Wacom Cintiq 22HDT", 95440, 53860, 2047, 63,
+ { "Wacom Cintiq 22HDT", 95840, 54260, 2047, 63,
WACOM_22HD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 18,
WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET,
+ WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET,
.oVid = USB_VENDOR_ID_WACOM, .oPid = 0x5e };
static const struct wacom_features wacom_features_0x5E =
{ "Wacom Cintiq 22HDT", .type = WACOM_24HDT,
@@ -3469,18 +3874,20 @@ static const struct wacom_features wacom_features_0x6004 =
{ "ISD-V4", 12800, 8000, 255, 0,
TABLETPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
static const struct wacom_features wacom_features_0x307 =
- { "Wacom ISDv5 307", 59152, 33448, 2047, 63,
+ { "Wacom ISDv5 307", 59552, 33848, 2047, 63,
CINTIQ_HYBRID, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 9,
WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET,
+ WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET,
.oVid = USB_VENDOR_ID_WACOM, .oPid = 0x309 };
static const struct wacom_features wacom_features_0x309 =
{ "Wacom ISDv5 309", .type = WACOM_24HDT, /* Touch */
.oVid = USB_VENDOR_ID_WACOM, .oPid = 0x0307, .touch_max = 10,
.check_for_hid_type = true, .hid_type = HID_TYPE_USBNONE };
static const struct wacom_features wacom_features_0x30A =
- { "Wacom ISDv5 30A", 59152, 33448, 2047, 63,
+ { "Wacom ISDv5 30A", 59552, 33848, 2047, 63,
CINTIQ_HYBRID, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 9,
WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET,
+ WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET,
.oVid = USB_VENDOR_ID_WACOM, .oPid = 0x30C };
static const struct wacom_features wacom_features_0x30C =
{ "Wacom ISDv5 30C", .type = WACOM_24HDT, /* Touch */
@@ -3496,6 +3903,7 @@ static const struct wacom_features wacom_features_0x325 =
{ "Wacom ISDv5 325", 59552, 33848, 2047, 63,
CINTIQ_COMPANION_2, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 11,
WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET,
+ WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET,
.oVid = USB_VENDOR_ID_WACOM, .oPid = 0x326 };
static const struct wacom_features wacom_features_0x326 = /* Touch */
{ "Wacom ISDv5 326", .type = HID_GENERIC, .oVid = USB_VENDOR_ID_WACOM,
@@ -3525,8 +3933,9 @@ static const struct wacom_features wacom_features_0x33E =
INTUOSHT2, WACOM_INTUOS_RES, WACOM_INTUOS_RES, .touch_max = 16,
.check_for_hid_type = true, .hid_type = HID_TYPE_USBNONE };
static const struct wacom_features wacom_features_0x343 =
- { "Wacom DTK1651", 34616, 19559, 1023, 0,
+ { "Wacom DTK1651", 34816, 19759, 1023, 0,
DTUS, WACOM_INTUOS_RES, WACOM_INTUOS_RES, 4,
+ WACOM_DTU_OFFSET, WACOM_DTU_OFFSET,
WACOM_DTU_OFFSET, WACOM_DTU_OFFSET };
static const struct wacom_features wacom_features_HID_ANY_ID =
diff --git a/drivers/hid/wacom_wac.h b/drivers/hid/wacom_wac.h
index 324c40b0c119..fb0e50acb10d 100644
--- a/drivers/hid/wacom_wac.h
+++ b/drivers/hid/wacom_wac.h
@@ -74,6 +74,7 @@
/* device quirks */
#define WACOM_QUIRK_BBTOUCH_LOWRES 0x0001
+#define WACOM_QUIRK_SENSE 0x0002
#define WACOM_QUIRK_BATTERY 0x0008
/* device types */
@@ -84,23 +85,66 @@
#define WACOM_DEVICETYPE_WL_MONITOR 0x0008
#define WACOM_DEVICETYPE_DIRECT 0x0010
-#define WACOM_VENDORDEFINED_PEN 0xff0d0001
-#define WACOM_G9_PAGE 0xff090000
-#define WACOM_G9_DIGITIZER (WACOM_G9_PAGE | 0x02)
-#define WACOM_G9_TOUCHSCREEN (WACOM_G9_PAGE | 0x11)
-#define WACOM_G11_PAGE 0xff110000
-#define WACOM_G11_DIGITIZER (WACOM_G11_PAGE | 0x02)
-#define WACOM_G11_TOUCHSCREEN (WACOM_G11_PAGE | 0x11)
+#define WACOM_HID_UP_WACOMDIGITIZER 0xff0d0000
+#define WACOM_HID_SP_PAD 0x00040000
+#define WACOM_HID_SP_BUTTON 0x00090000
+#define WACOM_HID_SP_DIGITIZER 0x000d0000
+#define WACOM_HID_SP_DIGITIZERINFO 0x00100000
+#define WACOM_HID_WD_DIGITIZER (WACOM_HID_UP_WACOMDIGITIZER | 0x01)
+#define WACOM_HID_WD_SENSE (WACOM_HID_UP_WACOMDIGITIZER | 0x36)
+#define WACOM_HID_WD_DIGITIZERFNKEYS (WACOM_HID_UP_WACOMDIGITIZER | 0x39)
+#define WACOM_HID_WD_SERIALHI (WACOM_HID_UP_WACOMDIGITIZER | 0x5c)
+#define WACOM_HID_WD_TOOLTYPE (WACOM_HID_UP_WACOMDIGITIZER | 0x77)
+#define WACOM_HID_WD_DISTANCE (WACOM_HID_UP_WACOMDIGITIZER | 0x0132)
+#define WACOM_HID_WD_TOUCHSTRIP (WACOM_HID_UP_WACOMDIGITIZER | 0x0136)
+#define WACOM_HID_WD_TOUCHSTRIP2 (WACOM_HID_UP_WACOMDIGITIZER | 0x0137)
+#define WACOM_HID_WD_TOUCHRING (WACOM_HID_UP_WACOMDIGITIZER | 0x0138)
+#define WACOM_HID_WD_TOUCHRINGSTATUS (WACOM_HID_UP_WACOMDIGITIZER | 0x0139)
+#define WACOM_HID_WD_ACCELEROMETER_X (WACOM_HID_UP_WACOMDIGITIZER | 0x0401)
+#define WACOM_HID_WD_ACCELEROMETER_Y (WACOM_HID_UP_WACOMDIGITIZER | 0x0402)
+#define WACOM_HID_WD_ACCELEROMETER_Z (WACOM_HID_UP_WACOMDIGITIZER | 0x0403)
+#define WACOM_HID_WD_BATTERY_CHARGING (WACOM_HID_UP_WACOMDIGITIZER | 0x0404)
+#define WACOM_HID_WD_BATTERY_LEVEL (WACOM_HID_UP_WACOMDIGITIZER | 0x043b)
+#define WACOM_HID_WD_EXPRESSKEY00 (WACOM_HID_UP_WACOMDIGITIZER | 0x0910)
+#define WACOM_HID_WD_EXPRESSKEYCAP00 (WACOM_HID_UP_WACOMDIGITIZER | 0x0950)
+#define WACOM_HID_WD_BUTTONHOME (WACOM_HID_UP_WACOMDIGITIZER | 0x0990)
+#define WACOM_HID_WD_BUTTONUP (WACOM_HID_UP_WACOMDIGITIZER | 0x0991)
+#define WACOM_HID_WD_BUTTONDOWN (WACOM_HID_UP_WACOMDIGITIZER | 0x0992)
+#define WACOM_HID_WD_BUTTONLEFT (WACOM_HID_UP_WACOMDIGITIZER | 0x0993)
+#define WACOM_HID_WD_BUTTONRIGHT (WACOM_HID_UP_WACOMDIGITIZER | 0x0994)
+#define WACOM_HID_WD_BUTTONCENTER (WACOM_HID_UP_WACOMDIGITIZER | 0x0995)
+#define WACOM_HID_WD_TOUCHONOFF (WACOM_HID_UP_WACOMDIGITIZER | 0x0996)
+#define WACOM_HID_WD_FINGERWHEEL (WACOM_HID_UP_WACOMDIGITIZER | 0x0d03)
+#define WACOM_HID_WD_OFFSETLEFT (WACOM_HID_UP_WACOMDIGITIZER | 0x0d30)
+#define WACOM_HID_WD_OFFSETTOP (WACOM_HID_UP_WACOMDIGITIZER | 0x0d31)
+#define WACOM_HID_WD_OFFSETRIGHT (WACOM_HID_UP_WACOMDIGITIZER | 0x0d32)
+#define WACOM_HID_WD_OFFSETBOTTOM (WACOM_HID_UP_WACOMDIGITIZER | 0x0d33)
+#define WACOM_HID_WD_DATAMODE (WACOM_HID_UP_WACOMDIGITIZER | 0x1002)
+#define WACOM_HID_WD_DIGITIZERINFO (WACOM_HID_UP_WACOMDIGITIZER | 0x1013)
+#define WACOM_HID_UP_G9 0xff090000
+#define WACOM_HID_G9_PEN (WACOM_HID_UP_G9 | 0x02)
+#define WACOM_HID_G9_TOUCHSCREEN (WACOM_HID_UP_G9 | 0x11)
+#define WACOM_HID_UP_G11 0xff110000
+#define WACOM_HID_G11_PEN (WACOM_HID_UP_G11 | 0x02)
+#define WACOM_HID_G11_TOUCHSCREEN (WACOM_HID_UP_G11 | 0x11)
+
+#define WACOM_PAD_FIELD(f) (((f)->physical == HID_DG_TABLETFUNCTIONKEY) || \
+ ((f)->physical == WACOM_HID_WD_DIGITIZERFNKEYS) || \
+ ((f)->physical == WACOM_HID_WD_DIGITIZERINFO))
#define WACOM_PEN_FIELD(f) (((f)->logical == HID_DG_STYLUS) || \
((f)->physical == HID_DG_STYLUS) || \
((f)->physical == HID_DG_PEN) || \
((f)->application == HID_DG_PEN) || \
((f)->application == HID_DG_DIGITIZER) || \
- ((f)->application == WACOM_VENDORDEFINED_PEN))
+ ((f)->application == WACOM_HID_WD_DIGITIZER) || \
+ ((f)->application == WACOM_HID_G9_PEN) || \
+ ((f)->application == WACOM_HID_G11_PEN))
#define WACOM_FINGER_FIELD(f) (((f)->logical == HID_DG_FINGER) || \
((f)->physical == HID_DG_FINGER) || \
- ((f)->application == HID_DG_TOUCHSCREEN))
+ ((f)->application == HID_DG_TOUCHSCREEN) || \
+ ((f)->application == WACOM_HID_G9_TOUCHSCREEN) || \
+ ((f)->application == WACOM_HID_G11_TOUCHSCREEN))
enum {
PENPARTNER = 0,
@@ -167,8 +211,10 @@ struct wacom_features {
int x_resolution;
int y_resolution;
int numbered_buttons;
- int x_min;
- int y_min;
+ int offset_left;
+ int offset_right;
+ int offset_top;
+ int offset_bottom;
int device_type;
int x_phy;
int y_phy;
@@ -186,6 +232,7 @@ struct wacom_features {
int pktlen;
bool check_for_hid_type;
int hid_type;
+ bool input_event_flag;
};
struct wacom_shared {
@@ -202,6 +249,7 @@ struct wacom_shared {
struct hid_data {
__s16 inputmode; /* InputMode HID feature, -1 if non-existent */
__s16 inputmode_index; /* InputMode HID feature index in the report */
+ bool sense_state;
bool inrange_state;
bool invert_state;
bool tipswitch;
@@ -217,6 +265,10 @@ struct hid_data {
int last_slot_field;
int num_expected;
int num_received;
+ int battery_capacity;
+ int bat_charging;
+ int bat_connected;
+ int ps_connected;
};
struct wacom_remote_data {
@@ -234,7 +286,7 @@ struct wacom_wac {
unsigned char data[WACOM_PKGLEN_MAX];
int tool[2];
int id[2];
- __u32 serial[2];
+ __u64 serial[2];
bool reporting_data;
struct wacom_features features;
struct wacom_shared *shared;
diff --git a/drivers/hsi/clients/ssi_protocol.c b/drivers/hsi/clients/ssi_protocol.c
index 6031cd146556..7ef819680acd 100644
--- a/drivers/hsi/clients/ssi_protocol.c
+++ b/drivers/hsi/clients/ssi_protocol.c
@@ -960,15 +960,6 @@ static int ssip_pn_stop(struct net_device *dev)
return 0;
}
-static int ssip_pn_set_mtu(struct net_device *dev, int new_mtu)
-{
- if (new_mtu > SSIP_MAX_MTU || new_mtu < PHONET_MIN_MTU)
- return -EINVAL;
- dev->mtu = new_mtu;
-
- return 0;
-}
-
static void ssip_xmit_work(struct work_struct *work)
{
struct ssi_protocol *ssi =
@@ -1060,7 +1051,6 @@ static const struct net_device_ops ssip_pn_ops = {
.ndo_open = ssip_pn_open,
.ndo_stop = ssip_pn_stop,
.ndo_start_xmit = ssip_pn_xmit,
- .ndo_change_mtu = ssip_pn_set_mtu,
};
static void ssip_pn_setup(struct net_device *dev)
@@ -1136,6 +1126,10 @@ static int ssi_protocol_probe(struct device *dev)
goto out1;
}
+ /* MTU range: 6 - 65535 */
+ ssi->netdev->min_mtu = PHONET_MIN_MTU;
+ ssi->netdev->max_mtu = SSIP_MAX_MTU;
+
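With the range fields set, the bounds check that the removed ssip_pn_set_mtu() performed by hand happens centrally in the core; roughly (a sketch of the net-core behavior introduced alongside min_mtu/max_mtu):

	/*
	 * In dev_set_mtu():
	 *   if (dev->min_mtu && new_mtu < dev->min_mtu)
	 *           return -EINVAL;
	 *   if (dev->max_mtu && new_mtu > dev->max_mtu)
	 *           return -EINVAL;
	 */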
SET_NETDEV_DEV(ssi->netdev, dev);
netif_carrier_off(ssi->netdev);
err = register_netdev(ssi->netdev);
diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
index 16f91c8490fe..5fb4c6d9209b 100644
--- a/drivers/hv/channel.c
+++ b/drivers/hv/channel.c
@@ -39,7 +39,7 @@
* vmbus_setevent- Trigger an event notification on the specified
* channel.
*/
-static void vmbus_setevent(struct vmbus_channel *channel)
+void vmbus_setevent(struct vmbus_channel *channel)
{
struct hv_monitor_page *monitorpage;
@@ -65,6 +65,7 @@ static void vmbus_setevent(struct vmbus_channel *channel)
vmbus_set_event(channel);
}
}
+EXPORT_SYMBOL_GPL(vmbus_setevent);
/*
* vmbus_open - Open the specified channel.
@@ -635,8 +636,6 @@ int vmbus_sendpacket_ctl(struct vmbus_channel *channel, void *buffer,
u32 packetlen_aligned = ALIGN(packetlen, sizeof(u64));
struct kvec bufferlist[3];
u64 aligned_data = 0;
- int ret;
- bool signal = false;
bool lock = channel->acquire_ring_lock;
int num_vecs = ((bufferlen != 0) ? 3 : 1);
@@ -656,33 +655,9 @@ int vmbus_sendpacket_ctl(struct vmbus_channel *channel, void *buffer,
bufferlist[2].iov_base = &aligned_data;
bufferlist[2].iov_len = (packetlen_aligned - packetlen);
- ret = hv_ringbuffer_write(&channel->outbound, bufferlist, num_vecs,
- &signal, lock, channel->signal_policy);
-
- /*
- * Signalling the host is conditional on many factors:
- * 1. The ring state changed from being empty to non-empty.
- * This is tracked by the variable "signal".
- * 2. The variable kick_q tracks if more data will be placed
- * on the ring. We will not signal if more data is
- * to be placed.
- *
- * Based on the channel signal state, we will decide
- * which signaling policy will be applied.
- *
- * If we cannot write to the ring-buffer; signal the host
- * even if we may not have written anything. This is a rare
- * enough condition that it should not matter.
- * NOTE: in this case, the hvsock channel is an exception, because
- * it looks the host side's hvsock implementation has a throttling
- * mechanism which can hurt the performance otherwise.
- */
-
- if (((ret == 0) && kick_q && signal) ||
- (ret && !is_hvsock_channel(channel)))
- vmbus_setevent(channel);
+ return hv_ringbuffer_write(channel, bufferlist, num_vecs,
+ lock, kick_q);
- return ret;
}
EXPORT_SYMBOL(vmbus_sendpacket_ctl);
@@ -723,7 +698,6 @@ int vmbus_sendpacket_pagebuffer_ctl(struct vmbus_channel *channel,
u32 flags,
bool kick_q)
{
- int ret;
int i;
struct vmbus_channel_packet_page_buffer desc;
u32 descsize;
@@ -731,7 +705,6 @@ int vmbus_sendpacket_pagebuffer_ctl(struct vmbus_channel *channel,
u32 packetlen_aligned;
struct kvec bufferlist[3];
u64 aligned_data = 0;
- bool signal = false;
bool lock = channel->acquire_ring_lock;
if (pagecount > MAX_PAGE_BUFFER_COUNT)
@@ -769,29 +742,8 @@ int vmbus_sendpacket_pagebuffer_ctl(struct vmbus_channel *channel,
bufferlist[2].iov_base = &aligned_data;
bufferlist[2].iov_len = (packetlen_aligned - packetlen);
- ret = hv_ringbuffer_write(&channel->outbound, bufferlist, 3,
- &signal, lock, channel->signal_policy);
-
- /*
- * Signalling the host is conditional on many factors:
- * 1. The ring state changed from being empty to non-empty.
- * This is tracked by the variable "signal".
- * 2. The variable kick_q tracks if more data will be placed
- * on the ring. We will not signal if more data is
- * to be placed.
- *
- * Based on the channel signal state, we will decide
- * which signaling policy will be applied.
- *
- * If we cannot write to the ring-buffer; signal the host
- * even if we may not have written anything. This is a rare
- * enough condition that it should not matter.
- */
-
- if (((ret == 0) && kick_q && signal) || (ret))
- vmbus_setevent(channel);
-
- return ret;
+ return hv_ringbuffer_write(channel, bufferlist, 3,
+ lock, kick_q);
}
EXPORT_SYMBOL_GPL(vmbus_sendpacket_pagebuffer_ctl);
@@ -822,12 +774,10 @@ int vmbus_sendpacket_mpb_desc(struct vmbus_channel *channel,
u32 desc_size,
void *buffer, u32 bufferlen, u64 requestid)
{
- int ret;
u32 packetlen;
u32 packetlen_aligned;
struct kvec bufferlist[3];
u64 aligned_data = 0;
- bool signal = false;
bool lock = channel->acquire_ring_lock;
packetlen = desc_size + bufferlen;
@@ -848,13 +798,8 @@ int vmbus_sendpacket_mpb_desc(struct vmbus_channel *channel,
bufferlist[2].iov_base = &aligned_data;
bufferlist[2].iov_len = (packetlen_aligned - packetlen);
- ret = hv_ringbuffer_write(&channel->outbound, bufferlist, 3,
- &signal, lock, channel->signal_policy);
-
- if (ret == 0 && signal)
- vmbus_setevent(channel);
-
- return ret;
+ return hv_ringbuffer_write(channel, bufferlist, 3,
+ lock, true);
}
EXPORT_SYMBOL_GPL(vmbus_sendpacket_mpb_desc);
@@ -866,14 +811,12 @@ int vmbus_sendpacket_multipagebuffer(struct vmbus_channel *channel,
struct hv_multipage_buffer *multi_pagebuffer,
void *buffer, u32 bufferlen, u64 requestid)
{
- int ret;
struct vmbus_channel_packet_multipage_buffer desc;
u32 descsize;
u32 packetlen;
u32 packetlen_aligned;
struct kvec bufferlist[3];
u64 aligned_data = 0;
- bool signal = false;
bool lock = channel->acquire_ring_lock;
u32 pfncount = NUM_PAGES_SPANNED(multi_pagebuffer->offset,
multi_pagebuffer->len);
@@ -913,13 +856,8 @@ int vmbus_sendpacket_multipagebuffer(struct vmbus_channel *channel,
bufferlist[2].iov_base = &aligned_data;
bufferlist[2].iov_len = (packetlen_aligned - packetlen);
- ret = hv_ringbuffer_write(&channel->outbound, bufferlist, 3,
- &signal, lock, channel->signal_policy);
-
- if (ret == 0 && signal)
- vmbus_setevent(channel);
-
- return ret;
+ return hv_ringbuffer_write(channel, bufferlist, 3,
+ lock, true);
}
EXPORT_SYMBOL_GPL(vmbus_sendpacket_multipagebuffer);
@@ -941,16 +879,9 @@ __vmbus_recvpacket(struct vmbus_channel *channel, void *buffer,
u32 bufferlen, u32 *buffer_actual_len, u64 *requestid,
bool raw)
{
- int ret;
- bool signal = false;
+ return hv_ringbuffer_read(channel, buffer, bufferlen,
+ buffer_actual_len, requestid, raw);
- ret = hv_ringbuffer_read(&channel->inbound, buffer, bufferlen,
- buffer_actual_len, requestid, &signal, raw);
-
- if (signal)
- vmbus_setevent(channel);
-
- return ret;
}
int vmbus_recvpacket(struct vmbus_channel *channel, void *buffer,
diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c
index 96a85cd39580..26b419203f16 100644
--- a/drivers/hv/channel_mgmt.c
+++ b/drivers/hv/channel_mgmt.c
@@ -134,7 +134,7 @@ static const struct vmbus_device vmbus_devs[] = {
},
/* Unknown GUID */
- { .dev_type = HV_UNKOWN,
+ { .dev_type = HV_UNKNOWN,
.perf_device = false,
},
};
@@ -163,9 +163,9 @@ static u16 hv_get_dev_type(const struct vmbus_channel *channel)
u16 i;
if (is_hvsock_channel(channel) || is_unsupported_vmbus_devs(guid))
- return HV_UNKOWN;
+ return HV_UNKNOWN;
- for (i = HV_IDE; i < HV_UNKOWN; i++) {
+ for (i = HV_IDE; i < HV_UNKNOWN; i++) {
if (!uuid_le_cmp(*guid, vmbus_devs[i].guid))
return i;
}
@@ -389,6 +389,7 @@ void vmbus_free_channels(void)
{
struct vmbus_channel *channel, *tmp;
+ mutex_lock(&vmbus_connection.channel_mutex);
list_for_each_entry_safe(channel, tmp, &vmbus_connection.chn_list,
listentry) {
/* hv_process_channel_removal() needs this */
@@ -396,6 +397,7 @@ void vmbus_free_channels(void)
vmbus_device_unregister(channel->device_obj);
}
+ mutex_unlock(&vmbus_connection.channel_mutex);
}
/*
@@ -447,8 +449,6 @@ static void vmbus_process_offer(struct vmbus_channel *newchannel)
}
dev_type = hv_get_dev_type(newchannel);
- if (dev_type == HV_NIC)
- set_channel_signal_state(newchannel, HV_SIGNAL_POLICY_EXPLICIT);
init_vp_index(newchannel, dev_type);
diff --git a/drivers/hv/connection.c b/drivers/hv/connection.c
index 78e6368a4423..6ce8b874e833 100644
--- a/drivers/hv/connection.c
+++ b/drivers/hv/connection.c
@@ -39,6 +39,7 @@ struct vmbus_connection vmbus_connection = {
.conn_state = DISCONNECTED,
.next_gpadl_handle = ATOMIC_INIT(0xE1E10),
};
+EXPORT_SYMBOL_GPL(vmbus_connection);
/*
* Negotiated protocol version with the host.
diff --git a/drivers/hv/hv.c b/drivers/hv/hv.c
index 60dbd6cb4640..446802ae8f1b 100644
--- a/drivers/hv/hv.c
+++ b/drivers/hv/hv.c
@@ -575,7 +575,7 @@ void hv_synic_clockevents_cleanup(void)
if (!(ms_hyperv.features & HV_X64_MSR_SYNTIMER_AVAILABLE))
return;
- for_each_online_cpu(cpu)
+ for_each_present_cpu(cpu)
clockevents_unbind_device(hv_context.clk_evt[cpu], cpu);
}
@@ -594,8 +594,10 @@ void hv_synic_cleanup(void *arg)
return;
/* Turn off clockevent device */
- if (ms_hyperv.features & HV_X64_MSR_SYNTIMER_AVAILABLE)
+ if (ms_hyperv.features & HV_X64_MSR_SYNTIMER_AVAILABLE) {
+ clockevents_unbind_device(hv_context.clk_evt[cpu], cpu);
hv_ce_shutdown(hv_context.clk_evt[cpu]);
+ }
rdmsrl(HV_X64_MSR_SINT0 + VMBUS_MESSAGE_SINT, shared_sint.as_uint64);
diff --git a/drivers/hv/hv_balloon.c b/drivers/hv/hv_balloon.c
index fdf8da929cbe..14c3dc4bd23c 100644
--- a/drivers/hv/hv_balloon.c
+++ b/drivers/hv/hv_balloon.c
@@ -564,6 +564,11 @@ struct hv_dynmem_device {
* next version to try.
*/
__u32 next_version;
+
+ /*
+ * The protocol version negotiated with the host.
+ */
+ __u32 version;
};
static struct hv_dynmem_device dm_device;
@@ -645,6 +650,7 @@ static void hv_bring_pgs_online(struct hv_hotadd_state *has,
{
int i;
+ pr_debug("Online %lu pages starting at pfn 0x%lx\n", size, start_pfn);
for (i = 0; i < size; i++)
hv_page_online_one(has, pfn_to_page(start_pfn + i));
}
@@ -685,7 +691,7 @@ static void hv_mem_hot_add(unsigned long start, unsigned long size,
(HA_CHUNK << PAGE_SHIFT));
if (ret) {
- pr_info("hot_add memory failed error is %d\n", ret);
+ pr_warn("hot_add memory failed error is %d\n", ret);
if (ret == -EEXIST) {
/*
* This error indicates that the error
@@ -814,6 +820,9 @@ static unsigned long handle_pg_range(unsigned long pg_start,
unsigned long old_covered_state;
unsigned long res = 0, flags;
+ pr_debug("Hot adding %lu pages starting at pfn 0x%lx.\n", pg_count,
+ pg_start);
+
spin_lock_irqsave(&dm_device.ha_lock, flags);
list_for_each_entry(has, &dm_device.ha_region_list, list) {
/*
@@ -1025,8 +1034,13 @@ static void process_info(struct hv_dynmem_device *dm, struct dm_info_msg *msg)
switch (info_hdr->type) {
case INFO_TYPE_MAX_PAGE_CNT:
- pr_info("Received INFO_TYPE_MAX_PAGE_CNT\n");
- pr_info("Data Size is %d\n", info_hdr->data_size);
+ if (info_hdr->data_size == sizeof(__u64)) {
+ __u64 *max_page_count = (__u64 *)&info_hdr[1];
+
+ pr_info("INFO_TYPE_MAX_PAGE_CNT = %llu\n",
+ *max_page_count);
+ }
+
break;
default:
pr_info("Received Unknown type: %d\n", info_hdr->type);
@@ -1196,8 +1210,6 @@ static unsigned int alloc_balloon_pages(struct hv_dynmem_device *dm,
return num_pages;
}
-
-
static void balloon_up(struct work_struct *dummy)
{
unsigned int num_pages = dm_device.balloon_wrk.num_pages;
@@ -1224,6 +1236,10 @@ static void balloon_up(struct work_struct *dummy)
/* Refuse to balloon below the floor, keep the 2M granularity. */
if (avail_pages < num_pages || avail_pages - num_pages < floor) {
+ pr_warn("Balloon request will be partially fulfilled. %s\n",
+ avail_pages < num_pages ? "Not enough memory." :
+ "Balloon floor reached.");
+
num_pages = avail_pages > floor ? (avail_pages - floor) : 0;
num_pages -= num_pages % PAGES_IN_2M;
}
@@ -1245,6 +1261,9 @@ static void balloon_up(struct work_struct *dummy)
}
if (num_ballooned == 0 || num_ballooned == num_pages) {
+ pr_debug("Ballooned %u out of %u requested pages.\n",
+ num_pages, dm_device.balloon_wrk.num_pages);
+
bl_resp->more_pages = 0;
done = true;
dm_device.state = DM_INITIALIZED;
@@ -1292,12 +1311,16 @@ static void balloon_down(struct hv_dynmem_device *dm,
int range_count = req->range_count;
struct dm_unballoon_response resp;
int i;
+ unsigned int prev_pages_ballooned = dm->num_pages_ballooned;
for (i = 0; i < range_count; i++) {
free_balloon_pages(dm, &range_array[i]);
complete(&dm_device.config_event);
}
+ pr_debug("Freed %u ballooned pages.\n",
+ prev_pages_ballooned - dm->num_pages_ballooned);
+
if (req->more_pages == 1)
return;
@@ -1365,6 +1388,7 @@ static void version_resp(struct hv_dynmem_device *dm,
version_req.hdr.size = sizeof(struct dm_version_request);
version_req.hdr.trans_id = atomic_inc_return(&trans_id);
version_req.version.version = dm->next_version;
+ dm->version = version_req.version.version;
/*
* Set the next version to try in case current version fails.
@@ -1501,7 +1525,11 @@ static int balloon_probe(struct hv_device *dev,
struct dm_version_request version_req;
struct dm_capabilities cap_msg;
+#ifdef CONFIG_MEMORY_HOTPLUG
do_hot_add = hot_add;
+#else
+ do_hot_add = false;
+#endif
/*
* First allocate a send buffer.
@@ -1553,6 +1581,7 @@ static int balloon_probe(struct hv_device *dev,
version_req.hdr.trans_id = atomic_inc_return(&trans_id);
version_req.version.version = DYNMEM_PROTOCOL_VERSION_WIN10;
version_req.is_last_attempt = 0;
+ dm_device.version = version_req.version.version;
ret = vmbus_sendpacket(dev->channel, &version_req,
sizeof(struct dm_version_request),
@@ -1575,6 +1604,11 @@ static int balloon_probe(struct hv_device *dev,
ret = -ETIMEDOUT;
goto probe_error2;
}
+
+ pr_info("Using Dynamic Memory protocol version %u.%u\n",
+ DYNMEM_MAJOR_VERSION(dm_device.version),
+ DYNMEM_MINOR_VERSION(dm_device.version));
+
/*
* Now submit our capabilities to the host.
*/
diff --git a/drivers/hv/hv_snapshot.c b/drivers/hv/hv_snapshot.c
index a6707133c297..eee238cc60bd 100644
--- a/drivers/hv/hv_snapshot.c
+++ b/drivers/hv/hv_snapshot.c
@@ -31,7 +31,10 @@
#define VSS_MINOR 0
#define VSS_VERSION (VSS_MAJOR << 16 | VSS_MINOR)
-#define VSS_USERSPACE_TIMEOUT (msecs_to_jiffies(10 * 1000))
+/*
+ * Timeout values are based on expectations from the host
+ */
+#define VSS_FREEZE_TIMEOUT (15 * 60)
/*
* Global state maintained for transaction that is being processed. For a class
@@ -120,7 +123,7 @@ static int vss_handle_handshake(struct hv_vss_msg *vss_msg)
default:
return -EINVAL;
}
- pr_debug("VSS: userspace daemon ver. %d connected\n", dm_reg_value);
+ pr_info("VSS: userspace daemon ver. %d connected\n", dm_reg_value);
return 0;
}
@@ -128,8 +131,10 @@ static int vss_on_msg(void *msg, int len)
{
struct hv_vss_msg *vss_msg = (struct hv_vss_msg *)msg;
- if (len != sizeof(*vss_msg))
+ if (len != sizeof(*vss_msg)) {
+ pr_debug("VSS: Message size does not match length\n");
return -EINVAL;
+ }
if (vss_msg->vss_hdr.operation == VSS_OP_REGISTER ||
vss_msg->vss_hdr.operation == VSS_OP_REGISTER1) {
@@ -137,8 +142,11 @@ static int vss_on_msg(void *msg, int len)
* Don't process registration messages if we're in the middle
* of a transaction processing.
*/
- if (vss_transaction.state > HVUTIL_READY)
+ if (vss_transaction.state > HVUTIL_READY) {
+ pr_debug("VSS: Got unexpected registration request\n");
return -EINVAL;
+ }
+
return vss_handle_handshake(vss_msg);
} else if (vss_transaction.state == HVUTIL_USERSPACE_REQ) {
vss_transaction.state = HVUTIL_USERSPACE_RECV;
@@ -155,7 +163,7 @@ static int vss_on_msg(void *msg, int len)
}
} else {
/* This is a spurious call! */
- pr_warn("VSS: Transaction not active\n");
+ pr_debug("VSS: Transaction not active\n");
return -EINVAL;
}
return 0;
@@ -168,8 +176,10 @@ static void vss_send_op(void)
struct hv_vss_msg *vss_msg;
/* The transaction state is wrong. */
- if (vss_transaction.state != HVUTIL_HOSTMSG_RECEIVED)
+ if (vss_transaction.state != HVUTIL_HOSTMSG_RECEIVED) {
+ pr_debug("VSS: Unexpected attempt to send to daemon\n");
return;
+ }
vss_msg = kzalloc(sizeof(*vss_msg), GFP_KERNEL);
if (!vss_msg)
@@ -179,7 +189,8 @@ static void vss_send_op(void)
vss_transaction.state = HVUTIL_USERSPACE_REQ;
- schedule_delayed_work(&vss_timeout_work, VSS_USERSPACE_TIMEOUT);
+ schedule_delayed_work(&vss_timeout_work, op == VSS_OP_FREEZE ?
+ VSS_FREEZE_TIMEOUT * HZ : HV_UTIL_TIMEOUT * HZ);
rc = hvutil_transport_send(hvt, vss_msg, sizeof(*vss_msg), NULL);
if (rc) {
@@ -210,9 +221,13 @@ static void vss_handle_request(struct work_struct *dummy)
case VSS_OP_HOT_BACKUP:
if (vss_transaction.state < HVUTIL_READY) {
/* Userspace is not registered yet */
+ pr_debug("VSS: Not ready for request.\n");
vss_respond_to_host(HV_E_FAIL);
return;
}
+
+ pr_debug("VSS: Received request for op code: %d\n",
+ vss_transaction.msg->vss_hdr.operation);
vss_transaction.state = HVUTIL_HOSTMSG_RECEIVED;
vss_send_op();
return;
@@ -353,8 +368,10 @@ hv_vss_init(struct hv_util_service *srv)
hvt = hvutil_transport_init(vss_devname, CN_VSS_IDX, CN_VSS_VAL,
vss_on_msg, vss_on_reset);
- if (!hvt)
+ if (!hvt) {
+ pr_warn("VSS: Failed to initialize transport\n");
return -EFAULT;
+ }
return 0;
}
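
The op-dependent timeout in vss_send_op() above follows the usual kernel pattern of converting a seconds constant to jiffies at the call site. A hedged sketch of that selection; the helper itself is hypothetical and does not exist in the driver.

/* Illustrative only: pick the per-operation timeout in jiffies.
 * VSS_OP_FREEZE gets the long 15-minute window because guest I/O is
 * frozen while the daemon works; everything else keeps the generic
 * HV_UTIL_TIMEOUT. Constant names match the patch, but this helper
 * itself does not exist in the driver.
 */
static unsigned long vss_op_timeout(int op)
{
	unsigned int secs = (op == VSS_OP_FREEZE) ? VSS_FREEZE_TIMEOUT
						  : HV_UTIL_TIMEOUT;

	return secs * HZ;	/* seconds -> jiffies */
}
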
diff --git a/drivers/hv/hv_util.c b/drivers/hv/hv_util.c
index bcd06306f3e8..e7707747f56d 100644
--- a/drivers/hv/hv_util.c
+++ b/drivers/hv/hv_util.c
@@ -389,16 +389,19 @@ static int util_probe(struct hv_device *dev,
ts_srv_version = TS_VERSION_1;
hb_srv_version = HB_VERSION_1;
break;
- case(VERSION_WIN10):
+ case VERSION_WIN7:
+ case VERSION_WIN8:
+ case VERSION_WIN8_1:
util_fw_version = UTIL_FW_VERSION;
sd_srv_version = SD_VERSION;
- ts_srv_version = TS_VERSION;
+ ts_srv_version = TS_VERSION_3;
hb_srv_version = HB_VERSION;
break;
+ case VERSION_WIN10:
default:
util_fw_version = UTIL_FW_VERSION;
sd_srv_version = SD_VERSION;
- ts_srv_version = TS_VERSION_3;
+ ts_srv_version = TS_VERSION;
hb_srv_version = HB_VERSION;
}
diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h
index a5b4442433c8..0675b395ce5c 100644
--- a/drivers/hv/hyperv_vmbus.h
+++ b/drivers/hv/hyperv_vmbus.h
@@ -38,7 +38,7 @@
/*
* Timeout for guest-host handshake for services.
*/
-#define HV_UTIL_NEGO_TIMEOUT 60
+#define HV_UTIL_NEGO_TIMEOUT 55
/*
* The below CPUID leaves are present if VersionAndFeatures.HypervisorPresent
@@ -527,14 +527,14 @@ int hv_ringbuffer_init(struct hv_ring_buffer_info *ring_info,
void hv_ringbuffer_cleanup(struct hv_ring_buffer_info *ring_info);
-int hv_ringbuffer_write(struct hv_ring_buffer_info *ring_info,
+int hv_ringbuffer_write(struct vmbus_channel *channel,
struct kvec *kv_list,
- u32 kv_count, bool *signal, bool lock,
- enum hv_signal_policy policy);
+ u32 kv_count, bool lock,
+ bool kick_q);
-int hv_ringbuffer_read(struct hv_ring_buffer_info *inring_info,
+int hv_ringbuffer_read(struct vmbus_channel *channel,
void *buffer, u32 buflen, u32 *buffer_actual_len,
- u64 *requestid, bool *signal, bool raw);
+ u64 *requestid, bool raw);
void hv_ringbuffer_get_debuginfo(struct hv_ring_buffer_info *ring_info,
struct hv_ring_buffer_debug_info *debug_info);
diff --git a/drivers/hv/ring_buffer.c b/drivers/hv/ring_buffer.c
index 08043da1a61c..cd49cb17eb7f 100644
--- a/drivers/hv/ring_buffer.c
+++ b/drivers/hv/ring_buffer.c
@@ -66,21 +66,25 @@ u32 hv_end_read(struct hv_ring_buffer_info *rbi)
* once the ring buffer is empty, it will clear the
* interrupt_mask and re-check to see if new data has
* arrived.
+ *
+ * KYS: Oct. 30, 2016:
+ * It looks like Windows hosts have logic to deal with DoS attacks that
+ * can be triggered if they receive interrupts when they are not
+ * expecting one. The host expects interrupts only when the ring
+ * transitions from empty to non-empty (or from full to non-full on the
+ * guest-to-host ring).
+ * So, base the signaling decision solely on the ring state until the
+ * host logic is fixed.
*/
-static bool hv_need_to_signal(u32 old_write, struct hv_ring_buffer_info *rbi,
- enum hv_signal_policy policy)
+static void hv_signal_on_write(u32 old_write, struct vmbus_channel *channel,
+ bool kick_q)
{
+ struct hv_ring_buffer_info *rbi = &channel->outbound;
+
virt_mb();
if (READ_ONCE(rbi->ring_buffer->interrupt_mask))
- return false;
-
- /*
- * When the client wants to control signaling,
- * we only honour the host interrupt mask.
- */
- if (policy == HV_SIGNAL_POLICY_EXPLICIT)
- return true;
+ return;
/* check interrupt_mask before read_index */
virt_rmb();
@@ -89,9 +93,9 @@ static bool hv_need_to_signal(u32 old_write, struct hv_ring_buffer_info *rbi,
* ring transitions from being empty to non-empty.
*/
if (old_write == READ_ONCE(rbi->ring_buffer->read_index))
- return true;
+ vmbus_setevent(channel);
- return false;
+ return;
}
/* Get the next write location for the specified ring buffer. */
@@ -280,9 +284,9 @@ void hv_ringbuffer_cleanup(struct hv_ring_buffer_info *ring_info)
}
/* Write to the ring buffer. */
-int hv_ringbuffer_write(struct hv_ring_buffer_info *outring_info,
- struct kvec *kv_list, u32 kv_count, bool *signal, bool lock,
- enum hv_signal_policy policy)
+int hv_ringbuffer_write(struct vmbus_channel *channel,
+ struct kvec *kv_list, u32 kv_count, bool lock,
+ bool kick_q)
{
int i = 0;
u32 bytes_avail_towrite;
@@ -292,6 +296,7 @@ int hv_ringbuffer_write(struct hv_ring_buffer_info *outring_info,
u32 old_write;
u64 prev_indices = 0;
unsigned long flags = 0;
+ struct hv_ring_buffer_info *outring_info = &channel->outbound;
for (i = 0; i < kv_count; i++)
totalbytes_towrite += kv_list[i].iov_len;
@@ -344,13 +349,13 @@ int hv_ringbuffer_write(struct hv_ring_buffer_info *outring_info,
if (lock)
spin_unlock_irqrestore(&outring_info->ring_lock, flags);
- *signal = hv_need_to_signal(old_write, outring_info, policy);
+ hv_signal_on_write(old_write, channel, kick_q);
return 0;
}
-int hv_ringbuffer_read(struct hv_ring_buffer_info *inring_info,
+int hv_ringbuffer_read(struct vmbus_channel *channel,
void *buffer, u32 buflen, u32 *buffer_actual_len,
- u64 *requestid, bool *signal, bool raw)
+ u64 *requestid, bool raw)
{
u32 bytes_avail_toread;
u32 next_read_location = 0;
@@ -359,6 +364,7 @@ int hv_ringbuffer_read(struct hv_ring_buffer_info *inring_info,
u32 offset;
u32 packetlen;
int ret = 0;
+ struct hv_ring_buffer_info *inring_info = &channel->inbound;
if (buflen <= 0)
return -EINVAL;
@@ -416,7 +422,7 @@ int hv_ringbuffer_read(struct hv_ring_buffer_info *inring_info,
/* Update the read index */
hv_set_next_read_location(inring_info, next_read_location);
- *signal = hv_need_to_signal_on_read(inring_info);
+ hv_signal_on_read(channel);
return ret;
}
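
The empty-to-non-empty test that hv_signal_on_write() now applies can be summarized in a few lines. A condensed, illustrative fragment; the virt_mb()/virt_rmb() ordering and the actual vmbus_setevent() call are elided, so this is a sketch of the decision only, not the driver's code.

/* Sketch of the signaling rule: skip if the host masked interrupts,
 * then signal only when the pre-write write_index equalled the host's
 * read_index, i.e. the ring was empty and the host may have stopped
 * polling. Barriers and the event call are elided.
 */
static bool guest_should_signal(u32 old_write, u32 interrupt_mask,
				u32 host_read_index)
{
	if (interrupt_mask)		/* host asked not to be interrupted */
		return false;

	/* signal only on the empty -> non-empty transition */
	return old_write == host_read_index;
}
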
diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
index 0276d2ef06ee..230c62e7f567 100644
--- a/drivers/hv/vmbus_drv.c
+++ b/drivers/hv/vmbus_drv.c
@@ -45,6 +45,11 @@
#include <linux/random.h>
#include "hyperv_vmbus.h"
+struct vmbus_dynid {
+ struct list_head node;
+ struct hv_vmbus_device_id id;
+};
+
static struct acpi_device *hv_acpi_dev;
static struct completion probe_event;
@@ -500,7 +505,7 @@ static ssize_t device_show(struct device *dev,
static DEVICE_ATTR_RO(device);
/* Set up per device attributes in /sys/bus/vmbus/devices/<bus device> */
-static struct attribute *vmbus_attrs[] = {
+static struct attribute *vmbus_dev_attrs[] = {
&dev_attr_id.attr,
&dev_attr_state.attr,
&dev_attr_monitor_id.attr,
@@ -528,7 +533,7 @@ static struct attribute *vmbus_attrs[] = {
&dev_attr_device.attr,
NULL,
};
-ATTRIBUTE_GROUPS(vmbus);
+ATTRIBUTE_GROUPS(vmbus_dev);
/*
* vmbus_uevent - add uevent for our device
@@ -565,10 +570,29 @@ static inline bool is_null_guid(const uuid_le *guid)
* Return a matching hv_vmbus_device_id pointer.
* If there is no match, return NULL.
*/
-static const struct hv_vmbus_device_id *hv_vmbus_get_id(
- const struct hv_vmbus_device_id *id,
+static const struct hv_vmbus_device_id *hv_vmbus_get_id(struct hv_driver *drv,
const uuid_le *guid)
{
+ const struct hv_vmbus_device_id *id = NULL;
+ struct vmbus_dynid *dynid;
+
+ /* Look at the dynamic ids first, before the static ones */
+ spin_lock(&drv->dynids.lock);
+ list_for_each_entry(dynid, &drv->dynids.list, node) {
+ if (!uuid_le_cmp(dynid->id.guid, *guid)) {
+ id = &dynid->id;
+ break;
+ }
+ }
+ spin_unlock(&drv->dynids.lock);
+
+ if (id)
+ return id;
+
+ id = drv->id_table;
+ if (id == NULL)
+ return NULL; /* empty device table */
+
for (; !is_null_guid(&id->guid); id++)
if (!uuid_le_cmp(id->guid, *guid))
return id;
@@ -576,6 +600,134 @@ static const struct hv_vmbus_device_id *hv_vmbus_get_id(
return NULL;
}
+/* vmbus_add_dynid - add a new device ID to this driver and re-probe devices */
+static int vmbus_add_dynid(struct hv_driver *drv, uuid_le *guid)
+{
+ struct vmbus_dynid *dynid;
+
+ dynid = kzalloc(sizeof(*dynid), GFP_KERNEL);
+ if (!dynid)
+ return -ENOMEM;
+
+ dynid->id.guid = *guid;
+
+ spin_lock(&drv->dynids.lock);
+ list_add_tail(&dynid->node, &drv->dynids.list);
+ spin_unlock(&drv->dynids.lock);
+
+ return driver_attach(&drv->driver);
+}
+
+static void vmbus_free_dynids(struct hv_driver *drv)
+{
+ struct vmbus_dynid *dynid, *n;
+
+ spin_lock(&drv->dynids.lock);
+ list_for_each_entry_safe(dynid, n, &drv->dynids.list, node) {
+ list_del(&dynid->node);
+ kfree(dynid);
+ }
+ spin_unlock(&drv->dynids.lock);
+}
+
+/* Parse string of form: 1b4e28ba-2fa1-11d2-883f-b9a761bde3fb */
+static int get_uuid_le(const char *str, uuid_le *uu)
+{
+ unsigned int b[16];
+ int i;
+
+ if (strlen(str) < 37)
+ return -1;
+
+ for (i = 0; i < 36; i++) {
+ switch (i) {
+ case 8: case 13: case 18: case 23:
+ if (str[i] != '-')
+ return -1;
+ break;
+ default:
+ if (!isxdigit(str[i]))
+ return -1;
+ }
+ }
+
+ /* parse into little-endian byte order */
+ if (sscanf(str,
+ "%2x%2x%2x%2x-%2x%2x-%2x%2x-%2x%2x-%2x%2x%2x%2x%2x%2x",
+ &b[3], &b[2], &b[1], &b[0],
+ &b[5], &b[4], &b[7], &b[6], &b[8], &b[9],
+ &b[10], &b[11], &b[12], &b[13], &b[14], &b[15]) != 16)
+ return -1;
+
+ for (i = 0; i < 16; i++)
+ uu->b[i] = b[i];
+ return 0;
+}
+
+/*
+ * new_id_store - sysfs frontend to vmbus_add_dynid()
+ *
+ * Allow GUIDs to be added to an existing driver via sysfs.
+ */
+static ssize_t new_id_store(struct device_driver *driver, const char *buf,
+ size_t count)
+{
+ struct hv_driver *drv = drv_to_hv_drv(driver);
+ uuid_le guid = NULL_UUID_LE;
+ ssize_t retval;
+
+ if (get_uuid_le(buf, &guid) != 0)
+ return -EINVAL;
+
+ if (hv_vmbus_get_id(drv, &guid))
+ return -EEXIST;
+
+ retval = vmbus_add_dynid(drv, &guid);
+ if (retval)
+ return retval;
+ return count;
+}
+static DRIVER_ATTR_WO(new_id);
+
+/*
+ * remove_id_store - remove a device GUID from this driver
+ *
+ * Removes a dynamic device GUID from this driver.
+ */
+static ssize_t remove_id_store(struct device_driver *driver, const char *buf,
+ size_t count)
+{
+ struct hv_driver *drv = drv_to_hv_drv(driver);
+ struct vmbus_dynid *dynid, *n;
+ uuid_le guid = NULL_UUID_LE;
+ size_t retval = -ENODEV;
+
+ if (get_uuid_le(buf, &guid))
+ return -EINVAL;
+
+ spin_lock(&drv->dynids.lock);
+ list_for_each_entry_safe(dynid, n, &drv->dynids.list, node) {
+ struct hv_vmbus_device_id *id = &dynid->id;
+
+ if (!uuid_le_cmp(id->guid, guid)) {
+ list_del(&dynid->node);
+ kfree(dynid);
+ retval = count;
+ break;
+ }
+ }
+ spin_unlock(&drv->dynids.lock);
+
+ return retval;
+}
+static DRIVER_ATTR_WO(remove_id);
+
+static struct attribute *vmbus_drv_attrs[] = {
+ &driver_attr_new_id.attr,
+ &driver_attr_remove_id.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(vmbus_drv);
/*
@@ -590,7 +742,7 @@ static int vmbus_match(struct device *device, struct device_driver *driver)
if (is_hvsock_channel(hv_dev->channel))
return drv->hvsock;
- if (hv_vmbus_get_id(drv->id_table, &hv_dev->dev_type))
+ if (hv_vmbus_get_id(drv, &hv_dev->dev_type))
return 1;
return 0;
@@ -607,7 +759,7 @@ static int vmbus_probe(struct device *child_device)
struct hv_device *dev = device_to_hv_device(child_device);
const struct hv_vmbus_device_id *dev_id;
- dev_id = hv_vmbus_get_id(drv->id_table, &dev->dev_type);
+ dev_id = hv_vmbus_get_id(drv, &dev->dev_type);
if (drv->probe) {
ret = drv->probe(dev, dev_id);
if (ret != 0)
@@ -684,7 +836,8 @@ static struct bus_type hv_bus = {
.remove = vmbus_remove,
.probe = vmbus_probe,
.uevent = vmbus_uevent,
- .dev_groups = vmbus_groups,
+ .dev_groups = vmbus_dev_groups,
+ .drv_groups = vmbus_drv_groups,
};
struct onmessage_work_context {
@@ -905,6 +1058,9 @@ int __vmbus_driver_register(struct hv_driver *hv_driver, struct module *owner, c
hv_driver->driver.mod_name = mod_name;
hv_driver->driver.bus = &hv_bus;
+ spin_lock_init(&hv_driver->dynids.lock);
+ INIT_LIST_HEAD(&hv_driver->dynids.list);
+
ret = driver_register(&hv_driver->driver);
return ret;
@@ -923,8 +1079,10 @@ void vmbus_driver_unregister(struct hv_driver *hv_driver)
{
pr_info("unregistering driver %s\n", hv_driver->name);
- if (!vmbus_exists())
+ if (!vmbus_exists()) {
driver_unregister(&hv_driver->driver);
+ vmbus_free_dynids(hv_driver);
+ }
}
EXPORT_SYMBOL_GPL(vmbus_driver_unregister);
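
get_uuid_le() above accepts the canonical 8-4-4-4-12 text form and stores the first three groups byte-reversed, matching the mixed-endian GUID layout Hyper-V uses on the wire. Below is a standalone userspace sketch of the same parse, handy for sanity-checking a string before writing it to /sys/bus/vmbus/drivers/<driver>/new_id; this harness is illustrative only, not driver code.

/* Userspace sketch of the little-endian GUID parse used by
 * new_id_store(). Test harness only; not driver code.
 */
#include <stdio.h>
#include <ctype.h>
#include <string.h>

static int parse_guid_le(const char *str, unsigned char uu[16])
{
	unsigned int b[16];
	int i;

	if (strlen(str) < 36)
		return -1;

	for (i = 0; i < 36; i++) {
		if (i == 8 || i == 13 || i == 18 || i == 23) {
			if (str[i] != '-')
				return -1;
		} else if (!isxdigit((unsigned char)str[i])) {
			return -1;
		}
	}

	if (sscanf(str, "%2x%2x%2x%2x-%2x%2x-%2x%2x-%2x%2x-%2x%2x%2x%2x%2x%2x",
		   &b[3], &b[2], &b[1], &b[0], &b[5], &b[4], &b[7], &b[6],
		   &b[8], &b[9], &b[10], &b[11], &b[12], &b[13], &b[14],
		   &b[15]) != 16)
		return -1;

	for (i = 0; i < 16; i++)
		uu[i] = (unsigned char)b[i];
	return 0;
}

int main(void)
{
	unsigned char uu[16];
	const char *s = "1b4e28ba-2fa1-11d2-883f-b9a761bde3fb";
	int i;

	if (parse_guid_le(s, uu))
		return 1;
	for (i = 0; i < 16; i++)
		printf("%02x", uu[i]);
	printf("\n");
	return 0;
}

Writing a GUID that passes this check to new_id triggers driver_attach() and re-probes unbound devices; remove_id drops the entry again.
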
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index 45cef3d2c75c..190d270b20a2 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -907,6 +907,17 @@ config SENSORS_MCP3021
This driver can also be built as a module. If so, the module
will be called mcp3021.
+config SENSORS_TC654
+ tristate "Microchip TC654/TC655 and compatibles"
+ depends on I2C
+ help
+ If you say yes here you get support for TC654 and TC655.
+ The TC654 and TC655 are PWM mode fan speed controllers with
+ FanSense technology for use with brushless DC fans.
+
+ This driver can also be built as a module. If so, the module
+ will be called tc654.
+
config SENSORS_MENF21BMC_HWMON
tristate "MEN 14F021P00 BMC Hardware Monitoring"
depends on MFD_MENF21BMC
@@ -1068,8 +1079,8 @@ config SENSORS_LM90
LM86, LM89 and LM99, Analog Devices ADM1032, ADT7461, and ADT7461A,
Maxim MAX6646, MAX6647, MAX6648, MAX6649, MAX6657, MAX6658, MAX6659,
MAX6680, MAX6681, MAX6692, MAX6695, MAX6696, ON Semiconductor NCT1008,
- Winbond/Nuvoton W83L771W/G/AWG/ASG, Philips SA56004, and GMT G781
- sensor chips.
+ Winbond/Nuvoton W83L771W/G/AWG/ASG, Philips SA56004, GMT G781, and
+ Texas Instruments TMP451 sensor chips.
This driver can also be built as a module. If so, the module
will be called lm90.
@@ -1591,6 +1602,17 @@ config SENSORS_TMP103
This driver can also be built as a module. If so, the module
will be called tmp103.
+config SENSORS_TMP108
+ tristate "Texas Instruments TMP108"
+ depends on I2C
+ select REGMAP_I2C
+ help
+ If you say yes here you get support for Texas Instruments TMP108
+ sensor chips.
+
+ This driver can also be built as a module. If so, the module
+ will be called tmp108.
+
config SENSORS_TMP401
tristate "Texas Instruments TMP401 and compatibles"
depends on I2C
diff --git a/drivers/hwmon/Makefile b/drivers/hwmon/Makefile
index aecf4ba17460..d2cb7e804a0f 100644
--- a/drivers/hwmon/Makefile
+++ b/drivers/hwmon/Makefile
@@ -122,6 +122,7 @@ obj-$(CONFIG_SENSORS_MAX6697) += max6697.o
obj-$(CONFIG_SENSORS_MAX31790) += max31790.o
obj-$(CONFIG_SENSORS_MC13783_ADC)+= mc13783-adc.o
obj-$(CONFIG_SENSORS_MCP3021) += mcp3021.o
+obj-$(CONFIG_SENSORS_TC654) += tc654.o
obj-$(CONFIG_SENSORS_MENF21BMC_HWMON) += menf21bmc_hwmon.o
obj-$(CONFIG_SENSORS_NCT6683) += nct6683.o
obj-$(CONFIG_SENSORS_NCT6775) += nct6775.o
@@ -152,6 +153,7 @@ obj-$(CONFIG_SENSORS_TC74) += tc74.o
obj-$(CONFIG_SENSORS_THMC50) += thmc50.o
obj-$(CONFIG_SENSORS_TMP102) += tmp102.o
obj-$(CONFIG_SENSORS_TMP103) += tmp103.o
+obj-$(CONFIG_SENSORS_TMP108) += tmp108.o
obj-$(CONFIG_SENSORS_TMP401) += tmp401.o
obj-$(CONFIG_SENSORS_TMP421) += tmp421.o
obj-$(CONFIG_SENSORS_TWL4030_MADC)+= twl4030-madc-hwmon.o
diff --git a/drivers/hwmon/adm1025.c b/drivers/hwmon/adm1025.c
index d6c767ace916..1abb4609b412 100644
--- a/drivers/hwmon/adm1025.c
+++ b/drivers/hwmon/adm1025.c
@@ -93,7 +93,7 @@ static const int in_scale[6] = { 2500, 2250, 3300, 5000, 12000, 3300 };
#define IN_FROM_REG(reg, scale) (((reg) * (scale) + 96) / 192)
#define IN_TO_REG(val, scale) ((val) <= 0 ? 0 : \
- (val) * 192 >= (scale) * 255 ? 255 : \
+ (val) >= (scale) * 255 / 192 ? 255 : \
((val) * 192 + (scale) / 2) / (scale))
#define TEMP_FROM_REG(reg) ((reg) * 1000)
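
The rewritten IN_TO_REG() comparison above is the same overflow fix applied throughout the hwmon patches below: clamp or compare the raw userspace value before multiplying it, because `(val) * 192` can overflow while `(scale) * 255 / 192` stays small. A userspace illustration with hypothetical numbers:

/* Demonstrates why the limit checks now clamp the raw userspace value
 * before scaling: the old `val * 192 >= scale * 255` form overflows
 * for huge inputs and can slip past the range check, while dividing
 * the constant side first never produces a large product. Numbers are
 * hypothetical, chosen only to show the effect on 32-bit int.
 */
#include <stdio.h>
#include <limits.h>

int main(void)
{
	int scale = 3300;		/* full-scale mV, as in in_scale[] */
	int val = INT_MAX / 100;	/* absurd value written via sysfs */

	/* old form would compute val * 192: undefined 32-bit overflow */
	printf("val * 192 would overflow: %s\n",
	       val > INT_MAX / 192 ? "yes" : "no");

	/* new form keeps both sides small */
	printf("val >= scale * 255 / 192: %s\n",
	       val >= scale * 255 / 192 ? "yes" : "no");
	return 0;
}
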
diff --git a/drivers/hwmon/adm1026.c b/drivers/hwmon/adm1026.c
index e67b9a50ac7c..b2a5d9e5c590 100644
--- a/drivers/hwmon/adm1026.c
+++ b/drivers/hwmon/adm1026.c
@@ -197,8 +197,9 @@ static int adm1026_scaling[] = { /* .001 Volts */
};
#define NEG12_OFFSET 16000
#define SCALE(val, from, to) (((val)*(to) + ((from)/2))/(from))
-#define INS_TO_REG(n, val) (clamp_val(SCALE(val, adm1026_scaling[n], 192),\
- 0, 255))
+#define INS_TO_REG(n, val) \
+ SCALE(clamp_val(val, 0, 255 * adm1026_scaling[n] / 192), \
+ adm1026_scaling[n], 192)
#define INS_FROM_REG(n, val) (SCALE(val, 192, adm1026_scaling[n]))
/*
@@ -215,11 +216,11 @@ static int adm1026_scaling[] = { /* .001 Volts */
#define DIV_TO_REG(val) ((val) >= 8 ? 3 : (val) >= 4 ? 2 : (val) >= 2 ? 1 : 0)
/* Temperature is reported in 1 degC increments */
-#define TEMP_TO_REG(val) (clamp_val(((val) + ((val) < 0 ? -500 : 500)) \
- / 1000, -127, 127))
+#define TEMP_TO_REG(val) DIV_ROUND_CLOSEST(clamp_val(val, -128000, 127000), \
+ 1000)
#define TEMP_FROM_REG(val) ((val) * 1000)
-#define OFFSET_TO_REG(val) (clamp_val(((val) + ((val) < 0 ? -500 : 500)) \
- / 1000, -127, 127))
+#define OFFSET_TO_REG(val) DIV_ROUND_CLOSEST(clamp_val(val, -128000, 127000), \
+ 1000)
#define OFFSET_FROM_REG(val) ((val) * 1000)
#define PWM_TO_REG(val) (clamp_val(val, 0, 255))
@@ -233,7 +234,8 @@ static int adm1026_scaling[] = { /* .001 Volts */
* indicates that the DAC could be used to drive the fans, but in our
* example board (Arima HDAMA) it isn't connected to the fans at all.
*/
-#define DAC_TO_REG(val) (clamp_val(((((val) * 255) + 500) / 2500), 0, 255))
+#define DAC_TO_REG(val) DIV_ROUND_CLOSEST(clamp_val(val, 0, 2500) * 255, \
+ 2500)
#define DAC_FROM_REG(val) (((val) * 2500) / 255)
/*
@@ -593,7 +595,10 @@ static ssize_t set_in16_min(struct device *dev, struct device_attribute *attr,
return err;
mutex_lock(&data->update_lock);
- data->in_min[16] = INS_TO_REG(16, val + NEG12_OFFSET);
+ data->in_min[16] = INS_TO_REG(16,
+ clamp_val(val, INT_MIN,
+ INT_MAX - NEG12_OFFSET) +
+ NEG12_OFFSET);
adm1026_write_value(client, ADM1026_REG_IN_MIN[16], data->in_min[16]);
mutex_unlock(&data->update_lock);
return count;
@@ -618,7 +623,10 @@ static ssize_t set_in16_max(struct device *dev, struct device_attribute *attr,
return err;
mutex_lock(&data->update_lock);
- data->in_max[16] = INS_TO_REG(16, val+NEG12_OFFSET);
+ data->in_max[16] = INS_TO_REG(16,
+ clamp_val(val, INT_MIN,
+ INT_MAX - NEG12_OFFSET) +
+ NEG12_OFFSET);
adm1026_write_value(client, ADM1026_REG_IN_MAX[16], data->in_max[16]);
mutex_unlock(&data->update_lock);
return count;
diff --git a/drivers/hwmon/adm9240.c b/drivers/hwmon/adm9240.c
index 2fe1828bd10b..72bf2489511e 100644
--- a/drivers/hwmon/adm9240.c
+++ b/drivers/hwmon/adm9240.c
@@ -98,13 +98,15 @@ static inline unsigned int IN_FROM_REG(u8 reg, int n)
static inline u8 IN_TO_REG(unsigned long val, int n)
{
- return clamp_val(SCALE(val, 192, nom_mv[n]), 0, 255);
+ val = clamp_val(val, 0, nom_mv[n] * 255 / 192);
+ return SCALE(val, 192, nom_mv[n]);
}
/* temperature range: -40..125, 127 disables temperature alarm */
static inline s8 TEMP_TO_REG(long val)
{
- return clamp_val(SCALE(val, 1, 1000), -40, 127);
+ val = clamp_val(val, -40000, 127000);
+ return SCALE(val, 1, 1000);
}
/* two fans, each with low fan speed limit */
@@ -122,7 +124,8 @@ static inline unsigned int FAN_FROM_REG(u8 reg, u8 div)
/* analog out 0..1250mV */
static inline u8 AOUT_TO_REG(unsigned long val)
{
- return clamp_val(SCALE(val, 255, 1250), 0, 255);
+ val = clamp_val(val, 0, 1250);
+ return SCALE(val, 255, 1250);
}
static inline unsigned int AOUT_FROM_REG(u8 reg)
diff --git a/drivers/hwmon/adt7411.c b/drivers/hwmon/adt7411.c
index 812fbc00f693..bdeaece9641d 100644
--- a/drivers/hwmon/adt7411.c
+++ b/drivers/hwmon/adt7411.c
@@ -55,7 +55,7 @@ struct adt7411_data {
struct mutex device_lock; /* for "atomic" device accesses */
struct mutex update_lock;
unsigned long next_update;
- int vref_cached;
+ long vref_cached;
struct i2c_client *client;
bool use_ext_temp;
};
@@ -114,85 +114,6 @@ static int adt7411_modify_bit(struct i2c_client *client, u8 reg, u8 bit,
return ret;
}
-static ssize_t adt7411_show_vdd(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct adt7411_data *data = dev_get_drvdata(dev);
- struct i2c_client *client = data->client;
- int ret = adt7411_read_10_bit(client, ADT7411_REG_INT_TEMP_VDD_LSB,
- ADT7411_REG_VDD_MSB, 2);
-
- return ret < 0 ? ret : sprintf(buf, "%u\n", ret * 7000 / 1024);
-}
-
-static ssize_t adt7411_show_temp(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- int nr = to_sensor_dev_attr(attr)->index;
- struct adt7411_data *data = dev_get_drvdata(dev);
- struct i2c_client *client = data->client;
- int val;
- struct {
- u8 low;
- u8 high;
- } reg[2] = {
- { ADT7411_REG_INT_TEMP_VDD_LSB, ADT7411_REG_INT_TEMP_MSB },
- { ADT7411_REG_EXT_TEMP_AIN14_LSB,
- ADT7411_REG_EXT_TEMP_AIN1_MSB },
- };
-
- val = adt7411_read_10_bit(client, reg[nr].low, reg[nr].high, 0);
- if (val < 0)
- return val;
-
- val = val & 0x200 ? val - 0x400 : val; /* 10 bit signed */
-
- return sprintf(buf, "%d\n", val * 250);
-}
-
-static ssize_t adt7411_show_input(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- int nr = to_sensor_dev_attr(attr)->index;
- struct adt7411_data *data = dev_get_drvdata(dev);
- struct i2c_client *client = data->client;
- int val;
- u8 lsb_reg, lsb_shift;
-
- mutex_lock(&data->update_lock);
- if (time_after_eq(jiffies, data->next_update)) {
- val = i2c_smbus_read_byte_data(client, ADT7411_REG_CFG3);
- if (val < 0)
- goto exit_unlock;
-
- if (val & ADT7411_CFG3_REF_VDD) {
- val = adt7411_read_10_bit(client,
- ADT7411_REG_INT_TEMP_VDD_LSB,
- ADT7411_REG_VDD_MSB, 2);
- if (val < 0)
- goto exit_unlock;
-
- data->vref_cached = val * 7000 / 1024;
- } else {
- data->vref_cached = 2250;
- }
-
- data->next_update = jiffies + HZ;
- }
-
- lsb_reg = ADT7411_REG_EXT_TEMP_AIN14_LSB + (nr >> 2);
- lsb_shift = 2 * (nr & 0x03);
- val = adt7411_read_10_bit(client, lsb_reg,
- ADT7411_REG_EXT_TEMP_AIN1_MSB + nr, lsb_shift);
- if (val < 0)
- goto exit_unlock;
-
- val = sprintf(buf, "%u\n", val * data->vref_cached / 1024);
- exit_unlock:
- mutex_unlock(&data->update_lock);
- return val;
-}
-
static ssize_t adt7411_show_bit(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -228,65 +149,157 @@ static ssize_t adt7411_set_bit(struct device *dev,
return ret < 0 ? ret : count;
}
-
#define ADT7411_BIT_ATTR(__name, __reg, __bit) \
SENSOR_DEVICE_ATTR_2(__name, S_IRUGO | S_IWUSR, adt7411_show_bit, \
adt7411_set_bit, __bit, __reg)
-static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, adt7411_show_temp, NULL, 0);
-static SENSOR_DEVICE_ATTR(temp2_input, S_IRUGO, adt7411_show_temp, NULL, 1);
-static DEVICE_ATTR(in0_input, S_IRUGO, adt7411_show_vdd, NULL);
-static SENSOR_DEVICE_ATTR(in1_input, S_IRUGO, adt7411_show_input, NULL, 0);
-static SENSOR_DEVICE_ATTR(in2_input, S_IRUGO, adt7411_show_input, NULL, 1);
-static SENSOR_DEVICE_ATTR(in3_input, S_IRUGO, adt7411_show_input, NULL, 2);
-static SENSOR_DEVICE_ATTR(in4_input, S_IRUGO, adt7411_show_input, NULL, 3);
-static SENSOR_DEVICE_ATTR(in5_input, S_IRUGO, adt7411_show_input, NULL, 4);
-static SENSOR_DEVICE_ATTR(in6_input, S_IRUGO, adt7411_show_input, NULL, 5);
-static SENSOR_DEVICE_ATTR(in7_input, S_IRUGO, adt7411_show_input, NULL, 6);
-static SENSOR_DEVICE_ATTR(in8_input, S_IRUGO, adt7411_show_input, NULL, 7);
static ADT7411_BIT_ATTR(no_average, ADT7411_REG_CFG2, ADT7411_CFG2_DISABLE_AVG);
static ADT7411_BIT_ATTR(fast_sampling, ADT7411_REG_CFG3, ADT7411_CFG3_ADC_CLK_225);
static ADT7411_BIT_ATTR(adc_ref_vdd, ADT7411_REG_CFG3, ADT7411_CFG3_REF_VDD);
static struct attribute *adt7411_attrs[] = {
- &sensor_dev_attr_temp1_input.dev_attr.attr,
- &sensor_dev_attr_temp2_input.dev_attr.attr,
- &dev_attr_in0_input.attr,
- &sensor_dev_attr_in1_input.dev_attr.attr,
- &sensor_dev_attr_in2_input.dev_attr.attr,
- &sensor_dev_attr_in3_input.dev_attr.attr,
- &sensor_dev_attr_in4_input.dev_attr.attr,
- &sensor_dev_attr_in5_input.dev_attr.attr,
- &sensor_dev_attr_in6_input.dev_attr.attr,
- &sensor_dev_attr_in7_input.dev_attr.attr,
- &sensor_dev_attr_in8_input.dev_attr.attr,
&sensor_dev_attr_no_average.dev_attr.attr,
&sensor_dev_attr_fast_sampling.dev_attr.attr,
&sensor_dev_attr_adc_ref_vdd.dev_attr.attr,
NULL
};
+ATTRIBUTE_GROUPS(adt7411);
-static umode_t adt7411_attrs_visible(struct kobject *kobj,
- struct attribute *attr, int index)
+static int adt7411_read_in_vdd(struct device *dev, u32 attr, long *val)
{
- struct device *dev = container_of(kobj, struct device, kobj);
struct adt7411_data *data = dev_get_drvdata(dev);
- bool visible = true;
+ struct i2c_client *client = data->client;
+ int ret;
- if (attr == &sensor_dev_attr_temp2_input.dev_attr.attr)
- visible = data->use_ext_temp;
- else if (attr == &sensor_dev_attr_in1_input.dev_attr.attr ||
- attr == &sensor_dev_attr_in2_input.dev_attr.attr)
- visible = !data->use_ext_temp;
+ switch (attr) {
+ case hwmon_in_input:
+ ret = adt7411_read_10_bit(client, ADT7411_REG_INT_TEMP_VDD_LSB,
+ ADT7411_REG_VDD_MSB, 2);
+ if (ret < 0)
+ return ret;
+ *val = ret * 7000 / 1024;
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int adt7411_read_in_chan(struct device *dev, u32 attr, int channel,
+ long *val)
+{
+ struct adt7411_data *data = dev_get_drvdata(dev);
+ struct i2c_client *client = data->client;
- return visible ? attr->mode : 0;
+ int ret;
+ int lsb_reg, lsb_shift;
+ int nr = channel - 1;
+
+ mutex_lock(&data->update_lock);
+ if (time_after_eq(jiffies, data->next_update)) {
+ ret = i2c_smbus_read_byte_data(client, ADT7411_REG_CFG3);
+ if (ret < 0)
+ goto exit_unlock;
+
+ if (ret & ADT7411_CFG3_REF_VDD) {
+ ret = adt7411_read_in_vdd(dev, hwmon_in_input,
+ &data->vref_cached);
+ if (ret < 0)
+ goto exit_unlock;
+ } else {
+ data->vref_cached = 2250;
+ }
+
+ data->next_update = jiffies + HZ;
+ }
+
+ switch (attr) {
+ case hwmon_in_input:
+ lsb_reg = ADT7411_REG_EXT_TEMP_AIN14_LSB + (nr >> 2);
+ lsb_shift = 2 * (nr & 0x03);
+ ret = adt7411_read_10_bit(client, lsb_reg,
+ ADT7411_REG_EXT_TEMP_AIN1_MSB + nr,
+ lsb_shift);
+ if (ret < 0)
+ goto exit_unlock;
+ *val = ret * data->vref_cached / 1024;
+ ret = 0;
+ break;
+ default:
+ ret = -EOPNOTSUPP;
+ break;
+ }
+ exit_unlock:
+ mutex_unlock(&data->update_lock);
+ return ret;
}
-static const struct attribute_group adt7411_group = {
- .attrs = adt7411_attrs,
- .is_visible = adt7411_attrs_visible,
-};
-__ATTRIBUTE_GROUPS(adt7411);
+static int adt7411_read_in(struct device *dev, u32 attr, int channel,
+ long *val)
+{
+ if (channel == 0)
+ return adt7411_read_in_vdd(dev, attr, val);
+ else
+ return adt7411_read_in_chan(dev, attr, channel, val);
+}
+
+static int adt7411_read_temp(struct device *dev, u32 attr, int channel,
+ long *val)
+{
+ struct adt7411_data *data = dev_get_drvdata(dev);
+ struct i2c_client *client = data->client;
+ int ret, regl, regh;
+
+ switch (attr) {
+ case hwmon_temp_input:
+ regl = channel ? ADT7411_REG_EXT_TEMP_AIN14_LSB :
+ ADT7411_REG_INT_TEMP_VDD_LSB;
+ regh = channel ? ADT7411_REG_EXT_TEMP_AIN1_MSB :
+ ADT7411_REG_INT_TEMP_MSB;
+ ret = adt7411_read_10_bit(client, regl, regh, 0);
+ if (ret < 0)
+ return ret;
+ ret = ret & 0x200 ? ret - 0x400 : ret; /* 10 bit signed */
+ *val = ret * 250;
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int adt7411_read(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long *val)
+{
+ switch (type) {
+ case hwmon_in:
+ return adt7411_read_in(dev, attr, channel, val);
+ case hwmon_temp:
+ return adt7411_read_temp(dev, attr, channel, val);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static umode_t adt7411_is_visible(const void *_data,
+ enum hwmon_sensor_types type,
+ u32 attr, int channel)
+{
+ const struct adt7411_data *data = _data;
+
+ switch (type) {
+ case hwmon_in:
+ if (channel > 0 && channel < 3)
+ return data->use_ext_temp ? 0 : S_IRUGO;
+ else
+ return S_IRUGO;
+ case hwmon_temp:
+ if (channel == 1)
+ return data->use_ext_temp ? S_IRUGO : 0;
+ else
+ return S_IRUGO;
+ default:
+ return 0;
+ }
+}
static int adt7411_detect(struct i2c_client *client,
struct i2c_board_info *info)
@@ -358,6 +371,51 @@ static int adt7411_init_device(struct adt7411_data *data)
return i2c_smbus_write_byte_data(data->client, ADT7411_REG_CFG1, val);
}
+static const u32 adt7411_in_config[] = {
+ HWMON_I_INPUT,
+ HWMON_I_INPUT,
+ HWMON_I_INPUT,
+ HWMON_I_INPUT,
+ HWMON_I_INPUT,
+ HWMON_I_INPUT,
+ HWMON_I_INPUT,
+ HWMON_I_INPUT,
+ HWMON_I_INPUT,
+ 0
+};
+
+static const struct hwmon_channel_info adt7411_in = {
+ .type = hwmon_in,
+ .config = adt7411_in_config,
+};
+
+static const u32 adt7411_temp_config[] = {
+ HWMON_T_INPUT,
+ HWMON_T_INPUT,
+ 0
+};
+
+static const struct hwmon_channel_info adt7411_temp = {
+ .type = hwmon_temp,
+ .config = adt7411_temp_config,
+};
+
+static const struct hwmon_channel_info *adt7411_info[] = {
+ &adt7411_in,
+ &adt7411_temp,
+ NULL
+};
+
+static const struct hwmon_ops adt7411_hwmon_ops = {
+ .is_visible = adt7411_is_visible,
+ .read = adt7411_read,
+};
+
+static const struct hwmon_chip_info adt7411_chip_info = {
+ .ops = &adt7411_hwmon_ops,
+ .info = adt7411_info,
+};
+
static int adt7411_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
@@ -382,9 +440,10 @@ static int adt7411_probe(struct i2c_client *client,
/* force update on first occasion */
data->next_update = jiffies;
- hwmon_dev = devm_hwmon_device_register_with_groups(dev, client->name,
- data,
- adt7411_groups);
+ hwmon_dev = devm_hwmon_device_register_with_info(dev, client->name,
+ data,
+ &adt7411_chip_info,
+ adt7411_groups);
return PTR_ERR_OR_ZERO(hwmon_dev);
}
diff --git a/drivers/hwmon/adt7462.c b/drivers/hwmon/adt7462.c
index 5929e126da63..19f2a6d48bac 100644
--- a/drivers/hwmon/adt7462.c
+++ b/drivers/hwmon/adt7462.c
@@ -810,8 +810,8 @@ static ssize_t set_temp_min(struct device *dev,
if (kstrtol(buf, 10, &temp) || !temp_enabled(data, attr->index))
return -EINVAL;
+ temp = clamp_val(temp, -64000, 191000);
temp = DIV_ROUND_CLOSEST(temp, 1000) + 64;
- temp = clamp_val(temp, 0, 255);
mutex_lock(&data->lock);
data->temp_min[attr->index] = temp;
@@ -848,8 +848,8 @@ static ssize_t set_temp_max(struct device *dev,
if (kstrtol(buf, 10, &temp) || !temp_enabled(data, attr->index))
return -EINVAL;
+ temp = clamp_val(temp, -64000, 191000);
temp = DIV_ROUND_CLOSEST(temp, 1000) + 64;
- temp = clamp_val(temp, 0, 255);
mutex_lock(&data->lock);
data->temp_max[attr->index] = temp;
@@ -912,9 +912,9 @@ static ssize_t set_volt_max(struct device *dev,
if (kstrtol(buf, 10, &temp) || !x)
return -EINVAL;
+ temp = clamp_val(temp, 0, 255 * x / 1000);
temp *= 1000; /* convert mV to uV */
temp = DIV_ROUND_CLOSEST(temp, x);
- temp = clamp_val(temp, 0, 255);
mutex_lock(&data->lock);
data->volt_max[attr->index] = temp;
@@ -954,9 +954,9 @@ static ssize_t set_volt_min(struct device *dev,
if (kstrtol(buf, 10, &temp) || !x)
return -EINVAL;
+ temp = clamp_val(temp, 0, 255 * x / 1000);
temp *= 1000; /* convert mV to uV */
temp = DIV_ROUND_CLOSEST(temp, x);
- temp = clamp_val(temp, 0, 255);
mutex_lock(&data->lock);
data->volt_min[attr->index] = temp;
@@ -1220,8 +1220,8 @@ static ssize_t set_pwm_hyst(struct device *dev,
if (kstrtol(buf, 10, &temp))
return -EINVAL;
+ temp = clamp_val(temp, 0, 15000);
temp = DIV_ROUND_CLOSEST(temp, 1000);
- temp = clamp_val(temp, 0, 15);
/* package things up */
temp &= ADT7462_PWM_HYST_MASK;
@@ -1306,8 +1306,8 @@ static ssize_t set_pwm_tmin(struct device *dev,
if (kstrtol(buf, 10, &temp))
return -EINVAL;
+ temp = clamp_val(temp, -64000, 191000);
temp = DIV_ROUND_CLOSEST(temp, 1000) + 64;
- temp = clamp_val(temp, 0, 255);
mutex_lock(&data->lock);
data->pwm_tmin[attr->index] = temp;
diff --git a/drivers/hwmon/adt7470.c b/drivers/hwmon/adt7470.c
index 6e60ca53406e..c9a1d9c25572 100644
--- a/drivers/hwmon/adt7470.c
+++ b/drivers/hwmon/adt7470.c
@@ -483,8 +483,8 @@ static ssize_t set_temp_min(struct device *dev,
if (kstrtol(buf, 10, &temp))
return -EINVAL;
+ temp = clamp_val(temp, -128000, 127000);
temp = DIV_ROUND_CLOSEST(temp, 1000);
- temp = clamp_val(temp, -128, 127);
mutex_lock(&data->lock);
data->temp_min[attr->index] = temp;
@@ -517,8 +517,8 @@ static ssize_t set_temp_max(struct device *dev,
if (kstrtol(buf, 10, &temp))
return -EINVAL;
+ temp = clamp_val(temp, -128000, 127000);
temp = DIV_ROUND_CLOSEST(temp, 1000);
- temp = clamp_val(temp, -128, 127);
mutex_lock(&data->lock);
data->temp_max[attr->index] = temp;
@@ -880,8 +880,8 @@ static ssize_t set_pwm_tmin(struct device *dev,
if (kstrtol(buf, 10, &temp))
return -EINVAL;
+ temp = clamp_val(temp, -128000, 127000);
temp = DIV_ROUND_CLOSEST(temp, 1000);
- temp = clamp_val(temp, -128, 127);
mutex_lock(&data->lock);
data->pwm_tmin[attr->index] = temp;
diff --git a/drivers/hwmon/amc6821.c b/drivers/hwmon/amc6821.c
index 12e851a5af48..46b4e35fd555 100644
--- a/drivers/hwmon/amc6821.c
+++ b/drivers/hwmon/amc6821.c
@@ -188,8 +188,8 @@ static struct amc6821_data *amc6821_update_device(struct device *dev)
!data->valid) {
for (i = 0; i < TEMP_IDX_LEN; i++)
- data->temp[i] = i2c_smbus_read_byte_data(client,
- temp_reg[i]);
+ data->temp[i] = (int8_t)i2c_smbus_read_byte_data(
+ client, temp_reg[i]);
data->stat1 = i2c_smbus_read_byte_data(client,
AMC6821_REG_STAT1);
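
The new int8_t cast matters because i2c_smbus_read_byte_data() returns the register byte zero-extended in an int (negative values are reserved for errno), so a reading of -1 degC arrives as 0xFF and was previously stored as 255. A two-line userspace illustration:

/* Why the (int8_t) cast recovers negative temperatures: the SMBus
 * helper returns the byte zero-extended, so the sign is lost until
 * the value is reinterpreted as signed 8-bit.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	int raw = 0xFF;		/* register reading for -1 degC */

	printf("zero-extended: %d, as int8_t: %d\n", raw, (int8_t)raw);
	/* prints: zero-extended: 255, as int8_t: -1 */
	return 0;
}
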
diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c
index 6a27eb2fed17..3ac4c03ba77b 100644
--- a/drivers/hwmon/coretemp.c
+++ b/drivers/hwmon/coretemp.c
@@ -51,6 +51,7 @@ static int force_tjmax;
module_param_named(tjmax, force_tjmax, int, 0444);
MODULE_PARM_DESC(tjmax, "TjMax value in degrees Celsius");
+#define PKG_SYSFS_ATTR_NO 1 /* Sysfs attribute for package temp */
#define BASE_SYSFS_ATTR_NO 2 /* Sysfs Base attr no for coretemp */
#define NUM_REAL_CORES 128 /* Number of Real cores per cpu */
#define CORETEMP_NAME_LENGTH 19 /* String Length of attrs */
@@ -58,7 +59,6 @@ MODULE_PARM_DESC(tjmax, "TjMax value in degrees Celsius");
#define TOTAL_ATTRS (MAX_CORE_ATTRS + 1)
#define MAX_CORE_DATA (NUM_REAL_CORES + BASE_SYSFS_ATTR_NO)
-#define TO_PHYS_ID(cpu) (cpu_data(cpu).phys_proc_id)
#define TO_CORE_ID(cpu) (cpu_data(cpu).cpu_core_id)
#define TO_ATTR_NO(cpu) (TO_CORE_ID(cpu) + BASE_SYSFS_ATTR_NO)
@@ -102,20 +102,17 @@ struct temp_data {
/* Platform Data per Physical CPU */
struct platform_data {
- struct device *hwmon_dev;
- u16 phys_proc_id;
- struct temp_data *core_data[MAX_CORE_DATA];
+ struct device *hwmon_dev;
+ u16 pkg_id;
+ struct cpumask cpumask;
+ struct temp_data *core_data[MAX_CORE_DATA];
struct device_attribute name_attr;
};
-struct pdev_entry {
- struct list_head list;
- struct platform_device *pdev;
- u16 phys_proc_id;
-};
-
-static LIST_HEAD(pdev_list);
-static DEFINE_MUTEX(pdev_list_mutex);
+/* Keep track of how many package pointers we allocated in init() */
+static int max_packages __read_mostly;
+/* Array of package pointers. Serialized by cpu hotplug lock */
+static struct platform_device **pkg_devices;
static ssize_t show_label(struct device *dev,
struct device_attribute *devattr, char *buf)
@@ -125,7 +122,7 @@ static ssize_t show_label(struct device *dev,
struct temp_data *tdata = pdata->core_data[attr->index];
if (tdata->is_pkg_data)
- return sprintf(buf, "Physical id %u\n", pdata->phys_proc_id);
+ return sprintf(buf, "Package id %u\n", pdata->pkg_id);
return sprintf(buf, "Core %u\n", tdata->cpu_core_id);
}
@@ -138,7 +135,9 @@ static ssize_t show_crit_alarm(struct device *dev,
struct platform_data *pdata = dev_get_drvdata(dev);
struct temp_data *tdata = pdata->core_data[attr->index];
+ mutex_lock(&tdata->update_lock);
rdmsr_on_cpu(tdata->cpu, tdata->status_reg, &eax, &edx);
+ mutex_unlock(&tdata->update_lock);
return sprintf(buf, "%d\n", (eax >> 5) & 1);
}
@@ -435,18 +434,10 @@ static int chk_ucode_version(unsigned int cpu)
static struct platform_device *coretemp_get_pdev(unsigned int cpu)
{
- u16 phys_proc_id = TO_PHYS_ID(cpu);
- struct pdev_entry *p;
-
- mutex_lock(&pdev_list_mutex);
+ int pkgid = topology_logical_package_id(cpu);
- list_for_each_entry(p, &pdev_list, list)
- if (p->phys_proc_id == phys_proc_id) {
- mutex_unlock(&pdev_list_mutex);
- return p->pdev;
- }
-
- mutex_unlock(&pdev_list_mutex);
+ if (pkgid >= 0 && pkgid < max_packages)
+ return pkg_devices[pkgid];
return NULL;
}
@@ -483,21 +474,11 @@ static int create_core_data(struct platform_device *pdev, unsigned int cpu,
* The attr number is always core id + 2
* The Pkgtemp will always show up as temp1_*, if available
*/
- attr_no = pkg_flag ? 1 : TO_ATTR_NO(cpu);
+ attr_no = pkg_flag ? PKG_SYSFS_ATTR_NO : TO_ATTR_NO(cpu);
if (attr_no > MAX_CORE_DATA - 1)
return -ERANGE;
- /*
- * Provide a single set of attributes for all HT siblings of a core
- * to avoid duplicate sensors (the processor ID and core ID of all
- * HT siblings of a core are the same).
- * Skip if a HT sibling of this core is already registered.
- * This is not an error.
- */
- if (pdata->core_data[attr_no] != NULL)
- return 0;
-
tdata = init_temp_data(cpu, pkg_flag);
if (!tdata)
return -ENOMEM;
@@ -539,21 +520,14 @@ exit_free:
return err;
}
-static void coretemp_add_core(unsigned int cpu, int pkg_flag)
+static void
+coretemp_add_core(struct platform_device *pdev, unsigned int cpu, int pkg_flag)
{
- struct platform_device *pdev = coretemp_get_pdev(cpu);
- int err;
-
- if (!pdev)
- return;
-
- err = create_core_data(pdev, cpu, pkg_flag);
- if (err)
+ if (create_core_data(pdev, cpu, pkg_flag))
dev_err(&pdev->dev, "Adding Core %u failed\n", cpu);
}
-static void coretemp_remove_core(struct platform_data *pdata,
- int indx)
+static void coretemp_remove_core(struct platform_data *pdata, int indx)
{
struct temp_data *tdata = pdata->core_data[indx];
@@ -574,7 +548,7 @@ static int coretemp_probe(struct platform_device *pdev)
if (!pdata)
return -ENOMEM;
- pdata->phys_proc_id = pdev->id;
+ pdata->pkg_id = pdev->id;
platform_set_drvdata(pdev, pdata);
pdata->hwmon_dev = devm_hwmon_device_register_with_groups(dev, DRVNAME,
@@ -602,85 +576,33 @@ static struct platform_driver coretemp_driver = {
.remove = coretemp_remove,
};
-static int coretemp_device_add(unsigned int cpu)
+static struct platform_device *coretemp_device_add(unsigned int cpu)
{
- int err;
+ int err, pkgid = topology_logical_package_id(cpu);
struct platform_device *pdev;
- struct pdev_entry *pdev_entry;
- mutex_lock(&pdev_list_mutex);
+ if (pkgid < 0)
+ return ERR_PTR(-ENOMEM);
- pdev = platform_device_alloc(DRVNAME, TO_PHYS_ID(cpu));
- if (!pdev) {
- err = -ENOMEM;
- pr_err("Device allocation failed\n");
- goto exit;
- }
-
- pdev_entry = kzalloc(sizeof(struct pdev_entry), GFP_KERNEL);
- if (!pdev_entry) {
- err = -ENOMEM;
- goto exit_device_put;
- }
+ pdev = platform_device_alloc(DRVNAME, pkgid);
+ if (!pdev)
+ return ERR_PTR(-ENOMEM);
err = platform_device_add(pdev);
if (err) {
- pr_err("Device addition failed (%d)\n", err);
- goto exit_device_free;
- }
-
- pdev_entry->pdev = pdev;
- pdev_entry->phys_proc_id = pdev->id;
-
- list_add_tail(&pdev_entry->list, &pdev_list);
- mutex_unlock(&pdev_list_mutex);
-
- return 0;
-
-exit_device_free:
- kfree(pdev_entry);
-exit_device_put:
- platform_device_put(pdev);
-exit:
- mutex_unlock(&pdev_list_mutex);
- return err;
-}
-
-static void coretemp_device_remove(unsigned int cpu)
-{
- struct pdev_entry *p, *n;
- u16 phys_proc_id = TO_PHYS_ID(cpu);
-
- mutex_lock(&pdev_list_mutex);
- list_for_each_entry_safe(p, n, &pdev_list, list) {
- if (p->phys_proc_id != phys_proc_id)
- continue;
- platform_device_unregister(p->pdev);
- list_del(&p->list);
- kfree(p);
+ platform_device_put(pdev);
+ return ERR_PTR(err);
}
- mutex_unlock(&pdev_list_mutex);
-}
-
-static bool is_any_core_online(struct platform_data *pdata)
-{
- int i;
- /* Find online cores, except pkgtemp data */
- for (i = MAX_CORE_DATA - 1; i >= 0; --i) {
- if (pdata->core_data[i] &&
- !pdata->core_data[i]->is_pkg_data) {
- return true;
- }
- }
- return false;
+ pkg_devices[pkgid] = pdev;
+ return pdev;
}
-static void get_core_online(unsigned int cpu)
+static int coretemp_cpu_online(unsigned int cpu)
{
- struct cpuinfo_x86 *c = &cpu_data(cpu);
struct platform_device *pdev = coretemp_get_pdev(cpu);
- int err;
+ struct cpuinfo_x86 *c = &cpu_data(cpu);
+ struct platform_data *pdata;
/*
* CPUID.06H.EAX[0] indicates whether the CPU has thermal
@@ -688,12 +610,12 @@ static void get_core_online(unsigned int cpu)
* without thermal sensors will be filtered out.
*/
if (!cpu_has(c, X86_FEATURE_DTHERM))
- return;
+ return -ENODEV;
if (!pdev) {
/* Check the microcode version of the CPU */
if (chk_ucode_version(cpu))
- return;
+ return -EINVAL;
/*
* Alright, we have DTS support.
@@ -701,101 +623,100 @@ static void get_core_online(unsigned int cpu)
* online. So, initialize per-pkg data structures and
* then bring this core online.
*/
- err = coretemp_device_add(cpu);
- if (err)
- return;
+ pdev = coretemp_device_add(cpu);
+ if (IS_ERR(pdev))
+ return PTR_ERR(pdev);
+
/*
* Check whether pkgtemp support is available.
* If so, add interfaces for pkgtemp.
*/
if (cpu_has(c, X86_FEATURE_PTS))
- coretemp_add_core(cpu, 1);
+ coretemp_add_core(pdev, cpu, 1);
}
+
+ pdata = platform_get_drvdata(pdev);
/*
- * Physical CPU device already exists.
- * So, just add interfaces for this core.
+ * Check whether a thread sibling is already online. If not, add the
+ * interface for this CPU core.
*/
- coretemp_add_core(cpu, 0);
+ if (!cpumask_intersects(&pdata->cpumask, topology_sibling_cpumask(cpu)))
+ coretemp_add_core(pdev, cpu, 0);
+
+ cpumask_set_cpu(cpu, &pdata->cpumask);
+ return 0;
}
-static void put_core_offline(unsigned int cpu)
+static int coretemp_cpu_offline(unsigned int cpu)
{
- int i, indx;
- struct platform_data *pdata;
struct platform_device *pdev = coretemp_get_pdev(cpu);
+ struct platform_data *pd;
+ struct temp_data *tdata;
+ int indx, target;
/* If the physical CPU device does not exist, just return */
if (!pdev)
- return;
-
- pdata = platform_get_drvdata(pdev);
-
- indx = TO_ATTR_NO(cpu);
+ return 0;
/* The core id is too big, just return */
+ indx = TO_ATTR_NO(cpu);
if (indx > MAX_CORE_DATA - 1)
- return;
+ return 0;
- if (pdata->core_data[indx] && pdata->core_data[indx]->cpu == cpu)
- coretemp_remove_core(pdata, indx);
+ pd = platform_get_drvdata(pdev);
+ tdata = pd->core_data[indx];
+
+ cpumask_clear_cpu(cpu, &pd->cpumask);
/*
- * If a HT sibling of a core is taken offline, but another HT sibling
- * of the same core is still online, register the alternate sibling.
- * This ensures that exactly one set of attributes is provided as long
- * as at least one HT sibling of a core is online.
+ * If this is the last thread sibling, remove the CPU core
+ * interface. If there is still a sibling online, transfer the
+ * target CPU of that core interface to it.
*/
- for_each_sibling(i, cpu) {
- if (i != cpu) {
- get_core_online(i);
- /*
- * Display temperature sensor data for one HT sibling
- * per core only, so abort the loop after one such
- * sibling has been found.
- */
- break;
- }
+ target = cpumask_any_and(&pd->cpumask, topology_sibling_cpumask(cpu));
+ if (target >= nr_cpu_ids) {
+ coretemp_remove_core(pd, indx);
+ } else if (tdata && tdata->cpu == cpu) {
+ mutex_lock(&tdata->update_lock);
+ tdata->cpu = target;
+ mutex_unlock(&tdata->update_lock);
}
+
/*
- * If all cores in this pkg are offline, remove the device.
- * coretemp_device_remove calls unregister_platform_device,
- * which in turn calls coretemp_remove. This removes the
- * pkgtemp entry and does other clean ups.
+ * If all cores in this pkg are offline, remove the device. This
+ * will invoke the platform driver remove function, which cleans up
+ * the rest.
*/
- if (!is_any_core_online(pdata))
- coretemp_device_remove(cpu);
-}
+ if (cpumask_empty(&pd->cpumask)) {
+ pkg_devices[topology_logical_package_id(cpu)] = NULL;
+ platform_device_unregister(pdev);
+ return 0;
+ }
-static int coretemp_cpu_callback(struct notifier_block *nfb,
- unsigned long action, void *hcpu)
-{
- unsigned int cpu = (unsigned long) hcpu;
-
- switch (action) {
- case CPU_ONLINE:
- case CPU_DOWN_FAILED:
- get_core_online(cpu);
- break;
- case CPU_DOWN_PREPARE:
- put_core_offline(cpu);
- break;
+ /*
+ * Check whether this CPU is the target for the package
+ * interface. If so, reassign the package interface to another
+ * online CPU.
+ */
+ tdata = pd->core_data[PKG_SYSFS_ATTR_NO];
+ if (tdata && tdata->cpu == cpu) {
+ target = cpumask_first(&pd->cpumask);
+ mutex_lock(&tdata->update_lock);
+ tdata->cpu = target;
+ mutex_unlock(&tdata->update_lock);
}
- return NOTIFY_OK;
+ return 0;
}
-
-static struct notifier_block coretemp_cpu_notifier __refdata = {
- .notifier_call = coretemp_cpu_callback,
-};
-
static const struct x86_cpu_id __initconst coretemp_ids[] = {
{ X86_VENDOR_INTEL, X86_FAMILY_ANY, X86_MODEL_ANY, X86_FEATURE_DTHERM },
{}
};
MODULE_DEVICE_TABLE(x86cpu, coretemp_ids);
+static enum cpuhp_state coretemp_hp_online;
+
static int __init coretemp_init(void)
{
- int i, err;
+ int err;
/*
* CPUID.06H.EAX[0] indicates whether the CPU has thermal
@@ -805,54 +726,38 @@ static int __init coretemp_init(void)
if (!x86_match_cpu(coretemp_ids))
return -ENODEV;
+ max_packages = topology_max_packages();
+ pkg_devices = kzalloc(max_packages * sizeof(struct platform_device *),
+ GFP_KERNEL);
+ if (!pkg_devices)
+ return -ENOMEM;
+
err = platform_driver_register(&coretemp_driver);
if (err)
- goto exit;
+ return err;
- cpu_notifier_register_begin();
- for_each_online_cpu(i)
- get_core_online(i);
-
-#ifndef CONFIG_HOTPLUG_CPU
- if (list_empty(&pdev_list)) {
- cpu_notifier_register_done();
- err = -ENODEV;
- goto exit_driver_unreg;
- }
-#endif
-
- __register_hotcpu_notifier(&coretemp_cpu_notifier);
- cpu_notifier_register_done();
+ err = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "hwmon/coretemp:online",
+ coretemp_cpu_online, coretemp_cpu_offline);
+ if (err < 0)
+ goto outdrv;
+ coretemp_hp_online = err;
return 0;
-#ifndef CONFIG_HOTPLUG_CPU
-exit_driver_unreg:
+outdrv:
platform_driver_unregister(&coretemp_driver);
-#endif
-exit:
+ kfree(pkg_devices);
return err;
}
+module_init(coretemp_init)
static void __exit coretemp_exit(void)
{
- struct pdev_entry *p, *n;
-
- cpu_notifier_register_begin();
- __unregister_hotcpu_notifier(&coretemp_cpu_notifier);
- mutex_lock(&pdev_list_mutex);
- list_for_each_entry_safe(p, n, &pdev_list, list) {
- platform_device_unregister(p->pdev);
- list_del(&p->list);
- kfree(p);
- }
- mutex_unlock(&pdev_list_mutex);
- cpu_notifier_register_done();
+ cpuhp_remove_state(coretemp_hp_online);
platform_driver_unregister(&coretemp_driver);
+ kfree(pkg_devices);
}
+module_exit(coretemp_exit)
MODULE_AUTHOR("Rudolf Marek <r.marek@assembler.cz>");
MODULE_DESCRIPTION("Intel Core temperature monitor");
MODULE_LICENSE("GPL");
-
-module_init(coretemp_init)
-module_exit(coretemp_exit)
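
The coretemp rework above is a textbook conversion from the old CPU notifier to the hotplug state machine: cpuhp_setup_state() with CPUHP_AP_ONLINE_DYN allocates a dynamic state, invokes the online callback on every CPU already up, and returns the state id needed for teardown. A minimal skeleton of the pattern; module and callback names are placeholders, not driver code.

/* Minimal skeleton of the dynamic cpuhp state pattern used above.
 * cpuhp_setup_state() runs example_online() on all present CPUs and
 * on every future hotplug; cpuhp_remove_state() runs the offline
 * callback on teardown. Names here are placeholders.
 */
#include <linux/module.h>
#include <linux/cpuhotplug.h>

static enum cpuhp_state example_hp_online;

static int example_online(unsigned int cpu)
{
	pr_info("cpu %u online\n", cpu);
	return 0;
}

static int example_offline(unsigned int cpu)
{
	pr_info("cpu %u going down\n", cpu);
	return 0;
}

static int __init example_init(void)
{
	int ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "example:online",
				    example_online, example_offline);
	if (ret < 0)
		return ret;
	example_hp_online = ret;	/* dynamic state id for teardown */
	return 0;
}

static void __exit example_exit(void)
{
	cpuhp_remove_state(example_hp_online);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");
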
diff --git a/drivers/hwmon/ds620.c b/drivers/hwmon/ds620.c
index edf550fc4eef..0043a4c02b85 100644
--- a/drivers/hwmon/ds620.c
+++ b/drivers/hwmon/ds620.c
@@ -166,7 +166,7 @@ static ssize_t set_temp(struct device *dev, struct device_attribute *da,
if (res)
return res;
- val = (val * 10 / 625) * 8;
+ val = (clamp_val(val, -128000, 128000) * 10 / 625) * 8;
mutex_lock(&data->update_lock);
data->temp[attr->index] = val;
diff --git a/drivers/hwmon/emc2103.c b/drivers/hwmon/emc2103.c
index 24e395c5907d..4b870ee9b0d3 100644
--- a/drivers/hwmon/emc2103.c
+++ b/drivers/hwmon/emc2103.c
@@ -251,7 +251,7 @@ static ssize_t set_temp_min(struct device *dev, struct device_attribute *da,
if (result < 0)
return result;
- val = clamp_val(DIV_ROUND_CLOSEST(val, 1000), -63, 127);
+ val = DIV_ROUND_CLOSEST(clamp_val(val, -63000, 127000), 1000);
mutex_lock(&data->update_lock);
data->temp_min[nr] = val;
@@ -273,7 +273,7 @@ static ssize_t set_temp_max(struct device *dev, struct device_attribute *da,
if (result < 0)
return result;
- val = clamp_val(DIV_ROUND_CLOSEST(val, 1000), -63, 127);
+ val = DIV_ROUND_CLOSEST(clamp_val(val, -63000, 127000), 1000);
mutex_lock(&data->update_lock);
data->temp_max[nr] = val;
diff --git a/drivers/hwmon/emc6w201.c b/drivers/hwmon/emc6w201.c
index f37fe2011640..4aee5adf9ef2 100644
--- a/drivers/hwmon/emc6w201.c
+++ b/drivers/hwmon/emc6w201.c
@@ -215,12 +215,13 @@ static ssize_t set_in(struct device *dev, struct device_attribute *devattr,
if (err < 0)
return err;
- val = DIV_ROUND_CLOSEST(val * 0xC0, nominal_mv[nr]);
+ val = clamp_val(val, 0, 255 * nominal_mv[nr] / 192);
+ val = DIV_ROUND_CLOSEST(val * 192, nominal_mv[nr]);
reg = (sf == min) ? EMC6W201_REG_IN_LOW(nr)
: EMC6W201_REG_IN_HIGH(nr);
mutex_lock(&data->update_lock);
- data->in[sf][nr] = clamp_val(val, 0, 255);
+ data->in[sf][nr] = val;
err = emc6w201_write8(client, reg, data->in[sf][nr]);
mutex_unlock(&data->update_lock);
@@ -252,12 +253,13 @@ static ssize_t set_temp(struct device *dev, struct device_attribute *devattr,
if (err < 0)
return err;
+ val = clamp_val(val, -127000, 127000);
val = DIV_ROUND_CLOSEST(val, 1000);
reg = (sf == min) ? EMC6W201_REG_TEMP_LOW(nr)
: EMC6W201_REG_TEMP_HIGH(nr);
mutex_lock(&data->update_lock);
- data->temp[sf][nr] = clamp_val(val, -127, 127);
+ data->temp[sf][nr] = val;
err = emc6w201_write8(client, reg, data->temp[sf][nr]);
mutex_unlock(&data->update_lock);
diff --git a/drivers/hwmon/g762.c b/drivers/hwmon/g762.c
index b96a2a9e4df7..628be9c95ff9 100644
--- a/drivers/hwmon/g762.c
+++ b/drivers/hwmon/g762.c
@@ -193,14 +193,17 @@ static inline unsigned int rpm_from_cnt(u8 cnt, u32 clk_freq, u16 p,
* Convert fan RPM value from sysfs into count value for fan controller
* register (FAN_SET_CNT).
*/
-static inline unsigned char cnt_from_rpm(u32 rpm, u32 clk_freq, u16 p,
+static inline unsigned char cnt_from_rpm(unsigned long rpm, u32 clk_freq, u16 p,
u8 clk_div, u8 gear_mult)
{
- if (!rpm) /* to stop the fan, set cnt to 255 */
+ unsigned long f1 = clk_freq * 30 * gear_mult;
+ unsigned long f2 = p * clk_div;
+
+ if (!rpm) /* to stop the fan, set cnt to 255 */
return 0xff;
- return clamp_val(((clk_freq * 30 * gear_mult) / (rpm * p * clk_div)),
- 0, 255);
+ rpm = clamp_val(rpm, f1 / (255 * f2), ULONG_MAX / f2);
+ return DIV_ROUND_CLOSEST(f1, rpm * f2);
}
/* helper to grab and cache data, at most one time per second */
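
The bounds in the new cnt_from_rpm() are worth spelling out: clamping rpm to at least f1 / (255 * f2) keeps the resulting count within a byte, and clamping it to at most ULONG_MAX / f2 keeps the rpm * f2 product from overflowing. A userspace sketch with hypothetical clock and divisor values:

/* Sketch of the overflow-safe RPM -> count conversion above.
 * The lower clamp caps the count at 255; the upper clamp keeps
 * rpm * f2 from overflowing unsigned long. Clock and divisor
 * values below are hypothetical.
 */
#include <stdio.h>
#include <limits.h>

#define DIV_ROUND_CLOSEST(x, d) (((x) + (d) / 2) / (d))

static unsigned char cnt_from_rpm(unsigned long rpm, unsigned long clk_freq,
				  unsigned int p, unsigned int clk_div,
				  unsigned int gear_mult)
{
	unsigned long f1 = clk_freq * 30 * gear_mult;
	unsigned long f2 = (unsigned long)p * clk_div;

	if (!rpm)		/* to stop the fan, set cnt to 255 */
		return 0xff;

	if (rpm < f1 / (255 * f2))
		rpm = f1 / (255 * f2);
	if (rpm > ULONG_MAX / f2)
		rpm = ULONG_MAX / f2;

	return DIV_ROUND_CLOSEST(f1, rpm * f2);
}

int main(void)
{
	/* hypothetical: 32 kHz clock, 2 pulses/rev, div 1, gear 1 */
	printf("cnt = %u\n", cnt_from_rpm(3000, 32768, 2, 1, 1));
	return 0;
}
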
diff --git a/drivers/hwmon/hwmon.c b/drivers/hwmon/hwmon.c
index a74c075a30ec..3932f9276c07 100644
--- a/drivers/hwmon/hwmon.c
+++ b/drivers/hwmon/hwmon.c
@@ -38,12 +38,15 @@ struct hwmon_device {
#define to_hwmon_device(d) container_of(d, struct hwmon_device, dev)
+#define MAX_SYSFS_ATTR_NAME_LENGTH 32
+
struct hwmon_device_attribute {
struct device_attribute dev_attr;
const struct hwmon_ops *ops;
enum hwmon_sensor_types type;
u32 attr;
int index;
+ char name[MAX_SYSFS_ATTR_NAME_LENGTH];
};
#define to_hwmon_attr(d) \
@@ -178,6 +181,22 @@ static ssize_t hwmon_attr_show(struct device *dev,
return sprintf(buf, "%ld\n", val);
}
+static ssize_t hwmon_attr_show_string(struct device *dev,
+ struct device_attribute *devattr,
+ char *buf)
+{
+ struct hwmon_device_attribute *hattr = to_hwmon_attr(devattr);
+ char *s;
+ int ret;
+
+ ret = hattr->ops->read_string(dev, hattr->type, hattr->attr,
+ hattr->index, &s);
+ if (ret < 0)
+ return ret;
+
+ return sprintf(buf, "%s\n", s);
+}
+
static ssize_t hwmon_attr_store(struct device *dev,
struct device_attribute *devattr,
const char *buf, size_t count)
@@ -205,6 +224,17 @@ static int hwmon_attr_base(enum hwmon_sensor_types type)
return 1;
}
+static bool is_string_attr(enum hwmon_sensor_types type, u32 attr)
+{
+ return (type == hwmon_temp && attr == hwmon_temp_label) ||
+ (type == hwmon_in && attr == hwmon_in_label) ||
+ (type == hwmon_curr && attr == hwmon_curr_label) ||
+ (type == hwmon_power && attr == hwmon_power_label) ||
+ (type == hwmon_energy && attr == hwmon_energy_label) ||
+ (type == hwmon_humidity && attr == hwmon_humidity_label) ||
+ (type == hwmon_fan && attr == hwmon_fan_label);
+}
+
static struct attribute *hwmon_genattr(struct device *dev,
const void *drvdata,
enum hwmon_sensor_types type,
@@ -218,6 +248,7 @@ static struct attribute *hwmon_genattr(struct device *dev,
struct attribute *a;
umode_t mode;
char *name;
+ bool is_string = is_string_attr(type, attr);
/* The attribute is invisible if there is no template string */
if (!template)
@@ -227,32 +258,31 @@ static struct attribute *hwmon_genattr(struct device *dev,
if (!mode)
return ERR_PTR(-ENOENT);
- if ((mode & S_IRUGO) && !ops->read)
+ if ((mode & S_IRUGO) && ((is_string && !ops->read_string) ||
+ (!is_string && !ops->read)))
return ERR_PTR(-EINVAL);
if ((mode & S_IWUGO) && !ops->write)
return ERR_PTR(-EINVAL);
+ hattr = devm_kzalloc(dev, sizeof(*hattr), GFP_KERNEL);
+ if (!hattr)
+ return ERR_PTR(-ENOMEM);
+
if (type == hwmon_chip) {
name = (char *)template;
} else {
- name = devm_kzalloc(dev, strlen(template) + 16, GFP_KERNEL);
- if (!name)
- return ERR_PTR(-ENOMEM);
- scnprintf(name, strlen(template) + 16, template,
+ scnprintf(hattr->name, sizeof(hattr->name), template,
index + hwmon_attr_base(type));
+ name = hattr->name;
}
- hattr = devm_kzalloc(dev, sizeof(*hattr), GFP_KERNEL);
- if (!hattr)
- return ERR_PTR(-ENOMEM);
-
hattr->type = type;
hattr->attr = attr;
hattr->index = index;
hattr->ops = ops;
dattr = &hattr->dev_attr;
- dattr->show = hwmon_attr_show;
+ dattr->show = is_string ? hwmon_attr_show_string : hwmon_attr_show;
dattr->store = hwmon_attr_store;
a = &dattr->attr;
@@ -263,7 +293,11 @@ static struct attribute *hwmon_genattr(struct device *dev,
return a;
}
-static const char * const hwmon_chip_attr_templates[] = {
+/*
+ * Chip attributes are not attribute templates but actual sysfs attributes.
+ * See hwmon_genattr() for special handling.
+ */
+static const char * const hwmon_chip_attrs[] = {
[hwmon_chip_temp_reset_history] = "temp_reset_history",
[hwmon_chip_in_reset_history] = "in_reset_history",
[hwmon_chip_curr_reset_history] = "curr_reset_history",
@@ -400,7 +434,7 @@ static const char * const hwmon_pwm_attr_templates[] = {
};
static const char * const *__templates[] = {
- [hwmon_chip] = hwmon_chip_attr_templates,
+ [hwmon_chip] = hwmon_chip_attrs,
[hwmon_temp] = hwmon_temp_attr_templates,
[hwmon_in] = hwmon_in_attr_templates,
[hwmon_curr] = hwmon_curr_attr_templates,
@@ -412,7 +446,7 @@ static const char * const *__templates[] = {
};
static const int __templates_size[] = {
- [hwmon_chip] = ARRAY_SIZE(hwmon_chip_attr_templates),
+ [hwmon_chip] = ARRAY_SIZE(hwmon_chip_attrs),
[hwmon_temp] = ARRAY_SIZE(hwmon_temp_attr_templates),
[hwmon_in] = ARRAY_SIZE(hwmon_in_attr_templates),
[hwmon_curr] = ARRAY_SIZE(hwmon_curr_attr_templates),
@@ -526,9 +560,9 @@ __hwmon_device_register(struct device *dev, const char *name, void *drvdata,
hdev = &hwdev->dev;
- if (chip && chip->ops->is_visible) {
+ if (chip) {
struct attribute **attrs;
- int ngroups = 2;
+ int ngroups = 2; /* terminating NULL plus &hwdev->groups */
if (groups)
for (i = 0; groups[i]; i++)
@@ -572,7 +606,7 @@ __hwmon_device_register(struct device *dev, const char *name, void *drvdata,
if (err)
goto free_hwmon;
- if (chip && chip->ops->is_visible && chip->ops->read &&
+ if (chip && chip->ops->read &&
chip->info[0]->type == hwmon_chip &&
(chip->info[0]->config[0] & HWMON_C_REGISTER_TZ)) {
const struct hwmon_channel_info **info = chip->info;
@@ -626,8 +660,8 @@ EXPORT_SYMBOL_GPL(hwmon_device_register_with_groups);
* @dev: the parent device
* @name: hwmon name attribute
* @drvdata: driver data to attach to created device
- * @info: Pointer to hwmon chip information
- * @groups - pointer to list of driver specific attribute groups
+ * @info: pointer to hwmon chip information
+ * @extra_groups: pointer to list of additional non-standard attribute groups
*
* hwmon_device_unregister() must be called when the device is no
* longer needed.
@@ -638,12 +672,12 @@ struct device *
hwmon_device_register_with_info(struct device *dev, const char *name,
void *drvdata,
const struct hwmon_chip_info *chip,
- const struct attribute_group **groups)
+ const struct attribute_group **extra_groups)
{
- if (chip && (!chip->ops || !chip->info))
+ if (chip && (!chip->ops || !chip->ops->is_visible || !chip->info))
return ERR_PTR(-EINVAL);
- return __hwmon_device_register(dev, name, drvdata, chip, groups);
+ return __hwmon_device_register(dev, name, drvdata, chip, extra_groups);
}
EXPORT_SYMBOL_GPL(hwmon_device_register_with_info);
@@ -658,6 +692,9 @@ EXPORT_SYMBOL_GPL(hwmon_device_register_with_info);
*/
struct device *hwmon_device_register(struct device *dev)
{
+ dev_warn(dev,
+ "hwmon_device_register() is deprecated. Please convert the driver to use hwmon_device_register_with_info().\n");
+
return hwmon_device_register_with_groups(dev, NULL, NULL, NULL);
}
EXPORT_SYMBOL_GPL(hwmon_device_register);
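
The registration rules tightened above are easy to miss in the hunks: hwmon_device_register_with_info() now rejects a hwmon_chip_info whose ops table lacks .is_visible, and the legacy hwmon_device_register() logs a deprecation warning. A minimal sketch of the now-required shape, using hypothetical foo_* names:

static umode_t foo_is_visible(const void *data, enum hwmon_sensor_types type,
			      u32 attr, int channel)
{
	/* mandatory after this change; registration fails without it */
	return (type == hwmon_temp && attr == hwmon_temp_input) ? 0444 : 0;
}

static int foo_read(struct device *dev, enum hwmon_sensor_types type,
		    u32 attr, int channel, long *val)
{
	*val = 25000;	/* placeholder reading, millidegrees Celsius */
	return 0;
}

static const struct hwmon_ops foo_hwmon_ops = {
	.is_visible = foo_is_visible,
	.read = foo_read,
};

static const u32 foo_temp_config[] = { HWMON_T_INPUT, 0 };

static const struct hwmon_channel_info foo_temp = {
	.type = hwmon_temp,
	.config = foo_temp_config,
};

static const struct hwmon_channel_info *foo_info[] = { &foo_temp, NULL };

static const struct hwmon_chip_info foo_chip_info = {
	.ops = &foo_hwmon_ops,
	.info = foo_info,
};

/* in probe: */
hwmon_dev = devm_hwmon_device_register_with_info(dev, "foo", data,
						 &foo_chip_info, NULL);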
diff --git a/drivers/hwmon/lm85.c b/drivers/hwmon/lm85.c
index 6ff773fcaefb..29c8136ce9c5 100644
--- a/drivers/hwmon/lm85.c
+++ b/drivers/hwmon/lm85.c
@@ -136,7 +136,8 @@ static const int lm85_scaling[] = { /* .001 Volts */
#define SCALE(val, from, to) (((val) * (to) + ((from) / 2)) / (from))
#define INS_TO_REG(n, val) \
- clamp_val(SCALE(val, lm85_scaling[n], 192), 0, 255)
+ SCALE(clamp_val(val, 0, 255 * lm85_scaling[n] / 192), \
+ lm85_scaling[n], 192)
#define INSEXT_FROM_REG(n, val, ext) \
SCALE(((val) << 4) + (ext), 192 << 4, lm85_scaling[n])
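
The INS_TO_REG() rework is not cosmetic: clamping before scaling keeps the (val) * 192 multiplication from overflowing when userspace writes a huge limit, and the same clamp-first pattern recurs in the lm87, nct7802 and smsc47m192 hunks below. A standalone illustration (plain C, outside the kernel):

#define SCALE(val, from, to)	(((val) * (to) + ((from) / 2)) / (from))

/* old shape: scale first, clamp the result -- (val) * 192 may overflow */
static long ins_to_reg_old(long val, long scale)
{
	long reg = SCALE(val, scale, 192);

	return reg < 0 ? 0 : (reg > 255 ? 255 : reg);
}

/* new shape: bound the input to the representable range, then scale */
static long ins_to_reg_new(long val, long scale)
{
	long max = 255 * scale / 192;

	if (val < 0)
		val = 0;
	else if (val > max)
		val = max;
	return SCALE(val, scale, 192);
}

For val anywhere near LONG_MAX the old form computes val * 192 before any bound is applied, which is undefined behavior on signed overflow; the new form never multiplies anything larger than 255 * scale / 192.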
diff --git a/drivers/hwmon/lm87.c b/drivers/hwmon/lm87.c
index a5e295826aea..13cca3606e06 100644
--- a/drivers/hwmon/lm87.c
+++ b/drivers/hwmon/lm87.c
@@ -121,7 +121,7 @@ static u8 LM87_REG_TEMP_LOW[3] = { 0x3A, 0x38, 0x2C };
#define IN_FROM_REG(reg, scale) (((reg) * (scale) + 96) / 192)
#define IN_TO_REG(val, scale) ((val) <= 0 ? 0 : \
- (val) * 192 >= (scale) * 255 ? 255 : \
+ (val) >= (scale) * 255 / 192 ? 255 : \
((val) * 192 + (scale) / 2) / (scale))
#define TEMP_FROM_REG(reg) ((reg) * 1000)
@@ -154,7 +154,6 @@ static u8 LM87_REG_TEMP_LOW[3] = { 0x3A, 0x38, 0x2C };
*/
struct lm87_data {
- struct device *hwmon_dev;
struct mutex update_lock;
char valid; /* zero until following fields are valid */
unsigned long last_updated; /* In jiffies */
@@ -181,6 +180,8 @@ struct lm87_data {
u16 alarms; /* register values, combined */
u8 vid; /* register values, combined */
u8 vrm;
+
+ const struct attribute_group *attr_groups[6];
};
static inline int lm87_read_value(struct i2c_client *client, u8 reg)
@@ -195,7 +196,7 @@ static inline int lm87_write_value(struct i2c_client *client, u8 reg, u8 value)
static struct lm87_data *lm87_update_device(struct device *dev)
{
- struct i2c_client *client = to_i2c_client(dev);
+ struct i2c_client *client = dev_get_drvdata(dev);
struct lm87_data *data = i2c_get_clientdata(client);
mutex_lock(&data->update_lock);
@@ -309,7 +310,7 @@ static ssize_t show_in_max(struct device *dev,
static ssize_t set_in_min(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
- struct i2c_client *client = to_i2c_client(dev);
+ struct i2c_client *client = dev_get_drvdata(dev);
struct lm87_data *data = i2c_get_clientdata(client);
int nr = to_sensor_dev_attr(attr)->index;
long val;
@@ -330,7 +331,7 @@ static ssize_t set_in_min(struct device *dev, struct device_attribute *attr,
static ssize_t set_in_max(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
- struct i2c_client *client = to_i2c_client(dev);
+ struct i2c_client *client = dev_get_drvdata(dev);
struct lm87_data *data = i2c_get_clientdata(client);
int nr = to_sensor_dev_attr(attr)->index;
long val;
@@ -396,7 +397,7 @@ static ssize_t show_temp_high(struct device *dev,
static ssize_t set_temp_low(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
- struct i2c_client *client = to_i2c_client(dev);
+ struct i2c_client *client = dev_get_drvdata(dev);
struct lm87_data *data = i2c_get_clientdata(client);
int nr = to_sensor_dev_attr(attr)->index;
long val;
@@ -416,7 +417,7 @@ static ssize_t set_temp_low(struct device *dev, struct device_attribute *attr,
static ssize_t set_temp_high(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
- struct i2c_client *client = to_i2c_client(dev);
+ struct i2c_client *client = dev_get_drvdata(dev);
struct lm87_data *data = i2c_get_clientdata(client);
int nr = to_sensor_dev_attr(attr)->index;
long val;
@@ -495,7 +496,7 @@ static ssize_t show_fan_div(struct device *dev,
static ssize_t set_fan_min(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
- struct i2c_client *client = to_i2c_client(dev);
+ struct i2c_client *client = dev_get_drvdata(dev);
struct lm87_data *data = i2c_get_clientdata(client);
int nr = to_sensor_dev_attr(attr)->index;
long val;
@@ -522,7 +523,7 @@ static ssize_t set_fan_min(struct device *dev, struct device_attribute *attr,
static ssize_t set_fan_div(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
- struct i2c_client *client = to_i2c_client(dev);
+ struct i2c_client *client = dev_get_drvdata(dev);
struct lm87_data *data = i2c_get_clientdata(client);
int nr = to_sensor_dev_attr(attr)->index;
long val;
@@ -635,7 +636,7 @@ static ssize_t show_aout(struct device *dev, struct device_attribute *attr,
static ssize_t set_aout(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
- struct i2c_client *client = to_i2c_client(dev);
+ struct i2c_client *client = dev_get_drvdata(dev);
struct lm87_data *data = i2c_get_clientdata(client);
long val;
int err;
@@ -841,23 +842,18 @@ static int lm87_detect(struct i2c_client *client, struct i2c_board_info *info)
return 0;
}
-static void lm87_remove_files(struct i2c_client *client)
+static void lm87_restore_config(void *arg)
{
- struct device *dev = &client->dev;
-
- sysfs_remove_group(&dev->kobj, &lm87_group);
- sysfs_remove_group(&dev->kobj, &lm87_group_in6);
- sysfs_remove_group(&dev->kobj, &lm87_group_fan1);
- sysfs_remove_group(&dev->kobj, &lm87_group_in7);
- sysfs_remove_group(&dev->kobj, &lm87_group_fan2);
- sysfs_remove_group(&dev->kobj, &lm87_group_temp3);
- sysfs_remove_group(&dev->kobj, &lm87_group_in0_5);
- sysfs_remove_group(&dev->kobj, &lm87_group_vid);
+ struct i2c_client *client = arg;
+ struct lm87_data *data = i2c_get_clientdata(client);
+
+ lm87_write_value(client, LM87_REG_CONFIG, data->config);
}
-static void lm87_init_client(struct i2c_client *client)
+static int lm87_init_client(struct i2c_client *client)
{
struct lm87_data *data = i2c_get_clientdata(client);
+ int rc;
if (dev_get_platdata(&client->dev)) {
data->channel = *(u8 *)dev_get_platdata(&client->dev);
@@ -868,6 +864,10 @@ static void lm87_init_client(struct i2c_client *client)
}
data->config = lm87_read_value(client, LM87_REG_CONFIG) & 0x6F;
+ rc = devm_add_action(&client->dev, lm87_restore_config, client);
+ if (rc)
+ return rc;
+
if (!(data->config & 0x01)) {
int i;
@@ -895,12 +895,15 @@ static void lm87_init_client(struct i2c_client *client)
if ((data->config & 0x09) != 0x01)
lm87_write_value(client, LM87_REG_CONFIG,
(data->config & 0x77) | 0x01);
+ return 0;
}
static int lm87_probe(struct i2c_client *client, const struct i2c_device_id *id)
{
struct lm87_data *data;
+ struct device *hwmon_dev;
int err;
+ unsigned int group_tail = 0;
data = devm_kzalloc(&client->dev, sizeof(struct lm87_data), GFP_KERNEL);
if (!data)
@@ -910,7 +913,9 @@ static int lm87_probe(struct i2c_client *client, const struct i2c_device_id *id)
mutex_init(&data->update_lock);
/* Initialize the LM87 chip */
- lm87_init_client(client);
+ err = lm87_init_client(client);
+ if (err)
+ return err;
data->in_scale[0] = 2500;
data->in_scale[1] = 2700;
@@ -921,72 +926,34 @@ static int lm87_probe(struct i2c_client *client, const struct i2c_device_id *id)
data->in_scale[6] = 1875;
data->in_scale[7] = 1875;
- /* Register sysfs hooks */
- err = sysfs_create_group(&client->dev.kobj, &lm87_group);
- if (err)
- goto exit_stop;
-
- if (data->channel & CHAN_NO_FAN(0)) {
- err = sysfs_create_group(&client->dev.kobj, &lm87_group_in6);
- if (err)
- goto exit_remove;
- } else {
- err = sysfs_create_group(&client->dev.kobj, &lm87_group_fan1);
- if (err)
- goto exit_remove;
- }
-
- if (data->channel & CHAN_NO_FAN(1)) {
- err = sysfs_create_group(&client->dev.kobj, &lm87_group_in7);
- if (err)
- goto exit_remove;
- } else {
- err = sysfs_create_group(&client->dev.kobj, &lm87_group_fan2);
- if (err)
- goto exit_remove;
- }
-
- if (data->channel & CHAN_TEMP3) {
- err = sysfs_create_group(&client->dev.kobj, &lm87_group_temp3);
- if (err)
- goto exit_remove;
- } else {
- err = sysfs_create_group(&client->dev.kobj, &lm87_group_in0_5);
- if (err)
- goto exit_remove;
- }
+ /*
+	 * Construct the list of attribute groups; which groups are included
+	 * depends on the configuration of the chip.
+ */
+ data->attr_groups[group_tail++] = &lm87_group;
+ if (data->channel & CHAN_NO_FAN(0))
+ data->attr_groups[group_tail++] = &lm87_group_in6;
+ else
+ data->attr_groups[group_tail++] = &lm87_group_fan1;
+
+ if (data->channel & CHAN_NO_FAN(1))
+ data->attr_groups[group_tail++] = &lm87_group_in7;
+ else
+ data->attr_groups[group_tail++] = &lm87_group_fan2;
+
+ if (data->channel & CHAN_TEMP3)
+ data->attr_groups[group_tail++] = &lm87_group_temp3;
+ else
+ data->attr_groups[group_tail++] = &lm87_group_in0_5;
if (!(data->channel & CHAN_NO_VID)) {
data->vrm = vid_which_vrm();
- err = sysfs_create_group(&client->dev.kobj, &lm87_group_vid);
- if (err)
- goto exit_remove;
- }
-
- data->hwmon_dev = hwmon_device_register(&client->dev);
- if (IS_ERR(data->hwmon_dev)) {
- err = PTR_ERR(data->hwmon_dev);
- goto exit_remove;
+ data->attr_groups[group_tail++] = &lm87_group_vid;
}
- return 0;
-
-exit_remove:
- lm87_remove_files(client);
-exit_stop:
- lm87_write_value(client, LM87_REG_CONFIG, data->config);
- return err;
-}
-
-static int lm87_remove(struct i2c_client *client)
-{
- struct lm87_data *data = i2c_get_clientdata(client);
-
- hwmon_device_unregister(data->hwmon_dev);
- lm87_remove_files(client);
-
- lm87_write_value(client, LM87_REG_CONFIG, data->config);
- return 0;
+ hwmon_dev = devm_hwmon_device_register_with_groups(
+ &client->dev, client->name, client, data->attr_groups);
+ return PTR_ERR_OR_ZERO(hwmon_dev);
}
/*
@@ -1006,7 +973,6 @@ static struct i2c_driver lm87_driver = {
.name = "lm87",
},
.probe = lm87_probe,
- .remove = lm87_remove,
.id_table = lm87_id,
.detect = lm87_detect,
.address_list = normal_i2c,
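
lm87's remove() callback disappears because probe now queues a devres action that rewrites the saved configuration register when the device unbinds, while the device-managed hwmon registration tears down the sysfs groups. A sketch of that pattern with hypothetical bar_* names and register (tmp108, added later in this series, uses the devm_add_action_or_reset() variant, which also fires the callback if adding the action fails):

struct bar_data {
	struct i2c_client *client;
	u8 config;		/* original value, restored on unbind */
};

static void bar_restore_config(void *arg)
{
	struct bar_data *data = arg;

	/* devres actions run in reverse order on unbind or probe failure */
	i2c_smbus_write_byte_data(data->client, BAR_REG_CONFIG, data->config);
}

static int bar_init(struct bar_data *data)
{
	int rc;

	rc = i2c_smbus_read_byte_data(data->client, BAR_REG_CONFIG);
	if (rc < 0)
		return rc;
	data->config = rc;

	rc = devm_add_action(&data->client->dev, bar_restore_config, data);
	if (rc)
		return rc;

	/* only now is it safe to modify the configuration */
	return 0;
}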
diff --git a/drivers/hwmon/mcp3021.c b/drivers/hwmon/mcp3021.c
index 972444a14cca..1929734c3b1d 100644
--- a/drivers/hwmon/mcp3021.c
+++ b/drivers/hwmon/mcp3021.c
@@ -4,6 +4,7 @@
* Copyright (C) 2008-2009, 2012 Freescale Semiconductor, Inc.
* Author: Mingkai Hu <Mingkai.hu@freescale.com>
* Reworked by Sven Schuchmann <schuchmann@schleissheimer.de>
+ * DT support added by Clemens Gruber <clemens.gruber@pqgruber.com>
*
 * This driver exports the value of the analog input voltage to sysfs; the
 * voltage unit is mV. Through the sysfs interface, the lm-sensors tool
@@ -22,11 +23,13 @@
#include <linux/i2c.h>
#include <linux/err.h>
#include <linux/device.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
-/* Vdd info */
-#define MCP3021_VDD_MAX 5500
-#define MCP3021_VDD_MIN 2700
-#define MCP3021_VDD_REF 3300
+/* Vdd / reference voltage in millivolts */
+#define MCP3021_VDD_REF_MAX 5500
+#define MCP3021_VDD_REF_MIN 2700
+#define MCP3021_VDD_REF_DEFAULT 3300
/* output format */
#define MCP3021_SAR_SHIFT 2
@@ -47,7 +50,7 @@ enum chips {
*/
struct mcp3021_data {
struct device *hwmon_dev;
- u32 vdd; /* device power supply */
+	u32 vdd; /* supply and reference voltage in millivolts */
u16 sar_shift;
u16 sar_mask;
u8 output_res;
@@ -99,13 +102,14 @@ static ssize_t show_in_input(struct device *dev, struct device_attribute *attr,
return sprintf(buf, "%d\n", in_input);
}
-static DEVICE_ATTR(in0_input, S_IRUGO, show_in_input, NULL);
+static DEVICE_ATTR(in0_input, 0444, show_in_input, NULL);
static int mcp3021_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
int err;
struct mcp3021_data *data = NULL;
+ struct device_node *np = client->dev.of_node;
if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C))
return -ENODEV;
@@ -117,6 +121,21 @@ static int mcp3021_probe(struct i2c_client *client,
i2c_set_clientdata(client, data);
+ if (np) {
+ if (!of_property_read_u32(np, "reference-voltage-microvolt",
+ &data->vdd))
+ data->vdd /= 1000;
+ else
+ data->vdd = MCP3021_VDD_REF_DEFAULT;
+ } else {
+ u32 *pdata = dev_get_platdata(&client->dev);
+
+ if (pdata)
+ data->vdd = *pdata;
+ else
+ data->vdd = MCP3021_VDD_REF_DEFAULT;
+ }
+
switch (id->driver_data) {
case mcp3021:
data->sar_shift = MCP3021_SAR_SHIFT;
@@ -131,13 +150,8 @@ static int mcp3021_probe(struct i2c_client *client,
break;
}
- if (dev_get_platdata(&client->dev)) {
- data->vdd = *(u32 *)dev_get_platdata(&client->dev);
- if (data->vdd > MCP3021_VDD_MAX || data->vdd < MCP3021_VDD_MIN)
- return -EINVAL;
- } else {
- data->vdd = MCP3021_VDD_REF;
- }
+ if (data->vdd > MCP3021_VDD_REF_MAX || data->vdd < MCP3021_VDD_REF_MIN)
+ return -EINVAL;
err = sysfs_create_file(&client->dev.kobj, &dev_attr_in0_input.attr);
if (err)
@@ -173,9 +187,19 @@ static const struct i2c_device_id mcp3021_id[] = {
};
MODULE_DEVICE_TABLE(i2c, mcp3021_id);
+#ifdef CONFIG_OF
+static const struct of_device_id of_mcp3021_match[] = {
+ { .compatible = "microchip,mcp3021", .data = (void *)mcp3021 },
+ { .compatible = "microchip,mcp3221", .data = (void *)mcp3221 },
+ { }
+};
+MODULE_DEVICE_TABLE(of, of_mcp3021_match);
+#endif
+
static struct i2c_driver mcp3021_driver = {
.driver = {
.name = "mcp3021",
+ .of_match_table = of_match_ptr(of_mcp3021_match),
},
.probe = mcp3021_probe,
.remove = mcp3021_remove,
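
One convention worth calling out in the DT path above: per devicetree practice the property is expressed in microvolts, while the driver's vdd field stays in millivolts, hence the divide:

	u32 uv;

	/* DT properties carry microvolts; the driver keeps millivolts */
	if (!of_property_read_u32(np, "reference-voltage-microvolt", &uv))
		data->vdd = uv / 1000;	/* e.g. 3300000 -> 3300 */

With both the DT and platform-data paths funneled into data->vdd, a single range check against MCP3021_VDD_REF_MIN/MAX now covers every source of the value.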
diff --git a/drivers/hwmon/nct7802.c b/drivers/hwmon/nct7802.c
index 3ce33d244cc0..12b94b094c0d 100644
--- a/drivers/hwmon/nct7802.c
+++ b/drivers/hwmon/nct7802.c
@@ -259,13 +259,15 @@ static int nct7802_read_fan_min(struct nct7802_data *data, u8 reg_fan_low,
ret = 0;
else if (ret)
ret = DIV_ROUND_CLOSEST(1350000U, ret);
+ else
+ ret = 1350000U;
abort:
mutex_unlock(&data->access_lock);
return ret;
}
static int nct7802_write_fan_min(struct nct7802_data *data, u8 reg_fan_low,
- u8 reg_fan_high, unsigned int limit)
+ u8 reg_fan_high, unsigned long limit)
{
int err;
@@ -326,8 +328,8 @@ static int nct7802_write_voltage(struct nct7802_data *data, int nr, int index,
int shift = 8 - REG_VOLTAGE_LIMIT_MSB_SHIFT[index - 1][nr];
int err;
+ voltage = clamp_val(voltage, 0, 0x3ff * nct7802_vmul[nr]);
voltage = DIV_ROUND_CLOSEST(voltage, nct7802_vmul[nr]);
- voltage = clamp_val(voltage, 0, 0x3ff);
mutex_lock(&data->access_lock);
err = regmap_write(data->regmap,
@@ -402,7 +404,7 @@ static ssize_t store_temp(struct device *dev, struct device_attribute *attr,
if (err < 0)
return err;
- val = clamp_val(DIV_ROUND_CLOSEST(val, 1000), -128, 127);
+ val = DIV_ROUND_CLOSEST(clamp_val(val, -128000, 127000), 1000);
err = regmap_write(data->regmap, nr, val & 0xff);
return err ? : count;
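
For context on the fan-limit hunk: the NCT7802 limit registers hold a tachometer count and RPM = 1350000 / count, so the conversion is its own inverse. Previously a stored count of 0 fell through and read back as 0 RPM; it now reports 1350000, the highest representable limit. A sketch of the read path after the change, mirroring nct7802_read_fan_min():

static int fan_limit_from_count(u16 count)
{
	if (count == 0x1fff)	/* all ones: reported as no limit (0) */
		return 0;
	if (count)
		return DIV_ROUND_CLOSEST(1350000U, count);
	return 1350000U;	/* count 0: highest representable RPM */
}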
diff --git a/drivers/hwmon/pmbus/adm1275.c b/drivers/hwmon/pmbus/adm1275.c
index 3baa4f4a8c5e..4ab5293c7bf0 100644
--- a/drivers/hwmon/pmbus/adm1275.c
+++ b/drivers/hwmon/pmbus/adm1275.c
@@ -499,15 +499,27 @@ static int adm1275_probe(struct i2c_client *client,
pindex = 2;
tindex = 3;
- info->func[0] |= PMBUS_HAVE_PIN | PMBUS_HAVE_STATUS_INPUT;
+ info->func[0] |= PMBUS_HAVE_PIN | PMBUS_HAVE_STATUS_INPUT |
+ PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT;
+
+ /* Enable VOUT if not enabled (it is disabled by default) */
+ if (!(config & ADM1278_VOUT_EN)) {
+ config |= ADM1278_VOUT_EN;
+ ret = i2c_smbus_write_byte_data(client,
+ ADM1275_PMON_CONFIG,
+ config);
+ if (ret < 0) {
+ dev_err(&client->dev,
+ "Failed to enable VOUT monitoring\n");
+ return -ENODEV;
+ }
+ }
+
if (config & ADM1278_TEMP1_EN)
info->func[0] |=
PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP;
if (config & ADM1278_VIN_EN)
info->func[0] |= PMBUS_HAVE_VIN;
- if (config & ADM1278_VOUT_EN)
- info->func[0] |=
- PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT;
break;
case adm1293:
case adm1294:
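
The ADM1278 branch no longer merely advertises VOUT when the chip happens to have it enabled; probe now switches it on itself with a read-modify-write of PMON_CONFIG. The same idiom, factored out as a helper (hypothetical name; config was read earlier in probe):

static int adm1278_enable_vout(struct i2c_client *client, int config)
{
	int ret;

	if (config & ADM1278_VOUT_EN)
		return 0;	/* already sampling VOUT */

	ret = i2c_smbus_write_byte_data(client, ADM1275_PMON_CONFIG,
					config | ADM1278_VOUT_EN);
	return ret < 0 ? ret : 0;
}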
diff --git a/drivers/hwmon/scpi-hwmon.c b/drivers/hwmon/scpi-hwmon.c
index 559a3dcd64d8..094f948f99ff 100644
--- a/drivers/hwmon/scpi-hwmon.c
+++ b/drivers/hwmon/scpi-hwmon.c
@@ -251,6 +251,7 @@ static const struct of_device_id scpi_of_match[] = {
{.compatible = "arm,scpi-sensors"},
{},
};
+MODULE_DEVICE_TABLE(of, scpi_of_match);
static struct platform_driver scpi_hwmon_platdrv = {
.driver = {
diff --git a/drivers/hwmon/smsc47m192.c b/drivers/hwmon/smsc47m192.c
index 6ac7cda72d4c..15650f247679 100644
--- a/drivers/hwmon/smsc47m192.c
+++ b/drivers/hwmon/smsc47m192.c
@@ -77,14 +77,15 @@ static inline unsigned int IN_FROM_REG(u8 reg, int n)
static inline u8 IN_TO_REG(unsigned long val, int n)
{
- return clamp_val(SCALE(val, 192, nom_mv[n]), 0, 255);
+ val = clamp_val(val, 0, nom_mv[n] * 255 / 192);
+ return SCALE(val, 192, nom_mv[n]);
}
/*
* TEMP: 0.001 degC units (-128C to +127C)
* REG: 1C/bit, two's complement
*/
-static inline s8 TEMP_TO_REG(int val)
+static inline s8 TEMP_TO_REG(long val)
{
return SCALE(clamp_val(val, -128000, 127000), 1, 1000);
}
diff --git a/drivers/hwmon/tc654.c b/drivers/hwmon/tc654.c
new file mode 100644
index 000000000000..18136e1f95fd
--- /dev/null
+++ b/drivers/hwmon/tc654.c
@@ -0,0 +1,514 @@
+/*
+ * tc654.c - Linux kernel modules for fan speed controller
+ *
+ * Copyright (C) 2016 Allied Telesis Labs NZ
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/bitops.h>
+#include <linux/err.h>
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/jiffies.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/util_macros.h>
+
+enum tc654_regs {
+ TC654_REG_RPM1 = 0x00, /* RPM Output 1 */
+ TC654_REG_RPM2 = 0x01, /* RPM Output 2 */
+ TC654_REG_FAN_FAULT1 = 0x02, /* Fan Fault 1 Threshold */
+ TC654_REG_FAN_FAULT2 = 0x03, /* Fan Fault 2 Threshold */
+ TC654_REG_CONFIG = 0x04, /* Configuration */
+ TC654_REG_STATUS = 0x05, /* Status */
+ TC654_REG_DUTY_CYCLE = 0x06, /* Fan Speed Duty Cycle */
+ TC654_REG_MFR_ID = 0x07, /* Manufacturer Identification */
+ TC654_REG_VER_ID = 0x08, /* Version Identification */
+};
+
+/* Macros to easily index the registers */
+#define TC654_REG_RPM(idx) (TC654_REG_RPM1 + (idx))
+#define TC654_REG_FAN_FAULT(idx) (TC654_REG_FAN_FAULT1 + (idx))
+
+/* Config register bits */
+#define TC654_REG_CONFIG_RES BIT(6) /* Resolution Selection */
+#define TC654_REG_CONFIG_DUTYC BIT(5) /* Duty Cycle Control */
+#define TC654_REG_CONFIG_SDM BIT(0) /* Shutdown Mode */
+
+/* Status register bits */
+#define TC654_REG_STATUS_F2F BIT(1) /* Fan 2 Fault */
+#define TC654_REG_STATUS_F1F BIT(0) /* Fan 1 Fault */
+
+/* RPM resolution for RPM Output registers */
+#define TC654_HIGH_RPM_RESOLUTION 25 /* 25 RPM resolution */
+#define TC654_LOW_RPM_RESOLUTION 50 /* 50 RPM resolution */
+
+/* Convert to the fan fault RPM threshold from register value */
+#define TC654_FAN_FAULT_FROM_REG(val) ((val) * 50) /* 50 RPM resolution */
+
+/* Convert to register value from the fan fault RPM threshold */
+#define TC654_FAN_FAULT_TO_REG(val) (((val) / 50) & 0xff)
+
+/* Register data is read (and cached) at most once per second. */
+#define TC654_UPDATE_INTERVAL HZ
+
+struct tc654_data {
+ struct i2c_client *client;
+
+ /* update mutex */
+ struct mutex update_lock;
+
+ /* tc654 register cache */
+ bool valid;
+ unsigned long last_updated; /* in jiffies */
+
+ u8 rpm_output[2]; /* The fan RPM data for fans 1 and 2 is then
+ * written to registers RPM1 and RPM2
+ */
+ u8 fan_fault[2]; /* The Fan Fault Threshold Registers are used to
+ * set the fan fault threshold levels for fan 1
+ * and fan 2
+ */
+ u8 config; /* The Configuration Register is an 8-bit read/
+ * writable multi-function control register
+ * 7: Fan Fault Clear
+ * 1 = Clear Fan Fault
+ * 0 = Normal Operation (default)
+ * 6: Resolution Selection for RPM Output Registers
+ * RPM Output Registers (RPM1 and RPM2) will be
+ * set for
+ * 1 = 25 RPM (9-bit) resolution
+ * 0 = 50 RPM (8-bit) resolution (default)
+ * 5: Duty Cycle Control Method
+ * The V OUT duty cycle will be controlled via
+ * 1 = the SMBus interface.
+ * 0 = via the V IN analog input pin. (default)
+ * 4,3: Fan 2 Pulses Per Rotation
+ * 00 = 1
+ * 01 = 2 (default)
+ * 10 = 4
+ * 11 = 8
+ * 2,1: Fan 1 Pulses Per Rotation
+ * 00 = 1
+ * 01 = 2 (default)
+ * 10 = 4
+ * 11 = 8
+ * 0: Shutdown Mode
+ * 1 = Shutdown mode.
+ * 0 = Normal operation. (default)
+ */
+ u8 status; /* The Status register provides all the information
+ * about what is going on within the TC654/TC655
+ * devices.
+ * 7,6: Unimplemented, Read as '0'
+ * 5: Over-Temperature Fault Condition
+ * 1 = Over-Temperature condition has occurred
+ * 0 = Normal operation. V IN is less than 2.6V
+ * 4: RPM2 Counter Overflow
+ * 1 = Fault condition
+ * 0 = Normal operation
+ * 3: RPM1 Counter Overflow
+ * 1 = Fault condition
+ * 0 = Normal operation
+ * 2: V IN Input Status
+ * 1 = V IN is open
+ * 0 = Normal operation. voltage present at V IN
+ * 1: Fan 2 Fault
+ * 1 = Fault condition
+ * 0 = Normal operation
+ * 0: Fan 1 Fault
+ * 1 = Fault condition
+ * 0 = Normal operation
+ */
+ u8 duty_cycle; /* The DUTY_CYCLE register is a 4-bit read/
+ * writable register used to control the duty
+ * cycle of the V OUT output.
+ */
+};
+
+/* helper to grab and cache data, at most once per second */
+static struct tc654_data *tc654_update_client(struct device *dev)
+{
+ struct tc654_data *data = dev_get_drvdata(dev);
+ struct i2c_client *client = data->client;
+ int ret = 0;
+
+ mutex_lock(&data->update_lock);
+ if (time_before(jiffies, data->last_updated + TC654_UPDATE_INTERVAL) &&
+ likely(data->valid))
+ goto out;
+
+ ret = i2c_smbus_read_byte_data(client, TC654_REG_RPM(0));
+ if (ret < 0)
+ goto out;
+ data->rpm_output[0] = ret;
+
+ ret = i2c_smbus_read_byte_data(client, TC654_REG_RPM(1));
+ if (ret < 0)
+ goto out;
+ data->rpm_output[1] = ret;
+
+ ret = i2c_smbus_read_byte_data(client, TC654_REG_FAN_FAULT(0));
+ if (ret < 0)
+ goto out;
+ data->fan_fault[0] = ret;
+
+ ret = i2c_smbus_read_byte_data(client, TC654_REG_FAN_FAULT(1));
+ if (ret < 0)
+ goto out;
+ data->fan_fault[1] = ret;
+
+ ret = i2c_smbus_read_byte_data(client, TC654_REG_CONFIG);
+ if (ret < 0)
+ goto out;
+ data->config = ret;
+
+ ret = i2c_smbus_read_byte_data(client, TC654_REG_STATUS);
+ if (ret < 0)
+ goto out;
+ data->status = ret;
+
+ ret = i2c_smbus_read_byte_data(client, TC654_REG_DUTY_CYCLE);
+ if (ret < 0)
+ goto out;
+ data->duty_cycle = ret & 0x0f;
+
+ data->last_updated = jiffies;
+ data->valid = true;
+out:
+ mutex_unlock(&data->update_lock);
+
+ if (ret < 0) /* upon error, encode it in return value */
+ data = ERR_PTR(ret);
+
+ return data;
+}
+
+/*
+ * sysfs attributes
+ */
+
+static ssize_t show_fan(struct device *dev, struct device_attribute *da,
+ char *buf)
+{
+ int nr = to_sensor_dev_attr(da)->index;
+ struct tc654_data *data = tc654_update_client(dev);
+ int val;
+
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+
+ if (data->config & TC654_REG_CONFIG_RES)
+ val = data->rpm_output[nr] * TC654_HIGH_RPM_RESOLUTION;
+ else
+ val = data->rpm_output[nr] * TC654_LOW_RPM_RESOLUTION;
+
+ return sprintf(buf, "%d\n", val);
+}
+
+static ssize_t show_fan_min(struct device *dev, struct device_attribute *da,
+ char *buf)
+{
+ int nr = to_sensor_dev_attr(da)->index;
+ struct tc654_data *data = tc654_update_client(dev);
+
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+
+ return sprintf(buf, "%d\n",
+ TC654_FAN_FAULT_FROM_REG(data->fan_fault[nr]));
+}
+
+static ssize_t set_fan_min(struct device *dev, struct device_attribute *da,
+ const char *buf, size_t count)
+{
+ int nr = to_sensor_dev_attr(da)->index;
+ struct tc654_data *data = dev_get_drvdata(dev);
+ struct i2c_client *client = data->client;
+ unsigned long val;
+ int ret;
+
+ if (kstrtoul(buf, 10, &val))
+ return -EINVAL;
+
+ val = clamp_val(val, 0, 12750);
+
+ mutex_lock(&data->update_lock);
+
+ data->fan_fault[nr] = TC654_FAN_FAULT_TO_REG(val);
+ ret = i2c_smbus_write_byte_data(client, TC654_REG_FAN_FAULT(nr),
+ data->fan_fault[nr]);
+
+ mutex_unlock(&data->update_lock);
+ return ret < 0 ? ret : count;
+}
+
+static ssize_t show_fan_alarm(struct device *dev, struct device_attribute *da,
+ char *buf)
+{
+ int nr = to_sensor_dev_attr(da)->index;
+ struct tc654_data *data = tc654_update_client(dev);
+ int val;
+
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+
+ if (nr == 0)
+ val = !!(data->status & TC654_REG_STATUS_F1F);
+ else
+ val = !!(data->status & TC654_REG_STATUS_F2F);
+
+ return sprintf(buf, "%d\n", val);
+}
+
+static const u8 TC654_FAN_PULSE_SHIFT[] = { 1, 3 };
+
+static ssize_t show_fan_pulses(struct device *dev, struct device_attribute *da,
+ char *buf)
+{
+ int nr = to_sensor_dev_attr(da)->index;
+ struct tc654_data *data = tc654_update_client(dev);
+ u8 val;
+
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+
+ val = BIT((data->config >> TC654_FAN_PULSE_SHIFT[nr]) & 0x03);
+ return sprintf(buf, "%d\n", val);
+}
+
+static ssize_t set_fan_pulses(struct device *dev, struct device_attribute *da,
+ const char *buf, size_t count)
+{
+ int nr = to_sensor_dev_attr(da)->index;
+ struct tc654_data *data = dev_get_drvdata(dev);
+ struct i2c_client *client = data->client;
+ u8 config;
+ unsigned long val;
+ int ret;
+
+ if (kstrtoul(buf, 10, &val))
+ return -EINVAL;
+
+ switch (val) {
+ case 1:
+ config = 0;
+ break;
+ case 2:
+ config = 1;
+ break;
+ case 4:
+ config = 2;
+ break;
+ case 8:
+ config = 3;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ mutex_lock(&data->update_lock);
+
+ data->config &= ~(0x03 << TC654_FAN_PULSE_SHIFT[nr]);
+ data->config |= (config << TC654_FAN_PULSE_SHIFT[nr]);
+ ret = i2c_smbus_write_byte_data(client, TC654_REG_CONFIG, data->config);
+
+ mutex_unlock(&data->update_lock);
+ return ret < 0 ? ret : count;
+}
+
+static ssize_t show_pwm_mode(struct device *dev,
+ struct device_attribute *da, char *buf)
+{
+ struct tc654_data *data = tc654_update_client(dev);
+
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+
+ return sprintf(buf, "%d\n", !!(data->config & TC654_REG_CONFIG_DUTYC));
+}
+
+static ssize_t set_pwm_mode(struct device *dev,
+ struct device_attribute *da,
+ const char *buf, size_t count)
+{
+ struct tc654_data *data = dev_get_drvdata(dev);
+ struct i2c_client *client = data->client;
+ unsigned long val;
+ int ret;
+
+ if (kstrtoul(buf, 10, &val))
+ return -EINVAL;
+
+ if (val != 0 && val != 1)
+ return -EINVAL;
+
+ mutex_lock(&data->update_lock);
+
+ if (val)
+ data->config |= TC654_REG_CONFIG_DUTYC;
+ else
+ data->config &= ~TC654_REG_CONFIG_DUTYC;
+
+ ret = i2c_smbus_write_byte_data(client, TC654_REG_CONFIG, data->config);
+
+ mutex_unlock(&data->update_lock);
+ return ret < 0 ? ret : count;
+}
+
+static const int tc654_pwm_map[16] = { 77, 88, 102, 112, 124, 136, 148, 160,
+ 172, 184, 196, 207, 219, 231, 243, 255};
+
+static ssize_t show_pwm(struct device *dev, struct device_attribute *da,
+ char *buf)
+{
+ struct tc654_data *data = tc654_update_client(dev);
+ int pwm;
+
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+
+ if (data->config & TC654_REG_CONFIG_SDM)
+ pwm = 0;
+ else
+ pwm = tc654_pwm_map[data->duty_cycle];
+
+ return sprintf(buf, "%d\n", pwm);
+}
+
+static ssize_t set_pwm(struct device *dev, struct device_attribute *da,
+ const char *buf, size_t count)
+{
+ struct tc654_data *data = dev_get_drvdata(dev);
+ struct i2c_client *client = data->client;
+ unsigned long val;
+ int ret;
+
+ if (kstrtoul(buf, 10, &val))
+ return -EINVAL;
+ if (val > 255)
+ return -EINVAL;
+
+ mutex_lock(&data->update_lock);
+
+ if (val == 0)
+ data->config |= TC654_REG_CONFIG_SDM;
+ else
+ data->config &= ~TC654_REG_CONFIG_SDM;
+
+ data->duty_cycle = find_closest(val, tc654_pwm_map,
+ ARRAY_SIZE(tc654_pwm_map));
+
+ ret = i2c_smbus_write_byte_data(client, TC654_REG_CONFIG, data->config);
+ if (ret < 0)
+ goto out;
+
+ ret = i2c_smbus_write_byte_data(client, TC654_REG_DUTY_CYCLE,
+ data->duty_cycle);
+
+out:
+ mutex_unlock(&data->update_lock);
+ return ret < 0 ? ret : count;
+}
+
+static SENSOR_DEVICE_ATTR(fan1_input, S_IRUGO, show_fan, NULL, 0);
+static SENSOR_DEVICE_ATTR(fan2_input, S_IRUGO, show_fan, NULL, 1);
+static SENSOR_DEVICE_ATTR(fan1_min, S_IWUSR | S_IRUGO, show_fan_min,
+ set_fan_min, 0);
+static SENSOR_DEVICE_ATTR(fan2_min, S_IWUSR | S_IRUGO, show_fan_min,
+ set_fan_min, 1);
+static SENSOR_DEVICE_ATTR(fan1_alarm, S_IRUGO, show_fan_alarm, NULL, 0);
+static SENSOR_DEVICE_ATTR(fan2_alarm, S_IRUGO, show_fan_alarm, NULL, 1);
+static SENSOR_DEVICE_ATTR(fan1_pulses, S_IWUSR | S_IRUGO, show_fan_pulses,
+ set_fan_pulses, 0);
+static SENSOR_DEVICE_ATTR(fan2_pulses, S_IWUSR | S_IRUGO, show_fan_pulses,
+ set_fan_pulses, 1);
+static SENSOR_DEVICE_ATTR(pwm1_mode, S_IWUSR | S_IRUGO,
+ show_pwm_mode, set_pwm_mode, 0);
+static SENSOR_DEVICE_ATTR(pwm1, S_IWUSR | S_IRUGO, show_pwm,
+ set_pwm, 0);
+
+/* Driver data */
+static struct attribute *tc654_attrs[] = {
+ &sensor_dev_attr_fan1_input.dev_attr.attr,
+ &sensor_dev_attr_fan2_input.dev_attr.attr,
+ &sensor_dev_attr_fan1_min.dev_attr.attr,
+ &sensor_dev_attr_fan2_min.dev_attr.attr,
+ &sensor_dev_attr_fan1_alarm.dev_attr.attr,
+ &sensor_dev_attr_fan2_alarm.dev_attr.attr,
+ &sensor_dev_attr_fan1_pulses.dev_attr.attr,
+ &sensor_dev_attr_fan2_pulses.dev_attr.attr,
+ &sensor_dev_attr_pwm1_mode.dev_attr.attr,
+ &sensor_dev_attr_pwm1.dev_attr.attr,
+ NULL
+};
+
+ATTRIBUTE_GROUPS(tc654);
+
+/*
+ * device probe and removal
+ */
+
+static int tc654_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct device *dev = &client->dev;
+ struct tc654_data *data;
+ struct device *hwmon_dev;
+ int ret;
+
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA))
+ return -ENODEV;
+
+ data = devm_kzalloc(dev, sizeof(struct tc654_data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->client = client;
+ mutex_init(&data->update_lock);
+
+ ret = i2c_smbus_read_byte_data(client, TC654_REG_CONFIG);
+ if (ret < 0)
+ return ret;
+
+ data->config = ret;
+
+ hwmon_dev =
+ devm_hwmon_device_register_with_groups(dev, client->name, data,
+ tc654_groups);
+ return PTR_ERR_OR_ZERO(hwmon_dev);
+}
+
+static const struct i2c_device_id tc654_id[] = {
+ {"tc654", 0},
+ {"tc655", 0},
+ {}
+};
+
+MODULE_DEVICE_TABLE(i2c, tc654_id);
+
+static struct i2c_driver tc654_driver = {
+ .driver = {
+ .name = "tc654",
+ },
+ .probe = tc654_probe,
+ .id_table = tc654_id,
+};
+
+module_i2c_driver(tc654_driver);
+
+MODULE_AUTHOR("Allied Telesis Labs");
+MODULE_DESCRIPTION("Microchip TC654/TC655 driver");
+MODULE_LICENSE("GPL");
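
A subtlety in set_pwm() above: the DUTY_CYCLE register offers only 16 steps and its lowest step already maps to a pwm value of 77, so 0 is unreachable as a duty cycle; the driver expresses pwm = 0 through the shutdown bit (SDM) instead. Every other value is snapped to the nearest table entry by find_closest() from <linux/util_macros.h>, e.g. (hypothetical helper):

static u8 tc654_pwm_to_duty(unsigned long val)	/* val already <= 255 */
{
	/*
	 * find_closest() returns the index of the nearest array entry:
	 *	val = 100 -> index 2  (102)
	 *	val = 130 -> index 4  (124; midpoint ties round down)
	 *	val = 255 -> index 15 (255)
	 */
	return find_closest(val, tc654_pwm_map, ARRAY_SIZE(tc654_pwm_map));
}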
diff --git a/drivers/hwmon/tmp108.c b/drivers/hwmon/tmp108.c
new file mode 100644
index 000000000000..91bb94639286
--- /dev/null
+++ b/drivers/hwmon/tmp108.c
@@ -0,0 +1,469 @@
+/* Texas Instruments TMP108 SMBus temperature sensor driver
+ *
+ * Copyright (C) 2016 John Muir <john@jmuir.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/jiffies.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+
+#define DRIVER_NAME "tmp108"
+
+#define TMP108_REG_TEMP 0x00
+#define TMP108_REG_CONF 0x01
+#define TMP108_REG_TLOW 0x02
+#define TMP108_REG_THIGH 0x03
+
+#define TMP108_TEMP_MIN_MC	-50000 /* Minimum millicelsius. */
+#define TMP108_TEMP_MAX_MC	127937 /* Maximum millicelsius. */
+
+/* Configuration register bits.
+ * Note: these bit definitions are byte swapped.
+ */
+#define TMP108_CONF_M0 0x0100 /* Sensor mode. */
+#define TMP108_CONF_M1 0x0200
+#define TMP108_CONF_TM 0x0400 /* Thermostat mode. */
+#define TMP108_CONF_FL 0x0800 /* Watchdog flag - TLOW */
+#define TMP108_CONF_FH 0x1000 /* Watchdog flag - THIGH */
+#define TMP108_CONF_CR0 0x2000 /* Conversion rate. */
+#define TMP108_CONF_CR1 0x4000
+#define TMP108_CONF_ID 0x8000
+#define TMP108_CONF_HYS0 0x0010 /* Hysteresis. */
+#define TMP108_CONF_HYS1 0x0020
+#define TMP108_CONF_POL 0x0080 /* Polarity of alert. */
+
+/* Defaults set by the hardware upon reset. */
+#define TMP108_CONF_DEFAULTS (TMP108_CONF_CR0 | TMP108_CONF_TM |\
+ TMP108_CONF_HYS0 | TMP108_CONF_M1)
+/* These bits are read-only. */
+#define TMP108_CONF_READ_ONLY (TMP108_CONF_FL | TMP108_CONF_FH |\
+ TMP108_CONF_ID)
+
+#define TMP108_CONF_MODE_MASK (TMP108_CONF_M0|TMP108_CONF_M1)
+#define TMP108_MODE_SHUTDOWN 0x0000
+#define TMP108_MODE_ONE_SHOT TMP108_CONF_M0
+#define TMP108_MODE_CONTINUOUS TMP108_CONF_M1 /* Default */
+ /* When M1 is set, M0 is ignored. */
+
+#define TMP108_CONF_CONVRATE_MASK (TMP108_CONF_CR0|TMP108_CONF_CR1)
+#define TMP108_CONVRATE_0P25HZ 0x0000
+#define TMP108_CONVRATE_1HZ TMP108_CONF_CR0 /* Default */
+#define TMP108_CONVRATE_4HZ TMP108_CONF_CR1
+#define TMP108_CONVRATE_16HZ (TMP108_CONF_CR0|TMP108_CONF_CR1)
+
+#define TMP108_CONF_HYSTERESIS_MASK (TMP108_CONF_HYS0|TMP108_CONF_HYS1)
+#define TMP108_HYSTERESIS_0C 0x0000
+#define TMP108_HYSTERESIS_1C TMP108_CONF_HYS0 /* Default */
+#define TMP108_HYSTERESIS_2C TMP108_CONF_HYS1
+#define TMP108_HYSTERESIS_4C (TMP108_CONF_HYS0|TMP108_CONF_HYS1)
+
+#define TMP108_CONVERSION_TIME_MS	30	/* in milliseconds */
+
+struct tmp108 {
+ struct regmap *regmap;
+ u16 orig_config;
+ unsigned long ready_time;
+};
+
+/* convert 12-bit TMP108 register value to milliCelsius */
+static inline int tmp108_temp_reg_to_mC(s16 val)
+{
+ return (val & ~0x0f) * 1000 / 256;
+}
+
+/* convert milliCelsius to left adjusted 12-bit TMP108 register value */
+static inline u16 tmp108_mC_to_temp_reg(int val)
+{
+ return (val * 256) / 1000;
+}
+
+static int tmp108_read(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long *temp)
+{
+ struct tmp108 *tmp108 = dev_get_drvdata(dev);
+ unsigned int regval;
+ int err, hyst;
+
+ if (type == hwmon_chip) {
+ if (attr == hwmon_chip_update_interval) {
+ err = regmap_read(tmp108->regmap, TMP108_REG_CONF,
+ &regval);
+ if (err < 0)
+ return err;
+ switch (regval & TMP108_CONF_CONVRATE_MASK) {
+ case TMP108_CONVRATE_0P25HZ:
+ default:
+ *temp = 4000;
+ break;
+ case TMP108_CONVRATE_1HZ:
+ *temp = 1000;
+ break;
+ case TMP108_CONVRATE_4HZ:
+ *temp = 250;
+ break;
+ case TMP108_CONVRATE_16HZ:
+ *temp = 63;
+ break;
+ }
+ return 0;
+ }
+ return -EOPNOTSUPP;
+ }
+
+ switch (attr) {
+ case hwmon_temp_input:
+		/* Is it too early to return a conversion? */
+ if (time_before(jiffies, tmp108->ready_time)) {
+			dev_dbg(dev, "%s: Conversion not ready yet...\n",
+ __func__);
+ return -EAGAIN;
+ }
+ err = regmap_read(tmp108->regmap, TMP108_REG_TEMP, &regval);
+ if (err < 0)
+ return err;
+ *temp = tmp108_temp_reg_to_mC(regval);
+ break;
+ case hwmon_temp_min:
+ case hwmon_temp_max:
+ err = regmap_read(tmp108->regmap, attr == hwmon_temp_min ?
+ TMP108_REG_TLOW : TMP108_REG_THIGH, &regval);
+ if (err < 0)
+ return err;
+ *temp = tmp108_temp_reg_to_mC(regval);
+ break;
+ case hwmon_temp_min_alarm:
+ case hwmon_temp_max_alarm:
+ err = regmap_read(tmp108->regmap, TMP108_REG_CONF, &regval);
+ if (err < 0)
+ return err;
+ *temp = !!(regval & (attr == hwmon_temp_min_alarm ?
+ TMP108_CONF_FL : TMP108_CONF_FH));
+ break;
+ case hwmon_temp_min_hyst:
+ case hwmon_temp_max_hyst:
+ err = regmap_read(tmp108->regmap, TMP108_REG_CONF, &regval);
+ if (err < 0)
+ return err;
+ switch (regval & TMP108_CONF_HYSTERESIS_MASK) {
+ case TMP108_HYSTERESIS_0C:
+ default:
+ hyst = 0;
+ break;
+ case TMP108_HYSTERESIS_1C:
+ hyst = 1000;
+ break;
+ case TMP108_HYSTERESIS_2C:
+ hyst = 2000;
+ break;
+ case TMP108_HYSTERESIS_4C:
+ hyst = 4000;
+ break;
+ }
+ err = regmap_read(tmp108->regmap, attr == hwmon_temp_min_hyst ?
+ TMP108_REG_TLOW : TMP108_REG_THIGH, &regval);
+ if (err < 0)
+ return err;
+ *temp = tmp108_temp_reg_to_mC(regval);
+ if (attr == hwmon_temp_min_hyst)
+ *temp += hyst;
+ else
+ *temp -= hyst;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int tmp108_write(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long temp)
+{
+ struct tmp108 *tmp108 = dev_get_drvdata(dev);
+ u32 regval, mask;
+ int err;
+
+ if (type == hwmon_chip) {
+ if (attr == hwmon_chip_update_interval) {
+ if (temp < 156)
+ mask = TMP108_CONVRATE_16HZ;
+ else if (temp < 625)
+ mask = TMP108_CONVRATE_4HZ;
+ else if (temp < 2500)
+ mask = TMP108_CONVRATE_1HZ;
+ else
+ mask = TMP108_CONVRATE_0P25HZ;
+ return regmap_update_bits(tmp108->regmap,
+ TMP108_REG_CONF,
+ TMP108_CONF_CONVRATE_MASK,
+ mask);
+ }
+ return -EOPNOTSUPP;
+ }
+
+ switch (attr) {
+ case hwmon_temp_min:
+ case hwmon_temp_max:
+ temp = clamp_val(temp, TMP108_TEMP_MIN_MC, TMP108_TEMP_MAX_MC);
+ return regmap_write(tmp108->regmap,
+ attr == hwmon_temp_min ?
+ TMP108_REG_TLOW : TMP108_REG_THIGH,
+ tmp108_mC_to_temp_reg(temp));
+ case hwmon_temp_min_hyst:
+ case hwmon_temp_max_hyst:
+ temp = clamp_val(temp, TMP108_TEMP_MIN_MC, TMP108_TEMP_MAX_MC);
+ err = regmap_read(tmp108->regmap,
+ attr == hwmon_temp_min_hyst ?
+ TMP108_REG_TLOW : TMP108_REG_THIGH,
+ &regval);
+ if (err < 0)
+ return err;
+ if (attr == hwmon_temp_min_hyst)
+ temp -= tmp108_temp_reg_to_mC(regval);
+ else
+ temp = tmp108_temp_reg_to_mC(regval) - temp;
+ if (temp < 500)
+ mask = TMP108_HYSTERESIS_0C;
+ else if (temp < 1500)
+ mask = TMP108_HYSTERESIS_1C;
+ else if (temp < 3000)
+ mask = TMP108_HYSTERESIS_2C;
+ else
+ mask = TMP108_HYSTERESIS_4C;
+ return regmap_update_bits(tmp108->regmap, TMP108_REG_CONF,
+ TMP108_CONF_HYSTERESIS_MASK, mask);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static umode_t tmp108_is_visible(const void *data, enum hwmon_sensor_types type,
+ u32 attr, int channel)
+{
+ if (type == hwmon_chip && attr == hwmon_chip_update_interval)
+ return 0644;
+
+ if (type != hwmon_temp)
+ return 0;
+
+ switch (attr) {
+ case hwmon_temp_input:
+ case hwmon_temp_min_alarm:
+ case hwmon_temp_max_alarm:
+ return 0444;
+ case hwmon_temp_min:
+ case hwmon_temp_max:
+ case hwmon_temp_min_hyst:
+ case hwmon_temp_max_hyst:
+ return 0644;
+ default:
+ return 0;
+ }
+}
+
+static u32 tmp108_chip_config[] = {
+ HWMON_C_REGISTER_TZ | HWMON_C_UPDATE_INTERVAL,
+ 0
+};
+
+static const struct hwmon_channel_info tmp108_chip = {
+ .type = hwmon_chip,
+ .config = tmp108_chip_config,
+};
+
+static u32 tmp108_temp_config[] = {
+ HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_MIN | HWMON_T_MIN_HYST
+ | HWMON_T_MAX_HYST | HWMON_T_MIN_ALARM | HWMON_T_MAX_ALARM,
+ 0
+};
+
+static const struct hwmon_channel_info tmp108_temp = {
+ .type = hwmon_temp,
+ .config = tmp108_temp_config,
+};
+
+static const struct hwmon_channel_info *tmp108_info[] = {
+ &tmp108_chip,
+ &tmp108_temp,
+ NULL
+};
+
+static const struct hwmon_ops tmp108_hwmon_ops = {
+ .is_visible = tmp108_is_visible,
+ .read = tmp108_read,
+ .write = tmp108_write,
+};
+
+static const struct hwmon_chip_info tmp108_chip_info = {
+ .ops = &tmp108_hwmon_ops,
+ .info = tmp108_info,
+};
+
+static void tmp108_restore_config(void *data)
+{
+ struct tmp108 *tmp108 = data;
+
+ regmap_write(tmp108->regmap, TMP108_REG_CONF, tmp108->orig_config);
+}
+
+static bool tmp108_is_writeable_reg(struct device *dev, unsigned int reg)
+{
+ return reg != TMP108_REG_TEMP;
+}
+
+static bool tmp108_is_volatile_reg(struct device *dev, unsigned int reg)
+{
+ /* Configuration register must be volatile to enable FL and FH. */
+ return reg == TMP108_REG_TEMP || reg == TMP108_REG_CONF;
+}
+
+static const struct regmap_config tmp108_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 16,
+ .max_register = TMP108_REG_THIGH,
+ .writeable_reg = tmp108_is_writeable_reg,
+ .volatile_reg = tmp108_is_volatile_reg,
+ .val_format_endian = REGMAP_ENDIAN_BIG,
+ .cache_type = REGCACHE_RBTREE,
+ .use_single_rw = true,
+};
+
+static int tmp108_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct device *dev = &client->dev;
+ struct device *hwmon_dev;
+ struct tmp108 *tmp108;
+ int err;
+ u32 config;
+
+ if (!i2c_check_functionality(client->adapter,
+ I2C_FUNC_SMBUS_WORD_DATA)) {
+ dev_err(dev,
+ "adapter doesn't support SMBus word transactions\n");
+ return -ENODEV;
+ }
+
+ tmp108 = devm_kzalloc(dev, sizeof(*tmp108), GFP_KERNEL);
+ if (!tmp108)
+ return -ENOMEM;
+
+ dev_set_drvdata(dev, tmp108);
+
+ tmp108->regmap = devm_regmap_init_i2c(client, &tmp108_regmap_config);
+ if (IS_ERR(tmp108->regmap)) {
+ err = PTR_ERR(tmp108->regmap);
+ dev_err(dev, "regmap init failed: %d", err);
+ return err;
+ }
+
+ err = regmap_read(tmp108->regmap, TMP108_REG_CONF, &config);
+ if (err < 0) {
+ dev_err(dev, "error reading config register: %d", err);
+ return err;
+ }
+ tmp108->orig_config = config;
+
+ /* Only continuous mode is supported. */
+ config &= ~TMP108_CONF_MODE_MASK;
+ config |= TMP108_MODE_CONTINUOUS;
+
+ /* Only comparator mode is supported. */
+ config &= ~TMP108_CONF_TM;
+
+ err = regmap_write(tmp108->regmap, TMP108_REG_CONF, config);
+ if (err < 0) {
+ dev_err(dev, "error writing config register: %d", err);
+ return err;
+ }
+
+ tmp108->ready_time = jiffies;
+ if ((tmp108->orig_config & TMP108_CONF_MODE_MASK) ==
+ TMP108_MODE_SHUTDOWN)
+ tmp108->ready_time +=
+ msecs_to_jiffies(TMP108_CONVERSION_TIME_MS);
+
+ err = devm_add_action_or_reset(dev, tmp108_restore_config, tmp108);
+ if (err) {
+ dev_err(dev, "add action or reset failed: %d", err);
+ return err;
+ }
+
+ hwmon_dev = devm_hwmon_device_register_with_info(dev, client->name,
+ tmp108,
+ &tmp108_chip_info,
+ NULL);
+ return PTR_ERR_OR_ZERO(hwmon_dev);
+}
+
+static int __maybe_unused tmp108_suspend(struct device *dev)
+{
+ struct tmp108 *tmp108 = dev_get_drvdata(dev);
+
+ return regmap_update_bits(tmp108->regmap, TMP108_REG_CONF,
+ TMP108_CONF_MODE_MASK, TMP108_MODE_SHUTDOWN);
+}
+
+static int __maybe_unused tmp108_resume(struct device *dev)
+{
+ struct tmp108 *tmp108 = dev_get_drvdata(dev);
+ int err;
+
+ err = regmap_update_bits(tmp108->regmap, TMP108_REG_CONF,
+ TMP108_CONF_MODE_MASK, TMP108_MODE_CONTINUOUS);
+ tmp108->ready_time = jiffies +
+ msecs_to_jiffies(TMP108_CONVERSION_TIME_MS);
+ return err;
+}
+
+static SIMPLE_DEV_PM_OPS(tmp108_dev_pm_ops, tmp108_suspend, tmp108_resume);
+
+static const struct i2c_device_id tmp108_i2c_ids[] = {
+ { "tmp108", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, tmp108_i2c_ids);
+
+#ifdef CONFIG_OF
+static const struct of_device_id tmp108_of_ids[] = {
+ { .compatible = "ti,tmp108", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, tmp108_of_ids);
+#endif
+
+static struct i2c_driver tmp108_driver = {
+ .driver = {
+ .name = DRIVER_NAME,
+ .pm = &tmp108_dev_pm_ops,
+ .of_match_table = of_match_ptr(tmp108_of_ids),
+ },
+ .probe = tmp108_probe,
+ .id_table = tmp108_i2c_ids,
+};
+
+module_i2c_driver(tmp108_driver);
+
+MODULE_AUTHOR("John Muir <john@jmuir.com>");
+MODULE_DESCRIPTION("Texas Instruments TMP108 temperature sensor driver");
+MODULE_LICENSE("GPL");
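
Some spot checks of the fixed-point helpers above: the TMP108 stores a left-justified 12-bit value whose LSB is 62.5 millidegrees C, which is where the & ~0x0f masking of the unused low nibble and the * 1000 / 256 scaling come from.

/* (val & ~0x0f) * 1000 / 256 on representative register values:
 *
 *	0x7FF0 ->  127937	(matches TMP108_TEMP_MAX_MC)
 *	0x0010 ->      62	(one LSB; 62.5 truncated by integer division)
 *	0x0000 ->       0
 *	0xCE00 ->  -50000	(TMP108_TEMP_MIN_MC; s16 two's complement)
 *
 * and the inverse, (val * 256) / 1000:
 *
 *	 25000 -> 0x1900
 *	-50000 -> 0xCE00 (as u16)
 */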
diff --git a/drivers/hwmon/via-cputemp.c b/drivers/hwmon/via-cputemp.c
index ac91c07e3f90..d1f209a5feac 100644
--- a/drivers/hwmon/via-cputemp.c
+++ b/drivers/hwmon/via-cputemp.c
@@ -220,7 +220,7 @@ struct pdev_entry {
static LIST_HEAD(pdev_list);
static DEFINE_MUTEX(pdev_list_mutex);
-static int via_cputemp_device_add(unsigned int cpu)
+static int via_cputemp_online(unsigned int cpu)
{
int err;
struct platform_device *pdev;
@@ -261,7 +261,7 @@ exit:
return err;
}
-static void via_cputemp_device_remove(unsigned int cpu)
+static int via_cputemp_down_prep(unsigned int cpu)
{
struct pdev_entry *p;
@@ -272,33 +272,13 @@ static void via_cputemp_device_remove(unsigned int cpu)
list_del(&p->list);
mutex_unlock(&pdev_list_mutex);
kfree(p);
- return;
+ return 0;
}
}
mutex_unlock(&pdev_list_mutex);
+ return 0;
}
-static int via_cputemp_cpu_callback(struct notifier_block *nfb,
- unsigned long action, void *hcpu)
-{
- unsigned int cpu = (unsigned long) hcpu;
-
- switch (action) {
- case CPU_ONLINE:
- case CPU_DOWN_FAILED:
- via_cputemp_device_add(cpu);
- break;
- case CPU_DOWN_PREPARE:
- via_cputemp_device_remove(cpu);
- break;
- }
- return NOTIFY_OK;
-}
-
-static struct notifier_block via_cputemp_cpu_notifier __refdata = {
- .notifier_call = via_cputemp_cpu_callback,
-};
-
static const struct x86_cpu_id __initconst cputemp_ids[] = {
{ X86_VENDOR_CENTAUR, 6, 0xa, }, /* C7 A */
{ X86_VENDOR_CENTAUR, 6, 0xd, }, /* C7 D */
@@ -307,9 +287,11 @@ static const struct x86_cpu_id __initconst cputemp_ids[] = {
};
MODULE_DEVICE_TABLE(x86cpu, cputemp_ids);
+static enum cpuhp_state via_temp_online;
+
static int __init via_cputemp_init(void)
{
- int i, err;
+ int err;
if (!x86_match_cpu(cputemp_ids))
return -ENODEV;
@@ -318,58 +300,33 @@ static int __init via_cputemp_init(void)
if (err)
goto exit;
- cpu_notifier_register_begin();
- for_each_online_cpu(i) {
- struct cpuinfo_x86 *c = &cpu_data(i);
-
- if (c->x86 != 6)
- continue;
-
- if (c->x86_model < 0x0a)
- continue;
-
- if (c->x86_model > 0x0f) {
- pr_warn("Unknown CPU model 0x%x\n", c->x86_model);
- continue;
- }
-
- via_cputemp_device_add(i);
- }
+ err = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "hwmon/via:online",
+ via_cputemp_online, via_cputemp_down_prep);
+ if (err < 0)
+ goto exit_driver_unreg;
+ via_temp_online = err;
#ifndef CONFIG_HOTPLUG_CPU
if (list_empty(&pdev_list)) {
- cpu_notifier_register_done();
err = -ENODEV;
- goto exit_driver_unreg;
+ goto exit_hp_unreg;
}
#endif
-
- __register_hotcpu_notifier(&via_cputemp_cpu_notifier);
- cpu_notifier_register_done();
return 0;
#ifndef CONFIG_HOTPLUG_CPU
+exit_hp_unreg:
+ cpuhp_remove_state_nocalls(via_temp_online);
+#endif
exit_driver_unreg:
platform_driver_unregister(&via_cputemp_driver);
-#endif
exit:
return err;
}
static void __exit via_cputemp_exit(void)
{
- struct pdev_entry *p, *n;
-
- cpu_notifier_register_begin();
- __unregister_hotcpu_notifier(&via_cputemp_cpu_notifier);
- mutex_lock(&pdev_list_mutex);
- list_for_each_entry_safe(p, n, &pdev_list, list) {
- platform_device_unregister(p->pdev);
- list_del(&p->list);
- kfree(p);
- }
- mutex_unlock(&pdev_list_mutex);
- cpu_notifier_register_done();
+ cpuhp_remove_state(via_temp_online);
platform_driver_unregister(&via_cputemp_driver);
}
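
The via-cputemp conversion follows the standard hotplug state machine pattern: cpuhp_setup_state() invokes the online callback on every CPU that is already up (replacing the open-coded for_each_online_cpu() loop) and, with CPUHP_AP_ONLINE_DYN, returns the dynamically allocated state number needed for teardown. A generic sketch with hypothetical example_* names:

static enum cpuhp_state example_online_state;

static int example_online(unsigned int cpu)
{
	/* set up per-CPU resources; also called for CPUs already online */
	return 0;
}

static int example_down_prep(unsigned int cpu)
{
	/* tear down per-CPU resources before the CPU goes away */
	return 0;
}

static int __init example_init(void)
{
	int ret;

	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "subsys/example:online",
				example_online, example_down_prep);
	if (ret < 0)
		return ret;
	example_online_state = ret;	/* dynamic states return their number */
	return 0;
}

static void __exit example_exit(void)
{
	/* runs example_down_prep() on all online CPUs, then frees the state */
	cpuhp_remove_state(example_online_state);
}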
diff --git a/drivers/hwtracing/coresight/coresight-etm-perf.c b/drivers/hwtracing/coresight/coresight-etm-perf.c
index 2cd7c718198a..17741969026e 100644
--- a/drivers/hwtracing/coresight/coresight-etm-perf.c
+++ b/drivers/hwtracing/coresight/coresight-etm-perf.c
@@ -202,6 +202,21 @@ static void *etm_setup_aux(int event_cpu, void **pages,
if (!event_data)
return NULL;
+ /*
+	 * In theory nothing prevents tracers in a trace session from being
+	 * associated with different sinks, nor from having a sink per tracer.
+	 * But until we have HW with this kind of topology we need to assume
+	 * tracers in a trace session are using the same sink. Therefore go
+	 * through the coresight bus and pick the first enabled sink.
+	 *
+	 * When operated from sysFS, users are responsible for enabling the
+	 * sink, while from perf the perf tools do it based on the choice made
+	 * on the cmd line. As such the "enable_sink" flag in sysFS is reset.
+ */
+ sink = coresight_get_enabled_sink(true);
+ if (!sink)
+ goto err;
+
INIT_WORK(&event_data->work, free_event_data);
mask = &event_data->mask;
@@ -219,25 +234,11 @@ static void *etm_setup_aux(int event_cpu, void **pages,
* list of devices from source to sink that can be
* referenced later when the path is actually needed.
*/
- event_data->path[cpu] = coresight_build_path(csdev);
+ event_data->path[cpu] = coresight_build_path(csdev, sink);
if (IS_ERR(event_data->path[cpu]))
goto err;
}
- /*
- * In theory nothing prevent tracers in a trace session from being
- * associated with different sinks, nor having a sink per tracer. But
- * until we have HW with this kind of topology and a way to convey
- * sink assignement from the perf cmd line we need to assume tracers
- * in a trace session are using the same sink. Therefore pick the sink
- * found at the end of the first available path.
- */
- cpu = cpumask_first(mask);
- /* Grab the sink at the end of the path */
- sink = coresight_get_sink(event_data->path[cpu]);
- if (!sink)
- goto err;
-
if (!sink_ops(sink)->alloc_buffer)
goto err;
diff --git a/drivers/hwtracing/coresight/coresight-etm.h b/drivers/hwtracing/coresight/coresight-etm.h
index 4a18ee499965..ad063d7444e1 100644
--- a/drivers/hwtracing/coresight/coresight-etm.h
+++ b/drivers/hwtracing/coresight/coresight-etm.h
@@ -89,11 +89,13 @@
/* ETMCR - 0x00 */
#define ETMCR_PWD_DWN BIT(0)
#define ETMCR_STALL_MODE BIT(7)
+#define ETMCR_BRANCH_BROADCAST BIT(8)
#define ETMCR_ETM_PRG BIT(10)
#define ETMCR_ETM_EN BIT(11)
#define ETMCR_CYC_ACC BIT(12)
#define ETMCR_CTXID_SIZE (BIT(14)|BIT(15))
#define ETMCR_TIMESTAMP_EN BIT(28)
+#define ETMCR_RETURN_STACK BIT(29)
/* ETMCCR - 0x04 */
#define ETMCCR_FIFOFULL BIT(23)
/* ETMPDCR - 0x310 */
@@ -110,8 +112,11 @@
#define ETM_MODE_STALL BIT(2)
#define ETM_MODE_TIMESTAMP BIT(3)
#define ETM_MODE_CTXID BIT(4)
+#define ETM_MODE_BBROAD BIT(5)
+#define ETM_MODE_RET_STACK BIT(6)
#define ETM_MODE_ALL (ETM_MODE_EXCLUDE | ETM_MODE_CYCACC | \
ETM_MODE_STALL | ETM_MODE_TIMESTAMP | \
+ ETM_MODE_BBROAD | ETM_MODE_RET_STACK | \
ETM_MODE_CTXID | ETM_MODE_EXCL_KERN | \
ETM_MODE_EXCL_USER)
diff --git a/drivers/hwtracing/coresight/coresight-etm3x-sysfs.c b/drivers/hwtracing/coresight/coresight-etm3x-sysfs.c
index e9b071953f80..ca98ad13bb8c 100644
--- a/drivers/hwtracing/coresight/coresight-etm3x-sysfs.c
+++ b/drivers/hwtracing/coresight/coresight-etm3x-sysfs.c
@@ -146,7 +146,7 @@ static ssize_t mode_store(struct device *dev,
goto err_unlock;
}
config->ctrl |= ETMCR_STALL_MODE;
- } else
+ } else
config->ctrl &= ~ETMCR_STALL_MODE;
if (config->mode & ETM_MODE_TIMESTAMP) {
@@ -164,6 +164,16 @@ static ssize_t mode_store(struct device *dev,
else
config->ctrl &= ~ETMCR_CTXID_SIZE;
+ if (config->mode & ETM_MODE_BBROAD)
+ config->ctrl |= ETMCR_BRANCH_BROADCAST;
+ else
+ config->ctrl &= ~ETMCR_BRANCH_BROADCAST;
+
+ if (config->mode & ETM_MODE_RET_STACK)
+ config->ctrl |= ETMCR_RETURN_STACK;
+ else
+ config->ctrl &= ~ETMCR_RETURN_STACK;
+
if (config->mode & (ETM_MODE_EXCL_KERN | ETM_MODE_EXCL_USER))
etm_config_trace_mode(config);
diff --git a/drivers/hwtracing/coresight/coresight-priv.h b/drivers/hwtracing/coresight/coresight-priv.h
index 196a14be4b3d..ef9d8e93e3b2 100644
--- a/drivers/hwtracing/coresight/coresight-priv.h
+++ b/drivers/hwtracing/coresight/coresight-priv.h
@@ -111,7 +111,9 @@ static inline void CS_UNLOCK(void __iomem *addr)
void coresight_disable_path(struct list_head *path);
int coresight_enable_path(struct list_head *path, u32 mode);
struct coresight_device *coresight_get_sink(struct list_head *path);
-struct list_head *coresight_build_path(struct coresight_device *csdev);
+struct coresight_device *coresight_get_enabled_sink(bool reset);
+struct list_head *coresight_build_path(struct coresight_device *csdev,
+ struct coresight_device *sink);
void coresight_release_path(struct list_head *path);
#ifdef CONFIG_CORESIGHT_SOURCE_ETM3X
diff --git a/drivers/hwtracing/coresight/coresight-stm.c b/drivers/hwtracing/coresight/coresight-stm.c
index 49e0f1b925a5..e4c55c5f9988 100644
--- a/drivers/hwtracing/coresight/coresight-stm.c
+++ b/drivers/hwtracing/coresight/coresight-stm.c
@@ -406,7 +406,7 @@ static long stm_generic_set_options(struct stm_data *stm_data,
return 0;
}
-static ssize_t stm_generic_packet(struct stm_data *stm_data,
+static ssize_t notrace stm_generic_packet(struct stm_data *stm_data,
unsigned int master,
unsigned int channel,
unsigned int packet,
@@ -419,10 +419,10 @@ static ssize_t stm_generic_packet(struct stm_data *stm_data,
struct stm_drvdata, stm);
if (!(drvdata && local_read(&drvdata->mode)))
- return 0;
+ return -EACCES;
if (channel >= drvdata->numsp)
- return 0;
+ return -EINVAL;
ch_addr = (unsigned long)stm_channel_addr(drvdata, channel);
@@ -920,6 +920,11 @@ static struct amba_id stm_ids[] = {
.mask = 0x0003ffff,
.data = "STM32",
},
+ {
+ .id = 0x0003b963,
+ .mask = 0x0003ffff,
+ .data = "STM500",
+ },
{ 0, 0},
};
diff --git a/drivers/hwtracing/coresight/coresight-tmc-etf.c b/drivers/hwtracing/coresight/coresight-tmc-etf.c
index d6941ea24d8d..1549436e2492 100644
--- a/drivers/hwtracing/coresight/coresight-tmc-etf.c
+++ b/drivers/hwtracing/coresight/coresight-tmc-etf.c
@@ -70,7 +70,7 @@ static void tmc_etb_disable_hw(struct tmc_drvdata *drvdata)
* When operating in sysFS mode the content of the buffer needs to be
* read before the TMC is disabled.
*/
- if (local_read(&drvdata->mode) == CS_MODE_SYSFS)
+ if (drvdata->mode == CS_MODE_SYSFS)
tmc_etb_dump_hw(drvdata);
tmc_disable_hw(drvdata);
@@ -103,19 +103,14 @@ static void tmc_etf_disable_hw(struct tmc_drvdata *drvdata)
CS_LOCK(drvdata->base);
}
-static int tmc_enable_etf_sink_sysfs(struct coresight_device *csdev, u32 mode)
+static int tmc_enable_etf_sink_sysfs(struct coresight_device *csdev)
{
int ret = 0;
bool used = false;
char *buf = NULL;
- long val;
unsigned long flags;
struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
- /* This shouldn't be happening */
- if (WARN_ON(mode != CS_MODE_SYSFS))
- return -EINVAL;
-
/*
* If we don't have a buffer release the lock and allocate memory.
* Otherwise keep the lock and move along.
@@ -138,13 +133,12 @@ static int tmc_enable_etf_sink_sysfs(struct coresight_device *csdev, u32 mode)
goto out;
}
- val = local_xchg(&drvdata->mode, mode);
/*
* In sysFS mode we can have multiple writers per sink. Since this
* sink is already enabled no memory is needed and the HW need not be
* touched.
*/
- if (val == CS_MODE_SYSFS)
+ if (drvdata->mode == CS_MODE_SYSFS)
goto out;
/*
@@ -163,6 +157,7 @@ static int tmc_enable_etf_sink_sysfs(struct coresight_device *csdev, u32 mode)
drvdata->buf = buf;
}
+ drvdata->mode = CS_MODE_SYSFS;
tmc_etb_enable_hw(drvdata);
out:
spin_unlock_irqrestore(&drvdata->spinlock, flags);
@@ -177,34 +172,29 @@ out:
return ret;
}
-static int tmc_enable_etf_sink_perf(struct coresight_device *csdev, u32 mode)
+static int tmc_enable_etf_sink_perf(struct coresight_device *csdev)
{
int ret = 0;
- long val;
unsigned long flags;
struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
- /* This shouldn't be happening */
- if (WARN_ON(mode != CS_MODE_PERF))
- return -EINVAL;
-
spin_lock_irqsave(&drvdata->spinlock, flags);
if (drvdata->reading) {
ret = -EINVAL;
goto out;
}
- val = local_xchg(&drvdata->mode, mode);
/*
* In Perf mode there can be only one writer per sink. There
* is also no need to continue if the ETB/ETR is already operated
* from sysFS.
*/
- if (val != CS_MODE_DISABLED) {
+ if (drvdata->mode != CS_MODE_DISABLED) {
ret = -EINVAL;
goto out;
}
+ drvdata->mode = CS_MODE_PERF;
tmc_etb_enable_hw(drvdata);
out:
spin_unlock_irqrestore(&drvdata->spinlock, flags);
@@ -216,9 +206,9 @@ static int tmc_enable_etf_sink(struct coresight_device *csdev, u32 mode)
{
switch (mode) {
case CS_MODE_SYSFS:
- return tmc_enable_etf_sink_sysfs(csdev, mode);
+ return tmc_enable_etf_sink_sysfs(csdev);
case CS_MODE_PERF:
- return tmc_enable_etf_sink_perf(csdev, mode);
+ return tmc_enable_etf_sink_perf(csdev);
}
/* We shouldn't be here */
@@ -227,7 +217,6 @@ static int tmc_enable_etf_sink(struct coresight_device *csdev, u32 mode)
static void tmc_disable_etf_sink(struct coresight_device *csdev)
{
- long val;
unsigned long flags;
struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
@@ -237,10 +226,11 @@ static void tmc_disable_etf_sink(struct coresight_device *csdev)
return;
}
- val = local_xchg(&drvdata->mode, CS_MODE_DISABLED);
/* Disable the TMC only if it needs to */
- if (val != CS_MODE_DISABLED)
+ if (drvdata->mode != CS_MODE_DISABLED) {
tmc_etb_disable_hw(drvdata);
+ drvdata->mode = CS_MODE_DISABLED;
+ }
spin_unlock_irqrestore(&drvdata->spinlock, flags);
@@ -260,7 +250,7 @@ static int tmc_enable_etf_link(struct coresight_device *csdev,
}
tmc_etf_enable_hw(drvdata);
- local_set(&drvdata->mode, CS_MODE_SYSFS);
+ drvdata->mode = CS_MODE_SYSFS;
spin_unlock_irqrestore(&drvdata->spinlock, flags);
dev_info(drvdata->dev, "TMC-ETF enabled\n");
@@ -280,7 +270,7 @@ static void tmc_disable_etf_link(struct coresight_device *csdev,
}
tmc_etf_disable_hw(drvdata);
- local_set(&drvdata->mode, CS_MODE_DISABLED);
+ drvdata->mode = CS_MODE_DISABLED;
spin_unlock_irqrestore(&drvdata->spinlock, flags);
dev_info(drvdata->dev, "TMC disabled\n");
@@ -383,7 +373,7 @@ static void tmc_update_etf_buffer(struct coresight_device *csdev,
return;
/* This shouldn't happen */
- if (WARN_ON_ONCE(local_read(&drvdata->mode) != CS_MODE_PERF))
+ if (WARN_ON_ONCE(drvdata->mode != CS_MODE_PERF))
return;
CS_UNLOCK(drvdata->base);
@@ -504,7 +494,6 @@ const struct coresight_ops tmc_etf_cs_ops = {
int tmc_read_prepare_etb(struct tmc_drvdata *drvdata)
{
- long val;
enum tmc_mode mode;
int ret = 0;
unsigned long flags;
@@ -528,9 +517,8 @@ int tmc_read_prepare_etb(struct tmc_drvdata *drvdata)
goto out;
}
- val = local_read(&drvdata->mode);
/* Don't interfere if operated from Perf */
- if (val == CS_MODE_PERF) {
+ if (drvdata->mode == CS_MODE_PERF) {
ret = -EINVAL;
goto out;
}
@@ -542,7 +530,7 @@ int tmc_read_prepare_etb(struct tmc_drvdata *drvdata)
}
/* Disable the TMC if need be */
- if (val == CS_MODE_SYSFS)
+ if (drvdata->mode == CS_MODE_SYSFS)
tmc_etb_disable_hw(drvdata);
drvdata->reading = true;
@@ -573,7 +561,7 @@ int tmc_read_unprepare_etb(struct tmc_drvdata *drvdata)
}
/* Re-enable the TMC if need be */
- if (local_read(&drvdata->mode) == CS_MODE_SYSFS) {
+ if (drvdata->mode == CS_MODE_SYSFS) {
/*
* The trace run will continue with the same allocated trace
* buffer. As such zero-out the buffer so that we don't end
diff --git a/drivers/hwtracing/coresight/coresight-tmc-etr.c b/drivers/hwtracing/coresight/coresight-tmc-etr.c
index 886ea83c68e0..5d312699b3b9 100644
--- a/drivers/hwtracing/coresight/coresight-tmc-etr.c
+++ b/drivers/hwtracing/coresight/coresight-tmc-etr.c
@@ -86,26 +86,22 @@ static void tmc_etr_disable_hw(struct tmc_drvdata *drvdata)
* When operating in sysFS mode the content of the buffer needs to be
* read before the TMC is disabled.
*/
- if (local_read(&drvdata->mode) == CS_MODE_SYSFS)
+ if (drvdata->mode == CS_MODE_SYSFS)
tmc_etr_dump_hw(drvdata);
tmc_disable_hw(drvdata);
CS_LOCK(drvdata->base);
}
-static int tmc_enable_etr_sink_sysfs(struct coresight_device *csdev, u32 mode)
+static int tmc_enable_etr_sink_sysfs(struct coresight_device *csdev)
{
int ret = 0;
bool used = false;
- long val;
unsigned long flags;
void __iomem *vaddr = NULL;
dma_addr_t paddr;
struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
- /* This shouldn't be happening */
- if (WARN_ON(mode != CS_MODE_SYSFS))
- return -EINVAL;
/*
* If we don't have a buffer release the lock and allocate memory.
@@ -134,13 +130,12 @@ static int tmc_enable_etr_sink_sysfs(struct coresight_device *csdev, u32 mode)
goto out;
}
- val = local_xchg(&drvdata->mode, mode);
/*
* In sysFS mode we can have multiple writers per sink. Since this
* sink is already enabled no memory is needed and the HW need not be
* touched.
*/
- if (val == CS_MODE_SYSFS)
+ if (drvdata->mode == CS_MODE_SYSFS)
goto out;
/*
@@ -155,8 +150,7 @@ static int tmc_enable_etr_sink_sysfs(struct coresight_device *csdev, u32 mode)
drvdata->buf = drvdata->vaddr;
}
- memset(drvdata->vaddr, 0, drvdata->size);
-
+ drvdata->mode = CS_MODE_SYSFS;
tmc_etr_enable_hw(drvdata);
out:
spin_unlock_irqrestore(&drvdata->spinlock, flags);
@@ -171,34 +165,29 @@ out:
return ret;
}
-static int tmc_enable_etr_sink_perf(struct coresight_device *csdev, u32 mode)
+static int tmc_enable_etr_sink_perf(struct coresight_device *csdev)
{
int ret = 0;
- long val;
unsigned long flags;
struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
- /* This shouldn't be happening */
- if (WARN_ON(mode != CS_MODE_PERF))
- return -EINVAL;
-
spin_lock_irqsave(&drvdata->spinlock, flags);
if (drvdata->reading) {
ret = -EINVAL;
goto out;
}
- val = local_xchg(&drvdata->mode, mode);
/*
* In Perf mode there can be only one writer per sink. There
* is also no need to continue if the ETR is already operated
* from sysFS.
*/
- if (val != CS_MODE_DISABLED) {
+ if (drvdata->mode != CS_MODE_DISABLED) {
ret = -EINVAL;
goto out;
}
+ drvdata->mode = CS_MODE_PERF;
tmc_etr_enable_hw(drvdata);
out:
spin_unlock_irqrestore(&drvdata->spinlock, flags);
@@ -210,9 +199,9 @@ static int tmc_enable_etr_sink(struct coresight_device *csdev, u32 mode)
{
switch (mode) {
case CS_MODE_SYSFS:
- return tmc_enable_etr_sink_sysfs(csdev, mode);
+ return tmc_enable_etr_sink_sysfs(csdev);
case CS_MODE_PERF:
- return tmc_enable_etr_sink_perf(csdev, mode);
+ return tmc_enable_etr_sink_perf(csdev);
}
/* We shouldn't be here */
@@ -221,7 +210,6 @@ static int tmc_enable_etr_sink(struct coresight_device *csdev, u32 mode)
static void tmc_disable_etr_sink(struct coresight_device *csdev)
{
- long val;
unsigned long flags;
struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
@@ -231,10 +219,11 @@ static void tmc_disable_etr_sink(struct coresight_device *csdev)
return;
}
- val = local_xchg(&drvdata->mode, CS_MODE_DISABLED);
/* Disable the TMC only if it needs to */
- if (val != CS_MODE_DISABLED)
+ if (drvdata->mode != CS_MODE_DISABLED) {
tmc_etr_disable_hw(drvdata);
+ drvdata->mode = CS_MODE_DISABLED;
+ }
spin_unlock_irqrestore(&drvdata->spinlock, flags);
@@ -253,7 +242,6 @@ const struct coresight_ops tmc_etr_cs_ops = {
int tmc_read_prepare_etr(struct tmc_drvdata *drvdata)
{
int ret = 0;
- long val;
unsigned long flags;
/* config types are set at boot time and never change */
@@ -266,9 +254,8 @@ int tmc_read_prepare_etr(struct tmc_drvdata *drvdata)
goto out;
}
- val = local_read(&drvdata->mode);
/* Don't interfere if operated from Perf */
- if (val == CS_MODE_PERF) {
+ if (drvdata->mode == CS_MODE_PERF) {
ret = -EINVAL;
goto out;
}
@@ -280,7 +267,7 @@ int tmc_read_prepare_etr(struct tmc_drvdata *drvdata)
}
/* Disable the TMC if need be */
- if (val == CS_MODE_SYSFS)
+ if (drvdata->mode == CS_MODE_SYSFS)
tmc_etr_disable_hw(drvdata);
drvdata->reading = true;
@@ -303,7 +290,7 @@ int tmc_read_unprepare_etr(struct tmc_drvdata *drvdata)
spin_lock_irqsave(&drvdata->spinlock, flags);
/* Re-enable the TMC if need be */
- if (local_read(&drvdata->mode) == CS_MODE_SYSFS) {
+ if (drvdata->mode == CS_MODE_SYSFS) {
/*
* The trace run will continue with the same allocated trace
* buffer. The trace buffer is cleared in tmc_etr_enable_hw(),
diff --git a/drivers/hwtracing/coresight/coresight-tmc.h b/drivers/hwtracing/coresight/coresight-tmc.h
index 44b3ae346118..51c01851533e 100644
--- a/drivers/hwtracing/coresight/coresight-tmc.h
+++ b/drivers/hwtracing/coresight/coresight-tmc.h
@@ -117,7 +117,7 @@ struct tmc_drvdata {
void __iomem *vaddr;
u32 size;
u32 len;
- local_t mode;
+ u32 mode;
enum tmc_config_type config_type;
enum tmc_mem_intf_width memwidth;
u32 trigger_cntr;
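Note: the hunks above convert drvdata->mode from a lock-free local_t into a plain u32 that is only ever read or written inside drvdata->spinlock. A minimal sketch of the resulting pattern, reusing the TMC types from this series (tmc_example_enable() is hypothetical and only illustrates the locking rule):

static int tmc_example_enable(struct tmc_drvdata *drvdata)
{
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&drvdata->spinlock, flags);
	if (drvdata->mode == CS_MODE_DISABLED) {
		/* state transition and HW access both happen under the lock */
		drvdata->mode = CS_MODE_SYSFS;
		tmc_etb_enable_hw(drvdata);
	} else {
		ret = -EBUSY;
	}
	spin_unlock_irqrestore(&drvdata->spinlock, flags);

	return ret;
}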
diff --git a/drivers/hwtracing/coresight/coresight.c b/drivers/hwtracing/coresight/coresight.c
index 7bf00a0beb6f..0c37356e417c 100644
--- a/drivers/hwtracing/coresight/coresight.c
+++ b/drivers/hwtracing/coresight/coresight.c
@@ -368,6 +368,52 @@ struct coresight_device *coresight_get_sink(struct list_head *path)
return csdev;
}
+static int coresight_enabled_sink(struct device *dev, void *data)
+{
+ bool *reset = data;
+ struct coresight_device *csdev = to_coresight_device(dev);
+
+ if ((csdev->type == CORESIGHT_DEV_TYPE_SINK ||
+ csdev->type == CORESIGHT_DEV_TYPE_LINKSINK) &&
+ csdev->activated) {
+ /*
+ * Now that we have a handle on the sink for this session,
+ * disable the sysFS "enable_sink" flag so that possible
+ * concurrent perf sessions that wish to use another sink don't
+ * trip on it. Doing so has no ramification for the current
+ * session.
+ */
+ if (*reset)
+ csdev->activated = false;
+
+ return 1;
+ }
+
+ return 0;
+}
+
+/**
+ * coresight_get_enabled_sink - returns the first enabled sink found on the bus
+ * @deactivate: Whether the 'enable_sink' flag should be reset
+ *
+ * When operated from perf the deactivate parameter should be set to 'true'.
+ * That way the "enable_sink" flag of the sink that was selected can be reset,
+ * allowing for other concurrent perf sessions to choose a different sink.
+ *
+ * When operated from sysFS users have full control and as such the deactivate
+ * parameter should be set to 'false', hence mandating users to explicitly
+ * clear the flag.
+ */
+struct coresight_device *coresight_get_enabled_sink(bool deactivate)
+{
+ struct device *dev = NULL;
+
+ dev = bus_find_device(&coresight_bustype, NULL, &deactivate,
+ coresight_enabled_sink);
+
+ return dev ? to_coresight_device(dev) : NULL;
+}
+
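Note: a short usage sketch for the new helper, based on the call sites this series adds. Perf-driven sessions pass deactivate=true so the sysFS "enable_sink" flag is consumed atomically with sink selection, while sysFS-driven sessions pass false and leave the flag for the user to clear. The error handling below is illustrative only:

	struct coresight_device *sink;

	/* perf session: grab the sink and clear its enable_sink flag */
	sink = coresight_get_enabled_sink(true);
	if (!sink)
		return -EINVAL; /* no sink was activated via sysFS */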
/**
* _coresight_build_path - recursively build a path from a @csdev to a sink.
* @csdev: The device to start from.
@@ -380,6 +426,7 @@ struct coresight_device *coresight_get_sink(struct list_head *path)
* last one.
*/
static int _coresight_build_path(struct coresight_device *csdev,
+ struct coresight_device *sink,
struct list_head *path)
{
int i;
@@ -387,15 +434,15 @@ static int _coresight_build_path(struct coresight_device *csdev,
struct coresight_node *node;
/* An activated sink has been found. Enqueue the element */
- if ((csdev->type == CORESIGHT_DEV_TYPE_SINK ||
- csdev->type == CORESIGHT_DEV_TYPE_LINKSINK) && csdev->activated)
+ if (csdev == sink)
goto out;
/* Not a sink - recursively explore each port found on this element */
for (i = 0; i < csdev->nr_outport; i++) {
struct coresight_device *child_dev = csdev->conns[i].child_dev;
- if (child_dev && _coresight_build_path(child_dev, path) == 0) {
+ if (child_dev &&
+ _coresight_build_path(child_dev, sink, path) == 0) {
found = true;
break;
}
@@ -422,18 +469,22 @@ out:
return 0;
}
-struct list_head *coresight_build_path(struct coresight_device *csdev)
+struct list_head *coresight_build_path(struct coresight_device *source,
+ struct coresight_device *sink)
{
struct list_head *path;
int rc;
+ if (!sink)
+ return ERR_PTR(-EINVAL);
+
path = kzalloc(sizeof(struct list_head), GFP_KERNEL);
if (!path)
return ERR_PTR(-ENOMEM);
INIT_LIST_HEAD(path);
- rc = _coresight_build_path(csdev, path);
+ rc = _coresight_build_path(source, sink, path);
if (rc) {
kfree(path);
return ERR_PTR(rc);
@@ -497,6 +548,7 @@ static int coresight_validate_source(struct coresight_device *csdev,
int coresight_enable(struct coresight_device *csdev)
{
int cpu, ret = 0;
+ struct coresight_device *sink;
struct list_head *path;
mutex_lock(&coresight_mutex);
@@ -508,7 +560,17 @@ int coresight_enable(struct coresight_device *csdev)
if (csdev->enable)
goto out;
- path = coresight_build_path(csdev);
+ /*
+ * Search for a valid sink for this session but don't reset the
+ * "enable_sink" flag in sysFS. Users get to do that explicitly.
+ */
+ sink = coresight_get_enabled_sink(false);
+ if (!sink) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ path = coresight_build_path(csdev, sink);
if (IS_ERR(path)) {
pr_err("building path(s) failed\n");
ret = PTR_ERR(path);
diff --git a/drivers/hwtracing/intel_th/core.c b/drivers/hwtracing/intel_th/core.c
index 6f0a51a2c6ec..cdd9b3b26195 100644
--- a/drivers/hwtracing/intel_th/core.c
+++ b/drivers/hwtracing/intel_th/core.c
@@ -29,6 +29,9 @@
#include "intel_th.h"
#include "debug.h"
+static bool host_mode __read_mostly;
+module_param(host_mode, bool, 0444);
+
static DEFINE_IDA(intel_th_ida);
static int intel_th_match(struct device *dev, struct device_driver *driver)
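Note: since host_mode is registered with permissions 0444 it is load-time only; it appears read-only under /sys/module/<module>/parameters/host_mode and would typically be set on the kernel command line or at modprobe time, e.g. (module name assumed to be intel_th):

	intel_th.host_mode=1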
@@ -380,7 +383,7 @@ static void intel_th_device_free(struct intel_th_device *thdev)
/*
* Intel(R) Trace Hub subdevices
*/
-static struct intel_th_subdevice {
+static const struct intel_th_subdevice {
const char *name;
struct resource res[3];
unsigned nres;
@@ -527,14 +530,19 @@ static int intel_th_populate(struct intel_th *th, struct resource *devres,
{
struct resource res[3];
unsigned int req = 0;
- int i, err;
+ int src, dst, err;
/* create devices for each intel_th_subdevice */
- for (i = 0; i < ARRAY_SIZE(intel_th_subdevices); i++) {
- struct intel_th_subdevice *subdev = &intel_th_subdevices[i];
+ for (src = 0, dst = 0; src < ARRAY_SIZE(intel_th_subdevices); src++) {
+ const struct intel_th_subdevice *subdev =
+ &intel_th_subdevices[src];
struct intel_th_device *thdev;
int r;
+ /* only allow SOURCE and SWITCH devices in host mode */
+ if (host_mode && subdev->type == INTEL_TH_OUTPUT)
+ continue;
+
thdev = intel_th_device_alloc(th, subdev->type, subdev->name,
subdev->id);
if (!thdev) {
@@ -577,10 +585,12 @@ static int intel_th_populate(struct intel_th *th, struct resource *devres,
}
if (subdev->type == INTEL_TH_OUTPUT) {
- thdev->dev.devt = MKDEV(th->major, i);
+ thdev->dev.devt = MKDEV(th->major, dst);
thdev->output.type = subdev->otype;
thdev->output.port = -1;
thdev->output.scratchpad = subdev->scrpd;
+ } else if (subdev->type == INTEL_TH_SWITCH) {
+ thdev->host_mode = host_mode;
}
err = device_add(&thdev->dev);
@@ -597,14 +607,14 @@ static int intel_th_populate(struct intel_th *th, struct resource *devres,
req++;
}
- th->thdev[i] = thdev;
+ th->thdev[dst++] = thdev;
}
return 0;
kill_subdevs:
- for (i-- ; i >= 0; i--)
- intel_th_device_remove(th->thdev[i]);
+ for (dst--; dst >= 0; dst--)
+ intel_th_device_remove(th->thdev[dst]);
return err;
}
@@ -717,7 +727,7 @@ void intel_th_free(struct intel_th *th)
intel_th_request_hub_module_flush(th);
for (i = 0; i < TH_SUBDEVICE_MAX; i++)
- if (th->thdev[i] != th->hub)
+ if (th->thdev[i] && th->thdev[i] != th->hub)
intel_th_device_remove(th->thdev[i]);
intel_th_device_remove(th->hub);
diff --git a/drivers/hwtracing/intel_th/gth.c b/drivers/hwtracing/intel_th/gth.c
index 33e09369a491..dd32d0bad687 100644
--- a/drivers/hwtracing/intel_th/gth.c
+++ b/drivers/hwtracing/intel_th/gth.c
@@ -564,6 +564,9 @@ static int intel_th_gth_assign(struct intel_th_device *thdev,
struct gth_device *gth = dev_get_drvdata(&thdev->dev);
int i, id;
+ if (thdev->host_mode)
+ return -EBUSY;
+
if (othdev->type != INTEL_TH_OUTPUT)
return -EINVAL;
@@ -600,6 +603,9 @@ static void intel_th_gth_unassign(struct intel_th_device *thdev,
struct gth_device *gth = dev_get_drvdata(&thdev->dev);
int port = othdev->output.port;
+ if (thdev->host_mode)
+ return;
+
spin_lock(&gth->gth_lock);
othdev->output.port = -1;
othdev->output.active = false;
@@ -654,9 +660,24 @@ static int intel_th_gth_probe(struct intel_th_device *thdev)
gth->base = base;
spin_lock_init(&gth->gth_lock);
+ /*
+ * Host mode can be signalled via SW means or via SCRPD_DEBUGGER_IN_USE
+ * bit. Either way, don't reset HW in this case, and don't export any
+ * capture configuration attributes. Also, refuse to assign output
+ * drivers to ports, see intel_th_gth_assign().
+ */
+ if (thdev->host_mode)
+ goto done;
+
ret = intel_th_gth_reset(gth);
- if (ret)
- return ret;
+ if (ret) {
+ if (ret != -EBUSY)
+ return ret;
+
+ thdev->host_mode = true;
+
+ goto done;
+ }
for (i = 0; i < TH_CONFIGURABLE_MASTERS + 1; i++)
gth->master[i] = -1;
@@ -677,6 +698,7 @@ static int intel_th_gth_probe(struct intel_th_device *thdev)
return -ENOMEM;
}
+done:
dev_set_drvdata(dev, gth);
return 0;
diff --git a/drivers/hwtracing/intel_th/intel_th.h b/drivers/hwtracing/intel_th/intel_th.h
index 4c195786bf1f..3096e7054f6d 100644
--- a/drivers/hwtracing/intel_th/intel_th.h
+++ b/drivers/hwtracing/intel_th/intel_th.h
@@ -54,6 +54,7 @@ struct intel_th_output {
* @num_resources: number of resources in @resource array
* @type: INTEL_TH_{SOURCE,OUTPUT,SWITCH}
* @id: device instance or -1
+ * @host_mode: Intel TH is controlled by an external debug host
* @output: output descriptor for INTEL_TH_OUTPUT devices
* @name: device name to match the driver
*/
@@ -64,6 +65,9 @@ struct intel_th_device {
unsigned int type;
int id;
+ /* INTEL_TH_SWITCH specific */
+ bool host_mode;
+
/* INTEL_TH_OUTPUT specific */
struct intel_th_output output;
diff --git a/drivers/hwtracing/intel_th/sth.c b/drivers/hwtracing/intel_th/sth.c
index e1aee61dd7b3..b03444624648 100644
--- a/drivers/hwtracing/intel_th/sth.c
+++ b/drivers/hwtracing/intel_th/sth.c
@@ -67,10 +67,13 @@ static void sth_iowrite(void __iomem *dest, const unsigned char *payload,
}
}
-static ssize_t sth_stm_packet(struct stm_data *stm_data, unsigned int master,
- unsigned int channel, unsigned int packet,
- unsigned int flags, unsigned int size,
- const unsigned char *payload)
+static ssize_t notrace sth_stm_packet(struct stm_data *stm_data,
+ unsigned int master,
+ unsigned int channel,
+ unsigned int packet,
+ unsigned int flags,
+ unsigned int size,
+ const unsigned char *payload)
{
struct sth_device *sth = container_of(stm_data, struct sth_device, stm);
struct intel_th_channel __iomem *out =
diff --git a/drivers/hwtracing/stm/Kconfig b/drivers/hwtracing/stm/Kconfig
index 847a39b35307..723e2d90083d 100644
--- a/drivers/hwtracing/stm/Kconfig
+++ b/drivers/hwtracing/stm/Kconfig
@@ -39,4 +39,15 @@ config STM_SOURCE_HEARTBEAT
If you want to send heartbeat messages over STM devices,
say Y.
+config STM_SOURCE_FTRACE
+ tristate "Copy the output from kernel Ftrace to STM engine"
+ depends on FUNCTION_TRACER
+ help
+ This option can be used to copy the output from kernel Ftrace
+ to an STM engine. Enabling this option will introduce a slight
+ timing effect.
+
+ If you want to send kernel Ftrace messages over STM devices,
+ say Y.
+
endif
diff --git a/drivers/hwtracing/stm/Makefile b/drivers/hwtracing/stm/Makefile
index a9ce3d487e57..3abd84ce13d4 100644
--- a/drivers/hwtracing/stm/Makefile
+++ b/drivers/hwtracing/stm/Makefile
@@ -6,6 +6,8 @@ obj-$(CONFIG_STM_DUMMY) += dummy_stm.o
obj-$(CONFIG_STM_SOURCE_CONSOLE) += stm_console.o
obj-$(CONFIG_STM_SOURCE_HEARTBEAT) += stm_heartbeat.o
+obj-$(CONFIG_STM_SOURCE_FTRACE) += stm_ftrace.o
stm_console-y := console.o
stm_heartbeat-y := heartbeat.o
+stm_ftrace-y := ftrace.o
diff --git a/drivers/hwtracing/stm/core.c b/drivers/hwtracing/stm/core.c
index 51f81d64ca37..0e731143f6a4 100644
--- a/drivers/hwtracing/stm/core.c
+++ b/drivers/hwtracing/stm/core.c
@@ -361,7 +361,7 @@ static int stm_char_open(struct inode *inode, struct file *file)
struct stm_file *stmf;
struct device *dev;
unsigned int major = imajor(inode);
- int err = -ENODEV;
+ int err = -ENOMEM;
dev = class_find_device(&stm_class, NULL, &major, major_match);
if (!dev)
@@ -369,8 +369,9 @@ static int stm_char_open(struct inode *inode, struct file *file)
stmf = kzalloc(sizeof(*stmf), GFP_KERNEL);
if (!stmf)
- return -ENOMEM;
+ goto err_put_device;
+ err = -ENODEV;
stm_output_init(&stmf->output);
stmf->stm = to_stm_device(dev);
@@ -382,9 +383,10 @@ static int stm_char_open(struct inode *inode, struct file *file)
return nonseekable_open(inode, file);
err_free:
+ kfree(stmf);
+err_put_device:
/* matches class_find_device() above */
put_device(dev);
- kfree(stmf);
return err;
}
@@ -425,7 +427,7 @@ static int stm_file_assign(struct stm_file *stmf, char *id, unsigned int width)
return ret;
}
-static ssize_t stm_write(struct stm_data *data, unsigned int master,
+static ssize_t notrace stm_write(struct stm_data *data, unsigned int master,
unsigned int channel, const char *buf, size_t count)
{
unsigned int flags = STP_PACKET_TIMESTAMPED;
@@ -1121,8 +1123,9 @@ void stm_source_unregister_device(struct stm_source_data *data)
}
EXPORT_SYMBOL_GPL(stm_source_unregister_device);
-int stm_source_write(struct stm_source_data *data, unsigned int chan,
- const char *buf, size_t count)
+int notrace stm_source_write(struct stm_source_data *data,
+ unsigned int chan,
+ const char *buf, size_t count)
{
struct stm_source_device *src = data->src;
struct stm_device *stm;
diff --git a/drivers/hwtracing/stm/dummy_stm.c b/drivers/hwtracing/stm/dummy_stm.c
index a86612d989f9..c5f94ca31c4d 100644
--- a/drivers/hwtracing/stm/dummy_stm.c
+++ b/drivers/hwtracing/stm/dummy_stm.c
@@ -21,7 +21,7 @@
#include <linux/slab.h>
#include <linux/stm.h>
-static ssize_t
+static ssize_t notrace
dummy_stm_packet(struct stm_data *stm_data, unsigned int master,
unsigned int channel, unsigned int packet, unsigned int flags,
unsigned int size, const unsigned char *payload)
diff --git a/drivers/hwtracing/stm/ftrace.c b/drivers/hwtracing/stm/ftrace.c
new file mode 100644
index 000000000000..bd126a7c6da2
--- /dev/null
+++ b/drivers/hwtracing/stm/ftrace.c
@@ -0,0 +1,87 @@
+/*
+ * Simple kernel driver to link kernel Ftrace and an STM device
+ * Copyright (c) 2016, Linaro Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * STM Ftrace will be registered as a trace_export.
+ */
+
+#include <linux/module.h>
+#include <linux/stm.h>
+#include <linux/trace.h>
+
+#define STM_FTRACE_NR_CHANNELS 1
+#define STM_FTRACE_CHAN 0
+
+static int stm_ftrace_link(struct stm_source_data *data);
+static void stm_ftrace_unlink(struct stm_source_data *data);
+
+static struct stm_ftrace {
+ struct stm_source_data data;
+ struct trace_export ftrace;
+} stm_ftrace = {
+ .data = {
+ .name = "ftrace",
+ .nr_chans = STM_FTRACE_NR_CHANNELS,
+ .link = stm_ftrace_link,
+ .unlink = stm_ftrace_unlink,
+ },
+};
+
+/**
+ * stm_ftrace_write() - write data to STM via 'stm_ftrace' source
+ * @buf: buffer containing the data packet
+ * @len: length of the data packet
+ */
+static void notrace
+stm_ftrace_write(const void *buf, unsigned int len)
+{
+ stm_source_write(&stm_ftrace.data, STM_FTRACE_CHAN, buf, len);
+}
+
+static int stm_ftrace_link(struct stm_source_data *data)
+{
+ struct stm_ftrace *sf = container_of(data, struct stm_ftrace, data);
+
+ sf->ftrace.write = stm_ftrace_write;
+
+ return register_ftrace_export(&sf->ftrace);
+}
+
+static void stm_ftrace_unlink(struct stm_source_data *data)
+{
+ struct stm_ftrace *sf = container_of(data, struct stm_ftrace, data);
+
+ unregister_ftrace_export(&sf->ftrace);
+}
+
+static int __init stm_ftrace_init(void)
+{
+ int ret;
+
+ ret = stm_source_register_device(NULL, &stm_ftrace.data);
+ if (ret)
+ pr_err("Failed to register stm_source - ftrace.\n");
+
+ return ret;
+}
+
+static void __exit stm_ftrace_exit(void)
+{
+ stm_source_unregister_device(&stm_ftrace.data);
+}
+
+module_init(stm_ftrace_init);
+module_exit(stm_ftrace_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("stm_ftrace driver");
+MODULE_AUTHOR("Chunyan Zhang <zhang.chunyan@linaro.org>");
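Note: once loaded, the new source behaves like the existing console and heartbeat sources: it is attached to a concrete STM device through the generic stm_source sysfs link. A hedged usage sketch, with the STM device name assumed:

	# route kernel Ftrace output to an STM device, e.g. the dummy one
	echo dummy_stm.0 > /sys/class/stm_source/ftrace/stm_source_link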
diff --git a/drivers/i2c/Kconfig b/drivers/i2c/Kconfig
index 11edabf425ae..efc3354d60ae 100644
--- a/drivers/i2c/Kconfig
+++ b/drivers/i2c/Kconfig
@@ -7,6 +7,7 @@ menu "I2C support"
config I2C
tristate "I2C support"
select RT_MUTEXES
+ select IRQ_DOMAIN
---help---
I2C (pronounce: I-squared-C) is a slow serial bus protocol used in
many micro controller applications and developed by Philips. SMBus,
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index d252276feadf..0cdc8443deab 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -426,7 +426,7 @@ config I2C_BLACKFIN_TWI_CLK_KHZ
config I2C_CADENCE
tristate "Cadence I2C Controller"
- depends on ARCH_ZYNQ || ARM64
+ depends on ARCH_ZYNQ || ARM64 || XTENSA
help
Say yes here to select Cadence I2C Host Controller. This controller is
e.g. used by Xilinx Zynq.
@@ -597,6 +597,16 @@ config I2C_IMX
This driver can also be built as a module. If so, the module
will be called i2c-imx.
+config I2C_IMX_LPI2C
+ tristate "IMX Low Power I2C interface"
+ depends on ARCH_MXC || COMPILE_TEST
+ help
+ Say Y here if you want to use the Low Power I2C (LPI2C) bus
+ controller on Freescale i.MX processors.
+
+ This driver can also be built as a module. If so, the module
+ will be called i2c-imx-lpi2c.
+
config I2C_IOP3XX
tristate "Intel IOPx3xx and IXP4xx on-chip I2C interface"
depends on ARCH_IOP32X || ARCH_IOP33X || ARCH_IXP4XX || ARCH_IOP13XX
@@ -763,7 +773,7 @@ config I2C_PUV3
config I2C_PXA
tristate "Intel PXA2XX I2C adapter"
- depends on ARCH_PXA || ARCH_MMP || (X86_32 && PCI && OF)
+ depends on ARCH_PXA || ARCH_MMP || ARCH_MVEBU || (X86_32 && PCI && OF)
help
If you have devices in the PXA I2C bus, say yes to this option.
This driver can also be built as a module. If so, the module
@@ -1150,6 +1160,17 @@ config I2C_ELEKTOR
This support is also available as a module. If so, the module
will be called i2c-elektor.
+config I2C_MLXCPLD
+ tristate "Mellanox I2C driver"
+ depends on X86_64
+ help
+ This exposes the Mellanox platform I2C busses to the Linux I2C layer
+ on x86-based systems.
+ The controller is implemented as CPLD logic.
+
+ This driver can also be built as a module. If so, the module will be
+ called i2c-mlxcpld.
+
config I2C_PCA_ISA
tristate "PCA9564/PCA9665 on an ISA bus"
depends on ISA
diff --git a/drivers/i2c/busses/Makefile b/drivers/i2c/busses/Makefile
index 29764cc20a44..1c1bac87a9db 100644
--- a/drivers/i2c/busses/Makefile
+++ b/drivers/i2c/busses/Makefile
@@ -56,6 +56,7 @@ obj-$(CONFIG_I2C_HIX5HD2) += i2c-hix5hd2.o
obj-$(CONFIG_I2C_IBM_IIC) += i2c-ibm_iic.o
obj-$(CONFIG_I2C_IMG) += i2c-img-scb.o
obj-$(CONFIG_I2C_IMX) += i2c-imx.o
+obj-$(CONFIG_I2C_IMX_LPI2C) += i2c-imx-lpi2c.o
obj-$(CONFIG_I2C_IOP3XX) += i2c-iop3xx.o
obj-$(CONFIG_I2C_JZ4780) += i2c-jz4780.o
obj-$(CONFIG_I2C_KEMPLD) += i2c-kempld.o
@@ -116,6 +117,7 @@ obj-$(CONFIG_I2C_BCM_KONA) += i2c-bcm-kona.o
obj-$(CONFIG_I2C_BRCMSTB) += i2c-brcmstb.o
obj-$(CONFIG_I2C_CROS_EC_TUNNEL) += i2c-cros-ec-tunnel.o
obj-$(CONFIG_I2C_ELEKTOR) += i2c-elektor.o
+obj-$(CONFIG_I2C_MLXCPLD) += i2c-mlxcpld.o
obj-$(CONFIG_I2C_OPAL) += i2c-opal.o
obj-$(CONFIG_I2C_PCA_ISA) += i2c-pca-isa.o
obj-$(CONFIG_I2C_SIBYTE) += i2c-sibyte.o
diff --git a/drivers/i2c/busses/i2c-axxia.c b/drivers/i2c/busses/i2c-axxia.c
index 4351a9343058..13f07482ec68 100644
--- a/drivers/i2c/busses/i2c-axxia.c
+++ b/drivers/i2c/busses/i2c-axxia.c
@@ -489,7 +489,7 @@ static const struct i2c_algorithm axxia_i2c_algo = {
.functionality = axxia_i2c_func,
};
-static struct i2c_adapter_quirks axxia_i2c_quirks = {
+static const struct i2c_adapter_quirks axxia_i2c_quirks = {
.max_read_len = 255,
.max_write_len = 255,
};
diff --git a/drivers/i2c/busses/i2c-bcm-iproc.c b/drivers/i2c/busses/i2c-bcm-iproc.c
index 326b3db02c48..318df559adc5 100644
--- a/drivers/i2c/busses/i2c-bcm-iproc.c
+++ b/drivers/i2c/busses/i2c-bcm-iproc.c
@@ -395,7 +395,7 @@ static const struct i2c_algorithm bcm_iproc_algo = {
.functionality = bcm_iproc_i2c_functionality,
};
-static struct i2c_adapter_quirks bcm_iproc_i2c_quirks = {
+static const struct i2c_adapter_quirks bcm_iproc_i2c_quirks = {
/* need to reserve one byte in the FIFO for the slave address */
.max_read_len = M_TX_RX_FIFO_SIZE - 1,
};
diff --git a/drivers/i2c/busses/i2c-bcm2835.c b/drivers/i2c/busses/i2c-bcm2835.c
index d4f3239b5686..c3436f627028 100644
--- a/drivers/i2c/busses/i2c-bcm2835.c
+++ b/drivers/i2c/busses/i2c-bcm2835.c
@@ -50,20 +50,19 @@
#define BCM2835_I2C_S_CLKT BIT(9)
#define BCM2835_I2C_S_LEN BIT(10) /* Fake bit for SW error reporting */
-#define BCM2835_I2C_BITMSK_S 0x03FF
-
#define BCM2835_I2C_CDIV_MIN 0x0002
#define BCM2835_I2C_CDIV_MAX 0xFFFE
-#define BCM2835_I2C_TIMEOUT (msecs_to_jiffies(1000))
-
struct bcm2835_i2c_dev {
struct device *dev;
void __iomem *regs;
struct clk *clk;
int irq;
+ u32 bus_clk_rate;
struct i2c_adapter adapter;
struct completion completion;
+ struct i2c_msg *curr_msg;
+ int num_msgs;
u32 msg_err;
u8 *msg_buf;
size_t msg_buf_remaining;
@@ -80,6 +79,30 @@ static inline u32 bcm2835_i2c_readl(struct bcm2835_i2c_dev *i2c_dev, u32 reg)
return readl(i2c_dev->regs + reg);
}
+static int bcm2835_i2c_set_divider(struct bcm2835_i2c_dev *i2c_dev)
+{
+ u32 divider;
+
+ divider = DIV_ROUND_UP(clk_get_rate(i2c_dev->clk),
+ i2c_dev->bus_clk_rate);
+ /*
+ * Per the datasheet, the register is always interpreted as an even
+ * number, by rounding down. In other words, the LSB is ignored. So,
+ * if the LSB is set, increment the divider to avoid any issue.
+ */
+ if (divider & 1)
+ divider++;
+ if ((divider < BCM2835_I2C_CDIV_MIN) ||
+ (divider > BCM2835_I2C_CDIV_MAX)) {
+ dev_err_ratelimited(i2c_dev->dev, "Invalid clock-frequency\n");
+ return -EINVAL;
+ }
+
+ bcm2835_i2c_writel(i2c_dev, BCM2835_I2C_DIV, divider);
+
+ return 0;
+}
+
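Note: a worked example of the rounding rule above, with assumed clock rates. A 150 MHz core clock and a 100 kHz bus give DIV_ROUND_UP(150000000, 100000) = 1500, which is even and written as-is; a 250 MHz core clock and a 400 kHz bus give 625, whose LSB is set, so the divider is bumped to 626 before being written to BCM2835_I2C_DIV.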
static void bcm2835_fill_txfifo(struct bcm2835_i2c_dev *i2c_dev)
{
u32 val;
@@ -110,106 +133,159 @@ static void bcm2835_drain_rxfifo(struct bcm2835_i2c_dev *i2c_dev)
}
}
+/*
+ * Repeated Start Condition (Sr)
+ * The BCM2835 ARM Peripherals datasheet mentions a way to trigger a Sr when it
+ * talks about reading from a slave with 10 bit address. This is achieved by
+ * issuing a write, poll the I2CS.TA flag and wait for it to be set, and then
+ * issue a read.
+ * A comment in https://github.com/raspberrypi/linux/issues/254 shows how the
+ * firmware actually does it using polling and says that it's a workaround for
+ * a problem in the state machine.
+ * It turns out that it is possible to use the TXW interrupt to know when the
+ * transfer is active, provided the FIFO has not been prefilled.
+ */
+
+static void bcm2835_i2c_start_transfer(struct bcm2835_i2c_dev *i2c_dev)
+{
+ u32 c = BCM2835_I2C_C_ST | BCM2835_I2C_C_I2CEN;
+ struct i2c_msg *msg = i2c_dev->curr_msg;
+ bool last_msg = (i2c_dev->num_msgs == 1);
+
+ if (!i2c_dev->num_msgs)
+ return;
+
+ i2c_dev->num_msgs--;
+ i2c_dev->msg_buf = msg->buf;
+ i2c_dev->msg_buf_remaining = msg->len;
+
+ if (msg->flags & I2C_M_RD)
+ c |= BCM2835_I2C_C_READ | BCM2835_I2C_C_INTR;
+ else
+ c |= BCM2835_I2C_C_INTT;
+
+ if (last_msg)
+ c |= BCM2835_I2C_C_INTD;
+
+ bcm2835_i2c_writel(i2c_dev, BCM2835_I2C_A, msg->addr);
+ bcm2835_i2c_writel(i2c_dev, BCM2835_I2C_DLEN, msg->len);
+ bcm2835_i2c_writel(i2c_dev, BCM2835_I2C_C, c);
+}
+
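Note: the transfer shape that exercises this path is an ordinary write-then-read from a client driver. With this series, msgs[0] (the write) and msgs[1] (the read) are chained by the interrupt handler via bcm2835_i2c_start_transfer(), producing the repeated start described in the comment above. A minimal sketch, where example_read_reg(), client and reg are assumed names:

static int example_read_reg(struct i2c_client *client, u8 reg, u8 *val)
{
	struct i2c_msg msgs[] = {
		{ .addr = client->addr, .flags = 0, .len = 1, .buf = &reg },
		{ .addr = client->addr, .flags = I2C_M_RD, .len = 1, .buf = val },
	};
	int ret = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs));

	return ret == ARRAY_SIZE(msgs) ? 0 : (ret < 0 ? ret : -EIO);
}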
+/*
+ * Note about I2C_C_CLEAR on error:
+ * The I2C_C_CLEAR on errors will take some time to resolve -- if you were in
+ * non-idle state and I2C_C_READ, it sets an abort_rx flag and runs through
+ * the state machine to send a NACK and a STOP. Since we're setting CLEAR
+ * without I2CEN, that NACK will be hanging around queued up for next time
+ * we start the engine.
+ */
+
static irqreturn_t bcm2835_i2c_isr(int this_irq, void *data)
{
struct bcm2835_i2c_dev *i2c_dev = data;
u32 val, err;
val = bcm2835_i2c_readl(i2c_dev, BCM2835_I2C_S);
- val &= BCM2835_I2C_BITMSK_S;
- bcm2835_i2c_writel(i2c_dev, BCM2835_I2C_S, val);
err = val & (BCM2835_I2C_S_CLKT | BCM2835_I2C_S_ERR);
if (err) {
i2c_dev->msg_err = err;
- complete(&i2c_dev->completion);
- return IRQ_HANDLED;
- }
-
- if (val & BCM2835_I2C_S_RXD) {
- bcm2835_drain_rxfifo(i2c_dev);
- if (!(val & BCM2835_I2C_S_DONE))
- return IRQ_HANDLED;
+ goto complete;
}
if (val & BCM2835_I2C_S_DONE) {
- if (i2c_dev->msg_buf_remaining)
+ if (i2c_dev->curr_msg->flags & I2C_M_RD) {
+ bcm2835_drain_rxfifo(i2c_dev);
+ val = bcm2835_i2c_readl(i2c_dev, BCM2835_I2C_S);
+ }
+
+ if ((val & BCM2835_I2C_S_RXD) || i2c_dev->msg_buf_remaining)
i2c_dev->msg_err = BCM2835_I2C_S_LEN;
else
i2c_dev->msg_err = 0;
- complete(&i2c_dev->completion);
- return IRQ_HANDLED;
+ goto complete;
}
- if (val & BCM2835_I2C_S_TXD) {
+ if (val & BCM2835_I2C_S_TXW) {
+ if (!i2c_dev->msg_buf_remaining) {
+ i2c_dev->msg_err = val | BCM2835_I2C_S_LEN;
+ goto complete;
+ }
+
bcm2835_fill_txfifo(i2c_dev);
+
+ if (i2c_dev->num_msgs && !i2c_dev->msg_buf_remaining) {
+ i2c_dev->curr_msg++;
+ bcm2835_i2c_start_transfer(i2c_dev);
+ }
+
+ return IRQ_HANDLED;
+ }
+
+ if (val & BCM2835_I2C_S_RXR) {
+ if (!i2c_dev->msg_buf_remaining) {
+ i2c_dev->msg_err = val | BCM2835_I2C_S_LEN;
+ goto complete;
+ }
+
+ bcm2835_drain_rxfifo(i2c_dev);
return IRQ_HANDLED;
}
return IRQ_NONE;
+
+complete:
+ bcm2835_i2c_writel(i2c_dev, BCM2835_I2C_C, BCM2835_I2C_C_CLEAR);
+ bcm2835_i2c_writel(i2c_dev, BCM2835_I2C_S, BCM2835_I2C_S_CLKT |
+ BCM2835_I2C_S_ERR | BCM2835_I2C_S_DONE);
+ complete(&i2c_dev->completion);
+
+ return IRQ_HANDLED;
}
-static int bcm2835_i2c_xfer_msg(struct bcm2835_i2c_dev *i2c_dev,
- struct i2c_msg *msg)
+static int bcm2835_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[],
+ int num)
{
- u32 c;
+ struct bcm2835_i2c_dev *i2c_dev = i2c_get_adapdata(adap);
unsigned long time_left;
+ int i, ret;
- i2c_dev->msg_buf = msg->buf;
- i2c_dev->msg_buf_remaining = msg->len;
- reinit_completion(&i2c_dev->completion);
+ for (i = 0; i < (num - 1); i++)
+ if (msgs[i].flags & I2C_M_RD) {
+ dev_warn_once(i2c_dev->dev,
+ "only one read message supported, has to be last\n");
+ return -EOPNOTSUPP;
+ }
- bcm2835_i2c_writel(i2c_dev, BCM2835_I2C_C, BCM2835_I2C_C_CLEAR);
+ ret = bcm2835_i2c_set_divider(i2c_dev);
+ if (ret)
+ return ret;
- if (msg->flags & I2C_M_RD) {
- c = BCM2835_I2C_C_READ | BCM2835_I2C_C_INTR;
- } else {
- c = BCM2835_I2C_C_INTT;
- bcm2835_fill_txfifo(i2c_dev);
- }
- c |= BCM2835_I2C_C_ST | BCM2835_I2C_C_INTD | BCM2835_I2C_C_I2CEN;
+ i2c_dev->curr_msg = msgs;
+ i2c_dev->num_msgs = num;
+ reinit_completion(&i2c_dev->completion);
- bcm2835_i2c_writel(i2c_dev, BCM2835_I2C_A, msg->addr);
- bcm2835_i2c_writel(i2c_dev, BCM2835_I2C_DLEN, msg->len);
- bcm2835_i2c_writel(i2c_dev, BCM2835_I2C_C, c);
+ bcm2835_i2c_start_transfer(i2c_dev);
time_left = wait_for_completion_timeout(&i2c_dev->completion,
- BCM2835_I2C_TIMEOUT);
- bcm2835_i2c_writel(i2c_dev, BCM2835_I2C_C, BCM2835_I2C_C_CLEAR);
+ adap->timeout);
if (!time_left) {
+ bcm2835_i2c_writel(i2c_dev, BCM2835_I2C_C,
+ BCM2835_I2C_C_CLEAR);
dev_err(i2c_dev->dev, "i2c transfer timed out\n");
return -ETIMEDOUT;
}
- if (likely(!i2c_dev->msg_err))
- return 0;
+ if (!i2c_dev->msg_err)
+ return num;
- if ((i2c_dev->msg_err & BCM2835_I2C_S_ERR) &&
- (msg->flags & I2C_M_IGNORE_NAK))
- return 0;
-
- dev_err(i2c_dev->dev, "i2c transfer failed: %x\n", i2c_dev->msg_err);
+ dev_dbg(i2c_dev->dev, "i2c transfer failed: %x\n", i2c_dev->msg_err);
if (i2c_dev->msg_err & BCM2835_I2C_S_ERR)
return -EREMOTEIO;
- else
- return -EIO;
-}
-
-static int bcm2835_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[],
- int num)
-{
- struct bcm2835_i2c_dev *i2c_dev = i2c_get_adapdata(adap);
- int i;
- int ret = 0;
-
- for (i = 0; i < num; i++) {
- ret = bcm2835_i2c_xfer_msg(i2c_dev, &msgs[i]);
- if (ret)
- break;
- }
- return ret ?: i;
+ return -EIO;
}
static u32 bcm2835_i2c_func(struct i2c_adapter *adap)
@@ -235,7 +311,6 @@ static int bcm2835_i2c_probe(struct platform_device *pdev)
{
struct bcm2835_i2c_dev *i2c_dev;
struct resource *mem, *irq;
- u32 bus_clk_rate, divider;
int ret;
struct i2c_adapter *adap;
@@ -259,27 +334,12 @@ static int bcm2835_i2c_probe(struct platform_device *pdev)
}
ret = of_property_read_u32(pdev->dev.of_node, "clock-frequency",
- &bus_clk_rate);
+ &i2c_dev->bus_clk_rate);
if (ret < 0) {
dev_warn(&pdev->dev,
"Could not read clock-frequency property\n");
- bus_clk_rate = 100000;
- }
-
- divider = DIV_ROUND_UP(clk_get_rate(i2c_dev->clk), bus_clk_rate);
- /*
- * Per the datasheet, the register is always interpreted as an even
- * number, by rounding down. In other words, the LSB is ignored. So,
- * if the LSB is set, increment the divider to avoid any issue.
- */
- if (divider & 1)
- divider++;
- if ((divider < BCM2835_I2C_CDIV_MIN) ||
- (divider > BCM2835_I2C_CDIV_MAX)) {
- dev_err(&pdev->dev, "Invalid clock-frequency\n");
- return -ENODEV;
+ i2c_dev->bus_clk_rate = 100000;
}
- bcm2835_i2c_writel(i2c_dev, BCM2835_I2C_DIV, divider);
irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
if (!irq) {
diff --git a/drivers/i2c/busses/i2c-designware-core.c b/drivers/i2c/busses/i2c-designware-core.c
index b403fa5ecf49..6d81c56184d3 100644
--- a/drivers/i2c/busses/i2c-designware-core.c
+++ b/drivers/i2c/busses/i2c-designware-core.c
@@ -536,6 +536,8 @@ i2c_dw_xfer_msg(struct dw_i2c_dev *dev)
intr_mask = DW_IC_INTR_DEFAULT_MASK;
for (; dev->msg_write_idx < dev->msgs_num; dev->msg_write_idx++) {
+ u32 flags = msgs[dev->msg_write_idx].flags;
+
/*
* if target address has changed, we need to
* reprogram the target address in the i2c
@@ -581,8 +583,15 @@ i2c_dw_xfer_msg(struct dw_i2c_dev *dev)
* detected from the registers so we set it always
* when writing/reading the last byte.
*/
+
+ /*
+ * i2c-core.c always sets the buffer length of
+ * I2C_FUNC_SMBUS_BLOCK_DATA to 1. The length will
+ * be adjusted when receiving the first byte.
+ * Thus we can't stop the transaction here.
+ */
if (dev->msg_write_idx == dev->msgs_num - 1 &&
- buf_len == 1)
+ buf_len == 1 && !(flags & I2C_M_RECV_LEN))
cmd |= BIT(9);
if (need_restart) {
@@ -607,7 +616,12 @@ i2c_dw_xfer_msg(struct dw_i2c_dev *dev)
dev->tx_buf = buf;
dev->tx_buf_len = buf_len;
- if (buf_len > 0) {
+ /*
+ * Because we don't know the buffer length in the
+ * I2C_FUNC_SMBUS_BLOCK_DATA case, we can't stop
+ * the transaction here.
+ */
+ if (buf_len > 0 || flags & I2C_M_RECV_LEN) {
/* more bytes to be written */
dev->status |= STATUS_WRITE_IN_PROGRESS;
break;
@@ -628,6 +642,24 @@ i2c_dw_xfer_msg(struct dw_i2c_dev *dev)
dw_writel(dev, intr_mask, DW_IC_INTR_MASK);
}
+static u8
+i2c_dw_recv_len(struct dw_i2c_dev *dev, u8 len)
+{
+ struct i2c_msg *msgs = dev->msgs;
+ u32 flags = msgs[dev->msg_read_idx].flags;
+
+ /*
+ * Adjust the buffer length and mask the flag
+ * after receiving the first byte.
+ */
+ len += (flags & I2C_CLIENT_PEC) ? 2 : 1;
+ dev->tx_buf_len = len - min_t(u8, len, dev->rx_outstanding);
+ msgs[dev->msg_read_idx].len = len;
+ msgs[dev->msg_read_idx].flags &= ~I2C_M_RECV_LEN;
+
+ return len;
+}
+
static void
i2c_dw_read(struct dw_i2c_dev *dev)
{
@@ -652,7 +684,15 @@ i2c_dw_read(struct dw_i2c_dev *dev)
rx_valid = dw_readl(dev, DW_IC_RXFLR);
for (; len > 0 && rx_valid > 0; len--, rx_valid--) {
- *buf++ = dw_readl(dev, DW_IC_DATA_CMD);
+ u32 flags = msgs[dev->msg_read_idx].flags;
+
+ *buf = dw_readl(dev, DW_IC_DATA_CMD);
+ /* Ensure length byte is a valid value */
+ if (flags & I2C_M_RECV_LEN &&
+ *buf <= I2C_SMBUS_BLOCK_MAX && *buf > 0) {
+ len = i2c_dw_recv_len(dev, *buf);
+ }
+ buf++;
dev->rx_outstanding--;
}
diff --git a/drivers/i2c/busses/i2c-designware-core.h b/drivers/i2c/busses/i2c-designware-core.h
index 0d44d2ae7d4c..26250b425e2f 100644
--- a/drivers/i2c/busses/i2c-designware-core.h
+++ b/drivers/i2c/busses/i2c-designware-core.h
@@ -22,6 +22,14 @@
*
*/
+#include <linux/i2c.h>
+
+#define DW_IC_DEFAULT_FUNCTIONALITY (I2C_FUNC_I2C | \
+ I2C_FUNC_SMBUS_BYTE | \
+ I2C_FUNC_SMBUS_BYTE_DATA | \
+ I2C_FUNC_SMBUS_WORD_DATA | \
+ I2C_FUNC_SMBUS_BLOCK_DATA | \
+ I2C_FUNC_SMBUS_I2C_BLOCK)
#define DW_IC_CON_MASTER 0x1
#define DW_IC_CON_SPEED_STD 0x2
diff --git a/drivers/i2c/busses/i2c-designware-pcidrv.c b/drivers/i2c/busses/i2c-designware-pcidrv.c
index 96f8230cd2d3..d6423cfac588 100644
--- a/drivers/i2c/busses/i2c-designware-pcidrv.c
+++ b/drivers/i2c/busses/i2c-designware-pcidrv.c
@@ -71,12 +71,6 @@ struct dw_pci_controller {
DW_IC_CON_SLAVE_DISABLE | \
DW_IC_CON_RESTART_EN)
-#define DW_DEFAULT_FUNCTIONALITY (I2C_FUNC_I2C | \
- I2C_FUNC_SMBUS_BYTE | \
- I2C_FUNC_SMBUS_BYTE_DATA | \
- I2C_FUNC_SMBUS_WORD_DATA | \
- I2C_FUNC_SMBUS_I2C_BLOCK)
-
/* Merrifield HCNT/LCNT/SDA hold time */
static struct dw_scl_sda_cfg mrfld_config = {
.ss_hcnt = 0x2f8,
@@ -147,6 +141,7 @@ static struct dw_pci_controller dw_pci_controllers[] = {
.bus_cfg = INTEL_MID_STD_CFG | DW_IC_CON_SPEED_FAST,
.tx_fifo_depth = 32,
.rx_fifo_depth = 32,
+ .functionality = I2C_FUNC_10BIT_ADDR,
.clk_khz = 25000,
.setup = mfld_setup,
},
@@ -155,6 +150,7 @@ static struct dw_pci_controller dw_pci_controllers[] = {
.bus_cfg = INTEL_MID_STD_CFG | DW_IC_CON_SPEED_FAST,
.tx_fifo_depth = 64,
.rx_fifo_depth = 64,
+ .functionality = I2C_FUNC_10BIT_ADDR,
.scl_sda_cfg = &mrfld_config,
.setup = mrfld_setup,
},
@@ -249,7 +245,7 @@ static int i2c_dw_pci_probe(struct pci_dev *pdev,
}
dev->functionality = controller->functionality |
- DW_DEFAULT_FUNCTIONALITY;
+ DW_IC_DEFAULT_FUNCTIONALITY;
dev->master_cfg = controller->bus_cfg;
if (controller->scl_sda_cfg) {
diff --git a/drivers/i2c/busses/i2c-designware-platdrv.c b/drivers/i2c/busses/i2c-designware-platdrv.c
index 0b42a12171f3..6ce431323125 100644
--- a/drivers/i2c/busses/i2c-designware-platdrv.c
+++ b/drivers/i2c/busses/i2c-designware-platdrv.c
@@ -150,6 +150,29 @@ static int i2c_dw_plat_prepare_clk(struct dw_i2c_dev *i_dev, bool prepare)
return 0;
}
+static void dw_i2c_set_fifo_size(struct dw_i2c_dev *dev, int id)
+{
+ u32 param, tx_fifo_depth, rx_fifo_depth;
+
+ /*
+ * Try to detect the FIFO depth if not set by interface driver,
+ * the depth could be from 2 to 256 from HW spec.
+ */
+ param = i2c_dw_read_comp_param(dev);
+ tx_fifo_depth = ((param >> 16) & 0xff) + 1;
+ rx_fifo_depth = ((param >> 8) & 0xff) + 1;
+ if (!dev->tx_fifo_depth) {
+ dev->tx_fifo_depth = tx_fifo_depth;
+ dev->rx_fifo_depth = rx_fifo_depth;
+ dev->adapter.nr = id;
+ } else if (tx_fifo_depth >= 2) {
+ dev->tx_fifo_depth = min_t(u32, dev->tx_fifo_depth,
+ tx_fifo_depth);
+ dev->rx_fifo_depth = min_t(u32, dev->rx_fifo_depth,
+ rx_fifo_depth);
+ }
+}
+
static int dw_i2c_plat_probe(struct platform_device *pdev)
{
struct dw_i2c_platform_data *pdata = dev_get_platdata(&pdev->dev);
@@ -176,9 +199,6 @@ static int dw_i2c_plat_probe(struct platform_device *pdev)
dev->irq = irq;
platform_set_drvdata(pdev, dev);
- /* fast mode by default because of legacy reasons */
- dev->clk_freq = 400000;
-
if (pdata) {
dev->clk_freq = pdata->i2c_scl_freq;
} else {
@@ -193,8 +213,16 @@ static int dw_i2c_plat_probe(struct platform_device *pdev)
}
acpi_speed = i2c_acpi_find_bus_speed(&pdev->dev);
- if (acpi_speed)
- dev->clk_freq = acpi_speed;
+ /*
+ * Find bus speed from the "clock-frequency" device property, ACPI
+ * or by using fast mode if neither is set.
+ */
+ if (acpi_speed && dev->clk_freq)
+ dev->clk_freq = min(dev->clk_freq, acpi_speed);
+ else if (acpi_speed || dev->clk_freq)
+ dev->clk_freq = max(dev->clk_freq, acpi_speed);
+ else
+ dev->clk_freq = 400000;
if (has_acpi_companion(&pdev->dev))
dw_i2c_acpi_configure(pdev);
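Note: the min()/max() pair above collapses four cases; spelled out (values as seen at this point in probe):

	clk_freq   acpi_speed   resulting bus speed
	set        set          min(clk_freq, acpi_speed)
	set        0            clk_freq
	0          set          acpi_speed
	0          0            400000 (legacy fast-mode default)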
@@ -214,13 +242,7 @@ static int dw_i2c_plat_probe(struct platform_device *pdev)
if (r)
return r;
- dev->functionality =
- I2C_FUNC_I2C |
- I2C_FUNC_10BIT_ADDR |
- I2C_FUNC_SMBUS_BYTE |
- I2C_FUNC_SMBUS_BYTE_DATA |
- I2C_FUNC_SMBUS_WORD_DATA |
- I2C_FUNC_SMBUS_I2C_BLOCK;
+ dev->functionality = I2C_FUNC_10BIT_ADDR | DW_IC_DEFAULT_FUNCTIONALITY;
dev->master_cfg = DW_IC_CON_MASTER | DW_IC_CON_SLAVE_DISABLE |
DW_IC_CON_RESTART_EN;
@@ -246,13 +268,7 @@ static int dw_i2c_plat_probe(struct platform_device *pdev)
1000000);
}
- if (!dev->tx_fifo_depth) {
- u32 param1 = i2c_dw_read_comp_param(dev);
-
- dev->tx_fifo_depth = ((param1 >> 16) & 0xff) + 1;
- dev->rx_fifo_depth = ((param1 >> 8) & 0xff) + 1;
- dev->adapter.nr = pdev->id;
- }
+ dw_i2c_set_fifo_size(dev, pdev->id);
adap = &dev->adapter;
adap->owner = THIS_MODULE;
diff --git a/drivers/i2c/busses/i2c-dln2.c b/drivers/i2c/busses/i2c-dln2.c
index 8acda2aa1558..69075a32073e 100644
--- a/drivers/i2c/busses/i2c-dln2.c
+++ b/drivers/i2c/busses/i2c-dln2.c
@@ -182,7 +182,7 @@ static const struct i2c_algorithm dln2_i2c_usb_algorithm = {
.functionality = dln2_i2c_func,
};
-static struct i2c_adapter_quirks dln2_i2c_quirks = {
+static const struct i2c_adapter_quirks dln2_i2c_quirks = {
.max_read_len = DLN2_I2C_MAX_XFER_SIZE,
.max_write_len = DLN2_I2C_MAX_XFER_SIZE,
};
diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
index eb3627f35d12..e242db43774b 100644
--- a/drivers/i2c/busses/i2c-i801.c
+++ b/drivers/i2c/busses/i2c-i801.c
@@ -118,7 +118,6 @@
#define SMBSLVSTS(p) (16 + (p)->smba) /* ICH3 and later */
#define SMBSLVCMD(p) (17 + (p)->smba) /* ICH3 and later */
#define SMBNTFDADD(p) (20 + (p)->smba) /* ICH3 and later */
-#define SMBNTFDDAT(p) (22 + (p)->smba) /* ICH3 and later */
/* PCI Address Constants */
#define SMBBAR 4
@@ -137,27 +136,27 @@
#define SBREG_SMBCTRL 0xc6000c
/* Host status bits for SMBPCISTS */
-#define SMBPCISTS_INTS 0x08
+#define SMBPCISTS_INTS BIT(3)
/* Control bits for SMBPCICTL */
-#define SMBPCICTL_INTDIS 0x0400
+#define SMBPCICTL_INTDIS BIT(10)
/* Host configuration bits for SMBHSTCFG */
-#define SMBHSTCFG_HST_EN 1
-#define SMBHSTCFG_SMB_SMI_EN 2
-#define SMBHSTCFG_I2C_EN 4
-#define SMBHSTCFG_SPD_WD 0x10
+#define SMBHSTCFG_HST_EN BIT(0)
+#define SMBHSTCFG_SMB_SMI_EN BIT(1)
+#define SMBHSTCFG_I2C_EN BIT(2)
+#define SMBHSTCFG_SPD_WD BIT(4)
/* TCO configuration bits for TCOCTL */
-#define TCOCTL_EN 0x0100
+#define TCOCTL_EN BIT(8)
/* Auxiliary status register bits, ICH4+ only */
-#define SMBAUXSTS_CRCE 1
-#define SMBAUXSTS_STCO 2
+#define SMBAUXSTS_CRCE BIT(0)
+#define SMBAUXSTS_STCO BIT(1)
/* Auxiliary control register bits, ICH4+ only */
-#define SMBAUXCTL_CRC 1
-#define SMBAUXCTL_E32B 2
+#define SMBAUXCTL_CRC BIT(0)
+#define SMBAUXCTL_E32B BIT(1)
/* Other settings */
#define MAX_RETRIES 400
@@ -172,27 +171,27 @@
#define I801_I2C_BLOCK_DATA 0x18 /* ICH5 and later */
/* I801 Host Control register bits */
-#define SMBHSTCNT_INTREN 0x01
-#define SMBHSTCNT_KILL 0x02
-#define SMBHSTCNT_LAST_BYTE 0x20
-#define SMBHSTCNT_START 0x40
-#define SMBHSTCNT_PEC_EN 0x80 /* ICH3 and later */
+#define SMBHSTCNT_INTREN BIT(0)
+#define SMBHSTCNT_KILL BIT(1)
+#define SMBHSTCNT_LAST_BYTE BIT(5)
+#define SMBHSTCNT_START BIT(6)
+#define SMBHSTCNT_PEC_EN BIT(7) /* ICH3 and later */
/* I801 Hosts Status register bits */
-#define SMBHSTSTS_BYTE_DONE 0x80
-#define SMBHSTSTS_INUSE_STS 0x40
-#define SMBHSTSTS_SMBALERT_STS 0x20
-#define SMBHSTSTS_FAILED 0x10
-#define SMBHSTSTS_BUS_ERR 0x08
-#define SMBHSTSTS_DEV_ERR 0x04
-#define SMBHSTSTS_INTR 0x02
-#define SMBHSTSTS_HOST_BUSY 0x01
+#define SMBHSTSTS_BYTE_DONE BIT(7)
+#define SMBHSTSTS_INUSE_STS BIT(6)
+#define SMBHSTSTS_SMBALERT_STS BIT(5)
+#define SMBHSTSTS_FAILED BIT(4)
+#define SMBHSTSTS_BUS_ERR BIT(3)
+#define SMBHSTSTS_DEV_ERR BIT(2)
+#define SMBHSTSTS_INTR BIT(1)
+#define SMBHSTSTS_HOST_BUSY BIT(0)
-/* Host Notify Status registers bits */
-#define SMBSLVSTS_HST_NTFY_STS 1
+/* Host Notify Status register bits */
+#define SMBSLVSTS_HST_NTFY_STS BIT(0)
-/* Host Notify Command registers bits */
-#define SMBSLVCMD_HST_NTFY_INTREN 0x01
+/* Host Notify Command register bits */
+#define SMBSLVCMD_HST_NTFY_INTREN BIT(0)
#define STATUS_ERROR_FLAGS (SMBHSTSTS_FAILED | SMBHSTSTS_BUS_ERR | \
SMBHSTSTS_DEV_ERR)
@@ -243,6 +242,7 @@ struct i801_priv {
struct i2c_adapter adapter;
unsigned long smba;
unsigned char original_hstcfg;
+ unsigned char original_slvcmd;
struct pci_dev *pci_dev;
unsigned int features;
@@ -269,20 +269,17 @@ struct i801_priv {
*/
bool acpi_reserved;
struct mutex acpi_lock;
- struct smbus_host_notify *host_notify;
};
-#define SMBHSTNTFY_SIZE 8
-
-#define FEATURE_SMBUS_PEC (1 << 0)
-#define FEATURE_BLOCK_BUFFER (1 << 1)
-#define FEATURE_BLOCK_PROC (1 << 2)
-#define FEATURE_I2C_BLOCK_READ (1 << 3)
-#define FEATURE_IRQ (1 << 4)
-#define FEATURE_HOST_NOTIFY (1 << 5)
+#define FEATURE_SMBUS_PEC BIT(0)
+#define FEATURE_BLOCK_BUFFER BIT(1)
+#define FEATURE_BLOCK_PROC BIT(2)
+#define FEATURE_I2C_BLOCK_READ BIT(3)
+#define FEATURE_IRQ BIT(4)
+#define FEATURE_HOST_NOTIFY BIT(5)
/* Not really a feature, but it's convenient to handle it as such */
-#define FEATURE_IDF (1 << 15)
-#define FEATURE_TCO (1 << 16)
+#define FEATURE_IDF BIT(15)
+#define FEATURE_TCO BIT(16)
static const char *i801_feature_names[] = {
"SMBus PEC",
@@ -582,12 +579,15 @@ static void i801_isr_byte_done(struct i801_priv *priv)
static irqreturn_t i801_host_notify_isr(struct i801_priv *priv)
{
unsigned short addr;
- unsigned int data;
addr = inb_p(SMBNTFDADD(priv)) >> 1;
- data = inw_p(SMBNTFDDAT(priv));
- i2c_handle_smbus_host_notify(priv->host_notify, addr, data);
+ /*
+ * With the tested platforms, reading SMBNTFDDAT (22 + (p)->smba)
+ * always returns 0. Our current implementation doesn't provide
+ * data, so we just ignore it.
+ */
+ i2c_handle_smbus_host_notify(&priv->adapter, addr);
/* clear Host Notify bit and return */
outb_p(SMBSLVSTS_HST_NTFY_STS, SMBSLVSTS(priv));
@@ -950,23 +950,29 @@ static u32 i801_func(struct i2c_adapter *adapter)
I2C_FUNC_SMBUS_HOST_NOTIFY : 0);
}
-static int i801_enable_host_notify(struct i2c_adapter *adapter)
+static void i801_enable_host_notify(struct i2c_adapter *adapter)
{
struct i801_priv *priv = i2c_get_adapdata(adapter);
if (!(priv->features & FEATURE_HOST_NOTIFY))
- return -ENOTSUPP;
+ return;
- if (!priv->host_notify)
- priv->host_notify = i2c_setup_smbus_host_notify(adapter);
- if (!priv->host_notify)
- return -ENOMEM;
+ priv->original_slvcmd = inb_p(SMBSLVCMD(priv));
+
+ if (!(SMBSLVCMD_HST_NTFY_INTREN & priv->original_slvcmd))
+ outb_p(SMBSLVCMD_HST_NTFY_INTREN | priv->original_slvcmd,
+ SMBSLVCMD(priv));
- outb_p(SMBSLVCMD_HST_NTFY_INTREN, SMBSLVCMD(priv));
/* clear Host Notify bit to allow a new notification */
outb_p(SMBSLVSTS_HST_NTFY_STS, SMBSLVSTS(priv));
+}
- return 0;
+static void i801_disable_host_notify(struct i801_priv *priv)
+{
+ if (!(priv->features & FEATURE_HOST_NOTIFY))
+ return;
+
+ outb_p(priv->original_slvcmd, SMBSLVCMD(priv));
}
static const struct i2c_algorithm smbus_algorithm = {
@@ -1633,14 +1639,7 @@ static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id)
return err;
}
- /*
- * Enable Host Notify for chips that supports it.
- * It is done after i2c_add_adapter() so that we are sure the work queue
- * is not used if i2c_add_adapter() fails.
- */
- err = i801_enable_host_notify(&priv->adapter);
- if (err && err != -ENOTSUPP)
- dev_warn(&dev->dev, "Unable to enable SMBus Host Notify\n");
+ i801_enable_host_notify(&priv->adapter);
i801_probe_optional_slaves(priv);
/* We ignore errors - multiplexing is optional */
@@ -1663,6 +1662,7 @@ static void i801_remove(struct pci_dev *dev)
pm_runtime_forbid(&dev->dev);
pm_runtime_get_noresume(&dev->dev);
+ i801_disable_host_notify(priv);
i801_del_mux(priv);
i2c_del_adapter(&priv->adapter);
i801_acpi_remove(priv);
@@ -1690,11 +1690,8 @@ static int i801_resume(struct device *dev)
{
struct pci_dev *pci_dev = to_pci_dev(dev);
struct i801_priv *priv = pci_get_drvdata(pci_dev);
- int err;
- err = i801_enable_host_notify(&priv->adapter);
- if (err && err != -ENOTSUPP)
- dev_warn(dev, "Unable to enable SMBus Host Notify\n");
+ i801_enable_host_notify(&priv->adapter);
return 0;
}
diff --git a/drivers/i2c/busses/i2c-imx-lpi2c.c b/drivers/i2c/busses/i2c-imx-lpi2c.c
new file mode 100644
index 000000000000..c62b7cd475f8
--- /dev/null
+++ b/drivers/i2c/busses/i2c-imx-lpi2c.c
@@ -0,0 +1,652 @@
+/*
+ * This is the i.MX Low Power I2C (LPI2C) controller driver.
+ *
+ * Copyright 2016 Freescale Semiconductor, Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/clk.h>
+#include <linux/completion.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+
+#define DRIVER_NAME "imx-lpi2c"
+
+#define LPI2C_PARAM 0x04 /* i2c RX/TX FIFO size */
+#define LPI2C_MCR 0x10 /* i2c control register */
+#define LPI2C_MSR 0x14 /* i2c status register */
+#define LPI2C_MIER 0x18 /* i2c interrupt enable */
+#define LPI2C_MCFGR0 0x20 /* i2c master configuration */
+#define LPI2C_MCFGR1 0x24 /* i2c master configuration */
+#define LPI2C_MCFGR2 0x28 /* i2c master configuration */
+#define LPI2C_MCFGR3 0x2C /* i2c master configuration */
+#define LPI2C_MCCR0 0x48 /* i2c master clk configuration */
+#define LPI2C_MCCR1 0x50 /* i2c master clk configuration */
+#define LPI2C_MFCR 0x58 /* i2c master FIFO control */
+#define LPI2C_MFSR 0x5C /* i2c master FIFO status */
+#define LPI2C_MTDR 0x60 /* i2c master TX data register */
+#define LPI2C_MRDR 0x70 /* i2c master RX data register */
+
+/* i2c command */
+#define TRAN_DATA 0x00
+#define RECV_DATA 0x01
+#define GEN_STOP 0x02
+#define RECV_DISCARD 0x03
+#define GEN_START 0x04
+#define START_NACK 0x05
+#define START_HIGH 0x06
+#define START_HIGH_NACK 0x07
+
+#define MCR_MEN BIT(0)
+#define MCR_RST BIT(1)
+#define MCR_DOZEN BIT(2)
+#define MCR_DBGEN BIT(3)
+#define MCR_RTF BIT(8)
+#define MCR_RRF BIT(9)
+#define MSR_TDF BIT(0)
+#define MSR_RDF BIT(1)
+#define MSR_SDF BIT(9)
+#define MSR_NDF BIT(10)
+#define MSR_ALF BIT(11)
+#define MSR_MBF BIT(24)
+#define MSR_BBF BIT(25)
+#define MIER_TDIE BIT(0)
+#define MIER_RDIE BIT(1)
+#define MIER_SDIE BIT(9)
+#define MIER_NDIE BIT(10)
+#define MCFGR1_AUTOSTOP BIT(8)
+#define MCFGR1_IGNACK BIT(9)
+#define MRDR_RXEMPTY BIT(14)
+
+#define I2C_CLK_RATIO 2
+#define CHUNK_DATA 256
+
+#define LPI2C_DEFAULT_RATE 100000
+#define STANDARD_MAX_BITRATE 400000
+#define FAST_MAX_BITRATE 1000000
+#define FAST_PLUS_MAX_BITRATE 3400000
+#define HIGHSPEED_MAX_BITRATE 5000000
+
+enum lpi2c_imx_mode {
+ STANDARD, /* 100+Kbps */
+ FAST, /* 400+Kbps */
+ FAST_PLUS, /* 1.0+Mbps */
+ HS, /* 3.4+Mbps */
+ ULTRA_FAST, /* 5.0+Mbps */
+};
+
+enum lpi2c_imx_pincfg {
+ TWO_PIN_OD,
+ TWO_PIN_OO,
+ TWO_PIN_PP,
+ FOUR_PIN_PP,
+};
+
+struct lpi2c_imx_struct {
+ struct i2c_adapter adapter;
+ struct clk *clk;
+ void __iomem *base;
+ __u8 *rx_buf;
+ __u8 *tx_buf;
+ struct completion complete;
+ unsigned int msglen;
+ unsigned int delivered;
+ unsigned int block_data;
+ unsigned int bitrate;
+ unsigned int txfifosize;
+ unsigned int rxfifosize;
+ enum lpi2c_imx_mode mode;
+};
+
+static void lpi2c_imx_intctrl(struct lpi2c_imx_struct *lpi2c_imx,
+ unsigned int enable)
+{
+ writel(enable, lpi2c_imx->base + LPI2C_MIER);
+}
+
+static int lpi2c_imx_bus_busy(struct lpi2c_imx_struct *lpi2c_imx)
+{
+ unsigned long orig_jiffies = jiffies;
+ unsigned int temp;
+
+ while (1) {
+ temp = readl(lpi2c_imx->base + LPI2C_MSR);
+
+ /* check for arbitration lost, clear if set */
+ if (temp & MSR_ALF) {
+ writel(temp, lpi2c_imx->base + LPI2C_MSR);
+ return -EAGAIN;
+ }
+
+ if (temp & (MSR_BBF | MSR_MBF))
+ break;
+
+ if (time_after(jiffies, orig_jiffies + msecs_to_jiffies(500))) {
+ dev_dbg(&lpi2c_imx->adapter.dev, "bus not ready\n");
+ return -ETIMEDOUT;
+ }
+ schedule();
+ }
+
+ return 0;
+}
+
+static void lpi2c_imx_set_mode(struct lpi2c_imx_struct *lpi2c_imx)
+{
+ unsigned int bitrate = lpi2c_imx->bitrate;
+ enum lpi2c_imx_mode mode;
+
+ if (bitrate < STANDARD_MAX_BITRATE)
+ mode = STANDARD;
+ else if (bitrate < FAST_MAX_BITRATE)
+ mode = FAST;
+ else if (bitrate < FAST_PLUS_MAX_BITRATE)
+ mode = FAST_PLUS;
+ else if (bitrate < HIGHSPEED_MAX_BITRATE)
+ mode = HS;
+ else
+ mode = ULTRA_FAST;
+
+ lpi2c_imx->mode = mode;
+}
+
+static int lpi2c_imx_start(struct lpi2c_imx_struct *lpi2c_imx,
+ struct i2c_msg *msgs)
+{
+ unsigned int temp;
+ u8 read;
+
+ temp = readl(lpi2c_imx->base + LPI2C_MCR);
+ temp |= MCR_RRF | MCR_RTF;
+ writel(temp, lpi2c_imx->base + LPI2C_MCR);
+ writel(0x7f00, lpi2c_imx->base + LPI2C_MSR);
+
+ read = msgs->flags & I2C_M_RD;
+ temp = (msgs->addr << 1 | read) | (GEN_START << 8);
+ writel(temp, lpi2c_imx->base + LPI2C_MTDR);
+
+ return lpi2c_imx_bus_busy(lpi2c_imx);
+}
+
+static void lpi2c_imx_stop(struct lpi2c_imx_struct *lpi2c_imx)
+{
+ unsigned long orig_jiffies = jiffies;
+ unsigned int temp;
+
+ writel(GEN_STOP << 8, lpi2c_imx->base + LPI2C_MTDR);
+
+ do {
+ temp = readl(lpi2c_imx->base + LPI2C_MSR);
+ if (temp & MSR_SDF)
+ break;
+
+ if (time_after(jiffies, orig_jiffies + msecs_to_jiffies(500))) {
+ dev_dbg(&lpi2c_imx->adapter.dev, "stop timeout\n");
+ break;
+ }
+ schedule();
+
+ } while (1);
+}
+
+/* CLKLO = I2C_CLK_RATIO * CLKHI, SETHOLD = CLKHI, DATAVD = CLKHI/2 */
+static int lpi2c_imx_config(struct lpi2c_imx_struct *lpi2c_imx)
+{
+ u8 prescale, filt, sethold, clkhi, clklo, datavd;
+ unsigned int clk_rate, clk_cycle;
+ enum lpi2c_imx_pincfg pincfg;
+ unsigned int temp;
+
+ lpi2c_imx_set_mode(lpi2c_imx);
+
+ clk_rate = clk_get_rate(lpi2c_imx->clk);
+ if (lpi2c_imx->mode == HS || lpi2c_imx->mode == ULTRA_FAST)
+ filt = 0;
+ else
+ filt = 2;
+
+ for (prescale = 0; prescale <= 7; prescale++) {
+ clk_cycle = clk_rate / ((1 << prescale) * lpi2c_imx->bitrate)
+ - 3 - (filt >> 1);
+ clkhi = (clk_cycle + I2C_CLK_RATIO) / (I2C_CLK_RATIO + 1);
+ clklo = clk_cycle - clkhi;
+ if (clklo < 64)
+ break;
+ }
+
+ if (prescale > 7)
+ return -EINVAL;
+
+ /* set MCFGR1: PINCFG, PRESCALE, IGNACK */
+ if (lpi2c_imx->mode == ULTRA_FAST)
+ pincfg = TWO_PIN_OO;
+ else
+ pincfg = TWO_PIN_OD;
+ temp = prescale | pincfg << 24;
+
+ if (lpi2c_imx->mode == ULTRA_FAST)
+ temp |= MCFGR1_IGNACK;
+
+ writel(temp, lpi2c_imx->base + LPI2C_MCFGR1);
+
+ /* set MCFGR2: FILTSDA, FILTSCL */
+ temp = (filt << 16) | (filt << 24);
+ writel(temp, lpi2c_imx->base + LPI2C_MCFGR2);
+
+ /* set MCCR: DATAVD, SETHOLD, CLKHI, CLKLO */
+ sethold = clkhi;
+ datavd = clkhi >> 1;
+ temp = datavd << 24 | sethold << 16 | clkhi << 8 | clklo;
+
+ if (lpi2c_imx->mode == HS)
+ writel(temp, lpi2c_imx->base + LPI2C_MCCR1);
+ else
+ writel(temp, lpi2c_imx->base + LPI2C_MCCR0);
+
+ return 0;
+}
+
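Note: a worked pass through the prescale loop above, with assumed inputs (60 MHz functional clock, 100 kHz target bitrate, hence filt = 2). Prescale values 0, 1 and 2 yield clklo values of 397, 197 and 97, all too large; at prescale = 3, clk_cycle = 60000000 / (8 * 100000) - 3 - 1 = 71, clkhi = (71 + 2) / 3 = 24 and clklo = 71 - 24 = 47, which satisfies clklo < 64 and ends the search. The registers are then programmed with CLKHI = 24, CLKLO = 47, SETHOLD = 24 and DATAVD = 12.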
+static int lpi2c_imx_master_enable(struct lpi2c_imx_struct *lpi2c_imx)
+{
+ unsigned int temp;
+ int ret;
+
+ ret = clk_enable(lpi2c_imx->clk);
+ if (ret)
+ return ret;
+
+ temp = MCR_RST;
+ writel(temp, lpi2c_imx->base + LPI2C_MCR);
+ writel(0, lpi2c_imx->base + LPI2C_MCR);
+
+ ret = lpi2c_imx_config(lpi2c_imx);
+ if (ret)
+ goto clk_disable;
+
+ temp = readl(lpi2c_imx->base + LPI2C_MCR);
+ temp |= MCR_MEN;
+ writel(temp, lpi2c_imx->base + LPI2C_MCR);
+
+ return 0;
+
+clk_disable:
+ clk_disable(lpi2c_imx->clk);
+
+ return ret;
+}
+
+static int lpi2c_imx_master_disable(struct lpi2c_imx_struct *lpi2c_imx)
+{
+ u32 temp;
+
+ temp = readl(lpi2c_imx->base + LPI2C_MCR);
+ temp &= ~MCR_MEN;
+ writel(temp, lpi2c_imx->base + LPI2C_MCR);
+
+ clk_disable(lpi2c_imx->clk);
+
+ return 0;
+}
+
+static int lpi2c_imx_msg_complete(struct lpi2c_imx_struct *lpi2c_imx)
+{
+ unsigned long timeout;
+
+ timeout = wait_for_completion_timeout(&lpi2c_imx->complete, HZ);
+
+ return timeout ? 0 : -ETIMEDOUT;
+}
+
+static int lpi2c_imx_txfifo_empty(struct lpi2c_imx_struct *lpi2c_imx)
+{
+ unsigned long orig_jiffies = jiffies;
+ u32 txcnt;
+
+ do {
+ txcnt = readl(lpi2c_imx->base + LPI2C_MFSR) & 0xff;
+
+ if (readl(lpi2c_imx->base + LPI2C_MSR) & MSR_NDF) {
+ dev_dbg(&lpi2c_imx->adapter.dev, "NDF detected\n");
+ return -EIO;
+ }
+
+ if (time_after(jiffies, orig_jiffies + msecs_to_jiffies(500))) {
+ dev_dbg(&lpi2c_imx->adapter.dev, "txfifo empty timeout\n");
+ return -ETIMEDOUT;
+ }
+ schedule();
+
+ } while (txcnt);
+
+ return 0;
+}
+
+static void lpi2c_imx_set_tx_watermark(struct lpi2c_imx_struct *lpi2c_imx)
+{
+ writel(lpi2c_imx->txfifosize >> 1, lpi2c_imx->base + LPI2C_MFCR);
+}
+
+static void lpi2c_imx_set_rx_watermark(struct lpi2c_imx_struct *lpi2c_imx)
+{
+ unsigned int temp, remaining;
+
+ remaining = lpi2c_imx->msglen - lpi2c_imx->delivered;
+
+ if (remaining > (lpi2c_imx->rxfifosize >> 1))
+ temp = lpi2c_imx->rxfifosize >> 1;
+ else
+ temp = 0;
+
+ writel(temp << 16, lpi2c_imx->base + LPI2C_MFCR);
+}
+
+static void lpi2c_imx_write_txfifo(struct lpi2c_imx_struct *lpi2c_imx)
+{
+ unsigned int data, txcnt;
+
+ txcnt = readl(lpi2c_imx->base + LPI2C_MFSR) & 0xff;
+
+ while (txcnt < lpi2c_imx->txfifosize) {
+ if (lpi2c_imx->delivered == lpi2c_imx->msglen)
+ break;
+
+ data = lpi2c_imx->tx_buf[lpi2c_imx->delivered++];
+ writel(data, lpi2c_imx->base + LPI2C_MTDR);
+ txcnt++;
+ }
+
+ if (lpi2c_imx->delivered < lpi2c_imx->msglen)
+ lpi2c_imx_intctrl(lpi2c_imx, MIER_TDIE | MIER_NDIE);
+ else
+ complete(&lpi2c_imx->complete);
+}
+
+static void lpi2c_imx_read_rxfifo(struct lpi2c_imx_struct *lpi2c_imx)
+{
+ unsigned int blocklen, remaining;
+ unsigned int temp, data;
+
+ do {
+ data = readl(lpi2c_imx->base + LPI2C_MRDR);
+ if (data & MRDR_RXEMPTY)
+ break;
+
+ lpi2c_imx->rx_buf[lpi2c_imx->delivered++] = data & 0xff;
+ } while (1);
+
+ /*
+ * The first byte of an SMBus block data read holds the length of the
+ * remaining packet. Add it to the expected message length.
+ */
+ if (lpi2c_imx->block_data) {
+ blocklen = lpi2c_imx->rx_buf[0];
+ lpi2c_imx->msglen += blocklen;
+ }
+
+ remaining = lpi2c_imx->msglen - lpi2c_imx->delivered;
+
+ if (!remaining) {
+ complete(&lpi2c_imx->complete);
+ return;
+ }
+
+ /* not finished, still waiting for rx data */
+ lpi2c_imx_set_rx_watermark(lpi2c_imx);
+
+ /* multiple receive commands */
+ if (lpi2c_imx->block_data) {
+ lpi2c_imx->block_data = 0;
+ temp = remaining;
+ temp |= (RECV_DATA << 8);
+ writel(temp, lpi2c_imx->base + LPI2C_MTDR);
+ } else if (!(lpi2c_imx->delivered & 0xff)) {
+ temp = (remaining > CHUNK_DATA ? CHUNK_DATA : remaining) - 1;
+ temp |= (RECV_DATA << 8);
+ writel(temp, lpi2c_imx->base + LPI2C_MTDR);
+ }
+
+ lpi2c_imx_intctrl(lpi2c_imx, MIER_RDIE);
+}
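+
+/*
+ * Illustrative SMBus block read walk-through (not from the original
+ * patch): such a transfer starts with msglen = 1 to fetch the count
+ * byte. If the slave returns, say, 0x03, the function above bumps
+ * msglen to 4 and queues another RECV_DATA command sized from the
+ * remaining byte count before re-enabling the RX interrupt.
+ */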
+
+static void lpi2c_imx_write(struct lpi2c_imx_struct *lpi2c_imx,
+ struct i2c_msg *msgs)
+{
+ lpi2c_imx->tx_buf = msgs->buf;
+ lpi2c_imx_set_tx_watermark(lpi2c_imx);
+ lpi2c_imx_write_txfifo(lpi2c_imx);
+}
+
+static void lpi2c_imx_read(struct lpi2c_imx_struct *lpi2c_imx,
+ struct i2c_msg *msgs)
+{
+ unsigned int temp;
+
+ lpi2c_imx->rx_buf = msgs->buf;
+ lpi2c_imx->block_data = msgs->flags & I2C_M_RECV_LEN;
+
+ lpi2c_imx_set_rx_watermark(lpi2c_imx);
+ temp = msgs->len > CHUNK_DATA ? CHUNK_DATA - 1 : msgs->len - 1;
+ temp |= (RECV_DATA << 8);
+ writel(temp, lpi2c_imx->base + LPI2C_MTDR);
+
+ lpi2c_imx_intctrl(lpi2c_imx, MIER_RDIE | MIER_NDIE);
+}
+
+static int lpi2c_imx_xfer(struct i2c_adapter *adapter,
+ struct i2c_msg *msgs, int num)
+{
+ struct lpi2c_imx_struct *lpi2c_imx = i2c_get_adapdata(adapter);
+ unsigned int temp;
+ int i, result;
+
+ result = lpi2c_imx_master_enable(lpi2c_imx);
+ if (result)
+ return result;
+
+ for (i = 0; i < num; i++) {
+ result = lpi2c_imx_start(lpi2c_imx, &msgs[i]);
+ if (result)
+ goto disable;
+
+ /* quick smbus */
+ if (num == 1 && msgs[0].len == 0)
+ goto stop;
+
+ lpi2c_imx->delivered = 0;
+ lpi2c_imx->msglen = msgs[i].len;
+ init_completion(&lpi2c_imx->complete);
+
+ if (msgs[i].flags & I2C_M_RD)
+ lpi2c_imx_read(lpi2c_imx, &msgs[i]);
+ else
+ lpi2c_imx_write(lpi2c_imx, &msgs[i]);
+
+ result = lpi2c_imx_msg_complete(lpi2c_imx);
+ if (result)
+ goto stop;
+
+ if (!(msgs[i].flags & I2C_M_RD)) {
+ result = lpi2c_imx_txfifo_empty(lpi2c_imx);
+ if (result)
+ goto stop;
+ }
+ }
+
+stop:
+ lpi2c_imx_stop(lpi2c_imx);
+
+ temp = readl(lpi2c_imx->base + LPI2C_MSR);
+ if ((temp & MSR_NDF) && !result)
+ result = -EIO;
+
+disable:
+ lpi2c_imx_master_disable(lpi2c_imx);
+
+ dev_dbg(&lpi2c_imx->adapter.dev, "<%s> exit with: %s: %d\n", __func__,
+ (result < 0) ? "error" : "success msg",
+ (result < 0) ? result : num);
+
+ return (result < 0) ? result : num;
+}
+
+static irqreturn_t lpi2c_imx_isr(int irq, void *dev_id)
+{
+ struct lpi2c_imx_struct *lpi2c_imx = dev_id;
+ unsigned int temp;
+
+ lpi2c_imx_intctrl(lpi2c_imx, 0);
+ temp = readl(lpi2c_imx->base + LPI2C_MSR);
+
+ if (temp & MSR_RDF)
+ lpi2c_imx_read_rxfifo(lpi2c_imx);
+
+ if (temp & MSR_TDF)
+ lpi2c_imx_write_txfifo(lpi2c_imx);
+
+ if (temp & MSR_NDF)
+ complete(&lpi2c_imx->complete);
+
+ return IRQ_HANDLED;
+}
+
+static u32 lpi2c_imx_func(struct i2c_adapter *adapter)
+{
+ return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL |
+ I2C_FUNC_SMBUS_READ_BLOCK_DATA;
+}
+
+static struct i2c_algorithm lpi2c_imx_algo = {
+ .master_xfer = lpi2c_imx_xfer,
+ .functionality = lpi2c_imx_func,
+};
+
+static const struct of_device_id lpi2c_imx_of_match[] = {
+ { .compatible = "fsl,imx7ulp-lpi2c" },
+ { .compatible = "fsl,imx8dv-lpi2c" },
+ { },
+};
+MODULE_DEVICE_TABLE(of, lpi2c_imx_of_match);
+
+static int lpi2c_imx_probe(struct platform_device *pdev)
+{
+ struct lpi2c_imx_struct *lpi2c_imx;
+ struct resource *res;
+ unsigned int temp;
+ int irq, ret;
+
+ lpi2c_imx = devm_kzalloc(&pdev->dev, sizeof(*lpi2c_imx), GFP_KERNEL);
+ if (!lpi2c_imx)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ lpi2c_imx->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(lpi2c_imx->base))
+ return PTR_ERR(lpi2c_imx->base);
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_err(&pdev->dev, "can't get irq number\n");
+ return irq;
+ }
+
+ lpi2c_imx->adapter.owner = THIS_MODULE;
+ lpi2c_imx->adapter.algo = &lpi2c_imx_algo;
+ lpi2c_imx->adapter.dev.parent = &pdev->dev;
+ lpi2c_imx->adapter.dev.of_node = pdev->dev.of_node;
+ strlcpy(lpi2c_imx->adapter.name, pdev->name,
+ sizeof(lpi2c_imx->adapter.name));
+
+ lpi2c_imx->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(lpi2c_imx->clk)) {
+ dev_err(&pdev->dev, "can't get I2C peripheral clock\n");
+ return PTR_ERR(lpi2c_imx->clk);
+ }
+
+ ret = of_property_read_u32(pdev->dev.of_node,
+ "clock-frequency", &lpi2c_imx->bitrate);
+ if (ret)
+ lpi2c_imx->bitrate = LPI2C_DEFAULT_RATE;
+
+ ret = devm_request_irq(&pdev->dev, irq, lpi2c_imx_isr, 0,
+ pdev->name, lpi2c_imx);
+ if (ret) {
+ dev_err(&pdev->dev, "can't claim irq %d\n", irq);
+ return ret;
+ }
+
+ i2c_set_adapdata(&lpi2c_imx->adapter, lpi2c_imx);
+ platform_set_drvdata(pdev, lpi2c_imx);
+
+ ret = clk_prepare_enable(lpi2c_imx->clk);
+ if (ret) {
+ dev_err(&pdev->dev, "clk enable failed %d\n", ret);
+ return ret;
+ }
+
+ temp = readl(lpi2c_imx->base + LPI2C_PARAM);
+ lpi2c_imx->txfifosize = 1 << (temp & 0x0f);
+ lpi2c_imx->rxfifosize = 1 << ((temp >> 8) & 0x0f);
+
+ clk_disable(lpi2c_imx->clk);
+
+ ret = i2c_add_adapter(&lpi2c_imx->adapter);
+ if (ret)
+ goto clk_unprepare;
+
+ dev_info(&lpi2c_imx->adapter.dev, "LPI2C adapter registered\n");
+
+ return 0;
+
+clk_unprepare:
+ clk_unprepare(lpi2c_imx->clk);
+
+ return ret;
+}
+
+static int lpi2c_imx_remove(struct platform_device *pdev)
+{
+ struct lpi2c_imx_struct *lpi2c_imx = platform_get_drvdata(pdev);
+
+ i2c_del_adapter(&lpi2c_imx->adapter);
+
+ clk_unprepare(lpi2c_imx->clk);
+
+ return 0;
+}
+
+static struct platform_driver lpi2c_imx_driver = {
+ .probe = lpi2c_imx_probe,
+ .remove = lpi2c_imx_remove,
+ .driver = {
+ .name = DRIVER_NAME,
+ .of_match_table = lpi2c_imx_of_match,
+ },
+};
+
+module_platform_driver(lpi2c_imx_driver);
+
+MODULE_AUTHOR("Gao Pan <pandy.gao@nxp.com>");
+MODULE_DESCRIPTION("I2C adapter driver for LPI2C bus");
+MODULE_LICENSE("GPL");
diff --git a/drivers/i2c/busses/i2c-mlxcpld.c b/drivers/i2c/busses/i2c-mlxcpld.c
new file mode 100644
index 000000000000..d271e6a0954c
--- /dev/null
+++ b/drivers/i2c/busses/i2c-mlxcpld.c
@@ -0,0 +1,504 @@
+/*
+ * Copyright (c) 2016 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2016 Michael Shych <michaels@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/delay.h>
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+/* General defines */
+#define MLXPLAT_CPLD_LPC_I2C_BASE_ADDR 0x2000
+#define MLXCPLD_I2C_DEVICE_NAME "i2c_mlxcpld"
+#define MLXCPLD_I2C_VALID_FLAG (I2C_M_RECV_LEN | I2C_M_RD)
+#define MLXCPLD_I2C_BUS_NUM 1
+#define MLXCPLD_I2C_DATA_REG_SZ 36
+#define MLXCPLD_I2C_MAX_ADDR_LEN 4
+#define MLXCPLD_I2C_RETR_NUM 2
+#define MLXCPLD_I2C_XFER_TO 500000 /* usec */
+#define MLXCPLD_I2C_POLL_TIME 2000 /* usec */
+
+/* LPC I2C registers */
+#define MLXCPLD_LPCI2C_LPF_REG 0x0
+#define MLXCPLD_LPCI2C_CTRL_REG 0x1
+#define MLXCPLD_LPCI2C_HALF_CYC_REG 0x4
+#define MLXCPLD_LPCI2C_I2C_HOLD_REG 0x5
+#define MLXCPLD_LPCI2C_CMD_REG 0x6
+#define MLXCPLD_LPCI2C_NUM_DAT_REG 0x7
+#define MLXCPLD_LPCI2C_NUM_ADDR_REG 0x8
+#define MLXCPLD_LPCI2C_STATUS_REG 0x9
+#define MLXCPLD_LPCI2C_DATA_REG 0xa
+
+/* LPC I2C masks and parameters */
+#define MLXCPLD_LPCI2C_RST_SEL_MASK 0x1
+#define MLXCPLD_LPCI2C_TRANS_END 0x1
+#define MLXCPLD_LPCI2C_STATUS_NACK 0x10
+#define MLXCPLD_LPCI2C_NO_IND 0
+#define MLXCPLD_LPCI2C_ACK_IND 1
+#define MLXCPLD_LPCI2C_NACK_IND 2
+
+struct mlxcpld_i2c_curr_xfer {
+ u8 cmd;
+ u8 addr_width;
+ u8 data_len;
+ u8 msg_num;
+ struct i2c_msg *msg;
+};
+
+struct mlxcpld_i2c_priv {
+ struct i2c_adapter adap;
+ u32 base_addr;
+ struct mutex lock;
+ struct mlxcpld_i2c_curr_xfer xfer;
+ struct device *dev;
+};
+
+static void mlxcpld_i2c_lpc_write_buf(u8 *data, u8 len, u32 addr)
+{
+ int i;
+
+ for (i = 0; i < len - len % 4; i += 4)
+ outl(*(u32 *)(data + i), addr + i);
+ for (; i < len; ++i)
+ outb(*(data + i), addr + i);
+}
+
+static void mlxcpld_i2c_lpc_read_buf(u8 *data, u8 len, u32 addr)
+{
+ int i;
+
+ for (i = 0; i < len - len % 4; i += 4)
+ *(u32 *)(data + i) = inl(addr + i);
+ for (; i < len; ++i)
+ *(data + i) = inb(addr + i);
+}
+
+static void mlxcpld_i2c_read_comm(struct mlxcpld_i2c_priv *priv, u8 offs,
+ u8 *data, u8 datalen)
+{
+ u32 addr = priv->base_addr + offs;
+
+ switch (datalen) {
+ case 1:
+ *(data) = inb(addr);
+ break;
+ case 2:
+ *((u16 *)data) = inw(addr);
+ break;
+ case 3:
+ *((u16 *)data) = inw(addr);
+ *(data + 2) = inb(addr + 2);
+ break;
+ case 4:
+ *((u32 *)data) = inl(addr);
+ break;
+ default:
+ mlxcpld_i2c_lpc_read_buf(data, datalen, addr);
+ break;
+ }
+}
+
+static void mlxcpld_i2c_write_comm(struct mlxcpld_i2c_priv *priv, u8 offs,
+ u8 *data, u8 datalen)
+{
+ u32 addr = priv->base_addr + offs;
+
+ switch (datalen) {
+ case 1:
+ outb(*(data), addr);
+ break;
+ case 2:
+ outw(*((u16 *)data), addr);
+ break;
+ case 3:
+ outw(*((u16 *)data), addr);
+ outb(*(data + 2), addr + 2);
+ break;
+ case 4:
+ outl(*((u32 *)data), addr);
+ break;
+ default:
+ mlxcpld_i2c_lpc_write_buf(data, datalen, addr);
+ break;
+ }
+}
+
+/*
+ * Check the validity of the received i2c message parameters.
+ * Returns 0 if OK, a negative errno in case of invalid parameters.
+ */
+static int mlxcpld_i2c_check_msg_params(struct mlxcpld_i2c_priv *priv,
+ struct i2c_msg *msgs, int num)
+{
+ int i;
+
+ if (!num) {
+ dev_err(priv->dev, "Incorrect 0 num of messages\n");
+ return -EINVAL;
+ }
+
+ if (unlikely(msgs[0].addr > 0x7f)) {
+ dev_err(priv->dev, "Invalid address 0x%03x\n",
+ msgs[0].addr);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < num; ++i) {
+ if (unlikely(!msgs[i].buf)) {
+ dev_err(priv->dev, "Invalid buf in msg[%d]\n",
+ i);
+ return -EINVAL;
+ }
+ if (unlikely(msgs[0].addr != msgs[i].addr)) {
+ dev_err(priv->dev, "Invalid addr in msg[%d]\n",
+ i);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * Check whether the transfer is complete and report the operation status.
+ * Returns 0 if the transfer completed (either ACK or NACK),
+ * negative if the transfer is not finished yet.
+ */
+static int mlxcpld_i2c_check_status(struct mlxcpld_i2c_priv *priv, int *status)
+{
+ u8 val;
+
+ mlxcpld_i2c_read_comm(priv, MLXCPLD_LPCI2C_STATUS_REG, &val, 1);
+
+ if (val & MLXCPLD_LPCI2C_TRANS_END) {
+ if (val & MLXCPLD_LPCI2C_STATUS_NACK)
+ /*
+ * The slave is unable to accept the data. No such
+ * slave, command not understood, or unable to accept
+ * any more data.
+ */
+ *status = MLXCPLD_LPCI2C_NACK_IND;
+ else
+ *status = MLXCPLD_LPCI2C_ACK_IND;
+ return 0;
+ }
+ *status = MLXCPLD_LPCI2C_NO_IND;
+
+ return -EIO;
+}
+
+static void mlxcpld_i2c_set_transf_data(struct mlxcpld_i2c_priv *priv,
+ struct i2c_msg *msgs, int num,
+ u8 comm_len)
+{
+ priv->xfer.msg = msgs;
+ priv->xfer.msg_num = num;
+
+ /*
+ * No upper layer currently uses a transfer with more than
+ * 2 messages. It is also not very relevant on Mellanox systems
+ * because of a HW limitation: the maximum transfer size of the
+ * current x86 LPCI2C bridge is no more than 32 bytes.
+ */
+ priv->xfer.cmd = msgs[num - 1].flags & I2C_M_RD;
+
+ if (priv->xfer.cmd == I2C_M_RD && comm_len != msgs[0].len) {
+ priv->xfer.addr_width = msgs[0].len;
+ priv->xfer.data_len = comm_len - priv->xfer.addr_width;
+ } else {
+ priv->xfer.addr_width = 0;
+ priv->xfer.data_len = comm_len;
+ }
+}
+
+/* Reset CPLD LPCI2C block */
+static void mlxcpld_i2c_reset(struct mlxcpld_i2c_priv *priv)
+{
+ u8 val;
+
+ mutex_lock(&priv->lock);
+
+ mlxcpld_i2c_read_comm(priv, MLXCPLD_LPCI2C_CTRL_REG, &val, 1);
+ val &= ~MLXCPLD_LPCI2C_RST_SEL_MASK;
+ mlxcpld_i2c_write_comm(priv, MLXCPLD_LPCI2C_CTRL_REG, &val, 1);
+
+ mutex_unlock(&priv->lock);
+}
+
+/* Make sure the CPLD is ready to start transmitting. */
+static int mlxcpld_i2c_check_busy(struct mlxcpld_i2c_priv *priv)
+{
+ u8 val;
+
+ mlxcpld_i2c_read_comm(priv, MLXCPLD_LPCI2C_STATUS_REG, &val, 1);
+
+ if (val & MLXCPLD_LPCI2C_TRANS_END)
+ return 0;
+
+ return -EIO;
+}
+
+static int mlxcpld_i2c_wait_for_free(struct mlxcpld_i2c_priv *priv)
+{
+ int timeout = 0;
+
+ do {
+ if (!mlxcpld_i2c_check_busy(priv))
+ break;
+ usleep_range(MLXCPLD_I2C_POLL_TIME / 2, MLXCPLD_I2C_POLL_TIME);
+ timeout += MLXCPLD_I2C_POLL_TIME;
+ } while (timeout <= MLXCPLD_I2C_XFER_TO);
+
+ if (timeout > MLXCPLD_I2C_XFER_TO)
+ return -ETIMEDOUT;
+
+ return 0;
+}
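+
+/*
+ * With the constants defined above (MLXCPLD_I2C_POLL_TIME = 2000 usec,
+ * MLXCPLD_I2C_XFER_TO = 500000 usec) the loop above samples the status
+ * register every 1-2 ms and gives up after roughly half a second.
+ */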
+
+/*
+ * Wait for the master transfer to complete.
+ * Puts the current process to sleep until the transfer completes or the
+ * timeout expires. Returns the number of transferred or read bytes, or a
+ * negative error code.
+ */
+static int mlxcpld_i2c_wait_for_tc(struct mlxcpld_i2c_priv *priv)
+{
+ int status, i, timeout = 0;
+ u8 datalen;
+
+ do {
+ usleep_range(MLXCPLD_I2C_POLL_TIME / 2, MLXCPLD_I2C_POLL_TIME);
+ if (!mlxcpld_i2c_check_status(priv, &status))
+ break;
+ timeout += MLXCPLD_I2C_POLL_TIME;
+ } while (status == 0 && timeout < MLXCPLD_I2C_XFER_TO);
+
+ switch (status) {
+ case MLXCPLD_LPCI2C_NO_IND:
+ return -ETIMEDOUT;
+
+ case MLXCPLD_LPCI2C_ACK_IND:
+ if (priv->xfer.cmd != I2C_M_RD)
+ return (priv->xfer.addr_width + priv->xfer.data_len);
+
+ if (priv->xfer.msg_num == 1)
+ i = 0;
+ else
+ i = 1;
+
+ if (!priv->xfer.msg[i].buf)
+ return -EINVAL;
+
+ /*
+ * The actual read data length is always the same as the
+ * requested length. 0xff (line pull-up) is returned if the
+ * slave has no data to return, so there is no need to read
+ * the MLXCPLD_LPCI2C_NUM_DAT_REG register from the CPLD.
+ */
+ datalen = priv->xfer.data_len;
+
+ mlxcpld_i2c_read_comm(priv, MLXCPLD_LPCI2C_DATA_REG,
+ priv->xfer.msg[i].buf, datalen);
+
+ return datalen;
+
+ case MLXCPLD_LPCI2C_NACK_IND:
+ return -ENXIO;
+
+ default:
+ return -EINVAL;
+ }
+}
+
+static void mlxcpld_i2c_xfer_msg(struct mlxcpld_i2c_priv *priv)
+{
+ int i, len = 0;
+ u8 cmd;
+
+ mlxcpld_i2c_write_comm(priv, MLXCPLD_LPCI2C_NUM_DAT_REG,
+ &priv->xfer.data_len, 1);
+ mlxcpld_i2c_write_comm(priv, MLXCPLD_LPCI2C_NUM_ADDR_REG,
+ &priv->xfer.addr_width, 1);
+
+ for (i = 0; i < priv->xfer.msg_num; i++) {
+ if ((priv->xfer.msg[i].flags & I2C_M_RD) != I2C_M_RD) {
+ /* Don't write to CPLD buffer in read transaction */
+ mlxcpld_i2c_write_comm(priv, MLXCPLD_LPCI2C_DATA_REG +
+ len, priv->xfer.msg[i].buf,
+ priv->xfer.msg[i].len);
+ len += priv->xfer.msg[i].len;
+ }
+ }
+
+ /*
+ * Set the target slave address together with the command for the
+ * master transfer. This must be the last register write before the
+ * CPLD starts the transaction.
+ */
+ cmd = (priv->xfer.msg[0].addr << 1) | priv->xfer.cmd;
+ mlxcpld_i2c_write_comm(priv, MLXCPLD_LPCI2C_CMD_REG, &cmd, 1);
+}
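+
+/*
+ * Example of the command byte layout written above (illustrative): for
+ * a read from a slave at 7-bit address 0x50, cmd = (0x50 << 1) |
+ * I2C_M_RD = 0xa1; for a write to the same slave, cmd = 0xa0.
+ */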
+
+/*
+ * Generic lpc-i2c transfer.
+ * Returns the number of processed messages or error (<0).
+ */
+static int mlxcpld_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs,
+ int num)
+{
+ struct mlxcpld_i2c_priv *priv = i2c_get_adapdata(adap);
+ u8 comm_len = 0;
+ int i, err;
+
+ err = mlxcpld_i2c_check_msg_params(priv, msgs, num);
+ if (err) {
+ dev_err(priv->dev, "Incorrect message\n");
+ return err;
+ }
+
+ for (i = 0; i < num; ++i)
+ comm_len += msgs[i].len;
+
+ /* Check bus state */
+ if (mlxcpld_i2c_wait_for_free(priv)) {
+ dev_err(priv->dev, "LPCI2C bridge is busy\n");
+
+ /*
+ * This usually means something serious has happened.
+ * There cannot be an unfinished previous transfer, so it
+ * makes no sense to try to stop it. Most likely we failed
+ * to recover from a previous error, and the only reasonable
+ * option left is a soft reset.
+ */
+ mlxcpld_i2c_reset(priv);
+ if (mlxcpld_i2c_check_busy(priv)) {
+ dev_err(priv->dev, "LPCI2C bridge is busy after reset\n");
+ return -EIO;
+ }
+ }
+
+ mlxcpld_i2c_set_transf_data(priv, msgs, num, comm_len);
+
+ mutex_lock(&priv->lock);
+
+ /* Do real transfer. Can't fail */
+ mlxcpld_i2c_xfer_msg(priv);
+
+ /* Wait for transaction complete */
+ err = mlxcpld_i2c_wait_for_tc(priv);
+
+ mutex_unlock(&priv->lock);
+
+ return err < 0 ? err : num;
+}
+
+static u32 mlxcpld_i2c_func(struct i2c_adapter *adap)
+{
+ return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL | I2C_FUNC_SMBUS_BLOCK_DATA;
+}
+
+static const struct i2c_algorithm mlxcpld_i2c_algo = {
+ .master_xfer = mlxcpld_i2c_xfer,
+ .functionality = mlxcpld_i2c_func
+};
+
+static struct i2c_adapter_quirks mlxcpld_i2c_quirks = {
+ .flags = I2C_AQ_COMB_WRITE_THEN_READ,
+ .max_read_len = MLXCPLD_I2C_DATA_REG_SZ - MLXCPLD_I2C_MAX_ADDR_LEN,
+ .max_write_len = MLXCPLD_I2C_DATA_REG_SZ,
+ .max_comb_1st_msg_len = 4,
+};
+
+static struct i2c_adapter mlxcpld_i2c_adapter = {
+ .owner = THIS_MODULE,
+ .name = "i2c-mlxcpld",
+ .class = I2C_CLASS_HWMON | I2C_CLASS_SPD,
+ .algo = &mlxcpld_i2c_algo,
+ .quirks = &mlxcpld_i2c_quirks,
+ .retries = MLXCPLD_I2C_RETR_NUM,
+ .nr = MLXCPLD_I2C_BUS_NUM,
+};
+
+static int mlxcpld_i2c_probe(struct platform_device *pdev)
+{
+ struct mlxcpld_i2c_priv *priv;
+ int err;
+
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ mutex_init(&priv->lock);
+ platform_set_drvdata(pdev, priv);
+
+ priv->dev = &pdev->dev;
+
+ /* Register with i2c layer */
+ mlxcpld_i2c_adapter.timeout = usecs_to_jiffies(MLXCPLD_I2C_XFER_TO);
+ priv->adap = mlxcpld_i2c_adapter;
+ priv->adap.dev.parent = &pdev->dev;
+ priv->base_addr = MLXPLAT_CPLD_LPC_I2C_BASE_ADDR;
+ i2c_set_adapdata(&priv->adap, priv);
+
+ err = i2c_add_numbered_adapter(&priv->adap);
+ if (err)
+ mutex_destroy(&priv->lock);
+
+ return err;
+}
+
+static int mlxcpld_i2c_remove(struct platform_device *pdev)
+{
+ struct mlxcpld_i2c_priv *priv = platform_get_drvdata(pdev);
+
+ i2c_del_adapter(&priv->adap);
+ mutex_destroy(&priv->lock);
+
+ return 0;
+}
+
+static struct platform_driver mlxcpld_i2c_driver = {
+ .probe = mlxcpld_i2c_probe,
+ .remove = mlxcpld_i2c_remove,
+ .driver = {
+ .name = MLXCPLD_I2C_DEVICE_NAME,
+ },
+};
+
+module_platform_driver(mlxcpld_i2c_driver);
+
+MODULE_AUTHOR("Michael Shych <michaels@mellanox.com>");
+MODULE_DESCRIPTION("Mellanox I2C-CPLD controller driver");
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_ALIAS("platform:i2c-mlxcpld");
diff --git a/drivers/i2c/busses/i2c-octeon-core.c b/drivers/i2c/busses/i2c-octeon-core.c
index 5e63b17f935d..1d8775799056 100644
--- a/drivers/i2c/busses/i2c-octeon-core.c
+++ b/drivers/i2c/busses/i2c-octeon-core.c
@@ -36,24 +36,6 @@ static bool octeon_i2c_test_iflg(struct octeon_i2c *i2c)
return (octeon_i2c_ctl_read(i2c) & TWSI_CTL_IFLG);
}
-static bool octeon_i2c_test_ready(struct octeon_i2c *i2c, bool *first)
-{
- if (octeon_i2c_test_iflg(i2c))
- return true;
-
- if (*first) {
- *first = false;
- return false;
- }
-
- /*
- * IRQ has signaled an event but IFLG hasn't changed.
- * Sleep and retry once.
- */
- usleep_range(I2C_OCTEON_EVENT_WAIT, 2 * I2C_OCTEON_EVENT_WAIT);
- return octeon_i2c_test_iflg(i2c);
-}
-
/**
* octeon_i2c_wait - wait for the IFLG to be set
* @i2c: The struct octeon_i2c
@@ -63,7 +45,6 @@ static bool octeon_i2c_test_ready(struct octeon_i2c *i2c, bool *first)
static int octeon_i2c_wait(struct octeon_i2c *i2c)
{
long time_left;
- bool first = true;
/*
* Some chip revisions don't assert the irq in the interrupt
@@ -80,7 +61,7 @@ static int octeon_i2c_wait(struct octeon_i2c *i2c)
}
i2c->int_enable(i2c);
- time_left = wait_event_timeout(i2c->queue, octeon_i2c_test_ready(i2c, &first),
+ time_left = wait_event_timeout(i2c->queue, octeon_i2c_test_iflg(i2c),
i2c->adap.timeout);
i2c->int_disable(i2c);
@@ -102,25 +83,6 @@ static bool octeon_i2c_hlc_test_valid(struct octeon_i2c *i2c)
return (__raw_readq(i2c->twsi_base + SW_TWSI(i2c)) & SW_TWSI_V) == 0;
}
-static bool octeon_i2c_hlc_test_ready(struct octeon_i2c *i2c, bool *first)
-{
- /* check if valid bit is cleared */
- if (octeon_i2c_hlc_test_valid(i2c))
- return true;
-
- if (*first) {
- *first = false;
- return false;
- }
-
- /*
- * IRQ has signaled an event but valid bit isn't cleared.
- * Sleep and retry once.
- */
- usleep_range(I2C_OCTEON_EVENT_WAIT, 2 * I2C_OCTEON_EVENT_WAIT);
- return octeon_i2c_hlc_test_valid(i2c);
-}
-
static void octeon_i2c_hlc_int_clear(struct octeon_i2c *i2c)
{
/* clear ST/TS events, listen for neither */
@@ -176,7 +138,6 @@ static void octeon_i2c_hlc_disable(struct octeon_i2c *i2c)
*/
static int octeon_i2c_hlc_wait(struct octeon_i2c *i2c)
{
- bool first = true;
int time_left;
/*
@@ -195,7 +156,7 @@ static int octeon_i2c_hlc_wait(struct octeon_i2c *i2c)
i2c->hlc_int_enable(i2c);
time_left = wait_event_timeout(i2c->queue,
- octeon_i2c_hlc_test_ready(i2c, &first),
+ octeon_i2c_hlc_test_valid(i2c),
i2c->adap.timeout);
i2c->hlc_int_disable(i2c);
if (!time_left)
@@ -381,7 +342,9 @@ static int octeon_i2c_read(struct octeon_i2c *i2c, int target,
if (result)
return result;
- data[i] = octeon_i2c_data_read(i2c);
+ data[i] = octeon_i2c_data_read(i2c, &result);
+ if (result)
+ return result;
if (recv_len && i == 0) {
if (data[i] > I2C_SMBUS_BLOCK_MAX + 1)
return -EPROTO;
@@ -789,6 +752,9 @@ static void octeon_i2c_prepare_recovery(struct i2c_adapter *adap)
struct octeon_i2c *i2c = i2c_get_adapdata(adap);
octeon_i2c_hlc_disable(i2c);
+ octeon_i2c_reg_write(i2c, SW_TWSI_EOP_TWSI_RST, 0);
+ /* wait for software reset to settle */
+ udelay(5);
/*
* Bring control register to a good state regardless
diff --git a/drivers/i2c/busses/i2c-octeon-core.h b/drivers/i2c/busses/i2c-octeon-core.h
index 87151ea74acd..e160f838c254 100644
--- a/drivers/i2c/busses/i2c-octeon-core.h
+++ b/drivers/i2c/busses/i2c-octeon-core.h
@@ -141,11 +141,14 @@ static inline void octeon_i2c_writeq_flush(u64 val, void __iomem *addr)
*/
static inline void octeon_i2c_reg_write(struct octeon_i2c *i2c, u64 eop_reg, u8 data)
{
+ int tries = 1000;
u64 tmp;
__raw_writeq(SW_TWSI_V | eop_reg | data, i2c->twsi_base + SW_TWSI(i2c));
do {
tmp = __raw_readq(i2c->twsi_base + SW_TWSI(i2c));
+ if (--tries < 0)
+ return;
} while ((tmp & SW_TWSI_V) != 0);
}
@@ -163,24 +166,32 @@ static inline void octeon_i2c_reg_write(struct octeon_i2c *i2c, u64 eop_reg, u8
*
* The I2C core registers are accessed indirectly via the SW_TWSI CSR.
*/
-static inline u8 octeon_i2c_reg_read(struct octeon_i2c *i2c, u64 eop_reg)
+static inline int octeon_i2c_reg_read(struct octeon_i2c *i2c, u64 eop_reg,
+ int *error)
{
+ int tries = 1000;
u64 tmp;
__raw_writeq(SW_TWSI_V | eop_reg | SW_TWSI_R, i2c->twsi_base + SW_TWSI(i2c));
do {
tmp = __raw_readq(i2c->twsi_base + SW_TWSI(i2c));
+ if (--tries < 0) {
+ /* signal that the returned data is invalid */
+ if (error)
+ *error = -EIO;
+ return 0;
+ }
} while ((tmp & SW_TWSI_V) != 0);
return tmp & 0xFF;
}
#define octeon_i2c_ctl_read(i2c) \
- octeon_i2c_reg_read(i2c, SW_TWSI_EOP_TWSI_CTL)
-#define octeon_i2c_data_read(i2c) \
- octeon_i2c_reg_read(i2c, SW_TWSI_EOP_TWSI_DATA)
+ octeon_i2c_reg_read(i2c, SW_TWSI_EOP_TWSI_CTL, NULL)
+#define octeon_i2c_data_read(i2c, error) \
+ octeon_i2c_reg_read(i2c, SW_TWSI_EOP_TWSI_DATA, error)
#define octeon_i2c_stat_read(i2c) \
- octeon_i2c_reg_read(i2c, SW_TWSI_EOP_TWSI_STAT)
+ octeon_i2c_reg_read(i2c, SW_TWSI_EOP_TWSI_STAT, NULL)
/**
* octeon_i2c_read_int - read the TWSI_INT register
diff --git a/drivers/i2c/busses/i2c-pxa-pci.c b/drivers/i2c/busses/i2c-pxa-pci.c
index 417464e9ea2a..004deb96afe3 100644
--- a/drivers/i2c/busses/i2c-pxa-pci.c
+++ b/drivers/i2c/busses/i2c-pxa-pci.c
@@ -1,9 +1,13 @@
/*
+ * CE4100 PCI-I2C glue code for PXA's driver
+ * Author: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+ * License: GPL v2
+ *
* The CE4100's I2C device is more or less the same one as found on PXA.
 * It does not support slave mode, and the registers are slightly moved. This
 * PCI device provides three BARs, each containing a single I2C controller.
*/
-#include <linux/module.h>
+#include <linux/init.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/i2c/pxa-i2c.h>
@@ -134,35 +138,17 @@ err_mem:
return ret;
}
-static void ce4100_i2c_remove(struct pci_dev *dev)
-{
- struct ce4100_devices *sds;
- unsigned int i;
-
- sds = pci_get_drvdata(dev);
-
- for (i = 0; i < ARRAY_SIZE(sds->pdev); i++)
- platform_device_unregister(sds->pdev[i]);
-
- pci_disable_device(dev);
- kfree(sds);
-}
-
static const struct pci_device_id ce4100_i2c_devices[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2e68)},
{ },
};
-MODULE_DEVICE_TABLE(pci, ce4100_i2c_devices);
static struct pci_driver ce4100_i2c_driver = {
+ .driver = {
+ .suppress_bind_attrs = true,
+ },
.name = "ce4100_i2c",
.id_table = ce4100_i2c_devices,
.probe = ce4100_i2c_probe,
- .remove = ce4100_i2c_remove,
};
-
-module_pci_driver(ce4100_i2c_driver);
-
-MODULE_DESCRIPTION("CE4100 PCI-I2C glue code for PXA's driver");
-MODULE_LICENSE("GPL v2");
-MODULE_AUTHOR("Sebastian Andrzej Siewior <bigeasy@linutronix.de>");
+builtin_pci_driver(ce4100_i2c_driver);
diff --git a/drivers/i2c/busses/i2c-pxa.c b/drivers/i2c/busses/i2c-pxa.c
index e28b825b0433..6cf333ecc8b8 100644
--- a/drivers/i2c/busses/i2c-pxa.c
+++ b/drivers/i2c/busses/i2c-pxa.c
@@ -48,6 +48,8 @@ struct pxa_reg_layout {
u32 isar;
u32 ilcr;
u32 iwcr;
+ u32 fm;
+ u32 hs;
};
enum pxa_i2c_types {
@@ -55,8 +57,12 @@ enum pxa_i2c_types {
REGS_PXA3XX,
REGS_CE4100,
REGS_PXA910,
+ REGS_A3700,
};
+#define ICR_BUSMODE_FM (1 << 16) /* shifted fast mode for armada-3700 */
+#define ICR_BUSMODE_HS (1 << 17) /* shifted high speed mode for armada-3700 */
+
/*
* I2C registers definitions
*/
@@ -91,6 +97,15 @@ static struct pxa_reg_layout pxa_reg_layout[] = {
.ilcr = 0x28,
.iwcr = 0x30,
},
+ [REGS_A3700] = {
+ .ibmr = 0x00,
+ .idbr = 0x04,
+ .icr = 0x08,
+ .isr = 0x0c,
+ .isar = 0x10,
+ .fm = ICR_BUSMODE_FM,
+ .hs = ICR_BUSMODE_HS,
+ },
};
static const struct platform_device_id i2c_pxa_id_table[] = {
@@ -98,6 +113,7 @@ static const struct platform_device_id i2c_pxa_id_table[] = {
{ "pxa3xx-pwri2c", REGS_PXA3XX },
{ "ce4100-i2c", REGS_CE4100 },
{ "pxa910-i2c", REGS_PXA910 },
+ { "armada-3700-i2c", REGS_A3700 },
{ },
};
MODULE_DEVICE_TABLE(platform, i2c_pxa_id_table);
@@ -193,6 +209,8 @@ struct pxa_i2c {
unsigned char master_code;
unsigned long rate;
bool highmode_enter;
+ u32 fm_mask;
+ u32 hs_mask;
};
#define _IBMR(i2c) ((i2c)->reg_ibmr)
@@ -503,8 +521,8 @@ static void i2c_pxa_reset(struct pxa_i2c *i2c)
writel(i2c->slave_addr, _ISAR(i2c));
/* set control register values */
- writel(I2C_ICR_INIT | (i2c->fast_mode ? ICR_FM : 0), _ICR(i2c));
- writel(readl(_ICR(i2c)) | (i2c->high_mode ? ICR_HS : 0), _ICR(i2c));
+ writel(I2C_ICR_INIT | (i2c->fast_mode ? i2c->fm_mask : 0), _ICR(i2c));
+ writel(readl(_ICR(i2c)) | (i2c->high_mode ? i2c->hs_mask : 0), _ICR(i2c));
#ifdef CONFIG_I2C_PXA_SLAVE
dev_info(&i2c->adap.dev, "Enabling slave mode\n");
@@ -1137,6 +1155,7 @@ static const struct of_device_id i2c_pxa_dt_ids[] = {
{ .compatible = "mrvl,pxa-i2c", .data = (void *)REGS_PXA2XX },
{ .compatible = "mrvl,pwri2c", .data = (void *)REGS_PXA3XX },
{ .compatible = "mrvl,mmp-twsi", .data = (void *)REGS_PXA910 },
+ { .compatible = "marvell,armada-3700-i2c", .data = (void *)REGS_A3700 },
{}
};
MODULE_DEVICE_TABLE(of, i2c_pxa_dt_ids);
@@ -1234,6 +1253,9 @@ static int i2c_pxa_probe(struct platform_device *dev)
i2c->reg_idbr = i2c->reg_base + pxa_reg_layout[i2c_type].idbr;
i2c->reg_icr = i2c->reg_base + pxa_reg_layout[i2c_type].icr;
i2c->reg_isr = i2c->reg_base + pxa_reg_layout[i2c_type].isr;
+ i2c->fm_mask = pxa_reg_layout[i2c_type].fm ? : ICR_FM;
+ i2c->hs_mask = pxa_reg_layout[i2c_type].hs ? : ICR_HS;
+
if (i2c_type != REGS_CE4100)
i2c->reg_isar = i2c->reg_base + pxa_reg_layout[i2c_type].isar;
diff --git a/drivers/i2c/busses/i2c-qup.c b/drivers/i2c/busses/i2c-qup.c
index a8497cfdae6f..1902d8ac9753 100644
--- a/drivers/i2c/busses/i2c-qup.c
+++ b/drivers/i2c/busses/i2c-qup.c
@@ -14,6 +14,7 @@
*
*/
+#include <linux/acpi.h>
#include <linux/atomic.h>
#include <linux/clk.h>
#include <linux/delay.h>
@@ -132,6 +133,10 @@
/* Max timeout in ms for 32k bytes */
#define TOUT_MAX 300
+/* Default values. Use these if FW query fails */
+#define DEFAULT_CLK_FREQ 100000
+#define DEFAULT_SRC_CLK 20000000
+
struct qup_i2c_block {
int count;
int pos;
@@ -525,6 +530,33 @@ static int qup_i2c_get_data_len(struct qup_i2c_dev *qup)
return data_len;
}
+static bool qup_i2c_check_msg_len(struct i2c_msg *msg)
+{
+ return ((msg->flags & I2C_M_RD) && (msg->flags & I2C_M_RECV_LEN));
+}
+
+static int qup_i2c_set_tags_smb(u16 addr, u8 *tags, struct qup_i2c_dev *qup,
+ struct i2c_msg *msg)
+{
+ int len = 0;
+
+ if (msg->len > 1) {
+ tags[len++] = QUP_TAG_V2_DATARD_STOP;
+ tags[len++] = qup_i2c_get_data_len(qup) - 1;
+ } else {
+ tags[len++] = QUP_TAG_V2_START;
+ tags[len++] = addr & 0xff;
+
+ if (msg->flags & I2C_M_TEN)
+ tags[len++] = addr >> 8;
+
+ tags[len++] = QUP_TAG_V2_DATARD;
+ /* Read 1 byte indicating the length of the SMBus message */
+ tags[len++] = 1;
+ }
+ return len;
+}
+
static int qup_i2c_set_tags(u8 *tags, struct qup_i2c_dev *qup,
struct i2c_msg *msg, int is_dma)
{
@@ -534,6 +566,10 @@ static int qup_i2c_set_tags(u8 *tags, struct qup_i2c_dev *qup,
int last = (qup->blk.pos == (qup->blk.count - 1)) && (qup->is_last);
+ /* Handle tags for SMBus block read */
+ if (qup_i2c_check_msg_len(msg))
+ return qup_i2c_set_tags_smb(addr, tags, qup, msg);
+
if (qup->blk.pos == 0) {
tags[len++] = QUP_TAG_V2_START;
tags[len++] = addr & 0xff;
@@ -1056,9 +1092,17 @@ static int qup_i2c_read_fifo_v2(struct qup_i2c_dev *qup,
struct i2c_msg *msg)
{
u32 val;
- int idx, pos = 0, ret = 0, total;
+ int idx, pos = 0, ret = 0, total, msg_offset = 0;
+ /*
+ * If the message length has already been read into
+ * the first byte of the buffer, account for that by
+ * setting an offset.
+ */
+ if (qup_i2c_check_msg_len(msg) && (msg->len > 1))
+ msg_offset = 1;
total = qup_i2c_get_data_len(qup);
+ total -= msg_offset;
/* 2 extra bytes for read tags */
while (pos < (total + 2)) {
@@ -1078,8 +1122,8 @@ static int qup_i2c_read_fifo_v2(struct qup_i2c_dev *qup,
if (pos >= (total + 2))
goto out;
-
- msg->buf[qup->pos++] = val & 0xff;
+ msg->buf[qup->pos + msg_offset] = val & 0xff;
+ qup->pos++;
}
}
@@ -1119,6 +1163,20 @@ static int qup_i2c_read_one_v2(struct qup_i2c_dev *qup, struct i2c_msg *msg)
goto err;
qup->blk.pos++;
+
+ /* Handle SMBus block read length */
+ if (qup_i2c_check_msg_len(msg) && (msg->len == 1)) {
+ if (msg->buf[0] > I2C_SMBUS_BLOCK_MAX) {
+ ret = -EPROTO;
+ goto err;
+ }
+ msg->len += msg->buf[0];
+ qup->pos = 0;
+ qup_i2c_set_blk_data(qup, msg);
+ /* set tag length for block read */
+ qup->blk.tx_tag_len = 2;
+ qup_i2c_set_read_mode_v2(qup, msg->buf[0]);
+ }
} while (qup->blk.pos < qup->blk.count);
err:
@@ -1204,6 +1262,11 @@ static int qup_i2c_xfer(struct i2c_adapter *adap,
goto out;
}
+ if (qup_i2c_check_msg_len(&msgs[idx])) {
+ ret = -EINVAL;
+ goto out;
+ }
+
if (msgs[idx].flags & I2C_M_RD)
ret = qup_i2c_read_one(qup, &msgs[idx]);
else
@@ -1358,14 +1421,13 @@ static void qup_i2c_disable_clocks(struct qup_i2c_dev *qup)
static int qup_i2c_probe(struct platform_device *pdev)
{
static const int blk_sizes[] = {4, 16, 32};
- struct device_node *node = pdev->dev.of_node;
struct qup_i2c_dev *qup;
unsigned long one_bit_t;
struct resource *res;
u32 io_mode, hw_ver, size;
int ret, fs_div, hs_div;
- int src_clk_freq;
- u32 clk_freq = 100000;
+ u32 src_clk_freq = DEFAULT_SRC_CLK;
+ u32 clk_freq = DEFAULT_CLK_FREQ;
int blocks;
qup = devm_kzalloc(&pdev->dev, sizeof(*qup), GFP_KERNEL);
@@ -1376,7 +1438,11 @@ static int qup_i2c_probe(struct platform_device *pdev)
init_completion(&qup->xfer);
platform_set_drvdata(pdev, qup);
- of_property_read_u32(node, "clock-frequency", &clk_freq);
+ ret = device_property_read_u32(qup->dev, "clock-frequency", &clk_freq);
+ if (ret) {
+ dev_notice(qup->dev, "using default clock-frequency %d",
+ DEFAULT_CLK_FREQ);
+ }
if (of_device_is_compatible(pdev->dev.of_node, "qcom,i2c-qup-v1.1.1")) {
qup->adap.algo = &qup_i2c_algo;
@@ -1452,20 +1518,30 @@ nodma:
return qup->irq;
}
- qup->clk = devm_clk_get(qup->dev, "core");
- if (IS_ERR(qup->clk)) {
- dev_err(qup->dev, "Could not get core clock\n");
- return PTR_ERR(qup->clk);
- }
+ if (has_acpi_companion(qup->dev)) {
+ ret = device_property_read_u32(qup->dev,
+ "src-clock-hz", &src_clk_freq);
+ if (ret) {
+ dev_notice(qup->dev, "using default src-clock-hz %d",
+ DEFAULT_SRC_CLK);
+ }
+ ACPI_COMPANION_SET(&qup->adap.dev, ACPI_COMPANION(qup->dev));
+ } else {
+ qup->clk = devm_clk_get(qup->dev, "core");
+ if (IS_ERR(qup->clk)) {
+ dev_err(qup->dev, "Could not get core clock\n");
+ return PTR_ERR(qup->clk);
+ }
- qup->pclk = devm_clk_get(qup->dev, "iface");
- if (IS_ERR(qup->pclk)) {
- dev_err(qup->dev, "Could not get iface clock\n");
- return PTR_ERR(qup->pclk);
+ qup->pclk = devm_clk_get(qup->dev, "iface");
+ if (IS_ERR(qup->pclk)) {
+ dev_err(qup->dev, "Could not get iface clock\n");
+ return PTR_ERR(qup->pclk);
+ }
+ qup_i2c_enable_clocks(qup);
+ src_clk_freq = clk_get_rate(qup->clk);
}
- qup_i2c_enable_clocks(qup);
-
/*
* Bootloaders might leave a pending interrupt on certain QUP's,
* so we reset the core before registering for interrupts.
@@ -1512,7 +1588,6 @@ nodma:
size = QUP_INPUT_FIFO_SIZE(io_mode);
qup->in_fifo_sz = qup->in_blk_sz * (2 << size);
- src_clk_freq = clk_get_rate(qup->clk);
fs_div = ((src_clk_freq / clk_freq) / 2) - 3;
hs_div = 3;
qup->clk_ctl = (hs_div << 8) | (fs_div & 0xff);
@@ -1631,6 +1706,14 @@ static const struct of_device_id qup_i2c_dt_match[] = {
};
MODULE_DEVICE_TABLE(of, qup_i2c_dt_match);
+#if IS_ENABLED(CONFIG_ACPI)
+static const struct acpi_device_id qup_i2c_acpi_match[] = {
+ { "QCOM8010"},
+ { },
+};
+MODULE_DEVICE_TABLE(acpi, qup_i2c_acpi_match);
+#endif
+
static struct platform_driver qup_i2c_driver = {
.probe = qup_i2c_probe,
.remove = qup_i2c_remove,
@@ -1638,6 +1721,7 @@ static struct platform_driver qup_i2c_driver = {
.name = "i2c_qup",
.pm = &qup_i2c_qup_pm_ops,
.of_match_table = qup_i2c_dt_match,
+ .acpi_match_table = ACPI_PTR(qup_i2c_acpi_match),
},
};
diff --git a/drivers/i2c/busses/i2c-rcar.c b/drivers/i2c/busses/i2c-rcar.c
index 726615e54f2a..26f2ff22e97e 100644
--- a/drivers/i2c/busses/i2c-rcar.c
+++ b/drivers/i2c/busses/i2c-rcar.c
@@ -793,7 +793,6 @@ static const struct i2c_algorithm rcar_i2c_algo = {
};
static const struct of_device_id rcar_i2c_dt_ids[] = {
- { .compatible = "renesas,i2c-rcar", .data = (void *)I2C_RCAR_GEN1 },
{ .compatible = "renesas,i2c-r8a7778", .data = (void *)I2C_RCAR_GEN1 },
{ .compatible = "renesas,i2c-r8a7779", .data = (void *)I2C_RCAR_GEN1 },
{ .compatible = "renesas,i2c-r8a7790", .data = (void *)I2C_RCAR_GEN2 },
@@ -803,6 +802,10 @@ static const struct of_device_id rcar_i2c_dt_ids[] = {
{ .compatible = "renesas,i2c-r8a7794", .data = (void *)I2C_RCAR_GEN2 },
{ .compatible = "renesas,i2c-r8a7795", .data = (void *)I2C_RCAR_GEN3 },
{ .compatible = "renesas,i2c-r8a7796", .data = (void *)I2C_RCAR_GEN3 },
+ { .compatible = "renesas,i2c-rcar", .data = (void *)I2C_RCAR_GEN1 }, /* Deprecated */
+ { .compatible = "renesas,rcar-gen1-i2c", .data = (void *)I2C_RCAR_GEN1 },
+ { .compatible = "renesas,rcar-gen2-i2c", .data = (void *)I2C_RCAR_GEN2 },
+ { .compatible = "renesas,rcar-gen3-i2c", .data = (void *)I2C_RCAR_GEN3 },
{},
};
MODULE_DEVICE_TABLE(of, rcar_i2c_dt_ids);
diff --git a/drivers/i2c/busses/i2c-sh_mobile.c b/drivers/i2c/busses/i2c-sh_mobile.c
index 192f36f00e4d..3d9ebe6e5716 100644
--- a/drivers/i2c/busses/i2c-sh_mobile.c
+++ b/drivers/i2c/busses/i2c-sh_mobile.c
@@ -827,7 +827,6 @@ static const struct sh_mobile_dt_config r8a7740_dt_config = {
};
static const struct of_device_id sh_mobile_i2c_dt_ids[] = {
- { .compatible = "renesas,rmobile-iic", .data = &default_dt_config },
{ .compatible = "renesas,iic-r8a73a4", .data = &fast_clock_dt_config },
{ .compatible = "renesas,iic-r8a7740", .data = &r8a7740_dt_config },
{ .compatible = "renesas,iic-r8a7790", .data = &fast_clock_dt_config },
@@ -835,8 +834,11 @@ static const struct of_device_id sh_mobile_i2c_dt_ids[] = {
{ .compatible = "renesas,iic-r8a7792", .data = &fast_clock_dt_config },
{ .compatible = "renesas,iic-r8a7793", .data = &fast_clock_dt_config },
{ .compatible = "renesas,iic-r8a7794", .data = &fast_clock_dt_config },
+ { .compatible = "renesas,rcar-gen2-iic", .data = &fast_clock_dt_config },
{ .compatible = "renesas,iic-r8a7795", .data = &fast_clock_dt_config },
+ { .compatible = "renesas,rcar-gen3-iic", .data = &fast_clock_dt_config },
{ .compatible = "renesas,iic-sh73a0", .data = &fast_clock_dt_config },
+ { .compatible = "renesas,rmobile-iic", .data = &default_dt_config },
{},
};
MODULE_DEVICE_TABLE(of, sh_mobile_i2c_dt_ids);
diff --git a/drivers/i2c/busses/i2c-uniphier-f.c b/drivers/i2c/busses/i2c-uniphier-f.c
index db9105e52c79..beee31892295 100644
--- a/drivers/i2c/busses/i2c-uniphier-f.c
+++ b/drivers/i2c/busses/i2c-uniphier-f.c
@@ -528,7 +528,7 @@ static int uniphier_fi2c_probe(struct platform_device *pdev)
if (!clk_rate) {
dev_err(dev, "input clock rate should not be zero\n");
ret = -EINVAL;
- goto err;
+ goto disable_clk;
}
init_completion(&priv->comp);
@@ -547,11 +547,11 @@ static int uniphier_fi2c_probe(struct platform_device *pdev)
pdev->name, priv);
if (ret) {
dev_err(dev, "failed to request irq %d\n", irq);
- goto err;
+ goto disable_clk;
}
ret = i2c_add_adapter(&priv->adap);
-err:
+disable_clk:
if (ret)
clk_disable_unprepare(priv->clk);
diff --git a/drivers/i2c/busses/i2c-uniphier.c b/drivers/i2c/busses/i2c-uniphier.c
index 56e92af46ddc..777c0fe93653 100644
--- a/drivers/i2c/busses/i2c-uniphier.c
+++ b/drivers/i2c/busses/i2c-uniphier.c
@@ -373,7 +373,7 @@ static int uniphier_i2c_probe(struct platform_device *pdev)
if (!clk_rate) {
dev_err(dev, "input clock rate should not be zero\n");
ret = -EINVAL;
- goto err;
+ goto disable_clk;
}
init_completion(&priv->comp);
@@ -392,11 +392,11 @@ static int uniphier_i2c_probe(struct platform_device *pdev)
priv);
if (ret) {
dev_err(dev, "failed to request irq %d\n", irq);
- goto err;
+ goto disable_clk;
}
ret = i2c_add_adapter(&priv->adap);
-err:
+disable_clk:
if (ret)
clk_disable_unprepare(priv->clk);
diff --git a/drivers/i2c/busses/i2c-viperboard.c b/drivers/i2c/busses/i2c-viperboard.c
index 543456a0a338..e4be86b3de9a 100644
--- a/drivers/i2c/busses/i2c-viperboard.c
+++ b/drivers/i2c/busses/i2c-viperboard.c
@@ -354,7 +354,7 @@ static const struct i2c_algorithm vprbrd_algorithm = {
.functionality = vprbrd_i2c_func,
};
-static struct i2c_adapter_quirks vprbrd_quirks = {
+static const struct i2c_adapter_quirks vprbrd_quirks = {
.max_read_len = 2048,
.max_write_len = 2048,
};
diff --git a/drivers/i2c/busses/i2c-xgene-slimpro.c b/drivers/i2c/busses/i2c-xgene-slimpro.c
index 05cf192ef1ac..0ab1e55558bc 100644
--- a/drivers/i2c/busses/i2c-xgene-slimpro.c
+++ b/drivers/i2c/busses/i2c-xgene-slimpro.c
@@ -415,6 +415,7 @@ static int xgene_slimpro_i2c_probe(struct platform_device *pdev)
adapter->algo = &xgene_slimpro_i2c_algorithm;
adapter->class = I2C_CLASS_HWMON;
adapter->dev.parent = &pdev->dev;
+ adapter->dev.of_node = pdev->dev.of_node;
i2c_set_adapdata(adapter, ctx);
rc = i2c_add_adapter(adapter);
if (rc) {
diff --git a/drivers/i2c/busses/i2c-xlp9xx.c b/drivers/i2c/busses/i2c-xlp9xx.c
index e29ff37a43bd..84a8b2eccffb 100644
--- a/drivers/i2c/busses/i2c-xlp9xx.c
+++ b/drivers/i2c/busses/i2c-xlp9xx.c
@@ -393,6 +393,7 @@ static int xlp9xx_i2c_probe(struct platform_device *pdev)
init_completion(&priv->msg_complete);
priv->adapter.dev.parent = &pdev->dev;
priv->adapter.algo = &xlp9xx_i2c_algo;
+ ACPI_COMPANION_SET(&priv->adapter.dev, ACPI_COMPANION(&pdev->dev));
priv->adapter.dev.of_node = pdev->dev.of_node;
priv->dev = &pdev->dev;
diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c
index b432b64e307a..3de95a29024c 100644
--- a/drivers/i2c/i2c-core.c
+++ b/drivers/i2c/i2c-core.c
@@ -65,6 +65,9 @@
#define I2C_ADDR_OFFSET_TEN_BIT 0xa000
#define I2C_ADDR_OFFSET_SLAVE 0x1000
+#define I2C_ADDR_7BITS_MAX 0x77
+#define I2C_ADDR_7BITS_COUNT (I2C_ADDR_7BITS_MAX + 1)
+
/* core_lock protects i2c_adapter_idr, and guarantees
that device detection, deletion of detected devices, and attach_adapter
calls are serialized */
@@ -77,9 +80,10 @@ static int i2c_detect(struct i2c_adapter *adapter, struct i2c_driver *driver);
static struct static_key i2c_trace_msg = STATIC_KEY_INIT_FALSE;
static bool is_registered;
-void i2c_transfer_trace_reg(void)
+int i2c_transfer_trace_reg(void)
{
static_key_slow_inc(&i2c_trace_msg);
+ return 0;
}
void i2c_transfer_trace_unreg(void)
@@ -676,9 +680,12 @@ static inline int i2c_acpi_install_space_handler(struct i2c_adapter *adapter)
/* ------------------------------------------------------------------------- */
-static const struct i2c_device_id *i2c_match_id(const struct i2c_device_id *id,
+const struct i2c_device_id *i2c_match_id(const struct i2c_device_id *id,
const struct i2c_client *client)
{
+ if (!(id && client))
+ return NULL;
+
while (id->name[0]) {
if (strcmp(client->name, id->name) == 0)
return id;
@@ -686,17 +693,16 @@ static const struct i2c_device_id *i2c_match_id(const struct i2c_device_id *id,
}
return NULL;
}
+EXPORT_SYMBOL_GPL(i2c_match_id);
static int i2c_device_match(struct device *dev, struct device_driver *drv)
{
struct i2c_client *client = i2c_verify_client(dev);
struct i2c_driver *driver;
- if (!client)
- return 0;
/* Attempt an OF style match */
- if (of_driver_match_device(dev, drv))
+ if (i2c_of_match_device(drv->of_match_table, client))
return 1;
/* Then ACPI style match */
@@ -704,9 +710,10 @@ static int i2c_device_match(struct device *dev, struct device_driver *drv)
return 1;
driver = to_i2c_driver(drv);
- /* match on an id table if there is one */
- if (driver->id_table)
- return i2c_match_id(driver->id_table, client) != NULL;
+
+ /* Finally an I2C match */
+ if (i2c_match_id(driver->id_table, client))
+ return 1;
return 0;
}
@@ -893,6 +900,25 @@ static void i2c_init_recovery(struct i2c_adapter *adap)
adap->bus_recovery_info = NULL;
}
+static int i2c_smbus_host_notify_to_irq(const struct i2c_client *client)
+{
+ struct i2c_adapter *adap = client->adapter;
+ unsigned int irq;
+
+ if (!adap->host_notify_domain)
+ return -ENXIO;
+
+ if (client->flags & I2C_CLIENT_TEN)
+ return -EINVAL;
+
+ irq = irq_find_mapping(adap->host_notify_domain, client->addr);
+ if (!irq)
+ irq = irq_create_mapping(adap->host_notify_domain,
+ client->addr);
+
+ return irq > 0 ? irq : -ENXIO;
+}
+
static int i2c_device_probe(struct device *dev)
{
struct i2c_client *client = i2c_verify_client(dev);
@@ -914,6 +940,14 @@ static int i2c_device_probe(struct device *dev)
}
if (irq == -EPROBE_DEFER)
return irq;
+ /*
+ * ACPI and OF did not find any useful IRQ; try to see
+ * if Host Notify can be used.
+ */
+ if (irq < 0) {
+ dev_dbg(dev, "Using Host Notify IRQ\n");
+ irq = i2c_smbus_host_notify_to_irq(client);
+ }
if (irq < 0)
irq = 0;
@@ -921,7 +955,13 @@ static int i2c_device_probe(struct device *dev)
}
driver = to_i2c_driver(dev->driver);
- if (!driver->probe || !driver->id_table)
+
+ /*
+ * An I2C ID table is not mandatory, if and only if, a suitable Device
+ * Tree match table entry is supplied for the probing device.
+ */
+ if (!driver->id_table &&
+ !i2c_of_match_device(dev->driver->of_match_table, client))
return -ENODEV;
if (client->flags & I2C_CLIENT_WAKE) {
@@ -956,7 +996,18 @@ static int i2c_device_probe(struct device *dev)
if (status == -EPROBE_DEFER)
goto err_clear_wakeup_irq;
- status = driver->probe(client, i2c_match_id(driver->id_table, client));
+ /*
+ * When there are no more users of probe(),
+ * rename probe_new to probe.
+ */
+ if (driver->probe_new)
+ status = driver->probe_new(client);
+ else if (driver->probe)
+ status = driver->probe(client,
+ i2c_match_id(driver->id_table, client));
+ else
+ status = -EINVAL;
+
if (status)
goto err_detach_pm_domain;
@@ -1767,6 +1818,52 @@ struct i2c_adapter *of_get_i2c_adapter_by_node(struct device_node *node)
return adapter;
}
EXPORT_SYMBOL(of_get_i2c_adapter_by_node);
+
+static const struct of_device_id*
+i2c_of_match_device_sysfs(const struct of_device_id *matches,
+ struct i2c_client *client)
+{
+ const char *name;
+
+ for (; matches->compatible[0]; matches++) {
+ /*
+ * Adding devices through the i2c sysfs interface provides us
+ * a string to match which may be compatible with the device
+ * tree compatible strings. However, with no actual of_node,
+ * of_match_device() will not match.
+ */
+ if (sysfs_streq(client->name, matches->compatible))
+ return matches;
+
+ name = strchr(matches->compatible, ',');
+ if (!name)
+ name = matches->compatible;
+ else
+ name++;
+
+ if (sysfs_streq(client->name, name))
+ return matches;
+ }
+
+ return NULL;
+}
+
+const struct of_device_id
+*i2c_of_match_device(const struct of_device_id *matches,
+ struct i2c_client *client)
+{
+ const struct of_device_id *match;
+
+ if (!(client && matches))
+ return NULL;
+
+ match = of_match_device(matches, &client->dev);
+ if (match)
+ return match;
+
+ return i2c_of_match_device_sysfs(matches, client);
+}
+EXPORT_SYMBOL_GPL(i2c_of_match_device);
#else
static void of_i2c_register_devices(struct i2c_adapter *adap) { }
#endif /* CONFIG_OF */
@@ -1800,6 +1897,79 @@ static const struct i2c_lock_operations i2c_adapter_lock_ops = {
.unlock_bus = i2c_adapter_unlock_bus,
};
+static void i2c_host_notify_irq_teardown(struct i2c_adapter *adap)
+{
+ struct irq_domain *domain = adap->host_notify_domain;
+ irq_hw_number_t hwirq;
+
+ if (!domain)
+ return;
+
+ for (hwirq = 0 ; hwirq < I2C_ADDR_7BITS_COUNT ; hwirq++)
+ irq_dispose_mapping(irq_find_mapping(domain, hwirq));
+
+ irq_domain_remove(domain);
+ adap->host_notify_domain = NULL;
+}
+
+static int i2c_host_notify_irq_map(struct irq_domain *h,
+ unsigned int virq,
+ irq_hw_number_t hw_irq_num)
+{
+ irq_set_chip_and_handler(virq, &dummy_irq_chip, handle_simple_irq);
+
+ return 0;
+}
+
+static const struct irq_domain_ops i2c_host_notify_irq_ops = {
+ .map = i2c_host_notify_irq_map,
+};
+
+static int i2c_setup_host_notify_irq_domain(struct i2c_adapter *adap)
+{
+ struct irq_domain *domain;
+
+ if (!i2c_check_functionality(adap, I2C_FUNC_SMBUS_HOST_NOTIFY))
+ return 0;
+
+ domain = irq_domain_create_linear(adap->dev.fwnode,
+ I2C_ADDR_7BITS_COUNT,
+ &i2c_host_notify_irq_ops, adap);
+ if (!domain)
+ return -ENOMEM;
+
+ adap->host_notify_domain = domain;
+
+ return 0;
+}
+
+/**
+ * i2c_handle_smbus_host_notify - Forward a Host Notify event to the correct
+ * I2C client.
+ * @adap: the adapter
+ * @addr: the I2C address of the notifying device
+ * Context: can't sleep
+ *
+ * Helper function to be called from an I2C bus driver's interrupt
+ * handler. It will schedule the Host Notify IRQ.
+ */
+int i2c_handle_smbus_host_notify(struct i2c_adapter *adap, unsigned short addr)
+{
+ int irq;
+
+ if (!adap)
+ return -EINVAL;
+
+ irq = irq_find_mapping(adap->host_notify_domain, addr);
+ if (irq <= 0)
+ return -ENXIO;
+
+ generic_handle_irq(irq);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(i2c_handle_smbus_host_notify);
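+
+/*
+ * Hypothetical usage sketch (the names below are illustrative, not part
+ * of this patch): a bus driver's interrupt handler that detects a Host
+ * Notify event forwards the originating address, e.g.:
+ *
+ *   if (status & MY_HOST_NOTIFY_BIT)
+ *           i2c_handle_smbus_host_notify(&priv->adap, notify_addr);
+ */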
+
static int i2c_register_adapter(struct i2c_adapter *adap)
{
int res = -EINVAL;
@@ -1831,6 +2001,14 @@ static int i2c_register_adapter(struct i2c_adapter *adap)
if (adap->timeout == 0)
adap->timeout = HZ;
+ /* register soft irqs for Host Notify */
+ res = i2c_setup_host_notify_irq_domain(adap);
+ if (res) {
+ pr_err("adapter '%s': can't create Host Notify IRQs (%d)\n",
+ adap->name, res);
+ goto out_list;
+ }
+
dev_set_name(&adap->dev, "i2c-%d", adap->nr);
adap->dev.bus = &i2c_bus_type;
adap->dev.type = &i2c_adapter_type;
@@ -2068,6 +2246,8 @@ void i2c_del_adapter(struct i2c_adapter *adap)
pm_runtime_disable(&adap->dev);
+ i2c_host_notify_irq_teardown(adap);
+
/* wait until all references to the device are gone
*
* FIXME: This is old code and should ideally be replaced by an
diff --git a/drivers/i2c/i2c-smbus.c b/drivers/i2c/i2c-smbus.c
index b0d2679c60d1..f9271c713d20 100644
--- a/drivers/i2c/i2c-smbus.c
+++ b/drivers/i2c/i2c-smbus.c
@@ -241,108 +241,6 @@ int i2c_handle_smbus_alert(struct i2c_client *ara)
}
EXPORT_SYMBOL_GPL(i2c_handle_smbus_alert);
-static void smbus_host_notify_work(struct work_struct *work)
-{
- struct alert_data alert;
- struct i2c_adapter *adapter;
- unsigned long flags;
- u16 payload;
- u8 addr;
- struct smbus_host_notify *data;
-
- data = container_of(work, struct smbus_host_notify, work);
-
- spin_lock_irqsave(&data->lock, flags);
- payload = data->payload;
- addr = data->addr;
- adapter = data->adapter;
-
- /* clear the pending bit and release the spinlock */
- data->pending = false;
- spin_unlock_irqrestore(&data->lock, flags);
-
- if (!adapter || !addr)
- return;
-
- alert.type = I2C_PROTOCOL_SMBUS_HOST_NOTIFY;
- alert.addr = addr;
- alert.data = payload;
-
- device_for_each_child(&adapter->dev, &alert, smbus_do_alert);
-}
-
-/**
- * i2c_setup_smbus_host_notify - Allocate a new smbus_host_notify for the given
- * I2C adapter.
- * @adapter: the adapter we want to associate a Host Notify function
- *
- * Returns a struct smbus_host_notify pointer on success, and NULL on failure.
- * The resulting smbus_host_notify must not be freed afterwards, it is a
- * managed resource already.
- */
-struct smbus_host_notify *i2c_setup_smbus_host_notify(struct i2c_adapter *adap)
-{
- struct smbus_host_notify *host_notify;
-
- host_notify = devm_kzalloc(&adap->dev, sizeof(struct smbus_host_notify),
- GFP_KERNEL);
- if (!host_notify)
- return NULL;
-
- host_notify->adapter = adap;
-
- spin_lock_init(&host_notify->lock);
- INIT_WORK(&host_notify->work, smbus_host_notify_work);
-
- return host_notify;
-}
-EXPORT_SYMBOL_GPL(i2c_setup_smbus_host_notify);
-
-/**
- * i2c_handle_smbus_host_notify - Forward a Host Notify event to the correct
- * I2C client.
- * @host_notify: the struct host_notify attached to the relevant adapter
- * @addr: the I2C address of the notifying device
- * @data: the payload of the notification
- * Context: can't sleep
- *
- * Helper function to be called from an I2C bus driver's interrupt
- * handler. It will schedule the Host Notify work, in turn calling the
- * corresponding I2C device driver's alert function.
- *
- * host_notify should be a valid pointer previously returned by
- * i2c_setup_smbus_host_notify().
- */
-int i2c_handle_smbus_host_notify(struct smbus_host_notify *host_notify,
- unsigned short addr, unsigned int data)
-{
- unsigned long flags;
- struct i2c_adapter *adapter;
-
- if (!host_notify || !host_notify->adapter)
- return -EINVAL;
-
- adapter = host_notify->adapter;
-
- spin_lock_irqsave(&host_notify->lock, flags);
-
- if (host_notify->pending) {
- spin_unlock_irqrestore(&host_notify->lock, flags);
- dev_warn(&adapter->dev, "Host Notify already scheduled.\n");
- return -EBUSY;
- }
-
- host_notify->payload = data;
- host_notify->addr = addr;
-
- /* Mark that there is a pending notification and release the lock */
- host_notify->pending = true;
- spin_unlock_irqrestore(&host_notify->lock, flags);
-
- return schedule_work(&host_notify->work);
-}
-EXPORT_SYMBOL_GPL(i2c_handle_smbus_host_notify);
-
module_i2c_driver(smbalert_driver);
MODULE_AUTHOR("Jean Delvare <jdelvare@suse.de>");
diff --git a/drivers/i2c/muxes/Kconfig b/drivers/i2c/muxes/Kconfig
index 96de9ce5669b..10b3d17ae3ea 100644
--- a/drivers/i2c/muxes/Kconfig
+++ b/drivers/i2c/muxes/Kconfig
@@ -82,4 +82,15 @@ config I2C_DEMUX_PINCTRL
demultiplexer that uses the pinctrl subsystem. This is useful if you
want to change the I2C master at run-time depending on features.
+config I2C_MUX_MLXCPLD
+ tristate "Mellanox CPLD based I2C multiplexer"
+ help
+ If you say yes to this option, support will be included for a
+ CPLD based I2C multiplexer. This driver provides access to
+ I2C busses connected through a MUX, which is controlled
+ by a CPLD register.
+
+ This driver can also be built as a module. If so, the module
+ will be called i2c-mux-mlxcpld.
+
endmenu
diff --git a/drivers/i2c/muxes/Makefile b/drivers/i2c/muxes/Makefile
index 7c267c29b191..9948fa45037f 100644
--- a/drivers/i2c/muxes/Makefile
+++ b/drivers/i2c/muxes/Makefile
@@ -6,6 +6,7 @@ obj-$(CONFIG_I2C_ARB_GPIO_CHALLENGE) += i2c-arb-gpio-challenge.o
obj-$(CONFIG_I2C_DEMUX_PINCTRL) += i2c-demux-pinctrl.o
obj-$(CONFIG_I2C_MUX_GPIO) += i2c-mux-gpio.o
+obj-$(CONFIG_I2C_MUX_MLXCPLD) += i2c-mux-mlxcpld.o
obj-$(CONFIG_I2C_MUX_PCA9541) += i2c-mux-pca9541.o
obj-$(CONFIG_I2C_MUX_PCA954x) += i2c-mux-pca954x.o
obj-$(CONFIG_I2C_MUX_PINCTRL) += i2c-mux-pinctrl.o
diff --git a/drivers/i2c/muxes/i2c-mux-gpio.c b/drivers/i2c/muxes/i2c-mux-gpio.c
index e5cf26eefa97..655684d621a4 100644
--- a/drivers/i2c/muxes/i2c-mux-gpio.c
+++ b/drivers/i2c/muxes/i2c-mux-gpio.c
@@ -21,6 +21,8 @@
struct gpiomux {
struct i2c_mux_gpio_platform_data data;
unsigned gpio_base;
+ struct gpio_desc **gpios;
+ int *values;
};
static void i2c_mux_gpio_set(const struct gpiomux *mux, unsigned val)
@@ -28,8 +30,10 @@ static void i2c_mux_gpio_set(const struct gpiomux *mux, unsigned val)
int i;
for (i = 0; i < mux->data.n_gpios; i++)
- gpio_set_value_cansleep(mux->gpio_base + mux->data.gpios[i],
- val & (1 << i));
+ mux->values[i] = (val >> i) & 1;
+
+ gpiod_set_array_value_cansleep(mux->data.n_gpios,
+ mux->gpios, mux->values);
}
static int i2c_mux_gpio_select(struct i2c_mux_core *muxc, u32 chan)
@@ -176,12 +180,16 @@ static int i2c_mux_gpio_probe(struct platform_device *pdev)
if (!parent)
return -EPROBE_DEFER;
- muxc = i2c_mux_alloc(parent, &pdev->dev, mux->data.n_values, 0, 0,
+ muxc = i2c_mux_alloc(parent, &pdev->dev, mux->data.n_values,
+ mux->data.n_gpios * sizeof(*mux->gpios) +
+ mux->data.n_gpios * sizeof(*mux->values), 0,
i2c_mux_gpio_select, NULL);
if (!muxc) {
ret = -ENOMEM;
goto alloc_failed;
}
+ mux->gpios = muxc->priv;
+ mux->values = (int *)(mux->gpios + mux->data.n_gpios);
muxc->priv = mux;
platform_set_drvdata(pdev, muxc);
@@ -219,10 +227,12 @@ static int i2c_mux_gpio_probe(struct platform_device *pdev)
goto err_request_gpio;
}
+ gpio_desc = gpio_to_desc(gpio_base + mux->data.gpios[i]);
+ mux->gpios[i] = gpio_desc;
+
if (!muxc->mux_locked)
continue;
- gpio_desc = gpio_to_desc(gpio_base + mux->data.gpios[i]);
gpio_dev = &gpio_desc->gdev->dev;
muxc->mux_locked = i2c_root_adapter(gpio_dev) == root;
}
diff --git a/drivers/i2c/muxes/i2c-mux-mlxcpld.c b/drivers/i2c/muxes/i2c-mux-mlxcpld.c
new file mode 100644
index 000000000000..b7ca249ec9c3
--- /dev/null
+++ b/drivers/i2c/muxes/i2c-mux-mlxcpld.c
@@ -0,0 +1,222 @@
+/*
+ * drivers/i2c/muxes/i2c-mux-mlxcpld.c
+ * Copyright (c) 2016 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2016 Michael Shych <michaels@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/device.h>
+#include <linux/i2c.h>
+#include <linux/i2c-mux.h>
+#include <linux/io.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/version.h>
+#include <linux/i2c/mlxcpld.h>
+
+#define CPLD_MUX_MAX_NCHANS 8
+
+/* mlxcpld_mux - mux control structure:
+ * @last_chan - last register value
+ * @client - I2C device client
+ */
+struct mlxcpld_mux {
+ u8 last_chan;
+ struct i2c_client *client;
+};
+
+/* MUX logic description.
+ * The driver can support different mux control logic, depending on the
+ * CPLD implementation.
+ *
+ * Connectivity schema.
+ *
+ * i2c-mlxcpld Digital Analog
+ * driver
+ * *--------* * -> mux1 (virt bus2) -> mux -> |
+ * | I2CLPC | i2c physical * -> mux2 (virt bus3) -> mux -> |
+ * | bridge | bus 1 *---------* |
+ * | logic |---------------------> * mux reg * |
+ * | in CPLD| *---------* |
+ * *--------* i2c-mux-mlxpcld ^ * -> muxn (virt busn) -> mux -> |
+ * | driver | |
+ * | *---------------* | Devices
+ * | * CPLD (i2c bus)* select |
+ * | * registers for *--------*
+ * | * mux selection * deselect
+ * | *---------------*
+ * | |
+ * <--------> <----------->
+ * i2c cntrl Board cntrl reg
+ * reg space space (mux select,
+ * IO, LED, WD, info)
+ *
+ */
+
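+/*
+ * A board instantiates this driver through platform data. A minimal
+ * sketch follows; the field names are the ones this driver reads, while
+ * the pointer type, bus numbers and select register address below are
+ * hypothetical:
+ *
+ *	static int mux_adap_ids[] = { 2, 3, 4 };
+ *
+ *	static struct mlxcpld_mux_plat_data mux_pdata = {
+ *		.adap_ids	= mux_adap_ids,
+ *		.num_adaps	= ARRAY_SIZE(mux_adap_ids),
+ *		.sel_reg_addr	= 0xdb,
+ *	};
+ */
+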
+static const struct i2c_device_id mlxcpld_mux_id[] = {
+ { "mlxcpld_mux_module", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, mlxcpld_mux_id);
+
+/* Write to mux register. Don't use i2c_transfer() and i2c_smbus_xfer()
+ * for this as they will try to lock the adapter a second time.
+ */
+static int mlxcpld_mux_reg_write(struct i2c_adapter *adap,
+ struct i2c_client *client, u8 val)
+{
+ struct mlxcpld_mux_plat_data *pdata = dev_get_platdata(&client->dev);
+ int ret = -ENODEV;
+
+ if (adap->algo->master_xfer) {
+ struct i2c_msg msg;
+ u8 msgbuf[] = {pdata->sel_reg_addr, val};
+
+ msg.addr = client->addr;
+ msg.flags = 0;
+ msg.len = 2;
+ msg.buf = msgbuf;
+ ret = __i2c_transfer(adap, &msg, 1);
+
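+ /* __i2c_transfer() returns the number of messages transferred */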
+ if (ret >= 0 && ret != 1)
+ ret = -EREMOTEIO;
+ } else if (adap->algo->smbus_xfer) {
+ union i2c_smbus_data data;
+
+ data.byte = val;
+ ret = adap->algo->smbus_xfer(adap, client->addr,
+ client->flags, I2C_SMBUS_WRITE,
+ pdata->sel_reg_addr,
+ I2C_SMBUS_BYTE_DATA, &data);
+ }
+
+ return ret;
+}
+
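+/* Channel numbers in the CPLD select register are 1-based; 0 deselects all. */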
+static int mlxcpld_mux_select_chan(struct i2c_mux_core *muxc, u32 chan)
+{
+ struct mlxcpld_mux *data = i2c_mux_priv(muxc);
+ struct i2c_client *client = data->client;
+ u8 regval = chan + 1;
+ int err = 0;
+
+ /* Only select the channel if it's different from the last channel */
+ if (data->last_chan != regval) {
+ err = mlxcpld_mux_reg_write(muxc->parent, client, regval);
+ data->last_chan = err < 0 ? 0 : regval;
+ }
+
+ return err;
+}
+
+static int mlxcpld_mux_deselect(struct i2c_mux_core *muxc, u32 chan)
+{
+ struct mlxcpld_mux *data = i2c_mux_priv(muxc);
+ struct i2c_client *client = data->client;
+
+ /* Deselect active channel */
+ data->last_chan = 0;
+
+ return mlxcpld_mux_reg_write(muxc->parent, client, data->last_chan);
+}
+
+/* Probe/remove functions */
+static int mlxcpld_mux_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct i2c_adapter *adap = to_i2c_adapter(client->dev.parent);
+ struct mlxcpld_mux_plat_data *pdata = dev_get_platdata(&client->dev);
+ struct i2c_mux_core *muxc;
+ int num, force;
+ struct mlxcpld_mux *data;
+ int err;
+
+ if (!pdata)
+ return -EINVAL;
+
+ if (!i2c_check_functionality(adap, I2C_FUNC_SMBUS_WRITE_BYTE_DATA))
+ return -ENODEV;
+
+ muxc = i2c_mux_alloc(adap, &client->dev, CPLD_MUX_MAX_NCHANS,
+ sizeof(*data), 0, mlxcpld_mux_select_chan,
+ mlxcpld_mux_deselect);
+ if (!muxc)
+ return -ENOMEM;
+
+ data = i2c_mux_priv(muxc);
+ i2c_set_clientdata(client, muxc);
+ data->client = client;
+ data->last_chan = 0; /* force the first selection */
+
+ /* Create an adapter for each channel. */
+ for (num = 0; num < CPLD_MUX_MAX_NCHANS; num++) {
+ if (num >= pdata->num_adaps)
+ /* discard unconfigured channels */
+ break;
+
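+ /* Use the platform-specified (forced) bus number for this channel */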
+ force = pdata->adap_ids[num];
+
+ err = i2c_mux_add_adapter(muxc, force, num, 0);
+ if (err)
+ goto virt_reg_failed;
+ }
+
+ return 0;
+
+virt_reg_failed:
+ i2c_mux_del_adapters(muxc);
+ return err;
+}
+
+static int mlxcpld_mux_remove(struct i2c_client *client)
+{
+ struct i2c_mux_core *muxc = i2c_get_clientdata(client);
+
+ i2c_mux_del_adapters(muxc);
+ return 0;
+}
+
+static struct i2c_driver mlxcpld_mux_driver = {
+ .driver = {
+ .name = "mlxcpld-mux",
+ },
+ .probe = mlxcpld_mux_probe,
+ .remove = mlxcpld_mux_remove,
+ .id_table = mlxcpld_mux_id,
+};
+
+module_i2c_driver(mlxcpld_mux_driver);
+
+MODULE_AUTHOR("Michael Shych (michaels@mellanox.com)");
+MODULE_DESCRIPTION("Mellanox I2C-CPLD-MUX driver");
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_ALIAS("platform:i2c-mux-mlxcpld");
diff --git a/drivers/i2c/muxes/i2c-mux-pca954x.c b/drivers/i2c/muxes/i2c-mux-pca954x.c
index 8bc3d36d2837..dd18b9ccb1f4 100644
--- a/drivers/i2c/muxes/i2c-mux-pca954x.c
+++ b/drivers/i2c/muxes/i2c-mux-pca954x.c
@@ -35,6 +35,7 @@
* warranty of any kind, whether express or implied.
*/
+#include <linux/acpi.h>
#include <linux/device.h>
#include <linux/gpio/consumer.h>
#include <linux/i2c.h>
@@ -120,6 +121,21 @@ static const struct i2c_device_id pca954x_id[] = {
};
MODULE_DEVICE_TABLE(i2c, pca954x_id);
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id pca954x_acpi_ids[] = {
+ { .id = "PCA9540", .driver_data = pca_9540 },
+ { .id = "PCA9542", .driver_data = pca_9540 },
+ { .id = "PCA9543", .driver_data = pca_9543 },
+ { .id = "PCA9544", .driver_data = pca_9544 },
+ { .id = "PCA9545", .driver_data = pca_9545 },
+ { .id = "PCA9546", .driver_data = pca_9545 },
+ { .id = "PCA9547", .driver_data = pca_9547 },
+ { .id = "PCA9548", .driver_data = pca_9548 },
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, pca954x_acpi_ids);
+#endif
+
#ifdef CONFIG_OF
static const struct of_device_id pca954x_of_match[] = {
{ .compatible = "nxp,pca9540", .data = &chips[pca_9540] },
@@ -151,6 +167,9 @@ static int pca954x_reg_write(struct i2c_adapter *adap,
buf[0] = val;
msg.buf = buf;
ret = __i2c_transfer(adap, &msg, 1);
+
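+ /* __i2c_transfer() returns the number of messages transferred */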
+ if (ret >= 0 && ret != 1)
+ ret = -EREMOTEIO;
} else {
union i2c_smbus_data data;
ret = adap->algo->smbus_xfer(adap, client->addr,
@@ -179,7 +198,7 @@ static int pca954x_select_chan(struct i2c_mux_core *muxc, u32 chan)
/* Only select the channel if its different from the last channel */
if (data->last_chan != regval) {
ret = pca954x_reg_write(muxc->parent, client, regval);
- data->last_chan = ret ? 0 : regval;
+ data->last_chan = ret < 0 ? 0 : regval;
}
return ret;
@@ -245,8 +264,17 @@ static int pca954x_probe(struct i2c_client *client,
match = of_match_device(of_match_ptr(pca954x_of_match), &client->dev);
if (match)
data->chip = of_device_get_match_data(&client->dev);
- else
+ else if (id)
data->chip = &chips[id->driver_data];
+ else {
+ const struct acpi_device_id *acpi_id;
+
+ acpi_id = acpi_match_device(ACPI_PTR(pca954x_acpi_ids),
+ &client->dev);
+ if (!acpi_id)
+ return -ENODEV;
+ data->chip = &chips[acpi_id->driver_data];
+ }
data->last_chan = 0; /* force the first selection */
@@ -321,6 +349,7 @@ static struct i2c_driver pca954x_driver = {
.name = "pca954x",
.pm = &pca954x_pm,
.of_match_table = of_match_ptr(pca954x_of_match),
+ .acpi_match_table = ACPI_PTR(pca954x_acpi_ids),
},
.probe = pca954x_probe,
.remove = pca954x_remove,
diff --git a/drivers/ide/ide-atapi.c b/drivers/ide/ide-atapi.c
index 05352f490d60..f90ea221f7f2 100644
--- a/drivers/ide/ide-atapi.c
+++ b/drivers/ide/ide-atapi.c
@@ -211,7 +211,7 @@ void ide_prep_sense(ide_drive_t *drive, struct request *rq)
sense_rq->cmd[0] = GPCMD_REQUEST_SENSE;
sense_rq->cmd[4] = cmd_len;
sense_rq->cmd_type = REQ_TYPE_ATA_SENSE;
- sense_rq->cmd_flags |= REQ_PREEMPT;
+ sense_rq->rq_flags |= RQF_PREEMPT;
if (drive->media == ide_tape)
sense_rq->cmd[13] = REQ_IDETAPE_PC1;
@@ -295,7 +295,7 @@ int ide_cd_expiry(ide_drive_t *drive)
wait = ATAPI_WAIT_PC;
break;
default:
- if (!(rq->cmd_flags & REQ_QUIET))
+ if (!(rq->rq_flags & RQF_QUIET))
printk(KERN_INFO PFX "cmd 0x%x timed out\n",
rq->cmd[0]);
wait = 0;
@@ -375,7 +375,7 @@ int ide_check_ireason(ide_drive_t *drive, struct request *rq, int len,
}
if (dev_is_idecd(drive) && rq->cmd_type == REQ_TYPE_ATA_PC)
- rq->cmd_flags |= REQ_FAILED;
+ rq->rq_flags |= RQF_FAILED;
return 1;
}
diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
index bf9a2ad296ed..9cbd217bc0c9 100644
--- a/drivers/ide/ide-cd.c
+++ b/drivers/ide/ide-cd.c
@@ -98,7 +98,7 @@ static int cdrom_log_sense(ide_drive_t *drive, struct request *rq)
struct request_sense *sense = &drive->sense_data;
int log = 0;
- if (!sense || !rq || (rq->cmd_flags & REQ_QUIET))
+ if (!sense || !rq || (rq->rq_flags & RQF_QUIET))
return 0;
ide_debug_log(IDE_DBG_SENSE, "sense_key: 0x%x", sense->sense_key);
@@ -291,7 +291,7 @@ static int cdrom_decode_status(ide_drive_t *drive, u8 stat)
* (probably while trying to recover from a former error).
* Just give up.
*/
- rq->cmd_flags |= REQ_FAILED;
+ rq->rq_flags |= RQF_FAILED;
return 2;
}
@@ -311,7 +311,7 @@ static int cdrom_decode_status(ide_drive_t *drive, u8 stat)
cdrom_saw_media_change(drive);
if (rq->cmd_type == REQ_TYPE_FS &&
- !(rq->cmd_flags & REQ_QUIET))
+ !(rq->rq_flags & RQF_QUIET))
printk(KERN_ERR PFX "%s: tray open\n",
drive->name);
}
@@ -346,7 +346,7 @@ static int cdrom_decode_status(ide_drive_t *drive, u8 stat)
* No point in retrying after an illegal request or data
* protect error.
*/
- if (!(rq->cmd_flags & REQ_QUIET))
+ if (!(rq->rq_flags & RQF_QUIET))
ide_dump_status(drive, "command error", stat);
do_end_request = 1;
break;
@@ -355,14 +355,14 @@ static int cdrom_decode_status(ide_drive_t *drive, u8 stat)
* No point in re-trying a zillion times on a bad sector.
* If we got here the error is not correctable.
*/
- if (!(rq->cmd_flags & REQ_QUIET))
+ if (!(rq->rq_flags & RQF_QUIET))
ide_dump_status(drive, "media error "
"(bad sector)", stat);
do_end_request = 1;
break;
case BLANK_CHECK:
/* disk appears blank? */
- if (!(rq->cmd_flags & REQ_QUIET))
+ if (!(rq->rq_flags & RQF_QUIET))
ide_dump_status(drive, "media error (blank)",
stat);
do_end_request = 1;
@@ -380,7 +380,7 @@ static int cdrom_decode_status(ide_drive_t *drive, u8 stat)
}
if (rq->cmd_type != REQ_TYPE_FS) {
- rq->cmd_flags |= REQ_FAILED;
+ rq->rq_flags |= RQF_FAILED;
do_end_request = 1;
}
@@ -422,19 +422,19 @@ static void ide_cd_request_sense_fixup(ide_drive_t *drive, struct ide_cmd *cmd)
int ide_cd_queue_pc(ide_drive_t *drive, const unsigned char *cmd,
int write, void *buffer, unsigned *bufflen,
struct request_sense *sense, int timeout,
- unsigned int cmd_flags)
+ req_flags_t rq_flags)
{
struct cdrom_info *info = drive->driver_data;
struct request_sense local_sense;
int retries = 10;
- unsigned int flags = 0;
+ req_flags_t flags = 0;
if (!sense)
sense = &local_sense;
ide_debug_log(IDE_DBG_PC, "cmd[0]: 0x%x, write: 0x%x, timeout: %d, "
- "cmd_flags: 0x%x",
- cmd[0], write, timeout, cmd_flags);
+ "rq_flags: 0x%x",
+ cmd[0], write, timeout, rq_flags);
/* start of retry loop */
do {
@@ -446,7 +446,7 @@ int ide_cd_queue_pc(ide_drive_t *drive, const unsigned char *cmd,
memcpy(rq->cmd, cmd, BLK_MAX_CDB);
rq->cmd_type = REQ_TYPE_ATA_PC;
rq->sense = sense;
- rq->cmd_flags |= cmd_flags;
+ rq->rq_flags |= rq_flags;
rq->timeout = timeout;
if (buffer) {
error = blk_rq_map_kern(drive->queue, rq, buffer,
@@ -462,14 +462,14 @@ int ide_cd_queue_pc(ide_drive_t *drive, const unsigned char *cmd,
if (buffer)
*bufflen = rq->resid_len;
- flags = rq->cmd_flags;
+ flags = rq->rq_flags;
blk_put_request(rq);
/*
* FIXME: we should probably abort/retry or something in case of
* failure.
*/
- if (flags & REQ_FAILED) {
+ if (flags & RQF_FAILED) {
/*
* The request failed. Retry if it was due to a unit
* attention status (usually means media was changed).
@@ -494,10 +494,10 @@ int ide_cd_queue_pc(ide_drive_t *drive, const unsigned char *cmd,
}
/* end of retry loop */
- } while ((flags & REQ_FAILED) && retries >= 0);
+ } while ((flags & RQF_FAILED) && retries >= 0);
/* return an error if the command failed */
- return (flags & REQ_FAILED) ? -EIO : 0;
+ return (flags & RQF_FAILED) ? -EIO : 0;
}
/*
@@ -589,7 +589,7 @@ static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive)
"(%u bytes)\n", drive->name, __func__,
cmd->nleft);
if (!write)
- rq->cmd_flags |= REQ_FAILED;
+ rq->rq_flags |= RQF_FAILED;
uptodate = 0;
}
} else if (rq->cmd_type != REQ_TYPE_BLOCK_PC) {
@@ -607,7 +607,7 @@ static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive)
}
if (!uptodate)
- rq->cmd_flags |= REQ_FAILED;
+ rq->rq_flags |= RQF_FAILED;
}
goto out_end;
}
@@ -745,9 +745,9 @@ static void cdrom_do_block_pc(ide_drive_t *drive, struct request *rq)
rq->cmd[0], rq->cmd_type);
if (rq->cmd_type == REQ_TYPE_BLOCK_PC)
- rq->cmd_flags |= REQ_QUIET;
+ rq->rq_flags |= RQF_QUIET;
else
- rq->cmd_flags &= ~REQ_FAILED;
+ rq->rq_flags &= ~RQF_FAILED;
drive->dma = 0;
@@ -867,7 +867,7 @@ int cdrom_check_status(ide_drive_t *drive, struct request_sense *sense)
*/
cmd[7] = cdi->sanyo_slot % 3;
- return ide_cd_queue_pc(drive, cmd, 0, NULL, NULL, sense, 0, REQ_QUIET);
+ return ide_cd_queue_pc(drive, cmd, 0, NULL, NULL, sense, 0, RQF_QUIET);
}
static int cdrom_read_capacity(ide_drive_t *drive, unsigned long *capacity,
@@ -890,7 +890,7 @@ static int cdrom_read_capacity(ide_drive_t *drive, unsigned long *capacity,
cmd[0] = GPCMD_READ_CDVD_CAPACITY;
stat = ide_cd_queue_pc(drive, cmd, 0, &capbuf, &len, sense, 0,
- REQ_QUIET);
+ RQF_QUIET);
if (stat)
return stat;
@@ -943,7 +943,7 @@ static int cdrom_read_tocentry(ide_drive_t *drive, int trackno, int msf_flag,
if (msf_flag)
cmd[1] = 2;
- return ide_cd_queue_pc(drive, cmd, 0, buf, &buflen, sense, 0, REQ_QUIET);
+ return ide_cd_queue_pc(drive, cmd, 0, buf, &buflen, sense, 0, RQF_QUIET);
}
/* Try to read the entire TOC for the disk into our internal buffer. */
diff --git a/drivers/ide/ide-cd.h b/drivers/ide/ide-cd.h
index 1efc936f5b66..eea60c986c4f 100644
--- a/drivers/ide/ide-cd.h
+++ b/drivers/ide/ide-cd.h
@@ -101,7 +101,7 @@ void ide_cd_log_error(const char *, struct request *, struct request_sense *);
/* ide-cd.c functions used by ide-cd_ioctl.c */
int ide_cd_queue_pc(ide_drive_t *, const unsigned char *, int, void *,
- unsigned *, struct request_sense *, int, unsigned int);
+ unsigned *, struct request_sense *, int, req_flags_t);
int ide_cd_read_toc(ide_drive_t *, struct request_sense *);
int ide_cdrom_get_capabilities(ide_drive_t *, u8 *);
void ide_cdrom_update_speed(ide_drive_t *, u8 *);
diff --git a/drivers/ide/ide-cd_ioctl.c b/drivers/ide/ide-cd_ioctl.c
index 5887a7a09e37..f085e3a2e1d6 100644
--- a/drivers/ide/ide-cd_ioctl.c
+++ b/drivers/ide/ide-cd_ioctl.c
@@ -305,7 +305,7 @@ int ide_cdrom_reset(struct cdrom_device_info *cdi)
rq = blk_get_request(drive->queue, READ, __GFP_RECLAIM);
rq->cmd_type = REQ_TYPE_DRV_PRIV;
- rq->cmd_flags = REQ_QUIET;
+ rq->rq_flags = RQF_QUIET;
ret = blk_execute_rq(drive->queue, cd->disk, rq, 0);
blk_put_request(rq);
/*
@@ -449,7 +449,7 @@ int ide_cdrom_packet(struct cdrom_device_info *cdi,
struct packet_command *cgc)
{
ide_drive_t *drive = cdi->handle;
- unsigned int flags = 0;
+ req_flags_t flags = 0;
unsigned len = cgc->buflen;
if (cgc->timeout <= 0)
@@ -463,7 +463,7 @@ int ide_cdrom_packet(struct cdrom_device_info *cdi,
memset(cgc->sense, 0, sizeof(struct request_sense));
if (cgc->quiet)
- flags |= REQ_QUIET;
+ flags |= RQF_QUIET;
cgc->stat = ide_cd_queue_pc(drive, cgc->cmd,
cgc->data_direction == CGC_DATA_WRITE,
diff --git a/drivers/ide/ide-io.c b/drivers/ide/ide-io.c
index 669ea1e45795..6360bbd37efe 100644
--- a/drivers/ide/ide-io.c
+++ b/drivers/ide/ide-io.c
@@ -307,7 +307,7 @@ static ide_startstop_t start_request (ide_drive_t *drive, struct request *rq)
{
ide_startstop_t startstop;
- BUG_ON(!(rq->cmd_flags & REQ_STARTED));
+ BUG_ON(!(rq->rq_flags & RQF_STARTED));
#ifdef DEBUG
printk("%s: start_request: current=0x%08lx\n",
@@ -316,7 +316,7 @@ static ide_startstop_t start_request (ide_drive_t *drive, struct request *rq)
/* bail early if we've exceeded max_failures */
if (drive->max_failures && (drive->failures > drive->max_failures)) {
- rq->cmd_flags |= REQ_FAILED;
+ rq->rq_flags |= RQF_FAILED;
goto kill_rq;
}
@@ -539,7 +539,7 @@ repeat:
*/
if ((drive->dev_flags & IDE_DFLAG_BLOCKED) &&
ata_pm_request(rq) == 0 &&
- (rq->cmd_flags & REQ_PREEMPT) == 0) {
+ (rq->rq_flags & RQF_PREEMPT) == 0) {
/* there should be no pending command at this point */
ide_unlock_port(hwif);
goto plug_device;
diff --git a/drivers/ide/ide-pm.c b/drivers/ide/ide-pm.c
index e34af488693a..a015acdffb39 100644
--- a/drivers/ide/ide-pm.c
+++ b/drivers/ide/ide-pm.c
@@ -53,7 +53,7 @@ static int ide_pm_execute_rq(struct request *rq)
spin_lock_irq(q->queue_lock);
if (unlikely(blk_queue_dying(q))) {
- rq->cmd_flags |= REQ_QUIET;
+ rq->rq_flags |= RQF_QUIET;
rq->errors = -ENXIO;
__blk_end_request_all(rq, rq->errors);
spin_unlock_irq(q->queue_lock);
@@ -90,7 +90,7 @@ int generic_ide_resume(struct device *dev)
memset(&rqpm, 0, sizeof(rqpm));
rq = blk_get_request(drive->queue, READ, __GFP_RECLAIM);
rq->cmd_type = REQ_TYPE_ATA_PM_RESUME;
- rq->cmd_flags |= REQ_PREEMPT;
+ rq->rq_flags |= RQF_PREEMPT;
rq->special = &rqpm;
rqpm.pm_step = IDE_PM_START_RESUME;
rqpm.pm_state = PM_EVENT_ON;
diff --git a/drivers/idle/Kconfig b/drivers/idle/Kconfig
index 4732dfc15447..55bcf803841e 100644
--- a/drivers/idle/Kconfig
+++ b/drivers/idle/Kconfig
@@ -8,20 +8,3 @@ config INTEL_IDLE
native Intel hardware idle features. The acpi_idle driver
can be configured at the same time, in order to handle
processors intel_idle does not support.
-
-menu "Memory power savings"
-depends on X86_64
-
-config I7300_IDLE_IOAT_CHANNEL
- bool
-
-config I7300_IDLE
- tristate "Intel chipset idle memory power saving driver"
- select I7300_IDLE_IOAT_CHANNEL
- help
- Enable memory power savings when idle with certain Intel server
- chipsets. The chipset must have I/O AT support, such as the
- Intel 7300. The power savings depends on the type and quantity of
- DRAM devices.
-
-endmenu
diff --git a/drivers/idle/Makefile b/drivers/idle/Makefile
index 23d295cf10f2..0007111d73e9 100644
--- a/drivers/idle/Makefile
+++ b/drivers/idle/Makefile
@@ -1,3 +1,2 @@
-obj-$(CONFIG_I7300_IDLE) += i7300_idle.o
obj-$(CONFIG_INTEL_IDLE) += intel_idle.o
diff --git a/drivers/idle/i7300_idle.c b/drivers/idle/i7300_idle.c
deleted file mode 100644
index ffeebc7e9f1c..000000000000
--- a/drivers/idle/i7300_idle.c
+++ /dev/null
@@ -1,612 +0,0 @@
-/*
- * (C) Copyright 2008 Intel Corporation
- * Authors:
- * Andy Henroid <andrew.d.henroid@intel.com>
- * Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
- */
-
-/*
- * Save DIMM power on Intel 7300-based platforms when all CPUs/cores
- * are idle, using the DIMM thermal throttling capability.
- *
- * This driver depends on the Intel integrated DMA controller (I/O AT).
- * If the driver for I/O AT (drivers/dma/ioatdma*) is also enabled,
- * this driver should work cooperatively.
- */
-
-/* #define DEBUG */
-
-#include <linux/module.h>
-#include <linux/pci.h>
-#include <linux/gfp.h>
-#include <linux/sched.h>
-#include <linux/notifier.h>
-#include <linux/cpumask.h>
-#include <linux/ktime.h>
-#include <linux/delay.h>
-#include <linux/debugfs.h>
-#include <linux/stop_machine.h>
-#include <linux/i7300_idle.h>
-
-#include <asm/idle.h>
-
-#include "../dma/ioat/hw.h"
-#include "../dma/ioat/registers.h"
-
-#define I7300_IDLE_DRIVER_VERSION "1.55"
-#define I7300_PRINT "i7300_idle:"
-
-#define MAX_STOP_RETRIES 10
-
-static int debug;
-module_param_named(debug, debug, uint, 0644);
-MODULE_PARM_DESC(debug, "Enable debug printks in this driver");
-
-static int forceload;
-module_param_named(forceload, forceload, uint, 0644);
-MODULE_PARM_DESC(debug, "Enable driver testing on unvalidated i5000");
-
-#define dprintk(fmt, arg...) \
- do { if (debug) printk(KERN_INFO I7300_PRINT fmt, ##arg); } while (0)
-
-/*
- * Value to set THRTLOW to when initiating throttling
- * 0 = No throttling
- * 1 = Throttle when > 4 activations per eval window (Maximum throttling)
- * 2 = Throttle when > 8 activations
- * 168 = Throttle when > 672 activations (Minimum throttling)
- */
-#define MAX_THROTTLE_LOW_LIMIT 168
-static uint throttle_low_limit = 1;
-module_param_named(throttle_low_limit, throttle_low_limit, uint, 0644);
-MODULE_PARM_DESC(throttle_low_limit,
- "Value for THRTLOWLM activation field "
- "(0 = disable throttle, 1 = Max throttle, 168 = Min throttle)");
-
-/*
- * simple invocation and duration statistics
- */
-static unsigned long total_starts;
-static unsigned long total_us;
-
-#ifdef DEBUG
-static unsigned long past_skip;
-#endif
-
-static struct pci_dev *fbd_dev;
-
-static raw_spinlock_t i7300_idle_lock;
-static int i7300_idle_active;
-
-static u8 i7300_idle_thrtctl_saved;
-static u8 i7300_idle_thrtlow_saved;
-static u32 i7300_idle_mc_saved;
-
-static cpumask_var_t idle_cpumask;
-static ktime_t start_ktime;
-static unsigned long avg_idle_us;
-
-static struct dentry *debugfs_dir;
-
-/* Begin: I/O AT Helper routines */
-
-#define IOAT_CHANBASE(ioat_ctl, chan) (ioat_ctl + 0x80 + 0x80 * chan)
-/* Snoop control (disable snoops when coherency is not important) */
-#define IOAT_DESC_SADDR_SNP_CTL (1UL << 1)
-#define IOAT_DESC_DADDR_SNP_CTL (1UL << 2)
-
-static struct pci_dev *ioat_dev;
-static struct ioat_dma_descriptor *ioat_desc; /* I/O AT desc & data (1 page) */
-static unsigned long ioat_desc_phys;
-static u8 *ioat_iomap; /* I/O AT memory-mapped control regs (aka CB_BAR) */
-static u8 *ioat_chanbase;
-
-/* Start I/O AT memory copy */
-static int i7300_idle_ioat_start(void)
-{
- u32 err;
- /* Clear error (due to circular descriptor pointer) */
- err = readl(ioat_chanbase + IOAT_CHANERR_OFFSET);
- if (err)
- writel(err, ioat_chanbase + IOAT_CHANERR_OFFSET);
-
- writeb(IOAT_CHANCMD_START, ioat_chanbase + IOAT1_CHANCMD_OFFSET);
- return 0;
-}
-
-/* Stop I/O AT memory copy */
-static void i7300_idle_ioat_stop(void)
-{
- int i;
- u64 sts;
-
- for (i = 0; i < MAX_STOP_RETRIES; i++) {
- writeb(IOAT_CHANCMD_RESET,
- ioat_chanbase + IOAT1_CHANCMD_OFFSET);
-
- udelay(10);
-
- sts = readq(ioat_chanbase + IOAT1_CHANSTS_OFFSET) &
- IOAT_CHANSTS_STATUS;
-
- if (sts != IOAT_CHANSTS_ACTIVE)
- break;
-
- }
-
- if (i == MAX_STOP_RETRIES) {
- dprintk("failed to stop I/O AT after %d retries\n",
- MAX_STOP_RETRIES);
- }
-}
-
-/* Test I/O AT by copying 1024 byte from 2k to 1k */
-static int __init i7300_idle_ioat_selftest(u8 *ctl,
- struct ioat_dma_descriptor *desc, unsigned long desc_phys)
-{
- u64 chan_sts;
-
- memset(desc, 0, 2048);
- memset((u8 *) desc + 2048, 0xab, 1024);
-
- desc[0].size = 1024;
- desc[0].ctl = 0;
- desc[0].src_addr = desc_phys + 2048;
- desc[0].dst_addr = desc_phys + 1024;
- desc[0].next = 0;
-
- writeb(IOAT_CHANCMD_RESET, ioat_chanbase + IOAT1_CHANCMD_OFFSET);
- writeb(IOAT_CHANCMD_START, ioat_chanbase + IOAT1_CHANCMD_OFFSET);
-
- udelay(1000);
-
- chan_sts = readq(ioat_chanbase + IOAT1_CHANSTS_OFFSET) &
- IOAT_CHANSTS_STATUS;
-
- if (chan_sts != IOAT_CHANSTS_DONE) {
- /* Not complete, reset the channel */
- writeb(IOAT_CHANCMD_RESET,
- ioat_chanbase + IOAT1_CHANCMD_OFFSET);
- return -1;
- }
-
- if (*(u32 *) ((u8 *) desc + 3068) != 0xabababab ||
- *(u32 *) ((u8 *) desc + 2044) != 0xabababab) {
- dprintk("Data values src 0x%x, dest 0x%x, memset 0x%x\n",
- *(u32 *) ((u8 *) desc + 2048),
- *(u32 *) ((u8 *) desc + 1024),
- *(u32 *) ((u8 *) desc + 3072));
- return -1;
- }
- return 0;
-}
-
-static struct device dummy_dma_dev = {
- .init_name = "fallback device",
- .coherent_dma_mask = DMA_BIT_MASK(64),
- .dma_mask = &dummy_dma_dev.coherent_dma_mask,
-};
-
-/* Setup and initialize I/O AT */
-/* This driver needs I/O AT as the throttling takes effect only when there is
- * some memory activity. We use I/O AT to set up a dummy copy, while all CPUs
- * go idle and memory is throttled.
- */
-static int __init i7300_idle_ioat_init(void)
-{
- u8 ver, chan_count, ioat_chan;
- u16 chan_ctl;
-
- ioat_iomap = (u8 *) ioremap_nocache(pci_resource_start(ioat_dev, 0),
- pci_resource_len(ioat_dev, 0));
-
- if (!ioat_iomap) {
- printk(KERN_ERR I7300_PRINT "failed to map I/O AT registers\n");
- goto err_ret;
- }
-
- ver = readb(ioat_iomap + IOAT_VER_OFFSET);
- if (ver != IOAT_VER_1_2) {
- printk(KERN_ERR I7300_PRINT "unknown I/O AT version (%u.%u)\n",
- ver >> 4, ver & 0xf);
- goto err_unmap;
- }
-
- chan_count = readb(ioat_iomap + IOAT_CHANCNT_OFFSET);
- if (!chan_count) {
- printk(KERN_ERR I7300_PRINT "unexpected # of I/O AT channels "
- "(%u)\n",
- chan_count);
- goto err_unmap;
- }
-
- ioat_chan = chan_count - 1;
- ioat_chanbase = IOAT_CHANBASE(ioat_iomap, ioat_chan);
-
- chan_ctl = readw(ioat_chanbase + IOAT_CHANCTRL_OFFSET);
- if (chan_ctl & IOAT_CHANCTRL_CHANNEL_IN_USE) {
- printk(KERN_ERR I7300_PRINT "channel %d in use\n", ioat_chan);
- goto err_unmap;
- }
-
- writew(IOAT_CHANCTRL_CHANNEL_IN_USE,
- ioat_chanbase + IOAT_CHANCTRL_OFFSET);
-
- ioat_desc = (struct ioat_dma_descriptor *)dma_alloc_coherent(
- &dummy_dma_dev, 4096,
- (dma_addr_t *)&ioat_desc_phys, GFP_KERNEL);
- if (!ioat_desc) {
- printk(KERN_ERR I7300_PRINT "failed to allocate I/O AT desc\n");
- goto err_mark_unused;
- }
-
- writel(ioat_desc_phys & 0xffffffffUL,
- ioat_chanbase + IOAT1_CHAINADDR_OFFSET_LOW);
- writel(ioat_desc_phys >> 32,
- ioat_chanbase + IOAT1_CHAINADDR_OFFSET_HIGH);
-
- if (i7300_idle_ioat_selftest(ioat_iomap, ioat_desc, ioat_desc_phys)) {
- printk(KERN_ERR I7300_PRINT "I/O AT self-test failed\n");
- goto err_free;
- }
-
- /* Setup circular I/O AT descriptor chain */
- ioat_desc[0].ctl = IOAT_DESC_SADDR_SNP_CTL | IOAT_DESC_DADDR_SNP_CTL;
- ioat_desc[0].src_addr = ioat_desc_phys + 2048;
- ioat_desc[0].dst_addr = ioat_desc_phys + 3072;
- ioat_desc[0].size = 128;
- ioat_desc[0].next = ioat_desc_phys + sizeof(struct ioat_dma_descriptor);
-
- ioat_desc[1].ctl = ioat_desc[0].ctl;
- ioat_desc[1].src_addr = ioat_desc[0].src_addr;
- ioat_desc[1].dst_addr = ioat_desc[0].dst_addr;
- ioat_desc[1].size = ioat_desc[0].size;
- ioat_desc[1].next = ioat_desc_phys;
-
- return 0;
-
-err_free:
- dma_free_coherent(&dummy_dma_dev, 4096, (void *)ioat_desc, 0);
-err_mark_unused:
- writew(0, ioat_chanbase + IOAT_CHANCTRL_OFFSET);
-err_unmap:
- iounmap(ioat_iomap);
-err_ret:
- return -ENODEV;
-}
-
-/* Cleanup I/O AT */
-static void __exit i7300_idle_ioat_exit(void)
-{
- int i;
- u64 chan_sts;
-
- i7300_idle_ioat_stop();
-
- /* Wait for a while for the channel to halt before releasing */
- for (i = 0; i < MAX_STOP_RETRIES; i++) {
- writeb(IOAT_CHANCMD_RESET,
- ioat_chanbase + IOAT1_CHANCMD_OFFSET);
-
- chan_sts = readq(ioat_chanbase + IOAT1_CHANSTS_OFFSET) &
- IOAT_CHANSTS_STATUS;
-
- if (chan_sts != IOAT_CHANSTS_ACTIVE) {
- writew(0, ioat_chanbase + IOAT_CHANCTRL_OFFSET);
- break;
- }
- udelay(1000);
- }
-
- chan_sts = readq(ioat_chanbase + IOAT1_CHANSTS_OFFSET) &
- IOAT_CHANSTS_STATUS;
-
- /*
- * We tried to reset multiple times. If IO A/T channel is still active
- * flag an error and return without cleanup. Memory leak is better
- * than random corruption in that extreme error situation.
- */
- if (chan_sts == IOAT_CHANSTS_ACTIVE) {
- printk(KERN_ERR I7300_PRINT "Unable to stop IO A/T channels."
- " Not freeing resources\n");
- return;
- }
-
- dma_free_coherent(&dummy_dma_dev, 4096, (void *)ioat_desc, 0);
- iounmap(ioat_iomap);
-}
-
-/* End: I/O AT Helper routines */
-
-#define DIMM_THRTLOW 0x64
-#define DIMM_THRTCTL 0x67
-#define DIMM_THRTCTL_THRMHUNT (1UL << 0)
-#define DIMM_MC 0x40
-#define DIMM_GTW_MODE (1UL << 17)
-#define DIMM_GBLACT 0x60
-
-/*
- * Keep track of an exponential-decaying average of recent idle durations.
- * The latest duration gets DURATION_WEIGHT_PCT percentage weight
- * in this average, with the old average getting the remaining weight.
- *
- * High weights emphasize recent history, low weights include long history.
- */
-#define DURATION_WEIGHT_PCT 55
-
-/*
- * When the decaying average of recent durations or the predicted duration
- * of the next timer interrupt is shorter than duration_threshold, the
- * driver will decline to throttle.
- */
-#define DURATION_THRESHOLD_US 100
-
-
-/* Store DIMM thermal throttle configuration */
-static int i7300_idle_thrt_save(void)
-{
- u32 new_mc_val;
- u8 gblactlm;
-
- pci_read_config_byte(fbd_dev, DIMM_THRTCTL, &i7300_idle_thrtctl_saved);
- pci_read_config_byte(fbd_dev, DIMM_THRTLOW, &i7300_idle_thrtlow_saved);
- pci_read_config_dword(fbd_dev, DIMM_MC, &i7300_idle_mc_saved);
- /*
- * Make sure we have Global Throttling Window Mode set to have a
- * "short" window. This (mostly) works around an issue where
- * throttling persists until the end of the global throttling window
- * size. On the tested system, this was resulting in a maximum of
- * 64 ms to exit throttling (average 32 ms). The actual numbers
- * depends on system frequencies. Setting the short window reduces
- * this by a factor of 4096.
- *
- * We will only do this only if the system is set for
- * unlimited-activations while in open-loop throttling (i.e., when
- * Global Activation Throttle Limit is zero).
- */
- pci_read_config_byte(fbd_dev, DIMM_GBLACT, &gblactlm);
- dprintk("thrtctl_saved = 0x%02x, thrtlow_saved = 0x%02x\n",
- i7300_idle_thrtctl_saved,
- i7300_idle_thrtlow_saved);
- dprintk("mc_saved = 0x%08x, gblactlm = 0x%02x\n",
- i7300_idle_mc_saved,
- gblactlm);
- if (gblactlm == 0) {
- new_mc_val = i7300_idle_mc_saved | DIMM_GTW_MODE;
- pci_write_config_dword(fbd_dev, DIMM_MC, new_mc_val);
- return 0;
- } else {
- dprintk("could not set GTW_MODE = 1 (OLTT enabled)\n");
- return -ENODEV;
- }
-}
-
-/* Restore DIMM thermal throttle configuration */
-static void i7300_idle_thrt_restore(void)
-{
- pci_write_config_dword(fbd_dev, DIMM_MC, i7300_idle_mc_saved);
- pci_write_config_byte(fbd_dev, DIMM_THRTLOW, i7300_idle_thrtlow_saved);
- pci_write_config_byte(fbd_dev, DIMM_THRTCTL, i7300_idle_thrtctl_saved);
-}
-
-/* Enable DIMM thermal throttling */
-static void i7300_idle_start(void)
-{
- u8 new_ctl;
- u8 limit;
-
- new_ctl = i7300_idle_thrtctl_saved & ~DIMM_THRTCTL_THRMHUNT;
- pci_write_config_byte(fbd_dev, DIMM_THRTCTL, new_ctl);
-
- limit = throttle_low_limit;
- if (unlikely(limit > MAX_THROTTLE_LOW_LIMIT))
- limit = MAX_THROTTLE_LOW_LIMIT;
-
- pci_write_config_byte(fbd_dev, DIMM_THRTLOW, limit);
-
- new_ctl = i7300_idle_thrtctl_saved | DIMM_THRTCTL_THRMHUNT;
- pci_write_config_byte(fbd_dev, DIMM_THRTCTL, new_ctl);
-}
-
-/* Disable DIMM thermal throttling */
-static void i7300_idle_stop(void)
-{
- u8 new_ctl;
- u8 got_ctl;
-
- new_ctl = i7300_idle_thrtctl_saved & ~DIMM_THRTCTL_THRMHUNT;
- pci_write_config_byte(fbd_dev, DIMM_THRTCTL, new_ctl);
-
- pci_write_config_byte(fbd_dev, DIMM_THRTLOW, i7300_idle_thrtlow_saved);
- pci_write_config_byte(fbd_dev, DIMM_THRTCTL, i7300_idle_thrtctl_saved);
- pci_read_config_byte(fbd_dev, DIMM_THRTCTL, &got_ctl);
- WARN_ON_ONCE(got_ctl != i7300_idle_thrtctl_saved);
-}
-
-
-/*
- * i7300_avg_duration_check()
- * return 0 if the decaying average of recent idle durations is
- * more than DURATION_THRESHOLD_US
- */
-static int i7300_avg_duration_check(void)
-{
- if (avg_idle_us >= DURATION_THRESHOLD_US)
- return 0;
-
-#ifdef DEBUG
- past_skip++;
-#endif
- return 1;
-}
-
-/* Idle notifier to look at idle CPUs */
-static int i7300_idle_notifier(struct notifier_block *nb, unsigned long val,
- void *data)
-{
- unsigned long flags;
- ktime_t now_ktime;
- static ktime_t idle_begin_time;
- static int time_init = 1;
-
- if (!throttle_low_limit)
- return 0;
-
- if (unlikely(time_init)) {
- time_init = 0;
- idle_begin_time = ktime_get();
- }
-
- raw_spin_lock_irqsave(&i7300_idle_lock, flags);
- if (val == IDLE_START) {
-
- cpumask_set_cpu(smp_processor_id(), idle_cpumask);
-
- if (cpumask_weight(idle_cpumask) != num_online_cpus())
- goto end;
-
- now_ktime = ktime_get();
- idle_begin_time = now_ktime;
-
- if (i7300_avg_duration_check())
- goto end;
-
- i7300_idle_active = 1;
- total_starts++;
- start_ktime = now_ktime;
-
- i7300_idle_start();
- i7300_idle_ioat_start();
-
- } else if (val == IDLE_END) {
- cpumask_clear_cpu(smp_processor_id(), idle_cpumask);
- if (cpumask_weight(idle_cpumask) == (num_online_cpus() - 1)) {
- /* First CPU coming out of idle */
- u64 idle_duration_us;
-
- now_ktime = ktime_get();
-
- idle_duration_us = ktime_to_us(ktime_sub
- (now_ktime, idle_begin_time));
-
- avg_idle_us =
- ((100 - DURATION_WEIGHT_PCT) * avg_idle_us +
- DURATION_WEIGHT_PCT * idle_duration_us) / 100;
-
- if (i7300_idle_active) {
- ktime_t idle_ktime;
-
- idle_ktime = ktime_sub(now_ktime, start_ktime);
- total_us += ktime_to_us(idle_ktime);
-
- i7300_idle_ioat_stop();
- i7300_idle_stop();
- i7300_idle_active = 0;
- }
- }
- }
-end:
- raw_spin_unlock_irqrestore(&i7300_idle_lock, flags);
- return 0;
-}
-
-static struct notifier_block i7300_idle_nb = {
- .notifier_call = i7300_idle_notifier,
-};
-
-MODULE_DEVICE_TABLE(pci, pci_tbl);
-
-static ssize_t stats_read_ul(struct file *fp, char __user *ubuf, size_t count,
- loff_t *off)
-{
- unsigned long *p = fp->private_data;
- char buf[32];
- int len;
-
- len = snprintf(buf, 32, "%lu\n", *p);
- return simple_read_from_buffer(ubuf, count, off, buf, len);
-}
-
-static const struct file_operations idle_fops = {
- .open = simple_open,
- .read = stats_read_ul,
- .llseek = default_llseek,
-};
-
-struct debugfs_file_info {
- void *ptr;
- char name[32];
- struct dentry *file;
-} debugfs_file_list[] = {
- {&total_starts, "total_starts", NULL},
- {&total_us, "total_us", NULL},
-#ifdef DEBUG
- {&past_skip, "past_skip", NULL},
-#endif
- {NULL, "", NULL}
- };
-
-static int __init i7300_idle_init(void)
-{
- raw_spin_lock_init(&i7300_idle_lock);
- total_us = 0;
-
- if (i7300_idle_platform_probe(&fbd_dev, &ioat_dev, forceload))
- return -ENODEV;
-
- if (i7300_idle_thrt_save())
- return -ENODEV;
-
- if (i7300_idle_ioat_init())
- return -ENODEV;
-
- if (!zalloc_cpumask_var(&idle_cpumask, GFP_KERNEL))
- return -ENOMEM;
-
- debugfs_dir = debugfs_create_dir("i7300_idle", NULL);
- if (debugfs_dir) {
- int i = 0;
-
- while (debugfs_file_list[i].ptr != NULL) {
- debugfs_file_list[i].file = debugfs_create_file(
- debugfs_file_list[i].name,
- S_IRUSR,
- debugfs_dir,
- debugfs_file_list[i].ptr,
- &idle_fops);
- i++;
- }
- }
-
- idle_notifier_register(&i7300_idle_nb);
-
- printk(KERN_INFO "i7300_idle: loaded v%s\n", I7300_IDLE_DRIVER_VERSION);
- return 0;
-}
-
-static void __exit i7300_idle_exit(void)
-{
- idle_notifier_unregister(&i7300_idle_nb);
- free_cpumask_var(idle_cpumask);
-
- if (debugfs_dir) {
- int i = 0;
-
- while (debugfs_file_list[i].file != NULL) {
- debugfs_remove(debugfs_file_list[i].file);
- i++;
- }
-
- debugfs_remove(debugfs_dir);
- }
- i7300_idle_thrt_restore();
- i7300_idle_ioat_exit();
-}
-
-module_init(i7300_idle_init);
-module_exit(i7300_idle_exit);
-
-MODULE_AUTHOR("Andy Henroid <andrew.d.henroid@intel.com>");
-MODULE_DESCRIPTION("Intel Chipset DIMM Idle Power Saving Driver v"
- I7300_IDLE_DRIVER_VERSION);
-MODULE_LICENSE("GPL");
diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
index 4466a2f969d7..7d8ea3d5fda6 100644
--- a/drivers/idle/intel_idle.c
+++ b/drivers/idle/intel_idle.c
@@ -98,8 +98,6 @@ static int intel_idle(struct cpuidle_device *dev,
struct cpuidle_driver *drv, int index);
static void intel_idle_freeze(struct cpuidle_device *dev,
struct cpuidle_driver *drv, int index);
-static int intel_idle_cpu_init(int cpu);
-
static struct cpuidle_state *cpuidle_state_table;
/*
@@ -724,6 +722,50 @@ static struct cpuidle_state atom_cstates[] = {
{
.enter = NULL }
};
+static struct cpuidle_state tangier_cstates[] = {
+ {
+ .name = "C1-TNG",
+ .desc = "MWAIT 0x00",
+ .flags = MWAIT2flg(0x00),
+ .exit_latency = 1,
+ .target_residency = 4,
+ .enter = &intel_idle,
+ .enter_freeze = intel_idle_freeze, },
+ {
+ .name = "C4-TNG",
+ .desc = "MWAIT 0x30",
+ .flags = MWAIT2flg(0x30) | CPUIDLE_FLAG_TLB_FLUSHED,
+ .exit_latency = 100,
+ .target_residency = 400,
+ .enter = &intel_idle,
+ .enter_freeze = intel_idle_freeze, },
+ {
+ .name = "C6-TNG",
+ .desc = "MWAIT 0x52",
+ .flags = MWAIT2flg(0x52) | CPUIDLE_FLAG_TLB_FLUSHED,
+ .exit_latency = 140,
+ .target_residency = 560,
+ .enter = &intel_idle,
+ .enter_freeze = intel_idle_freeze, },
+ {
+ .name = "C7-TNG",
+ .desc = "MWAIT 0x60",
+ .flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TLB_FLUSHED,
+ .exit_latency = 1200,
+ .target_residency = 4000,
+ .enter = &intel_idle,
+ .enter_freeze = intel_idle_freeze, },
+ {
+ .name = "C9-TNG",
+ .desc = "MWAIT 0x64",
+ .flags = MWAIT2flg(0x64) | CPUIDLE_FLAG_TLB_FLUSHED,
+ .exit_latency = 10000,
+ .target_residency = 20000,
+ .enter = &intel_idle,
+ .enter_freeze = intel_idle_freeze, },
+ {
+ .enter = NULL }
+};
static struct cpuidle_state avn_cstates[] = {
{
.name = "C1-AVN",
@@ -907,51 +949,15 @@ static void intel_idle_freeze(struct cpuidle_device *dev,
mwait_idle_with_hints(eax, ecx);
}
-static void __setup_broadcast_timer(void *arg)
+static void __setup_broadcast_timer(bool on)
{
- unsigned long on = (unsigned long)arg;
-
if (on)
tick_broadcast_enable();
else
tick_broadcast_disable();
}
-static int cpu_hotplug_notify(struct notifier_block *n,
- unsigned long action, void *hcpu)
-{
- int hotcpu = (unsigned long)hcpu;
- struct cpuidle_device *dev;
-
- switch (action & ~CPU_TASKS_FROZEN) {
- case CPU_ONLINE:
-
- if (lapic_timer_reliable_states != LAPIC_TIMER_ALWAYS_RELIABLE)
- smp_call_function_single(hotcpu, __setup_broadcast_timer,
- (void *)true, 1);
-
- /*
- * Some systems can hotplug a cpu at runtime after
- * the kernel has booted, we have to initialize the
- * driver in this case
- */
- dev = per_cpu_ptr(intel_idle_cpuidle_devices, hotcpu);
- if (dev->registered)
- break;
-
- if (intel_idle_cpu_init(hotcpu))
- return NOTIFY_BAD;
-
- break;
- }
- return NOTIFY_OK;
-}
-
-static struct notifier_block cpu_hotplug_notifier = {
- .notifier_call = cpu_hotplug_notify,
-};
-
-static void auto_demotion_disable(void *dummy)
+static void auto_demotion_disable(void)
{
unsigned long long msr_bits;
@@ -959,7 +965,7 @@ static void auto_demotion_disable(void *dummy)
msr_bits &= ~(icpu->auto_demotion_disable_flags);
wrmsrl(MSR_NHM_SNB_PKG_CST_CFG_CTL, msr_bits);
}
-static void c1e_promotion_disable(void *dummy)
+static void c1e_promotion_disable(void)
{
unsigned long long msr_bits;
@@ -978,6 +984,10 @@ static const struct idle_cpu idle_cpu_atom = {
.state_table = atom_cstates,
};
+static const struct idle_cpu idle_cpu_tangier = {
+ .state_table = tangier_cstates,
+};
+
static const struct idle_cpu idle_cpu_lincroft = {
.state_table = atom_cstates,
.auto_demotion_disable_flags = ATM_LNC_C6_AUTO_DEMOTE,
@@ -1066,6 +1076,7 @@ static const struct x86_cpu_id intel_idle_ids[] __initconst = {
ICPU(INTEL_FAM6_SANDYBRIDGE_X, idle_cpu_snb),
ICPU(INTEL_FAM6_ATOM_CEDARVIEW, idle_cpu_atom),
ICPU(INTEL_FAM6_ATOM_SILVERMONT1, idle_cpu_byt),
+ ICPU(INTEL_FAM6_ATOM_MERRIFIELD, idle_cpu_tangier),
ICPU(INTEL_FAM6_ATOM_AIRMONT, idle_cpu_cht),
ICPU(INTEL_FAM6_IVYBRIDGE, idle_cpu_ivb),
ICPU(INTEL_FAM6_IVYBRIDGE_X, idle_cpu_ivt),
@@ -1084,6 +1095,7 @@ static const struct x86_cpu_id intel_idle_ids[] __initconst = {
ICPU(INTEL_FAM6_KABYLAKE_DESKTOP, idle_cpu_skl),
ICPU(INTEL_FAM6_SKYLAKE_X, idle_cpu_skx),
ICPU(INTEL_FAM6_XEON_PHI_KNL, idle_cpu_knl),
+ ICPU(INTEL_FAM6_XEON_PHI_KNM, idle_cpu_knl),
ICPU(INTEL_FAM6_ATOM_GOLDMONT, idle_cpu_bxt),
ICPU(INTEL_FAM6_ATOM_DENVERTON, idle_cpu_dnv),
{}
@@ -1373,12 +1385,11 @@ static void __init intel_idle_cpuidle_driver_init(void)
* allocate, initialize, register cpuidle_devices
* @cpu: cpu/core to initialize
*/
-static int intel_idle_cpu_init(int cpu)
+static int intel_idle_cpu_init(unsigned int cpu)
{
struct cpuidle_device *dev;
dev = per_cpu_ptr(intel_idle_cpuidle_devices, cpu);
-
dev->cpu = cpu;
if (cpuidle_register_device(dev)) {
@@ -1387,17 +1398,36 @@ static int intel_idle_cpu_init(int cpu)
}
if (icpu->auto_demotion_disable_flags)
- smp_call_function_single(cpu, auto_demotion_disable, NULL, 1);
+ auto_demotion_disable();
if (icpu->disable_promotion_to_c1e)
- smp_call_function_single(cpu, c1e_promotion_disable, NULL, 1);
+ c1e_promotion_disable();
+
+ return 0;
+}
+
+static int intel_idle_cpu_online(unsigned int cpu)
+{
+ struct cpuidle_device *dev;
+
+ if (lapic_timer_reliable_states != LAPIC_TIMER_ALWAYS_RELIABLE)
+ __setup_broadcast_timer(true);
+
+ /*
+ * Some systems can hotplug a cpu at runtime after
+ * the kernel has booted, so we have to initialize the
+ * driver in this case.
+ */
+ dev = per_cpu_ptr(intel_idle_cpuidle_devices, cpu);
+ if (!dev->registered)
+ return intel_idle_cpu_init(cpu);
return 0;
}
static int __init intel_idle_init(void)
{
- int retval, i;
+ int retval;
/* Do not load intel_idle at all for now if idle= is passed */
if (boot_option_idle_override != IDLE_NO_OVERRIDE)
@@ -1417,35 +1447,29 @@ static int __init intel_idle_init(void)
struct cpuidle_driver *drv = cpuidle_get_driver();
printk(KERN_DEBUG PREFIX "intel_idle yielding to %s",
drv ? drv->name : "none");
- free_percpu(intel_idle_cpuidle_devices);
- return retval;
+ goto init_driver_fail;
}
- cpu_notifier_register_begin();
-
- for_each_online_cpu(i) {
- retval = intel_idle_cpu_init(i);
- if (retval) {
- intel_idle_cpuidle_devices_uninit();
- cpu_notifier_register_done();
- cpuidle_unregister_driver(&intel_idle_driver);
- free_percpu(intel_idle_cpuidle_devices);
- return retval;
- }
- }
- __register_cpu_notifier(&cpu_hotplug_notifier);
-
if (boot_cpu_has(X86_FEATURE_ARAT)) /* Always Reliable APIC Timer */
lapic_timer_reliable_states = LAPIC_TIMER_ALWAYS_RELIABLE;
- else
- on_each_cpu(__setup_broadcast_timer, (void *)true, 1);
- cpu_notifier_register_done();
+ retval = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "idle/intel:online",
+ intel_idle_cpu_online, NULL);
+ if (retval < 0)
+ goto hp_setup_fail;
pr_debug(PREFIX "lapic_timer_reliable_states 0x%x\n",
lapic_timer_reliable_states);
return 0;
+
+hp_setup_fail:
+ intel_idle_cpuidle_devices_uninit();
+ cpuidle_unregister_driver(&intel_idle_driver);
+init_driver_fail:
+ free_percpu(intel_idle_cpuidle_devices);
+ return retval;
}
device_initcall(intel_idle_init);
diff --git a/drivers/iio/Kconfig b/drivers/iio/Kconfig
index 6743b18194fb..a918270d6f54 100644
--- a/drivers/iio/Kconfig
+++ b/drivers/iio/Kconfig
@@ -73,6 +73,7 @@ source "drivers/iio/adc/Kconfig"
source "drivers/iio/amplifiers/Kconfig"
source "drivers/iio/chemical/Kconfig"
source "drivers/iio/common/Kconfig"
+source "drivers/iio/counter/Kconfig"
source "drivers/iio/dac/Kconfig"
source "drivers/iio/dummy/Kconfig"
source "drivers/iio/frequency/Kconfig"
@@ -87,6 +88,7 @@ if IIO_TRIGGER
source "drivers/iio/trigger/Kconfig"
endif #IIO_TRIGGER
source "drivers/iio/potentiometer/Kconfig"
+source "drivers/iio/potentiostat/Kconfig"
source "drivers/iio/pressure/Kconfig"
source "drivers/iio/proximity/Kconfig"
source "drivers/iio/temperature/Kconfig"
diff --git a/drivers/iio/Makefile b/drivers/iio/Makefile
index 87e4c4369e2f..33fa4026f92c 100644
--- a/drivers/iio/Makefile
+++ b/drivers/iio/Makefile
@@ -18,6 +18,7 @@ obj-y += amplifiers/
obj-y += buffer/
obj-y += chemical/
obj-y += common/
+obj-y += counter/
obj-y += dac/
obj-y += dummy/
obj-y += gyro/
@@ -29,6 +30,7 @@ obj-y += light/
obj-y += magnetometer/
obj-y += orientation/
obj-y += potentiometer/
+obj-y += potentiostat/
obj-y += pressure/
obj-y += proximity/
obj-y += temperature/
diff --git a/drivers/iio/accel/Kconfig b/drivers/iio/accel/Kconfig
index 2b791fe1e2bc..c68bdb649005 100644
--- a/drivers/iio/accel/Kconfig
+++ b/drivers/iio/accel/Kconfig
@@ -52,6 +52,26 @@ config BMC150_ACCEL_SPI
tristate
select REGMAP_SPI
+config DA280
+ tristate "MiraMEMS DA280 3-axis 14-bit digital accelerometer driver"
+ depends on I2C
+ help
+ Say yes here to build support for the MiraMEMS DA280 3-axis 14-bit
+ digital accelerometer.
+
+ To compile this driver as a module, choose M here: the
+ module will be called da280.
+
+config DA311
+ tristate "MiraMEMS DA311 3-axis 12-bit digital accelerometer driver"
+ depends on I2C
+ help
+ Say yes here to build support for the MiraMEMS DA311 3-axis 12-bit
+ digital accelerometer.
+
+ To compile this driver as a module, choose M here: the
+ module will be called da311.
+
config DMARD06
tristate "Domintech DMARD06 Digital Accelerometer Driver"
depends on OF || COMPILE_TEST
@@ -73,6 +93,16 @@ config DMARD09
Choosing M will build the driver as a module. If so, the module
will be called dmard09.
+config DMARD10
+ tristate "Domintech DMARD10 3-axis Accelerometer Driver"
+ depends on I2C
+ help
+ Say yes here to get support for the Domintech DMARD10 3-axis
+ accelerometer.
+
+ Choosing M will build the driver as a module. If so, the module
+ will be called dmard10.
+
config HID_SENSOR_ACCEL_3D
depends on HID_SENSOR_HUB
select IIO_BUFFER
@@ -97,7 +127,8 @@ config IIO_ST_ACCEL_3AXIS
help
Say yes here to build support for STMicroelectronics accelerometers:
LSM303DLH, LSM303DLHC, LIS3DH, LSM330D, LSM330DL, LSM330DLC,
- LIS331DLH, LSM303DL, LSM303DLM, LSM330, LIS2DH12, H3LIS331DL.
+ LIS331DLH, LSM303DL, LSM303DLM, LSM330, LIS2DH12, H3LIS331DL,
+ LNG2DM.
This driver can also be built as a module. If so, these modules
will be created:
@@ -273,6 +304,18 @@ config MXC6255
To compile this driver as a module, choose M here: the module will be
called mxc6255.
+config SCA3000
+ select IIO_BUFFER
+ select IIO_KFIFO_BUF
+ depends on SPI
+ tristate "VTI SCA3000 series accelerometers"
+ help
+ Say Y here to build support for the VTI SCA3000 series of SPI
+ accelerometers. These devices use a hardware ring buffer.
+
+ To compile this driver as a module, say M here: the module will be
+ called sca3000.
+
config STK8312
tristate "Sensortek STK8312 3-Axis Accelerometer Driver"
depends on I2C
diff --git a/drivers/iio/accel/Makefile b/drivers/iio/accel/Makefile
index f5d3ddee619e..69fe8edc57a2 100644
--- a/drivers/iio/accel/Makefile
+++ b/drivers/iio/accel/Makefile
@@ -8,8 +8,11 @@ obj-$(CONFIG_BMA220) += bma220_spi.o
obj-$(CONFIG_BMC150_ACCEL) += bmc150-accel-core.o
obj-$(CONFIG_BMC150_ACCEL_I2C) += bmc150-accel-i2c.o
obj-$(CONFIG_BMC150_ACCEL_SPI) += bmc150-accel-spi.o
+obj-$(CONFIG_DA280) += da280.o
+obj-$(CONFIG_DA311) += da311.o
obj-$(CONFIG_DMARD06) += dmard06.o
obj-$(CONFIG_DMARD09) += dmard09.o
+obj-$(CONFIG_DMARD10) += dmard10.o
obj-$(CONFIG_HID_SENSOR_ACCEL_3D) += hid-sensor-accel-3d.o
obj-$(CONFIG_KXCJK1013) += kxcjk-1013.o
obj-$(CONFIG_KXSD9) += kxsd9.o
@@ -32,6 +35,8 @@ obj-$(CONFIG_MMA9553) += mma9553.o
obj-$(CONFIG_MXC4005) += mxc4005.o
obj-$(CONFIG_MXC6255) += mxc6255.o
+obj-$(CONFIG_SCA3000) += sca3000.o
+
obj-$(CONFIG_STK8312) += stk8312.o
obj-$(CONFIG_STK8BA50) += stk8ba50.o
diff --git a/drivers/iio/accel/da280.c b/drivers/iio/accel/da280.c
new file mode 100644
index 000000000000..ed8343aeac9c
--- /dev/null
+++ b/drivers/iio/accel/da280.c
@@ -0,0 +1,183 @@
+/*
+ * IIO driver for the MiraMEMS DA280 3-axis accelerometer and
+ * the MiraMEMS DA226 2-axis accelerometer
+ *
+ * Copyright (c) 2016 Hans de Goede <hdegoede@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+#include <linux/byteorder/generic.h>
+
+#define DA280_REG_CHIP_ID 0x01
+#define DA280_REG_ACC_X_LSB 0x02
+#define DA280_REG_ACC_Y_LSB 0x04
+#define DA280_REG_ACC_Z_LSB 0x06
+#define DA280_REG_MODE_BW 0x11
+
+#define DA280_CHIP_ID 0x13
+#define DA280_MODE_ENABLE 0x1e
+#define DA280_MODE_DISABLE 0x9e
+
+enum { da226, da280 };
+
+/*
+ * a value of + or -4096 corresponds to + or - 1G
+ * scale = 9.81 / 4096 = 0.002395019
+ */
+
+static const int da280_nscale = 2395019;
+
+#define DA280_CHANNEL(reg, axis) { \
+ .type = IIO_ACCEL, \
+ .address = reg, \
+ .modified = 1, \
+ .channel2 = IIO_MOD_##axis, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \
+}
+
+static const struct iio_chan_spec da280_channels[] = {
+ DA280_CHANNEL(DA280_REG_ACC_X_LSB, X),
+ DA280_CHANNEL(DA280_REG_ACC_Y_LSB, Y),
+ DA280_CHANNEL(DA280_REG_ACC_Z_LSB, Z),
+};
+
+struct da280_data {
+ struct i2c_client *client;
+};
+
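+/* Bit 7 of the mode register powers the chip down: 0x9e = 0x1e | 0x80 */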
+static int da280_enable(struct i2c_client *client, bool enable)
+{
+ u8 data = enable ? DA280_MODE_ENABLE : DA280_MODE_DISABLE;
+
+ return i2c_smbus_write_byte_data(client, DA280_REG_MODE_BW, data);
+}
+
+static int da280_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ struct da280_data *data = iio_priv(indio_dev);
+ int ret;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ ret = i2c_smbus_read_word_data(data->client, chan->address);
+ if (ret < 0)
+ return ret;
+ /*
+ * Values are 14 bits, stored as 16 bits with the 2
+ * least significant bits always 0.
+ */
+ *val = (short)ret >> 2;
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_SCALE:
+ *val = 0;
+ *val2 = da280_nscale;
+ return IIO_VAL_INT_PLUS_NANO;
+ default:
+ return -EINVAL;
+ }
+}
+
+static const struct iio_info da280_info = {
+ .driver_module = THIS_MODULE,
+ .read_raw = da280_read_raw,
+};
+
+static int da280_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ int ret;
+ struct iio_dev *indio_dev;
+ struct da280_data *data;
+
+ ret = i2c_smbus_read_byte_data(client, DA280_REG_CHIP_ID);
+ if (ret != DA280_CHIP_ID)
+ return (ret < 0) ? ret : -ENODEV;
+
+ indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ data = iio_priv(indio_dev);
+ data->client = client;
+ i2c_set_clientdata(client, indio_dev);
+
+ indio_dev->dev.parent = &client->dev;
+ indio_dev->info = &da280_info;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->channels = da280_channels;
+ if (id->driver_data == da226) {
+ indio_dev->name = "da226";
+ indio_dev->num_channels = 2;
+ } else {
+ indio_dev->name = "da280";
+ indio_dev->num_channels = 3;
+ }
+
+ ret = da280_enable(client, true);
+ if (ret < 0)
+ return ret;
+
+ ret = iio_device_register(indio_dev);
+ if (ret < 0) {
+ dev_err(&client->dev, "device_register failed\n");
+ da280_enable(client, false);
+ }
+
+ return ret;
+}
+
+static int da280_remove(struct i2c_client *client)
+{
+ struct iio_dev *indio_dev = i2c_get_clientdata(client);
+
+ iio_device_unregister(indio_dev);
+
+ return da280_enable(client, false);
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int da280_suspend(struct device *dev)
+{
+ return da280_enable(to_i2c_client(dev), false);
+}
+
+static int da280_resume(struct device *dev)
+{
+ return da280_enable(to_i2c_client(dev), true);
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(da280_pm_ops, da280_suspend, da280_resume);
+
+static const struct i2c_device_id da280_i2c_id[] = {
+ { "da226", da226 },
+ { "da280", da280 },
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, da280_i2c_id);
+
+static struct i2c_driver da280_driver = {
+ .driver = {
+ .name = "da280",
+ .pm = &da280_pm_ops,
+ },
+ .probe = da280_probe,
+ .remove = da280_remove,
+ .id_table = da280_i2c_id,
+};
+
+module_i2c_driver(da280_driver);
+
+MODULE_AUTHOR("Hans de Goede <hdegoede@redhat.com>");
+MODULE_DESCRIPTION("MiraMEMS DA280 3-Axis Accelerometer driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/accel/da311.c b/drivers/iio/accel/da311.c
new file mode 100644
index 000000000000..537cfa8b6edf
--- /dev/null
+++ b/drivers/iio/accel/da311.c
@@ -0,0 +1,305 @@
+/*
+ * IIO driver for the MiraMEMS DA311 3-axis accelerometer
+ *
+ * Copyright (c) 2016 Hans de Goede <hdegoede@redhat.com>
+ * Copyright (c) 2011-2013 MiraMEMS Sensing Technology Co., Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+#include <linux/byteorder/generic.h>
+
+#define DA311_CHIP_ID 0x13
+
+/*
+ * Note: register addresses go from 0 - 0x3f and then wrap.
+ * For some reason there are 2 banks with 0 - 0x3f addresses,
+ * rather than a single 0 - 0x7f bank.
+ */
+
+/* Bank 0 regs */
+#define DA311_REG_BANK 0x0000
+#define DA311_REG_LDO_REG 0x0006
+#define DA311_REG_CHIP_ID 0x000f
+#define DA311_REG_TEMP_CFG_REG 0x001f
+#define DA311_REG_CTRL_REG1 0x0020
+#define DA311_REG_CTRL_REG3 0x0022
+#define DA311_REG_CTRL_REG4 0x0023
+#define DA311_REG_CTRL_REG5 0x0024
+#define DA311_REG_CTRL_REG6 0x0025
+#define DA311_REG_STATUS_REG 0x0027
+#define DA311_REG_OUT_X_L 0x0028
+#define DA311_REG_OUT_X_H 0x0029
+#define DA311_REG_OUT_Y_L 0x002a
+#define DA311_REG_OUT_Y_H 0x002b
+#define DA311_REG_OUT_Z_L 0x002c
+#define DA311_REG_OUT_Z_H 0x002d
+#define DA311_REG_INT1_CFG 0x0030
+#define DA311_REG_INT1_SRC 0x0031
+#define DA311_REG_INT1_THS 0x0032
+#define DA311_REG_INT1_DURATION 0x0033
+#define DA311_REG_INT2_CFG 0x0034
+#define DA311_REG_INT2_SRC 0x0035
+#define DA311_REG_INT2_THS 0x0036
+#define DA311_REG_INT2_DURATION 0x0037
+#define DA311_REG_CLICK_CFG 0x0038
+#define DA311_REG_CLICK_SRC 0x0039
+#define DA311_REG_CLICK_THS 0x003a
+#define DA311_REG_TIME_LIMIT 0x003b
+#define DA311_REG_TIME_LATENCY 0x003c
+#define DA311_REG_TIME_WINDOW 0x003d
+
+/* Bank 1 regs */
+#define DA311_REG_SOFT_RESET 0x0105
+#define DA311_REG_OTP_XOFF_L 0x0110
+#define DA311_REG_OTP_XOFF_H 0x0111
+#define DA311_REG_OTP_YOFF_L 0x0112
+#define DA311_REG_OTP_YOFF_H 0x0113
+#define DA311_REG_OTP_ZOFF_L 0x0114
+#define DA311_REG_OTP_ZOFF_H 0x0115
+#define DA311_REG_OTP_XSO 0x0116
+#define DA311_REG_OTP_YSO 0x0117
+#define DA311_REG_OTP_ZSO 0x0118
+#define DA311_REG_OTP_TRIM_OSC 0x011b
+#define DA311_REG_LPF_ABSOLUTE 0x011c
+#define DA311_REG_TEMP_OFF1 0x0127
+#define DA311_REG_TEMP_OFF2 0x0128
+#define DA311_REG_TEMP_OFF3 0x0129
+#define DA311_REG_OTP_TRIM_THERM_H 0x011a
+
+/*
+ * a value of + or -1024 corresponds to + or - 1G
+ * scale = 9.81 / 1024 = 0.009580078
+ */
+
+static const int da311_nscale = 9580078;
+
+#define DA311_CHANNEL(reg, axis) { \
+ .type = IIO_ACCEL, \
+ .address = reg, \
+ .modified = 1, \
+ .channel2 = IIO_MOD_##axis, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \
+}
+
+static const struct iio_chan_spec da311_channels[] = {
+ /* | 0x80 comes from the android driver */
+ DA311_CHANNEL(DA311_REG_OUT_X_L | 0x80, X),
+ DA311_CHANNEL(DA311_REG_OUT_Y_L | 0x80, Y),
+ DA311_CHANNEL(DA311_REG_OUT_Z_L | 0x80, Z),
+};
+
+struct da311_data {
+ struct i2c_client *client;
+};
+
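+/*
+ * The bank is encoded in the high byte of the register address:
+ * addresses of the form 0x01xx select bank 1 and need a bank switch
+ * before and after the access (see the bank 1 reg defines above).
+ */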
+static int da311_register_mask_write(struct i2c_client *client, u16 addr,
+ u8 mask, u8 data)
+{
+ int ret;
+ u8 tmp_data = 0;
+
+ if (addr & 0xff00) {
+ /* Select bank 1 */
+ ret = i2c_smbus_write_byte_data(client, DA311_REG_BANK, 0x01);
+ if (ret < 0)
+ return ret;
+ }
+
+ if (mask != 0xff) {
+ ret = i2c_smbus_read_byte_data(client, addr);
+ if (ret < 0)
+ return ret;
+ tmp_data = ret;
+ }
+
+ tmp_data &= ~mask;
+ tmp_data |= data & mask;
+ ret = i2c_smbus_write_byte_data(client, addr & 0xff, tmp_data);
+ if (ret < 0)
+ return ret;
+
+ if (addr & 0xff00) {
+ /* Back to bank 0 */
+ ret = i2c_smbus_write_byte_data(client, DA311_REG_BANK, 0x00);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
+/* Init sequence taken from the android driver */
+static int da311_reset(struct i2c_client *client)
+{
+ const struct {
+ u16 addr;
+ u8 mask;
+ u8 data;
+ } init_data[] = {
+ { DA311_REG_TEMP_CFG_REG, 0xff, 0x08 },
+ { DA311_REG_CTRL_REG5, 0xff, 0x80 },
+ { DA311_REG_CTRL_REG4, 0x30, 0x00 },
+ { DA311_REG_CTRL_REG1, 0xff, 0x6f },
+ { DA311_REG_TEMP_CFG_REG, 0xff, 0x88 },
+ { DA311_REG_LDO_REG, 0xff, 0x02 },
+ { DA311_REG_OTP_TRIM_OSC, 0xff, 0x27 },
+ { DA311_REG_LPF_ABSOLUTE, 0xff, 0x30 },
+ { DA311_REG_TEMP_OFF1, 0xff, 0x3f },
+ { DA311_REG_TEMP_OFF2, 0xff, 0xff },
+ { DA311_REG_TEMP_OFF3, 0xff, 0x0f },
+ };
+ int i, ret;
+
+ /* Reset */
+ ret = da311_register_mask_write(client, DA311_REG_SOFT_RESET,
+ 0xff, 0xaa);
+ if (ret < 0)
+ return ret;
+
+ for (i = 0; i < ARRAY_SIZE(init_data); i++) {
+ ret = da311_register_mask_write(client,
+ init_data[i].addr,
+ init_data[i].mask,
+ init_data[i].data);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int da311_enable(struct i2c_client *client, bool enable)
+{
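+ /*
+ * Bit 0x20 of TEMP_CFG_REG appears to act as a power-down control:
+ * set to suspend the part, clear to run. This is inferred from the
+ * android driver's init sequence rather than a public datasheet.
+ */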
+ u8 data = enable ? 0x00 : 0x20;
+
+ return da311_register_mask_write(client, DA311_REG_TEMP_CFG_REG,
+ 0x20, data);
+}
+
+static int da311_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ struct da311_data *data = iio_priv(indio_dev);
+ int ret;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ ret = i2c_smbus_read_word_data(data->client, chan->address);
+ if (ret < 0)
+ return ret;
+ /*
+ * Values are 12 bits, stored as 16 bits with the 4
+ * least significant bits always 0.
+ */
+ *val = (short)ret >> 4;
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_SCALE:
+ *val = 0;
+ *val2 = da311_nscale;
+ return IIO_VAL_INT_PLUS_NANO;
+ default:
+ return -EINVAL;
+ }
+}
+
+static const struct iio_info da311_info = {
+ .driver_module = THIS_MODULE,
+ .read_raw = da311_read_raw,
+};
+
+static int da311_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ int ret;
+ struct iio_dev *indio_dev;
+ struct da311_data *data;
+
+ ret = i2c_smbus_read_byte_data(client, DA311_REG_CHIP_ID);
+ if (ret != DA311_CHIP_ID)
+ return (ret < 0) ? ret : -ENODEV;
+
+ indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ data = iio_priv(indio_dev);
+ data->client = client;
+ i2c_set_clientdata(client, indio_dev);
+
+ indio_dev->dev.parent = &client->dev;
+ indio_dev->info = &da311_info;
+ indio_dev->name = "da311";
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->channels = da311_channels;
+ indio_dev->num_channels = ARRAY_SIZE(da311_channels);
+
+ ret = da311_reset(client);
+ if (ret < 0)
+ return ret;
+
+ ret = da311_enable(client, true);
+ if (ret < 0)
+ return ret;
+
+ ret = iio_device_register(indio_dev);
+ if (ret < 0) {
+ dev_err(&client->dev, "device_register failed\n");
+ da311_enable(client, false);
+ }
+
+ return ret;
+}
+
+static int da311_remove(struct i2c_client *client)
+{
+ struct iio_dev *indio_dev = i2c_get_clientdata(client);
+
+ iio_device_unregister(indio_dev);
+
+ return da311_enable(client, false);
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int da311_suspend(struct device *dev)
+{
+ return da311_enable(to_i2c_client(dev), false);
+}
+
+static int da311_resume(struct device *dev)
+{
+ return da311_enable(to_i2c_client(dev), true);
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(da311_pm_ops, da311_suspend, da311_resume);
+
+static const struct i2c_device_id da311_i2c_id[] = {
+ {"da311", 0},
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, da311_i2c_id);
+
+static struct i2c_driver da311_driver = {
+ .driver = {
+ .name = "da311",
+ .pm = &da311_pm_ops,
+ },
+ .probe = da311_probe,
+ .remove = da311_remove,
+ .id_table = da311_i2c_id,
+};
+
+module_i2c_driver(da311_driver);
+
+MODULE_AUTHOR("Hans de Goede <hdegoede@redhat.com>");
+MODULE_DESCRIPTION("MiraMEMS DA311 3-Axis Accelerometer driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/accel/dmard10.c b/drivers/iio/accel/dmard10.c
new file mode 100644
index 000000000000..b8736cc75656
--- /dev/null
+++ b/drivers/iio/accel/dmard10.c
@@ -0,0 +1,266 @@
+/*
+ * IIO driver for the 3-axis accelerometer Domintech ARD10.
+ *
+ * Copyright (c) 2016 Hans de Goede <hdegoede@redhat.com>
+ * Copyright (c) 2012 Domintech Technology Co., Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+#include <linux/byteorder/generic.h>
+
+#define DMARD10_REG_ACTR 0x00
+#define DMARD10_REG_AFEM 0x0c
+#define DMARD10_REG_STADR 0x12
+#define DMARD10_REG_STAINT 0x1c
+#define DMARD10_REG_MISC2 0x1f
+#define DMARD10_REG_PD 0x21
+
+#define DMARD10_MODE_OFF 0x00
+#define DMARD10_MODE_STANDBY 0x02
+#define DMARD10_MODE_ACTIVE 0x06
+#define DMARD10_MODE_READ_OTP 0x12
+#define DMARD10_MODE_RESET_DATA_PATH 0x82
+
+/* AFEN set 1, ATM[2:0]=b'000 (normal), EN_Z/Y/X/T=1 */
+#define DMARD10_VALUE_AFEM_AFEN_NORMAL 0x8f
+/* ODR[3:0]=b'0111 (100Hz), CCK[3:0]=b'0100 (204.8kHZ) */
+#define DMARD10_VALUE_CKSEL_ODR_100_204 0x74
+/* INTC[6:5]=b'00 */
+#define DMARD10_VALUE_INTC 0x00
+/* TAP1/TAP2 Average 2 */
+#define DMARD10_VALUE_TAPNS_AVE_2 0x11
+
+#define DMARD10_VALUE_STADR 0x55
+#define DMARD10_VALUE_STAINT 0xaa
+#define DMARD10_VALUE_MISC2_OSCA_EN 0x08
+#define DMARD10_VALUE_PD_RST 0x52
+
+/* Offsets into the buffer read in dmard10_read_raw() */
+#define DMARD10_X_OFFSET 1
+#define DMARD10_Y_OFFSET 2
+#define DMARD10_Z_OFFSET 3
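+/* Word 0 presumably holds the DMARD10_REG_STADR data; only words 1-3 are used */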
+
+/*
+ * a value of + or -128 corresponds to + or - 1G
+ * scale = 9.81 / 128 = 0.076640625
+ */
+
+static const int dmard10_nscale = 76640625;
+
+#define DMARD10_CHANNEL(reg, axis) { \
+ .type = IIO_ACCEL, \
+ .address = reg, \
+ .modified = 1, \
+ .channel2 = IIO_MOD_##axis, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \
+}
+
+static const struct iio_chan_spec dmard10_channels[] = {
+ DMARD10_CHANNEL(DMARD10_X_OFFSET, X),
+ DMARD10_CHANNEL(DMARD10_Y_OFFSET, Y),
+ DMARD10_CHANNEL(DMARD10_Z_OFFSET, Z),
+};
+
+struct dmard10_data {
+ struct i2c_client *client;
+};
+
+/* Init sequence taken from the android driver */
+static int dmard10_reset(struct i2c_client *client)
+{
+ unsigned char buffer[7];
+ int ret;
+
+ /* 1. Powerdown reset */
+ ret = i2c_smbus_write_byte_data(client, DMARD10_REG_PD,
+ DMARD10_VALUE_PD_RST);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * 2. ACTR => Standby mode => Download OTP to parameter reg =>
+ * Standby mode => Reset data path => Standby mode
+ */
+ buffer[0] = DMARD10_REG_ACTR;
+ buffer[1] = DMARD10_MODE_STANDBY;
+ buffer[2] = DMARD10_MODE_READ_OTP;
+ buffer[3] = DMARD10_MODE_STANDBY;
+ buffer[4] = DMARD10_MODE_RESET_DATA_PATH;
+ buffer[5] = DMARD10_MODE_STANDBY;
+ ret = i2c_master_send(client, buffer, 6);
+ if (ret < 0)
+ return ret;
+
+ /* 3. OSCA_EN = 1, TSTO = b'000 (INT1 = normal, TEST0 = normal) */
+ ret = i2c_smbus_write_byte_data(client, DMARD10_REG_MISC2,
+ DMARD10_VALUE_MISC2_OSCA_EN);
+ if (ret < 0)
+ return ret;
+
+ /* 4. AFEN = 1 (AFE will powerdown after ADC) */
+ buffer[0] = DMARD10_REG_AFEM;
+ buffer[1] = DMARD10_VALUE_AFEM_AFEN_NORMAL;
+ buffer[2] = DMARD10_VALUE_CKSEL_ODR_100_204;
+ buffer[3] = DMARD10_VALUE_INTC;
+ buffer[4] = DMARD10_VALUE_TAPNS_AVE_2;
+ buffer[5] = 0x00; /* DLYC, no delay timing */
+ buffer[6] = 0x07; /* INTD=1 push-pull, INTA=1 active high, AUTOT=1 */
+ ret = i2c_master_send(client, buffer, 7);
+ if (ret < 0)
+ return ret;
+
+ /* 5. Activation mode */
+ ret = i2c_smbus_write_byte_data(client, DMARD10_REG_ACTR,
+ DMARD10_MODE_ACTIVE);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+/* Shutdown sequence taken from the android driver */
+static int dmard10_shutdown(struct i2c_client *client)
+{
+ unsigned char buffer[3];
+
+ buffer[0] = DMARD10_REG_ACTR;
+ buffer[1] = DMARD10_MODE_STANDBY;
+ buffer[2] = DMARD10_MODE_OFF;
+
+ return i2c_master_send(client, buffer, 3);
+}
+
+static int dmard10_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ struct dmard10_data *data = iio_priv(indio_dev);
+ __le16 buf[4];
+ int ret;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ /*
+ * Read 8 bytes starting at the REG_STADR register; trying to
+ * read the individual X, Y, Z registers will always return 0.
+ */
+ ret = i2c_smbus_read_i2c_block_data(data->client,
+ DMARD10_REG_STADR,
+ sizeof(buf), (u8 *)buf);
+ if (ret < 0)
+ return ret;
+ ret = le16_to_cpu(buf[chan->address]);
+ *val = sign_extend32(ret, 12);
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_SCALE:
+ *val = 0;
+ *val2 = dmard10_nscale;
+ return IIO_VAL_INT_PLUS_NANO;
+ default:
+ return -EINVAL;
+ }
+}
+
+static const struct iio_info dmard10_info = {
+ .driver_module = THIS_MODULE,
+ .read_raw = dmard10_read_raw,
+};
+
+static int dmard10_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ int ret;
+ struct iio_dev *indio_dev;
+ struct dmard10_data *data;
+
+ /* These 2 registers have special POR reset values used for id */
+ ret = i2c_smbus_read_byte_data(client, DMARD10_REG_STADR);
+ if (ret != DMARD10_VALUE_STADR)
+ return (ret < 0) ? ret : -ENODEV;
+
+ ret = i2c_smbus_read_byte_data(client, DMARD10_REG_STAINT);
+ if (ret != DMARD10_VALUE_STAINT)
+ return (ret < 0) ? ret : -ENODEV;
+
+ indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data));
+ if (!indio_dev) {
+ dev_err(&client->dev, "iio allocation failed!\n");
+ return -ENOMEM;
+ }
+
+ data = iio_priv(indio_dev);
+ data->client = client;
+ i2c_set_clientdata(client, indio_dev);
+
+ indio_dev->dev.parent = &client->dev;
+ indio_dev->info = &dmard10_info;
+ indio_dev->name = "dmard10";
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->channels = dmard10_channels;
+ indio_dev->num_channels = ARRAY_SIZE(dmard10_channels);
+
+ ret = dmard10_reset(client);
+ if (ret < 0)
+ return ret;
+
+ ret = iio_device_register(indio_dev);
+ if (ret < 0) {
+ dev_err(&client->dev, "device_register failed\n");
+ dmard10_shutdown(client);
+ }
+
+ return ret;
+}
+
+static int dmard10_remove(struct i2c_client *client)
+{
+ struct iio_dev *indio_dev = i2c_get_clientdata(client);
+
+ iio_device_unregister(indio_dev);
+
+ return dmard10_shutdown(client);
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int dmard10_suspend(struct device *dev)
+{
+ return dmard10_shutdown(to_i2c_client(dev));
+}
+
+static int dmard10_resume(struct device *dev)
+{
+ return dmard10_reset(to_i2c_client(dev));
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(dmard10_pm_ops, dmard10_suspend, dmard10_resume);
+
+static const struct i2c_device_id dmard10_i2c_id[] = {
+ {"dmard10", 0},
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, dmard10_i2c_id);
+
+static struct i2c_driver dmard10_driver = {
+ .driver = {
+ .name = "dmard10",
+ .pm = &dmard10_pm_ops,
+ },
+ .probe = dmard10_probe,
+ .remove = dmard10_remove,
+ .id_table = dmard10_i2c_id,
+};
+
+module_i2c_driver(dmard10_driver);
+
+MODULE_AUTHOR("Hans de Goede <hdegoede@redhat.com>");
+MODULE_DESCRIPTION("Domintech ARD10 3-Axis Accelerometer driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/accel/mma7660.c b/drivers/iio/accel/mma7660.c
index 03beadf14ad3..3a40774cca74 100644
--- a/drivers/iio/accel/mma7660.c
+++ b/drivers/iio/accel/mma7660.c
@@ -39,7 +39,7 @@
#define MMA7660_SCALE_AVAIL "0.467142857"
-const int mma7660_nscale = 467142857;
+static const int mma7660_nscale = 467142857;
#define MMA7660_CHANNEL(reg, axis) { \
.type = IIO_ACCEL, \
diff --git a/drivers/iio/accel/mma8452.c b/drivers/iio/accel/mma8452.c
index d41e1b588e68..f418c588af6a 100644
--- a/drivers/iio/accel/mma8452.c
+++ b/drivers/iio/accel/mma8452.c
@@ -459,12 +459,14 @@ static int mma8452_read_raw(struct iio_dev *indio_dev,
switch (mask) {
case IIO_CHAN_INFO_RAW:
- if (iio_buffer_enabled(indio_dev))
- return -EBUSY;
+ ret = iio_device_claim_direct_mode(indio_dev);
+ if (ret)
+ return ret;
mutex_lock(&data->lock);
ret = mma8452_read(data, buffer);
mutex_unlock(&data->lock);
+ iio_device_release_direct_mode(indio_dev);
if (ret < 0)
return ret;
@@ -664,37 +666,46 @@ static int mma8452_write_raw(struct iio_dev *indio_dev,
struct mma8452_data *data = iio_priv(indio_dev);
int i, ret;
- if (iio_buffer_enabled(indio_dev))
- return -EBUSY;
+ ret = iio_device_claim_direct_mode(indio_dev);
+ if (ret)
+ return ret;
switch (mask) {
case IIO_CHAN_INFO_SAMP_FREQ:
i = mma8452_get_samp_freq_index(data, val, val2);
- if (i < 0)
- return i;
-
+ if (i < 0) {
+ ret = i;
+ break;
+ }
data->ctrl_reg1 &= ~MMA8452_CTRL_DR_MASK;
data->ctrl_reg1 |= i << MMA8452_CTRL_DR_SHIFT;
- return mma8452_change_config(data, MMA8452_CTRL_REG1,
- data->ctrl_reg1);
+ ret = mma8452_change_config(data, MMA8452_CTRL_REG1,
+ data->ctrl_reg1);
+ break;
case IIO_CHAN_INFO_SCALE:
i = mma8452_get_scale_index(data, val, val2);
- if (i < 0)
- return i;
+ if (i < 0) {
+ ret = i;
+ break;
+ }
data->data_cfg &= ~MMA8452_DATA_CFG_FS_MASK;
data->data_cfg |= i;
- return mma8452_change_config(data, MMA8452_DATA_CFG,
- data->data_cfg);
+ ret = mma8452_change_config(data, MMA8452_DATA_CFG,
+ data->data_cfg);
+ break;
case IIO_CHAN_INFO_CALIBBIAS:
- if (val < -128 || val > 127)
- return -EINVAL;
+ if (val < -128 || val > 127) {
+ ret = -EINVAL;
+ break;
+ }
- return mma8452_change_config(data,
- MMA8452_OFF_X + chan->scan_index,
- val);
+ ret = mma8452_change_config(data,
+ MMA8452_OFF_X + chan->scan_index,
+ val);
+ break;
case IIO_CHAN_INFO_HIGH_PASS_FILTER_3DB_FREQUENCY:
if (val == 0 && val2 == 0) {
@@ -703,23 +714,30 @@ static int mma8452_write_raw(struct iio_dev *indio_dev,
data->data_cfg |= MMA8452_DATA_CFG_HPF_MASK;
ret = mma8452_set_hp_filter_frequency(data, val, val2);
if (ret < 0)
- return ret;
+ break;
}
- return mma8452_change_config(data, MMA8452_DATA_CFG,
+ ret = mma8452_change_config(data, MMA8452_DATA_CFG,
data->data_cfg);
+ break;
case IIO_CHAN_INFO_OVERSAMPLING_RATIO:
ret = mma8452_get_odr_index(data);
for (i = 0; i < ARRAY_SIZE(mma8452_os_ratio); i++) {
- if (mma8452_os_ratio[i][ret] == val)
- return mma8452_set_power_mode(data, i);
+ if (mma8452_os_ratio[i][ret] == val) {
+ ret = mma8452_set_power_mode(data, i);
+ break;
+ }
}
-
+ break;
default:
- return -EINVAL;
+ ret = -EINVAL;
+ break;
}
+
+ iio_device_release_direct_mode(indio_dev);
+ return ret;
}
static int mma8452_read_thresh(struct iio_dev *indio_dev,
@@ -1347,20 +1365,9 @@ static int mma8452_data_rdy_trigger_set_state(struct iio_trigger *trig,
return mma8452_change_config(data, MMA8452_CTRL_REG4, reg);
}
-static int mma8452_validate_device(struct iio_trigger *trig,
- struct iio_dev *indio_dev)
-{
- struct iio_dev *indio = iio_trigger_get_drvdata(trig);
-
- if (indio != indio_dev)
- return -EINVAL;
-
- return 0;
-}
-
static const struct iio_trigger_ops mma8452_trigger_ops = {
.set_trigger_state = mma8452_data_rdy_trigger_set_state,
- .validate_device = mma8452_validate_device,
+ .validate_device = iio_trigger_validate_own_device,
.owner = THIS_MODULE,
};
diff --git a/drivers/iio/accel/sca3000.c b/drivers/iio/accel/sca3000.c
new file mode 100644
index 000000000000..cb1d83fa19a0
--- /dev/null
+++ b/drivers/iio/accel/sca3000.c
@@ -0,0 +1,1576 @@
+/*
+ * sca3000.c -- support VTI sca3000 series accelerometers via SPI
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * Copyright (c) 2009 Jonathan Cameron <jic23@kernel.org>
+ *
+ * See industrialio/accels/sca3000.h for comments.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/fs.h>
+#include <linux/device.h>
+#include <linux/slab.h>
+#include <linux/kernel.h>
+#include <linux/spi/spi.h>
+#include <linux/sysfs.h>
+#include <linux/module.h>
+#include <linux/uaccess.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+#include <linux/iio/events.h>
+#include <linux/iio/buffer.h>
+#include <linux/iio/kfifo_buf.h>
+
+#define SCA3000_WRITE_REG(a) (((a) << 2) | 0x02)
+#define SCA3000_READ_REG(a) ((a) << 2)
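+/* SPI frame: 6-bit register address in bits 7:2, bit 1 set selects a write */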
+
+#define SCA3000_REG_REVID_ADDR 0x00
+#define SCA3000_REG_REVID_MAJOR_MASK GENMASK(7, 4)
+#define SCA3000_REG_REVID_MINOR_MASK GENMASK(3, 0)
+
+#define SCA3000_REG_STATUS_ADDR 0x02
+#define SCA3000_LOCKED BIT(5)
+#define SCA3000_EEPROM_CS_ERROR BIT(1)
+#define SCA3000_SPI_FRAME_ERROR BIT(0)
+
+/* All reads done using register decrement so no need to directly access LSBs */
+#define SCA3000_REG_X_MSB_ADDR 0x05
+#define SCA3000_REG_Y_MSB_ADDR 0x07
+#define SCA3000_REG_Z_MSB_ADDR 0x09
+
+#define SCA3000_REG_RING_OUT_ADDR 0x0f
+
+/* Temp read untested - the e05 doesn't have the sensor */
+#define SCA3000_REG_TEMP_MSB_ADDR 0x13
+
+#define SCA3000_REG_MODE_ADDR 0x14
+#define SCA3000_MODE_PROT_MASK 0x28
+#define SCA3000_REG_MODE_RING_BUF_ENABLE BIT(7)
+#define SCA3000_REG_MODE_RING_BUF_8BIT BIT(6)
+
+/*
+ * Free fall detection triggers an interrupt if the acceleration
+ * is below a threshold for the equivalent of a 25 cm drop
+ */
+#define SCA3000_REG_MODE_FREE_FALL_DETECT BIT(4)
+#define SCA3000_REG_MODE_MEAS_MODE_NORMAL 0x00
+#define SCA3000_REG_MODE_MEAS_MODE_OP_1 0x01
+#define SCA3000_REG_MODE_MEAS_MODE_OP_2 0x02
+
+/*
+ * In motion detection mode the accelerations are band pass filtered
+ * (approx 1 - 25Hz) and a programmable threshold is then used to
+ * trigger an interrupt.
+ */
+#define SCA3000_REG_MODE_MEAS_MODE_MOT_DET 0x03
+#define SCA3000_REG_MODE_MODE_MASK 0x03
+
+#define SCA3000_REG_BUF_COUNT_ADDR 0x15
+
+#define SCA3000_REG_INT_STATUS_ADDR 0x16
+#define SCA3000_REG_INT_STATUS_THREE_QUARTERS BIT(7)
+#define SCA3000_REG_INT_STATUS_HALF BIT(6)
+
+#define SCA3000_INT_STATUS_FREE_FALL BIT(3)
+#define SCA3000_INT_STATUS_Y_TRIGGER BIT(2)
+#define SCA3000_INT_STATUS_X_TRIGGER BIT(1)
+#define SCA3000_INT_STATUS_Z_TRIGGER BIT(0)
+
+/* Used to allow access to multiplexed registers */
+#define SCA3000_REG_CTRL_SEL_ADDR 0x18
+/* Only available for SCA3000-D03 and SCA3000-D01 */
+#define SCA3000_REG_CTRL_SEL_I2C_DISABLE 0x01
+#define SCA3000_REG_CTRL_SEL_MD_CTRL 0x02
+#define SCA3000_REG_CTRL_SEL_MD_Y_TH 0x03
+#define SCA3000_REG_CTRL_SEL_MD_X_TH 0x04
+#define SCA3000_REG_CTRL_SEL_MD_Z_TH 0x05
+/*
+ * BE VERY CAREFUL WITH THIS, IF 3 BITS ARE NOT SET the device
+ * will not function
+ */
+#define SCA3000_REG_CTRL_SEL_OUT_CTRL 0x0B
+
+#define SCA3000_REG_OUT_CTRL_PROT_MASK 0xE0
+#define SCA3000_REG_OUT_CTRL_BUF_X_EN 0x10
+#define SCA3000_REG_OUT_CTRL_BUF_Y_EN 0x08
+#define SCA3000_REG_OUT_CTRL_BUF_Z_EN 0x04
+#define SCA3000_REG_OUT_CTRL_BUF_DIV_MASK 0x03
+#define SCA3000_REG_OUT_CTRL_BUF_DIV_4 0x02
+#define SCA3000_REG_OUT_CTRL_BUF_DIV_2 0x01
+
+
+/*
+ * Control which motion detector interrupts are on.
+ * For now only OR combinations are supported.
+ */
+#define SCA3000_MD_CTRL_PROT_MASK 0xC0
+#define SCA3000_MD_CTRL_OR_Y BIT(0)
+#define SCA3000_MD_CTRL_OR_X BIT(1)
+#define SCA3000_MD_CTRL_OR_Z BIT(2)
+/* Currently unsupported */
+#define SCA3000_MD_CTRL_AND_Y BIT(3)
+#define SCA3000_MD_CTRL_AND_X BIT(4)
+#define SAC3000_MD_CTRL_AND_Z BIT(5)
+
+/*
+ * Some control registers have complex access methods; this register
+ * must be written to remove the lock.
+ */
+#define SCA3000_REG_UNLOCK_ADDR 0x1e
+
+#define SCA3000_REG_INT_MASK_ADDR 0x21
+#define SCA3000_REG_INT_MASK_PROT_MASK 0x1C
+
+#define SCA3000_REG_INT_MASK_RING_THREE_QUARTER BIT(7)
+#define SCA3000_REG_INT_MASK_RING_HALF BIT(6)
+
+#define SCA3000_REG_INT_MASK_ALL_INTS 0x02
+#define SCA3000_REG_INT_MASK_ACTIVE_HIGH 0x01
+#define SCA3000_REG_INT_MASK_ACTIVE_LOW 0x00
+/* Values of multiplexed registers (write to ctrl_data after select) */
+#define SCA3000_REG_CTRL_DATA_ADDR 0x22
+
+/*
+ * Measurement modes available on some sca3000 series chips. Code assumes others
+ * may become available in the future.
+ *
+ * Bypass - Bypass the low-pass filter in the signal channel so as to increase
+ * signal bandwidth.
+ *
+ * Narrow - Narrow low-pass filtering of the signal channel and half output
+ * data rate by decimation.
+ *
+ * Wide - Widen low-pass filtering of signal channel to increase bandwidth
+ */
+#define SCA3000_OP_MODE_BYPASS 0x01
+#define SCA3000_OP_MODE_NARROW 0x02
+#define SCA3000_OP_MODE_WIDE 0x04
+#define SCA3000_MAX_TX 6
+#define SCA3000_MAX_RX 2
+
+/**
+ * struct sca3000_state - device instance state information
+ * @us: the associated spi device
+ * @info: chip variant information
+ * @last_timestamp: the timestamp of the last event
+ * @mo_det_use_count: reference counter for the motion detection unit
+ * @lock: lock used to protect elements of sca3000_state
+ * and the underlying device state.
+ * @tx: dma-able transmit buffer
+ * @rx: dma-able receive buffer
+ **/
+struct sca3000_state {
+ struct spi_device *us;
+ const struct sca3000_chip_info *info;
+ s64 last_timestamp;
+ int mo_det_use_count;
+ struct mutex lock;
+ /* Can these share a cacheline? */
+ u8 rx[384] ____cacheline_aligned;
+ u8 tx[6] ____cacheline_aligned;
+};
+
+/**
+ * struct sca3000_chip_info - model dependent parameters
+ * @scale: scale * 10^-6
+ * @temp_output: some devices have temperature sensors.
+ * @measurement_mode_freq: normal mode sampling frequency
+ * @measurement_mode_3db_freq: 3db cutoff frequency of the low pass filter for
+ * the normal measurement mode.
+ * @option_mode_1: first optional mode. Not all models have one
+ * @option_mode_1_freq: option mode 1 sampling frequency
+ * @option_mode_1_3db_freq: 3db cutoff frequency of the low pass filter for
+ * the first option mode.
+ * @option_mode_2: second optional mode. Not all chips have one
+ * @option_mode_2_freq: option mode 2 sampling frequency
+ * @option_mode_2_3db_freq: 3db cutoff frequency of the low pass filter for
+ * the second option mode.
+ * @mod_det_mult_xz: Bit wise multipliers to calculate the threshold
+ * for motion detection in the x and z axis.
+ * @mod_det_mult_y: Bit wise multipliers to calculate the threshold
+ * for motion detection in the y axis.
+ *
+ * This structure is used to hold information about the functionality of a given
+ * sca3000 variant.
+ **/
+struct sca3000_chip_info {
+ unsigned int scale;
+ bool temp_output;
+ int measurement_mode_freq;
+ int measurement_mode_3db_freq;
+ int option_mode_1;
+ int option_mode_1_freq;
+ int option_mode_1_3db_freq;
+ int option_mode_2;
+ int option_mode_2_freq;
+ int option_mode_2_3db_freq;
+ int mot_det_mult_xz[6];
+ int mot_det_mult_y[7];
+};
+
+enum sca3000_variant {
+ d01,
+ e02,
+ e04,
+ e05,
+};
+
+/*
+ * Note where option modes are not defined, the chip simply does not
+ * support any.
+ * Other chips in the sca3000 series use i2c and are not included here.
+ *
+ * Some of these devices are only listed in the family data sheet and
+ * do not actually appear to be available.
+ */
+static const struct sca3000_chip_info sca3000_spi_chip_info_tbl[] = {
+ [d01] = {
+ .scale = 7357,
+ .temp_output = true,
+ .measurement_mode_freq = 250,
+ .measurement_mode_3db_freq = 45,
+ .option_mode_1 = SCA3000_OP_MODE_BYPASS,
+ .option_mode_1_freq = 250,
+ .option_mode_1_3db_freq = 70,
+ .mot_det_mult_xz = {50, 100, 200, 350, 650, 1300},
+ .mot_det_mult_y = {50, 100, 150, 250, 450, 850, 1750},
+ },
+ [e02] = {
+ .scale = 9810,
+ .measurement_mode_freq = 125,
+ .measurement_mode_3db_freq = 40,
+ .option_mode_1 = SCA3000_OP_MODE_NARROW,
+ .option_mode_1_freq = 63,
+ .option_mode_1_3db_freq = 11,
+ .mot_det_mult_xz = {100, 150, 300, 550, 1050, 2050},
+ .mot_det_mult_y = {50, 100, 200, 350, 700, 1350, 2700},
+ },
+ [e04] = {
+ .scale = 19620,
+ .measurement_mode_freq = 100,
+ .measurement_mode_3db_freq = 38,
+ .option_mode_1 = SCA3000_OP_MODE_NARROW,
+ .option_mode_1_freq = 50,
+ .option_mode_1_3db_freq = 9,
+ .option_mode_2 = SCA3000_OP_MODE_WIDE,
+ .option_mode_2_freq = 400,
+ .option_mode_2_3db_freq = 70,
+ .mot_det_mult_xz = {200, 300, 600, 1100, 2100, 4100},
+ .mot_det_mult_y = {100, 200, 400, 700, 1400, 2700, 5400},
+ },
+ [e05] = {
+ .scale = 61313,
+ .measurement_mode_freq = 200,
+ .measurement_mode_3db_freq = 60,
+ .option_mode_1 = SCA3000_OP_MODE_NARROW,
+ .option_mode_1_freq = 50,
+ .option_mode_1_3db_freq = 9,
+ .option_mode_2 = SCA3000_OP_MODE_WIDE,
+ .option_mode_2_freq = 400,
+ .option_mode_2_3db_freq = 75,
+ .mot_det_mult_xz = {600, 900, 1700, 3200, 6100, 11900},
+ .mot_det_mult_y = {300, 600, 1200, 2000, 4100, 7800, 15600},
+ },
+};
+
+static int sca3000_write_reg(struct sca3000_state *st, u8 address, u8 val)
+{
+ st->tx[0] = SCA3000_WRITE_REG(address);
+ st->tx[1] = val;
+ return spi_write(st->us, st->tx, 2);
+}
+
+static int sca3000_read_data_short(struct sca3000_state *st,
+ u8 reg_address_high,
+ int len)
+{
+ struct spi_transfer xfer[2] = {
+ {
+ .len = 1,
+ .tx_buf = st->tx,
+ }, {
+ .len = len,
+ .rx_buf = st->rx,
+ }
+ };
+ st->tx[0] = SCA3000_READ_REG(reg_address_high);
+
+ return spi_sync_transfer(st->us, xfer, ARRAY_SIZE(xfer));
+}
+
+/**
+ * sca3000_reg_lock_on() - test if the ctrl register lock is on
+ * @st: Driver specific device instance data.
+ *
+ * Lock must be held.
+ **/
+static int sca3000_reg_lock_on(struct sca3000_state *st)
+{
+ int ret;
+
+ ret = sca3000_read_data_short(st, SCA3000_REG_STATUS_ADDR, 1);
+ if (ret < 0)
+ return ret;
+
+ return !(st->rx[0] & SCA3000_LOCKED);
+}
+
+/**
+ * __sca3000_unlock_reg_lock() - unlock the control registers
+ * @st: Driver specific device instance data.
+ *
+ * Note the device does not appear to support doing this in a single transfer.
+ * This should only ever be used as part of ctrl reg read.
+ * Lock must be held before calling this
+ */
+static int __sca3000_unlock_reg_lock(struct sca3000_state *st)
+{
+ struct spi_transfer xfer[3] = {
+ {
+ .len = 2,
+ .cs_change = 1,
+ .tx_buf = st->tx,
+ }, {
+ .len = 2,
+ .cs_change = 1,
+ .tx_buf = st->tx + 2,
+ }, {
+ .len = 2,
+ .tx_buf = st->tx + 4,
+ },
+ };
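+ /*
+ * The lock is removed by three separate writes of 0x00, 0x50, 0xA0
+ * to the unlock register, each in its own SPI transfer.
+ */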
+ st->tx[0] = SCA3000_WRITE_REG(SCA3000_REG_UNLOCK_ADDR);
+ st->tx[1] = 0x00;
+ st->tx[2] = SCA3000_WRITE_REG(SCA3000_REG_UNLOCK_ADDR);
+ st->tx[3] = 0x50;
+ st->tx[4] = SCA3000_WRITE_REG(SCA3000_REG_UNLOCK_ADDR);
+ st->tx[5] = 0xA0;
+
+ return spi_sync_transfer(st->us, xfer, ARRAY_SIZE(xfer));
+}
+
+/**
+ * sca3000_write_ctrl_reg() - write to a lock-protected ctrl register
+ * @st: Driver specific device instance data.
+ * @sel: selects which registers we wish to write to
+ * @val: the value to be written
+ *
+ * Certain control registers are protected against overwriting by the lock
+ * register and use a shared write address. This function allows writing of
+ * these registers.
+ * Lock must be held.
+ */
+static int sca3000_write_ctrl_reg(struct sca3000_state *st,
+ u8 sel,
+ uint8_t val)
+{
+ int ret;
+
+ ret = sca3000_reg_lock_on(st);
+ if (ret < 0)
+ goto error_ret;
+ if (ret) {
+ ret = __sca3000_unlock_reg_lock(st);
+ if (ret)
+ goto error_ret;
+ }
+
+ /* Set the control select register */
+ ret = sca3000_write_reg(st, SCA3000_REG_CTRL_SEL_ADDR, sel);
+ if (ret)
+ goto error_ret;
+
+ /* Write the actual value into the register */
+ ret = sca3000_write_reg(st, SCA3000_REG_CTRL_DATA_ADDR, val);
+
+error_ret:
+ return ret;
+}
+
+/**
+ * sca3000_read_ctrl_reg() - read from a lock-protected control register.
+ * @st: Driver specific device instance data.
+ * @ctrl_reg: Which ctrl register do we want to read.
+ *
+ * Lock must be held.
+ */
+static int sca3000_read_ctrl_reg(struct sca3000_state *st,
+ u8 ctrl_reg)
+{
+ int ret;
+
+ ret = sca3000_reg_lock_on(st);
+ if (ret < 0)
+ goto error_ret;
+ if (ret) {
+ ret = __sca3000_unlock_reg_lock(st);
+ if (ret)
+ goto error_ret;
+ }
+ /* Set the control select register */
+ ret = sca3000_write_reg(st, SCA3000_REG_CTRL_SEL_ADDR, ctrl_reg);
+ if (ret)
+ goto error_ret;
+ ret = sca3000_read_data_short(st, SCA3000_REG_CTRL_DATA_ADDR, 1);
+ if (ret)
+ goto error_ret;
+ return st->rx[0];
+error_ret:
+ return ret;
+}
+
+/**
+ * sca3000_print_rev() - log the chip revision number
+ * @indio_dev: Device instance specific generic IIO data.
+ * Driver specific device instance data can be obtained
+ * via iio_priv(indio_dev)
+ */
+static int sca3000_print_rev(struct iio_dev *indio_dev)
+{
+ int ret;
+ struct sca3000_state *st = iio_priv(indio_dev);
+
+ mutex_lock(&st->lock);
+ ret = sca3000_read_data_short(st, SCA3000_REG_REVID_ADDR, 1);
+ if (ret < 0)
+ goto error_ret;
+ dev_info(&indio_dev->dev,
+ "sca3000 revision major=%lu, minor=%lu\n",
+ (st->rx[0] & SCA3000_REG_REVID_MAJOR_MASK) >> 4,
+ st->rx[0] & SCA3000_REG_REVID_MINOR_MASK);
+error_ret:
+ mutex_unlock(&st->lock);
+
+ return ret;
+}
+
+static ssize_t
+sca3000_show_available_3db_freqs(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+ struct sca3000_state *st = iio_priv(indio_dev);
+ int len;
+
+ len = sprintf(buf, "%d", st->info->measurement_mode_3db_freq);
+ if (st->info->option_mode_1)
+ len += sprintf(buf + len, " %d",
+ st->info->option_mode_1_3db_freq);
+ if (st->info->option_mode_2)
+ len += sprintf(buf + len, " %d",
+ st->info->option_mode_2_3db_freq);
+ len += sprintf(buf + len, "\n");
+
+ return len;
+}
+
+static IIO_DEVICE_ATTR(in_accel_filter_low_pass_3db_frequency_available,
+ S_IRUGO, sca3000_show_available_3db_freqs,
+ NULL, 0);
+
+static const struct iio_event_spec sca3000_event = {
+ .type = IIO_EV_TYPE_MAG,
+ .dir = IIO_EV_DIR_RISING,
+ .mask_separate = BIT(IIO_EV_INFO_VALUE) | BIT(IIO_EV_INFO_ENABLE),
+};
+
+/*
+ * Note the hack in the number of bits: we pretend to have 2 more
+ * than we actually do in the fifo.
+ */
+#define SCA3000_CHAN(index, mod) \
+ { \
+ .type = IIO_ACCEL, \
+ .modified = 1, \
+ .channel2 = mod, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE) |\
+ BIT(IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY),\
+ .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ),\
+ .address = index, \
+ .scan_index = index, \
+ .scan_type = { \
+ .sign = 's', \
+ .realbits = 13, \
+ .storagebits = 16, \
+ .shift = 3, \
+ .endianness = IIO_BE, \
+ }, \
+ .event_spec = &sca3000_event, \
+ .num_event_specs = 1, \
+ }
+
+static const struct iio_event_spec sca3000_freefall_event_spec = {
+ .type = IIO_EV_TYPE_MAG,
+ .dir = IIO_EV_DIR_FALLING,
+ .mask_separate = BIT(IIO_EV_INFO_ENABLE) |
+ BIT(IIO_EV_INFO_PERIOD),
+};
+
+static const struct iio_chan_spec sca3000_channels[] = {
+ SCA3000_CHAN(0, IIO_MOD_X),
+ SCA3000_CHAN(1, IIO_MOD_Y),
+ SCA3000_CHAN(2, IIO_MOD_Z),
+ {
+ .type = IIO_ACCEL,
+ .modified = 1,
+ .channel2 = IIO_MOD_X_AND_Y_AND_Z,
+ .scan_index = -1, /* Fake channel */
+ .event_spec = &sca3000_freefall_event_spec,
+ .num_event_specs = 1,
+ },
+};
+
+static const struct iio_chan_spec sca3000_channels_with_temp[] = {
+ SCA3000_CHAN(0, IIO_MOD_X),
+ SCA3000_CHAN(1, IIO_MOD_Y),
+ SCA3000_CHAN(2, IIO_MOD_Z),
+ {
+ .type = IIO_TEMP,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE) |
+ BIT(IIO_CHAN_INFO_OFFSET),
+ /* No buffer support */
+ .scan_index = -1,
+ },
+ {
+ .type = IIO_ACCEL,
+ .modified = 1,
+ .channel2 = IIO_MOD_X_AND_Y_AND_Z,
+ .scan_index = -1, /* Fake channel */
+ .event_spec = &sca3000_freefall_event_spec,
+ .num_event_specs = 1,
+ },
+};
+
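+/*
+ * Per axis lookup: data MSB register address, motion detector
+ * threshold ctrl register selector, and the matching MD ctrl
+ * OR-combination bit.
+ */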
+static u8 sca3000_addresses[3][3] = {
+ [0] = {SCA3000_REG_X_MSB_ADDR, SCA3000_REG_CTRL_SEL_MD_X_TH,
+ SCA3000_MD_CTRL_OR_X},
+ [1] = {SCA3000_REG_Y_MSB_ADDR, SCA3000_REG_CTRL_SEL_MD_Y_TH,
+ SCA3000_MD_CTRL_OR_Y},
+ [2] = {SCA3000_REG_Z_MSB_ADDR, SCA3000_REG_CTRL_SEL_MD_Z_TH,
+ SCA3000_MD_CTRL_OR_Z},
+};
+
+/**
+ * __sca3000_get_base_freq() - obtain mode specific base frequency
+ * @st: Private driver specific device instance specific state.
+ * @info: chip type specific information.
+ * @base_freq: Base frequency for the current measurement mode.
+ *
+ * lock must be held
+ */
+static inline int __sca3000_get_base_freq(struct sca3000_state *st,
+ const struct sca3000_chip_info *info,
+ int *base_freq)
+{
+ int ret;
+
+ ret = sca3000_read_data_short(st, SCA3000_REG_MODE_ADDR, 1);
+ if (ret)
+ goto error_ret;
+ switch (SCA3000_REG_MODE_MODE_MASK & st->rx[0]) {
+ case SCA3000_REG_MODE_MEAS_MODE_NORMAL:
+ *base_freq = info->measurement_mode_freq;
+ break;
+ case SCA3000_REG_MODE_MEAS_MODE_OP_1:
+ *base_freq = info->option_mode_1_freq;
+ break;
+ case SCA3000_REG_MODE_MEAS_MODE_OP_2:
+ *base_freq = info->option_mode_2_freq;
+ break;
+ default:
+ ret = -EINVAL;
+ }
+error_ret:
+ return ret;
+}
+
+/**
+ * sca3000_read_raw_samp_freq() - read_raw handler for IIO_CHAN_INFO_SAMP_FREQ
+ * @st: Private driver specific device instance specific state.
+ * @val: The frequency read back.
+ *
+ * lock must be held
+ **/
+static int sca3000_read_raw_samp_freq(struct sca3000_state *st, int *val)
+{
+ int ret;
+
+ ret = __sca3000_get_base_freq(st, st->info, val);
+ if (ret)
+ return ret;
+
+ ret = sca3000_read_ctrl_reg(st, SCA3000_REG_CTRL_SEL_OUT_CTRL);
+ if (ret < 0)
+ return ret;
+
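+ /* The output buffer divider further reduces the effective rate */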
+ if (*val > 0) {
+ ret &= SCA3000_REG_OUT_CTRL_BUF_DIV_MASK;
+ switch (ret) {
+ case SCA3000_REG_OUT_CTRL_BUF_DIV_2:
+ *val /= 2;
+ break;
+ case SCA3000_REG_OUT_CTRL_BUF_DIV_4:
+ *val /= 4;
+ break;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * sca3000_write_raw_samp_freq() - write_raw handler for IIO_CHAN_INFO_SAMP_FREQ
+ * @st: Private driver specific device instance specific state.
+ * @val: The frequency desired.
+ *
+ * lock must be held
+ */
+static int sca3000_write_raw_samp_freq(struct sca3000_state *st, int val)
+{
+ int ret, base_freq, ctrlval;
+
+ ret = __sca3000_get_base_freq(st, st->info, &base_freq);
+ if (ret)
+ return ret;
+
+ ret = sca3000_read_ctrl_reg(st, SCA3000_REG_CTRL_SEL_OUT_CTRL);
+ if (ret < 0)
+ return ret;
+
+ ctrlval = ret & ~SCA3000_REG_OUT_CTRL_BUF_DIV_MASK;
+
+ if (val == base_freq / 2)
+ ctrlval |= SCA3000_REG_OUT_CTRL_BUF_DIV_2;
+ else if (val == base_freq / 4)
+ ctrlval |= SCA3000_REG_OUT_CTRL_BUF_DIV_4;
+ else if (val != base_freq)
+ return -EINVAL;
+
+ return sca3000_write_ctrl_reg(st, SCA3000_REG_CTRL_SEL_OUT_CTRL,
+ ctrlval);
+}
+
+static int sca3000_read_3db_freq(struct sca3000_state *st, int *val)
+{
+ int ret;
+
+ ret = sca3000_read_data_short(st, SCA3000_REG_MODE_ADDR, 1);
+ if (ret)
+ return ret;
+
+ /* mask bottom 2 bits - the only ones that are relevant */
+ st->rx[0] &= SCA3000_REG_MODE_MODE_MASK;
+ switch (st->rx[0]) {
+ case SCA3000_REG_MODE_MEAS_MODE_NORMAL:
+ *val = st->info->measurement_mode_3db_freq;
+ return IIO_VAL_INT;
+ case SCA3000_REG_MODE_MEAS_MODE_MOT_DET:
+ return -EBUSY;
+ case SCA3000_REG_MODE_MEAS_MODE_OP_1:
+ *val = st->info->option_mode_1_3db_freq;
+ return IIO_VAL_INT;
+ case SCA3000_REG_MODE_MEAS_MODE_OP_2:
+ *val = st->info->option_mode_2_3db_freq;
+ return IIO_VAL_INT;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int sca3000_write_3db_freq(struct sca3000_state *st, int val)
+{
+ int ret;
+ int mode;
+
+ if (val == st->info->measurement_mode_3db_freq)
+ mode = SCA3000_REG_MODE_MEAS_MODE_NORMAL;
+ else if (st->info->option_mode_1 &&
+ (val == st->info->option_mode_1_3db_freq))
+ mode = SCA3000_REG_MODE_MEAS_MODE_OP_1;
+ else if (st->info->option_mode_2 &&
+ (val == st->info->option_mode_2_3db_freq))
+ mode = SCA3000_REG_MODE_MEAS_MODE_OP_2;
+ else
+ return -EINVAL;
+ ret = sca3000_read_data_short(st, SCA3000_REG_MODE_ADDR, 1);
+ if (ret)
+ return ret;
+
+ st->rx[0] &= ~SCA3000_REG_MODE_MODE_MASK;
+ st->rx[0] |= (mode & SCA3000_REG_MODE_MODE_MASK);
+
+ return sca3000_write_reg(st, SCA3000_REG_MODE_ADDR, st->rx[0]);
+}
+
+static int sca3000_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val,
+ int *val2,
+ long mask)
+{
+ struct sca3000_state *st = iio_priv(indio_dev);
+ int ret;
+ u8 address;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ mutex_lock(&st->lock);
+ if (chan->type == IIO_ACCEL) {
+ if (st->mo_det_use_count) {
+ mutex_unlock(&st->lock);
+ return -EBUSY;
+ }
+ address = sca3000_addresses[chan->address][0];
+ ret = sca3000_read_data_short(st, address, 2);
+ if (ret < 0) {
+ mutex_unlock(&st->lock);
+ return ret;
+ }
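+ /* 13-bit sample in bits 15:3; mask it out, then sign-extend */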
+ *val = (be16_to_cpup((__be16 *)st->rx) >> 3) & 0x1FFF;
+ *val = ((*val) << (sizeof(*val) * 8 - 13)) >>
+ (sizeof(*val) * 8 - 13);
+ } else {
+ /* get the temperature when available */
+ ret = sca3000_read_data_short(st,
+ SCA3000_REG_TEMP_MSB_ADDR,
+ 2);
+ if (ret < 0) {
+ mutex_unlock(&st->lock);
+ return ret;
+ }
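+ /* 9-bit temperature: 6 MSBs in rx[0], 3 LSBs in the top of rx[1] */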
+ *val = ((st->rx[0] & 0x3F) << 3) |
+ ((st->rx[1] & 0xE0) >> 5);
+ }
+ mutex_unlock(&st->lock);
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_SCALE:
+ *val = 0;
+ if (chan->type == IIO_ACCEL)
+ *val2 = st->info->scale;
+ else /* temperature */
+ *val2 = 555556;
+ return IIO_VAL_INT_PLUS_MICRO;
+ case IIO_CHAN_INFO_OFFSET:
+ *val = -214;
+ *val2 = 600000;
+ return IIO_VAL_INT_PLUS_MICRO;
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ mutex_lock(&st->lock);
+ ret = sca3000_read_raw_samp_freq(st, val);
+ mutex_unlock(&st->lock);
+ return ret ? ret : IIO_VAL_INT;
+ case IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY:
+ mutex_lock(&st->lock);
+ ret = sca3000_read_3db_freq(st, val);
+ mutex_unlock(&st->lock);
+ return ret;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int sca3000_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val, int val2, long mask)
+{
+ struct sca3000_state *st = iio_priv(indio_dev);
+ int ret;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ if (val2)
+ return -EINVAL;
+ mutex_lock(&st->lock);
+ ret = sca3000_write_raw_samp_freq(st, val);
+ mutex_unlock(&st->lock);
+ return ret;
+ case IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY:
+ if (val2)
+ return -EINVAL;
+ mutex_lock(&st->lock);
+ ret = sca3000_write_3db_freq(st, val);
+ mutex_unlock(&st->lock);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return ret;
+}
+
+/**
+ * sca3000_read_av_freq() - sysfs function to get available frequencies
+ * @dev: Device structure for this device.
+ * @attr: Description of the attribute.
+ * @buf: Output buffer for the available frequencies.
+ *
+ * The lower frequencies are only relevant to the ring buffer and depend on
+ * the current mode. Note that the data sheet gives rather wide tolerances
+ * for these, so integer division gives a good enough answer, and not all
+ * chips have them specified at all.
+ **/
+static ssize_t sca3000_read_av_freq(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+ struct sca3000_state *st = iio_priv(indio_dev);
+ int len = 0, ret, val;
+
+ mutex_lock(&st->lock);
+ ret = sca3000_read_data_short(st, SCA3000_REG_MODE_ADDR, 1);
+ val = st->rx[0];
+ mutex_unlock(&st->lock);
+ if (ret)
+ goto error_ret;
+
+ switch (val & SCA3000_REG_MODE_MODE_MASK) {
+ case SCA3000_REG_MODE_MEAS_MODE_NORMAL:
+ len += sprintf(buf + len, "%d %d %d\n",
+ st->info->measurement_mode_freq,
+ st->info->measurement_mode_freq / 2,
+ st->info->measurement_mode_freq / 4);
+ break;
+ case SCA3000_REG_MODE_MEAS_MODE_OP_1:
+ len += sprintf(buf + len, "%d %d %d\n",
+ st->info->option_mode_1_freq,
+ st->info->option_mode_1_freq / 2,
+ st->info->option_mode_1_freq / 4);
+ break;
+ case SCA3000_REG_MODE_MEAS_MODE_OP_2:
+ len += sprintf(buf + len, "%d %d %d\n",
+ st->info->option_mode_2_freq,
+ st->info->option_mode_2_freq / 2,
+ st->info->option_mode_2_freq / 4);
+ break;
+ }
+ return len;
+error_ret:
+ return ret;
+}
+
+/*
+ * Should only really be registered if ring buffer support is compiled in.
+ * It does no harm, however, and doing this properly would add a fair bit
+ * of complexity.
+ */
+static IIO_DEV_ATTR_SAMP_FREQ_AVAIL(sca3000_read_av_freq);
+
+/**
+ * sca3000_read_event_value() - query of a threshold or period
+ **/
+static int sca3000_read_event_value(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir,
+ enum iio_event_info info,
+ int *val, int *val2)
+{
+ int ret, i;
+ struct sca3000_state *st = iio_priv(indio_dev);
+
+ switch (info) {
+ case IIO_EV_INFO_VALUE:
+ mutex_lock(&st->lock);
+ ret = sca3000_read_ctrl_reg(st,
+ sca3000_addresses[chan->address][1]);
+ mutex_unlock(&st->lock);
+ if (ret < 0)
+ return ret;
+ *val = 0;
+ if (chan->channel2 == IIO_MOD_Y)
+ for_each_set_bit(i, (unsigned long *)&ret,
+ ARRAY_SIZE(st->info->mot_det_mult_y))
+ *val += st->info->mot_det_mult_y[i];
+ else
+ for_each_set_bit(i, (unsigned long *)&ret,
+ ARRAY_SIZE(st->info->mot_det_mult_xz))
+ *val += st->info->mot_det_mult_xz[i];
+
+ return IIO_VAL_INT;
+ case IIO_EV_INFO_PERIOD:
+ *val = 0;
+ *val2 = 226000;
+ return IIO_VAL_INT_PLUS_MICRO;
+ default:
+ return -EINVAL;
+ }
+}
+
+/**
+ * sca3000_write_event_value() - control of threshold and period
+ * @indio_dev: Device instance specific IIO information.
+ * @chan: Description of the channel for which the event is being
+ * configured.
+ * @type: The type of event being configured, here magnitude rising
+ * as everything else is read only.
+ * @dir: Direction of the event (here rising)
+ * @info: What information about the event are we configuring.
+ * Here the threshold only.
+ * @val: Integer part of the value being written.
+ * @val2: Non integer part of the value being written. Here always 0.
+ */
+static int sca3000_write_event_value(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir,
+ enum iio_event_info info,
+ int val, int val2)
+{
+ struct sca3000_state *st = iio_priv(indio_dev);
+ int ret;
+ int i;
+ u8 nonlinear = 0;
+
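+ /*
+ * Greedily decompose the requested threshold into the per-bit
+ * multiplier values, largest first; each accepted multiplier sets
+ * the matching bit in the nonlinear threshold register value.
+ */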
+ if (chan->channel2 == IIO_MOD_Y) {
+ i = ARRAY_SIZE(st->info->mot_det_mult_y);
+ while (i > 0)
+ if (val >= st->info->mot_det_mult_y[--i]) {
+ nonlinear |= (1 << i);
+ val -= st->info->mot_det_mult_y[i];
+ }
+ } else {
+ i = ARRAY_SIZE(st->info->mot_det_mult_xz);
+ while (i > 0)
+ if (val >= st->info->mot_det_mult_xz[--i]) {
+ nonlinear |= (1 << i);
+ val -= st->info->mot_det_mult_xz[i];
+ }
+ }
+
+ mutex_lock(&st->lock);
+ ret = sca3000_write_ctrl_reg(st,
+ sca3000_addresses[chan->address][1],
+ nonlinear);
+ mutex_unlock(&st->lock);
+
+ return ret;
+}
+
+static struct attribute *sca3000_attributes[] = {
+ &iio_dev_attr_in_accel_filter_low_pass_3db_frequency_available.dev_attr.attr,
+ &iio_dev_attr_sampling_frequency_available.dev_attr.attr,
+ NULL,
+};
+
+static const struct attribute_group sca3000_attribute_group = {
+ .attrs = sca3000_attributes,
+};
+
+static int sca3000_read_data(struct sca3000_state *st,
+ u8 reg_address_high,
+ u8 *rx,
+ int len)
+{
+ int ret;
+ struct spi_transfer xfer[2] = {
+ {
+ .len = 1,
+ .tx_buf = st->tx,
+ }, {
+ .len = len,
+ .rx_buf = rx,
+ }
+ };
+
+ st->tx[0] = SCA3000_READ_REG(reg_address_high);
+ ret = spi_sync_transfer(st->us, xfer, ARRAY_SIZE(xfer));
+ if (ret) {
+ dev_err(&st->us->dev, "problem reading register\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+/**
+ * sca3000_ring_int_process() - ring specific interrupt handling.
+ * @val: Value of the interrupt status register.
+ * @indio_dev: Device instance specific IIO device structure.
+ */
+static void sca3000_ring_int_process(u8 val, struct iio_dev *indio_dev)
+{
+ struct sca3000_state *st = iio_priv(indio_dev);
+ int ret, i, num_available;
+
+ mutex_lock(&st->lock);
+
+ if (val & SCA3000_REG_INT_STATUS_HALF) {
+ ret = sca3000_read_data_short(st, SCA3000_REG_BUF_COUNT_ADDR,
+ 1);
+ if (ret)
+ goto error_ret;
+ num_available = st->rx[0];
+ /*
+ * num_available is the total number of samples available
+ * i.e. number of time points * number of channels.
+ */
+ ret = sca3000_read_data(st, SCA3000_REG_RING_OUT_ADDR, st->rx,
+ num_available * 2);
+ if (ret)
+ goto error_ret;
+ for (i = 0; i < num_available / 3; i++) {
+ /*
+ * Dirty hack to cover for 11 bit in fifo, 13 bit
+ * direct reading.
+ *
+ * In theory the bottom two bits are undefined.
+ * In reality they appear to always be 0.
+ */
+ iio_push_to_buffers(indio_dev, st->rx + i * 3 * 2);
+ }
+ }
+error_ret:
+ mutex_unlock(&st->lock);
+}
+
+/**
+ * sca3000_event_handler() - handling ring and non ring events
+ * @irq: The irq being handled.
+ * @private: struct iio_dev pointer for the device.
+ *
+ * Interrupt handler for both ring and non ring events. Depending on the
+ * event, data is pushed to the ring buffer or an event is pushed to the
+ * event chrdev.
+ *
+ * This function is complicated by the fact that the devices can signify ring
+ * and non ring events via the same interrupt line and they can only
+ * be distinguished via a read of the relevant status register.
+ */
+static irqreturn_t sca3000_event_handler(int irq, void *private)
+{
+ struct iio_dev *indio_dev = private;
+ struct sca3000_state *st = iio_priv(indio_dev);
+ int ret, val;
+ s64 last_timestamp = iio_get_time_ns(indio_dev);
+
+ /*
+ * If badly timed, this could lead to an extra read of the status
+ * reg, but it ensures no interrupt is missed.
+ */
+ mutex_lock(&st->lock);
+ ret = sca3000_read_data_short(st, SCA3000_REG_INT_STATUS_ADDR, 1);
+ val = st->rx[0];
+ mutex_unlock(&st->lock);
+ if (ret)
+ goto done;
+
+ sca3000_ring_int_process(val, indio_dev);
+
+ if (val & SCA3000_INT_STATUS_FREE_FALL)
+ iio_push_event(indio_dev,
+ IIO_MOD_EVENT_CODE(IIO_ACCEL,
+ 0,
+ IIO_MOD_X_AND_Y_AND_Z,
+ IIO_EV_TYPE_MAG,
+ IIO_EV_DIR_FALLING),
+ last_timestamp);
+
+ if (val & SCA3000_INT_STATUS_Y_TRIGGER)
+ iio_push_event(indio_dev,
+ IIO_MOD_EVENT_CODE(IIO_ACCEL,
+ 0,
+ IIO_MOD_Y,
+ IIO_EV_TYPE_MAG,
+ IIO_EV_DIR_RISING),
+ last_timestamp);
+
+ if (val & SCA3000_INT_STATUS_X_TRIGGER)
+ iio_push_event(indio_dev,
+ IIO_MOD_EVENT_CODE(IIO_ACCEL,
+ 0,
+ IIO_MOD_X,
+ IIO_EV_TYPE_MAG,
+ IIO_EV_DIR_RISING),
+ last_timestamp);
+
+ if (val & SCA3000_INT_STATUS_Z_TRIGGER)
+ iio_push_event(indio_dev,
+ IIO_MOD_EVENT_CODE(IIO_ACCEL,
+ 0,
+ IIO_MOD_Z,
+ IIO_EV_TYPE_MAG,
+ IIO_EV_DIR_RISING),
+ last_timestamp);
+
+done:
+ return IRQ_HANDLED;
+}
+
+/**
+ * sca3000_read_event_config() - query which events are enabled
+ **/
+static int sca3000_read_event_config(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir)
+{
+ struct sca3000_state *st = iio_priv(indio_dev);
+ int ret;
+ /* read current value of mode register */
+ mutex_lock(&st->lock);
+
+ ret = sca3000_read_data_short(st, SCA3000_REG_MODE_ADDR, 1);
+ if (ret)
+ goto error_ret;
+
+ switch (chan->channel2) {
+ case IIO_MOD_X_AND_Y_AND_Z:
+ ret = !!(st->rx[0] & SCA3000_REG_MODE_FREE_FALL_DETECT);
+ break;
+ case IIO_MOD_X:
+ case IIO_MOD_Y:
+ case IIO_MOD_Z:
+ /*
+ * Motion detection mode cannot run at the same time as
+ * acceleration data being read.
+ */
+ if ((st->rx[0] & SCA3000_REG_MODE_MODE_MASK)
+ != SCA3000_REG_MODE_MEAS_MODE_MOT_DET) {
+ ret = 0;
+ } else {
+ ret = sca3000_read_ctrl_reg(st,
+ SCA3000_REG_CTRL_SEL_MD_CTRL);
+ if (ret < 0)
+ goto error_ret;
+ /* only supporting logical or's for now */
+ ret = !!(ret & sca3000_addresses[chan->address][2]);
+ }
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+error_ret:
+ mutex_unlock(&st->lock);
+
+ return ret;
+}
+
+static int sca3000_freefall_set_state(struct iio_dev *indio_dev, int state)
+{
+ struct sca3000_state *st = iio_priv(indio_dev);
+ int ret;
+
+ /* read current value of mode register */
+ ret = sca3000_read_data_short(st, SCA3000_REG_MODE_ADDR, 1);
+ if (ret)
+ return ret;
+
+ /* if off and should be on */
+ if (state && !(st->rx[0] & SCA3000_REG_MODE_FREE_FALL_DETECT))
+ return sca3000_write_reg(st, SCA3000_REG_MODE_ADDR,
+ st->rx[0] | SCA3000_REG_MODE_FREE_FALL_DETECT);
+ /* if on and should be off */
+ else if (!state && (st->rx[0] & SCA3000_REG_MODE_FREE_FALL_DETECT))
+ return sca3000_write_reg(st, SCA3000_REG_MODE_ADDR,
+ st->rx[0] & ~SCA3000_REG_MODE_FREE_FALL_DETECT);
+ else
+ return 0;
+}
+
+static int sca3000_motion_detect_set_state(struct iio_dev *indio_dev, int axis,
+ int state)
+{
+ struct sca3000_state *st = iio_priv(indio_dev);
+ int ret, ctrlval;
+
+ /*
+ * First read the motion detector config to find out if
+ * this axis is on
+ */
+ ret = sca3000_read_ctrl_reg(st, SCA3000_REG_CTRL_SEL_MD_CTRL);
+ if (ret < 0)
+ return ret;
+ ctrlval = ret;
+ /* if off and should be on */
+ if (state && !(ctrlval & sca3000_addresses[axis][2])) {
+ ret = sca3000_write_ctrl_reg(st,
+ SCA3000_REG_CTRL_SEL_MD_CTRL,
+ ctrlval |
+ sca3000_addresses[axis][2]);
+ if (ret)
+ return ret;
+ st->mo_det_use_count++;
+ } else if (!state && (ctrlval & sca3000_addresses[axis][2])) {
+ ret = sca3000_write_ctrl_reg(st,
+ SCA3000_REG_CTRL_SEL_MD_CTRL,
+ ctrlval &
+ ~(sca3000_addresses[axis][2]));
+ if (ret)
+ return ret;
+ st->mo_det_use_count--;
+ }
+
+ /* read current value of mode register */
+ ret = sca3000_read_data_short(st, SCA3000_REG_MODE_ADDR, 1);
+ if (ret)
+ return ret;
+ /* if off and should be on */
+ if ((st->mo_det_use_count) &&
+ ((st->rx[0] & SCA3000_REG_MODE_MODE_MASK)
+ != SCA3000_REG_MODE_MEAS_MODE_MOT_DET))
+ return sca3000_write_reg(st, SCA3000_REG_MODE_ADDR,
+ (st->rx[0] & ~SCA3000_REG_MODE_MODE_MASK)
+ | SCA3000_REG_MODE_MEAS_MODE_MOT_DET);
+ /* if on and should be off */
+ else if (!(st->mo_det_use_count) &&
+ ((st->rx[0] & SCA3000_REG_MODE_MODE_MASK)
+ == SCA3000_REG_MODE_MEAS_MODE_MOT_DET))
+ return sca3000_write_reg(st, SCA3000_REG_MODE_ADDR,
+ st->rx[0] & ~SCA3000_REG_MODE_MODE_MASK);
+ else
+ return 0;
+}
+
+/**
+ * sca3000_write_event_config() - simple on off control for motion detector
+ * @indio_dev: IIO device instance specific structure. Data specific to this
+ * particular driver may be accessed via iio_priv(indio_dev).
+ * @chan: Description of the channel whose event we are configuring.
+ * @type: The type of event.
+ * @dir: The direction of the event.
+ * @state: Desired state of event being configured.
+ *
+ * This is a per axis control, but enabling any will result in the
+ * motion detector unit being enabled.
+ * N.B. enabling motion detector stops normal data acquisition.
+ * There is a complexity in knowing which mode to return to when
+ * this mode is disabled. Currently normal mode is assumed.
+ **/
+static int sca3000_write_event_config(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir,
+ int state)
+{
+ struct sca3000_state *st = iio_priv(indio_dev);
+ int ret;
+
+ mutex_lock(&st->lock);
+ switch (chan->channel2) {
+ case IIO_MOD_X_AND_Y_AND_Z:
+ ret = sca3000_freefall_set_state(indio_dev, state);
+ break;
+
+ case IIO_MOD_X:
+ case IIO_MOD_Y:
+ case IIO_MOD_Z:
+ ret = sca3000_motion_detect_set_state(indio_dev,
+ chan->address,
+ state);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+ mutex_unlock(&st->lock);
+
+ return ret;
+}
+
+static int sca3000_configure_ring(struct iio_dev *indio_dev)
+{
+ struct iio_buffer *buffer;
+
+ buffer = iio_kfifo_allocate();
+ if (!buffer)
+ return -ENOMEM;
+
+ iio_device_attach_buffer(indio_dev, buffer);
+ indio_dev->modes |= INDIO_BUFFER_SOFTWARE;
+
+ return 0;
+}
+
+static void sca3000_unconfigure_ring(struct iio_dev *indio_dev)
+{
+ iio_kfifo_free(indio_dev->buffer);
+}
+
+static inline
+int __sca3000_hw_ring_state_set(struct iio_dev *indio_dev, bool state)
+{
+ struct sca3000_state *st = iio_priv(indio_dev);
+ int ret;
+
+ mutex_lock(&st->lock);
+ ret = sca3000_read_data_short(st, SCA3000_REG_MODE_ADDR, 1);
+ if (ret)
+ goto error_ret;
+ if (state) {
+ dev_info(&indio_dev->dev, "supposedly enabling ring buffer\n");
+ ret = sca3000_write_reg(st,
+ SCA3000_REG_MODE_ADDR,
+ (st->rx[0] | SCA3000_REG_MODE_RING_BUF_ENABLE));
+ } else
+ ret = sca3000_write_reg(st,
+ SCA3000_REG_MODE_ADDR,
+ (st->rx[0] & ~SCA3000_REG_MODE_RING_BUF_ENABLE));
+error_ret:
+ mutex_unlock(&st->lock);
+
+ return ret;
+}
+
+/**
+ * sca3000_hw_ring_preenable() - hw ring buffer preenable function
+ * @indio_dev: structure representing the IIO device. Device instance
+ * specific state can be accessed via iio_priv(indio_dev).
+ *
+ * Very simple enable function as the chip allows normal reads
+ * during ring buffer operation, so as long as it is indeed running
+ * before we notify the core, the precise ordering does not matter.
+ */
+static int sca3000_hw_ring_preenable(struct iio_dev *indio_dev)
+{
+ int ret;
+ struct sca3000_state *st = iio_priv(indio_dev);
+
+ mutex_lock(&st->lock);
+
+ /* Enable the 50% full interrupt */
+ ret = sca3000_read_data_short(st, SCA3000_REG_INT_MASK_ADDR, 1);
+ if (ret)
+ goto error_unlock;
+ ret = sca3000_write_reg(st,
+ SCA3000_REG_INT_MASK_ADDR,
+ st->rx[0] | SCA3000_REG_INT_MASK_RING_HALF);
+ if (ret)
+ goto error_unlock;
+
+ mutex_unlock(&st->lock);
+
+ return __sca3000_hw_ring_state_set(indio_dev, 1);
+
+error_unlock:
+ mutex_unlock(&st->lock);
+
+ return ret;
+}
+
+static int sca3000_hw_ring_postdisable(struct iio_dev *indio_dev)
+{
+ int ret;
+ struct sca3000_state *st = iio_priv(indio_dev);
+
+ ret = __sca3000_hw_ring_state_set(indio_dev, 0);
+ if (ret)
+ return ret;
+
+ /* Disable the 50% full interrupt */
+ mutex_lock(&st->lock);
+
+ ret = sca3000_read_data_short(st, SCA3000_REG_INT_MASK_ADDR, 1);
+ if (ret)
+ goto unlock;
+ ret = sca3000_write_reg(st,
+ SCA3000_REG_INT_MASK_ADDR,
+ st->rx[0] & ~SCA3000_REG_INT_MASK_RING_HALF);
+unlock:
+ mutex_unlock(&st->lock);
+ return ret;
+}
+
+static const struct iio_buffer_setup_ops sca3000_ring_setup_ops = {
+ .preenable = &sca3000_hw_ring_preenable,
+ .postdisable = &sca3000_hw_ring_postdisable,
+};
+
+/**
+ * sca3000_clean_setup() - get the device into a predictable state
+ * @st: Device instance specific private data structure
+ *
+ * Devices use flash memory to store many of the register values
+ * and hence can come up in somewhat unpredictable states.
+ * Reset everything on driver load to be safe.
+ */
+static int sca3000_clean_setup(struct sca3000_state *st)
+{
+ int ret;
+
+ mutex_lock(&st->lock);
+ /* Ensure all interrupts have been acknowledged */
+ ret = sca3000_read_data_short(st, SCA3000_REG_INT_STATUS_ADDR, 1);
+ if (ret)
+ goto error_ret;
+
+ /* Turn off all motion detection channels */
+ ret = sca3000_read_ctrl_reg(st, SCA3000_REG_CTRL_SEL_MD_CTRL);
+ if (ret < 0)
+ goto error_ret;
+ ret = sca3000_write_ctrl_reg(st, SCA3000_REG_CTRL_SEL_MD_CTRL,
+ ret & SCA3000_MD_CTRL_PROT_MASK);
+ if (ret)
+ goto error_ret;
+
+ /* Disable ring buffer */
+ ret = sca3000_read_ctrl_reg(st, SCA3000_REG_CTRL_SEL_OUT_CTRL);
+ if (ret < 0)
+ goto error_ret;
+ ret = sca3000_write_ctrl_reg(st, SCA3000_REG_CTRL_SEL_OUT_CTRL,
+ (ret & SCA3000_REG_OUT_CTRL_PROT_MASK)
+ | SCA3000_REG_OUT_CTRL_BUF_X_EN
+ | SCA3000_REG_OUT_CTRL_BUF_Y_EN
+ | SCA3000_REG_OUT_CTRL_BUF_Z_EN
+ | SCA3000_REG_OUT_CTRL_BUF_DIV_4);
+ if (ret)
+ goto error_ret;
+ /* Enable interrupts, relevant to mode and set up as active low */
+ ret = sca3000_read_data_short(st, SCA3000_REG_INT_MASK_ADDR, 1);
+ if (ret)
+ goto error_ret;
+ ret = sca3000_write_reg(st,
+ SCA3000_REG_INT_MASK_ADDR,
+ (ret & SCA3000_REG_INT_MASK_PROT_MASK)
+ | SCA3000_REG_INT_MASK_ACTIVE_LOW);
+ if (ret)
+ goto error_ret;
+ /*
+ * Select normal measurement mode, free fall off, ring off
+ * Ring in 12 bit mode - it is fine to overwrite reserved bits 3,5
+ * as that occurs in one of the examples in the datasheet
+ */
+ ret = sca3000_read_data_short(st, SCA3000_REG_MODE_ADDR, 1);
+ if (ret)
+ goto error_ret;
+ ret = sca3000_write_reg(st, SCA3000_REG_MODE_ADDR,
+ (st->rx[0] & SCA3000_MODE_PROT_MASK));
+
+error_ret:
+ mutex_unlock(&st->lock);
+ return ret;
+}
+
+static const struct iio_info sca3000_info = {
+ .attrs = &sca3000_attribute_group,
+ .read_raw = &sca3000_read_raw,
+ .write_raw = &sca3000_write_raw,
+ .read_event_value = &sca3000_read_event_value,
+ .write_event_value = &sca3000_write_event_value,
+ .read_event_config = &sca3000_read_event_config,
+ .write_event_config = &sca3000_write_event_config,
+ .driver_module = THIS_MODULE,
+};
+
+static int sca3000_probe(struct spi_device *spi)
+{
+ int ret;
+ struct sca3000_state *st;
+ struct iio_dev *indio_dev;
+
+ indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ st = iio_priv(indio_dev);
+ spi_set_drvdata(spi, indio_dev);
+ st->us = spi;
+ mutex_init(&st->lock);
+ st->info = &sca3000_spi_chip_info_tbl[spi_get_device_id(spi)
+ ->driver_data];
+
+ indio_dev->dev.parent = &spi->dev;
+ indio_dev->name = spi_get_device_id(spi)->name;
+ indio_dev->info = &sca3000_info;
+ if (st->info->temp_output) {
+ indio_dev->channels = sca3000_channels_with_temp;
+ indio_dev->num_channels =
+ ARRAY_SIZE(sca3000_channels_with_temp);
+ } else {
+ indio_dev->channels = sca3000_channels;
+ indio_dev->num_channels = ARRAY_SIZE(sca3000_channels);
+ }
+ indio_dev->modes = INDIO_DIRECT_MODE;
+
+ sca3000_configure_ring(indio_dev);
+
+ if (spi->irq) {
+ ret = request_threaded_irq(spi->irq,
+ NULL,
+ &sca3000_event_handler,
+ IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+ "sca3000",
+ indio_dev);
+ if (ret)
+ return ret;
+ }
+ indio_dev->setup_ops = &sca3000_ring_setup_ops;
+ ret = sca3000_clean_setup(st);
+ if (ret)
+ goto error_free_irq;
+
+ ret = sca3000_print_rev(indio_dev);
+ if (ret)
+ goto error_free_irq;
+
+ return iio_device_register(indio_dev);
+
+error_free_irq:
+ if (spi->irq)
+ free_irq(spi->irq, indio_dev);
+
+ return ret;
+}
+
+static int sca3000_stop_all_interrupts(struct sca3000_state *st)
+{
+ int ret;
+
+ mutex_lock(&st->lock);
+ ret = sca3000_read_data_short(st, SCA3000_REG_INT_MASK_ADDR, 1);
+ if (ret)
+ goto error_ret;
+ ret = sca3000_write_reg(st, SCA3000_REG_INT_MASK_ADDR,
+ (st->rx[0] &
+ ~(SCA3000_REG_INT_MASK_RING_THREE_QUARTER |
+ SCA3000_REG_INT_MASK_RING_HALF |
+ SCA3000_REG_INT_MASK_ALL_INTS)));
+error_ret:
+ mutex_unlock(&st->lock);
+ return ret;
+}
+
+static int sca3000_remove(struct spi_device *spi)
+{
+ struct iio_dev *indio_dev = spi_get_drvdata(spi);
+ struct sca3000_state *st = iio_priv(indio_dev);
+
+ iio_device_unregister(indio_dev);
+
+ /* Must ensure no interrupts can be generated after this! */
+ sca3000_stop_all_interrupts(st);
+ if (spi->irq)
+ free_irq(spi->irq, indio_dev);
+
+ sca3000_unconfigure_ring(indio_dev);
+
+ return 0;
+}
+
+static const struct spi_device_id sca3000_id[] = {
+ {"sca3000_d01", d01},
+ {"sca3000_e02", e02},
+ {"sca3000_e04", e04},
+ {"sca3000_e05", e05},
+ {}
+};
+MODULE_DEVICE_TABLE(spi, sca3000_id);
+
+static struct spi_driver sca3000_driver = {
+ .driver = {
+ .name = "sca3000",
+ },
+ .probe = sca3000_probe,
+ .remove = sca3000_remove,
+ .id_table = sca3000_id,
+};
+module_spi_driver(sca3000_driver);
+
+MODULE_AUTHOR("Jonathan Cameron <jic23@kernel.org>");
+MODULE_DESCRIPTION("VTI SCA3000 Series Accelerometers SPI driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/accel/st_accel.h b/drivers/iio/accel/st_accel.h
index f8dfdb690563..7c231687109a 100644
--- a/drivers/iio/accel/st_accel.h
+++ b/drivers/iio/accel/st_accel.h
@@ -30,6 +30,7 @@
#define LSM303AGR_ACCEL_DEV_NAME "lsm303agr_accel"
#define LIS2DH12_ACCEL_DEV_NAME "lis2dh12_accel"
#define LIS3L02DQ_ACCEL_DEV_NAME "lis3l02dq"
+#define LNG2DM_ACCEL_DEV_NAME "lng2dm"
/**
* struct st_sensors_platform_data - default accel platform data
diff --git a/drivers/iio/accel/st_accel_core.c b/drivers/iio/accel/st_accel_core.c
index ce69048c88e9..f6b6d42385e1 100644
--- a/drivers/iio/accel/st_accel_core.c
+++ b/drivers/iio/accel/st_accel_core.c
@@ -43,194 +43,6 @@
#define ST_ACCEL_FS_AVL_200G 200
#define ST_ACCEL_FS_AVL_400G 400
-/* CUSTOM VALUES FOR SENSOR 1 */
-#define ST_ACCEL_1_WAI_EXP 0x33
-#define ST_ACCEL_1_ODR_ADDR 0x20
-#define ST_ACCEL_1_ODR_MASK 0xf0
-#define ST_ACCEL_1_ODR_AVL_1HZ_VAL 0x01
-#define ST_ACCEL_1_ODR_AVL_10HZ_VAL 0x02
-#define ST_ACCEL_1_ODR_AVL_25HZ_VAL 0x03
-#define ST_ACCEL_1_ODR_AVL_50HZ_VAL 0x04
-#define ST_ACCEL_1_ODR_AVL_100HZ_VAL 0x05
-#define ST_ACCEL_1_ODR_AVL_200HZ_VAL 0x06
-#define ST_ACCEL_1_ODR_AVL_400HZ_VAL 0x07
-#define ST_ACCEL_1_ODR_AVL_1600HZ_VAL 0x08
-#define ST_ACCEL_1_FS_ADDR 0x23
-#define ST_ACCEL_1_FS_MASK 0x30
-#define ST_ACCEL_1_FS_AVL_2_VAL 0x00
-#define ST_ACCEL_1_FS_AVL_4_VAL 0x01
-#define ST_ACCEL_1_FS_AVL_8_VAL 0x02
-#define ST_ACCEL_1_FS_AVL_16_VAL 0x03
-#define ST_ACCEL_1_FS_AVL_2_GAIN IIO_G_TO_M_S_2(1000)
-#define ST_ACCEL_1_FS_AVL_4_GAIN IIO_G_TO_M_S_2(2000)
-#define ST_ACCEL_1_FS_AVL_8_GAIN IIO_G_TO_M_S_2(4000)
-#define ST_ACCEL_1_FS_AVL_16_GAIN IIO_G_TO_M_S_2(12000)
-#define ST_ACCEL_1_BDU_ADDR 0x23
-#define ST_ACCEL_1_BDU_MASK 0x80
-#define ST_ACCEL_1_DRDY_IRQ_ADDR 0x22
-#define ST_ACCEL_1_DRDY_IRQ_INT1_MASK 0x10
-#define ST_ACCEL_1_DRDY_IRQ_INT2_MASK 0x08
-#define ST_ACCEL_1_IHL_IRQ_ADDR 0x25
-#define ST_ACCEL_1_IHL_IRQ_MASK 0x02
-#define ST_ACCEL_1_MULTIREAD_BIT true
-
-/* CUSTOM VALUES FOR SENSOR 2 */
-#define ST_ACCEL_2_WAI_EXP 0x32
-#define ST_ACCEL_2_ODR_ADDR 0x20
-#define ST_ACCEL_2_ODR_MASK 0x18
-#define ST_ACCEL_2_ODR_AVL_50HZ_VAL 0x00
-#define ST_ACCEL_2_ODR_AVL_100HZ_VAL 0x01
-#define ST_ACCEL_2_ODR_AVL_400HZ_VAL 0x02
-#define ST_ACCEL_2_ODR_AVL_1000HZ_VAL 0x03
-#define ST_ACCEL_2_PW_ADDR 0x20
-#define ST_ACCEL_2_PW_MASK 0xe0
-#define ST_ACCEL_2_FS_ADDR 0x23
-#define ST_ACCEL_2_FS_MASK 0x30
-#define ST_ACCEL_2_FS_AVL_2_VAL 0X00
-#define ST_ACCEL_2_FS_AVL_4_VAL 0X01
-#define ST_ACCEL_2_FS_AVL_8_VAL 0x03
-#define ST_ACCEL_2_FS_AVL_2_GAIN IIO_G_TO_M_S_2(1000)
-#define ST_ACCEL_2_FS_AVL_4_GAIN IIO_G_TO_M_S_2(2000)
-#define ST_ACCEL_2_FS_AVL_8_GAIN IIO_G_TO_M_S_2(3900)
-#define ST_ACCEL_2_BDU_ADDR 0x23
-#define ST_ACCEL_2_BDU_MASK 0x80
-#define ST_ACCEL_2_DRDY_IRQ_ADDR 0x22
-#define ST_ACCEL_2_DRDY_IRQ_INT1_MASK 0x02
-#define ST_ACCEL_2_DRDY_IRQ_INT2_MASK 0x10
-#define ST_ACCEL_2_IHL_IRQ_ADDR 0x22
-#define ST_ACCEL_2_IHL_IRQ_MASK 0x80
-#define ST_ACCEL_2_OD_IRQ_ADDR 0x22
-#define ST_ACCEL_2_OD_IRQ_MASK 0x40
-#define ST_ACCEL_2_MULTIREAD_BIT true
-
-/* CUSTOM VALUES FOR SENSOR 3 */
-#define ST_ACCEL_3_WAI_EXP 0x40
-#define ST_ACCEL_3_ODR_ADDR 0x20
-#define ST_ACCEL_3_ODR_MASK 0xf0
-#define ST_ACCEL_3_ODR_AVL_3HZ_VAL 0x01
-#define ST_ACCEL_3_ODR_AVL_6HZ_VAL 0x02
-#define ST_ACCEL_3_ODR_AVL_12HZ_VAL 0x03
-#define ST_ACCEL_3_ODR_AVL_25HZ_VAL 0x04
-#define ST_ACCEL_3_ODR_AVL_50HZ_VAL 0x05
-#define ST_ACCEL_3_ODR_AVL_100HZ_VAL 0x06
-#define ST_ACCEL_3_ODR_AVL_200HZ_VAL 0x07
-#define ST_ACCEL_3_ODR_AVL_400HZ_VAL 0x08
-#define ST_ACCEL_3_ODR_AVL_800HZ_VAL 0x09
-#define ST_ACCEL_3_ODR_AVL_1600HZ_VAL 0x0a
-#define ST_ACCEL_3_FS_ADDR 0x24
-#define ST_ACCEL_3_FS_MASK 0x38
-#define ST_ACCEL_3_FS_AVL_2_VAL 0X00
-#define ST_ACCEL_3_FS_AVL_4_VAL 0X01
-#define ST_ACCEL_3_FS_AVL_6_VAL 0x02
-#define ST_ACCEL_3_FS_AVL_8_VAL 0x03
-#define ST_ACCEL_3_FS_AVL_16_VAL 0x04
-#define ST_ACCEL_3_FS_AVL_2_GAIN IIO_G_TO_M_S_2(61)
-#define ST_ACCEL_3_FS_AVL_4_GAIN IIO_G_TO_M_S_2(122)
-#define ST_ACCEL_3_FS_AVL_6_GAIN IIO_G_TO_M_S_2(183)
-#define ST_ACCEL_3_FS_AVL_8_GAIN IIO_G_TO_M_S_2(244)
-#define ST_ACCEL_3_FS_AVL_16_GAIN IIO_G_TO_M_S_2(732)
-#define ST_ACCEL_3_BDU_ADDR 0x20
-#define ST_ACCEL_3_BDU_MASK 0x08
-#define ST_ACCEL_3_DRDY_IRQ_ADDR 0x23
-#define ST_ACCEL_3_DRDY_IRQ_INT1_MASK 0x80
-#define ST_ACCEL_3_DRDY_IRQ_INT2_MASK 0x00
-#define ST_ACCEL_3_IHL_IRQ_ADDR 0x23
-#define ST_ACCEL_3_IHL_IRQ_MASK 0x40
-#define ST_ACCEL_3_IG1_EN_ADDR 0x23
-#define ST_ACCEL_3_IG1_EN_MASK 0x08
-#define ST_ACCEL_3_MULTIREAD_BIT false
-
-/* CUSTOM VALUES FOR SENSOR 4 */
-#define ST_ACCEL_4_WAI_EXP 0x3a
-#define ST_ACCEL_4_ODR_ADDR 0x20
-#define ST_ACCEL_4_ODR_MASK 0x30 /* DF1 and DF0 */
-#define ST_ACCEL_4_ODR_AVL_40HZ_VAL 0x00
-#define ST_ACCEL_4_ODR_AVL_160HZ_VAL 0x01
-#define ST_ACCEL_4_ODR_AVL_640HZ_VAL 0x02
-#define ST_ACCEL_4_ODR_AVL_2560HZ_VAL 0x03
-#define ST_ACCEL_4_PW_ADDR 0x20
-#define ST_ACCEL_4_PW_MASK 0xc0
-#define ST_ACCEL_4_FS_ADDR 0x21
-#define ST_ACCEL_4_FS_MASK 0x80
-#define ST_ACCEL_4_FS_AVL_2_VAL 0X00
-#define ST_ACCEL_4_FS_AVL_6_VAL 0X01
-#define ST_ACCEL_4_FS_AVL_2_GAIN IIO_G_TO_M_S_2(1024)
-#define ST_ACCEL_4_FS_AVL_6_GAIN IIO_G_TO_M_S_2(340)
-#define ST_ACCEL_4_BDU_ADDR 0x21
-#define ST_ACCEL_4_BDU_MASK 0x40
-#define ST_ACCEL_4_DRDY_IRQ_ADDR 0x21
-#define ST_ACCEL_4_DRDY_IRQ_INT1_MASK 0x04
-#define ST_ACCEL_4_MULTIREAD_BIT true
-
-/* CUSTOM VALUES FOR SENSOR 5 */
-#define ST_ACCEL_5_WAI_EXP 0x3b
-#define ST_ACCEL_5_ODR_ADDR 0x20
-#define ST_ACCEL_5_ODR_MASK 0x80
-#define ST_ACCEL_5_ODR_AVL_100HZ_VAL 0x00
-#define ST_ACCEL_5_ODR_AVL_400HZ_VAL 0x01
-#define ST_ACCEL_5_PW_ADDR 0x20
-#define ST_ACCEL_5_PW_MASK 0x40
-#define ST_ACCEL_5_FS_ADDR 0x20
-#define ST_ACCEL_5_FS_MASK 0x20
-#define ST_ACCEL_5_FS_AVL_2_VAL 0X00
-#define ST_ACCEL_5_FS_AVL_8_VAL 0X01
-/* TODO: check these resulting gain settings, these are not in the datsheet */
-#define ST_ACCEL_5_FS_AVL_2_GAIN IIO_G_TO_M_S_2(18000)
-#define ST_ACCEL_5_FS_AVL_8_GAIN IIO_G_TO_M_S_2(72000)
-#define ST_ACCEL_5_DRDY_IRQ_ADDR 0x22
-#define ST_ACCEL_5_DRDY_IRQ_INT1_MASK 0x04
-#define ST_ACCEL_5_DRDY_IRQ_INT2_MASK 0x20
-#define ST_ACCEL_5_IHL_IRQ_ADDR 0x22
-#define ST_ACCEL_5_IHL_IRQ_MASK 0x80
-#define ST_ACCEL_5_OD_IRQ_ADDR 0x22
-#define ST_ACCEL_5_OD_IRQ_MASK 0x40
-#define ST_ACCEL_5_IG1_EN_ADDR 0x21
-#define ST_ACCEL_5_IG1_EN_MASK 0x08
-#define ST_ACCEL_5_MULTIREAD_BIT false
-
-/* CUSTOM VALUES FOR SENSOR 6 */
-#define ST_ACCEL_6_WAI_EXP 0x32
-#define ST_ACCEL_6_ODR_ADDR 0x20
-#define ST_ACCEL_6_ODR_MASK 0x18
-#define ST_ACCEL_6_ODR_AVL_50HZ_VAL 0x00
-#define ST_ACCEL_6_ODR_AVL_100HZ_VAL 0x01
-#define ST_ACCEL_6_ODR_AVL_400HZ_VAL 0x02
-#define ST_ACCEL_6_ODR_AVL_1000HZ_VAL 0x03
-#define ST_ACCEL_6_PW_ADDR 0x20
-#define ST_ACCEL_6_PW_MASK 0x20
-#define ST_ACCEL_6_FS_ADDR 0x23
-#define ST_ACCEL_6_FS_MASK 0x30
-#define ST_ACCEL_6_FS_AVL_100_VAL 0x00
-#define ST_ACCEL_6_FS_AVL_200_VAL 0x01
-#define ST_ACCEL_6_FS_AVL_400_VAL 0x03
-#define ST_ACCEL_6_FS_AVL_100_GAIN IIO_G_TO_M_S_2(49000)
-#define ST_ACCEL_6_FS_AVL_200_GAIN IIO_G_TO_M_S_2(98000)
-#define ST_ACCEL_6_FS_AVL_400_GAIN IIO_G_TO_M_S_2(195000)
-#define ST_ACCEL_6_BDU_ADDR 0x23
-#define ST_ACCEL_6_BDU_MASK 0x80
-#define ST_ACCEL_6_DRDY_IRQ_ADDR 0x22
-#define ST_ACCEL_6_DRDY_IRQ_INT1_MASK 0x02
-#define ST_ACCEL_6_DRDY_IRQ_INT2_MASK 0x10
-#define ST_ACCEL_6_IHL_IRQ_ADDR 0x22
-#define ST_ACCEL_6_IHL_IRQ_MASK 0x80
-#define ST_ACCEL_6_MULTIREAD_BIT true
-
-/* CUSTOM VALUES FOR SENSOR 7 */
-#define ST_ACCEL_7_ODR_ADDR 0x20
-#define ST_ACCEL_7_ODR_MASK 0x30
-#define ST_ACCEL_7_ODR_AVL_280HZ_VAL 0x00
-#define ST_ACCEL_7_ODR_AVL_560HZ_VAL 0x01
-#define ST_ACCEL_7_ODR_AVL_1120HZ_VAL 0x02
-#define ST_ACCEL_7_ODR_AVL_4480HZ_VAL 0x03
-#define ST_ACCEL_7_PW_ADDR 0x20
-#define ST_ACCEL_7_PW_MASK 0xc0
-#define ST_ACCEL_7_FS_AVL_2_GAIN IIO_G_TO_M_S_2(488)
-#define ST_ACCEL_7_BDU_ADDR 0x21
-#define ST_ACCEL_7_BDU_MASK 0x40
-#define ST_ACCEL_7_DRDY_IRQ_ADDR 0x21
-#define ST_ACCEL_7_DRDY_IRQ_INT1_MASK 0x04
-#define ST_ACCEL_7_MULTIREAD_BIT false
-
static const struct iio_chan_spec st_accel_8bit_channels[] = {
ST_SENSORS_LSM_CHANNELS(IIO_ACCEL,
BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE),
@@ -281,7 +93,7 @@ static const struct iio_chan_spec st_accel_16bit_channels[] = {
static const struct st_sensor_settings st_accel_sensors_settings[] = {
{
- .wai = ST_ACCEL_1_WAI_EXP,
+ .wai = 0x33,
.wai_addr = ST_SENSORS_DEFAULT_WAI_ADDRESS,
.sensors_supported = {
[0] = LIS3DH_ACCEL_DEV_NAME,
@@ -294,22 +106,22 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = {
},
.ch = (struct iio_chan_spec *)st_accel_12bit_channels,
.odr = {
- .addr = ST_ACCEL_1_ODR_ADDR,
- .mask = ST_ACCEL_1_ODR_MASK,
+ .addr = 0x20,
+ .mask = 0xf0,
.odr_avl = {
- { 1, ST_ACCEL_1_ODR_AVL_1HZ_VAL, },
- { 10, ST_ACCEL_1_ODR_AVL_10HZ_VAL, },
- { 25, ST_ACCEL_1_ODR_AVL_25HZ_VAL, },
- { 50, ST_ACCEL_1_ODR_AVL_50HZ_VAL, },
- { 100, ST_ACCEL_1_ODR_AVL_100HZ_VAL, },
- { 200, ST_ACCEL_1_ODR_AVL_200HZ_VAL, },
- { 400, ST_ACCEL_1_ODR_AVL_400HZ_VAL, },
- { 1600, ST_ACCEL_1_ODR_AVL_1600HZ_VAL, },
+ { .hz = 1, .value = 0x01, },
+ { .hz = 10, .value = 0x02, },
+ { .hz = 25, .value = 0x03, },
+ { .hz = 50, .value = 0x04, },
+ { .hz = 100, .value = 0x05, },
+ { .hz = 200, .value = 0x06, },
+ { .hz = 400, .value = 0x07, },
+ { .hz = 1600, .value = 0x08, },
},
},
.pw = {
- .addr = ST_ACCEL_1_ODR_ADDR,
- .mask = ST_ACCEL_1_ODR_MASK,
+ .addr = 0x20,
+ .mask = 0xf0,
.value_off = ST_SENSORS_DEFAULT_POWER_OFF_VALUE,
},
.enable_axis = {
@@ -317,48 +129,48 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = {
.mask = ST_SENSORS_DEFAULT_AXIS_MASK,
},
.fs = {
- .addr = ST_ACCEL_1_FS_ADDR,
- .mask = ST_ACCEL_1_FS_MASK,
+ .addr = 0x23,
+ .mask = 0x30,
.fs_avl = {
[0] = {
.num = ST_ACCEL_FS_AVL_2G,
- .value = ST_ACCEL_1_FS_AVL_2_VAL,
- .gain = ST_ACCEL_1_FS_AVL_2_GAIN,
+ .value = 0x00,
+ .gain = IIO_G_TO_M_S_2(1000),
},
[1] = {
.num = ST_ACCEL_FS_AVL_4G,
- .value = ST_ACCEL_1_FS_AVL_4_VAL,
- .gain = ST_ACCEL_1_FS_AVL_4_GAIN,
+ .value = 0x01,
+ .gain = IIO_G_TO_M_S_2(2000),
},
[2] = {
.num = ST_ACCEL_FS_AVL_8G,
- .value = ST_ACCEL_1_FS_AVL_8_VAL,
- .gain = ST_ACCEL_1_FS_AVL_8_GAIN,
+ .value = 0x02,
+ .gain = IIO_G_TO_M_S_2(4000),
},
[3] = {
.num = ST_ACCEL_FS_AVL_16G,
- .value = ST_ACCEL_1_FS_AVL_16_VAL,
- .gain = ST_ACCEL_1_FS_AVL_16_GAIN,
+ .value = 0x03,
+ .gain = IIO_G_TO_M_S_2(12000),
},
},
},
.bdu = {
- .addr = ST_ACCEL_1_BDU_ADDR,
- .mask = ST_ACCEL_1_BDU_MASK,
+ .addr = 0x23,
+ .mask = 0x80,
},
.drdy_irq = {
- .addr = ST_ACCEL_1_DRDY_IRQ_ADDR,
- .mask_int1 = ST_ACCEL_1_DRDY_IRQ_INT1_MASK,
- .mask_int2 = ST_ACCEL_1_DRDY_IRQ_INT2_MASK,
- .addr_ihl = ST_ACCEL_1_IHL_IRQ_ADDR,
- .mask_ihl = ST_ACCEL_1_IHL_IRQ_MASK,
+ .addr = 0x22,
+ .mask_int1 = 0x10,
+ .mask_int2 = 0x08,
+ .addr_ihl = 0x25,
+ .mask_ihl = 0x02,
.addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR,
},
- .multi_read_bit = ST_ACCEL_1_MULTIREAD_BIT,
+ .multi_read_bit = true,
.bootime = 2,
},
{
- .wai = ST_ACCEL_2_WAI_EXP,
+ .wai = 0x32,
.wai_addr = ST_SENSORS_DEFAULT_WAI_ADDRESS,
.sensors_supported = {
[0] = LIS331DLH_ACCEL_DEV_NAME,
@@ -368,18 +180,18 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = {
},
.ch = (struct iio_chan_spec *)st_accel_12bit_channels,
.odr = {
- .addr = ST_ACCEL_2_ODR_ADDR,
- .mask = ST_ACCEL_2_ODR_MASK,
+ .addr = 0x20,
+ .mask = 0x18,
.odr_avl = {
- { 50, ST_ACCEL_2_ODR_AVL_50HZ_VAL, },
- { 100, ST_ACCEL_2_ODR_AVL_100HZ_VAL, },
- { 400, ST_ACCEL_2_ODR_AVL_400HZ_VAL, },
- { 1000, ST_ACCEL_2_ODR_AVL_1000HZ_VAL, },
+ { .hz = 50, .value = 0x00, },
+ { .hz = 100, .value = 0x01, },
+ { .hz = 400, .value = 0x02, },
+ { .hz = 1000, .value = 0x03, },
},
},
.pw = {
- .addr = ST_ACCEL_2_PW_ADDR,
- .mask = ST_ACCEL_2_PW_MASK,
+ .addr = 0x20,
+ .mask = 0xe0,
.value_on = ST_SENSORS_DEFAULT_POWER_ON_VALUE,
.value_off = ST_SENSORS_DEFAULT_POWER_OFF_VALUE,
},
@@ -388,69 +200,69 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = {
.mask = ST_SENSORS_DEFAULT_AXIS_MASK,
},
.fs = {
- .addr = ST_ACCEL_2_FS_ADDR,
- .mask = ST_ACCEL_2_FS_MASK,
+ .addr = 0x23,
+ .mask = 0x30,
.fs_avl = {
[0] = {
.num = ST_ACCEL_FS_AVL_2G,
- .value = ST_ACCEL_2_FS_AVL_2_VAL,
- .gain = ST_ACCEL_2_FS_AVL_2_GAIN,
+ .value = 0x00,
+ .gain = IIO_G_TO_M_S_2(1000),
},
[1] = {
.num = ST_ACCEL_FS_AVL_4G,
- .value = ST_ACCEL_2_FS_AVL_4_VAL,
- .gain = ST_ACCEL_2_FS_AVL_4_GAIN,
+ .value = 0x01,
+ .gain = IIO_G_TO_M_S_2(2000),
},
[2] = {
.num = ST_ACCEL_FS_AVL_8G,
- .value = ST_ACCEL_2_FS_AVL_8_VAL,
- .gain = ST_ACCEL_2_FS_AVL_8_GAIN,
+ .value = 0x03,
+ .gain = IIO_G_TO_M_S_2(3900),
},
},
},
.bdu = {
- .addr = ST_ACCEL_2_BDU_ADDR,
- .mask = ST_ACCEL_2_BDU_MASK,
+ .addr = 0x23,
+ .mask = 0x80,
},
.drdy_irq = {
- .addr = ST_ACCEL_2_DRDY_IRQ_ADDR,
- .mask_int1 = ST_ACCEL_2_DRDY_IRQ_INT1_MASK,
- .mask_int2 = ST_ACCEL_2_DRDY_IRQ_INT2_MASK,
- .addr_ihl = ST_ACCEL_2_IHL_IRQ_ADDR,
- .mask_ihl = ST_ACCEL_2_IHL_IRQ_MASK,
- .addr_od = ST_ACCEL_2_OD_IRQ_ADDR,
- .mask_od = ST_ACCEL_2_OD_IRQ_MASK,
+ .addr = 0x22,
+ .mask_int1 = 0x02,
+ .mask_int2 = 0x10,
+ .addr_ihl = 0x22,
+ .mask_ihl = 0x80,
+ .addr_od = 0x22,
+ .mask_od = 0x40,
.addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR,
},
- .multi_read_bit = ST_ACCEL_2_MULTIREAD_BIT,
+ .multi_read_bit = true,
.bootime = 2,
},
{
- .wai = ST_ACCEL_3_WAI_EXP,
+ .wai = 0x40,
.wai_addr = ST_SENSORS_DEFAULT_WAI_ADDRESS,
.sensors_supported = {
[0] = LSM330_ACCEL_DEV_NAME,
},
.ch = (struct iio_chan_spec *)st_accel_16bit_channels,
.odr = {
- .addr = ST_ACCEL_3_ODR_ADDR,
- .mask = ST_ACCEL_3_ODR_MASK,
+ .addr = 0x20,
+ .mask = 0xf0,
.odr_avl = {
- { 3, ST_ACCEL_3_ODR_AVL_3HZ_VAL },
- { 6, ST_ACCEL_3_ODR_AVL_6HZ_VAL, },
- { 12, ST_ACCEL_3_ODR_AVL_12HZ_VAL, },
- { 25, ST_ACCEL_3_ODR_AVL_25HZ_VAL, },
- { 50, ST_ACCEL_3_ODR_AVL_50HZ_VAL, },
- { 100, ST_ACCEL_3_ODR_AVL_100HZ_VAL, },
- { 200, ST_ACCEL_3_ODR_AVL_200HZ_VAL, },
- { 400, ST_ACCEL_3_ODR_AVL_400HZ_VAL, },
- { 800, ST_ACCEL_3_ODR_AVL_800HZ_VAL, },
- { 1600, ST_ACCEL_3_ODR_AVL_1600HZ_VAL, },
+ { .hz = 3, .value = 0x01, },
+ { .hz = 6, .value = 0x02, },
+ { .hz = 12, .value = 0x03, },
+ { .hz = 25, .value = 0x04, },
+ { .hz = 50, .value = 0x05, },
+ { .hz = 100, .value = 0x06, },
+ { .hz = 200, .value = 0x07, },
+ { .hz = 400, .value = 0x08, },
+ { .hz = 800, .value = 0x09, },
+ { .hz = 1600, .value = 0x0a, },
},
},
.pw = {
- .addr = ST_ACCEL_3_ODR_ADDR,
- .mask = ST_ACCEL_3_ODR_MASK,
+ .addr = 0x20,
+ .mask = 0xf0,
.value_off = ST_SENSORS_DEFAULT_POWER_OFF_VALUE,
},
.enable_axis = {
@@ -458,75 +270,75 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = {
.mask = ST_SENSORS_DEFAULT_AXIS_MASK,
},
.fs = {
- .addr = ST_ACCEL_3_FS_ADDR,
- .mask = ST_ACCEL_3_FS_MASK,
+ .addr = 0x24,
+ .mask = 0x38,
.fs_avl = {
[0] = {
.num = ST_ACCEL_FS_AVL_2G,
- .value = ST_ACCEL_3_FS_AVL_2_VAL,
- .gain = ST_ACCEL_3_FS_AVL_2_GAIN,
+ .value = 0x00,
+ .gain = IIO_G_TO_M_S_2(61),
},
[1] = {
.num = ST_ACCEL_FS_AVL_4G,
- .value = ST_ACCEL_3_FS_AVL_4_VAL,
- .gain = ST_ACCEL_3_FS_AVL_4_GAIN,
+ .value = 0x01,
+ .gain = IIO_G_TO_M_S_2(122),
},
[2] = {
.num = ST_ACCEL_FS_AVL_6G,
- .value = ST_ACCEL_3_FS_AVL_6_VAL,
- .gain = ST_ACCEL_3_FS_AVL_6_GAIN,
+ .value = 0x02,
+ .gain = IIO_G_TO_M_S_2(183),
},
[3] = {
.num = ST_ACCEL_FS_AVL_8G,
- .value = ST_ACCEL_3_FS_AVL_8_VAL,
- .gain = ST_ACCEL_3_FS_AVL_8_GAIN,
+ .value = 0x03,
+ .gain = IIO_G_TO_M_S_2(244),
},
[4] = {
.num = ST_ACCEL_FS_AVL_16G,
- .value = ST_ACCEL_3_FS_AVL_16_VAL,
- .gain = ST_ACCEL_3_FS_AVL_16_GAIN,
+ .value = 0x04,
+ .gain = IIO_G_TO_M_S_2(732),
},
},
},
.bdu = {
- .addr = ST_ACCEL_3_BDU_ADDR,
- .mask = ST_ACCEL_3_BDU_MASK,
+ .addr = 0x20,
+ .mask = 0x08,
},
.drdy_irq = {
- .addr = ST_ACCEL_3_DRDY_IRQ_ADDR,
- .mask_int1 = ST_ACCEL_3_DRDY_IRQ_INT1_MASK,
- .mask_int2 = ST_ACCEL_3_DRDY_IRQ_INT2_MASK,
- .addr_ihl = ST_ACCEL_3_IHL_IRQ_ADDR,
- .mask_ihl = ST_ACCEL_3_IHL_IRQ_MASK,
+ .addr = 0x23,
+ .mask_int1 = 0x80,
+ .mask_int2 = 0x00,
+ .addr_ihl = 0x23,
+ .mask_ihl = 0x40,
.addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR,
.ig1 = {
- .en_addr = ST_ACCEL_3_IG1_EN_ADDR,
- .en_mask = ST_ACCEL_3_IG1_EN_MASK,
+ .en_addr = 0x23,
+ .en_mask = 0x08,
},
},
- .multi_read_bit = ST_ACCEL_3_MULTIREAD_BIT,
+ .multi_read_bit = false,
.bootime = 2,
},
{
- .wai = ST_ACCEL_4_WAI_EXP,
+ .wai = 0x3a,
.wai_addr = ST_SENSORS_DEFAULT_WAI_ADDRESS,
.sensors_supported = {
[0] = LIS3LV02DL_ACCEL_DEV_NAME,
},
.ch = (struct iio_chan_spec *)st_accel_12bit_channels,
.odr = {
- .addr = ST_ACCEL_4_ODR_ADDR,
- .mask = ST_ACCEL_4_ODR_MASK,
+ .addr = 0x20,
+ .mask = 0x30, /* DF1 and DF0 */
.odr_avl = {
- { 40, ST_ACCEL_4_ODR_AVL_40HZ_VAL },
- { 160, ST_ACCEL_4_ODR_AVL_160HZ_VAL, },
- { 640, ST_ACCEL_4_ODR_AVL_640HZ_VAL, },
- { 2560, ST_ACCEL_4_ODR_AVL_2560HZ_VAL, },
+ { .hz = 40, .value = 0x00, },
+ { .hz = 160, .value = 0x01, },
+ { .hz = 640, .value = 0x02, },
+ { .hz = 2560, .value = 0x03, },
},
},
.pw = {
- .addr = ST_ACCEL_4_PW_ADDR,
- .mask = ST_ACCEL_4_PW_MASK,
+ .addr = 0x20,
+ .mask = 0xc0,
.value_on = ST_SENSORS_DEFAULT_POWER_ON_VALUE,
.value_off = ST_SENSORS_DEFAULT_POWER_OFF_VALUE,
},
@@ -535,51 +347,51 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = {
.mask = ST_SENSORS_DEFAULT_AXIS_MASK,
},
.fs = {
- .addr = ST_ACCEL_4_FS_ADDR,
- .mask = ST_ACCEL_4_FS_MASK,
+ .addr = 0x21,
+ .mask = 0x80,
.fs_avl = {
[0] = {
.num = ST_ACCEL_FS_AVL_2G,
- .value = ST_ACCEL_4_FS_AVL_2_VAL,
- .gain = ST_ACCEL_4_FS_AVL_2_GAIN,
+ .value = 0x00,
+ .gain = IIO_G_TO_M_S_2(1024),
},
[1] = {
.num = ST_ACCEL_FS_AVL_6G,
- .value = ST_ACCEL_4_FS_AVL_6_VAL,
- .gain = ST_ACCEL_4_FS_AVL_6_GAIN,
+ .value = 0x01,
+ .gain = IIO_G_TO_M_S_2(340),
},
},
},
.bdu = {
- .addr = ST_ACCEL_4_BDU_ADDR,
- .mask = ST_ACCEL_4_BDU_MASK,
+ .addr = 0x21,
+ .mask = 0x40,
},
.drdy_irq = {
- .addr = ST_ACCEL_4_DRDY_IRQ_ADDR,
- .mask_int1 = ST_ACCEL_4_DRDY_IRQ_INT1_MASK,
+ .addr = 0x21,
+ .mask_int1 = 0x04,
.addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR,
},
- .multi_read_bit = ST_ACCEL_4_MULTIREAD_BIT,
+ .multi_read_bit = true,
.bootime = 2, /* guess */
},
{
- .wai = ST_ACCEL_5_WAI_EXP,
+ .wai = 0x3b,
.wai_addr = ST_SENSORS_DEFAULT_WAI_ADDRESS,
.sensors_supported = {
[0] = LIS331DL_ACCEL_DEV_NAME,
},
.ch = (struct iio_chan_spec *)st_accel_8bit_channels,
.odr = {
- .addr = ST_ACCEL_5_ODR_ADDR,
- .mask = ST_ACCEL_5_ODR_MASK,
+ .addr = 0x20,
+ .mask = 0x80,
.odr_avl = {
- { 100, ST_ACCEL_5_ODR_AVL_100HZ_VAL },
- { 400, ST_ACCEL_5_ODR_AVL_400HZ_VAL, },
+ { .hz = 100, .value = 0x00, },
+ { .hz = 400, .value = 0x01, },
},
},
.pw = {
- .addr = ST_ACCEL_5_PW_ADDR,
- .mask = ST_ACCEL_5_PW_MASK,
+ .addr = 0x20,
+ .mask = 0x40,
.value_on = ST_SENSORS_DEFAULT_POWER_ON_VALUE,
.value_off = ST_SENSORS_DEFAULT_POWER_OFF_VALUE,
},
@@ -588,54 +400,58 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = {
.mask = ST_SENSORS_DEFAULT_AXIS_MASK,
},
.fs = {
- .addr = ST_ACCEL_5_FS_ADDR,
- .mask = ST_ACCEL_5_FS_MASK,
+ .addr = 0x20,
+ .mask = 0x20,
+ /*
+ * TODO: check these resulting gain settings, these are
+ * not in the datasheet
+ */
.fs_avl = {
[0] = {
.num = ST_ACCEL_FS_AVL_2G,
- .value = ST_ACCEL_5_FS_AVL_2_VAL,
- .gain = ST_ACCEL_5_FS_AVL_2_GAIN,
+ .value = 0x00,
+ .gain = IIO_G_TO_M_S_2(18000),
},
[1] = {
.num = ST_ACCEL_FS_AVL_8G,
- .value = ST_ACCEL_5_FS_AVL_8_VAL,
- .gain = ST_ACCEL_5_FS_AVL_8_GAIN,
+ .value = 0x01,
+ .gain = IIO_G_TO_M_S_2(72000),
},
},
},
.drdy_irq = {
- .addr = ST_ACCEL_5_DRDY_IRQ_ADDR,
- .mask_int1 = ST_ACCEL_5_DRDY_IRQ_INT1_MASK,
- .mask_int2 = ST_ACCEL_5_DRDY_IRQ_INT2_MASK,
- .addr_ihl = ST_ACCEL_5_IHL_IRQ_ADDR,
- .mask_ihl = ST_ACCEL_5_IHL_IRQ_MASK,
- .addr_od = ST_ACCEL_5_OD_IRQ_ADDR,
- .mask_od = ST_ACCEL_5_OD_IRQ_MASK,
+ .addr = 0x22,
+ .mask_int1 = 0x04,
+ .mask_int2 = 0x20,
+ .addr_ihl = 0x22,
+ .mask_ihl = 0x80,
+ .addr_od = 0x22,
+ .mask_od = 0x40,
.addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR,
},
- .multi_read_bit = ST_ACCEL_5_MULTIREAD_BIT,
+ .multi_read_bit = false,
.bootime = 2, /* guess */
},
{
- .wai = ST_ACCEL_6_WAI_EXP,
+ .wai = 0x32,
.wai_addr = ST_SENSORS_DEFAULT_WAI_ADDRESS,
.sensors_supported = {
[0] = H3LIS331DL_DRIVER_NAME,
},
.ch = (struct iio_chan_spec *)st_accel_12bit_channels,
.odr = {
- .addr = ST_ACCEL_6_ODR_ADDR,
- .mask = ST_ACCEL_6_ODR_MASK,
+ .addr = 0x20,
+ .mask = 0x18,
.odr_avl = {
- { 50, ST_ACCEL_6_ODR_AVL_50HZ_VAL },
- { 100, ST_ACCEL_6_ODR_AVL_100HZ_VAL, },
- { 400, ST_ACCEL_6_ODR_AVL_400HZ_VAL, },
- { 1000, ST_ACCEL_6_ODR_AVL_1000HZ_VAL, },
+ { .hz = 50, .value = 0x00, },
+ { .hz = 100, .value = 0x01, },
+ { .hz = 400, .value = 0x02, },
+ { .hz = 1000, .value = 0x03, },
},
},
.pw = {
- .addr = ST_ACCEL_6_PW_ADDR,
- .mask = ST_ACCEL_6_PW_MASK,
+ .addr = 0x20,
+ .mask = 0x20,
.value_on = ST_SENSORS_DEFAULT_POWER_ON_VALUE,
.value_off = ST_SENSORS_DEFAULT_POWER_OFF_VALUE,
},
@@ -644,38 +460,38 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = {
.mask = ST_SENSORS_DEFAULT_AXIS_MASK,
},
.fs = {
- .addr = ST_ACCEL_6_FS_ADDR,
- .mask = ST_ACCEL_6_FS_MASK,
+ .addr = 0x23,
+ .mask = 0x30,
.fs_avl = {
[0] = {
.num = ST_ACCEL_FS_AVL_100G,
- .value = ST_ACCEL_6_FS_AVL_100_VAL,
- .gain = ST_ACCEL_6_FS_AVL_100_GAIN,
+ .value = 0x00,
+ .gain = IIO_G_TO_M_S_2(49000),
},
[1] = {
.num = ST_ACCEL_FS_AVL_200G,
- .value = ST_ACCEL_6_FS_AVL_200_VAL,
- .gain = ST_ACCEL_6_FS_AVL_200_GAIN,
+ .value = 0x01,
+ .gain = IIO_G_TO_M_S_2(98000),
},
[2] = {
.num = ST_ACCEL_FS_AVL_400G,
- .value = ST_ACCEL_6_FS_AVL_400_VAL,
- .gain = ST_ACCEL_6_FS_AVL_400_GAIN,
+ .value = 0x03,
+ .gain = IIO_G_TO_M_S_2(195000),
},
},
},
.bdu = {
- .addr = ST_ACCEL_6_BDU_ADDR,
- .mask = ST_ACCEL_6_BDU_MASK,
+ .addr = 0x23,
+ .mask = 0x80,
},
.drdy_irq = {
- .addr = ST_ACCEL_6_DRDY_IRQ_ADDR,
- .mask_int1 = ST_ACCEL_6_DRDY_IRQ_INT1_MASK,
- .mask_int2 = ST_ACCEL_6_DRDY_IRQ_INT2_MASK,
- .addr_ihl = ST_ACCEL_6_IHL_IRQ_ADDR,
- .mask_ihl = ST_ACCEL_6_IHL_IRQ_MASK,
+ .addr = 0x22,
+ .mask_int1 = 0x02,
+ .mask_int2 = 0x10,
+ .addr_ihl = 0x22,
+ .mask_ihl = 0x80,
},
- .multi_read_bit = ST_ACCEL_6_MULTIREAD_BIT,
+ .multi_read_bit = true,
.bootime = 2,
},
{
@@ -685,18 +501,18 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = {
},
.ch = (struct iio_chan_spec *)st_accel_12bit_channels,
.odr = {
- .addr = ST_ACCEL_7_ODR_ADDR,
- .mask = ST_ACCEL_7_ODR_MASK,
+ .addr = 0x20,
+ .mask = 0x30,
.odr_avl = {
- { 280, ST_ACCEL_7_ODR_AVL_280HZ_VAL, },
- { 560, ST_ACCEL_7_ODR_AVL_560HZ_VAL, },
- { 1120, ST_ACCEL_7_ODR_AVL_1120HZ_VAL, },
- { 4480, ST_ACCEL_7_ODR_AVL_4480HZ_VAL, },
+ { .hz = 280, .value = 0x00, },
+ { .hz = 560, .value = 0x01, },
+ { .hz = 1120, .value = 0x02, },
+ { .hz = 4480, .value = 0x03, },
},
},
.pw = {
- .addr = ST_ACCEL_7_PW_ADDR,
- .mask = ST_ACCEL_7_PW_MASK,
+ .addr = 0x20,
+ .mask = 0xc0,
.value_on = ST_SENSORS_DEFAULT_POWER_ON_VALUE,
.value_off = ST_SENSORS_DEFAULT_POWER_OFF_VALUE,
},
@@ -708,7 +524,7 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = {
.fs_avl = {
[0] = {
.num = ST_ACCEL_FS_AVL_2G,
- .gain = ST_ACCEL_7_FS_AVL_2_GAIN,
+ .gain = IIO_G_TO_M_S_2(488),
},
},
},
@@ -719,11 +535,78 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = {
.bdu = {
},
.drdy_irq = {
- .addr = ST_ACCEL_7_DRDY_IRQ_ADDR,
- .mask_int1 = ST_ACCEL_7_DRDY_IRQ_INT1_MASK,
+ .addr = 0x21,
+ .mask_int1 = 0x04,
+ .addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR,
+ },
+ .multi_read_bit = false,
+ .bootime = 2,
+ },
+ {
+ .wai = 0x33,
+ .wai_addr = ST_SENSORS_DEFAULT_WAI_ADDRESS,
+ .sensors_supported = {
+ [0] = LNG2DM_ACCEL_DEV_NAME,
+ },
+ .ch = (struct iio_chan_spec *)st_accel_8bit_channels,
+ .odr = {
+ .addr = 0x20,
+ .mask = 0xf0,
+ .odr_avl = {
+ { .hz = 1, .value = 0x01, },
+ { .hz = 10, .value = 0x02, },
+ { .hz = 25, .value = 0x03, },
+ { .hz = 50, .value = 0x04, },
+ { .hz = 100, .value = 0x05, },
+ { .hz = 200, .value = 0x06, },
+ { .hz = 400, .value = 0x07, },
+ { .hz = 1600, .value = 0x08, },
+ },
+ },
+ .pw = {
+ .addr = 0x20,
+ .mask = 0xf0,
+ .value_off = ST_SENSORS_DEFAULT_POWER_OFF_VALUE,
+ },
+ .enable_axis = {
+ .addr = ST_SENSORS_DEFAULT_AXIS_ADDR,
+ .mask = ST_SENSORS_DEFAULT_AXIS_MASK,
+ },
+ .fs = {
+ .addr = 0x23,
+ .mask = 0x30,
+ .fs_avl = {
+ [0] = {
+ .num = ST_ACCEL_FS_AVL_2G,
+ .value = 0x00,
+ .gain = IIO_G_TO_M_S_2(15600),
+ },
+ [1] = {
+ .num = ST_ACCEL_FS_AVL_4G,
+ .value = 0x01,
+ .gain = IIO_G_TO_M_S_2(31200),
+ },
+ [2] = {
+ .num = ST_ACCEL_FS_AVL_8G,
+ .value = 0x02,
+ .gain = IIO_G_TO_M_S_2(62500),
+ },
+ [3] = {
+ .num = ST_ACCEL_FS_AVL_16G,
+ .value = 0x03,
+ .gain = IIO_G_TO_M_S_2(187500),
+ },
+ },
+ },
+ .drdy_irq = {
+ .addr = 0x22,
+ .mask_int1 = 0x10,
+ .mask_int2 = 0x08,
+ .addr_ihl = 0x25,
+ .mask_ihl = 0x02,
.addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR,
},
- .multi_read_bit = ST_ACCEL_7_MULTIREAD_BIT,
+ .multi_read_bit = true,
.bootime = 2,
},
};
diff --git a/drivers/iio/accel/st_accel_i2c.c b/drivers/iio/accel/st_accel_i2c.c
index e9d427a5df7c..c0f8867aa1ea 100644
--- a/drivers/iio/accel/st_accel_i2c.c
+++ b/drivers/iio/accel/st_accel_i2c.c
@@ -84,6 +84,10 @@ static const struct of_device_id st_accel_of_match[] = {
.compatible = "st,lis3l02dq",
.data = LIS3L02DQ_ACCEL_DEV_NAME,
},
+ {
+ .compatible = "st,lng2dm-accel",
+ .data = LNG2DM_ACCEL_DEV_NAME,
+ },
{},
};
MODULE_DEVICE_TABLE(of, st_accel_of_match);
@@ -135,6 +139,7 @@ static const struct i2c_device_id st_accel_id_table[] = {
{ LSM303AGR_ACCEL_DEV_NAME },
{ LIS2DH12_ACCEL_DEV_NAME },
{ LIS3L02DQ_ACCEL_DEV_NAME },
+ { LNG2DM_ACCEL_DEV_NAME },
{},
};
MODULE_DEVICE_TABLE(i2c, st_accel_id_table);
diff --git a/drivers/iio/accel/st_accel_spi.c b/drivers/iio/accel/st_accel_spi.c
index efd43941d45d..c25ac50d4600 100644
--- a/drivers/iio/accel/st_accel_spi.c
+++ b/drivers/iio/accel/st_accel_spi.c
@@ -60,6 +60,7 @@ static const struct spi_device_id st_accel_id_table[] = {
{ LSM303AGR_ACCEL_DEV_NAME },
{ LIS2DH12_ACCEL_DEV_NAME },
{ LIS3L02DQ_ACCEL_DEV_NAME },
+ { LNG2DM_ACCEL_DEV_NAME },
{},
};
MODULE_DEVICE_TABLE(spi, st_accel_id_table);
diff --git a/drivers/iio/adc/Kconfig b/drivers/iio/adc/Kconfig
index 99c051490eff..38bc319904c4 100644
--- a/drivers/iio/adc/Kconfig
+++ b/drivers/iio/adc/Kconfig
@@ -58,6 +58,18 @@ config AD7476
To compile this driver as a module, choose M here: the
module will be called ad7476.
+config AD7766
+ tristate "Analog Devices AD7766/AD7767 ADC driver"
+ depends on SPI_MASTER
+ select IIO_BUFFER
+ select IIO_TRIGGERED_BUFFER
+ help
+ Say yes here to build support for Analog Devices AD7766, AD7766-1,
+ AD7766-2, AD7767, AD7767-1, AD7767-2 SPI analog to digital converters.
+
+ To compile this driver as a module, choose M here: the module will be
+ called ad7766.
+
config AD7791
tristate "Analog Devices AD7791 ADC driver"
depends on SPI
@@ -195,6 +207,16 @@ config DA9150_GPADC
To compile this driver as a module, choose M here: the module will be
called berlin2-adc.
+config ENVELOPE_DETECTOR
+ tristate "Envelope detector using a DAC and a comparator"
+ depends on OF
+ help
+ Say yes here to build support for an envelope detector using a DAC
+ and a comparator.
+
+ To compile this driver as a module, choose M here: the module will be
+ called envelope-detector.
+
config EXYNOS_ADC
tristate "Exynos ADC driver support"
depends on ARCH_EXYNOS || ARCH_S3C24XX || ARCH_S3C64XX || (OF && COMPILE_TEST)
@@ -419,6 +441,28 @@ config ROCKCHIP_SARADC
To compile this driver as a module, choose M here: the
module will be called rockchip_saradc.
+config STM32_ADC_CORE
+ tristate "STMicroelectronics STM32 adc core"
+ depends on ARCH_STM32 || COMPILE_TEST
+ depends on OF
+ depends on REGULATOR
+ help
+ Select this option to enable the core driver for STMicroelectronics
+ STM32 analog-to-digital converter (ADC).
+
+ This driver can also be built as a module. If so, the module
+ will be called stm32-adc-core.
+
+config STM32_ADC
+ tristate "STMicroelectronics STM32 adc"
+ depends on STM32_ADC_CORE
+ help
+ Say yes here to build support for the STMicroelectronics STM32 Analog
+ to Digital Converter (ADC).
+
+ This driver can also be built as a module. If so, the module
+ will be called stm32-adc.
+
config STX104
tristate "Apex Embedded Systems STX104 driver"
depends on X86 && ISA_BUS_API
@@ -449,6 +493,8 @@ config TI_ADC081C
config TI_ADC0832
tristate "Texas Instruments ADC0831/ADC0832/ADC0834/ADC0838"
depends on SPI
+ select IIO_BUFFER
+ select IIO_TRIGGERED_BUFFER
help
If you say yes here you get support for Texas Instruments ADC0831,
ADC0832, ADC0834, ADC0838 ADC chips.
diff --git a/drivers/iio/adc/Makefile b/drivers/iio/adc/Makefile
index 7a40c04c311f..d36c4be8d1fc 100644
--- a/drivers/iio/adc/Makefile
+++ b/drivers/iio/adc/Makefile
@@ -9,6 +9,7 @@ obj-$(CONFIG_AD7291) += ad7291.o
obj-$(CONFIG_AD7298) += ad7298.o
obj-$(CONFIG_AD7923) += ad7923.o
obj-$(CONFIG_AD7476) += ad7476.o
+obj-$(CONFIG_AD7766) += ad7766.o
obj-$(CONFIG_AD7791) += ad7791.o
obj-$(CONFIG_AD7793) += ad7793.o
obj-$(CONFIG_AD7887) += ad7887.o
@@ -20,6 +21,7 @@ obj-$(CONFIG_BCM_IPROC_ADC) += bcm_iproc_adc.o
obj-$(CONFIG_BERLIN2_ADC) += berlin2-adc.o
obj-$(CONFIG_CC10001_ADC) += cc10001_adc.o
obj-$(CONFIG_DA9150_GPADC) += da9150-gpadc.o
+obj-$(CONFIG_ENVELOPE_DETECTOR) += envelope-detector.o
obj-$(CONFIG_EXYNOS_ADC) += exynos_adc.o
obj-$(CONFIG_FSL_MX25_ADC) += fsl-imx25-gcq.o
obj-$(CONFIG_HI8435) += hi8435.o
@@ -41,6 +43,8 @@ obj-$(CONFIG_QCOM_SPMI_IADC) += qcom-spmi-iadc.o
obj-$(CONFIG_QCOM_SPMI_VADC) += qcom-spmi-vadc.o
obj-$(CONFIG_ROCKCHIP_SARADC) += rockchip_saradc.o
obj-$(CONFIG_STX104) += stx104.o
+obj-$(CONFIG_STM32_ADC_CORE) += stm32-adc-core.o
+obj-$(CONFIG_STM32_ADC) += stm32-adc.o
obj-$(CONFIG_TI_ADC081C) += ti-adc081c.o
obj-$(CONFIG_TI_ADC0832) += ti-adc0832.o
obj-$(CONFIG_TI_ADC12138) += ti-adc12138.o
diff --git a/drivers/iio/adc/ad7766.c b/drivers/iio/adc/ad7766.c
new file mode 100644
index 000000000000..75cca42b6e70
--- /dev/null
+++ b/drivers/iio/adc/ad7766.c
@@ -0,0 +1,330 @@
+/*
+ * AD7766/AD7767 SPI ADC driver
+ *
+ * Copyright 2016 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/gpio/consumer.h>
+#include <linux/module.h>
+#include <linux/regulator/consumer.h>
+#include <linux/slab.h>
+#include <linux/spi/spi.h>
+
+#include <linux/iio/iio.h>
+#include <linux/iio/buffer.h>
+#include <linux/iio/trigger.h>
+#include <linux/iio/trigger_consumer.h>
+#include <linux/iio/triggered_buffer.h>
+
+struct ad7766_chip_info {
+ unsigned int decimation_factor;
+};
+
+enum {
+ AD7766_SUPPLY_AVDD = 0,
+ AD7766_SUPPLY_DVDD = 1,
+ AD7766_SUPPLY_VREF = 2,
+ AD7766_NUM_SUPPLIES = 3
+};
+
+struct ad7766 {
+ const struct ad7766_chip_info *chip_info;
+ struct spi_device *spi;
+ struct clk *mclk;
+ struct gpio_desc *pd_gpio;
+ struct regulator_bulk_data reg[AD7766_NUM_SUPPLIES];
+
+ struct iio_trigger *trig;
+
+ struct spi_transfer xfer;
+ struct spi_message msg;
+
+ /*
+ * DMA (thus cache coherency maintenance) requires the
+ * transfer buffers to live in their own cache lines.
+ * Make the buffer large enough for one 24-bit sample and one
+ * 64-bit aligned 64-bit timestamp.
+ */
+ unsigned char data[ALIGN(3, sizeof(s64)) + sizeof(s64)]
+ ____cacheline_aligned;
+};
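
A worked view of that buffer layout (a sketch; ALIGN(3, sizeof(s64))
evaluates to 8, so the buffer is 16 bytes):

	/*
	 * data[0]     - stays 0; together with the 3 bytes received into
	 *               data[1..3] (ad7766_probe() sets xfer.rx_buf to
	 *               &data[1]) it forms the 32-bit big-endian storage
	 *               word declared in the channel's scan_type
	 *               (signed, 24 valid bits in 32).
	 * data[4..7]  - padding up to the timestamp's 8-byte alignment.
	 * data[8..15] - s64 timestamp filled in by
	 *               iio_push_to_buffers_with_timestamp().
	 */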
+
+/*
+ * The AD7766 and AD7767 variants are interface compatible; the main
+ * difference is analog performance. Both parts use the same IDs.
+ */
+enum ad7766_device_ids {
+ ID_AD7766,
+ ID_AD7766_1,
+ ID_AD7766_2,
+};
+
+static irqreturn_t ad7766_trigger_handler(int irq, void *p)
+{
+ struct iio_poll_func *pf = p;
+ struct iio_dev *indio_dev = pf->indio_dev;
+ struct ad7766 *ad7766 = iio_priv(indio_dev);
+ int ret;
+
+ ret = spi_sync(ad7766->spi, &ad7766->msg);
+ if (ret < 0)
+ goto done;
+
+ iio_push_to_buffers_with_timestamp(indio_dev, ad7766->data,
+ pf->timestamp);
+done:
+ iio_trigger_notify_done(indio_dev->trig);
+
+ return IRQ_HANDLED;
+}
+
+static int ad7766_preenable(struct iio_dev *indio_dev)
+{
+ struct ad7766 *ad7766 = iio_priv(indio_dev);
+ int ret;
+
+ ret = regulator_bulk_enable(ARRAY_SIZE(ad7766->reg), ad7766->reg);
+ if (ret < 0) {
+ dev_err(&ad7766->spi->dev, "Failed to enable supplies: %d\n",
+ ret);
+ return ret;
+ }
+
+ ret = clk_prepare_enable(ad7766->mclk);
+ if (ret < 0) {
+ dev_err(&ad7766->spi->dev, "Failed to enable MCLK: %d\n", ret);
+ regulator_bulk_disable(ARRAY_SIZE(ad7766->reg), ad7766->reg);
+ return ret;
+ }
+
+ if (ad7766->pd_gpio)
+ gpiod_set_value(ad7766->pd_gpio, 0);
+
+ return 0;
+}
+
+static int ad7766_postdisable(struct iio_dev *indio_dev)
+{
+ struct ad7766 *ad7766 = iio_priv(indio_dev);
+
+ if (ad7766->pd_gpio)
+ gpiod_set_value(ad7766->pd_gpio, 1);
+
+ /*
+ * The PD pin is synchronous to the clock, so give it some time to
+ * notice the change before we disable the clock.
+ */
+ msleep(20);
+
+ clk_disable_unprepare(ad7766->mclk);
+ regulator_bulk_disable(ARRAY_SIZE(ad7766->reg), ad7766->reg);
+
+ return 0;
+}
+
+static int ad7766_read_raw(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan, int *val, int *val2, long info)
+{
+ struct ad7766 *ad7766 = iio_priv(indio_dev);
+ struct regulator *vref = ad7766->reg[AD7766_SUPPLY_VREF].consumer;
+ int scale_uv;
+
+ switch (info) {
+ case IIO_CHAN_INFO_SCALE:
+ scale_uv = regulator_get_voltage(vref);
+ if (scale_uv < 0)
+ return scale_uv;
+ *val = scale_uv / 1000;
+ *val2 = chan->scan_type.realbits;
+ return IIO_VAL_FRACTIONAL_LOG2;
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ *val = clk_get_rate(ad7766->mclk) /
+ ad7766->chip_info->decimation_factor;
+ return IIO_VAL_INT;
+ }
+ return -EINVAL;
+}
+
+static const struct iio_chan_spec ad7766_channels[] = {
+ {
+ .type = IIO_VOLTAGE,
+ .indexed = 1,
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
+ .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ),
+ .scan_type = {
+ .sign = 's',
+ .realbits = 24,
+ .storagebits = 32,
+ .endianness = IIO_BE,
+ },
+ },
+ IIO_CHAN_SOFT_TIMESTAMP(1),
+};
+
+static const struct ad7766_chip_info ad7766_chip_info[] = {
+ [ID_AD7766] = {
+ .decimation_factor = 8,
+ },
+ [ID_AD7766_1] = {
+ .decimation_factor = 16,
+ },
+ [ID_AD7766_2] = {
+ .decimation_factor = 32,
+ },
+};
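
The decimation factors fix the output data rate relative to MCLK, which is
what ad7766_read_raw() reports for IIO_CHAN_INFO_SAMP_FREQ. A worked example
(hypothetical 1 MHz MCLK, not taken from the patch):

	/* AD7766   / AD7767:   1000000 / 8  = 125000 Hz */
	/* AD7766-1 / AD7767-1: 1000000 / 16 =  62500 Hz */
	/* AD7766-2 / AD7767-2: 1000000 / 32 =  31250 Hz */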
+
+static const struct iio_buffer_setup_ops ad7766_buffer_setup_ops = {
+ .preenable = &ad7766_preenable,
+ .postenable = &iio_triggered_buffer_postenable,
+ .predisable = &iio_triggered_buffer_predisable,
+ .postdisable = &ad7766_postdisable,
+};
+
+static const struct iio_info ad7766_info = {
+ .driver_module = THIS_MODULE,
+ .read_raw = &ad7766_read_raw,
+};
+
+static irqreturn_t ad7766_irq(int irq, void *private)
+{
+ iio_trigger_poll(private);
+ return IRQ_HANDLED;
+}
+
+static int ad7766_set_trigger_state(struct iio_trigger *trig, bool enable)
+{
+ struct ad7766 *ad7766 = iio_trigger_get_drvdata(trig);
+
+ if (enable)
+ enable_irq(ad7766->spi->irq);
+ else
+ disable_irq(ad7766->spi->irq);
+
+ return 0;
+}
+
+static const struct iio_trigger_ops ad7766_trigger_ops = {
+ .owner = THIS_MODULE,
+ .set_trigger_state = ad7766_set_trigger_state,
+ .validate_device = iio_trigger_validate_own_device,
+};
+
+static int ad7766_probe(struct spi_device *spi)
+{
+ const struct spi_device_id *id = spi_get_device_id(spi);
+ struct iio_dev *indio_dev;
+ struct ad7766 *ad7766;
+ int ret;
+
+ indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*ad7766));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ ad7766 = iio_priv(indio_dev);
+ ad7766->chip_info = &ad7766_chip_info[id->driver_data];
+
+ ad7766->mclk = devm_clk_get(&spi->dev, "mclk");
+ if (IS_ERR(ad7766->mclk))
+ return PTR_ERR(ad7766->mclk);
+
+ ad7766->reg[AD7766_SUPPLY_AVDD].supply = "avdd";
+ ad7766->reg[AD7766_SUPPLY_DVDD].supply = "dvdd";
+ ad7766->reg[AD7766_SUPPLY_VREF].supply = "vref";
+
+ ret = devm_regulator_bulk_get(&spi->dev, ARRAY_SIZE(ad7766->reg),
+ ad7766->reg);
+ if (ret)
+ return ret;
+
+ ad7766->pd_gpio = devm_gpiod_get_optional(&spi->dev, "powerdown",
+ GPIOD_OUT_HIGH);
+ if (IS_ERR(ad7766->pd_gpio))
+ return PTR_ERR(ad7766->pd_gpio);
+
+ indio_dev->dev.parent = &spi->dev;
+ indio_dev->name = spi_get_device_id(spi)->name;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->channels = ad7766_channels;
+ indio_dev->num_channels = ARRAY_SIZE(ad7766_channels);
+ indio_dev->info = &ad7766_info;
+
+ if (spi->irq > 0) {
+ ad7766->trig = devm_iio_trigger_alloc(&spi->dev, "%s-dev%d",
+ indio_dev->name, indio_dev->id);
+ if (!ad7766->trig)
+ return -ENOMEM;
+
+ ad7766->trig->ops = &ad7766_trigger_ops;
+ ad7766->trig->dev.parent = &spi->dev;
+ iio_trigger_set_drvdata(ad7766->trig, ad7766);
+
+ ret = devm_request_irq(&spi->dev, spi->irq, ad7766_irq,
+ IRQF_TRIGGER_FALLING, dev_name(&spi->dev),
+ ad7766->trig);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * The device generates interrupts as long as it is powered up.
+ * Some platforms might not allow the option to power it down, so
+ * disable the interrupt to avoid extra load on the system.
+ */
+ disable_irq(spi->irq);
+
+ ret = devm_iio_trigger_register(&spi->dev, ad7766->trig);
+ if (ret)
+ return ret;
+ }
+
+ spi_set_drvdata(spi, indio_dev);
+
+ ad7766->spi = spi;
+
+ /* First byte always 0 */
+ ad7766->xfer.rx_buf = &ad7766->data[1];
+ ad7766->xfer.len = 3;
+
+ spi_message_init(&ad7766->msg);
+ spi_message_add_tail(&ad7766->xfer, &ad7766->msg);
+
+ ret = devm_iio_triggered_buffer_setup(&spi->dev, indio_dev,
+ &iio_pollfunc_store_time, &ad7766_trigger_handler,
+ &ad7766_buffer_setup_ops);
+ if (ret)
+ return ret;
+
+ ret = devm_iio_device_register(&spi->dev, indio_dev);
+ if (ret)
+ return ret;
+ return 0;
+}
+
+static const struct spi_device_id ad7766_id[] = {
+ {"ad7766", ID_AD7766},
+ {"ad7766-1", ID_AD7766_1},
+ {"ad7766-2", ID_AD7766_2},
+ {"ad7767", ID_AD7766},
+ {"ad7767-1", ID_AD7766_1},
+ {"ad7767-2", ID_AD7766_2},
+ {}
+};
+MODULE_DEVICE_TABLE(spi, ad7766_id);
+
+static struct spi_driver ad7766_driver = {
+ .driver = {
+ .name = "ad7766",
+ },
+ .probe = ad7766_probe,
+ .id_table = ad7766_id,
+};
+module_spi_driver(ad7766_driver);
+
+MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
+MODULE_DESCRIPTION("Analog Devices AD7766 and AD7767 ADCs driver support");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/adc/at91_adc.c b/drivers/iio/adc/at91_adc.c
index bbdac07f4aaa..34b928cefeed 100644
--- a/drivers/iio/adc/at91_adc.c
+++ b/drivers/iio/adc/at91_adc.c
@@ -30,6 +30,7 @@
#include <linux/iio/trigger.h>
#include <linux/iio/trigger_consumer.h>
#include <linux/iio/triggered_buffer.h>
+#include <linux/pinctrl/consumer.h>
/* Registers */
#define AT91_ADC_CR 0x00 /* Control Register */
@@ -1347,6 +1348,32 @@ static int at91_adc_remove(struct platform_device *pdev)
return 0;
}
+#ifdef CONFIG_PM_SLEEP
+static int at91_adc_suspend(struct device *dev)
+{
+ struct iio_dev *idev = platform_get_drvdata(to_platform_device(dev));
+ struct at91_adc_state *st = iio_priv(idev);
+
+ pinctrl_pm_select_sleep_state(dev);
+ clk_disable_unprepare(st->clk);
+
+ return 0;
+}
+
+static int at91_adc_resume(struct device *dev)
+{
+ struct iio_dev *idev = platform_get_drvdata(to_platform_device(dev));
+ struct at91_adc_state *st = iio_priv(idev);
+
+ clk_prepare_enable(st->clk);
+ pinctrl_pm_select_default_state(dev);
+
+ return 0;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(at91_adc_pm_ops, at91_adc_suspend, at91_adc_resume);
+
static struct at91_adc_caps at91sam9260_caps = {
.calc_startup_ticks = calc_startup_ticks_9260,
.num_channels = 4,
@@ -1441,6 +1468,7 @@ static struct platform_driver at91_adc_driver = {
.driver = {
.name = DRIVER_NAME,
.of_match_table = of_match_ptr(at91_adc_dt_ids),
+ .pm = &at91_adc_pm_ops,
},
};
diff --git a/drivers/iio/adc/envelope-detector.c b/drivers/iio/adc/envelope-detector.c
new file mode 100644
index 000000000000..fef15c0d7c9c
--- /dev/null
+++ b/drivers/iio/adc/envelope-detector.c
@@ -0,0 +1,422 @@
+/*
+ * Driver for an envelope detector using a DAC and a comparator
+ *
+ * Copyright (C) 2016 Axentia Technologies AB
+ *
+ * Author: Peter Rosin <peda@axentia.se>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/*
+ * The DAC is used to find the peak level of an alternating voltage input
+ * signal by a binary search using the output of a comparator wired to
+ * an interrupt pin. Like so:
+ * _
+ * | \
+ * input +------>-------|+ \
+ * | \
+ * .-------. | }---.
+ * | | | / |
+ * | dac|-->--|- / |
+ * | | |_/ |
+ * | | |
+ * | | |
+ * | irq|------<-------'
+ * | |
+ * '-------'
+ */
+
+#include <linux/completion.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/iio/consumer.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/spinlock.h>
+#include <linux/workqueue.h>
+
+struct envelope {
+ spinlock_t comp_lock; /* protects comp */
+ int comp;
+
+ struct mutex read_lock; /* protects everything else */
+
+ int comp_irq;
+ u32 comp_irq_trigger;
+ u32 comp_irq_trigger_inv;
+
+ struct iio_channel *dac;
+ struct delayed_work comp_timeout;
+
+ unsigned int comp_interval;
+ bool invert;
+ u32 dac_max;
+
+ int high;
+ int level;
+ int low;
+
+ struct completion done;
+};
+
+/*
+ * The envelope_detector_comp_latch function works together with the compare
+ * interrupt service routine below (envelope_detector_comp_isr) as a latch
+ * (one-bit memory) recording whether the interrupt has triggered since
+ * the last call to this function.
+ * The ..._comp_isr function disables the interrupt so that the cpu does not
+ * need to service a possible interrupt flood from the comparator when no-one
+ * cares anyway, and this ..._comp_latch function reenables it again if
+ * needed.
+ */
+static int envelope_detector_comp_latch(struct envelope *env)
+{
+ int comp;
+
+ spin_lock_irq(&env->comp_lock);
+ comp = env->comp;
+ env->comp = 0;
+ spin_unlock_irq(&env->comp_lock);
+
+ if (!comp)
+ return 0;
+
+ /*
+ * The irq was disabled, and is reenabled below. But
+ * there might have been a pending irq that happened
+ * while the irq was disabled and that fires just as
+ * the irq is reenabled. That is not what is desired.
+ */
+ enable_irq(env->comp_irq);
+
+ /* So, synchronize this possibly pending irq... */
+ synchronize_irq(env->comp_irq);
+
+ /* ...and redo the whole dance. */
+ spin_lock_irq(&env->comp_lock);
+ comp = env->comp;
+ env->comp = 0;
+ spin_unlock_irq(&env->comp_lock);
+
+ if (comp)
+ enable_irq(env->comp_irq);
+
+ return 1;
+}
+
+static irqreturn_t envelope_detector_comp_isr(int irq, void *ctx)
+{
+ struct envelope *env = ctx;
+
+ spin_lock(&env->comp_lock);
+ env->comp = 1;
+ disable_irq_nosync(env->comp_irq);
+ spin_unlock(&env->comp_lock);
+
+ return IRQ_HANDLED;
+}
+
+static void envelope_detector_setup_compare(struct envelope *env)
+{
+ int ret;
+
+ /*
+ * Do a binary search for the peak input level, and stop
+ * when that level is "trapped" between two adjacent DAC
+ * values.
+ * When invert is active, use the midpoint floor so that
+ * env->level ends up as env->low when the termination
+ * criterion below is fulfilled, and use the midpoint
+ * ceiling when invert is not active so that env->level
+ * ends up as env->high in that case.
+ */
+ env->level = (env->high + env->low + !env->invert) / 2;
+
+ if (env->high == env->low + 1) {
+ complete(&env->done);
+ return;
+ }
+
+ /* Set a "safe" DAC level (if there is such a thing)... */
+ ret = iio_write_channel_raw(env->dac, env->invert ? 0 : env->dac_max);
+ if (ret < 0)
+ goto err;
+
+ /* ...clear the comparison result... */
+ envelope_detector_comp_latch(env);
+
+ /* ...set the real DAC level... */
+ ret = iio_write_channel_raw(env->dac, env->level);
+ if (ret < 0)
+ goto err;
+
+ /* ...and wait for a bit to see if the latch catches anything. */
+ schedule_delayed_work(&env->comp_timeout,
+ msecs_to_jiffies(env->comp_interval));
+ return;
+
+err:
+ env->level = ret;
+ complete(&env->done);
+}
+
+static void envelope_detector_timeout(struct work_struct *work)
+{
+ struct envelope *env = container_of(work, struct envelope,
+ comp_timeout.work);
+
+ /* Adjust low/high depending on the latch content... */
+ if (!envelope_detector_comp_latch(env) ^ !env->invert)
+ env->low = env->level;
+ else
+ env->high = env->level;
+
+ /* ...and continue the search. */
+ envelope_detector_setup_compare(env);
+}
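
Taken together, envelope_detector_setup_compare() and this timeout worker
implement a binary search, kept asynchronous so the comparator gets
comp_interval milliseconds to react at each step. A compact synchronous
sketch of the same search (illustrative only; comments describe the
non-inverted case):

	for (;;) {
		/* +!invert selects midpoint ceiling vs floor, see above */
		env->level = (env->high + env->low + !env->invert) / 2;
		if (env->high == env->low + 1)
			break;	/* level now equals high (low if inverted) */
		iio_write_channel_raw(env->dac, env->level);
		msleep(env->comp_interval);
		if (!envelope_detector_comp_latch(env) ^ !env->invert)
			env->low = env->level;	/* fired: peak above level */
		else
			env->high = env->level;	/* quiet: peak at or below */
	}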
+
+static int envelope_detector_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ struct envelope *env = iio_priv(indio_dev);
+ int ret;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ /*
+ * When invert is active, start with high=max+1 and low=0
+ * since we will end up with the low value when the
+ * termination criterion is fulfilled (rounding down). And
+ * start with high=max and low=-1 when invert is not active
+ * since we will end up with the high value in that case.
+ * This ensures that the returned value in both cases is
+ * in the same range as the DAC and is a value that has not
+ * triggered the comparator.
+ */
+ mutex_lock(&env->read_lock);
+ env->high = env->dac_max + env->invert;
+ env->low = -1 + env->invert;
+ envelope_detector_setup_compare(env);
+ wait_for_completion(&env->done);
+ if (env->level < 0) {
+ ret = env->level;
+ goto err_unlock;
+ }
+ *val = env->invert ? env->dac_max - env->level : env->level;
+ mutex_unlock(&env->read_lock);
+
+ return IIO_VAL_INT;
+
+ case IIO_CHAN_INFO_SCALE:
+ return iio_read_channel_scale(env->dac, val, val2);
+ }
+
+ return -EINVAL;
+
+err_unlock:
+ mutex_unlock(&env->read_lock);
+ return ret;
+}
+
+static ssize_t envelope_show_invert(struct iio_dev *indio_dev,
+ uintptr_t private,
+ struct iio_chan_spec const *ch, char *buf)
+{
+ struct envelope *env = iio_priv(indio_dev);
+
+ return sprintf(buf, "%u\n", env->invert);
+}
+
+static ssize_t envelope_store_invert(struct iio_dev *indio_dev,
+ uintptr_t private,
+ struct iio_chan_spec const *ch,
+ const char *buf, size_t len)
+{
+ struct envelope *env = iio_priv(indio_dev);
+ unsigned long invert;
+ int ret;
+ u32 trigger;
+
+ ret = kstrtoul(buf, 0, &invert);
+ if (ret < 0)
+ return ret;
+ if (invert > 1)
+ return -EINVAL;
+
+ trigger = invert ? env->comp_irq_trigger_inv : env->comp_irq_trigger;
+
+ mutex_lock(&env->read_lock);
+ if (invert != env->invert)
+ ret = irq_set_irq_type(env->comp_irq, trigger);
+ if (!ret) {
+ env->invert = invert;
+ ret = len;
+ }
+ mutex_unlock(&env->read_lock);
+
+ return ret;
+}
+
+static ssize_t envelope_show_comp_interval(struct iio_dev *indio_dev,
+ uintptr_t private,
+ struct iio_chan_spec const *ch,
+ char *buf)
+{
+ struct envelope *env = iio_priv(indio_dev);
+
+ return sprintf(buf, "%u\n", env->comp_interval);
+}
+
+static ssize_t envelope_store_comp_interval(struct iio_dev *indio_dev,
+ uintptr_t private,
+ struct iio_chan_spec const *ch,
+ const char *buf, size_t len)
+{
+ struct envelope *env = iio_priv(indio_dev);
+ unsigned long interval;
+ int ret;
+
+ ret = kstrtoul(buf, 0, &interval);
+ if (ret < 0)
+ return ret;
+ if (interval > 1000)
+ return -EINVAL;
+
+ mutex_lock(&env->read_lock);
+ env->comp_interval = interval;
+ mutex_unlock(&env->read_lock);
+
+ return len;
+}
+
+static const struct iio_chan_spec_ext_info envelope_detector_ext_info[] = {
+ { .name = "invert",
+ .read = envelope_show_invert,
+ .write = envelope_store_invert, },
+ { .name = "compare_interval",
+ .read = envelope_show_comp_interval,
+ .write = envelope_store_comp_interval, },
+ { /* sentinel */ }
+};
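
With an indexed IIO_ALTVOLTAGE channel, these entries would typically show up
in sysfs as in_altvoltage0_invert and in_altvoltage0_compare_interval (names
inferred from the usual IIO ext_info naming, not spelled out in this patch);
writing the invert attribute flips the comparator interrupt polarity through
irq_set_irq_type() as implemented above.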
+
+static const struct iio_chan_spec envelope_detector_iio_channel = {
+ .type = IIO_ALTVOLTAGE,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW)
+ | BIT(IIO_CHAN_INFO_SCALE),
+ .ext_info = envelope_detector_ext_info,
+ .indexed = 1,
+};
+
+static const struct iio_info envelope_detector_info = {
+ .read_raw = &envelope_detector_read_raw,
+ .driver_module = THIS_MODULE,
+};
+
+static int envelope_detector_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct iio_dev *indio_dev;
+ struct envelope *env;
+ enum iio_chan_type type;
+ int ret;
+
+ indio_dev = devm_iio_device_alloc(dev, sizeof(*env));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, indio_dev);
+ env = iio_priv(indio_dev);
+ env->comp_interval = 50; /* some sensible default? */
+
+ spin_lock_init(&env->comp_lock);
+ mutex_init(&env->read_lock);
+ init_completion(&env->done);
+ INIT_DELAYED_WORK(&env->comp_timeout, envelope_detector_timeout);
+
+ indio_dev->name = dev_name(dev);
+ indio_dev->dev.parent = dev;
+ indio_dev->dev.of_node = dev->of_node;
+ indio_dev->info = &envelope_detector_info;
+ indio_dev->channels = &envelope_detector_iio_channel;
+ indio_dev->num_channels = 1;
+
+ env->dac = devm_iio_channel_get(dev, "dac");
+ if (IS_ERR(env->dac)) {
+ if (PTR_ERR(env->dac) != -EPROBE_DEFER)
+ dev_err(dev, "failed to get dac input channel\n");
+ return PTR_ERR(env->dac);
+ }
+
+ env->comp_irq = platform_get_irq_byname(pdev, "comp");
+ if (env->comp_irq < 0) {
+ if (env->comp_irq != -EPROBE_DEFER)
+ dev_err(dev, "failed to get compare interrupt\n");
+ return env->comp_irq;
+ }
+
+ ret = devm_request_irq(dev, env->comp_irq, envelope_detector_comp_isr,
+ 0, "envelope-detector", env);
+ if (ret) {
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "failed to request interrupt\n");
+ return ret;
+ }
+ env->comp_irq_trigger = irq_get_trigger_type(env->comp_irq);
+ if (env->comp_irq_trigger & IRQF_TRIGGER_RISING)
+ env->comp_irq_trigger_inv |= IRQF_TRIGGER_FALLING;
+ if (env->comp_irq_trigger & IRQF_TRIGGER_FALLING)
+ env->comp_irq_trigger_inv |= IRQF_TRIGGER_RISING;
+ if (env->comp_irq_trigger & IRQF_TRIGGER_HIGH)
+ env->comp_irq_trigger_inv |= IRQF_TRIGGER_LOW;
+ if (env->comp_irq_trigger & IRQF_TRIGGER_LOW)
+ env->comp_irq_trigger_inv |= IRQF_TRIGGER_HIGH;
+
+ ret = iio_get_channel_type(env->dac, &type);
+ if (ret < 0)
+ return ret;
+
+ if (type != IIO_VOLTAGE) {
+ dev_err(dev, "dac is of the wrong type\n");
+ return -EINVAL;
+ }
+
+ ret = iio_read_max_channel_raw(env->dac, &env->dac_max);
+ if (ret < 0) {
+ dev_err(dev, "dac does not indicate its raw maximum value\n");
+ return ret;
+ }
+
+ return devm_iio_device_register(dev, indio_dev);
+}
+
+static const struct of_device_id envelope_detector_match[] = {
+ { .compatible = "axentia,tse850-envelope-detector", },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, envelope_detector_match);
+
+static struct platform_driver envelope_detector_driver = {
+ .probe = envelope_detector_probe,
+ .driver = {
+ .name = "iio-envelope-detector",
+ .of_match_table = envelope_detector_match,
+ },
+};
+module_platform_driver(envelope_detector_driver);
+
+MODULE_DESCRIPTION("Envelope detector using a DAC and a comparator");
+MODULE_AUTHOR("Peter Rosin <peda@axentia.se>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/adc/max1027.c b/drivers/iio/adc/max1027.c
index 712fbd2b1f16..3b7c4f78f37a 100644
--- a/drivers/iio/adc/max1027.c
+++ b/drivers/iio/adc/max1027.c
@@ -238,7 +238,9 @@ static int max1027_read_single_value(struct iio_dev *indio_dev,
/* Configure conversion register with the requested chan */
st->reg = MAX1027_CONV_REG | MAX1027_CHAN(chan->channel) |
- MAX1027_NOSCAN | !!(chan->type == IIO_TEMP);
+ MAX1027_NOSCAN;
+ if (chan->type == IIO_TEMP)
+ st->reg |= MAX1027_TEMP;
ret = spi_write(st->spi, &st->reg, 1);
if (ret < 0) {
dev_err(&indio_dev->dev,
@@ -360,17 +362,6 @@ static int max1027_set_trigger_state(struct iio_trigger *trig, bool state)
return 0;
}
-static int max1027_validate_device(struct iio_trigger *trig,
- struct iio_dev *indio_dev)
-{
- struct iio_dev *indio = iio_trigger_get_drvdata(trig);
-
- if (indio != indio_dev)
- return -EINVAL;
-
- return 0;
-}
-
static irqreturn_t max1027_trigger_handler(int irq, void *private)
{
struct iio_poll_func *pf = (struct iio_poll_func *)private;
@@ -391,7 +382,7 @@ static irqreturn_t max1027_trigger_handler(int irq, void *private)
static const struct iio_trigger_ops max1027_trigger_ops = {
.owner = THIS_MODULE,
- .validate_device = &max1027_validate_device,
+ .validate_device = &iio_trigger_validate_own_device,
.set_trigger_state = &max1027_set_trigger_state,
};
diff --git a/drivers/iio/adc/stm32-adc-core.c b/drivers/iio/adc/stm32-adc-core.c
new file mode 100644
index 000000000000..4214b0cd6b1b
--- /dev/null
+++ b/drivers/iio/adc/stm32-adc-core.c
@@ -0,0 +1,303 @@
+/*
+ * This file is part of STM32 ADC driver
+ *
+ * Copyright (C) 2016, STMicroelectronics - All Rights Reserved
+ * Author: Fabrice Gasnier <fabrice.gasnier@st.com>.
+ *
+ * Inspired from: fsl-imx25-tsadc
+ *
+ * License type: GPLv2
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/clk.h>
+#include <linux/interrupt.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/irqdesc.h>
+#include <linux/irqdomain.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/regulator/consumer.h>
+#include <linux/slab.h>
+
+#include "stm32-adc-core.h"
+
+/* STM32F4 - common registers for all ADC instances: 1, 2 & 3 */
+#define STM32F4_ADC_CSR (STM32_ADCX_COMN_OFFSET + 0x00)
+#define STM32F4_ADC_CCR (STM32_ADCX_COMN_OFFSET + 0x04)
+
+/* STM32F4_ADC_CSR - bit fields */
+#define STM32F4_EOC3 BIT(17)
+#define STM32F4_EOC2 BIT(9)
+#define STM32F4_EOC1 BIT(1)
+
+/* STM32F4_ADC_CCR - bit fields */
+#define STM32F4_ADC_ADCPRE_SHIFT 16
+#define STM32F4_ADC_ADCPRE_MASK GENMASK(17, 16)
+
+/* STM32 F4 maximum analog clock rate (from datasheet) */
+#define STM32F4_ADC_MAX_CLK_RATE 36000000
+
+/**
+ * struct stm32_adc_priv - stm32 ADC core private data
+ * @irq: irq for ADC block
+ * @domain: irq domain reference
+ * @aclk: clock reference for the analog circuitry
+ * @vref: regulator reference
+ * @common: common data for all ADC instances
+ */
+struct stm32_adc_priv {
+ int irq;
+ struct irq_domain *domain;
+ struct clk *aclk;
+ struct regulator *vref;
+ struct stm32_adc_common common;
+};
+
+static struct stm32_adc_priv *to_stm32_adc_priv(struct stm32_adc_common *com)
+{
+ return container_of(com, struct stm32_adc_priv, common);
+}
+
+/* STM32F4 ADC internal common clock prescaler division ratios */
+static int stm32f4_pclk_div[] = {2, 4, 6, 8};
+
+/**
+ * stm32f4_adc_clk_sel() - Select stm32f4 ADC common clock prescaler
+ * @pdev: platform device for the ADC block
+ * @priv: stm32 ADC core private data
+ *
+ * Select the clock prescaler used for analog conversions, before using ADC.
+ */
+static int stm32f4_adc_clk_sel(struct platform_device *pdev,
+ struct stm32_adc_priv *priv)
+{
+ unsigned long rate;
+ u32 val;
+ int i;
+
+ rate = clk_get_rate(priv->aclk);
+ for (i = 0; i < ARRAY_SIZE(stm32f4_pclk_div); i++) {
+ if ((rate / stm32f4_pclk_div[i]) <= STM32F4_ADC_MAX_CLK_RATE)
+ break;
+ }
+ if (i >= ARRAY_SIZE(stm32f4_pclk_div))
+ return -EINVAL;
+
+ val = readl_relaxed(priv->common.base + STM32F4_ADC_CCR);
+ val &= ~STM32F4_ADC_ADCPRE_MASK;
+ val |= i << STM32F4_ADC_ADCPRE_SHIFT;
+ writel_relaxed(val, priv->common.base + STM32F4_ADC_CCR);
+
+ dev_dbg(&pdev->dev, "Using analog clock source at %ld kHz\n",
+ rate / (stm32f4_pclk_div[i] * 1000));
+
+ return 0;
+}
+
+/* ADC common interrupt for all instances */
+static void stm32_adc_irq_handler(struct irq_desc *desc)
+{
+ struct stm32_adc_priv *priv = irq_desc_get_handler_data(desc);
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ u32 status;
+
+ chained_irq_enter(chip, desc);
+ status = readl_relaxed(priv->common.base + STM32F4_ADC_CSR);
+
+ if (status & STM32F4_EOC1)
+ generic_handle_irq(irq_find_mapping(priv->domain, 0));
+
+ if (status & STM32F4_EOC2)
+ generic_handle_irq(irq_find_mapping(priv->domain, 1));
+
+ if (status & STM32F4_EOC3)
+ generic_handle_irq(irq_find_mapping(priv->domain, 2));
+
+ chained_irq_exit(chip, desc);
+}
+
+static int stm32_adc_domain_map(struct irq_domain *d, unsigned int irq,
+ irq_hw_number_t hwirq)
+{
+ irq_set_chip_data(irq, d->host_data);
+ irq_set_chip_and_handler(irq, &dummy_irq_chip, handle_level_irq);
+
+ return 0;
+}
+
+static void stm32_adc_domain_unmap(struct irq_domain *d, unsigned int irq)
+{
+ irq_set_chip_and_handler(irq, NULL, NULL);
+ irq_set_chip_data(irq, NULL);
+}
+
+static const struct irq_domain_ops stm32_adc_domain_ops = {
+ .map = stm32_adc_domain_map,
+ .unmap = stm32_adc_domain_unmap,
+ .xlate = irq_domain_xlate_onecell,
+};
+
+static int stm32_adc_irq_probe(struct platform_device *pdev,
+ struct stm32_adc_priv *priv)
+{
+ struct device_node *np = pdev->dev.of_node;
+
+ priv->irq = platform_get_irq(pdev, 0);
+ if (priv->irq < 0) {
+ dev_err(&pdev->dev, "failed to get irq\n");
+ return priv->irq;
+ }
+
+ priv->domain = irq_domain_add_simple(np, STM32_ADC_MAX_ADCS, 0,
+ &stm32_adc_domain_ops,
+ priv);
+ if (!priv->domain) {
+ dev_err(&pdev->dev, "Failed to add irq domain\n");
+ return -ENOMEM;
+ }
+
+ irq_set_chained_handler(priv->irq, stm32_adc_irq_handler);
+ irq_set_handler_data(priv->irq, priv);
+
+ return 0;
+}
+
+static void stm32_adc_irq_remove(struct platform_device *pdev,
+ struct stm32_adc_priv *priv)
+{
+ int hwirq;
+
+ for (hwirq = 0; hwirq < STM32_ADC_MAX_ADCS; hwirq++)
+ irq_dispose_mapping(irq_find_mapping(priv->domain, hwirq));
+ irq_domain_remove(priv->domain);
+ irq_set_chained_handler(priv->irq, NULL);
+}
+
+static int stm32_adc_probe(struct platform_device *pdev)
+{
+ struct stm32_adc_priv *priv;
+ struct device_node *np = pdev->dev.of_node;
+ struct resource *res;
+ int ret;
+
+ if (!pdev->dev.of_node)
+ return -ENODEV;
+
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ priv->common.base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(priv->common.base))
+ return PTR_ERR(priv->common.base);
+
+ priv->vref = devm_regulator_get(&pdev->dev, "vref");
+ if (IS_ERR(priv->vref)) {
+ ret = PTR_ERR(priv->vref);
+ dev_err(&pdev->dev, "vref get failed, %d\n", ret);
+ return ret;
+ }
+
+ ret = regulator_enable(priv->vref);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "vref enable failed\n");
+ return ret;
+ }
+
+ ret = regulator_get_voltage(priv->vref);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "vref get voltage failed, %d\n", ret);
+ goto err_regulator_disable;
+ }
+ priv->common.vref_mv = ret / 1000;
+ dev_dbg(&pdev->dev, "vref+=%dmV\n", priv->common.vref_mv);
+
+ priv->aclk = devm_clk_get(&pdev->dev, "adc");
+ if (IS_ERR(priv->aclk)) {
+ ret = PTR_ERR(priv->aclk);
+ dev_err(&pdev->dev, "Can't get 'adc' clock\n");
+ goto err_regulator_disable;
+ }
+
+ ret = clk_prepare_enable(priv->aclk);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "adc clk enable failed\n");
+ goto err_regulator_disable;
+ }
+
+ ret = stm32f4_adc_clk_sel(pdev, priv);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "adc clk selection failed\n");
+ goto err_clk_disable;
+ }
+
+ ret = stm32_adc_irq_probe(pdev, priv);
+ if (ret < 0)
+ goto err_clk_disable;
+
+ platform_set_drvdata(pdev, &priv->common);
+
+ ret = of_platform_populate(np, NULL, NULL, &pdev->dev);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to populate DT children\n");
+ goto err_irq_remove;
+ }
+
+ return 0;
+
+err_irq_remove:
+ stm32_adc_irq_remove(pdev, priv);
+
+err_clk_disable:
+ clk_disable_unprepare(priv->aclk);
+
+err_regulator_disable:
+ regulator_disable(priv->vref);
+
+ return ret;
+}
+
+static int stm32_adc_remove(struct platform_device *pdev)
+{
+ struct stm32_adc_common *common = platform_get_drvdata(pdev);
+ struct stm32_adc_priv *priv = to_stm32_adc_priv(common);
+
+ of_platform_depopulate(&pdev->dev);
+ stm32_adc_irq_remove(pdev, priv);
+ clk_disable_unprepare(priv->aclk);
+ regulator_disable(priv->vref);
+
+ return 0;
+}
+
+static const struct of_device_id stm32_adc_of_match[] = {
+ { .compatible = "st,stm32f4-adc-core" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, stm32_adc_of_match);
+
+static struct platform_driver stm32_adc_driver = {
+ .probe = stm32_adc_probe,
+ .remove = stm32_adc_remove,
+ .driver = {
+ .name = "stm32-adc-core",
+ .of_match_table = stm32_adc_of_match,
+ },
+};
+module_platform_driver(stm32_adc_driver);
+
+MODULE_AUTHOR("Fabrice Gasnier <fabrice.gasnier@st.com>");
+MODULE_DESCRIPTION("STMicroelectronics STM32 ADC core driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:stm32-adc-core");
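
stm32f4_adc_clk_sel() above walks stm32f4_pclk_div until the divided input
clock fits under STM32F4_ADC_MAX_CLK_RATE, then writes the table index
straight into the ADCPRE field. A minimal standalone sketch of the same
computation; the 84 MHz input rate is an assumption, not from the driver:

#include <stdio.h>

#define STM32F4_ADC_MAX_CLK_RATE 36000000UL
#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

static const int stm32f4_pclk_div[] = {2, 4, 6, 8};

int main(void)
{
	unsigned long rate = 84000000UL;	/* assumed APB2 input clock */
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(stm32f4_pclk_div); i++)
		if (rate / stm32f4_pclk_div[i] <= STM32F4_ADC_MAX_CLK_RATE)
			break;

	if (i >= ARRAY_SIZE(stm32f4_pclk_div))
		return 1;	/* no suitable prescaler */

	/* i doubles as the ADCPRE field value written to STM32F4_ADC_CCR */
	printf("divider /%d -> %lu Hz (ADCPRE = %u)\n",
	       stm32f4_pclk_div[i], rate / stm32f4_pclk_div[i], i);
	return 0;
}

With an 84 MHz clock this picks /4 (21 MHz), since /2 would exceed the
36 MHz cap.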
diff --git a/drivers/iio/adc/stm32-adc-core.h b/drivers/iio/adc/stm32-adc-core.h
new file mode 100644
index 000000000000..081fa5f55015
--- /dev/null
+++ b/drivers/iio/adc/stm32-adc-core.h
@@ -0,0 +1,52 @@
+/*
+ * This file is part of STM32 ADC driver
+ *
+ * Copyright (C) 2016, STMicroelectronics - All Rights Reserved
+ * Author: Fabrice Gasnier <fabrice.gasnier@st.com>.
+ *
+ * License type: GPLv2
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __STM32_ADC_H
+#define __STM32_ADC_H
+
+/*
+ * STM32 - ADC global register map
+ * ________________________________________________________
+ * | Offset | Register |
+ * --------------------------------------------------------
+ * | 0x000 | Master ADC1 |
+ * --------------------------------------------------------
+ * | 0x100 | Slave ADC2 |
+ * --------------------------------------------------------
+ * | 0x200 | Slave ADC3 |
+ * --------------------------------------------------------
+ * | 0x300 | Master & Slave common regs |
+ * --------------------------------------------------------
+ */
+#define STM32_ADC_MAX_ADCS 3
+#define STM32_ADCX_COMN_OFFSET 0x300
+
+/**
+ * struct stm32_adc_common - stm32 ADC driver common data (for all instances)
+ * @base: control registers base cpu addr
+ * @vref_mv: vref voltage (mv)
+ */
+struct stm32_adc_common {
+ void __iomem *base;
+ int vref_mv;
+};
+
+#endif
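
The register map documented above implies simple address arithmetic: each
ADC instance occupies a 0x100-sized slot and the shared registers start at
STM32_ADCX_COMN_OFFSET. A sketch of that arithmetic; the helper names and
the example base address are illustrative, not part of the driver:

#include <stdint.h>
#include <stdio.h>

#define STM32_ADCX_COMN_OFFSET 0x300
#define STM32_ADC_INST_SIZE 0x100	/* one instance slot, per the map */

/* Per-instance register address: adc is 0 (ADC1), 1 (ADC2) or 2 (ADC3) */
static uintptr_t adc_inst_reg(uintptr_t base, unsigned int adc, uint32_t reg)
{
	return base + adc * STM32_ADC_INST_SIZE + reg;
}

/* Common register address, shared by all three instances */
static uintptr_t adc_comn_reg(uintptr_t base, uint32_t reg)
{
	return base + STM32_ADCX_COMN_OFFSET + reg;
}

int main(void)
{
	uintptr_t base = 0x40012000;	/* example mapping, illustrative only */

	printf("ADC2 SQR3 at %#lx, common CCR at %#lx\n",
	       (unsigned long)adc_inst_reg(base, 1, 0x34),
	       (unsigned long)adc_comn_reg(base, 0x04));
	return 0;
}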
diff --git a/drivers/iio/adc/stm32-adc.c b/drivers/iio/adc/stm32-adc.c
new file mode 100644
index 000000000000..5715e79f4935
--- /dev/null
+++ b/drivers/iio/adc/stm32-adc.c
@@ -0,0 +1,518 @@
+/*
+ * This file is part of STM32 ADC driver
+ *
+ * Copyright (C) 2016, STMicroelectronics - All Rights Reserved
+ * Author: Fabrice Gasnier <fabrice.gasnier@st.com>.
+ *
+ * License type: GPLv2
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/iio/iio.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+
+#include "stm32-adc-core.h"
+
+/* STM32F4 - Registers for each ADC instance */
+#define STM32F4_ADC_SR 0x00
+#define STM32F4_ADC_CR1 0x04
+#define STM32F4_ADC_CR2 0x08
+#define STM32F4_ADC_SMPR1 0x0C
+#define STM32F4_ADC_SMPR2 0x10
+#define STM32F4_ADC_HTR 0x24
+#define STM32F4_ADC_LTR 0x28
+#define STM32F4_ADC_SQR1 0x2C
+#define STM32F4_ADC_SQR2 0x30
+#define STM32F4_ADC_SQR3 0x34
+#define STM32F4_ADC_JSQR 0x38
+#define STM32F4_ADC_JDR1 0x3C
+#define STM32F4_ADC_JDR2 0x40
+#define STM32F4_ADC_JDR3 0x44
+#define STM32F4_ADC_JDR4 0x48
+#define STM32F4_ADC_DR 0x4C
+
+/* STM32F4_ADC_SR - bit fields */
+#define STM32F4_STRT BIT(4)
+#define STM32F4_EOC BIT(1)
+
+/* STM32F4_ADC_CR1 - bit fields */
+#define STM32F4_SCAN BIT(8)
+#define STM32F4_EOCIE BIT(5)
+
+/* STM32F4_ADC_CR2 - bit fields */
+#define STM32F4_SWSTART BIT(30)
+#define STM32F4_EXTEN_MASK GENMASK(29, 28)
+#define STM32F4_EOCS BIT(10)
+#define STM32F4_ADON BIT(0)
+
+/* STM32F4_ADC_SQR1 - bit fields */
+#define STM32F4_L_SHIFT 20
+#define STM32F4_L_MASK GENMASK(23, 20)
+
+/* STM32F4_ADC_SQR3 - bit fields */
+#define STM32F4_SQ1_SHIFT 0
+#define STM32F4_SQ1_MASK GENMASK(4, 0)
+
+#define STM32_ADC_TIMEOUT_US 100000
+#define STM32_ADC_TIMEOUT (msecs_to_jiffies(STM32_ADC_TIMEOUT_US / 1000))
+
+/**
+ * struct stm32_adc - private data of each ADC IIO instance
+ * @common: reference to ADC block common data
+ * @offset: ADC instance register offset in ADC block
+ * @completion: end of single conversion completion
+ * @buffer: data buffer
+ * @clk: clock for this adc instance
+ * @irq: interrupt for this adc instance
+ * @lock: spinlock
+ */
+struct stm32_adc {
+ struct stm32_adc_common *common;
+ u32 offset;
+ struct completion completion;
+ u16 *buffer;
+ struct clk *clk;
+ int irq;
+ spinlock_t lock; /* interrupt lock */
+};
+
+/**
+ * struct stm32_adc_chan_spec - specification of stm32 adc channel
+ * @type: IIO channel type
+ * @channel: channel number (single ended)
+ * @name: channel name (single ended)
+ */
+struct stm32_adc_chan_spec {
+ enum iio_chan_type type;
+ int channel;
+ const char *name;
+};
+
+/* Input definitions common for all STM32F4 instances */
+static const struct stm32_adc_chan_spec stm32f4_adc123_channels[] = {
+ { IIO_VOLTAGE, 0, "in0" },
+ { IIO_VOLTAGE, 1, "in1" },
+ { IIO_VOLTAGE, 2, "in2" },
+ { IIO_VOLTAGE, 3, "in3" },
+ { IIO_VOLTAGE, 4, "in4" },
+ { IIO_VOLTAGE, 5, "in5" },
+ { IIO_VOLTAGE, 6, "in6" },
+ { IIO_VOLTAGE, 7, "in7" },
+ { IIO_VOLTAGE, 8, "in8" },
+ { IIO_VOLTAGE, 9, "in9" },
+ { IIO_VOLTAGE, 10, "in10" },
+ { IIO_VOLTAGE, 11, "in11" },
+ { IIO_VOLTAGE, 12, "in12" },
+ { IIO_VOLTAGE, 13, "in13" },
+ { IIO_VOLTAGE, 14, "in14" },
+ { IIO_VOLTAGE, 15, "in15" },
+};
+
+/**
+ * STM32 ADC register access routines
+ * @adc: stm32 adc instance
+ * @reg: reg offset in adc instance
+ *
+ * Note: All instances share the same base address, at offsets 0x0, 0x100
+ * and 0x200 respectively for adc1, adc2 and adc3.
+ */
+static u32 stm32_adc_readl(struct stm32_adc *adc, u32 reg)
+{
+ return readl_relaxed(adc->common->base + adc->offset + reg);
+}
+
+static u16 stm32_adc_readw(struct stm32_adc *adc, u32 reg)
+{
+ return readw_relaxed(adc->common->base + adc->offset + reg);
+}
+
+static void stm32_adc_writel(struct stm32_adc *adc, u32 reg, u32 val)
+{
+ writel_relaxed(val, adc->common->base + adc->offset + reg);
+}
+
+static void stm32_adc_set_bits(struct stm32_adc *adc, u32 reg, u32 bits)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&adc->lock, flags);
+ stm32_adc_writel(adc, reg, stm32_adc_readl(adc, reg) | bits);
+ spin_unlock_irqrestore(&adc->lock, flags);
+}
+
+static void stm32_adc_clr_bits(struct stm32_adc *adc, u32 reg, u32 bits)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&adc->lock, flags);
+ stm32_adc_writel(adc, reg, stm32_adc_readl(adc, reg) & ~bits);
+ spin_unlock_irqrestore(&adc->lock, flags);
+}
+
+/**
+ * stm32_adc_conv_irq_enable() - Enable end of conversion interrupt
+ * @adc: stm32 adc instance
+ */
+static void stm32_adc_conv_irq_enable(struct stm32_adc *adc)
+{
+ stm32_adc_set_bits(adc, STM32F4_ADC_CR1, STM32F4_EOCIE);
+}
+
+/**
+ * stm32_adc_conv_irq_disable() - Disable end of conversion interrupt
+ * @adc: stm32 adc instance
+ */
+static void stm32_adc_conv_irq_disable(struct stm32_adc *adc)
+{
+ stm32_adc_clr_bits(adc, STM32F4_ADC_CR1, STM32F4_EOCIE);
+}
+
+/**
+ * stm32_adc_start_conv() - Start conversions for regular channels.
+ * @adc: stm32 adc instance
+ */
+static void stm32_adc_start_conv(struct stm32_adc *adc)
+{
+ stm32_adc_set_bits(adc, STM32F4_ADC_CR1, STM32F4_SCAN);
+ stm32_adc_set_bits(adc, STM32F4_ADC_CR2, STM32F4_EOCS | STM32F4_ADON);
+
+ /* Wait for Power-up time (tSTAB from datasheet) */
+ usleep_range(2, 3);
+
+ /* Software start? (e.g. trigger detection disabled?) */
+ if (!(stm32_adc_readl(adc, STM32F4_ADC_CR2) & STM32F4_EXTEN_MASK))
+ stm32_adc_set_bits(adc, STM32F4_ADC_CR2, STM32F4_SWSTART);
+}
+
+static void stm32_adc_stop_conv(struct stm32_adc *adc)
+{
+ stm32_adc_clr_bits(adc, STM32F4_ADC_CR2, STM32F4_EXTEN_MASK);
+ stm32_adc_clr_bits(adc, STM32F4_ADC_SR, STM32F4_STRT);
+
+ stm32_adc_clr_bits(adc, STM32F4_ADC_CR1, STM32F4_SCAN);
+ stm32_adc_clr_bits(adc, STM32F4_ADC_CR2, STM32F4_ADON);
+}
+
+/**
+ * stm32_adc_single_conv() - Performs a single conversion
+ * @indio_dev: IIO device
+ * @chan: IIO channel
+ * @res: conversion result
+ *
+ * The function performs a single conversion on a given channel:
+ * - Program sequencer with one channel (e.g. in SQ1 with len = 1)
+ * - Use SW trigger
+ * - Start conversion, then wait for interrupt completion.
+ */
+static int stm32_adc_single_conv(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ int *res)
+{
+ struct stm32_adc *adc = iio_priv(indio_dev);
+ long timeout;
+ u32 val;
+ u16 result;
+ int ret;
+
+ reinit_completion(&adc->completion);
+
+ adc->buffer = &result;
+
+ /* Program chan number in regular sequence */
+ val = stm32_adc_readl(adc, STM32F4_ADC_SQR3);
+ val &= ~STM32F4_SQ1_MASK;
+ val |= chan->channel << STM32F4_SQ1_SHIFT;
+ stm32_adc_writel(adc, STM32F4_ADC_SQR3, val);
+
+ /* Set regular sequence len (0 for 1 conversion) */
+ stm32_adc_clr_bits(adc, STM32F4_ADC_SQR1, STM32F4_L_MASK);
+
+ /* Trigger detection disabled (conversion can be launched in SW) */
+ stm32_adc_clr_bits(adc, STM32F4_ADC_CR2, STM32F4_EXTEN_MASK);
+
+ stm32_adc_conv_irq_enable(adc);
+
+ stm32_adc_start_conv(adc);
+
+ timeout = wait_for_completion_interruptible_timeout(
+ &adc->completion, STM32_ADC_TIMEOUT);
+ if (timeout == 0) {
+ ret = -ETIMEDOUT;
+ } else if (timeout < 0) {
+ ret = timeout;
+ } else {
+ *res = result;
+ ret = IIO_VAL_INT;
+ }
+
+ stm32_adc_stop_conv(adc);
+
+ stm32_adc_conv_irq_disable(adc);
+
+ return ret;
+}
+
+static int stm32_adc_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ struct stm32_adc *adc = iio_priv(indio_dev);
+ int ret;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ ret = iio_device_claim_direct_mode(indio_dev);
+ if (ret)
+ return ret;
+ if (chan->type == IIO_VOLTAGE)
+ ret = stm32_adc_single_conv(indio_dev, chan, val);
+ else
+ ret = -EINVAL;
+ iio_device_release_direct_mode(indio_dev);
+ return ret;
+
+ case IIO_CHAN_INFO_SCALE:
+ *val = adc->common->vref_mv;
+ *val2 = chan->scan_type.realbits;
+ return IIO_VAL_FRACTIONAL_LOG2;
+
+ default:
+ return -EINVAL;
+ }
+}
+
+static irqreturn_t stm32_adc_isr(int irq, void *data)
+{
+ struct stm32_adc *adc = data;
+ u32 status = stm32_adc_readl(adc, STM32F4_ADC_SR);
+
+ if (status & STM32F4_EOC) {
+ *adc->buffer = stm32_adc_readw(adc, STM32F4_ADC_DR);
+ complete(&adc->completion);
+ return IRQ_HANDLED;
+ }
+
+ return IRQ_NONE;
+}
+
+static int stm32_adc_of_xlate(struct iio_dev *indio_dev,
+ const struct of_phandle_args *iiospec)
+{
+ int i;
+
+ for (i = 0; i < indio_dev->num_channels; i++)
+ if (indio_dev->channels[i].channel == iiospec->args[0])
+ return i;
+
+ return -EINVAL;
+}
+
+/**
+ * stm32_adc_debugfs_reg_access - read or write register value
+ *
+ * To read a value from an ADC register:
+ * echo [ADC reg offset] > direct_reg_access
+ * cat direct_reg_access
+ *
+ * To write a value into an ADC register:
+ * echo [ADC_reg_offset] [value] > direct_reg_access
+ */
+static int stm32_adc_debugfs_reg_access(struct iio_dev *indio_dev,
+ unsigned reg, unsigned writeval,
+ unsigned *readval)
+{
+ struct stm32_adc *adc = iio_priv(indio_dev);
+
+ if (!readval)
+ stm32_adc_writel(adc, reg, writeval);
+ else
+ *readval = stm32_adc_readl(adc, reg);
+
+ return 0;
+}
+
+static const struct iio_info stm32_adc_iio_info = {
+ .read_raw = stm32_adc_read_raw,
+ .debugfs_reg_access = stm32_adc_debugfs_reg_access,
+ .of_xlate = stm32_adc_of_xlate,
+ .driver_module = THIS_MODULE,
+};
+
+static void stm32_adc_chan_init_one(struct iio_dev *indio_dev,
+ struct iio_chan_spec *chan,
+ const struct stm32_adc_chan_spec *channel,
+ int scan_index)
+{
+ chan->type = channel->type;
+ chan->channel = channel->channel;
+ chan->datasheet_name = channel->name;
+ chan->scan_index = scan_index;
+ chan->indexed = 1;
+ chan->info_mask_separate = BIT(IIO_CHAN_INFO_RAW);
+ chan->info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE);
+ chan->scan_type.sign = 'u';
+ chan->scan_type.realbits = 12;
+ chan->scan_type.storagebits = 16;
+}
+
+static int stm32_adc_chan_of_init(struct iio_dev *indio_dev)
+{
+ struct device_node *node = indio_dev->dev.of_node;
+ struct property *prop;
+ const __be32 *cur;
+ struct iio_chan_spec *channels;
+ int scan_index = 0, num_channels;
+ u32 val;
+
+ num_channels = of_property_count_u32_elems(node, "st,adc-channels");
+ if (num_channels < 0 ||
+ num_channels > ARRAY_SIZE(stm32f4_adc123_channels)) {
+ dev_err(&indio_dev->dev, "Bad st,adc-channels?\n");
+ return num_channels < 0 ? num_channels : -EINVAL;
+ }
+
+ channels = devm_kcalloc(&indio_dev->dev, num_channels,
+ sizeof(struct iio_chan_spec), GFP_KERNEL);
+ if (!channels)
+ return -ENOMEM;
+
+ of_property_for_each_u32(node, "st,adc-channels", prop, cur, val) {
+ if (val >= ARRAY_SIZE(stm32f4_adc123_channels)) {
+ dev_err(&indio_dev->dev, "Invalid channel %d\n", val);
+ return -EINVAL;
+ }
+ stm32_adc_chan_init_one(indio_dev, &channels[scan_index],
+ &stm32f4_adc123_channels[val],
+ scan_index);
+ scan_index++;
+ }
+
+ indio_dev->num_channels = scan_index;
+ indio_dev->channels = channels;
+
+ return 0;
+}
+
+static int stm32_adc_probe(struct platform_device *pdev)
+{
+ struct iio_dev *indio_dev;
+ struct stm32_adc *adc;
+ int ret;
+
+ if (!pdev->dev.of_node)
+ return -ENODEV;
+
+ indio_dev = devm_iio_device_alloc(&pdev->dev, sizeof(*adc));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ adc = iio_priv(indio_dev);
+ adc->common = dev_get_drvdata(pdev->dev.parent);
+ spin_lock_init(&adc->lock);
+ init_completion(&adc->completion);
+
+ indio_dev->name = dev_name(&pdev->dev);
+ indio_dev->dev.parent = &pdev->dev;
+ indio_dev->dev.of_node = pdev->dev.of_node;
+ indio_dev->info = &stm32_adc_iio_info;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+
+ platform_set_drvdata(pdev, adc);
+
+ ret = of_property_read_u32(pdev->dev.of_node, "reg", &adc->offset);
+ if (ret != 0) {
+ dev_err(&pdev->dev, "missing reg property\n");
+ return -EINVAL;
+ }
+
+ adc->irq = platform_get_irq(pdev, 0);
+ if (adc->irq < 0) {
+ dev_err(&pdev->dev, "failed to get irq\n");
+ return adc->irq;
+ }
+
+ ret = devm_request_irq(&pdev->dev, adc->irq, stm32_adc_isr,
+ 0, pdev->name, adc);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to request IRQ\n");
+ return ret;
+ }
+
+ adc->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(adc->clk)) {
+ dev_err(&pdev->dev, "Can't get clock\n");
+ return PTR_ERR(adc->clk);
+ }
+
+ ret = clk_prepare_enable(adc->clk);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "clk enable failed\n");
+ return ret;
+ }
+
+ ret = stm32_adc_chan_of_init(indio_dev);
+ if (ret < 0)
+ goto err_clk_disable;
+
+ ret = iio_device_register(indio_dev);
+ if (ret) {
+ dev_err(&pdev->dev, "iio dev register failed\n");
+ goto err_clk_disable;
+ }
+
+ return 0;
+
+err_clk_disable:
+ clk_disable_unprepare(adc->clk);
+
+ return ret;
+}
+
+static int stm32_adc_remove(struct platform_device *pdev)
+{
+ struct stm32_adc *adc = platform_get_drvdata(pdev);
+ struct iio_dev *indio_dev = iio_priv_to_dev(adc);
+
+ iio_device_unregister(indio_dev);
+ clk_disable_unprepare(adc->clk);
+
+ return 0;
+}
+
+static const struct of_device_id stm32_adc_of_match[] = {
+ { .compatible = "st,stm32f4-adc" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, stm32_adc_of_match);
+
+static struct platform_driver stm32_adc_driver = {
+ .probe = stm32_adc_probe,
+ .remove = stm32_adc_remove,
+ .driver = {
+ .name = "stm32-adc",
+ .of_match_table = stm32_adc_of_match,
+ },
+};
+module_platform_driver(stm32_adc_driver);
+
+MODULE_AUTHOR("Fabrice Gasnier <fabrice.gasnier@st.com>");
+MODULE_DESCRIPTION("STMicroelectronics STM32 ADC IIO driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:stm32-adc");
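
With the sysfs interface this driver exposes, a single conversion can be
read from userspace without any IIO-specific tooling. A hypothetical
example: the device index (iio:device0) and the presence of channel 0
depend entirely on the DT configuration and are assumptions here:

#include <stdio.h>

int main(void)
{
	FILE *f;
	int raw;
	double scale;

	/* Reading in_voltage0_raw triggers one conversion on channel 0 */
	f = fopen("/sys/bus/iio/devices/iio:device0/in_voltage0_raw", "r");
	if (!f || fscanf(f, "%d", &raw) != 1)
		return 1;
	fclose(f);

	/* scale is vref_mv / 2^12, per the IIO_CHAN_INFO_SCALE case above */
	f = fopen("/sys/bus/iio/devices/iio:device0/in_voltage_scale", "r");
	if (!f || fscanf(f, "%lf", &scale) != 1)
		return 1;
	fclose(f);

	printf("%d raw = %.3f mV\n", raw, raw * scale);
	return 0;
}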
diff --git a/drivers/iio/adc/ti-adc0832.c b/drivers/iio/adc/ti-adc0832.c
index f4ba23effe9a..e952e94a14af 100644
--- a/drivers/iio/adc/ti-adc0832.c
+++ b/drivers/iio/adc/ti-adc0832.c
@@ -14,6 +14,10 @@
#include <linux/spi/spi.h>
#include <linux/iio/iio.h>
#include <linux/regulator/consumer.h>
+#include <linux/iio/buffer.h>
+#include <linux/iio/trigger.h>
+#include <linux/iio/triggered_buffer.h>
+#include <linux/iio/trigger_consumer.h>
enum {
adc0831,
@@ -38,10 +42,16 @@ struct adc0832 {
.indexed = 1, \
.channel = chan, \
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
- .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE) \
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \
+ .scan_index = chan, \
+ .scan_type = { \
+ .sign = 'u', \
+ .realbits = 8, \
+ .storagebits = 8, \
+ }, \
}
-#define ADC0832_VOLTAGE_CHANNEL_DIFF(chan1, chan2) \
+#define ADC0832_VOLTAGE_CHANNEL_DIFF(chan1, chan2, si) \
{ \
.type = IIO_VOLTAGE, \
.indexed = 1, \
@@ -49,18 +59,26 @@ struct adc0832 {
.channel2 = (chan2), \
.differential = 1, \
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
- .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE) \
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \
+ .scan_index = si, \
+ .scan_type = { \
+ .sign = 'u', \
+ .realbits = 8, \
+ .storagebits = 8, \
+ }, \
}
static const struct iio_chan_spec adc0831_channels[] = {
- ADC0832_VOLTAGE_CHANNEL_DIFF(0, 1),
+ ADC0832_VOLTAGE_CHANNEL_DIFF(0, 1, 0),
+ IIO_CHAN_SOFT_TIMESTAMP(1),
};
static const struct iio_chan_spec adc0832_channels[] = {
ADC0832_VOLTAGE_CHANNEL(0),
ADC0832_VOLTAGE_CHANNEL(1),
- ADC0832_VOLTAGE_CHANNEL_DIFF(0, 1),
- ADC0832_VOLTAGE_CHANNEL_DIFF(1, 0),
+ ADC0832_VOLTAGE_CHANNEL_DIFF(0, 1, 2),
+ ADC0832_VOLTAGE_CHANNEL_DIFF(1, 0, 3),
+ IIO_CHAN_SOFT_TIMESTAMP(4),
};
static const struct iio_chan_spec adc0834_channels[] = {
@@ -68,10 +86,11 @@ static const struct iio_chan_spec adc0834_channels[] = {
ADC0832_VOLTAGE_CHANNEL(1),
ADC0832_VOLTAGE_CHANNEL(2),
ADC0832_VOLTAGE_CHANNEL(3),
- ADC0832_VOLTAGE_CHANNEL_DIFF(0, 1),
- ADC0832_VOLTAGE_CHANNEL_DIFF(1, 0),
- ADC0832_VOLTAGE_CHANNEL_DIFF(2, 3),
- ADC0832_VOLTAGE_CHANNEL_DIFF(3, 2),
+ ADC0832_VOLTAGE_CHANNEL_DIFF(0, 1, 4),
+ ADC0832_VOLTAGE_CHANNEL_DIFF(1, 0, 5),
+ ADC0832_VOLTAGE_CHANNEL_DIFF(2, 3, 6),
+ ADC0832_VOLTAGE_CHANNEL_DIFF(3, 2, 7),
+ IIO_CHAN_SOFT_TIMESTAMP(8),
};
static const struct iio_chan_spec adc0838_channels[] = {
@@ -83,14 +102,15 @@ static const struct iio_chan_spec adc0838_channels[] = {
ADC0832_VOLTAGE_CHANNEL(5),
ADC0832_VOLTAGE_CHANNEL(6),
ADC0832_VOLTAGE_CHANNEL(7),
- ADC0832_VOLTAGE_CHANNEL_DIFF(0, 1),
- ADC0832_VOLTAGE_CHANNEL_DIFF(1, 0),
- ADC0832_VOLTAGE_CHANNEL_DIFF(2, 3),
- ADC0832_VOLTAGE_CHANNEL_DIFF(3, 2),
- ADC0832_VOLTAGE_CHANNEL_DIFF(4, 5),
- ADC0832_VOLTAGE_CHANNEL_DIFF(5, 4),
- ADC0832_VOLTAGE_CHANNEL_DIFF(6, 7),
- ADC0832_VOLTAGE_CHANNEL_DIFF(7, 6),
+ ADC0832_VOLTAGE_CHANNEL_DIFF(0, 1, 8),
+ ADC0832_VOLTAGE_CHANNEL_DIFF(1, 0, 9),
+ ADC0832_VOLTAGE_CHANNEL_DIFF(2, 3, 10),
+ ADC0832_VOLTAGE_CHANNEL_DIFF(3, 2, 11),
+ ADC0832_VOLTAGE_CHANNEL_DIFF(4, 5, 12),
+ ADC0832_VOLTAGE_CHANNEL_DIFF(5, 4, 13),
+ ADC0832_VOLTAGE_CHANNEL_DIFF(6, 7, 14),
+ ADC0832_VOLTAGE_CHANNEL_DIFF(7, 6, 15),
+ IIO_CHAN_SOFT_TIMESTAMP(16),
};
static int adc0831_adc_conversion(struct adc0832 *adc)
@@ -178,6 +198,42 @@ static const struct iio_info adc0832_info = {
.driver_module = THIS_MODULE,
};
+static irqreturn_t adc0832_trigger_handler(int irq, void *p)
+{
+ struct iio_poll_func *pf = p;
+ struct iio_dev *indio_dev = pf->indio_dev;
+ struct adc0832 *adc = iio_priv(indio_dev);
+ u8 data[24] = { }; /* 16x 1 byte ADC data + 8 bytes timestamp */
+ int scan_index;
+ int i = 0;
+
+ mutex_lock(&adc->lock);
+
+ for_each_set_bit(scan_index, indio_dev->active_scan_mask,
+ indio_dev->masklength) {
+ const struct iio_chan_spec *scan_chan =
+ &indio_dev->channels[scan_index];
+ int ret = adc0832_adc_conversion(adc, scan_chan->channel,
+ scan_chan->differential);
+ if (ret < 0) {
+ dev_warn(&adc->spi->dev,
+ "failed to get conversion data\n");
+ goto out;
+ }
+
+ data[i] = ret;
+ i++;
+ }
+ iio_push_to_buffers_with_timestamp(indio_dev, data,
+ iio_get_time_ns(indio_dev));
+out:
+ mutex_unlock(&adc->lock);
+
+ iio_trigger_notify_done(indio_dev->trig);
+
+ return IRQ_HANDLED;
+}
+
static int adc0832_probe(struct spi_device *spi)
{
struct iio_dev *indio_dev;
@@ -233,9 +289,20 @@ static int adc0832_probe(struct spi_device *spi)
spi_set_drvdata(spi, indio_dev);
+ ret = iio_triggered_buffer_setup(indio_dev, NULL,
+ adc0832_trigger_handler, NULL);
+ if (ret)
+ goto err_reg_disable;
+
ret = iio_device_register(indio_dev);
if (ret)
- regulator_disable(adc->reg);
+ goto err_buffer_cleanup;
+
+ return 0;
+err_buffer_cleanup:
+ iio_triggered_buffer_cleanup(indio_dev);
+err_reg_disable:
+ regulator_disable(adc->reg);
return ret;
}
@@ -246,6 +313,7 @@ static int adc0832_remove(struct spi_device *spi)
struct adc0832 *adc = iio_priv(indio_dev);
iio_device_unregister(indio_dev);
+ iio_triggered_buffer_cleanup(indio_dev);
regulator_disable(adc->reg);
return 0;
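
The 24-byte buffer in adc0832_trigger_handler() exists because
iio_push_to_buffers_with_timestamp() wants the s64 timestamp in the last
naturally aligned 8 bytes of the scan. For the adc0838 worst case (16
one-byte channels) the layout works out as in this standalone sketch; the
struct and helper names are illustrative:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Worst-case adc0838 scan: 16 x u8 samples, then an aligned s64 timestamp */
struct adc0838_scan {
	uint8_t data[16];
	int64_t timestamp;	/* slot the timestamp helper fills in */
};

static void pack_scan(uint8_t buf[24], const uint8_t *samples, int n,
		      int64_t ts)
{
	struct adc0838_scan scan = { { 0 }, 0 };

	memcpy(scan.data, samples, n);	/* n <= 16 enabled channels, packed */
	scan.timestamp = ts;
	memcpy(buf, &scan, sizeof(scan));
}

int main(void)
{
	uint8_t samples[3] = { 0x11, 0x22, 0x33 };	/* three channels */
	uint8_t buf[24];

	pack_scan(buf, samples, 3, 123456789);
	printf("scan: %zu bytes, timestamp at offset %zu\n",
	       sizeof(struct adc0838_scan),
	       offsetof(struct adc0838_scan, timestamp));
	return 0;
}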
diff --git a/drivers/iio/adc/ti-adc161s626.c b/drivers/iio/adc/ti-adc161s626.c
index f94b69f9c288..4836a0d7aef5 100644
--- a/drivers/iio/adc/ti-adc161s626.c
+++ b/drivers/iio/adc/ti-adc161s626.c
@@ -27,6 +27,7 @@
#include <linux/iio/buffer.h>
#include <linux/iio/trigger_consumer.h>
#include <linux/iio/triggered_buffer.h>
+#include <linux/regulator/consumer.h>
#define TI_ADC_DRV_NAME "ti-adc161s626"
@@ -39,7 +40,9 @@ static const struct iio_chan_spec ti_adc141s626_channels[] = {
{
.type = IIO_VOLTAGE,
.channel = 0,
- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_SCALE) |
+ BIT(IIO_CHAN_INFO_OFFSET),
.scan_index = 0,
.scan_type = {
.sign = 's',
@@ -54,7 +57,9 @@ static const struct iio_chan_spec ti_adc161s626_channels[] = {
{
.type = IIO_VOLTAGE,
.channel = 0,
- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_SCALE) |
+ BIT(IIO_CHAN_INFO_OFFSET),
.scan_index = 0,
.scan_type = {
.sign = 's',
@@ -68,6 +73,8 @@ static const struct iio_chan_spec ti_adc161s626_channels[] = {
struct ti_adc_data {
struct iio_dev *indio_dev;
struct spi_device *spi;
+ struct regulator *ref;
+
u8 read_size;
u8 shift;
@@ -135,18 +142,32 @@ static int ti_adc_read_raw(struct iio_dev *indio_dev,
struct ti_adc_data *data = iio_priv(indio_dev);
int ret;
- if (mask != IIO_CHAN_INFO_RAW)
- return -EINVAL;
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ ret = iio_device_claim_direct_mode(indio_dev);
+ if (ret)
+ return ret;
- ret = iio_device_claim_direct_mode(indio_dev);
- if (ret)
- return ret;
+ ret = ti_adc_read_measurement(data, chan, val);
+ iio_device_release_direct_mode(indio_dev);
- ret = ti_adc_read_measurement(data, chan, val);
- iio_device_release_direct_mode(indio_dev);
+ if (ret)
+ return ret;
- if (!ret)
return IIO_VAL_INT;
+ case IIO_CHAN_INFO_SCALE:
+ ret = regulator_get_voltage(data->ref);
+ if (ret < 0)
+ return ret;
+
+ *val = ret / 1000;
+ *val2 = chan->scan_type.realbits;
+
+ return IIO_VAL_FRACTIONAL_LOG2;
+ case IIO_CHAN_INFO_OFFSET:
+ *val = 1 << (chan->scan_type.realbits - 1);
+ return IIO_VAL_INT;
+ }
return 0;
}
@@ -191,10 +212,17 @@ static int ti_adc_probe(struct spi_device *spi)
break;
}
+ data->ref = devm_regulator_get(&spi->dev, "vdda");
+ if (IS_ERR(data->ref))
+ return PTR_ERR(data->ref);
+
+ ret = regulator_enable(data->ref);
+ if (ret < 0)
+ return ret;
+
ret = iio_triggered_buffer_setup(indio_dev, NULL,
ti_adc_trigger_handler, NULL);
if (ret)
- return ret;
+ goto error_regulator_disable;
ret = iio_device_register(indio_dev);
if (ret)
@@ -205,15 +233,20 @@ static int ti_adc_probe(struct spi_device *spi)
error_unreg_buffer:
iio_triggered_buffer_cleanup(indio_dev);
+error_regulator_disable:
+ regulator_disable(data->ref);
+
return ret;
}
static int ti_adc_remove(struct spi_device *spi)
{
struct iio_dev *indio_dev = spi_get_drvdata(spi);
+ struct ti_adc_data *data = iio_priv(indio_dev);
iio_device_unregister(indio_dev);
iio_triggered_buffer_cleanup(indio_dev);
+ regulator_disable(data->ref);
return 0;
}
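
The SCALE and OFFSET values added above let userspace convert the signed
raw reading as millivolts = (raw + offset) * vref_mv / 2^realbits. A worked
example; the 3300 mV vdda supply is an assumed value:

#include <stdio.h>

int main(void)
{
	int realbits = 16;			/* ADC161S626 resolution */
	int vref_mv = 3300;			/* assumed vdda voltage */
	int offset = 1 << (realbits - 1);	/* IIO_CHAN_INFO_OFFSET above */
	int raw = -1024;			/* example signed sample */

	/* IIO_VAL_FRACTIONAL_LOG2: scale = vref_mv / 2^realbits */
	double mv = (double)(raw + offset) * vref_mv / (1 << realbits);

	printf("%d raw -> %.2f mV\n", raw, mv);
	return 0;
}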
diff --git a/drivers/iio/adc/ti_am335x_adc.c b/drivers/iio/adc/ti_am335x_adc.c
index c3cfacca2541..ad9dec30bb30 100644
--- a/drivers/iio/adc/ti_am335x_adc.c
+++ b/drivers/iio/adc/ti_am335x_adc.c
@@ -30,10 +30,28 @@
#include <linux/iio/buffer.h>
#include <linux/iio/kfifo_buf.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+
+#define DMA_BUFFER_SIZE SZ_2K
+
+struct tiadc_dma {
+ struct dma_slave_config conf;
+ struct dma_chan *chan;
+ dma_addr_t addr;
+ dma_cookie_t cookie;
+ u8 *buf;
+ int current_period;
+ int period_size;
+ u8 fifo_thresh;
+};
+
struct tiadc_device {
struct ti_tscadc_dev *mfd_tscadc;
+ struct tiadc_dma dma;
struct mutex fifo1_lock; /* to protect fifo access */
int channels;
+ int total_ch_enabled;
u8 channel_line[8];
u8 channel_step[8];
int buffer_en_ch_steps;
@@ -198,6 +216,67 @@ static irqreturn_t tiadc_worker_h(int irq, void *private)
return IRQ_HANDLED;
}
+static void tiadc_dma_rx_complete(void *param)
+{
+ struct iio_dev *indio_dev = param;
+ struct tiadc_device *adc_dev = iio_priv(indio_dev);
+ struct tiadc_dma *dma = &adc_dev->dma;
+ u8 *data;
+ int i;
+
+ data = dma->buf + dma->current_period * dma->period_size;
+ dma->current_period = 1 - dma->current_period; /* swap the buffer ID */
+
+ for (i = 0; i < dma->period_size; i += indio_dev->scan_bytes) {
+ iio_push_to_buffers(indio_dev, data);
+ data += indio_dev->scan_bytes;
+ }
+}
+
+static int tiadc_start_dma(struct iio_dev *indio_dev)
+{
+ struct tiadc_device *adc_dev = iio_priv(indio_dev);
+ struct tiadc_dma *dma = &adc_dev->dma;
+ struct dma_async_tx_descriptor *desc;
+
+ dma->current_period = 0; /* We start to fill period 0 */
+ /*
+ * Make the FIFO threshold a multiple of the total number of
+ * enabled channels, and make the cyclic DMA period length a
+ * multiple of the FIFO threshold as well. This ensures that no
+ * invalid data is reported to the stack via iio_push_to_buffers().
+ */
+ dma->fifo_thresh = rounddown(FIFO1_THRESHOLD + 1,
+ adc_dev->total_ch_enabled) - 1;
+ /* Make sure that period length is multiple of fifo thresh level */
+ dma->period_size = rounddown(DMA_BUFFER_SIZE / 2,
+ (dma->fifo_thresh + 1) * sizeof(u16));
+
+ dma->conf.src_maxburst = dma->fifo_thresh + 1;
+ dmaengine_slave_config(dma->chan, &dma->conf);
+
+ desc = dmaengine_prep_dma_cyclic(dma->chan, dma->addr,
+ dma->period_size * 2,
+ dma->period_size, DMA_DEV_TO_MEM,
+ DMA_PREP_INTERRUPT);
+ if (!desc)
+ return -EBUSY;
+
+ desc->callback = tiadc_dma_rx_complete;
+ desc->callback_param = indio_dev;
+
+ dma->cookie = dmaengine_submit(desc);
+
+ dma_async_issue_pending(dma->chan);
+
+ tiadc_writel(adc_dev, REG_FIFO1THR, dma->fifo_thresh);
+ tiadc_writel(adc_dev, REG_DMA1REQ, dma->fifo_thresh);
+ tiadc_writel(adc_dev, REG_DMAENABLE_SET, DMA_FIFO1);
+
+ return 0;
+}
+
static int tiadc_buffer_preenable(struct iio_dev *indio_dev)
{
struct tiadc_device *adc_dev = iio_priv(indio_dev);
@@ -218,20 +297,30 @@ static int tiadc_buffer_preenable(struct iio_dev *indio_dev)
static int tiadc_buffer_postenable(struct iio_dev *indio_dev)
{
struct tiadc_device *adc_dev = iio_priv(indio_dev);
+ struct tiadc_dma *dma = &adc_dev->dma;
+ unsigned int irq_enable;
unsigned int enb = 0;
u8 bit;
tiadc_step_config(indio_dev);
- for_each_set_bit(bit, indio_dev->active_scan_mask, adc_dev->channels)
+ for_each_set_bit(bit, indio_dev->active_scan_mask, adc_dev->channels) {
enb |= (get_adc_step_bit(adc_dev, bit) << 1);
+ adc_dev->total_ch_enabled++;
+ }
adc_dev->buffer_en_ch_steps = enb;
+ if (dma->chan)
+ tiadc_start_dma(indio_dev);
+
am335x_tsc_se_set_cache(adc_dev->mfd_tscadc, enb);
tiadc_writel(adc_dev, REG_IRQSTATUS, IRQENB_FIFO1THRES
| IRQENB_FIFO1OVRRUN | IRQENB_FIFO1UNDRFLW);
- tiadc_writel(adc_dev, REG_IRQENABLE, IRQENB_FIFO1THRES
- | IRQENB_FIFO1OVRRUN);
+
+ irq_enable = IRQENB_FIFO1OVRRUN;
+ if (!dma->chan)
+ irq_enable |= IRQENB_FIFO1THRES;
+ tiadc_writel(adc_dev, REG_IRQENABLE, irq_enable);
return 0;
}
@@ -239,12 +328,18 @@ static int tiadc_buffer_postenable(struct iio_dev *indio_dev)
static int tiadc_buffer_predisable(struct iio_dev *indio_dev)
{
struct tiadc_device *adc_dev = iio_priv(indio_dev);
+ struct tiadc_dma *dma = &adc_dev->dma;
int fifo1count, i, read;
tiadc_writel(adc_dev, REG_IRQCLR, (IRQENB_FIFO1THRES |
IRQENB_FIFO1OVRRUN | IRQENB_FIFO1UNDRFLW));
am335x_tsc_se_clr(adc_dev->mfd_tscadc, adc_dev->buffer_en_ch_steps);
adc_dev->buffer_en_ch_steps = 0;
+ adc_dev->total_ch_enabled = 0;
+ if (dma->chan) {
+ tiadc_writel(adc_dev, REG_DMAENABLE_CLEAR, 0x2);
+ dmaengine_terminate_async(dma->chan);
+ }
/* Flush FIFO of leftover data in the time it takes to disable adc */
fifo1count = tiadc_readl(adc_dev, REG_FIFO1CNT);
@@ -430,6 +525,41 @@ static const struct iio_info tiadc_info = {
.driver_module = THIS_MODULE,
};
+static int tiadc_request_dma(struct platform_device *pdev,
+ struct tiadc_device *adc_dev)
+{
+ struct tiadc_dma *dma = &adc_dev->dma;
+ dma_cap_mask_t mask;
+
+ /* Default slave configuration parameters */
+ dma->conf.direction = DMA_DEV_TO_MEM;
+ dma->conf.src_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
+ dma->conf.src_addr = adc_dev->mfd_tscadc->tscadc_phys_base + REG_FIFO1;
+
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_CYCLIC, mask);
+
+ /* Get a channel for RX */
+ dma->chan = dma_request_chan(adc_dev->mfd_tscadc->dev, "fifo1");
+ if (IS_ERR(dma->chan)) {
+ int ret = PTR_ERR(dma->chan);
+
+ dma->chan = NULL;
+ return ret;
+ }
+
+ /* RX buffer */
+ dma->buf = dma_alloc_coherent(dma->chan->device->dev, DMA_BUFFER_SIZE,
+ &dma->addr, GFP_KERNEL);
+ if (!dma->buf)
+ goto err;
+
+ return 0;
+err:
+ dma_release_channel(dma->chan);
+ return -ENOMEM;
+}
+
static int tiadc_parse_dt(struct platform_device *pdev,
struct tiadc_device *adc_dev)
{
@@ -512,8 +642,14 @@ static int tiadc_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, indio_dev);
+ err = tiadc_request_dma(pdev, adc_dev);
+ if (err == -EPROBE_DEFER)
+ goto err_dma;
+
return 0;
+err_dma:
+ iio_device_unregister(indio_dev);
err_buffer_unregister:
tiadc_iio_buffered_hardware_remove(indio_dev);
err_free_channels:
@@ -525,8 +661,14 @@ static int tiadc_remove(struct platform_device *pdev)
{
struct iio_dev *indio_dev = platform_get_drvdata(pdev);
struct tiadc_device *adc_dev = iio_priv(indio_dev);
+ struct tiadc_dma *dma = &adc_dev->dma;
u32 step_en;
+ if (dma->chan) {
+ dma_free_coherent(dma->chan->device->dev, DMA_BUFFER_SIZE,
+ dma->buf, dma->addr);
+ dma_release_channel(dma->chan);
+ }
iio_device_unregister(indio_dev);
tiadc_iio_buffered_hardware_remove(indio_dev);
tiadc_channels_remove(indio_dev);
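
The rounding in tiadc_start_dma() keeps each DMA period an integer number
of complete scans: the FIFO threshold is rounded down to a multiple of the
enabled channel count, then the period length to a multiple of the
threshold in bytes. A standalone rendition; FIFO1_THRESHOLD of 19 is an
assumption taken from the ti_am335x_tscadc MFD header, and the channel
count is an example:

#include <stdio.h>

#define FIFO1_THRESHOLD 19	/* assumed, from the tscadc MFD header */
#define DMA_BUFFER_SIZE 2048
#define rounddown(x, y) (((x) / (y)) * (y))

int main(void)
{
	int total_ch_enabled = 3;	/* hypothetical active scan mask */
	int fifo_thresh, period_size;

	fifo_thresh = rounddown(FIFO1_THRESHOLD + 1, total_ch_enabled) - 1;
	period_size = rounddown(DMA_BUFFER_SIZE / 2,
				(fifo_thresh + 1) * (int)sizeof(short));

	printf("threshold = %d samples, period = %d bytes\n",
	       fifo_thresh + 1, period_size);
	return 0;
}

With three channels this yields an 18-sample threshold and a 1008-byte
period, both whole multiples of a scan.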
diff --git a/drivers/iio/common/Kconfig b/drivers/iio/common/Kconfig
index 26a6026de614..e108996a9627 100644
--- a/drivers/iio/common/Kconfig
+++ b/drivers/iio/common/Kconfig
@@ -2,6 +2,7 @@
# IIO common modules
#
+source "drivers/iio/common/cros_ec_sensors/Kconfig"
source "drivers/iio/common/hid-sensors/Kconfig"
source "drivers/iio/common/ms_sensors/Kconfig"
source "drivers/iio/common/ssp_sensors/Kconfig"
diff --git a/drivers/iio/common/Makefile b/drivers/iio/common/Makefile
index 585da6a1b188..6fa760e1bdd5 100644
--- a/drivers/iio/common/Makefile
+++ b/drivers/iio/common/Makefile
@@ -7,6 +7,7 @@
#
# When adding new entries keep the list in alphabetical order
+obj-y += cros_ec_sensors/
obj-y += hid-sensors/
obj-y += ms_sensors/
obj-y += ssp_sensors/
diff --git a/drivers/iio/common/cros_ec_sensors/Kconfig b/drivers/iio/common/cros_ec_sensors/Kconfig
new file mode 100644
index 000000000000..135f6825903f
--- /dev/null
+++ b/drivers/iio/common/cros_ec_sensors/Kconfig
@@ -0,0 +1,22 @@
+#
+# Chrome OS Embedded Controller managed sensors library
+#
+config IIO_CROS_EC_SENSORS_CORE
+ tristate "ChromeOS EC Sensors Core"
+ depends on SYSFS && MFD_CROS_EC
+ select IIO_BUFFER
+ select IIO_TRIGGERED_BUFFER
+ help
+ Base module for the ChromeOS EC Sensors module.
+ Contains core functions used by other IIO CrosEC sensor
+ drivers.
+ Defines common attributes and the sysfs interrupt handler.
+
+config IIO_CROS_EC_SENSORS
+ tristate "ChromeOS EC Contiguous Sensors"
+ depends on IIO_CROS_EC_SENSORS_CORE
+ help
+ Module to handle 3-axis sensors like
+ accelerometers, gyroscopes and magnetometers that are
+ presented by the ChromeOS EC sensor hub.
+ Creates an IIO device for each function.
diff --git a/drivers/iio/common/cros_ec_sensors/Makefile b/drivers/iio/common/cros_ec_sensors/Makefile
new file mode 100644
index 000000000000..ec716ff2a775
--- /dev/null
+++ b/drivers/iio/common/cros_ec_sensors/Makefile
@@ -0,0 +1,6 @@
+#
+# Makefile for sensors seen through the ChromeOS EC sensor hub.
+#
+
+obj-$(CONFIG_IIO_CROS_EC_SENSORS_CORE) += cros_ec_sensors_core.o
+obj-$(CONFIG_IIO_CROS_EC_SENSORS) += cros_ec_sensors.o
diff --git a/drivers/iio/common/cros_ec_sensors/cros_ec_sensors.c b/drivers/iio/common/cros_ec_sensors/cros_ec_sensors.c
new file mode 100644
index 000000000000..d6c372bb433b
--- /dev/null
+++ b/drivers/iio/common/cros_ec_sensors/cros_ec_sensors.c
@@ -0,0 +1,322 @@
+/*
+ * cros_ec_sensors - Driver for Chrome OS Embedded Controller sensors.
+ *
+ * Copyright (C) 2016 Google, Inc
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * This driver uses the cros-ec interface to communicate with the Chrome OS
+ * EC about sensors data. Data access is presented through iio sysfs.
+ */
+
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/iio/buffer.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/kfifo_buf.h>
+#include <linux/iio/trigger_consumer.h>
+#include <linux/iio/triggered_buffer.h>
+#include <linux/kernel.h>
+#include <linux/mfd/cros_ec.h>
+#include <linux/mfd/cros_ec_commands.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/sysfs.h>
+
+#include "cros_ec_sensors_core.h"
+
+#define CROS_EC_SENSORS_MAX_CHANNELS 4
+
+/* State data for ec_sensors iio driver. */
+struct cros_ec_sensors_state {
+ /* Shared by all sensors */
+ struct cros_ec_sensors_core_state core;
+
+ struct iio_chan_spec channels[CROS_EC_SENSORS_MAX_CHANNELS];
+};
+
+static int cros_ec_sensors_read(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ struct cros_ec_sensors_state *st = iio_priv(indio_dev);
+ s16 data = 0;
+ s64 val64;
+ int i;
+ int ret;
+ int idx = chan->scan_index;
+
+ mutex_lock(&st->core.cmd_lock);
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ ret = st->core.read_ec_sensors_data(indio_dev, 1 << idx, &data);
+ if (ret < 0)
+ break;
+
+ *val = data;
+ break;
+ case IIO_CHAN_INFO_CALIBBIAS:
+ st->core.param.cmd = MOTIONSENSE_CMD_SENSOR_OFFSET;
+ st->core.param.sensor_offset.flags = 0;
+
+ ret = cros_ec_motion_send_host_cmd(&st->core, 0);
+ if (ret < 0)
+ break;
+
+ /* Save values */
+ for (i = CROS_EC_SENSOR_X; i < CROS_EC_SENSOR_MAX_AXIS; i++)
+ st->core.calib[i] =
+ st->core.resp->sensor_offset.offset[i];
+
+ *val = st->core.calib[idx];
+ break;
+ case IIO_CHAN_INFO_SCALE:
+ st->core.param.cmd = MOTIONSENSE_CMD_SENSOR_RANGE;
+ st->core.param.sensor_range.data = EC_MOTION_SENSE_NO_VALUE;
+
+ ret = cros_ec_motion_send_host_cmd(&st->core, 0);
+ if (ret < 0)
+ break;
+
+ val64 = st->core.resp->sensor_range.ret;
+ switch (st->core.type) {
+ case MOTIONSENSE_TYPE_ACCEL:
+ /*
+ * EC returns data in g, iio expects m/s^2.
+ * Do not use IIO_G_TO_M_S_2 to avoid precision loss.
+ */
+ *val = div_s64(val64 * 980665, 10);
+ *val2 = 10000 << (CROS_EC_SENSOR_BITS - 1);
+ ret = IIO_VAL_FRACTIONAL;
+ break;
+ case MOTIONSENSE_TYPE_GYRO:
+ /*
+ * EC returns data in dps, iio expects rad/s.
+ * Do not use IIO_DEGREE_TO_RAD to avoid precision
+ * loss. Round to the nearest integer.
+ */
+ *val = div_s64(val64 * 314159 + 9000000ULL, 1000);
+ *val2 = 18000 << (CROS_EC_SENSOR_BITS - 1);
+ ret = IIO_VAL_FRACTIONAL;
+ break;
+ case MOTIONSENSE_TYPE_MAG:
+ /*
+ * EC returns data in 16LSB / uT,
+ * iio expects Gauss
+ */
+ *val = val64;
+ *val2 = 100 << (CROS_EC_SENSOR_BITS - 1);
+ ret = IIO_VAL_FRACTIONAL;
+ break;
+ default:
+ ret = -EINVAL;
+ }
+ break;
+ default:
+ ret = cros_ec_sensors_core_read(&st->core, chan, val, val2,
+ mask);
+ break;
+ }
+ mutex_unlock(&st->core.cmd_lock);
+
+ return ret;
+}
+
+static int cros_ec_sensors_write(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val, int val2, long mask)
+{
+ struct cros_ec_sensors_state *st = iio_priv(indio_dev);
+ int i;
+ int ret;
+ int idx = chan->scan_index;
+
+ mutex_lock(&st->core.cmd_lock);
+
+ switch (mask) {
+ case IIO_CHAN_INFO_CALIBBIAS:
+ st->core.calib[idx] = val;
+
+ /* Send to EC for each axis, even if not complete */
+ st->core.param.cmd = MOTIONSENSE_CMD_SENSOR_OFFSET;
+ st->core.param.sensor_offset.flags =
+ MOTION_SENSE_SET_OFFSET;
+ for (i = CROS_EC_SENSOR_X; i < CROS_EC_SENSOR_MAX_AXIS; i++)
+ st->core.param.sensor_offset.offset[i] =
+ st->core.calib[i];
+ st->core.param.sensor_offset.temp =
+ EC_MOTION_SENSE_INVALID_CALIB_TEMP;
+
+ ret = cros_ec_motion_send_host_cmd(&st->core, 0);
+ break;
+ case IIO_CHAN_INFO_SCALE:
+ if (st->core.type == MOTIONSENSE_TYPE_MAG) {
+ ret = -EINVAL;
+ break;
+ }
+ st->core.param.cmd = MOTIONSENSE_CMD_SENSOR_RANGE;
+ st->core.param.sensor_range.data = val;
+
+ /* Always round up, so the caller gets at least what it asks for. */
+ st->core.param.sensor_range.roundup = 1;
+
+ ret = cros_ec_motion_send_host_cmd(&st->core, 0);
+ break;
+ default:
+ ret = cros_ec_sensors_core_write(
+ &st->core, chan, val, val2, mask);
+ break;
+ }
+
+ mutex_unlock(&st->core.cmd_lock);
+
+ return ret;
+}
+
+static const struct iio_info ec_sensors_info = {
+ .read_raw = &cros_ec_sensors_read,
+ .write_raw = &cros_ec_sensors_write,
+ .driver_module = THIS_MODULE,
+};
+
+static int cros_ec_sensors_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct cros_ec_dev *ec_dev = dev_get_drvdata(dev->parent);
+ struct cros_ec_device *ec_device;
+ struct iio_dev *indio_dev;
+ struct cros_ec_sensors_state *state;
+ struct iio_chan_spec *channel;
+ int ret, i;
+
+ if (!ec_dev || !ec_dev->ec_dev) {
+ dev_warn(&pdev->dev, "No CROS EC device found.\n");
+ return -EINVAL;
+ }
+ ec_device = ec_dev->ec_dev;
+
+ indio_dev = devm_iio_device_alloc(&pdev->dev, sizeof(*state));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ ret = cros_ec_sensors_core_init(pdev, indio_dev, true);
+ if (ret)
+ return ret;
+
+ indio_dev->info = &ec_sensors_info;
+ state = iio_priv(indio_dev);
+ for (channel = state->channels, i = CROS_EC_SENSOR_X;
+ i < CROS_EC_SENSOR_MAX_AXIS; i++, channel++) {
+ /* Common part */
+ channel->info_mask_separate =
+ BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_CALIBBIAS);
+ channel->info_mask_shared_by_all =
+ BIT(IIO_CHAN_INFO_SCALE) |
+ BIT(IIO_CHAN_INFO_FREQUENCY) |
+ BIT(IIO_CHAN_INFO_SAMP_FREQ);
+ channel->scan_type.realbits = CROS_EC_SENSOR_BITS;
+ channel->scan_type.storagebits = CROS_EC_SENSOR_BITS;
+ channel->scan_index = i;
+ channel->ext_info = cros_ec_sensors_ext_info;
+ channel->modified = 1;
+ channel->channel2 = IIO_MOD_X + i;
+ channel->scan_type.sign = 's';
+
+ /* Sensor specific */
+ switch (state->core.type) {
+ case MOTIONSENSE_TYPE_ACCEL:
+ channel->type = IIO_ACCEL;
+ break;
+ case MOTIONSENSE_TYPE_GYRO:
+ channel->type = IIO_ANGL_VEL;
+ break;
+ case MOTIONSENSE_TYPE_MAG:
+ channel->type = IIO_MAGN;
+ break;
+ default:
+ dev_err(&pdev->dev, "Unknown motion sensor\n");
+ return -EINVAL;
+ }
+ }
+
+ /* Timestamp */
+ channel->type = IIO_TIMESTAMP;
+ channel->channel = -1;
+ channel->scan_index = CROS_EC_SENSOR_MAX_AXIS;
+ channel->scan_type.sign = 's';
+ channel->scan_type.realbits = 64;
+ channel->scan_type.storagebits = 64;
+
+ indio_dev->channels = state->channels;
+ indio_dev->num_channels = CROS_EC_SENSORS_MAX_CHANNELS;
+
+ /* There is only enough room for accel and gyro in the io space */
+ if ((state->core.ec->cmd_readmem != NULL) &&
+ (state->core.type != MOTIONSENSE_TYPE_MAG))
+ state->core.read_ec_sensors_data = cros_ec_sensors_read_lpc;
+ else
+ state->core.read_ec_sensors_data = cros_ec_sensors_read_cmd;
+
+ ret = iio_triggered_buffer_setup(indio_dev, NULL,
+ cros_ec_sensors_capture, NULL);
+ if (ret)
+ return ret;
+
+ ret = iio_device_register(indio_dev);
+ if (ret)
+ goto error_uninit_buffer;
+
+ return 0;
+
+error_uninit_buffer:
+ iio_triggered_buffer_cleanup(indio_dev);
+
+ return ret;
+}
+
+static int cros_ec_sensors_remove(struct platform_device *pdev)
+{
+ struct iio_dev *indio_dev = platform_get_drvdata(pdev);
+
+ iio_device_unregister(indio_dev);
+ iio_triggered_buffer_cleanup(indio_dev);
+
+ return 0;
+}
+
+static const struct platform_device_id cros_ec_sensors_ids[] = {
+ {
+ .name = "cros-ec-accel",
+ },
+ {
+ .name = "cros-ec-gyro",
+ },
+ {
+ .name = "cros-ec-mag",
+ },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(platform, cros_ec_sensors_ids);
+
+static struct platform_driver cros_ec_sensors_platform_driver = {
+ .driver = {
+ .name = "cros-ec-sensors",
+ },
+ .probe = cros_ec_sensors_probe,
+ .remove = cros_ec_sensors_remove,
+ .id_table = cros_ec_sensors_ids,
+};
+module_platform_driver(cros_ec_sensors_platform_driver);
+
+MODULE_DESCRIPTION("ChromeOS EC 3-axis sensors driver");
+MODULE_LICENSE("GPL v2");
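
The accelerometer case above returns IIO_VAL_FRACTIONAL with numerator
range * 980665 / 10 and denominator 10000 << 15, which is exactly
range * 9.80665 / 2^15 without intermediate truncation. A worked example,
assuming the EC reports a +/-2g range and CROS_EC_SENSOR_BITS of 16:

#include <stdio.h>

int main(void)
{
	long long range_g = 2;			/* from sensor_range.ret */
	long long val = range_g * 980665 / 10;	/* IIO fraction numerator */
	long long val2 = 10000LL << (16 - 1);	/* fraction denominator */

	/* IIO_VAL_FRACTIONAL: scale = val / val2, in m/s^2 per LSB */
	printf("scale = %lld / %lld = %.9f m/s^2 per LSB\n",
	       val, val2, (double)val / (double)val2);
	return 0;
}

This prints ~0.000598550 m/s^2 per LSB, matching 2g over a signed 16-bit
range.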
diff --git a/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c b/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c
new file mode 100644
index 000000000000..416cae5ebbd0
--- /dev/null
+++ b/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c
@@ -0,0 +1,450 @@
+/*
+ * cros_ec_sensors_core - Common function for Chrome OS EC sensor driver.
+ *
+ * Copyright (C) 2016 Google, Inc
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/iio/buffer.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/kfifo_buf.h>
+#include <linux/iio/trigger_consumer.h>
+#include <linux/kernel.h>
+#include <linux/mfd/cros_ec.h>
+#include <linux/mfd/cros_ec_commands.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/sysfs.h>
+#include <linux/platform_device.h>
+
+#include "cros_ec_sensors_core.h"
+
+static char *cros_ec_loc[] = {
+ [MOTIONSENSE_LOC_BASE] = "base",
+ [MOTIONSENSE_LOC_LID] = "lid",
+ [MOTIONSENSE_LOC_MAX] = "unknown",
+};
+
+int cros_ec_sensors_core_init(struct platform_device *pdev,
+ struct iio_dev *indio_dev,
+ bool physical_device)
+{
+ struct device *dev = &pdev->dev;
+ struct cros_ec_sensors_core_state *state = iio_priv(indio_dev);
+ struct cros_ec_dev *ec = dev_get_drvdata(pdev->dev.parent);
+ struct cros_ec_sensor_platform *sensor_platform = dev_get_platdata(dev);
+
+ platform_set_drvdata(pdev, indio_dev);
+
+ state->ec = ec->ec_dev;
+ state->msg = devm_kzalloc(&pdev->dev,
+ max((u16)sizeof(struct ec_params_motion_sense),
+ state->ec->max_response), GFP_KERNEL);
+ if (!state->msg)
+ return -ENOMEM;
+
+ state->resp = (struct ec_response_motion_sense *)state->msg->data;
+
+ mutex_init(&state->cmd_lock);
+
+ /* Set up the host command structure. */
+ state->msg->version = 2;
+ state->msg->command = EC_CMD_MOTION_SENSE_CMD + ec->cmd_offset;
+ state->msg->outsize = sizeof(struct ec_params_motion_sense);
+
+ indio_dev->dev.parent = &pdev->dev;
+ indio_dev->name = pdev->name;
+
+ if (physical_device) {
+ indio_dev->modes = INDIO_DIRECT_MODE;
+
+ state->param.cmd = MOTIONSENSE_CMD_INFO;
+ state->param.info.sensor_num = sensor_platform->sensor_num;
+ if (cros_ec_motion_send_host_cmd(state, 0)) {
+ dev_warn(dev, "Cannot access sensor info\n");
+ return -EIO;
+ }
+ state->type = state->resp->info.type;
+ state->loc = state->resp->info.location;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(cros_ec_sensors_core_init);
+
+int cros_ec_motion_send_host_cmd(struct cros_ec_sensors_core_state *state,
+ u16 opt_length)
+{
+ int ret;
+
+ if (opt_length)
+ state->msg->insize = min(opt_length, state->ec->max_response);
+ else
+ state->msg->insize = state->ec->max_response;
+
+ memcpy(state->msg->data, &state->param, sizeof(state->param));
+
+ ret = cros_ec_cmd_xfer_status(state->ec, state->msg);
+ if (ret < 0)
+ return -EIO;
+
+ if (ret &&
+ state->resp != (struct ec_response_motion_sense *)state->msg->data)
+ memcpy(state->resp, state->msg->data, ret);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(cros_ec_motion_send_host_cmd);
+
+static ssize_t cros_ec_sensors_calibrate(struct iio_dev *indio_dev,
+ uintptr_t private, const struct iio_chan_spec *chan,
+ const char *buf, size_t len)
+{
+ struct cros_ec_sensors_core_state *st = iio_priv(indio_dev);
+ int ret, i;
+ bool calibrate;
+
+ ret = strtobool(buf, &calibrate);
+ if (ret < 0)
+ return ret;
+ if (!calibrate)
+ return -EINVAL;
+
+ mutex_lock(&st->cmd_lock);
+ st->param.cmd = MOTIONSENSE_CMD_PERFORM_CALIB;
+ ret = cros_ec_motion_send_host_cmd(st, 0);
+ if (ret != 0) {
+ dev_warn(&indio_dev->dev, "Unable to calibrate sensor\n");
+ } else {
+ /* Save values */
+ for (i = CROS_EC_SENSOR_X; i < CROS_EC_SENSOR_MAX_AXIS; i++)
+ st->calib[i] = st->resp->perform_calib.offset[i];
+ }
+ mutex_unlock(&st->cmd_lock);
+
+ return ret ? ret : len;
+}
+
+static ssize_t cros_ec_sensors_loc(struct iio_dev *indio_dev,
+ uintptr_t private, const struct iio_chan_spec *chan,
+ char *buf)
+{
+ struct cros_ec_sensors_core_state *st = iio_priv(indio_dev);
+
+ return snprintf(buf, PAGE_SIZE, "%s\n", cros_ec_loc[st->loc]);
+}
+
+const struct iio_chan_spec_ext_info cros_ec_sensors_ext_info[] = {
+ {
+ .name = "calibrate",
+ .shared = IIO_SHARED_BY_ALL,
+ .write = cros_ec_sensors_calibrate
+ },
+ {
+ .name = "location",
+ .shared = IIO_SHARED_BY_ALL,
+ .read = cros_ec_sensors_loc
+ },
+ { },
+};
+EXPORT_SYMBOL_GPL(cros_ec_sensors_ext_info);
+
+/**
+ * cros_ec_sensors_idx_to_reg - convert index into offset in shared memory
+ * @st: pointer to state information for device
+ * @idx: sensor index (should be element of enum sensor_index)
+ *
+ * Return: address to read at
+ */
+static unsigned int cros_ec_sensors_idx_to_reg(
+ struct cros_ec_sensors_core_state *st,
+ unsigned int idx)
+{
+ /*
+ * When using the LPC interface, there is only space for two
+ * accelerometers and one gyroscope. The first halfword of
+ * MOTIONSENSE_TYPE_ACCEL is used by the lid angle.
+ */
+ if (st->type == MOTIONSENSE_TYPE_ACCEL)
+ return EC_MEMMAP_ACC_DATA + sizeof(u16) *
+ (1 + idx + st->param.info.sensor_num *
+ CROS_EC_SENSOR_MAX_AXIS);
+
+ return EC_MEMMAP_GYRO_DATA + sizeof(u16) * idx;
+}
+
+static int cros_ec_sensors_cmd_read_u8(struct cros_ec_device *ec,
+ unsigned int offset, u8 *dest)
+{
+ return ec->cmd_readmem(ec, offset, 1, dest);
+}
+
+static int cros_ec_sensors_cmd_read_u16(struct cros_ec_device *ec,
+ unsigned int offset, u16 *dest)
+{
+ __le16 tmp;
+ int ret = ec->cmd_readmem(ec, offset, 2, &tmp);
+
+ if (ret >= 0)
+ *dest = le16_to_cpu(tmp);
+
+ return ret;
+}
+
+/**
+ * cros_ec_sensors_read_until_not_busy() - read status until EC is not busy
+ *
+ * @st: pointer to state information for device
+ *
+ * Read the EC status byte until it reports not busy.
+ * Return: 8-bit status if ok, -errno on failure.
+ */
+static int cros_ec_sensors_read_until_not_busy(
+ struct cros_ec_sensors_core_state *st)
+{
+ struct cros_ec_device *ec = st->ec;
+ u8 status;
+ int ret, attempts = 0;
+
+ ret = cros_ec_sensors_cmd_read_u8(ec, EC_MEMMAP_ACC_STATUS, &status);
+ if (ret < 0)
+ return ret;
+
+ while (status & EC_MEMMAP_ACC_STATUS_BUSY_BIT) {
+ /* Give up after enough attempts, return error. */
+ if (attempts++ >= 50)
+ return -EIO;
+
+ /* Small delay every so often. */
+ if (attempts % 5 == 0)
+ msleep(25);
+
+ ret = cros_ec_sensors_cmd_read_u8(ec, EC_MEMMAP_ACC_STATUS,
+ &status);
+ if (ret < 0)
+ return ret;
+ }
+
+ return status;
+}
+
+/**
+ * cros_ec_sensors_read_data_unsafe() - read sensor data from EC shared memory
+ * @indio_dev: pointer to IIO device
+ * @scan_mask: bitmap of the sensor indices to scan
+ * @data: location to store data
+ *
+ * This is the unsafe function for reading the EC data. It does not guarantee
+ * that the EC will not modify the data as it is being read in.
+ *
+ * Return: 0 on success, -errno on failure.
+ */
+static int cros_ec_sensors_read_data_unsafe(struct iio_dev *indio_dev,
+ unsigned long scan_mask, s16 *data)
+{
+ struct cros_ec_sensors_core_state *st = iio_priv(indio_dev);
+ struct cros_ec_device *ec = st->ec;
+ unsigned int i;
+ int ret;
+
+ /* Read all sensors enabled in scan_mask. Each value is 2 bytes. */
+ for_each_set_bit(i, &scan_mask, indio_dev->masklength) {
+ ret = cros_ec_sensors_cmd_read_u16(ec,
+ cros_ec_sensors_idx_to_reg(st, i),
+ data);
+ if (ret < 0)
+ return ret;
+
+ data++;
+ }
+
+ return 0;
+}
+
+int cros_ec_sensors_read_lpc(struct iio_dev *indio_dev,
+ unsigned long scan_mask, s16 *data)
+{
+ struct cros_ec_sensors_core_state *st = iio_priv(indio_dev);
+ struct cros_ec_device *ec = st->ec;
+ u8 samp_id = 0xff, status = 0;
+ int ret, attempts = 0;
+
+ /*
+ * Continually read all data from EC until the status byte after
+ * all reads reflects that the EC is not busy and the sample id
+ * matches the sample id from before all reads. This guarantees
+ * that data read in was not modified by the EC while reading.
+ */
+ while ((status & (EC_MEMMAP_ACC_STATUS_BUSY_BIT |
+ EC_MEMMAP_ACC_STATUS_SAMPLE_ID_MASK)) != samp_id) {
+ /* If we have tried to read too many times, return error. */
+ if (attempts++ >= 5)
+ return -EIO;
+
+ /* Read status byte until EC is not busy. */
+ ret = cros_ec_sensors_read_until_not_busy(st);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * Store the current sample id so that we can compare to the
+ * sample id after reading the data.
+ */
+ samp_id = ret & EC_MEMMAP_ACC_STATUS_SAMPLE_ID_MASK;
+
+ /* Read all EC data, format it, and store it into data. */
+ ret = cros_ec_sensors_read_data_unsafe(indio_dev, scan_mask,
+ data);
+ if (ret < 0)
+ return ret;
+
+ /* Read status byte. */
+ ret = cros_ec_sensors_cmd_read_u8(ec, EC_MEMMAP_ACC_STATUS,
+ &status);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(cros_ec_sensors_read_lpc);
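
The retry loop above amounts to a seqlock-style protocol: sample the id, read the data, then require that the EC is still idle and the id is unchanged. A minimal runnable sketch of the same idea follows; the helpers and the bit layout are illustrative stand-ins for the EC shared memory, not the real interface:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

#define BUSY_BIT    0x80	/* made-up bit layout */
#define SAMPLE_MASK 0x0f

static uint8_t mem_status;	/* stands in for EC shared memory */
static int16_t mem_data[3] = { 10, -20, 30 };

static int read_status(uint8_t *s) { *s = mem_status; return 0; }

static int read_all(int16_t *d)
{
	for (int i = 0; i < 3; i++)
		d[i] = mem_data[i];
	return 0;
}

static int read_consistent(int16_t *d)
{
	uint8_t id = 0xff, status;
	int tries = 5;

	do {
		if (!tries--)
			return -EIO;
		if (read_status(&status) < 0)
			return -EIO;
		if (status & BUSY_BIT)
			continue;	/* EC mid-update: poll again */
		id = status & SAMPLE_MASK;
		if (read_all(d) < 0 || read_status(&status) < 0)
			return -EIO;
		/* accept only if still idle and the sample id is unchanged */
	} while ((status & (BUSY_BIT | SAMPLE_MASK)) != id);

	return 0;
}

int main(void)
{
	int16_t d[3];

	if (!read_consistent(d))
		printf("%d %d %d\n", d[0], d[1], d[2]);
	return 0;
}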
+
+int cros_ec_sensors_read_cmd(struct iio_dev *indio_dev,
+ unsigned long scan_mask, s16 *data)
+{
+ struct cros_ec_sensors_core_state *st = iio_priv(indio_dev);
+ int ret;
+ unsigned int i;
+
+ /* Read all sensor data through a command. */
+ st->param.cmd = MOTIONSENSE_CMD_DATA;
+ ret = cros_ec_motion_send_host_cmd(st, sizeof(st->resp->data));
+ if (ret != 0) {
+ dev_warn(&indio_dev->dev, "Unable to read sensor data\n");
+ return ret;
+ }
+
+ for_each_set_bit(i, &scan_mask, indio_dev->masklength) {
+ *data = st->resp->data.data[i];
+ data++;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(cros_ec_sensors_read_cmd);
+
+irqreturn_t cros_ec_sensors_capture(int irq, void *p)
+{
+ struct iio_poll_func *pf = p;
+ struct iio_dev *indio_dev = pf->indio_dev;
+ struct cros_ec_sensors_core_state *st = iio_priv(indio_dev);
+ int ret;
+
+ mutex_lock(&st->cmd_lock);
+
+ /* Clear capture data. */
+ memset(st->samples, 0, indio_dev->scan_bytes);
+
+ /* Read data based on which channels are enabled in scan mask. */
+ ret = st->read_ec_sensors_data(indio_dev,
+ *(indio_dev->active_scan_mask),
+ (s16 *)st->samples);
+ if (ret < 0)
+ goto done;
+
+ iio_push_to_buffers_with_timestamp(indio_dev, st->samples,
+ iio_get_time_ns(indio_dev));
+
+done:
+ /*
+ * Tell the core we are done with this trigger and ready for the
+ * next one.
+ */
+ iio_trigger_notify_done(indio_dev->trig);
+
+ mutex_unlock(&st->cmd_lock);
+
+ return IRQ_HANDLED;
+}
+EXPORT_SYMBOL_GPL(cros_ec_sensors_capture);
+
+int cros_ec_sensors_core_read(struct cros_ec_sensors_core_state *st,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ int ret = IIO_VAL_INT;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ st->param.cmd = MOTIONSENSE_CMD_EC_RATE;
+ st->param.ec_rate.data =
+ EC_MOTION_SENSE_NO_VALUE;
+
+ if (cros_ec_motion_send_host_cmd(st, 0))
+ ret = -EIO;
+ else
+ *val = st->resp->ec_rate.ret;
+ break;
+ case IIO_CHAN_INFO_FREQUENCY:
+ st->param.cmd = MOTIONSENSE_CMD_SENSOR_ODR;
+ st->param.sensor_odr.data =
+ EC_MOTION_SENSE_NO_VALUE;
+
+ if (cros_ec_motion_send_host_cmd(st, 0))
+ ret = -EIO;
+ else
+ *val = st->resp->sensor_odr.ret;
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(cros_ec_sensors_core_read);
+
+int cros_ec_sensors_core_write(struct cros_ec_sensors_core_state *st,
+ struct iio_chan_spec const *chan,
+ int val, int val2, long mask)
+{
+ int ret = 0;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_FREQUENCY:
+ st->param.cmd = MOTIONSENSE_CMD_SENSOR_ODR;
+ st->param.sensor_odr.data = val;
+
+ /* Always roundup, so caller gets at least what it asks for. */
+ st->param.sensor_odr.roundup = 1;
+
+ if (cros_ec_motion_send_host_cmd(st, 0))
+ ret = -EIO;
+ break;
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ st->param.cmd = MOTIONSENSE_CMD_EC_RATE;
+ st->param.ec_rate.data = val;
+
+ if (cros_ec_motion_send_host_cmd(st, 0))
+ ret = -EIO;
+ else
+ st->curr_sampl_freq = val;
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+ return ret;
+}
+EXPORT_SYMBOL_GPL(cros_ec_sensors_core_write);
+
+MODULE_DESCRIPTION("ChromeOS EC sensor hub core functions");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.h b/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.h
new file mode 100644
index 000000000000..8bc2ca3c2e2e
--- /dev/null
+++ b/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.h
@@ -0,0 +1,175 @@
+/*
+ * ChromeOS EC sensor hub
+ *
+ * Copyright (C) 2016 Google, Inc
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __CROS_EC_SENSORS_CORE_H
+#define __CROS_EC_SENSORS_CORE_H
+
+#include <linux/irqreturn.h>
+
+enum {
+ CROS_EC_SENSOR_X,
+ CROS_EC_SENSOR_Y,
+ CROS_EC_SENSOR_Z,
+ CROS_EC_SENSOR_MAX_AXIS,
+};
+
+/* EC returns sensor values using signed 16 bit registers */
+#define CROS_EC_SENSOR_BITS 16
+
+/*
+ * Room for 4 16-bit channels followed by an 8-byte aligned timestamp.
+ * Good enough for current sensors, which use at most 3 16-bit axes.
+ */
+#define CROS_EC_SAMPLE_SIZE (sizeof(s64) * 2)
+
+/* Minimum sampling period to use when device is suspending */
+#define CROS_EC_MIN_SUSPEND_SAMPLING_FREQUENCY 1000 /* 1 second */
+
+/**
+ * struct cros_ec_sensors_core_state - state data for EC sensors IIO driver
+ * @ec: cros EC device structure
+ * @cmd_lock: lock used to prevent simultaneous access to the
+ * commands.
+ * @msg: cros EC command structure
+ * @param: motion sensor parameters structure
+ * @resp: motion sensor response structure
+ * @type: type of motion sensor
+ * @loc: location where the motion sensor is placed
+ * @calib: calibration parameters. Note that trigger
+ * captured data will always provide the calibrated
+ * data
+ * @samples: static array to hold data from a single capture.
+ * For each channel we need 2 bytes, except for
+ * the timestamp. The timestamp is always last and
+ * is always 8-byte aligned.
+ * @read_ec_sensors_data: function used for accessing sensors values
+ * @curr_sampl_freq: current sampling period
+ */
+struct cros_ec_sensors_core_state {
+ struct cros_ec_device *ec;
+ struct mutex cmd_lock;
+
+ struct cros_ec_command *msg;
+ struct ec_params_motion_sense param;
+ struct ec_response_motion_sense *resp;
+
+ enum motionsensor_type type;
+ enum motionsensor_location loc;
+
+ s16 calib[CROS_EC_SENSOR_MAX_AXIS];
+
+ u8 samples[CROS_EC_SAMPLE_SIZE];
+
+ int (*read_ec_sensors_data)(struct iio_dev *indio_dev,
+ unsigned long scan_mask, s16 *data);
+
+ int curr_sampl_freq;
+};
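
The samples[] buffer above is sized so that up to four 16-bit channels sit ahead of the 8-byte aligned timestamp filled in by iio_push_to_buffers_with_timestamp(). A hypothetical struct spelling out the layout the raw byte array must honor (illustrative only; the driver never declares such a type):

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

struct sample_frame {
	int16_t axis[3];	/* current sensors use at most 3 axes... */
	int16_t spare;		/* ...a 4th 16-bit channel would still fit */
	int64_t timestamp;	/* pushed last, 8-byte aligned */
};

static_assert(sizeof(struct sample_frame) == 2 * sizeof(int64_t),
	      "same size as CROS_EC_SAMPLE_SIZE");
static_assert(offsetof(struct sample_frame, timestamp) == 8,
	      "timestamp starts on the aligned tail");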
+
+/**
+ * cros_ec_sensors_read_lpc() - retrieve data from EC shared memory
+ * @indio_dev: pointer to IIO device
+ * @scan_mask: bitmap of the sensor indices to scan
+ * @data: location to store data
+ *
+ * This is the safe function for reading the EC data. It guarantees that the
+ * data sampled was not modified by the EC while being read.
+ *
+ * Return: 0 on success, -errno on failure.
+ */
+int cros_ec_sensors_read_lpc(struct iio_dev *indio_dev, unsigned long scan_mask,
+ s16 *data);
+
+/**
+ * cros_ec_sensors_read_cmd() - retrieve data using the EC command protocol
+ * @indio_dev: pointer to IIO device
+ * @scan_mask: bitmap of the sensor indices to scan
+ * @data: location to store data
+ *
+ * Return: 0 on success, -errno on failure.
+ */
+int cros_ec_sensors_read_cmd(struct iio_dev *indio_dev, unsigned long scan_mask,
+ s16 *data);
+
+/**
+ * cros_ec_sensors_core_init() - basic initialization of the core structure
+ * @pdev: platform device created for the sensors
+ * @indio_dev: iio device structure of the device
+ * @physical_device: true if the device refers to a physical device
+ *
+ * Return: 0 on success, -errno on failure.
+ */
+int cros_ec_sensors_core_init(struct platform_device *pdev,
+ struct iio_dev *indio_dev, bool physical_device);
+
+/**
+ * cros_ec_sensors_capture() - the trigger handler function
+ * @irq: the interrupt number.
+ * @p: a pointer to the poll function.
+ *
+ * When a trigger event occurs, if the pollfunc is attached then this
+ * handler is called as a threaded interrupt (and hence may sleep). It
+ * is responsible for grabbing data from the device and pushing it into
+ * the associated buffer.
+ *
+ * Return: IRQ_HANDLED
+ */
+irqreturn_t cros_ec_sensors_capture(int irq, void *p);
+
+/**
+ * cros_ec_motion_send_host_cmd() - send motion sense host command
+ * @st: pointer to state information for device
+ * @opt_length: optional length to reduce the response size, useful on the data
+ * path. Otherwise, the maximal allowed response size is used
+ *
+ * When called, the sub-command is assumed to be set in param->cmd.
+ *
+ * Return: 0 on success, -errno on failure.
+ */
+int cros_ec_motion_send_host_cmd(struct cros_ec_sensors_core_state *st,
+ u16 opt_length);
+
+/**
+ * cros_ec_sensors_core_read() - function to request a value from the sensor
+ * @st: pointer to state information for device
+ * @chan: channel specification structure table
+ * @val: will contain one element making up the returned value
+ * @val2: will contain another element making up the returned value
+ * @mask: specifies which values to be requested
+ *
+ * Return: the type of value returned by the device
+ */
+int cros_ec_sensors_core_read(struct cros_ec_sensors_core_state *st,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask);
+
+/**
+ * cros_ec_sensors_core_write() - function to write a value to the sensor
+ * @st: pointer to state information for device
+ * @chan: channel specification structure table
+ * @val: first part of value to write
+ * @val2: second part of value to write
+ * @mask: specifies which values to write
+ *
+ * Return: the type of value returned by the device
+ */
+int cros_ec_sensors_core_write(struct cros_ec_sensors_core_state *st,
+ struct iio_chan_spec const *chan,
+ int val, int val2, long mask);
+
+/* List of extended channel specification for all sensors */
+extern const struct iio_chan_spec_ext_info cros_ec_sensors_ext_info[];
+
+#endif /* __CROS_EC_SENSORS_CORE_H */
diff --git a/drivers/iio/common/hid-sensors/hid-sensor-attributes.c b/drivers/iio/common/hid-sensors/hid-sensor-attributes.c
index b5beea53d6f6..7ef94a90ecf7 100644
--- a/drivers/iio/common/hid-sensors/hid-sensor-attributes.c
+++ b/drivers/iio/common/hid-sensors/hid-sensor-attributes.c
@@ -201,7 +201,7 @@ int hid_sensor_write_samp_freq_value(struct hid_sensor_common *st,
int ret;
if (val1 < 0 || val2 < 0)
- ret = -EINVAL;
+ return -EINVAL;
value = val1 * pow_10(6) + val2;
if (value) {
@@ -250,6 +250,9 @@ int hid_sensor_write_raw_hyst_value(struct hid_sensor_common *st,
s32 value;
int ret;
+ if (val1 < 0 || val2 < 0)
+ return -EINVAL;
+
value = convert_to_vtf_format(st->sensitivity.size,
st->sensitivity.unit_expo,
val1, val2);
diff --git a/drivers/iio/counter/104-quad-8.c b/drivers/iio/counter/104-quad-8.c
new file mode 100644
index 000000000000..2d2ee353dde7
--- /dev/null
+++ b/drivers/iio/counter/104-quad-8.c
@@ -0,0 +1,593 @@
+/*
+ * IIO driver for the ACCES 104-QUAD-8
+ * Copyright (C) 2016 William Breathitt Gray
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * This driver supports the ACCES 104-QUAD-8 and ACCES 104-QUAD-4.
+ */
+#include <linux/bitops.h>
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/types.h>
+#include <linux/io.h>
+#include <linux/ioport.h>
+#include <linux/isa.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/types.h>
+
+#define QUAD8_EXTENT 32
+
+static unsigned int base[max_num_isa_dev(QUAD8_EXTENT)];
+static unsigned int num_quad8;
+module_param_array(base, uint, &num_quad8, 0);
+MODULE_PARM_DESC(base, "ACCES 104-QUAD-8 base addresses");
+
+#define QUAD8_NUM_COUNTERS 8
+
+/**
+ * struct quad8_iio - IIO device private data structure
+ * @preset: array of preset values
+ * @count_mode: array of count mode configurations
+ * @quadrature_mode: array of quadrature mode configurations
+ * @quadrature_scale: array of quadrature mode scale configurations
+ * @ab_enable: array of A and B inputs enable configurations
+ * @preset_enable: array of set_to_preset_on_index attribute configurations
+ * @synchronous_mode: array of index function synchronous mode configurations
+ * @index_polarity: array of index function polarity configurations
+ * @base: base port address of the IIO device
+ */
+struct quad8_iio {
+ unsigned int preset[QUAD8_NUM_COUNTERS];
+ unsigned int count_mode[QUAD8_NUM_COUNTERS];
+ unsigned int quadrature_mode[QUAD8_NUM_COUNTERS];
+ unsigned int quadrature_scale[QUAD8_NUM_COUNTERS];
+ unsigned int ab_enable[QUAD8_NUM_COUNTERS];
+ unsigned int preset_enable[QUAD8_NUM_COUNTERS];
+ unsigned int synchronous_mode[QUAD8_NUM_COUNTERS];
+ unsigned int index_polarity[QUAD8_NUM_COUNTERS];
+ unsigned int base;
+};
+
+static int quad8_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, int *val, int *val2, long mask)
+{
+ struct quad8_iio *const priv = iio_priv(indio_dev);
+ const int base_offset = priv->base + 2 * chan->channel;
+ unsigned int flags;
+ unsigned int borrow;
+ unsigned int carry;
+ int i;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ if (chan->type == IIO_INDEX) {
+ *val = !!(inb(priv->base + 0x16) & BIT(chan->channel));
+ return IIO_VAL_INT;
+ }
+
+ flags = inb(base_offset);
+ borrow = flags & BIT(0);
+ carry = !!(flags & BIT(1));
+
+ /* Borrow XOR Carry effectively doubles count range */
+ *val = (borrow ^ carry) << 24;
+
+ /* Reset Byte Pointer; transfer Counter to Output Latch */
+ outb(0x11, base_offset + 1);
+
+ for (i = 0; i < 3; i++)
+ *val |= (unsigned int)inb(base_offset) << (8 * i);
+
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_ENABLE:
+ *val = priv->ab_enable[chan->channel];
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_SCALE:
+ *val = 1;
+ *val2 = priv->quadrature_scale[chan->channel];
+ return IIO_VAL_FRACTIONAL_LOG2;
+ }
+
+ return -EINVAL;
+}
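
For reference, the count assembly in the RAW branch above in standalone form: borrow XOR carry supplies the 25th bit, and the three bytes latched after the byte-pointer reset fill the low 24 bits. The input values here are invented:

#include <stdio.h>

/* Mirrors the IIO_CHAN_INFO_RAW branch of quad8_read_raw() */
static unsigned int assemble_count(unsigned char flags,
				   const unsigned char byte[3])
{
	unsigned int borrow = flags & 0x01;
	unsigned int carry = !!(flags & 0x02);
	/* Borrow XOR Carry effectively doubles the 24-bit count range */
	unsigned int val = (borrow ^ carry) << 24;
	int i;

	for (i = 0; i < 3; i++)
		val |= (unsigned int)byte[i] << (8 * i);
	return val;
}

int main(void)
{
	const unsigned char latched[3] = { 0x34, 0x12, 0x00 };

	printf("0x%07x\n", assemble_count(0x02, latched)); /* 0x1001234 */
	return 0;
}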
+
+static int quad8_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, int val, int val2, long mask)
+{
+ struct quad8_iio *const priv = iio_priv(indio_dev);
+ const int base_offset = priv->base + 2 * chan->channel;
+ int i;
+ unsigned int ior_cfg;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ if (chan->type == IIO_INDEX)
+ return -EINVAL;
+
+ /* Only 24-bit values are supported */
+ if ((unsigned int)val > 0xFFFFFF)
+ return -EINVAL;
+
+ /* Reset Byte Pointer */
+ outb(0x01, base_offset + 1);
+
+ /* Counter can only be set via Preset Register */
+ for (i = 0; i < 3; i++)
+ outb(val >> (8 * i), base_offset);
+
+ /* Transfer Preset Register to Counter */
+ outb(0x08, base_offset + 1);
+
+ /* Reset Byte Pointer */
+ outb(0x01, base_offset + 1);
+
+ /* Set Preset Register back to original value */
+ val = priv->preset[chan->channel];
+ for (i = 0; i < 3; i++)
+ outb(val >> (8 * i), base_offset);
+
+ /* Reset Borrow, Carry, Compare, and Sign flags */
+ outb(0x02, base_offset + 1);
+ /* Reset Error flag */
+ outb(0x06, base_offset + 1);
+
+ return 0;
+ case IIO_CHAN_INFO_ENABLE:
+ /* only boolean values accepted */
+ if (val < 0 || val > 1)
+ return -EINVAL;
+
+ priv->ab_enable[chan->channel] = val;
+
+ ior_cfg = val | priv->preset_enable[chan->channel] << 1;
+
+ /* Load I/O control configuration */
+ outb(0x40 | ior_cfg, base_offset);
+
+ return 0;
+ case IIO_CHAN_INFO_SCALE:
+ /* Quadrature scaling only available in quadrature mode */
+ if (!priv->quadrature_mode[chan->channel] && (val2 || val != 1))
+ return -EINVAL;
+
+ /* Only three gain states (1, 0.5, 0.25) */
+ if (val == 1 && !val2)
+ priv->quadrature_scale[chan->channel] = 0;
+ else if (!val)
+ switch (val2) {
+ case 500000:
+ priv->quadrature_scale[chan->channel] = 1;
+ break;
+ case 250000:
+ priv->quadrature_scale[chan->channel] = 2;
+ break;
+ default:
+ return -EINVAL;
+ }
+ else
+ return -EINVAL;
+
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static const struct iio_info quad8_info = {
+ .driver_module = THIS_MODULE,
+ .read_raw = quad8_read_raw,
+ .write_raw = quad8_write_raw
+};
+
+static ssize_t quad8_read_preset(struct iio_dev *indio_dev, uintptr_t private,
+ const struct iio_chan_spec *chan, char *buf)
+{
+ const struct quad8_iio *const priv = iio_priv(indio_dev);
+
+ return snprintf(buf, PAGE_SIZE, "%u\n", priv->preset[chan->channel]);
+}
+
+static ssize_t quad8_write_preset(struct iio_dev *indio_dev, uintptr_t private,
+ const struct iio_chan_spec *chan, const char *buf, size_t len)
+{
+ struct quad8_iio *const priv = iio_priv(indio_dev);
+ const int base_offset = priv->base + 2 * chan->channel;
+ unsigned int preset;
+ int ret;
+ int i;
+
+ ret = kstrtouint(buf, 0, &preset);
+ if (ret)
+ return ret;
+
+ /* Only 24-bit values are supported */
+ if (preset > 0xFFFFFF)
+ return -EINVAL;
+
+ priv->preset[chan->channel] = preset;
+
+ /* Reset Byte Pointer */
+ outb(0x01, base_offset + 1);
+
+ /* Set Preset Register */
+ for (i = 0; i < 3; i++)
+ outb(preset >> (8 * i), base_offset);
+
+ return len;
+}
+
+static ssize_t quad8_read_set_to_preset_on_index(struct iio_dev *indio_dev,
+ uintptr_t private, const struct iio_chan_spec *chan, char *buf)
+{
+ const struct quad8_iio *const priv = iio_priv(indio_dev);
+
+ return snprintf(buf, PAGE_SIZE, "%u\n",
+ priv->preset_enable[chan->channel]);
+}
+
+static ssize_t quad8_write_set_to_preset_on_index(struct iio_dev *indio_dev,
+ uintptr_t private, const struct iio_chan_spec *chan, const char *buf,
+ size_t len)
+{
+ struct quad8_iio *const priv = iio_priv(indio_dev);
+ const int base_offset = priv->base + 2 * chan->channel;
+ bool preset_enable;
+ int ret;
+ unsigned int ior_cfg;
+
+ ret = kstrtobool(buf, &preset_enable);
+ if (ret)
+ return ret;
+
+ priv->preset_enable[chan->channel] = preset_enable;
+
+ ior_cfg = priv->ab_enable[chan->channel] |
+ (unsigned int)preset_enable << 1;
+
+ /* Load I/O control configuration to Input / Output Control Register */
+ outb(0x40 | ior_cfg, base_offset);
+
+ return len;
+}
+
+static const char *const quad8_noise_error_states[] = {
+ "No excessive noise is present at the count inputs",
+ "Excessive noise is present at the count inputs"
+};
+
+static int quad8_get_noise_error(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan)
+{
+ struct quad8_iio *const priv = iio_priv(indio_dev);
+ const int base_offset = priv->base + 2 * chan->channel + 1;
+
+ return !!(inb(base_offset) & BIT(4));
+}
+
+static const struct iio_enum quad8_noise_error_enum = {
+ .items = quad8_noise_error_states,
+ .num_items = ARRAY_SIZE(quad8_noise_error_states),
+ .get = quad8_get_noise_error
+};
+
+static const char *const quad8_count_direction_states[] = {
+ "down",
+ "up"
+};
+
+static int quad8_get_count_direction(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan)
+{
+ struct quad8_iio *const priv = iio_priv(indio_dev);
+ const int base_offset = priv->base + 2 * chan->channel + 1;
+
+ return !!(inb(base_offset) & BIT(5));
+}
+
+static const struct iio_enum quad8_count_direction_enum = {
+ .items = quad8_count_direction_states,
+ .num_items = ARRAY_SIZE(quad8_count_direction_states),
+ .get = quad8_get_count_direction
+};
+
+static const char *const quad8_count_modes[] = {
+ "normal",
+ "range limit",
+ "non-recycle",
+ "modulo-n"
+};
+
+static int quad8_set_count_mode(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan, unsigned int count_mode)
+{
+ struct quad8_iio *const priv = iio_priv(indio_dev);
+ unsigned int mode_cfg = count_mode << 1;
+ const int base_offset = priv->base + 2 * chan->channel + 1;
+
+ priv->count_mode[chan->channel] = count_mode;
+
+ /* Add quadrature mode configuration */
+ if (priv->quadrature_mode[chan->channel])
+ mode_cfg |= (priv->quadrature_scale[chan->channel] + 1) << 3;
+
+ /* Load mode configuration to Counter Mode Register */
+ outb(0x20 | mode_cfg, base_offset);
+
+ return 0;
+}
+
+static int quad8_get_count_mode(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan)
+{
+ const struct quad8_iio *const priv = iio_priv(indio_dev);
+
+ return priv->count_mode[chan->channel];
+}
+
+static const struct iio_enum quad8_count_mode_enum = {
+ .items = quad8_count_modes,
+ .num_items = ARRAY_SIZE(quad8_count_modes),
+ .set = quad8_set_count_mode,
+ .get = quad8_get_count_mode
+};
+
+static const char *const quad8_synchronous_modes[] = {
+ "non-synchronous",
+ "synchronous"
+};
+
+static int quad8_set_synchronous_mode(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan, unsigned int synchronous_mode)
+{
+ struct quad8_iio *const priv = iio_priv(indio_dev);
+ const unsigned int idr_cfg = synchronous_mode |
+ priv->index_polarity[chan->channel] << 1;
+ const int base_offset = priv->base + 2 * chan->channel + 1;
+
+ /* Index function must be non-synchronous in non-quadrature mode */
+ if (synchronous_mode && !priv->quadrature_mode[chan->channel])
+ return -EINVAL;
+
+ priv->synchronous_mode[chan->channel] = synchronous_mode;
+
+ /* Load Index Control configuration to Index Control Register */
+ outb(0x40 | idr_cfg, base_offset);
+
+ return 0;
+}
+
+static int quad8_get_synchronous_mode(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan)
+{
+ const struct quad8_iio *const priv = iio_priv(indio_dev);
+
+ return priv->synchronous_mode[chan->channel];
+}
+
+static const struct iio_enum quad8_synchronous_mode_enum = {
+ .items = quad8_synchronous_modes,
+ .num_items = ARRAY_SIZE(quad8_synchronous_modes),
+ .set = quad8_set_synchronous_mode,
+ .get = quad8_get_synchronous_mode
+};
+
+static const char *const quad8_quadrature_modes[] = {
+ "non-quadrature",
+ "quadrature"
+};
+
+static int quad8_set_quadrature_mode(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan, unsigned int quadrature_mode)
+{
+ struct quad8_iio *const priv = iio_priv(indio_dev);
+ unsigned int mode_cfg = priv->count_mode[chan->channel] << 1;
+ const int base_offset = priv->base + 2 * chan->channel + 1;
+
+ if (quadrature_mode)
+ mode_cfg |= (priv->quadrature_scale[chan->channel] + 1) << 3;
+ else {
+ /* Quadrature scaling only available in quadrature mode */
+ priv->quadrature_scale[chan->channel] = 0;
+
+ /* Synchronous function not supported in non-quadrature mode */
+ if (priv->synchronous_mode[chan->channel])
+ quad8_set_synchronous_mode(indio_dev, chan, 0);
+ }
+
+ priv->quadrature_mode[chan->channel] = quadrature_mode;
+
+ /* Load mode configuration to Counter Mode Register */
+ outb(0x20 | mode_cfg, base_offset);
+
+ return 0;
+}
+
+static int quad8_get_quadrature_mode(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan)
+{
+ const struct quad8_iio *const priv = iio_priv(indio_dev);
+
+ return priv->quadrature_mode[chan->channel];
+}
+
+static const struct iio_enum quad8_quadrature_mode_enum = {
+ .items = quad8_quadrature_modes,
+ .num_items = ARRAY_SIZE(quad8_quadrature_modes),
+ .set = quad8_set_quadrature_mode,
+ .get = quad8_get_quadrature_mode
+};
+
+static const char *const quad8_index_polarity_modes[] = {
+ "negative",
+ "positive"
+};
+
+static int quad8_set_index_polarity(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan, unsigned int index_polarity)
+{
+ struct quad8_iio *const priv = iio_priv(indio_dev);
+ const unsigned int idr_cfg = priv->synchronous_mode[chan->channel] |
+ index_polarity << 1;
+ const int base_offset = priv->base + 2 * chan->channel + 1;
+
+ priv->index_polarity[chan->channel] = index_polarity;
+
+ /* Load Index Control configuration to Index Control Register */
+ outb(0x40 | idr_cfg, base_offset);
+
+ return 0;
+}
+
+static int quad8_get_index_polarity(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan)
+{
+ const struct quad8_iio *const priv = iio_priv(indio_dev);
+
+ return priv->index_polarity[chan->channel];
+}
+
+static const struct iio_enum quad8_index_polarity_enum = {
+ .items = quad8_index_polarity_modes,
+ .num_items = ARRAY_SIZE(quad8_index_polarity_modes),
+ .set = quad8_set_index_polarity,
+ .get = quad8_get_index_polarity
+};
+
+static const struct iio_chan_spec_ext_info quad8_count_ext_info[] = {
+ {
+ .name = "preset",
+ .shared = IIO_SEPARATE,
+ .read = quad8_read_preset,
+ .write = quad8_write_preset
+ },
+ {
+ .name = "set_to_preset_on_index",
+ .shared = IIO_SEPARATE,
+ .read = quad8_read_set_to_preset_on_index,
+ .write = quad8_write_set_to_preset_on_index
+ },
+ IIO_ENUM("noise_error", IIO_SEPARATE, &quad8_noise_error_enum),
+ IIO_ENUM_AVAILABLE("noise_error", &quad8_noise_error_enum),
+ IIO_ENUM("count_direction", IIO_SEPARATE, &quad8_count_direction_enum),
+ IIO_ENUM_AVAILABLE("count_direction", &quad8_count_direction_enum),
+ IIO_ENUM("count_mode", IIO_SEPARATE, &quad8_count_mode_enum),
+ IIO_ENUM_AVAILABLE("count_mode", &quad8_count_mode_enum),
+ IIO_ENUM("quadrature_mode", IIO_SEPARATE, &quad8_quadrature_mode_enum),
+ IIO_ENUM_AVAILABLE("quadrature_mode", &quad8_quadrature_mode_enum),
+ {}
+};
+
+static const struct iio_chan_spec_ext_info quad8_index_ext_info[] = {
+ IIO_ENUM("synchronous_mode", IIO_SEPARATE,
+ &quad8_synchronous_mode_enum),
+ IIO_ENUM_AVAILABLE("synchronous_mode", &quad8_synchronous_mode_enum),
+ IIO_ENUM("index_polarity", IIO_SEPARATE, &quad8_index_polarity_enum),
+ IIO_ENUM_AVAILABLE("index_polarity", &quad8_index_polarity_enum),
+ {}
+};
+
+#define QUAD8_COUNT_CHAN(_chan) { \
+ .type = IIO_COUNT, \
+ .channel = (_chan), \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
+ BIT(IIO_CHAN_INFO_ENABLE) | BIT(IIO_CHAN_INFO_SCALE), \
+ .ext_info = quad8_count_ext_info, \
+ .indexed = 1 \
+}
+
+#define QUAD8_INDEX_CHAN(_chan) { \
+ .type = IIO_INDEX, \
+ .channel = (_chan), \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
+ .ext_info = quad8_index_ext_info, \
+ .indexed = 1 \
+}
+
+static const struct iio_chan_spec quad8_channels[] = {
+ QUAD8_COUNT_CHAN(0), QUAD8_INDEX_CHAN(0),
+ QUAD8_COUNT_CHAN(1), QUAD8_INDEX_CHAN(1),
+ QUAD8_COUNT_CHAN(2), QUAD8_INDEX_CHAN(2),
+ QUAD8_COUNT_CHAN(3), QUAD8_INDEX_CHAN(3),
+ QUAD8_COUNT_CHAN(4), QUAD8_INDEX_CHAN(4),
+ QUAD8_COUNT_CHAN(5), QUAD8_INDEX_CHAN(5),
+ QUAD8_COUNT_CHAN(6), QUAD8_INDEX_CHAN(6),
+ QUAD8_COUNT_CHAN(7), QUAD8_INDEX_CHAN(7)
+};
+
+static int quad8_probe(struct device *dev, unsigned int id)
+{
+ struct iio_dev *indio_dev;
+ struct quad8_iio *priv;
+ int i, j;
+ unsigned int base_offset;
+
+ indio_dev = devm_iio_device_alloc(dev, sizeof(*priv));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ if (!devm_request_region(dev, base[id], QUAD8_EXTENT,
+ dev_name(dev))) {
+ dev_err(dev, "Unable to lock port addresses (0x%X-0x%X)\n",
+ base[id], base[id] + QUAD8_EXTENT);
+ return -EBUSY;
+ }
+
+ indio_dev->info = &quad8_info;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->num_channels = ARRAY_SIZE(quad8_channels);
+ indio_dev->channels = quad8_channels;
+ indio_dev->name = dev_name(dev);
+
+ priv = iio_priv(indio_dev);
+ priv->base = base[id];
+
+ /* Reset all counters and disable interrupt function */
+ outb(0x01, base[id] + 0x11);
+ /* Set initial configuration for all counters */
+ for (i = 0; i < QUAD8_NUM_COUNTERS; i++) {
+ base_offset = base[id] + 2 * i;
+ /* Reset Byte Pointer */
+ outb(0x01, base_offset + 1);
+ /* Reset Preset Register */
+ for (j = 0; j < 3; j++)
+ outb(0x00, base_offset);
+ /* Reset Borrow, Carry, Compare, and Sign flags */
+ outb(0x04, base_offset + 1);
+ /* Reset Error flag */
+ outb(0x06, base_offset + 1);
+ /* Binary encoding; Normal count; non-quadrature mode */
+ outb(0x20, base_offset + 1);
+ /* Disable A and B inputs; preset on index; FLG1 as Carry */
+ outb(0x40, base_offset + 1);
+ /* Disable index function; negative index polarity */
+ outb(0x60, base_offset + 1);
+ }
+ /* Enable all counters */
+ outb(0x00, base[id] + 0x11);
+
+ return devm_iio_device_register(dev, indio_dev);
+}
+
+static struct isa_driver quad8_driver = {
+ .probe = quad8_probe,
+ .driver = {
+ .name = "104-quad-8"
+ }
+};
+
+module_isa_driver(quad8_driver, num_quad8);
+
+MODULE_AUTHOR("William Breathitt Gray <vilhelm.gray@gmail.com>");
+MODULE_DESCRIPTION("ACCES 104-QUAD-8 IIO driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/counter/Kconfig b/drivers/iio/counter/Kconfig
new file mode 100644
index 000000000000..44627f6e4861
--- /dev/null
+++ b/drivers/iio/counter/Kconfig
@@ -0,0 +1,24 @@
+#
+# Counter devices
+#
+# When adding new entries keep the list in alphabetical order
+
+menu "Counters"
+
+config 104_QUAD_8
+ tristate "ACCES 104-QUAD-8 driver"
+ depends on X86 && ISA_BUS_API
+ help
+ Say yes here to build support for the ACCES 104-QUAD-8 quadrature
+ encoder counter/interface device family (104-QUAD-8, 104-QUAD-4).
+
+ Performing a write to a counter's IIO_CHAN_INFO_RAW sets the counter and
+ also clears the counter's respective error flag. Although the counters
+ have a 25-bit range, only the lower 24 bits may be set, either directly
+ or via a counter's preset attribute. Interrupts are not supported by
+ this driver.
+
+ The base port addresses for the devices may be configured via the base
+ array module parameter.
+
+endmenu
diff --git a/drivers/iio/counter/Makefile b/drivers/iio/counter/Makefile
new file mode 100644
index 000000000000..007e88411648
--- /dev/null
+++ b/drivers/iio/counter/Makefile
@@ -0,0 +1,7 @@
+#
+# Makefile for IIO counter devices
+#
+
+# When adding new entries keep the list in alphabetical order
+
+obj-$(CONFIG_104_QUAD_8) += 104-quad-8.o
diff --git a/drivers/iio/dac/Kconfig b/drivers/iio/dac/Kconfig
index 120b24478469..d3084028905b 100644
--- a/drivers/iio/dac/Kconfig
+++ b/drivers/iio/dac/Kconfig
@@ -200,6 +200,16 @@ config AD8801
To compile this driver as a module choose M here: the module will be called
ad8801.
+config DPOT_DAC
+ tristate "DAC emulation using a DPOT"
+ depends on OF
+ help
+ Say yes here to build support for DAC emulation using a digital
+ potentiometer.
+
+ To compile this driver as a module, choose M here: the module will be
+ called dpot-dac.
+
config LPC18XX_DAC
tristate "NXP LPC18xx DAC driver"
depends on ARCH_LPC18XX || COMPILE_TEST
diff --git a/drivers/iio/dac/Makefile b/drivers/iio/dac/Makefile
index 27642bbf75f2..f01bf4a99867 100644
--- a/drivers/iio/dac/Makefile
+++ b/drivers/iio/dac/Makefile
@@ -22,6 +22,7 @@ obj-$(CONFIG_AD5686) += ad5686.o
obj-$(CONFIG_AD7303) += ad7303.o
obj-$(CONFIG_AD8801) += ad8801.o
obj-$(CONFIG_CIO_DAC) += cio-dac.o
+obj-$(CONFIG_DPOT_DAC) += dpot-dac.o
obj-$(CONFIG_LPC18XX_DAC) += lpc18xx_dac.o
obj-$(CONFIG_M62332) += m62332.o
obj-$(CONFIG_MAX517) += max517.o
diff --git a/drivers/iio/dac/ad5592r.c b/drivers/iio/dac/ad5592r.c
index 0b235a2c7359..6eed5b7729be 100644
--- a/drivers/iio/dac/ad5592r.c
+++ b/drivers/iio/dac/ad5592r.c
@@ -17,7 +17,7 @@
#define AD5592R_GPIO_READBACK_EN BIT(10)
#define AD5592R_LDAC_READBACK_EN BIT(6)
-static int ad5592r_spi_wnop_r16(struct ad5592r_state *st, u16 *buf)
+static int ad5592r_spi_wnop_r16(struct ad5592r_state *st, __be16 *buf)
{
struct spi_device *spi = container_of(st->dev, struct spi_device, dev);
struct spi_transfer t = {
diff --git a/drivers/iio/dac/dpot-dac.c b/drivers/iio/dac/dpot-dac.c
new file mode 100644
index 000000000000..960a2b430480
--- /dev/null
+++ b/drivers/iio/dac/dpot-dac.c
@@ -0,0 +1,266 @@
+/*
+ * IIO DAC emulation driver using a digital potentiometer
+ *
+ * Copyright (C) 2016 Axentia Technologies AB
+ *
+ * Author: Peter Rosin <peda@axentia.se>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/*
+ * It is assumed that the dpot is used as a voltage divider between the
+ * current dpot wiper setting and the maximum resistance of the dpot. The
+ * divided voltage is provided by a vref regulator.
+ *
+ * .------.
+ * .-----------. | |
+ * | vref |--' .---.
+ * | regulator |--. | |
+ * '-----------' | | d |
+ * | | p |
+ * | | o | wiper
+ * | | t |<---------+
+ * | | |
+ * | '---' dac output voltage
+ * | |
+ * '------+------------+
+ */
+
+#include <linux/err.h>
+#include <linux/iio/consumer.h>
+#include <linux/iio/iio.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+
+struct dpot_dac {
+ struct regulator *vref;
+ struct iio_channel *dpot;
+ u32 max_ohms;
+};
+
+static const struct iio_chan_spec dpot_dac_iio_channel = {
+ .type = IIO_VOLTAGE,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW)
+ | BIT(IIO_CHAN_INFO_SCALE),
+ .info_mask_separate_available = BIT(IIO_CHAN_INFO_RAW),
+ .output = 1,
+ .indexed = 1,
+};
+
+static int dpot_dac_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ struct dpot_dac *dac = iio_priv(indio_dev);
+ int ret;
+ unsigned long long tmp;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ return iio_read_channel_raw(dac->dpot, val);
+
+ case IIO_CHAN_INFO_SCALE:
+ ret = iio_read_channel_scale(dac->dpot, val, val2);
+ switch (ret) {
+ case IIO_VAL_FRACTIONAL_LOG2:
+ tmp = *val * 1000000000LL;
+ do_div(tmp, dac->max_ohms);
+ tmp *= regulator_get_voltage(dac->vref) / 1000;
+ do_div(tmp, 1000000000LL);
+ *val = tmp;
+ return ret;
+ case IIO_VAL_INT:
+ /*
+ * Convert integer scale to fractional scale by
+ * setting the denominator (val2) to one...
+ */
+ *val2 = 1;
+ ret = IIO_VAL_FRACTIONAL;
+ /* ...and fall through. */
+ case IIO_VAL_FRACTIONAL:
+ *val *= regulator_get_voltage(dac->vref) / 1000;
+ *val2 *= dac->max_ohms;
+ break;
+ }
+
+ return ret;
+ }
+
+ return -EINVAL;
+}
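
As a worked example of the scale computation above, assume a hypothetical dpot that reports an integer scale of 1000 ohms per raw step, a 100 kohm end-to-end resistance and a 3.3 V vref; the fractional result comes out in mV per raw step:

#include <stdio.h>

int main(void)
{
	int val = 1000;			/* dpot scale, ohms per step */
	int val2 = 1;			/* promoted from IIO_VAL_INT */
	const int vref_uv = 3300000;	/* regulator_get_voltage() result */
	const int max_ohms = 100000;

	val *= vref_uv / 1000;		/* numerator: mV * ohms */
	val2 *= max_ohms;		/* denominator: ohms */
	printf("%d/%d -> %d mV per raw step\n", val, val2, val / val2);
	return 0;
}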
+
+static int dpot_dac_read_avail(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ const int **vals, int *type, int *length,
+ long mask)
+{
+ struct dpot_dac *dac = iio_priv(indio_dev);
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ *type = IIO_VAL_INT;
+ return iio_read_avail_channel_raw(dac->dpot, vals, length);
+ }
+
+ return -EINVAL;
+}
+
+static int dpot_dac_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val, int val2, long mask)
+{
+ struct dpot_dac *dac = iio_priv(indio_dev);
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ return iio_write_channel_raw(dac->dpot, val);
+ }
+
+ return -EINVAL;
+}
+
+static const struct iio_info dpot_dac_info = {
+ .read_raw = dpot_dac_read_raw,
+ .read_avail = dpot_dac_read_avail,
+ .write_raw = dpot_dac_write_raw,
+ .driver_module = THIS_MODULE,
+};
+
+static int dpot_dac_channel_max_ohms(struct iio_dev *indio_dev)
+{
+ struct device *dev = &indio_dev->dev;
+ struct dpot_dac *dac = iio_priv(indio_dev);
+ unsigned long long tmp;
+ int ret;
+ int val;
+ int val2;
+ int max;
+
+ ret = iio_read_max_channel_raw(dac->dpot, &max);
+ if (ret < 0) {
+ dev_err(dev, "dpot does not indicate its raw maximum value\n");
+ return ret;
+ }
+
+ switch (iio_read_channel_scale(dac->dpot, &val, &val2)) {
+ case IIO_VAL_INT:
+ return max * val;
+ case IIO_VAL_FRACTIONAL:
+ tmp = (unsigned long long)max * val;
+ do_div(tmp, val2);
+ return tmp;
+ case IIO_VAL_FRACTIONAL_LOG2:
+ tmp = val * 1000000000LL * max >> val2;
+ do_div(tmp, 1000000000LL);
+ return tmp;
+ default:
+ dev_err(dev, "dpot has a scale that is too weird\n");
+ }
+
+ return -EINVAL;
+}
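
The IIO_VAL_FRACTIONAL_LOG2 branch above computes max * (val / 2^val2), scaling by 10^9 so the integer division keeps precision. The same arithmetic in a standalone sketch, with invented potentiometer numbers:

#include <stdio.h>

int main(void)
{
	int val = 100000, val2 = 8;	/* scale: 100000 / 2^8 ohms per step */
	int max = 255;			/* highest raw wiper code */
	unsigned long long tmp;

	tmp = ((unsigned long long)val * 1000000000ULL * max) >> val2;
	tmp /= 1000000000ULL;		/* stands in for do_div() */
	printf("%llu ohms end to end\n", tmp);	/* ~99609 */
	return 0;
}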
+
+static int dpot_dac_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct iio_dev *indio_dev;
+ struct dpot_dac *dac;
+ enum iio_chan_type type;
+ int ret;
+
+ indio_dev = devm_iio_device_alloc(dev, sizeof(*dac));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, indio_dev);
+ dac = iio_priv(indio_dev);
+
+ indio_dev->name = dev_name(dev);
+ indio_dev->dev.parent = dev;
+ indio_dev->info = &dpot_dac_info;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->channels = &dpot_dac_iio_channel;
+ indio_dev->num_channels = 1;
+
+ dac->vref = devm_regulator_get(dev, "vref");
+ if (IS_ERR(dac->vref)) {
+ if (PTR_ERR(dac->vref) != -EPROBE_DEFER)
+ dev_err(&pdev->dev, "failed to get vref regulator\n");
+ return PTR_ERR(dac->vref);
+ }
+
+ dac->dpot = devm_iio_channel_get(dev, "dpot");
+ if (IS_ERR(dac->dpot)) {
+ if (PTR_ERR(dac->dpot) != -EPROBE_DEFER)
+ dev_err(dev, "failed to get dpot input channel\n");
+ return PTR_ERR(dac->dpot);
+ }
+
+ ret = iio_get_channel_type(dac->dpot, &type);
+ if (ret < 0)
+ return ret;
+
+ if (type != IIO_RESISTANCE) {
+ dev_err(dev, "dpot is of the wrong type\n");
+ return -EINVAL;
+ }
+
+ ret = dpot_dac_channel_max_ohms(indio_dev);
+ if (ret < 0)
+ return ret;
+ dac->max_ohms = ret;
+
+ ret = regulator_enable(dac->vref);
+ if (ret) {
+ dev_err(dev, "failed to enable the vref regulator\n");
+ return ret;
+ }
+
+ ret = iio_device_register(indio_dev);
+ if (ret) {
+ dev_err(dev, "failed to register iio device\n");
+ goto disable_reg;
+ }
+
+ return 0;
+
+disable_reg:
+ regulator_disable(dac->vref);
+ return ret;
+}
+
+static int dpot_dac_remove(struct platform_device *pdev)
+{
+ struct iio_dev *indio_dev = platform_get_drvdata(pdev);
+ struct dpot_dac *dac = iio_priv(indio_dev);
+
+ iio_device_unregister(indio_dev);
+ regulator_disable(dac->vref);
+
+ return 0;
+}
+
+static const struct of_device_id dpot_dac_match[] = {
+ { .compatible = "dpot-dac" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, dpot_dac_match);
+
+static struct platform_driver dpot_dac_driver = {
+ .probe = dpot_dac_probe,
+ .remove = dpot_dac_remove,
+ .driver = {
+ .name = "iio-dpot-dac",
+ .of_match_table = dpot_dac_match,
+ },
+};
+module_platform_driver(dpot_dac_driver);
+
+MODULE_DESCRIPTION("DAC emulation driver using a digital potentiometer");
+MODULE_AUTHOR("Peter Rosin <peda@axentia.se>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/dac/mcp4725.c b/drivers/iio/dac/mcp4725.c
index cca935c06f2b..db109f0cdd8c 100644
--- a/drivers/iio/dac/mcp4725.c
+++ b/drivers/iio/dac/mcp4725.c
@@ -18,6 +18,8 @@
#include <linux/i2c.h>
#include <linux/err.h>
#include <linux/delay.h>
+#include <linux/regulator/consumer.h>
+#include <linux/of.h>
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
@@ -26,12 +28,20 @@
#define MCP4725_DRV_NAME "mcp4725"
+#define MCP472X_REF_VDD 0x00
+#define MCP472X_REF_VREF_UNBUFFERED 0x02
+#define MCP472X_REF_VREF_BUFFERED 0x03
+
struct mcp4725_data {
struct i2c_client *client;
- u16 vref_mv;
+ int id;
+ unsigned ref_mode;
+ bool vref_buffered;
u16 dac_value;
bool powerdown;
unsigned powerdown_mode;
+ struct regulator *vdd_reg;
+ struct regulator *vref_reg;
};
static int mcp4725_suspend(struct device *dev)
@@ -86,6 +96,7 @@ static ssize_t mcp4725_store_eeprom(struct device *dev,
return 0;
inoutbuf[0] = 0x60; /* write EEPROM */
+ inoutbuf[0] |= data->ref_mode << 3;
inoutbuf[1] = data->dac_value >> 4;
inoutbuf[2] = (data->dac_value & 0xf) << 4;
@@ -278,18 +289,49 @@ static int mcp4725_set_value(struct iio_dev *indio_dev, int val)
return 0;
}
+static int mcp4726_set_cfg(struct iio_dev *indio_dev)
+{
+ struct mcp4725_data *data = iio_priv(indio_dev);
+ u8 outbuf[3];
+ int ret;
+
+ outbuf[0] = 0x40;
+ outbuf[0] |= data->ref_mode << 3;
+ if (data->powerdown)
+ outbuf[0] |= data->powerdown << 1;
+ outbuf[1] = data->dac_value >> 4;
+ outbuf[2] = (data->dac_value & 0xf) << 4;
+
+ ret = i2c_master_send(data->client, outbuf, 3);
+ if (ret < 0)
+ return ret;
+ else if (ret != 3)
+ return -EIO;
+ else
+ return 0;
+}
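
mcp4726_set_cfg() above packs a three-byte DAC-register write: command 0x40, the reference-mode field shifted into bits 4:3, then the 12-bit code split 8/4 across the remaining bytes. A sketch of just the packing, with an arbitrary code:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t dac_value = 0xabc;		/* arbitrary 12-bit code */
	uint8_t ref_mode = 0x02;		/* MCP472X_REF_VREF_UNBUFFERED */
	uint8_t buf[3];

	buf[0] = 0x40 | ref_mode << 3;
	buf[1] = dac_value >> 4;		/* top 8 bits */
	buf[2] = (dac_value & 0xf) << 4;	/* low 4 bits, left-justified */
	printf("%02x %02x %02x\n", buf[0], buf[1], buf[2]);	/* 50 ab c0 */
	return 0;
}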
+
static int mcp4725_read_raw(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan,
int *val, int *val2, long mask)
{
struct mcp4725_data *data = iio_priv(indio_dev);
+ int ret;
switch (mask) {
case IIO_CHAN_INFO_RAW:
*val = data->dac_value;
return IIO_VAL_INT;
case IIO_CHAN_INFO_SCALE:
- *val = data->vref_mv;
+ if (data->ref_mode == MCP472X_REF_VDD)
+ ret = regulator_get_voltage(data->vdd_reg);
+ else
+ ret = regulator_get_voltage(data->vref_reg);
+
+ if (ret < 0)
+ return ret;
+
+ *val = ret / 1000;
*val2 = 12;
return IIO_VAL_FRACTIONAL_LOG2;
}
@@ -323,27 +365,98 @@ static const struct iio_info mcp4725_info = {
.driver_module = THIS_MODULE,
};
+#ifdef CONFIG_OF
+static int mcp4725_probe_dt(struct device *dev,
+ struct mcp4725_platform_data *pdata)
+{
+ struct device_node *np = dev->of_node;
+
+ if (!np)
+ return -ENODEV;
+
+ /* check whether a vref-supply is defined */
+ pdata->use_vref = of_property_read_bool(np, "vref-supply");
+ pdata->vref_buffered =
+ of_property_read_bool(np, "microchip,vref-buffered");
+
+ return 0;
+}
+#else
+static int mcp4725_probe_dt(struct device *dev,
+ struct mcp4725_platform_data *platform_data)
+{
+ return -ENODEV;
+}
+#endif
+
static int mcp4725_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct mcp4725_data *data;
struct iio_dev *indio_dev;
- struct mcp4725_platform_data *platform_data = client->dev.platform_data;
- u8 inbuf[3];
+ struct mcp4725_platform_data *pdata, pdata_dt;
+ u8 inbuf[4];
u8 pd;
+ u8 ref;
int err;
- if (!platform_data || !platform_data->vref_mv) {
- dev_err(&client->dev, "invalid platform data");
- return -EINVAL;
- }
-
indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data));
if (indio_dev == NULL)
return -ENOMEM;
data = iio_priv(indio_dev);
i2c_set_clientdata(client, indio_dev);
data->client = client;
+ data->id = id->driver_data;
+ pdata = dev_get_platdata(&client->dev);
+
+ if (!pdata) {
+ err = mcp4725_probe_dt(&client->dev, &pdata_dt);
+ if (err) {
+ dev_err(&client->dev,
+ "invalid platform or devicetree data");
+ return err;
+ }
+ pdata = &pdata_dt;
+ }
+
+ if (data->id == MCP4725 && pdata->use_vref) {
+ dev_err(&client->dev,
+ "external reference is unavailable on MCP4725");
+ return -EINVAL;
+ }
+
+ if (!pdata->use_vref && pdata->vref_buffered) {
+ dev_err(&client->dev,
+ "buffering is unavailable on the internal reference");
+ return -EINVAL;
+ }
+
+ if (!pdata->use_vref)
+ data->ref_mode = MCP472X_REF_VDD;
+ else
+ data->ref_mode = pdata->vref_buffered ?
+ MCP472X_REF_VREF_BUFFERED :
+ MCP472X_REF_VREF_UNBUFFERED;
+
+ data->vdd_reg = devm_regulator_get(&client->dev, "vdd");
+ if (IS_ERR(data->vdd_reg))
+ return PTR_ERR(data->vdd_reg);
+
+ err = regulator_enable(data->vdd_reg);
+ if (err)
+ return err;
+
+ if (pdata->use_vref) {
+ data->vref_reg = devm_regulator_get(&client->dev, "vref");
+ if (IS_ERR(data->vref_reg)) {
+ err = PTR_ERR(data->vref_reg);
+ goto err_disable_vdd_reg;
+ }
+
+ err = regulator_enable(data->vref_reg);
+ if (err)
+ goto err_disable_vdd_reg;
+ }
indio_dev->dev.parent = &client->dev;
indio_dev->name = id->name;
@@ -352,25 +465,56 @@ static int mcp4725_probe(struct i2c_client *client,
indio_dev->num_channels = 1;
indio_dev->modes = INDIO_DIRECT_MODE;
- data->vref_mv = platform_data->vref_mv;
+ /* read current DAC value and settings */
+ err = i2c_master_recv(client, inbuf, data->id == MCP4725 ? 3 : 4);
- /* read current DAC value */
- err = i2c_master_recv(client, inbuf, 3);
if (err < 0) {
dev_err(&client->dev, "failed to read DAC value");
- return err;
+ goto err_disable_vref_reg;
}
pd = (inbuf[0] >> 1) & 0x3;
data->powerdown = pd > 0 ? true : false;
- data->powerdown_mode = pd ? pd - 1 : 2; /* largest register to gnd */
+ data->powerdown_mode = pd ? pd - 1 : 2; /* largest resistor to gnd */
data->dac_value = (inbuf[1] << 4) | (inbuf[2] >> 4);
+ if (data->id == MCP4726)
+ ref = (inbuf[3] >> 3) & 0x3;
+
+ if (data->id == MCP4726 && ref != data->ref_mode) {
+ dev_info(&client->dev,
+ "voltage reference mode differs (conf: %u, eeprom: %u), setting %u",
+ data->ref_mode, ref, data->ref_mode);
+ err = mcp4726_set_cfg(indio_dev);
+ if (err < 0)
+ goto err_disable_vref_reg;
+ }
+
+ err = iio_device_register(indio_dev);
+ if (err)
+ goto err_disable_vref_reg;
+
+ return 0;
+
+err_disable_vref_reg:
+ if (data->vref_reg)
+ regulator_disable(data->vref_reg);
- return iio_device_register(indio_dev);
+err_disable_vdd_reg:
+ regulator_disable(data->vdd_reg);
+
+ return err;
}
static int mcp4725_remove(struct i2c_client *client)
{
- iio_device_unregister(i2c_get_clientdata(client));
+ struct iio_dev *indio_dev = i2c_get_clientdata(client);
+ struct mcp4725_data *data = iio_priv(indio_dev);
+
+ iio_device_unregister(indio_dev);
+
+ if (data->vref_reg)
+ regulator_disable(data->vref_reg);
+ regulator_disable(data->vdd_reg);
+
return 0;
}
diff --git a/drivers/iio/gyro/Kconfig b/drivers/iio/gyro/Kconfig
index 205a84420ae9..3126cf05e6b9 100644
--- a/drivers/iio/gyro/Kconfig
+++ b/drivers/iio/gyro/Kconfig
@@ -84,6 +84,24 @@ config HID_SENSOR_GYRO_3D
Say yes here to build support for the HID SENSOR
Gyroscope 3D.
+config MPU3050
+ tristate
+ select IIO_BUFFER
+ select IIO_TRIGGERED_BUFFER
+ select REGMAP
+
+config MPU3050_I2C
+ tristate "Invensense MPU3050 devices on I2C"
+ depends on !(INPUT_MPU3050=y || INPUT_MPU3050=m)
+ depends on I2C
+ select MPU3050
+ select REGMAP_I2C
+ select I2C_MUX
+ help
+ This driver supports the Invensense MPU3050 gyroscope over I2C.
+ This driver can be built as a module. The module will be called
+ inv-mpu3050-i2c.
+
config IIO_ST_GYRO_3AXIS
tristate "STMicroelectronics gyroscopes 3-Axis Driver"
depends on (I2C || SPI_MASTER) && SYSFS
diff --git a/drivers/iio/gyro/Makefile b/drivers/iio/gyro/Makefile
index f866a4be0667..f0e149a606b0 100644
--- a/drivers/iio/gyro/Makefile
+++ b/drivers/iio/gyro/Makefile
@@ -14,6 +14,11 @@ obj-$(CONFIG_BMG160_SPI) += bmg160_spi.o
obj-$(CONFIG_HID_SENSOR_GYRO_3D) += hid-sensor-gyro-3d.o
+# Currently this is rolled into one module, split it if
+# we ever create a separate SPI interface for MPU-3050
+obj-$(CONFIG_MPU3050) += mpu3050.o
+mpu3050-objs := mpu3050-core.o mpu3050-i2c.o
+
itg3200-y := itg3200_core.o
itg3200-$(CONFIG_IIO_BUFFER) += itg3200_buffer.o
obj-$(CONFIG_ITG3200) += itg3200.o
diff --git a/drivers/iio/gyro/mpu3050-core.c b/drivers/iio/gyro/mpu3050-core.c
new file mode 100644
index 000000000000..2be2a5d287e6
--- /dev/null
+++ b/drivers/iio/gyro/mpu3050-core.c
@@ -0,0 +1,1306 @@
+/*
+ * MPU3050 gyroscope driver
+ *
+ * Copyright (C) 2016 Linaro Ltd.
+ * Author: Linus Walleij <linus.walleij@linaro.org>
+ *
+ * Based on the input subsystem driver, Copyright (C) 2011 Wistron Co.Ltd
+ * Joseph Lai <joseph_lai@wistron.com> and trimmed down by
+ * Alan Cox <alan@linux.intel.com> in turn based on bma023.c.
+ * Device behaviour based on a misc driver posted by Nathan Royer in 2011.
+ *
+ * TODO: add support for setting up the low pass 3dB frequency.
+ */
+
+#include <linux/bitops.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/iio/buffer.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+#include <linux/iio/trigger.h>
+#include <linux/iio/trigger_consumer.h>
+#include <linux/iio/triggered_buffer.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/pm_runtime.h>
+#include <linux/random.h>
+#include <linux/slab.h>
+
+#include "mpu3050.h"
+
+#define MPU3050_CHIP_ID 0x69
+
+/*
+ * Register map: anything suffixed *_H is a big-endian high byte and always
+ * followed by the corresponding low byte (*_L) even though these are not
+ * explicitly included in the register definitions.
+ */
+#define MPU3050_CHIP_ID_REG 0x00
+#define MPU3050_PRODUCT_ID_REG 0x01
+#define MPU3050_XG_OFFS_TC 0x05
+#define MPU3050_YG_OFFS_TC 0x08
+#define MPU3050_ZG_OFFS_TC 0x0B
+#define MPU3050_X_OFFS_USR_H 0x0C
+#define MPU3050_Y_OFFS_USR_H 0x0E
+#define MPU3050_Z_OFFS_USR_H 0x10
+#define MPU3050_FIFO_EN 0x12
+#define MPU3050_AUX_VDDIO 0x13
+#define MPU3050_SLV_ADDR 0x14
+#define MPU3050_SMPLRT_DIV 0x15
+#define MPU3050_DLPF_FS_SYNC 0x16
+#define MPU3050_INT_CFG 0x17
+#define MPU3050_AUX_ADDR 0x18
+#define MPU3050_INT_STATUS 0x1A
+#define MPU3050_TEMP_H 0x1B
+#define MPU3050_XOUT_H 0x1D
+#define MPU3050_YOUT_H 0x1F
+#define MPU3050_ZOUT_H 0x21
+#define MPU3050_DMP_CFG1 0x35
+#define MPU3050_DMP_CFG2 0x36
+#define MPU3050_BANK_SEL 0x37
+#define MPU3050_MEM_START_ADDR 0x38
+#define MPU3050_MEM_R_W 0x39
+#define MPU3050_FIFO_COUNT_H 0x3A
+#define MPU3050_FIFO_R 0x3C
+#define MPU3050_USR_CTRL 0x3D
+#define MPU3050_PWR_MGM 0x3E
+
+/* MPU memory bank read options */
+#define MPU3050_MEM_PRFTCH BIT(5)
+#define MPU3050_MEM_USER_BANK BIT(4)
+/* Bits 8-11 select memory bank */
+#define MPU3050_MEM_RAM_BANK_0 0
+#define MPU3050_MEM_RAM_BANK_1 1
+#define MPU3050_MEM_RAM_BANK_2 2
+#define MPU3050_MEM_RAM_BANK_3 3
+#define MPU3050_MEM_OTP_BANK_0 4
+
+#define MPU3050_AXIS_REGS(axis) (MPU3050_XOUT_H + (axis * 2))
+
+/* Register bits */
+
+/* FIFO Enable */
+#define MPU3050_FIFO_EN_FOOTER BIT(0)
+#define MPU3050_FIFO_EN_AUX_ZOUT BIT(1)
+#define MPU3050_FIFO_EN_AUX_YOUT BIT(2)
+#define MPU3050_FIFO_EN_AUX_XOUT BIT(3)
+#define MPU3050_FIFO_EN_GYRO_ZOUT BIT(4)
+#define MPU3050_FIFO_EN_GYRO_YOUT BIT(5)
+#define MPU3050_FIFO_EN_GYRO_XOUT BIT(6)
+#define MPU3050_FIFO_EN_TEMP_OUT BIT(7)
+
+/*
+ * Digital Low Pass filter (DLPF)
+ * Full Scale (FS)
+ * and Synchronization
+ */
+#define MPU3050_EXT_SYNC_NONE 0x00
+#define MPU3050_EXT_SYNC_TEMP 0x20
+#define MPU3050_EXT_SYNC_GYROX 0x40
+#define MPU3050_EXT_SYNC_GYROY 0x60
+#define MPU3050_EXT_SYNC_GYROZ 0x80
+#define MPU3050_EXT_SYNC_ACCELX 0xA0
+#define MPU3050_EXT_SYNC_ACCELY 0xC0
+#define MPU3050_EXT_SYNC_ACCELZ 0xE0
+#define MPU3050_EXT_SYNC_MASK 0xE0
+#define MPU3050_EXT_SYNC_SHIFT 5
+
+#define MPU3050_FS_250DPS 0x00
+#define MPU3050_FS_500DPS 0x08
+#define MPU3050_FS_1000DPS 0x10
+#define MPU3050_FS_2000DPS 0x18
+#define MPU3050_FS_MASK 0x18
+#define MPU3050_FS_SHIFT 3
+
+#define MPU3050_DLPF_CFG_256HZ_NOLPF2 0x00
+#define MPU3050_DLPF_CFG_188HZ 0x01
+#define MPU3050_DLPF_CFG_98HZ 0x02
+#define MPU3050_DLPF_CFG_42HZ 0x03
+#define MPU3050_DLPF_CFG_20HZ 0x04
+#define MPU3050_DLPF_CFG_10HZ 0x05
+#define MPU3050_DLPF_CFG_5HZ 0x06
+#define MPU3050_DLPF_CFG_2100HZ_NOLPF 0x07
+#define MPU3050_DLPF_CFG_MASK 0x07
+#define MPU3050_DLPF_CFG_SHIFT 0
+
+/* Interrupt config */
+#define MPU3050_INT_RAW_RDY_EN BIT(0)
+#define MPU3050_INT_DMP_DONE_EN BIT(1)
+#define MPU3050_INT_MPU_RDY_EN BIT(2)
+#define MPU3050_INT_ANYRD_2CLEAR BIT(4)
+#define MPU3050_INT_LATCH_EN BIT(5)
+#define MPU3050_INT_OPEN BIT(6)
+#define MPU3050_INT_ACTL BIT(7)
+/* Interrupt status */
+#define MPU3050_INT_STATUS_RAW_RDY BIT(0)
+#define MPU3050_INT_STATUS_DMP_DONE BIT(1)
+#define MPU3050_INT_STATUS_MPU_RDY BIT(2)
+#define MPU3050_INT_STATUS_FIFO_OVFLW BIT(7)
+/* USR_CTRL */
+#define MPU3050_USR_CTRL_FIFO_EN BIT(6)
+#define MPU3050_USR_CTRL_AUX_IF_EN BIT(5)
+#define MPU3050_USR_CTRL_AUX_IF_RST BIT(3)
+#define MPU3050_USR_CTRL_FIFO_RST BIT(1)
+#define MPU3050_USR_CTRL_GYRO_RST BIT(0)
+/* PWR_MGM */
+#define MPU3050_PWR_MGM_PLL_X 0x01
+#define MPU3050_PWR_MGM_PLL_Y 0x02
+#define MPU3050_PWR_MGM_PLL_Z 0x03
+#define MPU3050_PWR_MGM_CLKSEL_MASK 0x07
+#define MPU3050_PWR_MGM_STBY_ZG BIT(3)
+#define MPU3050_PWR_MGM_STBY_YG BIT(4)
+#define MPU3050_PWR_MGM_STBY_XG BIT(5)
+#define MPU3050_PWR_MGM_SLEEP BIT(6)
+#define MPU3050_PWR_MGM_RESET BIT(7)
+#define MPU3050_PWR_MGM_MASK 0xff
+
+/*
+ * Fullscale precision is (for finest precision) +/- 250 deg/s, so the full
+ * scale is actually 500 deg/s. All 16 bits are then used to cover this scale,
+ * in two's complement.
+ */
+static unsigned int mpu3050_fs_precision[] = {
+ IIO_DEGREE_TO_RAD(250),
+ IIO_DEGREE_TO_RAD(500),
+ IIO_DEGREE_TO_RAD(1000),
+ IIO_DEGREE_TO_RAD(2000)
+};
+
+/*
+ * Regulator names
+ */
+static const char mpu3050_reg_vdd[] = "vdd";
+static const char mpu3050_reg_vlogic[] = "vlogic";
+
+static unsigned int mpu3050_get_freq(struct mpu3050 *mpu3050)
+{
+ unsigned int freq;
+
+ if (mpu3050->lpf == MPU3050_DLPF_CFG_256HZ_NOLPF2)
+ freq = 8000;
+ else
+ freq = 1000;
+ freq /= (mpu3050->divisor + 1);
+
+ return freq;
+}
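
The sample rate derived above follows ODR = base / (divisor + 1), where the base is 8 kHz when the digital low-pass filter is bypassed (LPF_256_HZ_NOLPF) and 1 kHz otherwise. A tiny sketch with example divisors:

#include <stdio.h>

static unsigned int odr(int dlpf_bypassed, unsigned int divisor)
{
	return (dlpf_bypassed ? 8000 : 1000) / (divisor + 1);
}

int main(void)
{
	printf("%u Hz\n", odr(0, 9));	/* 1000 / 10 = 100 Hz */
	printf("%u Hz\n", odr(1, 3));	/* 8000 / 4 = 2000 Hz */
	return 0;
}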
+
+static int mpu3050_start_sampling(struct mpu3050 *mpu3050)
+{
+ __be16 raw_val[3];
+ int ret;
+ int i;
+
+ /* Reset */
+ ret = regmap_update_bits(mpu3050->map, MPU3050_PWR_MGM,
+ MPU3050_PWR_MGM_RESET, MPU3050_PWR_MGM_RESET);
+ if (ret)
+ return ret;
+
+ /* Turn on the Z-axis PLL */
+ ret = regmap_update_bits(mpu3050->map, MPU3050_PWR_MGM,
+ MPU3050_PWR_MGM_CLKSEL_MASK,
+ MPU3050_PWR_MGM_PLL_Z);
+ if (ret)
+ return ret;
+
+ /* Write calibration offset registers */
+ for (i = 0; i < 3; i++)
+ raw_val[i] = cpu_to_be16(mpu3050->calibration[i]);
+
+ ret = regmap_bulk_write(mpu3050->map, MPU3050_X_OFFS_USR_H, raw_val,
+ sizeof(raw_val));
+ if (ret)
+ return ret;
+
+ /* Set low pass filter (sample rate), sync and full scale */
+ ret = regmap_write(mpu3050->map, MPU3050_DLPF_FS_SYNC,
+ MPU3050_EXT_SYNC_NONE << MPU3050_EXT_SYNC_SHIFT |
+ mpu3050->fullscale << MPU3050_FS_SHIFT |
+ mpu3050->lpf << MPU3050_DLPF_CFG_SHIFT);
+ if (ret)
+ return ret;
+
+ /* Set up sampling frequency */
+ ret = regmap_write(mpu3050->map, MPU3050_SMPLRT_DIV, mpu3050->divisor);
+ if (ret)
+ return ret;
+
+ /*
+ * Max 50 ms start-up time after setting DLPF_FS_SYNC
+ * according to the data sheet, then wait for the next sample
+ * at this frequency T = 1000/f ms.
+ */
+ msleep(50 + 1000 / mpu3050_get_freq(mpu3050));
+
+ return 0;
+}
+
+static int mpu3050_set_8khz_samplerate(struct mpu3050 *mpu3050)
+{
+ int ret;
+ u8 divisor;
+ enum mpu3050_lpf lpf;
+
+ lpf = mpu3050->lpf;
+ divisor = mpu3050->divisor;
+
+ mpu3050->lpf = LPF_256_HZ_NOLPF; /* 8 kHz base frequency */
+ mpu3050->divisor = 0; /* Divide by 1 */
+ ret = mpu3050_start_sampling(mpu3050);
+
+ mpu3050->lpf = lpf;
+ mpu3050->divisor = divisor;
+
+ return ret;
+}
+
+static int mpu3050_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2,
+ long mask)
+{
+ struct mpu3050 *mpu3050 = iio_priv(indio_dev);
+ int ret;
+ __be16 raw_val;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_OFFSET:
+ switch (chan->type) {
+ case IIO_TEMP:
+ /* The temperature scaling is (x+23000)/280 Celsius */
+ *val = 23000;
+ return IIO_VAL_INT;
+ default:
+ return -EINVAL;
+ }
+ case IIO_CHAN_INFO_CALIBBIAS:
+ switch (chan->type) {
+ case IIO_ANGL_VEL:
+ *val = mpu3050->calibration[chan->scan_index-1];
+ return IIO_VAL_INT;
+ default:
+ return -EINVAL;
+ }
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ *val = mpu3050_get_freq(mpu3050);
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_SCALE:
+ switch (chan->type) {
+ case IIO_TEMP:
+ /* Millidegrees; see the temperature scaling comment above */
+ *val = 1000;
+ *val2 = 280;
+ return IIO_VAL_FRACTIONAL;
+ case IIO_ANGL_VEL:
+ /*
+ * Convert to the corresponding full scale in
+ * radians. All 16 bits are used with sign to
+ * span the available scale: to account for the one
+ * missing value if we multiply by 1/S16_MAX, instead
+ * multiply with 2/U16_MAX.
+ */
+ *val = mpu3050_fs_precision[mpu3050->fullscale] * 2;
+ *val2 = U16_MAX;
+ return IIO_VAL_FRACTIONAL;
+ default:
+ return -EINVAL;
+ }
+ case IIO_CHAN_INFO_RAW:
+ /* Resume device */
+ pm_runtime_get_sync(mpu3050->dev);
+ mutex_lock(&mpu3050->lock);
+
+ ret = mpu3050_set_8khz_samplerate(mpu3050);
+ if (ret)
+ goto out_read_raw_unlock;
+
+ switch (chan->type) {
+ case IIO_TEMP:
+ ret = regmap_bulk_read(mpu3050->map, MPU3050_TEMP_H,
+ &raw_val, sizeof(raw_val));
+ if (ret) {
+ dev_err(mpu3050->dev,
+ "error reading temperature\n");
+ goto out_read_raw_unlock;
+ }
+
+ *val = be16_to_cpu(raw_val);
+ ret = IIO_VAL_INT;
+
+ goto out_read_raw_unlock;
+ case IIO_ANGL_VEL:
+ ret = regmap_bulk_read(mpu3050->map,
+ MPU3050_AXIS_REGS(chan->scan_index-1),
+ &raw_val,
+ sizeof(raw_val));
+ if (ret) {
+ dev_err(mpu3050->dev,
+ "error reading axis data\n");
+ goto out_read_raw_unlock;
+ }
+
+ *val = be16_to_cpu(raw_val);
+ ret = IIO_VAL_INT;
+
+ goto out_read_raw_unlock;
+ default:
+ ret = -EINVAL;
+ goto out_read_raw_unlock;
+ }
+ default:
+ break;
+ }
+
+ return -EINVAL;
+
+out_read_raw_unlock:
+ mutex_unlock(&mpu3050->lock);
+ pm_runtime_mark_last_busy(mpu3050->dev);
+ pm_runtime_put_autosuspend(mpu3050->dev);
+
+ return ret;
+}
+
+static int mpu3050_write_raw(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ int val, int val2, long mask)
+{
+ struct mpu3050 *mpu3050 = iio_priv(indio_dev);
+ /*
+ * Couldn't figure out a way to precalculate these at compile time.
+ */
+ unsigned int fs250 =
+ DIV_ROUND_CLOSEST(mpu3050_fs_precision[0] * 1000000 * 2,
+ U16_MAX);
+ unsigned int fs500 =
+ DIV_ROUND_CLOSEST(mpu3050_fs_precision[1] * 1000000 * 2,
+ U16_MAX);
+ unsigned int fs1000 =
+ DIV_ROUND_CLOSEST(mpu3050_fs_precision[2] * 1000000 * 2,
+ U16_MAX);
+ unsigned int fs2000 =
+ DIV_ROUND_CLOSEST(mpu3050_fs_precision[3] * 1000000 * 2,
+ U16_MAX);
+
+ switch (mask) {
+ case IIO_CHAN_INFO_CALIBBIAS:
+ if (chan->type != IIO_ANGL_VEL)
+ return -EINVAL;
+ mpu3050->calibration[chan->scan_index-1] = val;
+ return 0;
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ /*
+ * The max samplerate is 8000 Hz, the minimum
+ * 1000 / 256 ~= 4 Hz
+ */
+ if (val < 4 || val > 8000)
+ return -EINVAL;
+
+ /*
+ * Above 1000 Hz we must turn off the digital low pass filter
+ * so we get a base frequency of 8kHz to the divider
+ */
+ if (val > 1000) {
+ mpu3050->lpf = LPF_256_HZ_NOLPF;
+ mpu3050->divisor = DIV_ROUND_CLOSEST(8000, val) - 1;
+ return 0;
+ }
+
+ mpu3050->lpf = LPF_188_HZ;
+ mpu3050->divisor = DIV_ROUND_CLOSEST(1000, val) - 1;
+ return 0;
+ case IIO_CHAN_INFO_SCALE:
+ if (chan->type != IIO_ANGL_VEL)
+ return -EINVAL;
+ /*
+ * We support +/-250, +/-500, +/-1000 and +/-2000 deg/s,
+ * which rounded to the closest radians is roughly
+ * +/-4.3, +/-8.7, +/-17.5 and +/-35 rad/s. For the 16 bits
+ * used to cover it, the scale is then 2/(2^16) of that.
+ */
+
+ /* Any nonzero integer part exceeds the largest scale, set the max range */
+ if (val != 0) {
+ mpu3050->fullscale = FS_2000_DPS;
+ return 0;
+ }
+
+ /*
+ * Now we're dealing with a fractional value below 1 rad/s,
+ * expressed in microradians/s: do some integer interpolation
+ * and match with the closest fullscale in the table.
+ */
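+ /*
+ * Assuming the IIO_DEGREE_TO_RAD precision values noted
+ * earlier, fs250..fs2000 come out as roughly 122, 275,
+ * 519 and 1068 urad/s, so a request of e.g. 0.000300
+ * rad/s falls below (fs1000 + fs500) / 2 and selects
+ * FS_500_DPS.
+ */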
+ if (val2 <= fs250 ||
+ val2 < ((fs500 + fs250) / 2))
+ mpu3050->fullscale = FS_250_DPS;
+ else if (val2 <= fs500 ||
+ val2 < ((fs1000 + fs500) / 2))
+ mpu3050->fullscale = FS_500_DPS;
+ else if (val2 <= fs1000 ||
+ val2 < ((fs2000 + fs1000) / 2))
+ mpu3050->fullscale = FS_1000_DPS;
+ else
+ /* Catch-all */
+ mpu3050->fullscale = FS_2000_DPS;
+ return 0;
+ default:
+ break;
+ }
+
+ return -EINVAL;
+}
+
+static irqreturn_t mpu3050_trigger_handler(int irq, void *p)
+{
+ const struct iio_poll_func *pf = p;
+ struct iio_dev *indio_dev = pf->indio_dev;
+ struct mpu3050 *mpu3050 = iio_priv(indio_dev);
+ int ret;
+ /*
+ * Temperature 1*16 bits
+ * Three axes 3*16 bits
+ * Timestamp 64 bits (4*16 bits)
+ * Sum total 8*16 bits
+ */
+ __be16 hw_values[8];
+ s64 timestamp;
+ unsigned int datums_from_fifo = 0;
+
+ /*
+ * If we're using the hardware trigger, get the precise timestamp from
+ * the top half of the threaded IRQ handler. Otherwise get the
+ * timestamp here so it will be close in time to the actual values
+ * read from the registers.
+ */
+ if (iio_trigger_using_own(indio_dev))
+ timestamp = mpu3050->hw_timestamp;
+ else
+ timestamp = iio_get_time_ns(indio_dev);
+
+ mutex_lock(&mpu3050->lock);
+
+ /* Using the hardware IRQ trigger? Check the buffer then. */
+ if (mpu3050->hw_irq_trigger) {
+ __be16 raw_fifocnt;
+ u16 fifocnt;
+ /* X, Y, Z + temperature */
+ unsigned int bytes_per_datum = 8;
+ bool fifo_overflow = false;
+
+ ret = regmap_bulk_read(mpu3050->map,
+ MPU3050_FIFO_COUNT_H,
+ &raw_fifocnt,
+ sizeof(raw_fifocnt));
+ if (ret)
+ goto out_trigger_unlock;
+ fifocnt = be16_to_cpu(raw_fifocnt);
+
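+ /* The hardware FIFO is 512 bytes deep: a full count means overflow */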
+ if (fifocnt == 512) {
+ dev_info(mpu3050->dev,
+ "FIFO overflow! Emptying and resetting FIFO\n");
+ fifo_overflow = true;
+ /* Reset and enable the FIFO */
+ ret = regmap_update_bits(mpu3050->map,
+ MPU3050_USR_CTRL,
+ MPU3050_USR_CTRL_FIFO_EN |
+ MPU3050_USR_CTRL_FIFO_RST,
+ MPU3050_USR_CTRL_FIFO_EN |
+ MPU3050_USR_CTRL_FIFO_RST);
+ if (ret) {
+ dev_info(mpu3050->dev, "error resetting FIFO\n");
+ goto out_trigger_unlock;
+ }
+ mpu3050->pending_fifo_footer = false;
+ }
+
+ if (fifocnt)
+ dev_dbg(mpu3050->dev,
+ "%d bytes in the FIFO\n",
+ fifocnt);
+
+ while (!fifo_overflow && fifocnt > bytes_per_datum) {
+ unsigned int toread;
+ unsigned int offset;
+ __be16 fifo_values[5];
+
+ /*
+ * If there is a FIFO footer in the pipe, first clear
+ * that out. This follows the complex algorithm in the
+ * datasheet that states that you may never leave the
+ * FIFO empty after the first reading: you have to
+ * always leave two footer bytes in it. The footer is
+ * in practice just two zero bytes.
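+ *
+ * Concretely: the first datum after enabling the FIFO
+ * is read as 8 bytes into fifo_values[1..4]; every
+ * subsequent datum is read as 10 bytes with the stale
+ * footer landing in fifo_values[0], which is skipped
+ * when pushing to the IIO buffer.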
+ */
+ if (mpu3050->pending_fifo_footer) {
+ toread = bytes_per_datum + 2;
+ offset = 0;
+ } else {
+ toread = bytes_per_datum;
+ offset = 1;
+ /* Put in some dummy value */
+ fifo_values[0] = 0xAAAA;
+ }
+
+ ret = regmap_bulk_read(mpu3050->map,
+ MPU3050_FIFO_R,
+ &fifo_values[offset],
+ toread);
+ if (ret)
+ goto out_trigger_unlock;
+
+ dev_dbg(mpu3050->dev,
+ "%04x %04x %04x %04x %04x\n",
+ fifo_values[0],
+ fifo_values[1],
+ fifo_values[2],
+ fifo_values[3],
+ fifo_values[4]);
+
+ /* Index past the footer (fifo_values[0]) and push */
+ iio_push_to_buffers_with_timestamp(indio_dev,
+ &fifo_values[1],
+ timestamp);
+
+ fifocnt -= toread;
+ datums_from_fifo++;
+ mpu3050->pending_fifo_footer = true;
+
+ /*
+ * If we're emptying the FIFO, just make sure to
+ * check if something new appeared.
+ */
+ if (fifocnt < bytes_per_datum) {
+ ret = regmap_bulk_read(mpu3050->map,
+ MPU3050_FIFO_COUNT_H,
+ &raw_fifocnt,
+ sizeof(raw_fifocnt));
+ if (ret)
+ goto out_trigger_unlock;
+ fifocnt = be16_to_cpu(raw_fifocnt);
+ }
+
+ if (fifocnt < bytes_per_datum)
+ dev_dbg(mpu3050->dev,
+ "%d bytes left in the FIFO\n",
+ fifocnt);
+
+ /*
+ * At this point, the timestamp that triggered the
+ * hardware interrupt is no longer valid for what
+ * we are reading (the interrupt likely fired for
+ * the value on the top of the FIFO), so set the
+ * timestamp to zero and let userspace deal with it.
+ */
+ timestamp = 0;
+ }
+ }
+
+ /*
+ * If we picked some datums from the FIFO that's enough, else
+ * fall through and just read from the current value registers.
+ * This happens in two cases:
+ *
+ * - We are using some other trigger (external, like an HRTimer)
+ * than the sensor's own sample generator. In this case the
+ * sensor is just set to the max sampling frequency and we give
+ * the trigger a copy of the latest value every time we get here.
+ *
+ * - The hardware trigger is active but unused and we actually use
+ * another trigger which calls here at a higher frequency than
+ * that at which the device provides data. We will then just read
+ * duplicate values directly from the hardware registers.
+ */
+ if (datums_from_fifo) {
+ dev_dbg(mpu3050->dev,
+ "read %d datums from the FIFO\n",
+ datums_from_fifo);
+ goto out_trigger_unlock;
+ }
+
+ ret = regmap_bulk_read(mpu3050->map, MPU3050_TEMP_H, &hw_values,
+ sizeof(hw_values));
+ if (ret) {
+ dev_err(mpu3050->dev,
+ "error reading axis data\n");
+ goto out_trigger_unlock;
+ }
+
+ iio_push_to_buffers_with_timestamp(indio_dev, hw_values, timestamp);
+
+out_trigger_unlock:
+ mutex_unlock(&mpu3050->lock);
+ iio_trigger_notify_done(indio_dev->trig);
+
+ return IRQ_HANDLED;
+}
+
+static int mpu3050_buffer_preenable(struct iio_dev *indio_dev)
+{
+ struct mpu3050 *mpu3050 = iio_priv(indio_dev);
+
+ pm_runtime_get_sync(mpu3050->dev);
+
+ /* Unless we have OUR trigger active, run at full speed */
+ if (!mpu3050->hw_irq_trigger)
+ return mpu3050_set_8khz_samplerate(mpu3050);
+
+ return 0;
+}
+
+static int mpu3050_buffer_postdisable(struct iio_dev *indio_dev)
+{
+ struct mpu3050 *mpu3050 = iio_priv(indio_dev);
+
+ pm_runtime_mark_last_busy(mpu3050->dev);
+ pm_runtime_put_autosuspend(mpu3050->dev);
+
+ return 0;
+}
+
+static const struct iio_buffer_setup_ops mpu3050_buffer_setup_ops = {
+ .preenable = mpu3050_buffer_preenable,
+ .postenable = iio_triggered_buffer_postenable,
+ .predisable = iio_triggered_buffer_predisable,
+ .postdisable = mpu3050_buffer_postdisable,
+};
+
+static const struct iio_mount_matrix *
+mpu3050_get_mount_matrix(const struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan)
+{
+ struct mpu3050 *mpu3050 = iio_priv(indio_dev);
+
+ return &mpu3050->orientation;
+}
+
+static const struct iio_chan_spec_ext_info mpu3050_ext_info[] = {
+ IIO_MOUNT_MATRIX(IIO_SHARED_BY_TYPE, mpu3050_get_mount_matrix),
+ { },
+};
+
+#define MPU3050_AXIS_CHANNEL(axis, index) \
+ { \
+ .type = IIO_ANGL_VEL, \
+ .modified = 1, \
+ .channel2 = IIO_MOD_##axis, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
+ BIT(IIO_CHAN_INFO_CALIBBIAS), \
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \
+ .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ),\
+ .ext_info = mpu3050_ext_info, \
+ .scan_index = index, \
+ .scan_type = { \
+ .sign = 's', \
+ .realbits = 16, \
+ .storagebits = 16, \
+ .endianness = IIO_BE, \
+ }, \
+ }
+
+static const struct iio_chan_spec mpu3050_channels[] = {
+ {
+ .type = IIO_TEMP,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_SCALE) |
+ BIT(IIO_CHAN_INFO_OFFSET),
+ .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ),
+ .scan_index = 0,
+ .scan_type = {
+ .sign = 's',
+ .realbits = 16,
+ .storagebits = 16,
+ .endianness = IIO_BE,
+ },
+ },
+ MPU3050_AXIS_CHANNEL(X, 1),
+ MPU3050_AXIS_CHANNEL(Y, 2),
+ MPU3050_AXIS_CHANNEL(Z, 3),
+ IIO_CHAN_SOFT_TIMESTAMP(4),
+};
+
+/* Four channels apart from timestamp, scan mask = 0x0f */
+static const unsigned long mpu3050_scan_masks[] = { 0xf, 0 };
+
+/*
+ * These are just the hardcoded factors resulting from the more elaborate
+ * calculations done with fractions in the scale raw get/set functions.
+ */
+static IIO_CONST_ATTR(anglevel_scale_available,
+ "0.000122070 "
+ "0.000274658 "
+ "0.000518798 "
+ "0.001068115");
+
+static struct attribute *mpu3050_attributes[] = {
+ &iio_const_attr_anglevel_scale_available.dev_attr.attr,
+ NULL,
+};
+
+static const struct attribute_group mpu3050_attribute_group = {
+ .attrs = mpu3050_attributes,
+};
+
+static const struct iio_info mpu3050_info = {
+ .driver_module = THIS_MODULE,
+ .read_raw = mpu3050_read_raw,
+ .write_raw = mpu3050_write_raw,
+ .attrs = &mpu3050_attribute_group,
+};
+
+/**
+ * mpu3050_read_mem() - read MPU-3050 internal memory
+ * @mpu3050: device to read from
+ * @bank: target bank
+ * @addr: target address
+ * @len: number of bytes
+ * @buf: the buffer to store the read bytes in
+ */
+static int mpu3050_read_mem(struct mpu3050 *mpu3050,
+ u8 bank,
+ u8 addr,
+ u8 len,
+ u8 *buf)
+{
+ int ret;
+
+ ret = regmap_write(mpu3050->map,
+ MPU3050_BANK_SEL,
+ bank);
+ if (ret)
+ return ret;
+
+ ret = regmap_write(mpu3050->map,
+ MPU3050_MEM_START_ADDR,
+ addr);
+ if (ret)
+ return ret;
+
+ return regmap_bulk_read(mpu3050->map,
+ MPU3050_MEM_R_W,
+ buf,
+ len);
+}
+
+static int mpu3050_hw_init(struct mpu3050 *mpu3050)
+{
+ int ret;
+ u8 otp[8];
+
+ /* Reset */
+ ret = regmap_update_bits(mpu3050->map,
+ MPU3050_PWR_MGM,
+ MPU3050_PWR_MGM_RESET,
+ MPU3050_PWR_MGM_RESET);
+ if (ret)
+ return ret;
+
+ /* Turn on the PLL */
+ ret = regmap_update_bits(mpu3050->map,
+ MPU3050_PWR_MGM,
+ MPU3050_PWR_MGM_CLKSEL_MASK,
+ MPU3050_PWR_MGM_PLL_Z);
+ if (ret)
+ return ret;
+
+ /* Disable IRQs */
+ ret = regmap_write(mpu3050->map,
+ MPU3050_INT_CFG,
+ 0);
+ if (ret)
+ return ret;
+
+ /* Read out the 8 bytes of OTP (one-time-programmable) memory */
+ ret = mpu3050_read_mem(mpu3050,
+ (MPU3050_MEM_PRFTCH |
+ MPU3050_MEM_USER_BANK |
+ MPU3050_MEM_OTP_BANK_0),
+ 0,
+ sizeof(otp),
+ otp);
+ if (ret)
+ return ret;
+
+ /* This is device-unique data so it goes into the entropy pool */
+ add_device_randomness(otp, sizeof(otp));
+
+ dev_info(mpu3050->dev,
+ "die ID: %04X, wafer ID: %02X, A lot ID: %04X, "
+ "W lot ID: %03X, WP ID: %01X, rev ID: %02X\n",
+ /* Die ID, bits 0-12 */
+ (otp[1] << 8 | otp[0]) & 0x1fff,
+ /* Wafer ID, bits 13-17 */
+ ((otp[2] << 8 | otp[1]) & 0x03e0) >> 5,
+ /* A lot ID, bits 18-33 */
+ ((otp[4] << 16 | otp[3] << 8 | otp[2]) & 0x3fffc) >> 2,
+ /* W lot ID, bits 34-45 */
+ ((otp[5] << 8 | otp[4]) & 0x3ffc) >> 2,
+ /* WP ID, bits 47-49 */
+ ((otp[6] << 8 | otp[5]) & 0x0380) >> 7,
+ /* rev ID, bits 50-55 */
+ otp[6] >> 2);
+
+ return 0;
+}
+
+static int mpu3050_power_up(struct mpu3050 *mpu3050)
+{
+ int ret;
+
+ ret = regulator_bulk_enable(ARRAY_SIZE(mpu3050->regs), mpu3050->regs);
+ if (ret) {
+ dev_err(mpu3050->dev, "cannot enable regulators\n");
+ return ret;
+ }
+ /*
+ * 20-100 ms start-up time for register read/write according to
+ * the datasheet, be on the safe side and wait 200 ms.
+ */
+ msleep(200);
+
+ /* Take device out of sleep mode */
+ ret = regmap_update_bits(mpu3050->map, MPU3050_PWR_MGM,
+ MPU3050_PWR_MGM_SLEEP, 0);
+ if (ret) {
+ dev_err(mpu3050->dev, "error setting power mode\n");
+ return ret;
+ }
+ msleep(10);
+
+ return 0;
+}
+
+static int mpu3050_power_down(struct mpu3050 *mpu3050)
+{
+ int ret;
+
+ /*
+ * Put MPU-3050 into sleep mode before cutting regulators.
+ * This is important, because we may not be the sole user
+ * of the regulator so the power may stay on after this, and
+ * then we would be wasting power unless we go to sleep mode
+ * first.
+ */
+ ret = regmap_update_bits(mpu3050->map, MPU3050_PWR_MGM,
+ MPU3050_PWR_MGM_SLEEP, MPU3050_PWR_MGM_SLEEP);
+ if (ret)
+ dev_err(mpu3050->dev, "error putting to sleep\n");
+
+ ret = regulator_bulk_disable(ARRAY_SIZE(mpu3050->regs), mpu3050->regs);
+ if (ret)
+ dev_err(mpu3050->dev, "error disabling regulators\n");
+
+ return 0;
+}
+
+static irqreturn_t mpu3050_irq_handler(int irq, void *p)
+{
+ struct iio_trigger *trig = p;
+ struct iio_dev *indio_dev = iio_trigger_get_drvdata(trig);
+ struct mpu3050 *mpu3050 = iio_priv(indio_dev);
+
+ if (!mpu3050->hw_irq_trigger)
+ return IRQ_NONE;
+
+ /* Get the time stamp as close in time as possible */
+ mpu3050->hw_timestamp = iio_get_time_ns(indio_dev);
+
+ return IRQ_WAKE_THREAD;
+}
+
+static irqreturn_t mpu3050_irq_thread(int irq, void *p)
+{
+ struct iio_trigger *trig = p;
+ struct iio_dev *indio_dev = iio_trigger_get_drvdata(trig);
+ struct mpu3050 *mpu3050 = iio_priv(indio_dev);
+ unsigned int val;
+ int ret;
+
+ /* ACK IRQ and check if it was from us */
+ ret = regmap_read(mpu3050->map, MPU3050_INT_STATUS, &val);
+ if (ret) {
+ dev_err(mpu3050->dev, "error reading IRQ status\n");
+ return IRQ_HANDLED;
+ }
+ if (!(val & MPU3050_INT_STATUS_RAW_RDY))
+ return IRQ_NONE;
+
+ iio_trigger_poll_chained(p);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * mpu3050_drdy_trigger_set_state() - set data ready interrupt state
+ * @trig: trigger instance
+ * @enable: true if trigger should be enabled, false to disable
+ */
+static int mpu3050_drdy_trigger_set_state(struct iio_trigger *trig,
+ bool enable)
+{
+ struct iio_dev *indio_dev = iio_trigger_get_drvdata(trig);
+ struct mpu3050 *mpu3050 = iio_priv(indio_dev);
+ unsigned int val;
+ int ret;
+
+ /* Disabling trigger: disable interrupt and return */
+ if (!enable) {
+ /* Disable all interrupts */
+ ret = regmap_write(mpu3050->map,
+ MPU3050_INT_CFG,
+ 0);
+ if (ret)
+ dev_err(mpu3050->dev, "error disabling IRQ\n");
+
+ /* Clear IRQ flag */
+ ret = regmap_read(mpu3050->map, MPU3050_INT_STATUS, &val);
+ if (ret)
+ dev_err(mpu3050->dev, "error clearing IRQ status\n");
+
+ /* Disable all things in the FIFO and reset it */
+ ret = regmap_write(mpu3050->map, MPU3050_FIFO_EN, 0);
+ if (ret)
+ dev_err(mpu3050->dev, "error disabling FIFO\n");
+
+ ret = regmap_write(mpu3050->map, MPU3050_USR_CTRL,
+ MPU3050_USR_CTRL_FIFO_RST);
+ if (ret)
+ dev_err(mpu3050->dev, "error resetting FIFO\n");
+
+ pm_runtime_mark_last_busy(mpu3050->dev);
+ pm_runtime_put_autosuspend(mpu3050->dev);
+ mpu3050->hw_irq_trigger = false;
+
+ return 0;
+ } else {
+ /* We're enabling the trigger from this point on */
+ pm_runtime_get_sync(mpu3050->dev);
+ mpu3050->hw_irq_trigger = true;
+
+ /* Disable all things in the FIFO */
+ ret = regmap_write(mpu3050->map, MPU3050_FIFO_EN, 0);
+ if (ret)
+ return ret;
+
+ /* Reset and enable the FIFO */
+ ret = regmap_update_bits(mpu3050->map, MPU3050_USR_CTRL,
+ MPU3050_USR_CTRL_FIFO_EN |
+ MPU3050_USR_CTRL_FIFO_RST,
+ MPU3050_USR_CTRL_FIFO_EN |
+ MPU3050_USR_CTRL_FIFO_RST);
+ if (ret)
+ return ret;
+
+ mpu3050->pending_fifo_footer = false;
+
+ /* Turn on the FIFO for temp+X+Y+Z */
+ ret = regmap_write(mpu3050->map, MPU3050_FIFO_EN,
+ MPU3050_FIFO_EN_TEMP_OUT |
+ MPU3050_FIFO_EN_GYRO_XOUT |
+ MPU3050_FIFO_EN_GYRO_YOUT |
+ MPU3050_FIFO_EN_GYRO_ZOUT |
+ MPU3050_FIFO_EN_FOOTER);
+ if (ret)
+ return ret;
+
+ /* Configure the sample engine */
+ ret = mpu3050_start_sampling(mpu3050);
+ if (ret)
+ return ret;
+
+ /* Clear IRQ flag */
+ ret = regmap_read(mpu3050->map, MPU3050_INT_STATUS, &val);
+ if (ret)
+ dev_err(mpu3050->dev, "error clearing IRQ status\n");
+
+ /* Give us interrupts whenever there is new data ready */
+ val = MPU3050_INT_RAW_RDY_EN;
+
+ if (mpu3050->irq_actl)
+ val |= MPU3050_INT_ACTL;
+ if (mpu3050->irq_latch)
+ val |= MPU3050_INT_LATCH_EN;
+ if (mpu3050->irq_opendrain)
+ val |= MPU3050_INT_OPEN;
+
+ ret = regmap_write(mpu3050->map, MPU3050_INT_CFG, val);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct iio_trigger_ops mpu3050_trigger_ops = {
+ .owner = THIS_MODULE,
+ .set_trigger_state = mpu3050_drdy_trigger_set_state,
+};
+
+static int mpu3050_trigger_probe(struct iio_dev *indio_dev, int irq)
+{
+ struct mpu3050 *mpu3050 = iio_priv(indio_dev);
+ unsigned long irq_trig;
+ int ret;
+
+ mpu3050->trig = devm_iio_trigger_alloc(&indio_dev->dev,
+ "%s-dev%d",
+ indio_dev->name,
+ indio_dev->id);
+ if (!mpu3050->trig)
+ return -ENOMEM;
+
+ /* Check if IRQ is open drain */
+ if (of_property_read_bool(mpu3050->dev->of_node, "drive-open-drain"))
+ mpu3050->irq_opendrain = true;
+
+ irq_trig = irqd_get_trigger_type(irq_get_irq_data(irq));
+ /*
+ * Configure the interrupt generator hardware to supply whatever
+ * the interrupt line is configured for: rising or falling edge,
+ * or high or low level, we can provide it all.
+ */
+ switch (irq_trig) {
+ case IRQF_TRIGGER_RISING:
+ dev_info(&indio_dev->dev,
+ "pulse interrupts on the rising edge\n");
+ if (mpu3050->irq_opendrain) {
+ dev_info(&indio_dev->dev,
+ "rising edge incompatible with open drain\n");
+ mpu3050->irq_opendrain = false;
+ }
+ break;
+ case IRQF_TRIGGER_FALLING:
+ mpu3050->irq_actl = true;
+ dev_info(&indio_dev->dev,
+ "pulse interrupts on the falling edge\n");
+ break;
+ case IRQF_TRIGGER_HIGH:
+ mpu3050->irq_latch = true;
+ dev_info(&indio_dev->dev,
+ "interrupts active high level\n");
+ if (mpu3050->irq_opendrain) {
+ dev_info(&indio_dev->dev,
+ "active high incompatible with open drain\n");
+ mpu3050->irq_opendrain = false;
+ }
+ /*
+ * With level IRQs, we mask the IRQ until it is processed,
+ * but with edge IRQs (pulses) we can queue several interrupts
+ * in the top half.
+ */
+ irq_trig |= IRQF_ONESHOT;
+ break;
+ case IRQF_TRIGGER_LOW:
+ mpu3050->irq_latch = true;
+ mpu3050->irq_actl = true;
+ irq_trig |= IRQF_ONESHOT;
+ dev_info(&indio_dev->dev,
+ "interrupts active low level\n");
+ break;
+ default:
+ /* Rising edge is the preferred fallback mode */
+ dev_err(&indio_dev->dev,
+ "unsupported IRQ trigger specified (%lx), enforcing "
+ "rising edge\n", irq_trig);
+ irq_trig = IRQF_TRIGGER_RISING;
+ break;
+ }
+
+ /* An open drain line can be shared with several devices */
+ if (mpu3050->irq_opendrain)
+ irq_trig |= IRQF_SHARED;
+
+ ret = request_threaded_irq(irq,
+ mpu3050_irq_handler,
+ mpu3050_irq_thread,
+ irq_trig,
+ mpu3050->trig->name,
+ mpu3050->trig);
+ if (ret) {
+ dev_err(mpu3050->dev,
+ "can't get IRQ %d, error %d\n", irq, ret);
+ return ret;
+ }
+
+ mpu3050->irq = irq;
+ mpu3050->trig->dev.parent = mpu3050->dev;
+ mpu3050->trig->ops = &mpu3050_trigger_ops;
+ iio_trigger_set_drvdata(mpu3050->trig, indio_dev);
+
+ ret = iio_trigger_register(mpu3050->trig);
+ if (ret)
+ return ret;
+
+ indio_dev->trig = iio_trigger_get(mpu3050->trig);
+
+ return 0;
+}
+
+int mpu3050_common_probe(struct device *dev,
+ struct regmap *map,
+ int irq,
+ const char *name)
+{
+ struct iio_dev *indio_dev;
+ struct mpu3050 *mpu3050;
+ unsigned int val;
+ int ret;
+
+ indio_dev = devm_iio_device_alloc(dev, sizeof(*mpu3050));
+ if (!indio_dev)
+ return -ENOMEM;
+ mpu3050 = iio_priv(indio_dev);
+
+ mpu3050->dev = dev;
+ mpu3050->map = map;
+ mutex_init(&mpu3050->lock);
+ /* Default fullscale: 2000 degrees per second */
+ mpu3050->fullscale = FS_2000_DPS;
+ /* 1 kHz, divide by 100, default frequency = 10 Hz */
+ mpu3050->lpf = MPU3050_DLPF_CFG_188HZ;
+ mpu3050->divisor = 99;
+
+ /* Read the mounting matrix, if present */
+ ret = of_iio_read_mount_matrix(dev, "mount-matrix",
+ &mpu3050->orientation);
+ if (ret)
+ return ret;
+
+ /* Fetch and turn on regulators */
+ mpu3050->regs[0].supply = mpu3050_reg_vdd;
+ mpu3050->regs[1].supply = mpu3050_reg_vlogic;
+ ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(mpu3050->regs),
+ mpu3050->regs);
+ if (ret) {
+ dev_err(dev, "Cannot get regulators\n");
+ return ret;
+ }
+
+ ret = mpu3050_power_up(mpu3050);
+ if (ret)
+ return ret;
+
+ ret = regmap_read(map, MPU3050_CHIP_ID_REG, &val);
+ if (ret) {
+ dev_err(dev, "could not read device ID\n");
+ ret = -ENODEV;
+
+ goto err_power_down;
+ }
+
+ if (val != MPU3050_CHIP_ID) {
+ dev_err(dev, "unsupported chip id %02x\n", (u8)val);
+ ret = -ENODEV;
+ goto err_power_down;
+ }
+
+ ret = regmap_read(map, MPU3050_PRODUCT_ID_REG, &val);
+ if (ret) {
+ dev_err(dev, "could not read device ID\n");
+ ret = -ENODEV;
+
+ goto err_power_down;
+ }
+ dev_info(dev, "found MPU-3050 part no: %d, version: %d\n",
+ ((val >> 4) & 0xf), (val & 0xf));
+
+ ret = mpu3050_hw_init(mpu3050);
+ if (ret)
+ goto err_power_down;
+
+ indio_dev->dev.parent = dev;
+ indio_dev->channels = mpu3050_channels;
+ indio_dev->num_channels = ARRAY_SIZE(mpu3050_channels);
+ indio_dev->info = &mpu3050_info;
+ indio_dev->available_scan_masks = mpu3050_scan_masks;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->name = name;
+
+ ret = iio_triggered_buffer_setup(indio_dev, iio_pollfunc_store_time,
+ mpu3050_trigger_handler,
+ &mpu3050_buffer_setup_ops);
+ if (ret) {
+ dev_err(dev, "triggered buffer setup failed\n");
+ goto err_power_down;
+ }
+
+ ret = iio_device_register(indio_dev);
+ if (ret) {
+ dev_err(dev, "device register failed\n");
+ goto err_cleanup_buffer;
+ }
+
+ dev_set_drvdata(dev, indio_dev);
+
+ /* Check if we have an assigned IRQ to use as trigger */
+ if (irq) {
+ ret = mpu3050_trigger_probe(indio_dev, irq);
+ if (ret)
+ dev_err(dev, "failed to register trigger\n");
+ }
+
+ /* Enable runtime PM */
+ pm_runtime_get_noresume(dev);
+ pm_runtime_set_active(dev);
+ pm_runtime_enable(dev);
+ /*
+ * Set autosuspend to two orders of magnitude larger than the
+ * start-up time. 100ms start-up time means 10000ms autosuspend,
+ * i.e. 10 seconds.
+ */
+ pm_runtime_set_autosuspend_delay(dev, 10000);
+ pm_runtime_use_autosuspend(dev);
+ pm_runtime_put(dev);
+
+ return 0;
+
+err_cleanup_buffer:
+ iio_triggered_buffer_cleanup(indio_dev);
+err_power_down:
+ mpu3050_power_down(mpu3050);
+
+ return ret;
+}
+EXPORT_SYMBOL(mpu3050_common_probe);
+
+int mpu3050_common_remove(struct device *dev)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct mpu3050 *mpu3050 = iio_priv(indio_dev);
+
+ pm_runtime_get_sync(dev);
+ pm_runtime_put_noidle(dev);
+ pm_runtime_disable(dev);
+ iio_triggered_buffer_cleanup(indio_dev);
+ if (mpu3050->irq)
+ free_irq(mpu3050->irq, mpu3050);
+ iio_device_unregister(indio_dev);
+ mpu3050_power_down(mpu3050);
+
+ return 0;
+}
+EXPORT_SYMBOL(mpu3050_common_remove);
+
+#ifdef CONFIG_PM
+static int mpu3050_runtime_suspend(struct device *dev)
+{
+ return mpu3050_power_down(iio_priv(dev_get_drvdata(dev)));
+}
+
+static int mpu3050_runtime_resume(struct device *dev)
+{
+ return mpu3050_power_up(iio_priv(dev_get_drvdata(dev)));
+}
+#endif /* CONFIG_PM */
+
+const struct dev_pm_ops mpu3050_dev_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
+ SET_RUNTIME_PM_OPS(mpu3050_runtime_suspend,
+ mpu3050_runtime_resume, NULL)
+};
+EXPORT_SYMBOL(mpu3050_dev_pm_ops);
+
+MODULE_AUTHOR("Linus Walleij");
+MODULE_DESCRIPTION("MPU3050 gyroscope driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/iio/gyro/mpu3050-i2c.c b/drivers/iio/gyro/mpu3050-i2c.c
new file mode 100644
index 000000000000..06007200bf49
--- /dev/null
+++ b/drivers/iio/gyro/mpu3050-i2c.c
@@ -0,0 +1,124 @@
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/i2c-mux.h>
+#include <linux/iio/iio.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
+#include <linux/pm_runtime.h>
+
+#include "mpu3050.h"
+
+static const struct regmap_config mpu3050_i2c_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+};
+
+static int mpu3050_i2c_bypass_select(struct i2c_mux_core *mux, u32 chan_id)
+{
+ struct mpu3050 *mpu3050 = i2c_mux_priv(mux);
+
+ /* Just power up the device, that is all that is needed */
+ pm_runtime_get_sync(mpu3050->dev);
+ return 0;
+}
+
+static int mpu3050_i2c_bypass_deselect(struct i2c_mux_core *mux, u32 chan_id)
+{
+ struct mpu3050 *mpu3050 = i2c_mux_priv(mux);
+
+ pm_runtime_mark_last_busy(mpu3050->dev);
+ pm_runtime_put_autosuspend(mpu3050->dev);
+ return 0;
+}
+
+static int mpu3050_i2c_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct regmap *regmap;
+ const char *name;
+ struct mpu3050 *mpu3050;
+ int ret;
+
+ if (!i2c_check_functionality(client->adapter,
+ I2C_FUNC_SMBUS_I2C_BLOCK))
+ return -EOPNOTSUPP;
+
+ if (id)
+ name = id->name;
+ else
+ return -ENODEV;
+
+ regmap = devm_regmap_init_i2c(client, &mpu3050_i2c_regmap_config);
+ if (IS_ERR(regmap)) {
+ dev_err(&client->dev, "Failed to register i2c regmap %d\n",
+ (int)PTR_ERR(regmap));
+ return PTR_ERR(regmap);
+ }
+
+ ret = mpu3050_common_probe(&client->dev, regmap, client->irq, name);
+ if (ret)
+ return ret;
+
+ /* The main driver is up, now register the I2C mux */
+ mpu3050 = iio_priv(dev_get_drvdata(&client->dev));
+ mpu3050->i2cmux = i2c_mux_alloc(client->adapter, &client->dev,
+ 1, 0, I2C_MUX_LOCKED | I2C_MUX_GATE,
+ mpu3050_i2c_bypass_select,
+ mpu3050_i2c_bypass_deselect);
+ /* If the mux fails, just complain; there is no point in killing the driver */
+ if (!mpu3050->i2cmux)
+ dev_err(&client->dev, "failed to allocate I2C mux\n");
+ else {
+ mpu3050->i2cmux->priv = mpu3050;
+ ret = i2c_mux_add_adapter(mpu3050->i2cmux, 0, 0, 0);
+ if (ret)
+ dev_err(&client->dev, "failed to add I2C mux\n");
+ }
+
+ return 0;
+}
+
+static int mpu3050_i2c_remove(struct i2c_client *client)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(&client->dev);
+ struct mpu3050 *mpu3050 = iio_priv(indio_dev);
+
+ if (mpu3050->i2cmux)
+ i2c_mux_del_adapters(mpu3050->i2cmux);
+
+ return mpu3050_common_remove(&client->dev);
+}
+
+/*
+ * The device ID table identifies which devices are
+ * supported by this driver.
+ */
+static const struct i2c_device_id mpu3050_i2c_id[] = {
+ { "mpu3050" },
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, mpu3050_i2c_id);
+
+static const struct of_device_id mpu3050_i2c_of_match[] = {
+ { .compatible = "invensense,mpu3050", .data = "mpu3050" },
+ /* Deprecated vendor ID from the Input driver */
+ { .compatible = "invn,mpu3050", .data = "mpu3050" },
+ { },
+};
+MODULE_DEVICE_TABLE(of, mpu3050_i2c_of_match);
+
+static struct i2c_driver mpu3050_i2c_driver = {
+ .probe = mpu3050_i2c_probe,
+ .remove = mpu3050_i2c_remove,
+ .id_table = mpu3050_i2c_id,
+ .driver = {
+ .of_match_table = mpu3050_i2c_of_match,
+ .name = "mpu3050-i2c",
+ .pm = &mpu3050_dev_pm_ops,
+ },
+};
+module_i2c_driver(mpu3050_i2c_driver);
+
+MODULE_AUTHOR("Linus Walleij");
+MODULE_DESCRIPTION("Invensense MPU3050 gyroscope driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/iio/gyro/mpu3050.h b/drivers/iio/gyro/mpu3050.h
new file mode 100644
index 000000000000..bef87a714dc5
--- /dev/null
+++ b/drivers/iio/gyro/mpu3050.h
@@ -0,0 +1,96 @@
+#include <linux/iio/iio.h>
+#include <linux/mutex.h>
+#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
+#include <linux/i2c.h>
+
+/**
+ * enum mpu3050_fullscale - indicates the full range of the sensor in deg/sec
+ */
+enum mpu3050_fullscale {
+ FS_250_DPS = 0,
+ FS_500_DPS,
+ FS_1000_DPS,
+ FS_2000_DPS,
+};
+
+/**
+ * enum mpu3050_lpf - indicates the low pass filter width
+ */
+enum mpu3050_lpf {
+ /* This implicitly sets the sample frequency to 8 kHz */
+ LPF_256_HZ_NOLPF = 0,
+ /* All other settings set the sample frequency to 1 kHz */
+ LPF_188_HZ,
+ LPF_98_HZ,
+ LPF_42_HZ,
+ LPF_20_HZ,
+ LPF_10_HZ,
+ LPF_5_HZ,
+ LPF_2100_HZ_NOLPF,
+};
+
+enum mpu3050_axis {
+ AXIS_X = 0,
+ AXIS_Y,
+ AXIS_Z,
+ AXIS_MAX,
+};
+
+/**
+ * struct mpu3050 - instance state container for the device
+ * @dev: parent device for this instance
+ * @orientation: mounting matrix, flipped axes, etc.
+ * @map: regmap to reach the registers
+ * @lock: serialization lock to marshal all requests
+ * @irq: the IRQ used for this device
+ * @regs: the regulators to power this device
+ * @fullscale: the current fullscale setting for the device
+ * @lpf: digital low pass filter setting for the device
+ * @divisor: base frequency divider: divides 8 or 1 kHz
+ * @calibration: the three signed 16-bit calibration settings that
+ * get written into the offset registers for each axis to compensate
+ * for DC offsets
+ * @trig: trigger for the MPU-3050 interrupt, if present
+ * @hw_irq_trigger: hardware interrupt trigger is in use
+ * @irq_actl: interrupt is active low
+ * @irq_latch: latched IRQ, this means that it is a level IRQ
+ * @irq_opendrain: the interrupt line shall be configured open drain
+ * @pending_fifo_footer: tells us if there is a pending footer in the FIFO
+ * that we have to read out first when handling the FIFO
+ * @hw_timestamp: latest hardware timestamp from the trigger IRQ, when in
+ * use
+ * @i2cmux: an I2C mux reflecting the fact that this sensor is a hub with
+ * a pass-through I2C interface coming out of it: this device needs to be
+ * powered up in order to reach devices on the other side of this mux
+ */
+struct mpu3050 {
+ struct device *dev;
+ struct iio_mount_matrix orientation;
+ struct regmap *map;
+ struct mutex lock;
+ int irq;
+ struct regulator_bulk_data regs[2];
+ enum mpu3050_fullscale fullscale;
+ enum mpu3050_lpf lpf;
+ u8 divisor;
+ s16 calibration[3];
+ struct iio_trigger *trig;
+ bool hw_irq_trigger;
+ bool irq_actl;
+ bool irq_latch;
+ bool irq_opendrain;
+ bool pending_fifo_footer;
+ s64 hw_timestamp;
+ struct i2c_mux_core *i2cmux;
+};
+
+/* Probe called from different transports */
+int mpu3050_common_probe(struct device *dev,
+ struct regmap *map,
+ int irq,
+ const char *name);
+int mpu3050_common_remove(struct device *dev);
+
+/* PM ops */
+extern const struct dev_pm_ops mpu3050_dev_pm_ops;
diff --git a/drivers/iio/gyro/st_gyro_core.c b/drivers/iio/gyro/st_gyro_core.c
index aea034d8fe0f..2a42b3d583e8 100644
--- a/drivers/iio/gyro/st_gyro_core.c
+++ b/drivers/iio/gyro/st_gyro_core.c
@@ -39,79 +39,6 @@
#define ST_GYRO_FS_AVL_500DPS 500
#define ST_GYRO_FS_AVL_2000DPS 2000
-/* CUSTOM VALUES FOR SENSOR 1 */
-#define ST_GYRO_1_WAI_EXP 0xd3
-#define ST_GYRO_1_ODR_ADDR 0x20
-#define ST_GYRO_1_ODR_MASK 0xc0
-#define ST_GYRO_1_ODR_AVL_100HZ_VAL 0x00
-#define ST_GYRO_1_ODR_AVL_200HZ_VAL 0x01
-#define ST_GYRO_1_ODR_AVL_400HZ_VAL 0x02
-#define ST_GYRO_1_ODR_AVL_800HZ_VAL 0x03
-#define ST_GYRO_1_PW_ADDR 0x20
-#define ST_GYRO_1_PW_MASK 0x08
-#define ST_GYRO_1_FS_ADDR 0x23
-#define ST_GYRO_1_FS_MASK 0x30
-#define ST_GYRO_1_FS_AVL_250_VAL 0x00
-#define ST_GYRO_1_FS_AVL_500_VAL 0x01
-#define ST_GYRO_1_FS_AVL_2000_VAL 0x02
-#define ST_GYRO_1_FS_AVL_250_GAIN IIO_DEGREE_TO_RAD(8750)
-#define ST_GYRO_1_FS_AVL_500_GAIN IIO_DEGREE_TO_RAD(17500)
-#define ST_GYRO_1_FS_AVL_2000_GAIN IIO_DEGREE_TO_RAD(70000)
-#define ST_GYRO_1_BDU_ADDR 0x23
-#define ST_GYRO_1_BDU_MASK 0x80
-#define ST_GYRO_1_DRDY_IRQ_ADDR 0x22
-#define ST_GYRO_1_DRDY_IRQ_INT2_MASK 0x08
-#define ST_GYRO_1_MULTIREAD_BIT true
-
-/* CUSTOM VALUES FOR SENSOR 2 */
-#define ST_GYRO_2_WAI_EXP 0xd4
-#define ST_GYRO_2_ODR_ADDR 0x20
-#define ST_GYRO_2_ODR_MASK 0xc0
-#define ST_GYRO_2_ODR_AVL_95HZ_VAL 0x00
-#define ST_GYRO_2_ODR_AVL_190HZ_VAL 0x01
-#define ST_GYRO_2_ODR_AVL_380HZ_VAL 0x02
-#define ST_GYRO_2_ODR_AVL_760HZ_VAL 0x03
-#define ST_GYRO_2_PW_ADDR 0x20
-#define ST_GYRO_2_PW_MASK 0x08
-#define ST_GYRO_2_FS_ADDR 0x23
-#define ST_GYRO_2_FS_MASK 0x30
-#define ST_GYRO_2_FS_AVL_250_VAL 0x00
-#define ST_GYRO_2_FS_AVL_500_VAL 0x01
-#define ST_GYRO_2_FS_AVL_2000_VAL 0x02
-#define ST_GYRO_2_FS_AVL_250_GAIN IIO_DEGREE_TO_RAD(8750)
-#define ST_GYRO_2_FS_AVL_500_GAIN IIO_DEGREE_TO_RAD(17500)
-#define ST_GYRO_2_FS_AVL_2000_GAIN IIO_DEGREE_TO_RAD(70000)
-#define ST_GYRO_2_BDU_ADDR 0x23
-#define ST_GYRO_2_BDU_MASK 0x80
-#define ST_GYRO_2_DRDY_IRQ_ADDR 0x22
-#define ST_GYRO_2_DRDY_IRQ_INT2_MASK 0x08
-#define ST_GYRO_2_MULTIREAD_BIT true
-
-/* CUSTOM VALUES FOR SENSOR 3 */
-#define ST_GYRO_3_WAI_EXP 0xd7
-#define ST_GYRO_3_ODR_ADDR 0x20
-#define ST_GYRO_3_ODR_MASK 0xc0
-#define ST_GYRO_3_ODR_AVL_95HZ_VAL 0x00
-#define ST_GYRO_3_ODR_AVL_190HZ_VAL 0x01
-#define ST_GYRO_3_ODR_AVL_380HZ_VAL 0x02
-#define ST_GYRO_3_ODR_AVL_760HZ_VAL 0x03
-#define ST_GYRO_3_PW_ADDR 0x20
-#define ST_GYRO_3_PW_MASK 0x08
-#define ST_GYRO_3_FS_ADDR 0x23
-#define ST_GYRO_3_FS_MASK 0x30
-#define ST_GYRO_3_FS_AVL_250_VAL 0x00
-#define ST_GYRO_3_FS_AVL_500_VAL 0x01
-#define ST_GYRO_3_FS_AVL_2000_VAL 0x02
-#define ST_GYRO_3_FS_AVL_250_GAIN IIO_DEGREE_TO_RAD(8750)
-#define ST_GYRO_3_FS_AVL_500_GAIN IIO_DEGREE_TO_RAD(17500)
-#define ST_GYRO_3_FS_AVL_2000_GAIN IIO_DEGREE_TO_RAD(70000)
-#define ST_GYRO_3_BDU_ADDR 0x23
-#define ST_GYRO_3_BDU_MASK 0x80
-#define ST_GYRO_3_DRDY_IRQ_ADDR 0x22
-#define ST_GYRO_3_DRDY_IRQ_INT2_MASK 0x08
-#define ST_GYRO_3_MULTIREAD_BIT true
-
-
static const struct iio_chan_spec st_gyro_16bit_channels[] = {
ST_SENSORS_LSM_CHANNELS(IIO_ANGL_VEL,
BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE),
@@ -130,7 +57,7 @@ static const struct iio_chan_spec st_gyro_16bit_channels[] = {
static const struct st_sensor_settings st_gyro_sensors_settings[] = {
{
- .wai = ST_GYRO_1_WAI_EXP,
+ .wai = 0xd3,
.wai_addr = ST_SENSORS_DEFAULT_WAI_ADDRESS,
.sensors_supported = {
[0] = L3G4200D_GYRO_DEV_NAME,
@@ -138,18 +65,18 @@ static const struct st_sensor_settings st_gyro_sensors_settings[] = {
},
.ch = (struct iio_chan_spec *)st_gyro_16bit_channels,
.odr = {
- .addr = ST_GYRO_1_ODR_ADDR,
- .mask = ST_GYRO_1_ODR_MASK,
+ .addr = 0x20,
+ .mask = 0xc0,
.odr_avl = {
- { 100, ST_GYRO_1_ODR_AVL_100HZ_VAL, },
- { 200, ST_GYRO_1_ODR_AVL_200HZ_VAL, },
- { 400, ST_GYRO_1_ODR_AVL_400HZ_VAL, },
- { 800, ST_GYRO_1_ODR_AVL_800HZ_VAL, },
+ { .hz = 100, .value = 0x00, },
+ { .hz = 200, .value = 0x01, },
+ { .hz = 400, .value = 0x02, },
+ { .hz = 800, .value = 0x03, },
},
},
.pw = {
- .addr = ST_GYRO_1_PW_ADDR,
- .mask = ST_GYRO_1_PW_MASK,
+ .addr = 0x20,
+ .mask = 0x08,
.value_on = ST_SENSORS_DEFAULT_POWER_ON_VALUE,
.value_off = ST_SENSORS_DEFAULT_POWER_OFF_VALUE,
},
@@ -158,33 +85,33 @@ static const struct st_sensor_settings st_gyro_sensors_settings[] = {
.mask = ST_SENSORS_DEFAULT_AXIS_MASK,
},
.fs = {
- .addr = ST_GYRO_1_FS_ADDR,
- .mask = ST_GYRO_1_FS_MASK,
+ .addr = 0x23,
+ .mask = 0x30,
.fs_avl = {
[0] = {
.num = ST_GYRO_FS_AVL_250DPS,
- .value = ST_GYRO_1_FS_AVL_250_VAL,
- .gain = ST_GYRO_1_FS_AVL_250_GAIN,
+ .value = 0x00,
+ .gain = IIO_DEGREE_TO_RAD(8750),
},
[1] = {
.num = ST_GYRO_FS_AVL_500DPS,
- .value = ST_GYRO_1_FS_AVL_500_VAL,
- .gain = ST_GYRO_1_FS_AVL_500_GAIN,
+ .value = 0x01,
+ .gain = IIO_DEGREE_TO_RAD(17500),
},
[2] = {
.num = ST_GYRO_FS_AVL_2000DPS,
- .value = ST_GYRO_1_FS_AVL_2000_VAL,
- .gain = ST_GYRO_1_FS_AVL_2000_GAIN,
+ .value = 0x02,
+ .gain = IIO_DEGREE_TO_RAD(70000),
},
},
},
.bdu = {
- .addr = ST_GYRO_1_BDU_ADDR,
- .mask = ST_GYRO_1_BDU_MASK,
+ .addr = 0x23,
+ .mask = 0x80,
},
.drdy_irq = {
- .addr = ST_GYRO_1_DRDY_IRQ_ADDR,
- .mask_int2 = ST_GYRO_1_DRDY_IRQ_INT2_MASK,
+ .addr = 0x22,
+ .mask_int2 = 0x08,
/*
* The sensor has IHL (active low) and open
* drain settings, but only for INT1 and not
@@ -192,11 +119,11 @@ static const struct st_sensor_settings st_gyro_sensors_settings[] = {
*/
.addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR,
},
- .multi_read_bit = ST_GYRO_1_MULTIREAD_BIT,
+ .multi_read_bit = true,
.bootime = 2,
},
{
- .wai = ST_GYRO_2_WAI_EXP,
+ .wai = 0xd4,
.wai_addr = ST_SENSORS_DEFAULT_WAI_ADDRESS,
.sensors_supported = {
[0] = L3GD20_GYRO_DEV_NAME,
@@ -208,18 +135,18 @@ static const struct st_sensor_settings st_gyro_sensors_settings[] = {
},
.ch = (struct iio_chan_spec *)st_gyro_16bit_channels,
.odr = {
- .addr = ST_GYRO_2_ODR_ADDR,
- .mask = ST_GYRO_2_ODR_MASK,
+ .addr = 0x20,
+ .mask = 0xc0,
.odr_avl = {
- { 95, ST_GYRO_2_ODR_AVL_95HZ_VAL, },
- { 190, ST_GYRO_2_ODR_AVL_190HZ_VAL, },
- { 380, ST_GYRO_2_ODR_AVL_380HZ_VAL, },
- { 760, ST_GYRO_2_ODR_AVL_760HZ_VAL, },
+ { .hz = 95, .value = 0x00, },
+ { .hz = 190, .value = 0x01, },
+ { .hz = 380, .value = 0x02, },
+ { .hz = 760, .value = 0x03, },
},
},
.pw = {
- .addr = ST_GYRO_2_PW_ADDR,
- .mask = ST_GYRO_2_PW_MASK,
+ .addr = 0x20,
+ .mask = 0x08,
.value_on = ST_SENSORS_DEFAULT_POWER_ON_VALUE,
.value_off = ST_SENSORS_DEFAULT_POWER_OFF_VALUE,
},
@@ -228,33 +155,33 @@ static const struct st_sensor_settings st_gyro_sensors_settings[] = {
.mask = ST_SENSORS_DEFAULT_AXIS_MASK,
},
.fs = {
- .addr = ST_GYRO_2_FS_ADDR,
- .mask = ST_GYRO_2_FS_MASK,
+ .addr = 0x23,
+ .mask = 0x30,
.fs_avl = {
[0] = {
.num = ST_GYRO_FS_AVL_250DPS,
- .value = ST_GYRO_2_FS_AVL_250_VAL,
- .gain = ST_GYRO_2_FS_AVL_250_GAIN,
+ .value = 0x00,
+ .gain = IIO_DEGREE_TO_RAD(8750),
},
[1] = {
.num = ST_GYRO_FS_AVL_500DPS,
- .value = ST_GYRO_2_FS_AVL_500_VAL,
- .gain = ST_GYRO_2_FS_AVL_500_GAIN,
+ .value = 0x01,
+ .gain = IIO_DEGREE_TO_RAD(17500),
},
[2] = {
.num = ST_GYRO_FS_AVL_2000DPS,
- .value = ST_GYRO_2_FS_AVL_2000_VAL,
- .gain = ST_GYRO_2_FS_AVL_2000_GAIN,
+ .value = 0x02,
+ .gain = IIO_DEGREE_TO_RAD(70000),
},
},
},
.bdu = {
- .addr = ST_GYRO_2_BDU_ADDR,
- .mask = ST_GYRO_2_BDU_MASK,
+ .addr = 0x23,
+ .mask = 0x80,
},
.drdy_irq = {
- .addr = ST_GYRO_2_DRDY_IRQ_ADDR,
- .mask_int2 = ST_GYRO_2_DRDY_IRQ_INT2_MASK,
+ .addr = 0x22,
+ .mask_int2 = 0x08,
/*
* The sensor has IHL (active low) and open
* drain settings, but only for INT1 and not
@@ -262,29 +189,29 @@ static const struct st_sensor_settings st_gyro_sensors_settings[] = {
*/
.addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR,
},
- .multi_read_bit = ST_GYRO_2_MULTIREAD_BIT,
+ .multi_read_bit = true,
.bootime = 2,
},
{
- .wai = ST_GYRO_3_WAI_EXP,
+ .wai = 0xd7,
.wai_addr = ST_SENSORS_DEFAULT_WAI_ADDRESS,
.sensors_supported = {
[0] = L3GD20_GYRO_DEV_NAME,
},
.ch = (struct iio_chan_spec *)st_gyro_16bit_channels,
.odr = {
- .addr = ST_GYRO_3_ODR_ADDR,
- .mask = ST_GYRO_3_ODR_MASK,
+ .addr = 0x20,
+ .mask = 0xc0,
.odr_avl = {
- { 95, ST_GYRO_3_ODR_AVL_95HZ_VAL, },
- { 190, ST_GYRO_3_ODR_AVL_190HZ_VAL, },
- { 380, ST_GYRO_3_ODR_AVL_380HZ_VAL, },
- { 760, ST_GYRO_3_ODR_AVL_760HZ_VAL, },
+ { .hz = 95, .value = 0x00, },
+ { .hz = 190, .value = 0x01, },
+ { .hz = 380, .value = 0x02, },
+ { .hz = 760, .value = 0x03, },
},
},
.pw = {
- .addr = ST_GYRO_3_PW_ADDR,
- .mask = ST_GYRO_3_PW_MASK,
+ .addr = 0x20,
+ .mask = 0x08,
.value_on = ST_SENSORS_DEFAULT_POWER_ON_VALUE,
.value_off = ST_SENSORS_DEFAULT_POWER_OFF_VALUE,
},
@@ -293,33 +220,33 @@ static const struct st_sensor_settings st_gyro_sensors_settings[] = {
.mask = ST_SENSORS_DEFAULT_AXIS_MASK,
},
.fs = {
- .addr = ST_GYRO_3_FS_ADDR,
- .mask = ST_GYRO_3_FS_MASK,
+ .addr = 0x23,
+ .mask = 0x30,
.fs_avl = {
[0] = {
.num = ST_GYRO_FS_AVL_250DPS,
- .value = ST_GYRO_3_FS_AVL_250_VAL,
- .gain = ST_GYRO_3_FS_AVL_250_GAIN,
+ .value = 0x00,
+ .gain = IIO_DEGREE_TO_RAD(8750),
},
[1] = {
.num = ST_GYRO_FS_AVL_500DPS,
- .value = ST_GYRO_3_FS_AVL_500_VAL,
- .gain = ST_GYRO_3_FS_AVL_500_GAIN,
+ .value = 0x01,
+ .gain = IIO_DEGREE_TO_RAD(17500),
},
[2] = {
.num = ST_GYRO_FS_AVL_2000DPS,
- .value = ST_GYRO_3_FS_AVL_2000_VAL,
- .gain = ST_GYRO_3_FS_AVL_2000_GAIN,
+ .value = 0x02,
+ .gain = IIO_DEGREE_TO_RAD(70000),
},
},
},
.bdu = {
- .addr = ST_GYRO_3_BDU_ADDR,
- .mask = ST_GYRO_3_BDU_MASK,
+ .addr = 0x23,
+ .mask = 0x80,
},
.drdy_irq = {
- .addr = ST_GYRO_3_DRDY_IRQ_ADDR,
- .mask_int2 = ST_GYRO_3_DRDY_IRQ_INT2_MASK,
+ .addr = 0x22,
+ .mask_int2 = 0x08,
/*
* The sensor has IHL (active low) and open
* drain settings, but only for INT1 and not
@@ -327,7 +254,7 @@ static const struct st_sensor_settings st_gyro_sensors_settings[] = {
*/
.addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR,
},
- .multi_read_bit = ST_GYRO_3_MULTIREAD_BIT,
+ .multi_read_bit = true,
.bootime = 2,
},
};
diff --git a/drivers/iio/humidity/Kconfig b/drivers/iio/humidity/Kconfig
index b17e2e2bd4f5..912477d54be2 100644
--- a/drivers/iio/humidity/Kconfig
+++ b/drivers/iio/humidity/Kconfig
@@ -27,6 +27,8 @@ config DHT11
config HDC100X
tristate "TI HDC100x relative humidity and temperature sensor"
depends on I2C
+ select IIO_BUFFER
+ select IIO_TRIGGERED_BUFFER
help
Say yes here to build support for the Texas Instruments
HDC1000 and HDC1008 relative humidity and temperature sensors.
@@ -34,6 +36,28 @@ config HDC100X
To compile this driver as a module, choose M here: the module
will be called hdc100x.
+config HTS221
+ tristate "STMicroelectronics HTS221 sensor Driver"
+ depends on (I2C || SPI)
+ select IIO_BUFFER
+ select IIO_TRIGGERED_BUFFER
+ select HTS221_I2C if (I2C)
+ select HTS221_SPI if (SPI_MASTER)
+ help
+ Say yes here to build support for STMicroelectronics HTS221
+ temperature-humidity sensor.
+
+ To compile this driver as a module, choose M here: the module
+ will be called hts221.
+
+config HTS221_I2C
+ tristate
+ depends on HTS221
+
+config HTS221_SPI
+ tristate
+ depends on HTS221
+
config HTU21
tristate "Measurement Specialties HTU21 humidity & temperature sensor"
depends on I2C
diff --git a/drivers/iio/humidity/Makefile b/drivers/iio/humidity/Makefile
index 4a73442fcd9c..a6850e47c100 100644
--- a/drivers/iio/humidity/Makefile
+++ b/drivers/iio/humidity/Makefile
@@ -5,6 +5,13 @@
obj-$(CONFIG_AM2315) += am2315.o
obj-$(CONFIG_DHT11) += dht11.o
obj-$(CONFIG_HDC100X) += hdc100x.o
+
+hts221-y := hts221_core.o \
+ hts221_buffer.o
+obj-$(CONFIG_HTS221) += hts221.o
+obj-$(CONFIG_HTS221_I2C) += hts221_i2c.o
+obj-$(CONFIG_HTS221_SPI) += hts221_spi.o
+
obj-$(CONFIG_HTU21) += htu21.o
obj-$(CONFIG_SI7005) += si7005.o
obj-$(CONFIG_SI7020) += si7020.o
diff --git a/drivers/iio/humidity/hdc100x.c b/drivers/iio/humidity/hdc100x.c
index e0c9c70c2a4a..265c34da52d1 100644
--- a/drivers/iio/humidity/hdc100x.c
+++ b/drivers/iio/humidity/hdc100x.c
@@ -22,11 +22,15 @@
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
+#include <linux/iio/buffer.h>
+#include <linux/iio/trigger_consumer.h>
+#include <linux/iio/triggered_buffer.h>
#define HDC100X_REG_TEMP 0x00
#define HDC100X_REG_HUMIDITY 0x01
#define HDC100X_REG_CONFIG 0x02
+#define HDC100X_REG_CONFIG_ACQ_MODE BIT(12)
#define HDC100X_REG_CONFIG_HEATER_EN BIT(13)
struct hdc100x_data {
@@ -87,22 +91,40 @@ static const struct iio_chan_spec hdc100x_channels[] = {
BIT(IIO_CHAN_INFO_SCALE) |
BIT(IIO_CHAN_INFO_INT_TIME) |
BIT(IIO_CHAN_INFO_OFFSET),
+ .scan_index = 0,
+ .scan_type = {
+ .sign = 's',
+ .realbits = 16,
+ .storagebits = 16,
+ .endianness = IIO_BE,
+ },
},
{
.type = IIO_HUMIDITYRELATIVE,
.address = HDC100X_REG_HUMIDITY,
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
BIT(IIO_CHAN_INFO_SCALE) |
- BIT(IIO_CHAN_INFO_INT_TIME)
+ BIT(IIO_CHAN_INFO_INT_TIME),
+ .scan_index = 1,
+ .scan_type = {
+ .sign = 'u',
+ .realbits = 16,
+ .storagebits = 16,
+ .endianness = IIO_BE,
+ },
},
{
.type = IIO_CURRENT,
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
.extend_name = "heater",
.output = 1,
+ .scan_index = -1,
},
+ IIO_CHAN_SOFT_TIMESTAMP(2),
};
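+/* Temperature and humidity channels are always scanned together (mask 0x3) */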
+static const unsigned long hdc100x_scan_masks[] = {0x3, 0};
+
static int hdc100x_update_config(struct hdc100x_data *data, int mask, int val)
{
int tmp = (~mask & data->config) | val;
@@ -183,7 +205,14 @@ static int hdc100x_read_raw(struct iio_dev *indio_dev,
*val = hdc100x_get_heater_status(data);
ret = IIO_VAL_INT;
} else {
+ ret = iio_device_claim_direct_mode(indio_dev);
+ if (ret) {
+ mutex_unlock(&data->lock);
+ return ret;
+ }
+
ret = hdc100x_get_measurement(data, chan);
+ iio_device_release_direct_mode(indio_dev);
if (ret >= 0) {
*val = ret;
ret = IIO_VAL_INT;
@@ -246,6 +275,78 @@ static int hdc100x_write_raw(struct iio_dev *indio_dev,
}
}
+static int hdc100x_buffer_postenable(struct iio_dev *indio_dev)
+{
+ struct hdc100x_data *data = iio_priv(indio_dev);
+ int ret;
+
+ /* Buffer is enabled. First set ACQ Mode, then attach poll func */
+ mutex_lock(&data->lock);
+ ret = hdc100x_update_config(data, HDC100X_REG_CONFIG_ACQ_MODE,
+ HDC100X_REG_CONFIG_ACQ_MODE);
+ mutex_unlock(&data->lock);
+ if (ret)
+ return ret;
+
+ return iio_triggered_buffer_postenable(indio_dev);
+}
+
+static int hdc100x_buffer_predisable(struct iio_dev *indio_dev)
+{
+ struct hdc100x_data *data = iio_priv(indio_dev);
+ int ret;
+
+ /* First detach poll func, then reset ACQ mode. OK to disable buffer */
+ ret = iio_triggered_buffer_predisable(indio_dev);
+ if (ret)
+ return ret;
+
+ mutex_lock(&data->lock);
+ ret = hdc100x_update_config(data, HDC100X_REG_CONFIG_ACQ_MODE, 0);
+ mutex_unlock(&data->lock);
+
+ return ret;
+}
+
+static const struct iio_buffer_setup_ops hdc_buffer_setup_ops = {
+ .postenable = hdc100x_buffer_postenable,
+ .predisable = hdc100x_buffer_predisable,
+};
+
+static irqreturn_t hdc100x_trigger_handler(int irq, void *p)
+{
+ struct iio_poll_func *pf = p;
+ struct iio_dev *indio_dev = pf->indio_dev;
+ struct hdc100x_data *data = iio_priv(indio_dev);
+ struct i2c_client *client = data->client;
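+ /* Total conversion time is the sum of both channels' integration times */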
+ int delay = data->adc_int_us[0] + data->adc_int_us[1];
+ int ret;
+ s16 buf[8]; /* 2x s16 + padding + 8 byte timestamp */
+
+ /* dual read starts at temp register */
+ mutex_lock(&data->lock);
+ ret = i2c_smbus_write_byte(client, HDC100X_REG_TEMP);
+ if (ret < 0) {
+ dev_err(&client->dev, "cannot start measurement\n");
+ goto err;
+ }
+ usleep_range(delay, delay + 1000);
+
+ ret = i2c_master_recv(client, (u8 *)buf, 4);
+ if (ret < 0) {
+ dev_err(&client->dev, "cannot read sensor data\n");
+ goto err;
+ }
+
+ iio_push_to_buffers_with_timestamp(indio_dev, buf,
+ iio_get_time_ns(indio_dev));
+err:
+ mutex_unlock(&data->lock);
+ iio_trigger_notify_done(indio_dev->trig);
+
+ return IRQ_HANDLED;
+}
+
static const struct iio_info hdc100x_info = {
.read_raw = hdc100x_read_raw,
.write_raw = hdc100x_write_raw,
@@ -258,6 +359,7 @@ static int hdc100x_probe(struct i2c_client *client,
{
struct iio_dev *indio_dev;
struct hdc100x_data *data;
+ int ret;
if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_WORD_DATA |
I2C_FUNC_SMBUS_BYTE | I2C_FUNC_I2C))
@@ -279,12 +381,35 @@ static int hdc100x_probe(struct i2c_client *client,
indio_dev->channels = hdc100x_channels;
indio_dev->num_channels = ARRAY_SIZE(hdc100x_channels);
+ indio_dev->available_scan_masks = hdc100x_scan_masks;
/* be sure we are in a known state */
hdc100x_set_it_time(data, 0, hdc100x_int_time[0][0]);
hdc100x_set_it_time(data, 1, hdc100x_int_time[1][0]);
+ hdc100x_update_config(data, HDC100X_REG_CONFIG_ACQ_MODE, 0);
+
+ ret = iio_triggered_buffer_setup(indio_dev, NULL,
+ hdc100x_trigger_handler,
+ &hdc_buffer_setup_ops);
+ if (ret < 0) {
+ dev_err(&client->dev, "iio triggered buffer setup failed\n");
+ return ret;
+ }
+ ret = iio_device_register(indio_dev);
+ if (ret < 0)
+ iio_triggered_buffer_cleanup(indio_dev);
+
+ return ret;
+}
+
+static int hdc100x_remove(struct i2c_client *client)
+{
+ struct iio_dev *indio_dev = i2c_get_clientdata(client);
+
+ iio_device_unregister(indio_dev);
+ iio_triggered_buffer_cleanup(indio_dev);
- return devm_iio_device_register(&client->dev, indio_dev);
+ return 0;
}
static const struct i2c_device_id hdc100x_id[] = {
@@ -298,6 +423,7 @@ static struct i2c_driver hdc100x_driver = {
.name = "hdc100x",
},
.probe = hdc100x_probe,
+ .remove = hdc100x_remove,
.id_table = hdc100x_id,
};
module_i2c_driver(hdc100x_driver);
diff --git a/drivers/iio/humidity/hts221.h b/drivers/iio/humidity/hts221.h
new file mode 100644
index 000000000000..c7154665512e
--- /dev/null
+++ b/drivers/iio/humidity/hts221.h
@@ -0,0 +1,73 @@
+/*
+ * STMicroelectronics hts221 sensor driver
+ *
+ * Copyright 2016 STMicroelectronics Inc.
+ *
+ * Lorenzo Bianconi <lorenzo.bianconi@st.com>
+ *
+ * Licensed under the GPL-2.
+ */
+
+#ifndef HTS221_H
+#define HTS221_H
+
+#define HTS221_DEV_NAME "hts221"
+
+#include <linux/iio/iio.h>
+
+#define HTS221_RX_MAX_LENGTH 8
+#define HTS221_TX_MAX_LENGTH 8
+
+#define HTS221_DATA_SIZE 2
+
+struct hts221_transfer_buffer {
+ u8 rx_buf[HTS221_RX_MAX_LENGTH];
+ u8 tx_buf[HTS221_TX_MAX_LENGTH] ____cacheline_aligned;
+};
+
+struct hts221_transfer_function {
+ int (*read)(struct device *dev, u8 addr, int len, u8 *data);
+ int (*write)(struct device *dev, u8 addr, int len, u8 *data);
+};
+
+#define HTS221_AVG_DEPTH 8
+struct hts221_avg_avl {
+ u16 avg;
+ u8 val;
+};
+
+enum hts221_sensor_type {
+ HTS221_SENSOR_H,
+ HTS221_SENSOR_T,
+ HTS221_SENSOR_MAX,
+};
+
+struct hts221_sensor {
+ u8 cur_avg_idx;
+ int slope, b_gen;
+};
+
+struct hts221_hw {
+ const char *name;
+ struct device *dev;
+
+ struct mutex lock;
+ struct iio_trigger *trig;
+ int irq;
+
+ struct hts221_sensor sensors[HTS221_SENSOR_MAX];
+
+ u8 odr;
+
+ const struct hts221_transfer_function *tf;
+ struct hts221_transfer_buffer tb;
+};
+
+int hts221_config_drdy(struct hts221_hw *hw, bool enable);
+int hts221_probe(struct iio_dev *iio_dev);
+int hts221_power_on(struct hts221_hw *hw);
+int hts221_power_off(struct hts221_hw *hw);
+int hts221_allocate_buffers(struct hts221_hw *hw);
+int hts221_allocate_trigger(struct hts221_hw *hw);
+
+#endif /* HTS221_H */
diff --git a/drivers/iio/humidity/hts221_buffer.c b/drivers/iio/humidity/hts221_buffer.c
new file mode 100644
index 000000000000..72ddcdac21a2
--- /dev/null
+++ b/drivers/iio/humidity/hts221_buffer.c
@@ -0,0 +1,168 @@
+/*
+ * STMicroelectronics hts221 sensor driver
+ *
+ * Copyright 2016 STMicroelectronics Inc.
+ *
+ * Lorenzo Bianconi <lorenzo.bianconi@st.com>
+ *
+ * Licensed under the GPL-2.
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/irqreturn.h>
+
+#include <linux/iio/iio.h>
+#include <linux/iio/trigger.h>
+#include <linux/iio/events.h>
+#include <linux/iio/trigger_consumer.h>
+#include <linux/iio/triggered_buffer.h>
+#include <linux/iio/buffer.h>
+
+#include "hts221.h"
+
+#define HTS221_REG_STATUS_ADDR 0x27
+#define HTS221_RH_DRDY_MASK BIT(1)
+#define HTS221_TEMP_DRDY_MASK BIT(0)
+
+static int hts221_trig_set_state(struct iio_trigger *trig, bool state)
+{
+ struct iio_dev *iio_dev = iio_trigger_get_drvdata(trig);
+ struct hts221_hw *hw = iio_priv(iio_dev);
+
+ return hts221_config_drdy(hw, state);
+}
+
+static const struct iio_trigger_ops hts221_trigger_ops = {
+ .owner = THIS_MODULE,
+ .set_trigger_state = hts221_trig_set_state,
+};
+
+static irqreturn_t hts221_trigger_handler_thread(int irq, void *private)
+{
+ struct hts221_hw *hw = (struct hts221_hw *)private;
+ u8 status;
+ int err;
+
+ err = hw->tf->read(hw->dev, HTS221_REG_STATUS_ADDR, sizeof(status),
+ &status);
+ if (err < 0)
+ return IRQ_HANDLED;
+
+ /*
+ * The H_DA bit (humidity data available) is routed to the DRDY
+ * line. The humidity sample is computed after the temperature
+ * one, so we can assume both data channels are available once
+ * the H_DA bit is set in the status register.
+ */
+ if (!(status & HTS221_RH_DRDY_MASK))
+ return IRQ_NONE;
+
+ iio_trigger_poll_chained(hw->trig);
+
+ return IRQ_HANDLED;
+}
+
+int hts221_allocate_trigger(struct hts221_hw *hw)
+{
+ struct iio_dev *iio_dev = iio_priv_to_dev(hw);
+ unsigned long irq_type;
+ int err;
+
+ irq_type = irqd_get_trigger_type(irq_get_irq_data(hw->irq));
+
+ switch (irq_type) {
+ case IRQF_TRIGGER_HIGH:
+ case IRQF_TRIGGER_RISING:
+ break;
+ default:
+ dev_info(hw->dev,
+ "mode %lx unsupported, using IRQF_TRIGGER_RISING\n",
+ irq_type);
+ irq_type = IRQF_TRIGGER_RISING;
+ break;
+ }
+
+ err = devm_request_threaded_irq(hw->dev, hw->irq, NULL,
+ hts221_trigger_handler_thread,
+ irq_type | IRQF_ONESHOT,
+ hw->name, hw);
+ if (err) {
+ dev_err(hw->dev, "failed to request trigger irq %d\n",
+ hw->irq);
+ return err;
+ }
+
+ hw->trig = devm_iio_trigger_alloc(hw->dev, "%s-trigger",
+ iio_dev->name);
+ if (!hw->trig)
+ return -ENOMEM;
+
+ iio_trigger_set_drvdata(hw->trig, iio_dev);
+ hw->trig->ops = &hts221_trigger_ops;
+ hw->trig->dev.parent = hw->dev;
+ iio_dev->trig = iio_trigger_get(hw->trig);
+
+ return devm_iio_trigger_register(hw->dev, hw->trig);
+}
+
+static int hts221_buffer_preenable(struct iio_dev *iio_dev)
+{
+ return hts221_power_on(iio_priv(iio_dev));
+}
+
+static int hts221_buffer_postdisable(struct iio_dev *iio_dev)
+{
+ return hts221_power_off(iio_priv(iio_dev));
+}
+
+static const struct iio_buffer_setup_ops hts221_buffer_ops = {
+ .preenable = hts221_buffer_preenable,
+ .postenable = iio_triggered_buffer_postenable,
+ .predisable = iio_triggered_buffer_predisable,
+ .postdisable = hts221_buffer_postdisable,
+};
+
+static irqreturn_t hts221_buffer_handler_thread(int irq, void *p)
+{
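+ /* Two 2-byte samples padded to 8 bytes, plus the 8-byte timestamp */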
+ u8 buffer[ALIGN(2 * HTS221_DATA_SIZE, sizeof(s64)) + sizeof(s64)];
+ struct iio_poll_func *pf = p;
+ struct iio_dev *iio_dev = pf->indio_dev;
+ struct hts221_hw *hw = iio_priv(iio_dev);
+ struct iio_chan_spec const *ch;
+ int err;
+
+ /* humidity data */
+ ch = &iio_dev->channels[HTS221_SENSOR_H];
+ err = hw->tf->read(hw->dev, ch->address, HTS221_DATA_SIZE,
+ buffer);
+ if (err < 0)
+ goto out;
+
+ /* temperature data */
+ ch = &iio_dev->channels[HTS221_SENSOR_T];
+ err = hw->tf->read(hw->dev, ch->address, HTS221_DATA_SIZE,
+ buffer + HTS221_DATA_SIZE);
+ if (err < 0)
+ goto out;
+
+ iio_push_to_buffers_with_timestamp(iio_dev, buffer,
+ iio_get_time_ns(iio_dev));
+
+out:
+ iio_trigger_notify_done(hw->trig);
+
+ return IRQ_HANDLED;
+}
+
+int hts221_allocate_buffers(struct hts221_hw *hw)
+{
+ return devm_iio_triggered_buffer_setup(hw->dev, iio_priv_to_dev(hw),
+ NULL, hts221_buffer_handler_thread,
+ &hts221_buffer_ops);
+}
+
+MODULE_AUTHOR("Lorenzo Bianconi <lorenzo.bianconi@st.com>");
+MODULE_DESCRIPTION("STMicroelectronics hts221 buffer driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/humidity/hts221_core.c b/drivers/iio/humidity/hts221_core.c
new file mode 100644
index 000000000000..3f3ef4a1a474
--- /dev/null
+++ b/drivers/iio/humidity/hts221_core.c
@@ -0,0 +1,687 @@
+/*
+ * STMicroelectronics hts221 sensor driver
+ *
+ * Copyright 2016 STMicroelectronics Inc.
+ *
+ * Lorenzo Bianconi <lorenzo.bianconi@st.com>
+ *
+ * Licensed under the GPL-2.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/iio/sysfs.h>
+#include <linux/delay.h>
+#include <asm/unaligned.h>
+
+#include "hts221.h"
+
+#define HTS221_REG_WHOAMI_ADDR 0x0f
+#define HTS221_REG_WHOAMI_VAL 0xbc
+
+#define HTS221_REG_CNTRL1_ADDR 0x20
+#define HTS221_REG_CNTRL2_ADDR 0x21
+#define HTS221_REG_CNTRL3_ADDR 0x22
+
+#define HTS221_REG_AVG_ADDR 0x10
+#define HTS221_REG_H_OUT_L 0x28
+#define HTS221_REG_T_OUT_L 0x2a
+
+#define HTS221_HUMIDITY_AVG_MASK 0x07
+#define HTS221_TEMP_AVG_MASK 0x38
+
+#define HTS221_ODR_MASK 0x87
+#define HTS221_BDU_MASK BIT(2)
+
+#define HTS221_DRDY_MASK BIT(2)
+
+#define HTS221_ENABLE_SENSOR BIT(7)
+
+#define HTS221_HUMIDITY_AVG_4 0x00 /* 0.4 %RH */
+#define HTS221_HUMIDITY_AVG_8 0x01 /* 0.3 %RH */
+#define HTS221_HUMIDITY_AVG_16 0x02 /* 0.2 %RH */
+#define HTS221_HUMIDITY_AVG_32 0x03 /* 0.15 %RH */
+#define HTS221_HUMIDITY_AVG_64 0x04 /* 0.1 %RH */
+#define HTS221_HUMIDITY_AVG_128 0x05 /* 0.07 %RH */
+#define HTS221_HUMIDITY_AVG_256 0x06 /* 0.05 %RH */
+#define HTS221_HUMIDITY_AVG_512 0x07 /* 0.03 %RH */
+
+#define HTS221_TEMP_AVG_2 0x00 /* 0.08 degC */
+#define HTS221_TEMP_AVG_4 0x08 /* 0.05 degC */
+#define HTS221_TEMP_AVG_8 0x10 /* 0.04 degC */
+#define HTS221_TEMP_AVG_16 0x18 /* 0.03 degC */
+#define HTS221_TEMP_AVG_32 0x20 /* 0.02 degC */
+#define HTS221_TEMP_AVG_64 0x28 /* 0.015 degC */
+#define HTS221_TEMP_AVG_128 0x30 /* 0.01 degC */
+#define HTS221_TEMP_AVG_256 0x38 /* 0.007 degC */
+
+/* calibration registers */
+#define HTS221_REG_0RH_CAL_X_H 0x36
+#define HTS221_REG_1RH_CAL_X_H 0x3a
+#define HTS221_REG_0RH_CAL_Y_H 0x30
+#define HTS221_REG_1RH_CAL_Y_H 0x31
+#define HTS221_REG_0T_CAL_X_L 0x3c
+#define HTS221_REG_1T_CAL_X_L 0x3e
+#define HTS221_REG_0T_CAL_Y_H 0x32
+#define HTS221_REG_1T_CAL_Y_H 0x33
+#define HTS221_REG_T1_T0_CAL_Y_H 0x35
+
+struct hts221_odr {
+ u8 hz;
+ u8 val;
+};
+
+struct hts221_avg {
+ u8 addr;
+ u8 mask;
+ struct hts221_avg_avl avg_avl[HTS221_AVG_DEPTH];
+};
+
+static const struct hts221_odr hts221_odr_table[] = {
+ { 1, 0x01 }, /* 1Hz */
+ { 7, 0x02 }, /* 7Hz */
+ { 13, 0x03 }, /* 12.5Hz */
+};
+
+static const struct hts221_avg hts221_avg_list[] = {
+ {
+ .addr = HTS221_REG_AVG_ADDR,
+ .mask = HTS221_HUMIDITY_AVG_MASK,
+ .avg_avl = {
+ { 4, HTS221_HUMIDITY_AVG_4 },
+ { 8, HTS221_HUMIDITY_AVG_8 },
+ { 16, HTS221_HUMIDITY_AVG_16 },
+ { 32, HTS221_HUMIDITY_AVG_32 },
+ { 64, HTS221_HUMIDITY_AVG_64 },
+ { 128, HTS221_HUMIDITY_AVG_128 },
+ { 256, HTS221_HUMIDITY_AVG_256 },
+ { 512, HTS221_HUMIDITY_AVG_512 },
+ },
+ },
+ {
+ .addr = HTS221_REG_AVG_ADDR,
+ .mask = HTS221_TEMP_AVG_MASK,
+ .avg_avl = {
+ { 2, HTS221_TEMP_AVG_2 },
+ { 4, HTS221_TEMP_AVG_4 },
+ { 8, HTS221_TEMP_AVG_8 },
+ { 16, HTS221_TEMP_AVG_16 },
+ { 32, HTS221_TEMP_AVG_32 },
+ { 64, HTS221_TEMP_AVG_64 },
+ { 128, HTS221_TEMP_AVG_128 },
+ { 256, HTS221_TEMP_AVG_256 },
+ },
+ },
+};
+
+static const struct iio_chan_spec hts221_channels[] = {
+ {
+ .type = IIO_HUMIDITYRELATIVE,
+ .address = HTS221_REG_H_OUT_L,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_OFFSET) |
+ BIT(IIO_CHAN_INFO_SCALE) |
+ BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO),
+ .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ),
+ .scan_index = 0,
+ .scan_type = {
+ .sign = 's',
+ .realbits = 16,
+ .storagebits = 16,
+ .endianness = IIO_LE,
+ },
+ },
+ {
+ .type = IIO_TEMP,
+ .address = HTS221_REG_T_OUT_L,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_OFFSET) |
+ BIT(IIO_CHAN_INFO_SCALE) |
+ BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO),
+ .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ),
+ .scan_index = 1,
+ .scan_type = {
+ .sign = 's',
+ .realbits = 16,
+ .storagebits = 16,
+ .endianness = IIO_LE,
+ },
+ },
+ IIO_CHAN_SOFT_TIMESTAMP(2),
+};
+
+static int hts221_write_with_mask(struct hts221_hw *hw, u8 addr, u8 mask,
+ u8 val)
+{
+ u8 data;
+ int err;
+
+ mutex_lock(&hw->lock);
+
+ err = hw->tf->read(hw->dev, addr, sizeof(data), &data);
+ if (err < 0) {
+ dev_err(hw->dev, "failed to read %02x register\n", addr);
+ goto unlock;
+ }
+
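+ /* read-modify-write: update only the bits selected by mask */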
+ data = (data & ~mask) | (val & mask);
+
+ err = hw->tf->write(hw->dev, addr, sizeof(data), &data);
+ if (err < 0)
+ dev_err(hw->dev, "failed to write %02x register\n", addr);
+
+unlock:
+ mutex_unlock(&hw->lock);
+
+ return err;
+}
+
+static int hts221_check_whoami(struct hts221_hw *hw)
+{
+ u8 data;
+ int err;
+
+ err = hw->tf->read(hw->dev, HTS221_REG_WHOAMI_ADDR, sizeof(data),
+ &data);
+ if (err < 0) {
+ dev_err(hw->dev, "failed to read whoami register\n");
+ return err;
+ }
+
+ if (data != HTS221_REG_WHOAMI_VAL) {
+ dev_err(hw->dev, "wrong whoami {%02x vs %02x}\n",
+ data, HTS221_REG_WHOAMI_VAL);
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+int hts221_config_drdy(struct hts221_hw *hw, bool enable)
+{
+ u8 val = enable ? HTS221_DRDY_MASK : 0;
+ int err;
+
+ err = hts221_write_with_mask(hw, HTS221_REG_CNTRL3_ADDR,
+ HTS221_DRDY_MASK, val);
+
+ return err < 0 ? err : 0;
+}
+
+static int hts221_update_odr(struct hts221_hw *hw, u8 odr)
+{
+ int i, err;
+ u8 val;
+
+ for (i = 0; i < ARRAY_SIZE(hts221_odr_table); i++)
+ if (hts221_odr_table[i].hz == odr)
+ break;
+
+ if (i == ARRAY_SIZE(hts221_odr_table))
+ return -EINVAL;
+
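+ /* power on the sensor and enforce block-data-update along with the new ODR */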
+ val = HTS221_ENABLE_SENSOR | HTS221_BDU_MASK | hts221_odr_table[i].val;
+ err = hts221_write_with_mask(hw, HTS221_REG_CNTRL1_ADDR,
+ HTS221_ODR_MASK, val);
+ if (err < 0)
+ return err;
+
+ hw->odr = odr;
+
+ return 0;
+}
+
+static int hts221_update_avg(struct hts221_hw *hw,
+ enum hts221_sensor_type type,
+ u16 val)
+{
+ int i, err;
+ const struct hts221_avg *avg = &hts221_avg_list[type];
+
+ for (i = 0; i < HTS221_AVG_DEPTH; i++)
+ if (avg->avg_avl[i].avg == val)
+ break;
+
+ if (i == HTS221_AVG_DEPTH)
+ return -EINVAL;
+
+ err = hts221_write_with_mask(hw, avg->addr, avg->mask,
+ avg->avg_avl[i].val);
+ if (err < 0)
+ return err;
+
+ hw->sensors[type].cur_avg_idx = i;
+
+ return 0;
+}
+
+static ssize_t hts221_sysfs_sampling_freq(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ int i;
+ ssize_t len = 0;
+
+ for (i = 0; i < ARRAY_SIZE(hts221_odr_table); i++)
+ len += scnprintf(buf + len, PAGE_SIZE - len, "%d ",
+ hts221_odr_table[i].hz);
+ buf[len - 1] = '\n';
+
+ return len;
+}
+
+static ssize_t
+hts221_sysfs_rh_oversampling_avail(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ const struct hts221_avg *avg = &hts221_avg_list[HTS221_SENSOR_H];
+ ssize_t len = 0;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(avg->avg_avl); i++)
+ len += scnprintf(buf + len, PAGE_SIZE - len, "%d ",
+ avg->avg_avl[i].avg);
+ buf[len - 1] = '\n';
+
+ return len;
+}
+
+static ssize_t
+hts221_sysfs_temp_oversampling_avail(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ const struct hts221_avg *avg = &hts221_avg_list[HTS221_SENSOR_T];
+ ssize_t len = 0;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(avg->avg_avl); i++)
+ len += scnprintf(buf + len, PAGE_SIZE - len, "%d ",
+ avg->avg_avl[i].avg);
+ buf[len - 1] = '\n';
+
+ return len;
+}
+
+int hts221_power_on(struct hts221_hw *hw)
+{
+ return hts221_update_odr(hw, hw->odr);
+}
+
+int hts221_power_off(struct hts221_hw *hw)
+{
+ u8 data[] = {0x00, 0x00};
+
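+ /* clearing the PD bit in CTRL_REG1 puts the device in power-down */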
+ return hw->tf->write(hw->dev, HTS221_REG_CNTRL1_ADDR, sizeof(data),
+ data);
+}
+
+static int hts221_parse_temp_caldata(struct hts221_hw *hw)
+{
+ int err, *slope, *b_gen;
+ s16 cal_x0, cal_x1, cal_y0, cal_y1;
+ u8 cal0, cal1;
+
+ err = hw->tf->read(hw->dev, HTS221_REG_0T_CAL_Y_H,
+ sizeof(cal0), &cal0);
+ if (err < 0)
+ return err;
+
+ err = hw->tf->read(hw->dev, HTS221_REG_T1_T0_CAL_Y_H,
+ sizeof(cal1), &cal1);
+ if (err < 0)
+ return err;
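+ /* T0 is a 10-bit value: bits [1:0] of T1_T0_MSB carry its two MSBs */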
+ cal_y0 = ((cal1 & 0x3) << 8) | cal0;
+
+ err = hw->tf->read(hw->dev, HTS221_REG_1T_CAL_Y_H,
+ sizeof(cal0), &cal0);
+ if (err < 0)
+ return err;
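+ /* bits [3:2] of T1_T0_MSB carry the two MSBs of T1 */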
+ cal_y1 = (((cal1 & 0xc) >> 2) << 8) | cal0;
+
+ err = hw->tf->read(hw->dev, HTS221_REG_0T_CAL_X_L, sizeof(cal_x0),
+ (u8 *)&cal_x0);
+ if (err < 0)
+ return err;
+ cal_x0 = le16_to_cpu(cal_x0);
+
+ err = hw->tf->read(hw->dev, HTS221_REG_1T_CAL_X_L, sizeof(cal_x1),
+ (u8 *)&cal_x1);
+ if (err < 0)
+ return err;
+ cal_x1 = le16_to_cpu(cal_x1);
+
+ slope = &hw->sensors[HTS221_SENSOR_T].slope;
+ b_gen = &hw->sensors[HTS221_SENSOR_T].b_gen;
+
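+ /*
+ * two-point linear interpolation: slope and intercept of the line
+ * through the calibration points, kept in fixed point
+ */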
+ *slope = ((cal_y1 - cal_y0) * 8000) / (cal_x1 - cal_x0);
+ *b_gen = (((s32)cal_x1 * cal_y0 - (s32)cal_x0 * cal_y1) * 1000) /
+ (cal_x1 - cal_x0);
+ *b_gen *= 8;
+
+ return 0;
+}
+
+static int hts221_parse_rh_caldata(struct hts221_hw *hw)
+{
+ int err, *slope, *b_gen;
+ s16 cal_x0, cal_x1, cal_y0, cal_y1;
+ u8 data;
+
+ err = hw->tf->read(hw->dev, HTS221_REG_0RH_CAL_Y_H, sizeof(data),
+ &data);
+ if (err < 0)
+ return err;
+ cal_y0 = data;
+
+ err = hw->tf->read(hw->dev, HTS221_REG_1RH_CAL_Y_H, sizeof(data),
+ &data);
+ if (err < 0)
+ return err;
+ cal_y1 = data;
+
+ err = hw->tf->read(hw->dev, HTS221_REG_0RH_CAL_X_H, sizeof(cal_x0),
+ (u8 *)&cal_x0);
+ if (err < 0)
+ return err;
+ cal_x0 = le16_to_cpu(cal_x0);
+
+ err = hw->tf->read(hw->dev, HTS221_REG_1RH_CAL_X_H, sizeof(cal_x1),
+ (u8 *)&cal_x1);
+ if (err < 0)
+ return err;
+ cal_x1 = le16_to_cpu(cal_x1);
+
+ slope = &hw->sensors[HTS221_SENSOR_H].slope;
+ b_gen = &hw->sensors[HTS221_SENSOR_H].b_gen;
+
+ *slope = ((cal_y1 - cal_y0) * 8000) / (cal_x1 - cal_x0);
+ *b_gen = (((s32)cal_x1 * cal_y0 - (s32)cal_x0 * cal_y1) * 1000) /
+ (cal_x1 - cal_x0);
+ *b_gen *= 8;
+
+ return 0;
+}
+
+static int hts221_get_sensor_scale(struct hts221_hw *hw,
+ enum iio_chan_type ch_type,
+ int *val, int *val2)
+{
+ s64 tmp;
+ s32 rem, div, data;
+
+ switch (ch_type) {
+ case IIO_HUMIDITYRELATIVE:
+ data = hw->sensors[HTS221_SENSOR_H].slope;
+ div = (1 << 4) * 1000;
+ break;
+ case IIO_TEMP:
+ data = hw->sensors[HTS221_SENSOR_T].slope;
+ div = (1 << 6) * 1000;
+ break;
+ default:
+ return -EINVAL;
+ }
+
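+ /* convert to IIO_VAL_INT_PLUS_NANO: integer part plus nano remainder */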
+ tmp = div_s64(data * 1000000000LL, div);
+ tmp = div_s64_rem(tmp, 1000000000LL, &rem);
+
+ *val = tmp;
+ *val2 = rem;
+
+ return IIO_VAL_INT_PLUS_NANO;
+}
+
+static int hts221_get_sensor_offset(struct hts221_hw *hw,
+ enum iio_chan_type ch_type,
+ int *val, int *val2)
+{
+ s64 tmp;
+ s32 rem, div, data;
+
+ switch (ch_type) {
+ case IIO_HUMIDITYRELATIVE:
+ data = hw->sensors[HTS221_SENSOR_H].b_gen;
+ div = hw->sensors[HTS221_SENSOR_H].slope;
+ break;
+ case IIO_TEMP:
+ data = hw->sensors[HTS221_SENSOR_T].b_gen;
+ div = hw->sensors[HTS221_SENSOR_T].slope;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ tmp = div_s64(data * 1000000000LL, div);
+ tmp = div_s64_rem(tmp, 1000000000LL, &rem);
+
+ *val = tmp;
+ *val2 = rem;
+
+ return IIO_VAL_INT_PLUS_NANO;
+}
+
+static int hts221_read_oneshot(struct hts221_hw *hw, u8 addr, int *val)
+{
+ u8 data[HTS221_DATA_SIZE];
+ int err;
+
+ err = hts221_power_on(hw);
+ if (err < 0)
+ return err;
+
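+ /* give the sensor enough time to complete a new conversion */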
+ msleep(50);
+
+ err = hw->tf->read(hw->dev, addr, sizeof(data), data);
+ if (err < 0)
+ return err;
+
+ hts221_power_off(hw);
+
+ *val = (s16)get_unaligned_le16(data);
+
+ return IIO_VAL_INT;
+}
+
+static int hts221_read_raw(struct iio_dev *iio_dev,
+ struct iio_chan_spec const *ch,
+ int *val, int *val2, long mask)
+{
+ struct hts221_hw *hw = iio_priv(iio_dev);
+ int ret;
+
+ ret = iio_device_claim_direct_mode(iio_dev);
+ if (ret)
+ return ret;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ ret = hts221_read_oneshot(hw, ch->address, val);
+ break;
+ case IIO_CHAN_INFO_SCALE:
+ ret = hts221_get_sensor_scale(hw, ch->type, val, val2);
+ break;
+ case IIO_CHAN_INFO_OFFSET:
+ ret = hts221_get_sensor_offset(hw, ch->type, val, val2);
+ break;
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ *val = hw->odr;
+ ret = IIO_VAL_INT;
+ break;
+ case IIO_CHAN_INFO_OVERSAMPLING_RATIO: {
+ u8 idx;
+ const struct hts221_avg *avg;
+
+ switch (ch->type) {
+ case IIO_HUMIDITYRELATIVE:
+ avg = &hts221_avg_list[HTS221_SENSOR_H];
+ idx = hw->sensors[HTS221_SENSOR_H].cur_avg_idx;
+ *val = avg->avg_avl[idx].avg;
+ ret = IIO_VAL_INT;
+ break;
+ case IIO_TEMP:
+ avg = &hts221_avg_list[HTS221_SENSOR_T];
+ idx = hw->sensors[HTS221_SENSOR_T].cur_avg_idx;
+ *val = avg->avg_avl[idx].avg;
+ ret = IIO_VAL_INT;
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+ break;
+ }
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ iio_device_release_direct_mode(iio_dev);
+
+ return ret;
+}
+
+static int hts221_write_raw(struct iio_dev *iio_dev,
+ struct iio_chan_spec const *chan,
+ int val, int val2, long mask)
+{
+ struct hts221_hw *hw = iio_priv(iio_dev);
+ int ret;
+
+ ret = iio_device_claim_direct_mode(iio_dev);
+ if (ret)
+ return ret;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ ret = hts221_update_odr(hw, val);
+ break;
+ case IIO_CHAN_INFO_OVERSAMPLING_RATIO:
+ switch (chan->type) {
+ case IIO_HUMIDITYRELATIVE:
+ ret = hts221_update_avg(hw, HTS221_SENSOR_H, val);
+ break;
+ case IIO_TEMP:
+ ret = hts221_update_avg(hw, HTS221_SENSOR_T, val);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ iio_device_release_direct_mode(iio_dev);
+
+ return ret;
+}
+
+static int hts221_validate_trigger(struct iio_dev *iio_dev,
+ struct iio_trigger *trig)
+{
+ struct hts221_hw *hw = iio_priv(iio_dev);
+
+ return hw->trig == trig ? 0 : -EINVAL;
+}
+
+static IIO_DEVICE_ATTR(in_humidity_oversampling_ratio_available, S_IRUGO,
+ hts221_sysfs_rh_oversampling_avail, NULL, 0);
+static IIO_DEVICE_ATTR(in_temp_oversampling_ratio_available, S_IRUGO,
+ hts221_sysfs_temp_oversampling_avail, NULL, 0);
+static IIO_DEV_ATTR_SAMP_FREQ_AVAIL(hts221_sysfs_sampling_freq);
+
+static struct attribute *hts221_attributes[] = {
+ &iio_dev_attr_sampling_frequency_available.dev_attr.attr,
+ &iio_dev_attr_in_humidity_oversampling_ratio_available.dev_attr.attr,
+ &iio_dev_attr_in_temp_oversampling_ratio_available.dev_attr.attr,
+ NULL,
+};
+
+static const struct attribute_group hts221_attribute_group = {
+ .attrs = hts221_attributes,
+};
+
+static const struct iio_info hts221_info = {
+ .driver_module = THIS_MODULE,
+ .attrs = &hts221_attribute_group,
+ .read_raw = hts221_read_raw,
+ .write_raw = hts221_write_raw,
+ .validate_trigger = hts221_validate_trigger,
+};
+
+static const unsigned long hts221_scan_masks[] = {0x3, 0x0};
+
+int hts221_probe(struct iio_dev *iio_dev)
+{
+ struct hts221_hw *hw = iio_priv(iio_dev);
+ int err;
+ u8 data;
+
+ mutex_init(&hw->lock);
+
+ err = hts221_check_whoami(hw);
+ if (err < 0)
+ return err;
+
+ hw->odr = hts221_odr_table[0].hz;
+
+ iio_dev->modes = INDIO_DIRECT_MODE;
+ iio_dev->dev.parent = hw->dev;
+ iio_dev->available_scan_masks = hts221_scan_masks;
+ iio_dev->channels = hts221_channels;
+ iio_dev->num_channels = ARRAY_SIZE(hts221_channels);
+ iio_dev->name = HTS221_DEV_NAME;
+ iio_dev->info = &hts221_info;
+
+ /* configure humidity sensor */
+ err = hts221_parse_rh_caldata(hw);
+ if (err < 0) {
+ dev_err(hw->dev, "failed to get rh calibration data\n");
+ return err;
+ }
+
+ data = hts221_avg_list[HTS221_SENSOR_H].avg_avl[3].avg;
+ err = hts221_update_avg(hw, HTS221_SENSOR_H, data);
+ if (err < 0) {
+ dev_err(hw->dev, "failed to set rh oversampling ratio\n");
+ return err;
+ }
+
+ /* configure temperature sensor */
+ err = hts221_parse_temp_caldata(hw);
+ if (err < 0) {
+ dev_err(hw->dev,
+ "failed to get temperature calibration data\n");
+ return err;
+ }
+
+ data = hts221_avg_list[HTS221_SENSOR_T].avg_avl[3].avg;
+ err = hts221_update_avg(hw, HTS221_SENSOR_T, data);
+ if (err < 0) {
+ dev_err(hw->dev,
+ "failed to set temperature oversampling ratio\n");
+ return err;
+ }
+
+ if (hw->irq > 0) {
+ err = hts221_allocate_buffers(hw);
+ if (err < 0)
+ return err;
+
+ err = hts221_allocate_trigger(hw);
+ if (err)
+ return err;
+ }
+
+ return devm_iio_device_register(hw->dev, iio_dev);
+}
+EXPORT_SYMBOL(hts221_probe);
+
+MODULE_AUTHOR("Lorenzo Bianconi <lorenzo.bianconi@st.com>");
+MODULE_DESCRIPTION("STMicroelectronics hts221 sensor driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/humidity/hts221_i2c.c b/drivers/iio/humidity/hts221_i2c.c
new file mode 100644
index 000000000000..367ecd509f31
--- /dev/null
+++ b/drivers/iio/humidity/hts221_i2c.c
@@ -0,0 +1,110 @@
+/*
+ * STMicroelectronics hts221 i2c driver
+ *
+ * Copyright 2016 STMicroelectronics Inc.
+ *
+ * Lorenzo Bianconi <lorenzo.bianconi@st.com>
+ *
+ * Licensed under the GPL-2.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/slab.h>
+#include "hts221.h"
+
+#define I2C_AUTO_INCREMENT 0x80
+
+static int hts221_i2c_read(struct device *dev, u8 addr, int len, u8 *data)
+{
+ struct i2c_msg msg[2];
+ struct i2c_client *client = to_i2c_client(dev);
+
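+ /* setting the MSB of the register address enables auto-increment */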
+ if (len > 1)
+ addr |= I2C_AUTO_INCREMENT;
+
+ msg[0].addr = client->addr;
+ msg[0].flags = client->flags;
+ msg[0].len = 1;
+ msg[0].buf = &addr;
+
+ msg[1].addr = client->addr;
+ msg[1].flags = client->flags | I2C_M_RD;
+ msg[1].len = len;
+ msg[1].buf = data;
+
+ return i2c_transfer(client->adapter, msg, 2);
+}
+
+static int hts221_i2c_write(struct device *dev, u8 addr, int len, u8 *data)
+{
+ u8 send[len + 1];
+ struct i2c_msg msg;
+ struct i2c_client *client = to_i2c_client(dev);
+
+ if (len > 1)
+ addr |= I2C_AUTO_INCREMENT;
+
+ send[0] = addr;
+ memcpy(&send[1], data, len * sizeof(u8));
+
+ msg.addr = client->addr;
+ msg.flags = client->flags;
+ msg.len = len + 1;
+ msg.buf = send;
+
+ return i2c_transfer(client->adapter, &msg, 1);
+}
+
+static const struct hts221_transfer_function hts221_transfer_fn = {
+ .read = hts221_i2c_read,
+ .write = hts221_i2c_write,
+};
+
+static int hts221_i2c_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct hts221_hw *hw;
+ struct iio_dev *iio_dev;
+
+ iio_dev = devm_iio_device_alloc(&client->dev, sizeof(*hw));
+ if (!iio_dev)
+ return -ENOMEM;
+
+ i2c_set_clientdata(client, iio_dev);
+
+ hw = iio_priv(iio_dev);
+ hw->name = client->name;
+ hw->dev = &client->dev;
+ hw->irq = client->irq;
+ hw->tf = &hts221_transfer_fn;
+
+ return hts221_probe(iio_dev);
+}
+
+static const struct of_device_id hts221_i2c_of_match[] = {
+ { .compatible = "st,hts221", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, hts221_i2c_of_match);
+
+static const struct i2c_device_id hts221_i2c_id_table[] = {
+ { HTS221_DEV_NAME },
+ {},
+};
+MODULE_DEVICE_TABLE(i2c, hts221_i2c_id_table);
+
+static struct i2c_driver hts221_driver = {
+ .driver = {
+ .name = "hts221_i2c",
+ .of_match_table = of_match_ptr(hts221_i2c_of_match),
+ },
+ .probe = hts221_i2c_probe,
+ .id_table = hts221_i2c_id_table,
+};
+module_i2c_driver(hts221_driver);
+
+MODULE_AUTHOR("Lorenzo Bianconi <lorenzo.bianconi@st.com>");
+MODULE_DESCRIPTION("STMicroelectronics hts221 i2c driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/humidity/hts221_spi.c b/drivers/iio/humidity/hts221_spi.c
new file mode 100644
index 000000000000..70df5e7150c1
--- /dev/null
+++ b/drivers/iio/humidity/hts221_spi.c
@@ -0,0 +1,125 @@
+/*
+ * STMicroelectronics hts221 spi driver
+ *
+ * Copyright 2016 STMicroelectronics Inc.
+ *
+ * Lorenzo Bianconi <lorenzo.bianconi@st.com>
+ *
+ * Licensed under the GPL-2.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/spi/spi.h>
+#include <linux/slab.h>
+#include "hts221.h"
+
+#define SENSORS_SPI_READ 0x80
+#define SPI_AUTO_INCREMENT 0x40
+
+static int hts221_spi_read(struct device *dev, u8 addr, int len, u8 *data)
+{
+ int err;
+ struct spi_device *spi = to_spi_device(dev);
+ struct iio_dev *iio_dev = spi_get_drvdata(spi);
+ struct hts221_hw *hw = iio_priv(iio_dev);
+
+ struct spi_transfer xfers[] = {
+ {
+ .tx_buf = hw->tb.tx_buf,
+ .bits_per_word = 8,
+ .len = 1,
+ },
+ {
+ .rx_buf = hw->tb.rx_buf,
+ .bits_per_word = 8,
+ .len = len,
+ }
+ };
+
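+ /* bit 7 marks a read, bit 6 enables register address auto-increment */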
+ if (len > 1)
+ addr |= SPI_AUTO_INCREMENT;
+ hw->tb.tx_buf[0] = addr | SENSORS_SPI_READ;
+
+ err = spi_sync_transfer(spi, xfers, ARRAY_SIZE(xfers));
+ if (err < 0)
+ return err;
+
+ memcpy(data, hw->tb.rx_buf, len * sizeof(u8));
+
+ return len;
+}
+
+static int hts221_spi_write(struct device *dev, u8 addr, int len, u8 *data)
+{
+ struct spi_device *spi = to_spi_device(dev);
+ struct iio_dev *iio_dev = spi_get_drvdata(spi);
+ struct hts221_hw *hw = iio_priv(iio_dev);
+
+ struct spi_transfer xfers = {
+ .tx_buf = hw->tb.tx_buf,
+ .bits_per_word = 8,
+ .len = len + 1,
+ };
+
+ if (len >= HTS221_TX_MAX_LENGTH)
+ return -ENOMEM;
+
+ if (len > 1)
+ addr |= SPI_AUTO_INCREMENT;
+ hw->tb.tx_buf[0] = addr;
+ memcpy(&hw->tb.tx_buf[1], data, len);
+
+ return spi_sync_transfer(spi, &xfers, 1);
+}
+
+static const struct hts221_transfer_function hts221_transfer_fn = {
+ .read = hts221_spi_read,
+ .write = hts221_spi_write,
+};
+
+static int hts221_spi_probe(struct spi_device *spi)
+{
+ struct hts221_hw *hw;
+ struct iio_dev *iio_dev;
+
+ iio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*hw));
+ if (!iio_dev)
+ return -ENOMEM;
+
+ spi_set_drvdata(spi, iio_dev);
+
+ hw = iio_priv(iio_dev);
+ hw->name = spi->modalias;
+ hw->dev = &spi->dev;
+ hw->irq = spi->irq;
+ hw->tf = &hts221_transfer_fn;
+
+ return hts221_probe(iio_dev);
+}
+
+static const struct of_device_id hts221_spi_of_match[] = {
+ { .compatible = "st,hts221", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, hts221_spi_of_match);
+
+static const struct spi_device_id hts221_spi_id_table[] = {
+ { HTS221_DEV_NAME },
+ {},
+};
+MODULE_DEVICE_TABLE(spi, hts221_spi_id_table);
+
+static struct spi_driver hts221_driver = {
+ .driver = {
+ .name = "hts221_spi",
+ .of_match_table = of_match_ptr(hts221_spi_of_match),
+ },
+ .probe = hts221_spi_probe,
+ .id_table = hts221_spi_id_table,
+};
+module_spi_driver(hts221_driver);
+
+MODULE_AUTHOR("Lorenzo Bianconi <lorenzo.bianconi@st.com>");
+MODULE_DESCRIPTION("STMicroelectronics hts221 spi driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/humidity/si7020.c b/drivers/iio/humidity/si7020.c
index ffc2ccf6374e..345a7656c5ef 100644
--- a/drivers/iio/humidity/si7020.c
+++ b/drivers/iio/humidity/si7020.c
@@ -154,8 +154,17 @@ static const struct i2c_device_id si7020_id[] = {
};
MODULE_DEVICE_TABLE(i2c, si7020_id);
+static const struct of_device_id si7020_dt_ids[] = {
+ { .compatible = "silabs,si7020" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, si7020_dt_ids);
+
static struct i2c_driver si7020_driver = {
- .driver.name = "si7020",
+ .driver = {
+ .name = "si7020",
+ .of_match_table = of_match_ptr(si7020_dt_ids),
+ },
.probe = si7020_probe,
.id_table = si7020_id,
};
diff --git a/drivers/iio/imu/bmi160/bmi160_core.c b/drivers/iio/imu/bmi160/bmi160_core.c
index e0251b8c1a52..5355507f8fa1 100644
--- a/drivers/iio/imu/bmi160/bmi160_core.c
+++ b/drivers/iio/imu/bmi160/bmi160_core.c
@@ -398,7 +398,8 @@ static irqreturn_t bmi160_trigger_handler(int irq, void *p)
struct iio_poll_func *pf = p;
struct iio_dev *indio_dev = pf->indio_dev;
struct bmi160_data *data = iio_priv(indio_dev);
- s16 buf[16]; /* 3 sens x 3 axis x s16 + 3 x s16 pad + 4 x s16 tstamp */
+ __le16 buf[16];
+ /* 3 sens x 3 axis x __le16 + 3 x __le16 pad + 4 x __le16 tstamp */
int i, ret, j = 0, base = BMI160_REG_DATA_MAGN_XOUT_L;
__le16 sample;
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c
index 19580d1db597..2c3f8964a3ea 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c
@@ -126,7 +126,7 @@ static int inv_mpu_probe(struct i2c_client *client,
st = iio_priv(dev_get_drvdata(&client->dev));
st->muxc = i2c_mux_alloc(client->adapter, &client->dev,
- 1, 0, I2C_MUX_LOCKED,
+ 1, 0, I2C_MUX_LOCKED | I2C_MUX_GATE,
inv_mpu6050_select_bypass,
inv_mpu6050_deselect_bypass);
if (!st->muxc) {
diff --git a/drivers/iio/industrialio-buffer.c b/drivers/iio/industrialio-buffer.c
index 158aaf44dd95..b12830b09c7d 100644
--- a/drivers/iio/industrialio-buffer.c
+++ b/drivers/iio/industrialio-buffer.c
@@ -307,10 +307,9 @@ static int iio_scan_mask_set(struct iio_dev *indio_dev,
const unsigned long *mask;
unsigned long *trialmask;
- trialmask = kmalloc(sizeof(*trialmask)*
- BITS_TO_LONGS(indio_dev->masklength),
- GFP_KERNEL);
-
+ trialmask = kmalloc_array(BITS_TO_LONGS(indio_dev->masklength),
+ sizeof(*trialmask),
+ GFP_KERNEL);
if (trialmask == NULL)
return -ENOMEM;
if (!indio_dev->masklength) {
diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c
index fc340ed3dca1..aaca42862389 100644
--- a/drivers/iio/industrialio-core.c
+++ b/drivers/iio/industrialio-core.c
@@ -81,6 +81,8 @@ static const char * const iio_chan_type_name_spec[] = {
[IIO_PH] = "ph",
[IIO_UVINDEX] = "uvindex",
[IIO_ELECTRICALCONDUCTIVITY] = "electricalconductivity",
+ [IIO_COUNT] = "count",
+ [IIO_INDEX] = "index",
};
static const char * const iio_modifier_names[] = {
@@ -575,66 +577,82 @@ int of_iio_read_mount_matrix(const struct device *dev,
#endif
EXPORT_SYMBOL(of_iio_read_mount_matrix);
-/**
- * iio_format_value() - Formats a IIO value into its string representation
- * @buf: The buffer to which the formatted value gets written
- * @type: One of the IIO_VAL_... constants. This decides how the val
- * and val2 parameters are formatted.
- * @size: Number of IIO value entries contained in vals
- * @vals: Pointer to the values, exact meaning depends on the
- * type parameter.
- *
- * Return: 0 by default, a negative number on failure or the
- * total number of characters written for a type that belongs
- * to the IIO_VAL_... constant.
- */
-ssize_t iio_format_value(char *buf, unsigned int type, int size, int *vals)
+static ssize_t __iio_format_value(char *buf, size_t len, unsigned int type,
+ int size, const int *vals)
{
unsigned long long tmp;
+ int tmp0, tmp1;
bool scale_db = false;
switch (type) {
case IIO_VAL_INT:
- return sprintf(buf, "%d\n", vals[0]);
+ return snprintf(buf, len, "%d", vals[0]);
case IIO_VAL_INT_PLUS_MICRO_DB:
scale_db = true;
case IIO_VAL_INT_PLUS_MICRO:
if (vals[1] < 0)
- return sprintf(buf, "-%d.%06u%s\n", abs(vals[0]),
- -vals[1], scale_db ? " dB" : "");
+ return snprintf(buf, len, "-%d.%06u%s", abs(vals[0]),
+ -vals[1], scale_db ? " dB" : "");
else
- return sprintf(buf, "%d.%06u%s\n", vals[0], vals[1],
- scale_db ? " dB" : "");
+ return snprintf(buf, len, "%d.%06u%s", vals[0], vals[1],
+ scale_db ? " dB" : "");
case IIO_VAL_INT_PLUS_NANO:
if (vals[1] < 0)
- return sprintf(buf, "-%d.%09u\n", abs(vals[0]),
- -vals[1]);
+ return snprintf(buf, len, "-%d.%09u", abs(vals[0]),
+ -vals[1]);
else
- return sprintf(buf, "%d.%09u\n", vals[0], vals[1]);
+ return snprintf(buf, len, "%d.%09u", vals[0], vals[1]);
case IIO_VAL_FRACTIONAL:
tmp = div_s64((s64)vals[0] * 1000000000LL, vals[1]);
- vals[0] = (int)div_s64_rem(tmp, 1000000000, &vals[1]);
- return sprintf(buf, "%d.%09u\n", vals[0], abs(vals[1]));
+ tmp1 = vals[1];
+ tmp0 = (int)div_s64_rem(tmp, 1000000000, &tmp1);
+ return snprintf(buf, len, "%d.%09u", tmp0, abs(tmp1));
case IIO_VAL_FRACTIONAL_LOG2:
tmp = (s64)vals[0] * 1000000000LL >> vals[1];
- vals[1] = do_div(tmp, 1000000000LL);
- vals[0] = tmp;
- return sprintf(buf, "%d.%09u\n", vals[0], vals[1]);
+ tmp1 = do_div(tmp, 1000000000LL);
+ tmp0 = tmp;
+ return snprintf(buf, len, "%d.%09u", tmp0, tmp1);
case IIO_VAL_INT_MULTIPLE:
{
int i;
- int len = 0;
+ int l = 0;
- for (i = 0; i < size; ++i)
- len += snprintf(&buf[len], PAGE_SIZE - len, "%d ",
- vals[i]);
- len += snprintf(&buf[len], PAGE_SIZE - len, "\n");
- return len;
+ for (i = 0; i < size; ++i) {
+ l += snprintf(&buf[l], len - l, "%d ", vals[i]);
+ if (l >= len)
+ break;
+ }
+ return l;
}
default:
return 0;
}
}
+
+/**
+ * iio_format_value() - Formats a IIO value into its string representation
+ * @buf: The buffer to which the formatted value gets written
+ * which is assumed to be big enough (i.e. PAGE_SIZE).
+ * @type: One of the IIO_VAL_... constants. This decides how the val
+ * and val2 parameters are formatted.
+ * @size: Number of IIO value entries contained in vals
+ * @vals: Pointer to the values, exact meaning depends on the
+ * type parameter.
+ *
+ * Return: 0 by default, a negative number on failure or the
+ * total number of characters written for a type that belongs
+ * to the IIO_VAL_... constant.
+ */
+ssize_t iio_format_value(char *buf, unsigned int type, int size, int *vals)
+{
+ ssize_t len;
+
+ len = __iio_format_value(buf, PAGE_SIZE, type, size, vals);
+ if (len >= PAGE_SIZE - 1)
+ return -EFBIG;
+
+ return len + sprintf(buf + len, "\n");
+}
EXPORT_SYMBOL_GPL(iio_format_value);
static ssize_t iio_read_channel_info(struct device *dev,
@@ -662,6 +680,119 @@ static ssize_t iio_read_channel_info(struct device *dev,
return iio_format_value(buf, ret, val_len, vals);
}
+static ssize_t iio_format_avail_list(char *buf, const int *vals,
+ int type, int length)
+{
+ int i;
+ ssize_t len = 0;
+
+ switch (type) {
+ case IIO_VAL_INT:
+ for (i = 0; i < length; i++) {
+ len += __iio_format_value(buf + len, PAGE_SIZE - len,
+ type, 1, &vals[i]);
+ if (len >= PAGE_SIZE)
+ return -EFBIG;
+ if (i < length - 1)
+ len += snprintf(buf + len, PAGE_SIZE - len,
+ " ");
+ else
+ len += snprintf(buf + len, PAGE_SIZE - len,
+ "\n");
+ if (len >= PAGE_SIZE)
+ return -EFBIG;
+ }
+ break;
+ default:
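+ /* other types are (val, val2) pairs, so consume two entries each */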
+ for (i = 0; i < length / 2; i++) {
+ len += __iio_format_value(buf + len, PAGE_SIZE - len,
+ type, 2, &vals[i * 2]);
+ if (len >= PAGE_SIZE)
+ return -EFBIG;
+ if (i < length / 2 - 1)
+ len += snprintf(buf + len, PAGE_SIZE - len,
+ " ");
+ else
+ len += snprintf(buf + len, PAGE_SIZE - len,
+ "\n");
+ if (len >= PAGE_SIZE)
+ return -EFBIG;
+ }
+ }
+
+ return len;
+}
+
+static ssize_t iio_format_avail_range(char *buf, const int *vals, int type)
+{
+ int i;
+ ssize_t len;
+
+ len = snprintf(buf, PAGE_SIZE, "[");
+ switch (type) {
+ case IIO_VAL_INT:
+ for (i = 0; i < 3; i++) {
+ len += __iio_format_value(buf + len, PAGE_SIZE - len,
+ type, 1, &vals[i]);
+ if (len >= PAGE_SIZE)
+ return -EFBIG;
+ if (i < 2)
+ len += snprintf(buf + len, PAGE_SIZE - len,
+ " ");
+ else
+ len += snprintf(buf + len, PAGE_SIZE - len,
+ "]\n");
+ if (len >= PAGE_SIZE)
+ return -EFBIG;
+ }
+ break;
+ default:
+ for (i = 0; i < 3; i++) {
+ len += __iio_format_value(buf + len, PAGE_SIZE - len,
+ type, 2, &vals[i * 2]);
+ if (len >= PAGE_SIZE)
+ return -EFBIG;
+ if (i < 2)
+ len += snprintf(buf + len, PAGE_SIZE - len,
+ " ");
+ else
+ len += snprintf(buf + len, PAGE_SIZE - len,
+ "]\n");
+ if (len >= PAGE_SIZE)
+ return -EFBIG;
+ }
+ }
+
+ return len;
+}
+
+static ssize_t iio_read_channel_info_avail(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+ struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
+ const int *vals;
+ int ret;
+ int length;
+ int type;
+
+ ret = indio_dev->info->read_avail(indio_dev, this_attr->c,
+ &vals, &type, &length,
+ this_attr->address);
+
+ if (ret < 0)
+ return ret;
+ switch (ret) {
+ case IIO_AVAIL_LIST:
+ return iio_format_avail_list(buf, vals, type, length);
+ case IIO_AVAIL_RANGE:
+ return iio_format_avail_range(buf, vals, type);
+ default:
+ return -EINVAL;
+ }
+}
+
/**
* iio_str_to_fixpoint() - Parse a fixed-point number from a string
* @str: The string to parse
@@ -978,6 +1109,40 @@ static int iio_device_add_info_mask_type(struct iio_dev *indio_dev,
return attrcount;
}
+static int iio_device_add_info_mask_type_avail(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ enum iio_shared_by shared_by,
+ const long *infomask)
+{
+ int i, ret, attrcount = 0;
+ char *avail_postfix;
+
+ for_each_set_bit(i, infomask, sizeof(*infomask) * 8) {
+ avail_postfix = kasprintf(GFP_KERNEL,
+ "%s_available",
+ iio_chan_info_postfix[i]);
+ if (!avail_postfix)
+ return -ENOMEM;
+
+ ret = __iio_add_chan_devattr(avail_postfix,
+ chan,
+ &iio_read_channel_info_avail,
+ NULL,
+ i,
+ shared_by,
+ &indio_dev->dev,
+ &indio_dev->channel_attr_list);
+ kfree(avail_postfix);
+ if ((ret == -EBUSY) && (shared_by != IIO_SEPARATE))
+ continue;
+ else if (ret < 0)
+ return ret;
+ attrcount++;
+ }
+
+ return attrcount;
+}
+
static int iio_device_add_channel_sysfs(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan)
{
@@ -993,6 +1158,14 @@ static int iio_device_add_channel_sysfs(struct iio_dev *indio_dev,
return ret;
attrcount += ret;
+ ret = iio_device_add_info_mask_type_avail(indio_dev, chan,
+ IIO_SEPARATE,
+ &chan->
+ info_mask_separate_available);
+ if (ret < 0)
+ return ret;
+ attrcount += ret;
+
ret = iio_device_add_info_mask_type(indio_dev, chan,
IIO_SHARED_BY_TYPE,
&chan->info_mask_shared_by_type);
@@ -1000,6 +1173,14 @@ static int iio_device_add_channel_sysfs(struct iio_dev *indio_dev,
return ret;
attrcount += ret;
+ ret = iio_device_add_info_mask_type_avail(indio_dev, chan,
+ IIO_SHARED_BY_TYPE,
+ &chan->
+ info_mask_shared_by_type_available);
+ if (ret < 0)
+ return ret;
+ attrcount += ret;
+
ret = iio_device_add_info_mask_type(indio_dev, chan,
IIO_SHARED_BY_DIR,
&chan->info_mask_shared_by_dir);
@@ -1007,6 +1188,13 @@ static int iio_device_add_channel_sysfs(struct iio_dev *indio_dev,
return ret;
attrcount += ret;
+ ret = iio_device_add_info_mask_type_avail(indio_dev, chan,
+ IIO_SHARED_BY_DIR,
+ &chan->info_mask_shared_by_dir_available);
+ if (ret < 0)
+ return ret;
+ attrcount += ret;
+
ret = iio_device_add_info_mask_type(indio_dev, chan,
IIO_SHARED_BY_ALL,
&chan->info_mask_shared_by_all);
@@ -1014,6 +1202,13 @@ static int iio_device_add_channel_sysfs(struct iio_dev *indio_dev,
return ret;
attrcount += ret;
+ ret = iio_device_add_info_mask_type_avail(indio_dev, chan,
+ IIO_SHARED_BY_ALL,
+ &chan->info_mask_shared_by_all_available);
+ if (ret < 0)
+ return ret;
+ attrcount += ret;
+
if (chan->ext_info) {
unsigned int i = 0;
for (ext_info = chan->ext_info; ext_info->name; ext_info++) {
diff --git a/drivers/iio/industrialio-trigger.c b/drivers/iio/industrialio-trigger.c
index e1e104845e38..978729f6d7c4 100644
--- a/drivers/iio/industrialio-trigger.c
+++ b/drivers/iio/industrialio-trigger.c
@@ -717,6 +717,27 @@ bool iio_trigger_using_own(struct iio_dev *indio_dev)
}
EXPORT_SYMBOL(iio_trigger_using_own);
+/**
+ * iio_trigger_validate_own_device - Check if a trigger and IIO device belong to
+ * the same device
+ * @trig: The IIO trigger to check
+ * @indio_dev: the IIO device to check
+ *
+ * This function can be used as the validate_device callback for triggers that
+ * can only be attached to their own device.
+ *
+ * Return: 0 if both the trigger and the IIO device belong to the same
+ * device, -EINVAL otherwise.
+ */
+int iio_trigger_validate_own_device(struct iio_trigger *trig,
+ struct iio_dev *indio_dev)
+{
+ if (indio_dev->dev.parent != trig->dev.parent)
+ return -EINVAL;
+ return 0;
+}
+EXPORT_SYMBOL(iio_trigger_validate_own_device);
+
void iio_device_register_trigger_consumer(struct iio_dev *indio_dev)
{
indio_dev->groups[indio_dev->groupcounter++] =
diff --git a/drivers/iio/inkern.c b/drivers/iio/inkern.c
index c4757e6367e7..b0f4630a163f 100644
--- a/drivers/iio/inkern.c
+++ b/drivers/iio/inkern.c
@@ -658,6 +658,31 @@ err_unlock:
}
EXPORT_SYMBOL_GPL(iio_convert_raw_to_processed);
+static int iio_read_channel_attribute(struct iio_channel *chan,
+ int *val, int *val2,
+ enum iio_chan_info_enum attribute)
+{
+ int ret;
+
+ mutex_lock(&chan->indio_dev->info_exist_lock);
+ if (chan->indio_dev->info == NULL) {
+ ret = -ENODEV;
+ goto err_unlock;
+ }
+
+ ret = iio_channel_read(chan, val, val2, attribute);
+err_unlock:
+ mutex_unlock(&chan->indio_dev->info_exist_lock);
+
+ return ret;
+}
+
+int iio_read_channel_offset(struct iio_channel *chan, int *val, int *val2)
+{
+ return iio_read_channel_attribute(chan, val, val2, IIO_CHAN_INFO_OFFSET);
+}
+EXPORT_SYMBOL_GPL(iio_read_channel_offset);
+
int iio_read_channel_processed(struct iio_channel *chan, int *val)
{
int ret;
@@ -687,21 +712,113 @@ EXPORT_SYMBOL_GPL(iio_read_channel_processed);
int iio_read_channel_scale(struct iio_channel *chan, int *val, int *val2)
{
+ return iio_read_channel_attribute(chan, val, val2, IIO_CHAN_INFO_SCALE);
+}
+EXPORT_SYMBOL_GPL(iio_read_channel_scale);
+
+static int iio_channel_read_avail(struct iio_channel *chan,
+ const int **vals, int *type, int *length,
+ enum iio_chan_info_enum info)
+{
+ if (!iio_channel_has_available(chan->channel, info))
+ return -EINVAL;
+
+ return chan->indio_dev->info->read_avail(chan->indio_dev, chan->channel,
+ vals, type, length, info);
+}
+
+int iio_read_avail_channel_raw(struct iio_channel *chan,
+ const int **vals, int *length)
+{
int ret;
+ int type;
mutex_lock(&chan->indio_dev->info_exist_lock);
- if (chan->indio_dev->info == NULL) {
+ if (!chan->indio_dev->info) {
ret = -ENODEV;
goto err_unlock;
}
- ret = iio_channel_read(chan, val, val2, IIO_CHAN_INFO_SCALE);
+ ret = iio_channel_read_avail(chan,
+ vals, &type, length, IIO_CHAN_INFO_RAW);
err_unlock:
mutex_unlock(&chan->indio_dev->info_exist_lock);
+ if (ret >= 0 && type != IIO_VAL_INT)
+ /* raw values are assumed to be IIO_VAL_INT */
+ ret = -EINVAL;
+
return ret;
}
-EXPORT_SYMBOL_GPL(iio_read_channel_scale);
+EXPORT_SYMBOL_GPL(iio_read_avail_channel_raw);
+
+static int iio_channel_read_max(struct iio_channel *chan,
+ int *val, int *val2, int *type,
+ enum iio_chan_info_enum info)
+{
+ int unused;
+ const int *vals;
+ int length;
+ int ret;
+
+ if (!val2)
+ val2 = &unused;
+
+ ret = iio_channel_read_avail(chan, &vals, type, &length, info);
+ switch (ret) {
+ case IIO_AVAIL_RANGE:
+ switch (*type) {
+ case IIO_VAL_INT:
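+ /* a range is (min, step, max); max is the last entry */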
+ *val = vals[2];
+ break;
+ default:
+ *val = vals[4];
+ *val2 = vals[5];
+ }
+ return 0;
+
+ case IIO_AVAIL_LIST:
+ if (length <= 0)
+ return -EINVAL;
+ switch (*type) {
+ case IIO_VAL_INT:
+ *val = vals[--length];
+ while (length) {
+ if (vals[--length] > *val)
+ *val = vals[length];
+ }
+ break;
+ default:
+ /* FIXME: learn about max for other iio values */
+ return -EINVAL;
+ }
+ return 0;
+
+ default:
+ return ret;
+ }
+}
+
+int iio_read_max_channel_raw(struct iio_channel *chan, int *val)
+{
+ int ret;
+ int type;
+
+ mutex_lock(&chan->indio_dev->info_exist_lock);
+ if (!chan->indio_dev->info) {
+ ret = -ENODEV;
+ goto err_unlock;
+ }
+
+ ret = iio_channel_read_max(chan, val, NULL, &type, IIO_CHAN_INFO_RAW);
+err_unlock:
+ mutex_unlock(&chan->indio_dev->info_exist_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(iio_read_max_channel_raw);
int iio_get_channel_type(struct iio_channel *chan, enum iio_chan_type *type)
{
diff --git a/drivers/iio/light/Kconfig b/drivers/iio/light/Kconfig
index ba2e64d7ee58..298ea5081a96 100644
--- a/drivers/iio/light/Kconfig
+++ b/drivers/iio/light/Kconfig
@@ -140,6 +140,18 @@ config GP2AP020A00F
To compile this driver as a module, choose M here: the
module will be called gp2ap020a00f.
+config SENSORS_ISL29018
+ tristate "Intersil 29018 light and proximity sensor"
+ depends on I2C
+ select REGMAP_I2C
+ default n
+ help
+ If you say yes here you get support for ambient light sensing and
+ proximity infrared sensing from the Intersil ISL29018.
+ This driver provides measurements of ambient light intensity in lux,
+ proximity infrared sensing and normal infrared sensing.
+ Data from the sensor is accessible via sysfs.
+
config ISL29125
tristate "Intersil ISL29125 digital color light sensor"
depends on I2C
@@ -326,6 +338,13 @@ config SENSORS_TSL2563
This driver can also be built as a module. If so, the module
will be called tsl2563.
+config TSL2583
+ tristate "TAOS TSL2580, TSL2581 and TSL2583 light-to-digital converters"
+ depends on I2C
+ help
+ Provides support for the TAOS tsl2580, tsl2581 and tsl2583 devices.
+ ALS data is accessible via the iio sysfs interface.
+
config TSL4531
tristate "TAOS TSL4531 ambient light sensors"
depends on I2C
diff --git a/drivers/iio/light/Makefile b/drivers/iio/light/Makefile
index c5768df87a17..4de520036e6e 100644
--- a/drivers/iio/light/Makefile
+++ b/drivers/iio/light/Makefile
@@ -17,6 +17,7 @@ obj-$(CONFIG_CM36651) += cm36651.o
obj-$(CONFIG_GP2AP020A00F) += gp2ap020a00f.o
obj-$(CONFIG_HID_SENSOR_ALS) += hid-sensor-als.o
obj-$(CONFIG_HID_SENSOR_PROX) += hid-sensor-prox.o
+obj-$(CONFIG_SENSORS_ISL29018) += isl29018.o
obj-$(CONFIG_ISL29125) += isl29125.o
obj-$(CONFIG_JSA1212) += jsa1212.o
obj-$(CONFIG_SENSORS_LM3533) += lm3533-als.o
@@ -30,6 +31,7 @@ obj-$(CONFIG_SI1145) += si1145.o
obj-$(CONFIG_STK3310) += stk3310.o
obj-$(CONFIG_TCS3414) += tcs3414.o
obj-$(CONFIG_TCS3472) += tcs3472.o
+obj-$(CONFIG_TSL2583) += tsl2583.o
obj-$(CONFIG_TSL4531) += tsl4531.o
obj-$(CONFIG_US5182D) += us5182d.o
obj-$(CONFIG_VCNL4000) += vcnl4000.o
diff --git a/drivers/staging/iio/light/isl29018.c b/drivers/iio/light/isl29018.c
index a767a43c995c..917dd8b43e72 100644
--- a/drivers/staging/iio/light/isl29018.c
+++ b/drivers/iio/light/isl29018.c
@@ -62,16 +62,6 @@
#define ISL29035_BOUT_SHIFT 0x07
#define ISL29035_BOUT_MASK (0x01 << ISL29035_BOUT_SHIFT)
-#define ISL29018_INT_TIME_AVAIL "0.090000 0.005630 0.000351 0.000021"
-#define ISL29023_INT_TIME_AVAIL "0.090000 0.005600 0.000352 0.000022"
-#define ISL29035_INT_TIME_AVAIL "0.105000 0.006500 0.000410 0.000025"
-
-static const char * const int_time_avail[] = {
- ISL29018_INT_TIME_AVAIL,
- ISL29023_INT_TIME_AVAIL,
- ISL29035_INT_TIME_AVAIL,
-};
-
enum isl29018_int_time {
ISL29018_INT_TIME_16,
ISL29018_INT_TIME_12,
@@ -110,7 +100,8 @@ struct isl29018_chip {
static int isl29018_set_integration_time(struct isl29018_chip *chip,
unsigned int utime)
{
- int i, ret;
+ unsigned int i;
+ int ret;
unsigned int int_time, new_int_time;
for (i = 0; i < ARRAY_SIZE(isl29018_int_utimes[chip->type]); ++i) {
@@ -145,7 +136,8 @@ static int isl29018_set_integration_time(struct isl29018_chip *chip,
static int isl29018_set_scale(struct isl29018_chip *chip, int scale, int uscale)
{
- int i, ret;
+ unsigned int i;
+ int ret;
struct isl29018_scale new_scale;
for (i = 0; i < ARRAY_SIZE(isl29018_scales[chip->int_time]); ++i) {
@@ -276,29 +268,35 @@ static int isl29018_read_proximity_ir(struct isl29018_chip *chip, int scheme,
return 0;
}
-static ssize_t isl29018_show_scale_available(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t in_illuminance_scale_available_show
+ (struct device *dev, struct device_attribute *attr,
+ char *buf)
{
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
struct isl29018_chip *chip = iio_priv(indio_dev);
- int i, len = 0;
+ unsigned int i;
+ int len = 0;
+ mutex_lock(&chip->lock);
for (i = 0; i < ARRAY_SIZE(isl29018_scales[chip->int_time]); ++i)
len += sprintf(buf + len, "%d.%06d ",
isl29018_scales[chip->int_time][i].scale,
isl29018_scales[chip->int_time][i].uscale);
+ mutex_unlock(&chip->lock);
buf[len - 1] = '\n';
return len;
}
-static ssize_t isl29018_show_int_time_available(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t in_illuminance_integration_time_available_show
+ (struct device *dev, struct device_attribute *attr,
+ char *buf)
{
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
struct isl29018_chip *chip = iio_priv(indio_dev);
- int i, len = 0;
+ unsigned int i;
+ int len = 0;
for (i = 0; i < ARRAY_SIZE(isl29018_int_utimes[chip->type]); ++i)
len += sprintf(buf + len, "0.%06d ",
@@ -309,9 +307,27 @@ static ssize_t isl29018_show_int_time_available(struct device *dev,
return len;
}
-static ssize_t isl29018_show_prox_infrared_suppression(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+/*
+ * From ISL29018 Data Sheet (FN6619.4, Oct 8, 2012) regarding the
+ * infrared suppression:
+ *
+ * Proximity Sensing Scheme: Bit 7. This bit programs the function
+ * of the proximity detection. Logic 0 of this bit, Scheme 0, makes
+ * full n (4, 8, 12, 16) bits (unsigned) proximity detection. The range
+ * of Scheme 0 proximity count is from 0 to 2^n. Logic 1 of this bit,
+ * Scheme 1, makes n-1 (3, 7, 11, 15) bits (2's complement)
+ * proximity_less_ambient detection. The range of Scheme 1
+ * proximity count is from -2^(n-1) to 2^(n-1). The sign bit is extended
+ * for resolutions less than 16. While Scheme 0 has wider dynamic
+ * range, Scheme 1 proximity detection is less affected by the
+ * ambient IR noise variation.
+ *
+ * 0 Sensing IR from LED and ambient
+ * 1 Sensing IR from LED with ambient IR rejection
+ */
+static ssize_t proximity_on_chip_ambient_infrared_suppression_show
+ (struct device *dev, struct device_attribute *attr,
+ char *buf)
{
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
struct isl29018_chip *chip = iio_priv(indio_dev);
@@ -323,9 +339,9 @@ static ssize_t isl29018_show_prox_infrared_suppression(struct device *dev,
return sprintf(buf, "%d\n", chip->prox_scheme);
}
-static ssize_t isl29018_store_prox_infrared_suppression(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t proximity_on_chip_ambient_infrared_suppression_store
+ (struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
struct isl29018_chip *chip = iio_priv(indio_dev);
@@ -357,6 +373,10 @@ static int isl29018_write_raw(struct iio_dev *indio_dev,
int ret = -EINVAL;
mutex_lock(&chip->lock);
+ if (chip->suspended) {
+ ret = -EBUSY;
+ goto write_done;
+ }
switch (mask) {
case IIO_CHAN_INFO_CALIBSCALE:
if (chan->type == IIO_LIGHT) {
@@ -366,13 +386,8 @@ static int isl29018_write_raw(struct iio_dev *indio_dev,
}
break;
case IIO_CHAN_INFO_INT_TIME:
- if (chan->type == IIO_LIGHT) {
- if (val) {
- mutex_unlock(&chip->lock);
- return -EINVAL;
- }
+ if (chan->type == IIO_LIGHT && !val)
ret = isl29018_set_integration_time(chip, val2);
- }
break;
case IIO_CHAN_INFO_SCALE:
if (chan->type == IIO_LIGHT)
@@ -381,6 +396,8 @@ static int isl29018_write_raw(struct iio_dev *indio_dev,
default:
break;
}
+
+write_done:
mutex_unlock(&chip->lock);
return ret;
@@ -397,8 +414,8 @@ static int isl29018_read_raw(struct iio_dev *indio_dev,
mutex_lock(&chip->lock);
if (chip->suspended) {
- mutex_unlock(&chip->lock);
- return -EBUSY;
+ ret = -EBUSY;
+ goto read_done;
}
switch (mask) {
case IIO_CHAN_INFO_RAW:
@@ -445,7 +462,10 @@ static int isl29018_read_raw(struct iio_dev *indio_dev,
default:
break;
}
+
+read_done:
mutex_unlock(&chip->lock);
+
return ret;
}
@@ -482,14 +502,9 @@ static const struct iio_chan_spec isl29023_channels[] = {
ISL29018_IR_CHANNEL,
};
-static IIO_DEVICE_ATTR(in_illuminance_integration_time_available, S_IRUGO,
- isl29018_show_int_time_available, NULL, 0);
-static IIO_DEVICE_ATTR(in_illuminance_scale_available, S_IRUGO,
- isl29018_show_scale_available, NULL, 0);
-static IIO_DEVICE_ATTR(proximity_on_chip_ambient_infrared_suppression,
- S_IRUGO | S_IWUSR,
- isl29018_show_prox_infrared_suppression,
- isl29018_store_prox_infrared_suppression, 0);
+static IIO_DEVICE_ATTR_RO(in_illuminance_integration_time_available, 0);
+static IIO_DEVICE_ATTR_RO(in_illuminance_scale_available, 0);
+static IIO_DEVICE_ATTR_RW(proximity_on_chip_ambient_infrared_suppression, 0);
#define ISL29018_DEV_ATTR(name) (&iio_dev_attr_##name.dev_attr.attr)
@@ -514,30 +529,6 @@ static const struct attribute_group isl29023_group = {
.attrs = isl29023_attributes,
};
-static int isl29035_detect(struct isl29018_chip *chip)
-{
- int status;
- unsigned int id;
- struct device *dev = regmap_get_device(chip->regmap);
-
- status = regmap_read(chip->regmap, ISL29035_REG_DEVICE_ID, &id);
- if (status < 0) {
- dev_err(dev,
- "Error reading ID register with error %d\n",
- status);
- return status;
- }
-
- id = (id & ISL29035_DEVICE_ID_MASK) >> ISL29035_DEVICE_ID_SHIFT;
-
- if (id != ISL29035_DEVICE_ID)
- return -ENODEV;
-
- /* Clear brownout bit */
- return regmap_update_bits(chip->regmap, ISL29035_REG_DEVICE_ID,
- ISL29035_BOUT_MASK, 0);
-}
-
enum {
isl29018,
isl29023,
@@ -550,12 +541,31 @@ static int isl29018_chip_init(struct isl29018_chip *chip)
struct device *dev = regmap_get_device(chip->regmap);
if (chip->type == isl29035) {
- status = isl29035_detect(chip);
+ unsigned int id;
+
+ status = regmap_read(chip->regmap, ISL29035_REG_DEVICE_ID, &id);
+ if (status < 0) {
+ dev_err(dev,
+ "Error reading ID register with error %d\n",
+ status);
+ return status;
+ }
+
+ id = (id & ISL29035_DEVICE_ID_MASK) >> ISL29035_DEVICE_ID_SHIFT;
+
+ if (id != ISL29035_DEVICE_ID)
+ return -ENODEV;
+
+ /* Clear brownout bit */
+ status = regmap_update_bits(chip->regmap,
+ ISL29035_REG_DEVICE_ID,
+ ISL29035_BOUT_MASK, 0);
if (status < 0)
return status;
}
- /* Code added per Intersil Application Note 1534:
+ /*
+ * Code added per Intersil Application Note 1534:
* When VDD sinks to approximately 1.8V or below, some of
* the part's registers may change their state. When VDD
* recovers to 2.25V (or greater), the part may thus be in an
@@ -582,7 +592,8 @@ static int isl29018_chip_init(struct isl29018_chip *chip)
return status;
}
- /* See Intersil AN1534 comments above.
+ /*
+ * See Intersil AN1534 comments above.
* "Operating Mode" (COMMAND1) register is reprogrammed when
* data is read from the device.
*/
@@ -605,12 +616,10 @@ static int isl29018_chip_init(struct isl29018_chip *chip)
status = isl29018_set_integration_time(chip,
isl29018_int_utimes[chip->type][chip->int_time]);
- if (status < 0) {
+ if (status < 0)
dev_err(dev, "Init of isl29018 fails\n");
- return status;
- }
- return 0;
+ return status;
}
static const struct iio_info isl29018_info = {
@@ -713,6 +722,7 @@ static int isl29018_probe(struct i2c_client *client,
indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*chip));
if (!indio_dev)
return -ENOMEM;
+
chip = iio_priv(indio_dev);
i2c_set_clientdata(client, indio_dev);
@@ -752,6 +762,7 @@ static int isl29018_probe(struct i2c_client *client,
indio_dev->name = name;
indio_dev->dev.parent = &client->dev;
indio_dev->modes = INDIO_DIRECT_MODE;
+
return devm_iio_device_register(&client->dev, indio_dev);
}
@@ -762,13 +773,15 @@ static int isl29018_suspend(struct device *dev)
mutex_lock(&chip->lock);
- /* Since this driver uses only polling commands, we are by default in
+ /*
+ * Since this driver uses only polling commands, we are by default in
* auto shutdown (ie, power-down) mode.
* So we do not have much to do here.
*/
chip->suspended = true;
mutex_unlock(&chip->lock);
+
return 0;
}
@@ -784,6 +797,7 @@ static int isl29018_resume(struct device *dev)
chip->suspended = false;
mutex_unlock(&chip->lock);
+
return err;
}
@@ -807,7 +821,6 @@ static const struct i2c_device_id isl29018_id[] = {
{"isl29035", isl29035},
{}
};
-
MODULE_DEVICE_TABLE(i2c, isl29018_id);
static const struct of_device_id isl29018_of_match[] = {
diff --git a/drivers/iio/light/ltr501.c b/drivers/iio/light/ltr501.c
index 3afc53a3d0b6..b30e0c1c6cc4 100644
--- a/drivers/iio/light/ltr501.c
+++ b/drivers/iio/light/ltr501.c
@@ -631,14 +631,16 @@ static int ltr501_read_raw(struct iio_dev *indio_dev,
switch (mask) {
case IIO_CHAN_INFO_PROCESSED:
- if (iio_buffer_enabled(indio_dev))
- return -EBUSY;
-
switch (chan->type) {
case IIO_LIGHT:
+ ret = iio_device_claim_direct_mode(indio_dev);
+ if (ret)
+ return ret;
+
mutex_lock(&data->lock_als);
ret = ltr501_read_als(data, buf);
mutex_unlock(&data->lock_als);
+ iio_device_release_direct_mode(indio_dev);
if (ret < 0)
return ret;
*val = ltr501_calculate_lux(le16_to_cpu(buf[1]),
@@ -648,8 +650,9 @@ static int ltr501_read_raw(struct iio_dev *indio_dev,
return -EINVAL;
}
case IIO_CHAN_INFO_RAW:
- if (iio_buffer_enabled(indio_dev))
- return -EBUSY;
+ ret = iio_device_claim_direct_mode(indio_dev);
+ if (ret)
+ return ret;
switch (chan->type) {
case IIO_INTENSITY:
@@ -657,21 +660,28 @@ static int ltr501_read_raw(struct iio_dev *indio_dev,
ret = ltr501_read_als(data, buf);
mutex_unlock(&data->lock_als);
if (ret < 0)
- return ret;
+ break;
*val = le16_to_cpu(chan->address == LTR501_ALS_DATA1 ?
buf[0] : buf[1]);
- return IIO_VAL_INT;
+ ret = IIO_VAL_INT;
+ break;
case IIO_PROXIMITY:
mutex_lock(&data->lock_ps);
ret = ltr501_read_ps(data);
mutex_unlock(&data->lock_ps);
if (ret < 0)
- return ret;
+ break;
*val = ret & LTR501_PS_DATA_MASK;
- return IIO_VAL_INT;
+ ret = IIO_VAL_INT;
+ break;
default:
- return -EINVAL;
+ ret = -EINVAL;
+ break;
}
+
+ iio_device_release_direct_mode(indio_dev);
+ return ret;
+
case IIO_CHAN_INFO_SCALE:
switch (chan->type) {
case IIO_INTENSITY:
@@ -729,8 +739,9 @@ static int ltr501_write_raw(struct iio_dev *indio_dev,
int i, ret, freq_val, freq_val2;
struct ltr501_chip_info *info = data->chip_info;
- if (iio_buffer_enabled(indio_dev))
- return -EBUSY;
+ ret = iio_device_claim_direct_mode(indio_dev);
+ if (ret)
+ return ret;
switch (mask) {
case IIO_CHAN_INFO_SCALE:
@@ -739,85 +750,105 @@ static int ltr501_write_raw(struct iio_dev *indio_dev,
i = ltr501_get_gain_index(info->als_gain,
info->als_gain_tbl_size,
val, val2);
- if (i < 0)
- return -EINVAL;
+ if (i < 0) {
+ ret = -EINVAL;
+ break;
+ }
data->als_contr &= ~info->als_gain_mask;
data->als_contr |= i << info->als_gain_shift;
- return regmap_write(data->regmap, LTR501_ALS_CONTR,
- data->als_contr);
+ ret = regmap_write(data->regmap, LTR501_ALS_CONTR,
+ data->als_contr);
+ break;
case IIO_PROXIMITY:
i = ltr501_get_gain_index(info->ps_gain,
info->ps_gain_tbl_size,
val, val2);
- if (i < 0)
- return -EINVAL;
+ if (i < 0) {
+ ret = -EINVAL;
+ break;
+ }
data->ps_contr &= ~LTR501_CONTR_PS_GAIN_MASK;
data->ps_contr |= i << LTR501_CONTR_PS_GAIN_SHIFT;
- return regmap_write(data->regmap, LTR501_PS_CONTR,
- data->ps_contr);
+ ret = regmap_write(data->regmap, LTR501_PS_CONTR,
+ data->ps_contr);
+ break;
default:
- return -EINVAL;
+ ret = -EINVAL;
+ break;
}
+ break;
+
case IIO_CHAN_INFO_INT_TIME:
switch (chan->type) {
case IIO_INTENSITY:
- if (val != 0)
- return -EINVAL;
+ if (val != 0) {
+ ret = -EINVAL;
+ break;
+ }
mutex_lock(&data->lock_als);
- i = ltr501_set_it_time(data, val2);
+ ret = ltr501_set_it_time(data, val2);
mutex_unlock(&data->lock_als);
- return i;
+ break;
default:
- return -EINVAL;
+ ret = -EINVAL;
+ break;
}
+ break;
+
case IIO_CHAN_INFO_SAMP_FREQ:
switch (chan->type) {
case IIO_INTENSITY:
ret = ltr501_als_read_samp_freq(data, &freq_val,
&freq_val2);
if (ret < 0)
- return ret;
+ break;
ret = ltr501_als_write_samp_freq(data, val, val2);
if (ret < 0)
- return ret;
+ break;
/* update persistence count when changing frequency */
ret = ltr501_write_intr_prst(data, chan->type,
0, data->als_period);
if (ret < 0)
- return ltr501_als_write_samp_freq(data,
- freq_val,
- freq_val2);
- return ret;
+ ret = ltr501_als_write_samp_freq(data, freq_val,
+ freq_val2);
+ break;
case IIO_PROXIMITY:
ret = ltr501_ps_read_samp_freq(data, &freq_val,
&freq_val2);
if (ret < 0)
- return ret;
+ break;
ret = ltr501_ps_write_samp_freq(data, val, val2);
if (ret < 0)
- return ret;
+ break;
/* update persistence count when changing frequency */
ret = ltr501_write_intr_prst(data, chan->type,
0, data->ps_period);
if (ret < 0)
- return ltr501_ps_write_samp_freq(data,
- freq_val,
- freq_val2);
- return ret;
+ ret = ltr501_ps_write_samp_freq(data, freq_val,
+ freq_val2);
+ break;
default:
- return -EINVAL;
+ ret = -EINVAL;
+ break;
}
+ break;
+
+ default:
+ ret = -EINVAL;
+ break;
}
- return -EINVAL;
+
+ iio_device_release_direct_mode(indio_dev);
+ return ret;
}
static int ltr501_read_thresh(struct iio_dev *indio_dev,
diff --git a/drivers/iio/light/max44000.c b/drivers/iio/light/max44000.c
index 6511b20a2a29..a144ca3461fc 100644
--- a/drivers/iio/light/max44000.c
+++ b/drivers/iio/light/max44000.c
@@ -204,17 +204,18 @@ static int max44000_write_alspga(struct max44000_data *data, int val)
static int max44000_read_alsval(struct max44000_data *data)
{
u16 regval;
+ __be16 val;
int alstim, ret;
ret = regmap_bulk_read(data->regmap, MAX44000_REG_ALS_DATA_HI,
- &regval, sizeof(regval));
+ &val, sizeof(val));
if (ret < 0)
return ret;
alstim = ret = max44000_read_alstim(data);
if (ret < 0)
return ret;
- regval = be16_to_cpu(regval);
+ regval = be16_to_cpu(val);
/*
* Overflow is explained on datasheet page 17.
diff --git a/drivers/iio/light/tsl2583.c b/drivers/iio/light/tsl2583.c
new file mode 100644
index 000000000000..a78b6025c465
--- /dev/null
+++ b/drivers/iio/light/tsl2583.c
@@ -0,0 +1,913 @@
+/*
+ * Device driver for monitoring ambient light intensity (lux)
+ * within the TAOS tsl258x family of devices (tsl2580, tsl2581, tsl2583).
+ *
+ * Copyright (c) 2011, TAOS Corporation.
+ * Copyright (c) 2016 Brian Masney <masneyb@onstation.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/i2c.h>
+#include <linux/errno.h>
+#include <linux/delay.h>
+#include <linux/string.h>
+#include <linux/mutex.h>
+#include <linux/unistd.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+
+/* Device Registers and Masks */
+#define TSL2583_CNTRL 0x00
+#define TSL2583_ALS_TIME 0x01
+#define TSL2583_INTERRUPT 0x02
+#define TSL2583_GAIN 0x07
+#define TSL2583_REVID 0x11
+#define TSL2583_CHIPID 0x12
+#define TSL2583_ALS_CHAN0LO 0x14
+#define TSL2583_ALS_CHAN0HI 0x15
+#define TSL2583_ALS_CHAN1LO 0x16
+#define TSL2583_ALS_CHAN1HI 0x17
+#define TSL2583_TMR_LO 0x18
+#define TSL2583_TMR_HI 0x19
+
+/* tsl2583 cmd reg masks */
+#define TSL2583_CMD_REG 0x80
+#define TSL2583_CMD_SPL_FN 0x60
+#define TSL2583_CMD_ALS_INT_CLR 0x01
+
+/* tsl2583 cntrl reg masks */
+#define TSL2583_CNTL_ADC_ENBL 0x02
+#define TSL2583_CNTL_PWR_OFF 0x00
+#define TSL2583_CNTL_PWR_ON 0x01
+
+/* tsl2583 status reg masks */
+#define TSL2583_STA_ADC_VALID 0x01
+#define TSL2583_STA_ADC_INTR 0x10
+
+/* Lux calculation constants */
+#define TSL2583_LUX_CALC_OVER_FLOW 65535
+
+#define TSL2583_INTERRUPT_DISABLED 0x00
+
+#define TSL2583_CHIP_ID 0x90
+#define TSL2583_CHIP_ID_MASK 0xf0
+
+/* Per-device data */
+struct tsl2583_als_info {
+ u16 als_ch0;
+ u16 als_ch1;
+ u16 lux;
+};
+
+struct tsl2583_lux {
+ unsigned int ratio;
+ unsigned int ch0;
+ unsigned int ch1;
+};
+
+static const struct tsl2583_lux tsl2583_default_lux[] = {
+ { 9830, 8520, 15729 },
+ { 12452, 10807, 23344 },
+ { 14746, 6383, 11705 },
+ { 17695, 4063, 6554 },
+ { 0, 0, 0 } /* Termination segment */
+};
+
+#define TSL2583_MAX_LUX_TABLE_ENTRIES 11
+
+struct tsl2583_settings {
+ int als_time;
+ int als_gain;
+ int als_gain_trim;
+ int als_cal_target;
+
+ /*
+ * This structure is intentionally large to accommodate updates via
+ * sysfs. Sized to 11 = max 10 segments + 1 termination segment.
+ * The assumption is that one and only one type of glass is used.
+ */
+ struct tsl2583_lux als_device_lux[TSL2583_MAX_LUX_TABLE_ENTRIES];
+};
+
+struct tsl2583_chip {
+ struct mutex als_mutex;
+ struct i2c_client *client;
+ struct tsl2583_als_info als_cur_info;
+ struct tsl2583_settings als_settings;
+ int als_time_scale;
+ int als_saturation;
+ bool suspended;
+};
+
+struct gainadj {
+ s16 ch0;
+ s16 ch1;
+ s16 mean;
+};
+
+/* Index = (0 - 3) Used to validate the gain selection index */
+static const struct gainadj gainadj[] = {
+ { 1, 1, 1 },
+ { 8, 8, 8 },
+ { 16, 16, 16 },
+ { 107, 115, 111 }
+};
+
+/*
+ * Provides initial operational parameter defaults.
+ * These defaults may be changed through the device's sysfs files.
+ */
+static void tsl2583_defaults(struct tsl2583_chip *chip)
+{
+ /*
+ * The integration time must be a multiple of 50ms and within the
+ * range [50, 600] ms.
+ */
+ chip->als_settings.als_time = 100;
+
+ /*
+ * This is an index into the gainadj table. Assume clear glass as the
+ * default.
+ */
+ chip->als_settings.als_gain = 0;
+
+ /* Default gain trim to account for aperture effects */
+ chip->als_settings.als_gain_trim = 1000;
+
+ /* Known external ALS reading used for calibration */
+ chip->als_settings.als_cal_target = 130;
+
+ /* Default lux table. */
+ memcpy(chip->als_settings.als_device_lux, tsl2583_default_lux,
+ sizeof(tsl2583_default_lux));
+}
+
+/*
+ * Reads and calculates current lux value.
+ * The raw ch0 and ch1 values of the ambient light sensed in the last
+ * integration cycle are read from the device.
+ * Time scale factor array values are adjusted based on the integration time.
+ * The raw values are multiplied by a scale factor, and device gain is obtained
+ * using the gain index. Limit checks are done next, then the ratio of a
+ * multiple of the ch1 value to the ch0 value is calculated. The array
+ * als_device_lux[] declared above is then scanned to find the first ratio just
+ * above the ratio we just calculated. The ch0 and ch1 multiplier constants in
+ * the array are then used along with the time scale factor array values, to
+ * calculate the lux.
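+ *
+ * For example, with the default lux table a ratio of 11000 (in units of
+ * 1/32768) falls in the second segment, so the ch0 and ch1 multipliers
+ * 10807 and 23344 are used to compute the unscaled lux.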
+ */
+static int tsl2583_get_lux(struct iio_dev *indio_dev)
+{
+ u16 ch0, ch1; /* separated ch0/ch1 data from device */
+ u32 lux; /* raw lux calculated from device data */
+ u64 lux64;
+ u32 ratio;
+	u8 buf[4];
+ struct tsl2583_lux *p;
+ struct tsl2583_chip *chip = iio_priv(indio_dev);
+ int i, ret;
+
+ ret = i2c_smbus_read_byte_data(chip->client, TSL2583_CMD_REG);
+ if (ret < 0) {
+ dev_err(&chip->client->dev, "%s: failed to read CMD_REG register\n",
+ __func__);
+ goto done;
+ }
+
+ /* is data new & valid */
+ if (!(ret & TSL2583_STA_ADC_INTR)) {
+ dev_err(&chip->client->dev, "%s: data not valid; returning last value\n",
+ __func__);
+ ret = chip->als_cur_info.lux; /* return LAST VALUE */
+ goto done;
+ }
+
+ for (i = 0; i < 4; i++) {
+ int reg = TSL2583_CMD_REG | (TSL2583_ALS_CHAN0LO + i);
+
+ ret = i2c_smbus_read_byte_data(chip->client, reg);
+ if (ret < 0) {
+ dev_err(&chip->client->dev, "%s: failed to read register %x\n",
+ __func__, reg);
+ goto done;
+ }
+ buf[i] = ret;
+ }
+
+ /*
+ * Clear the pending interrupt status bit on the chip to allow the next
+ * integration cycle to start. This has to be done even though this
+ * driver currently does not support interrupts.
+ */
+ ret = i2c_smbus_write_byte(chip->client,
+ (TSL2583_CMD_REG | TSL2583_CMD_SPL_FN |
+ TSL2583_CMD_ALS_INT_CLR));
+ if (ret < 0) {
+ dev_err(&chip->client->dev, "%s: failed to clear the interrupt bit\n",
+ __func__);
+ goto done; /* have no data, so return failure */
+ }
+
+ /* extract ALS/lux data */
+ ch0 = le16_to_cpup((const __le16 *)&buf[0]);
+ ch1 = le16_to_cpup((const __le16 *)&buf[2]);
+
+ chip->als_cur_info.als_ch0 = ch0;
+ chip->als_cur_info.als_ch1 = ch1;
+
+ if ((ch0 >= chip->als_saturation) || (ch1 >= chip->als_saturation))
+ goto return_max;
+
+ if (!ch0) {
+ /*
+ * The sensor appears to be in total darkness so set the
+ * calculated lux to 0 and return early to avoid a division by
+ * zero below when calculating the ratio.
+ */
+ ret = 0;
+ chip->als_cur_info.lux = 0;
+ goto done;
+ }
+
+ /* calculate ratio */
+ ratio = (ch1 << 15) / ch0;
+
+ /* convert to unscaled lux using the pointer to the table */
+ for (p = (struct tsl2583_lux *)chip->als_settings.als_device_lux;
+ p->ratio != 0 && p->ratio < ratio; p++)
+ ;
+
+ if (p->ratio == 0) {
+ lux = 0;
+ } else {
+ u32 ch0lux, ch1lux;
+
+ ch0lux = ((ch0 * p->ch0) +
+ (gainadj[chip->als_settings.als_gain].ch0 >> 1))
+ / gainadj[chip->als_settings.als_gain].ch0;
+ ch1lux = ((ch1 * p->ch1) +
+ (gainadj[chip->als_settings.als_gain].ch1 >> 1))
+ / gainadj[chip->als_settings.als_gain].ch1;
+
+ /* note: lux is 31 bit max at this point */
+ if (ch1lux > ch0lux) {
+ dev_dbg(&chip->client->dev, "%s: No Data - Returning 0\n",
+ __func__);
+ ret = 0;
+ chip->als_cur_info.lux = 0;
+ goto done;
+ }
+
+ lux = ch0lux - ch1lux;
+ }
+
+ /* adjust for active time scale */
+ if (chip->als_time_scale == 0)
+ lux = 0;
+ else
+ lux = (lux + (chip->als_time_scale >> 1)) /
+ chip->als_time_scale;
+
+ /*
+ * Adjust for active gain scale.
+ * The tsl2583_default_lux tables above have a factor of 8192 built in,
+ * so we need to shift right.
+ * User-specified gain provides a multiplier.
+ * Apply user-specified gain before shifting right to retain precision.
+ * Use 64 bits to avoid overflow on multiplication.
+ * Then go back to 32 bits before division to avoid using div_u64().
+ */
+ lux64 = lux;
+ lux64 = lux64 * chip->als_settings.als_gain_trim;
+ lux64 >>= 13;
+ lux = lux64;
+ lux = (lux + 500) / 1000;
+
+ if (lux > TSL2583_LUX_CALC_OVER_FLOW) { /* check for overflow */
+return_max:
+ lux = TSL2583_LUX_CALC_OVER_FLOW;
+ }
+
+ /* Update the structure with the latest VALID lux. */
+ chip->als_cur_info.lux = lux;
+ ret = lux;
+
+done:
+ return ret;
+}
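+
+/*
+ * Worked example of the pipeline above (illustrative values only): with
+ * ch0 = 1000 and ch1 = 300 the ratio is (300 << 15) / 1000 = 9830, which
+ * selects the first default table segment { 9830, 8520, 15729 }. At gain
+ * index 0 this yields ch0lux = 1000 * 8520 and ch1lux = 300 * 15729, so
+ * lux = 8520000 - 4718700 = 3801300 before the time scale, the gain trim
+ * (>> 13) and the final / 1000 rounding are applied.
+ */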
+
+/*
+ * Obtain a single reading and calculate the als_gain_trim (later used
+ * to derive the actual lux).
+ * Returns 0 on success; the updated gain trim is stored in als_settings.
+ */
+static int tsl2583_als_calibrate(struct iio_dev *indio_dev)
+{
+ struct tsl2583_chip *chip = iio_priv(indio_dev);
+ unsigned int gain_trim_val;
+ int ret;
+ int lux_val;
+
+ ret = i2c_smbus_read_byte_data(chip->client,
+ TSL2583_CMD_REG | TSL2583_CNTRL);
+ if (ret < 0) {
+ dev_err(&chip->client->dev,
+ "%s: failed to read from the CNTRL register\n",
+ __func__);
+ return ret;
+ }
+
+ if ((ret & (TSL2583_CNTL_ADC_ENBL | TSL2583_CNTL_PWR_ON))
+ != (TSL2583_CNTL_ADC_ENBL | TSL2583_CNTL_PWR_ON)) {
+ dev_err(&chip->client->dev,
+ "%s: Device is not powered on and/or ADC is not enabled\n",
+ __func__);
+ return -EINVAL;
+ } else if ((ret & TSL2583_STA_ADC_VALID) != TSL2583_STA_ADC_VALID) {
+ dev_err(&chip->client->dev,
+ "%s: The two ADC channels have not completed an integration cycle\n",
+ __func__);
+ return -ENODATA;
+ }
+
+ lux_val = tsl2583_get_lux(indio_dev);
+ if (lux_val < 0) {
+ dev_err(&chip->client->dev, "%s: failed to get lux\n",
+ __func__);
+ return lux_val;
+ }
+
+ gain_trim_val = (unsigned int)(((chip->als_settings.als_cal_target)
+ * chip->als_settings.als_gain_trim) / lux_val);
+ if ((gain_trim_val < 250) || (gain_trim_val > 4000)) {
+ dev_err(&chip->client->dev,
+ "%s: trim_val of %d is not within the range [250, 4000]\n",
+ __func__, gain_trim_val);
+ return -ENODATA;
+ }
+
+ chip->als_settings.als_gain_trim = (int)gain_trim_val;
+
+ return 0;
+}
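+
+/*
+ * Calibration example (illustrative): with als_cal_target = 130, a current
+ * gain trim of 1000 and a measured lux of 100, the new trim becomes
+ * 130 * 1000 / 100 = 1300, which is inside the accepted [250, 4000] window.
+ */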
+
+static int tsl2583_set_als_time(struct tsl2583_chip *chip)
+{
+ int als_count, als_time, ret;
+ u8 val;
+
+ /* determine als integration register */
+ als_count = (chip->als_settings.als_time * 100 + 135) / 270;
+ if (!als_count)
+ als_count = 1; /* ensure at least one cycle */
+
+	/* convert the count back to the effective integration time */
+ als_time = (als_count * 27 + 5) / 10;
+
+ val = 256 - als_count;
+ ret = i2c_smbus_write_byte_data(chip->client,
+ TSL2583_CMD_REG | TSL2583_ALS_TIME,
+ val);
+ if (ret < 0) {
+ dev_err(&chip->client->dev, "%s: failed to set the als time to %d\n",
+ __func__, val);
+ return ret;
+ }
+
+	/* update the chip struct with the scaling and saturation values */
+ chip->als_saturation = als_count * 922; /* 90% of full scale */
+ chip->als_time_scale = (als_time + 25) / 50;
+
+ return ret;
+}
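+
+/*
+ * Quantization example for the above (illustrative): for the default
+ * als_time of 100 ms, als_count = (100 * 100 + 135) / 270 = 37, the
+ * register value is 256 - 37 = 219, the effective time rounds back to
+ * (37 * 27 + 5) / 10 = 100 ms, als_saturation = 37 * 922 = 34114 and
+ * als_time_scale = (100 + 25) / 50 = 2.
+ */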
+
+static int tsl2583_set_als_gain(struct tsl2583_chip *chip)
+{
+ int ret;
+
+ /* Set the gain based on als_settings struct */
+ ret = i2c_smbus_write_byte_data(chip->client,
+ TSL2583_CMD_REG | TSL2583_GAIN,
+ chip->als_settings.als_gain);
+ if (ret < 0)
+ dev_err(&chip->client->dev,
+ "%s: failed to set the gain to %d\n", __func__,
+ chip->als_settings.als_gain);
+
+ return ret;
+}
+
+static int tsl2583_set_power_state(struct tsl2583_chip *chip, u8 state)
+{
+ int ret;
+
+ ret = i2c_smbus_write_byte_data(chip->client,
+ TSL2583_CMD_REG | TSL2583_CNTRL, state);
+ if (ret < 0)
+ dev_err(&chip->client->dev,
+ "%s: failed to set the power state to %d\n", __func__,
+ state);
+
+ return ret;
+}
+
+/*
+ * Turn the device on.
+ * The als_settings must be populated before calling this function.
+ */
+static int tsl2583_chip_init_and_power_on(struct iio_dev *indio_dev)
+{
+ struct tsl2583_chip *chip = iio_priv(indio_dev);
+ int ret;
+
+ /* Power on the device; ADC off. */
+ ret = tsl2583_set_power_state(chip, TSL2583_CNTL_PWR_ON);
+ if (ret < 0)
+ return ret;
+
+ ret = i2c_smbus_write_byte_data(chip->client,
+ TSL2583_CMD_REG | TSL2583_INTERRUPT,
+ TSL2583_INTERRUPT_DISABLED);
+ if (ret < 0) {
+ dev_err(&chip->client->dev,
+ "%s: failed to disable interrupts\n", __func__);
+ return ret;
+ }
+
+ ret = tsl2583_set_als_time(chip);
+ if (ret < 0)
+ return ret;
+
+ ret = tsl2583_set_als_gain(chip);
+ if (ret < 0)
+ return ret;
+
+ usleep_range(3000, 3500);
+
+ ret = tsl2583_set_power_state(chip, TSL2583_CNTL_PWR_ON |
+ TSL2583_CNTL_ADC_ENBL);
+ if (ret < 0)
+ return ret;
+
+ chip->suspended = false;
+
+ return ret;
+}
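+
+/*
+ * Note on the sequence above: the device is first powered on with the ADC
+ * disabled so the interrupt, timing and gain registers can be programmed,
+ * then the ADC is enabled after a short (~3 ms) settling delay.
+ */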
+
+/* Sysfs Interface Functions */
+
+static ssize_t in_illuminance_input_target_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+ struct tsl2583_chip *chip = iio_priv(indio_dev);
+ int ret;
+
+ mutex_lock(&chip->als_mutex);
+ ret = sprintf(buf, "%d\n", chip->als_settings.als_cal_target);
+ mutex_unlock(&chip->als_mutex);
+
+ return ret;
+}
+
+static ssize_t in_illuminance_input_target_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+ struct tsl2583_chip *chip = iio_priv(indio_dev);
+ int value;
+
+ if (kstrtoint(buf, 0, &value) || !value)
+ return -EINVAL;
+
+ mutex_lock(&chip->als_mutex);
+ chip->als_settings.als_cal_target = value;
+ mutex_unlock(&chip->als_mutex);
+
+ return len;
+}
+
+static ssize_t in_illuminance_calibrate_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+ struct tsl2583_chip *chip = iio_priv(indio_dev);
+ int value, ret;
+
+ if (kstrtoint(buf, 0, &value) || value != 1)
+ return -EINVAL;
+
+ mutex_lock(&chip->als_mutex);
+
+ if (chip->suspended) {
+ ret = -EBUSY;
+ goto done;
+ }
+
+ ret = tsl2583_als_calibrate(indio_dev);
+ if (ret < 0)
+ goto done;
+
+ ret = len;
+done:
+ mutex_unlock(&chip->als_mutex);
+
+ return ret;
+}
+
+static ssize_t in_illuminance_lux_table_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+ struct tsl2583_chip *chip = iio_priv(indio_dev);
+ unsigned int i;
+ int offset = 0;
+
+ for (i = 0; i < ARRAY_SIZE(chip->als_settings.als_device_lux); i++) {
+ offset += sprintf(buf + offset, "%u,%u,%u,",
+ chip->als_settings.als_device_lux[i].ratio,
+ chip->als_settings.als_device_lux[i].ch0,
+ chip->als_settings.als_device_lux[i].ch1);
+ if (chip->als_settings.als_device_lux[i].ratio == 0) {
+ /*
+ * We just printed the first "0" entry.
+ * Now get rid of the extra "," and break.
+ */
+ offset--;
+ break;
+ }
+ }
+
+ offset += sprintf(buf + offset, "\n");
+
+ return offset;
+}
+
+static ssize_t in_illuminance_lux_table_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+ struct tsl2583_chip *chip = iio_priv(indio_dev);
+ const unsigned int max_ints = TSL2583_MAX_LUX_TABLE_ENTRIES * 3;
+ int value[TSL2583_MAX_LUX_TABLE_ENTRIES * 3 + 1];
+ int ret = -EINVAL;
+ unsigned int n;
+
+ mutex_lock(&chip->als_mutex);
+
+ get_options(buf, ARRAY_SIZE(value), value);
+
+ /*
+	 * We now have an array of ints starting at value[1], with the
+	 * number of ints in value[0].
+	 * We expect each group of three ints to be one table entry,
+	 * and the last table entry to be all zeros.
+ */
+ n = value[0];
+ if ((n % 3) || n < 6 || n > max_ints) {
+ dev_err(dev,
+ "%s: The number of entries in the lux table must be a multiple of 3 and within the range [6, %d]\n",
+ __func__, max_ints);
+ goto done;
+ }
+ if ((value[n - 2] | value[n - 1] | value[n]) != 0) {
+ dev_err(dev, "%s: The last 3 entries in the lux table must be zeros.\n",
+ __func__);
+ goto done;
+ }
+
+ memcpy(chip->als_settings.als_device_lux, &value[1],
+ value[0] * sizeof(value[1]));
+
+ ret = len;
+
+done:
+ mutex_unlock(&chip->als_mutex);
+
+ return ret;
+}
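+
+/*
+ * Example table write (illustrative): user space supplies comma-separated
+ * ratio,ch0,ch1 triplets terminated by an all-zero entry, e.g.
+ *   echo "9830,8520,15729,12452,10807,23344,0,0,0" > in_illuminance_lux_table
+ */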
+
+static IIO_CONST_ATTR(in_illuminance_calibscale_available, "1 8 16 111");
+static IIO_CONST_ATTR(in_illuminance_integration_time_available,
+ "0.000050 0.000100 0.000150 0.000200 0.000250 0.000300 0.000350 0.000400 0.000450 0.000500 0.000550 0.000600 0.000650");
+static IIO_DEVICE_ATTR_RW(in_illuminance_input_target, 0);
+static IIO_DEVICE_ATTR_WO(in_illuminance_calibrate, 0);
+static IIO_DEVICE_ATTR_RW(in_illuminance_lux_table, 0);
+
+static struct attribute *sysfs_attrs_ctrl[] = {
+ &iio_const_attr_in_illuminance_calibscale_available.dev_attr.attr,
+ &iio_const_attr_in_illuminance_integration_time_available.dev_attr.attr,
+ &iio_dev_attr_in_illuminance_input_target.dev_attr.attr,
+ &iio_dev_attr_in_illuminance_calibrate.dev_attr.attr,
+ &iio_dev_attr_in_illuminance_lux_table.dev_attr.attr,
+ NULL
+};
+
+static const struct attribute_group tsl2583_attribute_group = {
+ .attrs = sysfs_attrs_ctrl,
+};
+
+static const struct iio_chan_spec tsl2583_channels[] = {
+ {
+ .type = IIO_LIGHT,
+ .modified = 1,
+ .channel2 = IIO_MOD_LIGHT_IR,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+ },
+ {
+ .type = IIO_LIGHT,
+ .modified = 1,
+ .channel2 = IIO_MOD_LIGHT_BOTH,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+ },
+ {
+ .type = IIO_LIGHT,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED) |
+ BIT(IIO_CHAN_INFO_CALIBBIAS) |
+ BIT(IIO_CHAN_INFO_CALIBSCALE) |
+ BIT(IIO_CHAN_INFO_INT_TIME),
+ },
+};
+
+static int tsl2583_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ struct tsl2583_chip *chip = iio_priv(indio_dev);
+ int ret = -EINVAL;
+
+ mutex_lock(&chip->als_mutex);
+
+ if (chip->suspended) {
+ ret = -EBUSY;
+ goto read_done;
+ }
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ if (chan->type == IIO_LIGHT) {
+ ret = tsl2583_get_lux(indio_dev);
+ if (ret < 0)
+ goto read_done;
+
+ /*
+ * From page 20 of the TSL2581, TSL2583 data
+			 * sheet (TAOS134 - MARCH 2011):
+ *
+ * One of the photodiodes (channel 0) is
+ * sensitive to both visible and infrared light,
+ * while the second photodiode (channel 1) is
+ * sensitive primarily to infrared light.
+ */
+ if (chan->channel2 == IIO_MOD_LIGHT_BOTH)
+ *val = chip->als_cur_info.als_ch0;
+ else
+ *val = chip->als_cur_info.als_ch1;
+
+ ret = IIO_VAL_INT;
+ }
+ break;
+ case IIO_CHAN_INFO_PROCESSED:
+ if (chan->type == IIO_LIGHT) {
+ ret = tsl2583_get_lux(indio_dev);
+ if (ret < 0)
+ goto read_done;
+
+ *val = ret;
+ ret = IIO_VAL_INT;
+ }
+ break;
+ case IIO_CHAN_INFO_CALIBBIAS:
+ if (chan->type == IIO_LIGHT) {
+ *val = chip->als_settings.als_gain_trim;
+ ret = IIO_VAL_INT;
+ }
+ break;
+ case IIO_CHAN_INFO_CALIBSCALE:
+ if (chan->type == IIO_LIGHT) {
+ *val = gainadj[chip->als_settings.als_gain].mean;
+ ret = IIO_VAL_INT;
+ }
+ break;
+ case IIO_CHAN_INFO_INT_TIME:
+ if (chan->type == IIO_LIGHT) {
+ *val = 0;
+ *val2 = chip->als_settings.als_time;
+ ret = IIO_VAL_INT_PLUS_MICRO;
+ }
+ break;
+ default:
+ break;
+ }
+
+read_done:
+ mutex_unlock(&chip->als_mutex);
+
+ return ret;
+}
+
+static int tsl2583_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val, int val2, long mask)
+{
+ struct tsl2583_chip *chip = iio_priv(indio_dev);
+ int ret = -EINVAL;
+
+ mutex_lock(&chip->als_mutex);
+
+ if (chip->suspended) {
+ ret = -EBUSY;
+ goto write_done;
+ }
+
+ switch (mask) {
+ case IIO_CHAN_INFO_CALIBBIAS:
+ if (chan->type == IIO_LIGHT) {
+ chip->als_settings.als_gain_trim = val;
+ ret = 0;
+ }
+ break;
+ case IIO_CHAN_INFO_CALIBSCALE:
+ if (chan->type == IIO_LIGHT) {
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(gainadj); i++) {
+ if (gainadj[i].mean == val) {
+ chip->als_settings.als_gain = i;
+ ret = tsl2583_set_als_gain(chip);
+ break;
+ }
+ }
+ }
+ break;
+ case IIO_CHAN_INFO_INT_TIME:
+ if (chan->type == IIO_LIGHT && !val && val2 >= 50 &&
+ val2 <= 650 && !(val2 % 50)) {
+ chip->als_settings.als_time = val2;
+ ret = tsl2583_set_als_time(chip);
+ }
+ break;
+ default:
+ break;
+ }
+
+write_done:
+ mutex_unlock(&chip->als_mutex);
+
+ return ret;
+}
+
+static const struct iio_info tsl2583_info = {
+ .attrs = &tsl2583_attribute_group,
+ .driver_module = THIS_MODULE,
+ .read_raw = tsl2583_read_raw,
+ .write_raw = tsl2583_write_raw,
+};
+
+static int tsl2583_probe(struct i2c_client *clientp,
+ const struct i2c_device_id *idp)
+{
+ int ret;
+ struct tsl2583_chip *chip;
+ struct iio_dev *indio_dev;
+
+ if (!i2c_check_functionality(clientp->adapter,
+ I2C_FUNC_SMBUS_BYTE_DATA)) {
+ dev_err(&clientp->dev, "%s: i2c smbus byte data functionality is unsupported\n",
+ __func__);
+ return -EOPNOTSUPP;
+ }
+
+ indio_dev = devm_iio_device_alloc(&clientp->dev, sizeof(*chip));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ chip = iio_priv(indio_dev);
+ chip->client = clientp;
+ i2c_set_clientdata(clientp, indio_dev);
+
+ mutex_init(&chip->als_mutex);
+ chip->suspended = true;
+
+ ret = i2c_smbus_read_byte_data(clientp,
+ TSL2583_CMD_REG | TSL2583_CHIPID);
+ if (ret < 0) {
+ dev_err(&clientp->dev,
+ "%s: failed to read the chip ID register\n", __func__);
+ return ret;
+ }
+
+ if ((ret & TSL2583_CHIP_ID_MASK) != TSL2583_CHIP_ID) {
+ dev_err(&clientp->dev, "%s: received an unknown chip ID %x\n",
+ __func__, ret);
+ return -EINVAL;
+ }
+
+ indio_dev->info = &tsl2583_info;
+ indio_dev->channels = tsl2583_channels;
+ indio_dev->num_channels = ARRAY_SIZE(tsl2583_channels);
+ indio_dev->dev.parent = &clientp->dev;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->name = chip->client->name;
+
+ ret = devm_iio_device_register(indio_dev->dev.parent, indio_dev);
+ if (ret) {
+ dev_err(&clientp->dev, "%s: iio registration failed\n",
+ __func__);
+ return ret;
+ }
+
+	/* Load up the V2 defaults (these are hard-coded defaults for now) */
+ tsl2583_defaults(chip);
+
+ /* Make sure the chip is on */
+ ret = tsl2583_chip_init_and_power_on(indio_dev);
+ if (ret < 0)
+ return ret;
+
+ dev_info(&clientp->dev, "Light sensor found.\n");
+
+ return 0;
+}
+
+static int __maybe_unused tsl2583_suspend(struct device *dev)
+{
+ struct iio_dev *indio_dev = i2c_get_clientdata(to_i2c_client(dev));
+ struct tsl2583_chip *chip = iio_priv(indio_dev);
+ int ret;
+
+ mutex_lock(&chip->als_mutex);
+
+ ret = tsl2583_set_power_state(chip, TSL2583_CNTL_PWR_OFF);
+ chip->suspended = true;
+
+ mutex_unlock(&chip->als_mutex);
+
+ return ret;
+}
+
+static int __maybe_unused tsl2583_resume(struct device *dev)
+{
+ struct iio_dev *indio_dev = i2c_get_clientdata(to_i2c_client(dev));
+ struct tsl2583_chip *chip = iio_priv(indio_dev);
+ int ret;
+
+ mutex_lock(&chip->als_mutex);
+
+ ret = tsl2583_chip_init_and_power_on(indio_dev);
+
+ mutex_unlock(&chip->als_mutex);
+
+ return ret;
+}
+
+static SIMPLE_DEV_PM_OPS(tsl2583_pm_ops, tsl2583_suspend, tsl2583_resume);
+
+static const struct i2c_device_id tsl2583_idtable[] = {
+ { "tsl2580", 0 },
+ { "tsl2581", 1 },
+ { "tsl2583", 2 },
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, tsl2583_idtable);
+
+static const struct of_device_id tsl2583_of_match[] = {
+ { .compatible = "amstaos,tsl2580", },
+ { .compatible = "amstaos,tsl2581", },
+ { .compatible = "amstaos,tsl2583", },
+ { },
+};
+MODULE_DEVICE_TABLE(of, tsl2583_of_match);
+
+/* Driver definition */
+static struct i2c_driver tsl2583_driver = {
+ .driver = {
+ .name = "tsl2583",
+ .pm = &tsl2583_pm_ops,
+ .of_match_table = tsl2583_of_match,
+ },
+ .id_table = tsl2583_idtable,
+ .probe = tsl2583_probe,
+};
+module_i2c_driver(tsl2583_driver);
+
+MODULE_AUTHOR("J. August Brenner <jbrenner@taosinc.com>");
+MODULE_AUTHOR("Brian Masney <masneyb@onstation.org>");
+MODULE_DESCRIPTION("TAOS tsl2583 ambient light sensor driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/iio/magnetometer/ak8974.c b/drivers/iio/magnetometer/ak8974.c
index 217353145676..ce09d771c1fb 100644
--- a/drivers/iio/magnetometer/ak8974.c
+++ b/drivers/iio/magnetometer/ak8974.c
@@ -287,7 +287,7 @@ static int ak8974_await_drdy(struct ak8974 *ak8974)
return 0;
}
-static int ak8974_getresult(struct ak8974 *ak8974, s16 *result)
+static int ak8974_getresult(struct ak8974 *ak8974, __le16 *result)
{
unsigned int src;
int ret;
@@ -395,7 +395,7 @@ static int ak8974_selftest(struct ak8974 *ak8974)
static int ak8974_get_u16_val(struct ak8974 *ak8974, u8 reg, u16 *val)
{
int ret;
- u16 bulk;
+ __le16 bulk;
ret = regmap_bulk_read(ak8974->map, reg, &bulk, 2);
if (ret)
@@ -453,7 +453,7 @@ static int ak8974_read_raw(struct iio_dev *indio_dev,
long mask)
{
struct ak8974 *ak8974 = iio_priv(indio_dev);
- s16 hw_values[3];
+ __le16 hw_values[3];
int ret = -EINVAL;
pm_runtime_get_sync(&ak8974->i2c->dev);
@@ -494,7 +494,7 @@ static void ak8974_fill_buffer(struct iio_dev *indio_dev)
{
struct ak8974 *ak8974 = iio_priv(indio_dev);
int ret;
- s16 hw_values[8]; /* Three axes + 64bit padding */
+ __le16 hw_values[8]; /* Three axes + 64bit padding */
pm_runtime_get_sync(&ak8974->i2c->dev);
mutex_lock(&ak8974->lock);
diff --git a/drivers/iio/magnetometer/ak8975.c b/drivers/iio/magnetometer/ak8975.c
index af8606cc7812..825369fb1c57 100644
--- a/drivers/iio/magnetometer/ak8975.c
+++ b/drivers/iio/magnetometer/ak8975.c
@@ -690,6 +690,7 @@ static int ak8975_read_axis(struct iio_dev *indio_dev, int index, int *val)
struct ak8975_data *data = iio_priv(indio_dev);
const struct i2c_client *client = data->client;
const struct ak_def *def = data->def;
+ __le16 rval;
u16 buff;
int ret;
@@ -703,7 +704,7 @@ static int ak8975_read_axis(struct iio_dev *indio_dev, int index, int *val)
ret = i2c_smbus_read_i2c_block_data_or_emulated(
client, def->data_regs[index],
- sizeof(buff), (u8*)&buff);
+ sizeof(rval), (u8*)&rval);
if (ret < 0)
goto exit;
@@ -713,7 +714,7 @@ static int ak8975_read_axis(struct iio_dev *indio_dev, int index, int *val)
pm_runtime_put_autosuspend(&data->client->dev);
/* Swap bytes and convert to valid range. */
- buff = le16_to_cpu(buff);
+ buff = le16_to_cpu(rval);
*val = clamp_t(s16, buff, -def->range, def->range);
return IIO_VAL_INT;
@@ -813,6 +814,7 @@ static void ak8975_fill_buffer(struct iio_dev *indio_dev)
const struct ak_def *def = data->def;
int ret;
s16 buff[8]; /* 3 x 16 bits axis values + 1 aligned 64 bits timestamp */
+ __le16 fval[3];
mutex_lock(&data->lock);
@@ -826,17 +828,17 @@ static void ak8975_fill_buffer(struct iio_dev *indio_dev)
*/
ret = i2c_smbus_read_i2c_block_data_or_emulated(client,
def->data_regs[0],
- 3 * sizeof(buff[0]),
- (u8 *)buff);
+ 3 * sizeof(fval[0]),
+ (u8 *)fval);
if (ret < 0)
goto unlock;
mutex_unlock(&data->lock);
/* Clamp to valid range. */
- buff[0] = clamp_t(s16, le16_to_cpu(buff[0]), -def->range, def->range);
- buff[1] = clamp_t(s16, le16_to_cpu(buff[1]), -def->range, def->range);
- buff[2] = clamp_t(s16, le16_to_cpu(buff[2]), -def->range, def->range);
+ buff[0] = clamp_t(s16, le16_to_cpu(fval[0]), -def->range, def->range);
+ buff[1] = clamp_t(s16, le16_to_cpu(fval[1]), -def->range, def->range);
+ buff[2] = clamp_t(s16, le16_to_cpu(fval[2]), -def->range, def->range);
iio_push_to_buffers_with_timestamp(indio_dev, buff,
iio_get_time_ns(indio_dev));
diff --git a/drivers/iio/magnetometer/hid-sensor-magn-3d.c b/drivers/iio/magnetometer/hid-sensor-magn-3d.c
index d8a0c8da8db0..0e791b02ed4a 100644
--- a/drivers/iio/magnetometer/hid-sensor-magn-3d.c
+++ b/drivers/iio/magnetometer/hid-sensor-magn-3d.c
@@ -42,9 +42,17 @@ enum magn_3d_channel {
MAGN_3D_CHANNEL_MAX,
};
+struct common_attributes {
+ int scale_pre_decml;
+ int scale_post_decml;
+ int scale_precision;
+ int value_offset;
+};
+
struct magn_3d_state {
struct hid_sensor_hub_callbacks callbacks;
- struct hid_sensor_common common_attributes;
+ struct hid_sensor_common magn_flux_attributes;
+ struct hid_sensor_common rot_attributes;
struct hid_sensor_hub_attribute_info magn[MAGN_3D_CHANNEL_MAX];
/* dynamically sized array to hold sensor values */
@@ -52,10 +60,8 @@ struct magn_3d_state {
/* array of pointers to sensor value */
u32 *magn_val_addr[MAGN_3D_CHANNEL_MAX];
- int scale_pre_decml;
- int scale_post_decml;
- int scale_precision;
- int value_offset;
+ struct common_attributes magn_flux_attr;
+ struct common_attributes rot_attr;
};
static const u32 magn_3d_addresses[MAGN_3D_CHANNEL_MAX] = {
@@ -162,41 +168,74 @@ static int magn_3d_read_raw(struct iio_dev *indio_dev,
*val2 = 0;
switch (mask) {
case 0:
- hid_sensor_power_state(&magn_state->common_attributes, true);
+ hid_sensor_power_state(&magn_state->magn_flux_attributes, true);
report_id =
magn_state->magn[chan->address].report_id;
address = magn_3d_addresses[chan->address];
if (report_id >= 0)
*val = sensor_hub_input_attr_get_raw_value(
- magn_state->common_attributes.hsdev,
+ magn_state->magn_flux_attributes.hsdev,
HID_USAGE_SENSOR_COMPASS_3D, address,
report_id,
SENSOR_HUB_SYNC);
else {
*val = 0;
- hid_sensor_power_state(&magn_state->common_attributes,
- false);
+ hid_sensor_power_state(
+ &magn_state->magn_flux_attributes,
+ false);
return -EINVAL;
}
- hid_sensor_power_state(&magn_state->common_attributes, false);
+ hid_sensor_power_state(&magn_state->magn_flux_attributes,
+ false);
ret_type = IIO_VAL_INT;
break;
case IIO_CHAN_INFO_SCALE:
- *val = magn_state->scale_pre_decml;
- *val2 = magn_state->scale_post_decml;
- ret_type = magn_state->scale_precision;
+ switch (chan->type) {
+ case IIO_MAGN:
+ *val = magn_state->magn_flux_attr.scale_pre_decml;
+ *val2 = magn_state->magn_flux_attr.scale_post_decml;
+ ret_type = magn_state->magn_flux_attr.scale_precision;
+ break;
+ case IIO_ROT:
+ *val = magn_state->rot_attr.scale_pre_decml;
+ *val2 = magn_state->rot_attr.scale_post_decml;
+ ret_type = magn_state->rot_attr.scale_precision;
+ break;
+ default:
+ ret_type = -EINVAL;
+ }
break;
case IIO_CHAN_INFO_OFFSET:
- *val = magn_state->value_offset;
- ret_type = IIO_VAL_INT;
+ switch (chan->type) {
+ case IIO_MAGN:
+ *val = magn_state->magn_flux_attr.value_offset;
+ ret_type = IIO_VAL_INT;
+ break;
+ case IIO_ROT:
+ *val = magn_state->rot_attr.value_offset;
+ ret_type = IIO_VAL_INT;
+ break;
+ default:
+ ret_type = -EINVAL;
+ }
break;
case IIO_CHAN_INFO_SAMP_FREQ:
ret_type = hid_sensor_read_samp_freq_value(
- &magn_state->common_attributes, val, val2);
+ &magn_state->magn_flux_attributes, val, val2);
break;
case IIO_CHAN_INFO_HYSTERESIS:
- ret_type = hid_sensor_read_raw_hyst_value(
- &magn_state->common_attributes, val, val2);
+ switch (chan->type) {
+ case IIO_MAGN:
+ ret_type = hid_sensor_read_raw_hyst_value(
+ &magn_state->magn_flux_attributes, val, val2);
+ break;
+ case IIO_ROT:
+ ret_type = hid_sensor_read_raw_hyst_value(
+ &magn_state->rot_attributes, val, val2);
+ break;
+ default:
+ ret_type = -EINVAL;
+ }
break;
default:
ret_type = -EINVAL;
@@ -219,11 +258,21 @@ static int magn_3d_write_raw(struct iio_dev *indio_dev,
switch (mask) {
case IIO_CHAN_INFO_SAMP_FREQ:
ret = hid_sensor_write_samp_freq_value(
- &magn_state->common_attributes, val, val2);
+ &magn_state->magn_flux_attributes, val, val2);
break;
case IIO_CHAN_INFO_HYSTERESIS:
- ret = hid_sensor_write_raw_hyst_value(
- &magn_state->common_attributes, val, val2);
+ switch (chan->type) {
+ case IIO_MAGN:
+ ret = hid_sensor_write_raw_hyst_value(
+ &magn_state->magn_flux_attributes, val, val2);
+ break;
+ case IIO_ROT:
+ ret = hid_sensor_write_raw_hyst_value(
+ &magn_state->rot_attributes, val, val2);
+ break;
+ default:
+ ret = -EINVAL;
+ }
break;
default:
ret = -EINVAL;
@@ -254,7 +303,7 @@ static int magn_3d_proc_event(struct hid_sensor_hub_device *hsdev,
struct magn_3d_state *magn_state = iio_priv(indio_dev);
dev_dbg(&indio_dev->dev, "magn_3d_proc_event\n");
- if (atomic_read(&magn_state->common_attributes.data_ready))
+ if (atomic_read(&magn_state->magn_flux_attributes.data_ready))
hid_sensor_push_data(indio_dev, magn_state->iio_vals);
return 0;
@@ -389,21 +438,48 @@ static int magn_3d_parse_report(struct platform_device *pdev,
dev_dbg(&pdev->dev, "magn_3d Setup %d IIO channels\n",
*chan_count);
- st->scale_precision = hid_sensor_format_scale(
+ st->magn_flux_attr.scale_precision = hid_sensor_format_scale(
HID_USAGE_SENSOR_COMPASS_3D,
&st->magn[CHANNEL_SCAN_INDEX_X],
- &st->scale_pre_decml, &st->scale_post_decml);
+ &st->magn_flux_attr.scale_pre_decml,
+ &st->magn_flux_attr.scale_post_decml);
+ st->rot_attr.scale_precision
+ = hid_sensor_format_scale(
+ HID_USAGE_SENSOR_ORIENT_COMP_MAGN_NORTH,
+ &st->magn[CHANNEL_SCAN_INDEX_NORTH_MAGN_TILT_COMP],
+ &st->rot_attr.scale_pre_decml,
+ &st->rot_attr.scale_post_decml);
/* Set Sensitivity field ids, when there is no individual modifier */
- if (st->common_attributes.sensitivity.index < 0) {
+ if (st->magn_flux_attributes.sensitivity.index < 0) {
sensor_hub_input_get_attribute_info(hsdev,
HID_FEATURE_REPORT, usage_id,
HID_USAGE_SENSOR_DATA_MOD_CHANGE_SENSITIVITY_ABS |
HID_USAGE_SENSOR_DATA_ORIENTATION,
- &st->common_attributes.sensitivity);
+ &st->magn_flux_attributes.sensitivity);
+ dev_dbg(&pdev->dev, "Sensitivity index:report %d:%d\n",
+ st->magn_flux_attributes.sensitivity.index,
+ st->magn_flux_attributes.sensitivity.report_id);
+ }
+ if (st->magn_flux_attributes.sensitivity.index < 0) {
+ sensor_hub_input_get_attribute_info(hsdev,
+ HID_FEATURE_REPORT, usage_id,
+ HID_USAGE_SENSOR_DATA_MOD_CHANGE_SENSITIVITY_ABS |
+ HID_USAGE_SENSOR_ORIENT_MAGN_FLUX,
+ &st->magn_flux_attributes.sensitivity);
+ dev_dbg(&pdev->dev, "Sensitivity index:report %d:%d\n",
+ st->magn_flux_attributes.sensitivity.index,
+ st->magn_flux_attributes.sensitivity.report_id);
+ }
+ if (st->rot_attributes.sensitivity.index < 0) {
+ sensor_hub_input_get_attribute_info(hsdev,
+ HID_FEATURE_REPORT, usage_id,
+ HID_USAGE_SENSOR_DATA_MOD_CHANGE_SENSITIVITY_ABS |
+ HID_USAGE_SENSOR_ORIENT_COMP_MAGN_NORTH,
+ &st->rot_attributes.sensitivity);
dev_dbg(&pdev->dev, "Sensitivity index:report %d:%d\n",
- st->common_attributes.sensitivity.index,
- st->common_attributes.sensitivity.report_id);
+ st->rot_attributes.sensitivity.index,
+ st->rot_attributes.sensitivity.report_id);
}
return 0;
@@ -428,16 +504,17 @@ static int hid_magn_3d_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, indio_dev);
magn_state = iio_priv(indio_dev);
- magn_state->common_attributes.hsdev = hsdev;
- magn_state->common_attributes.pdev = pdev;
+ magn_state->magn_flux_attributes.hsdev = hsdev;
+ magn_state->magn_flux_attributes.pdev = pdev;
ret = hid_sensor_parse_common_attributes(hsdev,
HID_USAGE_SENSOR_COMPASS_3D,
- &magn_state->common_attributes);
+ &magn_state->magn_flux_attributes);
if (ret) {
dev_err(&pdev->dev, "failed to setup common attributes\n");
return ret;
}
+ magn_state->rot_attributes = magn_state->magn_flux_attributes;
ret = magn_3d_parse_report(pdev, hsdev,
&channels, &chan_count,
@@ -460,9 +537,9 @@ static int hid_magn_3d_probe(struct platform_device *pdev)
dev_err(&pdev->dev, "failed to initialize trigger buffer\n");
return ret;
}
- atomic_set(&magn_state->common_attributes.data_ready, 0);
+ atomic_set(&magn_state->magn_flux_attributes.data_ready, 0);
ret = hid_sensor_setup_trigger(indio_dev, name,
- &magn_state->common_attributes);
+ &magn_state->magn_flux_attributes);
if (ret < 0) {
dev_err(&pdev->dev, "trigger setup failed\n");
goto error_unreg_buffer_funcs;
@@ -489,7 +566,7 @@ static int hid_magn_3d_probe(struct platform_device *pdev)
error_iio_unreg:
iio_device_unregister(indio_dev);
error_remove_trigger:
- hid_sensor_remove_trigger(&magn_state->common_attributes);
+ hid_sensor_remove_trigger(&magn_state->magn_flux_attributes);
error_unreg_buffer_funcs:
iio_triggered_buffer_cleanup(indio_dev);
return ret;
@@ -504,7 +581,7 @@ static int hid_magn_3d_remove(struct platform_device *pdev)
sensor_hub_remove_callback(hsdev, HID_USAGE_SENSOR_COMPASS_3D);
iio_device_unregister(indio_dev);
- hid_sensor_remove_trigger(&magn_state->common_attributes);
+ hid_sensor_remove_trigger(&magn_state->magn_flux_attributes);
iio_triggered_buffer_cleanup(indio_dev);
return 0;
diff --git a/drivers/iio/magnetometer/st_magn_core.c b/drivers/iio/magnetometer/st_magn_core.c
index 3e1f06b2224c..8e1b0861fbe4 100644
--- a/drivers/iio/magnetometer/st_magn_core.c
+++ b/drivers/iio/magnetometer/st_magn_core.c
@@ -46,139 +46,12 @@
#define ST_MAGN_FS_AVL_15000MG 15000
#define ST_MAGN_FS_AVL_16000MG 16000
-/* CUSTOM VALUES FOR SENSOR 0 */
-#define ST_MAGN_0_ODR_ADDR 0x00
-#define ST_MAGN_0_ODR_MASK 0x1c
-#define ST_MAGN_0_ODR_AVL_1HZ_VAL 0x00
-#define ST_MAGN_0_ODR_AVL_2HZ_VAL 0x01
-#define ST_MAGN_0_ODR_AVL_3HZ_VAL 0x02
-#define ST_MAGN_0_ODR_AVL_8HZ_VAL 0x03
-#define ST_MAGN_0_ODR_AVL_15HZ_VAL 0x04
-#define ST_MAGN_0_ODR_AVL_30HZ_VAL 0x05
-#define ST_MAGN_0_ODR_AVL_75HZ_VAL 0x06
-#define ST_MAGN_0_ODR_AVL_220HZ_VAL 0x07
-#define ST_MAGN_0_PW_ADDR 0x02
-#define ST_MAGN_0_PW_MASK 0x03
-#define ST_MAGN_0_PW_ON 0x00
-#define ST_MAGN_0_PW_OFF 0x03
-#define ST_MAGN_0_FS_ADDR 0x01
-#define ST_MAGN_0_FS_MASK 0xe0
-#define ST_MAGN_0_FS_AVL_1300_VAL 0x01
-#define ST_MAGN_0_FS_AVL_1900_VAL 0x02
-#define ST_MAGN_0_FS_AVL_2500_VAL 0x03
-#define ST_MAGN_0_FS_AVL_4000_VAL 0x04
-#define ST_MAGN_0_FS_AVL_4700_VAL 0x05
-#define ST_MAGN_0_FS_AVL_5600_VAL 0x06
-#define ST_MAGN_0_FS_AVL_8100_VAL 0x07
-#define ST_MAGN_0_FS_AVL_1300_GAIN_XY 1100
-#define ST_MAGN_0_FS_AVL_1900_GAIN_XY 855
-#define ST_MAGN_0_FS_AVL_2500_GAIN_XY 670
-#define ST_MAGN_0_FS_AVL_4000_GAIN_XY 450
-#define ST_MAGN_0_FS_AVL_4700_GAIN_XY 400
-#define ST_MAGN_0_FS_AVL_5600_GAIN_XY 330
-#define ST_MAGN_0_FS_AVL_8100_GAIN_XY 230
-#define ST_MAGN_0_FS_AVL_1300_GAIN_Z 980
-#define ST_MAGN_0_FS_AVL_1900_GAIN_Z 760
-#define ST_MAGN_0_FS_AVL_2500_GAIN_Z 600
-#define ST_MAGN_0_FS_AVL_4000_GAIN_Z 400
-#define ST_MAGN_0_FS_AVL_4700_GAIN_Z 355
-#define ST_MAGN_0_FS_AVL_5600_GAIN_Z 295
-#define ST_MAGN_0_FS_AVL_8100_GAIN_Z 205
-#define ST_MAGN_0_MULTIREAD_BIT false
-
-/* CUSTOM VALUES FOR SENSOR 1 */
-#define ST_MAGN_1_WAI_EXP 0x3c
-#define ST_MAGN_1_ODR_ADDR 0x00
-#define ST_MAGN_1_ODR_MASK 0x1c
-#define ST_MAGN_1_ODR_AVL_1HZ_VAL 0x00
-#define ST_MAGN_1_ODR_AVL_2HZ_VAL 0x01
-#define ST_MAGN_1_ODR_AVL_3HZ_VAL 0x02
-#define ST_MAGN_1_ODR_AVL_8HZ_VAL 0x03
-#define ST_MAGN_1_ODR_AVL_15HZ_VAL 0x04
-#define ST_MAGN_1_ODR_AVL_30HZ_VAL 0x05
-#define ST_MAGN_1_ODR_AVL_75HZ_VAL 0x06
-#define ST_MAGN_1_ODR_AVL_220HZ_VAL 0x07
-#define ST_MAGN_1_PW_ADDR 0x02
-#define ST_MAGN_1_PW_MASK 0x03
-#define ST_MAGN_1_PW_ON 0x00
-#define ST_MAGN_1_PW_OFF 0x03
-#define ST_MAGN_1_FS_ADDR 0x01
-#define ST_MAGN_1_FS_MASK 0xe0
-#define ST_MAGN_1_FS_AVL_1300_VAL 0x01
-#define ST_MAGN_1_FS_AVL_1900_VAL 0x02
-#define ST_MAGN_1_FS_AVL_2500_VAL 0x03
-#define ST_MAGN_1_FS_AVL_4000_VAL 0x04
-#define ST_MAGN_1_FS_AVL_4700_VAL 0x05
-#define ST_MAGN_1_FS_AVL_5600_VAL 0x06
-#define ST_MAGN_1_FS_AVL_8100_VAL 0x07
-#define ST_MAGN_1_FS_AVL_1300_GAIN_XY 909
-#define ST_MAGN_1_FS_AVL_1900_GAIN_XY 1169
-#define ST_MAGN_1_FS_AVL_2500_GAIN_XY 1492
-#define ST_MAGN_1_FS_AVL_4000_GAIN_XY 2222
-#define ST_MAGN_1_FS_AVL_4700_GAIN_XY 2500
-#define ST_MAGN_1_FS_AVL_5600_GAIN_XY 3030
-#define ST_MAGN_1_FS_AVL_8100_GAIN_XY 4347
-#define ST_MAGN_1_FS_AVL_1300_GAIN_Z 1020
-#define ST_MAGN_1_FS_AVL_1900_GAIN_Z 1315
-#define ST_MAGN_1_FS_AVL_2500_GAIN_Z 1666
-#define ST_MAGN_1_FS_AVL_4000_GAIN_Z 2500
-#define ST_MAGN_1_FS_AVL_4700_GAIN_Z 2816
-#define ST_MAGN_1_FS_AVL_5600_GAIN_Z 3389
-#define ST_MAGN_1_FS_AVL_8100_GAIN_Z 4878
-#define ST_MAGN_1_MULTIREAD_BIT false
-
-/* CUSTOM VALUES FOR SENSOR 2 */
-#define ST_MAGN_2_WAI_EXP 0x3d
-#define ST_MAGN_2_ODR_ADDR 0x20
-#define ST_MAGN_2_ODR_MASK 0x1c
-#define ST_MAGN_2_ODR_AVL_1HZ_VAL 0x00
-#define ST_MAGN_2_ODR_AVL_2HZ_VAL 0x01
-#define ST_MAGN_2_ODR_AVL_3HZ_VAL 0x02
-#define ST_MAGN_2_ODR_AVL_5HZ_VAL 0x03
-#define ST_MAGN_2_ODR_AVL_10HZ_VAL 0x04
-#define ST_MAGN_2_ODR_AVL_20HZ_VAL 0x05
-#define ST_MAGN_2_ODR_AVL_40HZ_VAL 0x06
-#define ST_MAGN_2_ODR_AVL_80HZ_VAL 0x07
-#define ST_MAGN_2_PW_ADDR 0x22
-#define ST_MAGN_2_PW_MASK 0x03
-#define ST_MAGN_2_PW_ON 0x00
-#define ST_MAGN_2_PW_OFF 0x03
-#define ST_MAGN_2_FS_ADDR 0x21
-#define ST_MAGN_2_FS_MASK 0x60
-#define ST_MAGN_2_FS_AVL_4000_VAL 0x00
-#define ST_MAGN_2_FS_AVL_8000_VAL 0x01
-#define ST_MAGN_2_FS_AVL_12000_VAL 0x02
-#define ST_MAGN_2_FS_AVL_16000_VAL 0x03
-#define ST_MAGN_2_FS_AVL_4000_GAIN 146
-#define ST_MAGN_2_FS_AVL_8000_GAIN 292
-#define ST_MAGN_2_FS_AVL_12000_GAIN 438
-#define ST_MAGN_2_FS_AVL_16000_GAIN 584
-#define ST_MAGN_2_MULTIREAD_BIT false
+/* Special L addresses for sensor 2 */
#define ST_MAGN_2_OUT_X_L_ADDR 0x28
#define ST_MAGN_2_OUT_Y_L_ADDR 0x2a
#define ST_MAGN_2_OUT_Z_L_ADDR 0x2c
-/* CUSTOM VALUES FOR SENSOR 3 */
-#define ST_MAGN_3_WAI_ADDR 0x4f
-#define ST_MAGN_3_WAI_EXP 0x40
-#define ST_MAGN_3_ODR_ADDR 0x60
-#define ST_MAGN_3_ODR_MASK 0x0c
-#define ST_MAGN_3_ODR_AVL_10HZ_VAL 0x00
-#define ST_MAGN_3_ODR_AVL_20HZ_VAL 0x01
-#define ST_MAGN_3_ODR_AVL_50HZ_VAL 0x02
-#define ST_MAGN_3_ODR_AVL_100HZ_VAL 0x03
-#define ST_MAGN_3_PW_ADDR 0x60
-#define ST_MAGN_3_PW_MASK 0x03
-#define ST_MAGN_3_PW_ON 0x00
-#define ST_MAGN_3_PW_OFF 0x03
-#define ST_MAGN_3_BDU_ADDR 0x62
-#define ST_MAGN_3_BDU_MASK 0x10
-#define ST_MAGN_3_DRDY_IRQ_ADDR 0x62
-#define ST_MAGN_3_DRDY_INT_MASK 0x01
-#define ST_MAGN_3_IHL_IRQ_ADDR 0x63
-#define ST_MAGN_3_IHL_IRQ_MASK 0x04
-#define ST_MAGN_3_FS_AVL_15000_GAIN 1500
-#define ST_MAGN_3_MULTIREAD_BIT false
+/* Special L addresses for sensor 3 */
#define ST_MAGN_3_OUT_X_L_ADDR 0x68
#define ST_MAGN_3_OUT_Y_L_ADDR 0x6a
#define ST_MAGN_3_OUT_Z_L_ADDR 0x6c
@@ -240,77 +113,78 @@ static const struct st_sensor_settings st_magn_sensors_settings[] = {
},
.ch = (struct iio_chan_spec *)st_magn_16bit_channels,
.odr = {
- .addr = ST_MAGN_0_ODR_ADDR,
- .mask = ST_MAGN_0_ODR_MASK,
+ .addr = 0x00,
+ .mask = 0x1c,
.odr_avl = {
- { 1, ST_MAGN_0_ODR_AVL_1HZ_VAL, },
- { 2, ST_MAGN_0_ODR_AVL_2HZ_VAL, },
- { 3, ST_MAGN_0_ODR_AVL_3HZ_VAL, },
- { 8, ST_MAGN_0_ODR_AVL_8HZ_VAL, },
- { 15, ST_MAGN_0_ODR_AVL_15HZ_VAL, },
- { 30, ST_MAGN_0_ODR_AVL_30HZ_VAL, },
- { 75, ST_MAGN_0_ODR_AVL_75HZ_VAL, },
+ { .hz = 1, .value = 0x00 },
+ { .hz = 2, .value = 0x01 },
+ { .hz = 3, .value = 0x02 },
+ { .hz = 8, .value = 0x03 },
+ { .hz = 15, .value = 0x04 },
+ { .hz = 30, .value = 0x05 },
+ { .hz = 75, .value = 0x06 },
+				/* 220 Hz, 0x07 reportedly exists */
},
},
.pw = {
- .addr = ST_MAGN_0_PW_ADDR,
- .mask = ST_MAGN_0_PW_MASK,
- .value_on = ST_MAGN_0_PW_ON,
- .value_off = ST_MAGN_0_PW_OFF,
+ .addr = 0x02,
+ .mask = 0x03,
+ .value_on = 0x00,
+ .value_off = 0x03,
},
.fs = {
- .addr = ST_MAGN_0_FS_ADDR,
- .mask = ST_MAGN_0_FS_MASK,
+ .addr = 0x01,
+ .mask = 0xe0,
.fs_avl = {
[0] = {
.num = ST_MAGN_FS_AVL_1300MG,
- .value = ST_MAGN_0_FS_AVL_1300_VAL,
- .gain = ST_MAGN_0_FS_AVL_1300_GAIN_XY,
- .gain2 = ST_MAGN_0_FS_AVL_1300_GAIN_Z,
+ .value = 0x01,
+ .gain = 1100,
+ .gain2 = 980,
},
[1] = {
.num = ST_MAGN_FS_AVL_1900MG,
- .value = ST_MAGN_0_FS_AVL_1900_VAL,
- .gain = ST_MAGN_0_FS_AVL_1900_GAIN_XY,
- .gain2 = ST_MAGN_0_FS_AVL_1900_GAIN_Z,
+ .value = 0x02,
+ .gain = 855,
+ .gain2 = 760,
},
[2] = {
.num = ST_MAGN_FS_AVL_2500MG,
- .value = ST_MAGN_0_FS_AVL_2500_VAL,
- .gain = ST_MAGN_0_FS_AVL_2500_GAIN_XY,
- .gain2 = ST_MAGN_0_FS_AVL_2500_GAIN_Z,
+ .value = 0x03,
+ .gain = 670,
+ .gain2 = 600,
},
[3] = {
.num = ST_MAGN_FS_AVL_4000MG,
- .value = ST_MAGN_0_FS_AVL_4000_VAL,
- .gain = ST_MAGN_0_FS_AVL_4000_GAIN_XY,
- .gain2 = ST_MAGN_0_FS_AVL_4000_GAIN_Z,
+ .value = 0x04,
+ .gain = 450,
+ .gain2 = 400,
},
[4] = {
.num = ST_MAGN_FS_AVL_4700MG,
- .value = ST_MAGN_0_FS_AVL_4700_VAL,
- .gain = ST_MAGN_0_FS_AVL_4700_GAIN_XY,
- .gain2 = ST_MAGN_0_FS_AVL_4700_GAIN_Z,
+ .value = 0x05,
+ .gain = 400,
+ .gain2 = 355,
},
[5] = {
.num = ST_MAGN_FS_AVL_5600MG,
- .value = ST_MAGN_0_FS_AVL_5600_VAL,
- .gain = ST_MAGN_0_FS_AVL_5600_GAIN_XY,
- .gain2 = ST_MAGN_0_FS_AVL_5600_GAIN_Z,
+ .value = 0x06,
+ .gain = 330,
+ .gain2 = 295,
},
[6] = {
.num = ST_MAGN_FS_AVL_8100MG,
- .value = ST_MAGN_0_FS_AVL_8100_VAL,
- .gain = ST_MAGN_0_FS_AVL_8100_GAIN_XY,
- .gain2 = ST_MAGN_0_FS_AVL_8100_GAIN_Z,
+ .value = 0x07,
+ .gain = 230,
+ .gain2 = 205,
},
},
},
- .multi_read_bit = ST_MAGN_0_MULTIREAD_BIT,
+ .multi_read_bit = false,
.bootime = 2,
},
{
- .wai = ST_MAGN_1_WAI_EXP,
+ .wai = 0x3c,
.wai_addr = ST_SENSORS_DEFAULT_WAI_ADDRESS,
.sensors_supported = {
[0] = LSM303DLHC_MAGN_DEV_NAME,
@@ -318,175 +192,175 @@ static const struct st_sensor_settings st_magn_sensors_settings[] = {
},
.ch = (struct iio_chan_spec *)st_magn_16bit_channels,
.odr = {
- .addr = ST_MAGN_1_ODR_ADDR,
- .mask = ST_MAGN_1_ODR_MASK,
+ .addr = 0x00,
+ .mask = 0x1c,
.odr_avl = {
- { 1, ST_MAGN_1_ODR_AVL_1HZ_VAL, },
- { 2, ST_MAGN_1_ODR_AVL_2HZ_VAL, },
- { 3, ST_MAGN_1_ODR_AVL_3HZ_VAL, },
- { 8, ST_MAGN_1_ODR_AVL_8HZ_VAL, },
- { 15, ST_MAGN_1_ODR_AVL_15HZ_VAL, },
- { 30, ST_MAGN_1_ODR_AVL_30HZ_VAL, },
- { 75, ST_MAGN_1_ODR_AVL_75HZ_VAL, },
- { 220, ST_MAGN_1_ODR_AVL_220HZ_VAL, },
+ { .hz = 1, .value = 0x00 },
+ { .hz = 2, .value = 0x01 },
+ { .hz = 3, .value = 0x02 },
+ { .hz = 8, .value = 0x03 },
+ { .hz = 15, .value = 0x04 },
+ { .hz = 30, .value = 0x05 },
+ { .hz = 75, .value = 0x06 },
+ { .hz = 220, .value = 0x07 },
},
},
.pw = {
- .addr = ST_MAGN_1_PW_ADDR,
- .mask = ST_MAGN_1_PW_MASK,
- .value_on = ST_MAGN_1_PW_ON,
- .value_off = ST_MAGN_1_PW_OFF,
+ .addr = 0x02,
+ .mask = 0x03,
+ .value_on = 0x00,
+ .value_off = 0x03,
},
.fs = {
- .addr = ST_MAGN_1_FS_ADDR,
- .mask = ST_MAGN_1_FS_MASK,
+ .addr = 0x01,
+ .mask = 0xe0,
.fs_avl = {
[0] = {
.num = ST_MAGN_FS_AVL_1300MG,
- .value = ST_MAGN_1_FS_AVL_1300_VAL,
- .gain = ST_MAGN_1_FS_AVL_1300_GAIN_XY,
- .gain2 = ST_MAGN_1_FS_AVL_1300_GAIN_Z,
+ .value = 0x01,
+ .gain = 909,
+ .gain2 = 1020,
},
[1] = {
.num = ST_MAGN_FS_AVL_1900MG,
- .value = ST_MAGN_1_FS_AVL_1900_VAL,
- .gain = ST_MAGN_1_FS_AVL_1900_GAIN_XY,
- .gain2 = ST_MAGN_1_FS_AVL_1900_GAIN_Z,
+ .value = 0x02,
+ .gain = 1169,
+ .gain2 = 1315,
},
[2] = {
.num = ST_MAGN_FS_AVL_2500MG,
- .value = ST_MAGN_1_FS_AVL_2500_VAL,
- .gain = ST_MAGN_1_FS_AVL_2500_GAIN_XY,
- .gain2 = ST_MAGN_1_FS_AVL_2500_GAIN_Z,
+ .value = 0x03,
+ .gain = 1492,
+ .gain2 = 1666,
},
[3] = {
.num = ST_MAGN_FS_AVL_4000MG,
- .value = ST_MAGN_1_FS_AVL_4000_VAL,
- .gain = ST_MAGN_1_FS_AVL_4000_GAIN_XY,
- .gain2 = ST_MAGN_1_FS_AVL_4000_GAIN_Z,
+ .value = 0x04,
+ .gain = 2222,
+ .gain2 = 2500,
},
[4] = {
.num = ST_MAGN_FS_AVL_4700MG,
- .value = ST_MAGN_1_FS_AVL_4700_VAL,
- .gain = ST_MAGN_1_FS_AVL_4700_GAIN_XY,
- .gain2 = ST_MAGN_1_FS_AVL_4700_GAIN_Z,
+ .value = 0x05,
+ .gain = 2500,
+ .gain2 = 2816,
},
[5] = {
.num = ST_MAGN_FS_AVL_5600MG,
- .value = ST_MAGN_1_FS_AVL_5600_VAL,
- .gain = ST_MAGN_1_FS_AVL_5600_GAIN_XY,
- .gain2 = ST_MAGN_1_FS_AVL_5600_GAIN_Z,
+ .value = 0x06,
+ .gain = 3030,
+ .gain2 = 3389,
},
[6] = {
.num = ST_MAGN_FS_AVL_8100MG,
- .value = ST_MAGN_1_FS_AVL_8100_VAL,
- .gain = ST_MAGN_1_FS_AVL_8100_GAIN_XY,
- .gain2 = ST_MAGN_1_FS_AVL_8100_GAIN_Z,
+ .value = 0x07,
+ .gain = 4347,
+ .gain2 = 4878,
},
},
},
- .multi_read_bit = ST_MAGN_1_MULTIREAD_BIT,
+ .multi_read_bit = false,
.bootime = 2,
},
{
- .wai = ST_MAGN_2_WAI_EXP,
+ .wai = 0x3d,
.wai_addr = ST_SENSORS_DEFAULT_WAI_ADDRESS,
.sensors_supported = {
[0] = LIS3MDL_MAGN_DEV_NAME,
},
.ch = (struct iio_chan_spec *)st_magn_2_16bit_channels,
.odr = {
- .addr = ST_MAGN_2_ODR_ADDR,
- .mask = ST_MAGN_2_ODR_MASK,
+ .addr = 0x20,
+ .mask = 0x1c,
.odr_avl = {
- { 1, ST_MAGN_2_ODR_AVL_1HZ_VAL, },
- { 2, ST_MAGN_2_ODR_AVL_2HZ_VAL, },
- { 3, ST_MAGN_2_ODR_AVL_3HZ_VAL, },
- { 5, ST_MAGN_2_ODR_AVL_5HZ_VAL, },
- { 10, ST_MAGN_2_ODR_AVL_10HZ_VAL, },
- { 20, ST_MAGN_2_ODR_AVL_20HZ_VAL, },
- { 40, ST_MAGN_2_ODR_AVL_40HZ_VAL, },
- { 80, ST_MAGN_2_ODR_AVL_80HZ_VAL, },
+ { .hz = 1, .value = 0x00 },
+ { .hz = 2, .value = 0x01 },
+ { .hz = 3, .value = 0x02 },
+ { .hz = 5, .value = 0x03 },
+ { .hz = 10, .value = 0x04 },
+ { .hz = 20, .value = 0x05 },
+ { .hz = 40, .value = 0x06 },
+ { .hz = 80, .value = 0x07 },
},
},
.pw = {
- .addr = ST_MAGN_2_PW_ADDR,
- .mask = ST_MAGN_2_PW_MASK,
- .value_on = ST_MAGN_2_PW_ON,
- .value_off = ST_MAGN_2_PW_OFF,
+ .addr = 0x22,
+ .mask = 0x03,
+ .value_on = 0x00,
+ .value_off = 0x03,
},
.fs = {
- .addr = ST_MAGN_2_FS_ADDR,
- .mask = ST_MAGN_2_FS_MASK,
+ .addr = 0x21,
+ .mask = 0x60,
.fs_avl = {
[0] = {
.num = ST_MAGN_FS_AVL_4000MG,
- .value = ST_MAGN_2_FS_AVL_4000_VAL,
- .gain = ST_MAGN_2_FS_AVL_4000_GAIN,
+ .value = 0x00,
+ .gain = 146,
},
[1] = {
.num = ST_MAGN_FS_AVL_8000MG,
- .value = ST_MAGN_2_FS_AVL_8000_VAL,
- .gain = ST_MAGN_2_FS_AVL_8000_GAIN,
+ .value = 0x01,
+ .gain = 292,
},
[2] = {
.num = ST_MAGN_FS_AVL_12000MG,
- .value = ST_MAGN_2_FS_AVL_12000_VAL,
- .gain = ST_MAGN_2_FS_AVL_12000_GAIN,
+ .value = 0x02,
+ .gain = 438,
},
[3] = {
.num = ST_MAGN_FS_AVL_16000MG,
- .value = ST_MAGN_2_FS_AVL_16000_VAL,
- .gain = ST_MAGN_2_FS_AVL_16000_GAIN,
+ .value = 0x03,
+ .gain = 584,
},
},
},
- .multi_read_bit = ST_MAGN_2_MULTIREAD_BIT,
+ .multi_read_bit = false,
.bootime = 2,
},
{
- .wai = ST_MAGN_3_WAI_EXP,
- .wai_addr = ST_MAGN_3_WAI_ADDR,
+ .wai = 0x40,
+ .wai_addr = 0x4f,
.sensors_supported = {
[0] = LSM303AGR_MAGN_DEV_NAME,
},
.ch = (struct iio_chan_spec *)st_magn_3_16bit_channels,
.odr = {
- .addr = ST_MAGN_3_ODR_ADDR,
- .mask = ST_MAGN_3_ODR_MASK,
+ .addr = 0x60,
+ .mask = 0x0c,
.odr_avl = {
- { 10, ST_MAGN_3_ODR_AVL_10HZ_VAL, },
- { 20, ST_MAGN_3_ODR_AVL_20HZ_VAL, },
- { 50, ST_MAGN_3_ODR_AVL_50HZ_VAL, },
- { 100, ST_MAGN_3_ODR_AVL_100HZ_VAL, },
+ { .hz = 10, .value = 0x00 },
+ { .hz = 20, .value = 0x01 },
+ { .hz = 50, .value = 0x02 },
+ { .hz = 100, .value = 0x03 },
},
},
.pw = {
- .addr = ST_MAGN_3_PW_ADDR,
- .mask = ST_MAGN_3_PW_MASK,
- .value_on = ST_MAGN_3_PW_ON,
- .value_off = ST_MAGN_3_PW_OFF,
+ .addr = 0x60,
+ .mask = 0x03,
+ .value_on = 0x00,
+ .value_off = 0x03,
},
.fs = {
.fs_avl = {
[0] = {
.num = ST_MAGN_FS_AVL_15000MG,
- .gain = ST_MAGN_3_FS_AVL_15000_GAIN,
+ .gain = 1500,
},
},
},
.bdu = {
- .addr = ST_MAGN_3_BDU_ADDR,
- .mask = ST_MAGN_3_BDU_MASK,
+ .addr = 0x62,
+ .mask = 0x10,
},
.drdy_irq = {
- .addr = ST_MAGN_3_DRDY_IRQ_ADDR,
- .mask_int1 = ST_MAGN_3_DRDY_INT_MASK,
- .addr_ihl = ST_MAGN_3_IHL_IRQ_ADDR,
- .mask_ihl = ST_MAGN_3_IHL_IRQ_MASK,
+ .addr = 0x62,
+ .mask_int1 = 0x01,
+ .addr_ihl = 0x63,
+ .mask_ihl = 0x04,
.addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR,
},
- .multi_read_bit = ST_MAGN_3_MULTIREAD_BIT,
+ .multi_read_bit = false,
.bootime = 2,
},
};
diff --git a/drivers/iio/potentiometer/mcp4531.c b/drivers/iio/potentiometer/mcp4531.c
index 13b6ae2fcf7b..0d1bcf89ae17 100644
--- a/drivers/iio/potentiometer/mcp4531.c
+++ b/drivers/iio/potentiometer/mcp4531.c
@@ -38,7 +38,7 @@
struct mcp4531_cfg {
int wipers;
- int max_pos;
+ int avail[3];
int kohms;
};
@@ -78,38 +78,38 @@ enum mcp4531_type {
};
static const struct mcp4531_cfg mcp4531_cfg[] = {
- [MCP453x_502] = { .wipers = 1, .max_pos = 128, .kohms = 5, },
- [MCP453x_103] = { .wipers = 1, .max_pos = 128, .kohms = 10, },
- [MCP453x_503] = { .wipers = 1, .max_pos = 128, .kohms = 50, },
- [MCP453x_104] = { .wipers = 1, .max_pos = 128, .kohms = 100, },
- [MCP454x_502] = { .wipers = 1, .max_pos = 128, .kohms = 5, },
- [MCP454x_103] = { .wipers = 1, .max_pos = 128, .kohms = 10, },
- [MCP454x_503] = { .wipers = 1, .max_pos = 128, .kohms = 50, },
- [MCP454x_104] = { .wipers = 1, .max_pos = 128, .kohms = 100, },
- [MCP455x_502] = { .wipers = 1, .max_pos = 256, .kohms = 5, },
- [MCP455x_103] = { .wipers = 1, .max_pos = 256, .kohms = 10, },
- [MCP455x_503] = { .wipers = 1, .max_pos = 256, .kohms = 50, },
- [MCP455x_104] = { .wipers = 1, .max_pos = 256, .kohms = 100, },
- [MCP456x_502] = { .wipers = 1, .max_pos = 256, .kohms = 5, },
- [MCP456x_103] = { .wipers = 1, .max_pos = 256, .kohms = 10, },
- [MCP456x_503] = { .wipers = 1, .max_pos = 256, .kohms = 50, },
- [MCP456x_104] = { .wipers = 1, .max_pos = 256, .kohms = 100, },
- [MCP463x_502] = { .wipers = 2, .max_pos = 128, .kohms = 5, },
- [MCP463x_103] = { .wipers = 2, .max_pos = 128, .kohms = 10, },
- [MCP463x_503] = { .wipers = 2, .max_pos = 128, .kohms = 50, },
- [MCP463x_104] = { .wipers = 2, .max_pos = 128, .kohms = 100, },
- [MCP464x_502] = { .wipers = 2, .max_pos = 128, .kohms = 5, },
- [MCP464x_103] = { .wipers = 2, .max_pos = 128, .kohms = 10, },
- [MCP464x_503] = { .wipers = 2, .max_pos = 128, .kohms = 50, },
- [MCP464x_104] = { .wipers = 2, .max_pos = 128, .kohms = 100, },
- [MCP465x_502] = { .wipers = 2, .max_pos = 256, .kohms = 5, },
- [MCP465x_103] = { .wipers = 2, .max_pos = 256, .kohms = 10, },
- [MCP465x_503] = { .wipers = 2, .max_pos = 256, .kohms = 50, },
- [MCP465x_104] = { .wipers = 2, .max_pos = 256, .kohms = 100, },
- [MCP466x_502] = { .wipers = 2, .max_pos = 256, .kohms = 5, },
- [MCP466x_103] = { .wipers = 2, .max_pos = 256, .kohms = 10, },
- [MCP466x_503] = { .wipers = 2, .max_pos = 256, .kohms = 50, },
- [MCP466x_104] = { .wipers = 2, .max_pos = 256, .kohms = 100, },
+ [MCP453x_502] = { .wipers = 1, .avail = { 0, 1, 128 }, .kohms = 5, },
+ [MCP453x_103] = { .wipers = 1, .avail = { 0, 1, 128 }, .kohms = 10, },
+ [MCP453x_503] = { .wipers = 1, .avail = { 0, 1, 128 }, .kohms = 50, },
+ [MCP453x_104] = { .wipers = 1, .avail = { 0, 1, 128 }, .kohms = 100, },
+ [MCP454x_502] = { .wipers = 1, .avail = { 0, 1, 128 }, .kohms = 5, },
+ [MCP454x_103] = { .wipers = 1, .avail = { 0, 1, 128 }, .kohms = 10, },
+ [MCP454x_503] = { .wipers = 1, .avail = { 0, 1, 128 }, .kohms = 50, },
+ [MCP454x_104] = { .wipers = 1, .avail = { 0, 1, 128 }, .kohms = 100, },
+ [MCP455x_502] = { .wipers = 1, .avail = { 0, 1, 256 }, .kohms = 5, },
+ [MCP455x_103] = { .wipers = 1, .avail = { 0, 1, 256 }, .kohms = 10, },
+ [MCP455x_503] = { .wipers = 1, .avail = { 0, 1, 256 }, .kohms = 50, },
+ [MCP455x_104] = { .wipers = 1, .avail = { 0, 1, 256 }, .kohms = 100, },
+ [MCP456x_502] = { .wipers = 1, .avail = { 0, 1, 256 }, .kohms = 5, },
+ [MCP456x_103] = { .wipers = 1, .avail = { 0, 1, 256 }, .kohms = 10, },
+ [MCP456x_503] = { .wipers = 1, .avail = { 0, 1, 256 }, .kohms = 50, },
+ [MCP456x_104] = { .wipers = 1, .avail = { 0, 1, 256 }, .kohms = 100, },
+ [MCP463x_502] = { .wipers = 2, .avail = { 0, 1, 128 }, .kohms = 5, },
+ [MCP463x_103] = { .wipers = 2, .avail = { 0, 1, 128 }, .kohms = 10, },
+ [MCP463x_503] = { .wipers = 2, .avail = { 0, 1, 128 }, .kohms = 50, },
+ [MCP463x_104] = { .wipers = 2, .avail = { 0, 1, 128 }, .kohms = 100, },
+ [MCP464x_502] = { .wipers = 2, .avail = { 0, 1, 128 }, .kohms = 5, },
+ [MCP464x_103] = { .wipers = 2, .avail = { 0, 1, 128 }, .kohms = 10, },
+ [MCP464x_503] = { .wipers = 2, .avail = { 0, 1, 128 }, .kohms = 50, },
+ [MCP464x_104] = { .wipers = 2, .avail = { 0, 1, 128 }, .kohms = 100, },
+ [MCP465x_502] = { .wipers = 2, .avail = { 0, 1, 256 }, .kohms = 5, },
+ [MCP465x_103] = { .wipers = 2, .avail = { 0, 1, 256 }, .kohms = 10, },
+ [MCP465x_503] = { .wipers = 2, .avail = { 0, 1, 256 }, .kohms = 50, },
+ [MCP465x_104] = { .wipers = 2, .avail = { 0, 1, 256 }, .kohms = 100, },
+ [MCP466x_502] = { .wipers = 2, .avail = { 0, 1, 256 }, .kohms = 5, },
+ [MCP466x_103] = { .wipers = 2, .avail = { 0, 1, 256 }, .kohms = 10, },
+ [MCP466x_503] = { .wipers = 2, .avail = { 0, 1, 256 }, .kohms = 50, },
+ [MCP466x_104] = { .wipers = 2, .avail = { 0, 1, 256 }, .kohms = 100, },
};
#define MCP4531_WRITE (0 << 2)
@@ -124,13 +124,14 @@ struct mcp4531_data {
const struct mcp4531_cfg *cfg;
};
-#define MCP4531_CHANNEL(ch) { \
- .type = IIO_RESISTANCE, \
- .indexed = 1, \
- .output = 1, \
- .channel = (ch), \
- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
- .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \
+#define MCP4531_CHANNEL(ch) { \
+ .type = IIO_RESISTANCE, \
+ .indexed = 1, \
+ .output = 1, \
+ .channel = (ch), \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \
+ .info_mask_shared_by_type_available = BIT(IIO_CHAN_INFO_RAW), \
}
static const struct iio_chan_spec mcp4531_channels[] = {
@@ -156,13 +157,31 @@ static int mcp4531_read_raw(struct iio_dev *indio_dev,
return IIO_VAL_INT;
case IIO_CHAN_INFO_SCALE:
*val = 1000 * data->cfg->kohms;
- *val2 = data->cfg->max_pos;
+ *val2 = data->cfg->avail[2];
return IIO_VAL_FRACTIONAL;
}
return -EINVAL;
}
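+/*
+ * Report the valid raw wiper positions to user space. With IIO_AVAIL_RANGE
+ * the avail triplet is interpreted as {minimum, step, maximum}, i.e.
+ * { 0, 1, 128 } or { 0, 1, 256 } for the parts above.
+ */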
+static int mcp4531_read_avail(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ const int **vals, int *type, int *length,
+ long mask)
+{
+ struct mcp4531_data *data = iio_priv(indio_dev);
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ *length = ARRAY_SIZE(data->cfg->avail);
+ *vals = data->cfg->avail;
+ *type = IIO_VAL_INT;
+ return IIO_AVAIL_RANGE;
+ }
+
+ return -EINVAL;
+}
+
static int mcp4531_write_raw(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan,
int val, int val2, long mask)
@@ -172,7 +191,7 @@ static int mcp4531_write_raw(struct iio_dev *indio_dev,
switch (mask) {
case IIO_CHAN_INFO_RAW:
- if (val > data->cfg->max_pos || val < 0)
+ if (val > data->cfg->avail[2] || val < 0)
return -EINVAL;
break;
default:
@@ -186,6 +205,7 @@ static int mcp4531_write_raw(struct iio_dev *indio_dev,
static const struct iio_info mcp4531_info = {
.read_raw = mcp4531_read_raw,
+ .read_avail = mcp4531_read_avail,
.write_raw = mcp4531_write_raw,
.driver_module = THIS_MODULE,
};
diff --git a/drivers/iio/potentiostat/Kconfig b/drivers/iio/potentiostat/Kconfig
new file mode 100644
index 000000000000..1e3baf2cc97d
--- /dev/null
+++ b/drivers/iio/potentiostat/Kconfig
@@ -0,0 +1,22 @@
+#
+# Potentiostat drivers
+#
+# When adding new entries keep the list in alphabetical order
+
+menu "Digital potentiostats"
+
+config LMP91000
+ tristate "Texas Instruments LMP91000 potentiostat driver"
+ depends on I2C
+ select REGMAP_I2C
+ select IIO_BUFFER
+ select IIO_BUFFER_CB
+ select IIO_TRIGGERED_BUFFER
+ help
+ Say yes here to build support for the Texas Instruments
+ LMP91000 digital potentiostat chip.
+
+ To compile this driver as a module, choose M here: the
+	  module will be called lmp91000.
+
+endmenu
diff --git a/drivers/iio/potentiostat/Makefile b/drivers/iio/potentiostat/Makefile
new file mode 100644
index 000000000000..64d315ef4449
--- /dev/null
+++ b/drivers/iio/potentiostat/Makefile
@@ -0,0 +1,6 @@
+#
+# Makefile for industrial I/O potentiostat drivers
+#
+
+# When adding new entries keep the list in alphabetical order
+obj-$(CONFIG_LMP91000) += lmp91000.o
diff --git a/drivers/iio/potentiostat/lmp91000.c b/drivers/iio/potentiostat/lmp91000.c
new file mode 100644
index 000000000000..e22714365022
--- /dev/null
+++ b/drivers/iio/potentiostat/lmp91000.c
@@ -0,0 +1,446 @@
+/*
+ * lmp91000.c - Support for Texas Instruments digital potentiostats
+ *
+ * Copyright (C) 2016 Matt Ranostay <mranostay@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * TODO: bias voltage + polarity control, and multiple chip support
+ */
+
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/delay.h>
+#include <linux/of.h>
+#include <linux/regmap.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/buffer.h>
+#include <linux/iio/consumer.h>
+#include <linux/iio/trigger.h>
+#include <linux/iio/trigger_consumer.h>
+#include <linux/iio/triggered_buffer.h>
+
+#define LMP91000_REG_LOCK 0x01
+#define LMP91000_REG_TIACN 0x10
+#define LMP91000_REG_TIACN_GAIN_SHIFT 2
+
+#define LMP91000_REG_REFCN 0x11
+#define LMP91000_REG_REFCN_EXT_REF 0x20
+#define LMP91000_REG_REFCN_50_ZERO 0x80
+
+#define LMP91000_REG_MODECN 0x12
+#define LMP91000_REG_MODECN_3LEAD 0x03
+#define LMP91000_REG_MODECN_TEMP 0x07
+
+#define LMP91000_DRV_NAME "lmp91000"
+
+static const int lmp91000_tia_gain[] = { 0, 2750, 3500, 7000, 14000, 35000,
+ 120000, 350000 };
+
+static const int lmp91000_rload[] = { 10, 33, 50, 100 };
+
+#define LMP91000_TEMP_BASE -40
+
+static const u16 lmp91000_temp_lut[] = {
+ 1875, 1867, 1860, 1852, 1844, 1836, 1828, 1821, 1813, 1805,
+ 1797, 1789, 1782, 1774, 1766, 1758, 1750, 1742, 1734, 1727,
+ 1719, 1711, 1703, 1695, 1687, 1679, 1671, 1663, 1656, 1648,
+ 1640, 1632, 1624, 1616, 1608, 1600, 1592, 1584, 1576, 1568,
+ 1560, 1552, 1544, 1536, 1528, 1520, 1512, 1504, 1496, 1488,
+ 1480, 1472, 1464, 1456, 1448, 1440, 1432, 1424, 1415, 1407,
+ 1399, 1391, 1383, 1375, 1367, 1359, 1351, 1342, 1334, 1326,
+ 1318, 1310, 1302, 1293, 1285, 1277, 1269, 1261, 1253, 1244,
+ 1236, 1228, 1220, 1212, 1203, 1195, 1187, 1179, 1170, 1162,
+ 1154, 1146, 1137, 1129, 1121, 1112, 1104, 1096, 1087, 1079,
+ 1071, 1063, 1054, 1046, 1038, 1029, 1021, 1012, 1004, 996,
+ 987, 979, 971, 962, 954, 945, 937, 929, 920, 912,
+ 903, 895, 886, 878, 870, 861 };
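+
+/*
+ * The table above maps a processed ADC reading (nominally millivolts) to a
+ * temperature: entries descend from LMP91000_TEMP_BASE (-40 degrees C) in
+ * 1 degree steps. The lookup in lmp91000_read_raw() stops at the first
+ * entry below the reading; e.g. (illustrative) a reading of 1410 falls
+ * below lmp91000_temp_lut[59] = 1407, giving (-40 + 59) * 1000 = 19000
+ * millidegrees C.
+ */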
+
+static const struct regmap_config lmp91000_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+};
+
+struct lmp91000_data {
+ struct regmap *regmap;
+ struct device *dev;
+
+ struct iio_trigger *trig;
+ struct iio_cb_buffer *cb_buffer;
+ struct iio_channel *adc_chan;
+
+ struct completion completion;
+ u8 chan_select;
+
+ u32 buffer[4]; /* 64-bit data + 64-bit timestamp */
+};
+
+static const struct iio_chan_spec lmp91000_channels[] = {
+ { /* chemical channel mV */
+ .type = IIO_VOLTAGE,
+ .channel = 0,
+ .address = LMP91000_REG_MODECN_3LEAD,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_OFFSET) |
+ BIT(IIO_CHAN_INFO_SCALE),
+ .scan_index = 0,
+ .scan_type = {
+ .sign = 's',
+ .realbits = 32,
+ .storagebits = 32,
+ },
+ },
+ IIO_CHAN_SOFT_TIMESTAMP(1),
+ { /* temperature channel mV */
+ .type = IIO_TEMP,
+ .channel = 1,
+ .address = LMP91000_REG_MODECN_TEMP,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED),
+ .scan_index = -1,
+ },
+};
+
+static int lmp91000_read(struct lmp91000_data *data, int channel, int *val)
+{
+ int state, ret;
+
+ ret = regmap_read(data->regmap, LMP91000_REG_MODECN, &state);
+ if (ret)
+ return -EINVAL;
+
+ ret = regmap_write(data->regmap, LMP91000_REG_MODECN, channel);
+ if (ret)
+ return -EINVAL;
+
+ /* delay until the first temperature reading is complete */
+ if ((state != channel) && (channel == LMP91000_REG_MODECN_TEMP))
+ usleep_range(3000, 4000);
+
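+ /* buffer[0] holds the 3-lead (voltage) sample, buffer[1] the temperature sample */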
+ data->chan_select = channel != LMP91000_REG_MODECN_3LEAD;
+
+ iio_trigger_poll_chained(data->trig);
+
+ ret = wait_for_completion_timeout(&data->completion, HZ);
+ reinit_completion(&data->completion);
+
+ if (!ret)
+ return -ETIMEDOUT;
+
+ *val = data->buffer[data->chan_select];
+
+ return 0;
+}
+
+static irqreturn_t lmp91000_buffer_handler(int irq, void *private)
+{
+ struct iio_poll_func *pf = private;
+ struct iio_dev *indio_dev = pf->indio_dev;
+ struct lmp91000_data *data = iio_priv(indio_dev);
+ int ret, val;
+
+ memset(data->buffer, 0, sizeof(data->buffer));
+
+ ret = lmp91000_read(data, LMP91000_REG_MODECN_3LEAD, &val);
+ if (!ret) {
+ data->buffer[0] = val;
+ iio_push_to_buffers_with_timestamp(indio_dev, data->buffer,
+ iio_get_time_ns(indio_dev));
+ }
+
+ iio_trigger_notify_done(indio_dev->trig);
+
+ return IRQ_HANDLED;
+}
+
+static int lmp91000_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ struct lmp91000_data *data = iio_priv(indio_dev);
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ case IIO_CHAN_INFO_PROCESSED: {
+ int ret = iio_channel_start_all_cb(data->cb_buffer);
+
+ if (ret)
+ return ret;
+
+ ret = lmp91000_read(data, chan->address, val);
+
+ iio_channel_stop_all_cb(data->cb_buffer);
+
+ if (ret)
+ return ret;
+
+ if (mask == IIO_CHAN_INFO_PROCESSED) {
+ int tmp, i;
+
+ ret = iio_convert_raw_to_processed(data->adc_chan,
+ *val, &tmp, 1);
+ if (ret)
+ return ret;
+
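+ /* the LUT is monotonically decreasing; the first entry below the reading yields the temperature index */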
+ for (i = 0; i < ARRAY_SIZE(lmp91000_temp_lut); i++)
+ if (lmp91000_temp_lut[i] < tmp)
+ break;
+
+ *val = (LMP91000_TEMP_BASE + i) * 1000;
+ }
+ return IIO_VAL_INT;
+ }
+ case IIO_CHAN_INFO_OFFSET:
+ return iio_read_channel_offset(data->adc_chan, val, val2);
+ case IIO_CHAN_INFO_SCALE:
+ return iio_read_channel_scale(data->adc_chan, val, val2);
+ }
+
+ return -EINVAL;
+}
+
+static const struct iio_info lmp91000_info = {
+ .driver_module = THIS_MODULE,
+ .read_raw = lmp91000_read_raw,
+};
+
+static int lmp91000_read_config(struct lmp91000_data *data)
+{
+ struct device *dev = data->dev;
+ struct device_node *np = dev->of_node;
+ unsigned int reg, val;
+ int i, ret;
+
+ ret = of_property_read_u32(np, "ti,tia-gain-ohm", &val);
+ if (ret) {
+ if (of_property_read_bool(np, "ti,external-tia-resistor"))
+ val = 0;
+ else {
+ dev_err(dev, "no ti,tia-gain-ohm defined");
+ return ret;
+ }
+ }
+
+ ret = -EINVAL;
+ for (i = 0; i < ARRAY_SIZE(lmp91000_tia_gain); i++) {
+ if (lmp91000_tia_gain[i] == val) {
+ reg = i << LMP91000_REG_TIACN_GAIN_SHIFT;
+ ret = 0;
+ break;
+ }
+ }
+
+ if (ret) {
+ dev_err(dev, "invalid ti,tia-gain-ohm %d\n", val);
+ return ret;
+ }
+
+ ret = of_property_read_u32(np, "ti,rload-ohm", &val);
+ if (ret) {
+ val = 100;
+ dev_info(dev, "no ti,rload-ohm defined, default to %d\n", val);
+ }
+
+ ret = -EINVAL;
+ for (i = 0; i < ARRAY_SIZE(lmp91000_rload); i++) {
+ if (lmp91000_rload[i] == val) {
+ reg |= i;
+ ret = 0;
+ break;
+ }
+ }
+
+ if (ret) {
+ dev_err(dev, "invalid ti,rload-ohm %d\n", val);
+ return ret;
+ }
+
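+ /* per the LMP91000 datasheet, writing 0 to LOCK unlocks TIACN/REFCN; 1 re-locks them */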
+ regmap_write(data->regmap, LMP91000_REG_LOCK, 0);
+ regmap_write(data->regmap, LMP91000_REG_TIACN, reg);
+ regmap_write(data->regmap, LMP91000_REG_REFCN, LMP91000_REG_REFCN_EXT_REF
+ | LMP91000_REG_REFCN_50_ZERO);
+ regmap_write(data->regmap, LMP91000_REG_LOCK, 1);
+
+ return 0;
+}
+
+static int lmp91000_buffer_cb(const void *val, void *private)
+{
+ struct iio_dev *indio_dev = private;
+ struct lmp91000_data *data = iio_priv(indio_dev);
+
+ data->buffer[data->chan_select] = *((int *)val);
+ complete_all(&data->completion);
+
+ return 0;
+}
+
+static const struct iio_trigger_ops lmp91000_trigger_ops = {
+ .owner = THIS_MODULE,
+};
+
+static int lmp91000_buffer_preenable(struct iio_dev *indio_dev)
+{
+ struct lmp91000_data *data = iio_priv(indio_dev);
+
+ return iio_channel_start_all_cb(data->cb_buffer);
+}
+
+static int lmp91000_buffer_predisable(struct iio_dev *indio_dev)
+{
+ struct lmp91000_data *data = iio_priv(indio_dev);
+
+ iio_channel_stop_all_cb(data->cb_buffer);
+
+ return 0;
+}
+
+static const struct iio_buffer_setup_ops lmp91000_buffer_setup_ops = {
+ .preenable = lmp91000_buffer_preenable,
+ .postenable = iio_triggered_buffer_postenable,
+ .predisable = lmp91000_buffer_predisable,
+};
+
+static int lmp91000_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct device *dev = &client->dev;
+ struct lmp91000_data *data;
+ struct iio_dev *indio_dev;
+ int ret;
+
+ indio_dev = devm_iio_device_alloc(dev, sizeof(*data));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ indio_dev->info = &lmp91000_info;
+ indio_dev->channels = lmp91000_channels;
+ indio_dev->num_channels = ARRAY_SIZE(lmp91000_channels);
+ indio_dev->name = LMP91000_DRV_NAME;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ i2c_set_clientdata(client, indio_dev);
+
+ data = iio_priv(indio_dev);
+ data->dev = dev;
+ data->regmap = devm_regmap_init_i2c(client, &lmp91000_regmap_config);
+ if (IS_ERR(data->regmap)) {
+ dev_err(dev, "regmap initialization failed.\n");
+ return PTR_ERR(data->regmap);
+ }
+
+ data->trig = devm_iio_trigger_alloc(data->dev, "%s-mux%d",
+ indio_dev->name, indio_dev->id);
+ if (!data->trig) {
+ dev_err(dev, "cannot allocate iio trigger.\n");
+ return -ENOMEM;
+ }
+
+ data->trig->ops = &lmp91000_trigger_ops;
+ data->trig->dev.parent = dev;
+ init_completion(&data->completion);
+
+ ret = lmp91000_read_config(data);
+ if (ret)
+ return ret;
+
+ data->cb_buffer = iio_channel_get_all_cb(dev, &lmp91000_buffer_cb,
+ indio_dev);
+
+ if (IS_ERR(data->cb_buffer)) {
+ if (PTR_ERR(data->cb_buffer) == -ENODEV)
+ return -EPROBE_DEFER;
+ return PTR_ERR(data->cb_buffer);
+ }
+
+ data->adc_chan = iio_channel_cb_get_channels(data->cb_buffer);
+
+ ret = iio_trigger_set_immutable(iio_channel_cb_get_iio_dev(data->cb_buffer),
+ data->trig);
+ if (ret) {
+ dev_err(dev, "cannot set immutable trigger.\n");
+ goto error_release_cb_buffer;
+ }
+
+ ret = iio_trigger_register(data->trig);
+ if (ret) {
+ dev_err(dev, "cannot register iio trigger.\n");
+ goto error_release_cb_buffer;
+ }
+
+ ret = iio_triggered_buffer_setup(indio_dev, NULL,
+ &lmp91000_buffer_handler,
+ &lmp91000_buffer_setup_ops);
+ if (ret)
+ goto error_unreg_trigger;
+
+ ret = iio_device_register(indio_dev);
+ if (ret)
+ goto error_unreg_buffer;
+
+ return 0;
+
+error_unreg_buffer:
+ iio_triggered_buffer_cleanup(indio_dev);
+
+error_unreg_trigger:
+ iio_trigger_unregister(data->trig);
+
+error_release_cb_buffer:
+ iio_channel_release_all_cb(data->cb_buffer);
+
+ return ret;
+}
+
+static int lmp91000_remove(struct i2c_client *client)
+{
+ struct iio_dev *indio_dev = i2c_get_clientdata(client);
+ struct lmp91000_data *data = iio_priv(indio_dev);
+
+ iio_device_unregister(indio_dev);
+
+ iio_channel_stop_all_cb(data->cb_buffer);
+ iio_channel_release_all_cb(data->cb_buffer);
+
+ iio_triggered_buffer_cleanup(indio_dev);
+ iio_trigger_unregister(data->trig);
+
+ return 0;
+}
+
+static const struct of_device_id lmp91000_of_match[] = {
+ { .compatible = "ti,lmp91000", },
+ { },
+};
+MODULE_DEVICE_TABLE(of, lmp91000_of_match);
+
+static const struct i2c_device_id lmp91000_id[] = {
+ { "lmp91000", 0 },
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, lmp91000_id);
+
+static struct i2c_driver lmp91000_driver = {
+ .driver = {
+ .name = LMP91000_DRV_NAME,
+ .of_match_table = of_match_ptr(lmp91000_of_match),
+ },
+ .probe = lmp91000_probe,
+ .remove = lmp91000_remove,
+ .id_table = lmp91000_id,
+};
+module_i2c_driver(lmp91000_driver);
+
+MODULE_AUTHOR("Matt Ranostay <mranostay@gmail.com>");
+MODULE_DESCRIPTION("LMP91000 digital potentiostat");
+MODULE_LICENSE("GPL");
diff --git a/drivers/iio/pressure/Kconfig b/drivers/iio/pressure/Kconfig
index 15cd416365c1..bd8d96b96771 100644
--- a/drivers/iio/pressure/Kconfig
+++ b/drivers/iio/pressure/Kconfig
@@ -5,6 +5,16 @@
menu "Pressure sensors"
+config ABP060MG
+ tristate "Honeywell ABP pressure sensor driver"
+ depends on I2C
+ help
+ Say yes here to build support for the Honeywell ABP pressure
+ sensors.
+
+ To compile this driver as a module, choose M here: the module
+ will be called abp060mg.
+
config BMP280
tristate "Bosch Sensortec BMP180/BMP280 pressure sensor I2C driver"
depends on (I2C || SPI_MASTER)
diff --git a/drivers/iio/pressure/Makefile b/drivers/iio/pressure/Makefile
index fff77185a5cc..de3dbc81dc5a 100644
--- a/drivers/iio/pressure/Makefile
+++ b/drivers/iio/pressure/Makefile
@@ -3,6 +3,7 @@
#
# When adding new entries keep the list in alphabetical order
+obj-$(CONFIG_ABP060MG) += abp060mg.o
obj-$(CONFIG_BMP280) += bmp280.o
bmp280-objs := bmp280-core.o bmp280-regmap.o
obj-$(CONFIG_BMP280_I2C) += bmp280-i2c.o
diff --git a/drivers/iio/pressure/abp060mg.c b/drivers/iio/pressure/abp060mg.c
new file mode 100644
index 000000000000..43bdd0b9155f
--- /dev/null
+++ b/drivers/iio/pressure/abp060mg.c
@@ -0,0 +1,276 @@
+/*
+ * Copyright (C) 2016 - Marcin Malagowski <mrc@bourne.st>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/iio/iio.h>
+
+#define ABP060MG_ERROR_MASK 0xC000
+#define ABP060MG_RESP_TIME_MS 40
+#define ABP060MG_MIN_COUNTS 1638 /* = 0x0666 (10% of u14) */
+#define ABP060MG_MAX_COUNTS 14745 /* = 0x3999 (90% of u14) */
+#define ABP060MG_NUM_COUNTS (ABP060MG_MAX_COUNTS - ABP060MG_MIN_COUNTS)
+
+enum abp_variant {
+ /* gage [kPa] */
+ ABP006KG, ABP010KG, ABP016KG, ABP025KG, ABP040KG, ABP060KG, ABP100KG,
+ ABP160KG, ABP250KG, ABP400KG, ABP600KG, ABP001GG,
+ /* differential [kPa] */
+ ABP006KD, ABP010KD, ABP016KD, ABP025KD, ABP040KD, ABP060KD, ABP100KD,
+ ABP160KD, ABP250KD, ABP400KD,
+ /* gage [psi] */
+ ABP001PG, ABP005PG, ABP015PG, ABP030PG, ABP060PG, ABP100PG, ABP150PG,
+ /* differential [psi] */
+ ABP001PD, ABP005PD, ABP015PD, ABP030PD, ABP060PD,
+};
+
+struct abp_config {
+ int min;
+ int max;
+};
+
+static struct abp_config abp_config[] = {
+ /* mbar & kPa variants */
+ [ABP006KG] = { .min = 0, .max = 6000 },
+ [ABP010KG] = { .min = 0, .max = 10000 },
+ [ABP016KG] = { .min = 0, .max = 16000 },
+ [ABP025KG] = { .min = 0, .max = 25000 },
+ [ABP040KG] = { .min = 0, .max = 40000 },
+ [ABP060KG] = { .min = 0, .max = 60000 },
+ [ABP100KG] = { .min = 0, .max = 100000 },
+ [ABP160KG] = { .min = 0, .max = 160000 },
+ [ABP250KG] = { .min = 0, .max = 250000 },
+ [ABP400KG] = { .min = 0, .max = 400000 },
+ [ABP600KG] = { .min = 0, .max = 600000 },
+ [ABP001GG] = { .min = 0, .max = 1000000 },
+ [ABP006KD] = { .min = -6000, .max = 6000 },
+ [ABP010KD] = { .min = -10000, .max = 10000 },
+ [ABP016KD] = { .min = -16000, .max = 16000 },
+ [ABP025KD] = { .min = -25000, .max = 25000 },
+ [ABP040KD] = { .min = -40000, .max = 40000 },
+ [ABP060KD] = { .min = -60000, .max = 60000 },
+ [ABP100KD] = { .min = -100000, .max = 100000 },
+ [ABP160KD] = { .min = -160000, .max = 160000 },
+ [ABP250KD] = { .min = -250000, .max = 250000 },
+ [ABP400KD] = { .min = -400000, .max = 400000 },
+ /* psi variants (1 psi ~ 6895 Pa) */
+ [ABP001PG] = { .min = 0, .max = 6895 },
+ [ABP005PG] = { .min = 0, .max = 34474 },
+ [ABP015PG] = { .min = 0, .max = 103421 },
+ [ABP030PG] = { .min = 0, .max = 206843 },
+ [ABP060PG] = { .min = 0, .max = 413686 },
+ [ABP100PG] = { .min = 0, .max = 689476 },
+ [ABP150PG] = { .min = 0, .max = 1034214 },
+ [ABP001PD] = { .min = -6895, .max = 6895 },
+ [ABP005PD] = { .min = -34474, .max = 34474 },
+ [ABP015PD] = { .min = -103421, .max = 103421 },
+ [ABP030PD] = { .min = -206843, .max = 206843 },
+ [ABP060PD] = { .min = -413686, .max = 413686 },
+};
+
+struct abp_state {
+ struct i2c_client *client;
+ struct mutex lock;
+
+ /*
+ * bus-dependent MEASURE_REQUEST length:
+ * if SMBus QUICK is unsupported, a dummy byte must be sent
+ */
+ int mreq_len;
+
+ /* model-dependent values (calculated on probe) */
+ int scale;
+ int offset;
+};
+
+static const struct iio_chan_spec abp060mg_channels[] = {
+ {
+ .type = IIO_PRESSURE,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_OFFSET) | BIT(IIO_CHAN_INFO_SCALE),
+ },
+};
+
+static int abp060mg_get_measurement(struct abp_state *state, int *val)
+{
+ struct i2c_client *client = state->client;
+ __be16 buf[2];
+ u16 pressure;
+ int ret;
+
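+ /* the measurement request is a zero-byte (SMBus quick) or single dummy-byte write */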
+ buf[0] = 0;
+ ret = i2c_master_send(client, (u8 *)&buf, state->mreq_len);
+ if (ret < 0)
+ return ret;
+
+ msleep_interruptible(ABP060MG_RESP_TIME_MS);
+
+ ret = i2c_master_recv(client, (u8 *)&buf, sizeof(buf));
+ if (ret < 0)
+ return ret;
+
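+ /* the two MSBs of the first word are status flags; the driver treats any non-zero status as an error */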
+ pressure = be16_to_cpu(buf[0]);
+ if (pressure & ABP060MG_ERROR_MASK)
+ return -EIO;
+
+ if (pressure < ABP060MG_MIN_COUNTS || pressure > ABP060MG_MAX_COUNTS)
+ return -EIO;
+
+ *val = pressure;
+
+ return IIO_VAL_INT;
+}
+
+static int abp060mg_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, int *val,
+ int *val2, long mask)
+{
+ struct abp_state *state = iio_priv(indio_dev);
+ int ret;
+
+ mutex_lock(&state->lock);
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ ret = abp060mg_get_measurement(state, val);
+ break;
+ case IIO_CHAN_INFO_OFFSET:
+ *val = state->offset;
+ ret = IIO_VAL_INT;
+ break;
+ case IIO_CHAN_INFO_SCALE:
+ *val = state->scale;
+ *val2 = ABP060MG_NUM_COUNTS * 1000; /* to kPa */
+ ret = IIO_VAL_FRACTIONAL;
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ mutex_unlock(&state->lock);
+ return ret;
+}
+
+static const struct iio_info abp060mg_info = {
+ .driver_module = THIS_MODULE,
+ .read_raw = abp060mg_read_raw,
+};
+
+static void abp060mg_init_device(struct iio_dev *indio_dev, unsigned long id)
+{
+ struct abp_state *state = iio_priv(indio_dev);
+ struct abp_config *cfg = &abp_config[id];
+
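+ /* consumers compute pressure = (raw + offset) * (max - min) / (NUM_COUNTS * 1000) in kPa */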
+ state->scale = cfg->max - cfg->min;
+ state->offset = -ABP060MG_MIN_COUNTS;
+
+ if (cfg->min < 0) /* differential */
+ state->offset -= ABP060MG_NUM_COUNTS >> 1;
+}
+
+static int abp060mg_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct iio_dev *indio_dev;
+ struct abp_state *state;
+ unsigned long cfg_id = id->driver_data;
+
+ indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*state));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ state = iio_priv(indio_dev);
+ i2c_set_clientdata(client, state);
+ state->client = client;
+
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_QUICK))
+ state->mreq_len = 1;
+
+ abp060mg_init_device(indio_dev, cfg_id);
+
+ indio_dev->dev.parent = &client->dev;
+ indio_dev->name = dev_name(&client->dev);
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->info = &abp060mg_info;
+
+ indio_dev->channels = abp060mg_channels;
+ indio_dev->num_channels = ARRAY_SIZE(abp060mg_channels);
+
+ mutex_init(&state->lock);
+
+ return devm_iio_device_register(&client->dev, indio_dev);
+}
+
+static const struct i2c_device_id abp060mg_id_table[] = {
+ /* mbar & kPa variants (abp060m [60 mbar] == abp006k [6 kPa]) */
+ /* gage: */
+ { "abp060mg", ABP006KG }, { "abp006kg", ABP006KG },
+ { "abp100mg", ABP010KG }, { "abp010kg", ABP010KG },
+ { "abp160mg", ABP016KG }, { "abp016kg", ABP016KG },
+ { "abp250mg", ABP025KG }, { "abp025kg", ABP025KG },
+ { "abp400mg", ABP040KG }, { "abp040kg", ABP040KG },
+ { "abp600mg", ABP060KG }, { "abp060kg", ABP060KG },
+ { "abp001bg", ABP100KG }, { "abp100kg", ABP100KG },
+ { "abp1_6bg", ABP160KG }, { "abp160kg", ABP160KG },
+ { "abp2_5bg", ABP250KG }, { "abp250kg", ABP250KG },
+ { "abp004bg", ABP400KG }, { "abp400kg", ABP400KG },
+ { "abp006bg", ABP600KG }, { "abp600kg", ABP600KG },
+ { "abp010bg", ABP001GG }, { "abp001gg", ABP001GG },
+ /* differential: */
+ { "abp060md", ABP006KD }, { "abp006kd", ABP006KD },
+ { "abp100md", ABP010KD }, { "abp010kd", ABP010KD },
+ { "abp160md", ABP016KD }, { "abp016kd", ABP016KD },
+ { "abp250md", ABP025KD }, { "abp025kd", ABP025KD },
+ { "abp400md", ABP040KD }, { "abp040kd", ABP040KD },
+ { "abp600md", ABP060KD }, { "abp060kd", ABP060KD },
+ { "abp001bd", ABP100KD }, { "abp100kd", ABP100KD },
+ { "abp1_6bd", ABP160KD }, { "abp160kd", ABP160KD },
+ { "abp2_5bd", ABP250KD }, { "abp250kd", ABP250KD },
+ { "abp004bd", ABP400KD }, { "abp400kd", ABP400KD },
+ /* psi variants */
+ /* gage: */
+ { "abp001pg", ABP001PG },
+ { "abp005pg", ABP005PG },
+ { "abp015pg", ABP015PG },
+ { "abp030pg", ABP030PG },
+ { "abp060pg", ABP060PG },
+ { "abp100pg", ABP100PG },
+ { "abp150pg", ABP150PG },
+ /* differential: */
+ { "abp001pd", ABP001PD },
+ { "abp005pd", ABP005PD },
+ { "abp015pd", ABP015PD },
+ { "abp030pd", ABP030PD },
+ { "abp060pd", ABP060PD },
+ { /* empty */ },
+};
+MODULE_DEVICE_TABLE(i2c, abp060mg_id_table);
+
+static struct i2c_driver abp060mg_driver = {
+ .driver = {
+ .name = "abp060mg",
+ },
+ .probe = abp060mg_probe,
+ .id_table = abp060mg_id_table,
+};
+module_i2c_driver(abp060mg_driver);
+
+MODULE_AUTHOR("Marcin Malagowski <mrc@bourne.st>");
+MODULE_DESCRIPTION("Honeywell ABP pressure sensor driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/iio/pressure/mpl3115.c b/drivers/iio/pressure/mpl3115.c
index 6392d7b62841..cc3f84139157 100644
--- a/drivers/iio/pressure/mpl3115.c
+++ b/drivers/iio/pressure/mpl3115.c
@@ -82,8 +82,9 @@ static int mpl3115_read_raw(struct iio_dev *indio_dev,
switch (mask) {
case IIO_CHAN_INFO_RAW:
- if (iio_buffer_enabled(indio_dev))
- return -EBUSY;
+ ret = iio_device_claim_direct_mode(indio_dev);
+ if (ret)
+ return ret;
switch (chan->type) {
case IIO_PRESSURE: /* in 0.25 pascal / LSB */
@@ -91,32 +92,39 @@ static int mpl3115_read_raw(struct iio_dev *indio_dev,
ret = mpl3115_request(data);
if (ret < 0) {
mutex_unlock(&data->lock);
- return ret;
+ break;
}
ret = i2c_smbus_read_i2c_block_data(data->client,
MPL3115_OUT_PRESS, 3, (u8 *) &tmp);
mutex_unlock(&data->lock);
if (ret < 0)
- return ret;
+ break;
*val = be32_to_cpu(tmp) >> 12;
- return IIO_VAL_INT;
+ ret = IIO_VAL_INT;
+ break;
case IIO_TEMP: /* in 0.0625 celsius / LSB */
mutex_lock(&data->lock);
ret = mpl3115_request(data);
if (ret < 0) {
mutex_unlock(&data->lock);
- return ret;
+ break;
}
ret = i2c_smbus_read_i2c_block_data(data->client,
MPL3115_OUT_TEMP, 2, (u8 *) &tmp);
mutex_unlock(&data->lock);
if (ret < 0)
- return ret;
+ break;
*val = sign_extend32(be32_to_cpu(tmp) >> 20, 11);
- return IIO_VAL_INT;
+ ret = IIO_VAL_INT;
+ break;
default:
- return -EINVAL;
+ ret = -EINVAL;
+ break;
}
+
+ iio_device_release_direct_mode(indio_dev);
+ return ret;
+
case IIO_CHAN_INFO_SCALE:
switch (chan->type) {
case IIO_PRESSURE:
diff --git a/drivers/iio/pressure/ms5611_core.c b/drivers/iio/pressure/ms5611_core.c
index a74ed1f0c880..6bd53e702667 100644
--- a/drivers/iio/pressure/ms5611_core.c
+++ b/drivers/iio/pressure/ms5611_core.c
@@ -392,17 +392,14 @@ static int ms5611_init(struct iio_dev *indio_dev)
/* Enable attached regulator if any. */
st->vdd = devm_regulator_get(indio_dev->dev.parent, "vdd");
- if (!IS_ERR(st->vdd)) {
- ret = regulator_enable(st->vdd);
- if (ret) {
- dev_err(indio_dev->dev.parent,
- "failed to enable Vdd supply: %d\n", ret);
- return ret;
- }
- } else {
- ret = PTR_ERR(st->vdd);
- if (ret != -ENODEV)
- return ret;
+ if (IS_ERR(st->vdd))
+ return PTR_ERR(st->vdd);
+
+ ret = regulator_enable(st->vdd);
+ if (ret) {
+ dev_err(indio_dev->dev.parent,
+ "failed to enable Vdd supply: %d\n", ret);
+ return ret;
}
ret = ms5611_reset(indio_dev);
diff --git a/drivers/iio/pressure/st_pressure_core.c b/drivers/iio/pressure/st_pressure_core.c
index 55df9a75eb3a..e19e0787864c 100644
--- a/drivers/iio/pressure/st_pressure_core.c
+++ b/drivers/iio/pressure/st_pressure_core.c
@@ -112,115 +112,24 @@
#define ST_PRESS_1_OUT_XL_ADDR 0x28
#define ST_TEMP_1_OUT_L_ADDR 0x2b
-/*
- * CUSTOM VALUES FOR LPS331AP SENSOR
- * See LPS331AP datasheet:
- * http://www2.st.com/resource/en/datasheet/lps331ap.pdf
- */
-#define ST_PRESS_LPS331AP_WAI_EXP 0xbb
-#define ST_PRESS_LPS331AP_ODR_ADDR 0x20
-#define ST_PRESS_LPS331AP_ODR_MASK 0x70
-#define ST_PRESS_LPS331AP_ODR_AVL_1HZ_VAL 0x01
-#define ST_PRESS_LPS331AP_ODR_AVL_7HZ_VAL 0x05
-#define ST_PRESS_LPS331AP_ODR_AVL_13HZ_VAL 0x06
-#define ST_PRESS_LPS331AP_ODR_AVL_25HZ_VAL 0x07
-#define ST_PRESS_LPS331AP_PW_ADDR 0x20
-#define ST_PRESS_LPS331AP_PW_MASK 0x80
-#define ST_PRESS_LPS331AP_FS_ADDR 0x23
-#define ST_PRESS_LPS331AP_FS_MASK 0x30
-#define ST_PRESS_LPS331AP_BDU_ADDR 0x20
-#define ST_PRESS_LPS331AP_BDU_MASK 0x04
-#define ST_PRESS_LPS331AP_DRDY_IRQ_ADDR 0x22
-#define ST_PRESS_LPS331AP_DRDY_IRQ_INT1_MASK 0x04
-#define ST_PRESS_LPS331AP_DRDY_IRQ_INT2_MASK 0x20
-#define ST_PRESS_LPS331AP_IHL_IRQ_ADDR 0x22
-#define ST_PRESS_LPS331AP_IHL_IRQ_MASK 0x80
-#define ST_PRESS_LPS331AP_OD_IRQ_ADDR 0x22
-#define ST_PRESS_LPS331AP_OD_IRQ_MASK 0x40
-#define ST_PRESS_LPS331AP_MULTIREAD_BIT true
-
-/*
- * CUSTOM VALUES FOR THE OBSOLETE LPS001WP SENSOR
- */
-
/* LPS001WP pressure resolution */
#define ST_PRESS_LPS001WP_LSB_PER_MBAR 16UL
/* LPS001WP temperature resolution */
#define ST_PRESS_LPS001WP_LSB_PER_CELSIUS 64UL
-
-#define ST_PRESS_LPS001WP_WAI_EXP 0xba
-#define ST_PRESS_LPS001WP_ODR_ADDR 0x20
-#define ST_PRESS_LPS001WP_ODR_MASK 0x30
-#define ST_PRESS_LPS001WP_ODR_AVL_1HZ_VAL 0x01
-#define ST_PRESS_LPS001WP_ODR_AVL_7HZ_VAL 0x02
-#define ST_PRESS_LPS001WP_ODR_AVL_13HZ_VAL 0x03
-#define ST_PRESS_LPS001WP_PW_ADDR 0x20
-#define ST_PRESS_LPS001WP_PW_MASK 0x40
+/* LPS001WP pressure gain */
#define ST_PRESS_LPS001WP_FS_AVL_PRESS_GAIN \
(100000000UL / ST_PRESS_LPS001WP_LSB_PER_MBAR)
-#define ST_PRESS_LPS001WP_BDU_ADDR 0x20
-#define ST_PRESS_LPS001WP_BDU_MASK 0x04
-#define ST_PRESS_LPS001WP_MULTIREAD_BIT true
+/* LPS001WP pressure and temp L addresses */
#define ST_PRESS_LPS001WP_OUT_L_ADDR 0x28
#define ST_TEMP_LPS001WP_OUT_L_ADDR 0x2a
-/*
- * CUSTOM VALUES FOR LPS25H SENSOR
- * See LPS25H datasheet:
- * http://www2.st.com/resource/en/datasheet/lps25h.pdf
- */
-#define ST_PRESS_LPS25H_WAI_EXP 0xbd
-#define ST_PRESS_LPS25H_ODR_ADDR 0x20
-#define ST_PRESS_LPS25H_ODR_MASK 0x70
-#define ST_PRESS_LPS25H_ODR_AVL_1HZ_VAL 0x01
-#define ST_PRESS_LPS25H_ODR_AVL_7HZ_VAL 0x02
-#define ST_PRESS_LPS25H_ODR_AVL_13HZ_VAL 0x03
-#define ST_PRESS_LPS25H_ODR_AVL_25HZ_VAL 0x04
-#define ST_PRESS_LPS25H_PW_ADDR 0x20
-#define ST_PRESS_LPS25H_PW_MASK 0x80
-#define ST_PRESS_LPS25H_BDU_ADDR 0x20
-#define ST_PRESS_LPS25H_BDU_MASK 0x04
-#define ST_PRESS_LPS25H_DRDY_IRQ_ADDR 0x23
-#define ST_PRESS_LPS25H_DRDY_IRQ_INT1_MASK 0x01
-#define ST_PRESS_LPS25H_DRDY_IRQ_INT2_MASK 0x10
-#define ST_PRESS_LPS25H_IHL_IRQ_ADDR 0x22
-#define ST_PRESS_LPS25H_IHL_IRQ_MASK 0x80
-#define ST_PRESS_LPS25H_OD_IRQ_ADDR 0x22
-#define ST_PRESS_LPS25H_OD_IRQ_MASK 0x40
-#define ST_PRESS_LPS25H_MULTIREAD_BIT true
+/* LPS25H pressure and temp L addresses */
#define ST_PRESS_LPS25H_OUT_XL_ADDR 0x28
#define ST_TEMP_LPS25H_OUT_L_ADDR 0x2b
-/*
- * CUSTOM VALUES FOR LPS22HB SENSOR
- * See LPS22HB datasheet:
- * http://www2.st.com/resource/en/datasheet/lps22hb.pdf
- */
-
/* LPS22HB temperature sensitivity */
#define ST_PRESS_LPS22HB_LSB_PER_CELSIUS 100UL
-#define ST_PRESS_LPS22HB_WAI_EXP 0xb1
-#define ST_PRESS_LPS22HB_ODR_ADDR 0x10
-#define ST_PRESS_LPS22HB_ODR_MASK 0x70
-#define ST_PRESS_LPS22HB_ODR_AVL_1HZ_VAL 0x01
-#define ST_PRESS_LPS22HB_ODR_AVL_10HZ_VAL 0x02
-#define ST_PRESS_LPS22HB_ODR_AVL_25HZ_VAL 0x03
-#define ST_PRESS_LPS22HB_ODR_AVL_50HZ_VAL 0x04
-#define ST_PRESS_LPS22HB_ODR_AVL_75HZ_VAL 0x05
-#define ST_PRESS_LPS22HB_PW_ADDR 0x10
-#define ST_PRESS_LPS22HB_PW_MASK 0x70
-#define ST_PRESS_LPS22HB_BDU_ADDR 0x10
-#define ST_PRESS_LPS22HB_BDU_MASK 0x02
-#define ST_PRESS_LPS22HB_DRDY_IRQ_ADDR 0x12
-#define ST_PRESS_LPS22HB_DRDY_IRQ_INT1_MASK 0x04
-#define ST_PRESS_LPS22HB_DRDY_IRQ_INT2_MASK 0x08
-#define ST_PRESS_LPS22HB_IHL_IRQ_ADDR 0x12
-#define ST_PRESS_LPS22HB_IHL_IRQ_MASK 0x80
-#define ST_PRESS_LPS22HB_OD_IRQ_ADDR 0x12
-#define ST_PRESS_LPS22HB_OD_IRQ_MASK 0x40
-#define ST_PRESS_LPS22HB_MULTIREAD_BIT true
-
static const struct iio_chan_spec st_press_1_channels[] = {
{
.type = IIO_PRESSURE,
@@ -321,7 +230,12 @@ static const struct iio_chan_spec st_press_lps22hb_channels[] = {
static const struct st_sensor_settings st_press_sensors_settings[] = {
{
- .wai = ST_PRESS_LPS331AP_WAI_EXP,
+ /*
+ * CUSTOM VALUES FOR LPS331AP SENSOR
+ * See LPS331AP datasheet:
+ * http://www2.st.com/resource/en/datasheet/lps331ap.pdf
+ */
+ .wai = 0xbb,
.wai_addr = ST_SENSORS_DEFAULT_WAI_ADDRESS,
.sensors_supported = {
[0] = LPS331AP_PRESS_DEV_NAME,
@@ -329,24 +243,24 @@ static const struct st_sensor_settings st_press_sensors_settings[] = {
.ch = (struct iio_chan_spec *)st_press_1_channels,
.num_ch = ARRAY_SIZE(st_press_1_channels),
.odr = {
- .addr = ST_PRESS_LPS331AP_ODR_ADDR,
- .mask = ST_PRESS_LPS331AP_ODR_MASK,
+ .addr = 0x20,
+ .mask = 0x70,
.odr_avl = {
- { 1, ST_PRESS_LPS331AP_ODR_AVL_1HZ_VAL, },
- { 7, ST_PRESS_LPS331AP_ODR_AVL_7HZ_VAL, },
- { 13, ST_PRESS_LPS331AP_ODR_AVL_13HZ_VAL, },
- { 25, ST_PRESS_LPS331AP_ODR_AVL_25HZ_VAL, },
+ { .hz = 1, .value = 0x01 },
+ { .hz = 7, .value = 0x05 },
+ { .hz = 13, .value = 0x06 },
+ { .hz = 25, .value = 0x07 },
},
},
.pw = {
- .addr = ST_PRESS_LPS331AP_PW_ADDR,
- .mask = ST_PRESS_LPS331AP_PW_MASK,
+ .addr = 0x20,
+ .mask = 0x80,
.value_on = ST_SENSORS_DEFAULT_POWER_ON_VALUE,
.value_off = ST_SENSORS_DEFAULT_POWER_OFF_VALUE,
},
.fs = {
- .addr = ST_PRESS_LPS331AP_FS_ADDR,
- .mask = ST_PRESS_LPS331AP_FS_MASK,
+ .addr = 0x23,
+ .mask = 0x30,
.fs_avl = {
/*
* Pressure and temperature sensitivity values
@@ -360,24 +274,27 @@ static const struct st_sensor_settings st_press_sensors_settings[] = {
},
},
.bdu = {
- .addr = ST_PRESS_LPS331AP_BDU_ADDR,
- .mask = ST_PRESS_LPS331AP_BDU_MASK,
+ .addr = 0x20,
+ .mask = 0x04,
},
.drdy_irq = {
- .addr = ST_PRESS_LPS331AP_DRDY_IRQ_ADDR,
- .mask_int1 = ST_PRESS_LPS331AP_DRDY_IRQ_INT1_MASK,
- .mask_int2 = ST_PRESS_LPS331AP_DRDY_IRQ_INT2_MASK,
- .addr_ihl = ST_PRESS_LPS331AP_IHL_IRQ_ADDR,
- .mask_ihl = ST_PRESS_LPS331AP_IHL_IRQ_MASK,
- .addr_od = ST_PRESS_LPS331AP_OD_IRQ_ADDR,
- .mask_od = ST_PRESS_LPS331AP_OD_IRQ_MASK,
+ .addr = 0x22,
+ .mask_int1 = 0x04,
+ .mask_int2 = 0x20,
+ .addr_ihl = 0x22,
+ .mask_ihl = 0x80,
+ .addr_od = 0x22,
+ .mask_od = 0x40,
.addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR,
},
- .multi_read_bit = ST_PRESS_LPS331AP_MULTIREAD_BIT,
+ .multi_read_bit = true,
.bootime = 2,
},
{
- .wai = ST_PRESS_LPS001WP_WAI_EXP,
+ /*
+ * CUSTOM VALUES FOR LPS001WP SENSOR
+ */
+ .wai = 0xba,
.wai_addr = ST_SENSORS_DEFAULT_WAI_ADDRESS,
.sensors_supported = {
[0] = LPS001WP_PRESS_DEV_NAME,
@@ -385,17 +302,17 @@ static const struct st_sensor_settings st_press_sensors_settings[] = {
.ch = (struct iio_chan_spec *)st_press_lps001wp_channels,
.num_ch = ARRAY_SIZE(st_press_lps001wp_channels),
.odr = {
- .addr = ST_PRESS_LPS001WP_ODR_ADDR,
- .mask = ST_PRESS_LPS001WP_ODR_MASK,
+ .addr = 0x20,
+ .mask = 0x30,
.odr_avl = {
- { 1, ST_PRESS_LPS001WP_ODR_AVL_1HZ_VAL, },
- { 7, ST_PRESS_LPS001WP_ODR_AVL_7HZ_VAL, },
- { 13, ST_PRESS_LPS001WP_ODR_AVL_13HZ_VAL, },
+ { .hz = 1, .value = 0x01 },
+ { .hz = 7, .value = 0x02 },
+ { .hz = 13, .value = 0x03 },
},
},
.pw = {
- .addr = ST_PRESS_LPS001WP_PW_ADDR,
- .mask = ST_PRESS_LPS001WP_PW_MASK,
+ .addr = 0x20,
+ .mask = 0x40,
.value_on = ST_SENSORS_DEFAULT_POWER_ON_VALUE,
.value_off = ST_SENSORS_DEFAULT_POWER_OFF_VALUE,
},
@@ -413,17 +330,22 @@ static const struct st_sensor_settings st_press_sensors_settings[] = {
},
},
.bdu = {
- .addr = ST_PRESS_LPS001WP_BDU_ADDR,
- .mask = ST_PRESS_LPS001WP_BDU_MASK,
+ .addr = 0x20,
+ .mask = 0x04,
},
.drdy_irq = {
.addr = 0,
},
- .multi_read_bit = ST_PRESS_LPS001WP_MULTIREAD_BIT,
+ .multi_read_bit = true,
.bootime = 2,
},
{
- .wai = ST_PRESS_LPS25H_WAI_EXP,
+ /*
+ * CUSTOM VALUES FOR LPS25H SENSOR
+ * See LPS25H datasheet:
+ * http://www2.st.com/resource/en/datasheet/lps25h.pdf
+ */
+ .wai = 0xbd,
.wai_addr = ST_SENSORS_DEFAULT_WAI_ADDRESS,
.sensors_supported = {
[0] = LPS25H_PRESS_DEV_NAME,
@@ -431,18 +353,18 @@ static const struct st_sensor_settings st_press_sensors_settings[] = {
.ch = (struct iio_chan_spec *)st_press_1_channels,
.num_ch = ARRAY_SIZE(st_press_1_channels),
.odr = {
- .addr = ST_PRESS_LPS25H_ODR_ADDR,
- .mask = ST_PRESS_LPS25H_ODR_MASK,
+ .addr = 0x20,
+ .mask = 0x70,
.odr_avl = {
- { 1, ST_PRESS_LPS25H_ODR_AVL_1HZ_VAL, },
- { 7, ST_PRESS_LPS25H_ODR_AVL_7HZ_VAL, },
- { 13, ST_PRESS_LPS25H_ODR_AVL_13HZ_VAL, },
- { 25, ST_PRESS_LPS25H_ODR_AVL_25HZ_VAL, },
+ { .hz = 1, .value = 0x01 },
+ { .hz = 7, .value = 0x02 },
+ { .hz = 13, .value = 0x03 },
+ { .hz = 25, .value = 0x04 },
},
},
.pw = {
- .addr = ST_PRESS_LPS25H_PW_ADDR,
- .mask = ST_PRESS_LPS25H_PW_MASK,
+ .addr = 0x20,
+ .mask = 0x80,
.value_on = ST_SENSORS_DEFAULT_POWER_ON_VALUE,
.value_off = ST_SENSORS_DEFAULT_POWER_OFF_VALUE,
},
@@ -460,24 +382,29 @@ static const struct st_sensor_settings st_press_sensors_settings[] = {
},
},
.bdu = {
- .addr = ST_PRESS_LPS25H_BDU_ADDR,
- .mask = ST_PRESS_LPS25H_BDU_MASK,
+ .addr = 0x20,
+ .mask = 0x04,
},
.drdy_irq = {
- .addr = ST_PRESS_LPS25H_DRDY_IRQ_ADDR,
- .mask_int1 = ST_PRESS_LPS25H_DRDY_IRQ_INT1_MASK,
- .mask_int2 = ST_PRESS_LPS25H_DRDY_IRQ_INT2_MASK,
- .addr_ihl = ST_PRESS_LPS25H_IHL_IRQ_ADDR,
- .mask_ihl = ST_PRESS_LPS25H_IHL_IRQ_MASK,
- .addr_od = ST_PRESS_LPS25H_OD_IRQ_ADDR,
- .mask_od = ST_PRESS_LPS25H_OD_IRQ_MASK,
+ .addr = 0x23,
+ .mask_int1 = 0x01,
+ .mask_int2 = 0x10,
+ .addr_ihl = 0x22,
+ .mask_ihl = 0x80,
+ .addr_od = 0x22,
+ .mask_od = 0x40,
.addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR,
},
- .multi_read_bit = ST_PRESS_LPS25H_MULTIREAD_BIT,
+ .multi_read_bit = true,
.bootime = 2,
},
{
- .wai = ST_PRESS_LPS22HB_WAI_EXP,
+ /*
+ * CUSTOM VALUES FOR LPS22HB SENSOR
+ * See LPS22HB datasheet:
+ * http://www2.st.com/resource/en/datasheet/lps22hb.pdf
+ */
+ .wai = 0xb1,
.wai_addr = ST_SENSORS_DEFAULT_WAI_ADDRESS,
.sensors_supported = {
[0] = LPS22HB_PRESS_DEV_NAME,
@@ -485,19 +412,19 @@ static const struct st_sensor_settings st_press_sensors_settings[] = {
.ch = (struct iio_chan_spec *)st_press_lps22hb_channels,
.num_ch = ARRAY_SIZE(st_press_lps22hb_channels),
.odr = {
- .addr = ST_PRESS_LPS22HB_ODR_ADDR,
- .mask = ST_PRESS_LPS22HB_ODR_MASK,
+ .addr = 0x10,
+ .mask = 0x70,
.odr_avl = {
- { 1, ST_PRESS_LPS22HB_ODR_AVL_1HZ_VAL, },
- { 10, ST_PRESS_LPS22HB_ODR_AVL_10HZ_VAL, },
- { 25, ST_PRESS_LPS22HB_ODR_AVL_25HZ_VAL, },
- { 50, ST_PRESS_LPS22HB_ODR_AVL_50HZ_VAL, },
- { 75, ST_PRESS_LPS22HB_ODR_AVL_75HZ_VAL, },
+ { .hz = 1, .value = 0x01 },
+ { .hz = 10, .value = 0x02 },
+ { .hz = 25, .value = 0x03 },
+ { .hz = 50, .value = 0x04 },
+ { .hz = 75, .value = 0x05 },
},
},
.pw = {
- .addr = ST_PRESS_LPS22HB_PW_ADDR,
- .mask = ST_PRESS_LPS22HB_PW_MASK,
+ .addr = 0x10,
+ .mask = 0x70,
.value_off = ST_SENSORS_DEFAULT_POWER_OFF_VALUE,
},
.fs = {
@@ -514,20 +441,20 @@ static const struct st_sensor_settings st_press_sensors_settings[] = {
},
},
.bdu = {
- .addr = ST_PRESS_LPS22HB_BDU_ADDR,
- .mask = ST_PRESS_LPS22HB_BDU_MASK,
+ .addr = 0x10,
+ .mask = 0x02,
},
.drdy_irq = {
- .addr = ST_PRESS_LPS22HB_DRDY_IRQ_ADDR,
- .mask_int1 = ST_PRESS_LPS22HB_DRDY_IRQ_INT1_MASK,
- .mask_int2 = ST_PRESS_LPS22HB_DRDY_IRQ_INT2_MASK,
- .addr_ihl = ST_PRESS_LPS22HB_IHL_IRQ_ADDR,
- .mask_ihl = ST_PRESS_LPS22HB_IHL_IRQ_MASK,
- .addr_od = ST_PRESS_LPS22HB_OD_IRQ_ADDR,
- .mask_od = ST_PRESS_LPS22HB_OD_IRQ_MASK,
+ .addr = 0x12,
+ .mask_int1 = 0x04,
+ .mask_int2 = 0x08,
+ .addr_ihl = 0x12,
+ .mask_ihl = 0x80,
+ .addr_od = 0x12,
+ .mask_od = 0x40,
.addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR,
},
- .multi_read_bit = ST_PRESS_LPS22HB_MULTIREAD_BIT,
+ .multi_read_bit = true,
},
};
diff --git a/drivers/iio/pressure/zpa2326.c b/drivers/iio/pressure/zpa2326.c
index 19d2eb46fda6..c720c3ac0b9b 100644
--- a/drivers/iio/pressure/zpa2326.c
+++ b/drivers/iio/pressure/zpa2326.c
@@ -147,12 +147,8 @@ struct zpa2326_private {
#define zpa2326_warn(_idev, _format, _arg...) \
dev_warn(_idev->dev.parent, _format, ##_arg)
-#ifdef DEBUG
#define zpa2326_dbg(_idev, _format, _arg...) \
dev_dbg(_idev->dev.parent, _format, ##_arg)
-#else
-#define zpa2326_dbg(_idev, _format, _arg...)
-#endif
bool zpa2326_isreg_writeable(struct device *dev, unsigned int reg)
{
diff --git a/drivers/iio/proximity/pulsedlight-lidar-lite-v2.c b/drivers/iio/proximity/pulsedlight-lidar-lite-v2.c
index 3141c3c161bb..1fa9eefa0982 100644
--- a/drivers/iio/proximity/pulsedlight-lidar-lite-v2.c
+++ b/drivers/iio/proximity/pulsedlight-lidar-lite-v2.c
@@ -301,8 +301,6 @@ static int lidar_probe(struct i2c_client *client,
if (ret)
goto error_unreg_buffer;
pm_runtime_enable(&client->dev);
-
- pm_runtime_mark_last_busy(&client->dev);
pm_runtime_idle(&client->dev);
return 0;
diff --git a/drivers/infiniband/Kconfig b/drivers/infiniband/Kconfig
index fb3fb89640e5..670917387eda 100644
--- a/drivers/infiniband/Kconfig
+++ b/drivers/infiniband/Kconfig
@@ -73,6 +73,7 @@ source "drivers/infiniband/hw/mlx4/Kconfig"
source "drivers/infiniband/hw/mlx5/Kconfig"
source "drivers/infiniband/hw/nes/Kconfig"
source "drivers/infiniband/hw/ocrdma/Kconfig"
+source "drivers/infiniband/hw/vmw_pvrdma/Kconfig"
source "drivers/infiniband/hw/usnic/Kconfig"
source "drivers/infiniband/hw/hns/Kconfig"
diff --git a/drivers/infiniband/core/agent.c b/drivers/infiniband/core/agent.c
index 4fa524dfb6cf..11dacd97a667 100644
--- a/drivers/infiniband/core/agent.c
+++ b/drivers/infiniband/core/agent.c
@@ -156,7 +156,6 @@ int ib_agent_port_open(struct ib_device *device, int port_num)
/* Create new device info */
port_priv = kzalloc(sizeof *port_priv, GFP_KERNEL);
if (!port_priv) {
- dev_err(&device->dev, "No memory for ib_agent_port_private\n");
ret = -ENOMEM;
goto error1;
}
diff --git a/drivers/infiniband/core/cache.c b/drivers/infiniband/core/cache.c
index 1a2984c28b95..ae04826e82fc 100644
--- a/drivers/infiniband/core/cache.c
+++ b/drivers/infiniband/core/cache.c
@@ -770,12 +770,8 @@ static int _gid_table_setup_one(struct ib_device *ib_dev)
int err = 0;
table = kcalloc(ib_dev->phys_port_cnt, sizeof(*table), GFP_KERNEL);
-
- if (!table) {
- pr_warn("failed to allocate ib gid cache for %s\n",
- ib_dev->name);
+ if (!table)
return -ENOMEM;
- }
for (port = 0; port < ib_dev->phys_port_cnt; port++) {
u8 rdma_port = port + rdma_start_port(ib_dev);
@@ -1170,14 +1166,13 @@ int ib_cache_setup_one(struct ib_device *device)
GFP_KERNEL);
if (!device->cache.pkey_cache ||
!device->cache.lmc_cache) {
- pr_warn("Couldn't allocate cache for %s\n", device->name);
- return -ENOMEM;
+ err = -ENOMEM;
+ goto free;
}
err = gid_table_setup_one(device);
if (err)
- /* Allocated memory will be cleaned in the release function */
- return err;
+ goto free;
for (p = 0; p <= rdma_end_port(device) - rdma_start_port(device); ++p)
ib_cache_update(device, p + rdma_start_port(device));
@@ -1192,6 +1187,9 @@ int ib_cache_setup_one(struct ib_device *device)
err:
gid_table_cleanup_one(device);
+free:
+ kfree(device->cache.pkey_cache);
+ kfree(device->cache.lmc_cache);
return err;
}
diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
index 71c7c4c328ef..cf1edfa1cbac 100644
--- a/drivers/infiniband/core/cm.c
+++ b/drivers/infiniband/core/cm.c
@@ -57,6 +57,54 @@ MODULE_AUTHOR("Sean Hefty");
MODULE_DESCRIPTION("InfiniBand CM");
MODULE_LICENSE("Dual BSD/GPL");
+static const char * const ibcm_rej_reason_strs[] = {
+ [IB_CM_REJ_NO_QP] = "no QP",
+ [IB_CM_REJ_NO_EEC] = "no EEC",
+ [IB_CM_REJ_NO_RESOURCES] = "no resources",
+ [IB_CM_REJ_TIMEOUT] = "timeout",
+ [IB_CM_REJ_UNSUPPORTED] = "unsupported",
+ [IB_CM_REJ_INVALID_COMM_ID] = "invalid comm ID",
+ [IB_CM_REJ_INVALID_COMM_INSTANCE] = "invalid comm instance",
+ [IB_CM_REJ_INVALID_SERVICE_ID] = "invalid service ID",
+ [IB_CM_REJ_INVALID_TRANSPORT_TYPE] = "invalid transport type",
+ [IB_CM_REJ_STALE_CONN] = "stale conn",
+ [IB_CM_REJ_RDC_NOT_EXIST] = "RDC not exist",
+ [IB_CM_REJ_INVALID_GID] = "invalid GID",
+ [IB_CM_REJ_INVALID_LID] = "invalid LID",
+ [IB_CM_REJ_INVALID_SL] = "invalid SL",
+ [IB_CM_REJ_INVALID_TRAFFIC_CLASS] = "invalid traffic class",
+ [IB_CM_REJ_INVALID_HOP_LIMIT] = "invalid hop limit",
+ [IB_CM_REJ_INVALID_PACKET_RATE] = "invalid packet rate",
+ [IB_CM_REJ_INVALID_ALT_GID] = "invalid alt GID",
+ [IB_CM_REJ_INVALID_ALT_LID] = "invalid alt LID",
+ [IB_CM_REJ_INVALID_ALT_SL] = "invalid alt SL",
+ [IB_CM_REJ_INVALID_ALT_TRAFFIC_CLASS] = "invalid alt traffic class",
+ [IB_CM_REJ_INVALID_ALT_HOP_LIMIT] = "invalid alt hop limit",
+ [IB_CM_REJ_INVALID_ALT_PACKET_RATE] = "invalid alt packet rate",
+ [IB_CM_REJ_PORT_CM_REDIRECT] = "port CM redirect",
+ [IB_CM_REJ_PORT_REDIRECT] = "port redirect",
+ [IB_CM_REJ_INVALID_MTU] = "invalid MTU",
+ [IB_CM_REJ_INSUFFICIENT_RESP_RESOURCES] = "insufficient resp resources",
+ [IB_CM_REJ_CONSUMER_DEFINED] = "consumer defined",
+ [IB_CM_REJ_INVALID_RNR_RETRY] = "invalid RNR retry",
+ [IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID] = "duplicate local comm ID",
+ [IB_CM_REJ_INVALID_CLASS_VERSION] = "invalid class version",
+ [IB_CM_REJ_INVALID_FLOW_LABEL] = "invalid flow label",
+ [IB_CM_REJ_INVALID_ALT_FLOW_LABEL] = "invalid alt flow label",
+};
+
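+ /* translate an IB CM REJ reason code into a human-readable string for logging */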
+const char *__attribute_const__ ibcm_reject_msg(int reason)
+{
+ size_t index = reason;
+
+ if (index < ARRAY_SIZE(ibcm_rej_reason_strs) &&
+ ibcm_rej_reason_strs[index])
+ return ibcm_rej_reason_strs[index];
+ else
+ return "unrecognized reason";
+}
+EXPORT_SYMBOL(ibcm_reject_msg);
+
static void cm_add_one(struct ib_device *device);
static void cm_remove_one(struct ib_device *device, void *client_data);
@@ -1582,6 +1630,7 @@ static struct cm_id_private * cm_match_req(struct cm_work *work,
struct cm_id_private *listen_cm_id_priv, *cur_cm_id_priv;
struct cm_timewait_info *timewait_info;
struct cm_req_msg *req_msg;
+ struct ib_cm_id *cm_id;
req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad;
@@ -1603,10 +1652,18 @@ static struct cm_id_private * cm_match_req(struct cm_work *work,
timewait_info = cm_insert_remote_qpn(cm_id_priv->timewait_info);
if (timewait_info) {
cm_cleanup_timewait(cm_id_priv->timewait_info);
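+ /* the matching timewait entry marks a stale connection; grab its owner so a DREQ can follow the REJ */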
+ cur_cm_id_priv = cm_get_id(timewait_info->work.local_id,
+ timewait_info->work.remote_id);
+
spin_unlock_irq(&cm.lock);
cm_issue_rej(work->port, work->mad_recv_wc,
IB_CM_REJ_STALE_CONN, CM_MSG_RESPONSE_REQ,
NULL, 0);
+ if (cur_cm_id_priv) {
+ cm_id = &cur_cm_id_priv->id;
+ ib_send_cm_dreq(cm_id, NULL, 0);
+ cm_deref_id(cur_cm_id_priv);
+ }
return NULL;
}
@@ -1984,6 +2041,9 @@ static int cm_rep_handler(struct cm_work *work)
struct cm_id_private *cm_id_priv;
struct cm_rep_msg *rep_msg;
int ret;
+ struct cm_id_private *cur_cm_id_priv;
+ struct ib_cm_id *cm_id;
+ struct cm_timewait_info *timewait_info;
rep_msg = (struct cm_rep_msg *)work->mad_recv_wc->recv_buf.mad;
cm_id_priv = cm_acquire_id(rep_msg->remote_comm_id, 0);
@@ -2018,16 +2078,26 @@ static int cm_rep_handler(struct cm_work *work)
goto error;
}
/* Check for a stale connection. */
- if (cm_insert_remote_qpn(cm_id_priv->timewait_info)) {
+ timewait_info = cm_insert_remote_qpn(cm_id_priv->timewait_info);
+ if (timewait_info) {
rb_erase(&cm_id_priv->timewait_info->remote_id_node,
&cm.remote_id_table);
cm_id_priv->timewait_info->inserted_remote_id = 0;
+ cur_cm_id_priv = cm_get_id(timewait_info->work.local_id,
+ timewait_info->work.remote_id);
+
spin_unlock(&cm.lock);
spin_unlock_irq(&cm_id_priv->lock);
cm_issue_rej(work->port, work->mad_recv_wc,
IB_CM_REJ_STALE_CONN, CM_MSG_RESPONSE_REP,
NULL, 0);
ret = -EINVAL;
+ if (cur_cm_id_priv) {
+ cm_id = &cur_cm_id_priv->id;
+ ib_send_cm_dreq(cm_id, NULL, 0);
+ cm_deref_id(cur_cm_id_priv);
+ }
+
goto error;
}
spin_unlock(&cm.lock);
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index 2a6fc47a1dfb..e7dcfac877ca 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -101,6 +101,49 @@ const char *__attribute_const__ rdma_event_msg(enum rdma_cm_event_type event)
}
EXPORT_SYMBOL(rdma_event_msg);
+const char *__attribute_const__ rdma_reject_msg(struct rdma_cm_id *id,
+ int reason)
+{
+ if (rdma_ib_or_roce(id->device, id->port_num))
+ return ibcm_reject_msg(reason);
+
+ if (rdma_protocol_iwarp(id->device, id->port_num))
+ return iwcm_reject_msg(reason);
+
+ WARN_ON_ONCE(1);
+ return "unrecognized transport";
+}
+EXPORT_SYMBOL(rdma_reject_msg);
+
+bool rdma_is_consumer_reject(struct rdma_cm_id *id, int reason)
+{
+ if (rdma_ib_or_roce(id->device, id->port_num))
+ return reason == IB_CM_REJ_CONSUMER_DEFINED;
+
+ if (rdma_protocol_iwarp(id->device, id->port_num))
+ return reason == -ECONNREFUSED;
+
+ WARN_ON_ONCE(1);
+ return false;
+}
+EXPORT_SYMBOL(rdma_is_consumer_reject);
+
+const void *rdma_consumer_reject_data(struct rdma_cm_id *id,
+ struct rdma_cm_event *ev, u8 *data_len)
+{
+ const void *p;
+
+ if (rdma_is_consumer_reject(id, ev->status)) {
+ *data_len = ev->param.conn.private_data_len;
+ p = ev->param.conn.private_data;
+ } else {
+ *data_len = 0;
+ p = NULL;
+ }
+ return p;
+}
+EXPORT_SYMBOL(rdma_consumer_reject_data);
+
static void cma_add_one(struct ib_device *device);
static void cma_remove_one(struct ib_device *device, void *client_data);
@@ -116,7 +159,7 @@ static LIST_HEAD(dev_list);
static LIST_HEAD(listen_any_list);
static DEFINE_MUTEX(lock);
static struct workqueue_struct *cma_wq;
-static int cma_pernet_id;
+static unsigned int cma_pernet_id;
struct cma_pernet {
struct idr tcp_ps;
diff --git a/drivers/infiniband/core/core_priv.h b/drivers/infiniband/core/core_priv.h
index 19d499dcab76..d29372624f3a 100644
--- a/drivers/infiniband/core/core_priv.h
+++ b/drivers/infiniband/core/core_priv.h
@@ -72,9 +72,6 @@ void ib_device_unregister_sysfs(struct ib_device *device);
void ib_cache_setup(void);
void ib_cache_cleanup(void);
-int ib_resolve_eth_dmac(struct ib_qp *qp,
- struct ib_qp_attr *qp_attr, int *qp_attr_mask);
-
typedef void (*roce_netdev_callback)(struct ib_device *device, u8 port,
struct net_device *idev, void *cookie);
@@ -127,14 +124,7 @@ void ib_cache_release_one(struct ib_device *device);
static inline bool rdma_is_upper_dev_rcu(struct net_device *dev,
struct net_device *upper)
{
- struct net_device *_upper = NULL;
- struct list_head *iter;
-
- netdev_for_each_all_upper_dev_rcu(dev, _upper, iter)
- if (_upper == upper)
- break;
-
- return _upper == upper;
+ return netdev_has_upper_dev_all_rcu(dev, upper);
}
int addr_init(void);
diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c
index 760ef603a468..571974cd3919 100644
--- a/drivers/infiniband/core/device.c
+++ b/drivers/infiniband/core/device.c
@@ -254,11 +254,8 @@ static int add_client_context(struct ib_device *device, struct ib_client *client
unsigned long flags;
context = kmalloc(sizeof *context, GFP_KERNEL);
- if (!context) {
- pr_warn("Couldn't allocate client context for %s/%s\n",
- device->name, client->name);
+ if (!context)
return -ENOMEM;
- }
context->client = client;
context->data = NULL;
diff --git a/drivers/infiniband/core/fmr_pool.c b/drivers/infiniband/core/fmr_pool.c
index cdbb1f1a6d97..cdfad5f26212 100644
--- a/drivers/infiniband/core/fmr_pool.c
+++ b/drivers/infiniband/core/fmr_pool.c
@@ -247,7 +247,6 @@ struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd *pd,
kmalloc(IB_FMR_HASH_SIZE * sizeof *pool->cache_bucket,
GFP_KERNEL);
if (!pool->cache_bucket) {
- pr_warn(PFX "Failed to allocate cache in pool\n");
ret = -ENOMEM;
goto out_free_pool;
}
diff --git a/drivers/infiniband/core/iwcm.c b/drivers/infiniband/core/iwcm.c
index 5495e22839a7..31661b5c1743 100644
--- a/drivers/infiniband/core/iwcm.c
+++ b/drivers/infiniband/core/iwcm.c
@@ -59,6 +59,27 @@ MODULE_AUTHOR("Tom Tucker");
MODULE_DESCRIPTION("iWARP CM");
MODULE_LICENSE("Dual BSD/GPL");
+static const char * const iwcm_rej_reason_strs[] = {
+ [ECONNRESET] = "reset by remote host",
+ [ECONNREFUSED] = "refused by remote application",
+ [ETIMEDOUT] = "setup timeout",
+};
+
+const char *__attribute_const__ iwcm_reject_msg(int reason)
+{
+ size_t index;
+
+ /* iWARP uses negative errnos */
+ index = -reason;
+
+ if (index < ARRAY_SIZE(iwcm_rej_reason_strs) &&
+ iwcm_rej_reason_strs[index])
+ return iwcm_rej_reason_strs[index];
+ else
+ return "unrecognized reason";
+}
+EXPORT_SYMBOL(iwcm_reject_msg);
+
static struct ibnl_client_cbs iwcm_nl_cb_table[] = {
[RDMA_NL_IWPM_REG_PID] = {.dump = iwpm_register_pid_cb},
[RDMA_NL_IWPM_ADD_MAPPING] = {.dump = iwpm_add_mapping_cb},
diff --git a/drivers/infiniband/core/iwpm_msg.c b/drivers/infiniband/core/iwpm_msg.c
index 1c41b95cefec..a0e7c16d8bd8 100644
--- a/drivers/infiniband/core/iwpm_msg.c
+++ b/drivers/infiniband/core/iwpm_msg.c
@@ -604,7 +604,6 @@ int iwpm_remote_info_cb(struct sk_buff *skb, struct netlink_callback *cb)
}
rem_info = kzalloc(sizeof(struct iwpm_remote_info), GFP_ATOMIC);
if (!rem_info) {
- pr_err("%s: Unable to allocate a remote info\n", __func__);
ret = -ENOMEM;
return ret;
}
diff --git a/drivers/infiniband/core/iwpm_util.c b/drivers/infiniband/core/iwpm_util.c
index ade71e7f0131..3ef51a96bbf1 100644
--- a/drivers/infiniband/core/iwpm_util.c
+++ b/drivers/infiniband/core/iwpm_util.c
@@ -62,7 +62,6 @@ int iwpm_init(u8 nl_client)
sizeof(struct hlist_head), GFP_KERNEL);
if (!iwpm_hash_bucket) {
ret = -ENOMEM;
- pr_err("%s Unable to create mapinfo hash table\n", __func__);
goto init_exit;
}
iwpm_reminfo_bucket = kzalloc(IWPM_REMINFO_HASH_SIZE *
@@ -70,7 +69,6 @@ int iwpm_init(u8 nl_client)
if (!iwpm_reminfo_bucket) {
kfree(iwpm_hash_bucket);
ret = -ENOMEM;
- pr_err("%s Unable to create reminfo hash table\n", __func__);
goto init_exit;
}
}
@@ -128,10 +126,9 @@ int iwpm_create_mapinfo(struct sockaddr_storage *local_sockaddr,
if (!iwpm_valid_client(nl_client))
return ret;
map_info = kzalloc(sizeof(struct iwpm_mapping_info), GFP_KERNEL);
- if (!map_info) {
- pr_err("%s: Unable to allocate a mapping info\n", __func__);
+ if (!map_info)
return -ENOMEM;
- }
+
memcpy(&map_info->local_sockaddr, local_sockaddr,
sizeof(struct sockaddr_storage));
memcpy(&map_info->mapped_sockaddr, mapped_sockaddr,
@@ -309,10 +306,9 @@ struct iwpm_nlmsg_request *iwpm_get_nlmsg_request(__u32 nlmsg_seq,
unsigned long flags;
nlmsg_request = kzalloc(sizeof(struct iwpm_nlmsg_request), gfp);
- if (!nlmsg_request) {
- pr_err("%s Unable to allocate a nlmsg_request\n", __func__);
+ if (!nlmsg_request)
return NULL;
- }
+
spin_lock_irqsave(&iwpm_nlmsg_req_lock, flags);
list_add_tail(&nlmsg_request->inprocess_list, &iwpm_nlmsg_req_list);
spin_unlock_irqrestore(&iwpm_nlmsg_req_lock, flags);
diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c
index 40cbd6bdb73b..a009f7132c73 100644
--- a/drivers/infiniband/core/mad.c
+++ b/drivers/infiniband/core/mad.c
@@ -769,7 +769,7 @@ static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
* If we are at the start of the LID routed part, don't update the
* hop_ptr or hop_cnt. See section 14.2.2, Vol 1 IB spec.
*/
- if (opa && smp->class_version == OPA_SMP_CLASS_VERSION) {
+ if (opa && smp->class_version == OPA_SM_CLASS_VERSION) {
u32 opa_drslid;
if ((opa_get_smp_direction(opa_smp)
@@ -816,7 +816,6 @@ static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
local = kmalloc(sizeof *local, GFP_ATOMIC);
if (!local) {
ret = -ENOMEM;
- dev_err(&device->dev, "No memory for ib_mad_local_private\n");
goto out;
}
local->mad_priv = NULL;
@@ -824,7 +823,6 @@ static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
mad_priv = alloc_mad_private(mad_size, GFP_ATOMIC);
if (!mad_priv) {
ret = -ENOMEM;
- dev_err(&device->dev, "No memory for local response MAD\n");
kfree(local);
goto out;
}
@@ -947,9 +945,6 @@ static int alloc_send_rmpp_list(struct ib_mad_send_wr_private *send_wr,
for (left = send_buf->data_len + pad; left > 0; left -= seg_size) {
seg = kmalloc(sizeof (*seg) + seg_size, gfp_mask);
if (!seg) {
- dev_err(&send_buf->mad_agent->device->dev,
- "alloc_send_rmpp_segs: RMPP mem alloc failed for len %zd, gfp %#x\n",
- sizeof (*seg) + seg_size, gfp_mask);
free_send_rmpp_list(send_wr);
return -ENOMEM;
}
@@ -1362,12 +1357,7 @@ static int allocate_method_table(struct ib_mad_mgmt_method_table **method)
{
/* Allocate management method table */
*method = kzalloc(sizeof **method, GFP_ATOMIC);
- if (!*method) {
- pr_err("No memory for ib_mad_mgmt_method_table\n");
- return -ENOMEM;
- }
-
- return 0;
+ return (*method) ? 0 : (-ENOMEM);
}
/*
@@ -1458,8 +1448,6 @@ static int add_nonoui_reg_req(struct ib_mad_reg_req *mad_reg_req,
/* Allocate management class table for "new" class version */
*class = kzalloc(sizeof **class, GFP_ATOMIC);
if (!*class) {
- dev_err(&agent_priv->agent.device->dev,
- "No memory for ib_mad_mgmt_class_table\n");
ret = -ENOMEM;
goto error1;
}
@@ -1524,22 +1512,16 @@ static int add_oui_reg_req(struct ib_mad_reg_req *mad_reg_req,
if (!*vendor_table) {
/* Allocate mgmt vendor class table for "new" class version */
vendor = kzalloc(sizeof *vendor, GFP_ATOMIC);
- if (!vendor) {
- dev_err(&agent_priv->agent.device->dev,
- "No memory for ib_mad_mgmt_vendor_class_table\n");
+ if (!vendor)
goto error1;
- }
*vendor_table = vendor;
}
if (!(*vendor_table)->vendor_class[vclass]) {
/* Allocate table for this management vendor class */
vendor_class = kzalloc(sizeof *vendor_class, GFP_ATOMIC);
- if (!vendor_class) {
- dev_err(&agent_priv->agent.device->dev,
- "No memory for ib_mad_mgmt_vendor_class\n");
+ if (!vendor_class)
goto error2;
- }
(*vendor_table)->vendor_class[vclass] = vendor_class;
}
@@ -1746,7 +1728,7 @@ find_mad_agent(struct ib_mad_port_private *port_priv,
if (!class)
goto out;
if (convert_mgmt_class(mad_hdr->mgmt_class) >=
- IB_MGMT_MAX_METHODS)
+ ARRAY_SIZE(class->method_table))
goto out;
method = class->method_table[convert_mgmt_class(
mad_hdr->mgmt_class)];
@@ -2167,7 +2149,7 @@ handle_smi(struct ib_mad_port_private *port_priv,
struct ib_mad_hdr *mad_hdr = (struct ib_mad_hdr *)recv->mad;
if (opa && mad_hdr->base_version == OPA_MGMT_BASE_VERSION &&
- mad_hdr->class_version == OPA_SMI_CLASS_VERSION)
+ mad_hdr->class_version == OPA_SM_CLASS_VERSION)
return handle_opa_smi(port_priv, qp_info, wc, port_num, recv,
response);
@@ -2238,11 +2220,8 @@ static void ib_mad_recv_done(struct ib_cq *cq, struct ib_wc *wc)
mad_size = recv->mad_size;
response = alloc_mad_private(mad_size, GFP_KERNEL);
- if (!response) {
- dev_err(&port_priv->device->dev,
- "%s: no memory for response buffer\n", __func__);
+ if (!response)
goto out;
- }
if (rdma_cap_ib_switch(port_priv->device))
port_num = wc->port_num;
@@ -2869,8 +2848,6 @@ static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
mad_priv = alloc_mad_private(port_mad_size(qp_info->port_priv),
GFP_ATOMIC);
if (!mad_priv) {
- dev_err(&qp_info->port_priv->device->dev,
- "No memory for receive buffer\n");
ret = -ENOMEM;
break;
}
@@ -2961,11 +2938,8 @@ static int ib_mad_port_start(struct ib_mad_port_private *port_priv)
u16 pkey_index;
attr = kmalloc(sizeof *attr, GFP_KERNEL);
- if (!attr) {
- dev_err(&port_priv->device->dev,
- "Couldn't kmalloc ib_qp_attr\n");
+ if (!attr)
return -ENOMEM;
- }
ret = ib_find_pkey(port_priv->device, port_priv->port_num,
IB_DEFAULT_PKEY_FULL, &pkey_index);
@@ -3135,10 +3109,8 @@ static int ib_mad_port_open(struct ib_device *device,
/* Create new device info */
port_priv = kzalloc(sizeof *port_priv, GFP_KERNEL);
- if (!port_priv) {
- dev_err(&device->dev, "No memory for ib_mad_port_private\n");
+ if (!port_priv)
return -ENOMEM;
- }
port_priv->device = device;
port_priv->port_num = port_num;
diff --git a/drivers/infiniband/core/multicast.c b/drivers/infiniband/core/multicast.c
index e51b739f6ea3..322cb67b07a9 100644
--- a/drivers/infiniband/core/multicast.c
+++ b/drivers/infiniband/core/multicast.c
@@ -518,8 +518,11 @@ static void join_handler(int status, struct ib_sa_mcmember_rec *rec,
process_join_error(group, status);
else {
int mgids_changed, is_mgid0;
- ib_find_pkey(group->port->dev->device, group->port->port_num,
- be16_to_cpu(rec->pkey), &pkey_index);
+
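+ /* do not trust pkey_index if the pkey lookup fails; flag it as invalid instead */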
+ if (ib_find_pkey(group->port->dev->device,
+ group->port->port_num, be16_to_cpu(rec->pkey),
+ &pkey_index))
+ pkey_index = MCAST_INVALID_PKEY_INDEX;
spin_lock_irq(&group->port->lock);
if (group->state == MCAST_BUSY &&
diff --git a/drivers/infiniband/core/roce_gid_mgmt.c b/drivers/infiniband/core/roce_gid_mgmt.c
index 06556c34606d..0621f4455732 100644
--- a/drivers/infiniband/core/roce_gid_mgmt.c
+++ b/drivers/infiniband/core/roce_gid_mgmt.c
@@ -304,10 +304,9 @@ static void enum_netdev_ipv4_ips(struct ib_device *ib_dev,
for_ifa(in_dev) {
struct sin_list *entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
- if (!entry) {
- pr_warn("roce_gid_mgmt: couldn't allocate entry for IPv4 update\n");
+ if (!entry)
continue;
- }
+
entry->ip.sin_family = AF_INET;
entry->ip.sin_addr.s_addr = ifa->ifa_address;
list_add_tail(&entry->list, &sin_list);
@@ -348,10 +347,8 @@ static void enum_netdev_ipv6_ips(struct ib_device *ib_dev,
list_for_each_entry(ifp, &in6_dev->addr_list, if_list) {
struct sin6_list *entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
- if (!entry) {
- pr_warn("roce_gid_mgmt: couldn't allocate entry for IPv6 update\n");
+ if (!entry)
continue;
- }
entry->sin6.sin6_family = AF_INET6;
entry->sin6.sin6_addr = ifp->addr;
@@ -437,6 +434,26 @@ static void callback_for_addr_gid_device_scan(struct ib_device *device,
&parsed->gid_attr);
}
+struct upper_list {
+ struct list_head list;
+ struct net_device *upper;
+};
+
+static int netdev_upper_walk(struct net_device *upper, void *data)
+{
+ struct upper_list *entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
+ struct list_head *upper_list = data;
+
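+ /* always return 0 so the walk continues even when an entry cannot be allocated */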
+ if (!entry)
+ return 0;
+
+ list_add_tail(&entry->list, upper_list);
+ dev_hold(upper);
+ entry->upper = upper;
+
+ return 0;
+}
+
static void handle_netdev_upper(struct ib_device *ib_dev, u8 port,
void *cookie,
void (*handle_netdev)(struct ib_device *ib_dev,
@@ -444,30 +461,12 @@ static void handle_netdev_upper(struct ib_device *ib_dev, u8 port,
struct net_device *ndev))
{
struct net_device *ndev = (struct net_device *)cookie;
- struct upper_list {
- struct list_head list;
- struct net_device *upper;
- };
- struct net_device *upper;
- struct list_head *iter;
struct upper_list *upper_iter;
struct upper_list *upper_temp;
LIST_HEAD(upper_list);
rcu_read_lock();
- netdev_for_each_all_upper_dev_rcu(ndev, upper, iter) {
- struct upper_list *entry = kmalloc(sizeof(*entry),
- GFP_ATOMIC);
-
- if (!entry) {
- pr_info("roce_gid_mgmt: couldn't allocate entry to delete ndev\n");
- continue;
- }
-
- list_add_tail(&entry->list, &upper_list);
- dev_hold(upper);
- entry->upper = upper;
- }
+ netdev_walk_all_upper_dev_rcu(ndev, netdev_upper_walk, &upper_list);
rcu_read_unlock();
handle_netdev(ib_dev, port, ndev);
@@ -555,10 +554,8 @@ static int netdevice_queue_work(struct netdev_event_work_cmd *cmds,
struct netdev_event_work *ndev_work =
kmalloc(sizeof(*ndev_work), GFP_KERNEL);
- if (!ndev_work) {
- pr_warn("roce_gid_mgmt: can't allocate work for netdevice_event\n");
+ if (!ndev_work)
return NOTIFY_DONE;
- }
memcpy(ndev_work->cmds, cmds, sizeof(ndev_work->cmds));
for (i = 0; i < ARRAY_SIZE(ndev_work->cmds) && ndev_work->cmds[i].cb; i++) {
@@ -692,10 +689,8 @@ static int addr_event(struct notifier_block *this, unsigned long event,
}
work = kmalloc(sizeof(*work), GFP_ATOMIC);
- if (!work) {
- pr_warn("roce_gid_mgmt: Couldn't allocate work for addr_event\n");
+ if (!work)
return NOTIFY_DONE;
- }
INIT_WORK(&work->work, update_gid_event_work_handler);
diff --git a/drivers/infiniband/core/ucm.c b/drivers/infiniband/core/ucm.c
index 7713ef089c3c..579f9a7f6283 100644
--- a/drivers/infiniband/core/ucm.c
+++ b/drivers/infiniband/core/ucm.c
@@ -1104,8 +1104,11 @@ static ssize_t ib_ucm_write(struct file *filp, const char __user *buf,
struct ib_ucm_cmd_hdr hdr;
ssize_t result;
- if (WARN_ON_ONCE(!ib_safe_file_access(filp)))
+ if (!ib_safe_file_access(filp)) {
+ pr_err_once("ucm_write: process %d (%s) changed security contexts after opening file descriptor, this is not allowed.\n",
+ task_tgid_vnr(current), current->comm);
return -EACCES;
+ }
if (len < sizeof(hdr))
return -EINVAL;
diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c
index 9520154f1d7c..e12f8faf8c23 100644
--- a/drivers/infiniband/core/ucma.c
+++ b/drivers/infiniband/core/ucma.c
@@ -1584,8 +1584,11 @@ static ssize_t ucma_write(struct file *filp, const char __user *buf,
struct rdma_ucm_cmd_hdr hdr;
ssize_t ret;
- if (WARN_ON_ONCE(!ib_safe_file_access(filp)))
+ if (!ib_safe_file_access(filp)) {
+ pr_err_once("ucma_write: process %d (%s) changed security contexts after opening file descriptor, this is not allowed.\n",
+ task_tgid_vnr(current), current->comm);
return -EACCES;
+ }
if (len < sizeof(hdr))
return -EINVAL;
diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c
index 84b4eff90395..1e62a5f0cb28 100644
--- a/drivers/infiniband/core/umem.c
+++ b/drivers/infiniband/core/umem.c
@@ -51,7 +51,7 @@ static void __ib_umem_release(struct ib_device *dev, struct ib_umem *umem, int d
if (umem->nmap > 0)
ib_dma_unmap_sg(dev, umem->sg_head.sgl,
- umem->nmap,
+ umem->npages,
DMA_BIDIRECTIONAL);
for_each_sg(umem->sg_head.sgl, sg, umem->npages, i) {
diff --git a/drivers/infiniband/core/umem_odp.c b/drivers/infiniband/core/umem_odp.c
index 1f0fe3217f23..6b079a31dced 100644
--- a/drivers/infiniband/core/umem_odp.c
+++ b/drivers/infiniband/core/umem_odp.c
@@ -578,7 +578,7 @@ int ib_umem_odp_map_dma_pages(struct ib_umem *umem, u64 user_virt, u64 bcnt,
*/
npages = get_user_pages_remote(owning_process, owning_mm,
user_virt, gup_num_pages,
- flags, local_page_list, NULL);
+ flags, local_page_list, NULL, NULL);
up_read(&owning_mm->mmap_sem);
if (npages < 0)
diff --git a/drivers/infiniband/core/uverbs.h b/drivers/infiniband/core/uverbs.h
index df26a741cda6..455034ac994e 100644
--- a/drivers/infiniband/core/uverbs.h
+++ b/drivers/infiniband/core/uverbs.h
@@ -289,5 +289,6 @@ IB_UVERBS_DECLARE_EX_CMD(modify_wq);
IB_UVERBS_DECLARE_EX_CMD(destroy_wq);
IB_UVERBS_DECLARE_EX_CMD(create_rwq_ind_table);
IB_UVERBS_DECLARE_EX_CMD(destroy_rwq_ind_table);
+IB_UVERBS_DECLARE_EX_CMD(modify_qp);
#endif /* UVERBS_H */
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index cb3f515a2285..09b649159e6c 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -2328,94 +2328,88 @@ static int modify_qp_mask(enum ib_qp_type qp_type, int mask)
}
}
-ssize_t ib_uverbs_modify_qp(struct ib_uverbs_file *file,
- struct ib_device *ib_dev,
- const char __user *buf, int in_len,
- int out_len)
+static int modify_qp(struct ib_uverbs_file *file,
+ struct ib_uverbs_ex_modify_qp *cmd, struct ib_udata *udata)
{
- struct ib_uverbs_modify_qp cmd;
- struct ib_udata udata;
- struct ib_qp *qp;
- struct ib_qp_attr *attr;
- int ret;
-
- if (copy_from_user(&cmd, buf, sizeof cmd))
- return -EFAULT;
-
- INIT_UDATA(&udata, buf + sizeof cmd, NULL, in_len - sizeof cmd,
- out_len);
+ struct ib_qp_attr *attr;
+ struct ib_qp *qp;
+ int ret;
attr = kmalloc(sizeof *attr, GFP_KERNEL);
if (!attr)
return -ENOMEM;
- qp = idr_read_qp(cmd.qp_handle, file->ucontext);
+ qp = idr_read_qp(cmd->base.qp_handle, file->ucontext);
if (!qp) {
ret = -EINVAL;
goto out;
}
- attr->qp_state = cmd.qp_state;
- attr->cur_qp_state = cmd.cur_qp_state;
- attr->path_mtu = cmd.path_mtu;
- attr->path_mig_state = cmd.path_mig_state;
- attr->qkey = cmd.qkey;
- attr->rq_psn = cmd.rq_psn;
- attr->sq_psn = cmd.sq_psn;
- attr->dest_qp_num = cmd.dest_qp_num;
- attr->qp_access_flags = cmd.qp_access_flags;
- attr->pkey_index = cmd.pkey_index;
- attr->alt_pkey_index = cmd.alt_pkey_index;
- attr->en_sqd_async_notify = cmd.en_sqd_async_notify;
- attr->max_rd_atomic = cmd.max_rd_atomic;
- attr->max_dest_rd_atomic = cmd.max_dest_rd_atomic;
- attr->min_rnr_timer = cmd.min_rnr_timer;
- attr->port_num = cmd.port_num;
- attr->timeout = cmd.timeout;
- attr->retry_cnt = cmd.retry_cnt;
- attr->rnr_retry = cmd.rnr_retry;
- attr->alt_port_num = cmd.alt_port_num;
- attr->alt_timeout = cmd.alt_timeout;
-
- memcpy(attr->ah_attr.grh.dgid.raw, cmd.dest.dgid, 16);
- attr->ah_attr.grh.flow_label = cmd.dest.flow_label;
- attr->ah_attr.grh.sgid_index = cmd.dest.sgid_index;
- attr->ah_attr.grh.hop_limit = cmd.dest.hop_limit;
- attr->ah_attr.grh.traffic_class = cmd.dest.traffic_class;
- attr->ah_attr.dlid = cmd.dest.dlid;
- attr->ah_attr.sl = cmd.dest.sl;
- attr->ah_attr.src_path_bits = cmd.dest.src_path_bits;
- attr->ah_attr.static_rate = cmd.dest.static_rate;
- attr->ah_attr.ah_flags = cmd.dest.is_global ? IB_AH_GRH : 0;
- attr->ah_attr.port_num = cmd.dest.port_num;
-
- memcpy(attr->alt_ah_attr.grh.dgid.raw, cmd.alt_dest.dgid, 16);
- attr->alt_ah_attr.grh.flow_label = cmd.alt_dest.flow_label;
- attr->alt_ah_attr.grh.sgid_index = cmd.alt_dest.sgid_index;
- attr->alt_ah_attr.grh.hop_limit = cmd.alt_dest.hop_limit;
- attr->alt_ah_attr.grh.traffic_class = cmd.alt_dest.traffic_class;
- attr->alt_ah_attr.dlid = cmd.alt_dest.dlid;
- attr->alt_ah_attr.sl = cmd.alt_dest.sl;
- attr->alt_ah_attr.src_path_bits = cmd.alt_dest.src_path_bits;
- attr->alt_ah_attr.static_rate = cmd.alt_dest.static_rate;
- attr->alt_ah_attr.ah_flags = cmd.alt_dest.is_global ? IB_AH_GRH : 0;
- attr->alt_ah_attr.port_num = cmd.alt_dest.port_num;
+ attr->qp_state = cmd->base.qp_state;
+ attr->cur_qp_state = cmd->base.cur_qp_state;
+ attr->path_mtu = cmd->base.path_mtu;
+ attr->path_mig_state = cmd->base.path_mig_state;
+ attr->qkey = cmd->base.qkey;
+ attr->rq_psn = cmd->base.rq_psn;
+ attr->sq_psn = cmd->base.sq_psn;
+ attr->dest_qp_num = cmd->base.dest_qp_num;
+ attr->qp_access_flags = cmd->base.qp_access_flags;
+ attr->pkey_index = cmd->base.pkey_index;
+ attr->alt_pkey_index = cmd->base.alt_pkey_index;
+ attr->en_sqd_async_notify = cmd->base.en_sqd_async_notify;
+ attr->max_rd_atomic = cmd->base.max_rd_atomic;
+ attr->max_dest_rd_atomic = cmd->base.max_dest_rd_atomic;
+ attr->min_rnr_timer = cmd->base.min_rnr_timer;
+ attr->port_num = cmd->base.port_num;
+ attr->timeout = cmd->base.timeout;
+ attr->retry_cnt = cmd->base.retry_cnt;
+ attr->rnr_retry = cmd->base.rnr_retry;
+ attr->alt_port_num = cmd->base.alt_port_num;
+ attr->alt_timeout = cmd->base.alt_timeout;
+ attr->rate_limit = cmd->rate_limit;
+
+ memcpy(attr->ah_attr.grh.dgid.raw, cmd->base.dest.dgid, 16);
+ attr->ah_attr.grh.flow_label = cmd->base.dest.flow_label;
+ attr->ah_attr.grh.sgid_index = cmd->base.dest.sgid_index;
+ attr->ah_attr.grh.hop_limit = cmd->base.dest.hop_limit;
+ attr->ah_attr.grh.traffic_class = cmd->base.dest.traffic_class;
+ attr->ah_attr.dlid = cmd->base.dest.dlid;
+ attr->ah_attr.sl = cmd->base.dest.sl;
+ attr->ah_attr.src_path_bits = cmd->base.dest.src_path_bits;
+ attr->ah_attr.static_rate = cmd->base.dest.static_rate;
+ attr->ah_attr.ah_flags = cmd->base.dest.is_global ?
+ IB_AH_GRH : 0;
+ attr->ah_attr.port_num = cmd->base.dest.port_num;
+
+ memcpy(attr->alt_ah_attr.grh.dgid.raw, cmd->base.alt_dest.dgid, 16);
+ attr->alt_ah_attr.grh.flow_label = cmd->base.alt_dest.flow_label;
+ attr->alt_ah_attr.grh.sgid_index = cmd->base.alt_dest.sgid_index;
+ attr->alt_ah_attr.grh.hop_limit = cmd->base.alt_dest.hop_limit;
+ attr->alt_ah_attr.grh.traffic_class = cmd->base.alt_dest.traffic_class;
+ attr->alt_ah_attr.dlid = cmd->base.alt_dest.dlid;
+ attr->alt_ah_attr.sl = cmd->base.alt_dest.sl;
+ attr->alt_ah_attr.src_path_bits = cmd->base.alt_dest.src_path_bits;
+ attr->alt_ah_attr.static_rate = cmd->base.alt_dest.static_rate;
+ attr->alt_ah_attr.ah_flags = cmd->base.alt_dest.is_global ?
+ IB_AH_GRH : 0;
+ attr->alt_ah_attr.port_num = cmd->base.alt_dest.port_num;
if (qp->real_qp == qp) {
- ret = ib_resolve_eth_dmac(qp, attr, &cmd.attr_mask);
- if (ret)
- goto release_qp;
+ if (cmd->base.attr_mask & IB_QP_AV) {
+ ret = ib_resolve_eth_dmac(qp->device, &attr->ah_attr);
+ if (ret)
+ goto release_qp;
+ }
ret = qp->device->modify_qp(qp, attr,
- modify_qp_mask(qp->qp_type, cmd.attr_mask), &udata);
+ modify_qp_mask(qp->qp_type,
+ cmd->base.attr_mask),
+ udata);
} else {
- ret = ib_modify_qp(qp, attr, modify_qp_mask(qp->qp_type, cmd.attr_mask));
+ ret = ib_modify_qp(qp, attr,
+ modify_qp_mask(qp->qp_type,
+ cmd->base.attr_mask));
}
- if (ret)
- goto release_qp;
-
- ret = in_len;
-
release_qp:
put_qp_read(qp);
@@ -2425,6 +2419,68 @@ out:
return ret;
}
+ssize_t ib_uverbs_modify_qp(struct ib_uverbs_file *file,
+ struct ib_device *ib_dev,
+ const char __user *buf, int in_len,
+ int out_len)
+{
+ struct ib_uverbs_ex_modify_qp cmd = {};
+ struct ib_udata udata;
+ int ret;
+
+ if (copy_from_user(&cmd.base, buf, sizeof(cmd.base)))
+ return -EFAULT;
+
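+ /*
+ * ((LAST << 1) - 1) sets every bit up to and including the last
+ * legacy attribute bit, so any bit left over in attr_mask is an
+ * extended attribute the legacy command cannot carry.
+ */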
+ if (cmd.base.attr_mask &
+ ~((IB_USER_LEGACY_LAST_QP_ATTR_MASK << 1) - 1))
+ return -EOPNOTSUPP;
+
+ INIT_UDATA(&udata, buf + sizeof(cmd.base), NULL,
+ in_len - sizeof(cmd.base), out_len);
+
+ ret = modify_qp(file, &cmd, &udata);
+ if (ret)
+ return ret;
+
+ return in_len;
+}
+
+int ib_uverbs_ex_modify_qp(struct ib_uverbs_file *file,
+ struct ib_device *ib_dev,
+ struct ib_udata *ucore,
+ struct ib_udata *uhw)
+{
+ struct ib_uverbs_ex_modify_qp cmd = {};
+ int ret;
+
+ /*
+ * Last bit is reserved for extending the attr_mask by
+ * using another field.
+ */
+ BUILD_BUG_ON(IB_USER_LAST_QP_ATTR_MASK == (1 << 31));
+
+ if (ucore->inlen < sizeof(cmd.base))
+ return -EINVAL;
+
+ ret = ib_copy_from_udata(&cmd, ucore, min(sizeof(cmd), ucore->inlen));
+ if (ret)
+ return ret;
+
+ if (cmd.base.attr_mask &
+ ~((IB_USER_LAST_QP_ATTR_MASK << 1) - 1))
+ return -EOPNOTSUPP;
+
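+ /*
+ * Trailing bytes beyond the command this kernel understands must
+ * be zeroed; otherwise the request uses an extension we cannot
+ * honor.
+ */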
+ if (ucore->inlen > sizeof(cmd)) {
+ if (!ib_is_udata_cleared(ucore, sizeof(cmd),
+ ucore->inlen - sizeof(cmd)))
+ return -EOPNOTSUPP;
+ }
+
+ ret = modify_qp(file, &cmd, uhw);
+
+ return ret;
+}
+
ssize_t ib_uverbs_destroy_qp(struct ib_uverbs_file *file,
struct ib_device *ib_dev,
const char __user *buf, int in_len,
@@ -2875,6 +2931,7 @@ ssize_t ib_uverbs_create_ah(struct ib_uverbs_file *file,
struct ib_ah *ah;
struct ib_ah_attr attr;
int ret;
+ struct ib_udata udata;
if (out_len < sizeof resp)
return -ENOSPC;
@@ -2882,6 +2939,10 @@ ssize_t ib_uverbs_create_ah(struct ib_uverbs_file *file,
if (copy_from_user(&cmd, buf, sizeof cmd))
return -EFAULT;
+ INIT_UDATA(&udata, buf + sizeof(cmd),
+ (unsigned long)cmd.response + sizeof(resp),
+ in_len - sizeof(cmd), out_len - sizeof(resp));
+
uobj = kmalloc(sizeof *uobj, GFP_KERNEL);
if (!uobj)
return -ENOMEM;
@@ -2908,12 +2969,16 @@ ssize_t ib_uverbs_create_ah(struct ib_uverbs_file *file,
memset(&attr.dmac, 0, sizeof(attr.dmac));
memcpy(attr.grh.dgid.raw, cmd.attr.grh.dgid, 16);
- ah = ib_create_ah(pd, &attr);
+ ah = pd->device->create_ah(pd, &attr, &udata);
+
if (IS_ERR(ah)) {
ret = PTR_ERR(ah);
goto err_put;
}
+ ah->device = pd->device;
+ ah->pd = pd;
+ atomic_inc(&pd->usecnt);
ah->uobject = uobj;
uobj->object = ah;
@@ -3124,8 +3189,10 @@ static int kern_spec_to_ib_spec(struct ib_uverbs_flow_spec *kern_spec,
kern_spec_val = (void *)kern_spec +
sizeof(struct ib_uverbs_flow_spec_hdr);
kern_spec_mask = kern_spec_val + kern_filter_sz;
+ if (ib_spec->type == (IB_FLOW_SPEC_INNER | IB_FLOW_SPEC_VXLAN_TUNNEL))
+ return -EINVAL;
- switch (ib_spec->type) {
+ switch (ib_spec->type & ~IB_FLOW_SPEC_INNER) {
case IB_FLOW_SPEC_ETH:
ib_filter_sz = offsetof(struct ib_flow_eth_filter, real_sz);
actual_filter_sz = spec_filter_size(kern_spec_mask,
@@ -3175,6 +3242,21 @@ static int kern_spec_to_ib_spec(struct ib_uverbs_flow_spec *kern_spec,
memcpy(&ib_spec->tcp_udp.val, kern_spec_val, actual_filter_sz);
memcpy(&ib_spec->tcp_udp.mask, kern_spec_mask, actual_filter_sz);
break;
+ case IB_FLOW_SPEC_VXLAN_TUNNEL:
+ ib_filter_sz = offsetof(struct ib_flow_tunnel_filter, real_sz);
+ actual_filter_sz = spec_filter_size(kern_spec_mask,
+ kern_filter_sz,
+ ib_filter_sz);
+ if (actual_filter_sz <= 0)
+ return -EINVAL;
+ ib_spec->tunnel.size = sizeof(struct ib_flow_spec_tunnel);
+ memcpy(&ib_spec->tunnel.val, kern_spec_val, actual_filter_sz);
+ memcpy(&ib_spec->tunnel.mask, kern_spec_mask, actual_filter_sz);
+
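+ /* a VXLAN VNI is only 24 bits wide; larger IDs are invalid */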
+ if ((ntohl(ib_spec->tunnel.mask.tunnel_id)) >= BIT(24) ||
+ (ntohl(ib_spec->tunnel.val.tunnel_id)) >= BIT(24))
+ return -EINVAL;
+ break;
default:
return -EINVAL;
}
@@ -3745,7 +3827,6 @@ int ib_uverbs_ex_create_flow(struct ib_uverbs_file *file,
err = PTR_ERR(flow_id);
goto err_free;
}
- flow_id->qp = qp;
flow_id->uobject = uobj;
uobj->object = flow_id;
diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c
index 44b1104eb168..813593550c4b 100644
--- a/drivers/infiniband/core/uverbs_main.c
+++ b/drivers/infiniband/core/uverbs_main.c
@@ -137,6 +137,7 @@ static int (*uverbs_ex_cmd_table[])(struct ib_uverbs_file *file,
[IB_USER_VERBS_EX_CMD_DESTROY_WQ] = ib_uverbs_ex_destroy_wq,
[IB_USER_VERBS_EX_CMD_CREATE_RWQ_IND_TBL] = ib_uverbs_ex_create_rwq_ind_table,
[IB_USER_VERBS_EX_CMD_DESTROY_RWQ_IND_TBL] = ib_uverbs_ex_destroy_rwq_ind_table,
+ [IB_USER_VERBS_EX_CMD_MODIFY_QP] = ib_uverbs_ex_modify_qp,
};
static void ib_uverbs_add_one(struct ib_device *device);
@@ -746,8 +747,11 @@ static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf,
int srcu_key;
ssize_t ret;
- if (WARN_ON_ONCE(!ib_safe_file_access(filp)))
+ if (!ib_safe_file_access(filp)) {
+ pr_err_once("uverbs_write: process %d (%s) changed security contexts after opening file descriptor, this is not allowed.\n",
+ task_tgid_vnr(current), current->comm);
return -EACCES;
+ }
if (count < sizeof hdr)
return -EINVAL;
diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c
index 83687646da68..71580cc28c9e 100644
--- a/drivers/infiniband/core/verbs.c
+++ b/drivers/infiniband/core/verbs.c
@@ -315,7 +315,7 @@ struct ib_ah *ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr)
{
struct ib_ah *ah;
- ah = pd->device->create_ah(pd, ah_attr);
+ ah = pd->device->create_ah(pd, ah_attr, NULL);
if (!IS_ERR(ah)) {
ah->device = pd->device;
@@ -328,7 +328,7 @@ struct ib_ah *ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr)
}
EXPORT_SYMBOL(ib_create_ah);
-static int ib_get_header_version(const union rdma_network_hdr *hdr)
+int ib_get_rdma_header_version(const union rdma_network_hdr *hdr)
{
const struct iphdr *ip4h = (struct iphdr *)&hdr->roce4grh;
struct iphdr ip4h_checked;
@@ -359,6 +359,7 @@ static int ib_get_header_version(const union rdma_network_hdr *hdr)
return 4;
return 6;
}
+EXPORT_SYMBOL(ib_get_rdma_header_version);
static enum rdma_network_type ib_get_net_type_by_grh(struct ib_device *device,
u8 port_num,
@@ -369,7 +370,7 @@ static enum rdma_network_type ib_get_net_type_by_grh(struct ib_device *device,
if (rdma_protocol_ib(device, port_num))
return RDMA_NETWORK_IB;
- grh_version = ib_get_header_version((union rdma_network_hdr *)grh);
+ grh_version = ib_get_rdma_header_version((union rdma_network_hdr *)grh);
if (grh_version == 4)
return RDMA_NETWORK_IPV4;
@@ -415,9 +416,9 @@ static int get_sgid_index_from_eth(struct ib_device *device, u8 port_num,
&context, gid_index);
}
-static int get_gids_from_rdma_hdr(union rdma_network_hdr *hdr,
- enum rdma_network_type net_type,
- union ib_gid *sgid, union ib_gid *dgid)
+int ib_get_gids_from_rdma_hdr(const union rdma_network_hdr *hdr,
+ enum rdma_network_type net_type,
+ union ib_gid *sgid, union ib_gid *dgid)
{
struct sockaddr_in src_in;
struct sockaddr_in dst_in;
@@ -447,6 +448,7 @@ static int get_gids_from_rdma_hdr(union rdma_network_hdr *hdr,
return -EINVAL;
}
}
+EXPORT_SYMBOL(ib_get_gids_from_rdma_hdr);
int ib_init_ah_from_wc(struct ib_device *device, u8 port_num,
const struct ib_wc *wc, const struct ib_grh *grh,
@@ -469,8 +471,8 @@ int ib_init_ah_from_wc(struct ib_device *device, u8 port_num,
net_type = ib_get_net_type_by_grh(device, port_num, grh);
gid_type = ib_network_to_gid_type(net_type);
}
- ret = get_gids_from_rdma_hdr((union rdma_network_hdr *)grh, net_type,
- &sgid, &dgid);
+ ret = ib_get_gids_from_rdma_hdr((union rdma_network_hdr *)grh, net_type,
+ &sgid, &dgid);
if (ret)
return ret;
@@ -1014,6 +1016,7 @@ static const struct {
IB_QP_QKEY),
[IB_QPT_GSI] = (IB_QP_CUR_STATE |
IB_QP_QKEY),
+ [IB_QPT_RAW_PACKET] = IB_QP_RATE_LIMIT,
}
}
},
@@ -1047,6 +1050,7 @@ static const struct {
IB_QP_QKEY),
[IB_QPT_GSI] = (IB_QP_CUR_STATE |
IB_QP_QKEY),
+ [IB_QPT_RAW_PACKET] = IB_QP_RATE_LIMIT,
}
},
[IB_QPS_SQD] = {
@@ -1196,66 +1200,66 @@ int ib_modify_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state next_state,
}
EXPORT_SYMBOL(ib_modify_qp_is_ok);
-int ib_resolve_eth_dmac(struct ib_qp *qp,
- struct ib_qp_attr *qp_attr, int *qp_attr_mask)
+int ib_resolve_eth_dmac(struct ib_device *device,
+ struct ib_ah_attr *ah_attr)
{
int ret = 0;
- if (*qp_attr_mask & IB_QP_AV) {
- if (qp_attr->ah_attr.port_num < rdma_start_port(qp->device) ||
- qp_attr->ah_attr.port_num > rdma_end_port(qp->device))
- return -EINVAL;
-
- if (!rdma_cap_eth_ah(qp->device, qp_attr->ah_attr.port_num))
- return 0;
-
- if (rdma_link_local_addr((struct in6_addr *)qp_attr->ah_attr.grh.dgid.raw)) {
- rdma_get_ll_mac((struct in6_addr *)qp_attr->ah_attr.grh.dgid.raw,
- qp_attr->ah_attr.dmac);
- } else {
- union ib_gid sgid;
- struct ib_gid_attr sgid_attr;
- int ifindex;
- int hop_limit;
-
- ret = ib_query_gid(qp->device,
- qp_attr->ah_attr.port_num,
- qp_attr->ah_attr.grh.sgid_index,
- &sgid, &sgid_attr);
-
- if (ret || !sgid_attr.ndev) {
- if (!ret)
- ret = -ENXIO;
- goto out;
- }
+ if (ah_attr->port_num < rdma_start_port(device) ||
+ ah_attr->port_num > rdma_end_port(device))
+ return -EINVAL;
- ifindex = sgid_attr.ndev->ifindex;
+ if (!rdma_cap_eth_ah(device, ah_attr->port_num))
+ return 0;
- ret = rdma_addr_find_l2_eth_by_grh(&sgid,
- &qp_attr->ah_attr.grh.dgid,
- qp_attr->ah_attr.dmac,
- NULL, &ifindex, &hop_limit);
+ if (rdma_link_local_addr((struct in6_addr *)ah_attr->grh.dgid.raw)) {
+ rdma_get_ll_mac((struct in6_addr *)ah_attr->grh.dgid.raw,
+ ah_attr->dmac);
+ } else {
+ union ib_gid sgid;
+ struct ib_gid_attr sgid_attr;
+ int ifindex;
+ int hop_limit;
+
+ ret = ib_query_gid(device,
+ ah_attr->port_num,
+ ah_attr->grh.sgid_index,
+ &sgid, &sgid_attr);
+
+ if (ret || !sgid_attr.ndev) {
+ if (!ret)
+ ret = -ENXIO;
+ goto out;
+ }
- dev_put(sgid_attr.ndev);
+ ifindex = sgid_attr.ndev->ifindex;
- qp_attr->ah_attr.grh.hop_limit = hop_limit;
- }
+ ret = rdma_addr_find_l2_eth_by_grh(&sgid,
+ &ah_attr->grh.dgid,
+ ah_attr->dmac,
+ NULL, &ifindex, &hop_limit);
+
+ dev_put(sgid_attr.ndev);
+
+ ah_attr->grh.hop_limit = hop_limit;
}
out:
return ret;
}
EXPORT_SYMBOL(ib_resolve_eth_dmac);
-
int ib_modify_qp(struct ib_qp *qp,
struct ib_qp_attr *qp_attr,
int qp_attr_mask)
{
- int ret;
- ret = ib_resolve_eth_dmac(qp, qp_attr, &qp_attr_mask);
- if (ret)
- return ret;
+ if (qp_attr_mask & IB_QP_AV) {
+ int ret;
+
+ ret = ib_resolve_eth_dmac(qp->device, &qp_attr->ah_attr);
+ if (ret)
+ return ret;
+ }
return qp->device->modify_qp(qp->real_qp, qp_attr, qp_attr_mask, NULL);
}
@@ -1734,8 +1738,10 @@ struct ib_flow *ib_create_flow(struct ib_qp *qp,
return ERR_PTR(-ENOSYS);
flow_id = qp->device->create_flow(qp, flow_attr, domain);
- if (!IS_ERR(flow_id))
+ if (!IS_ERR(flow_id)) {
atomic_inc(&qp->usecnt);
+ flow_id->qp = qp;
+ }
return flow_id;
}
EXPORT_SYMBOL(ib_create_flow);
diff --git a/drivers/infiniband/hw/Makefile b/drivers/infiniband/hw/Makefile
index e7a5ed9f6f3f..ed553de2ca12 100644
--- a/drivers/infiniband/hw/Makefile
+++ b/drivers/infiniband/hw/Makefile
@@ -7,6 +7,7 @@ obj-$(CONFIG_MLX4_INFINIBAND) += mlx4/
obj-$(CONFIG_MLX5_INFINIBAND) += mlx5/
obj-$(CONFIG_INFINIBAND_NES) += nes/
obj-$(CONFIG_INFINIBAND_OCRDMA) += ocrdma/
+obj-$(CONFIG_INFINIBAND_VMWARE_PVRDMA) += vmw_pvrdma/
obj-$(CONFIG_INFINIBAND_USNIC) += usnic/
obj-$(CONFIG_INFINIBAND_HFI1) += hfi1/
obj-$(CONFIG_INFINIBAND_HNS) += hns/
diff --git a/drivers/infiniband/hw/cxgb3/cxio_dbg.c b/drivers/infiniband/hw/cxgb3/cxio_dbg.c
index 8bca6b4ec9af..445e89e5e7cf 100644
--- a/drivers/infiniband/hw/cxgb3/cxio_dbg.c
+++ b/drivers/infiniband/hw/cxgb3/cxio_dbg.c
@@ -45,10 +45,9 @@ void cxio_dump_tpt(struct cxio_rdev *rdev, u32 stag)
int size = 32;
m = kmalloc(sizeof(*m) + size, GFP_ATOMIC);
- if (!m) {
- PDBG("%s couldn't allocate memory.\n", __func__);
+ if (!m)
return;
- }
+
m->mem_id = MEM_PMRX;
m->addr = (stag>>8) * 32 + rdev->rnic_info.tpt_base;
m->len = size;
@@ -82,10 +81,9 @@ void cxio_dump_pbl(struct cxio_rdev *rdev, u32 pbl_addr, uint len, u8 shift)
size = npages * sizeof(u64);
m = kmalloc(sizeof(*m) + size, GFP_ATOMIC);
- if (!m) {
- PDBG("%s couldn't allocate memory.\n", __func__);
+ if (!m)
return;
- }
+
m->mem_id = MEM_PMRX;
m->addr = pbl_addr;
m->len = size;
@@ -144,10 +142,9 @@ void cxio_dump_rqt(struct cxio_rdev *rdev, u32 hwtid, int nents)
int rc;
m = kmalloc(sizeof(*m) + size, GFP_ATOMIC);
- if (!m) {
- PDBG("%s couldn't allocate memory.\n", __func__);
+ if (!m)
return;
- }
+
m->mem_id = MEM_PMRX;
m->addr = ((hwtid)<<10) + rdev->rnic_info.rqt_base;
m->len = size;
@@ -177,10 +174,9 @@ void cxio_dump_tcb(struct cxio_rdev *rdev, u32 hwtid)
int rc;
m = kmalloc(sizeof(*m) + size, GFP_ATOMIC);
- if (!m) {
- PDBG("%s couldn't allocate memory.\n", __func__);
+ if (!m)
return;
- }
+
m->mem_id = MEM_CM;
m->addr = hwtid * size;
m->len = size;
diff --git a/drivers/infiniband/hw/cxgb3/iwch_provider.c b/drivers/infiniband/hw/cxgb3/iwch_provider.c
index cba57bb53dba..9d5fe1853da4 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_provider.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_provider.c
@@ -62,7 +62,8 @@
#include "common.h"
static struct ib_ah *iwch_ah_create(struct ib_pd *pd,
- struct ib_ah_attr *ah_attr)
+ struct ib_ah_attr *ah_attr,
+ struct ib_udata *udata)
{
return ERR_PTR(-ENOSYS);
}
diff --git a/drivers/infiniband/hw/cxgb4/device.c b/drivers/infiniband/hw/cxgb4/device.c
index 93e3d270a98a..516b0ae6dc3f 100644
--- a/drivers/infiniband/hw/cxgb4/device.c
+++ b/drivers/infiniband/hw/cxgb4/device.c
@@ -828,8 +828,10 @@ static int c4iw_rdev_open(struct c4iw_rdev *rdev)
}
rdev->status_page = (struct t4_dev_status_page *)
__get_free_page(GFP_KERNEL);
- if (!rdev->status_page)
+ if (!rdev->status_page) {
+ err = -ENOMEM;
goto destroy_ocqp_pool;
+ }
rdev->status_page->qp_start = rdev->lldi.vr->qp.start;
rdev->status_page->qp_size = rdev->lldi.vr->qp.size;
rdev->status_page->cq_start = rdev->lldi.vr->cq.start;
@@ -841,8 +843,6 @@ static int c4iw_rdev_open(struct c4iw_rdev *rdev)
if (rdev->wr_log) {
rdev->wr_log_size = 1 << c4iw_wr_log_size_order;
atomic_set(&rdev->wr_log_idx, 0);
- } else {
- pr_err(MOD "error allocating wr_log. Logging disabled\n");
}
}
@@ -1424,8 +1424,6 @@ static void recover_queues(struct uld_ctx *ctx)
qp_list.qps = kzalloc(count * sizeof *qp_list.qps, GFP_ATOMIC);
if (!qp_list.qps) {
- printk(KERN_ERR MOD "%s: Fatal error - DB overflow recovery failed\n",
- pci_name(ctx->lldi.pdev));
spin_unlock_irq(&ctx->dev->lock);
return;
}
@@ -1481,6 +1479,7 @@ static int c4iw_uld_control(void *handle, enum cxgb4_control control, ...)
static struct cxgb4_uld_info c4iw_uld_info = {
.name = DRV_NAME,
.nrxq = MAX_ULD_QSETS,
+ .ntxq = MAX_ULD_QSETS,
.rxq_size = 511,
.ciq = true,
.lro = false,
diff --git a/drivers/infiniband/hw/cxgb4/provider.c b/drivers/infiniband/hw/cxgb4/provider.c
index 645e606a17c5..49b51b7e0fd7 100644
--- a/drivers/infiniband/hw/cxgb4/provider.c
+++ b/drivers/infiniband/hw/cxgb4/provider.c
@@ -59,7 +59,9 @@ module_param(fastreg_support, int, 0644);
MODULE_PARM_DESC(fastreg_support, "Advertise fastreg support (default=1)");
static struct ib_ah *c4iw_ah_create(struct ib_pd *pd,
- struct ib_ah_attr *ah_attr)
+ struct ib_ah_attr *ah_attr,
+ struct ib_udata *udata)
+
{
return ERR_PTR(-ENOSYS);
}
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
index b7ac97b27c88..cda5542e13a2 100644
--- a/drivers/infiniband/hw/cxgb4/qp.c
+++ b/drivers/infiniband/hw/cxgb4/qp.c
@@ -321,7 +321,8 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
FW_RI_RES_WR_DCAEN_V(0) |
FW_RI_RES_WR_DCACPU_V(0) |
FW_RI_RES_WR_FBMIN_V(2) |
- FW_RI_RES_WR_FBMAX_V(2) |
+ (t4_sq_onchip(&wq->sq) ? FW_RI_RES_WR_FBMAX_V(2) :
+ FW_RI_RES_WR_FBMAX_V(3)) |
FW_RI_RES_WR_CIDXFTHRESHO_V(0) |
FW_RI_RES_WR_CIDXFTHRESH_V(0) |
FW_RI_RES_WR_EQSIZE_V(eqsize));
@@ -345,7 +346,7 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
FW_RI_RES_WR_DCAEN_V(0) |
FW_RI_RES_WR_DCACPU_V(0) |
FW_RI_RES_WR_FBMIN_V(2) |
- FW_RI_RES_WR_FBMAX_V(2) |
+ FW_RI_RES_WR_FBMAX_V(3) |
FW_RI_RES_WR_CIDXFTHRESHO_V(0) |
FW_RI_RES_WR_CIDXFTHRESH_V(0) |
FW_RI_RES_WR_EQSIZE_V(eqsize));
diff --git a/drivers/infiniband/hw/hfi1/affinity.c b/drivers/infiniband/hw/hfi1/affinity.c
index 67ea85a56945..7a3d906b3671 100644
--- a/drivers/infiniband/hw/hfi1/affinity.c
+++ b/drivers/infiniband/hw/hfi1/affinity.c
@@ -125,6 +125,7 @@ int node_affinity_init(void)
cpumask_weight(topology_sibling_cpumask(
cpumask_first(&node_affinity.proc.mask)
));
+ node_affinity.num_possible_nodes = num_possible_nodes();
node_affinity.num_online_nodes = num_online_nodes();
node_affinity.num_online_cpus = num_online_cpus();
@@ -135,7 +136,7 @@ int node_affinity_init(void)
*/
init_real_cpu_mask();
- hfi1_per_node_cntr = kcalloc(num_possible_nodes(),
+ hfi1_per_node_cntr = kcalloc(node_affinity.num_possible_nodes,
sizeof(*hfi1_per_node_cntr), GFP_KERNEL);
if (!hfi1_per_node_cntr)
return -ENOMEM;
diff --git a/drivers/infiniband/hw/hfi1/affinity.h b/drivers/infiniband/hw/hfi1/affinity.h
index 42e63316afd1..e78c7aa094e0 100644
--- a/drivers/infiniband/hw/hfi1/affinity.h
+++ b/drivers/infiniband/hw/hfi1/affinity.h
@@ -70,14 +70,6 @@ struct cpu_mask_set {
uint gen;
};
-struct hfi1_affinity {
- struct cpu_mask_set def_intr;
- struct cpu_mask_set rcv_intr;
- struct cpumask real_cpu_mask;
- /* spin lock to protect affinity struct */
- spinlock_t lock;
-};
-
struct hfi1_msix_entry;
/* Initialize non-HT cpu cores mask */
@@ -115,6 +107,7 @@ struct hfi1_affinity_node_list {
struct cpumask real_cpu_mask;
struct cpu_mask_set proc;
int num_core_siblings;
+ int num_possible_nodes;
int num_online_nodes;
int num_online_cpus;
struct mutex lock; /* protects affinity nodes */
diff --git a/drivers/infiniband/hw/hfi1/chip.c b/drivers/infiniband/hw/hfi1/chip.c
index 24d0820873cf..ef72bc2a9e1d 100644
--- a/drivers/infiniband/hw/hfi1/chip.c
+++ b/drivers/infiniband/hw/hfi1/chip.c
@@ -8477,7 +8477,10 @@ static int do_8051_command(
*/
if (type == HCMD_WRITE_LCB_CSR) {
in_data |= ((*out_data) & 0xffffffffffull) << 8;
- reg = ((((*out_data) >> 40) & 0xff) <<
+ /* must preserve COMPLETED - it is tied to hardware */
+ reg = read_csr(dd, DC_DC8051_CFG_EXT_DEV_0);
+ reg &= DC_DC8051_CFG_EXT_DEV_0_COMPLETED_SMASK;
+ reg |= ((((*out_data) >> 40) & 0xff) <<
DC_DC8051_CFG_EXT_DEV_0_RETURN_CODE_SHIFT)
| ((((*out_data) >> 48) & 0xffff) <<
DC_DC8051_CFG_EXT_DEV_0_RSP_DATA_SHIFT);
@@ -9556,11 +9559,11 @@ int bringup_serdes(struct hfi1_pportdata *ppd)
if (HFI1_CAP_IS_KSET(EXTENDED_PSN))
add_rcvctrl(dd, RCV_CTRL_RCV_EXTENDED_PSN_ENABLE_SMASK);
- guid = ppd->guid;
+ guid = ppd->guids[HFI1_PORT_GUID_INDEX];
if (!guid) {
if (dd->base_guid)
guid = dd->base_guid + ppd->port - 1;
- ppd->guid = guid;
+ ppd->guids[HFI1_PORT_GUID_INDEX] = guid;
}
/* Set linkinit_reason on power up per OPA spec */
diff --git a/drivers/infiniband/hw/hfi1/chip_registers.h b/drivers/infiniband/hw/hfi1/chip_registers.h
index 5b9993899789..5bfa839d1c48 100644
--- a/drivers/infiniband/hw/hfi1/chip_registers.h
+++ b/drivers/infiniband/hw/hfi1/chip_registers.h
@@ -415,6 +415,9 @@
#define ASIC_CFG_SBUS_REQUEST_DATA_IN_SHIFT 32
#define ASIC_CFG_SBUS_REQUEST_RECEIVER_ADDR_SHIFT 0
#define ASIC_CFG_SCRATCH (ASIC + 0x000000000020)
+#define ASIC_CFG_SCRATCH_1 (ASIC_CFG_SCRATCH + 0x08)
+#define ASIC_CFG_SCRATCH_2 (ASIC_CFG_SCRATCH + 0x10)
+#define ASIC_CFG_SCRATCH_3 (ASIC_CFG_SCRATCH + 0x18)
#define ASIC_CFG_THERM_POLL_EN (ASIC + 0x000000000050)
#define ASIC_EEP_ADDR_CMD (ASIC + 0x000000000308)
#define ASIC_EEP_ADDR_CMD_EP_ADDR_MASK 0xFFFFFFull
diff --git a/drivers/infiniband/hw/hfi1/debugfs.c b/drivers/infiniband/hw/hfi1/debugfs.c
index 632ba21759ab..8725f4c086cf 100644
--- a/drivers/infiniband/hw/hfi1/debugfs.c
+++ b/drivers/infiniband/hw/hfi1/debugfs.c
@@ -541,6 +541,114 @@ static ssize_t asic_flags_write(struct file *file, const char __user *buf,
return ret;
}
+/* read the dc8051 memory */
+static ssize_t dc8051_memory_read(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct hfi1_pportdata *ppd = private2ppd(file);
+ ssize_t rval;
+ void *tmp;
+ loff_t start, end;
+
+ /* the checks below expect the position to be positive */
+ if (*ppos < 0)
+ return -EINVAL;
+
+ tmp = kzalloc(DC8051_DATA_MEM_SIZE, GFP_KERNEL);
+ if (!tmp)
+ return -ENOMEM;
+
+ /*
+ * Fill in the requested portion of the temporary buffer from the
+ * 8051 memory. The 8051 memory read is done in terms of 8 bytes.
+ * Adjust start and end to fit. Skip reading anything if out of
+ * range.
+ */
+ start = *ppos & ~0x7; /* round down */
+ if (start < DC8051_DATA_MEM_SIZE) {
+ end = (*ppos + count + 7) & ~0x7; /* round up */
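+ /* e.g. *ppos = 5, count = 10: start = 0, end = 16 */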
+ if (end > DC8051_DATA_MEM_SIZE)
+ end = DC8051_DATA_MEM_SIZE;
+ rval = read_8051_data(ppd->dd, start, end - start,
+ (u64 *)(tmp + start));
+ if (rval)
+ goto done;
+ }
+
+ rval = simple_read_from_buffer(buf, count, ppos, tmp,
+ DC8051_DATA_MEM_SIZE);
+done:
+ kfree(tmp);
+ return rval;
+}
+
+static ssize_t debugfs_lcb_read(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct hfi1_pportdata *ppd = private2ppd(file);
+ struct hfi1_devdata *dd = ppd->dd;
+ unsigned long total, csr_off;
+ u64 data;
+
+ if (*ppos < 0)
+ return -EINVAL;
+ /* only read 8 byte quantities */
+ if ((count % 8) != 0)
+ return -EINVAL;
+ /* offset must be 8-byte aligned */
+ if ((*ppos % 8) != 0)
+ return -EINVAL;
+ /* do nothing if out of range or zero count */
+ if (*ppos >= (LCB_END - LCB_START) || !count)
+ return 0;
+ /* reduce count if needed */
+ if (*ppos + count > LCB_END - LCB_START)
+ count = (LCB_END - LCB_START) - *ppos;
+
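+ /* LCB CSRs are 64-bit registers, hence the 8-byte granularity above */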
+ csr_off = LCB_START + *ppos;
+ for (total = 0; total < count; total += 8, csr_off += 8) {
+ if (read_lcb_csr(dd, csr_off, (u64 *)&data))
+ break; /* failed */
+ if (put_user(data, (unsigned long __user *)(buf + total)))
+ break;
+ }
+ *ppos += total;
+ return total;
+}
+
+static ssize_t debugfs_lcb_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct hfi1_pportdata *ppd = private2ppd(file);
+ struct hfi1_devdata *dd = ppd->dd;
+ unsigned long total, csr_off, data;
+
+ if (*ppos < 0)
+ return -EINVAL;
+ /* only write 8 byte quantities */
+ if ((count % 8) != 0)
+ return -EINVAL;
+ /* offset must be 8-byte aligned */
+ if ((*ppos % 8) != 0)
+ return -EINVAL;
+ /* do nothing if out of range or zero count */
+ if (*ppos >= (LCB_END - LCB_START) || !count)
+ return 0;
+ /* reduce count if needed */
+ if (*ppos + count > LCB_END - LCB_START)
+ count = (LCB_END - LCB_START) - *ppos;
+
+ csr_off = LCB_START + *ppos;
+ for (total = 0; total < count; total += 8, csr_off += 8) {
+ if (get_user(data, (unsigned long __user *)(buf + total)))
+ break;
+ if (write_lcb_csr(dd, csr_off, data))
+ break; /* failed */
+ }
+ *ppos += total;
+ return total;
+}
+
/*
* read the per-port QSFP data for ppd
*/
@@ -931,6 +1039,8 @@ static const struct counter_info port_cntr_ops[] = {
DEBUGFS_XOPS("qsfp2", qsfp2_debugfs_read, qsfp2_debugfs_write,
qsfp2_debugfs_open, qsfp2_debugfs_release),
DEBUGFS_OPS("asic_flags", asic_flags_read, asic_flags_write),
+ DEBUGFS_OPS("dc8051_memory", dc8051_memory_read, NULL),
+ DEBUGFS_OPS("lcb", debugfs_lcb_read, debugfs_lcb_write),
};
static void *_sdma_cpu_list_seq_start(struct seq_file *s, loff_t *pos)
diff --git a/drivers/infiniband/hw/hfi1/driver.c b/drivers/infiniband/hw/hfi1/driver.c
index c5efff29c147..4fbaee68012b 100644
--- a/drivers/infiniband/hw/hfi1/driver.c
+++ b/drivers/infiniband/hw/hfi1/driver.c
@@ -795,8 +795,7 @@ static inline void process_rcv_qp_work(struct hfi1_packet *packet)
hfi1_schedule_send(qp);
spin_unlock_irqrestore(&qp->s_lock, flags);
}
- if (atomic_dec_and_test(&qp->refcount))
- wake_up(&qp->wait);
+ rvt_put_qp(qp);
}
}
diff --git a/drivers/infiniband/hw/hfi1/eprom.c b/drivers/infiniband/hw/hfi1/eprom.c
index e70c223801b4..26da124c88e2 100644
--- a/drivers/infiniband/hw/hfi1/eprom.c
+++ b/drivers/infiniband/hw/hfi1/eprom.c
@@ -207,6 +207,40 @@ done_asic:
/* magic character sequence that trails an image */
#define IMAGE_TRAIL_MAGIC "egamiAPO"
+/* EPROM file types */
+#define HFI1_EFT_PLATFORM_CONFIG 2
+
+/* segment size - 128 KiB */
+#define SEG_SIZE (128 * 1024)
+
+struct hfi1_eprom_footer {
+ u32 oprom_size; /* size of the oprom, in bytes */
+ u16 num_table_entries;
+ u16 version; /* version of this footer */
+ u32 magic; /* must be last */
+};
+
+struct hfi1_eprom_table_entry {
+ u32 type; /* file type */
+ u32 offset; /* file offset from start of EPROM */
+ u32 size; /* file size, in bytes */
+};
+
+/*
+ * Calculate the max number of table entries that will fit within a directory
+ * buffer of size 'dir_size'.
+ */
+#define MAX_TABLE_ENTRIES(dir_size) \
+ (((dir_size) - sizeof(struct hfi1_eprom_footer)) / \
+ sizeof(struct hfi1_eprom_table_entry))
+
+#define DIRECTORY_SIZE(n) (sizeof(struct hfi1_eprom_footer) + \
+ (sizeof(struct hfi1_eprom_table_entry) * (n)))
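+/*
+ * Both structures above are 12 bytes with no padding, so
+ * DIRECTORY_SIZE(n) = 12 * (n + 1) and
+ * MAX_TABLE_ENTRIES(s) = (s - 12) / 12.
+ */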
+
+#define MAGIC4(a, b, c, d) ((d) << 24 | (c) << 16 | (b) << 8 | (a))
+#define FOOTER_MAGIC MAGIC4('e', 'p', 'r', 'm')
+#define FOOTER_VERSION 1
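+/*
+ * Segment 0 holds the oprom at its front and the table entries plus
+ * footer at its very end; file data fills the space in between and
+ * may continue into later segments. On a little-endian CPU,
+ * FOOTER_MAGIC appears in the EPROM as the bytes 'e' 'p' 'r' 'm'.
+ */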
+
/*
* Read all of partition 1. The actual file is at the front. Adjust
* the returned size if a trailing image magic is found.
@@ -242,6 +276,167 @@ static int read_partition_platform_config(struct hfi1_devdata *dd, void **data,
}
/*
+ * The segment magic has been checked. There is a footer and table of
+ * contents present.
+ *
+ * directory is a u32 aligned buffer of size EP_PAGE_SIZE.
+ */
+static int read_segment_platform_config(struct hfi1_devdata *dd,
+ void *directory, void **data, u32 *size)
+{
+ struct hfi1_eprom_footer *footer;
+ struct hfi1_eprom_table_entry *table;
+ struct hfi1_eprom_table_entry *entry;
+ void *buffer = NULL;
+ void *table_buffer = NULL;
+ int ret, i;
+ u32 directory_size;
+ u32 seg_base, seg_offset;
+ u32 bytes_available, ncopied, to_copy;
+
+ /* the footer is at the end of the directory */
+ footer = (struct hfi1_eprom_footer *)
+ (directory + EP_PAGE_SIZE - sizeof(*footer));
+
+ /* make sure the structure version is supported */
+ if (footer->version != FOOTER_VERSION)
+ return -EINVAL;
+
+ /* oprom size cannot be larger than a segment */
+ if (footer->oprom_size >= SEG_SIZE)
+ return -EINVAL;
+
+ /* the file table must fit in a segment with the oprom */
+ if (footer->num_table_entries >
+ MAX_TABLE_ENTRIES(SEG_SIZE - footer->oprom_size))
+ return -EINVAL;
+
+ /* find the file table start, which precedes the footer */
+ directory_size = DIRECTORY_SIZE(footer->num_table_entries);
+ if (directory_size <= EP_PAGE_SIZE) {
+ /* the file table fits into the directory buffer handed in */
+ table = (struct hfi1_eprom_table_entry *)
+ (directory + EP_PAGE_SIZE - directory_size);
+ } else {
+ /* need to allocate and read more */
+ table_buffer = kmalloc(directory_size, GFP_KERNEL);
+ if (!table_buffer)
+ return -ENOMEM;
+ ret = read_length(dd, SEG_SIZE - directory_size,
+ directory_size, table_buffer);
+ if (ret)
+ goto done;
+ table = table_buffer;
+ }
+
+ /* look for the platform configuration file in the table */
+ for (entry = NULL, i = 0; i < footer->num_table_entries; i++) {
+ if (table[i].type == HFI1_EFT_PLATFORM_CONFIG) {
+ entry = &table[i];
+ break;
+ }
+ }
+ if (!entry) {
+ ret = -ENOENT;
+ goto done;
+ }
+
+ /*
+ * Sanity check on the configuration file size - it should never
+ * be larger than 4 KiB.
+ */
+ if (entry->size > (4 * 1024)) {
+ dd_dev_err(dd, "Bad configuration file size 0x%x\n",
+ entry->size);
+ ret = -EINVAL;
+ goto done;
+ }
+
+ /* check for bogus offset and size that wrap when added together */
+ if (entry->offset + entry->size < entry->offset) {
+ dd_dev_err(dd,
+ "Bad configuration file start + size 0x%x+0x%x\n",
+ entry->offset, entry->size);
+ ret = -EINVAL;
+ goto done;
+ }
+
+ /* allocate the buffer to return */
+ buffer = kmalloc(entry->size, GFP_KERNEL);
+ if (!buffer) {
+ ret = -ENOMEM;
+ goto done;
+ }
+
+ /*
+ * Extract the file by looping over segments until it is fully read.
+ */
+ seg_offset = entry->offset % SEG_SIZE;
+ seg_base = entry->offset - seg_offset;
+ ncopied = 0;
+ while (ncopied < entry->size) {
+ /* calculate data bytes available in this segment */
+
+ /* start with the bytes from the current offset to the end */
+ bytes_available = SEG_SIZE - seg_offset;
+ /* subtract off footer and table from segment 0 */
+ if (seg_base == 0) {
+ /*
+ * Sanity check: should not have a starting point
+ * at or within the directory.
+ */
+ if (bytes_available <= directory_size) {
+ dd_dev_err(dd,
+ "Bad configuration file - offset 0x%x within footer+table\n",
+ entry->offset);
+ ret = -EINVAL;
+ goto done;
+ }
+ bytes_available -= directory_size;
+ }
+
+ /* calculate bytes wanted */
+ to_copy = entry->size - ncopied;
+
+ /* max out at the available bytes in this segment */
+ if (to_copy > bytes_available)
+ to_copy = bytes_available;
+
+ /*
+ * Read from the EPROM.
+ *
+ * The sanity check for entry->offset is done in read_length().
+ * The EPROM offset is validated against what the hardware
+ * addressing supports. In addition, if the offset is larger
+ * than the actual EPROM, it silently wraps. It will work
+ * fine, though the reader may not get what they expected
+ * from the EPROM.
+ */
+ ret = read_length(dd, seg_base + seg_offset, to_copy,
+ buffer + ncopied);
+ if (ret)
+ goto done;
+
+ ncopied += to_copy;
+
+ /* set up for next segment */
+ seg_offset = footer->oprom_size;
+ seg_base += SEG_SIZE;
+ }
+
+ /* success */
+ ret = 0;
+ *data = buffer;
+ *size = entry->size;
+
+done:
+ kfree(table_buffer);
+ if (ret)
+ kfree(buffer);
+ return ret;
+}
+
+/*
* Read the platform configuration file from the EPROM.
*
* On success, an allocated buffer containing the data and its size are
@@ -253,6 +448,7 @@ static int read_partition_platform_config(struct hfi1_devdata *dd, void **data,
* -EBUSY - not able to acquire access to the EPROM
* -ENOENT - no recognizable file written
* -ENOMEM - buffer could not be allocated
+ * -EINVAL - invalid EPROM contents found
*/
int eprom_read_platform_config(struct hfi1_devdata *dd, void **data, u32 *size)
{
@@ -266,21 +462,20 @@ int eprom_read_platform_config(struct hfi1_devdata *dd, void **data, u32 *size)
if (ret)
return -EBUSY;
- /* read the last page of P0 for the EPROM format magic */
- ret = read_length(dd, P1_START - EP_PAGE_SIZE, EP_PAGE_SIZE, directory);
+ /* read the last page of the segment for the EPROM format magic */
+ ret = read_length(dd, SEG_SIZE - EP_PAGE_SIZE, EP_PAGE_SIZE, directory);
if (ret)
goto done;
- /* last dword of P0 contains a magic indicator */
- if (directory[EP_PAGE_DWORDS - 1] == 0) {
+ /* last dword of the segment contains a magic value */
+ if (directory[EP_PAGE_DWORDS - 1] == FOOTER_MAGIC) {
+ /* segment format */
+ ret = read_segment_platform_config(dd, directory, data, size);
+ } else {
/* partition format */
ret = read_partition_platform_config(dd, data, size);
- goto done;
}
- /* nothing recognized */
- ret = -ENOENT;
-
done:
release_chip_resource(dd, CR_EPROM);
return ret;
diff --git a/drivers/infiniband/hw/hfi1/firmware.c b/drivers/infiniband/hw/hfi1/firmware.c
index 13db8eb4f4ec..0dd50cdb039a 100644
--- a/drivers/infiniband/hw/hfi1/firmware.c
+++ b/drivers/infiniband/hw/hfi1/firmware.c
@@ -239,6 +239,16 @@ static const u8 all_fabric_serdes_broadcast = 0xe1;
const u8 pcie_serdes_broadcast[2] = { 0xe2, 0xe3 };
static const u8 all_pcie_serdes_broadcast = 0xe0;
+static const u32 platform_config_table_limits[PLATFORM_CONFIG_TABLE_MAX] = {
+ 0,
+ SYSTEM_TABLE_MAX,
+ PORT_TABLE_MAX,
+ RX_PRESET_TABLE_MAX,
+ TX_PRESET_TABLE_MAX,
+ QSFP_ATTEN_TABLE_MAX,
+ VARIABLE_SETTINGS_TABLE_MAX
+};
+
/* forwards */
static void dispose_one_firmware(struct firmware_details *fdet);
static int load_fabric_serdes_firmware(struct hfi1_devdata *dd,
@@ -263,11 +273,13 @@ static int __read_8051_data(struct hfi1_devdata *dd, u32 addr, u64 *result)
u64 reg;
int count;
- /* start the read at the given address */
- reg = ((addr & DC_DC8051_CFG_RAM_ACCESS_CTRL_ADDRESS_MASK)
- << DC_DC8051_CFG_RAM_ACCESS_CTRL_ADDRESS_SHIFT)
- | DC_DC8051_CFG_RAM_ACCESS_CTRL_READ_ENA_SMASK;
+ /* step 1: set the address, clear enable */
+ reg = (addr & DC_DC8051_CFG_RAM_ACCESS_CTRL_ADDRESS_MASK)
+ << DC_DC8051_CFG_RAM_ACCESS_CTRL_ADDRESS_SHIFT;
write_csr(dd, DC_DC8051_CFG_RAM_ACCESS_CTRL, reg);
+ /* step 2: enable */
+ write_csr(dd, DC_DC8051_CFG_RAM_ACCESS_CTRL,
+ reg | DC_DC8051_CFG_RAM_ACCESS_CTRL_READ_ENA_SMASK);
/* wait until ACCESS_COMPLETED is set */
count = 0;
@@ -707,6 +719,9 @@ static int obtain_firmware(struct hfi1_devdata *dd)
&dd->pcidev->dev);
if (err) {
platform_config = NULL;
+ dd_dev_err(dd,
+ "%s: No default platform config file found\n",
+ __func__);
goto done;
}
dd->platform_config.data = platform_config->data;
@@ -1761,8 +1776,17 @@ int parse_platform_config(struct hfi1_devdata *dd)
u32 record_idx = 0, table_type = 0, table_length_dwords = 0;
int ret = -EINVAL; /* assume failure */
+ /*
+ * For integrated devices that did not fall back to the default file,
+ * the SI tuning information for active channels is acquired from the
+ * scratch register bitmap, thus there is no platform config to parse.
+ * Skip parsing in these situations.
+ */
+ if (is_integrated(dd) && !platform_config_load)
+ return 0;
+
if (!dd->platform_config.data) {
- dd_dev_info(dd, "%s: Missing config file\n", __func__);
+ dd_dev_err(dd, "%s: Missing config file\n", __func__);
goto bail;
}
ptr = (u32 *)dd->platform_config.data;
@@ -1770,7 +1794,7 @@ int parse_platform_config(struct hfi1_devdata *dd)
magic_num = *ptr;
ptr++;
if (magic_num != PLATFORM_CONFIG_MAGIC_NUM) {
- dd_dev_info(dd, "%s: Bad config file\n", __func__);
+ dd_dev_err(dd, "%s: Bad config file\n", __func__);
goto bail;
}
@@ -1797,9 +1821,9 @@ int parse_platform_config(struct hfi1_devdata *dd)
header1 = *ptr;
header2 = *(ptr + 1);
if (header1 != ~header2) {
- dd_dev_info(dd, "%s: Failed validation at offset %ld\n",
- __func__, (ptr - (u32 *)
- dd->platform_config.data));
+ dd_dev_err(dd, "%s: Failed validation at offset %ld\n",
+ __func__, (ptr - (u32 *)
+ dd->platform_config.data));
goto bail;
}
@@ -1841,11 +1865,11 @@ int parse_platform_config(struct hfi1_devdata *dd)
table_length_dwords;
break;
default:
- dd_dev_info(dd,
- "%s: Unknown data table %d, offset %ld\n",
- __func__, table_type,
- (ptr - (u32 *)
- dd->platform_config.data));
+ dd_dev_err(dd,
+ "%s: Unknown data table %d, offset %ld\n",
+ __func__, table_type,
+ (ptr - (u32 *)
+ dd->platform_config.data));
goto bail; /* We don't trust this file now */
}
pcfgcache->config_tables[table_type].table = ptr;
@@ -1865,11 +1889,11 @@ int parse_platform_config(struct hfi1_devdata *dd)
case PLATFORM_CONFIG_VARIABLE_SETTINGS_TABLE:
break;
default:
- dd_dev_info(dd,
- "%s: Unknown meta table %d, offset %ld\n",
- __func__, table_type,
- (ptr -
- (u32 *)dd->platform_config.data));
+ dd_dev_err(dd,
+ "%s: Unknown meta table %d, offset %ld\n",
+ __func__, table_type,
+ (ptr -
+ (u32 *)dd->platform_config.data));
goto bail; /* We don't trust this file now */
}
pcfgcache->config_tables[table_type].table_metadata =
@@ -1884,10 +1908,9 @@ int parse_platform_config(struct hfi1_devdata *dd)
/* Jump the table */
ptr += table_length_dwords;
if (crc != *ptr) {
- dd_dev_info(dd, "%s: Failed CRC check at offset %ld\n",
- __func__, (ptr -
- (u32 *)
- dd->platform_config.data));
+ dd_dev_err(dd, "%s: Failed CRC check at offset %ld\n",
+ __func__, (ptr -
+ (u32 *)dd->platform_config.data));
goto bail;
}
/* Jump the CRC DWORD */
@@ -1901,6 +1924,84 @@ bail:
return ret;
}
+static void get_integrated_platform_config_field(
+ struct hfi1_devdata *dd,
+ enum platform_config_table_type_encoding table_type,
+ int field_index, u32 *data)
+{
+ struct hfi1_pportdata *ppd = dd->pport;
+ u8 *cache = ppd->qsfp_info.cache;
+ u32 tx_preset = 0;
+
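+ /* each value below is a bit-field extract: (reg & SMASK) >> SHIFT */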
+ switch (table_type) {
+ case PLATFORM_CONFIG_SYSTEM_TABLE:
+ if (field_index == SYSTEM_TABLE_QSFP_POWER_CLASS_MAX)
+ *data = ppd->max_power_class;
+ else if (field_index == SYSTEM_TABLE_QSFP_ATTENUATION_DEFAULT_25G)
+ *data = ppd->default_atten;
+ break;
+ case PLATFORM_CONFIG_PORT_TABLE:
+ if (field_index == PORT_TABLE_PORT_TYPE)
+ *data = ppd->port_type;
+ else if (field_index == PORT_TABLE_LOCAL_ATTEN_25G)
+ *data = ppd->local_atten;
+ else if (field_index == PORT_TABLE_REMOTE_ATTEN_25G)
+ *data = ppd->remote_atten;
+ break;
+ case PLATFORM_CONFIG_RX_PRESET_TABLE:
+ if (field_index == RX_PRESET_TABLE_QSFP_RX_CDR_APPLY)
+ *data = (ppd->rx_preset & QSFP_RX_CDR_APPLY_SMASK) >>
+ QSFP_RX_CDR_APPLY_SHIFT;
+ else if (field_index == RX_PRESET_TABLE_QSFP_RX_EMP_APPLY)
+ *data = (ppd->rx_preset & QSFP_RX_EMP_APPLY_SMASK) >>
+ QSFP_RX_EMP_APPLY_SHIFT;
+ else if (field_index == RX_PRESET_TABLE_QSFP_RX_AMP_APPLY)
+ *data = (ppd->rx_preset & QSFP_RX_AMP_APPLY_SMASK) >>
+ QSFP_RX_AMP_APPLY_SHIFT;
+ else if (field_index == RX_PRESET_TABLE_QSFP_RX_CDR)
+ *data = (ppd->rx_preset & QSFP_RX_CDR_SMASK) >>
+ QSFP_RX_CDR_SHIFT;
+ else if (field_index == RX_PRESET_TABLE_QSFP_RX_EMP)
+ *data = (ppd->rx_preset & QSFP_RX_EMP_SMASK) >>
+ QSFP_RX_EMP_SHIFT;
+ else if (field_index == RX_PRESET_TABLE_QSFP_RX_AMP)
+ *data = (ppd->rx_preset & QSFP_RX_AMP_SMASK) >>
+ QSFP_RX_AMP_SHIFT;
+ break;
+ case PLATFORM_CONFIG_TX_PRESET_TABLE:
+ if (cache[QSFP_EQ_INFO_OFFS] & 0x4)
+ tx_preset = ppd->tx_preset_eq;
+ else
+ tx_preset = ppd->tx_preset_noeq;
+ if (field_index == TX_PRESET_TABLE_PRECUR)
+ *data = (tx_preset & TX_PRECUR_SMASK) >>
+ TX_PRECUR_SHIFT;
+ else if (field_index == TX_PRESET_TABLE_ATTN)
+ *data = (tx_preset & TX_ATTN_SMASK) >>
+ TX_ATTN_SHIFT;
+ else if (field_index == TX_PRESET_TABLE_POSTCUR)
+ *data = (tx_preset & TX_POSTCUR_SMASK) >>
+ TX_POSTCUR_SHIFT;
+ else if (field_index == TX_PRESET_TABLE_QSFP_TX_CDR_APPLY)
+ *data = (tx_preset & QSFP_TX_CDR_APPLY_SMASK) >>
+ QSFP_TX_CDR_APPLY_SHIFT;
+ else if (field_index == TX_PRESET_TABLE_QSFP_TX_EQ_APPLY)
+ *data = (tx_preset & QSFP_TX_EQ_APPLY_SMASK) >>
+ QSFP_TX_EQ_APPLY_SHIFT;
+ else if (field_index == TX_PRESET_TABLE_QSFP_TX_CDR)
+ *data = (tx_preset & QSFP_TX_CDR_SMASK) >>
+ QSFP_TX_CDR_SHIFT;
+ else if (field_index == TX_PRESET_TABLE_QSFP_TX_EQ)
+ *data = (tx_preset & QSFP_TX_EQ_SMASK) >>
+ QSFP_TX_EQ_SHIFT;
+ break;
+ case PLATFORM_CONFIG_QSFP_ATTEN_TABLE:
+ case PLATFORM_CONFIG_VARIABLE_SETTINGS_TABLE:
+ default:
+ break;
+ }
+}
+
static int get_platform_fw_field_metadata(struct hfi1_devdata *dd, int table,
int field, u32 *field_len_bits,
u32 *field_start_bits)
@@ -1976,6 +2077,15 @@ int get_platform_config_field(struct hfi1_devdata *dd,
else
return -EINVAL;
+ if (is_integrated(dd) && !platform_config_load) {
+ /*
+ * Use saved configuration from ppd for integrated platforms
+ */
+ get_integrated_platform_config_field(dd, table_type,
+ field_index, data);
+ return 0;
+ }
+
ret = get_platform_fw_field_metadata(dd, table_type, field_index,
&field_len_bits,
&field_start_bits);
diff --git a/drivers/infiniband/hw/hfi1/hfi.h b/drivers/infiniband/hw/hfi1/hfi.h
index cc87fd4e534b..751a0fb29fa5 100644
--- a/drivers/infiniband/hw/hfi1/hfi.h
+++ b/drivers/infiniband/hw/hfi1/hfi.h
@@ -492,6 +492,9 @@ struct rvt_sge_state;
#define HFI1_MIN_VLS_SUPPORTED 1
#define HFI1_MAX_VLS_SUPPORTED 8
+#define HFI1_GUIDS_PER_PORT 5
+#define HFI1_PORT_GUID_INDEX 0
+
static inline void incr_cntr64(u64 *cntr)
{
if (*cntr < (u64)-1LL)
@@ -559,11 +562,20 @@ struct hfi1_pportdata {
struct kobject vl2mtu_kobj;
/* PHY support */
- u32 port_type;
struct qsfp_data qsfp_info;
+ /* Values for SI tuning of SerDes */
+ u32 port_type;
+ u32 tx_preset_eq;
+ u32 tx_preset_noeq;
+ u32 rx_preset;
+ u8 local_atten;
+ u8 remote_atten;
+ u8 default_atten;
+ u8 max_power_class;
+
+ /* GUIDs for this interface, in host order, guids[0] is a port guid */
+ u64 guids[HFI1_GUIDS_PER_PORT];
- /* GUID for this interface, in host order */
- u64 guid;
/* GUID for peer interface, in host order */
u64 neighbor_guid;
@@ -826,32 +838,29 @@ struct hfi1_devdata {
u8 __iomem *kregend;
/* physical address of chip for io_remap, etc. */
resource_size_t physaddr;
- /* receive context data */
- struct hfi1_ctxtdata **rcd;
+ /* Per VL data. Enough for all VLs but not all elements are set/used. */
+ struct per_vl_data vld[PER_VL_SEND_CONTEXTS];
/* send context data */
struct send_context_info *send_contexts;
/* map hardware send contexts to software index */
u8 *hw_to_sw;
/* spinlock for allocating and releasing send context resources */
spinlock_t sc_lock;
- /* Per VL data. Enough for all VLs but not all elements are set/used. */
- struct per_vl_data vld[PER_VL_SEND_CONTEXTS];
/* lock for pio_map */
spinlock_t pio_map_lock;
+ /* Send Context initialization lock. */
+ spinlock_t sc_init_lock;
+ /* lock for sdma_map */
+ spinlock_t sde_map_lock;
/* array of kernel send contexts */
struct send_context **kernel_send_context;
/* array of vl maps */
struct pio_vl_map __rcu *pio_map;
- /* seqlock for sc2vl */
- seqlock_t sc2vl_lock;
- u64 sc2vl[4];
- /* Send Context initialization lock. */
- spinlock_t sc_init_lock;
+ /* default flags to last descriptor */
+ u64 default_desc1;
/* fields common to all SDMA engines */
- /* default flags to last descriptor */
- u64 default_desc1;
volatile __le64 *sdma_heads_dma; /* DMA'ed by chip */
dma_addr_t sdma_heads_phys;
void *sdma_pad_dma; /* DMA'ed by chip */
@@ -862,8 +871,6 @@ struct hfi1_devdata {
u32 chip_sdma_engines;
/* num used */
u32 num_sdma;
- /* lock for sdma_map */
- spinlock_t sde_map_lock;
/* array of engines sized by num_sdma */
struct sdma_engine *per_sdma;
/* array of vl maps */
@@ -872,14 +879,11 @@ struct hfi1_devdata {
wait_queue_head_t sdma_unfreeze_wq;
atomic_t sdma_unfreeze_count;
+ u32 lcb_access_count; /* count of LCB users */
+
/* common data between shared ASIC HFIs in this OS */
struct hfi1_asic_data *asic_data;
- /* hfi1_pportdata, points to array of (physical) port-specific
- * data structs, indexed by pidx (0..n-1)
- */
- struct hfi1_pportdata *pport;
-
/* mem-mapped pointer to base of PIO buffers */
void __iomem *piobase;
/*
@@ -896,20 +900,13 @@ struct hfi1_devdata {
/* send context numbers and sizes for each type */
struct sc_config_sizes sc_sizes[SC_MAX];
- u32 lcb_access_count; /* count of LCB users */
-
char *boardname; /* human readable board info */
- /* device (not port) flags, basically device capabilities */
- u32 flags;
-
/* reset value */
u64 z_int_counter;
u64 z_rcv_limit;
u64 z_send_schedule;
- /* percpu int_counter */
- u64 __percpu *int_counter;
- u64 __percpu *rcv_limit;
+
u64 __percpu *send_schedule;
/* number of receive contexts in use by the driver */
u32 num_rcv_contexts;
@@ -924,6 +921,7 @@ struct hfi1_devdata {
/* base receive interrupt timeout, in CSR units */
u32 rcv_intr_timeout_csr;
+ u32 freezelen; /* max length of freezemsg */
u64 __iomem *egrtidbase;
spinlock_t sendctrl_lock; /* protect changes to SendCtrl */
spinlock_t rcvctrl_lock; /* protect changes to RcvCtrl */
@@ -945,7 +943,6 @@ struct hfi1_devdata {
* IB link status cheaply
*/
struct hfi1_status *status;
- u32 freezelen; /* max length of freezemsg */
/* revision register shadow */
u64 revision;
@@ -973,6 +970,8 @@ struct hfi1_devdata {
u16 rcvegrbufsize_shift;
/* both sides of the PCIe link are gen3 capable */
u8 link_gen3_capable;
+ /* default link down value (poll/sleep) */
+ u8 link_default;
/* localbus width (1, 2,4,8,16,32) from config space */
u32 lbus_width;
/* localbus speed in MHz */
@@ -1008,8 +1007,6 @@ struct hfi1_devdata {
u8 hfi1_id;
/* implementation code */
u8 icode;
- /* default link down value (poll/sleep) */
- u8 link_default;
/* vAU of this device */
u8 vau;
/* vCU of this device */
@@ -1020,27 +1017,17 @@ struct hfi1_devdata {
u16 vl15_init;
/* Misc small ints */
- /* Number of physical ports available */
- u8 num_pports;
- /* Lowest context number which can be used by user processes */
- u8 first_user_ctxt;
u8 n_krcv_queues;
u8 qos_shift;
- u8 qpn_mask;
- u16 rhf_offset; /* offset of RHF within receive header entry */
u16 irev; /* implementation revision */
u16 dc8051_ver; /* 8051 firmware version */
+ spinlock_t hfi1_diag_trans_lock; /* protect diag observer ops */
struct platform_config platform_config;
struct platform_config_cache pcfg_cache;
struct diag_client *diag_client;
- spinlock_t hfi1_diag_trans_lock; /* protect diag observer ops */
-
- u8 psxmitwait_supported;
- /* cycle length of PS* counters in HW (in picoseconds) */
- u16 psxmitwait_check_rate;
/* MSI-X information */
struct hfi1_msix_entry *msix_entries;
@@ -1055,6 +1042,9 @@ struct hfi1_devdata {
struct rcv_array_data rcv_entries;
+ /* cycle length of PS* counters in HW (in picoseconds) */
+ u16 psxmitwait_check_rate;
+
/*
* 64 bit synthetic counters
*/
@@ -1085,11 +1075,11 @@ struct hfi1_devdata {
struct err_info_rcvport err_info_rcvport;
struct err_info_constraint err_info_rcv_constraint;
struct err_info_constraint err_info_xmit_constraint;
- u8 err_info_uncorrectable;
- u8 err_info_fmconfig;
atomic_t drop_packet;
u8 do_drop;
+ u8 err_info_uncorrectable;
+ u8 err_info_fmconfig;
/*
* Software counters for the status bits defined by the
@@ -1112,40 +1102,60 @@ struct hfi1_devdata {
u64 sw_cce_err_status_aggregate;
/* Software counter that aggregates all bypass packet rcv errors */
u64 sw_rcv_bypass_packet_errors;
- /* receive interrupt functions */
- rhf_rcv_function_ptr *rhf_rcv_function_map;
+ /* receive interrupt function */
rhf_rcv_function_ptr normal_rhf_rcv_functions[8];
+ /* Save the enabled LCB error bits */
+ u64 lcb_err_en;
+
/*
* Capability to have different send engines simply by changing a
* pointer value.
*/
- send_routine process_pio_send;
+ send_routine process_pio_send ____cacheline_aligned_in_smp;
send_routine process_dma_send;
void (*pio_inline_send)(struct hfi1_devdata *dd, struct pio_buf *pbuf,
u64 pbc, const void *from, size_t count);
+ /* hfi1_pportdata, points to array of (physical) port-specific
+ * data structs, indexed by pidx (0..n-1)
+ */
+ struct hfi1_pportdata *pport;
+ /* receive context data */
+ struct hfi1_ctxtdata **rcd;
+ u64 __percpu *int_counter;
+ /* device (not port) flags, basically device capabilities */
+ u16 flags;
+ /* Number of physical ports available */
+ u8 num_pports;
+ /* Lowest context number which can be used by user processes */
+ u8 first_user_ctxt;
+ /* adding a new field here would make it part of this cacheline */
+
+ /* seqlock for sc2vl */
+ seqlock_t sc2vl_lock ____cacheline_aligned_in_smp;
+ u64 sc2vl[4];
+ /* receive interrupt functions */
+ rhf_rcv_function_ptr *rhf_rcv_function_map;
+ u64 __percpu *rcv_limit;
+ u16 rhf_offset; /* offset of RHF within receive header entry */
+ /* adding a new field here would make it part of this cacheline */
/* OUI comes from the HW. Used everywhere as 3 separate bytes. */
u8 oui1;
u8 oui2;
u8 oui3;
+ u8 dc_shutdown;
+
/* Timer and counter used to detect RcvBufOvflCnt changes */
struct timer_list rcverr_timer;
- u32 rcv_ovfl_cnt;
wait_queue_head_t event_queue;
- /* Save the enabled LCB error bits */
- u64 lcb_err_en;
- u8 dc_shutdown;
-
/* receive context tail dummy address */
__le64 *rcvhdrtail_dummy_kvaddr;
dma_addr_t rcvhdrtail_dummy_dma;
- bool eprom_available; /* true if EPROM is available for this device */
- bool aspm_supported; /* Does HW support ASPM */
- bool aspm_enabled; /* ASPM state: enabled/disabled */
+ u32 rcv_ovfl_cnt;
/* Serialize ASPM enable/disable between multiple verbs contexts */
spinlock_t aspm_lock;
/* Number of verbs contexts which have disabled ASPM */
@@ -1155,8 +1165,11 @@ struct hfi1_devdata {
/* Used to wait for outstanding user space clients before dev removal */
struct completion user_comp;
- struct hfi1_affinity *affinity;
+ bool eprom_available; /* true if EPROM is available for this device */
+ bool aspm_supported; /* Does HW support ASPM */
+ bool aspm_enabled; /* ASPM state: enabled/disabled */
struct rhashtable sdma_rht;
+
struct kobject kobj;
};
@@ -1604,6 +1617,17 @@ static inline u16 hfi1_get_pkey(struct hfi1_ibport *ibp, unsigned index)
}
/*
+ * Return the indexed GUID from the port GUIDs table.
+ */
+static inline __be64 get_sguid(struct hfi1_ibport *ibp, unsigned int index)
+{
+ struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
+
+ WARN_ON(index >= HFI1_GUIDS_PER_PORT);
+ return cpu_to_be64(ppd->guids[index]);
+}
+
+/*
* Called by readers of cc_state only, must call under rcu_read_lock().
*/
static inline struct cc_state *get_cc_state(struct hfi1_pportdata *ppd)
@@ -1982,6 +2006,12 @@ static inline u32 qsfp_resource(struct hfi1_devdata *dd)
return i2c_target(dd->hfi1_id);
}
+/* Is this device integrated or discrete? */
+static inline bool is_integrated(struct hfi1_devdata *dd)
+{
+ return dd->pcidev->device == PCI_DEVICE_ID_INTEL1;
+}
+
int hfi1_tempsense_rd(struct hfi1_devdata *dd, struct hfi1_temp *temp);
#define DD_DEV_ENTRY(dd) __string(dev, dev_name(&(dd)->pcidev->dev))
diff --git a/drivers/infiniband/hw/hfi1/iowait.h b/drivers/infiniband/hw/hfi1/iowait.h
index 2ec6ef38d389..d9740ddea6f1 100644
--- a/drivers/infiniband/hw/hfi1/iowait.h
+++ b/drivers/infiniband/hw/hfi1/iowait.h
@@ -64,6 +64,7 @@ struct sdma_engine;
/**
* struct iowait - linkage for delayed progress/waiting
* @list: used to add/insert into QP/PQ wait lists
+ * @lock: used to record the list head lock
* @tx_head: overflow list of sdma_txreq's
* @sleep: no space callback
* @wakeup: space callback wakeup
@@ -91,6 +92,11 @@ struct sdma_engine;
* so sleeping is not allowed.
*
* The wait_dma member along with the iow
+ *
+ * The lock field is used by waiters to record
+ * the seqlock_t that guards the list head.
+ * Waiters explicitly know that, but the destroy
+ * code that unwaits QPs does not.
*/
struct iowait {
@@ -103,6 +109,7 @@ struct iowait {
unsigned seq);
void (*wakeup)(struct iowait *wait, int reason);
void (*sdma_drained)(struct iowait *wait);
+ seqlock_t *lock;
struct work_struct iowork;
wait_queue_head_t wait_dma;
wait_queue_head_t wait_pio;
@@ -141,6 +148,7 @@ static inline void iowait_init(
void (*sdma_drained)(struct iowait *wait))
{
wait->count = 0;
+ wait->lock = NULL;
INIT_LIST_HEAD(&wait->list);
INIT_LIST_HEAD(&wait->tx_head);
INIT_WORK(&wait->iowork, func);
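The point of the new lock field can be sketched as follows: a waiter records the seqlock guarding whichever wait list it joins, and teardown code can then unwait the QP without knowing which list that was. A simplified sketch mirroring flush_iowait() later in this series; the function name is a placeholder:

	/* Sketch only: works for any wait list the iowait joined. */
	static void example_unwait(struct iowait *wait)
	{
		seqlock_t *lock = wait->lock;	/* recorded at enqueue time */
		unsigned long flags;

		if (!lock)
			return;			/* never queued anywhere */
		write_seqlock_irqsave(lock, flags);
		if (!list_empty(&wait->list)) {
			list_del_init(&wait->list);
			wait->lock = NULL;
		}
		write_sequnlock_irqrestore(lock, flags);
	}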
diff --git a/drivers/infiniband/hw/hfi1/mad.c b/drivers/infiniband/hw/hfi1/mad.c
index 9487c9bb8920..6e595afca24c 100644
--- a/drivers/infiniband/hw/hfi1/mad.c
+++ b/drivers/infiniband/hw/hfi1/mad.c
@@ -128,7 +128,7 @@ static void send_trap(struct hfi1_ibport *ibp, void *data, unsigned len)
smp = send_buf->mad;
smp->base_version = OPA_MGMT_BASE_VERSION;
smp->mgmt_class = IB_MGMT_CLASS_SUBN_LID_ROUTED;
- smp->class_version = OPA_SMI_CLASS_VERSION;
+ smp->class_version = OPA_SM_CLASS_VERSION;
smp->method = IB_MGMT_METHOD_TRAP;
ibp->rvp.tid++;
smp->tid = cpu_to_be64(ibp->rvp.tid);
@@ -336,20 +336,20 @@ static int __subn_get_opa_nodeinfo(struct opa_smp *smp, u32 am, u8 *data,
ni = (struct opa_node_info *)data;
/* GUID 0 is illegal */
- if (am || pidx >= dd->num_pports || dd->pport[pidx].guid == 0) {
+ if (am || pidx >= dd->num_pports || ibdev->node_guid == 0 ||
+ get_sguid(to_iport(ibdev, port), HFI1_PORT_GUID_INDEX) == 0) {
smp->status |= IB_SMP_INVALID_FIELD;
return reply((struct ib_mad_hdr *)smp);
}
- ni->port_guid = cpu_to_be64(dd->pport[pidx].guid);
+ ni->port_guid = get_sguid(to_iport(ibdev, port), HFI1_PORT_GUID_INDEX);
ni->base_version = OPA_MGMT_BASE_VERSION;
- ni->class_version = OPA_SMI_CLASS_VERSION;
+ ni->class_version = OPA_SM_CLASS_VERSION;
ni->node_type = 1; /* channel adapter */
ni->num_ports = ibdev->phys_port_cnt;
/* This is already in network order */
ni->system_image_guid = ib_hfi1_sys_image_guid;
- /* Use first-port GUID as node */
- ni->node_guid = cpu_to_be64(dd->pport->guid);
+ ni->node_guid = ibdev->node_guid;
ni->partition_cap = cpu_to_be16(hfi1_get_npkeys(dd));
ni->device_id = cpu_to_be16(dd->pcidev->device);
ni->revision = cpu_to_be32(dd->minrev);
@@ -373,19 +373,20 @@ static int subn_get_nodeinfo(struct ib_smp *smp, struct ib_device *ibdev,
/* GUID 0 is illegal */
if (smp->attr_mod || pidx >= dd->num_pports ||
- dd->pport[pidx].guid == 0)
+ ibdev->node_guid == 0 ||
+ get_sguid(to_iport(ibdev, port), HFI1_PORT_GUID_INDEX) == 0) {
smp->status |= IB_SMP_INVALID_FIELD;
- else
- nip->port_guid = cpu_to_be64(dd->pport[pidx].guid);
+ return reply((struct ib_mad_hdr *)smp);
+ }
+ nip->port_guid = get_sguid(to_iport(ibdev, port), HFI1_PORT_GUID_INDEX);
nip->base_version = OPA_MGMT_BASE_VERSION;
- nip->class_version = OPA_SMI_CLASS_VERSION;
+ nip->class_version = OPA_SM_CLASS_VERSION;
nip->node_type = 1; /* channel adapter */
nip->num_ports = ibdev->phys_port_cnt;
/* This is already in network order */
nip->sys_guid = ib_hfi1_sys_image_guid;
- /* Use first-port GUID as node */
- nip->node_guid = cpu_to_be64(dd->pport->guid);
+ nip->node_guid = ibdev->node_guid;
nip->partition_cap = cpu_to_be16(hfi1_get_npkeys(dd));
nip->device_id = cpu_to_be16(dd->pcidev->device);
nip->revision = cpu_to_be32(dd->minrev);
@@ -2302,7 +2303,7 @@ static int pma_get_opa_classportinfo(struct opa_pma_mad *pmp,
pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
p->base_version = OPA_MGMT_BASE_VERSION;
- p->class_version = OPA_SMI_CLASS_VERSION;
+ p->class_version = OPA_SM_CLASS_VERSION;
/*
* Expected response time is 4.096 usec. * 2^18 == 1.073741824 sec.
*/
@@ -4022,7 +4023,7 @@ static int process_subn_opa(struct ib_device *ibdev, int mad_flags,
am = be32_to_cpu(smp->attr_mod);
attr_id = smp->attr_id;
- if (smp->class_version != OPA_SMI_CLASS_VERSION) {
+ if (smp->class_version != OPA_SM_CLASS_VERSION) {
smp->status |= IB_SMP_UNSUP_VERSION;
ret = reply((struct ib_mad_hdr *)smp);
return ret;
@@ -4232,7 +4233,7 @@ static int process_perf_opa(struct ib_device *ibdev, u8 port,
*out_mad = *in_mad;
- if (pmp->mad_hdr.class_version != OPA_SMI_CLASS_VERSION) {
+ if (pmp->mad_hdr.class_version != OPA_SM_CLASS_VERSION) {
pmp->mad_hdr.status |= IB_SMP_UNSUP_VERSION;
return reply((struct ib_mad_hdr *)pmp);
}
diff --git a/drivers/infiniband/hw/hfi1/mmu_rb.c b/drivers/infiniband/hw/hfi1/mmu_rb.c
index 7ad30898fc19..ccbf52c8ff6f 100644
--- a/drivers/infiniband/hw/hfi1/mmu_rb.c
+++ b/drivers/infiniband/hw/hfi1/mmu_rb.c
@@ -81,7 +81,7 @@ static void do_remove(struct mmu_rb_handler *handler,
struct list_head *del_list);
static void handle_remove(struct work_struct *work);
-static struct mmu_notifier_ops mn_opts = {
+static const struct mmu_notifier_ops mn_opts = {
.invalidate_page = mmu_notifier_page,
.invalidate_range_start = mmu_notifier_range_start,
};
diff --git a/drivers/infiniband/hw/hfi1/pio.c b/drivers/infiniband/hw/hfi1/pio.c
index d89b8745d4c1..615be68e40b3 100644
--- a/drivers/infiniband/hw/hfi1/pio.c
+++ b/drivers/infiniband/hw/hfi1/pio.c
@@ -758,6 +758,7 @@ struct send_context *sc_alloc(struct hfi1_devdata *dd, int type,
sc->hw_context = hw_context;
cr_group_addresses(sc, &dma);
sc->credits = sci->credits;
+ sc->size = sc->credits * PIO_BLOCK_SIZE;
/* PIO Send Memory Address details */
#define PIO_ADDR_CONTEXT_MASK 0xfful
@@ -1242,6 +1243,7 @@ int sc_enable(struct send_context *sc)
sc->free = 0;
sc->alloc_free = 0;
sc->fill = 0;
+ sc->fill_wrap = 0;
sc->sr_head = 0;
sc->sr_tail = 0;
sc->flags = 0;
@@ -1385,7 +1387,7 @@ struct pio_buf *sc_buffer_alloc(struct send_context *sc, u32 dw_len,
unsigned long flags;
unsigned long avail;
unsigned long blocks = dwords_to_blocks(dw_len);
- unsigned long start_fill;
+ u32 fill_wrap;
int trycount = 0;
u32 head, next;
@@ -1410,9 +1412,7 @@ retry:
(sc->fill - sc->alloc_free);
if (blocks > avail) {
/* still no room, actively update */
- spin_unlock_irqrestore(&sc->alloc_lock, flags);
sc_release_update(sc);
- spin_lock_irqsave(&sc->alloc_lock, flags);
sc->alloc_free = ACCESS_ONCE(sc->free);
trycount++;
goto retry;
@@ -1428,8 +1428,11 @@ retry:
head = sc->sr_head;
/* "allocate" the buffer */
- start_fill = sc->fill;
sc->fill += blocks;
+ fill_wrap = sc->fill_wrap;
+ sc->fill_wrap += blocks;
+ if (sc->fill_wrap >= sc->credits)
+ sc->fill_wrap = sc->fill_wrap - sc->credits;
/*
* Fill the parts that the releaser looks at before moving the head.
@@ -1458,11 +1461,8 @@ retry:
spin_unlock_irqrestore(&sc->alloc_lock, flags);
/* finish filling in the buffer outside the lock */
- pbuf->start = sc->base_addr + ((start_fill % sc->credits)
- * PIO_BLOCK_SIZE);
- pbuf->size = sc->credits * PIO_BLOCK_SIZE;
- pbuf->end = sc->base_addr + pbuf->size;
- pbuf->block_count = blocks;
+ pbuf->start = sc->base_addr + fill_wrap * PIO_BLOCK_SIZE;
+ pbuf->end = sc->base_addr + sc->size;
pbuf->qw_written = 0;
pbuf->carry_bytes = 0;
pbuf->carry.val64 = 0;
@@ -1573,6 +1573,7 @@ static void sc_piobufavail(struct send_context *sc)
qp = iowait_to_qp(wait);
priv = qp->priv;
list_del_init(&priv->s_iowait.list);
+ priv->s_iowait.lock = NULL;
/* refcount held until actual wake up */
qps[n++] = qp;
}
@@ -2028,29 +2029,17 @@ freesc15:
int init_credit_return(struct hfi1_devdata *dd)
{
int ret;
- int num_numa;
int i;
- num_numa = num_online_nodes();
- /* enforce the expectation that the numas are compact */
- for (i = 0; i < num_numa; i++) {
- if (!node_online(i)) {
- dd_dev_err(dd, "NUMA nodes are not compact\n");
- ret = -EINVAL;
- goto done;
- }
- }
-
dd->cr_base = kcalloc(
- num_numa,
+ node_affinity.num_possible_nodes,
sizeof(struct credit_return_base),
GFP_KERNEL);
if (!dd->cr_base) {
- dd_dev_err(dd, "Unable to allocate credit return base\n");
ret = -ENOMEM;
goto done;
}
- for (i = 0; i < num_numa; i++) {
+ for_each_node_with_cpus(i) {
int bytes = TXE_NUM_CONTEXTS * sizeof(struct credit_return);
set_dev_node(&dd->pcidev->dev, i);
@@ -2077,14 +2066,11 @@ done:
void free_credit_return(struct hfi1_devdata *dd)
{
- int num_numa;
int i;
if (!dd->cr_base)
return;
-
- num_numa = num_online_nodes();
- for (i = 0; i < num_numa; i++) {
+ for (i = 0; i < node_affinity.num_possible_nodes; i++) {
if (dd->cr_base[i].va) {
dma_free_coherent(&dd->pcidev->dev,
TXE_NUM_CONTEXTS *
diff --git a/drivers/infiniband/hw/hfi1/pio.h b/drivers/infiniband/hw/hfi1/pio.h
index e709eaf743b5..867e5ffc3595 100644
--- a/drivers/infiniband/hw/hfi1/pio.h
+++ b/drivers/infiniband/hw/hfi1/pio.h
@@ -83,53 +83,55 @@ struct pio_buf {
void *arg; /* argument for cb */
void __iomem *start; /* buffer start address */
void __iomem *end; /* context end address */
- unsigned long size; /* context size, in bytes */
unsigned long sent_at; /* buffer is sent when <= free */
- u32 block_count; /* size of buffer, in blocks */
- u32 qw_written; /* QW written so far */
- u32 carry_bytes; /* number of valid bytes in carry */
union mix carry; /* pending unwritten bytes */
+ u16 qw_written; /* QW written so far */
+ u8 carry_bytes; /* number of valid bytes in carry */
};
/* cache line aligned pio buffer array */
union pio_shadow_ring {
struct pio_buf pbuf;
- u64 unused[16]; /* cache line spacer */
} ____cacheline_aligned;
/* per-NUMA send context */
struct send_context {
/* read-only after init */
struct hfi1_devdata *dd; /* device */
- void __iomem *base_addr; /* start of PIO memory */
union pio_shadow_ring *sr; /* shadow ring */
+ void __iomem *base_addr; /* start of PIO memory */
+ u32 __percpu *buffers_allocated;/* count of buffers allocated */
+ u32 size; /* context size, in bytes */
- volatile __le64 *hw_free; /* HW free counter */
- struct work_struct halt_work; /* halted context work queue entry */
- unsigned long flags; /* flags */
int node; /* context home node */
- int type; /* context type */
- u32 sw_index; /* software index number */
- u32 hw_context; /* hardware context number */
- u32 credits; /* number of blocks in context */
u32 sr_size; /* size of the shadow ring */
- u32 group; /* credit return group */
+ u16 flags; /* flags */
+ u8 type; /* context type */
+ u8 sw_index; /* software index number */
+ u8 hw_context; /* hardware context number */
+ u8 group; /* credit return group */
+
/* allocator fields */
spinlock_t alloc_lock ____cacheline_aligned_in_smp;
+ u32 sr_head; /* shadow ring head */
unsigned long fill; /* official alloc count */
unsigned long alloc_free; /* copy of free (less cache thrash) */
- u32 sr_head; /* shadow ring head */
+ u32 fill_wrap; /* tracks fill within ring */
+ u32 credits; /* number of blocks in context */
+ /* adding a new field here would make it part of this cacheline */
+
/* releaser fields */
spinlock_t release_lock ____cacheline_aligned_in_smp;
- unsigned long free; /* official free count */
u32 sr_tail; /* shadow ring tail */
+ unsigned long free; /* official free count */
+ volatile __le64 *hw_free; /* HW free counter */
/* list for PIO waiters */
struct list_head piowait ____cacheline_aligned_in_smp;
spinlock_t credit_ctrl_lock ____cacheline_aligned_in_smp;
- u64 credit_ctrl; /* cache for credit control */
u32 credit_intr_count; /* count of credit intr users */
- u32 __percpu *buffers_allocated;/* count of buffers allocated */
+ u64 credit_ctrl; /* cache for credit control */
wait_queue_head_t halt_wait; /* wait until kernel sees interrupt */
+ struct work_struct halt_work; /* halted context work queue entry */
};
/* send context flags */
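The new fill_wrap field removes a modulo from the buffer-allocation fast path: fill remains the monotonically increasing block count, while fill_wrap is kept in [0, credits) by a conditional subtract, so the buffer start address becomes a plain multiply. A sketch of the arithmetic from sc_buffer_alloc(), assuming blocks <= credits:

	/* allocation-time wrap tracking (sketch) */
	u32 fill_wrap = sc->fill_wrap;	/* offset of this buffer, in blocks */

	sc->fill += blocks;		/* monotonic count, never wraps */
	sc->fill_wrap += blocks;
	if (sc->fill_wrap >= sc->credits)	/* one subtract, no division */
		sc->fill_wrap -= sc->credits;

	pbuf->start = sc->base_addr + fill_wrap * PIO_BLOCK_SIZE;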
diff --git a/drivers/infiniband/hw/hfi1/pio_copy.c b/drivers/infiniband/hw/hfi1/pio_copy.c
index aa7773643107..03024cec78dd 100644
--- a/drivers/infiniband/hw/hfi1/pio_copy.c
+++ b/drivers/infiniband/hw/hfi1/pio_copy.c
@@ -129,8 +129,8 @@ void pio_copy(struct hfi1_devdata *dd, struct pio_buf *pbuf, u64 pbc,
dest += sizeof(u64);
}
- dest -= pbuf->size;
- dend -= pbuf->size;
+ dest -= pbuf->sc->size;
+ dend -= pbuf->sc->size;
}
/* write 8-byte non-SOP, non-wrap chunk data */
@@ -361,8 +361,8 @@ void seg_pio_copy_start(struct pio_buf *pbuf, u64 pbc,
dest += sizeof(u64);
}
- dest -= pbuf->size;
- dend -= pbuf->size;
+ dest -= pbuf->sc->size;
+ dend -= pbuf->sc->size;
}
/* write 8-byte non-SOP, non-wrap chunk data */
@@ -458,8 +458,8 @@ static void mid_copy_mix(struct pio_buf *pbuf, const void *from, size_t nbytes)
dest += sizeof(u64);
}
- dest -= pbuf->size;
- dend -= pbuf->size;
+ dest -= pbuf->sc->size;
+ dend -= pbuf->sc->size;
}
/* write 8-byte non-SOP, non-wrap chunk data */
@@ -492,7 +492,7 @@ static void mid_copy_mix(struct pio_buf *pbuf, const void *from, size_t nbytes)
*/
/* adjust if we have wrapped */
if (dest >= pbuf->end)
- dest -= pbuf->size;
+ dest -= pbuf->sc->size;
/* jump to the SOP range if within the first block */
else if (pbuf->qw_written < PIO_BLOCK_QWS)
dest += SOP_DISTANCE;
@@ -584,8 +584,8 @@ static void mid_copy_straight(struct pio_buf *pbuf,
dest += sizeof(u64);
}
- dest -= pbuf->size;
- dend -= pbuf->size;
+ dest -= pbuf->sc->size;
+ dend -= pbuf->sc->size;
}
/* write 8-byte non-SOP, non-wrap chunk data */
@@ -666,7 +666,7 @@ void seg_pio_copy_mid(struct pio_buf *pbuf, const void *from, size_t nbytes)
*/
/* adjust if we've wrapped */
if (dest >= pbuf->end)
- dest -= pbuf->size;
+ dest -= pbuf->sc->size;
/* jump to SOP range if within the first block */
else if (pbuf->qw_written < PIO_BLOCK_QWS)
dest += SOP_DISTANCE;
@@ -719,7 +719,7 @@ void seg_pio_copy_end(struct pio_buf *pbuf)
*/
/* adjust if we have wrapped */
if (dest >= pbuf->end)
- dest -= pbuf->size;
+ dest -= pbuf->sc->size;
/* jump to the SOP range if within the first block */
else if (pbuf->qw_written < PIO_BLOCK_QWS)
dest += SOP_DISTANCE;
diff --git a/drivers/infiniband/hw/hfi1/platform.c b/drivers/infiniband/hw/hfi1/platform.c
index 202433178864..838fe84e285a 100644
--- a/drivers/infiniband/hw/hfi1/platform.c
+++ b/drivers/infiniband/hw/hfi1/platform.c
@@ -49,6 +49,90 @@
#include "efivar.h"
#include "eprom.h"
+static int validate_scratch_checksum(struct hfi1_devdata *dd)
+{
+ u64 checksum = 0, temp_scratch = 0;
+ int i, j, version;
+
+ temp_scratch = read_csr(dd, ASIC_CFG_SCRATCH);
+ version = (temp_scratch & BITMAP_VERSION_SMASK) >> BITMAP_VERSION_SHIFT;
+
+ /* Prevent the power-on default of all zeroes from passing the checksum */
+ if (!version)
+ return 0;
+
+ /*
+ * ASIC scratch 0 only contains the checksum and bitmap version as
+ * fields of interest, both of which are handled separately from the
+ * loop below, so skip it
+ */
+ checksum += version;
+ for (i = 1; i < ASIC_NUM_SCRATCH; i++) {
+ temp_scratch = read_csr(dd, ASIC_CFG_SCRATCH + (8 * i));
+ for (j = sizeof(u64); j != 0; j -= 2) {
+ checksum += (temp_scratch & 0xFFFF);
+ temp_scratch >>= 16;
+ }
+ }
+
+ while (checksum >> 16)
+ checksum = (checksum & CHECKSUM_MASK) + (checksum >> 16);
+
+ temp_scratch = read_csr(dd, ASIC_CFG_SCRATCH);
+ temp_scratch &= CHECKSUM_SMASK;
+ temp_scratch >>= CHECKSUM_SHIFT;
+
+ if (checksum + temp_scratch == 0xFFFF)
+ return 1;
+ return 0;
+}
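The folding loop above reduces the running 64-bit sum to a 16-bit one's-complement-style value; the scratch contents are accepted only when the computed sum and the stored checksum are complements, i.e. add up to 0xFFFF. A worked example with made-up numbers:

	/* Hypothetical values, for illustration only. */
	u64 sum = 0x21233;	/* raw sum of 16-bit chunks */
	u64 stored = 0xEDCA;	/* checksum field read from scratch 0 */

	while (sum >> 16)	/* fold carries back in */
		sum = (sum & 0xFFFF) + (sum >> 16);	/* 0x1233 + 0x2 = 0x1235 */
	/* bitmap is valid because sum + stored == 0xFFFF */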
+
+static void save_platform_config_fields(struct hfi1_devdata *dd)
+{
+ struct hfi1_pportdata *ppd = dd->pport;
+ u64 temp_scratch = 0, temp_dest = 0;
+
+ temp_scratch = read_csr(dd, ASIC_CFG_SCRATCH_1);
+
+ temp_dest = temp_scratch &
+ (dd->hfi1_id ? PORT1_PORT_TYPE_SMASK :
+ PORT0_PORT_TYPE_SMASK);
+ ppd->port_type = temp_dest >>
+ (dd->hfi1_id ? PORT1_PORT_TYPE_SHIFT :
+ PORT0_PORT_TYPE_SHIFT);
+
+ temp_dest = temp_scratch &
+ (dd->hfi1_id ? PORT1_LOCAL_ATTEN_SMASK :
+ PORT0_LOCAL_ATTEN_SMASK);
+ ppd->local_atten = temp_dest >>
+ (dd->hfi1_id ? PORT1_LOCAL_ATTEN_SHIFT :
+ PORT0_LOCAL_ATTEN_SHIFT);
+
+ temp_dest = temp_scratch &
+ (dd->hfi1_id ? PORT1_REMOTE_ATTEN_SMASK :
+ PORT0_REMOTE_ATTEN_SMASK);
+ ppd->remote_atten = temp_dest >>
+ (dd->hfi1_id ? PORT1_REMOTE_ATTEN_SHIFT :
+ PORT0_REMOTE_ATTEN_SHIFT);
+
+ temp_dest = temp_scratch &
+ (dd->hfi1_id ? PORT1_DEFAULT_ATTEN_SMASK :
+ PORT0_DEFAULT_ATTEN_SMASK);
+ ppd->default_atten = temp_dest >>
+ (dd->hfi1_id ? PORT1_DEFAULT_ATTEN_SHIFT :
+ PORT0_DEFAULT_ATTEN_SHIFT);
+
+ temp_scratch = read_csr(dd, dd->hfi1_id ? ASIC_CFG_SCRATCH_3 :
+ ASIC_CFG_SCRATCH_2);
+
+ ppd->tx_preset_eq = (temp_scratch & TX_EQ_SMASK) >> TX_EQ_SHIFT;
+ ppd->tx_preset_noeq = (temp_scratch & TX_NO_EQ_SMASK) >> TX_NO_EQ_SHIFT;
+ ppd->rx_preset = (temp_scratch & RX_SMASK) >> RX_SHIFT;
+
+ ppd->max_power_class = (temp_scratch & QSFP_MAX_POWER_SMASK) >>
+ QSFP_MAX_POWER_SHIFT;
+}
+
void get_platform_config(struct hfi1_devdata *dd)
{
int ret = 0;
@@ -56,38 +140,49 @@ void get_platform_config(struct hfi1_devdata *dd)
u8 *temp_platform_config = NULL;
u32 esize;
- ret = eprom_read_platform_config(dd, (void **)&temp_platform_config,
- &esize);
- if (!ret) {
- /* success */
- size = esize;
- goto success;
+ if (is_integrated(dd)) {
+ if (validate_scratch_checksum(dd)) {
+ save_platform_config_fields(dd);
+ return;
+ }
+ dd_dev_err(dd, "%s: Config bitmap corrupted/uninitialized\n",
+ __func__);
+ dd_dev_err(dd,
+ "%s: Please update your BIOS to support active channels\n",
+ __func__);
+ } else {
+ ret = eprom_read_platform_config(dd,
+ (void **)&temp_platform_config,
+ &esize);
+ if (!ret) {
+ /* success */
+ dd->platform_config.data = temp_platform_config;
+ dd->platform_config.size = esize;
+ return;
+ }
+ /* fail, try EFI variable */
+
+ ret = read_hfi1_efi_var(dd, "configuration", &size,
+ (void **)&temp_platform_config);
+ if (!ret) {
+ dd->platform_config.data = temp_platform_config;
+ dd->platform_config.size = size;
+ return;
+ }
}
- /* fail, try EFI variable */
-
- ret = read_hfi1_efi_var(dd, "configuration", &size,
- (void **)&temp_platform_config);
- if (!ret)
- goto success;
-
- dd_dev_info(dd,
- "%s: Failed to get platform config from UEFI, falling back to request firmware\n",
- __func__);
+ dd_dev_err(dd,
+ "%s: Failed to get platform config, falling back to sub-optimal default file\n",
+ __func__);
/* fall back to request firmware */
platform_config_load = 1;
- return;
-
-success:
- dd->platform_config.data = temp_platform_config;
- dd->platform_config.size = size;
}
void free_platform_config(struct hfi1_devdata *dd)
{
if (!platform_config_load) {
/*
- * was loaded from EFI, release memory
- * allocated by read_efi_var
+ * was loaded from EFI or the EPROM, release memory
+ * allocated by read_efi_var/eprom_read_platform_config
*/
kfree(dd->platform_config.data);
}
@@ -100,12 +195,16 @@ void free_platform_config(struct hfi1_devdata *dd)
void get_port_type(struct hfi1_pportdata *ppd)
{
int ret;
+ u32 temp;
ret = get_platform_config_field(ppd->dd, PLATFORM_CONFIG_PORT_TABLE, 0,
- PORT_TABLE_PORT_TYPE, &ppd->port_type,
+ PORT_TABLE_PORT_TYPE, &temp,
4);
- if (ret)
+ if (ret) {
ppd->port_type = PORT_TYPE_UNKNOWN;
+ return;
+ }
+ ppd->port_type = temp;
}
int set_qsfp_tx(struct hfi1_pportdata *ppd, int on)
@@ -538,6 +637,38 @@ static void apply_tx_lanes(struct hfi1_pportdata *ppd, u8 field_id,
}
}
+/*
+ * Return a special SerDes setting for low power AOC cables. The power class
+ * threshold and the setting used here were found by empirical testing.
+ *
+ * Summary of the logic:
+ *
+ * if (QSFP and QSFP_TYPE == AOC and QSFP_POWER_CLASS < 4)
+ * return 0xe
+ * return 0; // leave at default
+ */
+static u8 aoc_low_power_setting(struct hfi1_pportdata *ppd)
+{
+ u8 *cache = ppd->qsfp_info.cache;
+ int power_class;
+
+ /* QSFP only */
+ if (ppd->port_type != PORT_TYPE_QSFP)
+ return 0; /* leave at default */
+
+ /* active optical cables only */
+ switch ((cache[QSFP_MOD_TECH_OFFS] & 0xF0) >> 4) {
+ case 0x0 ... 0x9: /* fallthrough */
+ case 0xC: /* fallthrough */
+ case 0xE:
+ /* active AOC */
+ power_class = get_qsfp_power_class(cache[QSFP_MOD_PWR_OFFS]);
+ if (power_class < QSFP_POWER_CLASS_4)
+ return 0xe;
+ }
+ return 0; /* leave at default */
+}
+
static void apply_tunings(
struct hfi1_pportdata *ppd, u32 tx_preset_index,
u8 tuning_method, u32 total_atten, u8 limiting_active)
@@ -606,7 +737,17 @@ static void apply_tunings(
tx_preset_index, TX_PRESET_TABLE_POSTCUR, &tx_preset, 4);
postcur = tx_preset;
- config_data = precur | (attn << 8) | (postcur << 16);
+ /*
+ * NOTES:
+ * o The aoc_low_power_setting is applied to all lanes even
+ * though only lane 0's value is examined by the firmware.
+ * o A low power setting cannot linger across a cable swap:
+ * on cable unplug the 8051 is reset, then restarted on
+ * cable insert, which resets all settings to their
+ * defaults and erases any previous low power setting.
+ */
+ config_data = precur | (attn << 8) | (postcur << 16) |
+ (aoc_low_power_setting(ppd) << 24);
apply_tx_lanes(ppd, TX_EQ_SETTINGS, config_data,
"Applying TX settings");
diff --git a/drivers/infiniband/hw/hfi1/platform.h b/drivers/infiniband/hw/hfi1/platform.h
index e2c21613c326..eed0aa9124fa 100644
--- a/drivers/infiniband/hw/hfi1/platform.h
+++ b/drivers/infiniband/hw/hfi1/platform.h
@@ -168,16 +168,6 @@ struct platform_config_cache {
struct platform_config_data config_tables[PLATFORM_CONFIG_TABLE_MAX];
};
-static const u32 platform_config_table_limits[PLATFORM_CONFIG_TABLE_MAX] = {
- 0,
- SYSTEM_TABLE_MAX,
- PORT_TABLE_MAX,
- RX_PRESET_TABLE_MAX,
- TX_PRESET_TABLE_MAX,
- QSFP_ATTEN_TABLE_MAX,
- VARIABLE_SETTINGS_TABLE_MAX
-};
-
/* This section defines default values and encodings for the
* fields defined for each table above
*/
@@ -295,6 +285,123 @@ enum link_tuning_encoding {
OPA_UNKNOWN_TUNING
};
+/*
+ * Shifts and masks for the link SI tuning values stuffed into the ASIC scratch
+ * registers for integrated platforms
+ */
+#define PORT0_PORT_TYPE_SHIFT 0
+#define PORT0_LOCAL_ATTEN_SHIFT 4
+#define PORT0_REMOTE_ATTEN_SHIFT 10
+#define PORT0_DEFAULT_ATTEN_SHIFT 32
+
+#define PORT1_PORT_TYPE_SHIFT 16
+#define PORT1_LOCAL_ATTEN_SHIFT 20
+#define PORT1_REMOTE_ATTEN_SHIFT 26
+#define PORT1_DEFAULT_ATTEN_SHIFT 40
+
+#define PORT0_PORT_TYPE_MASK 0xFUL
+#define PORT0_LOCAL_ATTEN_MASK 0x3FUL
+#define PORT0_REMOTE_ATTEN_MASK 0x3FUL
+#define PORT0_DEFAULT_ATTEN_MASK 0xFFUL
+
+#define PORT1_PORT_TYPE_MASK 0xFUL
+#define PORT1_LOCAL_ATTEN_MASK 0x3FUL
+#define PORT1_REMOTE_ATTEN_MASK 0x3FUL
+#define PORT1_DEFAULT_ATTEN_MASK 0xFFUL
+
+#define PORT0_PORT_TYPE_SMASK (PORT0_PORT_TYPE_MASK << \
+ PORT0_PORT_TYPE_SHIFT)
+#define PORT0_LOCAL_ATTEN_SMASK (PORT0_LOCAL_ATTEN_MASK << \
+ PORT0_LOCAL_ATTEN_SHIFT)
+#define PORT0_REMOTE_ATTEN_SMASK (PORT0_REMOTE_ATTEN_MASK << \
+ PORT0_REMOTE_ATTEN_SHIFT)
+#define PORT0_DEFAULT_ATTEN_SMASK (PORT0_DEFAULT_ATTEN_MASK << \
+ PORT0_DEFAULT_ATTEN_SHIFT)
+
+#define PORT1_PORT_TYPE_SMASK (PORT1_PORT_TYPE_MASK << \
+ PORT1_PORT_TYPE_SHIFT)
+#define PORT1_LOCAL_ATTEN_SMASK (PORT1_LOCAL_ATTEN_MASK << \
+ PORT1_LOCAL_ATTEN_SHIFT)
+#define PORT1_REMOTE_ATTEN_SMASK (PORT1_REMOTE_ATTEN_MASK << \
+ PORT1_REMOTE_ATTEN_SHIFT)
+#define PORT1_DEFAULT_ATTEN_SMASK (PORT1_DEFAULT_ATTEN_MASK << \
+ PORT1_DEFAULT_ATTEN_SHIFT)
+
+#define QSFP_MAX_POWER_SHIFT 0
+#define TX_NO_EQ_SHIFT 4
+#define TX_EQ_SHIFT 25
+#define RX_SHIFT 46
+
+#define QSFP_MAX_POWER_MASK 0xFUL
+#define TX_NO_EQ_MASK 0x1FFFFFUL
+#define TX_EQ_MASK 0x1FFFFFUL
+#define RX_MASK 0xFFFFUL
+
+#define QSFP_MAX_POWER_SMASK (QSFP_MAX_POWER_MASK << \
+ QSFP_MAX_POWER_SHIFT)
+#define TX_NO_EQ_SMASK (TX_NO_EQ_MASK << TX_NO_EQ_SHIFT)
+#define TX_EQ_SMASK (TX_EQ_MASK << TX_EQ_SHIFT)
+#define RX_SMASK (RX_MASK << RX_SHIFT)
+
+#define TX_PRECUR_SHIFT 0
+#define TX_ATTN_SHIFT 4
+#define QSFP_TX_CDR_APPLY_SHIFT 9
+#define QSFP_TX_EQ_APPLY_SHIFT 10
+#define QSFP_TX_CDR_SHIFT 11
+#define QSFP_TX_EQ_SHIFT 12
+#define TX_POSTCUR_SHIFT 16
+
+#define TX_PRECUR_MASK 0xFUL
+#define TX_ATTN_MASK 0x1FUL
+#define QSFP_TX_CDR_APPLY_MASK 0x1UL
+#define QSFP_TX_EQ_APPLY_MASK 0x1UL
+#define QSFP_TX_CDR_MASK 0x1UL
+#define QSFP_TX_EQ_MASK 0xFUL
+#define TX_POSTCUR_MASK 0x1FUL
+
+#define TX_PRECUR_SMASK (TX_PRECUR_MASK << TX_PRECUR_SHIFT)
+#define TX_ATTN_SMASK (TX_ATTN_MASK << TX_ATTN_SHIFT)
+#define QSFP_TX_CDR_APPLY_SMASK (QSFP_TX_CDR_APPLY_MASK << \
+ QSFP_TX_CDR_APPLY_SHIFT)
+#define QSFP_TX_EQ_APPLY_SMASK (QSFP_TX_EQ_APPLY_MASK << \
+ QSFP_TX_EQ_APPLY_SHIFT)
+#define QSFP_TX_CDR_SMASK (QSFP_TX_CDR_MASK << QSFP_TX_CDR_SHIFT)
+#define QSFP_TX_EQ_SMASK (QSFP_TX_EQ_MASK << QSFP_TX_EQ_SHIFT)
+#define TX_POSTCUR_SMASK (TX_POSTCUR_MASK << TX_POSTCUR_SHIFT)
+
+#define QSFP_RX_CDR_APPLY_SHIFT 0
+#define QSFP_RX_EMP_APPLY_SHIFT 1
+#define QSFP_RX_AMP_APPLY_SHIFT 2
+#define QSFP_RX_CDR_SHIFT 3
+#define QSFP_RX_EMP_SHIFT 4
+#define QSFP_RX_AMP_SHIFT 8
+
+#define QSFP_RX_CDR_APPLY_MASK 0x1UL
+#define QSFP_RX_EMP_APPLY_MASK 0x1UL
+#define QSFP_RX_AMP_APPLY_MASK 0x1UL
+#define QSFP_RX_CDR_MASK 0x1UL
+#define QSFP_RX_EMP_MASK 0xFUL
+#define QSFP_RX_AMP_MASK 0x3UL
+
+#define QSFP_RX_CDR_APPLY_SMASK (QSFP_RX_CDR_APPLY_MASK << \
+ QSFP_RX_CDR_APPLY_SHIFT)
+#define QSFP_RX_EMP_APPLY_SMASK (QSFP_RX_EMP_APPLY_MASK << \
+ QSFP_RX_EMP_APPLY_SHIFT)
+#define QSFP_RX_AMP_APPLY_SMASK (QSFP_RX_AMP_APPLY_MASK << \
+ QSFP_RX_AMP_APPLY_SHIFT)
+#define QSFP_RX_CDR_SMASK (QSFP_RX_CDR_MASK << QSFP_RX_CDR_SHIFT)
+#define QSFP_RX_EMP_SMASK (QSFP_RX_EMP_MASK << QSFP_RX_EMP_SHIFT)
+#define QSFP_RX_AMP_SMASK (QSFP_RX_AMP_MASK << QSFP_RX_AMP_SHIFT)
+
+#define BITMAP_VERSION 1
+#define BITMAP_VERSION_SHIFT 44
+#define BITMAP_VERSION_MASK 0xFUL
+#define BITMAP_VERSION_SMASK (BITMAP_VERSION_MASK << \
+ BITMAP_VERSION_SHIFT)
+#define CHECKSUM_SHIFT 48
+#define CHECKSUM_MASK 0xFFFFUL
+#define CHECKSUM_SMASK (CHECKSUM_MASK << CHECKSUM_SHIFT)
+
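Every field in these scratch registers follows the same SHIFT/MASK/SMASK convention, so extraction is always (value & SMASK) >> SHIFT. A two-line sketch, mirroring save_platform_config_fields() in platform.c:

	u64 scratch = read_csr(dd, ASIC_CFG_SCRATCH_1);
	u8 port_type = (scratch & PORT0_PORT_TYPE_SMASK) >> PORT0_PORT_TYPE_SHIFT;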
/* platform.c */
void get_platform_config(struct hfi1_devdata *dd);
void free_platform_config(struct hfi1_devdata *dd);
diff --git a/drivers/infiniband/hw/hfi1/qp.c b/drivers/infiniband/hw/hfi1/qp.c
index 9fc75e7e8781..d752d6768a49 100644
--- a/drivers/infiniband/hw/hfi1/qp.c
+++ b/drivers/infiniband/hw/hfi1/qp.c
@@ -196,15 +196,18 @@ static void flush_tx_list(struct rvt_qp *qp)
static void flush_iowait(struct rvt_qp *qp)
{
struct hfi1_qp_priv *priv = qp->priv;
- struct hfi1_ibdev *dev = to_idev(qp->ibqp.device);
unsigned long flags;
+ seqlock_t *lock = priv->s_iowait.lock;
- write_seqlock_irqsave(&dev->iowait_lock, flags);
+ if (!lock)
+ return;
+ write_seqlock_irqsave(lock, flags);
if (!list_empty(&priv->s_iowait.list)) {
list_del_init(&priv->s_iowait.list);
+ priv->s_iowait.lock = NULL;
rvt_put_qp(qp);
}
- write_sequnlock_irqrestore(&dev->iowait_lock, flags);
+ write_sequnlock_irqrestore(lock, flags);
}
static inline int opa_mtu_enum_to_int(int mtu)
@@ -543,6 +546,7 @@ static int iowait_sleep(
ibp->rvp.n_dmawait++;
qp->s_flags |= RVT_S_WAIT_DMA_DESC;
list_add_tail(&priv->s_iowait.list, &sde->dmawait);
+ priv->s_iowait.lock = &dev->iowait_lock;
trace_hfi1_qpsleep(qp, RVT_S_WAIT_DMA_DESC);
rvt_get_qp(qp);
}
@@ -964,6 +968,7 @@ void notify_error_qp(struct rvt_qp *qp)
if (!list_empty(&priv->s_iowait.list) && !(qp->s_flags & RVT_S_BUSY)) {
qp->s_flags &= ~RVT_S_ANY_WAIT_IO;
list_del_init(&priv->s_iowait.list);
+ priv->s_iowait.lock = NULL;
rvt_put_qp(qp);
}
write_sequnlock(&dev->iowait_lock);
diff --git a/drivers/infiniband/hw/hfi1/rc.c b/drivers/infiniband/hw/hfi1/rc.c
index 83198a8a8797..809b26eb6d3c 100644
--- a/drivers/infiniband/hw/hfi1/rc.c
+++ b/drivers/infiniband/hw/hfi1/rc.c
@@ -276,7 +276,7 @@ static int make_rc_ack(struct hfi1_ibdev *dev, struct rvt_qp *qp,
rvt_get_mr(ps->s_txreq->mr);
qp->s_ack_rdma_sge.sge = e->rdma_sge;
qp->s_ack_rdma_sge.num_sge = 1;
- qp->s_cur_sge = &qp->s_ack_rdma_sge;
+ ps->s_txreq->ss = &qp->s_ack_rdma_sge;
if (len > pmtu) {
len = pmtu;
qp->s_ack_state = OP(RDMA_READ_RESPONSE_FIRST);
@@ -290,7 +290,7 @@ static int make_rc_ack(struct hfi1_ibdev *dev, struct rvt_qp *qp,
bth2 = mask_psn(qp->s_ack_rdma_psn++);
} else {
/* COMPARE_SWAP or FETCH_ADD */
- qp->s_cur_sge = NULL;
+ ps->s_txreq->ss = NULL;
len = 0;
qp->s_ack_state = OP(ATOMIC_ACKNOWLEDGE);
ohdr->u.at.aeth = hfi1_compute_aeth(qp);
@@ -306,7 +306,7 @@ static int make_rc_ack(struct hfi1_ibdev *dev, struct rvt_qp *qp,
qp->s_ack_state = OP(RDMA_READ_RESPONSE_MIDDLE);
/* FALLTHROUGH */
case OP(RDMA_READ_RESPONSE_MIDDLE):
- qp->s_cur_sge = &qp->s_ack_rdma_sge;
+ ps->s_txreq->ss = &qp->s_ack_rdma_sge;
ps->s_txreq->mr = qp->s_ack_rdma_sge.sge.mr;
if (ps->s_txreq->mr)
rvt_get_mr(ps->s_txreq->mr);
@@ -335,7 +335,7 @@ normal:
*/
qp->s_ack_state = OP(SEND_ONLY);
qp->s_flags &= ~RVT_S_ACK_PENDING;
- qp->s_cur_sge = NULL;
+ ps->s_txreq->ss = NULL;
if (qp->s_nak_state)
ohdr->u.aeth =
cpu_to_be32((qp->r_msn & HFI1_MSN_MASK) |
@@ -351,7 +351,7 @@ normal:
qp->s_rdma_ack_cnt++;
qp->s_hdrwords = hwords;
ps->s_txreq->sde = priv->s_sde;
- qp->s_cur_size = len;
+ ps->s_txreq->s_cur_size = len;
hfi1_make_ruc_header(qp, ohdr, bth0, bth2, middle, ps);
/* pbc */
ps->s_txreq->hdr_dwords = qp->s_hdrwords + 2;
@@ -801,8 +801,8 @@ int hfi1_make_rc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
qp->s_len -= len;
qp->s_hdrwords = hwords;
ps->s_txreq->sde = priv->s_sde;
- qp->s_cur_sge = ss;
- qp->s_cur_size = len;
+ ps->s_txreq->ss = ss;
+ ps->s_txreq->s_cur_size = len;
hfi1_make_ruc_header(
qp,
ohdr,
@@ -1146,8 +1146,6 @@ void hfi1_rc_send_complete(struct rvt_qp *qp, struct ib_header *hdr)
{
struct ib_other_headers *ohdr;
struct rvt_swqe *wqe;
- struct ib_wc wc;
- unsigned i;
u32 opcode;
u32 psn;
@@ -1195,22 +1193,8 @@ void hfi1_rc_send_complete(struct rvt_qp *qp, struct ib_header *hdr)
qp->s_last = s_last;
/* see post_send() */
barrier();
- for (i = 0; i < wqe->wr.num_sge; i++) {
- struct rvt_sge *sge = &wqe->sg_list[i];
-
- rvt_put_mr(sge->mr);
- }
- /* Post a send completion queue entry if requested. */
- if (!(qp->s_flags & RVT_S_SIGNAL_REQ_WR) ||
- (wqe->wr.send_flags & IB_SEND_SIGNALED)) {
- memset(&wc, 0, sizeof(wc));
- wc.wr_id = wqe->wr.wr_id;
- wc.status = IB_WC_SUCCESS;
- wc.opcode = ib_hfi1_wc_opcode[wqe->wr.opcode];
- wc.byte_len = wqe->length;
- wc.qp = &qp->ibqp;
- rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.send_cq), &wc, 0);
- }
+ rvt_put_swqe(wqe);
+ rvt_qp_swqe_complete(qp, wqe, IB_WC_SUCCESS);
}
/*
* If we were waiting for sends to complete before re-sending,
@@ -1240,9 +1224,6 @@ static struct rvt_swqe *do_rc_completion(struct rvt_qp *qp,
struct rvt_swqe *wqe,
struct hfi1_ibport *ibp)
{
- struct ib_wc wc;
- unsigned i;
-
lockdep_assert_held(&qp->s_lock);
/*
* Don't decrement refcount and don't generate a
@@ -1253,28 +1234,14 @@ static struct rvt_swqe *do_rc_completion(struct rvt_qp *qp,
cmp_psn(qp->s_sending_psn, qp->s_sending_hpsn) > 0) {
u32 s_last;
- for (i = 0; i < wqe->wr.num_sge; i++) {
- struct rvt_sge *sge = &wqe->sg_list[i];
-
- rvt_put_mr(sge->mr);
- }
+ rvt_put_swqe(wqe);
s_last = qp->s_last;
if (++s_last >= qp->s_size)
s_last = 0;
qp->s_last = s_last;
/* see post_send() */
barrier();
- /* Post a send completion queue entry if requested. */
- if (!(qp->s_flags & RVT_S_SIGNAL_REQ_WR) ||
- (wqe->wr.send_flags & IB_SEND_SIGNALED)) {
- memset(&wc, 0, sizeof(wc));
- wc.wr_id = wqe->wr.wr_id;
- wc.status = IB_WC_SUCCESS;
- wc.opcode = ib_hfi1_wc_opcode[wqe->wr.opcode];
- wc.byte_len = wqe->length;
- wc.qp = &qp->ibqp;
- rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.send_cq), &wc, 0);
- }
+ rvt_qp_swqe_complete(qp, wqe, IB_WC_SUCCESS);
} else {
struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
@@ -2295,7 +2262,7 @@ send_last:
hfi1_copy_sge(&qp->r_sge, data, tlen, 1, copy_last);
rvt_put_ss(&qp->r_sge);
qp->r_msn++;
- if (!test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags))
+ if (!__test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags))
break;
wc.wr_id = qp->r_wr_id;
wc.status = IB_WC_SUCCESS;
@@ -2410,8 +2377,7 @@ send_last:
* Update the next expected PSN. We add 1 later
* below, so only add the remainder here.
*/
- if (len > pmtu)
- qp->r_psn += (len - 1) / pmtu;
+ qp->r_psn += rvt_div_mtu(qp, len - 1);
} else {
e->rdma_sge.mr = NULL;
e->rdma_sge.vaddr = NULL;
diff --git a/drivers/infiniband/hw/hfi1/ruc.c b/drivers/infiniband/hw/hfi1/ruc.c
index a1576aea4756..717ed4b159d3 100644
--- a/drivers/infiniband/hw/hfi1/ruc.c
+++ b/drivers/infiniband/hw/hfi1/ruc.c
@@ -239,16 +239,6 @@ bail:
return ret;
}
-static __be64 get_sguid(struct hfi1_ibport *ibp, unsigned index)
-{
- if (!index) {
- struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
-
- return cpu_to_be64(ppd->guid);
- }
- return ibp->guids[index - 1];
-}
-
static int gid_ok(union ib_gid *gid, __be64 gid_prefix, __be64 id)
{
return (gid->global.interface_id == id &&
@@ -699,9 +689,9 @@ u32 hfi1_make_grh(struct hfi1_ibport *ibp, struct ib_grh *hdr,
/* The SGID is 32-bit aligned. */
hdr->sgid.global.subnet_prefix = ibp->rvp.gid_prefix;
hdr->sgid.global.interface_id =
- grh->sgid_index && grh->sgid_index < ARRAY_SIZE(ibp->guids) ?
- ibp->guids[grh->sgid_index - 1] :
- cpu_to_be64(ppd_from_ibp(ibp)->guid);
+ grh->sgid_index < HFI1_GUIDS_PER_PORT ?
+ get_sguid(ibp, grh->sgid_index) :
+ get_sguid(ibp, HFI1_PORT_GUID_INDEX);
hdr->dgid = grh->dgid;
/* GRH header size in 32-bit words. */
@@ -777,8 +767,8 @@ void hfi1_make_ruc_header(struct rvt_qp *qp, struct ib_other_headers *ohdr,
u32 bth1;
/* Construct the header. */
- extra_bytes = -qp->s_cur_size & 3;
- nwords = (qp->s_cur_size + extra_bytes) >> 2;
+ extra_bytes = -ps->s_txreq->s_cur_size & 3;
+ nwords = (ps->s_txreq->s_cur_size + extra_bytes) >> 2;
lrh0 = HFI1_LRH_BTH;
if (unlikely(qp->remote_ah_attr.ah_flags & IB_AH_GRH)) {
qp->s_hdrwords += hfi1_make_grh(ibp,
@@ -952,7 +942,6 @@ void hfi1_send_complete(struct rvt_qp *qp, struct rvt_swqe *wqe,
enum ib_wc_status status)
{
u32 old_last, last;
- unsigned i;
if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_OR_FLUSH_SEND))
return;
@@ -964,32 +953,13 @@ void hfi1_send_complete(struct rvt_qp *qp, struct rvt_swqe *wqe,
qp->s_last = last;
/* See post_send() */
barrier();
- for (i = 0; i < wqe->wr.num_sge; i++) {
- struct rvt_sge *sge = &wqe->sg_list[i];
-
- rvt_put_mr(sge->mr);
- }
+ rvt_put_swqe(wqe);
if (qp->ibqp.qp_type == IB_QPT_UD ||
qp->ibqp.qp_type == IB_QPT_SMI ||
qp->ibqp.qp_type == IB_QPT_GSI)
atomic_dec(&ibah_to_rvtah(wqe->ud_wr.ah)->refcount);
- /* See ch. 11.2.4.1 and 10.7.3.1 */
- if (!(qp->s_flags & RVT_S_SIGNAL_REQ_WR) ||
- (wqe->wr.send_flags & IB_SEND_SIGNALED) ||
- status != IB_WC_SUCCESS) {
- struct ib_wc wc;
-
- memset(&wc, 0, sizeof(wc));
- wc.wr_id = wqe->wr.wr_id;
- wc.status = status;
- wc.opcode = ib_hfi1_wc_opcode[wqe->wr.opcode];
- wc.qp = &qp->ibqp;
- if (status == IB_WC_SUCCESS)
- wc.byte_len = wqe->length;
- rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.send_cq), &wc,
- status != IB_WC_SUCCESS);
- }
+ rvt_qp_swqe_complete(qp, wqe, status);
if (qp->s_acked == old_last)
qp->s_acked = last;
diff --git a/drivers/infiniband/hw/hfi1/sdma.c b/drivers/infiniband/hw/hfi1/sdma.c
index 9cbe52d21077..1d81cac1fa6c 100644
--- a/drivers/infiniband/hw/hfi1/sdma.c
+++ b/drivers/infiniband/hw/hfi1/sdma.c
@@ -375,7 +375,7 @@ static inline void complete_tx(struct sdma_engine *sde,
sde->head_sn, tx->sn);
sde->head_sn++;
#endif
- sdma_txclean(sde->dd, tx);
+ __sdma_txclean(sde->dd, tx);
if (complete)
(*complete)(tx, res);
if (wait && iowait_sdma_dec(wait))
@@ -1643,7 +1643,7 @@ static inline u8 ahg_mode(struct sdma_txreq *tx)
}
/**
- * sdma_txclean() - clean tx of mappings, descp *kmalloc's
+ * __sdma_txclean() - clean tx of mappings, descp *kmalloc's
* @dd: hfi1_devdata for unmapping
* @tx: tx request to clean
*
@@ -1653,7 +1653,7 @@ static inline u8 ahg_mode(struct sdma_txreq *tx)
* The code can be called multiple times without issue.
*
*/
-void sdma_txclean(
+void __sdma_txclean(
struct hfi1_devdata *dd,
struct sdma_txreq *tx)
{
@@ -3065,7 +3065,7 @@ static int _extend_sdma_tx_descs(struct hfi1_devdata *dd, struct sdma_txreq *tx)
tx->descp[i] = tx->descs[i];
return 0;
enomem:
- sdma_txclean(dd, tx);
+ __sdma_txclean(dd, tx);
return -ENOMEM;
}
@@ -3094,14 +3094,14 @@ int ext_coal_sdma_tx_descs(struct hfi1_devdata *dd, struct sdma_txreq *tx,
rval = _extend_sdma_tx_descs(dd, tx);
if (rval) {
- sdma_txclean(dd, tx);
+ __sdma_txclean(dd, tx);
return rval;
}
/* If coalesce buffer is allocated, copy data into it */
if (tx->coalesce_buf) {
if (type == SDMA_MAP_NONE) {
- sdma_txclean(dd, tx);
+ __sdma_txclean(dd, tx);
return -EINVAL;
}
@@ -3109,7 +3109,7 @@ int ext_coal_sdma_tx_descs(struct hfi1_devdata *dd, struct sdma_txreq *tx,
kvaddr = kmap(page);
kvaddr += offset;
} else if (WARN_ON(!kvaddr)) {
- sdma_txclean(dd, tx);
+ __sdma_txclean(dd, tx);
return -EINVAL;
}
@@ -3139,7 +3139,7 @@ int ext_coal_sdma_tx_descs(struct hfi1_devdata *dd, struct sdma_txreq *tx,
DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(&dd->pcidev->dev, addr))) {
- sdma_txclean(dd, tx);
+ __sdma_txclean(dd, tx);
return -ENOSPC;
}
@@ -3181,7 +3181,7 @@ int _pad_sdma_tx_descs(struct hfi1_devdata *dd, struct sdma_txreq *tx)
if ((unlikely(tx->num_desc == tx->desc_limit))) {
rval = _extend_sdma_tx_descs(dd, tx);
if (rval) {
- sdma_txclean(dd, tx);
+ __sdma_txclean(dd, tx);
return rval;
}
}
diff --git a/drivers/infiniband/hw/hfi1/sdma.h b/drivers/infiniband/hw/hfi1/sdma.h
index 56257ea3598f..21f1e2834f37 100644
--- a/drivers/infiniband/hw/hfi1/sdma.h
+++ b/drivers/infiniband/hw/hfi1/sdma.h
@@ -667,7 +667,13 @@ int ext_coal_sdma_tx_descs(struct hfi1_devdata *dd, struct sdma_txreq *tx,
int type, void *kvaddr, struct page *page,
unsigned long offset, u16 len);
int _pad_sdma_tx_descs(struct hfi1_devdata *, struct sdma_txreq *);
-void sdma_txclean(struct hfi1_devdata *, struct sdma_txreq *);
+void __sdma_txclean(struct hfi1_devdata *, struct sdma_txreq *);
+
+static inline void sdma_txclean(struct hfi1_devdata *dd, struct sdma_txreq *tx)
+{
+ if (tx->num_desc)
+ __sdma_txclean(dd, tx);
+}
/* helpers used by public routines */
static inline void _sdma_close_tx(struct hfi1_devdata *dd,
@@ -753,7 +759,7 @@ static inline int sdma_txadd_page(
DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(&dd->pcidev->dev, addr))) {
- sdma_txclean(dd, tx);
+ __sdma_txclean(dd, tx);
return -ENOSPC;
}
@@ -834,7 +840,7 @@ static inline int sdma_txadd_kvaddr(
DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(&dd->pcidev->dev, addr))) {
- sdma_txclean(dd, tx);
+ __sdma_txclean(dd, tx);
return -ENOSPC;
}
diff --git a/drivers/infiniband/hw/hfi1/uc.c b/drivers/infiniband/hw/hfi1/uc.c
index 5e6d1bac4914..b141a78ae38b 100644
--- a/drivers/infiniband/hw/hfi1/uc.c
+++ b/drivers/infiniband/hw/hfi1/uc.c
@@ -258,8 +258,8 @@ int hfi1_make_uc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
qp->s_len -= len;
qp->s_hdrwords = hwords;
ps->s_txreq->sde = priv->s_sde;
- qp->s_cur_sge = &qp->s_sge;
- qp->s_cur_size = len;
+ ps->s_txreq->ss = &qp->s_sge;
+ ps->s_txreq->s_cur_size = len;
hfi1_make_ruc_header(qp, ohdr, bth0 | (qp->s_state << 24),
mask_psn(qp->s_psn++), middle, ps);
/* pbc */
diff --git a/drivers/infiniband/hw/hfi1/ud.c b/drivers/infiniband/hw/hfi1/ud.c
index 97ae24b6314c..c071955c0272 100644
--- a/drivers/infiniband/hw/hfi1/ud.c
+++ b/drivers/infiniband/hw/hfi1/ud.c
@@ -354,8 +354,8 @@ int hfi1_make_ud_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
/* header size in 32-bit words LRH+BTH+DETH = (8+12+8)/4. */
qp->s_hdrwords = 7;
- qp->s_cur_size = wqe->length;
- qp->s_cur_sge = &qp->s_sge;
+ ps->s_txreq->s_cur_size = wqe->length;
+ ps->s_txreq->ss = &qp->s_sge;
qp->s_srate = ah_attr->static_rate;
qp->srate_mbps = ib_rate_to_mbps(qp->s_srate);
qp->s_wqe = wqe;
diff --git a/drivers/infiniband/hw/hfi1/user_sdma.c b/drivers/infiniband/hw/hfi1/user_sdma.c
index 77697d690f3e..7d22f8ee98ef 100644
--- a/drivers/infiniband/hw/hfi1/user_sdma.c
+++ b/drivers/infiniband/hw/hfi1/user_sdma.c
@@ -115,6 +115,7 @@ MODULE_PARM_DESC(sdma_comp_size, "Size of User SDMA completion ring. Default: 12
#define KDETH_HCRC_LOWER_MASK 0xff
#define AHG_KDETH_INTR_SHIFT 12
+#define AHG_KDETH_SH_SHIFT 13
#define PBC2LRH(x) ((((x) & 0xfff) << 2) - 4)
#define LRH2PBC(x) ((((x) >> 2) + 1) & 0xfff)
@@ -144,8 +145,9 @@ MODULE_PARM_DESC(sdma_comp_size, "Size of User SDMA completion ring. Default: 12
#define KDETH_OM_LARGE 64
#define KDETH_OM_MAX_SIZE (1 << ((KDETH_OM_LARGE / KDETH_OM_SMALL) + 1))
-/* Last packet in the request */
-#define TXREQ_FLAGS_REQ_LAST_PKT BIT(0)
+/* Tx request flag bits */
+#define TXREQ_FLAGS_REQ_ACK BIT(0) /* Set the ACK bit in the header */
+#define TXREQ_FLAGS_REQ_DISABLE_SH BIT(1) /* Disable header suppression */
/* SDMA request flag bits */
#define SDMA_REQ_FOR_THREAD 1
@@ -943,8 +945,13 @@ static int user_sdma_send_pkts(struct user_sdma_request *req, unsigned maxpkts)
tx->busycount = 0;
INIT_LIST_HEAD(&tx->list);
+ /*
+ * For the last packet set the ACK request
+ * and disable header suppression.
+ */
if (req->seqnum == req->info.npkts - 1)
- tx->flags |= TXREQ_FLAGS_REQ_LAST_PKT;
+ tx->flags |= (TXREQ_FLAGS_REQ_ACK |
+ TXREQ_FLAGS_REQ_DISABLE_SH);
/*
* Calculate the payload size - this is min of the fragment
@@ -963,11 +970,22 @@ static int user_sdma_send_pkts(struct user_sdma_request *req, unsigned maxpkts)
}
datalen = compute_data_length(req, tx);
+
+ /*
+ * Disable header suppression for payloads <= 8 DWs.
+ * If there is an uncorrectable error in the receive
+ * data FIFO when the received payload size is less
+ * than or equal to 8 DWs, RxDmaDataFifoRdUncErr is
+ * not reported; instead, RHF.EccErr is set, but only
+ * if the header is not suppressed.
+ */
if (!datalen) {
SDMA_DBG(req,
"Request has data but pkt len is 0");
ret = -EFAULT;
goto free_tx;
+ } else if (datalen <= 32) {
+ tx->flags |= TXREQ_FLAGS_REQ_DISABLE_SH;
}
}
@@ -990,6 +1008,10 @@ static int user_sdma_send_pkts(struct user_sdma_request *req, unsigned maxpkts)
LRH2PBC(lrhlen);
tx->hdr.pbc[0] = cpu_to_le16(pbclen);
}
+ ret = check_header_template(req, &tx->hdr,
+ lrhlen, datalen);
+ if (ret)
+ goto free_tx;
ret = sdma_txinit_ahg(&tx->txreq,
SDMA_TXREQ_F_AHG_COPY,
sizeof(tx->hdr) + datalen,
@@ -1351,7 +1373,7 @@ static int set_txreq_header(struct user_sdma_request *req,
req->seqnum));
/* Set ACK request on last packet */
- if (unlikely(tx->flags & TXREQ_FLAGS_REQ_LAST_PKT))
+ if (unlikely(tx->flags & TXREQ_FLAGS_REQ_ACK))
hdr->bth[2] |= cpu_to_be32(1UL << 31);
/* Set the new offset */
@@ -1384,8 +1406,8 @@ static int set_txreq_header(struct user_sdma_request *req,
/* Set KDETH.TID based on value for this TID */
KDETH_SET(hdr->kdeth.ver_tid_offset, TID,
EXP_TID_GET(tidval, IDX));
- /* Clear KDETH.SH only on the last packet */
- if (unlikely(tx->flags & TXREQ_FLAGS_REQ_LAST_PKT))
+ /* Clear KDETH.SH when DISABLE_SH flag is set */
+ if (unlikely(tx->flags & TXREQ_FLAGS_REQ_DISABLE_SH))
KDETH_SET(hdr->kdeth.ver_tid_offset, SH, 0);
/*
* Set the KDETH.OFFSET and KDETH.OM based on size of
@@ -1429,7 +1451,7 @@ static int set_txreq_header_ahg(struct user_sdma_request *req,
/* BTH.PSN and BTH.A */
val32 = (be32_to_cpu(hdr->bth[2]) + req->seqnum) &
(HFI1_CAP_IS_KSET(EXTENDED_PSN) ? 0x7fffffff : 0xffffff);
- if (unlikely(tx->flags & TXREQ_FLAGS_REQ_LAST_PKT))
+ if (unlikely(tx->flags & TXREQ_FLAGS_REQ_ACK))
val32 |= 1UL << 31;
AHG_HEADER_SET(req->ahg, diff, 6, 0, 16, cpu_to_be16(val32 >> 16));
AHG_HEADER_SET(req->ahg, diff, 6, 16, 16, cpu_to_be16(val32 & 0xffff));
@@ -1468,19 +1490,23 @@ static int set_txreq_header_ahg(struct user_sdma_request *req,
AHG_HEADER_SET(req->ahg, diff, 7, 0, 16,
((!!(req->omfactor - KDETH_OM_SMALL)) << 15 |
((req->tidoffset / req->omfactor) & 0x7fff)));
- /* KDETH.TIDCtrl, KDETH.TID */
+ /* KDETH.TIDCtrl, KDETH.TID, KDETH.Intr, KDETH.SH */
val = cpu_to_le16(((EXP_TID_GET(tidval, CTRL) & 0x3) << 10) |
- (EXP_TID_GET(tidval, IDX) & 0x3ff));
- /* Clear KDETH.SH on last packet */
- if (unlikely(tx->flags & TXREQ_FLAGS_REQ_LAST_PKT)) {
- val |= cpu_to_le16(KDETH_GET(hdr->kdeth.ver_tid_offset,
- INTR) <<
- AHG_KDETH_INTR_SHIFT);
- val &= cpu_to_le16(~(1U << 13));
- AHG_HEADER_SET(req->ahg, diff, 7, 16, 14, val);
+ (EXP_TID_GET(tidval, IDX) & 0x3ff));
+
+ if (unlikely(tx->flags & TXREQ_FLAGS_REQ_DISABLE_SH)) {
+ val |= cpu_to_le16((KDETH_GET(hdr->kdeth.ver_tid_offset,
+ INTR) <<
+ AHG_KDETH_INTR_SHIFT));
} else {
- AHG_HEADER_SET(req->ahg, diff, 7, 16, 12, val);
+ val |= KDETH_GET(hdr->kdeth.ver_tid_offset, SH) ?
+ cpu_to_le16(0x1 << AHG_KDETH_SH_SHIFT) :
+ cpu_to_le16((KDETH_GET(hdr->kdeth.ver_tid_offset,
+ INTR) <<
+ AHG_KDETH_INTR_SHIFT));
}
+
+ AHG_HEADER_SET(req->ahg, diff, 7, 16, 14, val);
}
trace_hfi1_sdma_user_header_ahg(pq->dd, pq->ctxt, pq->subctxt,
diff --git a/drivers/infiniband/hw/hfi1/verbs.c b/drivers/infiniband/hw/hfi1/verbs.c
index 4b7a16ceb362..95ed4d6da510 100644
--- a/drivers/infiniband/hw/hfi1/verbs.c
+++ b/drivers/infiniband/hw/hfi1/verbs.c
@@ -297,22 +297,6 @@ static inline int wss_exceeds_threshold(void)
}
/*
- * Translate ib_wr_opcode into ib_wc_opcode.
- */
-const enum ib_wc_opcode ib_hfi1_wc_opcode[] = {
- [IB_WR_RDMA_WRITE] = IB_WC_RDMA_WRITE,
- [IB_WR_RDMA_WRITE_WITH_IMM] = IB_WC_RDMA_WRITE,
- [IB_WR_SEND] = IB_WC_SEND,
- [IB_WR_SEND_WITH_IMM] = IB_WC_SEND,
- [IB_WR_RDMA_READ] = IB_WC_RDMA_READ,
- [IB_WR_ATOMIC_CMP_AND_SWP] = IB_WC_COMP_SWAP,
- [IB_WR_ATOMIC_FETCH_AND_ADD] = IB_WC_FETCH_ADD,
- [IB_WR_SEND_WITH_INV] = IB_WC_SEND,
- [IB_WR_LOCAL_INV] = IB_WC_LOCAL_INV,
- [IB_WR_REG_MR] = IB_WC_REG_MR
-};
-
-/*
* Length of header by opcode, 0 --> not supported
*/
const u8 hdr_len_by_opcode[256] = {
@@ -694,6 +678,7 @@ static void mem_timer(unsigned long data)
qp = iowait_to_qp(wait);
priv = qp->priv;
list_del_init(&priv->s_iowait.list);
+ priv->s_iowait.lock = NULL;
/* refcount held until actual wake up */
if (!list_empty(list))
mod_timer(&dev->mem_timer, jiffies + 1);
@@ -769,6 +754,7 @@ static int wait_kmem(struct hfi1_ibdev *dev,
mod_timer(&dev->mem_timer, jiffies + 1);
qp->s_flags |= RVT_S_WAIT_KMEM;
list_add_tail(&priv->s_iowait.list, &dev->memwait);
+ priv->s_iowait.lock = &dev->iowait_lock;
trace_hfi1_qpsleep(qp, RVT_S_WAIT_KMEM);
rvt_get_qp(qp);
}
@@ -788,10 +774,10 @@ static int wait_kmem(struct hfi1_ibdev *dev,
*/
static noinline int build_verbs_ulp_payload(
struct sdma_engine *sde,
- struct rvt_sge_state *ss,
u32 length,
struct verbs_txreq *tx)
{
+ struct rvt_sge_state *ss = tx->ss;
struct rvt_sge *sg_list = ss->sg_list;
struct rvt_sge sge = ss->sge;
u8 num_sge = ss->num_sge;
@@ -835,7 +821,6 @@ bail_txadd:
/* New API */
static int build_verbs_tx_desc(
struct sdma_engine *sde,
- struct rvt_sge_state *ss,
u32 length,
struct verbs_txreq *tx,
struct hfi1_ahg_info *ahg_info,
@@ -879,9 +864,9 @@ static int build_verbs_tx_desc(
goto bail_txadd;
}
- /* add the ulp payload - if any. ss can be NULL for acks */
- if (ss)
- ret = build_verbs_ulp_payload(sde, ss, length, tx);
+ /* add the ulp payload - if any. tx->ss can be NULL for acks */
+ if (tx->ss)
+ ret = build_verbs_ulp_payload(sde, length, tx);
bail_txadd:
return ret;
}
@@ -892,8 +877,7 @@ int hfi1_verbs_send_dma(struct rvt_qp *qp, struct hfi1_pkt_state *ps,
struct hfi1_qp_priv *priv = qp->priv;
struct hfi1_ahg_info *ahg_info = priv->s_ahg;
u32 hdrwords = qp->s_hdrwords;
- struct rvt_sge_state *ss = qp->s_cur_sge;
- u32 len = qp->s_cur_size;
+ u32 len = ps->s_txreq->s_cur_size;
u32 plen = hdrwords + ((len + 3) >> 2) + 2; /* includes pbc */
struct hfi1_ibdev *dev = ps->dev;
struct hfi1_pportdata *ppd = ps->ppd;
@@ -918,7 +902,7 @@ int hfi1_verbs_send_dma(struct rvt_qp *qp, struct hfi1_pkt_state *ps,
plen);
}
tx->wqe = qp->s_wqe;
- ret = build_verbs_tx_desc(tx->sde, ss, len, tx, ahg_info, pbc);
+ ret = build_verbs_tx_desc(tx->sde, len, tx, ahg_info, pbc);
if (unlikely(ret))
goto bail_build;
}
@@ -980,6 +964,7 @@ static int pio_wait(struct rvt_qp *qp,
qp->s_flags |= flag;
was_empty = list_empty(&sc->piowait);
list_add_tail(&priv->s_iowait.list, &sc->piowait);
+ priv->s_iowait.lock = &dev->iowait_lock;
trace_hfi1_qpsleep(qp, RVT_S_WAIT_PIO);
rvt_get_qp(qp);
/* counting: only call wantpiobuf_intr if first user */
@@ -1008,8 +993,8 @@ int hfi1_verbs_send_pio(struct rvt_qp *qp, struct hfi1_pkt_state *ps,
{
struct hfi1_qp_priv *priv = qp->priv;
u32 hdrwords = qp->s_hdrwords;
- struct rvt_sge_state *ss = qp->s_cur_sge;
- u32 len = qp->s_cur_size;
+ struct rvt_sge_state *ss = ps->s_txreq->ss;
+ u32 len = ps->s_txreq->s_cur_size;
u32 dwords = (len + 3) >> 2;
u32 plen = hdrwords + dwords + 2; /* includes pbc */
struct hfi1_pportdata *ppd = ps->ppd;
@@ -1237,7 +1222,7 @@ static inline send_routine get_send_routine(struct rvt_qp *qp,
u8 op = get_opcode(h);
if (piothreshold &&
- qp->s_cur_size <= min(piothreshold, qp->pmtu) &&
+ tx->s_cur_size <= min(piothreshold, qp->pmtu) &&
(BIT(op & OPMASK) & pio_opmask[op >> 5]) &&
iowait_sdma_pending(&priv->s_iowait) == 0 &&
!sdma_txreq_built(&tx->txreq))
@@ -1483,15 +1468,11 @@ static int hfi1_get_guid_be(struct rvt_dev_info *rdi, struct rvt_ibport *rvp,
int guid_index, __be64 *guid)
{
struct hfi1_ibport *ibp = container_of(rvp, struct hfi1_ibport, rvp);
- struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
- if (guid_index == 0)
- *guid = cpu_to_be64(ppd->guid);
- else if (guid_index < HFI1_GUIDS_PER_PORT)
- *guid = ibp->guids[guid_index - 1];
- else
+ if (guid_index >= HFI1_GUIDS_PER_PORT)
return -EINVAL;
+ *guid = get_sguid(ibp, guid_index);
return 0;
}
@@ -1610,6 +1591,154 @@ static void hfi1_get_dev_fw_str(struct ib_device *ibdev, char *str,
dc8051_ver_min(ver));
}
+static const char * const driver_cntr_names[] = {
+ /* must be element 0 */
+ "DRIVER_KernIntr",
+ "DRIVER_ErrorIntr",
+ "DRIVER_Tx_Errs",
+ "DRIVER_Rcv_Errs",
+ "DRIVER_HW_Errs",
+ "DRIVER_NoPIOBufs",
+ "DRIVER_CtxtsOpen",
+ "DRIVER_RcvLen_Errs",
+ "DRIVER_EgrBufFull",
+ "DRIVER_EgrHdrFull"
+};
+
+static const char **dev_cntr_names;
+static const char **port_cntr_names;
+static int num_driver_cntrs = ARRAY_SIZE(driver_cntr_names);
+static int num_dev_cntrs;
+static int num_port_cntrs;
+static int cntr_names_initialized;
+
+/*
+ * Convert a list of names separated by '\n' into an array of NULL terminated
+ * strings. Optionally some entries can be reserved in the array to hold extra
+ * external strings.
+ */
+static int init_cntr_names(const char *names_in,
+ const int names_len,
+ int num_extra_names,
+ int *num_cntrs,
+ const char ***cntr_names)
+{
+ char *names_out, *p, **q;
+ int i, n;
+
+ n = 0;
+ for (i = 0; i < names_len; i++)
+ if (names_in[i] == '\n')
+ n++;
+
+ names_out = kmalloc((n + num_extra_names) * sizeof(char *) + names_len,
+ GFP_KERNEL);
+ if (!names_out) {
+ *num_cntrs = 0;
+ *cntr_names = NULL;
+ return -ENOMEM;
+ }
+
+ p = names_out + (n + num_extra_names) * sizeof(char *);
+ memcpy(p, names_in, names_len);
+
+ q = (char **)names_out;
+ for (i = 0; i < n; i++) {
+ q[i] = p;
+ p = strchr(p, '\n');
+ *p++ = '\0';
+ }
+
+ *num_cntrs = n;
+ *cntr_names = (const char **)names_out;
+ return 0;
+}
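init_cntr_names() packs the pointer array and the string data into a single allocation: the first (n + num_extra_names) * sizeof(char *) bytes hold the pointers, the copied names follow immediately after, and each '\n' is rewritten to '\0'. A hypothetical call, purely to illustrate the layout (the real input comes from dd->cntrnames):

	/* Hypothetical input, for illustration only. */
	const char *names_in = "CntrA\nCntrB\nCntrC\n";
	const char **cntr_names;
	int num_cntrs;

	if (!init_cntr_names(names_in, strlen(names_in), 0,
			     &num_cntrs, &cntr_names)) {
		/* num_cntrs == 3; cntr_names[1] points at "CntrB" */
		kfree(cntr_names);	/* one allocation frees everything */
	}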
+
+static struct rdma_hw_stats *alloc_hw_stats(struct ib_device *ibdev,
+ u8 port_num)
+{
+ int i, err;
+
+ if (!cntr_names_initialized) {
+ struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
+
+ err = init_cntr_names(dd->cntrnames,
+ dd->cntrnameslen,
+ num_driver_cntrs,
+ &num_dev_cntrs,
+ &dev_cntr_names);
+ if (err)
+ return NULL;
+
+ for (i = 0; i < num_driver_cntrs; i++)
+ dev_cntr_names[num_dev_cntrs + i] =
+ driver_cntr_names[i];
+
+ err = init_cntr_names(dd->portcntrnames,
+ dd->portcntrnameslen,
+ 0,
+ &num_port_cntrs,
+ &port_cntr_names);
+ if (err) {
+ kfree(dev_cntr_names);
+ dev_cntr_names = NULL;
+ return NULL;
+ }
+ cntr_names_initialized = 1;
+ }
+
+ if (!port_num)
+ return rdma_alloc_hw_stats_struct(
+ dev_cntr_names,
+ num_dev_cntrs + num_driver_cntrs,
+ RDMA_HW_STATS_DEFAULT_LIFESPAN);
+ else
+ return rdma_alloc_hw_stats_struct(
+ port_cntr_names,
+ num_port_cntrs,
+ RDMA_HW_STATS_DEFAULT_LIFESPAN);
+}
+
+static u64 hfi1_sps_ints(void)
+{
+ unsigned long flags;
+ struct hfi1_devdata *dd;
+ u64 sps_ints = 0;
+
+ spin_lock_irqsave(&hfi1_devs_lock, flags);
+ list_for_each_entry(dd, &hfi1_dev_list, list) {
+ sps_ints += get_all_cpu_total(dd->int_counter);
+ }
+ spin_unlock_irqrestore(&hfi1_devs_lock, flags);
+ return sps_ints;
+}
+
+static int get_hw_stats(struct ib_device *ibdev, struct rdma_hw_stats *stats,
+ u8 port, int index)
+{
+ u64 *values;
+ int count;
+
+ if (!port) {
+ u64 *stats = (u64 *)&hfi1_stats;
+ int i;
+
+ hfi1_read_cntrs(dd_from_ibdev(ibdev), NULL, &values);
+ values[num_dev_cntrs] = hfi1_sps_ints();
+ for (i = 1; i < num_driver_cntrs; i++)
+ values[num_dev_cntrs + i] = stats[i];
+ count = num_dev_cntrs + num_driver_cntrs;
+ } else {
+ struct hfi1_ibport *ibp = to_iport(ibdev, port);
+
+ hfi1_read_portcntrs(ppd_from_ibp(ibp), NULL, &values);
+ count = num_port_cntrs;
+ }
+
+ memcpy(stats->value, values, count * sizeof(u64));
+ return count;
+}
+
/**
* hfi1_register_ib_device - register our device with the infiniband core
* @dd: the device data structure
@@ -1620,6 +1749,7 @@ int hfi1_register_ib_device(struct hfi1_devdata *dd)
struct hfi1_ibdev *dev = &dd->verbs_dev;
struct ib_device *ibdev = &dev->rdi.ibdev;
struct hfi1_pportdata *ppd = dd->pport;
+ struct hfi1_ibport *ibp = &ppd->ibport_data;
unsigned i;
int ret;
size_t lcpysz = IB_DEVICE_NAME_MAX;
@@ -1632,6 +1762,7 @@ int hfi1_register_ib_device(struct hfi1_devdata *dd)
setup_timer(&dev->mem_timer, mem_timer, (unsigned long)dev);
seqlock_init(&dev->iowait_lock);
+ seqlock_init(&dev->txwait_lock);
INIT_LIST_HEAD(&dev->txwait);
INIT_LIST_HEAD(&dev->memwait);
@@ -1639,20 +1770,24 @@ int hfi1_register_ib_device(struct hfi1_devdata *dd)
if (ret)
goto err_verbs_txreq;
+ /* Use first-port GUID as node guid */
+ ibdev->node_guid = get_sguid(ibp, HFI1_PORT_GUID_INDEX);
+
/*
* The system image GUID is supposed to be the same for all
* HFIs in a single system but since there can be other
* device types in the system, we can't be sure this is unique.
*/
if (!ib_hfi1_sys_image_guid)
- ib_hfi1_sys_image_guid = cpu_to_be64(ppd->guid);
+ ib_hfi1_sys_image_guid = ibdev->node_guid;
lcpysz = strlcpy(ibdev->name, class_name(), lcpysz);
strlcpy(ibdev->name + lcpysz, "_%d", IB_DEVICE_NAME_MAX - lcpysz);
ibdev->owner = THIS_MODULE;
- ibdev->node_guid = cpu_to_be64(ppd->guid);
ibdev->phys_port_cnt = dd->num_pports;
ibdev->dma_device = &dd->pcidev->dev;
ibdev->modify_device = modify_device;
+ ibdev->alloc_hw_stats = alloc_hw_stats;
+ ibdev->get_hw_stats = get_hw_stats;
/* keep process mad in the driver */
ibdev->process_mad = hfi1_process_mad;
@@ -1767,6 +1902,10 @@ void hfi1_unregister_ib_device(struct hfi1_devdata *dd)
del_timer_sync(&dev->mem_timer);
verbs_txreq_exit(dev);
+
+ kfree(dev_cntr_names);
+ kfree(port_cntr_names);
+ cntr_names_initialized = 0;
}
void hfi1_cnp_rcv(struct hfi1_packet *packet)
diff --git a/drivers/infiniband/hw/hfi1/verbs.h b/drivers/infiniband/hw/hfi1/verbs.h
index 1c3815d89eb7..e6b893010e6d 100644
--- a/drivers/infiniband/hw/hfi1/verbs.h
+++ b/drivers/infiniband/hw/hfi1/verbs.h
@@ -73,7 +73,6 @@ struct hfi1_packet;
#include "iowait.h"
#define HFI1_MAX_RDMA_ATOMIC 16
-#define HFI1_GUIDS_PER_PORT 5
/*
* Increment this value if any changes that break userspace ABI
@@ -169,8 +168,6 @@ struct hfi1_ibport {
struct rvt_qp __rcu *qp[2];
struct rvt_ibport rvp;
- __be64 guids[HFI1_GUIDS_PER_PORT - 1]; /* writable GUIDs */
-
/* the first 16 entries are sl_to_vl for !OPA */
u8 sl_to_sc[32];
u8 sc_to_sl[32];
@@ -180,18 +177,19 @@ struct hfi1_ibdev {
struct rvt_dev_info rdi; /* Must be first */
/* QP numbers are shared by all IB ports */
- /* protect wait lists */
- seqlock_t iowait_lock;
+ /* protect txwait list */
+ seqlock_t txwait_lock ____cacheline_aligned_in_smp;
struct list_head txwait; /* list for wait verbs_txreq */
struct list_head memwait; /* list for wait kernel memory */
- struct list_head txreq_free;
struct kmem_cache *verbs_txreq_cache;
- struct timer_list mem_timer;
+ u64 n_txwait;
+ u64 n_kmem_wait;
+ /* protect iowait lists */
+ seqlock_t iowait_lock ____cacheline_aligned_in_smp;
u64 n_piowait;
u64 n_piodrain;
- u64 n_txwait;
- u64 n_kmem_wait;
+ struct timer_list mem_timer;
#ifdef CONFIG_DEBUG_FS
/* per HFI debugfs */
diff --git a/drivers/infiniband/hw/hfi1/verbs_txreq.c b/drivers/infiniband/hw/hfi1/verbs_txreq.c
index 094ab829ec42..5d23172c470f 100644
--- a/drivers/infiniband/hw/hfi1/verbs_txreq.c
+++ b/drivers/infiniband/hw/hfi1/verbs_txreq.c
@@ -72,22 +72,22 @@ void hfi1_put_txreq(struct verbs_txreq *tx)
kmem_cache_free(dev->verbs_txreq_cache, tx);
do {
- seq = read_seqbegin(&dev->iowait_lock);
+ seq = read_seqbegin(&dev->txwait_lock);
if (!list_empty(&dev->txwait)) {
struct iowait *wait;
- write_seqlock_irqsave(&dev->iowait_lock, flags);
+ write_seqlock_irqsave(&dev->txwait_lock, flags);
wait = list_first_entry(&dev->txwait, struct iowait,
list);
qp = iowait_to_qp(wait);
priv = qp->priv;
list_del_init(&priv->s_iowait.list);
/* refcount held until actual wake up */
- write_sequnlock_irqrestore(&dev->iowait_lock, flags);
+ write_sequnlock_irqrestore(&dev->txwait_lock, flags);
hfi1_qp_wakeup(qp, RVT_S_WAIT_TX);
break;
}
- } while (read_seqretry(&dev->iowait_lock, seq));
+ } while (read_seqretry(&dev->txwait_lock, seq));
}
struct verbs_txreq *__get_txreq(struct hfi1_ibdev *dev,
@@ -96,7 +96,7 @@ struct verbs_txreq *__get_txreq(struct hfi1_ibdev *dev,
{
struct verbs_txreq *tx = ERR_PTR(-EBUSY);
- write_seqlock(&dev->iowait_lock);
+ write_seqlock(&dev->txwait_lock);
if (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK) {
struct hfi1_qp_priv *priv;
@@ -108,13 +108,14 @@ struct verbs_txreq *__get_txreq(struct hfi1_ibdev *dev,
dev->n_txwait++;
qp->s_flags |= RVT_S_WAIT_TX;
list_add_tail(&priv->s_iowait.list, &dev->txwait);
+ priv->s_iowait.lock = &dev->txwait_lock;
trace_hfi1_qpsleep(qp, RVT_S_WAIT_TX);
rvt_get_qp(qp);
}
qp->s_flags &= ~RVT_S_BUSY;
}
out:
- write_sequnlock(&dev->iowait_lock);
+ write_sequnlock(&dev->txwait_lock);
return tx;
}
diff --git a/drivers/infiniband/hw/hfi1/verbs_txreq.h b/drivers/infiniband/hw/hfi1/verbs_txreq.h
index 5660897593ba..76216f2ef35a 100644
--- a/drivers/infiniband/hw/hfi1/verbs_txreq.h
+++ b/drivers/infiniband/hw/hfi1/verbs_txreq.h
@@ -65,6 +65,7 @@ struct verbs_txreq {
struct sdma_engine *sde;
struct send_context *psc;
u16 hdr_dwords;
+ u16 s_cur_size;
};
struct hfi1_ibdev;
diff --git a/drivers/infiniband/hw/hns/hns_roce_ah.c b/drivers/infiniband/hw/hns/hns_roce_ah.c
index 24f79ee39fdf..0ac294db3b29 100644
--- a/drivers/infiniband/hw/hns/hns_roce_ah.c
+++ b/drivers/infiniband/hw/hns/hns_roce_ah.c
@@ -39,7 +39,8 @@
#define HNS_ROCE_VLAN_SL_BIT_MASK 7
#define HNS_ROCE_VLAN_SL_SHIFT 13
-struct ib_ah *hns_roce_create_ah(struct ib_pd *ibpd, struct ib_ah_attr *ah_attr)
+struct ib_ah *hns_roce_create_ah(struct ib_pd *ibpd, struct ib_ah_attr *ah_attr,
+ struct ib_udata *udata)
{
struct hns_roce_dev *hr_dev = to_hr_dev(ibpd->device);
struct device *dev = &hr_dev->pdev->dev;
diff --git a/drivers/infiniband/hw/hns/hns_roce_alloc.c b/drivers/infiniband/hw/hns/hns_roce_alloc.c
index 863a17a2de40..605962f2828c 100644
--- a/drivers/infiniband/hw/hns/hns_roce_alloc.c
+++ b/drivers/infiniband/hw/hns/hns_roce_alloc.c
@@ -61,9 +61,10 @@ int hns_roce_bitmap_alloc(struct hns_roce_bitmap *bitmap, unsigned long *obj)
return ret;
}
-void hns_roce_bitmap_free(struct hns_roce_bitmap *bitmap, unsigned long obj)
+void hns_roce_bitmap_free(struct hns_roce_bitmap *bitmap, unsigned long obj,
+ int rr)
{
- hns_roce_bitmap_free_range(bitmap, obj, 1);
+ hns_roce_bitmap_free_range(bitmap, obj, 1, rr);
}
int hns_roce_bitmap_alloc_range(struct hns_roce_bitmap *bitmap, int cnt,
@@ -106,7 +107,8 @@ int hns_roce_bitmap_alloc_range(struct hns_roce_bitmap *bitmap, int cnt,
}
void hns_roce_bitmap_free_range(struct hns_roce_bitmap *bitmap,
- unsigned long obj, int cnt)
+ unsigned long obj, int cnt,
+ int rr)
{
int i;
@@ -116,7 +118,8 @@ void hns_roce_bitmap_free_range(struct hns_roce_bitmap *bitmap,
for (i = 0; i < cnt; i++)
clear_bit(obj + i, bitmap->table);
- bitmap->last = min(bitmap->last, obj);
+ if (!rr)
+ bitmap->last = min(bitmap->last, obj);
bitmap->top = (bitmap->top + bitmap->max + bitmap->reserved_top)
& bitmap->mask;
spin_unlock(&bitmap->lock);
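The new rr argument controls whether a free rewinds the allocation cursor: with BITMAP_NO_RR, bitmap->last is pulled back so the freed index is immediately reusable; with BITMAP_RR the cursor keeps advancing, delaying reuse of an index the hardware may still reference. Illustrative call sites (the BITMAP_RR caller is hypothetical; which tables opt into round-robin reuse is a per-resource decision):

	/* the freed CQN may be handed out again right away */
	hns_roce_bitmap_free(&cq_table->bitmap, hr_cq->cqn, BITMAP_NO_RR);

	/* delay reuse of an index the hardware may still be flushing */
	hns_roce_bitmap_free(&mr_table->mtpt_bitmap, obj, BITMAP_RR);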
diff --git a/drivers/infiniband/hw/hns/hns_roce_cmd.c b/drivers/infiniband/hw/hns/hns_roce_cmd.c
index 2a0b6c05da5f..8c1f7a6f84d2 100644
--- a/drivers/infiniband/hw/hns/hns_roce_cmd.c
+++ b/drivers/infiniband/hw/hns/hns_roce_cmd.c
@@ -216,10 +216,10 @@ static int __hns_roce_cmd_mbox_wait(struct hns_roce_dev *hr_dev, u64 in_param,
goto out;
/*
- * It is timeout when wait_for_completion_timeout return 0
- * The return value is the time limit set in advance
- * how many seconds showing
- */
+	 * It is a timeout when wait_for_completion_timeout() returns 0;
+	 * otherwise the return value is the time remaining (in jiffies)
+	 * before the limit set in advance would elapse
+ */
if (!wait_for_completion_timeout(&context->done,
msecs_to_jiffies(timeout))) {
dev_err(dev, "[cmd]wait_for_completion_timeout timeout\n");
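As the comment above says, wait_for_completion_timeout() returns 0 on timeout and otherwise the number of jiffies remaining, so the zero check is the timeout branch. A short usage sketch (the -EBUSY value is illustrative):

	unsigned long left;

	left = wait_for_completion_timeout(&context->done,
					   msecs_to_jiffies(timeout));
	if (!left)
		return -EBUSY;		/* timed out */
	/* completed with `left` jiffies to spare */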
diff --git a/drivers/infiniband/hw/hns/hns_roce_cmd.h b/drivers/infiniband/hw/hns/hns_roce_cmd.h
index e3997d312c55..f5a9ee2fc53d 100644
--- a/drivers/infiniband/hw/hns/hns_roce_cmd.h
+++ b/drivers/infiniband/hw/hns/hns_roce_cmd.h
@@ -34,6 +34,7 @@
#define _HNS_ROCE_CMD_H
#define HNS_ROCE_MAILBOX_SIZE 4096
+#define HNS_ROCE_CMD_TIMEOUT_MSECS 10000
enum {
/* TPT commands */
@@ -57,17 +58,6 @@ enum {
HNS_ROCE_CMD_QUERY_QP = 0x22,
};
-enum {
- HNS_ROCE_CMD_TIME_CLASS_A = 10000,
- HNS_ROCE_CMD_TIME_CLASS_B = 10000,
- HNS_ROCE_CMD_TIME_CLASS_C = 10000,
-};
-
-struct hns_roce_cmd_mailbox {
- void *buf;
- dma_addr_t dma;
-};
-
int hns_roce_cmd_mbox(struct hns_roce_dev *hr_dev, u64 in_param, u64 out_param,
unsigned long in_modifier, u8 op_modifier, u16 op,
unsigned long timeout);
diff --git a/drivers/infiniband/hw/hns/hns_roce_common.h b/drivers/infiniband/hw/hns/hns_roce_common.h
index 297016103aa7..4af403e1348c 100644
--- a/drivers/infiniband/hw/hns/hns_roce_common.h
+++ b/drivers/infiniband/hw/hns/hns_roce_common.h
@@ -57,6 +57,32 @@
#define roce_set_bit(origin, shift, val) \
roce_set_field((origin), (1ul << (shift)), (shift), (val))
+/*
+ * roce_hw_index_cmp_lt - Compare two hardware index values in hisilicon
+ * SOC, check if a is less than b.
+ * @a: hardware index value
+ * @b: hardware index value
+ * @bits: the number of bits of a and b, range: 0~31.
+ *
+ * A hardware index increases continuously until its max value, then
+ * restarts from zero, again and again. Because the bits of a reg field
+ * are often limited, the reg field can only hold the low bits of the
+ * hardware index in hisilicon SOC.
+ * In some scenarios we need to compare two values (a, b) read from two
+ * reg fields in this driver, for example:
+ * If a equals 0xfffe, b equals 0x1 and bits equals 16, we think b has
+ * wrapped from 0xffff to 0x1 and a is less than b.
+ * If a equals 0xfffe, b equals 0xf001 and bits equals 16, we think a
+ * is bigger than b.
+ *
+ * Return true if a is less than b, otherwise false.
+ */
+#define roce_hw_index_mask(bits) ((1ul << (bits)) - 1)
+#define roce_hw_index_shift(bits) (32 - (bits))
+#define roce_hw_index_cmp_lt(a, b, bits) \
+ ((int)((((a) - (b)) & roce_hw_index_mask(bits)) << \
+ roce_hw_index_shift(bits)) < 0)
+
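A worked check of the comparison macro in a freestanding userspace harness (assuming a 32-bit int), confirming both examples from the comment above:

	#include <assert.h>

	#define roce_hw_index_mask(bits)	((1ul << (bits)) - 1)
	#define roce_hw_index_shift(bits)	(32 - (bits))
	#define roce_hw_index_cmp_lt(a, b, bits) \
		((int)((((a) - (b)) & roce_hw_index_mask(bits)) << \
		roce_hw_index_shift(bits)) < 0)

	int main(void)
	{
		/* 0xfffe - 0x0001 = 0xfffd; << 16 = 0xfffd0000, negative */
		assert(roce_hw_index_cmp_lt(0xfffeu, 0x0001u, 16));
		/* 0xfffe - 0xf001 = 0x0ffd; << 16 stays positive */
		assert(!roce_hw_index_cmp_lt(0xfffeu, 0xf001u, 16));
		return 0;
	}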
#define ROCEE_GLB_CFG_ROCEE_DB_SQ_MODE_S 3
#define ROCEE_GLB_CFG_ROCEE_DB_OTH_MODE_S 4
@@ -245,16 +271,26 @@
#define ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_M \
(((1UL << 28) - 1) << ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_S)
+#define ROCEE_SDB_PTR_CMP_BITS 28
+
#define ROCEE_SDB_INV_CNT_SDB_INV_CNT_S 0
#define ROCEE_SDB_INV_CNT_SDB_INV_CNT_M \
(((1UL << 16) - 1) << ROCEE_SDB_INV_CNT_SDB_INV_CNT_S)
+#define ROCEE_SDB_RETRY_CNT_SDB_RETRY_CT_S 0
+#define ROCEE_SDB_RETRY_CNT_SDB_RETRY_CT_M \
+ (((1UL << 16) - 1) << ROCEE_SDB_RETRY_CNT_SDB_RETRY_CT_S)
+
+#define ROCEE_SDB_CNT_CMP_BITS 16
+
+#define ROCEE_TSP_BP_ST_QH_FIFO_ENTRY_S 20
+
+#define ROCEE_CNT_CLR_CE_CNT_CLR_CE_S 0
+
/*************ROCEE_REG DEFINITION****************/
#define ROCEE_VENDOR_ID_REG 0x0
#define ROCEE_VENDOR_PART_ID_REG 0x4
-#define ROCEE_HW_VERSION_REG 0x8
-
#define ROCEE_SYS_IMAGE_GUID_L_REG 0xC
#define ROCEE_SYS_IMAGE_GUID_H_REG 0x10
@@ -318,7 +354,11 @@
#define ROCEE_SDB_ISSUE_PTR_REG 0x758
#define ROCEE_SDB_SEND_PTR_REG 0x75C
+#define ROCEE_CAEP_CQE_WCMD_EMPTY 0x850
+#define ROCEE_SCAEP_WR_CQE_CNT 0x8D0
#define ROCEE_SDB_INV_CNT_REG 0x9A4
+#define ROCEE_SDB_RETRY_CNT_REG 0x9AC
+#define ROCEE_TSP_BP_ST_REG 0x9EC
#define ROCEE_ECC_UCERR_ALM0_REG 0xB34
#define ROCEE_ECC_CERR_ALM0_REG 0xB40
diff --git a/drivers/infiniband/hw/hns/hns_roce_cq.c b/drivers/infiniband/hw/hns/hns_roce_cq.c
index 097365932b09..589496c8fb9e 100644
--- a/drivers/infiniband/hw/hns/hns_roce_cq.c
+++ b/drivers/infiniband/hw/hns/hns_roce_cq.c
@@ -35,7 +35,7 @@
#include "hns_roce_device.h"
#include "hns_roce_cmd.h"
#include "hns_roce_hem.h"
-#include "hns_roce_user.h"
+#include <rdma/hns-abi.h>
#include "hns_roce_common.h"
static void hns_roce_ib_cq_comp(struct hns_roce_cq *hr_cq)
@@ -77,7 +77,7 @@ static int hns_roce_sw2hw_cq(struct hns_roce_dev *dev,
unsigned long cq_num)
{
return hns_roce_cmd_mbox(dev, mailbox->dma, 0, cq_num, 0,
- HNS_ROCE_CMD_SW2HW_CQ, HNS_ROCE_CMD_TIME_CLASS_A);
+ HNS_ROCE_CMD_SW2HW_CQ, HNS_ROCE_CMD_TIMEOUT_MSECS);
}
static int hns_roce_cq_alloc(struct hns_roce_dev *hr_dev, int nent,
@@ -166,7 +166,7 @@ err_put:
hns_roce_table_put(hr_dev, &cq_table->table, hr_cq->cqn);
err_out:
- hns_roce_bitmap_free(&cq_table->bitmap, hr_cq->cqn);
+ hns_roce_bitmap_free(&cq_table->bitmap, hr_cq->cqn, BITMAP_NO_RR);
return ret;
}
@@ -176,11 +176,10 @@ static int hns_roce_hw2sw_cq(struct hns_roce_dev *dev,
{
return hns_roce_cmd_mbox(dev, 0, mailbox ? mailbox->dma : 0, cq_num,
mailbox ? 0 : 1, HNS_ROCE_CMD_HW2SW_CQ,
- HNS_ROCE_CMD_TIME_CLASS_A);
+ HNS_ROCE_CMD_TIMEOUT_MSECS);
}
-static void hns_roce_free_cq(struct hns_roce_dev *hr_dev,
- struct hns_roce_cq *hr_cq)
+void hns_roce_free_cq(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq)
{
struct hns_roce_cq_table *cq_table = &hr_dev->cq_table;
struct device *dev = &hr_dev->pdev->dev;
@@ -204,7 +203,7 @@ static void hns_roce_free_cq(struct hns_roce_dev *hr_dev,
spin_unlock_irq(&cq_table->lock);
hns_roce_table_put(hr_dev, &cq_table->table, hr_cq->cqn);
- hns_roce_bitmap_free(&cq_table->bitmap, hr_cq->cqn);
+ hns_roce_bitmap_free(&cq_table->bitmap, hr_cq->cqn, BITMAP_NO_RR);
}
static int hns_roce_ib_get_cq_umem(struct hns_roce_dev *hr_dev,
@@ -349,6 +348,15 @@ struct ib_cq *hns_roce_ib_create_cq(struct ib_device *ib_dev,
goto err_mtt;
}
+ /*
+	 * For a CQ created by kernel space, the tptr value should be
+	 * initialized to zero; for a CQ created by user space, setting tptr
+	 * to zero here would cause synchronization problems, so we
+	 * initialize it in user space instead.
+ */
+ if (!context)
+ *hr_cq->tptr_addr = 0;
+
/* Get created cq handler and carry out event */
hr_cq->comp = hns_roce_ib_cq_comp;
hr_cq->event = hns_roce_ib_cq_event;
@@ -383,19 +391,25 @@ int hns_roce_ib_destroy_cq(struct ib_cq *ib_cq)
{
struct hns_roce_dev *hr_dev = to_hr_dev(ib_cq->device);
struct hns_roce_cq *hr_cq = to_hr_cq(ib_cq);
+ int ret = 0;
- hns_roce_free_cq(hr_dev, hr_cq);
- hns_roce_mtt_cleanup(hr_dev, &hr_cq->hr_buf.hr_mtt);
+ if (hr_dev->hw->destroy_cq) {
+ ret = hr_dev->hw->destroy_cq(ib_cq);
+ } else {
+ hns_roce_free_cq(hr_dev, hr_cq);
+ hns_roce_mtt_cleanup(hr_dev, &hr_cq->hr_buf.hr_mtt);
- if (ib_cq->uobject)
- ib_umem_release(hr_cq->umem);
- else
- /* Free the buff of stored cq */
- hns_roce_ib_free_cq_buf(hr_dev, &hr_cq->hr_buf, ib_cq->cqe);
+ if (ib_cq->uobject)
+ ib_umem_release(hr_cq->umem);
+ else
+ /* Free the buff of stored cq */
+ hns_roce_ib_free_cq_buf(hr_dev, &hr_cq->hr_buf,
+ ib_cq->cqe);
- kfree(hr_cq);
+ kfree(hr_cq);
+ }
- return 0;
+ return ret;
}
void hns_roce_cq_completion(struct hns_roce_dev *hr_dev, u32 cqn)
diff --git a/drivers/infiniband/hw/hns/hns_roce_device.h b/drivers/infiniband/hw/hns/hns_roce_device.h
index 341731553a60..1a6cb5d7a0dd 100644
--- a/drivers/infiniband/hw/hns/hns_roce_device.h
+++ b/drivers/infiniband/hw/hns/hns_roce_device.h
@@ -37,6 +37,8 @@
#define DRV_NAME "hns_roce"
+#define HNS_ROCE_HW_VER1 ('h' << 24 | 'i' << 16 | '0' << 8 | '6')
+
#define MAC_ADDR_OCTET_NUM 6
#define HNS_ROCE_MAX_MSG_LEN 0x80000000
@@ -54,6 +56,12 @@
#define HNS_ROCE_MAX_INNER_MTPT_NUM 0x7
#define HNS_ROCE_MAX_MTPT_PBL_NUM 0x100000
+#define HNS_ROCE_EACH_FREE_CQ_WAIT_MSECS 20
+#define HNS_ROCE_MAX_FREE_CQ_WAIT_CNT \
+ (5000 / HNS_ROCE_EACH_FREE_CQ_WAIT_MSECS)
+#define HNS_ROCE_CQE_WCMD_EMPTY_BIT 0x2
+#define HNS_ROCE_MIN_CQE_CNT 16
+
#define HNS_ROCE_MAX_IRQ_NUM 34
#define HNS_ROCE_COMP_VEC_NUM 32
@@ -70,6 +78,9 @@
#define HNS_ROCE_MAX_GID_NUM 16
#define HNS_ROCE_GID_SIZE 16
+#define BITMAP_NO_RR 0
+#define BITMAP_RR 1
+
#define MR_TYPE_MR 0x00
#define MR_TYPE_DMA 0x03
@@ -196,9 +207,9 @@ struct hns_roce_bitmap {
/* Order = 0: bitmap is biggest, order = max bitmap is least (only a bit) */
/* Every bit repesent to a partner free/used status in bitmap */
/*
-* Initial, bits of other bitmap are all 0 except that a bit of max_order is 1
-* Bit = 1 represent to idle and available; bit = 0: not available
-*/
+ * Initially, all other orders' bits are 0; only one bit of max_order is 1.
+ * Bit = 1 means idle and available; bit = 0 means not available.
+ */
struct hns_roce_buddy {
/* Members point to every order level bitmap */
unsigned long **bits;
@@ -296,7 +307,7 @@ struct hns_roce_cq {
u32 cq_depth;
u32 cons_index;
void __iomem *cq_db_l;
- void __iomem *tptr_addr;
+ u16 *tptr_addr;
unsigned long cqn;
u32 vector;
atomic_t refcount;
@@ -360,29 +371,34 @@ struct hns_roce_cmdq {
struct mutex hcr_mutex;
struct semaphore poll_sem;
/*
- * Event mode: cmd register mutex protection,
- * ensure to not exceed max_cmds and user use limit region
- */
+	 * Event mode: cmd register mutex protection,
+	 * ensures we do not exceed max_cmds or the user's limit region
+ */
struct semaphore event_sem;
int max_cmds;
spinlock_t context_lock;
int free_head;
struct hns_roce_cmd_context *context;
/*
- * Result of get integer part
- * which max_comds compute according a power of 2
- */
+	 * Integer part of the result,
+	 * computed from max_cmds rounded to a power of 2
+ */
u16 token_mask;
/*
- * Process whether use event mode, init default non-zero
- * After the event queue of cmd event ready,
- * can switch into event mode
- * close device, switch into poll mode(non event mode)
- */
+	 * Whether to use event mode; initialized to a non-zero default.
+	 * Once the event queue for cmd events is ready,
+	 * we can switch into event mode;
+	 * on device close, switch back into poll (non-event) mode
+ */
u8 use_events;
u8 toggle;
};
+struct hns_roce_cmd_mailbox {
+ void *buf;
+ dma_addr_t dma;
+};
+
struct hns_roce_dev;
struct hns_roce_qp {
@@ -424,8 +440,6 @@ struct hns_roce_ib_iboe {
struct net_device *netdevs[HNS_ROCE_MAX_PORTS];
struct notifier_block nb;
struct notifier_block nb_inet;
- /* 16 GID is shared by 6 port in v1 engine. */
- union ib_gid gid_table[HNS_ROCE_MAX_GID_NUM];
u8 phy_port[HNS_ROCE_MAX_PORTS];
};
@@ -519,6 +533,8 @@ struct hns_roce_hw {
struct ib_recv_wr **bad_recv_wr);
int (*req_notify_cq)(struct ib_cq *ibcq, enum ib_cq_notify_flags flags);
int (*poll_cq)(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
+ int (*dereg_mr)(struct hns_roce_dev *hr_dev, struct hns_roce_mr *mr);
+ int (*destroy_cq)(struct ib_cq *ibcq);
void *priv;
};
@@ -553,6 +569,8 @@ struct hns_roce_dev {
int cmd_mod;
int loop_idc;
+ dma_addr_t tptr_dma_addr; /*only for hw v1*/
+ u32 tptr_size; /*only for hw v1*/
struct hns_roce_hw *hw;
};
@@ -657,7 +675,8 @@ void hns_roce_cleanup_cq_table(struct hns_roce_dev *hr_dev);
void hns_roce_cleanup_qp_table(struct hns_roce_dev *hr_dev);
int hns_roce_bitmap_alloc(struct hns_roce_bitmap *bitmap, unsigned long *obj);
-void hns_roce_bitmap_free(struct hns_roce_bitmap *bitmap, unsigned long obj);
+void hns_roce_bitmap_free(struct hns_roce_bitmap *bitmap, unsigned long obj,
+ int rr);
int hns_roce_bitmap_init(struct hns_roce_bitmap *bitmap, u32 num, u32 mask,
u32 reserved_bot, u32 resetrved_top);
void hns_roce_bitmap_cleanup(struct hns_roce_bitmap *bitmap);
@@ -665,9 +684,11 @@ void hns_roce_cleanup_bitmap(struct hns_roce_dev *hr_dev);
int hns_roce_bitmap_alloc_range(struct hns_roce_bitmap *bitmap, int cnt,
int align, unsigned long *obj);
void hns_roce_bitmap_free_range(struct hns_roce_bitmap *bitmap,
- unsigned long obj, int cnt);
+ unsigned long obj, int cnt,
+ int rr);
-struct ib_ah *hns_roce_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr);
+struct ib_ah *hns_roce_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr,
+ struct ib_udata *udata);
int hns_roce_query_ah(struct ib_ah *ibah, struct ib_ah_attr *ah_attr);
int hns_roce_destroy_ah(struct ib_ah *ah);
@@ -681,6 +702,10 @@ struct ib_mr *hns_roce_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
u64 virt_addr, int access_flags,
struct ib_udata *udata);
int hns_roce_dereg_mr(struct ib_mr *ibmr);
+int hns_roce_hw2sw_mpt(struct hns_roce_dev *hr_dev,
+ struct hns_roce_cmd_mailbox *mailbox,
+ unsigned long mpt_index);
+unsigned long key_to_hw_index(u32 key);
void hns_roce_buf_free(struct hns_roce_dev *hr_dev, u32 size,
struct hns_roce_buf *buf);
@@ -717,6 +742,7 @@ struct ib_cq *hns_roce_ib_create_cq(struct ib_device *ib_dev,
struct ib_udata *udata);
int hns_roce_ib_destroy_cq(struct ib_cq *ib_cq);
+void hns_roce_free_cq(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq);
void hns_roce_cq_completion(struct hns_roce_dev *hr_dev, u32 cqn);
void hns_roce_cq_event(struct hns_roce_dev *hr_dev, u32 cqn, int event_type);
diff --git a/drivers/infiniband/hw/hns/hns_roce_eq.c b/drivers/infiniband/hw/hns/hns_roce_eq.c
index 21e21b03cfb5..50f864935a0e 100644
--- a/drivers/infiniband/hw/hns/hns_roce_eq.c
+++ b/drivers/infiniband/hw/hns/hns_roce_eq.c
@@ -371,9 +371,9 @@ static int hns_roce_aeq_ovf_int(struct hns_roce_dev *hr_dev,
int i = 0;
/**
- * AEQ overflow ECC mult bit err CEQ overflow alarm
- * must clear interrupt, mask irq, clear irq, cancel mask operation
- */
+	 * AEQ overflow, ECC multi-bit error and CEQ overflow alarms
+	 * must clear the interrupt: mask irq, clear irq, cancel the mask
+ */
aeshift_val = roce_read(hr_dev, ROCEE_CAEP_AEQC_AEQE_SHIFT_REG);
if (roce_get_bit(aeshift_val,
diff --git a/drivers/infiniband/hw/hns/hns_roce_hem.c b/drivers/infiniband/hw/hns/hns_roce_hem.c
index 250d8f280390..c5104e0b2916 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hem.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hem.c
@@ -80,9 +80,9 @@ struct hns_roce_hem *hns_roce_alloc_hem(struct hns_roce_dev *hr_dev, int npages,
--order;
/*
- * Alloc memory one time. If failed, don't alloc small block
- * memory, directly return fail.
- */
+	 * Allocate the memory in one shot. On failure, do not fall back
+	 * to small-block allocations; return failure directly.
+ */
mem = &chunk->mem[chunk->npages];
buf = dma_alloc_coherent(&hr_dev->pdev->dev, PAGE_SIZE << order,
&sg_dma_address(mem), gfp_mask);
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
index 71232e5fabf6..b8111b0c8877 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
@@ -32,6 +32,7 @@
#include <linux/platform_device.h>
#include <linux/acpi.h>
+#include <linux/etherdevice.h>
#include <rdma/ib_umem.h>
#include "hns_roce_common.h"
#include "hns_roce_device.h"
@@ -72,6 +73,8 @@ int hns_roce_v1_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
int nreq = 0;
u32 ind = 0;
int ret = 0;
+ u8 *smac;
+ int loopback;
if (unlikely(ibqp->qp_type != IB_QPT_GSI &&
ibqp->qp_type != IB_QPT_RC)) {
@@ -129,6 +132,14 @@ int hns_roce_v1_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
UD_SEND_WQE_U32_8_DMAC_5_M,
UD_SEND_WQE_U32_8_DMAC_5_S,
ah->av.mac[5]);
+
+ smac = (u8 *)hr_dev->dev_addr[qp->port];
+ loopback = ether_addr_equal_unaligned(ah->av.mac,
+ smac) ? 1 : 0;
+ roce_set_bit(ud_sq_wqe->u32_8,
+ UD_SEND_WQE_U32_8_LOOPBACK_INDICATOR_S,
+ loopback);
+
roce_set_field(ud_sq_wqe->u32_8,
UD_SEND_WQE_U32_8_OPERATION_TYPE_M,
UD_SEND_WQE_U32_8_OPERATION_TYPE_S,
@@ -284,6 +295,8 @@ out:
roce_set_field(sq_db.u32_4, SQ_DOORBELL_U32_4_SQ_HEAD_M,
SQ_DOORBELL_U32_4_SQ_HEAD_S,
(qp->sq.head & ((qp->sq.wqe_cnt << 1) - 1)));
+ roce_set_field(sq_db.u32_4, SQ_DOORBELL_U32_4_SL_M,
+ SQ_DOORBELL_U32_4_SL_S, qp->sl);
roce_set_field(sq_db.u32_4, SQ_DOORBELL_U32_4_PORT_M,
SQ_DOORBELL_U32_4_PORT_S, qp->phy_port);
roce_set_field(sq_db.u32_8, SQ_DOORBELL_U32_8_QPN_M,
@@ -611,6 +624,213 @@ ext_sdb_buf_fail_out:
return ret;
}
+static struct hns_roce_qp *hns_roce_v1_create_lp_qp(struct hns_roce_dev *hr_dev,
+ struct ib_pd *pd)
+{
+ struct device *dev = &hr_dev->pdev->dev;
+ struct ib_qp_init_attr init_attr;
+ struct ib_qp *qp;
+
+ memset(&init_attr, 0, sizeof(struct ib_qp_init_attr));
+ init_attr.qp_type = IB_QPT_RC;
+ init_attr.sq_sig_type = IB_SIGNAL_ALL_WR;
+ init_attr.cap.max_recv_wr = HNS_ROCE_MIN_WQE_NUM;
+ init_attr.cap.max_send_wr = HNS_ROCE_MIN_WQE_NUM;
+
+ qp = hns_roce_create_qp(pd, &init_attr, NULL);
+ if (IS_ERR(qp)) {
+		dev_err(dev, "Create loop qp for mr free failed!\n");
+ return NULL;
+ }
+
+ return to_hr_qp(qp);
+}
+
+static int hns_roce_v1_rsv_lp_qp(struct hns_roce_dev *hr_dev)
+{
+ struct hns_roce_caps *caps = &hr_dev->caps;
+ struct device *dev = &hr_dev->pdev->dev;
+ struct ib_cq_init_attr cq_init_attr;
+ struct hns_roce_free_mr *free_mr;
+ struct ib_qp_attr attr = { 0 };
+ struct hns_roce_v1_priv *priv;
+ struct hns_roce_qp *hr_qp;
+ struct ib_cq *cq;
+ struct ib_pd *pd;
+ u64 subnet_prefix;
+ int attr_mask = 0;
+ int i;
+ int ret;
+ u8 phy_port;
+ u8 sl;
+
+ priv = (struct hns_roce_v1_priv *)hr_dev->hw->priv;
+ free_mr = &priv->free_mr;
+
+ /* Reserved cq for loop qp */
+ cq_init_attr.cqe = HNS_ROCE_MIN_WQE_NUM * 2;
+ cq_init_attr.comp_vector = 0;
+ cq = hns_roce_ib_create_cq(&hr_dev->ib_dev, &cq_init_attr, NULL, NULL);
+ if (IS_ERR(cq)) {
+		dev_err(dev, "Create cq for reserved loop qp failed!\n");
+ return -ENOMEM;
+ }
+ free_mr->mr_free_cq = to_hr_cq(cq);
+ free_mr->mr_free_cq->ib_cq.device = &hr_dev->ib_dev;
+ free_mr->mr_free_cq->ib_cq.uobject = NULL;
+ free_mr->mr_free_cq->ib_cq.comp_handler = NULL;
+ free_mr->mr_free_cq->ib_cq.event_handler = NULL;
+ free_mr->mr_free_cq->ib_cq.cq_context = NULL;
+ atomic_set(&free_mr->mr_free_cq->ib_cq.usecnt, 0);
+
+ pd = hns_roce_alloc_pd(&hr_dev->ib_dev, NULL, NULL);
+ if (IS_ERR(pd)) {
+		dev_err(dev, "Create pd for reserved loop qp failed!\n");
+ ret = -ENOMEM;
+ goto alloc_pd_failed;
+ }
+ free_mr->mr_free_pd = to_hr_pd(pd);
+ free_mr->mr_free_pd->ibpd.device = &hr_dev->ib_dev;
+ free_mr->mr_free_pd->ibpd.uobject = NULL;
+ atomic_set(&free_mr->mr_free_pd->ibpd.usecnt, 0);
+
+ attr.qp_access_flags = IB_ACCESS_REMOTE_WRITE;
+ attr.pkey_index = 0;
+ attr.min_rnr_timer = 0;
+ /* Disable read ability */
+ attr.max_dest_rd_atomic = 0;
+ attr.max_rd_atomic = 0;
+ /* Use arbitrary values as rq_psn and sq_psn */
+ attr.rq_psn = 0x0808;
+ attr.sq_psn = 0x0808;
+ attr.retry_cnt = 7;
+ attr.rnr_retry = 7;
+ attr.timeout = 0x12;
+ attr.path_mtu = IB_MTU_256;
+ attr.ah_attr.ah_flags = 1;
+ attr.ah_attr.static_rate = 3;
+ attr.ah_attr.grh.sgid_index = 0;
+ attr.ah_attr.grh.hop_limit = 1;
+ attr.ah_attr.grh.flow_label = 0;
+ attr.ah_attr.grh.traffic_class = 0;
+
+ subnet_prefix = cpu_to_be64(0xfe80000000000000LL);
+ for (i = 0; i < HNS_ROCE_V1_RESV_QP; i++) {
+ free_mr->mr_free_qp[i] = hns_roce_v1_create_lp_qp(hr_dev, pd);
+		if (!free_mr->mr_free_qp[i]) {
+ dev_err(dev, "Create loop qp failed!\n");
+ goto create_lp_qp_failed;
+ }
+ hr_qp = free_mr->mr_free_qp[i];
+
+ sl = i / caps->num_ports;
+
+ if (caps->num_ports == HNS_ROCE_MAX_PORTS)
+ phy_port = (i >= HNS_ROCE_MAX_PORTS) ? (i - 2) :
+ (i % caps->num_ports);
+ else
+ phy_port = i % caps->num_ports;
+
+ hr_qp->port = phy_port + 1;
+ hr_qp->phy_port = phy_port;
+ hr_qp->ibqp.qp_type = IB_QPT_RC;
+ hr_qp->ibqp.device = &hr_dev->ib_dev;
+ hr_qp->ibqp.uobject = NULL;
+ atomic_set(&hr_qp->ibqp.usecnt, 0);
+ hr_qp->ibqp.pd = pd;
+ hr_qp->ibqp.recv_cq = cq;
+ hr_qp->ibqp.send_cq = cq;
+
+ attr.ah_attr.port_num = phy_port + 1;
+ attr.ah_attr.sl = sl;
+ attr.port_num = phy_port + 1;
+
+ attr.dest_qp_num = hr_qp->qpn;
+ memcpy(attr.ah_attr.dmac, hr_dev->dev_addr[phy_port],
+ MAC_ADDR_OCTET_NUM);
+
+ memcpy(attr.ah_attr.grh.dgid.raw,
+ &subnet_prefix, sizeof(u64));
+ memcpy(&attr.ah_attr.grh.dgid.raw[8],
+ hr_dev->dev_addr[phy_port], 3);
+ memcpy(&attr.ah_attr.grh.dgid.raw[13],
+ hr_dev->dev_addr[phy_port] + 3, 3);
+ attr.ah_attr.grh.dgid.raw[11] = 0xff;
+ attr.ah_attr.grh.dgid.raw[12] = 0xfe;
+ attr.ah_attr.grh.dgid.raw[8] ^= 2;
+
+ attr_mask |= IB_QP_PORT;
+
+ ret = hr_dev->hw->modify_qp(&hr_qp->ibqp, &attr, attr_mask,
+ IB_QPS_RESET, IB_QPS_INIT);
+ if (ret) {
+ dev_err(dev, "modify qp failed(%d)!\n", ret);
+ goto create_lp_qp_failed;
+ }
+
+ ret = hr_dev->hw->modify_qp(&hr_qp->ibqp, &attr, attr_mask,
+ IB_QPS_INIT, IB_QPS_RTR);
+ if (ret) {
+ dev_err(dev, "modify qp failed(%d)!\n", ret);
+ goto create_lp_qp_failed;
+ }
+
+ ret = hr_dev->hw->modify_qp(&hr_qp->ibqp, &attr, attr_mask,
+ IB_QPS_RTR, IB_QPS_RTS);
+ if (ret) {
+ dev_err(dev, "modify qp failed(%d)!\n", ret);
+ goto create_lp_qp_failed;
+ }
+ }
+
+ return 0;
+
+create_lp_qp_failed:
+ for (i -= 1; i >= 0; i--) {
+ hr_qp = free_mr->mr_free_qp[i];
+ if (hns_roce_v1_destroy_qp(&hr_qp->ibqp))
+ dev_err(dev, "Destroy qp %d for mr free failed!\n", i);
+ }
+
+ if (hns_roce_dealloc_pd(pd))
+ dev_err(dev, "Destroy pd for create_lp_qp failed!\n");
+
+alloc_pd_failed:
+ if (hns_roce_ib_destroy_cq(cq))
+ dev_err(dev, "Destroy cq for create_lp_qp failed!\n");
+
+ return -EINVAL;
+}
+
+static void hns_roce_v1_release_lp_qp(struct hns_roce_dev *hr_dev)
+{
+ struct device *dev = &hr_dev->pdev->dev;
+ struct hns_roce_free_mr *free_mr;
+ struct hns_roce_v1_priv *priv;
+ struct hns_roce_qp *hr_qp;
+ int ret;
+ int i;
+
+ priv = (struct hns_roce_v1_priv *)hr_dev->hw->priv;
+ free_mr = &priv->free_mr;
+
+ for (i = 0; i < HNS_ROCE_V1_RESV_QP; i++) {
+ hr_qp = free_mr->mr_free_qp[i];
+ ret = hns_roce_v1_destroy_qp(&hr_qp->ibqp);
+ if (ret)
+ dev_err(dev, "Destroy qp %d for mr free failed(%d)!\n",
+ i, ret);
+ }
+
+ ret = hns_roce_ib_destroy_cq(&free_mr->mr_free_cq->ib_cq);
+ if (ret)
+ dev_err(dev, "Destroy cq for mr_free failed(%d)!\n", ret);
+
+ ret = hns_roce_dealloc_pd(&free_mr->mr_free_pd->ibpd);
+ if (ret)
+ dev_err(dev, "Destroy pd for mr_free failed(%d)!\n", ret);
+}
+
static int hns_roce_db_init(struct hns_roce_dev *hr_dev)
{
struct device *dev = &hr_dev->pdev->dev;
@@ -648,6 +868,223 @@ static int hns_roce_db_init(struct hns_roce_dev *hr_dev)
return 0;
}
+void hns_roce_v1_recreate_lp_qp_work_fn(struct work_struct *work)
+{
+ struct hns_roce_recreate_lp_qp_work *lp_qp_work;
+ struct hns_roce_dev *hr_dev;
+
+ lp_qp_work = container_of(work, struct hns_roce_recreate_lp_qp_work,
+ work);
+ hr_dev = to_hr_dev(lp_qp_work->ib_dev);
+
+ hns_roce_v1_release_lp_qp(hr_dev);
+
+ if (hns_roce_v1_rsv_lp_qp(hr_dev))
+		dev_err(&hr_dev->pdev->dev, "create reserved qp failed\n");
+
+ if (lp_qp_work->comp_flag)
+ complete(lp_qp_work->comp);
+
+ kfree(lp_qp_work);
+}
+
+static int hns_roce_v1_recreate_lp_qp(struct hns_roce_dev *hr_dev)
+{
+ struct device *dev = &hr_dev->pdev->dev;
+ struct hns_roce_recreate_lp_qp_work *lp_qp_work;
+ struct hns_roce_free_mr *free_mr;
+ struct hns_roce_v1_priv *priv;
+ struct completion comp;
+ unsigned long end =
+ msecs_to_jiffies(HNS_ROCE_V1_RECREATE_LP_QP_TIMEOUT_MSECS) + jiffies;
+
+ priv = (struct hns_roce_v1_priv *)hr_dev->hw->priv;
+ free_mr = &priv->free_mr;
+
+	lp_qp_work = kzalloc(sizeof(struct hns_roce_recreate_lp_qp_work),
+			     GFP_KERNEL);
+	if (!lp_qp_work)
+		return -ENOMEM;
+
+ INIT_WORK(&(lp_qp_work->work), hns_roce_v1_recreate_lp_qp_work_fn);
+
+ lp_qp_work->ib_dev = &(hr_dev->ib_dev);
+ lp_qp_work->comp = &comp;
+ lp_qp_work->comp_flag = 1;
+
+ init_completion(lp_qp_work->comp);
+
+ queue_work(free_mr->free_mr_wq, &(lp_qp_work->work));
+
+ while (time_before_eq(jiffies, end)) {
+ if (try_wait_for_completion(&comp))
+ return 0;
+ msleep(HNS_ROCE_V1_RECREATE_LP_QP_WAIT_VALUE);
+ }
+
+ lp_qp_work->comp_flag = 0;
+ if (try_wait_for_completion(&comp))
+ return 0;
+
+	dev_warn(dev, "recreate lp qp timed out after 20s, returning failure!\n");
+ return -ETIMEDOUT;
+}
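The recreate path uses a bounded-wait idiom: the worker signals *comp only while comp_flag is set, so after the deadline the caller clears the flag and makes one last non-blocking check before abandoning the on-stack completion. Condensed, with hypothetical names w, wq, timeout_ms and poll_ms:

	unsigned long end = jiffies + msecs_to_jiffies(timeout_ms);

	queue_work(wq, &w->work);
	while (time_before_eq(jiffies, end)) {
		if (try_wait_for_completion(&comp))
			return 0;		/* worker finished in time */
		msleep(poll_ms);
	}

	w->comp_flag = 0;			/* tell the worker not to signal */
	if (try_wait_for_completion(&comp))	/* may have fired meanwhile */
		return 0;
	return -ETIMEDOUT;	/* comp must not be touched after this */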
+
+static int hns_roce_v1_send_lp_wqe(struct hns_roce_qp *hr_qp)
+{
+ struct hns_roce_dev *hr_dev = to_hr_dev(hr_qp->ibqp.device);
+ struct device *dev = &hr_dev->pdev->dev;
+ struct ib_send_wr send_wr, *bad_wr;
+ int ret;
+
+ memset(&send_wr, 0, sizeof(send_wr));
+ send_wr.next = NULL;
+ send_wr.num_sge = 0;
+ send_wr.send_flags = 0;
+ send_wr.sg_list = NULL;
+ send_wr.wr_id = (unsigned long long)&send_wr;
+ send_wr.opcode = IB_WR_RDMA_WRITE;
+
+ ret = hns_roce_v1_post_send(&hr_qp->ibqp, &send_wr, &bad_wr);
+ if (ret) {
+		dev_err(dev, "Post write wqe for mr free failed(%d)!\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void hns_roce_v1_mr_free_work_fn(struct work_struct *work)
+{
+ struct hns_roce_mr_free_work *mr_work;
+ struct ib_wc wc[HNS_ROCE_V1_RESV_QP];
+ struct hns_roce_free_mr *free_mr;
+ struct hns_roce_cq *mr_free_cq;
+ struct hns_roce_v1_priv *priv;
+ struct hns_roce_dev *hr_dev;
+ struct hns_roce_mr *hr_mr;
+ struct hns_roce_qp *hr_qp;
+ struct device *dev;
+ unsigned long end =
+ msecs_to_jiffies(HNS_ROCE_V1_FREE_MR_TIMEOUT_MSECS) + jiffies;
+ int i;
+ int ret;
+ int ne;
+
+ mr_work = container_of(work, struct hns_roce_mr_free_work, work);
+ hr_mr = (struct hns_roce_mr *)mr_work->mr;
+ hr_dev = to_hr_dev(mr_work->ib_dev);
+ dev = &hr_dev->pdev->dev;
+
+ priv = (struct hns_roce_v1_priv *)hr_dev->hw->priv;
+ free_mr = &priv->free_mr;
+ mr_free_cq = free_mr->mr_free_cq;
+
+ for (i = 0; i < HNS_ROCE_V1_RESV_QP; i++) {
+ hr_qp = free_mr->mr_free_qp[i];
+ ret = hns_roce_v1_send_lp_wqe(hr_qp);
+ if (ret) {
+ dev_err(dev,
+ "Send wqe (qp:0x%lx) for mr free failed(%d)!\n",
+ hr_qp->qpn, ret);
+ goto free_work;
+ }
+ }
+
+ ne = HNS_ROCE_V1_RESV_QP;
+ do {
+ ret = hns_roce_v1_poll_cq(&mr_free_cq->ib_cq, ne, wc);
+ if (ret < 0) {
+ dev_err(dev,
+				"(qp:0x%lx) Poll cqe failed(%d) for mr 0x%x free! %d cqe remain\n",
+ hr_qp->qpn, ret, hr_mr->key, ne);
+ goto free_work;
+ }
+ ne -= ret;
+ msleep(HNS_ROCE_V1_FREE_MR_WAIT_VALUE);
+ } while (ne && time_before_eq(jiffies, end));
+
+ if (ne != 0)
+ dev_err(dev,
+			"Poll cqe for mr 0x%x free timed out! %d cqe remain\n",
+ hr_mr->key, ne);
+
+free_work:
+ if (mr_work->comp_flag)
+ complete(mr_work->comp);
+ kfree(mr_work);
+}
+
+int hns_roce_v1_dereg_mr(struct hns_roce_dev *hr_dev, struct hns_roce_mr *mr)
+{
+ struct device *dev = &hr_dev->pdev->dev;
+ struct hns_roce_mr_free_work *mr_work;
+ struct hns_roce_free_mr *free_mr;
+ struct hns_roce_v1_priv *priv;
+ struct completion comp;
+ unsigned long end =
+ msecs_to_jiffies(HNS_ROCE_V1_FREE_MR_TIMEOUT_MSECS) + jiffies;
+ unsigned long start = jiffies;
+ int npages;
+ int ret = 0;
+
+ priv = (struct hns_roce_v1_priv *)hr_dev->hw->priv;
+ free_mr = &priv->free_mr;
+
+ if (mr->enabled) {
+ if (hns_roce_hw2sw_mpt(hr_dev, NULL, key_to_hw_index(mr->key)
+ & (hr_dev->caps.num_mtpts - 1)))
+ dev_warn(dev, "HW2SW_MPT failed!\n");
+ }
+
+ mr_work = kzalloc(sizeof(*mr_work), GFP_KERNEL);
+ if (!mr_work) {
+ ret = -ENOMEM;
+ goto free_mr;
+ }
+
+ INIT_WORK(&(mr_work->work), hns_roce_v1_mr_free_work_fn);
+
+ mr_work->ib_dev = &(hr_dev->ib_dev);
+ mr_work->comp = &comp;
+ mr_work->comp_flag = 1;
+ mr_work->mr = (void *)mr;
+ init_completion(mr_work->comp);
+
+ queue_work(free_mr->free_mr_wq, &(mr_work->work));
+
+ while (time_before_eq(jiffies, end)) {
+ if (try_wait_for_completion(&comp))
+ goto free_mr;
+ msleep(HNS_ROCE_V1_FREE_MR_WAIT_VALUE);
+ }
+
+ mr_work->comp_flag = 0;
+ if (try_wait_for_completion(&comp))
+ goto free_mr;
+
+	dev_warn(dev, "Free mr work 0x%x timed out after 50s and failed!\n", mr->key);
+ ret = -ETIMEDOUT;
+
+free_mr:
+ dev_dbg(dev, "Free mr 0x%x use 0x%x us.\n",
+ mr->key, jiffies_to_usecs(jiffies) - jiffies_to_usecs(start));
+
+ if (mr->size != ~0ULL) {
+ npages = ib_umem_page_count(mr->umem);
+ dma_free_coherent(dev, npages * 8, mr->pbl_buf,
+ mr->pbl_dma_addr);
+ }
+
+ hns_roce_bitmap_free(&hr_dev->mr_table.mtpt_bitmap,
+			     key_to_hw_index(mr->key), BITMAP_NO_RR);
+
+ if (mr->umem)
+ ib_umem_release(mr->umem);
+
+ kfree(mr);
+
+ return ret;
+}
+
static void hns_roce_db_free(struct hns_roce_dev *hr_dev)
{
struct device *dev = &hr_dev->pdev->dev;
@@ -849,6 +1286,85 @@ static void hns_roce_bt_free(struct hns_roce_dev *hr_dev)
priv->bt_table.qpc_buf.buf, priv->bt_table.qpc_buf.map);
}
+static int hns_roce_tptr_init(struct hns_roce_dev *hr_dev)
+{
+ struct device *dev = &hr_dev->pdev->dev;
+ struct hns_roce_buf_list *tptr_buf;
+ struct hns_roce_v1_priv *priv;
+
+ priv = (struct hns_roce_v1_priv *)hr_dev->hw->priv;
+ tptr_buf = &priv->tptr_table.tptr_buf;
+
+ /*
+	 * This buffer will be used for the CQ's tptr (tail pointer), also
+	 * named ci (consumer index). Every CQ uses 2 bytes to save its
+	 * cqe ci in hip06. Hardware reads this area to get the new ci
+	 * when the queue is almost full.
+ */
+ tptr_buf->buf = dma_alloc_coherent(dev, HNS_ROCE_V1_TPTR_BUF_SIZE,
+ &tptr_buf->map, GFP_KERNEL);
+ if (!tptr_buf->buf)
+ return -ENOMEM;
+
+ hr_dev->tptr_dma_addr = tptr_buf->map;
+ hr_dev->tptr_size = HNS_ROCE_V1_TPTR_BUF_SIZE;
+
+ return 0;
+}
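Each CQ owns a fixed 2-byte slot inside this shared buffer; hns_roce_v1_write_cqc() below derives both the DMA address programmed into the CQ context and the CPU pointer that poll_cq updates:

	/* slot for CQ number cqn inside the shared tptr buffer */
	offset = hr_cq->cqn * HNS_ROCE_V1_TPTR_ENTRY_SIZE;
	tptr_dma_addr = tptr_buf->map + offset;		/* for the hardware */
	hr_cq->tptr_addr = (u16 *)(tptr_buf->buf + offset); /* for the driver */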
+
+static void hns_roce_tptr_free(struct hns_roce_dev *hr_dev)
+{
+ struct device *dev = &hr_dev->pdev->dev;
+ struct hns_roce_buf_list *tptr_buf;
+ struct hns_roce_v1_priv *priv;
+
+ priv = (struct hns_roce_v1_priv *)hr_dev->hw->priv;
+ tptr_buf = &priv->tptr_table.tptr_buf;
+
+ dma_free_coherent(dev, HNS_ROCE_V1_TPTR_BUF_SIZE,
+ tptr_buf->buf, tptr_buf->map);
+}
+
+static int hns_roce_free_mr_init(struct hns_roce_dev *hr_dev)
+{
+ struct device *dev = &hr_dev->pdev->dev;
+ struct hns_roce_free_mr *free_mr;
+ struct hns_roce_v1_priv *priv;
+ int ret = 0;
+
+ priv = (struct hns_roce_v1_priv *)hr_dev->hw->priv;
+ free_mr = &priv->free_mr;
+
+ free_mr->free_mr_wq = create_singlethread_workqueue("hns_roce_free_mr");
+ if (!free_mr->free_mr_wq) {
+ dev_err(dev, "Create free mr workqueue failed!\n");
+ return -ENOMEM;
+ }
+
+ ret = hns_roce_v1_rsv_lp_qp(hr_dev);
+ if (ret) {
+ dev_err(dev, "Reserved loop qp failed(%d)!\n", ret);
+ flush_workqueue(free_mr->free_mr_wq);
+ destroy_workqueue(free_mr->free_mr_wq);
+ }
+
+ return ret;
+}
+
+static void hns_roce_free_mr_free(struct hns_roce_dev *hr_dev)
+{
+ struct hns_roce_free_mr *free_mr;
+ struct hns_roce_v1_priv *priv;
+
+ priv = (struct hns_roce_v1_priv *)hr_dev->hw->priv;
+ free_mr = &priv->free_mr;
+
+ flush_workqueue(free_mr->free_mr_wq);
+ destroy_workqueue(free_mr->free_mr_wq);
+
+ hns_roce_v1_release_lp_qp(hr_dev);
+}
+
/**
* hns_roce_v1_reset - reset RoCE
* @hr_dev: RoCE device struct pointer
@@ -898,6 +1414,38 @@ int hns_roce_v1_reset(struct hns_roce_dev *hr_dev, bool dereset)
return ret;
}
+static int hns_roce_des_qp_init(struct hns_roce_dev *hr_dev)
+{
+ struct device *dev = &hr_dev->pdev->dev;
+ struct hns_roce_v1_priv *priv;
+ struct hns_roce_des_qp *des_qp;
+
+ priv = (struct hns_roce_v1_priv *)hr_dev->hw->priv;
+ des_qp = &priv->des_qp;
+
+ des_qp->requeue_flag = 1;
+ des_qp->qp_wq = create_singlethread_workqueue("hns_roce_destroy_qp");
+ if (!des_qp->qp_wq) {
+ dev_err(dev, "Create destroy qp workqueue failed!\n");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void hns_roce_des_qp_free(struct hns_roce_dev *hr_dev)
+{
+ struct hns_roce_v1_priv *priv;
+ struct hns_roce_des_qp *des_qp;
+
+ priv = (struct hns_roce_v1_priv *)hr_dev->hw->priv;
+ des_qp = &priv->des_qp;
+
+ des_qp->requeue_flag = 0;
+ flush_workqueue(des_qp->qp_wq);
+ destroy_workqueue(des_qp->qp_wq);
+}
+
void hns_roce_v1_profile(struct hns_roce_dev *hr_dev)
{
int i = 0;
@@ -906,12 +1454,11 @@ void hns_roce_v1_profile(struct hns_roce_dev *hr_dev)
hr_dev->vendor_id = le32_to_cpu(roce_read(hr_dev, ROCEE_VENDOR_ID_REG));
hr_dev->vendor_part_id = le32_to_cpu(roce_read(hr_dev,
ROCEE_VENDOR_PART_ID_REG));
- hr_dev->hw_rev = le32_to_cpu(roce_read(hr_dev, ROCEE_HW_VERSION_REG));
-
hr_dev->sys_image_guid = le32_to_cpu(roce_read(hr_dev,
ROCEE_SYS_IMAGE_GUID_L_REG)) |
((u64)le32_to_cpu(roce_read(hr_dev,
ROCEE_SYS_IMAGE_GUID_H_REG)) << 32);
+ hr_dev->hw_rev = HNS_ROCE_HW_VER1;
caps->num_qps = HNS_ROCE_V1_MAX_QP_NUM;
caps->max_wqes = HNS_ROCE_V1_MAX_WQE_NUM;
@@ -1001,18 +1548,44 @@ int hns_roce_v1_init(struct hns_roce_dev *hr_dev)
goto error_failed_raq_init;
}
- hns_roce_port_enable(hr_dev, HNS_ROCE_PORT_UP);
-
ret = hns_roce_bt_init(hr_dev);
if (ret) {
dev_err(dev, "bt init failed!\n");
goto error_failed_bt_init;
}
+ ret = hns_roce_tptr_init(hr_dev);
+ if (ret) {
+ dev_err(dev, "tptr init failed!\n");
+ goto error_failed_tptr_init;
+ }
+
+ ret = hns_roce_des_qp_init(hr_dev);
+ if (ret) {
+ dev_err(dev, "des qp init failed!\n");
+ goto error_failed_des_qp_init;
+ }
+
+ ret = hns_roce_free_mr_init(hr_dev);
+ if (ret) {
+ dev_err(dev, "free mr init failed!\n");
+ goto error_failed_free_mr_init;
+ }
+
+ hns_roce_port_enable(hr_dev, HNS_ROCE_PORT_UP);
+
return 0;
+error_failed_free_mr_init:
+ hns_roce_des_qp_free(hr_dev);
+
+error_failed_des_qp_init:
+ hns_roce_tptr_free(hr_dev);
+
+error_failed_tptr_init:
+ hns_roce_bt_free(hr_dev);
+
error_failed_bt_init:
- hns_roce_port_enable(hr_dev, HNS_ROCE_PORT_DOWN);
hns_roce_raq_free(hr_dev);
error_failed_raq_init:
@@ -1022,8 +1595,11 @@ error_failed_raq_init:
void hns_roce_v1_exit(struct hns_roce_dev *hr_dev)
{
- hns_roce_bt_free(hr_dev);
hns_roce_port_enable(hr_dev, HNS_ROCE_PORT_DOWN);
+ hns_roce_free_mr_free(hr_dev);
+ hns_roce_des_qp_free(hr_dev);
+ hns_roce_tptr_free(hr_dev);
+ hns_roce_bt_free(hr_dev);
hns_roce_raq_free(hr_dev);
hns_roce_db_free(hr_dev);
}
@@ -1061,6 +1637,14 @@ void hns_roce_v1_set_mac(struct hns_roce_dev *hr_dev, u8 phy_port, u8 *addr)
u32 *p;
u32 val;
+ /*
+	 * When the mac changes, loopback may fail
+	 * because smac no longer equals dmac.
+	 * We need to release and create the reserved qp again.
+ */
+ if (hr_dev->hw->dereg_mr && hns_roce_v1_recreate_lp_qp(hr_dev))
+ dev_warn(&hr_dev->pdev->dev, "recreate lp qp timeout!\n");
+
p = (u32 *)(&addr[0]);
reg_smac_l = *p;
roce_raw_write(reg_smac_l, hr_dev->reg_base + ROCEE_SMAC_L_0_REG +
@@ -1293,9 +1877,9 @@ static void __hns_roce_v1_cq_clean(struct hns_roce_cq *hr_cq, u32 qpn,
}
/*
- * Now backwards through the CQ, removing CQ entries
- * that match our QP by overwriting them with next entries.
- */
+	 * Now walk backwards through the CQ, removing CQ entries
+	 * that match our QP by overwriting them with the next entries.
+ */
while ((int) --prod_index - (int) hr_cq->cons_index >= 0) {
cqe = get_cqe(hr_cq, prod_index & hr_cq->ib_cq.cqe);
if ((roce_get_field(cqe->cqe_byte_16, CQE_BYTE_16_LOCAL_QPN_M,
@@ -1317,9 +1901,9 @@ static void __hns_roce_v1_cq_clean(struct hns_roce_cq *hr_cq, u32 qpn,
if (nfreed) {
hr_cq->cons_index += nfreed;
/*
- * Make sure update of buffer contents is done before
- * updating consumer index.
- */
+ * Make sure update of buffer contents is done before
+ * updating consumer index.
+ */
wmb();
hns_roce_v1_cq_set_ci(hr_cq, hr_cq->cons_index);
@@ -1339,14 +1923,21 @@ void hns_roce_v1_write_cqc(struct hns_roce_dev *hr_dev,
dma_addr_t dma_handle, int nent, u32 vector)
{
struct hns_roce_cq_context *cq_context = NULL;
- void __iomem *tptr_addr;
+ struct hns_roce_buf_list *tptr_buf;
+ struct hns_roce_v1_priv *priv;
+ dma_addr_t tptr_dma_addr;
+ int offset;
+
+ priv = (struct hns_roce_v1_priv *)hr_dev->hw->priv;
+ tptr_buf = &priv->tptr_table.tptr_buf;
cq_context = mb_buf;
memset(cq_context, 0, sizeof(*cq_context));
- tptr_addr = 0;
- hr_dev->priv_addr = tptr_addr;
- hr_cq->tptr_addr = tptr_addr;
+ /* Get the tptr for this CQ. */
+ offset = hr_cq->cqn * HNS_ROCE_V1_TPTR_ENTRY_SIZE;
+ tptr_dma_addr = tptr_buf->map + offset;
+ hr_cq->tptr_addr = (u16 *)(tptr_buf->buf + offset);
/* Register cq_context members */
roce_set_field(cq_context->cqc_byte_4,
@@ -1390,10 +1981,10 @@ void hns_roce_v1_write_cqc(struct hns_roce_dev *hr_dev,
roce_set_field(cq_context->cqc_byte_20,
CQ_CONTEXT_CQC_BYTE_20_CQE_TPTR_ADDR_H_M,
CQ_CONTEXT_CQC_BYTE_20_CQE_TPTR_ADDR_H_S,
- (u64)tptr_addr >> 44);
+ tptr_dma_addr >> 44);
cq_context->cqc_byte_20 = cpu_to_le32(cq_context->cqc_byte_20);
- cq_context->cqe_tptr_addr_l = (u32)((u64)tptr_addr >> 12);
+ cq_context->cqe_tptr_addr_l = (u32)(tptr_dma_addr >> 12);
roce_set_field(cq_context->cqc_byte_32,
CQ_CONTEXT_CQC_BYTE_32_CUR_CQE_BA1_H_M,
@@ -1407,7 +1998,7 @@ void hns_roce_v1_write_cqc(struct hns_roce_dev *hr_dev,
roce_set_bit(cq_context->cqc_byte_32,
CQ_CQNTEXT_CQC_BYTE_32_TYPE_OF_COMPLETION_NOTIFICATION_S,
0);
- /*The initial value of cq's ci is 0 */
+ /* The initial value of cq's ci is 0 */
roce_set_field(cq_context->cqc_byte_32,
CQ_CONTEXT_CQC_BYTE_32_CQ_CONS_IDX_M,
CQ_CONTEXT_CQC_BYTE_32_CQ_CONS_IDX_S, 0);
@@ -1424,9 +2015,9 @@ int hns_roce_v1_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
notification_flag = (flags & IB_CQ_SOLICITED_MASK) ==
IB_CQ_SOLICITED ? CQ_DB_REQ_NOT : CQ_DB_REQ_NOT_SOL;
/*
- * flags = 0; Notification Flag = 1, next
- * flags = 1; Notification Flag = 0, solocited
- */
+	 * flags = 0: Notification Flag = 1, next
+	 * flags = 1: Notification Flag = 0, solicited
+ */
doorbell[0] = hr_cq->cons_index & ((hr_cq->cq_depth << 1) - 1);
roce_set_bit(doorbell[1], ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_HW_SYNS_S, 1);
roce_set_field(doorbell[1], ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_M,
@@ -1581,10 +2172,10 @@ static int hns_roce_v1_poll_one(struct hns_roce_cq *hr_cq,
wq = &(*cur_qp)->sq;
if ((*cur_qp)->sq_signal_bits) {
/*
- * If sg_signal_bit is 1,
- * firstly tail pointer updated to wqe
- * which current cqe correspond to
- */
+			 * If sq_signal_bits is 1,
+			 * the tail pointer is first updated to the wqe
+			 * to which the current cqe corresponds
+ */
wqe_ctr = (u16)roce_get_field(cqe->cqe_byte_4,
CQE_BYTE_4_WQE_INDEX_M,
CQE_BYTE_4_WQE_INDEX_S);
@@ -1659,8 +2250,14 @@ int hns_roce_v1_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
break;
}
- if (npolled)
+ if (npolled) {
+ *hr_cq->tptr_addr = hr_cq->cons_index &
+ ((hr_cq->cq_depth << 1) - 1);
+
+		/* Memory barrier */
+ wmb();
hns_roce_v1_cq_set_ci(hr_cq, hr_cq->cons_index);
+ }
spin_unlock_irqrestore(&hr_cq->lock, flags);
@@ -1799,12 +2396,12 @@ static int hns_roce_v1_qp_modify(struct hns_roce_dev *hr_dev,
if (op[cur_state][new_state] == HNS_ROCE_CMD_2RST_QP)
return hns_roce_cmd_mbox(hr_dev, 0, 0, hr_qp->qpn, 2,
HNS_ROCE_CMD_2RST_QP,
- HNS_ROCE_CMD_TIME_CLASS_A);
+ HNS_ROCE_CMD_TIMEOUT_MSECS);
if (op[cur_state][new_state] == HNS_ROCE_CMD_2ERR_QP)
return hns_roce_cmd_mbox(hr_dev, 0, 0, hr_qp->qpn, 2,
HNS_ROCE_CMD_2ERR_QP,
- HNS_ROCE_CMD_TIME_CLASS_A);
+ HNS_ROCE_CMD_TIMEOUT_MSECS);
mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
if (IS_ERR(mailbox))
@@ -1814,7 +2411,7 @@ static int hns_roce_v1_qp_modify(struct hns_roce_dev *hr_dev,
ret = hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0, hr_qp->qpn, 0,
op[cur_state][new_state],
- HNS_ROCE_CMD_TIME_CLASS_C);
+ HNS_ROCE_CMD_TIMEOUT_MSECS);
hns_roce_free_cmd_mailbox(hr_dev, mailbox);
return ret;
@@ -2000,11 +2597,11 @@ static int hns_roce_v1_m_qp(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
}
/*
- *Reset to init
- * Mandatory param:
- * IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_PORT | IB_QP_ACCESS_FLAGS
- * Optional param: NA
- */
+ * Reset to init
+ * Mandatory param:
+ * IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_PORT | IB_QP_ACCESS_FLAGS
+ * Optional param: NA
+ */
if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) {
roce_set_field(context->qpc_bytes_4,
QP_CONTEXT_QPC_BYTES_4_TRANSPORT_SERVICE_TYPE_M,
@@ -2172,24 +2769,14 @@ static int hns_roce_v1_m_qp(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
QP_CONTEXT_QPC_BYTE_32_SIGNALING_TYPE_S,
hr_qp->sq_signal_bits);
- for (port = 0; port < hr_dev->caps.num_ports; port++) {
- smac = (u8 *)hr_dev->dev_addr[port];
- dev_dbg(dev, "smac: %2x: %2x: %2x: %2x: %2x: %2x\n",
- smac[0], smac[1], smac[2], smac[3], smac[4],
- smac[5]);
- if ((dmac[0] == smac[0]) && (dmac[1] == smac[1]) &&
- (dmac[2] == smac[2]) && (dmac[3] == smac[3]) &&
- (dmac[4] == smac[4]) && (dmac[5] == smac[5])) {
- roce_set_bit(context->qpc_bytes_32,
- QP_CONTEXT_QPC_BYTE_32_LOOPBACK_INDICATOR_S,
- 1);
- break;
- }
- }
-
- if (hr_dev->loop_idc == 0x1)
+ port = (attr_mask & IB_QP_PORT) ? (attr->port_num - 1) :
+ hr_qp->port;
+ smac = (u8 *)hr_dev->dev_addr[port];
+	/* when dmac equals smac or loop_idc is 1, the packet should loop back */
+ if (ether_addr_equal_unaligned(dmac, smac) ||
+ hr_dev->loop_idc == 0x1)
roce_set_bit(context->qpc_bytes_32,
- QP_CONTEXT_QPC_BYTE_32_LOOPBACK_INDICATOR_S, 1);
+ QP_CONTEXT_QPC_BYTE_32_LOOPBACK_INDICATOR_S, 1);
roce_set_bit(context->qpc_bytes_32,
QP_CONTEXT_QPC_BYTE_32_GLOBAL_HEADER_S,
@@ -2509,7 +3096,7 @@ static int hns_roce_v1_m_qp(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
/* Every status migrate must change state */
roce_set_field(context->qpc_bytes_144,
QP_CONTEXT_QPC_BYTES_144_QP_STATE_M,
- QP_CONTEXT_QPC_BYTES_144_QP_STATE_S, attr->qp_state);
+ QP_CONTEXT_QPC_BYTES_144_QP_STATE_S, new_state);
/* SW pass context to HW */
ret = hns_roce_v1_qp_modify(hr_dev, &hr_qp->mtt,
@@ -2522,9 +3109,9 @@ static int hns_roce_v1_m_qp(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
}
/*
- * Use rst2init to instead of init2init with drv,
- * need to hw to flash RQ HEAD by DB again
- */
+	 * Use rst2init instead of init2init with the driver;
+	 * the hw needs to flush the RQ HEAD again by doorbell
+ */
if (cur_state == IB_QPS_INIT && new_state == IB_QPS_INIT) {
/* Memory barrier */
wmb();
@@ -2619,7 +3206,7 @@ static int hns_roce_v1_query_qpc(struct hns_roce_dev *hr_dev,
ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, hr_qp->qpn, 0,
HNS_ROCE_CMD_QUERY_QP,
- HNS_ROCE_CMD_TIME_CLASS_A);
+ HNS_ROCE_CMD_TIMEOUT_MSECS);
if (!ret)
memcpy(hr_context, mailbox->buf, sizeof(*hr_context));
else
@@ -2630,8 +3217,78 @@ static int hns_roce_v1_query_qpc(struct hns_roce_dev *hr_dev,
return ret;
}
-int hns_roce_v1_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
- int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr)
+static int hns_roce_v1_q_sqp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
+ int qp_attr_mask,
+ struct ib_qp_init_attr *qp_init_attr)
+{
+ struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
+ struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
+ struct hns_roce_sqp_context context;
+ u32 addr;
+
+ mutex_lock(&hr_qp->mutex);
+
+ if (hr_qp->state == IB_QPS_RESET) {
+ qp_attr->qp_state = IB_QPS_RESET;
+ goto done;
+ }
+
+ addr = ROCEE_QP1C_CFG0_0_REG +
+ hr_qp->port * sizeof(struct hns_roce_sqp_context);
+ context.qp1c_bytes_4 = roce_read(hr_dev, addr);
+ context.sq_rq_bt_l = roce_read(hr_dev, addr + 1);
+ context.qp1c_bytes_12 = roce_read(hr_dev, addr + 2);
+ context.qp1c_bytes_16 = roce_read(hr_dev, addr + 3);
+ context.qp1c_bytes_20 = roce_read(hr_dev, addr + 4);
+ context.cur_rq_wqe_ba_l = roce_read(hr_dev, addr + 5);
+ context.qp1c_bytes_28 = roce_read(hr_dev, addr + 6);
+ context.qp1c_bytes_32 = roce_read(hr_dev, addr + 7);
+ context.cur_sq_wqe_ba_l = roce_read(hr_dev, addr + 8);
+ context.qp1c_bytes_40 = roce_read(hr_dev, addr + 9);
+
+ hr_qp->state = roce_get_field(context.qp1c_bytes_4,
+ QP1C_BYTES_4_QP_STATE_M,
+ QP1C_BYTES_4_QP_STATE_S);
+ qp_attr->qp_state = hr_qp->state;
+ qp_attr->path_mtu = IB_MTU_256;
+ qp_attr->path_mig_state = IB_MIG_ARMED;
+ qp_attr->qkey = QKEY_VAL;
+ qp_attr->rq_psn = 0;
+ qp_attr->sq_psn = 0;
+ qp_attr->dest_qp_num = 1;
+ qp_attr->qp_access_flags = 6;
+
+ qp_attr->pkey_index = roce_get_field(context.qp1c_bytes_20,
+ QP1C_BYTES_20_PKEY_IDX_M,
+ QP1C_BYTES_20_PKEY_IDX_S);
+ qp_attr->port_num = hr_qp->port + 1;
+ qp_attr->sq_draining = 0;
+ qp_attr->max_rd_atomic = 0;
+ qp_attr->max_dest_rd_atomic = 0;
+ qp_attr->min_rnr_timer = 0;
+ qp_attr->timeout = 0;
+ qp_attr->retry_cnt = 0;
+ qp_attr->rnr_retry = 0;
+ qp_attr->alt_timeout = 0;
+
+done:
+ qp_attr->cur_qp_state = qp_attr->qp_state;
+ qp_attr->cap.max_recv_wr = hr_qp->rq.wqe_cnt;
+ qp_attr->cap.max_recv_sge = hr_qp->rq.max_gs;
+ qp_attr->cap.max_send_wr = hr_qp->sq.wqe_cnt;
+ qp_attr->cap.max_send_sge = hr_qp->sq.max_gs;
+ qp_attr->cap.max_inline_data = 0;
+ qp_init_attr->cap = qp_attr->cap;
+ qp_init_attr->create_flags = 0;
+
+ mutex_unlock(&hr_qp->mutex);
+
+ return 0;
+}
+
+static int hns_roce_v1_q_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
+ int qp_attr_mask,
+ struct ib_qp_init_attr *qp_init_attr)
{
struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
@@ -2725,9 +3382,7 @@ int hns_roce_v1_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
qp_attr->pkey_index = roce_get_field(context->qpc_bytes_12,
QP_CONTEXT_QPC_BYTES_12_P_KEY_INDEX_M,
QP_CONTEXT_QPC_BYTES_12_P_KEY_INDEX_S);
- qp_attr->port_num = (u8)roce_get_field(context->qpc_bytes_156,
- QP_CONTEXT_QPC_BYTES_156_PORT_NUM_M,
- QP_CONTEXT_QPC_BYTES_156_PORT_NUM_S) + 1;
+ qp_attr->port_num = hr_qp->port + 1;
qp_attr->sq_draining = 0;
qp_attr->max_rd_atomic = roce_get_field(context->qpc_bytes_156,
QP_CONTEXT_QPC_BYTES_156_INITIATOR_DEPTH_M,
@@ -2767,134 +3422,397 @@ out:
return ret;
}
-static void hns_roce_v1_destroy_qp_common(struct hns_roce_dev *hr_dev,
- struct hns_roce_qp *hr_qp,
- int is_user)
+int hns_roce_v1_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
+ int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr)
+{
+ struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
+
+ return hr_qp->doorbell_qpn <= 1 ?
+ hns_roce_v1_q_sqp(ibqp, qp_attr, qp_attr_mask, qp_init_attr) :
+ hns_roce_v1_q_qp(ibqp, qp_attr, qp_attr_mask, qp_init_attr);
+}
+
+static int check_qp_db_process_status(struct hns_roce_dev *hr_dev,
+ struct hns_roce_qp *hr_qp,
+ u32 sdb_issue_ptr,
+ u32 *sdb_inv_cnt,
+ u32 *wait_stage)
{
- u32 sdbinvcnt;
- unsigned long end = 0;
- u32 sdbinvcnt_val;
- u32 sdbsendptr_val;
- u32 sdbisusepr_val;
- struct hns_roce_cq *send_cq, *recv_cq;
struct device *dev = &hr_dev->pdev->dev;
+ u32 sdb_retry_cnt, old_retry;
+ u32 sdb_send_ptr, old_send;
+ u32 success_flags = 0;
+ u32 cur_cnt, old_cnt;
+ unsigned long end;
+ u32 send_ptr;
+ u32 inv_cnt;
+ u32 tsp_st;
+
+ if (*wait_stage > HNS_ROCE_V1_DB_STAGE2 ||
+ *wait_stage < HNS_ROCE_V1_DB_STAGE1) {
+ dev_err(dev, "QP(0x%lx) db status wait stage(%d) error!\n",
+ hr_qp->qpn, *wait_stage);
+ return -EINVAL;
+ }
- if (hr_qp->ibqp.qp_type == IB_QPT_RC) {
- if (hr_qp->state != IB_QPS_RESET) {
- /*
- * Set qp to ERR,
- * waiting for hw complete processing all dbs
- */
- if (hns_roce_v1_qp_modify(hr_dev, NULL,
- to_hns_roce_state(
- (enum ib_qp_state)hr_qp->state),
- HNS_ROCE_QP_STATE_ERR, NULL,
- hr_qp))
- dev_err(dev, "modify QP %06lx to ERR failed.\n",
- hr_qp->qpn);
-
- /* Record issued doorbell */
- sdbisusepr_val = roce_read(hr_dev,
- ROCEE_SDB_ISSUE_PTR_REG);
- /*
- * Query db process status,
- * until hw process completely
- */
- end = msecs_to_jiffies(
- HNS_ROCE_QP_DESTROY_TIMEOUT_MSECS) + jiffies;
- do {
- sdbsendptr_val = roce_read(hr_dev,
+ /* Calculate the total timeout for the entire verification process */
+ end = msecs_to_jiffies(HNS_ROCE_V1_CHECK_DB_TIMEOUT_MSECS) + jiffies;
+
+ if (*wait_stage == HNS_ROCE_V1_DB_STAGE1) {
+ /* Query db process status, until hw process completely */
+ sdb_send_ptr = roce_read(hr_dev, ROCEE_SDB_SEND_PTR_REG);
+ while (roce_hw_index_cmp_lt(sdb_send_ptr, sdb_issue_ptr,
+ ROCEE_SDB_PTR_CMP_BITS)) {
+ if (!time_before(jiffies, end)) {
+ dev_dbg(dev, "QP(0x%lx) db process stage1 timeout. issue 0x%x send 0x%x.\n",
+ hr_qp->qpn, sdb_issue_ptr,
+ sdb_send_ptr);
+ return 0;
+ }
+
+ msleep(HNS_ROCE_V1_CHECK_DB_SLEEP_MSECS);
+ sdb_send_ptr = roce_read(hr_dev,
ROCEE_SDB_SEND_PTR_REG);
- if (!time_before(jiffies, end)) {
- dev_err(dev, "destroy qp(0x%lx) timeout!!!",
- hr_qp->qpn);
- break;
- }
- } while ((short)(roce_get_field(sdbsendptr_val,
- ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_M,
- ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_S) -
- roce_get_field(sdbisusepr_val,
- ROCEE_SDB_ISSUE_PTR_SDB_ISSUE_PTR_M,
- ROCEE_SDB_ISSUE_PTR_SDB_ISSUE_PTR_S)
- ) < 0);
+ }
- /* Get list pointer */
- sdbinvcnt = roce_read(hr_dev, ROCEE_SDB_INV_CNT_REG);
+ if (roce_get_field(sdb_issue_ptr,
+ ROCEE_SDB_ISSUE_PTR_SDB_ISSUE_PTR_M,
+ ROCEE_SDB_ISSUE_PTR_SDB_ISSUE_PTR_S) ==
+ roce_get_field(sdb_send_ptr,
+ ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_M,
+ ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_S)) {
+ old_send = roce_read(hr_dev, ROCEE_SDB_SEND_PTR_REG);
+ old_retry = roce_read(hr_dev, ROCEE_SDB_RETRY_CNT_REG);
- /* Query db's list status, until hw reversal */
do {
- sdbinvcnt_val = roce_read(hr_dev,
- ROCEE_SDB_INV_CNT_REG);
+ tsp_st = roce_read(hr_dev, ROCEE_TSP_BP_ST_REG);
+ if (roce_get_bit(tsp_st,
+ ROCEE_TSP_BP_ST_QH_FIFO_ENTRY_S) == 1) {
+ *wait_stage = HNS_ROCE_V1_DB_WAIT_OK;
+ return 0;
+ }
+
if (!time_before(jiffies, end)) {
- dev_err(dev, "destroy qp(0x%lx) timeout!!!",
- hr_qp->qpn);
- dev_err(dev, "SdbInvCnt = 0x%x\n",
- sdbinvcnt_val);
- break;
+ dev_dbg(dev, "QP(0x%lx) db process stage1 timeout when send ptr equals issue ptr.\n"
+ "issue 0x%x send 0x%x.\n",
+ hr_qp->qpn, sdb_issue_ptr,
+ sdb_send_ptr);
+ return 0;
+ }
+
+ msleep(HNS_ROCE_V1_CHECK_DB_SLEEP_MSECS);
+
+ sdb_send_ptr = roce_read(hr_dev,
+ ROCEE_SDB_SEND_PTR_REG);
+ sdb_retry_cnt = roce_read(hr_dev,
+ ROCEE_SDB_RETRY_CNT_REG);
+ cur_cnt = roce_get_field(sdb_send_ptr,
+ ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_M,
+ ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_S) +
+ roce_get_field(sdb_retry_cnt,
+ ROCEE_SDB_RETRY_CNT_SDB_RETRY_CT_M,
+ ROCEE_SDB_RETRY_CNT_SDB_RETRY_CT_S);
+ if (!roce_get_bit(tsp_st,
+ ROCEE_CNT_CLR_CE_CNT_CLR_CE_S)) {
+ old_cnt = roce_get_field(old_send,
+ ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_M,
+ ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_S) +
+ roce_get_field(old_retry,
+ ROCEE_SDB_RETRY_CNT_SDB_RETRY_CT_M,
+ ROCEE_SDB_RETRY_CNT_SDB_RETRY_CT_S);
+ if (cur_cnt - old_cnt > SDB_ST_CMP_VAL)
+ success_flags = 1;
+ } else {
+ old_cnt = roce_get_field(old_send,
+ ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_M,
+ ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_S);
+ if (cur_cnt - old_cnt > SDB_ST_CMP_VAL)
+ success_flags = 1;
+ else {
+ send_ptr = roce_get_field(old_send,
+ ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_M,
+ ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_S) +
+ roce_get_field(sdb_retry_cnt,
+ ROCEE_SDB_RETRY_CNT_SDB_RETRY_CT_M,
+ ROCEE_SDB_RETRY_CNT_SDB_RETRY_CT_S);
+ roce_set_field(old_send,
+ ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_M,
+ ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_S,
+ send_ptr);
+ }
}
- } while ((short)(roce_get_field(sdbinvcnt_val,
- ROCEE_SDB_INV_CNT_SDB_INV_CNT_M,
- ROCEE_SDB_INV_CNT_SDB_INV_CNT_S) -
- (sdbinvcnt + SDB_INV_CNT_OFFSET)) < 0);
-
- /* Modify qp to reset before destroying qp */
- if (hns_roce_v1_qp_modify(hr_dev, NULL,
- to_hns_roce_state(
- (enum ib_qp_state)hr_qp->state),
- HNS_ROCE_QP_STATE_RST, NULL, hr_qp))
- dev_err(dev, "modify QP %06lx to RESET failed.\n",
- hr_qp->qpn);
+ } while (!success_flags);
}
+
+ *wait_stage = HNS_ROCE_V1_DB_STAGE2;
+
+ /* Get list pointer */
+ *sdb_inv_cnt = roce_read(hr_dev, ROCEE_SDB_INV_CNT_REG);
+ dev_dbg(dev, "QP(0x%lx) db process stage2. inv cnt = 0x%x.\n",
+ hr_qp->qpn, *sdb_inv_cnt);
+ }
+
+ if (*wait_stage == HNS_ROCE_V1_DB_STAGE2) {
+ /* Query db's list status, until hw reversal */
+ inv_cnt = roce_read(hr_dev, ROCEE_SDB_INV_CNT_REG);
+ while (roce_hw_index_cmp_lt(inv_cnt,
+ *sdb_inv_cnt + SDB_INV_CNT_OFFSET,
+ ROCEE_SDB_CNT_CMP_BITS)) {
+ if (!time_before(jiffies, end)) {
+ dev_dbg(dev, "QP(0x%lx) db process stage2 timeout. inv cnt 0x%x.\n",
+ hr_qp->qpn, inv_cnt);
+ return 0;
+ }
+
+ msleep(HNS_ROCE_V1_CHECK_DB_SLEEP_MSECS);
+ inv_cnt = roce_read(hr_dev, ROCEE_SDB_INV_CNT_REG);
+ }
+
+ *wait_stage = HNS_ROCE_V1_DB_WAIT_OK;
+ }
+
+ return 0;
+}
+
+static int check_qp_reset_state(struct hns_roce_dev *hr_dev,
+ struct hns_roce_qp *hr_qp,
+ struct hns_roce_qp_work *qp_work_entry,
+ int *is_timeout)
+{
+ struct device *dev = &hr_dev->pdev->dev;
+ u32 sdb_issue_ptr;
+ int ret;
+
+ if (hr_qp->state != IB_QPS_RESET) {
+ /* Set qp to ERR, waiting for hw complete processing all dbs */
+ ret = hns_roce_v1_modify_qp(&hr_qp->ibqp, NULL, 0, hr_qp->state,
+ IB_QPS_ERR);
+ if (ret) {
+ dev_err(dev, "Modify QP(0x%lx) to ERR failed!\n",
+ hr_qp->qpn);
+ return ret;
+ }
+
+ /* Record issued doorbell */
+ sdb_issue_ptr = roce_read(hr_dev, ROCEE_SDB_ISSUE_PTR_REG);
+ qp_work_entry->sdb_issue_ptr = sdb_issue_ptr;
+ qp_work_entry->db_wait_stage = HNS_ROCE_V1_DB_STAGE1;
+
+ /* Query db process status, until hw process completely */
+ ret = check_qp_db_process_status(hr_dev, hr_qp, sdb_issue_ptr,
+ &qp_work_entry->sdb_inv_cnt,
+ &qp_work_entry->db_wait_stage);
+ if (ret) {
+ dev_err(dev, "Check QP(0x%lx) db process status failed!\n",
+ hr_qp->qpn);
+ return ret;
+ }
+
+ if (qp_work_entry->db_wait_stage != HNS_ROCE_V1_DB_WAIT_OK) {
+ qp_work_entry->sche_cnt = 0;
+ *is_timeout = 1;
+ return 0;
+ }
+
+ /* Modify qp to reset before destroying qp */
+ ret = hns_roce_v1_modify_qp(&hr_qp->ibqp, NULL, 0, hr_qp->state,
+ IB_QPS_RESET);
+ if (ret) {
+ dev_err(dev, "Modify QP(0x%lx) to RST failed!\n",
+ hr_qp->qpn);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static void hns_roce_v1_destroy_qp_work_fn(struct work_struct *work)
+{
+ struct hns_roce_qp_work *qp_work_entry;
+ struct hns_roce_v1_priv *priv;
+ struct hns_roce_dev *hr_dev;
+ struct hns_roce_qp *hr_qp;
+ struct device *dev;
+ int ret;
+
+ qp_work_entry = container_of(work, struct hns_roce_qp_work, work);
+ hr_dev = to_hr_dev(qp_work_entry->ib_dev);
+ dev = &hr_dev->pdev->dev;
+ priv = (struct hns_roce_v1_priv *)hr_dev->hw->priv;
+ hr_qp = qp_work_entry->qp;
+
+ dev_dbg(dev, "Schedule destroy QP(0x%lx) work.\n", hr_qp->qpn);
+
+ qp_work_entry->sche_cnt++;
+
+ /* Query db process status, until hw process completely */
+ ret = check_qp_db_process_status(hr_dev, hr_qp,
+ qp_work_entry->sdb_issue_ptr,
+ &qp_work_entry->sdb_inv_cnt,
+ &qp_work_entry->db_wait_stage);
+ if (ret) {
+ dev_err(dev, "Check QP(0x%lx) db process status failed!\n",
+ hr_qp->qpn);
+ return;
+ }
+
+ if (qp_work_entry->db_wait_stage != HNS_ROCE_V1_DB_WAIT_OK &&
+ priv->des_qp.requeue_flag) {
+ queue_work(priv->des_qp.qp_wq, work);
+ return;
+ }
+
+ /* Modify qp to reset before destroying qp */
+ ret = hns_roce_v1_modify_qp(&hr_qp->ibqp, NULL, 0, hr_qp->state,
+ IB_QPS_RESET);
+ if (ret) {
+ dev_err(dev, "Modify QP(0x%lx) to RST failed!\n", hr_qp->qpn);
+ return;
+ }
+
+ hns_roce_qp_remove(hr_dev, hr_qp);
+ hns_roce_qp_free(hr_dev, hr_qp);
+
+ if (hr_qp->ibqp.qp_type == IB_QPT_RC) {
+ /* RC QP, release QPN */
+ hns_roce_release_range_qp(hr_dev, hr_qp->qpn, 1);
+ kfree(hr_qp);
+ } else
+ kfree(hr_to_hr_sqp(hr_qp));
+
+ kfree(qp_work_entry);
+
+ dev_dbg(dev, "Accomplished destroy QP(0x%lx) work.\n", hr_qp->qpn);
+}
+
+int hns_roce_v1_destroy_qp(struct ib_qp *ibqp)
+{
+ struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
+ struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
+ struct device *dev = &hr_dev->pdev->dev;
+ struct hns_roce_qp_work qp_work_entry;
+ struct hns_roce_qp_work *qp_work;
+ struct hns_roce_v1_priv *priv;
+ struct hns_roce_cq *send_cq, *recv_cq;
+ int is_user = !!ibqp->pd->uobject;
+ int is_timeout = 0;
+ int ret;
+
+ ret = check_qp_reset_state(hr_dev, hr_qp, &qp_work_entry, &is_timeout);
+ if (ret) {
+ dev_err(dev, "QP reset state check failed(%d)!\n", ret);
+ return ret;
}
send_cq = to_hr_cq(hr_qp->ibqp.send_cq);
recv_cq = to_hr_cq(hr_qp->ibqp.recv_cq);
hns_roce_lock_cqs(send_cq, recv_cq);
-
if (!is_user) {
__hns_roce_v1_cq_clean(recv_cq, hr_qp->qpn, hr_qp->ibqp.srq ?
to_hr_srq(hr_qp->ibqp.srq) : NULL);
if (send_cq != recv_cq)
__hns_roce_v1_cq_clean(send_cq, hr_qp->qpn, NULL);
}
-
- hns_roce_qp_remove(hr_dev, hr_qp);
-
hns_roce_unlock_cqs(send_cq, recv_cq);
- hns_roce_qp_free(hr_dev, hr_qp);
+ if (!is_timeout) {
+ hns_roce_qp_remove(hr_dev, hr_qp);
+ hns_roce_qp_free(hr_dev, hr_qp);
- /* Not special_QP, free their QPN */
- if ((hr_qp->ibqp.qp_type == IB_QPT_RC) ||
- (hr_qp->ibqp.qp_type == IB_QPT_UC) ||
- (hr_qp->ibqp.qp_type == IB_QPT_UD))
- hns_roce_release_range_qp(hr_dev, hr_qp->qpn, 1);
+ /* RC QP, release QPN */
+ if (hr_qp->ibqp.qp_type == IB_QPT_RC)
+ hns_roce_release_range_qp(hr_dev, hr_qp->qpn, 1);
+ }
hns_roce_mtt_cleanup(hr_dev, &hr_qp->mtt);
- if (is_user) {
+ if (is_user)
ib_umem_release(hr_qp->umem);
- } else {
+ else {
kfree(hr_qp->sq.wrid);
kfree(hr_qp->rq.wrid);
+
hns_roce_buf_free(hr_dev, hr_qp->buff_size, &hr_qp->hr_buf);
}
+
+ if (!is_timeout) {
+ if (hr_qp->ibqp.qp_type == IB_QPT_RC)
+ kfree(hr_qp);
+ else
+ kfree(hr_to_hr_sqp(hr_qp));
+ } else {
+ qp_work = kzalloc(sizeof(*qp_work), GFP_KERNEL);
+ if (!qp_work)
+ return -ENOMEM;
+
+ INIT_WORK(&qp_work->work, hns_roce_v1_destroy_qp_work_fn);
+ qp_work->ib_dev = &hr_dev->ib_dev;
+ qp_work->qp = hr_qp;
+ qp_work->db_wait_stage = qp_work_entry.db_wait_stage;
+ qp_work->sdb_issue_ptr = qp_work_entry.sdb_issue_ptr;
+ qp_work->sdb_inv_cnt = qp_work_entry.sdb_inv_cnt;
+ qp_work->sche_cnt = qp_work_entry.sche_cnt;
+
+ priv = (struct hns_roce_v1_priv *)hr_dev->hw->priv;
+ queue_work(priv->des_qp.qp_wq, &qp_work->work);
+ dev_dbg(dev, "Begin destroy QP(0x%lx) work.\n", hr_qp->qpn);
+ }
+
+ return 0;
}
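
Note the split this rewrite introduces: when the doorbell drain finished in time, the QP is freed inline as before, but on timeout the probe state is copied into a heap-allocated hns_roce_qp_work and handed to the destroy workqueue. A small sketch of that synchronous-vs-deferred decision, with a direct call standing in for queue_work():

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct qp { int qpn; };

struct qp_work {
	struct qp *qp;
	unsigned int wait_stage;	/* snapshot of the drain state */
};

static void deferred_destroy(struct qp_work *w)
{
	/* in the driver this runs from a workqueue and re-polls first */
	printf("deferred free of qp %d (stage %u)\n",
	       w->qp->qpn, w->wait_stage);
	free(w->qp);
	free(w);
}

static int destroy_qp(struct qp *qp, bool timed_out, unsigned int stage)
{
	if (!timed_out) {
		free(qp);		/* fast path: hw already quiesced */
		return 0;
	}

	struct qp_work *w = malloc(sizeof(*w));

	if (!w)
		return -1;		/* -ENOMEM in the kernel */
	w->qp = qp;
	w->wait_stage = stage;
	deferred_destroy(w);		/* queue_work() in the driver */
	return 0;
}

int main(void)
{
	struct qp *a = malloc(sizeof(*a)), *b = malloc(sizeof(*b));

	a->qpn = 1;
	b->qpn = 2;
	destroy_qp(a, false, 0);
	destroy_qp(b, true, 2);
	return 0;
}
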
-int hns_roce_v1_destroy_qp(struct ib_qp *ibqp)
+int hns_roce_v1_destroy_cq(struct ib_cq *ibcq)
{
- struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
- struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
+ struct hns_roce_dev *hr_dev = to_hr_dev(ibcq->device);
+ struct hns_roce_cq *hr_cq = to_hr_cq(ibcq);
+ struct device *dev = &hr_dev->pdev->dev;
+ u32 cqe_cnt_ori;
+ u32 cqe_cnt_cur;
+ u32 cq_buf_size;
+ int wait_time = 0;
+ int ret = 0;
- hns_roce_v1_destroy_qp_common(hr_dev, hr_qp, !!ibqp->pd->uobject);
+ hns_roce_free_cq(hr_dev, hr_cq);
- if (hr_qp->ibqp.qp_type == IB_QPT_GSI)
- kfree(hr_to_hr_sqp(hr_qp));
- else
- kfree(hr_qp);
+ /*
+ * Before freeing the cq buffer, check the CQE counter to ensure
+ * that all outstanding CQEs have been written back by hw.
+ */
+ cqe_cnt_ori = roce_read(hr_dev, ROCEE_SCAEP_WR_CQE_CNT);
+ while (1) {
+ if (roce_read(hr_dev, ROCEE_CAEP_CQE_WCMD_EMPTY) &
+ HNS_ROCE_CQE_WCMD_EMPTY_BIT)
+ break;
- return 0;
+ cqe_cnt_cur = roce_read(hr_dev, ROCEE_SCAEP_WR_CQE_CNT);
+ if ((cqe_cnt_cur - cqe_cnt_ori) >= HNS_ROCE_MIN_CQE_CNT)
+ break;
+
+ msleep(HNS_ROCE_EACH_FREE_CQ_WAIT_MSECS);
+ if (wait_time > HNS_ROCE_MAX_FREE_CQ_WAIT_CNT) {
+ dev_warn(dev, "Destroy cq 0x%lx timeout!\n",
+ hr_cq->cqn);
+ ret = -ETIMEDOUT;
+ break;
+ }
+ wait_time++;
+ }
+
+ hns_roce_mtt_cleanup(hr_dev, &hr_cq->hr_buf.hr_mtt);
+
+ if (ibcq->uobject)
+ ib_umem_release(hr_cq->umem);
+ else {
+ /* Free the buffer of the kernel-created cq */
+ cq_buf_size = (ibcq->cqe + 1) * hr_dev->caps.cq_entry_sz;
+ hns_roce_buf_free(hr_dev, cq_buf_size, &hr_cq->hr_buf.hr_buf);
+ }
+
+ kfree(hr_cq);
+
+ return ret;
}
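
The drain loop in hns_roce_v1_destroy_cq() has two success exits, the hardware's write-queue-empty bit or sufficient forward progress of the CQE counter past the starting snapshot, plus a bounded-retry timeout. A user-space model of that loop; the fake_* readers are hypothetical stand-ins for the roce_read() register accesses:

#include <stdint.h>
#include <stdio.h>

#define MIN_CQE_CNT 16	/* models HNS_ROCE_MIN_CQE_CNT */
#define MAX_WAIT 50	/* models HNS_ROCE_MAX_FREE_CQ_WAIT_CNT */

static int steps;

static int fake_empty_bit(void) { return 0; }	/* never reports empty */
static uint32_t fake_cqe_cnt(void) { return (uint32_t)(steps++ * 4); }

int main(void)
{
	uint32_t ori = fake_cqe_cnt();
	int wait = 0, ret = 0;

	for (;;) {
		if (fake_empty_bit())
			break;		/* hw write queue drained */
		if (fake_cqe_cnt() - ori >= MIN_CQE_CNT)
			break;		/* enough progress: CQEs landed */
		if (wait++ > MAX_WAIT) {
			ret = -1;	/* -ETIMEDOUT in the driver */
			break;
		}
		/* msleep(HNS_ROCE_EACH_FREE_CQ_WAIT_MSECS) in the kernel */
	}
	printf("drained=%d after %d polls\n", ret == 0, wait);
	return ret ? 1 : 0;
}
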
struct hns_roce_v1_priv hr_v1_priv;
@@ -2917,5 +3835,7 @@ struct hns_roce_hw hns_roce_hw_v1 = {
.post_recv = hns_roce_v1_post_recv,
.req_notify_cq = hns_roce_v1_req_notify_cq,
.poll_cq = hns_roce_v1_poll_cq,
+ .dereg_mr = hns_roce_v1_dereg_mr,
+ .destroy_cq = hns_roce_v1_destroy_cq,
.priv = &hr_v1_priv,
};
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v1.h b/drivers/infiniband/hw/hns/hns_roce_hw_v1.h
index 539b0a3b92b0..b213b5e6fef1 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v1.h
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v1.h
@@ -58,6 +58,7 @@
#define HNS_ROCE_V1_PHY_UAR_NUM 8
#define HNS_ROCE_V1_GID_NUM 16
+#define HNS_ROCE_V1_RESV_QP 8
#define HNS_ROCE_V1_NUM_COMP_EQE 0x8000
#define HNS_ROCE_V1_NUM_ASYNC_EQE 0x400
@@ -102,8 +103,22 @@
#define HNS_ROCE_V1_EXT_ODB_ALFUL \
(HNS_ROCE_V1_EXT_ODB_DEPTH - HNS_ROCE_V1_DB_RSVD)
+#define HNS_ROCE_V1_DB_WAIT_OK 0
+#define HNS_ROCE_V1_DB_STAGE1 1
+#define HNS_ROCE_V1_DB_STAGE2 2
+#define HNS_ROCE_V1_CHECK_DB_TIMEOUT_MSECS 10000
+#define HNS_ROCE_V1_CHECK_DB_SLEEP_MSECS 20
+#define HNS_ROCE_V1_FREE_MR_TIMEOUT_MSECS 50000
+#define HNS_ROCE_V1_RECREATE_LP_QP_TIMEOUT_MSECS 10000
+#define HNS_ROCE_V1_FREE_MR_WAIT_VALUE 5
+#define HNS_ROCE_V1_RECREATE_LP_QP_WAIT_VALUE 20
+
#define HNS_ROCE_BT_RSV_BUF_SIZE (1 << 17)
+#define HNS_ROCE_V1_TPTR_ENTRY_SIZE 2
+#define HNS_ROCE_V1_TPTR_BUF_SIZE \
+ (HNS_ROCE_V1_TPTR_ENTRY_SIZE * HNS_ROCE_V1_MAX_CQ_NUM)
+
#define HNS_ROCE_ODB_POLL_MODE 0
#define HNS_ROCE_SDB_NORMAL_MODE 0
@@ -140,6 +155,7 @@
#define SQ_PSN_SHIFT 8
#define QKEY_VAL 0x80010000
#define SDB_INV_CNT_OFFSET 8
+#define SDB_ST_CMP_VAL 8
struct hns_roce_cq_context {
u32 cqc_byte_4;
@@ -436,6 +452,8 @@ struct hns_roce_ud_send_wqe {
#define UD_SEND_WQE_U32_8_DMAC_5_M \
(((1UL << 8) - 1) << UD_SEND_WQE_U32_8_DMAC_5_S)
+#define UD_SEND_WQE_U32_8_LOOPBACK_INDICATOR_S 22
+
#define UD_SEND_WQE_U32_8_OPERATION_TYPE_S 16
#define UD_SEND_WQE_U32_8_OPERATION_TYPE_M \
(((1UL << 4) - 1) << UD_SEND_WQE_U32_8_OPERATION_TYPE_S)
@@ -480,13 +498,17 @@ struct hns_roce_sqp_context {
u32 qp1c_bytes_12;
u32 qp1c_bytes_16;
u32 qp1c_bytes_20;
- u32 qp1c_bytes_28;
u32 cur_rq_wqe_ba_l;
+ u32 qp1c_bytes_28;
u32 qp1c_bytes_32;
u32 cur_sq_wqe_ba_l;
u32 qp1c_bytes_40;
};
+#define QP1C_BYTES_4_QP_STATE_S 0
+#define QP1C_BYTES_4_QP_STATE_M \
+ (((1UL << 3) - 1) << QP1C_BYTES_4_QP_STATE_S)
+
#define QP1C_BYTES_4_SQ_WQE_SHIFT_S 8
#define QP1C_BYTES_4_SQ_WQE_SHIFT_M \
(((1UL << 4) - 1) << QP1C_BYTES_4_SQ_WQE_SHIFT_S)
@@ -952,6 +974,10 @@ struct hns_roce_sq_db {
#define SQ_DOORBELL_U32_4_SQ_HEAD_M \
(((1UL << 15) - 1) << SQ_DOORBELL_U32_4_SQ_HEAD_S)
+#define SQ_DOORBELL_U32_4_SL_S 16
+#define SQ_DOORBELL_U32_4_SL_M \
+ (((1UL << 2) - 1) << SQ_DOORBELL_U32_4_SL_S)
+
#define SQ_DOORBELL_U32_4_PORT_S 18
#define SQ_DOORBELL_U32_4_PORT_M (((1UL << 3) - 1) << SQ_DOORBELL_U32_4_PORT_S)
@@ -979,12 +1005,58 @@ struct hns_roce_bt_table {
struct hns_roce_buf_list cqc_buf;
};
+struct hns_roce_tptr_table {
+ struct hns_roce_buf_list tptr_buf;
+};
+
+struct hns_roce_qp_work {
+ struct work_struct work;
+ struct ib_device *ib_dev;
+ struct hns_roce_qp *qp;
+ u32 db_wait_stage;
+ u32 sdb_issue_ptr;
+ u32 sdb_inv_cnt;
+ u32 sche_cnt;
+};
+
+struct hns_roce_des_qp {
+ struct workqueue_struct *qp_wq;
+ int requeue_flag;
+};
+
+struct hns_roce_mr_free_work {
+ struct work_struct work;
+ struct ib_device *ib_dev;
+ struct completion *comp;
+ int comp_flag;
+ void *mr;
+};
+
+struct hns_roce_recreate_lp_qp_work {
+ struct work_struct work;
+ struct ib_device *ib_dev;
+ struct completion *comp;
+ int comp_flag;
+};
+
+struct hns_roce_free_mr {
+ struct workqueue_struct *free_mr_wq;
+ struct hns_roce_qp *mr_free_qp[HNS_ROCE_V1_RESV_QP];
+ struct hns_roce_cq *mr_free_cq;
+ struct hns_roce_pd *mr_free_pd;
+};
+
struct hns_roce_v1_priv {
struct hns_roce_db_table db_table;
struct hns_roce_raq_table raq_table;
struct hns_roce_bt_table bt_table;
+ struct hns_roce_tptr_table tptr_table;
+ struct hns_roce_des_qp des_qp;
+ struct hns_roce_free_mr free_mr;
};
int hns_dsaf_roce_reset(struct fwnode_handle *dsaf_fwnode, bool dereset);
+int hns_roce_v1_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
+int hns_roce_v1_destroy_qp(struct ib_qp *ibqp);
#endif
diff --git a/drivers/infiniband/hw/hns/hns_roce_main.c b/drivers/infiniband/hw/hns/hns_roce_main.c
index 764e35a54457..4953d9cb83a7 100644
--- a/drivers/infiniband/hw/hns/hns_roce_main.c
+++ b/drivers/infiniband/hw/hns/hns_roce_main.c
@@ -35,52 +35,13 @@
#include <rdma/ib_addr.h>
#include <rdma/ib_smi.h>
#include <rdma/ib_user_verbs.h>
+#include <rdma/ib_cache.h>
#include "hns_roce_common.h"
#include "hns_roce_device.h"
-#include "hns_roce_user.h"
+#include <rdma/hns-abi.h>
#include "hns_roce_hem.h"
/**
- * hns_roce_addrconf_ifid_eui48 - Get default gid.
- * @eui: eui.
- * @vlan_id: gid
- * @dev: net device
- * Description:
- * MAC convert to GID
- * gid[0..7] = fe80 0000 0000 0000
- * gid[8] = mac[0] ^ 2
- * gid[9] = mac[1]
- * gid[10] = mac[2]
- * gid[11] = ff (VLAN ID high byte (4 MS bits))
- * gid[12] = fe (VLAN ID low byte)
- * gid[13] = mac[3]
- * gid[14] = mac[4]
- * gid[15] = mac[5]
- */
-static void hns_roce_addrconf_ifid_eui48(u8 *eui, u16 vlan_id,
- struct net_device *dev)
-{
- memcpy(eui, dev->dev_addr, 3);
- memcpy(eui + 5, dev->dev_addr + 3, 3);
- if (vlan_id < 0x1000) {
- eui[3] = vlan_id >> 8;
- eui[4] = vlan_id & 0xff;
- } else {
- eui[3] = 0xff;
- eui[4] = 0xfe;
- }
- eui[0] ^= 2;
-}
-
-static void hns_roce_make_default_gid(struct net_device *dev, union ib_gid *gid)
-{
- memset(gid, 0, sizeof(*gid));
- gid->raw[0] = 0xFE;
- gid->raw[1] = 0x80;
- hns_roce_addrconf_ifid_eui48(&gid->raw[8], 0xffff, dev);
-}
-
-/**
* hns_get_gid_index - Get gid index.
* @hr_dev: pointer to structure hns_roce_dev.
* @port: port, value range: 0 ~ MAX
@@ -96,30 +57,6 @@ int hns_get_gid_index(struct hns_roce_dev *hr_dev, u8 port, int gid_index)
return gid_index * hr_dev->caps.num_ports + port;
}
-static int hns_roce_set_gid(struct hns_roce_dev *hr_dev, u8 port, int gid_index,
- union ib_gid *gid)
-{
- struct device *dev = &hr_dev->pdev->dev;
- u8 gid_idx = 0;
-
- if (gid_index >= hr_dev->caps.gid_table_len[port]) {
- dev_err(dev, "gid_index %d illegal, port %d gid range: 0~%d\n",
- gid_index, port, hr_dev->caps.gid_table_len[port] - 1);
- return -EINVAL;
- }
-
- gid_idx = hns_get_gid_index(hr_dev, port, gid_index);
-
- if (!memcmp(gid, &hr_dev->iboe.gid_table[gid_idx], sizeof(*gid)))
- return -EINVAL;
-
- memcpy(&hr_dev->iboe.gid_table[gid_idx], gid, sizeof(*gid));
-
- hr_dev->hw->set_gid(hr_dev, port, gid_index, gid);
-
- return 0;
-}
-
static void hns_roce_set_mac(struct hns_roce_dev *hr_dev, u8 port, u8 *addr)
{
u8 phy_port;
@@ -135,27 +72,44 @@ static void hns_roce_set_mac(struct hns_roce_dev *hr_dev, u8 port, u8 *addr)
hr_dev->hw->set_mac(hr_dev, phy_port, addr);
}
-static void hns_roce_set_mtu(struct hns_roce_dev *hr_dev, u8 port, int mtu)
+static int hns_roce_add_gid(struct ib_device *device, u8 port_num,
+ unsigned int index, const union ib_gid *gid,
+ const struct ib_gid_attr *attr, void **context)
{
- u8 phy_port = hr_dev->iboe.phy_port[port];
- enum ib_mtu tmp;
+ struct hns_roce_dev *hr_dev = to_hr_dev(device);
+ u8 port = port_num - 1;
+ unsigned long flags;
+
+ if (port >= hr_dev->caps.num_ports)
+ return -EINVAL;
+
+ spin_lock_irqsave(&hr_dev->iboe.lock, flags);
- tmp = iboe_get_mtu(mtu);
- if (!tmp)
- tmp = IB_MTU_256;
+ hr_dev->hw->set_gid(hr_dev, port, index, (union ib_gid *)gid);
- hr_dev->hw->set_mtu(hr_dev, phy_port, tmp);
+ spin_unlock_irqrestore(&hr_dev->iboe.lock, flags);
+
+ return 0;
}
-static void hns_roce_update_gids(struct hns_roce_dev *hr_dev, int port)
+static int hns_roce_del_gid(struct ib_device *device, u8 port_num,
+ unsigned int index, void **context)
{
- struct ib_event event;
+ struct hns_roce_dev *hr_dev = to_hr_dev(device);
+ union ib_gid zgid = { {0} };
+ u8 port = port_num - 1;
+ unsigned long flags;
+
+ if (port >= hr_dev->caps.num_ports)
+ return -EINVAL;
+
+ spin_lock_irqsave(&hr_dev->iboe.lock, flags);
- /* Refresh gid in ib_cache */
- event.device = &hr_dev->ib_dev;
- event.element.port_num = port + 1;
- event.event = IB_EVENT_GID_CHANGE;
- ib_dispatch_event(&event);
+ hr_dev->hw->set_gid(hr_dev, port, index, &zgid);
+
+ spin_unlock_irqrestore(&hr_dev->iboe.lock, flags);
+
+ return 0;
}
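
Both new GID hooks funnel into the same hw->set_gid() write under iboe.lock; deletion is just an add of the all-zero GID, so the hardware slot is cleared rather than left stale. A small stand-alone model of that convention, with a plain array and a pthread mutex in place of the hardware hook:

#include <pthread.h>
#include <stdio.h>
#include <string.h>

#define GID_SIZE 16
#define GID_SLOTS 8

static unsigned char gid_table[GID_SLOTS][GID_SIZE];
static pthread_mutex_t gid_lock = PTHREAD_MUTEX_INITIALIZER;

static int set_gid(unsigned int index, const unsigned char *gid)
{
	if (index >= GID_SLOTS)
		return -1;	/* -EINVAL */
	pthread_mutex_lock(&gid_lock);
	memcpy(gid_table[index], gid, GID_SIZE);	/* hw->set_gid() */
	pthread_mutex_unlock(&gid_lock);
	return 0;
}

static int del_gid(unsigned int index)
{
	static const unsigned char zgid[GID_SIZE];	/* all zeroes */

	return set_gid(index, zgid);	/* delete == store the zero gid */
}

int main(void)
{
	unsigned char gid[GID_SIZE] = { 0xfe, 0x80 };

	set_gid(2, gid);
	del_gid(2);
	printf("slot 2 byte 0 = 0x%02x\n", gid_table[2][0]);
	return 0;
}
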
static int handle_en_event(struct hns_roce_dev *hr_dev, u8 port,
@@ -163,9 +117,6 @@ static int handle_en_event(struct hns_roce_dev *hr_dev, u8 port,
{
struct device *dev = &hr_dev->pdev->dev;
struct net_device *netdev;
- unsigned long flags;
- union ib_gid gid;
- int ret = 0;
netdev = hr_dev->iboe.netdevs[port];
if (!netdev) {
@@ -173,7 +124,7 @@ static int handle_en_event(struct hns_roce_dev *hr_dev, u8 port,
return -ENODEV;
}
- spin_lock_irqsave(&hr_dev->iboe.lock, flags);
+ spin_lock_bh(&hr_dev->iboe.lock);
switch (event) {
case NETDEV_UP:
@@ -181,23 +132,19 @@ static int handle_en_event(struct hns_roce_dev *hr_dev, u8 port,
case NETDEV_REGISTER:
case NETDEV_CHANGEADDR:
hns_roce_set_mac(hr_dev, port, netdev->dev_addr);
- hns_roce_make_default_gid(netdev, &gid);
- ret = hns_roce_set_gid(hr_dev, port, 0, &gid);
- if (!ret)
- hns_roce_update_gids(hr_dev, port);
break;
case NETDEV_DOWN:
/*
- * In v1 engine, only support all ports closed together.
- */
+ * The v1 engine only supports closing all ports together.
+ */
break;
default:
dev_dbg(dev, "NETDEV event = 0x%x!\n", (u32)(event));
break;
}
- spin_unlock_irqrestore(&hr_dev->iboe.lock, flags);
- return ret;
+ spin_unlock_bh(&hr_dev->iboe.lock);
+ return 0;
}
static int hns_roce_netdev_event(struct notifier_block *self,
@@ -224,118 +171,17 @@ static int hns_roce_netdev_event(struct notifier_block *self,
return NOTIFY_DONE;
}
-static void hns_roce_addr_event(int event, struct net_device *event_netdev,
- struct hns_roce_dev *hr_dev, union ib_gid *gid)
+static int hns_roce_setup_mtu_mac(struct hns_roce_dev *hr_dev)
{
- struct hns_roce_ib_iboe *iboe = NULL;
- int gid_table_len = 0;
- unsigned long flags;
- union ib_gid zgid;
- u8 gid_idx = 0;
- u8 port = 0;
- int i = 0;
- int free;
- struct net_device *real_dev = rdma_vlan_dev_real_dev(event_netdev) ?
- rdma_vlan_dev_real_dev(event_netdev) :
- event_netdev;
-
- if (event != NETDEV_UP && event != NETDEV_DOWN)
- return;
-
- iboe = &hr_dev->iboe;
- while (port < hr_dev->caps.num_ports) {
- if (real_dev == iboe->netdevs[port])
- break;
- port++;
- }
-
- if (port >= hr_dev->caps.num_ports) {
- dev_dbg(&hr_dev->pdev->dev, "can't find netdev\n");
- return;
- }
-
- memset(zgid.raw, 0, sizeof(zgid.raw));
- free = -1;
- gid_table_len = hr_dev->caps.gid_table_len[port];
-
- spin_lock_irqsave(&hr_dev->iboe.lock, flags);
-
- for (i = 0; i < gid_table_len; i++) {
- gid_idx = hns_get_gid_index(hr_dev, port, i);
- if (!memcmp(gid->raw, iboe->gid_table[gid_idx].raw,
- sizeof(gid->raw)))
- break;
- if (free < 0 && !memcmp(zgid.raw,
- iboe->gid_table[gid_idx].raw, sizeof(zgid.raw)))
- free = i;
- }
-
- if (i >= gid_table_len) {
- if (free < 0) {
- spin_unlock_irqrestore(&hr_dev->iboe.lock, flags);
- dev_dbg(&hr_dev->pdev->dev,
- "gid_index overflow, port(%d)\n", port);
- return;
- }
- if (!hns_roce_set_gid(hr_dev, port, free, gid))
- hns_roce_update_gids(hr_dev, port);
- } else if (event == NETDEV_DOWN) {
- if (!hns_roce_set_gid(hr_dev, port, i, &zgid))
- hns_roce_update_gids(hr_dev, port);
- }
-
- spin_unlock_irqrestore(&hr_dev->iboe.lock, flags);
-}
-
-static int hns_roce_inet_event(struct notifier_block *self, unsigned long event,
- void *ptr)
-{
- struct in_ifaddr *ifa = ptr;
- struct hns_roce_dev *hr_dev;
- struct net_device *dev = ifa->ifa_dev->dev;
- union ib_gid gid;
-
- ipv6_addr_set_v4mapped(ifa->ifa_address, (struct in6_addr *)&gid);
-
- hr_dev = container_of(self, struct hns_roce_dev, iboe.nb_inet);
-
- hns_roce_addr_event(event, dev, hr_dev, &gid);
-
- return NOTIFY_DONE;
-}
-
-static int hns_roce_setup_mtu_gids(struct hns_roce_dev *hr_dev)
-{
- struct in_ifaddr *ifa_list = NULL;
- union ib_gid gid = {{0} };
- u32 ipaddr = 0;
- int index = 0;
- int ret = 0;
- u8 i = 0;
+ u8 i;
for (i = 0; i < hr_dev->caps.num_ports; i++) {
- hns_roce_set_mtu(hr_dev, i,
- ib_mtu_enum_to_int(hr_dev->caps.max_mtu));
+ hr_dev->hw->set_mtu(hr_dev, hr_dev->iboe.phy_port[i],
+ hr_dev->caps.max_mtu);
hns_roce_set_mac(hr_dev, i, hr_dev->iboe.netdevs[i]->dev_addr);
-
- if (hr_dev->iboe.netdevs[i]->ip_ptr) {
- ifa_list = hr_dev->iboe.netdevs[i]->ip_ptr->ifa_list;
- index = 1;
- while (ifa_list) {
- ipaddr = ifa_list->ifa_address;
- ipv6_addr_set_v4mapped(ipaddr,
- (struct in6_addr *)&gid);
- ret = hns_roce_set_gid(hr_dev, i, index, &gid);
- if (ret)
- break;
- index++;
- ifa_list = ifa_list->ifa_next;
- }
- hns_roce_update_gids(hr_dev, i);
- }
}
- return ret;
+ return 0;
}
static int hns_roce_query_device(struct ib_device *ib_dev,
@@ -444,31 +290,6 @@ static enum rdma_link_layer hns_roce_get_link_layer(struct ib_device *device,
static int hns_roce_query_gid(struct ib_device *ib_dev, u8 port_num, int index,
union ib_gid *gid)
{
- struct hns_roce_dev *hr_dev = to_hr_dev(ib_dev);
- struct device *dev = &hr_dev->pdev->dev;
- u8 gid_idx = 0;
- u8 port;
-
- if (port_num < 1 || port_num > hr_dev->caps.num_ports ||
- index >= hr_dev->caps.gid_table_len[port_num - 1]) {
- dev_err(dev,
- "port_num %d index %d illegal! correct range: port_num 1~%d index 0~%d!\n",
- port_num, index, hr_dev->caps.num_ports,
- hr_dev->caps.gid_table_len[port_num - 1] - 1);
- return -EINVAL;
- }
-
- port = port_num - 1;
- gid_idx = hns_get_gid_index(hr_dev, port, index);
- if (gid_idx >= HNS_ROCE_MAX_GID_NUM) {
- dev_err(dev, "port_num %d index %d illegal! total gid num %d!\n",
- port_num, index, HNS_ROCE_MAX_GID_NUM);
- return -EINVAL;
- }
-
- memcpy(gid->raw, hr_dev->iboe.gid_table[gid_idx].raw,
- HNS_ROCE_GID_SIZE);
-
return 0;
}
@@ -549,6 +370,8 @@ static int hns_roce_dealloc_ucontext(struct ib_ucontext *ibcontext)
static int hns_roce_mmap(struct ib_ucontext *context,
struct vm_area_struct *vma)
{
+ struct hns_roce_dev *hr_dev = to_hr_dev(context->device);
+
if (((vma->vm_end - vma->vm_start) % PAGE_SIZE) != 0)
return -EINVAL;
@@ -558,10 +381,15 @@ static int hns_roce_mmap(struct ib_ucontext *context,
to_hr_ucontext(context)->uar.pfn,
PAGE_SIZE, vma->vm_page_prot))
return -EAGAIN;
-
- } else {
+ } else if (vma->vm_pgoff == 1 && hr_dev->hw_rev == HNS_ROCE_HW_VER1) {
+ /* vm_pgoff: 1 -- TPTR */
+ if (io_remap_pfn_range(vma, vma->vm_start,
+ hr_dev->tptr_dma_addr >> PAGE_SHIFT,
+ hr_dev->tptr_size,
+ vma->vm_page_prot))
+ return -EAGAIN;
+ } else
return -EINVAL;
- }
return 0;
}
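
hns_roce_mmap() now dispatches on the page offset: vm_pgoff 0 maps the doorbell (UAR) page as before, vm_pgoff 1 maps the new TPTR page but only on hardware v1, and anything else is rejected. A sketch of that offset-keyed dispatch, reduced to a plain lookup:

#include <stdio.h>

enum region { REGION_UAR, REGION_TPTR, REGION_INVAL };

static enum region select_region(unsigned long pgoff, int hw_is_v1)
{
	if (pgoff == 0)
		return REGION_UAR;	/* doorbell page */
	if (pgoff == 1 && hw_is_v1)
		return REGION_TPTR;	/* io_remap_pfn_range(tptr_dma_addr) */
	return REGION_INVAL;		/* -EINVAL */
}

int main(void)
{
	printf("%d %d %d\n",
	       select_region(0, 1),	/* UAR */
	       select_region(1, 1),	/* TPTR on v1 */
	       select_region(1, 0));	/* rejected on other hw */
	return 0;
}
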
@@ -605,7 +433,7 @@ static int hns_roce_register_device(struct hns_roce_dev *hr_dev)
spin_lock_init(&iboe->lock);
ib_dev = &hr_dev->ib_dev;
- strlcpy(ib_dev->name, "hisi_%d", IB_DEVICE_NAME_MAX);
+ strlcpy(ib_dev->name, "hns_%d", IB_DEVICE_NAME_MAX);
ib_dev->owner = THIS_MODULE;
ib_dev->node_type = RDMA_NODE_IB_CA;
@@ -639,6 +467,8 @@ static int hns_roce_register_device(struct hns_roce_dev *hr_dev)
ib_dev->get_link_layer = hns_roce_get_link_layer;
ib_dev->get_netdev = hns_roce_get_netdev;
ib_dev->query_gid = hns_roce_query_gid;
+ ib_dev->add_gid = hns_roce_add_gid;
+ ib_dev->del_gid = hns_roce_del_gid;
ib_dev->query_pkey = hns_roce_query_pkey;
ib_dev->alloc_ucontext = hns_roce_alloc_ucontext;
ib_dev->dealloc_ucontext = hns_roce_dealloc_ucontext;
@@ -681,32 +511,22 @@ static int hns_roce_register_device(struct hns_roce_dev *hr_dev)
return ret;
}
- ret = hns_roce_setup_mtu_gids(hr_dev);
+ ret = hns_roce_setup_mtu_mac(hr_dev);
if (ret) {
- dev_err(dev, "roce_setup_mtu_gids failed!\n");
- goto error_failed_setup_mtu_gids;
+ dev_err(dev, "setup_mtu_mac failed!\n");
+ goto error_failed_setup_mtu_mac;
}
iboe->nb.notifier_call = hns_roce_netdev_event;
ret = register_netdevice_notifier(&iboe->nb);
if (ret) {
dev_err(dev, "register_netdevice_notifier failed!\n");
- goto error_failed_setup_mtu_gids;
- }
-
- iboe->nb_inet.notifier_call = hns_roce_inet_event;
- ret = register_inetaddr_notifier(&iboe->nb_inet);
- if (ret) {
- dev_err(dev, "register inet addr notifier failed!\n");
- goto error_failed_register_inetaddr_notifier;
+ goto error_failed_setup_mtu_mac;
}
return 0;
-error_failed_register_inetaddr_notifier:
- unregister_netdevice_notifier(&iboe->nb);
-
-error_failed_setup_mtu_gids:
+error_failed_setup_mtu_mac:
ib_unregister_device(ib_dev);
return ret;
@@ -940,10 +760,10 @@ err_unmap_mtt:
}
/**
-* hns_roce_setup_hca - setup host channel adapter
-* @hr_dev: pointer to hns roce device
-* Return : int
-*/
+ * hns_roce_setup_hca - setup host channel adapter
+ * @hr_dev: pointer to hns roce device
+ * Return : int
+ */
static int hns_roce_setup_hca(struct hns_roce_dev *hr_dev)
{
int ret;
@@ -1008,11 +828,11 @@ err_uar_table_free:
}
/**
-* hns_roce_probe - RoCE driver entrance
-* @pdev: pointer to platform device
-* Return : int
-*
-*/
+ * hns_roce_probe - RoCE driver entrance
+ * @pdev: pointer to platform device
+ * Return : int
+ *
+ */
static int hns_roce_probe(struct platform_device *pdev)
{
int ret;
@@ -1023,9 +843,6 @@ static int hns_roce_probe(struct platform_device *pdev)
if (!hr_dev)
return -ENOMEM;
- memset((u8 *)hr_dev + sizeof(struct ib_device), 0,
- sizeof(struct hns_roce_dev) - sizeof(struct ib_device));
-
hr_dev->pdev = pdev;
platform_set_drvdata(pdev, hr_dev);
@@ -1125,9 +942,9 @@ error_failed_get_cfg:
}
/**
-* hns_roce_remove - remove RoCE device
-* @pdev: pointer to platform device
-*/
+ * hns_roce_remove - remove RoCE device
+ * @pdev: pointer to platform device
+ */
static int hns_roce_remove(struct platform_device *pdev)
{
struct hns_roce_dev *hr_dev = platform_get_drvdata(pdev);
diff --git a/drivers/infiniband/hw/hns/hns_roce_mr.c b/drivers/infiniband/hw/hns/hns_roce_mr.c
index fb87883ead34..4139abee3b54 100644
--- a/drivers/infiniband/hw/hns/hns_roce_mr.c
+++ b/drivers/infiniband/hw/hns/hns_roce_mr.c
@@ -42,7 +42,7 @@ static u32 hw_index_to_key(unsigned long ind)
return (u32)(ind >> 24) | (ind << 8);
}
-static unsigned long key_to_hw_index(u32 key)
+unsigned long key_to_hw_index(u32 key)
{
return (key << 24) | (key >> 8);
}
@@ -53,16 +53,16 @@ static int hns_roce_sw2hw_mpt(struct hns_roce_dev *hr_dev,
{
return hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0, mpt_index, 0,
HNS_ROCE_CMD_SW2HW_MPT,
- HNS_ROCE_CMD_TIME_CLASS_B);
+ HNS_ROCE_CMD_TIMEOUT_MSECS);
}
-static int hns_roce_hw2sw_mpt(struct hns_roce_dev *hr_dev,
+int hns_roce_hw2sw_mpt(struct hns_roce_dev *hr_dev,
struct hns_roce_cmd_mailbox *mailbox,
unsigned long mpt_index)
{
return hns_roce_cmd_mbox(hr_dev, 0, mailbox ? mailbox->dma : 0,
mpt_index, !mailbox, HNS_ROCE_CMD_HW2SW_MPT,
- HNS_ROCE_CMD_TIME_CLASS_B);
+ HNS_ROCE_CMD_TIMEOUT_MSECS);
}
static int hns_roce_buddy_alloc(struct hns_roce_buddy *buddy, int order,
@@ -137,11 +137,13 @@ static int hns_roce_buddy_init(struct hns_roce_buddy *buddy, int max_order)
for (i = 0; i <= buddy->max_order; ++i) {
s = BITS_TO_LONGS(1 << (buddy->max_order - i));
- buddy->bits[i] = kmalloc_array(s, sizeof(long), GFP_KERNEL);
- if (!buddy->bits[i])
- goto err_out_free;
-
- bitmap_zero(buddy->bits[i], 1 << (buddy->max_order - i));
+ buddy->bits[i] = kcalloc(s, sizeof(long), GFP_KERNEL |
+ __GFP_NOWARN);
+ if (!buddy->bits[i]) {
+ buddy->bits[i] = vzalloc(s * sizeof(long));
+ if (!buddy->bits[i])
+ goto err_out_free;
+ }
}
set_bit(0, buddy->bits[buddy->max_order]);
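
The buddy-bitmap change is the classic try-contiguous-then-fall-back allocation: attempt kcalloc() with __GFP_NOWARN first, drop to vzalloc() when the order is too large for contiguous pages, and free either result through kvfree(). A user-space model of the same shape, where a hypothetical size limit simulates the higher-order allocation failing:

#include <stdio.h>
#include <stdlib.h>

#define SMALL_LIMIT (64 * 1024)	/* hypothetical contiguous-alloc limit */

static void *try_small_alloc(size_t n)	/* models kcalloc() */
{
	if (n > SMALL_LIMIT)
		return NULL;	/* simulate higher-order alloc failure */
	return calloc(1, n);
}

static void *big_alloc(size_t n)	/* models vzalloc() */
{
	return calloc(1, n);
}

static void *alloc_bitmap(size_t n)
{
	void *p = try_small_alloc(n);

	return p ? p : big_alloc(n);	/* fall back on failure */
}

int main(void)
{
	void *bits = alloc_bitmap(256 * 1024);

	printf("bitmap %s\n", bits ? "allocated" : "failed");
	free(bits);	/* kvfree() equivalent: handles either origin */
	return 0;
}
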
@@ -151,7 +153,7 @@ static int hns_roce_buddy_init(struct hns_roce_buddy *buddy, int max_order)
err_out_free:
for (i = 0; i <= buddy->max_order; ++i)
- kfree(buddy->bits[i]);
+ kvfree(buddy->bits[i]);
err_out:
kfree(buddy->bits);
@@ -164,7 +166,7 @@ static void hns_roce_buddy_cleanup(struct hns_roce_buddy *buddy)
int i;
for (i = 0; i <= buddy->max_order; ++i)
- kfree(buddy->bits[i]);
+ kvfree(buddy->bits[i]);
kfree(buddy->bits);
kfree(buddy->num_free);
@@ -287,7 +289,7 @@ static void hns_roce_mr_free(struct hns_roce_dev *hr_dev,
}
hns_roce_bitmap_free(&hr_dev->mr_table.mtpt_bitmap,
- key_to_hw_index(mr->key));
+ key_to_hw_index(mr->key), BITMAP_NO_RR);
}
static int hns_roce_mr_enable(struct hns_roce_dev *hr_dev,
@@ -605,13 +607,20 @@ err_free:
int hns_roce_dereg_mr(struct ib_mr *ibmr)
{
+ struct hns_roce_dev *hr_dev = to_hr_dev(ibmr->device);
struct hns_roce_mr *mr = to_hr_mr(ibmr);
+ int ret = 0;
- hns_roce_mr_free(to_hr_dev(ibmr->device), mr);
- if (mr->umem)
- ib_umem_release(mr->umem);
+ if (hr_dev->hw->dereg_mr) {
+ ret = hr_dev->hw->dereg_mr(hr_dev, mr);
+ } else {
+ hns_roce_mr_free(hr_dev, mr);
- kfree(mr);
+ if (mr->umem)
+ ib_umem_release(mr->umem);
- return 0;
+ kfree(mr);
+ }
+
+ return ret;
}
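
hns_roce_dereg_mr() now dispatches through an optional per-engine hook: if the hardware registered dereg_mr it owns the whole teardown (hw v1 needs its loopback-QP dance), otherwise the generic free path runs. A minimal nullable-function-pointer sketch of that dispatch:

#include <stdio.h>

struct mr { int key; };

struct hw_ops {
	int (*dereg_mr)(struct mr *mr);	/* NULL when not overridden */
};

static int generic_dereg(struct mr *mr)
{
	printf("generic free of mr %d\n", mr->key);
	return 0;
}

static int v1_dereg(struct mr *mr)
{
	printf("v1-specific free of mr %d\n", mr->key);
	return 0;
}

static int dereg_mr(struct hw_ops *ops, struct mr *mr)
{
	return ops->dereg_mr ? ops->dereg_mr(mr) : generic_dereg(mr);
}

int main(void)
{
	struct mr m = { .key = 7 };
	struct hw_ops plain = { 0 }, v1 = { .dereg_mr = v1_dereg };

	dereg_mr(&plain, &m);
	dereg_mr(&v1, &m);
	return 0;
}
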
diff --git a/drivers/infiniband/hw/hns/hns_roce_pd.c b/drivers/infiniband/hw/hns/hns_roce_pd.c
index 05db7d59812a..a64500fa1145 100644
--- a/drivers/infiniband/hw/hns/hns_roce_pd.c
+++ b/drivers/infiniband/hw/hns/hns_roce_pd.c
@@ -40,7 +40,7 @@ static int hns_roce_pd_alloc(struct hns_roce_dev *hr_dev, unsigned long *pdn)
static void hns_roce_pd_free(struct hns_roce_dev *hr_dev, unsigned long pdn)
{
- hns_roce_bitmap_free(&hr_dev->pd_bitmap, pdn);
+ hns_roce_bitmap_free(&hr_dev->pd_bitmap, pdn, BITMAP_NO_RR);
}
int hns_roce_init_pd_table(struct hns_roce_dev *hr_dev)
@@ -121,7 +121,8 @@ int hns_roce_uar_alloc(struct hns_roce_dev *hr_dev, struct hns_roce_uar *uar)
void hns_roce_uar_free(struct hns_roce_dev *hr_dev, struct hns_roce_uar *uar)
{
- hns_roce_bitmap_free(&hr_dev->uar_table.bitmap, uar->index);
+ hns_roce_bitmap_free(&hr_dev->uar_table.bitmap, uar->index,
+ BITMAP_NO_RR);
}
int hns_roce_init_uar_table(struct hns_roce_dev *hr_dev)
diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c
index e86dd8d06777..f036f32f15d3 100644
--- a/drivers/infiniband/hw/hns/hns_roce_qp.c
+++ b/drivers/infiniband/hw/hns/hns_roce_qp.c
@@ -37,7 +37,7 @@
#include "hns_roce_common.h"
#include "hns_roce_device.h"
#include "hns_roce_hem.h"
-#include "hns_roce_user.h"
+#include <rdma/hns-abi.h>
#define SQP_NUM (2 * HNS_ROCE_MAX_PORTS)
@@ -250,7 +250,7 @@ void hns_roce_release_range_qp(struct hns_roce_dev *hr_dev, int base_qpn,
if (base_qpn < SQP_NUM)
return;
- hns_roce_bitmap_free_range(&qp_table->bitmap, base_qpn, cnt);
+ hns_roce_bitmap_free_range(&qp_table->bitmap, base_qpn, cnt, BITMAP_RR);
}
static int hns_roce_set_rq_size(struct hns_roce_dev *hr_dev,
diff --git a/drivers/infiniband/hw/hns/hns_roce_user.h b/drivers/infiniband/hw/hns/hns_roce_user.h
deleted file mode 100644
index a28f761a9f65..000000000000
--- a/drivers/infiniband/hw/hns/hns_roce_user.h
+++ /dev/null
@@ -1,53 +0,0 @@
-/*
- * Copyright (c) 2016 Hisilicon Limited.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#ifndef _HNS_ROCE_USER_H
-#define _HNS_ROCE_USER_H
-
-struct hns_roce_ib_create_cq {
- __u64 buf_addr;
-};
-
-struct hns_roce_ib_create_qp {
- __u64 buf_addr;
- __u64 db_addr;
- __u8 log_sq_bb_count;
- __u8 log_sq_stride;
- __u8 sq_no_prefetch;
- __u8 reserved[5];
-};
-
-struct hns_roce_ib_alloc_ucontext_resp {
- __u32 qp_tab_size;
-};
-
-#endif /*_HNS_ROCE_USER_H */
diff --git a/drivers/infiniband/hw/i40iw/i40iw.h b/drivers/infiniband/hw/i40iw/i40iw.h
index 8ec09e470f84..da2eb5a281fa 100644
--- a/drivers/infiniband/hw/i40iw/i40iw.h
+++ b/drivers/infiniband/hw/i40iw/i40iw.h
@@ -112,9 +112,12 @@
#define I40IW_DRV_OPT_MCAST_LOGPORT_MAP 0x00000800
#define IW_HMC_OBJ_TYPE_NUM ARRAY_SIZE(iw_hmc_obj_types)
-#define IW_CFG_FPM_QP_COUNT 32768
-#define I40IW_MAX_PAGES_PER_FMR 512
-#define I40IW_MIN_PAGES_PER_FMR 1
+#define IW_CFG_FPM_QP_COUNT 32768
+#define I40IW_MAX_PAGES_PER_FMR 512
+#define I40IW_MIN_PAGES_PER_FMR 1
+#define I40IW_CQP_COMPL_RQ_WQE_FLUSHED 2
+#define I40IW_CQP_COMPL_SQ_WQE_FLUSHED 3
+#define I40IW_CQP_COMPL_RQ_SQ_WQE_FLUSHED 4
#define I40IW_MTU_TO_MSS 40
#define I40IW_DEFAULT_MSS 1460
@@ -210,6 +213,12 @@ struct i40iw_msix_vector {
u32 ceq_id;
};
+struct l2params_work {
+ struct work_struct work;
+ struct i40iw_device *iwdev;
+ struct i40iw_l2params l2params;
+};
+
#define I40IW_MSIX_TABLE_SIZE 65
struct virtchnl_work {
@@ -227,6 +236,7 @@ struct i40iw_device {
struct net_device *netdev;
wait_queue_head_t vchnl_waitq;
struct i40iw_sc_dev sc_dev;
+ struct i40iw_sc_vsi vsi;
struct i40iw_handler *hdl;
struct i40e_info *ldev;
struct i40e_client *client;
@@ -280,7 +290,6 @@ struct i40iw_device {
u32 sd_type;
struct workqueue_struct *param_wq;
atomic_t params_busy;
- u32 mss;
enum init_completion_state init_state;
u16 mac_ip_table_idx;
atomic_t vchnl_msgs;
@@ -297,6 +306,14 @@ struct i40iw_device {
u32 mr_stagmask;
u32 mpa_version;
bool dcb;
+ bool closing;
+ bool reset;
+ u32 used_pds;
+ u32 used_cqs;
+ u32 used_mrs;
+ u32 used_qps;
+ wait_queue_head_t close_wq;
+ atomic64_t use_count;
};
struct i40iw_ib_device {
@@ -498,7 +515,7 @@ u32 i40iw_initialize_hw_resources(struct i40iw_device *iwdev);
int i40iw_register_rdma_device(struct i40iw_device *iwdev);
void i40iw_port_ibevent(struct i40iw_device *iwdev);
-int i40iw_cm_disconn(struct i40iw_qp *);
+void i40iw_cm_disconn(struct i40iw_qp *iwqp);
void i40iw_cm_disconn_worker(void *);
int mini_cm_recv_pkt(struct i40iw_cm_core *, struct i40iw_device *,
struct sk_buff *);
@@ -508,20 +525,26 @@ enum i40iw_status_code i40iw_handle_cqp_op(struct i40iw_device *iwdev,
enum i40iw_status_code i40iw_add_mac_addr(struct i40iw_device *iwdev,
u8 *mac_addr, u8 *mac_index);
int i40iw_modify_qp(struct ib_qp *, struct ib_qp_attr *, int, struct ib_udata *);
+void i40iw_cq_wq_destroy(struct i40iw_device *iwdev, struct i40iw_sc_cq *cq);
void i40iw_rem_pdusecount(struct i40iw_pd *iwpd, struct i40iw_device *iwdev);
void i40iw_add_pdusecount(struct i40iw_pd *iwpd);
+void i40iw_rem_devusecount(struct i40iw_device *iwdev);
+void i40iw_add_devusecount(struct i40iw_device *iwdev);
void i40iw_hw_modify_qp(struct i40iw_device *iwdev, struct i40iw_qp *iwqp,
struct i40iw_modify_qp_info *info, bool wait);
+void i40iw_qp_suspend_resume(struct i40iw_sc_dev *dev,
+ struct i40iw_sc_qp *qp,
+ bool suspend);
enum i40iw_status_code i40iw_manage_qhash(struct i40iw_device *iwdev,
struct i40iw_cm_info *cminfo,
enum i40iw_quad_entry_type etype,
enum i40iw_quad_hash_manage_type mtype,
void *cmnode,
bool wait);
-void i40iw_receive_ilq(struct i40iw_sc_dev *dev, struct i40iw_puda_buf *rbuf);
-void i40iw_free_sqbuf(struct i40iw_sc_dev *dev, void *bufp);
+void i40iw_receive_ilq(struct i40iw_sc_vsi *vsi, struct i40iw_puda_buf *rbuf);
+void i40iw_free_sqbuf(struct i40iw_sc_vsi *vsi, void *bufp);
void i40iw_free_qp_resources(struct i40iw_device *iwdev,
struct i40iw_qp *iwqp,
u32 qp_num);
diff --git a/drivers/infiniband/hw/i40iw/i40iw_cm.c b/drivers/infiniband/hw/i40iw/i40iw_cm.c
index 85637696f6e9..95a0586a4da8 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_cm.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_cm.c
@@ -68,13 +68,13 @@ static void i40iw_disconnect_worker(struct work_struct *work);
/**
* i40iw_free_sqbuf - put back puda buffer if refcount = 0
- * @dev: FPK device
+ * @vsi: pointer to vsi structure
* @buf: puda buffer to free
*/
-void i40iw_free_sqbuf(struct i40iw_sc_dev *dev, void *bufp)
+void i40iw_free_sqbuf(struct i40iw_sc_vsi *vsi, void *bufp)
{
struct i40iw_puda_buf *buf = (struct i40iw_puda_buf *)bufp;
- struct i40iw_puda_rsrc *ilq = dev->ilq;
+ struct i40iw_puda_rsrc *ilq = vsi->ilq;
if (!atomic_dec_return(&buf->refcount))
i40iw_puda_ret_bufpool(ilq, buf);
@@ -221,6 +221,7 @@ static void i40iw_get_addr_info(struct i40iw_cm_node *cm_node,
memcpy(cm_info->rem_addr, cm_node->rem_addr, sizeof(cm_info->rem_addr));
cm_info->loc_port = cm_node->loc_port;
cm_info->rem_port = cm_node->rem_port;
+ cm_info->user_pri = cm_node->user_pri;
}
/**
@@ -271,6 +272,7 @@ static int i40iw_send_cm_event(struct i40iw_cm_node *cm_node,
event.provider_data = (void *)cm_node;
event.private_data = (void *)cm_node->pdata_buf;
event.private_data_len = (u8)cm_node->pdata.size;
+ event.ird = cm_node->ird_size;
break;
case IW_CM_EVENT_CONNECT_REPLY:
i40iw_get_cmevent_info(cm_node, cm_id, &event);
@@ -335,13 +337,13 @@ static struct i40iw_cm_event *i40iw_create_event(struct i40iw_cm_node *cm_node,
*/
static void i40iw_free_retrans_entry(struct i40iw_cm_node *cm_node)
{
- struct i40iw_sc_dev *dev = cm_node->dev;
+ struct i40iw_device *iwdev = cm_node->iwdev;
struct i40iw_timer_entry *send_entry;
send_entry = cm_node->send_entry;
if (send_entry) {
cm_node->send_entry = NULL;
- i40iw_free_sqbuf(dev, (void *)send_entry->sqbuf);
+ i40iw_free_sqbuf(&iwdev->vsi, (void *)send_entry->sqbuf);
kfree(send_entry);
atomic_dec(&cm_node->ref_count);
}
@@ -360,15 +362,6 @@ static void i40iw_cleanup_retrans_entry(struct i40iw_cm_node *cm_node)
spin_unlock_irqrestore(&cm_node->retrans_list_lock, flags);
}
-static bool is_remote_ne020_or_chelsio(struct i40iw_cm_node *cm_node)
-{
- if ((cm_node->rem_mac[0] == 0x0) &&
- (((cm_node->rem_mac[1] == 0x12) && (cm_node->rem_mac[2] == 0x55)) ||
- ((cm_node->rem_mac[1] == 0x07 && (cm_node->rem_mac[2] == 0x43)))))
- return true;
- return false;
-}
-
/**
* i40iw_form_cm_frame - get a free packet and build frame
 * @cm_node: connection's node info to use in frame
@@ -384,7 +377,7 @@ static struct i40iw_puda_buf *i40iw_form_cm_frame(struct i40iw_cm_node *cm_node,
u8 flags)
{
struct i40iw_puda_buf *sqbuf;
- struct i40iw_sc_dev *dev = cm_node->dev;
+ struct i40iw_sc_vsi *vsi = &cm_node->iwdev->vsi;
u8 *buf;
struct tcphdr *tcph;
@@ -396,8 +389,9 @@ static struct i40iw_puda_buf *i40iw_form_cm_frame(struct i40iw_cm_node *cm_node,
u32 opts_len = 0;
u32 pd_len = 0;
u32 hdr_len = 0;
+ u16 vtag;
- sqbuf = i40iw_puda_get_bufpool(dev->ilq);
+ sqbuf = i40iw_puda_get_bufpool(vsi->ilq);
if (!sqbuf)
return NULL;
buf = sqbuf->mem.va;
@@ -408,11 +402,8 @@ static struct i40iw_puda_buf *i40iw_form_cm_frame(struct i40iw_cm_node *cm_node,
if (hdr)
hdr_len = hdr->size;
- if (pdata) {
+ if (pdata)
pd_len = pdata->size;
- if (!is_remote_ne020_or_chelsio(cm_node))
- pd_len += MPA_ZERO_PAD_LEN;
- }
if (cm_node->vlan_id < VLAN_TAG_PRESENT)
eth_hlen += 4;
@@ -445,7 +436,8 @@ static struct i40iw_puda_buf *i40iw_form_cm_frame(struct i40iw_cm_node *cm_node,
ether_addr_copy(ethh->h_source, cm_node->loc_mac);
if (cm_node->vlan_id < VLAN_TAG_PRESENT) {
((struct vlan_ethhdr *)ethh)->h_vlan_proto = htons(ETH_P_8021Q);
- ((struct vlan_ethhdr *)ethh)->h_vlan_TCI = htons(cm_node->vlan_id);
+ vtag = (cm_node->user_pri << VLAN_PRIO_SHIFT) | cm_node->vlan_id;
+ ((struct vlan_ethhdr *)ethh)->h_vlan_TCI = htons(vtag);
((struct vlan_ethhdr *)ethh)->h_vlan_encapsulated_proto = htons(ETH_P_IP);
} else {
@@ -454,7 +446,7 @@ static struct i40iw_puda_buf *i40iw_form_cm_frame(struct i40iw_cm_node *cm_node,
iph->version = IPVERSION;
iph->ihl = 5; /* 5 * 4-byte words, IP header len */
- iph->tos = 0;
+ iph->tos = cm_node->tos;
iph->tot_len = htons(packetsize);
iph->id = htons(++cm_node->tcp_cntxt.loc_id);
@@ -474,13 +466,15 @@ static struct i40iw_puda_buf *i40iw_form_cm_frame(struct i40iw_cm_node *cm_node,
ether_addr_copy(ethh->h_source, cm_node->loc_mac);
if (cm_node->vlan_id < VLAN_TAG_PRESENT) {
((struct vlan_ethhdr *)ethh)->h_vlan_proto = htons(ETH_P_8021Q);
- ((struct vlan_ethhdr *)ethh)->h_vlan_TCI = htons(cm_node->vlan_id);
+ vtag = (cm_node->user_pri << VLAN_PRIO_SHIFT) | cm_node->vlan_id;
+ ((struct vlan_ethhdr *)ethh)->h_vlan_TCI = htons(vtag);
((struct vlan_ethhdr *)ethh)->h_vlan_encapsulated_proto = htons(ETH_P_IPV6);
} else {
ethh->h_proto = htons(ETH_P_IPV6);
}
ip6h->version = 6;
- ip6h->flow_lbl[0] = 0;
+ ip6h->priority = cm_node->tos >> 4;
+ ip6h->flow_lbl[0] = cm_node->tos << 4;
ip6h->flow_lbl[1] = 0;
ip6h->flow_lbl[2] = 0;
ip6h->payload_len = htons(packetsize - sizeof(*ip6h));
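
Two bits of header packing recur throughout this hunk: the 802.1Q TCI carries the user priority in bits 15:13 above the 12-bit VLAN ID, and an IPv6 header splits the 8-bit TOS across the 4-bit priority field and the top nibble of flow_lbl[0] (the receive path in i40iw_receive_ilq() recombines them the same way). A self-contained check of both encodings:

#include <stdint.h>
#include <stdio.h>

#define VLAN_PRIO_SHIFT 13
#define VLAN_VID_MASK 0x0fff

int main(void)
{
	uint16_t user_pri = 5, vlan_id = 100;
	uint16_t vtag = (uint16_t)((user_pri << VLAN_PRIO_SHIFT) | vlan_id);

	uint8_t tos = 0xb8;			/* e.g. DSCP EF */
	uint8_t prio = tos >> 4;		/* ip6h->priority */
	uint8_t flow0 = (uint8_t)(tos << 4);	/* ip6h->flow_lbl[0] */

	printf("vtag=0x%04x prio=%u flow0=0x%02x\n", vtag, prio, flow0);
	/* receive side recombines, as i40iw_receive_ilq() does: */
	printf("tos=0x%02x\n", (prio << 4) | (flow0 >> 4));
	return 0;
}
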
@@ -1065,7 +1059,7 @@ int i40iw_schedule_cm_timer(struct i40iw_cm_node *cm_node,
int send_retrans,
int close_when_complete)
{
- struct i40iw_sc_dev *dev = cm_node->dev;
+ struct i40iw_sc_vsi *vsi = &cm_node->iwdev->vsi;
struct i40iw_cm_core *cm_core = cm_node->cm_core;
struct i40iw_timer_entry *new_send;
int ret = 0;
@@ -1074,7 +1068,7 @@ int i40iw_schedule_cm_timer(struct i40iw_cm_node *cm_node,
new_send = kzalloc(sizeof(*new_send), GFP_ATOMIC);
if (!new_send) {
- i40iw_free_sqbuf(cm_node->dev, (void *)sqbuf);
+ i40iw_free_sqbuf(vsi, (void *)sqbuf);
return -ENOMEM;
}
new_send->retrycount = I40IW_DEFAULT_RETRYS;
@@ -1089,7 +1083,7 @@ int i40iw_schedule_cm_timer(struct i40iw_cm_node *cm_node,
new_send->timetosend += (HZ / 10);
if (cm_node->close_entry) {
kfree(new_send);
- i40iw_free_sqbuf(cm_node->dev, (void *)sqbuf);
+ i40iw_free_sqbuf(vsi, (void *)sqbuf);
i40iw_pr_err("already close entry\n");
return -EINVAL;
}
@@ -1104,7 +1098,7 @@ int i40iw_schedule_cm_timer(struct i40iw_cm_node *cm_node,
new_send->timetosend = jiffies + I40IW_RETRY_TIMEOUT;
atomic_inc(&sqbuf->refcount);
- i40iw_puda_send_buf(dev->ilq, sqbuf);
+ i40iw_puda_send_buf(vsi->ilq, sqbuf);
if (!send_retrans) {
i40iw_cleanup_retrans_entry(cm_node);
if (close_when_complete)
@@ -1201,6 +1195,7 @@ static void i40iw_cm_timer_tick(unsigned long pass)
struct i40iw_cm_node *cm_node;
struct i40iw_timer_entry *send_entry, *close_entry;
struct list_head *list_core_temp;
+ struct i40iw_sc_vsi *vsi;
struct list_head *list_node;
struct i40iw_cm_core *cm_core = (struct i40iw_cm_core *)pass;
u32 settimer = 0;
@@ -1276,9 +1271,10 @@ static void i40iw_cm_timer_tick(unsigned long pass)
cm_node->cm_core->stats_pkt_retrans++;
spin_unlock_irqrestore(&cm_node->retrans_list_lock, flags);
+ vsi = &cm_node->iwdev->vsi;
dev = cm_node->dev;
atomic_inc(&send_entry->sqbuf->refcount);
- i40iw_puda_send_buf(dev->ilq, send_entry->sqbuf);
+ i40iw_puda_send_buf(vsi->ilq, send_entry->sqbuf);
spin_lock_irqsave(&cm_node->retrans_list_lock, flags);
if (send_entry->send_retrans) {
send_entry->retranscount--;
@@ -1379,10 +1375,11 @@ int i40iw_send_syn(struct i40iw_cm_node *cm_node, u32 sendack)
static void i40iw_send_ack(struct i40iw_cm_node *cm_node)
{
struct i40iw_puda_buf *sqbuf;
+ struct i40iw_sc_vsi *vsi = &cm_node->iwdev->vsi;
sqbuf = i40iw_form_cm_frame(cm_node, NULL, NULL, NULL, SET_ACK);
if (sqbuf)
- i40iw_puda_send_buf(cm_node->dev->ilq, sqbuf);
+ i40iw_puda_send_buf(vsi->ilq, sqbuf);
else
i40iw_pr_err("no sqbuf\n");
}
@@ -1564,9 +1561,15 @@ static enum i40iw_status_code i40iw_del_multiple_qhash(
memcpy(cm_info->loc_addr, child_listen_node->loc_addr,
sizeof(cm_info->loc_addr));
cm_info->vlan_id = child_listen_node->vlan_id;
- ret = i40iw_manage_qhash(iwdev, cm_info,
- I40IW_QHASH_TYPE_TCP_SYN,
- I40IW_QHASH_MANAGE_TYPE_DELETE, NULL, false);
+ if (child_listen_node->qhash_set) {
+ ret = i40iw_manage_qhash(iwdev, cm_info,
+ I40IW_QHASH_TYPE_TCP_SYN,
+ I40IW_QHASH_MANAGE_TYPE_DELETE,
+ NULL, false);
+ child_listen_node->qhash_set = false;
+ } else {
+ ret = I40IW_SUCCESS;
+ }
i40iw_debug(&iwdev->sc_dev,
I40IW_DEBUG_CM,
"freed pointer = %p\n",
@@ -1591,9 +1594,10 @@ static enum i40iw_status_code i40iw_del_multiple_qhash(
static struct net_device *i40iw_netdev_vlan_ipv6(u32 *addr, u16 *vlan_id, u8 *mac)
{
struct net_device *ip_dev = NULL;
-#if IS_ENABLED(CONFIG_IPV6)
struct in6_addr laddr6;
+ if (!IS_ENABLED(CONFIG_IPV6))
+ return NULL;
i40iw_copy_ip_htonl(laddr6.in6_u.u6_addr32, addr);
if (vlan_id)
*vlan_id = I40IW_NO_VLAN;
@@ -1610,7 +1614,6 @@ static struct net_device *i40iw_netdev_vlan_ipv6(u32 *addr, u16 *vlan_id, u8 *ma
}
}
rcu_read_unlock();
-#endif
return ip_dev;
}
@@ -1646,7 +1649,7 @@ static enum i40iw_status_code i40iw_add_mqh_6(struct i40iw_device *iwdev,
{
struct net_device *ip_dev;
struct inet6_dev *idev;
- struct inet6_ifaddr *ifp;
+ struct inet6_ifaddr *ifp, *tmp;
enum i40iw_status_code ret = 0;
struct i40iw_cm_listener *child_listen_node;
unsigned long flags;
@@ -1661,7 +1664,7 @@ static enum i40iw_status_code i40iw_add_mqh_6(struct i40iw_device *iwdev,
i40iw_pr_err("idev == NULL\n");
break;
}
- list_for_each_entry(ifp, &idev->addr_list, if_list) {
+ list_for_each_entry_safe(ifp, tmp, &idev->addr_list, if_list) {
i40iw_debug(&iwdev->sc_dev,
I40IW_DEBUG_CM,
"IP=%pI6, vlan_id=%d, MAC=%pM\n",
@@ -1675,7 +1678,6 @@ static enum i40iw_status_code i40iw_add_mqh_6(struct i40iw_device *iwdev,
"Allocating child listener %p\n",
child_listen_node);
if (!child_listen_node) {
- i40iw_pr_err("listener memory allocation\n");
ret = I40IW_ERR_NO_MEMORY;
goto exit;
}
@@ -1695,6 +1697,7 @@ static enum i40iw_status_code i40iw_add_mqh_6(struct i40iw_device *iwdev,
I40IW_QHASH_MANAGE_TYPE_ADD,
NULL, true);
if (!ret) {
+ child_listen_node->qhash_set = true;
spin_lock_irqsave(&iwdev->cm_core.listen_list_lock, flags);
list_add(&child_listen_node->child_listen_list,
&cm_parent_listen_node->child_listen_list);
@@ -1751,7 +1754,6 @@ static enum i40iw_status_code i40iw_add_mqh_4(
"Allocating child listener %p\n",
child_listen_node);
if (!child_listen_node) {
- i40iw_pr_err("listener memory allocation\n");
in_dev_put(idev);
ret = I40IW_ERR_NO_MEMORY;
goto exit;
@@ -1773,6 +1775,7 @@ static enum i40iw_status_code i40iw_add_mqh_4(
NULL,
true);
if (!ret) {
+ child_listen_node->qhash_set = true;
spin_lock_irqsave(&iwdev->cm_core.listen_list_lock, flags);
list_add(&child_listen_node->child_listen_list,
&cm_parent_listen_node->child_listen_list);
@@ -1880,6 +1883,7 @@ static int i40iw_dec_refcnt_listen(struct i40iw_cm_core *cm_core,
nfo.loc_port = listener->loc_port;
nfo.ipv4 = listener->ipv4;
nfo.vlan_id = listener->vlan_id;
+ nfo.user_pri = listener->user_pri;
if (!list_empty(&listener->child_listen_list)) {
i40iw_del_multiple_qhash(listener->iwdev, &nfo, listener);
@@ -2138,6 +2142,20 @@ static struct i40iw_cm_node *i40iw_make_cm_node(
/* set our node specific transport info */
cm_node->ipv4 = cm_info->ipv4;
cm_node->vlan_id = cm_info->vlan_id;
+ if ((cm_node->vlan_id == I40IW_NO_VLAN) && iwdev->dcb)
+ cm_node->vlan_id = 0;
+ cm_node->tos = cm_info->tos;
+ cm_node->user_pri = cm_info->user_pri;
+ if (listener) {
+ if (listener->tos != cm_info->tos)
+ i40iw_debug(&iwdev->sc_dev, I40IW_DEBUG_DCB,
+ "application TOS[%d] and remote client TOS[%d] mismatch\n",
+ listener->tos, cm_info->tos);
+ cm_node->tos = max(listener->tos, cm_info->tos);
+ cm_node->user_pri = rt_tos2priority(cm_node->tos);
+ i40iw_debug(&iwdev->sc_dev, I40IW_DEBUG_DCB, "listener: TOS:[%d] UP:[%d]\n",
+ cm_node->tos, cm_node->user_pri);
+ }
memcpy(cm_node->loc_addr, cm_info->loc_addr, sizeof(cm_node->loc_addr));
memcpy(cm_node->rem_addr, cm_info->rem_addr, sizeof(cm_node->rem_addr));
cm_node->loc_port = cm_info->loc_port;
@@ -2162,7 +2180,7 @@ static struct i40iw_cm_node *i40iw_make_cm_node(
I40IW_CM_DEFAULT_RCV_WND_SCALED >> I40IW_CM_DEFAULT_RCV_WND_SCALE;
ts = current_kernel_time();
cm_node->tcp_cntxt.loc_seq_num = ts.tv_nsec;
- cm_node->tcp_cntxt.mss = iwdev->mss;
+ cm_node->tcp_cntxt.mss = iwdev->vsi.mss;
cm_node->iwdev = iwdev;
cm_node->dev = &iwdev->sc_dev;
@@ -2236,7 +2254,7 @@ static void i40iw_rem_ref_cm_node(struct i40iw_cm_node *cm_node)
i40iw_dec_refcnt_listen(cm_core, cm_node->listener, 0, true);
} else {
if (!i40iw_listen_port_in_use(cm_core, cm_node->loc_port) &&
- cm_node->apbvt_set && cm_node->iwdev) {
+ cm_node->apbvt_set) {
i40iw_manage_apbvt(cm_node->iwdev,
cm_node->loc_port,
I40IW_MANAGE_APBVT_DEL);
@@ -2861,7 +2879,7 @@ static struct i40iw_cm_node *i40iw_create_cm_node(
/* create a CM connection node */
cm_node = i40iw_make_cm_node(cm_core, iwdev, cm_info, NULL);
if (!cm_node)
- return NULL;
+ return ERR_PTR(-ENOMEM);
/* set our node side to client (active) side */
cm_node->tcp_cntxt.client = 1;
cm_node->tcp_cntxt.rcv_wscale = I40IW_CM_DEFAULT_RCV_WND_SCALE;
@@ -2874,7 +2892,8 @@ static struct i40iw_cm_node *i40iw_create_cm_node(
cm_node->vlan_id,
I40IW_CM_LISTENER_ACTIVE_STATE);
if (!loopback_remotelistener) {
- i40iw_create_event(cm_node, I40IW_CM_EVENT_ABORTED);
+ i40iw_rem_ref_cm_node(cm_node);
+ return ERR_PTR(-ECONNREFUSED);
} else {
loopback_cm_info = *cm_info;
loopback_cm_info.loc_port = cm_info->rem_port;
@@ -2887,7 +2906,7 @@ static struct i40iw_cm_node *i40iw_create_cm_node(
loopback_remotelistener);
if (!loopback_remotenode) {
i40iw_rem_ref_cm_node(cm_node);
- return NULL;
+ return ERR_PTR(-ENOMEM);
}
cm_core->stats_loopbacks++;
loopback_remotenode->loopbackpartner = cm_node;
@@ -3041,10 +3060,10 @@ static int i40iw_cm_close(struct i40iw_cm_node *cm_node)
/**
* i40iw_receive_ilq - recv an ETHERNET packet, and process it
* through CM
- * @dev: FPK dev struct
+ * @vsi: pointer to the vsi structure
* @rbuf: receive buffer
*/
-void i40iw_receive_ilq(struct i40iw_sc_dev *dev, struct i40iw_puda_buf *rbuf)
+void i40iw_receive_ilq(struct i40iw_sc_vsi *vsi, struct i40iw_puda_buf *rbuf)
{
struct i40iw_cm_node *cm_node;
struct i40iw_cm_listener *listener;
@@ -3052,9 +3071,11 @@ void i40iw_receive_ilq(struct i40iw_sc_dev *dev, struct i40iw_puda_buf *rbuf)
struct ipv6hdr *ip6h;
struct tcphdr *tcph;
struct i40iw_cm_info cm_info;
+ struct i40iw_sc_dev *dev = vsi->dev;
struct i40iw_device *iwdev = (struct i40iw_device *)dev->back_dev;
struct i40iw_cm_core *cm_core = &iwdev->cm_core;
struct vlan_ethhdr *ethh;
+ u16 vtag;
/* if vlan, then maclen = 18 else 14 */
iph = (struct iphdr *)rbuf->iph;
@@ -3068,7 +3089,9 @@ void i40iw_receive_ilq(struct i40iw_sc_dev *dev, struct i40iw_puda_buf *rbuf)
ethh = (struct vlan_ethhdr *)rbuf->mem.va;
if (ethh->h_vlan_proto == htons(ETH_P_8021Q)) {
- cm_info.vlan_id = ntohs(ethh->h_vlan_TCI) & VLAN_VID_MASK;
+ vtag = ntohs(ethh->h_vlan_TCI);
+ cm_info.user_pri = (vtag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
+ cm_info.vlan_id = vtag & VLAN_VID_MASK;
i40iw_debug(cm_core->dev,
I40IW_DEBUG_CM,
"%s vlan_id=%d\n",
@@ -3083,6 +3106,7 @@ void i40iw_receive_ilq(struct i40iw_sc_dev *dev, struct i40iw_puda_buf *rbuf)
cm_info.loc_addr[0] = ntohl(iph->daddr);
cm_info.rem_addr[0] = ntohl(iph->saddr);
cm_info.ipv4 = true;
+ cm_info.tos = iph->tos;
} else {
ip6h = (struct ipv6hdr *)rbuf->iph;
i40iw_copy_ip_ntohl(cm_info.loc_addr,
@@ -3090,6 +3114,7 @@ void i40iw_receive_ilq(struct i40iw_sc_dev *dev, struct i40iw_puda_buf *rbuf)
i40iw_copy_ip_ntohl(cm_info.rem_addr,
ip6h->saddr.in6_u.u6_addr32);
cm_info.ipv4 = false;
+ cm_info.tos = (ip6h->priority << 4) | (ip6h->flow_lbl[0] >> 4);
}
cm_info.loc_port = ntohs(tcph->dest);
cm_info.rem_port = ntohs(tcph->source);
@@ -3309,6 +3334,8 @@ static void i40iw_cm_init_tsa_conn(struct i40iw_qp *iwqp,
ctx_info->tcp_info_valid = true;
ctx_info->iwarp_info_valid = true;
+ ctx_info->add_to_qoslist = true;
+ ctx_info->user_pri = cm_node->user_pri;
i40iw_init_tcp_ctx(cm_node, &tcp_info, iwqp);
if (cm_node->snd_mark_en) {
@@ -3320,33 +3347,47 @@ static void i40iw_cm_init_tsa_conn(struct i40iw_qp *iwqp,
cm_node->state = I40IW_CM_STATE_OFFLOADED;
tcp_info.tcp_state = I40IW_TCP_STATE_ESTABLISHED;
tcp_info.src_mac_addr_idx = iwdev->mac_ip_table_idx;
+ tcp_info.tos = cm_node->tos;
dev->iw_priv_qp_ops->qp_setctx(&iwqp->sc_qp, (u64 *)(iwqp->host_ctx.va), ctx_info);
/* once tcp_info is set, no need to do it again */
ctx_info->tcp_info_valid = false;
ctx_info->iwarp_info_valid = false;
+ ctx_info->add_to_qoslist = false;
}
/**
* i40iw_cm_disconn - when a connection is being closed
* @iwqp: associate qp for the connection
*/
-int i40iw_cm_disconn(struct i40iw_qp *iwqp)
+void i40iw_cm_disconn(struct i40iw_qp *iwqp)
{
struct disconn_work *work;
struct i40iw_device *iwdev = iwqp->iwdev;
struct i40iw_cm_core *cm_core = &iwdev->cm_core;
+ unsigned long flags;
work = kzalloc(sizeof(*work), GFP_ATOMIC);
if (!work)
- return -ENOMEM; /* Timer will clean up */
-
+ return; /* Timer will clean up */
+
+ spin_lock_irqsave(&iwdev->qptable_lock, flags);
+ if (!iwdev->qp_table[iwqp->ibqp.qp_num]) {
+ spin_unlock_irqrestore(&iwdev->qptable_lock, flags);
+ i40iw_debug(&iwdev->sc_dev, I40IW_DEBUG_CM,
+ "%s qp_id %d is already freed\n",
+ __func__, iwqp->ibqp.qp_num);
+ kfree(work);
+ return;
+ }
i40iw_add_ref(&iwqp->ibqp);
+ spin_unlock_irqrestore(&iwdev->qptable_lock, flags);
+
work->iwqp = iwqp;
INIT_WORK(&work->work, i40iw_disconnect_worker);
queue_work(cm_core->disconn_wq, &work->work);
- return 0;
+ return;
}
/**
@@ -3432,7 +3473,7 @@ static void i40iw_cm_disconn_true(struct i40iw_qp *iwqp)
*terminate-handler to issue cm_disconn which can re-free
*a QP even after its refcnt=0.
*/
- del_timer(&iwqp->terminate_timer);
+ i40iw_terminate_del_timer(qp);
if (!iwqp->flush_issued) {
iwqp->flush_issued = 1;
issue_flush = 1;
@@ -3462,7 +3503,7 @@ static void i40iw_cm_disconn_true(struct i40iw_qp *iwqp)
/* Flush the queues */
i40iw_flush_wqes(iwdev, iwqp);
- if (qp->term_flags) {
+ if (qp->term_flags && iwqp->ibqp.event_handler) {
ibevent.device = iwqp->ibqp.device;
ibevent.event = (qp->eventtype == TERM_EVENT_QP_FATAL) ?
IB_EVENT_QP_FATAL : IB_EVENT_QP_ACCESS_ERR;
@@ -3571,7 +3612,7 @@ int i40iw_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
iwqp->cm_node = (void *)cm_node;
cm_node->iwqp = iwqp;
- buf_len = conn_param->private_data_len + I40IW_MAX_IETF_SIZE + MPA_ZERO_PAD_LEN;
+ buf_len = conn_param->private_data_len + I40IW_MAX_IETF_SIZE;
status = i40iw_allocate_dma_mem(dev->hw, &iwqp->ietf_mem, buf_len, 1);
@@ -3605,18 +3646,10 @@ int i40iw_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
iwqp->lsmm_mr = ibmr;
if (iwqp->page)
iwqp->sc_qp.qp_uk.sq_base = kmap(iwqp->page);
- if (is_remote_ne020_or_chelsio(cm_node))
- dev->iw_priv_qp_ops->qp_send_lsmm(
- &iwqp->sc_qp,
+ dev->iw_priv_qp_ops->qp_send_lsmm(&iwqp->sc_qp,
iwqp->ietf_mem.va,
(accept.size + conn_param->private_data_len),
ibmr->lkey);
- else
- dev->iw_priv_qp_ops->qp_send_lsmm(
- &iwqp->sc_qp,
- iwqp->ietf_mem.va,
- (accept.size + conn_param->private_data_len + MPA_ZERO_PAD_LEN),
- ibmr->lkey);
} else {
if (iwqp->page)
@@ -3714,6 +3747,7 @@ int i40iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
struct sockaddr_in6 *raddr6;
bool qhash_set = false;
int apbvt_set = 0;
+ int err = 0;
enum i40iw_status_code status;
ibqp = i40iw_get_qp(cm_id->device, conn_param->qpn);
@@ -3759,6 +3793,10 @@ int i40iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
i40iw_netdev_vlan_ipv6(cm_info.loc_addr, &cm_info.vlan_id, NULL);
}
cm_info.cm_id = cm_id;
+ cm_info.tos = cm_id->tos;
+ cm_info.user_pri = rt_tos2priority(cm_id->tos);
+ i40iw_debug(&iwdev->sc_dev, I40IW_DEBUG_DCB, "%s TOS:[%d] UP:[%d]\n",
+ __func__, cm_id->tos, cm_info.user_pri);
if ((cm_info.ipv4 && (laddr->sin_addr.s_addr != raddr->sin_addr.s_addr)) ||
(!cm_info.ipv4 && memcmp(laddr6->sin6_addr.in6_u.u6_addr32,
raddr6->sin6_addr.in6_u.u6_addr32,
@@ -3790,8 +3828,11 @@ int i40iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
conn_param->private_data_len,
(void *)conn_param->private_data,
&cm_info);
- if (!cm_node)
- goto err;
+
+ if (IS_ERR(cm_node)) {
+ err = PTR_ERR(cm_node);
+ goto err_out;
+ }
i40iw_record_ird_ord(cm_node, (u16)conn_param->ird, (u16)conn_param->ord);
if (cm_node->send_rdma0_op == SEND_RDMA_READ_ZERO &&
@@ -3805,10 +3846,12 @@ int i40iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
iwqp->cm_id = cm_id;
i40iw_add_ref(&iwqp->ibqp);
- if (cm_node->state == I40IW_CM_STATE_SYN_SENT) {
- if (i40iw_send_syn(cm_node, 0)) {
+ if (cm_node->state != I40IW_CM_STATE_OFFLOADED) {
+ cm_node->state = I40IW_CM_STATE_SYN_SENT;
+ err = i40iw_send_syn(cm_node, 0);
+ if (err) {
i40iw_rem_ref_cm_node(cm_node);
- goto err;
+ goto err_out;
}
}
@@ -3820,24 +3863,25 @@ int i40iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
cm_node->cm_id);
return 0;
-err:
- if (cm_node) {
- if (cm_node->ipv4)
- i40iw_debug(cm_node->dev,
- I40IW_DEBUG_CM,
- "Api - connect() FAILED: dest addr=%pI4",
- cm_node->rem_addr);
- else
- i40iw_debug(cm_node->dev, I40IW_DEBUG_CM,
- "Api - connect() FAILED: dest addr=%pI6",
- cm_node->rem_addr);
- }
- i40iw_manage_qhash(iwdev,
- &cm_info,
- I40IW_QHASH_TYPE_TCP_ESTABLISHED,
- I40IW_QHASH_MANAGE_TYPE_DELETE,
- NULL,
- false);
+err_out:
+ if (cm_info.ipv4)
+ i40iw_debug(&iwdev->sc_dev,
+ I40IW_DEBUG_CM,
+ "Api - connect() FAILED: dest addr=%pI4",
+ cm_info.rem_addr);
+ else
+ i40iw_debug(&iwdev->sc_dev,
+ I40IW_DEBUG_CM,
+ "Api - connect() FAILED: dest addr=%pI6",
+ cm_info.rem_addr);
+
+ if (qhash_set)
+ i40iw_manage_qhash(iwdev,
+ &cm_info,
+ I40IW_QHASH_TYPE_TCP_ESTABLISHED,
+ I40IW_QHASH_MANAGE_TYPE_DELETE,
+ NULL,
+ false);
if (apbvt_set && !i40iw_listen_port_in_use(&iwdev->cm_core,
cm_info.loc_port))
@@ -3846,7 +3890,7 @@ err:
I40IW_MANAGE_APBVT_DEL);
cm_id->rem_ref(cm_id);
iwdev->cm_core.stats_connect_errs++;
- return -ENOMEM;
+ return err;
}
/**
@@ -3904,6 +3948,10 @@ int i40iw_create_listen(struct iw_cm_id *cm_id, int backlog)
cm_id->provider_data = cm_listen_node;
+ cm_listen_node->tos = cm_id->tos;
+ cm_listen_node->user_pri = rt_tos2priority(cm_id->tos);
+ cm_info.user_pri = cm_listen_node->user_pri;
+
if (!cm_listen_node->reused_node) {
if (wildcard) {
if (cm_info.ipv4)
@@ -4124,3 +4172,158 @@ static void i40iw_cm_post_event(struct i40iw_cm_event *event)
queue_work(event->cm_node->cm_core->event_wq, &event->event_work);
}
+
+/**
+ * i40iw_qhash_ctrl - enable/disable qhash for list
+ * @iwdev: device pointer
+ * @parent_listen_node: parent listen node
+ * @nfo: cm info node
+ * @ipaddr: Pointer to IPv4 or IPv6 address
+ * @ipv4: flag indicating IPv4 when true
+ * @ifup: flag indicating interface up when true
+ *
+ * Enables or disables the qhash for the node in the child
+ * listen list that matches ipaddr. If no matching IP is found,
+ * it allocates and adds a new child listen node to the
+ * parent listen node. The listen_list_lock is assumed to be
+ * held when called.
+ */
+static void i40iw_qhash_ctrl(struct i40iw_device *iwdev,
+ struct i40iw_cm_listener *parent_listen_node,
+ struct i40iw_cm_info *nfo,
+ u32 *ipaddr, bool ipv4, bool ifup)
+{
+ struct list_head *child_listen_list = &parent_listen_node->child_listen_list;
+ struct i40iw_cm_listener *child_listen_node;
+ struct list_head *pos, *tpos;
+ enum i40iw_status_code ret;
+ bool node_allocated = false;
+ enum i40iw_quad_hash_manage_type op =
+ ifup ? I40IW_QHASH_MANAGE_TYPE_ADD : I40IW_QHASH_MANAGE_TYPE_DELETE;
+
+ list_for_each_safe(pos, tpos, child_listen_list) {
+ child_listen_node =
+ list_entry(pos,
+ struct i40iw_cm_listener,
+ child_listen_list);
+ if (!memcmp(child_listen_node->loc_addr, ipaddr, ipv4 ? 4 : 16))
+ goto set_qhash;
+ }
+
+ /* if not found then add a child listener if interface is going up */
+ if (!ifup)
+ return;
+ child_listen_node = kzalloc(sizeof(*child_listen_node), GFP_ATOMIC);
+ if (!child_listen_node)
+ return;
+ node_allocated = true;
+ memcpy(child_listen_node, parent_listen_node, sizeof(*child_listen_node));
+
+ memcpy(child_listen_node->loc_addr, ipaddr, ipv4 ? 4 : 16);
+
+set_qhash:
+ memcpy(nfo->loc_addr,
+ child_listen_node->loc_addr,
+ sizeof(nfo->loc_addr));
+ nfo->vlan_id = child_listen_node->vlan_id;
+ ret = i40iw_manage_qhash(iwdev, nfo,
+ I40IW_QHASH_TYPE_TCP_SYN,
+ op,
+ NULL, false);
+ if (!ret) {
+ child_listen_node->qhash_set = ifup;
+ if (node_allocated)
+ list_add(&child_listen_node->child_listen_list,
+ &parent_listen_node->child_listen_list);
+ } else if (node_allocated) {
+ kfree(child_listen_node);
+ }
+}
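
i40iw_qhash_ctrl() is a find-or-create walk: scan the child list for the address, on an ifup miss allocate a new child cloned from the parent, apply the qhash op, and only link the fresh node in if the op succeeded. The same shape on an int-keyed list, mirroring the node_allocated bookkeeping:

#include <stdio.h>
#include <stdlib.h>

struct child {
	int addr;
	int qhash_set;
	struct child *next;
};

static int apply_qhash(int addr, int up)	/* models i40iw_manage_qhash */
{
	printf("%s qhash for %d\n", up ? "add" : "del", addr);
	return 0;	/* success */
}

static void qhash_ctrl(struct child **list, int addr, int ifup)
{
	struct child *c;
	int allocated = 0;

	for (c = *list; c; c = c->next)
		if (c->addr == addr)
			goto set_qhash;

	if (!ifup)
		return;		/* nothing to tear down */
	c = calloc(1, sizeof(*c));
	if (!c)
		return;
	c->addr = addr;
	allocated = 1;

set_qhash:
	if (!apply_qhash(addr, ifup)) {
		c->qhash_set = ifup;
		if (allocated) {	/* link in only after success */
			c->next = *list;
			*list = c;
		}
	} else if (allocated) {
		free(c);
	}
}

int main(void)
{
	struct child *list = NULL;

	qhash_ctrl(&list, 42, 1);	/* miss: allocates, enables qhash */
	qhash_ctrl(&list, 42, 0);	/* hit: disables qhash */
	printf("head qhash_set=%d\n", list->qhash_set);
	return 0;
}
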
+
+/**
+ * i40iw_cm_disconnect_all - disconnect all connected qp's
+ * @iwdev: device pointer
+ */
+void i40iw_cm_disconnect_all(struct i40iw_device *iwdev)
+{
+ struct i40iw_cm_core *cm_core = &iwdev->cm_core;
+ struct list_head *list_core_temp;
+ struct list_head *list_node;
+ struct i40iw_cm_node *cm_node;
+ unsigned long flags;
+ struct list_head connected_list;
+ struct ib_qp_attr attr;
+
+ INIT_LIST_HEAD(&connected_list);
+ spin_lock_irqsave(&cm_core->ht_lock, flags);
+ list_for_each_safe(list_node, list_core_temp, &cm_core->connected_nodes) {
+ cm_node = container_of(list_node, struct i40iw_cm_node, list);
+ atomic_inc(&cm_node->ref_count);
+ list_add(&cm_node->connected_entry, &connected_list);
+ }
+ spin_unlock_irqrestore(&cm_core->ht_lock, flags);
+
+ list_for_each_safe(list_node, list_core_temp, &connected_list) {
+ cm_node = container_of(list_node, struct i40iw_cm_node, connected_entry);
+ attr.qp_state = IB_QPS_ERR;
+ i40iw_modify_qp(&cm_node->iwqp->ibqp, &attr, IB_QP_STATE, NULL);
+ i40iw_rem_ref_cm_node(cm_node);
+ }
+}
+
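i40iw_cm_disconnect_all() follows the usual two-phase teardown: pin each node with a reference and collect it onto a private list while the spinlock is held, then do the slow QP-modify work with the lock dropped. A compact userspace analogue of that snapshot-then-act pattern (invented names; a pthread mutex stands in for the irqsave spinlock):

#include <pthread.h>
#include <stdlib.h>

struct conn {
	struct conn *next;
	int refs;
};

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
static struct conn *connected;			/* lock-protected table */

static void slow_disconnect(struct conn *c) { (void)c; /* QP -> ERR */ }
static void put_conn(struct conn *c) { if (--c->refs == 0) free(c); }

static void disconnect_all(void)
{
	struct conn *c, **pinned;
	size_t n = 0, i = 0;

	pthread_mutex_lock(&table_lock);
	for (c = connected; c; c = c->next)
		n++;
	pinned = malloc(n * sizeof(*pinned));
	if (pinned)
		for (c = connected; c; c = c->next) {
			c->refs++;		/* pin while unlocked */
			pinned[i++] = c;
		}
	pthread_mutex_unlock(&table_lock);

	if (!pinned)
		return;
	for (i = 0; i < n; i++) {		/* slow work, lock dropped */
		slow_disconnect(pinned[i]);
		put_conn(pinned[i]);
	}
	free(pinned);
}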
+/**
+ * i40iw_if_notify - process an interface up/down notification
+ * @iwdev: device pointer
+ * @netdev: network device
+ * @ipaddr: Pointer to IPv4 or IPv6 address
+ * @ipv4: flag indicating IPv4 when true
+ * @ifup: flag indicating interface up when true
+ */
+void i40iw_if_notify(struct i40iw_device *iwdev, struct net_device *netdev,
+ u32 *ipaddr, bool ipv4, bool ifup)
+{
+ struct i40iw_cm_core *cm_core = &iwdev->cm_core;
+ unsigned long flags;
+ struct i40iw_cm_listener *listen_node;
+ static const u32 ip_zero[4] = { 0, 0, 0, 0 };
+ struct i40iw_cm_info nfo;
+ u16 vlan_id = rdma_vlan_dev_vlan_id(netdev);
+ enum i40iw_status_code ret;
+ enum i40iw_quad_hash_manage_type op =
+ ifup ? I40IW_QHASH_MANAGE_TYPE_ADD : I40IW_QHASH_MANAGE_TYPE_DELETE;
+
+ /* Disable or enable qhash for listeners */
+ spin_lock_irqsave(&cm_core->listen_list_lock, flags);
+ list_for_each_entry(listen_node, &cm_core->listen_nodes, list) {
+ if (vlan_id == listen_node->vlan_id &&
+ (!memcmp(listen_node->loc_addr, ipaddr, ipv4 ? 4 : 16) ||
+ !memcmp(listen_node->loc_addr, ip_zero, ipv4 ? 4 : 16))) {
+ memcpy(nfo.loc_addr, listen_node->loc_addr,
+ sizeof(nfo.loc_addr));
+ nfo.loc_port = listen_node->loc_port;
+ nfo.ipv4 = listen_node->ipv4;
+ nfo.vlan_id = listen_node->vlan_id;
+ nfo.user_pri = listen_node->user_pri;
+ if (!list_empty(&listen_node->child_listen_list)) {
+ i40iw_qhash_ctrl(iwdev,
+ listen_node,
+ &nfo,
+ ipaddr, ipv4, ifup);
+ } else if (memcmp(listen_node->loc_addr, ip_zero,
+ ipv4 ? 4 : 16)) {
+ ret = i40iw_manage_qhash(iwdev,
+ &nfo,
+ I40IW_QHASH_TYPE_TCP_SYN,
+ op,
+ NULL,
+ false);
+ if (!ret)
+ listen_node->qhash_set = ifup;
+ }
+ }
+ }
+ spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
+
+ /* disconnect any connected qps on ifdown */
+ if (!ifup)
+ i40iw_cm_disconnect_all(iwdev);
+}
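For context, this is how i40iw_if_notify() would typically be driven; the notifier wiring below is an illustrative sketch, not part of this patch, and to_iwdev() is a hypothetical lookup helper:

static int i40iw_inetaddr_event(struct notifier_block *nb,
				unsigned long event, void *ptr)
{
	struct in_ifaddr *ifa = ptr;
	struct net_device *netdev = ifa->ifa_dev->dev;
	struct i40iw_device *iwdev = to_iwdev(netdev);	/* hypothetical */
	u32 local_ipaddr = ntohl(ifa->ifa_address);

	if (iwdev)
		i40iw_if_notify(iwdev, netdev, &local_ipaddr, true,
				event != NETDEV_DOWN);
	return NOTIFY_DONE;
}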
diff --git a/drivers/infiniband/hw/i40iw/i40iw_cm.h b/drivers/infiniband/hw/i40iw/i40iw_cm.h
index e9046d9f9645..2e52e38ffcf3 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_cm.h
+++ b/drivers/infiniband/hw/i40iw/i40iw_cm.h
@@ -56,8 +56,6 @@
#define I40IW_MAX_IETF_SIZE 32
-#define MPA_ZERO_PAD_LEN 4
-
/* IETF RTR MSG Fields */
#define IETF_PEER_TO_PEER 0x8000
#define IETF_FLPDU_ZERO_LEN 0x4000
@@ -299,6 +297,7 @@ struct i40iw_cm_listener {
enum i40iw_cm_listener_state listener_state;
u32 reused_node;
u8 user_pri;
+ u8 tos;
u16 vlan_id;
bool qhash_set;
bool ipv4;
@@ -341,9 +340,11 @@ struct i40iw_cm_node {
int accept_pend;
struct list_head timer_entry;
struct list_head reset_entry;
+ struct list_head connected_entry;
atomic_t passive_state;
bool qhash_set;
u8 user_pri;
+ u8 tos;
bool ipv4;
bool snd_mark_en;
u16 lsmm_size;
@@ -368,7 +369,8 @@ struct i40iw_cm_info {
u32 rem_addr[4];
u16 vlan_id;
int backlog;
- u16 user_pri;
+ u8 user_pri;
+ u8 tos;
bool ipv4;
};
@@ -445,4 +447,7 @@ int i40iw_arp_table(struct i40iw_device *iwdev,
u8 *mac_addr,
u32 action);
+void i40iw_if_notify(struct i40iw_device *iwdev, struct net_device *netdev,
+ u32 *ipaddr, bool ipv4, bool ifup);
+void i40iw_cm_disconnect_all(struct i40iw_device *iwdev);
#endif /* I40IW_CM_H */
diff --git a/drivers/infiniband/hw/i40iw/i40iw_ctrl.c b/drivers/infiniband/hw/i40iw/i40iw_ctrl.c
index 2c4b4d072d6a..98923a8cf86d 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_ctrl.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_ctrl.c
@@ -103,6 +103,7 @@ static enum i40iw_status_code i40iw_cqp_poll_registers(
if (newtail != tail) {
/* SUCCESS */
I40IW_RING_MOVE_TAIL(cqp->sq_ring);
+ cqp->dev->cqp_cmd_stats[OP_COMPLETED_COMMANDS]++;
return 0;
}
udelay(I40IW_SLEEP_COUNT);
@@ -223,17 +224,150 @@ static enum i40iw_status_code i40iw_sc_parse_fpm_query_buf(
}
/**
+ * i40iw_fill_qos_list - Change all unknown qs handles to available ones
+ * @qs_list: list of qs_handles to be fixed with valid qs_handles
+ */
+static void i40iw_fill_qos_list(u16 *qs_list)
+{
+ u16 qshandle = qs_list[0];
+ int i;
+
+ for (i = 0; i < I40IW_MAX_USER_PRIORITY; i++) {
+ if (qs_list[i] == QS_HANDLE_UNKNOWN)
+ qs_list[i] = qshandle;
+ else
+ qshandle = qs_list[i];
+ }
+}
+
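A standalone sketch of the fix-up above, runnable as-is: unknown slots inherit the nearest earlier valid handle, falling back to qs_list[0]. The constants mirror, but are not, the driver's:

#include <stdio.h>

#define MAX_USER_PRIORITY 8
#define QS_HANDLE_UNKNOWN 0xffff

static void fill_qos_list(unsigned short *qs_list)
{
	unsigned short qshandle = qs_list[0];
	int i;

	for (i = 0; i < MAX_USER_PRIORITY; i++) {
		if (qs_list[i] == QS_HANDLE_UNKNOWN)
			qs_list[i] = qshandle;	/* inherit last valid handle */
		else
			qshandle = qs_list[i];	/* remember new valid handle */
	}
}

int main(void)
{
	unsigned short qs[MAX_USER_PRIORITY] = {
		3, QS_HANDLE_UNKNOWN, QS_HANDLE_UNKNOWN, 7,
		QS_HANDLE_UNKNOWN, 9, QS_HANDLE_UNKNOWN, QS_HANDLE_UNKNOWN,
	};
	int i;

	fill_qos_list(qs);
	for (i = 0; i < MAX_USER_PRIORITY; i++)
		printf("prio %d -> qset %u\n", i, qs[i]);
	/* prints 3 3 3 7 7 9 9 9 */
	return 0;
}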
+/**
+ * i40iw_qp_from_entry - given a list entry, return the containing qp
+ * @entry: list entry embedded in a qp structure
+ */
+static struct i40iw_sc_qp *i40iw_qp_from_entry(struct list_head *entry)
+{
+ if (!entry)
+ return NULL;
+
+ return (struct i40iw_sc_qp *)((char *)entry - offsetof(struct i40iw_sc_qp, list));
+}
+
+/**
+ * i40iw_get_qp - get the next qp from the list given the current qp
+ * @head: list head of qps
+ * @qp: current qp, or NULL to start from the head
+ */
+static struct i40iw_sc_qp *i40iw_get_qp(struct list_head *head, struct i40iw_sc_qp *qp)
+{
+ struct list_head *entry = NULL;
+ struct list_head *lastentry;
+
+ if (list_empty(head))
+ return NULL;
+
+ if (!qp) {
+ entry = head->next;
+ } else {
+ lastentry = &qp->list;
+ entry = (lastentry != head) ? lastentry->next : NULL;
+ }
+
+ return i40iw_qp_from_entry(entry);
+}
+
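Usage note: i40iw_get_qp() is a cursor; pass NULL to fetch the first entry and the previous qp to advance, which is exactly the shape of the loop in i40iw_change_l2params() below. Condensed sketch (visit() is illustrative):

	qp = NULL;
	while ((qp = i40iw_get_qp(&vsi->qos[i].qplist, qp)))
		visit(qp);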
+/**
+ * i40iw_change_l2params - apply new l2 parameters to all qps
+ * @vsi: pointer to the vsi structure
+ * @l2params: new parameters from l2
+ */
+void i40iw_change_l2params(struct i40iw_sc_vsi *vsi, struct i40iw_l2params *l2params)
+{
+ struct i40iw_sc_dev *dev = vsi->dev;
+ struct i40iw_sc_qp *qp = NULL;
+ bool qs_handle_change = false;
+ bool mss_change = false;
+ unsigned long flags;
+ u16 qs_handle;
+ int i;
+
+ if (vsi->mss != l2params->mss) {
+ mss_change = true;
+ vsi->mss = l2params->mss;
+ }
+
+ i40iw_fill_qos_list(l2params->qs_handle_list);
+ for (i = 0; i < I40IW_MAX_USER_PRIORITY; i++) {
+ qs_handle = l2params->qs_handle_list[i];
+ if (vsi->qos[i].qs_handle != qs_handle)
+ qs_handle_change = true;
+ else if (!mss_change)
+ continue; /* no MSS nor qs handle change */
+ spin_lock_irqsave(&vsi->qos[i].lock, flags);
+ qp = i40iw_get_qp(&vsi->qos[i].qplist, qp);
+ while (qp) {
+ if (mss_change)
+ i40iw_qp_mss_modify(dev, qp);
+ if (qs_handle_change) {
+ qp->qs_handle = qs_handle;
+ /* issue cqp suspend command */
+ i40iw_qp_suspend_resume(dev, qp, true);
+ }
+ qp = i40iw_get_qp(&vsi->qos[i].qplist, qp);
+ }
+ spin_unlock_irqrestore(&vsi->qos[i].lock, flags);
+ vsi->qos[i].qs_handle = qs_handle;
+ }
+}
+
+/**
+ * i40iw_qp_rem_qos - remove qp from qos lists during destroy qp
+ * @qp: qp to be removed from qos
+ */
+static void i40iw_qp_rem_qos(struct i40iw_sc_qp *qp)
+{
+ struct i40iw_sc_vsi *vsi = qp->vsi;
+ unsigned long flags;
+
+ if (!qp->on_qoslist)
+ return;
+ spin_lock_irqsave(&vsi->qos[qp->user_pri].lock, flags);
+ list_del(&qp->list);
+ spin_unlock_irqrestore(&vsi->qos[qp->user_pri].lock, flags);
+}
+
+/**
+ * i40iw_qp_add_qos - called during setctx for qp to be added to qos
+ * @qp: qp to be added to qos
+ */
+void i40iw_qp_add_qos(struct i40iw_sc_qp *qp)
+{
+ struct i40iw_sc_vsi *vsi = qp->vsi;
+ unsigned long flags;
+
+ if (qp->on_qoslist)
+ return;
+ spin_lock_irqsave(&vsi->qos[qp->user_pri].lock, flags);
+ qp->qs_handle = vsi->qos[qp->user_pri].qs_handle;
+ list_add(&qp->list, &vsi->qos[qp->user_pri].qplist);
+ qp->on_qoslist = true;
+ spin_unlock_irqrestore(&vsi->qos[qp->user_pri].lock, flags);
+}
+
+/**
* i40iw_sc_pd_init - initialize sc pd struct
* @dev: sc device struct
* @pd: sc pd ptr
* @pd_id: pd_id for allocated pd
+ * @abi_ver: ABI version from user context, -1 if not valid
*/
static void i40iw_sc_pd_init(struct i40iw_sc_dev *dev,
struct i40iw_sc_pd *pd,
- u16 pd_id)
+ u16 pd_id,
+ int abi_ver)
{
pd->size = sizeof(*pd);
pd->pd_id = pd_id;
+ pd->abi_ver = abi_ver;
pd->dev = dev;
}
@@ -292,6 +426,9 @@ static enum i40iw_status_code i40iw_sc_cqp_init(struct i40iw_sc_cqp *cqp,
info->dev->cqp = cqp;
I40IW_RING_INIT(cqp->sq_ring, cqp->sq_size);
+ cqp->dev->cqp_cmd_stats[OP_REQUESTED_COMMANDS] = 0;
+ cqp->dev->cqp_cmd_stats[OP_COMPLETED_COMMANDS] = 0;
+
i40iw_debug(cqp->dev, I40IW_DEBUG_WQE,
"%s: sq_size[%04d] hw_sq_size[%04d] sq_base[%p] sq_pa[%llxh] cqp[%p] polarity[x%04X]\n",
__func__, cqp->sq_size, cqp->hw_sq_size,
@@ -302,12 +439,10 @@ static enum i40iw_status_code i40iw_sc_cqp_init(struct i40iw_sc_cqp *cqp,
/**
* i40iw_sc_cqp_create - create cqp during bringup
* @cqp: struct for cqp hw
- * @disable_pfpdus: if pfpdu to be disabled
* @maj_err: If error, major err number
* @min_err: If error, minor err number
*/
static enum i40iw_status_code i40iw_sc_cqp_create(struct i40iw_sc_cqp *cqp,
- bool disable_pfpdus,
u16 *maj_err,
u16 *min_err)
{
@@ -326,9 +461,6 @@ static enum i40iw_status_code i40iw_sc_cqp_create(struct i40iw_sc_cqp *cqp,
temp = LS_64(cqp->hw_sq_size, I40IW_CQPHC_SQSIZE) |
LS_64(cqp->struct_ver, I40IW_CQPHC_SVER);
- if (disable_pfpdus)
- temp |= LS_64(1, I40IW_CQPHC_DISABLE_PFPDUS);
-
set_64bit_val(cqp->host_ctx, 0, temp);
set_64bit_val(cqp->host_ctx, 8, cqp->sq_pa);
temp = LS_64(cqp->enabled_vf_count, I40IW_CQPHC_ENABLED_VFS) |
@@ -424,6 +556,7 @@ u64 *i40iw_sc_cqp_get_next_send_wqe(struct i40iw_sc_cqp *cqp, u64 scratch)
return NULL;
}
I40IW_ATOMIC_RING_MOVE_HEAD(cqp->sq_ring, wqe_idx, ret_code);
+ cqp->dev->cqp_cmd_stats[OP_REQUESTED_COMMANDS]++;
if (ret_code)
return NULL;
if (!wqe_idx)
@@ -559,6 +692,8 @@ static enum i40iw_status_code i40iw_sc_ccq_get_cqe_info(
I40IW_RING_GETCURRENT_HEAD(ccq->cq_uk.cq_ring));
wmb(); /* write shadow area before tail */
I40IW_RING_MOVE_TAIL(cqp->sq_ring);
+ ccq->dev->cqp_cmd_stats[OP_COMPLETED_COMMANDS]++;
+
return ret_code;
}
@@ -1051,6 +1186,7 @@ static enum i40iw_status_code i40iw_sc_manage_qhash_table_entry(
u64 qw1 = 0;
u64 qw2 = 0;
u64 temp;
+ struct i40iw_sc_vsi *vsi = info->vsi;
wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
if (!wqe)
@@ -1082,7 +1218,7 @@ static enum i40iw_status_code i40iw_sc_manage_qhash_table_entry(
LS_64(info->dest_ip[2], I40IW_CQPSQ_QHASH_ADDR2) |
LS_64(info->dest_ip[3], I40IW_CQPSQ_QHASH_ADDR3));
}
- qw2 = LS_64(cqp->dev->qs_handle, I40IW_CQPSQ_QHASH_QS_HANDLE);
+ qw2 = LS_64(vsi->qos[info->user_pri].qs_handle, I40IW_CQPSQ_QHASH_QS_HANDLE);
if (info->vlan_valid)
qw2 |= LS_64(info->vlan_id, I40IW_CQPSQ_QHASH_VLANID);
set_64bit_val(wqe, 16, qw2);
@@ -2103,6 +2239,7 @@ static enum i40iw_status_code i40iw_sc_qp_init(struct i40iw_sc_qp *qp,
u32 offset;
qp->dev = info->pd->dev;
+ qp->vsi = info->vsi;
qp->sq_pa = info->sq_pa;
qp->rq_pa = info->rq_pa;
qp->hw_host_ctx_pa = info->host_ctx_pa;
@@ -2118,6 +2255,7 @@ static enum i40iw_status_code i40iw_sc_qp_init(struct i40iw_sc_qp *qp,
offset);
info->qp_uk_init_info.wqe_alloc_reg = wqe_alloc_reg;
+ info->qp_uk_init_info.abi_ver = qp->pd->abi_ver;
ret_code = i40iw_qp_uk_init(&qp->qp_uk, &info->qp_uk_init_info);
if (ret_code)
return ret_code;
@@ -2136,10 +2274,21 @@ static enum i40iw_status_code i40iw_sc_qp_init(struct i40iw_sc_qp *qp,
false);
i40iw_debug(qp->dev, I40IW_DEBUG_WQE, "%s: hw_sq_size[%04d] sq_ring.size[%04d]\n",
__func__, qp->hw_sq_size, qp->qp_uk.sq_ring.size);
- ret_code = i40iw_fragcnt_to_wqesize_rq(qp->qp_uk.max_rq_frag_cnt,
- &wqe_size);
- if (ret_code)
- return ret_code;
+
+ switch (qp->pd->abi_ver) {
+ case 4:
+ ret_code = i40iw_fragcnt_to_wqesize_rq(qp->qp_uk.max_rq_frag_cnt,
+ &wqe_size);
+ if (ret_code)
+ return ret_code;
+ break;
+ case 5: /* fallthrough until next ABI version */
+ default:
+ if (qp->qp_uk.max_rq_frag_cnt > I40IW_MAX_WQ_FRAGMENT_COUNT)
+ return I40IW_ERR_INVALID_FRAG_COUNT;
+ wqe_size = I40IW_MAX_WQE_SIZE_RQ;
+ break;
+ }
qp->hw_rq_size = i40iw_get_encoded_wqe_size(qp->qp_uk.rq_size *
(wqe_size / I40IW_QP_WQE_MIN_SIZE), false);
i40iw_debug(qp->dev, I40IW_DEBUG_WQE,
@@ -2151,7 +2300,7 @@ static enum i40iw_status_code i40iw_sc_qp_init(struct i40iw_sc_qp *qp,
qp->rq_tph_en = info->rq_tph_en;
qp->rcv_tph_en = info->rcv_tph_en;
qp->xmit_tph_en = info->xmit_tph_en;
- qp->qs_handle = qp->pd->dev->qs_handle;
+ qp->qs_handle = qp->vsi->qos[qp->user_pri].qs_handle;
qp->exception_lan_queue = qp->pd->dev->exception_lan_queue;
return 0;
@@ -2296,6 +2445,7 @@ static enum i40iw_status_code i40iw_sc_qp_destroy(
struct i40iw_sc_cqp *cqp;
u64 header;
+ i40iw_qp_rem_qos(qp);
cqp = qp->pd->dev->cqp;
wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
if (!wqe)
@@ -2443,10 +2593,20 @@ static enum i40iw_status_code i40iw_sc_qp_setctx(
{
struct i40iwarp_offload_info *iw;
struct i40iw_tcp_offload_info *tcp;
+ struct i40iw_sc_vsi *vsi;
+ struct i40iw_sc_dev *dev;
u64 qw0, qw3, qw7 = 0;
iw = info->iwarp_info;
tcp = info->tcp_info;
+ vsi = qp->vsi;
+ dev = qp->dev;
+ if (info->add_to_qoslist) {
+ qp->user_pri = info->user_pri;
+ i40iw_qp_add_qos(qp);
+ i40iw_debug(qp->dev, I40IW_DEBUG_DCB, "%s qp[%d] UP[%d] qset[%d]\n",
+ __func__, qp->qp_uk.qp_id, qp->user_pri, qp->qs_handle);
+ }
qw0 = LS_64(qp->qp_uk.rq_wqe_size, I40IWQPC_RQWQESIZE) |
LS_64(info->err_rq_idx_valid, I40IWQPC_ERR_RQ_IDX_VALID) |
LS_64(qp->rcv_tph_en, I40IWQPC_RCVTPHEN) |
@@ -2487,16 +2647,14 @@ static enum i40iw_status_code i40iw_sc_qp_setctx(
LS_64(iw->rdmap_ver, I40IWQPC_RDMAP_VER);
qw7 |= LS_64(iw->pd_id, I40IWQPC_PDIDX);
- set_64bit_val(qp_ctx, 144, qp->q2_pa);
+ set_64bit_val(qp_ctx,
+ 144,
+ LS_64(qp->q2_pa, I40IWQPC_Q2ADDR) |
+ LS_64(vsi->fcn_id, I40IWQPC_STAT_INDEX));
set_64bit_val(qp_ctx,
152,
LS_64(iw->last_byte_sent, I40IWQPC_LASTBYTESENT));
- /*
- * Hard-code IRD_SIZE to hw-limit, 128, in qpctx, i.e matching an
- *advertisable IRD of 64
- */
- iw->ird_size = I40IW_QPCTX_ENCD_MAXIRD;
set_64bit_val(qp_ctx,
160,
LS_64(iw->ord_size, I40IWQPC_ORDSIZE) |
@@ -2507,6 +2665,9 @@ static enum i40iw_status_code i40iw_sc_qp_setctx(
LS_64(iw->bind_en, I40IWQPC_BINDEN) |
LS_64(iw->fast_reg_en, I40IWQPC_FASTREGEN) |
LS_64(iw->priv_mode_en, I40IWQPC_PRIVEN) |
+ LS_64((((vsi->stats_fcn_id_alloc) &&
+ (dev->is_pf) && (vsi->fcn_id >= I40IW_FIRST_NON_PF_STAT)) ? 1 : 0),
+ I40IWQPC_USESTATSINSTANCE) |
LS_64(1, I40IWQPC_IWARPMODE) |
LS_64(iw->rcv_mark_en, I40IWQPC_RCVMARKERS) |
LS_64(iw->align_hdrs, I40IWQPC_ALIGNHDRS) |
@@ -2623,7 +2784,9 @@ static enum i40iw_status_code i40iw_sc_alloc_stag(
u64 *wqe;
struct i40iw_sc_cqp *cqp;
u64 header;
+ enum i40iw_page_size page_size;
+ page_size = (info->page_size == 0x200000) ? I40IW_PAGE_SIZE_2M : I40IW_PAGE_SIZE_4K;
cqp = dev->cqp;
wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
if (!wqe)
@@ -2643,7 +2806,7 @@ static enum i40iw_status_code i40iw_sc_alloc_stag(
LS_64(1, I40IW_CQPSQ_STAG_MR) |
LS_64(info->access_rights, I40IW_CQPSQ_STAG_ARIGHTS) |
LS_64(info->chunk_size, I40IW_CQPSQ_STAG_LPBLSIZE) |
- LS_64(info->page_size, I40IW_CQPSQ_STAG_HPAGESIZE) |
+ LS_64(page_size, I40IW_CQPSQ_STAG_HPAGESIZE) |
LS_64(info->remote_access, I40IW_CQPSQ_STAG_REMACCENABLED) |
LS_64(info->use_hmc_fcn_index, I40IW_CQPSQ_STAG_USEHMCFNIDX) |
LS_64(info->use_pf_rid, I40IW_CQPSQ_STAG_USEPFRID) |
@@ -2679,7 +2842,9 @@ static enum i40iw_status_code i40iw_sc_mr_reg_non_shared(
u32 pble_obj_cnt;
bool remote_access;
u8 addr_type;
+ enum i40iw_page_size page_size;
+ page_size = (info->page_size == 0x200000) ? I40IW_PAGE_SIZE_2M : I40IW_PAGE_SIZE_4K;
if (info->access_rights & (I40IW_ACCESS_FLAGS_REMOTEREAD_ONLY |
I40IW_ACCESS_FLAGS_REMOTEWRITE_ONLY))
remote_access = true;
@@ -2722,7 +2887,7 @@ static enum i40iw_status_code i40iw_sc_mr_reg_non_shared(
header = LS_64(I40IW_CQP_OP_REG_MR, I40IW_CQPSQ_OPCODE) |
LS_64(1, I40IW_CQPSQ_STAG_MR) |
LS_64(info->chunk_size, I40IW_CQPSQ_STAG_LPBLSIZE) |
- LS_64(info->page_size, I40IW_CQPSQ_STAG_HPAGESIZE) |
+ LS_64(page_size, I40IW_CQPSQ_STAG_HPAGESIZE) |
LS_64(info->access_rights, I40IW_CQPSQ_STAG_ARIGHTS) |
LS_64(remote_access, I40IW_CQPSQ_STAG_REMACCENABLED) |
LS_64(addr_type, I40IW_CQPSQ_STAG_VABASEDTO) |
@@ -2937,7 +3102,9 @@ enum i40iw_status_code i40iw_sc_mr_fast_register(
u64 temp, header;
u64 *wqe;
u32 wqe_idx;
+ enum i40iw_page_size page_size;
+ page_size = (info->page_size == 0x200000) ? I40IW_PAGE_SIZE_2M : I40IW_PAGE_SIZE_4K;
wqe = i40iw_qp_get_next_send_wqe(&qp->qp_uk, &wqe_idx, I40IW_QP_WQE_MIN_SIZE,
0, info->wr_id);
if (!wqe)
@@ -2964,7 +3131,7 @@ enum i40iw_status_code i40iw_sc_mr_fast_register(
LS_64(info->stag_idx, I40IWQPSQ_STAGINDEX) |
LS_64(I40IWQP_OP_FAST_REGISTER, I40IWQPSQ_OPCODE) |
LS_64(info->chunk_size, I40IWQPSQ_LPBLSIZE) |
- LS_64(info->page_size, I40IWQPSQ_HPAGESIZE) |
+ LS_64(page_size, I40IWQPSQ_HPAGESIZE) |
LS_64(info->access_rights, I40IWQPSQ_STAGRIGHTS) |
LS_64(info->addr_type, I40IWQPSQ_VABASEDTO) |
LS_64(info->read_fence, I40IWQPSQ_READFENCE) |
@@ -3959,7 +4126,7 @@ enum i40iw_status_code i40iw_process_cqp_cmd(struct i40iw_sc_dev *dev,
struct cqp_commands_info *pcmdinfo)
{
enum i40iw_status_code status = 0;
- unsigned long flags;
+ unsigned long flags;
spin_lock_irqsave(&dev->cqp_lock, flags);
if (list_empty(&dev->cqp_cmd_head) && !i40iw_ring_full(dev->cqp))
@@ -3978,7 +4145,7 @@ enum i40iw_status_code i40iw_process_bh(struct i40iw_sc_dev *dev)
{
enum i40iw_status_code status = 0;
struct cqp_commands_info *pcmdinfo;
- unsigned long flags;
+ unsigned long flags;
spin_lock_irqsave(&dev->cqp_lock, flags);
while (!list_empty(&dev->cqp_cmd_head) && !i40iw_ring_full(dev->cqp)) {
@@ -4055,7 +4222,6 @@ static int i40iw_bld_terminate_hdr(struct i40iw_sc_qp *qp,
u16 ddp_seg_len;
int copy_len = 0;
u8 is_tagged = 0;
- enum i40iw_flush_opcode flush_code = FLUSH_INVALID;
u32 opcode;
struct i40iw_terminate_hdr *termhdr;
@@ -4228,9 +4394,6 @@ static int i40iw_bld_terminate_hdr(struct i40iw_sc_qp *qp,
if (copy_len)
memcpy(termhdr + 1, pkt, copy_len);
- if (flush_code && !info->in_rdrsp_wr)
- qp->sq_flush = (info->sq) ? true : false;
-
return sizeof(struct i40iw_terminate_hdr) + copy_len;
}
@@ -4321,286 +4484,370 @@ void i40iw_terminate_received(struct i40iw_sc_qp *qp, struct i40iw_aeqe_info *in
}
/**
- * i40iw_hw_stat_init - Initiliaze HW stats table
- * @devstat: pestat struct
+ * i40iw_sc_vsi_init - Initialize virtual device
+ * @vsi: pointer to the vsi structure
+ * @info: parameters to initialize vsi
+ */
+void i40iw_sc_vsi_init(struct i40iw_sc_vsi *vsi, struct i40iw_vsi_init_info *info)
+{
+ int i;
+
+ vsi->dev = info->dev;
+ vsi->back_vsi = info->back_vsi;
+ vsi->mss = info->params->mss;
+ i40iw_fill_qos_list(info->params->qs_handle_list);
+
+ for (i = 0; i < I40IW_MAX_USER_PRIORITY; i++) {
+ vsi->qos[i].qs_handle =
+ info->params->qs_handle_list[i];
+ i40iw_debug(vsi->dev, I40IW_DEBUG_DCB, "qset[%d]: %d\n", i, vsi->qos[i].qs_handle);
+ spin_lock_init(&vsi->qos[i].lock);
+ INIT_LIST_HEAD(&vsi->qos[i].qplist);
+ }
+}
+
+/**
+ * i40iw_hw_stats_init - Initialize HW stats table
+ * @stats: pestat struct
* @fcn_idx: PCI fn id
- * @hw: PF i40iw_hw structure.
* @is_pf: Is it a PF?
*
- * Populate the HW stat table with register offset addr for each
- * stat. And start the perioidic stats timer.
+ * Populate the HW stats table with the register offset addr for each
+ * stat and start the periodic stats timer.
*/
-static void i40iw_hw_stat_init(struct i40iw_dev_pestat *devstat,
- u8 fcn_idx,
- struct i40iw_hw *hw, bool is_pf)
+void i40iw_hw_stats_init(struct i40iw_vsi_pestat *stats, u8 fcn_idx, bool is_pf)
{
- u32 stat_reg_offset;
- u32 stat_index;
- struct i40iw_dev_hw_stat_offsets *stat_table =
- &devstat->hw_stat_offsets;
- struct i40iw_dev_hw_stats *last_rd_stats = &devstat->last_read_hw_stats;
-
- devstat->hw = hw;
+ u32 stats_reg_offset;
+ u32 stats_index;
+ struct i40iw_dev_hw_stats_offsets *stats_table =
+ &stats->hw_stats_offsets;
+ struct i40iw_dev_hw_stats *last_rd_stats = &stats->last_read_hw_stats;
if (is_pf) {
- stat_table->stat_offset_32[I40IW_HW_STAT_INDEX_IP4RXDISCARD] =
+ stats_table->stats_offset_32[I40IW_HW_STAT_INDEX_IP4RXDISCARD] =
I40E_GLPES_PFIP4RXDISCARD(fcn_idx);
- stat_table->stat_offset_32[I40IW_HW_STAT_INDEX_IP4RXTRUNC] =
+ stats_table->stats_offset_32[I40IW_HW_STAT_INDEX_IP4RXTRUNC] =
I40E_GLPES_PFIP4RXTRUNC(fcn_idx);
- stat_table->stat_offset_32[I40IW_HW_STAT_INDEX_IP4TXNOROUTE] =
+ stats_table->stats_offset_32[I40IW_HW_STAT_INDEX_IP4TXNOROUTE] =
I40E_GLPES_PFIP4TXNOROUTE(fcn_idx);
- stat_table->stat_offset_32[I40IW_HW_STAT_INDEX_IP6RXDISCARD] =
+ stats_table->stats_offset_32[I40IW_HW_STAT_INDEX_IP6RXDISCARD] =
I40E_GLPES_PFIP6RXDISCARD(fcn_idx);
- stat_table->stat_offset_32[I40IW_HW_STAT_INDEX_IP6RXTRUNC] =
+ stats_table->stats_offset_32[I40IW_HW_STAT_INDEX_IP6RXTRUNC] =
I40E_GLPES_PFIP6RXTRUNC(fcn_idx);
- stat_table->stat_offset_32[I40IW_HW_STAT_INDEX_IP6TXNOROUTE] =
+ stats_table->stats_offset_32[I40IW_HW_STAT_INDEX_IP6TXNOROUTE] =
I40E_GLPES_PFIP6TXNOROUTE(fcn_idx);
- stat_table->stat_offset_32[I40IW_HW_STAT_INDEX_TCPRTXSEG] =
+ stats_table->stats_offset_32[I40IW_HW_STAT_INDEX_TCPRTXSEG] =
I40E_GLPES_PFTCPRTXSEG(fcn_idx);
- stat_table->stat_offset_32[I40IW_HW_STAT_INDEX_TCPRXOPTERR] =
+ stats_table->stats_offset_32[I40IW_HW_STAT_INDEX_TCPRXOPTERR] =
I40E_GLPES_PFTCPRXOPTERR(fcn_idx);
- stat_table->stat_offset_32[I40IW_HW_STAT_INDEX_TCPRXPROTOERR] =
+ stats_table->stats_offset_32[I40IW_HW_STAT_INDEX_TCPRXPROTOERR] =
I40E_GLPES_PFTCPRXPROTOERR(fcn_idx);
- stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP4RXOCTS] =
+ stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP4RXOCTS] =
I40E_GLPES_PFIP4RXOCTSLO(fcn_idx);
- stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP4RXPKTS] =
+ stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP4RXPKTS] =
I40E_GLPES_PFIP4RXPKTSLO(fcn_idx);
- stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP4RXFRAGS] =
+ stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP4RXFRAGS] =
I40E_GLPES_PFIP4RXFRAGSLO(fcn_idx);
- stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP4RXMCPKTS] =
+ stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP4RXMCPKTS] =
I40E_GLPES_PFIP4RXMCPKTSLO(fcn_idx);
- stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP4TXOCTS] =
+ stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP4TXOCTS] =
I40E_GLPES_PFIP4TXOCTSLO(fcn_idx);
- stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP4TXPKTS] =
+ stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP4TXPKTS] =
I40E_GLPES_PFIP4TXPKTSLO(fcn_idx);
- stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP4TXFRAGS] =
+ stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP4TXFRAGS] =
I40E_GLPES_PFIP4TXFRAGSLO(fcn_idx);
- stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP4TXMCPKTS] =
+ stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP4TXMCPKTS] =
I40E_GLPES_PFIP4TXMCPKTSLO(fcn_idx);
- stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP6RXOCTS] =
+ stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP6RXOCTS] =
I40E_GLPES_PFIP6RXOCTSLO(fcn_idx);
- stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP6RXPKTS] =
+ stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP6RXPKTS] =
I40E_GLPES_PFIP6RXPKTSLO(fcn_idx);
- stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP6RXFRAGS] =
+ stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP6RXFRAGS] =
I40E_GLPES_PFIP6RXFRAGSLO(fcn_idx);
- stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP6RXMCPKTS] =
+ stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP6RXMCPKTS] =
I40E_GLPES_PFIP6RXMCPKTSLO(fcn_idx);
- stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP6TXOCTS] =
+ stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP6TXOCTS] =
I40E_GLPES_PFIP6TXOCTSLO(fcn_idx);
- stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP6TXPKTS] =
+ stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP6TXPKTS] =
I40E_GLPES_PFIP6TXPKTSLO(fcn_idx);
- stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP6TXPKTS] =
+ stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP6TXPKTS] =
I40E_GLPES_PFIP6TXPKTSLO(fcn_idx);
- stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP6TXFRAGS] =
+ stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP6TXFRAGS] =
I40E_GLPES_PFIP6TXFRAGSLO(fcn_idx);
- stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_TCPRXSEGS] =
+ stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_TCPRXSEGS] =
I40E_GLPES_PFTCPRXSEGSLO(fcn_idx);
- stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_TCPTXSEG] =
+ stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_TCPTXSEG] =
I40E_GLPES_PFTCPTXSEGLO(fcn_idx);
- stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_RDMARXRDS] =
+ stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_RDMARXRDS] =
I40E_GLPES_PFRDMARXRDSLO(fcn_idx);
- stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_RDMARXSNDS] =
+ stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_RDMARXSNDS] =
I40E_GLPES_PFRDMARXSNDSLO(fcn_idx);
- stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_RDMARXWRS] =
+ stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_RDMARXWRS] =
I40E_GLPES_PFRDMARXWRSLO(fcn_idx);
- stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_RDMATXRDS] =
+ stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_RDMATXRDS] =
I40E_GLPES_PFRDMATXRDSLO(fcn_idx);
- stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_RDMATXSNDS] =
+ stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_RDMATXSNDS] =
I40E_GLPES_PFRDMATXSNDSLO(fcn_idx);
- stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_RDMATXWRS] =
+ stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_RDMATXWRS] =
I40E_GLPES_PFRDMATXWRSLO(fcn_idx);
- stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_RDMAVBND] =
+ stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_RDMAVBND] =
I40E_GLPES_PFRDMAVBNDLO(fcn_idx);
- stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_RDMAVINV] =
+ stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_RDMAVINV] =
I40E_GLPES_PFRDMAVINVLO(fcn_idx);
} else {
- stat_table->stat_offset_32[I40IW_HW_STAT_INDEX_IP4RXDISCARD] =
+ stats_table->stats_offset_32[I40IW_HW_STAT_INDEX_IP4RXDISCARD] =
I40E_GLPES_VFIP4RXDISCARD(fcn_idx);
- stat_table->stat_offset_32[I40IW_HW_STAT_INDEX_IP4RXTRUNC] =
+ stats_table->stats_offset_32[I40IW_HW_STAT_INDEX_IP4RXTRUNC] =
I40E_GLPES_VFIP4RXTRUNC(fcn_idx);
- stat_table->stat_offset_32[I40IW_HW_STAT_INDEX_IP4TXNOROUTE] =
+ stats_table->stats_offset_32[I40IW_HW_STAT_INDEX_IP4TXNOROUTE] =
I40E_GLPES_VFIP4TXNOROUTE(fcn_idx);
- stat_table->stat_offset_32[I40IW_HW_STAT_INDEX_IP6RXDISCARD] =
+ stats_table->stats_offset_32[I40IW_HW_STAT_INDEX_IP6RXDISCARD] =
I40E_GLPES_VFIP6RXDISCARD(fcn_idx);
- stat_table->stat_offset_32[I40IW_HW_STAT_INDEX_IP6RXTRUNC] =
+ stats_table->stats_offset_32[I40IW_HW_STAT_INDEX_IP6RXTRUNC] =
I40E_GLPES_VFIP6RXTRUNC(fcn_idx);
- stat_table->stat_offset_32[I40IW_HW_STAT_INDEX_IP6TXNOROUTE] =
+ stats_table->stats_offset_32[I40IW_HW_STAT_INDEX_IP6TXNOROUTE] =
I40E_GLPES_VFIP6TXNOROUTE(fcn_idx);
- stat_table->stat_offset_32[I40IW_HW_STAT_INDEX_TCPRTXSEG] =
+ stats_table->stats_offset_32[I40IW_HW_STAT_INDEX_TCPRTXSEG] =
I40E_GLPES_VFTCPRTXSEG(fcn_idx);
- stat_table->stat_offset_32[I40IW_HW_STAT_INDEX_TCPRXOPTERR] =
+ stats_table->stats_offset_32[I40IW_HW_STAT_INDEX_TCPRXOPTERR] =
I40E_GLPES_VFTCPRXOPTERR(fcn_idx);
- stat_table->stat_offset_32[I40IW_HW_STAT_INDEX_TCPRXPROTOERR] =
+ stats_table->stats_offset_32[I40IW_HW_STAT_INDEX_TCPRXPROTOERR] =
I40E_GLPES_VFTCPRXPROTOERR(fcn_idx);
- stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP4RXOCTS] =
+ stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP4RXOCTS] =
I40E_GLPES_VFIP4RXOCTSLO(fcn_idx);
- stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP4RXPKTS] =
+ stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP4RXPKTS] =
I40E_GLPES_VFIP4RXPKTSLO(fcn_idx);
- stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP4RXFRAGS] =
+ stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP4RXFRAGS] =
I40E_GLPES_VFIP4RXFRAGSLO(fcn_idx);
- stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP4RXMCPKTS] =
+ stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP4RXMCPKTS] =
I40E_GLPES_VFIP4RXMCPKTSLO(fcn_idx);
- stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP4TXOCTS] =
+ stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP4TXOCTS] =
I40E_GLPES_VFIP4TXOCTSLO(fcn_idx);
- stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP4TXPKTS] =
+ stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP4TXPKTS] =
I40E_GLPES_VFIP4TXPKTSLO(fcn_idx);
- stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP4TXFRAGS] =
+ stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP4TXFRAGS] =
I40E_GLPES_VFIP4TXFRAGSLO(fcn_idx);
- stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP4TXMCPKTS] =
+ stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP4TXMCPKTS] =
I40E_GLPES_VFIP4TXMCPKTSLO(fcn_idx);
- stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP6RXOCTS] =
+ stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP6RXOCTS] =
I40E_GLPES_VFIP6RXOCTSLO(fcn_idx);
- stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP6RXPKTS] =
+ stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP6RXPKTS] =
I40E_GLPES_VFIP6RXPKTSLO(fcn_idx);
- stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP6RXFRAGS] =
+ stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP6RXFRAGS] =
I40E_GLPES_VFIP6RXFRAGSLO(fcn_idx);
- stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP6RXMCPKTS] =
+ stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP6RXMCPKTS] =
I40E_GLPES_VFIP6RXMCPKTSLO(fcn_idx);
- stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP6TXOCTS] =
+ stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP6TXOCTS] =
I40E_GLPES_VFIP6TXOCTSLO(fcn_idx);
- stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP6TXPKTS] =
+ stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP6TXPKTS] =
I40E_GLPES_VFIP6TXPKTSLO(fcn_idx);
- stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP6TXPKTS] =
+ stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP6TXPKTS] =
I40E_GLPES_VFIP6TXPKTSLO(fcn_idx);
- stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP6TXFRAGS] =
+ stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP6TXFRAGS] =
I40E_GLPES_VFIP6TXFRAGSLO(fcn_idx);
- stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_TCPRXSEGS] =
+ stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_TCPRXSEGS] =
I40E_GLPES_VFTCPRXSEGSLO(fcn_idx);
- stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_TCPTXSEG] =
+ stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_TCPTXSEG] =
I40E_GLPES_VFTCPTXSEGLO(fcn_idx);
- stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_RDMARXRDS] =
+ stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_RDMARXRDS] =
I40E_GLPES_VFRDMARXRDSLO(fcn_idx);
- stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_RDMARXSNDS] =
+ stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_RDMARXSNDS] =
I40E_GLPES_VFRDMARXSNDSLO(fcn_idx);
- stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_RDMARXWRS] =
+ stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_RDMARXWRS] =
I40E_GLPES_VFRDMARXWRSLO(fcn_idx);
- stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_RDMATXRDS] =
+ stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_RDMATXRDS] =
I40E_GLPES_VFRDMATXRDSLO(fcn_idx);
- stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_RDMATXSNDS] =
+ stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_RDMATXSNDS] =
I40E_GLPES_VFRDMATXSNDSLO(fcn_idx);
- stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_RDMATXWRS] =
+ stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_RDMATXWRS] =
I40E_GLPES_VFRDMATXWRSLO(fcn_idx);
- stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_RDMAVBND] =
+ stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_RDMAVBND] =
I40E_GLPES_VFRDMAVBNDLO(fcn_idx);
- stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_RDMAVINV] =
+ stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_RDMAVINV] =
I40E_GLPES_VFRDMAVINVLO(fcn_idx);
}
- for (stat_index = 0; stat_index < I40IW_HW_STAT_INDEX_MAX_64;
- stat_index++) {
- stat_reg_offset = stat_table->stat_offset_64[stat_index];
- last_rd_stats->stat_value_64[stat_index] =
- readq(devstat->hw->hw_addr + stat_reg_offset);
+ for (stats_index = 0; stats_index < I40IW_HW_STAT_INDEX_MAX_64;
+ stats_index++) {
+ stats_reg_offset = stats_table->stats_offset_64[stats_index];
+ last_rd_stats->stats_value_64[stats_index] =
+ readq(stats->hw->hw_addr + stats_reg_offset);
}
- for (stat_index = 0; stat_index < I40IW_HW_STAT_INDEX_MAX_32;
- stat_index++) {
- stat_reg_offset = stat_table->stat_offset_32[stat_index];
- last_rd_stats->stat_value_32[stat_index] =
- i40iw_rd32(devstat->hw, stat_reg_offset);
+ for (stats_index = 0; stats_index < I40IW_HW_STAT_INDEX_MAX_32;
+ stats_index++) {
+ stats_reg_offset = stats_table->stats_offset_32[stats_index];
+ last_rd_stats->stats_value_32[stats_index] =
+ i40iw_rd32(stats->hw, stats_reg_offset);
}
}
/**
- * i40iw_hw_stat_read_32 - Read 32-bit HW stat counters and accommodates for roll-overs.
- * @devstat: pestat struct
- * @index: index in HW stat table which contains offset reg-addr
- * @value: hw stat value
+ * i40iw_hw_stats_read_32 - read 32-bit HW stats counters, accommodating roll-overs
+ * @stats: pestat struct
+ * @index: index in HW stats table which contains offset reg-addr
+ * @value: hw stats value
*/
-static void i40iw_hw_stat_read_32(struct i40iw_dev_pestat *devstat,
- enum i40iw_hw_stat_index_32b index,
- u64 *value)
+void i40iw_hw_stats_read_32(struct i40iw_vsi_pestat *stats,
+ enum i40iw_hw_stats_index_32b index,
+ u64 *value)
{
- struct i40iw_dev_hw_stat_offsets *stat_table =
- &devstat->hw_stat_offsets;
- struct i40iw_dev_hw_stats *last_rd_stats = &devstat->last_read_hw_stats;
- struct i40iw_dev_hw_stats *hw_stats = &devstat->hw_stats;
- u64 new_stat_value = 0;
- u32 stat_reg_offset = stat_table->stat_offset_32[index];
-
- new_stat_value = i40iw_rd32(devstat->hw, stat_reg_offset);
+ struct i40iw_dev_hw_stats_offsets *stats_table =
+ &stats->hw_stats_offsets;
+ struct i40iw_dev_hw_stats *last_rd_stats = &stats->last_read_hw_stats;
+ struct i40iw_dev_hw_stats *hw_stats = &stats->hw_stats;
+ u64 new_stats_value = 0;
+ u32 stats_reg_offset = stats_table->stats_offset_32[index];
+
+ new_stats_value = i40iw_rd32(stats->hw, stats_reg_offset);
/*roll-over case */
- if (new_stat_value < last_rd_stats->stat_value_32[index])
- hw_stats->stat_value_32[index] += new_stat_value;
+ if (new_stats_value < last_rd_stats->stats_value_32[index])
+ hw_stats->stats_value_32[index] += new_stats_value;
else
- hw_stats->stat_value_32[index] +=
- new_stat_value - last_rd_stats->stat_value_32[index];
- last_rd_stats->stat_value_32[index] = new_stat_value;
- *value = hw_stats->stat_value_32[index];
+ hw_stats->stats_value_32[index] +=
+ new_stats_value - last_rd_stats->stats_value_32[index];
+ last_rd_stats->stats_value_32[index] = new_stats_value;
+ *value = hw_stats->stats_value_32[index];
}
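A standalone sketch of the roll-over handling above, runnable as-is: when the 32-bit register wraps, the raw reading is added on top of the running 64-bit total instead of the (negative) delta. Names are invented:

#include <stdint.h>
#include <stdio.h>

static uint64_t total;		/* hw_stats->stats_value_32[index] */
static uint32_t last_read;	/* last_rd_stats->stats_value_32[index] */

static uint64_t read_stat_32(uint32_t raw)
{
	if (raw < last_read)		/* register rolled over */
		total += raw;
	else
		total += raw - last_read;
	last_read = raw;
	return total;
}

int main(void)
{
	printf("%llu\n", (unsigned long long)read_stat_32(100));	/* 100 */
	printf("%llu\n", (unsigned long long)read_stat_32(250));	/* 250 */
	/* the counter wrapped and has counted 30 events since the wrap */
	printf("%llu\n", (unsigned long long)read_stat_32(30));	/* 280 */
	return 0;
}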
/**
- * i40iw_hw_stat_read_64 - Read HW stat counters (greater than 32-bit) and accommodates for roll-overs.
- * @devstat: pestat struct
- * @index: index in HW stat table which contains offset reg-addr
- * @value: hw stat value
+ * i40iw_hw_stats_read_64 - read HW stats counters (greater than 32 bits), accommodating roll-overs
+ * @stats: pestat struct
+ * @index: index in HW stats table which contains offset reg-addr
+ * @value: hw stats value
*/
-static void i40iw_hw_stat_read_64(struct i40iw_dev_pestat *devstat,
- enum i40iw_hw_stat_index_64b index,
- u64 *value)
+void i40iw_hw_stats_read_64(struct i40iw_vsi_pestat *stats,
+ enum i40iw_hw_stats_index_64b index,
+ u64 *value)
{
- struct i40iw_dev_hw_stat_offsets *stat_table =
- &devstat->hw_stat_offsets;
- struct i40iw_dev_hw_stats *last_rd_stats = &devstat->last_read_hw_stats;
- struct i40iw_dev_hw_stats *hw_stats = &devstat->hw_stats;
- u64 new_stat_value = 0;
- u32 stat_reg_offset = stat_table->stat_offset_64[index];
-
- new_stat_value = readq(devstat->hw->hw_addr + stat_reg_offset);
+ struct i40iw_dev_hw_stats_offsets *stats_table =
+ &stats->hw_stats_offsets;
+ struct i40iw_dev_hw_stats *last_rd_stats = &stats->last_read_hw_stats;
+ struct i40iw_dev_hw_stats *hw_stats = &stats->hw_stats;
+ u64 new_stats_value = 0;
+ u32 stats_reg_offset = stats_table->stats_offset_64[index];
+
+ new_stats_value = readq(stats->hw->hw_addr + stats_reg_offset);
/*roll-over case */
- if (new_stat_value < last_rd_stats->stat_value_64[index])
- hw_stats->stat_value_64[index] += new_stat_value;
+ if (new_stats_value < last_rd_stats->stats_value_64[index])
+ hw_stats->stats_value_64[index] += new_stats_value;
else
- hw_stats->stat_value_64[index] +=
- new_stat_value - last_rd_stats->stat_value_64[index];
- last_rd_stats->stat_value_64[index] = new_stat_value;
- *value = hw_stats->stat_value_64[index];
+ hw_stats->stats_value_64[index] +=
+ new_stats_value - last_rd_stats->stats_value_64[index];
+ last_rd_stats->stats_value_64[index] = new_stats_value;
+ *value = hw_stats->stats_value_64[index];
}
/**
- * i40iw_hw_stat_read_all - read all HW stat counters
- * @devstat: pestat struct
- * @stat_values: hw stats structure
+ * i40iw_hw_stats_read_all - read all HW stat counters
+ * @stats: pestat struct
+ * @stats_values: hw stats structure
*
* Read all the HW stat counters and populates hw_stats structure
- * of passed-in dev's pestat as well as copy created in stat_values.
+ * of the passed-in vsi's pestat as well as the copy created in stats_values.
*/
-static void i40iw_hw_stat_read_all(struct i40iw_dev_pestat *devstat,
- struct i40iw_dev_hw_stats *stat_values)
+void i40iw_hw_stats_read_all(struct i40iw_vsi_pestat *stats,
+ struct i40iw_dev_hw_stats *stats_values)
{
- u32 stat_index;
-
- for (stat_index = 0; stat_index < I40IW_HW_STAT_INDEX_MAX_32;
- stat_index++)
- i40iw_hw_stat_read_32(devstat, stat_index,
- &stat_values->stat_value_32[stat_index]);
- for (stat_index = 0; stat_index < I40IW_HW_STAT_INDEX_MAX_64;
- stat_index++)
- i40iw_hw_stat_read_64(devstat, stat_index,
- &stat_values->stat_value_64[stat_index]);
+ u32 stats_index;
+ unsigned long flags;
+
+ spin_lock_irqsave(&stats->lock, flags);
+
+ for (stats_index = 0; stats_index < I40IW_HW_STAT_INDEX_MAX_32;
+ stats_index++)
+ i40iw_hw_stats_read_32(stats, stats_index,
+ &stats_values->stats_value_32[stats_index]);
+ for (stats_index = 0; stats_index < I40IW_HW_STAT_INDEX_MAX_64;
+ stats_index++)
+ i40iw_hw_stats_read_64(stats, stats_index,
+ &stats_values->stats_value_64[stats_index]);
+ spin_unlock_irqrestore(&stats->lock, flags);
}
/**
- * i40iw_hw_stat_refresh_all - Update all HW stat structs
- * @devstat: pestat struct
- * @stat_values: hw stats structure
+ * i40iw_hw_stats_refresh_all - Update all HW stats structs
+ * @stats: pestat struct
*
- * Read all the HW stat counters to refresh values in hw_stats structure
+ * Read all the HW stats counters to refresh values in hw_stats structure
* of passed-in dev's pestat
*/
-static void i40iw_hw_stat_refresh_all(struct i40iw_dev_pestat *devstat)
+void i40iw_hw_stats_refresh_all(struct i40iw_vsi_pestat *stats)
+{
+ u64 stats_value;
+ u32 stats_index;
+ unsigned long flags;
+
+ spin_lock_irqsave(&stats->lock, flags);
+
+ for (stats_index = 0; stats_index < I40IW_HW_STAT_INDEX_MAX_32;
+ stats_index++)
+ i40iw_hw_stats_read_32(stats, stats_index, &stats_value);
+ for (stats_index = 0; stats_index < I40IW_HW_STAT_INDEX_MAX_64;
+ stats_index++)
+ i40iw_hw_stats_read_64(stats, stats_index, &stats_value);
+ spin_unlock_irqrestore(&stats->lock, flags);
+}
+
+/**
+ * i40iw_get_fcn_id - return the first available stats function id
+ * @dev: pointer to the device
+ */
+static u8 i40iw_get_fcn_id(struct i40iw_sc_dev *dev)
+{
+ u8 fcn_id = I40IW_INVALID_FCN_ID;
+ u8 i;
+
+ for (i = I40IW_FIRST_NON_PF_STAT; i < I40IW_MAX_STATS_COUNT; i++) {
+ if (!dev->fcn_id_array[i]) {
+ fcn_id = i;
+ dev->fcn_id_array[i] = true;
+ break;
+ }
+ }
+ return fcn_id;
+}
+
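Worked example (values invented): with I40IW_FIRST_NON_PF_STAT == 4 and slots 4 and 5 already claimed in fcn_id_array[], the scan above returns 6 and marks it used; once every slot in [4, I40IW_MAX_STATS_COUNT) is taken it returns I40IW_INVALID_FCN_ID. A runnable analogue:

#include <stdbool.h>
#include <stdio.h>

#define FIRST_NON_PF	4
#define MAX_SLOTS	16
#define INVALID_ID	0xff

static unsigned char get_id(bool *used)
{
	unsigned char i;

	for (i = FIRST_NON_PF; i < MAX_SLOTS; i++) {
		if (!used[i]) {
			used[i] = true;
			return i;
		}
	}
	return INVALID_ID;
}

int main(void)
{
	bool used[MAX_SLOTS] = { [4] = true, [5] = true };

	printf("%u\n", get_id(used));	/* 6 */
	printf("%u\n", get_id(used));	/* 7 */
	return 0;
}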
+/**
+ * i40iw_vsi_stats_init - Initialize the vsi statistics
+ * @vsi: pointer to the vsi structure
+ * @info: The info structure used for initialization
+ */
+enum i40iw_status_code i40iw_vsi_stats_init(struct i40iw_sc_vsi *vsi, struct i40iw_vsi_stats_info *info)
+{
+ u8 fcn_id = info->fcn_id;
+
+ if (info->alloc_fcn_id)
+ fcn_id = i40iw_get_fcn_id(vsi->dev);
+
+ if (fcn_id == I40IW_INVALID_FCN_ID)
+ return I40IW_ERR_NOT_READY;
+
+ vsi->pestat = info->pestat;
+ vsi->pestat->hw = vsi->dev->hw;
+
+ if (info->stats_initialize) {
+ i40iw_hw_stats_init(vsi->pestat, fcn_id, true);
+ spin_lock_init(&vsi->pestat->lock);
+ i40iw_hw_stats_start_timer(vsi);
+ }
+ vsi->stats_fcn_id_alloc = info->alloc_fcn_id;
+ vsi->fcn_id = fcn_id;
+ return I40IW_SUCCESS;
+}
+
+/**
+ * i40iw_vsi_stats_free - Free the vsi stats
+ * @vsi: pointer to the vsi structure
+ */
+void i40iw_vsi_stats_free(struct i40iw_sc_vsi *vsi)
{
- u64 stat_value;
- u32 stat_index;
-
- for (stat_index = 0; stat_index < I40IW_HW_STAT_INDEX_MAX_32;
- stat_index++)
- i40iw_hw_stat_read_32(devstat, stat_index, &stat_value);
- for (stat_index = 0; stat_index < I40IW_HW_STAT_INDEX_MAX_64;
- stat_index++)
- i40iw_hw_stat_read_64(devstat, stat_index, &stat_value);
+ u8 fcn_id = vsi->fcn_id;
+
+ if ((vsi->stats_fcn_id_alloc) && (fcn_id != I40IW_INVALID_FCN_ID))
+ vsi->dev->fcn_id_array[fcn_id] = false;
+ i40iw_hw_stats_stop_timer(vsi);
}
static struct i40iw_cqp_ops iw_cqp_ops = {
@@ -4711,24 +4958,6 @@ static struct i40iw_hmc_ops iw_hmc_ops = {
NULL
};
-static const struct i40iw_device_pestat_ops iw_device_pestat_ops = {
- i40iw_hw_stat_init,
- i40iw_hw_stat_read_32,
- i40iw_hw_stat_read_64,
- i40iw_hw_stat_read_all,
- i40iw_hw_stat_refresh_all
-};
-
-/**
- * i40iw_device_init_pestat - Initialize the pestat structure
- * @dev: pestat struct
- */
-enum i40iw_status_code i40iw_device_init_pestat(struct i40iw_dev_pestat *devstat)
-{
- devstat->ops = iw_device_pestat_ops;
- return 0;
-}
-
/**
* i40iw_device_init - Initialize IWARP device
* @dev: IWARP device pointer
@@ -4750,14 +4979,7 @@ enum i40iw_status_code i40iw_device_init(struct i40iw_sc_dev *dev,
dev->debug_mask = info->debug_mask;
- ret_code = i40iw_device_init_pestat(&dev->dev_pestat);
- if (ret_code) {
- i40iw_debug(dev, I40IW_DEBUG_DEV,
- "%s: i40iw_device_init_pestat failed\n", __func__);
- return ret_code;
- }
dev->hmc_fn_id = info->hmc_fn_id;
- dev->qs_handle = info->qs_handle;
dev->exception_lan_queue = info->exception_lan_queue;
dev->is_pf = info->is_pf;
@@ -4770,15 +4992,10 @@ enum i40iw_status_code i40iw_device_init(struct i40iw_sc_dev *dev,
dev->hw = info->hw;
dev->hw->hw_addr = info->bar0;
- val = i40iw_rd32(dev->hw, I40E_GLPCI_DREVID);
- dev->hw_rev = (u8)RS_32(val, I40E_GLPCI_DREVID_DEFAULT_REVID);
-
if (dev->is_pf) {
- dev->dev_pestat.ops.iw_hw_stat_init(&dev->dev_pestat,
- dev->hmc_fn_id, dev->hw, true);
- spin_lock_init(&dev->dev_pestat.stats_lock);
- /*start the periodic stats_timer */
- i40iw_hw_stats_start_timer(dev);
+ val = i40iw_rd32(dev->hw, I40E_GLPCI_DREVID);
+ dev->hw_rev = (u8)RS_32(val, I40E_GLPCI_DREVID_DEFAULT_REVID);
+
val = i40iw_rd32(dev->hw, I40E_GLPCI_LBARCTRL);
db_size = (u8)RS_32(val, I40E_GLPCI_LBARCTRL_PE_DB_SIZE);
if ((db_size != I40IW_PE_DB_SIZE_4M) &&
diff --git a/drivers/infiniband/hw/i40iw/i40iw_d.h b/drivers/infiniband/hw/i40iw/i40iw_d.h
index 2fac1db0e0a0..a39ac12b6a7e 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_d.h
+++ b/drivers/infiniband/hw/i40iw/i40iw_d.h
@@ -35,6 +35,8 @@
#ifndef I40IW_D_H
#define I40IW_D_H
+#define I40IW_FIRST_USER_QP_ID 2
+
#define I40IW_DB_ADDR_OFFSET (4 * 1024 * 1024 - 64 * 1024)
#define I40IW_VF_DB_ADDR_OFFSET (64 * 1024)
@@ -67,6 +69,9 @@
#define I40IW_STAG_TYPE_NONSHARED 1
#define I40IW_MAX_USER_PRIORITY 8
+#define I40IW_MAX_STATS_COUNT 16
+#define I40IW_FIRST_NON_PF_STAT 4
+
#define LS_64_1(val, bits) ((u64)(uintptr_t)val << bits)
#define RS_64_1(val, bits) ((u64)(uintptr_t)val >> bits)
@@ -74,6 +79,8 @@
#define RS_32_1(val, bits) (u32)(val >> bits)
#define I40E_HI_DWORD(x) ((u32)((((x) >> 16) >> 16) & 0xFFFFFFFF))
+#define QS_HANDLE_UNKNOWN 0xffff
+
#define LS_64(val, field) (((u64)val << field ## _SHIFT) & (field ## _MASK))
#define RS_64(val, field) ((u64)(val & field ## _MASK) >> field ## _SHIFT)
@@ -1199,8 +1206,11 @@
#define I40IWQPC_RXCQNUM_SHIFT 32
#define I40IWQPC_RXCQNUM_MASK (0x1ffffULL << I40IWQPC_RXCQNUM_SHIFT)
-#define I40IWQPC_Q2ADDR_SHIFT I40IW_CQPHC_QPCTX_SHIFT
-#define I40IWQPC_Q2ADDR_MASK I40IW_CQPHC_QPCTX_MASK
+#define I40IWQPC_STAT_INDEX_SHIFT 0
+#define I40IWQPC_STAT_INDEX_MASK (0x1fULL << I40IWQPC_STAT_INDEX_SHIFT)
+
+#define I40IWQPC_Q2ADDR_SHIFT 0
+#define I40IWQPC_Q2ADDR_MASK (0xffffffffffffff00ULL << I40IWQPC_Q2ADDR_SHIFT)
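Worked example for the two fields above (values invented): Q2ADDR_MASK keeps bits 8..63, so a 256-byte-aligned q2_pa of 0x1000 passes through LS_64() unchanged, while a stats index of 5 is masked into bits 0..4 by STAT_INDEX_MASK; ORed together in i40iw_sc_qp_setctx() they produce 0x1005 in the qword written at offset 144.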
#define I40IWQPC_LASTBYTESENT_SHIFT 0
#define I40IWQPC_LASTBYTESENT_MASK (0xffUL << I40IWQPC_LASTBYTESENT_SHIFT)
@@ -1232,11 +1242,8 @@
#define I40IWQPC_PRIVEN_SHIFT 25
#define I40IWQPC_PRIVEN_MASK (1UL << I40IWQPC_PRIVEN_SHIFT)
-#define I40IWQPC_LSMMPRESENT_SHIFT 26
-#define I40IWQPC_LSMMPRESENT_MASK (1UL << I40IWQPC_LSMMPRESENT_SHIFT)
-
-#define I40IWQPC_ADJUSTFORLSMM_SHIFT 27
-#define I40IWQPC_ADJUSTFORLSMM_MASK (1UL << I40IWQPC_ADJUSTFORLSMM_SHIFT)
+#define I40IWQPC_USESTATSINSTANCE_SHIFT 26
+#define I40IWQPC_USESTATSINSTANCE_MASK (1UL << I40IWQPC_USESTATSINSTANCE_SHIFT)
#define I40IWQPC_IWARPMODE_SHIFT 28
#define I40IWQPC_IWARPMODE_MASK (1UL << I40IWQPC_IWARPMODE_SHIFT)
@@ -1713,6 +1720,8 @@ enum i40iw_alignment {
#define OP_MANAGE_VF_PBLE_BP 28
#define OP_QUERY_FPM_VALUES 29
#define OP_COMMIT_FPM_VALUES 30
-#define OP_SIZE_CQP_STAT_ARRAY 31
+#define OP_REQUESTED_COMMANDS 31
+#define OP_COMPLETED_COMMANDS 32
+#define OP_SIZE_CQP_STAT_ARRAY 33
#endif
diff --git a/drivers/infiniband/hw/i40iw/i40iw_hw.c b/drivers/infiniband/hw/i40iw/i40iw_hw.c
index 0c92a40b3e86..476867a3f584 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_hw.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_hw.c
@@ -62,7 +62,7 @@ u32 i40iw_initialize_hw_resources(struct i40iw_device *iwdev)
max_mr = iwdev->sc_dev.hmc_info->hmc_obj[I40IW_HMC_IW_MR].cnt;
arp_table_size = iwdev->sc_dev.hmc_info->hmc_obj[I40IW_HMC_IW_ARP].cnt;
iwdev->max_cqe = 0xFFFFF;
- num_pds = max_qp * 4;
+ num_pds = I40IW_MAX_PDS;
resources_size = sizeof(struct i40iw_arp_entry) * arp_table_size;
resources_size += sizeof(unsigned long) * BITS_TO_LONGS(max_qp);
resources_size += sizeof(unsigned long) * BITS_TO_LONGS(max_mr);
@@ -308,7 +308,9 @@ void i40iw_process_aeq(struct i40iw_device *iwdev)
iwqp = iwdev->qp_table[info->qp_cq_id];
if (!iwqp) {
spin_unlock_irqrestore(&iwdev->qptable_lock, flags);
- i40iw_pr_err("qp_id %d is already freed\n", info->qp_cq_id);
+ i40iw_debug(dev, I40IW_DEBUG_AEQ,
+ "%s qp_id %d is already freed\n",
+ __func__, info->qp_cq_id);
continue;
}
i40iw_add_ref(&iwqp->ibqp);
@@ -359,6 +361,9 @@ void i40iw_process_aeq(struct i40iw_device *iwdev)
continue;
i40iw_cm_disconn(iwqp);
break;
+ case I40IW_AE_QP_SUSPEND_COMPLETE:
+ i40iw_qp_suspend_resume(dev, &iwqp->sc_qp, false);
+ break;
case I40IW_AE_TERMINATE_SENT:
i40iw_terminate_send_fin(qp);
break;
@@ -404,19 +409,18 @@ void i40iw_process_aeq(struct i40iw_device *iwdev)
case I40IW_AE_LCE_CQ_CATASTROPHIC:
case I40IW_AE_UDA_XMIT_DGRAM_TOO_LONG:
case I40IW_AE_UDA_XMIT_IPADDR_MISMATCH:
- case I40IW_AE_QP_SUSPEND_COMPLETE:
ctx_info->err_rq_idx_valid = false;
default:
- if (!info->sq && ctx_info->err_rq_idx_valid) {
- ctx_info->err_rq_idx = info->wqe_idx;
- ctx_info->tcp_info_valid = false;
- ctx_info->iwarp_info_valid = false;
- ret = dev->iw_priv_qp_ops->qp_setctx(&iwqp->sc_qp,
- iwqp->host_ctx.va,
- ctx_info);
- }
- i40iw_terminate_connection(qp, info);
- break;
+ if (!info->sq && ctx_info->err_rq_idx_valid) {
+ ctx_info->err_rq_idx = info->wqe_idx;
+ ctx_info->tcp_info_valid = false;
+ ctx_info->iwarp_info_valid = false;
+ ret = dev->iw_priv_qp_ops->qp_setctx(&iwqp->sc_qp,
+ iwqp->host_ctx.va,
+ ctx_info);
+ }
+ i40iw_terminate_connection(qp, info);
+ break;
}
if (info->qp)
i40iw_rem_ref(&iwqp->ibqp);
@@ -538,6 +542,7 @@ enum i40iw_status_code i40iw_manage_qhash(struct i40iw_device *iwdev,
{
struct i40iw_qhash_table_info *info;
struct i40iw_sc_dev *dev = &iwdev->sc_dev;
+ struct i40iw_sc_vsi *vsi = &iwdev->vsi;
enum i40iw_status_code status;
struct i40iw_cqp *iwcqp = &iwdev->cqp;
struct i40iw_cqp_request *cqp_request;
@@ -550,6 +555,7 @@ enum i40iw_status_code i40iw_manage_qhash(struct i40iw_device *iwdev,
info = &cqp_info->in.u.manage_qhash_table_entry.info;
memset(info, 0, sizeof(*info));
+ info->vsi = &iwdev->vsi;
info->manage = mtype;
info->entry_type = etype;
if (cminfo->vlan_id != 0xFFFF) {
@@ -560,8 +566,9 @@ enum i40iw_status_code i40iw_manage_qhash(struct i40iw_device *iwdev,
}
info->ipv4_valid = cminfo->ipv4;
+ info->user_pri = cminfo->user_pri;
ether_addr_copy(info->mac_addr, iwdev->netdev->dev_addr);
- info->qp_num = cpu_to_le32(dev->ilq->qp_id);
+ info->qp_num = cpu_to_le32(vsi->ilq->qp_id);
info->dest_port = cpu_to_le16(cminfo->loc_port);
info->dest_ip[0] = cpu_to_le32(cminfo->loc_addr[0]);
info->dest_ip[1] = cpu_to_le32(cminfo->loc_addr[1]);
@@ -617,6 +624,7 @@ enum i40iw_status_code i40iw_hw_flush_wqes(struct i40iw_device *iwdev,
struct i40iw_qp_flush_info *hw_info;
struct i40iw_cqp_request *cqp_request;
struct cqp_commands_info *cqp_info;
+ struct i40iw_qp *iwqp = (struct i40iw_qp *)qp->back_qp;
cqp_request = i40iw_get_cqp_request(&iwdev->cqp, wait);
if (!cqp_request)
@@ -631,9 +639,30 @@ enum i40iw_status_code i40iw_hw_flush_wqes(struct i40iw_device *iwdev,
cqp_info->in.u.qp_flush_wqes.qp = qp;
cqp_info->in.u.qp_flush_wqes.scratch = (uintptr_t)cqp_request;
status = i40iw_handle_cqp_op(iwdev, cqp_request);
- if (status)
+ if (status) {
i40iw_pr_err("CQP-OP Flush WQE's fail");
- return status;
+ complete(&iwqp->sq_drained);
+ complete(&iwqp->rq_drained);
+ return status;
+ }
+ if (!cqp_request->compl_info.maj_err_code) {
+ switch (cqp_request->compl_info.min_err_code) {
+ case I40IW_CQP_COMPL_RQ_WQE_FLUSHED:
+ complete(&iwqp->sq_drained);
+ break;
+ case I40IW_CQP_COMPL_SQ_WQE_FLUSHED:
+ complete(&iwqp->rq_drained);
+ break;
+ case I40IW_CQP_COMPL_RQ_SQ_WQE_FLUSHED:
+ break;
+ default:
+ complete(&iwqp->sq_drained);
+ complete(&iwqp->rq_drained);
+ break;
+ }
+ }
+
+ return 0;
}
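Elsewhere in this series the verbs-layer drain helpers block on these completions; a minimal sketch of that consumer side, assuming helpers of roughly this shape (they are not shown in this hunk):

static void i40iw_drain_sq_sketch(struct i40iw_qp *iwqp)
{
	wait_for_completion(&iwqp->sq_drained);
}

static void i40iw_drain_rq_sketch(struct i40iw_qp *iwqp)
{
	wait_for_completion(&iwqp->rq_drained);
}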
/**
diff --git a/drivers/infiniband/hw/i40iw/i40iw_main.c b/drivers/infiniband/hw/i40iw/i40iw_main.c
index ac2f3cd9478c..2728af3103ce 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_main.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_main.c
@@ -237,14 +237,11 @@ static irqreturn_t i40iw_irq_handler(int irq, void *data)
*/
static void i40iw_destroy_cqp(struct i40iw_device *iwdev, bool free_hwcqp)
{
- enum i40iw_status_code status = 0;
struct i40iw_sc_dev *dev = &iwdev->sc_dev;
struct i40iw_cqp *cqp = &iwdev->cqp;
- if (free_hwcqp && dev->cqp_ops->cqp_destroy)
- status = dev->cqp_ops->cqp_destroy(dev->cqp);
- if (status)
- i40iw_pr_err("destroy cqp failed");
+ if (free_hwcqp)
+ dev->cqp_ops->cqp_destroy(dev->cqp);
i40iw_free_dma_mem(dev->hw, &cqp->sq);
kfree(cqp->scratch_array);
@@ -270,6 +267,7 @@ static void i40iw_disable_irq(struct i40iw_sc_dev *dev,
i40iw_wr32(dev->hw, I40E_PFINT_DYN_CTLN(msix_vec->idx - 1), 0);
else
i40iw_wr32(dev->hw, I40E_VFINT_DYN_CTLN1(msix_vec->idx - 1), 0);
+ irq_set_affinity_hint(msix_vec->irq, NULL);
free_irq(msix_vec->irq, dev_id);
}
@@ -603,7 +601,7 @@ static enum i40iw_status_code i40iw_create_cqp(struct i40iw_device *iwdev)
i40iw_pr_err("cqp init status %d\n", status);
goto exit;
}
- status = dev->cqp_ops->cqp_create(dev->cqp, true, &maj_err, &min_err);
+ status = dev->cqp_ops->cqp_create(dev->cqp, &maj_err, &min_err);
if (status) {
i40iw_pr_err("cqp create status %d maj_err %d min_err %d\n",
status, maj_err, min_err);
@@ -688,6 +686,7 @@ static enum i40iw_status_code i40iw_configure_ceq_vector(struct i40iw_device *iw
struct i40iw_msix_vector *msix_vec)
{
enum i40iw_status_code status;
+ cpumask_t mask;
if (iwdev->msix_shared && !ceq_id) {
tasklet_init(&iwdev->dpc_tasklet, i40iw_dpc, (unsigned long)iwdev);
@@ -697,12 +696,15 @@ static enum i40iw_status_code i40iw_configure_ceq_vector(struct i40iw_device *iw
status = request_irq(msix_vec->irq, i40iw_ceq_handler, 0, "CEQ", iwceq);
}
 if (status) {
 i40iw_pr_err("ceq irq config fail\n");
 return I40IW_ERR_CONFIG;
 }
+
+ cpumask_clear(&mask);
+ cpumask_set_cpu(msix_vec->cpu_affinity, &mask);
+ irq_set_affinity_hint(msix_vec->irq, &mask);
msix_vec->ceq_id = ceq_id;
- msix_vec->cpu_affinity = 0;
return 0;
}
@@ -930,6 +932,7 @@ static enum i40iw_status_code i40iw_initialize_ilq(struct i40iw_device *iwdev)
struct i40iw_puda_rsrc_info info;
enum i40iw_status_code status;
+ memset(&info, 0, sizeof(info));
info.type = I40IW_PUDA_RSRC_TYPE_ILQ;
info.cq_id = 1;
info.qp_id = 0;
@@ -939,10 +942,9 @@ static enum i40iw_status_code i40iw_initialize_ilq(struct i40iw_device *iwdev)
info.rq_size = 8192;
info.buf_size = 1024;
info.tx_buf_cnt = 16384;
- info.mss = iwdev->mss;
info.receive = i40iw_receive_ilq;
info.xmit_complete = i40iw_free_sqbuf;
- status = i40iw_puda_create_rsrc(&iwdev->sc_dev, &info);
+ status = i40iw_puda_create_rsrc(&iwdev->vsi, &info);
if (status)
i40iw_pr_err("ilq create fail\n");
return status;
@@ -959,6 +961,7 @@ static enum i40iw_status_code i40iw_initialize_ieq(struct i40iw_device *iwdev)
struct i40iw_puda_rsrc_info info;
enum i40iw_status_code status;
+ memset(&info, 0, sizeof(info));
info.type = I40IW_PUDA_RSRC_TYPE_IEQ;
info.cq_id = 2;
info.qp_id = iwdev->sc_dev.exception_lan_queue;
@@ -967,9 +970,8 @@ static enum i40iw_status_code i40iw_initialize_ieq(struct i40iw_device *iwdev)
info.sq_size = 8192;
info.rq_size = 8192;
info.buf_size = 2048;
- info.mss = iwdev->mss;
info.tx_buf_cnt = 16384;
- status = i40iw_puda_create_rsrc(&iwdev->sc_dev, &info);
+ status = i40iw_puda_create_rsrc(&iwdev->vsi, &info);
if (status)
i40iw_pr_err("ieq create fail\n");
return status;
@@ -1159,7 +1161,7 @@ static void i40iw_add_ipv6_addr(struct i40iw_device *iwdev)
{
struct net_device *ip_dev;
struct inet6_dev *idev;
- struct inet6_ifaddr *ifp;
+ struct inet6_ifaddr *ifp, *tmp;
u32 local_ipaddr6[4];
rcu_read_lock();
@@ -1172,7 +1174,7 @@ static void i40iw_add_ipv6_addr(struct i40iw_device *iwdev)
i40iw_pr_err("ipv6 inet device not found\n");
break;
}
- list_for_each_entry(ifp, &idev->addr_list, if_list) {
+ list_for_each_entry_safe(ifp, tmp, &idev->addr_list, if_list) {
i40iw_pr_info("IP=%pI6, vlan_id=%d, MAC=%pM\n", &ifp->addr,
rdma_vlan_dev_vlan_id(ip_dev), ip_dev->dev_addr);
i40iw_copy_ip_ntohl(local_ipaddr6,
@@ -1294,17 +1296,23 @@ static enum i40iw_status_code i40iw_initialize_dev(struct i40iw_device *iwdev,
enum i40iw_status_code status;
struct i40iw_sc_dev *dev = &iwdev->sc_dev;
struct i40iw_device_init_info info;
+ struct i40iw_vsi_init_info vsi_info;
struct i40iw_dma_mem mem;
+ struct i40iw_l2params l2params;
u32 size;
+ struct i40iw_vsi_stats_info stats_info;
+ u16 last_qset = I40IW_NO_QSET;
+ u16 qset;
+ u32 i;
+ memset(&l2params, 0, sizeof(l2params));
memset(&info, 0, sizeof(info));
size = sizeof(struct i40iw_hmc_pble_rsrc) + sizeof(struct i40iw_hmc_info) +
(sizeof(struct i40iw_hmc_obj_info) * I40IW_HMC_IW_MAX);
iwdev->hmc_info_mem = kzalloc(size, GFP_KERNEL);
- if (!iwdev->hmc_info_mem) {
- i40iw_pr_err("memory alloc fail\n");
+ if (!iwdev->hmc_info_mem)
return I40IW_ERR_NO_MEMORY;
- }
+
iwdev->pble_rsrc = (struct i40iw_hmc_pble_rsrc *)iwdev->hmc_info_mem;
dev->hmc_info = &iwdev->hw.hmc;
dev->hmc_info->hmc_obj = (struct i40iw_hmc_obj_info *)(iwdev->pble_rsrc + 1);
@@ -1325,7 +1333,17 @@ static enum i40iw_status_code i40iw_initialize_dev(struct i40iw_device *iwdev,
info.bar0 = ldev->hw_addr;
info.hw = &iwdev->hw;
info.debug_mask = debug;
- info.qs_handle = ldev->params.qos.prio_qos[0].qs_handle;
+ l2params.mss =
+ (ldev->params.mtu) ? ldev->params.mtu - I40IW_MTU_TO_MSS : I40IW_DEFAULT_MSS;
+ for (i = 0; i < I40E_CLIENT_MAX_USER_PRIORITY; i++) {
+ qset = ldev->params.qos.prio_qos[i].qs_handle;
+ l2params.qs_handle_list[i] = qset;
+ if (last_qset == I40IW_NO_QSET)
+ last_qset = qset;
+ else if ((qset != last_qset) && (qset != I40IW_NO_QSET))
+ iwdev->dcb = true;
+ }
+ i40iw_pr_info("DCB is set/clear = %d\n", iwdev->dcb);
info.exception_lan_queue = 1;
info.vchnl_send = i40iw_virtchnl_send;
status = i40iw_device_init(&iwdev->sc_dev, &info);
@@ -1334,6 +1352,20 @@ exit:
kfree(iwdev->hmc_info_mem);
iwdev->hmc_info_mem = NULL;
}
+ memset(&vsi_info, 0, sizeof(vsi_info));
+ vsi_info.dev = &iwdev->sc_dev;
+ vsi_info.back_vsi = (void *)iwdev;
+ vsi_info.params = &l2params;
+ i40iw_sc_vsi_init(&iwdev->vsi, &vsi_info);
+
+ if (dev->is_pf) {
+ memset(&stats_info, 0, sizeof(stats_info));
+ stats_info.fcn_id = ldev->fid;
+ stats_info.pestat = kzalloc(sizeof(*stats_info.pestat), GFP_KERNEL);
+ stats_info.stats_initialize = true;
+ if (stats_info.pestat)
+ i40iw_vsi_stats_init(&iwdev->vsi, &stats_info);
+ }
return status;
}
@@ -1384,6 +1416,7 @@ static enum i40iw_status_code i40iw_save_msix_info(struct i40iw_device *iwdev,
for (i = 0, ceq_idx = 0; i < iwdev->msix_count; i++, iw_qvinfo++) {
iwdev->iw_msixtbl[i].idx = ldev->msix_entries[i].entry;
iwdev->iw_msixtbl[i].irq = ldev->msix_entries[i].vector;
+ iwdev->iw_msixtbl[i].cpu_affinity = ceq_idx;
if (i == 0) {
iw_qvinfo->aeq_idx = 0;
if (iwdev->msix_shared)
@@ -1404,18 +1437,19 @@ static enum i40iw_status_code i40iw_save_msix_info(struct i40iw_device *iwdev,
* i40iw_deinit_device - clean up the device resources
* @iwdev: iwarp device
* @reset: true if called before reset
- * @del_hdl: true if delete hdl entry
*
* Destroy the ib device interface, remove the mac ip entry and ipv4/ipv6 addresses,
* destroy the device queues and free the pble and the hmc objects
*/
-static void i40iw_deinit_device(struct i40iw_device *iwdev, bool reset, bool del_hdl)
+static void i40iw_deinit_device(struct i40iw_device *iwdev, bool reset)
{
struct i40e_info *ldev = iwdev->ldev;
struct i40iw_sc_dev *dev = &iwdev->sc_dev;
i40iw_pr_info("state = %d\n", iwdev->init_state);
+ if (iwdev->param_wq)
+ destroy_workqueue(iwdev->param_wq);
switch (iwdev->init_state) {
case RDMA_DEV_REGISTERED:
@@ -1441,10 +1475,10 @@ static void i40iw_deinit_device(struct i40iw_device *iwdev, bool reset, bool del
i40iw_destroy_aeq(iwdev, reset);
/* fallthrough */
case IEQ_CREATED:
- i40iw_puda_dele_resources(dev, I40IW_PUDA_RSRC_TYPE_IEQ, reset);
+ i40iw_puda_dele_resources(&iwdev->vsi, I40IW_PUDA_RSRC_TYPE_IEQ, reset);
/* fallthrough */
case ILQ_CREATED:
- i40iw_puda_dele_resources(dev, I40IW_PUDA_RSRC_TYPE_ILQ, reset);
+ i40iw_puda_dele_resources(&iwdev->vsi, I40IW_PUDA_RSRC_TYPE_ILQ, reset);
/* fallthrough */
case CCQ_CREATED:
i40iw_destroy_ccq(iwdev, reset);
@@ -1456,13 +1490,14 @@ static void i40iw_deinit_device(struct i40iw_device *iwdev, bool reset, bool del
i40iw_del_hmc_objects(dev, dev->hmc_info, true, reset);
/* fallthrough */
case CQP_CREATED:
- i40iw_destroy_cqp(iwdev, !reset);
+ i40iw_destroy_cqp(iwdev, true);
/* fallthrough */
case INITIAL_STATE:
i40iw_cleanup_cm_core(&iwdev->cm_core);
- if (dev->is_pf)
- i40iw_hw_stats_del_timer(dev);
-
+ if (iwdev->vsi.pestat) {
+ i40iw_vsi_stats_free(&iwdev->vsi);
+ kfree(iwdev->vsi.pestat);
+ }
i40iw_del_init_mem(iwdev);
break;
case INVALID_STATE:
@@ -1472,8 +1507,7 @@ static void i40iw_deinit_device(struct i40iw_device *iwdev, bool reset, bool del
break;
}
- if (del_hdl)
- i40iw_del_handler(i40iw_find_i40e_handler(ldev));
+ i40iw_del_handler(i40iw_find_i40e_handler(ldev));
kfree(iwdev->hdl);
}
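
The deinit path above is a reverse-order unwind: init_state records the last stage that completed, and the switch deliberately falls through so every stage at or below it is torn down. A toy model of the pattern, with invented state names and stub destructors:

#include <stdio.h>

enum init_state { INVALID, INITIAL, CQP_UP, CCQ_UP, REGISTERED };

static void teardown(enum init_state state)
{
	switch (state) {
	case REGISTERED:
		puts("unregister ib device");
		/* fall through */
	case CCQ_UP:
		puts("destroy ccq");
		/* fall through */
	case CQP_UP:
		puts("destroy cqp");
		/* fall through */
	case INITIAL:
		puts("free init memory");
		break;
	case INVALID:
	default:
		puts("nothing to do");
		break;
	}
}

int main(void)
{
	teardown(CCQ_UP);	/* unwinds ccq, cqp, then init memory */
	return 0;
}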
@@ -1508,7 +1542,6 @@ static enum i40iw_status_code i40iw_setup_init_state(struct i40iw_handler *hdl,
iwdev->max_enabled_vfs = iwdev->max_rdma_vfs;
iwdev->netdev = ldev->netdev;
hdl->client = client;
- iwdev->mss = (!ldev->params.mtu) ? I40IW_DEFAULT_MSS : ldev->params.mtu - I40IW_MTU_TO_MSS;
if (!ldev->ftype)
iwdev->db_start = pci_resource_start(ldev->pcidev, 0) + I40IW_DB_ADDR_OFFSET;
else
@@ -1528,6 +1561,7 @@ static enum i40iw_status_code i40iw_setup_init_state(struct i40iw_handler *hdl,
init_waitqueue_head(&iwdev->vchnl_waitq);
init_waitqueue_head(&dev->vf_reqs);
+ init_waitqueue_head(&iwdev->close_wq);
status = i40iw_initialize_dev(iwdev, ldev);
exit:
@@ -1540,6 +1574,20 @@ exit:
}
/**
+ * i40iw_get_used_rsrc - determine resources used internally
+ * @iwdev: iwarp device
+ *
+ * Called after internal allocations
+ */
+static void i40iw_get_used_rsrc(struct i40iw_device *iwdev)
+{
+ iwdev->used_pds = find_next_zero_bit(iwdev->allocated_pds, iwdev->max_pd, 0);
+ iwdev->used_qps = find_next_zero_bit(iwdev->allocated_qps, iwdev->max_qp, 0);
+ iwdev->used_cqs = find_next_zero_bit(iwdev->allocated_cqs, iwdev->max_cq, 0);
+ iwdev->used_mrs = find_next_zero_bit(iwdev->allocated_mrs, iwdev->max_mr, 0);
+}
+
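i40iw_get_used_rsrc() above leans on an assumption worth spelling out: immediately after the driver's internal allocations, each resource bitmap is populated contiguously from bit 0, so the index of the first clear bit equals the count of resources in use. That count is what the verbs layer later subtracts when reporting capacity to applications. A userspace model of find_next_zero_bit() under that assumption (names illustrative):

#include <stdio.h>

static unsigned int first_zero_bit(const unsigned long *map,
				   unsigned int nbits)
{
	const unsigned int bpl = 8 * sizeof(unsigned long);
	unsigned int i;

	for (i = 0; i < nbits; i++)
		if (!(map[i / bpl] & (1UL << (i % bpl))))
			return i;
	return nbits;	/* all bits set */
}

int main(void)
{
	unsigned long pds = 0x1f;	/* bits 0..4 set: 5 PDs allocated */

	printf("used_pds = %u\n",
	       first_zero_bit(&pds, 8 * sizeof(pds)));	/* prints 5 */
	return 0;
}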
+/**
* i40iw_open - client interface operation open for iwarp/uda device
* @ldev: lan device information
* @client: iwarp client information, provided during registration
@@ -1611,6 +1659,7 @@ static int i40iw_open(struct i40e_info *ldev, struct i40e_client *client)
status = i40iw_initialize_hw_resources(iwdev);
if (status)
break;
+ i40iw_get_used_rsrc(iwdev);
dev->ccq_ops->ccq_arm(dev->ccq);
status = i40iw_hmc_init_pble(&iwdev->sc_dev, iwdev->pble_rsrc);
if (status)
@@ -1630,35 +1679,73 @@ static int i40iw_open(struct i40e_info *ldev, struct i40e_client *client)
iwdev->init_state = RDMA_DEV_REGISTERED;
iwdev->iw_status = 1;
i40iw_port_ibevent(iwdev);
+ iwdev->param_wq = alloc_ordered_workqueue("l2params", WQ_MEM_RECLAIM);
+		if (!iwdev->param_wq)
+ break;
i40iw_pr_info("i40iw_open completed\n");
return 0;
} while (0);
i40iw_pr_err("status = %d last completion = %d\n", status, iwdev->init_state);
- i40iw_deinit_device(iwdev, false, false);
+ i40iw_deinit_device(iwdev, false);
return -ERESTART;
}
/**
- * i40iw_l2param_change : handle qs handles for qos and mss change
+ * i40iw_l2params_worker - worker for l2 params change
+ * @work: work pointer for l2 params
+ */
+static void i40iw_l2params_worker(struct work_struct *work)
+{
+ struct l2params_work *dwork =
+ container_of(work, struct l2params_work, work);
+ struct i40iw_device *iwdev = dwork->iwdev;
+
+ i40iw_change_l2params(&iwdev->vsi, &dwork->l2params);
+ atomic_dec(&iwdev->params_busy);
+ kfree(work);
+}
+
+/**
+ * i40iw_l2param_change - handle qs handles for qos and mss change
* @ldev: lan device information
 * @client: client for parameter change
* @params: new parameters from L2
*/
-static void i40iw_l2param_change(struct i40e_info *ldev,
- struct i40e_client *client,
+static void i40iw_l2param_change(struct i40e_info *ldev, struct i40e_client *client,
struct i40e_params *params)
{
struct i40iw_handler *hdl;
+ struct i40iw_l2params *l2params;
+ struct l2params_work *work;
struct i40iw_device *iwdev;
+ int i;
hdl = i40iw_find_i40e_handler(ldev);
if (!hdl)
return;
iwdev = &hdl->device;
- if (params->mtu)
- iwdev->mss = params->mtu - I40IW_MTU_TO_MSS;
+
+ if (atomic_read(&iwdev->params_busy))
+ return;
+
+ work = kzalloc(sizeof(*work), GFP_ATOMIC);
+ if (!work)
+ return;
+
+ atomic_inc(&iwdev->params_busy);
+
+ work->iwdev = iwdev;
+ l2params = &work->l2params;
+ for (i = 0; i < I40E_CLIENT_MAX_USER_PRIORITY; i++)
+ l2params->qs_handle_list[i] = params->qos.prio_qos[i].qs_handle;
+
+ l2params->mss = (params->mtu) ? params->mtu - I40IW_MTU_TO_MSS : iwdev->vsi.mss;
+
+ INIT_WORK(&work->work, i40iw_l2params_worker);
+ queue_work(iwdev->param_wq, &work->work);
}
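
The change notification above arrives in a context where the driver must not block, so the new qs handles and MSS are snapshotted into a work item and applied later on the ordered "l2params" workqueue, while params_busy drops notifications that overlap one already in flight. A compressed, compilable model of that hand-off (queue_work() is stubbed by a direct call; all names are stand-ins):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdlib.h>

struct l2params {
	unsigned short mss;
	unsigned short qs_handle[8];
};

struct l2params_work {
	struct l2params snap;	/* private copy; *p dies with the caller */
};

static atomic_int params_busy;

static void l2params_worker(struct l2params_work *w)
{
	/* ...apply w->snap to the hardware context here... */
	atomic_fetch_sub(&params_busy, 1);
	free(w);
}

static bool on_l2param_change(const struct l2params *p)
{
	struct l2params_work *w;

	if (atomic_load(&params_busy))
		return false;		/* previous change still in flight */

	w = malloc(sizeof(*w));		/* GFP_ATOMIC-style: no blocking */
	if (!w)
		return false;

	atomic_fetch_add(&params_busy, 1);
	w->snap = *p;
	l2params_worker(w);		/* stands in for queue_work() */
	return true;
}

int main(void)
{
	struct l2params p = { .mss = 1460 };

	return on_l2param_change(&p) ? 0 : 1;
}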
/**
@@ -1679,8 +1766,11 @@ static void i40iw_close(struct i40e_info *ldev, struct i40e_client *client, bool
return;
iwdev = &hdl->device;
+ iwdev->closing = true;
+
+ i40iw_cm_disconnect_all(iwdev);
destroy_workqueue(iwdev->virtchnl_wq);
- i40iw_deinit_device(iwdev, reset, true);
+ i40iw_deinit_device(iwdev, reset);
}
/**
@@ -1701,21 +1791,23 @@ static void i40iw_vf_reset(struct i40e_info *ldev, struct i40e_client *client, u
struct i40iw_vfdev *tmp_vfdev;
unsigned int i;
unsigned long flags;
+ struct i40iw_device *iwdev;
hdl = i40iw_find_i40e_handler(ldev);
if (!hdl)
return;
dev = &hdl->device.sc_dev;
+ iwdev = (struct i40iw_device *)dev->back_dev;
for (i = 0; i < I40IW_MAX_PE_ENABLED_VF_COUNT; i++) {
if (!dev->vf_dev[i] || (dev->vf_dev[i]->vf_id != vf_id))
continue;
/* free all resources allocated on behalf of vf */
tmp_vfdev = dev->vf_dev[i];
- spin_lock_irqsave(&dev->dev_pestat.stats_lock, flags);
+ spin_lock_irqsave(&iwdev->vsi.pestat->lock, flags);
dev->vf_dev[i] = NULL;
- spin_unlock_irqrestore(&dev->dev_pestat.stats_lock, flags);
+ spin_unlock_irqrestore(&iwdev->vsi.pestat->lock, flags);
i40iw_del_hmc_objects(dev, &tmp_vfdev->hmc_info, false, false);
/* remove vf hmc function */
memset(&hmc_fcn_info, 0, sizeof(hmc_fcn_info));
diff --git a/drivers/infiniband/hw/i40iw/i40iw_osdep.h b/drivers/infiniband/hw/i40iw/i40iw_osdep.h
index 80f422bf3967..aa66c1c63dfa 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_osdep.h
+++ b/drivers/infiniband/hw/i40iw/i40iw_osdep.h
@@ -198,6 +198,8 @@ enum i40iw_status_code i40iw_cqp_manage_vf_pble_bp(struct i40iw_sc_dev *dev,
void i40iw_cqp_spawn_worker(struct i40iw_sc_dev *dev,
struct i40iw_virtchnl_work_info *work_info, u32 iw_vf_idx);
void *i40iw_remove_head(struct list_head *list);
+void i40iw_qp_suspend_resume(struct i40iw_sc_dev *dev, struct i40iw_sc_qp *qp, bool suspend);
+void i40iw_qp_mss_modify(struct i40iw_sc_dev *dev, struct i40iw_sc_qp *qp);
void i40iw_term_modify_qp(struct i40iw_sc_qp *qp, u8 next_state, u8 term, u8 term_len);
void i40iw_terminate_done(struct i40iw_sc_qp *qp, int timeout_occurred);
@@ -207,9 +209,9 @@ void i40iw_terminate_del_timer(struct i40iw_sc_qp *qp);
enum i40iw_status_code i40iw_hw_manage_vf_pble_bp(struct i40iw_device *iwdev,
struct i40iw_manage_vf_pble_info *info,
bool wait);
-struct i40iw_dev_pestat;
-void i40iw_hw_stats_start_timer(struct i40iw_sc_dev *);
-void i40iw_hw_stats_del_timer(struct i40iw_sc_dev *);
+struct i40iw_sc_vsi;
+void i40iw_hw_stats_start_timer(struct i40iw_sc_vsi *vsi);
+void i40iw_hw_stats_stop_timer(struct i40iw_sc_vsi *vsi);
#define i40iw_mmiowb() mmiowb()
void i40iw_wr32(struct i40iw_hw *hw, u32 reg, u32 value);
u32 i40iw_rd32(struct i40iw_hw *hw, u32 reg);
diff --git a/drivers/infiniband/hw/i40iw/i40iw_p.h b/drivers/infiniband/hw/i40iw/i40iw_p.h
index a0b8ca10d67e..28a92fee0822 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_p.h
+++ b/drivers/infiniband/hw/i40iw/i40iw_p.h
@@ -47,8 +47,6 @@ void i40iw_debug_buf(struct i40iw_sc_dev *dev, enum i40iw_debug_flag mask,
enum i40iw_status_code i40iw_device_init(struct i40iw_sc_dev *dev,
struct i40iw_device_init_info *info);
-enum i40iw_status_code i40iw_device_init_pestat(struct i40iw_dev_pestat *);
-
void i40iw_sc_cqp_post_sq(struct i40iw_sc_cqp *cqp);
u64 *i40iw_sc_cqp_get_next_send_wqe(struct i40iw_sc_cqp *cqp, u64 scratch);
@@ -64,7 +62,24 @@ enum i40iw_status_code i40iw_sc_init_iw_hmc(struct i40iw_sc_dev *dev,
enum i40iw_status_code i40iw_pf_init_vfhmc(struct i40iw_sc_dev *dev, u8 vf_hmc_fn_id,
u32 *vf_cnt_array);
-/* cqp misc functions */
+/* stats functions */
+void i40iw_hw_stats_refresh_all(struct i40iw_vsi_pestat *stats);
+void i40iw_hw_stats_read_all(struct i40iw_vsi_pestat *stats, struct i40iw_dev_hw_stats *stats_values);
+void i40iw_hw_stats_read_32(struct i40iw_vsi_pestat *stats,
+ enum i40iw_hw_stats_index_32b index,
+ u64 *value);
+void i40iw_hw_stats_read_64(struct i40iw_vsi_pestat *stats,
+ enum i40iw_hw_stats_index_64b index,
+ u64 *value);
+void i40iw_hw_stats_init(struct i40iw_vsi_pestat *stats, u8 index, bool is_pf);
+
+/* vsi misc functions */
+enum i40iw_status_code i40iw_vsi_stats_init(struct i40iw_sc_vsi *vsi, struct i40iw_vsi_stats_info *info);
+void i40iw_vsi_stats_free(struct i40iw_sc_vsi *vsi);
+void i40iw_sc_vsi_init(struct i40iw_sc_vsi *vsi, struct i40iw_vsi_init_info *info);
+
+void i40iw_change_l2params(struct i40iw_sc_vsi *vsi, struct i40iw_l2params *l2params);
+void i40iw_qp_add_qos(struct i40iw_sc_qp *qp);
void i40iw_terminate_send_fin(struct i40iw_sc_qp *qp);
diff --git a/drivers/infiniband/hw/i40iw/i40iw_pble.c b/drivers/infiniband/hw/i40iw/i40iw_pble.c
index 85993dc44f6e..c87ba1617087 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_pble.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_pble.c
@@ -353,10 +353,6 @@ static enum i40iw_status_code add_pble_pool(struct i40iw_sc_dev *dev,
pages = (idx->rel_pd_idx) ? (I40IW_HMC_PD_CNT_IN_SD -
idx->rel_pd_idx) : I40IW_HMC_PD_CNT_IN_SD;
pages = min(pages, pble_rsrc->unallocated_pble >> PBLE_512_SHIFT);
- if (!pages) {
- ret_code = I40IW_ERR_NO_PBLCHUNKS_AVAILABLE;
- goto error;
- }
info.chunk = chunk;
info.hmc_info = hmc_info;
info.pages = pages;
diff --git a/drivers/infiniband/hw/i40iw/i40iw_puda.c b/drivers/infiniband/hw/i40iw/i40iw_puda.c
index c62d354f7810..db41ab40da9c 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_puda.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_puda.c
@@ -42,12 +42,13 @@
#include "i40iw_p.h"
#include "i40iw_puda.h"
-static void i40iw_ieq_receive(struct i40iw_sc_dev *dev,
+static void i40iw_ieq_receive(struct i40iw_sc_vsi *vsi,
struct i40iw_puda_buf *buf);
-static void i40iw_ieq_tx_compl(struct i40iw_sc_dev *dev, void *sqwrid);
+static void i40iw_ieq_tx_compl(struct i40iw_sc_vsi *vsi, void *sqwrid);
static void i40iw_ilq_putback_rcvbuf(struct i40iw_sc_qp *qp, u32 wqe_idx);
static enum i40iw_status_code i40iw_puda_replenish_rq(struct i40iw_puda_rsrc
*rsrc, bool initial);
+static void i40iw_ieq_cleanup_qp(struct i40iw_puda_rsrc *ieq, struct i40iw_sc_qp *qp);
/**
* i40iw_puda_get_listbuf - get buffer from puda list
* @list: list to use for buffers (ILQ or IEQ)
@@ -292,7 +293,7 @@ enum i40iw_status_code i40iw_puda_poll_completion(struct i40iw_sc_dev *dev,
unsigned long flags;
if ((cq_type == I40IW_CQ_TYPE_ILQ) || (cq_type == I40IW_CQ_TYPE_IEQ)) {
- rsrc = (cq_type == I40IW_CQ_TYPE_ILQ) ? dev->ilq : dev->ieq;
+ rsrc = (cq_type == I40IW_CQ_TYPE_ILQ) ? cq->vsi->ilq : cq->vsi->ieq;
} else {
i40iw_debug(dev, I40IW_DEBUG_PUDA, "%s qp_type error\n", __func__);
return I40IW_ERR_BAD_PTR;
@@ -335,7 +336,7 @@ enum i40iw_status_code i40iw_puda_poll_completion(struct i40iw_sc_dev *dev,
rsrc->stats_pkt_rcvd++;
rsrc->compl_rxwqe_idx = info.wqe_idx;
i40iw_debug(dev, I40IW_DEBUG_PUDA, "%s RQ completion\n", __func__);
- rsrc->receive(rsrc->dev, buf);
+ rsrc->receive(rsrc->vsi, buf);
if (cq_type == I40IW_CQ_TYPE_ILQ)
i40iw_ilq_putback_rcvbuf(&rsrc->qp, info.wqe_idx);
else
@@ -345,12 +346,12 @@ enum i40iw_status_code i40iw_puda_poll_completion(struct i40iw_sc_dev *dev,
i40iw_debug(dev, I40IW_DEBUG_PUDA, "%s SQ completion\n", __func__);
sqwrid = (void *)(uintptr_t)qp->sq_wrtrk_array[info.wqe_idx].wrid;
I40IW_RING_SET_TAIL(qp->sq_ring, info.wqe_idx);
- rsrc->xmit_complete(rsrc->dev, sqwrid);
+ rsrc->xmit_complete(rsrc->vsi, sqwrid);
spin_lock_irqsave(&rsrc->bufpool_lock, flags);
rsrc->tx_wqe_avail_cnt++;
spin_unlock_irqrestore(&rsrc->bufpool_lock, flags);
- if (!list_empty(&dev->ilq->txpend))
- i40iw_puda_send_buf(dev->ilq, NULL);
+ if (!list_empty(&rsrc->vsi->ilq->txpend))
+ i40iw_puda_send_buf(rsrc->vsi->ilq, NULL);
}
done:
@@ -513,10 +514,8 @@ static void i40iw_puda_qp_setctx(struct i40iw_puda_rsrc *rsrc)
* i40iw_puda_qp_wqe - setup wqe for qp create
* @rsrc: resource for qp
*/
-static enum i40iw_status_code i40iw_puda_qp_wqe(struct i40iw_puda_rsrc *rsrc)
+static enum i40iw_status_code i40iw_puda_qp_wqe(struct i40iw_sc_dev *dev, struct i40iw_sc_qp *qp)
{
- struct i40iw_sc_qp *qp = &rsrc->qp;
- struct i40iw_sc_dev *dev = rsrc->dev;
struct i40iw_sc_cqp *cqp;
u64 *wqe;
u64 header;
@@ -582,6 +581,7 @@ static enum i40iw_status_code i40iw_puda_qp_create(struct i40iw_puda_rsrc *rsrc)
qp->back_qp = (void *)rsrc;
qp->sq_pa = mem->pa;
qp->rq_pa = qp->sq_pa + sq_size;
+ qp->vsi = rsrc->vsi;
ukqp->sq_base = mem->va;
ukqp->rq_base = &ukqp->sq_base[rsrc->sq_size];
ukqp->shadow_area = ukqp->rq_base[rsrc->rq_size].elem;
@@ -608,15 +608,63 @@ static enum i40iw_status_code i40iw_puda_qp_create(struct i40iw_puda_rsrc *rsrc)
ukqp->wqe_alloc_reg = (u32 __iomem *)(i40iw_get_hw_addr(qp->pd->dev) +
I40E_VFPE_WQEALLOC1);
- qp->qs_handle = qp->dev->qs_handle;
+ qp->user_pri = 0;
+ i40iw_qp_add_qos(qp);
i40iw_puda_qp_setctx(rsrc);
- ret = i40iw_puda_qp_wqe(rsrc);
+ if (rsrc->ceq_valid)
+ ret = i40iw_cqp_qp_create_cmd(rsrc->dev, qp);
+ else
+ ret = i40iw_puda_qp_wqe(rsrc->dev, qp);
if (ret)
i40iw_free_dma_mem(rsrc->dev->hw, &rsrc->qpmem);
return ret;
}
/**
+ * i40iw_puda_cq_wqe - setup wqe for cq create
+ * @rsrc: resource for cq
+ */
+static enum i40iw_status_code i40iw_puda_cq_wqe(struct i40iw_sc_dev *dev, struct i40iw_sc_cq *cq)
+{
+ u64 *wqe;
+ struct i40iw_sc_cqp *cqp;
+ u64 header;
+ struct i40iw_ccq_cqe_info compl_info;
+ enum i40iw_status_code status = 0;
+
+ cqp = dev->cqp;
+ wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, 0);
+ if (!wqe)
+ return I40IW_ERR_RING_FULL;
+
+ set_64bit_val(wqe, 0, cq->cq_uk.cq_size);
+ set_64bit_val(wqe, 8, RS_64_1(cq, 1));
+ set_64bit_val(wqe, 16,
+ LS_64(cq->shadow_read_threshold,
+ I40IW_CQPSQ_CQ_SHADOW_READ_THRESHOLD));
+ set_64bit_val(wqe, 32, cq->cq_pa);
+
+ set_64bit_val(wqe, 40, cq->shadow_area_pa);
+
+ header = cq->cq_uk.cq_id |
+ LS_64(I40IW_CQP_OP_CREATE_CQ, I40IW_CQPSQ_OPCODE) |
+ LS_64(1, I40IW_CQPSQ_CQ_CHKOVERFLOW) |
+ LS_64(1, I40IW_CQPSQ_CQ_ENCEQEMASK) |
+ LS_64(1, I40IW_CQPSQ_CQ_CEQIDVALID) |
+ LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
+ set_64bit_val(wqe, 24, header);
+
+ i40iw_debug_buf(dev, I40IW_DEBUG_PUDA, "PUDA CQE",
+ wqe, I40IW_CQP_WQE_SIZE * 8);
+
+ i40iw_sc_cqp_post_sq(dev->cqp);
+ status = dev->cqp_ops->poll_for_cqp_op_done(dev->cqp,
+ I40IW_CQP_OP_CREATE_CQ,
+ &compl_info);
+ return status;
+}
+
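The WQE construction above follows the driver-wide convention that a CQP WQE is a run of 64-bit words and each field is a shift/mask pair OR-ed into place, with the valid bit carrying the ring polarity. A sketch of the LS_64()/RS_64() packing idiom with invented shift and mask values (the real ones live in the hardware register headers):

#include <stdint.h>
#include <stdio.h>

#define OPCODE_SHIFT   32
#define OPCODE_MASK    (0x3fULL << OPCODE_SHIFT)
#define WQEVALID_SHIFT 63
#define WQEVALID_MASK  (1ULL << WQEVALID_SHIFT)

#define LS_64(val, f)  (((uint64_t)(val) << f##_SHIFT) & f##_MASK)
#define RS_64(val, f)  (((uint64_t)(val) & f##_MASK) >> f##_SHIFT)

int main(void)
{
	uint64_t header = 17			/* cq_id in the low bits */
			| LS_64(0x05, OPCODE)	/* create-CQ opcode */
			| LS_64(1, WQEVALID);	/* ring polarity bit */

	printf("header = 0x%016llx\n", (unsigned long long)header);
	printf("opcode = %llu\n",
	       (unsigned long long)RS_64(header, OPCODE));
	return 0;
}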
+/**
* i40iw_puda_cq_create - create cq for resource
* @rsrc: resource for which cq to create
*/
@@ -624,18 +672,13 @@ static enum i40iw_status_code i40iw_puda_cq_create(struct i40iw_puda_rsrc *rsrc)
{
struct i40iw_sc_dev *dev = rsrc->dev;
struct i40iw_sc_cq *cq = &rsrc->cq;
- u64 *wqe;
- struct i40iw_sc_cqp *cqp;
- u64 header;
enum i40iw_status_code ret = 0;
u32 tsize, cqsize;
- u32 shadow_read_threshold = 128;
struct i40iw_dma_mem *mem;
- struct i40iw_ccq_cqe_info compl_info;
struct i40iw_cq_init_info info;
struct i40iw_cq_uk_init_info *init_info = &info.cq_uk_init_info;
- cq->back_cq = (void *)rsrc;
+ cq->vsi = rsrc->vsi;
cqsize = rsrc->cq_size * (sizeof(struct i40iw_cqe));
tsize = cqsize + sizeof(struct i40iw_cq_shadow_area);
ret = i40iw_allocate_dma_mem(dev->hw, &rsrc->cqmem, tsize,
@@ -656,43 +699,84 @@ static enum i40iw_status_code i40iw_puda_cq_create(struct i40iw_puda_rsrc *rsrc)
init_info->shadow_area = (u64 *)((u8 *)mem->va + cqsize);
init_info->cq_size = rsrc->cq_size;
init_info->cq_id = rsrc->cq_id;
+ info.ceqe_mask = true;
+ info.ceq_id_valid = true;
ret = dev->iw_priv_cq_ops->cq_init(cq, &info);
if (ret)
goto error;
- cqp = dev->cqp;
- wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, 0);
- if (!wqe) {
- ret = I40IW_ERR_RING_FULL;
- goto error;
- }
+ if (rsrc->ceq_valid)
+ ret = i40iw_cqp_cq_create_cmd(dev, cq);
+ else
+ ret = i40iw_puda_cq_wqe(dev, cq);
+error:
+ if (ret)
+ i40iw_free_dma_mem(dev->hw, &rsrc->cqmem);
+ return ret;
+}
- set_64bit_val(wqe, 0, rsrc->cq_size);
- set_64bit_val(wqe, 8, RS_64_1(cq, 1));
- set_64bit_val(wqe, 16, LS_64(shadow_read_threshold, I40IW_CQPSQ_CQ_SHADOW_READ_THRESHOLD));
- set_64bit_val(wqe, 32, cq->cq_pa);
+/**
+ * i40iw_puda_free_qp - free qp for resource
+ * @rsrc: resource for which qp to free
+ */
+static void i40iw_puda_free_qp(struct i40iw_puda_rsrc *rsrc)
+{
+ enum i40iw_status_code ret;
+ struct i40iw_ccq_cqe_info compl_info;
+ struct i40iw_sc_dev *dev = rsrc->dev;
- set_64bit_val(wqe, 40, cq->shadow_area_pa);
+ if (rsrc->ceq_valid) {
+ i40iw_cqp_qp_destroy_cmd(dev, &rsrc->qp);
+ return;
+ }
- header = rsrc->cq_id |
- LS_64(I40IW_CQP_OP_CREATE_CQ, I40IW_CQPSQ_OPCODE) |
- LS_64(1, I40IW_CQPSQ_CQ_CHKOVERFLOW) |
- LS_64(1, I40IW_CQPSQ_CQ_ENCEQEMASK) |
- LS_64(1, I40IW_CQPSQ_CQ_CEQIDVALID) |
- LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
- set_64bit_val(wqe, 24, header);
+ ret = dev->iw_priv_qp_ops->qp_destroy(&rsrc->qp,
+ 0, false, true, true);
+ if (ret)
+ i40iw_debug(dev, I40IW_DEBUG_PUDA,
+ "%s error puda qp destroy wqe\n",
+ __func__);
- i40iw_debug_buf(dev, I40IW_DEBUG_PUDA, "PUDA CQE",
- wqe, I40IW_CQP_WQE_SIZE * 8);
+ if (!ret) {
+ ret = dev->cqp_ops->poll_for_cqp_op_done(dev->cqp,
+ I40IW_CQP_OP_DESTROY_QP,
+ &compl_info);
+ if (ret)
+ i40iw_debug(dev, I40IW_DEBUG_PUDA,
+ "%s error puda qp destroy failed\n",
+ __func__);
+ }
+}
- i40iw_sc_cqp_post_sq(dev->cqp);
- ret = dev->cqp_ops->poll_for_cqp_op_done(dev->cqp,
- I40IW_CQP_OP_CREATE_CQ,
- &compl_info);
+/**
+ * i40iw_puda_free_cq - free cq for resource
+ * @rsrc: resource for which cq to free
+ */
+static void i40iw_puda_free_cq(struct i40iw_puda_rsrc *rsrc)
+{
+ enum i40iw_status_code ret;
+ struct i40iw_ccq_cqe_info compl_info;
+ struct i40iw_sc_dev *dev = rsrc->dev;
+
+ if (rsrc->ceq_valid) {
+ i40iw_cqp_cq_destroy_cmd(dev, &rsrc->cq);
+ return;
+ }
+ ret = dev->iw_priv_cq_ops->cq_destroy(&rsrc->cq, 0, true);
-error:
if (ret)
- i40iw_free_dma_mem(dev->hw, &rsrc->cqmem);
- return ret;
+ i40iw_debug(dev, I40IW_DEBUG_PUDA,
+ "%s error ieq cq destroy\n",
+ __func__);
+
+ if (!ret) {
+ ret = dev->cqp_ops->poll_for_cqp_op_done(dev->cqp,
+ I40IW_CQP_OP_DESTROY_CQ,
+ &compl_info);
+ if (ret)
+ i40iw_debug(dev, I40IW_DEBUG_PUDA,
+ "%s error ieq qp destroy done\n",
+ __func__);
+ }
}
/**
@@ -701,25 +785,24 @@ error:
 * @type: type of resource to delete
* @reset: true if reset chip
*/
-void i40iw_puda_dele_resources(struct i40iw_sc_dev *dev,
+void i40iw_puda_dele_resources(struct i40iw_sc_vsi *vsi,
enum puda_resource_type type,
bool reset)
{
- struct i40iw_ccq_cqe_info compl_info;
+ struct i40iw_sc_dev *dev = vsi->dev;
struct i40iw_puda_rsrc *rsrc;
struct i40iw_puda_buf *buf = NULL;
struct i40iw_puda_buf *nextbuf = NULL;
struct i40iw_virt_mem *vmem;
- enum i40iw_status_code ret;
switch (type) {
case I40IW_PUDA_RSRC_TYPE_ILQ:
- rsrc = dev->ilq;
- vmem = &dev->ilq_mem;
+ rsrc = vsi->ilq;
+ vmem = &vsi->ilq_mem;
break;
case I40IW_PUDA_RSRC_TYPE_IEQ:
- rsrc = dev->ieq;
- vmem = &dev->ieq_mem;
+ rsrc = vsi->ieq;
+ vmem = &vsi->ieq_mem;
break;
default:
i40iw_debug(dev, I40IW_DEBUG_PUDA, "%s: error resource type = 0x%x\n",
@@ -731,45 +814,14 @@ void i40iw_puda_dele_resources(struct i40iw_sc_dev *dev,
case PUDA_HASH_CRC_COMPLETE:
i40iw_free_hash_desc(rsrc->hash_desc);
case PUDA_QP_CREATED:
- do {
- if (reset)
- break;
- ret = dev->iw_priv_qp_ops->qp_destroy(&rsrc->qp,
- 0, false, true, true);
- if (ret)
- i40iw_debug(rsrc->dev, I40IW_DEBUG_PUDA,
- "%s error ieq qp destroy\n",
- __func__);
-
- ret = dev->cqp_ops->poll_for_cqp_op_done(dev->cqp,
- I40IW_CQP_OP_DESTROY_QP,
- &compl_info);
- if (ret)
- i40iw_debug(rsrc->dev, I40IW_DEBUG_PUDA,
- "%s error ieq qp destroy done\n",
- __func__);
- } while (0);
+ if (!reset)
+ i40iw_puda_free_qp(rsrc);
i40iw_free_dma_mem(dev->hw, &rsrc->qpmem);
/* fallthrough */
case PUDA_CQ_CREATED:
- do {
- if (reset)
- break;
- ret = dev->iw_priv_cq_ops->cq_destroy(&rsrc->cq, 0, true);
- if (ret)
- i40iw_debug(rsrc->dev, I40IW_DEBUG_PUDA,
- "%s error ieq cq destroy\n",
- __func__);
-
- ret = dev->cqp_ops->poll_for_cqp_op_done(dev->cqp,
- I40IW_CQP_OP_DESTROY_CQ,
- &compl_info);
- if (ret)
- i40iw_debug(rsrc->dev, I40IW_DEBUG_PUDA,
- "%s error ieq qp destroy done\n",
- __func__);
- } while (0);
+ if (!reset)
+ i40iw_puda_free_cq(rsrc);
i40iw_free_dma_mem(dev->hw, &rsrc->cqmem);
break;
@@ -825,9 +877,10 @@ static enum i40iw_status_code i40iw_puda_allocbufs(struct i40iw_puda_rsrc *rsrc,
* @dev: iwarp device
* @info: resource information
*/
-enum i40iw_status_code i40iw_puda_create_rsrc(struct i40iw_sc_dev *dev,
+enum i40iw_status_code i40iw_puda_create_rsrc(struct i40iw_sc_vsi *vsi,
struct i40iw_puda_rsrc_info *info)
{
+ struct i40iw_sc_dev *dev = vsi->dev;
enum i40iw_status_code ret = 0;
struct i40iw_puda_rsrc *rsrc;
u32 pudasize;
@@ -840,10 +893,10 @@ enum i40iw_status_code i40iw_puda_create_rsrc(struct i40iw_sc_dev *dev,
rqwridsize = info->rq_size * 8;
switch (info->type) {
case I40IW_PUDA_RSRC_TYPE_ILQ:
- vmem = &dev->ilq_mem;
+ vmem = &vsi->ilq_mem;
break;
case I40IW_PUDA_RSRC_TYPE_IEQ:
- vmem = &dev->ieq_mem;
+ vmem = &vsi->ieq_mem;
break;
default:
return I40IW_NOT_SUPPORTED;
@@ -856,28 +909,28 @@ enum i40iw_status_code i40iw_puda_create_rsrc(struct i40iw_sc_dev *dev,
rsrc = (struct i40iw_puda_rsrc *)vmem->va;
spin_lock_init(&rsrc->bufpool_lock);
if (info->type == I40IW_PUDA_RSRC_TYPE_ILQ) {
- dev->ilq = (struct i40iw_puda_rsrc *)vmem->va;
- dev->ilq_count = info->count;
+ vsi->ilq = (struct i40iw_puda_rsrc *)vmem->va;
+ vsi->ilq_count = info->count;
rsrc->receive = info->receive;
rsrc->xmit_complete = info->xmit_complete;
} else {
- vmem = &dev->ieq_mem;
- dev->ieq_count = info->count;
- dev->ieq = (struct i40iw_puda_rsrc *)vmem->va;
+ vmem = &vsi->ieq_mem;
+ vsi->ieq_count = info->count;
+ vsi->ieq = (struct i40iw_puda_rsrc *)vmem->va;
rsrc->receive = i40iw_ieq_receive;
rsrc->xmit_complete = i40iw_ieq_tx_compl;
}
+ rsrc->ceq_valid = info->ceq_valid;
rsrc->type = info->type;
rsrc->sq_wrtrk_array = (struct i40iw_sq_uk_wr_trk_info *)((u8 *)vmem->va + pudasize);
rsrc->rq_wrid_array = (u64 *)((u8 *)vmem->va + pudasize + sqwridsize);
- rsrc->mss = info->mss;
/* Initialize all ieq lists */
INIT_LIST_HEAD(&rsrc->bufpool);
INIT_LIST_HEAD(&rsrc->txpend);
rsrc->tx_wqe_avail_cnt = info->sq_size - 1;
- dev->iw_pd_ops->pd_init(dev, &rsrc->sc_pd, info->pd_id);
+ dev->iw_pd_ops->pd_init(dev, &rsrc->sc_pd, info->pd_id, -1);
rsrc->qp_id = info->qp_id;
rsrc->cq_id = info->cq_id;
rsrc->sq_size = info->sq_size;
@@ -885,6 +938,7 @@ enum i40iw_status_code i40iw_puda_create_rsrc(struct i40iw_sc_dev *dev,
rsrc->cq_size = info->rq_size + info->sq_size;
rsrc->buf_size = info->buf_size;
rsrc->dev = dev;
+ rsrc->vsi = vsi;
ret = i40iw_puda_cq_create(rsrc);
if (!ret) {
@@ -919,7 +973,7 @@ enum i40iw_status_code i40iw_puda_create_rsrc(struct i40iw_sc_dev *dev,
dev->ccq_ops->ccq_arm(&rsrc->cq);
return ret;
error:
- i40iw_puda_dele_resources(dev, info->type, false);
+ i40iw_puda_dele_resources(vsi, info->type, false);
return ret;
}
@@ -1131,7 +1185,7 @@ static enum i40iw_status_code i40iw_ieq_handle_partial(struct i40iw_puda_rsrc *i
list_add(&buf->list, &pbufl);
status = i40iw_ieq_create_pbufl(pfpdu, rxlist, &pbufl, buf, fpdu_len);
- if (!status)
+ if (status)
goto error;
txbuf = i40iw_puda_get_bufpool(ieq);
@@ -1332,7 +1386,7 @@ static void i40iw_ieq_handle_exception(struct i40iw_puda_rsrc *ieq,
}
if (pfpdu->mode && (fps != pfpdu->fps)) {
/* clean up qp as it is new partial sequence */
- i40iw_ieq_cleanup_qp(ieq->dev, qp);
+ i40iw_ieq_cleanup_qp(ieq, qp);
i40iw_debug(ieq->dev, I40IW_DEBUG_IEQ,
"%s: restarting new partial\n", __func__);
pfpdu->mode = false;
@@ -1344,7 +1398,7 @@ static void i40iw_ieq_handle_exception(struct i40iw_puda_rsrc *ieq,
pfpdu->rcv_nxt = fps;
pfpdu->fps = fps;
pfpdu->mode = true;
- pfpdu->max_fpdu_data = ieq->mss;
+ pfpdu->max_fpdu_data = ieq->vsi->mss;
pfpdu->pmode_count++;
INIT_LIST_HEAD(rxlist);
i40iw_ieq_check_first_buf(buf, fps);
@@ -1379,14 +1433,14 @@ static void i40iw_ieq_handle_exception(struct i40iw_puda_rsrc *ieq,
* @dev: iwarp device
* @buf: exception buffer received
*/
-static void i40iw_ieq_receive(struct i40iw_sc_dev *dev,
+static void i40iw_ieq_receive(struct i40iw_sc_vsi *vsi,
struct i40iw_puda_buf *buf)
{
- struct i40iw_puda_rsrc *ieq = dev->ieq;
+ struct i40iw_puda_rsrc *ieq = vsi->ieq;
struct i40iw_sc_qp *qp = NULL;
u32 wqe_idx = ieq->compl_rxwqe_idx;
- qp = i40iw_ieq_get_qp(dev, buf);
+ qp = i40iw_ieq_get_qp(vsi->dev, buf);
if (!qp) {
ieq->stats_bad_qp_id++;
i40iw_puda_ret_bufpool(ieq, buf);
@@ -1404,12 +1458,12 @@ static void i40iw_ieq_receive(struct i40iw_sc_dev *dev,
/**
* i40iw_ieq_tx_compl - put back after sending completed exception buffer
- * @dev: iwarp device
+ * @vsi: pointer to the vsi structure
* @sqwrid: pointer to puda buffer
*/
-static void i40iw_ieq_tx_compl(struct i40iw_sc_dev *dev, void *sqwrid)
+static void i40iw_ieq_tx_compl(struct i40iw_sc_vsi *vsi, void *sqwrid)
{
- struct i40iw_puda_rsrc *ieq = dev->ieq;
+ struct i40iw_puda_rsrc *ieq = vsi->ieq;
struct i40iw_puda_buf *buf = (struct i40iw_puda_buf *)sqwrid;
i40iw_puda_ret_bufpool(ieq, buf);
@@ -1421,15 +1475,14 @@ static void i40iw_ieq_tx_compl(struct i40iw_sc_dev *dev, void *sqwrid)
/**
* i40iw_ieq_cleanup_qp - qp is being destroyed
- * @dev: iwarp device
+ * @ieq: ieq resource
* @qp: all pending fpdu buffers
*/
-void i40iw_ieq_cleanup_qp(struct i40iw_sc_dev *dev, struct i40iw_sc_qp *qp)
+static void i40iw_ieq_cleanup_qp(struct i40iw_puda_rsrc *ieq, struct i40iw_sc_qp *qp)
{
struct i40iw_puda_buf *buf;
struct i40iw_pfpdu *pfpdu = &qp->pfpdu;
struct list_head *rxlist = &pfpdu->rxlist;
- struct i40iw_puda_rsrc *ieq = dev->ieq;
if (!pfpdu->mode)
return;
diff --git a/drivers/infiniband/hw/i40iw/i40iw_puda.h b/drivers/infiniband/hw/i40iw/i40iw_puda.h
index 52bf7826ce4e..dba05ce7d392 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_puda.h
+++ b/drivers/infiniband/hw/i40iw/i40iw_puda.h
@@ -100,6 +100,7 @@ struct i40iw_puda_rsrc_info {
enum puda_resource_type type; /* ILQ or IEQ */
u32 count;
u16 pd_id;
+ bool ceq_valid;
u32 cq_id;
u32 qp_id;
u32 sq_size;
@@ -107,8 +108,8 @@ struct i40iw_puda_rsrc_info {
u16 buf_size;
u16 mss;
u32 tx_buf_cnt; /* total bufs allocated will be rq_size + tx_buf_cnt */
- void (*receive)(struct i40iw_sc_dev *, struct i40iw_puda_buf *);
- void (*xmit_complete)(struct i40iw_sc_dev *, void *);
+ void (*receive)(struct i40iw_sc_vsi *, struct i40iw_puda_buf *);
+ void (*xmit_complete)(struct i40iw_sc_vsi *, void *);
};
struct i40iw_puda_rsrc {
@@ -116,6 +117,7 @@ struct i40iw_puda_rsrc {
struct i40iw_sc_qp qp;
struct i40iw_sc_pd sc_pd;
struct i40iw_sc_dev *dev;
+ struct i40iw_sc_vsi *vsi;
struct i40iw_dma_mem cqmem;
struct i40iw_dma_mem qpmem;
struct i40iw_virt_mem ilq_mem;
@@ -123,6 +125,7 @@ struct i40iw_puda_rsrc {
enum puda_resource_type type;
u16 buf_size; /*buffer must be max datalen + tcpip hdr + mac */
u16 mss;
+ bool ceq_valid;
u32 cq_id;
u32 qp_id;
u32 sq_size;
@@ -142,8 +145,8 @@ struct i40iw_puda_rsrc {
u32 avail_buf_count; /* snapshot of currently available buffers */
spinlock_t bufpool_lock;
struct i40iw_puda_buf *alloclist;
- void (*receive)(struct i40iw_sc_dev *, struct i40iw_puda_buf *);
- void (*xmit_complete)(struct i40iw_sc_dev *, void *);
+ void (*receive)(struct i40iw_sc_vsi *, struct i40iw_puda_buf *);
+ void (*xmit_complete)(struct i40iw_sc_vsi *, void *);
/* puda stats */
u64 stats_buf_alloc_fail;
u64 stats_pkt_rcvd;
@@ -160,14 +163,13 @@ void i40iw_puda_send_buf(struct i40iw_puda_rsrc *rsrc,
struct i40iw_puda_buf *buf);
enum i40iw_status_code i40iw_puda_send(struct i40iw_sc_qp *qp,
struct i40iw_puda_send_info *info);
-enum i40iw_status_code i40iw_puda_create_rsrc(struct i40iw_sc_dev *dev,
+enum i40iw_status_code i40iw_puda_create_rsrc(struct i40iw_sc_vsi *vsi,
struct i40iw_puda_rsrc_info *info);
-void i40iw_puda_dele_resources(struct i40iw_sc_dev *dev,
+void i40iw_puda_dele_resources(struct i40iw_sc_vsi *vsi,
enum puda_resource_type type,
bool reset);
enum i40iw_status_code i40iw_puda_poll_completion(struct i40iw_sc_dev *dev,
struct i40iw_sc_cq *cq, u32 *compl_err);
-void i40iw_ieq_cleanup_qp(struct i40iw_sc_dev *dev, struct i40iw_sc_qp *qp);
struct i40iw_sc_qp *i40iw_ieq_get_qp(struct i40iw_sc_dev *dev,
struct i40iw_puda_buf *buf);
@@ -180,4 +182,8 @@ void i40iw_ieq_mpa_crc_ae(struct i40iw_sc_dev *dev, struct i40iw_sc_qp *qp);
void i40iw_free_hash_desc(struct shash_desc *desc);
void i40iw_ieq_update_tcpip_info(struct i40iw_puda_buf *buf, u16 length,
u32 seqnum);
+enum i40iw_status_code i40iw_cqp_qp_create_cmd(struct i40iw_sc_dev *dev, struct i40iw_sc_qp *qp);
+enum i40iw_status_code i40iw_cqp_cq_create_cmd(struct i40iw_sc_dev *dev, struct i40iw_sc_cq *cq);
+void i40iw_cqp_qp_destroy_cmd(struct i40iw_sc_dev *dev, struct i40iw_sc_qp *qp);
+void i40iw_cqp_cq_destroy_cmd(struct i40iw_sc_dev *dev, struct i40iw_sc_cq *cq);
#endif
diff --git a/drivers/infiniband/hw/i40iw/i40iw_type.h b/drivers/infiniband/hw/i40iw/i40iw_type.h
index 2b1a04e9ca3c..7b76259752b0 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_type.h
+++ b/drivers/infiniband/hw/i40iw/i40iw_type.h
@@ -61,7 +61,7 @@ struct i40iw_cq_shadow_area {
struct i40iw_sc_dev;
struct i40iw_hmc_info;
-struct i40iw_dev_pestat;
+struct i40iw_vsi_pestat;
struct i40iw_cqp_ops;
struct i40iw_ccq_ops;
@@ -74,6 +74,11 @@ struct i40iw_priv_qp_ops;
struct i40iw_priv_cq_ops;
struct i40iw_hmc_ops;
+enum i40iw_page_size {
+ I40IW_PAGE_SIZE_4K,
+ I40IW_PAGE_SIZE_2M
+};
+
enum i40iw_resource_indicator_type {
I40IW_RSRC_INDICATOR_TYPE_ADAPTER = 0,
I40IW_RSRC_INDICATOR_TYPE_CQ,
@@ -186,7 +191,7 @@ enum i40iw_debug_flag {
I40IW_DEBUG_ALL = 0xFFFFFFFF
};
-enum i40iw_hw_stat_index_32b {
+enum i40iw_hw_stats_index_32b {
I40IW_HW_STAT_INDEX_IP4RXDISCARD = 0,
I40IW_HW_STAT_INDEX_IP4RXTRUNC,
I40IW_HW_STAT_INDEX_IP4TXNOROUTE,
@@ -199,7 +204,7 @@ enum i40iw_hw_stat_index_32b {
I40IW_HW_STAT_INDEX_MAX_32
};
-enum i40iw_hw_stat_index_64b {
+enum i40iw_hw_stats_index_64b {
I40IW_HW_STAT_INDEX_IP4RXOCTS = 0,
I40IW_HW_STAT_INDEX_IP4RXPKTS,
I40IW_HW_STAT_INDEX_IP4RXFRAGS,
@@ -229,32 +234,23 @@ enum i40iw_hw_stat_index_64b {
I40IW_HW_STAT_INDEX_MAX_64
};
-struct i40iw_dev_hw_stat_offsets {
- u32 stat_offset_32[I40IW_HW_STAT_INDEX_MAX_32];
- u32 stat_offset_64[I40IW_HW_STAT_INDEX_MAX_64];
+struct i40iw_dev_hw_stats_offsets {
+ u32 stats_offset_32[I40IW_HW_STAT_INDEX_MAX_32];
+ u32 stats_offset_64[I40IW_HW_STAT_INDEX_MAX_64];
};
struct i40iw_dev_hw_stats {
- u64 stat_value_32[I40IW_HW_STAT_INDEX_MAX_32];
- u64 stat_value_64[I40IW_HW_STAT_INDEX_MAX_64];
-};
-
-struct i40iw_device_pestat_ops {
- void (*iw_hw_stat_init)(struct i40iw_dev_pestat *, u8, struct i40iw_hw *, bool);
- void (*iw_hw_stat_read_32)(struct i40iw_dev_pestat *, enum i40iw_hw_stat_index_32b, u64 *);
- void (*iw_hw_stat_read_64)(struct i40iw_dev_pestat *, enum i40iw_hw_stat_index_64b, u64 *);
- void (*iw_hw_stat_read_all)(struct i40iw_dev_pestat *, struct i40iw_dev_hw_stats *);
- void (*iw_hw_stat_refresh_all)(struct i40iw_dev_pestat *);
+ u64 stats_value_32[I40IW_HW_STAT_INDEX_MAX_32];
+ u64 stats_value_64[I40IW_HW_STAT_INDEX_MAX_64];
};
-struct i40iw_dev_pestat {
+struct i40iw_vsi_pestat {
struct i40iw_hw *hw;
- struct i40iw_device_pestat_ops ops;
struct i40iw_dev_hw_stats hw_stats;
struct i40iw_dev_hw_stats last_read_hw_stats;
- struct i40iw_dev_hw_stat_offsets hw_stat_offsets;
+ struct i40iw_dev_hw_stats_offsets hw_stats_offsets;
struct timer_list stats_timer;
- spinlock_t stats_lock; /* rdma stats lock */
+ spinlock_t lock; /* rdma stats lock */
};
struct i40iw_hw {
@@ -284,6 +280,7 @@ struct i40iw_sc_pd {
u32 size;
struct i40iw_sc_dev *dev;
u16 pd_id;
+ int abi_ver;
};
struct i40iw_cqp_quanta {
@@ -350,6 +347,7 @@ struct i40iw_sc_cq {
u64 cq_pa;
u64 shadow_area_pa;
struct i40iw_sc_dev *dev;
+ struct i40iw_sc_vsi *vsi;
void *pbl_list;
void *back_cq;
u32 ceq_id;
@@ -373,6 +371,7 @@ struct i40iw_sc_qp {
u64 shadow_area_pa;
u64 q2_pa;
struct i40iw_sc_dev *dev;
+ struct i40iw_sc_vsi *vsi;
struct i40iw_sc_pd *pd;
u64 *hw_host_ctx;
void *llp_stream_handle;
@@ -397,6 +396,9 @@ struct i40iw_sc_qp {
bool virtual_map;
bool flush_sq;
bool flush_rq;
+ u8 user_pri;
+ struct list_head list;
+ bool on_qoslist;
bool sq_flush;
enum i40iw_flush_opcode flush_code;
enum i40iw_term_eventtypes eventtype;
@@ -424,10 +426,16 @@ struct i40iw_vchnl_vf_msg_buffer {
char parm_buffer[I40IW_VCHNL_MAX_VF_MSG_SIZE - 1];
};
+struct i40iw_qos {
+ struct list_head qplist;
+ spinlock_t lock; /* qos list */
+ u16 qs_handle;
+};
+
struct i40iw_vfdev {
struct i40iw_sc_dev *pf_dev;
u8 *hmc_info_mem;
- struct i40iw_dev_pestat dev_pestat;
+ struct i40iw_vsi_pestat pestat;
struct i40iw_hmc_pble_info *pble_info;
struct i40iw_hmc_info hmc_info;
struct i40iw_vchnl_vf_msg_buffer vf_msg_buffer;
@@ -441,11 +449,28 @@ struct i40iw_vfdev {
bool stats_initialized;
};
+#define I40IW_INVALID_FCN_ID 0xff
+struct i40iw_sc_vsi {
+ struct i40iw_sc_dev *dev;
+ void *back_vsi; /* Owned by OS */
+ u32 ilq_count;
+ struct i40iw_virt_mem ilq_mem;
+ struct i40iw_puda_rsrc *ilq;
+ u32 ieq_count;
+ struct i40iw_virt_mem ieq_mem;
+ struct i40iw_puda_rsrc *ieq;
+ u16 mss;
+ u8 fcn_id;
+ bool stats_fcn_id_alloc;
+ struct i40iw_qos qos[I40IW_MAX_USER_PRIORITY];
+ struct i40iw_vsi_pestat *pestat;
+};
+
struct i40iw_sc_dev {
struct list_head cqp_cmd_head; /* head of the CQP command list */
spinlock_t cqp_lock; /* cqp list sync */
struct i40iw_dev_uk dev_uk;
- struct i40iw_dev_pestat dev_pestat;
+ bool fcn_id_array[I40IW_MAX_STATS_COUNT];
struct i40iw_dma_mem vf_fpm_query_buf[I40IW_MAX_PE_ENABLED_VF_COUNT];
u64 fpm_query_buf_pa;
u64 fpm_commit_buf_pa;
@@ -472,17 +497,9 @@ struct i40iw_sc_dev {
struct i40iw_cqp_misc_ops *cqp_misc_ops;
struct i40iw_hmc_ops *hmc_ops;
struct i40iw_vchnl_if vchnl_if;
- u32 ilq_count;
- struct i40iw_virt_mem ilq_mem;
- struct i40iw_puda_rsrc *ilq;
- u32 ieq_count;
- struct i40iw_virt_mem ieq_mem;
- struct i40iw_puda_rsrc *ieq;
-
const struct i40iw_vf_cqp_ops *iw_vf_cqp_ops;
struct i40iw_hmc_fpm_misc hmc_fpm_misc;
- u16 qs_handle;
u32 debug_mask;
u16 exception_lan_queue;
u8 hmc_fn_id;
@@ -556,6 +573,19 @@ struct i40iw_l2params {
u16 mss;
};
+struct i40iw_vsi_init_info {
+ struct i40iw_sc_dev *dev;
+ void *back_vsi;
+ struct i40iw_l2params *params;
+};
+
+struct i40iw_vsi_stats_info {
+ struct i40iw_vsi_pestat *pestat;
+ u8 fcn_id;
+ bool alloc_fcn_id;
+ bool stats_initialize;
+};
+
struct i40iw_device_init_info {
u64 fpm_query_buf_pa;
u64 fpm_commit_buf_pa;
@@ -564,7 +594,6 @@ struct i40iw_device_init_info {
struct i40iw_hw *hw;
void __iomem *bar0;
enum i40iw_status_code (*vchnl_send)(struct i40iw_sc_dev *, u32, u8 *, u16);
- u16 qs_handle;
u16 exception_lan_queue;
u8 hmc_fn_id;
bool is_pf;
@@ -722,6 +751,8 @@ struct i40iw_qp_host_ctx_info {
bool iwarp_info_valid;
bool err_rq_idx_valid;
u16 err_rq_idx;
+ bool add_to_qoslist;
+ u8 user_pri;
};
struct i40iw_aeqe_info {
@@ -814,6 +845,7 @@ struct i40iw_register_shared_stag {
struct i40iw_qp_init_info {
struct i40iw_qp_uk_init_info qp_uk_init_info;
struct i40iw_sc_pd *pd;
+ struct i40iw_sc_vsi *vsi;
u64 *host_ctx;
u8 *q2;
u64 sq_pa;
@@ -821,6 +853,7 @@ struct i40iw_qp_init_info {
u64 host_ctx_pa;
u64 q2_pa;
u64 shadow_area_pa;
+ int abi_ver;
u8 sq_tph_val;
u8 rq_tph_val;
u8 type;
@@ -880,13 +913,14 @@ enum i40iw_quad_hash_manage_type {
};
struct i40iw_qhash_table_info {
+ struct i40iw_sc_vsi *vsi;
enum i40iw_quad_hash_manage_type manage;
enum i40iw_quad_entry_type entry_type;
bool vlan_valid;
bool ipv4_valid;
u8 mac_addr[6];
u16 vlan_id;
- u16 qs_handle;
+ u8 user_pri;
u32 qp_num;
u32 dest_ip[4];
u32 src_ip[4];
@@ -976,7 +1010,7 @@ struct i40iw_cqp_query_fpm_values {
struct i40iw_cqp_ops {
enum i40iw_status_code (*cqp_init)(struct i40iw_sc_cqp *,
struct i40iw_cqp_init_info *);
- enum i40iw_status_code (*cqp_create)(struct i40iw_sc_cqp *, bool, u16 *, u16 *);
+ enum i40iw_status_code (*cqp_create)(struct i40iw_sc_cqp *, u16 *, u16 *);
void (*cqp_post_sq)(struct i40iw_sc_cqp *);
u64 *(*cqp_get_next_send_wqe)(struct i40iw_sc_cqp *, u64 scratch);
enum i40iw_status_code (*cqp_destroy)(struct i40iw_sc_cqp *);
@@ -1019,7 +1053,7 @@ struct i40iw_aeq_ops {
};
struct i40iw_pd_ops {
- void (*pd_init)(struct i40iw_sc_dev *, struct i40iw_sc_pd *, u16);
+ void (*pd_init)(struct i40iw_sc_dev *, struct i40iw_sc_pd *, u16, int);
};
struct i40iw_priv_qp_ops {
diff --git a/drivers/infiniband/hw/i40iw/i40iw_ucontext.h b/drivers/infiniband/hw/i40iw/i40iw_ucontext.h
index 12acd688def4..57d3f1d11ff1 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_ucontext.h
+++ b/drivers/infiniband/hw/i40iw/i40iw_ucontext.h
@@ -39,8 +39,8 @@
#include <linux/types.h>
-#define I40IW_ABI_USERSPACE_VER 4
-#define I40IW_ABI_KERNEL_VER 4
+#define I40IW_ABI_VER 5
+
struct i40iw_alloc_ucontext_req {
__u32 reserved32;
__u8 userspace_ver;
diff --git a/drivers/infiniband/hw/i40iw/i40iw_uk.c b/drivers/infiniband/hw/i40iw/i40iw_uk.c
index 4d28c3cb03cc..2800f796271c 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_uk.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_uk.c
@@ -175,12 +175,10 @@ u64 *i40iw_qp_get_next_send_wqe(struct i40iw_qp_uk *qp,
if (!*wqe_idx)
qp->swqe_polarity = !qp->swqe_polarity;
}
-
- for (i = 0; i < wqe_size / I40IW_QP_WQE_MIN_SIZE; i++) {
- I40IW_RING_MOVE_HEAD(qp->sq_ring, ret_code);
- if (ret_code)
- return NULL;
- }
+ I40IW_RING_MOVE_HEAD_BY_COUNT(qp->sq_ring,
+ wqe_size / I40IW_QP_WQE_MIN_SIZE, ret_code);
+ if (ret_code)
+ return NULL;
wqe = qp->sq_base[*wqe_idx].elem;
@@ -430,7 +428,7 @@ static enum i40iw_status_code i40iw_inline_rdma_write(struct i40iw_qp_uk *qp,
struct i40iw_inline_rdma_write *op_info;
u64 *push;
u64 header = 0;
- u32 i, wqe_idx;
+ u32 wqe_idx;
enum i40iw_status_code ret_code;
bool read_fence = false;
u8 wqe_size;
@@ -465,14 +463,12 @@ static enum i40iw_status_code i40iw_inline_rdma_write(struct i40iw_qp_uk *qp,
src = (u8 *)(op_info->data);
if (op_info->len <= 16) {
- for (i = 0; i < op_info->len; i++, src++, dest++)
- *dest = *src;
+ memcpy(dest, src, op_info->len);
} else {
- for (i = 0; i < 16; i++, src++, dest++)
- *dest = *src;
+ memcpy(dest, src, 16);
+ src += 16;
dest = (u8 *)wqe + 32;
- for (; i < op_info->len; i++, src++, dest++)
- *dest = *src;
+ memcpy(dest, src, op_info->len - 16);
}
wmb(); /* make sure WQE is populated before valid bit is set */
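
The memcpy rewrite above preserves the inline-data layout: at most 16 bytes of payload sit beside the WQE header, and any remainder continues at byte offset 32 of the WQE. A standalone model of the split copy, with the destination offsets mirroring the code and the WQE size and initial offset chosen only for illustration:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define WQE_BYTES   64
#define FIRST_CHUNK 16
#define SECOND_OFF  32

static void copy_inline(uint8_t wqe[WQE_BYTES], const uint8_t *src,
			size_t len, size_t first_off)
{
	if (len <= FIRST_CHUNK) {
		memcpy(wqe + first_off, src, len);
	} else {
		memcpy(wqe + first_off, src, FIRST_CHUNK);
		memcpy(wqe + SECOND_OFF, src + FIRST_CHUNK,
		       len - FIRST_CHUNK);
	}
}

int main(void)
{
	uint8_t wqe[WQE_BYTES] = { 0 };
	const uint8_t data[24] = "inline-payload-24-bytes";

	copy_inline(wqe, data, sizeof(data), 0);
	printf("byte 17 lands at offset 32: '%c'\n",
	       wqe[SECOND_OFF]);	/* prints '4' */
	return 0;
}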
@@ -507,7 +503,7 @@ static enum i40iw_status_code i40iw_inline_send(struct i40iw_qp_uk *qp,
u8 *dest, *src;
struct i40iw_post_inline_send *op_info;
u64 header;
- u32 wqe_idx, i;
+ u32 wqe_idx;
enum i40iw_status_code ret_code;
bool read_fence = false;
u8 wqe_size;
@@ -540,14 +536,12 @@ static enum i40iw_status_code i40iw_inline_send(struct i40iw_qp_uk *qp,
src = (u8 *)(op_info->data);
if (op_info->len <= 16) {
- for (i = 0; i < op_info->len; i++, src++, dest++)
- *dest = *src;
+ memcpy(dest, src, op_info->len);
} else {
- for (i = 0; i < 16; i++, src++, dest++)
- *dest = *src;
+ memcpy(dest, src, 16);
+ src += 16;
dest = (u8 *)wqe + 32;
- for (; i < op_info->len; i++, src++, dest++)
- *dest = *src;
+ memcpy(dest, src, op_info->len - 16);
}
wmb(); /* make sure WQE is populated before valid bit is set */
@@ -972,10 +966,6 @@ enum i40iw_status_code i40iw_qp_uk_init(struct i40iw_qp_uk *qp,
if (ret_code)
return ret_code;
- ret_code = i40iw_get_wqe_shift(info->rq_size, info->max_rq_frag_cnt, 0, &rqshift);
- if (ret_code)
- return ret_code;
-
qp->sq_base = info->sq;
qp->rq_base = info->rq;
qp->shadow_area = info->shadow_area;
@@ -1004,8 +994,19 @@ enum i40iw_status_code i40iw_qp_uk_init(struct i40iw_qp_uk *qp,
if (!qp->use_srq) {
qp->rq_size = info->rq_size;
qp->max_rq_frag_cnt = info->max_rq_frag_cnt;
- qp->rq_wqe_size = rqshift;
I40IW_RING_INIT(qp->rq_ring, qp->rq_size);
+ switch (info->abi_ver) {
+ case 4:
+ ret_code = i40iw_get_wqe_shift(info->rq_size, info->max_rq_frag_cnt, 0, &rqshift);
+ if (ret_code)
+ return ret_code;
+ break;
+ case 5: /* fallthrough until next ABI version */
+ default:
+ rqshift = I40IW_MAX_RQ_WQE_SHIFT;
+ break;
+ }
+ qp->rq_wqe_size = rqshift;
qp->rq_wqe_size_multiplier = 4 << rqshift;
}
qp->ops = iw_qp_uk_ops;
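
The abi_ver switch above is the compatibility hinge: providers at ABI 4 sized RQ WQEs from the fragment count, while ABI 5 and later pin the shift at I40IW_MAX_RQ_WQE_SHIFT so kernel and userspace agree on a fixed 128-byte RQ WQE, matching the I40IW_MAX_WQE_SIZE_RQ = 128 constant added later in this patch. A sketch of the selection, with the ABI-4 branch simplified to a guess at what i40iw_get_wqe_shift() computes:

#include <stdio.h>

#define MAX_RQ_WQE_SHIFT 2

static unsigned int rq_shift_for_abi(int abi_ver, unsigned int frag_cnt)
{
	switch (abi_ver) {
	case 4:
		/* stand-in for i40iw_get_wqe_shift(rq_size, frag_cnt, 0) */
		return frag_cnt > 3 ? 2 : (frag_cnt > 1 ? 1 : 0);
	case 5:
	default:
		return MAX_RQ_WQE_SHIFT;
	}
}

int main(void)
{
	printf("abi4, 2 frags: shift %u\n", rq_shift_for_abi(4, 2)); /* 1 */
	printf("abi5, 2 frags: shift %u\n", rq_shift_for_abi(5, 2)); /* 2 */
	return 0;
}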
@@ -1190,12 +1191,8 @@ enum i40iw_status_code i40iw_inline_data_size_to_wqesize(u32 data_size,
if (data_size <= 16)
*wqe_size = I40IW_QP_WQE_MIN_SIZE;
- else if (data_size <= 48)
- *wqe_size = 64;
- else if (data_size <= 80)
- *wqe_size = 96;
else
- *wqe_size = 128;
+ *wqe_size = 64;
return 0;
}
diff --git a/drivers/infiniband/hw/i40iw/i40iw_user.h b/drivers/infiniband/hw/i40iw/i40iw_user.h
index 276bcefffd7e..84be6f13b9c5 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_user.h
+++ b/drivers/infiniband/hw/i40iw/i40iw_user.h
@@ -72,12 +72,13 @@ enum i40iw_device_capabilities_const {
I40IW_MAX_SQ_PAYLOAD_SIZE = 2145386496,
I40IW_MAX_INLINE_DATA_SIZE = 48,
I40IW_MAX_PUSHMODE_INLINE_DATA_SIZE = 48,
- I40IW_MAX_IRD_SIZE = 32,
- I40IW_QPCTX_ENCD_MAXIRD = 3,
+ I40IW_MAX_IRD_SIZE = 63,
+ I40IW_MAX_ORD_SIZE = 127,
I40IW_MAX_WQ_ENTRIES = 2048,
- I40IW_MAX_ORD_SIZE = 32,
I40IW_Q2_BUFFER_SIZE = (248 + 100),
- I40IW_QP_CTX_SIZE = 248
+ I40IW_MAX_WQE_SIZE_RQ = 128,
+ I40IW_QP_CTX_SIZE = 248,
+ I40IW_MAX_PDS = 32768
};
#define i40iw_handle void *
@@ -96,13 +97,8 @@ enum i40iw_device_capabilities_const {
#define i40iw_physical_fragment u64
#define i40iw_address_list u64 *
-#define I40IW_CREATE_STAG(index, key) (((index) << 8) + (key))
-
-#define I40IW_STAG_KEY_FROM_STAG(stag) ((stag) && 0x000000FF)
-
-#define I40IW_STAG_INDEX_FROM_STAG(stag) (((stag) && 0xFFFFFF00) >> 8)
-
#define I40IW_MAX_MR_SIZE 0x10000000000L
+#define I40IW_MAX_RQ_WQE_SHIFT 2
struct i40iw_qp_uk;
struct i40iw_cq_uk;
@@ -411,7 +407,7 @@ struct i40iw_qp_uk_init_info {
u32 max_sq_frag_cnt;
u32 max_rq_frag_cnt;
u32 max_inline_data;
-
+ int abi_ver;
};
struct i40iw_cq_uk_init_info {
diff --git a/drivers/infiniband/hw/i40iw/i40iw_utils.c b/drivers/infiniband/hw/i40iw/i40iw_utils.c
index 6fd043b1d714..0f5d43d1f5fc 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_utils.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_utils.c
@@ -153,6 +153,7 @@ int i40iw_inetaddr_event(struct notifier_block *notifier,
struct i40iw_device *iwdev;
struct i40iw_handler *hdl;
u32 local_ipaddr;
+ u32 action = I40IW_ARP_ADD;
hdl = i40iw_find_netdev(event_netdev);
if (!hdl)
@@ -164,44 +165,25 @@ int i40iw_inetaddr_event(struct notifier_block *notifier,
if (netdev != event_netdev)
return NOTIFY_DONE;
+ if (upper_dev)
+ local_ipaddr = ntohl(
+ ((struct in_device *)upper_dev->ip_ptr)->ifa_list->ifa_address);
+ else
+ local_ipaddr = ntohl(ifa->ifa_address);
switch (event) {
case NETDEV_DOWN:
- if (upper_dev)
- local_ipaddr = ntohl(
- ((struct in_device *)upper_dev->ip_ptr)->ifa_list->ifa_address);
- else
- local_ipaddr = ntohl(ifa->ifa_address);
- i40iw_manage_arp_cache(iwdev,
- netdev->dev_addr,
- &local_ipaddr,
- true,
- I40IW_ARP_DELETE);
- return NOTIFY_OK;
+ action = I40IW_ARP_DELETE;
+ /* Fall through */
case NETDEV_UP:
- if (upper_dev)
- local_ipaddr = ntohl(
- ((struct in_device *)upper_dev->ip_ptr)->ifa_list->ifa_address);
- else
- local_ipaddr = ntohl(ifa->ifa_address);
- i40iw_manage_arp_cache(iwdev,
- netdev->dev_addr,
- &local_ipaddr,
- true,
- I40IW_ARP_ADD);
- break;
+ /* Fall through */
case NETDEV_CHANGEADDR:
- /* Add the address to the IP table */
- if (upper_dev)
- local_ipaddr = ntohl(
- ((struct in_device *)upper_dev->ip_ptr)->ifa_list->ifa_address);
- else
- local_ipaddr = ntohl(ifa->ifa_address);
-
i40iw_manage_arp_cache(iwdev,
netdev->dev_addr,
&local_ipaddr,
true,
- I40IW_ARP_ADD);
+ action);
+ i40iw_if_notify(iwdev, netdev, &local_ipaddr, true,
+				(action == I40IW_ARP_ADD));
break;
default:
break;
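
The consolidation above computes the address once, defaults the action to ADD, and lets NETDEV_DOWN flip it to DELETE before falling through to the shared cache update. The shape of the handler, with invented event and action names:

#include <stdio.h>

enum arp_action { ARP_ADD, ARP_DELETE };
enum netdev_event { NETDEV_UP, NETDEV_DOWN, NETDEV_CHANGEADDR, NETDEV_OTHER };

static void handle_event(enum netdev_event event, unsigned int ipaddr)
{
	enum arp_action action = ARP_ADD;

	switch (event) {
	case NETDEV_DOWN:
		action = ARP_DELETE;
		/* fall through */
	case NETDEV_UP:
		/* fall through */
	case NETDEV_CHANGEADDR:
		printf("arp %s for %#x, ifup=%d\n",
		       action == ARP_ADD ? "add" : "delete",
		       ipaddr, action == ARP_ADD);
		break;
	default:
		break;
	}
}

int main(void)
{
	handle_event(NETDEV_DOWN, 0x0a000001);
	handle_event(NETDEV_UP, 0x0a000001);
	return 0;
}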
@@ -225,6 +207,7 @@ int i40iw_inet6addr_event(struct notifier_block *notifier,
struct i40iw_device *iwdev;
struct i40iw_handler *hdl;
u32 local_ipaddr6[4];
+ u32 action = I40IW_ARP_ADD;
hdl = i40iw_find_netdev(event_netdev);
if (!hdl)
@@ -235,24 +218,21 @@ int i40iw_inet6addr_event(struct notifier_block *notifier,
if (netdev != event_netdev)
return NOTIFY_DONE;
+ i40iw_copy_ip_ntohl(local_ipaddr6, ifa->addr.in6_u.u6_addr32);
switch (event) {
case NETDEV_DOWN:
- i40iw_copy_ip_ntohl(local_ipaddr6, ifa->addr.in6_u.u6_addr32);
- i40iw_manage_arp_cache(iwdev,
- netdev->dev_addr,
- local_ipaddr6,
- false,
- I40IW_ARP_DELETE);
- return NOTIFY_OK;
+ action = I40IW_ARP_DELETE;
+ /* Fall through */
case NETDEV_UP:
/* Fall through */
case NETDEV_CHANGEADDR:
- i40iw_copy_ip_ntohl(local_ipaddr6, ifa->addr.in6_u.u6_addr32);
i40iw_manage_arp_cache(iwdev,
netdev->dev_addr,
local_ipaddr6,
false,
- I40IW_ARP_ADD);
+ action);
+ i40iw_if_notify(iwdev, netdev, local_ipaddr6, false,
+				(action == I40IW_ARP_ADD));
break;
default:
break;
@@ -392,6 +372,7 @@ static void i40iw_free_qp(struct i40iw_cqp_request *cqp_request, u32 num)
i40iw_rem_pdusecount(iwqp->iwpd, iwdev);
i40iw_free_qp_resources(iwdev, iwqp, qp_num);
+ i40iw_rem_devusecount(iwdev);
}
/**
@@ -415,7 +396,10 @@ static int i40iw_wait_event(struct i40iw_device *iwdev,
i40iw_pr_err("error cqp command 0x%x timed out ret = %d\n",
info->cqp_cmd, timeout_ret);
err_code = -ETIME;
- i40iw_request_reset(iwdev);
+ if (!iwdev->reset) {
+ iwdev->reset = true;
+ i40iw_request_reset(iwdev);
+ }
goto done;
}
cqp_error = cqp_request->compl_info.error;
@@ -445,6 +429,11 @@ enum i40iw_status_code i40iw_handle_cqp_op(struct i40iw_device *iwdev,
struct cqp_commands_info *info = &cqp_request->info;
int err_code = 0;
+ if (iwdev->reset) {
+ i40iw_free_cqp_request(&iwdev->cqp, cqp_request);
+ return I40IW_ERR_CQP_COMPL_ERROR;
+ }
+
status = i40iw_process_cqp_cmd(dev, info);
if (status) {
i40iw_pr_err("error cqp command 0x%x failed\n", info->cqp_cmd);
@@ -459,6 +448,26 @@ enum i40iw_status_code i40iw_handle_cqp_op(struct i40iw_device *iwdev,
}
/**
+ * i40iw_add_devusecount - add dev refcount
+ * @iwdev: dev for refcount
+ */
+void i40iw_add_devusecount(struct i40iw_device *iwdev)
+{
+ atomic64_inc(&iwdev->use_count);
+}
+
+/**
+ * i40iw_rem_devusecount - decrement refcount for dev
+ * @iwdev: device
+ */
+void i40iw_rem_devusecount(struct i40iw_device *iwdev)
+{
+ if (!atomic64_dec_and_test(&iwdev->use_count))
+ return;
+ wake_up(&iwdev->close_wq);
+}
+
+/**
* i40iw_add_pdusecount - add pd refcount
* @iwpd: pd for refcount
*/
@@ -712,6 +721,51 @@ enum i40iw_status_code i40iw_cqp_sds_cmd(struct i40iw_sc_dev *dev,
}
/**
+ * i40iw_qp_suspend_resume - cqp command for suspend/resume
+ * @dev: hardware control device structure
+ * @qp: hardware control qp
+ * @suspend: flag if suspend or resume
+ */
+void i40iw_qp_suspend_resume(struct i40iw_sc_dev *dev, struct i40iw_sc_qp *qp, bool suspend)
+{
+ struct i40iw_device *iwdev = (struct i40iw_device *)dev->back_dev;
+ struct i40iw_cqp_request *cqp_request;
+ struct i40iw_sc_cqp *cqp = dev->cqp;
+ struct cqp_commands_info *cqp_info;
+ enum i40iw_status_code status;
+
+ cqp_request = i40iw_get_cqp_request(&iwdev->cqp, false);
+ if (!cqp_request)
+ return;
+
+ cqp_info = &cqp_request->info;
+ cqp_info->cqp_cmd = (suspend) ? OP_SUSPEND : OP_RESUME;
+ cqp_info->in.u.suspend_resume.cqp = cqp;
+ cqp_info->in.u.suspend_resume.qp = qp;
+ cqp_info->in.u.suspend_resume.scratch = (uintptr_t)cqp_request;
+ status = i40iw_handle_cqp_op(iwdev, cqp_request);
+ if (status)
+ i40iw_pr_err("CQP-OP QP Suspend/Resume fail");
+}
+
+/**
+ * i40iw_qp_mss_modify - modify mss for qp
+ * @dev: hardware control device structure
+ * @qp: hardware control qp
+ */
+void i40iw_qp_mss_modify(struct i40iw_sc_dev *dev, struct i40iw_sc_qp *qp)
+{
+ struct i40iw_device *iwdev = (struct i40iw_device *)dev->back_dev;
+ struct i40iw_qp *iwqp = (struct i40iw_qp *)qp->back_qp;
+ struct i40iw_modify_qp_info info;
+
+ memset(&info, 0, sizeof(info));
+ info.mss_change = true;
+ info.new_mss = qp->vsi->mss;
+ i40iw_hw_modify_qp(iwdev, iwqp, &info, false);
+}
+
+/**
* i40iw_term_modify_qp - modify qp for term message
* @qp: hardware control qp
* @next_state: qp's next state
@@ -769,6 +823,7 @@ static void i40iw_terminate_timeout(unsigned long context)
struct i40iw_sc_qp *qp = (struct i40iw_sc_qp *)&iwqp->sc_qp;
i40iw_terminate_done(qp, 1);
+ i40iw_rem_ref(&iwqp->ibqp);
}
/**
@@ -780,6 +835,7 @@ void i40iw_terminate_start_timer(struct i40iw_sc_qp *qp)
struct i40iw_qp *iwqp;
iwqp = (struct i40iw_qp *)qp->back_qp;
+ i40iw_add_ref(&iwqp->ibqp);
init_timer(&iwqp->terminate_timer);
iwqp->terminate_timer.function = i40iw_terminate_timeout;
iwqp->terminate_timer.expires = jiffies + HZ;
@@ -796,7 +852,8 @@ void i40iw_terminate_del_timer(struct i40iw_sc_qp *qp)
struct i40iw_qp *iwqp;
iwqp = (struct i40iw_qp *)qp->back_qp;
- del_timer(&iwqp->terminate_timer);
+ if (del_timer(&iwqp->terminate_timer))
+ i40iw_rem_ref(&iwqp->ibqp);
}
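
The timer changes above close a use-after-free window: arming the terminate timer now takes a QP reference, and exactly one party drops it - the timeout handler when the timer fires, or the cancel path when del_timer() reports the timer was still pending. A toy model of that single-drop discipline, with kernel timers replaced by a pending flag:

#include <stdbool.h>
#include <stdio.h>

struct qp { int refcnt; bool timer_pending; };

static void qp_put(struct qp *qp) { qp->refcnt--; }

static void arm_terminate_timer(struct qp *qp)
{
	qp->refcnt++;			/* reference held by the timer */
	qp->timer_pending = true;
}

static void terminate_timeout(struct qp *qp)	/* timer fired */
{
	qp->timer_pending = false;
	qp_put(qp);			/* handler drops the timer's ref */
}

static void del_terminate_timer(struct qp *qp)	/* cancel path */
{
	bool was_pending = qp->timer_pending;	/* del_timer() result */

	qp->timer_pending = false;
	if (was_pending)
		qp_put(qp);	/* handler never ran, drop its ref here */
}

int main(void)
{
	struct qp qp = { .refcnt = 1 };

	arm_terminate_timer(&qp);
	del_terminate_timer(&qp);
	printf("refcnt after cancel: %d\n", qp.refcnt);	/* prints 1 */
	(void)terminate_timeout;
	return 0;
}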
/**
@@ -1011,6 +1068,116 @@ enum i40iw_status_code i40iw_vf_wait_vchnl_resp(struct i40iw_sc_dev *dev)
}
/**
+ * i40iw_cqp_cq_create_cmd - create a cq for the cqp
+ * @dev: device pointer
+ * @cq: pointer to created cq
+ */
+enum i40iw_status_code i40iw_cqp_cq_create_cmd(struct i40iw_sc_dev *dev,
+ struct i40iw_sc_cq *cq)
+{
+ struct i40iw_device *iwdev = (struct i40iw_device *)dev->back_dev;
+ struct i40iw_cqp *iwcqp = &iwdev->cqp;
+ struct i40iw_cqp_request *cqp_request;
+ struct cqp_commands_info *cqp_info;
+ enum i40iw_status_code status;
+
+ cqp_request = i40iw_get_cqp_request(iwcqp, true);
+ if (!cqp_request)
+ return I40IW_ERR_NO_MEMORY;
+
+ cqp_info = &cqp_request->info;
+ cqp_info->cqp_cmd = OP_CQ_CREATE;
+ cqp_info->post_sq = 1;
+ cqp_info->in.u.cq_create.cq = cq;
+ cqp_info->in.u.cq_create.scratch = (uintptr_t)cqp_request;
+ status = i40iw_handle_cqp_op(iwdev, cqp_request);
+ if (status)
+ i40iw_pr_err("CQP-OP Create QP fail");
+
+ return status;
+}
+
+/**
+ * i40iw_cqp_qp_create_cmd - create a qp for the cqp
+ * @dev: device pointer
+ * @qp: pointer to created qp
+ */
+enum i40iw_status_code i40iw_cqp_qp_create_cmd(struct i40iw_sc_dev *dev,
+ struct i40iw_sc_qp *qp)
+{
+ struct i40iw_device *iwdev = (struct i40iw_device *)dev->back_dev;
+ struct i40iw_cqp *iwcqp = &iwdev->cqp;
+ struct i40iw_cqp_request *cqp_request;
+ struct cqp_commands_info *cqp_info;
+ struct i40iw_create_qp_info *qp_info;
+ enum i40iw_status_code status;
+
+ cqp_request = i40iw_get_cqp_request(iwcqp, true);
+ if (!cqp_request)
+ return I40IW_ERR_NO_MEMORY;
+
+ cqp_info = &cqp_request->info;
+ qp_info = &cqp_request->info.in.u.qp_create.info;
+
+ memset(qp_info, 0, sizeof(*qp_info));
+
+ qp_info->cq_num_valid = true;
+ qp_info->next_iwarp_state = I40IW_QP_STATE_RTS;
+
+ cqp_info->cqp_cmd = OP_QP_CREATE;
+ cqp_info->post_sq = 1;
+ cqp_info->in.u.qp_create.qp = qp;
+ cqp_info->in.u.qp_create.scratch = (uintptr_t)cqp_request;
+ status = i40iw_handle_cqp_op(iwdev, cqp_request);
+ if (status)
+ i40iw_pr_err("CQP-OP QP create fail");
+ return status;
+}
+
+/**
+ * i40iw_cqp_cq_destroy_cmd - destroy the cqp cq
+ * @dev: device pointer
+ * @cq: pointer to cq
+ */
+void i40iw_cqp_cq_destroy_cmd(struct i40iw_sc_dev *dev, struct i40iw_sc_cq *cq)
+{
+ struct i40iw_device *iwdev = (struct i40iw_device *)dev->back_dev;
+
+ i40iw_cq_wq_destroy(iwdev, cq);
+}
+
+/**
+ * i40iw_cqp_qp_destroy_cmd - destroy the cqp qp
+ * @dev: device pointer
+ * @qp: pointer to qp
+ */
+void i40iw_cqp_qp_destroy_cmd(struct i40iw_sc_dev *dev, struct i40iw_sc_qp *qp)
+{
+ struct i40iw_device *iwdev = (struct i40iw_device *)dev->back_dev;
+ struct i40iw_cqp *iwcqp = &iwdev->cqp;
+ struct i40iw_cqp_request *cqp_request;
+ struct cqp_commands_info *cqp_info;
+ enum i40iw_status_code status;
+
+ cqp_request = i40iw_get_cqp_request(iwcqp, true);
+ if (!cqp_request)
+ return;
+
+ cqp_info = &cqp_request->info;
+ memset(cqp_info, 0, sizeof(*cqp_info));
+
+ cqp_info->cqp_cmd = OP_QP_DESTROY;
+ cqp_info->post_sq = 1;
+ cqp_info->in.u.qp_destroy.qp = qp;
+ cqp_info->in.u.qp_destroy.scratch = (uintptr_t)cqp_request;
+ cqp_info->in.u.qp_destroy.remove_hash_idx = true;
+ status = i40iw_handle_cqp_op(iwdev, cqp_request);
+ if (status)
+ i40iw_pr_err("CQP QP_DESTROY fail");
+}
+
+/**
* i40iw_ieq_mpa_crc_ae - generate AE for crc error
* @dev: hardware control device structure
* @qp: hardware control qp
@@ -1208,7 +1375,7 @@ enum i40iw_status_code i40iw_puda_get_tcpip_info(struct i40iw_puda_completion_in
buf->totallen = pkt_len + buf->maclen;
- if (info->payload_len < buf->totallen - 4) {
+ if (info->payload_len < buf->totallen) {
i40iw_pr_err("payload_len = 0x%x totallen expected0x%x\n",
info->payload_len, buf->totallen);
return I40IW_ERR_INVALID_SIZE;
@@ -1224,27 +1391,29 @@ enum i40iw_status_code i40iw_puda_get_tcpip_info(struct i40iw_puda_completion_in
/**
* i40iw_hw_stats_timeout - Stats timer-handler which updates all HW stats
- * @dev: hardware control device structure
+ * @vsi: pointer to the vsi structure
*/
-static void i40iw_hw_stats_timeout(unsigned long dev)
+static void i40iw_hw_stats_timeout(unsigned long vsi)
{
- struct i40iw_sc_dev *pf_dev = (struct i40iw_sc_dev *)dev;
- struct i40iw_dev_pestat *pf_devstat = &pf_dev->dev_pestat;
- struct i40iw_dev_pestat *vf_devstat = NULL;
+ struct i40iw_sc_vsi *sc_vsi = (struct i40iw_sc_vsi *)vsi;
+ struct i40iw_sc_dev *pf_dev = sc_vsi->dev;
+ struct i40iw_vsi_pestat *pf_devstat = sc_vsi->pestat;
+ struct i40iw_vsi_pestat *vf_devstat = NULL;
u16 iw_vf_idx;
unsigned long flags;
/*PF*/
- pf_devstat->ops.iw_hw_stat_read_all(pf_devstat, &pf_devstat->hw_stats);
+ i40iw_hw_stats_read_all(pf_devstat, &pf_devstat->hw_stats);
+
for (iw_vf_idx = 0; iw_vf_idx < I40IW_MAX_PE_ENABLED_VF_COUNT; iw_vf_idx++) {
- spin_lock_irqsave(&pf_devstat->stats_lock, flags);
+ spin_lock_irqsave(&pf_devstat->lock, flags);
if (pf_dev->vf_dev[iw_vf_idx]) {
if (pf_dev->vf_dev[iw_vf_idx]->stats_initialized) {
- vf_devstat = &pf_dev->vf_dev[iw_vf_idx]->dev_pestat;
- vf_devstat->ops.iw_hw_stat_read_all(vf_devstat, &vf_devstat->hw_stats);
+ vf_devstat = &pf_dev->vf_dev[iw_vf_idx]->pestat;
+ i40iw_hw_stats_read_all(vf_devstat, &vf_devstat->hw_stats);
}
}
- spin_unlock_irqrestore(&pf_devstat->stats_lock, flags);
+ spin_unlock_irqrestore(&pf_devstat->lock, flags);
}
mod_timer(&pf_devstat->stats_timer,
@@ -1253,26 +1422,26 @@ static void i40iw_hw_stats_timeout(unsigned long dev)
/**
* i40iw_hw_stats_start_timer - Start periodic stats timer
- * @dev: hardware control device structure
+ * @vsi: pointer to the vsi structure
*/
-void i40iw_hw_stats_start_timer(struct i40iw_sc_dev *dev)
+void i40iw_hw_stats_start_timer(struct i40iw_sc_vsi *vsi)
{
- struct i40iw_dev_pestat *devstat = &dev->dev_pestat;
+ struct i40iw_vsi_pestat *devstat = vsi->pestat;
init_timer(&devstat->stats_timer);
devstat->stats_timer.function = i40iw_hw_stats_timeout;
- devstat->stats_timer.data = (unsigned long)dev;
+ devstat->stats_timer.data = (unsigned long)vsi;
mod_timer(&devstat->stats_timer,
jiffies + msecs_to_jiffies(STATS_TIMER_DELAY));
}
/**
- * i40iw_hw_stats_del_timer - Delete periodic stats timer
- * @dev: hardware control device structure
+ * i40iw_hw_stats_stop_timer - Delete periodic stats timer
+ * @vsi: pointer to the vsi structure
*/
-void i40iw_hw_stats_del_timer(struct i40iw_sc_dev *dev)
+void i40iw_hw_stats_stop_timer(struct i40iw_sc_vsi *vsi)
{
- struct i40iw_dev_pestat *devstat = &dev->dev_pestat;
+ struct i40iw_vsi_pestat *devstat = vsi->pestat;
del_timer_sync(&devstat->stats_timer);
}
diff --git a/drivers/infiniband/hw/i40iw/i40iw_verbs.c b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
index 6329c971c22f..29e97df9e1a7 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_verbs.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
@@ -37,6 +37,7 @@
#include <linux/random.h>
#include <linux/highmem.h>
#include <linux/time.h>
+#include <linux/hugetlb.h>
#include <asm/byteorder.h>
#include <net/ip.h>
#include <rdma/ib_verbs.h>
@@ -67,13 +68,13 @@ static int i40iw_query_device(struct ib_device *ibdev,
props->vendor_part_id = iwdev->ldev->pcidev->device;
props->hw_ver = (u32)iwdev->sc_dev.hw_rev;
props->max_mr_size = I40IW_MAX_OUTBOUND_MESSAGE_SIZE;
- props->max_qp = iwdev->max_qp;
+ props->max_qp = iwdev->max_qp - iwdev->used_qps;
props->max_qp_wr = (I40IW_MAX_WQ_ENTRIES >> 2) - 1;
props->max_sge = I40IW_MAX_WQ_FRAGMENT_COUNT;
- props->max_cq = iwdev->max_cq;
+ props->max_cq = iwdev->max_cq - iwdev->used_cqs;
props->max_cqe = iwdev->max_cqe;
- props->max_mr = iwdev->max_mr;
- props->max_pd = iwdev->max_pd;
+ props->max_mr = iwdev->max_mr - iwdev->used_mrs;
+ props->max_pd = iwdev->max_pd - iwdev->used_pds;
props->max_sge_rd = I40IW_MAX_SGE_RD;
props->max_qp_rd_atom = I40IW_MAX_IRD_SIZE;
props->max_qp_init_rd_atom = props->max_qp_rd_atom;
@@ -144,9 +145,8 @@ static struct ib_ucontext *i40iw_alloc_ucontext(struct ib_device *ibdev,
if (ib_copy_from_udata(&req, udata, sizeof(req)))
return ERR_PTR(-EINVAL);
- if (req.userspace_ver != I40IW_ABI_USERSPACE_VER) {
- i40iw_pr_err("Invalid userspace driver version detected. Detected version %d, should be %d\n",
- req.userspace_ver, I40IW_ABI_USERSPACE_VER);
+ if (req.userspace_ver < 4 || req.userspace_ver > I40IW_ABI_VER) {
+ i40iw_pr_err("Unsupported provider library version %u.\n", req.userspace_ver);
return ERR_PTR(-EINVAL);
}
@@ -154,13 +154,14 @@ static struct ib_ucontext *i40iw_alloc_ucontext(struct ib_device *ibdev,
uresp.max_qps = iwdev->max_qp;
uresp.max_pds = iwdev->max_pd;
uresp.wq_size = iwdev->max_qp_wr * 2;
- uresp.kernel_ver = I40IW_ABI_KERNEL_VER;
+ uresp.kernel_ver = req.userspace_ver;
ucontext = kzalloc(sizeof(*ucontext), GFP_KERNEL);
if (!ucontext)
return ERR_PTR(-ENOMEM);
ucontext->iwdev = iwdev;
+ ucontext->abi_ver = req.userspace_ver;
if (ib_copy_to_udata(udata, &uresp, sizeof(uresp))) {
kfree(ucontext);
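
[Editor's note] The ucontext hunks replace an exact ABI match with a supported range and echo the negotiated version back to user space; the PD path later consults it to pick a response layout. A reduced sketch of the handshake — DEMO_CURRENT_ABI stands in for I40IW_ABI_VER, whose value the patch does not show:

#define DEMO_OLDEST_ABI  4	/* oldest provider ABI still accepted, per the patch */
#define DEMO_CURRENT_ABI 5	/* hypothetical stand-in for I40IW_ABI_VER */

/* returns the negotiated version, or -1 if unsupported */
static int demo_negotiate_abi(unsigned int userspace_ver)
{
	if (userspace_ver < DEMO_OLDEST_ABI || userspace_ver > DEMO_CURRENT_ABI)
		return -1;
	/* echoed back in the response and stored per ucontext */
	return (int)userspace_ver;
}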
@@ -254,7 +255,6 @@ static void i40iw_alloc_push_page(struct i40iw_device *iwdev, struct i40iw_sc_qp
{
struct i40iw_cqp_request *cqp_request;
struct cqp_commands_info *cqp_info;
- struct i40iw_sc_dev *dev = &iwdev->sc_dev;
enum i40iw_status_code status;
if (qp->push_idx != I40IW_INVALID_PUSH_PAGE_INDEX)
@@ -270,7 +270,7 @@ static void i40iw_alloc_push_page(struct i40iw_device *iwdev, struct i40iw_sc_qp
cqp_info->cqp_cmd = OP_MANAGE_PUSH_PAGE;
cqp_info->post_sq = 1;
- cqp_info->in.u.manage_push_page.info.qs_handle = dev->qs_handle;
+ cqp_info->in.u.manage_push_page.info.qs_handle = qp->qs_handle;
cqp_info->in.u.manage_push_page.info.free_page = 0;
cqp_info->in.u.manage_push_page.cqp = &iwdev->cqp.sc_cqp;
cqp_info->in.u.manage_push_page.scratch = (uintptr_t)cqp_request;
@@ -292,7 +292,6 @@ static void i40iw_dealloc_push_page(struct i40iw_device *iwdev, struct i40iw_sc_
{
struct i40iw_cqp_request *cqp_request;
struct cqp_commands_info *cqp_info;
- struct i40iw_sc_dev *dev = &iwdev->sc_dev;
enum i40iw_status_code status;
if (qp->push_idx == I40IW_INVALID_PUSH_PAGE_INDEX)
@@ -307,7 +306,7 @@ static void i40iw_dealloc_push_page(struct i40iw_device *iwdev, struct i40iw_sc_
cqp_info->post_sq = 1;
cqp_info->in.u.manage_push_page.info.push_idx = qp->push_idx;
- cqp_info->in.u.manage_push_page.info.qs_handle = dev->qs_handle;
+ cqp_info->in.u.manage_push_page.info.qs_handle = qp->qs_handle;
cqp_info->in.u.manage_push_page.info.free_page = 1;
cqp_info->in.u.manage_push_page.cqp = &iwdev->cqp.sc_cqp;
cqp_info->in.u.manage_push_page.scratch = (uintptr_t)cqp_request;
@@ -334,9 +333,13 @@ static struct ib_pd *i40iw_alloc_pd(struct ib_device *ibdev,
struct i40iw_sc_dev *dev = &iwdev->sc_dev;
struct i40iw_alloc_pd_resp uresp;
struct i40iw_sc_pd *sc_pd;
+ struct i40iw_ucontext *ucontext;
u32 pd_id = 0;
int err;
+ if (iwdev->closing)
+ return ERR_PTR(-ENODEV);
+
err = i40iw_alloc_resource(iwdev, iwdev->allocated_pds,
iwdev->max_pd, &pd_id, &iwdev->next_pd);
if (err) {
@@ -351,15 +354,18 @@ static struct ib_pd *i40iw_alloc_pd(struct ib_device *ibdev,
}
sc_pd = &iwpd->sc_pd;
- dev->iw_pd_ops->pd_init(dev, sc_pd, pd_id);
if (context) {
+ ucontext = to_ucontext(context);
+ dev->iw_pd_ops->pd_init(dev, sc_pd, pd_id, ucontext->abi_ver);
memset(&uresp, 0, sizeof(uresp));
uresp.pd_id = pd_id;
if (ib_copy_to_udata(udata, &uresp, sizeof(uresp))) {
err = -EFAULT;
goto error;
}
+ } else {
+ dev->iw_pd_ops->pd_init(dev, sc_pd, pd_id, -1);
}
i40iw_add_pdusecount(iwpd);
@@ -516,7 +522,7 @@ static int i40iw_setup_kmode_qp(struct i40iw_device *iwdev,
struct i40iw_dma_mem *mem = &iwqp->kqp.dma_mem;
u32 sqdepth, rqdepth;
u32 sq_size, rq_size;
- u8 sqshift, rqshift;
+ u8 sqshift;
u32 size;
enum i40iw_status_code status;
struct i40iw_qp_uk_init_info *ukinfo = &info->qp_uk_init_info;
@@ -525,14 +531,11 @@ static int i40iw_setup_kmode_qp(struct i40iw_device *iwdev,
rq_size = i40iw_qp_roundup(ukinfo->rq_size + 1);
status = i40iw_get_wqe_shift(sq_size, ukinfo->max_sq_frag_cnt, ukinfo->max_inline_data, &sqshift);
- if (!status)
- status = i40iw_get_wqe_shift(rq_size, ukinfo->max_rq_frag_cnt, 0, &rqshift);
-
if (status)
return -ENOMEM;
sqdepth = sq_size << sqshift;
- rqdepth = rq_size << rqshift;
+ rqdepth = rq_size << I40IW_MAX_RQ_WQE_SHIFT;
size = sqdepth * sizeof(struct i40iw_sq_uk_wr_trk_info) + (rqdepth << 3);
iwqp->kqp.wrid_mem = kzalloc(size, GFP_KERNEL);
@@ -602,6 +605,9 @@ static struct ib_qp *i40iw_create_qp(struct ib_pd *ibpd,
struct i40iwarp_offload_info *iwarp_info;
unsigned long flags;
+ if (iwdev->closing)
+ return ERR_PTR(-ENODEV);
+
if (init_attr->create_flags)
return ERR_PTR(-EINVAL);
if (init_attr->cap.max_inline_data > I40IW_MAX_INLINE_DATA_SIZE)
@@ -610,11 +616,15 @@ static struct ib_qp *i40iw_create_qp(struct ib_pd *ibpd,
if (init_attr->cap.max_send_sge > I40IW_MAX_WQ_FRAGMENT_COUNT)
init_attr->cap.max_send_sge = I40IW_MAX_WQ_FRAGMENT_COUNT;
+ if (init_attr->cap.max_recv_sge > I40IW_MAX_WQ_FRAGMENT_COUNT)
+ init_attr->cap.max_recv_sge = I40IW_MAX_WQ_FRAGMENT_COUNT;
+
memset(&init_info, 0, sizeof(init_info));
sq_size = init_attr->cap.max_send_wr;
rq_size = init_attr->cap.max_recv_wr;
+ init_info.vsi = &iwdev->vsi;
init_info.qp_uk_init_info.sq_size = sq_size;
init_info.qp_uk_init_info.rq_size = rq_size;
init_info.qp_uk_init_info.max_sq_frag_cnt = init_attr->cap.max_send_sge;
@@ -774,6 +784,7 @@ static struct ib_qp *i40iw_create_qp(struct ib_pd *ibpd,
iwqp->sig_all = (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR) ? 1 : 0;
iwdev->qp_table[qp_num] = iwqp;
i40iw_add_pdusecount(iwqp->iwpd);
+ i40iw_add_devusecount(iwdev);
if (ibpd->uobject && udata) {
memset(&uresp, 0, sizeof(uresp));
uresp.actual_sq_size = sq_size;
@@ -815,8 +826,9 @@ static int i40iw_query_qp(struct ib_qp *ibqp,
attr->qp_access_flags = 0;
attr->cap.max_send_wr = qp->qp_uk.sq_size;
attr->cap.max_recv_wr = qp->qp_uk.rq_size;
- attr->cap.max_recv_sge = 1;
attr->cap.max_inline_data = I40IW_MAX_INLINE_DATA_SIZE;
+ attr->cap.max_send_sge = I40IW_MAX_WQ_FRAGMENT_COUNT;
+ attr->cap.max_recv_sge = I40IW_MAX_WQ_FRAGMENT_COUNT;
init_attr->event_handler = iwqp->ibqp.event_handler;
init_attr->qp_context = iwqp->ibqp.qp_context;
init_attr->send_cq = iwqp->ibqp.send_cq;
@@ -884,6 +896,11 @@ int i40iw_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
spin_lock_irqsave(&iwqp->lock, flags);
if (attr_mask & IB_QP_STATE) {
+ if (iwdev->closing && attr->qp_state != IB_QPS_ERR) {
+ err = -EINVAL;
+ goto exit;
+ }
+
switch (attr->qp_state) {
case IB_QPS_INIT:
case IB_QPS_RTR:
@@ -944,7 +961,7 @@ int i40iw_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
goto exit;
}
if (iwqp->sc_qp.term_flags)
- del_timer(&iwqp->terminate_timer);
+ i40iw_terminate_del_timer(&iwqp->sc_qp);
info.next_iwarp_state = I40IW_QP_STATE_ERROR;
if ((iwqp->hw_tcp_state > I40IW_TCP_STATE_CLOSED) &&
iwdev->iw_status &&
@@ -1037,11 +1054,11 @@ static void cq_free_resources(struct i40iw_device *iwdev, struct i40iw_cq *iwcq)
}
/**
- * cq_wq_destroy - send cq destroy cqp
+ * i40iw_cq_wq_destroy - send cq destroy cqp
* @iwdev: iwarp device
* @cq: hardware control cq
*/
-static void cq_wq_destroy(struct i40iw_device *iwdev, struct i40iw_sc_cq *cq)
+void i40iw_cq_wq_destroy(struct i40iw_device *iwdev, struct i40iw_sc_cq *cq)
{
enum i40iw_status_code status;
struct i40iw_cqp_request *cqp_request;
@@ -1080,9 +1097,10 @@ static int i40iw_destroy_cq(struct ib_cq *ib_cq)
iwcq = to_iwcq(ib_cq);
iwdev = to_iwdev(ib_cq->device);
cq = &iwcq->sc_cq;
- cq_wq_destroy(iwdev, cq);
+ i40iw_cq_wq_destroy(iwdev, cq);
cq_free_resources(iwdev, iwcq);
kfree(iwcq);
+ i40iw_rem_devusecount(iwdev);
return 0;
}
@@ -1113,6 +1131,9 @@ static struct ib_cq *i40iw_create_cq(struct ib_device *ibdev,
int err_code;
int entries = attr->cqe;
+ if (iwdev->closing)
+ return ERR_PTR(-ENODEV);
+
if (entries > iwdev->max_cqe)
return ERR_PTR(-EINVAL);
@@ -1137,7 +1158,8 @@ static struct ib_cq *i40iw_create_cq(struct ib_device *ibdev,
ukinfo->cq_id = cq_num;
iwcq->ibcq.cqe = info.cq_uk_init_info.cq_size;
info.ceqe_mask = 0;
- info.ceq_id = 0;
+ if (attr->comp_vector < iwdev->ceqs_count)
+ info.ceq_id = attr->comp_vector;
info.ceq_id_valid = true;
info.ceqe_mask = 1;
info.type = I40IW_CQ_TYPE_IWARP;
@@ -1229,10 +1251,11 @@ static struct ib_cq *i40iw_create_cq(struct ib_device *ibdev,
}
}
+ i40iw_add_devusecount(iwdev);
return (struct ib_cq *)iwcq;
cq_destroy:
- cq_wq_destroy(iwdev, cq);
+ i40iw_cq_wq_destroy(iwdev, cq);
cq_free_resources:
cq_free_resources(iwdev, iwcq);
error:
@@ -1266,6 +1289,7 @@ static void i40iw_free_stag(struct i40iw_device *iwdev, u32 stag)
stag_idx = (stag & iwdev->mr_stagmask) >> I40IW_CQPSQ_STAG_IDX_SHIFT;
i40iw_free_resource(iwdev, iwdev->allocated_mrs, stag_idx);
+ i40iw_rem_devusecount(iwdev);
}
/**
@@ -1296,19 +1320,18 @@ static u32 i40iw_create_stag(struct i40iw_device *iwdev)
stag = stag_index << I40IW_CQPSQ_STAG_IDX_SHIFT;
stag |= driver_key;
stag += (u32)consumer_key;
+ i40iw_add_devusecount(iwdev);
}
return stag;
}
/**
* i40iw_next_pbl_addr - Get next pbl address
- * @palloc: Poiner to allocated pbles
* @pbl: pointer to a pble
* @pinfo: info pointer
* @idx: index
*/
-static inline u64 *i40iw_next_pbl_addr(struct i40iw_pble_alloc *palloc,
- u64 *pbl,
+static inline u64 *i40iw_next_pbl_addr(u64 *pbl,
struct i40iw_pble_info **pinfo,
u32 *idx)
{
@@ -1336,9 +1359,11 @@ static void i40iw_copy_user_pgaddrs(struct i40iw_mr *iwmr,
struct i40iw_pble_alloc *palloc = &iwpbl->pble_alloc;
struct i40iw_pble_info *pinfo;
struct scatterlist *sg;
+ u64 pg_addr = 0;
u32 idx = 0;
pinfo = (level == I40IW_LEVEL_1) ? NULL : palloc->level2.leaf;
+
pg_shift = ffs(region->page_size) - 1;
for_each_sg(region->sg_head.sgl, sg, region->nmap, entry) {
chunk_pages = sg_dma_len(sg) >> pg_shift;
@@ -1346,17 +1371,96 @@ static void i40iw_copy_user_pgaddrs(struct i40iw_mr *iwmr,
!iwpbl->qp_mr.sq_page)
iwpbl->qp_mr.sq_page = sg_page(sg);
for (i = 0; i < chunk_pages; i++) {
- *pbl = cpu_to_le64(sg_dma_address(sg) + region->page_size * i);
- pbl = i40iw_next_pbl_addr(palloc, pbl, &pinfo, &idx);
+ pg_addr = sg_dma_address(sg) + region->page_size * i;
+
+ if ((entry + i) == 0)
+ *pbl = cpu_to_le64(pg_addr & iwmr->page_msk);
+ else if (!(pg_addr & ~iwmr->page_msk))
+ *pbl = cpu_to_le64(pg_addr);
+ else
+ continue;
+ pbl = i40iw_next_pbl_addr(pbl, &pinfo, &idx);
+ }
+ }
+}
+
+/**
+ * i40iw_set_hugetlb_values - set MR pg size and mask to huge pg values.
+ * @addr: virtual address
+ * @iwmr: mr pointer for this memory registration
+ */
+static void i40iw_set_hugetlb_values(u64 addr, struct i40iw_mr *iwmr)
+{
+ struct vm_area_struct *vma;
+ struct hstate *h;
+
+ vma = find_vma(current->mm, addr);
+ if (vma && is_vm_hugetlb_page(vma)) {
+ h = hstate_vma(vma);
+ if (huge_page_size(h) == 0x200000) {
+ iwmr->page_size = huge_page_size(h);
+ iwmr->page_msk = huge_page_mask(h);
}
}
}
/**
+ * i40iw_check_mem_contiguous - check if pbls stored in arr are contiguous
+ * @arr: lvl1 pbl array
+ * @npages: page count
+ * @pg_size: page size
+ *
+ */
+static bool i40iw_check_mem_contiguous(u64 *arr, u32 npages, u32 pg_size)
+{
+ u32 pg_idx;
+
+ for (pg_idx = 0; pg_idx < npages; pg_idx++) {
+ if ((*arr + (pg_size * pg_idx)) != arr[pg_idx])
+ return false;
+ }
+ return true;
+}
+
+/**
+ * i40iw_check_mr_contiguous - check if MR is physically contiguous
+ * @palloc: pbl allocation struct
+ * @pg_size: page size
+ */
+static bool i40iw_check_mr_contiguous(struct i40iw_pble_alloc *palloc, u32 pg_size)
+{
+ struct i40iw_pble_level2 *lvl2 = &palloc->level2;
+ struct i40iw_pble_info *leaf = lvl2->leaf;
+ u64 *arr = NULL;
+ u64 *start_addr = NULL;
+ int i;
+ bool ret;
+
+ if (palloc->level == I40IW_LEVEL_1) {
+ arr = (u64 *)palloc->level1.addr;
+ ret = i40iw_check_mem_contiguous(arr, palloc->total_cnt, pg_size);
+ return ret;
+ }
+
+ start_addr = (u64 *)leaf->addr;
+
+ for (i = 0; i < lvl2->leaf_cnt; i++, leaf++) {
+ arr = (u64 *)leaf->addr;
+ if ((*start_addr + (i * pg_size * PBLE_PER_PAGE)) != *arr)
+ return false;
+ ret = i40iw_check_mem_contiguous(arr, leaf->cnt, pg_size);
+ if (!ret)
+ return false;
+ }
+
+ return true;
+}
+
+/**
* i40iw_setup_pbles - copy user pg address to pble's
* @iwdev: iwarp device
* @iwmr: mr pointer for this memory registration
- * @use_pbles: flag if to use pble's or memory (level 0)
+ * @use_pbles: flag to use pbles
*/
static int i40iw_setup_pbles(struct i40iw_device *iwdev,
struct i40iw_mr *iwmr,
@@ -1369,9 +1473,6 @@ static int i40iw_setup_pbles(struct i40iw_device *iwdev,
enum i40iw_status_code status;
enum i40iw_pble_level level = I40IW_LEVEL_1;
- if (!use_pbles && (iwmr->page_cnt > MAX_SAVE_PAGE_ADDRS))
- return -ENOMEM;
-
if (use_pbles) {
mutex_lock(&iwdev->pbl_mutex);
status = i40iw_get_pble(&iwdev->sc_dev, iwdev->pble_rsrc, palloc, iwmr->page_cnt);
@@ -1388,6 +1489,10 @@ static int i40iw_setup_pbles(struct i40iw_device *iwdev,
}
i40iw_copy_user_pgaddrs(iwmr, pbl, level);
+
+ if (use_pbles)
+ iwmr->pgaddrmem[0] = *pbl;
+
return 0;
}
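
[Editor's note] The level-1 contiguity test introduced above reduces to: every page address must equal the first address plus its index times the page size. A standalone userspace illustration of that check (hypothetical harness, not driver code):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool check_mem_contiguous(const uint64_t *arr, uint32_t npages,
				 uint32_t pg_size)
{
	uint32_t i;

	for (i = 0; i < npages; i++)
		if (arr[0] + (uint64_t)pg_size * i != arr[i])
			return false;
	return true;
}

int main(void)
{
	uint64_t contig[] = { 0x1000, 0x2000, 0x3000 };
	uint64_t sparse[] = { 0x1000, 0x2000, 0x5000 };

	printf("%d %d\n",
	       check_mem_contiguous(contig, 3, 0x1000),   /* prints 1 */
	       check_mem_contiguous(sparse, 3, 0x1000));  /* prints 0 */
	return 0;
}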
@@ -1409,14 +1514,18 @@ static int i40iw_handle_q_mem(struct i40iw_device *iwdev,
struct i40iw_cq_mr *cqmr = &iwpbl->cq_mr;
struct i40iw_hmc_pble *hmc_p;
u64 *arr = iwmr->pgaddrmem;
+ u32 pg_size;
int err;
int total;
+ bool ret = true;
total = req->sq_pages + req->rq_pages + req->cq_pages;
+ pg_size = iwmr->page_size;
err = i40iw_setup_pbles(iwdev, iwmr, use_pbles);
if (err)
return err;
+
if (use_pbles && (palloc->level != I40IW_LEVEL_1)) {
i40iw_free_pble(iwdev->pble_rsrc, palloc);
iwpbl->pbl_allocated = false;
@@ -1425,26 +1534,44 @@ static int i40iw_handle_q_mem(struct i40iw_device *iwdev,
if (use_pbles)
arr = (u64 *)palloc->level1.addr;
- if (req->reg_type == IW_MEMREG_TYPE_QP) {
+
+ if (iwmr->type == IW_MEMREG_TYPE_QP) {
hmc_p = &qpmr->sq_pbl;
qpmr->shadow = (dma_addr_t)arr[total];
+
if (use_pbles) {
+ ret = i40iw_check_mem_contiguous(arr, req->sq_pages, pg_size);
+ if (ret)
+ ret = i40iw_check_mem_contiguous(&arr[req->sq_pages], req->rq_pages, pg_size);
+ }
+
+ if (!ret) {
hmc_p->idx = palloc->level1.idx;
hmc_p = &qpmr->rq_pbl;
hmc_p->idx = palloc->level1.idx + req->sq_pages;
} else {
hmc_p->addr = arr[0];
hmc_p = &qpmr->rq_pbl;
- hmc_p->addr = arr[1];
+ hmc_p->addr = arr[req->sq_pages];
}
} else { /* CQ */
hmc_p = &cqmr->cq_pbl;
cqmr->shadow = (dma_addr_t)arr[total];
+
if (use_pbles)
+ ret = i40iw_check_mem_contiguous(arr, req->cq_pages, pg_size);
+
+ if (!ret)
hmc_p->idx = palloc->level1.idx;
else
hmc_p->addr = arr[0];
}
+
+ if (use_pbles && ret) {
+ i40iw_free_pble(iwdev->pble_rsrc, palloc);
+ iwpbl->pbl_allocated = false;
+ }
+
return err;
}
@@ -1642,8 +1769,9 @@ static int i40iw_hwreg_mr(struct i40iw_device *iwdev,
stag_info->access_rights = access;
stag_info->pd_id = iwpd->sc_pd.pd_id;
stag_info->addr_type = I40IW_ADDR_TYPE_VA_BASED;
+ stag_info->page_size = iwmr->page_size;
- if (iwmr->page_cnt > 1) {
+ if (iwpbl->pbl_allocated) {
if (palloc->level == I40IW_LEVEL_1) {
stag_info->first_pm_pbl_index = palloc->level1.idx;
stag_info->chunk_size = 1;
@@ -1699,6 +1827,11 @@ static struct ib_mr *i40iw_reg_user_mr(struct ib_pd *pd,
bool use_pbles = false;
unsigned long flags;
int err = -ENOSYS;
+ int ret;
+ int pg_shift;
+
+ if (iwdev->closing)
+ return ERR_PTR(-ENODEV);
if (length > I40IW_MAX_MR_SIZE)
return ERR_PTR(-EINVAL);
@@ -1723,9 +1856,17 @@ static struct ib_mr *i40iw_reg_user_mr(struct ib_pd *pd,
iwmr->ibmr.pd = pd;
iwmr->ibmr.device = pd->device;
ucontext = to_ucontext(pd->uobject->context);
- region_length = region->length + (start & 0xfff);
- pbl_depth = region_length >> 12;
- pbl_depth += (region_length & (4096 - 1)) ? 1 : 0;
+
+ iwmr->page_size = region->page_size;
+ iwmr->page_msk = PAGE_MASK;
+
+ if (region->hugetlb && (req.reg_type == IW_MEMREG_TYPE_MEM))
+ i40iw_set_hugetlb_values(start, iwmr);
+
+ region_length = region->length + (start & (iwmr->page_size - 1));
+ pg_shift = ffs(iwmr->page_size) - 1;
+ pbl_depth = region_length >> pg_shift;
+ pbl_depth += (region_length & (iwmr->page_size - 1)) ? 1 : 0;
iwmr->length = region->length;
iwpbl->user_base = virt;
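
[Editor's note] With huge-page support, region_length and the PBL depth are computed against the MR's own page size rather than a hard-coded 4 KB. A worked example under assumed values (2 MB pages, a 6 MB region starting 0x1800 into its first page):

#include <stdint.h>
#include <strings.h>	/* ffs() */

uint64_t pbl_depth_example(void)
{
	uint32_t page_size = 2U << 20;		/* 2 MB huge page */
	uint64_t start = 0x1800;		/* offset into first page */
	uint64_t region_length = (6ULL << 20) + (start & (page_size - 1));
	int pg_shift = ffs(page_size) - 1;	/* 21 */
	uint64_t depth = region_length >> pg_shift;	/* 3 full pages */

	if (region_length & (page_size - 1))
		depth++;			/* partial tail -> 4 entries */
	return depth;
}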
@@ -1755,13 +1896,21 @@ static struct ib_mr *i40iw_reg_user_mr(struct ib_pd *pd,
spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags);
break;
case IW_MEMREG_TYPE_MEM:
+ use_pbles = (iwmr->page_cnt != 1);
access = I40IW_ACCESS_FLAGS_LOCALREAD;
- use_pbles = (iwmr->page_cnt != 1);
err = i40iw_setup_pbles(iwdev, iwmr, use_pbles);
if (err)
goto error;
+ if (use_pbles) {
+ ret = i40iw_check_mr_contiguous(palloc, iwmr->page_size);
+ if (ret) {
+ i40iw_free_pble(iwdev->pble_rsrc, palloc);
+ iwpbl->pbl_allocated = false;
+ }
+ }
+
access |= i40iw_get_user_access(acc);
stag = i40iw_create_stag(iwdev);
if (!stag) {
@@ -1778,6 +1927,7 @@ static struct ib_mr *i40iw_reg_user_mr(struct ib_pd *pd,
i40iw_free_stag(iwdev, stag);
goto error;
}
+
break;
default:
goto error;
@@ -1789,7 +1939,7 @@ static struct ib_mr *i40iw_reg_user_mr(struct ib_pd *pd,
return &iwmr->ibmr;
error:
- if (palloc->level != I40IW_LEVEL_0)
+ if (palloc->level != I40IW_LEVEL_0 && iwpbl->pbl_allocated)
i40iw_free_pble(iwdev->pble_rsrc, palloc);
ib_umem_release(region);
kfree(iwmr);
@@ -2142,7 +2292,6 @@ static int i40iw_post_send(struct ib_qp *ibqp,
case IB_WR_REG_MR:
{
struct i40iw_mr *iwmr = to_iwmr(reg_wr(ib_wr)->mr);
- int page_shift = ilog2(reg_wr(ib_wr)->mr->page_size);
int flags = reg_wr(ib_wr)->access;
struct i40iw_pble_alloc *palloc = &iwmr->iwpbl.pble_alloc;
struct i40iw_sc_dev *dev = &iwqp->iwdev->sc_dev;
@@ -2153,6 +2302,7 @@ static int i40iw_post_send(struct ib_qp *ibqp,
info.access_rights |= i40iw_get_user_access(flags);
info.stag_key = reg_wr(ib_wr)->key & 0xff;
info.stag_idx = reg_wr(ib_wr)->key >> 8;
+ info.page_size = reg_wr(ib_wr)->mr->page_size;
info.wr_id = ib_wr->wr_id;
info.addr_type = I40IW_ADDR_TYPE_VA_BASED;
@@ -2166,9 +2316,6 @@ static int i40iw_post_send(struct ib_qp *ibqp,
if (iwmr->npages > I40IW_MIN_PAGES_PER_FMR)
info.chunk_size = 1;
- if (page_shift == 21)
- info.page_size = 1; /* 2M page */
-
ret = dev->iw_priv_qp_ops->iw_mr_fast_register(&iwqp->sc_qp, &info, true);
if (ret)
err = -ENOMEM;
@@ -2487,21 +2634,17 @@ static int i40iw_get_hw_stats(struct ib_device *ibdev,
{
struct i40iw_device *iwdev = to_iwdev(ibdev);
struct i40iw_sc_dev *dev = &iwdev->sc_dev;
- struct i40iw_dev_pestat *devstat = &dev->dev_pestat;
+ struct i40iw_vsi_pestat *devstat = iwdev->vsi.pestat;
struct i40iw_dev_hw_stats *hw_stats = &devstat->hw_stats;
- unsigned long flags;
if (dev->is_pf) {
- spin_lock_irqsave(&devstat->stats_lock, flags);
- devstat->ops.iw_hw_stat_read_all(devstat,
- &devstat->hw_stats);
- spin_unlock_irqrestore(&devstat->stats_lock, flags);
+ i40iw_hw_stats_read_all(devstat, &devstat->hw_stats);
} else {
if (i40iw_vchnl_vf_get_pe_stats(dev, &devstat->hw_stats))
return -ENOSYS;
}
- memcpy(&stats->value[0], &hw_stats, sizeof(*hw_stats));
+ memcpy(&stats->value[0], hw_stats, sizeof(*hw_stats));
return stats->num_counters;
}
@@ -2562,7 +2705,9 @@ static int i40iw_query_pkey(struct ib_device *ibdev,
* @ah_attr: address handle attributes
*/
static struct ib_ah *i40iw_create_ah(struct ib_pd *ibpd,
- struct ib_ah_attr *attr)
+ struct ib_ah_attr *attr,
+ struct ib_udata *udata)
+
{
return ERR_PTR(-ENOSYS);
}
@@ -2621,7 +2766,7 @@ static struct i40iw_ib_device *i40iw_init_rdma_device(struct i40iw_device *iwdev
(1ull << IB_USER_VERBS_CMD_POST_RECV) |
(1ull << IB_USER_VERBS_CMD_POST_SEND);
iwibdev->ibdev.phys_port_cnt = 1;
- iwibdev->ibdev.num_comp_vectors = 1;
+ iwibdev->ibdev.num_comp_vectors = iwdev->ceqs_count;
iwibdev->ibdev.dma_device = &pcidev->dev;
iwibdev->ibdev.dev.parent = &pcidev->dev;
iwibdev->ibdev.query_port = i40iw_query_port;
@@ -2654,7 +2799,6 @@ static struct i40iw_ib_device *i40iw_init_rdma_device(struct i40iw_device *iwdev
iwibdev->ibdev.iwcm = kzalloc(sizeof(*iwibdev->ibdev.iwcm), GFP_KERNEL);
if (!iwibdev->ibdev.iwcm) {
ib_dealloc_device(&iwibdev->ibdev);
- i40iw_pr_err("iwcm == NULL\n");
return NULL;
}
@@ -2719,6 +2863,9 @@ void i40iw_destroy_rdma_device(struct i40iw_ib_device *iwibdev)
i40iw_unregister_rdma_device(iwibdev);
kfree(iwibdev->ibdev.iwcm);
iwibdev->ibdev.iwcm = NULL;
+ wait_event_timeout(iwibdev->iwdev->close_wq,
+ !atomic64_read(&iwibdev->iwdev->use_count),
+ I40IW_EVENT_TIMEOUT);
ib_dealloc_device(&iwibdev->ibdev);
}
diff --git a/drivers/infiniband/hw/i40iw/i40iw_verbs.h b/drivers/infiniband/hw/i40iw/i40iw_verbs.h
index 0069be8a5a38..07c3fec77de6 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_verbs.h
+++ b/drivers/infiniband/hw/i40iw/i40iw_verbs.h
@@ -42,6 +42,7 @@ struct i40iw_ucontext {
spinlock_t cq_reg_mem_list_lock; /* memory list for cq's */
struct list_head qp_reg_mem_list;
spinlock_t qp_reg_mem_list_lock; /* memory list for qp's */
+ int abi_ver;
};
struct i40iw_pd {
@@ -92,6 +93,8 @@ struct i40iw_mr {
struct ib_umem *region;
u16 type;
u32 page_cnt;
+ u32 page_size;
+ u64 page_msk;
u32 npages;
u32 stag;
u64 length;
diff --git a/drivers/infiniband/hw/i40iw/i40iw_virtchnl.c b/drivers/infiniband/hw/i40iw/i40iw_virtchnl.c
index 3041003c94d2..f4d13683a403 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_virtchnl.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_virtchnl.c
@@ -403,6 +403,19 @@ del_out:
}
/**
+ * i40iw_vf_init_pestat - Initialize stats for VF
+ * @dev: pointer to the VF device
+ * @stats: Statistics structure pointer
+ * @index: Stats index
+ */
+static void i40iw_vf_init_pestat(struct i40iw_sc_dev *dev, struct i40iw_vsi_pestat *stats, u16 index)
+{
+ stats->hw = dev->hw;
+ i40iw_hw_stats_init(stats, (u8)index, false);
+ spin_lock_init(&stats->lock);
+}
+
+/**
* i40iw_vchnl_recv_pf - Receive PF virtual channel messages
* @dev: IWARP device pointer
* @vf_id: Virtual function ID associated with the message
@@ -421,9 +434,8 @@ enum i40iw_status_code i40iw_vchnl_recv_pf(struct i40iw_sc_dev *dev,
u16 first_avail_iw_vf = I40IW_MAX_PE_ENABLED_VF_COUNT;
struct i40iw_virt_mem vf_dev_mem;
struct i40iw_virtchnl_work_info work_info;
- struct i40iw_dev_pestat *devstat;
+ struct i40iw_vsi_pestat *stats;
enum i40iw_status_code ret_code;
- unsigned long flags;
if (!dev || !msg || !len)
return I40IW_ERR_PARAM;
@@ -496,14 +508,7 @@ enum i40iw_status_code i40iw_vchnl_recv_pf(struct i40iw_sc_dev *dev,
i40iw_debug(dev, I40IW_DEBUG_VIRT,
"VF%u error CQP HMC Function operation.\n",
vf_id);
- ret_code = i40iw_device_init_pestat(&vf_dev->dev_pestat);
- if (ret_code)
- i40iw_debug(dev, I40IW_DEBUG_VIRT,
- "VF%u - i40iw_device_init_pestat failed\n",
- vf_id);
- vf_dev->dev_pestat.ops.iw_hw_stat_init(&vf_dev->dev_pestat,
- (u8)vf_dev->pmf_index,
- dev->hw, false);
+ i40iw_vf_init_pestat(dev, &vf_dev->pestat, vf_dev->pmf_index);
vf_dev->stats_initialized = true;
} else {
if (vf_dev) {
@@ -534,12 +539,10 @@ enum i40iw_status_code i40iw_vchnl_recv_pf(struct i40iw_sc_dev *dev,
case I40IW_VCHNL_OP_GET_STATS:
if (!vf_dev)
return I40IW_ERR_BAD_PTR;
- devstat = &vf_dev->dev_pestat;
- spin_lock_irqsave(&dev->dev_pestat.stats_lock, flags);
- devstat->ops.iw_hw_stat_read_all(devstat, &devstat->hw_stats);
- spin_unlock_irqrestore(&dev->dev_pestat.stats_lock, flags);
+ stats = &vf_dev->pestat;
+ i40iw_hw_stats_read_all(stats, &stats->hw_stats);
vf_dev->msg_count--;
- vchnl_pf_send_get_pe_stats_resp(dev, vf_id, vchnl_msg, &devstat->hw_stats);
+ vchnl_pf_send_get_pe_stats_resp(dev, vf_id, vchnl_msg, &stats->hw_stats);
break;
default:
i40iw_debug(dev, I40IW_DEBUG_VIRT,
diff --git a/drivers/infiniband/hw/mlx4/ah.c b/drivers/infiniband/hw/mlx4/ah.c
index b9bf0759f10a..077c33d2dc75 100644
--- a/drivers/infiniband/hw/mlx4/ah.c
+++ b/drivers/infiniband/hw/mlx4/ah.c
@@ -114,7 +114,9 @@ static struct ib_ah *create_iboe_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr
!(1 << ah->av.eth.stat_rate & dev->caps.stat_rate_support))
--ah->av.eth.stat_rate;
}
-
+ ah->av.eth.sl_tclass_flowlabel |=
+ cpu_to_be32((ah_attr->grh.traffic_class << 20) |
+ ah_attr->grh.flow_label);
/*
* HW requires multicast LID so we just choose one.
*/
@@ -122,12 +124,14 @@ static struct ib_ah *create_iboe_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr
ah->av.ib.dlid = cpu_to_be16(0xc000);
memcpy(ah->av.eth.dgid, ah_attr->grh.dgid.raw, 16);
- ah->av.eth.sl_tclass_flowlabel = cpu_to_be32(ah_attr->sl << 29);
+ ah->av.eth.sl_tclass_flowlabel |= cpu_to_be32(ah_attr->sl << 29);
return &ah->ibah;
}
-struct ib_ah *mlx4_ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr)
+struct ib_ah *mlx4_ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr,
+ struct ib_udata *udata)
+
{
struct mlx4_ib_ah *ah;
struct ib_ah *ret;
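
[Editor's note] create_iboe_ah now ORs the GRH traffic class and flow label into sl_tclass_flowlabel instead of overwriting the word with the SL alone. The bit layout implied by the shifts, as a sketch (field positions inferred from the patch, not from the mlx4 hardware spec):

#include <stdint.h>

/* [31:29] sl | [27:20] traffic class | [19:0] flow label */
uint32_t pack_sl_tclass_flowlabel(uint8_t sl, uint8_t tclass,
				  uint32_t flow_label)
{
	return ((uint32_t)sl << 29) |
	       ((uint32_t)tclass << 20) |
	       (flow_label & 0xFFFFF);
}
/* the driver then applies cpu_to_be32() before writing the AV */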
diff --git a/drivers/infiniband/hw/mlx4/alias_GUID.c b/drivers/infiniband/hw/mlx4/alias_GUID.c
index 5e9939045852..06020c54db20 100644
--- a/drivers/infiniband/hw/mlx4/alias_GUID.c
+++ b/drivers/infiniband/hw/mlx4/alias_GUID.c
@@ -755,10 +755,8 @@ static void alias_guid_work(struct work_struct *work)
struct mlx4_ib_dev *dev = container_of(ib_sriov, struct mlx4_ib_dev, sriov);
rec = kzalloc(sizeof *rec, GFP_KERNEL);
- if (!rec) {
- pr_err("alias_guid_work: No Memory\n");
+ if (!rec)
return;
- }
pr_debug("starting [port: %d]...\n", sriov_alias_port->port + 1);
ret = get_next_record_to_update(dev, sriov_alias_port->port, rec);
diff --git a/drivers/infiniband/hw/mlx4/cm.c b/drivers/infiniband/hw/mlx4/cm.c
index 39a488889fc7..d64845335e87 100644
--- a/drivers/infiniband/hw/mlx4/cm.c
+++ b/drivers/infiniband/hw/mlx4/cm.c
@@ -247,10 +247,8 @@ id_map_alloc(struct ib_device *ibdev, int slave_id, u32 sl_cm_id)
struct mlx4_ib_sriov *sriov = &to_mdev(ibdev)->sriov;
ent = kmalloc(sizeof (struct id_map_entry), GFP_KERNEL);
- if (!ent) {
- mlx4_ib_warn(ibdev, "Couldn't allocate id cache entry - out of memory\n");
+ if (!ent)
return ERR_PTR(-ENOMEM);
- }
ent->sl_cm_id = sl_cm_id;
ent->slave_id = slave_id;
diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c
index 1672907ff219..db564ccc0f92 100644
--- a/drivers/infiniband/hw/mlx4/mad.c
+++ b/drivers/infiniband/hw/mlx4/mad.c
@@ -39,6 +39,8 @@
#include <linux/mlx4/cmd.h>
#include <linux/gfp.h>
#include <rdma/ib_pma.h>
+#include <linux/ip.h>
+#include <net/ipv6.h>
#include <linux/mlx4/driver.h>
#include "mlx4_ib.h"
@@ -480,6 +482,23 @@ static int find_slave_port_pkey_ix(struct mlx4_ib_dev *dev, int slave,
return -EINVAL;
}
+static int get_gids_from_l3_hdr(struct ib_grh *grh, union ib_gid *sgid,
+ union ib_gid *dgid)
+{
+ int version = ib_get_rdma_header_version((const union rdma_network_hdr *)grh);
+ enum rdma_network_type net_type;
+
+ if (version == 4)
+ net_type = RDMA_NETWORK_IPV4;
+ else if (version == 6)
+ net_type = RDMA_NETWORK_IPV6;
+ else
+ return -EINVAL;
+
+ return ib_get_gids_from_rdma_hdr((union rdma_network_hdr *)grh, net_type,
+ sgid, dgid);
+}
+
int mlx4_ib_send_to_slave(struct mlx4_ib_dev *dev, int slave, u8 port,
enum ib_qp_type dest_qpt, struct ib_wc *wc,
struct ib_grh *grh, struct ib_mad *mad)
@@ -538,7 +557,10 @@ int mlx4_ib_send_to_slave(struct mlx4_ib_dev *dev, int slave, u8 port,
memset(&attr, 0, sizeof attr);
attr.port_num = port;
if (is_eth) {
- memcpy(&attr.grh.dgid.raw[0], &grh->dgid.raw[0], 16);
+ union ib_gid sgid;
+
+ if (get_gids_from_l3_hdr(grh, &sgid, &attr.grh.dgid))
+ return -EINVAL;
attr.ah_flags = IB_AH_GRH;
}
ah = ib_create_ah(tun_ctx->pd, &attr);
@@ -651,6 +673,11 @@ static int mlx4_ib_demux_mad(struct ib_device *ibdev, u8 port,
is_eth = 1;
if (is_eth) {
+ union ib_gid dgid;
+ union ib_gid sgid;
+
+ if (get_gids_from_l3_hdr(grh, &sgid, &dgid))
+ return -EINVAL;
if (!(wc->wc_flags & IB_WC_GRH)) {
mlx4_ib_warn(ibdev, "RoCE grh not present.\n");
return -EINVAL;
@@ -659,10 +686,10 @@ static int mlx4_ib_demux_mad(struct ib_device *ibdev, u8 port,
mlx4_ib_warn(ibdev, "RoCE mgmt class is not CM\n");
return -EINVAL;
}
- err = mlx4_get_slave_from_roce_gid(dev->dev, port, grh->dgid.raw, &slave);
+ err = mlx4_get_slave_from_roce_gid(dev->dev, port, dgid.raw, &slave);
if (err && mlx4_is_mf_bonded(dev->dev)) {
other_port = (port == 1) ? 2 : 1;
- err = mlx4_get_slave_from_roce_gid(dev->dev, other_port, grh->dgid.raw, &slave);
+ err = mlx4_get_slave_from_roce_gid(dev->dev, other_port, dgid.raw, &slave);
if (!err) {
port = other_port;
pr_debug("resolved slave %d from gid %pI6 wire port %d other %d\n",
@@ -702,10 +729,18 @@ static int mlx4_ib_demux_mad(struct ib_device *ibdev, u8 port,
/* If a grh is present, we demux according to it */
if (wc->wc_flags & IB_WC_GRH) {
- slave = mlx4_ib_find_real_gid(ibdev, port, grh->dgid.global.interface_id);
- if (slave < 0) {
- mlx4_ib_warn(ibdev, "failed matching grh\n");
- return -ENOENT;
+ if (grh->dgid.global.interface_id ==
+ cpu_to_be64(IB_SA_WELL_KNOWN_GUID) &&
+ grh->dgid.global.subnet_prefix == cpu_to_be64(
+ atomic64_read(&dev->sriov.demux[port - 1].subnet_prefix))) {
+ slave = 0;
+ } else {
+ slave = mlx4_ib_find_real_gid(ibdev, port,
+ grh->dgid.global.interface_id);
+ if (slave < 0) {
+ mlx4_ib_warn(ibdev, "failed matching grh\n");
+ return -ENOENT;
+ }
}
}
/* Class-specific handling */
@@ -1102,10 +1137,8 @@ static void handle_slaves_guid_change(struct mlx4_ib_dev *dev, u8 port_num,
in_mad = kmalloc(sizeof *in_mad, GFP_KERNEL);
out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
- if (!in_mad || !out_mad) {
- mlx4_ib_warn(&dev->ib_dev, "failed to allocate memory for guid info mads\n");
+ if (!in_mad || !out_mad)
goto out;
- }
guid_tbl_blk_num *= 4;
@@ -1916,11 +1949,8 @@ static int alloc_pv_object(struct mlx4_ib_dev *dev, int slave, int port,
*ret_ctx = NULL;
ctx = kzalloc(sizeof (struct mlx4_ib_demux_pv_ctx), GFP_KERNEL);
- if (!ctx) {
- pr_err("failed allocating pv resource context "
- "for port %d, slave %d\n", port, slave);
+ if (!ctx)
return -ENOMEM;
- }
ctx->ib_dev = &dev->ib_dev;
ctx->port = port;
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index b597e8227591..c8413fc120e6 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -430,7 +430,7 @@ static int mlx4_ib_query_device(struct ib_device *ibdev,
struct mlx4_ib_dev *dev = to_mdev(ibdev);
struct ib_smp *in_mad = NULL;
struct ib_smp *out_mad = NULL;
- int err = -ENOMEM;
+ int err;
int have_ib_ports;
struct mlx4_uverbs_ex_query_device cmd;
struct mlx4_uverbs_ex_query_device_resp resp = {.comp_mask = 0};
@@ -455,6 +455,7 @@ static int mlx4_ib_query_device(struct ib_device *ibdev,
sizeof(resp.response_length);
in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL);
out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
+ err = -ENOMEM;
if (!in_mad || !out_mad)
goto out;
@@ -547,6 +548,7 @@ static int mlx4_ib_query_device(struct ib_device *ibdev,
props->max_map_per_fmr = dev->dev->caps.max_fmr_maps;
props->hca_core_clock = dev->dev->caps.hca_core_clock * 1000UL;
props->timestamp_mask = 0xFFFFFFFFFFFFULL;
+ props->max_ah = INT_MAX;
if (!mlx4_is_slave(dev->dev))
err = mlx4_get_internal_clock_params(dev->dev, &clock_params);
@@ -697,9 +699,11 @@ static int eth_link_query_port(struct ib_device *ibdev, u8 port,
if (err)
goto out;
- props->active_width = (((u8 *)mailbox->buf)[5] == 0x40) ?
- IB_WIDTH_4X : IB_WIDTH_1X;
- props->active_speed = IB_SPEED_QDR;
+ props->active_width = (((u8 *)mailbox->buf)[5] == 0x40) ||
+ (((u8 *)mailbox->buf)[5] == 0x20 /*56Gb*/) ?
+ IB_WIDTH_4X : IB_WIDTH_1X;
+ props->active_speed = (((u8 *)mailbox->buf)[5] == 0x20 /*56Gb*/) ?
+ IB_SPEED_FDR : IB_SPEED_QDR;
props->port_cap_flags = IB_PORT_CM_SUP | IB_PORT_IP_BASED_GIDS;
props->gid_tbl_len = mdev->dev->caps.gid_table_len[port];
props->max_msg_sz = mdev->dev->caps.max_msg_sz;
@@ -2814,20 +2818,22 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
kmalloc(BITS_TO_LONGS(ibdev->steer_qpn_count) *
sizeof(long),
GFP_KERNEL);
- if (!ibdev->ib_uc_qpns_bitmap) {
- dev_err(&dev->persist->pdev->dev,
- "bit map alloc failed\n");
+ if (!ibdev->ib_uc_qpns_bitmap)
goto err_steer_qp_release;
- }
- bitmap_zero(ibdev->ib_uc_qpns_bitmap, ibdev->steer_qpn_count);
-
- err = mlx4_FLOW_STEERING_IB_UC_QP_RANGE(
- dev, ibdev->steer_qpn_base,
- ibdev->steer_qpn_base +
- ibdev->steer_qpn_count - 1);
- if (err)
- goto err_steer_free_bitmap;
+ if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_DMFS_IPOIB) {
+ bitmap_zero(ibdev->ib_uc_qpns_bitmap,
+ ibdev->steer_qpn_count);
+ err = mlx4_FLOW_STEERING_IB_UC_QP_RANGE(
+ dev, ibdev->steer_qpn_base,
+ ibdev->steer_qpn_base +
+ ibdev->steer_qpn_count - 1);
+ if (err)
+ goto err_steer_free_bitmap;
+ } else {
+ bitmap_fill(ibdev->ib_uc_qpns_bitmap,
+ ibdev->steer_qpn_count);
+ }
}
for (j = 1; j <= ibdev->dev->caps.num_ports; j++)
@@ -3055,15 +3061,12 @@ static void do_slave_init(struct mlx4_ib_dev *ibdev, int slave, int do_init)
first_port = find_first_bit(actv_ports.ports, dev->caps.num_ports);
dm = kcalloc(ports, sizeof(*dm), GFP_ATOMIC);
- if (!dm) {
- pr_err("failed to allocate memory for tunneling qp update\n");
+ if (!dm)
return;
- }
for (i = 0; i < ports; i++) {
dm[i] = kmalloc(sizeof (struct mlx4_ib_demux_work), GFP_ATOMIC);
if (!dm[i]) {
- pr_err("failed to allocate memory for tunneling qp update work struct\n");
while (--i >= 0)
kfree(dm[i]);
goto out;
@@ -3223,8 +3226,6 @@ void mlx4_sched_ib_sl2vl_update_work(struct mlx4_ib_dev *ibdev,
ew->port = port;
ew->ib_dev = ibdev;
queue_work(wq, &ew->work);
- } else {
- pr_err("failed to allocate memory for sl2vl update work\n");
}
}
@@ -3284,10 +3285,8 @@ static void mlx4_ib_event(struct mlx4_dev *dev, void *ibdev_ptr,
case MLX4_DEV_EVENT_PORT_MGMT_CHANGE:
ew = kmalloc(sizeof *ew, GFP_ATOMIC);
- if (!ew) {
- pr_err("failed to allocate memory for events work\n");
+ if (!ew)
break;
- }
INIT_WORK(&ew->work, handle_port_mgmt_change_event);
memcpy(&ew->ib_eqe, eqe, sizeof *eqe);
diff --git a/drivers/infiniband/hw/mlx4/mcg.c b/drivers/infiniband/hw/mlx4/mcg.c
index a21d37f02f35..e010fe459e67 100644
--- a/drivers/infiniband/hw/mlx4/mcg.c
+++ b/drivers/infiniband/hw/mlx4/mcg.c
@@ -1142,7 +1142,6 @@ void mlx4_ib_mcg_port_cleanup(struct mlx4_ib_demux_ctx *ctx, int destroy_wq)
work = kmalloc(sizeof *work, GFP_KERNEL);
if (!work) {
ctx->flushing = 0;
- mcg_warn("failed allocating work for cleanup\n");
return;
}
@@ -1202,10 +1201,8 @@ static int push_deleteing_req(struct mcast_group *group, int slave)
return 0;
req = kzalloc(sizeof *req, GFP_KERNEL);
- if (!req) {
- mcg_warn_group(group, "failed allocation - may leave stall groups\n");
+ if (!req)
return -ENOMEM;
- }
if (!list_empty(&group->func[slave].pending)) {
pend_req = list_entry(group->func[slave].pending.prev, struct mcast_req, group_list);
diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h
index 35141f451e5c..7f3d976d81ed 100644
--- a/drivers/infiniband/hw/mlx4/mlx4_ib.h
+++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h
@@ -742,7 +742,8 @@ int mlx4_ib_arm_cq(struct ib_cq *cq, enum ib_cq_notify_flags flags);
void __mlx4_ib_cq_clean(struct mlx4_ib_cq *cq, u32 qpn, struct mlx4_ib_srq *srq);
void mlx4_ib_cq_clean(struct mlx4_ib_cq *cq, u32 qpn, struct mlx4_ib_srq *srq);
-struct ib_ah *mlx4_ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr);
+struct ib_ah *mlx4_ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr,
+ struct ib_udata *udata);
int mlx4_ib_query_ah(struct ib_ah *ibah, struct ib_ah_attr *ah_attr);
int mlx4_ib_destroy_ah(struct ib_ah *ah);
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index 570bc866b1d6..c068add8838b 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -644,7 +644,7 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
int qpn;
int err;
struct ib_qp_cap backup_cap;
- struct mlx4_ib_sqp *sqp;
+ struct mlx4_ib_sqp *sqp = NULL;
struct mlx4_ib_qp *qp;
enum mlx4_ib_qp_type qp_type = (enum mlx4_ib_qp_type) init_attr->qp_type;
struct mlx4_ib_cq *mcq;
@@ -933,7 +933,9 @@ err_db:
mlx4_db_free(dev->dev, &qp->db);
err:
- if (!*caller_qp)
+ if (sqp)
+ kfree(sqp);
+ else if (!*caller_qp)
kfree(qp);
return err;
}
@@ -1280,7 +1282,8 @@ static int _mlx4_ib_destroy_qp(struct ib_qp *qp)
if (is_qp0(dev, mqp))
mlx4_CLOSE_PORT(dev->dev, mqp->port);
- if (dev->qp1_proxy[mqp->port - 1] == mqp) {
+ if (mqp->mlx4_ib_qp_type == MLX4_IB_QPT_PROXY_GSI &&
+ dev->qp1_proxy[mqp->port - 1] == mqp) {
mutex_lock(&dev->qp1_proxy_lock[mqp->port - 1]);
dev->qp1_proxy[mqp->port - 1] = NULL;
mutex_unlock(&dev->qp1_proxy_lock[mqp->port - 1]);
@@ -1764,14 +1767,14 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
u8 port_num = mlx4_is_bonded(to_mdev(ibqp->device)->dev) ? 1 :
attr_mask & IB_QP_PORT ? attr->port_num : qp->port;
union ib_gid gid;
- struct ib_gid_attr gid_attr;
+ struct ib_gid_attr gid_attr = {.gid_type = IB_GID_TYPE_IB};
u16 vlan = 0xffff;
u8 smac[ETH_ALEN];
int status = 0;
int is_eth = rdma_cap_eth_ah(&dev->ib_dev, port_num) &&
attr->ah_attr.ah_flags & IB_AH_GRH;
- if (is_eth) {
+ if (is_eth && attr->ah_attr.ah_flags & IB_AH_GRH) {
int index = attr->ah_attr.grh.sgid_index;
status = ib_get_cached_gid(ibqp->device, port_num,
diff --git a/drivers/infiniband/hw/mlx5/ah.c b/drivers/infiniband/hw/mlx5/ah.c
index 745efa4cfc71..d090e96f6f01 100644
--- a/drivers/infiniband/hw/mlx5/ah.c
+++ b/drivers/infiniband/hw/mlx5/ah.c
@@ -64,7 +64,9 @@ static struct ib_ah *create_ib_ah(struct mlx5_ib_dev *dev,
return &ah->ibah;
}
-struct ib_ah *mlx5_ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr)
+struct ib_ah *mlx5_ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr,
+ struct ib_udata *udata)
+
{
struct mlx5_ib_ah *ah;
struct mlx5_ib_dev *dev = to_mdev(pd->device);
@@ -75,6 +77,27 @@ struct ib_ah *mlx5_ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr)
if (ll == IB_LINK_LAYER_ETHERNET && !(ah_attr->ah_flags & IB_AH_GRH))
return ERR_PTR(-EINVAL);
+ if (ll == IB_LINK_LAYER_ETHERNET && udata) {
+ int err;
+ struct mlx5_ib_create_ah_resp resp = {};
+ u32 min_resp_len = offsetof(typeof(resp), dmac) +
+ sizeof(resp.dmac);
+
+ if (udata->outlen < min_resp_len)
+ return ERR_PTR(-EINVAL);
+
+ resp.response_length = min_resp_len;
+
+ err = ib_resolve_eth_dmac(pd->device, ah_attr);
+ if (err)
+ return ERR_PTR(err);
+
+ memcpy(resp.dmac, ah_attr->dmac, ETH_ALEN);
+ err = ib_copy_to_udata(udata, &resp, resp.response_length);
+ if (err)
+ return ERR_PTR(err);
+ }
+
ah = kzalloc(sizeof(*ah), GFP_ATOMIC);
if (!ah)
return ERR_PTR(-ENOMEM);
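
[Editor's note] mlx5_ib_create_ah now returns the resolved destination MAC through udata on Ethernet links; the outlen guard is the common "minimum response length" idiom. Isolated sketch with a hypothetical response struct, not the real uapi layout:

#include <stddef.h>
#include <stdint.h>

struct demo_create_ah_resp {
	uint32_t response_length;
	uint8_t  dmac[6];
	uint16_t reserved;
};

/* the user buffer must hold everything up to and including dmac */
size_t demo_min_resp_len(void)
{
	return offsetof(struct demo_create_ah_resp, dmac) +
	       sizeof(((struct demo_create_ah_resp *)0)->dmac);
}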
diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c
index fcd04b881ec1..b3ef47c3ab73 100644
--- a/drivers/infiniband/hw/mlx5/cq.c
+++ b/drivers/infiniband/hw/mlx5/cq.c
@@ -731,7 +731,7 @@ static int create_cq_user(struct mlx5_ib_dev *dev, struct ib_udata *udata,
int entries, u32 **cqb,
int *cqe_size, int *index, int *inlen)
{
- struct mlx5_ib_create_cq ucmd;
+ struct mlx5_ib_create_cq ucmd = {};
size_t ucmdlen;
int page_shift;
__be64 *pas;
@@ -770,7 +770,7 @@ static int create_cq_user(struct mlx5_ib_dev *dev, struct ib_udata *udata,
if (err)
goto err_umem;
- mlx5_ib_cont_pages(cq->buf.umem, ucmd.buf_addr, &npages, &page_shift,
+ mlx5_ib_cont_pages(cq->buf.umem, ucmd.buf_addr, 0, &npages, &page_shift,
&ncont, NULL);
mlx5_ib_dbg(dev, "addr 0x%llx, size %u, npages %d, page_shift %d, ncont %d\n",
ucmd.buf_addr, entries * ucmd.cqe_size, npages, page_shift, ncont);
@@ -792,8 +792,36 @@ static int create_cq_user(struct mlx5_ib_dev *dev, struct ib_udata *udata,
*index = to_mucontext(context)->uuari.uars[0].index;
+ if (ucmd.cqe_comp_en == 1) {
+ if (unlikely((*cqe_size != 64) ||
+ !MLX5_CAP_GEN(dev->mdev, cqe_compression))) {
+ err = -EOPNOTSUPP;
+ mlx5_ib_warn(dev, "CQE compression is not supported for size %d!\n",
+ *cqe_size);
+ goto err_cqb;
+ }
+
+ if (unlikely(!ucmd.cqe_comp_res_format ||
+ !(ucmd.cqe_comp_res_format <
+ MLX5_IB_CQE_RES_RESERVED) ||
+ (ucmd.cqe_comp_res_format &
+ (ucmd.cqe_comp_res_format - 1)))) {
+ err = -EOPNOTSUPP;
+ mlx5_ib_warn(dev, "CQE compression res format %d is not supported!\n",
+ ucmd.cqe_comp_res_format);
+ goto err_cqb;
+ }
+
+ MLX5_SET(cqc, cqc, cqe_comp_en, 1);
+ MLX5_SET(cqc, cqc, mini_cqe_res_format,
+ ilog2(ucmd.cqe_comp_res_format));
+ }
+
return 0;
+err_cqb:
+ kfree(cqb);
+
err_db:
mlx5_ib_db_unmap_user(to_mucontext(context), &cq->db);
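
[Editor's note] The CQE-compression checks above reject a zero format, anything at or above the reserved sentinel, and any value with more than one bit set; the last condition is the usual power-of-two test. In isolation (hypothetical names):

#include <stdbool.h>

static bool demo_valid_res_format(unsigned int fmt, unsigned int reserved_bit)
{
	/* exactly one supported format bit: non-zero, in range,
	 * and (fmt & (fmt - 1)) == 0 */
	return fmt && fmt < reserved_bit && !(fmt & (fmt - 1));
}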
@@ -1124,7 +1152,7 @@ static int resize_user(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq,
return err;
}
- mlx5_ib_cont_pages(umem, ucmd.buf_addr, &npages, page_shift,
+ mlx5_ib_cont_pages(umem, ucmd.buf_addr, 0, &npages, page_shift,
npas, NULL);
cq->resize_umem = umem;
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index 32b09f059c84..d566f6738833 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -127,7 +127,7 @@ static int mlx5_netdev_event(struct notifier_block *this,
if ((upper == ndev || (!upper && ndev == ibdev->roce.netdev))
&& ibdev->ib_active) {
- struct ib_event ibev = {0};
+ struct ib_event ibev = { };
ibev.device = &ibdev->ib_dev;
ibev.event = (event == NETDEV_UP) ?
@@ -496,6 +496,7 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
struct mlx5_ib_dev *dev = to_mdev(ibdev);
struct mlx5_core_dev *mdev = dev->mdev;
int err = -ENOMEM;
+ int max_sq_desc;
int max_rq_sg;
int max_sq_sg;
u64 min_page_size = 1ull << MLX5_CAP_GEN(mdev, log_pg_sz);
@@ -618,9 +619,10 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
props->max_qp_wr = 1 << MLX5_CAP_GEN(mdev, log_max_qp_sz);
max_rq_sg = MLX5_CAP_GEN(mdev, max_wqe_sz_rq) /
sizeof(struct mlx5_wqe_data_seg);
- max_sq_sg = (MLX5_CAP_GEN(mdev, max_wqe_sz_sq) -
- sizeof(struct mlx5_wqe_ctrl_seg)) /
- sizeof(struct mlx5_wqe_data_seg);
+ max_sq_desc = min_t(int, MLX5_CAP_GEN(mdev, max_wqe_sz_sq), 512);
+ max_sq_sg = (max_sq_desc - sizeof(struct mlx5_wqe_ctrl_seg) -
+ sizeof(struct mlx5_wqe_raddr_seg)) /
+ sizeof(struct mlx5_wqe_data_seg);
props->max_sge = min(max_rq_sg, max_sq_sg);
props->max_sge_rd = MLX5_MAX_SGE_RD;
props->max_cq = 1 << MLX5_CAP_GEN(mdev, log_max_cq);
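
[Editor's note] The new send-SGE bound caps the descriptor at 512 bytes and reserves room for a raddr segment. A worked example, assuming the usual 16-byte mlx5 ctrl/raddr/data segment sizes (an assumption; the sizes are not shown in this patch):

int demo_max_sq_sg(void)
{
	const int max_sq_desc = 512;	/* min(max_wqe_sz_sq cap, 512) */
	const int ctrl_seg = 16, raddr_seg = 16, data_seg = 16;

	return (max_sq_desc - ctrl_seg - raddr_seg) / data_seg;	/* 30 */
}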
@@ -643,6 +645,7 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
props->max_total_mcast_qp_attach = props->max_mcast_qp_attach *
props->max_mcast_grp;
props->max_map_per_fmr = INT_MAX; /* no limit in ConnectIB */
+ props->max_ah = INT_MAX;
props->hca_core_clock = MLX5_CAP_GEN(mdev, device_frequency_khz);
props->timestamp_mask = 0x7FFFFFFFFFFFFFFFULL;
@@ -669,6 +672,40 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
1 << MLX5_CAP_GEN(dev->mdev, log_max_rq);
}
+ if (field_avail(typeof(resp), mlx5_ib_support_multi_pkt_send_wqes,
+ uhw->outlen)) {
+ resp.mlx5_ib_support_multi_pkt_send_wqes =
+ MLX5_CAP_ETH(mdev, multi_pkt_send_wqe);
+ resp.response_length +=
+ sizeof(resp.mlx5_ib_support_multi_pkt_send_wqes);
+ }
+
+ if (field_avail(typeof(resp), reserved, uhw->outlen))
+ resp.response_length += sizeof(resp.reserved);
+
+ if (field_avail(typeof(resp), cqe_comp_caps, uhw->outlen)) {
+ resp.cqe_comp_caps.max_num =
+ MLX5_CAP_GEN(dev->mdev, cqe_compression) ?
+ MLX5_CAP_GEN(dev->mdev, cqe_compression_max_num) : 0;
+ resp.cqe_comp_caps.supported_format =
+ MLX5_IB_CQE_RES_FORMAT_HASH |
+ MLX5_IB_CQE_RES_FORMAT_CSUM;
+ resp.response_length += sizeof(resp.cqe_comp_caps);
+ }
+
+ if (field_avail(typeof(resp), packet_pacing_caps, uhw->outlen)) {
+ if (MLX5_CAP_QOS(mdev, packet_pacing) &&
+ MLX5_CAP_GEN(mdev, qos)) {
+ resp.packet_pacing_caps.qp_rate_limit_max =
+ MLX5_CAP_QOS(mdev, packet_pacing_max_rate);
+ resp.packet_pacing_caps.qp_rate_limit_min =
+ MLX5_CAP_QOS(mdev, packet_pacing_min_rate);
+ resp.packet_pacing_caps.supported_qpts |=
+ 1 << IB_QPT_RAW_PACKET;
+ }
+ resp.response_length += sizeof(resp.packet_pacing_caps);
+ }
+
if (uhw->outlen) {
err = ib_copy_to_udata(uhw, &resp, resp.response_length);
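
[Editor's note] Each optional capability block above is appended only when the user buffer can hold it, and response_length grows accordingly, so older libraries simply receive a shorter response. The field_avail() test is defined earlier in mlx5 main.c roughly as (shown here for reference):

#include <stddef.h>

#define field_avail(typ, fld, sz) \
	(offsetof(typ, fld) + sizeof(((typ *)0)->fld) <= (sz))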
@@ -1093,7 +1130,8 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
resp.response_length += sizeof(resp.cqe_version);
if (field_avail(typeof(resp), cmds_supp_uhw, udata->outlen)) {
- resp.cmds_supp_uhw |= MLX5_USER_CMDS_SUPP_UHW_QUERY_DEVICE;
+ resp.cmds_supp_uhw |= MLX5_USER_CMDS_SUPP_UHW_QUERY_DEVICE |
+ MLX5_USER_CMDS_SUPP_UHW_CREATE_AH;
resp.response_length += sizeof(resp.cmds_supp_uhw);
}
@@ -1502,6 +1540,22 @@ static void set_proto(void *outer_c, void *outer_v, u8 mask, u8 val)
MLX5_SET(fte_match_set_lyr_2_4, outer_v, ip_protocol, val);
}
+static void set_flow_label(void *misc_c, void *misc_v, u8 mask, u8 val,
+ bool inner)
+{
+ if (inner) {
+ MLX5_SET(fte_match_set_misc,
+ misc_c, inner_ipv6_flow_label, mask);
+ MLX5_SET(fte_match_set_misc,
+ misc_v, inner_ipv6_flow_label, val);
+ } else {
+ MLX5_SET(fte_match_set_misc,
+ misc_c, outer_ipv6_flow_label, mask);
+ MLX5_SET(fte_match_set_misc,
+ misc_v, outer_ipv6_flow_label, val);
+ }
+}
+
static void set_tos(void *outer_c, void *outer_v, u8 mask, u8 val)
{
MLX5_SET(fte_match_set_lyr_2_4, outer_c, ip_ecn, mask);
@@ -1515,6 +1569,7 @@ static void set_tos(void *outer_c, void *outer_v, u8 mask, u8 val)
#define LAST_IPV4_FIELD tos
#define LAST_IPV6_FIELD traffic_class
#define LAST_TCP_UDP_FIELD src_port
+#define LAST_TUNNEL_FIELD tunnel_id
/* Field is the last supported field */
#define FIELDS_NOT_SUPPORTED(filter, field)\
@@ -1527,155 +1582,164 @@ static void set_tos(void *outer_c, void *outer_v, u8 mask, u8 val)
static int parse_flow_attr(u32 *match_c, u32 *match_v,
const union ib_flow_spec *ib_spec)
{
- void *outer_headers_c = MLX5_ADDR_OF(fte_match_param, match_c,
- outer_headers);
- void *outer_headers_v = MLX5_ADDR_OF(fte_match_param, match_v,
- outer_headers);
void *misc_params_c = MLX5_ADDR_OF(fte_match_param, match_c,
misc_parameters);
void *misc_params_v = MLX5_ADDR_OF(fte_match_param, match_v,
misc_parameters);
+ void *headers_c;
+ void *headers_v;
+
+ if (ib_spec->type & IB_FLOW_SPEC_INNER) {
+ headers_c = MLX5_ADDR_OF(fte_match_param, match_c,
+ inner_headers);
+ headers_v = MLX5_ADDR_OF(fte_match_param, match_v,
+ inner_headers);
+ } else {
+ headers_c = MLX5_ADDR_OF(fte_match_param, match_c,
+ outer_headers);
+ headers_v = MLX5_ADDR_OF(fte_match_param, match_v,
+ outer_headers);
+ }
- switch (ib_spec->type) {
+ switch (ib_spec->type & ~IB_FLOW_SPEC_INNER) {
case IB_FLOW_SPEC_ETH:
if (FIELDS_NOT_SUPPORTED(ib_spec->eth.mask, LAST_ETH_FIELD))
return -ENOTSUPP;
- ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_c,
+ ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
dmac_47_16),
ib_spec->eth.mask.dst_mac);
- ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_v,
+ ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
dmac_47_16),
ib_spec->eth.val.dst_mac);
- ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_c,
+ ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
smac_47_16),
ib_spec->eth.mask.src_mac);
- ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_v,
+ ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
smac_47_16),
ib_spec->eth.val.src_mac);
if (ib_spec->eth.mask.vlan_tag) {
- MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c,
+ MLX5_SET(fte_match_set_lyr_2_4, headers_c,
vlan_tag, 1);
- MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v,
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v,
vlan_tag, 1);
- MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c,
+ MLX5_SET(fte_match_set_lyr_2_4, headers_c,
first_vid, ntohs(ib_spec->eth.mask.vlan_tag));
- MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v,
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v,
first_vid, ntohs(ib_spec->eth.val.vlan_tag));
- MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c,
+ MLX5_SET(fte_match_set_lyr_2_4, headers_c,
first_cfi,
ntohs(ib_spec->eth.mask.vlan_tag) >> 12);
- MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v,
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v,
first_cfi,
ntohs(ib_spec->eth.val.vlan_tag) >> 12);
- MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c,
+ MLX5_SET(fte_match_set_lyr_2_4, headers_c,
first_prio,
ntohs(ib_spec->eth.mask.vlan_tag) >> 13);
- MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v,
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v,
first_prio,
ntohs(ib_spec->eth.val.vlan_tag) >> 13);
}
- MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c,
+ MLX5_SET(fte_match_set_lyr_2_4, headers_c,
ethertype, ntohs(ib_spec->eth.mask.ether_type));
- MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v,
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v,
ethertype, ntohs(ib_spec->eth.val.ether_type));
break;
case IB_FLOW_SPEC_IPV4:
if (FIELDS_NOT_SUPPORTED(ib_spec->ipv4.mask, LAST_IPV4_FIELD))
return -ENOTSUPP;
- MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c,
+ MLX5_SET(fte_match_set_lyr_2_4, headers_c,
ethertype, 0xffff);
- MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v,
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v,
ethertype, ETH_P_IP);
- memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_c,
+ memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
src_ipv4_src_ipv6.ipv4_layout.ipv4),
&ib_spec->ipv4.mask.src_ip,
sizeof(ib_spec->ipv4.mask.src_ip));
- memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_v,
+ memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
src_ipv4_src_ipv6.ipv4_layout.ipv4),
&ib_spec->ipv4.val.src_ip,
sizeof(ib_spec->ipv4.val.src_ip));
- memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_c,
+ memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
&ib_spec->ipv4.mask.dst_ip,
sizeof(ib_spec->ipv4.mask.dst_ip));
- memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_v,
+ memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
&ib_spec->ipv4.val.dst_ip,
sizeof(ib_spec->ipv4.val.dst_ip));
- set_tos(outer_headers_c, outer_headers_v,
+ set_tos(headers_c, headers_v,
ib_spec->ipv4.mask.tos, ib_spec->ipv4.val.tos);
- set_proto(outer_headers_c, outer_headers_v,
+ set_proto(headers_c, headers_v,
ib_spec->ipv4.mask.proto, ib_spec->ipv4.val.proto);
break;
case IB_FLOW_SPEC_IPV6:
if (FIELDS_NOT_SUPPORTED(ib_spec->ipv6.mask, LAST_IPV6_FIELD))
return -ENOTSUPP;
- MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c,
+ MLX5_SET(fte_match_set_lyr_2_4, headers_c,
ethertype, 0xffff);
- MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v,
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v,
ethertype, ETH_P_IPV6);
- memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_c,
+ memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
src_ipv4_src_ipv6.ipv6_layout.ipv6),
&ib_spec->ipv6.mask.src_ip,
sizeof(ib_spec->ipv6.mask.src_ip));
- memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_v,
+ memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
src_ipv4_src_ipv6.ipv6_layout.ipv6),
&ib_spec->ipv6.val.src_ip,
sizeof(ib_spec->ipv6.val.src_ip));
- memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_c,
+ memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
&ib_spec->ipv6.mask.dst_ip,
sizeof(ib_spec->ipv6.mask.dst_ip));
- memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_v,
+ memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
&ib_spec->ipv6.val.dst_ip,
sizeof(ib_spec->ipv6.val.dst_ip));
- set_tos(outer_headers_c, outer_headers_v,
+ set_tos(headers_c, headers_v,
ib_spec->ipv6.mask.traffic_class,
ib_spec->ipv6.val.traffic_class);
- set_proto(outer_headers_c, outer_headers_v,
+ set_proto(headers_c, headers_v,
ib_spec->ipv6.mask.next_hdr,
ib_spec->ipv6.val.next_hdr);
- MLX5_SET(fte_match_set_misc, misc_params_c,
- outer_ipv6_flow_label,
- ntohl(ib_spec->ipv6.mask.flow_label));
- MLX5_SET(fte_match_set_misc, misc_params_v,
- outer_ipv6_flow_label,
- ntohl(ib_spec->ipv6.val.flow_label));
+ set_flow_label(misc_params_c, misc_params_v,
+ ntohl(ib_spec->ipv6.mask.flow_label),
+ ntohl(ib_spec->ipv6.val.flow_label),
+ ib_spec->type & IB_FLOW_SPEC_INNER);
+
break;
case IB_FLOW_SPEC_TCP:
if (FIELDS_NOT_SUPPORTED(ib_spec->tcp_udp.mask,
LAST_TCP_UDP_FIELD))
return -ENOTSUPP;
- MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, ip_protocol,
+ MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_protocol,
0xff);
- MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, ip_protocol,
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
IPPROTO_TCP);
- MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, tcp_sport,
+ MLX5_SET(fte_match_set_lyr_2_4, headers_c, tcp_sport,
ntohs(ib_spec->tcp_udp.mask.src_port));
- MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, tcp_sport,
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_sport,
ntohs(ib_spec->tcp_udp.val.src_port));
- MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, tcp_dport,
+ MLX5_SET(fte_match_set_lyr_2_4, headers_c, tcp_dport,
ntohs(ib_spec->tcp_udp.mask.dst_port));
- MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, tcp_dport,
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_dport,
ntohs(ib_spec->tcp_udp.val.dst_port));
break;
case IB_FLOW_SPEC_UDP:
@@ -1683,21 +1747,31 @@ static int parse_flow_attr(u32 *match_c, u32 *match_v,
LAST_TCP_UDP_FIELD))
return -ENOTSUPP;
- MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, ip_protocol,
+ MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_protocol,
0xff);
- MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, ip_protocol,
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
IPPROTO_UDP);
- MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, udp_sport,
+ MLX5_SET(fte_match_set_lyr_2_4, headers_c, udp_sport,
ntohs(ib_spec->tcp_udp.mask.src_port));
- MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, udp_sport,
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_sport,
ntohs(ib_spec->tcp_udp.val.src_port));
- MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, udp_dport,
+ MLX5_SET(fte_match_set_lyr_2_4, headers_c, udp_dport,
ntohs(ib_spec->tcp_udp.mask.dst_port));
- MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, udp_dport,
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
ntohs(ib_spec->tcp_udp.val.dst_port));
break;
+ case IB_FLOW_SPEC_VXLAN_TUNNEL:
+ if (FIELDS_NOT_SUPPORTED(ib_spec->tunnel.mask,
+ LAST_TUNNEL_FIELD))
+ return -ENOTSUPP;
+
+ MLX5_SET(fte_match_set_misc, misc_params_c, vxlan_vni,
+ ntohl(ib_spec->tunnel.mask.tunnel_id));
+ MLX5_SET(fte_match_set_misc, misc_params_v, vxlan_vni,
+ ntohl(ib_spec->tunnel.val.tunnel_id));
+ break;
default:
return -EINVAL;
}
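
[Editor's note] parse_flow_attr now selects the inner or outer header block from the INNER flag and masks that flag off before the type switch, so one parser body serves both encapsulation levels. The dispatch shape, reduced to a sketch (the constant value is illustrative, not the real IB_FLOW_SPEC_INNER):

#define DEMO_FLOW_SPEC_INNER 0x100	/* hypothetical bit value */

static void *demo_select_headers(int type, void *inner_hdrs, void *outer_hdrs)
{
	return (type & DEMO_FLOW_SPEC_INNER) ? inner_hdrs : outer_hdrs;
}
/* then: switch (type & ~DEMO_FLOW_SPEC_INNER) { ... per-protocol cases ... } */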
@@ -1771,13 +1845,13 @@ static int mlx5_ib_destroy_flow(struct ib_flow *flow_id)
mutex_lock(&dev->flow_db.lock);
list_for_each_entry_safe(iter, tmp, &handler->list, list) {
- mlx5_del_flow_rule(iter->rule);
+ mlx5_del_flow_rules(iter->rule);
put_flow_table(dev, iter->prio, true);
list_del(&iter->list);
kfree(iter);
}
- mlx5_del_flow_rule(handler->rule);
+ mlx5_del_flow_rules(handler->rule);
put_flow_table(dev, handler->prio, true);
mutex_unlock(&dev->flow_db.lock);
@@ -1857,7 +1931,7 @@ static struct mlx5_ib_flow_prio *get_flow_table(struct mlx5_ib_dev *dev,
ft = mlx5_create_auto_grouped_flow_table(ns, priority,
num_entries,
num_groups,
- 0);
+ 0, 0);
if (!IS_ERR(ft)) {
prio->refcount = 0;
@@ -1877,10 +1951,10 @@ static struct mlx5_ib_flow_handler *create_flow_rule(struct mlx5_ib_dev *dev,
{
struct mlx5_flow_table *ft = ft_prio->flow_table;
struct mlx5_ib_flow_handler *handler;
+ struct mlx5_flow_act flow_act = {0};
struct mlx5_flow_spec *spec;
const void *ib_flow = (const void *)flow_attr + sizeof(*flow_attr);
unsigned int spec_index;
- u32 action;
int err = 0;
if (!is_valid_attr(flow_attr))
@@ -1905,12 +1979,12 @@ static struct mlx5_ib_flow_handler *create_flow_rule(struct mlx5_ib_dev *dev,
}
spec->match_criteria_enable = get_match_criteria_enable(spec->match_criteria);
- action = dst ? MLX5_FLOW_CONTEXT_ACTION_FWD_DEST :
+ flow_act.action = dst ? MLX5_FLOW_CONTEXT_ACTION_FWD_DEST :
MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO;
- handler->rule = mlx5_add_flow_rule(ft, spec,
- action,
- MLX5_FS_DEFAULT_FLOW_TAG,
- dst);
+ flow_act.flow_tag = MLX5_FS_DEFAULT_FLOW_TAG;
+ handler->rule = mlx5_add_flow_rules(ft, spec,
+ &flow_act,
+ dst, 1);
if (IS_ERR(handler->rule)) {
err = PTR_ERR(handler->rule);
@@ -1941,7 +2015,7 @@ static struct mlx5_ib_flow_handler *create_dont_trap_rule(struct mlx5_ib_dev *de
handler_dst = create_flow_rule(dev, ft_prio,
flow_attr, dst);
if (IS_ERR(handler_dst)) {
- mlx5_del_flow_rule(handler->rule);
+ mlx5_del_flow_rules(handler->rule);
ft_prio->refcount--;
kfree(handler);
handler = handler_dst;
@@ -2004,7 +2078,7 @@ static struct mlx5_ib_flow_handler *create_leftovers_rule(struct mlx5_ib_dev *de
&leftovers_specs[LEFTOVERS_UC].flow_attr,
dst);
if (IS_ERR(handler_ucast)) {
- mlx5_del_flow_rule(handler->rule);
+ mlx5_del_flow_rules(handler->rule);
ft_prio->refcount--;
kfree(handler);
handler = handler_ucast;
@@ -2046,7 +2120,7 @@ static struct mlx5_ib_flow_handler *create_sniffer_rule(struct mlx5_ib_dev *dev,
return handler_rx;
err_tx:
- mlx5_del_flow_rule(handler_rx->rule);
+ mlx5_del_flow_rules(handler_rx->rule);
ft_rx->refcount--;
kfree(handler_rx);
err:
@@ -2358,6 +2432,8 @@ static void mlx5_ib_event(struct mlx5_core_dev *dev, void *context,
ibev.event = IB_EVENT_CLIENT_REREGISTER;
port = (u8)param;
break;
+ default:
+ return;
}
ibev.device = &ibdev->ib_dev;
@@ -2719,6 +2795,8 @@ static int mlx5_port_immutable(struct ib_device *ibdev, u8 port_num,
struct ib_port_immutable *immutable)
{
struct ib_port_attr attr;
+ struct mlx5_ib_dev *dev = to_mdev(ibdev);
+ enum rdma_link_layer ll = mlx5_ib_port_link_layer(ibdev, port_num);
int err;
err = mlx5_ib_query_port(ibdev, port_num, &attr);
@@ -2728,7 +2806,8 @@ static int mlx5_port_immutable(struct ib_device *ibdev, u8 port_num,
immutable->pkey_tbl_len = attr.pkey_tbl_len;
immutable->gid_tbl_len = attr.gid_tbl_len;
immutable->core_cap_flags = get_core_cap_flags(ibdev);
- immutable->max_mad_size = IB_MGMT_MAD_SIZE;
+ if ((ll == IB_LINK_LAYER_INFINIBAND) || MLX5_CAP_GEN(dev->mdev, roce))
+ immutable->max_mad_size = IB_MGMT_MAD_SIZE;
return 0;
}
@@ -2742,7 +2821,7 @@ static void get_dev_fw_str(struct ib_device *ibdev, char *str,
fw_rev_min(dev->mdev), fw_rev_sub(dev->mdev));
}
-static int mlx5_roce_lag_init(struct mlx5_ib_dev *dev)
+static int mlx5_eth_lag_init(struct mlx5_ib_dev *dev)
{
struct mlx5_core_dev *mdev = dev->mdev;
struct mlx5_flow_namespace *ns = mlx5_get_flow_namespace(mdev,
@@ -2771,7 +2850,7 @@ err_destroy_vport_lag:
return err;
}
-static void mlx5_roce_lag_cleanup(struct mlx5_ib_dev *dev)
+static void mlx5_eth_lag_cleanup(struct mlx5_ib_dev *dev)
{
struct mlx5_core_dev *mdev = dev->mdev;
@@ -2783,7 +2862,21 @@ static void mlx5_roce_lag_cleanup(struct mlx5_ib_dev *dev)
}
}
-static void mlx5_remove_roce_notifier(struct mlx5_ib_dev *dev)
+static int mlx5_add_netdev_notifier(struct mlx5_ib_dev *dev)
+{
+ int err;
+
+ dev->roce.nb.notifier_call = mlx5_netdev_event;
+ err = register_netdevice_notifier(&dev->roce.nb);
+ if (err) {
+ dev->roce.nb.notifier_call = NULL;
+ return err;
+ }
+
+ return 0;
+}
+
+static void mlx5_remove_netdev_notifier(struct mlx5_ib_dev *dev)
{
if (dev->roce.nb.notifier_call) {
unregister_netdevice_notifier(&dev->roce.nb);
@@ -2791,39 +2884,40 @@ static void mlx5_remove_roce_notifier(struct mlx5_ib_dev *dev)
}
}
-static int mlx5_enable_roce(struct mlx5_ib_dev *dev)
+static int mlx5_enable_eth(struct mlx5_ib_dev *dev)
{
int err;
- dev->roce.nb.notifier_call = mlx5_netdev_event;
- err = register_netdevice_notifier(&dev->roce.nb);
- if (err) {
- dev->roce.nb.notifier_call = NULL;
+ err = mlx5_add_netdev_notifier(dev);
+ if (err)
return err;
- }
- err = mlx5_nic_vport_enable_roce(dev->mdev);
- if (err)
- goto err_unregister_netdevice_notifier;
+ if (MLX5_CAP_GEN(dev->mdev, roce)) {
+ err = mlx5_nic_vport_enable_roce(dev->mdev);
+ if (err)
+ goto err_unregister_netdevice_notifier;
+ }
- err = mlx5_roce_lag_init(dev);
+ err = mlx5_eth_lag_init(dev);
if (err)
goto err_disable_roce;
return 0;
err_disable_roce:
- mlx5_nic_vport_disable_roce(dev->mdev);
+ if (MLX5_CAP_GEN(dev->mdev, roce))
+ mlx5_nic_vport_disable_roce(dev->mdev);
err_unregister_netdevice_notifier:
- mlx5_remove_roce_notifier(dev);
+ mlx5_remove_netdev_notifier(dev);
return err;
}
-static void mlx5_disable_roce(struct mlx5_ib_dev *dev)
+static void mlx5_disable_eth(struct mlx5_ib_dev *dev)
{
- mlx5_roce_lag_cleanup(dev);
- mlx5_nic_vport_disable_roce(dev->mdev);
+ mlx5_eth_lag_cleanup(dev);
+ if (MLX5_CAP_GEN(dev->mdev, roce))
+ mlx5_nic_vport_disable_roce(dev->mdev);
}
static void mlx5_ib_dealloc_q_counters(struct mlx5_ib_dev *dev)
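
mlx5_enable_eth()/mlx5_disable_eth() above now gate the RoCE vport toggle on the device capability, and the error-unwind path repeats the same guard so enable and disable stay balanced. A compilable toy model of that pattern — the function names here are invented for illustration:

    #include <stdbool.h>
    #include <stdio.h>

    static bool cap_roce;                 /* stands in for MLX5_CAP_GEN(.., roce) */

    static int  enable_roce(void)  { puts("roce on");  return 0; }
    static void disable_roce(void) { puts("roce off"); }
    static int  lag_init(void)     { return -1; }      /* force the unwind */

    static int enable_eth(void)
    {
        int err = 0;

        if (cap_roce) {
            err = enable_roce();
            if (err)
                return err;
        }

        err = lag_init();
        if (err)
            goto err_disable_roce;
        return 0;

    err_disable_roce:
        if (cap_roce)                     /* same guard as the enable side */
            disable_roce();
        return err;
    }

    int main(void) { return enable_eth() ? 1 : 0; }
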
@@ -2945,9 +3039,6 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
port_type_cap = MLX5_CAP_GEN(mdev, port_type);
ll = mlx5_port_type_cap_to_rdma_ll(port_type_cap);
- if ((ll == IB_LINK_LAYER_ETHERNET) && !MLX5_CAP_GEN(mdev, roce))
- return NULL;
-
printk_once(KERN_INFO "%s", mlx5_version);
dev = (struct mlx5_ib_dev *)ib_alloc_device(sizeof(*dev));
@@ -2993,6 +3084,8 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
(1ull << IB_USER_VERBS_CMD_QUERY_PORT) |
(1ull << IB_USER_VERBS_CMD_ALLOC_PD) |
(1ull << IB_USER_VERBS_CMD_DEALLOC_PD) |
+ (1ull << IB_USER_VERBS_CMD_CREATE_AH) |
+ (1ull << IB_USER_VERBS_CMD_DESTROY_AH) |
(1ull << IB_USER_VERBS_CMD_REG_MR) |
(1ull << IB_USER_VERBS_CMD_REREG_MR) |
(1ull << IB_USER_VERBS_CMD_DEREG_MR) |
@@ -3015,7 +3108,8 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
dev->ib_dev.uverbs_ex_cmd_mask =
(1ull << IB_USER_VERBS_EX_CMD_QUERY_DEVICE) |
(1ull << IB_USER_VERBS_EX_CMD_CREATE_CQ) |
- (1ull << IB_USER_VERBS_EX_CMD_CREATE_QP);
+ (1ull << IB_USER_VERBS_EX_CMD_CREATE_QP) |
+ (1ull << IB_USER_VERBS_EX_CMD_MODIFY_QP);
dev->ib_dev.query_device = mlx5_ib_query_device;
dev->ib_dev.query_port = mlx5_ib_query_port;
@@ -3126,14 +3220,14 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
spin_lock_init(&dev->reset_flow_resource_lock);
if (ll == IB_LINK_LAYER_ETHERNET) {
- err = mlx5_enable_roce(dev);
+ err = mlx5_enable_eth(dev);
if (err)
goto err_free_port;
}
err = create_dev_resources(&dev->devr);
if (err)
- goto err_disable_roce;
+ goto err_disable_eth;
err = mlx5_ib_odp_init_one(dev);
if (err)
@@ -3177,10 +3271,10 @@ err_odp:
err_rsrc:
destroy_dev_resources(&dev->devr);
-err_disable_roce:
+err_disable_eth:
if (ll == IB_LINK_LAYER_ETHERNET) {
- mlx5_disable_roce(dev);
- mlx5_remove_roce_notifier(dev);
+ mlx5_disable_eth(dev);
+ mlx5_remove_netdev_notifier(dev);
}
err_free_port:
@@ -3197,14 +3291,14 @@ static void mlx5_ib_remove(struct mlx5_core_dev *mdev, void *context)
struct mlx5_ib_dev *dev = context;
enum rdma_link_layer ll = mlx5_ib_port_link_layer(&dev->ib_dev, 1);
- mlx5_remove_roce_notifier(dev);
+ mlx5_remove_netdev_notifier(dev);
ib_unregister_device(&dev->ib_dev);
mlx5_ib_dealloc_q_counters(dev);
destroy_umrc_res(dev);
mlx5_ib_odp_remove_one(dev);
destroy_dev_resources(&dev->devr);
if (ll == IB_LINK_LAYER_ETHERNET)
- mlx5_disable_roce(dev);
+ mlx5_disable_eth(dev);
kfree(dev->port);
ib_dealloc_device(&dev->ib_dev);
}
diff --git a/drivers/infiniband/hw/mlx5/mem.c b/drivers/infiniband/hw/mlx5/mem.c
index 996b54e366b0..6851357c16f4 100644
--- a/drivers/infiniband/hw/mlx5/mem.c
+++ b/drivers/infiniband/hw/mlx5/mem.c
@@ -37,12 +37,15 @@
/* @umem: umem object to scan
* @addr: ib virtual address requested by the user
+ * @max_page_shift: high limit for page_shift - 0 means no limit
* @count: number of PAGE_SIZE pages covered by umem
* @shift: page shift for the compound pages found in the region
* @ncont: number of compound pages
* @order: log2 of the number of compound pages
*/
-void mlx5_ib_cont_pages(struct ib_umem *umem, u64 addr, int *count, int *shift,
+void mlx5_ib_cont_pages(struct ib_umem *umem, u64 addr,
+ unsigned long max_page_shift,
+ int *count, int *shift,
int *ncont, int *order)
{
unsigned long tmp;
@@ -72,6 +75,8 @@ void mlx5_ib_cont_pages(struct ib_umem *umem, u64 addr, int *count, int *shift,
addr = addr >> page_shift;
tmp = (unsigned long)addr;
m = find_first_bit(&tmp, BITS_PER_LONG);
+ if (max_page_shift)
+ m = min_t(unsigned long, max_page_shift - page_shift, m);
skip = 1 << m;
mask = skip - 1;
i = 0;
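
The max_page_shift clamp added to mlx5_ib_cont_pages() bounds the compound-page shift: the base page shift plus the number of low zero bits in the page-aligned address, capped by the caller's limit (0 meaning no cap, per the new kernel-doc line). A standalone model of just that arithmetic, with a made-up helper name:

    #include <stdint.h>
    #include <stdio.h>

    /* Each low zero bit of the page-aligned address doubles the largest
     * compound page the region can be described with. */
    static unsigned cont_page_shift(uint64_t addr, unsigned page_shift,
                                    unsigned max_page_shift)
    {
        unsigned long tmp = (unsigned long)(addr >> page_shift);
        unsigned m = tmp ? (unsigned)__builtin_ctzl(tmp) : sizeof(tmp) * 8;

        if (max_page_shift && max_page_shift > page_shift &&
            m > max_page_shift - page_shift)
            m = max_page_shift - page_shift;    /* the new clamp */
        return page_shift + m;
    }

    int main(void)
    {
        printf("%u\n", cont_page_shift(0x200000, 12, 0));   /* 21, uncapped */
        printf("%u\n", cont_page_shift(0x200000, 12, 17));  /* 17, capped */
        return 0;
    }
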
diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
index 7d689903c87c..6c6057eb60ea 100644
--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -63,6 +63,8 @@ pr_warn("%s:%s:%d:(pid %d): " format, (dev)->ib_dev.name, __func__, \
#define MLX5_IB_DEFAULT_UIDX 0xffffff
#define MLX5_USER_ASSIGNED_UIDX_MASK __mlx5_mask(qpc, user_index)
+#define MLX5_MKEY_PAGE_SHIFT_MASK __mlx5_mask(mkc, log_page_size)
+
enum {
MLX5_IB_MMAP_CMD_SHIFT = 8,
MLX5_IB_MMAP_CMD_MASK = 0xff,
@@ -153,7 +155,7 @@ struct mlx5_ib_flow_handler {
struct list_head list;
struct ib_flow ibflow;
struct mlx5_ib_flow_prio *prio;
- struct mlx5_flow_rule *rule;
+ struct mlx5_flow_handle *rule;
};
struct mlx5_ib_flow_db {
@@ -387,6 +389,7 @@ struct mlx5_ib_qp {
struct list_head qps_list;
struct list_head cq_recv_list;
struct list_head cq_send_list;
+ u32 rate_limit;
};
struct mlx5_ib_cq_buf {
@@ -418,7 +421,7 @@ struct mlx5_umr_wr {
struct ib_pd *pd;
unsigned int page_shift;
unsigned int npages;
- u32 length;
+ u64 length;
int access_flags;
u32 mkey;
};
@@ -739,7 +742,8 @@ void mlx5_ib_free_srq_wqe(struct mlx5_ib_srq *srq, int wqe_index);
int mlx5_MAD_IFC(struct mlx5_ib_dev *dev, int ignore_mkey, int ignore_bkey,
u8 port, const struct ib_wc *in_wc, const struct ib_grh *in_grh,
const void *in_mad, void *response_mad);
-struct ib_ah *mlx5_ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr);
+struct ib_ah *mlx5_ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr,
+ struct ib_udata *udata);
int mlx5_ib_query_ah(struct ib_ah *ibah, struct ib_ah_attr *ah_attr);
int mlx5_ib_destroy_ah(struct ib_ah *ah);
struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd,
@@ -825,7 +829,9 @@ int mlx5_ib_query_port(struct ib_device *ibdev, u8 port,
struct ib_port_attr *props);
int mlx5_ib_init_fmr(struct mlx5_ib_dev *dev);
void mlx5_ib_cleanup_fmr(struct mlx5_ib_dev *dev);
-void mlx5_ib_cont_pages(struct ib_umem *umem, u64 addr, int *count, int *shift,
+void mlx5_ib_cont_pages(struct ib_umem *umem, u64 addr,
+ unsigned long max_page_shift,
+ int *count, int *shift,
int *ncont, int *order);
void __mlx5_ib_populate_pas(struct mlx5_ib_dev *dev, struct ib_umem *umem,
int page_shift, size_t offset, size_t num_pages,
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index 4e9012463c37..8f608debe141 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -628,7 +628,8 @@ int mlx5_mr_cache_init(struct mlx5_ib_dev *dev)
ent->order = i + 2;
ent->dev = dev;
- if (dev->mdev->profile->mask & MLX5_PROF_MASK_MR_CACHE)
+ if ((dev->mdev->profile->mask & MLX5_PROF_MASK_MR_CACHE) &&
+ (mlx5_core_is_pf(dev->mdev)))
limit = dev->mdev->profile->mr_cache[i].limit;
else
limit = 0;
@@ -646,6 +647,33 @@ int mlx5_mr_cache_init(struct mlx5_ib_dev *dev)
return 0;
}
+static void wait_for_async_commands(struct mlx5_ib_dev *dev)
+{
+ struct mlx5_mr_cache *cache = &dev->cache;
+ struct mlx5_cache_ent *ent;
+ int total = 0;
+ int i;
+ int j;
+
+ for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) {
+ ent = &cache->ent[i];
+ for (j = 0 ; j < 1000; j++) {
+ if (!ent->pending)
+ break;
+ msleep(50);
+ }
+ }
+ for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) {
+ ent = &cache->ent[i];
+ total += ent->pending;
+ }
+
+ if (total)
+ mlx5_ib_warn(dev, "aborted while there are %d pending mr requests\n", total);
+ else
+ mlx5_ib_warn(dev, "done with all pending requests\n");
+}
+
int mlx5_mr_cache_cleanup(struct mlx5_ib_dev *dev)
{
int i;
@@ -659,6 +687,7 @@ int mlx5_mr_cache_cleanup(struct mlx5_ib_dev *dev)
clean_keys(dev, i);
destroy_workqueue(dev->cache.wq);
+ wait_for_async_commands(dev);
del_timer_sync(&dev->delay_timer);
return 0;
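
wait_for_async_commands() polls each cache entry's pending counter with a bounded retry budget (1000 iterations of msleep(50) per entry) and then reports anything still outstanding rather than blocking teardown forever. A toy userspace model of that bounded-drain shape — state and completion are simulated:

    #include <stdio.h>
    #include <unistd.h>

    #define ENTRIES 4
    static int pending[ENTRIES] = { 0, 2, 0, 1 };   /* simulated state */

    static void drain_pending(void)
    {
        int total = 0, i, j;

        for (i = 0; i < ENTRIES; i++)
            for (j = 0; j < 1000 && pending[i]; j++) {
                usleep(50 * 1000);      /* stands in for msleep(50) */
                pending[i]--;           /* toy: pretend work completes */
            }

        for (i = 0; i < ENTRIES; i++)
            total += pending[i];
        if (total)
            fprintf(stderr, "aborted with %d pending requests\n", total);
    }

    int main(void) { drain_pending(); return 0; }
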
@@ -816,29 +845,34 @@ static void prep_umr_unreg_wqe(struct mlx5_ib_dev *dev,
umrwr->mkey = key;
}
-static struct ib_umem *mr_umem_get(struct ib_pd *pd, u64 start, u64 length,
- int access_flags, int *npages,
- int *page_shift, int *ncont, int *order)
+static int mr_umem_get(struct ib_pd *pd, u64 start, u64 length,
+ int access_flags, struct ib_umem **umem,
+ int *npages, int *page_shift, int *ncont,
+ int *order)
{
struct mlx5_ib_dev *dev = to_mdev(pd->device);
- struct ib_umem *umem = ib_umem_get(pd->uobject->context, start, length,
- access_flags, 0);
- if (IS_ERR(umem)) {
+ int err;
+
+ *umem = ib_umem_get(pd->uobject->context, start, length,
+ access_flags, 0);
+ err = PTR_ERR_OR_ZERO(*umem);
+ if (err < 0) {
mlx5_ib_err(dev, "umem get failed (%d)\n", err);
- return (void *)umem;
+ return err;
}
- mlx5_ib_cont_pages(umem, start, npages, page_shift, ncont, order);
+ mlx5_ib_cont_pages(*umem, start, MLX5_MKEY_PAGE_SHIFT_MASK, npages,
+ page_shift, ncont, order);
if (!*npages) {
mlx5_ib_warn(dev, "avoid zero region\n");
- ib_umem_release(umem);
- return ERR_PTR(-EINVAL);
+ ib_umem_release(*umem);
+ return -EINVAL;
}
mlx5_ib_dbg(dev, "npages %d, ncont %d, order %d, page_shift %d\n",
*npages, *ncont, *order, *page_shift);
- return umem;
+ return 0;
}
static void mlx5_ib_umr_done(struct ib_cq *cq, struct ib_wc *wc)
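
mr_umem_get() changes calling convention above: the umem comes back through an out-parameter and errors come back as a plain int (via PTR_ERR_OR_ZERO), so both failure modes — the get itself and the zero-page region — surface as one error code. A minimal userspace sketch of that convention, with invented names:

    #include <errno.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct umem { int npages; };

    static int umem_get(int want_pages, struct umem **out)
    {
        struct umem *u = malloc(sizeof(*u));

        if (!u)
            return -ENOMEM;
        u->npages = want_pages;
        if (!u->npages) {                /* mirrors "avoid zero region" */
            free(u);
            return -EINVAL;
        }
        *out = u;
        return 0;
    }

    int main(void)
    {
        struct umem *u = NULL;
        int err = umem_get(0, &u);

        if (err < 0)
            fprintf(stderr, "umem get failed (%d)\n", err);
        return err ? 0 : 1;              /* expect the error path here */
    }
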
@@ -1164,11 +1198,11 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
mlx5_ib_dbg(dev, "start 0x%llx, virt_addr 0x%llx, length 0x%llx, access_flags 0x%x\n",
start, virt_addr, length, access_flags);
- umem = mr_umem_get(pd, start, length, access_flags, &npages,
+ err = mr_umem_get(pd, start, length, access_flags, &umem, &npages,
&page_shift, &ncont, &order);
- if (IS_ERR(umem))
- return (void *)umem;
+ if (err < 0)
+ return ERR_PTR(err);
if (use_umr(order)) {
mr = reg_umr(pd, umem, virt_addr, length, ncont, page_shift,
@@ -1345,10 +1379,9 @@ int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
*/
flags |= IB_MR_REREG_TRANS;
ib_umem_release(mr->umem);
- mr->umem = mr_umem_get(pd, addr, len, access_flags, &npages,
- &page_shift, &ncont, &order);
- if (IS_ERR(mr->umem)) {
- err = PTR_ERR(mr->umem);
+ err = mr_umem_get(pd, addr, len, access_flags, &mr->umem,
+ &npages, &page_shift, &ncont, &order);
+ if (err < 0) {
mr->umem = NULL;
return err;
}
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index d1e921816bfe..a1b3125f0a6e 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -77,12 +77,14 @@ struct mlx5_wqe_eth_pad {
enum raw_qp_set_mask_map {
MLX5_RAW_QP_MOD_SET_RQ_Q_CTR_ID = 1UL << 0,
+ MLX5_RAW_QP_RATE_LIMIT = 1UL << 1,
};
struct mlx5_modify_raw_qp_param {
u16 operation;
u32 set_mask; /* raw_qp_set_mask_map */
+ u32 rate_limit;
u8 rq_q_ctr_id;
};
@@ -351,6 +353,29 @@ static int calc_send_wqe(struct ib_qp_init_attr *attr)
return ALIGN(max_t(int, inl_size, size), MLX5_SEND_WQE_BB);
}
+static int get_send_sge(struct ib_qp_init_attr *attr, int wqe_size)
+{
+ int max_sge;
+
+ if (attr->qp_type == IB_QPT_RC)
+ max_sge = (min_t(int, wqe_size, 512) -
+ sizeof(struct mlx5_wqe_ctrl_seg) -
+ sizeof(struct mlx5_wqe_raddr_seg)) /
+ sizeof(struct mlx5_wqe_data_seg);
+ else if (attr->qp_type == IB_QPT_XRC_INI)
+ max_sge = (min_t(int, wqe_size, 512) -
+ sizeof(struct mlx5_wqe_ctrl_seg) -
+ sizeof(struct mlx5_wqe_xrc_seg) -
+ sizeof(struct mlx5_wqe_raddr_seg)) /
+ sizeof(struct mlx5_wqe_data_seg);
+ else
+ max_sge = (wqe_size - sq_overhead(attr)) /
+ sizeof(struct mlx5_wqe_data_seg);
+
+ return min_t(int, max_sge, (wqe_size - sq_overhead(attr)) /
+ sizeof(struct mlx5_wqe_data_seg));
+}
+
static int calc_sq_size(struct mlx5_ib_dev *dev, struct ib_qp_init_attr *attr,
struct mlx5_ib_qp *qp)
{
@@ -381,13 +406,18 @@ static int calc_sq_size(struct mlx5_ib_dev *dev, struct ib_qp_init_attr *attr,
wq_size = roundup_pow_of_two(attr->cap.max_send_wr * wqe_size);
qp->sq.wqe_cnt = wq_size / MLX5_SEND_WQE_BB;
if (qp->sq.wqe_cnt > (1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz))) {
- mlx5_ib_dbg(dev, "wqe count(%d) exceeds limits(%d)\n",
+ mlx5_ib_dbg(dev, "send queue size (%d * %d / %d -> %d) exceeds limits(%d)\n",
+ attr->cap.max_send_wr, wqe_size, MLX5_SEND_WQE_BB,
qp->sq.wqe_cnt,
1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz));
return -ENOMEM;
}
qp->sq.wqe_shift = ilog2(MLX5_SEND_WQE_BB);
- qp->sq.max_gs = attr->cap.max_send_sge;
+ qp->sq.max_gs = get_send_sge(attr, wqe_size);
+ if (qp->sq.max_gs < attr->cap.max_send_sge)
+ return -ENOMEM;
+
+ attr->cap.max_send_sge = qp->sq.max_gs;
qp->sq.max_post = wq_size / wqe_size;
attr->cap.max_send_wr = qp->sq.max_post;
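
get_send_sge() bounds the advertised send SGEs by what actually fits in a WQE: for RC and XRC_INI the usable WQE size is clamped to 512 bytes, the fixed segments are subtracted, and the remainder is divided by the data-segment size. The arithmetic, modelled standalone with illustrative segment sizes:

    #include <stdio.h>

    /* Illustrative sizes; the driver uses sizeof() on the real mlx5
     * WQE segment structs. */
    #define CTRL_SEG  16
    #define RADDR_SEG 16
    #define DATA_SEG  16

    static int max_send_sge_rc(int wqe_size)
    {
        int usable = wqe_size < 512 ? wqe_size : 512;

        return (usable - CTRL_SEG - RADDR_SEG) / DATA_SEG;
    }

    int main(void)
    {
        printf("RC, 256B WQE: %d\n", max_send_sge_rc(256));    /* 14 */
        printf("RC, 1KB WQE:  %d\n", max_send_sge_rc(1024));   /* 30 */
        return 0;
    }
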
@@ -647,7 +677,7 @@ static int mlx5_ib_umem_get(struct mlx5_ib_dev *dev,
return PTR_ERR(*umem);
}
- mlx5_ib_cont_pages(*umem, addr, npages, page_shift, ncont, NULL);
+ mlx5_ib_cont_pages(*umem, addr, 0, npages, page_shift, ncont, NULL);
err = mlx5_ib_get_buf_offset(addr, *page_shift, offset);
if (err) {
@@ -700,7 +730,7 @@ static int create_user_rq(struct mlx5_ib_dev *dev, struct ib_pd *pd,
return err;
}
- mlx5_ib_cont_pages(rwq->umem, ucmd->buf_addr, &npages, &page_shift,
+ mlx5_ib_cont_pages(rwq->umem, ucmd->buf_addr, 0, &npages, &page_shift,
&ncont, NULL);
err = mlx5_ib_get_buf_offset(ucmd->buf_addr, page_shift,
&rwq->rq_page_offset);
@@ -2442,8 +2472,14 @@ out:
}
static int modify_raw_packet_qp_sq(struct mlx5_core_dev *dev,
- struct mlx5_ib_sq *sq, int new_state)
+ struct mlx5_ib_sq *sq,
+ int new_state,
+ const struct mlx5_modify_raw_qp_param *raw_qp_param)
{
+ struct mlx5_ib_qp *ibqp = sq->base.container_mibqp;
+ u32 old_rate = ibqp->rate_limit;
+ u32 new_rate = old_rate;
+ u16 rl_index = 0;
void *in;
void *sqc;
int inlen;
@@ -2459,10 +2495,44 @@ static int modify_raw_packet_qp_sq(struct mlx5_core_dev *dev,
sqc = MLX5_ADDR_OF(modify_sq_in, in, ctx);
MLX5_SET(sqc, sqc, state, new_state);
+ if (raw_qp_param->set_mask & MLX5_RAW_QP_RATE_LIMIT) {
+ if (new_state != MLX5_SQC_STATE_RDY)
+ pr_warn("%s: Rate limit can only be changed when SQ is moving to RDY\n",
+ __func__);
+ else
+ new_rate = raw_qp_param->rate_limit;
+ }
+
+ if (old_rate != new_rate) {
+ if (new_rate) {
+ err = mlx5_rl_add_rate(dev, new_rate, &rl_index);
+ if (err) {
+ pr_err("Failed configuring rate %u: %d\n",
+ new_rate, err);
+ goto out;
+ }
+ }
+
+ MLX5_SET64(modify_sq_in, in, modify_bitmask, 1);
+ MLX5_SET(sqc, sqc, packet_pacing_rate_limit_index, rl_index);
+ }
+
err = mlx5_core_modify_sq(dev, sq->base.mqp.qpn, in, inlen);
- if (err)
+ if (err) {
+ /* Remove new rate from table if failed */
+ if (new_rate &&
+ old_rate != new_rate)
+ mlx5_rl_remove_rate(dev, new_rate);
goto out;
+ }
+
+ /* Only remove the old rate after the new rate has been set */
+ if ((old_rate &&
+ (old_rate != new_rate)) ||
+ (new_state != MLX5_SQC_STATE_RDY))
+ mlx5_rl_remove_rate(dev, old_rate);
+ ibqp->rate_limit = new_rate;
sq->state = new_state;
out:
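
The packet-pacing update in modify_raw_packet_qp_sq() is ordered so the SQ is never left without a valid rate: the new rate is added to the rate table first, the MODIFY_SQ command is issued, and only after it succeeds is the old rate dropped — a failed command removes just the rate this call added. A compilable toy of that ordering, with stubs standing in for mlx5_rl_add_rate()/mlx5_rl_remove_rate() and the firmware command:

    #include <stdio.h>

    static int  rl_add(unsigned rate)    { printf("add %u\n", rate); return 0; }
    static void rl_remove(unsigned rate) { printf("del %u\n", rate); }
    static int  hw_modify(void)          { return 0; }

    static int change_rate(unsigned *cur, unsigned new_rate)
    {
        int err = 0;

        if (*cur != new_rate && new_rate) {
            err = rl_add(new_rate);      /* new rate goes in first */
            if (err)
                return err;
        }

        err = hw_modify();
        if (err) {
            if (new_rate && *cur != new_rate)
                rl_remove(new_rate);     /* undo only what this call added */
            return err;
        }

        if (*cur && *cur != new_rate)
            rl_remove(*cur);             /* old rate dropped only on success */
        *cur = new_rate;
        return 0;
    }

    int main(void)
    {
        unsigned cur = 0;

        change_rate(&cur, 1000);
        change_rate(&cur, 2000);
        return 0;
    }
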
@@ -2477,6 +2547,8 @@ static int modify_raw_packet_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
struct mlx5_ib_raw_packet_qp *raw_packet_qp = &qp->raw_packet_qp;
struct mlx5_ib_rq *rq = &raw_packet_qp->rq;
struct mlx5_ib_sq *sq = &raw_packet_qp->sq;
+ int modify_rq = !!qp->rq.wqe_cnt;
+ int modify_sq = !!qp->sq.wqe_cnt;
int rq_state;
int sq_state;
int err;
@@ -2494,10 +2566,18 @@ static int modify_raw_packet_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
rq_state = MLX5_RQC_STATE_RST;
sq_state = MLX5_SQC_STATE_RST;
break;
- case MLX5_CMD_OP_INIT2INIT_QP:
- case MLX5_CMD_OP_INIT2RTR_QP:
case MLX5_CMD_OP_RTR2RTS_QP:
case MLX5_CMD_OP_RTS2RTS_QP:
+ if (raw_qp_param->set_mask ==
+ MLX5_RAW_QP_RATE_LIMIT) {
+ modify_rq = 0;
+ sq_state = sq->state;
+ } else {
+ return raw_qp_param->set_mask ? -EINVAL : 0;
+ }
+ break;
+ case MLX5_CMD_OP_INIT2INIT_QP:
+ case MLX5_CMD_OP_INIT2RTR_QP:
if (raw_qp_param->set_mask)
return -EINVAL;
else
@@ -2507,13 +2587,13 @@ static int modify_raw_packet_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
return -EINVAL;
}
- if (qp->rq.wqe_cnt) {
- err = modify_raw_packet_qp_rq(dev, rq, rq_state, raw_qp_param);
+ if (modify_rq) {
+ err = modify_raw_packet_qp_rq(dev, rq, rq_state, raw_qp_param);
if (err)
return err;
}
- if (qp->sq.wqe_cnt) {
+ if (modify_sq) {
if (tx_affinity) {
err = modify_raw_packet_tx_affinity(dev->mdev, sq,
tx_affinity);
@@ -2521,7 +2601,7 @@ static int modify_raw_packet_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
return err;
}
- return modify_raw_packet_qp_sq(dev->mdev, sq, sq_state);
+ return modify_raw_packet_qp_sq(dev->mdev, sq, sq_state, raw_qp_param);
}
return 0;
@@ -2577,7 +2657,6 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
struct mlx5_ib_port *mibport = NULL;
enum mlx5_qp_state mlx5_cur, mlx5_new;
enum mlx5_qp_optpar optpar;
- int sqd_event;
int mlx5_st;
int err;
u16 op;
@@ -2724,12 +2803,6 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
if (qp->rq.wqe_cnt && cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT)
context->db_rec_addr = cpu_to_be64(qp->db.dma);
- if (cur_state == IB_QPS_RTS && new_state == IB_QPS_SQD &&
- attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY && attr->en_sqd_async_notify)
- sqd_event = 1;
- else
- sqd_event = 0;
-
if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) {
u8 port_num = (attr_mask & IB_QP_PORT ? attr->port_num :
qp->port) - 1;
@@ -2776,6 +2849,12 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
raw_qp_param.rq_q_ctr_id = mibport->q_cnt_id;
raw_qp_param.set_mask |= MLX5_RAW_QP_MOD_SET_RQ_Q_CTR_ID;
}
+
+ if (attr_mask & IB_QP_RATE_LIMIT) {
+ raw_qp_param.rate_limit = attr->rate_limit;
+ raw_qp_param.set_mask |= MLX5_RAW_QP_RATE_LIMIT;
+ }
+
err = modify_raw_packet_qp(dev, qp, &raw_qp_param, tx_affinity);
} else {
err = mlx5_core_qp_modify(dev->mdev, op, optpar, context,
@@ -3067,10 +3146,10 @@ static void set_linv_umr_seg(struct mlx5_wqe_umr_ctrl_seg *umr)
{
memset(umr, 0, sizeof(*umr));
umr->mkey_mask = cpu_to_be64(MLX5_MKEY_MASK_FREE);
- umr->flags = 1 << 7;
+ umr->flags = MLX5_UMR_INLINE;
}
-static __be64 get_umr_reg_mr_mask(void)
+static __be64 get_umr_reg_mr_mask(int atomic)
{
u64 result;
@@ -3083,9 +3162,11 @@ static __be64 get_umr_reg_mr_mask(void)
MLX5_MKEY_MASK_KEY |
MLX5_MKEY_MASK_RR |
MLX5_MKEY_MASK_RW |
- MLX5_MKEY_MASK_A |
MLX5_MKEY_MASK_FREE;
+ if (atomic)
+ result |= MLX5_MKEY_MASK_A;
+
return cpu_to_be64(result);
}
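
get_umr_reg_mr_mask() now takes an atomic flag so the MKEY mask requests atomic access only on hardware that reports the capability. The gating itself is a one-line conditional OR; a standalone sketch with invented bit positions:

    #include <stdint.h>
    #include <stdio.h>

    /* Invented bit positions -- the real masks are MLX5_MKEY_MASK_*. */
    #define MKEY_MASK_RW   (1ull << 0)
    #define MKEY_MASK_A    (1ull << 1)
    #define MKEY_MASK_FREE (1ull << 2)

    static uint64_t reg_mr_mask(int atomic)
    {
        uint64_t m = MKEY_MASK_RW | MKEY_MASK_FREE;

        if (atomic)                      /* only when the device has it */
            m |= MKEY_MASK_A;
        return m;
    }

    int main(void)
    {
        printf("no atomic: 0x%llx\n", (unsigned long long)reg_mr_mask(0));
        printf("atomic:    0x%llx\n", (unsigned long long)reg_mr_mask(1));
        return 0;
    }
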
@@ -3146,7 +3227,7 @@ static __be64 get_umr_update_pd_mask(void)
}
static void set_reg_umr_segment(struct mlx5_wqe_umr_ctrl_seg *umr,
- struct ib_send_wr *wr)
+ struct ib_send_wr *wr, int atomic)
{
struct mlx5_umr_wr *umrwr = umr_wr(wr);
@@ -3171,7 +3252,7 @@ static void set_reg_umr_segment(struct mlx5_wqe_umr_ctrl_seg *umr,
if (wr->send_flags & MLX5_IB_SEND_UMR_UPDATE_PD)
umr->mkey_mask |= get_umr_update_pd_mask();
if (!umr->mkey_mask)
- umr->mkey_mask = get_umr_reg_mr_mask();
+ umr->mkey_mask = get_umr_reg_mr_mask(atomic);
} else {
umr->mkey_mask = get_umr_unreg_mr_mask();
}
@@ -4024,7 +4105,7 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
}
qp->sq.wr_data[idx] = MLX5_IB_WR_UMR;
ctrl->imm = cpu_to_be32(umr_wr(wr)->mkey);
- set_reg_umr_segment(seg, wr);
+ set_reg_umr_segment(seg, wr, !!(MLX5_CAP_GEN(mdev, atomic)));
seg += sizeof(struct mlx5_wqe_umr_ctrl_seg);
size += sizeof(struct mlx5_wqe_umr_ctrl_seg) / 16;
if (unlikely((seg == qend)))
diff --git a/drivers/infiniband/hw/mlx5/srq.c b/drivers/infiniband/hw/mlx5/srq.c
index 3857dbd9c956..6f4397ee1ed6 100644
--- a/drivers/infiniband/hw/mlx5/srq.c
+++ b/drivers/infiniband/hw/mlx5/srq.c
@@ -118,7 +118,7 @@ static int create_srq_user(struct ib_pd *pd, struct mlx5_ib_srq *srq,
return err;
}
- mlx5_ib_cont_pages(srq->umem, ucmd.buf_addr, &npages,
+ mlx5_ib_cont_pages(srq->umem, ucmd.buf_addr, 0, &npages,
&page_shift, &ncont, NULL);
err = mlx5_ib_get_buf_offset(ucmd.buf_addr, page_shift,
&offset);
@@ -203,8 +203,6 @@ static int create_srq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_srq *srq,
srq->wrid = kmalloc(srq->msrq.max * sizeof(u64), GFP_KERNEL);
if (!srq->wrid) {
- mlx5_ib_dbg(dev, "kmalloc failed %lu\n",
- (unsigned long)(srq->msrq.max * sizeof(u64)));
err = -ENOMEM;
goto err_in;
}
@@ -282,6 +280,7 @@ struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd,
mlx5_ib_dbg(dev, "desc_size 0x%x, req wr 0x%x, srq size 0x%x, max_gs 0x%x, max_avail_gather 0x%x\n",
desc_size, init_attr->attr.max_wr, srq->msrq.max, srq->msrq.max_gs,
srq->msrq.max_avail_gather);
+ in.type = init_attr->srq_type;
if (pd->uobject)
err = create_srq_user(pd, srq, &in, udata, buf_size);
@@ -294,7 +293,6 @@ struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd,
goto err_srq;
}
- in.type = init_attr->srq_type;
in.log_size = ilog2(srq->msrq.max);
in.wqe_shift = srq->msrq.wqe_shift - 4;
if (srq->wq_sig)
diff --git a/drivers/infiniband/hw/mthca/mthca_av.c b/drivers/infiniband/hw/mthca/mthca_av.c
index bcac294042f5..c9f0f364f484 100644
--- a/drivers/infiniband/hw/mthca/mthca_av.c
+++ b/drivers/infiniband/hw/mthca/mthca_av.c
@@ -186,8 +186,8 @@ int mthca_create_ah(struct mthca_dev *dev,
on_hca_fail:
if (ah->type == MTHCA_AH_PCI_POOL) {
- ah->av = pci_pool_alloc(dev->av_table.pool,
- GFP_ATOMIC, &ah->avdma);
+ ah->av = pci_pool_zalloc(dev->av_table.pool,
+ GFP_ATOMIC, &ah->avdma);
if (!ah->av)
return -ENOMEM;
@@ -196,8 +196,6 @@ on_hca_fail:
ah->key = pd->ntmr.ibmr.lkey;
- memset(av, 0, MTHCA_AV_SIZE);
-
av->port_pd = cpu_to_be32(pd->pd_num | (ah_attr->port_num << 24));
av->g_slid = ah_attr->src_path_bits;
av->dlid = cpu_to_be16(ah_attr->dlid);
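
The mthca_av.c hunk folds pci_pool_alloc() plus a later memset() into pci_pool_zalloc(). The same before/after shape in plain userspace C (malloc+memset versus calloc):

    #include <stdlib.h>
    #include <string.h>

    #define AV_SIZE 32                   /* stands in for MTHCA_AV_SIZE */

    int main(void)
    {
        /* before: allocate, then clear by hand */
        void *a = malloc(AV_SIZE);
        if (a)
            memset(a, 0, AV_SIZE);

        /* after: a single zeroing allocation */
        void *b = calloc(1, AV_SIZE);

        free(a);
        free(b);
        return 0;
    }
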
diff --git a/drivers/infiniband/hw/mthca/mthca_provider.c b/drivers/infiniband/hw/mthca/mthca_provider.c
index 358930a41e36..d31708742ba5 100644
--- a/drivers/infiniband/hw/mthca/mthca_provider.c
+++ b/drivers/infiniband/hw/mthca/mthca_provider.c
@@ -410,7 +410,9 @@ static int mthca_dealloc_pd(struct ib_pd *pd)
}
static struct ib_ah *mthca_ah_create(struct ib_pd *pd,
- struct ib_ah_attr *ah_attr)
+ struct ib_ah_attr *ah_attr,
+ struct ib_udata *udata)
+
{
int err;
struct mthca_ah *ah;
diff --git a/drivers/infiniband/hw/mthca/mthca_reset.c b/drivers/infiniband/hw/mthca/mthca_reset.c
index 6727af27c017..2a6979e4ae1c 100644
--- a/drivers/infiniband/hw/mthca/mthca_reset.c
+++ b/drivers/infiniband/hw/mthca/mthca_reset.c
@@ -96,8 +96,6 @@ int mthca_reset(struct mthca_dev *mdev)
hca_header = kmalloc(256, GFP_KERNEL);
if (!hca_header) {
err = -ENOMEM;
- mthca_err(mdev, "Couldn't allocate memory to save HCA "
- "PCI header, aborting.\n");
goto put_dev;
}
@@ -119,8 +117,6 @@ int mthca_reset(struct mthca_dev *mdev)
bridge_header = kmalloc(256, GFP_KERNEL);
if (!bridge_header) {
err = -ENOMEM;
- mthca_err(mdev, "Couldn't allocate memory to save HCA "
- "bridge PCI header, aborting.\n");
goto free_hca;
}
diff --git a/drivers/infiniband/hw/nes/nes.c b/drivers/infiniband/hw/nes/nes.c
index 35cbb17bec12..5b9601014f0c 100644
--- a/drivers/infiniband/hw/nes/nes.c
+++ b/drivers/infiniband/hw/nes/nes.c
@@ -65,7 +65,6 @@ MODULE_DESCRIPTION("NetEffect RNIC Low-level iWARP Driver");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);
-int max_mtu = 9000;
int interrupt_mod_interval = 0;
/* Interoperability */
@@ -516,7 +515,6 @@ static int nes_probe(struct pci_dev *pcidev, const struct pci_device_id *ent)
/* Allocate hardware structure */
nesdev = kzalloc(sizeof(struct nes_device), GFP_KERNEL);
if (!nesdev) {
- printk(KERN_ERR PFX "%s: Unable to alloc hardware struct\n", pci_name(pcidev));
ret = -ENOMEM;
goto bail2;
}
diff --git a/drivers/infiniband/hw/nes/nes.h b/drivers/infiniband/hw/nes/nes.h
index e7430c9254d3..85acd0843b50 100644
--- a/drivers/infiniband/hw/nes/nes.h
+++ b/drivers/infiniband/hw/nes/nes.h
@@ -83,6 +83,8 @@
#define NES_FIRST_QPN 64
#define NES_SW_CONTEXT_ALIGN 1024
+#define NES_MAX_MTU 9000
+
#define NES_NIC_MAX_NICS 16
#define NES_MAX_ARP_TABLE_SIZE 4096
@@ -169,8 +171,6 @@ do { \
#include "nes_cm.h"
#include "nes_mgt.h"
-extern int max_mtu;
-#define max_frame_len (max_mtu+ETH_HLEN)
extern int interrupt_mod_interval;
extern int nes_if_count;
extern int mpa_version;
diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
index 57db9b332f44..8e703479e7ce 100644
--- a/drivers/infiniband/hw/nes/nes_cm.c
+++ b/drivers/infiniband/hw/nes/nes_cm.c
@@ -2282,10 +2282,8 @@ static struct nes_cm_listener *mini_cm_listen(struct nes_cm_core *cm_core,
if (!listener) {
/* create a CM listen node (1/2 node to compare incoming traffic to) */
listener = kzalloc(sizeof(*listener), GFP_ATOMIC);
- if (!listener) {
- nes_debug(NES_DBG_CM, "Not creating listener memory allocation failed\n");
+ if (!listener)
return NULL;
- }
listener->loc_addr = cm_info->loc_addr;
listener->loc_port = cm_info->loc_port;
diff --git a/drivers/infiniband/hw/nes/nes_hw.c b/drivers/infiniband/hw/nes/nes_hw.c
index a1c6481d8038..19acd13c6cb1 100644
--- a/drivers/infiniband/hw/nes/nes_hw.c
+++ b/drivers/infiniband/hw/nes/nes_hw.c
@@ -351,9 +351,8 @@ struct nes_adapter *nes_init_adapter(struct nes_device *nesdev, u8 hw_rev) {
/* allocate a new adapter struct */
nesadapter = kzalloc(adapter_size, GFP_KERNEL);
- if (nesadapter == NULL) {
+ if (!nesadapter)
return NULL;
- }
nes_debug(NES_DBG_INIT, "Allocating new nesadapter @ %p, size = %u (actual size = %u).\n",
nesadapter, (u32)sizeof(struct nes_adapter), adapter_size);
@@ -1007,8 +1006,7 @@ int nes_init_cqp(struct nes_device *nesdev)
/* Allocate twice the number of CQP requests as the SQ size */
nesdev->nes_cqp_requests = kzalloc(sizeof(struct nes_cqp_request) *
2 * NES_CQP_SQ_SIZE, GFP_KERNEL);
- if (nesdev->nes_cqp_requests == NULL) {
- nes_debug(NES_DBG_INIT, "Unable to allocate memory CQP request entries.\n");
+ if (!nesdev->nes_cqp_requests) {
pci_free_consistent(nesdev->pcidev, nesdev->cqp_mem_size, nesdev->cqp.sq_vbase,
nesdev->cqp.sq_pbase);
return -ENOMEM;
diff --git a/drivers/infiniband/hw/nes/nes_mgt.c b/drivers/infiniband/hw/nes/nes_mgt.c
index 416645259b0f..33624f17c347 100644
--- a/drivers/infiniband/hw/nes/nes_mgt.c
+++ b/drivers/infiniband/hw/nes/nes_mgt.c
@@ -320,8 +320,7 @@ static int get_fpdu_info(struct nes_device *nesdev, struct nes_qp *nesqp,
/* Found one */
fpdu_info = kzalloc(sizeof(*fpdu_info), GFP_ATOMIC);
- if (fpdu_info == NULL) {
- nes_debug(NES_DBG_PAU, "Failed to alloc a fpdu_info.\n");
+ if (!fpdu_info) {
rc = -ENOMEM;
goto out;
}
@@ -729,8 +728,7 @@ static int nes_change_quad_hash(struct nes_device *nesdev,
}
qh_chg = kmalloc(sizeof *qh_chg, GFP_ATOMIC);
- if (qh_chg == NULL) {
- nes_debug(NES_DBG_PAU, "Failed to get a cqp_request.\n");
+ if (!qh_chg) {
ret = -ENOMEM;
goto chg_qh_err;
}
@@ -880,10 +878,8 @@ int nes_init_mgt_qp(struct nes_device *nesdev, struct net_device *netdev, struct
/* Allocate space for all the mgt QPs at once */
mgtvnic = kzalloc(NES_MGT_QP_COUNT * sizeof(struct nes_vnic_mgt), GFP_KERNEL);
- if (mgtvnic == NULL) {
- nes_debug(NES_DBG_INIT, "Unable to allocate memory for mgt structure\n");
+ if (!mgtvnic)
return -ENOMEM;
- }
/* Allocate fragment, RQ, and CQ; Reuse CEQ based on the PCI function */
/* We are not sending from this NIC so sq is not allocated */
diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c
index 2b27d1351cf7..5921ea3d50ae 100644
--- a/drivers/infiniband/hw/nes/nes_nic.c
+++ b/drivers/infiniband/hw/nes/nes_nic.c
@@ -662,10 +662,14 @@ tso_sq_no_longer_full:
nesnic->sq_head &= nesnic->sq_size-1;
}
} else {
- nesvnic->linearized_skbs++;
hoffset = skb_transport_header(skb) - skb->data;
nhoffset = skb_network_header(skb) - skb->data;
- skb_linearize(skb);
+ if (skb_linearize(skb)) {
+ nesvnic->tx_sw_dropped++;
+ kfree_skb(skb);
+ return NETDEV_TX_OK;
+ }
+ nesvnic->linearized_skbs++;
skb_set_transport_header(skb, hoffset);
skb_set_network_header(skb, nhoffset);
if (!nes_nic_send(skb, netdev))
@@ -981,20 +985,16 @@ static int nes_netdev_change_mtu(struct net_device *netdev, int new_mtu)
{
struct nes_vnic *nesvnic = netdev_priv(netdev);
struct nes_device *nesdev = nesvnic->nesdev;
- int ret = 0;
u8 jumbomode = 0;
u32 nic_active;
u32 nic_active_bit;
u32 uc_all_active;
u32 mc_all_active;
- if ((new_mtu < ETH_ZLEN) || (new_mtu > max_mtu))
- return -EINVAL;
-
netdev->mtu = new_mtu;
nesvnic->max_frame_size = new_mtu + VLAN_ETH_HLEN;
- if (netdev->mtu > 1500) {
+ if (netdev->mtu > ETH_DATA_LEN) {
jumbomode=1;
}
nes_nic_init_timer_defaults(nesdev, jumbomode);
@@ -1020,7 +1020,7 @@ static int nes_netdev_change_mtu(struct net_device *netdev, int new_mtu)
nes_write_indexed(nesdev, NES_IDX_NIC_UNICAST_ALL, nic_active);
}
- return ret;
+ return 0;
}
@@ -1465,7 +1465,8 @@ static int nes_netdev_set_pauseparam(struct net_device *netdev,
/**
* nes_netdev_get_settings
*/
-static int nes_netdev_get_settings(struct net_device *netdev, struct ethtool_cmd *et_cmd)
+static int nes_netdev_get_link_ksettings(struct net_device *netdev,
+ struct ethtool_link_ksettings *cmd)
{
struct nes_vnic *nesvnic = netdev_priv(netdev);
struct nes_device *nesdev = nesvnic->nesdev;
@@ -1474,54 +1475,59 @@ static int nes_netdev_get_settings(struct net_device *netdev, struct ethtool_cmd
u8 phy_type = nesadapter->phy_type[mac_index];
u8 phy_index = nesadapter->phy_index[mac_index];
u16 phy_data;
+ u32 supported, advertising;
- et_cmd->duplex = DUPLEX_FULL;
- et_cmd->port = PORT_MII;
- et_cmd->maxtxpkt = 511;
- et_cmd->maxrxpkt = 511;
+ cmd->base.duplex = DUPLEX_FULL;
+ cmd->base.port = PORT_MII;
if (nesadapter->OneG_Mode) {
- ethtool_cmd_speed_set(et_cmd, SPEED_1000);
+ cmd->base.speed = SPEED_1000;
if (phy_type == NES_PHY_TYPE_PUMA_1G) {
- et_cmd->supported = SUPPORTED_1000baseT_Full;
- et_cmd->advertising = ADVERTISED_1000baseT_Full;
- et_cmd->autoneg = AUTONEG_DISABLE;
- et_cmd->transceiver = XCVR_INTERNAL;
- et_cmd->phy_address = mac_index;
+ supported = SUPPORTED_1000baseT_Full;
+ advertising = ADVERTISED_1000baseT_Full;
+ cmd->base.autoneg = AUTONEG_DISABLE;
+ cmd->base.phy_address = mac_index;
} else {
unsigned long flags;
- et_cmd->supported = SUPPORTED_1000baseT_Full
- | SUPPORTED_Autoneg;
- et_cmd->advertising = ADVERTISED_1000baseT_Full
- | ADVERTISED_Autoneg;
+
+ supported = SUPPORTED_1000baseT_Full
+ | SUPPORTED_Autoneg;
+ advertising = ADVERTISED_1000baseT_Full
+ | ADVERTISED_Autoneg;
spin_lock_irqsave(&nesadapter->phy_lock, flags);
nes_read_1G_phy_reg(nesdev, 0, phy_index, &phy_data);
spin_unlock_irqrestore(&nesadapter->phy_lock, flags);
if (phy_data & 0x1000)
- et_cmd->autoneg = AUTONEG_ENABLE;
+ cmd->base.autoneg = AUTONEG_ENABLE;
else
- et_cmd->autoneg = AUTONEG_DISABLE;
- et_cmd->transceiver = XCVR_EXTERNAL;
- et_cmd->phy_address = phy_index;
+ cmd->base.autoneg = AUTONEG_DISABLE;
+ cmd->base.phy_address = phy_index;
}
+ ethtool_convert_legacy_u32_to_link_mode(
+ cmd->link_modes.supported, supported);
+ ethtool_convert_legacy_u32_to_link_mode(
+ cmd->link_modes.advertising, advertising);
return 0;
}
if ((phy_type == NES_PHY_TYPE_ARGUS) ||
(phy_type == NES_PHY_TYPE_SFP_D) ||
(phy_type == NES_PHY_TYPE_KR)) {
- et_cmd->transceiver = XCVR_EXTERNAL;
- et_cmd->port = PORT_FIBRE;
- et_cmd->supported = SUPPORTED_FIBRE;
- et_cmd->advertising = ADVERTISED_FIBRE;
- et_cmd->phy_address = phy_index;
+ cmd->base.port = PORT_FIBRE;
+ supported = SUPPORTED_FIBRE;
+ advertising = ADVERTISED_FIBRE;
+ cmd->base.phy_address = phy_index;
} else {
- et_cmd->transceiver = XCVR_INTERNAL;
- et_cmd->supported = SUPPORTED_10000baseT_Full;
- et_cmd->advertising = ADVERTISED_10000baseT_Full;
- et_cmd->phy_address = mac_index;
+ supported = SUPPORTED_10000baseT_Full;
+ advertising = ADVERTISED_10000baseT_Full;
+ cmd->base.phy_address = mac_index;
}
- ethtool_cmd_speed_set(et_cmd, SPEED_10000);
- et_cmd->autoneg = AUTONEG_DISABLE;
+ cmd->base.speed = SPEED_10000;
+ cmd->base.autoneg = AUTONEG_DISABLE;
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
+ supported);
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
+ advertising);
+
return 0;
}
@@ -1529,7 +1535,9 @@ static int nes_netdev_get_settings(struct net_device *netdev, struct ethtool_cmd
/**
* nes_netdev_set_settings
*/
-static int nes_netdev_set_settings(struct net_device *netdev, struct ethtool_cmd *et_cmd)
+static int
+nes_netdev_set_link_ksettings(struct net_device *netdev,
+ const struct ethtool_link_ksettings *cmd)
{
struct nes_vnic *nesvnic = netdev_priv(netdev);
struct nes_device *nesdev = nesvnic->nesdev;
@@ -1543,7 +1551,7 @@ static int nes_netdev_set_settings(struct net_device *netdev, struct ethtool_cmd
spin_lock_irqsave(&nesadapter->phy_lock, flags);
nes_read_1G_phy_reg(nesdev, 0, phy_index, &phy_data);
- if (et_cmd->autoneg) {
+ if (cmd->base.autoneg) {
/* Turn on Full duplex, Autoneg, and restart autonegotiation */
phy_data |= 0x1300;
} else {
@@ -1560,8 +1568,6 @@ static int nes_netdev_set_settings(struct net_device *netdev, struct ethtool_cmd
static const struct ethtool_ops nes_ethtool_ops = {
.get_link = ethtool_op_get_link,
- .get_settings = nes_netdev_get_settings,
- .set_settings = nes_netdev_set_settings,
.get_strings = nes_netdev_get_strings,
.get_sset_count = nes_netdev_get_sset_count,
.get_ethtool_stats = nes_netdev_get_ethtool_stats,
@@ -1570,6 +1576,8 @@ static const struct ethtool_ops nes_ethtool_ops = {
.set_coalesce = nes_netdev_set_coalesce,
.get_pauseparam = nes_netdev_get_pauseparam,
.set_pauseparam = nes_netdev_set_pauseparam,
+ .get_link_ksettings = nes_netdev_get_link_ksettings,
+ .set_link_ksettings = nes_netdev_set_link_ksettings,
};
static void nes_vlan_mode(struct net_device *netdev, struct nes_device *nesdev, netdev_features_t features)
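
The nes ethtool conversion above replaces the legacy get_settings/set_settings u32 words with link_ksettings bitmaps; ethtool_convert_legacy_u32_to_link_mode() maps each legacy SUPPORTED_xxx/ADVERTISED_xxx bit into the new bitmap. A toy model of that conversion — the bitmap size and helper here are illustrative, not the net core source:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define LINK_MODE_WORDS 2            /* illustrative bitmap size */

    static void legacy_u32_to_link_mode(unsigned long *dst, uint32_t legacy)
    {
        memset(dst, 0, LINK_MODE_WORDS * sizeof(*dst));
        dst[0] = legacy;                 /* legacy bits map 1:1 into word 0 */
    }

    int main(void)
    {
        unsigned long supported[LINK_MODE_WORDS];

        legacy_u32_to_link_mode(supported, 1u << 5 /* 1000baseT_Full-ish */);
        printf("word 0: 0x%lx\n", supported[0]);
        return 0;
    }
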
@@ -1658,7 +1666,7 @@ struct net_device *nes_netdev_init(struct nes_device *nesdev,
netdev->watchdog_timeo = NES_TX_TIMEOUT;
netdev->irq = nesdev->pcidev->irq;
- netdev->mtu = ETH_DATA_LEN;
+ netdev->max_mtu = NES_MAX_MTU;
netdev->hard_header_len = ETH_HLEN;
netdev->addr_len = ETH_ALEN;
netdev->type = ARPHRD_ETHER;
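
Publishing netdev->max_mtu lets the core reject out-of-range MTUs before ndo_change_mtu runs, which is why the driver's own ETH_ZLEN/max_mtu test could be deleted above. Roughly the range check the core performs — illustrative, not the actual net core code:

    #include <errno.h>

    #define ETH_ZLEN    60
    #define NES_MAX_MTU 9000

    static int dev_validate_mtu(int new_mtu, int min_mtu, int max_mtu)
    {
        if (new_mtu < min_mtu || new_mtu > max_mtu)
            return -EINVAL;              /* driver callback never runs */
        return 0;
    }

    int main(void)
    {
        return dev_validate_mtu(1500, ETH_ZLEN, NES_MAX_MTU) ? 1 : 0;
    }
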
diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
index bd69125731c1..aff9fb14768b 100644
--- a/drivers/infiniband/hw/nes/nes_verbs.c
+++ b/drivers/infiniband/hw/nes/nes_verbs.c
@@ -771,7 +771,8 @@ static int nes_dealloc_pd(struct ib_pd *ibpd)
/**
* nes_create_ah
*/
-static struct ib_ah *nes_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr)
+static struct ib_ah *nes_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr,
+ struct ib_udata *udata)
{
return ERR_PTR(-ENOSYS);
}
@@ -1075,7 +1076,6 @@ static struct ib_qp *nes_create_qp(struct ib_pd *ibpd,
mem = kzalloc(sizeof(*nesqp)+NES_SW_CONTEXT_ALIGN-1, GFP_KERNEL);
if (!mem) {
nes_free_resource(nesadapter, nesadapter->allocated_qps, qp_num);
- nes_debug(NES_DBG_QP, "Unable to allocate QP\n");
return ERR_PTR(-ENOMEM);
}
u64nesqp = (unsigned long)mem;
@@ -1475,7 +1475,6 @@ static struct ib_cq *nes_create_cq(struct ib_device *ibdev,
nescq = kzalloc(sizeof(struct nes_cq), GFP_KERNEL);
if (!nescq) {
nes_free_resource(nesadapter, nesadapter->allocated_cqs, cq_num);
- nes_debug(NES_DBG_CQ, "Unable to allocate nes_cq struct\n");
return ERR_PTR(-ENOMEM);
}
@@ -2408,7 +2407,6 @@ static struct ib_mr *nes_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
}
nespbl = kzalloc(sizeof(*nespbl), GFP_KERNEL);
if (!nespbl) {
- nes_debug(NES_DBG_MR, "Unable to allocate PBL\n");
ib_umem_release(region);
return ERR_PTR(-ENOMEM);
}
@@ -2416,7 +2414,6 @@ static struct ib_mr *nes_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
if (!nesmr) {
ib_umem_release(region);
kfree(nespbl);
- nes_debug(NES_DBG_MR, "Unable to allocate nesmr\n");
return ERR_PTR(-ENOMEM);
}
nesmr->region = region;
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_ah.c b/drivers/infiniband/hw/ocrdma/ocrdma_ah.c
index 797362a297b2..14d33b0f3950 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_ah.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_ah.c
@@ -154,7 +154,8 @@ static inline int set_av_attr(struct ocrdma_dev *dev, struct ocrdma_ah *ah,
return status;
}
-struct ib_ah *ocrdma_create_ah(struct ib_pd *ibpd, struct ib_ah_attr *attr)
+struct ib_ah *ocrdma_create_ah(struct ib_pd *ibpd, struct ib_ah_attr *attr,
+ struct ib_udata *udata)
{
u32 *ahid_addr;
int status;
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_ah.h b/drivers/infiniband/hw/ocrdma/ocrdma_ah.h
index 3856dd4c7e3d..0704a24b17c8 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_ah.h
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_ah.h
@@ -50,7 +50,9 @@ enum {
OCRDMA_AH_L3_TYPE_MASK = 0x03,
OCRDMA_AH_L3_TYPE_SHIFT = 0x1D /* 29 bits */
};
-struct ib_ah *ocrdma_create_ah(struct ib_pd *, struct ib_ah_attr *);
+
+struct ib_ah *ocrdma_create_ah(struct ib_pd *, struct ib_ah_attr *,
+ struct ib_udata *);
int ocrdma_destroy_ah(struct ib_ah *);
int ocrdma_query_ah(struct ib_ah *, struct ib_ah_attr *);
int ocrdma_modify_ah(struct ib_ah *, struct ib_ah_attr *);
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
index 67fc0b6857e1..9a305201545e 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
@@ -1596,10 +1596,9 @@ void ocrdma_alloc_pd_pool(struct ocrdma_dev *dev)
dev->pd_mgr = kzalloc(sizeof(struct ocrdma_pd_resource_mgr),
GFP_KERNEL);
- if (!dev->pd_mgr) {
- pr_err("%s(%d)Memory allocation failure.\n", __func__, dev->id);
+ if (!dev->pd_mgr)
return;
- }
+
status = ocrdma_mbx_alloc_pd_range(dev);
if (status) {
pr_err("%s(%d) Unable to initialize PD pool, using default.\n",
@@ -1642,7 +1641,7 @@ static int ocrdma_build_q_conf(u32 *num_entries, int entry_size,
static int ocrdma_mbx_create_ah_tbl(struct ocrdma_dev *dev)
{
int i;
- int status = 0;
+ int status = -ENOMEM;
int max_ah;
struct ocrdma_create_ah_tbl *cmd;
struct ocrdma_create_ah_tbl_rsp *rsp;
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_stats.c b/drivers/infiniband/hw/ocrdma/ocrdma_stats.c
index 8bef09a8c49f..f8e4b0a6486f 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_stats.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_stats.c
@@ -84,10 +84,8 @@ bool ocrdma_alloc_stats_resources(struct ocrdma_dev *dev)
/* Alloc debugfs mem */
mem->debugfs_mem = kzalloc(OCRDMA_MAX_DBGFS_MEM, GFP_KERNEL);
- if (!mem->debugfs_mem) {
- pr_err("%s: stats debugfs mem allocation failed\n", __func__);
+ if (!mem->debugfs_mem)
return false;
- }
return true;
}
diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c
index a61514296767..57c8de208077 100644
--- a/drivers/infiniband/hw/qedr/verbs.c
+++ b/drivers/infiniband/hw/qedr/verbs.c
@@ -511,8 +511,10 @@ int qedr_dealloc_pd(struct ib_pd *ibpd)
struct qedr_dev *dev = get_qedr_dev(ibpd->device);
struct qedr_pd *pd = get_qedr_pd(ibpd);
- if (!pd)
+ if (!pd) {
pr_err("Invalid PD received in dealloc_pd\n");
+ return -EINVAL;
+ }
DP_DEBUG(dev, QEDR_MSG_INIT, "Deallocating PD %d\n", pd->pd_id);
dev->ops->rdma_dealloc_pd(dev->rdma_ctx, pd->pd_id);
@@ -888,6 +890,8 @@ struct ib_cq *qedr_create_cq(struct ib_device *ibdev,
pbl_ptr = cq->q.pbl_tbl->pa;
page_cnt = cq->q.pbl_info.num_pbes;
+
+ cq->ibcq.cqe = chain_entries;
} else {
cq->cq_type = QEDR_CQ_TYPE_KERNEL;
@@ -903,6 +907,7 @@ struct ib_cq *qedr_create_cq(struct ib_device *ibdev,
page_cnt = qed_chain_get_page_cnt(&cq->pbl);
pbl_ptr = qed_chain_get_pbl_phys(&cq->pbl);
+ cq->ibcq.cqe = cq->pbl.capacity;
}
qedr_init_cq_params(cq, ctx, dev, vector, chain_entries, page_cnt,
@@ -980,8 +985,13 @@ int qedr_destroy_cq(struct ib_cq *ibcq)
/* GSIs CQs are handled by driver, so they don't exist in the FW */
if (cq->cq_type != QEDR_CQ_TYPE_GSI) {
+ int rc;
+
iparams.icid = cq->icid;
- dev->ops->rdma_destroy_cq(dev->rdma_ctx, &iparams, &oparams);
+ rc = dev->ops->rdma_destroy_cq(dev->rdma_ctx, &iparams,
+ &oparams);
+ if (rc)
+ return rc;
dev->ops->common->chain_free(dev->cdev, &cq->pbl);
}
@@ -1477,6 +1487,7 @@ struct ib_qp *qedr_create_qp(struct ib_pd *ibpd,
struct qedr_ucontext *ctx = NULL;
struct qedr_create_qp_ureq ureq;
struct qedr_qp *qp;
+ struct ib_qp *ibqp;
int rc = 0;
DP_DEBUG(dev, QEDR_MSG_QP, "create qp: called from %s, pd=%p\n",
@@ -1486,13 +1497,13 @@ struct ib_qp *qedr_create_qp(struct ib_pd *ibpd,
if (rc)
return ERR_PTR(rc);
+ if (attrs->srq)
+ return ERR_PTR(-EINVAL);
+
qp = kzalloc(sizeof(*qp), GFP_KERNEL);
if (!qp)
return ERR_PTR(-ENOMEM);
- if (attrs->srq)
- return ERR_PTR(-EINVAL);
-
DP_DEBUG(dev, QEDR_MSG_QP,
"create qp: sq_cq=%p, sq_icid=%d, rq_cq=%p, rq_icid=%d\n",
get_qedr_cq(attrs->send_cq),
@@ -1508,7 +1519,10 @@ struct ib_qp *qedr_create_qp(struct ib_pd *ibpd,
"create qp: unexpected udata when creating GSI QP\n");
goto err0;
}
- return qedr_create_gsi_qp(dev, attrs, qp);
+ ibqp = qedr_create_gsi_qp(dev, attrs, qp);
+ if (IS_ERR(ibqp))
+ kfree(qp);
+ return ibqp;
}
memset(&in_params, 0, sizeof(in_params));
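
The qedr_create_qp() reordering fixes two leaks: the srq check now runs before the allocation, and the GSI branch frees the container when qedr_create_gsi_qp() hands back an ERR_PTR. A compilable toy of both fixes, with ERR_PTR/IS_ERR re-modelled in userspace and names invented:

    #include <errno.h>
    #include <stdlib.h>

    #define ERR_PTR(e) ((void *)(long)(e))
    #define IS_ERR(p)  ((unsigned long)(p) >= (unsigned long)-4095)

    struct qp { int id; };

    static void *create_gsi(struct qp *qp)
    {
        (void)qp;
        return ERR_PTR(-ENODEV);         /* simulate a GSI failure */
    }

    static void *create_qp(int has_srq, int gsi)
    {
        struct qp *qp;
        void *ibqp;

        if (has_srq)                     /* validate before allocating */
            return ERR_PTR(-EINVAL);

        qp = calloc(1, sizeof(*qp));
        if (!qp)
            return ERR_PTR(-ENOMEM);

        if (gsi) {
            ibqp = create_gsi(qp);
            if (IS_ERR(ibqp))
                free(qp);                /* leaked before the fix */
            return ibqp;
        }
        return qp;
    }

    int main(void) { return IS_ERR(create_qp(0, 1)) ? 0 : 1; }
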
@@ -1960,7 +1974,7 @@ int qedr_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
if (attr_mask & IB_QP_STATE) {
if ((qp->qp_type != IB_QPT_GSI) && (!udata))
- qedr_update_qp_state(dev, qp, qp_params.new_state);
+ rc = qedr_update_qp_state(dev, qp, qp_params.new_state);
qp->state = qp_params.new_state;
}
@@ -2064,8 +2078,10 @@ int qedr_destroy_qp(struct ib_qp *ibqp)
DP_DEBUG(dev, QEDR_MSG_QP, "destroy qp: destroying %p, qp type=%d\n",
qp, qp->qp_type);
- if (qp->state != (QED_ROCE_QP_STATE_RESET | QED_ROCE_QP_STATE_ERR |
- QED_ROCE_QP_STATE_INIT)) {
+ if ((qp->state != QED_ROCE_QP_STATE_RESET) &&
+ (qp->state != QED_ROCE_QP_STATE_ERR) &&
+ (qp->state != QED_ROCE_QP_STATE_INIT)) {
+
attr.qp_state = IB_QPS_ERR;
attr_mask |= IB_QP_STATE;
@@ -2094,7 +2110,8 @@ int qedr_destroy_qp(struct ib_qp *ibqp)
return rc;
}
-struct ib_ah *qedr_create_ah(struct ib_pd *ibpd, struct ib_ah_attr *attr)
+struct ib_ah *qedr_create_ah(struct ib_pd *ibpd, struct ib_ah_attr *attr,
+ struct ib_udata *udata)
{
struct qedr_ah *ah;
@@ -2413,8 +2430,7 @@ static void handle_completed_mrs(struct qedr_dev *dev, struct mr_info *info)
*/
pbl = list_first_entry(&info->inuse_pbl_list,
struct qedr_pbl, list_entry);
- list_del(&pbl->list_entry);
- list_add_tail(&pbl->list_entry, &info->free_pbl_list);
+ list_move_tail(&pbl->list_entry, &info->free_pbl_list);
info->completed_handled++;
}
}
@@ -2620,7 +2636,9 @@ static u32 qedr_prepare_sq_rdma_data(struct qedr_dev *dev,
rwqe2->r_key = cpu_to_le32(rdma_wr(wr)->rkey);
DMA_REGPAIR_LE(rwqe2->remote_va, rdma_wr(wr)->remote_addr);
- if (wr->send_flags & IB_SEND_INLINE) {
+ if (wr->send_flags & IB_SEND_INLINE &&
+ (wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM ||
+ wr->opcode == IB_WR_RDMA_WRITE)) {
u8 flags = 0;
SET_FIELD2(flags, RDMA_SQ_RDMA_WQE_1ST_INLINE_FLG, 1);
@@ -2971,8 +2989,9 @@ int qedr_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
spin_lock_irqsave(&qp->q_lock, flags);
- if ((qp->state == QED_ROCE_QP_STATE_RESET) ||
- (qp->state == QED_ROCE_QP_STATE_ERR)) {
+ if ((qp->state != QED_ROCE_QP_STATE_RTS) &&
+ (qp->state != QED_ROCE_QP_STATE_ERR) &&
+ (qp->state != QED_ROCE_QP_STATE_SQD)) {
spin_unlock_irqrestore(&qp->q_lock, flags);
*bad_wr = wr;
DP_DEBUG(dev, QEDR_MSG_CQ,
@@ -2981,11 +3000,6 @@ int qedr_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
return -EINVAL;
}
- if (!wr) {
- DP_ERR(dev, "Got an empty post send.\n");
- return -EINVAL;
- }
-
while (wr) {
rc = __qedr_post_send(ibqp, wr, bad_wr);
if (rc)
@@ -3030,8 +3044,7 @@ int qedr_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
spin_lock_irqsave(&qp->q_lock, flags);
- if ((qp->state == QED_ROCE_QP_STATE_RESET) ||
- (qp->state == QED_ROCE_QP_STATE_ERR)) {
+ if (qp->state == QED_ROCE_QP_STATE_RESET) {
spin_unlock_irqrestore(&qp->q_lock, flags);
*bad_wr = wr;
return -EINVAL;
@@ -3173,6 +3186,7 @@ static int process_req(struct qedr_dev *dev, struct qedr_qp *qp,
/* fill WC */
wc->status = status;
+ wc->vendor_err = 0;
wc->wc_flags = 0;
wc->src_qp = qp->id;
wc->qp = &qp->ibqp;
@@ -3224,7 +3238,7 @@ static int qedr_poll_cq_req(struct qedr_dev *dev,
"Error: POLL CQ with RDMA_CQE_REQ_STS_WORK_REQUEST_FLUSHED_ERR. CQ icid=0x%x, QP icid=0x%x\n",
cq->icid, qp->icid);
cnt = process_req(dev, qp, cq, num_entries, wc, req->sq_cons,
- IB_WC_WR_FLUSH_ERR, 0);
+ IB_WC_WR_FLUSH_ERR, 1);
break;
default:
/* process all WQE before the consumer */
@@ -3362,6 +3376,7 @@ static void __process_resp_one(struct qedr_dev *dev, struct qedr_qp *qp,
/* fill WC */
wc->status = wc_status;
+ wc->vendor_err = 0;
wc->src_qp = qp->id;
wc->qp = &qp->ibqp;
wc->wr_id = wr_id;
@@ -3390,6 +3405,7 @@ static int process_resp_flush(struct qedr_qp *qp, struct qedr_cq *cq,
while (num_entries && qp->rq.wqe_cons != hw_cons) {
/* fill WC */
wc->status = IB_WC_WR_FLUSH_ERR;
+ wc->vendor_err = 0;
wc->wc_flags = 0;
wc->src_qp = qp->id;
wc->byte_len = 0;
diff --git a/drivers/infiniband/hw/qedr/verbs.h b/drivers/infiniband/hw/qedr/verbs.h
index a9b5e67bb81e..070677ca4d19 100644
--- a/drivers/infiniband/hw/qedr/verbs.h
+++ b/drivers/infiniband/hw/qedr/verbs.h
@@ -70,7 +70,8 @@ int qedr_query_qp(struct ib_qp *, struct ib_qp_attr *qp_attr,
int qp_attr_mask, struct ib_qp_init_attr *);
int qedr_destroy_qp(struct ib_qp *ibqp);
-struct ib_ah *qedr_create_ah(struct ib_pd *ibpd, struct ib_ah_attr *attr);
+struct ib_ah *qedr_create_ah(struct ib_pd *ibpd, struct ib_ah_attr *attr,
+ struct ib_udata *udata);
int qedr_destroy_ah(struct ib_ah *ibah);
int qedr_dereg_mr(struct ib_mr *);
diff --git a/drivers/infiniband/hw/qib/qib_diag.c b/drivers/infiniband/hw/qib/qib_diag.c
index 8c34b23e5bf6..775018b32b0d 100644
--- a/drivers/infiniband/hw/qib/qib_diag.c
+++ b/drivers/infiniband/hw/qib/qib_diag.c
@@ -609,8 +609,6 @@ static ssize_t qib_diagpkt_write(struct file *fp,
tmpbuf = vmalloc(plen);
if (!tmpbuf) {
- qib_devinfo(dd->pcidev,
- "Unable to allocate tmp buffer, failing\n");
ret = -ENOMEM;
goto bail;
}
@@ -702,10 +700,8 @@ int qib_register_observer(struct qib_devdata *dd,
if (!dd || !op)
return -EINVAL;
olp = vmalloc(sizeof(*olp));
- if (!olp) {
- pr_err("vmalloc for observer failed\n");
+ if (!olp)
return -ENOMEM;
- }
spin_lock_irqsave(&dd->qib_diag_trans_lock, flags);
olp->op = op;
diff --git a/drivers/infiniband/hw/qib/qib_driver.c b/drivers/infiniband/hw/qib/qib_driver.c
index 728e0a030d2e..2b5982f743ef 100644
--- a/drivers/infiniband/hw/qib/qib_driver.c
+++ b/drivers/infiniband/hw/qib/qib_driver.c
@@ -420,8 +420,7 @@ static u32 qib_rcv_hdrerr(struct qib_ctxtdata *rcd, struct qib_pportdata *ppd,
if (list_empty(&qp->rspwait)) {
qp->r_flags |=
RVT_R_RSP_NAK;
- atomic_inc(
- &qp->refcount);
+ rvt_get_qp(qp);
list_add_tail(
&qp->rspwait,
&rcd->qp_wait_list);
diff --git a/drivers/infiniband/hw/qib/qib_eeprom.c b/drivers/infiniband/hw/qib/qib_eeprom.c
index 311ee6c3dd5e..33a2e74c8495 100644
--- a/drivers/infiniband/hw/qib/qib_eeprom.c
+++ b/drivers/infiniband/hw/qib/qib_eeprom.c
@@ -182,12 +182,8 @@ void qib_get_eeprom_info(struct qib_devdata *dd)
* */
len = sizeof(struct qib_flash);
buf = vmalloc(len);
- if (!buf) {
- qib_dev_err(dd,
- "Couldn't allocate memory to read %u bytes from eeprom for GUID\n",
- len);
+ if (!buf)
goto bail;
- }
/*
* Use "public" eeprom read function, which does locking and
diff --git a/drivers/infiniband/hw/qib/qib_file_ops.c b/drivers/infiniband/hw/qib/qib_file_ops.c
index 382466a90da7..2d1eacf1dfed 100644
--- a/drivers/infiniband/hw/qib/qib_file_ops.c
+++ b/drivers/infiniband/hw/qib/qib_file_ops.c
@@ -2066,8 +2066,11 @@ static ssize_t qib_write(struct file *fp, const char __user *data,
ssize_t ret = 0;
void *dest;
- if (WARN_ON_ONCE(!ib_safe_file_access(fp)))
+ if (!ib_safe_file_access(fp)) {
+ pr_err_once("qib_write: process %d (%s) changed security contexts after opening file descriptor, this is not allowed.\n",
+ task_tgid_vnr(current), current->comm);
return -EACCES;
+ }
if (count < sizeof(cmd.type)) {
ret = -EINVAL;
diff --git a/drivers/infiniband/hw/qib/qib_iba6120.c b/drivers/infiniband/hw/qib/qib_iba6120.c
index a3733f25280f..92399d3ffd15 100644
--- a/drivers/infiniband/hw/qib/qib_iba6120.c
+++ b/drivers/infiniband/hw/qib/qib_iba6120.c
@@ -1759,9 +1759,7 @@ static void pe_boardname(struct qib_devdata *dd)
}
namelen = strlen(n) + 1;
dd->boardname = kmalloc(namelen, GFP_KERNEL);
- if (!dd->boardname)
- qib_dev_err(dd, "Failed allocation for board name: %s\n", n);
- else
+ if (dd->boardname)
snprintf(dd->boardname, namelen, "%s", n);
if (dd->majrev != 4 || !dd->minrev || dd->minrev > 2)
@@ -2533,8 +2531,6 @@ static void init_6120_cntrnames(struct qib_devdata *dd)
dd->cspec->cntrnamelen = 1 + s - cntr6120names;
dd->cspec->cntrs = kmalloc(dd->cspec->ncntrs
* sizeof(u64), GFP_KERNEL);
- if (!dd->cspec->cntrs)
- qib_dev_err(dd, "Failed allocation for counters\n");
for (i = 0, s = (char *)portcntr6120names; s; i++)
s = strchr(s + 1, '\n');
@@ -2542,8 +2538,6 @@ static void init_6120_cntrnames(struct qib_devdata *dd)
dd->cspec->portcntrnamelen = sizeof(portcntr6120names) - 1;
dd->cspec->portcntrs = kmalloc(dd->cspec->nportcntrs
* sizeof(u64), GFP_KERNEL);
- if (!dd->cspec->portcntrs)
- qib_dev_err(dd, "Failed allocation for portcounters\n");
}
static u32 qib_read_6120cntrs(struct qib_devdata *dd, loff_t pos, char **namep,
diff --git a/drivers/infiniband/hw/qib/qib_iba7220.c b/drivers/infiniband/hw/qib/qib_iba7220.c
index 00b2af211157..e55e31a69195 100644
--- a/drivers/infiniband/hw/qib/qib_iba7220.c
+++ b/drivers/infiniband/hw/qib/qib_iba7220.c
@@ -2070,9 +2070,7 @@ static void qib_7220_boardname(struct qib_devdata *dd)
namelen = strlen(n) + 1;
dd->boardname = kmalloc(namelen, GFP_KERNEL);
- if (!dd->boardname)
- qib_dev_err(dd, "Failed allocation for board name: %s\n", n);
- else
+ if (dd->boardname)
snprintf(dd->boardname, namelen, "%s", n);
if (dd->majrev != 5 || !dd->minrev || dd->minrev > 2)
@@ -3179,8 +3177,6 @@ static void init_7220_cntrnames(struct qib_devdata *dd)
dd->cspec->cntrnamelen = 1 + s - cntr7220names;
dd->cspec->cntrs = kmalloc(dd->cspec->ncntrs
* sizeof(u64), GFP_KERNEL);
- if (!dd->cspec->cntrs)
- qib_dev_err(dd, "Failed allocation for counters\n");
for (i = 0, s = (char *)portcntr7220names; s; i++)
s = strchr(s + 1, '\n');
@@ -3188,8 +3184,6 @@ static void init_7220_cntrnames(struct qib_devdata *dd)
dd->cspec->portcntrnamelen = sizeof(portcntr7220names) - 1;
dd->cspec->portcntrs = kmalloc(dd->cspec->nportcntrs
* sizeof(u64), GFP_KERNEL);
- if (!dd->cspec->portcntrs)
- qib_dev_err(dd, "Failed allocation for portcounters\n");
}
static u32 qib_read_7220cntrs(struct qib_devdata *dd, loff_t pos, char **namep,
diff --git a/drivers/infiniband/hw/qib/qib_iba7322.c b/drivers/infiniband/hw/qib/qib_iba7322.c
index ded27172320e..c4a3616062f1 100644
--- a/drivers/infiniband/hw/qib/qib_iba7322.c
+++ b/drivers/infiniband/hw/qib/qib_iba7322.c
@@ -3627,9 +3627,7 @@ static unsigned qib_7322_boardname(struct qib_devdata *dd)
namelen = strlen(n) + 1;
dd->boardname = kmalloc(namelen, GFP_KERNEL);
- if (!dd->boardname)
- qib_dev_err(dd, "Failed allocation for board name: %s\n", n);
- else
+ if (dd->boardname)
snprintf(dd->boardname, namelen, "%s", n);
snprintf(dd->boardversion, sizeof(dd->boardversion),
@@ -3656,7 +3654,7 @@ static unsigned qib_7322_boardname(struct qib_devdata *dd)
static int qib_do_7322_reset(struct qib_devdata *dd)
{
u64 val;
- u64 *msix_vecsave;
+ u64 *msix_vecsave = NULL;
int i, msix_entries, ret = 1;
u16 cmdval;
u8 int_line, clinesz;
@@ -3677,10 +3675,7 @@ static int qib_do_7322_reset(struct qib_devdata *dd)
/* can be up to 512 bytes, too big for stack */
msix_vecsave = kmalloc(2 * dd->cspec->num_msix_entries *
sizeof(u64), GFP_KERNEL);
- if (!msix_vecsave)
- qib_dev_err(dd, "No mem to save MSIx data\n");
- } else
- msix_vecsave = NULL;
+ }
/*
* Core PCI (as of 2.6.18) doesn't save or rewrite the full vector
@@ -5043,8 +5038,6 @@ static void init_7322_cntrnames(struct qib_devdata *dd)
dd->cspec->cntrnamelen = 1 + s - cntr7322names;
dd->cspec->cntrs = kmalloc(dd->cspec->ncntrs
* sizeof(u64), GFP_KERNEL);
- if (!dd->cspec->cntrs)
- qib_dev_err(dd, "Failed allocation for counters\n");
for (i = 0, s = (char *)portcntr7322names; s; i++)
s = strchr(s + 1, '\n');
@@ -5053,9 +5046,6 @@ static void init_7322_cntrnames(struct qib_devdata *dd)
for (i = 0; i < dd->num_pports; ++i) {
dd->pport[i].cpspec->portcntrs = kmalloc(dd->cspec->nportcntrs
* sizeof(u64), GFP_KERNEL);
- if (!dd->pport[i].cpspec->portcntrs)
- qib_dev_err(dd,
- "Failed allocation for portcounters\n");
}
}
@@ -6461,7 +6451,6 @@ static int qib_init_7322_variables(struct qib_devdata *dd)
sizeof(*dd->cspec->sendibchk), GFP_KERNEL);
if (!dd->cspec->sendchkenable || !dd->cspec->sendgrhchk ||
!dd->cspec->sendibchk) {
- qib_dev_err(dd, "Failed allocation for hdrchk bitmaps\n");
ret = -ENOMEM;
goto bail;
}
@@ -7338,10 +7327,9 @@ struct qib_devdata *qib_init_iba7322_funcs(struct pci_dev *pdev,
tabsize = actual_cnt;
dd->cspec->msix_entries = kzalloc(tabsize *
sizeof(struct qib_msix_entry), GFP_KERNEL);
- if (!dd->cspec->msix_entries) {
- qib_dev_err(dd, "No memory for MSIx table\n");
+ if (!dd->cspec->msix_entries)
tabsize = 0;
- }
+
for (i = 0; i < tabsize; i++)
dd->cspec->msix_entries[i].msix.entry = i;
diff --git a/drivers/infiniband/hw/qib/qib_init.c b/drivers/infiniband/hw/qib/qib_init.c
index 1730aa839a47..b50240b1d5a4 100644
--- a/drivers/infiniband/hw/qib/qib_init.c
+++ b/drivers/infiniband/hw/qib/qib_init.c
@@ -133,11 +133,8 @@ int qib_create_ctxts(struct qib_devdata *dd)
* cleanup iterates across all possible ctxts.
*/
dd->rcd = kcalloc(dd->ctxtcnt, sizeof(*dd->rcd), GFP_KERNEL);
- if (!dd->rcd) {
- qib_dev_err(dd,
- "Unable to allocate ctxtdata array, failing\n");
+ if (!dd->rcd)
return -ENOMEM;
- }
/* create (one or more) kctxt */
for (i = 0; i < dd->first_user_ctxt; ++i) {
@@ -265,39 +262,23 @@ int qib_init_pportdata(struct qib_pportdata *ppd, struct qib_devdata *dd,
size = IB_CC_TABLE_CAP_DEFAULT * sizeof(struct ib_cc_table_entry)
* IB_CCT_ENTRIES;
ppd->ccti_entries = kzalloc(size, GFP_KERNEL);
- if (!ppd->ccti_entries) {
- qib_dev_err(dd,
- "failed to allocate congestion control table for port %d!\n",
- port);
+ if (!ppd->ccti_entries)
goto bail;
- }
size = IB_CC_CCS_ENTRIES * sizeof(struct ib_cc_congestion_entry);
ppd->congestion_entries = kzalloc(size, GFP_KERNEL);
- if (!ppd->congestion_entries) {
- qib_dev_err(dd,
- "failed to allocate congestion setting list for port %d!\n",
- port);
+ if (!ppd->congestion_entries)
goto bail_1;
- }
size = sizeof(struct cc_table_shadow);
ppd->ccti_entries_shadow = kzalloc(size, GFP_KERNEL);
- if (!ppd->ccti_entries_shadow) {
- qib_dev_err(dd,
- "failed to allocate shadow ccti list for port %d!\n",
- port);
+ if (!ppd->ccti_entries_shadow)
goto bail_2;
- }
size = sizeof(struct ib_cc_congestion_setting_attr);
ppd->congestion_entries_shadow = kzalloc(size, GFP_KERNEL);
- if (!ppd->congestion_entries_shadow) {
- qib_dev_err(dd,
- "failed to allocate shadow congestion setting list for port %d!\n",
- port);
+ if (!ppd->congestion_entries_shadow)
goto bail_3;
- }
return 0;
@@ -391,18 +372,12 @@ static void init_shadow_tids(struct qib_devdata *dd)
dma_addr_t *addrs;
pages = vzalloc(dd->cfgctxts * dd->rcvtidcnt * sizeof(struct page *));
- if (!pages) {
- qib_dev_err(dd,
- "failed to allocate shadow page * array, no expected sends!\n");
+ if (!pages)
goto bail;
- }
addrs = vzalloc(dd->cfgctxts * dd->rcvtidcnt * sizeof(dma_addr_t));
- if (!addrs) {
- qib_dev_err(dd,
- "failed to allocate shadow dma handle array, no expected sends!\n");
+ if (!addrs)
goto bail_free;
- }
dd->pageshadow = pages;
dd->physshadow = addrs;
@@ -1026,11 +1001,8 @@ static void qib_verify_pioperf(struct qib_devdata *dd)
cnt = 1024;
addr = vmalloc(cnt);
- if (!addr) {
- qib_devinfo(dd->pcidev,
- "Couldn't get memory for checking PIO perf, skipping\n");
+ if (!addr)
goto done;
- }
preempt_disable(); /* we want reasonably accurate elapsed time */
msecs = 1 + jiffies_to_msecs(jiffies);
@@ -1172,9 +1144,6 @@ struct qib_devdata *qib_alloc_devdata(struct pci_dev *pdev, size_t extra)
sizeof(long), GFP_KERNEL);
if (qib_cpulist)
qib_cpulist_count = count;
- else
- qib_early_err(&pdev->dev,
- "Could not alloc cpulist info, cpu affinity might be wrong\n");
}
#ifdef CONFIG_DEBUG_FS
qib_dbg_ibdev_init(&dd->verbs_dev);
diff --git a/drivers/infiniband/hw/qib/qib_rc.c b/drivers/infiniband/hw/qib/qib_rc.c
index 2097512e75aa..031433cb7206 100644
--- a/drivers/infiniband/hw/qib/qib_rc.c
+++ b/drivers/infiniband/hw/qib/qib_rc.c
@@ -941,8 +941,6 @@ void qib_rc_send_complete(struct rvt_qp *qp, struct ib_header *hdr)
{
struct ib_other_headers *ohdr;
struct rvt_swqe *wqe;
- struct ib_wc wc;
- unsigned i;
u32 opcode;
u32 psn;
@@ -988,22 +986,8 @@ void qib_rc_send_complete(struct rvt_qp *qp, struct ib_header *hdr)
qp->s_last = s_last;
/* see post_send() */
barrier();
- for (i = 0; i < wqe->wr.num_sge; i++) {
- struct rvt_sge *sge = &wqe->sg_list[i];
-
- rvt_put_mr(sge->mr);
- }
- /* Post a send completion queue entry if requested. */
- if (!(qp->s_flags & RVT_S_SIGNAL_REQ_WR) ||
- (wqe->wr.send_flags & IB_SEND_SIGNALED)) {
- memset(&wc, 0, sizeof(wc));
- wc.wr_id = wqe->wr.wr_id;
- wc.status = IB_WC_SUCCESS;
- wc.opcode = ib_qib_wc_opcode[wqe->wr.opcode];
- wc.byte_len = wqe->length;
- wc.qp = &qp->ibqp;
- rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.send_cq), &wc, 0);
- }
+ rvt_put_swqe(wqe);
+ rvt_qp_swqe_complete(qp, wqe, IB_WC_SUCCESS);
}
/*
* If we were waiting for sends to complete before resending,
@@ -1032,9 +1016,6 @@ static struct rvt_swqe *do_rc_completion(struct rvt_qp *qp,
struct rvt_swqe *wqe,
struct qib_ibport *ibp)
{
- struct ib_wc wc;
- unsigned i;
-
/*
* Don't decrement refcount and don't generate a
* completion if the SWQE is being resent until the send
@@ -1044,28 +1025,14 @@ static struct rvt_swqe *do_rc_completion(struct rvt_qp *qp,
qib_cmp24(qp->s_sending_psn, qp->s_sending_hpsn) > 0) {
u32 s_last;
- for (i = 0; i < wqe->wr.num_sge; i++) {
- struct rvt_sge *sge = &wqe->sg_list[i];
-
- rvt_put_mr(sge->mr);
- }
+ rvt_put_swqe(wqe);
s_last = qp->s_last;
if (++s_last >= qp->s_size)
s_last = 0;
qp->s_last = s_last;
/* see post_send() */
barrier();
- /* Post a send completion queue entry if requested. */
- if (!(qp->s_flags & RVT_S_SIGNAL_REQ_WR) ||
- (wqe->wr.send_flags & IB_SEND_SIGNALED)) {
- memset(&wc, 0, sizeof(wc));
- wc.wr_id = wqe->wr.wr_id;
- wc.status = IB_WC_SUCCESS;
- wc.opcode = ib_qib_wc_opcode[wqe->wr.opcode];
- wc.byte_len = wqe->length;
- wc.qp = &qp->ibqp;
- rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.send_cq), &wc, 0);
- }
+ rvt_qp_swqe_complete(qp, wqe, IB_WC_SUCCESS);
} else
this_cpu_inc(*ibp->rvp.rc_delayed_comp);
@@ -2112,8 +2079,7 @@ send_last:
* Update the next expected PSN. We add 1 later
* below, so only add the remainder here.
*/
- if (len > pmtu)
- qp->r_psn += (len - 1) / pmtu;
+ qp->r_psn += rvt_div_mtu(qp, len - 1);
} else {
e->rdma_sge.mr = NULL;
e->rdma_sge.vaddr = NULL;
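
The removed SGE loop and completion block above are folded into rdmavt
helpers. A minimal sketch of what rvt_put_swqe() is assumed to do,
mirroring the removed code (rvt_qp_swqe_complete() is assumed to wrap
the removed "post a completion if requested" block in the same way,
with the opcode translation table moving into rdmavt):

	/* Sketch only: assumed shape of the rdmavt helper. */
	static inline void rvt_put_swqe(struct rvt_swqe *wqe)
	{
		unsigned i;

		for (i = 0; i < wqe->wr.num_sge; i++) {
			struct rvt_sge *sge = &wqe->sg_list[i];

			rvt_put_mr(sge->mr);
		}
	}

Likewise, rvt_div_mtu(qp, len - 1) is assumed to replace the open
division by pmtu with a shift by the QP's cached log2(pmtu), valid
because the path MTU is always a power of two.
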
diff --git a/drivers/infiniband/hw/qib/qib_ruc.c b/drivers/infiniband/hw/qib/qib_ruc.c
index de1bde5950f5..e54a2feeeb10 100644
--- a/drivers/infiniband/hw/qib/qib_ruc.c
+++ b/drivers/infiniband/hw/qib/qib_ruc.c
@@ -793,7 +793,6 @@ void qib_send_complete(struct rvt_qp *qp, struct rvt_swqe *wqe,
enum ib_wc_status status)
{
u32 old_last, last;
- unsigned i;
if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_OR_FLUSH_SEND))
return;
@@ -805,32 +804,13 @@ void qib_send_complete(struct rvt_qp *qp, struct rvt_swqe *wqe,
qp->s_last = last;
/* See post_send() */
barrier();
- for (i = 0; i < wqe->wr.num_sge; i++) {
- struct rvt_sge *sge = &wqe->sg_list[i];
-
- rvt_put_mr(sge->mr);
- }
+ rvt_put_swqe(wqe);
if (qp->ibqp.qp_type == IB_QPT_UD ||
qp->ibqp.qp_type == IB_QPT_SMI ||
qp->ibqp.qp_type == IB_QPT_GSI)
atomic_dec(&ibah_to_rvtah(wqe->ud_wr.ah)->refcount);
- /* See ch. 11.2.4.1 and 10.7.3.1 */
- if (!(qp->s_flags & RVT_S_SIGNAL_REQ_WR) ||
- (wqe->wr.send_flags & IB_SEND_SIGNALED) ||
- status != IB_WC_SUCCESS) {
- struct ib_wc wc;
-
- memset(&wc, 0, sizeof(wc));
- wc.wr_id = wqe->wr.wr_id;
- wc.status = status;
- wc.opcode = ib_qib_wc_opcode[wqe->wr.opcode];
- wc.qp = &qp->ibqp;
- if (status == IB_WC_SUCCESS)
- wc.byte_len = wqe->length;
- rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.send_cq), &wc,
- status != IB_WC_SUCCESS);
- }
+ rvt_qp_swqe_complete(qp, wqe, status);
if (qp->s_acked == old_last)
qp->s_acked = last;
diff --git a/drivers/infiniband/hw/qib/qib_verbs.c b/drivers/infiniband/hw/qib/qib_verbs.c
index 954f15064514..4b54c0ddd08a 100644
--- a/drivers/infiniband/hw/qib/qib_verbs.c
+++ b/drivers/infiniband/hw/qib/qib_verbs.c
@@ -114,19 +114,6 @@ module_param_named(disable_sma, ib_qib_disable_sma, uint, S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(disable_sma, "Disable the SMA");
/*
- * Translate ib_wr_opcode into ib_wc_opcode.
- */
-const enum ib_wc_opcode ib_qib_wc_opcode[] = {
- [IB_WR_RDMA_WRITE] = IB_WC_RDMA_WRITE,
- [IB_WR_RDMA_WRITE_WITH_IMM] = IB_WC_RDMA_WRITE,
- [IB_WR_SEND] = IB_WC_SEND,
- [IB_WR_SEND_WITH_IMM] = IB_WC_SEND,
- [IB_WR_RDMA_READ] = IB_WC_RDMA_READ,
- [IB_WR_ATOMIC_CMP_AND_SWP] = IB_WC_COMP_SWAP,
- [IB_WR_ATOMIC_FETCH_AND_ADD] = IB_WC_FETCH_ADD
-};
-
-/*
* System image GUID.
*/
__be64 ib_qib_sys_image_guid;
@@ -464,7 +451,7 @@ static void mem_timer(unsigned long data)
priv = list_entry(list->next, struct qib_qp_priv, iowait);
qp = priv->owner;
list_del_init(&priv->iowait);
- atomic_inc(&qp->refcount);
+ rvt_get_qp(qp);
if (!list_empty(list))
mod_timer(&dev->mem_timer, jiffies + 1);
}
@@ -477,8 +464,7 @@ static void mem_timer(unsigned long data)
qib_schedule_send(qp);
}
spin_unlock_irqrestore(&qp->s_lock, flags);
- if (atomic_dec_and_test(&qp->refcount))
- wake_up(&qp->wait);
+ rvt_put_qp(qp);
}
}
@@ -762,7 +748,7 @@ void qib_put_txreq(struct qib_verbs_txreq *tx)
iowait);
qp = priv->owner;
list_del_init(&priv->iowait);
- atomic_inc(&qp->refcount);
+ rvt_get_qp(qp);
spin_unlock_irqrestore(&dev->rdi.pending_lock, flags);
spin_lock_irqsave(&qp->s_lock, flags);
@@ -772,8 +758,7 @@ void qib_put_txreq(struct qib_verbs_txreq *tx)
}
spin_unlock_irqrestore(&qp->s_lock, flags);
- if (atomic_dec_and_test(&qp->refcount))
- wake_up(&qp->wait);
+ rvt_put_qp(qp);
} else
spin_unlock_irqrestore(&dev->rdi.pending_lock, flags);
}
@@ -808,7 +793,7 @@ void qib_verbs_sdma_desc_avail(struct qib_pportdata *ppd, unsigned avail)
break;
avail -= qpp->s_tx->txreq.sg_count;
list_del_init(&qpp->iowait);
- atomic_inc(&qp->refcount);
+ rvt_get_qp(qp);
qps[n++] = qp;
}
@@ -822,8 +807,7 @@ void qib_verbs_sdma_desc_avail(struct qib_pportdata *ppd, unsigned avail)
qib_schedule_send(qp);
}
spin_unlock(&qp->s_lock);
- if (atomic_dec_and_test(&qp->refcount))
- wake_up(&qp->wait);
+ rvt_put_qp(qp);
}
}
@@ -1288,7 +1272,7 @@ void qib_ib_piobufavail(struct qib_devdata *dd)
priv = list_entry(list->next, struct qib_qp_priv, iowait);
qp = priv->owner;
list_del_init(&priv->iowait);
- atomic_inc(&qp->refcount);
+ rvt_get_qp(qp);
qps[n++] = qp;
}
dd->f_wantpiobuf_intr(dd, 0);
@@ -1306,8 +1290,7 @@ full:
spin_unlock_irqrestore(&qp->s_lock, flags);
/* Notify qib_destroy_qp() if it is waiting. */
- if (atomic_dec_and_test(&qp->refcount))
- wake_up(&qp->wait);
+ rvt_put_qp(qp);
}
}
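
Every conversion above replaces the same open-coded pair of reference
operations; the rdmavt helpers are assumed to wrap exactly that logic:

	/* Sketch only: assumed shape of the rdmavt helpers. */
	static inline void rvt_get_qp(struct rvt_qp *qp)
	{
		atomic_inc(&qp->refcount);
	}

	static inline void rvt_put_qp(struct rvt_qp *qp)
	{
		if (atomic_dec_and_test(&qp->refcount))
			wake_up(&qp->wait);
	}
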
diff --git a/drivers/infiniband/hw/usnic/usnic_ib_qp_grp.c b/drivers/infiniband/hw/usnic/usnic_ib_qp_grp.c
index 5b0248adf4ce..092d4e11a633 100644
--- a/drivers/infiniband/hw/usnic/usnic_ib_qp_grp.c
+++ b/drivers/infiniband/hw/usnic/usnic_ib_qp_grp.c
@@ -117,10 +117,10 @@ static int enable_qp_grp(struct usnic_ib_qp_grp *qp_grp)
vnic_idx = usnic_vnic_get_index(qp_grp->vf->vnic);
res_chunk = get_qp_res_chunk(qp_grp);
- if (IS_ERR_OR_NULL(res_chunk)) {
+ if (IS_ERR(res_chunk)) {
usnic_err("Unable to get qp res with err %ld\n",
PTR_ERR(res_chunk));
- return res_chunk ? PTR_ERR(res_chunk) : -ENOMEM;
+ return PTR_ERR(res_chunk);
}
for (i = 0; i < res_chunk->cnt; i++) {
@@ -158,10 +158,10 @@ static int disable_qp_grp(struct usnic_ib_qp_grp *qp_grp)
vnic_idx = usnic_vnic_get_index(qp_grp->vf->vnic);
res_chunk = get_qp_res_chunk(qp_grp);
- if (IS_ERR_OR_NULL(res_chunk)) {
+ if (IS_ERR(res_chunk)) {
usnic_err("Unable to get qp res with err %ld\n",
PTR_ERR(res_chunk));
- return res_chunk ? PTR_ERR(res_chunk) : -ENOMEM;
+ return PTR_ERR(res_chunk);
}
for (i = 0; i < res_chunk->cnt; i++) {
@@ -186,11 +186,11 @@ static int init_filter_action(struct usnic_ib_qp_grp *qp_grp,
struct usnic_vnic_res_chunk *res_chunk;
res_chunk = usnic_ib_qp_grp_get_chunk(qp_grp, USNIC_VNIC_RES_TYPE_RQ);
- if (IS_ERR_OR_NULL(res_chunk)) {
+ if (IS_ERR(res_chunk)) {
usnic_err("Unable to get %s with err %ld\n",
usnic_vnic_res_type_to_str(USNIC_VNIC_RES_TYPE_RQ),
PTR_ERR(res_chunk));
- return res_chunk ? PTR_ERR(res_chunk) : -ENOMEM;
+ return PTR_ERR(res_chunk);
}
uaction->vnic_idx = usnic_vnic_get_index(qp_grp->vf->vnic);
@@ -228,8 +228,6 @@ create_roce_custom_flow(struct usnic_ib_qp_grp *qp_grp,
flow = usnic_fwd_alloc_flow(qp_grp->ufdev, &filter, &uaction);
if (IS_ERR_OR_NULL(flow)) {
- usnic_err("Unable to alloc flow failed with err %ld\n",
- PTR_ERR(flow));
err = flow ? PTR_ERR(flow) : -EFAULT;
goto out_unreserve_port;
}
@@ -303,8 +301,6 @@ create_udp_flow(struct usnic_ib_qp_grp *qp_grp,
flow = usnic_fwd_alloc_flow(qp_grp->ufdev, &filter, &uaction);
if (IS_ERR_OR_NULL(flow)) {
- usnic_err("Unable to alloc flow failed with err %ld\n",
- PTR_ERR(flow));
err = flow ? PTR_ERR(flow) : -EFAULT;
goto out_put_sock;
}
@@ -694,18 +690,14 @@ usnic_ib_qp_grp_create(struct usnic_fwd_dev *ufdev, struct usnic_ib_vf *vf,
}
qp_grp = kzalloc(sizeof(*qp_grp), GFP_ATOMIC);
- if (!qp_grp) {
- usnic_err("Unable to alloc qp_grp - Out of memory\n");
+ if (!qp_grp)
return NULL;
- }
qp_grp->res_chunk_list = alloc_res_chunk_list(vf->vnic, res_spec,
qp_grp);
if (IS_ERR_OR_NULL(qp_grp->res_chunk_list)) {
err = qp_grp->res_chunk_list ?
PTR_ERR(qp_grp->res_chunk_list) : -ENOMEM;
- usnic_err("Unable to alloc res for %d with err %d\n",
- qp_grp->grp_id, err);
goto out_free_qp_grp;
}
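
The IS_ERR_OR_NULL() to IS_ERR() conversions in this file are safe
because get_qp_res_chunk() and usnic_ib_qp_grp_get_chunk() return
either a valid pointer or an ERR_PTR()-encoded errno, never NULL, so
the NULL leg and its -ENOMEM fallback were dead code. The result is
the idiomatic error-pointer pattern:

	res_chunk = get_qp_res_chunk(qp_grp);	/* valid or ERR_PTR() */
	if (IS_ERR(res_chunk))
		return PTR_ERR(res_chunk);
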
diff --git a/drivers/infiniband/hw/usnic/usnic_ib_verbs.c b/drivers/infiniband/hw/usnic/usnic_ib_verbs.c
index a5bfbba6bbac..74819a7951e2 100644
--- a/drivers/infiniband/hw/usnic/usnic_ib_verbs.c
+++ b/drivers/infiniband/hw/usnic/usnic_ib_verbs.c
@@ -87,12 +87,12 @@ static int usnic_ib_fill_create_qp_resp(struct usnic_ib_qp_grp *qp_grp,
resp.bar_len = bar->len;
chunk = usnic_ib_qp_grp_get_chunk(qp_grp, USNIC_VNIC_RES_TYPE_RQ);
- if (IS_ERR_OR_NULL(chunk)) {
+ if (IS_ERR(chunk)) {
usnic_err("Failed to get chunk %s for qp_grp %d with err %ld\n",
usnic_vnic_res_type_to_str(USNIC_VNIC_RES_TYPE_RQ),
qp_grp->grp_id,
PTR_ERR(chunk));
- return chunk ? PTR_ERR(chunk) : -ENOMEM;
+ return PTR_ERR(chunk);
}
WARN_ON(chunk->type != USNIC_VNIC_RES_TYPE_RQ);
@@ -101,12 +101,12 @@ static int usnic_ib_fill_create_qp_resp(struct usnic_ib_qp_grp *qp_grp,
resp.rq_idx[i] = chunk->res[i]->vnic_idx;
chunk = usnic_ib_qp_grp_get_chunk(qp_grp, USNIC_VNIC_RES_TYPE_WQ);
- if (IS_ERR_OR_NULL(chunk)) {
+ if (IS_ERR(chunk)) {
usnic_err("Failed to get chunk %s for qp_grp %d with err %ld\n",
usnic_vnic_res_type_to_str(USNIC_VNIC_RES_TYPE_WQ),
qp_grp->grp_id,
PTR_ERR(chunk));
- return chunk ? PTR_ERR(chunk) : -ENOMEM;
+ return PTR_ERR(chunk);
}
WARN_ON(chunk->type != USNIC_VNIC_RES_TYPE_WQ);
@@ -115,12 +115,12 @@ static int usnic_ib_fill_create_qp_resp(struct usnic_ib_qp_grp *qp_grp,
resp.wq_idx[i] = chunk->res[i]->vnic_idx;
chunk = usnic_ib_qp_grp_get_chunk(qp_grp, USNIC_VNIC_RES_TYPE_CQ);
- if (IS_ERR_OR_NULL(chunk)) {
+ if (IS_ERR(chunk)) {
usnic_err("Failed to get chunk %s for qp_grp %d with err %ld\n",
usnic_vnic_res_type_to_str(USNIC_VNIC_RES_TYPE_CQ),
qp_grp->grp_id,
PTR_ERR(chunk));
- return chunk ? PTR_ERR(chunk) : -ENOMEM;
+ return PTR_ERR(chunk);
}
WARN_ON(chunk->type != USNIC_VNIC_RES_TYPE_CQ);
@@ -738,7 +738,9 @@ int usnic_ib_mmap(struct ib_ucontext *context,
/* In ib callbacks section - Start of stub funcs */
struct ib_ah *usnic_ib_create_ah(struct ib_pd *pd,
- struct ib_ah_attr *ah_attr)
+ struct ib_ah_attr *ah_attr,
+ struct ib_udata *udata)
{
usnic_dbg("\n");
return ERR_PTR(-EPERM);
diff --git a/drivers/infiniband/hw/usnic/usnic_ib_verbs.h b/drivers/infiniband/hw/usnic/usnic_ib_verbs.h
index 0d9d2e6a14d5..0ed8e072329e 100644
--- a/drivers/infiniband/hw/usnic/usnic_ib_verbs.h
+++ b/drivers/infiniband/hw/usnic/usnic_ib_verbs.h
@@ -75,7 +75,9 @@ int usnic_ib_dealloc_ucontext(struct ib_ucontext *ibcontext);
int usnic_ib_mmap(struct ib_ucontext *context,
struct vm_area_struct *vma);
struct ib_ah *usnic_ib_create_ah(struct ib_pd *pd,
- struct ib_ah_attr *ah_attr);
+ struct ib_ah_attr *ah_attr,
+ struct ib_udata *udata);
int usnic_ib_destroy_ah(struct ib_ah *ah);
int usnic_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
struct ib_send_wr **bad_wr);
diff --git a/drivers/infiniband/hw/usnic/usnic_vnic.c b/drivers/infiniband/hw/usnic/usnic_vnic.c
index 887510718690..e7b0030254da 100644
--- a/drivers/infiniband/hw/usnic/usnic_vnic.c
+++ b/drivers/infiniband/hw/usnic/usnic_vnic.c
@@ -241,17 +241,12 @@ usnic_vnic_get_resources(struct usnic_vnic *vnic, enum usnic_vnic_res_type type,
return ERR_PTR(-EINVAL);
ret = kzalloc(sizeof(*ret), GFP_ATOMIC);
- if (!ret) {
- usnic_err("Failed to allocate chunk for %s - Out of memory\n",
- usnic_vnic_pci_name(vnic));
+ if (!ret)
return ERR_PTR(-ENOMEM);
- }
if (cnt > 0) {
ret->res = kcalloc(cnt, sizeof(*(ret->res)), GFP_ATOMIC);
if (!ret->res) {
- usnic_err("Failed to allocate resources for %s. Out of memory\n",
- usnic_vnic_pci_name(vnic));
kfree(ret);
return ERR_PTR(-ENOMEM);
}
@@ -311,8 +306,10 @@ static int usnic_vnic_alloc_res_chunk(struct usnic_vnic *vnic,
struct usnic_vnic_res *res;
cnt = vnic_dev_get_res_count(vnic->vdev, _to_vnic_res_type(type));
- if (cnt < 1)
+ if (cnt < 1) {
+ usnic_err("Wrong res count with cnt %d\n", cnt);
return -EINVAL;
+ }
chunk->cnt = chunk->free_cnt = cnt;
chunk->res = kzalloc(sizeof(*(chunk->res))*cnt, GFP_KERNEL);
@@ -384,12 +381,8 @@ static int usnic_vnic_discover_resources(struct pci_dev *pdev,
res_type < USNIC_VNIC_RES_TYPE_MAX; res_type++) {
err = usnic_vnic_alloc_res_chunk(vnic, res_type,
&vnic->chunks[res_type]);
- if (err) {
- usnic_err("Failed to alloc res %s with err %d\n",
- usnic_vnic_res_type_to_str(res_type),
- err);
+ if (err)
goto out_clean_chunks;
- }
}
return 0;
@@ -454,11 +447,8 @@ struct usnic_vnic *usnic_vnic_alloc(struct pci_dev *pdev)
}
vnic = kzalloc(sizeof(*vnic), GFP_KERNEL);
- if (!vnic) {
- usnic_err("Failed to alloc vnic for %s - out of memory\n",
- pci_name(pdev));
+ if (!vnic)
return ERR_PTR(-ENOMEM);
- }
spin_lock_init(&vnic->res_lock);
diff --git a/drivers/infiniband/hw/vmw_pvrdma/Kconfig b/drivers/infiniband/hw/vmw_pvrdma/Kconfig
new file mode 100644
index 000000000000..5a9790ac0ede
--- /dev/null
+++ b/drivers/infiniband/hw/vmw_pvrdma/Kconfig
@@ -0,0 +1,7 @@
+config INFINIBAND_VMWARE_PVRDMA
+ tristate "VMware Paravirtualized RDMA Driver"
+ depends on NETDEVICES && ETHERNET && PCI && INET && VMXNET3
+ ---help---
+	  This driver provides low-level support for the VMware Paravirtual
+ RDMA adapter. It interacts with the VMXNet3 driver to provide
+ Ethernet capabilities.
diff --git a/drivers/infiniband/hw/vmw_pvrdma/Makefile b/drivers/infiniband/hw/vmw_pvrdma/Makefile
new file mode 100644
index 000000000000..0194ed19f542
--- /dev/null
+++ b/drivers/infiniband/hw/vmw_pvrdma/Makefile
@@ -0,0 +1,3 @@
+obj-$(CONFIG_INFINIBAND_VMWARE_PVRDMA) += vmw_pvrdma.o
+
+vmw_pvrdma-y := pvrdma_cmd.o pvrdma_cq.o pvrdma_doorbell.o pvrdma_main.o pvrdma_misc.o pvrdma_mr.o pvrdma_qp.o pvrdma_verbs.o
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h b/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h
new file mode 100644
index 000000000000..71e1d55d69d6
--- /dev/null
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h
@@ -0,0 +1,474 @@
+/*
+ * Copyright (c) 2012-2016 VMware, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of EITHER the GNU General Public License
+ * version 2 as published by the Free Software Foundation or the BSD
+ * 2-Clause License. This program is distributed in the hope that it
+ * will be useful, but WITHOUT ANY WARRANTY; WITHOUT EVEN THE IMPLIED
+ * WARRANTY OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE.
+ * See the GNU General Public License version 2 for more details at
+ * http://www.gnu.org/licenses/old-licenses/gpl-2.0.en.html.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program available in the file COPYING in the main
+ * directory of this source tree.
+ *
+ * The BSD 2-Clause License
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
+ * OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __PVRDMA_H__
+#define __PVRDMA_H__
+
+#include <linux/compiler.h>
+#include <linux/interrupt.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/pci.h>
+#include <linux/semaphore.h>
+#include <linux/workqueue.h>
+#include <rdma/ib_umem.h>
+#include <rdma/ib_verbs.h>
+#include <rdma/vmw_pvrdma-abi.h>
+
+#include "pvrdma_ring.h"
+#include "pvrdma_dev_api.h"
+#include "pvrdma_verbs.h"
+
+/* NOT the same as BIT_MASK(). */
+#define PVRDMA_MASK(n) ((n << 1) - 1)
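+
+/*
+ * For illustration (assuming the *_MAX constants used with this macro
+ * are the highest defined flag bit): if PVRDMA_SEND_FLAGS_MAX were
+ * BIT(3), then PVRDMA_MASK(PVRDMA_SEND_FLAGS_MAX) == (BIT(3) << 1) - 1
+ * == 0x0f, i.e. every flag bit up to and including the highest one,
+ * whereas BIT_MASK(3) is only the single bit 1UL << 3.
+ */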
+
+/*
+ * VMware PVRDMA PCI device id.
+ */
+#define PCI_DEVICE_ID_VMWARE_PVRDMA 0x0820
+
+struct pvrdma_dev;
+
+struct pvrdma_page_dir {
+ dma_addr_t dir_dma;
+ u64 *dir;
+ int ntables;
+ u64 **tables;
+ u64 npages;
+ void **pages;
+};
+
+struct pvrdma_cq {
+ struct ib_cq ibcq;
+ int offset;
+ spinlock_t cq_lock; /* Poll lock. */
+ struct pvrdma_uar_map *uar;
+ struct ib_umem *umem;
+ struct pvrdma_ring_state *ring_state;
+ struct pvrdma_page_dir pdir;
+ u32 cq_handle;
+ bool is_kernel;
+ atomic_t refcnt;
+ wait_queue_head_t wait;
+};
+
+struct pvrdma_id_table {
+ u32 last;
+ u32 top;
+ u32 max;
+ u32 mask;
+ spinlock_t lock; /* Table lock. */
+ unsigned long *table;
+};
+
+struct pvrdma_uar_map {
+ unsigned long pfn;
+ void __iomem *map;
+ int index;
+};
+
+struct pvrdma_uar_table {
+ struct pvrdma_id_table tbl;
+ int size;
+};
+
+struct pvrdma_ucontext {
+ struct ib_ucontext ibucontext;
+ struct pvrdma_dev *dev;
+ struct pvrdma_uar_map uar;
+ u64 ctx_handle;
+};
+
+struct pvrdma_pd {
+ struct ib_pd ibpd;
+ u32 pdn;
+ u32 pd_handle;
+ int privileged;
+};
+
+struct pvrdma_mr {
+ u32 mr_handle;
+ u64 iova;
+ u64 size;
+};
+
+struct pvrdma_user_mr {
+ struct ib_mr ibmr;
+ struct ib_umem *umem;
+ struct pvrdma_mr mmr;
+ struct pvrdma_page_dir pdir;
+ u64 *pages;
+ u32 npages;
+ u32 max_pages;
+ u32 page_shift;
+};
+
+struct pvrdma_wq {
+ struct pvrdma_ring *ring;
+ spinlock_t lock; /* Work queue lock. */
+ int wqe_cnt;
+ int wqe_size;
+ int max_sg;
+ int offset;
+};
+
+struct pvrdma_ah {
+ struct ib_ah ibah;
+ struct pvrdma_av av;
+};
+
+struct pvrdma_qp {
+ struct ib_qp ibqp;
+ u32 qp_handle;
+ u32 qkey;
+ struct pvrdma_wq sq;
+ struct pvrdma_wq rq;
+ struct ib_umem *rumem;
+ struct ib_umem *sumem;
+ struct pvrdma_page_dir pdir;
+ int npages;
+ int npages_send;
+ int npages_recv;
+ u32 flags;
+ u8 port;
+ u8 state;
+ bool is_kernel;
+ struct mutex mutex; /* QP state mutex. */
+ atomic_t refcnt;
+ wait_queue_head_t wait;
+};
+
+struct pvrdma_dev {
+ /* PCI device-related information. */
+ struct ib_device ib_dev;
+ struct pci_dev *pdev;
+ void __iomem *regs;
+ struct pvrdma_device_shared_region *dsr; /* Shared region pointer */
+ dma_addr_t dsrbase; /* Shared region base address */
+ void *cmd_slot;
+ void *resp_slot;
+ unsigned long flags;
+ struct list_head device_link;
+
+ /* Locking and interrupt information. */
+ spinlock_t cmd_lock; /* Command lock. */
+ struct semaphore cmd_sema;
+ struct completion cmd_done;
+ struct {
+ enum pvrdma_intr_type type; /* Intr type */
+ struct msix_entry msix_entry[PVRDMA_MAX_INTERRUPTS];
+ irq_handler_t handler[PVRDMA_MAX_INTERRUPTS];
+ u8 enabled[PVRDMA_MAX_INTERRUPTS];
+ u8 size;
+ } intr;
+
+ /* RDMA-related device information. */
+ union ib_gid *sgid_tbl;
+ struct pvrdma_ring_state *async_ring_state;
+ struct pvrdma_page_dir async_pdir;
+ struct pvrdma_ring_state *cq_ring_state;
+ struct pvrdma_page_dir cq_pdir;
+ struct pvrdma_cq **cq_tbl;
+ spinlock_t cq_tbl_lock;
+ struct pvrdma_qp **qp_tbl;
+ spinlock_t qp_tbl_lock;
+ struct pvrdma_uar_table uar_table;
+ struct pvrdma_uar_map driver_uar;
+ __be64 sys_image_guid;
+ spinlock_t desc_lock; /* Device modification lock. */
+ u32 port_cap_mask;
+ struct mutex port_mutex; /* Port modification mutex. */
+ bool ib_active;
+ atomic_t num_qps;
+ atomic_t num_cqs;
+ atomic_t num_pds;
+ atomic_t num_ahs;
+
+ /* Network device information. */
+ struct net_device *netdev;
+ struct notifier_block nb_netdev;
+};
+
+struct pvrdma_netdevice_work {
+ struct work_struct work;
+ struct net_device *event_netdev;
+ unsigned long event;
+};
+
+static inline struct pvrdma_dev *to_vdev(struct ib_device *ibdev)
+{
+ return container_of(ibdev, struct pvrdma_dev, ib_dev);
+}
+
+static inline struct pvrdma_ucontext *
+to_vucontext(struct ib_ucontext *ibucontext)
+{
+ return container_of(ibucontext, struct pvrdma_ucontext, ibucontext);
+}
+
+static inline struct pvrdma_pd *to_vpd(struct ib_pd *ibpd)
+{
+ return container_of(ibpd, struct pvrdma_pd, ibpd);
+}
+
+static inline struct pvrdma_cq *to_vcq(struct ib_cq *ibcq)
+{
+ return container_of(ibcq, struct pvrdma_cq, ibcq);
+}
+
+static inline struct pvrdma_user_mr *to_vmr(struct ib_mr *ibmr)
+{
+ return container_of(ibmr, struct pvrdma_user_mr, ibmr);
+}
+
+static inline struct pvrdma_qp *to_vqp(struct ib_qp *ibqp)
+{
+ return container_of(ibqp, struct pvrdma_qp, ibqp);
+}
+
+static inline struct pvrdma_ah *to_vah(struct ib_ah *ibah)
+{
+ return container_of(ibah, struct pvrdma_ah, ibah);
+}
+
+static inline void pvrdma_write_reg(struct pvrdma_dev *dev, u32 reg, u32 val)
+{
+ writel(cpu_to_le32(val), dev->regs + reg);
+}
+
+static inline u32 pvrdma_read_reg(struct pvrdma_dev *dev, u32 reg)
+{
+ return le32_to_cpu(readl(dev->regs + reg));
+}
+
+static inline void pvrdma_write_uar_cq(struct pvrdma_dev *dev, u32 val)
+{
+ writel(cpu_to_le32(val), dev->driver_uar.map + PVRDMA_UAR_CQ_OFFSET);
+}
+
+static inline void pvrdma_write_uar_qp(struct pvrdma_dev *dev, u32 val)
+{
+ writel(cpu_to_le32(val), dev->driver_uar.map + PVRDMA_UAR_QP_OFFSET);
+}
+
+static inline void *pvrdma_page_dir_get_ptr(struct pvrdma_page_dir *pdir,
+ u64 offset)
+{
+ return pdir->pages[offset / PAGE_SIZE] + (offset % PAGE_SIZE);
+}
+
+static inline enum pvrdma_mtu ib_mtu_to_pvrdma(enum ib_mtu mtu)
+{
+ return (enum pvrdma_mtu)mtu;
+}
+
+static inline enum ib_mtu pvrdma_mtu_to_ib(enum pvrdma_mtu mtu)
+{
+ return (enum ib_mtu)mtu;
+}
+
+static inline enum pvrdma_port_state ib_port_state_to_pvrdma(
+ enum ib_port_state state)
+{
+ return (enum pvrdma_port_state)state;
+}
+
+static inline enum ib_port_state pvrdma_port_state_to_ib(
+ enum pvrdma_port_state state)
+{
+ return (enum ib_port_state)state;
+}
+
+static inline int ib_port_cap_flags_to_pvrdma(int flags)
+{
+ return flags & PVRDMA_MASK(PVRDMA_PORT_CAP_FLAGS_MAX);
+}
+
+static inline int pvrdma_port_cap_flags_to_ib(int flags)
+{
+ return flags;
+}
+
+static inline enum pvrdma_port_width ib_port_width_to_pvrdma(
+ enum ib_port_width width)
+{
+ return (enum pvrdma_port_width)width;
+}
+
+static inline enum ib_port_width pvrdma_port_width_to_ib(
+ enum pvrdma_port_width width)
+{
+ return (enum ib_port_width)width;
+}
+
+static inline enum pvrdma_port_speed ib_port_speed_to_pvrdma(
+ enum ib_port_speed speed)
+{
+ return (enum pvrdma_port_speed)speed;
+}
+
+static inline enum ib_port_speed pvrdma_port_speed_to_ib(
+ enum pvrdma_port_speed speed)
+{
+ return (enum ib_port_speed)speed;
+}
+
+static inline int pvrdma_qp_attr_mask_to_ib(int attr_mask)
+{
+ return attr_mask;
+}
+
+static inline int ib_qp_attr_mask_to_pvrdma(int attr_mask)
+{
+ return attr_mask & PVRDMA_MASK(PVRDMA_QP_ATTR_MASK_MAX);
+}
+
+static inline enum pvrdma_mig_state ib_mig_state_to_pvrdma(
+ enum ib_mig_state state)
+{
+ return (enum pvrdma_mig_state)state;
+}
+
+static inline enum ib_mig_state pvrdma_mig_state_to_ib(
+ enum pvrdma_mig_state state)
+{
+ return (enum ib_mig_state)state;
+}
+
+static inline int ib_access_flags_to_pvrdma(int flags)
+{
+ return flags;
+}
+
+static inline int pvrdma_access_flags_to_ib(int flags)
+{
+ return flags & PVRDMA_MASK(PVRDMA_ACCESS_FLAGS_MAX);
+}
+
+static inline enum pvrdma_qp_type ib_qp_type_to_pvrdma(enum ib_qp_type type)
+{
+ return (enum pvrdma_qp_type)type;
+}
+
+static inline enum ib_qp_type pvrdma_qp_type_to_ib(enum pvrdma_qp_type type)
+{
+ return (enum ib_qp_type)type;
+}
+
+static inline enum pvrdma_qp_state ib_qp_state_to_pvrdma(enum ib_qp_state state)
+{
+ return (enum pvrdma_qp_state)state;
+}
+
+static inline enum ib_qp_state pvrdma_qp_state_to_ib(enum pvrdma_qp_state state)
+{
+ return (enum ib_qp_state)state;
+}
+
+static inline enum pvrdma_wr_opcode ib_wr_opcode_to_pvrdma(enum ib_wr_opcode op)
+{
+ return (enum pvrdma_wr_opcode)op;
+}
+
+static inline enum ib_wc_status pvrdma_wc_status_to_ib(
+ enum pvrdma_wc_status status)
+{
+ return (enum ib_wc_status)status;
+}
+
+static inline int pvrdma_wc_opcode_to_ib(int opcode)
+{
+ return opcode;
+}
+
+static inline int pvrdma_wc_flags_to_ib(int flags)
+{
+ return flags;
+}
+
+static inline int ib_send_flags_to_pvrdma(int flags)
+{
+ return flags & PVRDMA_MASK(PVRDMA_SEND_FLAGS_MAX);
+}
+
+void pvrdma_qp_cap_to_ib(struct ib_qp_cap *dst,
+ const struct pvrdma_qp_cap *src);
+void ib_qp_cap_to_pvrdma(struct pvrdma_qp_cap *dst,
+ const struct ib_qp_cap *src);
+void pvrdma_gid_to_ib(union ib_gid *dst, const union pvrdma_gid *src);
+void ib_gid_to_pvrdma(union pvrdma_gid *dst, const union ib_gid *src);
+void pvrdma_global_route_to_ib(struct ib_global_route *dst,
+ const struct pvrdma_global_route *src);
+void ib_global_route_to_pvrdma(struct pvrdma_global_route *dst,
+ const struct ib_global_route *src);
+void pvrdma_ah_attr_to_ib(struct ib_ah_attr *dst,
+ const struct pvrdma_ah_attr *src);
+void ib_ah_attr_to_pvrdma(struct pvrdma_ah_attr *dst,
+ const struct ib_ah_attr *src);
+
+int pvrdma_uar_table_init(struct pvrdma_dev *dev);
+void pvrdma_uar_table_cleanup(struct pvrdma_dev *dev);
+
+int pvrdma_uar_alloc(struct pvrdma_dev *dev, struct pvrdma_uar_map *uar);
+void pvrdma_uar_free(struct pvrdma_dev *dev, struct pvrdma_uar_map *uar);
+
+void _pvrdma_flush_cqe(struct pvrdma_qp *qp, struct pvrdma_cq *cq);
+
+int pvrdma_page_dir_init(struct pvrdma_dev *dev, struct pvrdma_page_dir *pdir,
+ u64 npages, bool alloc_pages);
+void pvrdma_page_dir_cleanup(struct pvrdma_dev *dev,
+ struct pvrdma_page_dir *pdir);
+int pvrdma_page_dir_insert_dma(struct pvrdma_page_dir *pdir, u64 idx,
+ dma_addr_t daddr);
+int pvrdma_page_dir_insert_umem(struct pvrdma_page_dir *pdir,
+ struct ib_umem *umem, u64 offset);
+dma_addr_t pvrdma_page_dir_get_dma(struct pvrdma_page_dir *pdir, u64 idx);
+int pvrdma_page_dir_insert_page_list(struct pvrdma_page_dir *pdir,
+ u64 *page_list, int num_pages);
+
+int pvrdma_cmd_post(struct pvrdma_dev *dev, union pvrdma_cmd_req *req,
+ union pvrdma_cmd_resp *rsp, unsigned resp_code);
+
+#endif /* __PVRDMA_H__ */
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cmd.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cmd.c
new file mode 100644
index 000000000000..4a78c537d8a1
--- /dev/null
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cmd.c
@@ -0,0 +1,119 @@
+/*
+ * Copyright (c) 2012-2016 VMware, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of EITHER the GNU General Public License
+ * version 2 as published by the Free Software Foundation or the BSD
+ * 2-Clause License. This program is distributed in the hope that it
+ * will be useful, but WITHOUT ANY WARRANTY; WITHOUT EVEN THE IMPLIED
+ * WARRANTY OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE.
+ * See the GNU General Public License version 2 for more details at
+ * http://www.gnu.org/licenses/old-licenses/gpl-2.0.en.html.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program available in the file COPYING in the main
+ * directory of this source tree.
+ *
+ * The BSD 2-Clause License
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
+ * OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/list.h>
+
+#include "pvrdma.h"
+
+#define PVRDMA_CMD_TIMEOUT 10000 /* ms */
+
+static inline int pvrdma_cmd_recv(struct pvrdma_dev *dev,
+ union pvrdma_cmd_resp *resp,
+ unsigned resp_code)
+{
+ int err;
+
+ dev_dbg(&dev->pdev->dev, "receive response from device\n");
+
+ err = wait_for_completion_interruptible_timeout(&dev->cmd_done,
+ msecs_to_jiffies(PVRDMA_CMD_TIMEOUT));
+ if (err == 0 || err == -ERESTARTSYS) {
+ dev_warn(&dev->pdev->dev,
+ "completion timeout or interrupted\n");
+ return -ETIMEDOUT;
+ }
+
+ spin_lock(&dev->cmd_lock);
+ memcpy(resp, dev->resp_slot, sizeof(*resp));
+ spin_unlock(&dev->cmd_lock);
+
+ if (resp->hdr.ack != resp_code) {
+ dev_warn(&dev->pdev->dev,
+ "unknown response %#x expected %#x\n",
+ resp->hdr.ack, resp_code);
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+int
+pvrdma_cmd_post(struct pvrdma_dev *dev, union pvrdma_cmd_req *req,
+ union pvrdma_cmd_resp *resp, unsigned resp_code)
+{
+ int err;
+
+ dev_dbg(&dev->pdev->dev, "post request to device\n");
+
+	/* Serialization */
+ down(&dev->cmd_sema);
+
+ BUILD_BUG_ON(sizeof(union pvrdma_cmd_req) !=
+ sizeof(struct pvrdma_cmd_modify_qp));
+
+ spin_lock(&dev->cmd_lock);
+ memcpy(dev->cmd_slot, req, sizeof(*req));
+ spin_unlock(&dev->cmd_lock);
+
+ init_completion(&dev->cmd_done);
+ pvrdma_write_reg(dev, PVRDMA_REG_REQUEST, 0);
+
+ /* Make sure the request is written before reading status. */
+ mb();
+
+ err = pvrdma_read_reg(dev, PVRDMA_REG_ERR);
+ if (err == 0) {
+ if (resp != NULL)
+ err = pvrdma_cmd_recv(dev, resp, resp_code);
+ } else {
+ dev_warn(&dev->pdev->dev,
+ "failed to write request error reg: %d\n", err);
+ err = -EFAULT;
+ }
+
+ up(&dev->cmd_sema);
+
+ return err;
+}
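+
+/*
+ * Typical usage (illustrative sketch; pvrdma_create_cq() in
+ * pvrdma_cq.c follows this pattern):
+ *
+ *	union pvrdma_cmd_req req;
+ *	union pvrdma_cmd_resp rsp;
+ *	struct pvrdma_cmd_create_cq *cmd = &req.create_cq;
+ *
+ *	memset(cmd, 0, sizeof(*cmd));
+ *	cmd->hdr.cmd = PVRDMA_CMD_CREATE_CQ;
+ *	ret = pvrdma_cmd_post(dev, &req, &rsp, PVRDMA_CMD_CREATE_CQ_RESP);
+ */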
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c
new file mode 100644
index 000000000000..e429ca5b16aa
--- /dev/null
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c
@@ -0,0 +1,425 @@
+/*
+ * Copyright (c) 2012-2016 VMware, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of EITHER the GNU General Public License
+ * version 2 as published by the Free Software Foundation or the BSD
+ * 2-Clause License. This program is distributed in the hope that it
+ * will be useful, but WITHOUT ANY WARRANTY; WITHOUT EVEN THE IMPLIED
+ * WARRANTY OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE.
+ * See the GNU General Public License version 2 for more details at
+ * http://www.gnu.org/licenses/old-licenses/gpl-2.0.en.html.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program available in the file COPYING in the main
+ * directory of this source tree.
+ *
+ * The BSD 2-Clause License
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
+ * OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <asm/page.h>
+#include <linux/io.h>
+#include <linux/wait.h>
+#include <rdma/ib_addr.h>
+#include <rdma/ib_smi.h>
+#include <rdma/ib_user_verbs.h>
+
+#include "pvrdma.h"
+
+/**
+ * pvrdma_req_notify_cq - request notification for a completion queue
+ * @ibcq: the completion queue
+ * @notify_flags: notification flags
+ *
+ * @return: 0 for success.
+ */
+int pvrdma_req_notify_cq(struct ib_cq *ibcq,
+ enum ib_cq_notify_flags notify_flags)
+{
+ struct pvrdma_dev *dev = to_vdev(ibcq->device);
+ struct pvrdma_cq *cq = to_vcq(ibcq);
+ u32 val = cq->cq_handle;
+
+ val |= (notify_flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED ?
+ PVRDMA_UAR_CQ_ARM_SOL : PVRDMA_UAR_CQ_ARM;
+
+ pvrdma_write_uar_cq(dev, val);
+
+ return 0;
+}
+
+/**
+ * pvrdma_create_cq - create completion queue
+ * @ibdev: the device
+ * @attr: completion queue attributes
+ * @context: user context
+ * @udata: user data
+ *
+ * @return: ib_cq completion queue pointer on success,
+ *          otherwise an ERR_PTR() encoding the negative errno.
+ */
+struct ib_cq *pvrdma_create_cq(struct ib_device *ibdev,
+ const struct ib_cq_init_attr *attr,
+ struct ib_ucontext *context,
+ struct ib_udata *udata)
+{
+ int entries = attr->cqe;
+ struct pvrdma_dev *dev = to_vdev(ibdev);
+ struct pvrdma_cq *cq;
+ int ret;
+ int npages;
+ unsigned long flags;
+ union pvrdma_cmd_req req;
+ union pvrdma_cmd_resp rsp;
+ struct pvrdma_cmd_create_cq *cmd = &req.create_cq;
+ struct pvrdma_cmd_create_cq_resp *resp = &rsp.create_cq_resp;
+ struct pvrdma_create_cq ucmd;
+
+ BUILD_BUG_ON(sizeof(struct pvrdma_cqe) != 64);
+
+ entries = roundup_pow_of_two(entries);
+ if (entries < 1 || entries > dev->dsr->caps.max_cqe)
+ return ERR_PTR(-EINVAL);
+
+ if (!atomic_add_unless(&dev->num_cqs, 1, dev->dsr->caps.max_cq))
+ return ERR_PTR(-ENOMEM);
+
+ cq = kzalloc(sizeof(*cq), GFP_KERNEL);
+ if (!cq) {
+ atomic_dec(&dev->num_cqs);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ cq->ibcq.cqe = entries;
+
+ if (context) {
+ if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) {
+ ret = -EFAULT;
+ goto err_cq;
+ }
+
+ cq->umem = ib_umem_get(context, ucmd.buf_addr, ucmd.buf_size,
+ IB_ACCESS_LOCAL_WRITE, 1);
+ if (IS_ERR(cq->umem)) {
+ ret = PTR_ERR(cq->umem);
+ goto err_cq;
+ }
+
+ npages = ib_umem_page_count(cq->umem);
+ } else {
+ cq->is_kernel = true;
+
+ /* One extra page for shared ring state */
+ npages = 1 + (entries * sizeof(struct pvrdma_cqe) +
+ PAGE_SIZE - 1) / PAGE_SIZE;
+
+ /* Skip header page. */
+ cq->offset = PAGE_SIZE;
+ }
+
+ if (npages < 0 || npages > PVRDMA_PAGE_DIR_MAX_PAGES) {
+ dev_warn(&dev->pdev->dev,
+ "overflow pages in completion queue\n");
+ ret = -EINVAL;
+ goto err_umem;
+ }
+
+ ret = pvrdma_page_dir_init(dev, &cq->pdir, npages, cq->is_kernel);
+ if (ret) {
+ dev_warn(&dev->pdev->dev,
+ "could not allocate page directory\n");
+ goto err_umem;
+ }
+
+ /* Ring state is always the first page. Set in library for user cq. */
+ if (cq->is_kernel)
+ cq->ring_state = cq->pdir.pages[0];
+ else
+ pvrdma_page_dir_insert_umem(&cq->pdir, cq->umem, 0);
+
+ atomic_set(&cq->refcnt, 1);
+ init_waitqueue_head(&cq->wait);
+ spin_lock_init(&cq->cq_lock);
+
+ memset(cmd, 0, sizeof(*cmd));
+ cmd->hdr.cmd = PVRDMA_CMD_CREATE_CQ;
+ cmd->nchunks = npages;
+ cmd->ctx_handle = (context) ?
+ (u64)to_vucontext(context)->ctx_handle : 0;
+ cmd->cqe = entries;
+ cmd->pdir_dma = cq->pdir.dir_dma;
+ ret = pvrdma_cmd_post(dev, &req, &rsp, PVRDMA_CMD_CREATE_CQ_RESP);
+ if (ret < 0) {
+ dev_warn(&dev->pdev->dev,
+ "could not create completion queue, error: %d\n", ret);
+ goto err_page_dir;
+ }
+
+ cq->ibcq.cqe = resp->cqe;
+ cq->cq_handle = resp->cq_handle;
+ spin_lock_irqsave(&dev->cq_tbl_lock, flags);
+ dev->cq_tbl[cq->cq_handle % dev->dsr->caps.max_cq] = cq;
+ spin_unlock_irqrestore(&dev->cq_tbl_lock, flags);
+
+ if (context) {
+ cq->uar = &(to_vucontext(context)->uar);
+
+ /* Copy udata back. */
+ if (ib_copy_to_udata(udata, &cq->cq_handle, sizeof(__u32))) {
+ dev_warn(&dev->pdev->dev,
+ "failed to copy back udata\n");
+ pvrdma_destroy_cq(&cq->ibcq);
+ return ERR_PTR(-EINVAL);
+ }
+ }
+
+ return &cq->ibcq;
+
+err_page_dir:
+ pvrdma_page_dir_cleanup(dev, &cq->pdir);
+err_umem:
+ if (context)
+ ib_umem_release(cq->umem);
+err_cq:
+ atomic_dec(&dev->num_cqs);
+ kfree(cq);
+
+ return ERR_PTR(ret);
+}
+
+static void pvrdma_free_cq(struct pvrdma_dev *dev, struct pvrdma_cq *cq)
+{
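+	/*
+	 * Drop the creation reference and wait for any remaining users
+	 * (presumably the event-delivery path, which takes a reference
+	 * while it runs) to drop theirs before freeing.
+	 */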
+ atomic_dec(&cq->refcnt);
+ wait_event(cq->wait, !atomic_read(&cq->refcnt));
+
+ if (!cq->is_kernel)
+ ib_umem_release(cq->umem);
+
+ pvrdma_page_dir_cleanup(dev, &cq->pdir);
+ kfree(cq);
+}
+
+/**
+ * pvrdma_destroy_cq - destroy completion queue
+ * @cq: the completion queue to destroy.
+ *
+ * @return: 0 for success.
+ */
+int pvrdma_destroy_cq(struct ib_cq *cq)
+{
+ struct pvrdma_cq *vcq = to_vcq(cq);
+ union pvrdma_cmd_req req;
+ struct pvrdma_cmd_destroy_cq *cmd = &req.destroy_cq;
+ struct pvrdma_dev *dev = to_vdev(cq->device);
+ unsigned long flags;
+ int ret;
+
+ memset(cmd, 0, sizeof(*cmd));
+ cmd->hdr.cmd = PVRDMA_CMD_DESTROY_CQ;
+ cmd->cq_handle = vcq->cq_handle;
+
+ ret = pvrdma_cmd_post(dev, &req, NULL, 0);
+ if (ret < 0)
+ dev_warn(&dev->pdev->dev,
+ "could not destroy completion queue, error: %d\n",
+ ret);
+
+ /* free cq's resources */
+ spin_lock_irqsave(&dev->cq_tbl_lock, flags);
+	dev->cq_tbl[vcq->cq_handle % dev->dsr->caps.max_cq] = NULL;
+ spin_unlock_irqrestore(&dev->cq_tbl_lock, flags);
+
+ pvrdma_free_cq(dev, vcq);
+ atomic_dec(&dev->num_cqs);
+
+ return ret;
+}
+
+/**
+ * pvrdma_modify_cq - modify the CQ moderation parameters
+ * @cq: the CQ to modify
+ * @cq_count: number of CQEs that will trigger an event
+ * @cq_period: max period of time in usec before triggering an event
+ *
+ * @return: -EOPNOTSUPP as CQ modification is not supported.
+ */
+int pvrdma_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline struct pvrdma_cqe *get_cqe(struct pvrdma_cq *cq, int i)
+{
+ return (struct pvrdma_cqe *)pvrdma_page_dir_get_ptr(
+ &cq->pdir,
+ cq->offset +
+ sizeof(struct pvrdma_cqe) * i);
+}
+
+void _pvrdma_flush_cqe(struct pvrdma_qp *qp, struct pvrdma_cq *cq)
+{
+ int head;
+ int has_data;
+
+ if (!cq->is_kernel)
+ return;
+
+ /* Lock held */
+ has_data = pvrdma_idx_ring_has_data(&cq->ring_state->rx,
+ cq->ibcq.cqe, &head);
+ if (unlikely(has_data > 0)) {
+ int items;
+ int curr;
+ int tail = pvrdma_idx(&cq->ring_state->rx.prod_tail,
+ cq->ibcq.cqe);
+ struct pvrdma_cqe *cqe;
+ struct pvrdma_cqe *curr_cqe;
+
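+		/*
+		 * Compact the ring in place: walk backwards from the
+		 * newest entry, consuming CQEs that belong to the
+		 * flushed QP and sliding the surviving entries toward
+		 * the producer tail so no other QP's completion is lost.
+		 */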
+ items = (tail > head) ? (tail - head) :
+ (cq->ibcq.cqe - head + tail);
+ curr = --tail;
+ while (items-- > 0) {
+ if (curr < 0)
+ curr = cq->ibcq.cqe - 1;
+ if (tail < 0)
+ tail = cq->ibcq.cqe - 1;
+ curr_cqe = get_cqe(cq, curr);
+ if ((curr_cqe->qp & 0xFFFF) != qp->qp_handle) {
+ if (curr != tail) {
+ cqe = get_cqe(cq, tail);
+ *cqe = *curr_cqe;
+ }
+ tail--;
+ } else {
+ pvrdma_idx_ring_inc(
+ &cq->ring_state->rx.cons_head,
+ cq->ibcq.cqe);
+ }
+ curr--;
+ }
+ }
+}
+
+static int pvrdma_poll_one(struct pvrdma_cq *cq, struct pvrdma_qp **cur_qp,
+ struct ib_wc *wc)
+{
+ struct pvrdma_dev *dev = to_vdev(cq->ibcq.device);
+ int has_data;
+ unsigned int head;
+ bool tried = false;
+ struct pvrdma_cqe *cqe;
+
+retry:
+ has_data = pvrdma_idx_ring_has_data(&cq->ring_state->rx,
+ cq->ibcq.cqe, &head);
+ if (has_data == 0) {
+ if (tried)
+ return -EAGAIN;
+
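+		/*
+		 * The ring looked empty: ring the UAR poll doorbell so
+		 * the device flushes any pending completions into the
+		 * ring, then re-check exactly once.
+		 */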
+ pvrdma_write_uar_cq(dev, cq->cq_handle | PVRDMA_UAR_CQ_POLL);
+
+ tried = true;
+ goto retry;
+ } else if (has_data == PVRDMA_INVALID_IDX) {
+ dev_err(&dev->pdev->dev, "CQ ring state invalid\n");
+ return -EAGAIN;
+ }
+
+ cqe = get_cqe(cq, head);
+
+ /* Ensure cqe is valid. */
+ rmb();
+ if (dev->qp_tbl[cqe->qp & 0xffff])
+ *cur_qp = (struct pvrdma_qp *)dev->qp_tbl[cqe->qp & 0xffff];
+ else
+ return -EAGAIN;
+
+ wc->opcode = pvrdma_wc_opcode_to_ib(cqe->opcode);
+ wc->status = pvrdma_wc_status_to_ib(cqe->status);
+ wc->wr_id = cqe->wr_id;
+ wc->qp = &(*cur_qp)->ibqp;
+ wc->byte_len = cqe->byte_len;
+ wc->ex.imm_data = cqe->imm_data;
+ wc->src_qp = cqe->src_qp;
+ wc->wc_flags = pvrdma_wc_flags_to_ib(cqe->wc_flags);
+ wc->pkey_index = cqe->pkey_index;
+ wc->slid = cqe->slid;
+ wc->sl = cqe->sl;
+ wc->dlid_path_bits = cqe->dlid_path_bits;
+ wc->port_num = cqe->port_num;
+ wc->vendor_err = 0;
+
+ /* Update shared ring state */
+ pvrdma_idx_ring_inc(&cq->ring_state->rx.cons_head, cq->ibcq.cqe);
+
+ return 0;
+}
+
+/**
+ * pvrdma_poll_cq - poll for work completion queue entries
+ * @ibcq: completion queue
+ * @num_entries: the maximum number of entries
+ * @wc: pointer to work completion array
+ *
+ * @return: number of polled completion entries
+ */
+int pvrdma_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
+{
+ struct pvrdma_cq *cq = to_vcq(ibcq);
+ struct pvrdma_qp *cur_qp = NULL;
+ unsigned long flags;
+ int npolled;
+
+ if (num_entries < 1 || wc == NULL)
+ return 0;
+
+ spin_lock_irqsave(&cq->cq_lock, flags);
+ for (npolled = 0; npolled < num_entries; ++npolled) {
+ if (pvrdma_poll_one(cq, &cur_qp, wc + npolled))
+ break;
+ }
+
+ spin_unlock_irqrestore(&cq->cq_lock, flags);
+
+ /* Ensure we do not return errors from poll_cq */
+ return npolled;
+}
+
+/**
+ * pvrdma_resize_cq - resize CQ
+ * @ibcq: the completion queue
+ * @entries: CQ entries
+ * @udata: user data
+ *
+ * @return: -EOPNOTSUPP as CQ resize is not supported.
+ */
+int pvrdma_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
+{
+ return -EOPNOTSUPP;
+}
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_dev_api.h b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_dev_api.h
new file mode 100644
index 000000000000..c06768635d65
--- /dev/null
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_dev_api.h
@@ -0,0 +1,586 @@
+/*
+ * Copyright (c) 2012-2016 VMware, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of EITHER the GNU General Public License
+ * version 2 as published by the Free Software Foundation or the BSD
+ * 2-Clause License. This program is distributed in the hope that it
+ * will be useful, but WITHOUT ANY WARRANTY; WITHOUT EVEN THE IMPLIED
+ * WARRANTY OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE.
+ * See the GNU General Public License version 2 for more details at
+ * http://www.gnu.org/licenses/old-licenses/gpl-2.0.en.html.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program available in the file COPYING in the main
+ * directory of this source tree.
+ *
+ * The BSD 2-Clause License
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
+ * OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __PVRDMA_DEV_API_H__
+#define __PVRDMA_DEV_API_H__
+
+#include <linux/types.h>
+
+#include "pvrdma_verbs.h"
+
+#define PVRDMA_VERSION 17
+#define PVRDMA_BOARD_ID 1
+#define PVRDMA_REV_ID 1
+
+/*
+ * Masks and accessors for page directory, which is a two-level lookup:
+ * page directory -> page table -> page. Only one directory for now, but we
+ * could expand that easily. 9 bits for tables, 9 bits for pages, gives one
+ * gigabyte for memory regions and so forth.
+ */
+
+#define PVRDMA_PDIR_SHIFT 18
+#define PVRDMA_PTABLE_SHIFT 9
+#define PVRDMA_PAGE_DIR_DIR(x) (((x) >> PVRDMA_PDIR_SHIFT) & 0x1)
+#define PVRDMA_PAGE_DIR_TABLE(x) (((x) >> PVRDMA_PTABLE_SHIFT) & 0x1ff)
+#define PVRDMA_PAGE_DIR_PAGE(x) ((x) & 0x1ff)
+#define PVRDMA_PAGE_DIR_MAX_PAGES (1 * 512 * 512)
+#define PVRDMA_MAX_FAST_REG_PAGES 128
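+
+/*
+ * Worked example (assuming 4 KiB pages): page index 0x2a5ff decomposes
+ * as dir = (0x2a5ff >> 18) & 0x1 = 0, table = (0x2a5ff >> 9) & 0x1ff
+ * = 0x152, and page = 0x2a5ff & 0x1ff = 0x1ff.
+ */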
+
+/*
+ * Max MSI-X vectors.
+ */
+
+#define PVRDMA_MAX_INTERRUPTS 3
+
+/* Register offsets within PCI resource on BAR1. */
+#define PVRDMA_REG_VERSION 0x00 /* R: Version of device. */
+#define PVRDMA_REG_DSRLOW 0x04 /* W: Device shared region low PA. */
+#define PVRDMA_REG_DSRHIGH 0x08 /* W: Device shared region high PA. */
+#define PVRDMA_REG_CTL 0x0c /* W: PVRDMA_DEVICE_CTL */
+#define PVRDMA_REG_REQUEST 0x10 /* W: Indicate device request. */
+#define PVRDMA_REG_ERR 0x14 /* R: Device error. */
+#define PVRDMA_REG_ICR 0x18 /* R: Interrupt cause. */
+#define PVRDMA_REG_IMR 0x1c /* R/W: Interrupt mask. */
+#define PVRDMA_REG_MACL 0x20 /* R/W: MAC address low. */
+#define PVRDMA_REG_MACH 0x24 /* R/W: MAC address high. */
+
+/* Object flags. */
+#define PVRDMA_CQ_FLAG_ARMED_SOL BIT(0) /* Armed for solicited-only. */
+#define PVRDMA_CQ_FLAG_ARMED BIT(1) /* Armed. */
+#define PVRDMA_MR_FLAG_DMA BIT(0) /* DMA region. */
+#define PVRDMA_MR_FLAG_FRMR BIT(1) /* Fast reg memory region. */
+
+/*
+ * Atomic operation capability (masked versions are extended atomic
+ * operations).
+ */
+
+#define PVRDMA_ATOMIC_OP_COMP_SWAP BIT(0) /* Compare and swap. */
+#define PVRDMA_ATOMIC_OP_FETCH_ADD BIT(1) /* Fetch and add. */
+#define PVRDMA_ATOMIC_OP_MASK_COMP_SWAP BIT(2) /* Masked compare and swap. */
+#define PVRDMA_ATOMIC_OP_MASK_FETCH_ADD BIT(3) /* Masked fetch and add. */
+
+/*
+ * Base Memory Management Extension flags to support Fast Reg Memory Regions
+ * and Fast Reg Work Requests. Each flag represents a verb operation and we
+ * must support all of them to qualify for the BMME device cap.
+ */
+
+#define PVRDMA_BMME_FLAG_LOCAL_INV BIT(0) /* Local Invalidate. */
+#define PVRDMA_BMME_FLAG_REMOTE_INV BIT(1) /* Remote Invalidate. */
+#define PVRDMA_BMME_FLAG_FAST_REG_WR BIT(2) /* Fast Reg Work Request. */
+
+/*
+ * GID types. The interpretation of the gid_types bit field in the device
+ * capabilities will depend on the device mode. For now, the device only
+ * supports RoCE as mode, so only the different GID types for RoCE are
+ * defined.
+ */
+
+#define PVRDMA_GID_TYPE_FLAG_ROCE_V1 BIT(0)
+#define PVRDMA_GID_TYPE_FLAG_ROCE_V2 BIT(1)
+
+enum pvrdma_pci_resource {
+ PVRDMA_PCI_RESOURCE_MSIX, /* BAR0: MSI-X, MMIO. */
+ PVRDMA_PCI_RESOURCE_REG, /* BAR1: Registers, MMIO. */
+ PVRDMA_PCI_RESOURCE_UAR, /* BAR2: UAR pages, MMIO, 64-bit. */
+ PVRDMA_PCI_RESOURCE_LAST, /* Last. */
+};
+
+enum pvrdma_device_ctl {
+ PVRDMA_DEVICE_CTL_ACTIVATE, /* Activate device. */
+ PVRDMA_DEVICE_CTL_QUIESCE, /* Quiesce device. */
+ PVRDMA_DEVICE_CTL_RESET, /* Reset device. */
+};
+
+enum pvrdma_intr_vector {
+ PVRDMA_INTR_VECTOR_RESPONSE, /* Command response. */
+ PVRDMA_INTR_VECTOR_ASYNC, /* Async events. */
+ PVRDMA_INTR_VECTOR_CQ, /* CQ notification. */
+ /* Additional CQ notification vectors. */
+};
+
+enum pvrdma_intr_cause {
+ PVRDMA_INTR_CAUSE_RESPONSE = (1 << PVRDMA_INTR_VECTOR_RESPONSE),
+ PVRDMA_INTR_CAUSE_ASYNC = (1 << PVRDMA_INTR_VECTOR_ASYNC),
+ PVRDMA_INTR_CAUSE_CQ = (1 << PVRDMA_INTR_VECTOR_CQ),
+};
+
+enum pvrdma_intr_type {
+ PVRDMA_INTR_TYPE_INTX, /* Legacy. */
+ PVRDMA_INTR_TYPE_MSI, /* MSI. */
+ PVRDMA_INTR_TYPE_MSIX, /* MSI-X. */
+};
+
+enum pvrdma_gos_bits {
+ PVRDMA_GOS_BITS_UNK, /* Unknown. */
+ PVRDMA_GOS_BITS_32, /* 32-bit. */
+ PVRDMA_GOS_BITS_64, /* 64-bit. */
+};
+
+enum pvrdma_gos_type {
+ PVRDMA_GOS_TYPE_UNK, /* Unknown. */
+ PVRDMA_GOS_TYPE_LINUX, /* Linux. */
+};
+
+enum pvrdma_device_mode {
+ PVRDMA_DEVICE_MODE_ROCE, /* RoCE. */
+ PVRDMA_DEVICE_MODE_IWARP, /* iWarp. */
+ PVRDMA_DEVICE_MODE_IB, /* InfiniBand. */
+};
+
+struct pvrdma_gos_info {
+ u32 gos_bits:2; /* W: PVRDMA_GOS_BITS_ */
+ u32 gos_type:4; /* W: PVRDMA_GOS_TYPE_ */
+ u32 gos_ver:16; /* W: Guest OS version. */
+ u32 gos_misc:10; /* W: Other. */
+ u32 pad; /* Pad to 8-byte alignment. */
+};
+
+struct pvrdma_device_caps {
+ u64 fw_ver; /* R: Query device. */
+ __be64 node_guid;
+ __be64 sys_image_guid;
+ u64 max_mr_size;
+ u64 page_size_cap;
+ u64 atomic_arg_sizes; /* EX verbs. */
+ u32 ex_comp_mask; /* EX verbs. */
+ u32 device_cap_flags2; /* EX verbs. */
+ u32 max_fa_bit_boundary; /* EX verbs. */
+ u32 log_max_atomic_inline_arg; /* EX verbs. */
+ u32 vendor_id;
+ u32 vendor_part_id;
+ u32 hw_ver;
+ u32 max_qp;
+ u32 max_qp_wr;
+ u32 device_cap_flags;
+ u32 max_sge;
+ u32 max_sge_rd;
+ u32 max_cq;
+ u32 max_cqe;
+ u32 max_mr;
+ u32 max_pd;
+ u32 max_qp_rd_atom;
+ u32 max_ee_rd_atom;
+ u32 max_res_rd_atom;
+ u32 max_qp_init_rd_atom;
+ u32 max_ee_init_rd_atom;
+ u32 max_ee;
+ u32 max_rdd;
+ u32 max_mw;
+ u32 max_raw_ipv6_qp;
+ u32 max_raw_ethy_qp;
+ u32 max_mcast_grp;
+ u32 max_mcast_qp_attach;
+ u32 max_total_mcast_qp_attach;
+ u32 max_ah;
+ u32 max_fmr;
+ u32 max_map_per_fmr;
+ u32 max_srq;
+ u32 max_srq_wr;
+ u32 max_srq_sge;
+ u32 max_uar;
+ u32 gid_tbl_len;
+ u16 max_pkeys;
+ u8 local_ca_ack_delay;
+ u8 phys_port_cnt;
+ u8 mode; /* PVRDMA_DEVICE_MODE_ */
+ u8 atomic_ops; /* PVRDMA_ATOMIC_OP_* bits */
+ u8 bmme_flags; /* FRWR Mem Mgmt Extensions */
+ u8 gid_types; /* PVRDMA_GID_TYPE_FLAG_ */
+ u8 reserved[4];
+};
+
+struct pvrdma_ring_page_info {
+ u32 num_pages; /* Num pages incl. header. */
+ u32 reserved; /* Reserved. */
+ u64 pdir_dma; /* Page directory PA. */
+};
+
+#pragma pack(push, 1)
+
+struct pvrdma_device_shared_region {
+ u32 driver_version; /* W: Driver version. */
+ u32 pad; /* Pad to 8-byte align. */
+ struct pvrdma_gos_info gos_info; /* W: Guest OS information. */
+ u64 cmd_slot_dma; /* W: Command slot address. */
+ u64 resp_slot_dma; /* W: Response slot address. */
+ struct pvrdma_ring_page_info async_ring_pages;
+ /* W: Async ring page info. */
+ struct pvrdma_ring_page_info cq_ring_pages;
+ /* W: CQ ring page info. */
+ u32 uar_pfn; /* W: UAR pageframe. */
+ u32 pad2; /* Pad to 8-byte align. */
+ struct pvrdma_device_caps caps; /* R: Device capabilities. */
+};
+
+#pragma pack(pop)
+
+/* Event types. Currently a 1:1 mapping with enum ib_event. */
+enum pvrdma_eqe_type {
+ PVRDMA_EVENT_CQ_ERR,
+ PVRDMA_EVENT_QP_FATAL,
+ PVRDMA_EVENT_QP_REQ_ERR,
+ PVRDMA_EVENT_QP_ACCESS_ERR,
+ PVRDMA_EVENT_COMM_EST,
+ PVRDMA_EVENT_SQ_DRAINED,
+ PVRDMA_EVENT_PATH_MIG,
+ PVRDMA_EVENT_PATH_MIG_ERR,
+ PVRDMA_EVENT_DEVICE_FATAL,
+ PVRDMA_EVENT_PORT_ACTIVE,
+ PVRDMA_EVENT_PORT_ERR,
+ PVRDMA_EVENT_LID_CHANGE,
+ PVRDMA_EVENT_PKEY_CHANGE,
+ PVRDMA_EVENT_SM_CHANGE,
+ PVRDMA_EVENT_SRQ_ERR,
+ PVRDMA_EVENT_SRQ_LIMIT_REACHED,
+ PVRDMA_EVENT_QP_LAST_WQE_REACHED,
+ PVRDMA_EVENT_CLIENT_REREGISTER,
+ PVRDMA_EVENT_GID_CHANGE,
+};
+
+/* Event queue element. */
+struct pvrdma_eqe {
+ u32 type; /* Event type. */
+ u32 info; /* Handle, other. */
+};
+
+/* CQ notification queue element. */
+struct pvrdma_cqne {
+ u32 info; /* Handle */
+};
+
+enum {
+ PVRDMA_CMD_FIRST,
+ PVRDMA_CMD_QUERY_PORT = PVRDMA_CMD_FIRST,
+ PVRDMA_CMD_QUERY_PKEY,
+ PVRDMA_CMD_CREATE_PD,
+ PVRDMA_CMD_DESTROY_PD,
+ PVRDMA_CMD_CREATE_MR,
+ PVRDMA_CMD_DESTROY_MR,
+ PVRDMA_CMD_CREATE_CQ,
+ PVRDMA_CMD_RESIZE_CQ,
+ PVRDMA_CMD_DESTROY_CQ,
+ PVRDMA_CMD_CREATE_QP,
+ PVRDMA_CMD_MODIFY_QP,
+ PVRDMA_CMD_QUERY_QP,
+ PVRDMA_CMD_DESTROY_QP,
+ PVRDMA_CMD_CREATE_UC,
+ PVRDMA_CMD_DESTROY_UC,
+ PVRDMA_CMD_CREATE_BIND,
+ PVRDMA_CMD_DESTROY_BIND,
+ PVRDMA_CMD_MAX,
+};
+
+enum {
+ PVRDMA_CMD_FIRST_RESP = (1 << 31),
+ PVRDMA_CMD_QUERY_PORT_RESP = PVRDMA_CMD_FIRST_RESP,
+ PVRDMA_CMD_QUERY_PKEY_RESP,
+ PVRDMA_CMD_CREATE_PD_RESP,
+ PVRDMA_CMD_DESTROY_PD_RESP_NOOP,
+ PVRDMA_CMD_CREATE_MR_RESP,
+ PVRDMA_CMD_DESTROY_MR_RESP_NOOP,
+ PVRDMA_CMD_CREATE_CQ_RESP,
+ PVRDMA_CMD_RESIZE_CQ_RESP,
+ PVRDMA_CMD_DESTROY_CQ_RESP_NOOP,
+ PVRDMA_CMD_CREATE_QP_RESP,
+ PVRDMA_CMD_MODIFY_QP_RESP,
+ PVRDMA_CMD_QUERY_QP_RESP,
+ PVRDMA_CMD_DESTROY_QP_RESP,
+ PVRDMA_CMD_CREATE_UC_RESP,
+ PVRDMA_CMD_DESTROY_UC_RESP_NOOP,
+ PVRDMA_CMD_CREATE_BIND_RESP_NOOP,
+ PVRDMA_CMD_DESTROY_BIND_RESP_NOOP,
+ PVRDMA_CMD_MAX_RESP,
+};
+
+struct pvrdma_cmd_hdr {
+ u64 response; /* Key for response lookup. */
+ u32 cmd; /* PVRDMA_CMD_ */
+ u32 reserved; /* Reserved. */
+};
+
+struct pvrdma_cmd_resp_hdr {
+ u64 response; /* From cmd hdr. */
+ u32 ack; /* PVRDMA_CMD_XXX_RESP */
+ u8 err; /* Error. */
+ u8 reserved[3]; /* Reserved. */
+};
+
+struct pvrdma_cmd_query_port {
+ struct pvrdma_cmd_hdr hdr;
+ u8 port_num;
+ u8 reserved[7];
+};
+
+struct pvrdma_cmd_query_port_resp {
+ struct pvrdma_cmd_resp_hdr hdr;
+ struct pvrdma_port_attr attrs;
+};
+
+struct pvrdma_cmd_query_pkey {
+ struct pvrdma_cmd_hdr hdr;
+ u8 port_num;
+ u8 index;
+ u8 reserved[6];
+};
+
+struct pvrdma_cmd_query_pkey_resp {
+ struct pvrdma_cmd_resp_hdr hdr;
+ u16 pkey;
+ u8 reserved[6];
+};
+
+struct pvrdma_cmd_create_uc {
+ struct pvrdma_cmd_hdr hdr;
+ u32 pfn; /* UAR page frame number */
+ u8 reserved[4];
+};
+
+struct pvrdma_cmd_create_uc_resp {
+ struct pvrdma_cmd_resp_hdr hdr;
+ u32 ctx_handle;
+ u8 reserved[4];
+};
+
+struct pvrdma_cmd_destroy_uc {
+ struct pvrdma_cmd_hdr hdr;
+ u32 ctx_handle;
+ u8 reserved[4];
+};
+
+struct pvrdma_cmd_create_pd {
+ struct pvrdma_cmd_hdr hdr;
+ u32 ctx_handle;
+ u8 reserved[4];
+};
+
+struct pvrdma_cmd_create_pd_resp {
+ struct pvrdma_cmd_resp_hdr hdr;
+ u32 pd_handle;
+ u8 reserved[4];
+};
+
+struct pvrdma_cmd_destroy_pd {
+ struct pvrdma_cmd_hdr hdr;
+ u32 pd_handle;
+ u8 reserved[4];
+};
+
+struct pvrdma_cmd_create_mr {
+ struct pvrdma_cmd_hdr hdr;
+ u64 start;
+ u64 length;
+ u64 pdir_dma;
+ u32 pd_handle;
+ u32 access_flags;
+ u32 flags;
+ u32 nchunks;
+};
+
+struct pvrdma_cmd_create_mr_resp {
+ struct pvrdma_cmd_resp_hdr hdr;
+ u32 mr_handle;
+ u32 lkey;
+ u32 rkey;
+ u8 reserved[4];
+};
+
+struct pvrdma_cmd_destroy_mr {
+ struct pvrdma_cmd_hdr hdr;
+ u32 mr_handle;
+ u8 reserved[4];
+};
+
+struct pvrdma_cmd_create_cq {
+ struct pvrdma_cmd_hdr hdr;
+ u64 pdir_dma;
+ u32 ctx_handle;
+ u32 cqe;
+ u32 nchunks;
+ u8 reserved[4];
+};
+
+struct pvrdma_cmd_create_cq_resp {
+ struct pvrdma_cmd_resp_hdr hdr;
+ u32 cq_handle;
+ u32 cqe;
+};
+
+struct pvrdma_cmd_resize_cq {
+ struct pvrdma_cmd_hdr hdr;
+ u32 cq_handle;
+ u32 cqe;
+};
+
+struct pvrdma_cmd_resize_cq_resp {
+ struct pvrdma_cmd_resp_hdr hdr;
+ u32 cqe;
+ u8 reserved[4];
+};
+
+struct pvrdma_cmd_destroy_cq {
+ struct pvrdma_cmd_hdr hdr;
+ u32 cq_handle;
+ u8 reserved[4];
+};
+
+struct pvrdma_cmd_create_qp {
+ struct pvrdma_cmd_hdr hdr;
+ u64 pdir_dma;
+ u32 pd_handle;
+ u32 send_cq_handle;
+ u32 recv_cq_handle;
+ u32 srq_handle;
+ u32 max_send_wr;
+ u32 max_recv_wr;
+ u32 max_send_sge;
+ u32 max_recv_sge;
+ u32 max_inline_data;
+ u32 lkey;
+ u32 access_flags;
+ u16 total_chunks;
+ u16 send_chunks;
+ u16 max_atomic_arg;
+ u8 sq_sig_all;
+ u8 qp_type;
+ u8 is_srq;
+ u8 reserved[3];
+};
+
+struct pvrdma_cmd_create_qp_resp {
+ struct pvrdma_cmd_resp_hdr hdr;
+ u32 qpn;
+ u32 max_send_wr;
+ u32 max_recv_wr;
+ u32 max_send_sge;
+ u32 max_recv_sge;
+ u32 max_inline_data;
+};
+
+struct pvrdma_cmd_modify_qp {
+ struct pvrdma_cmd_hdr hdr;
+ u32 qp_handle;
+ u32 attr_mask;
+ struct pvrdma_qp_attr attrs;
+};
+
+struct pvrdma_cmd_query_qp {
+ struct pvrdma_cmd_hdr hdr;
+ u32 qp_handle;
+ u32 attr_mask;
+};
+
+struct pvrdma_cmd_query_qp_resp {
+ struct pvrdma_cmd_resp_hdr hdr;
+ struct pvrdma_qp_attr attrs;
+};
+
+struct pvrdma_cmd_destroy_qp {
+ struct pvrdma_cmd_hdr hdr;
+ u32 qp_handle;
+ u8 reserved[4];
+};
+
+struct pvrdma_cmd_destroy_qp_resp {
+ struct pvrdma_cmd_resp_hdr hdr;
+ u32 events_reported;
+ u8 reserved[4];
+};
+
+struct pvrdma_cmd_create_bind {
+ struct pvrdma_cmd_hdr hdr;
+ u32 mtu;
+ u32 vlan;
+ u32 index;
+ u8 new_gid[16];
+ u8 gid_type;
+ u8 reserved[3];
+};
+
+struct pvrdma_cmd_destroy_bind {
+ struct pvrdma_cmd_hdr hdr;
+ u32 index;
+ u8 dest_gid[16];
+ u8 reserved[4];
+};
+
+union pvrdma_cmd_req {
+ struct pvrdma_cmd_hdr hdr;
+ struct pvrdma_cmd_query_port query_port;
+ struct pvrdma_cmd_query_pkey query_pkey;
+ struct pvrdma_cmd_create_uc create_uc;
+ struct pvrdma_cmd_destroy_uc destroy_uc;
+ struct pvrdma_cmd_create_pd create_pd;
+ struct pvrdma_cmd_destroy_pd destroy_pd;
+ struct pvrdma_cmd_create_mr create_mr;
+ struct pvrdma_cmd_destroy_mr destroy_mr;
+ struct pvrdma_cmd_create_cq create_cq;
+ struct pvrdma_cmd_resize_cq resize_cq;
+ struct pvrdma_cmd_destroy_cq destroy_cq;
+ struct pvrdma_cmd_create_qp create_qp;
+ struct pvrdma_cmd_modify_qp modify_qp;
+ struct pvrdma_cmd_query_qp query_qp;
+ struct pvrdma_cmd_destroy_qp destroy_qp;
+ struct pvrdma_cmd_create_bind create_bind;
+ struct pvrdma_cmd_destroy_bind destroy_bind;
+};
+
+union pvrdma_cmd_resp {
+ struct pvrdma_cmd_resp_hdr hdr;
+ struct pvrdma_cmd_query_port_resp query_port_resp;
+ struct pvrdma_cmd_query_pkey_resp query_pkey_resp;
+ struct pvrdma_cmd_create_uc_resp create_uc_resp;
+ struct pvrdma_cmd_create_pd_resp create_pd_resp;
+ struct pvrdma_cmd_create_mr_resp create_mr_resp;
+ struct pvrdma_cmd_create_cq_resp create_cq_resp;
+ struct pvrdma_cmd_resize_cq_resp resize_cq_resp;
+ struct pvrdma_cmd_create_qp_resp create_qp_resp;
+ struct pvrdma_cmd_query_qp_resp query_qp_resp;
+ struct pvrdma_cmd_destroy_qp_resp destroy_qp_resp;
+};
+
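+/*
+ * A typical verb implementation fills in one member of pvrdma_cmd_req,
+ * posts it over the command channel and decodes the matching response.
+ * A minimal sketch, using pvrdma_cmd_post() as it is used elsewhere in
+ * this driver:
+ *
+ *	union pvrdma_cmd_req req;
+ *	union pvrdma_cmd_resp rsp;
+ *
+ *	memset(&req, 0, sizeof(req));
+ *	req.create_pd.hdr.cmd = PVRDMA_CMD_CREATE_PD;
+ *	ret = pvrdma_cmd_post(dev, &req, &rsp, PVRDMA_CMD_CREATE_PD_RESP);
+ */
+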
+#endif /* __PVRDMA_DEV_API_H__ */
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_doorbell.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_doorbell.c
new file mode 100644
index 000000000000..bf51357ea3aa
--- /dev/null
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_doorbell.c
@@ -0,0 +1,127 @@
+/*
+ * Copyright (c) 2012-2016 VMware, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of EITHER the GNU General Public License
+ * version 2 as published by the Free Software Foundation or the BSD
+ * 2-Clause License. This program is distributed in the hope that it
+ * will be useful, but WITHOUT ANY WARRANTY; WITHOUT EVEN THE IMPLIED
+ * WARRANTY OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE.
+ * See the GNU General Public License version 2 for more details at
+ * http://www.gnu.org/licenses/old-licenses/gpl-2.0.en.html.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program available in the file COPYING in the main
+ * directory of this source tree.
+ *
+ * The BSD 2-Clause License
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
+ * OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/bitmap.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+
+#include "pvrdma.h"
+
+int pvrdma_uar_table_init(struct pvrdma_dev *dev)
+{
+ u32 num = dev->dsr->caps.max_uar;
+ u32 mask = num - 1;
+ struct pvrdma_id_table *tbl = &dev->uar_table.tbl;
+
+ if (!is_power_of_2(num))
+ return -EINVAL;
+
+ tbl->last = 0;
+ tbl->top = 0;
+ tbl->max = num;
+ tbl->mask = mask;
+ spin_lock_init(&tbl->lock);
+ tbl->table = kcalloc(BITS_TO_LONGS(num), sizeof(long), GFP_KERNEL);
+ if (!tbl->table)
+ return -ENOMEM;
+
+ /* 0th UAR is taken by the device. */
+ set_bit(0, tbl->table);
+
+ return 0;
+}
+
+void pvrdma_uar_table_cleanup(struct pvrdma_dev *dev)
+{
+ struct pvrdma_id_table *tbl = &dev->uar_table.tbl;
+
+ kfree(tbl->table);
+}
+
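+/*
+ * Allocate a free UAR index from the bitmap ID table. The search starts
+ * at tbl->last and wraps back to the beginning of the table if no free
+ * bit is found past it.
+ */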
+int pvrdma_uar_alloc(struct pvrdma_dev *dev, struct pvrdma_uar_map *uar)
+{
+ struct pvrdma_id_table *tbl;
+ unsigned long flags;
+ u32 obj;
+
+ tbl = &dev->uar_table.tbl;
+
+ spin_lock_irqsave(&tbl->lock, flags);
+ obj = find_next_zero_bit(tbl->table, tbl->max, tbl->last);
+ if (obj >= tbl->max) {
+ tbl->top = (tbl->top + tbl->max) & tbl->mask;
+ obj = find_first_zero_bit(tbl->table, tbl->max);
+ }
+
+ if (obj >= tbl->max) {
+ spin_unlock_irqrestore(&tbl->lock, flags);
+ return -ENOMEM;
+ }
+
+ set_bit(obj, tbl->table);
+ obj |= tbl->top;
+
+ spin_unlock_irqrestore(&tbl->lock, flags);
+
+ uar->index = obj;
+ uar->pfn = (pci_resource_start(dev->pdev, PVRDMA_PCI_RESOURCE_UAR) >>
+ PAGE_SHIFT) + uar->index;
+
+ return 0;
+}
+
+void pvrdma_uar_free(struct pvrdma_dev *dev, struct pvrdma_uar_map *uar)
+{
+ struct pvrdma_id_table *tbl = &dev->uar_table.tbl;
+ unsigned long flags;
+ u32 obj;
+
+ obj = uar->index & (tbl->max - 1);
+ spin_lock_irqsave(&tbl->lock, flags);
+ clear_bit(obj, tbl->table);
+ tbl->last = min(tbl->last, obj);
+ tbl->top = (tbl->top + tbl->max) & tbl->mask;
+ spin_unlock_irqrestore(&tbl->lock, flags);
+}
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c
new file mode 100644
index 000000000000..231a1ce1f4be
--- /dev/null
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c
@@ -0,0 +1,1211 @@
+/*
+ * Copyright (c) 2012-2016 VMware, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of EITHER the GNU General Public License
+ * version 2 as published by the Free Software Foundation or the BSD
+ * 2-Clause License. This program is distributed in the hope that it
+ * will be useful, but WITHOUT ANY WARRANTY; WITHOUT EVEN THE IMPLIED
+ * WARRANTY OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE.
+ * See the GNU General Public License version 2 for more details at
+ * http://www.gnu.org/licenses/old-licenses/gpl-2.0.en.html.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program available in the file COPYING in the main
+ * directory of this source tree.
+ *
+ * The BSD 2-Clause License
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
+ * OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/errno.h>
+#include <linux/inetdevice.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <rdma/ib_addr.h>
+#include <rdma/ib_smi.h>
+#include <rdma/ib_user_verbs.h>
+#include <net/addrconf.h>
+
+#include "pvrdma.h"
+
+#define DRV_NAME "vmw_pvrdma"
+#define DRV_VERSION "1.0.0.0-k"
+
+static DEFINE_MUTEX(pvrdma_device_list_lock);
+static LIST_HEAD(pvrdma_device_list);
+static struct workqueue_struct *event_wq;
+
+static int pvrdma_add_gid(struct ib_device *ibdev,
+ u8 port_num,
+ unsigned int index,
+ const union ib_gid *gid,
+ const struct ib_gid_attr *attr,
+ void **context);
+static int pvrdma_del_gid(struct ib_device *ibdev,
+ u8 port_num,
+ unsigned int index,
+ void **context);
+
+static ssize_t show_hca(struct device *device, struct device_attribute *attr,
+ char *buf)
+{
+ return sprintf(buf, "VMW_PVRDMA-%s\n", DRV_VERSION);
+}
+
+static ssize_t show_rev(struct device *device, struct device_attribute *attr,
+ char *buf)
+{
+ return sprintf(buf, "%d\n", PVRDMA_REV_ID);
+}
+
+static ssize_t show_board(struct device *device, struct device_attribute *attr,
+ char *buf)
+{
+ return sprintf(buf, "%d\n", PVRDMA_BOARD_ID);
+}
+
+static DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL);
+static DEVICE_ATTR(hca_type, S_IRUGO, show_hca, NULL);
+static DEVICE_ATTR(board_id, S_IRUGO, show_board, NULL);
+
+static struct device_attribute *pvrdma_class_attributes[] = {
+ &dev_attr_hw_rev,
+ &dev_attr_hca_type,
+ &dev_attr_board_id
+};
+
+static void pvrdma_get_fw_ver_str(struct ib_device *device, char *str,
+ size_t str_len)
+{
+ struct pvrdma_dev *dev =
+ container_of(device, struct pvrdma_dev, ib_dev);
+ snprintf(str, str_len, "%d.%d.%d\n",
+ (int) (dev->dsr->caps.fw_ver >> 32),
+ (int) (dev->dsr->caps.fw_ver >> 16) & 0xffff,
+ (int) dev->dsr->caps.fw_ver & 0xffff);
+}
+
+static int pvrdma_init_device(struct pvrdma_dev *dev)
+{
+	/* Initialize device state: locks, command semaphore, resource counters. */
+ spin_lock_init(&dev->cmd_lock);
+ sema_init(&dev->cmd_sema, 1);
+ atomic_set(&dev->num_qps, 0);
+ atomic_set(&dev->num_cqs, 0);
+ atomic_set(&dev->num_pds, 0);
+ atomic_set(&dev->num_ahs, 0);
+
+ return 0;
+}
+
+static int pvrdma_port_immutable(struct ib_device *ibdev, u8 port_num,
+ struct ib_port_immutable *immutable)
+{
+ struct ib_port_attr attr;
+ int err;
+
+ err = pvrdma_query_port(ibdev, port_num, &attr);
+ if (err)
+ return err;
+
+ immutable->pkey_tbl_len = attr.pkey_tbl_len;
+ immutable->gid_tbl_len = attr.gid_tbl_len;
+ immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE;
+ immutable->max_mad_size = IB_MGMT_MAD_SIZE;
+ return 0;
+}
+
+static struct net_device *pvrdma_get_netdev(struct ib_device *ibdev,
+ u8 port_num)
+{
+ struct net_device *netdev;
+ struct pvrdma_dev *dev = to_vdev(ibdev);
+
+ if (port_num != 1)
+ return NULL;
+
+ rcu_read_lock();
+ netdev = dev->netdev;
+ if (netdev)
+ dev_hold(netdev);
+ rcu_read_unlock();
+
+ return netdev;
+}
+
+static int pvrdma_register_device(struct pvrdma_dev *dev)
+{
+	int ret = -ENOMEM;
+ int i = 0;
+
+ strlcpy(dev->ib_dev.name, "vmw_pvrdma%d", IB_DEVICE_NAME_MAX);
+ dev->ib_dev.node_guid = dev->dsr->caps.node_guid;
+ dev->sys_image_guid = dev->dsr->caps.sys_image_guid;
+ dev->flags = 0;
+ dev->ib_dev.owner = THIS_MODULE;
+ dev->ib_dev.num_comp_vectors = 1;
+ dev->ib_dev.dma_device = &dev->pdev->dev;
+ dev->ib_dev.uverbs_abi_ver = PVRDMA_UVERBS_ABI_VERSION;
+ dev->ib_dev.uverbs_cmd_mask =
+ (1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
+ (1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) |
+ (1ull << IB_USER_VERBS_CMD_QUERY_PORT) |
+ (1ull << IB_USER_VERBS_CMD_ALLOC_PD) |
+ (1ull << IB_USER_VERBS_CMD_DEALLOC_PD) |
+ (1ull << IB_USER_VERBS_CMD_REG_MR) |
+ (1ull << IB_USER_VERBS_CMD_DEREG_MR) |
+ (1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
+ (1ull << IB_USER_VERBS_CMD_CREATE_CQ) |
+ (1ull << IB_USER_VERBS_CMD_POLL_CQ) |
+ (1ull << IB_USER_VERBS_CMD_REQ_NOTIFY_CQ) |
+ (1ull << IB_USER_VERBS_CMD_DESTROY_CQ) |
+ (1ull << IB_USER_VERBS_CMD_CREATE_QP) |
+ (1ull << IB_USER_VERBS_CMD_MODIFY_QP) |
+ (1ull << IB_USER_VERBS_CMD_QUERY_QP) |
+ (1ull << IB_USER_VERBS_CMD_DESTROY_QP) |
+ (1ull << IB_USER_VERBS_CMD_POST_SEND) |
+ (1ull << IB_USER_VERBS_CMD_POST_RECV) |
+ (1ull << IB_USER_VERBS_CMD_CREATE_AH) |
+ (1ull << IB_USER_VERBS_CMD_DESTROY_AH);
+
+ dev->ib_dev.node_type = RDMA_NODE_IB_CA;
+ dev->ib_dev.phys_port_cnt = dev->dsr->caps.phys_port_cnt;
+
+ dev->ib_dev.query_device = pvrdma_query_device;
+ dev->ib_dev.query_port = pvrdma_query_port;
+ dev->ib_dev.query_gid = pvrdma_query_gid;
+ dev->ib_dev.query_pkey = pvrdma_query_pkey;
+ dev->ib_dev.modify_port = pvrdma_modify_port;
+ dev->ib_dev.alloc_ucontext = pvrdma_alloc_ucontext;
+ dev->ib_dev.dealloc_ucontext = pvrdma_dealloc_ucontext;
+ dev->ib_dev.mmap = pvrdma_mmap;
+ dev->ib_dev.alloc_pd = pvrdma_alloc_pd;
+ dev->ib_dev.dealloc_pd = pvrdma_dealloc_pd;
+ dev->ib_dev.create_ah = pvrdma_create_ah;
+ dev->ib_dev.destroy_ah = pvrdma_destroy_ah;
+ dev->ib_dev.create_qp = pvrdma_create_qp;
+ dev->ib_dev.modify_qp = pvrdma_modify_qp;
+ dev->ib_dev.query_qp = pvrdma_query_qp;
+ dev->ib_dev.destroy_qp = pvrdma_destroy_qp;
+ dev->ib_dev.post_send = pvrdma_post_send;
+ dev->ib_dev.post_recv = pvrdma_post_recv;
+ dev->ib_dev.create_cq = pvrdma_create_cq;
+ dev->ib_dev.modify_cq = pvrdma_modify_cq;
+ dev->ib_dev.resize_cq = pvrdma_resize_cq;
+ dev->ib_dev.destroy_cq = pvrdma_destroy_cq;
+ dev->ib_dev.poll_cq = pvrdma_poll_cq;
+ dev->ib_dev.req_notify_cq = pvrdma_req_notify_cq;
+ dev->ib_dev.get_dma_mr = pvrdma_get_dma_mr;
+ dev->ib_dev.reg_user_mr = pvrdma_reg_user_mr;
+ dev->ib_dev.dereg_mr = pvrdma_dereg_mr;
+ dev->ib_dev.alloc_mr = pvrdma_alloc_mr;
+ dev->ib_dev.map_mr_sg = pvrdma_map_mr_sg;
+ dev->ib_dev.add_gid = pvrdma_add_gid;
+ dev->ib_dev.del_gid = pvrdma_del_gid;
+ dev->ib_dev.get_netdev = pvrdma_get_netdev;
+ dev->ib_dev.get_port_immutable = pvrdma_port_immutable;
+ dev->ib_dev.get_link_layer = pvrdma_port_link_layer;
+ dev->ib_dev.get_dev_fw_str = pvrdma_get_fw_ver_str;
+
+ mutex_init(&dev->port_mutex);
+ spin_lock_init(&dev->desc_lock);
+
+ dev->cq_tbl = kcalloc(dev->dsr->caps.max_cq, sizeof(void *),
+ GFP_KERNEL);
+ if (!dev->cq_tbl)
+ return ret;
+ spin_lock_init(&dev->cq_tbl_lock);
+
+ dev->qp_tbl = kcalloc(dev->dsr->caps.max_qp, sizeof(void *),
+ GFP_KERNEL);
+ if (!dev->qp_tbl)
+ goto err_cq_free;
+ spin_lock_init(&dev->qp_tbl_lock);
+
+ ret = ib_register_device(&dev->ib_dev, NULL);
+ if (ret)
+ goto err_qp_free;
+
+ for (i = 0; i < ARRAY_SIZE(pvrdma_class_attributes); ++i) {
+ ret = device_create_file(&dev->ib_dev.dev,
+ pvrdma_class_attributes[i]);
+ if (ret)
+ goto err_class;
+ }
+
+ dev->ib_active = true;
+
+ return 0;
+
+err_class:
+ ib_unregister_device(&dev->ib_dev);
+err_qp_free:
+ kfree(dev->qp_tbl);
+err_cq_free:
+ kfree(dev->cq_tbl);
+
+ return ret;
+}
+
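+/*
+ * Interrupt 0 signals command-response completion. In non-MSI-X modes the
+ * interrupt cause must first be read from the ICR register.
+ */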
+static irqreturn_t pvrdma_intr0_handler(int irq, void *dev_id)
+{
+ u32 icr = PVRDMA_INTR_CAUSE_RESPONSE;
+ struct pvrdma_dev *dev = dev_id;
+
+ dev_dbg(&dev->pdev->dev, "interrupt 0 (response) handler\n");
+
+ if (dev->intr.type != PVRDMA_INTR_TYPE_MSIX) {
+ /* Legacy intr */
+ icr = pvrdma_read_reg(dev, PVRDMA_REG_ICR);
+ if (icr == 0)
+ return IRQ_NONE;
+ }
+
+ if (icr == PVRDMA_INTR_CAUSE_RESPONSE)
+ complete(&dev->cmd_done);
+
+ return IRQ_HANDLED;
+}
+
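+/*
+ * Look up the QP under the table lock and hold a reference so that it
+ * cannot be freed while its event handler runs; the destroy path waits
+ * for the refcount to drop to zero.
+ */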
+static void pvrdma_qp_event(struct pvrdma_dev *dev, u32 qpn, int type)
+{
+ struct pvrdma_qp *qp;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->qp_tbl_lock, flags);
+ qp = dev->qp_tbl[qpn % dev->dsr->caps.max_qp];
+ if (qp)
+ atomic_inc(&qp->refcnt);
+ spin_unlock_irqrestore(&dev->qp_tbl_lock, flags);
+
+ if (qp && qp->ibqp.event_handler) {
+ struct ib_qp *ibqp = &qp->ibqp;
+ struct ib_event e;
+
+ e.device = ibqp->device;
+ e.element.qp = ibqp;
+ e.event = type; /* 1:1 mapping for now. */
+ ibqp->event_handler(&e, ibqp->qp_context);
+ }
+ if (qp) {
+ atomic_dec(&qp->refcnt);
+ if (atomic_read(&qp->refcnt) == 0)
+ wake_up(&qp->wait);
+ }
+}
+
+static void pvrdma_cq_event(struct pvrdma_dev *dev, u32 cqn, int type)
+{
+ struct pvrdma_cq *cq;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->cq_tbl_lock, flags);
+ cq = dev->cq_tbl[cqn % dev->dsr->caps.max_cq];
+ if (cq)
+ atomic_inc(&cq->refcnt);
+ spin_unlock_irqrestore(&dev->cq_tbl_lock, flags);
+
+ if (cq && cq->ibcq.event_handler) {
+ struct ib_cq *ibcq = &cq->ibcq;
+ struct ib_event e;
+
+ e.device = ibcq->device;
+ e.element.cq = ibcq;
+ e.event = type; /* 1:1 mapping for now. */
+ ibcq->event_handler(&e, ibcq->cq_context);
+ }
+ if (cq) {
+ atomic_dec(&cq->refcnt);
+ if (atomic_read(&cq->refcnt) == 0)
+ wake_up(&cq->wait);
+ }
+}
+
+static void pvrdma_dispatch_event(struct pvrdma_dev *dev, int port,
+ enum ib_event_type event)
+{
+ struct ib_event ib_event;
+
+ memset(&ib_event, 0, sizeof(ib_event));
+ ib_event.device = &dev->ib_dev;
+ ib_event.element.port_num = port;
+ ib_event.event = event;
+ ib_dispatch_event(&ib_event);
+}
+
+static void pvrdma_dev_event(struct pvrdma_dev *dev, u8 port, int type)
+{
+ if (port < 1 || port > dev->dsr->caps.phys_port_cnt) {
+		dev_warn(&dev->pdev->dev, "event on invalid port %d\n", port);
+ return;
+ }
+
+ pvrdma_dispatch_event(dev, port, type);
+}
+
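+/*
+ * The first page of the async-event ring holds the ring state; the event
+ * queue elements start on the following pages.
+ */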
+static inline struct pvrdma_eqe *get_eqe(struct pvrdma_dev *dev, unsigned int i)
+{
+ return (struct pvrdma_eqe *)pvrdma_page_dir_get_ptr(
+ &dev->async_pdir,
+ PAGE_SIZE +
+ sizeof(struct pvrdma_eqe) * i);
+}
+
+static irqreturn_t pvrdma_intr1_handler(int irq, void *dev_id)
+{
+ struct pvrdma_dev *dev = dev_id;
+ struct pvrdma_ring *ring = &dev->async_ring_state->rx;
+ int ring_slots = (dev->dsr->async_ring_pages.num_pages - 1) *
+ PAGE_SIZE / sizeof(struct pvrdma_eqe);
+ unsigned int head;
+
+ dev_dbg(&dev->pdev->dev, "interrupt 1 (async event) handler\n");
+
+ /*
+ * Don't process events until the IB device is registered. Otherwise
+ * we'll try to ib_dispatch_event() on an invalid device.
+ */
+ if (!dev->ib_active)
+ return IRQ_HANDLED;
+
+ while (pvrdma_idx_ring_has_data(ring, ring_slots, &head) > 0) {
+ struct pvrdma_eqe *eqe;
+
+ eqe = get_eqe(dev, head);
+
+ switch (eqe->type) {
+ case PVRDMA_EVENT_QP_FATAL:
+ case PVRDMA_EVENT_QP_REQ_ERR:
+ case PVRDMA_EVENT_QP_ACCESS_ERR:
+ case PVRDMA_EVENT_COMM_EST:
+ case PVRDMA_EVENT_SQ_DRAINED:
+ case PVRDMA_EVENT_PATH_MIG:
+ case PVRDMA_EVENT_PATH_MIG_ERR:
+ case PVRDMA_EVENT_QP_LAST_WQE_REACHED:
+ pvrdma_qp_event(dev, eqe->info, eqe->type);
+ break;
+
+ case PVRDMA_EVENT_CQ_ERR:
+ pvrdma_cq_event(dev, eqe->info, eqe->type);
+ break;
+
+ case PVRDMA_EVENT_SRQ_ERR:
+ case PVRDMA_EVENT_SRQ_LIMIT_REACHED:
+ break;
+
+ case PVRDMA_EVENT_PORT_ACTIVE:
+ case PVRDMA_EVENT_PORT_ERR:
+ case PVRDMA_EVENT_LID_CHANGE:
+ case PVRDMA_EVENT_PKEY_CHANGE:
+ case PVRDMA_EVENT_SM_CHANGE:
+ case PVRDMA_EVENT_CLIENT_REREGISTER:
+ case PVRDMA_EVENT_GID_CHANGE:
+ pvrdma_dev_event(dev, eqe->info, eqe->type);
+ break;
+
+ case PVRDMA_EVENT_DEVICE_FATAL:
+ pvrdma_dev_event(dev, 1, eqe->type);
+ break;
+
+ default:
+ break;
+ }
+
+ pvrdma_idx_ring_inc(&ring->cons_head, ring_slots);
+ }
+
+ return IRQ_HANDLED;
+}
+
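+/*
+ * As with the async-event ring, the first page of the CQ notification ring
+ * holds the ring state.
+ */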
+static inline struct pvrdma_cqne *get_cqne(struct pvrdma_dev *dev,
+ unsigned int i)
+{
+ return (struct pvrdma_cqne *)pvrdma_page_dir_get_ptr(
+ &dev->cq_pdir,
+ PAGE_SIZE +
+ sizeof(struct pvrdma_cqne) * i);
+}
+
+static irqreturn_t pvrdma_intrx_handler(int irq, void *dev_id)
+{
+ struct pvrdma_dev *dev = dev_id;
+ struct pvrdma_ring *ring = &dev->cq_ring_state->rx;
+ int ring_slots = (dev->dsr->cq_ring_pages.num_pages - 1) * PAGE_SIZE /
+ sizeof(struct pvrdma_cqne);
+ unsigned int head;
+ unsigned long flags;
+
+ dev_dbg(&dev->pdev->dev, "interrupt x (completion) handler\n");
+
+ while (pvrdma_idx_ring_has_data(ring, ring_slots, &head) > 0) {
+ struct pvrdma_cqne *cqne;
+ struct pvrdma_cq *cq;
+
+ cqne = get_cqne(dev, head);
+ spin_lock_irqsave(&dev->cq_tbl_lock, flags);
+ cq = dev->cq_tbl[cqne->info % dev->dsr->caps.max_cq];
+ if (cq)
+ atomic_inc(&cq->refcnt);
+ spin_unlock_irqrestore(&dev->cq_tbl_lock, flags);
+
+ if (cq && cq->ibcq.comp_handler)
+ cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
+ if (cq) {
+ atomic_dec(&cq->refcnt);
+			if (atomic_read(&cq->refcnt) == 0)
+ wake_up(&cq->wait);
+ }
+ pvrdma_idx_ring_inc(&ring->cons_head, ring_slots);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static void pvrdma_disable_msi_all(struct pvrdma_dev *dev)
+{
+ if (dev->intr.type == PVRDMA_INTR_TYPE_MSIX)
+ pci_disable_msix(dev->pdev);
+ else if (dev->intr.type == PVRDMA_INTR_TYPE_MSI)
+ pci_disable_msi(dev->pdev);
+}
+
+static void pvrdma_free_irq(struct pvrdma_dev *dev)
+{
+ int i;
+
+ dev_dbg(&dev->pdev->dev, "freeing interrupts\n");
+
+ if (dev->intr.type == PVRDMA_INTR_TYPE_MSIX) {
+ for (i = 0; i < dev->intr.size; i++) {
+ if (dev->intr.enabled[i]) {
+ free_irq(dev->intr.msix_entry[i].vector, dev);
+ dev->intr.enabled[i] = 0;
+ }
+ }
+ } else if (dev->intr.type == PVRDMA_INTR_TYPE_INTX ||
+ dev->intr.type == PVRDMA_INTR_TYPE_MSI) {
+ free_irq(dev->pdev->irq, dev);
+ }
+}
+
+static void pvrdma_enable_intrs(struct pvrdma_dev *dev)
+{
+ dev_dbg(&dev->pdev->dev, "enable interrupts\n");
+ pvrdma_write_reg(dev, PVRDMA_REG_IMR, 0);
+}
+
+static void pvrdma_disable_intrs(struct pvrdma_dev *dev)
+{
+ dev_dbg(&dev->pdev->dev, "disable interrupts\n");
+ pvrdma_write_reg(dev, PVRDMA_REG_IMR, ~0);
+}
+
+static int pvrdma_enable_msix(struct pci_dev *pdev, struct pvrdma_dev *dev)
+{
+ int i;
+ int ret;
+
+ for (i = 0; i < PVRDMA_MAX_INTERRUPTS; i++) {
+ dev->intr.msix_entry[i].entry = i;
+ dev->intr.msix_entry[i].vector = i;
+
+ switch (i) {
+ case 0:
+ /* CMD ring handler */
+ dev->intr.handler[i] = pvrdma_intr0_handler;
+ break;
+ case 1:
+ /* Async event ring handler */
+ dev->intr.handler[i] = pvrdma_intr1_handler;
+ break;
+ default:
+ /* Completion queue handler */
+ dev->intr.handler[i] = pvrdma_intrx_handler;
+ break;
+ }
+ }
+
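+	/*
+	 * pci_enable_msix() returns a positive value when fewer vectors are
+	 * available than requested; retry with that reduced count.
+	 */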
+ ret = pci_enable_msix(pdev, dev->intr.msix_entry,
+ PVRDMA_MAX_INTERRUPTS);
+ if (!ret) {
+ dev->intr.type = PVRDMA_INTR_TYPE_MSIX;
+ dev->intr.size = PVRDMA_MAX_INTERRUPTS;
+ } else if (ret > 0) {
+ ret = pci_enable_msix(pdev, dev->intr.msix_entry, ret);
+ if (!ret) {
+ dev->intr.type = PVRDMA_INTR_TYPE_MSIX;
+ dev->intr.size = ret;
+ } else {
+ dev->intr.size = 0;
+ }
+ }
+
+ dev_dbg(&pdev->dev, "using interrupt type %d, size %d\n",
+ dev->intr.type, dev->intr.size);
+
+ return ret;
+}
+
+static int pvrdma_alloc_intrs(struct pvrdma_dev *dev)
+{
+ int ret = 0;
+ int i;
+
+ if (pci_find_capability(dev->pdev, PCI_CAP_ID_MSIX) &&
+ pvrdma_enable_msix(dev->pdev, dev)) {
+ /* Try MSI */
+ ret = pci_enable_msi(dev->pdev);
+ if (!ret) {
+ dev->intr.type = PVRDMA_INTR_TYPE_MSI;
+ } else {
+ /* Legacy INTR */
+ dev->intr.type = PVRDMA_INTR_TYPE_INTX;
+ }
+ }
+
+	/* Request the first IRQ. */
+ switch (dev->intr.type) {
+ case PVRDMA_INTR_TYPE_INTX:
+ case PVRDMA_INTR_TYPE_MSI:
+ ret = request_irq(dev->pdev->irq, pvrdma_intr0_handler,
+ IRQF_SHARED, DRV_NAME, dev);
+ if (ret) {
+ dev_err(&dev->pdev->dev,
+ "failed to request interrupt\n");
+ goto disable_msi;
+ }
+ break;
+ case PVRDMA_INTR_TYPE_MSIX:
+ ret = request_irq(dev->intr.msix_entry[0].vector,
+ pvrdma_intr0_handler, 0, DRV_NAME, dev);
+ if (ret) {
+ dev_err(&dev->pdev->dev,
+ "failed to request interrupt 0\n");
+ goto disable_msi;
+ }
+ dev->intr.enabled[0] = 1;
+ break;
+ default:
+ /* Not reached */
+ break;
+ }
+
+ /* For MSIX: request intr for each vector */
+ if (dev->intr.size > 1) {
+ ret = request_irq(dev->intr.msix_entry[1].vector,
+ pvrdma_intr1_handler, 0, DRV_NAME, dev);
+ if (ret) {
+ dev_err(&dev->pdev->dev,
+ "failed to request interrupt 1\n");
+ goto free_irq;
+ }
+ dev->intr.enabled[1] = 1;
+
+ for (i = 2; i < dev->intr.size; i++) {
+ ret = request_irq(dev->intr.msix_entry[i].vector,
+ pvrdma_intrx_handler, 0,
+ DRV_NAME, dev);
+ if (ret) {
+ dev_err(&dev->pdev->dev,
+ "failed to request interrupt %d\n", i);
+ goto free_irq;
+ }
+ dev->intr.enabled[i] = 1;
+ }
+ }
+
+ return 0;
+
+free_irq:
+ pvrdma_free_irq(dev);
+disable_msi:
+ pvrdma_disable_msi_all(dev);
+ return ret;
+}
+
+static void pvrdma_free_slots(struct pvrdma_dev *dev)
+{
+ struct pci_dev *pdev = dev->pdev;
+
+ if (dev->resp_slot)
+ dma_free_coherent(&pdev->dev, PAGE_SIZE, dev->resp_slot,
+ dev->dsr->resp_slot_dma);
+ if (dev->cmd_slot)
+ dma_free_coherent(&pdev->dev, PAGE_SIZE, dev->cmd_slot,
+ dev->dsr->cmd_slot_dma);
+}
+
+static int pvrdma_add_gid_at_index(struct pvrdma_dev *dev,
+ const union ib_gid *gid,
+ int index)
+{
+ int ret;
+ union pvrdma_cmd_req req;
+ struct pvrdma_cmd_create_bind *cmd_bind = &req.create_bind;
+
+ if (!dev->sgid_tbl) {
+ dev_warn(&dev->pdev->dev, "sgid table not initialized\n");
+ return -EINVAL;
+ }
+
+ memset(cmd_bind, 0, sizeof(*cmd_bind));
+ cmd_bind->hdr.cmd = PVRDMA_CMD_CREATE_BIND;
+ memcpy(cmd_bind->new_gid, gid->raw, 16);
+ cmd_bind->mtu = ib_mtu_enum_to_int(IB_MTU_1024);
+ cmd_bind->vlan = 0xfff;
+ cmd_bind->index = index;
+ cmd_bind->gid_type = PVRDMA_GID_TYPE_FLAG_ROCE_V1;
+
+ ret = pvrdma_cmd_post(dev, &req, NULL, 0);
+ if (ret < 0) {
+ dev_warn(&dev->pdev->dev,
+ "could not create binding, error: %d\n", ret);
+ return -EFAULT;
+ }
+ memcpy(&dev->sgid_tbl[index], gid, sizeof(*gid));
+ return 0;
+}
+
+static int pvrdma_add_gid(struct ib_device *ibdev,
+ u8 port_num,
+ unsigned int index,
+ const union ib_gid *gid,
+ const struct ib_gid_attr *attr,
+ void **context)
+{
+ struct pvrdma_dev *dev = to_vdev(ibdev);
+
+ return pvrdma_add_gid_at_index(dev, gid, index);
+}
+
+static int pvrdma_del_gid_at_index(struct pvrdma_dev *dev, int index)
+{
+ int ret;
+ union pvrdma_cmd_req req;
+ struct pvrdma_cmd_destroy_bind *cmd_dest = &req.destroy_bind;
+
+ /* Update sgid table. */
+ if (!dev->sgid_tbl) {
+ dev_warn(&dev->pdev->dev, "sgid table not initialized\n");
+ return -EINVAL;
+ }
+
+ memset(cmd_dest, 0, sizeof(*cmd_dest));
+ cmd_dest->hdr.cmd = PVRDMA_CMD_DESTROY_BIND;
+ memcpy(cmd_dest->dest_gid, &dev->sgid_tbl[index], 16);
+ cmd_dest->index = index;
+
+ ret = pvrdma_cmd_post(dev, &req, NULL, 0);
+ if (ret < 0) {
+ dev_warn(&dev->pdev->dev,
+ "could not destroy binding, error: %d\n", ret);
+ return ret;
+ }
+ memset(&dev->sgid_tbl[index], 0, 16);
+ return 0;
+}
+
+static int pvrdma_del_gid(struct ib_device *ibdev,
+ u8 port_num,
+ unsigned int index,
+ void **context)
+{
+ struct pvrdma_dev *dev = to_vdev(ibdev);
+
+	dev_dbg(&dev->pdev->dev, "removing gid at index %u from %s\n",
+		index, dev->netdev->name);
+
+ return pvrdma_del_gid_at_index(dev, index);
+}
+
+static void pvrdma_netdevice_event_handle(struct pvrdma_dev *dev,
+ unsigned long event)
+{
+ switch (event) {
+ case NETDEV_REBOOT:
+ case NETDEV_DOWN:
+ pvrdma_dispatch_event(dev, 1, IB_EVENT_PORT_ERR);
+ break;
+ case NETDEV_UP:
+ pvrdma_dispatch_event(dev, 1, IB_EVENT_PORT_ACTIVE);
+ break;
+ default:
+ dev_dbg(&dev->pdev->dev, "ignore netdevice event %ld on %s\n",
+ event, dev->ib_dev.name);
+ break;
+ }
+}
+
+static void pvrdma_netdevice_event_work(struct work_struct *work)
+{
+ struct pvrdma_netdevice_work *netdev_work;
+ struct pvrdma_dev *dev;
+
+ netdev_work = container_of(work, struct pvrdma_netdevice_work, work);
+
+ mutex_lock(&pvrdma_device_list_lock);
+ list_for_each_entry(dev, &pvrdma_device_list, device_link) {
+ if (dev->netdev == netdev_work->event_netdev) {
+ pvrdma_netdevice_event_handle(dev, netdev_work->event);
+ break;
+ }
+ }
+ mutex_unlock(&pvrdma_device_list_lock);
+
+ kfree(netdev_work);
+}
+
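+/*
+ * Netdevice notifiers can run in atomic context, so the actual handling
+ * is deferred to the ordered event workqueue.
+ */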
+static int pvrdma_netdevice_event(struct notifier_block *this,
+ unsigned long event, void *ptr)
+{
+ struct net_device *event_netdev = netdev_notifier_info_to_dev(ptr);
+ struct pvrdma_netdevice_work *netdev_work;
+
+ netdev_work = kmalloc(sizeof(*netdev_work), GFP_ATOMIC);
+ if (!netdev_work)
+ return NOTIFY_BAD;
+
+ INIT_WORK(&netdev_work->work, pvrdma_netdevice_event_work);
+ netdev_work->event_netdev = event_netdev;
+ netdev_work->event = event;
+ queue_work(event_wq, &netdev_work->work);
+
+ return NOTIFY_DONE;
+}
+
+static int pvrdma_pci_probe(struct pci_dev *pdev,
+ const struct pci_device_id *id)
+{
+ struct pci_dev *pdev_net;
+ struct pvrdma_dev *dev;
+ int ret;
+ unsigned long start;
+ unsigned long len;
+ unsigned int version;
+ dma_addr_t slot_dma = 0;
+
+ dev_dbg(&pdev->dev, "initializing driver %s\n", pci_name(pdev));
+
+	/* Allocate a zeroed device structure. */
+ dev = (struct pvrdma_dev *)ib_alloc_device(sizeof(*dev));
+ if (!dev) {
+ dev_err(&pdev->dev, "failed to allocate IB device\n");
+ return -ENOMEM;
+ }
+
+ mutex_lock(&pvrdma_device_list_lock);
+ list_add(&dev->device_link, &pvrdma_device_list);
+ mutex_unlock(&pvrdma_device_list_lock);
+
+ ret = pvrdma_init_device(dev);
+ if (ret)
+ goto err_free_device;
+
+ dev->pdev = pdev;
+ pci_set_drvdata(pdev, dev);
+
+ ret = pci_enable_device(pdev);
+ if (ret) {
+ dev_err(&pdev->dev, "cannot enable PCI device\n");
+ goto err_free_device;
+ }
+
+ dev_dbg(&pdev->dev, "PCI resource flags BAR0 %#lx\n",
+ pci_resource_flags(pdev, 0));
+ dev_dbg(&pdev->dev, "PCI resource len %#llx\n",
+ (unsigned long long)pci_resource_len(pdev, 0));
+ dev_dbg(&pdev->dev, "PCI resource start %#llx\n",
+ (unsigned long long)pci_resource_start(pdev, 0));
+ dev_dbg(&pdev->dev, "PCI resource flags BAR1 %#lx\n",
+ pci_resource_flags(pdev, 1));
+ dev_dbg(&pdev->dev, "PCI resource len %#llx\n",
+ (unsigned long long)pci_resource_len(pdev, 1));
+ dev_dbg(&pdev->dev, "PCI resource start %#llx\n",
+ (unsigned long long)pci_resource_start(pdev, 1));
+
+ if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM) ||
+ !(pci_resource_flags(pdev, 1) & IORESOURCE_MEM)) {
+ dev_err(&pdev->dev, "PCI BAR region not MMIO\n");
+ ret = -ENOMEM;
+		goto err_disable_pdev;
+ }
+
+ ret = pci_request_regions(pdev, DRV_NAME);
+ if (ret) {
+ dev_err(&pdev->dev, "cannot request PCI resources\n");
+ goto err_disable_pdev;
+ }
+
+ /* Enable 64-Bit DMA */
+ if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0) {
+ ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
+ if (ret != 0) {
+ dev_err(&pdev->dev,
+ "pci_set_consistent_dma_mask failed\n");
+ goto err_free_resource;
+ }
+ } else {
+ ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ if (ret != 0) {
+ dev_err(&pdev->dev,
+ "pci_set_dma_mask failed\n");
+ goto err_free_resource;
+ }
+ }
+
+ pci_set_master(pdev);
+
+ /* Map register space */
+ start = pci_resource_start(dev->pdev, PVRDMA_PCI_RESOURCE_REG);
+ len = pci_resource_len(dev->pdev, PVRDMA_PCI_RESOURCE_REG);
+ dev->regs = ioremap(start, len);
+ if (!dev->regs) {
+ dev_err(&pdev->dev, "register mapping failed\n");
+ ret = -ENOMEM;
+ goto err_free_resource;
+ }
+
+ /* Setup per-device UAR. */
+ dev->driver_uar.index = 0;
+ dev->driver_uar.pfn =
+ pci_resource_start(dev->pdev, PVRDMA_PCI_RESOURCE_UAR) >>
+ PAGE_SHIFT;
+ dev->driver_uar.map =
+ ioremap(dev->driver_uar.pfn << PAGE_SHIFT, PAGE_SIZE);
+ if (!dev->driver_uar.map) {
+ dev_err(&pdev->dev, "failed to remap UAR pages\n");
+ ret = -ENOMEM;
+ goto err_unmap_regs;
+ }
+
+ version = pvrdma_read_reg(dev, PVRDMA_REG_VERSION);
+ dev_info(&pdev->dev, "device version %d, driver version %d\n",
+ version, PVRDMA_VERSION);
+	if (version < PVRDMA_VERSION) {
+		dev_err(&pdev->dev, "incompatible device version\n");
+		ret = -EFAULT;
+		goto err_uar_unmap;
+	}
+
+ dev->dsr = dma_alloc_coherent(&pdev->dev, sizeof(*dev->dsr),
+ &dev->dsrbase, GFP_KERNEL);
+ if (!dev->dsr) {
+ dev_err(&pdev->dev, "failed to allocate shared region\n");
+ ret = -ENOMEM;
+ goto err_uar_unmap;
+ }
+
+ /* Setup the shared region */
+ memset(dev->dsr, 0, sizeof(*dev->dsr));
+ dev->dsr->driver_version = PVRDMA_VERSION;
+ dev->dsr->gos_info.gos_bits = sizeof(void *) == 4 ?
+ PVRDMA_GOS_BITS_32 :
+ PVRDMA_GOS_BITS_64;
+ dev->dsr->gos_info.gos_type = PVRDMA_GOS_TYPE_LINUX;
+ dev->dsr->gos_info.gos_ver = 1;
+ dev->dsr->uar_pfn = dev->driver_uar.pfn;
+
+ /* Command slot. */
+ dev->cmd_slot = dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
+ &slot_dma, GFP_KERNEL);
+ if (!dev->cmd_slot) {
+ ret = -ENOMEM;
+ goto err_free_dsr;
+ }
+
+ dev->dsr->cmd_slot_dma = (u64)slot_dma;
+
+ /* Response slot. */
+ dev->resp_slot = dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
+ &slot_dma, GFP_KERNEL);
+ if (!dev->resp_slot) {
+ ret = -ENOMEM;
+ goto err_free_slots;
+ }
+
+ dev->dsr->resp_slot_dma = (u64)slot_dma;
+
+ /* Async event ring */
+ dev->dsr->async_ring_pages.num_pages = 4;
+ ret = pvrdma_page_dir_init(dev, &dev->async_pdir,
+ dev->dsr->async_ring_pages.num_pages, true);
+ if (ret)
+ goto err_free_slots;
+ dev->async_ring_state = dev->async_pdir.pages[0];
+ dev->dsr->async_ring_pages.pdir_dma = dev->async_pdir.dir_dma;
+
+ /* CQ notification ring */
+ dev->dsr->cq_ring_pages.num_pages = 4;
+ ret = pvrdma_page_dir_init(dev, &dev->cq_pdir,
+ dev->dsr->cq_ring_pages.num_pages, true);
+ if (ret)
+ goto err_free_async_ring;
+ dev->cq_ring_state = dev->cq_pdir.pages[0];
+ dev->dsr->cq_ring_pages.pdir_dma = dev->cq_pdir.dir_dma;
+
+ /*
+ * Write the PA of the shared region to the device. The writes must be
+ * ordered such that the high bits are written last. When the writes
+ * complete, the device will have filled out the capabilities.
+ */
+
+ pvrdma_write_reg(dev, PVRDMA_REG_DSRLOW, (u32)dev->dsrbase);
+ pvrdma_write_reg(dev, PVRDMA_REG_DSRHIGH,
+ (u32)((u64)(dev->dsrbase) >> 32));
+
+ /* Make sure the write is complete before reading status. */
+ mb();
+
+ /* Currently, the driver only supports RoCE mode. */
+ if (dev->dsr->caps.mode != PVRDMA_DEVICE_MODE_ROCE) {
+ dev_err(&pdev->dev, "unsupported transport %d\n",
+ dev->dsr->caps.mode);
+ ret = -EFAULT;
+ goto err_free_cq_ring;
+ }
+
+ /* Currently, the driver only supports RoCE V1. */
+ if (!(dev->dsr->caps.gid_types & PVRDMA_GID_TYPE_FLAG_ROCE_V1)) {
+ dev_err(&pdev->dev, "driver needs RoCE v1 support\n");
+ ret = -EFAULT;
+ goto err_free_cq_ring;
+ }
+
+	/* The paired vmxnet3 device shares the bus and slot but is function 0. */
+ pdev_net = pci_get_slot(pdev->bus, PCI_DEVFN(PCI_SLOT(pdev->devfn), 0));
+ if (!pdev_net) {
+ dev_err(&pdev->dev, "failed to find paired net device\n");
+ ret = -ENODEV;
+ goto err_free_cq_ring;
+ }
+
+ if (pdev_net->vendor != PCI_VENDOR_ID_VMWARE ||
+ pdev_net->device != PCI_DEVICE_ID_VMWARE_VMXNET3) {
+ dev_err(&pdev->dev, "failed to find paired vmxnet3 device\n");
+ pci_dev_put(pdev_net);
+ ret = -ENODEV;
+ goto err_free_cq_ring;
+ }
+
+ dev->netdev = pci_get_drvdata(pdev_net);
+ pci_dev_put(pdev_net);
+ if (!dev->netdev) {
+ dev_err(&pdev->dev, "failed to get vmxnet3 device\n");
+ ret = -ENODEV;
+ goto err_free_cq_ring;
+ }
+
+ dev_info(&pdev->dev, "paired device to %s\n", dev->netdev->name);
+
+ /* Interrupt setup */
+ ret = pvrdma_alloc_intrs(dev);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to allocate interrupts\n");
+ ret = -ENOMEM;
+ goto err_netdevice;
+ }
+
+ /* Allocate UAR table. */
+ ret = pvrdma_uar_table_init(dev);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to allocate UAR table\n");
+ ret = -ENOMEM;
+ goto err_free_intrs;
+ }
+
+ /* Allocate GID table */
+ dev->sgid_tbl = kcalloc(dev->dsr->caps.gid_tbl_len,
+ sizeof(union ib_gid), GFP_KERNEL);
+ if (!dev->sgid_tbl) {
+ ret = -ENOMEM;
+ goto err_free_uar_table;
+ }
+ dev_dbg(&pdev->dev, "gid table len %d\n", dev->dsr->caps.gid_tbl_len);
+
+ pvrdma_enable_intrs(dev);
+
+ /* Activate pvrdma device */
+ pvrdma_write_reg(dev, PVRDMA_REG_CTL, PVRDMA_DEVICE_CTL_ACTIVATE);
+
+ /* Make sure the write is complete before reading status. */
+ mb();
+
+ /* Check if device was successfully activated */
+ ret = pvrdma_read_reg(dev, PVRDMA_REG_ERR);
+ if (ret != 0) {
+ dev_err(&pdev->dev, "failed to activate device\n");
+ ret = -EFAULT;
+ goto err_disable_intr;
+ }
+
+ /* Register IB device */
+ ret = pvrdma_register_device(dev);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to register IB device\n");
+ goto err_disable_intr;
+ }
+
+ dev->nb_netdev.notifier_call = pvrdma_netdevice_event;
+ ret = register_netdevice_notifier(&dev->nb_netdev);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to register netdevice events\n");
+ goto err_unreg_ibdev;
+ }
+
+ dev_info(&pdev->dev, "attached to device\n");
+ return 0;
+
+err_unreg_ibdev:
+ ib_unregister_device(&dev->ib_dev);
+err_disable_intr:
+ pvrdma_disable_intrs(dev);
+ kfree(dev->sgid_tbl);
+err_free_uar_table:
+ pvrdma_uar_table_cleanup(dev);
+err_free_intrs:
+ pvrdma_free_irq(dev);
+ pvrdma_disable_msi_all(dev);
+err_netdevice:
+ unregister_netdevice_notifier(&dev->nb_netdev);
+err_free_cq_ring:
+ pvrdma_page_dir_cleanup(dev, &dev->cq_pdir);
+err_free_async_ring:
+ pvrdma_page_dir_cleanup(dev, &dev->async_pdir);
+err_free_slots:
+ pvrdma_free_slots(dev);
+err_free_dsr:
+ dma_free_coherent(&pdev->dev, sizeof(*dev->dsr), dev->dsr,
+ dev->dsrbase);
+err_uar_unmap:
+ iounmap(dev->driver_uar.map);
+err_unmap_regs:
+ iounmap(dev->regs);
+err_free_resource:
+ pci_release_regions(pdev);
+err_disable_pdev:
+ pci_disable_device(pdev);
+ pci_set_drvdata(pdev, NULL);
+err_free_device:
+ mutex_lock(&pvrdma_device_list_lock);
+ list_del(&dev->device_link);
+ mutex_unlock(&pvrdma_device_list_lock);
+ ib_dealloc_device(&dev->ib_dev);
+ return ret;
+}
+
+static void pvrdma_pci_remove(struct pci_dev *pdev)
+{
+ struct pvrdma_dev *dev = pci_get_drvdata(pdev);
+
+ if (!dev)
+ return;
+
+ dev_info(&pdev->dev, "detaching from device\n");
+
+ unregister_netdevice_notifier(&dev->nb_netdev);
+ dev->nb_netdev.notifier_call = NULL;
+
+ flush_workqueue(event_wq);
+
+ /* Unregister ib device */
+ ib_unregister_device(&dev->ib_dev);
+
+ mutex_lock(&pvrdma_device_list_lock);
+ list_del(&dev->device_link);
+ mutex_unlock(&pvrdma_device_list_lock);
+
+ pvrdma_disable_intrs(dev);
+ pvrdma_free_irq(dev);
+ pvrdma_disable_msi_all(dev);
+
+ /* Deactivate pvrdma device */
+ pvrdma_write_reg(dev, PVRDMA_REG_CTL, PVRDMA_DEVICE_CTL_RESET);
+ pvrdma_page_dir_cleanup(dev, &dev->cq_pdir);
+ pvrdma_page_dir_cleanup(dev, &dev->async_pdir);
+ pvrdma_free_slots(dev);
+
+ iounmap(dev->regs);
+ kfree(dev->sgid_tbl);
+ kfree(dev->cq_tbl);
+ kfree(dev->qp_tbl);
+ pvrdma_uar_table_cleanup(dev);
+ iounmap(dev->driver_uar.map);
+
+ ib_dealloc_device(&dev->ib_dev);
+
+ /* Free pci resources */
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+ pci_set_drvdata(pdev, NULL);
+}
+
+static struct pci_device_id pvrdma_pci_table[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_VMWARE, PCI_DEVICE_ID_VMWARE_PVRDMA), },
+ { 0 },
+};
+
+MODULE_DEVICE_TABLE(pci, pvrdma_pci_table);
+
+static struct pci_driver pvrdma_driver = {
+ .name = DRV_NAME,
+ .id_table = pvrdma_pci_table,
+ .probe = pvrdma_pci_probe,
+ .remove = pvrdma_pci_remove,
+};
+
+static int __init pvrdma_init(void)
+{
+ int err;
+
+ event_wq = alloc_ordered_workqueue("pvrdma_event_wq", WQ_MEM_RECLAIM);
+ if (!event_wq)
+ return -ENOMEM;
+
+ err = pci_register_driver(&pvrdma_driver);
+ if (err)
+ destroy_workqueue(event_wq);
+
+ return err;
+}
+
+static void __exit pvrdma_cleanup(void)
+{
+ pci_unregister_driver(&pvrdma_driver);
+
+ destroy_workqueue(event_wq);
+}
+
+module_init(pvrdma_init);
+module_exit(pvrdma_cleanup);
+
+MODULE_AUTHOR("VMware, Inc");
+MODULE_DESCRIPTION("VMware Paravirtual RDMA driver");
+MODULE_VERSION(DRV_VERSION);
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_misc.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_misc.c
new file mode 100644
index 000000000000..948b5ccd2a70
--- /dev/null
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_misc.c
@@ -0,0 +1,304 @@
+/*
+ * Copyright (c) 2012-2016 VMware, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of EITHER the GNU General Public License
+ * version 2 as published by the Free Software Foundation or the BSD
+ * 2-Clause License. This program is distributed in the hope that it
+ * will be useful, but WITHOUT ANY WARRANTY; WITHOUT EVEN THE IMPLIED
+ * WARRANTY OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE.
+ * See the GNU General Public License version 2 for more details at
+ * http://www.gnu.org/licenses/old-licenses/gpl-2.0.en.html.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program available in the file COPYING in the main
+ * directory of this source tree.
+ *
+ * The BSD 2-Clause License
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
+ * OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/bitmap.h>
+
+#include "pvrdma.h"
+
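+/*
+ * A page directory is a two-level structure: a single directory page holds
+ * the DMA addresses of the table pages, and each table page holds the DMA
+ * addresses of the data pages themselves.
+ */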
+int pvrdma_page_dir_init(struct pvrdma_dev *dev, struct pvrdma_page_dir *pdir,
+ u64 npages, bool alloc_pages)
+{
+ u64 i;
+
+ if (npages > PVRDMA_PAGE_DIR_MAX_PAGES)
+ return -EINVAL;
+
+ memset(pdir, 0, sizeof(*pdir));
+
+ pdir->dir = dma_alloc_coherent(&dev->pdev->dev, PAGE_SIZE,
+ &pdir->dir_dma, GFP_KERNEL);
+ if (!pdir->dir)
+ goto err;
+
+ pdir->ntables = PVRDMA_PAGE_DIR_TABLE(npages - 1) + 1;
+ pdir->tables = kcalloc(pdir->ntables, sizeof(*pdir->tables),
+ GFP_KERNEL);
+ if (!pdir->tables)
+ goto err;
+
+ for (i = 0; i < pdir->ntables; i++) {
+ pdir->tables[i] = dma_alloc_coherent(&dev->pdev->dev, PAGE_SIZE,
+ (dma_addr_t *)&pdir->dir[i],
+ GFP_KERNEL);
+ if (!pdir->tables[i])
+ goto err;
+ }
+
+ pdir->npages = npages;
+
+ if (alloc_pages) {
+ pdir->pages = kcalloc(npages, sizeof(*pdir->pages),
+ GFP_KERNEL);
+ if (!pdir->pages)
+ goto err;
+
+ for (i = 0; i < pdir->npages; i++) {
+ dma_addr_t page_dma;
+
+ pdir->pages[i] = dma_alloc_coherent(&dev->pdev->dev,
+ PAGE_SIZE,
+ &page_dma,
+ GFP_KERNEL);
+ if (!pdir->pages[i])
+ goto err;
+
+ pvrdma_page_dir_insert_dma(pdir, i, page_dma);
+ }
+ }
+
+ return 0;
+
+err:
+ pvrdma_page_dir_cleanup(dev, pdir);
+
+ return -ENOMEM;
+}
+
+static u64 *pvrdma_page_dir_table(struct pvrdma_page_dir *pdir, u64 idx)
+{
+ return pdir->tables[PVRDMA_PAGE_DIR_TABLE(idx)];
+}
+
+dma_addr_t pvrdma_page_dir_get_dma(struct pvrdma_page_dir *pdir, u64 idx)
+{
+ return pvrdma_page_dir_table(pdir, idx)[PVRDMA_PAGE_DIR_PAGE(idx)];
+}
+
+static void pvrdma_page_dir_cleanup_pages(struct pvrdma_dev *dev,
+ struct pvrdma_page_dir *pdir)
+{
+ if (pdir->pages) {
+ u64 i;
+
+ for (i = 0; i < pdir->npages && pdir->pages[i]; i++) {
+ dma_addr_t page_dma = pvrdma_page_dir_get_dma(pdir, i);
+
+ dma_free_coherent(&dev->pdev->dev, PAGE_SIZE,
+ pdir->pages[i], page_dma);
+ }
+
+ kfree(pdir->pages);
+ }
+}
+
+static void pvrdma_page_dir_cleanup_tables(struct pvrdma_dev *dev,
+ struct pvrdma_page_dir *pdir)
+{
+ if (pdir->tables) {
+ int i;
+
+ pvrdma_page_dir_cleanup_pages(dev, pdir);
+
+ for (i = 0; i < pdir->ntables; i++) {
+ u64 *table = pdir->tables[i];
+
+ if (table)
+ dma_free_coherent(&dev->pdev->dev, PAGE_SIZE,
+ table, pdir->dir[i]);
+ }
+
+ kfree(pdir->tables);
+ }
+}
+
+void pvrdma_page_dir_cleanup(struct pvrdma_dev *dev,
+ struct pvrdma_page_dir *pdir)
+{
+ if (pdir->dir) {
+ pvrdma_page_dir_cleanup_tables(dev, pdir);
+ dma_free_coherent(&dev->pdev->dev, PAGE_SIZE,
+ pdir->dir, pdir->dir_dma);
+ }
+}
+
+int pvrdma_page_dir_insert_dma(struct pvrdma_page_dir *pdir, u64 idx,
+ dma_addr_t daddr)
+{
+ u64 *table;
+
+ if (idx >= pdir->npages)
+ return -EINVAL;
+
+ table = pvrdma_page_dir_table(pdir, idx);
+ table[PVRDMA_PAGE_DIR_PAGE(idx)] = daddr;
+
+ return 0;
+}
+
+int pvrdma_page_dir_insert_umem(struct pvrdma_page_dir *pdir,
+ struct ib_umem *umem, u64 offset)
+{
+ u64 i = offset;
+ int j, entry;
+ int ret = 0, len = 0;
+ struct scatterlist *sg;
+
+ if (offset >= pdir->npages)
+ return -EINVAL;
+
+ for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
+ len = sg_dma_len(sg) >> PAGE_SHIFT;
+ for (j = 0; j < len; j++) {
+ dma_addr_t addr = sg_dma_address(sg) +
+ umem->page_size * j;
+
+ ret = pvrdma_page_dir_insert_dma(pdir, i, addr);
+ if (ret)
+ goto exit;
+
+ i++;
+ }
+ }
+
+exit:
+ return ret;
+}
+
+int pvrdma_page_dir_insert_page_list(struct pvrdma_page_dir *pdir,
+ u64 *page_list,
+ int num_pages)
+{
+ int i;
+ int ret;
+
+ if (num_pages > pdir->npages)
+ return -EINVAL;
+
+ for (i = 0; i < num_pages; i++) {
+ ret = pvrdma_page_dir_insert_dma(pdir, i, page_list[i]);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+void pvrdma_qp_cap_to_ib(struct ib_qp_cap *dst, const struct pvrdma_qp_cap *src)
+{
+ dst->max_send_wr = src->max_send_wr;
+ dst->max_recv_wr = src->max_recv_wr;
+ dst->max_send_sge = src->max_send_sge;
+ dst->max_recv_sge = src->max_recv_sge;
+ dst->max_inline_data = src->max_inline_data;
+}
+
+void ib_qp_cap_to_pvrdma(struct pvrdma_qp_cap *dst, const struct ib_qp_cap *src)
+{
+ dst->max_send_wr = src->max_send_wr;
+ dst->max_recv_wr = src->max_recv_wr;
+ dst->max_send_sge = src->max_send_sge;
+ dst->max_recv_sge = src->max_recv_sge;
+ dst->max_inline_data = src->max_inline_data;
+}
+
+void pvrdma_gid_to_ib(union ib_gid *dst, const union pvrdma_gid *src)
+{
+ BUILD_BUG_ON(sizeof(union pvrdma_gid) != sizeof(union ib_gid));
+ memcpy(dst, src, sizeof(*src));
+}
+
+void ib_gid_to_pvrdma(union pvrdma_gid *dst, const union ib_gid *src)
+{
+ BUILD_BUG_ON(sizeof(union pvrdma_gid) != sizeof(union ib_gid));
+ memcpy(dst, src, sizeof(*src));
+}
+
+void pvrdma_global_route_to_ib(struct ib_global_route *dst,
+ const struct pvrdma_global_route *src)
+{
+ pvrdma_gid_to_ib(&dst->dgid, &src->dgid);
+ dst->flow_label = src->flow_label;
+ dst->sgid_index = src->sgid_index;
+ dst->hop_limit = src->hop_limit;
+ dst->traffic_class = src->traffic_class;
+}
+
+void ib_global_route_to_pvrdma(struct pvrdma_global_route *dst,
+ const struct ib_global_route *src)
+{
+ ib_gid_to_pvrdma(&dst->dgid, &src->dgid);
+ dst->flow_label = src->flow_label;
+ dst->sgid_index = src->sgid_index;
+ dst->hop_limit = src->hop_limit;
+ dst->traffic_class = src->traffic_class;
+}
+
+void pvrdma_ah_attr_to_ib(struct ib_ah_attr *dst,
+ const struct pvrdma_ah_attr *src)
+{
+ pvrdma_global_route_to_ib(&dst->grh, &src->grh);
+ dst->dlid = src->dlid;
+ dst->sl = src->sl;
+ dst->src_path_bits = src->src_path_bits;
+ dst->static_rate = src->static_rate;
+ dst->ah_flags = src->ah_flags;
+ dst->port_num = src->port_num;
+ memcpy(&dst->dmac, &src->dmac, sizeof(dst->dmac));
+}
+
+void ib_ah_attr_to_pvrdma(struct pvrdma_ah_attr *dst,
+ const struct ib_ah_attr *src)
+{
+ ib_global_route_to_pvrdma(&dst->grh, &src->grh);
+ dst->dlid = src->dlid;
+ dst->sl = src->sl;
+ dst->src_path_bits = src->src_path_bits;
+ dst->static_rate = src->static_rate;
+ dst->ah_flags = src->ah_flags;
+ dst->port_num = src->port_num;
+ memcpy(&dst->dmac, &src->dmac, sizeof(dst->dmac));
+}
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_mr.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_mr.c
new file mode 100644
index 000000000000..8519f3212e52
--- /dev/null
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_mr.c
@@ -0,0 +1,334 @@
+/*
+ * Copyright (c) 2012-2016 VMware, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of EITHER the GNU General Public License
+ * version 2 as published by the Free Software Foundation or the BSD
+ * 2-Clause License. This program is distributed in the hope that it
+ * will be useful, but WITHOUT ANY WARRANTY; WITHOUT EVEN THE IMPLIED
+ * WARRANTY OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE.
+ * See the GNU General Public License version 2 for more details at
+ * http://www.gnu.org/licenses/old-licenses/gpl-2.0.en.html.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program available in the file COPYING in the main
+ * directory of this source tree.
+ *
+ * The BSD 2-Clause License
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
+ * OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/list.h>
+#include <linux/slab.h>
+
+#include "pvrdma.h"
+
+/**
+ * pvrdma_get_dma_mr - get a DMA memory region
+ * @pd: protection domain
+ * @acc: access flags
+ *
+ * @return: ib_mr pointer on success, otherwise returns an errno.
+ */
+struct ib_mr *pvrdma_get_dma_mr(struct ib_pd *pd, int acc)
+{
+ struct pvrdma_dev *dev = to_vdev(pd->device);
+ struct pvrdma_user_mr *mr;
+ union pvrdma_cmd_req req;
+ union pvrdma_cmd_resp rsp;
+ struct pvrdma_cmd_create_mr *cmd = &req.create_mr;
+ struct pvrdma_cmd_create_mr_resp *resp = &rsp.create_mr_resp;
+ int ret;
+
+ /* Support only LOCAL_WRITE flag for DMA MRs */
+ if (acc & ~IB_ACCESS_LOCAL_WRITE) {
+ dev_warn(&dev->pdev->dev,
+ "unsupported dma mr access flags %#x\n", acc);
+ return ERR_PTR(-EOPNOTSUPP);
+ }
+
+ mr = kzalloc(sizeof(*mr), GFP_KERNEL);
+ if (!mr)
+ return ERR_PTR(-ENOMEM);
+
+ memset(cmd, 0, sizeof(*cmd));
+ cmd->hdr.cmd = PVRDMA_CMD_CREATE_MR;
+ cmd->pd_handle = to_vpd(pd)->pd_handle;
+ cmd->access_flags = acc;
+ cmd->flags = PVRDMA_MR_FLAG_DMA;
+
+ ret = pvrdma_cmd_post(dev, &req, &rsp, PVRDMA_CMD_CREATE_MR_RESP);
+ if (ret < 0) {
+ dev_warn(&dev->pdev->dev,
+ "could not get DMA mem region, error: %d\n", ret);
+ kfree(mr);
+ return ERR_PTR(ret);
+ }
+
+ mr->mmr.mr_handle = resp->mr_handle;
+ mr->ibmr.lkey = resp->lkey;
+ mr->ibmr.rkey = resp->rkey;
+
+ return &mr->ibmr;
+}
+
+/**
+ * pvrdma_reg_user_mr - register a userspace memory region
+ * @pd: protection domain
+ * @start: starting address
+ * @length: length of region
+ * @virt_addr: I/O virtual address
+ * @access_flags: access flags for memory region
+ * @udata: user data
+ *
+ * @return: ib_mr pointer on success, otherwise returns an errno.
+ */
+struct ib_mr *pvrdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
+ u64 virt_addr, int access_flags,
+ struct ib_udata *udata)
+{
+ struct pvrdma_dev *dev = to_vdev(pd->device);
+ struct pvrdma_user_mr *mr = NULL;
+ struct ib_umem *umem;
+ union pvrdma_cmd_req req;
+ union pvrdma_cmd_resp rsp;
+ struct pvrdma_cmd_create_mr *cmd = &req.create_mr;
+ struct pvrdma_cmd_create_mr_resp *resp = &rsp.create_mr_resp;
+ int nchunks;
+ int ret;
+ int entry;
+ struct scatterlist *sg;
+
+ if (length == 0 || length > dev->dsr->caps.max_mr_size) {
+ dev_warn(&dev->pdev->dev, "invalid mem region length\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ umem = ib_umem_get(pd->uobject->context, start,
+ length, access_flags, 0);
+ if (IS_ERR(umem)) {
+ dev_warn(&dev->pdev->dev,
+ "could not get umem for mem region\n");
+ return ERR_CAST(umem);
+ }
+
+ nchunks = 0;
+ for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry)
+ nchunks += sg_dma_len(sg) >> PAGE_SHIFT;
+
+ if (nchunks < 0 || nchunks > PVRDMA_PAGE_DIR_MAX_PAGES) {
+ dev_warn(&dev->pdev->dev, "overflow %d pages in mem region\n",
+ nchunks);
+ ret = -EINVAL;
+ goto err_umem;
+ }
+
+ mr = kzalloc(sizeof(*mr), GFP_KERNEL);
+ if (!mr) {
+ ret = -ENOMEM;
+ goto err_umem;
+ }
+
+ mr->mmr.iova = virt_addr;
+ mr->mmr.size = length;
+ mr->umem = umem;
+
+ ret = pvrdma_page_dir_init(dev, &mr->pdir, nchunks, false);
+ if (ret) {
+ dev_warn(&dev->pdev->dev,
+ "could not allocate page directory\n");
+ goto err_umem;
+ }
+
+ ret = pvrdma_page_dir_insert_umem(&mr->pdir, mr->umem, 0);
+ if (ret)
+ goto err_pdir;
+
+ memset(cmd, 0, sizeof(*cmd));
+ cmd->hdr.cmd = PVRDMA_CMD_CREATE_MR;
+ cmd->start = start;
+ cmd->length = length;
+ cmd->pd_handle = to_vpd(pd)->pd_handle;
+ cmd->access_flags = access_flags;
+ cmd->nchunks = nchunks;
+ cmd->pdir_dma = mr->pdir.dir_dma;
+
+ ret = pvrdma_cmd_post(dev, &req, &rsp, PVRDMA_CMD_CREATE_MR_RESP);
+ if (ret < 0) {
+ dev_warn(&dev->pdev->dev,
+ "could not register mem region, error: %d\n", ret);
+ goto err_pdir;
+ }
+
+ mr->mmr.mr_handle = resp->mr_handle;
+ mr->ibmr.lkey = resp->lkey;
+ mr->ibmr.rkey = resp->rkey;
+
+ return &mr->ibmr;
+
+err_pdir:
+ pvrdma_page_dir_cleanup(dev, &mr->pdir);
+err_umem:
+ ib_umem_release(umem);
+ kfree(mr);
+
+ return ERR_PTR(ret);
+}
+
+/**
+ * pvrdma_alloc_mr - allocate a memory region
+ * @pd: protection domain
+ * @mr_type: type of memory region
+ * @max_num_sg: maximum number of pages
+ *
+ * @return: ib_mr pointer on success, otherwise returns an errno.
+ */
+struct ib_mr *pvrdma_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
+ u32 max_num_sg)
+{
+ struct pvrdma_dev *dev = to_vdev(pd->device);
+ struct pvrdma_user_mr *mr;
+ union pvrdma_cmd_req req;
+ union pvrdma_cmd_resp rsp;
+ struct pvrdma_cmd_create_mr *cmd = &req.create_mr;
+ struct pvrdma_cmd_create_mr_resp *resp = &rsp.create_mr_resp;
+ int size = max_num_sg * sizeof(u64);
+ int ret;
+
+ if (mr_type != IB_MR_TYPE_MEM_REG ||
+ max_num_sg > PVRDMA_MAX_FAST_REG_PAGES)
+ return ERR_PTR(-EINVAL);
+
+ mr = kzalloc(sizeof(*mr), GFP_KERNEL);
+ if (!mr)
+ return ERR_PTR(-ENOMEM);
+
+ mr->pages = kzalloc(size, GFP_KERNEL);
+ if (!mr->pages) {
+ ret = -ENOMEM;
+ goto freemr;
+ }
+
+ ret = pvrdma_page_dir_init(dev, &mr->pdir, max_num_sg, false);
+ if (ret) {
+ dev_warn(&dev->pdev->dev,
+ "failed to allocate page dir for mr\n");
+ ret = -ENOMEM;
+ goto freepages;
+ }
+
+ memset(cmd, 0, sizeof(*cmd));
+ cmd->hdr.cmd = PVRDMA_CMD_CREATE_MR;
+ cmd->pd_handle = to_vpd(pd)->pd_handle;
+ cmd->access_flags = 0;
+ cmd->flags = PVRDMA_MR_FLAG_FRMR;
+ cmd->nchunks = max_num_sg;
+
+ ret = pvrdma_cmd_post(dev, &req, &rsp, PVRDMA_CMD_CREATE_MR_RESP);
+ if (ret < 0) {
+ dev_warn(&dev->pdev->dev,
+ "could not create FR mem region, error: %d\n", ret);
+ goto freepdir;
+ }
+
+ mr->max_pages = max_num_sg;
+ mr->mmr.mr_handle = resp->mr_handle;
+ mr->ibmr.lkey = resp->lkey;
+ mr->ibmr.rkey = resp->rkey;
+ mr->page_shift = PAGE_SHIFT;
+ mr->umem = NULL;
+
+ return &mr->ibmr;
+
+freepdir:
+ pvrdma_page_dir_cleanup(dev, &mr->pdir);
+freepages:
+ kfree(mr->pages);
+freemr:
+ kfree(mr);
+ return ERR_PTR(ret);
+}
+
+/**
+ * pvrdma_dereg_mr - deregister a memory region
+ * @ibmr: memory region
+ *
+ * @return: 0 on success.
+ */
+int pvrdma_dereg_mr(struct ib_mr *ibmr)
+{
+ struct pvrdma_user_mr *mr = to_vmr(ibmr);
+ struct pvrdma_dev *dev = to_vdev(ibmr->device);
+ union pvrdma_cmd_req req;
+ struct pvrdma_cmd_destroy_mr *cmd = &req.destroy_mr;
+ int ret;
+
+ memset(cmd, 0, sizeof(*cmd));
+ cmd->hdr.cmd = PVRDMA_CMD_DESTROY_MR;
+ cmd->mr_handle = mr->mmr.mr_handle;
+ ret = pvrdma_cmd_post(dev, &req, NULL, 0);
+ if (ret < 0)
+ dev_warn(&dev->pdev->dev,
+ "could not deregister mem region, error: %d\n", ret);
+
+ pvrdma_page_dir_cleanup(dev, &mr->pdir);
+ if (mr->umem)
+ ib_umem_release(mr->umem);
+
+ kfree(mr->pages);
+ kfree(mr);
+
+ return 0;
+}
+
+static int pvrdma_set_page(struct ib_mr *ibmr, u64 addr)
+{
+ struct pvrdma_user_mr *mr = to_vmr(ibmr);
+
+ if (mr->npages == mr->max_pages)
+ return -ENOMEM;
+
+ mr->pages[mr->npages++] = addr;
+ return 0;
+}
+
+int pvrdma_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
+ unsigned int *sg_offset)
+{
+ struct pvrdma_user_mr *mr = to_vmr(ibmr);
+ struct pvrdma_dev *dev = to_vdev(ibmr->device);
+ int ret;
+
+ mr->npages = 0;
+
+ ret = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, pvrdma_set_page);
+ if (ret < 0)
+ dev_warn(&dev->pdev->dev, "could not map sg to pages\n");
+
+ return ret;
+}
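
Taken together, pvrdma_alloc_mr(), pvrdma_map_mr_sg() and the pvrdma_set_page() callback implement the kernel fast-registration flow: ib_map_mr_sg() walks a DMA-mapped scatterlist, splits it into page-sized chunks and feeds each page address to pvrdma_set_page(), and a later IB_WR_REG_MR work request (see set_reg_seg() in pvrdma_qp.c below) binds the page list on the device. A minimal consumer-side sketch, assuming a connected qp and a DMA-mapped scatterlist sg with sg_nents entries (both hypothetical names):

	struct ib_mr *mr;
	struct ib_reg_wr reg_wr;
	struct ib_send_wr *bad_wr;
	int n;

	mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG, 16); /* ends up in pvrdma_alloc_mr() */
	if (IS_ERR(mr))
		return PTR_ERR(mr);

	/* Split sg into PAGE_SIZE chunks; each chunk hits pvrdma_set_page(). */
	n = ib_map_mr_sg(mr, sg, sg_nents, NULL, PAGE_SIZE);
	if (n < sg_nents)
		return n < 0 ? n : -EINVAL;

	memset(&reg_wr, 0, sizeof(reg_wr));
	reg_wr.wr.opcode = IB_WR_REG_MR;
	reg_wr.mr = mr;
	reg_wr.key = mr->rkey;
	reg_wr.access = IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_READ;

	return ib_post_send(qp, &reg_wr.wr, &bad_wr);
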
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c
new file mode 100644
index 000000000000..c8c01e558125
--- /dev/null
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c
@@ -0,0 +1,972 @@
+/*
+ * Copyright (c) 2012-2016 VMware, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of EITHER the GNU General Public License
+ * version 2 as published by the Free Software Foundation or the BSD
+ * 2-Clause License. This program is distributed in the hope that it
+ * will be useful, but WITHOUT ANY WARRANTY; WITHOUT EVEN THE IMPLIED
+ * WARRANTY OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE.
+ * See the GNU General Public License version 2 for more details at
+ * http://www.gnu.org/licenses/old-licenses/gpl-2.0.en.html.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program available in the file COPYING in the main
+ * directory of this source tree.
+ *
+ * The BSD 2-Clause License
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
+ * OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <asm/page.h>
+#include <linux/io.h>
+#include <linux/wait.h>
+#include <rdma/ib_addr.h>
+#include <rdma/ib_smi.h>
+#include <rdma/ib_user_verbs.h>
+
+#include "pvrdma.h"
+
+static inline void get_cqs(struct pvrdma_qp *qp, struct pvrdma_cq **send_cq,
+ struct pvrdma_cq **recv_cq)
+{
+ *send_cq = to_vcq(qp->ibqp.send_cq);
+ *recv_cq = to_vcq(qp->ibqp.recv_cq);
+}
+
+static void pvrdma_lock_cqs(struct pvrdma_cq *scq, struct pvrdma_cq *rcq,
+ unsigned long *scq_flags,
+ unsigned long *rcq_flags)
+ __acquires(scq->cq_lock) __acquires(rcq->cq_lock)
+{
+ if (scq == rcq) {
+ spin_lock_irqsave(&scq->cq_lock, *scq_flags);
+ __acquire(rcq->cq_lock);
+ } else if (scq->cq_handle < rcq->cq_handle) {
+ spin_lock_irqsave(&scq->cq_lock, *scq_flags);
+ spin_lock_irqsave_nested(&rcq->cq_lock, *rcq_flags,
+ SINGLE_DEPTH_NESTING);
+ } else {
+ spin_lock_irqsave(&rcq->cq_lock, *rcq_flags);
+ spin_lock_irqsave_nested(&scq->cq_lock, *scq_flags,
+ SINGLE_DEPTH_NESTING);
+ }
+}
+
+static void pvrdma_unlock_cqs(struct pvrdma_cq *scq, struct pvrdma_cq *rcq,
+ unsigned long *scq_flags,
+ unsigned long *rcq_flags)
+ __releases(scq->cq_lock) __releases(rcq->cq_lock)
+{
+ if (scq == rcq) {
+ __release(rcq->cq_lock);
+ spin_unlock_irqrestore(&scq->cq_lock, *scq_flags);
+ } else if (scq->cq_handle < rcq->cq_handle) {
+ spin_unlock_irqrestore(&rcq->cq_lock, *rcq_flags);
+ spin_unlock_irqrestore(&scq->cq_lock, *scq_flags);
+ } else {
+ spin_unlock_irqrestore(&scq->cq_lock, *scq_flags);
+ spin_unlock_irqrestore(&rcq->cq_lock, *rcq_flags);
+ }
+}
+
+static void pvrdma_reset_qp(struct pvrdma_qp *qp)
+{
+ struct pvrdma_cq *scq, *rcq;
+ unsigned long scq_flags, rcq_flags;
+
+ /* Clean up cqes */
+ get_cqs(qp, &scq, &rcq);
+ pvrdma_lock_cqs(scq, rcq, &scq_flags, &rcq_flags);
+
+ _pvrdma_flush_cqe(qp, scq);
+ if (scq != rcq)
+ _pvrdma_flush_cqe(qp, rcq);
+
+ pvrdma_unlock_cqs(scq, rcq, &scq_flags, &rcq_flags);
+
+ /*
+ * Reset queuepair. The checks are because usermode queuepairs won't
+ * have kernel ringstates.
+ */
+ if (qp->rq.ring) {
+ atomic_set(&qp->rq.ring->cons_head, 0);
+ atomic_set(&qp->rq.ring->prod_tail, 0);
+ }
+ if (qp->sq.ring) {
+ atomic_set(&qp->sq.ring->cons_head, 0);
+ atomic_set(&qp->sq.ring->prod_tail, 0);
+ }
+}
+
+static int pvrdma_set_rq_size(struct pvrdma_dev *dev,
+ struct ib_qp_cap *req_cap,
+ struct pvrdma_qp *qp)
+{
+ if (req_cap->max_recv_wr > dev->dsr->caps.max_qp_wr ||
+ req_cap->max_recv_sge > dev->dsr->caps.max_sge) {
+ dev_warn(&dev->pdev->dev, "recv queue size invalid\n");
+ return -EINVAL;
+ }
+
+ qp->rq.wqe_cnt = roundup_pow_of_two(max(1U, req_cap->max_recv_wr));
+ qp->rq.max_sg = roundup_pow_of_two(max(1U, req_cap->max_recv_sge));
+
+ /* Write back */
+ req_cap->max_recv_wr = qp->rq.wqe_cnt;
+ req_cap->max_recv_sge = qp->rq.max_sg;
+
+ qp->rq.wqe_size = roundup_pow_of_two(sizeof(struct pvrdma_rq_wqe_hdr) +
+ sizeof(struct pvrdma_sge) *
+ qp->rq.max_sg);
+ qp->npages_recv = (qp->rq.wqe_cnt * qp->rq.wqe_size + PAGE_SIZE - 1) /
+ PAGE_SIZE;
+
+ return 0;
+}
+
+static int pvrdma_set_sq_size(struct pvrdma_dev *dev, struct ib_qp_cap *req_cap,
+ enum ib_qp_type type, struct pvrdma_qp *qp)
+{
+ if (req_cap->max_send_wr > dev->dsr->caps.max_qp_wr ||
+ req_cap->max_send_sge > dev->dsr->caps.max_sge) {
+ dev_warn(&dev->pdev->dev, "send queue size invalid\n");
+ return -EINVAL;
+ }
+
+ qp->sq.wqe_cnt = roundup_pow_of_two(max(1U, req_cap->max_send_wr));
+ qp->sq.max_sg = roundup_pow_of_two(max(1U, req_cap->max_send_sge));
+
+ /* Write back */
+ req_cap->max_send_wr = qp->sq.wqe_cnt;
+ req_cap->max_send_sge = qp->sq.max_sg;
+
+ qp->sq.wqe_size = roundup_pow_of_two(sizeof(struct pvrdma_sq_wqe_hdr) +
+ sizeof(struct pvrdma_sge) *
+ qp->sq.max_sg);
+ /* Note: one extra page for the header. */
+ qp->npages_send = 1 + (qp->sq.wqe_cnt * qp->sq.wqe_size +
+ PAGE_SIZE - 1) / PAGE_SIZE;
+
+ return 0;
+}
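
A worked example makes the sizing above concrete; the structure sizes are illustrative assumptions, not values from the ABI headers. With sizeof(struct pvrdma_sq_wqe_hdr) == 64, sizeof(struct pvrdma_sge) == 16 and 4 KiB pages, a request for 100 send WRs of 3 SGEs each gives:

	wqe_cnt     = roundup_pow_of_two(100)         = 128
	max_sg      = roundup_pow_of_two(3)           = 4
	wqe_size    = roundup_pow_of_two(64 + 4 * 16) = 128 bytes
	npages_send = 1 + (128 * 128 + 4095) / 4096   = 5

Because the capacities are written back, the consumer sees the rounded-up values (128 WRs, 4 SGEs), which may exceed what it asked for.
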
+
+/**
+ * pvrdma_create_qp - create queue pair
+ * @pd: protection domain
+ * @init_attr: queue pair attributes
+ * @udata: user data
+ *
+ * @return: the ib_qp pointer on success, otherwise returns an errno.
+ */
+struct ib_qp *pvrdma_create_qp(struct ib_pd *pd,
+ struct ib_qp_init_attr *init_attr,
+ struct ib_udata *udata)
+{
+ struct pvrdma_qp *qp = NULL;
+ struct pvrdma_dev *dev = to_vdev(pd->device);
+ union pvrdma_cmd_req req;
+ union pvrdma_cmd_resp rsp;
+ struct pvrdma_cmd_create_qp *cmd = &req.create_qp;
+ struct pvrdma_cmd_create_qp_resp *resp = &rsp.create_qp_resp;
+ struct pvrdma_create_qp ucmd;
+ unsigned long flags;
+ int ret;
+
+ if (init_attr->create_flags) {
+ dev_warn(&dev->pdev->dev,
+ "invalid create queuepair flags %#x\n",
+ init_attr->create_flags);
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (init_attr->qp_type != IB_QPT_RC &&
+ init_attr->qp_type != IB_QPT_UD &&
+ init_attr->qp_type != IB_QPT_GSI) {
+ dev_warn(&dev->pdev->dev, "queuepair type %d not supported\n",
+ init_attr->qp_type);
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (!atomic_add_unless(&dev->num_qps, 1, dev->dsr->caps.max_qp))
+ return ERR_PTR(-ENOMEM);
+
+ switch (init_attr->qp_type) {
+ case IB_QPT_GSI:
+ if (init_attr->port_num == 0 ||
+ init_attr->port_num > pd->device->phys_port_cnt ||
+ udata) {
+ dev_warn(&dev->pdev->dev, "invalid queuepair attrs\n");
+ ret = -EINVAL;
+ goto err_qp;
+ }
+ /* fall through */
+ case IB_QPT_RC:
+ case IB_QPT_UD:
+ qp = kzalloc(sizeof(*qp), GFP_KERNEL);
+ if (!qp) {
+ ret = -ENOMEM;
+ goto err_qp;
+ }
+
+ spin_lock_init(&qp->sq.lock);
+ spin_lock_init(&qp->rq.lock);
+ mutex_init(&qp->mutex);
+ atomic_set(&qp->refcnt, 1);
+ init_waitqueue_head(&qp->wait);
+
+ qp->state = IB_QPS_RESET;
+
+ if (pd->uobject && udata) {
+ dev_dbg(&dev->pdev->dev,
+ "create queuepair from user space\n");
+
+ if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) {
+ ret = -EFAULT;
+ goto err_qp;
+ }
+
+ /* Map the user-supplied send/recv ring buffers. */
+ qp->rumem = ib_umem_get(pd->uobject->context,
+ ucmd.rbuf_addr,
+ ucmd.rbuf_size, 0, 0);
+ if (IS_ERR(qp->rumem)) {
+ ret = PTR_ERR(qp->rumem);
+ goto err_qp;
+ }
+
+ qp->sumem = ib_umem_get(pd->uobject->context,
+ ucmd.sbuf_addr,
+ ucmd.sbuf_size, 0, 0);
+ if (IS_ERR(qp->sumem)) {
+ ib_umem_release(qp->rumem);
+ ret = PTR_ERR(qp->sumem);
+ goto err_qp;
+ }
+
+ qp->npages_send = ib_umem_page_count(qp->sumem);
+ qp->npages_recv = ib_umem_page_count(qp->rumem);
+ qp->npages = qp->npages_send + qp->npages_recv;
+ } else {
+ qp->is_kernel = true;
+
+ ret = pvrdma_set_sq_size(to_vdev(pd->device),
+ &init_attr->cap,
+ init_attr->qp_type, qp);
+ if (ret)
+ goto err_qp;
+
+ ret = pvrdma_set_rq_size(to_vdev(pd->device),
+ &init_attr->cap, qp);
+ if (ret)
+ goto err_qp;
+
+ qp->npages = qp->npages_send + qp->npages_recv;
+
+ /* Skip header page. */
+ qp->sq.offset = PAGE_SIZE;
+
+ /* Recv queue pages are after send pages. */
+ qp->rq.offset = qp->npages_send * PAGE_SIZE;
+ }
+
+ if (qp->npages < 0 || qp->npages > PVRDMA_PAGE_DIR_MAX_PAGES) {
+ dev_warn(&dev->pdev->dev,
+ "overflow pages in queuepair\n");
+ ret = -EINVAL;
+ goto err_umem;
+ }
+
+ ret = pvrdma_page_dir_init(dev, &qp->pdir, qp->npages,
+ qp->is_kernel);
+ if (ret) {
+ dev_warn(&dev->pdev->dev,
+ "could not allocate page directory\n");
+ goto err_umem;
+ }
+
+ if (!qp->is_kernel) {
+ pvrdma_page_dir_insert_umem(&qp->pdir, qp->sumem, 0);
+ pvrdma_page_dir_insert_umem(&qp->pdir, qp->rumem,
+ qp->npages_send);
+ } else {
+ /* Ring state is always the first page. */
+ qp->sq.ring = qp->pdir.pages[0];
+ qp->rq.ring = &qp->sq.ring[1];
+ }
+ break;
+ default:
+ ret = -EINVAL;
+ goto err_qp;
+ }
+
+ /* Not supported */
+ init_attr->cap.max_inline_data = 0;
+
+ memset(cmd, 0, sizeof(*cmd));
+ cmd->hdr.cmd = PVRDMA_CMD_CREATE_QP;
+ cmd->pd_handle = to_vpd(pd)->pd_handle;
+ cmd->send_cq_handle = to_vcq(init_attr->send_cq)->cq_handle;
+ cmd->recv_cq_handle = to_vcq(init_attr->recv_cq)->cq_handle;
+ cmd->max_send_wr = init_attr->cap.max_send_wr;
+ cmd->max_recv_wr = init_attr->cap.max_recv_wr;
+ cmd->max_send_sge = init_attr->cap.max_send_sge;
+ cmd->max_recv_sge = init_attr->cap.max_recv_sge;
+ cmd->max_inline_data = init_attr->cap.max_inline_data;
+ cmd->sq_sig_all = (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR) ? 1 : 0;
+ cmd->qp_type = ib_qp_type_to_pvrdma(init_attr->qp_type);
+ cmd->access_flags = IB_ACCESS_LOCAL_WRITE;
+ cmd->total_chunks = qp->npages;
+ cmd->send_chunks = qp->npages_send - 1;
+ cmd->pdir_dma = qp->pdir.dir_dma;
+
+ dev_dbg(&dev->pdev->dev, "create queuepair with %d, %d, %d, %d\n",
+ cmd->max_send_wr, cmd->max_recv_wr, cmd->max_send_sge,
+ cmd->max_recv_sge);
+
+ ret = pvrdma_cmd_post(dev, &req, &rsp, PVRDMA_CMD_CREATE_QP_RESP);
+ if (ret < 0) {
+ dev_warn(&dev->pdev->dev,
+ "could not create queuepair, error: %d\n", ret);
+ goto err_pdir;
+ }
+
+ /* max_send_wr/_recv_wr/_send_sge/_recv_sge/_inline_data */
+ qp->qp_handle = resp->qpn;
+ qp->port = init_attr->port_num;
+ qp->ibqp.qp_num = resp->qpn;
+ spin_lock_irqsave(&dev->qp_tbl_lock, flags);
+ dev->qp_tbl[qp->qp_handle % dev->dsr->caps.max_qp] = qp;
+ spin_unlock_irqrestore(&dev->qp_tbl_lock, flags);
+
+ return &qp->ibqp;
+
+err_pdir:
+ pvrdma_page_dir_cleanup(dev, &qp->pdir);
+err_umem:
+ if (pd->uobject && udata) {
+ if (qp->rumem)
+ ib_umem_release(qp->rumem);
+ if (qp->sumem)
+ ib_umem_release(qp->sumem);
+ }
+err_qp:
+ kfree(qp);
+ atomic_dec(&dev->num_qps);
+
+ return ERR_PTR(ret);
+}
+
+static void pvrdma_free_qp(struct pvrdma_qp *qp)
+{
+ struct pvrdma_dev *dev = to_vdev(qp->ibqp.device);
+ struct pvrdma_cq *scq;
+ struct pvrdma_cq *rcq;
+ unsigned long flags, scq_flags, rcq_flags;
+
+ /* Flush CQEs under the CQ locks in case a consumer is polling. */
+ get_cqs(qp, &scq, &rcq);
+ pvrdma_lock_cqs(scq, rcq, &scq_flags, &rcq_flags);
+
+ _pvrdma_flush_cqe(qp, scq);
+ if (scq != rcq)
+ _pvrdma_flush_cqe(qp, rcq);
+
+ spin_lock_irqsave(&dev->qp_tbl_lock, flags);
+ dev->qp_tbl[qp->qp_handle] = NULL;
+ spin_unlock_irqrestore(&dev->qp_tbl_lock, flags);
+
+ pvrdma_unlock_cqs(scq, rcq, &scq_flags, &rcq_flags);
+
+ atomic_dec(&qp->refcnt);
+ wait_event(qp->wait, !atomic_read(&qp->refcnt));
+
+ pvrdma_page_dir_cleanup(dev, &qp->pdir);
+
+ kfree(qp);
+
+ atomic_dec(&dev->num_qps);
+}
+
+/**
+ * pvrdma_destroy_qp - destroy a queue pair
+ * @qp: the queue pair to destroy
+ *
+ * @return: 0 on success.
+ */
+int pvrdma_destroy_qp(struct ib_qp *qp)
+{
+ struct pvrdma_qp *vqp = to_vqp(qp);
+ union pvrdma_cmd_req req;
+ struct pvrdma_cmd_destroy_qp *cmd = &req.destroy_qp;
+ int ret;
+
+ memset(cmd, 0, sizeof(*cmd));
+ cmd->hdr.cmd = PVRDMA_CMD_DESTROY_QP;
+ cmd->qp_handle = vqp->qp_handle;
+
+ ret = pvrdma_cmd_post(to_vdev(qp->device), &req, NULL, 0);
+ if (ret < 0)
+ dev_warn(&to_vdev(qp->device)->pdev->dev,
+ "destroy queuepair failed, error: %d\n", ret);
+
+ pvrdma_free_qp(vqp);
+
+ return 0;
+}
+
+/**
+ * pvrdma_modify_qp - modify queue pair attributes
+ * @ibqp: the queue pair
+ * @attr: the new queue pair's attributes
+ * @attr_mask: attributes mask
+ * @udata: user data
+ *
+ * @return: 0 on success, otherwise returns an errno.
+ */
+int pvrdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
+ int attr_mask, struct ib_udata *udata)
+{
+ struct pvrdma_dev *dev = to_vdev(ibqp->device);
+ struct pvrdma_qp *qp = to_vqp(ibqp);
+ union pvrdma_cmd_req req;
+ union pvrdma_cmd_resp rsp;
+ struct pvrdma_cmd_modify_qp *cmd = &req.modify_qp;
+ int cur_state, next_state;
+ int ret;
+
+ /* Sanity checks; these must run under the QP mutex. */
+ mutex_lock(&qp->mutex);
+ cur_state = (attr_mask & IB_QP_CUR_STATE) ? attr->cur_qp_state :
+ qp->state;
+ next_state = (attr_mask & IB_QP_STATE) ? attr->qp_state : cur_state;
+
+ if (!ib_modify_qp_is_ok(cur_state, next_state, ibqp->qp_type,
+ attr_mask, IB_LINK_LAYER_ETHERNET)) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (attr_mask & IB_QP_PORT) {
+ if (attr->port_num == 0 ||
+ attr->port_num > ibqp->device->phys_port_cnt) {
+ ret = -EINVAL;
+ goto out;
+ }
+ }
+
+ if (attr_mask & IB_QP_MIN_RNR_TIMER) {
+ if (attr->min_rnr_timer > 31) {
+ ret = -EINVAL;
+ goto out;
+ }
+ }
+
+ if (attr_mask & IB_QP_PKEY_INDEX) {
+ if (attr->pkey_index >= dev->dsr->caps.max_pkeys) {
+ ret = -EINVAL;
+ goto out;
+ }
+ }
+
+ if (attr_mask & IB_QP_QKEY)
+ qp->qkey = attr->qkey;
+
+ if (cur_state == next_state && cur_state == IB_QPS_RESET) {
+ ret = 0;
+ goto out;
+ }
+
+ qp->state = next_state;
+ memset(cmd, 0, sizeof(*cmd));
+ cmd->hdr.cmd = PVRDMA_CMD_MODIFY_QP;
+ cmd->qp_handle = qp->qp_handle;
+ cmd->attr_mask = ib_qp_attr_mask_to_pvrdma(attr_mask);
+ cmd->attrs.qp_state = ib_qp_state_to_pvrdma(attr->qp_state);
+ cmd->attrs.cur_qp_state =
+ ib_qp_state_to_pvrdma(attr->cur_qp_state);
+ cmd->attrs.path_mtu = ib_mtu_to_pvrdma(attr->path_mtu);
+ cmd->attrs.path_mig_state =
+ ib_mig_state_to_pvrdma(attr->path_mig_state);
+ cmd->attrs.qkey = attr->qkey;
+ cmd->attrs.rq_psn = attr->rq_psn;
+ cmd->attrs.sq_psn = attr->sq_psn;
+ cmd->attrs.dest_qp_num = attr->dest_qp_num;
+ cmd->attrs.qp_access_flags =
+ ib_access_flags_to_pvrdma(attr->qp_access_flags);
+ cmd->attrs.pkey_index = attr->pkey_index;
+ cmd->attrs.alt_pkey_index = attr->alt_pkey_index;
+ cmd->attrs.en_sqd_async_notify = attr->en_sqd_async_notify;
+ cmd->attrs.sq_draining = attr->sq_draining;
+ cmd->attrs.max_rd_atomic = attr->max_rd_atomic;
+ cmd->attrs.max_dest_rd_atomic = attr->max_dest_rd_atomic;
+ cmd->attrs.min_rnr_timer = attr->min_rnr_timer;
+ cmd->attrs.port_num = attr->port_num;
+ cmd->attrs.timeout = attr->timeout;
+ cmd->attrs.retry_cnt = attr->retry_cnt;
+ cmd->attrs.rnr_retry = attr->rnr_retry;
+ cmd->attrs.alt_port_num = attr->alt_port_num;
+ cmd->attrs.alt_timeout = attr->alt_timeout;
+ ib_qp_cap_to_pvrdma(&cmd->attrs.cap, &attr->cap);
+ ib_ah_attr_to_pvrdma(&cmd->attrs.ah_attr, &attr->ah_attr);
+ ib_ah_attr_to_pvrdma(&cmd->attrs.alt_ah_attr, &attr->alt_ah_attr);
+
+ ret = pvrdma_cmd_post(dev, &req, &rsp, PVRDMA_CMD_MODIFY_QP_RESP);
+ if (ret < 0) {
+ dev_warn(&dev->pdev->dev,
+ "could not modify queuepair, error: %d\n", ret);
+ } else if (rsp.hdr.err > 0) {
+ dev_warn(&dev->pdev->dev,
+ "cannot modify queuepair, error: %d\n", rsp.hdr.err);
+ ret = -EINVAL;
+ }
+
+ if (ret == 0 && next_state == IB_QPS_RESET)
+ pvrdma_reset_qp(qp);
+
+out:
+ mutex_unlock(&qp->mutex);
+
+ return ret;
+}
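
pvrdma_modify_qp() is exercised through the standard verbs state machine that ib_modify_qp_is_ok() enforces above. A minimal sketch of bringing a fresh RC queue pair to a usable state with the usual RESET -> INIT -> RTR -> RTS sequence (error handling elided; qp and remote_qpn are assumed, and the address vector must be filled in from the peer's GID):

	struct ib_qp_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.qp_state = IB_QPS_INIT;
	attr.pkey_index = 0;
	attr.port_num = 1;
	attr.qp_access_flags = IB_ACCESS_REMOTE_READ | IB_ACCESS_REMOTE_WRITE;
	ib_modify_qp(qp, &attr, IB_QP_STATE | IB_QP_PKEY_INDEX |
		     IB_QP_PORT | IB_QP_ACCESS_FLAGS);

	attr.qp_state = IB_QPS_RTR;
	attr.path_mtu = IB_MTU_1024;
	attr.dest_qp_num = remote_qpn;
	attr.rq_psn = 0;
	attr.max_dest_rd_atomic = 1;
	attr.min_rnr_timer = 12;
	/* attr.ah_attr must be set up from the remote GID here. */
	ib_modify_qp(qp, &attr, IB_QP_STATE | IB_QP_AV | IB_QP_PATH_MTU |
		     IB_QP_DEST_QPN | IB_QP_RQ_PSN |
		     IB_QP_MAX_DEST_RD_ATOMIC | IB_QP_MIN_RNR_TIMER);

	attr.qp_state = IB_QPS_RTS;
	attr.timeout = 14;
	attr.retry_cnt = 7;
	attr.rnr_retry = 7;
	attr.sq_psn = 0;
	attr.max_rd_atomic = 1;
	ib_modify_qp(qp, &attr, IB_QP_STATE | IB_QP_TIMEOUT |
		     IB_QP_RETRY_CNT | IB_QP_RNR_RETRY |
		     IB_QP_SQ_PSN | IB_QP_MAX_QP_RD_ATOMIC);
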
+
+static inline void *get_sq_wqe(struct pvrdma_qp *qp, int n)
+{
+ return pvrdma_page_dir_get_ptr(&qp->pdir,
+ qp->sq.offset + n * qp->sq.wqe_size);
+}
+
+static inline void *get_rq_wqe(struct pvrdma_qp *qp, int n)
+{
+ return pvrdma_page_dir_get_ptr(&qp->pdir,
+ qp->rq.offset + n * qp->rq.wqe_size);
+}
+
+static int set_reg_seg(struct pvrdma_sq_wqe_hdr *wqe_hdr, struct ib_reg_wr *wr)
+{
+ struct pvrdma_user_mr *mr = to_vmr(wr->mr);
+
+ wqe_hdr->wr.fast_reg.iova_start = mr->ibmr.iova;
+ wqe_hdr->wr.fast_reg.pl_pdir_dma = mr->pdir.dir_dma;
+ wqe_hdr->wr.fast_reg.page_shift = mr->page_shift;
+ wqe_hdr->wr.fast_reg.page_list_len = mr->npages;
+ wqe_hdr->wr.fast_reg.length = mr->ibmr.length;
+ wqe_hdr->wr.fast_reg.access_flags = wr->access;
+ wqe_hdr->wr.fast_reg.rkey = wr->key;
+
+ return pvrdma_page_dir_insert_page_list(&mr->pdir, mr->pages,
+ mr->npages);
+}
+
+/**
+ * pvrdma_post_send - post send work request entries on a QP
+ * @ibqp: the QP
+ * @wr: work request list to post
+ * @bad_wr: the first bad WR returned
+ *
+ * @return: 0 on success, otherwise returns an errno.
+ */
+int pvrdma_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
+ struct ib_send_wr **bad_wr)
+{
+ struct pvrdma_qp *qp = to_vqp(ibqp);
+ struct pvrdma_dev *dev = to_vdev(ibqp->device);
+ unsigned long flags;
+ struct pvrdma_sq_wqe_hdr *wqe_hdr;
+ struct pvrdma_sge *sge;
+ int i, index;
+ int nreq;
+ int ret;
+
+ /*
+ * In states lower than RTS, we can fail immediately. In other states,
+ * just post and let the device figure it out.
+ */
+ if (qp->state < IB_QPS_RTS) {
+ *bad_wr = wr;
+ return -EINVAL;
+ }
+
+ spin_lock_irqsave(&qp->sq.lock, flags);
+
+ index = pvrdma_idx(&qp->sq.ring->prod_tail, qp->sq.wqe_cnt);
+ for (nreq = 0; wr; nreq++, wr = wr->next) {
+ unsigned int tail;
+
+ if (unlikely(!pvrdma_idx_ring_has_space(
+ qp->sq.ring, qp->sq.wqe_cnt, &tail))) {
+ dev_warn_ratelimited(&dev->pdev->dev,
+ "send queue is full\n");
+ *bad_wr = wr;
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ if (unlikely(wr->num_sge > qp->sq.max_sg || wr->num_sge < 0)) {
+ dev_warn_ratelimited(&dev->pdev->dev,
+ "send SGE overflow\n");
+ *bad_wr = wr;
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (unlikely(wr->opcode < 0)) {
+ dev_warn_ratelimited(&dev->pdev->dev,
+ "invalid send opcode\n");
+ *bad_wr = wr;
+ ret = -EINVAL;
+ goto out;
+ }
+
+ /*
+ * Only UD and RC transports are supported here; the table
+ * below shows which opcodes each transport accepts. Thorough
+ * per-opcode validation against it is still needed.
+ * opcode _UD _UC _RC
+ * _SEND x x x
+ * _SEND_WITH_IMM x x x
+ * _RDMA_WRITE x x
+ * _RDMA_WRITE_WITH_IMM x x
+ * _LOCAL_INV x x
+ * _SEND_WITH_INV x x
+ * _RDMA_READ x
+ * _ATOMIC_CMP_AND_SWP x
+ * _ATOMIC_FETCH_AND_ADD x
+ * _MASK_ATOMIC_CMP_AND_SWP x
+ * _MASK_ATOMIC_FETCH_AND_ADD x
+ * _REG_MR x
+ *
+ */
+ if (qp->ibqp.qp_type != IB_QPT_UD &&
+ qp->ibqp.qp_type != IB_QPT_RC &&
+ wr->opcode != IB_WR_SEND) {
+ dev_warn_ratelimited(&dev->pdev->dev,
+ "unsupported queuepair type\n");
+ *bad_wr = wr;
+ ret = -EINVAL;
+ goto out;
+ } else if (qp->ibqp.qp_type == IB_QPT_UD ||
+ qp->ibqp.qp_type == IB_QPT_GSI) {
+ if (wr->opcode != IB_WR_SEND &&
+ wr->opcode != IB_WR_SEND_WITH_IMM) {
+ dev_warn_ratelimited(&dev->pdev->dev,
+ "invalid send opcode\n");
+ *bad_wr = wr;
+ ret = -EINVAL;
+ goto out;
+ }
+ }
+
+ wqe_hdr = (struct pvrdma_sq_wqe_hdr *)get_sq_wqe(qp, index);
+ memset(wqe_hdr, 0, sizeof(*wqe_hdr));
+ wqe_hdr->wr_id = wr->wr_id;
+ wqe_hdr->num_sge = wr->num_sge;
+ wqe_hdr->opcode = ib_wr_opcode_to_pvrdma(wr->opcode);
+ wqe_hdr->send_flags = ib_send_flags_to_pvrdma(wr->send_flags);
+ if (wr->opcode == IB_WR_SEND_WITH_IMM ||
+ wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM)
+ wqe_hdr->ex.imm_data = wr->ex.imm_data;
+
+ switch (qp->ibqp.qp_type) {
+ case IB_QPT_GSI:
+ case IB_QPT_UD:
+ if (unlikely(!ud_wr(wr)->ah)) {
+ dev_warn_ratelimited(&dev->pdev->dev,
+ "invalid address handle\n");
+ *bad_wr = wr;
+ ret = -EINVAL;
+ goto out;
+ }
+
+ /*
+ * Use qkey from qp context if high order bit set,
+ * otherwise from work request.
+ */
+ wqe_hdr->wr.ud.remote_qpn = ud_wr(wr)->remote_qpn;
+ wqe_hdr->wr.ud.remote_qkey =
+ ud_wr(wr)->remote_qkey & 0x80000000 ?
+ qp->qkey : ud_wr(wr)->remote_qkey;
+ wqe_hdr->wr.ud.av = to_vah(ud_wr(wr)->ah)->av;
+
+ break;
+ case IB_QPT_RC:
+ switch (wr->opcode) {
+ case IB_WR_RDMA_READ:
+ case IB_WR_RDMA_WRITE:
+ case IB_WR_RDMA_WRITE_WITH_IMM:
+ wqe_hdr->wr.rdma.remote_addr =
+ rdma_wr(wr)->remote_addr;
+ wqe_hdr->wr.rdma.rkey = rdma_wr(wr)->rkey;
+ break;
+ case IB_WR_LOCAL_INV:
+ case IB_WR_SEND_WITH_INV:
+ wqe_hdr->ex.invalidate_rkey =
+ wr->ex.invalidate_rkey;
+ break;
+ case IB_WR_ATOMIC_CMP_AND_SWP:
+ case IB_WR_ATOMIC_FETCH_AND_ADD:
+ wqe_hdr->wr.atomic.remote_addr =
+ atomic_wr(wr)->remote_addr;
+ wqe_hdr->wr.atomic.rkey = atomic_wr(wr)->rkey;
+ wqe_hdr->wr.atomic.compare_add =
+ atomic_wr(wr)->compare_add;
+ if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP)
+ wqe_hdr->wr.atomic.swap =
+ atomic_wr(wr)->swap;
+ break;
+ case IB_WR_REG_MR:
+ ret = set_reg_seg(wqe_hdr, reg_wr(wr));
+ if (ret < 0) {
+ dev_warn_ratelimited(&dev->pdev->dev,
+ "Failed to set fast register work request\n");
+ *bad_wr = wr;
+ goto out;
+ }
+ break;
+ default:
+ break;
+ }
+
+ break;
+ default:
+ dev_warn_ratelimited(&dev->pdev->dev,
+ "invalid queuepair type\n");
+ ret = -EINVAL;
+ *bad_wr = wr;
+ goto out;
+ }
+
+ sge = (struct pvrdma_sge *)(wqe_hdr + 1);
+ for (i = 0; i < wr->num_sge; i++) {
+ /* Note: still need to validate against zero or maximum wqe_size. */
+ sge->addr = wr->sg_list[i].addr;
+ sge->length = wr->sg_list[i].length;
+ sge->lkey = wr->sg_list[i].lkey;
+ sge++;
+ }
+
+ /* Make sure wqe is written before index update */
+ smp_wmb();
+
+ index++;
+ if (unlikely(index >= qp->sq.wqe_cnt))
+ index = 0;
+ /* Update shared sq ring */
+ pvrdma_idx_ring_inc(&qp->sq.ring->prod_tail,
+ qp->sq.wqe_cnt);
+ }
+
+ ret = 0;
+
+out:
+ spin_unlock_irqrestore(&qp->sq.lock, flags);
+
+ if (!ret)
+ pvrdma_write_uar_qp(dev, PVRDMA_UAR_QP_SEND | qp->qp_handle);
+
+ return ret;
+}
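
For reference, the simplest request a kernel consumer feeds into this path is a single-SGE signaled send; the ib_sge fields map one-to-one onto the struct pvrdma_sge copies in the loop above (qp, dma_addr, len and mr are assumed):

	struct ib_send_wr *bad_wr;
	struct ib_sge sge = {
		.addr   = dma_addr,	/* DMA address of a mapped buffer */
		.length = len,
		.lkey   = mr->lkey,
	};
	struct ib_send_wr wr = {
		.wr_id      = 1,
		.sg_list    = &sge,
		.num_sge    = 1,
		.opcode     = IB_WR_SEND,
		.send_flags = IB_SEND_SIGNALED,
	};
	int ret = ib_post_send(qp, &wr, &bad_wr);
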
+
+/**
+ * pvrdma_post_recv - post receive work request entries on a QP
+ * @ibqp: the QP
+ * @wr: the work request list to post
+ * @bad_wr: the first bad WR returned
+ *
+ * @return: 0 on success, otherwise returns an errno.
+ */
+int pvrdma_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
+ struct ib_recv_wr **bad_wr)
+{
+ struct pvrdma_dev *dev = to_vdev(ibqp->device);
+ unsigned long flags;
+ struct pvrdma_qp *qp = to_vqp(ibqp);
+ struct pvrdma_rq_wqe_hdr *wqe_hdr;
+ struct pvrdma_sge *sge;
+ int index, nreq;
+ int ret = 0;
+ int i;
+
+ /*
+ * In the RESET state, we can fail immediately. For other states,
+ * just post and let the device figure it out.
+ */
+ if (qp->state == IB_QPS_RESET) {
+ *bad_wr = wr;
+ return -EINVAL;
+ }
+
+ spin_lock_irqsave(&qp->rq.lock, flags);
+
+ index = pvrdma_idx(&qp->rq.ring->prod_tail, qp->rq.wqe_cnt);
+ for (nreq = 0; wr; nreq++, wr = wr->next) {
+ unsigned int tail;
+
+ if (unlikely(wr->num_sge > qp->rq.max_sg ||
+ wr->num_sge < 0)) {
+ ret = -EINVAL;
+ *bad_wr = wr;
+ dev_warn_ratelimited(&dev->pdev->dev,
+ "recv SGE overflow\n");
+ goto out;
+ }
+
+ if (unlikely(!pvrdma_idx_ring_has_space(
+ qp->rq.ring, qp->rq.wqe_cnt, &tail))) {
+ ret = -ENOMEM;
+ *bad_wr = wr;
+ dev_warn_ratelimited(&dev->pdev->dev,
+ "recv queue full\n");
+ goto out;
+ }
+
+ wqe_hdr = (struct pvrdma_rq_wqe_hdr *)get_rq_wqe(qp, index);
+ wqe_hdr->wr_id = wr->wr_id;
+ wqe_hdr->num_sge = wr->num_sge;
+ wqe_hdr->total_len = 0;
+
+ sge = (struct pvrdma_sge *)(wqe_hdr + 1);
+ for (i = 0; i < wr->num_sge; i++) {
+ sge->addr = wr->sg_list[i].addr;
+ sge->length = wr->sg_list[i].length;
+ sge->lkey = wr->sg_list[i].lkey;
+ sge++;
+ }
+
+ /* Make sure wqe is written before index update */
+ smp_wmb();
+
+ index++;
+ if (unlikely(index >= qp->rq.wqe_cnt))
+ index = 0;
+ /* Update shared rq ring */
+ pvrdma_idx_ring_inc(&qp->rq.ring->prod_tail,
+ qp->rq.wqe_cnt);
+ }
+
+ spin_unlock_irqrestore(&qp->rq.lock, flags);
+
+ pvrdma_write_uar_qp(dev, PVRDMA_UAR_QP_RECV | qp->qp_handle);
+
+ return ret;
+
+out:
+ spin_unlock_irqrestore(&qp->rq.lock, flags);
+
+ return ret;
+}
+
+/**
+ * pvrdma_query_qp - query a queue pair's attributes
+ * @ibqp: the queue pair to query
+ * @attr: the queue pair's attributes
+ * @attr_mask: attributes mask
+ * @init_attr: initial queue pair attributes
+ *
+ * @return: 0 on success, otherwise returns an errno.
+ */
+int pvrdma_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
+ int attr_mask, struct ib_qp_init_attr *init_attr)
+{
+ struct pvrdma_dev *dev = to_vdev(ibqp->device);
+ struct pvrdma_qp *qp = to_vqp(ibqp);
+ union pvrdma_cmd_req req;
+ union pvrdma_cmd_resp rsp;
+ struct pvrdma_cmd_query_qp *cmd = &req.query_qp;
+ struct pvrdma_cmd_query_qp_resp *resp = &rsp.query_qp_resp;
+ int ret = 0;
+
+ mutex_lock(&qp->mutex);
+
+ if (qp->state == IB_QPS_RESET) {
+ attr->qp_state = IB_QPS_RESET;
+ goto out;
+ }
+
+ memset(cmd, 0, sizeof(*cmd));
+ cmd->hdr.cmd = PVRDMA_CMD_QUERY_QP;
+ cmd->qp_handle = qp->qp_handle;
+ cmd->attr_mask = ib_qp_attr_mask_to_pvrdma(attr_mask);
+
+ ret = pvrdma_cmd_post(dev, &req, &rsp, PVRDMA_CMD_QUERY_QP_RESP);
+ if (ret < 0) {
+ dev_warn(&dev->pdev->dev,
+ "could not query queuepair, error: %d\n", ret);
+ goto out;
+ }
+
+ attr->qp_state = pvrdma_qp_state_to_ib(resp->attrs.qp_state);
+ attr->cur_qp_state =
+ pvrdma_qp_state_to_ib(resp->attrs.cur_qp_state);
+ attr->path_mtu = pvrdma_mtu_to_ib(resp->attrs.path_mtu);
+ attr->path_mig_state =
+ pvrdma_mig_state_to_ib(resp->attrs.path_mig_state);
+ attr->qkey = resp->attrs.qkey;
+ attr->rq_psn = resp->attrs.rq_psn;
+ attr->sq_psn = resp->attrs.sq_psn;
+ attr->dest_qp_num = resp->attrs.dest_qp_num;
+ attr->qp_access_flags =
+ pvrdma_access_flags_to_ib(resp->attrs.qp_access_flags);
+ attr->pkey_index = resp->attrs.pkey_index;
+ attr->alt_pkey_index = resp->attrs.alt_pkey_index;
+ attr->en_sqd_async_notify = resp->attrs.en_sqd_async_notify;
+ attr->sq_draining = resp->attrs.sq_draining;
+ attr->max_rd_atomic = resp->attrs.max_rd_atomic;
+ attr->max_dest_rd_atomic = resp->attrs.max_dest_rd_atomic;
+ attr->min_rnr_timer = resp->attrs.min_rnr_timer;
+ attr->port_num = resp->attrs.port_num;
+ attr->timeout = resp->attrs.timeout;
+ attr->retry_cnt = resp->attrs.retry_cnt;
+ attr->rnr_retry = resp->attrs.rnr_retry;
+ attr->alt_port_num = resp->attrs.alt_port_num;
+ attr->alt_timeout = resp->attrs.alt_timeout;
+ pvrdma_qp_cap_to_ib(&attr->cap, &resp->attrs.cap);
+ pvrdma_ah_attr_to_ib(&attr->ah_attr, &resp->attrs.ah_attr);
+ pvrdma_ah_attr_to_ib(&attr->alt_ah_attr, &resp->attrs.alt_ah_attr);
+
+ qp->state = attr->qp_state;
+
+ ret = 0;
+
+out:
+ attr->cur_qp_state = attr->qp_state;
+
+ init_attr->event_handler = qp->ibqp.event_handler;
+ init_attr->qp_context = qp->ibqp.qp_context;
+ init_attr->send_cq = qp->ibqp.send_cq;
+ init_attr->recv_cq = qp->ibqp.recv_cq;
+ init_attr->srq = qp->ibqp.srq;
+ init_attr->xrcd = NULL;
+ init_attr->cap = attr->cap;
+ init_attr->sq_sig_type = 0;
+ init_attr->qp_type = qp->ibqp.qp_type;
+ init_attr->create_flags = 0;
+ init_attr->port_num = qp->port;
+
+ mutex_unlock(&qp->mutex);
+ return ret;
+}
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_ring.h b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_ring.h
new file mode 100644
index 000000000000..ed9022a91a1d
--- /dev/null
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_ring.h
@@ -0,0 +1,131 @@
+/*
+ * Copyright (c) 2012-2016 VMware, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of EITHER the GNU General Public License
+ * version 2 as published by the Free Software Foundation or the BSD
+ * 2-Clause License. This program is distributed in the hope that it
+ * will be useful, but WITHOUT ANY WARRANTY; WITHOUT EVEN THE IMPLIED
+ * WARRANTY OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE.
+ * See the GNU General Public License version 2 for more details at
+ * http://www.gnu.org/licenses/old-licenses/gpl-2.0.en.html.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program available in the file COPYING in the main
+ * directory of this source tree.
+ *
+ * The BSD 2-Clause License
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
+ * OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __PVRDMA_RING_H__
+#define __PVRDMA_RING_H__
+
+#include <linux/types.h>
+
+#define PVRDMA_INVALID_IDX -1 /* Invalid index. */
+
+struct pvrdma_ring {
+ atomic_t prod_tail; /* Producer tail. */
+ atomic_t cons_head; /* Consumer head. */
+};
+
+struct pvrdma_ring_state {
+ struct pvrdma_ring tx; /* Tx ring. */
+ struct pvrdma_ring rx; /* Rx ring. */
+};
+
+static inline int pvrdma_idx_valid(__u32 idx, __u32 max_elems)
+{
+ /* Generates fewer instructions than a less-than. */
+ return (idx & ~((max_elems << 1) - 1)) == 0;
+}
+
+static inline __s32 pvrdma_idx(atomic_t *var, __u32 max_elems)
+{
+ const unsigned int idx = atomic_read(var);
+
+ if (pvrdma_idx_valid(idx, max_elems))
+ return idx & (max_elems - 1);
+ return PVRDMA_INVALID_IDX;
+}
+
+static inline void pvrdma_idx_ring_inc(atomic_t *var, __u32 max_elems)
+{
+ __u32 idx = atomic_read(var) + 1; /* Increment. */
+
+ idx &= (max_elems << 1) - 1; /* Modulo size, flip gen. */
+ atomic_set(var, idx);
+}
+
+static inline __s32 pvrdma_idx_ring_has_space(const struct pvrdma_ring *r,
+ __u32 max_elems, __u32 *out_tail)
+{
+ const __u32 tail = atomic_read(&r->prod_tail);
+ const __u32 head = atomic_read(&r->cons_head);
+
+ if (pvrdma_idx_valid(tail, max_elems) &&
+ pvrdma_idx_valid(head, max_elems)) {
+ *out_tail = tail & (max_elems - 1);
+ return tail != (head ^ max_elems);
+ }
+ return PVRDMA_INVALID_IDX;
+}
+
+static inline __s32 pvrdma_idx_ring_has_data(const struct pvrdma_ring *r,
+ __u32 max_elems, __u32 *out_head)
+{
+ const __u32 tail = atomic_read(&r->prod_tail);
+ const __u32 head = atomic_read(&r->cons_head);
+
+ if (pvrdma_idx_valid(tail, max_elems) &&
+ pvrdma_idx_valid(head, max_elems)) {
+ *out_head = head & (max_elems - 1);
+ return tail != head;
+ }
+ return PVRDMA_INVALID_IDX;
+}
+
+static inline bool pvrdma_idx_ring_is_valid_idx(const struct pvrdma_ring *r,
+ __u32 max_elems, __u32 *idx)
+{
+ const __u32 tail = atomic_read(&r->prod_tail);
+ const __u32 head = atomic_read(&r->cons_head);
+
+ if (pvrdma_idx_valid(tail, max_elems) &&
+ pvrdma_idx_valid(head, max_elems) &&
+ pvrdma_idx_valid(*idx, max_elems)) {
+ if (tail > head && (*idx < tail && *idx >= head))
+ return true;
+ else if (head > tail && (*idx >= head || *idx < tail))
+ return true;
+ }
+ return false;
+}
+
+#endif /* __PVRDMA_RING_H__ */
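
The ring indices above deliberately run modulo 2 * max_elems rather than max_elems: the extra generation bit lets a full ring be distinguished from an empty one without sacrificing a slot. A worked example with max_elems = 4, so raw indices run 0..7 and the slot number is idx & 3:

	head (cons)   tail (prod)   state
	0             0             empty (tail == head)
	0             4             full  (tail == (head ^ 4))
	5             7             two entries pending, in slots 1 and 2

pvrdma_idx_valid() accepts exactly the 0..7 range because idx & ~((4 << 1) - 1) is idx & ~7, which is zero only there, and pvrdma_idx_ring_inc() wraps with the same mask, flipping the generation bit every time an index passes the ring size.
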
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c
new file mode 100644
index 000000000000..54891370d18a
--- /dev/null
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c
@@ -0,0 +1,579 @@
+/*
+ * Copyright (c) 2012-2016 VMware, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of EITHER the GNU General Public License
+ * version 2 as published by the Free Software Foundation or the BSD
+ * 2-Clause License. This program is distributed in the hope that it
+ * will be useful, but WITHOUT ANY WARRANTY; WITHOUT EVEN THE IMPLIED
+ * WARRANTY OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE.
+ * See the GNU General Public License version 2 for more details at
+ * http://www.gnu.org/licenses/old-licenses/gpl-2.0.en.html.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program available in the file COPYING in the main
+ * directory of this source tree.
+ *
+ * The BSD 2-Clause License
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
+ * OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <asm/page.h>
+#include <linux/inet.h>
+#include <linux/io.h>
+#include <rdma/ib_addr.h>
+#include <rdma/ib_smi.h>
+#include <rdma/ib_user_verbs.h>
+#include <rdma/vmw_pvrdma-abi.h>
+
+#include "pvrdma.h"
+
+/**
+ * pvrdma_query_device - query device
+ * @ibdev: the device to query
+ * @props: the device properties
+ * @uhw: user data
+ *
+ * @return: 0 on success, otherwise negative errno
+ */
+int pvrdma_query_device(struct ib_device *ibdev,
+ struct ib_device_attr *props,
+ struct ib_udata *uhw)
+{
+ struct pvrdma_dev *dev = to_vdev(ibdev);
+
+ if (uhw->inlen || uhw->outlen)
+ return -EINVAL;
+
+ memset(props, 0, sizeof(*props));
+
+ props->fw_ver = dev->dsr->caps.fw_ver;
+ props->sys_image_guid = dev->dsr->caps.sys_image_guid;
+ props->max_mr_size = dev->dsr->caps.max_mr_size;
+ props->page_size_cap = dev->dsr->caps.page_size_cap;
+ props->vendor_id = dev->dsr->caps.vendor_id;
+ props->vendor_part_id = dev->pdev->device;
+ props->hw_ver = dev->dsr->caps.hw_ver;
+ props->max_qp = dev->dsr->caps.max_qp;
+ props->max_qp_wr = dev->dsr->caps.max_qp_wr;
+ props->device_cap_flags = dev->dsr->caps.device_cap_flags;
+ props->max_sge = dev->dsr->caps.max_sge;
+ props->max_cq = dev->dsr->caps.max_cq;
+ props->max_cqe = dev->dsr->caps.max_cqe;
+ props->max_mr = dev->dsr->caps.max_mr;
+ props->max_pd = dev->dsr->caps.max_pd;
+ props->max_qp_rd_atom = dev->dsr->caps.max_qp_rd_atom;
+ props->max_qp_init_rd_atom = dev->dsr->caps.max_qp_init_rd_atom;
+ props->atomic_cap =
+ dev->dsr->caps.atomic_ops &
+ (PVRDMA_ATOMIC_OP_COMP_SWAP | PVRDMA_ATOMIC_OP_FETCH_ADD) ?
+ IB_ATOMIC_HCA : IB_ATOMIC_NONE;
+ props->masked_atomic_cap = props->atomic_cap;
+ props->max_ah = dev->dsr->caps.max_ah;
+ props->max_pkeys = dev->dsr->caps.max_pkeys;
+ props->local_ca_ack_delay = dev->dsr->caps.local_ca_ack_delay;
+ if ((dev->dsr->caps.bmme_flags & PVRDMA_BMME_FLAG_LOCAL_INV) &&
+ (dev->dsr->caps.bmme_flags & PVRDMA_BMME_FLAG_REMOTE_INV) &&
+ (dev->dsr->caps.bmme_flags & PVRDMA_BMME_FLAG_FAST_REG_WR)) {
+ props->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS;
+ }
+
+ return 0;
+}
+
+/**
+ * pvrdma_query_port - query device port attributes
+ * @ibdev: the device to query
+ * @port: the port number
+ * @props: the device properties
+ *
+ * @return: 0 on success, otherwise negative errno
+ */
+int pvrdma_query_port(struct ib_device *ibdev, u8 port,
+ struct ib_port_attr *props)
+{
+ struct pvrdma_dev *dev = to_vdev(ibdev);
+ union pvrdma_cmd_req req;
+ union pvrdma_cmd_resp rsp;
+ struct pvrdma_cmd_query_port *cmd = &req.query_port;
+ struct pvrdma_cmd_query_port_resp *resp = &rsp.query_port_resp;
+ int err;
+
+ memset(cmd, 0, sizeof(*cmd));
+ cmd->hdr.cmd = PVRDMA_CMD_QUERY_PORT;
+ cmd->port_num = port;
+
+ err = pvrdma_cmd_post(dev, &req, &rsp, PVRDMA_CMD_QUERY_PORT_RESP);
+ if (err < 0) {
+ dev_warn(&dev->pdev->dev,
+ "could not query port, error: %d\n", err);
+ return err;
+ }
+
+ memset(props, 0, sizeof(*props));
+
+ props->state = pvrdma_port_state_to_ib(resp->attrs.state);
+ props->max_mtu = pvrdma_mtu_to_ib(resp->attrs.max_mtu);
+ props->active_mtu = pvrdma_mtu_to_ib(resp->attrs.active_mtu);
+ props->gid_tbl_len = resp->attrs.gid_tbl_len;
+ props->port_cap_flags =
+ pvrdma_port_cap_flags_to_ib(resp->attrs.port_cap_flags);
+ props->max_msg_sz = resp->attrs.max_msg_sz;
+ props->bad_pkey_cntr = resp->attrs.bad_pkey_cntr;
+ props->qkey_viol_cntr = resp->attrs.qkey_viol_cntr;
+ props->pkey_tbl_len = resp->attrs.pkey_tbl_len;
+ props->lid = resp->attrs.lid;
+ props->sm_lid = resp->attrs.sm_lid;
+ props->lmc = resp->attrs.lmc;
+ props->max_vl_num = resp->attrs.max_vl_num;
+ props->sm_sl = resp->attrs.sm_sl;
+ props->subnet_timeout = resp->attrs.subnet_timeout;
+ props->init_type_reply = resp->attrs.init_type_reply;
+ props->active_width = pvrdma_port_width_to_ib(resp->attrs.active_width);
+ props->active_speed = pvrdma_port_speed_to_ib(resp->attrs.active_speed);
+ props->phys_state = resp->attrs.phys_state;
+
+ return 0;
+}
+
+/**
+ * pvrdma_query_gid - query device gid
+ * @ibdev: the device to query
+ * @port: the port number
+ * @index: the index
+ * @gid: the device gid value
+ *
+ * @return: 0 on success, otherwise negative errno
+ */
+int pvrdma_query_gid(struct ib_device *ibdev, u8 port, int index,
+ union ib_gid *gid)
+{
+ struct pvrdma_dev *dev = to_vdev(ibdev);
+
+ if (index >= dev->dsr->caps.gid_tbl_len)
+ return -EINVAL;
+
+ memcpy(gid, &dev->sgid_tbl[index], sizeof(union ib_gid));
+
+ return 0;
+}
+
+/**
+ * pvrdma_query_pkey - query device port's P_Key table
+ * @ibdev: the device to query
+ * @port: the port number
+ * @index: the index
+ * @pkey: the device P_Key value
+ *
+ * @return: 0 on success, otherwise negative errno
+ */
+int pvrdma_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
+ u16 *pkey)
+{
+ int err = 0;
+ union pvrdma_cmd_req req;
+ union pvrdma_cmd_resp rsp;
+ struct pvrdma_cmd_query_pkey *cmd = &req.query_pkey;
+
+ memset(cmd, 0, sizeof(*cmd));
+ cmd->hdr.cmd = PVRDMA_CMD_QUERY_PKEY;
+ cmd->port_num = port;
+ cmd->index = index;
+
+ err = pvrdma_cmd_post(to_vdev(ibdev), &req, &rsp,
+ PVRDMA_CMD_QUERY_PKEY_RESP);
+ if (err < 0) {
+ dev_warn(&to_vdev(ibdev)->pdev->dev,
+ "could not query pkey, error: %d\n", err);
+ return err;
+ }
+
+ *pkey = rsp.query_pkey_resp.pkey;
+
+ return 0;
+}
+
+enum rdma_link_layer pvrdma_port_link_layer(struct ib_device *ibdev,
+ u8 port)
+{
+ return IB_LINK_LAYER_ETHERNET;
+}
+
+int pvrdma_modify_device(struct ib_device *ibdev, int mask,
+ struct ib_device_modify *props)
+{
+ unsigned long flags;
+
+ if (mask & ~(IB_DEVICE_MODIFY_SYS_IMAGE_GUID |
+ IB_DEVICE_MODIFY_NODE_DESC)) {
+ dev_warn(&to_vdev(ibdev)->pdev->dev,
+ "unsupported device modify mask %#x\n", mask);
+ return -EOPNOTSUPP;
+ }
+
+ if (mask & IB_DEVICE_MODIFY_NODE_DESC) {
+ spin_lock_irqsave(&to_vdev(ibdev)->desc_lock, flags);
+ memcpy(ibdev->node_desc, props->node_desc, 64);
+ spin_unlock_irqrestore(&to_vdev(ibdev)->desc_lock, flags);
+ }
+
+ if (mask & IB_DEVICE_MODIFY_SYS_IMAGE_GUID) {
+ mutex_lock(&to_vdev(ibdev)->port_mutex);
+ to_vdev(ibdev)->sys_image_guid =
+ cpu_to_be64(props->sys_image_guid);
+ mutex_unlock(&to_vdev(ibdev)->port_mutex);
+ }
+
+ return 0;
+}
+
+/**
+ * pvrdma_modify_port - modify device port attributes
+ * @ibdev: the device to modify
+ * @port: the port number
+ * @mask: attributes to modify
+ * @props: the device properties
+ *
+ * @return: 0 on success, otherwise negative errno
+ */
+int pvrdma_modify_port(struct ib_device *ibdev, u8 port, int mask,
+ struct ib_port_modify *props)
+{
+ struct ib_port_attr attr;
+ struct pvrdma_dev *vdev = to_vdev(ibdev);
+ int ret;
+
+ if (mask & ~IB_PORT_SHUTDOWN) {
+ dev_warn(&vdev->pdev->dev,
+ "unsupported port modify mask %#x\n", mask);
+ return -EOPNOTSUPP;
+ }
+
+ mutex_lock(&vdev->port_mutex);
+ ret = pvrdma_query_port(ibdev, port, &attr);
+ if (ret)
+ goto out;
+
+ vdev->port_cap_mask |= props->set_port_cap_mask;
+ vdev->port_cap_mask &= ~props->clr_port_cap_mask;
+
+ if (mask & IB_PORT_SHUTDOWN)
+ vdev->ib_active = false;
+
+out:
+ mutex_unlock(&vdev->port_mutex);
+ return ret;
+}
+
+/**
+ * pvrdma_alloc_ucontext - allocate ucontext
+ * @ibdev: the IB device
+ * @udata: user data
+ *
+ * @return: the ib_ucontext pointer on success, otherwise errno.
+ */
+struct ib_ucontext *pvrdma_alloc_ucontext(struct ib_device *ibdev,
+ struct ib_udata *udata)
+{
+ struct pvrdma_dev *vdev = to_vdev(ibdev);
+ struct pvrdma_ucontext *context;
+ union pvrdma_cmd_req req;
+ union pvrdma_cmd_resp rsp;
+ struct pvrdma_cmd_create_uc *cmd = &req.create_uc;
+ struct pvrdma_cmd_create_uc_resp *resp = &rsp.create_uc_resp;
+ struct pvrdma_alloc_ucontext_resp uresp;
+ int ret;
+ void *ptr;
+
+ if (!vdev->ib_active)
+ return ERR_PTR(-EAGAIN);
+
+ context = kmalloc(sizeof(*context), GFP_KERNEL);
+ if (!context)
+ return ERR_PTR(-ENOMEM);
+
+ context->dev = vdev;
+ ret = pvrdma_uar_alloc(vdev, &context->uar);
+ if (ret) {
+ kfree(context);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ /* Get the context handle from the device. */
+ memset(cmd, 0, sizeof(*cmd));
+ cmd->pfn = context->uar.pfn;
+ cmd->hdr.cmd = PVRDMA_CMD_CREATE_UC;
+ ret = pvrdma_cmd_post(vdev, &req, &rsp, PVRDMA_CMD_CREATE_UC_RESP);
+ if (ret < 0) {
+ dev_warn(&vdev->pdev->dev,
+ "could not create ucontext, error: %d\n", ret);
+ ptr = ERR_PTR(ret);
+ goto err;
+ }
+
+ context->ctx_handle = resp->ctx_handle;
+
+ /* Copy the response back to user space. */
+ uresp.qp_tab_size = vdev->dsr->caps.max_qp;
+ ret = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
+ if (ret) {
+ pvrdma_uar_free(vdev, &context->uar);
+ context->ibucontext.device = ibdev;
+ pvrdma_dealloc_ucontext(&context->ibucontext);
+ return ERR_PTR(-EFAULT);
+ }
+
+ return &context->ibucontext;
+
+err:
+ pvrdma_uar_free(vdev, &context->uar);
+ kfree(context);
+ return ptr;
+}
+
+/**
+ * pvrdma_dealloc_ucontext - deallocate ucontext
+ * @ibcontext: the ucontext
+ *
+ * @return: 0 on success, otherwise errno.
+ */
+int pvrdma_dealloc_ucontext(struct ib_ucontext *ibcontext)
+{
+ struct pvrdma_ucontext *context = to_vucontext(ibcontext);
+ union pvrdma_cmd_req req;
+ struct pvrdma_cmd_destroy_uc *cmd = &req.destroy_uc;
+ int ret;
+
+ memset(cmd, 0, sizeof(*cmd));
+ cmd->hdr.cmd = PVRDMA_CMD_DESTROY_UC;
+ cmd->ctx_handle = context->ctx_handle;
+
+ ret = pvrdma_cmd_post(context->dev, &req, NULL, 0);
+ if (ret < 0)
+ dev_warn(&context->dev->pdev->dev,
+ "destroy ucontext failed, error: %d\n", ret);
+
+ /* Free the UAR even if the device command failed */
+ pvrdma_uar_free(to_vdev(ibcontext->device), &context->uar);
+ kfree(context);
+
+ return ret;
+}
+
+/**
+ * pvrdma_mmap - create mmap region
+ * @ibcontext: the user context
+ * @vma: the VMA
+ *
+ * @return: 0 on success, otherwise errno.
+ */
+int pvrdma_mmap(struct ib_ucontext *ibcontext, struct vm_area_struct *vma)
+{
+ struct pvrdma_ucontext *context = to_vucontext(ibcontext);
+ unsigned long start = vma->vm_start;
+ unsigned long size = vma->vm_end - vma->vm_start;
+ unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
+
+ dev_dbg(&context->dev->pdev->dev, "create mmap region\n");
+
+ if ((size != PAGE_SIZE) || (offset & ~PAGE_MASK)) {
+ dev_warn(&context->dev->pdev->dev,
+ "invalid params for mmap region\n");
+ return -EINVAL;
+ }
+
+ /* Map the UAR page into the caller's user address space. */
+ vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND;
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+ if (io_remap_pfn_range(vma, start, context->uar.pfn, size,
+ vma->vm_page_prot))
+ return -EAGAIN;
+
+ return 0;
+}
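
The user-space half of this is an ordinary mmap() of one page on the uverbs command file descriptor, which the checks above enforce. A minimal sketch, where cmd_fd and the zero offset are assumptions about the provider library's convention:

	#include <sys/mman.h>

	void *uar = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED,
			 cmd_fd, 0);
	if (uar == MAP_FAILED)
		return -errno;
	/* Doorbell writes to this page now reach the device's UAR BAR. */
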
+
+/**
+ * pvrdma_alloc_pd - allocate protection domain
+ * @ibdev: the IB device
+ * @context: user context
+ * @udata: user data
+ *
+ * @return: the ib_pd protection domain pointer on success, otherwise errno.
+ */
+struct ib_pd *pvrdma_alloc_pd(struct ib_device *ibdev,
+ struct ib_ucontext *context,
+ struct ib_udata *udata)
+{
+ struct pvrdma_pd *pd;
+ struct pvrdma_dev *dev = to_vdev(ibdev);
+ union pvrdma_cmd_req req;
+ union pvrdma_cmd_resp rsp;
+ struct pvrdma_cmd_create_pd *cmd = &req.create_pd;
+ struct pvrdma_cmd_create_pd_resp *resp = &rsp.create_pd_resp;
+ int ret;
+ void *ptr;
+
+ /* Check allowed max pds */
+ if (!atomic_add_unless(&dev->num_pds, 1, dev->dsr->caps.max_pd))
+ return ERR_PTR(-ENOMEM);
+
+ pd = kmalloc(sizeof(*pd), GFP_KERNEL);
+ if (!pd) {
+ ptr = ERR_PTR(-ENOMEM);
+ goto err;
+ }
+
+ memset(cmd, 0, sizeof(*cmd));
+ cmd->hdr.cmd = PVRDMA_CMD_CREATE_PD;
+ cmd->ctx_handle = (context) ? to_vucontext(context)->ctx_handle : 0;
+ ret = pvrdma_cmd_post(dev, &req, &rsp, PVRDMA_CMD_CREATE_PD_RESP);
+ if (ret < 0) {
+ dev_warn(&dev->pdev->dev,
+ "failed to allocate protection domain, error: %d\n",
+ ret);
+ ptr = ERR_PTR(ret);
+ goto freepd;
+ }
+
+ pd->privileged = !context;
+ pd->pd_handle = resp->pd_handle;
+ pd->pdn = resp->pd_handle;
+
+ if (context) {
+ if (ib_copy_to_udata(udata, &pd->pdn, sizeof(__u32))) {
+ dev_warn(&dev->pdev->dev,
+ "failed to copy back protection domain\n");
+ pvrdma_dealloc_pd(&pd->ibpd);
+ return ERR_PTR(-EFAULT);
+ }
+ }
+
+ /* u32 pd handle */
+ return &pd->ibpd;
+
+freepd:
+ kfree(pd);
+err:
+ atomic_dec(&dev->num_pds);
+ return ptr;
+}
+
+/**
+ * pvrdma_dealloc_pd - deallocate protection domain
+ * @pd: the protection domain to be released
+ *
+ * @return: 0 on success, otherwise errno.
+ */
+int pvrdma_dealloc_pd(struct ib_pd *pd)
+{
+ struct pvrdma_dev *dev = to_vdev(pd->device);
+ union pvrdma_cmd_req req;
+ struct pvrdma_cmd_destroy_pd *cmd = &req.destroy_pd;
+ int ret;
+
+ memset(cmd, 0, sizeof(*cmd));
+ cmd->hdr.cmd = PVRDMA_CMD_DESTROY_PD;
+ cmd->pd_handle = to_vpd(pd)->pd_handle;
+
+ ret = pvrdma_cmd_post(dev, &req, NULL, 0);
+ if (ret)
+ dev_warn(&dev->pdev->dev,
+ "could not dealloc protection domain, error: %d\n",
+ ret);
+
+ kfree(to_vpd(pd));
+ atomic_dec(&dev->num_pds);
+
+ return 0;
+}
+
+/**
+ * pvrdma_create_ah - create an address handle
+ * @pd: the protection domain
+ * @ah_attr: the attributes of the AH
+ * @udata: user data blob
+ *
+ * @return: the ib_ah pointer on success, otherwise errno.
+ */
+struct ib_ah *pvrdma_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr,
+ struct ib_udata *udata)
+{
+ struct pvrdma_dev *dev = to_vdev(pd->device);
+ struct pvrdma_ah *ah;
+ enum rdma_link_layer ll;
+
+ if (!(ah_attr->ah_flags & IB_AH_GRH))
+ return ERR_PTR(-EINVAL);
+
+ ll = rdma_port_get_link_layer(pd->device, ah_attr->port_num);
+
+ if (ll != IB_LINK_LAYER_ETHERNET ||
+ rdma_is_multicast_addr((struct in6_addr *)ah_attr->grh.dgid.raw))
+ return ERR_PTR(-EINVAL);
+
+ if (!atomic_add_unless(&dev->num_ahs, 1, dev->dsr->caps.max_ah))
+ return ERR_PTR(-ENOMEM);
+
+ ah = kzalloc(sizeof(*ah), GFP_KERNEL);
+ if (!ah) {
+ atomic_dec(&dev->num_ahs);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ ah->av.port_pd = to_vpd(pd)->pd_handle | (ah_attr->port_num << 24);
+ ah->av.src_path_bits = ah_attr->src_path_bits;
+ ah->av.src_path_bits |= 0x80;
+ ah->av.gid_index = ah_attr->grh.sgid_index;
+ ah->av.hop_limit = ah_attr->grh.hop_limit;
+ ah->av.sl_tclass_flowlabel = (ah_attr->grh.traffic_class << 20) |
+ ah_attr->grh.flow_label;
+ memcpy(ah->av.dgid, ah_attr->grh.dgid.raw, 16);
+ memcpy(ah->av.dmac, ah_attr->dmac, 6);
+
+ ah->ibah.device = pd->device;
+ ah->ibah.pd = pd;
+ ah->ibah.uobject = NULL;
+
+ return &ah->ibah;
+}
+
+/**
+ * pvrdma_destroy_ah - destroy an address handle
+ * @ah: the address handle to be destroyed
+ *
+ * @return: 0 on success.
+ */
+int pvrdma_destroy_ah(struct ib_ah *ah)
+{
+ struct pvrdma_dev *dev = to_vdev(ah->device);
+
+ kfree(to_vah(ah));
+ atomic_dec(&dev->num_ahs);
+
+ return 0;
+}
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.h b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.h
new file mode 100644
index 000000000000..bfbe96b56255
--- /dev/null
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.h
@@ -0,0 +1,436 @@
+/*
+ * Copyright (c) 2012-2016 VMware, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of EITHER the GNU General Public License
+ * version 2 as published by the Free Software Foundation or the BSD
+ * 2-Clause License. This program is distributed in the hope that it
+ * will be useful, but WITHOUT ANY WARRANTY; WITHOUT EVEN THE IMPLIED
+ * WARRANTY OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE.
+ * See the GNU General Public License version 2 for more details at
+ * http://www.gnu.org/licenses/old-licenses/gpl-2.0.en.html.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program available in the file COPYING in the main
+ * directory of this source tree.
+ *
+ * The BSD 2-Clause License
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
+ * OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __PVRDMA_VERBS_H__
+#define __PVRDMA_VERBS_H__
+
+#include <linux/types.h>
+
+union pvrdma_gid {
+ u8 raw[16];
+ struct {
+ __be64 subnet_prefix;
+ __be64 interface_id;
+ } global;
+};
+
+enum pvrdma_link_layer {
+ PVRDMA_LINK_LAYER_UNSPECIFIED,
+ PVRDMA_LINK_LAYER_INFINIBAND,
+ PVRDMA_LINK_LAYER_ETHERNET,
+};
+
+enum pvrdma_mtu {
+ PVRDMA_MTU_256 = 1,
+ PVRDMA_MTU_512 = 2,
+ PVRDMA_MTU_1024 = 3,
+ PVRDMA_MTU_2048 = 4,
+ PVRDMA_MTU_4096 = 5,
+};
+
+static inline int pvrdma_mtu_enum_to_int(enum pvrdma_mtu mtu)
+{
+ switch (mtu) {
+ case PVRDMA_MTU_256: return 256;
+ case PVRDMA_MTU_512: return 512;
+ case PVRDMA_MTU_1024: return 1024;
+ case PVRDMA_MTU_2048: return 2048;
+ case PVRDMA_MTU_4096: return 4096;
+ default: return -1;
+ }
+}
+
+static inline enum pvrdma_mtu pvrdma_mtu_int_to_enum(int mtu)
+{
+ switch (mtu) {
+ case 256: return PVRDMA_MTU_256;
+ case 512: return PVRDMA_MTU_512;
+ case 1024: return PVRDMA_MTU_1024;
+ case 2048: return PVRDMA_MTU_2048;
+ case 4096:
+ default: return PVRDMA_MTU_4096;
+ }
+}
+
+enum pvrdma_port_state {
+ PVRDMA_PORT_NOP = 0,
+ PVRDMA_PORT_DOWN = 1,
+ PVRDMA_PORT_INIT = 2,
+ PVRDMA_PORT_ARMED = 3,
+ PVRDMA_PORT_ACTIVE = 4,
+ PVRDMA_PORT_ACTIVE_DEFER = 5,
+};
+
+enum pvrdma_port_cap_flags {
+ PVRDMA_PORT_SM = 1 << 1,
+ PVRDMA_PORT_NOTICE_SUP = 1 << 2,
+ PVRDMA_PORT_TRAP_SUP = 1 << 3,
+ PVRDMA_PORT_OPT_IPD_SUP = 1 << 4,
+ PVRDMA_PORT_AUTO_MIGR_SUP = 1 << 5,
+ PVRDMA_PORT_SL_MAP_SUP = 1 << 6,
+ PVRDMA_PORT_MKEY_NVRAM = 1 << 7,
+ PVRDMA_PORT_PKEY_NVRAM = 1 << 8,
+ PVRDMA_PORT_LED_INFO_SUP = 1 << 9,
+ PVRDMA_PORT_SM_DISABLED = 1 << 10,
+ PVRDMA_PORT_SYS_IMAGE_GUID_SUP = 1 << 11,
+ PVRDMA_PORT_PKEY_SW_EXT_PORT_TRAP_SUP = 1 << 12,
+ PVRDMA_PORT_EXTENDED_SPEEDS_SUP = 1 << 14,
+ PVRDMA_PORT_CM_SUP = 1 << 16,
+ PVRDMA_PORT_SNMP_TUNNEL_SUP = 1 << 17,
+ PVRDMA_PORT_REINIT_SUP = 1 << 18,
+ PVRDMA_PORT_DEVICE_MGMT_SUP = 1 << 19,
+ PVRDMA_PORT_VENDOR_CLASS_SUP = 1 << 20,
+ PVRDMA_PORT_DR_NOTICE_SUP = 1 << 21,
+ PVRDMA_PORT_CAP_MASK_NOTICE_SUP = 1 << 22,
+ PVRDMA_PORT_BOOT_MGMT_SUP = 1 << 23,
+ PVRDMA_PORT_LINK_LATENCY_SUP = 1 << 24,
+ PVRDMA_PORT_CLIENT_REG_SUP = 1 << 25,
+ PVRDMA_PORT_IP_BASED_GIDS = 1 << 26,
+ PVRDMA_PORT_CAP_FLAGS_MAX = PVRDMA_PORT_IP_BASED_GIDS,
+};
+
+enum pvrdma_port_width {
+ PVRDMA_WIDTH_1X = 1,
+ PVRDMA_WIDTH_4X = 2,
+ PVRDMA_WIDTH_8X = 4,
+ PVRDMA_WIDTH_12X = 8,
+};
+
+static inline int pvrdma_width_enum_to_int(enum pvrdma_port_width width)
+{
+ switch (width) {
+ case PVRDMA_WIDTH_1X: return 1;
+ case PVRDMA_WIDTH_4X: return 4;
+ case PVRDMA_WIDTH_8X: return 8;
+ case PVRDMA_WIDTH_12X: return 12;
+ default: return -1;
+ }
+}
+
+enum pvrdma_port_speed {
+ PVRDMA_SPEED_SDR = 1,
+ PVRDMA_SPEED_DDR = 2,
+ PVRDMA_SPEED_QDR = 4,
+ PVRDMA_SPEED_FDR10 = 8,
+ PVRDMA_SPEED_FDR = 16,
+ PVRDMA_SPEED_EDR = 32,
+};
+
+struct pvrdma_port_attr {
+ enum pvrdma_port_state state;
+ enum pvrdma_mtu max_mtu;
+ enum pvrdma_mtu active_mtu;
+ u32 gid_tbl_len;
+ u32 port_cap_flags;
+ u32 max_msg_sz;
+ u32 bad_pkey_cntr;
+ u32 qkey_viol_cntr;
+ u16 pkey_tbl_len;
+ u16 lid;
+ u16 sm_lid;
+ u8 lmc;
+ u8 max_vl_num;
+ u8 sm_sl;
+ u8 subnet_timeout;
+ u8 init_type_reply;
+ u8 active_width;
+ u8 active_speed;
+ u8 phys_state;
+ u8 reserved[2];
+};
+
+struct pvrdma_global_route {
+ union pvrdma_gid dgid;
+ u32 flow_label;
+ u8 sgid_index;
+ u8 hop_limit;
+ u8 traffic_class;
+ u8 reserved;
+};
+
+struct pvrdma_grh {
+ __be32 version_tclass_flow;
+ __be16 paylen;
+ u8 next_hdr;
+ u8 hop_limit;
+ union pvrdma_gid sgid;
+ union pvrdma_gid dgid;
+};
+
+enum pvrdma_ah_flags {
+ PVRDMA_AH_GRH = 1,
+};
+
+enum pvrdma_rate {
+ PVRDMA_RATE_PORT_CURRENT = 0,
+ PVRDMA_RATE_2_5_GBPS = 2,
+ PVRDMA_RATE_5_GBPS = 5,
+ PVRDMA_RATE_10_GBPS = 3,
+ PVRDMA_RATE_20_GBPS = 6,
+ PVRDMA_RATE_30_GBPS = 4,
+ PVRDMA_RATE_40_GBPS = 7,
+ PVRDMA_RATE_60_GBPS = 8,
+ PVRDMA_RATE_80_GBPS = 9,
+ PVRDMA_RATE_120_GBPS = 10,
+ PVRDMA_RATE_14_GBPS = 11,
+ PVRDMA_RATE_56_GBPS = 12,
+ PVRDMA_RATE_112_GBPS = 13,
+ PVRDMA_RATE_168_GBPS = 14,
+ PVRDMA_RATE_25_GBPS = 15,
+ PVRDMA_RATE_100_GBPS = 16,
+ PVRDMA_RATE_200_GBPS = 17,
+ PVRDMA_RATE_300_GBPS = 18,
+};
+
+struct pvrdma_ah_attr {
+ struct pvrdma_global_route grh;
+ u16 dlid;
+ u16 vlan_id;
+ u8 sl;
+ u8 src_path_bits;
+ u8 static_rate;
+ u8 ah_flags;
+ u8 port_num;
+ u8 dmac[6];
+ u8 reserved;
+};
+
+enum pvrdma_cq_notify_flags {
+ PVRDMA_CQ_SOLICITED = 1 << 0,
+ PVRDMA_CQ_NEXT_COMP = 1 << 1,
+ PVRDMA_CQ_SOLICITED_MASK = PVRDMA_CQ_SOLICITED |
+ PVRDMA_CQ_NEXT_COMP,
+ PVRDMA_CQ_REPORT_MISSED_EVENTS = 1 << 2,
+};
+
+struct pvrdma_qp_cap {
+ u32 max_send_wr;
+ u32 max_recv_wr;
+ u32 max_send_sge;
+ u32 max_recv_sge;
+ u32 max_inline_data;
+ u32 reserved;
+};
+
+enum pvrdma_sig_type {
+ PVRDMA_SIGNAL_ALL_WR,
+ PVRDMA_SIGNAL_REQ_WR,
+};
+
+enum pvrdma_qp_type {
+ PVRDMA_QPT_SMI,
+ PVRDMA_QPT_GSI,
+ PVRDMA_QPT_RC,
+ PVRDMA_QPT_UC,
+ PVRDMA_QPT_UD,
+ PVRDMA_QPT_RAW_IPV6,
+ PVRDMA_QPT_RAW_ETHERTYPE,
+ PVRDMA_QPT_RAW_PACKET = 8,
+ PVRDMA_QPT_XRC_INI = 9,
+ PVRDMA_QPT_XRC_TGT,
+ PVRDMA_QPT_MAX,
+};
+
+enum pvrdma_qp_create_flags {
+ PVRDMA_QP_CREATE_IPOPVRDMA_UD_LSO = 1 << 0,
+ PVRDMA_QP_CREATE_BLOCK_MULTICAST_LOOPBACK = 1 << 1,
+};
+
+enum pvrdma_qp_attr_mask {
+ PVRDMA_QP_STATE = 1 << 0,
+ PVRDMA_QP_CUR_STATE = 1 << 1,
+ PVRDMA_QP_EN_SQD_ASYNC_NOTIFY = 1 << 2,
+ PVRDMA_QP_ACCESS_FLAGS = 1 << 3,
+ PVRDMA_QP_PKEY_INDEX = 1 << 4,
+ PVRDMA_QP_PORT = 1 << 5,
+ PVRDMA_QP_QKEY = 1 << 6,
+ PVRDMA_QP_AV = 1 << 7,
+ PVRDMA_QP_PATH_MTU = 1 << 8,
+ PVRDMA_QP_TIMEOUT = 1 << 9,
+ PVRDMA_QP_RETRY_CNT = 1 << 10,
+ PVRDMA_QP_RNR_RETRY = 1 << 11,
+ PVRDMA_QP_RQ_PSN = 1 << 12,
+ PVRDMA_QP_MAX_QP_RD_ATOMIC = 1 << 13,
+ PVRDMA_QP_ALT_PATH = 1 << 14,
+ PVRDMA_QP_MIN_RNR_TIMER = 1 << 15,
+ PVRDMA_QP_SQ_PSN = 1 << 16,
+ PVRDMA_QP_MAX_DEST_RD_ATOMIC = 1 << 17,
+ PVRDMA_QP_PATH_MIG_STATE = 1 << 18,
+ PVRDMA_QP_CAP = 1 << 19,
+ PVRDMA_QP_DEST_QPN = 1 << 20,
+ PVRDMA_QP_ATTR_MASK_MAX = PVRDMA_QP_DEST_QPN,
+};
+
+enum pvrdma_qp_state {
+ PVRDMA_QPS_RESET,
+ PVRDMA_QPS_INIT,
+ PVRDMA_QPS_RTR,
+ PVRDMA_QPS_RTS,
+ PVRDMA_QPS_SQD,
+ PVRDMA_QPS_SQE,
+ PVRDMA_QPS_ERR,
+};
+
+enum pvrdma_mig_state {
+ PVRDMA_MIG_MIGRATED,
+ PVRDMA_MIG_REARM,
+ PVRDMA_MIG_ARMED,
+};
+
+enum pvrdma_mw_type {
+ PVRDMA_MW_TYPE_1 = 1,
+ PVRDMA_MW_TYPE_2 = 2,
+};
+
+struct pvrdma_qp_attr {
+ enum pvrdma_qp_state qp_state;
+ enum pvrdma_qp_state cur_qp_state;
+ enum pvrdma_mtu path_mtu;
+ enum pvrdma_mig_state path_mig_state;
+ u32 qkey;
+ u32 rq_psn;
+ u32 sq_psn;
+ u32 dest_qp_num;
+ u32 qp_access_flags;
+ u16 pkey_index;
+ u16 alt_pkey_index;
+ u8 en_sqd_async_notify;
+ u8 sq_draining;
+ u8 max_rd_atomic;
+ u8 max_dest_rd_atomic;
+ u8 min_rnr_timer;
+ u8 port_num;
+ u8 timeout;
+ u8 retry_cnt;
+ u8 rnr_retry;
+ u8 alt_port_num;
+ u8 alt_timeout;
+ u8 reserved[5];
+ struct pvrdma_qp_cap cap;
+ struct pvrdma_ah_attr ah_attr;
+ struct pvrdma_ah_attr alt_ah_attr;
+};
+
+enum pvrdma_send_flags {
+ PVRDMA_SEND_FENCE = 1 << 0,
+ PVRDMA_SEND_SIGNALED = 1 << 1,
+ PVRDMA_SEND_SOLICITED = 1 << 2,
+ PVRDMA_SEND_INLINE = 1 << 3,
+ PVRDMA_SEND_IP_CSUM = 1 << 4,
+ PVRDMA_SEND_FLAGS_MAX = PVRDMA_SEND_IP_CSUM,
+};
+
+enum pvrdma_access_flags {
+ PVRDMA_ACCESS_LOCAL_WRITE = 1 << 0,
+ PVRDMA_ACCESS_REMOTE_WRITE = 1 << 1,
+ PVRDMA_ACCESS_REMOTE_READ = 1 << 2,
+ PVRDMA_ACCESS_REMOTE_ATOMIC = 1 << 3,
+ PVRDMA_ACCESS_MW_BIND = 1 << 4,
+ PVRDMA_ZERO_BASED = 1 << 5,
+ PVRDMA_ACCESS_ON_DEMAND = 1 << 6,
+ PVRDMA_ACCESS_FLAGS_MAX = PVRDMA_ACCESS_ON_DEMAND,
+};
+
+int pvrdma_query_device(struct ib_device *ibdev,
+ struct ib_device_attr *props,
+ struct ib_udata *udata);
+int pvrdma_query_port(struct ib_device *ibdev, u8 port,
+ struct ib_port_attr *props);
+int pvrdma_query_gid(struct ib_device *ibdev, u8 port,
+ int index, union ib_gid *gid);
+int pvrdma_query_pkey(struct ib_device *ibdev, u8 port,
+ u16 index, u16 *pkey);
+enum rdma_link_layer pvrdma_port_link_layer(struct ib_device *ibdev,
+ u8 port);
+int pvrdma_modify_device(struct ib_device *ibdev, int mask,
+ struct ib_device_modify *props);
+int pvrdma_modify_port(struct ib_device *ibdev, u8 port,
+ int mask, struct ib_port_modify *props);
+int pvrdma_mmap(struct ib_ucontext *context, struct vm_area_struct *vma);
+struct ib_ucontext *pvrdma_alloc_ucontext(struct ib_device *ibdev,
+ struct ib_udata *udata);
+int pvrdma_dealloc_ucontext(struct ib_ucontext *context);
+struct ib_pd *pvrdma_alloc_pd(struct ib_device *ibdev,
+ struct ib_ucontext *context,
+ struct ib_udata *udata);
+int pvrdma_dealloc_pd(struct ib_pd *ibpd);
+struct ib_mr *pvrdma_get_dma_mr(struct ib_pd *pd, int acc);
+struct ib_mr *pvrdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
+ u64 virt_addr, int access_flags,
+ struct ib_udata *udata);
+int pvrdma_dereg_mr(struct ib_mr *mr);
+struct ib_mr *pvrdma_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
+ u32 max_num_sg);
+int pvrdma_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
+ int sg_nents, unsigned int *sg_offset);
+int pvrdma_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period);
+int pvrdma_resize_cq(struct ib_cq *ibcq, int entries,
+ struct ib_udata *udata);
+struct ib_cq *pvrdma_create_cq(struct ib_device *ibdev,
+ const struct ib_cq_init_attr *attr,
+ struct ib_ucontext *context,
+ struct ib_udata *udata);
+int pvrdma_destroy_cq(struct ib_cq *cq);
+int pvrdma_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
+int pvrdma_req_notify_cq(struct ib_cq *cq, enum ib_cq_notify_flags flags);
+struct ib_ah *pvrdma_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr,
+ struct ib_udata *udata);
+int pvrdma_destroy_ah(struct ib_ah *ah);
+struct ib_qp *pvrdma_create_qp(struct ib_pd *pd,
+ struct ib_qp_init_attr *init_attr,
+ struct ib_udata *udata);
+int pvrdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
+ int attr_mask, struct ib_udata *udata);
+int pvrdma_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
+ int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr);
+int pvrdma_destroy_qp(struct ib_qp *qp);
+int pvrdma_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
+ struct ib_send_wr **bad_wr);
+int pvrdma_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
+ struct ib_recv_wr **bad_wr);
+
+#endif /* __PVRDMA_VERBS_H__ */
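
/*
 * Illustrative sketch (editor's addition, not part of the patch): the MTU
 * helpers above are used in pairs when translating between the PVRDMA
 * device ABI and byte sizes. The round trip is deliberately lossy, since
 * pvrdma_mtu_int_to_enum() clamps anything that is not an exact IB MTU to
 * PVRDMA_MTU_4096. Hypothetical helper showing the consequence:
 */
static inline bool pvrdma_mtu_round_trips(int mtu)
{
	/* true only for the five exact IB MTU sizes, 256..4096 */
	return pvrdma_mtu_enum_to_int(pvrdma_mtu_int_to_enum(mtu)) == mtu;
}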
diff --git a/drivers/infiniband/sw/rdmavt/cq.c b/drivers/infiniband/sw/rdmavt/cq.c
index 6d9904a4a0ab..7aa7a4e312f1 100644
--- a/drivers/infiniband/sw/rdmavt/cq.c
+++ b/drivers/infiniband/sw/rdmavt/cq.c
@@ -119,18 +119,17 @@ void rvt_cq_enter(struct rvt_cq *cq, struct ib_wc *entry, bool solicited)
if (cq->notify == IB_CQ_NEXT_COMP ||
(cq->notify == IB_CQ_SOLICITED &&
(solicited || entry->status != IB_WC_SUCCESS))) {
- struct kthread_worker *worker;
/*
* This will cause send_complete() to be called in
* another thread.
*/
- smp_read_barrier_depends(); /* see rvt_cq_exit */
- worker = cq->rdi->worker;
- if (likely(worker)) {
+ spin_lock(&cq->rdi->n_cqs_lock);
+ if (likely(cq->rdi->worker)) {
cq->notify = RVT_CQ_NONE;
cq->triggered++;
- kthread_queue_work(worker, &cq->comptask);
+ kthread_queue_work(cq->rdi->worker, &cq->comptask);
}
+ spin_unlock(&cq->rdi->n_cqs_lock);
}
spin_unlock_irqrestore(&cq->lock, flags);
@@ -240,15 +239,15 @@ struct ib_cq *rvt_create_cq(struct ib_device *ibdev,
}
}
- spin_lock(&rdi->n_cqs_lock);
+ spin_lock_irq(&rdi->n_cqs_lock);
if (rdi->n_cqs_allocated == rdi->dparms.props.max_cq) {
- spin_unlock(&rdi->n_cqs_lock);
+ spin_unlock_irq(&rdi->n_cqs_lock);
ret = ERR_PTR(-ENOMEM);
goto bail_ip;
}
rdi->n_cqs_allocated++;
- spin_unlock(&rdi->n_cqs_lock);
+ spin_unlock_irq(&rdi->n_cqs_lock);
if (cq->ip) {
spin_lock_irq(&rdi->pending_lock);
@@ -296,9 +295,9 @@ int rvt_destroy_cq(struct ib_cq *ibcq)
struct rvt_dev_info *rdi = cq->rdi;
kthread_flush_work(&cq->comptask);
- spin_lock(&rdi->n_cqs_lock);
+ spin_lock_irq(&rdi->n_cqs_lock);
rdi->n_cqs_allocated--;
- spin_unlock(&rdi->n_cqs_lock);
+ spin_unlock_irq(&rdi->n_cqs_lock);
if (cq->ip)
kref_put(&cq->ip->ref, rvt_release_mmap_info);
else
@@ -504,33 +503,23 @@ int rvt_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry)
*/
int rvt_driver_cq_init(struct rvt_dev_info *rdi)
{
- int ret = 0;
int cpu;
- struct task_struct *task;
+ struct kthread_worker *worker;
if (rdi->worker)
return 0;
+
spin_lock_init(&rdi->n_cqs_lock);
- rdi->worker = kzalloc(sizeof(*rdi->worker), GFP_KERNEL);
- if (!rdi->worker)
- return -ENOMEM;
- kthread_init_worker(rdi->worker);
- task = kthread_create_on_node(
- kthread_worker_fn,
- rdi->worker,
- rdi->dparms.node,
- "%s", rdi->dparms.cq_name);
- if (IS_ERR(task)) {
- kfree(rdi->worker);
- rdi->worker = NULL;
- return PTR_ERR(task);
- }
- set_user_nice(task, MIN_NICE);
cpu = cpumask_first(cpumask_of_node(rdi->dparms.node));
- kthread_bind(task, cpu);
- wake_up_process(task);
- return ret;
+ worker = kthread_create_worker_on_cpu(cpu, 0,
+ "%s", rdi->dparms.cq_name);
+ if (IS_ERR(worker))
+ return PTR_ERR(worker);
+
+ set_user_nice(worker->task, MIN_NICE);
+ rdi->worker = worker;
+ return 0;
}
/**
@@ -541,13 +530,15 @@ void rvt_cq_exit(struct rvt_dev_info *rdi)
{
struct kthread_worker *worker;
+ /* block future queuing from send_complete() */
+ spin_lock_irq(&rdi->n_cqs_lock);
worker = rdi->worker;
- if (!worker)
+ if (!worker) {
+ spin_unlock_irq(&rdi->n_cqs_lock);
return;
- /* blocks future queuing from send_complete() */
+ }
rdi->worker = NULL;
- smp_wmb(); /* See rdi_cq_enter */
- kthread_flush_worker(worker);
- kthread_stop(worker->task);
- kfree(worker);
+ spin_unlock_irq(&rdi->n_cqs_lock);
+
+ kthread_destroy_worker(worker);
}
diff --git a/drivers/infiniband/sw/rdmavt/mcast.c b/drivers/infiniband/sw/rdmavt/mcast.c
index 983d319ac976..05c8c2afb0e3 100644
--- a/drivers/infiniband/sw/rdmavt/mcast.c
+++ b/drivers/infiniband/sw/rdmavt/mcast.c
@@ -81,7 +81,7 @@ static struct rvt_mcast_qp *rvt_mcast_qp_alloc(struct rvt_qp *qp)
goto bail;
mqp->qp = qp;
- atomic_inc(&qp->refcount);
+ rvt_get_qp(qp);
bail:
return mqp;
@@ -92,8 +92,7 @@ static void rvt_mcast_qp_free(struct rvt_mcast_qp *mqp)
struct rvt_qp *qp = mqp->qp;
/* Notify hfi1_destroy_qp() if it is waiting. */
- if (atomic_dec_and_test(&qp->refcount))
- wake_up(&qp->wait);
+ rvt_put_qp(qp);
kfree(mqp);
}
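
/*
 * Illustrative sketch (editor's addition, not part of the patch): judging
 * from the lines removed above, rvt_get_qp()/rvt_put_qp() simply wrap the
 * previously open-coded QP reference counting, presumably equivalent to:
 */
static inline void sketch_get_qp(struct rvt_qp *qp)
{
	atomic_inc(&qp->refcount);
}

static inline void sketch_put_qp(struct rvt_qp *qp)
{
	/* wake a destroy path waiting for the last reference to drop */
	if (atomic_dec_and_test(&qp->refcount))
		wake_up(&qp->wait);
}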
diff --git a/drivers/infiniband/sw/rdmavt/mr.c b/drivers/infiniband/sw/rdmavt/mr.c
index 46b64970058e..52fd15276ee6 100644
--- a/drivers/infiniband/sw/rdmavt/mr.c
+++ b/drivers/infiniband/sw/rdmavt/mr.c
@@ -51,6 +51,7 @@
#include <rdma/rdma_vt.h>
#include "vt.h"
#include "mr.h"
+#include "trace.h"
/**
* rvt_driver_mr_init - Init MR resources per driver
@@ -84,6 +85,7 @@ int rvt_driver_mr_init(struct rvt_dev_info *rdi)
lkey_table_size = rdi->dparms.lkey_table_size;
}
rdi->lkey_table.max = 1 << lkey_table_size;
+ rdi->lkey_table.shift = 32 - lkey_table_size;
lk_tab_size = rdi->lkey_table.max * sizeof(*rdi->lkey_table.table);
rdi->lkey_table.table = (struct rvt_mregion __rcu **)
vmalloc_node(lk_tab_size, rdi->dparms.node);
@@ -402,6 +404,7 @@ struct ib_mr *rvt_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
}
mr->mr.map[m]->segs[n].vaddr = vaddr;
mr->mr.map[m]->segs[n].length = umem->page_size;
+ trace_rvt_mr_user_seg(&mr->mr, m, n, vaddr, umem->page_size);
n++;
if (n == RVT_SEGSZ) {
m++;
@@ -506,6 +509,7 @@ static int rvt_set_page(struct ib_mr *ibmr, u64 addr)
n = mapped_segs % RVT_SEGSZ;
mr->mr.map[m]->segs[n].vaddr = (void *)addr;
mr->mr.map[m]->segs[n].length = ps;
+ trace_rvt_mr_page_seg(&mr->mr, m, n, (void *)addr, ps);
mr->mr.length += ps;
return 0;
@@ -692,6 +696,7 @@ int rvt_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list,
for (i = 0; i < list_len; i++) {
fmr->mr.map[m]->segs[n].vaddr = (void *)page_list[i];
fmr->mr.map[m]->segs[n].length = ps;
+ trace_rvt_mr_fmr_seg(&fmr->mr, m, n, (void *)page_list[i], ps);
if (++n == RVT_SEGSZ) {
m++;
n = 0;
@@ -774,7 +779,6 @@ int rvt_lkey_ok(struct rvt_lkey_table *rkt, struct rvt_pd *pd,
struct rvt_mregion *mr;
unsigned n, m;
size_t off;
- struct rvt_dev_info *dev = ib_to_rvt(pd->ibpd.device);
/*
* We use LKEY == zero for kernel virtual addresses
@@ -782,12 +786,14 @@ int rvt_lkey_ok(struct rvt_lkey_table *rkt, struct rvt_pd *pd,
*/
rcu_read_lock();
if (sge->lkey == 0) {
+ struct rvt_dev_info *dev = ib_to_rvt(pd->ibpd.device);
+
if (pd->user)
goto bail;
mr = rcu_dereference(dev->dma_mr);
if (!mr)
goto bail;
- atomic_inc(&mr->refcount);
+ rvt_get_mr(mr);
rcu_read_unlock();
isge->mr = mr;
@@ -798,8 +804,7 @@ int rvt_lkey_ok(struct rvt_lkey_table *rkt, struct rvt_pd *pd,
isge->n = 0;
goto ok;
}
- mr = rcu_dereference(
- rkt->table[(sge->lkey >> (32 - dev->dparms.lkey_table_size))]);
+ mr = rcu_dereference(rkt->table[sge->lkey >> rkt->shift]);
if (unlikely(!mr || atomic_read(&mr->lkey_invalid) ||
mr->lkey != sge->lkey || mr->pd != &pd->ibpd))
goto bail;
@@ -809,7 +814,7 @@ int rvt_lkey_ok(struct rvt_lkey_table *rkt, struct rvt_pd *pd,
off + sge->length > mr->length ||
(mr->access_flags & acc) != acc))
goto bail;
- atomic_inc(&mr->refcount);
+ rvt_get_mr(mr);
rcu_read_unlock();
off += mr->offset;
@@ -887,7 +892,7 @@ int rvt_rkey_ok(struct rvt_qp *qp, struct rvt_sge *sge,
mr = rcu_dereference(rdi->dma_mr);
if (!mr)
goto bail;
- atomic_inc(&mr->refcount);
+ rvt_get_mr(mr);
rcu_read_unlock();
sge->mr = mr;
@@ -899,8 +904,7 @@ int rvt_rkey_ok(struct rvt_qp *qp, struct rvt_sge *sge,
goto ok;
}
- mr = rcu_dereference(
- rkt->table[(rkey >> (32 - dev->dparms.lkey_table_size))]);
+ mr = rcu_dereference(rkt->table[rkey >> rkt->shift]);
if (unlikely(!mr || atomic_read(&mr->lkey_invalid) ||
mr->lkey != rkey || qp->ibqp.pd != mr->pd))
goto bail;
@@ -909,7 +913,7 @@ int rvt_rkey_ok(struct rvt_qp *qp, struct rvt_sge *sge,
if (unlikely(vaddr < mr->iova || off + len > mr->length ||
(mr->access_flags & acc) == 0))
goto bail;
- atomic_inc(&mr->refcount);
+ rvt_get_mr(mr);
rcu_read_unlock();
off += mr->offset;
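
/*
 * Illustrative sketch (editor's addition, not part of the patch): caching
 * the shift in the lkey table at init time (rdi->lkey_table.shift =
 * 32 - lkey_table_size, set above) makes the hot-path index computation
 * independent of rvt_dev_info, which is why the dev lookup could be moved
 * out of rvt_lkey_ok()'s fast path. Hypothetical helper:
 */
static inline u32 sketch_lkey_index(struct rvt_lkey_table *rkt, u32 lkey)
{
	/* identical to lkey >> (32 - dev->dparms.lkey_table_size) */
	return lkey >> rkt->shift;
}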
diff --git a/drivers/infiniband/sw/rdmavt/qp.c b/drivers/infiniband/sw/rdmavt/qp.c
index 6500c3b5a89c..2a13ac660f2b 100644
--- a/drivers/infiniband/sw/rdmavt/qp.c
+++ b/drivers/infiniband/sw/rdmavt/qp.c
@@ -76,6 +76,23 @@ const int ib_rvt_state_ops[IB_QPS_ERR + 1] = {
};
EXPORT_SYMBOL(ib_rvt_state_ops);
+/*
+ * Translate ib_wr_opcode into ib_wc_opcode.
+ */
+const enum ib_wc_opcode ib_rvt_wc_opcode[] = {
+ [IB_WR_RDMA_WRITE] = IB_WC_RDMA_WRITE,
+ [IB_WR_RDMA_WRITE_WITH_IMM] = IB_WC_RDMA_WRITE,
+ [IB_WR_SEND] = IB_WC_SEND,
+ [IB_WR_SEND_WITH_IMM] = IB_WC_SEND,
+ [IB_WR_RDMA_READ] = IB_WC_RDMA_READ,
+ [IB_WR_ATOMIC_CMP_AND_SWP] = IB_WC_COMP_SWAP,
+ [IB_WR_ATOMIC_FETCH_AND_ADD] = IB_WC_FETCH_ADD,
+ [IB_WR_SEND_WITH_INV] = IB_WC_SEND,
+ [IB_WR_LOCAL_INV] = IB_WC_LOCAL_INV,
+ [IB_WR_REG_MR] = IB_WC_REG_MR
+};
+EXPORT_SYMBOL(ib_rvt_wc_opcode);
+
static void get_map_page(struct rvt_qpn_table *qpt,
struct rvt_qpn_map *map,
gfp_t gfp)
@@ -884,7 +901,8 @@ struct ib_qp *rvt_create_qp(struct ib_pd *ibpd,
return ret;
bail_ip:
- kref_put(&qp->ip->ref, rvt_release_mmap_info);
+ if (qp->ip)
+ kref_put(&qp->ip->ref, rvt_release_mmap_info);
bail_qpn:
free_qpn(&rdi->qp_dev->qpn_table, qp->ibqp.qp_num);
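
/*
 * Illustrative sketch (editor's addition, not part of the patch): the new
 * ib_rvt_wc_opcode[] table gives drivers a one-step translation from a
 * posted send opcode to the completion opcode reported in the ib_wc.
 * Hypothetical wrapper -- only opcodes listed in the table are valid
 * inputs:
 */
static inline enum ib_wc_opcode sketch_wr_to_wc(enum ib_wr_opcode op)
{
	return ib_rvt_wc_opcode[op];
}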
diff --git a/drivers/infiniband/sw/rdmavt/trace.h b/drivers/infiniband/sw/rdmavt/trace.h
index 6c0457db5499..e2d23acb6a7d 100644
--- a/drivers/infiniband/sw/rdmavt/trace.h
+++ b/drivers/infiniband/sw/rdmavt/trace.h
@@ -45,143 +45,10 @@
*
*/
-#undef TRACE_SYSTEM_VAR
-#define TRACE_SYSTEM_VAR rdmavt
-
-#if !defined(__RDMAVT_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
-#define __RDMAVT_TRACE_H
-
-#include <linux/tracepoint.h>
-#include <linux/trace_seq.h>
-
-#include <rdma/ib_verbs.h>
-#include <rdma/rdma_vt.h>
-
#define RDI_DEV_ENTRY(rdi) __string(dev, rdi->driver_f.get_card_name(rdi))
#define RDI_DEV_ASSIGN(rdi) __assign_str(dev, rdi->driver_f.get_card_name(rdi))
-#undef TRACE_SYSTEM
-#define TRACE_SYSTEM rdmavt
-
-TRACE_EVENT(rvt_dbg,
- TP_PROTO(struct rvt_dev_info *rdi,
- const char *msg),
- TP_ARGS(rdi, msg),
- TP_STRUCT__entry(
- RDI_DEV_ENTRY(rdi)
- __string(msg, msg)
- ),
- TP_fast_assign(
- RDI_DEV_ASSIGN(rdi);
- __assign_str(msg, msg);
- ),
- TP_printk("[%s]: %s", __get_str(dev), __get_str(msg))
-);
-
-#undef TRACE_SYSTEM
-#define TRACE_SYSTEM rvt_qphash
-DECLARE_EVENT_CLASS(rvt_qphash_template,
- TP_PROTO(struct rvt_qp *qp, u32 bucket),
- TP_ARGS(qp, bucket),
- TP_STRUCT__entry(
- RDI_DEV_ENTRY(ib_to_rvt(qp->ibqp.device))
- __field(u32, qpn)
- __field(u32, bucket)
- ),
- TP_fast_assign(
- RDI_DEV_ASSIGN(ib_to_rvt(qp->ibqp.device))
- __entry->qpn = qp->ibqp.qp_num;
- __entry->bucket = bucket;
- ),
- TP_printk(
- "[%s] qpn 0x%x bucket %u",
- __get_str(dev),
- __entry->qpn,
- __entry->bucket
- )
-);
-
-DEFINE_EVENT(rvt_qphash_template, rvt_qpinsert,
- TP_PROTO(struct rvt_qp *qp, u32 bucket),
- TP_ARGS(qp, bucket));
-
-DEFINE_EVENT(rvt_qphash_template, rvt_qpremove,
- TP_PROTO(struct rvt_qp *qp, u32 bucket),
- TP_ARGS(qp, bucket));
-
-#undef TRACE_SYSTEM
-#define TRACE_SYSTEM rvt_tx
-
-#define wr_opcode_name(opcode) { IB_WR_##opcode, #opcode }
-#define show_wr_opcode(opcode) \
-__print_symbolic(opcode, \
- wr_opcode_name(RDMA_WRITE), \
- wr_opcode_name(RDMA_WRITE_WITH_IMM), \
- wr_opcode_name(SEND), \
- wr_opcode_name(SEND_WITH_IMM), \
- wr_opcode_name(RDMA_READ), \
- wr_opcode_name(ATOMIC_CMP_AND_SWP), \
- wr_opcode_name(ATOMIC_FETCH_AND_ADD), \
- wr_opcode_name(LSO), \
- wr_opcode_name(SEND_WITH_INV), \
- wr_opcode_name(RDMA_READ_WITH_INV), \
- wr_opcode_name(LOCAL_INV), \
- wr_opcode_name(MASKED_ATOMIC_CMP_AND_SWP), \
- wr_opcode_name(MASKED_ATOMIC_FETCH_AND_ADD))
-
-#define POS_PRN \
-"[%s] wr_id %llx qpn %x psn 0x%x lpsn 0x%x length %u opcode 0x%.2x,%s size %u avail %u head %u last %u"
-
-TRACE_EVENT(
- rvt_post_one_wr,
- TP_PROTO(struct rvt_qp *qp, struct rvt_swqe *wqe),
- TP_ARGS(qp, wqe),
- TP_STRUCT__entry(
- RDI_DEV_ENTRY(ib_to_rvt(qp->ibqp.device))
- __field(u64, wr_id)
- __field(u32, qpn)
- __field(u32, psn)
- __field(u32, lpsn)
- __field(u32, length)
- __field(u32, opcode)
- __field(u32, size)
- __field(u32, avail)
- __field(u32, head)
- __field(u32, last)
- ),
- TP_fast_assign(
- RDI_DEV_ASSIGN(ib_to_rvt(qp->ibqp.device))
- __entry->wr_id = wqe->wr.wr_id;
- __entry->qpn = qp->ibqp.qp_num;
- __entry->psn = wqe->psn;
- __entry->lpsn = wqe->lpsn;
- __entry->length = wqe->length;
- __entry->opcode = wqe->wr.opcode;
- __entry->size = qp->s_size;
- __entry->avail = qp->s_avail;
- __entry->head = qp->s_head;
- __entry->last = qp->s_last;
- ),
- TP_printk(
- POS_PRN,
- __get_str(dev),
- __entry->wr_id,
- __entry->qpn,
- __entry->psn,
- __entry->lpsn,
- __entry->length,
- __entry->opcode, show_wr_opcode(__entry->opcode),
- __entry->size,
- __entry->avail,
- __entry->head,
- __entry->last
- )
-);
-
-#endif /* __RDMAVT_TRACE_H */
-
-#undef TRACE_INCLUDE_PATH
-#undef TRACE_INCLUDE_FILE
-#define TRACE_INCLUDE_PATH .
-#define TRACE_INCLUDE_FILE trace
-#include <trace/define_trace.h>
+#include "trace_rvt.h"
+#include "trace_qp.h"
+#include "trace_tx.h"
+#include "trace_mr.h"
diff --git a/drivers/infiniband/sw/rdmavt/trace_mr.h b/drivers/infiniband/sw/rdmavt/trace_mr.h
new file mode 100644
index 000000000000..3318a6c36373
--- /dev/null
+++ b/drivers/infiniband/sw/rdmavt/trace_mr.h
@@ -0,0 +1,112 @@
+/*
+ * Copyright(c) 2016 Intel Corporation.
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * BSD LICENSE
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * - Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+#if !defined(__RVT_TRACE_MR_H) || defined(TRACE_HEADER_MULTI_READ)
+#define __RVT_TRACE_MR_H
+
+#include <linux/tracepoint.h>
+#include <linux/trace_seq.h>
+
+#include <rdma/ib_verbs.h>
+#include <rdma/rdma_vt.h>
+#include <rdma/rdmavt_mr.h>
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM rvt_mr
+DECLARE_EVENT_CLASS(
+ rvt_mr_template,
+ TP_PROTO(struct rvt_mregion *mr, u16 m, u16 n, void *v, size_t len),
+ TP_ARGS(mr, m, n, v, len),
+ TP_STRUCT__entry(
+ RDI_DEV_ENTRY(ib_to_rvt(mr->pd->device))
+ __field(void *, vaddr)
+ __field(struct page *, page)
+ __field(size_t, len)
+ __field(u32, lkey)
+ __field(u16, m)
+ __field(u16, n)
+ ),
+ TP_fast_assign(
+ RDI_DEV_ASSIGN(ib_to_rvt(mr->pd->device));
+ __entry->vaddr = v;
+ __entry->page = virt_to_page(v);
+ __entry->m = m;
+ __entry->n = n;
+ __entry->len = len;
+ ),
+ TP_printk(
+ "[%s] vaddr %p page %p m %u n %u len %ld",
+ __get_str(dev),
+ __entry->vaddr,
+ __entry->page,
+ __entry->m,
+ __entry->n,
+ __entry->len
+ )
+);
+
+DEFINE_EVENT(
+ rvt_mr_template, rvt_mr_page_seg,
+ TP_PROTO(struct rvt_mregion *mr, u16 m, u16 n, void *v, size_t len),
+ TP_ARGS(mr, m, n, v, len));
+
+DEFINE_EVENT(
+ rvt_mr_template, rvt_mr_fmr_seg,
+ TP_PROTO(struct rvt_mregion *mr, u16 m, u16 n, void *v, size_t len),
+ TP_ARGS(mr, m, n, v, len));
+
+DEFINE_EVENT(
+ rvt_mr_template, rvt_mr_user_seg,
+ TP_PROTO(struct rvt_mregion *mr, u16 m, u16 n, void *v, size_t len),
+ TP_ARGS(mr, m, n, v, len));
+
+#endif /* __RVT_TRACE_MR_H */
+
+#undef TRACE_INCLUDE_PATH
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_PATH .
+#define TRACE_INCLUDE_FILE trace_mr
+#include <trace/define_trace.h>
diff --git a/drivers/infiniband/sw/rdmavt/trace_qp.h b/drivers/infiniband/sw/rdmavt/trace_qp.h
new file mode 100644
index 000000000000..4c77a3119bda
--- /dev/null
+++ b/drivers/infiniband/sw/rdmavt/trace_qp.h
@@ -0,0 +1,96 @@
+/*
+ * Copyright(c) 2016 Intel Corporation.
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * BSD LICENSE
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * - Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+#if !defined(__RVT_TRACE_QP_H) || defined(TRACE_HEADER_MULTI_READ)
+#define __RVT_TRACE_QP_H
+
+#include <linux/tracepoint.h>
+#include <linux/trace_seq.h>
+
+#include <rdma/ib_verbs.h>
+#include <rdma/rdma_vt.h>
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM rvt_qp
+
+DECLARE_EVENT_CLASS(rvt_qphash_template,
+ TP_PROTO(struct rvt_qp *qp, u32 bucket),
+ TP_ARGS(qp, bucket),
+ TP_STRUCT__entry(
+ RDI_DEV_ENTRY(ib_to_rvt(qp->ibqp.device))
+ __field(u32, qpn)
+ __field(u32, bucket)
+ ),
+ TP_fast_assign(
+ RDI_DEV_ASSIGN(ib_to_rvt(qp->ibqp.device))
+ __entry->qpn = qp->ibqp.qp_num;
+ __entry->bucket = bucket;
+ ),
+ TP_printk(
+ "[%s] qpn 0x%x bucket %u",
+ __get_str(dev),
+ __entry->qpn,
+ __entry->bucket
+ )
+);
+
+DEFINE_EVENT(rvt_qphash_template, rvt_qpinsert,
+ TP_PROTO(struct rvt_qp *qp, u32 bucket),
+ TP_ARGS(qp, bucket));
+
+DEFINE_EVENT(rvt_qphash_template, rvt_qpremove,
+ TP_PROTO(struct rvt_qp *qp, u32 bucket),
+ TP_ARGS(qp, bucket));
+
+#endif /* __RVT_TRACE_QP_H */
+
+#undef TRACE_INCLUDE_PATH
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_PATH .
+#define TRACE_INCLUDE_FILE trace_qp
+#include <trace/define_trace.h>
+
diff --git a/drivers/infiniband/sw/rdmavt/trace_rvt.h b/drivers/infiniband/sw/rdmavt/trace_rvt.h
new file mode 100644
index 000000000000..746f33461d9a
--- /dev/null
+++ b/drivers/infiniband/sw/rdmavt/trace_rvt.h
@@ -0,0 +1,81 @@
+/*
+ * Copyright(c) 2016 Intel Corporation.
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * BSD LICENSE
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * - Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+#if !defined(__RVT_TRACE_RVT_H) || defined(TRACE_HEADER_MULTI_READ)
+#define __RVT_TRACE_RVT_H
+
+#include <linux/tracepoint.h>
+#include <linux/trace_seq.h>
+
+#include <rdma/ib_verbs.h>
+#include <rdma/rdma_vt.h>
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM rvt
+
+TRACE_EVENT(rvt_dbg,
+ TP_PROTO(struct rvt_dev_info *rdi,
+ const char *msg),
+ TP_ARGS(rdi, msg),
+ TP_STRUCT__entry(
+ RDI_DEV_ENTRY(rdi)
+ __string(msg, msg)
+ ),
+ TP_fast_assign(
+ RDI_DEV_ASSIGN(rdi);
+ __assign_str(msg, msg);
+ ),
+ TP_printk("[%s]: %s", __get_str(dev), __get_str(msg))
+);
+
+#endif /* __RVT_TRACE_RVT_H */
+
+#undef TRACE_INCLUDE_PATH
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_PATH .
+#define TRACE_INCLUDE_FILE trace_rvt
+#include <trace/define_trace.h>
+
diff --git a/drivers/infiniband/sw/rdmavt/trace_tx.h b/drivers/infiniband/sw/rdmavt/trace_tx.h
new file mode 100644
index 000000000000..0e03173662d8
--- /dev/null
+++ b/drivers/infiniband/sw/rdmavt/trace_tx.h
@@ -0,0 +1,132 @@
+/*
+ * Copyright(c) 2016 Intel Corporation.
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * BSD LICENSE
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * - Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+#if !defined(__RVT_TRACE_TX_H) || defined(TRACE_HEADER_MULTI_READ)
+#define __RVT_TRACE_TX_H
+
+#include <linux/tracepoint.h>
+#include <linux/trace_seq.h>
+
+#include <rdma/ib_verbs.h>
+#include <rdma/rdma_vt.h>
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM rvt_tx
+
+#define wr_opcode_name(opcode) { IB_WR_##opcode, #opcode }
+#define show_wr_opcode(opcode) \
+__print_symbolic(opcode, \
+ wr_opcode_name(RDMA_WRITE), \
+ wr_opcode_name(RDMA_WRITE_WITH_IMM), \
+ wr_opcode_name(SEND), \
+ wr_opcode_name(SEND_WITH_IMM), \
+ wr_opcode_name(RDMA_READ), \
+ wr_opcode_name(ATOMIC_CMP_AND_SWP), \
+ wr_opcode_name(ATOMIC_FETCH_AND_ADD), \
+ wr_opcode_name(LSO), \
+ wr_opcode_name(SEND_WITH_INV), \
+ wr_opcode_name(RDMA_READ_WITH_INV), \
+ wr_opcode_name(LOCAL_INV), \
+ wr_opcode_name(MASKED_ATOMIC_CMP_AND_SWP), \
+ wr_opcode_name(MASKED_ATOMIC_FETCH_AND_ADD))
+
+#define POS_PRN \
+"[%s] wr_id %llx qpn %x psn 0x%x lpsn 0x%x length %u opcode 0x%.2x,%s size %u avail %u head %u last %u"
+
+TRACE_EVENT(
+ rvt_post_one_wr,
+ TP_PROTO(struct rvt_qp *qp, struct rvt_swqe *wqe),
+ TP_ARGS(qp, wqe),
+ TP_STRUCT__entry(
+ RDI_DEV_ENTRY(ib_to_rvt(qp->ibqp.device))
+ __field(u64, wr_id)
+ __field(u32, qpn)
+ __field(u32, psn)
+ __field(u32, lpsn)
+ __field(u32, length)
+ __field(u32, opcode)
+ __field(u32, size)
+ __field(u32, avail)
+ __field(u32, head)
+ __field(u32, last)
+ ),
+ TP_fast_assign(
+ RDI_DEV_ASSIGN(ib_to_rvt(qp->ibqp.device))
+ __entry->wr_id = wqe->wr.wr_id;
+ __entry->qpn = qp->ibqp.qp_num;
+ __entry->psn = wqe->psn;
+ __entry->lpsn = wqe->lpsn;
+ __entry->length = wqe->length;
+ __entry->opcode = wqe->wr.opcode;
+ __entry->size = qp->s_size;
+ __entry->avail = qp->s_avail;
+ __entry->head = qp->s_head;
+ __entry->last = qp->s_last;
+ ),
+ TP_printk(
+ POS_PRN,
+ __get_str(dev),
+ __entry->wr_id,
+ __entry->qpn,
+ __entry->psn,
+ __entry->lpsn,
+ __entry->length,
+ __entry->opcode, show_wr_opcode(__entry->opcode),
+ __entry->size,
+ __entry->avail,
+ __entry->head,
+ __entry->last
+ )
+);
+
+#endif /* __RVT_TRACE_TX_H */
+
+#undef TRACE_INCLUDE_PATH
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_PATH .
+#define TRACE_INCLUDE_FILE trace_tx
+#include <trace/define_trace.h>
+
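
/*
 * Illustrative note (editor's addition, not part of the patch): each of
 * the split trace_*.h headers follows the standard tracepoint layout --
 * set TRACE_SYSTEM, declare the events, then set TRACE_INCLUDE_FILE and
 * pull in <trace/define_trace.h>. The events are instantiated exactly
 * once, in whichever translation unit defines CREATE_TRACE_POINTS before
 * including the umbrella header, roughly:
 */
#define CREATE_TRACE_POINTS
#include "trace.h"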
diff --git a/drivers/infiniband/sw/rxe/rxe_comp.c b/drivers/infiniband/sw/rxe/rxe_comp.c
index 6c5e29db88e3..d369f24425f9 100644
--- a/drivers/infiniband/sw/rxe/rxe_comp.c
+++ b/drivers/infiniband/sw/rxe/rxe_comp.c
@@ -224,7 +224,7 @@ static inline enum comp_state check_psn(struct rxe_qp *qp,
else
return COMPST_DONE;
} else if ((diff > 0) && (wqe->mask & WR_ATOMIC_OR_READ_MASK)) {
- return COMPST_ERROR_RETRY;
+ return COMPST_DONE;
} else {
return COMPST_CHECK_ACK;
}
@@ -420,11 +420,12 @@ static void do_complete(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
(wqe->wr.send_flags & IB_SEND_SIGNALED) ||
(qp->req.state == QP_STATE_ERROR)) {
make_send_cqe(qp, wqe, &cqe);
+ advance_consumer(qp->sq.queue);
rxe_cq_post(qp->scq, &cqe, 0);
+ } else {
+ advance_consumer(qp->sq.queue);
}
- advance_consumer(qp->sq.queue);
-
/*
* we completed something so let req run again
* if it is trying to fence
@@ -510,6 +511,8 @@ int rxe_completer(void *arg)
struct rxe_pkt_info *pkt = NULL;
enum comp_state state;
+ rxe_add_ref(qp);
+
if (!qp->valid) {
while ((skb = skb_dequeue(&qp->resp_pkts))) {
rxe_drop_ref(qp);
@@ -739,11 +742,13 @@ exit:
/* we come here if we are done with processing and want the task to
* exit from the loop calling us
*/
+ rxe_drop_ref(qp);
return -EAGAIN;
done:
/* we come here if we have processed a packet we want the task to call
* us again to see if there is anything else to do
*/
+ rxe_drop_ref(qp);
return 0;
}
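
/*
 * Illustrative sketch (editor's addition, not part of the patch): the
 * completer now pins the QP for the entire handler run -- rxe_add_ref()
 * on entry, matched by rxe_drop_ref() on both the done and exit paths --
 * so a concurrent destroy cannot free the QP mid-iteration. The same
 * shape is applied to rxe_requester() and rxe_responder() further down.
 * Here do_work() is a hypothetical stand-in for the handler body:
 */
static int do_work(struct rxe_qp *qp);

static int sketch_guarded_handler(struct rxe_qp *qp)
{
	int ret;

	rxe_add_ref(qp);	/* pin across the whole run */
	ret = do_work(qp);
	rxe_drop_ref(qp);
	return ret;
}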
diff --git a/drivers/infiniband/sw/rxe/rxe_loc.h b/drivers/infiniband/sw/rxe/rxe_loc.h
index 73849a5a91b3..efe4c6a35442 100644
--- a/drivers/infiniband/sw/rxe/rxe_loc.h
+++ b/drivers/infiniband/sw/rxe/rxe_loc.h
@@ -266,8 +266,6 @@ static inline int rxe_xmit_packet(struct rxe_dev *rxe, struct rxe_qp *qp,
return err;
}
- atomic_inc(&qp->skb_out);
-
if ((qp_type(qp) != IB_QPT_RC) &&
(pkt->mask & RXE_END_MASK)) {
pkt->wqe->state = wqe_state_done;
diff --git a/drivers/infiniband/sw/rxe/rxe_mr.c b/drivers/infiniband/sw/rxe/rxe_mr.c
index 1869152f1d23..d0faca294006 100644
--- a/drivers/infiniband/sw/rxe/rxe_mr.c
+++ b/drivers/infiniband/sw/rxe/rxe_mr.c
@@ -355,6 +355,9 @@ int rxe_mem_copy(struct rxe_mem *mem, u64 iova, void *addr, int length,
size_t offset;
u32 crc = crcp ? (*crcp) : 0;
+ if (length == 0)
+ return 0;
+
if (mem->type == RXE_MEM_TYPE_DMA) {
u8 *src, *dest;
diff --git a/drivers/infiniband/sw/rxe/rxe_net.c b/drivers/infiniband/sw/rxe/rxe_net.c
index ffff5a54cb34..342e78163613 100644
--- a/drivers/infiniband/sw/rxe/rxe_net.c
+++ b/drivers/infiniband/sw/rxe/rxe_net.c
@@ -46,7 +46,7 @@
#include "rxe_loc.h"
static LIST_HEAD(rxe_dev_list);
-static spinlock_t dev_list_lock; /* spinlock for device list */
+static DEFINE_SPINLOCK(dev_list_lock); /* spinlock for device list */
struct rxe_dev *net_to_rxe(struct net_device *ndev)
{
@@ -455,6 +455,7 @@ static int send(struct rxe_dev *rxe, struct rxe_pkt_info *pkt,
return -EAGAIN;
}
+ atomic_inc(&pkt->qp->skb_out);
kfree_skb(skb);
return 0;
@@ -659,8 +660,6 @@ struct notifier_block rxe_net_notifier = {
int rxe_net_ipv4_init(void)
{
- spin_lock_init(&dev_list_lock);
-
recv_sockets.sk4 = rxe_setup_udp_tunnel(&init_net,
htons(ROCE_V2_UDP_DPORT), false);
if (IS_ERR(recv_sockets.sk4)) {
@@ -676,8 +675,6 @@ int rxe_net_ipv6_init(void)
{
#if IS_ENABLED(CONFIG_IPV6)
- spin_lock_init(&dev_list_lock);
-
recv_sockets.sk6 = rxe_setup_udp_tunnel(&init_net,
htons(ROCE_V2_UDP_DPORT), true);
if (IS_ERR(recv_sockets.sk6)) {
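
/*
 * Illustrative note (editor's addition, not part of the patch):
 * DEFINE_SPINLOCK() produces a lock that is initialized at link time, so
 * the change above removes both the duplicated spin_lock_init() calls and
 * the window in which net_to_rxe() could take a not-yet-initialized
 * dev_list_lock before either init function had run:
 */
static DEFINE_SPINLOCK(sketch_lock);	/* valid before any init code runs */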
diff --git a/drivers/infiniband/sw/rxe/rxe_param.h b/drivers/infiniband/sw/rxe/rxe_param.h
index f459c43a77c8..13ed2cc6eaa2 100644
--- a/drivers/infiniband/sw/rxe/rxe_param.h
+++ b/drivers/infiniband/sw/rxe/rxe_param.h
@@ -82,7 +82,7 @@ enum rxe_device_param {
RXE_MAX_SGE = 32,
RXE_MAX_SGE_RD = 32,
RXE_MAX_CQ = 16384,
- RXE_MAX_LOG_CQE = 13,
+ RXE_MAX_LOG_CQE = 15,
RXE_MAX_MR = 2 * 1024,
RXE_MAX_PD = 0x7ffc,
RXE_MAX_QP_RD_ATOM = 128,
diff --git a/drivers/infiniband/sw/rxe/rxe_pool.c b/drivers/infiniband/sw/rxe/rxe_pool.c
index 6bac0717c540..d723947a8542 100644
--- a/drivers/infiniband/sw/rxe/rxe_pool.c
+++ b/drivers/infiniband/sw/rxe/rxe_pool.c
@@ -180,7 +180,6 @@ static int rxe_pool_init_index(struct rxe_pool *pool, u32 max, u32 min)
size = BITS_TO_LONGS(max - min + 1) * sizeof(long);
pool->table = kmalloc(size, GFP_KERNEL);
if (!pool->table) {
- pr_warn("no memory for bit table\n");
err = -ENOMEM;
goto out;
}
diff --git a/drivers/infiniband/sw/rxe/rxe_qp.c b/drivers/infiniband/sw/rxe/rxe_qp.c
index c3e60e4bde6e..486d576e55bc 100644
--- a/drivers/infiniband/sw/rxe/rxe_qp.c
+++ b/drivers/infiniband/sw/rxe/rxe_qp.c
@@ -855,4 +855,5 @@ void rxe_qp_cleanup(void *arg)
free_rd_atomic_resources(qp);
kernel_sock_shutdown(qp->sk, SHUT_RDWR);
+ sock_release(qp->sk);
}
diff --git a/drivers/infiniband/sw/rxe/rxe_recv.c b/drivers/infiniband/sw/rxe/rxe_recv.c
index 46f062842a9a..252b4d637d45 100644
--- a/drivers/infiniband/sw/rxe/rxe_recv.c
+++ b/drivers/infiniband/sw/rxe/rxe_recv.c
@@ -391,16 +391,15 @@ int rxe_rcv(struct sk_buff *skb)
payload_size(pkt));
calc_icrc = cpu_to_be32(~calc_icrc);
if (unlikely(calc_icrc != pack_icrc)) {
- char saddr[sizeof(struct in6_addr)];
-
if (skb->protocol == htons(ETH_P_IPV6))
- sprintf(saddr, "%pI6", &ipv6_hdr(skb)->saddr);
+ pr_warn_ratelimited("bad ICRC from %pI6c\n",
+ &ipv6_hdr(skb)->saddr);
else if (skb->protocol == htons(ETH_P_IP))
- sprintf(saddr, "%pI4", &ip_hdr(skb)->saddr);
+ pr_warn_ratelimited("bad ICRC from %pI4\n",
+ &ip_hdr(skb)->saddr);
else
- sprintf(saddr, "unknown");
+ pr_warn_ratelimited("bad ICRC from unknown\n");
- pr_warn_ratelimited("bad ICRC from %s\n", saddr);
goto drop;
}
diff --git a/drivers/infiniband/sw/rxe/rxe_req.c b/drivers/infiniband/sw/rxe/rxe_req.c
index 22bd9630dcd9..73d4a97603a1 100644
--- a/drivers/infiniband/sw/rxe/rxe_req.c
+++ b/drivers/infiniband/sw/rxe/rxe_req.c
@@ -548,23 +548,23 @@ static void update_wqe_psn(struct rxe_qp *qp,
static void save_state(struct rxe_send_wqe *wqe,
struct rxe_qp *qp,
struct rxe_send_wqe *rollback_wqe,
- struct rxe_qp *rollback_qp)
+ u32 *rollback_psn)
{
rollback_wqe->state = wqe->state;
rollback_wqe->first_psn = wqe->first_psn;
rollback_wqe->last_psn = wqe->last_psn;
- rollback_qp->req.psn = qp->req.psn;
+ *rollback_psn = qp->req.psn;
}
static void rollback_state(struct rxe_send_wqe *wqe,
struct rxe_qp *qp,
struct rxe_send_wqe *rollback_wqe,
- struct rxe_qp *rollback_qp)
+ u32 rollback_psn)
{
wqe->state = rollback_wqe->state;
wqe->first_psn = rollback_wqe->first_psn;
wqe->last_psn = rollback_wqe->last_psn;
- qp->req.psn = rollback_qp->req.psn;
+ qp->req.psn = rollback_psn;
}
static void update_state(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
@@ -593,8 +593,10 @@ int rxe_requester(void *arg)
int mtu;
int opcode;
int ret;
- struct rxe_qp rollback_qp;
struct rxe_send_wqe rollback_wqe;
+ u32 rollback_psn;
+
+ rxe_add_ref(qp);
next_wqe:
if (unlikely(!qp->valid || qp->req.state == QP_STATE_ERROR))
@@ -697,6 +699,7 @@ next_wqe:
wqe->state = wqe_state_done;
wqe->status = IB_WC_SUCCESS;
__rxe_do_task(&qp->comp.task);
+ rxe_drop_ref(qp);
return 0;
}
payload = mtu;
@@ -719,7 +722,7 @@ next_wqe:
* rxe_xmit_packet().
* Otherwise, completer might initiate an unjustified retry flow.
*/
- save_state(wqe, qp, &rollback_wqe, &rollback_qp);
+ save_state(wqe, qp, &rollback_wqe, &rollback_psn);
update_wqe_state(qp, wqe, &pkt);
update_wqe_psn(qp, wqe, &pkt, payload);
ret = rxe_xmit_packet(to_rdev(qp->ibqp.device), qp, &pkt, skb);
@@ -727,7 +730,7 @@ next_wqe:
qp->need_req_skb = 1;
kfree_skb(skb);
- rollback_state(wqe, qp, &rollback_wqe, &rollback_qp);
+ rollback_state(wqe, qp, &rollback_wqe, rollback_psn);
if (ret == -EAGAIN) {
rxe_run_task(&qp->req.task, 1);
@@ -756,8 +759,7 @@ err:
*/
wqe->wr.send_flags |= IB_SEND_SIGNALED;
__rxe_do_task(&qp->comp.task);
- return -EAGAIN;
-
exit:
+ rxe_drop_ref(qp);
return -EAGAIN;
}
diff --git a/drivers/infiniband/sw/rxe/rxe_resp.c b/drivers/infiniband/sw/rxe/rxe_resp.c
index dd3d88adc003..3435efff8799 100644
--- a/drivers/infiniband/sw/rxe/rxe_resp.c
+++ b/drivers/infiniband/sw/rxe/rxe_resp.c
@@ -444,6 +444,13 @@ static enum resp_states check_rkey(struct rxe_qp *qp,
return RESPST_EXECUTE;
}
+ /* A zero-byte op is not required to set an addr or rkey. */
+ if ((pkt->mask & (RXE_READ_MASK | RXE_WRITE_OR_SEND)) &&
+ (pkt->mask & RXE_RETH_MASK) &&
+ reth_len(pkt) == 0) {
+ return RESPST_EXECUTE;
+ }
+
va = qp->resp.va;
rkey = qp->resp.rkey;
resid = qp->resp.resid;
@@ -680,9 +687,14 @@ static enum resp_states read_reply(struct rxe_qp *qp,
res->read.va_org = qp->resp.va;
res->first_psn = req_pkt->psn;
- res->last_psn = req_pkt->psn +
- (reth_len(req_pkt) + mtu - 1) /
- mtu - 1;
+
+ if (reth_len(req_pkt)) {
+ res->last_psn = (req_pkt->psn +
+ (reth_len(req_pkt) + mtu - 1) /
+ mtu - 1) & BTH_PSN_MASK;
+ } else {
+ res->last_psn = res->first_psn;
+ }
res->cur_psn = req_pkt->psn;
res->read.resid = qp->resp.resid;
@@ -742,7 +754,8 @@ static enum resp_states read_reply(struct rxe_qp *qp,
} else {
qp->resp.res = NULL;
qp->resp.opcode = -1;
- qp->resp.psn = res->cur_psn;
+ if (psn_compare(res->cur_psn, qp->resp.psn) >= 0)
+ qp->resp.psn = res->cur_psn;
state = RESPST_CLEANUP;
}
@@ -1057,12 +1070,13 @@ static enum resp_states duplicate_request(struct rxe_qp *qp,
struct rxe_pkt_info *pkt)
{
enum resp_states rc;
+ u32 prev_psn = (qp->resp.psn - 1) & BTH_PSN_MASK;
if (pkt->mask & RXE_SEND_MASK ||
pkt->mask & RXE_WRITE_MASK) {
/* SEND. Ack again and cleanup. C9-105. */
if (bth_ack(pkt))
- send_ack(qp, pkt, AETH_ACK_UNLIMITED, qp->resp.psn - 1);
+ send_ack(qp, pkt, AETH_ACK_UNLIMITED, prev_psn);
rc = RESPST_CLEANUP;
goto out;
} else if (pkt->mask & RXE_READ_MASK) {
@@ -1132,6 +1146,7 @@ static enum resp_states duplicate_request(struct rxe_qp *qp,
pkt, skb_copy);
if (rc) {
pr_err("Failed resending result. This flow is not handled - skb ignored\n");
+ rxe_drop_ref(qp);
kfree_skb(skb_copy);
rc = RESPST_CLEANUP;
goto out;
@@ -1198,6 +1213,8 @@ int rxe_responder(void *arg)
struct rxe_pkt_info *pkt = NULL;
int ret = 0;
+ rxe_add_ref(qp);
+
qp->resp.aeth_syndrome = AETH_ACK_UNLIMITED;
if (!qp->valid) {
@@ -1386,5 +1403,6 @@ int rxe_responder(void *arg)
exit:
ret = -EAGAIN;
done:
+ rxe_drop_ref(qp);
return ret;
}
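
/*
 * Illustrative sketch (editor's addition, not part of the patch): BTH
 * PSNs are 24 bits wide, so decrementing an unmasked PSN of 0 would
 * produce an out-of-range value instead of wrapping. The pattern adopted
 * above keeps all PSN arithmetic inside the field width:
 */
static inline u32 sketch_prev_psn(u32 psn)
{
	return (psn - 1) & BTH_PSN_MASK;	/* 0 wraps to 0xffffff */
}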
diff --git a/drivers/infiniband/sw/rxe/rxe_srq.c b/drivers/infiniband/sw/rxe/rxe_srq.c
index 2a6e3cd2d4e8..efc832a2d7c6 100644
--- a/drivers/infiniband/sw/rxe/rxe_srq.c
+++ b/drivers/infiniband/sw/rxe/rxe_srq.c
@@ -169,7 +169,7 @@ int rxe_srq_from_attr(struct rxe_dev *rxe, struct rxe_srq *srq,
}
}
- err = rxe_queue_resize(q, (unsigned int *)&attr->max_wr,
+ err = rxe_queue_resize(q, &attr->max_wr,
rcv_wqe_size(srq->rq.max_sge),
srq->rq.queue->ip ?
srq->rq.queue->ip->context :
diff --git a/drivers/infiniband/sw/rxe/rxe_task.c b/drivers/infiniband/sw/rxe/rxe_task.c
index 1e19bf828a6e..d2a14a1bdc7f 100644
--- a/drivers/infiniband/sw/rxe/rxe_task.c
+++ b/drivers/infiniband/sw/rxe/rxe_task.c
@@ -121,6 +121,7 @@ int rxe_init_task(void *obj, struct rxe_task *task,
task->arg = arg;
task->func = func;
snprintf(task->name, sizeof(task->name), "%s", name);
+ task->destroyed = false;
tasklet_init(&task->tasklet, rxe_do_task, (unsigned long)task);
@@ -132,11 +133,29 @@ int rxe_init_task(void *obj, struct rxe_task *task,
void rxe_cleanup_task(struct rxe_task *task)
{
+ unsigned long flags;
+ bool idle;
+
+ /*
+ * Mark the task, then wait for it to finish. It might be
+ * running in a non-tasklet (direct call) context.
+ */
+ task->destroyed = true;
+
+ do {
+ spin_lock_irqsave(&task->state_lock, flags);
+ idle = (task->state == TASK_STATE_START);
+ spin_unlock_irqrestore(&task->state_lock, flags);
+ } while (!idle);
+
tasklet_kill(&task->tasklet);
}
void rxe_run_task(struct rxe_task *task, int sched)
{
+ if (task->destroyed)
+ return;
+
if (sched)
tasklet_schedule(&task->tasklet);
else
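
/*
 * Illustrative sketch (editor's addition, not part of the patch):
 * rxe_cleanup_task() above is a flag-then-drain shutdown; the order
 * matters because tasklet_kill() alone cannot stop the direct-call
 * (sched == 0) path. sketch_task_idle() is a hypothetical stand-in for
 * the state check done under task->state_lock:
 */
static bool sketch_task_idle(struct rxe_task *task);

static void sketch_cleanup(struct rxe_task *task)
{
	task->destroyed = true;			/* 1: reject new runs */
	while (!sketch_task_idle(task))		/* 2: drain a run in flight */
		cpu_relax();
	tasklet_kill(&task->tasklet);		/* 3: stop the softirq side */
}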
diff --git a/drivers/infiniband/sw/rxe/rxe_task.h b/drivers/infiniband/sw/rxe/rxe_task.h
index d14aa6daed05..08ff42d451c6 100644
--- a/drivers/infiniband/sw/rxe/rxe_task.h
+++ b/drivers/infiniband/sw/rxe/rxe_task.h
@@ -54,6 +54,7 @@ struct rxe_task {
int (*func)(void *arg);
int ret;
char name[16];
+ bool destroyed;
};
/*
diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.c b/drivers/infiniband/sw/rxe/rxe_verbs.c
index 19841c863daf..beb7021ff18a 100644
--- a/drivers/infiniband/sw/rxe/rxe_verbs.c
+++ b/drivers/infiniband/sw/rxe/rxe_verbs.c
@@ -316,7 +316,9 @@ static int rxe_init_av(struct rxe_dev *rxe, struct ib_ah_attr *attr,
return err;
}
-static struct ib_ah *rxe_create_ah(struct ib_pd *ibpd, struct ib_ah_attr *attr)
+static struct ib_ah *rxe_create_ah(struct ib_pd *ibpd, struct ib_ah_attr *attr,
+ struct ib_udata *udata)
+
{
int err;
struct rxe_dev *rxe = to_rdev(ibpd->device);
@@ -564,7 +566,7 @@ static struct ib_qp *rxe_create_qp(struct ib_pd *ibpd,
if (udata) {
if (udata->inlen) {
err = -EINVAL;
- goto err1;
+ goto err2;
}
qp->is_user = 1;
}
@@ -573,12 +575,13 @@ static struct ib_qp *rxe_create_qp(struct ib_pd *ibpd,
err = rxe_qp_from_init(rxe, qp, pd, init, udata, ibpd);
if (err)
- goto err2;
+ goto err3;
return &qp->ibqp;
-err2:
+err3:
rxe_drop_index(qp);
+err2:
rxe_drop_ref(qp);
err1:
return ERR_PTR(err);
@@ -1007,11 +1010,19 @@ static int rxe_peek_cq(struct ib_cq *ibcq, int wc_cnt)
static int rxe_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
{
struct rxe_cq *cq = to_rcq(ibcq);
+ unsigned long irq_flags;
+ int ret = 0;
+ spin_lock_irqsave(&cq->cq_lock, irq_flags);
if (cq->notify != IB_CQ_NEXT_COMP)
cq->notify = flags & IB_CQ_SOLICITED_MASK;
- return 0;
+ if ((flags & IB_CQ_REPORT_MISSED_EVENTS) && !queue_empty(cq->queue))
+ ret = 1;
+
+ spin_unlock_irqrestore(&cq->cq_lock, irq_flags);
+
+ return ret;
}
static struct ib_mr *rxe_get_dma_mr(struct ib_pd *ibpd, int access)
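
/*
 * Illustrative note (editor's addition, not part of the patch): returning
 * 1 when IB_CQ_REPORT_MISSED_EVENTS is set and the queue is non-empty
 * follows the ib_req_notify_cq() contract, which lets a consumer close
 * the poll/arm race without a blind extra poll:
 *
 *	if (ib_req_notify_cq(cq, IB_CQ_NEXT_COMP |
 *				  IB_CQ_REPORT_MISSED_EVENTS) > 0)
 *		goto repoll;	(completions slipped in before the arm)
 */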
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
index 339a1eecdfe3..096c4f6fbd65 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
@@ -357,11 +357,8 @@ static int ipoib_cm_nonsrq_init_rx(struct net_device *dev, struct ib_cm_id *cm_i
int i;
rx->rx_ring = vzalloc(ipoib_recvq_size * sizeof *rx->rx_ring);
- if (!rx->rx_ring) {
- printk(KERN_WARNING "%s: failed to allocate CM non-SRQ ring (%d entries)\n",
- priv->ca->name, ipoib_recvq_size);
+ if (!rx->rx_ring)
return -ENOMEM;
- }
t = kmalloc(sizeof *t, GFP_KERNEL);
if (!t) {
@@ -1054,8 +1051,6 @@ static struct ib_qp *ipoib_cm_create_tx_qp(struct net_device *dev, struct ipoib_
tx_qp = ib_create_qp(priv->pd, &attr);
if (PTR_ERR(tx_qp) == -EINVAL) {
- ipoib_warn(priv, "can't use GFP_NOIO for QPs on device %s, using GFP_KERNEL\n",
- priv->ca->name);
attr.create_flags &= ~IB_QP_CREATE_USE_GFP_NOIO;
tx_qp = ib_create_qp(priv->pd, &attr);
}
@@ -1134,7 +1129,6 @@ static int ipoib_cm_tx_init(struct ipoib_cm_tx *p, u32 qpn,
p->tx_ring = __vmalloc(ipoib_sendq_size * sizeof *p->tx_ring,
GFP_NOIO, PAGE_KERNEL);
if (!p->tx_ring) {
- ipoib_warn(priv, "failed to allocate tx ring\n");
ret = -ENOMEM;
goto err_tx;
}
@@ -1550,8 +1544,6 @@ static void ipoib_cm_create_srq(struct net_device *dev, int max_sge)
priv->cm.srq_ring = vzalloc(ipoib_recvq_size * sizeof *priv->cm.srq_ring);
if (!priv->cm.srq_ring) {
- printk(KERN_WARNING "%s: failed to allocate CM SRQ ring (%d entries)\n",
- priv->ca->name, ipoib_recvq_size);
ib_destroy_srq(priv->cm.srq);
priv->cm.srq = NULL;
return;
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
index 830fecb6934c..5038f9d2d753 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
@@ -416,11 +416,8 @@ static void ipoib_ib_handle_tx_wc(struct net_device *dev, struct ib_wc *wc)
"(status=%d, wrid=%d vend_err %x)\n",
wc->status, wr_id, wc->vendor_err);
qp_work = kzalloc(sizeof(*qp_work), GFP_ATOMIC);
- if (!qp_work) {
- ipoib_warn(priv, "%s Failed alloc ipoib_qp_state_validate for qp: 0x%x\n",
- __func__, priv->qp->qp_num);
+ if (!qp_work)
return;
- }
INIT_WORK(&qp_work->work, ipoib_qp_state_validate_work);
qp_work->priv = priv;
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index b58d9dca5c93..3ce0765a05ab 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -292,6 +292,25 @@ static struct net_device *ipoib_get_master_net_dev(struct net_device *dev)
return dev;
}
+struct ipoib_walk_data {
+ const struct sockaddr *addr;
+ struct net_device *result;
+};
+
+static int ipoib_upper_walk(struct net_device *upper, void *_data)
+{
+ struct ipoib_walk_data *data = _data;
+ int ret = 0;
+
+ if (ipoib_is_dev_match_addr_rcu(data->addr, upper)) {
+ dev_hold(upper);
+ data->result = upper;
+ ret = 1;
+ }
+
+ return ret;
+}
+
/**
* Find a net_device matching the given address, which is an upper device of
* the given net_device.
@@ -304,27 +323,21 @@ static struct net_device *ipoib_get_master_net_dev(struct net_device *dev)
static struct net_device *ipoib_get_net_dev_match_addr(
const struct sockaddr *addr, struct net_device *dev)
{
- struct net_device *upper,
- *result = NULL;
- struct list_head *iter;
+ struct ipoib_walk_data data = {
+ .addr = addr,
+ };
rcu_read_lock();
if (ipoib_is_dev_match_addr_rcu(addr, dev)) {
dev_hold(dev);
- result = dev;
+ data.result = dev;
goto out;
}
- netdev_for_each_all_upper_dev_rcu(dev, upper, iter) {
- if (ipoib_is_dev_match_addr_rcu(addr, upper)) {
- dev_hold(upper);
- result = upper;
- break;
- }
- }
+ netdev_walk_all_upper_dev_rcu(dev, ipoib_upper_walk, &data);
out:
rcu_read_unlock();
- return result;
+ return data.result;
}
/* returns the number of IPoIB netdevs on top a given ipoib device matching a
@@ -1606,11 +1619,8 @@ int ipoib_dev_init(struct net_device *dev, struct ib_device *ca, int port)
/* Allocate RX/TX "rings" to hold queued skbs */
priv->rx_ring = kzalloc(ipoib_recvq_size * sizeof *priv->rx_ring,
GFP_KERNEL);
- if (!priv->rx_ring) {
- printk(KERN_WARNING "%s: failed to allocate RX ring (%d entries)\n",
- ca->name, ipoib_recvq_size);
+ if (!priv->rx_ring)
goto out;
- }
priv->tx_ring = vzalloc(ipoib_sendq_size * sizeof *priv->tx_ring);
if (!priv->tx_ring) {
@@ -2016,6 +2026,7 @@ static struct net_device *ipoib_add_port(const char *format,
/* MTU will be reset when mcast join happens */
priv->dev->mtu = IPOIB_UD_MTU(priv->max_ib_mtu);
priv->mcast_mtu = priv->admin_mtu = priv->dev->mtu;
+ priv->dev->max_mtu = IPOIB_CM_MTU;
priv->dev->neigh_priv_len = sizeof(struct ipoib_neigh);
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
index 1909dd252c94..fddff403d5d2 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
@@ -575,8 +575,11 @@ void ipoib_mcast_join_task(struct work_struct *work)
if (!test_bit(IPOIB_FLAG_OPER_UP, &priv->flags))
return;
- if (ib_query_port(priv->ca, priv->port, &port_attr) ||
- port_attr.state != IB_PORT_ACTIVE) {
+ if (ib_query_port(priv->ca, priv->port, &port_attr)) {
+ ipoib_dbg(priv, "ib_query_port() failed\n");
+ return;
+ }
+ if (port_attr.state != IB_PORT_ACTIVE) {
ipoib_dbg(priv, "port state is not ACTIVE (state = %d) suspending join task\n",
port_attr.state);
return;
diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c
index a4b791dfaa1d..8ae7a3beddb7 100644
--- a/drivers/infiniband/ulp/iser/iser_verbs.c
+++ b/drivers/infiniband/ulp/iser/iser_verbs.c
@@ -890,11 +890,14 @@ static int iser_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *eve
case RDMA_CM_EVENT_ESTABLISHED:
iser_connected_handler(cma_id, event->param.conn.private_data);
break;
+ case RDMA_CM_EVENT_REJECTED:
+ iser_info("Connection rejected: %s\n",
+ rdma_reject_msg(cma_id, event->status));
+ /* FALLTHROUGH */
case RDMA_CM_EVENT_ADDR_ERROR:
case RDMA_CM_EVENT_ROUTE_ERROR:
case RDMA_CM_EVENT_CONNECT_ERROR:
case RDMA_CM_EVENT_UNREACHABLE:
- case RDMA_CM_EVENT_REJECTED:
iser_connect_error(cma_id);
break;
case RDMA_CM_EVENT_DISCONNECTED:
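
/*
 * Illustrative note (editor's addition, not part of the patch):
 * rdma_reject_msg() maps the transport-specific reject status carried in
 * event->status to a human-readable string, so the reason is now logged
 * before REJECTED falls through to the shared iser_connect_error() path.
 * The isert hunk below applies the same one-line diagnostic.
 */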
diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
index 6dd43f63238e..314e95516068 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.c
+++ b/drivers/infiniband/ulp/isert/ib_isert.c
@@ -184,7 +184,7 @@ isert_alloc_rx_descriptors(struct isert_conn *isert_conn)
isert_conn->rx_descs = kzalloc(ISERT_QP_MAX_RECV_DTOS *
sizeof(struct iser_rx_desc), GFP_KERNEL);
if (!isert_conn->rx_descs)
- goto fail;
+ return -ENOMEM;
rx_desc = isert_conn->rx_descs;
@@ -213,9 +213,7 @@ dma_map_fail:
}
kfree(isert_conn->rx_descs);
isert_conn->rx_descs = NULL;
-fail:
isert_err("conn %p failed to allocate rx descriptors\n", isert_conn);
-
return -ENOMEM;
}
@@ -269,10 +267,8 @@ isert_alloc_comps(struct isert_device *device)
device->comps = kcalloc(device->comps_used, sizeof(struct isert_comp),
GFP_KERNEL);
- if (!device->comps) {
- isert_err("Unable to allocate completion contexts\n");
+ if (!device->comps)
return -ENOMEM;
- }
max_cqe = min(ISER_MAX_CQ_LEN, device->ib_device->attrs.max_cqe);
@@ -432,10 +428,8 @@ isert_alloc_login_buf(struct isert_conn *isert_conn,
isert_conn->login_req_buf = kzalloc(sizeof(*isert_conn->login_req_buf),
GFP_KERNEL);
- if (!isert_conn->login_req_buf) {
- isert_err("Unable to allocate isert_conn->login_buf\n");
+ if (!isert_conn->login_req_buf)
return -ENOMEM;
- }
isert_conn->login_req_dma = ib_dma_map_single(ib_dev,
isert_conn->login_req_buf,
@@ -795,6 +789,8 @@ isert_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
*/
return 1;
case RDMA_CM_EVENT_REJECTED: /* FALLTHRU */
+ isert_info("Connection rejected: %s\n",
+ rdma_reject_msg(cma_id, event->status));
case RDMA_CM_EVENT_UNREACHABLE: /* FALLTHRU */
case RDMA_CM_EVENT_CONNECT_ERROR:
ret = isert_connect_error(cma_id);
@@ -1276,11 +1272,8 @@ isert_handle_text_cmd(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd
if (payload_length) {
text_in = kzalloc(payload_length, GFP_KERNEL);
- if (!text_in) {
- isert_err("Unable to allocate text_in of payload_length: %u\n",
- payload_length);
+ if (!text_in)
return -ENOMEM;
- }
}
cmd->text_in_ptr = text_in;
@@ -1851,6 +1844,8 @@ isert_put_response(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
isert_cmd->pdu_buf_dma = ib_dma_map_single(ib_dev,
(void *)cmd->sense_buffer, pdu_len,
DMA_TO_DEVICE);
+ if (ib_dma_mapping_error(ib_dev, isert_cmd->pdu_buf_dma))
+ return -ENOMEM;
isert_cmd->pdu_buf_len = pdu_len;
tx_dsg->addr = isert_cmd->pdu_buf_dma;
@@ -1978,6 +1973,8 @@ isert_put_reject(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
isert_cmd->pdu_buf_dma = ib_dma_map_single(ib_dev,
(void *)cmd->buf_ptr, ISCSI_HDR_LEN,
DMA_TO_DEVICE);
+ if (ib_dma_mapping_error(ib_dev, isert_cmd->pdu_buf_dma))
+ return -ENOMEM;
isert_cmd->pdu_buf_len = ISCSI_HDR_LEN;
tx_dsg->addr = isert_cmd->pdu_buf_dma;
tx_dsg->length = ISCSI_HDR_LEN;
@@ -2018,6 +2015,8 @@ isert_put_text_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
isert_cmd->pdu_buf_dma = ib_dma_map_single(ib_dev,
txt_rsp_buf, txt_rsp_len, DMA_TO_DEVICE);
+ if (ib_dma_mapping_error(ib_dev, isert_cmd->pdu_buf_dma))
+ return -ENOMEM;
isert_cmd->pdu_buf_len = txt_rsp_len;
tx_dsg->addr = isert_cmd->pdu_buf_dma;
@@ -2307,10 +2306,9 @@ isert_setup_np(struct iscsi_np *np,
int ret;
isert_np = kzalloc(sizeof(struct isert_np), GFP_KERNEL);
- if (!isert_np) {
- isert_err("Unable to allocate struct isert_np\n");
+ if (!isert_np)
return -ENOMEM;
- }
+
sema_init(&isert_np->sem, 0);
mutex_init(&isert_np->mutex);
INIT_LIST_HEAD(&isert_np->accepted);
@@ -2651,7 +2649,6 @@ static int __init isert_init(void)
WQ_UNBOUND | WQ_HIGHPRI, 0);
if (!isert_comp_wq) {
isert_err("Unable to allocate isert_comp_wq\n");
- ret = -ENOMEM;
return -ENOMEM;
}
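/*
 * The ib_dma_mapping_error() checks added above follow the standard DMA-API
 * rule: a handle returned by ib_dma_map_single() must be validated before it
 * is posted to the HCA. A minimal sketch of the pattern (hypothetical helper,
 * not code from this patch):
 */
static int example_map_pdu_buf(struct ib_device *ib_dev, void *buf,
			       size_t len, u64 *dma_addr)
{
	*dma_addr = ib_dma_map_single(ib_dev, buf, len, DMA_TO_DEVICE);
	if (ib_dma_mapping_error(ib_dev, *dma_addr))
		return -ENOMEM;		/* nothing was mapped; do not unmap */

	return 0;			/* pair with ib_dma_unmap_single() */
}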
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index d980fb458ad4..8ddc07123193 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -64,6 +64,11 @@ MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_INFO(release_date, DRV_RELDATE);
+#if !defined(CONFIG_DYNAMIC_DEBUG)
+#define DEFINE_DYNAMIC_DEBUG_METADATA(name, fmt)
+#define DYNAMIC_DEBUG_BRANCH(descriptor) false
+#endif
+
static unsigned int srp_sg_tablesize;
static unsigned int cmd_sg_entries;
static unsigned int indirect_sg_entries;
@@ -384,6 +389,9 @@ static struct srp_fr_pool *srp_create_fr_pool(struct ib_device *device,
max_page_list_len);
if (IS_ERR(mr)) {
ret = PTR_ERR(mr);
+ if (ret == -ENOMEM)
+ pr_info("%s: ib_alloc_mr() failed. Try to reduce max_cmd_per_lun, max_sect or ch_count\n",
+ dev_name(&device->dev));
goto destroy_pool;
}
d->mr = mr;
@@ -1266,8 +1274,12 @@ static int srp_map_finish_fmr(struct srp_map_state *state,
struct ib_pool_fmr *fmr;
u64 io_addr = 0;
- if (state->fmr.next >= state->fmr.end)
+ if (state->fmr.next >= state->fmr.end) {
+ shost_printk(KERN_ERR, ch->target->scsi_host,
+ PFX "Out of MRs (mr_per_cmd = %d)\n",
+ ch->target->mr_per_cmd);
return -ENOMEM;
+ }
WARN_ON_ONCE(!dev->use_fmr);
@@ -1323,8 +1335,12 @@ static int srp_map_finish_fr(struct srp_map_state *state,
u32 rkey;
int n, err;
- if (state->fr.next >= state->fr.end)
+ if (state->fr.next >= state->fr.end) {
+ shost_printk(KERN_ERR, ch->target->scsi_host,
+ PFX "Out of MRs (mr_per_cmd = %d)\n",
+ ch->target->mr_per_cmd);
return -ENOMEM;
+ }
WARN_ON_ONCE(!dev->use_fast_reg);
@@ -1556,7 +1572,6 @@ static int srp_map_idb(struct srp_rdma_ch *ch, struct srp_request *req,
return 0;
}
-#if defined(DYNAMIC_DATA_DEBUG)
static void srp_check_mapping(struct srp_map_state *state,
struct srp_rdma_ch *ch, struct srp_request *req,
struct scatterlist *scat, int count)
@@ -1580,7 +1595,6 @@ static void srp_check_mapping(struct srp_map_state *state,
scsi_bufflen(req->scmnd), desc_len, mr_len,
state->ndesc, state->nmdesc);
}
-#endif
/**
* srp_map_data() - map SCSI data buffer onto an SRP request
@@ -1669,14 +1683,12 @@ static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_rdma_ch *ch,
if (ret < 0)
goto unmap;
-#if defined(DYNAMIC_DEBUG)
{
DEFINE_DYNAMIC_DEBUG_METADATA(ddm,
"Memory mapping consistency check");
- if (unlikely(ddm.flags & _DPRINTK_FLAGS_PRINT))
+ if (DYNAMIC_DEBUG_BRANCH(ddm))
srp_check_mapping(&state, ch, req, scat, count);
}
-#endif
/* We've mapped the request, now pull as much of the indirect
* descriptor table as we can into the command buffer. If this
@@ -3287,7 +3299,9 @@ static ssize_t srp_create_target(struct device *dev,
*/
scsi_host_get(target->scsi_host);
- mutex_lock(&host->add_target_mutex);
+ ret = mutex_lock_interruptible(&host->add_target_mutex);
+ if (ret < 0)
+ goto put;
ret = srp_parse_options(buf, target);
if (ret)
@@ -3443,6 +3457,7 @@ connected:
out:
mutex_unlock(&host->add_target_mutex);
+put:
scsi_host_put(target->scsi_host);
if (ret < 0)
scsi_host_put(target->scsi_host);
@@ -3526,6 +3541,7 @@ free_host:
static void srp_add_one(struct ib_device *device)
{
struct srp_device *srp_dev;
+ struct ib_device_attr *attr = &device->attrs;
struct srp_host *host;
int mr_page_shift, p;
u64 max_pages_per_mr;
@@ -3540,25 +3556,25 @@ static void srp_add_one(struct ib_device *device)
* minimum of 4096 bytes. We're unlikely to build large sglists
* out of smaller entries.
*/
- mr_page_shift = max(12, ffs(device->attrs.page_size_cap) - 1);
+ mr_page_shift = max(12, ffs(attr->page_size_cap) - 1);
srp_dev->mr_page_size = 1 << mr_page_shift;
srp_dev->mr_page_mask = ~((u64) srp_dev->mr_page_size - 1);
- max_pages_per_mr = device->attrs.max_mr_size;
+ max_pages_per_mr = attr->max_mr_size;
do_div(max_pages_per_mr, srp_dev->mr_page_size);
pr_debug("%s: %llu / %u = %llu <> %u\n", __func__,
- device->attrs.max_mr_size, srp_dev->mr_page_size,
+ attr->max_mr_size, srp_dev->mr_page_size,
max_pages_per_mr, SRP_MAX_PAGES_PER_MR);
srp_dev->max_pages_per_mr = min_t(u64, SRP_MAX_PAGES_PER_MR,
max_pages_per_mr);
srp_dev->has_fmr = (device->alloc_fmr && device->dealloc_fmr &&
device->map_phys_fmr && device->unmap_fmr);
- srp_dev->has_fr = (device->attrs.device_cap_flags &
+ srp_dev->has_fr = (attr->device_cap_flags &
IB_DEVICE_MEM_MGT_EXTENSIONS);
if (!never_register && !srp_dev->has_fmr && !srp_dev->has_fr) {
dev_warn(&device->dev, "neither FMR nor FR is supported\n");
} else if (!never_register &&
- device->attrs.max_mr_size >= 2 * srp_dev->mr_page_size) {
+ attr->max_mr_size >= 2 * srp_dev->mr_page_size) {
srp_dev->use_fast_reg = (srp_dev->has_fr &&
(!srp_dev->has_fmr || prefer_fr));
srp_dev->use_fmr = !srp_dev->use_fast_reg && srp_dev->has_fmr;
@@ -3571,13 +3587,13 @@ static void srp_add_one(struct ib_device *device)
if (srp_dev->use_fast_reg) {
srp_dev->max_pages_per_mr =
min_t(u32, srp_dev->max_pages_per_mr,
- device->attrs.max_fast_reg_page_list_len);
+ attr->max_fast_reg_page_list_len);
}
srp_dev->mr_max_size = srp_dev->mr_page_size *
srp_dev->max_pages_per_mr;
pr_debug("%s: mr_page_shift = %d, device->max_mr_size = %#llx, device->max_fast_reg_page_list_len = %u, max_pages_per_mr = %d, mr_max_size = %#x\n",
- device->name, mr_page_shift, device->attrs.max_mr_size,
- device->attrs.max_fast_reg_page_list_len,
+ device->name, mr_page_shift, attr->max_mr_size,
+ attr->max_fast_reg_page_list_len,
srp_dev->max_pages_per_mr, srp_dev->mr_max_size);
INIT_LIST_HEAD(&srp_dev->dev_list);
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c
index 0b1f69ed2e92..d21ba9d857c3 100644
--- a/drivers/infiniband/ulp/srpt/ib_srpt.c
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.c
@@ -1840,7 +1840,6 @@ static int srpt_cm_req_recv(struct ib_cm_id *cm_id,
struct srpt_rdma_ch *ch, *tmp_ch;
u32 it_iu_len;
int i, ret = 0;
- unsigned char *p;
WARN_ON_ONCE(irqs_disabled());
@@ -1994,21 +1993,18 @@ static int srpt_cm_req_recv(struct ib_cm_id *cm_id,
be64_to_cpu(*(__be64 *)(ch->i_port_id + 8)));
pr_debug("registering session %s\n", ch->sess_name);
- p = &ch->sess_name[0];
-try_again:
ch->sess = target_alloc_session(&sport->port_tpg_1, 0, 0,
- TARGET_PROT_NORMAL, p, ch, NULL);
+ TARGET_PROT_NORMAL, ch->sess_name, ch,
+ NULL);
+ /* Retry without leading "0x" */
+ if (IS_ERR(ch->sess))
+ ch->sess = target_alloc_session(&sport->port_tpg_1, 0, 0,
+ TARGET_PROT_NORMAL,
+ ch->sess_name + 2, ch, NULL);
if (IS_ERR(ch->sess)) {
- pr_info("Rejected login because no ACL has been"
- " configured yet for initiator %s.\n", p);
- /*
- * XXX: Hack to retry of ch->i_port_id without leading '0x'
- */
- if (p == &ch->sess_name[0]) {
- p += 2;
- goto try_again;
- }
+ pr_info("Rejected login because no ACL has been configured yet for initiator %s.\n",
+ ch->sess_name);
rej->reason = cpu_to_be32((PTR_ERR(ch->sess) == -ENOMEM) ?
SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES :
SRP_LOGIN_REJ_CHANNEL_LIMIT_REACHED);
diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
index 83af17ad0f1f..6d9499658671 100644
--- a/drivers/input/joystick/xpad.c
+++ b/drivers/input/joystick/xpad.c
@@ -134,6 +134,7 @@ static const struct xpad_device {
{ 0x045e, 0x02d1, "Microsoft X-Box One pad", 0, XTYPE_XBOXONE },
{ 0x045e, 0x02dd, "Microsoft X-Box One pad (Firmware 2015)", 0, XTYPE_XBOXONE },
{ 0x045e, 0x02e3, "Microsoft X-Box One Elite pad", 0, XTYPE_XBOXONE },
+ { 0x045e, 0x02ea, "Microsoft X-Box One S pad", 0, XTYPE_XBOXONE },
{ 0x045e, 0x0291, "Xbox 360 Wireless Receiver (XBOX)", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360W },
{ 0x045e, 0x0719, "Xbox 360 Wireless Receiver", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360W },
{ 0x044f, 0x0f07, "Thrustmaster, Inc. Controller", 0, XTYPE_XBOX },
@@ -1044,9 +1045,9 @@ static int xpad_play_effect(struct input_dev *dev, void *data, struct ff_effect
packet->data[7] = 0x00;
packet->data[8] = strong / 512; /* left actuator */
packet->data[9] = weak / 512; /* right actuator */
- packet->data[10] = 0xFF;
- packet->data[11] = 0x00;
- packet->data[12] = 0x00;
+ packet->data[10] = 0xFF; /* on period */
+ packet->data[11] = 0x00; /* off period */
+ packet->data[12] = 0xFF; /* repeat count */
packet->len = 13;
packet->pending = true;
break;
diff --git a/drivers/input/keyboard/gpio_keys.c b/drivers/input/keyboard/gpio_keys.c
index 29093657f2ef..582462d0af75 100644
--- a/drivers/input/keyboard/gpio_keys.c
+++ b/drivers/input/keyboard/gpio_keys.c
@@ -26,15 +26,15 @@
#include <linux/gpio_keys.h>
#include <linux/workqueue.h>
#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/of.h>
-#include <linux/of_platform.h>
-#include <linux/of_gpio.h>
#include <linux/of_irq.h>
#include <linux/spinlock.h>
struct gpio_button_data {
const struct gpio_keys_button *button;
struct input_dev *input;
+ struct gpio_desc *gpiod;
struct timer_list release_timer;
unsigned int release_delay; /* in msecs, for IRQ-only buttons */
@@ -140,7 +140,7 @@ static void gpio_keys_disable_button(struct gpio_button_data *bdata)
*/
disable_irq(bdata->irq);
- if (gpio_is_valid(bdata->button->gpio))
+ if (bdata->gpiod)
cancel_delayed_work_sync(&bdata->work);
else
del_timer_sync(&bdata->release_timer);
@@ -358,19 +358,20 @@ static void gpio_keys_gpio_report_event(struct gpio_button_data *bdata)
const struct gpio_keys_button *button = bdata->button;
struct input_dev *input = bdata->input;
unsigned int type = button->type ?: EV_KEY;
- int state = gpio_get_value_cansleep(button->gpio);
+ int state;
+ state = gpiod_get_value_cansleep(bdata->gpiod);
if (state < 0) {
- dev_err(input->dev.parent, "failed to get gpio state\n");
+ dev_err(input->dev.parent,
+ "failed to get gpio state: %d\n", state);
return;
}
- state = (state ? 1 : 0) ^ button->active_low;
if (type == EV_ABS) {
if (state)
input_event(input, type, button->code, button->value);
} else {
- input_event(input, type, button->code, !!state);
+ input_event(input, type, button->code, state);
}
input_sync(input);
}
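/*
 * Why the "state ^ button->active_low" line disappears above: unlike the raw
 * gpio_get_value*() calls, gpiod_get_value_cansleep() returns the *logical*
 * level, folding in the ACTIVE_LOW flag set when the descriptor was
 * requested. Informal sketch of the rule (not code from this patch):
 *
 *	raw line level:		1	0
 *	active-high gpiod:	1	0
 *	active-low gpiod:	0	1
 *
 * so a return value of 1 always means "button pressed", and negative values
 * are genuine errors, which is why they can now be reported as such.
 */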
@@ -456,7 +457,7 @@ static void gpio_keys_quiesce_key(void *data)
{
struct gpio_button_data *bdata = data;
- if (gpio_is_valid(bdata->button->gpio))
+ if (bdata->gpiod)
cancel_delayed_work_sync(&bdata->work);
else
del_timer_sync(&bdata->release_timer);
@@ -465,7 +466,8 @@ static void gpio_keys_quiesce_key(void *data)
static int gpio_keys_setup_key(struct platform_device *pdev,
struct input_dev *input,
struct gpio_button_data *bdata,
- const struct gpio_keys_button *button)
+ const struct gpio_keys_button *button,
+ struct fwnode_handle *child)
{
const char *desc = button->desc ? button->desc : "gpio_keys";
struct device *dev = &pdev->dev;
@@ -478,18 +480,56 @@ static int gpio_keys_setup_key(struct platform_device *pdev,
bdata->button = button;
spin_lock_init(&bdata->lock);
- if (gpio_is_valid(button->gpio)) {
+ if (child) {
+ bdata->gpiod = devm_get_gpiod_from_child(dev, NULL, child);
+ if (IS_ERR(bdata->gpiod)) {
+ error = PTR_ERR(bdata->gpiod);
+ if (error == -ENOENT) {
+ /*
+ * GPIO is optional, we may be dealing with
+ * purely interrupt-driven setup.
+ */
+ bdata->gpiod = NULL;
+ } else {
+ if (error != -EPROBE_DEFER)
+ dev_err(dev, "failed to get gpio: %d\n",
+ error);
+ return error;
+ }
+ } else {
+ error = gpiod_direction_input(bdata->gpiod);
+ if (error) {
+ dev_err(dev, "Failed to configure GPIO %d as input: %d\n",
+ desc_to_gpio(bdata->gpiod), error);
+ return error;
+ }
+ }
+ } else if (gpio_is_valid(button->gpio)) {
+ /*
+ * Legacy GPIO number, so request the GPIO here and
+ * convert it to descriptor.
+ */
+ unsigned flags = GPIOF_IN;
+
+ if (button->active_low)
+ flags |= GPIOF_ACTIVE_LOW;
- error = devm_gpio_request_one(&pdev->dev, button->gpio,
- GPIOF_IN, desc);
+ error = devm_gpio_request_one(&pdev->dev, button->gpio, flags,
+ desc);
if (error < 0) {
dev_err(dev, "Failed to request GPIO %d, error %d\n",
button->gpio, error);
return error;
}
+ bdata->gpiod = gpio_to_desc(button->gpio);
+ if (!bdata->gpiod)
+ return -EINVAL;
+ }
+
+ if (bdata->gpiod) {
if (button->debounce_interval) {
- error = gpio_set_debounce(button->gpio,
+ error = gpiod_set_debounce(bdata->gpiod,
button->debounce_interval * 1000);
/* use timer if gpiolib doesn't provide debounce */
if (error < 0)
@@ -500,7 +540,7 @@ static int gpio_keys_setup_key(struct platform_device *pdev,
if (button->irq) {
bdata->irq = button->irq;
} else {
- irq = gpio_to_irq(button->gpio);
+ irq = gpiod_to_irq(bdata->gpiod);
if (irq < 0) {
error = irq;
dev_err(dev,
@@ -518,9 +558,10 @@ static int gpio_keys_setup_key(struct platform_device *pdev,
} else {
if (!button->irq) {
- dev_err(dev, "No IRQ specified\n");
+ dev_err(dev, "Found button without gpio or irq\n");
return -EINVAL;
}
+
bdata->irq = button->irq;
if (button->type && button->type != EV_KEY) {
@@ -575,7 +616,7 @@ static void gpio_keys_report_state(struct gpio_keys_drvdata *ddata)
for (i = 0; i < ddata->pdata->nbuttons; i++) {
struct gpio_button_data *bdata = &ddata->data[i];
- if (gpio_is_valid(bdata->button->gpio))
+ if (bdata->gpiod)
gpio_keys_gpio_report_event(bdata);
}
input_sync(input);
@@ -612,25 +653,18 @@ static void gpio_keys_close(struct input_dev *input)
* Handlers for alternative sources of platform_data
*/
-#ifdef CONFIG_OF
/*
- * Translate OpenFirmware node properties into platform_data
+ * Translate properties into platform_data
*/
static struct gpio_keys_platform_data *
gpio_keys_get_devtree_pdata(struct device *dev)
{
- struct device_node *node, *pp;
struct gpio_keys_platform_data *pdata;
struct gpio_keys_button *button;
- int error;
+ struct fwnode_handle *child;
int nbuttons;
- int i;
- node = dev->of_node;
- if (!node)
- return ERR_PTR(-ENODEV);
-
- nbuttons = of_get_available_child_count(node);
+ nbuttons = device_get_child_node_count(dev);
if (nbuttons == 0)
return ERR_PTR(-ENODEV);
@@ -640,64 +674,47 @@ gpio_keys_get_devtree_pdata(struct device *dev)
if (!pdata)
return ERR_PTR(-ENOMEM);
- pdata->buttons = (struct gpio_keys_button *)(pdata + 1);
- pdata->nbuttons = nbuttons;
-
- pdata->rep = !!of_get_property(node, "autorepeat", NULL);
-
- of_property_read_string(node, "label", &pdata->name);
-
- i = 0;
- for_each_available_child_of_node(node, pp) {
- enum of_gpio_flags flags;
+ button = (struct gpio_keys_button *)(pdata + 1);
- button = &pdata->buttons[i++];
+ pdata->buttons = button;
+ pdata->nbuttons = nbuttons;
- button->gpio = of_get_gpio_flags(pp, 0, &flags);
- if (button->gpio < 0) {
- error = button->gpio;
- if (error != -ENOENT) {
- if (error != -EPROBE_DEFER)
- dev_err(dev,
- "Failed to get gpio flags, error: %d\n",
- error);
- return ERR_PTR(error);
- }
- } else {
- button->active_low = flags & OF_GPIO_ACTIVE_LOW;
- }
+ pdata->rep = device_property_read_bool(dev, "autorepeat");
- button->irq = irq_of_parse_and_map(pp, 0);
+ device_property_read_string(dev, "label", &pdata->name);
- if (!gpio_is_valid(button->gpio) && !button->irq) {
- dev_err(dev, "Found button without gpios or irqs\n");
- return ERR_PTR(-EINVAL);
- }
+ device_for_each_child_node(dev, child) {
+ if (is_of_node(child))
+ button->irq =
+ irq_of_parse_and_map(to_of_node(child), 0);
- if (of_property_read_u32(pp, "linux,code", &button->code)) {
- dev_err(dev, "Button without keycode: 0x%x\n",
- button->gpio);
+ if (fwnode_property_read_u32(child, "linux,code",
+ &button->code)) {
+ dev_err(dev, "Button without keycode\n");
+ fwnode_handle_put(child);
return ERR_PTR(-EINVAL);
}
- button->desc = of_get_property(pp, "label", NULL);
+ fwnode_property_read_string(child, "label", &button->desc);
- if (of_property_read_u32(pp, "linux,input-type", &button->type))
+ if (fwnode_property_read_u32(child, "linux,input-type",
+ &button->type))
button->type = EV_KEY;
- button->wakeup = of_property_read_bool(pp, "wakeup-source") ||
- /* legacy name */
- of_property_read_bool(pp, "gpio-key,wakeup");
+ button->wakeup =
+ fwnode_property_read_bool(child, "wakeup-source") ||
+ /* legacy name */
+ fwnode_property_read_bool(child, "gpio-key,wakeup");
- button->can_disable = !!of_get_property(pp, "linux,can-disable", NULL);
+ button->can_disable =
+ fwnode_property_read_bool(child, "linux,can-disable");
- if (of_property_read_u32(pp, "debounce-interval",
+ if (fwnode_property_read_u32(child, "debounce-interval",
&button->debounce_interval))
button->debounce_interval = 5;
- }
- if (pdata->nbuttons == 0)
- return ERR_PTR(-EINVAL);
+ button++;
+ }
return pdata;
}
@@ -708,20 +725,11 @@ static const struct of_device_id gpio_keys_of_match[] = {
};
MODULE_DEVICE_TABLE(of, gpio_keys_of_match);
-#else
-
-static inline struct gpio_keys_platform_data *
-gpio_keys_get_devtree_pdata(struct device *dev)
-{
- return ERR_PTR(-ENODEV);
-}
-
-#endif
-
static int gpio_keys_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
const struct gpio_keys_platform_data *pdata = dev_get_platdata(dev);
+ struct fwnode_handle *child = NULL;
struct gpio_keys_drvdata *ddata;
struct input_dev *input;
size_t size;
@@ -774,14 +782,28 @@ static int gpio_keys_probe(struct platform_device *pdev)
const struct gpio_keys_button *button = &pdata->buttons[i];
struct gpio_button_data *bdata = &ddata->data[i];
- error = gpio_keys_setup_key(pdev, input, bdata, button);
- if (error)
+ if (!dev_get_platdata(dev)) {
+ child = device_get_next_child_node(&pdev->dev, child);
+ if (!child) {
+ dev_err(&pdev->dev,
+ "missing child device node for entry %d\n",
+ i);
+ return -EINVAL;
+ }
+ }
+
+ error = gpio_keys_setup_key(pdev, input, bdata, button, child);
+ if (error) {
+ fwnode_handle_put(child);
return error;
+ }
if (button->wakeup)
wakeup = 1;
}
+ fwnode_handle_put(child);
+
error = sysfs_create_group(&pdev->dev.kobj, &gpio_keys_attr_group);
if (error) {
dev_err(dev, "Unable to export keys/switches, error: %d\n",
@@ -814,8 +836,7 @@ static int gpio_keys_remove(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM_SLEEP
-static int gpio_keys_suspend(struct device *dev)
+static int __maybe_unused gpio_keys_suspend(struct device *dev)
{
struct gpio_keys_drvdata *ddata = dev_get_drvdata(dev);
struct input_dev *input = ddata->input;
@@ -837,7 +858,7 @@ static int gpio_keys_suspend(struct device *dev)
return 0;
}
-static int gpio_keys_resume(struct device *dev)
+static int __maybe_unused gpio_keys_resume(struct device *dev)
{
struct gpio_keys_drvdata *ddata = dev_get_drvdata(dev);
struct input_dev *input = ddata->input;
@@ -863,7 +884,6 @@ static int gpio_keys_resume(struct device *dev)
gpio_keys_report_state(ddata);
return 0;
}
-#endif
static SIMPLE_DEV_PM_OPS(gpio_keys_pm_ops, gpio_keys_suspend, gpio_keys_resume);
@@ -873,7 +893,7 @@ static struct platform_driver gpio_keys_device_driver = {
.driver = {
.name = "gpio-keys",
.pm = &gpio_keys_pm_ops,
- .of_match_table = of_match_ptr(gpio_keys_of_match),
+ .of_match_table = gpio_keys_of_match,
}
};
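/*
 * Contract assumed for the helper used in gpio_keys_setup_key() above (as
 * declared in <linux/gpio/consumer.h> in this kernel; sketch only):
 *
 *	struct gpio_desc *devm_get_gpiod_from_child(struct device *dev,
 *						    const char *con_id,
 *						    struct fwnode_handle *child);
 *
 * It resolves the first GPIO of a child fwnode for DT and ACPI alike, which
 * is what lets one probe path serve both firmware interfaces. -ENOENT is
 * deliberately tolerated because gpio-keys buttons may be interrupt-only.
 */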
diff --git a/drivers/input/keyboard/gpio_keys_polled.c b/drivers/input/keyboard/gpio_keys_polled.c
index 62bdb1d48c49..bed4f2086158 100644
--- a/drivers/input/keyboard/gpio_keys_polled.c
+++ b/drivers/input/keyboard/gpio_keys_polled.c
@@ -30,10 +30,10 @@
#define DRV_NAME "gpio-keys-polled"
struct gpio_keys_button_data {
+ struct gpio_desc *gpiod;
int last_state;
int count;
int threshold;
- int can_sleep;
};
struct gpio_keys_polled_dev {
@@ -46,7 +46,7 @@ struct gpio_keys_polled_dev {
};
static void gpio_keys_button_event(struct input_polled_dev *dev,
- struct gpio_keys_button *button,
+ const struct gpio_keys_button *button,
int state)
{
struct gpio_keys_polled_dev *bdev = dev->private;
@@ -70,21 +70,22 @@ static void gpio_keys_button_event(struct input_polled_dev *dev,
}
static void gpio_keys_polled_check_state(struct input_polled_dev *dev,
- struct gpio_keys_button *button,
+ const struct gpio_keys_button *button,
struct gpio_keys_button_data *bdata)
{
int state;
- if (bdata->can_sleep)
- state = !!gpiod_get_value_cansleep(button->gpiod);
- else
- state = !!gpiod_get_value(button->gpiod);
-
- gpio_keys_button_event(dev, button, state);
+ state = gpiod_get_value_cansleep(bdata->gpiod);
+ if (state < 0) {
+ dev_err(dev->input->dev.parent,
+ "failed to get gpio state: %d\n", state);
+ } else {
+ gpio_keys_button_event(dev, button, state);
- if (state != bdata->last_state) {
- bdata->count = 0;
- bdata->last_state = state;
+ if (state != bdata->last_state) {
+ bdata->count = 0;
+ bdata->last_state = state;
+ }
}
}
@@ -142,48 +143,35 @@ static void gpio_keys_polled_close(struct input_polled_dev *dev)
pdata->disable(bdev->dev);
}
-static struct gpio_keys_platform_data *gpio_keys_polled_get_devtree_pdata(struct device *dev)
+static struct gpio_keys_platform_data *
+gpio_keys_polled_get_devtree_pdata(struct device *dev)
{
struct gpio_keys_platform_data *pdata;
struct gpio_keys_button *button;
struct fwnode_handle *child;
- int error;
int nbuttons;
nbuttons = device_get_child_node_count(dev);
if (nbuttons == 0)
- return NULL;
+ return ERR_PTR(-EINVAL);
pdata = devm_kzalloc(dev, sizeof(*pdata) + nbuttons * sizeof(*button),
GFP_KERNEL);
if (!pdata)
return ERR_PTR(-ENOMEM);
- pdata->buttons = (struct gpio_keys_button *)(pdata + 1);
+ button = (struct gpio_keys_button *)(pdata + 1);
+
+ pdata->buttons = button;
+ pdata->nbuttons = nbuttons;
pdata->rep = device_property_present(dev, "autorepeat");
device_property_read_u32(dev, "poll-interval", &pdata->poll_interval);
device_for_each_child_node(dev, child) {
- struct gpio_desc *desc;
-
- desc = devm_get_gpiod_from_child(dev, NULL, child);
- if (IS_ERR(desc)) {
- error = PTR_ERR(desc);
- if (error != -EPROBE_DEFER)
- dev_err(dev,
- "Failed to get gpio flags, error: %d\n",
- error);
- fwnode_handle_put(child);
- return ERR_PTR(error);
- }
-
- button = &pdata->buttons[pdata->nbuttons++];
- button->gpiod = desc;
-
- if (fwnode_property_read_u32(child, "linux,code", &button->code)) {
- dev_err(dev, "Button without keycode: %d\n",
- pdata->nbuttons - 1);
+ if (fwnode_property_read_u32(child, "linux,code",
+ &button->code)) {
+ dev_err(dev, "button without keycode\n");
fwnode_handle_put(child);
return ERR_PTR(-EINVAL);
}
@@ -206,10 +194,9 @@ static struct gpio_keys_platform_data *gpio_keys_polled_get_devtree_pdata(struct
if (fwnode_property_read_u32(child, "debounce-interval",
&button->debounce_interval))
button->debounce_interval = 5;
- }
- if (pdata->nbuttons == 0)
- return ERR_PTR(-EINVAL);
+ button++;
+ }
return pdata;
}
@@ -220,7 +207,7 @@ static void gpio_keys_polled_set_abs_params(struct input_dev *input,
int i, min = 0, max = 0;
for (i = 0; i < pdata->nbuttons; i++) {
- struct gpio_keys_button *button = &pdata->buttons[i];
+ const struct gpio_keys_button *button = &pdata->buttons[i];
if (button->type != EV_ABS || button->code != code)
continue;
@@ -230,6 +217,7 @@ static void gpio_keys_polled_set_abs_params(struct input_dev *input,
if (button->value > max)
max = button->value;
}
+
input_set_abs_params(input, code, min, max, 0, 0);
}
@@ -242,6 +230,7 @@ MODULE_DEVICE_TABLE(of, gpio_keys_polled_of_match);
static int gpio_keys_polled_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
+ struct fwnode_handle *child = NULL;
const struct gpio_keys_platform_data *pdata = dev_get_platdata(dev);
struct gpio_keys_polled_dev *bdev;
struct input_polled_dev *poll_dev;
@@ -254,10 +243,6 @@ static int gpio_keys_polled_probe(struct platform_device *pdev)
pdata = gpio_keys_polled_get_devtree_pdata(dev);
if (IS_ERR(pdata))
return PTR_ERR(pdata);
- if (!pdata) {
- dev_err(dev, "missing platform data\n");
- return -EINVAL;
- }
}
if (!pdata->poll_interval) {
@@ -300,20 +285,48 @@ static int gpio_keys_polled_probe(struct platform_device *pdev)
__set_bit(EV_REP, input->evbit);
for (i = 0; i < pdata->nbuttons; i++) {
- struct gpio_keys_button *button = &pdata->buttons[i];
+ const struct gpio_keys_button *button = &pdata->buttons[i];
struct gpio_keys_button_data *bdata = &bdev->data[i];
unsigned int type = button->type ?: EV_KEY;
if (button->wakeup) {
dev_err(dev, DRV_NAME " does not support wakeup\n");
+ fwnode_handle_put(child);
return -EINVAL;
}
- /*
- * Legacy GPIO number so request the GPIO here and
- * convert it to descriptor.
- */
- if (!button->gpiod && gpio_is_valid(button->gpio)) {
+ if (!dev_get_platdata(dev)) {
+ /* No legacy static platform data */
+ child = device_get_next_child_node(dev, child);
+ if (!child) {
+ dev_err(dev, "missing child device node\n");
+ return -EINVAL;
+ }
+
+ bdata->gpiod = devm_get_gpiod_from_child(dev, NULL,
+ child);
+ if (IS_ERR(bdata->gpiod)) {
+ error = PTR_ERR(bdata->gpiod);
+ if (error != -EPROBE_DEFER)
+ dev_err(dev,
+ "failed to get gpio: %d\n",
+ error);
+ fwnode_handle_put(child);
+ return error;
+ }
+
+ error = gpiod_direction_input(bdata->gpiod);
+ if (error) {
+ dev_err(dev, "Failed to configure GPIO %d as input: %d\n",
+ desc_to_gpio(bdata->gpiod), error);
+ fwnode_handle_put(child);
+ return error;
+ }
+ } else if (gpio_is_valid(button->gpio)) {
+ /*
+ * Legacy GPIO number so request the GPIO here and
+ * convert it to descriptor.
+ */
unsigned flags = GPIOF_IN;
if (button->active_low)
@@ -322,18 +335,21 @@ static int gpio_keys_polled_probe(struct platform_device *pdev)
error = devm_gpio_request_one(&pdev->dev, button->gpio,
flags, button->desc ? : DRV_NAME);
if (error) {
- dev_err(dev, "unable to claim gpio %u, err=%d\n",
+ dev_err(dev,
+ "unable to claim gpio %u, err=%d\n",
button->gpio, error);
return error;
}
- button->gpiod = gpio_to_desc(button->gpio);
+ bdata->gpiod = gpio_to_desc(button->gpio);
+ if (!bdata->gpiod) {
+ dev_err(dev,
+ "unable to convert gpio %u to descriptor\n",
+ button->gpio);
+ return -EINVAL;
+ }
}
- if (IS_ERR(button->gpiod))
- return PTR_ERR(button->gpiod);
-
- bdata->can_sleep = gpiod_cansleep(button->gpiod);
bdata->last_state = -1;
bdata->threshold = DIV_ROUND_UP(button->debounce_interval,
pdata->poll_interval);
@@ -344,6 +360,8 @@ static int gpio_keys_polled_probe(struct platform_device *pdev)
button->code);
}
+ fwnode_handle_put(child);
+
bdev->poll_dev = poll_dev;
bdev->dev = dev;
bdev->pdata = pdata;
diff --git a/drivers/input/keyboard/lpc32xx-keys.c b/drivers/input/keyboard/lpc32xx-keys.c
index 265d641c40e2..632523d4f5dc 100644
--- a/drivers/input/keyboard/lpc32xx-keys.c
+++ b/drivers/input/keyboard/lpc32xx-keys.c
@@ -182,7 +182,7 @@ static int lpc32xx_kscan_probe(struct platform_device *pdev)
}
irq = platform_get_irq(pdev, 0);
- if (irq < 0 || irq >= NR_IRQS) {
+ if (irq < 0) {
dev_err(&pdev->dev, "failed to get platform irq\n");
return -EINVAL;
}
diff --git a/drivers/input/keyboard/pxa27x_keypad.c b/drivers/input/keyboard/pxa27x_keypad.c
index fcef5d1365e2..e24443376e75 100644
--- a/drivers/input/keyboard/pxa27x_keypad.c
+++ b/drivers/input/keyboard/pxa27x_keypad.c
@@ -316,7 +316,7 @@ static int pxa27x_keypad_build_keycode_from_dt(struct pxa27x_keypad *keypad)
error = of_property_read_u32(np, "marvell,debounce-interval",
&pdata->debounce_interval);
if (error) {
- dev_err(dev, "failed to parse debpunce-interval\n");
+ dev_err(dev, "failed to parse debounce-interval\n");
return error;
}
diff --git a/drivers/input/keyboard/tca8418_keypad.c b/drivers/input/keyboard/tca8418_keypad.c
index 9002298698fc..3048ef3e3e16 100644
--- a/drivers/input/keyboard/tca8418_keypad.c
+++ b/drivers/input/keyboard/tca8418_keypad.c
@@ -164,11 +164,18 @@ static void tca8418_read_keypad(struct tca8418_keypad *keypad_data)
int error, col, row;
u8 reg, state, code;
- /* Initial read of the key event FIFO */
- error = tca8418_read_byte(keypad_data, REG_KEY_EVENT_A, &reg);
+ do {
+ error = tca8418_read_byte(keypad_data, REG_KEY_EVENT_A, &reg);
+ if (error < 0) {
+ dev_err(&keypad_data->client->dev,
+ "unable to read REG_KEY_EVENT_A\n");
+ break;
+ }
+
+ /* Assume that key code 0 signifies empty FIFO */
+ if (reg <= 0)
+ break;
- /* Assume that key code 0 signifies empty FIFO */
- while (error >= 0 && reg > 0) {
state = reg & KEY_EVENT_VALUE;
code = reg & KEY_EVENT_CODE;
@@ -184,11 +191,7 @@ static void tca8418_read_keypad(struct tca8418_keypad *keypad_data)
/* Read for next loop */
error = tca8418_read_byte(keypad_data, REG_KEY_EVENT_A, &reg);
- }
-
- if (error < 0)
- dev_err(&keypad_data->client->dev,
- "unable to read REG_KEY_EVENT_A\n");
+ } while (1);
input_sync(input);
}
diff --git a/drivers/input/misc/Kconfig b/drivers/input/misc/Kconfig
index 7ffb614ce566..1ae4d9617ff8 100644
--- a/drivers/input/misc/Kconfig
+++ b/drivers/input/misc/Kconfig
@@ -625,11 +625,12 @@ config INPUT_DA9055_ONKEY
will be called da9055_onkey.
config INPUT_DA9063_ONKEY
- tristate "Dialog DA9062/63 OnKey"
+ tristate "Dialog DA9063/62/61 OnKey"
depends on MFD_DA9063 || MFD_DA9062
help
- Support the ONKEY of Dialog DA9063 and DA9062 Power Management ICs
- as an input device capable of reporting the power button status.
+ Support the ONKEY of Dialog DA9063, DA9062 and DA9061 Power
+ Management ICs as an input device capable of reporting the
+ power button status.
To compile this driver as a module, choose M here: the module
will be called da9063_onkey.
diff --git a/drivers/input/misc/arizona-haptics.c b/drivers/input/misc/arizona-haptics.c
index 982936334537..07ec465f1095 100644
--- a/drivers/input/misc/arizona-haptics.c
+++ b/drivers/input/misc/arizona-haptics.c
@@ -37,6 +37,8 @@ static void arizona_haptics_work(struct work_struct *work)
struct arizona_haptics,
work);
struct arizona *arizona = haptics->arizona;
+ struct snd_soc_component *component =
+ snd_soc_dapm_to_component(arizona->dapm);
int ret;
if (!haptics->arizona->dapm) {
@@ -66,7 +68,7 @@ static void arizona_haptics_work(struct work_struct *work)
return;
}
- ret = snd_soc_dapm_enable_pin(arizona->dapm, "HAPTICS");
+ ret = snd_soc_component_enable_pin(component, "HAPTICS");
if (ret != 0) {
dev_err(arizona->dev, "Failed to start HAPTICS: %d\n",
ret);
@@ -81,7 +83,7 @@ static void arizona_haptics_work(struct work_struct *work)
}
} else {
/* This disable sequence will be a noop if already enabled */
- ret = snd_soc_dapm_disable_pin(arizona->dapm, "HAPTICS");
+ ret = snd_soc_component_disable_pin(component, "HAPTICS");
if (ret != 0) {
dev_err(arizona->dev, "Failed to disable HAPTICS: %d\n",
ret);
@@ -140,11 +142,14 @@ static int arizona_haptics_play(struct input_dev *input, void *data,
static void arizona_haptics_close(struct input_dev *input)
{
struct arizona_haptics *haptics = input_get_drvdata(input);
+ struct snd_soc_component *component;
cancel_work_sync(&haptics->work);
- if (haptics->arizona->dapm)
- snd_soc_dapm_disable_pin(haptics->arizona->dapm, "HAPTICS");
+ if (haptics->arizona->dapm) {
+ component = snd_soc_dapm_to_component(haptics->arizona->dapm);
+ snd_soc_component_disable_pin(component, "HAPTICS");
+ }
}
static int arizona_haptics_probe(struct platform_device *pdev)
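/*
 * The DAPM pin calls above move to their snd_soc_component_* equivalents; a
 * minimal sketch of the pairing (hypothetical helper, assuming the component
 * is recovered from the stored DAPM context as in this patch):
 */
static int example_haptics_pin(struct arizona *arizona, bool enable)
{
	struct snd_soc_component *component =
		snd_soc_dapm_to_component(arizona->dapm);

	return enable ? snd_soc_component_enable_pin(component, "HAPTICS")
		      : snd_soc_component_disable_pin(component, "HAPTICS");
}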
diff --git a/drivers/input/misc/bma150.c b/drivers/input/misc/bma150.c
index b0d445390ee4..2124390ec38c 100644
--- a/drivers/input/misc/bma150.c
+++ b/drivers/input/misc/bma150.c
@@ -538,8 +538,13 @@ static int bma150_probe(struct i2c_client *client,
return -EIO;
}
+ /*
+ * Note: if the IIO bma180 driver (CONFIG_BMA180) is enabled we want to
+ * fail the probe for the bma180, as the IIO driver is preferred.
+ */
chip_id = i2c_smbus_read_byte_data(client, BMA150_CHIP_ID_REG);
- if (chip_id != BMA150_CHIP_ID && chip_id != BMA180_CHIP_ID) {
+ if (chip_id != BMA150_CHIP_ID &&
+ (IS_ENABLED(CONFIG_BMA180) || chip_id != BMA180_CHIP_ID)) {
dev_err(&client->dev, "BMA150 chip id error: %d\n", chip_id);
return -EINVAL;
}
@@ -643,7 +648,9 @@ static UNIVERSAL_DEV_PM_OPS(bma150_pm, bma150_suspend, bma150_resume, NULL);
static const struct i2c_device_id bma150_id[] = {
{ "bma150", 0 },
+#if !IS_ENABLED(CONFIG_BMA180)
{ "bma180", 0 },
+#endif
{ "smb380", 0 },
{ "bma023", 0 },
{ }
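/*
 * Spelling out the IS_ENABLED() test added above (informal sketch):
 *
 *	CONFIG_BMA180=y or m -> IS_ENABLED() == 1 -> any id other than
 *		BMA150_CHIP_ID is rejected, so bma180 parts fall through to
 *		the preferred IIO driver;
 *	CONFIG_BMA180=n      -> IS_ENABLED() == 0 -> a BMA180_CHIP_ID is
 *		still accepted and handled here.
 *
 * IS_ENABLED() folds to a compile-time 0/1, so no #ifdef is needed around
 * the condition itself, only around the i2c_device_id entry.
 */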
diff --git a/drivers/input/misc/da9063_onkey.c b/drivers/input/misc/da9063_onkey.c
index bb863e062b03..b4ff1e86d3d3 100644
--- a/drivers/input/misc/da9063_onkey.c
+++ b/drivers/input/misc/da9063_onkey.c
@@ -1,5 +1,5 @@
/*
- * OnKey device driver for DA9063 and DA9062 PMICs
+ * OnKey device driver for DA9063, DA9062 and DA9061 PMICs
* Copyright (C) 2015 Dialog Semiconductor Ltd.
*
* This program is free software; you can redistribute it and/or
@@ -87,6 +87,7 @@ static const struct of_device_id da9063_compatible_reg_id_table[] = {
{ .compatible = "dlg,da9062-onkey", .data = &da9062_regs },
{ },
};
+MODULE_DEVICE_TABLE(of, da9063_compatible_reg_id_table);
static void da9063_poll_on(struct work_struct *work)
{
@@ -149,13 +150,13 @@ static void da9063_poll_on(struct work_struct *work)
* and then send shutdown command
*/
dev_dbg(&onkey->input->dev,
- "Sending SHUTDOWN to DA9063 ...\n");
+ "Sending SHUTDOWN to PMIC ...\n");
error = regmap_write(onkey->regmap,
config->onkey_shutdown,
config->onkey_shutdown_mask);
if (error)
dev_err(&onkey->input->dev,
- "Cannot SHUTDOWN DA9063: %d\n",
+ "Cannot SHUTDOWN PMIC: %d\n",
error);
}
}
@@ -300,6 +301,6 @@ static struct platform_driver da9063_onkey_driver = {
module_platform_driver(da9063_onkey_driver);
MODULE_AUTHOR("S Twiss <stwiss.opensource@diasemi.com>");
-MODULE_DESCRIPTION("Onkey device driver for Dialog DA9063 and DA9062");
+MODULE_DESCRIPTION("Onkey device driver for Dialog DA9063, DA9062 and DA9061");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" DA9063_DRVNAME_ONKEY);
diff --git a/drivers/input/misc/drv260x.c b/drivers/input/misc/drv260x.c
index 2adfd86c869a..0a2b865b1000 100644
--- a/drivers/input/misc/drv260x.c
+++ b/drivers/input/misc/drv260x.c
@@ -18,8 +18,6 @@
#include <linux/i2c.h>
#include <linux/input.h>
#include <linux/module.h>
-#include <linux/of_gpio.h>
-#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/slab.h>
#include <linux/delay.h>
@@ -27,7 +25,6 @@
#include <linux/regulator/consumer.h>
#include <dt-bindings/input/ti-drv260x.h>
-#include <linux/platform_data/drv260x-pdata.h>
#define DRV260X_STATUS 0x0
#define DRV260X_MODE 0x1
@@ -468,90 +465,39 @@ static const struct regmap_config drv260x_regmap_config = {
.cache_type = REGCACHE_NONE,
};
-#ifdef CONFIG_OF
-static int drv260x_parse_dt(struct device *dev,
- struct drv260x_data *haptics)
-{
- struct device_node *np = dev->of_node;
- unsigned int voltage;
- int error;
-
- error = of_property_read_u32(np, "mode", &haptics->mode);
- if (error) {
- dev_err(dev, "%s: No entry for mode\n", __func__);
- return error;
- }
-
- error = of_property_read_u32(np, "library-sel", &haptics->library);
- if (error) {
- dev_err(dev, "%s: No entry for library selection\n",
- __func__);
- return error;
- }
-
- error = of_property_read_u32(np, "vib-rated-mv", &voltage);
- if (!error)
- haptics->rated_voltage = drv260x_calculate_voltage(voltage);
-
-
- error = of_property_read_u32(np, "vib-overdrive-mv", &voltage);
- if (!error)
- haptics->overdrive_voltage = drv260x_calculate_voltage(voltage);
-
- return 0;
-}
-#else
-static inline int drv260x_parse_dt(struct device *dev,
- struct drv260x_data *haptics)
-{
- dev_err(dev, "no platform data defined\n");
-
- return -EINVAL;
-}
-#endif
-
static int drv260x_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
- const struct drv260x_platform_data *pdata = dev_get_platdata(&client->dev);
+ struct device *dev = &client->dev;
struct drv260x_data *haptics;
+ u32 voltage;
int error;
- haptics = devm_kzalloc(&client->dev, sizeof(*haptics), GFP_KERNEL);
+ haptics = devm_kzalloc(dev, sizeof(*haptics), GFP_KERNEL);
if (!haptics)
return -ENOMEM;
- haptics->rated_voltage = DRV260X_DEF_OD_CLAMP_VOLT;
- haptics->rated_voltage = DRV260X_DEF_RATED_VOLT;
-
- if (pdata) {
- haptics->mode = pdata->mode;
- haptics->library = pdata->library_selection;
- if (pdata->vib_overdrive_voltage)
- haptics->overdrive_voltage = drv260x_calculate_voltage(pdata->vib_overdrive_voltage);
- if (pdata->vib_rated_voltage)
- haptics->rated_voltage = drv260x_calculate_voltage(pdata->vib_rated_voltage);
- } else if (client->dev.of_node) {
- error = drv260x_parse_dt(&client->dev, haptics);
- if (error)
- return error;
- } else {
- dev_err(&client->dev, "Platform data not set\n");
- return -ENODEV;
+ error = device_property_read_u32(dev, "mode", &haptics->mode);
+ if (error) {
+ dev_err(dev, "Can't fetch 'mode' property: %d\n", error);
+ return error;
}
-
if (haptics->mode < DRV260X_LRA_MODE ||
haptics->mode > DRV260X_ERM_MODE) {
- dev_err(&client->dev,
- "Vibrator mode is invalid: %i\n",
- haptics->mode);
+ dev_err(dev, "Vibrator mode is invalid: %i\n", haptics->mode);
return -EINVAL;
}
+ error = device_property_read_u32(dev, "library-sel", &haptics->library);
+ if (error) {
+ dev_err(dev, "Can't fetch 'library-sel' property: %d\n", error);
+ return error;
+ }
+
if (haptics->library < DRV260X_LIB_EMPTY ||
haptics->library > DRV260X_ERM_LIB_F) {
- dev_err(&client->dev,
+ dev_err(dev,
"Library value is invalid: %i\n", haptics->library);
return -EINVAL;
}
@@ -559,40 +505,44 @@ static int drv260x_probe(struct i2c_client *client,
if (haptics->mode == DRV260X_LRA_MODE &&
haptics->library != DRV260X_LIB_EMPTY &&
haptics->library != DRV260X_LIB_LRA) {
- dev_err(&client->dev,
- "LRA Mode with ERM Library mismatch\n");
+ dev_err(dev, "LRA Mode with ERM Library mismatch\n");
return -EINVAL;
}
if (haptics->mode == DRV260X_ERM_MODE &&
(haptics->library == DRV260X_LIB_EMPTY ||
haptics->library == DRV260X_LIB_LRA)) {
- dev_err(&client->dev,
- "ERM Mode with LRA Library mismatch\n");
+ dev_err(dev, "ERM Mode with LRA Library mismatch\n");
return -EINVAL;
}
- haptics->regulator = devm_regulator_get(&client->dev, "vbat");
+ error = device_property_read_u32(dev, "vib-rated-mv", &voltage);
+ haptics->rated_voltage = error ? DRV260X_DEF_RATED_VOLT :
+ drv260x_calculate_voltage(voltage);
+
+ error = device_property_read_u32(dev, "vib-overdrive-mv", &voltage);
+ haptics->overdrive_voltage = error ? DRV260X_DEF_OD_CLAMP_VOLT :
+ drv260x_calculate_voltage(voltage);
+
+ haptics->regulator = devm_regulator_get(dev, "vbat");
if (IS_ERR(haptics->regulator)) {
error = PTR_ERR(haptics->regulator);
- dev_err(&client->dev,
- "unable to get regulator, error: %d\n", error);
+ dev_err(dev, "unable to get regulator, error: %d\n", error);
return error;
}
- haptics->enable_gpio = devm_gpiod_get_optional(&client->dev, "enable",
+ haptics->enable_gpio = devm_gpiod_get_optional(dev, "enable",
GPIOD_OUT_HIGH);
if (IS_ERR(haptics->enable_gpio))
return PTR_ERR(haptics->enable_gpio);
- haptics->input_dev = devm_input_allocate_device(&client->dev);
+ haptics->input_dev = devm_input_allocate_device(dev);
if (!haptics->input_dev) {
dev_err(&client->dev, "Failed to allocate input device\n");
return -ENOMEM;
}
haptics->input_dev->name = "drv260x:haptics";
- haptics->input_dev->dev.parent = client->dev.parent;
haptics->input_dev->close = drv260x_close;
input_set_drvdata(haptics->input_dev, haptics);
input_set_capability(haptics->input_dev, EV_FF, FF_RUMBLE);
@@ -600,8 +550,7 @@ static int drv260x_probe(struct i2c_client *client,
error = input_ff_create_memless(haptics->input_dev, NULL,
drv260x_haptics_play);
if (error) {
- dev_err(&client->dev, "input_ff_create() failed: %d\n",
- error);
+ dev_err(dev, "input_ff_create() failed: %d\n", error);
return error;
}
@@ -613,21 +562,19 @@ static int drv260x_probe(struct i2c_client *client,
haptics->regmap = devm_regmap_init_i2c(client, &drv260x_regmap_config);
if (IS_ERR(haptics->regmap)) {
error = PTR_ERR(haptics->regmap);
- dev_err(&client->dev, "Failed to allocate register map: %d\n",
- error);
+ dev_err(dev, "Failed to allocate register map: %d\n", error);
return error;
}
error = drv260x_init(haptics);
if (error) {
- dev_err(&client->dev, "Device init failed: %d\n", error);
+ dev_err(dev, "Device init failed: %d\n", error);
return error;
}
error = input_register_device(haptics->input_dev);
if (error) {
- dev_err(&client->dev, "couldn't register input device: %d\n",
- error);
+ dev_err(dev, "couldn't register input device: %d\n", error);
return error;
}
diff --git a/drivers/input/misc/drv2665.c b/drivers/input/misc/drv2665.c
index ef9bc12b3be3..dcb6d8e94b11 100644
--- a/drivers/input/misc/drv2665.c
+++ b/drivers/input/misc/drv2665.c
@@ -125,8 +125,8 @@ static void drv2665_close(struct input_dev *input)
cancel_work_sync(&haptics->work);
- error = regmap_update_bits(haptics->regmap,
- DRV2665_CTRL_2, DRV2665_STANDBY, 1);
+ error = regmap_update_bits(haptics->regmap, DRV2665_CTRL_2,
+ DRV2665_STANDBY, DRV2665_STANDBY);
if (error)
dev_err(&haptics->client->dev,
"Failed to enter standby mode: %d\n", error);
@@ -240,7 +240,7 @@ static int __maybe_unused drv2665_suspend(struct device *dev)
if (haptics->input_dev->users) {
ret = regmap_update_bits(haptics->regmap, DRV2665_CTRL_2,
- DRV2665_STANDBY, 1);
+ DRV2665_STANDBY, DRV2665_STANDBY);
if (ret) {
dev_err(dev, "Failed to set standby mode\n");
regulator_disable(haptics->regulator);
diff --git a/drivers/input/misc/drv2667.c b/drivers/input/misc/drv2667.c
index d5ba7481328c..2849bb6906a8 100644
--- a/drivers/input/misc/drv2667.c
+++ b/drivers/input/misc/drv2667.c
@@ -256,7 +256,7 @@ static void drv2667_close(struct input_dev *input)
cancel_work_sync(&haptics->work);
error = regmap_update_bits(haptics->regmap, DRV2667_CTRL_2,
- DRV2667_STANDBY, 1);
+ DRV2667_STANDBY, DRV2667_STANDBY);
if (error)
dev_err(&haptics->client->dev,
"Failed to enter standby mode: %d\n", error);
@@ -415,7 +415,7 @@ static int __maybe_unused drv2667_suspend(struct device *dev)
if (haptics->input_dev->users) {
ret = regmap_update_bits(haptics->regmap, DRV2667_CTRL_2,
- DRV2667_STANDBY, 1);
+ DRV2667_STANDBY, DRV2667_STANDBY);
if (ret) {
dev_err(dev, "Failed to set standby mode\n");
regulator_disable(haptics->regulator);
diff --git a/drivers/input/misc/soc_button_array.c b/drivers/input/misc/soc_button_array.c
index c14b82709b0f..908b51089dee 100644
--- a/drivers/input/misc/soc_button_array.c
+++ b/drivers/input/misc/soc_button_array.c
@@ -17,6 +17,7 @@
#include <linux/acpi.h>
#include <linux/gpio/consumer.h>
#include <linux/gpio_keys.h>
+#include <linux/gpio.h>
#include <linux/platform_device.h>
/*
@@ -92,7 +93,7 @@ soc_button_device_create(struct platform_device *pdev,
continue;
gpio = soc_button_lookup_gpio(&pdev->dev, info->acpi_index);
- if (gpio < 0)
+ if (!gpio_is_valid(gpio))
continue;
gpio_keys[n_buttons].type = info->event_type;
@@ -166,6 +167,11 @@ static int soc_button_probe(struct platform_device *pdev)
button_info = (struct soc_button_info *)id->driver_data;
+ if (gpiod_count(&pdev->dev, KBUILD_MODNAME) <= 0) {
+ dev_dbg(&pdev->dev, "no GPIO attached, ignoring...\n");
+ return -ENODEV;
+ }
+
priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
diff --git a/drivers/input/misc/tps65218-pwrbutton.c b/drivers/input/misc/tps65218-pwrbutton.c
index 3273217ce80c..cc74a41bdb0d 100644
--- a/drivers/input/misc/tps65218-pwrbutton.c
+++ b/drivers/input/misc/tps65218-pwrbutton.c
@@ -150,12 +150,20 @@ static int tps6521x_pb_probe(struct platform_device *pdev)
return 0;
}
+static const struct platform_device_id tps6521x_pwrbtn_id_table[] = {
+ { "tps65218-pwrbutton", },
+ { "tps65217-pwrbutton", },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(platform, tps6521x_pwrbtn_id_table);
+
static struct platform_driver tps6521x_pb_driver = {
.probe = tps6521x_pb_probe,
.driver = {
.name = "tps6521x_pwrbutton",
.of_match_table = of_tps6521x_pb_match,
},
+ .id_table = tps6521x_pwrbtn_id_table,
};
module_platform_driver(tps6521x_pb_driver);
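/*
 * With the id table above, one driver binds both device names; sketch of the
 * match (hypothetical registrations, not code from this patch):
 *
 *	platform_device_register_simple("tps65218-pwrbutton", -1, NULL, 0);
 *	platform_device_register_simple("tps65217-pwrbutton", -1, NULL, 0);
 *
 * Either name matches an entry in tps6521x_pwrbtn_id_table, so the probe
 * runs even though the driver itself is named "tps6521x_pwrbutton".
 */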
diff --git a/drivers/input/misc/xen-kbdfront.c b/drivers/input/misc/xen-kbdfront.c
index 227fbd2dbb71..3900875dec10 100644
--- a/drivers/input/misc/xen-kbdfront.c
+++ b/drivers/input/misc/xen-kbdfront.c
@@ -108,7 +108,8 @@ static irqreturn_t input_handler(int rq, void *dev_id)
static int xenkbd_probe(struct xenbus_device *dev,
const struct xenbus_device_id *id)
{
- int ret, i, abs;
+ int ret, i;
+ unsigned int abs;
struct xenkbd_info *info;
struct input_dev *kbd, *ptr;
@@ -127,8 +128,7 @@ static int xenkbd_probe(struct xenbus_device *dev,
if (!info->page)
goto error_nomem;
- if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-abs-pointer", "%d", &abs) < 0)
- abs = 0;
+ abs = xenbus_read_unsigned(dev->otherend, "feature-abs-pointer", 0);
if (abs) {
ret = xenbus_write(XBT_NIL, dev->nodename,
"request-abs-pointer", "1");
@@ -322,11 +322,8 @@ static void xenkbd_backend_changed(struct xenbus_device *dev,
case XenbusStateInitWait:
InitWait:
- ret = xenbus_scanf(XBT_NIL, info->xbdev->otherend,
- "feature-abs-pointer", "%d", &val);
- if (ret < 0)
- val = 0;
- if (val) {
+ if (xenbus_read_unsigned(info->xbdev->otherend,
+ "feature-abs-pointer", 0)) {
ret = xenbus_write(XBT_NIL, info->xbdev->nodename,
"request-abs-pointer", "1");
if (ret)
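/*
 * Contract assumed for the helper adopted above (sketch):
 *
 *	unsigned int xenbus_read_unsigned(const char *dir, const char *node,
 *					  unsigned int default_val);
 *
 * It returns default_val when the node is missing or unparseable, which
 * replaces the old "if (xenbus_scanf(...) < 0) val = 0;" fallback with a
 * single call.
 */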
diff --git a/drivers/input/mouse/alps.c b/drivers/input/mouse/alps.c
index 6d7de9bfed9a..328edc8c8786 100644
--- a/drivers/input/mouse/alps.c
+++ b/drivers/input/mouse/alps.c
@@ -1153,15 +1153,13 @@ static void alps_process_packet_v7(struct psmouse *psmouse)
alps_process_touchpad_packet_v7(psmouse);
}
-static unsigned char alps_get_pkt_id_ss4_v2(unsigned char *byte)
+static enum SS4_PACKET_ID alps_get_pkt_id_ss4_v2(unsigned char *byte)
{
- unsigned char pkt_id = SS4_PACKET_ID_IDLE;
+ enum SS4_PACKET_ID pkt_id = SS4_PACKET_ID_IDLE;
switch (byte[3] & 0x30) {
case 0x00:
- if (byte[0] == 0x18 && byte[1] == 0x10 && byte[2] == 0x00 &&
- (byte[3] & 0x88) == 0x08 && byte[4] == 0x10 &&
- byte[5] == 0x00) {
+ if (SS4_IS_IDLE_V2(byte)) {
pkt_id = SS4_PACKET_ID_IDLE;
} else {
pkt_id = SS4_PACKET_ID_ONE;
@@ -1188,7 +1186,7 @@ static int alps_decode_ss4_v2(struct alps_fields *f,
unsigned char *p, struct psmouse *psmouse)
{
struct alps_data *priv = psmouse->private;
- unsigned char pkt_id;
+ enum SS4_PACKET_ID pkt_id;
unsigned int no_data_x, no_data_y;
pkt_id = alps_get_pkt_id_ss4_v2(p);
@@ -1267,18 +1265,12 @@ static int alps_decode_ss4_v2(struct alps_fields *f,
break;
case SS4_PACKET_ID_STICK:
- if (!(priv->flags & ALPS_DUALPOINT)) {
- psmouse_warn(psmouse,
- "Rejected trackstick packet from non DualPoint device");
- } else {
- int x = (s8)(((p[0] & 1) << 7) | (p[1] & 0x7f));
- int y = (s8)(((p[3] & 1) << 7) | (p[2] & 0x7f));
- int pressure = (s8)(p[4] & 0x7f);
-
- input_report_rel(priv->dev2, REL_X, x);
- input_report_rel(priv->dev2, REL_Y, -y);
- input_report_abs(priv->dev2, ABS_PRESSURE, pressure);
- }
+ /*
+ * x, y, and pressure are decoded in
+ * alps_process_packet_ss4_v2()
+ */
+ f->first_mp = 0;
+ f->is_mp = 0;
break;
case SS4_PACKET_ID_IDLE:
@@ -1346,6 +1338,27 @@ static void alps_process_packet_ss4_v2(struct psmouse *psmouse)
priv->multi_packet = 0;
+ /* Report trackstick */
+ if (alps_get_pkt_id_ss4_v2(packet) == SS4_PACKET_ID_STICK) {
+ if (!(priv->flags & ALPS_DUALPOINT)) {
+ psmouse_warn(psmouse,
+ "Rejected trackstick packet from non DualPoint device");
+ return;
+ }
+
+ input_report_rel(dev2, REL_X, SS4_TS_X_V2(packet));
+ input_report_rel(dev2, REL_Y, SS4_TS_Y_V2(packet));
+ input_report_abs(dev2, ABS_PRESSURE, SS4_TS_Z_V2(packet));
+
+ input_report_key(dev2, BTN_LEFT, f->ts_left);
+ input_report_key(dev2, BTN_RIGHT, f->ts_right);
+ input_report_key(dev2, BTN_MIDDLE, f->ts_middle);
+
+ input_sync(dev2);
+ return;
+ }
+
+ /* Report touchpad */
alps_report_mt_data(psmouse, (f->fingers <= 4) ? f->fingers : 4);
input_mt_report_finger_count(dev, f->fingers);
@@ -1356,13 +1369,6 @@ static void alps_process_packet_ss4_v2(struct psmouse *psmouse)
input_report_abs(dev, ABS_PRESSURE, f->pressure);
input_sync(dev);
-
- if (priv->flags & ALPS_DUALPOINT) {
- input_report_key(dev2, BTN_LEFT, f->ts_left);
- input_report_key(dev2, BTN_RIGHT, f->ts_right);
- input_report_key(dev2, BTN_MIDDLE, f->ts_middle);
- input_sync(dev2);
- }
}
static bool alps_is_valid_package_ss4_v2(struct psmouse *psmouse)
diff --git a/drivers/input/mouse/alps.h b/drivers/input/mouse/alps.h
index b9417e2d7ad3..cde6f4bd8ea2 100644
--- a/drivers/input/mouse/alps.h
+++ b/drivers/input/mouse/alps.h
@@ -54,7 +54,15 @@ enum SS4_PACKET_ID {
#define SS4_MASK_NORMAL_BUTTONS 0x07
-#define SS4_1F_X_V2(_b) ((_b[0] & 0x0007) | \
+#define SS4_IS_IDLE_V2(_b) (((_b[0]) == 0x18) && \
+ ((_b[1]) == 0x10) && \
+ ((_b[2]) == 0x00) && \
+ ((_b[3] & 0x88) == 0x08) && \
+ ((_b[4]) == 0x10) && \
+ ((_b[5]) == 0x00) \
+ )
+
+#define SS4_1F_X_V2(_b) (((_b[0]) & 0x0007) | \
((_b[1] << 3) & 0x0078) | \
((_b[1] << 2) & 0x0380) | \
((_b[2] << 5) & 0x1C00) \
@@ -101,6 +109,18 @@ enum SS4_PACKET_ID {
#define SS4_IS_MF_CONTINUE(_b) ((_b[2] & 0x10) == 0x10)
#define SS4_IS_5F_DETECTED(_b) ((_b[2] & 0x10) == 0x10)
+#define SS4_TS_X_V2(_b) (s8)( \
+ ((_b[0] & 0x01) << 7) | \
+ (_b[1] & 0x7F) \
+ )
+
+#define SS4_TS_Y_V2(_b) (s8)( \
+ ((_b[3] & 0x01) << 7) | \
+ (_b[2] & 0x7F) \
+ )
+
+#define SS4_TS_Z_V2(_b) (s8)(_b[4] & 0x7F)
+
#define SS4_MFPACKET_NO_AX 8160 /* X-Coordinate value */
#define SS4_MFPACKET_NO_AY 4080 /* Y-Coordinate value */
@@ -146,7 +166,7 @@ struct alps_protocol_info {
* (aka command mode response) identifies the firmware minor version. This
* can be used to distinguish different hardware models which are not
* uniquely identifiable through their E7 responses.
- * @protocol_info: information about protcol used by the device.
+ * @protocol_info: information about protocol used by the device.
*
* Many (but not all) ALPS touchpads can be identified by looking at the
* values returned in the "E7 report" and/or the "EC report." This table
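/*
 * The SS4_TS_*_V2() macros above assemble a signed 8-bit trackstick delta
 * from bits scattered across the packet; the (s8) cast performs the sign
 * extension. A self-contained userspace sketch with made-up packet bytes
 * (not kernel code):
 */
#include <stdint.h>
#include <stdio.h>

typedef int8_t s8;

#define SS4_TS_X_V2(_b)	(s8)(((_b[0] & 0x01) << 7) | (_b[1] & 0x7F))

int main(void)
{
	unsigned char pkt[6] = { 0x01, 0x70, 0x00, 0x00, 0x00, 0x00 };

	/* (s8)((1 << 7) | 0x70) = (s8)0xF0 = -16 */
	printf("%d\n", SS4_TS_X_V2(pkt));
	return 0;
}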
diff --git a/drivers/input/mouse/elan_i2c_core.c b/drivers/input/mouse/elan_i2c_core.c
index d15b33813021..fa598f7f4372 100644
--- a/drivers/input/mouse/elan_i2c_core.c
+++ b/drivers/input/mouse/elan_i2c_core.c
@@ -1093,19 +1093,18 @@ static int elan_probe(struct i2c_client *client,
if (error)
return error;
+ dev_info(&client->dev,
+ "Elan Touchpad: Module ID: 0x%04x, Firmware: 0x%04x, Sample: 0x%04x, IAP: 0x%04x\n",
+ data->product_id,
+ data->fw_version,
+ data->sm_version,
+ data->iap_version);
+
dev_dbg(&client->dev,
- "Elan Touchpad Information:\n"
- " Module product ID: 0x%04x\n"
- " Firmware Version: 0x%04x\n"
- " Sample Version: 0x%04x\n"
- " IAP Version: 0x%04x\n"
+ "Elan Touchpad Extra Information:\n"
" Max ABS X,Y: %d,%d\n"
" Width X,Y: %d,%d\n"
" Resolution X,Y: %d,%d (dots/mm)\n",
- data->product_id,
- data->fw_version,
- data->sm_version,
- data->iap_version,
data->max_x, data->max_y,
data->width_x, data->width_y,
data->x_res, data->y_res);
diff --git a/drivers/input/rmi4/Kconfig b/drivers/input/rmi4/Kconfig
index 4c8a55857e00..30cc627a4f45 100644
--- a/drivers/input/rmi4/Kconfig
+++ b/drivers/input/rmi4/Kconfig
@@ -27,6 +27,27 @@ config RMI4_SPI
If unsure, say N.
+config RMI4_SMB
+ tristate "RMI4 SMB Support"
+ depends on RMI4_CORE && I2C
+ help
+ Say Y here if you want to support RMI4 devices connected to an SMB
+ bus.
+
+ If unsure, say N.
+
+ To compile this driver as a module, choose M here: the module will be
+ called rmi_smbus.
+
+config RMI4_F03
+ bool "RMI4 Function 03 (PS2 Guest)"
+ depends on RMI4_CORE && SERIO
+ help
+ Say Y here if you want to add support for RMI4 function 03.
+
+ Function 03 provides PS2 guest support for RMI4 devices. This
+ includes support for TrackPoints on TouchPads.
+
config RMI4_2D_SENSOR
bool
depends on RMI4_CORE
@@ -62,13 +83,34 @@ config RMI4_F30
Function 30 provides GPIO and LED support for RMI4 devices. This
includes support for buttons on TouchPads and ClickPads.
+config RMI4_F34
+ bool "RMI4 Function 34 (Device reflash)"
+ depends on RMI4_CORE
+ select FW_LOADER
+ help
+ Say Y here if you want to add support for RMI4 function 34.
+
+ Function 34 provides support for upgrading the firmware on the RMI4
+ device via the firmware loader interface. This is triggered using a
+ sysfs attribute.
+
config RMI4_F54
bool "RMI4 Function 54 (Analog diagnostics)"
depends on RMI4_CORE
depends on VIDEO_V4L2=y || (RMI4_CORE=m && VIDEO_V4L2=m)
select VIDEOBUF2_VMALLOC
+ select RMI4_F55
help
Say Y here if you want to add support for RMI4 function 54
Function 54 provides access to various diagnostic features in certain
RMI4 touch sensors.
+
+config RMI4_F55
+ bool "RMI4 Function 55 (Sensor tuning)"
+ depends on RMI4_CORE
+ help
+ Say Y here if you want to add support for RMI4 function 55.
+
+ Function 55 provides access to the RMI4 touch sensor tuning
+ mechanism.
diff --git a/drivers/input/rmi4/Makefile b/drivers/input/rmi4/Makefile
index 0bafc8502c4b..9aaac3dd8613 100644
--- a/drivers/input/rmi4/Makefile
+++ b/drivers/input/rmi4/Makefile
@@ -4,11 +4,15 @@ rmi_core-y := rmi_bus.o rmi_driver.o rmi_f01.o
rmi_core-$(CONFIG_RMI4_2D_SENSOR) += rmi_2d_sensor.o
# Function drivers
+rmi_core-$(CONFIG_RMI4_F03) += rmi_f03.o
rmi_core-$(CONFIG_RMI4_F11) += rmi_f11.o
rmi_core-$(CONFIG_RMI4_F12) += rmi_f12.o
rmi_core-$(CONFIG_RMI4_F30) += rmi_f30.o
+rmi_core-$(CONFIG_RMI4_F34) += rmi_f34.o rmi_f34v7.o
rmi_core-$(CONFIG_RMI4_F54) += rmi_f54.o
+rmi_core-$(CONFIG_RMI4_F55) += rmi_f55.o
# Transports
obj-$(CONFIG_RMI4_I2C) += rmi_i2c.o
obj-$(CONFIG_RMI4_SPI) += rmi_spi.o
+obj-$(CONFIG_RMI4_SMB) += rmi_smbus.o
diff --git a/drivers/input/rmi4/rmi_2d_sensor.c b/drivers/input/rmi4/rmi_2d_sensor.c
index e97bd7fabccc..07007ff8e29f 100644
--- a/drivers/input/rmi4/rmi_2d_sensor.c
+++ b/drivers/input/rmi4/rmi_2d_sensor.c
@@ -177,10 +177,12 @@ static void rmi_2d_sensor_set_input_params(struct rmi_2d_sensor *sensor)
sensor->dmax = DMAX * res_x;
}
- input_set_abs_params(input, ABS_MT_PRESSURE, 0, 0xff, 0, 0);
- input_set_abs_params(input, ABS_MT_TOUCH_MAJOR, 0, 0x0f, 0, 0);
- input_set_abs_params(input, ABS_MT_TOUCH_MINOR, 0, 0x0f, 0, 0);
- input_set_abs_params(input, ABS_MT_ORIENTATION, 0, 1, 0, 0);
+ input_set_abs_params(input, ABS_MT_PRESSURE, 0, 0xff, 0, 0);
+ input_set_abs_params(input, ABS_MT_TOUCH_MAJOR, 0, 0x0f, 0, 0);
+ input_set_abs_params(input, ABS_MT_TOUCH_MINOR, 0, 0x0f, 0, 0);
+ input_set_abs_params(input, ABS_MT_ORIENTATION, 0, 1, 0, 0);
+ input_set_abs_params(input, ABS_MT_TOOL_TYPE,
+ 0, MT_TOOL_MAX, 0, 0);
if (sensor->sensor_type == rmi_sensor_touchpad)
input_flags = INPUT_MT_POINTER;
diff --git a/drivers/input/rmi4/rmi_2d_sensor.h b/drivers/input/rmi4/rmi_2d_sensor.h
index 77fcdfef003c..c871bef4dac0 100644
--- a/drivers/input/rmi4/rmi_2d_sensor.h
+++ b/drivers/input/rmi4/rmi_2d_sensor.h
@@ -67,6 +67,8 @@ struct rmi_2d_sensor {
u8 report_rel;
u8 x_mm;
u8 y_mm;
+ enum rmi_reg_state dribble;
+ enum rmi_reg_state palm_detect;
};
int rmi_2d_sensor_of_probe(struct device *dev,
diff --git a/drivers/input/rmi4/rmi_bus.c b/drivers/input/rmi4/rmi_bus.c
index ef8c747c35e7..1c40d94ca506 100644
--- a/drivers/input/rmi4/rmi_bus.c
+++ b/drivers/input/rmi4/rmi_bus.c
@@ -230,6 +230,9 @@ err_put_device:
void rmi_unregister_function(struct rmi_function *fn)
{
+ rmi_dbg(RMI_DEBUG_CORE, &fn->dev, "Unregistering F%02X.\n",
+ fn->fd.function_number);
+
device_del(&fn->dev);
of_node_put(fn->dev.of_node);
put_device(&fn->dev);
@@ -302,6 +305,9 @@ struct bus_type rmi_bus_type = {
static struct rmi_function_handler *fn_handlers[] = {
&rmi_f01_handler,
+#ifdef CONFIG_RMI4_F03
+ &rmi_f03_handler,
+#endif
#ifdef CONFIG_RMI4_F11
&rmi_f11_handler,
#endif
@@ -311,9 +317,15 @@ static struct rmi_function_handler *fn_handlers[] = {
#ifdef CONFIG_RMI4_F30
&rmi_f30_handler,
#endif
+#ifdef CONFIG_RMI4_F34
+ &rmi_f34_handler,
+#endif
#ifdef CONFIG_RMI4_F54
&rmi_f54_handler,
#endif
+#ifdef CONFIG_RMI4_F55
+ &rmi_f55_handler,
+#endif
};
static void __rmi_unregister_function_handlers(int start_idx)
diff --git a/drivers/input/rmi4/rmi_bus.h b/drivers/input/rmi4/rmi_bus.h
index 899579830536..b7625a9ac66a 100644
--- a/drivers/input/rmi4/rmi_bus.h
+++ b/drivers/input/rmi4/rmi_bus.h
@@ -105,6 +105,18 @@ rmi_get_platform_data(struct rmi_device *d)
bool rmi_is_physical_device(struct device *dev);
/**
+ * rmi_reset - reset a RMI4 device
+ * @d: Pointer to an RMI device
+ *
+ * Calls for a reset of each function implemented by a specific device.
+ * Returns 0 on success or a negative error code.
+ */
+static inline int rmi_reset(struct rmi_device *d)
+{
+ return d->driver->reset_handler(d);
+}
+
+/**
* rmi_read - read a single byte
* @d: Pointer to an RMI device
* @addr: The address to read from
diff --git a/drivers/input/rmi4/rmi_driver.c b/drivers/input/rmi4/rmi_driver.c
index 4a88312fbd25..11447ab1055c 100644
--- a/drivers/input/rmi4/rmi_driver.c
+++ b/drivers/input/rmi4/rmi_driver.c
@@ -17,6 +17,7 @@
#include <linux/bitmap.h>
#include <linux/delay.h>
#include <linux/fs.h>
+#include <linux/irq.h>
#include <linux/pm.h>
#include <linux/slab.h>
#include <linux/of.h>
@@ -33,12 +34,22 @@
#define RMI_DEVICE_RESET_CMD 0x01
#define DEFAULT_RESET_DELAY_MS 100
-static void rmi_free_function_list(struct rmi_device *rmi_dev)
+void rmi_free_function_list(struct rmi_device *rmi_dev)
{
struct rmi_function *fn, *tmp;
struct rmi_driver_data *data = dev_get_drvdata(&rmi_dev->dev);
+ rmi_dbg(RMI_DEBUG_CORE, &rmi_dev->dev, "Freeing function list\n");
+
+ devm_kfree(&rmi_dev->dev, data->irq_memory);
+ data->irq_memory = NULL;
+ data->irq_status = NULL;
+ data->fn_irq_bits = NULL;
+ data->current_irq_mask = NULL;
+ data->new_irq_mask = NULL;
+
data->f01_container = NULL;
+ data->f34_container = NULL;
/* Doing it in the reverse order so F01 will be removed last */
list_for_each_entry_safe_reverse(fn, tmp,
@@ -133,7 +144,7 @@ static void process_one_interrupt(struct rmi_driver_data *data,
}
}
-int rmi_process_interrupt_requests(struct rmi_device *rmi_dev)
+static int rmi_process_interrupt_requests(struct rmi_device *rmi_dev)
{
struct rmi_driver_data *data = dev_get_drvdata(&rmi_dev->dev);
struct device *dev = &rmi_dev->dev;
@@ -143,7 +154,7 @@ int rmi_process_interrupt_requests(struct rmi_device *rmi_dev)
if (!data)
return 0;
- if (!rmi_dev->xport->attn_data) {
+ if (!data->attn_data.data) {
error = rmi_read_block(rmi_dev,
data->f01_container->fd.data_base_addr + 1,
data->irq_status, data->num_of_irq_regs);
@@ -178,7 +189,81 @@ int rmi_process_interrupt_requests(struct rmi_device *rmi_dev)
return 0;
}
-EXPORT_SYMBOL_GPL(rmi_process_interrupt_requests);
+
+void rmi_set_attn_data(struct rmi_device *rmi_dev, unsigned long irq_status,
+ void *data, size_t size)
+{
+ struct rmi_driver_data *drvdata = dev_get_drvdata(&rmi_dev->dev);
+ struct rmi4_attn_data attn_data;
+ void *fifo_data;
+
+ if (!drvdata->enabled)
+ return;
+
+ fifo_data = kmemdup(data, size, GFP_ATOMIC);
+ if (!fifo_data)
+ return;
+
+ attn_data.irq_status = irq_status;
+ attn_data.size = size;
+ attn_data.data = fifo_data;
+
+ kfifo_put(&drvdata->attn_fifo, attn_data);
+}
+EXPORT_SYMBOL_GPL(rmi_set_attn_data);
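rmi_set_attn_data() is the producer side of the new attention FIFO: transports that receive the interrupt payload themselves queue it here instead of letting the core re-read the data registers over the bus. A minimal sketch of such a producer; the my_xport name is hypothetical and not part of this patch, while rmi_set_attn_data() is:

	/* Hedged sketch: push one attention payload into the core FIFO.
	 * The core kmemdup()s the buffer with GFP_ATOMIC, so calling this
	 * from the transport's IRQ handler is fine; rmi_irq_fn() below
	 * consumes and frees it. How the consuming interrupt is then
	 * raised is transport-specific and elided here.
	 */
	static void my_xport_handle_report(struct rmi_device *rmi_dev,
					   unsigned long irq_status,
					   void *buf, size_t len)
	{
		rmi_set_attn_data(rmi_dev, irq_status, buf, len);
	}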
+
+static irqreturn_t rmi_irq_fn(int irq, void *dev_id)
+{
+ struct rmi_device *rmi_dev = dev_id;
+ struct rmi_driver_data *drvdata = dev_get_drvdata(&rmi_dev->dev);
+ struct rmi4_attn_data attn_data = {0};
+ int ret, count;
+
+ count = kfifo_get(&drvdata->attn_fifo, &attn_data);
+ if (count) {
+ *(drvdata->irq_status) = attn_data.irq_status;
+ drvdata->attn_data = attn_data;
+ }
+
+ ret = rmi_process_interrupt_requests(rmi_dev);
+ if (ret)
+ rmi_dbg(RMI_DEBUG_CORE, &rmi_dev->dev,
+ "Failed to process interrupt request: %d\n", ret);
+
+ if (count)
+ kfree(attn_data.data);
+
+ if (!kfifo_is_empty(&drvdata->attn_fifo))
+ return rmi_irq_fn(irq, dev_id);
+
+ return IRQ_HANDLED;
+}
+
+static int rmi_irq_init(struct rmi_device *rmi_dev)
+{
+ struct rmi_device_platform_data *pdata = rmi_get_platform_data(rmi_dev);
+ struct rmi_driver_data *data = dev_get_drvdata(&rmi_dev->dev);
+ int irq_flags = irq_get_trigger_type(pdata->irq);
+ int ret;
+
+ if (!irq_flags)
+ irq_flags = IRQF_TRIGGER_LOW;
+
+ ret = devm_request_threaded_irq(&rmi_dev->dev, pdata->irq, NULL,
+ rmi_irq_fn, irq_flags | IRQF_ONESHOT,
+ dev_name(rmi_dev->xport->dev),
+ rmi_dev);
+ if (ret < 0) {
+ dev_err(&rmi_dev->dev, "Failed to register interrupt %d\n",
+ pdata->irq);
+
+ return ret;
+ }
+
+ data->enabled = true;
+
+ return 0;
+}
static int suspend_one_function(struct rmi_function *fn)
{
@@ -248,7 +333,7 @@ static int rmi_resume_functions(struct rmi_device *rmi_dev)
return 0;
}
-static int enable_sensor(struct rmi_device *rmi_dev)
+int rmi_enable_sensor(struct rmi_device *rmi_dev)
{
int retval = 0;
@@ -379,8 +464,8 @@ static int rmi_driver_reset_handler(struct rmi_device *rmi_dev)
return 0;
}
-int rmi_read_pdt_entry(struct rmi_device *rmi_dev, struct pdt_entry *entry,
- u16 pdt_address)
+static int rmi_read_pdt_entry(struct rmi_device *rmi_dev,
+ struct pdt_entry *entry, u16 pdt_address)
{
u8 buf[RMI_PDT_ENTRY_SIZE];
int error;
@@ -403,7 +488,6 @@ int rmi_read_pdt_entry(struct rmi_device *rmi_dev, struct pdt_entry *entry,
return 0;
}
-EXPORT_SYMBOL_GPL(rmi_read_pdt_entry);
static void rmi_driver_copy_pdt_to_fd(const struct pdt_entry *pdt,
struct rmi_function_descriptor *fd)
@@ -422,6 +506,7 @@ static void rmi_driver_copy_pdt_to_fd(const struct pdt_entry *pdt,
static int rmi_scan_pdt_page(struct rmi_device *rmi_dev,
int page,
+ int *empty_pages,
void *ctx,
int (*callback)(struct rmi_device *rmi_dev,
void *ctx,
@@ -449,20 +534,30 @@ static int rmi_scan_pdt_page(struct rmi_device *rmi_dev,
return retval;
}
- return (data->f01_bootloader_mode || addr == pdt_start) ?
+ /*
+ * Count number of empty PDT pages. If a gap of two pages
+ * or more is found, stop scanning.
+ */
+ if (addr == pdt_start)
+ ++*empty_pages;
+ else
+ *empty_pages = 0;
+
+ return (data->bootloader_mode || *empty_pages >= 2) ?
RMI_SCAN_DONE : RMI_SCAN_CONTINUE;
}
-static int rmi_scan_pdt(struct rmi_device *rmi_dev, void *ctx,
- int (*callback)(struct rmi_device *rmi_dev,
- void *ctx,
- const struct pdt_entry *entry))
+int rmi_scan_pdt(struct rmi_device *rmi_dev, void *ctx,
+ int (*callback)(struct rmi_device *rmi_dev,
+ void *ctx, const struct pdt_entry *entry))
{
int page;
+ int empty_pages = 0;
int retval = RMI_SCAN_DONE;
for (page = 0; page <= RMI4_MAX_PAGE; page++) {
- retval = rmi_scan_pdt_page(rmi_dev, page, ctx, callback);
+ retval = rmi_scan_pdt_page(rmi_dev, page, &empty_pages,
+ ctx, callback);
if (retval != RMI_SCAN_CONTINUE)
break;
}
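A worked example of the two-empty-pages heuristic above (page numbers illustrative):

	/*
	 * Page 0: F01, F34 found      -> empty_pages = 0
	 * Page 1: no PDT entries      -> empty_pages = 1, keep scanning
	 * Page 2: F11 found           -> empty_pages reset to 0
	 * Pages 3, 4: no PDT entries  -> empty_pages = 2, RMI_SCAN_DONE
	 */

A single empty page no longer terminates the scan, so functions living past a one-page gap are still discovered.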
@@ -600,7 +695,6 @@ free_struct_buff:
kfree(struct_buf);
return ret;
}
-EXPORT_SYMBOL_GPL(rmi_read_register_desc);
const struct rmi_register_desc_item *rmi_get_register_desc_item(
struct rmi_register_descriptor *rdesc, u16 reg)
@@ -616,7 +710,6 @@ const struct rmi_register_desc_item *rmi_get_register_desc_item(
return NULL;
}
-EXPORT_SYMBOL_GPL(rmi_get_register_desc_item);
size_t rmi_register_desc_calc_size(struct rmi_register_descriptor *rdesc)
{
@@ -630,7 +723,6 @@ size_t rmi_register_desc_calc_size(struct rmi_register_descriptor *rdesc)
}
return size;
}
-EXPORT_SYMBOL_GPL(rmi_register_desc_calc_size);
/* Compute the register offset relative to the base address */
int rmi_register_desc_calc_reg_offset(
@@ -648,7 +740,6 @@ int rmi_register_desc_calc_reg_offset(
}
return -1;
}
-EXPORT_SYMBOL_GPL(rmi_register_desc_calc_reg_offset);
bool rmi_register_desc_has_subpacket(const struct rmi_register_desc_item *item,
u8 subpacket)
@@ -657,51 +748,55 @@ bool rmi_register_desc_has_subpacket(const struct rmi_register_desc_item *item,
subpacket) == subpacket;
}
-/* Indicates that flash programming is enabled (bootloader mode). */
-#define RMI_F01_STATUS_BOOTLOADER(status) (!!((status) & 0x40))
-
-/*
- * Given the PDT entry for F01, read the device status register to determine
- * if we're stuck in bootloader mode or not.
- *
- */
static int rmi_check_bootloader_mode(struct rmi_device *rmi_dev,
const struct pdt_entry *pdt)
{
- int error;
- u8 device_status;
+ struct rmi_driver_data *data = dev_get_drvdata(&rmi_dev->dev);
+ int ret;
+ u8 status;
- error = rmi_read(rmi_dev, pdt->data_base_addr + pdt->page_start,
- &device_status);
- if (error) {
- dev_err(&rmi_dev->dev,
- "Failed to read device status: %d.\n", error);
- return error;
+ if (pdt->function_number == 0x34 && pdt->function_version > 1) {
+ ret = rmi_read(rmi_dev, pdt->data_base_addr, &status);
+ if (ret) {
+ dev_err(&rmi_dev->dev,
+ "Failed to read F34 status: %d.\n", ret);
+ return ret;
+ }
+
+ if (status & BIT(7))
+ data->bootloader_mode = true;
+ } else if (pdt->function_number == 0x01) {
+ ret = rmi_read(rmi_dev, pdt->data_base_addr, &status);
+ if (ret) {
+ dev_err(&rmi_dev->dev,
+ "Failed to read F01 status: %d.\n", ret);
+ return ret;
+ }
+
+ if (status & BIT(6))
+ data->bootloader_mode = true;
}
- return RMI_F01_STATUS_BOOTLOADER(device_status);
+ return 0;
}
static int rmi_count_irqs(struct rmi_device *rmi_dev,
void *ctx, const struct pdt_entry *pdt)
{
- struct rmi_driver_data *data = dev_get_drvdata(&rmi_dev->dev);
int *irq_count = ctx;
+ int ret;
*irq_count += pdt->interrupt_source_count;
- if (pdt->function_number == 0x01) {
- data->f01_bootloader_mode =
- rmi_check_bootloader_mode(rmi_dev, pdt);
- if (data->f01_bootloader_mode)
- dev_warn(&rmi_dev->dev,
- "WARNING: RMI4 device is in bootloader mode!\n");
- }
+
+ ret = rmi_check_bootloader_mode(rmi_dev, pdt);
+ if (ret < 0)
+ return ret;
return RMI_SCAN_CONTINUE;
}
-static int rmi_initial_reset(struct rmi_device *rmi_dev,
- void *ctx, const struct pdt_entry *pdt)
+int rmi_initial_reset(struct rmi_device *rmi_dev, void *ctx,
+ const struct pdt_entry *pdt)
{
int error;
@@ -720,6 +815,7 @@ static int rmi_initial_reset(struct rmi_device *rmi_dev,
return RMI_SCAN_DONE;
}
+ rmi_dbg(RMI_DEBUG_CORE, &rmi_dev->dev, "Sending reset\n");
error = rmi_write_block(rmi_dev, cmd_addr, &cmd_buf, 1);
if (error) {
dev_err(&rmi_dev->dev,
@@ -776,6 +872,8 @@ static int rmi_create_function(struct rmi_device *rmi_dev,
if (pdt->function_number == 0x01)
data->f01_container = fn;
+ else if (pdt->function_number == 0x34)
+ data->f34_container = fn;
list_add_tail(&fn->node, &data->function_list);
@@ -786,23 +884,95 @@ err_put_fn:
return error;
}
-int rmi_driver_suspend(struct rmi_device *rmi_dev)
+void rmi_enable_irq(struct rmi_device *rmi_dev, bool clear_wake)
{
- int retval = 0;
+ struct rmi_device_platform_data *pdata = rmi_get_platform_data(rmi_dev);
+ struct rmi_driver_data *data = dev_get_drvdata(&rmi_dev->dev);
+ int irq = pdata->irq;
+ int irq_flags;
+ int retval;
+
+ mutex_lock(&data->enabled_mutex);
+
+ if (data->enabled)
+ goto out;
+
+ enable_irq(irq);
+ data->enabled = true;
+ if (clear_wake && device_may_wakeup(rmi_dev->xport->dev)) {
+ retval = disable_irq_wake(irq);
+ if (retval)
+ dev_warn(&rmi_dev->dev,
+ "Failed to disable irq for wake: %d\n",
+ retval);
+ }
+
+ /*
+ * Call rmi_process_interrupt_requests() after enabling irq,
+ * otherwise we may lose interrupt on edge-triggered systems.
+ */
+ irq_flags = irq_get_trigger_type(pdata->irq);
+ if (irq_flags & IRQ_TYPE_EDGE_BOTH)
+ rmi_process_interrupt_requests(rmi_dev);
+
+out:
+ mutex_unlock(&data->enabled_mutex);
+}
+
+void rmi_disable_irq(struct rmi_device *rmi_dev, bool enable_wake)
+{
+ struct rmi_device_platform_data *pdata = rmi_get_platform_data(rmi_dev);
+ struct rmi_driver_data *data = dev_get_drvdata(&rmi_dev->dev);
+ struct rmi4_attn_data attn_data = {0};
+ int irq = pdata->irq;
+ int retval, count;
+
+ mutex_lock(&data->enabled_mutex);
+
+ if (!data->enabled)
+ goto out;
+
+ data->enabled = false;
+ disable_irq(irq);
+ if (enable_wake && device_may_wakeup(rmi_dev->xport->dev)) {
+ retval = enable_irq_wake(irq);
+ if (retval)
+ dev_warn(&rmi_dev->dev,
+ "Failed to enable irq for wake: %d\n",
+ retval);
+ }
+
+ /* make sure the fifo is clean */
+ while (!kfifo_is_empty(&data->attn_fifo)) {
+ count = kfifo_get(&data->attn_fifo, &attn_data);
+ if (count)
+ kfree(attn_data.data);
+ }
+
+out:
+ mutex_unlock(&data->enabled_mutex);
+}
+
+int rmi_driver_suspend(struct rmi_device *rmi_dev, bool enable_wake)
+{
+ int retval;
retval = rmi_suspend_functions(rmi_dev);
if (retval)
dev_warn(&rmi_dev->dev, "Failed to suspend functions: %d\n",
retval);
+ rmi_disable_irq(rmi_dev, enable_wake);
return retval;
}
EXPORT_SYMBOL_GPL(rmi_driver_suspend);
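These entry points are meant to be called from a transport's dev_pm_ops. A hedged sketch of what an I2C-style transport would do with the new enable_wake/clear_wake arguments (the my_xport naming and drvdata arrangement are hypothetical):

	static int my_xport_suspend(struct device *dev)
	{
		struct rmi_device *rmi_dev = dev_get_drvdata(dev);

		/* true: keep the IRQ armed as a wakeup source */
		return rmi_driver_suspend(rmi_dev, true);
	}

	static int my_xport_resume(struct device *dev)
	{
		struct rmi_device *rmi_dev = dev_get_drvdata(dev);

		/* true: undo the wakeup arming done on suspend */
		return rmi_driver_resume(rmi_dev, true);
	}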
-int rmi_driver_resume(struct rmi_device *rmi_dev)
+int rmi_driver_resume(struct rmi_device *rmi_dev, bool clear_wake)
{
int retval;
+ rmi_enable_irq(rmi_dev, clear_wake);
+
retval = rmi_resume_functions(rmi_dev);
if (retval)
dev_warn(&rmi_dev->dev, "Failed to resume functions: %d\n",
@@ -816,6 +986,9 @@ static int rmi_driver_remove(struct device *dev)
{
struct rmi_device *rmi_dev = to_rmi_device(dev);
+ rmi_disable_irq(rmi_dev, false);
+
+ rmi_f34_remove_sysfs(rmi_dev);
rmi_free_function_list(rmi_dev);
return 0;
@@ -842,15 +1015,95 @@ static inline int rmi_driver_of_probe(struct device *dev,
}
#endif
+int rmi_probe_interrupts(struct rmi_driver_data *data)
+{
+ struct rmi_device *rmi_dev = data->rmi_dev;
+ struct device *dev = &rmi_dev->dev;
+ int irq_count;
+ size_t size;
+ int retval;
+
+ /*
+ * We need to count the IRQs and allocate their storage before scanning
+ * the PDT and creating the function entries, because adding a new
+ * function can trigger events that result in the IRQ related storage
+ * being accessed.
+ */
+ rmi_dbg(RMI_DEBUG_CORE, dev, "%s: Counting IRQs.\n", __func__);
+ irq_count = 0;
+ data->bootloader_mode = false;
+
+ retval = rmi_scan_pdt(rmi_dev, &irq_count, rmi_count_irqs);
+ if (retval < 0) {
+ dev_err(dev, "IRQ counting failed with code %d.\n", retval);
+ return retval;
+ }
+
+ if (data->bootloader_mode)
+ dev_warn(&rmi_dev->dev, "Device in bootloader mode.\n");
+
+ data->irq_count = irq_count;
+ data->num_of_irq_regs = (data->irq_count + 7) / 8;
+
+ size = BITS_TO_LONGS(data->irq_count) * sizeof(unsigned long);
+ data->irq_memory = devm_kzalloc(dev, size * 4, GFP_KERNEL);
+ if (!data->irq_memory) {
+ dev_err(dev, "Failed to allocate memory for irq masks.\n");
+ return -ENOMEM;
+ }
+
+ data->irq_status = data->irq_memory + size * 0;
+ data->fn_irq_bits = data->irq_memory + size * 1;
+ data->current_irq_mask = data->irq_memory + size * 2;
+ data->new_irq_mask = data->irq_memory + size * 3;
+
+ return 0;
+}
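To make the storage arithmetic concrete, a worked example for a hypothetical device with 12 interrupt sources on a 64-bit kernel:

	/*
	 * irq_count       = 12
	 * num_of_irq_regs = (12 + 7) / 8 = 2 one-byte status registers
	 * size            = BITS_TO_LONGS(12) * sizeof(unsigned long)
	 *                 = 1 * 8 = 8 bytes
	 * One size * 4 = 32-byte allocation is then carved into:
	 *   irq_status       at offset  0
	 *   fn_irq_bits      at offset  8
	 *   current_irq_mask at offset 16
	 *   new_irq_mask     at offset 24
	 */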
+
+int rmi_init_functions(struct rmi_driver_data *data)
+{
+ struct rmi_device *rmi_dev = data->rmi_dev;
+ struct device *dev = &rmi_dev->dev;
+ int irq_count;
+ int retval;
+
+ irq_count = 0;
+ rmi_dbg(RMI_DEBUG_CORE, dev, "%s: Creating functions.\n", __func__);
+ retval = rmi_scan_pdt(rmi_dev, &irq_count, rmi_create_function);
+ if (retval < 0) {
+ dev_err(dev, "Function creation failed with code %d.\n",
+ retval);
+ goto err_destroy_functions;
+ }
+
+ if (!data->f01_container) {
+ dev_err(dev, "Missing F01 container!\n");
+ retval = -EINVAL;
+ goto err_destroy_functions;
+ }
+
+ retval = rmi_read_block(rmi_dev,
+ data->f01_container->fd.control_base_addr + 1,
+ data->current_irq_mask, data->num_of_irq_regs);
+ if (retval < 0) {
+ dev_err(dev, "%s: Failed to read current IRQ mask.\n",
+ __func__);
+ goto err_destroy_functions;
+ }
+
+ return 0;
+
+err_destroy_functions:
+ rmi_free_function_list(rmi_dev);
+ return retval;
+}
+
static int rmi_driver_probe(struct device *dev)
{
struct rmi_driver *rmi_driver;
struct rmi_driver_data *data;
struct rmi_device_platform_data *pdata;
struct rmi_device *rmi_dev;
- size_t size;
- void *irq_memory;
- int irq_count;
int retval;
rmi_dbg(RMI_DEBUG_CORE, dev, "%s: Starting probe.\n",
@@ -916,35 +1169,12 @@ static int rmi_driver_probe(struct device *dev)
PDT_PROPERTIES_LOCATION, retval);
}
- /*
- * We need to count the IRQs and allocate their storage before scanning
- * the PDT and creating the function entries, because adding a new
- * function can trigger events that result in the IRQ related storage
- * being accessed.
- */
- rmi_dbg(RMI_DEBUG_CORE, dev, "Counting IRQs.\n");
- irq_count = 0;
- retval = rmi_scan_pdt(rmi_dev, &irq_count, rmi_count_irqs);
- if (retval < 0) {
- dev_err(dev, "IRQ counting failed with code %d.\n", retval);
- goto err;
- }
- data->irq_count = irq_count;
- data->num_of_irq_regs = (data->irq_count + 7) / 8;
-
mutex_init(&data->irq_mutex);
+ mutex_init(&data->enabled_mutex);
- size = BITS_TO_LONGS(data->irq_count) * sizeof(unsigned long);
- irq_memory = devm_kzalloc(dev, size * 4, GFP_KERNEL);
- if (!irq_memory) {
- dev_err(dev, "Failed to allocate memory for irq masks.\n");
+ retval = rmi_probe_interrupts(data);
+ if (retval)
goto err;
- }
-
- data->irq_status = irq_memory + size * 0;
- data->fn_irq_bits = irq_memory + size * 1;
- data->current_irq_mask = irq_memory + size * 2;
- data->new_irq_mask = irq_memory + size * 3;
if (rmi_dev->xport->input) {
/*
@@ -961,36 +1191,20 @@ static int rmi_driver_probe(struct device *dev)
dev_err(dev, "%s: Failed to allocate input device.\n",
__func__);
retval = -ENOMEM;
- goto err_destroy_functions;
+ goto err;
}
rmi_driver_set_input_params(rmi_dev, data->input);
data->input->phys = devm_kasprintf(dev, GFP_KERNEL,
"%s/input0", dev_name(dev));
}
- irq_count = 0;
- rmi_dbg(RMI_DEBUG_CORE, dev, "Creating functions.");
- retval = rmi_scan_pdt(rmi_dev, &irq_count, rmi_create_function);
- if (retval < 0) {
- dev_err(dev, "Function creation failed with code %d.\n",
- retval);
- goto err_destroy_functions;
- }
-
- if (!data->f01_container) {
- dev_err(dev, "Missing F01 container!\n");
- retval = -EINVAL;
- goto err_destroy_functions;
- }
+ retval = rmi_init_functions(data);
+ if (retval)
+ goto err;
- retval = rmi_read_block(rmi_dev,
- data->f01_container->fd.control_base_addr + 1,
- data->current_irq_mask, data->num_of_irq_regs);
- if (retval < 0) {
- dev_err(dev, "%s: Failed to read current IRQ mask.\n",
- __func__);
- goto err_destroy_functions;
- }
+ retval = rmi_f34_create_sysfs(rmi_dev);
+ if (retval)
+ goto err;
if (data->input) {
rmi_driver_set_input_name(rmi_dev, data->input);
@@ -1003,9 +1217,13 @@ static int rmi_driver_probe(struct device *dev)
}
}
+ retval = rmi_irq_init(rmi_dev);
+ if (retval < 0)
+ goto err_destroy_functions;
+
if (data->f01_container->dev.driver)
/* Driver already bound, so enable ATTN now. */
- return enable_sensor(rmi_dev);
+ return rmi_enable_sensor(rmi_dev);
return 0;
diff --git a/drivers/input/rmi4/rmi_driver.h b/drivers/input/rmi4/rmi_driver.h
index 8dfbebe9bf86..24f8f764d171 100644
--- a/drivers/input/rmi4/rmi_driver.h
+++ b/drivers/input/rmi4/rmi_driver.h
@@ -51,9 +51,6 @@ struct pdt_entry {
u8 function_number;
};
-int rmi_read_pdt_entry(struct rmi_device *rmi_dev, struct pdt_entry *entry,
- u16 pdt_address);
-
#define RMI_REG_DESC_PRESENSE_BITS (32 * BITS_PER_BYTE)
#define RMI_REG_DESC_SUBPACKET_BITS (37 * BITS_PER_BYTE)
@@ -95,12 +92,40 @@ bool rmi_register_desc_has_subpacket(const struct rmi_register_desc_item *item,
bool rmi_is_physical_driver(struct device_driver *);
int rmi_register_physical_driver(void);
void rmi_unregister_physical_driver(void);
+void rmi_free_function_list(struct rmi_device *rmi_dev);
+int rmi_enable_sensor(struct rmi_device *rmi_dev);
+int rmi_scan_pdt(struct rmi_device *rmi_dev, void *ctx,
+ int (*callback)(struct rmi_device *rmi_dev, void *ctx,
+ const struct pdt_entry *entry));
+int rmi_probe_interrupts(struct rmi_driver_data *data);
+void rmi_enable_irq(struct rmi_device *rmi_dev, bool clear_wake);
+void rmi_disable_irq(struct rmi_device *rmi_dev, bool enable_wake);
+int rmi_init_functions(struct rmi_driver_data *data);
+int rmi_initial_reset(struct rmi_device *rmi_dev, void *ctx,
+ const struct pdt_entry *pdt);
char *rmi_f01_get_product_ID(struct rmi_function *fn);
+#ifdef CONFIG_RMI4_F34
+int rmi_f34_create_sysfs(struct rmi_device *rmi_dev);
+void rmi_f34_remove_sysfs(struct rmi_device *rmi_dev);
+#else
+static inline int rmi_f34_create_sysfs(struct rmi_device *rmi_dev)
+{
+ return 0;
+}
+
+static inline void rmi_f34_remove_sysfs(struct rmi_device *rmi_dev)
+{
+}
+#endif /* CONFIG_RMI4_F34 */
+
extern struct rmi_function_handler rmi_f01_handler;
+extern struct rmi_function_handler rmi_f03_handler;
extern struct rmi_function_handler rmi_f11_handler;
extern struct rmi_function_handler rmi_f12_handler;
extern struct rmi_function_handler rmi_f30_handler;
+extern struct rmi_function_handler rmi_f34_handler;
extern struct rmi_function_handler rmi_f54_handler;
+extern struct rmi_function_handler rmi_f55_handler;
#endif
diff --git a/drivers/input/rmi4/rmi_f01.c b/drivers/input/rmi4/rmi_f01.c
index b5d2dfc23bad..18baf4ceb940 100644
--- a/drivers/input/rmi4/rmi_f01.c
+++ b/drivers/input/rmi4/rmi_f01.c
@@ -62,6 +62,8 @@ struct f01_basic_properties {
#define RMI_F01_STATUS_CODE(status) ((status) & 0x0f)
/* The device has lost its configuration for some reason. */
#define RMI_F01_STATUS_UNCONFIGURED(status) (!!((status) & 0x80))
+/* The device is in bootloader mode */
+#define RMI_F01_STATUS_BOOTLOADER(status) ((status) & 0x40)
/* Control register bits */
@@ -326,12 +328,12 @@ static int rmi_f01_probe(struct rmi_function *fn)
}
switch (pdata->power_management.nosleep) {
- case RMI_F01_NOSLEEP_DEFAULT:
+ case RMI_REG_STATE_DEFAULT:
break;
- case RMI_F01_NOSLEEP_OFF:
+ case RMI_REG_STATE_OFF:
f01->device_control.ctrl0 &= ~RMI_F01_CTRL0_NOSLEEP_BIT;
break;
- case RMI_F01_NOSLEEP_ON:
+ case RMI_REG_STATE_ON:
f01->device_control.ctrl0 |= RMI_F01_CTRL0_NOSLEEP_BIT;
break;
}
@@ -593,6 +595,10 @@ static int rmi_f01_attention(struct rmi_function *fn,
return error;
}
+ if (RMI_F01_STATUS_BOOTLOADER(device_status))
+ dev_warn(&fn->dev,
+ "Device in bootloader mode, please update firmware\n");
+
if (RMI_F01_STATUS_UNCONFIGURED(device_status)) {
dev_warn(&fn->dev, "Device reset detected.\n");
error = rmi_dev->driver->reset_handler(rmi_dev);
diff --git a/drivers/input/rmi4/rmi_f03.c b/drivers/input/rmi4/rmi_f03.c
new file mode 100644
index 000000000000..8a7ca3e2f95e
--- /dev/null
+++ b/drivers/input/rmi4/rmi_f03.c
@@ -0,0 +1,250 @@
+/*
+ * Copyright (C) 2015-2016 Red Hat
+ * Copyright (C) 2015 Lyude Paul <thatslyude@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/serio.h>
+#include <linux/notifier.h>
+#include "rmi_driver.h"
+
+#define RMI_F03_RX_DATA_OFB 0x01
+#define RMI_F03_OB_SIZE 2
+
+#define RMI_F03_OB_OFFSET 2
+#define RMI_F03_OB_DATA_OFFSET 1
+#define RMI_F03_OB_FLAG_TIMEOUT BIT(6)
+#define RMI_F03_OB_FLAG_PARITY BIT(7)
+
+#define RMI_F03_DEVICE_COUNT 0x07
+#define RMI_F03_BYTES_PER_DEVICE 0x07
+#define RMI_F03_BYTES_PER_DEVICE_SHIFT 4
+#define RMI_F03_QUEUE_LENGTH 0x0F
+
+struct f03_data {
+ struct rmi_function *fn;
+
+ struct serio *serio;
+
+ u8 device_count;
+ u8 rx_queue_length;
+};
+
+static int rmi_f03_pt_write(struct serio *id, unsigned char val)
+{
+ struct f03_data *f03 = id->port_data;
+ int error;
+
+ rmi_dbg(RMI_DEBUG_FN, &f03->fn->dev,
+ "%s: Wrote %.2hhx to PS/2 passthrough address",
+ __func__, val);
+
+ error = rmi_write(f03->fn->rmi_dev, f03->fn->fd.data_base_addr, val);
+ if (error) {
+ dev_err(&f03->fn->dev,
+ "%s: Failed to write to F03 TX register (%d).\n",
+ __func__, error);
+ return error;
+ }
+
+ return 0;
+}
+
+static int rmi_f03_initialize(struct f03_data *f03)
+{
+ struct rmi_function *fn = f03->fn;
+ struct device *dev = &fn->dev;
+ int error;
+ u8 bytes_per_device;
+ u8 query1;
+ u8 query2[RMI_F03_DEVICE_COUNT * RMI_F03_BYTES_PER_DEVICE];
+ size_t query2_len;
+
+ error = rmi_read(fn->rmi_dev, fn->fd.query_base_addr, &query1);
+ if (error) {
+ dev_err(dev, "Failed to read query register (%d).\n", error);
+ return error;
+ }
+
+ f03->device_count = query1 & RMI_F03_DEVICE_COUNT;
+ bytes_per_device = (query1 >> RMI_F03_BYTES_PER_DEVICE_SHIFT) &
+ RMI_F03_BYTES_PER_DEVICE;
+
+ query2_len = f03->device_count * bytes_per_device;
+
+ /*
+ * The first generation of image sensors doesn't have a second part to
+ * its F03 query, so we have to set some of these values manually.
+ */
+ if (query2_len < 1) {
+ f03->device_count = 1;
+ f03->rx_queue_length = 7;
+ } else {
+ error = rmi_read_block(fn->rmi_dev, fn->fd.query_base_addr + 1,
+ query2, query2_len);
+ if (error) {
+ dev_err(dev,
+ "Failed to read second set of query registers (%d).\n",
+ error);
+ return error;
+ }
+
+ f03->rx_queue_length = query2[0] & RMI_F03_QUEUE_LENGTH;
+ }
+
+ return 0;
+}
+
+static int rmi_f03_register_pt(struct f03_data *f03)
+{
+ struct serio *serio;
+
+ serio = kzalloc(sizeof(struct serio), GFP_KERNEL);
+ if (!serio)
+ return -ENOMEM;
+
+ serio->id.type = SERIO_8042;
+ serio->write = rmi_f03_pt_write;
+ serio->port_data = f03;
+
+ strlcpy(serio->name, "Synaptics RMI4 PS/2 pass-through",
+ sizeof(serio->name));
+ strlcpy(serio->phys, "synaptics-rmi4-pt/serio1",
+ sizeof(serio->phys));
+ serio->dev.parent = &f03->fn->dev;
+
+ f03->serio = serio;
+
+ serio_register_port(serio);
+
+ return 0;
+}
+
+static int rmi_f03_probe(struct rmi_function *fn)
+{
+ struct device *dev = &fn->dev;
+ struct f03_data *f03;
+ int error;
+
+ f03 = devm_kzalloc(dev, sizeof(struct f03_data), GFP_KERNEL);
+ if (!f03)
+ return -ENOMEM;
+
+ f03->fn = fn;
+
+ error = rmi_f03_initialize(f03);
+ if (error < 0)
+ return error;
+
+ if (f03->device_count != 1)
+ dev_warn(dev, "found %d devices on PS/2 passthrough\n",
+ f03->device_count);
+
+ dev_set_drvdata(dev, f03);
+
+ error = rmi_f03_register_pt(f03);
+ if (error)
+ return error;
+
+ return 0;
+}
+
+static int rmi_f03_config(struct rmi_function *fn)
+{
+ fn->rmi_dev->driver->set_irq_bits(fn->rmi_dev, fn->irq_mask);
+
+ return 0;
+}
+
+static int rmi_f03_attention(struct rmi_function *fn, unsigned long *irq_bits)
+{
+ struct rmi_device *rmi_dev = fn->rmi_dev;
+ struct rmi_driver_data *drvdata = dev_get_drvdata(&rmi_dev->dev);
+ struct f03_data *f03 = dev_get_drvdata(&fn->dev);
+ u16 data_addr = fn->fd.data_base_addr;
+ const u8 ob_len = f03->rx_queue_length * RMI_F03_OB_SIZE;
+ u8 obs[RMI_F03_QUEUE_LENGTH * RMI_F03_OB_SIZE];
+ u8 ob_status;
+ u8 ob_data;
+ unsigned int serio_flags;
+ int i;
+ int error;
+
+ if (!rmi_dev)
+ return -ENODEV;
+
+ if (drvdata->attn_data.data) {
+ /* First grab the data passed by the transport device */
+ if (drvdata->attn_data.size < ob_len) {
+ dev_warn(&fn->dev, "F03 interrupted, but data is missing!\n");
+ return 0;
+ }
+
+ memcpy(obs, drvdata->attn_data.data, ob_len);
+
+ drvdata->attn_data.data += ob_len;
+ drvdata->attn_data.size -= ob_len;
+ } else {
+ /* Grab all of the data registers, and check them for data */
+ error = rmi_read_block(fn->rmi_dev, data_addr + RMI_F03_OB_OFFSET,
+ obs, ob_len);
+ if (error) {
+ dev_err(&fn->dev,
+ "%s: Failed to read F03 output buffers: %d\n",
+ __func__, error);
+ serio_interrupt(f03->serio, 0, SERIO_TIMEOUT);
+ return error;
+ }
+ }
+
+ for (i = 0; i < ob_len; i += RMI_F03_OB_SIZE) {
+ ob_status = obs[i];
+ ob_data = obs[i + RMI_F03_OB_DATA_OFFSET];
+ serio_flags = 0;
+
+ if (!(ob_status & RMI_F03_RX_DATA_OFB))
+ continue;
+
+ if (ob_status & RMI_F03_OB_FLAG_TIMEOUT)
+ serio_flags |= SERIO_TIMEOUT;
+ if (ob_status & RMI_F03_OB_FLAG_PARITY)
+ serio_flags |= SERIO_PARITY;
+
+ rmi_dbg(RMI_DEBUG_FN, &fn->dev,
+ "%s: Received %.2hhx from PS2 guest T: %c P: %c\n",
+ __func__, ob_data,
+ serio_flags & SERIO_TIMEOUT ? 'Y' : 'N',
+ serio_flags & SERIO_PARITY ? 'Y' : 'N');
+
+ serio_interrupt(f03->serio, ob_data, serio_flags);
+ }
+
+ return 0;
+}
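To illustrate the out-of-band format this loop decodes: with rx_queue_length = 7, ob_len is 14 bytes, i.e. seven (status, data) pairs. For example:

	/*
	 * obs[i] = 0x81, obs[i + 1] = 0xaa:
	 *   bit 0 (RMI_F03_RX_DATA_OFB) set -> the data byte is valid
	 *   bit 7 (RMI_F03_OB_FLAG_PARITY) set -> parity error
	 * so 0xaa is passed to serio_interrupt() with SERIO_PARITY.
	 * A pair whose status byte is 0x00 is skipped entirely.
	 */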
+
+static void rmi_f03_remove(struct rmi_function *fn)
+{
+ struct f03_data *f03 = dev_get_drvdata(&fn->dev);
+
+ serio_unregister_port(f03->serio);
+}
+
+struct rmi_function_handler rmi_f03_handler = {
+ .driver = {
+ .name = "rmi4_f03",
+ },
+ .func = 0x03,
+ .probe = rmi_f03_probe,
+ .config = rmi_f03_config,
+ .attention = rmi_f03_attention,
+ .remove = rmi_f03_remove,
+};
+
+MODULE_AUTHOR("Lyude Paul <thatslyude@gmail.com>");
+MODULE_DESCRIPTION("RMI F03 module");
+MODULE_LICENSE("GPL");
diff --git a/drivers/input/rmi4/rmi_f11.c b/drivers/input/rmi4/rmi_f11.c
index f798f427a46f..bc5e37f30ac1 100644
--- a/drivers/input/rmi4/rmi_f11.c
+++ b/drivers/input/rmi4/rmi_f11.c
@@ -571,31 +571,48 @@ static inline u8 rmi_f11_parse_finger_state(const u8 *f_state, u8 n_finger)
static void rmi_f11_finger_handler(struct f11_data *f11,
struct rmi_2d_sensor *sensor,
- unsigned long *irq_bits, int num_irq_regs)
+ unsigned long *irq_bits, int num_irq_regs,
+ int size)
{
const u8 *f_state = f11->data.f_state;
u8 finger_state;
u8 i;
+ int abs_fingers;
+ int rel_fingers;
+ int abs_size = sensor->nbr_fingers * RMI_F11_ABS_BYTES;
int abs_bits = bitmap_and(f11->result_bits, irq_bits, f11->abs_mask,
num_irq_regs * 8);
int rel_bits = bitmap_and(f11->result_bits, irq_bits, f11->rel_mask,
num_irq_regs * 8);
- for (i = 0; i < sensor->nbr_fingers; i++) {
- /* Possible of having 4 fingers per f_statet register */
- finger_state = rmi_f11_parse_finger_state(f_state, i);
- if (finger_state == F11_RESERVED) {
- pr_err("Invalid finger state[%d]: 0x%02x", i,
- finger_state);
- continue;
- }
+ if (abs_bits) {
+ if (abs_size > size)
+ abs_fingers = size / RMI_F11_ABS_BYTES;
+ else
+ abs_fingers = sensor->nbr_fingers;
+
+ for (i = 0; i < abs_fingers; i++) {
+ /* Each f_state register packs the state of up to 4 fingers */
+ finger_state = rmi_f11_parse_finger_state(f_state, i);
+ if (finger_state == F11_RESERVED) {
+ pr_err("Invalid finger state[%d]: 0x%02x", i,
+ finger_state);
+ continue;
+ }
- if (abs_bits)
rmi_f11_abs_pos_process(f11, sensor, &sensor->objs[i],
finger_state, i);
+ }
+ }
+
+ if (rel_bits) {
+ if ((abs_size + sensor->nbr_fingers * RMI_F11_REL_BYTES) > size)
+ rel_fingers = (size - abs_size) / RMI_F11_REL_BYTES;
+ else
+ rel_fingers = sensor->nbr_fingers;
- if (rel_bits)
+ for (i = 0; i < rel_fingers; i++)
rmi_f11_rel_pos_report(f11, i);
}
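A worked example of the clamping above, assuming the driver's 5-byte absolute (RMI_F11_ABS_BYTES) and 2-byte relative (RMI_F11_REL_BYTES) records:

	/*
	 * nbr_fingers = 5, attention report carries size = 32 bytes:
	 *   abs_size = 5 * 5 = 25 <= 32 -> all 5 absolute slots parse;
	 *   25 + 5 * 2 = 35 > 32 -> rel_fingers = (32 - 25) / 2 = 3,
	 *   so only three relative deltas are reported.
	 */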
@@ -611,7 +628,7 @@ static void rmi_f11_finger_handler(struct f11_data *f11,
sensor->nbr_fingers,
sensor->dmax);
- for (i = 0; i < sensor->nbr_fingers; i++) {
+ for (i = 0; i < abs_fingers; i++) {
finger_state = rmi_f11_parse_finger_state(f_state, i);
if (finger_state == F11_RESERVED)
/* no need to send twice the error */
@@ -1062,8 +1079,8 @@ static int rmi_f11_initialize(struct rmi_function *fn)
rc = rmi_2d_sensor_of_probe(&fn->dev, &f11->sensor_pdata);
if (rc)
return rc;
- } else if (pdata->sensor_pdata) {
- f11->sensor_pdata = *pdata->sensor_pdata;
+ } else {
+ f11->sensor_pdata = pdata->sensor_pdata;
}
f11->rezero_wait_ms = f11->sensor_pdata.rezero_wait;
@@ -1124,6 +1141,8 @@ static int rmi_f11_initialize(struct rmi_function *fn)
sensor->topbuttonpad = f11->sensor_pdata.topbuttonpad;
sensor->kernel_tracking = f11->sensor_pdata.kernel_tracking;
sensor->dmax = f11->sensor_pdata.dmax;
+ sensor->dribble = f11->sensor_pdata.dribble;
+ sensor->palm_detect = f11->sensor_pdata.palm_detect;
if (f11->sens_query.has_physical_props) {
sensor->x_mm = f11->sens_query.x_sensor_size_mm;
@@ -1191,11 +1210,33 @@ static int rmi_f11_initialize(struct rmi_function *fn)
ctrl->ctrl0_11[RMI_F11_DELTA_Y_THRESHOLD] =
sensor->axis_align.delta_y_threshold;
- if (f11->sens_query.has_dribble)
- ctrl->ctrl0_11[0] = ctrl->ctrl0_11[0] & ~BIT(6);
+ if (f11->sens_query.has_dribble) {
+ switch (sensor->dribble) {
+ case RMI_REG_STATE_OFF:
+ ctrl->ctrl0_11[0] &= ~BIT(6);
+ break;
+ case RMI_REG_STATE_ON:
+ ctrl->ctrl0_11[0] |= BIT(6);
+ break;
+ case RMI_REG_STATE_DEFAULT:
+ default:
+ break;
+ }
+ }
- if (f11->sens_query.has_palm_det)
- ctrl->ctrl0_11[11] = ctrl->ctrl0_11[11] & ~BIT(0);
+ if (f11->sens_query.has_palm_det) {
+ switch (sensor->palm_detect) {
+ case RMI_REG_STATE_OFF:
+ ctrl->ctrl0_11[11] &= ~BIT(0);
+ break;
+ case RMI_REG_STATE_ON:
+ ctrl->ctrl0_11[11] |= BIT(0);
+ break;
+ case RMI_REG_STATE_DEFAULT:
+ default:
+ break;
+ }
+ }
rc = f11_write_control_regs(fn, &f11->sens_query,
&f11->dev_controls, fn->fd.query_base_addr);
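The tri-state values tested in the switches above come from the 2D sensor platform data (or the corresponding DT properties). A hedged board-code sketch, assuming the embedded sensor_pdata layout this series converts to:

	static const struct rmi_device_platform_data board_pdata = {
		.sensor_pdata = {
			.dribble     = RMI_REG_STATE_OFF,	/* force off */
			.palm_detect = RMI_REG_STATE_DEFAULT,	/* leave as-is */
		},
	};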
@@ -1241,12 +1282,21 @@ static int rmi_f11_attention(struct rmi_function *fn, unsigned long *irq_bits)
struct f11_data *f11 = dev_get_drvdata(&fn->dev);
u16 data_base_addr = fn->fd.data_base_addr;
int error;
+ int valid_bytes = f11->sensor.pkt_size;
- if (rmi_dev->xport->attn_data) {
- memcpy(f11->sensor.data_pkt, rmi_dev->xport->attn_data,
- f11->sensor.attn_size);
- rmi_dev->xport->attn_data += f11->sensor.attn_size;
- rmi_dev->xport->attn_size -= f11->sensor.attn_size;
+ if (drvdata->attn_data.data) {
+ /*
+ * The valid data in the attention report is less than
+ * expected. Only process the complete fingers.
+ */
+ if (f11->sensor.attn_size > drvdata->attn_data.size)
+ valid_bytes = drvdata->attn_data.size;
+ else
+ valid_bytes = f11->sensor.attn_size;
+ memcpy(f11->sensor.data_pkt, drvdata->attn_data.data,
+ valid_bytes);
+ drvdata->attn_data.data += f11->sensor.attn_size;
+ drvdata->attn_data.size -= f11->sensor.attn_size;
} else {
error = rmi_read_block(rmi_dev,
data_base_addr, f11->sensor.data_pkt,
@@ -1256,7 +1306,7 @@ static int rmi_f11_attention(struct rmi_function *fn, unsigned long *irq_bits)
}
rmi_f11_finger_handler(f11, &f11->sensor, irq_bits,
- drvdata->num_of_irq_regs);
+ drvdata->num_of_irq_regs, valid_bytes);
return 0;
}
diff --git a/drivers/input/rmi4/rmi_f12.c b/drivers/input/rmi4/rmi_f12.c
index 332c02f0b107..07aff4356fe0 100644
--- a/drivers/input/rmi4/rmi_f12.c
+++ b/drivers/input/rmi4/rmi_f12.c
@@ -26,9 +26,12 @@ enum rmi_f12_object_type {
RMI_F12_OBJECT_SMALL_OBJECT = 0x0D,
};
+#define F12_DATA1_BYTES_PER_OBJ 8
+
struct f12_data {
struct rmi_2d_sensor sensor;
struct rmi_2d_sensor_platform_data sensor_pdata;
+ bool has_dribble;
u16 data_addr;
@@ -68,10 +71,6 @@ static int rmi_f12_read_sensor_tuning(struct f12_data *f12)
u8 buf[15];
int pitch_x = 0;
int pitch_y = 0;
- int clip_x_low = 0;
- int clip_x_high = 0;
- int clip_y_low = 0;
- int clip_y_high = 0;
int rx_receivers = 0;
int tx_receivers = 0;
int sensor_flags = 0;
@@ -124,7 +123,9 @@ static int rmi_f12_read_sensor_tuning(struct f12_data *f12)
}
rmi_dbg(RMI_DEBUG_FN, &fn->dev, "%s: x low: %d x high: %d y low: %d y high: %d\n",
- __func__, clip_x_low, clip_x_high, clip_y_low, clip_y_high);
+ __func__,
+ sensor->axis_align.clip_x_low, sensor->axis_align.clip_x_high,
+ sensor->axis_align.clip_y_low, sensor->axis_align.clip_y_high);
if (rmi_register_desc_has_subpacket(item, 3)) {
rx_receivers = buf[offset];
@@ -146,12 +147,16 @@ static int rmi_f12_read_sensor_tuning(struct f12_data *f12)
return 0;
}
-static void rmi_f12_process_objects(struct f12_data *f12, u8 *data1)
+static void rmi_f12_process_objects(struct f12_data *f12, u8 *data1, int size)
{
int i;
struct rmi_2d_sensor *sensor = &f12->sensor;
+ int objects = f12->data1->num_subpackets;
+
+ if ((f12->data1->num_subpackets * F12_DATA1_BYTES_PER_OBJ) > size)
+ objects = size / F12_DATA1_BYTES_PER_OBJ;
- for (i = 0; i < f12->data1->num_subpackets; i++) {
+ for (i = 0; i < objects; i++) {
struct rmi_2d_sensor_abs_object *obj = &sensor->objs[i];
obj->type = RMI_2D_OBJECT_NONE;
@@ -182,7 +187,7 @@ static void rmi_f12_process_objects(struct f12_data *f12, u8 *data1)
rmi_2d_sensor_abs_process(sensor, obj, i);
- data1 += 8;
+ data1 += F12_DATA1_BYTES_PER_OBJ;
}
if (sensor->kernel_tracking)
@@ -192,7 +197,7 @@ static void rmi_f12_process_objects(struct f12_data *f12, u8 *data1)
sensor->nbr_fingers,
sensor->dmax);
- for (i = 0; i < sensor->nbr_fingers; i++)
+ for (i = 0; i < objects; i++)
rmi_2d_sensor_abs_report(sensor, &sensor->objs[i], i);
}
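Same clamping idea as in F11, with F12_DATA1_BYTES_PER_OBJ = 8. A worked example:

	/*
	 * data1 describes 10 objects -> 10 * 8 = 80 bytes expected.
	 * If only size = 40 valid bytes arrived, objects = 40 / 8 = 5,
	 * so exactly five complete object records are processed.
	 */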
@@ -201,14 +206,20 @@ static int rmi_f12_attention(struct rmi_function *fn,
{
int retval;
struct rmi_device *rmi_dev = fn->rmi_dev;
+ struct rmi_driver_data *drvdata = dev_get_drvdata(&rmi_dev->dev);
struct f12_data *f12 = dev_get_drvdata(&fn->dev);
struct rmi_2d_sensor *sensor = &f12->sensor;
-
- if (rmi_dev->xport->attn_data) {
- memcpy(sensor->data_pkt, rmi_dev->xport->attn_data,
- sensor->attn_size);
- rmi_dev->xport->attn_data += sensor->attn_size;
- rmi_dev->xport->attn_size -= sensor->attn_size;
+ int valid_bytes = sensor->pkt_size;
+
+ if (drvdata->attn_data.data) {
+ if (sensor->attn_size > drvdata->attn_data.size)
+ valid_bytes = drvdata->attn_data.size;
+ else
+ valid_bytes = sensor->attn_size;
+ memcpy(sensor->data_pkt, drvdata->attn_data.data,
+ valid_bytes);
+ drvdata->attn_data.data += sensor->attn_size;
+ drvdata->attn_data.size -= sensor->attn_size;
} else {
retval = rmi_read_block(rmi_dev, f12->data_addr,
sensor->data_pkt, sensor->pkt_size);
@@ -221,19 +232,83 @@ static int rmi_f12_attention(struct rmi_function *fn,
if (f12->data1)
rmi_f12_process_objects(f12,
- &sensor->data_pkt[f12->data1_offset]);
+ &sensor->data_pkt[f12->data1_offset], valid_bytes);
input_mt_sync_frame(sensor->input);
return 0;
}
+static int rmi_f12_write_control_regs(struct rmi_function *fn)
+{
+ int ret;
+ const struct rmi_register_desc_item *item;
+ struct rmi_device *rmi_dev = fn->rmi_dev;
+ struct f12_data *f12 = dev_get_drvdata(&fn->dev);
+ int control_size;
+ char buf[3];
+ u16 control_offset = 0;
+ u8 subpacket_offset = 0;
+
+ if (f12->has_dribble
+ && (f12->sensor.dribble != RMI_REG_STATE_DEFAULT)) {
+ item = rmi_get_register_desc_item(&f12->control_reg_desc, 20);
+ if (item) {
+ control_offset = rmi_register_desc_calc_reg_offset(
+ &f12->control_reg_desc, 20);
+
+ /*
+ * The byte containing the EnableDribble bit will be
+ * in either byte 0 or byte 2 of control 20, depending
+ * on the existence of subpacket 0. If control 20 is
+ * larger than 3 bytes, just read the first 3.
+ */
+ control_size = min(item->reg_size, 3UL);
+
+ ret = rmi_read_block(rmi_dev, fn->fd.control_base_addr
+ + control_offset, buf, control_size);
+ if (ret)
+ return ret;
+
+ if (rmi_register_desc_has_subpacket(item, 0))
+ subpacket_offset += 1;
+
+ switch (f12->sensor.dribble) {
+ case RMI_REG_STATE_OFF:
+ buf[subpacket_offset] &= ~BIT(2);
+ break;
+ case RMI_REG_STATE_ON:
+ buf[subpacket_offset] |= BIT(2);
+ break;
+ case RMI_REG_STATE_DEFAULT:
+ default:
+ break;
+ }
+
+ ret = rmi_write_block(rmi_dev,
+ fn->fd.control_base_addr + control_offset,
+ buf, control_size);
+ if (ret)
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
static int rmi_f12_config(struct rmi_function *fn)
{
struct rmi_driver *drv = fn->rmi_dev->driver;
+ int ret;
drv->set_irq_bits(fn->rmi_dev, fn->irq_mask);
+ ret = rmi_f12_write_control_regs(fn);
+ if (ret)
+ dev_warn(&fn->dev,
+ "Failed to write F12 control registers: %d\n", ret);
+
return 0;
}
@@ -247,7 +322,7 @@ static int rmi_f12_probe(struct rmi_function *fn)
const struct rmi_register_desc_item *item;
struct rmi_2d_sensor *sensor;
struct rmi_device_platform_data *pdata = rmi_get_platform_data(rmi_dev);
- struct rmi_transport_dev *xport = rmi_dev->xport;
+ struct rmi_driver_data *drvdata = dev_get_drvdata(&rmi_dev->dev);
u16 data_offset = 0;
rmi_dbg(RMI_DEBUG_FN, &fn->dev, "%s\n", __func__);
@@ -260,7 +335,7 @@ static int rmi_f12_probe(struct rmi_function *fn)
}
++query_addr;
- if (!(buf & 0x1)) {
+ if (!(buf & BIT(0))) {
dev_err(&fn->dev,
"Behavior of F12 without register descriptors is undefined.\n");
return -ENODEV;
@@ -270,12 +345,14 @@ static int rmi_f12_probe(struct rmi_function *fn)
if (!f12)
return -ENOMEM;
+ f12->has_dribble = !!(buf & BIT(3));
+
if (fn->dev.of_node) {
ret = rmi_2d_sensor_of_probe(&fn->dev, &f12->sensor_pdata);
if (ret)
return ret;
- } else if (pdata->sensor_pdata) {
- f12->sensor_pdata = *pdata->sensor_pdata;
+ } else {
+ f12->sensor_pdata = pdata->sensor_pdata;
}
ret = rmi_read_register_desc(rmi_dev, query_addr,
@@ -318,6 +395,7 @@ static int rmi_f12_probe(struct rmi_function *fn)
sensor->x_mm = f12->sensor_pdata.x_mm;
sensor->y_mm = f12->sensor_pdata.y_mm;
+ sensor->dribble = f12->sensor_pdata.dribble;
if (sensor->sensor_type == rmi_sensor_default)
sensor->sensor_type =
@@ -343,7 +421,7 @@ static int rmi_f12_probe(struct rmi_function *fn)
* HID attention reports.
*/
item = rmi_get_register_desc_item(&f12->data_reg_desc, 0);
- if (item && !xport->attn_data)
+ if (item && !drvdata->attn_data.data)
data_offset += item->reg_size;
item = rmi_get_register_desc_item(&f12->data_reg_desc, 1);
@@ -357,15 +435,15 @@ static int rmi_f12_probe(struct rmi_function *fn)
}
item = rmi_get_register_desc_item(&f12->data_reg_desc, 2);
- if (item && !xport->attn_data)
+ if (item && !drvdata->attn_data.data)
data_offset += item->reg_size;
item = rmi_get_register_desc_item(&f12->data_reg_desc, 3);
- if (item && !xport->attn_data)
+ if (item && !drvdata->attn_data.data)
data_offset += item->reg_size;
item = rmi_get_register_desc_item(&f12->data_reg_desc, 4);
- if (item && !xport->attn_data)
+ if (item && !drvdata->attn_data.data)
data_offset += item->reg_size;
item = rmi_get_register_desc_item(&f12->data_reg_desc, 5);
@@ -377,22 +455,22 @@ static int rmi_f12_probe(struct rmi_function *fn)
}
item = rmi_get_register_desc_item(&f12->data_reg_desc, 6);
- if (item && !xport->attn_data) {
+ if (item && !drvdata->attn_data.data) {
f12->data6 = item;
f12->data6_offset = data_offset;
data_offset += item->reg_size;
}
item = rmi_get_register_desc_item(&f12->data_reg_desc, 7);
- if (item && !xport->attn_data)
+ if (item && !drvdata->attn_data.data)
data_offset += item->reg_size;
item = rmi_get_register_desc_item(&f12->data_reg_desc, 8);
- if (item && !xport->attn_data)
+ if (item && !drvdata->attn_data.data)
data_offset += item->reg_size;
item = rmi_get_register_desc_item(&f12->data_reg_desc, 9);
- if (item && !xport->attn_data) {
+ if (item && !drvdata->attn_data.data) {
f12->data9 = item;
f12->data9_offset = data_offset;
data_offset += item->reg_size;
@@ -401,27 +479,27 @@ static int rmi_f12_probe(struct rmi_function *fn)
}
item = rmi_get_register_desc_item(&f12->data_reg_desc, 10);
- if (item && !xport->attn_data)
+ if (item && !drvdata->attn_data.data)
data_offset += item->reg_size;
item = rmi_get_register_desc_item(&f12->data_reg_desc, 11);
- if (item && !xport->attn_data)
+ if (item && !drvdata->attn_data.data)
data_offset += item->reg_size;
item = rmi_get_register_desc_item(&f12->data_reg_desc, 12);
- if (item && !xport->attn_data)
+ if (item && !drvdata->attn_data.data)
data_offset += item->reg_size;
item = rmi_get_register_desc_item(&f12->data_reg_desc, 13);
- if (item && !xport->attn_data)
+ if (item && !drvdata->attn_data.data)
data_offset += item->reg_size;
item = rmi_get_register_desc_item(&f12->data_reg_desc, 14);
- if (item && !xport->attn_data)
+ if (item && !drvdata->attn_data.data)
data_offset += item->reg_size;
item = rmi_get_register_desc_item(&f12->data_reg_desc, 15);
- if (item && !xport->attn_data) {
+ if (item && !drvdata->attn_data.data) {
f12->data15 = item;
f12->data15_offset = data_offset;
data_offset += item->reg_size;
diff --git a/drivers/input/rmi4/rmi_f30.c b/drivers/input/rmi4/rmi_f30.c
index 760aff1bc420..f4b491e3e0fd 100644
--- a/drivers/input/rmi4/rmi_f30.c
+++ b/drivers/input/rmi4/rmi_f30.c
@@ -99,6 +99,7 @@ static int rmi_f30_attention(struct rmi_function *fn, unsigned long *irq_bits)
{
struct f30_data *f30 = dev_get_drvdata(&fn->dev);
struct rmi_device *rmi_dev = fn->rmi_dev;
+ struct rmi_driver_data *drvdata = dev_get_drvdata(&rmi_dev->dev);
int retval;
int gpiled = 0;
int value = 0;
@@ -109,11 +110,15 @@ static int rmi_f30_attention(struct rmi_function *fn, unsigned long *irq_bits)
return 0;
/* Read the gpi led data. */
- if (rmi_dev->xport->attn_data) {
- memcpy(f30->data_regs, rmi_dev->xport->attn_data,
+ if (drvdata->attn_data.data) {
+ if (drvdata->attn_data.size < f30->register_count) {
+ dev_warn(&fn->dev, "F30 interrupted, but data is missing\n");
+ return 0;
+ }
+ memcpy(f30->data_regs, drvdata->attn_data.data,
f30->register_count);
- rmi_dev->xport->attn_data += f30->register_count;
- rmi_dev->xport->attn_size -= f30->register_count;
+ drvdata->attn_data.data += f30->register_count;
+ drvdata->attn_data.size -= f30->register_count;
} else {
retval = rmi_read_block(rmi_dev, fn->fd.data_base_addr,
f30->data_regs, f30->register_count);
@@ -192,7 +197,7 @@ static int rmi_f30_config(struct rmi_function *fn)
rmi_get_platform_data(fn->rmi_dev);
int error;
- if (pdata->f30_data && pdata->f30_data->disable) {
+ if (pdata->f30_data.disable) {
drv->clear_irq_bits(fn->rmi_dev, fn->irq_mask);
} else {
/* Write Control Register values back to device */
@@ -351,7 +356,7 @@ static inline int rmi_f30_initialize(struct rmi_function *fn)
f30->gpioled_key_map = (u16 *)map_memory;
pdata = rmi_get_platform_data(rmi_dev);
- if (pdata && f30->has_gpio) {
+ if (f30->has_gpio) {
button = BTN_LEFT;
for (i = 0; i < f30->gpioled_count; i++) {
if (rmi_f30_is_valid_button(i, f30->ctrl)) {
@@ -362,8 +367,7 @@ static inline int rmi_f30_initialize(struct rmi_function *fn)
* f30->has_mech_mouse_btns, but I am
* not sure, so use only the pdata info
*/
- if (pdata->f30_data &&
- pdata->f30_data->buttonpad)
+ if (pdata->f30_data.buttonpad)
break;
}
}
@@ -378,7 +382,7 @@ static int rmi_f30_probe(struct rmi_function *fn)
const struct rmi_device_platform_data *pdata =
rmi_get_platform_data(fn->rmi_dev);
- if (pdata->f30_data && pdata->f30_data->disable)
+ if (pdata->f30_data.disable)
return 0;
rc = rmi_f30_initialize(fn);
diff --git a/drivers/input/rmi4/rmi_f34.c b/drivers/input/rmi4/rmi_f34.c
new file mode 100644
index 000000000000..9774dfbab9bb
--- /dev/null
+++ b/drivers/input/rmi4/rmi_f34.c
@@ -0,0 +1,516 @@
+/*
+ * Copyright (c) 2007-2016, Synaptics Incorporated
+ * Copyright (C) 2016 Zodiac Inflight Innovations
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/rmi.h>
+#include <linux/firmware.h>
+#include <asm/unaligned.h>
+#include <linux/bitops.h>
+
+#include "rmi_driver.h"
+#include "rmi_f34.h"
+
+static int rmi_f34_write_bootloader_id(struct f34_data *f34)
+{
+ struct rmi_function *fn = f34->fn;
+ struct rmi_device *rmi_dev = fn->rmi_dev;
+ u8 bootloader_id[F34_BOOTLOADER_ID_LEN];
+ int ret;
+
+ ret = rmi_read_block(rmi_dev, fn->fd.query_base_addr,
+ bootloader_id, sizeof(bootloader_id));
+ if (ret) {
+ dev_err(&fn->dev, "%s: Reading bootloader ID failed: %d\n",
+ __func__, ret);
+ return ret;
+ }
+
+ rmi_dbg(RMI_DEBUG_FN, &fn->dev, "%s: writing bootloader id '%c%c'\n",
+ __func__, bootloader_id[0], bootloader_id[1]);
+
+ ret = rmi_write_block(rmi_dev,
+ fn->fd.data_base_addr + F34_BLOCK_DATA_OFFSET,
+ bootloader_id, sizeof(bootloader_id));
+ if (ret) {
+ dev_err(&fn->dev, "Failed to write bootloader ID: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int rmi_f34_command(struct f34_data *f34, u8 command,
+ unsigned int timeout, bool write_bl_id)
+{
+ struct rmi_function *fn = f34->fn;
+ struct rmi_device *rmi_dev = fn->rmi_dev;
+ int ret;
+
+ if (write_bl_id) {
+ ret = rmi_f34_write_bootloader_id(f34);
+ if (ret)
+ return ret;
+ }
+
+ init_completion(&f34->v5.cmd_done);
+
+ ret = rmi_read(rmi_dev, f34->v5.ctrl_address, &f34->v5.status);
+ if (ret) {
+ dev_err(&f34->fn->dev,
+ "%s: Failed to read cmd register: %d (command %#02x)\n",
+ __func__, ret, command);
+ return ret;
+ }
+
+ f34->v5.status |= command & 0x0f;
+
+ ret = rmi_write(rmi_dev, f34->v5.ctrl_address, f34->v5.status);
+ if (ret < 0) {
+ dev_err(&f34->fn->dev,
+ "Failed to write F34 command %#02x: %d\n",
+ command, ret);
+ return ret;
+ }
+
+ if (!wait_for_completion_timeout(&f34->v5.cmd_done,
+ msecs_to_jiffies(timeout))) {
+
+ ret = rmi_read(rmi_dev, f34->v5.ctrl_address, &f34->v5.status);
+ if (ret) {
+ dev_err(&f34->fn->dev,
+ "%s: cmd %#02x timed out: %d\n",
+ __func__, command, ret);
+ return ret;
+ }
+
+ if (f34->v5.status & 0x7f) {
+ dev_err(&f34->fn->dev,
+ "%s: cmd %#02x timed out, status: %#02x\n",
+ __func__, command, f34->v5.status);
+ return -ETIMEDOUT;
+ }
+ }
+
+ return 0;
+}
+
+static int rmi_f34_attention(struct rmi_function *fn, unsigned long *irq_bits)
+{
+ struct f34_data *f34 = dev_get_drvdata(&fn->dev);
+ int ret;
+
+ if (f34->bl_version != 5)
+ return 0;
+
+ ret = rmi_read(f34->fn->rmi_dev, f34->v5.ctrl_address, &f34->v5.status);
+ rmi_dbg(RMI_DEBUG_FN, &fn->dev, "%s: status: %#02x, ret: %d\n",
+ __func__, f34->v5.status, ret);
+
+ if (!ret && !(f34->v5.status & 0x7f))
+ complete(&f34->v5.cmd_done);
+
+ return 0;
+}
+
+static int rmi_f34_write_blocks(struct f34_data *f34, const void *data,
+ int block_count, u8 command)
+{
+ struct rmi_function *fn = f34->fn;
+ struct rmi_device *rmi_dev = fn->rmi_dev;
+ u16 address = fn->fd.data_base_addr + F34_BLOCK_DATA_OFFSET;
+ u8 start_address[] = { 0, 0 };
+ int i;
+ int ret;
+
+ ret = rmi_write_block(rmi_dev, fn->fd.data_base_addr,
+ start_address, sizeof(start_address));
+ if (ret) {
+ dev_err(&fn->dev, "Failed to write initial zeros: %d\n", ret);
+ return ret;
+ }
+
+ for (i = 0; i < block_count; i++) {
+ ret = rmi_write_block(rmi_dev, address,
+ data, f34->v5.block_size);
+ if (ret) {
+ dev_err(&fn->dev,
+ "failed to write block #%d: %d\n", i, ret);
+ return ret;
+ }
+
+ ret = rmi_f34_command(f34, command, F34_IDLE_WAIT_MS, false);
+ if (ret) {
+ dev_err(&fn->dev,
+ "Failed to write command for block #%d: %d\n",
+ i, ret);
+ return ret;
+ }
+
+ rmi_dbg(RMI_DEBUG_FN, &fn->dev, "wrote block %d of %d\n",
+ i + 1, block_count);
+
+ data += f34->v5.block_size;
+ }
+
+ return 0;
+}
+
+static int rmi_f34_write_firmware(struct f34_data *f34, const void *data)
+{
+ return rmi_f34_write_blocks(f34, data, f34->v5.fw_blocks,
+ F34_WRITE_FW_BLOCK);
+}
+
+static int rmi_f34_write_config(struct f34_data *f34, const void *data)
+{
+ return rmi_f34_write_blocks(f34, data, f34->v5.config_blocks,
+ F34_WRITE_CONFIG_BLOCK);
+}
+
+int rmi_f34_enable_flash(struct f34_data *f34)
+{
+ return rmi_f34_command(f34, F34_ENABLE_FLASH_PROG,
+ F34_ENABLE_WAIT_MS, true);
+}
+
+static int rmi_f34_flash_firmware(struct f34_data *f34,
+ const struct rmi_f34_firmware *syn_fw)
+{
+ struct rmi_function *fn = f34->fn;
+ int ret;
+
+ if (syn_fw->image_size) {
+ dev_info(&fn->dev, "Erasing firmware...\n");
+ ret = rmi_f34_command(f34, F34_ERASE_ALL,
+ F34_ERASE_WAIT_MS, true);
+ if (ret)
+ return ret;
+
+ dev_info(&fn->dev, "Writing firmware (%d bytes)...\n",
+ syn_fw->image_size);
+ ret = rmi_f34_write_firmware(f34, syn_fw->data);
+ if (ret)
+ return ret;
+ }
+
+ if (syn_fw->config_size) {
+ /*
+ * We only need to erase config if we haven't updated
+ * firmware.
+ */
+ if (!syn_fw->image_size) {
+ dev_info(&fn->dev, "Erasing config...\n");
+ ret = rmi_f34_command(f34, F34_ERASE_CONFIG,
+ F34_ERASE_WAIT_MS, true);
+ if (ret)
+ return ret;
+ }
+
+ dev_info(&fn->dev, "Writing config (%d bytes)...\n",
+ syn_fw->config_size);
+ ret = rmi_f34_write_config(f34,
+ &syn_fw->data[syn_fw->image_size]);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+int rmi_f34_update_firmware(struct f34_data *f34, const struct firmware *fw)
+{
+ const struct rmi_f34_firmware *syn_fw;
+ int ret;
+
+ syn_fw = (const struct rmi_f34_firmware *)fw->data;
+ BUILD_BUG_ON(offsetof(struct rmi_f34_firmware, data) !=
+ F34_FW_IMAGE_OFFSET);
+
+ rmi_dbg(RMI_DEBUG_FN, &f34->fn->dev,
+ "FW size:%d, checksum:%08x, image_size:%d, config_size:%d\n",
+ (int)fw->size,
+ le32_to_cpu(syn_fw->checksum),
+ le32_to_cpu(syn_fw->image_size),
+ le32_to_cpu(syn_fw->config_size));
+
+ rmi_dbg(RMI_DEBUG_FN, &f34->fn->dev,
+ "FW bootloader_id:%02x, product_id:%.*s, info: %02x%02x\n",
+ syn_fw->bootloader_version,
+ (int)sizeof(syn_fw->product_id), syn_fw->product_id,
+ syn_fw->product_info[0], syn_fw->product_info[1]);
+
+ if (syn_fw->image_size &&
+ syn_fw->image_size != f34->v5.fw_blocks * f34->v5.block_size) {
+ dev_err(&f34->fn->dev,
+ "Bad firmware image: fw size %d, expected %d\n",
+ syn_fw->image_size,
+ f34->v5.fw_blocks * f34->v5.block_size);
+ ret = -EILSEQ;
+ goto out;
+ }
+
+ if (syn_fw->config_size &&
+ syn_fw->config_size != f34->v5.config_blocks * f34->v5.block_size) {
+ dev_err(&f34->fn->dev,
+ "Bad firmware image: config size %d, expected %d\n",
+ syn_fw->config_size,
+ f34->v5.config_blocks * f34->v5.block_size);
+ ret = -EILSEQ;
+ goto out;
+ }
+
+ if (syn_fw->image_size && !syn_fw->config_size) {
+ dev_err(&f34->fn->dev, "Bad firmware image: no config data\n");
+ ret = -EILSEQ;
+ goto out;
+ }
+
+ dev_info(&f34->fn->dev, "Firmware image OK\n");
+ mutex_lock(&f34->v5.flash_mutex);
+
+ ret = rmi_f34_flash_firmware(f34, syn_fw);
+
+ mutex_unlock(&f34->v5.flash_mutex);
+
+out:
+ return ret;
+}
+
+static int rmi_firmware_update(struct rmi_driver_data *data,
+ const struct firmware *fw)
+{
+ struct rmi_device *rmi_dev = data->rmi_dev;
+ struct device *dev = &rmi_dev->dev;
+ struct f34_data *f34;
+ int ret;
+
+ if (!data->f34_container) {
+ dev_warn(dev, "%s: No F34 present!\n", __func__);
+ return -EINVAL;
+ }
+
+ f34 = dev_get_drvdata(&data->f34_container->dev);
+
+ if (f34->bl_version == 7) {
+ if (data->pdt_props & HAS_BSR) {
+ dev_err(dev, "%s: LTS not supported\n", __func__);
+ return -ENODEV;
+ }
+ } else if (f34->bl_version != 5) {
+ dev_warn(dev, "F34 V%d not supported!\n",
+ data->f34_container->fd.function_version);
+ return -ENODEV;
+ }
+
+ /* Enter flash mode */
+ if (f34->bl_version == 7)
+ ret = rmi_f34v7_start_reflash(f34, fw);
+ else
+ ret = rmi_f34_enable_flash(f34);
+ if (ret)
+ return ret;
+
+ rmi_disable_irq(rmi_dev, false);
+
+ /* Tear down functions and re-probe */
+ rmi_free_function_list(rmi_dev);
+
+ ret = rmi_probe_interrupts(data);
+ if (ret)
+ return ret;
+
+ ret = rmi_init_functions(data);
+ if (ret)
+ return ret;
+
+ if (!data->bootloader_mode || !data->f34_container) {
+ dev_warn(dev, "%s: No F34 present or not in bootloader!\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ rmi_enable_irq(rmi_dev, false);
+
+ f34 = dev_get_drvdata(&data->f34_container->dev);
+
+ /* Perform firmware update */
+ if (f34->bl_version == 7)
+ ret = rmi_f34v7_do_reflash(f34, fw);
+ else
+ ret = rmi_f34_update_firmware(f34, fw);
+
+ dev_info(&f34->fn->dev, "Firmware update complete, status:%d\n", ret);
+
+ rmi_disable_irq(rmi_dev, false);
+
+ /* Re-probe */
+ rmi_dbg(RMI_DEBUG_FN, dev, "Re-probing device\n");
+ rmi_free_function_list(rmi_dev);
+
+ ret = rmi_scan_pdt(rmi_dev, NULL, rmi_initial_reset);
+ if (ret < 0)
+ dev_warn(dev, "RMI reset failed!\n");
+
+ ret = rmi_probe_interrupts(data);
+ if (ret)
+ return ret;
+
+ ret = rmi_init_functions(data);
+ if (ret)
+ return ret;
+
+ rmi_enable_irq(rmi_dev, false);
+
+ if (data->f01_container->dev.driver)
+ /* Driver already bound, so enable ATTN now. */
+ return rmi_enable_sensor(rmi_dev);
+
+ rmi_dbg(RMI_DEBUG_FN, dev, "%s complete\n", __func__);
+
+ return ret;
+}
+
+static ssize_t rmi_driver_update_fw_store(struct device *dev,
+ struct device_attribute *dattr,
+ const char *buf, size_t count)
+{
+ struct rmi_driver_data *data = dev_get_drvdata(dev);
+ char fw_name[NAME_MAX];
+ const struct firmware *fw;
+ size_t copy_count = count;
+ int ret;
+
+ if (count == 0 || count >= NAME_MAX)
+ return -EINVAL;
+
+ if (buf[count - 1] == '\0' || buf[count - 1] == '\n')
+ copy_count -= 1;
+
+ strncpy(fw_name, buf, copy_count);
+ fw_name[copy_count] = '\0';
+
+ ret = request_firmware(&fw, fw_name, dev);
+ if (ret)
+ return ret;
+
+ dev_info(dev, "Flashing %s\n", fw_name);
+
+ ret = rmi_firmware_update(data, fw);
+
+ release_firmware(fw);
+
+ return ret ?: count;
+}
+
+static DEVICE_ATTR(update_fw, 0200, NULL, rmi_driver_update_fw_store);
+
+static struct attribute *rmi_firmware_attrs[] = {
+ &dev_attr_update_fw.attr,
+ NULL
+};
+
+static struct attribute_group rmi_firmware_attr_group = {
+ .attrs = rmi_firmware_attrs,
+};
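Triggering a reflash from userspace then amounts to writing a firmware name (resolved by request_firmware() relative to /lib/firmware) into the new attribute. A hedged sketch; the sysfs path varies by transport and is illustrative only:

	#include <stdio.h>

	int main(void)
	{
		/* illustrative path under the RMI device's sysfs node */
		FILE *f = fopen("/sys/devices/.../rmi4-00/update_fw", "w");

		if (!f)
			return 1;
		fputs("rmi4_firmware.img\n", f);
		return fclose(f) != 0;
	}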
+
+static int rmi_f34_probe(struct rmi_function *fn)
+{
+ struct f34_data *f34;
+ unsigned char f34_queries[9];
+ bool has_config_id;
+ u8 version = fn->fd.function_version;
+ int ret;
+
+ f34 = devm_kzalloc(&fn->dev, sizeof(struct f34_data), GFP_KERNEL);
+ if (!f34)
+ return -ENOMEM;
+
+ f34->fn = fn;
+ dev_set_drvdata(&fn->dev, f34);
+
+ /* The v5 code only supports function version 0; try the V7 probe for anything newer */
+ if (version > 0)
+ return rmi_f34v7_probe(f34);
+
+ f34->bl_version = 5;
+
+ ret = rmi_read_block(fn->rmi_dev, fn->fd.query_base_addr,
+ f34_queries, sizeof(f34_queries));
+ if (ret) {
+ dev_err(&fn->dev, "%s: Failed to query properties\n",
+ __func__);
+ return ret;
+ }
+
+ snprintf(f34->bootloader_id, sizeof(f34->bootloader_id),
+ "%c%c", f34_queries[0], f34_queries[1]);
+
+ mutex_init(&f34->v5.flash_mutex);
+ init_completion(&f34->v5.cmd_done);
+
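+ /* Query bytes 3-8 hold the LE16 block size, fw block count, and config block count */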
+ f34->v5.block_size = get_unaligned_le16(&f34_queries[3]);
+ f34->v5.fw_blocks = get_unaligned_le16(&f34_queries[5]);
+ f34->v5.config_blocks = get_unaligned_le16(&f34_queries[7]);
+ f34->v5.ctrl_address = fn->fd.data_base_addr + F34_BLOCK_DATA_OFFSET +
+ f34->v5.block_size;
+ has_config_id = f34_queries[2] & (1 << 2);
+
+ rmi_dbg(RMI_DEBUG_FN, &fn->dev, "Bootloader ID: %s\n",
+ f34->bootloader_id);
+ rmi_dbg(RMI_DEBUG_FN, &fn->dev, "Block size: %d\n",
+ f34->v5.block_size);
+ rmi_dbg(RMI_DEBUG_FN, &fn->dev, "FW blocks: %d\n",
+ f34->v5.fw_blocks);
+ rmi_dbg(RMI_DEBUG_FN, &fn->dev, "CFG blocks: %d\n",
+ f34->v5.config_blocks);
+
+ if (has_config_id) {
+ ret = rmi_read_block(fn->rmi_dev, fn->fd.control_base_addr,
+ f34_queries, sizeof(f34_queries));
+ if (ret) {
+ dev_err(&fn->dev, "Failed to read F34 config ID\n");
+ return ret;
+ }
+
+ snprintf(f34->configuration_id, sizeof(f34->configuration_id),
+ "%02x%02x%02x%02x",
+ f34_queries[0], f34_queries[1],
+ f34_queries[2], f34_queries[3]);
+
+ rmi_dbg(RMI_DEBUG_FN, &fn->dev, "Configuration ID: %s\n",
+ f34->configuration_id);
+ }
+
+ return 0;
+}
+
+int rmi_f34_create_sysfs(struct rmi_device *rmi_dev)
+{
+ return sysfs_create_group(&rmi_dev->dev.kobj, &rmi_firmware_attr_group);
+}
+
+void rmi_f34_remove_sysfs(struct rmi_device *rmi_dev)
+{
+ sysfs_remove_group(&rmi_dev->dev.kobj, &rmi_firmware_attr_group);
+}
+
+struct rmi_function_handler rmi_f34_handler = {
+ .driver = {
+ .name = "rmi4_f34",
+ },
+ .func = 0x34,
+ .probe = rmi_f34_probe,
+ .attention = rmi_f34_attention,
+};
diff --git a/drivers/input/rmi4/rmi_f34.h b/drivers/input/rmi4/rmi_f34.h
new file mode 100644
index 000000000000..2c21056dc375
--- /dev/null
+++ b/drivers/input/rmi4/rmi_f34.h
@@ -0,0 +1,314 @@
+/*
+ * Copyright (c) 2007-2016, Synaptics Incorporated
+ * Copyright (C) 2016 Zodiac Inflight Innovations
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ */
+
+#ifndef _RMI_F34_H
+#define _RMI_F34_H
+
+/* F34 image file offsets. */
+#define F34_FW_IMAGE_OFFSET 0x100
+
+/* F34 register offsets. */
+#define F34_BLOCK_DATA_OFFSET 2
+
+/* F34 commands */
+#define F34_WRITE_FW_BLOCK 0x2
+#define F34_ERASE_ALL 0x3
+#define F34_READ_CONFIG_BLOCK 0x5
+#define F34_WRITE_CONFIG_BLOCK 0x6
+#define F34_ERASE_CONFIG 0x7
+#define F34_ENABLE_FLASH_PROG 0xf
+
+#define F34_STATUS_IN_PROGRESS 0xff
+#define F34_STATUS_IDLE 0x80
+
+#define F34_IDLE_WAIT_MS 500
+#define F34_ENABLE_WAIT_MS 300
+#define F34_ERASE_WAIT_MS 5000
+
+#define F34_BOOTLOADER_ID_LEN 2
+
+/* F34 V7 defines */
+#define V7_FLASH_STATUS_OFFSET 0
+#define V7_PARTITION_ID_OFFSET 1
+#define V7_BLOCK_NUMBER_OFFSET 2
+#define V7_TRANSFER_LENGTH_OFFSET 3
+#define V7_COMMAND_OFFSET 4
+#define V7_PAYLOAD_OFFSET 5
+#define V7_BOOTLOADER_ID_OFFSET 1
+
+#define IMAGE_HEADER_VERSION_10 0x10
+
+#define CONFIG_ID_SIZE 32
+#define PRODUCT_ID_SIZE 10
+
+#define ENABLE_WAIT_MS (1 * 1000)
+#define WRITE_WAIT_MS (3 * 1000)
+
+#define MIN_SLEEP_TIME_US 50
+#define MAX_SLEEP_TIME_US 100
+
+#define HAS_BSR BIT(5)
+#define HAS_CONFIG_ID BIT(3)
+#define HAS_GUEST_CODE BIT(6)
+#define HAS_DISP_CFG BIT(5)
+
+/* F34 V7 commands */
+#define CMD_V7_IDLE 0
+#define CMD_V7_ENTER_BL 1
+#define CMD_V7_READ 2
+#define CMD_V7_WRITE 3
+#define CMD_V7_ERASE 4
+#define CMD_V7_ERASE_AP 5
+#define CMD_V7_SENSOR_ID 6
+
+#define v7_CMD_IDLE 0
+#define v7_CMD_WRITE_FW 1
+#define v7_CMD_WRITE_CONFIG 2
+#define v7_CMD_WRITE_LOCKDOWN 3
+#define v7_CMD_WRITE_GUEST_CODE 4
+#define v7_CMD_READ_CONFIG 5
+#define v7_CMD_ERASE_ALL 6
+#define v7_CMD_ERASE_UI_FIRMWARE 7
+#define v7_CMD_ERASE_UI_CONFIG 8
+#define v7_CMD_ERASE_BL_CONFIG 9
+#define v7_CMD_ERASE_DISP_CONFIG 10
+#define v7_CMD_ERASE_FLASH_CONFIG 11
+#define v7_CMD_ERASE_GUEST_CODE 12
+#define v7_CMD_ENABLE_FLASH_PROG 13
+
+#define v7_UI_CONFIG_AREA 0
+#define v7_PM_CONFIG_AREA 1
+#define v7_BL_CONFIG_AREA 2
+#define v7_DP_CONFIG_AREA 3
+#define v7_FLASH_CONFIG_AREA 4
+
+/* F34 V7 partition IDs */
+#define BOOTLOADER_PARTITION 1
+#define DEVICE_CONFIG_PARTITION 2
+#define FLASH_CONFIG_PARTITION 3
+#define MANUFACTURING_BLOCK_PARTITION 4
+#define GUEST_SERIALIZATION_PARTITION 5
+#define GLOBAL_PARAMETERS_PARTITION 6
+#define CORE_CODE_PARTITION 7
+#define CORE_CONFIG_PARTITION 8
+#define GUEST_CODE_PARTITION 9
+#define DISPLAY_CONFIG_PARTITION 10
+
+/* F34 V7 container IDs */
+#define TOP_LEVEL_CONTAINER 0
+#define UI_CONTAINER 1
+#define UI_CONFIG_CONTAINER 2
+#define BL_CONTAINER 3
+#define BL_IMAGE_CONTAINER 4
+#define BL_CONFIG_CONTAINER 5
+#define BL_LOCKDOWN_INFO_CONTAINER 6
+#define PERMANENT_CONFIG_CONTAINER 7
+#define GUEST_CODE_CONTAINER 8
+#define BL_PROTOCOL_DESCRIPTOR_CONTAINER 9
+#define UI_PROTOCOL_DESCRIPTOR_CONTAINER 10
+#define RMI_SELF_DISCOVERY_CONTAINER 11
+#define RMI_PAGE_CONTENT_CONTAINER 12
+#define GENERAL_INFORMATION_CONTAINER 13
+#define DEVICE_CONFIG_CONTAINER 14
+#define FLASH_CONFIG_CONTAINER 15
+#define GUEST_SERIALIZATION_CONTAINER 16
+#define GLOBAL_PARAMETERS_CONTAINER 17
+#define CORE_CODE_CONTAINER 18
+#define CORE_CONFIG_CONTAINER 19
+#define DISPLAY_CONFIG_CONTAINER 20
+
+struct f34v7_query_1_7 {
+ u8 bl_minor_revision; /* query 1 */
+ u8 bl_major_revision;
+ __le32 bl_fw_id; /* query 2 */
+ u8 minimum_write_size; /* query 3 */
+ __le16 block_size;
+ __le16 flash_page_size;
+ __le16 adjustable_partition_area_size; /* query 4 */
+ __le16 flash_config_length; /* query 5 */
+ __le16 payload_length; /* query 6 */
+ u8 partition_support[4]; /* query 7 */
+} __packed;
+
+struct f34v7_data_1_5 {
+ u8 partition_id;
+ __le16 block_offset;
+ __le16 transfer_length;
+ u8 command;
+ u8 payload[2];
+} __packed;
+
+struct block_data {
+ const void *data;
+ int size;
+};
+
+struct partition_table {
+ u8 partition_id;
+ u8 byte_1_reserved;
+ __le16 partition_length;
+ __le16 start_physical_address;
+ __le16 partition_properties;
+} __packed;
+
+struct physical_address {
+ u16 ui_firmware;
+ u16 ui_config;
+ u16 dp_config;
+ u16 guest_code;
+};
+
+struct container_descriptor {
+ __le32 content_checksum;
+ __le16 container_id;
+ u8 minor_version;
+ u8 major_version;
+ u8 reserved_08;
+ u8 reserved_09;
+ u8 reserved_0a;
+ u8 reserved_0b;
+ u8 container_option_flags[4];
+ __le32 content_options_length;
+ __le32 content_options_address;
+ __le32 content_length;
+ __le32 content_address;
+} __packed;
+
+struct block_count {
+ u16 ui_firmware;
+ u16 ui_config;
+ u16 dp_config;
+ u16 fl_config;
+ u16 pm_config;
+ u16 bl_config;
+ u16 lockdown;
+ u16 guest_code;
+};
+
+struct image_header_10 {
+ __le32 checksum;
+ u8 reserved_04;
+ u8 reserved_05;
+ u8 minor_header_version;
+ u8 major_header_version;
+ u8 reserved_08;
+ u8 reserved_09;
+ u8 reserved_0a;
+ u8 reserved_0b;
+ __le32 top_level_container_start_addr;
+};
+
+struct image_metadata {
+ bool contains_firmware_id;
+ bool contains_bootloader;
+ bool contains_display_cfg;
+ bool contains_guest_code;
+ bool contains_flash_config;
+ unsigned int firmware_id;
+ unsigned int checksum;
+ unsigned int bootloader_size;
+ unsigned int display_cfg_offset;
+ unsigned char bl_version;
+ unsigned char product_id[PRODUCT_ID_SIZE + 1];
+ unsigned char cstmr_product_id[PRODUCT_ID_SIZE + 1];
+ struct block_data bootloader;
+ struct block_data ui_firmware;
+ struct block_data ui_config;
+ struct block_data dp_config;
+ struct block_data fl_config;
+ struct block_data bl_config;
+ struct block_data guest_code;
+ struct block_data lockdown;
+ struct block_count blkcount;
+ struct physical_address phyaddr;
+};
+
+struct register_offset {
+ u8 properties;
+ u8 properties_2;
+ u8 block_size;
+ u8 block_count;
+ u8 gc_block_count;
+ u8 flash_status;
+ u8 partition_id;
+ u8 block_number;
+ u8 transfer_length;
+ u8 flash_cmd;
+ u8 payload;
+};
+
+struct rmi_f34_firmware {
+ __le32 checksum;
+ u8 pad1[3];
+ u8 bootloader_version;
+ __le32 image_size;
+ __le32 config_size;
+ u8 product_id[10];
+ u8 product_info[2];
+ u8 pad2[228];
+ u8 data[];
+};
+
+struct f34v5_data {
+ u16 block_size;
+ u16 fw_blocks;
+ u16 config_blocks;
+ u16 ctrl_address;
+ u8 status;
+
+ struct completion cmd_done;
+ struct mutex flash_mutex;
+};
+
+struct f34v7_data {
+ bool has_display_cfg;
+ bool has_guest_code;
+ bool force_update;
+ bool in_bl_mode;
+ u8 *read_config_buf;
+ size_t read_config_buf_size;
+ u8 command;
+ u8 flash_status;
+ u16 block_size;
+ u16 config_block_count;
+ u16 config_size;
+ u16 config_area;
+ u16 flash_config_length;
+ u16 payload_length;
+ u8 partitions;
+ u16 partition_table_bytes;
+ bool new_partition_table;
+
+ struct register_offset off;
+ struct block_count blkcount;
+ struct physical_address phyaddr;
+ struct image_metadata img;
+
+ const void *config_data;
+ const void *image;
+};
+
+struct f34_data {
+ struct rmi_function *fn;
+
+ u8 bl_version;
+ unsigned char bootloader_id[5];
+ unsigned char configuration_id[CONFIG_ID_SIZE*2 + 1];
+
+ union {
+ struct f34v5_data v5;
+ struct f34v7_data v7;
+ };
+};
+
+int rmi_f34v7_start_reflash(struct f34_data *f34, const struct firmware *fw);
+int rmi_f34v7_do_reflash(struct f34_data *f34, const struct firmware *fw);
+int rmi_f34v7_probe(struct f34_data *f34);
+
+#endif /* _RMI_F34_H */
diff --git a/drivers/input/rmi4/rmi_f34v7.c b/drivers/input/rmi4/rmi_f34v7.c
new file mode 100644
index 000000000000..ca31f9539d9b
--- /dev/null
+++ b/drivers/input/rmi4/rmi_f34v7.c
@@ -0,0 +1,1372 @@
+/*
+ * Copyright (c) 2016, Zodiac Inflight Innovations
+ * Copyright (c) 2007-2016, Synaptics Incorporated
+ * Copyright (C) 2012 Alexandra Chin <alexandra.chin@tw.synaptics.com>
+ * Copyright (C) 2012 Scott Lin <scott.lin@tw.synaptics.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/rmi.h>
+#include <linux/firmware.h>
+#include <asm/unaligned.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+
+#include "rmi_driver.h"
+#include "rmi_f34.h"
+
+static int rmi_f34v7_read_flash_status(struct f34_data *f34)
+{
+ u8 status;
+ u8 command;
+ int ret;
+
+ ret = rmi_read_block(f34->fn->rmi_dev,
+ f34->fn->fd.data_base_addr + f34->v7.off.flash_status,
+ &status,
+ sizeof(status));
+ if (ret < 0) {
+ rmi_dbg(RMI_DEBUG_FN, &f34->fn->dev,
+ "%s: Failed to read flash status\n", __func__);
+ return ret;
+ }
+
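+ /* Bit 7 flags bootloader mode; the low five bits carry the status code */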
+ f34->v7.in_bl_mode = status >> 7;
+ f34->v7.flash_status = status & 0x1f;
+
+ if (f34->v7.flash_status != 0x00) {
+ dev_err(&f34->fn->dev, "%s: status=%d, command=0x%02x\n",
+ __func__, f34->v7.flash_status, f34->v7.command);
+ }
+
+ ret = rmi_read_block(f34->fn->rmi_dev,
+ f34->fn->fd.data_base_addr + f34->v7.off.flash_cmd,
+ &command,
+ sizeof(command));
+ if (ret < 0) {
+ dev_err(&f34->fn->dev, "%s: Failed to read flash command\n",
+ __func__);
+ return ret;
+ }
+
+ f34->v7.command = command;
+
+ return 0;
+}
+
+static int rmi_f34v7_wait_for_idle(struct f34_data *f34, int timeout_ms)
+{
+ int count = 0;
+ int timeout_count = ((timeout_ms * 1000) / MAX_SLEEP_TIME_US) + 1;
+
+ do {
+ usleep_range(MIN_SLEEP_TIME_US, MAX_SLEEP_TIME_US);
+
+ count++;
+
+ rmi_f34v7_read_flash_status(f34);
+
+ if ((f34->v7.command == v7_CMD_IDLE)
+ && (f34->v7.flash_status == 0x00)) {
+ rmi_dbg(RMI_DEBUG_FN, &f34->fn->dev,
+ "Idle status detected\n");
+ return 0;
+ }
+ } while (count < timeout_count);
+
+ dev_err(&f34->fn->dev,
+ "%s: Timed out waiting for idle status\n", __func__);
+
+ return -ETIMEDOUT;
+}
+
+static int rmi_f34v7_write_command_single_transaction(struct f34_data *f34,
+ u8 cmd)
+{
+ int ret;
+ u8 base;
+ struct f34v7_data_1_5 data_1_5;
+
+ base = f34->fn->fd.data_base_addr;
+
+ memset(&data_1_5, 0, sizeof(data_1_5));
+
+ switch (cmd) {
+ case v7_CMD_ERASE_ALL:
+ data_1_5.partition_id = CORE_CODE_PARTITION;
+ data_1_5.command = CMD_V7_ERASE_AP;
+ break;
+ case v7_CMD_ERASE_UI_FIRMWARE:
+ data_1_5.partition_id = CORE_CODE_PARTITION;
+ data_1_5.command = CMD_V7_ERASE;
+ break;
+ case v7_CMD_ERASE_BL_CONFIG:
+ data_1_5.partition_id = GLOBAL_PARAMETERS_PARTITION;
+ data_1_5.command = CMD_V7_ERASE;
+ break;
+ case v7_CMD_ERASE_UI_CONFIG:
+ data_1_5.partition_id = CORE_CONFIG_PARTITION;
+ data_1_5.command = CMD_V7_ERASE;
+ break;
+ case v7_CMD_ERASE_DISP_CONFIG:
+ data_1_5.partition_id = DISPLAY_CONFIG_PARTITION;
+ data_1_5.command = CMD_V7_ERASE;
+ break;
+ case v7_CMD_ERASE_FLASH_CONFIG:
+ data_1_5.partition_id = FLASH_CONFIG_PARTITION;
+ data_1_5.command = CMD_V7_ERASE;
+ break;
+ case v7_CMD_ERASE_GUEST_CODE:
+ data_1_5.partition_id = GUEST_CODE_PARTITION;
+ data_1_5.command = CMD_V7_ERASE;
+ break;
+ case v7_CMD_ENABLE_FLASH_PROG:
+ data_1_5.partition_id = BOOTLOADER_PARTITION;
+ data_1_5.command = CMD_V7_ENTER_BL;
+ break;
+ }
+
+ data_1_5.payload[0] = f34->bootloader_id[0];
+ data_1_5.payload[1] = f34->bootloader_id[1];
+
+ ret = rmi_write_block(f34->fn->rmi_dev,
+ base + f34->v7.off.partition_id,
+ &data_1_5, sizeof(data_1_5));
+ if (ret < 0) {
+ dev_err(&f34->fn->dev,
+ "%s: Failed to write single transaction command\n",
+ __func__);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int rmi_f34v7_write_command(struct f34_data *f34, u8 cmd)
+{
+ int ret;
+ u8 base;
+ u8 command;
+
+ base = f34->fn->fd.data_base_addr;
+
+ switch (cmd) {
+ case v7_CMD_WRITE_FW:
+ case v7_CMD_WRITE_CONFIG:
+ case v7_CMD_WRITE_GUEST_CODE:
+ command = CMD_V7_WRITE;
+ break;
+ case v7_CMD_READ_CONFIG:
+ command = CMD_V7_READ;
+ break;
+ case v7_CMD_ERASE_ALL:
+ command = CMD_V7_ERASE_AP;
+ break;
+ case v7_CMD_ERASE_UI_FIRMWARE:
+ case v7_CMD_ERASE_BL_CONFIG:
+ case v7_CMD_ERASE_UI_CONFIG:
+ case v7_CMD_ERASE_DISP_CONFIG:
+ case v7_CMD_ERASE_FLASH_CONFIG:
+ case v7_CMD_ERASE_GUEST_CODE:
+ command = CMD_V7_ERASE;
+ break;
+ case v7_CMD_ENABLE_FLASH_PROG:
+ command = CMD_V7_ENTER_BL;
+ break;
+ default:
+ dev_err(&f34->fn->dev, "%s: Invalid command 0x%02x\n",
+ __func__, cmd);
+ return -EINVAL;
+ }
+
+ f34->v7.command = command;
+
+ switch (cmd) {
+ case v7_CMD_ERASE_ALL:
+ case v7_CMD_ERASE_UI_FIRMWARE:
+ case v7_CMD_ERASE_BL_CONFIG:
+ case v7_CMD_ERASE_UI_CONFIG:
+ case v7_CMD_ERASE_DISP_CONFIG:
+ case v7_CMD_ERASE_FLASH_CONFIG:
+ case v7_CMD_ERASE_GUEST_CODE:
+ case v7_CMD_ENABLE_FLASH_PROG:
+ return rmi_f34v7_write_command_single_transaction(f34, cmd);
+ default:
+ break;
+ }
+
+ rmi_dbg(RMI_DEBUG_FN, &f34->fn->dev, "%s: writing cmd %02X\n",
+ __func__, command);
+
+ ret = rmi_write_block(f34->fn->rmi_dev,
+ base + f34->v7.off.flash_cmd,
+ &command, sizeof(command));
+ if (ret < 0) {
+ dev_err(&f34->fn->dev, "%s: Failed to write flash command\n",
+ __func__);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int rmi_f34v7_write_partition_id(struct f34_data *f34, u8 cmd)
+{
+ int ret;
+ u8 base;
+ u8 partition;
+
+ base = f34->fn->fd.data_base_addr;
+
+ switch (cmd) {
+ case v7_CMD_WRITE_FW:
+ partition = CORE_CODE_PARTITION;
+ break;
+ case v7_CMD_WRITE_CONFIG:
+ case v7_CMD_READ_CONFIG:
+ if (f34->v7.config_area == v7_UI_CONFIG_AREA)
+ partition = CORE_CONFIG_PARTITION;
+ else if (f34->v7.config_area == v7_DP_CONFIG_AREA)
+ partition = DISPLAY_CONFIG_PARTITION;
+ else if (f34->v7.config_area == v7_PM_CONFIG_AREA)
+ partition = GUEST_SERIALIZATION_PARTITION;
+ else if (f34->v7.config_area == v7_BL_CONFIG_AREA)
+ partition = GLOBAL_PARAMETERS_PARTITION;
+ else if (f34->v7.config_area == v7_FLASH_CONFIG_AREA)
+ partition = FLASH_CONFIG_PARTITION;
+ break;
+ case v7_CMD_WRITE_GUEST_CODE:
+ partition = GUEST_CODE_PARTITION;
+ break;
+ case v7_CMD_ERASE_ALL:
+ partition = CORE_CODE_PARTITION;
+ break;
+ case v7_CMD_ERASE_BL_CONFIG:
+ partition = GLOBAL_PARAMETERS_PARTITION;
+ break;
+ case v7_CMD_ERASE_UI_CONFIG:
+ partition = CORE_CONFIG_PARTITION;
+ break;
+ case v7_CMD_ERASE_DISP_CONFIG:
+ partition = DISPLAY_CONFIG_PARTITION;
+ break;
+ case v7_CMD_ERASE_FLASH_CONFIG:
+ partition = FLASH_CONFIG_PARTITION;
+ break;
+ case v7_CMD_ERASE_GUEST_CODE:
+ partition = GUEST_CODE_PARTITION;
+ break;
+ case v7_CMD_ENABLE_FLASH_PROG:
+ partition = BOOTLOADER_PARTITION;
+ break;
+ default:
+ dev_err(&f34->fn->dev, "%s: Invalid command 0x%02x\n",
+ __func__, cmd);
+ return -EINVAL;
+ }
+
+ ret = rmi_write_block(f34->fn->rmi_dev,
+ base + f34->v7.off.partition_id,
+ &partition, sizeof(partition));
+ if (ret < 0) {
+ dev_err(&f34->fn->dev, "%s: Failed to write partition ID\n",
+ __func__);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int rmi_f34v7_read_f34v7_partition_table(struct f34_data *f34)
+{
+ int ret;
+ u8 base;
+ __le16 length;
+ u16 block_number = 0;
+
+ base = f34->fn->fd.data_base_addr;
+
+ f34->v7.config_area = v7_FLASH_CONFIG_AREA;
+
+ ret = rmi_f34v7_write_partition_id(f34, v7_CMD_READ_CONFIG);
+ if (ret < 0)
+ return ret;
+
+ ret = rmi_write_block(f34->fn->rmi_dev,
+ base + f34->v7.off.block_number,
+ &block_number, sizeof(block_number));
+ if (ret < 0) {
+ dev_err(&f34->fn->dev, "%s: Failed to write block number\n",
+ __func__);
+ return ret;
+ }
+
+ put_unaligned_le16(f34->v7.flash_config_length, &length);
+
+ ret = rmi_write_block(f34->fn->rmi_dev,
+ base + f34->v7.off.transfer_length,
+ &length, sizeof(length));
+ if (ret < 0) {
+ dev_err(&f34->fn->dev, "%s: Failed to write transfer length\n",
+ __func__);
+ return ret;
+ }
+
+ ret = rmi_f34v7_write_command(f34, v7_CMD_READ_CONFIG);
+ if (ret < 0) {
+ dev_err(&f34->fn->dev, "%s: Failed to write command\n",
+ __func__);
+ return ret;
+ }
+
+ ret = rmi_f34v7_wait_for_idle(f34, WRITE_WAIT_MS);
+ if (ret < 0) {
+ dev_err(&f34->fn->dev, "%s: Failed to wait for idle status\n",
+ __func__);
+ return ret;
+ }
+
+ ret = rmi_read_block(f34->fn->rmi_dev,
+ base + f34->v7.off.payload,
+ f34->v7.read_config_buf,
+ f34->v7.partition_table_bytes);
+ if (ret < 0) {
+ dev_err(&f34->fn->dev, "%s: Failed to read block data\n",
+ __func__);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void rmi_f34v7_parse_partition_table(struct f34_data *f34,
+ const void *partition_table,
+ struct block_count *blkcount,
+ struct physical_address *phyaddr)
+{
+ int i;
+ int index;
+ u16 partition_length;
+ u16 physical_address;
+ const struct partition_table *ptable;
+
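+ /* The table is a 2-byte header followed by 8-byte partition entries */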
+ for (i = 0; i < f34->v7.partitions; i++) {
+ index = i * 8 + 2;
+ ptable = partition_table + index;
+ partition_length = le16_to_cpu(ptable->partition_length);
+ physical_address = le16_to_cpu(ptable->start_physical_address);
+ rmi_dbg(RMI_DEBUG_FN, &f34->fn->dev,
+ "%s: Partition entry %d: %*ph\n",
+ __func__, i, (int)sizeof(struct partition_table), ptable);
+ switch (ptable->partition_id & 0x1f) {
+ case CORE_CODE_PARTITION:
+ blkcount->ui_firmware = partition_length;
+ phyaddr->ui_firmware = physical_address;
+ rmi_dbg(RMI_DEBUG_FN, &f34->fn->dev,
+ "%s: Core code block count: %d\n",
+ __func__, blkcount->ui_firmware);
+ break;
+ case CORE_CONFIG_PARTITION:
+ blkcount->ui_config = partition_length;
+ phyaddr->ui_config = physical_address;
+ rmi_dbg(RMI_DEBUG_FN, &f34->fn->dev,
+ "%s: Core config block count: %d\n",
+ __func__, blkcount->ui_config);
+ break;
+ case DISPLAY_CONFIG_PARTITION:
+ blkcount->dp_config = partition_length;
+ phyaddr->dp_config = physical_address;
+ rmi_dbg(RMI_DEBUG_FN, &f34->fn->dev,
+ "%s: Display config block count: %d\n",
+ __func__, blkcount->dp_config);
+ break;
+ case FLASH_CONFIG_PARTITION:
+ blkcount->fl_config = partition_length;
+ rmi_dbg(RMI_DEBUG_FN, &f34->fn->dev,
+ "%s: Flash config block count: %d\n",
+ __func__, blkcount->fl_config);
+ break;
+ case GUEST_CODE_PARTITION:
+ blkcount->guest_code = partition_length;
+ phyaddr->guest_code = physical_address;
+ rmi_dbg(RMI_DEBUG_FN, &f34->fn->dev,
+ "%s: Guest code block count: %d\n",
+ __func__, blkcount->guest_code);
+ break;
+ case GUEST_SERIALIZATION_PARTITION:
+ blkcount->pm_config = partition_length;
+ rmi_dbg(RMI_DEBUG_FN, &f34->fn->dev,
+ "%s: Guest serialization block count: %d\n",
+ __func__, blkcount->pm_config);
+ break;
+ case GLOBAL_PARAMETERS_PARTITION:
+ blkcount->bl_config = partition_length;
+ rmi_dbg(RMI_DEBUG_FN, &f34->fn->dev,
+ "%s: Global parameters block count: %d\n",
+ __func__, blkcount->bl_config);
+ break;
+ case DEVICE_CONFIG_PARTITION:
+ blkcount->lockdown = partition_length;
+ rmi_dbg(RMI_DEBUG_FN, &f34->fn->dev,
+ "%s: Device config block count: %d\n",
+ __func__, blkcount->lockdown);
+ break;
+ }
+ }
+}
+
+static int rmi_f34v7_read_queries_bl_version(struct f34_data *f34)
+{
+ int ret;
+ u8 base;
+ int offset;
+ u8 query_0;
+ struct f34v7_query_1_7 query_1_7;
+
+ base = f34->fn->fd.query_base_addr;
+
+ ret = rmi_read_block(f34->fn->rmi_dev,
+ base,
+ &query_0,
+ sizeof(query_0));
+ if (ret < 0) {
+ dev_err(&f34->fn->dev,
+ "%s: Failed to read query 0\n", __func__);
+ return ret;
+ }
+
+ offset = (query_0 & 0x7) + 1;
+
+ ret = rmi_read_block(f34->fn->rmi_dev,
+ base + offset,
+ &query_1_7,
+ sizeof(query_1_7));
+ if (ret < 0) {
+ dev_err(&f34->fn->dev, "%s: Failed to read queries 1 to 7\n",
+ __func__);
+ return ret;
+ }
+
+ f34->bootloader_id[0] = query_1_7.bl_minor_revision;
+ f34->bootloader_id[1] = query_1_7.bl_major_revision;
+
+ rmi_dbg(RMI_DEBUG_FN, &f34->fn->dev, "Bootloader V%d.%d\n",
+ f34->bootloader_id[1], f34->bootloader_id[0]);
+
+ return 0;
+}
+
+static int rmi_f34v7_read_queries(struct f34_data *f34)
+{
+ int ret;
+ int i, j;
+ u8 base;
+ int offset;
+ u8 *ptable;
+ u8 query_0;
+ struct f34v7_query_1_7 query_1_7;
+
+ base = f34->fn->fd.query_base_addr;
+
+ ret = rmi_read_block(f34->fn->rmi_dev,
+ base,
+ &query_0,
+ sizeof(query_0));
+ if (ret < 0) {
+ dev_err(&f34->fn->dev,
+ "%s: Failed to read query 0\n", __func__);
+ return ret;
+ }
+
+ offset = (query_0 & 0x07) + 1;
+
+ ret = rmi_read_block(f34->fn->rmi_dev,
+ base + offset,
+ &query_1_7,
+ sizeof(query_1_7));
+ if (ret < 0) {
+ dev_err(&f34->fn->dev, "%s: Failed to read queries 1 to 7\n",
+ __func__);
+ return ret;
+ }
+
+ f34->bootloader_id[0] = query_1_7.bl_minor_revision;
+ f34->bootloader_id[1] = query_1_7.bl_major_revision;
+
+ f34->v7.block_size = le16_to_cpu(query_1_7.block_size);
+ f34->v7.flash_config_length =
+ le16_to_cpu(query_1_7.flash_config_length);
+ f34->v7.payload_length = le16_to_cpu(query_1_7.payload_length);
+
+ rmi_dbg(RMI_DEBUG_FN, &f34->fn->dev, "%s: f34->v7.block_size = %d\n",
+ __func__, f34->v7.block_size);
+
+ f34->v7.off.flash_status = V7_FLASH_STATUS_OFFSET;
+ f34->v7.off.partition_id = V7_PARTITION_ID_OFFSET;
+ f34->v7.off.block_number = V7_BLOCK_NUMBER_OFFSET;
+ f34->v7.off.transfer_length = V7_TRANSFER_LENGTH_OFFSET;
+ f34->v7.off.flash_cmd = V7_COMMAND_OFFSET;
+ f34->v7.off.payload = V7_PAYLOAD_OFFSET;
+
+ f34->v7.has_display_cfg = query_1_7.partition_support[1] & HAS_DISP_CFG;
+ f34->v7.has_guest_code =
+ query_1_7.partition_support[1] & HAS_GUEST_CODE;
+
+ if (query_0 & HAS_CONFIG_ID) {
+ char f34_ctrl[CONFIG_ID_SIZE];
+ int i = 0;
+ u8 *p = f34->configuration_id;
+ *p = '\0';
+
+ ret = rmi_read_block(f34->fn->rmi_dev,
+ f34->fn->fd.control_base_addr,
+ f34_ctrl,
+ sizeof(f34_ctrl));
+ if (ret)
+ return ret;
+
+ /* Eat leading zeros */
+ while (i < sizeof(f34_ctrl) && !f34_ctrl[i])
+ i++;
+
+ for (; i < sizeof(f34_ctrl); i++)
+ p += snprintf(p, f34->configuration_id
+ + sizeof(f34->configuration_id) - p,
+ "%02X", f34_ctrl[i]);
+
+ rmi_dbg(RMI_DEBUG_FN, &f34->fn->dev, "Configuration ID: %s\n",
+ f34->configuration_id);
+ }
+
+ f34->v7.partitions = 0;
+ for (i = 0; i < sizeof(query_1_7.partition_support); i++)
+ for (j = 0; j < 8; j++)
+ if (query_1_7.partition_support[i] & (1 << j))
+ f34->v7.partitions++;
+
+ rmi_dbg(RMI_DEBUG_FN, &f34->fn->dev, "%s: Supported partitions: %*ph\n",
+ __func__, (int)sizeof(query_1_7.partition_support),
+ query_1_7.partition_support);
+
+ f34->v7.partition_table_bytes = f34->v7.partitions * 8 + 2;
+
+ f34->v7.read_config_buf = devm_kzalloc(&f34->fn->dev,
+ f34->v7.partition_table_bytes,
+ GFP_KERNEL);
+ if (!f34->v7.read_config_buf) {
+ f34->v7.read_config_buf_size = 0;
+ return -ENOMEM;
+ }
+
+ f34->v7.read_config_buf_size = f34->v7.partition_table_bytes;
+ ptable = f34->v7.read_config_buf;
+
+ ret = rmi_f34v7_read_f34v7_partition_table(f34);
+ if (ret < 0) {
+ dev_err(&f34->fn->dev, "%s: Failed to read partition table\n",
+ __func__);
+ return ret;
+ }
+
+ rmi_f34v7_parse_partition_table(f34, ptable,
+ &f34->v7.blkcount, &f34->v7.phyaddr);
+
+ return 0;
+}
+
+static int rmi_f34v7_check_ui_firmware_size(struct f34_data *f34)
+{
+ u16 block_count;
+
+ block_count = f34->v7.img.ui_firmware.size / f34->v7.block_size;
+
+ if (block_count != f34->v7.blkcount.ui_firmware) {
+ dev_err(&f34->fn->dev,
+ "UI firmware size mismatch: %d != %d\n",
+ block_count, f34->v7.blkcount.ui_firmware);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int rmi_f34v7_check_ui_config_size(struct f34_data *f34)
+{
+ u16 block_count;
+
+ block_count = f34->v7.img.ui_config.size / f34->v7.block_size;
+
+ if (block_count != f34->v7.blkcount.ui_config) {
+ dev_err(&f34->fn->dev, "UI config size mismatch\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int rmi_f34v7_check_dp_config_size(struct f34_data *f34)
+{
+ u16 block_count;
+
+ block_count = f34->v7.img.dp_config.size / f34->v7.block_size;
+
+ if (block_count != f34->v7.blkcount.dp_config) {
+ dev_err(&f34->fn->dev, "Display config size mismatch\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int rmi_f34v7_check_guest_code_size(struct f34_data *f34)
+{
+ u16 block_count;
+
+ block_count = f34->v7.img.guest_code.size / f34->v7.block_size;
+ if (block_count != f34->v7.blkcount.guest_code) {
+ dev_err(&f34->fn->dev, "Guest code size mismatch\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int rmi_f34v7_check_bl_config_size(struct f34_data *f34)
+{
+ u16 block_count;
+
+ block_count = f34->v7.img.bl_config.size / f34->v7.block_size;
+
+ if (block_count != f34->v7.blkcount.bl_config) {
+ dev_err(&f34->fn->dev, "Bootloader config size mismatch\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int rmi_f34v7_erase_config(struct f34_data *f34)
+{
+ int ret;
+
+ dev_info(&f34->fn->dev, "Erasing config...\n");
+
+ switch (f34->v7.config_area) {
+ case v7_UI_CONFIG_AREA:
+ ret = rmi_f34v7_write_command(f34, v7_CMD_ERASE_UI_CONFIG);
+ if (ret < 0)
+ return ret;
+ break;
+ case v7_DP_CONFIG_AREA:
+ ret = rmi_f34v7_write_command(f34, v7_CMD_ERASE_DISP_CONFIG);
+ if (ret < 0)
+ return ret;
+ break;
+ case v7_BL_CONFIG_AREA:
+ ret = rmi_f34v7_write_command(f34, v7_CMD_ERASE_BL_CONFIG);
+ if (ret < 0)
+ return ret;
+ break;
+ }
+
+ return rmi_f34v7_wait_for_idle(f34, ENABLE_WAIT_MS);
+}
+
+static int rmi_f34v7_erase_guest_code(struct f34_data *f34)
+{
+ int ret;
+
+ dev_info(&f34->fn->dev, "Erasing guest code...\n");
+
+ ret = rmi_f34v7_write_command(f34, v7_CMD_ERASE_GUEST_CODE);
+ if (ret < 0)
+ return ret;
+
+ ret = rmi_f34v7_wait_for_idle(f34, ENABLE_WAIT_MS);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static int rmi_f34v7_erase_all(struct f34_data *f34)
+{
+ int ret;
+
+ dev_info(&f34->fn->dev, "Erasing firmware...\n");
+
+ ret = rmi_f34v7_write_command(f34, v7_CMD_ERASE_UI_FIRMWARE);
+ if (ret < 0)
+ return ret;
+
+ ret = rmi_f34v7_wait_for_idle(f34, ENABLE_WAIT_MS);
+ if (ret < 0)
+ return ret;
+
+ f34->v7.config_area = v7_UI_CONFIG_AREA;
+ ret = rmi_f34v7_erase_config(f34);
+ if (ret < 0)
+ return ret;
+
+ if (f34->v7.has_display_cfg) {
+ f34->v7.config_area = v7_DP_CONFIG_AREA;
+ ret = rmi_f34v7_erase_config(f34);
+ if (ret < 0)
+ return ret;
+ }
+
+ if (f34->v7.new_partition_table && f34->v7.has_guest_code) {
+ ret = rmi_f34v7_erase_guest_code(f34);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int rmi_f34v7_read_f34v7_blocks(struct f34_data *f34, u16 block_cnt,
+ u8 command)
+{
+ int ret;
+ u8 base;
+ __le16 length;
+ u16 transfer;
+ u16 max_transfer;
+ u16 remaining = block_cnt;
+ u16 block_number = 0;
+ u16 index = 0;
+
+ base = f34->fn->fd.data_base_addr;
+
+ ret = rmi_f34v7_write_partition_id(f34, command);
+ if (ret < 0)
+ return ret;
+
+ ret = rmi_write_block(f34->fn->rmi_dev,
+ base + f34->v7.off.block_number,
+ &block_number, sizeof(block_number));
+ if (ret < 0) {
+ dev_err(&f34->fn->dev, "%s: Failed to write block number\n",
+ __func__);
+ return ret;
+ }
+
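+ /* Cap each transfer at the payload length and at one page worth of blocks */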
+ max_transfer = min(f34->v7.payload_length,
+ (u16)(PAGE_SIZE / f34->v7.block_size));
+
+ do {
+ transfer = min(remaining, max_transfer);
+ put_unaligned_le16(transfer, &length);
+
+ ret = rmi_write_block(f34->fn->rmi_dev,
+ base + f34->v7.off.transfer_length,
+ &length, sizeof(length));
+ if (ret < 0) {
+ dev_err(&f34->fn->dev,
+ "%s: Write transfer length fail (%d remaining)\n",
+ __func__, remaining);
+ return ret;
+ }
+
+ ret = rmi_f34v7_write_command(f34, command);
+ if (ret < 0)
+ return ret;
+
+ ret = rmi_f34v7_wait_for_idle(f34, ENABLE_WAIT_MS);
+ if (ret < 0) {
+ dev_err(&f34->fn->dev,
+ "%s: Wait for idle failed (%d blks remaining)\n",
+ __func__, remaining);
+ return ret;
+ }
+
+ ret = rmi_read_block(f34->fn->rmi_dev,
+ base + f34->v7.off.payload,
+ &f34->v7.read_config_buf[index],
+ transfer * f34->v7.block_size);
+ if (ret < 0) {
+ dev_err(&f34->fn->dev,
+ "%s: Read block failed (%d blks remaining)\n",
+ __func__, remaining);
+ return ret;
+ }
+
+ index += (transfer * f34->v7.block_size);
+ remaining -= transfer;
+ } while (remaining);
+
+ return 0;
+}
+
+static int rmi_f34v7_write_f34v7_blocks(struct f34_data *f34,
+ const void *block_ptr, u16 block_cnt,
+ u8 command)
+{
+ int ret;
+ u8 base;
+ __le16 length;
+ u16 transfer;
+ u16 max_transfer;
+ u16 remaining = block_cnt;
+ u16 block_number = 0;
+
+ base = f34->fn->fd.data_base_addr;
+
+ ret = rmi_f34v7_write_partition_id(f34, command);
+ if (ret < 0)
+ return ret;
+
+ ret = rmi_write_block(f34->fn->rmi_dev,
+ base + f34->v7.off.block_number,
+ &block_number, sizeof(block_number));
+ if (ret < 0) {
+ dev_err(&f34->fn->dev, "%s: Failed to write block number\n",
+ __func__);
+ return ret;
+ }
+
+ if (f34->v7.payload_length > (PAGE_SIZE / f34->v7.block_size))
+ max_transfer = PAGE_SIZE / f34->v7.block_size;
+ else
+ max_transfer = f34->v7.payload_length;
+
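+ /* Write the data in payload-sized chunks, waiting for idle after each */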
+ do {
+ transfer = min(remaining, max_transfer);
+ put_unaligned_le16(transfer, &length);
+
+ ret = rmi_write_block(f34->fn->rmi_dev,
+ base + f34->v7.off.transfer_length,
+ &length, sizeof(length));
+ if (ret < 0) {
+ dev_err(&f34->fn->dev,
+ "%s: Write transfer length fail (%d remaining)\n",
+ __func__, remaining);
+ return ret;
+ }
+
+ ret = rmi_f34v7_write_command(f34, command);
+ if (ret < 0)
+ return ret;
+
+ ret = rmi_write_block(f34->fn->rmi_dev,
+ base + f34->v7.off.payload,
+ block_ptr, transfer * f34->v7.block_size);
+ if (ret < 0) {
+ dev_err(&f34->fn->dev,
+ "%s: Failed writing data (%d blks remaining)\n",
+ __func__, remaining);
+ return ret;
+ }
+
+ ret = rmi_f34v7_wait_for_idle(f34, ENABLE_WAIT_MS);
+ if (ret < 0) {
+ dev_err(&f34->fn->dev,
+ "%s: Failed wait for idle (%d blks remaining)\n",
+ __func__, remaining);
+ return ret;
+ }
+
+ block_ptr += (transfer * f34->v7.block_size);
+ remaining -= transfer;
+ } while (remaining);
+
+ return 0;
+}
+
+static int rmi_f34v7_write_config(struct f34_data *f34)
+{
+ return rmi_f34v7_write_f34v7_blocks(f34, f34->v7.config_data,
+ f34->v7.config_block_count,
+ v7_CMD_WRITE_CONFIG);
+}
+
+static int rmi_f34v7_write_ui_config(struct f34_data *f34)
+{
+ f34->v7.config_area = v7_UI_CONFIG_AREA;
+ f34->v7.config_data = f34->v7.img.ui_config.data;
+ f34->v7.config_size = f34->v7.img.ui_config.size;
+ f34->v7.config_block_count = f34->v7.config_size / f34->v7.block_size;
+
+ return rmi_f34v7_write_config(f34);
+}
+
+static int rmi_f34v7_write_dp_config(struct f34_data *f34)
+{
+ f34->v7.config_area = v7_DP_CONFIG_AREA;
+ f34->v7.config_data = f34->v7.img.dp_config.data;
+ f34->v7.config_size = f34->v7.img.dp_config.size;
+ f34->v7.config_block_count = f34->v7.config_size / f34->v7.block_size;
+
+ return rmi_f34v7_write_config(f34);
+}
+
+static int rmi_f34v7_write_guest_code(struct f34_data *f34)
+{
+ return rmi_f34v7_write_f34v7_blocks(f34, f34->v7.img.guest_code.data,
+ f34->v7.img.guest_code.size /
+ f34->v7.block_size,
+ v7_CMD_WRITE_GUEST_CODE);
+}
+
+static int rmi_f34v7_write_flash_config(struct f34_data *f34)
+{
+ int ret;
+
+ f34->v7.config_area = v7_FLASH_CONFIG_AREA;
+ f34->v7.config_data = f34->v7.img.fl_config.data;
+ f34->v7.config_size = f34->v7.img.fl_config.size;
+ f34->v7.config_block_count = f34->v7.config_size / f34->v7.block_size;
+
+ if (f34->v7.config_block_count != f34->v7.blkcount.fl_config) {
+ dev_err(&f34->fn->dev, "%s: Flash config size mismatch\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ ret = rmi_f34v7_write_command(f34, v7_CMD_ERASE_FLASH_CONFIG);
+ if (ret < 0)
+ return ret;
+
+ rmi_dbg(RMI_DEBUG_FN, &f34->fn->dev,
+ "%s: Erase flash config command written\n", __func__);
+
+ ret = rmi_f34v7_wait_for_idle(f34, ENABLE_WAIT_MS);
+ if (ret < 0)
+ return ret;
+
+ ret = rmi_f34v7_write_config(f34);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static int rmi_f34v7_write_partition_table(struct f34_data *f34)
+{
+ u16 block_count;
+ int ret;
+
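+ /* Save the current BL config, rewrite the flash config, then restore it */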
+ block_count = f34->v7.blkcount.bl_config;
+ f34->v7.config_area = v7_BL_CONFIG_AREA;
+ f34->v7.config_size = f34->v7.block_size * block_count;
+ devm_kfree(&f34->fn->dev, f34->v7.read_config_buf);
+ f34->v7.read_config_buf = devm_kzalloc(&f34->fn->dev,
+ f34->v7.config_size, GFP_KERNEL);
+ if (!f34->v7.read_config_buf) {
+ f34->v7.read_config_buf_size = 0;
+ return -ENOMEM;
+ }
+
+ f34->v7.read_config_buf_size = f34->v7.config_size;
+
+ ret = rmi_f34v7_read_f34v7_blocks(f34, block_count, v7_CMD_READ_CONFIG);
+ if (ret < 0)
+ return ret;
+
+ ret = rmi_f34v7_erase_config(f34);
+ if (ret < 0)
+ return ret;
+
+ ret = rmi_f34v7_write_flash_config(f34);
+ if (ret < 0)
+ return ret;
+
+ f34->v7.config_area = v7_BL_CONFIG_AREA;
+ f34->v7.config_data = f34->v7.read_config_buf;
+ f34->v7.config_size = f34->v7.img.bl_config.size;
+ f34->v7.config_block_count = f34->v7.config_size / f34->v7.block_size;
+
+ ret = rmi_f34v7_write_config(f34);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static int rmi_f34v7_write_firmware(struct f34_data *f34)
+{
+ u16 blk_count;
+
+ blk_count = f34->v7.img.ui_firmware.size / f34->v7.block_size;
+
+ return rmi_f34v7_write_f34v7_blocks(f34, f34->v7.img.ui_firmware.data,
+ blk_count, v7_CMD_WRITE_FW);
+}
+
+static void rmi_f34v7_compare_partition_tables(struct f34_data *f34)
+{
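+ /* Any change in a partition's start address means a new partition table */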
+ if (f34->v7.phyaddr.ui_firmware != f34->v7.img.phyaddr.ui_firmware) {
+ f34->v7.new_partition_table = true;
+ return;
+ }
+
+ if (f34->v7.phyaddr.ui_config != f34->v7.img.phyaddr.ui_config) {
+ f34->v7.new_partition_table = true;
+ return;
+ }
+
+ if (f34->v7.has_display_cfg &&
+ f34->v7.phyaddr.dp_config != f34->v7.img.phyaddr.dp_config) {
+ f34->v7.new_partition_table = true;
+ return;
+ }
+
+ if (f34->v7.has_guest_code &&
+ f34->v7.phyaddr.guest_code != f34->v7.img.phyaddr.guest_code) {
+ f34->v7.new_partition_table = true;
+ return;
+ }
+
+ f34->v7.new_partition_table = false;
+}
+
+static void rmi_f34v7_parse_img_header_10_bl_container(struct f34_data *f34,
+ const void *image)
+{
+ int i;
+ int num_of_containers;
+ unsigned int addr;
+ unsigned int container_id;
+ unsigned int length;
+ const void *content;
+ const struct container_descriptor *descriptor;
+
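+ /* The container body is a list of 4-byte descriptor addresses; entry 0 (the bootloader version word) is skipped */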
+ num_of_containers = f34->v7.img.bootloader.size / 4 - 1;
+
+ for (i = 1; i <= num_of_containers; i++) {
+ addr = get_unaligned_le32(f34->v7.img.bootloader.data + i * 4);
+ descriptor = image + addr;
+ container_id = le16_to_cpu(descriptor->container_id);
+ content = image + le32_to_cpu(descriptor->content_address);
+ length = le32_to_cpu(descriptor->content_length);
+ switch (container_id) {
+ case BL_CONFIG_CONTAINER:
+ case GLOBAL_PARAMETERS_CONTAINER:
+ f34->v7.img.bl_config.data = content;
+ f34->v7.img.bl_config.size = length;
+ break;
+ case BL_LOCKDOWN_INFO_CONTAINER:
+ case DEVICE_CONFIG_CONTAINER:
+ f34->v7.img.lockdown.data = content;
+ f34->v7.img.lockdown.size = length;
+ break;
+ default:
+ break;
+ }
+ }
+}
+
+static void rmi_f34v7_parse_image_header_10(struct f34_data *f34)
+{
+ unsigned int i;
+ unsigned int num_of_containers;
+ unsigned int addr;
+ unsigned int offset;
+ unsigned int container_id;
+ unsigned int length;
+ const void *image = f34->v7.image;
+ const u8 *content;
+ const struct container_descriptor *descriptor;
+ const struct image_header_10 *header = image;
+
+ f34->v7.img.checksum = le32_to_cpu(header->checksum);
+
+ rmi_dbg(RMI_DEBUG_FN, &f34->fn->dev, "%s: f34->v7.img.checksum=%X\n",
+ __func__, f34->v7.img.checksum);
+
+ /* address of top level container */
+ offset = le32_to_cpu(header->top_level_container_start_addr);
+ descriptor = image + offset;
+
+ /* address of top level container content */
+ offset = le32_to_cpu(descriptor->content_address);
+ num_of_containers = le32_to_cpu(descriptor->content_length) / 4;
+
+ for (i = 0; i < num_of_containers; i++) {
+ addr = get_unaligned_le32(image + offset);
+ offset += 4;
+ descriptor = image + addr;
+ container_id = le16_to_cpu(descriptor->container_id);
+ content = image + le32_to_cpu(descriptor->content_address);
+ length = le32_to_cpu(descriptor->content_length);
+
+ rmi_dbg(RMI_DEBUG_FN, &f34->fn->dev,
+ "%s: container_id=%d, length=%d\n", __func__,
+ container_id, length);
+
+ switch (container_id) {
+ case UI_CONTAINER:
+ case CORE_CODE_CONTAINER:
+ f34->v7.img.ui_firmware.data = content;
+ f34->v7.img.ui_firmware.size = length;
+ break;
+ case UI_CONFIG_CONTAINER:
+ case CORE_CONFIG_CONTAINER:
+ f34->v7.img.ui_config.data = content;
+ f34->v7.img.ui_config.size = length;
+ break;
+ case BL_CONTAINER:
+ f34->v7.img.bl_version = *content;
+ f34->v7.img.bootloader.data = content;
+ f34->v7.img.bootloader.size = length;
+ rmi_f34v7_parse_img_header_10_bl_container(f34, image);
+ break;
+ case GUEST_CODE_CONTAINER:
+ f34->v7.img.contains_guest_code = true;
+ f34->v7.img.guest_code.data = content;
+ f34->v7.img.guest_code.size = length;
+ break;
+ case DISPLAY_CONFIG_CONTAINER:
+ f34->v7.img.contains_display_cfg = true;
+ f34->v7.img.dp_config.data = content;
+ f34->v7.img.dp_config.size = length;
+ break;
+ case FLASH_CONFIG_CONTAINER:
+ f34->v7.img.contains_flash_config = true;
+ f34->v7.img.fl_config.data = content;
+ f34->v7.img.fl_config.size = length;
+ break;
+ case GENERAL_INFORMATION_CONTAINER:
+ f34->v7.img.contains_firmware_id = true;
+ f34->v7.img.firmware_id =
+ get_unaligned_le32(content + 4);
+ break;
+ default:
+ break;
+ }
+ }
+}
+
+static int rmi_f34v7_parse_image_info(struct f34_data *f34)
+{
+ const struct image_header_10 *header = f34->v7.image;
+
+ memset(&f34->v7.img, 0x00, sizeof(f34->v7.img));
+
+ rmi_dbg(RMI_DEBUG_FN, &f34->fn->dev,
+ "%s: header->major_header_version = %d\n",
+ __func__, header->major_header_version);
+
+ switch (header->major_header_version) {
+ case IMAGE_HEADER_VERSION_10:
+ rmi_f34v7_parse_image_header_10(f34);
+ break;
+ default:
+ dev_err(&f34->fn->dev, "Unsupported image file format %02X\n",
+ header->major_header_version);
+ return -EINVAL;
+ }
+
+ if (!f34->v7.img.contains_flash_config) {
+ dev_err(&f34->fn->dev, "%s: No flash config in fw image\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ rmi_f34v7_parse_partition_table(f34, f34->v7.img.fl_config.data,
+ &f34->v7.img.blkcount, &f34->v7.img.phyaddr);
+
+ rmi_f34v7_compare_partition_tables(f34);
+
+ return 0;
+}
+
+int rmi_f34v7_do_reflash(struct f34_data *f34, const struct firmware *fw)
+{
+ int ret;
+
+ ret = rmi_f34v7_read_queries_bl_version(f34);
+ if (ret < 0)
+ goto fail;
+
+ f34->v7.image = fw->data;
+
+ ret = rmi_f34v7_parse_image_info(f34);
+ if (ret < 0)
+ goto fail;
+
+ if (!f34->v7.new_partition_table) {
+ ret = rmi_f34v7_check_ui_firmware_size(f34);
+ if (ret < 0)
+ goto fail;
+
+ ret = rmi_f34v7_check_ui_config_size(f34);
+ if (ret < 0)
+ goto fail;
+
+ if (f34->v7.has_display_cfg &&
+ f34->v7.img.contains_display_cfg) {
+ ret = rmi_f34v7_check_dp_config_size(f34);
+ if (ret < 0)
+ goto fail;
+ }
+
+ if (f34->v7.has_guest_code && f34->v7.img.contains_guest_code) {
+ ret = rmi_f34v7_check_guest_code_size(f34);
+ if (ret < 0)
+ goto fail;
+ }
+ } else {
+ ret = rmi_f34v7_check_bl_config_size(f34);
+ if (ret < 0)
+ goto fail;
+ }
+
+ ret = rmi_f34v7_erase_all(f34);
+ if (ret < 0)
+ goto fail;
+
+ if (f34->v7.new_partition_table) {
+ ret = rmi_f34v7_write_partition_table(f34);
+ if (ret < 0)
+ goto fail;
+ dev_info(&f34->fn->dev, "%s: Partition table programmed\n",
+ __func__);
+ }
+
+ dev_info(&f34->fn->dev, "Writing firmware (%d bytes)...\n",
+ f34->v7.img.ui_firmware.size);
+
+ ret = rmi_f34v7_write_firmware(f34);
+ if (ret < 0)
+ goto fail;
+
+ dev_info(&f34->fn->dev, "Writing config (%d bytes)...\n",
+ f34->v7.img.ui_config.size);
+
+ f34->v7.config_area = v7_UI_CONFIG_AREA;
+ ret = rmi_f34v7_write_ui_config(f34);
+ if (ret < 0)
+ goto fail;
+
+ if (f34->v7.has_display_cfg && f34->v7.img.contains_display_cfg) {
+ dev_info(&f34->fn->dev, "Writing display config...\n");
+
+ ret = rmi_f34v7_write_dp_config(f34);
+ if (ret < 0)
+ goto fail;
+ }
+
+ if (f34->v7.new_partition_table) {
+ if (f34->v7.has_guest_code && f34->v7.img.contains_guest_code) {
+ dev_info(&f34->fn->dev, "Writing guest code...\n");
+
+ ret = rmi_f34v7_write_guest_code(f34);
+ if (ret < 0)
+ goto fail;
+ }
+ }
+
+fail:
+ return ret;
+}
+
+static int rmi_f34v7_enter_flash_prog(struct f34_data *f34)
+{
+ int ret;
+
+ ret = rmi_f34v7_read_flash_status(f34);
+ if (ret < 0)
+ return ret;
+
+ if (f34->v7.in_bl_mode)
+ return 0;
+
+ ret = rmi_f34v7_write_command(f34, v7_CMD_ENABLE_FLASH_PROG);
+ if (ret < 0)
+ return ret;
+
+ ret = rmi_f34v7_wait_for_idle(f34, ENABLE_WAIT_MS);
+ if (ret < 0)
+ return ret;
+
+ if (!f34->v7.in_bl_mode) {
+ dev_err(&f34->fn->dev, "%s: BL mode not entered\n", __func__);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int rmi_f34v7_start_reflash(struct f34_data *f34, const struct firmware *fw)
+{
+ int ret = 0;
+
+ f34->v7.config_area = v7_UI_CONFIG_AREA;
+ f34->v7.image = fw->data;
+
+ ret = rmi_f34v7_parse_image_info(f34);
+ if (ret < 0)
+ goto exit;
+
+ if (!f34->v7.force_update && f34->v7.new_partition_table) {
+ dev_err(&f34->fn->dev, "%s: Partition table mismatch\n",
+ __func__);
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ dev_info(&f34->fn->dev, "Firmware image OK\n");
+
+ ret = rmi_f34v7_read_flash_status(f34);
+ if (ret < 0)
+ goto exit;
+
+ if (f34->v7.in_bl_mode) {
+ dev_info(&f34->fn->dev, "%s: Device in bootloader mode\n",
+ __func__);
+ }
+
+ ret = rmi_f34v7_enter_flash_prog(f34);
+
+exit:
+ return ret;
+}
+
+int rmi_f34v7_probe(struct f34_data *f34)
+{
+ int ret;
+
+ /* Read bootloader version */
+ ret = rmi_read_block(f34->fn->rmi_dev,
+ f34->fn->fd.query_base_addr + V7_BOOTLOADER_ID_OFFSET,
+ f34->bootloader_id,
+ sizeof(f34->bootloader_id));
+ if (ret < 0) {
+ dev_err(&f34->fn->dev, "%s: Failed to read bootloader ID\n",
+ __func__);
+ return ret;
+ }
+
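+ /* Note: bootloader v5/v6 report the ID in ASCII, v7 as a plain number */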
+ if (f34->bootloader_id[1] == '5') {
+ f34->bl_version = 5;
+ } else if (f34->bootloader_id[1] == '6') {
+ f34->bl_version = 6;
+ } else if (f34->bootloader_id[1] == 7) {
+ f34->bl_version = 7;
+ } else {
+ dev_err(&f34->fn->dev, "%s: Unrecognized bootloader version\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ memset(&f34->v7.blkcount, 0x00, sizeof(f34->v7.blkcount));
+ memset(&f34->v7.phyaddr, 0x00, sizeof(f34->v7.phyaddr));
+ ret = rmi_f34v7_read_queries(f34);
+ if (ret < 0)
+ return ret;
+
+ f34->v7.force_update = false;
+ return 0;
+}
diff --git a/drivers/input/rmi4/rmi_f54.c b/drivers/input/rmi4/rmi_f54.c
index cf805b960866..dea63e2db3e6 100644
--- a/drivers/input/rmi4/rmi_f54.c
+++ b/drivers/input/rmi4/rmi_f54.c
@@ -200,7 +200,7 @@ static int rmi_f54_request_report(struct rmi_function *fn, u8 report_type)
error = rmi_write(rmi_dev, fn->fd.command_base_addr, F54_GET_REPORT);
if (error < 0)
- return error;
+ goto unlock;
init_completion(&f54->cmd_done);
@@ -209,15 +209,18 @@ static int rmi_f54_request_report(struct rmi_function *fn, u8 report_type)
queue_delayed_work(f54->workqueue, &f54->work, 0);
+unlock:
mutex_unlock(&f54->data_mutex);
- return 0;
+ return error;
}
static size_t rmi_f54_get_report_size(struct f54_data *f54)
{
- u8 rx = f54->num_rx_electrodes ? : f54->num_rx_electrodes;
- u8 tx = f54->num_tx_electrodes ? : f54->num_tx_electrodes;
+ struct rmi_device *rmi_dev = f54->fn->rmi_dev;
+ struct rmi_driver_data *drv_data = dev_get_drvdata(&rmi_dev->dev);
+ u8 rx = drv_data->num_rx_electrodes ? : f54->num_rx_electrodes;
+ u8 tx = drv_data->num_tx_electrodes ? : f54->num_tx_electrodes;
size_t size;
switch (rmi_f54_get_reptype(f54, f54->input)) {
@@ -401,6 +404,10 @@ static int rmi_f54_vidioc_enum_input(struct file *file, void *priv,
static int rmi_f54_set_input(struct f54_data *f54, unsigned int i)
{
+ struct rmi_device *rmi_dev = f54->fn->rmi_dev;
+ struct rmi_driver_data *drv_data = dev_get_drvdata(&rmi_dev->dev);
+ u8 rx = drv_data->num_rx_electrodes ? : f54->num_rx_electrodes;
+ u8 tx = drv_data->num_tx_electrodes ? : f54->num_tx_electrodes;
struct v4l2_pix_format *f = &f54->format;
enum rmi_f54_report_type reptype;
int ret;
@@ -415,8 +422,8 @@ static int rmi_f54_set_input(struct f54_data *f54, unsigned int i)
f54->input = i;
- f->width = f54->num_rx_electrodes;
- f->height = f54->num_tx_electrodes;
+ f->width = rx;
+ f->height = tx;
f->field = V4L2_FIELD_NONE;
f->colorspace = V4L2_COLORSPACE_RAW;
f->bytesperline = f->width * sizeof(u16);
diff --git a/drivers/input/rmi4/rmi_f55.c b/drivers/input/rmi4/rmi_f55.c
new file mode 100644
index 000000000000..37390ca6a924
--- /dev/null
+++ b/drivers/input/rmi4/rmi_f55.c
@@ -0,0 +1,131 @@
+/*
+ * Copyright (c) 2012-2015 Synaptics Incorporated
+ * Copyright (C) 2016 Zodiac Inflight Innovations
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ */
+
+#include <linux/bitops.h>
+#include <linux/kernel.h>
+#include <linux/rmi.h>
+#include <linux/slab.h>
+#include "rmi_driver.h"
+
+#define F55_NAME "rmi4_f55"
+
+/* F55 data offsets */
+#define F55_NUM_RX_OFFSET 0
+#define F55_NUM_TX_OFFSET 1
+#define F55_PHYS_CHAR_OFFSET 2
+
+/* Only read required query registers */
+#define F55_QUERY_LEN 3
+
+/* F55 capabilities */
+#define F55_CAP_SENSOR_ASSIGN BIT(0)
+
+struct f55_data {
+ struct rmi_function *fn;
+
+ u8 qry[F55_QUERY_LEN];
+ u8 num_rx_electrodes;
+ u8 cfg_num_rx_electrodes;
+ u8 num_tx_electrodes;
+ u8 cfg_num_tx_electrodes;
+};
+
+static int rmi_f55_detect(struct rmi_function *fn)
+{
+ struct rmi_device *rmi_dev = fn->rmi_dev;
+ struct rmi_driver_data *drv_data = dev_get_drvdata(&rmi_dev->dev);
+ struct f55_data *f55;
+ int error;
+
+ f55 = dev_get_drvdata(&fn->dev);
+
+ error = rmi_read_block(fn->rmi_dev, fn->fd.query_base_addr,
+ &f55->qry, sizeof(f55->qry));
+ if (error) {
+ dev_err(&fn->dev, "%s: Failed to query F55 properties\n",
+ __func__);
+ return error;
+ }
+
+ f55->num_rx_electrodes = f55->qry[F55_NUM_RX_OFFSET];
+ f55->num_tx_electrodes = f55->qry[F55_NUM_TX_OFFSET];
+
+ f55->cfg_num_rx_electrodes = f55->num_rx_electrodes;
+ f55->cfg_num_tx_electrodes = f55->num_tx_electrodes;
+
+ drv_data->num_rx_electrodes = f55->cfg_num_rx_electrodes;
+ drv_data->num_tx_electrodes = f55->cfg_num_tx_electrodes;
+
+ if (f55->qry[F55_PHYS_CHAR_OFFSET] & F55_CAP_SENSOR_ASSIGN) {
+ int i, total;
+ u8 buf[256];
+
+ /*
+ * Calculate the number of enabled receive and transmit
+ * electrodes by reading F55:Ctrl1 (sensor receiver assignment)
+ * and F55:Ctrl2 (sensor transmitter assignment). The number of
+ * enabled electrodes is the sum of all field entries with a
+ * value other than 0xff.
+ */
+ error = rmi_read_block(fn->rmi_dev,
+ fn->fd.control_base_addr + 1,
+ buf, f55->num_rx_electrodes);
+ if (!error) {
+ total = 0;
+ for (i = 0; i < f55->num_rx_electrodes; i++) {
+ if (buf[i] != 0xff)
+ total++;
+ }
+ f55->cfg_num_rx_electrodes = total;
+ drv_data->num_rx_electrodes = total;
+ }
+
+ error = rmi_read_block(fn->rmi_dev,
+ fn->fd.control_base_addr + 2,
+ buf, f55->num_tx_electrodes);
+ if (!error) {
+ total = 0;
+ for (i = 0; i < f55->num_tx_electrodes; i++) {
+ if (buf[i] != 0xff)
+ total++;
+ }
+ f55->cfg_num_tx_electrodes = total;
+ drv_data->num_tx_electrodes = total;
+ }
+ }
+
+ rmi_dbg(RMI_DEBUG_FN, &fn->dev, "F55 num_rx_electrodes: %d (raw %d)\n",
+ f55->cfg_num_rx_electrodes, f55->num_rx_electrodes);
+ rmi_dbg(RMI_DEBUG_FN, &fn->dev, "F55 num_tx_electrodes: %d (raw %d)\n",
+ f55->cfg_num_tx_electrodes, f55->num_tx_electrodes);
+
+ return 0;
+}
+
+static int rmi_f55_probe(struct rmi_function *fn)
+{
+ struct f55_data *f55;
+
+ f55 = devm_kzalloc(&fn->dev, sizeof(struct f55_data), GFP_KERNEL);
+ if (!f55)
+ return -ENOMEM;
+
+ f55->fn = fn;
+ dev_set_drvdata(&fn->dev, f55);
+
+ return rmi_f55_detect(fn);
+}
+
+struct rmi_function_handler rmi_f55_handler = {
+ .driver = {
+ .name = F55_NAME,
+ },
+ .func = 0x55,
+ .probe = rmi_f55_probe,
+};
diff --git a/drivers/input/rmi4/rmi_i2c.c b/drivers/input/rmi4/rmi_i2c.c
index 1ebc2c1debae..082306d7c207 100644
--- a/drivers/input/rmi4/rmi_i2c.c
+++ b/drivers/input/rmi4/rmi_i2c.c
@@ -9,7 +9,6 @@
#include <linux/i2c.h>
#include <linux/rmi.h>
-#include <linux/irq.h>
#include <linux/of.h>
#include <linux/delay.h>
#include <linux/regulator/consumer.h>
@@ -35,8 +34,6 @@ struct rmi_i2c_xport {
struct mutex page_mutex;
int page;
- int irq;
-
u8 *tx_buf;
size_t tx_buf_size;
@@ -177,42 +174,6 @@ static const struct rmi_transport_ops rmi_i2c_ops = {
.read_block = rmi_i2c_read_block,
};
-static irqreturn_t rmi_i2c_irq(int irq, void *dev_id)
-{
- struct rmi_i2c_xport *rmi_i2c = dev_id;
- struct rmi_device *rmi_dev = rmi_i2c->xport.rmi_dev;
- int ret;
-
- ret = rmi_process_interrupt_requests(rmi_dev);
- if (ret)
- rmi_dbg(RMI_DEBUG_XPORT, &rmi_dev->dev,
- "Failed to process interrupt request: %d\n", ret);
-
- return IRQ_HANDLED;
-}
-
-static int rmi_i2c_init_irq(struct i2c_client *client)
-{
- struct rmi_i2c_xport *rmi_i2c = i2c_get_clientdata(client);
- int irq_flags = irqd_get_trigger_type(irq_get_irq_data(rmi_i2c->irq));
- int ret;
-
- if (!irq_flags)
- irq_flags = IRQF_TRIGGER_LOW;
-
- ret = devm_request_threaded_irq(&client->dev, rmi_i2c->irq, NULL,
- rmi_i2c_irq, irq_flags | IRQF_ONESHOT, client->name,
- rmi_i2c);
- if (ret < 0) {
- dev_warn(&client->dev, "Failed to register interrupt %d\n",
- rmi_i2c->irq);
-
- return ret;
- }
-
- return 0;
-}
-
#ifdef CONFIG_OF
static const struct of_device_id rmi_i2c_of_match[] = {
{ .compatible = "syna,rmi4-i2c" },
@@ -255,8 +216,7 @@ static int rmi_i2c_probe(struct i2c_client *client,
if (!client->dev.of_node && client_pdata)
*pdata = *client_pdata;
- if (client->irq > 0)
- rmi_i2c->irq = client->irq;
+ pdata->irq = client->irq;
rmi_dbg(RMI_DEBUG_XPORT, &client->dev, "Probing %s.\n",
dev_name(&client->dev));
@@ -321,10 +281,6 @@ static int rmi_i2c_probe(struct i2c_client *client,
if (retval)
return retval;
- retval = rmi_i2c_init_irq(client);
- if (retval < 0)
- return retval;
-
dev_info(&client->dev, "registered rmi i2c driver at %#04x.\n",
client->addr);
return 0;
@@ -337,18 +293,10 @@ static int rmi_i2c_suspend(struct device *dev)
struct rmi_i2c_xport *rmi_i2c = i2c_get_clientdata(client);
int ret;
- ret = rmi_driver_suspend(rmi_i2c->xport.rmi_dev);
+ ret = rmi_driver_suspend(rmi_i2c->xport.rmi_dev, true);
if (ret)
dev_warn(dev, "Failed to resume device: %d\n", ret);
- disable_irq(rmi_i2c->irq);
- if (device_may_wakeup(&client->dev)) {
- ret = enable_irq_wake(rmi_i2c->irq);
- if (!ret)
- dev_warn(dev, "Failed to enable irq for wake: %d\n",
- ret);
- }
-
regulator_bulk_disable(ARRAY_SIZE(rmi_i2c->supplies),
rmi_i2c->supplies);
@@ -368,15 +316,7 @@ static int rmi_i2c_resume(struct device *dev)
msleep(rmi_i2c->startup_delay);
- enable_irq(rmi_i2c->irq);
- if (device_may_wakeup(&client->dev)) {
- ret = disable_irq_wake(rmi_i2c->irq);
- if (!ret)
- dev_warn(dev, "Failed to disable irq for wake: %d\n",
- ret);
- }
-
- ret = rmi_driver_resume(rmi_i2c->xport.rmi_dev);
+ ret = rmi_driver_resume(rmi_i2c->xport.rmi_dev, true);
if (ret)
dev_warn(dev, "Failed to resume device: %d\n", ret);
@@ -391,12 +331,10 @@ static int rmi_i2c_runtime_suspend(struct device *dev)
struct rmi_i2c_xport *rmi_i2c = i2c_get_clientdata(client);
int ret;
- ret = rmi_driver_suspend(rmi_i2c->xport.rmi_dev);
+ ret = rmi_driver_suspend(rmi_i2c->xport.rmi_dev, false);
if (ret)
dev_warn(dev, "Failed to resume device: %d\n", ret);
- disable_irq(rmi_i2c->irq);
-
regulator_bulk_disable(ARRAY_SIZE(rmi_i2c->supplies),
rmi_i2c->supplies);
@@ -416,9 +354,7 @@ static int rmi_i2c_runtime_resume(struct device *dev)
msleep(rmi_i2c->startup_delay);
- enable_irq(rmi_i2c->irq);
-
- ret = rmi_driver_resume(rmi_i2c->xport.rmi_dev);
+ ret = rmi_driver_resume(rmi_i2c->xport.rmi_dev, false);
if (ret)
dev_warn(dev, "Failed to resume device: %d\n", ret);
diff --git a/drivers/input/rmi4/rmi_smbus.c b/drivers/input/rmi4/rmi_smbus.c
new file mode 100644
index 000000000000..76752555d809
--- /dev/null
+++ b/drivers/input/rmi4/rmi_smbus.c
@@ -0,0 +1,447 @@
+/*
+ * Copyright (c) 2015 - 2016 Red Hat, Inc
+ * Copyright (c) 2011, 2012 Synaptics Incorporated
+ * Copyright (c) 2011 Unixphere
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/kconfig.h>
+#include <linux/lockdep.h>
+#include <linux/module.h>
+#include <linux/pm.h>
+#include <linux/rmi.h>
+#include <linux/slab.h>
+#include "rmi_driver.h"
+
+#define SMB_PROTOCOL_VERSION_ADDRESS 0xfd
+#define SMB_MAX_COUNT 32
+#define RMI_SMB2_MAP_SIZE 8 /* 8 entries of 4 bytes each */
+#define RMI_SMB2_MAP_FLAGS_WE 0x01
+
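+/*
+ * The SMBus interface exposes a small table of programmable command
+ * codes; each mapping_table_entry slot binds one command code to an
+ * RMI address, a read count, and a write-enable flag.
+ */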
+struct mapping_table_entry {
+ __le16 rmiaddr;
+ u8 readcount;
+ u8 flags;
+};
+
+struct rmi_smb_xport {
+ struct rmi_transport_dev xport;
+ struct i2c_client *client;
+
+ struct mutex page_mutex;
+ int page;
+ u8 table_index;
+ struct mutex mappingtable_mutex;
+ struct mapping_table_entry mapping_table[RMI_SMB2_MAP_SIZE];
+};
+
+static int rmi_smb_get_version(struct rmi_smb_xport *rmi_smb)
+{
+ struct i2c_client *client = rmi_smb->client;
+ int retval;
+
+ /* Check which SMBus protocol version the device implements by reading the version byte. */
+ retval = i2c_smbus_read_byte_data(client, SMB_PROTOCOL_VERSION_ADDRESS);
+ if (retval < 0) {
+ dev_err(&client->dev, "failed to get SMBus version number!\n");
+ return retval;
+ }
+ return retval + 1;
+}
+
+/* SMB block write - wrapper over i2c_smbus_write_block_data */
+static int smb_block_write(struct rmi_transport_dev *xport,
+ u8 commandcode, const void *buf, size_t len)
+{
+ struct rmi_smb_xport *rmi_smb =
+ container_of(xport, struct rmi_smb_xport, xport);
+ struct i2c_client *client = rmi_smb->client;
+ int retval;
+
+ retval = i2c_smbus_write_block_data(client, commandcode, len, buf);
+
+ rmi_dbg(RMI_DEBUG_XPORT, &client->dev,
+ "wrote %zd bytes at %#04x: %d (%*ph)\n",
+ len, commandcode, retval, (int)len, buf);
+
+ return retval;
+}
+
+/*
+ * Obtain the command code for an SMBus operation, keeping a record of
+ * the mapping in the driver's copy of the device mapping table.
+ */
+static int rmi_smb_get_command_code(struct rmi_transport_dev *xport,
+ u16 rmiaddr, int bytecount, bool isread, u8 *commandcode)
+{
+ struct rmi_smb_xport *rmi_smb =
+ container_of(xport, struct rmi_smb_xport, xport);
+ int i;
+ int retval;
+ struct mapping_table_entry mapping_data[1];
+
+ mutex_lock(&rmi_smb->mappingtable_mutex);
+ for (i = 0; i < RMI_SMB2_MAP_SIZE; i++) {
+ if (rmi_smb->mapping_table[i].rmiaddr == rmiaddr) {
+ if (isread) {
+ if (rmi_smb->mapping_table[i].readcount
+ == bytecount) {
+ *commandcode = i;
+ retval = 0;
+ goto exit;
+ }
+ } else {
+ if (rmi_smb->mapping_table[i].flags &
+ RMI_SMB2_MAP_FLAGS_WE) {
+ *commandcode = i;
+ retval = 0;
+ goto exit;
+ }
+ }
+ }
+ }
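+ /* No existing mapping matched: claim the next slot round-robin */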
+ i = rmi_smb->table_index;
+ rmi_smb->table_index = (i + 1) % RMI_SMB2_MAP_SIZE;
+
+ /* construct the mapping table entry (4 bytes per entry) */
+ memset(mapping_data, 0, sizeof(mapping_data));
+
+ mapping_data[0].rmiaddr = cpu_to_le16(rmiaddr);
+ mapping_data[0].readcount = bytecount;
+ mapping_data[0].flags = !isread ? RMI_SMB2_MAP_FLAGS_WE : 0;
+
+ retval = smb_block_write(xport, i + 0x80, mapping_data,
+ sizeof(mapping_data));
+
+ if (retval < 0) {
+ /*
+ * The write to the device's mapping table failed, so
+ * invalidate the driver's record for this slot too.
+ */
+ rmi_smb->mapping_table[i].rmiaddr = 0x0000;
+ rmi_smb->mapping_table[i].readcount = 0;
+ rmi_smb->mapping_table[i].flags = 0;
+ goto exit;
+ }
+ /* save to the driver level mapping table */
+ rmi_smb->mapping_table[i].rmiaddr = rmiaddr;
+ rmi_smb->mapping_table[i].readcount = bytecount;
+ rmi_smb->mapping_table[i].flags = !isread ? RMI_SMB2_MAP_FLAGS_WE : 0;
+ *commandcode = i;
+
+exit:
+ mutex_unlock(&rmi_smb->mappingtable_mutex);
+
+ return retval;
+}
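+
+/*
+ * Usage sketch (address and length hypothetical):
+ *
+ *   u8 cmd, buf[4];
+ *
+ *   ret = rmi_smb_get_command_code(xport, 0x0400, sizeof(buf),
+ *                                  true, &cmd);
+ *   if (ret < 0)
+ *           return ret;
+ *   ret = smb_block_read(xport, cmd, buf, sizeof(buf));
+ *
+ * A later read of the same (address, count) pair reuses the cached
+ * slot without reprogramming the device's mapping table.
+ */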
+
+static int rmi_smb_write_block(struct rmi_transport_dev *xport, u16 rmiaddr,
+ const void *databuff, size_t len)
+{
+ int retval = 0;
+ u8 commandcode;
+ struct rmi_smb_xport *rmi_smb =
+ container_of(xport, struct rmi_smb_xport, xport);
+ int cur_len = (int)len;
+
+ mutex_lock(&rmi_smb->page_mutex);
+
+ while (cur_len > 0) {
+ /*
+ * Break the transfer into 32-byte chunks and get a
+ * command code for each chunk.
+ */
+ int block_len = min_t(int, cur_len, SMB_MAX_COUNT);
+
+ retval = rmi_smb_get_command_code(xport, rmiaddr, block_len,
+ false, &commandcode);
+ if (retval < 0)
+ goto exit;
+
+ retval = smb_block_write(xport, commandcode,
+ databuff, block_len);
+ if (retval < 0)
+ goto exit;
+
+ /* prepare to write next block of bytes */
+ cur_len -= SMB_MAX_COUNT;
+ databuff += SMB_MAX_COUNT;
+ rmiaddr += SMB_MAX_COUNT;
+ }
+exit:
+ mutex_unlock(&rmi_smb->page_mutex);
+ return retval;
+}
+
+/* SMB block read - wrapper over i2c_smbus_read_block_data */
+static int smb_block_read(struct rmi_transport_dev *xport,
+ u8 commandcode, void *buf, size_t len)
+{
+ struct rmi_smb_xport *rmi_smb =
+ container_of(xport, struct rmi_smb_xport, xport);
+ struct i2c_client *client = rmi_smb->client;
+ int retval;
+
+ retval = i2c_smbus_read_block_data(client, commandcode, buf);
+
+ return retval;
+}
+
+static int rmi_smb_read_block(struct rmi_transport_dev *xport, u16 rmiaddr,
+ void *databuff, size_t len)
+{
+ struct rmi_smb_xport *rmi_smb =
+ container_of(xport, struct rmi_smb_xport, xport);
+ int retval;
+ u8 commandcode;
+ int cur_len = (int)len;
+
+ mutex_lock(&rmi_smb->page_mutex);
+ memset(databuff, 0, len);
+
+ while (cur_len > 0) {
+ /* break the transfer into 32-byte chunks and get a command code for each */
+ int block_len = min_t(int, cur_len, SMB_MAX_COUNT);
+
+ retval = rmi_smb_get_command_code(xport, rmiaddr, block_len,
+ true, &commandcode);
+ if (retval < 0)
+ goto exit;
+
+ retval = smb_block_read(xport, commandcode,
+ databuff, block_len);
+ if (retval < 0)
+ goto exit;
+
+ /* prepare to read next block of bytes */
+ cur_len -= SMB_MAX_COUNT;
+ databuff += SMB_MAX_COUNT;
+ rmiaddr += SMB_MAX_COUNT;
+ }
+
+ retval = 0;
+
+exit:
+ mutex_unlock(&rmi_smb->page_mutex);
+ return retval;
+}
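+
+/*
+ * Example: a 90-byte read at RMI address 0x0100 is issued as three
+ * SMBus block reads of 32, 32 and 26 bytes at 0x0100, 0x0120 and
+ * 0x0140, each through its own mapping-table command code.
+ */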
+
+static void rmi_smb_clear_state(struct rmi_smb_xport *rmi_smb)
+{
+ /* the mapping table has been flushed, discard the current one */
+ mutex_lock(&rmi_smb->mappingtable_mutex);
+ memset(rmi_smb->mapping_table, 0, sizeof(rmi_smb->mapping_table));
+ mutex_unlock(&rmi_smb->mappingtable_mutex);
+}
+
+static int rmi_smb_enable_smbus_mode(struct rmi_smb_xport *rmi_smb)
+{
+ int retval;
+
+ /* we need to get the SMBus version to activate the touchpad */
+ retval = rmi_smb_get_version(rmi_smb);
+ if (retval < 0)
+ return retval;
+
+ return 0;
+}
+
+static int rmi_smb_reset(struct rmi_transport_dev *xport, u16 reset_addr)
+{
+ struct rmi_smb_xport *rmi_smb =
+ container_of(xport, struct rmi_smb_xport, xport);
+
+ rmi_smb_clear_state(rmi_smb);
+
+ /*
+ * We do not issue the actual reset command; it has to be handled on
+ * the PS/2 side or there will be races between PS/2 and SMBus.
+ * PS/2 should ensure that psmouse_reset is called before the device
+ * is initialized and after it has been removed, leaving it in a
+ * known state.
+ */
+ return rmi_smb_enable_smbus_mode(rmi_smb);
+}
+
+static const struct rmi_transport_ops rmi_smb_ops = {
+ .write_block = rmi_smb_write_block,
+ .read_block = rmi_smb_read_block,
+ .reset = rmi_smb_reset,
+};
+
+static int rmi_smb_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct rmi_device_platform_data *pdata = dev_get_platdata(&client->dev);
+ struct rmi_smb_xport *rmi_smb;
+ int retval;
+ int smbus_version;
+
+ if (!i2c_check_functionality(client->adapter,
+ I2C_FUNC_SMBUS_READ_BLOCK_DATA |
+ I2C_FUNC_SMBUS_HOST_NOTIFY)) {
+ dev_err(&client->dev,
+ "adapter does not support required functionality.\n");
+ return -ENODEV;
+ }
+
+ if (client->irq <= 0) {
+ dev_err(&client->dev, "no IRQ provided, giving up.\n");
+ return client->irq ? client->irq : -ENODEV;
+ }
+
+ rmi_smb = devm_kzalloc(&client->dev, sizeof(struct rmi_smb_xport),
+ GFP_KERNEL);
+ if (!rmi_smb)
+ return -ENOMEM;
+
+ if (!pdata) {
+ dev_err(&client->dev, "no platform data, aborting\n");
+ return -ENODEV;
+ }
+
+ rmi_dbg(RMI_DEBUG_XPORT, &client->dev, "Probing %s.\n",
+ dev_name(&client->dev));
+
+ rmi_smb->client = client;
+ mutex_init(&rmi_smb->page_mutex);
+ mutex_init(&rmi_smb->mappingtable_mutex);
+
+ rmi_smb->xport.dev = &client->dev;
+ rmi_smb->xport.pdata = *pdata;
+ rmi_smb->xport.pdata.irq = client->irq;
+ rmi_smb->xport.proto_name = "smb2";
+ rmi_smb->xport.ops = &rmi_smb_ops;
+
+ retval = rmi_smb_get_version(rmi_smb);
+ if (retval < 0)
+ return retval;
+
+ smbus_version = retval;
+ rmi_dbg(RMI_DEBUG_XPORT, &client->dev, "SMBus version is %d\n",
+ smbus_version);
+
+ if (smbus_version != 2) {
+ dev_err(&client->dev, "Unrecognized SMB version %d.\n",
+ smbus_version);
+ return -ENODEV;
+ }
+
+ i2c_set_clientdata(client, rmi_smb);
+
+ retval = rmi_register_transport_device(&rmi_smb->xport);
+ if (retval) {
+ dev_err(&client->dev, "Failed to register transport driver at 0x%.2X.\n",
+ client->addr);
+ i2c_set_clientdata(client, NULL);
+ return retval;
+ }
+
+ dev_info(&client->dev, "registered RMI SMBus driver at %#04x.\n",
+ client->addr);
+ return 0;
+}
+
+static int rmi_smb_remove(struct i2c_client *client)
+{
+ struct rmi_smb_xport *rmi_smb = i2c_get_clientdata(client);
+
+ rmi_unregister_transport_device(&rmi_smb->xport);
+
+ return 0;
+}
+
+static int __maybe_unused rmi_smb_suspend(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct rmi_smb_xport *rmi_smb = i2c_get_clientdata(client);
+ int ret;
+
+ ret = rmi_driver_suspend(rmi_smb->xport.rmi_dev, true);
+ if (ret)
+ dev_warn(dev, "Failed to suspend device: %d\n", ret);
+
+ return ret;
+}
+
+static int __maybe_unused rmi_smb_runtime_suspend(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct rmi_smb_xport *rmi_smb = i2c_get_clientdata(client);
+ int ret;
+
+ ret = rmi_driver_suspend(rmi_smb->xport.rmi_dev, false);
+ if (ret)
+ dev_warn(dev, "Failed to suspend device: %d\n", ret);
+
+ return ret;
+}
+
+static int __maybe_unused rmi_smb_resume(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct rmi_smb_xport *rmi_smb = i2c_get_clientdata(client);
+ struct rmi_device *rmi_dev = rmi_smb->xport.rmi_dev;
+ int ret;
+
+ rmi_smb_reset(&rmi_smb->xport, 0);
+
+ rmi_reset(rmi_dev);
+
+ ret = rmi_driver_resume(rmi_smb->xport.rmi_dev, true);
+ if (ret)
+ dev_warn(dev, "Failed to resume device: %d\n", ret);
+
+ return 0;
+}
+
+static int __maybe_unused rmi_smb_runtime_resume(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct rmi_smb_xport *rmi_smb = i2c_get_clientdata(client);
+ int ret;
+
+ ret = rmi_driver_resume(rmi_smb->xport.rmi_dev, false);
+ if (ret)
+ dev_warn(dev, "Failed to resume device: %d\n", ret);
+
+ return 0;
+}
+
+static const struct dev_pm_ops rmi_smb_pm = {
+ SET_SYSTEM_SLEEP_PM_OPS(rmi_smb_suspend, rmi_smb_resume)
+ SET_RUNTIME_PM_OPS(rmi_smb_runtime_suspend, rmi_smb_runtime_resume,
+ NULL)
+};
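+
+/*
+ * Roughly equivalent open-coded form of the two macros above (the
+ * system-sleep macro also wires freeze/thaw/poweroff/restore to the
+ * same handlers):
+ *
+ *   .suspend         = rmi_smb_suspend,
+ *   .resume          = rmi_smb_resume,
+ *   .runtime_suspend = rmi_smb_runtime_suspend,
+ *   .runtime_resume  = rmi_smb_runtime_resume,
+ */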
+
+static const struct i2c_device_id rmi_id[] = {
+ { "rmi4_smbus", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, rmi_id);
+
+static struct i2c_driver rmi_smb_driver = {
+ .driver = {
+ .name = "rmi4_smbus",
+ .pm = &rmi_smb_pm,
+ },
+ .id_table = rmi_id,
+ .probe = rmi_smb_probe,
+ .remove = rmi_smb_remove,
+};
+
+module_i2c_driver(rmi_smb_driver);
+
+MODULE_AUTHOR("Andrew Duggan <aduggan@synaptics.com>");
+MODULE_AUTHOR("Benjamin Tissoires <benjamin.tissoires@redhat.com>");
+MODULE_DESCRIPTION("RMI4 SMBus driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/input/rmi4/rmi_spi.c b/drivers/input/rmi4/rmi_spi.c
index 4ebef607e214..69548d7d1f10 100644
--- a/drivers/input/rmi4/rmi_spi.c
+++ b/drivers/input/rmi4/rmi_spi.c
@@ -12,7 +12,6 @@
#include <linux/rmi.h>
#include <linux/slab.h>
#include <linux/spi/spi.h>
-#include <linux/irq.h>
#include <linux/of.h>
#include "rmi_driver.h"
@@ -44,8 +43,6 @@ struct rmi_spi_xport {
struct mutex page_mutex;
int page;
- int irq;
-
u8 *rx_buf;
u8 *tx_buf;
int xfer_buf_size;
@@ -326,41 +323,6 @@ static const struct rmi_transport_ops rmi_spi_ops = {
.read_block = rmi_spi_read_block,
};
-static irqreturn_t rmi_spi_irq(int irq, void *dev_id)
-{
- struct rmi_spi_xport *rmi_spi = dev_id;
- struct rmi_device *rmi_dev = rmi_spi->xport.rmi_dev;
- int ret;
-
- ret = rmi_process_interrupt_requests(rmi_dev);
- if (ret)
- rmi_dbg(RMI_DEBUG_XPORT, &rmi_dev->dev,
- "Failed to process interrupt request: %d\n", ret);
-
- return IRQ_HANDLED;
-}
-
-static int rmi_spi_init_irq(struct spi_device *spi)
-{
- struct rmi_spi_xport *rmi_spi = spi_get_drvdata(spi);
- int irq_flags = irqd_get_trigger_type(irq_get_irq_data(rmi_spi->irq));
- int ret;
-
- if (!irq_flags)
- irq_flags = IRQF_TRIGGER_LOW;
-
- ret = devm_request_threaded_irq(&spi->dev, rmi_spi->irq, NULL,
- rmi_spi_irq, irq_flags | IRQF_ONESHOT,
- dev_name(&spi->dev), rmi_spi);
- if (ret < 0) {
- dev_warn(&spi->dev, "Failed to register interrupt %d\n",
- rmi_spi->irq);
- return ret;
- }
-
- return 0;
-}
-
#ifdef CONFIG_OF
static int rmi_spi_of_probe(struct spi_device *spi,
struct rmi_device_platform_data *pdata)
@@ -440,8 +402,7 @@ static int rmi_spi_probe(struct spi_device *spi)
return retval;
}
- if (spi->irq > 0)
- rmi_spi->irq = spi->irq;
+ pdata->irq = spi->irq;
rmi_spi->spi = spi;
mutex_init(&rmi_spi->page_mutex);
@@ -477,10 +438,6 @@ static int rmi_spi_probe(struct spi_device *spi)
if (retval)
return retval;
- retval = rmi_spi_init_irq(spi);
- if (retval < 0)
- return retval;
-
dev_info(&spi->dev, "registered RMI SPI driver\n");
return 0;
}
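
With this change the SPI transport no longer requests its own
interrupt: it only forwards the line number via pdata->irq, and the
RMI core is expected to request and manage the IRQ once the transport
device is registered, mirroring the SMBus transport above.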
@@ -492,17 +449,10 @@ static int rmi_spi_suspend(struct device *dev)
struct rmi_spi_xport *rmi_spi = spi_get_drvdata(spi);
int ret;
- ret = rmi_driver_suspend(rmi_spi->xport.rmi_dev);
+ ret = rmi_driver_suspend(rmi_spi->xport.rmi_dev, true);
if (ret)
dev_warn(dev, "Failed to resume device: %d\n", ret);
- disable_irq(rmi_spi->irq);
- if (device_may_wakeup(&spi->dev)) {
- ret = enable_irq_wake(rmi_spi->irq);
- if (!ret)
- dev_warn(dev, "Failed to enable irq for wake: %d\n",
- ret);
- }
return ret;
}
@@ -512,15 +462,7 @@ static int rmi_spi_resume(struct device *dev)
struct rmi_spi_xport *rmi_spi = spi_get_drvdata(spi);
int ret;
- enable_irq(rmi_spi->irq);
- if (device_may_wakeup(&spi->dev)) {
- ret = disable_irq_wake(rmi_spi->irq);
- if (!ret)
- dev_warn(dev, "Failed to disable irq for wake: %d\n",
- ret);
- }
-
- ret = rmi_driver_resume(rmi_spi->xport.rmi_dev);
+ ret = rmi_driver_resume(rmi_spi->xport.rmi_dev, true);
if (ret)
dev_warn(dev, "Failed to resume device: %d\n", ret);
@@ -535,12 +477,10 @@ static int rmi_spi_runtime_suspend(struct device *dev)
struct rmi_spi_xport *rmi_spi = spi_get_drvdata(spi);
int ret;
- ret = rmi_driver_suspend(rmi_spi->xport.rmi_dev);
+ ret = rmi_driver_suspend(rmi_spi->xport.rmi_dev, false);
if (ret)
dev_warn(dev, "Failed to resume device: %d\n", ret);
- disable_irq(rmi_spi->irq);
-
return 0;
}
@@ -550,9 +490,7 @@ static int rmi_spi_runtime_resume(struct device *dev)
struct rmi_spi_xport *rmi_spi = spi_get_drvdata(spi);
int ret;
- enable_irq(rmi_spi->irq);
-
- ret = rmi_driver_resume(rmi_spi->xport.rmi_dev);
+ ret = rmi_driver_resume(rmi_spi->xport.rmi_dev, false);
if (ret)
dev_warn(dev, "Failed to resume device: %d\n", ret);
diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
index 073246c7d163..77551f522202 100644
--- a/drivers/input/serio/i8042-x86ia64io.h
+++ b/drivers/input/serio/i8042-x86ia64io.h
@@ -517,79 +517,7 @@ static const struct dmi_system_id i8042_dmi_noselftest_table[] = {
{
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
- DMI_MATCH(DMI_PRODUCT_NAME, "A455LD"),
- },
- },
- {
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
- DMI_MATCH(DMI_PRODUCT_NAME, "K401LB"),
- },
- },
- {
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
- DMI_MATCH(DMI_PRODUCT_NAME, "K501LB"),
- },
- },
- {
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
- DMI_MATCH(DMI_PRODUCT_NAME, "K501LX"),
- },
- },
- {
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
- DMI_MATCH(DMI_PRODUCT_NAME, "R409L"),
- },
- },
- {
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
- DMI_MATCH(DMI_PRODUCT_NAME, "V502LX"),
- },
- },
- {
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
- DMI_MATCH(DMI_PRODUCT_NAME, "X302LA"),
- },
- },
- {
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
- DMI_MATCH(DMI_PRODUCT_NAME, "X450LCP"),
- },
- },
- {
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
- DMI_MATCH(DMI_PRODUCT_NAME, "X450LD"),
- },
- },
- {
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
- DMI_MATCH(DMI_PRODUCT_NAME, "X455LAB"),
- },
- },
- {
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
- DMI_MATCH(DMI_PRODUCT_NAME, "X455LDB"),
- },
- },
- {
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
- DMI_MATCH(DMI_PRODUCT_NAME, "X455LF"),
- },
- },
- {
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
- DMI_MATCH(DMI_PRODUCT_NAME, "Z450LA"),
+ DMI_MATCH(DMI_CHASSIS_TYPE, "10"), /* Notebook */
},
},
{ }
@@ -1055,7 +983,11 @@ static int __init i8042_pnp_init(void)
#if defined(__ia64__)
return -ENODEV;
#else
- pr_info("PNP: No PS/2 controller found. Probing ports directly.\n");
+ pr_info("PNP: No PS/2 controller found.\n");
+ if (x86_platform.legacy.i8042 !=
+ X86_LEGACY_I8042_EXPECTED_PRESENT)
+ return -ENODEV;
+ pr_info("Probing ports directly.\n");
return 0;
#endif
}
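
Taken together with the i8042_platform_init() hunk below, the firmware
hint now drives the probing decision (sketch, using the two values
named in this patch):

	X86_LEGACY_I8042_PLATFORM_ABSENT        -> -ENODEV, never probe
	no PNP entry, hint != EXPECTED_PRESENT  -> -ENODEV, no blind probing
	no PNP entry, hint == EXPECTED_PRESENT  -> probe the ports directly
	PNP entry found                         -> use the PNP resources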
@@ -1131,10 +1063,10 @@ static int __init i8042_pnp_init(void)
return 0;
}
-#else
+#else /* !CONFIG_PNP */
static inline int i8042_pnp_init(void) { return 0; }
static inline void i8042_pnp_exit(void) { }
-#endif
+#endif /* CONFIG_PNP */
static int __init i8042_platform_init(void)
{
@@ -1142,8 +1074,8 @@ static int __init i8042_platform_init(void)
#ifdef CONFIG_X86
u8 a20_on = 0xdf;
- /* Just return if pre-detection shows no i8042 controller exist */
- if (!x86_platform.i8042_detect())
+ /* Just return if platform does not have i8042 controller */
+ if (x86_platform.legacy.i8042 == X86_LEGACY_I8042_PLATFORM_ABSENT)
return -ENODEV;
#endif
diff --git a/drivers/input/serio/i8042.c b/drivers/input/serio/i8042.c
index 89abfdb539ac..62685a768913 100644
--- a/drivers/input/serio/i8042.c
+++ b/drivers/input/serio/i8042.c
@@ -387,7 +387,7 @@ static int i8042_aux_write(struct serio *serio, unsigned char c)
/*
- * i8042_aux_close attempts to clear AUX or KBD port state by disabling
+ * i8042_port_close attempts to clear AUX or KBD port state by disabling
* and then re-enabling it.
*/
diff --git a/drivers/input/touchscreen/fsl-imx25-tcq.c b/drivers/input/touchscreen/fsl-imx25-tcq.c
index fe9877a6af9e..d50ee490c9cc 100644
--- a/drivers/input/touchscreen/fsl-imx25-tcq.c
+++ b/drivers/input/touchscreen/fsl-imx25-tcq.c
@@ -55,6 +55,7 @@ static const struct of_device_id mx25_tcq_ids[] = {
{ .compatible = "fsl,imx25-tcq", },
{ /* Sentinel */ }
};
+MODULE_DEVICE_TABLE(of, mx25_tcq_ids);
#define TSC_4WIRE_PRE_INDEX 0
#define TSC_4WIRE_X_INDEX 1
diff --git a/drivers/input/touchscreen/imx6ul_tsc.c b/drivers/input/touchscreen/imx6ul_tsc.c
index 8275267eac25..7098e0a47019 100644
--- a/drivers/input/touchscreen/imx6ul_tsc.c
+++ b/drivers/input/touchscreen/imx6ul_tsc.c
@@ -21,17 +21,25 @@
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/io.h>
+#include <linux/log2.h>
/* ADC configuration registers field define */
#define ADC_AIEN (0x1 << 7)
#define ADC_CONV_DISABLE 0x1F
+#define ADC_AVGE (0x1 << 5)
#define ADC_CAL (0x1 << 7)
#define ADC_CALF 0x2
#define ADC_12BIT_MODE (0x2 << 2)
+#define ADC_CONV_MODE_MASK (0x3 << 2)
#define ADC_IPG_CLK 0x00
+#define ADC_INPUT_CLK_MASK 0x3
#define ADC_CLK_DIV_8 (0x03 << 5)
+#define ADC_CLK_DIV_MASK (0x3 << 5)
#define ADC_SHORT_SAMPLE_MODE (0x0 << 4)
+#define ADC_SAMPLE_MODE_MASK (0x1 << 4)
#define ADC_HARDWARE_TRIGGER (0x1 << 13)
+#define ADC_AVGS_SHIFT 14
+#define ADC_AVGS_MASK (0x3 << 14)
#define SELECT_CHANNEL_4 0x04
#define SELECT_CHANNEL_1 0x01
#define DISABLE_CONVERSION_INT (0x0 << 7)
@@ -84,8 +92,10 @@ struct imx6ul_tsc {
struct clk *adc_clk;
struct gpio_desc *xnur_gpio;
- int measure_delay_time;
- int pre_charge_time;
+ u32 measure_delay_time;
+ u32 pre_charge_time;
+ bool average_enable;
+ u32 average_select;
struct completion completion;
};
@@ -96,17 +106,23 @@ struct imx6ul_tsc {
*/
static int imx6ul_adc_init(struct imx6ul_tsc *tsc)
{
- int adc_hc = 0;
- int adc_gc;
- int adc_gs;
- int adc_cfg;
- int timeout;
+ u32 adc_hc = 0;
+ u32 adc_gc;
+ u32 adc_gs;
+ u32 adc_cfg;
+ unsigned long timeout;
reinit_completion(&tsc->completion);
adc_cfg = readl(tsc->adc_regs + REG_ADC_CFG);
+ adc_cfg &= ~(ADC_CONV_MODE_MASK | ADC_INPUT_CLK_MASK);
adc_cfg |= ADC_12BIT_MODE | ADC_IPG_CLK;
+ adc_cfg &= ~(ADC_CLK_DIV_MASK | ADC_SAMPLE_MODE_MASK);
adc_cfg |= ADC_CLK_DIV_8 | ADC_SHORT_SAMPLE_MODE;
+ if (tsc->average_enable) {
+ adc_cfg &= ~ADC_AVGS_MASK;
+ adc_cfg |= (tsc->average_select) << ADC_AVGS_SHIFT;
+ }
adc_cfg &= ~ADC_HARDWARE_TRIGGER;
writel(adc_cfg, tsc->adc_regs + REG_ADC_CFG);
@@ -118,6 +134,8 @@ static int imx6ul_adc_init(struct imx6ul_tsc *tsc)
/* start ADC calibration */
adc_gc = readl(tsc->adc_regs + REG_ADC_GC);
adc_gc |= ADC_CAL;
+ if (tsc->average_enable)
+ adc_gc |= ADC_AVGE;
writel(adc_gc, tsc->adc_regs + REG_ADC_GC);
timeout = wait_for_completion_timeout
@@ -148,7 +166,7 @@ static int imx6ul_adc_init(struct imx6ul_tsc *tsc)
*/
static void imx6ul_tsc_channel_config(struct imx6ul_tsc *tsc)
{
- int adc_hc0, adc_hc1, adc_hc2, adc_hc3, adc_hc4;
+ u32 adc_hc0, adc_hc1, adc_hc2, adc_hc3, adc_hc4;
adc_hc0 = DISABLE_CONVERSION_INT;
writel(adc_hc0, tsc->adc_regs + REG_ADC_HC0);
@@ -173,8 +191,8 @@ static void imx6ul_tsc_channel_config(struct imx6ul_tsc *tsc)
*/
static void imx6ul_tsc_set(struct imx6ul_tsc *tsc)
{
- int basic_setting = 0;
- int start;
+ u32 basic_setting = 0;
+ u32 start;
basic_setting |= tsc->measure_delay_time << 8;
basic_setting |= DETECT_4_WIRE_MODE | AUTO_MEASURE;
@@ -209,8 +227,8 @@ static int imx6ul_tsc_init(struct imx6ul_tsc *tsc)
static void imx6ul_tsc_disable(struct imx6ul_tsc *tsc)
{
- int tsc_flow;
- int adc_cfg;
+ u32 tsc_flow;
+ u32 adc_cfg;
/* TSC controller enters to idle status */
tsc_flow = readl(tsc->tsc_regs + REG_TSC_FLOW_CONTROL);
@@ -227,8 +245,8 @@ static void imx6ul_tsc_disable(struct imx6ul_tsc *tsc)
static bool tsc_wait_detect_mode(struct imx6ul_tsc *tsc)
{
unsigned long timeout = jiffies + msecs_to_jiffies(2);
- int state_machine;
- int debug_mode2;
+ u32 state_machine;
+ u32 debug_mode2;
do {
if (time_after(jiffies, timeout))
@@ -246,10 +264,10 @@ static bool tsc_wait_detect_mode(struct imx6ul_tsc *tsc)
static irqreturn_t tsc_irq_fn(int irq, void *dev_id)
{
struct imx6ul_tsc *tsc = dev_id;
- int status;
- int value;
- int x, y;
- int start;
+ u32 status;
+ u32 value;
+ u32 x, y;
+ u32 start;
status = readl(tsc->tsc_regs + REG_TSC_INT_STATUS);
@@ -289,8 +307,8 @@ static irqreturn_t tsc_irq_fn(int irq, void *dev_id)
static irqreturn_t adc_irq_fn(int irq, void *dev_id)
{
struct imx6ul_tsc *tsc = dev_id;
- int coco;
- int value;
+ u32 coco;
+ u32 value;
coco = readl(tsc->adc_regs + REG_ADC_HS);
if (coco & 0x01) {
@@ -346,6 +364,7 @@ static int imx6ul_tsc_probe(struct platform_device *pdev)
int err;
int tsc_irq;
int adc_irq;
+ u32 average_samples;
tsc = devm_kzalloc(&pdev->dev, sizeof(*tsc), GFP_KERNEL);
if (!tsc)
@@ -450,6 +469,30 @@ static int imx6ul_tsc_probe(struct platform_device *pdev)
if (err)
tsc->pre_charge_time = 0xfff;
+ err = of_property_read_u32(np, "touchscreen-average-samples",
+ &average_samples);
+ if (err)
+ average_samples = 1;
+
+ switch (average_samples) {
+ case 1:
+ tsc->average_enable = false;
+ tsc->average_select = 0; /* value unused; initialize anyway */
+ break;
+ case 4:
+ case 8:
+ case 16:
+ case 32:
+ tsc->average_enable = true;
+ tsc->average_select = ilog2(average_samples) - 2;
+ break;
+ default:
+ dev_err(&pdev->dev,
+ "touchscreen-average-samples (%u) must be 1, 4, 8, 16 or 32\n",
+ average_samples);
+ return -EINVAL;
+ }
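+
+ /*
+ * The encoding is a power of two: 4, 8, 16 and 32 samples map to
+ * AVGS values 0, 1, 2 and 3 (ilog2(n) - 2), which imx6ul_adc_init()
+ * shifts into bits 15:14 of REG_ADC_CFG.
+ */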
+
err = input_register_device(tsc->input);
if (err) {
dev_err(&pdev->dev,
diff --git a/drivers/input/touchscreen/melfas_mip4.c b/drivers/input/touchscreen/melfas_mip4.c
index 552a3773f79d..703d7f983d0a 100644
--- a/drivers/input/touchscreen/melfas_mip4.c
+++ b/drivers/input/touchscreen/melfas_mip4.c
@@ -33,7 +33,7 @@
/*****************************************************************
* Protocol
- * Version : MIP 4.0 Rev 4.6
+ * Version : MIP 4.0 Rev 5.4
*****************************************************************/
/* Address */
@@ -81,6 +81,9 @@
#define MIP4_R1_INFO_IC_HW_CATEGORY 0x77
#define MIP4_R1_INFO_CONTACT_THD_SCR 0x78
#define MIP4_R1_INFO_CONTACT_THD_KEY 0x7A
+#define MIP4_R1_INFO_PID 0x7C
+#define MIP4_R1_INFO_VID 0x7E
+#define MIP4_R1_INFO_SLAVE_ADDR 0x80
#define MIP4_R0_EVENT 0x02
#define MIP4_R1_EVENT_SUPPORTED_FUNC 0x00
@@ -157,7 +160,9 @@ struct mip4_ts {
char phys[32];
char product_name[16];
+ u16 product_id;
char ic_name[4];
+ char fw_name[32];
unsigned int max_x;
unsigned int max_y;
@@ -264,6 +269,23 @@ static int mip4_query_device(struct mip4_ts *ts)
dev_dbg(&ts->client->dev, "product name: %.*s\n",
(int)sizeof(ts->product_name), ts->product_name);
+ /* Product ID */
+ cmd[0] = MIP4_R0_INFO;
+ cmd[1] = MIP4_R1_INFO_PID;
+ error = mip4_i2c_xfer(ts, cmd, sizeof(cmd), buf, 2);
+ if (error) {
+ dev_warn(&ts->client->dev,
+ "Failed to retrieve product id: %d\n", error);
+ } else {
+ ts->product_id = get_unaligned_le16(&buf[0]);
+ dev_dbg(&ts->client->dev, "product id: %04X\n", ts->product_id);
+ }
+
+ /* Firmware name */
+ snprintf(ts->fw_name, sizeof(ts->fw_name),
+ "melfas_mip4_%04X.fw", ts->product_id);
+ dev_dbg(&ts->client->dev, "firmware name: %s\n", ts->fw_name);
+
/* IC name */
cmd[0] = MIP4_R0_INFO;
cmd[1] = MIP4_R1_INFO_IC_NAME;
@@ -1269,11 +1291,11 @@ static ssize_t mip4_sysfs_fw_update(struct device *dev,
const struct firmware *fw;
int error;
- error = request_firmware(&fw, MIP4_FW_NAME, dev);
+ error = request_firmware(&fw, ts->fw_name, dev);
if (error) {
dev_err(&ts->client->dev,
"Failed to retrieve firmware %s: %d\n",
- MIP4_FW_NAME, error);
+ ts->fw_name, error);
return error;
}
@@ -1348,6 +1370,25 @@ static ssize_t mip4_sysfs_read_hw_version(struct device *dev,
static DEVICE_ATTR(hw_version, S_IRUGO, mip4_sysfs_read_hw_version, NULL);
+static ssize_t mip4_sysfs_read_product_id(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct mip4_ts *ts = i2c_get_clientdata(client);
+ size_t count;
+
+ mutex_lock(&ts->input->mutex);
+
+ count = snprintf(buf, PAGE_SIZE, "%04X\n", ts->product_id);
+
+ mutex_unlock(&ts->input->mutex);
+
+ return count;
+}
+
+static DEVICE_ATTR(product_id, S_IRUGO, mip4_sysfs_read_product_id, NULL);
+
static ssize_t mip4_sysfs_read_ic_name(struct device *dev,
struct device_attribute *attr,
char *buf)
@@ -1371,6 +1412,7 @@ static DEVICE_ATTR(ic_name, S_IRUGO, mip4_sysfs_read_ic_name, NULL);
static struct attribute *mip4_attrs[] = {
&dev_attr_fw_version.attr,
&dev_attr_hw_version.attr,
+ &dev_attr_product_id.attr,
&dev_attr_ic_name.attr,
&dev_attr_update_fw.attr,
NULL,
@@ -1435,6 +1477,7 @@ static int mip4_probe(struct i2c_client *client, const struct i2c_device_id *id)
input->id.bustype = BUS_I2C;
input->id.vendor = 0x13c5;
+ input->id.product = ts->product_id;
input->open = mip4_input_open;
input->close = mip4_input_close;
@@ -1572,6 +1615,6 @@ static struct i2c_driver mip4_driver = {
module_i2c_driver(mip4_driver);
MODULE_DESCRIPTION("MELFAS MIP4 Touchscreen");
-MODULE_VERSION("2016.09.28");
+MODULE_VERSION("2016.10.31");
MODULE_AUTHOR("Sangwon Jee <jeesw@melfas.com>");
MODULE_LICENSE("GPL");
diff --git a/drivers/input/touchscreen/raydium_i2c_ts.c b/drivers/input/touchscreen/raydium_i2c_ts.c
index a99fb5cac5a0..2658afa016c9 100644
--- a/drivers/input/touchscreen/raydium_i2c_ts.c
+++ b/drivers/input/touchscreen/raydium_i2c_ts.c
@@ -669,7 +669,7 @@ static int raydium_i2c_do_update_firmware(struct raydium_data *ts,
if (ts->boot_mode == RAYDIUM_TS_MAIN) {
dev_err(&client->dev,
- "failied to jump to boot loader: %d\n",
+ "failed to jump to boot loader: %d\n",
error);
return -EIO;
}
diff --git a/drivers/input/touchscreen/silead.c b/drivers/input/touchscreen/silead.c
index f502c8488be8..404830a4a366 100644
--- a/drivers/input/touchscreen/silead.c
+++ b/drivers/input/touchscreen/silead.c
@@ -29,6 +29,7 @@
#include <linux/input/touchscreen.h>
#include <linux/pm.h>
#include <linux/irq.h>
+#include <linux/regulator/consumer.h>
#include <asm/unaligned.h>
@@ -73,6 +74,7 @@ struct silead_ts_data {
struct i2c_client *client;
struct gpio_desc *gpio_power;
struct input_dev *input;
+ struct regulator_bulk_data regulators[2];
char fw_name[64];
struct touchscreen_properties prop;
u32 max_fingers;
@@ -433,6 +435,13 @@ static int silead_ts_set_default_fw_name(struct silead_ts_data *data,
}
#endif
+static void silead_disable_regulator(void *arg)
+{
+ struct silead_ts_data *data = arg;
+
+ regulator_bulk_disable(ARRAY_SIZE(data->regulators), data->regulators);
+}
+
static int silead_ts_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
@@ -465,6 +474,26 @@ static int silead_ts_probe(struct i2c_client *client,
if (client->irq <= 0)
return -ENODEV;
+ data->regulators[0].supply = "vddio";
+ data->regulators[1].supply = "avdd";
+ error = devm_regulator_bulk_get(dev, ARRAY_SIZE(data->regulators),
+ data->regulators);
+ if (error)
+ return error;
+
+ /*
+ * Enable the regulators at probe and disable them at remove; we need
+ * to keep the chip powered, otherwise it forgets its firmware.
+ */
+ error = regulator_bulk_enable(ARRAY_SIZE(data->regulators),
+ data->regulators);
+ if (error)
+ return error;
+
+ error = devm_add_action_or_reset(dev, silead_disable_regulator, data);
+ if (error)
+ return error;
+
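+ /*
+ * devm_add_action_or_reset() covers every exit path: if registering
+ * the action fails it calls silead_disable_regulator() immediately,
+ * otherwise the action runs on probe failure and on unbind, so no
+ * explicit regulator_bulk_disable() is needed in a remove handler.
+ */
+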
/* Power GPIO pin */
data->gpio_power = devm_gpiod_get_optional(dev, "power", GPIOD_OUT_LOW);
if (IS_ERR(data->gpio_power)) {
diff --git a/drivers/input/touchscreen/wm97xx-core.c b/drivers/input/touchscreen/wm97xx-core.c
index 90d6be3c26cc..83cf11312fd9 100644
--- a/drivers/input/touchscreen/wm97xx-core.c
+++ b/drivers/input/touchscreen/wm97xx-core.c
@@ -682,7 +682,7 @@ static int wm97xx_probe(struct device *dev)
}
platform_set_drvdata(wm->battery_dev, wm);
wm->battery_dev->dev.parent = dev;
- wm->battery_dev->dev.platform_data = pdata;
+ wm->battery_dev->dev.platform_data = pdata->batt_pdata;
ret = platform_device_add(wm->battery_dev);
if (ret < 0)
goto batt_reg_err;
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index 754595ee11b6..019e02707cd5 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -373,6 +373,8 @@ static struct iommu_group *acpihid_device_group(struct device *dev)
if (!entry->group)
entry->group = generic_device_group(dev);
+ else
+ iommu_group_ref_get(entry->group);
return entry->group;
}
diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c
index 157e93421fb8..6799cf9713f7 100644
--- a/drivers/iommu/amd_iommu_init.c
+++ b/drivers/iommu/amd_iommu_init.c
@@ -28,6 +28,7 @@
#include <linux/amd-iommu.h>
#include <linux/export.h>
#include <linux/iommu.h>
+#include <linux/kmemleak.h>
#include <asm/pci-direct.h>
#include <asm/iommu.h>
#include <asm/gart.h>
@@ -2090,6 +2091,7 @@ static struct syscore_ops amd_iommu_syscore_ops = {
static void __init free_on_init_error(void)
{
+ kmemleak_free(irq_lookup_table);
free_pages((unsigned long)irq_lookup_table,
get_order(rlookup_table_size));
@@ -2207,14 +2209,13 @@ static void __init free_dma_resources(void)
static int __init early_amd_iommu_init(void)
{
struct acpi_table_header *ivrs_base;
- acpi_size ivrs_size;
acpi_status status;
int i, remap_cache_sz, ret = 0;
if (!amd_iommu_detected)
return -ENODEV;
- status = acpi_get_table_with_size("IVRS", 0, &ivrs_base, &ivrs_size);
+ status = acpi_get_table("IVRS", 0, &ivrs_base);
if (status == AE_NOT_FOUND)
return -ENODEV;
else if (ACPI_FAILURE(status)) {
@@ -2321,6 +2322,8 @@ static int __init early_amd_iommu_init(void)
irq_lookup_table = (void *)__get_free_pages(
GFP_KERNEL | __GFP_ZERO,
get_order(rlookup_table_size));
+ kmemleak_alloc(irq_lookup_table, rlookup_table_size,
+ 1, GFP_KERNEL);
if (!irq_lookup_table)
goto out;
}
@@ -2334,7 +2337,7 @@ static int __init early_amd_iommu_init(void)
out:
/* Don't leak any ACPI memory */
- early_acpi_os_unmap_memory((char __iomem *)ivrs_base, ivrs_size);
+ acpi_put_table(ivrs_base);
ivrs_base = NULL;
return ret;
@@ -2358,10 +2361,9 @@ out:
static bool detect_ivrs(void)
{
struct acpi_table_header *ivrs_base;
- acpi_size ivrs_size;
acpi_status status;
- status = acpi_get_table_with_size("IVRS", 0, &ivrs_base, &ivrs_size);
+ status = acpi_get_table("IVRS", 0, &ivrs_base);
if (status == AE_NOT_FOUND)
return false;
else if (ACPI_FAILURE(status)) {
@@ -2370,7 +2372,7 @@ static bool detect_ivrs(void)
return false;
}
- early_acpi_os_unmap_memory((char __iomem *)ivrs_base, ivrs_size);
+ acpi_put_table(ivrs_base);
/* Make sure ACS will be enabled during PCI probe */
pci_request_acs();
diff --git a/drivers/iommu/amd_iommu_v2.c b/drivers/iommu/amd_iommu_v2.c
index 594849a3a9be..f8ed8c95b685 100644
--- a/drivers/iommu/amd_iommu_v2.c
+++ b/drivers/iommu/amd_iommu_v2.c
@@ -805,8 +805,10 @@ int amd_iommu_init_device(struct pci_dev *pdev, int pasids)
goto out_free_domain;
group = iommu_group_get(&pdev->dev);
- if (!group)
+ if (!group) {
+ ret = -EINVAL;
goto out_free_domain;
+ }
ret = iommu_attach_group(dev_state->domain, group);
if (ret != 0)
diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c
index e6f9b2d745ca..4d6ec444a9d6 100644
--- a/drivers/iommu/arm-smmu-v3.c
+++ b/drivers/iommu/arm-smmu-v3.c
@@ -20,6 +20,8 @@
* This driver is powered by bad coffee and bombay mix.
*/
+#include <linux/acpi.h>
+#include <linux/acpi_iort.h>
#include <linux/delay.h>
#include <linux/dma-iommu.h>
#include <linux/err.h>
@@ -1358,7 +1360,7 @@ static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size,
} while (size -= granule);
}
-static struct iommu_gather_ops arm_smmu_gather_ops = {
+static const struct iommu_gather_ops arm_smmu_gather_ops = {
.tlb_flush_all = arm_smmu_tlb_inv_context,
.tlb_add_flush = arm_smmu_tlb_inv_range_nosync,
.tlb_sync = arm_smmu_tlb_sync,
@@ -1723,13 +1725,14 @@ static struct platform_driver arm_smmu_driver;
static int arm_smmu_match_node(struct device *dev, void *data)
{
- return dev->of_node == data;
+ return dev->fwnode == data;
}
-static struct arm_smmu_device *arm_smmu_get_by_node(struct device_node *np)
+static
+struct arm_smmu_device *arm_smmu_get_by_fwnode(struct fwnode_handle *fwnode)
{
struct device *dev = driver_find_device(&arm_smmu_driver.driver, NULL,
- np, arm_smmu_match_node);
+ fwnode, arm_smmu_match_node);
put_device(dev);
return dev ? dev_get_drvdata(dev) : NULL;
}
@@ -1765,7 +1768,7 @@ static int arm_smmu_add_device(struct device *dev)
master = fwspec->iommu_priv;
smmu = master->smmu;
} else {
- smmu = arm_smmu_get_by_node(to_of_node(fwspec->iommu_fwnode));
+ smmu = arm_smmu_get_by_fwnode(fwspec->iommu_fwnode);
if (!smmu)
return -ENODEV;
master = kzalloc(sizeof(*master), GFP_KERNEL);
@@ -2380,10 +2383,10 @@ static int arm_smmu_device_reset(struct arm_smmu_device *smmu, bool bypass)
return 0;
}
-static int arm_smmu_device_probe(struct arm_smmu_device *smmu)
+static int arm_smmu_device_hw_probe(struct arm_smmu_device *smmu)
{
u32 reg;
- bool coherent;
+ bool coherent = smmu->features & ARM_SMMU_FEAT_COHERENCY;
/* IDR0 */
reg = readl_relaxed(smmu->base + ARM_SMMU_IDR0);
@@ -2435,13 +2438,9 @@ static int arm_smmu_device_probe(struct arm_smmu_device *smmu)
smmu->features |= ARM_SMMU_FEAT_HYP;
/*
- * The dma-coherent property is used in preference to the ID
+ * The coherency feature as set by FW is used in preference to the ID
* register, but warn on mismatch.
*/
- coherent = of_dma_is_coherent(smmu->dev->of_node);
- if (coherent)
- smmu->features |= ARM_SMMU_FEAT_COHERENCY;
-
if (!!(reg & IDR0_COHACC) != coherent)
dev_warn(smmu->dev, "IDR0.COHACC overridden by dma-coherent property (%s)\n",
coherent ? "true" : "false");
@@ -2562,21 +2561,61 @@ static int arm_smmu_device_probe(struct arm_smmu_device *smmu)
return 0;
}
-static int arm_smmu_device_dt_probe(struct platform_device *pdev)
+#ifdef CONFIG_ACPI
+static int arm_smmu_device_acpi_probe(struct platform_device *pdev,
+ struct arm_smmu_device *smmu)
+{
+ struct acpi_iort_smmu_v3 *iort_smmu;
+ struct device *dev = smmu->dev;
+ struct acpi_iort_node *node;
+
+ node = *(struct acpi_iort_node **)dev_get_platdata(dev);
+
+ /* Retrieve SMMUv3 specific data */
+ iort_smmu = (struct acpi_iort_smmu_v3 *)node->node_data;
+
+ if (iort_smmu->flags & ACPI_IORT_SMMU_V3_COHACC_OVERRIDE)
+ smmu->features |= ARM_SMMU_FEAT_COHERENCY;
+
+ return 0;
+}
+#else
+static inline int arm_smmu_device_acpi_probe(struct platform_device *pdev,
+ struct arm_smmu_device *smmu)
+{
+ return -ENODEV;
+}
+#endif
+
+static int arm_smmu_device_dt_probe(struct platform_device *pdev,
+ struct arm_smmu_device *smmu)
{
- int irq, ret;
- struct resource *res;
- struct arm_smmu_device *smmu;
struct device *dev = &pdev->dev;
- bool bypass = true;
u32 cells;
+ int ret = -EINVAL;
if (of_property_read_u32(dev->of_node, "#iommu-cells", &cells))
dev_err(dev, "missing #iommu-cells property\n");
else if (cells != 1)
dev_err(dev, "invalid #iommu-cells value (%d)\n", cells);
else
- bypass = false;
+ ret = 0;
+
+ parse_driver_options(smmu);
+
+ if (of_dma_is_coherent(dev->of_node))
+ smmu->features |= ARM_SMMU_FEAT_COHERENCY;
+
+ return ret;
+}
+
+static int arm_smmu_device_probe(struct platform_device *pdev)
+{
+ int irq, ret;
+ struct resource *res;
+ struct arm_smmu_device *smmu;
+ struct device *dev = &pdev->dev;
+ bool bypass;
smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL);
if (!smmu) {
@@ -2613,10 +2652,19 @@ static int arm_smmu_device_dt_probe(struct platform_device *pdev)
if (irq > 0)
smmu->gerr_irq = irq;
- parse_driver_options(smmu);
+ if (dev->of_node) {
+ ret = arm_smmu_device_dt_probe(pdev, smmu);
+ } else {
+ ret = arm_smmu_device_acpi_probe(pdev, smmu);
+ if (ret == -ENODEV)
+ return ret;
+ }
+
+ /* Set bypass mode according to firmware probing result */
+ bypass = !!ret;
/* Probe the h/w */
- ret = arm_smmu_device_probe(smmu);
+ ret = arm_smmu_device_hw_probe(smmu);
if (ret)
return ret;
@@ -2634,7 +2682,8 @@ static int arm_smmu_device_dt_probe(struct platform_device *pdev)
return ret;
/* And we're up. Go go go! */
- of_iommu_set_ops(dev->of_node, &arm_smmu_ops);
+ iommu_register_instance(dev->fwnode, &arm_smmu_ops);
+
#ifdef CONFIG_PCI
if (pci_bus_type.iommu_ops != &arm_smmu_ops) {
pci_request_acs();
@@ -2677,7 +2726,7 @@ static struct platform_driver arm_smmu_driver = {
.name = "arm-smmu-v3",
.of_match_table = of_match_ptr(arm_smmu_of_match),
},
- .probe = arm_smmu_device_dt_probe,
+ .probe = arm_smmu_device_probe,
.remove = arm_smmu_device_remove,
};
@@ -2715,6 +2764,17 @@ static int __init arm_smmu_of_init(struct device_node *np)
}
IOMMU_OF_DECLARE(arm_smmuv3, "arm,smmu-v3", arm_smmu_of_init);
+#ifdef CONFIG_ACPI
+static int __init acpi_smmu_v3_init(struct acpi_table_header *table)
+{
+ if (iort_node_match(ACPI_IORT_NODE_SMMU_V3))
+ return arm_smmu_init();
+
+ return 0;
+}
+IORT_ACPI_DECLARE(arm_smmu_v3, ACPI_SIG_IORT, acpi_smmu_v3_init);
+#endif
+
MODULE_DESCRIPTION("IOMMU API for ARM architected SMMUv3 implementations");
MODULE_AUTHOR("Will Deacon <will.deacon@arm.com>");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
index 8f7281444551..a60cded8a6ed 100644
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -28,6 +28,8 @@
#define pr_fmt(fmt) "arm-smmu: " fmt
+#include <linux/acpi.h>
+#include <linux/acpi_iort.h>
#include <linux/atomic.h>
#include <linux/delay.h>
#include <linux/dma-iommu.h>
@@ -247,6 +249,7 @@ enum arm_smmu_s2cr_privcfg {
#define ARM_MMU500_ACTLR_CPRE (1 << 1)
#define ARM_MMU500_ACR_CACHE_LOCK (1 << 26)
+#define ARM_MMU500_ACR_SMTNMB_TLBEN (1 << 8)
#define CB_PAR_F (1 << 0)
@@ -642,7 +645,7 @@ static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size,
}
}
-static struct iommu_gather_ops arm_smmu_gather_ops = {
+static const struct iommu_gather_ops arm_smmu_gather_ops = {
.tlb_flush_all = arm_smmu_tlb_inv_context,
.tlb_add_flush = arm_smmu_tlb_inv_range_nosync,
.tlb_sync = arm_smmu_tlb_sync,
@@ -1379,13 +1382,14 @@ static bool arm_smmu_capable(enum iommu_cap cap)
static int arm_smmu_match_node(struct device *dev, void *data)
{
- return dev->of_node == data;
+ return dev->fwnode == data;
}
-static struct arm_smmu_device *arm_smmu_get_by_node(struct device_node *np)
+static
+struct arm_smmu_device *arm_smmu_get_by_fwnode(struct fwnode_handle *fwnode)
{
struct device *dev = driver_find_device(&arm_smmu_driver.driver, NULL,
- np, arm_smmu_match_node);
+ fwnode, arm_smmu_match_node);
put_device(dev);
return dev ? dev_get_drvdata(dev) : NULL;
}
@@ -1403,7 +1407,7 @@ static int arm_smmu_add_device(struct device *dev)
if (ret)
goto out_free;
} else if (fwspec && fwspec->ops == &arm_smmu_ops) {
- smmu = arm_smmu_get_by_node(to_of_node(fwspec->iommu_fwnode));
+ smmu = arm_smmu_get_by_fwnode(fwspec->iommu_fwnode);
} else {
return -ENODEV;
}
@@ -1478,7 +1482,7 @@ static struct iommu_group *arm_smmu_device_group(struct device *dev)
}
if (group)
- return group;
+ return iommu_group_ref_get(group);
if (dev_is_pci(dev))
group = pci_device_group(dev);
@@ -1581,16 +1585,22 @@ static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
for (i = 0; i < smmu->num_mapping_groups; ++i)
arm_smmu_write_sme(smmu, i);
- /*
- * Before clearing ARM_MMU500_ACTLR_CPRE, need to
- * clear CACHE_LOCK bit of ACR first. And, CACHE_LOCK
- * bit is only present in MMU-500r2 onwards.
- */
- reg = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID7);
- major = (reg >> ID7_MAJOR_SHIFT) & ID7_MAJOR_MASK;
- if ((smmu->model == ARM_MMU500) && (major >= 2)) {
+ if (smmu->model == ARM_MMU500) {
+ /*
+ * Before clearing ARM_MMU500_ACTLR_CPRE, need to
+ * clear CACHE_LOCK bit of ACR first. And, CACHE_LOCK
+ * bit is only present in MMU-500r2 onwards.
+ */
+ reg = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID7);
+ major = (reg >> ID7_MAJOR_SHIFT) & ID7_MAJOR_MASK;
reg = readl_relaxed(gr0_base + ARM_SMMU_GR0_sACR);
- reg &= ~ARM_MMU500_ACR_CACHE_LOCK;
+ if (major >= 2)
+ reg &= ~ARM_MMU500_ACR_CACHE_LOCK;
+ /*
+ * Allow unmatched Stream IDs to allocate bypass
+ * TLB entries for reduced latency.
+ */
+ reg |= ARM_MMU500_ACR_SMTNMB_TLBEN;
writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_sACR);
}
@@ -1667,7 +1677,7 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
unsigned long size;
void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
u32 id;
- bool cttw_dt, cttw_reg;
+ bool cttw_reg, cttw_fw = smmu->features & ARM_SMMU_FEAT_COHERENT_WALK;
int i;
dev_notice(smmu->dev, "probing hardware configuration...\n");
@@ -1712,20 +1722,17 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
/*
* In order for DMA API calls to work properly, we must defer to what
- * the DT says about coherency, regardless of what the hardware claims.
+ * the FW says about coherency, regardless of what the hardware claims.
* Fortunately, this also opens up a workaround for systems where the
* ID register value has ended up configured incorrectly.
*/
- cttw_dt = of_dma_is_coherent(smmu->dev->of_node);
cttw_reg = !!(id & ID0_CTTW);
- if (cttw_dt)
- smmu->features |= ARM_SMMU_FEAT_COHERENT_WALK;
- if (cttw_dt || cttw_reg)
+ if (cttw_fw || cttw_reg)
dev_notice(smmu->dev, "\t%scoherent table walk\n",
- cttw_dt ? "" : "non-");
- if (cttw_dt != cttw_reg)
+ cttw_fw ? "" : "non-");
+ if (cttw_fw != cttw_reg)
dev_notice(smmu->dev,
- "\t(IDR0.CTTW overridden by dma-coherent property)\n");
+ "\t(IDR0.CTTW overridden by FW configuration)\n");
/* Max. number of entries we have for stream matching/indexing */
size = 1 << ((id >> ID0_NUMSIDB_SHIFT) & ID0_NUMSIDB_MASK);
@@ -1906,15 +1913,83 @@ static const struct of_device_id arm_smmu_of_match[] = {
};
MODULE_DEVICE_TABLE(of, arm_smmu_of_match);
-static int arm_smmu_device_dt_probe(struct platform_device *pdev)
+#ifdef CONFIG_ACPI
+static int acpi_smmu_get_data(u32 model, struct arm_smmu_device *smmu)
+{
+ int ret = 0;
+
+ switch (model) {
+ case ACPI_IORT_SMMU_V1:
+ case ACPI_IORT_SMMU_CORELINK_MMU400:
+ smmu->version = ARM_SMMU_V1;
+ smmu->model = GENERIC_SMMU;
+ break;
+ case ACPI_IORT_SMMU_V2:
+ smmu->version = ARM_SMMU_V2;
+ smmu->model = GENERIC_SMMU;
+ break;
+ case ACPI_IORT_SMMU_CORELINK_MMU500:
+ smmu->version = ARM_SMMU_V2;
+ smmu->model = ARM_MMU500;
+ break;
+ default:
+ ret = -ENODEV;
+ }
+
+ return ret;
+}
+
+static int arm_smmu_device_acpi_probe(struct platform_device *pdev,
+ struct arm_smmu_device *smmu)
+{
+ struct device *dev = smmu->dev;
+ struct acpi_iort_node *node =
+ *(struct acpi_iort_node **)dev_get_platdata(dev);
+ struct acpi_iort_smmu *iort_smmu;
+ int ret;
+
+ /* Retrieve SMMU1/2 specific data */
+ iort_smmu = (struct acpi_iort_smmu *)node->node_data;
+
+ ret = acpi_smmu_get_data(iort_smmu->model, smmu);
+ if (ret < 0)
+ return ret;
+
+ /* Ignore the configuration access interrupt */
+ smmu->num_global_irqs = 1;
+
+ if (iort_smmu->flags & ACPI_IORT_SMMU_COHERENT_WALK)
+ smmu->features |= ARM_SMMU_FEAT_COHERENT_WALK;
+
+ return 0;
+}
+#else
+static inline int arm_smmu_device_acpi_probe(struct platform_device *pdev,
+ struct arm_smmu_device *smmu)
+{
+ return -ENODEV;
+}
+#endif
+
+static int arm_smmu_device_dt_probe(struct platform_device *pdev,
+ struct arm_smmu_device *smmu)
{
const struct arm_smmu_match_data *data;
- struct resource *res;
- struct arm_smmu_device *smmu;
struct device *dev = &pdev->dev;
- int num_irqs, i, err;
bool legacy_binding;
+ if (of_property_read_u32(dev->of_node, "#global-interrupts",
+ &smmu->num_global_irqs)) {
+ dev_err(dev, "missing #global-interrupts property\n");
+ return -ENODEV;
+ }
+
+ data = of_device_get_match_data(dev);
+ smmu->version = data->version;
+ smmu->model = data->model;
+
+ parse_driver_options(smmu);
+
legacy_binding = of_find_property(dev->of_node, "mmu-masters", NULL);
if (legacy_binding && !using_generic_binding) {
if (!using_legacy_binding)
@@ -1927,6 +2002,19 @@ static int arm_smmu_device_dt_probe(struct platform_device *pdev)
return -ENODEV;
}
+ if (of_dma_is_coherent(dev->of_node))
+ smmu->features |= ARM_SMMU_FEAT_COHERENT_WALK;
+
+ return 0;
+}
+
+static int arm_smmu_device_probe(struct platform_device *pdev)
+{
+ struct resource *res;
+ struct arm_smmu_device *smmu;
+ struct device *dev = &pdev->dev;
+ int num_irqs, i, err;
+
smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL);
if (!smmu) {
dev_err(dev, "failed to allocate arm_smmu_device\n");
@@ -1934,9 +2022,13 @@ static int arm_smmu_device_dt_probe(struct platform_device *pdev)
}
smmu->dev = dev;
- data = of_device_get_match_data(dev);
- smmu->version = data->version;
- smmu->model = data->model;
+ if (dev->of_node)
+ err = arm_smmu_device_dt_probe(pdev, smmu);
+ else
+ err = arm_smmu_device_acpi_probe(pdev, smmu);
+
+ if (err)
+ return err;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
smmu->base = devm_ioremap_resource(dev, res);
@@ -1944,12 +2036,6 @@ static int arm_smmu_device_dt_probe(struct platform_device *pdev)
return PTR_ERR(smmu->base);
smmu->size = resource_size(res);
- if (of_property_read_u32(dev->of_node, "#global-interrupts",
- &smmu->num_global_irqs)) {
- dev_err(dev, "missing #global-interrupts property\n");
- return -ENODEV;
- }
-
num_irqs = 0;
while ((res = platform_get_resource(pdev, IORESOURCE_IRQ, num_irqs))) {
num_irqs++;
@@ -1984,8 +2070,6 @@ static int arm_smmu_device_dt_probe(struct platform_device *pdev)
if (err)
return err;
- parse_driver_options(smmu);
-
if (smmu->version == ARM_SMMU_V2 &&
smmu->num_context_banks != smmu->num_context_irqs) {
dev_err(dev,
@@ -2007,7 +2091,7 @@ static int arm_smmu_device_dt_probe(struct platform_device *pdev)
}
}
- of_iommu_set_ops(dev->of_node, &arm_smmu_ops);
+ iommu_register_instance(dev->fwnode, &arm_smmu_ops);
platform_set_drvdata(pdev, smmu);
arm_smmu_device_reset(smmu);
@@ -2047,7 +2131,7 @@ static struct platform_driver arm_smmu_driver = {
.name = "arm-smmu",
.of_match_table = of_match_ptr(arm_smmu_of_match),
},
- .probe = arm_smmu_device_dt_probe,
+ .probe = arm_smmu_device_probe,
.remove = arm_smmu_device_remove,
};
@@ -2090,6 +2174,17 @@ IOMMU_OF_DECLARE(arm_mmu401, "arm,mmu-401", arm_smmu_of_init);
IOMMU_OF_DECLARE(arm_mmu500, "arm,mmu-500", arm_smmu_of_init);
IOMMU_OF_DECLARE(cavium_smmuv2, "cavium,smmu-v2", arm_smmu_of_init);
+#ifdef CONFIG_ACPI
+static int __init arm_smmu_acpi_init(struct acpi_table_header *table)
+{
+ if (iort_node_match(ACPI_IORT_NODE_SMMU))
+ return arm_smmu_init();
+
+ return 0;
+}
+IORT_ACPI_DECLARE(arm_smmu, ACPI_SIG_IORT, arm_smmu_acpi_init);
+#endif
+
MODULE_DESCRIPTION("IOMMU API for ARM architected SMMU implementations");
MODULE_AUTHOR("Will Deacon <will.deacon@arm.com>");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index c5ab8667e6f2..2db0d641cf45 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -432,13 +432,12 @@ int iommu_dma_mmap(struct page **pages, size_t size, struct vm_area_struct *vma)
return ret;
}
-dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
- unsigned long offset, size_t size, int prot)
+static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys,
+ size_t size, int prot)
{
dma_addr_t dma_addr;
struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
struct iova_domain *iovad = cookie_iovad(domain);
- phys_addr_t phys = page_to_phys(page) + offset;
size_t iova_off = iova_offset(iovad, phys);
size_t len = iova_align(iovad, size + iova_off);
struct iova *iova = __alloc_iova(domain, len, dma_get_mask(dev));
@@ -454,6 +453,12 @@ dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
return dma_addr + iova_off;
}
+dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
+ unsigned long offset, size_t size, int prot)
+{
+ return __iommu_dma_map(dev, page_to_phys(page) + offset, size, prot);
+}
+
void iommu_dma_unmap_page(struct device *dev, dma_addr_t handle, size_t size,
enum dma_data_direction dir, unsigned long attrs)
{
@@ -624,6 +629,19 @@ void iommu_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
__iommu_dma_unmap(iommu_get_domain_for_dev(dev), sg_dma_address(sg));
}
+dma_addr_t iommu_dma_map_resource(struct device *dev, phys_addr_t phys,
+ size_t size, enum dma_data_direction dir, unsigned long attrs)
+{
+ return __iommu_dma_map(dev, phys, size,
+ dma_direction_to_prot(dir, false) | IOMMU_MMIO);
+}
+
+void iommu_dma_unmap_resource(struct device *dev, dma_addr_t handle,
+ size_t size, enum dma_data_direction dir, unsigned long attrs)
+{
+ __iommu_dma_unmap(iommu_get_domain_for_dev(dev), handle);
+}
+
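+/*
+ * These are intended to back the arch dma_map_ops ->map_resource hooks;
+ * a driver reaches them through the generic API (address hypothetical):
+ *
+ *   dma_addr_t iova = dma_map_resource(dev, 0x40001000, SZ_4K,
+ *                                      DMA_BIDIRECTIONAL, 0);
+ *
+ * with IOMMU_MMIO forced so the IOVA is mapped as MMIO rather than
+ * normal memory.
+ */
+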
int iommu_dma_supported(struct device *dev, u64 mask)
{
/*
diff --git a/drivers/iommu/dmar.c b/drivers/iommu/dmar.c
index 8c53748a769d..a88576d50740 100644
--- a/drivers/iommu/dmar.c
+++ b/drivers/iommu/dmar.c
@@ -68,7 +68,6 @@ DECLARE_RWSEM(dmar_global_lock);
LIST_HEAD(dmar_drhd_units);
struct acpi_table_header * __initdata dmar_tbl;
-static acpi_size dmar_tbl_size;
static int dmar_dev_scope_status = 1;
static unsigned long dmar_seq_ids[BITS_TO_LONGS(DMAR_UNITS_SUPPORTED)];
@@ -543,9 +542,7 @@ static int __init dmar_table_detect(void)
acpi_status status = AE_OK;
/* if we could find DMAR table, then there are DMAR devices */
- status = acpi_get_table_with_size(ACPI_SIG_DMAR, 0,
- (struct acpi_table_header **)&dmar_tbl,
- &dmar_tbl_size);
+ status = acpi_get_table(ACPI_SIG_DMAR, 0, &dmar_tbl);
if (ACPI_SUCCESS(status) && !dmar_tbl) {
pr_warn("Unable to map DMAR\n");
@@ -906,7 +903,7 @@ int __init detect_intel_iommu(void)
x86_init.iommu.iommu_init = intel_iommu_init;
#endif
- early_acpi_os_unmap_memory((void __iomem *)dmar_tbl, dmar_tbl_size);
+ acpi_put_table(dmar_tbl);
dmar_tbl = NULL;
up_write(&dmar_global_lock);
diff --git a/drivers/iommu/exynos-iommu.c b/drivers/iommu/exynos-iommu.c
index 30808e91b775..57ba0d3091ea 100644
--- a/drivers/iommu/exynos-iommu.c
+++ b/drivers/iommu/exynos-iommu.c
@@ -70,6 +70,36 @@ static short PG_ENT_SHIFT = -1;
#define SYSMMU_PG_ENT_SHIFT 0
#define SYSMMU_V5_PG_ENT_SHIFT 4
+static const sysmmu_pte_t *LV1_PROT;
+static const sysmmu_pte_t SYSMMU_LV1_PROT[] = {
+ ((0 << 15) | (0 << 10)), /* no access */
+ ((1 << 15) | (1 << 10)), /* IOMMU_READ only */
+ ((0 << 15) | (1 << 10)), /* IOMMU_WRITE not supported, use read/write */
+ ((0 << 15) | (1 << 10)), /* IOMMU_READ | IOMMU_WRITE */
+};
+static const sysmmu_pte_t SYSMMU_V5_LV1_PROT[] = {
+ (0 << 4), /* no access */
+ (1 << 4), /* IOMMU_READ only */
+ (2 << 4), /* IOMMU_WRITE only */
+ (3 << 4), /* IOMMU_READ | IOMMU_WRITE */
+};
+
+static const sysmmu_pte_t *LV2_PROT;
+static const sysmmu_pte_t SYSMMU_LV2_PROT[] = {
+ ((0 << 9) | (0 << 4)), /* no access */
+ ((1 << 9) | (1 << 4)), /* IOMMU_READ only */
+ ((0 << 9) | (1 << 4)), /* IOMMU_WRITE not supported, use read/write */
+ ((0 << 9) | (1 << 4)), /* IOMMU_READ | IOMMU_WRITE */
+};
+static const sysmmu_pte_t SYSMMU_V5_LV2_PROT[] = {
+ (0 << 2), /* no access */
+ (1 << 2), /* IOMMU_READ only */
+ (2 << 2), /* IOMMU_WRITE only */
+ (3 << 2), /* IOMMU_READ | IOMMU_WRITE */
+};
+
+#define SYSMMU_SUPPORTED_PROT_BITS (IOMMU_READ | IOMMU_WRITE)
+
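+/*
+ * The IOMMU prot bitmask (IOMMU_READ = bit 0, IOMMU_WRITE = bit 1)
+ * indexes the tables above directly: IOMMU_READ | IOMMU_WRITE selects
+ * entry 3, i.e. (3 << 2) in a v5 level-2 entry, while pre-v5 SysMMUs
+ * lack a write-only encoding and fall back to read/write.
+ */
+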
#define sect_to_phys(ent) (((phys_addr_t) ent) << PG_ENT_SHIFT)
#define section_phys(sent) (sect_to_phys(*(sent)) & SECT_MASK)
#define section_offs(iova) (iova & (SECT_SIZE - 1))
@@ -97,16 +127,17 @@ static u32 lv2ent_offset(sysmmu_iova_t iova)
#define SPAGES_PER_LPAGE (LPAGE_SIZE / SPAGE_SIZE)
#define lv2table_base(sent) (sect_to_phys(*(sent) & 0xFFFFFFC0))
-#define mk_lv1ent_sect(pa) ((pa >> PG_ENT_SHIFT) | 2)
+#define mk_lv1ent_sect(pa, prot) ((pa >> PG_ENT_SHIFT) | LV1_PROT[prot] | 2)
#define mk_lv1ent_page(pa) ((pa >> PG_ENT_SHIFT) | 1)
-#define mk_lv2ent_lpage(pa) ((pa >> PG_ENT_SHIFT) | 1)
-#define mk_lv2ent_spage(pa) ((pa >> PG_ENT_SHIFT) | 2)
+#define mk_lv2ent_lpage(pa, prot) ((pa >> PG_ENT_SHIFT) | LV2_PROT[prot] | 1)
+#define mk_lv2ent_spage(pa, prot) ((pa >> PG_ENT_SHIFT) | LV2_PROT[prot] | 2)
#define CTRL_ENABLE 0x5
#define CTRL_BLOCK 0x7
#define CTRL_DISABLE 0x0
#define CFG_LRU 0x1
+#define CFG_EAP (1 << 2)
#define CFG_QOS(n) ((n & 0xF) << 7)
#define CFG_ACGEN (1 << 24) /* System MMU 3.3 only */
#define CFG_SYSSEL (1 << 22) /* System MMU 3.2 only */
@@ -206,6 +237,7 @@ static const struct sysmmu_fault_info sysmmu_v5_faults[] = {
struct exynos_iommu_owner {
struct list_head controllers; /* list of sysmmu_drvdata.owner_node */
struct iommu_domain *domain; /* domain this device is attached */
+ struct mutex rpm_lock; /* for runtime pm of all sysmmus */
};
/*
@@ -237,8 +269,8 @@ struct sysmmu_drvdata {
struct clk *aclk; /* SYSMMU's aclk clock */
struct clk *pclk; /* SYSMMU's pclk clock */
struct clk *clk_master; /* master's device clock */
- int activations; /* number of calls to sysmmu_enable */
spinlock_t lock; /* lock for modyfying state */
+ bool active; /* current status */
struct exynos_iommu_domain *domain; /* domain we belong to */
struct list_head domain_node; /* node for domain clients list */
struct list_head owner_node; /* node for owner controllers list */
@@ -251,25 +283,6 @@ static struct exynos_iommu_domain *to_exynos_domain(struct iommu_domain *dom)
return container_of(dom, struct exynos_iommu_domain, domain);
}
-static bool set_sysmmu_active(struct sysmmu_drvdata *data)
-{
- /* return true if the System MMU was not active previously
- and it needs to be initialized */
- return ++data->activations == 1;
-}
-
-static bool set_sysmmu_inactive(struct sysmmu_drvdata *data)
-{
- /* return true if the System MMU is needed to be disabled */
- BUG_ON(data->activations < 1);
- return --data->activations == 0;
-}
-
-static bool is_sysmmu_active(struct sysmmu_drvdata *data)
-{
- return data->activations > 0;
-}
-
static void sysmmu_unblock(struct sysmmu_drvdata *data)
{
writel(CTRL_ENABLE, data->sfrbase + REG_MMU_CTRL);
@@ -388,7 +401,7 @@ static irqreturn_t exynos_sysmmu_irq(int irq, void *dev_id)
unsigned short reg_status, reg_clear;
int ret = -ENOSYS;
- WARN_ON(!is_sysmmu_active(data));
+ WARN_ON(!data->active);
if (MMU_MAJ_VER(data->version) < 5) {
reg_status = REG_INT_STATUS;
@@ -434,40 +447,19 @@ static irqreturn_t exynos_sysmmu_irq(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static void __sysmmu_disable_nocount(struct sysmmu_drvdata *data)
+static void __sysmmu_disable(struct sysmmu_drvdata *data)
{
+ unsigned long flags;
+
clk_enable(data->clk_master);
+ spin_lock_irqsave(&data->lock, flags);
writel(CTRL_DISABLE, data->sfrbase + REG_MMU_CTRL);
writel(0, data->sfrbase + REG_MMU_CFG);
-
- __sysmmu_disable_clocks(data);
-}
-
-static bool __sysmmu_disable(struct sysmmu_drvdata *data)
-{
- bool disabled;
- unsigned long flags;
-
- spin_lock_irqsave(&data->lock, flags);
-
- disabled = set_sysmmu_inactive(data);
-
- if (disabled) {
- data->pgtable = 0;
- data->domain = NULL;
-
- __sysmmu_disable_nocount(data);
-
- dev_dbg(data->sysmmu, "Disabled\n");
- } else {
- dev_dbg(data->sysmmu, "%d times left to disable\n",
- data->activations);
- }
-
+ data->active = false;
spin_unlock_irqrestore(&data->lock, flags);
- return disabled;
+ __sysmmu_disable_clocks(data);
}
static void __sysmmu_init_config(struct sysmmu_drvdata *data)
@@ -481,20 +473,24 @@ static void __sysmmu_init_config(struct sysmmu_drvdata *data)
else
cfg = CFG_QOS(15) | CFG_FLPDCACHE | CFG_ACGEN;
+ cfg |= CFG_EAP; /* enable access protection bits check */
+
writel(cfg, data->sfrbase + REG_MMU_CFG);
}
-static void __sysmmu_enable_nocount(struct sysmmu_drvdata *data)
+static void __sysmmu_enable(struct sysmmu_drvdata *data)
{
+ unsigned long flags;
+
__sysmmu_enable_clocks(data);
+ spin_lock_irqsave(&data->lock, flags);
writel(CTRL_BLOCK, data->sfrbase + REG_MMU_CTRL);
-
__sysmmu_init_config(data);
-
__sysmmu_set_ptbase(data, data->pgtable);
-
writel(CTRL_ENABLE, data->sfrbase + REG_MMU_CTRL);
+ data->active = true;
+ spin_unlock_irqrestore(&data->lock, flags);
/*
* SYSMMU driver keeps master's clock enabled only for the short
@@ -505,48 +501,18 @@ static void __sysmmu_enable_nocount(struct sysmmu_drvdata *data)
clk_disable(data->clk_master);
}
-static int __sysmmu_enable(struct sysmmu_drvdata *data, phys_addr_t pgtable,
- struct exynos_iommu_domain *domain)
-{
- int ret = 0;
- unsigned long flags;
-
- spin_lock_irqsave(&data->lock, flags);
- if (set_sysmmu_active(data)) {
- data->pgtable = pgtable;
- data->domain = domain;
-
- __sysmmu_enable_nocount(data);
-
- dev_dbg(data->sysmmu, "Enabled\n");
- } else {
- ret = (pgtable == data->pgtable) ? 1 : -EBUSY;
-
- dev_dbg(data->sysmmu, "already enabled\n");
- }
-
- if (WARN_ON(ret < 0))
- set_sysmmu_inactive(data); /* decrement count */
-
- spin_unlock_irqrestore(&data->lock, flags);
-
- return ret;
-}
-
static void sysmmu_tlb_invalidate_flpdcache(struct sysmmu_drvdata *data,
sysmmu_iova_t iova)
{
unsigned long flags;
-
spin_lock_irqsave(&data->lock, flags);
- if (is_sysmmu_active(data) && data->version >= MAKE_MMU_VER(3, 3)) {
+ if (data->active && data->version >= MAKE_MMU_VER(3, 3)) {
clk_enable(data->clk_master);
__sysmmu_tlb_invalidate_entry(data, iova, 1);
clk_disable(data->clk_master);
}
spin_unlock_irqrestore(&data->lock, flags);
-
}
static void sysmmu_tlb_invalidate_entry(struct sysmmu_drvdata *data,
@@ -555,7 +521,7 @@ static void sysmmu_tlb_invalidate_entry(struct sysmmu_drvdata *data,
unsigned long flags;
spin_lock_irqsave(&data->lock, flags);
- if (is_sysmmu_active(data)) {
+ if (data->active) {
unsigned int num_inv = 1;
clk_enable(data->clk_master);
@@ -578,9 +544,6 @@ static void sysmmu_tlb_invalidate_entry(struct sysmmu_drvdata *data,
sysmmu_unblock(data);
}
clk_disable(data->clk_master);
- } else {
- dev_dbg(data->master,
- "disabled. Skipping TLB invalidation @ %#x\n", iova);
}
spin_unlock_irqrestore(&data->lock, flags);
}
@@ -652,10 +615,15 @@ static int __init exynos_sysmmu_probe(struct platform_device *pdev)
__sysmmu_get_version(data);
if (PG_ENT_SHIFT < 0) {
- if (MMU_MAJ_VER(data->version) < 5)
+ if (MMU_MAJ_VER(data->version) < 5) {
PG_ENT_SHIFT = SYSMMU_PG_ENT_SHIFT;
- else
+ LV1_PROT = SYSMMU_LV1_PROT;
+ LV2_PROT = SYSMMU_LV2_PROT;
+ } else {
PG_ENT_SHIFT = SYSMMU_V5_PG_ENT_SHIFT;
+ LV1_PROT = SYSMMU_V5_LV1_PROT;
+ LV2_PROT = SYSMMU_V5_LV2_PROT;
+ }
}
pm_runtime_enable(dev);
@@ -665,34 +633,46 @@ static int __init exynos_sysmmu_probe(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM_SLEEP
-static int exynos_sysmmu_suspend(struct device *dev)
+static int __maybe_unused exynos_sysmmu_suspend(struct device *dev)
{
struct sysmmu_drvdata *data = dev_get_drvdata(dev);
+ struct device *master = data->master;
+
+ if (master) {
+ struct exynos_iommu_owner *owner = master->archdata.iommu;
- dev_dbg(dev, "suspend\n");
- if (is_sysmmu_active(data)) {
- __sysmmu_disable_nocount(data);
- pm_runtime_put(dev);
+ mutex_lock(&owner->rpm_lock);
+ if (data->domain) {
+ dev_dbg(data->sysmmu, "saving state\n");
+ __sysmmu_disable(data);
+ }
+ mutex_unlock(&owner->rpm_lock);
}
return 0;
}
-static int exynos_sysmmu_resume(struct device *dev)
+static int __maybe_unused exynos_sysmmu_resume(struct device *dev)
{
struct sysmmu_drvdata *data = dev_get_drvdata(dev);
+ struct device *master = data->master;
+
+ if (master) {
+ struct exynos_iommu_owner *owner = master->archdata.iommu;
- dev_dbg(dev, "resume\n");
- if (is_sysmmu_active(data)) {
- pm_runtime_get_sync(dev);
- __sysmmu_enable_nocount(data);
+ mutex_lock(&owner->rpm_lock);
+ if (data->domain) {
+ dev_dbg(data->sysmmu, "restoring state\n");
+ __sysmmu_enable(data);
+ }
+ mutex_unlock(&owner->rpm_lock);
}
return 0;
}
-#endif
static const struct dev_pm_ops sysmmu_pm_ops = {
- SET_LATE_SYSTEM_SLEEP_PM_OPS(exynos_sysmmu_suspend, exynos_sysmmu_resume)
+ SET_RUNTIME_PM_OPS(exynos_sysmmu_suspend, exynos_sysmmu_resume, NULL)
+ SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
};
static const struct of_device_id sysmmu_of_match[] __initconst = {
@@ -796,9 +776,12 @@ static void exynos_iommu_domain_free(struct iommu_domain *iommu_domain)
spin_lock_irqsave(&domain->lock, flags);
list_for_each_entry_safe(data, next, &domain->clients, domain_node) {
- if (__sysmmu_disable(data))
- data->master = NULL;
+ spin_lock(&data->lock);
+ __sysmmu_disable(data);
+ data->pgtable = 0;
+ data->domain = NULL;
list_del_init(&data->domain_node);
+ spin_unlock(&data->lock);
}
spin_unlock_irqrestore(&domain->lock, flags);
@@ -832,31 +815,34 @@ static void exynos_iommu_detach_device(struct iommu_domain *iommu_domain,
phys_addr_t pagetable = virt_to_phys(domain->pgtable);
struct sysmmu_drvdata *data, *next;
unsigned long flags;
- bool found = false;
if (!has_sysmmu(dev) || owner->domain != iommu_domain)
return;
+ mutex_lock(&owner->rpm_lock);
+
+ list_for_each_entry(data, &owner->controllers, owner_node) {
+ pm_runtime_get_noresume(data->sysmmu);
+ if (pm_runtime_active(data->sysmmu))
+ __sysmmu_disable(data);
+ pm_runtime_put(data->sysmmu);
+ }
+
spin_lock_irqsave(&domain->lock, flags);
list_for_each_entry_safe(data, next, &domain->clients, domain_node) {
- if (data->master == dev) {
- if (__sysmmu_disable(data)) {
- data->master = NULL;
- list_del_init(&data->domain_node);
- }
- pm_runtime_put(data->sysmmu);
- found = true;
- }
+ spin_lock(&data->lock);
+ data->pgtable = 0;
+ data->domain = NULL;
+ list_del_init(&data->domain_node);
+ spin_unlock(&data->lock);
}
+ owner->domain = NULL;
spin_unlock_irqrestore(&domain->lock, flags);
- owner->domain = NULL;
+ mutex_unlock(&owner->rpm_lock);
- if (found)
- dev_dbg(dev, "%s: Detached IOMMU with pgtable %pa\n",
- __func__, &pagetable);
- else
- dev_err(dev, "%s: No IOMMU is attached\n", __func__);
+ dev_dbg(dev, "%s: Detached IOMMU with pgtable %pa\n", __func__,
+ &pagetable);
}
static int exynos_iommu_attach_device(struct iommu_domain *iommu_domain,
@@ -867,7 +853,6 @@ static int exynos_iommu_attach_device(struct iommu_domain *iommu_domain,
struct sysmmu_drvdata *data;
phys_addr_t pagetable = virt_to_phys(domain->pgtable);
unsigned long flags;
- int ret = -ENODEV;
if (!has_sysmmu(dev))
return -ENODEV;
@@ -875,29 +860,32 @@ static int exynos_iommu_attach_device(struct iommu_domain *iommu_domain,
if (owner->domain)
exynos_iommu_detach_device(owner->domain, dev);
+ mutex_lock(&owner->rpm_lock);
+
+ spin_lock_irqsave(&domain->lock, flags);
list_for_each_entry(data, &owner->controllers, owner_node) {
- pm_runtime_get_sync(data->sysmmu);
- ret = __sysmmu_enable(data, pagetable, domain);
- if (ret >= 0) {
- data->master = dev;
-
- spin_lock_irqsave(&domain->lock, flags);
- list_add_tail(&data->domain_node, &domain->clients);
- spin_unlock_irqrestore(&domain->lock, flags);
- }
+ spin_lock(&data->lock);
+ data->pgtable = pagetable;
+ data->domain = domain;
+ list_add_tail(&data->domain_node, &domain->clients);
+ spin_unlock(&data->lock);
}
+ owner->domain = iommu_domain;
+ spin_unlock_irqrestore(&domain->lock, flags);
- if (ret < 0) {
- dev_err(dev, "%s: Failed to attach IOMMU with pgtable %pa\n",
- __func__, &pagetable);
- return ret;
+ list_for_each_entry(data, &owner->controllers, owner_node) {
+ pm_runtime_get_noresume(data->sysmmu);
+ if (pm_runtime_active(data->sysmmu))
+ __sysmmu_enable(data);
+ pm_runtime_put(data->sysmmu);
}
- owner->domain = iommu_domain;
- dev_dbg(dev, "%s: Attached IOMMU with pgtable %pa %s\n",
- __func__, &pagetable, (ret == 0) ? "" : ", again");
+ mutex_unlock(&owner->rpm_lock);
- return ret;
+ dev_dbg(dev, "%s: Attached IOMMU with pgtable %pa\n", __func__,
+ &pagetable);
+
+ return 0;
}
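The pm_runtime_get_noresume()/pm_runtime_active()/pm_runtime_put() triplet used in the attach and detach paths above is the standard idiom for touching hardware only while it is already powered, without forcing a resume. A minimal sketch, assuming a hypothetical runtime-PM-enabled device:

#include <linux/device.h>
#include <linux/pm_runtime.h>

/* "dev" is a hypothetical device with runtime PM enabled. */
static void example_sync_if_active(struct device *dev)
{
	pm_runtime_get_noresume(dev);	/* pin the current RPM state */
	if (pm_runtime_active(dev)) {
		/* device is powered: safe to touch its registers */
	}
	pm_runtime_put(dev);
}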
static sysmmu_pte_t *alloc_lv2entry(struct exynos_iommu_domain *domain,
@@ -954,7 +942,7 @@ static sysmmu_pte_t *alloc_lv2entry(struct exynos_iommu_domain *domain,
static int lv1set_section(struct exynos_iommu_domain *domain,
sysmmu_pte_t *sent, sysmmu_iova_t iova,
- phys_addr_t paddr, short *pgcnt)
+ phys_addr_t paddr, int prot, short *pgcnt)
{
if (lv1ent_section(sent)) {
WARN(1, "Trying mapping on 1MiB@%#08x that is mapped",
@@ -973,7 +961,7 @@ static int lv1set_section(struct exynos_iommu_domain *domain,
*pgcnt = 0;
}
- update_pte(sent, mk_lv1ent_sect(paddr));
+ update_pte(sent, mk_lv1ent_sect(paddr, prot));
spin_lock(&domain->lock);
if (lv1ent_page_zero(sent)) {
@@ -991,13 +979,13 @@ static int lv1set_section(struct exynos_iommu_domain *domain,
}
static int lv2set_page(sysmmu_pte_t *pent, phys_addr_t paddr, size_t size,
- short *pgcnt)
+ int prot, short *pgcnt)
{
if (size == SPAGE_SIZE) {
if (WARN_ON(!lv2ent_fault(pent)))
return -EADDRINUSE;
- update_pte(pent, mk_lv2ent_spage(paddr));
+ update_pte(pent, mk_lv2ent_spage(paddr, prot));
*pgcnt -= 1;
} else { /* size == LPAGE_SIZE */
int i;
@@ -1013,7 +1001,7 @@ static int lv2set_page(sysmmu_pte_t *pent, phys_addr_t paddr, size_t size,
return -EADDRINUSE;
}
- *pent = mk_lv2ent_lpage(paddr);
+ *pent = mk_lv2ent_lpage(paddr, prot);
}
dma_sync_single_for_device(dma_dev, pent_base,
sizeof(*pent) * SPAGES_PER_LPAGE,
@@ -1061,13 +1049,14 @@ static int exynos_iommu_map(struct iommu_domain *iommu_domain,
int ret = -ENOMEM;
BUG_ON(domain->pgtable == NULL);
+ prot &= SYSMMU_SUPPORTED_PROT_BITS;
spin_lock_irqsave(&domain->pgtablelock, flags);
entry = section_entry(domain->pgtable, iova);
if (size == SECT_SIZE) {
- ret = lv1set_section(domain, entry, iova, paddr,
+ ret = lv1set_section(domain, entry, iova, paddr, prot,
&domain->lv2entcnt[lv1ent_offset(iova)]);
} else {
sysmmu_pte_t *pent;
@@ -1078,7 +1067,7 @@ static int exynos_iommu_map(struct iommu_domain *iommu_domain,
if (IS_ERR(pent))
ret = PTR_ERR(pent);
else
- ret = lv2set_page(pent, paddr, size,
+ ret = lv2set_page(pent, paddr, size, prot,
&domain->lv2entcnt[lv1ent_offset(iova)]);
}
@@ -1268,10 +1257,20 @@ static int exynos_iommu_of_xlate(struct device *dev,
return -ENOMEM;
INIT_LIST_HEAD(&owner->controllers);
+ mutex_init(&owner->rpm_lock);
dev->archdata.iommu = owner;
}
list_add_tail(&data->owner_node, &owner->controllers);
+ data->master = dev;
+
+ /*
+ * The SYSMMU is runtime-activated via a device link (a PM dependency)
+ * on its master device, so this driver makes no direct
+ * pm_runtime_get/put calls.
+ */
+ device_link_add(dev, data->sysmmu, DL_FLAG_PM_RUNTIME);
+
return 0;
}
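The of_xlate change above hands all power sequencing to runtime PM, ordered by a device link between master and SYSMMU. A minimal sketch of the device-link pattern, with hypothetical consumer/supplier devices (not the exynos code itself):

#include <linux/device.h>
#include <linux/pm_runtime.h>

static int example_bind(struct device *consumer, struct device *supplier)
{
	/*
	 * With DL_FLAG_PM_RUNTIME, resuming the consumer resumes the
	 * supplier first, so the supplier driver can restore state from
	 * its own runtime PM callbacks instead of open-coded get/put.
	 */
	if (!device_link_add(consumer, supplier, DL_FLAG_PM_RUNTIME))
		return -ENODEV;

	pm_runtime_get_sync(consumer);	/* supplier is now active too */
	/* ... program the hardware ... */
	pm_runtime_put(consumer);
	return 0;
}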
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index d8376c2d18b3..c66c273dfd8a 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -4688,25 +4688,13 @@ static void free_all_cpu_cached_iovas(unsigned int cpu)
}
}
-static int intel_iommu_cpu_notifier(struct notifier_block *nfb,
- unsigned long action, void *v)
+static int intel_iommu_cpu_dead(unsigned int cpu)
{
- unsigned int cpu = (unsigned long)v;
-
- switch (action) {
- case CPU_DEAD:
- case CPU_DEAD_FROZEN:
- free_all_cpu_cached_iovas(cpu);
- flush_unmaps_timeout(cpu);
- break;
- }
- return NOTIFY_OK;
+ free_all_cpu_cached_iovas(cpu);
+ flush_unmaps_timeout(cpu);
+ return 0;
}
-static struct notifier_block intel_iommu_cpu_nb = {
- .notifier_call = intel_iommu_cpu_notifier,
-};
-
static ssize_t intel_iommu_show_version(struct device *dev,
struct device_attribute *attr,
char *buf)
@@ -4855,8 +4843,8 @@ int __init intel_iommu_init(void)
bus_register_notifier(&pci_bus_type, &device_nb);
if (si_domain && !hw_pass_through)
register_memory_notifier(&intel_iommu_memory_nb);
- register_hotcpu_notifier(&intel_iommu_cpu_nb);
-
+ cpuhp_setup_state(CPUHP_IOMMU_INTEL_DEAD, "iommu/intel:dead", NULL,
+ intel_iommu_cpu_dead);
intel_iommu_enabled = 1;
return 0;
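The notifier-to-hotplug-state conversion above is the standard pattern. A minimal sketch of the same API for a hypothetical subsystem that, like intel-iommu here, only needs a teardown ("dead") callback:

#include <linux/cpuhotplug.h>

static int example_cpu_dead(unsigned int cpu)
{
	/* per-CPU cleanup; replaces the CPU_DEAD/CPU_DEAD_FROZEN cases */
	return 0;
}

static int __init example_init(void)
{
	int ret;

	/*
	 * NULL startup callback: nothing to do when a CPU comes online.
	 * intel-iommu uses the fixed CPUHP_IOMMU_INTEL_DEAD slot above;
	 * a generic user would request a dynamic state instead (e.g.
	 * CPUHP_BP_PREPARE_DYN, where available).
	 */
	ret = cpuhp_setup_state(CPUHP_BP_PREPARE_DYN, "example:dead",
				NULL, example_cpu_dead);
	return ret < 0 ? ret : 0;	/* dynamic states return a positive id */
}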
diff --git a/drivers/iommu/io-pgtable-arm-v7s.c b/drivers/iommu/io-pgtable-arm-v7s.c
index f50e51c1a9c8..0769276c0537 100644
--- a/drivers/iommu/io-pgtable-arm-v7s.c
+++ b/drivers/iommu/io-pgtable-arm-v7s.c
@@ -793,8 +793,7 @@ static int __init arm_v7s_do_selftests(void)
* Distinct mappings of different granule sizes.
*/
iova = 0;
- i = find_first_bit(&cfg.pgsize_bitmap, BITS_PER_LONG);
- while (i != BITS_PER_LONG) {
+ for_each_set_bit(i, &cfg.pgsize_bitmap, BITS_PER_LONG) {
size = 1UL << i;
if (ops->map(ops, iova, iova, size, IOMMU_READ |
IOMMU_WRITE |
@@ -811,8 +810,6 @@ static int __init arm_v7s_do_selftests(void)
return __FAIL(ops);
iova += SZ_16M;
- i++;
- i = find_next_bit(&cfg.pgsize_bitmap, BITS_PER_LONG, i);
loopnr++;
}
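This loop conversion (repeated in io-pgtable-arm.c below) is purely mechanical: for_each_set_bit() expands to the same find_first_bit()/find_next_bit() calls. A standalone sketch over a hypothetical bitmap:

#include <linux/bitops.h>

static void example_walk(unsigned long bitmap)
{
	unsigned int i;

	/* old form */
	i = find_first_bit(&bitmap, BITS_PER_LONG);
	while (i != BITS_PER_LONG) {
		/* ... use bit i ... */
		i = find_next_bit(&bitmap, BITS_PER_LONG, i + 1);
	}

	/* new form, identical iteration order */
	for_each_set_bit(i, &bitmap, BITS_PER_LONG) {
		/* ... use bit i ... */
	}
}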
diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c
index f5c90e1366ce..a40ce3406fef 100644
--- a/drivers/iommu/io-pgtable-arm.c
+++ b/drivers/iommu/io-pgtable-arm.c
@@ -916,7 +916,7 @@ static void dummy_tlb_sync(void *cookie)
WARN_ON(cookie != cfg_cookie);
}
-static struct iommu_gather_ops dummy_tlb_ops __initdata = {
+static const struct iommu_gather_ops dummy_tlb_ops __initconst = {
.tlb_flush_all = dummy_tlb_flush_all,
.tlb_add_flush = dummy_tlb_add_flush,
.tlb_sync = dummy_tlb_sync,
@@ -980,8 +980,7 @@ static int __init arm_lpae_run_tests(struct io_pgtable_cfg *cfg)
* Distinct mappings of different granule sizes.
*/
iova = 0;
- j = find_first_bit(&cfg->pgsize_bitmap, BITS_PER_LONG);
- while (j != BITS_PER_LONG) {
+ for_each_set_bit(j, &cfg->pgsize_bitmap, BITS_PER_LONG) {
size = 1UL << j;
if (ops->map(ops, iova, iova, size, IOMMU_READ |
@@ -999,8 +998,6 @@ static int __init arm_lpae_run_tests(struct io_pgtable_cfg *cfg)
return __FAIL(ops, i);
iova += SZ_1G;
- j++;
- j = find_next_bit(&cfg->pgsize_bitmap, BITS_PER_LONG, j);
}
/* Partial unmap */
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index 9a2f1960873b..dbe7f653bb7c 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -552,6 +552,19 @@ struct iommu_group *iommu_group_get(struct device *dev)
EXPORT_SYMBOL_GPL(iommu_group_get);
/**
+ * iommu_group_ref_get - Increment reference on a group
+ * @group: the group to use, must not be NULL
+ *
+ * This function is called by iommu drivers to take additional references on an
+ * existing group. Returns the given group for convenience.
+ */
+struct iommu_group *iommu_group_ref_get(struct iommu_group *group)
+{
+ kobject_get(group->devices_kobj);
+ return group;
+}
+
+/**
* iommu_group_put - Decrement group reference
* @group: the group to use
*
@@ -1615,6 +1628,46 @@ out:
return ret;
}
+struct iommu_instance {
+ struct list_head list;
+ struct fwnode_handle *fwnode;
+ const struct iommu_ops *ops;
+};
+static LIST_HEAD(iommu_instance_list);
+static DEFINE_SPINLOCK(iommu_instance_lock);
+
+void iommu_register_instance(struct fwnode_handle *fwnode,
+ const struct iommu_ops *ops)
+{
+ struct iommu_instance *iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
+
+ if (WARN_ON(!iommu))
+ return;
+
+ of_node_get(to_of_node(fwnode));
+ INIT_LIST_HEAD(&iommu->list);
+ iommu->fwnode = fwnode;
+ iommu->ops = ops;
+ spin_lock(&iommu_instance_lock);
+ list_add_tail(&iommu->list, &iommu_instance_list);
+ spin_unlock(&iommu_instance_lock);
+}
+
+const struct iommu_ops *iommu_get_instance(struct fwnode_handle *fwnode)
+{
+ struct iommu_instance *instance;
+ const struct iommu_ops *ops = NULL;
+
+ spin_lock(&iommu_instance_lock);
+ list_for_each_entry(instance, &iommu_instance_list, list)
+ if (instance->fwnode == fwnode) {
+ ops = instance->ops;
+ break;
+ }
+ spin_unlock(&iommu_instance_lock);
+ return ops;
+}
+
int iommu_fwspec_init(struct device *dev, struct fwnode_handle *iommu_fwnode,
const struct iommu_ops *ops)
{
diff --git a/drivers/iommu/iova.c b/drivers/iommu/iova.c
index e23001bfcfee..080beca0197d 100644
--- a/drivers/iommu/iova.c
+++ b/drivers/iommu/iova.c
@@ -56,7 +56,7 @@ EXPORT_SYMBOL_GPL(init_iova_domain);
static struct rb_node *
__get_cached_rbnode(struct iova_domain *iovad, unsigned long *limit_pfn)
{
- if ((*limit_pfn != iovad->dma_32bit_pfn) ||
+ if ((*limit_pfn > iovad->dma_32bit_pfn) ||
(iovad->cached32_node == NULL))
return rb_last(&iovad->rbroot);
else {
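The one-character change above widens the cached-node fast path: previously only an allocation whose limit_pfn equalled dma_32bit_pfn reused cached32_node, and everything else fell back to rb_last(). An illustration with hypothetical pfn values:

/*
 * Assuming iovad->dma_32bit_pfn == 0x100000:
 *   limit_pfn == 0x100000 -> cached32_node (old and new behaviour)
 *   limit_pfn == 0x0ff000 -> cached32_node (new); rb_last() before
 *   limit_pfn == 0x200000 -> rb_last() in both cases
 */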
diff --git a/drivers/iommu/mtk_iommu.c b/drivers/iommu/mtk_iommu.c
index b12c12d74c33..1479c76ece9e 100644
--- a/drivers/iommu/mtk_iommu.c
+++ b/drivers/iommu/mtk_iommu.c
@@ -195,14 +195,14 @@ static irqreturn_t mtk_iommu_isr(int irq, void *dev_id)
static void mtk_iommu_config(struct mtk_iommu_data *data,
struct device *dev, bool enable)
{
- struct mtk_iommu_client_priv *head, *cur, *next;
struct mtk_smi_larb_iommu *larb_mmu;
unsigned int larbid, portid;
+ struct iommu_fwspec *fwspec = dev->iommu_fwspec;
+ int i;
- head = dev->archdata.iommu;
- list_for_each_entry_safe(cur, next, &head->client, client) {
- larbid = MTK_M4U_TO_LARB(cur->mtk_m4u_id);
- portid = MTK_M4U_TO_PORT(cur->mtk_m4u_id);
+ for (i = 0; i < fwspec->num_ids; ++i) {
+ larbid = MTK_M4U_TO_LARB(fwspec->ids[i]);
+ portid = MTK_M4U_TO_PORT(fwspec->ids[i]);
larb_mmu = &data->smi_imu.larb_imu[larbid];
dev_dbg(dev, "%s iommu port: %d\n",
@@ -282,14 +282,12 @@ static int mtk_iommu_attach_device(struct iommu_domain *domain,
struct device *dev)
{
struct mtk_iommu_domain *dom = to_mtk_domain(domain);
- struct mtk_iommu_client_priv *priv = dev->archdata.iommu;
- struct mtk_iommu_data *data;
+ struct mtk_iommu_data *data = dev->iommu_fwspec->iommu_priv;
int ret;
- if (!priv)
+ if (!data)
return -ENODEV;
- data = dev_get_drvdata(priv->m4udev);
if (!data->m4u_dom) {
data->m4u_dom = dom;
ret = mtk_iommu_domain_finalise(data);
@@ -310,13 +308,11 @@ static int mtk_iommu_attach_device(struct iommu_domain *domain,
static void mtk_iommu_detach_device(struct iommu_domain *domain,
struct device *dev)
{
- struct mtk_iommu_client_priv *priv = dev->archdata.iommu;
- struct mtk_iommu_data *data;
+ struct mtk_iommu_data *data = dev->iommu_fwspec->iommu_priv;
- if (!priv)
+ if (!data)
return;
- data = dev_get_drvdata(priv->m4udev);
mtk_iommu_config(data, dev, false);
}
@@ -366,8 +362,8 @@ static int mtk_iommu_add_device(struct device *dev)
{
struct iommu_group *group;
- if (!dev->archdata.iommu) /* Not a iommu client device */
- return -ENODEV;
+ if (!dev->iommu_fwspec || dev->iommu_fwspec->ops != &mtk_iommu_ops)
+ return -ENODEV; /* Not an iommu client device */
group = iommu_group_get_for_dev(dev);
if (IS_ERR(group))
@@ -379,44 +375,33 @@ static int mtk_iommu_add_device(struct device *dev)
static void mtk_iommu_remove_device(struct device *dev)
{
- struct mtk_iommu_client_priv *head, *cur, *next;
-
- head = dev->archdata.iommu;
- if (!head)
+ if (!dev->iommu_fwspec || dev->iommu_fwspec->ops != &mtk_iommu_ops)
return;
- list_for_each_entry_safe(cur, next, &head->client, client) {
- list_del(&cur->client);
- kfree(cur);
- }
- kfree(head);
- dev->archdata.iommu = NULL;
-
iommu_group_remove_device(dev);
+ iommu_fwspec_free(dev);
}
static struct iommu_group *mtk_iommu_device_group(struct device *dev)
{
- struct mtk_iommu_data *data;
- struct mtk_iommu_client_priv *priv;
+ struct mtk_iommu_data *data = dev->iommu_fwspec->iommu_priv;
- priv = dev->archdata.iommu;
- if (!priv)
+ if (!data)
return ERR_PTR(-ENODEV);
/* All the client devices are in the same m4u iommu-group */
- data = dev_get_drvdata(priv->m4udev);
if (!data->m4u_group) {
data->m4u_group = iommu_group_alloc();
if (IS_ERR(data->m4u_group))
dev_err(dev, "Failed to allocate M4U IOMMU group\n");
+ } else {
+ iommu_group_ref_get(data->m4u_group);
}
return data->m4u_group;
}
static int mtk_iommu_of_xlate(struct device *dev, struct of_phandle_args *args)
{
- struct mtk_iommu_client_priv *head, *priv, *next;
struct platform_device *m4updev;
if (args->args_count != 1) {
@@ -425,38 +410,16 @@ static int mtk_iommu_of_xlate(struct device *dev, struct of_phandle_args *args)
return -EINVAL;
}
- if (!dev->archdata.iommu) {
+ if (!dev->iommu_fwspec->iommu_priv) {
/* Get the m4u device */
m4updev = of_find_device_by_node(args->np);
if (WARN_ON(!m4updev))
return -EINVAL;
- head = kzalloc(sizeof(*head), GFP_KERNEL);
- if (!head)
- return -ENOMEM;
-
- dev->archdata.iommu = head;
- INIT_LIST_HEAD(&head->client);
- head->m4udev = &m4updev->dev;
- } else {
- head = dev->archdata.iommu;
+ dev->iommu_fwspec->iommu_priv = platform_get_drvdata(m4updev);
}
- priv = kzalloc(sizeof(*priv), GFP_KERNEL);
- if (!priv)
- goto err_free_mem;
-
- priv->mtk_m4u_id = args->args[0];
- list_add_tail(&priv->client, &head->client);
-
- return 0;
-
-err_free_mem:
- list_for_each_entry_safe(priv, next, &head->client, client)
- kfree(priv);
- kfree(head);
- dev->archdata.iommu = NULL;
- return -ENOMEM;
+ return iommu_fwspec_add_ids(dev, args->args, 1);
}
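The conversion above replaces the driver-private client list with the generic per-device iommu_fwspec. A minimal sketch of the resulting of_xlate shape for a hypothetical driver (iommu_fwspec_init() is normally done by the of_iommu core before of_xlate runs):

#include <linux/iommu.h>
#include <linux/of.h>

static int example_of_xlate(struct device *dev,
			    struct of_phandle_args *args)
{
	/*
	 * Master IDs accumulate in dev->iommu_fwspec->ids; per-driver
	 * state hangs off dev->iommu_fwspec->iommu_priv.
	 */
	return iommu_fwspec_add_ids(dev, args->args, args->args_count);
}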
static struct iommu_ops mtk_iommu_ops = {
@@ -583,17 +546,19 @@ static int mtk_iommu_probe(struct platform_device *pdev)
continue;
plarbdev = of_find_device_by_node(larbnode);
- of_node_put(larbnode);
if (!plarbdev) {
plarbdev = of_platform_device_create(
larbnode, NULL,
platform_bus_type.dev_root);
- if (!plarbdev)
+ if (!plarbdev) {
+ of_node_put(larbnode);
return -EPROBE_DEFER;
+ }
}
data->smi_imu.larb_imu[i].dev = &plarbdev->dev;
- component_match_add(dev, &match, compare_of, larbnode);
+ component_match_add_release(dev, &match, release_of,
+ compare_of, larbnode);
}
platform_set_drvdata(pdev, data);
diff --git a/drivers/iommu/mtk_iommu.h b/drivers/iommu/mtk_iommu.h
index 3dab13b4a211..50177f738e4e 100644
--- a/drivers/iommu/mtk_iommu.h
+++ b/drivers/iommu/mtk_iommu.h
@@ -34,12 +34,6 @@ struct mtk_iommu_suspend_reg {
u32 int_main_control;
};
-struct mtk_iommu_client_priv {
- struct list_head client;
- unsigned int mtk_m4u_id;
- struct device *m4udev;
-};
-
struct mtk_iommu_domain;
struct mtk_iommu_data {
@@ -60,6 +54,11 @@ static inline int compare_of(struct device *dev, void *data)
return dev->of_node == data;
}
+static inline void release_of(struct device *dev, void *data)
+{
+ of_node_put(data);
+}
+
static inline int mtk_iommu_bind(struct device *dev)
{
struct mtk_iommu_data *data = dev_get_drvdata(dev);
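release_of() pairs with the component_match_add_release() calls in both probe paths: the component core now owns the of_node reference and drops it when the match data is released, which is why the early of_node_put() calls go away. A minimal sketch, hypothetical apart from the component API and the two helpers above:

#include <linux/component.h>
#include <linux/of.h>

static void example_add_child(struct device *dev,
			      struct component_match **match,
			      struct device_node *np)
{
	of_node_get(np);	/* reference handed over to the match table */
	component_match_add_release(dev, match, release_of, compare_of, np);
}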
diff --git a/drivers/iommu/mtk_iommu_v1.c b/drivers/iommu/mtk_iommu_v1.c
index b8aeb0768483..19e010083408 100644
--- a/drivers/iommu/mtk_iommu_v1.c
+++ b/drivers/iommu/mtk_iommu_v1.c
@@ -204,14 +204,14 @@ static irqreturn_t mtk_iommu_isr(int irq, void *dev_id)
static void mtk_iommu_config(struct mtk_iommu_data *data,
struct device *dev, bool enable)
{
- struct mtk_iommu_client_priv *head, *cur, *next;
struct mtk_smi_larb_iommu *larb_mmu;
unsigned int larbid, portid;
+ struct iommu_fwspec *fwspec = dev->iommu_fwspec;
+ int i;
- head = dev->archdata.iommu;
- list_for_each_entry_safe(cur, next, &head->client, client) {
- larbid = mt2701_m4u_to_larb(cur->mtk_m4u_id);
- portid = mt2701_m4u_to_port(cur->mtk_m4u_id);
+ for (i = 0; i < fwspec->num_ids; ++i) {
+ larbid = mt2701_m4u_to_larb(fwspec->ids[i]);
+ portid = mt2701_m4u_to_port(fwspec->ids[i]);
larb_mmu = &data->smi_imu.larb_imu[larbid];
dev_dbg(dev, "%s iommu port: %d\n",
@@ -271,14 +271,12 @@ static int mtk_iommu_attach_device(struct iommu_domain *domain,
struct device *dev)
{
struct mtk_iommu_domain *dom = to_mtk_domain(domain);
- struct mtk_iommu_client_priv *priv = dev->archdata.iommu;
- struct mtk_iommu_data *data;
+ struct mtk_iommu_data *data = dev->iommu_fwspec->iommu_priv;
int ret;
- if (!priv)
+ if (!data)
return -ENODEV;
- data = dev_get_drvdata(priv->m4udev);
if (!data->m4u_dom) {
data->m4u_dom = dom;
ret = mtk_iommu_domain_finalise(data);
@@ -295,13 +293,11 @@ static int mtk_iommu_attach_device(struct iommu_domain *domain,
static void mtk_iommu_detach_device(struct iommu_domain *domain,
struct device *dev)
{
- struct mtk_iommu_client_priv *priv = dev->archdata.iommu;
- struct mtk_iommu_data *data;
+ struct mtk_iommu_data *data = dev->iommu_fwspec->iommu_priv;
- if (!priv)
+ if (!data)
return;
- data = dev_get_drvdata(priv->m4udev);
mtk_iommu_config(data, dev, false);
}
@@ -366,6 +362,8 @@ static phys_addr_t mtk_iommu_iova_to_phys(struct iommu_domain *domain,
return pa;
}
+static struct iommu_ops mtk_iommu_ops;
+
/*
* MTK generation one iommu HW only supports one iommu domain, and all
* the clients share the same iova address space.
@@ -373,7 +371,7 @@ static phys_addr_t mtk_iommu_iova_to_phys(struct iommu_domain *domain,
static int mtk_iommu_create_mapping(struct device *dev,
struct of_phandle_args *args)
{
- struct mtk_iommu_client_priv *head, *priv, *next;
+ struct mtk_iommu_data *data;
struct platform_device *m4updev;
struct dma_iommu_mapping *mtk_mapping;
struct device *m4udev;
@@ -385,41 +383,37 @@ static int mtk_iommu_create_mapping(struct device *dev,
return -EINVAL;
}
- if (!dev->archdata.iommu) {
+ if (!dev->iommu_fwspec) {
+ ret = iommu_fwspec_init(dev, &args->np->fwnode, &mtk_iommu_ops);
+ if (ret)
+ return ret;
+ } else if (dev->iommu_fwspec->ops != &mtk_iommu_ops) {
+ return -EINVAL;
+ }
+
+ if (!dev->iommu_fwspec->iommu_priv) {
/* Get the m4u device */
m4updev = of_find_device_by_node(args->np);
if (WARN_ON(!m4updev))
return -EINVAL;
- head = kzalloc(sizeof(*head), GFP_KERNEL);
- if (!head)
- return -ENOMEM;
-
- dev->archdata.iommu = head;
- INIT_LIST_HEAD(&head->client);
- head->m4udev = &m4updev->dev;
- } else {
- head = dev->archdata.iommu;
+ dev->iommu_fwspec->iommu_priv = platform_get_drvdata(m4updev);
}
- priv = kzalloc(sizeof(*priv), GFP_KERNEL);
- if (!priv) {
- ret = -ENOMEM;
- goto err_free_mem;
- }
- priv->mtk_m4u_id = args->args[0];
- list_add_tail(&priv->client, &head->client);
+ ret = iommu_fwspec_add_ids(dev, args->args, 1);
+ if (ret)
+ return ret;
- m4udev = head->m4udev;
+ data = dev->iommu_fwspec->iommu_priv;
+ m4udev = data->dev;
mtk_mapping = m4udev->archdata.iommu;
if (!mtk_mapping) {
/* MTK iommu support 4GB iova address space. */
mtk_mapping = arm_iommu_create_mapping(&platform_bus_type,
0, 1ULL << 32);
- if (IS_ERR(mtk_mapping)) {
- ret = PTR_ERR(mtk_mapping);
- goto err_free_mem;
- }
+ if (IS_ERR(mtk_mapping))
+ return PTR_ERR(mtk_mapping);
+
m4udev->archdata.iommu = mtk_mapping;
}
@@ -432,11 +426,6 @@ static int mtk_iommu_create_mapping(struct device *dev,
err_release_mapping:
arm_iommu_release_mapping(mtk_mapping);
m4udev->archdata.iommu = NULL;
-err_free_mem:
- list_for_each_entry_safe(priv, next, &head->client, client)
- kfree(priv);
- kfree(head);
- dev->archdata.iommu = NULL;
return ret;
}
@@ -458,8 +447,8 @@ static int mtk_iommu_add_device(struct device *dev)
of_node_put(iommu_spec.np);
}
- if (!dev->archdata.iommu) /* Not a iommu client device */
- return -ENODEV;
+ if (!dev->iommu_fwspec || dev->iommu_fwspec->ops != &mtk_iommu_ops)
+ return -ENODEV; /* Not an iommu client device */
group = iommu_group_get_for_dev(dev);
if (IS_ERR(group))
@@ -471,37 +460,27 @@ static int mtk_iommu_add_device(struct device *dev)
static void mtk_iommu_remove_device(struct device *dev)
{
- struct mtk_iommu_client_priv *head, *cur, *next;
-
- head = dev->archdata.iommu;
- if (!head)
+ if (!dev->iommu_fwspec || dev->iommu_fwspec->ops != &mtk_iommu_ops)
return;
- list_for_each_entry_safe(cur, next, &head->client, client) {
- list_del(&cur->client);
- kfree(cur);
- }
- kfree(head);
- dev->archdata.iommu = NULL;
-
iommu_group_remove_device(dev);
+ iommu_fwspec_free(dev);
}
static struct iommu_group *mtk_iommu_device_group(struct device *dev)
{
- struct mtk_iommu_data *data;
- struct mtk_iommu_client_priv *priv;
+ struct mtk_iommu_data *data = dev->iommu_fwspec->iommu_priv;
- priv = dev->archdata.iommu;
- if (!priv)
+ if (!data)
return ERR_PTR(-ENODEV);
/* All the client devices are in the same m4u iommu-group */
- data = dev_get_drvdata(priv->m4udev);
if (!data->m4u_group) {
data->m4u_group = iommu_group_alloc();
if (IS_ERR(data->m4u_group))
dev_err(dev, "Failed to allocate M4U IOMMU group\n");
+ } else {
+ iommu_group_ref_get(data->m4u_group);
}
return data->m4u_group;
}
@@ -624,17 +603,19 @@ static int mtk_iommu_probe(struct platform_device *pdev)
continue;
plarbdev = of_find_device_by_node(larb_spec.np);
- of_node_put(larb_spec.np);
if (!plarbdev) {
plarbdev = of_platform_device_create(
larb_spec.np, NULL,
platform_bus_type.dev_root);
- if (!plarbdev)
+ if (!plarbdev) {
+ of_node_put(larb_spec.np);
return -EPROBE_DEFER;
+ }
}
data->smi_imu.larb_imu[larb_nr].dev = &plarbdev->dev;
- component_match_add(dev, &match, compare_of, larb_spec.np);
+ component_match_add_release(dev, &match, release_of,
+ compare_of, larb_spec.np);
larb_nr++;
}
diff --git a/drivers/iommu/of_iommu.c b/drivers/iommu/of_iommu.c
index 5b82862f571f..0f57ddc4ecc2 100644
--- a/drivers/iommu/of_iommu.c
+++ b/drivers/iommu/of_iommu.c
@@ -96,45 +96,6 @@ int of_get_dma_window(struct device_node *dn, const char *prefix, int index,
}
EXPORT_SYMBOL_GPL(of_get_dma_window);
-struct of_iommu_node {
- struct list_head list;
- struct device_node *np;
- const struct iommu_ops *ops;
-};
-static LIST_HEAD(of_iommu_list);
-static DEFINE_SPINLOCK(of_iommu_lock);
-
-void of_iommu_set_ops(struct device_node *np, const struct iommu_ops *ops)
-{
- struct of_iommu_node *iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
-
- if (WARN_ON(!iommu))
- return;
-
- of_node_get(np);
- INIT_LIST_HEAD(&iommu->list);
- iommu->np = np;
- iommu->ops = ops;
- spin_lock(&of_iommu_lock);
- list_add_tail(&iommu->list, &of_iommu_list);
- spin_unlock(&of_iommu_lock);
-}
-
-const struct iommu_ops *of_iommu_get_ops(struct device_node *np)
-{
- struct of_iommu_node *node;
- const struct iommu_ops *ops = NULL;
-
- spin_lock(&of_iommu_lock);
- list_for_each_entry(node, &of_iommu_list, list)
- if (node->np == np) {
- ops = node->ops;
- break;
- }
- spin_unlock(&of_iommu_lock);
- return ops;
-}
-
static int __get_pci_rid(struct pci_dev *pdev, u16 alias, void *data)
{
struct of_phandle_args *iommu_spec = data;
diff --git a/drivers/iommu/s390-iommu.c b/drivers/iommu/s390-iommu.c
index 3b44b1d82f3b..179e636a4d91 100644
--- a/drivers/iommu/s390-iommu.c
+++ b/drivers/iommu/s390-iommu.c
@@ -8,7 +8,6 @@
#include <linux/pci.h>
#include <linux/iommu.h>
#include <linux/iommu-helper.h>
-#include <linux/pci.h>
#include <linux/sizes.h>
#include <asm/pci_dma.h>
diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig
index bc0af3307bbf..ae96731cd2fb 100644
--- a/drivers/irqchip/Kconfig
+++ b/drivers/irqchip/Kconfig
@@ -211,6 +211,10 @@ config XTENSA_MX
bool
select IRQ_DOMAIN
+config XILINX_INTC
+ bool
+ select IRQ_DOMAIN
+
config IRQ_CROSSBAR
bool
help
diff --git a/drivers/irqchip/Makefile b/drivers/irqchip/Makefile
index e4dbfc85abdb..0e55d94065bf 100644
--- a/drivers/irqchip/Makefile
+++ b/drivers/irqchip/Makefile
@@ -52,6 +52,7 @@ obj-$(CONFIG_TB10X_IRQC) += irq-tb10x.o
obj-$(CONFIG_TS4800_IRQ) += irq-ts4800.o
obj-$(CONFIG_XTENSA) += irq-xtensa-pic.o
obj-$(CONFIG_XTENSA_MX) += irq-xtensa-mx.o
+obj-$(CONFIG_XILINX_INTC) += irq-xilinx-intc.o
obj-$(CONFIG_IRQ_CROSSBAR) += irq-crossbar.o
obj-$(CONFIG_SOC_VF610) += irq-vf610-mscm-ir.o
obj-$(CONFIG_BCM6345_L1_IRQ) += irq-bcm6345-l1.o
diff --git a/drivers/irqchip/irq-bcm7038-l1.c b/drivers/irqchip/irq-bcm7038-l1.c
index 353c54986211..c2662a1bfdd3 100644
--- a/drivers/irqchip/irq-bcm7038-l1.c
+++ b/drivers/irqchip/irq-bcm7038-l1.c
@@ -215,6 +215,31 @@ static int bcm7038_l1_set_affinity(struct irq_data *d,
return 0;
}
+static void bcm7038_l1_cpu_offline(struct irq_data *d)
+{
+ struct cpumask *mask = irq_data_get_affinity_mask(d);
+ int cpu = smp_processor_id();
+ cpumask_t new_affinity;
+
+ /* This CPU was not in the affinity mask */
+ if (!cpumask_test_cpu(cpu, mask))
+ return;
+
+ if (cpumask_weight(mask) > 1) {
+ /*
+ * Multiple CPUs in the affinity mask: remove only this
+ * CPU from it
+ */
+ cpumask_copy(&new_affinity, mask);
+ cpumask_clear_cpu(cpu, &new_affinity);
+ } else {
+ /* This was the only CPU; move to the first online CPU */
+ cpumask_clear(&new_affinity);
+ cpumask_set_cpu(cpumask_first(cpu_online_mask), &new_affinity);
+ }
+ irq_set_affinity_locked(d, &new_affinity, false);
+}
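The irq core does not call this hook by itself; on the MIPS/BMIPS platforms this driver serves, the architecture's CPU-disable path invokes irq_cpu_offline(), which walks the irq descriptors and calls chip->irq_cpu_offline for each. A hedged sketch of such a call site (the exact location lives in architecture code):

#include <linux/irq.h>

static int example_cpu_disable(void)
{
	local_irq_disable();
	irq_cpu_offline();	/* ends up in bcm7038_l1_cpu_offline() */
	/* ... arch-specific teardown ... */
	return 0;
}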
+
static int __init bcm7038_l1_init_one(struct device_node *dn,
unsigned int idx,
struct bcm7038_l1_chip *intc)
@@ -266,6 +291,7 @@ static struct irq_chip bcm7038_l1_irq_chip = {
.irq_mask = bcm7038_l1_mask,
.irq_unmask = bcm7038_l1_unmask,
.irq_set_affinity = bcm7038_l1_set_affinity,
+ .irq_cpu_offline = bcm7038_l1_cpu_offline,
};
static int bcm7038_l1_map(struct irq_domain *d, unsigned int virq,
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index c5dee300e8a3..69b040f47d56 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -37,7 +37,6 @@
#include <linux/irqchip.h>
#include <linux/irqchip/arm-gic-v3.h>
-#include <asm/cacheflush.h>
#include <asm/cputype.h>
#include <asm/exception.h>
@@ -196,7 +195,7 @@ typedef struct its_collection *(*its_cmd_builder_t)(struct its_cmd_block *,
static void its_encode_cmd(struct its_cmd_block *cmd, u8 cmd_nr)
{
- cmd->raw_cmd[0] &= ~0xffUL;
+ cmd->raw_cmd[0] &= ~0xffULL;
cmd->raw_cmd[0] |= cmd_nr;
}
@@ -208,43 +207,43 @@ static void its_encode_devid(struct its_cmd_block *cmd, u32 devid)
static void its_encode_event_id(struct its_cmd_block *cmd, u32 id)
{
- cmd->raw_cmd[1] &= ~0xffffffffUL;
+ cmd->raw_cmd[1] &= ~0xffffffffULL;
cmd->raw_cmd[1] |= id;
}
static void its_encode_phys_id(struct its_cmd_block *cmd, u32 phys_id)
{
- cmd->raw_cmd[1] &= 0xffffffffUL;
+ cmd->raw_cmd[1] &= 0xffffffffULL;
cmd->raw_cmd[1] |= ((u64)phys_id) << 32;
}
static void its_encode_size(struct its_cmd_block *cmd, u8 size)
{
- cmd->raw_cmd[1] &= ~0x1fUL;
+ cmd->raw_cmd[1] &= ~0x1fULL;
cmd->raw_cmd[1] |= size & 0x1f;
}
static void its_encode_itt(struct its_cmd_block *cmd, u64 itt_addr)
{
- cmd->raw_cmd[2] &= ~0xffffffffffffUL;
- cmd->raw_cmd[2] |= itt_addr & 0xffffffffff00UL;
+ cmd->raw_cmd[2] &= ~0xffffffffffffULL;
+ cmd->raw_cmd[2] |= itt_addr & 0xffffffffff00ULL;
}
static void its_encode_valid(struct its_cmd_block *cmd, int valid)
{
- cmd->raw_cmd[2] &= ~(1UL << 63);
+ cmd->raw_cmd[2] &= ~(1ULL << 63);
cmd->raw_cmd[2] |= ((u64)!!valid) << 63;
}
static void its_encode_target(struct its_cmd_block *cmd, u64 target_addr)
{
- cmd->raw_cmd[2] &= ~(0xffffffffUL << 16);
- cmd->raw_cmd[2] |= (target_addr & (0xffffffffUL << 16));
+ cmd->raw_cmd[2] &= ~(0xffffffffULL << 16);
+ cmd->raw_cmd[2] |= (target_addr & (0xffffffffULL << 16));
}
static void its_encode_collection(struct its_cmd_block *cmd, u16 col)
{
- cmd->raw_cmd[2] &= ~0xffffUL;
+ cmd->raw_cmd[2] &= ~0xffffULL;
cmd->raw_cmd[2] |= col;
}
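The UL-to-ULL changes above are not cosmetic: raw_cmd[] is u64, but on 32-bit kernels unsigned long is 32 bits, so the old masks truncated (and 1UL << 63 was outright undefined). A worked example of the failure mode:

#include <linux/types.h>

static void example_mask(u64 *raw)
{
	/*
	 * On a 32-bit kernel 0xffffffffUL is already all ones in 32 bits,
	 * so ~0xffffffffUL == 0 and the and-mask wipes the whole word.
	 */
	*raw &= ~0xffffffffUL;		/* BUG on 32-bit: *raw becomes 0 */

	/* ULL keeps the constant (and the ~) in 64-bit arithmetic. */
	*raw &= ~0xffffffffULL;		/* clears only the low 32 bits */
}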
@@ -433,7 +432,7 @@ static void its_flush_cmd(struct its_node *its, struct its_cmd_block *cmd)
* the ITS.
*/
if (its->flags & ITS_FLAGS_CMDQ_NEEDS_FLUSHING)
- __flush_dcache_area(cmd, sizeof(*cmd));
+ gic_flush_dcache_to_poc(cmd, sizeof(*cmd));
else
dsb(ishst);
}
@@ -602,7 +601,7 @@ static void lpi_set_config(struct irq_data *d, bool enable)
* Humpf...
*/
if (gic_rdists->flags & RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING)
- __flush_dcache_area(cfg, sizeof(*cfg));
+ gic_flush_dcache_to_poc(cfg, sizeof(*cfg));
else
dsb(ishst);
its_send_inv(its_dev, id);
@@ -657,8 +656,8 @@ static void its_irq_compose_msi_msg(struct irq_data *d, struct msi_msg *msg)
its = its_dev->its;
addr = its->phys_base + GITS_TRANSLATER;
- msg->address_lo = addr & ((1UL << 32) - 1);
- msg->address_hi = addr >> 32;
+ msg->address_lo = lower_32_bits(addr);
+ msg->address_hi = upper_32_bits(addr);
msg->data = its_get_event_id(d);
iommu_dma_map_msi_msg(d->irq, msg);
@@ -817,7 +816,7 @@ static int __init its_alloc_lpi_tables(void)
LPI_PROPBASE_SZ);
/* Make sure the GIC will observe the written configuration */
- __flush_dcache_area(page_address(gic_rdists->prop_page), LPI_PROPBASE_SZ);
+ gic_flush_dcache_to_poc(page_address(gic_rdists->prop_page), LPI_PROPBASE_SZ);
return 0;
}
@@ -836,7 +835,7 @@ static u64 its_read_baser(struct its_node *its, struct its_baser *baser)
{
u32 idx = baser - its->tables;
- return readq_relaxed(its->base + GITS_BASER + (idx << 3));
+ return gits_read_baser(its->base + GITS_BASER + (idx << 3));
}
static void its_write_baser(struct its_node *its, struct its_baser *baser,
@@ -844,7 +843,7 @@ static void its_write_baser(struct its_node *its, struct its_baser *baser,
{
u32 idx = baser - its->tables;
- writeq_relaxed(val, its->base + GITS_BASER + (idx << 3));
+ gits_write_baser(val, its->base + GITS_BASER + (idx << 3));
baser->val = its_read_baser(its, baser);
}
@@ -910,7 +909,7 @@ retry_baser:
shr = tmp & GITS_BASER_SHAREABILITY_MASK;
if (!shr) {
cache = GITS_BASER_nC;
- __flush_dcache_area(base, PAGE_ORDER_TO_SIZE(order));
+ gic_flush_dcache_to_poc(base, PAGE_ORDER_TO_SIZE(order));
}
goto retry_baser;
}
@@ -935,9 +934,9 @@ retry_baser:
}
if (val != tmp) {
- pr_err("ITS@%pa: %s doesn't stick: %lx %lx\n",
+ pr_err("ITS@%pa: %s doesn't stick: %llx %llx\n",
&its->phys_base, its_base_type_string[type],
- (unsigned long) val, (unsigned long) tmp);
+ val, tmp);
free_pages((unsigned long)base, order);
return -ENXIO;
}
@@ -948,7 +947,7 @@ retry_baser:
tmp = indirect ? GITS_LVL1_ENTRY_SIZE : esz;
pr_info("ITS@%pa: allocated %d %s @%lx (%s, esz %d, psz %dK, shr %d)\n",
- &its->phys_base, (int)(PAGE_ORDER_TO_SIZE(order) / tmp),
+ &its->phys_base, (int)(PAGE_ORDER_TO_SIZE(order) / (int)tmp),
its_base_type_string[type],
(unsigned long)virt_to_phys(base),
indirect ? "indirect" : "flat", (int)esz,
@@ -983,7 +982,7 @@ static bool its_parse_baser_device(struct its_node *its, struct its_baser *baser
* which is reported by ITS hardware times lvl1 table
* entry size.
*/
- ids -= ilog2(psz / esz);
+ ids -= ilog2(psz / (int)esz);
esz = GITS_LVL1_ENTRY_SIZE;
}
}
@@ -998,7 +997,7 @@ static bool its_parse_baser_device(struct its_node *its, struct its_baser *baser
new_order = max_t(u32, get_order(esz << ids), new_order);
if (new_order >= MAX_ORDER) {
new_order = MAX_ORDER - 1;
- ids = ilog2(PAGE_ORDER_TO_SIZE(new_order) / esz);
+ ids = ilog2(PAGE_ORDER_TO_SIZE(new_order) / (int)esz);
pr_warn("ITS@%pa: Device Table too large, reduce ids %u->%u\n",
&its->phys_base, its->device_ids, ids);
}
@@ -1102,7 +1101,7 @@ static void its_cpu_init_lpis(void)
}
/* Make sure the GIC will observe the zero-ed page */
- __flush_dcache_area(page_address(pend_page), LPI_PENDBASE_SZ);
+ gic_flush_dcache_to_poc(page_address(pend_page), LPI_PENDBASE_SZ);
paddr = page_to_phys(pend_page);
pr_info("CPU%d: using LPI pending table @%pa\n",
@@ -1126,8 +1125,8 @@ static void its_cpu_init_lpis(void)
GICR_PROPBASER_WaWb |
((LPI_NRBITS - 1) & GICR_PROPBASER_IDBITS_MASK));
- writeq_relaxed(val, rbase + GICR_PROPBASER);
- tmp = readq_relaxed(rbase + GICR_PROPBASER);
+ gicr_write_propbaser(val, rbase + GICR_PROPBASER);
+ tmp = gicr_read_propbaser(rbase + GICR_PROPBASER);
if ((tmp ^ val) & GICR_PROPBASER_SHAREABILITY_MASK) {
if (!(tmp & GICR_PROPBASER_SHAREABILITY_MASK)) {
@@ -1139,7 +1138,7 @@ static void its_cpu_init_lpis(void)
val &= ~(GICR_PROPBASER_SHAREABILITY_MASK |
GICR_PROPBASER_CACHEABILITY_MASK);
val |= GICR_PROPBASER_nC;
- writeq_relaxed(val, rbase + GICR_PROPBASER);
+ gicr_write_propbaser(val, rbase + GICR_PROPBASER);
}
pr_info_once("GIC: using cache flushing for LPI property table\n");
gic_rdists->flags |= RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING;
@@ -1150,8 +1149,8 @@ static void its_cpu_init_lpis(void)
GICR_PENDBASER_InnerShareable |
GICR_PENDBASER_WaWb);
- writeq_relaxed(val, rbase + GICR_PENDBASER);
- tmp = readq_relaxed(rbase + GICR_PENDBASER);
+ gicr_write_pendbaser(val, rbase + GICR_PENDBASER);
+ tmp = gicr_read_pendbaser(rbase + GICR_PENDBASER);
if (!(tmp & GICR_PENDBASER_SHAREABILITY_MASK)) {
/*
@@ -1161,7 +1160,7 @@ static void its_cpu_init_lpis(void)
val &= ~(GICR_PENDBASER_SHAREABILITY_MASK |
GICR_PENDBASER_CACHEABILITY_MASK);
val |= GICR_PENDBASER_nC;
- writeq_relaxed(val, rbase + GICR_PENDBASER);
+ gicr_write_pendbaser(val, rbase + GICR_PENDBASER);
}
/* Enable LPIs */
@@ -1287,13 +1286,13 @@ static bool its_alloc_device_table(struct its_node *its, u32 dev_id)
/* Flush Lvl2 table to PoC if hw doesn't support coherency */
if (!(baser->val & GITS_BASER_SHAREABILITY_MASK))
- __flush_dcache_area(page_address(page), baser->psz);
+ gic_flush_dcache_to_poc(page_address(page), baser->psz);
table[idx] = cpu_to_le64(page_to_phys(page) | GITS_BASER_VALID);
/* Flush Lvl1 entry to PoC if hw doesn't support coherency */
if (!(baser->val & GITS_BASER_SHAREABILITY_MASK))
- __flush_dcache_area(table + idx, GITS_LVL1_ENTRY_SIZE);
+ gic_flush_dcache_to_poc(table + idx, GITS_LVL1_ENTRY_SIZE);
/* Ensure updated table contents are visible to ITS hardware */
dsb(sy);
@@ -1340,7 +1339,7 @@ static struct its_device *its_create_device(struct its_node *its, u32 dev_id,
return NULL;
}
- __flush_dcache_area(itt, sz);
+ gic_flush_dcache_to_poc(itt, sz);
dev->its = its;
dev->itt = itt;
@@ -1717,8 +1716,8 @@ static int __init its_probe_one(struct resource *res,
(ITS_CMD_QUEUE_SZ / SZ_4K - 1) |
GITS_CBASER_VALID);
- writeq_relaxed(baser, its->base + GITS_CBASER);
- tmp = readq_relaxed(its->base + GITS_CBASER);
+ gits_write_cbaser(baser, its->base + GITS_CBASER);
+ tmp = gits_read_cbaser(its->base + GITS_CBASER);
if ((tmp ^ baser) & GITS_CBASER_SHAREABILITY_MASK) {
if (!(tmp & GITS_CBASER_SHAREABILITY_MASK)) {
@@ -1730,13 +1729,13 @@ static int __init its_probe_one(struct resource *res,
baser &= ~(GITS_CBASER_SHAREABILITY_MASK |
GITS_CBASER_CACHEABILITY_MASK);
baser |= GITS_CBASER_nC;
- writeq_relaxed(baser, its->base + GITS_CBASER);
+ gits_write_cbaser(baser, its->base + GITS_CBASER);
}
pr_info("ITS: using cache flushing for cmd queue\n");
its->flags |= ITS_FLAGS_CMDQ_NEEDS_FLUSHING;
}
- writeq_relaxed(0, its->base + GITS_CWRITER);
+ gits_write_cwriter(0, its->base + GITS_CWRITER);
writel_relaxed(GITS_CTLR_ENABLE, its->base + GITS_CTLR);
err = its_init_domain(handle, its);
diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
index 19d642eae096..26e1d7fafb1e 100644
--- a/drivers/irqchip/irq-gic-v3.c
+++ b/drivers/irqchip/irq-gic-v3.c
@@ -120,11 +120,10 @@ static void gic_redist_wait_for_rwp(void)
}
#ifdef CONFIG_ARM64
-static DEFINE_STATIC_KEY_FALSE(is_cavium_thunderx);
static u64 __maybe_unused gic_read_iar(void)
{
- if (static_branch_unlikely(&is_cavium_thunderx))
+ if (cpus_have_const_cap(ARM64_WORKAROUND_CAVIUM_23154))
return gic_read_iar_cavium_thunderx();
else
return gic_read_iar_common();
@@ -905,14 +904,6 @@ static const struct irq_domain_ops partition_domain_ops = {
.select = gic_irq_domain_select,
};
-static void gicv3_enable_quirks(void)
-{
-#ifdef CONFIG_ARM64
- if (cpus_have_cap(ARM64_WORKAROUND_CAVIUM_23154))
- static_branch_enable(&is_cavium_thunderx);
-#endif
-}
-
static int __init gic_init_bases(void __iomem *dist_base,
struct redist_region *rdist_regs,
u32 nr_redist_regions,
@@ -935,8 +926,6 @@ static int __init gic_init_bases(void __iomem *dist_base,
gic_data.nr_redist_regions = nr_redist_regions;
gic_data.redist_stride = redist_stride;
- gicv3_enable_quirks();
-
/*
* Find out how many interrupts are supported.
* The GIC only supports up to 1020 interrupt sources (SGI+PPI+SPI)
diff --git a/drivers/irqchip/irq-st.c b/drivers/irqchip/irq-st.c
index 9af48a85c16f..5e0e250db0be 100644
--- a/drivers/irqchip/irq-st.c
+++ b/drivers/irqchip/irq-st.c
@@ -180,7 +180,7 @@ static int st_irq_syscfg_probe(struct platform_device *pdev)
return st_irq_syscfg_enable(pdev);
}
-static int st_irq_syscfg_resume(struct device *dev)
+static int __maybe_unused st_irq_syscfg_resume(struct device *dev)
{
struct st_irq_syscfg *ddata = dev_get_drvdata(dev);
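__maybe_unused replaces #ifdef CONFIG_PM_SLEEP guards: the PM-ops macros only reference the callback when sleep support is configured, so without the attribute a !CONFIG_PM_SLEEP build warns about a defined-but-unused function. A minimal sketch of the pattern:

#include <linux/device.h>
#include <linux/pm.h>

static int __maybe_unused example_resume(struct device *dev)
{
	return 0;	/* reprogram hardware here */
}

/*
 * SIMPLE_DEV_PM_OPS() expands to an empty ops table without
 * CONFIG_PM_SLEEP; __maybe_unused then keeps the unreferenced
 * callback from triggering -Wunused-function.
 */
static SIMPLE_DEV_PM_OPS(example_pm_ops, NULL, example_resume);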
diff --git a/drivers/irqchip/irq-xilinx-intc.c b/drivers/irqchip/irq-xilinx-intc.c
new file mode 100644
index 000000000000..3db7ab1c9741
--- /dev/null
+++ b/drivers/irqchip/irq-xilinx-intc.c
@@ -0,0 +1,241 @@
+/*
+ * Copyright (C) 2007-2013 Michal Simek <monstr@monstr.eu>
+ * Copyright (C) 2012-2013 Xilinx, Inc.
+ * Copyright (C) 2007-2009 PetaLogix
+ * Copyright (C) 2006 Atmark Techno, Inc.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/irqdomain.h>
+#include <linux/irq.h>
+#include <linux/irqchip.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/of_address.h>
+#include <linux/io.h>
+#include <linux/jump_label.h>
+#include <linux/bug.h>
+#include <linux/of_irq.h>
+
+/* No one else should require these constants, so define them locally here. */
+#define ISR 0x00 /* Interrupt Status Register */
+#define IPR 0x04 /* Interrupt Pending Register */
+#define IER 0x08 /* Interrupt Enable Register */
+#define IAR 0x0c /* Interrupt Acknowledge Register */
+#define SIE 0x10 /* Set Interrupt Enable bits */
+#define CIE 0x14 /* Clear Interrupt Enable bits */
+#define IVR 0x18 /* Interrupt Vector Register */
+#define MER 0x1c /* Master Enable Register */
+
+#define MER_ME (1<<0)
+#define MER_HIE (1<<1)
+
+static DEFINE_STATIC_KEY_FALSE(xintc_is_be);
+
+struct xintc_irq_chip {
+ void __iomem *base;
+ struct irq_domain *root_domain;
+ u32 intr_mask;
+};
+
+static struct xintc_irq_chip *xintc_irqc;
+
+static void xintc_write(int reg, u32 data)
+{
+ if (static_branch_unlikely(&xintc_is_be))
+ iowrite32be(data, xintc_irqc->base + reg);
+ else
+ iowrite32(data, xintc_irqc->base + reg);
+}
+
+static unsigned int xintc_read(int reg)
+{
+ if (static_branch_unlikely(&xintc_is_be))
+ return ioread32be(xintc_irqc->base + reg);
+ else
+ return ioread32(xintc_irqc->base + reg);
+}
+
+static void intc_enable_or_unmask(struct irq_data *d)
+{
+ unsigned long mask = 1 << d->hwirq;
+
+ pr_debug("irq-xilinx: enable_or_unmask: %ld\n", d->hwirq);
+
+ /*
+ * Ack level irqs here rather than from the ->irq_ack hook:
+ * handle_level_irq() acks before running the handler, which
+ * is too early for a level-triggered source.
+ */
+ if (irqd_is_level_type(d))
+ xintc_write(IAR, mask);
+
+ xintc_write(SIE, mask);
+}
+
+static void intc_disable_or_mask(struct irq_data *d)
+{
+ pr_debug("irq-xilinx: disable: %ld\n", d->hwirq);
+ xintc_write(CIE, 1 << d->hwirq);
+}
+
+static void intc_ack(struct irq_data *d)
+{
+ pr_debug("irq-xilinx: ack: %ld\n", d->hwirq);
+ xintc_write(IAR, 1 << d->hwirq);
+}
+
+static void intc_mask_ack(struct irq_data *d)
+{
+ unsigned long mask = 1 << d->hwirq;
+
+ pr_debug("irq-xilinx: disable_and_ack: %ld\n", d->hwirq);
+ xintc_write(CIE, mask);
+ xintc_write(IAR, mask);
+}
+
+static struct irq_chip intc_dev = {
+ .name = "Xilinx INTC",
+ .irq_unmask = intc_enable_or_unmask,
+ .irq_mask = intc_disable_or_mask,
+ .irq_ack = intc_ack,
+ .irq_mask_ack = intc_mask_ack,
+};
+
+unsigned int xintc_get_irq(void)
+{
+ unsigned int hwirq, irq = -1;
+
+ hwirq = xintc_read(IVR);
+ if (hwirq != -1U)
+ irq = irq_find_mapping(xintc_irqc->root_domain, hwirq);
+
+ pr_debug("irq-xilinx: hwirq=%d, irq=%d\n", hwirq, irq);
+
+ return irq;
+}
+
+static int xintc_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hw)
+{
+ if (xintc_irqc->intr_mask & (1 << hw)) {
+ irq_set_chip_and_handler_name(irq, &intc_dev,
+ handle_edge_irq, "edge");
+ irq_clear_status_flags(irq, IRQ_LEVEL);
+ } else {
+ irq_set_chip_and_handler_name(irq, &intc_dev,
+ handle_level_irq, "level");
+ irq_set_status_flags(irq, IRQ_LEVEL);
+ }
+ return 0;
+}
+
+static const struct irq_domain_ops xintc_irq_domain_ops = {
+ .xlate = irq_domain_xlate_onetwocell,
+ .map = xintc_map,
+};
+
+static void xil_intc_irq_handler(struct irq_desc *desc)
+{
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ u32 pending;
+
+ chained_irq_enter(chip, desc);
+ do {
+ pending = xintc_get_irq();
+ if (pending == -1U)
+ break;
+ generic_handle_irq(pending);
+ } while (true);
+ chained_irq_exit(chip, desc);
+}
+
+static int __init xilinx_intc_of_init(struct device_node *intc,
+ struct device_node *parent)
+{
+ u32 nr_irq;
+ int ret, irq;
+ struct xintc_irq_chip *irqc;
+
+ if (xintc_irqc) {
+ pr_err("irq-xilinx: Multiple instances aren't supported\n");
+ return -EINVAL;
+ }
+
+ irqc = kzalloc(sizeof(*irqc), GFP_KERNEL);
+ if (!irqc)
+ return -ENOMEM;
+
+ xintc_irqc = irqc;
+
+ irqc->base = of_iomap(intc, 0);
+ BUG_ON(!irqc->base);
+
+ ret = of_property_read_u32(intc, "xlnx,num-intr-inputs", &nr_irq);
+ if (ret < 0) {
+ pr_err("irq-xilinx: unable to read xlnx,num-intr-inputs\n");
+ goto err_alloc;
+ }
+
+ ret = of_property_read_u32(intc, "xlnx,kind-of-intr", &irqc->intr_mask);
+ if (ret < 0) {
+ pr_warn("irq-xilinx: unable to read xlnx,kind-of-intr\n");
+ irqc->intr_mask = 0;
+ }
+
+ if (irqc->intr_mask >> nr_irq)
+ pr_warn("irq-xilinx: mismatch in kind-of-intr param\n");
+
+ pr_info("irq-xilinx: %s: num_irq=%d, edge=0x%x\n",
+ intc->full_name, nr_irq, irqc->intr_mask);
+
+ /*
+ * Disable all external interrupts until they are
+ * explicitly requested.
+ */
+ xintc_write(IER, 0);
+
+ /* Acknowledge any pending interrupts just in case. */
+ xintc_write(IAR, 0xffffffff);
+
+ /* Turn on the Master Enable. */
+ xintc_write(MER, MER_HIE | MER_ME);
+ if (!(xintc_read(MER) & (MER_HIE | MER_ME))) {
+ static_branch_enable(&xintc_is_be);
+ xintc_write(MER, MER_HIE | MER_ME);
+ }
+
+ irqc->root_domain = irq_domain_add_linear(intc, nr_irq,
+ &xintc_irq_domain_ops, irqc);
+ if (!irqc->root_domain) {
+ pr_err("irq-xilinx: Unable to create IRQ domain\n");
+ goto err_alloc;
+ }
+
+ if (parent) {
+ irq = irq_of_parse_and_map(intc, 0);
+ if (irq) {
+ irq_set_chained_handler_and_data(irq,
+ xil_intc_irq_handler,
+ irqc);
+ } else {
+ pr_err("irq-xilinx: interrupts property not in DT\n");
+ ret = -EINVAL;
+ goto err_alloc;
+ }
+ } else {
+ irq_set_default_host(irqc->root_domain);
+ }
+
+ return 0;
+
+err_alloc:
+ xintc_irqc = NULL;
+ kfree(irqc);
+ return ret;
+}
+
+IRQCHIP_DECLARE(xilinx_intc_xps, "xlnx,xps-intc-1.00.a", xilinx_intc_of_init);
+IRQCHIP_DECLARE(xilinx_intc_opb, "xlnx,opb-intc-1.00.c", xilinx_intc_of_init);
diff --git a/drivers/isdn/gigaset/bas-gigaset.c b/drivers/isdn/gigaset/bas-gigaset.c
index aecec6d32463..11e13c56126f 100644
--- a/drivers/isdn/gigaset/bas-gigaset.c
+++ b/drivers/isdn/gigaset/bas-gigaset.c
@@ -2565,22 +2565,22 @@ static int gigaset_post_reset(struct usb_interface *intf)
static const struct gigaset_ops gigops = {
- gigaset_write_cmd,
- gigaset_write_room,
- gigaset_chars_in_buffer,
- gigaset_brkchars,
- gigaset_init_bchannel,
- gigaset_close_bchannel,
- gigaset_initbcshw,
- gigaset_freebcshw,
- gigaset_reinitbcshw,
- gigaset_initcshw,
- gigaset_freecshw,
- gigaset_set_modem_ctrl,
- gigaset_baud_rate,
- gigaset_set_line_ctrl,
- gigaset_isoc_send_skb,
- gigaset_isoc_input,
+ .write_cmd = gigaset_write_cmd,
+ .write_room = gigaset_write_room,
+ .chars_in_buffer = gigaset_chars_in_buffer,
+ .brkchars = gigaset_brkchars,
+ .init_bchannel = gigaset_init_bchannel,
+ .close_bchannel = gigaset_close_bchannel,
+ .initbcshw = gigaset_initbcshw,
+ .freebcshw = gigaset_freebcshw,
+ .reinitbcshw = gigaset_reinitbcshw,
+ .initcshw = gigaset_initcshw,
+ .freecshw = gigaset_freecshw,
+ .set_modem_ctrl = gigaset_set_modem_ctrl,
+ .baud_rate = gigaset_baud_rate,
+ .set_line_ctrl = gigaset_set_line_ctrl,
+ .send_skb = gigaset_isoc_send_skb,
+ .handle_input = gigaset_isoc_input,
};
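The switch to designated initializers (repeated for the other two gigaset variants below) trades positional fragility for binding by name: entries no longer shift silently when a field is added to or reordered in struct gigaset_ops. A generic illustration with a hypothetical ops structure:

struct example_ops {
	int (*open)(void);
	int (*close)(void);
};

static int example_open(void)  { return 0; }
static int example_close(void) { return 0; }

/*
 * The positional form { example_open, example_close } breaks silently
 * if the struct fields are reordered; the designated form stays bound
 * by name and zero-fills anything omitted.
 */
static const struct example_ops ops = {
	.open	= example_open,
	.close	= example_close,
};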
/* bas_gigaset_init
diff --git a/drivers/isdn/gigaset/ser-gigaset.c b/drivers/isdn/gigaset/ser-gigaset.c
index b90776ef56ec..ab0b63a4d045 100644
--- a/drivers/isdn/gigaset/ser-gigaset.c
+++ b/drivers/isdn/gigaset/ser-gigaset.c
@@ -445,22 +445,22 @@ static int gigaset_set_line_ctrl(struct cardstate *cs, unsigned cflag)
}
static const struct gigaset_ops ops = {
- gigaset_write_cmd,
- gigaset_write_room,
- gigaset_chars_in_buffer,
- gigaset_brkchars,
- gigaset_init_bchannel,
- gigaset_close_bchannel,
- gigaset_initbcshw,
- gigaset_freebcshw,
- gigaset_reinitbcshw,
- gigaset_initcshw,
- gigaset_freecshw,
- gigaset_set_modem_ctrl,
- gigaset_baud_rate,
- gigaset_set_line_ctrl,
- gigaset_m10x_send_skb, /* asyncdata.c */
- gigaset_m10x_input, /* asyncdata.c */
+ .write_cmd = gigaset_write_cmd,
+ .write_room = gigaset_write_room,
+ .chars_in_buffer = gigaset_chars_in_buffer,
+ .brkchars = gigaset_brkchars,
+ .init_bchannel = gigaset_init_bchannel,
+ .close_bchannel = gigaset_close_bchannel,
+ .initbcshw = gigaset_initbcshw,
+ .freebcshw = gigaset_freebcshw,
+ .reinitbcshw = gigaset_reinitbcshw,
+ .initcshw = gigaset_initcshw,
+ .freecshw = gigaset_freecshw,
+ .set_modem_ctrl = gigaset_set_modem_ctrl,
+ .baud_rate = gigaset_baud_rate,
+ .set_line_ctrl = gigaset_set_line_ctrl,
+ .send_skb = gigaset_m10x_send_skb, /* asyncdata.c */
+ .handle_input = gigaset_m10x_input, /* asyncdata.c */
};
diff --git a/drivers/isdn/gigaset/usb-gigaset.c b/drivers/isdn/gigaset/usb-gigaset.c
index 5f306e2eece5..eade36dafa34 100644
--- a/drivers/isdn/gigaset/usb-gigaset.c
+++ b/drivers/isdn/gigaset/usb-gigaset.c
@@ -862,22 +862,22 @@ static int gigaset_pre_reset(struct usb_interface *intf)
}
static const struct gigaset_ops ops = {
- gigaset_write_cmd,
- gigaset_write_room,
- gigaset_chars_in_buffer,
- gigaset_brkchars,
- gigaset_init_bchannel,
- gigaset_close_bchannel,
- gigaset_initbcshw,
- gigaset_freebcshw,
- gigaset_reinitbcshw,
- gigaset_initcshw,
- gigaset_freecshw,
- gigaset_set_modem_ctrl,
- gigaset_baud_rate,
- gigaset_set_line_ctrl,
- gigaset_m10x_send_skb,
- gigaset_m10x_input,
+ .write_cmd = gigaset_write_cmd,
+ .write_room = gigaset_write_room,
+ .chars_in_buffer = gigaset_chars_in_buffer,
+ .brkchars = gigaset_brkchars,
+ .init_bchannel = gigaset_init_bchannel,
+ .close_bchannel = gigaset_close_bchannel,
+ .initbcshw = gigaset_initbcshw,
+ .freebcshw = gigaset_freebcshw,
+ .reinitbcshw = gigaset_reinitbcshw,
+ .initcshw = gigaset_initcshw,
+ .freecshw = gigaset_freecshw,
+ .set_modem_ctrl = gigaset_set_modem_ctrl,
+ .baud_rate = gigaset_baud_rate,
+ .set_line_ctrl = gigaset_set_line_ctrl,
+ .send_skb = gigaset_m10x_send_skb,
+ .handle_input = gigaset_m10x_input,
};
/*
diff --git a/drivers/isdn/hisax/config.c b/drivers/isdn/hisax/config.c
index bf04d2a3cf4a..2d12c6ceeb89 100644
--- a/drivers/isdn/hisax/config.c
+++ b/drivers/isdn/hisax/config.c
@@ -659,7 +659,7 @@ int jiftime(char *s, long mark)
static u_char tmpbuf[HISAX_STATUS_BUFSIZE];
-void VHiSax_putstatus(struct IsdnCardState *cs, char *head, char *fmt,
+void VHiSax_putstatus(struct IsdnCardState *cs, char *head, const char *fmt,
va_list args)
{
/* if head == NULL the fmt contains the full info */
@@ -669,23 +669,24 @@ void VHiSax_putstatus(struct IsdnCardState *cs, char *head, char *fmt,
u_char *p;
isdn_ctrl ic;
int len;
+ const u_char *data;
if (!cs) {
printk(KERN_WARNING "HiSax: No CardStatus for message");
return;
}
spin_lock_irqsave(&cs->statlock, flags);
- p = tmpbuf;
if (head) {
+ p = tmpbuf;
p += jiftime(p, jiffies);
p += sprintf(p, " %s", head);
p += vsprintf(p, fmt, args);
*p++ = '\n';
*p = 0;
len = p - tmpbuf;
- p = tmpbuf;
+ data = tmpbuf;
} else {
- p = fmt;
+ data = fmt;
len = strlen(fmt);
}
if (len > HISAX_STATUS_BUFSIZE) {
@@ -699,13 +700,12 @@ void VHiSax_putstatus(struct IsdnCardState *cs, char *head, char *fmt,
if (i >= len)
i = len;
len -= i;
- memcpy(cs->status_write, p, i);
+ memcpy(cs->status_write, data, i);
cs->status_write += i;
if (cs->status_write > cs->status_end)
cs->status_write = cs->status_buf;
- p += i;
if (len) {
- memcpy(cs->status_write, p, len);
+ memcpy(cs->status_write, data + i, len);
cs->status_write += len;
}
#ifdef KERNELSTACK_DEBUG
@@ -729,7 +729,7 @@ void VHiSax_putstatus(struct IsdnCardState *cs, char *head, char *fmt,
}
}
-void HiSax_putstatus(struct IsdnCardState *cs, char *head, char *fmt, ...)
+void HiSax_putstatus(struct IsdnCardState *cs, char *head, const char *fmt, ...)
{
va_list args;
diff --git a/drivers/isdn/hisax/hisax.h b/drivers/isdn/hisax/hisax.h
index 6ead6314e6d2..338d0408b377 100644
--- a/drivers/isdn/hisax/hisax.h
+++ b/drivers/isdn/hisax/hisax.h
@@ -1288,9 +1288,9 @@ int jiftime(char *s, long mark);
int HiSax_command(isdn_ctrl *ic);
int HiSax_writebuf_skb(int id, int chan, int ack, struct sk_buff *skb);
__printf(3, 4)
-void HiSax_putstatus(struct IsdnCardState *cs, char *head, char *fmt, ...);
+void HiSax_putstatus(struct IsdnCardState *cs, char *head, const char *fmt, ...);
__printf(3, 0)
-void VHiSax_putstatus(struct IsdnCardState *cs, char *head, char *fmt, va_list args);
+void VHiSax_putstatus(struct IsdnCardState *cs, char *head, const char *fmt, va_list args);
void HiSax_reportcard(int cardnr, int sel);
int QuickHex(char *txt, u_char *p, int cnt);
void LogFrame(struct IsdnCardState *cs, u_char *p, int size);
diff --git a/drivers/isdn/hisax/q931.c b/drivers/isdn/hisax/q931.c
index ba4beb25d872..298c8dba0321 100644
--- a/drivers/isdn/hisax/q931.c
+++ b/drivers/isdn/hisax/q931.c
@@ -855,7 +855,7 @@ struct DTag { /* Display tags */
{ 0x8c, "Reason" },
{ 0x8d, "Calling party name" },
{ 0x8e, "Called party name" },
- { 0x8f, "Orignal called name" },
+ { 0x8f, "Original called name" },
{ 0x90, "Redirecting name" },
{ 0x91, "Connected name" },
{ 0x92, "Originating restrictions" },
diff --git a/drivers/isdn/hysdn/hysdn_net.c b/drivers/isdn/hysdn/hysdn_net.c
index 5609deee7cd3..b93a4e9a8d34 100644
--- a/drivers/isdn/hysdn/hysdn_net.c
+++ b/drivers/isdn/hysdn/hysdn_net.c
@@ -232,7 +232,6 @@ static const struct net_device_ops hysdn_netdev_ops = {
.ndo_open = net_open,
.ndo_stop = net_close,
.ndo_start_xmit = net_send_packet,
- .ndo_change_mtu = eth_change_mtu,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
};
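Dropping .ndo_change_mtu follows the core change that made eth_change_mtu()'s classic 68..1500 check the default: the core now validates MTU changes against dev->min_mtu/dev->max_mtu, which ether_setup() initializes to those bounds. A hedged sketch with hypothetical handlers:

#include <linux/netdevice.h>

static int example_open(struct net_device *dev) { return 0; }
static int example_stop(struct net_device *dev) { return 0; }

static const struct net_device_ops example_netdev_ops = {
	.ndo_open	= example_open,
	.ndo_stop	= example_stop,
	/*
	 * No .ndo_change_mtu: the core range-checks MTU changes against
	 * dev->min_mtu/dev->max_mtu set up by ether_setup().
	 */
};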
diff --git a/drivers/isdn/i4l/isdn_concap.c b/drivers/isdn/i4l/isdn_concap.c
index 91d57304d4d3..336523ec077c 100644
--- a/drivers/isdn/i4l/isdn_concap.c
+++ b/drivers/isdn/i4l/isdn_concap.c
@@ -80,9 +80,9 @@ static int isdn_concap_dl_disconn_req(struct concap_proto *concap)
}
struct concap_device_ops isdn_concap_reliable_dl_dops = {
- &isdn_concap_dl_data_req,
- &isdn_concap_dl_connect_req,
- &isdn_concap_dl_disconn_req
+ .data_req = &isdn_concap_dl_data_req,
+ .connect_req = &isdn_concap_dl_connect_req,
+ .disconn_req = &isdn_concap_dl_disconn_req
};
/* The following should better go into a dedicated source file such that
diff --git a/drivers/isdn/i4l/isdn_x25iface.c b/drivers/isdn/i4l/isdn_x25iface.c
index 0c5d8de41b23..ba60076e0b95 100644
--- a/drivers/isdn/i4l/isdn_x25iface.c
+++ b/drivers/isdn/i4l/isdn_x25iface.c
@@ -53,14 +53,14 @@ static int isdn_x25iface_disconn_ind(struct concap_proto *);
static struct concap_proto_ops ix25_pops = {
- &isdn_x25iface_proto_new,
- &isdn_x25iface_proto_del,
- &isdn_x25iface_proto_restart,
- &isdn_x25iface_proto_close,
- &isdn_x25iface_xmit,
- &isdn_x25iface_receive,
- &isdn_x25iface_connect_ind,
- &isdn_x25iface_disconn_ind
+ .proto_new = &isdn_x25iface_proto_new,
+ .proto_del = &isdn_x25iface_proto_del,
+ .restart = &isdn_x25iface_proto_restart,
+ .close = &isdn_x25iface_proto_close,
+ .encap_and_xmit = &isdn_x25iface_xmit,
+ .data_ind = &isdn_x25iface_receive,
+ .connect_ind = &isdn_x25iface_connect_ind,
+ .disconn_ind = &isdn_x25iface_disconn_ind
};
/* error message helper function */
diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig
index 7a628c6516f6..c621cbbb5768 100644
--- a/drivers/leds/Kconfig
+++ b/drivers/leds/Kconfig
@@ -645,7 +645,7 @@ config LEDS_VERSATILE
config LEDS_PM8058
tristate "LED Support for the Qualcomm PM8058 PMIC"
- depends on MFD_PM8921_CORE
+ depends on MFD_PM8XXX
depends on LEDS_CLASS
help
Choose this option if you want to use the LED drivers in
@@ -659,6 +659,25 @@ config LEDS_MLXCPLD
This option enables support for the LEDs on the Mellanox
boards. Say Y to enable them.
+config LEDS_USER
+ tristate "Userspace LED support"
+ depends on LEDS_CLASS
+ help
+ This option enables support for userspace LEDs. Say 'y' to enable this
+ support in the kernel. To compile this driver as a module, choose 'm' here:
+ the module will be called uleds.
+
+config LEDS_NIC78BX
+ tristate "LED support for NI PXI NIC78bx devices"
+ depends on LEDS_CLASS
+ depends on X86 && ACPI
+ help
+ This option enables support for the User1 and User2 LEDs on NI
+ PXI NIC78bx devices.
+
+ To compile this driver as a module, choose M here: the module
+ will be called leds-nic78bx.
+
comment "LED Triggers"
source "drivers/leds/trigger/Kconfig"
diff --git a/drivers/leds/Makefile b/drivers/leds/Makefile
index 3965070190f5..6b8273736478 100644
--- a/drivers/leds/Makefile
+++ b/drivers/leds/Makefile
@@ -71,9 +71,13 @@ obj-$(CONFIG_LEDS_IS31FL319X) += leds-is31fl319x.o
obj-$(CONFIG_LEDS_IS31FL32XX) += leds-is31fl32xx.o
obj-$(CONFIG_LEDS_PM8058) += leds-pm8058.o
obj-$(CONFIG_LEDS_MLXCPLD) += leds-mlxcpld.o
+obj-$(CONFIG_LEDS_NIC78BX) += leds-nic78bx.o
# LED SPI Drivers
obj-$(CONFIG_LEDS_DAC124S085) += leds-dac124s085.o
+# LED Userspace Drivers
+obj-$(CONFIG_LEDS_USER) += uleds.o
+
# LED Triggers
obj-$(CONFIG_LEDS_TRIGGERS) += trigger/
diff --git a/drivers/leds/led-class.c b/drivers/leds/led-class.c
index aa84e5b37593..326ee6e925a2 100644
--- a/drivers/leds/led-class.c
+++ b/drivers/leds/led-class.c
@@ -20,6 +20,7 @@
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/timer.h>
+#include <uapi/linux/uleds.h>
#include "leds.h"
static struct class *leds_class;
@@ -187,7 +188,7 @@ static int led_classdev_next_name(const char *init_name, char *name,
*/
int led_classdev_register(struct device *parent, struct led_classdev *led_cdev)
{
- char name[64];
+ char name[LED_MAX_NAME_SIZE];
int ret;
ret = led_classdev_next_name(led_cdev->name, name, sizeof(name));
@@ -203,6 +204,7 @@ int led_classdev_register(struct device *parent, struct led_classdev *led_cdev)
dev_warn(parent, "Led %s renamed to %s due to name collision",
led_cdev->name, dev_name(led_cdev->dev));
+ led_cdev->work_flags = 0;
#ifdef CONFIG_LEDS_TRIGGERS
init_rwsem(&led_cdev->trigger_lock);
#endif
diff --git a/drivers/leds/led-core.c b/drivers/leds/led-core.c
index 3bce44893021..ef1360445413 100644
--- a/drivers/leds/led-core.c
+++ b/drivers/leds/led-core.c
@@ -53,30 +53,30 @@ static void led_timer_function(unsigned long data)
if (!led_cdev->blink_delay_on || !led_cdev->blink_delay_off) {
led_set_brightness_nosleep(led_cdev, LED_OFF);
- led_cdev->flags &= ~LED_BLINK_SW;
+ clear_bit(LED_BLINK_SW, &led_cdev->work_flags);
return;
}
- if (led_cdev->flags & LED_BLINK_ONESHOT_STOP) {
- led_cdev->flags &= ~(LED_BLINK_ONESHOT_STOP | LED_BLINK_SW);
+ if (test_and_clear_bit(LED_BLINK_ONESHOT_STOP,
+ &led_cdev->work_flags)) {
+ clear_bit(LED_BLINK_SW, &led_cdev->work_flags);
return;
}
brightness = led_get_brightness(led_cdev);
if (!brightness) {
/* Time to switch the LED on. */
- brightness = led_cdev->blink_brightness;
+ if (test_and_clear_bit(LED_BLINK_BRIGHTNESS_CHANGE,
+ &led_cdev->work_flags))
+ brightness = led_cdev->new_blink_brightness;
+ else
+ brightness = led_cdev->blink_brightness;
delay = led_cdev->blink_delay_on;
} else {
/* Store the current brightness value to be able
* to restore it when the delay_off period is over.
- * Do it only if there is no pending blink brightness
- * change, to avoid overwriting the new value.
*/
- if (!(led_cdev->flags & LED_BLINK_BRIGHTNESS_CHANGE))
- led_cdev->blink_brightness = brightness;
- else
- led_cdev->flags &= ~LED_BLINK_BRIGHTNESS_CHANGE;
+ led_cdev->blink_brightness = brightness;
brightness = LED_OFF;
delay = led_cdev->blink_delay_off;
}
@@ -87,13 +87,15 @@ static void led_timer_function(unsigned long data)
* the final blink state so that the led is toggled each delay_on +
* delay_off milliseconds in worst case.
*/
- if (led_cdev->flags & LED_BLINK_ONESHOT) {
- if (led_cdev->flags & LED_BLINK_INVERT) {
+ if (test_bit(LED_BLINK_ONESHOT, &led_cdev->work_flags)) {
+ if (test_bit(LED_BLINK_INVERT, &led_cdev->work_flags)) {
if (brightness)
- led_cdev->flags |= LED_BLINK_ONESHOT_STOP;
+ set_bit(LED_BLINK_ONESHOT_STOP,
+ &led_cdev->work_flags);
} else {
if (!brightness)
- led_cdev->flags |= LED_BLINK_ONESHOT_STOP;
+ set_bit(LED_BLINK_ONESHOT_STOP,
+ &led_cdev->work_flags);
}
}
@@ -106,10 +108,9 @@ static void set_brightness_delayed(struct work_struct *ws)
container_of(ws, struct led_classdev, set_brightness_work);
int ret = 0;
- if (led_cdev->flags & LED_BLINK_DISABLE) {
+ if (test_and_clear_bit(LED_BLINK_DISABLE, &led_cdev->work_flags)) {
led_cdev->delayed_set_value = LED_OFF;
led_stop_software_blink(led_cdev);
- led_cdev->flags &= ~LED_BLINK_DISABLE;
}
ret = __led_set_brightness(led_cdev, led_cdev->delayed_set_value);
@@ -152,7 +153,7 @@ static void led_set_software_blink(struct led_classdev *led_cdev,
return;
}
- led_cdev->flags |= LED_BLINK_SW;
+ set_bit(LED_BLINK_SW, &led_cdev->work_flags);
mod_timer(&led_cdev->blink_timer, jiffies + 1);
}
@@ -161,7 +162,7 @@ static void led_blink_setup(struct led_classdev *led_cdev,
unsigned long *delay_on,
unsigned long *delay_off)
{
- if (!(led_cdev->flags & LED_BLINK_ONESHOT) &&
+ if (!test_bit(LED_BLINK_ONESHOT, &led_cdev->work_flags) &&
led_cdev->blink_set &&
!led_cdev->blink_set(led_cdev, delay_on, delay_off))
return;
@@ -188,8 +189,8 @@ void led_blink_set(struct led_classdev *led_cdev,
{
del_timer_sync(&led_cdev->blink_timer);
- led_cdev->flags &= ~LED_BLINK_ONESHOT;
- led_cdev->flags &= ~LED_BLINK_ONESHOT_STOP;
+ clear_bit(LED_BLINK_ONESHOT, &led_cdev->work_flags);
+ clear_bit(LED_BLINK_ONESHOT_STOP, &led_cdev->work_flags);
led_blink_setup(led_cdev, delay_on, delay_off);
}
@@ -200,17 +201,17 @@ void led_blink_set_oneshot(struct led_classdev *led_cdev,
unsigned long *delay_off,
int invert)
{
- if ((led_cdev->flags & LED_BLINK_ONESHOT) &&
+ if (test_bit(LED_BLINK_ONESHOT, &led_cdev->work_flags) &&
timer_pending(&led_cdev->blink_timer))
return;
- led_cdev->flags |= LED_BLINK_ONESHOT;
- led_cdev->flags &= ~LED_BLINK_ONESHOT_STOP;
+ set_bit(LED_BLINK_ONESHOT, &led_cdev->work_flags);
+ clear_bit(LED_BLINK_ONESHOT_STOP, &led_cdev->work_flags);
if (invert)
- led_cdev->flags |= LED_BLINK_INVERT;
+ set_bit(LED_BLINK_INVERT, &led_cdev->work_flags);
else
- led_cdev->flags &= ~LED_BLINK_INVERT;
+ clear_bit(LED_BLINK_INVERT, &led_cdev->work_flags);
led_blink_setup(led_cdev, delay_on, delay_off);
}
@@ -221,7 +222,7 @@ void led_stop_software_blink(struct led_classdev *led_cdev)
del_timer_sync(&led_cdev->blink_timer);
led_cdev->blink_delay_on = 0;
led_cdev->blink_delay_off = 0;
- led_cdev->flags &= ~LED_BLINK_SW;
+ clear_bit(LED_BLINK_SW, &led_cdev->work_flags);
}
EXPORT_SYMBOL_GPL(led_stop_software_blink);
@@ -232,18 +233,19 @@ void led_set_brightness(struct led_classdev *led_cdev,
* If software blink is active, delay brightness setting
* until the next timer tick.
*/
- if (led_cdev->flags & LED_BLINK_SW) {
+ if (test_bit(LED_BLINK_SW, &led_cdev->work_flags)) {
/*
* If we need to disable soft blinking delegate this to the
* work queue task to avoid problems in case we are called
* from hard irq context.
*/
if (brightness == LED_OFF) {
- led_cdev->flags |= LED_BLINK_DISABLE;
+ set_bit(LED_BLINK_DISABLE, &led_cdev->work_flags);
schedule_work(&led_cdev->set_brightness_work);
} else {
- led_cdev->flags |= LED_BLINK_BRIGHTNESS_CHANGE;
- led_cdev->blink_brightness = brightness;
+ set_bit(LED_BLINK_BRIGHTNESS_CHANGE,
+ &led_cdev->work_flags);
+ led_cdev->new_blink_brightness = brightness;
}
return;
}
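
The whole led-core.c conversion replaces plain flag arithmetic on led_cdev->flags with atomic bitops on a dedicated work_flags word, because the blink timer, the set_brightness workqueue and process-context callers all update these bits concurrently. A minimal sketch of the hazard being closed (flag roles as in the code above):

	/* With plain arithmetic, two contexts can race:
	 *
	 *   timer:   flags &= ~LED_BLINK_SW;       load, and, store
	 *   caller:  flags |= LED_BLINK_DISABLE;   load, or,  store
	 *
	 * If both load before either stores, one update is lost.  Each
	 * bitop below is a single indivisible read-modify-write: */
	static void example_stop_blink(struct led_classdev *led_cdev)
	{
		set_bit(LED_BLINK_DISABLE, &led_cdev->work_flags);
		clear_bit(LED_BLINK_SW, &led_cdev->work_flags);

		/* test-and-clear in one step: a pending stop request
		 * is consumed exactly once, even under concurrency */
		if (test_and_clear_bit(LED_BLINK_ONESHOT_STOP,
				       &led_cdev->work_flags))
			; /* handle the one-shot stop here */
	}
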
diff --git a/drivers/leds/leds-cobalt-raq.c b/drivers/leds/leds-cobalt-raq.c
index b316df4a8c1e..8d066facdc73 100644
--- a/drivers/leds/leds-cobalt-raq.c
+++ b/drivers/leds/leds-cobalt-raq.c
@@ -115,8 +115,4 @@ static struct platform_driver cobalt_raq_led_driver = {
},
};
-static int __init cobalt_raq_led_init(void)
-{
- return platform_driver_register(&cobalt_raq_led_driver);
-}
-device_initcall(cobalt_raq_led_init);
+builtin_platform_driver(cobalt_raq_led_driver);
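
builtin_platform_driver() is shorthand for exactly the boilerplate deleted above; its expansion is in effect:

	/* Rough expansion of builtin_platform_driver(cobalt_raq_led_driver): */
	static int __init cobalt_raq_led_driver_init(void)
	{
		return platform_driver_register(&cobalt_raq_led_driver);
	}
	device_initcall(cobalt_raq_led_driver_init);
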
diff --git a/drivers/leds/leds-lp3952.c b/drivers/leds/leds-lp3952.c
index a73c8ff08530..4847e89883a7 100644
--- a/drivers/leds/leds-lp3952.c
+++ b/drivers/leds/leds-lp3952.c
@@ -274,6 +274,7 @@ static const struct i2c_device_id lp3952_id[] = {
{LP3952_NAME, 0},
{}
};
+MODULE_DEVICE_TABLE(i2c, lp3952_id);
#ifdef CONFIG_ACPI
static const struct acpi_device_id lp3952_acpi_match[] = {
diff --git a/drivers/leds/leds-mc13783.c b/drivers/leds/leds-mc13783.c
index a2e4c1792e17..2421cf104991 100644
--- a/drivers/leds/leds-mc13783.c
+++ b/drivers/leds/leds-mc13783.c
@@ -84,8 +84,9 @@ static int mc13xxx_led_set(struct led_classdev *led_cdev,
case MC13892_LED_MD:
case MC13892_LED_AD:
case MC13892_LED_KP:
- reg = (led->id - MC13892_LED_MD) / 2;
- shift = 3 + (led->id - MC13892_LED_MD) * 12;
+ off = led->id - MC13892_LED_MD;
+ reg = off / 2;
+ shift = 3 + (off - reg * 2) * 12;
break;
case MC13892_LED_R:
case MC13892_LED_G:
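
The arithmetic fix is easiest to check against the three LEDs in this case label, whose offsets from MC13892_LED_MD are 0, 1 and 2; each control register holds two LED fields 12 bits apart, the first at bit 3 (as the code implies), so the shift must wrap per register:

	/* off = led->id - MC13892_LED_MD
	 *
	 *   old: shift = 3 + off * 12
	 *   new: reg = off / 2; shift = 3 + (off % 2) * 12
	 *
	 *   off   reg   old shift             new shift
	 *    0     0        3                     3
	 *    1     0       15                    15
	 *    2     1       27 (wrong field)       3 (first field of reg 1)
	 *
	 * (off - reg * 2) is simply off % 2 spelled without a modulo. */
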
diff --git a/drivers/leds/leds-mlxcpld.c b/drivers/leds/leds-mlxcpld.c
index 197ab9b29a9c..281482e1d50f 100644
--- a/drivers/leds/leds-mlxcpld.c
+++ b/drivers/leds/leds-mlxcpld.c
@@ -400,6 +400,9 @@ static int __init mlxcpld_led_init(void)
struct platform_device *pdev;
int err;
+ if (!dmi_match(DMI_CHASSIS_VENDOR, "Mellanox Technologies Ltd."))
+ return -ENODEV;
+
pdev = platform_device_register_simple(KBUILD_MODNAME, -1, NULL, 0);
if (IS_ERR(pdev)) {
pr_err("Device allocation failed\n");
@@ -426,5 +429,5 @@ module_exit(mlxcpld_led_exit);
MODULE_AUTHOR("Vadim Pasternak <vadimp@mellanox.com>");
MODULE_DESCRIPTION("Mellanox board LED driver");
-MODULE_LICENSE("GPL v2");
+MODULE_LICENSE("Dual BSD/GPL");
MODULE_ALIAS("platform:leds_mlxcpld");
diff --git a/drivers/leds/leds-netxbig.c b/drivers/leds/leds-netxbig.c
index 4b88b93244be..f48b1aed9b4e 100644
--- a/drivers/leds/leds-netxbig.c
+++ b/drivers/leds/leds-netxbig.c
@@ -534,6 +534,7 @@ static const struct of_device_id of_netxbig_leds_match[] = {
{ .compatible = "lacie,netxbig-leds", },
{},
};
+MODULE_DEVICE_TABLE(of, of_netxbig_leds_match);
#else
static inline int
netxbig_leds_get_of_pdata(struct device *dev,
diff --git a/drivers/leds/leds-nic78bx.c b/drivers/leds/leds-nic78bx.c
new file mode 100644
index 000000000000..8d69e2b74a27
--- /dev/null
+++ b/drivers/leds/leds-nic78bx.c
@@ -0,0 +1,209 @@
+/*
+ * Copyright (C) 2016 National Instruments Corp.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/acpi.h>
+#include <linux/leds.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/spinlock.h>
+
+#define NIC78BX_USER1_LED_MASK 0x3
+#define NIC78BX_USER1_GREEN_LED BIT(0)
+#define NIC78BX_USER1_YELLOW_LED BIT(1)
+
+#define NIC78BX_USER2_LED_MASK 0xC
+#define NIC78BX_USER2_GREEN_LED BIT(2)
+#define NIC78BX_USER2_YELLOW_LED BIT(3)
+
+#define NIC78BX_LOCK_REG_OFFSET 1
+#define NIC78BX_LOCK_VALUE 0xA5
+#define NIC78BX_UNLOCK_VALUE 0x5A
+
+#define NIC78BX_USER_LED_IO_SIZE 2
+
+struct nic78bx_led_data {
+ u16 io_base;
+ spinlock_t lock;
+ struct platform_device *pdev;
+};
+
+struct nic78bx_led {
+ u8 bit;
+ u8 mask;
+ struct nic78bx_led_data *data;
+ struct led_classdev cdev;
+};
+
+static inline struct nic78bx_led *to_nic78bx_led(struct led_classdev *cdev)
+{
+ return container_of(cdev, struct nic78bx_led, cdev);
+}
+
+static void nic78bx_brightness_set(struct led_classdev *cdev,
+ enum led_brightness brightness)
+{
+ struct nic78bx_led *nled = to_nic78bx_led(cdev);
+ unsigned long flags;
+ u8 value;
+
+ spin_lock_irqsave(&nled->data->lock, flags);
+ value = inb(nled->data->io_base);
+
+ if (brightness) {
+ value &= ~nled->mask;
+ value |= nled->bit;
+ } else {
+ value &= ~nled->bit;
+ }
+
+ outb(value, nled->data->io_base);
+ spin_unlock_irqrestore(&nled->data->lock, flags);
+}
+
+static enum led_brightness nic78bx_brightness_get(struct led_classdev *cdev)
+{
+ struct nic78bx_led *nled = to_nic78bx_led(cdev);
+ unsigned long flags;
+ u8 value;
+
+ spin_lock_irqsave(&nled->data->lock, flags);
+ value = inb(nled->data->io_base);
+ spin_unlock_irqrestore(&nled->data->lock, flags);
+
+ return (value & nled->bit) ? 1 : LED_OFF;
+}
+
+static struct nic78bx_led nic78bx_leds[] = {
+ {
+ .bit = NIC78BX_USER1_GREEN_LED,
+ .mask = NIC78BX_USER1_LED_MASK,
+ .cdev = {
+ .name = "nilrt:green:user1",
+ .max_brightness = 1,
+ .brightness_set = nic78bx_brightness_set,
+ .brightness_get = nic78bx_brightness_get,
+ }
+ },
+ {
+ .bit = NIC78BX_USER1_YELLOW_LED,
+ .mask = NIC78BX_USER1_LED_MASK,
+ .cdev = {
+ .name = "nilrt:yellow:user1",
+ .max_brightness = 1,
+ .brightness_set = nic78bx_brightness_set,
+ .brightness_get = nic78bx_brightness_get,
+ }
+ },
+ {
+ .bit = NIC78BX_USER2_GREEN_LED,
+ .mask = NIC78BX_USER2_LED_MASK,
+ .cdev = {
+ .name = "nilrt:green:user2",
+ .max_brightness = 1,
+ .brightness_set = nic78bx_brightness_set,
+ .brightness_get = nic78bx_brightness_get,
+ }
+ },
+ {
+ .bit = NIC78BX_USER2_YELLOW_LED,
+ .mask = NIC78BX_USER2_LED_MASK,
+ .cdev = {
+ .name = "nilrt:yellow:user2",
+ .max_brightness = 1,
+ .brightness_set = nic78bx_brightness_set,
+ .brightness_get = nic78bx_brightness_get,
+ }
+ }
+};
+
+static int nic78bx_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct nic78bx_led_data *led_data;
+ struct resource *io_rc;
+ int ret, i;
+
+ led_data = devm_kzalloc(dev, sizeof(*led_data), GFP_KERNEL);
+ if (!led_data)
+ return -ENOMEM;
+
+ led_data->pdev = pdev;
+ platform_set_drvdata(pdev, led_data);
+
+ io_rc = platform_get_resource(pdev, IORESOURCE_IO, 0);
+ if (!io_rc) {
+ dev_err(dev, "missing IO resources\n");
+ return -EINVAL;
+ }
+
+ if (resource_size(io_rc) < NIC78BX_USER_LED_IO_SIZE) {
+ dev_err(dev, "IO region too small\n");
+ return -EINVAL;
+ }
+
+ if (!devm_request_region(dev, io_rc->start, resource_size(io_rc),
+ KBUILD_MODNAME)) {
+ dev_err(dev, "failed to get IO region\n");
+ return -EBUSY;
+ }
+
+ led_data->io_base = io_rc->start;
+ spin_lock_init(&led_data->lock);
+
+ for (i = 0; i < ARRAY_SIZE(nic78bx_leds); i++) {
+ nic78bx_leds[i].data = led_data;
+
+ ret = devm_led_classdev_register(dev, &nic78bx_leds[i].cdev);
+ if (ret)
+ return ret;
+ }
+
+ /* Unlock LED register */
+ outb(NIC78BX_UNLOCK_VALUE,
+ led_data->io_base + NIC78BX_LOCK_REG_OFFSET);
+
+ return ret;
+}
+
+static int nic78bx_remove(struct platform_device *pdev)
+{
+ struct nic78bx_led_data *led_data = platform_get_drvdata(pdev);
+
+ /* Lock LED register */
+ outb(NIC78BX_LOCK_VALUE,
+ led_data->io_base + NIC78BX_LOCK_REG_OFFSET);
+
+ return 0;
+}
+
+static const struct acpi_device_id led_device_ids[] = {
+ {"NIC78B3", 0},
+ {"", 0},
+};
+MODULE_DEVICE_TABLE(acpi, led_device_ids);
+
+static struct platform_driver led_driver = {
+ .probe = nic78bx_probe,
+ .remove = nic78bx_remove,
+ .driver = {
+ .name = KBUILD_MODNAME,
+ .acpi_match_table = ACPI_PTR(led_device_ids),
+ },
+};
+
+module_platform_driver(led_driver);
+
+MODULE_DESCRIPTION("National Instruments PXI User LEDs driver");
+MODULE_AUTHOR("Hui Chun Ong <hui.chun.ong@ni.com>");
+MODULE_LICENSE("GPL");
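
Once bound, the driver exposes the four LEDs through the standard LED class interface, so no driver-specific API is needed. A minimal userspace sketch (the sysfs path follows from the .name fields above; error handling trimmed):

	#include <fcntl.h>
	#include <unistd.h>

	int main(void)
	{
		int fd = open("/sys/class/leds/nilrt:green:user1/brightness",
			      O_WRONLY);

		if (fd < 0)
			return 1;
		write(fd, "1", 1);	/* max_brightness is 1: on/off only */
		close(fd);
		return 0;
	}
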
diff --git a/drivers/leds/leds-pca9532.c b/drivers/leds/leds-pca9532.c
index 09a7cffbc46f..06e63106ae1e 100644
--- a/drivers/leds/leds-pca9532.c
+++ b/drivers/leds/leds-pca9532.c
@@ -369,7 +369,7 @@ static int pca9532_configure(struct i2c_client *client,
led->state = pled->state;
led->name = pled->name;
led->ldev.name = led->name;
- led->ldev.default_trigger = led->default_trigger;
+ led->ldev.default_trigger = pled->default_trigger;
led->ldev.brightness = LED_OFF;
led->ldev.brightness_set_blocking =
pca9532_set_brightness;
diff --git a/drivers/leds/leds-pca955x.c b/drivers/leds/leds-pca955x.c
index 840401ae9a4e..78a7ce816a47 100644
--- a/drivers/leds/leds-pca955x.c
+++ b/drivers/leds/leds-pca955x.c
@@ -40,6 +40,7 @@
* bits the chip supports.
*/
+#include <linux/acpi.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/string.h>
@@ -100,6 +101,15 @@ static const struct i2c_device_id pca955x_id[] = {
};
MODULE_DEVICE_TABLE(i2c, pca955x_id);
+static const struct acpi_device_id pca955x_acpi_ids[] = {
+ { "PCA9550", pca9550 },
+ { "PCA9551", pca9551 },
+ { "PCA9552", pca9552 },
+ { "PCA9553", pca9553 },
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, pca955x_acpi_ids);
+
struct pca955x {
struct mutex lock;
struct pca955x_led *leds;
@@ -250,7 +260,16 @@ static int pca955x_probe(struct i2c_client *client,
struct led_platform_data *pdata;
int i, err;
- chip = &pca955x_chipdefs[id->driver_data];
+ if (id) {
+ chip = &pca955x_chipdefs[id->driver_data];
+ } else {
+ const struct acpi_device_id *acpi_id;
+
+ acpi_id = acpi_match_device(pca955x_acpi_ids, &client->dev);
+ if (!acpi_id)
+ return -ENODEV;
+ chip = &pca955x_chipdefs[acpi_id->driver_data];
+ }
adapter = to_i2c_adapter(client->dev.parent);
pdata = dev_get_platdata(&client->dev);
@@ -264,7 +283,7 @@ static int pca955x_probe(struct i2c_client *client,
dev_info(&client->dev, "leds-pca955x: Using %s %d-bit LED driver at "
"slave address 0x%02x\n",
- id->name, chip->bits, client->addr);
+ client->name, chip->bits, client->addr);
if (!i2c_check_functionality(adapter, I2C_FUNC_I2C))
return -EIO;
@@ -358,6 +377,7 @@ static int pca955x_remove(struct i2c_client *client)
static struct i2c_driver pca955x_driver = {
.driver = {
.name = "leds-pca955x",
+ .acpi_match_table = ACPI_PTR(pca955x_acpi_ids),
},
.probe = pca955x_probe,
.remove = pca955x_remove,
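
The id->name to client->name change above is not cosmetic: when the device is enumerated through ACPI rather than an i2c_device_id match, probe() receives id == NULL, so the old dev_info() would have dereferenced a null pointer. The fallback added to the probe generalizes to any i2c driver matched both ways; a condensed sketch with illustrative names:

	static const struct acpi_device_id example_acpi_ids[] = {
		{ "EXMP0001", 0 },	/* hypothetical _HID */
		{ }
	};

	static int example_probe(struct i2c_client *client,
				 const struct i2c_device_id *id)
	{
		unsigned long variant;

		if (id) {		/* matched via i2c_device_id */
			variant = id->driver_data;
		} else {		/* matched via ACPI _HID */
			const struct acpi_device_id *aid =
				acpi_match_device(example_acpi_ids,
						  &client->dev);

			if (!aid)
				return -ENODEV;
			variant = aid->driver_data;
		}

		/* only client->name is valid on both paths */
		dev_info(&client->dev, "probing %s, variant %lu\n",
			 client->name, variant);
		return 0;
	}
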
diff --git a/drivers/leds/leds-pca963x.c b/drivers/leds/leds-pca963x.c
index 407eba11e187..ded1e4dac36a 100644
--- a/drivers/leds/leds-pca963x.c
+++ b/drivers/leds/leds-pca963x.c
@@ -25,6 +25,7 @@
* or by adding the 'nxp,hw-blink' property to the DTS.
*/
+#include <linux/acpi.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/string.h>
@@ -59,6 +60,7 @@ struct pca963x_chipdef {
u8 grpfreq;
u8 ledout_base;
int n_leds;
+ unsigned int scaling;
};
static struct pca963x_chipdef pca963x_chipdefs[] = {
@@ -95,6 +97,15 @@ static const struct i2c_device_id pca963x_id[] = {
};
MODULE_DEVICE_TABLE(i2c, pca963x_id);
+static const struct acpi_device_id pca963x_acpi_ids[] = {
+ { "PCA9632", pca9633 },
+ { "PCA9633", pca9633 },
+ { "PCA9634", pca9634 },
+ { "PCA9635", pca9635 },
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, pca963x_acpi_ids);
+
struct pca963x_led;
struct pca963x {
@@ -102,6 +113,7 @@ struct pca963x {
struct mutex mutex;
struct i2c_client *client;
struct pca963x_led *leds;
+ unsigned long leds_on;
};
struct pca963x_led {
@@ -123,7 +135,6 @@ static int pca963x_brightness(struct pca963x_led *pca963x,
u8 mask = 0x3 << shift;
int ret;
- mutex_lock(&pca963x->chip->mutex);
ledout = i2c_smbus_read_byte_data(pca963x->chip->client, ledout_addr);
switch (brightness) {
case LED_FULL:
@@ -140,14 +151,13 @@ static int pca963x_brightness(struct pca963x_led *pca963x,
PCA963X_PWM_BASE + pca963x->led_num,
brightness);
if (ret < 0)
- goto unlock;
+ return ret;
ret = i2c_smbus_write_byte_data(pca963x->chip->client,
ledout_addr,
(ledout & ~mask) | (PCA963X_LED_PWM << shift));
break;
}
-unlock:
- mutex_unlock(&pca963x->chip->mutex);
+
return ret;
}
@@ -179,14 +189,49 @@ static void pca963x_blink(struct pca963x_led *pca963x)
mutex_unlock(&pca963x->chip->mutex);
}
+static int pca963x_power_state(struct pca963x_led *pca963x)
+{
+ unsigned long *leds_on = &pca963x->chip->leds_on;
+ unsigned long cached_leds = pca963x->chip->leds_on;
+
+ if (pca963x->led_cdev.brightness)
+ set_bit(pca963x->led_num, leds_on);
+ else
+ clear_bit(pca963x->led_num, leds_on);
+
+ if (!(*leds_on) != !cached_leds)
+ return i2c_smbus_write_byte_data(pca963x->chip->client,
+ PCA963X_MODE1, *leds_on ? 0 : BIT(4));
+
+ return 0;
+}
+
static int pca963x_led_set(struct led_classdev *led_cdev,
enum led_brightness value)
{
struct pca963x_led *pca963x;
+ int ret;
pca963x = container_of(led_cdev, struct pca963x_led, led_cdev);
- return pca963x_brightness(pca963x, value);
+ mutex_lock(&pca963x->chip->mutex);
+
+ ret = pca963x_brightness(pca963x, value);
+ if (ret < 0)
+ goto unlock;
+ ret = pca963x_power_state(pca963x);
+
+unlock:
+ mutex_unlock(&pca963x->chip->mutex);
+ return ret;
+}
+
+static unsigned int pca963x_period_scale(struct pca963x_led *pca963x,
+ unsigned int val)
+{
+ unsigned int scaling = pca963x->chip->chipdef->scaling;
+
+ return scaling ? DIV_ROUND_CLOSEST(val * scaling, 1000) : val;
}
static int pca963x_blink_set(struct led_classdev *led_cdev,
@@ -207,14 +252,14 @@ static int pca963x_blink_set(struct led_classdev *led_cdev,
time_off = 500;
}
- period = time_on + time_off;
+ period = pca963x_period_scale(pca963x, time_on + time_off);
/* If period not supported by hardware, default to something sane. */
if ((period < PCA963X_BLINK_PERIOD_MIN) ||
(period > PCA963X_BLINK_PERIOD_MAX)) {
time_on = 500;
time_off = 500;
- period = time_on + time_off;
+ period = pca963x_period_scale(pca963x, 1000);
}
/*
@@ -222,7 +267,7 @@ static int pca963x_blink_set(struct led_classdev *led_cdev,
* (time_on / period) = (GDC / 256) ->
* GDC = ((time_on * 256) / period)
*/
- gdc = (time_on * 256) / period;
+ gdc = (pca963x_period_scale(pca963x, time_on) * 256) / period;
/*
* From manual: period = ((GFRQ + 1) / 24) in seconds.
@@ -294,6 +339,9 @@ pca963x_dt_init(struct i2c_client *client, struct pca963x_chipdef *chip)
else
pdata->blink_type = PCA963X_SW_BLINK;
+ if (of_property_read_u32(np, "nxp,period-scale", &chip->scaling))
+ chip->scaling = 1000;
+
return pdata;
}
@@ -322,7 +370,16 @@ static int pca963x_probe(struct i2c_client *client,
struct pca963x_chipdef *chip;
int i, err;
- chip = &pca963x_chipdefs[id->driver_data];
+ if (id) {
+ chip = &pca963x_chipdefs[id->driver_data];
+ } else {
+ const struct acpi_device_id *acpi_id;
+
+ acpi_id = acpi_match_device(pca963x_acpi_ids, &client->dev);
+ if (!acpi_id)
+ return -ENODEV;
+ chip = &pca963x_chipdefs[acpi_id->driver_data];
+ }
pdata = dev_get_platdata(&client->dev);
if (!pdata) {
@@ -391,8 +448,8 @@ static int pca963x_probe(struct i2c_client *client,
goto exit;
}
- /* Disable LED all-call address and set normal mode */
- i2c_smbus_write_byte_data(client, PCA963X_MODE1, 0x00);
+ /* Disable LED all-call address, and power down initially */
+ i2c_smbus_write_byte_data(client, PCA963X_MODE1, BIT(4));
if (pdata) {
/* Configure output: open-drain or totem pole (push-pull) */
@@ -426,6 +483,7 @@ static struct i2c_driver pca963x_driver = {
.driver = {
.name = "leds-pca963x",
.of_match_table = of_match_ptr(of_pca963x_match),
+ .acpi_match_table = ACPI_PTR(pca963x_acpi_ids),
},
.probe = pca963x_probe,
.remove = pca963x_remove,
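
Two of the pca963x changes deserve unpacking. First, the leds_on bitmap gates a chip-wide low-power mode: MODE1 bit 4 (the chip's SLEEP bit, per the PCA963x datasheets) should be written only when the first LED turns on or the last turns off, and the !(*leds_on) != !cached_leds test fires exactly on those edges. Second, nxp,period-scale lets a board compensate for an off-nominal chip time base. A condensed sketch of both, with illustrative values:

	/* Edge detection on "any LED lit": compare the truth value of
	 * the bitmap before and after this LED's bit was updated.
	 *
	 *   cached_leds   *leds_on   !(*leds_on) != !cached_leds   MODE1 write
	 *   0             nonzero    true                          0      (wake)
	 *   nonzero       0          true                          BIT(4) (sleep)
	 *   otherwise                false                         none
	 */

	/* Period scaling: with a hypothetical nxp,period-scale = 800,
	 * a requested 1000 ms blink period is programmed as
	 * DIV_ROUND_CLOSEST(1000 * 800, 1000) = 800 chip milliseconds,
	 * compensating for a time base running at 80% of nominal (one
	 * chip millisecond lasts 1.25 ms).  scaling = 1000 is the
	 * identity, and since time_on and the period are both scaled,
	 * the GDC duty-cycle ratio is unchanged. */
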
diff --git a/drivers/leds/trigger/ledtrig-cpu.c b/drivers/leds/trigger/ledtrig-cpu.c
index 22f0634dd3fa..9719caf7437c 100644
--- a/drivers/leds/trigger/ledtrig-cpu.c
+++ b/drivers/leds/trigger/ledtrig-cpu.c
@@ -42,7 +42,7 @@ static DEFINE_PER_CPU(struct led_trigger_cpu, cpu_trig);
* @evt: CPU event to be emitted
*
* Emit a CPU event on a CPU core, which will trigger a
- * binded LED to turn on or turn off.
+ * bound LED to turn on or turn off.
*/
void ledtrig_cpu(enum cpu_led_event ledevt)
{
diff --git a/drivers/leds/uleds.c b/drivers/leds/uleds.c
new file mode 100644
index 000000000000..5e9e8a1fdefb
--- /dev/null
+++ b/drivers/leds/uleds.c
@@ -0,0 +1,235 @@
+/*
+ * Userspace driver for the LED subsystem
+ *
+ * Copyright (C) 2016 David Lechner <david@lechnology.com>
+ *
+ * Based on uinput.c: Aristeu Sergio Rozanski Filho <aris@cathedrallabs.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include <linux/fs.h>
+#include <linux/init.h>
+#include <linux/leds.h>
+#include <linux/miscdevice.h>
+#include <linux/module.h>
+#include <linux/poll.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+
+#include <uapi/linux/uleds.h>
+
+#define ULEDS_NAME "uleds"
+
+enum uleds_state {
+ ULEDS_STATE_UNKNOWN,
+ ULEDS_STATE_REGISTERED,
+};
+
+struct uleds_device {
+ struct uleds_user_dev user_dev;
+ struct led_classdev led_cdev;
+ struct mutex mutex;
+ enum uleds_state state;
+ wait_queue_head_t waitq;
+ int brightness;
+ bool new_data;
+};
+
+static struct miscdevice uleds_misc;
+
+static void uleds_brightness_set(struct led_classdev *led_cdev,
+ enum led_brightness brightness)
+{
+ struct uleds_device *udev = container_of(led_cdev, struct uleds_device,
+ led_cdev);
+
+ if (udev->brightness != brightness) {
+ udev->brightness = brightness;
+ udev->new_data = true;
+ wake_up_interruptible(&udev->waitq);
+ }
+}
+
+static int uleds_open(struct inode *inode, struct file *file)
+{
+ struct uleds_device *udev;
+
+ udev = kzalloc(sizeof(*udev), GFP_KERNEL);
+ if (!udev)
+ return -ENOMEM;
+
+ udev->led_cdev.name = udev->user_dev.name;
+ udev->led_cdev.brightness_set = uleds_brightness_set;
+
+ mutex_init(&udev->mutex);
+ init_waitqueue_head(&udev->waitq);
+ udev->state = ULEDS_STATE_UNKNOWN;
+
+ file->private_data = udev;
+ nonseekable_open(inode, file);
+
+ return 0;
+}
+
+static ssize_t uleds_write(struct file *file, const char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ struct uleds_device *udev = file->private_data;
+ const char *name;
+ int ret;
+
+ if (count == 0)
+ return 0;
+
+ ret = mutex_lock_interruptible(&udev->mutex);
+ if (ret)
+ return ret;
+
+ if (udev->state == ULEDS_STATE_REGISTERED) {
+ ret = -EBUSY;
+ goto out;
+ }
+
+ if (count != sizeof(struct uleds_user_dev)) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (copy_from_user(&udev->user_dev, buffer,
+ sizeof(struct uleds_user_dev))) {
+ ret = -EFAULT;
+ goto out;
+ }
+
+ name = udev->user_dev.name;
+ if (!name[0] || !strcmp(name, ".") || !strcmp(name, "..") ||
+ strchr(name, '/')) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (udev->user_dev.max_brightness <= 0) {
+ ret = -EINVAL;
+ goto out;
+ }
+ udev->led_cdev.max_brightness = udev->user_dev.max_brightness;
+
+ ret = devm_led_classdev_register(uleds_misc.this_device,
+ &udev->led_cdev);
+ if (ret < 0)
+ goto out;
+
+ udev->new_data = true;
+ udev->state = ULEDS_STATE_REGISTERED;
+ ret = count;
+
+out:
+ mutex_unlock(&udev->mutex);
+
+ return ret;
+}
+
+static ssize_t uleds_read(struct file *file, char __user *buffer, size_t count,
+ loff_t *ppos)
+{
+ struct uleds_device *udev = file->private_data;
+ ssize_t retval;
+
+ if (count < sizeof(udev->brightness))
+ return 0;
+
+ do {
+ retval = mutex_lock_interruptible(&udev->mutex);
+ if (retval)
+ return retval;
+
+ if (udev->state != ULEDS_STATE_REGISTERED) {
+ retval = -ENODEV;
+ } else if (!udev->new_data && (file->f_flags & O_NONBLOCK)) {
+ retval = -EAGAIN;
+ } else if (udev->new_data) {
+ retval = copy_to_user(buffer, &udev->brightness,
+ sizeof(udev->brightness));
+ udev->new_data = false;
+ retval = sizeof(udev->brightness);
+ }
+
+ mutex_unlock(&udev->mutex);
+
+ if (retval)
+ break;
+
+ if (!(file->f_flags & O_NONBLOCK))
+ retval = wait_event_interruptible(udev->waitq,
+ udev->new_data ||
+ udev->state != ULEDS_STATE_REGISTERED);
+ } while (retval == 0);
+
+ return retval;
+}
+
+static unsigned int uleds_poll(struct file *file, poll_table *wait)
+{
+ struct uleds_device *udev = file->private_data;
+
+ poll_wait(file, &udev->waitq, wait);
+
+ if (udev->new_data)
+ return POLLIN | POLLRDNORM;
+
+ return 0;
+}
+
+static int uleds_release(struct inode *inode, struct file *file)
+{
+ struct uleds_device *udev = file->private_data;
+
+ if (udev->state == ULEDS_STATE_REGISTERED) {
+ udev->state = ULEDS_STATE_UNKNOWN;
+ devm_led_classdev_unregister(uleds_misc.this_device,
+ &udev->led_cdev);
+ }
+ kfree(udev);
+
+ return 0;
+}
+
+static const struct file_operations uleds_fops = {
+ .owner = THIS_MODULE,
+ .open = uleds_open,
+ .release = uleds_release,
+ .read = uleds_read,
+ .write = uleds_write,
+ .poll = uleds_poll,
+ .llseek = no_llseek,
+};
+
+static struct miscdevice uleds_misc = {
+ .fops = &uleds_fops,
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = ULEDS_NAME,
+};
+
+static int __init uleds_init(void)
+{
+ return misc_register(&uleds_misc);
+}
+module_init(uleds_init);
+
+static void __exit uleds_exit(void)
+{
+ misc_deregister(&uleds_misc);
+}
+module_exit(uleds_exit);
+
+MODULE_AUTHOR("David Lechner <david@lechnology.com>");
+MODULE_DESCRIPTION("Userspace driver for the LED subsystem");
+MODULE_LICENSE("GPL");
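
The protocol this driver implements is small: a single write() of struct uleds_user_dev registers the LED class device, after which each read() returns the current brightness as one int, blocking (unless O_NONBLOCK) until it changes. A minimal userspace client, assuming the uapi header exports struct uleds_user_dev with name[LED_MAX_NAME_SIZE] and max_brightness as the code above suggests (error handling trimmed):

	#include <fcntl.h>
	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>
	#include <linux/uleds.h>

	int main(void)
	{
		struct uleds_user_dev dev;
		int fd, brightness;

		fd = open("/dev/uleds", O_RDWR);
		if (fd < 0)
			return 1;

		memset(&dev, 0, sizeof(dev));
		strcpy(dev.name, "example:green:status");  /* illustrative */
		dev.max_brightness = 255;

		/* exactly one full struct registers the LED */
		if (write(fd, &dev, sizeof(dev)) != sizeof(dev))
			return 1;

		/* new_data starts true, so the first read returns the
		 * initial brightness (0) at once; later reads block
		 * until e.g. sysfs or a trigger changes it */
		while (read(fd, &brightness, sizeof(brightness)) ==
		       sizeof(brightness))
			printf("brightness: %d\n", brightness);

		return 0;	/* close unregisters the LED */
	}
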
diff --git a/drivers/lguest/hypercalls.c b/drivers/lguest/hypercalls.c
index 19a32280731d..601f81c04873 100644
--- a/drivers/lguest/hypercalls.c
+++ b/drivers/lguest/hypercalls.c
@@ -109,10 +109,6 @@ static void do_hcall(struct lg_cpu *cpu, struct hcall_args *args)
case LHCALL_SET_CLOCKEVENT:
guest_set_clockevent(cpu, args->arg1);
break;
- case LHCALL_TS:
- /* This sets the TS flag, as we saw used in run_guest(). */
- cpu->ts = args->arg1;
- break;
case LHCALL_HALT:
/* Similarly, this sets the halted flag for run_guest(). */
cpu->halted = 1;
diff --git a/drivers/lguest/lg.h b/drivers/lguest/lg.h
index 69b3814afd2f..2356a2318034 100644
--- a/drivers/lguest/lg.h
+++ b/drivers/lguest/lg.h
@@ -43,7 +43,6 @@ struct lg_cpu {
struct mm_struct *mm; /* == tsk->mm, but that becomes NULL on exit */
u32 cr2;
- int ts;
u32 esp1;
u16 ss1;
diff --git a/drivers/lguest/x86/core.c b/drivers/lguest/x86/core.c
index 6e9042e3d2a9..743253fc638f 100644
--- a/drivers/lguest/x86/core.c
+++ b/drivers/lguest/x86/core.c
@@ -247,14 +247,6 @@ unsigned long *lguest_arch_regptr(struct lg_cpu *cpu, size_t reg_off, bool any)
void lguest_arch_run_guest(struct lg_cpu *cpu)
{
/*
- * Remember the awfully-named TS bit? If the Guest has asked to set it
- * we set it now, so we can trap and pass that trap to the Guest if it
- * uses the FPU.
- */
- if (cpu->ts && fpregs_active())
- stts();
-
- /*
* SYSENTER is an optimized way of doing system calls. We can't allow
* it because it always jumps to privilege level 0. A normal Guest
* won't try it because we don't advertise it in CPUID, but a malicious
@@ -282,10 +274,6 @@ void lguest_arch_run_guest(struct lg_cpu *cpu)
if (boot_cpu_has(X86_FEATURE_SEP))
wrmsr(MSR_IA32_SYSENTER_CS, __KERNEL_CS, 0);
- /* Clear the host TS bit if it was set above. */
- if (cpu->ts && fpregs_active())
- clts();
-
/*
* If the Guest page faulted, then the cr2 register will tell us the
* bad virtual address. We have to grab this now, because once we
@@ -421,12 +409,7 @@ void lguest_arch_handle_trap(struct lg_cpu *cpu)
kill_guest(cpu, "Writing cr2");
break;
case 7: /* We've intercepted a Device Not Available fault. */
- /*
- * If the Guest doesn't want to know, we already restored the
- * Floating Point Unit, so we just continue without telling it.
- */
- if (!cpu->ts)
- return;
+ /* No special handling is needed here. */
break;
case 32 ... 255:
/* This might be a syscall. */
diff --git a/drivers/lightnvm/Makefile b/drivers/lightnvm/Makefile
index 1f6b6521016a..a7a0a22cf1a5 100644
--- a/drivers/lightnvm/Makefile
+++ b/drivers/lightnvm/Makefile
@@ -2,6 +2,6 @@
# Makefile for Open-Channel SSDs.
#
-obj-$(CONFIG_NVM) := core.o sysblk.o sysfs.o
+obj-$(CONFIG_NVM) := core.o sysblk.o
obj-$(CONFIG_NVM_GENNVM) += gennvm.o
obj-$(CONFIG_NVM_RRPC) += rrpc.o
diff --git a/drivers/lightnvm/core.c b/drivers/lightnvm/core.c
index 1cac0f8bc0dc..02240a0b39c9 100644
--- a/drivers/lightnvm/core.c
+++ b/drivers/lightnvm/core.c
@@ -22,13 +22,11 @@
#include <linux/types.h>
#include <linux/sem.h>
#include <linux/bitmap.h>
-#include <linux/module.h>
+#include <linux/moduleparam.h>
#include <linux/miscdevice.h>
#include <linux/lightnvm.h>
#include <linux/sched/sysctl.h>
-#include "lightnvm.h"
-
static LIST_HEAD(nvm_tgt_types);
static DECLARE_RWSEM(nvm_tgtt_lock);
static LIST_HEAD(nvm_mgrs);
@@ -88,8 +86,7 @@ void *nvm_dev_dma_alloc(struct nvm_dev *dev, gfp_t mem_flags,
}
EXPORT_SYMBOL(nvm_dev_dma_alloc);
-void nvm_dev_dma_free(struct nvm_dev *dev, void *addr,
- dma_addr_t dma_handler)
+void nvm_dev_dma_free(struct nvm_dev *dev, void *addr, dma_addr_t dma_handler)
{
dev->ops->dev_dma_free(dev->dma_pool, addr, dma_handler);
}
@@ -178,38 +175,133 @@ static struct nvm_dev *nvm_find_nvm_dev(const char *name)
return NULL;
}
-struct nvm_block *nvm_get_blk(struct nvm_dev *dev, struct nvm_lun *lun,
- unsigned long flags)
+static void nvm_tgt_generic_to_addr_mode(struct nvm_tgt_dev *tgt_dev,
+ struct nvm_rq *rqd)
{
- return dev->mt->get_blk(dev, lun, flags);
+ struct nvm_dev *dev = tgt_dev->parent;
+ int i;
+
+ if (rqd->nr_ppas > 1) {
+ for (i = 0; i < rqd->nr_ppas; i++) {
+ rqd->ppa_list[i] = dev->mt->trans_ppa(tgt_dev,
+ rqd->ppa_list[i], TRANS_TGT_TO_DEV);
+ rqd->ppa_list[i] = generic_to_dev_addr(dev,
+ rqd->ppa_list[i]);
+ }
+ } else {
+ rqd->ppa_addr = dev->mt->trans_ppa(tgt_dev, rqd->ppa_addr,
+ TRANS_TGT_TO_DEV);
+ rqd->ppa_addr = generic_to_dev_addr(dev, rqd->ppa_addr);
+ }
+}
+
+int nvm_set_bb_tbl(struct nvm_dev *dev, struct ppa_addr *ppas, int nr_ppas,
+ int type)
+{
+ struct nvm_rq rqd;
+ int ret;
+
+ if (nr_ppas > dev->ops->max_phys_sect) {
+ pr_err("nvm: unable to update all sysblocks atomically\n");
+ return -EINVAL;
+ }
+
+ memset(&rqd, 0, sizeof(struct nvm_rq));
+
+ nvm_set_rqd_ppalist(dev, &rqd, ppas, nr_ppas, 1);
+ nvm_generic_to_addr_mode(dev, &rqd);
+
+ ret = dev->ops->set_bb_tbl(dev, &rqd.ppa_addr, rqd.nr_ppas, type);
+ nvm_free_rqd_ppalist(dev, &rqd);
+ if (ret) {
+ pr_err("nvm: sysblk failed bb mark\n");
+ return -EINVAL;
+ }
+
+ return 0;
}
-EXPORT_SYMBOL(nvm_get_blk);
+EXPORT_SYMBOL(nvm_set_bb_tbl);
-/* Assumes that all valid pages have already been moved on release to bm */
-void nvm_put_blk(struct nvm_dev *dev, struct nvm_block *blk)
+int nvm_set_tgt_bb_tbl(struct nvm_tgt_dev *tgt_dev, struct ppa_addr *ppas,
+ int nr_ppas, int type)
{
- return dev->mt->put_blk(dev, blk);
+ struct nvm_dev *dev = tgt_dev->parent;
+ struct nvm_rq rqd;
+ int ret;
+
+ if (nr_ppas > dev->ops->max_phys_sect) {
+ pr_err("nvm: unable to update all blocks atomically\n");
+ return -EINVAL;
+ }
+
+ memset(&rqd, 0, sizeof(struct nvm_rq));
+
+ nvm_set_rqd_ppalist(dev, &rqd, ppas, nr_ppas, 1);
+ nvm_tgt_generic_to_addr_mode(tgt_dev, &rqd);
+
+ ret = dev->ops->set_bb_tbl(dev, &rqd.ppa_addr, rqd.nr_ppas, type);
+ nvm_free_rqd_ppalist(dev, &rqd);
+ if (ret) {
+ pr_err("nvm: sysblk failed bb mark\n");
+ return -EINVAL;
+ }
+
+ return 0;
}
-EXPORT_SYMBOL(nvm_put_blk);
+EXPORT_SYMBOL(nvm_set_tgt_bb_tbl);
-void nvm_mark_blk(struct nvm_dev *dev, struct ppa_addr ppa, int type)
+int nvm_max_phys_sects(struct nvm_tgt_dev *tgt_dev)
{
- return dev->mt->mark_blk(dev, ppa, type);
+ struct nvm_dev *dev = tgt_dev->parent;
+
+ return dev->ops->max_phys_sect;
}
-EXPORT_SYMBOL(nvm_mark_blk);
+EXPORT_SYMBOL(nvm_max_phys_sects);
-int nvm_submit_io(struct nvm_dev *dev, struct nvm_rq *rqd)
+int nvm_submit_io(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd)
{
- return dev->mt->submit_io(dev, rqd);
+ struct nvm_dev *dev = tgt_dev->parent;
+
+ return dev->mt->submit_io(tgt_dev, rqd);
}
EXPORT_SYMBOL(nvm_submit_io);
-int nvm_erase_blk(struct nvm_dev *dev, struct nvm_block *blk)
+int nvm_erase_blk(struct nvm_tgt_dev *tgt_dev, struct ppa_addr *p, int flags)
{
- return dev->mt->erase_blk(dev, blk, 0);
+ struct nvm_dev *dev = tgt_dev->parent;
+
+ return dev->mt->erase_blk(tgt_dev, p, flags);
}
EXPORT_SYMBOL(nvm_erase_blk);
+int nvm_get_l2p_tbl(struct nvm_tgt_dev *tgt_dev, u64 slba, u32 nlb,
+ nvm_l2p_update_fn *update_l2p, void *priv)
+{
+ struct nvm_dev *dev = tgt_dev->parent;
+
+ if (!dev->ops->get_l2p_tbl)
+ return 0;
+
+ return dev->ops->get_l2p_tbl(dev, slba, nlb, update_l2p, priv);
+}
+EXPORT_SYMBOL(nvm_get_l2p_tbl);
+
+int nvm_get_area(struct nvm_tgt_dev *tgt_dev, sector_t *lba, sector_t len)
+{
+ struct nvm_dev *dev = tgt_dev->parent;
+
+ return dev->mt->get_area(dev, lba, len);
+}
+EXPORT_SYMBOL(nvm_get_area);
+
+void nvm_put_area(struct nvm_tgt_dev *tgt_dev, sector_t lba)
+{
+ struct nvm_dev *dev = tgt_dev->parent;
+
+ dev->mt->put_area(dev, lba);
+}
+EXPORT_SYMBOL(nvm_put_area);
+
void nvm_addr_to_generic_mode(struct nvm_dev *dev, struct nvm_rq *rqd)
{
int i;
@@ -241,10 +333,11 @@ EXPORT_SYMBOL(nvm_generic_to_addr_mode);
int nvm_set_rqd_ppalist(struct nvm_dev *dev, struct nvm_rq *rqd,
const struct ppa_addr *ppas, int nr_ppas, int vblk)
{
+ struct nvm_geo *geo = &dev->geo;
int i, plane_cnt, pl_idx;
struct ppa_addr ppa;
- if ((!vblk || dev->plane_mode == NVM_PLANE_SINGLE) && nr_ppas == 1) {
+ if ((!vblk || geo->plane_mode == NVM_PLANE_SINGLE) && nr_ppas == 1) {
rqd->nr_ppas = nr_ppas;
rqd->ppa_addr = ppas[0];
@@ -262,7 +355,7 @@ int nvm_set_rqd_ppalist(struct nvm_dev *dev, struct nvm_rq *rqd,
for (i = 0; i < nr_ppas; i++)
rqd->ppa_list[i] = ppas[i];
} else {
- plane_cnt = dev->plane_mode;
+ plane_cnt = geo->plane_mode;
rqd->nr_ppas *= plane_cnt;
for (i = 0; i < nr_ppas; i++) {
@@ -287,7 +380,8 @@ void nvm_free_rqd_ppalist(struct nvm_dev *dev, struct nvm_rq *rqd)
}
EXPORT_SYMBOL(nvm_free_rqd_ppalist);
-int nvm_erase_ppa(struct nvm_dev *dev, struct ppa_addr *ppas, int nr_ppas)
+int nvm_erase_ppa(struct nvm_dev *dev, struct ppa_addr *ppas, int nr_ppas,
+ int flags)
{
struct nvm_rq rqd;
int ret;
@@ -303,6 +397,8 @@ int nvm_erase_ppa(struct nvm_dev *dev, struct ppa_addr *ppas, int nr_ppas)
nvm_generic_to_addr_mode(dev, &rqd);
+ rqd.flags = flags;
+
ret = dev->ops->erase_block(dev, &rqd);
nvm_free_rqd_ppalist(dev, &rqd);
@@ -341,7 +437,7 @@ static int __nvm_submit_ppa(struct nvm_dev *dev, struct nvm_rq *rqd, int opcode,
nvm_generic_to_addr_mode(dev, rqd);
- rqd->dev = dev;
+ rqd->dev = NULL;
rqd->opcode = opcode;
rqd->flags = flags;
rqd->bio = bio;
@@ -437,17 +533,18 @@ EXPORT_SYMBOL(nvm_submit_ppa);
*/
int nvm_bb_tbl_fold(struct nvm_dev *dev, u8 *blks, int nr_blks)
{
+ struct nvm_geo *geo = &dev->geo;
int blk, offset, pl, blktype;
- if (nr_blks != dev->blks_per_lun * dev->plane_mode)
+ if (nr_blks != geo->blks_per_lun * geo->plane_mode)
return -EINVAL;
- for (blk = 0; blk < dev->blks_per_lun; blk++) {
- offset = blk * dev->plane_mode;
+ for (blk = 0; blk < geo->blks_per_lun; blk++) {
+ offset = blk * geo->plane_mode;
blktype = blks[offset];
/* Bad blocks on any planes take precedence over other types */
- for (pl = 0; pl < dev->plane_mode; pl++) {
+ for (pl = 0; pl < geo->plane_mode; pl++) {
if (blks[offset + pl] &
(NVM_BLK_T_BAD|NVM_BLK_T_GRWN_BAD)) {
blktype = blks[offset + pl];
@@ -458,7 +555,7 @@ int nvm_bb_tbl_fold(struct nvm_dev *dev, u8 *blks, int nr_blks)
blks[blk] = blktype;
}
- return dev->blks_per_lun;
+ return geo->blks_per_lun;
}
EXPORT_SYMBOL(nvm_bb_tbl_fold);
@@ -470,11 +567,22 @@ int nvm_get_bb_tbl(struct nvm_dev *dev, struct ppa_addr ppa, u8 *blks)
}
EXPORT_SYMBOL(nvm_get_bb_tbl);
+int nvm_get_tgt_bb_tbl(struct nvm_tgt_dev *tgt_dev, struct ppa_addr ppa,
+ u8 *blks)
+{
+ struct nvm_dev *dev = tgt_dev->parent;
+
+ ppa = dev->mt->trans_ppa(tgt_dev, ppa, TRANS_TGT_TO_DEV);
+ return nvm_get_bb_tbl(dev, ppa, blks);
+}
+EXPORT_SYMBOL(nvm_get_tgt_bb_tbl);
+
static int nvm_init_slc_tbl(struct nvm_dev *dev, struct nvm_id_group *grp)
{
+ struct nvm_geo *geo = &dev->geo;
int i;
- dev->lps_per_blk = dev->pgs_per_blk;
+ dev->lps_per_blk = geo->pgs_per_blk;
dev->lptbl = kcalloc(dev->lps_per_blk, sizeof(int), GFP_KERNEL);
if (!dev->lptbl)
return -ENOMEM;
@@ -520,29 +628,32 @@ static int nvm_core_init(struct nvm_dev *dev)
{
struct nvm_id *id = &dev->identity;
struct nvm_id_group *grp = &id->groups[0];
+ struct nvm_geo *geo = &dev->geo;
int ret;
- /* device values */
- dev->nr_chnls = grp->num_ch;
- dev->luns_per_chnl = grp->num_lun;
- dev->pgs_per_blk = grp->num_pg;
- dev->blks_per_lun = grp->num_blk;
- dev->nr_planes = grp->num_pln;
- dev->fpg_size = grp->fpg_sz;
- dev->pfpg_size = grp->fpg_sz * grp->num_pln;
- dev->sec_size = grp->csecs;
- dev->oob_size = grp->sos;
- dev->sec_per_pg = grp->fpg_sz / grp->csecs;
- dev->mccap = grp->mccap;
- memcpy(&dev->ppaf, &id->ppaf, sizeof(struct nvm_addr_format));
-
- dev->plane_mode = NVM_PLANE_SINGLE;
- dev->max_rq_size = dev->ops->max_phys_sect * dev->sec_size;
+ /* Whole device values */
+ geo->nr_chnls = grp->num_ch;
+ geo->luns_per_chnl = grp->num_lun;
+
+ /* Generic device values */
+ geo->pgs_per_blk = grp->num_pg;
+ geo->blks_per_lun = grp->num_blk;
+ geo->nr_planes = grp->num_pln;
+ geo->fpg_size = grp->fpg_sz;
+ geo->pfpg_size = grp->fpg_sz * grp->num_pln;
+ geo->sec_size = grp->csecs;
+ geo->oob_size = grp->sos;
+ geo->sec_per_pg = grp->fpg_sz / grp->csecs;
+ geo->mccap = grp->mccap;
+ memcpy(&geo->ppaf, &id->ppaf, sizeof(struct nvm_addr_format));
+
+ geo->plane_mode = NVM_PLANE_SINGLE;
+ geo->max_rq_size = dev->ops->max_phys_sect * geo->sec_size;
if (grp->mpos & 0x020202)
- dev->plane_mode = NVM_PLANE_DOUBLE;
+ geo->plane_mode = NVM_PLANE_DOUBLE;
if (grp->mpos & 0x040404)
- dev->plane_mode = NVM_PLANE_QUAD;
+ geo->plane_mode = NVM_PLANE_QUAD;
if (grp->mtype != 0) {
pr_err("nvm: memory type not supported\n");
@@ -550,13 +661,13 @@ static int nvm_core_init(struct nvm_dev *dev)
}
/* calculated values */
- dev->sec_per_pl = dev->sec_per_pg * dev->nr_planes;
- dev->sec_per_blk = dev->sec_per_pl * dev->pgs_per_blk;
- dev->sec_per_lun = dev->sec_per_blk * dev->blks_per_lun;
- dev->nr_luns = dev->luns_per_chnl * dev->nr_chnls;
+ geo->sec_per_pl = geo->sec_per_pg * geo->nr_planes;
+ geo->sec_per_blk = geo->sec_per_pl * geo->pgs_per_blk;
+ geo->sec_per_lun = geo->sec_per_blk * geo->blks_per_lun;
+ geo->nr_luns = geo->luns_per_chnl * geo->nr_chnls;
- dev->total_secs = dev->nr_luns * dev->sec_per_lun;
- dev->lun_map = kcalloc(BITS_TO_LONGS(dev->nr_luns),
+ dev->total_secs = geo->nr_luns * geo->sec_per_lun;
+ dev->lun_map = kcalloc(BITS_TO_LONGS(geo->nr_luns),
sizeof(unsigned long), GFP_KERNEL);
if (!dev->lun_map)
return -ENOMEM;
@@ -583,7 +694,7 @@ static int nvm_core_init(struct nvm_dev *dev)
mutex_init(&dev->mlock);
spin_lock_init(&dev->lock);
- blk_queue_logical_block_size(dev->q, dev->sec_size);
+ blk_queue_logical_block_size(dev->q, geo->sec_size);
return 0;
err_fmtype:
@@ -617,6 +728,7 @@ void nvm_free(struct nvm_dev *dev)
static int nvm_init(struct nvm_dev *dev)
{
+ struct nvm_geo *geo = &dev->geo;
int ret = -EINVAL;
if (!dev->q || !dev->ops)
@@ -648,20 +760,15 @@ static int nvm_init(struct nvm_dev *dev)
}
pr_info("nvm: registered %s [%u/%u/%u/%u/%u/%u]\n",
- dev->name, dev->sec_per_pg, dev->nr_planes,
- dev->pgs_per_blk, dev->blks_per_lun, dev->nr_luns,
- dev->nr_chnls);
+ dev->name, geo->sec_per_pg, geo->nr_planes,
+ geo->pgs_per_blk, geo->blks_per_lun,
+ geo->nr_luns, geo->nr_chnls);
return 0;
err:
pr_err("nvm: failed to initialize nvm\n");
return ret;
}
-static void nvm_exit(struct nvm_dev *dev)
-{
- nvm_sysfs_unregister_dev(dev);
-}
-
struct nvm_dev *nvm_alloc_dev(int node)
{
return kzalloc_node(sizeof(struct nvm_dev), GFP_KERNEL, node);
@@ -691,10 +798,6 @@ int nvm_register(struct nvm_dev *dev)
}
}
- ret = nvm_sysfs_register_dev(dev);
- if (ret)
- goto err_ppalist;
-
if (dev->identity.cap & NVM_ID_DCAP_BBLKMGMT) {
ret = nvm_get_sysblock(dev, &dev->sb);
if (!ret)
@@ -711,8 +814,6 @@ int nvm_register(struct nvm_dev *dev)
up_write(&nvm_lock);
return 0;
-err_ppalist:
- dev->ops->destroy_dma_pool(dev->dma_pool);
err_init:
kfree(dev->lun_map);
return ret;
@@ -725,7 +826,7 @@ void nvm_unregister(struct nvm_dev *dev)
list_del(&dev->devices);
up_write(&nvm_lock);
- nvm_exit(dev);
+ nvm_free(dev);
}
EXPORT_SYMBOL(nvm_unregister);
@@ -754,149 +855,15 @@ static int __nvm_configure_create(struct nvm_ioctl_create *create)
}
s = &create->conf.s;
- if (s->lun_begin > s->lun_end || s->lun_end > dev->nr_luns) {
+ if (s->lun_begin > s->lun_end || s->lun_end > dev->geo.nr_luns) {
pr_err("nvm: lun out of bound (%u:%u > %u)\n",
- s->lun_begin, s->lun_end, dev->nr_luns);
+ s->lun_begin, s->lun_end, dev->geo.nr_luns);
return -EINVAL;
}
return dev->mt->create_tgt(dev, create);
}
-#ifdef CONFIG_NVM_DEBUG
-static int nvm_configure_show(const char *val)
-{
- struct nvm_dev *dev;
- char opcode, devname[DISK_NAME_LEN];
- int ret;
-
- ret = sscanf(val, "%c %32s", &opcode, devname);
- if (ret != 2) {
- pr_err("nvm: invalid command. Use \"opcode devicename\".\n");
- return -EINVAL;
- }
-
- down_write(&nvm_lock);
- dev = nvm_find_nvm_dev(devname);
- up_write(&nvm_lock);
- if (!dev) {
- pr_err("nvm: device not found\n");
- return -EINVAL;
- }
-
- if (!dev->mt)
- return 0;
-
- dev->mt->lun_info_print(dev);
-
- return 0;
-}
-
-static int nvm_configure_remove(const char *val)
-{
- struct nvm_ioctl_remove remove;
- struct nvm_dev *dev;
- char opcode;
- int ret = 0;
-
- ret = sscanf(val, "%c %256s", &opcode, remove.tgtname);
- if (ret != 2) {
- pr_err("nvm: invalid command. Use \"d targetname\".\n");
- return -EINVAL;
- }
-
- remove.flags = 0;
-
- list_for_each_entry(dev, &nvm_devices, devices) {
- ret = dev->mt->remove_tgt(dev, &remove);
- if (!ret)
- break;
- }
-
- return ret;
-}
-
-static int nvm_configure_create(const char *val)
-{
- struct nvm_ioctl_create create;
- char opcode;
- int lun_begin, lun_end, ret;
-
- ret = sscanf(val, "%c %256s %256s %48s %u:%u", &opcode, create.dev,
- create.tgtname, create.tgttype,
- &lun_begin, &lun_end);
- if (ret != 6) {
- pr_err("nvm: invalid command. Use \"opcode device name tgttype lun_begin:lun_end\".\n");
- return -EINVAL;
- }
-
- create.flags = 0;
- create.conf.type = NVM_CONFIG_TYPE_SIMPLE;
- create.conf.s.lun_begin = lun_begin;
- create.conf.s.lun_end = lun_end;
-
- return __nvm_configure_create(&create);
-}
-
-
-/* Exposes administrative interface through /sys/module/lnvm/configure_by_str */
-static int nvm_configure_by_str_event(const char *val,
- const struct kernel_param *kp)
-{
- char opcode;
- int ret;
-
- ret = sscanf(val, "%c", &opcode);
- if (ret != 1) {
- pr_err("nvm: string must have the format of \"cmd ...\"\n");
- return -EINVAL;
- }
-
- switch (opcode) {
- case 'a':
- return nvm_configure_create(val);
- case 'd':
- return nvm_configure_remove(val);
- case 's':
- return nvm_configure_show(val);
- default:
- pr_err("nvm: invalid command\n");
- return -EINVAL;
- }
-
- return 0;
-}
-
-static int nvm_configure_get(char *buf, const struct kernel_param *kp)
-{
- int sz;
- struct nvm_dev *dev;
-
- sz = sprintf(buf, "available devices:\n");
- down_write(&nvm_lock);
- list_for_each_entry(dev, &nvm_devices, devices) {
- if (sz > 4095 - DISK_NAME_LEN - 2)
- break;
- sz += sprintf(buf + sz, " %32s\n", dev->name);
- }
- up_write(&nvm_lock);
-
- return sz;
-}
-
-static const struct kernel_param_ops nvm_configure_by_str_event_param_ops = {
- .set = nvm_configure_by_str_event,
- .get = nvm_configure_get,
-};
-
-#undef MODULE_PARAM_PREFIX
-#define MODULE_PARAM_PREFIX "lnvm."
-
-module_param_cb(configure_debug, &nvm_configure_by_str_event_param_ops, NULL,
- 0644);
-
-#endif /* CONFIG_NVM_DEBUG */
-
static long nvm_ioctl_info(struct file *file, void __user *arg)
{
struct nvm_ioctl_info *info;
@@ -1162,10 +1129,4 @@ static struct miscdevice _nvm_misc = {
.nodename = "lightnvm/control",
.fops = &_ctl_fops,
};
-module_misc_device(_nvm_misc);
-
-MODULE_ALIAS_MISCDEV(MISC_DYNAMIC_MINOR);
-
-MODULE_AUTHOR("Matias Bjorling <m@bjorling.me>");
-MODULE_LICENSE("GPL v2");
-MODULE_VERSION("0.1");
+builtin_misc_device(_nvm_misc);
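
The thread running through this core.c refactor: geometry moves into struct nvm_geo, and targets now operate on a struct nvm_tgt_dev whose geo describes only their slice of LUNs, with the media manager's trans_ppa() translating addresses at the boundary. A rough sketch of the new target-side API under those assumptions:

	/* A target erases "its" first block.  The address is
	 * target-relative; the media manager rewrites it to the
	 * device's absolute channel/LUN (TRANS_TGT_TO_DEV) before
	 * the command reaches the media. */
	static int example_erase_first_blk(struct nvm_tgt_dev *tgt_dev)
	{
		struct ppa_addr ppa;

		ppa.ppa = 0;
		ppa.g.ch  = 0;	/* channel 0 of the target's slice */
		ppa.g.lun = 0;	/* LUN 0 of the target's slice */
		ppa.g.blk = 0;

		return nvm_erase_blk(tgt_dev, &ppa, 0);
	}
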
diff --git a/drivers/lightnvm/gennvm.c b/drivers/lightnvm/gennvm.c
index b74174c6d021..ca7880082d80 100644
--- a/drivers/lightnvm/gennvm.c
+++ b/drivers/lightnvm/gennvm.c
@@ -35,6 +35,165 @@ static const struct block_device_operations gen_fops = {
.owner = THIS_MODULE,
};
+static int gen_reserve_luns(struct nvm_dev *dev, struct nvm_target *t,
+ int lun_begin, int lun_end)
+{
+ int i;
+
+ for (i = lun_begin; i <= lun_end; i++) {
+ if (test_and_set_bit(i, dev->lun_map)) {
+ pr_err("nvm: lun %d already allocated\n", i);
+ goto err;
+ }
+ }
+
+ return 0;
+
+err:
+ while (--i >= lun_begin)
+ clear_bit(i, dev->lun_map);
+
+ return -EBUSY;
+}
+
+static void gen_release_luns_err(struct nvm_dev *dev, int lun_begin,
+ int lun_end)
+{
+ int i;
+
+ for (i = lun_begin; i <= lun_end; i++)
+ WARN_ON(!test_and_clear_bit(i, dev->lun_map));
+}
+
+static void gen_remove_tgt_dev(struct nvm_tgt_dev *tgt_dev)
+{
+ struct nvm_dev *dev = tgt_dev->parent;
+ struct gen_dev_map *dev_map = tgt_dev->map;
+ int i, j;
+
+ for (i = 0; i < dev_map->nr_chnls; i++) {
+ struct gen_ch_map *ch_map = &dev_map->chnls[i];
+ int *lun_offs = ch_map->lun_offs;
+ int ch = i + ch_map->ch_off;
+
+ for (j = 0; j < ch_map->nr_luns; j++) {
+ int lun = j + lun_offs[j];
+ int lunid = (ch * dev->geo.luns_per_chnl) + lun;
+
+ WARN_ON(!test_and_clear_bit(lunid, dev->lun_map));
+ }
+
+ kfree(ch_map->lun_offs);
+ }
+
+ kfree(dev_map->chnls);
+ kfree(dev_map);
+ kfree(tgt_dev->luns);
+ kfree(tgt_dev);
+}
+
+static struct nvm_tgt_dev *gen_create_tgt_dev(struct nvm_dev *dev,
+ int lun_begin, int lun_end)
+{
+ struct nvm_tgt_dev *tgt_dev = NULL;
+ struct gen_dev_map *dev_rmap = dev->rmap;
+ struct gen_dev_map *dev_map;
+ struct ppa_addr *luns;
+ int nr_luns = lun_end - lun_begin + 1;
+ int luns_left = nr_luns;
+ int nr_chnls = nr_luns / dev->geo.luns_per_chnl;
+ int nr_chnls_mod = nr_luns % dev->geo.luns_per_chnl;
+ int bch = lun_begin / dev->geo.luns_per_chnl;
+ int blun = lun_begin % dev->geo.luns_per_chnl;
+ int lunid = 0;
+ int lun_balanced = 1;
+ int prev_nr_luns;
+ int i, j;
+
+ nr_chnls = nr_luns / dev->geo.luns_per_chnl;
+ nr_chnls = (nr_chnls_mod == 0) ? nr_chnls : nr_chnls + 1;
+
+ dev_map = kmalloc(sizeof(struct gen_dev_map), GFP_KERNEL);
+ if (!dev_map)
+ goto err_dev;
+
+ dev_map->chnls = kcalloc(nr_chnls, sizeof(struct gen_ch_map),
+ GFP_KERNEL);
+ if (!dev_map->chnls)
+ goto err_chnls;
+
+ luns = kcalloc(nr_luns, sizeof(struct ppa_addr), GFP_KERNEL);
+ if (!luns)
+ goto err_luns;
+
+ prev_nr_luns = (luns_left > dev->geo.luns_per_chnl) ?
+ dev->geo.luns_per_chnl : luns_left;
+ for (i = 0; i < nr_chnls; i++) {
+ struct gen_ch_map *ch_rmap = &dev_rmap->chnls[i + bch];
+ int *lun_roffs = ch_rmap->lun_offs;
+ struct gen_ch_map *ch_map = &dev_map->chnls[i];
+ int *lun_offs;
+ int luns_in_chnl = (luns_left > dev->geo.luns_per_chnl) ?
+ dev->geo.luns_per_chnl : luns_left;
+
+ if (lun_balanced && prev_nr_luns != luns_in_chnl)
+ lun_balanced = 0;
+
+ ch_map->ch_off = ch_rmap->ch_off = bch;
+ ch_map->nr_luns = luns_in_chnl;
+
+ lun_offs = kcalloc(luns_in_chnl, sizeof(int), GFP_KERNEL);
+ if (!lun_offs)
+ goto err_ch;
+
+ for (j = 0; j < luns_in_chnl; j++) {
+ luns[lunid].ppa = 0;
+ luns[lunid].g.ch = i;
+ luns[lunid++].g.lun = j;
+
+ lun_offs[j] = blun;
+ lun_roffs[j + blun] = blun;
+ }
+
+ ch_map->lun_offs = lun_offs;
+
+ /* when starting a new channel, lun offset is reset */
+ blun = 0;
+ luns_left -= luns_in_chnl;
+ }
+
+ dev_map->nr_chnls = nr_chnls;
+
+ tgt_dev = kmalloc(sizeof(struct nvm_tgt_dev), GFP_KERNEL);
+ if (!tgt_dev)
+ goto err_ch;
+
+ memcpy(&tgt_dev->geo, &dev->geo, sizeof(struct nvm_geo));
+ /* Target device only owns a portion of the physical device */
+ tgt_dev->geo.nr_chnls = nr_chnls;
+ tgt_dev->geo.nr_luns = nr_luns;
+ tgt_dev->geo.luns_per_chnl = (lun_balanced) ? prev_nr_luns : -1;
+ tgt_dev->total_secs = nr_luns * tgt_dev->geo.sec_per_lun;
+ tgt_dev->q = dev->q;
+ tgt_dev->map = dev_map;
+ tgt_dev->luns = luns;
+ memcpy(&tgt_dev->identity, &dev->identity, sizeof(struct nvm_id));
+
+ tgt_dev->parent = dev;
+
+ return tgt_dev;
+err_ch:
+ while (--i >= 0)
+ kfree(dev_map->chnls[i].lun_offs);
+ kfree(luns);
+err_luns:
+ kfree(dev_map->chnls);
+err_chnls:
+ kfree(dev_map);
+err_dev:
+ return tgt_dev;
+}
+
static int gen_create_tgt(struct nvm_dev *dev, struct nvm_ioctl_create *create)
{
struct gen_dev *gn = dev->mp;
@@ -43,6 +202,7 @@ static int gen_create_tgt(struct nvm_dev *dev, struct nvm_ioctl_create *create)
struct gendisk *tdisk;
struct nvm_tgt_type *tt;
struct nvm_target *t;
+ struct nvm_tgt_dev *tgt_dev;
void *targetdata;
tt = nvm_find_target_type(create->tgttype, 1);
@@ -64,9 +224,18 @@ static int gen_create_tgt(struct nvm_dev *dev, struct nvm_ioctl_create *create)
if (!t)
return -ENOMEM;
+ if (gen_reserve_luns(dev, t, s->lun_begin, s->lun_end))
+ goto err_t;
+
+ tgt_dev = gen_create_tgt_dev(dev, s->lun_begin, s->lun_end);
+ if (!tgt_dev) {
+ pr_err("nvm: could not create target device\n");
+ goto err_reserve;
+ }
+
tqueue = blk_alloc_queue_node(GFP_KERNEL, dev->q->node);
if (!tqueue)
- goto err_t;
+ goto err_dev;
blk_queue_make_request(tqueue, tt->make_rq);
tdisk = alloc_disk(0);
@@ -80,7 +249,7 @@ static int gen_create_tgt(struct nvm_dev *dev, struct nvm_ioctl_create *create)
tdisk->fops = &gen_fops;
tdisk->queue = tqueue;
- targetdata = tt->init(dev, tdisk, s->lun_begin, s->lun_end);
+ targetdata = tt->init(tgt_dev, tdisk);
if (IS_ERR(targetdata))
goto err_init;
@@ -94,7 +263,7 @@ static int gen_create_tgt(struct nvm_dev *dev, struct nvm_ioctl_create *create)
t->type = tt;
t->disk = tdisk;
- t->dev = dev;
+ t->dev = tgt_dev;
mutex_lock(&gn->lock);
list_add_tail(&t->list, &gn->targets);
@@ -105,6 +274,10 @@ err_init:
put_disk(tdisk);
err_queue:
blk_cleanup_queue(tqueue);
+err_dev:
+ kfree(tgt_dev);
+err_reserve:
+ gen_release_luns_err(dev, s->lun_begin, s->lun_end);
err_t:
kfree(t);
return -ENOMEM;
@@ -122,6 +295,7 @@ static void __gen_remove_target(struct nvm_target *t)
if (tt->exit)
tt->exit(tdisk->private_data);
+ gen_remove_tgt_dev(t->dev);
put_disk(tdisk);
list_del(&t->list);
@@ -160,10 +334,11 @@ static int gen_remove_tgt(struct nvm_dev *dev, struct nvm_ioctl_remove *remove)
static int gen_get_area(struct nvm_dev *dev, sector_t *lba, sector_t len)
{
+ struct nvm_geo *geo = &dev->geo;
struct gen_dev *gn = dev->mp;
struct gen_area *area, *prev, *next;
sector_t begin = 0;
- sector_t max_sectors = (dev->sec_size * dev->total_secs) >> 9;
+ sector_t max_sectors = (geo->sec_size * dev->total_secs) >> 9;
if (len > max_sectors)
return -EINVAL;
@@ -220,240 +395,74 @@ static void gen_put_area(struct nvm_dev *dev, sector_t begin)
spin_unlock(&dev->lock);
}
-static void gen_blocks_free(struct nvm_dev *dev)
-{
- struct gen_dev *gn = dev->mp;
- struct gen_lun *lun;
- int i;
-
- gen_for_each_lun(gn, lun, i) {
- if (!lun->vlun.blocks)
- break;
- vfree(lun->vlun.blocks);
- }
-}
-
-static void gen_luns_free(struct nvm_dev *dev)
-{
- struct gen_dev *gn = dev->mp;
-
- kfree(gn->luns);
-}
-
-static int gen_luns_init(struct nvm_dev *dev, struct gen_dev *gn)
-{
- struct gen_lun *lun;
- int i;
-
- gn->luns = kcalloc(dev->nr_luns, sizeof(struct gen_lun), GFP_KERNEL);
- if (!gn->luns)
- return -ENOMEM;
-
- gen_for_each_lun(gn, lun, i) {
- spin_lock_init(&lun->vlun.lock);
- INIT_LIST_HEAD(&lun->free_list);
- INIT_LIST_HEAD(&lun->used_list);
- INIT_LIST_HEAD(&lun->bb_list);
-
- lun->reserved_blocks = 2; /* for GC only */
- lun->vlun.id = i;
- lun->vlun.lun_id = i % dev->luns_per_chnl;
- lun->vlun.chnl_id = i / dev->luns_per_chnl;
- lun->vlun.nr_free_blocks = dev->blks_per_lun;
- }
- return 0;
-}
-
-static int gen_block_bb(struct gen_dev *gn, struct ppa_addr ppa,
- u8 *blks, int nr_blks)
-{
- struct nvm_dev *dev = gn->dev;
- struct gen_lun *lun;
- struct nvm_block *blk;
- int i;
-
- nr_blks = nvm_bb_tbl_fold(dev, blks, nr_blks);
- if (nr_blks < 0)
- return nr_blks;
-
- lun = &gn->luns[(dev->luns_per_chnl * ppa.g.ch) + ppa.g.lun];
-
- for (i = 0; i < nr_blks; i++) {
- if (blks[i] == 0)
- continue;
-
- blk = &lun->vlun.blocks[i];
- list_move_tail(&blk->list, &lun->bb_list);
- lun->vlun.nr_free_blocks--;
- }
-
- return 0;
-}
-
-static int gen_block_map(u64 slba, u32 nlb, __le64 *entries, void *private)
-{
- struct nvm_dev *dev = private;
- struct gen_dev *gn = dev->mp;
- u64 elba = slba + nlb;
- struct gen_lun *lun;
- struct nvm_block *blk;
- u64 i;
- int lun_id;
-
- if (unlikely(elba > dev->total_secs)) {
- pr_err("gen: L2P data from device is out of bounds!\n");
- return -EINVAL;
- }
-
- for (i = 0; i < nlb; i++) {
- u64 pba = le64_to_cpu(entries[i]);
-
- if (unlikely(pba >= dev->total_secs && pba != U64_MAX)) {
- pr_err("gen: L2P data entry is out of bounds!\n");
- return -EINVAL;
- }
-
- /* Address zero is a special one. The first page on a disk is
- * protected. It often holds internal device boot
- * information.
- */
- if (!pba)
- continue;
-
- /* resolve block from physical address */
- lun_id = div_u64(pba, dev->sec_per_lun);
- lun = &gn->luns[lun_id];
-
- /* Calculate block offset into lun */
- pba = pba - (dev->sec_per_lun * lun_id);
- blk = &lun->vlun.blocks[div_u64(pba, dev->sec_per_blk)];
-
- if (!blk->state) {
- /* at this point, we don't know anything about the
- * block. It's up to the FTL on top to re-etablish the
- * block state. The block is assumed to be open.
- */
- list_move_tail(&blk->list, &lun->used_list);
- blk->state = NVM_BLK_ST_TGT;
- lun->vlun.nr_free_blocks--;
- }
- }
-
- return 0;
-}
-
-static int gen_blocks_init(struct nvm_dev *dev, struct gen_dev *gn)
-{
- struct gen_lun *lun;
- struct nvm_block *block;
- sector_t lun_iter, blk_iter, cur_block_id = 0;
- int ret, nr_blks;
- u8 *blks;
-
- nr_blks = dev->blks_per_lun * dev->plane_mode;
- blks = kmalloc(nr_blks, GFP_KERNEL);
- if (!blks)
- return -ENOMEM;
-
- gen_for_each_lun(gn, lun, lun_iter) {
- lun->vlun.blocks = vzalloc(sizeof(struct nvm_block) *
- dev->blks_per_lun);
- if (!lun->vlun.blocks) {
- kfree(blks);
- return -ENOMEM;
- }
-
- for (blk_iter = 0; blk_iter < dev->blks_per_lun; blk_iter++) {
- block = &lun->vlun.blocks[blk_iter];
-
- INIT_LIST_HEAD(&block->list);
-
- block->lun = &lun->vlun;
- block->id = cur_block_id++;
-
- /* First block is reserved for device */
- if (unlikely(lun_iter == 0 && blk_iter == 0)) {
- lun->vlun.nr_free_blocks--;
- continue;
- }
-
- list_add_tail(&block->list, &lun->free_list);
- }
-
- if (dev->ops->get_bb_tbl) {
- struct ppa_addr ppa;
-
- ppa.ppa = 0;
- ppa.g.ch = lun->vlun.chnl_id;
- ppa.g.lun = lun->vlun.lun_id;
-
- ret = nvm_get_bb_tbl(dev, ppa, blks);
- if (ret)
- pr_err("gen: could not get BB table\n");
-
- ret = gen_block_bb(gn, ppa, blks, nr_blks);
- if (ret)
- pr_err("gen: BB table map failed\n");
- }
- }
-
- if ((dev->identity.dom & NVM_RSP_L2P) && dev->ops->get_l2p_tbl) {
- ret = dev->ops->get_l2p_tbl(dev, 0, dev->total_secs,
- gen_block_map, dev);
- if (ret) {
- pr_err("gen: could not read L2P table.\n");
- pr_warn("gen: default block initialization");
- }
- }
-
- kfree(blks);
- return 0;
-}
-
static void gen_free(struct nvm_dev *dev)
{
- gen_blocks_free(dev);
- gen_luns_free(dev);
kfree(dev->mp);
+ kfree(dev->rmap);
dev->mp = NULL;
}
static int gen_register(struct nvm_dev *dev)
{
struct gen_dev *gn;
- int ret;
+ struct gen_dev_map *dev_rmap;
+ int i, j;
if (!try_module_get(THIS_MODULE))
return -ENODEV;
gn = kzalloc(sizeof(struct gen_dev), GFP_KERNEL);
if (!gn)
- return -ENOMEM;
+ goto err_gn;
+
+ dev_rmap = kmalloc(sizeof(struct gen_dev_map), GFP_KERNEL);
+ if (!dev_rmap)
+ goto err_rmap;
+
+ dev_rmap->chnls = kcalloc(dev->geo.nr_chnls, sizeof(struct gen_ch_map),
+ GFP_KERNEL);
+ if (!dev_rmap->chnls)
+ goto err_chnls;
+
+ for (i = 0; i < dev->geo.nr_chnls; i++) {
+ struct gen_ch_map *ch_rmap;
+ int *lun_roffs;
+ int luns_in_chnl = dev->geo.luns_per_chnl;
+
+ ch_rmap = &dev_rmap->chnls[i];
+
+ ch_rmap->ch_off = -1;
+ ch_rmap->nr_luns = luns_in_chnl;
+
+ lun_roffs = kcalloc(luns_in_chnl, sizeof(int), GFP_KERNEL);
+ if (!lun_roffs)
+ goto err_ch;
+
+ for (j = 0; j < luns_in_chnl; j++)
+ lun_roffs[j] = -1;
+
+ ch_rmap->lun_offs = lun_roffs;
+ }
gn->dev = dev;
- gn->nr_luns = dev->nr_luns;
+ gn->nr_luns = dev->geo.nr_luns;
INIT_LIST_HEAD(&gn->area_list);
mutex_init(&gn->lock);
INIT_LIST_HEAD(&gn->targets);
dev->mp = gn;
-
- ret = gen_luns_init(dev, gn);
- if (ret) {
- pr_err("gen: could not initialize luns\n");
- goto err;
- }
-
- ret = gen_blocks_init(dev, gn);
- if (ret) {
- pr_err("gen: could not initialize blocks\n");
- goto err;
- }
+ dev->rmap = dev_rmap;
return 1;
-err:
+err_ch:
+ while (--i >= 0)
+ kfree(dev_rmap->chnls[i].lun_offs);
+ kfree(dev_rmap->chnls);
+err_chnls:
+ kfree(dev_rmap);
+err_rmap:
- gen_free(dev);
+ kfree(gn);
+err_gn:
module_put(THIS_MODULE);
- return ret;
+ return -ENOMEM;
}
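The registration path above builds the device-side reverse map: one gen_ch_map per channel, with every ch_off and lun_offs entry initialised to -1 ("not yet owned by a target"), and it unwinds partial allocations on failure. A minimal userspace sketch of the same allocate-then-unwind pattern follows; the names and the plain malloc/free calls are stand-ins, not part of this patch:

#include <stdlib.h>

struct ch_map { int ch_off; int nr_luns; int *lun_offs; };

static struct ch_map *alloc_rmap(int nr_chnls, int luns_per_chnl)
{
	struct ch_map *chnls = calloc(nr_chnls, sizeof(*chnls));
	int i, j;

	if (!chnls)
		return NULL;

	for (i = 0; i < nr_chnls; i++) {
		chnls[i].ch_off = -1;		/* -1: not owned by any target */
		chnls[i].nr_luns = luns_per_chnl;
		chnls[i].lun_offs = calloc(luns_per_chnl, sizeof(int));
		if (!chnls[i].lun_offs)
			goto err;
		for (j = 0; j < luns_per_chnl; j++)
			chnls[i].lun_offs[j] = -1;
	}
	return chnls;
err:
	while (--i >= 0)		/* free only the rows already allocated */
		free(chnls[i].lun_offs);
	free(chnls);
	return NULL;
}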
static void gen_unregister(struct nvm_dev *dev)
@@ -463,7 +472,7 @@ static void gen_unregister(struct nvm_dev *dev)
mutex_lock(&gn->lock);
list_for_each_entry_safe(t, tmp, &gn->targets, list) {
- if (t->dev != dev)
+ if (t->dev->parent != dev)
continue;
__gen_remove_target(t);
}
@@ -473,168 +482,142 @@ static void gen_unregister(struct nvm_dev *dev)
module_put(THIS_MODULE);
}
-static struct nvm_block *gen_get_blk(struct nvm_dev *dev,
- struct nvm_lun *vlun, unsigned long flags)
+static int gen_map_to_dev(struct nvm_tgt_dev *tgt_dev, struct ppa_addr *p)
{
- struct gen_lun *lun = container_of(vlun, struct gen_lun, vlun);
- struct nvm_block *blk = NULL;
- int is_gc = flags & NVM_IOTYPE_GC;
-
- spin_lock(&vlun->lock);
- if (list_empty(&lun->free_list)) {
- pr_err_ratelimited("gen: lun %u have no free pages available",
- lun->vlun.id);
- goto out;
+ struct gen_dev_map *dev_map = tgt_dev->map;
+ struct gen_ch_map *ch_map = &dev_map->chnls[p->g.ch];
+ int lun_off = ch_map->lun_offs[p->g.lun];
+ struct nvm_dev *dev = tgt_dev->parent;
+ struct gen_dev_map *dev_rmap = dev->rmap;
+ struct gen_ch_map *ch_rmap;
+ int lun_roff;
+
+ p->g.ch += ch_map->ch_off;
+ p->g.lun += lun_off;
+
+ ch_rmap = &dev_rmap->chnls[p->g.ch];
+ lun_roff = ch_rmap->lun_offs[p->g.lun];
+
+ if (unlikely(ch_rmap->ch_off < 0 || lun_roff < 0)) {
+ pr_err("nvm: corrupted device partition table\n");
+ return -EINVAL;
}
- if (!is_gc && lun->vlun.nr_free_blocks < lun->reserved_blocks)
- goto out;
-
- blk = list_first_entry(&lun->free_list, struct nvm_block, list);
-
- list_move_tail(&blk->list, &lun->used_list);
- blk->state = NVM_BLK_ST_TGT;
- lun->vlun.nr_free_blocks--;
-out:
- spin_unlock(&vlun->lock);
- return blk;
-}
-
-static void gen_put_blk(struct nvm_dev *dev, struct nvm_block *blk)
-{
- struct nvm_lun *vlun = blk->lun;
- struct gen_lun *lun = container_of(vlun, struct gen_lun, vlun);
-
- spin_lock(&vlun->lock);
- if (blk->state & NVM_BLK_ST_TGT) {
- list_move_tail(&blk->list, &lun->free_list);
- lun->vlun.nr_free_blocks++;
- blk->state = NVM_BLK_ST_FREE;
- } else if (blk->state & NVM_BLK_ST_BAD) {
- list_move_tail(&blk->list, &lun->bb_list);
- blk->state = NVM_BLK_ST_BAD;
- } else {
- WARN_ON_ONCE(1);
- pr_err("gen: erroneous block type (%lu -> %u)\n",
- blk->id, blk->state);
- list_move_tail(&blk->list, &lun->bb_list);
- }
- spin_unlock(&vlun->lock);
+ return 0;
}
-static void gen_mark_blk(struct nvm_dev *dev, struct ppa_addr ppa, int type)
+static int gen_map_to_tgt(struct nvm_tgt_dev *tgt_dev, struct ppa_addr *p)
{
- struct gen_dev *gn = dev->mp;
- struct gen_lun *lun;
- struct nvm_block *blk;
-
- pr_debug("gen: ppa (ch: %u lun: %u blk: %u pg: %u) -> %u\n",
- ppa.g.ch, ppa.g.lun, ppa.g.blk, ppa.g.pg, type);
-
- if (unlikely(ppa.g.ch > dev->nr_chnls ||
- ppa.g.lun > dev->luns_per_chnl ||
- ppa.g.blk > dev->blks_per_lun)) {
- WARN_ON_ONCE(1);
- pr_err("gen: ppa broken (ch: %u > %u lun: %u > %u blk: %u > %u",
- ppa.g.ch, dev->nr_chnls,
- ppa.g.lun, dev->luns_per_chnl,
- ppa.g.blk, dev->blks_per_lun);
- return;
- }
+ struct nvm_dev *dev = tgt_dev->parent;
+ struct gen_dev_map *dev_rmap = dev->rmap;
+ struct gen_ch_map *ch_rmap = &dev_rmap->chnls[p->g.ch];
+ int lun_roff = ch_rmap->lun_offs[p->g.lun];
- lun = &gn->luns[(dev->luns_per_chnl * ppa.g.ch) + ppa.g.lun];
- blk = &lun->vlun.blocks[ppa.g.blk];
+ p->g.ch -= ch_rmap->ch_off;
+ p->g.lun -= lun_roff;
- /* will be moved to bb list on put_blk from target */
- blk->state = type;
+ return 0;
}
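Taken together, gen_map_to_dev() and gen_map_to_tgt() are inverses: the per-target map adds channel/LUN offsets to reach device coordinates, and the device-wide reverse map subtracts them on the way back. A sketch of the round trip on plain integers — field and function names are assumed for illustration only:

#include <assert.h>

struct addr { int ch; int lun; };

/* Target -> device: add the target's channel and per-LUN offsets. */
static void to_dev(struct addr *p, int ch_off, const int *lun_offs)
{
	p->lun += lun_offs[p->lun];	/* indexed by the target-relative LUN */
	p->ch += ch_off;
}

/* Device -> target: subtract the reverse offsets recorded at creation. */
static void to_tgt(struct addr *p, int ch_roff, const int *lun_roffs)
{
	p->ch -= ch_roff;
	p->lun -= lun_roffs[p->lun];	/* indexed by the device-absolute LUN */
}

int main(void)
{
	int lun_offs[4] = { 2, 2, 2, 2 };	/* target maps onto device LUNs 2.. */
	int lun_roffs[8] = { 0, 0, 2, 2, 2, 2, 0, 0 };
	struct addr a = { .ch = 0, .lun = 1 };

	to_dev(&a, 1, lun_offs);	/* -> device (ch 1, lun 3) */
	to_tgt(&a, 1, lun_roffs);	/* -> back to target (ch 0, lun 1) */
	assert(a.ch == 0 && a.lun == 1);
	return 0;
}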
-/*
- * mark block bad in gen. It is expected that the target recovers separately
- */
-static void gen_mark_blk_bad(struct nvm_dev *dev, struct nvm_rq *rqd)
+static int gen_trans_rq(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd,
+ int flag)
{
- int bit = -1;
- int max_secs = dev->ops->max_phys_sect;
- void *comp_bits = &rqd->ppa_status;
+ gen_trans_fn *f;
+ int i;
+ int ret = 0;
- nvm_addr_to_generic_mode(dev, rqd);
+ f = (flag == TRANS_TGT_TO_DEV) ? gen_map_to_dev : gen_map_to_tgt;
- /* look up blocks and mark them as bad */
- if (rqd->nr_ppas == 1) {
- gen_mark_blk(dev, rqd->ppa_addr, NVM_BLK_ST_BAD);
- return;
+ if (rqd->nr_ppas == 1)
+ return f(tgt_dev, &rqd->ppa_addr);
+
+ for (i = 0; i < rqd->nr_ppas; i++) {
+ ret = f(tgt_dev, &rqd->ppa_list[i]);
+ if (ret)
+ goto out;
}
- while ((bit = find_next_bit(comp_bits, max_secs, bit + 1)) < max_secs)
- gen_mark_blk(dev, rqd->ppa_list[bit], NVM_BLK_ST_BAD);
+out:
+ return ret;
}
static void gen_end_io(struct nvm_rq *rqd)
{
+ struct nvm_tgt_dev *tgt_dev = rqd->dev;
struct nvm_tgt_instance *ins = rqd->ins;
- if (rqd->error == NVM_RSP_ERR_FAILWRITE)
- gen_mark_blk_bad(rqd->dev, rqd);
+ /* Convert address space */
+ if (tgt_dev)
+ gen_trans_rq(tgt_dev, rqd, TRANS_DEV_TO_TGT);
ins->tt->end_io(rqd);
}
-static int gen_submit_io(struct nvm_dev *dev, struct nvm_rq *rqd)
+static int gen_submit_io(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd)
{
+ struct nvm_dev *dev = tgt_dev->parent;
+
if (!dev->ops->submit_io)
return -ENODEV;
/* Convert address space */
+ gen_trans_rq(tgt_dev, rqd, TRANS_TGT_TO_DEV);
nvm_generic_to_addr_mode(dev, rqd);
- rqd->dev = dev;
+ rqd->dev = tgt_dev;
rqd->end_io = gen_end_io;
return dev->ops->submit_io(dev, rqd);
}
-static int gen_erase_blk(struct nvm_dev *dev, struct nvm_block *blk,
- unsigned long flags)
+static int gen_erase_blk(struct nvm_tgt_dev *tgt_dev, struct ppa_addr *p,
+ int flags)
{
- struct ppa_addr addr = block_to_ppa(dev, blk);
+ /* Convert address space */
+ gen_map_to_dev(tgt_dev, p);
- return nvm_erase_ppa(dev, &addr, 1);
+ return nvm_erase_ppa(tgt_dev->parent, p, 1, flags);
}
-static int gen_reserve_lun(struct nvm_dev *dev, int lunid)
+static struct ppa_addr gen_trans_ppa(struct nvm_tgt_dev *tgt_dev,
+ struct ppa_addr p, int direction)
{
- return test_and_set_bit(lunid, dev->lun_map);
-}
+ gen_trans_fn *f;
+ struct ppa_addr ppa = p;
-static void gen_release_lun(struct nvm_dev *dev, int lunid)
-{
- WARN_ON(!test_and_clear_bit(lunid, dev->lun_map));
+ f = (direction == TRANS_TGT_TO_DEV) ? gen_map_to_dev : gen_map_to_tgt;
+ f(tgt_dev, &ppa);
+
+ return ppa;
}
-static struct nvm_lun *gen_get_lun(struct nvm_dev *dev, int lunid)
+static void gen_part_to_tgt(struct nvm_dev *dev, sector_t *entries,
+ int len)
{
- struct gen_dev *gn = dev->mp;
-
- if (unlikely(lunid >= dev->nr_luns))
- return NULL;
+ struct nvm_geo *geo = &dev->geo;
+ struct gen_dev_map *dev_rmap = dev->rmap;
+ u64 i;
- return &gn->luns[lunid].vlun;
-}
+ for (i = 0; i < len; i++) {
+ struct gen_ch_map *ch_rmap;
+ int *lun_roffs;
+ struct ppa_addr gaddr;
+ u64 pba = le64_to_cpu(entries[i]);
+ int off;
+ u64 diff;
-static void gen_lun_info_print(struct nvm_dev *dev)
-{
- struct gen_dev *gn = dev->mp;
- struct gen_lun *lun;
- unsigned int i;
+ if (!pba)
+ continue;
+ gaddr = linear_to_generic_addr(geo, pba);
+ ch_rmap = &dev_rmap->chnls[gaddr.g.ch];
+ lun_roffs = ch_rmap->lun_offs;
- gen_for_each_lun(gn, lun, i) {
- spin_lock(&lun->vlun.lock);
+ off = gaddr.g.ch * geo->luns_per_chnl + gaddr.g.lun;
- pr_info("%s: lun%8u\t%u\n", dev->name, i,
- lun->vlun.nr_free_blocks);
+ diff = ((ch_rmap->ch_off * geo->luns_per_chnl) +
+ (lun_roffs[gaddr.g.lun])) * geo->sec_per_lun;
- spin_unlock(&lun->vlun.lock);
+ entries[i] -= cpu_to_le64(diff);
}
}
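gen_part_to_tgt() rewrites on-device L2P entries into the target's own linear space by subtracting the sectors that sit before the target's first LUN. A worked example under an assumed geometry (none of these numbers come from the patch): with luns_per_chnl = 4 and sec_per_lun = 4096, a target whose first LUN is device (channel 1, LUN 2) has diff = (1 * 4 + 2) * 4096 = 24576, so device entry 30000 becomes target entry 5424:

/* Sketch: the per-entry subtraction, with assumed geometry. */
#include <stdio.h>

int main(void)
{
	unsigned long long luns_per_chnl = 4, sec_per_lun = 4096;
	unsigned long long ch_off = 1, lun_roff = 2;	/* target origin on device */
	unsigned long long pba = 30000;

	unsigned long long diff = (ch_off * luns_per_chnl + lun_roff) * sec_per_lun;
	printf("%llu\n", pba - diff);	/* 30000 - 24576 = 5424 */
	return 0;
}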
@@ -648,22 +631,14 @@ static struct nvmm_type gen = {
.create_tgt = gen_create_tgt,
.remove_tgt = gen_remove_tgt,
- .get_blk = gen_get_blk,
- .put_blk = gen_put_blk,
-
.submit_io = gen_submit_io,
.erase_blk = gen_erase_blk,
- .mark_blk = gen_mark_blk,
-
- .get_lun = gen_get_lun,
- .reserve_lun = gen_reserve_lun,
- .release_lun = gen_release_lun,
- .lun_info_print = gen_lun_info_print,
-
.get_area = gen_get_area,
.put_area = gen_put_area,
+ .trans_ppa = gen_trans_ppa,
+ .part_to_tgt = gen_part_to_tgt,
};
static int __init gen_module_init(void)
diff --git a/drivers/lightnvm/gennvm.h b/drivers/lightnvm/gennvm.h
index 8ecfa817d21d..6a4b3f368848 100644
--- a/drivers/lightnvm/gennvm.h
+++ b/drivers/lightnvm/gennvm.h
@@ -20,37 +20,41 @@
#include <linux/lightnvm.h>
-struct gen_lun {
- struct nvm_lun vlun;
-
- int reserved_blocks;
- /* lun block lists */
- struct list_head used_list; /* In-use blocks */
- struct list_head free_list; /* Not used blocks i.e. released
- * and ready for use
- */
- struct list_head bb_list; /* Bad blocks. Mutually exclusive with
- * free_list and used_list
- */
-};
-
struct gen_dev {
struct nvm_dev *dev;
int nr_luns;
- struct gen_lun *luns;
struct list_head area_list;
struct mutex lock;
struct list_head targets;
};
+/* Map between virtual and physical channel and lun */
+struct gen_ch_map {
+ int ch_off;
+ int nr_luns;
+ int *lun_offs;
+};
+
+struct gen_dev_map {
+ struct gen_ch_map *chnls;
+ int nr_chnls;
+};
+
struct gen_area {
struct list_head list;
sector_t begin;
sector_t end; /* end is excluded */
};
+static inline void *ch_map_to_lun_offs(struct gen_ch_map *ch_map)
+{
+ return ch_map + 1;
+}
+
+typedef int (gen_trans_fn)(struct nvm_tgt_dev *, struct ppa_addr *);
+
#define gen_for_each_lun(bm, lun, i) \
for ((i) = 0, lun = &(bm)->luns[0]; \
(i) < (bm)->nr_luns; (i)++, lun = &(bm)->luns[(i)])
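ch_map_to_lun_offs() returning ch_map + 1 implies that, where it is used, a gen_ch_map and its lun_offs array can live in a single allocation, with the array starting immediately past the struct. A small standalone sketch of that layout, using userspace allocators and illustrative names:

#include <stdlib.h>

struct ch_map { int ch_off; int nr_luns; };	/* header; offsets follow it */

static struct ch_map *alloc_ch_map(int nr_luns)
{
	struct ch_map *m = malloc(sizeof(*m) + nr_luns * sizeof(int));

	if (m) {
		m->ch_off = -1;
		m->nr_luns = nr_luns;
	}
	return m;
}

static int *lun_offs(struct ch_map *m)
{
	return (int *)(m + 1);	/* first entry sits right past the header */
}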
diff --git a/drivers/lightnvm/lightnvm.h b/drivers/lightnvm/lightnvm.h
deleted file mode 100644
index 305c181509a6..000000000000
--- a/drivers/lightnvm/lightnvm.h
+++ /dev/null
@@ -1,35 +0,0 @@
-/*
- * Copyright (C) 2016 CNEX Labs. All rights reserved.
- * Initial release: Matias Bjorling <matias@cnexlabs.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; see the file COPYING. If not, write to
- * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139,
- * USA.
- *
- */
-
-#ifndef LIGHTNVM_H
-#define LIGHTNVM_H
-
-#include <linux/lightnvm.h>
-
-/* core -> sysfs.c */
-int __must_check nvm_sysfs_register_dev(struct nvm_dev *);
-void nvm_sysfs_unregister_dev(struct nvm_dev *);
-int nvm_sysfs_register(void);
-void nvm_sysfs_unregister(void);
-
-/* sysfs > core */
-void nvm_free(struct nvm_dev *);
-
-#endif
diff --git a/drivers/lightnvm/rrpc.c b/drivers/lightnvm/rrpc.c
index 37fcaadbf80c..9fb7de395915 100644
--- a/drivers/lightnvm/rrpc.c
+++ b/drivers/lightnvm/rrpc.c
@@ -28,6 +28,7 @@ static int rrpc_submit_io(struct rrpc *rrpc, struct bio *bio,
static void rrpc_page_invalidate(struct rrpc *rrpc, struct rrpc_addr *a)
{
+ struct nvm_tgt_dev *dev = rrpc->dev;
struct rrpc_block *rblk = a->rblk;
unsigned int pg_offset;
@@ -38,13 +39,13 @@ static void rrpc_page_invalidate(struct rrpc *rrpc, struct rrpc_addr *a)
spin_lock(&rblk->lock);
- div_u64_rem(a->addr, rrpc->dev->sec_per_blk, &pg_offset);
+ div_u64_rem(a->addr, dev->geo.sec_per_blk, &pg_offset);
WARN_ON(test_and_set_bit(pg_offset, rblk->invalid_pages));
rblk->nr_invalid_pages++;
spin_unlock(&rblk->lock);
- rrpc->rev_trans_map[a->addr - rrpc->poffset].addr = ADDR_EMPTY;
+ rrpc->rev_trans_map[a->addr].addr = ADDR_EMPTY;
}
static void rrpc_invalidate_range(struct rrpc *rrpc, sector_t slba,
@@ -116,62 +117,35 @@ static void rrpc_discard(struct rrpc *rrpc, struct bio *bio)
static int block_is_full(struct rrpc *rrpc, struct rrpc_block *rblk)
{
- return (rblk->next_page == rrpc->dev->sec_per_blk);
+ struct nvm_tgt_dev *dev = rrpc->dev;
+
+ return (rblk->next_page == dev->geo.sec_per_blk);
}
/* Calculate relative addr for the given block, considering instantiated LUNs */
static u64 block_to_rel_addr(struct rrpc *rrpc, struct rrpc_block *rblk)
{
- struct nvm_block *blk = rblk->parent;
- int lun_blk = blk->id % (rrpc->dev->blks_per_lun * rrpc->nr_luns);
-
- return lun_blk * rrpc->dev->sec_per_blk;
-}
-
-/* Calculate global addr for the given block */
-static u64 block_to_addr(struct rrpc *rrpc, struct rrpc_block *rblk)
-{
- struct nvm_block *blk = rblk->parent;
-
- return blk->id * rrpc->dev->sec_per_blk;
-}
-
-static struct ppa_addr linear_to_generic_addr(struct nvm_dev *dev,
- struct ppa_addr r)
-{
- struct ppa_addr l;
- int secs, pgs, blks, luns;
- sector_t ppa = r.ppa;
-
- l.ppa = 0;
-
- div_u64_rem(ppa, dev->sec_per_pg, &secs);
- l.g.sec = secs;
-
- sector_div(ppa, dev->sec_per_pg);
- div_u64_rem(ppa, dev->pgs_per_blk, &pgs);
- l.g.pg = pgs;
-
- sector_div(ppa, dev->pgs_per_blk);
- div_u64_rem(ppa, dev->blks_per_lun, &blks);
- l.g.blk = blks;
-
- sector_div(ppa, dev->blks_per_lun);
- div_u64_rem(ppa, dev->luns_per_chnl, &luns);
- l.g.lun = luns;
-
- sector_div(ppa, dev->luns_per_chnl);
- l.g.ch = ppa;
+ struct nvm_tgt_dev *dev = rrpc->dev;
+ struct rrpc_lun *rlun = rblk->rlun;
- return l;
+ return rlun->id * dev->geo.sec_per_blk;
}
-static struct ppa_addr rrpc_ppa_to_gaddr(struct nvm_dev *dev, u64 addr)
+static struct ppa_addr rrpc_ppa_to_gaddr(struct nvm_tgt_dev *dev,
+ struct rrpc_addr *gp)
{
+ struct rrpc_block *rblk = gp->rblk;
+ struct rrpc_lun *rlun = rblk->rlun;
+ u64 addr = gp->addr;
struct ppa_addr paddr;
paddr.ppa = addr;
- return linear_to_generic_addr(dev, paddr);
+ paddr = rrpc_linear_to_generic_addr(&dev->geo, paddr);
+ paddr.g.ch = rlun->bppa.g.ch;
+ paddr.g.lun = rlun->bppa.g.lun;
+ paddr.g.blk = rblk->id;
+
+ return paddr;
}
/* requires lun->lock taken */
@@ -188,21 +162,47 @@ static void rrpc_set_lun_cur(struct rrpc_lun *rlun, struct rrpc_block *new_rblk,
*cur_rblk = new_rblk;
}
+static struct rrpc_block *__rrpc_get_blk(struct rrpc *rrpc,
+ struct rrpc_lun *rlun)
+{
+ struct rrpc_block *rblk = NULL;
+
+ if (list_empty(&rlun->free_list))
+ goto out;
+
+ rblk = list_first_entry(&rlun->free_list, struct rrpc_block, list);
+
+ list_move_tail(&rblk->list, &rlun->used_list);
+ rblk->state = NVM_BLK_ST_TGT;
+ rlun->nr_free_blocks--;
+
+out:
+ return rblk;
+}
+
static struct rrpc_block *rrpc_get_blk(struct rrpc *rrpc, struct rrpc_lun *rlun,
unsigned long flags)
{
- struct nvm_block *blk;
+ struct nvm_tgt_dev *dev = rrpc->dev;
struct rrpc_block *rblk;
+ int is_gc = flags & NVM_IOTYPE_GC;
+
+ spin_lock(&rlun->lock);
+ if (!is_gc && rlun->nr_free_blocks < rlun->reserved_blocks) {
+ pr_err("nvm: rrpc: cannot give block to non GC request\n");
+ spin_unlock(&rlun->lock);
+ return NULL;
+ }
- blk = nvm_get_blk(rrpc->dev, rlun->parent, flags);
- if (!blk) {
- pr_err("nvm: rrpc: cannot get new block from media manager\n");
+ rblk = __rrpc_get_blk(rrpc, rlun);
+ if (!rblk) {
+ pr_err("nvm: rrpc: cannot get new block\n");
+ spin_unlock(&rlun->lock);
return NULL;
}
+ spin_unlock(&rlun->lock);
- rblk = rrpc_get_rblk(rlun, blk->id);
- blk->priv = rblk;
- bitmap_zero(rblk->invalid_pages, rrpc->dev->sec_per_blk);
+ bitmap_zero(rblk->invalid_pages, dev->geo.sec_per_blk);
rblk->next_page = 0;
rblk->nr_invalid_pages = 0;
atomic_set(&rblk->data_cmnt_size, 0);
@@ -212,7 +212,24 @@ static struct rrpc_block *rrpc_get_blk(struct rrpc *rrpc, struct rrpc_lun *rlun,
static void rrpc_put_blk(struct rrpc *rrpc, struct rrpc_block *rblk)
{
- nvm_put_blk(rrpc->dev, rblk->parent);
+ struct rrpc_lun *rlun = rblk->rlun;
+
+ spin_lock(&rlun->lock);
+ if (rblk->state & NVM_BLK_ST_TGT) {
+ list_move_tail(&rblk->list, &rlun->free_list);
+ rlun->nr_free_blocks++;
+ rblk->state = NVM_BLK_ST_FREE;
+ } else if (rblk->state & NVM_BLK_ST_BAD) {
+ list_move_tail(&rblk->list, &rlun->bb_list);
+ rblk->state = NVM_BLK_ST_BAD;
+ } else {
+ WARN_ON_ONCE(1);
+ pr_err("rrpc: erroneous type (ch:%d,lun:%d,blk%d-> %u)\n",
+ rlun->bppa.g.ch, rlun->bppa.g.lun,
+ rblk->id, rblk->state);
+ list_move_tail(&rblk->list, &rlun->bb_list);
+ }
+ spin_unlock(&rlun->lock);
}
static void rrpc_put_blks(struct rrpc *rrpc)
@@ -280,13 +297,14 @@ static void rrpc_end_sync_bio(struct bio *bio)
*/
static int rrpc_move_valid_pages(struct rrpc *rrpc, struct rrpc_block *rblk)
{
- struct request_queue *q = rrpc->dev->q;
+ struct nvm_tgt_dev *dev = rrpc->dev;
+ struct request_queue *q = dev->q;
struct rrpc_rev_addr *rev;
struct nvm_rq *rqd;
struct bio *bio;
struct page *page;
int slot;
- int nr_sec_per_blk = rrpc->dev->sec_per_blk;
+ int nr_sec_per_blk = dev->geo.sec_per_blk;
u64 phys_addr;
DECLARE_COMPLETION_ONSTACK(wait);
@@ -309,12 +327,12 @@ static int rrpc_move_valid_pages(struct rrpc *rrpc, struct rrpc_block *rblk)
nr_sec_per_blk)) < nr_sec_per_blk) {
/* Lock laddr */
- phys_addr = rblk->parent->id * nr_sec_per_blk + slot;
+ phys_addr = rrpc_blk_to_ppa(rrpc, rblk) + slot;
try:
spin_lock(&rrpc->rev_lock);
/* Get logical address from physical to logical table */
- rev = &rrpc->rev_trans_map[phys_addr - rrpc->poffset];
+ rev = &rrpc->rev_trans_map[phys_addr];
/* already updated by previous regular write */
if (rev->addr == ADDR_EMPTY) {
spin_unlock(&rrpc->rev_lock);
@@ -396,15 +414,23 @@ static void rrpc_block_gc(struct work_struct *work)
struct rrpc *rrpc = gcb->rrpc;
struct rrpc_block *rblk = gcb->rblk;
struct rrpc_lun *rlun = rblk->rlun;
- struct nvm_dev *dev = rrpc->dev;
+ struct nvm_tgt_dev *dev = rrpc->dev;
+ struct ppa_addr ppa;
mempool_free(gcb, rrpc->gcb_pool);
- pr_debug("nvm: block '%lu' being reclaimed\n", rblk->parent->id);
+ pr_debug("nvm: block 'ch:%d,lun:%d,blk:%d' being reclaimed\n",
+ rlun->bppa.g.ch, rlun->bppa.g.lun,
+ rblk->id);
if (rrpc_move_valid_pages(rrpc, rblk))
goto put_back;
- if (nvm_erase_blk(dev, rblk->parent))
+ ppa.ppa = 0;
+ ppa.g.ch = rlun->bppa.g.ch;
+ ppa.g.lun = rlun->bppa.g.lun;
+ ppa.g.blk = rblk->id;
+
+ if (nvm_erase_blk(dev, &ppa, 0))
goto put_back;
rrpc_put_blk(rrpc, rblk);
@@ -420,7 +446,7 @@ put_back:
/* the block with the highest number of invalid pages will be at the
* beginning of the list
*/
-static struct rrpc_block *rblock_max_invalid(struct rrpc_block *ra,
+static struct rrpc_block *rblk_max_invalid(struct rrpc_block *ra,
struct rrpc_block *rb)
{
if (ra->nr_invalid_pages == rb->nr_invalid_pages)
@@ -435,13 +461,13 @@ static struct rrpc_block *rblock_max_invalid(struct rrpc_block *ra,
static struct rrpc_block *block_prio_find_max(struct rrpc_lun *rlun)
{
struct list_head *prio_list = &rlun->prio_list;
- struct rrpc_block *rblock, *max;
+ struct rrpc_block *rblk, *max;
BUG_ON(list_empty(prio_list));
max = list_first_entry(prio_list, struct rrpc_block, prio);
- list_for_each_entry(rblock, prio_list, prio)
- max = rblock_max_invalid(max, rblock);
+ list_for_each_entry(rblk, prio_list, prio)
+ max = rblk_max_invalid(max, rblk);
return max;
}
@@ -450,36 +476,37 @@ static void rrpc_lun_gc(struct work_struct *work)
{
struct rrpc_lun *rlun = container_of(work, struct rrpc_lun, ws_gc);
struct rrpc *rrpc = rlun->rrpc;
- struct nvm_lun *lun = rlun->parent;
+ struct nvm_tgt_dev *dev = rrpc->dev;
struct rrpc_block_gc *gcb;
unsigned int nr_blocks_need;
- nr_blocks_need = rrpc->dev->blks_per_lun / GC_LIMIT_INVERSE;
+ nr_blocks_need = dev->geo.blks_per_lun / GC_LIMIT_INVERSE;
if (nr_blocks_need < rrpc->nr_luns)
nr_blocks_need = rrpc->nr_luns;
spin_lock(&rlun->lock);
- while (nr_blocks_need > lun->nr_free_blocks &&
+ while (nr_blocks_need > rlun->nr_free_blocks &&
!list_empty(&rlun->prio_list)) {
- struct rrpc_block *rblock = block_prio_find_max(rlun);
- struct nvm_block *block = rblock->parent;
+ struct rrpc_block *rblk = block_prio_find_max(rlun);
- if (!rblock->nr_invalid_pages)
+ if (!rblk->nr_invalid_pages)
break;
gcb = mempool_alloc(rrpc->gcb_pool, GFP_ATOMIC);
if (!gcb)
break;
- list_del_init(&rblock->prio);
+ list_del_init(&rblk->prio);
- BUG_ON(!block_is_full(rrpc, rblock));
+ WARN_ON(!block_is_full(rrpc, rblk));
- pr_debug("rrpc: selected block '%lu' for GC\n", block->id);
+ pr_debug("rrpc: selected block 'ch:%d,lun:%d,blk:%d' for GC\n",
+ rlun->bppa.g.ch, rlun->bppa.g.lun,
+ rblk->id);
gcb->rrpc = rrpc;
- gcb->rblk = rblock;
+ gcb->rblk = rblk;
INIT_WORK(&gcb->ws_gc, rrpc_block_gc);
queue_work(rrpc->kgc_wq, &gcb->ws_gc);
@@ -504,8 +531,9 @@ static void rrpc_gc_queue(struct work_struct *work)
spin_unlock(&rlun->lock);
mempool_free(gcb, rrpc->gcb_pool);
- pr_debug("nvm: block '%lu' is full, allow GC (sched)\n",
- rblk->parent->id);
+ pr_debug("nvm: block 'ch:%d,lun:%d,blk:%d' full, allow GC (sched)\n",
+ rlun->bppa.g.ch, rlun->bppa.g.lun,
+ rblk->id);
}
static const struct block_device_operations rrpc_fops = {
@@ -529,8 +557,7 @@ static struct rrpc_lun *rrpc_get_lun_rr(struct rrpc *rrpc, int is_gc)
* estimate.
*/
rrpc_for_each_lun(rrpc, rlun, i) {
- if (rlun->parent->nr_free_blocks >
- max_free->parent->nr_free_blocks)
+ if (rlun->nr_free_blocks > max_free->nr_free_blocks)
max_free = rlun;
}
@@ -553,7 +580,7 @@ static struct rrpc_addr *rrpc_update_map(struct rrpc *rrpc, sector_t laddr,
gp->addr = paddr;
gp->rblk = rblk;
- rev = &rrpc->rev_trans_map[gp->addr - rrpc->poffset];
+ rev = &rrpc->rev_trans_map[gp->addr];
rev->addr = laddr;
spin_unlock(&rrpc->rev_lock);
@@ -568,7 +595,7 @@ static u64 rrpc_alloc_addr(struct rrpc *rrpc, struct rrpc_block *rblk)
if (block_is_full(rrpc, rblk))
goto out;
- addr = block_to_addr(rrpc, rblk) + rblk->next_page;
+ addr = rblk->next_page;
rblk->next_page++;
out:
@@ -582,20 +609,22 @@ out:
- * Returns rrpc_addr with the physical address and block. Returns NULL if no
- * blocks in the next rlun are available.
+ * Returns a generic ppa_addr for the mapped page, or an empty address
+ * (ADDR_EMPTY) when no blocks in the next rlun are available.
*/
-static struct rrpc_addr *rrpc_map_page(struct rrpc *rrpc, sector_t laddr,
+static struct ppa_addr rrpc_map_page(struct rrpc *rrpc, sector_t laddr,
int is_gc)
{
+ struct nvm_tgt_dev *tgt_dev = rrpc->dev;
struct rrpc_lun *rlun;
struct rrpc_block *rblk, **cur_rblk;
- struct nvm_lun *lun;
+ struct rrpc_addr *p;
+ struct ppa_addr ppa;
u64 paddr;
int gc_force = 0;
+ ppa.ppa = ADDR_EMPTY;
rlun = rrpc_get_lun_rr(rrpc, is_gc);
- lun = rlun->parent;
- if (!is_gc && lun->nr_free_blocks < rrpc->nr_luns * 4)
- return NULL;
+ if (!is_gc && rlun->nr_free_blocks < rrpc->nr_luns * 4)
+ return ppa;
/*
* page allocation steps:
@@ -652,10 +681,15 @@ new_blk:
}
pr_err("rrpc: failed to allocate new block\n");
- return NULL;
+ return ppa;
done:
spin_unlock(&rlun->lock);
- return rrpc_update_map(rrpc, laddr, rblk, paddr);
+ p = rrpc_update_map(rrpc, laddr, rblk, paddr);
+ if (!p)
+ return ppa;
+
+ /* return global address */
+ return rrpc_ppa_to_gaddr(tgt_dev, p);
}
static void rrpc_run_gc(struct rrpc *rrpc, struct rrpc_block *rblk)
@@ -675,21 +709,70 @@ static void rrpc_run_gc(struct rrpc *rrpc, struct rrpc_block *rblk)
queue_work(rrpc->kgc_wq, &gcb->ws_gc);
}
+static struct rrpc_lun *rrpc_ppa_to_lun(struct rrpc *rrpc, struct ppa_addr p)
+{
+ struct rrpc_lun *rlun = NULL;
+ int i;
+
+ for (i = 0; i < rrpc->nr_luns; i++) {
+ if (rrpc->luns[i].bppa.g.ch == p.g.ch &&
+ rrpc->luns[i].bppa.g.lun == p.g.lun) {
+ rlun = &rrpc->luns[i];
+ break;
+ }
+ }
+
+ return rlun;
+}
+
+static void __rrpc_mark_bad_block(struct rrpc *rrpc, struct ppa_addr ppa)
+{
+ struct nvm_tgt_dev *dev = rrpc->dev;
+ struct rrpc_lun *rlun;
+ struct rrpc_block *rblk;
+
+ rlun = rrpc_ppa_to_lun(rrpc, ppa);
+ rblk = &rlun->blocks[ppa.g.blk];
+ rblk->state = NVM_BLK_ST_BAD;
+
+ nvm_set_tgt_bb_tbl(dev, &ppa, 1, NVM_BLK_T_GRWN_BAD);
+}
+
+static void rrpc_mark_bad_block(struct rrpc *rrpc, struct nvm_rq *rqd)
+{
+ void *comp_bits = &rqd->ppa_status;
+ struct ppa_addr ppa, prev_ppa;
+ int nr_ppas = rqd->nr_ppas;
+ int bit;
+
+ if (rqd->nr_ppas == 1) {
+ __rrpc_mark_bad_block(rrpc, rqd->ppa_addr);
+ return;
+ }
+
+ ppa_set_empty(&prev_ppa);
+ bit = -1;
+ while ((bit = find_next_bit(comp_bits, nr_ppas, bit + 1)) < nr_ppas) {
+ ppa = rqd->ppa_list[bit];
+ if (ppa_cmp_blk(ppa, prev_ppa))
+ continue;
+
+ prev_ppa = ppa;
+ __rrpc_mark_bad_block(rrpc, ppa);
+ }
+}
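rrpc_mark_bad_block() walks the completion bitmap and marks each failed block once; the ppa_cmp_blk() test skips consecutive sectors that fall into a block already handled. The same shape in a standalone sketch, with a plain loop in place of find_next_bit and invented addresses:

#include <stdio.h>

struct ppa { int ch, lun, blk; };

static int same_blk(struct ppa a, struct ppa b)
{
	return a.ch == b.ch && a.lun == b.lun && a.blk == b.blk;
}

int main(void)
{
	struct ppa list[4] = { {0, 0, 7}, {0, 0, 7}, {0, 1, 3}, {0, 1, 3} };
	unsigned status = 0xb;			/* sectors 0, 1 and 3 failed */
	struct ppa prev = { -1, -1, -1 };
	int i;

	for (i = 0; i < 4; i++) {
		if (!(status & (1u << i)))
			continue;
		if (same_blk(list[i], prev))
			continue;		/* block already marked bad */
		prev = list[i];
		printf("mark bad: ch%d lun%d blk%d\n", prev.ch, prev.lun, prev.blk);
	}
	return 0;
}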
+
static void rrpc_end_io_write(struct rrpc *rrpc, struct rrpc_rq *rrqd,
sector_t laddr, uint8_t npages)
{
+ struct nvm_tgt_dev *dev = rrpc->dev;
struct rrpc_addr *p;
struct rrpc_block *rblk;
- struct nvm_lun *lun;
int cmnt_size, i;
for (i = 0; i < npages; i++) {
p = &rrpc->trans_map[laddr + i];
rblk = p->rblk;
- lun = rblk->parent->lun;
cmnt_size = atomic_inc_return(&rblk->data_cmnt_size);
- if (unlikely(cmnt_size == rrpc->dev->sec_per_blk))
+ if (unlikely(cmnt_size == dev->geo.sec_per_blk))
rrpc_run_gc(rrpc, rblk);
}
}
@@ -697,12 +780,17 @@ static void rrpc_end_io_write(struct rrpc *rrpc, struct rrpc_rq *rrqd,
static void rrpc_end_io(struct nvm_rq *rqd)
{
struct rrpc *rrpc = container_of(rqd->ins, struct rrpc, instance);
+ struct nvm_tgt_dev *dev = rrpc->dev;
struct rrpc_rq *rrqd = nvm_rq_to_pdu(rqd);
uint8_t npages = rqd->nr_ppas;
sector_t laddr = rrpc_get_laddr(rqd->bio) - npages;
- if (bio_data_dir(rqd->bio) == WRITE)
+ if (bio_data_dir(rqd->bio) == WRITE) {
+ if (rqd->error == NVM_RSP_ERR_FAILWRITE)
+ rrpc_mark_bad_block(rrpc, rqd);
+
rrpc_end_io_write(rrpc, rrqd, laddr, npages);
+ }
bio_put(rqd->bio);
@@ -712,7 +800,7 @@ static void rrpc_end_io(struct nvm_rq *rqd)
rrpc_unlock_rq(rrpc, rqd);
if (npages > 1)
- nvm_dev_dma_free(rrpc->dev, rqd->ppa_list, rqd->dma_ppa_list);
+ nvm_dev_dma_free(dev->parent, rqd->ppa_list, rqd->dma_ppa_list);
mempool_free(rqd, rrpc->rq_pool);
}
@@ -720,6 +808,7 @@ static void rrpc_end_io(struct nvm_rq *rqd)
static int rrpc_read_ppalist_rq(struct rrpc *rrpc, struct bio *bio,
struct nvm_rq *rqd, unsigned long flags, int npages)
{
+ struct nvm_tgt_dev *dev = rrpc->dev;
struct rrpc_inflight_rq *r = rrpc_get_inflight_rq(rqd);
struct rrpc_addr *gp;
sector_t laddr = rrpc_get_laddr(bio);
@@ -727,7 +816,7 @@ static int rrpc_read_ppalist_rq(struct rrpc *rrpc, struct bio *bio,
int i;
if (!is_gc && rrpc_lock_rq(rrpc, bio, rqd)) {
- nvm_dev_dma_free(rrpc->dev, rqd->ppa_list, rqd->dma_ppa_list);
+ nvm_dev_dma_free(dev->parent, rqd->ppa_list, rqd->dma_ppa_list);
return NVM_IO_REQUEUE;
}
@@ -737,12 +826,11 @@ static int rrpc_read_ppalist_rq(struct rrpc *rrpc, struct bio *bio,
gp = &rrpc->trans_map[laddr + i];
if (gp->rblk) {
- rqd->ppa_list[i] = rrpc_ppa_to_gaddr(rrpc->dev,
- gp->addr);
+ rqd->ppa_list[i] = rrpc_ppa_to_gaddr(dev, gp);
} else {
BUG_ON(is_gc);
rrpc_unlock_laddr(rrpc, r);
- nvm_dev_dma_free(rrpc->dev, rqd->ppa_list,
+ nvm_dev_dma_free(dev->parent, rqd->ppa_list,
rqd->dma_ppa_list);
return NVM_IO_DONE;
}
@@ -756,7 +844,6 @@ static int rrpc_read_ppalist_rq(struct rrpc *rrpc, struct bio *bio,
static int rrpc_read_rq(struct rrpc *rrpc, struct bio *bio, struct nvm_rq *rqd,
unsigned long flags)
{
- struct rrpc_rq *rrqd = nvm_rq_to_pdu(rqd);
int is_gc = flags & NVM_IOTYPE_GC;
sector_t laddr = rrpc_get_laddr(bio);
struct rrpc_addr *gp;
@@ -768,7 +855,7 @@ static int rrpc_read_rq(struct rrpc *rrpc, struct bio *bio, struct nvm_rq *rqd,
gp = &rrpc->trans_map[laddr];
if (gp->rblk) {
- rqd->ppa_addr = rrpc_ppa_to_gaddr(rrpc->dev, gp->addr);
+ rqd->ppa_addr = rrpc_ppa_to_gaddr(rrpc->dev, gp);
} else {
BUG_ON(is_gc);
rrpc_unlock_rq(rrpc, rqd);
@@ -776,7 +863,6 @@ static int rrpc_read_rq(struct rrpc *rrpc, struct bio *bio, struct nvm_rq *rqd,
}
rqd->opcode = NVM_OP_HBREAD;
- rrqd->addr = gp;
return NVM_IO_OK;
}
@@ -784,31 +870,31 @@ static int rrpc_read_rq(struct rrpc *rrpc, struct bio *bio, struct nvm_rq *rqd,
static int rrpc_write_ppalist_rq(struct rrpc *rrpc, struct bio *bio,
struct nvm_rq *rqd, unsigned long flags, int npages)
{
+ struct nvm_tgt_dev *dev = rrpc->dev;
struct rrpc_inflight_rq *r = rrpc_get_inflight_rq(rqd);
- struct rrpc_addr *p;
+ struct ppa_addr p;
sector_t laddr = rrpc_get_laddr(bio);
int is_gc = flags & NVM_IOTYPE_GC;
int i;
if (!is_gc && rrpc_lock_rq(rrpc, bio, rqd)) {
- nvm_dev_dma_free(rrpc->dev, rqd->ppa_list, rqd->dma_ppa_list);
+ nvm_dev_dma_free(dev->parent, rqd->ppa_list, rqd->dma_ppa_list);
return NVM_IO_REQUEUE;
}
for (i = 0; i < npages; i++) {
/* We assume that mapping occurs at 4KB granularity */
p = rrpc_map_page(rrpc, laddr + i, is_gc);
- if (!p) {
+ if (p.ppa == ADDR_EMPTY) {
BUG_ON(is_gc);
rrpc_unlock_laddr(rrpc, r);
- nvm_dev_dma_free(rrpc->dev, rqd->ppa_list,
+ nvm_dev_dma_free(dev->parent, rqd->ppa_list,
rqd->dma_ppa_list);
rrpc_gc_kick(rrpc);
return NVM_IO_REQUEUE;
}
- rqd->ppa_list[i] = rrpc_ppa_to_gaddr(rrpc->dev,
- p->addr);
+ rqd->ppa_list[i] = p;
}
rqd->opcode = NVM_OP_HBWRITE;
@@ -819,8 +905,7 @@ static int rrpc_write_ppalist_rq(struct rrpc *rrpc, struct bio *bio,
static int rrpc_write_rq(struct rrpc *rrpc, struct bio *bio,
struct nvm_rq *rqd, unsigned long flags)
{
- struct rrpc_rq *rrqd = nvm_rq_to_pdu(rqd);
- struct rrpc_addr *p;
+ struct ppa_addr p;
int is_gc = flags & NVM_IOTYPE_GC;
sector_t laddr = rrpc_get_laddr(bio);
@@ -828,16 +913,15 @@ static int rrpc_write_rq(struct rrpc *rrpc, struct bio *bio,
return NVM_IO_REQUEUE;
p = rrpc_map_page(rrpc, laddr, is_gc);
- if (!p) {
+ if (p.ppa == ADDR_EMPTY) {
BUG_ON(is_gc);
rrpc_unlock_rq(rrpc, rqd);
rrpc_gc_kick(rrpc);
return NVM_IO_REQUEUE;
}
- rqd->ppa_addr = rrpc_ppa_to_gaddr(rrpc->dev, p->addr);
+ rqd->ppa_addr = p;
rqd->opcode = NVM_OP_HBWRITE;
- rrqd->addr = p;
return NVM_IO_OK;
}
@@ -845,8 +929,10 @@ static int rrpc_write_rq(struct rrpc *rrpc, struct bio *bio,
static int rrpc_setup_rq(struct rrpc *rrpc, struct bio *bio,
struct nvm_rq *rqd, unsigned long flags, uint8_t npages)
{
+ struct nvm_tgt_dev *dev = rrpc->dev;
+
if (npages > 1) {
- rqd->ppa_list = nvm_dev_dma_alloc(rrpc->dev, GFP_KERNEL,
+ rqd->ppa_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL,
&rqd->dma_ppa_list);
if (!rqd->ppa_list) {
pr_err("rrpc: not able to allocate ppa list\n");
@@ -869,14 +955,15 @@ static int rrpc_setup_rq(struct rrpc *rrpc, struct bio *bio,
static int rrpc_submit_io(struct rrpc *rrpc, struct bio *bio,
struct nvm_rq *rqd, unsigned long flags)
{
- int err;
+ struct nvm_tgt_dev *dev = rrpc->dev;
struct rrpc_rq *rrq = nvm_rq_to_pdu(rqd);
uint8_t nr_pages = rrpc_get_pages(bio);
int bio_size = bio_sectors(bio) << 9;
+ int err;
- if (bio_size < rrpc->dev->sec_size)
+ if (bio_size < dev->geo.sec_size)
return NVM_IO_ERR;
- else if (bio_size > rrpc->dev->max_rq_size)
+ else if (bio_size > dev->geo.max_rq_size)
return NVM_IO_ERR;
err = rrpc_setup_rq(rrpc, bio, rqd, flags, nr_pages);
@@ -889,15 +976,15 @@ static int rrpc_submit_io(struct rrpc *rrpc, struct bio *bio,
rqd->nr_ppas = nr_pages;
rrq->flags = flags;
- err = nvm_submit_io(rrpc->dev, rqd);
+ err = nvm_submit_io(dev, rqd);
if (err) {
pr_err("rrpc: I/O submission failed: %d\n", err);
bio_put(bio);
if (!(flags & NVM_IOTYPE_GC)) {
rrpc_unlock_rq(rrpc, rqd);
if (rqd->nr_ppas > 1)
- nvm_dev_dma_free(rrpc->dev,
- rqd->ppa_list, rqd->dma_ppa_list);
+ nvm_dev_dma_free(dev->parent, rqd->ppa_list,
+ rqd->dma_ppa_list);
}
return NVM_IO_ERR;
}
@@ -911,6 +998,8 @@ static blk_qc_t rrpc_make_rq(struct request_queue *q, struct bio *bio)
struct nvm_rq *rqd;
int err;
+ blk_queue_split(q, &bio, q->bio_split);
+
if (bio_op(bio) == REQ_OP_DISCARD) {
rrpc_discard(rrpc, bio);
return BLK_QC_T_NONE;
@@ -997,25 +1086,24 @@ static void rrpc_map_free(struct rrpc *rrpc)
static int rrpc_l2p_update(u64 slba, u32 nlb, __le64 *entries, void *private)
{
struct rrpc *rrpc = (struct rrpc *)private;
- struct nvm_dev *dev = rrpc->dev;
+ struct nvm_tgt_dev *dev = rrpc->dev;
struct rrpc_addr *addr = rrpc->trans_map + slba;
struct rrpc_rev_addr *raddr = rrpc->rev_trans_map;
- u64 elba = slba + nlb;
+ struct rrpc_lun *rlun;
+ struct rrpc_block *rblk;
u64 i;
- if (unlikely(elba > dev->total_secs)) {
- pr_err("nvm: L2P data from device is out of bounds!\n");
- return -EINVAL;
- }
-
for (i = 0; i < nlb; i++) {
+ struct ppa_addr gaddr;
u64 pba = le64_to_cpu(entries[i]);
unsigned int mod;
+
/* LNVM treats address-spaces as silos, LBA and PBA are
* equally large and zero-indexed.
*/
if (unlikely(pba >= dev->total_secs && pba != U64_MAX)) {
pr_err("nvm: L2P data entry is out of bounds!\n");
+ pr_err("nvm: Maybe loaded an old target L2P\n");
return -EINVAL;
}
@@ -1028,7 +1116,27 @@ static int rrpc_l2p_update(u64 slba, u32 nlb, __le64 *entries, void *private)
div_u64_rem(pba, rrpc->nr_sects, &mod);
+ gaddr = rrpc_recov_addr(dev, pba);
+ rlun = rrpc_ppa_to_lun(rrpc, gaddr);
+ if (!rlun) {
+ pr_err("rrpc: l2p corruption on lba %llu\n",
+ slba + i);
+ return -EINVAL;
+ }
+
+ rblk = &rlun->blocks[gaddr.g.blk];
+ if (!rblk->state) {
+ /* at this point, we don't know anything about the
+ * block. It's up to the FTL on top to re-establish the
+ * block state. The block is assumed to be open.
+ */
+ list_move_tail(&rblk->list, &rlun->used_list);
+ rblk->state = NVM_BLK_ST_TGT;
+ rlun->nr_free_blocks--;
+ }
+
addr[i].addr = pba;
+ addr[i].rblk = rblk;
raddr[mod].addr = slba + i;
}
@@ -1037,7 +1145,7 @@ static int rrpc_l2p_update(u64 slba, u32 nlb, __le64 *entries, void *private)
static int rrpc_map_init(struct rrpc *rrpc)
{
- struct nvm_dev *dev = rrpc->dev;
+ struct nvm_tgt_dev *dev = rrpc->dev;
sector_t i;
int ret;
@@ -1058,12 +1166,9 @@ static int rrpc_map_init(struct rrpc *rrpc)
r->addr = ADDR_EMPTY;
}
- if (!dev->ops->get_l2p_tbl)
- return 0;
-
/* Bring up the mapping table from device */
- ret = dev->ops->get_l2p_tbl(dev, rrpc->soffset, rrpc->nr_sects,
- rrpc_l2p_update, rrpc);
+ ret = nvm_get_l2p_tbl(dev, rrpc->soffset, rrpc->nr_sects,
+ rrpc_l2p_update, rrpc);
if (ret) {
pr_err("nvm: rrpc: could not read L2P table.\n");
return -EINVAL;
@@ -1102,7 +1207,7 @@ static int rrpc_core_init(struct rrpc *rrpc)
if (!rrpc->page_pool)
return -ENOMEM;
- rrpc->gcb_pool = mempool_create_slab_pool(rrpc->dev->nr_luns,
+ rrpc->gcb_pool = mempool_create_slab_pool(rrpc->dev->geo.nr_luns,
rrpc_gcb_cache);
if (!rrpc->gcb_pool)
return -ENOMEM;
@@ -1126,8 +1231,6 @@ static void rrpc_core_free(struct rrpc *rrpc)
static void rrpc_luns_free(struct rrpc *rrpc)
{
- struct nvm_dev *dev = rrpc->dev;
- struct nvm_lun *lun;
struct rrpc_lun *rlun;
int i;
@@ -1136,23 +1239,74 @@ static void rrpc_luns_free(struct rrpc *rrpc)
for (i = 0; i < rrpc->nr_luns; i++) {
rlun = &rrpc->luns[i];
- lun = rlun->parent;
- if (!lun)
- break;
- dev->mt->release_lun(dev, lun->id);
vfree(rlun->blocks);
}
kfree(rrpc->luns);
}
-static int rrpc_luns_init(struct rrpc *rrpc, int lun_begin, int lun_end)
+static int rrpc_bb_discovery(struct nvm_tgt_dev *dev, struct rrpc_lun *rlun)
{
- struct nvm_dev *dev = rrpc->dev;
+ struct nvm_geo *geo = &dev->geo;
+ struct rrpc_block *rblk;
+ struct ppa_addr ppa;
+ u8 *blks;
+ int nr_blks;
+ int i;
+ int ret;
+
+ if (!dev->parent->ops->get_bb_tbl)
+ return 0;
+
+ nr_blks = geo->blks_per_lun * geo->plane_mode;
+ blks = kmalloc(nr_blks, GFP_KERNEL);
+ if (!blks)
+ return -ENOMEM;
+
+ ppa.ppa = 0;
+ ppa.g.ch = rlun->bppa.g.ch;
+ ppa.g.lun = rlun->bppa.g.lun;
+
+ ret = nvm_get_tgt_bb_tbl(dev, ppa, blks);
+ if (ret) {
+ pr_err("rrpc: could not get BB table\n");
+ goto out;
+ }
+
+ nr_blks = nvm_bb_tbl_fold(dev->parent, blks, nr_blks);
+ if (nr_blks < 0) {
+ ret = nr_blks;
+ goto out;
+ }
+
+ for (i = 0; i < nr_blks; i++) {
+ if (blks[i] == NVM_BLK_T_FREE)
+ continue;
+
+ rblk = &rlun->blocks[i];
+ list_move_tail(&rblk->list, &rlun->bb_list);
+ rblk->state = NVM_BLK_ST_BAD;
+ rlun->nr_free_blocks--;
+ }
+
+out:
+ kfree(blks);
+ return ret;
+}
+
+static void rrpc_set_lun_ppa(struct rrpc_lun *rlun, struct ppa_addr ppa)
+{
+ rlun->bppa.ppa = 0;
+ rlun->bppa.g.ch = ppa.g.ch;
+ rlun->bppa.g.lun = ppa.g.lun;
+}
+
+static int rrpc_luns_init(struct rrpc *rrpc, struct ppa_addr *luns)
+{
+ struct nvm_tgt_dev *dev = rrpc->dev;
+ struct nvm_geo *geo = &dev->geo;
struct rrpc_lun *rlun;
int i, j, ret = -EINVAL;
- if (dev->sec_per_blk > MAX_INVALID_PAGES_STORAGE * BITS_PER_LONG) {
+ if (geo->sec_per_blk > MAX_INVALID_PAGES_STORAGE * BITS_PER_LONG) {
pr_err("rrpc: number of pages per block too high.");
return -EINVAL;
}
@@ -1166,43 +1320,46 @@ static int rrpc_luns_init(struct rrpc *rrpc, int lun_begin, int lun_end)
/* 1:1 mapping */
for (i = 0; i < rrpc->nr_luns; i++) {
- int lunid = lun_begin + i;
- struct nvm_lun *lun;
-
- if (dev->mt->reserve_lun(dev, lunid)) {
- pr_err("rrpc: lun %u is already allocated\n", lunid);
- goto err;
- }
-
- lun = dev->mt->get_lun(dev, lunid);
- if (!lun)
- goto err;
-
rlun = &rrpc->luns[i];
- rlun->parent = lun;
+ rlun->id = i;
+ rrpc_set_lun_ppa(rlun, luns[i]);
rlun->blocks = vzalloc(sizeof(struct rrpc_block) *
- rrpc->dev->blks_per_lun);
+ geo->blks_per_lun);
if (!rlun->blocks) {
ret = -ENOMEM;
goto err;
}
- for (j = 0; j < rrpc->dev->blks_per_lun; j++) {
+ INIT_LIST_HEAD(&rlun->free_list);
+ INIT_LIST_HEAD(&rlun->used_list);
+ INIT_LIST_HEAD(&rlun->bb_list);
+
+ for (j = 0; j < geo->blks_per_lun; j++) {
struct rrpc_block *rblk = &rlun->blocks[j];
- struct nvm_block *blk = &lun->blocks[j];
- rblk->parent = blk;
+ rblk->id = j;
rblk->rlun = rlun;
+ rblk->state = NVM_BLK_T_FREE;
INIT_LIST_HEAD(&rblk->prio);
+ INIT_LIST_HEAD(&rblk->list);
spin_lock_init(&rblk->lock);
+
+ list_add_tail(&rblk->list, &rlun->free_list);
}
rlun->rrpc = rrpc;
+ rlun->nr_free_blocks = geo->blks_per_lun;
+ rlun->reserved_blocks = 2; /* for GC only */
+
INIT_LIST_HEAD(&rlun->prio_list);
INIT_LIST_HEAD(&rlun->wblk_list);
INIT_WORK(&rlun->ws_gc, rrpc_lun_gc);
spin_lock_init(&rlun->lock);
+
+ if (rrpc_bb_discovery(dev, rlun))
+ goto err;
+
}
return 0;
@@ -1213,27 +1370,25 @@ err:
/* returns 0 on success and stores the beginning address in *begin */
static int rrpc_area_init(struct rrpc *rrpc, sector_t *begin)
{
- struct nvm_dev *dev = rrpc->dev;
- struct nvmm_type *mt = dev->mt;
- sector_t size = rrpc->nr_sects * dev->sec_size;
+ struct nvm_tgt_dev *dev = rrpc->dev;
+ sector_t size = rrpc->nr_sects * dev->geo.sec_size;
int ret;
size >>= 9;
- ret = mt->get_area(dev, begin, size);
+ ret = nvm_get_area(dev, begin, size);
if (!ret)
- *begin >>= (ilog2(dev->sec_size) - 9);
+ *begin >>= (ilog2(dev->geo.sec_size) - 9);
return ret;
}
static void rrpc_area_free(struct rrpc *rrpc)
{
- struct nvm_dev *dev = rrpc->dev;
- struct nvmm_type *mt = dev->mt;
- sector_t begin = rrpc->soffset << (ilog2(dev->sec_size) - 9);
+ struct nvm_tgt_dev *dev = rrpc->dev;
+ sector_t begin = rrpc->soffset << (ilog2(dev->geo.sec_size) - 9);
- mt->put_area(dev, begin);
+ nvm_put_area(dev, begin);
}
static void rrpc_free(struct rrpc *rrpc)
@@ -1262,11 +1417,11 @@ static void rrpc_exit(void *private)
static sector_t rrpc_capacity(void *private)
{
struct rrpc *rrpc = private;
- struct nvm_dev *dev = rrpc->dev;
+ struct nvm_tgt_dev *dev = rrpc->dev;
sector_t reserved, provisioned;
/* cur, gc, and two emergency blocks for each lun */
- reserved = rrpc->nr_luns * dev->sec_per_blk * 4;
+ reserved = rrpc->nr_luns * dev->geo.sec_per_blk * 4;
provisioned = rrpc->nr_sects - reserved;
if (reserved > rrpc->nr_sects) {
@@ -1285,13 +1440,13 @@ static sector_t rrpc_capacity(void *private)
*/
static void rrpc_block_map_update(struct rrpc *rrpc, struct rrpc_block *rblk)
{
- struct nvm_dev *dev = rrpc->dev;
+ struct nvm_tgt_dev *dev = rrpc->dev;
int offset;
struct rrpc_addr *laddr;
u64 bpaddr, paddr, pladdr;
bpaddr = block_to_rel_addr(rrpc, rblk);
- for (offset = 0; offset < dev->sec_per_blk; offset++) {
+ for (offset = 0; offset < dev->geo.sec_per_blk; offset++) {
paddr = bpaddr + offset;
pladdr = rrpc->rev_trans_map[paddr].addr;
@@ -1311,6 +1466,7 @@ static void rrpc_block_map_update(struct rrpc *rrpc, struct rrpc_block *rblk)
static int rrpc_blocks_init(struct rrpc *rrpc)
{
+ struct nvm_tgt_dev *dev = rrpc->dev;
struct rrpc_lun *rlun;
struct rrpc_block *rblk;
int lun_iter, blk_iter;
@@ -1318,7 +1474,7 @@ static int rrpc_blocks_init(struct rrpc *rrpc)
for (lun_iter = 0; lun_iter < rrpc->nr_luns; lun_iter++) {
rlun = &rrpc->luns[lun_iter];
- for (blk_iter = 0; blk_iter < rrpc->dev->blks_per_lun;
+ for (blk_iter = 0; blk_iter < dev->geo.blks_per_lun;
blk_iter++) {
rblk = &rlun->blocks[blk_iter];
rrpc_block_map_update(rrpc, rblk);
@@ -1357,11 +1513,11 @@ err:
static struct nvm_tgt_type tt_rrpc;
-static void *rrpc_init(struct nvm_dev *dev, struct gendisk *tdisk,
- int lun_begin, int lun_end)
+static void *rrpc_init(struct nvm_tgt_dev *dev, struct gendisk *tdisk)
{
struct request_queue *bqueue = dev->q;
struct request_queue *tqueue = tdisk->queue;
+ struct nvm_geo *geo = &dev->geo;
struct rrpc *rrpc;
sector_t soffset;
int ret;
@@ -1384,9 +1540,8 @@ static void *rrpc_init(struct nvm_dev *dev, struct gendisk *tdisk,
spin_lock_init(&rrpc->bio_lock);
INIT_WORK(&rrpc->ws_requeue, rrpc_requeue);
- rrpc->nr_luns = lun_end - lun_begin + 1;
- rrpc->total_blocks = (unsigned long)dev->blks_per_lun * rrpc->nr_luns;
- rrpc->nr_sects = (unsigned long long)dev->sec_per_lun * rrpc->nr_luns;
+ rrpc->nr_luns = geo->nr_luns;
+ rrpc->nr_sects = (unsigned long long)geo->sec_per_lun * rrpc->nr_luns;
/* simple round-robin strategy */
atomic_set(&rrpc->next_lun, -1);
@@ -1398,15 +1553,12 @@ static void *rrpc_init(struct nvm_dev *dev, struct gendisk *tdisk,
}
rrpc->soffset = soffset;
- ret = rrpc_luns_init(rrpc, lun_begin, lun_end);
+ ret = rrpc_luns_init(rrpc, dev->luns);
if (ret) {
pr_err("nvm: rrpc: could not initialize luns\n");
goto err;
}
- rrpc->poffset = dev->sec_per_lun * lun_begin;
- rrpc->lun_offset = lun_begin;
-
ret = rrpc_core_init(rrpc);
if (ret) {
pr_err("nvm: rrpc: could not initialize core\n");
diff --git a/drivers/lightnvm/rrpc.h b/drivers/lightnvm/rrpc.h
index 5e87d52cb983..94e4d73116b2 100644
--- a/drivers/lightnvm/rrpc.h
+++ b/drivers/lightnvm/rrpc.h
@@ -48,14 +48,15 @@ struct rrpc_inflight_rq {
struct rrpc_rq {
struct rrpc_inflight_rq inflight_rq;
- struct rrpc_addr *addr;
unsigned long flags;
};
struct rrpc_block {
- struct nvm_block *parent;
+ int id; /* id inside of LUN */
struct rrpc_lun *rlun;
- struct list_head prio;
+
+ struct list_head prio; /* LUN GC list */
+ struct list_head list; /* LUN free, used, bb list */
#define MAX_INVALID_PAGES_STORAGE 8
/* Bitmap for invalid page entries */
@@ -65,21 +66,38 @@ struct rrpc_block {
/* number of pages that are invalid, wrt host page size */
unsigned int nr_invalid_pages;
+ int state;
+
spinlock_t lock;
atomic_t data_cmnt_size; /* data pages committed to stable storage */
};
struct rrpc_lun {
struct rrpc *rrpc;
- struct nvm_lun *parent;
+
+ int id;
+ struct ppa_addr bppa;
+
struct rrpc_block *cur, *gc_cur;
struct rrpc_block *blocks; /* Reference to block allocation */
struct list_head prio_list; /* Blocks that may be GC'ed */
struct list_head wblk_list; /* Queued blocks to be written to */
+ /* lun block lists */
+ struct list_head used_list; /* In-use blocks */
+ struct list_head free_list; /* Not used blocks i.e. released
+ * and ready for use
+ */
+ struct list_head bb_list; /* Bad blocks. Mutually exclusive with
+ * free_list and used_list
+ */
+ unsigned int nr_free_blocks; /* Number of unused blocks */
+
struct work_struct ws_gc;
+ int reserved_blocks;
+
spinlock_t lock;
};
@@ -87,19 +105,16 @@ struct rrpc {
/* instance must be kept in top to resolve rrpc in unprep */
struct nvm_tgt_instance instance;
- struct nvm_dev *dev;
+ struct nvm_tgt_dev *dev;
struct gendisk *disk;
sector_t soffset; /* logical sector offset */
- u64 poffset; /* physical page offset */
- int lun_offset;
int nr_luns;
struct rrpc_lun *luns;
/* calculated values */
unsigned long long nr_sects;
- unsigned long total_blocks;
/* Write strategy variables. Move these into each for structure for each
* strategy
@@ -150,13 +165,37 @@ struct rrpc_rev_addr {
u64 addr;
};
-static inline struct rrpc_block *rrpc_get_rblk(struct rrpc_lun *rlun,
- int blk_id)
+static inline struct ppa_addr rrpc_linear_to_generic_addr(struct nvm_geo *geo,
+ struct ppa_addr r)
+{
+ struct ppa_addr l;
+ int secs, pgs;
+ sector_t ppa = r.ppa;
+
+ l.ppa = 0;
+
+ div_u64_rem(ppa, geo->sec_per_pg, &secs);
+ l.g.sec = secs;
+
+ sector_div(ppa, geo->sec_per_pg);
+ div_u64_rem(ppa, geo->pgs_per_blk, &pgs);
+ l.g.pg = pgs;
+
+ return l;
+}
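With channel, LUN and block now carried separately per rrpc_lun/rrpc_block, the inline helper only has to split a block-local linear address into (page, sector). A worked example under an assumed geometry: with sec_per_pg = 4 and pgs_per_blk = 256, address 1029 yields sec = 1029 % 4 = 1 and pg = (1029 / 4) % 256 = 1:

/* Sketch of the split, mirroring rrpc_linear_to_generic_addr() above. */
#include <stdio.h>

int main(void)
{
	unsigned long long ppa = 1029;		/* block-local linear address */
	int sec_per_pg = 4, pgs_per_blk = 256;	/* assumed geometry */

	printf("pg=%llu sec=%llu\n",
	       (ppa / sec_per_pg) % pgs_per_blk,	/* 257 % 256 -> 1 */
	       ppa % sec_per_pg);			/* 1029 % 4  -> 1 */
	return 0;
}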
+
+static inline struct ppa_addr rrpc_recov_addr(struct nvm_tgt_dev *dev, u64 pba)
+{
+ return linear_to_generic_addr(&dev->geo, pba);
+}
+
+static inline u64 rrpc_blk_to_ppa(struct rrpc *rrpc, struct rrpc_block *rblk)
{
- struct rrpc *rrpc = rlun->rrpc;
- int lun_blk = blk_id % rrpc->dev->blks_per_lun;
+ struct nvm_tgt_dev *dev = rrpc->dev;
+ struct nvm_geo *geo = &dev->geo;
+ struct rrpc_lun *rlun = rblk->rlun;
- return &rlun->blocks[lun_blk];
+ return (rlun->id * geo->sec_per_lun) + (rblk->id * geo->sec_per_blk);
}
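rrpc_blk_to_ppa() places each LUN's blocks in a contiguous sec_per_lun-sized slice of the target's linear space. Worked numbers under an assumed geometry, not taken from the patch: with sec_per_lun = 4096 and sec_per_blk = 16, LUN id 3, block id 5 starts at 3 * 4096 + 5 * 16 = 12368:

/* Sketch: the base-address arithmetic with assumed geometry. */
#include <stdio.h>

int main(void)
{
	unsigned long long sec_per_lun = 4096, sec_per_blk = 16;
	unsigned long long lun_id = 3, blk_id = 5;

	printf("%llu\n", lun_id * sec_per_lun + blk_id * sec_per_blk);	/* 12368 */
	return 0;
}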
static inline sector_t rrpc_get_laddr(struct bio *bio)
diff --git a/drivers/lightnvm/sysblk.c b/drivers/lightnvm/sysblk.c
index a75bd28aaca3..12002bf4efc2 100644
--- a/drivers/lightnvm/sysblk.c
+++ b/drivers/lightnvm/sysblk.c
@@ -62,7 +62,8 @@ static void nvm_cpu_to_sysblk(struct nvm_system_block *sb,
static int nvm_setup_sysblks(struct nvm_dev *dev, struct ppa_addr *sysblk_ppas)
{
- int nr_rows = min_t(int, MAX_SYSBLKS, dev->nr_chnls);
+ struct nvm_geo *geo = &dev->geo;
+ int nr_rows = min_t(int, MAX_SYSBLKS, geo->nr_chnls);
int i;
for (i = 0; i < nr_rows; i++)
@@ -71,7 +72,7 @@ static int nvm_setup_sysblks(struct nvm_dev *dev, struct ppa_addr *sysblk_ppas)
/* if possible, place sysblk at first channel, middle channel and last
* channel of the device. If not, create only one or two sys blocks
*/
- switch (dev->nr_chnls) {
+ switch (geo->nr_chnls) {
case 2:
sysblk_ppas[1].g.ch = 1;
/* fall-through */
@@ -80,8 +81,8 @@ static int nvm_setup_sysblks(struct nvm_dev *dev, struct ppa_addr *sysblk_ppas)
break;
default:
sysblk_ppas[0].g.ch = 0;
- sysblk_ppas[1].g.ch = dev->nr_chnls / 2;
- sysblk_ppas[2].g.ch = dev->nr_chnls - 1;
+ sysblk_ppas[1].g.ch = geo->nr_chnls / 2;
+ sysblk_ppas[2].g.ch = geo->nr_chnls - 1;
break;
}
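For more than two channels, the default case spreads the system-block rows across the first, middle and last channel; e.g. an assumed nr_chnls = 8 selects channels 0, 4 and 7. In miniature:

/* Sketch: row placement for an assumed 8-channel device. */
#include <stdio.h>

int main(void)
{
	int nr_chnls = 8;	/* assumed channel count */
	int rows[3] = { 0, nr_chnls / 2, nr_chnls - 1 };

	printf("%d %d %d\n", rows[0], rows[1], rows[2]);	/* 0 4 7 */
	return 0;
}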
@@ -162,11 +163,12 @@ static int sysblk_get_host_blks(struct nvm_dev *dev, struct ppa_addr ppa,
static int nvm_get_all_sysblks(struct nvm_dev *dev, struct sysblk_scan *s,
struct ppa_addr *ppas, int get_free)
{
+ struct nvm_geo *geo = &dev->geo;
int i, nr_blks, ret = 0;
u8 *blks;
s->nr_ppas = 0;
- nr_blks = dev->blks_per_lun * dev->plane_mode;
+ nr_blks = geo->blks_per_lun * geo->plane_mode;
blks = kmalloc(nr_blks, GFP_KERNEL);
if (!blks)
@@ -210,13 +212,14 @@ err_get:
static int nvm_scan_block(struct nvm_dev *dev, struct ppa_addr *ppa,
struct nvm_system_block *sblk)
{
+ struct nvm_geo *geo = &dev->geo;
struct nvm_system_block *cur;
int pg, ret, found = 0;
/* the full buffer for a flash page is allocated. Only the beginning of it
* contains the system block information
*/
- cur = kmalloc(dev->pfpg_size, GFP_KERNEL);
+ cur = kmalloc(geo->pfpg_size, GFP_KERNEL);
if (!cur)
return -ENOMEM;
@@ -225,7 +228,7 @@ static int nvm_scan_block(struct nvm_dev *dev, struct ppa_addr *ppa,
ppa->g.pg = ppa_to_slc(dev, pg);
ret = nvm_submit_ppa(dev, ppa, 1, NVM_OP_PREAD, NVM_IO_SLC_MODE,
- cur, dev->pfpg_size);
+ cur, geo->pfpg_size);
if (ret) {
if (ret == NVM_RSP_ERR_EMPTYPAGE) {
pr_debug("nvm: sysblk scan empty ppa (%u %u %u %u)\n",
@@ -267,34 +270,16 @@ static int nvm_scan_block(struct nvm_dev *dev, struct ppa_addr *ppa,
return found;
}
-static int nvm_set_bb_tbl(struct nvm_dev *dev, struct sysblk_scan *s, int type)
+static int nvm_sysblk_set_bb_tbl(struct nvm_dev *dev, struct sysblk_scan *s,
+ int type)
{
- struct nvm_rq rqd;
- int ret;
-
- if (s->nr_ppas > dev->ops->max_phys_sect) {
- pr_err("nvm: unable to update all sysblocks atomically\n");
- return -EINVAL;
- }
-
- memset(&rqd, 0, sizeof(struct nvm_rq));
-
- nvm_set_rqd_ppalist(dev, &rqd, s->ppas, s->nr_ppas, 1);
- nvm_generic_to_addr_mode(dev, &rqd);
-
- ret = dev->ops->set_bb_tbl(dev, &rqd.ppa_addr, rqd.nr_ppas, type);
- nvm_free_rqd_ppalist(dev, &rqd);
- if (ret) {
- pr_err("nvm: sysblk failed bb mark\n");
- return -EINVAL;
- }
-
- return 0;
+ return nvm_set_bb_tbl(dev, s->ppas, s->nr_ppas, type);
}
static int nvm_write_and_verify(struct nvm_dev *dev, struct nvm_sb_info *info,
struct sysblk_scan *s)
{
+ struct nvm_geo *geo = &dev->geo;
struct nvm_system_block nvmsb;
void *buf;
int i, sect, ret = 0;
@@ -302,12 +287,12 @@ static int nvm_write_and_verify(struct nvm_dev *dev, struct nvm_sb_info *info,
nvm_cpu_to_sysblk(&nvmsb, info);
- buf = kzalloc(dev->pfpg_size, GFP_KERNEL);
+ buf = kzalloc(geo->pfpg_size, GFP_KERNEL);
if (!buf)
return -ENOMEM;
memcpy(buf, &nvmsb, sizeof(struct nvm_system_block));
- ppas = kcalloc(dev->sec_per_pg, sizeof(struct ppa_addr), GFP_KERNEL);
+ ppas = kcalloc(geo->sec_per_pg, sizeof(struct ppa_addr), GFP_KERNEL);
if (!ppas) {
ret = -ENOMEM;
goto err;
@@ -324,15 +309,15 @@ static int nvm_write_and_verify(struct nvm_dev *dev, struct nvm_sb_info *info,
ppas[0].g.pg);
/* Expand to all sectors within a flash page */
- if (dev->sec_per_pg > 1) {
- for (sect = 1; sect < dev->sec_per_pg; sect++) {
+ if (geo->sec_per_pg > 1) {
+ for (sect = 1; sect < geo->sec_per_pg; sect++) {
ppas[sect].ppa = ppas[0].ppa;
ppas[sect].g.sec = sect;
}
}
- ret = nvm_submit_ppa(dev, ppas, dev->sec_per_pg, NVM_OP_PWRITE,
- NVM_IO_SLC_MODE, buf, dev->pfpg_size);
+ ret = nvm_submit_ppa(dev, ppas, geo->sec_per_pg, NVM_OP_PWRITE,
+ NVM_IO_SLC_MODE, buf, geo->pfpg_size);
if (ret) {
pr_err("nvm: sysblk failed program (%u %u %u)\n",
ppas[0].g.ch,
@@ -341,8 +326,8 @@ static int nvm_write_and_verify(struct nvm_dev *dev, struct nvm_sb_info *info,
break;
}
- ret = nvm_submit_ppa(dev, ppas, dev->sec_per_pg, NVM_OP_PREAD,
- NVM_IO_SLC_MODE, buf, dev->pfpg_size);
+ ret = nvm_submit_ppa(dev, ppas, geo->sec_per_pg, NVM_OP_PREAD,
+ NVM_IO_SLC_MODE, buf, geo->pfpg_size);
if (ret) {
pr_err("nvm: sysblk failed read (%u %u %u)\n",
ppas[0].g.ch,
@@ -379,7 +364,7 @@ static int nvm_prepare_new_sysblks(struct nvm_dev *dev, struct sysblk_scan *s)
ppa = &s->ppas[scan_ppa_idx(i, nxt_blk)];
ppa->g.pg = ppa_to_slc(dev, 0);
- ret = nvm_erase_ppa(dev, ppa, 1);
+ ret = nvm_erase_ppa(dev, ppa, 1, 0);
if (ret)
return ret;
@@ -546,6 +531,7 @@ err_sysblk:
int nvm_init_sysblock(struct nvm_dev *dev, struct nvm_sb_info *info)
{
+ struct nvm_geo *geo = &dev->geo;
struct ppa_addr sysblk_ppas[MAX_SYSBLKS];
struct sysblk_scan s;
int ret;
@@ -560,7 +546,7 @@ int nvm_init_sysblock(struct nvm_dev *dev, struct nvm_sb_info *info)
if (!dev->ops->get_bb_tbl || !dev->ops->set_bb_tbl)
return -EINVAL;
- if (!(dev->mccap & NVM_ID_CAP_SLC) || !dev->lps_per_blk) {
+ if (!(geo->mccap & NVM_ID_CAP_SLC) || !dev->lps_per_blk) {
pr_err("nvm: memory does not support SLC access\n");
return -EINVAL;
}
@@ -573,7 +559,7 @@ int nvm_init_sysblock(struct nvm_dev *dev, struct nvm_sb_info *info)
if (ret)
goto err_mark;
- ret = nvm_set_bb_tbl(dev, &s, NVM_BLK_T_HOST);
+ ret = nvm_sysblk_set_bb_tbl(dev, &s, NVM_BLK_T_HOST);
if (ret)
goto err_mark;
@@ -590,11 +576,11 @@ static int factory_nblks(int nblks)
return (nblks + (BITS_PER_LONG - 1)) & ~(BITS_PER_LONG - 1);
}
-static unsigned int factory_blk_offset(struct nvm_dev *dev, struct ppa_addr ppa)
+static unsigned int factory_blk_offset(struct nvm_geo *geo, struct ppa_addr ppa)
{
- int nblks = factory_nblks(dev->blks_per_lun);
+ int nblks = factory_nblks(geo->blks_per_lun);
- return ((ppa.g.ch * dev->luns_per_chnl * nblks) + (ppa.g.lun * nblks)) /
+ return ((ppa.g.ch * geo->luns_per_chnl * nblks) + (ppa.g.lun * nblks)) /
BITS_PER_LONG;
}
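factory_blk_offset() yields the word index of a LUN's slice in the factory bitmap: factory_nblks() pads blks_per_lun up to a BITS_PER_LONG multiple, so every LUN's bits start on a word boundary. Worked numbers under an assumed geometry: with BITS_PER_LONG = 64, blks_per_lun = 100 pads to 128 bits (two longs per LUN); with luns_per_chnl = 4, (ch 1, lun 2) starts at word (1*4*128 + 2*128) / 64 = 12:

#include <stdio.h>

#define BPL 64	/* BITS_PER_LONG assumed to be 64 */

static int nblks_padded(int nblks)
{
	return (nblks + (BPL - 1)) & ~(BPL - 1);	/* round up to a word multiple */
}

int main(void)
{
	int blks_per_lun = 100, luns_per_chnl = 4;
	int nblks = nblks_padded(blks_per_lun);		/* -> 128 */
	int ch = 1, lun = 2;

	printf("word %d\n", (ch * luns_per_chnl * nblks + lun * nblks) / BPL);
	/* (1*4*128 + 2*128) = 768 bits -> word 12 */
	return 0;
}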
@@ -608,7 +594,7 @@ static int nvm_factory_blks(struct nvm_dev *dev, struct ppa_addr ppa,
if (nr_blks < 0)
return nr_blks;
- lunoff = factory_blk_offset(dev, ppa);
+ lunoff = factory_blk_offset(&dev->geo, ppa);
/* non-set bits correspond to the block must be erased */
for (i = 0; i < nr_blks; i++) {
@@ -637,19 +623,19 @@ static int nvm_factory_blks(struct nvm_dev *dev, struct ppa_addr ppa,
static int nvm_fact_get_blks(struct nvm_dev *dev, struct ppa_addr *erase_list,
int max_ppas, unsigned long *blk_bitmap)
{
+ struct nvm_geo *geo = &dev->geo;
struct ppa_addr ppa;
int ch, lun, blkid, idx, done = 0, ppa_cnt = 0;
unsigned long *offset;
while (!done) {
done = 1;
- nvm_for_each_lun_ppa(dev, ppa, ch, lun) {
- idx = factory_blk_offset(dev, ppa);
+ nvm_for_each_lun_ppa(geo, ppa, ch, lun) {
+ idx = factory_blk_offset(geo, ppa);
offset = &blk_bitmap[idx];
- blkid = find_first_zero_bit(offset,
- dev->blks_per_lun);
- if (blkid >= dev->blks_per_lun)
+ blkid = find_first_zero_bit(offset, geo->blks_per_lun);
+ if (blkid >= geo->blks_per_lun)
continue;
set_bit(blkid, offset);
@@ -674,16 +660,17 @@ static int nvm_fact_get_blks(struct nvm_dev *dev, struct ppa_addr *erase_list,
static int nvm_fact_select_blks(struct nvm_dev *dev, unsigned long *blk_bitmap,
int flags)
{
+ struct nvm_geo *geo = &dev->geo;
struct ppa_addr ppa;
int ch, lun, nr_blks, ret = 0;
u8 *blks;
- nr_blks = dev->blks_per_lun * dev->plane_mode;
+ nr_blks = geo->blks_per_lun * geo->plane_mode;
blks = kmalloc(nr_blks, GFP_KERNEL);
if (!blks)
return -ENOMEM;
- nvm_for_each_lun_ppa(dev, ppa, ch, lun) {
+ nvm_for_each_lun_ppa(geo, ppa, ch, lun) {
ret = nvm_get_bb_tbl(dev, ppa, blks);
if (ret)
pr_err("nvm: failed bb tbl for ch%u lun%u\n",
@@ -701,14 +688,15 @@ static int nvm_fact_select_blks(struct nvm_dev *dev, unsigned long *blk_bitmap,
int nvm_dev_factory(struct nvm_dev *dev, int flags)
{
+ struct nvm_geo *geo = &dev->geo;
struct ppa_addr *ppas;
int ppa_cnt, ret = -ENOMEM;
- int max_ppas = dev->ops->max_phys_sect / dev->nr_planes;
+ int max_ppas = dev->ops->max_phys_sect / geo->nr_planes;
struct ppa_addr sysblk_ppas[MAX_SYSBLKS];
struct sysblk_scan s;
unsigned long *blk_bitmap;
- blk_bitmap = kzalloc(factory_nblks(dev->blks_per_lun) * dev->nr_luns,
+ blk_bitmap = kzalloc(factory_nblks(geo->blks_per_lun) * geo->nr_luns,
GFP_KERNEL);
if (!blk_bitmap)
return ret;
@@ -725,7 +713,7 @@ int nvm_dev_factory(struct nvm_dev *dev, int flags)
/* continue erasing until the list of blks is empty */
while ((ppa_cnt =
nvm_fact_get_blks(dev, ppas, max_ppas, blk_bitmap)) > 0)
- nvm_erase_ppa(dev, ppas, ppa_cnt);
+ nvm_erase_ppa(dev, ppas, ppa_cnt, 0);
/* mark host reserved blocks free */
if (flags & NVM_FACTORY_RESET_HOST_BLKS) {
@@ -733,7 +721,7 @@ int nvm_dev_factory(struct nvm_dev *dev, int flags)
mutex_lock(&dev->mlock);
ret = nvm_get_all_sysblks(dev, &s, sysblk_ppas, 0);
if (!ret)
- ret = nvm_set_bb_tbl(dev, &s, NVM_BLK_T_FREE);
+ ret = nvm_sysblk_set_bb_tbl(dev, &s, NVM_BLK_T_FREE);
mutex_unlock(&dev->mlock);
}
err_ppas:
diff --git a/drivers/lightnvm/sysfs.c b/drivers/lightnvm/sysfs.c
deleted file mode 100644
index 0338c27ab95a..000000000000
--- a/drivers/lightnvm/sysfs.c
+++ /dev/null
@@ -1,198 +0,0 @@
-#include <linux/kernel.h>
-#include <linux/lightnvm.h>
-#include <linux/miscdevice.h>
-#include <linux/kobject.h>
-#include <linux/blk-mq.h>
-
-#include "lightnvm.h"
-
-static ssize_t nvm_dev_attr_show(struct device *dev,
- struct device_attribute *dattr, char *page)
-{
- struct nvm_dev *ndev = container_of(dev, struct nvm_dev, dev);
- struct nvm_id *id = &ndev->identity;
- struct nvm_id_group *grp = &id->groups[0];
- struct attribute *attr = &dattr->attr;
-
- if (strcmp(attr->name, "version") == 0) {
- return scnprintf(page, PAGE_SIZE, "%u\n", id->ver_id);
- } else if (strcmp(attr->name, "vendor_opcode") == 0) {
- return scnprintf(page, PAGE_SIZE, "%u\n", id->vmnt);
- } else if (strcmp(attr->name, "capabilities") == 0) {
- return scnprintf(page, PAGE_SIZE, "%u\n", id->cap);
- } else if (strcmp(attr->name, "device_mode") == 0) {
- return scnprintf(page, PAGE_SIZE, "%u\n", id->dom);
- } else if (strcmp(attr->name, "media_manager") == 0) {
- if (!ndev->mt)
- return scnprintf(page, PAGE_SIZE, "%s\n", "none");
- return scnprintf(page, PAGE_SIZE, "%s\n", ndev->mt->name);
- } else if (strcmp(attr->name, "ppa_format") == 0) {
- return scnprintf(page, PAGE_SIZE,
- "0x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x\n",
- id->ppaf.ch_offset, id->ppaf.ch_len,
- id->ppaf.lun_offset, id->ppaf.lun_len,
- id->ppaf.pln_offset, id->ppaf.pln_len,
- id->ppaf.blk_offset, id->ppaf.blk_len,
- id->ppaf.pg_offset, id->ppaf.pg_len,
- id->ppaf.sect_offset, id->ppaf.sect_len);
- } else if (strcmp(attr->name, "media_type") == 0) { /* u8 */
- return scnprintf(page, PAGE_SIZE, "%u\n", grp->mtype);
- } else if (strcmp(attr->name, "flash_media_type") == 0) {
- return scnprintf(page, PAGE_SIZE, "%u\n", grp->fmtype);
- } else if (strcmp(attr->name, "num_channels") == 0) {
- return scnprintf(page, PAGE_SIZE, "%u\n", grp->num_ch);
- } else if (strcmp(attr->name, "num_luns") == 0) {
- return scnprintf(page, PAGE_SIZE, "%u\n", grp->num_lun);
- } else if (strcmp(attr->name, "num_planes") == 0) {
- return scnprintf(page, PAGE_SIZE, "%u\n", grp->num_pln);
- } else if (strcmp(attr->name, "num_blocks") == 0) { /* u16 */
- return scnprintf(page, PAGE_SIZE, "%u\n", grp->num_blk);
- } else if (strcmp(attr->name, "num_pages") == 0) {
- return scnprintf(page, PAGE_SIZE, "%u\n", grp->num_pg);
- } else if (strcmp(attr->name, "page_size") == 0) {
- return scnprintf(page, PAGE_SIZE, "%u\n", grp->fpg_sz);
- } else if (strcmp(attr->name, "hw_sector_size") == 0) {
- return scnprintf(page, PAGE_SIZE, "%u\n", grp->csecs);
- } else if (strcmp(attr->name, "oob_sector_size") == 0) {/* u32 */
- return scnprintf(page, PAGE_SIZE, "%u\n", grp->sos);
- } else if (strcmp(attr->name, "read_typ") == 0) {
- return scnprintf(page, PAGE_SIZE, "%u\n", grp->trdt);
- } else if (strcmp(attr->name, "read_max") == 0) {
- return scnprintf(page, PAGE_SIZE, "%u\n", grp->trdm);
- } else if (strcmp(attr->name, "prog_typ") == 0) {
- return scnprintf(page, PAGE_SIZE, "%u\n", grp->tprt);
- } else if (strcmp(attr->name, "prog_max") == 0) {
- return scnprintf(page, PAGE_SIZE, "%u\n", grp->tprm);
- } else if (strcmp(attr->name, "erase_typ") == 0) {
- return scnprintf(page, PAGE_SIZE, "%u\n", grp->tbet);
- } else if (strcmp(attr->name, "erase_max") == 0) {
- return scnprintf(page, PAGE_SIZE, "%u\n", grp->tbem);
- } else if (strcmp(attr->name, "multiplane_modes") == 0) {
- return scnprintf(page, PAGE_SIZE, "0x%08x\n", grp->mpos);
- } else if (strcmp(attr->name, "media_capabilities") == 0) {
- return scnprintf(page, PAGE_SIZE, "0x%08x\n", grp->mccap);
- } else if (strcmp(attr->name, "max_phys_secs") == 0) {
- return scnprintf(page, PAGE_SIZE, "%u\n",
- ndev->ops->max_phys_sect);
- } else {
- return scnprintf(page,
- PAGE_SIZE,
- "Unhandled attr(%s) in `nvm_dev_attr_show`\n",
- attr->name);
- }
-}
-
-#define NVM_DEV_ATTR_RO(_name) \
- DEVICE_ATTR(_name, S_IRUGO, nvm_dev_attr_show, NULL)
-
-static NVM_DEV_ATTR_RO(version);
-static NVM_DEV_ATTR_RO(vendor_opcode);
-static NVM_DEV_ATTR_RO(capabilities);
-static NVM_DEV_ATTR_RO(device_mode);
-static NVM_DEV_ATTR_RO(ppa_format);
-static NVM_DEV_ATTR_RO(media_manager);
-
-static NVM_DEV_ATTR_RO(media_type);
-static NVM_DEV_ATTR_RO(flash_media_type);
-static NVM_DEV_ATTR_RO(num_channels);
-static NVM_DEV_ATTR_RO(num_luns);
-static NVM_DEV_ATTR_RO(num_planes);
-static NVM_DEV_ATTR_RO(num_blocks);
-static NVM_DEV_ATTR_RO(num_pages);
-static NVM_DEV_ATTR_RO(page_size);
-static NVM_DEV_ATTR_RO(hw_sector_size);
-static NVM_DEV_ATTR_RO(oob_sector_size);
-static NVM_DEV_ATTR_RO(read_typ);
-static NVM_DEV_ATTR_RO(read_max);
-static NVM_DEV_ATTR_RO(prog_typ);
-static NVM_DEV_ATTR_RO(prog_max);
-static NVM_DEV_ATTR_RO(erase_typ);
-static NVM_DEV_ATTR_RO(erase_max);
-static NVM_DEV_ATTR_RO(multiplane_modes);
-static NVM_DEV_ATTR_RO(media_capabilities);
-static NVM_DEV_ATTR_RO(max_phys_secs);
-
-#define NVM_DEV_ATTR(_name) (dev_attr_##_name##)
-
-static struct attribute *nvm_dev_attrs[] = {
- &dev_attr_version.attr,
- &dev_attr_vendor_opcode.attr,
- &dev_attr_capabilities.attr,
- &dev_attr_device_mode.attr,
- &dev_attr_media_manager.attr,
-
- &dev_attr_ppa_format.attr,
- &dev_attr_media_type.attr,
- &dev_attr_flash_media_type.attr,
- &dev_attr_num_channels.attr,
- &dev_attr_num_luns.attr,
- &dev_attr_num_planes.attr,
- &dev_attr_num_blocks.attr,
- &dev_attr_num_pages.attr,
- &dev_attr_page_size.attr,
- &dev_attr_hw_sector_size.attr,
- &dev_attr_oob_sector_size.attr,
- &dev_attr_read_typ.attr,
- &dev_attr_read_max.attr,
- &dev_attr_prog_typ.attr,
- &dev_attr_prog_max.attr,
- &dev_attr_erase_typ.attr,
- &dev_attr_erase_max.attr,
- &dev_attr_multiplane_modes.attr,
- &dev_attr_media_capabilities.attr,
- &dev_attr_max_phys_secs.attr,
- NULL,
-};
-
-static struct attribute_group nvm_dev_attr_group = {
- .name = "lightnvm",
- .attrs = nvm_dev_attrs,
-};
-
-static const struct attribute_group *nvm_dev_attr_groups[] = {
- &nvm_dev_attr_group,
- NULL,
-};
-
-static void nvm_dev_release(struct device *device)
-{
- struct nvm_dev *dev = container_of(device, struct nvm_dev, dev);
- struct request_queue *q = dev->q;
-
- pr_debug("nvm/sysfs: `nvm_dev_release`\n");
-
- blk_mq_unregister_dev(device, q);
-
- nvm_free(dev);
-}
-
-static struct device_type nvm_type = {
- .name = "lightnvm",
- .groups = nvm_dev_attr_groups,
- .release = nvm_dev_release,
-};
-
-int nvm_sysfs_register_dev(struct nvm_dev *dev)
-{
- int ret;
-
- if (!dev->parent_dev)
- return 0;
-
- dev->dev.parent = dev->parent_dev;
- dev_set_name(&dev->dev, "%s", dev->name);
- dev->dev.type = &nvm_type;
- device_initialize(&dev->dev);
- ret = device_add(&dev->dev);
-
- if (!ret)
- blk_mq_register_dev(&dev->dev, dev->q);
-
- return ret;
-}
-
-void nvm_sysfs_unregister_dev(struct nvm_dev *dev)
-{
- if (dev && dev->parent_dev)
- kobject_put(&dev->dev.kobj);
-}
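The deleted file is a standard instance of the DEVICE_ATTR plus attribute_group sysfs pattern. A condensed sketch of that pattern, with hypothetical names ("foo", "example") rather than the lightnvm ones:

#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/sysfs.h>

static ssize_t foo_show(struct device *dev, struct device_attribute *attr,
			char *page)
{
	return scnprintf(page, PAGE_SIZE, "%u\n", 42);	/* placeholder value */
}
static DEVICE_ATTR(foo, 0444, foo_show, NULL);

static struct attribute *example_attrs[] = {
	&dev_attr_foo.attr,
	NULL,
};

static const struct attribute_group example_group = {
	.name	= "example",
	.attrs	= example_attrs,
};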
diff --git a/drivers/macintosh/Kconfig b/drivers/macintosh/Kconfig
index d28690f6e262..5d80810934df 100644
--- a/drivers/macintosh/Kconfig
+++ b/drivers/macintosh/Kconfig
@@ -102,7 +102,6 @@ config ADB_PMU_LED_DISK
bool "Use front LED as DISK LED by default"
depends on ADB_PMU_LED
depends on LEDS_CLASS
- depends on IDE_GD_ATA
select LEDS_TRIGGERS
select LEDS_TRIGGER_DISK
help
diff --git a/drivers/mailbox/Kconfig b/drivers/mailbox/Kconfig
index 11eebfe8a4cb..ceff415f201c 100644
--- a/drivers/mailbox/Kconfig
+++ b/drivers/mailbox/Kconfig
@@ -124,6 +124,15 @@ config MAILBOX_TEST
Test client to help with testing new Controller driver
implementations.
+config TEGRA_HSP_MBOX
+ bool "Tegra HSP (Hardware Synchronization Primitives) Driver"
+ depends on ARCH_TEGRA_186_SOC
+ help
+ The Tegra HSP driver provides interprocessor communication between
+ remote processors and the host processor on Tegra186 and later
+ SoCs. Say Y here if you want this support.
+ If unsure, say N.
+
config XGENE_SLIMPRO_MBOX
tristate "APM SoC X-Gene SLIMpro Mailbox Controller"
depends on ARCH_XGENE
diff --git a/drivers/mailbox/Makefile b/drivers/mailbox/Makefile
index ace6fed8fea9..7dde4f609ae8 100644
--- a/drivers/mailbox/Makefile
+++ b/drivers/mailbox/Makefile
@@ -29,3 +29,5 @@ obj-$(CONFIG_XGENE_SLIMPRO_MBOX) += mailbox-xgene-slimpro.o
obj-$(CONFIG_HI6220_MBOX) += hi6220-mailbox.o
obj-$(CONFIG_BCM_PDC_MBOX) += bcm-pdc-mailbox.o
+
+obj-$(CONFIG_TEGRA_HSP_MBOX) += tegra-hsp.o
diff --git a/drivers/mailbox/bcm-pdc-mailbox.c b/drivers/mailbox/bcm-pdc-mailbox.c
index c19dd820ea9b..2aeb034d5fb9 100644
--- a/drivers/mailbox/bcm-pdc-mailbox.c
+++ b/drivers/mailbox/bcm-pdc-mailbox.c
@@ -60,7 +60,13 @@
#define RING_ENTRY_SIZE sizeof(struct dma64dd)
/* # entries in PDC dma ring */
-#define PDC_RING_ENTRIES 128
+#define PDC_RING_ENTRIES 512
+/*
+ * Minimum number of ring descriptor entries that must be free to tell mailbox
+ * framework that it can submit another request
+ */
+#define PDC_RING_SPACE_MIN 15
+
#define PDC_RING_SIZE (PDC_RING_ENTRIES * RING_ENTRY_SIZE)
/* Rings are 8k aligned */
#define RING_ALIGN_ORDER 13
@@ -93,11 +99,9 @@
* Interrupt mask and status definitions. Enable interrupts for tx and rx on
* ring 0
*/
-#define PDC_XMTINT_0 (24 + PDC_RINGSET)
#define PDC_RCVINT_0 (16 + PDC_RINGSET)
-#define PDC_XMTINTEN_0 BIT(PDC_XMTINT_0)
#define PDC_RCVINTEN_0 BIT(PDC_RCVINT_0)
-#define PDC_INTMASK (PDC_XMTINTEN_0 | PDC_RCVINTEN_0)
+#define PDC_INTMASK (PDC_RCVINTEN_0)
#define PDC_LAZY_FRAMECOUNT 1
#define PDC_LAZY_TIMEOUT 10000
#define PDC_LAZY_INT (PDC_LAZY_TIMEOUT | (PDC_LAZY_FRAMECOUNT << 24))
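As a sanity check on the encoding above: the frame count occupies bits 31:24 and the timeout the low bits, so PDC_LAZY_INT works out to 10000 | (1 << 24) == 0x01002710. An illustrative compile-time assertion of that arithmetic:

_Static_assert((10000 | (1 << 24)) == 0x01002710,
	       "lazy interrupt encoding");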
@@ -117,15 +121,16 @@
/*
* Sets the following bits for write to transmit control reg:
- * 0 - XmtEn - enable activity on the tx channel
* 11 - PtyChkDisable - parity check is disabled
* 20:18 - BurstLen = 3 -> 2^7 = 128 byte data reads from memory
*/
-#define PDC_TX_CTL 0x000C0801
+#define PDC_TX_CTL 0x000C0800
+
+/* Bit in tx control reg to enable tx channel */
+#define PDC_TX_ENABLE 0x1
/*
* Sets the following bits for write to receive control reg:
- * 0 - RcvEn - enable activity on the rx channel
* 7:1 - RcvOffset - size in bytes of status region at start of rx frame buf
* 9 - SepRxHdrDescEn - place start of new frames only in descriptors
* that have StartOfFrame set
@@ -135,7 +140,10 @@
* 11 - PtyChkDisable - parity check is disabled
* 20:18 - BurstLen = 3 -> 2^7 = 128 byte data reads from memory
*/
-#define PDC_RX_CTL 0x000C0E01
+#define PDC_RX_CTL 0x000C0E00
+
+/* Bit in rx control reg to enable rx channel */
+#define PDC_RX_ENABLE 0x1
#define CRYPTO_D64_RS0_CD_MASK ((PDC_RING_ENTRIES * RING_ENTRY_SIZE) - 1)
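The driver depends on PDC_RING_ENTRIES being a power of two: ring occupancy is a wrap-safe subtract-and-mask (what the NTXDACTIVE/NRXDACTIVE macros do), and the mask above turns the hardware's current-descriptor byte offset back into a ring index. A small sketch of both operations; the constants are illustrative:

#define ENTRIES		512	/* must be a power of two */
#define ENTRY_SZ	16	/* sizeof(struct dma64dd), illustrative */
#define CD_MASK		((ENTRIES * ENTRY_SZ) - 1)

/* descriptors outstanding between consumer index in and producer index out */
static unsigned int active(unsigned int in, unsigned int out)
{
	return (out - in) & (ENTRIES - 1);	/* correct across wrap */
}

/* the hardware's status0 holds a byte offset; convert to a ring index */
static unsigned int cd_to_index(unsigned int status0)
{
	return (status0 & CD_MASK) / ENTRY_SZ;
}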
@@ -252,11 +260,29 @@ struct pdc_ring_alloc {
u32 size; /* ring allocation size in bytes */
};
+/*
+ * context associated with a receive descriptor.
+ * @rxp_ctx: opaque context associated with frame that starts at each
+ * rx ring index.
+ * @dst_sg: Scatterlist used to form reply frames beginning at a given ring
+ * index. Retained in order to unmap each sg after reply is processed.
+ * @rxin_numd: Number of rx descriptors associated with the message that starts
+ * at a descriptor index. Not set for every index. For example,
+ * if descriptor index i points to a scatterlist with 4 entries,
+ * then the next three descriptor indexes don't have a value set.
+ * @resp_hdr: Virtual address of buffer used to catch DMA rx status
+ * @resp_hdr_daddr: Physical address of DMA rx status buffer
+ */
+struct pdc_rx_ctx {
+ void *rxp_ctx;
+ struct scatterlist *dst_sg;
+ u32 rxin_numd;
+ void *resp_hdr;
+ dma_addr_t resp_hdr_daddr;
+};
+
/* PDC state structure */
struct pdc_state {
- /* synchronize access to this PDC state structure */
- spinlock_t pdc_lock;
-
/* Index of the PDC whose state is in this structure instance */
u8 pdc_idx;
@@ -272,13 +298,8 @@ struct pdc_state {
unsigned int pdc_irq;
- /*
- * Last interrupt status read from PDC device. Saved in interrupt
- * handler so the handler can clear the interrupt in the device,
- * and the interrupt thread called later can know which interrupt
- * bits are active.
- */
- unsigned long intstatus;
+ /* tasklet for deferred processing after DMA rx interrupt */
+ struct tasklet_struct rx_tasklet;
/* Number of bytes of receive status prior to each rx frame */
u32 rx_status_len;
@@ -369,11 +390,7 @@ struct pdc_state {
/* Index of next rx descriptor to post. */
u32 rxout;
- /*
- * opaque context associated with frame that starts at each
- * rx ring index.
- */
- void *rxp_ctx[PDC_RING_ENTRIES];
+ struct pdc_rx_ctx rx_ctx[PDC_RING_ENTRIES];
/*
* Scatterlists used to form request and reply frames beginning at a
@@ -381,27 +398,18 @@ struct pdc_state {
* is processed
*/
struct scatterlist *src_sg[PDC_RING_ENTRIES];
- struct scatterlist *dst_sg[PDC_RING_ENTRIES];
-
- /*
- * Number of rx descriptors associated with the message that starts
- * at this descriptor index. Not set for every index. For example,
- * if descriptor index i points to a scatterlist with 4 entries, then
- * the next three descriptor indexes don't have a value set.
- */
- u32 rxin_numd[PDC_RING_ENTRIES];
-
- void *resp_hdr[PDC_RING_ENTRIES];
- dma_addr_t resp_hdr_daddr[PDC_RING_ENTRIES];
struct dentry *debugfs_stats; /* debug FS stats file for this PDC */
/* counters */
- u32 pdc_requests; /* number of request messages submitted */
- u32 pdc_replies; /* number of reply messages received */
- u32 txnobuf; /* count of tx ring full */
- u32 rxnobuf; /* count of rx ring full */
- u32 rx_oflow; /* count of rx overflows */
+ u32 pdc_requests; /* number of request messages submitted */
+ u32 pdc_replies; /* number of reply messages received */
+ u32 last_tx_not_done; /* too few free ring entries to report tx done */
+ u32 tx_ring_full; /* unable to accept msg because tx ring full */
+ u32 rx_ring_full; /* unable to accept msg because rx ring full */
+ u32 txnobuf; /* unable to create tx descriptor */
+ u32 rxnobuf; /* unable to create rx descriptor */
+ u32 rx_oflow; /* count of rx overflows */
};
/* Global variables */
@@ -434,20 +442,33 @@ static ssize_t pdc_debugfs_read(struct file *filp, char __user *ubuf,
out_offset += snprintf(buf + out_offset, out_count - out_offset,
"SPU %u stats:\n", pdcs->pdc_idx);
out_offset += snprintf(buf + out_offset, out_count - out_offset,
- "PDC requests............%u\n",
+ "PDC requests....................%u\n",
pdcs->pdc_requests);
out_offset += snprintf(buf + out_offset, out_count - out_offset,
- "PDC responses...........%u\n",
+ "PDC responses...................%u\n",
pdcs->pdc_replies);
out_offset += snprintf(buf + out_offset, out_count - out_offset,
- "Tx err ring full........%u\n",
+ "Tx not done.....................%u\n",
+ pdcs->last_tx_not_done);
+ out_offset += snprintf(buf + out_offset, out_count - out_offset,
+ "Tx ring full....................%u\n",
+ pdcs->tx_ring_full);
+ out_offset += snprintf(buf + out_offset, out_count - out_offset,
+ "Rx ring full....................%u\n",
+ pdcs->rx_ring_full);
+ out_offset += snprintf(buf + out_offset, out_count - out_offset,
+ "Tx desc write fail. Ring full...%u\n",
pdcs->txnobuf);
out_offset += snprintf(buf + out_offset, out_count - out_offset,
- "Rx err ring full........%u\n",
+ "Rx desc write fail. Ring full...%u\n",
pdcs->rxnobuf);
out_offset += snprintf(buf + out_offset, out_count - out_offset,
- "Receive overflow........%u\n",
+ "Receive overflow................%u\n",
pdcs->rx_oflow);
+ out_offset += snprintf(buf + out_offset, out_count - out_offset,
+ "Num frags in rx ring............%u\n",
+ NRXDACTIVE(pdcs->rxin, pdcs->last_rx_curr,
+ pdcs->nrxpost));
if (out_offset > out_count)
out_offset = out_count;
@@ -480,17 +501,16 @@ static void pdc_setup_debugfs(struct pdc_state *pdcs)
if (!debugfs_dir)
debugfs_dir = debugfs_create_dir(KBUILD_MODNAME, NULL);
- pdcs->debugfs_stats = debugfs_create_file(spu_stats_name, S_IRUSR,
+ /* S_IRUSR == 0400 */
+ pdcs->debugfs_stats = debugfs_create_file(spu_stats_name, 0400,
debugfs_dir, pdcs,
&pdc_debugfs_stats);
}
static void pdc_free_debugfs(void)
{
- if (debugfs_dir && simple_empty(debugfs_dir)) {
- debugfs_remove_recursive(debugfs_dir);
- debugfs_dir = NULL;
- }
+ debugfs_remove_recursive(debugfs_dir);
+ debugfs_dir = NULL;
}
/**
@@ -505,17 +525,17 @@ pdc_build_rxd(struct pdc_state *pdcs, dma_addr_t dma_addr,
u32 buf_len, u32 flags)
{
struct device *dev = &pdcs->pdev->dev;
+ struct dma64dd *rxd = &pdcs->rxd_64[pdcs->rxout];
dev_dbg(dev,
"Writing rx descriptor for PDC %u at index %u with length %u. flags %#x\n",
pdcs->pdc_idx, pdcs->rxout, buf_len, flags);
- iowrite32(lower_32_bits(dma_addr),
- (void *)&pdcs->rxd_64[pdcs->rxout].addrlow);
- iowrite32(upper_32_bits(dma_addr),
- (void *)&pdcs->rxd_64[pdcs->rxout].addrhigh);
- iowrite32(flags, (void *)&pdcs->rxd_64[pdcs->rxout].ctrl1);
- iowrite32(buf_len, (void *)&pdcs->rxd_64[pdcs->rxout].ctrl2);
+ rxd->addrlow = cpu_to_le32(lower_32_bits(dma_addr));
+ rxd->addrhigh = cpu_to_le32(upper_32_bits(dma_addr));
+ rxd->ctrl1 = cpu_to_le32(flags);
+ rxd->ctrl2 = cpu_to_le32(buf_len);
+
/* bump ring index and return */
pdcs->rxout = NEXTRXD(pdcs->rxout, pdcs->nrxpost);
}
@@ -533,53 +553,50 @@ pdc_build_txd(struct pdc_state *pdcs, dma_addr_t dma_addr, u32 buf_len,
u32 flags)
{
struct device *dev = &pdcs->pdev->dev;
+ struct dma64dd *txd = &pdcs->txd_64[pdcs->txout];
dev_dbg(dev,
"Writing tx descriptor for PDC %u at index %u with length %u, flags %#x\n",
pdcs->pdc_idx, pdcs->txout, buf_len, flags);
- iowrite32(lower_32_bits(dma_addr),
- (void *)&pdcs->txd_64[pdcs->txout].addrlow);
- iowrite32(upper_32_bits(dma_addr),
- (void *)&pdcs->txd_64[pdcs->txout].addrhigh);
- iowrite32(flags, (void *)&pdcs->txd_64[pdcs->txout].ctrl1);
- iowrite32(buf_len, (void *)&pdcs->txd_64[pdcs->txout].ctrl2);
+ txd->addrlow = cpu_to_le32(lower_32_bits(dma_addr));
+ txd->addrhigh = cpu_to_le32(upper_32_bits(dma_addr));
+ txd->ctrl1 = cpu_to_le32(flags);
+ txd->ctrl2 = cpu_to_le32(buf_len);
/* bump ring index and return */
pdcs->txout = NEXTTXD(pdcs->txout, pdcs->ntxpost);
}
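The switch from iowrite32() to plain stores in these two builders reflects where the descriptors live: they are allocated from a dma_pool (coherent memory), not MMIO, so direct stores with explicit little-endian conversion are the idiomatic form. A condensed kernel-style sketch; the struct and function names are hypothetical:

struct desc {
	__le32 addrlow, addrhigh, ctrl1, ctrl2;
};

static void fill_desc(struct desc *d, dma_addr_t addr, u32 flags, u32 len)
{
	d->addrlow  = cpu_to_le32(lower_32_bits(addr));
	d->addrhigh = cpu_to_le32(upper_32_bits(addr));
	d->ctrl1    = cpu_to_le32(flags);
	d->ctrl2    = cpu_to_le32(len);
}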
/**
- * pdc_receive() - Receive a response message from a given SPU.
+ * pdc_receive_one() - Receive a response message from a given SPU.
* @pdcs: PDC state for the SPU to receive from
- * @mssg: mailbox message to be returned to client
*
* When the return code indicates success, the response message is available in
* the receive buffers provided prior to submission of the request.
*
- * Input:
- * pdcs - PDC state structure for the SPU to be polled
- * mssg - mailbox message to be returned to client. This function sets the
- * context pointer on the message to help the client associate the
- * response with a request.
- *
* Return: PDC_SUCCESS if one or more receive descriptors were processed
* -EAGAIN indicates that no response message is available
* -EIO an error occurred
*/
static int
-pdc_receive(struct pdc_state *pdcs, struct brcm_message *mssg)
+pdc_receive_one(struct pdc_state *pdcs)
{
struct device *dev = &pdcs->pdev->dev;
+ struct mbox_controller *mbc;
+ struct mbox_chan *chan;
+ struct brcm_message mssg;
u32 len, rx_status;
u32 num_frags;
- int i;
u8 *resp_hdr; /* virtual addr of start of resp message DMA header */
u32 frags_rdy; /* number of fragments ready to read */
u32 rx_idx; /* ring index of start of receive frame */
dma_addr_t resp_hdr_daddr;
+ struct pdc_rx_ctx *rx_ctx;
- spin_lock(&pdcs->pdc_lock);
+ mbc = &pdcs->mbc;
+ chan = &mbc->chans[0];
+ mssg.type = BRCM_MESSAGE_SPU;
/*
* return if a complete response message is not yet ready.
@@ -587,47 +604,34 @@ pdc_receive(struct pdc_state *pdcs, struct brcm_message *mssg)
* to read.
*/
frags_rdy = NRXDACTIVE(pdcs->rxin, pdcs->last_rx_curr, pdcs->nrxpost);
- if ((frags_rdy == 0) || (frags_rdy < pdcs->rxin_numd[pdcs->rxin])) {
- /* See if the hw has written more fragments than we know */
- pdcs->last_rx_curr =
- (ioread32((void *)&pdcs->rxregs_64->status0) &
- CRYPTO_D64_RS0_CD_MASK) / RING_ENTRY_SIZE;
- frags_rdy = NRXDACTIVE(pdcs->rxin, pdcs->last_rx_curr,
- pdcs->nrxpost);
- if ((frags_rdy == 0) ||
- (frags_rdy < pdcs->rxin_numd[pdcs->rxin])) {
- /* No response ready */
- spin_unlock(&pdcs->pdc_lock);
- return -EAGAIN;
- }
- /* can't read descriptors/data until write index is read */
- rmb();
- }
+ if ((frags_rdy == 0) ||
+ (frags_rdy < pdcs->rx_ctx[pdcs->rxin].rxin_numd))
+ /* No response ready */
+ return -EAGAIN;
num_frags = pdcs->txin_numd[pdcs->txin];
+ WARN_ON(num_frags == 0);
+
dma_unmap_sg(dev, pdcs->src_sg[pdcs->txin],
sg_nents(pdcs->src_sg[pdcs->txin]), DMA_TO_DEVICE);
- for (i = 0; i < num_frags; i++)
- pdcs->txin = NEXTTXD(pdcs->txin, pdcs->ntxpost);
+ pdcs->txin = (pdcs->txin + num_frags) & pdcs->ntxpost;
dev_dbg(dev, "PDC %u reclaimed %d tx descriptors",
pdcs->pdc_idx, num_frags);
rx_idx = pdcs->rxin;
- num_frags = pdcs->rxin_numd[rx_idx];
+ rx_ctx = &pdcs->rx_ctx[rx_idx];
+ num_frags = rx_ctx->rxin_numd;
/* Return opaque context with result */
- mssg->ctx = pdcs->rxp_ctx[rx_idx];
- pdcs->rxp_ctx[rx_idx] = NULL;
- resp_hdr = pdcs->resp_hdr[rx_idx];
- resp_hdr_daddr = pdcs->resp_hdr_daddr[rx_idx];
- dma_unmap_sg(dev, pdcs->dst_sg[rx_idx],
- sg_nents(pdcs->dst_sg[rx_idx]), DMA_FROM_DEVICE);
-
- for (i = 0; i < num_frags; i++)
- pdcs->rxin = NEXTRXD(pdcs->rxin, pdcs->nrxpost);
+ mssg.ctx = rx_ctx->rxp_ctx;
+ rx_ctx->rxp_ctx = NULL;
+ resp_hdr = rx_ctx->resp_hdr;
+ resp_hdr_daddr = rx_ctx->resp_hdr_daddr;
+ dma_unmap_sg(dev, rx_ctx->dst_sg, sg_nents(rx_ctx->dst_sg),
+ DMA_FROM_DEVICE);
- spin_unlock(&pdcs->pdc_lock);
+ pdcs->rxin = (pdcs->rxin + num_frags) & pdcs->nrxpost;
dev_dbg(dev, "PDC %u reclaimed %d rx descriptors",
pdcs->pdc_idx, num_frags);
@@ -659,12 +663,35 @@ pdc_receive(struct pdc_state *pdcs, struct brcm_message *mssg)
dma_pool_free(pdcs->rx_buf_pool, resp_hdr, resp_hdr_daddr);
+ mbox_chan_received_data(chan, &mssg);
+
pdcs->pdc_replies++;
- /* if we read one or more rx descriptors, claim success */
- if (num_frags > 0)
- return PDC_SUCCESS;
- else
- return -EIO;
+ return PDC_SUCCESS;
+}
+
+/**
+ * pdc_receive() - Process as many responses as are available in the rx ring.
+ * @pdcs: PDC state
+ *
+ * Called from the rx tasklet, after the hard IRQ has been acknowledged.
+ * Return: 0 once all available responses have been processed
+ */
+static int
+pdc_receive(struct pdc_state *pdcs)
+{
+ int rx_status;
+
+ /* read last_rx_curr from register once */
+ pdcs->last_rx_curr =
+ (ioread32(&pdcs->rxregs_64->status0) &
+ CRYPTO_D64_RS0_CD_MASK) / RING_ENTRY_SIZE;
+
+ do {
+ /* Could be many frames ready */
+ rx_status = pdc_receive_one(pdcs);
+ } while (rx_status == PDC_SUCCESS);
+
+ return 0;
}
/**
@@ -766,8 +793,8 @@ static int pdc_tx_list_final(struct pdc_state *pdcs)
* before chip starts to process new request
*/
wmb();
- iowrite32(pdcs->rxout << 4, (void *)&pdcs->rxregs_64->ptr);
- iowrite32(pdcs->txout << 4, (void *)&pdcs->txregs_64->ptr);
+ iowrite32(pdcs->rxout << 4, &pdcs->rxregs_64->ptr);
+ iowrite32(pdcs->txout << 4, &pdcs->txregs_64->ptr);
pdcs->pdc_requests++;
return PDC_SUCCESS;
@@ -796,6 +823,7 @@ static int pdc_rx_list_init(struct pdc_state *pdcs, struct scatterlist *dst_sg,
u32 rx_pkt_cnt = 1; /* Adding a single rx buffer */
dma_addr_t daddr;
void *vaddr;
+ struct pdc_rx_ctx *rx_ctx;
rx_avail = pdcs->nrxpost - NRXDACTIVE(pdcs->rxin, pdcs->rxout,
pdcs->nrxpost);
@@ -806,7 +834,7 @@ static int pdc_rx_list_init(struct pdc_state *pdcs, struct scatterlist *dst_sg,
/* allocate a buffer for the dma rx status */
vaddr = dma_pool_zalloc(pdcs->rx_buf_pool, GFP_ATOMIC, &daddr);
- if (!vaddr)
+ if (unlikely(!vaddr))
return -ENOMEM;
/*
@@ -819,15 +847,16 @@ static int pdc_rx_list_init(struct pdc_state *pdcs, struct scatterlist *dst_sg,
/* This is always the first descriptor in the receive sequence */
flags = D64_CTRL1_SOF;
- pdcs->rxin_numd[pdcs->rx_msg_start] = 1;
+ pdcs->rx_ctx[pdcs->rx_msg_start].rxin_numd = 1;
if (unlikely(pdcs->rxout == (pdcs->nrxd - 1)))
flags |= D64_CTRL1_EOT;
- pdcs->rxp_ctx[pdcs->rxout] = ctx;
- pdcs->dst_sg[pdcs->rxout] = dst_sg;
- pdcs->resp_hdr[pdcs->rxout] = vaddr;
- pdcs->resp_hdr_daddr[pdcs->rxout] = daddr;
+ rx_ctx = &pdcs->rx_ctx[pdcs->rxout];
+ rx_ctx->rxp_ctx = ctx;
+ rx_ctx->dst_sg = dst_sg;
+ rx_ctx->resp_hdr = vaddr;
+ rx_ctx->resp_hdr_daddr = daddr;
pdc_build_rxd(pdcs, daddr, pdcs->pdc_resp_hdr_len, flags);
return PDC_SUCCESS;
}
@@ -895,7 +924,7 @@ static int pdc_rx_list_sg_add(struct pdc_state *pdcs, struct scatterlist *sg)
desc_w++;
sg = sg_next(sg);
}
- pdcs->rxin_numd[pdcs->rx_msg_start] += desc_w;
+ pdcs->rx_ctx[pdcs->rx_msg_start].rxin_numd += desc_w;
return PDC_SUCCESS;
}
@@ -903,7 +932,7 @@ static int pdc_rx_list_sg_add(struct pdc_state *pdcs, struct scatterlist *sg)
/**
* pdc_irq_handler() - Interrupt handler called in interrupt context.
* @irq: Interrupt number that has fired
- * @cookie: PDC state for DMA engine that generated the interrupt
+ * @data: device struct for DMA engine that generated the interrupt
*
* We have to clear the device interrupt status flags here. So cache the
* status for later use in the thread function. Other than that, just return
@@ -912,88 +941,39 @@ static int pdc_rx_list_sg_add(struct pdc_state *pdcs, struct scatterlist *sg)
* Return: IRQ_WAKE_THREAD if interrupt is ours
* IRQ_NONE otherwise
*/
-static irqreturn_t pdc_irq_handler(int irq, void *cookie)
+static irqreturn_t pdc_irq_handler(int irq, void *data)
{
- struct pdc_state *pdcs = cookie;
+ struct device *dev = (struct device *)data;
+ struct pdc_state *pdcs = dev_get_drvdata(dev);
u32 intstatus = ioread32(pdcs->pdc_reg_vbase + PDC_INTSTATUS_OFFSET);
- if (intstatus & PDC_XMTINTEN_0)
- set_bit(PDC_XMTINT_0, &pdcs->intstatus);
- if (intstatus & PDC_RCVINTEN_0)
- set_bit(PDC_RCVINT_0, &pdcs->intstatus);
+ if (unlikely(intstatus == 0))
+ return IRQ_NONE;
+
+ /* Disable interrupts until soft handler runs */
+ iowrite32(0, pdcs->pdc_reg_vbase + PDC_INTMASK_OFFSET);
/* Clear interrupt flags in device */
iowrite32(intstatus, pdcs->pdc_reg_vbase + PDC_INTSTATUS_OFFSET);
/* Wakeup IRQ thread */
- if (pdcs && (irq == pdcs->pdc_irq) && (intstatus & PDC_INTMASK))
- return IRQ_WAKE_THREAD;
-
- return IRQ_NONE;
+ tasklet_schedule(&pdcs->rx_tasklet);
+ return IRQ_HANDLED;
}
/**
- * pdc_irq_thread() - Function invoked on deferred thread when a DMA tx has
- * completed or data is available to receive.
- * @irq: Interrupt number
- * @cookie: PDC state for PDC that generated the interrupt
- *
- * On DMA tx complete, notify the mailbox client. On DMA rx complete, process
- * as many SPU response messages as are available and send each to the mailbox
- * client.
- *
- * Return: IRQ_HANDLED if we recognized and handled the interrupt
- * IRQ_NONE otherwise
+ * pdc_tasklet_cb() - Tasklet callback that runs the deferred processing after
+ * a DMA receive interrupt. Reenables the receive interrupt.
+ * @data: PDC state structure
*/
-static irqreturn_t pdc_irq_thread(int irq, void *cookie)
+static void pdc_tasklet_cb(unsigned long data)
{
- struct pdc_state *pdcs = cookie;
- struct mbox_controller *mbc;
- struct mbox_chan *chan;
- bool tx_int;
- bool rx_int;
- int rx_status;
- struct brcm_message mssg;
+ struct pdc_state *pdcs = (struct pdc_state *)data;
- tx_int = test_and_clear_bit(PDC_XMTINT_0, &pdcs->intstatus);
- rx_int = test_and_clear_bit(PDC_RCVINT_0, &pdcs->intstatus);
+ pdc_receive(pdcs);
- if (pdcs && (tx_int || rx_int)) {
- dev_dbg(&pdcs->pdev->dev,
- "%s() got irq %d with tx_int %s, rx_int %s",
- __func__, irq,
- tx_int ? "set" : "clear", rx_int ? "set" : "clear");
-
- mbc = &pdcs->mbc;
- chan = &mbc->chans[0];
-
- if (tx_int) {
- dev_dbg(&pdcs->pdev->dev, "%s(): tx done", __func__);
- /* only one frame in flight at a time */
- mbox_chan_txdone(chan, PDC_SUCCESS);
- }
- if (rx_int) {
- while (1) {
- /* Could be many frames ready */
- memset(&mssg, 0, sizeof(mssg));
- mssg.type = BRCM_MESSAGE_SPU;
- rx_status = pdc_receive(pdcs, &mssg);
- if (rx_status >= 0) {
- dev_dbg(&pdcs->pdev->dev,
- "%s(): invoking client rx cb",
- __func__);
- mbox_chan_received_data(chan, &mssg);
- } else {
- dev_dbg(&pdcs->pdev->dev,
- "%s(): no SPU response available",
- __func__);
- break;
- }
- }
- }
- return IRQ_HANDLED;
- }
- return IRQ_NONE;
+ /* reenable interrupts */
+ iowrite32(PDC_INTMASK, pdcs->pdc_reg_vbase + PDC_INTMASK_OFFSET);
}
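The conversion above is the classic top-half/bottom-half split: the hard IRQ masks and acks the device, and a tasklet does the heavy lifting and unmasks when finished. A generic sketch of that shape; all names and register offsets are hypothetical:

#define INTSTATUS	0x20		/* hypothetical offsets */
#define INTMASK		0x24
#define INT_ENABLE	BIT(16)		/* hypothetical enable bit */

struct foo_state {
	void __iomem *regs;
	struct tasklet_struct rx_tasklet;
};

static void foo_process_rx(struct foo_state *f)
{
	/* drain the rx ring; stand-in for pdc_receive() */
}

static irqreturn_t foo_irq(int irq, void *data)
{
	struct foo_state *f = data;

	iowrite32(0, f->regs + INTMASK);	/* mask until tasklet runs */
	iowrite32(~0u, f->regs + INTSTATUS);	/* ack in the device */
	tasklet_schedule(&f->rx_tasklet);
	return IRQ_HANDLED;
}

static void foo_tasklet_cb(unsigned long data)
{
	struct foo_state *f = (struct foo_state *)data;

	foo_process_rx(f);			/* the deferred work */
	iowrite32(INT_ENABLE, f->regs + INTMASK);	/* unmask */
}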
/**
@@ -1016,14 +996,14 @@ static int pdc_ring_init(struct pdc_state *pdcs, int ringset)
/* Allocate tx ring */
tx.vbase = dma_pool_zalloc(pdcs->ring_pool, GFP_KERNEL, &tx.dmabase);
- if (!tx.vbase) {
+ if (unlikely(!tx.vbase)) {
err = -ENOMEM;
goto done;
}
/* Allocate rx ring */
rx.vbase = dma_pool_zalloc(pdcs->ring_pool, GFP_KERNEL, &rx.dmabase);
- if (!rx.vbase) {
+ if (unlikely(!rx.vbase)) {
err = -ENOMEM;
goto fail_dealloc;
}
@@ -1033,9 +1013,6 @@ static int pdc_ring_init(struct pdc_state *pdcs, int ringset)
dev_dbg(dev, " - base DMA addr of rx ring %pad", &rx.dmabase);
dev_dbg(dev, " - base virtual addr of rx ring %p", rx.vbase);
- /* lock after ring allocation to avoid scheduling while atomic */
- spin_lock(&pdcs->pdc_lock);
-
memcpy(&pdcs->tx_ring_alloc, &tx, sizeof(tx));
memcpy(&pdcs->rx_ring_alloc, &rx, sizeof(rx));
@@ -1053,40 +1030,52 @@ static int pdc_ring_init(struct pdc_state *pdcs, int ringset)
/* Tell device the base DMA address of each ring */
dma_reg = &pdcs->regs->dmaregs[ringset];
+
+ /* But first disable DMA and set curptr to 0 for both TX & RX */
+ iowrite32(PDC_TX_CTL, &dma_reg->dmaxmt.control);
+ iowrite32((PDC_RX_CTL + (pdcs->rx_status_len << 1)),
+ &dma_reg->dmarcv.control);
+ iowrite32(0, &dma_reg->dmaxmt.ptr);
+ iowrite32(0, &dma_reg->dmarcv.ptr);
+
+ /* Set base DMA addresses */
iowrite32(lower_32_bits(pdcs->tx_ring_alloc.dmabase),
- (void *)&dma_reg->dmaxmt.addrlow);
+ &dma_reg->dmaxmt.addrlow);
iowrite32(upper_32_bits(pdcs->tx_ring_alloc.dmabase),
- (void *)&dma_reg->dmaxmt.addrhigh);
+ &dma_reg->dmaxmt.addrhigh);
iowrite32(lower_32_bits(pdcs->rx_ring_alloc.dmabase),
- (void *)&dma_reg->dmarcv.addrlow);
+ &dma_reg->dmarcv.addrlow);
iowrite32(upper_32_bits(pdcs->rx_ring_alloc.dmabase),
- (void *)&dma_reg->dmarcv.addrhigh);
+ &dma_reg->dmarcv.addrhigh);
+
+ /* Re-enable DMA */
+ iowrite32(PDC_TX_CTL | PDC_TX_ENABLE, &dma_reg->dmaxmt.control);
+ iowrite32((PDC_RX_CTL | PDC_RX_ENABLE | (pdcs->rx_status_len << 1)),
+ &dma_reg->dmarcv.control);
/* Initialize descriptors */
for (i = 0; i < PDC_RING_ENTRIES; i++) {
/* Every tx descriptor can be used for start of frame. */
if (i != pdcs->ntxpost) {
iowrite32(D64_CTRL1_SOF | D64_CTRL1_EOF,
- (void *)&pdcs->txd_64[i].ctrl1);
+ &pdcs->txd_64[i].ctrl1);
} else {
/* Last descriptor in ringset. Set End of Table. */
iowrite32(D64_CTRL1_SOF | D64_CTRL1_EOF |
- D64_CTRL1_EOT,
- (void *)&pdcs->txd_64[i].ctrl1);
+ D64_CTRL1_EOT, &pdcs->txd_64[i].ctrl1);
}
/* Every rx descriptor can be used for start of frame */
if (i != pdcs->nrxpost) {
iowrite32(D64_CTRL1_SOF,
- (void *)&pdcs->rxd_64[i].ctrl1);
+ &pdcs->rxd_64[i].ctrl1);
} else {
/* Last descriptor in ringset. Set End of Table. */
iowrite32(D64_CTRL1_SOF | D64_CTRL1_EOT,
- (void *)&pdcs->rxd_64[i].ctrl1);
+ &pdcs->rxd_64[i].ctrl1);
}
}
- spin_unlock(&pdcs->pdc_lock);
return PDC_SUCCESS;
fail_dealloc:
@@ -1111,6 +1100,80 @@ static void pdc_ring_free(struct pdc_state *pdcs)
}
/**
+ * pdc_desc_count() - Count the number of DMA descriptors that will be required
+ * for a given scatterlist. Account for the max length of a DMA buffer.
+ * @sg: Scatterlist to be DMA'd
+ * Return: Number of descriptors required
+ */
+static u32 pdc_desc_count(struct scatterlist *sg)
+{
+ u32 cnt = 0;
+
+ while (sg) {
+ cnt += ((sg->length / PDC_DMA_BUF_MAX) + 1);
+ sg = sg_next(sg);
+ }
+ return cnt;
+}
+
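One note on the arithmetic above: when a segment's length is an exact multiple of PDC_DMA_BUF_MAX, (length / PDC_DMA_BUF_MAX) + 1 overestimates by one descriptor. Since the count is only used as a lower bound on free space in pdc_rings_full(), the conservative form is harmless; an exact variant (hypothetical helper, shown only for contrast) would be:

static u32 pdc_desc_count_exact(struct scatterlist *sg)
{
	u32 cnt = 0;

	while (sg) {
		cnt += DIV_ROUND_UP(sg->length, PDC_DMA_BUF_MAX);
		sg = sg_next(sg);
	}
	return cnt;
}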
+/**
+ * pdc_rings_full() - Check whether the tx ring has room for tx_cnt descriptors
+ * and the rx ring has room for rx_cnt descriptors.
+ * @pdcs: PDC state
+ * @tx_cnt: The number of descriptors required in the tx ring
+ * @rx_cnt: The number of descriptors required in the rx ring
+ *
+ * Return: true if one of the rings does not have enough space
+ * false if sufficient space is available in both rings
+ */
+static bool pdc_rings_full(struct pdc_state *pdcs, int tx_cnt, int rx_cnt)
+{
+ u32 rx_avail;
+ u32 tx_avail;
+ bool full = false;
+
+ /* Check if the tx and rx rings are likely to have enough space */
+ rx_avail = pdcs->nrxpost - NRXDACTIVE(pdcs->rxin, pdcs->rxout,
+ pdcs->nrxpost);
+ if (unlikely(rx_cnt > rx_avail)) {
+ pdcs->rx_ring_full++;
+ full = true;
+ }
+
+ if (likely(!full)) {
+ tx_avail = pdcs->ntxpost - NTXDACTIVE(pdcs->txin, pdcs->txout,
+ pdcs->ntxpost);
+ if (unlikely(tx_cnt > tx_avail)) {
+ pdcs->tx_ring_full++;
+ full = true;
+ }
+ }
+ return full;
+}
+
+/**
+ * pdc_last_tx_done() - If both the tx and rx rings have at least
+ * PDC_RING_SPACE_MIN descriptors available, then indicate that the mailbox
+ * framework can submit another message.
+ * @chan: mailbox channel to check
+ * Return: true if PDC can accept another message on this channel
+ */
+static bool pdc_last_tx_done(struct mbox_chan *chan)
+{
+ struct pdc_state *pdcs = chan->con_priv;
+ bool ret;
+
+ if (unlikely(pdc_rings_full(pdcs, PDC_RING_SPACE_MIN,
+ PDC_RING_SPACE_MIN))) {
+ pdcs->last_tx_not_done++;
+ ret = false;
+ } else {
+ ret = true;
+ }
+ return ret;
+}
+
+/**
* pdc_send_data() - mailbox send_data function
* @chan: The mailbox channel on which the data is sent. The channel
* corresponds to a DMA ringset.
@@ -1141,29 +1204,43 @@ static int pdc_send_data(struct mbox_chan *chan, void *data)
int src_nent;
int dst_nent;
int nent;
+ u32 tx_desc_req;
+ u32 rx_desc_req;
- if (mssg->type != BRCM_MESSAGE_SPU)
+ if (unlikely(mssg->type != BRCM_MESSAGE_SPU))
return -ENOTSUPP;
src_nent = sg_nents(mssg->spu.src);
- if (src_nent) {
+ if (likely(src_nent)) {
nent = dma_map_sg(dev, mssg->spu.src, src_nent, DMA_TO_DEVICE);
- if (nent == 0)
+ if (unlikely(nent == 0))
return -EIO;
}
dst_nent = sg_nents(mssg->spu.dst);
- if (dst_nent) {
+ if (likely(dst_nent)) {
nent = dma_map_sg(dev, mssg->spu.dst, dst_nent,
DMA_FROM_DEVICE);
- if (nent == 0) {
+ if (unlikely(nent == 0)) {
dma_unmap_sg(dev, mssg->spu.src, src_nent,
DMA_TO_DEVICE);
return -EIO;
}
}
- spin_lock(&pdcs->pdc_lock);
+ /*
+ * Check if the tx and rx rings have enough space. Do this prior to
+ * writing any tx or rx descriptors. We must not write a partial set
+ * of descriptors, or write rx descriptors whose corresponding tx
+ * descriptors don't fit. Note that this check and the entire sequence
+ * of descriptor writes must happen without another
+ * thread getting in. The channel spin lock in the mailbox framework
+ * ensures this.
+ */
+ tx_desc_req = pdc_desc_count(mssg->spu.src);
+ rx_desc_req = pdc_desc_count(mssg->spu.dst);
+ if (unlikely(pdc_rings_full(pdcs, tx_desc_req, rx_desc_req + 1)))
+ return -ENOSPC;
/* Create rx descriptors to catch the SPU response */
err = pdc_rx_list_init(pdcs, mssg->spu.dst, mssg->ctx);
@@ -1173,9 +1250,7 @@ static int pdc_send_data(struct mbox_chan *chan, void *data)
err |= pdc_tx_list_sg_add(pdcs, mssg->spu.src);
err |= pdc_tx_list_final(pdcs); /* initiate transfer */
- spin_unlock(&pdcs->pdc_lock);
-
- if (err)
+ if (unlikely(err))
dev_err(&pdcs->pdev->dev,
"%s failed with error %d", __func__, err);
@@ -1224,26 +1299,29 @@ void pdc_hw_init(struct pdc_state *pdcs)
/* initialize data structures */
pdcs->regs = (struct pdc_regs *)pdcs->pdc_reg_vbase;
pdcs->txregs_64 = (struct dma64_regs *)
- (void *)(((u8 *)pdcs->pdc_reg_vbase) +
+ (((u8 *)pdcs->pdc_reg_vbase) +
PDC_TXREGS_OFFSET + (sizeof(struct dma64) * ringset));
pdcs->rxregs_64 = (struct dma64_regs *)
- (void *)(((u8 *)pdcs->pdc_reg_vbase) +
+ (((u8 *)pdcs->pdc_reg_vbase) +
PDC_RXREGS_OFFSET + (sizeof(struct dma64) * ringset));
pdcs->ntxd = PDC_RING_ENTRIES;
pdcs->nrxd = PDC_RING_ENTRIES;
pdcs->ntxpost = PDC_RING_ENTRIES - 1;
pdcs->nrxpost = PDC_RING_ENTRIES - 1;
- pdcs->regs->intmask = 0;
+ iowrite32(0, &pdcs->regs->intmask);
dma_reg = &pdcs->regs->dmaregs[ringset];
- iowrite32(0, (void *)&dma_reg->dmaxmt.ptr);
- iowrite32(0, (void *)&dma_reg->dmarcv.ptr);
- iowrite32(PDC_TX_CTL, (void *)&dma_reg->dmaxmt.control);
+ /* Configure DMA here, but enable it later, in pdc_ring_init() */
+ iowrite32(PDC_TX_CTL, &dma_reg->dmaxmt.control);
iowrite32(PDC_RX_CTL + (pdcs->rx_status_len << 1),
- (void *)&dma_reg->dmarcv.control);
+ &dma_reg->dmarcv.control);
+
+ /* Reset current index pointers after making sure DMA is disabled */
+ iowrite32(0, &dma_reg->dmaxmt.ptr);
+ iowrite32(0, &dma_reg->dmarcv.ptr);
if (pdcs->pdc_resp_hdr_len == PDC_SPU2_RESP_HDR_LEN)
iowrite32(PDC_CKSUM_CTRL,
@@ -1251,6 +1329,21 @@ void pdc_hw_init(struct pdc_state *pdcs)
}
/**
+ * pdc_hw_disable() - Disable the tx and rx control in the hw.
+ * @pdcs: PDC state structure
+ *
+ */
+static void pdc_hw_disable(struct pdc_state *pdcs)
+{
+ struct dma64 *dma_reg;
+
+ dma_reg = &pdcs->regs->dmaregs[PDC_RINGSET];
+ iowrite32(PDC_TX_CTL, &dma_reg->dmaxmt.control);
+ iowrite32(PDC_RX_CTL + (pdcs->rx_status_len << 1),
+ &dma_reg->dmarcv.control);
+}
+
+/**
* pdc_rx_buf_pool_create() - Pool of receive buffers used to catch the metadata
* header returned with each response message.
* @pdcs: PDC state structure
@@ -1301,8 +1394,6 @@ static int pdc_interrupts_init(struct pdc_state *pdcs)
struct device_node *dn = pdev->dev.of_node;
int err;
- pdcs->intstatus = 0;
-
/* interrupt configuration */
iowrite32(PDC_INTMASK, pdcs->pdc_reg_vbase + PDC_INTMASK_OFFSET);
iowrite32(PDC_LAZY_INT, pdcs->pdc_reg_vbase + PDC_RCVLAZY0_OFFSET);
@@ -1311,11 +1402,11 @@ static int pdc_interrupts_init(struct pdc_state *pdcs)
pdcs->pdc_irq = irq_of_parse_and_map(dn, 0);
dev_dbg(dev, "pdc device %s irq %u for pdcs %p",
dev_name(dev), pdcs->pdc_irq, pdcs);
- err = devm_request_threaded_irq(dev, pdcs->pdc_irq,
- pdc_irq_handler,
- pdc_irq_thread, 0, dev_name(dev), pdcs);
+
+ err = devm_request_irq(dev, pdcs->pdc_irq, pdc_irq_handler, 0,
+ dev_name(dev), dev);
if (err) {
- dev_err(dev, "threaded tx IRQ %u request failed with err %d\n",
+ dev_err(dev, "IRQ %u request failed with err %d\n",
pdcs->pdc_irq, err);
return err;
}
@@ -1324,6 +1415,7 @@ static int pdc_interrupts_init(struct pdc_state *pdcs)
static const struct mbox_chan_ops pdc_mbox_chan_ops = {
.send_data = pdc_send_data,
+ .last_tx_done = pdc_last_tx_done,
.startup = pdc_startup,
.shutdown = pdc_shutdown
};
@@ -1356,8 +1448,9 @@ static int pdc_mb_init(struct pdc_state *pdcs)
if (!mbc->chans)
return -ENOMEM;
- mbc->txdone_irq = true;
- mbc->txdone_poll = false;
+ mbc->txdone_irq = false;
+ mbc->txdone_poll = true;
+ mbc->txpoll_period = 1;
for (chan_index = 0; chan_index < mbc->num_chans; chan_index++)
mbc->chans[chan_index].con_priv = pdcs;
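With txdone_irq off and txdone_poll on, the mailbox core itself polls pdc_last_tx_done() every txpoll_period ms; clients see no API change. A hedged sketch of a blocking send from the client side (names hypothetical):

static struct mbox_client example_client;	/* must outlive the channel */

static int example_send(struct device *dev, struct brcm_message *mssg)
{
	struct mbox_chan *chan;

	example_client.dev = dev;
	example_client.tx_block = true;		/* wait for last_tx_done */
	example_client.tx_tout = 1000;		/* timeout, ms */

	chan = mbox_request_channel(&example_client, 0);
	if (IS_ERR(chan))
		return PTR_ERR(chan);

	return mbox_send_message(chan, mssg);
}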
@@ -1427,7 +1520,6 @@ static int pdc_probe(struct platform_device *pdev)
goto cleanup;
}
- spin_lock_init(&pdcs->pdc_lock);
pdcs->pdev = pdev;
platform_set_drvdata(pdev, pdcs);
pdcs->pdc_idx = pdcg.num_spu;
@@ -1473,6 +1565,9 @@ static int pdc_probe(struct platform_device *pdev)
pdc_hw_init(pdcs);
+ /* Init tasklet for deferred DMA rx processing */
+ tasklet_init(&pdcs->rx_tasklet, pdc_tasklet_cb, (unsigned long)pdcs);
+
err = pdc_interrupts_init(pdcs);
if (err)
goto cleanup_buf_pool;
@@ -1489,6 +1584,7 @@ static int pdc_probe(struct platform_device *pdev)
return PDC_SUCCESS;
cleanup_buf_pool:
+ tasklet_kill(&pdcs->rx_tasklet);
dma_pool_destroy(pdcs->rx_buf_pool);
cleanup_ring_pool:
@@ -1504,6 +1600,10 @@ static int pdc_remove(struct platform_device *pdev)
pdc_free_debugfs();
+ tasklet_kill(&pdcs->rx_tasklet);
+
+ pdc_hw_disable(pdcs);
+
mbox_controller_unregister(&pdcs->mbc);
dma_pool_destroy(pdcs->rx_buf_pool);
diff --git a/drivers/mailbox/mailbox-sti.c b/drivers/mailbox/mailbox-sti.c
index a334db5c9f1c..41bcd339b68a 100644
--- a/drivers/mailbox/mailbox-sti.c
+++ b/drivers/mailbox/mailbox-sti.c
@@ -403,6 +403,7 @@ static const struct of_device_id sti_mailbox_match[] = {
},
{ }
};
+MODULE_DEVICE_TABLE(of, sti_mailbox_match);
static int sti_mbox_probe(struct platform_device *pdev)
{
diff --git a/drivers/mailbox/mailbox-test.c b/drivers/mailbox/mailbox-test.c
index 9ca96e9db6bf..9c79f8019d2a 100644
--- a/drivers/mailbox/mailbox-test.c
+++ b/drivers/mailbox/mailbox-test.c
@@ -11,12 +11,14 @@
#include <linux/debugfs.h>
#include <linux/err.h>
+#include <linux/fs.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/mailbox_client.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
+#include <linux/poll.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
@@ -39,6 +41,8 @@ struct mbox_test_device {
char *signal;
char *message;
spinlock_t lock;
+ wait_queue_head_t waitq;
+ struct fasync_struct *async_queue;
};
static ssize_t mbox_test_signal_write(struct file *filp,
@@ -81,6 +85,13 @@ static const struct file_operations mbox_test_signal_ops = {
.llseek = generic_file_llseek,
};
+static int mbox_test_message_fasync(int fd, struct file *filp, int on)
+{
+ struct mbox_test_device *tdev = filp->private_data;
+
+ return fasync_helper(fd, filp, on, &tdev->async_queue);
+}
+
static ssize_t mbox_test_message_write(struct file *filp,
const char __user *userbuf,
size_t count, loff_t *ppos)
@@ -138,6 +149,20 @@ out:
return ret < 0 ? ret : count;
}
+static bool mbox_test_message_data_ready(struct mbox_test_device *tdev)
+{
+ unsigned char data;
+ unsigned long flags;
+
+ spin_lock_irqsave(&tdev->lock, flags);
+ data = tdev->rx_buffer[0];
+ spin_unlock_irqrestore(&tdev->lock, flags);
+
+ return data != '\0';
+}
+
static ssize_t mbox_test_message_read(struct file *filp, char __user *userbuf,
size_t count, loff_t *ppos)
{
@@ -147,6 +172,8 @@ static ssize_t mbox_test_message_read(struct file *filp, char __user *userbuf,
int l = 0;
int ret;
+ DECLARE_WAITQUEUE(wait, current);
+
touser = kzalloc(MBOX_HEXDUMP_MAX_LEN + 1, GFP_KERNEL);
if (!touser)
return -ENOMEM;
@@ -155,15 +182,29 @@ static ssize_t mbox_test_message_read(struct file *filp, char __user *userbuf,
ret = snprintf(touser, 20, "<NO RX CAPABILITY>\n");
ret = simple_read_from_buffer(userbuf, count, ppos,
touser, ret);
- goto out;
+ goto kfree_err;
}
- if (tdev->rx_buffer[0] == '\0') {
- ret = snprintf(touser, 9, "<EMPTY>\n");
- ret = simple_read_from_buffer(userbuf, count, ppos,
- touser, ret);
- goto out;
- }
+ add_wait_queue(&tdev->waitq, &wait);
+
+ do {
+ __set_current_state(TASK_INTERRUPTIBLE);
+
+ if (mbox_test_message_data_ready(tdev))
+ break;
+
+ if (filp->f_flags & O_NONBLOCK) {
+ ret = -EAGAIN;
+ goto waitq_err;
+ }
+
+ if (signal_pending(current)) {
+ ret = -ERESTARTSYS;
+ goto waitq_err;
+ }
+ schedule();
+
+ } while (1);
spin_lock_irqsave(&tdev->lock, flags);
@@ -185,14 +226,31 @@ static ssize_t mbox_test_message_read(struct file *filp, char __user *userbuf,
spin_unlock_irqrestore(&tdev->lock, flags);
ret = simple_read_from_buffer(userbuf, count, ppos, touser, MBOX_HEXDUMP_MAX_LEN);
-out:
+waitq_err:
+ __set_current_state(TASK_RUNNING);
+ remove_wait_queue(&tdev->waitq, &wait);
+kfree_err:
kfree(touser);
return ret;
}
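For reference, the open-coded loop above is essentially wait_event_interruptible() plus the O_NONBLOCK early-out; inside the same read handler it could be condensed to the sketch below (error paths abbreviated; the real handler must still free touser before returning):

	if ((filp->f_flags & O_NONBLOCK) &&
	    !mbox_test_message_data_ready(tdev))
		return -EAGAIN;

	ret = wait_event_interruptible(tdev->waitq,
				       mbox_test_message_data_ready(tdev));
	if (ret)
		return ret;	/* -ERESTARTSYS if interrupted by a signal */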
+static unsigned int
+mbox_test_message_poll(struct file *filp, struct poll_table_struct *wait)
+{
+ struct mbox_test_device *tdev = filp->private_data;
+
+ poll_wait(filp, &tdev->waitq, wait);
+
+ if (mbox_test_message_data_ready(tdev))
+ return POLLIN | POLLRDNORM;
+ return 0;
+}
+
static const struct file_operations mbox_test_message_ops = {
.write = mbox_test_message_write,
.read = mbox_test_message_read,
+ .fasync = mbox_test_message_fasync,
+ .poll = mbox_test_message_poll,
.open = simple_open,
.llseek = generic_file_llseek,
};
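The new poll and fasync hooks make the message file usable from ordinary event loops. A minimal userspace sketch; the debugfs path is hypothetical and depends on where the file is created:

#include <fcntl.h>
#include <poll.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[512];
	struct pollfd pfd;

	pfd.fd = open("/sys/kernel/debug/mailbox-test/message", O_RDONLY);
	if (pfd.fd < 0)
		return 1;
	pfd.events = POLLIN;

	if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN)) {
		ssize_t n = read(pfd.fd, buf, sizeof(buf) - 1);

		if (n > 0) {
			buf[n] = '\0';
			printf("%s", buf);
		}
	}
	close(pfd.fd);
	return 0;
}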
@@ -234,6 +292,10 @@ static void mbox_test_receive_message(struct mbox_client *client, void *message)
memcpy(tdev->rx_buffer, message, MBOX_MAX_MSG_LEN);
}
spin_unlock_irqrestore(&tdev->lock, flags);
+
+ wake_up_interruptible(&tdev->waitq);
+
+ kill_fasync(&tdev->async_queue, SIGIO, POLL_IN);
}
static void mbox_test_prepare_message(struct mbox_client *client, void *message)
@@ -290,6 +352,7 @@ static int mbox_test_probe(struct platform_device *pdev)
{
struct mbox_test_device *tdev;
struct resource *res;
+ resource_size_t size;
int ret;
tdev = devm_kzalloc(&pdev->dev, sizeof(*tdev), GFP_KERNEL);
@@ -298,14 +361,21 @@ static int mbox_test_probe(struct platform_device *pdev)
/* It's okay for MMIO to be NULL */
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ size = res ? resource_size(res) : 0;
tdev->tx_mmio = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(tdev->tx_mmio))
+ if (PTR_ERR(tdev->tx_mmio) == -EBUSY)
+ /* if reserved area in SRAM, try just ioremap */
+ tdev->tx_mmio = devm_ioremap(&pdev->dev, res->start, size);
+ else if (IS_ERR(tdev->tx_mmio))
tdev->tx_mmio = NULL;
/* If specified, second reg entry is Rx MMIO */
res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ size = res ? resource_size(res) : 0;
tdev->rx_mmio = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(tdev->rx_mmio))
+ if (PTR_ERR(tdev->rx_mmio) == -EBUSY)
+ tdev->rx_mmio = devm_ioremap(&pdev->dev, res->start, size);
+ else if (IS_ERR(tdev->rx_mmio))
tdev->rx_mmio = tdev->tx_mmio;
tdev->tx_channel = mbox_test_request_channel(pdev, "tx");
@@ -334,6 +404,7 @@ static int mbox_test_probe(struct platform_device *pdev)
if (ret)
return ret;
+ init_waitqueue_head(&tdev->waitq);
dev_info(&pdev->dev, "Successfully registered\n");
return 0;
@@ -357,6 +428,7 @@ static const struct of_device_id mbox_test_match[] = {
{ .compatible = "mailbox-test" },
{},
};
+MODULE_DEVICE_TABLE(of, mbox_test_match);
static struct platform_driver mbox_test_driver = {
.driver = {
diff --git a/drivers/mailbox/pcc.c b/drivers/mailbox/pcc.c
index 1f32688c312d..dd9ecd354a3e 100644
--- a/drivers/mailbox/pcc.c
+++ b/drivers/mailbox/pcc.c
@@ -447,7 +447,6 @@ static int pcc_parse_subspace_irq(int id,
*/
static int __init acpi_pcc_probe(void)
{
- acpi_size pcct_tbl_header_size;
struct acpi_table_header *pcct_tbl;
struct acpi_subtable_header *pcct_entry;
struct acpi_table_pcct *acpi_pcct_tbl;
@@ -456,9 +455,7 @@ static int __init acpi_pcc_probe(void)
acpi_status status = AE_OK;
/* Search for PCCT */
- status = acpi_get_table_with_size(ACPI_SIG_PCCT, 0,
- &pcct_tbl,
- &pcct_tbl_header_size);
+ status = acpi_get_table(ACPI_SIG_PCCT, 0, &pcct_tbl);
if (ACPI_FAILURE(status) || !pcct_tbl) {
pr_warn("PCCT header not found.\n");
diff --git a/drivers/mailbox/tegra-hsp.c b/drivers/mailbox/tegra-hsp.c
new file mode 100644
index 000000000000..0cde356c11ab
--- /dev/null
+++ b/drivers/mailbox/tegra-hsp.c
@@ -0,0 +1,479 @@
+/*
+ * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/mailbox_controller.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include <dt-bindings/mailbox/tegra186-hsp.h>
+
+#define HSP_INT_DIMENSIONING 0x380
+#define HSP_nSM_SHIFT 0
+#define HSP_nSS_SHIFT 4
+#define HSP_nAS_SHIFT 8
+#define HSP_nDB_SHIFT 12
+#define HSP_nSI_SHIFT 16
+#define HSP_nINT_MASK 0xf
+
+#define HSP_DB_TRIGGER 0x0
+#define HSP_DB_ENABLE 0x4
+#define HSP_DB_RAW 0x8
+#define HSP_DB_PENDING 0xc
+
+#define HSP_DB_CCPLEX 1
+#define HSP_DB_BPMP 3
+#define HSP_DB_MAX 7
+
+struct tegra_hsp_channel;
+struct tegra_hsp;
+
+struct tegra_hsp_channel {
+ struct tegra_hsp *hsp;
+ struct mbox_chan *chan;
+ void __iomem *regs;
+};
+
+struct tegra_hsp_doorbell {
+ struct tegra_hsp_channel channel;
+ struct list_head list;
+ const char *name;
+ unsigned int master;
+ unsigned int index;
+};
+
+struct tegra_hsp_db_map {
+ const char *name;
+ unsigned int master;
+ unsigned int index;
+};
+
+struct tegra_hsp_soc {
+ const struct tegra_hsp_db_map *map;
+};
+
+struct tegra_hsp {
+ const struct tegra_hsp_soc *soc;
+ struct mbox_controller mbox;
+ void __iomem *regs;
+ unsigned int irq;
+ unsigned int num_sm;
+ unsigned int num_as;
+ unsigned int num_ss;
+ unsigned int num_db;
+ unsigned int num_si;
+ spinlock_t lock;
+
+ struct list_head doorbells;
+};
+
+static inline struct tegra_hsp *
+to_tegra_hsp(struct mbox_controller *mbox)
+{
+ return container_of(mbox, struct tegra_hsp, mbox);
+}
+
+static inline u32 tegra_hsp_readl(struct tegra_hsp *hsp, unsigned int offset)
+{
+ return readl(hsp->regs + offset);
+}
+
+static inline void tegra_hsp_writel(struct tegra_hsp *hsp, u32 value,
+ unsigned int offset)
+{
+ writel(value, hsp->regs + offset);
+}
+
+static inline u32 tegra_hsp_channel_readl(struct tegra_hsp_channel *channel,
+ unsigned int offset)
+{
+ return readl(channel->regs + offset);
+}
+
+static inline void tegra_hsp_channel_writel(struct tegra_hsp_channel *channel,
+ u32 value, unsigned int offset)
+{
+ writel(value, channel->regs + offset);
+}
+
+static bool tegra_hsp_doorbell_can_ring(struct tegra_hsp_doorbell *db)
+{
+ u32 value;
+
+ value = tegra_hsp_channel_readl(&db->channel, HSP_DB_ENABLE);
+
+ return (value & BIT(TEGRA_HSP_DB_MASTER_CCPLEX)) != 0;
+}
+
+static struct tegra_hsp_doorbell *
+__tegra_hsp_doorbell_get(struct tegra_hsp *hsp, unsigned int master)
+{
+ struct tegra_hsp_doorbell *entry;
+
+ list_for_each_entry(entry, &hsp->doorbells, list)
+ if (entry->master == master)
+ return entry;
+
+ return NULL;
+}
+
+static struct tegra_hsp_doorbell *
+tegra_hsp_doorbell_get(struct tegra_hsp *hsp, unsigned int master)
+{
+ struct tegra_hsp_doorbell *db;
+ unsigned long flags;
+
+ spin_lock_irqsave(&hsp->lock, flags);
+ db = __tegra_hsp_doorbell_get(hsp, master);
+ spin_unlock_irqrestore(&hsp->lock, flags);
+
+ return db;
+}
+
+static irqreturn_t tegra_hsp_doorbell_irq(int irq, void *data)
+{
+ struct tegra_hsp *hsp = data;
+ struct tegra_hsp_doorbell *db;
+ unsigned long master, value;
+
+ db = tegra_hsp_doorbell_get(hsp, TEGRA_HSP_DB_MASTER_CCPLEX);
+ if (!db)
+ return IRQ_NONE;
+
+ value = tegra_hsp_channel_readl(&db->channel, HSP_DB_PENDING);
+ tegra_hsp_channel_writel(&db->channel, value, HSP_DB_PENDING);
+
+ spin_lock(&hsp->lock);
+
+ for_each_set_bit(master, &value, hsp->mbox.num_chans) {
+ struct tegra_hsp_doorbell *db;
+
+ db = __tegra_hsp_doorbell_get(hsp, master);
+ /*
+ * Depending on the bootloader chain, the CCPLEX doorbell will
+ * have some doorbells enabled, which means that the interrupt may
+ * fire as soon as it is requested.
+ *
+ * In that case, db->channel.chan will still be NULL here and
+ * cause a crash if not properly guarded.
+ *
+ * It remains to be seen if ignoring the doorbell in that case
+ * is the correct solution.
+ */
+ if (db && db->channel.chan)
+ mbox_chan_received_data(db->channel.chan, NULL);
+ }
+
+ spin_unlock(&hsp->lock);
+
+ return IRQ_HANDLED;
+}
+
+static struct tegra_hsp_channel *
+tegra_hsp_doorbell_create(struct tegra_hsp *hsp, const char *name,
+ unsigned int master, unsigned int index)
+{
+ struct tegra_hsp_doorbell *db;
+ unsigned int offset;
+ unsigned long flags;
+
+ db = kzalloc(sizeof(*db), GFP_KERNEL);
+ if (!db)
+ return ERR_PTR(-ENOMEM);
+
+ offset = (1 + (hsp->num_sm / 2) + hsp->num_ss + hsp->num_as) << 16;
+ offset += index * 0x100;
+
+ db->channel.regs = hsp->regs + offset;
+ db->channel.hsp = hsp;
+
+ db->name = kstrdup_const(name, GFP_KERNEL);
+ db->master = master;
+ db->index = index;
+
+ spin_lock_irqsave(&hsp->lock, flags);
+ list_add_tail(&db->list, &hsp->doorbells);
+ spin_unlock_irqrestore(&hsp->lock, flags);
+
+ return &db->channel;
+}
+
+static void __tegra_hsp_doorbell_destroy(struct tegra_hsp_doorbell *db)
+{
+ list_del(&db->list);
+ kfree_const(db->name);
+ kfree(db);
+}
+
+static int tegra_hsp_doorbell_send_data(struct mbox_chan *chan, void *data)
+{
+ struct tegra_hsp_doorbell *db = chan->con_priv;
+
+ tegra_hsp_channel_writel(&db->channel, 1, HSP_DB_TRIGGER);
+
+ return 0;
+}
+
+static int tegra_hsp_doorbell_startup(struct mbox_chan *chan)
+{
+ struct tegra_hsp_doorbell *db = chan->con_priv;
+ struct tegra_hsp *hsp = db->channel.hsp;
+ struct tegra_hsp_doorbell *ccplex;
+ unsigned long flags;
+ u32 value;
+
+ if (db->master >= hsp->mbox.num_chans) {
+ dev_err(hsp->mbox.dev,
+ "invalid master ID %u for HSP channel\n",
+ db->master);
+ return -EINVAL;
+ }
+
+ ccplex = tegra_hsp_doorbell_get(hsp, TEGRA_HSP_DB_MASTER_CCPLEX);
+ if (!ccplex)
+ return -ENODEV;
+
+ if (!tegra_hsp_doorbell_can_ring(db))
+ return -ENODEV;
+
+ spin_lock_irqsave(&hsp->lock, flags);
+
+ value = tegra_hsp_channel_readl(&ccplex->channel, HSP_DB_ENABLE);
+ value |= BIT(db->master);
+ tegra_hsp_channel_writel(&ccplex->channel, value, HSP_DB_ENABLE);
+
+ spin_unlock_irqrestore(&hsp->lock, flags);
+
+ return 0;
+}
+
+static void tegra_hsp_doorbell_shutdown(struct mbox_chan *chan)
+{
+ struct tegra_hsp_doorbell *db = chan->con_priv;
+ struct tegra_hsp *hsp = db->channel.hsp;
+ struct tegra_hsp_doorbell *ccplex;
+ unsigned long flags;
+ u32 value;
+
+ ccplex = tegra_hsp_doorbell_get(hsp, TEGRA_HSP_DB_MASTER_CCPLEX);
+ if (!ccplex)
+ return;
+
+ spin_lock_irqsave(&hsp->lock, flags);
+
+ value = tegra_hsp_channel_readl(&ccplex->channel, HSP_DB_ENABLE);
+ value &= ~BIT(db->master);
+ tegra_hsp_channel_writel(&ccplex->channel, value, HSP_DB_ENABLE);
+
+ spin_unlock_irqrestore(&hsp->lock, flags);
+}
+
+static const struct mbox_chan_ops tegra_hsp_doorbell_ops = {
+ .send_data = tegra_hsp_doorbell_send_data,
+ .startup = tegra_hsp_doorbell_startup,
+ .shutdown = tegra_hsp_doorbell_shutdown,
+};
+
+static struct mbox_chan *of_tegra_hsp_xlate(struct mbox_controller *mbox,
+ const struct of_phandle_args *args)
+{
+ struct tegra_hsp_channel *channel = ERR_PTR(-ENODEV);
+ struct tegra_hsp *hsp = to_tegra_hsp(mbox);
+ unsigned int type = args->args[0];
+ unsigned int master = args->args[1];
+ struct tegra_hsp_doorbell *db;
+ struct mbox_chan *chan;
+ unsigned long flags;
+ unsigned int i;
+
+ switch (type) {
+ case TEGRA_HSP_MBOX_TYPE_DB:
+ db = tegra_hsp_doorbell_get(hsp, master);
+ if (db)
+ channel = &db->channel;
+
+ break;
+
+ default:
+ break;
+ }
+
+ if (IS_ERR(channel))
+ return ERR_CAST(channel);
+
+ spin_lock_irqsave(&hsp->lock, flags);
+
+ for (i = 0; i < hsp->mbox.num_chans; i++) {
+ chan = &hsp->mbox.chans[i];
+ if (!chan->con_priv) {
+ chan->con_priv = channel;
+ channel->chan = chan;
+ break;
+ }
+
+ chan = NULL;
+ }
+
+ spin_unlock_irqrestore(&hsp->lock, flags);
+
+ return chan ?: ERR_PTR(-EBUSY);
+}
+
+static void tegra_hsp_remove_doorbells(struct tegra_hsp *hsp)
+{
+ struct tegra_hsp_doorbell *db, *tmp;
+ unsigned long flags;
+
+ spin_lock_irqsave(&hsp->lock, flags);
+
+ list_for_each_entry_safe(db, tmp, &hsp->doorbells, list)
+ __tegra_hsp_doorbell_destroy(db);
+
+ spin_unlock_irqrestore(&hsp->lock, flags);
+}
+
+static int tegra_hsp_add_doorbells(struct tegra_hsp *hsp)
+{
+ const struct tegra_hsp_db_map *map = hsp->soc->map;
+ struct tegra_hsp_channel *channel;
+
+ while (map->name) {
+ channel = tegra_hsp_doorbell_create(hsp, map->name,
+ map->master, map->index);
+ if (IS_ERR(channel)) {
+ tegra_hsp_remove_doorbells(hsp);
+ return PTR_ERR(channel);
+ }
+
+ map++;
+ }
+
+ return 0;
+}
+
+static int tegra_hsp_probe(struct platform_device *pdev)
+{
+ struct tegra_hsp *hsp;
+ struct resource *res;
+ u32 value;
+ int err;
+
+ hsp = devm_kzalloc(&pdev->dev, sizeof(*hsp), GFP_KERNEL);
+ if (!hsp)
+ return -ENOMEM;
+
+ hsp->soc = of_device_get_match_data(&pdev->dev);
+ INIT_LIST_HEAD(&hsp->doorbells);
+ spin_lock_init(&hsp->lock);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ hsp->regs = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(hsp->regs))
+ return PTR_ERR(hsp->regs);
+
+ value = tegra_hsp_readl(hsp, HSP_INT_DIMENSIONING);
+ hsp->num_sm = (value >> HSP_nSM_SHIFT) & HSP_nINT_MASK;
+ hsp->num_ss = (value >> HSP_nSS_SHIFT) & HSP_nINT_MASK;
+ hsp->num_as = (value >> HSP_nAS_SHIFT) & HSP_nINT_MASK;
+ hsp->num_db = (value >> HSP_nDB_SHIFT) & HSP_nINT_MASK;
+ hsp->num_si = (value >> HSP_nSI_SHIFT) & HSP_nINT_MASK;
+
+ err = platform_get_irq_byname(pdev, "doorbell");
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to get doorbell IRQ: %d\n", err);
+ return err;
+ }
+
+ hsp->irq = err;
+
+ hsp->mbox.of_xlate = of_tegra_hsp_xlate;
+ hsp->mbox.num_chans = 32;
+ hsp->mbox.dev = &pdev->dev;
+ hsp->mbox.txdone_irq = false;
+ hsp->mbox.txdone_poll = false;
+ hsp->mbox.ops = &tegra_hsp_doorbell_ops;
+
+ hsp->mbox.chans = devm_kcalloc(&pdev->dev, hsp->mbox.num_chans,
+ sizeof(*hsp->mbox.chans),
+ GFP_KERNEL);
+ if (!hsp->mbox.chans)
+ return -ENOMEM;
+
+ err = tegra_hsp_add_doorbells(hsp);
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to add doorbells: %d\n", err);
+ return err;
+ }
+
+ platform_set_drvdata(pdev, hsp);
+
+ err = mbox_controller_register(&hsp->mbox);
+ if (err) {
+ dev_err(&pdev->dev, "failed to register mailbox: %d\n", err);
+ tegra_hsp_remove_doorbells(hsp);
+ return err;
+ }
+
+ err = devm_request_irq(&pdev->dev, hsp->irq, tegra_hsp_doorbell_irq,
+ IRQF_NO_SUSPEND, dev_name(&pdev->dev), hsp);
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to request IRQ#%u: %d\n",
+ hsp->irq, err);
+ return err;
+ }
+
+ return 0;
+}
+
+static int tegra_hsp_remove(struct platform_device *pdev)
+{
+ struct tegra_hsp *hsp = platform_get_drvdata(pdev);
+
+ mbox_controller_unregister(&hsp->mbox);
+ tegra_hsp_remove_doorbells(hsp);
+
+ return 0;
+}
+
+static const struct tegra_hsp_db_map tegra186_hsp_db_map[] = {
+ { "ccplex", TEGRA_HSP_DB_MASTER_CCPLEX, HSP_DB_CCPLEX, },
+ { "bpmp", TEGRA_HSP_DB_MASTER_BPMP, HSP_DB_BPMP, },
+ { /* sentinel */ }
+};
+
+static const struct tegra_hsp_soc tegra186_hsp_soc = {
+ .map = tegra186_hsp_db_map,
+};
+
+static const struct of_device_id tegra_hsp_match[] = {
+ { .compatible = "nvidia,tegra186-hsp", .data = &tegra186_hsp_soc },
+ { }
+};
+
+static struct platform_driver tegra_hsp_driver = {
+ .driver = {
+ .name = "tegra-hsp",
+ .of_match_table = tegra_hsp_match,
+ },
+ .probe = tegra_hsp_probe,
+ .remove = tegra_hsp_remove,
+};
+
+static int __init tegra_hsp_init(void)
+{
+ return platform_driver_register(&tegra_hsp_driver);
+}
+core_initcall(tegra_hsp_init);
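
Editor's note: a consumer addresses a doorbell through the two-cell specifier (type, master) that of_tegra_hsp_xlate() above decodes. A minimal, hypothetical client sketch follows; the function names, the DT index, and the NULL payload are assumptions for illustration, not part of the patch:

	#include <linux/err.h>
	#include <linux/mailbox_client.h>

	static struct mbox_client hyp_cl = {
		.tx_block = false,	/* doorbells ring immediately; nothing to block on */
	};

	static int hyp_ring_bpmp_doorbell(struct device *dev)
	{
		struct mbox_chan *chan;

		hyp_cl.dev = dev;
		/* index 0 of this device's "mboxes" DT property, e.g.
		 * mboxes = <&hsp TEGRA_HSP_MBOX_TYPE_DB TEGRA_HSP_DB_MASTER_BPMP>; */
		chan = mbox_request_channel(&hyp_cl, 0);
		if (IS_ERR(chan))
			return PTR_ERR(chan);

		mbox_send_message(chan, NULL);	/* payload is unused for doorbells */
		mbox_free_channel(chan);
		return 0;
	}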
diff --git a/drivers/mcb/mcb-parse.c b/drivers/mcb/mcb-parse.c
index 4ca2739b4fad..ee7fb6ec96bd 100644
--- a/drivers/mcb/mcb-parse.c
+++ b/drivers/mcb/mcb-parse.c
@@ -149,7 +149,7 @@ static int chameleon_get_bar(char __iomem **base, phys_addr_t mapbase,
reg = readl(*base);
bar_count = BAR_CNT(reg);
- if (bar_count <= 0 && bar_count > CHAMELEON_BAR_MAX)
+ if (bar_count <= 0 || bar_count > CHAMELEON_BAR_MAX)
return -ENODEV;
c = kcalloc(bar_count, sizeof(struct chameleon_bar),
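
The one-liner above fixes a short-circuit bug: no integer can be both non-positive and greater than the maximum, so the old sanity check was dead code. Illustration, taking CHAMELEON_BAR_MAX as 6 purely for the example:

	/* old: (bar_count <= 0 && bar_count > 6)  -- false for every value,
	 *      so 0, negative, and oversized counts all reached kcalloc()
	 * new: (bar_count <= 0 || bar_count > 6)  -- rejects all of them
	 */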
diff --git a/drivers/md/Kconfig b/drivers/md/Kconfig
index 02a5345a44a6..b7767da50c26 100644
--- a/drivers/md/Kconfig
+++ b/drivers/md/Kconfig
@@ -240,9 +240,17 @@ config DM_BUFIO
as a cache, holding recently-read blocks in memory and performing
delayed writes.
+config DM_DEBUG_BLOCK_MANAGER_LOCKING
+ bool "Block manager locking"
+ depends on DM_BUFIO
+ ---help---
+ Block manager locking can catch various metadata corruption issues.
+
+ If unsure, say N.
+
config DM_DEBUG_BLOCK_STACK_TRACING
bool "Keep stack trace of persistent data block lock holders"
- depends on STACKTRACE_SUPPORT && DM_BUFIO
+ depends on STACKTRACE_SUPPORT && DM_DEBUG_BLOCK_MANAGER_LOCKING
select STACKTRACE
---help---
Enable this for messages that may help debug problems with the
diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h
index 6b420a55c745..c3ea03c9a1a8 100644
--- a/drivers/md/bcache/bcache.h
+++ b/drivers/md/bcache/bcache.h
@@ -425,7 +425,7 @@ struct cache {
* until a gc finishes - otherwise we could pointlessly burn a ton of
* cpu
*/
- unsigned invalidate_needs_gc:1;
+ unsigned invalidate_needs_gc;
bool discard; /* Get rid of? */
@@ -593,8 +593,8 @@ struct cache_set {
/* Counts how many sectors bio_insert has added to the cache */
atomic_t sectors_to_gc;
+ wait_queue_head_t gc_wait;
- wait_queue_head_t moving_gc_wait;
struct keybuf moving_gc_keys;
/* Number of moving GC bios in flight */
struct semaphore moving_in_flight;
diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
index 81d3db40cd7b..a43eedd5804d 100644
--- a/drivers/md/bcache/btree.c
+++ b/drivers/md/bcache/btree.c
@@ -297,7 +297,7 @@ static void bch_btree_node_read(struct btree *b)
bio->bi_iter.bi_size = KEY_SIZE(&b->key) << 9;
bio->bi_end_io = btree_node_read_endio;
bio->bi_private = &cl;
- bio_set_op_attrs(bio, REQ_OP_READ, REQ_META|READ_SYNC);
+ bio->bi_opf = REQ_OP_READ | REQ_META;
bch_bio_map(bio, b->keys.set[0].data);
@@ -393,7 +393,7 @@ static void do_btree_node_write(struct btree *b)
b->bio->bi_end_io = btree_node_write_endio;
b->bio->bi_private = cl;
b->bio->bi_iter.bi_size = roundup(set_bytes(i), block_bytes(b->c));
- bio_set_op_attrs(b->bio, REQ_OP_WRITE, REQ_META|WRITE_SYNC|REQ_FUA);
+ b->bio->bi_opf = REQ_OP_WRITE | REQ_META | REQ_FUA;
bch_bio_map(b->bio, i);
/*
@@ -1757,32 +1757,34 @@ static void bch_btree_gc(struct cache_set *c)
bch_moving_gc(c);
}
-static int bch_gc_thread(void *arg)
+static bool gc_should_run(struct cache_set *c)
{
- struct cache_set *c = arg;
struct cache *ca;
unsigned i;
- while (1) {
-again:
- bch_btree_gc(c);
+ for_each_cache(ca, c, i)
+ if (ca->invalidate_needs_gc)
+ return true;
- set_current_state(TASK_INTERRUPTIBLE);
- if (kthread_should_stop())
- break;
+ if (atomic_read(&c->sectors_to_gc) < 0)
+ return true;
- mutex_lock(&c->bucket_lock);
+ return false;
+}
- for_each_cache(ca, c, i)
- if (ca->invalidate_needs_gc) {
- mutex_unlock(&c->bucket_lock);
- set_current_state(TASK_RUNNING);
- goto again;
- }
+static int bch_gc_thread(void *arg)
+{
+ struct cache_set *c = arg;
- mutex_unlock(&c->bucket_lock);
+ while (1) {
+ wait_event_interruptible(c->gc_wait,
+ kthread_should_stop() || gc_should_run(c));
- schedule();
+ if (kthread_should_stop())
+ break;
+
+ set_gc_sectors(c);
+ bch_btree_gc(c);
}
return 0;
@@ -1790,11 +1792,10 @@ again:
int bch_gc_thread_start(struct cache_set *c)
{
- c->gc_thread = kthread_create(bch_gc_thread, c, "bcache_gc");
+ c->gc_thread = kthread_run(bch_gc_thread, c, "bcache_gc");
if (IS_ERR(c->gc_thread))
return PTR_ERR(c->gc_thread);
- set_task_state(c->gc_thread, TASK_INTERRUPTIBLE);
return 0;
}
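
The rewrite above replaces a hand-rolled sleep loop (set_current_state()/schedule(), plus a set_task_state() call on a freshly created thread) with the standard predicate-based idiom, which closes the check-then-sleep race. The general shape of the pattern, as a sketch:

	/* waiter side: returns immediately if the predicate already holds */
	wait_event_interruptible(c->gc_wait,
				 kthread_should_stop() || gc_should_run(c));

	/* waker side: no task pointer needed, so no thread-creation ordering
	 * games; wake_up() is simply a no-op when nobody is waiting */
	wake_up(&c->gc_wait);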
diff --git a/drivers/md/bcache/btree.h b/drivers/md/bcache/btree.h
index 5c391fa01bed..9b80417cd547 100644
--- a/drivers/md/bcache/btree.h
+++ b/drivers/md/bcache/btree.h
@@ -260,8 +260,7 @@ void bch_initial_mark_key(struct cache_set *, int, struct bkey *);
static inline void wake_up_gc(struct cache_set *c)
{
- if (c->gc_thread)
- wake_up_process(c->gc_thread);
+ wake_up(&c->gc_wait);
}
#define MAP_DONE 0
diff --git a/drivers/md/bcache/debug.c b/drivers/md/bcache/debug.c
index 333a1e5f6ae6..06f55056aaae 100644
--- a/drivers/md/bcache/debug.c
+++ b/drivers/md/bcache/debug.c
@@ -52,7 +52,7 @@ void bch_btree_verify(struct btree *b)
bio->bi_bdev = PTR_CACHE(b->c, &b->key, 0)->bdev;
bio->bi_iter.bi_sector = PTR_OFFSET(&b->key, 0);
bio->bi_iter.bi_size = KEY_SIZE(&v->key) << 9;
- bio_set_op_attrs(bio, REQ_OP_READ, REQ_META|READ_SYNC);
+ bio->bi_opf = REQ_OP_READ | REQ_META;
bch_bio_map(bio, sorted);
submit_bio_wait(bio);
@@ -107,22 +107,26 @@ void bch_data_verify(struct cached_dev *dc, struct bio *bio)
{
char name[BDEVNAME_SIZE];
struct bio *check;
- struct bio_vec bv;
- struct bvec_iter iter;
+ struct bio_vec bv, cbv;
+ struct bvec_iter iter, citer = { 0 };
check = bio_clone(bio, GFP_NOIO);
if (!check)
return;
- bio_set_op_attrs(check, REQ_OP_READ, READ_SYNC);
+ check->bi_opf = REQ_OP_READ;
if (bio_alloc_pages(check, GFP_NOIO))
goto out_put;
submit_bio_wait(check);
+ citer.bi_size = UINT_MAX;
bio_for_each_segment(bv, bio, iter) {
void *p1 = kmap_atomic(bv.bv_page);
- void *p2 = page_address(check->bi_io_vec[iter.bi_idx].bv_page);
+ void *p2;
+
+ cbv = bio_iter_iovec(check, citer);
+ p2 = page_address(cbv.bv_page);
cache_set_err_on(memcmp(p1 + bv.bv_offset,
p2 + bv.bv_offset,
@@ -133,6 +137,7 @@ void bch_data_verify(struct cached_dev *dc, struct bio *bio)
(uint64_t) bio->bi_iter.bi_sector);
kunmap_atomic(p1);
+ bio_advance_iter(check, &citer, bv.bv_len);
}
bio_free_pages(check);
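
The hunk above stops indexing the clone's ->bi_io_vec with the original bio's bi_idx (which goes wrong once the two iterators diverge) and walks the clone with its own private iterator instead. The idiom, sketched in isolation:

	struct bvec_iter it = { .bi_size = UINT_MAX };	/* span the whole vec table */
	struct bio_vec v;

	v = bio_iter_iovec(check, it);		/* current segment of the clone */
	/* ... compare against the matching segment of the original ... */
	bio_advance_iter(check, &it, v.bv_len);	/* advance in lock-step */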
diff --git a/drivers/md/bcache/io.c b/drivers/md/bcache/io.c
index e97b0acf7b8d..db45a88c0ce9 100644
--- a/drivers/md/bcache/io.c
+++ b/drivers/md/bcache/io.c
@@ -24,9 +24,7 @@ struct bio *bch_bbio_alloc(struct cache_set *c)
struct bbio *b = mempool_alloc(c->bio_meta, GFP_NOIO);
struct bio *bio = &b->bio;
- bio_init(bio);
- bio->bi_max_vecs = bucket_pages(c);
- bio->bi_io_vec = bio->bi_inline_vecs;
+ bio_init(bio, bio->bi_inline_vecs, bucket_pages(c));
return bio;
}
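
This and the following bcache/dm hunks all track one API change: bio_init() now takes the vector table and its capacity, absorbing two assignments every caller used to repeat by hand. A before/after sketch:

	/* before: three steps */
	bio_init(bio);
	bio->bi_max_vecs = n;
	bio->bi_io_vec = bio->bi_inline_vecs;

	/* after: one call */
	bio_init(bio, bio->bi_inline_vecs, n);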
diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c
index 6925023e12d4..1198e53d5670 100644
--- a/drivers/md/bcache/journal.c
+++ b/drivers/md/bcache/journal.c
@@ -448,13 +448,11 @@ static void do_journal_discard(struct cache *ca)
atomic_set(&ja->discard_in_flight, DISCARD_IN_FLIGHT);
- bio_init(bio);
+ bio_init(bio, bio->bi_inline_vecs, 1);
bio_set_op_attrs(bio, REQ_OP_DISCARD, 0);
bio->bi_iter.bi_sector = bucket_to_sector(ca->set,
ca->sb.d[ja->discard_idx]);
bio->bi_bdev = ca->bdev;
- bio->bi_max_vecs = 1;
- bio->bi_io_vec = bio->bi_inline_vecs;
bio->bi_iter.bi_size = bucket_bytes(ca);
bio->bi_end_io = journal_discard_endio;
diff --git a/drivers/md/bcache/movinggc.c b/drivers/md/bcache/movinggc.c
index 5c4bddecfaf0..13b8a907006d 100644
--- a/drivers/md/bcache/movinggc.c
+++ b/drivers/md/bcache/movinggc.c
@@ -77,15 +77,13 @@ static void moving_init(struct moving_io *io)
{
struct bio *bio = &io->bio.bio;
- bio_init(bio);
+ bio_init(bio, bio->bi_inline_vecs,
+ DIV_ROUND_UP(KEY_SIZE(&io->w->key), PAGE_SECTORS));
bio_get(bio);
bio_set_prio(bio, IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0));
bio->bi_iter.bi_size = KEY_SIZE(&io->w->key) << 9;
- bio->bi_max_vecs = DIV_ROUND_UP(KEY_SIZE(&io->w->key),
- PAGE_SECTORS);
bio->bi_private = &io->cl;
- bio->bi_io_vec = bio->bi_inline_vecs;
bch_bio_map(bio, NULL);
}
diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
index 40ffe5e424b3..76d20875503c 100644
--- a/drivers/md/bcache/request.c
+++ b/drivers/md/bcache/request.c
@@ -196,10 +196,8 @@ static void bch_data_insert_start(struct closure *cl)
struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
struct bio *bio = op->bio, *n;
- if (atomic_sub_return(bio_sectors(bio), &op->c->sectors_to_gc) < 0) {
- set_gc_sectors(op->c);
+ if (atomic_sub_return(bio_sectors(bio), &op->c->sectors_to_gc) < 0)
wake_up_gc(op->c);
- }
if (op->bypass)
return bch_data_invalidate(cl);
@@ -404,8 +402,8 @@ static bool check_should_bypass(struct cached_dev *dc, struct bio *bio)
if (!congested &&
mode == CACHE_MODE_WRITEBACK &&
- op_is_write(bio_op(bio)) &&
- (bio->bi_opf & REQ_SYNC))
+ op_is_write(bio->bi_opf) &&
+ op_is_sync(bio->bi_opf))
goto rescale;
spin_lock(&dc->io_lock);
@@ -623,7 +621,7 @@ static void do_bio_hook(struct search *s, struct bio *orig_bio)
{
struct bio *bio = &s->bio.bio;
- bio_init(bio);
+ bio_init(bio, NULL, 0);
__bio_clone_fast(bio, orig_bio);
bio->bi_end_io = request_endio;
bio->bi_private = &s->cl;
@@ -923,7 +921,7 @@ static void cached_dev_write(struct cached_dev *dc, struct search *s)
flush->bi_bdev = bio->bi_bdev;
flush->bi_end_io = request_endio;
flush->bi_private = cl;
- bio_set_op_attrs(flush, REQ_OP_WRITE, WRITE_FLUSH);
+ flush->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
closure_bio_submit(flush, cl);
}
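
Several hunks in this file and below are mechanical translations of the retired composite write flags into bare request flags on bi_opf. A rough cheat-sheet for reading them (approximate, as of this kernel series; not an authoritative definition):

	/*	bio_set_op_attrs(bio, op, fl)	->	bio->bi_opf = op | fl
	 *	WRITE_FLUSH			->	REQ_PREFLUSH
	 *	WRITE_FLUSH_FUA			->	REQ_PREFLUSH | REQ_FUA
	 *	READ_SYNC / WRITE_SYNC		->	dropped; op_is_sync(bi_opf)
	 *						now expresses the check
	 */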
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
index 849ad441cd76..3a19cbc8b230 100644
--- a/drivers/md/bcache/super.c
+++ b/drivers/md/bcache/super.c
@@ -58,6 +58,7 @@ static wait_queue_head_t unregister_wait;
struct workqueue_struct *bcache_wq;
#define BTREE_MAX_PAGES (256 * 1024 / PAGE_SIZE)
+#define BCACHE_MINORS 16 /* partition support */
/* Superblock */
@@ -381,7 +382,7 @@ static char *uuid_read(struct cache_set *c, struct jset *j, struct closure *cl)
return "bad uuid pointer";
bkey_copy(&c->uuid_bucket, k);
- uuid_io(c, REQ_OP_READ, READ_SYNC, k, cl);
+ uuid_io(c, REQ_OP_READ, 0, k, cl);
if (j->version < BCACHE_JSET_VERSION_UUIDv1) {
struct uuid_entry_v0 *u0 = (void *) c->uuids;
@@ -600,7 +601,7 @@ static void prio_read(struct cache *ca, uint64_t bucket)
ca->prio_last_buckets[bucket_nr] = bucket;
bucket_nr++;
- prio_io(ca, bucket, REQ_OP_READ, READ_SYNC);
+ prio_io(ca, bucket, REQ_OP_READ, 0);
if (p->csum != bch_crc64(&p->magic, bucket_bytes(ca) - 8))
pr_warn("bad csum reading priorities");
@@ -783,8 +784,10 @@ static int bcache_device_init(struct bcache_device *d, unsigned block_size,
if (minor < 0)
return minor;
+ minor *= BCACHE_MINORS;
+
if (!(d->bio_split = bioset_create(4, offsetof(struct bbio, bio))) ||
- !(d->disk = alloc_disk(1))) {
+ !(d->disk = alloc_disk(BCACHE_MINORS))) {
ida_simple_remove(&bcache_minor, minor);
return -ENOMEM;
}
@@ -1152,9 +1155,7 @@ static void register_bdev(struct cache_sb *sb, struct page *sb_page,
dc->bdev = bdev;
dc->bdev->bd_holder = dc;
- bio_init(&dc->sb_bio);
- dc->sb_bio.bi_max_vecs = 1;
- dc->sb_bio.bi_io_vec = dc->sb_bio.bi_inline_vecs;
+ bio_init(&dc->sb_bio, dc->sb_bio.bi_inline_vecs, 1);
dc->sb_bio.bi_io_vec[0].bv_page = sb_page;
get_page(sb_page);
@@ -1491,6 +1492,7 @@ struct cache_set *bch_cache_set_alloc(struct cache_sb *sb)
mutex_init(&c->bucket_lock);
init_waitqueue_head(&c->btree_cache_wait);
init_waitqueue_head(&c->bucket_wait);
+ init_waitqueue_head(&c->gc_wait);
sema_init(&c->uuid_write_mutex, 1);
spin_lock_init(&c->btree_gc_time.lock);
@@ -1550,6 +1552,7 @@ static void run_cache_set(struct cache_set *c)
for_each_cache(ca, c, i)
c->nbuckets += ca->sb.nbuckets;
+ set_gc_sectors(c);
if (CACHE_SYNC(&c->sb)) {
LIST_HEAD(journal);
@@ -1814,9 +1817,7 @@ static int cache_alloc(struct cache *ca)
__module_get(THIS_MODULE);
kobject_init(&ca->kobj, &bch_cache_ktype);
- bio_init(&ca->journal.bio);
- ca->journal.bio.bi_max_vecs = 8;
- ca->journal.bio.bi_io_vec = ca->journal.bio.bi_inline_vecs;
+ bio_init(&ca->journal.bio, ca->journal.bio.bi_inline_vecs, 8);
free = roundup_pow_of_two(ca->sb.nbuckets) >> 10;
@@ -1852,9 +1853,7 @@ static int register_cache(struct cache_sb *sb, struct page *sb_page,
ca->bdev = bdev;
ca->bdev->bd_holder = ca;
- bio_init(&ca->sb_bio);
- ca->sb_bio.bi_max_vecs = 1;
- ca->sb_bio.bi_io_vec = ca->sb_bio.bi_inline_vecs;
+ bio_init(&ca->sb_bio, ca->sb_bio.bi_inline_vecs, 1);
ca->sb_bio.bi_io_vec[0].bv_page = sb_page;
get_page(sb_page);
diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c
index e51644e503a5..69e1ae59cab8 100644
--- a/drivers/md/bcache/writeback.c
+++ b/drivers/md/bcache/writeback.c
@@ -106,14 +106,13 @@ static void dirty_init(struct keybuf_key *w)
struct dirty_io *io = w->private;
struct bio *bio = &io->bio;
- bio_init(bio);
+ bio_init(bio, bio->bi_inline_vecs,
+ DIV_ROUND_UP(KEY_SIZE(&w->key), PAGE_SECTORS));
if (!io->dc->writeback_percent)
bio_set_prio(bio, IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0));
bio->bi_iter.bi_size = KEY_SIZE(&w->key) << 9;
- bio->bi_max_vecs = DIV_ROUND_UP(KEY_SIZE(&w->key), PAGE_SECTORS);
bio->bi_private = w;
- bio->bi_io_vec = bio->bi_inline_vecs;
bch_bio_map(bio, NULL);
}
diff --git a/drivers/md/bcache/writeback.h b/drivers/md/bcache/writeback.h
index 301eaf565167..629bd1a502fd 100644
--- a/drivers/md/bcache/writeback.h
+++ b/drivers/md/bcache/writeback.h
@@ -57,8 +57,7 @@ static inline bool should_writeback(struct cached_dev *dc, struct bio *bio,
if (would_skip)
return false;
- return bio->bi_opf & REQ_SYNC ||
- in_use <= CUTOFF_WRITEBACK;
+ return op_is_sync(bio->bi_opf) || in_use <= CUTOFF_WRITEBACK;
}
static inline void bch_writeback_queue(struct cached_dev *dc)
diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
index 2d826927a3bf..9fb2ccac958a 100644
--- a/drivers/md/bitmap.c
+++ b/drivers/md/bitmap.c
@@ -27,6 +27,7 @@
#include <linux/mount.h>
#include <linux/buffer_head.h>
#include <linux/seq_file.h>
+#include <trace/events/block.h>
#include "md.h"
#include "bitmap.h"
@@ -208,11 +209,13 @@ static struct md_rdev *next_active_rdev(struct md_rdev *rdev, struct mddev *mdde
static int write_sb_page(struct bitmap *bitmap, struct page *page, int wait)
{
- struct md_rdev *rdev = NULL;
+ struct md_rdev *rdev;
struct block_device *bdev;
struct mddev *mddev = bitmap->mddev;
struct bitmap_storage *store = &bitmap->storage;
+restart:
+ rdev = NULL;
while ((rdev = next_active_rdev(rdev, mddev)) != NULL) {
int size = PAGE_SIZE;
loff_t offset = mddev->bitmap_info.offset;
@@ -268,8 +271,8 @@ static int write_sb_page(struct bitmap *bitmap, struct page *page, int wait)
page);
}
- if (wait)
- md_super_wait(mddev);
+ if (wait && md_super_wait(mddev) < 0)
+ goto restart;
return 0;
bad_alignment:
@@ -405,10 +408,10 @@ static int read_page(struct file *file, unsigned long index,
ret = -EIO;
out:
if (ret)
- printk(KERN_ALERT "md: bitmap read error: (%dB @ %llu): %d\n",
- (int)PAGE_SIZE,
- (unsigned long long)index << PAGE_SHIFT,
- ret);
+ pr_err("md: bitmap read error: (%dB @ %llu): %d\n",
+ (int)PAGE_SIZE,
+ (unsigned long long)index << PAGE_SHIFT,
+ ret);
return ret;
}
@@ -416,6 +419,28 @@ out:
* bitmap file superblock operations
*/
+/*
+ * bitmap_wait_writes() should be called before writing any bitmap
+ * blocks, to ensure previous writes, particularly from
+ * bitmap_daemon_work(), have completed.
+ */
+static void bitmap_wait_writes(struct bitmap *bitmap)
+{
+ if (bitmap->storage.file)
+ wait_event(bitmap->write_wait,
+ atomic_read(&bitmap->pending_writes)==0);
+ else
+ /* Note that we ignore the return value. The writes
+ * might have failed, but that would just mean that
+ * some bits which should be cleared haven't been,
+ * which is safe. The relevant bitmap blocks will
+ * probably get written again, but there is no great
+ * loss if they aren't.
+ */
+ md_super_wait(bitmap->mddev);
+}
+
+
/* update the event counter and sync the superblock to disk */
void bitmap_update_sb(struct bitmap *bitmap)
{
@@ -455,24 +480,24 @@ void bitmap_print_sb(struct bitmap *bitmap)
if (!bitmap || !bitmap->storage.sb_page)
return;
sb = kmap_atomic(bitmap->storage.sb_page);
- printk(KERN_DEBUG "%s: bitmap file superblock:\n", bmname(bitmap));
- printk(KERN_DEBUG " magic: %08x\n", le32_to_cpu(sb->magic));
- printk(KERN_DEBUG " version: %d\n", le32_to_cpu(sb->version));
- printk(KERN_DEBUG " uuid: %08x.%08x.%08x.%08x\n",
- *(__u32 *)(sb->uuid+0),
- *(__u32 *)(sb->uuid+4),
- *(__u32 *)(sb->uuid+8),
- *(__u32 *)(sb->uuid+12));
- printk(KERN_DEBUG " events: %llu\n",
- (unsigned long long) le64_to_cpu(sb->events));
- printk(KERN_DEBUG "events cleared: %llu\n",
- (unsigned long long) le64_to_cpu(sb->events_cleared));
- printk(KERN_DEBUG " state: %08x\n", le32_to_cpu(sb->state));
- printk(KERN_DEBUG " chunksize: %d B\n", le32_to_cpu(sb->chunksize));
- printk(KERN_DEBUG " daemon sleep: %ds\n", le32_to_cpu(sb->daemon_sleep));
- printk(KERN_DEBUG " sync size: %llu KB\n",
- (unsigned long long)le64_to_cpu(sb->sync_size)/2);
- printk(KERN_DEBUG "max write behind: %d\n", le32_to_cpu(sb->write_behind));
+ pr_debug("%s: bitmap file superblock:\n", bmname(bitmap));
+ pr_debug(" magic: %08x\n", le32_to_cpu(sb->magic));
+ pr_debug(" version: %d\n", le32_to_cpu(sb->version));
+ pr_debug(" uuid: %08x.%08x.%08x.%08x\n",
+ *(__u32 *)(sb->uuid+0),
+ *(__u32 *)(sb->uuid+4),
+ *(__u32 *)(sb->uuid+8),
+ *(__u32 *)(sb->uuid+12));
+ pr_debug(" events: %llu\n",
+ (unsigned long long) le64_to_cpu(sb->events));
+ pr_debug("events cleared: %llu\n",
+ (unsigned long long) le64_to_cpu(sb->events_cleared));
+ pr_debug(" state: %08x\n", le32_to_cpu(sb->state));
+ pr_debug(" chunksize: %d B\n", le32_to_cpu(sb->chunksize));
+ pr_debug(" daemon sleep: %ds\n", le32_to_cpu(sb->daemon_sleep));
+ pr_debug(" sync size: %llu KB\n",
+ (unsigned long long)le64_to_cpu(sb->sync_size)/2);
+ pr_debug("max write behind: %d\n", le32_to_cpu(sb->write_behind));
kunmap_atomic(sb);
}
@@ -506,14 +531,14 @@ static int bitmap_new_disk_sb(struct bitmap *bitmap)
BUG_ON(!chunksize);
if (!is_power_of_2(chunksize)) {
kunmap_atomic(sb);
- printk(KERN_ERR "bitmap chunksize not a power of 2\n");
+ pr_warn("bitmap chunksize not a power of 2\n");
return -EINVAL;
}
sb->chunksize = cpu_to_le32(chunksize);
daemon_sleep = bitmap->mddev->bitmap_info.daemon_sleep;
if (!daemon_sleep || (daemon_sleep > MAX_SCHEDULE_TIMEOUT)) {
- printk(KERN_INFO "Choosing daemon_sleep default (5 sec)\n");
+ pr_debug("Choosing daemon_sleep default (5 sec)\n");
daemon_sleep = 5 * HZ;
}
sb->daemon_sleep = cpu_to_le32(daemon_sleep);
@@ -584,7 +609,7 @@ re_read:
/* to 4k blocks */
bm_blocks = DIV_ROUND_UP_SECTOR_T(bm_blocks, 4096);
offset = bitmap->mddev->bitmap_info.offset + (bitmap->cluster_slot * (bm_blocks << 3));
- pr_info("%s:%d bm slot: %d offset: %llu\n", __func__, __LINE__,
+ pr_debug("%s:%d bm slot: %d offset: %llu\n", __func__, __LINE__,
bitmap->cluster_slot, offset);
}
@@ -634,7 +659,7 @@ re_read:
else if (write_behind > COUNTER_MAX)
reason = "write-behind limit out of range (0 - 16383)";
if (reason) {
- printk(KERN_INFO "%s: invalid bitmap file superblock: %s\n",
+ pr_warn("%s: invalid bitmap file superblock: %s\n",
bmname(bitmap), reason);
goto out;
}
@@ -648,18 +673,15 @@ re_read:
* bitmap's UUID and event counter to the mddev's
*/
if (memcmp(sb->uuid, bitmap->mddev->uuid, 16)) {
- printk(KERN_INFO
- "%s: bitmap superblock UUID mismatch\n",
- bmname(bitmap));
+ pr_warn("%s: bitmap superblock UUID mismatch\n",
+ bmname(bitmap));
goto out;
}
events = le64_to_cpu(sb->events);
if (!nodes && (events < bitmap->mddev->events)) {
- printk(KERN_INFO
- "%s: bitmap file is out of date (%llu < %llu) "
- "-- forcing full recovery\n",
- bmname(bitmap), events,
- (unsigned long long) bitmap->mddev->events);
+ pr_warn("%s: bitmap file is out of date (%llu < %llu) -- forcing full recovery\n",
+ bmname(bitmap), events,
+ (unsigned long long) bitmap->mddev->events);
set_bit(BITMAP_STALE, &bitmap->flags);
}
}
@@ -679,8 +701,8 @@ out:
if (err == 0 && nodes && (bitmap->cluster_slot < 0)) {
err = md_setup_cluster(bitmap->mddev, nodes);
if (err) {
- pr_err("%s: Could not setup cluster service (%d)\n",
- bmname(bitmap), err);
+ pr_warn("%s: Could not setup cluster service (%d)\n",
+ bmname(bitmap), err);
goto out_no_sb;
}
bitmap->cluster_slot = md_cluster_ops->slot_number(bitmap->mddev);
@@ -847,15 +869,13 @@ static void bitmap_file_kick(struct bitmap *bitmap)
ptr = file_path(bitmap->storage.file,
path, PAGE_SIZE);
- printk(KERN_ALERT
- "%s: kicking failed bitmap file %s from array!\n",
- bmname(bitmap), IS_ERR(ptr) ? "" : ptr);
+ pr_warn("%s: kicking failed bitmap file %s from array!\n",
+ bmname(bitmap), IS_ERR(ptr) ? "" : ptr);
kfree(path);
} else
- printk(KERN_ALERT
- "%s: disabling internal bitmap due to errors\n",
- bmname(bitmap));
+ pr_warn("%s: disabling internal bitmap due to errors\n",
+ bmname(bitmap));
}
}
@@ -983,6 +1003,7 @@ void bitmap_unplug(struct bitmap *bitmap)
{
unsigned long i;
int dirty, need_write;
+ int writing = 0;
if (!bitmap || !bitmap->storage.filemap ||
test_bit(BITMAP_STALE, &bitmap->flags))
@@ -997,15 +1018,19 @@ void bitmap_unplug(struct bitmap *bitmap)
need_write = test_and_clear_page_attr(bitmap, i,
BITMAP_PAGE_NEEDWRITE);
if (dirty || need_write) {
+ if (!writing) {
+ bitmap_wait_writes(bitmap);
+ if (bitmap->mddev->queue)
+ blk_add_trace_msg(bitmap->mddev->queue,
+ "md bitmap_unplug");
+ }
clear_page_attr(bitmap, i, BITMAP_PAGE_PENDING);
write_page(bitmap, bitmap->storage.filemap[i], 0);
+ writing = 1;
}
}
- if (bitmap->storage.file)
- wait_event(bitmap->write_wait,
- atomic_read(&bitmap->pending_writes)==0);
- else
- md_super_wait(bitmap->mddev);
+ if (writing)
+ bitmap_wait_writes(bitmap);
if (test_bit(BITMAP_WRITE_ERROR, &bitmap->flags))
bitmap_file_kick(bitmap);
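
The reworked loop above enforces a write-ordering rule: before the first page of a new round is submitted, bitmap_wait_writes() drains any writes still in flight from bitmap_daemon_work(), and a final wait makes the whole round stable before write errors are assessed. Schematically (a reading aid, not code from the patch):

	/* bitmap_unplug(), sketched:
	 *
	 *	for each DIRTY/NEEDWRITE page:
	 *		if this is the first write of the round:
	 *			bitmap_wait_writes(bitmap);	// old writes done
	 *		write_page(...);
	 *	if anything was written:
	 *		bitmap_wait_writes(bitmap);		// round is stable
	 */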
@@ -1056,14 +1081,13 @@ static int bitmap_init_from_disk(struct bitmap *bitmap, sector_t start)
outofdate = test_bit(BITMAP_STALE, &bitmap->flags);
if (outofdate)
- printk(KERN_INFO "%s: bitmap file is out of date, doing full "
- "recovery\n", bmname(bitmap));
+ pr_warn("%s: bitmap file is out of date, doing full recovery\n", bmname(bitmap));
if (file && i_size_read(file->f_mapping->host) < store->bytes) {
- printk(KERN_INFO "%s: bitmap file too short %lu < %lu\n",
- bmname(bitmap),
- (unsigned long) i_size_read(file->f_mapping->host),
- store->bytes);
+ pr_warn("%s: bitmap file too short %lu < %lu\n",
+ bmname(bitmap),
+ (unsigned long) i_size_read(file->f_mapping->host),
+ store->bytes);
goto err;
}
@@ -1137,16 +1161,15 @@ static int bitmap_init_from_disk(struct bitmap *bitmap, sector_t start)
offset = 0;
}
- printk(KERN_INFO "%s: bitmap initialized from disk: "
- "read %lu pages, set %lu of %lu bits\n",
- bmname(bitmap), store->file_pages,
- bit_cnt, chunks);
+ pr_debug("%s: bitmap initialized from disk: read %lu pages, set %lu of %lu bits\n",
+ bmname(bitmap), store->file_pages,
+ bit_cnt, chunks);
return 0;
err:
- printk(KERN_INFO "%s: bitmap initialisation failed: %d\n",
- bmname(bitmap), ret);
+ pr_warn("%s: bitmap initialisation failed: %d\n",
+ bmname(bitmap), ret);
return ret;
}
@@ -1225,6 +1248,10 @@ void bitmap_daemon_work(struct mddev *mddev)
}
bitmap->allclean = 1;
+ if (bitmap->mddev->queue)
+ blk_add_trace_msg(bitmap->mddev->queue,
+ "md bitmap_daemon_work");
+
/* Any file-page which is PENDING now needs to be written.
* So set NEEDWRITE now, then after we make any last-minute changes
* we will write it.
@@ -1289,6 +1316,7 @@ void bitmap_daemon_work(struct mddev *mddev)
}
spin_unlock_irq(&counts->lock);
+ bitmap_wait_writes(bitmap);
/* Now start writeout on any page in NEEDWRITE that isn't DIRTY.
* DIRTY pages need to be written by bitmap_unplug so it can wait
* for them.
@@ -1595,7 +1623,7 @@ void bitmap_cond_end_sync(struct bitmap *bitmap, sector_t sector, bool force)
atomic_read(&bitmap->mddev->recovery_active) == 0);
bitmap->mddev->curr_resync_completed = sector;
- set_bit(MD_CHANGE_CLEAN, &bitmap->mddev->flags);
+ set_bit(MD_SB_CHANGE_CLEAN, &bitmap->mddev->sb_flags);
sector &= ~((1ULL << bitmap->counts.chunkshift) - 1);
s = 0;
while (s < sector && s < bitmap->mddev->resync_max_sectors) {
@@ -1825,8 +1853,8 @@ struct bitmap *bitmap_create(struct mddev *mddev, int slot)
if (err)
goto error;
- printk(KERN_INFO "created bitmap (%lu pages) for device %s\n",
- bitmap->counts.pages, bmname(bitmap));
+ pr_debug("created bitmap (%lu pages) for device %s\n",
+ bitmap->counts.pages, bmname(bitmap));
err = test_bit(BITMAP_WRITE_ERROR, &bitmap->flags) ? -EIO : 0;
if (err)
@@ -2029,8 +2057,10 @@ int bitmap_resize(struct bitmap *bitmap, sector_t blocks,
!bitmap->mddev->bitmap_info.external,
mddev_is_clustered(bitmap->mddev)
? bitmap->cluster_slot : 0);
- if (ret)
+ if (ret) {
+ bitmap_file_unmap(&store);
goto err;
+ }
pages = DIV_ROUND_UP(chunks, PAGE_COUNTER_RATIO);
@@ -2089,7 +2119,7 @@ int bitmap_resize(struct bitmap *bitmap, sector_t blocks,
bitmap->mddev->bitmap_info.chunksize = 1 << (old_counts.chunkshift +
BITMAP_BLOCK_SHIFT);
blocks = old_counts.chunks << old_counts.chunkshift;
- pr_err("Could not pre-allocate in-memory bitmap for cluster raid\n");
+ pr_warn("Could not pre-allocate in-memory bitmap for cluster raid\n");
break;
} else
bitmap->counts.bp[page].count += 1;
@@ -2266,7 +2296,7 @@ location_store(struct mddev *mddev, const char *buf, size_t len)
/* Ensure new bitmap info is stored in
* metadata promptly.
*/
- set_bit(MD_CHANGE_DEVS, &mddev->flags);
+ set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
md_wakeup_thread(mddev->thread);
}
rv = 0;
diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c
index 125aedc3875f..84d2f0e4c754 100644
--- a/drivers/md/dm-bufio.c
+++ b/drivers/md/dm-bufio.c
@@ -611,9 +611,7 @@ static void use_inline_bio(struct dm_buffer *b, int rw, sector_t block,
char *ptr;
int len;
- bio_init(&b->bio);
- b->bio.bi_io_vec = b->bio_vec;
- b->bio.bi_max_vecs = DM_BUFIO_INLINE_VECS;
+ bio_init(&b->bio, b->bio_vec, DM_BUFIO_INLINE_VECS);
b->bio.bi_iter.bi_sector = block << b->c->sectors_per_block_bits;
b->bio.bi_bdev = b->c->bdev;
b->bio.bi_end_io = inline_endio;
@@ -822,12 +820,14 @@ enum new_flag {
static struct dm_buffer *__alloc_buffer_wait_no_callback(struct dm_bufio_client *c, enum new_flag nf)
{
struct dm_buffer *b;
+ bool tried_noio_alloc = false;
/*
* dm-bufio is resistant to allocation failures (it just keeps
* one buffer reserved in cases all the allocations fail).
* So set flags to not try too hard:
- * GFP_NOIO: don't recurse into the I/O layer
+ * GFP_NOWAIT: don't wait; if we need to sleep we'll release our
+ * mutex and wait ourselves.
* __GFP_NORETRY: don't retry and rather return failure
* __GFP_NOMEMALLOC: don't use emergency reserves
* __GFP_NOWARN: don't print a warning in case of failure
@@ -837,7 +837,7 @@ static struct dm_buffer *__alloc_buffer_wait_no_callback(struct dm_bufio_client
*/
while (1) {
if (dm_bufio_cache_size_latch != 1) {
- b = alloc_buffer(c, GFP_NOIO | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN);
+ b = alloc_buffer(c, GFP_NOWAIT | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN);
if (b)
return b;
}
@@ -845,6 +845,15 @@ static struct dm_buffer *__alloc_buffer_wait_no_callback(struct dm_bufio_client
if (nf == NF_PREFETCH)
return NULL;
+ if (dm_bufio_cache_size_latch != 1 && !tried_noio_alloc) {
+ dm_bufio_unlock(c);
+ b = alloc_buffer(c, GFP_NOIO | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN);
+ dm_bufio_lock(c);
+ if (b)
+ return b;
+ tried_noio_alloc = true;
+ }
+
if (!list_empty(&c->reserved_buffers)) {
b = list_entry(c->reserved_buffers.next,
struct dm_buffer, lru_list);
@@ -1316,7 +1325,7 @@ int dm_bufio_issue_flush(struct dm_bufio_client *c)
{
struct dm_io_request io_req = {
.bi_op = REQ_OP_WRITE,
- .bi_op_flags = WRITE_FLUSH,
+ .bi_op_flags = REQ_PREFLUSH,
.mem.type = DM_IO_KMEM,
.mem.ptr.addr = NULL,
.client = c->dm_io,
@@ -1587,18 +1596,9 @@ dm_bufio_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
static unsigned long
dm_bufio_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
{
- struct dm_bufio_client *c;
- unsigned long count;
-
- c = container_of(shrink, struct dm_bufio_client, shrinker);
- if (sc->gfp_mask & __GFP_FS)
- dm_bufio_lock(c);
- else if (!dm_bufio_trylock(c))
- return 0;
+ struct dm_bufio_client *c = container_of(shrink, struct dm_bufio_client, shrinker);
- count = c->n_buffers[LIST_CLEAN] + c->n_buffers[LIST_DIRTY];
- dm_bufio_unlock(c);
- return count;
+ return ACCESS_ONCE(c->n_buffers[LIST_CLEAN]) + ACCESS_ONCE(c->n_buffers[LIST_DIRTY]);
}
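
The shrink_count callback drops its locking entirely: an approximate count is acceptable to the shrinker core, and taking dm_bufio_lock() from reclaim invited stalls. ACCESS_ONCE() was the contemporary annotation; in later kernels the same line would be spelled with READ_ONCE() (an observation, not part of the patch):

	return READ_ONCE(c->n_buffers[LIST_CLEAN]) +
	       READ_ONCE(c->n_buffers[LIST_DIRTY]);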
/*
diff --git a/drivers/md/dm-cache-block-types.h b/drivers/md/dm-cache-block-types.h
index bed4ad4e1b7c..389c9e8ac785 100644
--- a/drivers/md/dm-cache-block-types.h
+++ b/drivers/md/dm-cache-block-types.h
@@ -17,9 +17,9 @@
* discard bitset.
*/
-typedef dm_block_t __bitwise__ dm_oblock_t;
-typedef uint32_t __bitwise__ dm_cblock_t;
-typedef dm_block_t __bitwise__ dm_dblock_t;
+typedef dm_block_t __bitwise dm_oblock_t;
+typedef uint32_t __bitwise dm_cblock_t;
+typedef dm_block_t __bitwise dm_dblock_t;
static inline dm_oblock_t to_oblock(dm_block_t b)
{
diff --git a/drivers/md/dm-cache-metadata.c b/drivers/md/dm-cache-metadata.c
index 695577812cf6..624fe4319b24 100644
--- a/drivers/md/dm-cache-metadata.c
+++ b/drivers/md/dm-cache-metadata.c
@@ -383,7 +383,6 @@ static int __format_metadata(struct dm_cache_metadata *cmd)
goto bad;
dm_disk_bitset_init(cmd->tm, &cmd->discard_info);
-
r = dm_bitset_empty(&cmd->discard_info, &cmd->discard_root);
if (r < 0)
goto bad;
@@ -789,7 +788,7 @@ static struct dm_cache_metadata *lookup_or_open(struct block_device *bdev,
static bool same_params(struct dm_cache_metadata *cmd, sector_t data_block_size)
{
if (cmd->data_block_size != data_block_size) {
- DMERR("data_block_size (%llu) different from that in metadata (%llu)\n",
+ DMERR("data_block_size (%llu) different from that in metadata (%llu)",
(unsigned long long) data_block_size,
(unsigned long long) cmd->data_block_size);
return false;
diff --git a/drivers/md/dm-cache-policy-smq.c b/drivers/md/dm-cache-policy-smq.c
index c33f4a6e1d7d..f19c6930a67c 100644
--- a/drivers/md/dm-cache-policy-smq.c
+++ b/drivers/md/dm-cache-policy-smq.c
@@ -1361,7 +1361,7 @@ static void smq_clear_dirty(struct dm_cache_policy *p, dm_oblock_t oblock)
static unsigned random_level(dm_cblock_t cblock)
{
- return hash_32_generic(from_cblock(cblock), 9) & (NR_CACHE_LEVELS - 1);
+ return hash_32(from_cblock(cblock), 9) & (NR_CACHE_LEVELS - 1);
}
static int smq_load_mapping(struct dm_cache_policy *p,
diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
index 59b2c50562e4..e04c61e0839e 100644
--- a/drivers/md/dm-cache-target.c
+++ b/drivers/md/dm-cache-target.c
@@ -989,7 +989,8 @@ static void set_cache_mode(struct cache *cache, enum cache_metadata_mode new_mod
enum cache_metadata_mode old_mode = get_cache_mode(cache);
if (dm_cache_metadata_needs_check(cache->cmd, &needs_check)) {
- DMERR("unable to read needs_check flag, setting failure mode");
+ DMERR("%s: unable to read needs_check flag, setting failure mode.",
+ cache_device_name(cache));
new_mode = CM_FAIL;
}
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index a2768835d394..7c6c57216bf2 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -12,6 +12,7 @@
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
+#include <linux/key.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/mempool.h>
@@ -23,12 +24,14 @@
#include <linux/atomic.h>
#include <linux/scatterlist.h>
#include <linux/rbtree.h>
+#include <linux/ctype.h>
#include <asm/page.h>
#include <asm/unaligned.h>
#include <crypto/hash.h>
#include <crypto/md5.h>
#include <crypto/algapi.h>
#include <crypto/skcipher.h>
+#include <keys/user-type.h>
#include <linux/device-mapper.h>
@@ -140,8 +143,9 @@ struct crypt_config {
char *cipher;
char *cipher_string;
+ char *key_string;
- struct crypt_iv_operations *iv_gen_ops;
+ const struct crypt_iv_operations *iv_gen_ops;
union {
struct iv_essiv_private essiv;
struct iv_benbi_private benbi;
@@ -758,15 +762,15 @@ static int crypt_iv_tcw_post(struct crypt_config *cc, u8 *iv,
return r;
}
-static struct crypt_iv_operations crypt_iv_plain_ops = {
+static const struct crypt_iv_operations crypt_iv_plain_ops = {
.generator = crypt_iv_plain_gen
};
-static struct crypt_iv_operations crypt_iv_plain64_ops = {
+static const struct crypt_iv_operations crypt_iv_plain64_ops = {
.generator = crypt_iv_plain64_gen
};
-static struct crypt_iv_operations crypt_iv_essiv_ops = {
+static const struct crypt_iv_operations crypt_iv_essiv_ops = {
.ctr = crypt_iv_essiv_ctr,
.dtr = crypt_iv_essiv_dtr,
.init = crypt_iv_essiv_init,
@@ -774,17 +778,17 @@ static struct crypt_iv_operations crypt_iv_essiv_ops = {
.generator = crypt_iv_essiv_gen
};
-static struct crypt_iv_operations crypt_iv_benbi_ops = {
+static const struct crypt_iv_operations crypt_iv_benbi_ops = {
.ctr = crypt_iv_benbi_ctr,
.dtr = crypt_iv_benbi_dtr,
.generator = crypt_iv_benbi_gen
};
-static struct crypt_iv_operations crypt_iv_null_ops = {
+static const struct crypt_iv_operations crypt_iv_null_ops = {
.generator = crypt_iv_null_gen
};
-static struct crypt_iv_operations crypt_iv_lmk_ops = {
+static const struct crypt_iv_operations crypt_iv_lmk_ops = {
.ctr = crypt_iv_lmk_ctr,
.dtr = crypt_iv_lmk_dtr,
.init = crypt_iv_lmk_init,
@@ -793,7 +797,7 @@ static struct crypt_iv_operations crypt_iv_lmk_ops = {
.post = crypt_iv_lmk_post
};
-static struct crypt_iv_operations crypt_iv_tcw_ops = {
+static const struct crypt_iv_operations crypt_iv_tcw_ops = {
.ctr = crypt_iv_tcw_ctr,
.dtr = crypt_iv_tcw_dtr,
.init = crypt_iv_tcw_init,
@@ -994,7 +998,6 @@ static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size)
gfp_t gfp_mask = GFP_NOWAIT | __GFP_HIGHMEM;
unsigned i, len, remaining_size;
struct page *page;
- struct bio_vec *bvec;
retry:
if (unlikely(gfp_mask & __GFP_DIRECT_RECLAIM))
@@ -1019,12 +1022,7 @@ retry:
len = (remaining_size > PAGE_SIZE) ? PAGE_SIZE : remaining_size;
- bvec = &clone->bi_io_vec[clone->bi_vcnt++];
- bvec->bv_page = page;
- bvec->bv_len = len;
- bvec->bv_offset = 0;
-
- clone->bi_iter.bi_size += len;
+ bio_add_page(clone, page, len, 0);
remaining_size -= len;
}
@@ -1135,7 +1133,7 @@ static void clone_init(struct dm_crypt_io *io, struct bio *clone)
clone->bi_private = io;
clone->bi_end_io = crypt_endio;
clone->bi_bdev = cc->dev->bdev;
- bio_set_op_attrs(clone, bio_op(io->base_bio), bio_flags(io->base_bio));
+ clone->bi_opf = io->base_bio->bi_opf;
}
static int kcryptd_io_read(struct dm_crypt_io *io, gfp_t gfp)
@@ -1471,7 +1469,7 @@ static int crypt_alloc_tfms(struct crypt_config *cc, char *ciphermode)
return 0;
}
-static int crypt_setkey_allcpus(struct crypt_config *cc)
+static int crypt_setkey(struct crypt_config *cc)
{
unsigned subkey_size;
int err = 0, i, r;
@@ -1490,25 +1488,157 @@ static int crypt_setkey_allcpus(struct crypt_config *cc)
return err;
}
+#ifdef CONFIG_KEYS
+
+static bool contains_whitespace(const char *str)
+{
+ while (*str)
+ if (isspace(*str++))
+ return true;
+ return false;
+}
+
+static int crypt_set_keyring_key(struct crypt_config *cc, const char *key_string)
+{
+ char *new_key_string, *key_desc;
+ int ret;
+ struct key *key;
+ const struct user_key_payload *ukp;
+
+ /*
+ * Reject key_string with whitespace. dm core currently lacks code for
+ * proper whitespace escaping in arguments on DM_TABLE_STATUS path.
+ */
+ if (contains_whitespace(key_string)) {
+ DMERR("whitespace chars not allowed in key string");
+ return -EINVAL;
+ }
+
+ /* look for next ':' separating key_type from key_description */
+ key_desc = strpbrk(key_string, ":");
+ if (!key_desc || key_desc == key_string || !strlen(key_desc + 1))
+ return -EINVAL;
+
+ if (strncmp(key_string, "logon:", key_desc - key_string + 1) &&
+ strncmp(key_string, "user:", key_desc - key_string + 1))
+ return -EINVAL;
+
+ new_key_string = kstrdup(key_string, GFP_KERNEL);
+ if (!new_key_string)
+ return -ENOMEM;
+
+ key = request_key(key_string[0] == 'l' ? &key_type_logon : &key_type_user,
+ key_desc + 1, NULL);
+ if (IS_ERR(key)) {
+ kzfree(new_key_string);
+ return PTR_ERR(key);
+ }
+
+ rcu_read_lock();
+
+ ukp = user_key_payload(key);
+ if (!ukp) {
+ rcu_read_unlock();
+ key_put(key);
+ kzfree(new_key_string);
+ return -EKEYREVOKED;
+ }
+
+ if (cc->key_size != ukp->datalen) {
+ rcu_read_unlock();
+ key_put(key);
+ kzfree(new_key_string);
+ return -EINVAL;
+ }
+
+ memcpy(cc->key, ukp->data, cc->key_size);
+
+ rcu_read_unlock();
+ key_put(key);
+
+ /* clear the flag since the following operations may invalidate the previously valid key */
+ clear_bit(DM_CRYPT_KEY_VALID, &cc->flags);
+
+ ret = crypt_setkey(cc);
+
+ /* wipe the kernel key payload copy in each case */
+ memset(cc->key, 0, cc->key_size * sizeof(u8));
+
+ if (!ret) {
+ set_bit(DM_CRYPT_KEY_VALID, &cc->flags);
+ kzfree(cc->key_string);
+ cc->key_string = new_key_string;
+ } else
+ kzfree(new_key_string);
+
+ return ret;
+}
+
+static int get_key_size(char **key_string)
+{
+ char *colon, dummy;
+ int ret;
+
+ if (*key_string[0] != ':')
+ return strlen(*key_string) >> 1;
+
+ /* look for next ':' in key string */
+ colon = strpbrk(*key_string + 1, ":");
+ if (!colon)
+ return -EINVAL;
+
+ if (sscanf(*key_string + 1, "%u%c", &ret, &dummy) != 2 || dummy != ':')
+ return -EINVAL;
+
+ *key_string = colon;
+
+ /* remaining key string should be :<logon|user>:<key_desc> */
+
+ return ret;
+}
+
+#else
+
+static int crypt_set_keyring_key(struct crypt_config *cc, const char *key_string)
+{
+ return -EINVAL;
+}
+
+static int get_key_size(char **key_string)
+{
+ return (*key_string[0] == ':') ? -EINVAL : strlen(*key_string) >> 1;
+}
+
+#endif
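
Taken together, get_key_size() and crypt_set_keyring_key() define the extended key field the constructor now accepts. Hypothetical values for illustration only (the sizes, key types, and descriptions below are invented):

	/*	"b0f8c2...9ae1"			plain hex key; size = strlen / 2
	 *	":32:logon:dmcrypt:vol1"	32-byte key from the logon keyring
	 *	":64:user:cryptsetup:disk0"	64-byte key from the user keyring
	 */
	char *field = ":32:user:cryptsetup:disk0";
	int size = get_key_size(&field);  /* 32; field now points at ":user:cryptsetup:disk0" */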
+
static int crypt_set_key(struct crypt_config *cc, char *key)
{
int r = -EINVAL;
int key_string_len = strlen(key);
- /* The key size may not be changed. */
- if (cc->key_size != (key_string_len >> 1))
- goto out;
-
/* Hyphen (which gives a key_size of zero) means there is no key. */
if (!cc->key_size && strcmp(key, "-"))
goto out;
- if (cc->key_size && crypt_decode_key(cc->key, key, cc->key_size) < 0)
+ /* ':' means the key is in the kernel keyring; short-circuit normal key processing */
+ if (key[0] == ':') {
+ r = crypt_set_keyring_key(cc, key + 1);
goto out;
+ }
- set_bit(DM_CRYPT_KEY_VALID, &cc->flags);
+ /* clear the flag since the following operations may invalidate the previously valid key */
+ clear_bit(DM_CRYPT_KEY_VALID, &cc->flags);
- r = crypt_setkey_allcpus(cc);
+ /* wipe references to any kernel keyring key */
+ kzfree(cc->key_string);
+ cc->key_string = NULL;
+
+ if (cc->key_size && crypt_decode_key(cc->key, key, cc->key_size) < 0)
+ goto out;
+
+ r = crypt_setkey(cc);
+ if (!r)
+ set_bit(DM_CRYPT_KEY_VALID, &cc->flags);
out:
/* Hex key string not needed after here, so wipe it. */
@@ -1521,8 +1651,10 @@ static int crypt_wipe_key(struct crypt_config *cc)
{
clear_bit(DM_CRYPT_KEY_VALID, &cc->flags);
memset(&cc->key, 0, cc->key_size * sizeof(u8));
+ kzfree(cc->key_string);
+ cc->key_string = NULL;
- return crypt_setkey_allcpus(cc);
+ return crypt_setkey(cc);
}
static void crypt_dtr(struct dm_target *ti)
@@ -1558,6 +1690,7 @@ static void crypt_dtr(struct dm_target *ti)
kzfree(cc->cipher);
kzfree(cc->cipher_string);
+ kzfree(cc->key_string);
/* Must zero key material before freeing */
kzfree(cc);
@@ -1726,12 +1859,13 @@ bad_mem:
/*
* Construct an encryption mapping:
- * <cipher> <key> <iv_offset> <dev_path> <start>
+ * <cipher> [<key>|:<key_size>:<user|logon>:<key_description>] <iv_offset> <dev_path> <start>
*/
static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
struct crypt_config *cc;
- unsigned int key_size, opt_params;
+ int key_size;
+ unsigned int opt_params;
unsigned long long tmpll;
int ret;
size_t iv_size_padding;
@@ -1748,7 +1882,11 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
return -EINVAL;
}
- key_size = strlen(argv[1]) >> 1;
+ key_size = get_key_size(&argv[1]);
+ if (key_size < 0) {
+ ti->error = "Cannot parse key size";
+ return -EINVAL;
+ }
cc = kzalloc(sizeof(*cc) + key_size * sizeof(u8), GFP_KERNEL);
if (!cc) {
@@ -1955,10 +2093,13 @@ static void crypt_status(struct dm_target *ti, status_type_t type,
case STATUSTYPE_TABLE:
DMEMIT("%s ", cc->cipher_string);
- if (cc->key_size > 0)
- for (i = 0; i < cc->key_size; i++)
- DMEMIT("%02x", cc->key[i]);
- else
+ if (cc->key_size > 0) {
+ if (cc->key_string)
+ DMEMIT(":%u:%s", cc->key_size, cc->key_string);
+ else
+ for (i = 0; i < cc->key_size; i++)
+ DMEMIT("%02x", cc->key[i]);
+ } else
DMEMIT("-");
DMEMIT(" %llu %s %llu", (unsigned long long)cc->iv_offset,
@@ -2014,7 +2155,7 @@ static void crypt_resume(struct dm_target *ti)
static int crypt_message(struct dm_target *ti, unsigned argc, char **argv)
{
struct crypt_config *cc = ti->private;
- int ret = -EINVAL;
+ int key_size, ret = -EINVAL;
if (argc < 2)
goto error;
@@ -2025,6 +2166,13 @@ static int crypt_message(struct dm_target *ti, unsigned argc, char **argv)
return -EINVAL;
}
if (argc == 3 && !strcasecmp(argv[1], "set")) {
+ /* The key size may not be changed. */
+ key_size = get_key_size(&argv[2]);
+ if (key_size < 0 || cc->key_size != key_size) {
+ memset(argv[2], '0', strlen(argv[2]));
+ return -EINVAL;
+ }
+
ret = crypt_set_key(cc, argv[2]);
if (ret)
return ret;
@@ -2068,7 +2216,7 @@ static void crypt_io_hints(struct dm_target *ti, struct queue_limits *limits)
static struct target_type crypt_target = {
.name = "crypt",
- .version = {1, 14, 1},
+ .version = {1, 15, 0},
.module = THIS_MODULE,
.ctr = crypt_ctr,
.dtr = crypt_dtr,
diff --git a/drivers/md/dm-flakey.c b/drivers/md/dm-flakey.c
index 6a2e8dd44a1b..13305a182611 100644
--- a/drivers/md/dm-flakey.c
+++ b/drivers/md/dm-flakey.c
@@ -36,7 +36,8 @@ struct flakey_c {
};
enum feature_flag_bits {
- DROP_WRITES
+ DROP_WRITES,
+ ERROR_WRITES
};
struct per_bio_data {
@@ -76,6 +77,25 @@ static int parse_features(struct dm_arg_set *as, struct flakey_c *fc,
if (test_and_set_bit(DROP_WRITES, &fc->flags)) {
ti->error = "Feature drop_writes duplicated";
return -EINVAL;
+ } else if (test_bit(ERROR_WRITES, &fc->flags)) {
+ ti->error = "Feature drop_writes conflicts with feature error_writes";
+ return -EINVAL;
+ }
+
+ continue;
+ }
+
+ /*
+ * error_writes
+ */
+ if (!strcasecmp(arg_name, "error_writes")) {
+ if (test_and_set_bit(ERROR_WRITES, &fc->flags)) {
+ ti->error = "Feature error_writes duplicated";
+ return -EINVAL;
+
+ } else if (test_bit(DROP_WRITES, &fc->flags)) {
+ ti->error = "Feature error_writes conflicts with feature drop_writes";
+ return -EINVAL;
}
continue;
@@ -135,6 +155,10 @@ static int parse_features(struct dm_arg_set *as, struct flakey_c *fc,
if (test_bit(DROP_WRITES, &fc->flags) && (fc->corrupt_bio_rw == WRITE)) {
ti->error = "drop_writes is incompatible with corrupt_bio_byte with the WRITE flag set";
return -EINVAL;
+
+ } else if (test_bit(ERROR_WRITES, &fc->flags) && (fc->corrupt_bio_rw == WRITE)) {
+ ti->error = "error_writes is incompatible with corrupt_bio_byte with the WRITE flag set";
+ return -EINVAL;
}
return 0;
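
A hypothetical table line exercising the new feature (device name and sector counts invented for the example):

	/*	0 409600 flakey /dev/sdX 0 5 5 1 error_writes
	 *
	 * 5 seconds up, 5 seconds down, one feature argument; during the
	 * down interval writes now complete with -EIO instead of being
	 * silently acknowledged the way drop_writes does.
	 */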
@@ -200,11 +224,13 @@ static int flakey_ctr(struct dm_target *ti, unsigned int argc, char **argv)
if (!(fc->up_interval + fc->down_interval)) {
ti->error = "Total (up + down) interval is zero";
+ r = -EINVAL;
goto bad;
}
if (fc->up_interval + fc->down_interval < fc->up_interval) {
ti->error = "Interval overflow";
+ r = -EINVAL;
goto bad;
}
@@ -289,22 +315,27 @@ static int flakey_map(struct dm_target *ti, struct bio *bio)
pb->bio_submitted = true;
/*
- * Error reads if neither corrupt_bio_byte or drop_writes are set.
+ * Error reads if none of corrupt_bio_byte, drop_writes or error_writes is set.
* Otherwise, flakey_end_io() will decide if the reads should be modified.
*/
if (bio_data_dir(bio) == READ) {
- if (!fc->corrupt_bio_byte && !test_bit(DROP_WRITES, &fc->flags))
+ if (!fc->corrupt_bio_byte && !test_bit(DROP_WRITES, &fc->flags) &&
+ !test_bit(ERROR_WRITES, &fc->flags))
return -EIO;
goto map_bio;
}
/*
- * Drop writes?
+ * Drop or error writes?
*/
if (test_bit(DROP_WRITES, &fc->flags)) {
bio_endio(bio);
return DM_MAPIO_SUBMITTED;
}
+ else if (test_bit(ERROR_WRITES, &fc->flags)) {
+ bio_io_error(bio);
+ return DM_MAPIO_SUBMITTED;
+ }
/*
* Corrupt matching writes.
@@ -340,10 +371,11 @@ static int flakey_end_io(struct dm_target *ti, struct bio *bio, int error)
*/
corrupt_bio_data(bio, fc);
- } else if (!test_bit(DROP_WRITES, &fc->flags)) {
+ } else if (!test_bit(DROP_WRITES, &fc->flags) &&
+ !test_bit(ERROR_WRITES, &fc->flags)) {
/*
* Error read during the down_interval if drop_writes
- * wasn't configured.
+ * and error_writes were not configured.
*/
return -EIO;
}
@@ -357,7 +389,7 @@ static void flakey_status(struct dm_target *ti, status_type_t type,
{
unsigned sz = 0;
struct flakey_c *fc = ti->private;
- unsigned drop_writes;
+ unsigned drop_writes, error_writes;
switch (type) {
case STATUSTYPE_INFO:
@@ -370,10 +402,13 @@ static void flakey_status(struct dm_target *ti, status_type_t type,
fc->down_interval);
drop_writes = test_bit(DROP_WRITES, &fc->flags);
- DMEMIT("%u ", drop_writes + (fc->corrupt_bio_byte > 0) * 5);
+ error_writes = test_bit(ERROR_WRITES, &fc->flags);
+ DMEMIT("%u ", drop_writes + error_writes + (fc->corrupt_bio_byte > 0) * 5);
if (drop_writes)
DMEMIT("drop_writes ");
+ else if (error_writes)
+ DMEMIT("error_writes ");
if (fc->corrupt_bio_byte)
DMEMIT("corrupt_bio_byte %u %c %u %u ",
@@ -410,7 +445,7 @@ static int flakey_iterate_devices(struct dm_target *ti, iterate_devices_callout_
static struct target_type flakey_target = {
.name = "flakey",
- .version = {1, 3, 1},
+ .version = {1, 4, 0},
.module = THIS_MODULE,
.ctr = flakey_ctr,
.dtr = flakey_dtr,
diff --git a/drivers/md/dm-io.c b/drivers/md/dm-io.c
index 0bf1a12e35fe..03940bf36f6c 100644
--- a/drivers/md/dm-io.c
+++ b/drivers/md/dm-io.c
@@ -162,7 +162,10 @@ struct dpages {
struct page **p, unsigned long *len, unsigned *offset);
void (*next_page)(struct dpages *dp);
- unsigned context_u;
+ union {
+ unsigned context_u;
+ struct bvec_iter context_bi;
+ };
void *context_ptr;
void *vma_invalidate_address;
@@ -204,25 +207,36 @@ static void list_dp_init(struct dpages *dp, struct page_list *pl, unsigned offse
static void bio_get_page(struct dpages *dp, struct page **p,
unsigned long *len, unsigned *offset)
{
- struct bio_vec *bvec = dp->context_ptr;
- *p = bvec->bv_page;
- *len = bvec->bv_len - dp->context_u;
- *offset = bvec->bv_offset + dp->context_u;
+ struct bio_vec bvec = bvec_iter_bvec((struct bio_vec *)dp->context_ptr,
+ dp->context_bi);
+
+ *p = bvec.bv_page;
+ *len = bvec.bv_len;
+ *offset = bvec.bv_offset;
+
+ /* avoid figuring it out again in bio_next_page() */
+ dp->context_bi.bi_sector = (sector_t)bvec.bv_len;
}
static void bio_next_page(struct dpages *dp)
{
- struct bio_vec *bvec = dp->context_ptr;
- dp->context_ptr = bvec + 1;
- dp->context_u = 0;
+ unsigned int len = (unsigned int)dp->context_bi.bi_sector;
+
+ bvec_iter_advance((struct bio_vec *)dp->context_ptr,
+ &dp->context_bi, len);
}
static void bio_dp_init(struct dpages *dp, struct bio *bio)
{
dp->get_page = bio_get_page;
dp->next_page = bio_next_page;
- dp->context_ptr = __bvec_iter_bvec(bio->bi_io_vec, bio->bi_iter);
- dp->context_u = bio->bi_iter.bi_bvec_done;
+
+ /*
+ * We just use the bvec iterator to retrieve pages, so it is OK to
+ * access the bvec table directly here
+ */
+ dp->context_ptr = bio->bi_io_vec;
+ dp->context_bi = bio->bi_iter;
}
/*
diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
index 966eb4b61aed..c72a77048b73 100644
--- a/drivers/md/dm-ioctl.c
+++ b/drivers/md/dm-ioctl.c
@@ -1697,7 +1697,7 @@ static int copy_params(struct dm_ioctl __user *user, struct dm_ioctl *param_kern
{
struct dm_ioctl *dmi;
int secure_data;
- const size_t minimum_data_size = sizeof(*param_kernel) - sizeof(param_kernel->data);
+ const size_t minimum_data_size = offsetof(struct dm_ioctl, data);
if (copy_from_user(param_kernel, user, minimum_data_size))
return -EFAULT;
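
Why offsetof() rather than the sizeof arithmetic: sizeof(*p) - sizeof(p->data) assumes no padding follows the last fixed member, while offsetof() names the header boundary directly. A sketch of the distinction on an illustrative struct (not dm_ioctl itself):

	struct hyp {
		u64 a;
		u32 b;
		char data[7];	/* offsetof(struct hyp, data) == 12, but the
				 * struct is padded to 24 bytes, so
				 * sizeof(struct hyp) - sizeof(data) == 17
				 * overshoots the real header size */
	};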
diff --git a/drivers/md/dm-log.c b/drivers/md/dm-log.c
index 07fc1ad42ec5..33e71ea6cc14 100644
--- a/drivers/md/dm-log.c
+++ b/drivers/md/dm-log.c
@@ -308,7 +308,7 @@ static int flush_header(struct log_c *lc)
};
lc->io_req.bi_op = REQ_OP_WRITE;
- lc->io_req.bi_op_flags = WRITE_FLUSH;
+ lc->io_req.bi_op_flags = REQ_PREFLUSH;
return dm_io(&lc->io_req, 1, &null_location, NULL);
}
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index e477af8596e2..6400cffb986d 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -372,16 +372,13 @@ static int __pg_init_all_paths(struct multipath *m)
return atomic_read(&m->pg_init_in_progress);
}
-static int pg_init_all_paths(struct multipath *m)
+static void pg_init_all_paths(struct multipath *m)
{
- int r;
unsigned long flags;
spin_lock_irqsave(&m->lock, flags);
- r = __pg_init_all_paths(m);
+ __pg_init_all_paths(m);
spin_unlock_irqrestore(&m->lock, flags);
-
- return r;
}
static void __switch_pg(struct multipath *m, struct priority_group *pg)
@@ -583,16 +580,17 @@ static int __multipath_map(struct dm_target *ti, struct request *clone,
* .request_fn stacked on blk-mq path(s) and
* blk-mq stacked on blk-mq path(s).
*/
- *__clone = blk_mq_alloc_request(bdev_get_queue(bdev),
- rq_data_dir(rq), BLK_MQ_REQ_NOWAIT);
- if (IS_ERR(*__clone)) {
- /* ENOMEM, requeue */
+ clone = blk_mq_alloc_request(bdev_get_queue(bdev),
+ rq_data_dir(rq), BLK_MQ_REQ_NOWAIT);
+ if (IS_ERR(clone)) {
+ /* EBUSY, ENODEV or EWOULDBLOCK: requeue */
clear_request_fn_mpio(m, map_context);
return r;
}
- (*__clone)->bio = (*__clone)->biotail = NULL;
- (*__clone)->rq_disk = bdev->bd_disk;
- (*__clone)->cmd_flags |= REQ_FAILFAST_TRANSPORT;
+ clone->bio = clone->biotail = NULL;
+ clone->rq_disk = bdev->bd_disk;
+ clone->cmd_flags |= REQ_FAILFAST_TRANSPORT;
+ *__clone = clone;
}
if (pgpath->pg->ps.type->start_io)
@@ -852,18 +850,22 @@ retain:
attached_handler_name = scsi_dh_attached_handler_name(q, GFP_KERNEL);
if (attached_handler_name) {
/*
+ * Clear any hw_handler_params associated with a
+ * handler that isn't already attached.
+ */
+ if (m->hw_handler_name && strcmp(attached_handler_name, m->hw_handler_name)) {
+ kfree(m->hw_handler_params);
+ m->hw_handler_params = NULL;
+ }
+
+ /*
* Reset hw_handler_name to match the attached handler
- * and clear any hw_handler_params associated with the
- * ignored handler.
*
* NB. This modifies the table line to show the actual
* handler instead of the original table passed in.
*/
kfree(m->hw_handler_name);
m->hw_handler_name = attached_handler_name;
-
- kfree(m->hw_handler_params);
- m->hw_handler_params = NULL;
}
}
@@ -1002,6 +1004,8 @@ static int parse_hw_handler(struct dm_arg_set *as, struct multipath *m)
}
m->hw_handler_name = kstrdup(dm_shift_arg(as), GFP_KERNEL);
+ if (!m->hw_handler_name)
+ return -EINVAL;
if (hw_argc > 1) {
char *p;
@@ -1362,7 +1366,7 @@ static int switch_pg_num(struct multipath *m, const char *pgstr)
char dummy;
if (!pgstr || (sscanf(pgstr, "%u%c", &pgnum, &dummy) != 1) || !pgnum ||
- (pgnum > m->nr_priority_groups)) {
+ !m->nr_priority_groups || (pgnum > m->nr_priority_groups)) {
DMWARN("invalid PG number supplied to switch_pg_num");
return -EINVAL;
}
@@ -1394,7 +1398,7 @@ static int bypass_pg_num(struct multipath *m, const char *pgstr, bool bypassed)
char dummy;
if (!pgstr || (sscanf(pgstr, "%u%c", &pgnum, &dummy) != 1) || !pgnum ||
- (pgnum > m->nr_priority_groups)) {
+ !m->nr_priority_groups || (pgnum > m->nr_priority_groups)) {
DMWARN("invalid PG number supplied to bypass_pg");
return -EINVAL;
}
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
index 6d53810963f7..b8f978e551d7 100644
--- a/drivers/md/dm-raid.c
+++ b/drivers/md/dm-raid.c
@@ -160,7 +160,6 @@ struct raid_dev {
CTR_FLAG_DAEMON_SLEEP | \
CTR_FLAG_MIN_RECOVERY_RATE | \
CTR_FLAG_MAX_RECOVERY_RATE | \
- CTR_FLAG_MAX_WRITE_BEHIND | \
CTR_FLAG_STRIPE_CACHE | \
CTR_FLAG_REGION_SIZE | \
CTR_FLAG_DELTA_DISKS | \
@@ -171,7 +170,6 @@ struct raid_dev {
CTR_FLAG_DAEMON_SLEEP | \
CTR_FLAG_MIN_RECOVERY_RATE | \
CTR_FLAG_MAX_RECOVERY_RATE | \
- CTR_FLAG_MAX_WRITE_BEHIND | \
CTR_FLAG_STRIPE_CACHE | \
CTR_FLAG_REGION_SIZE | \
CTR_FLAG_DELTA_DISKS | \
@@ -2011,7 +2009,7 @@ static int super_load(struct md_rdev *rdev, struct md_rdev *refdev)
sb->compat_features = cpu_to_le32(FEATURE_FLAG_SUPPORTS_V190);
/* Force writing of superblocks to disk */
- set_bit(MD_CHANGE_DEVS, &rdev->mddev->flags);
+ set_bit(MD_SB_CHANGE_DEVS, &rdev->mddev->sb_flags);
/* Any superblock is better than none, choose that if given */
return refdev ? 0 : 1;
@@ -2050,16 +2048,17 @@ static int super_init_validation(struct raid_set *rs, struct md_rdev *rdev)
mddev->reshape_position = MaxSector;
+ mddev->raid_disks = le32_to_cpu(sb->num_devices);
+ mddev->level = le32_to_cpu(sb->level);
+ mddev->layout = le32_to_cpu(sb->layout);
+ mddev->chunk_sectors = le32_to_cpu(sb->stripe_sectors);
+
/*
* Reshaping is supported, e.g. reshape_position is valid
* in superblock and superblock content is authoritative.
*/
if (le32_to_cpu(sb->compat_features) & FEATURE_FLAG_SUPPORTS_V190) {
/* Superblock is authoritative wrt given raid set layout! */
- mddev->raid_disks = le32_to_cpu(sb->num_devices);
- mddev->level = le32_to_cpu(sb->level);
- mddev->layout = le32_to_cpu(sb->layout);
- mddev->chunk_sectors = le32_to_cpu(sb->stripe_sectors);
mddev->new_level = le32_to_cpu(sb->new_level);
mddev->new_layout = le32_to_cpu(sb->new_layout);
mddev->new_chunk_sectors = le32_to_cpu(sb->new_stripe_sectors);
@@ -2087,38 +2086,44 @@ static int super_init_validation(struct raid_set *rs, struct md_rdev *rdev)
/*
* No takeover/reshaping, because we don't have the extended v1.9.0 metadata
*/
- if (le32_to_cpu(sb->level) != mddev->new_level) {
- DMERR("Reshaping/takeover raid sets not yet supported. (raid level/stripes/size change)");
- return -EINVAL;
- }
- if (le32_to_cpu(sb->layout) != mddev->new_layout) {
- DMERR("Reshaping raid sets not yet supported. (raid layout change)");
- DMERR(" 0x%X vs 0x%X", le32_to_cpu(sb->layout), mddev->layout);
- DMERR(" Old layout: %s w/ %d copies",
- raid10_md_layout_to_format(le32_to_cpu(sb->layout)),
- raid10_md_layout_to_copies(le32_to_cpu(sb->layout)));
- DMERR(" New layout: %s w/ %d copies",
- raid10_md_layout_to_format(mddev->layout),
- raid10_md_layout_to_copies(mddev->layout));
- return -EINVAL;
- }
- if (le32_to_cpu(sb->stripe_sectors) != mddev->new_chunk_sectors) {
- DMERR("Reshaping raid sets not yet supported. (stripe sectors change)");
- return -EINVAL;
- }
+ struct raid_type *rt_cur = get_raid_type_by_ll(mddev->level, mddev->layout);
+ struct raid_type *rt_new = get_raid_type_by_ll(mddev->new_level, mddev->new_layout);
- /* We can only change the number of devices in raid1 with old (i.e. pre 1.0.7) metadata */
- if (!rt_is_raid1(rs->raid_type) &&
- (le32_to_cpu(sb->num_devices) != mddev->raid_disks)) {
- DMERR("Reshaping raid sets not yet supported. (device count change from %u to %u)",
- sb->num_devices, mddev->raid_disks);
+ if (rs_takeover_requested(rs)) {
+ if (rt_cur && rt_new)
+ DMERR("Takeover raid sets from %s to %s not yet supported by metadata. (raid level change)",
+ rt_cur->name, rt_new->name);
+ else
+ DMERR("Takeover raid sets not yet supported by metadata. (raid level change)");
+ return -EINVAL;
+ } else if (rs_reshape_requested(rs)) {
+ DMERR("Reshaping raid sets not yet supported by metadata. (raid layout change keeping level)");
+ if (mddev->layout != mddev->new_layout) {
+ if (rt_cur && rt_new)
+ DMERR(" current layout %s vs new layout %s",
+ rt_cur->name, rt_new->name);
+ else
+ DMERR(" current layout 0x%X vs new layout 0x%X",
+ le32_to_cpu(sb->layout), mddev->new_layout);
+ }
+ if (mddev->chunk_sectors != mddev->new_chunk_sectors)
+ DMERR(" current stripe sectors %u vs new stripe sectors %u",
+ mddev->chunk_sectors, mddev->new_chunk_sectors);
+ if (rs->delta_disks)
+ DMERR(" current %u disks vs new %u disks",
+ mddev->raid_disks, mddev->raid_disks + rs->delta_disks);
+ if (rs_is_raid10(rs)) {
+ DMERR(" Old layout: %s w/ %u copies",
+ raid10_md_layout_to_format(mddev->layout),
+ raid10_md_layout_to_copies(mddev->layout));
+ DMERR(" New layout: %s w/ %u copies",
+ raid10_md_layout_to_format(mddev->new_layout),
+ raid10_md_layout_to_copies(mddev->new_layout));
+ }
return -EINVAL;
}
DMINFO("Discovered old metadata format; upgrading to extended metadata format");
-
- /* Table line is checked vs. authoritative superblock */
- rs_set_new(rs);
}
if (!test_bit(__CTR_FLAG_NOSYNC, &rs->ctr_flags))
@@ -2211,7 +2216,7 @@ static int super_init_validation(struct raid_set *rs, struct md_rdev *rdev)
continue;
if (role != r->raid_disk) {
- if (__is_raid10_near(mddev->layout)) {
+ if (rs_is_raid10(rs) && __is_raid10_near(mddev->layout)) {
if (mddev->raid_disks % __raid10_near_copies(mddev->layout) ||
rs->raid_disks % rs->raid10_copies) {
rs->ti->error =
@@ -2994,6 +2999,9 @@ static int raid_ctr(struct dm_target *ti, unsigned int argc, char **argv)
}
}
+ /* Disable/enable discard support on raid set. */
+ configure_discard_support(rs);
+
mddev_unlock(&rs->md);
return 0;
@@ -3497,7 +3505,7 @@ static void rs_update_sbs(struct raid_set *rs)
struct mddev *mddev = &rs->md;
int ro = mddev->ro;
- set_bit(MD_CHANGE_DEVS, &mddev->flags);
+ set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
mddev->ro = 0;
md_update_sb(mddev, 1);
mddev->ro = ro;
@@ -3580,12 +3588,6 @@ static int raid_preresume(struct dm_target *ti)
if (test_bit(RT_FLAG_UPDATE_SBS, &rs->runtime_flags))
rs_update_sbs(rs);
- /*
- * Disable/enable discard support on raid set after any
- * conversion, because devices can have been added
- */
- configure_discard_support(rs);
-
/* Load the bitmap from disk unless raid0 */
r = __load_dirty_region_bitmap(rs);
if (r)
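
The assignments hoisted above the FEATURE_FLAG_SUPPORTS_V190 check all go through le32_to_cpu(), since dm-raid superblock fields are stored little-endian on disk regardless of host byte order. A minimal userspace sketch of the same conversion pattern, assuming a hypothetical two-field superblock in place of the real struct (the kernel's le32_to_cpu() comes from its byteorder headers; endian.h's le32toh() plays the same role here):

    #include <endian.h>
    #include <stdint.h>
    #include <stdio.h>

    struct raid_sb {                /* hypothetical on-disk layout, always LE */
            uint32_t num_devices;
            uint32_t level;
    };

    int main(void)
    {
            struct raid_sb sb = { htole32(4), htole32(6) };

            /* mirrors mddev->raid_disks = le32_to_cpu(sb->num_devices) */
            printf("disks=%u level=%u\n",
                   (unsigned)le32toh(sb.num_devices),
                   (unsigned)le32toh(sb.level));
            return 0;
    }
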
diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
index 9a8b71067c6e..2ddc2d20e62d 100644
--- a/drivers/md/dm-raid1.c
+++ b/drivers/md/dm-raid1.c
@@ -260,7 +260,7 @@ static int mirror_flush(struct dm_target *ti)
struct mirror *m;
struct dm_io_request io_req = {
.bi_op = REQ_OP_WRITE,
- .bi_op_flags = WRITE_FLUSH,
+ .bi_op_flags = REQ_PREFLUSH,
.mem.type = DM_IO_KMEM,
.mem.ptr.addr = NULL,
.client = ms->io_client,
@@ -656,7 +656,7 @@ static void do_write(struct mirror_set *ms, struct bio *bio)
struct mirror *m;
struct dm_io_request io_req = {
.bi_op = REQ_OP_WRITE,
- .bi_op_flags = bio->bi_opf & WRITE_FLUSH_FUA,
+ .bi_op_flags = bio->bi_opf & (REQ_FUA | REQ_PREFLUSH),
.mem.type = DM_IO_BIO,
.mem.ptr.bio = bio,
.notify.fn = write_callback,
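
Both dm-raid1 hunks replace the retired WRITE_FLUSH/WRITE_FLUSH_FUA composites with explicit REQ_PREFLUSH/REQ_FUA bits, and do_write() keeps only those ordering bits from the incoming bio's bi_opf. A small sketch of the masking, with stand-in bit positions rather than the kernel's real ones:

    #include <stdio.h>

    #define REQ_FUA      (1u << 0)      /* stand-in bit positions */
    #define REQ_PREFLUSH (1u << 1)
    #define REQ_SYNC     (1u << 2)

    int main(void)
    {
            unsigned int bi_opf = REQ_SYNC | REQ_PREFLUSH | REQ_FUA;

            /* as in do_write(): forward only the ordering flags */
            unsigned int io_flags = bi_opf & (REQ_FUA | REQ_PREFLUSH);

            printf("io_flags=%#x fua=%d preflush=%d\n", io_flags,
                   !!(io_flags & REQ_FUA), !!(io_flags & REQ_PREFLUSH));
            return 0;
    }
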
diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c
index 1d0d2adc050a..9d7275fb541a 100644
--- a/drivers/md/dm-rq.c
+++ b/drivers/md/dm-rq.c
@@ -23,11 +23,7 @@ static unsigned dm_mq_queue_depth = DM_MQ_QUEUE_DEPTH;
#define RESERVED_REQUEST_BASED_IOS 256
static unsigned reserved_rq_based_ios = RESERVED_REQUEST_BASED_IOS;
-#ifdef CONFIG_DM_MQ_DEFAULT
-static bool use_blk_mq = true;
-#else
-static bool use_blk_mq = false;
-#endif
+static bool use_blk_mq = IS_ENABLED(CONFIG_DM_MQ_DEFAULT);
bool dm_use_blk_mq_default(void)
{
@@ -75,12 +71,6 @@ static void dm_old_start_queue(struct request_queue *q)
static void dm_mq_start_queue(struct request_queue *q)
{
- unsigned long flags;
-
- spin_lock_irqsave(q->queue_lock, flags);
- queue_flag_clear(QUEUE_FLAG_STOPPED, q);
- spin_unlock_irqrestore(q->queue_lock, flags);
-
blk_mq_start_stopped_hw_queues(q, true);
blk_mq_kick_requeue_list(q);
}
@@ -105,20 +95,10 @@ static void dm_old_stop_queue(struct request_queue *q)
static void dm_mq_stop_queue(struct request_queue *q)
{
- unsigned long flags;
-
- spin_lock_irqsave(q->queue_lock, flags);
- if (blk_queue_stopped(q)) {
- spin_unlock_irqrestore(q->queue_lock, flags);
+ if (blk_mq_queue_stopped(q))
return;
- }
- queue_flag_set(QUEUE_FLAG_STOPPED, q);
- spin_unlock_irqrestore(q->queue_lock, flags);
-
- /* Avoid that requeuing could restart the queue. */
- blk_mq_cancel_requeue_work(q);
- blk_mq_stop_hw_queues(q);
+ blk_mq_quiesce_queue(q);
}
void dm_stop_queue(struct request_queue *q)
@@ -226,6 +206,9 @@ static void rq_end_stats(struct mapped_device *md, struct request *orig)
*/
static void rq_completed(struct mapped_device *md, int rw, bool run_queue)
{
+ struct request_queue *q = md->queue;
+ unsigned long flags;
+
atomic_dec(&md->pending[rw]);
/* nudge anyone waiting on suspend queue */
@@ -238,8 +221,11 @@ static void rq_completed(struct mapped_device *md, int rw, bool run_queue)
* back into ->request_fn() could deadlock attempting to grab the
* queue lock again.
*/
- if (!md->queue->mq_ops && run_queue)
- blk_run_queue_async(md->queue);
+ if (!q->mq_ops && run_queue) {
+ spin_lock_irqsave(q->queue_lock, flags);
+ blk_run_queue_async(q);
+ spin_unlock_irqrestore(q->queue_lock, flags);
+ }
/*
* dm_put() must be at the end of this function. See the comment above
@@ -313,7 +299,7 @@ static void dm_unprep_request(struct request *rq)
if (!rq->q->mq_ops) {
rq->special = NULL;
- rq->cmd_flags &= ~REQ_DONTPREP;
+ rq->rq_flags &= ~RQF_DONTPREP;
}
if (clone)
@@ -338,12 +324,7 @@ static void dm_old_requeue_request(struct request *rq)
static void __dm_mq_kick_requeue_list(struct request_queue *q, unsigned long msecs)
{
- unsigned long flags;
-
- spin_lock_irqsave(q->queue_lock, flags);
- if (!blk_queue_stopped(q))
- blk_mq_delay_kick_requeue_list(q, msecs);
- spin_unlock_irqrestore(q->queue_lock, flags);
+ blk_mq_delay_kick_requeue_list(q, msecs);
}
void dm_mq_kick_requeue_list(struct mapped_device *md)
@@ -354,7 +335,7 @@ EXPORT_SYMBOL(dm_mq_kick_requeue_list);
static void dm_mq_delay_requeue_request(struct request *rq, unsigned long msecs)
{
- blk_mq_requeue_request(rq);
+ blk_mq_requeue_request(rq, false);
__dm_mq_kick_requeue_list(rq->q, msecs);
}
@@ -431,7 +412,7 @@ static void dm_softirq_done(struct request *rq)
return;
}
- if (rq->cmd_flags & REQ_FAILED)
+ if (rq->rq_flags & RQF_FAILED)
mapped = false;
dm_done(clone, tio->error, mapped);
@@ -460,7 +441,7 @@ static void dm_complete_request(struct request *rq, int error)
*/
static void dm_kill_unmapped_request(struct request *rq, int error)
{
- rq->cmd_flags |= REQ_FAILED;
+ rq->rq_flags |= RQF_FAILED;
dm_complete_request(rq, error);
}
@@ -476,7 +457,7 @@ static void end_clone_request(struct request *clone, int error)
* For just cleaning up the information of the queue in which
* the clone was dispatched.
* The clone is *NOT* freed actually here because it is alloced
- * from dm own mempool (REQ_ALLOCED isn't set).
+ * from dm's own mempool (RQF_ALLOCED isn't set).
*/
__blk_put_request(clone->q, clone);
}
@@ -497,7 +478,7 @@ static void dm_dispatch_clone_request(struct request *clone, struct request *rq)
int r;
if (blk_queue_io_stat(clone->q))
- clone->cmd_flags |= REQ_IO_STAT;
+ clone->rq_flags |= RQF_IO_STAT;
clone->start_time = jiffies;
r = blk_insert_cloned_request(clone->q, clone);
@@ -633,7 +614,7 @@ static int dm_old_prep_fn(struct request_queue *q, struct request *rq)
return BLKPREP_DEFER;
rq->special = tio;
- rq->cmd_flags |= REQ_DONTPREP;
+ rq->rq_flags |= RQF_DONTPREP;
return BLKPREP_OK;
}
@@ -819,7 +800,7 @@ static void dm_old_request_fn(struct request_queue *q)
pos = blk_rq_pos(rq);
if ((dm_old_request_peeked_before_merge_deadline(md) &&
- md_in_flight(md) && rq->bio && rq->bio->bi_vcnt == 1 &&
+ md_in_flight(md) && rq->bio && !bio_multiple_segments(rq->bio) &&
md->last_rq_pos == pos && md->last_rq_rw == rq_data_dir(rq)) ||
(ti->type->busy && ti->type->busy(ti))) {
blk_delay_queue(q, 10);
@@ -904,17 +885,6 @@ static int dm_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
dm_put_live_table(md, srcu_idx);
}
- /*
- * On suspend dm_stop_queue() handles stopping the blk-mq
- * request_queue BUT: even though the hw_queues are marked
- * BLK_MQ_S_STOPPED at that point there is still a race that
- * is allowing block/blk-mq.c to call ->queue_rq against a
- * hctx that it really shouldn't. The following check guards
- * against this rarity (albeit _not_ race-free).
- */
- if (unlikely(test_bit(BLK_MQ_S_STOPPED, &hctx->state)))
- return BLK_MQ_RQ_QUEUE_BUSY;
-
if (ti->type->busy && ti->type->busy(ti))
return BLK_MQ_RQ_QUEUE_BUSY;
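
The IS_ENABLED() initializer above folds to a plain 1 or 0 at compile time, which is what lets it replace the #ifdef block. A simplified userspace re-creation of the preprocessor trick from include/linux/kconfig.h (the real macro additionally handles =m tristate options):

    #include <stdio.h>

    #define __ARG_PLACEHOLDER_1 0,
    #define __take_second_arg(__ignored, val, ...) val
    #define ____is_defined(arg1_or_junk) __take_second_arg(arg1_or_junk 1, 0)
    #define ___is_defined(val) ____is_defined(__ARG_PLACEHOLDER_##val)
    #define IS_ENABLED(option) ___is_defined(option)

    #define CONFIG_DM_MQ_DEFAULT 1      /* comment out to get 0 below */

    static const int use_blk_mq = IS_ENABLED(CONFIG_DM_MQ_DEFAULT);

    int main(void)
    {
            printf("use_blk_mq=%d\n", use_blk_mq);
            return 0;
    }
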
diff --git a/drivers/md/dm-snap-persistent.c b/drivers/md/dm-snap-persistent.c
index b8cf956b577b..b93476c3ba3f 100644
--- a/drivers/md/dm-snap-persistent.c
+++ b/drivers/md/dm-snap-persistent.c
@@ -741,7 +741,7 @@ static void persistent_commit_exception(struct dm_exception_store *store,
/*
* Commit exceptions to disk.
*/
- if (ps->valid && area_io(ps, REQ_OP_WRITE, WRITE_FLUSH_FUA))
+ if (ps->valid && area_io(ps, REQ_OP_WRITE, REQ_PREFLUSH | REQ_FUA))
ps->valid = 0;
/*
@@ -818,7 +818,7 @@ static int persistent_commit_merge(struct dm_exception_store *store,
for (i = 0; i < nr_merged; i++)
clear_exception(ps, ps->current_committed - 1 - i);
- r = area_io(ps, REQ_OP_WRITE, WRITE_FLUSH_FUA);
+ r = area_io(ps, REQ_OP_WRITE, REQ_PREFLUSH | REQ_FUA);
if (r < 0)
return r;
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index c4b53b332607..0a427de23ed2 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -871,7 +871,7 @@ static int dm_table_determine_type(struct dm_table *t)
{
unsigned i;
unsigned bio_based = 0, request_based = 0, hybrid = 0;
- bool verify_blk_mq = false;
+ unsigned sq_count = 0, mq_count = 0;
struct dm_target *tgt;
struct dm_dev_internal *dd;
struct list_head *devices = dm_table_get_devices(t);
@@ -924,12 +924,6 @@ static int dm_table_determine_type(struct dm_table *t)
BUG_ON(!request_based); /* No targets in this table */
- if (list_empty(devices) && __table_type_request_based(live_md_type)) {
- /* inherit live MD type */
- t->type = live_md_type;
- return 0;
- }
-
/*
* The only way to establish DM_TYPE_MQ_REQUEST_BASED is by
* having a compatible target use dm_table_set_type.
@@ -948,6 +942,19 @@ verify_rq_based:
return -EINVAL;
}
+ if (list_empty(devices)) {
+ int srcu_idx;
+ struct dm_table *live_table = dm_get_live_table(t->md, &srcu_idx);
+
+ /* inherit live table's type and all_blk_mq */
+ if (live_table) {
+ t->type = live_table->type;
+ t->all_blk_mq = live_table->all_blk_mq;
+ }
+ dm_put_live_table(t->md, srcu_idx);
+ return 0;
+ }
+
/* Non-request-stackable devices can't be used for request-based dm */
list_for_each_entry(dd, devices, list) {
struct request_queue *q = bdev_get_queue(dd->dm_dev->bdev);
@@ -959,19 +966,19 @@ verify_rq_based:
}
if (q->mq_ops)
- verify_blk_mq = true;
+ mq_count++;
+ else
+ sq_count++;
}
+ if (sq_count && mq_count) {
+ DMERR("table load rejected: not all devices are blk-mq request-stackable");
+ return -EINVAL;
+ }
+ t->all_blk_mq = mq_count > 0;
- if (verify_blk_mq) {
- /* verify _all_ devices in the table are blk-mq devices */
- list_for_each_entry(dd, devices, list)
- if (!bdev_get_queue(dd->dm_dev->bdev)->mq_ops) {
- DMERR("table load rejected: not all devices"
- " are blk-mq request-stackable");
- return -EINVAL;
- }
-
- t->all_blk_mq = true;
+ if (t->type == DM_TYPE_MQ_REQUEST_BASED && !t->all_blk_mq) {
+ DMERR("table load rejected: all devices are not blk-mq request-stackable");
+ return -EINVAL;
}
return 0;
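
dm_table_determine_type() now tallies legacy (sq) and blk-mq devices and rejects any mix, setting all_blk_mq only when every underlying device is blk-mq. The same homogeneity check in a standalone sketch, with illustrative names:

    #include <stdbool.h>
    #include <stdio.h>

    static int check_devices(const bool *dev_is_mq, int ndev, bool *all_blk_mq)
    {
            unsigned int sq_count = 0, mq_count = 0;
            int i;

            for (i = 0; i < ndev; i++) {
                    if (dev_is_mq[i])
                            mq_count++;
                    else
                            sq_count++;
            }
            if (sq_count && mq_count) {
                    fprintf(stderr, "table load rejected: mixed sq/mq devices\n");
                    return -1;
            }
            *all_blk_mq = mq_count > 0;
            return 0;
    }

    int main(void)
    {
            bool devs[] = { true, true, false };    /* mixed -> rejected */
            bool all_mq;

            return check_devices(devs, 3, &all_mq) ? 1 : 0;
    }
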
diff --git a/drivers/md/dm-verity-target.c b/drivers/md/dm-verity-target.c
index 0aba34a7b3b3..7335d8a3fc47 100644
--- a/drivers/md/dm-verity-target.c
+++ b/drivers/md/dm-verity-target.c
@@ -868,7 +868,7 @@ static int verity_ctr(struct dm_target *ti, unsigned argc, char **argv)
r = dm_get_device(ti, argv[2], FMODE_READ, &v->hash_dev);
if (r) {
- ti->error = "Data device lookup failed";
+ ti->error = "Hash device lookup failed";
goto bad;
}
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index ef7bf1dd6900..3086da5664f3 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -1525,9 +1525,9 @@ static struct mapped_device *alloc_dev(int minor)
if (!md->bdev)
goto bad;
- bio_init(&md->flush_bio);
+ bio_init(&md->flush_bio, NULL, 0);
md->flush_bio.bi_bdev = md->bdev;
- bio_set_op_attrs(&md->flush_bio, REQ_OP_WRITE, WRITE_FLUSH);
+ md->flush_bio.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
dm_stats_init(&md->stats);
@@ -1886,9 +1886,7 @@ static void __dm_destroy(struct mapped_device *md, bool wait)
set_bit(DMF_FREEING, &md->flags);
spin_unlock(&_minor_lock);
- spin_lock_irq(q->queue_lock);
- queue_flag_set(QUEUE_FLAG_DYING, q);
- spin_unlock_irq(q->queue_lock);
+ blk_set_queue_dying(q);
if (dm_request_based(md) && md->kworker_task)
kthread_flush_worker(&md->kworker);
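
The direct assignment to md->flush_bio.bi_opf works because, after the op/flags unification, the request op occupies the low bits of bi_opf and the REQ_* flags sit above it, so REQ_OP_WRITE | REQ_PREFLUSH composes into one word. A sketch of that encoding with made-up bit positions (the kernel's exact layout differs):

    #include <stdio.h>

    #define REQ_OP_BITS  8
    #define REQ_OP_MASK  ((1u << REQ_OP_BITS) - 1)
    #define REQ_OP_READ  0u
    #define REQ_OP_WRITE 1u
    #define REQ_PREFLUSH (1u << REQ_OP_BITS)        /* first flag bit */
    #define REQ_FUA      (1u << (REQ_OP_BITS + 1))

    int main(void)
    {
            unsigned int bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;

            printf("op=%u preflush=%d fua=%d\n", bi_opf & REQ_OP_MASK,
                   !!(bi_opf & REQ_PREFLUSH), !!(bi_opf & REQ_FUA));
            return 0;
    }
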
diff --git a/drivers/md/linear.c b/drivers/md/linear.c
index 86f5d435901d..5975c9915684 100644
--- a/drivers/md/linear.c
+++ b/drivers/md/linear.c
@@ -21,6 +21,7 @@
#include <linux/seq_file.h>
#include <linux/module.h>
#include <linux/slab.h>
+#include <trace/events/block.h>
#include "md.h"
#include "linear.h"
@@ -101,8 +102,8 @@ static struct linear_conf *linear_conf(struct mddev *mddev, int raid_disks)
sector_t sectors;
if (j < 0 || j >= raid_disks || disk->rdev) {
- printk(KERN_ERR "md/linear:%s: disk numbering problem. Aborting!\n",
- mdname(mddev));
+ pr_warn("md/linear:%s: disk numbering problem. Aborting!\n",
+ mdname(mddev));
goto out;
}
@@ -123,8 +124,8 @@ static struct linear_conf *linear_conf(struct mddev *mddev, int raid_disks)
discard_supported = true;
}
if (cnt != raid_disks) {
- printk(KERN_ERR "md/linear:%s: not enough drives present. Aborting!\n",
- mdname(mddev));
+ pr_warn("md/linear:%s: not enough drives present. Aborting!\n",
+ mdname(mddev));
goto out;
}
@@ -227,22 +228,22 @@ static void linear_make_request(struct mddev *mddev, struct bio *bio)
}
do {
- tmp_dev = which_dev(mddev, bio->bi_iter.bi_sector);
+ sector_t bio_sector = bio->bi_iter.bi_sector;
+ tmp_dev = which_dev(mddev, bio_sector);
start_sector = tmp_dev->end_sector - tmp_dev->rdev->sectors;
end_sector = tmp_dev->end_sector;
data_offset = tmp_dev->rdev->data_offset;
bio->bi_bdev = tmp_dev->rdev->bdev;
- if (unlikely(bio->bi_iter.bi_sector >= end_sector ||
- bio->bi_iter.bi_sector < start_sector))
+ if (unlikely(bio_sector >= end_sector ||
+ bio_sector < start_sector))
goto out_of_bounds;
if (unlikely(bio_end_sector(bio) > end_sector)) {
/* This bio crosses a device boundary, so we have to
* split it.
*/
- split = bio_split(bio, end_sector -
- bio->bi_iter.bi_sector,
+ split = bio_split(bio, end_sector - bio_sector,
GFP_NOIO, fs_bio_set);
bio_chain(split, bio);
} else {
@@ -256,15 +257,18 @@ static void linear_make_request(struct mddev *mddev, struct bio *bio)
!blk_queue_discard(bdev_get_queue(split->bi_bdev)))) {
/* Just ignore it */
bio_endio(split);
- } else
+ } else {
+ if (mddev->gendisk)
+ trace_block_bio_remap(bdev_get_queue(split->bi_bdev),
+ split, disk_devt(mddev->gendisk),
+ bio_sector);
generic_make_request(split);
+ }
} while (split != bio);
return;
out_of_bounds:
- printk(KERN_ERR
- "md/linear:%s: make_request: Sector %llu out of bounds on "
- "dev %s: %llu sectors, offset %llu\n",
+ pr_err("md/linear:%s: make_request: Sector %llu out of bounds on dev %s: %llu sectors, offset %llu\n",
mdname(mddev),
(unsigned long long)bio->bi_iter.bi_sector,
bdevname(tmp_dev->rdev->bdev, b),
@@ -275,7 +279,6 @@ out_of_bounds:
static void linear_status (struct seq_file *seq, struct mddev *mddev)
{
-
seq_printf(seq, " %dk rounding", mddev->chunk_sectors / 2);
}
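
linear_make_request() now latches bi_sector before calling bio_split(), because splitting advances the parent bio's iterator; the later trace_block_bio_remap() call needs the sector the bio started at, not where the parent now points. A toy model of the hazard, with all types as stand-ins:

    #include <stdio.h>

    struct toy_bio { unsigned long long bi_sector, bi_size; };

    /* like bio_split(): carve 'sectors' off the front of the parent */
    static struct toy_bio toy_split(struct toy_bio *parent,
                                    unsigned long long sectors)
    {
            struct toy_bio split = { parent->bi_sector, sectors };

            parent->bi_sector += sectors;
            parent->bi_size -= sectors;
            return split;
    }

    int main(void)
    {
            struct toy_bio bio = { 100, 64 };
            unsigned long long bio_sector = bio.bi_sector;  /* latch first */
            struct toy_bio split = toy_split(&bio, 16);

            printf("trace split [%llu,+%llu) at origin %llu; parent now at %llu\n",
                   split.bi_sector, split.bi_size, bio_sector, bio.bi_sector);
            return 0;
    }
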
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 2089d46b0eb8..82821ee0d57f 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -30,6 +30,18 @@
You should have received a copy of the GNU General Public License
(for example /usr/src/linux/COPYING); if not, write to the Free
Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+
+ Errors, Warnings, etc.
+ Please use:
+ pr_crit() for error conditions that risk data loss
+ pr_err() for error conditions that are unexpected, like an IO error
+ or internal inconsistency
+ pr_warn() for error conditions that could have been predicted, like
+ adding a device to an array when it has incompatible metadata
+ pr_info() for interesting but very rare events, like an array starting
+ or stopping, or a resync starting or stopping
+ pr_debug() for everything else.
+
*/
#include <linux/kthread.h>
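
The guidance added to the header comment maps each pr_* level to a class of event. A userspace sketch with fprintf shims standing in for the kernel macros, reusing messages that appear elsewhere in this patch:

    #include <stdio.h>

    #define pr_err(fmt, ...)   fprintf(stderr, "ERR:  " fmt, ##__VA_ARGS__)
    #define pr_warn(fmt, ...)  fprintf(stderr, "WARN: " fmt, ##__VA_ARGS__)
    #define pr_info(fmt, ...)  fprintf(stdout, "INFO: " fmt, ##__VA_ARGS__)
    #define pr_debug(fmt, ...) ((void)0)   /* compiled out unless debugging */

    int main(void)
    {
            pr_err("md: super_written gets error=%d\n", -5);    /* unexpected IO error */
            pr_warn("md: %s has different UUID to %s\n",
                    "sdb1", "sdc1");                            /* predictable error */
            pr_info("md: %s stopped.\n", "md0");                /* rare, interesting */
            pr_debug("md: bind<%s>\n", "sdb1");                 /* everything else */
            return 0;
    }
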
@@ -52,6 +64,7 @@
#include <linux/raid/md_p.h>
#include <linux/raid/md_u.h>
#include <linux/slab.h>
+#include <trace/events/block.h>
#include "md.h"
#include "bitmap.h"
#include "md-cluster.h"
@@ -394,7 +407,7 @@ static void submit_flushes(struct work_struct *ws)
bi->bi_end_io = md_end_flush;
bi->bi_private = rdev;
bi->bi_bdev = rdev->bdev;
- bio_set_op_attrs(bi, REQ_OP_WRITE, WRITE_FLUSH);
+ bi->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
atomic_inc(&mddev->flush_pending);
submit_bio(bi);
rcu_read_lock();
@@ -684,11 +697,8 @@ static inline sector_t calc_dev_sboffset(struct md_rdev *rdev)
static int alloc_disk_sb(struct md_rdev *rdev)
{
rdev->sb_page = alloc_page(GFP_KERNEL);
- if (!rdev->sb_page) {
- printk(KERN_ALERT "md: out of memory.\n");
+ if (!rdev->sb_page)
return -ENOMEM;
- }
-
return 0;
}
@@ -715,9 +725,15 @@ static void super_written(struct bio *bio)
struct mddev *mddev = rdev->mddev;
if (bio->bi_error) {
- printk("md: super_written gets error=%d\n", bio->bi_error);
+ pr_err("md: super_written gets error=%d\n", bio->bi_error);
md_error(mddev, rdev);
- }
+ if (!test_bit(Faulty, &rdev->flags)
+ && (bio->bi_opf & MD_FAILFAST)) {
+ set_bit(MD_SB_NEED_REWRITE, &mddev->sb_flags);
+ set_bit(LastDev, &rdev->flags);
+ }
+ } else
+ clear_bit(LastDev, &rdev->flags);
if (atomic_dec_and_test(&mddev->pending_writes))
wake_up(&mddev->sb_wait);
@@ -734,7 +750,13 @@ void md_super_write(struct mddev *mddev, struct md_rdev *rdev,
* if zero is reached.
* If an error occurred, call md_error
*/
- struct bio *bio = bio_alloc_mddev(GFP_NOIO, 1, mddev);
+ struct bio *bio;
+ int ff = 0;
+
+ if (test_bit(Faulty, &rdev->flags))
+ return;
+
+ bio = bio_alloc_mddev(GFP_NOIO, 1, mddev);
atomic_inc(&rdev->nr_pending);
@@ -743,16 +765,24 @@ void md_super_write(struct mddev *mddev, struct md_rdev *rdev,
bio_add_page(bio, page, size, 0);
bio->bi_private = rdev;
bio->bi_end_io = super_written;
- bio_set_op_attrs(bio, REQ_OP_WRITE, WRITE_FLUSH_FUA);
+
+ if (test_bit(MD_FAILFAST_SUPPORTED, &mddev->flags) &&
+ test_bit(FailFast, &rdev->flags) &&
+ !test_bit(LastDev, &rdev->flags))
+ ff = MD_FAILFAST;
+ bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH | REQ_FUA | ff;
atomic_inc(&mddev->pending_writes);
submit_bio(bio);
}
-void md_super_wait(struct mddev *mddev)
+int md_super_wait(struct mddev *mddev)
{
/* wait for all superblock writes that were scheduled to complete */
wait_event(mddev->sb_wait, atomic_read(&mddev->pending_writes)==0);
+ if (test_and_clear_bit(MD_SB_NEED_REWRITE, &mddev->sb_flags))
+ return -EAGAIN;
+ return 0;
}
int sync_page_io(struct md_rdev *rdev, sector_t sector, int size,
@@ -795,8 +825,8 @@ static int read_disk_sb(struct md_rdev *rdev, int size)
return 0;
fail:
- printk(KERN_WARNING "md: disabled device %s, could not read superblock.\n",
- bdevname(rdev->bdev,b));
+ pr_err("md: disabled device %s, could not read superblock.\n",
+ bdevname(rdev->bdev,b));
return -EINVAL;
}
@@ -818,7 +848,6 @@ static int sb_equal(mdp_super_t *sb1, mdp_super_t *sb2)
if (!tmp1 || !tmp2) {
ret = 0;
- printk(KERN_INFO "md.c sb_equal(): failed to allocate memory!\n");
goto abort;
}
@@ -932,7 +961,7 @@ int md_check_no_bitmap(struct mddev *mddev)
{
if (!mddev->bitmap_info.file && !mddev->bitmap_info.offset)
return 0;
- printk(KERN_ERR "%s: bitmaps are not supported for %s\n",
+ pr_warn("%s: bitmaps are not supported for %s\n",
mdname(mddev), mddev->pers->name);
return 1;
}
@@ -956,7 +985,8 @@ static int super_90_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor
rdev->sb_start = calc_dev_sboffset(rdev);
ret = read_disk_sb(rdev, MD_SB_BYTES);
- if (ret) return ret;
+ if (ret)
+ return ret;
ret = -EINVAL;
@@ -964,17 +994,15 @@ static int super_90_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor
sb = page_address(rdev->sb_page);
if (sb->md_magic != MD_SB_MAGIC) {
- printk(KERN_ERR "md: invalid raid superblock magic on %s\n",
- b);
+ pr_warn("md: invalid raid superblock magic on %s\n", b);
goto abort;
}
if (sb->major_version != 0 ||
sb->minor_version < 90 ||
sb->minor_version > 91) {
- printk(KERN_WARNING "Bad version number %d.%d on %s\n",
- sb->major_version, sb->minor_version,
- b);
+ pr_warn("Bad version number %d.%d on %s\n",
+ sb->major_version, sb->minor_version, b);
goto abort;
}
@@ -982,8 +1010,7 @@ static int super_90_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor
goto abort;
if (md_csum_fold(calc_sb_csum(sb)) != md_csum_fold(sb->sb_csum)) {
- printk(KERN_WARNING "md: invalid superblock checksum on %s\n",
- b);
+ pr_warn("md: invalid superblock checksum on %s\n", b);
goto abort;
}
@@ -1004,14 +1031,13 @@ static int super_90_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor
__u64 ev1, ev2;
mdp_super_t *refsb = page_address(refdev->sb_page);
if (!uuid_equal(refsb, sb)) {
- printk(KERN_WARNING "md: %s has different UUID to %s\n",
+ pr_warn("md: %s has different UUID to %s\n",
b, bdevname(refdev->bdev,b2));
goto abort;
}
if (!sb_equal(refsb, sb)) {
- printk(KERN_WARNING "md: %s has same UUID"
- " but different superblock to %s\n",
- b, bdevname(refdev->bdev, b2));
+ pr_warn("md: %s has same UUID but different superblock to %s\n",
+ b, bdevname(refdev->bdev, b2));
goto abort;
}
ev1 = md_event(sb);
@@ -1158,6 +1184,8 @@ static int super_90_validate(struct mddev *mddev, struct md_rdev *rdev)
}
if (desc->state & (1<<MD_DISK_WRITEMOSTLY))
set_bit(WriteMostly, &rdev->flags);
+ if (desc->state & (1<<MD_DISK_FAILFAST))
+ set_bit(FailFast, &rdev->flags);
} else /* MULTIPATH are always insync */
set_bit(In_sync, &rdev->flags);
return 0;
@@ -1283,6 +1311,8 @@ static void super_90_sync(struct mddev *mddev, struct md_rdev *rdev)
}
if (test_bit(WriteMostly, &rdev2->flags))
d->state |= (1<<MD_DISK_WRITEMOSTLY);
+ if (test_bit(FailFast, &rdev2->flags))
+ d->state |= (1<<MD_DISK_FAILFAST);
}
/* now set the "removed" and "faulty" bits on any missing devices */
for (i=0 ; i < mddev->raid_disks ; i++) {
@@ -1324,9 +1354,10 @@ super_90_rdev_size_change(struct md_rdev *rdev, sector_t num_sectors)
if (IS_ENABLED(CONFIG_LBDAF) && (u64)num_sectors >= (2ULL << 32) &&
rdev->mddev->level >= 1)
num_sectors = (sector_t)(2ULL << 32) - 2;
- md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size,
+ do {
+ md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size,
rdev->sb_page);
- md_super_wait(rdev->mddev);
+ } while (md_super_wait(rdev->mddev) < 0);
return num_sectors;
}
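
md_super_wait() now returns -EAGAIN when a failfast superblock write failed and must be reissued without MD_FAILFAST, so the size_change paths above loop until the write sticks. The retry contract in a standalone sketch (the flag handling stands in for the real test_and_clear_bit()):

    #include <errno.h>
    #include <stdio.h>

    static int need_rewrite = 1;    /* set when a failfast write failed */

    static void super_write(void)
    {
            printf("writing sb%s\n", need_rewrite ? " (failfast)" : "");
    }

    static int super_wait(void)
    {
            if (need_rewrite) {
                    need_rewrite = 0;       /* test_and_clear_bit() stand-in */
                    return -EAGAIN;
            }
            return 0;
    }

    int main(void)
    {
            do {
                    super_write();
            } while (super_wait() < 0);     /* second pass without failfast */
            return 0;
    }
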
@@ -1413,13 +1444,13 @@ static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_
return -EINVAL;
if (calc_sb_1_csum(sb) != sb->sb_csum) {
- printk("md: invalid superblock checksum on %s\n",
+ pr_warn("md: invalid superblock checksum on %s\n",
bdevname(rdev->bdev,b));
return -EINVAL;
}
if (le64_to_cpu(sb->data_size) < 10) {
- printk("md: data_size too small on %s\n",
- bdevname(rdev->bdev,b));
+ pr_warn("md: data_size too small on %s\n",
+ bdevname(rdev->bdev,b));
return -EINVAL;
}
if (sb->pad0 ||
@@ -1503,8 +1534,7 @@ static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_
sb->level != refsb->level ||
sb->layout != refsb->layout ||
sb->chunksize != refsb->chunksize) {
- printk(KERN_WARNING "md: %s has strangely different"
- " superblock to %s\n",
+ pr_warn("md: %s has strangely different superblock to %s\n",
bdevname(rdev->bdev,b),
bdevname(refdev->bdev,b2));
return -EINVAL;
@@ -1646,8 +1676,7 @@ static int super_1_validate(struct mddev *mddev, struct md_rdev *rdev)
case MD_DISK_ROLE_JOURNAL: /* journal device */
if (!(le32_to_cpu(sb->feature_map) & MD_FEATURE_JOURNAL)) {
/* journal device without journal feature */
- printk(KERN_WARNING
- "md: journal device provided without journal feature, ignoring the device\n");
+ pr_warn("md: journal device provided without journal feature, ignoring the device\n");
return -EINVAL;
}
set_bit(Journal, &rdev->flags);
@@ -1669,6 +1698,8 @@ static int super_1_validate(struct mddev *mddev, struct md_rdev *rdev)
}
if (sb->devflags & WriteMostly1)
set_bit(WriteMostly, &rdev->flags);
+ if (sb->devflags & FailFast1)
+ set_bit(FailFast, &rdev->flags);
if (le32_to_cpu(sb->feature_map) & MD_FEATURE_REPLACEMENT)
set_bit(Replacement, &rdev->flags);
} else /* MULTIPATH are always insync */
@@ -1707,6 +1738,10 @@ static void super_1_sync(struct mddev *mddev, struct md_rdev *rdev)
sb->chunksize = cpu_to_le32(mddev->chunk_sectors);
sb->level = cpu_to_le32(mddev->level);
sb->layout = cpu_to_le32(mddev->layout);
+ if (test_bit(FailFast, &rdev->flags))
+ sb->devflags |= FailFast1;
+ else
+ sb->devflags &= ~FailFast1;
if (test_bit(WriteMostly, &rdev->flags))
sb->devflags |= WriteMostly1;
@@ -1863,9 +1898,10 @@ super_1_rdev_size_change(struct md_rdev *rdev, sector_t num_sectors)
sb->data_size = cpu_to_le64(num_sectors);
sb->super_offset = rdev->sb_start;
sb->sb_csum = calc_sb_1_csum(sb);
- md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size,
- rdev->sb_page);
- md_super_wait(rdev->mddev);
+ do {
+ md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size,
+ rdev->sb_page);
+ } while (md_super_wait(rdev->mddev) < 0);
return num_sectors;
}
@@ -2004,9 +2040,9 @@ int md_integrity_register(struct mddev *mddev)
blk_integrity_register(mddev->gendisk,
bdev_get_integrity(reference->bdev));
- printk(KERN_NOTICE "md: data integrity enabled on %s\n", mdname(mddev));
+ pr_debug("md: data integrity enabled on %s\n", mdname(mddev));
if (bioset_integrity_create(mddev->bio_set, BIO_POOL_SIZE)) {
- printk(KERN_ERR "md: failed to create integrity pool for %s\n",
+ pr_err("md: failed to create integrity pool for %s\n",
mdname(mddev));
return -EINVAL;
}
@@ -2034,8 +2070,8 @@ int md_integrity_add_rdev(struct md_rdev *rdev, struct mddev *mddev)
return 0;
if (blk_integrity_compare(mddev->gendisk, rdev->bdev->bd_disk) != 0) {
- printk(KERN_NOTICE "%s: incompatible integrity profile for %s\n",
- mdname(mddev), bdevname(rdev->bdev, name));
+ pr_err("%s: incompatible integrity profile for %s\n",
+ mdname(mddev), bdevname(rdev->bdev, name));
return -ENXIO;
}
@@ -2089,15 +2125,15 @@ static int bind_rdev_to_array(struct md_rdev *rdev, struct mddev *mddev)
rcu_read_unlock();
if (!test_bit(Journal, &rdev->flags) &&
mddev->max_disks && rdev->desc_nr >= mddev->max_disks) {
- printk(KERN_WARNING "md: %s: array is limited to %d devices\n",
- mdname(mddev), mddev->max_disks);
+ pr_warn("md: %s: array is limited to %d devices\n",
+ mdname(mddev), mddev->max_disks);
return -EBUSY;
}
bdevname(rdev->bdev,b);
strreplace(b, '/', '!');
rdev->mddev = mddev;
- printk(KERN_INFO "md: bind<%s>\n", b);
+ pr_debug("md: bind<%s>\n", b);
if ((err = kobject_add(&rdev->kobj, &mddev->kobj, "dev-%s", b)))
goto fail;
@@ -2116,8 +2152,8 @@ static int bind_rdev_to_array(struct md_rdev *rdev, struct mddev *mddev)
return 0;
fail:
- printk(KERN_WARNING "md: failed to register dev-%s for %s\n",
- b, mdname(mddev));
+ pr_warn("md: failed to register dev-%s for %s\n",
+ b, mdname(mddev));
return err;
}
@@ -2134,7 +2170,7 @@ static void unbind_rdev_from_array(struct md_rdev *rdev)
bd_unlink_disk_holder(rdev->bdev, rdev->mddev->gendisk);
list_del_rcu(&rdev->same_set);
- printk(KERN_INFO "md: unbind<%s>\n", bdevname(rdev->bdev,b));
+ pr_debug("md: unbind<%s>\n", bdevname(rdev->bdev,b));
rdev->mddev = NULL;
sysfs_remove_link(&rdev->kobj, "block");
sysfs_put(rdev->sysfs_state);
@@ -2164,8 +2200,7 @@ static int lock_rdev(struct md_rdev *rdev, dev_t dev, int shared)
bdev = blkdev_get_by_dev(dev, FMODE_READ|FMODE_WRITE|FMODE_EXCL,
shared ? (struct md_rdev *)lock_rdev : rdev);
if (IS_ERR(bdev)) {
- printk(KERN_ERR "md: could not open %s.\n",
- __bdevname(dev, b));
+ pr_warn("md: could not open %s.\n", __bdevname(dev, b));
return PTR_ERR(bdev);
}
rdev->bdev = bdev;
@@ -2185,8 +2220,7 @@ static void export_rdev(struct md_rdev *rdev)
{
char b[BDEVNAME_SIZE];
- printk(KERN_INFO "md: export_rdev(%s)\n",
- bdevname(rdev->bdev,b));
+ pr_debug("md: export_rdev(%s)\n", bdevname(rdev->bdev,b));
md_rdev_clear(rdev);
#ifndef MODULE
if (test_bit(AutoDetected, &rdev->flags))
@@ -2288,24 +2322,24 @@ void md_update_sb(struct mddev *mddev, int force_change)
if (mddev->ro) {
if (force_change)
- set_bit(MD_CHANGE_DEVS, &mddev->flags);
+ set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
return;
}
repeat:
if (mddev_is_clustered(mddev)) {
- if (test_and_clear_bit(MD_CHANGE_DEVS, &mddev->flags))
+ if (test_and_clear_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags))
force_change = 1;
- if (test_and_clear_bit(MD_CHANGE_CLEAN, &mddev->flags))
+ if (test_and_clear_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags))
nospares = 1;
ret = md_cluster_ops->metadata_update_start(mddev);
/* Has someone else updated the sb? */
if (!does_sb_need_changing(mddev)) {
if (ret == 0)
md_cluster_ops->metadata_update_cancel(mddev);
- bit_clear_unless(&mddev->flags, BIT(MD_CHANGE_PENDING),
- BIT(MD_CHANGE_DEVS) |
- BIT(MD_CHANGE_CLEAN));
+ bit_clear_unless(&mddev->sb_flags, BIT(MD_SB_CHANGE_PENDING),
+ BIT(MD_SB_CHANGE_DEVS) |
+ BIT(MD_SB_CHANGE_CLEAN));
return;
}
}
@@ -2321,10 +2355,10 @@ repeat:
}
if (!mddev->persistent) {
- clear_bit(MD_CHANGE_CLEAN, &mddev->flags);
- clear_bit(MD_CHANGE_DEVS, &mddev->flags);
+ clear_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags);
+ clear_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
if (!mddev->external) {
- clear_bit(MD_CHANGE_PENDING, &mddev->flags);
+ clear_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags);
rdev_for_each(rdev, mddev) {
if (rdev->badblocks.changed) {
rdev->badblocks.changed = 0;
@@ -2344,9 +2378,9 @@ repeat:
mddev->utime = ktime_get_real_seconds();
- if (test_and_clear_bit(MD_CHANGE_DEVS, &mddev->flags))
+ if (test_and_clear_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags))
force_change = 1;
- if (test_and_clear_bit(MD_CHANGE_CLEAN, &mddev->flags))
+ if (test_and_clear_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags))
/* just a clean <-> dirty transition, possibly leave spares alone,
* though if events isn't the right even/odd, we will have to do
* spares after all
@@ -2402,6 +2436,9 @@ repeat:
pr_debug("md: updating %s RAID superblock on device (in sync %d)\n",
mdname(mddev), mddev->in_sync);
+ if (mddev->queue)
+ blk_add_trace_msg(mddev->queue, "md md_update_sb");
+rewrite:
bitmap_update_sb(mddev->bitmap);
rdev_for_each(rdev, mddev) {
char b[BDEVNAME_SIZE];
@@ -2433,15 +2470,16 @@ repeat:
/* only need to write one superblock... */
break;
}
- md_super_wait(mddev);
- /* if there was a failure, MD_CHANGE_DEVS was set, and we re-write super */
+ if (md_super_wait(mddev) < 0)
+ goto rewrite;
+ /* if there was a failure, MD_SB_CHANGE_DEVS was set, and we re-write super */
if (mddev_is_clustered(mddev) && ret == 0)
md_cluster_ops->metadata_update_finish(mddev);
if (mddev->in_sync != sync_req ||
- !bit_clear_unless(&mddev->flags, BIT(MD_CHANGE_PENDING),
- BIT(MD_CHANGE_DEVS) | BIT(MD_CHANGE_CLEAN)))
+ !bit_clear_unless(&mddev->sb_flags, BIT(MD_SB_CHANGE_PENDING),
+ BIT(MD_SB_CHANGE_DEVS) | BIT(MD_SB_CHANGE_CLEAN)))
/* have to write it out again */
goto repeat;
wake_up(&mddev->sb_wait);
@@ -2485,7 +2523,7 @@ static int add_bound_rdev(struct md_rdev *rdev)
}
sysfs_notify_dirent_safe(rdev->sysfs_state);
- set_bit(MD_CHANGE_DEVS, &mddev->flags);
+ set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
if (mddev->degraded)
set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
@@ -2523,51 +2561,41 @@ struct rdev_sysfs_entry {
static ssize_t
state_show(struct md_rdev *rdev, char *page)
{
- char *sep = "";
+ char *sep = ",";
size_t len = 0;
unsigned long flags = ACCESS_ONCE(rdev->flags);
if (test_bit(Faulty, &flags) ||
- rdev->badblocks.unacked_exist) {
- len+= sprintf(page+len, "%sfaulty",sep);
- sep = ",";
- }
- if (test_bit(In_sync, &flags)) {
- len += sprintf(page+len, "%sin_sync",sep);
- sep = ",";
- }
- if (test_bit(Journal, &flags)) {
- len += sprintf(page+len, "%sjournal",sep);
- sep = ",";
- }
- if (test_bit(WriteMostly, &flags)) {
- len += sprintf(page+len, "%swrite_mostly",sep);
- sep = ",";
- }
+ (!test_bit(ExternalBbl, &flags) &&
+ rdev->badblocks.unacked_exist))
+ len += sprintf(page+len, "faulty%s", sep);
+ if (test_bit(In_sync, &flags))
+ len += sprintf(page+len, "in_sync%s", sep);
+ if (test_bit(Journal, &flags))
+ len += sprintf(page+len, "journal%s", sep);
+ if (test_bit(WriteMostly, &flags))
+ len += sprintf(page+len, "write_mostly%s", sep);
if (test_bit(Blocked, &flags) ||
(rdev->badblocks.unacked_exist
- && !test_bit(Faulty, &flags))) {
- len += sprintf(page+len, "%sblocked", sep);
- sep = ",";
- }
+ && !test_bit(Faulty, &flags)))
+ len += sprintf(page+len, "blocked%s", sep);
if (!test_bit(Faulty, &flags) &&
!test_bit(Journal, &flags) &&
- !test_bit(In_sync, &flags)) {
- len += sprintf(page+len, "%sspare", sep);
- sep = ",";
- }
- if (test_bit(WriteErrorSeen, &flags)) {
- len += sprintf(page+len, "%swrite_error", sep);
- sep = ",";
- }
- if (test_bit(WantReplacement, &flags)) {
- len += sprintf(page+len, "%swant_replacement", sep);
- sep = ",";
- }
- if (test_bit(Replacement, &flags)) {
- len += sprintf(page+len, "%sreplacement", sep);
- sep = ",";
- }
+ !test_bit(In_sync, &flags))
+ len += sprintf(page+len, "spare%s", sep);
+ if (test_bit(WriteErrorSeen, &flags))
+ len += sprintf(page+len, "write_error%s", sep);
+ if (test_bit(WantReplacement, &flags))
+ len += sprintf(page+len, "want_replacement%s", sep);
+ if (test_bit(Replacement, &flags))
+ len += sprintf(page+len, "replacement%s", sep);
+ if (test_bit(ExternalBbl, &flags))
+ len += sprintf(page+len, "external_bbl%s", sep);
+ if (test_bit(FailFast, &flags))
+ len += sprintf(page+len, "failfast%s", sep);
+
+ if (len)
+ len -= strlen(sep);
return len+sprintf(page+len, "\n");
}
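
state_show() now appends every flag with a trailing separator and trims the last one, rather than threading a pending-separator state through each branch. The same pattern standalone:

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
            char page[64];
            const char *sep = ",";
            size_t len = 0;

            len += sprintf(page + len, "in_sync%s", sep);
            len += sprintf(page + len, "failfast%s", sep);
            if (len)
                    len -= strlen(sep);     /* drop the trailing comma */
            len += sprintf(page + len, "\n");
            fputs(page, stdout);            /* prints "in_sync,failfast" */
            return 0;
    }
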
@@ -2587,6 +2615,7 @@ state_store(struct md_rdev *rdev, const char *buf, size_t len)
* so that it gets rebuilt based on bitmap
* write_error - sets WriteErrorSeen
* -write_error - clears WriteErrorSeen
+ * {,-}failfast - set/clear FailFast
*/
int err = -EINVAL;
if (cmd_match(buf, "faulty") && rdev->mddev->pers) {
@@ -2610,8 +2639,10 @@ state_store(struct md_rdev *rdev, const char *buf, size_t len)
if (err == 0) {
md_kick_rdev_from_array(rdev);
- if (mddev->pers)
- md_update_sb(mddev, 1);
+ if (mddev->pers) {
+ set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
+ md_wakeup_thread(mddev->thread);
+ }
md_new_event(mddev);
}
}
@@ -2626,6 +2657,7 @@ state_store(struct md_rdev *rdev, const char *buf, size_t len)
err = 0;
} else if (cmd_match(buf, "-blocked")) {
if (!test_bit(Faulty, &rdev->flags) &&
+ !test_bit(ExternalBbl, &rdev->flags) &&
rdev->badblocks.unacked_exist) {
/* metadata handler doesn't understand badblocks,
* so we need to fail the device
@@ -2642,6 +2674,12 @@ state_store(struct md_rdev *rdev, const char *buf, size_t len)
} else if (cmd_match(buf, "insync") && rdev->raid_disk == -1) {
set_bit(In_sync, &rdev->flags);
err = 0;
+ } else if (cmd_match(buf, "failfast")) {
+ set_bit(FailFast, &rdev->flags);
+ err = 0;
+ } else if (cmd_match(buf, "-failfast")) {
+ clear_bit(FailFast, &rdev->flags);
+ err = 0;
} else if (cmd_match(buf, "-insync") && rdev->raid_disk >= 0 &&
!test_bit(Journal, &rdev->flags)) {
if (rdev->mddev->pers == NULL) {
@@ -2708,6 +2746,13 @@ state_store(struct md_rdev *rdev, const char *buf, size_t len)
}
} else
err = -EBUSY;
+ } else if (cmd_match(buf, "external_bbl") && (rdev->mddev->external)) {
+ set_bit(ExternalBbl, &rdev->flags);
+ rdev->badblocks.shift = 0;
+ err = 0;
+ } else if (cmd_match(buf, "-external_bbl") && (rdev->mddev->external)) {
+ clear_bit(ExternalBbl, &rdev->flags);
+ err = 0;
}
if (!err)
sysfs_notify_dirent_safe(rdev->sysfs_state);
@@ -3211,10 +3256,8 @@ static struct md_rdev *md_import_device(dev_t newdev, int super_format, int supe
sector_t size;
rdev = kzalloc(sizeof(*rdev), GFP_KERNEL);
- if (!rdev) {
- printk(KERN_ERR "md: could not alloc mem for new device!\n");
+ if (!rdev)
return ERR_PTR(-ENOMEM);
- }
err = md_rdev_init(rdev);
if (err)
@@ -3231,8 +3274,7 @@ static struct md_rdev *md_import_device(dev_t newdev, int super_format, int supe
size = i_size_read(rdev->bdev->bd_inode) >> BLOCK_SIZE_BITS;
if (!size) {
- printk(KERN_WARNING
- "md: %s has zero or unknown size, marking faulty!\n",
+ pr_warn("md: %s has zero or unknown size, marking faulty!\n",
bdevname(rdev->bdev,b));
err = -EINVAL;
goto abort_free;
@@ -3242,16 +3284,13 @@ static struct md_rdev *md_import_device(dev_t newdev, int super_format, int supe
err = super_types[super_format].
load_super(rdev, NULL, super_minor);
if (err == -EINVAL) {
- printk(KERN_WARNING
- "md: %s does not have a valid v%d.%d "
- "superblock, not importing!\n",
+ pr_warn("md: %s does not have a valid v%d.%d superblock, not importing!\n",
bdevname(rdev->bdev,b),
- super_format, super_minor);
+ super_format, super_minor);
goto abort_free;
}
if (err < 0) {
- printk(KERN_WARNING
- "md: could not read %s's sb, not importing!\n",
+ pr_warn("md: could not read %s's sb, not importing!\n",
bdevname(rdev->bdev,b));
goto abort_free;
}
@@ -3287,9 +3326,7 @@ static void analyze_sbs(struct mddev *mddev)
case 0:
break;
default:
- printk( KERN_ERR \
- "md: fatal superblock inconsistency in %s"
- " -- removing from array\n",
+ pr_warn("md: fatal superblock inconsistency in %s -- removing from array\n",
bdevname(rdev->bdev,b));
md_kick_rdev_from_array(rdev);
}
@@ -3302,18 +3339,16 @@ static void analyze_sbs(struct mddev *mddev)
if (mddev->max_disks &&
(rdev->desc_nr >= mddev->max_disks ||
i > mddev->max_disks)) {
- printk(KERN_WARNING
- "md: %s: %s: only %d devices permitted\n",
- mdname(mddev), bdevname(rdev->bdev, b),
- mddev->max_disks);
+ pr_warn("md: %s: %s: only %d devices permitted\n",
+ mdname(mddev), bdevname(rdev->bdev, b),
+ mddev->max_disks);
md_kick_rdev_from_array(rdev);
continue;
}
if (rdev != freshest) {
if (super_types[mddev->major_version].
validate_super(mddev, rdev)) {
- printk(KERN_WARNING "md: kicking non-fresh %s"
- " from array!\n",
+ pr_warn("md: kicking non-fresh %s from array!\n",
bdevname(rdev->bdev,b));
md_kick_rdev_from_array(rdev);
continue;
@@ -3384,7 +3419,7 @@ safe_delay_store(struct mddev *mddev, const char *cbuf, size_t len)
unsigned long msec;
if (mddev_is_clustered(mddev)) {
- pr_info("md: Safemode is disabled for clustered mode\n");
+ pr_warn("md: Safemode is disabled for clustered mode\n");
return -EINVAL;
}
@@ -3472,8 +3507,8 @@ level_store(struct mddev *mddev, const char *buf, size_t len)
rv = -EINVAL;
if (!mddev->pers->quiesce) {
- printk(KERN_WARNING "md: %s: %s does not support online personality change\n",
- mdname(mddev), mddev->pers->name);
+ pr_warn("md: %s: %s does not support online personality change\n",
+ mdname(mddev), mddev->pers->name);
goto out_unlock;
}
@@ -3491,7 +3526,7 @@ level_store(struct mddev *mddev, const char *buf, size_t len)
pers = find_pers(level, clevel);
if (!pers || !try_module_get(pers->owner)) {
spin_unlock(&pers_lock);
- printk(KERN_WARNING "md: personality %s not loaded\n", clevel);
+ pr_warn("md: personality %s not loaded\n", clevel);
rv = -EINVAL;
goto out_unlock;
}
@@ -3505,8 +3540,8 @@ level_store(struct mddev *mddev, const char *buf, size_t len)
}
if (!pers->takeover) {
module_put(pers->owner);
- printk(KERN_WARNING "md: %s: %s does not support personality takeover\n",
- mdname(mddev), clevel);
+ pr_warn("md: %s: %s does not support personality takeover\n",
+ mdname(mddev), clevel);
rv = -EINVAL;
goto out_unlock;
}
@@ -3526,8 +3561,8 @@ level_store(struct mddev *mddev, const char *buf, size_t len)
mddev->delta_disks = 0;
mddev->reshape_backwards = 0;
module_put(pers->owner);
- printk(KERN_WARNING "md: %s: %s would not accept array\n",
- mdname(mddev), clevel);
+ pr_warn("md: %s: %s would not accept array\n",
+ mdname(mddev), clevel);
rv = PTR_ERR(priv);
goto out_unlock;
}
@@ -3570,9 +3605,8 @@ level_store(struct mddev *mddev, const char *buf, size_t len)
pers->sync_request != NULL) {
/* need to add the md_redundancy_group */
if (sysfs_create_group(&mddev->kobj, &md_redundancy_group))
- printk(KERN_WARNING
- "md: cannot register extra attributes for %s\n",
- mdname(mddev));
+ pr_warn("md: cannot register extra attributes for %s\n",
+ mdname(mddev));
mddev->sysfs_action = sysfs_get_dirent(mddev->kobj.sd, "sync_action");
}
if (oldpers->sync_request != NULL &&
@@ -3603,9 +3637,8 @@ level_store(struct mddev *mddev, const char *buf, size_t len)
clear_bit(In_sync, &rdev->flags);
else {
if (sysfs_link_rdev(mddev, rdev))
- printk(KERN_WARNING "md: cannot register rd%d"
- " for %s after level change\n",
- rdev->raid_disk, mdname(mddev));
+ pr_warn("md: cannot register rd%d for %s after level change\n",
+ rdev->raid_disk, mdname(mddev));
}
}
@@ -3618,7 +3651,7 @@ level_store(struct mddev *mddev, const char *buf, size_t len)
}
blk_set_stacking_limits(&mddev->queue->limits);
pers->run(mddev);
- set_bit(MD_CHANGE_DEVS, &mddev->flags);
+ set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
mddev_resume(mddev);
if (!mddev->thread)
md_update_sb(mddev, 1);
@@ -3813,7 +3846,7 @@ resync_start_store(struct mddev *mddev, const char *buf, size_t len)
if (!err) {
mddev->recovery_cp = n;
if (mddev->pers)
- set_bit(MD_CHANGE_CLEAN, &mddev->flags);
+ set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags);
}
mddev_unlock(mddev);
return err ?: len;
@@ -3887,7 +3920,7 @@ array_state_show(struct mddev *mddev, char *page)
st = read_auto;
break;
case 0:
- if (test_bit(MD_CHANGE_PENDING, &mddev->flags))
+ if (test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags))
st = write_pending;
else if (mddev->in_sync)
st = clean;
@@ -3925,7 +3958,8 @@ array_state_store(struct mddev *mddev, const char *buf, size_t len)
spin_lock(&mddev->lock);
if (st == active) {
restart_array(mddev);
- clear_bit(MD_CHANGE_PENDING, &mddev->flags);
+ clear_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags);
+ md_wakeup_thread(mddev->thread);
wake_up(&mddev->sb_wait);
err = 0;
} else /* st == clean */ {
@@ -3935,7 +3969,7 @@ array_state_store(struct mddev *mddev, const char *buf, size_t len)
mddev->in_sync = 1;
if (mddev->safemode == 1)
mddev->safemode = 0;
- set_bit(MD_CHANGE_CLEAN, &mddev->flags);
+ set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags);
}
err = 0;
} else
@@ -4001,7 +4035,7 @@ array_state_store(struct mddev *mddev, const char *buf, size_t len)
mddev->in_sync = 1;
if (mddev->safemode == 1)
mddev->safemode = 0;
- set_bit(MD_CHANGE_CLEAN, &mddev->flags);
+ set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags);
}
err = 0;
} else
@@ -4015,7 +4049,7 @@ array_state_store(struct mddev *mddev, const char *buf, size_t len)
err = restart_array(mddev);
if (err)
break;
- clear_bit(MD_CHANGE_PENDING, &mddev->flags);
+ clear_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags);
wake_up(&mddev->sb_wait);
err = 0;
} else {
@@ -5071,13 +5105,13 @@ static int md_alloc(dev_t dev, char *name)
/* This isn't possible, but as kobject_init_and_add is marked
* __must_check, we must do something with the result
*/
- printk(KERN_WARNING "md: cannot register %s/md - name in use\n",
- disk->disk_name);
+ pr_debug("md: cannot register %s/md - name in use\n",
+ disk->disk_name);
error = 0;
}
if (mddev->kobj.sd &&
sysfs_create_group(&mddev->kobj, &md_bitmap_group))
- printk(KERN_DEBUG "pointless warning\n");
+ pr_debug("pointless warning\n");
mutex_unlock(&mddev->open_mutex);
abort:
mutex_unlock(&disks_mutex);
@@ -5179,15 +5213,15 @@ int md_run(struct mddev *mddev)
if (mddev->dev_sectors &&
rdev->data_offset + mddev->dev_sectors
> rdev->sb_start) {
- printk("md: %s: data overlaps metadata\n",
- mdname(mddev));
+ pr_warn("md: %s: data overlaps metadata\n",
+ mdname(mddev));
return -EINVAL;
}
} else {
if (rdev->sb_start + rdev->sb_size/512
> rdev->data_offset) {
- printk("md: %s: metadata overlaps data\n",
- mdname(mddev));
+ pr_warn("md: %s: metadata overlaps data\n",
+ mdname(mddev));
return -EINVAL;
}
}
@@ -5202,11 +5236,11 @@ int md_run(struct mddev *mddev)
if (!pers || !try_module_get(pers->owner)) {
spin_unlock(&pers_lock);
if (mddev->level != LEVEL_NONE)
- printk(KERN_WARNING "md: personality for level %d is not loaded!\n",
- mddev->level);
+ pr_warn("md: personality for level %d is not loaded!\n",
+ mddev->level);
else
- printk(KERN_WARNING "md: personality for level %s is not loaded!\n",
- mddev->clevel);
+ pr_warn("md: personality for level %s is not loaded!\n",
+ mddev->clevel);
return -EINVAL;
}
spin_unlock(&pers_lock);
@@ -5236,21 +5270,16 @@ int md_run(struct mddev *mddev)
if (rdev < rdev2 &&
rdev->bdev->bd_contains ==
rdev2->bdev->bd_contains) {
- printk(KERN_WARNING
- "%s: WARNING: %s appears to be"
- " on the same physical disk as"
- " %s.\n",
- mdname(mddev),
- bdevname(rdev->bdev,b),
- bdevname(rdev2->bdev,b2));
+ pr_warn("%s: WARNING: %s appears to be on the same physical disk as %s.\n",
+ mdname(mddev),
+ bdevname(rdev->bdev,b),
+ bdevname(rdev2->bdev,b2));
warned = 1;
}
}
if (warned)
- printk(KERN_WARNING
- "True protection against single-disk"
- " failure might be compromised.\n");
+ pr_warn("True protection against single-disk failure might be compromised.\n");
}
mddev->recovery = 0;
@@ -5264,14 +5293,14 @@ int md_run(struct mddev *mddev)
err = pers->run(mddev);
if (err)
- printk(KERN_ERR "md: pers->run() failed ...\n");
+ pr_warn("md: pers->run() failed ...\n");
else if (pers->size(mddev, 0, 0) < mddev->array_sectors) {
- WARN_ONCE(!mddev->external_size, "%s: default size too small,"
- " but 'external_size' not in effect?\n", __func__);
- printk(KERN_ERR
- "md: invalid array_size %llu > default size %llu\n",
- (unsigned long long)mddev->array_sectors / 2,
- (unsigned long long)pers->size(mddev, 0, 0) / 2);
+ WARN_ONCE(!mddev->external_size,
+ "%s: default size too small, but 'external_size' not in effect?\n",
+ __func__);
+ pr_warn("md: invalid array_size %llu > default size %llu\n",
+ (unsigned long long)mddev->array_sectors / 2,
+ (unsigned long long)pers->size(mddev, 0, 0) / 2);
err = -EINVAL;
}
if (err == 0 && pers->sync_request &&
@@ -5281,8 +5310,8 @@ int md_run(struct mddev *mddev)
bitmap = bitmap_create(mddev, -1);
if (IS_ERR(bitmap)) {
err = PTR_ERR(bitmap);
- printk(KERN_ERR "%s: failed to create bitmap (%d)\n",
- mdname(mddev), err);
+ pr_warn("%s: failed to create bitmap (%d)\n",
+ mdname(mddev), err);
} else
mddev->bitmap = bitmap;
@@ -5318,9 +5347,8 @@ int md_run(struct mddev *mddev)
if (pers->sync_request) {
if (mddev->kobj.sd &&
sysfs_create_group(&mddev->kobj, &md_redundancy_group))
- printk(KERN_WARNING
- "md: cannot register extra attributes for %s\n",
- mdname(mddev));
+ pr_warn("md: cannot register extra attributes for %s\n",
+ mdname(mddev));
mddev->sysfs_action = sysfs_get_dirent_safe(mddev->kobj.sd, "sync_action");
} else if (mddev->ro == 2) /* auto-readonly not meaningful */
mddev->ro = 0;
@@ -5350,7 +5378,7 @@ int md_run(struct mddev *mddev)
set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
- if (mddev->flags & MD_UPDATE_SB_FLAGS)
+ if (mddev->sb_flags)
md_update_sb(mddev, 0);
md_new_event(mddev);
@@ -5421,8 +5449,7 @@ static int restart_array(struct mddev *mddev)
mddev->safemode = 0;
mddev->ro = 0;
set_disk_ro(disk, 0);
- printk(KERN_INFO "md: %s switched to read-write mode.\n",
- mdname(mddev));
+ pr_debug("md: %s switched to read-write mode.\n", mdname(mddev));
/* Kick recovery or resync if necessary */
set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
md_wakeup_thread(mddev->thread);
@@ -5446,6 +5473,7 @@ static void md_clean(struct mddev *mddev)
mddev->level = LEVEL_NONE;
mddev->clevel[0] = 0;
mddev->flags = 0;
+ mddev->sb_flags = 0;
mddev->ro = 0;
mddev->metadata_type[0] = 0;
mddev->chunk_sectors = 0;
@@ -5490,12 +5518,15 @@ static void __md_stop_writes(struct mddev *mddev)
del_timer_sync(&mddev->safemode_timer);
+ if (mddev->pers && mddev->pers->quiesce) {
+ mddev->pers->quiesce(mddev, 1);
+ mddev->pers->quiesce(mddev, 0);
+ }
bitmap_flush(mddev);
- md_super_wait(mddev);
if (mddev->ro == 0 &&
((!mddev->in_sync && !mddev_is_clustered(mddev)) ||
- (mddev->flags & MD_UPDATE_SB_FLAGS))) {
+ mddev->sb_flags)) {
/* mark array as shutdown cleanly */
if (!mddev_is_clustered(mddev))
mddev->in_sync = 1;
@@ -5516,8 +5547,8 @@ static void mddev_detach(struct mddev *mddev)
struct bitmap *bitmap = mddev->bitmap;
/* wait for behind writes to complete */
if (bitmap && atomic_read(&bitmap->behind_writes) > 0) {
- printk(KERN_INFO "md:%s: behind writes in progress - waiting to stop.\n",
- mdname(mddev));
+ pr_debug("md:%s: behind writes in progress - waiting to stop.\n",
+ mdname(mddev));
/* need to kick something here to make sure I/O goes? */
wait_event(bitmap->behind_wait,
atomic_read(&bitmap->behind_writes) == 0);
@@ -5578,20 +5609,20 @@ static int md_set_readonly(struct mddev *mddev, struct block_device *bdev)
* which will now never happen */
wake_up_process(mddev->sync_thread->tsk);
- if (mddev->external && test_bit(MD_CHANGE_PENDING, &mddev->flags))
+ if (mddev->external && test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags))
return -EBUSY;
mddev_unlock(mddev);
wait_event(resync_wait, !test_bit(MD_RECOVERY_RUNNING,
&mddev->recovery));
wait_event(mddev->sb_wait,
- !test_bit(MD_CHANGE_PENDING, &mddev->flags));
+ !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags));
mddev_lock_nointr(mddev);
mutex_lock(&mddev->open_mutex);
if ((mddev->pers && atomic_read(&mddev->openers) > !!bdev) ||
mddev->sync_thread ||
test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) {
- printk("md: %s still in use.\n",mdname(mddev));
+ pr_warn("md: %s still in use.\n",mdname(mddev));
if (did_freeze) {
clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
@@ -5653,7 +5684,7 @@ static int do_md_stop(struct mddev *mddev, int mode,
mddev->sysfs_active ||
mddev->sync_thread ||
test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) {
- printk("md: %s still in use.\n",mdname(mddev));
+ pr_warn("md: %s still in use.\n",mdname(mddev));
mutex_unlock(&mddev->open_mutex);
if (did_freeze) {
clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
@@ -5690,7 +5721,7 @@ static int do_md_stop(struct mddev *mddev, int mode,
* Free resources if final stop
*/
if (mode == 0) {
- printk(KERN_INFO "md: %s stopped.\n", mdname(mddev));
+ pr_info("md: %s stopped.\n", mdname(mddev));
bitmap_destroy(mddev);
if (mddev->bitmap_info.file) {
@@ -5722,17 +5753,17 @@ static void autorun_array(struct mddev *mddev)
if (list_empty(&mddev->disks))
return;
- printk(KERN_INFO "md: running: ");
+ pr_info("md: running: ");
rdev_for_each(rdev, mddev) {
char b[BDEVNAME_SIZE];
- printk("<%s>", bdevname(rdev->bdev,b));
+ pr_cont("<%s>", bdevname(rdev->bdev,b));
}
- printk("\n");
+ pr_cont("\n");
err = do_md_run(mddev);
if (err) {
- printk(KERN_WARNING "md: do_md_run() returned %d\n", err);
+ pr_warn("md: do_md_run() returned %d\n", err);
do_md_stop(mddev, 0, NULL);
}
}
@@ -5755,7 +5786,7 @@ static void autorun_devices(int part)
struct mddev *mddev;
char b[BDEVNAME_SIZE];
- printk(KERN_INFO "md: autorun ...\n");
+ pr_info("md: autorun ...\n");
while (!list_empty(&pending_raid_disks)) {
int unit;
dev_t dev;
@@ -5763,13 +5794,12 @@ static void autorun_devices(int part)
rdev0 = list_entry(pending_raid_disks.next,
struct md_rdev, same_set);
- printk(KERN_INFO "md: considering %s ...\n",
- bdevname(rdev0->bdev,b));
+ pr_debug("md: considering %s ...\n", bdevname(rdev0->bdev,b));
INIT_LIST_HEAD(&candidates);
rdev_for_each_list(rdev, tmp, &pending_raid_disks)
if (super_90_load(rdev, rdev0, 0) >= 0) {
- printk(KERN_INFO "md: adding %s ...\n",
- bdevname(rdev->bdev,b));
+ pr_debug("md: adding %s ...\n",
+ bdevname(rdev->bdev,b));
list_move(&rdev->same_set, &candidates);
}
/*
@@ -5786,8 +5816,8 @@ static void autorun_devices(int part)
unit = MINOR(dev);
}
if (rdev0->preferred_minor != unit) {
- printk(KERN_INFO "md: unit number in %s is bad: %d\n",
- bdevname(rdev0->bdev, b), rdev0->preferred_minor);
+ pr_warn("md: unit number in %s is bad: %d\n",
+ bdevname(rdev0->bdev, b), rdev0->preferred_minor);
break;
}
@@ -5796,21 +5826,17 @@ static void autorun_devices(int part)
if (!mddev || !mddev->gendisk) {
if (mddev)
mddev_put(mddev);
- printk(KERN_ERR
- "md: cannot allocate memory for md drive.\n");
break;
}
if (mddev_lock(mddev))
- printk(KERN_WARNING "md: %s locked, cannot run\n",
- mdname(mddev));
+ pr_warn("md: %s locked, cannot run\n", mdname(mddev));
else if (mddev->raid_disks || mddev->major_version
|| !list_empty(&mddev->disks)) {
- printk(KERN_WARNING
- "md: %s already running, cannot run %s\n",
+ pr_warn("md: %s already running, cannot run %s\n",
mdname(mddev), bdevname(rdev0->bdev,b));
mddev_unlock(mddev);
} else {
- printk(KERN_INFO "md: created %s\n", mdname(mddev));
+ pr_debug("md: created %s\n", mdname(mddev));
mddev->persistent = 1;
rdev_for_each_list(rdev, tmp, &candidates) {
list_del_init(&rdev->same_set);
@@ -5829,7 +5855,7 @@ static void autorun_devices(int part)
}
mddev_put(mddev);
}
- printk(KERN_INFO "md: ... autorun DONE.\n");
+ pr_info("md: ... autorun DONE.\n");
}
#endif /* !MODULE */
@@ -5964,6 +5990,8 @@ static int get_disk_info(struct mddev *mddev, void __user * arg)
info.state |= (1<<MD_DISK_JOURNAL);
if (test_bit(WriteMostly, &rdev->flags))
info.state |= (1<<MD_DISK_WRITEMOSTLY);
+ if (test_bit(FailFast, &rdev->flags))
+ info.state |= (1<<MD_DISK_FAILFAST);
} else {
info.major = info.minor = 0;
info.raid_disk = -1;
@@ -5985,8 +6013,8 @@ static int add_new_disk(struct mddev *mddev, mdu_disk_info_t *info)
if (mddev_is_clustered(mddev) &&
!(info->state & ((1 << MD_DISK_CLUSTER_ADD) | (1 << MD_DISK_CANDIDATE)))) {
- pr_err("%s: Cannot add to clustered mddev.\n",
- mdname(mddev));
+ pr_warn("%s: Cannot add to clustered mddev.\n",
+ mdname(mddev));
return -EINVAL;
}
@@ -5998,8 +6026,7 @@ static int add_new_disk(struct mddev *mddev, mdu_disk_info_t *info)
/* expecting a device which has a superblock */
rdev = md_import_device(dev, mddev->major_version, mddev->minor_version);
if (IS_ERR(rdev)) {
- printk(KERN_WARNING
- "md: md_import_device returned %ld\n",
+ pr_warn("md: md_import_device returned %ld\n",
PTR_ERR(rdev));
return PTR_ERR(rdev);
}
@@ -6010,8 +6037,7 @@ static int add_new_disk(struct mddev *mddev, mdu_disk_info_t *info)
err = super_types[mddev->major_version]
.load_super(rdev, rdev0, mddev->minor_version);
if (err < 0) {
- printk(KERN_WARNING
- "md: %s has different UUID to %s\n",
+ pr_warn("md: %s has different UUID to %s\n",
bdevname(rdev->bdev,b),
bdevname(rdev0->bdev,b2));
export_rdev(rdev);
@@ -6032,9 +6058,8 @@ static int add_new_disk(struct mddev *mddev, mdu_disk_info_t *info)
if (mddev->pers) {
int err;
if (!mddev->pers->hot_add_disk) {
- printk(KERN_WARNING
- "%s: personality does not support diskops!\n",
- mdname(mddev));
+ pr_warn("%s: personality does not support diskops!\n",
+ mdname(mddev));
return -EINVAL;
}
if (mddev->persistent)
@@ -6043,8 +6068,7 @@ static int add_new_disk(struct mddev *mddev, mdu_disk_info_t *info)
else
rdev = md_import_device(dev, -1, -1);
if (IS_ERR(rdev)) {
- printk(KERN_WARNING
- "md: md_import_device returned %ld\n",
+ pr_warn("md: md_import_device returned %ld\n",
PTR_ERR(rdev));
return PTR_ERR(rdev);
}
@@ -6075,6 +6099,10 @@ static int add_new_disk(struct mddev *mddev, mdu_disk_info_t *info)
set_bit(WriteMostly, &rdev->flags);
else
clear_bit(WriteMostly, &rdev->flags);
+ if (info->state & (1<<MD_DISK_FAILFAST))
+ set_bit(FailFast, &rdev->flags);
+ else
+ clear_bit(FailFast, &rdev->flags);
if (info->state & (1<<MD_DISK_JOURNAL)) {
struct md_rdev *rdev2;
@@ -6140,8 +6168,7 @@ static int add_new_disk(struct mddev *mddev, mdu_disk_info_t *info)
* for major_version==0 superblocks
*/
if (mddev->major_version != 0) {
- printk(KERN_WARNING "%s: ADD_NEW_DISK not supported\n",
- mdname(mddev));
+ pr_warn("%s: ADD_NEW_DISK not supported\n", mdname(mddev));
return -EINVAL;
}
@@ -6149,8 +6176,7 @@ static int add_new_disk(struct mddev *mddev, mdu_disk_info_t *info)
int err;
rdev = md_import_device(dev, -1, 0);
if (IS_ERR(rdev)) {
- printk(KERN_WARNING
- "md: error, md_import_device() returned %ld\n",
+ pr_warn("md: error, md_import_device() returned %ld\n",
PTR_ERR(rdev));
return PTR_ERR(rdev);
}
@@ -6166,9 +6192,11 @@ static int add_new_disk(struct mddev *mddev, mdu_disk_info_t *info)
if (info->state & (1<<MD_DISK_WRITEMOSTLY))
set_bit(WriteMostly, &rdev->flags);
+ if (info->state & (1<<MD_DISK_FAILFAST))
+ set_bit(FailFast, &rdev->flags);
if (!mddev->persistent) {
- printk(KERN_INFO "md: nonpersistent superblock ...\n");
+ pr_debug("md: nonpersistent superblock ...\n");
rdev->sb_start = i_size_read(rdev->bdev->bd_inode) / 512;
} else
rdev->sb_start = calc_dev_sboffset(rdev);
@@ -6207,13 +6235,17 @@ kick_rdev:
md_cluster_ops->remove_disk(mddev, rdev);
md_kick_rdev_from_array(rdev);
- md_update_sb(mddev, 1);
+ set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
+ if (mddev->thread)
+ md_wakeup_thread(mddev->thread);
+ else
+ md_update_sb(mddev, 1);
md_new_event(mddev);
return 0;
busy:
- printk(KERN_WARNING "md: cannot remove active disk %s from %s ...\n",
- bdevname(rdev->bdev,b), mdname(mddev));
+ pr_debug("md: cannot remove active disk %s from %s ...\n",
+ bdevname(rdev->bdev,b), mdname(mddev));
return -EBUSY;
}
@@ -6227,22 +6259,19 @@ static int hot_add_disk(struct mddev *mddev, dev_t dev)
return -ENODEV;
if (mddev->major_version != 0) {
- printk(KERN_WARNING "%s: HOT_ADD may only be used with"
- " version-0 superblocks.\n",
+ pr_warn("%s: HOT_ADD may only be used with version-0 superblocks.\n",
mdname(mddev));
return -EINVAL;
}
if (!mddev->pers->hot_add_disk) {
- printk(KERN_WARNING
- "%s: personality does not support diskops!\n",
+ pr_warn("%s: personality does not support diskops!\n",
mdname(mddev));
return -EINVAL;
}
rdev = md_import_device(dev, -1, 0);
if (IS_ERR(rdev)) {
- printk(KERN_WARNING
- "md: error, md_import_device() returned %ld\n",
+ pr_warn("md: error, md_import_device() returned %ld\n",
PTR_ERR(rdev));
return -EINVAL;
}
@@ -6255,8 +6284,7 @@ static int hot_add_disk(struct mddev *mddev, dev_t dev)
rdev->sectors = rdev->sb_start;
if (test_bit(Faulty, &rdev->flags)) {
- printk(KERN_WARNING
- "md: can not hot-add faulty %s disk to %s!\n",
+ pr_warn("md: can not hot-add faulty %s disk to %s!\n",
bdevname(rdev->bdev,b), mdname(mddev));
err = -EINVAL;
goto abort_export;
@@ -6276,7 +6304,9 @@ static int hot_add_disk(struct mddev *mddev, dev_t dev)
rdev->raid_disk = -1;
- md_update_sb(mddev, 1);
+ set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
+ if (!mddev->thread)
+ md_update_sb(mddev, 1);
/*
* Kick recovery, maybe this spare has to be added to the
* array immediately.
@@ -6312,23 +6342,23 @@ static int set_bitmap_file(struct mddev *mddev, int fd)
f = fget(fd);
if (f == NULL) {
- printk(KERN_ERR "%s: error: failed to get bitmap file\n",
- mdname(mddev));
+ pr_warn("%s: error: failed to get bitmap file\n",
+ mdname(mddev));
return -EBADF;
}
inode = f->f_mapping->host;
if (!S_ISREG(inode->i_mode)) {
- printk(KERN_ERR "%s: error: bitmap file must be a regular file\n",
- mdname(mddev));
+ pr_warn("%s: error: bitmap file must be a regular file\n",
+ mdname(mddev));
err = -EBADF;
} else if (!(f->f_mode & FMODE_WRITE)) {
- printk(KERN_ERR "%s: error: bitmap file must open for write\n",
- mdname(mddev));
+ pr_warn("%s: error: bitmap file must open for write\n",
+ mdname(mddev));
err = -EBADF;
} else if (atomic_read(&inode->i_writecount) != 1) {
- printk(KERN_ERR "%s: error: bitmap file is already in use\n",
- mdname(mddev));
+ pr_warn("%s: error: bitmap file is already in use\n",
+ mdname(mddev));
err = -EBUSY;
}
if (err) {
@@ -6393,8 +6423,7 @@ static int set_array_info(struct mddev *mddev, mdu_array_info_t *info)
info->major_version >= ARRAY_SIZE(super_types) ||
super_types[info->major_version].name == NULL) {
/* maybe try to auto-load a module? */
- printk(KERN_INFO
- "md: superblock version %d not known\n",
+ pr_warn("md: superblock version %d not known\n",
info->major_version);
return -EINVAL;
}
@@ -6432,9 +6461,11 @@ static int set_array_info(struct mddev *mddev, mdu_array_info_t *info)
mddev->max_disks = MD_SB_DISKS;
- if (mddev->persistent)
+ if (mddev->persistent) {
mddev->flags = 0;
- set_bit(MD_CHANGE_DEVS, &mddev->flags);
+ mddev->sb_flags = 0;
+ }
+ set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
mddev->bitmap_info.default_offset = MD_SB_BYTES >> 9;
mddev->bitmap_info.default_space = 64*2 - (MD_SB_BYTES >> 9);
@@ -6660,8 +6691,7 @@ static int update_array_info(struct mddev *mddev, mdu_array_info_t *info)
if (mddev->bitmap_info.nodes) {
/* hold PW on all the bitmap lock */
if (md_cluster_ops->lock_all_bitmaps(mddev) <= 0) {
- printk("md: can't change bitmap to none since the"
- " array is in use by more than one node\n");
+ pr_warn("md: can't change bitmap to none since the array is in use by more than one node\n");
rv = -EPERM;
md_cluster_ops->unlock_all_bitmaps(mddev);
goto err;
@@ -6829,7 +6859,7 @@ static int md_ioctl(struct block_device *bdev, fmode_t mode,
/* need to ensure recovery thread has run */
wait_event_interruptible_timeout(mddev->sb_wait,
!test_bit(MD_RECOVERY_NEEDED,
- &mddev->flags),
+ &mddev->recovery),
msecs_to_jiffies(5000));
if (cmd == STOP_ARRAY || cmd == STOP_ARRAY_RO) {
/* Need to flush page cache, and ensure no-one else opens
@@ -6847,9 +6877,8 @@ static int md_ioctl(struct block_device *bdev, fmode_t mode,
}
err = mddev_lock(mddev);
if (err) {
- printk(KERN_INFO
- "md: ioctl lock interrupted, reason %d, cmd %d\n",
- err, cmd);
+ pr_debug("md: ioctl lock interrupted, reason %d, cmd %d\n",
+ err, cmd);
goto out;
}
@@ -6864,30 +6893,24 @@ static int md_ioctl(struct block_device *bdev, fmode_t mode,
if (mddev->pers) {
err = update_array_info(mddev, &info);
if (err) {
- printk(KERN_WARNING "md: couldn't update"
- " array info. %d\n", err);
+ pr_warn("md: couldn't update array info. %d\n", err);
goto unlock;
}
goto unlock;
}
if (!list_empty(&mddev->disks)) {
- printk(KERN_WARNING
- "md: array %s already has disks!\n",
- mdname(mddev));
+ pr_warn("md: array %s already has disks!\n", mdname(mddev));
err = -EBUSY;
goto unlock;
}
if (mddev->raid_disks) {
- printk(KERN_WARNING
- "md: array %s already initialised!\n",
- mdname(mddev));
+ pr_warn("md: array %s already initialised!\n", mdname(mddev));
err = -EBUSY;
goto unlock;
}
err = set_array_info(mddev, &info);
if (err) {
- printk(KERN_WARNING "md: couldn't set"
- " array info. %d\n", err);
+ pr_warn("md: couldn't set array info. %d\n", err);
goto unlock;
}
goto unlock;
@@ -6987,11 +7010,11 @@ static int md_ioctl(struct block_device *bdev, fmode_t mode,
/* If a device failed while we were read-only, we
* need to make sure the metadata is updated now.
*/
- if (test_bit(MD_CHANGE_DEVS, &mddev->flags)) {
+ if (test_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags)) {
mddev_unlock(mddev);
wait_event(mddev->sb_wait,
- !test_bit(MD_CHANGE_DEVS, &mddev->flags) &&
- !test_bit(MD_CHANGE_PENDING, &mddev->flags));
+ !test_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags) &&
+ !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags));
mddev_lock_nointr(mddev);
}
} else {
@@ -7092,7 +7115,8 @@ static int md_open(struct block_device *bdev, fmode_t mode)
if (test_bit(MD_CLOSING, &mddev->flags)) {
mutex_unlock(&mddev->open_mutex);
- return -ENODEV;
+ err = -ENODEV;
+ goto out;
}
err = 0;
@@ -7101,6 +7125,8 @@ static int md_open(struct block_device *bdev, fmode_t mode)
check_disk_change(bdev);
out:
+ if (err)
+ mddev_put(mddev);
return err;
}
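
The md_open() hunks route the MD_CLOSING failure through the common exit so the reference taken on entry is dropped on every error path. A condensed, hedged sketch of the resulting shape, with locking and the normal open path elided:

        static int md_open(struct block_device *bdev, fmode_t mode)
        {
                struct mddev *mddev = mddev_find(bdev->bd_dev); /* takes a ref */
                int err = 0;

                if (!mddev)
                        return -ENODEV;
                if (test_bit(MD_CLOSING, &mddev->flags)) {
                        err = -ENODEV;          /* array is being torn down */
                        goto out;
                }
                /* ... normal open path leaves err == 0 ... */
        out:
                if (err)
                        mddev_put(mddev);       /* every failure drops the ref */
                return err;
        }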
@@ -7171,10 +7197,12 @@ static int md_thread(void *arg)
wait_event_interruptible_timeout
(thread->wqueue,
test_bit(THREAD_WAKEUP, &thread->flags)
- || kthread_should_stop(),
+ || kthread_should_stop() || kthread_should_park(),
thread->timeout);
clear_bit(THREAD_WAKEUP, &thread->flags);
+ if (kthread_should_park())
+ kthread_parkme();
if (!kthread_should_stop())
thread->run(thread);
}
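
md_thread() now also wakes for kthread_should_park() and parks itself, which lets callers quiesce the thread without tearing it down. A sketch of the caller side, assuming the stock kthread API and the md_thread ->tsk task pointer:

        /* Quiesce: returns only once the thread has entered kthread_parkme(). */
        kthread_park(thread->tsk);

        /* thread->run() is guaranteed not to execute here */

        kthread_unpark(thread->tsk);    /* thread resumes its wait loop */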
@@ -7588,8 +7616,8 @@ static const struct file_operations md_seq_fops = {
int register_md_personality(struct md_personality *p)
{
- printk(KERN_INFO "md: %s personality registered for level %d\n",
- p->name, p->level);
+ pr_debug("md: %s personality registered for level %d\n",
+ p->name, p->level);
spin_lock(&pers_lock);
list_add_tail(&p->list, &pers_list);
spin_unlock(&pers_lock);
@@ -7599,7 +7627,7 @@ EXPORT_SYMBOL(register_md_personality);
int unregister_md_personality(struct md_personality *p)
{
- printk(KERN_INFO "md: %s personality unregistered\n", p->name);
+ pr_debug("md: %s personality unregistered\n", p->name);
spin_lock(&pers_lock);
list_del_init(&p->list);
spin_unlock(&pers_lock);
@@ -7639,7 +7667,7 @@ int md_setup_cluster(struct mddev *mddev, int nodes)
spin_lock(&pers_lock);
/* ensure module won't be unloaded */
if (!md_cluster_ops || !try_module_get(md_cluster_mod)) {
- pr_err("can't find md-cluster module or get it's reference.\n");
+	pr_warn("can't find md-cluster module or get its reference.\n");
spin_unlock(&pers_lock);
return -ENOENT;
}
@@ -7741,8 +7769,8 @@ void md_write_start(struct mddev *mddev, struct bio *bi)
spin_lock(&mddev->lock);
if (mddev->in_sync) {
mddev->in_sync = 0;
- set_bit(MD_CHANGE_CLEAN, &mddev->flags);
- set_bit(MD_CHANGE_PENDING, &mddev->flags);
+ set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags);
+ set_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags);
md_wakeup_thread(mddev->thread);
did_change = 1;
}
@@ -7751,7 +7779,7 @@ void md_write_start(struct mddev *mddev, struct bio *bi)
if (did_change)
sysfs_notify_dirent_safe(mddev->sysfs_state);
wait_event(mddev->sb_wait,
- !test_bit(MD_CHANGE_PENDING, &mddev->flags));
+ !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags));
}
EXPORT_SYMBOL(md_write_start);
@@ -7772,7 +7800,7 @@ EXPORT_SYMBOL(md_write_end);
* attempting a GFP_KERNEL allocation while holding the mddev lock.
* Must be called with mddev_lock held.
*
- * In the ->external case MD_CHANGE_PENDING can not be cleared until mddev->lock
+ * In the ->external case MD_SB_CHANGE_PENDING can not be cleared until mddev->lock
* is dropped, so return -EAGAIN after notifying userspace.
*/
int md_allow_write(struct mddev *mddev)
@@ -7787,8 +7815,8 @@ int md_allow_write(struct mddev *mddev)
spin_lock(&mddev->lock);
if (mddev->in_sync) {
mddev->in_sync = 0;
- set_bit(MD_CHANGE_CLEAN, &mddev->flags);
- set_bit(MD_CHANGE_PENDING, &mddev->flags);
+ set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags);
+ set_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags);
if (mddev->safemode_delay &&
mddev->safemode == 0)
mddev->safemode = 1;
@@ -7798,7 +7826,7 @@ int md_allow_write(struct mddev *mddev)
} else
spin_unlock(&mddev->lock);
- if (test_bit(MD_CHANGE_PENDING, &mddev->flags))
+ if (test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags))
return -EAGAIN;
else
return 0;
@@ -7914,11 +7942,9 @@ void md_do_sync(struct md_thread *thread)
mddev2->curr_resync >= mddev->curr_resync) {
if (mddev2_minor != mddev2->md_minor) {
mddev2_minor = mddev2->md_minor;
- printk(KERN_INFO "md: delaying %s of %s"
- " until %s has finished (they"
- " share one or more physical units)\n",
- desc, mdname(mddev),
- mdname(mddev2));
+ pr_info("md: delaying %s of %s until %s has finished (they share one or more physical units)\n",
+ desc, mdname(mddev),
+ mdname(mddev2));
}
mddev_put(mddev2);
if (signal_pending(current))
@@ -7975,12 +8001,10 @@ void md_do_sync(struct md_thread *thread)
}
}
- printk(KERN_INFO "md: %s of RAID array %s\n", desc, mdname(mddev));
- printk(KERN_INFO "md: minimum _guaranteed_ speed:"
- " %d KB/sec/disk.\n", speed_min(mddev));
- printk(KERN_INFO "md: using maximum available idle IO bandwidth "
- "(but not more than %d KB/sec) for %s.\n",
- speed_max(mddev), desc);
+ pr_info("md: %s of RAID array %s\n", desc, mdname(mddev));
+ pr_debug("md: minimum _guaranteed_ speed: %d KB/sec/disk.\n", speed_min(mddev));
+ pr_debug("md: using maximum available idle IO bandwidth (but not more than %d KB/sec) for %s.\n",
+ speed_max(mddev), desc);
is_mddev_idle(mddev, 1); /* this initializes IO event counters */
@@ -7997,16 +8021,15 @@ void md_do_sync(struct md_thread *thread)
* Tune reconstruction:
*/
window = 32*(PAGE_SIZE/512);
- printk(KERN_INFO "md: using %dk window, over a total of %lluk.\n",
- window/2, (unsigned long long)max_sectors/2);
+ pr_debug("md: using %dk window, over a total of %lluk.\n",
+ window/2, (unsigned long long)max_sectors/2);
atomic_set(&mddev->recovery_active, 0);
last_check = 0;
if (j>2) {
- printk(KERN_INFO
- "md: resuming %s of %s from checkpoint.\n",
- desc, mdname(mddev));
+ pr_debug("md: resuming %s of %s from checkpoint.\n",
+ desc, mdname(mddev));
mddev->curr_resync = j;
} else
mddev->curr_resync = 3; /* no longer delayed */
@@ -8038,7 +8061,7 @@ void md_do_sync(struct md_thread *thread)
j > mddev->recovery_cp)
mddev->recovery_cp = j;
update_time = jiffies;
- set_bit(MD_CHANGE_CLEAN, &mddev->flags);
+ set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags);
sysfs_notify(&mddev->kobj, NULL, "sync_completed");
}
@@ -8133,9 +8156,9 @@ void md_do_sync(struct md_thread *thread)
}
}
}
- printk(KERN_INFO "md: %s: %s %s.\n",mdname(mddev), desc,
- test_bit(MD_RECOVERY_INTR, &mddev->recovery)
- ? "interrupted" : "done");
+	pr_info("md: %s: %s %s.\n", mdname(mddev), desc,
+ test_bit(MD_RECOVERY_INTR, &mddev->recovery)
+ ? "interrupted" : "done");
/*
* this also signals 'finished resyncing' to md_stop
*/
@@ -8155,9 +8178,8 @@ void md_do_sync(struct md_thread *thread)
if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
if (mddev->curr_resync >= mddev->recovery_cp) {
- printk(KERN_INFO
- "md: checkpointing %s of %s.\n",
- desc, mdname(mddev));
+ pr_debug("md: checkpointing %s of %s.\n",
+ desc, mdname(mddev));
if (test_bit(MD_RECOVERY_ERROR,
&mddev->recovery))
mddev->recovery_cp =
@@ -8187,8 +8209,8 @@ void md_do_sync(struct md_thread *thread)
/* set CHANGE_PENDING here since maybe another update is needed,
* so other nodes are informed. It should be harmless for normal
* raid */
- set_mask_bits(&mddev->flags, 0,
- BIT(MD_CHANGE_PENDING) | BIT(MD_CHANGE_DEVS));
+ set_mask_bits(&mddev->sb_flags, 0,
+ BIT(MD_SB_CHANGE_PENDING) | BIT(MD_SB_CHANGE_DEVS));
spin_lock(&mddev->lock);
if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
@@ -8288,12 +8310,12 @@ static int remove_and_add_spares(struct mddev *mddev,
if (!test_bit(Journal, &rdev->flags))
spares++;
md_new_event(mddev);
- set_bit(MD_CHANGE_DEVS, &mddev->flags);
+ set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
}
}
no_add:
if (removed)
- set_bit(MD_CHANGE_DEVS, &mddev->flags);
+ set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
return spares;
}
@@ -8305,8 +8327,8 @@ static void md_start_sync(struct work_struct *ws)
mddev,
"resync");
if (!mddev->sync_thread) {
- printk(KERN_ERR "%s: could not start resync thread...\n",
- mdname(mddev));
+ pr_warn("%s: could not start resync thread...\n",
+ mdname(mddev));
/* leave the spares where they are, it shouldn't hurt */
clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
clear_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
@@ -8356,8 +8378,8 @@ void md_check_recovery(struct mddev *mddev)
if (signal_pending(current)) {
if (mddev->pers->sync_request && !mddev->external) {
- printk(KERN_INFO "md: %s in immediate safe mode\n",
- mdname(mddev));
+ pr_debug("md: %s in immediate safe mode\n",
+ mdname(mddev));
mddev->safemode = 2;
}
flush_signals(current);
@@ -8366,7 +8388,7 @@ void md_check_recovery(struct mddev *mddev)
if (mddev->ro && !test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))
return;
if ( ! (
- (mddev->flags & MD_UPDATE_SB_FLAGS & ~ (1<<MD_CHANGE_PENDING)) ||
+ (mddev->sb_flags & ~ (1<<MD_SB_CHANGE_PENDING)) ||
test_bit(MD_RECOVERY_NEEDED, &mddev->recovery) ||
test_bit(MD_RECOVERY_DONE, &mddev->recovery) ||
test_bit(MD_RELOAD_SB, &mddev->flags) ||
@@ -8404,7 +8426,7 @@ void md_check_recovery(struct mddev *mddev)
md_reap_sync_thread(mddev);
clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
- clear_bit(MD_CHANGE_PENDING, &mddev->flags);
+ clear_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags);
goto unlock;
}
@@ -8432,7 +8454,7 @@ void md_check_recovery(struct mddev *mddev)
mddev->recovery_cp == MaxSector) {
mddev->in_sync = 1;
did_change = 1;
- set_bit(MD_CHANGE_CLEAN, &mddev->flags);
+ set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags);
}
if (mddev->safemode == 1)
mddev->safemode = 0;
@@ -8441,7 +8463,7 @@ void md_check_recovery(struct mddev *mddev)
sysfs_notify_dirent_safe(mddev->sysfs_state);
}
- if (mddev->flags & MD_UPDATE_SB_FLAGS)
+ if (mddev->sb_flags)
md_update_sb(mddev, 0);
if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) &&
@@ -8537,7 +8559,7 @@ void md_reap_sync_thread(struct mddev *mddev)
if (mddev->pers->spare_active(mddev)) {
sysfs_notify(&mddev->kobj, NULL,
"degraded");
- set_bit(MD_CHANGE_DEVS, &mddev->flags);
+ set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
}
}
if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
@@ -8552,7 +8574,7 @@ void md_reap_sync_thread(struct mddev *mddev)
rdev->saved_raid_disk = -1;
md_update_sb(mddev, 1);
- /* MD_CHANGE_PENDING should be cleared by md_update_sb, so we can
+ /* MD_SB_CHANGE_PENDING should be cleared by md_update_sb, so we can
* call resync_finish here if MD_CLUSTER_RESYNC_LOCKED is set by
* clustered raid */
if (test_and_clear_bit(MD_CLUSTER_RESYNC_LOCKED, &mddev->flags))
@@ -8614,9 +8636,12 @@ int rdev_set_badblocks(struct md_rdev *rdev, sector_t s, int sectors,
rv = badblocks_set(&rdev->badblocks, s, sectors, 0);
if (rv == 0) {
/* Make sure they get written out promptly */
+ if (test_bit(ExternalBbl, &rdev->flags))
+ sysfs_notify(&rdev->kobj, NULL,
+ "unacknowledged_bad_blocks");
sysfs_notify_dirent_safe(rdev->sysfs_state);
- set_mask_bits(&mddev->flags, 0,
- BIT(MD_CHANGE_CLEAN) | BIT(MD_CHANGE_PENDING));
+ set_mask_bits(&mddev->sb_flags, 0,
+ BIT(MD_SB_CHANGE_CLEAN) | BIT(MD_SB_CHANGE_PENDING));
md_wakeup_thread(rdev->mddev->thread);
return 1;
} else
@@ -8627,12 +8652,15 @@ EXPORT_SYMBOL_GPL(rdev_set_badblocks);
int rdev_clear_badblocks(struct md_rdev *rdev, sector_t s, int sectors,
int is_new)
{
+ int rv;
if (is_new)
s += rdev->new_data_offset;
else
s += rdev->data_offset;
- return badblocks_clear(&rdev->badblocks,
- s, sectors);
+ rv = badblocks_clear(&rdev->badblocks, s, sectors);
+ if ((rv == 0) && test_bit(ExternalBbl, &rdev->flags))
+ sysfs_notify(&rdev->kobj, NULL, "bad_blocks");
+ return rv;
}
EXPORT_SYMBOL_GPL(rdev_clear_badblocks);
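
When ExternalBbl is set, both helpers above push bad-block changes to userspace with sysfs_notify(). A hedged userspace sketch of how an external metadata manager might wait on one of those attributes (path and helper name are illustrative):

        #include <fcntl.h>
        #include <poll.h>
        #include <unistd.h>

        static int wait_badblocks(const char *attr)
        {
                char buf[4096];
                struct pollfd pfd;
                int n, fd = open(attr, O_RDONLY); /* e.g. .../md/dev-sda/bad_blocks */

                if (fd < 0)
                        return -1;
                pread(fd, buf, sizeof(buf), 0);         /* arm the notification */
                pfd.fd = fd;
                pfd.events = POLLPRI;
                poll(&pfd, 1, -1);                      /* sysfs_notify() raises POLLPRI */
                n = pread(fd, buf, sizeof(buf), 0);     /* fetch the updated list */
                close(fd);
                return n;
        }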
@@ -8749,7 +8777,7 @@ static void check_sb_changes(struct mddev *mddev, struct md_rdev *rdev)
rdev2->saved_raid_disk = role;
ret = remove_and_add_spares(mddev, rdev2);
pr_info("Activated spare: %s\n",
- bdevname(rdev2->bdev,b));
+ bdevname(rdev2->bdev,b));
/* wakeup mddev->thread here, so array could
* perform resync with the new activated disk */
set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
@@ -8785,15 +8813,18 @@ static int read_rdev(struct mddev *mddev, struct md_rdev *rdev)
* variable in case we err in the future
*/
rdev->sb_page = NULL;
- alloc_disk_sb(rdev);
- ClearPageUptodate(rdev->sb_page);
- rdev->sb_loaded = 0;
- err = super_types[mddev->major_version].load_super(rdev, NULL, mddev->minor_version);
-
+ err = alloc_disk_sb(rdev);
+ if (err == 0) {
+ ClearPageUptodate(rdev->sb_page);
+ rdev->sb_loaded = 0;
+ err = super_types[mddev->major_version].
+ load_super(rdev, NULL, mddev->minor_version);
+ }
if (err < 0) {
pr_warn("%s: %d Could not reload rdev(%d) err: %d. Restoring old values\n",
__func__, __LINE__, rdev->desc_nr, err);
- put_page(rdev->sb_page);
+ if (rdev->sb_page)
+ put_page(rdev->sb_page);
rdev->sb_page = swapout;
rdev->sb_loaded = 1;
return err;
@@ -8871,9 +8902,6 @@ void md_autodetect_dev(dev_t dev)
mutex_lock(&detected_devices_mutex);
list_add_tail(&node_detected_dev->list, &all_detected_devices);
mutex_unlock(&detected_devices_mutex);
- } else {
- printk(KERN_CRIT "md: md_autodetect_dev: kzalloc failed"
- ", skipping dev(%d,%d)\n", MAJOR(dev), MINOR(dev));
}
}
@@ -8887,7 +8915,7 @@ static void autostart_arrays(int part)
i_scanned = 0;
i_passed = 0;
- printk(KERN_INFO "md: Autodetecting RAID arrays.\n");
+ pr_info("md: Autodetecting RAID arrays.\n");
mutex_lock(&detected_devices_mutex);
while (!list_empty(&all_detected_devices) && i_scanned < INT_MAX) {
@@ -8912,8 +8940,7 @@ static void autostart_arrays(int part)
}
mutex_unlock(&detected_devices_mutex);
- printk(KERN_INFO "md: Scanned %d and added %d devices.\n",
- i_scanned, i_passed);
+ pr_debug("md: Scanned %d and added %d devices.\n", i_scanned, i_passed);
autorun_devices(part);
}
diff --git a/drivers/md/md.h b/drivers/md/md.h
index 2b2041773e79..e38936d05df1 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -30,6 +30,16 @@
#define MaxSector (~(sector_t)0)
/*
+ * These flags should really be called "NO_RETRY" rather than
+ * "FAILFAST" because they don't make any promise about time lapse,
+ * only about the number of retries, which will be zero.
+ * REQ_FAILFAST_DRIVER is not included because
+ * Commit: 4a27446f3e39 ("[SCSI] modify scsi to handle new fail fast flags.")
+ * seems to suggest that the errors it avoids retrying should usually
+ * be retried.
+ */
+#define MD_FAILFAST (REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT)
+/*
* MD's 'extended' device
*/
struct md_rdev {
@@ -168,6 +178,19 @@ enum flag_bits {
* so it is safe to remove without
* another synchronize_rcu() call.
*/
+ ExternalBbl, /* External metadata provides bad
+ * block management for a disk
+ */
+ FailFast, /* Minimal retries should be attempted on
+ * this device, so use REQ_FAILFAST_DEV.
+ * Also don't try to repair failed reads.
+				 * It is expected that no bad block log
+ * is present.
+ */
+ LastDev, /* Seems to be the last working dev as
+ * it didn't fail, so don't use FailFast
+ * any more for metadata
+ */
};
static inline int is_badblock(struct md_rdev *rdev, sector_t s, int sectors,
@@ -189,6 +212,31 @@ extern int rdev_clear_badblocks(struct md_rdev *rdev, sector_t s, int sectors,
int is_new);
struct md_cluster_info;
+enum mddev_flags {
+ MD_ARRAY_FIRST_USE, /* First use of array, needs initialization */
+ MD_CLOSING, /* If set, we are closing the array, do not open
+ * it then */
+ MD_JOURNAL_CLEAN, /* A raid with journal is already clean */
+ MD_HAS_JOURNAL, /* The raid array has journal feature set */
+ MD_RELOAD_SB, /* Reload the superblock because another node
+ * updated it.
+ */
+ MD_CLUSTER_RESYNC_LOCKED, /* cluster raid only, which means node
+ * already took resync lock, need to
+ * release the lock */
+ MD_FAILFAST_SUPPORTED, /* Using MD_FAILFAST on metadata writes is
+ * supported as calls to md_error() will
+ * never cause the array to become failed.
+ */
+};
+
+enum mddev_sb_flags {
+ MD_SB_CHANGE_DEVS, /* Some device status has changed */
+ MD_SB_CHANGE_CLEAN, /* transition to or from 'clean' */
+ MD_SB_CHANGE_PENDING, /* switch from 'clean' to 'active' in progress */
+ MD_SB_NEED_REWRITE, /* metadata write needs to be repeated */
+};
+
struct mddev {
void *private;
struct md_personality *pers;
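
With the superblock bits split into their own word, "is a metadata write pending" becomes a plain test of mddev->sb_flags, as the md_check_recovery() hunks earlier show. The idiom used throughout this patch, condensed:

        /* requester: mark the change and kick the array thread */
        set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
        md_wakeup_thread(mddev->thread);

        /* daemon: anything set besides the transient PENDING bit
         * means md_update_sb() has work to do */
        if (mddev->sb_flags & ~BIT(MD_SB_CHANGE_PENDING))
                md_update_sb(mddev, 0);

This also retires the old MD_UPDATE_SB_FLAGS mask, which had to carve the update bits out of the shared flags word.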
@@ -196,21 +244,7 @@ struct mddev {
int md_minor;
struct list_head disks;
unsigned long flags;
-#define MD_CHANGE_DEVS 0 /* Some device status has changed */
-#define MD_CHANGE_CLEAN 1 /* transition to or from 'clean' */
-#define MD_CHANGE_PENDING 2 /* switch from 'clean' to 'active' in progress */
-#define MD_UPDATE_SB_FLAGS (1 | 2 | 4) /* If these are set, md_update_sb needed */
-#define MD_ARRAY_FIRST_USE 3 /* First use of array, needs initialization */
-#define MD_CLOSING 4 /* If set, we are closing the array, do not open
- * it then */
-#define MD_JOURNAL_CLEAN 5 /* A raid with journal is already clean */
-#define MD_HAS_JOURNAL 6 /* The raid array has journal feature set */
-#define MD_RELOAD_SB 7 /* Reload the superblock because another node
- * updated it.
- */
-#define MD_CLUSTER_RESYNC_LOCKED 8 /* cluster raid only, which means node
- * already took resync lock, need to
- * release the lock */
+ unsigned long sb_flags;
int suspended;
atomic_t active_io;
@@ -304,31 +338,6 @@ struct mddev {
int parallel_resync;
int ok_start_degraded;
- /* recovery/resync flags
- * NEEDED: we might need to start a resync/recover
- * RUNNING: a thread is running, or about to be started
- * SYNC: actually doing a resync, not a recovery
- * RECOVER: doing recovery, or need to try it.
- * INTR: resync needs to be aborted for some reason
- * DONE: thread is done and is waiting to be reaped
- * REQUEST: user-space has requested a sync (used with SYNC)
- * CHECK: user-space request for check-only, no repair
- * RESHAPE: A reshape is happening
- * ERROR: sync-action interrupted because io-error
- *
- * If neither SYNC or RESHAPE are set, then it is a recovery.
- */
-#define MD_RECOVERY_RUNNING 0
-#define MD_RECOVERY_SYNC 1
-#define MD_RECOVERY_RECOVER 2
-#define MD_RECOVERY_INTR 3
-#define MD_RECOVERY_DONE 4
-#define MD_RECOVERY_NEEDED 5
-#define MD_RECOVERY_REQUESTED 6
-#define MD_RECOVERY_CHECK 7
-#define MD_RECOVERY_RESHAPE 8
-#define MD_RECOVERY_FROZEN 9
-#define MD_RECOVERY_ERROR 10
unsigned long recovery;
/* If a RAID personality determines that recovery (of a particular
@@ -442,6 +451,23 @@ struct mddev {
unsigned int good_device_nr; /* good device num within cluster raid */
};
+enum recovery_flags {
+ /*
+ * If neither SYNC or RESHAPE are set, then it is a recovery.
+ */
+ MD_RECOVERY_RUNNING, /* a thread is running, or about to be started */
+ MD_RECOVERY_SYNC, /* actually doing a resync, not a recovery */
+ MD_RECOVERY_RECOVER, /* doing recovery, or need to try it. */
+ MD_RECOVERY_INTR, /* resync needs to be aborted for some reason */
+ MD_RECOVERY_DONE, /* thread is done and is waiting to be reaped */
+ MD_RECOVERY_NEEDED, /* we might need to start a resync/recover */
+ MD_RECOVERY_REQUESTED, /* user-space has requested a sync (used with SYNC) */
+ MD_RECOVERY_CHECK, /* user-space request for check-only, no repair */
+ MD_RECOVERY_RESHAPE, /* A reshape is happening */
+ MD_RECOVERY_FROZEN, /* User request to abort, and not restart, any action */
+ MD_RECOVERY_ERROR, /* sync-action interrupted because io-error */
+};
+
static inline int __must_check mddev_lock(struct mddev *mddev)
{
return mutex_lock_interruptible(&mddev->reconfig_mutex);
@@ -623,7 +649,7 @@ extern int mddev_congested(struct mddev *mddev, int bits);
extern void md_flush_request(struct mddev *mddev, struct bio *bio);
extern void md_super_write(struct mddev *mddev, struct md_rdev *rdev,
sector_t sector, int size, struct page *page);
-extern void md_super_wait(struct mddev *mddev);
+extern int md_super_wait(struct mddev *mddev);
extern int sync_page_io(struct md_rdev *rdev, sector_t sector, int size,
struct page *page, int op, int op_flags,
bool metadata_op);
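
md_super_wait() changing from void to int presumably lets md_update_sb() learn that a fail-fast metadata write was dropped and must be reissued (cf. MD_SB_NEED_REWRITE above). A hedged sketch of a caller using the new return value; the loop shape is illustrative:

        rewrite:
                md_super_write(mddev, rdev, rdev->sb_start, rdev->sb_size,
                               rdev->sb_page);
                if (md_super_wait(mddev) < 0)
                        goto rewrite;   /* failfast write failed: repeat it */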
diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c
index 673efbd6fc47..aa8c4e5c1ee2 100644
--- a/drivers/md/multipath.c
+++ b/drivers/md/multipath.c
@@ -52,7 +52,7 @@ static int multipath_map (struct mpconf *conf)
}
rcu_read_unlock();
- printk(KERN_ERR "multipath_map(): no more operational IO paths?\n");
+ pr_crit_ratelimited("multipath_map(): no more operational IO paths?\n");
return (-1);
}
@@ -97,9 +97,9 @@ static void multipath_end_request(struct bio *bio)
*/
char b[BDEVNAME_SIZE];
md_error (mp_bh->mddev, rdev);
- printk(KERN_ERR "multipath: %s: rescheduling sector %llu\n",
- bdevname(rdev->bdev,b),
- (unsigned long long)bio->bi_iter.bi_sector);
+ pr_info("multipath: %s: rescheduling sector %llu\n",
+ bdevname(rdev->bdev,b),
+ (unsigned long long)bio->bi_iter.bi_sector);
multipath_reschedule_retry(mp_bh);
} else
multipath_end_bh_io(mp_bh, bio->bi_error);
@@ -130,7 +130,7 @@ static void multipath_make_request(struct mddev *mddev, struct bio * bio)
}
multipath = conf->multipaths + mp_bh->path;
- bio_init(&mp_bh->bio);
+ bio_init(&mp_bh->bio, NULL, 0);
__bio_clone_fast(&mp_bh->bio, bio);
mp_bh->bio.bi_iter.bi_sector += multipath->rdev->data_offset;
@@ -194,8 +194,7 @@ static void multipath_error (struct mddev *mddev, struct md_rdev *rdev)
* first check if this is a queued request for a device
* which has just failed.
*/
- printk(KERN_ALERT
- "multipath: only one IO path left and IO error.\n");
+ pr_warn("multipath: only one IO path left and IO error.\n");
/* leave it active... it's all we have */
return;
}
@@ -209,11 +208,9 @@ static void multipath_error (struct mddev *mddev, struct md_rdev *rdev)
spin_unlock_irqrestore(&conf->device_lock, flags);
}
set_bit(Faulty, &rdev->flags);
- set_bit(MD_CHANGE_DEVS, &mddev->flags);
- printk(KERN_ALERT "multipath: IO failure on %s,"
- " disabling IO path.\n"
- "multipath: Operation continuing"
- " on %d IO paths.\n",
+ set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
+ pr_err("multipath: IO failure on %s, disabling IO path.\n"
+ "multipath: Operation continuing on %d IO paths.\n",
bdevname(rdev->bdev, b),
conf->raid_disks - mddev->degraded);
}
@@ -223,21 +220,21 @@ static void print_multipath_conf (struct mpconf *conf)
int i;
struct multipath_info *tmp;
- printk("MULTIPATH conf printout:\n");
+ pr_debug("MULTIPATH conf printout:\n");
if (!conf) {
- printk("(conf==NULL)\n");
+ pr_debug("(conf==NULL)\n");
return;
}
- printk(" --- wd:%d rd:%d\n", conf->raid_disks - conf->mddev->degraded,
- conf->raid_disks);
+ pr_debug(" --- wd:%d rd:%d\n", conf->raid_disks - conf->mddev->degraded,
+ conf->raid_disks);
for (i = 0; i < conf->raid_disks; i++) {
char b[BDEVNAME_SIZE];
tmp = conf->multipaths + i;
if (tmp->rdev)
- printk(" disk%d, o:%d, dev:%s\n",
- i,!test_bit(Faulty, &tmp->rdev->flags),
- bdevname(tmp->rdev->bdev,b));
+ pr_debug(" disk%d, o:%d, dev:%s\n",
+ i,!test_bit(Faulty, &tmp->rdev->flags),
+ bdevname(tmp->rdev->bdev,b));
}
}
@@ -292,8 +289,7 @@ static int multipath_remove_disk(struct mddev *mddev, struct md_rdev *rdev)
if (rdev == p->rdev) {
if (test_bit(In_sync, &rdev->flags) ||
atomic_read(&rdev->nr_pending)) {
- printk(KERN_ERR "hot-remove-disk, slot %d is identified"
- " but is still operational!\n", number);
+ pr_warn("hot-remove-disk, slot %d is identified but is still operational!\n", number);
err = -EBUSY;
goto abort;
}
@@ -346,16 +342,14 @@ static void multipathd(struct md_thread *thread)
bio->bi_iter.bi_sector = mp_bh->master_bio->bi_iter.bi_sector;
if ((mp_bh->path = multipath_map (conf))<0) {
- printk(KERN_ALERT "multipath: %s: unrecoverable IO read"
- " error for block %llu\n",
- bdevname(bio->bi_bdev,b),
- (unsigned long long)bio->bi_iter.bi_sector);
+ pr_err("multipath: %s: unrecoverable IO read error for block %llu\n",
+ bdevname(bio->bi_bdev,b),
+ (unsigned long long)bio->bi_iter.bi_sector);
multipath_end_bh_io(mp_bh, -EIO);
} else {
- printk(KERN_ERR "multipath: %s: redirecting sector %llu"
- " to another IO path\n",
- bdevname(bio->bi_bdev,b),
- (unsigned long long)bio->bi_iter.bi_sector);
+ pr_err("multipath: %s: redirecting sector %llu to another IO path\n",
+ bdevname(bio->bi_bdev,b),
+ (unsigned long long)bio->bi_iter.bi_sector);
*bio = *(mp_bh->master_bio);
bio->bi_iter.bi_sector +=
conf->multipaths[mp_bh->path].rdev->data_offset;
@@ -389,8 +383,8 @@ static int multipath_run (struct mddev *mddev)
return -EINVAL;
if (mddev->level != LEVEL_MULTIPATH) {
- printk("multipath: %s: raid level not set to multipath IO (%d)\n",
- mdname(mddev), mddev->level);
+ pr_warn("multipath: %s: raid level not set to multipath IO (%d)\n",
+ mdname(mddev), mddev->level);
goto out;
}
/*
@@ -401,21 +395,13 @@ static int multipath_run (struct mddev *mddev)
conf = kzalloc(sizeof(struct mpconf), GFP_KERNEL);
mddev->private = conf;
- if (!conf) {
- printk(KERN_ERR
- "multipath: couldn't allocate memory for %s\n",
- mdname(mddev));
+ if (!conf)
goto out;
- }
conf->multipaths = kzalloc(sizeof(struct multipath_info)*mddev->raid_disks,
GFP_KERNEL);
- if (!conf->multipaths) {
- printk(KERN_ERR
- "multipath: couldn't allocate memory for %s\n",
- mdname(mddev));
+ if (!conf->multipaths)
goto out_free_conf;
- }
working_disks = 0;
rdev_for_each(rdev, mddev) {
@@ -439,7 +425,7 @@ static int multipath_run (struct mddev *mddev)
INIT_LIST_HEAD(&conf->retry_list);
if (!working_disks) {
- printk(KERN_ERR "multipath: no operational IO paths for %s\n",
+ pr_warn("multipath: no operational IO paths for %s\n",
mdname(mddev));
goto out_free_conf;
}
@@ -447,27 +433,17 @@ static int multipath_run (struct mddev *mddev)
conf->pool = mempool_create_kmalloc_pool(NR_RESERVED_BUFS,
sizeof(struct multipath_bh));
- if (conf->pool == NULL) {
- printk(KERN_ERR
- "multipath: couldn't allocate memory for %s\n",
- mdname(mddev));
+ if (conf->pool == NULL)
goto out_free_conf;
- }
- {
- mddev->thread = md_register_thread(multipathd, mddev,
- "multipath");
- if (!mddev->thread) {
- printk(KERN_ERR "multipath: couldn't allocate thread"
- " for %s\n", mdname(mddev));
- goto out_free_conf;
- }
- }
+ mddev->thread = md_register_thread(multipathd, mddev,
+ "multipath");
+ if (!mddev->thread)
+ goto out_free_conf;
- printk(KERN_INFO
- "multipath: array %s active with %d out of %d IO paths\n",
+ pr_info("multipath: array %s active with %d out of %d IO paths\n",
mdname(mddev), conf->raid_disks - mddev->degraded,
- mddev->raid_disks);
+ mddev->raid_disks);
/*
* Ok, everything is just fine now
*/
diff --git a/drivers/md/persistent-data/dm-array.c b/drivers/md/persistent-data/dm-array.c
index e83047cbb2da..7938cd21fa4c 100644
--- a/drivers/md/persistent-data/dm-array.c
+++ b/drivers/md/persistent-data/dm-array.c
@@ -700,13 +700,11 @@ static int populate_ablock_with_values(struct dm_array_info *info, struct array_
{
int r;
unsigned i;
- uint32_t nr_entries;
struct dm_btree_value_type *vt = &info->value_type;
BUG_ON(le32_to_cpu(ab->nr_entries));
BUG_ON(new_nr > le32_to_cpu(ab->max_entries));
- nr_entries = le32_to_cpu(ab->nr_entries);
for (i = 0; i < new_nr; i++) {
r = fn(base + i, element_at(info, ab, i), context);
if (r)
diff --git a/drivers/md/persistent-data/dm-block-manager.c b/drivers/md/persistent-data/dm-block-manager.c
index 1e33dd51c21f..a6dde7cab458 100644
--- a/drivers/md/persistent-data/dm-block-manager.c
+++ b/drivers/md/persistent-data/dm-block-manager.c
@@ -18,6 +18,8 @@
/*----------------------------------------------------------------*/
+#ifdef CONFIG_DM_DEBUG_BLOCK_MANAGER_LOCKING
+
/*
* This is a read/write semaphore with a couple of differences.
*
@@ -302,6 +304,18 @@ static void report_recursive_bug(dm_block_t b, int r)
(unsigned long long) b);
}
+#else /* !CONFIG_DM_DEBUG_BLOCK_MANAGER_LOCKING */
+
+#define bl_init(x) do { } while (0)
+#define bl_down_read(x) 0
+#define bl_down_read_nonblock(x) 0
+#define bl_up_read(x) do { } while (0)
+#define bl_down_write(x) 0
+#define bl_up_write(x) do { } while (0)
+#define report_recursive_bug(x, y) do { } while (0)
+
+#endif /* CONFIG_DM_DEBUG_BLOCK_MANAGER_LOCKING */
+
/*----------------------------------------------------------------*/
/*
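
Because the stub macros above discard their argument, call sites can keep passing &aux->lock even in configurations where struct buffer_aux (below) omits the lock member: the argument text disappears during preprocessing and is never compiled. A sketch of an unchanged call site under both configurations:

        int r = bl_down_read(&aux->lock);       /* real rwsem when the debug
                                                 * option is on, constant 0
                                                 * when it is off */
        if (r < 0)
                return r;
        /* ... access the block data ... */
        bl_up_read(&aux->lock);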
@@ -330,8 +344,11 @@ EXPORT_SYMBOL_GPL(dm_block_data);
struct buffer_aux {
struct dm_block_validator *validator;
- struct block_lock lock;
int write_locked;
+
+#ifdef CONFIG_DM_DEBUG_BLOCK_MANAGER_LOCKING
+ struct block_lock lock;
+#endif
};
static void dm_block_manager_alloc_callback(struct dm_buffer *buf)
diff --git a/drivers/md/persistent-data/dm-space-map-common.c b/drivers/md/persistent-data/dm-space-map-common.c
index 306d2e4502c4..4c28608a0c94 100644
--- a/drivers/md/persistent-data/dm-space-map-common.c
+++ b/drivers/md/persistent-data/dm-space-map-common.c
@@ -464,7 +464,8 @@ static int sm_ll_mutate(struct ll_disk *ll, dm_block_t b,
ll->nr_allocated--;
le32_add_cpu(&ie_disk.nr_free, 1);
ie_disk.none_free_before = cpu_to_le32(min(le32_to_cpu(ie_disk.none_free_before), bit));
- }
+ } else
+ *ev = SM_NONE;
return ll->save_ie(ll, index, &ie_disk);
}
@@ -547,7 +548,6 @@ static int metadata_ll_init_index(struct ll_disk *ll)
if (r < 0)
return r;
- memcpy(dm_block_data(b), &ll->mi_le, sizeof(ll->mi_le));
ll->bitmap_root = dm_block_location(b);
dm_tm_unlock(ll->tm, b);
diff --git a/drivers/md/persistent-data/dm-space-map-metadata.c b/drivers/md/persistent-data/dm-space-map-metadata.c
index 7e44005595c1..20557e2c60c6 100644
--- a/drivers/md/persistent-data/dm-space-map-metadata.c
+++ b/drivers/md/persistent-data/dm-space-map-metadata.c
@@ -775,17 +775,15 @@ int dm_sm_metadata_create(struct dm_space_map *sm,
memcpy(&smm->sm, &bootstrap_ops, sizeof(smm->sm));
r = sm_ll_new_metadata(&smm->ll, tm);
+ if (!r) {
+ if (nr_blocks > DM_SM_METADATA_MAX_BLOCKS)
+ nr_blocks = DM_SM_METADATA_MAX_BLOCKS;
+ r = sm_ll_extend(&smm->ll, nr_blocks);
+ }
+ memcpy(&smm->sm, &ops, sizeof(smm->sm));
if (r)
return r;
- if (nr_blocks > DM_SM_METADATA_MAX_BLOCKS)
- nr_blocks = DM_SM_METADATA_MAX_BLOCKS;
- r = sm_ll_extend(&smm->ll, nr_blocks);
- if (r)
- return r;
-
- memcpy(&smm->sm, &ops, sizeof(smm->sm));
-
/*
* Now we need to update the newly created data structures with the
* allocated blocks that they were built from.
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index 258986a2699d..a162fedeb51a 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -21,6 +21,7 @@
#include <linux/seq_file.h>
#include <linux/module.h>
#include <linux/slab.h>
+#include <trace/events/block.h>
#include "md.h"
#include "raid0.h"
#include "raid5.h"
@@ -51,20 +52,21 @@ static void dump_zones(struct mddev *mddev)
char b[BDEVNAME_SIZE];
struct r0conf *conf = mddev->private;
int raid_disks = conf->strip_zone[0].nb_dev;
- printk(KERN_INFO "md: RAID0 configuration for %s - %d zone%s\n",
- mdname(mddev),
- conf->nr_strip_zones, conf->nr_strip_zones==1?"":"s");
+ pr_debug("md: RAID0 configuration for %s - %d zone%s\n",
+ mdname(mddev),
+ conf->nr_strip_zones, conf->nr_strip_zones==1?"":"s");
for (j = 0; j < conf->nr_strip_zones; j++) {
- printk(KERN_INFO "md: zone%d=[", j);
+ char line[200];
+ int len = 0;
+
for (k = 0; k < conf->strip_zone[j].nb_dev; k++)
- printk(KERN_CONT "%s%s", k?"/":"",
- bdevname(conf->devlist[j*raid_disks
- + k]->bdev, b));
- printk(KERN_CONT "]\n");
+ len += snprintf(line+len, 200-len, "%s%s", k?"/":"",
+ bdevname(conf->devlist[j*raid_disks
+ + k]->bdev, b));
+ pr_debug("md: zone%d=[%s]\n", j, line);
zone_size = conf->strip_zone[j].zone_end - zone_start;
- printk(KERN_INFO " zone-offset=%10lluKB, "
- "device-offset=%10lluKB, size=%10lluKB\n",
+ pr_debug(" zone-offset=%10lluKB, device-offset=%10lluKB, size=%10lluKB\n",
(unsigned long long)zone_start>>1,
(unsigned long long)conf->strip_zone[j].dev_start>>1,
(unsigned long long)zone_size>>1);
@@ -142,9 +144,9 @@ static int create_strip_zones(struct mddev *mddev, struct r0conf **private_conf)
* chunk size is a multiple of that sector size
*/
if ((mddev->chunk_sectors << 9) % blksize) {
- printk(KERN_ERR "md/raid0:%s: chunk_size of %d not multiple of block size %d\n",
- mdname(mddev),
- mddev->chunk_sectors << 9, blksize);
+ pr_warn("md/raid0:%s: chunk_size of %d not multiple of block size %d\n",
+ mdname(mddev),
+ mddev->chunk_sectors << 9, blksize);
err = -EINVAL;
goto abort;
}
@@ -186,19 +188,18 @@ static int create_strip_zones(struct mddev *mddev, struct r0conf **private_conf)
}
if (j < 0) {
- printk(KERN_ERR
- "md/raid0:%s: remove inactive devices before converting to RAID0\n",
- mdname(mddev));
+ pr_warn("md/raid0:%s: remove inactive devices before converting to RAID0\n",
+ mdname(mddev));
goto abort;
}
if (j >= mddev->raid_disks) {
- printk(KERN_ERR "md/raid0:%s: bad disk number %d - "
- "aborting!\n", mdname(mddev), j);
+ pr_warn("md/raid0:%s: bad disk number %d - aborting!\n",
+ mdname(mddev), j);
goto abort;
}
if (dev[j]) {
- printk(KERN_ERR "md/raid0:%s: multiple devices for %d - "
- "aborting!\n", mdname(mddev), j);
+ pr_warn("md/raid0:%s: multiple devices for %d - aborting!\n",
+ mdname(mddev), j);
goto abort;
}
dev[j] = rdev1;
@@ -208,8 +209,8 @@ static int create_strip_zones(struct mddev *mddev, struct r0conf **private_conf)
cnt++;
}
if (cnt != mddev->raid_disks) {
- printk(KERN_ERR "md/raid0:%s: too few disks (%d of %d) - "
- "aborting!\n", mdname(mddev), cnt, mddev->raid_disks);
+ pr_warn("md/raid0:%s: too few disks (%d of %d) - aborting!\n",
+ mdname(mddev), cnt, mddev->raid_disks);
goto abort;
}
zone->nb_dev = cnt;
@@ -357,8 +358,7 @@ static int raid0_run(struct mddev *mddev)
int ret;
if (mddev->chunk_sectors == 0) {
- printk(KERN_ERR "md/raid0:%s: chunk size must be set.\n",
- mdname(mddev));
+ pr_warn("md/raid0:%s: chunk size must be set.\n", mdname(mddev));
return -EINVAL;
}
if (md_check_no_bitmap(mddev))
@@ -399,9 +399,9 @@ static int raid0_run(struct mddev *mddev)
/* calculate array device size */
md_set_array_sectors(mddev, raid0_size(mddev, 0, 0));
- printk(KERN_INFO "md/raid0:%s: md_size is %llu sectors.\n",
- mdname(mddev),
- (unsigned long long)mddev->array_sectors);
+ pr_debug("md/raid0:%s: md_size is %llu sectors.\n",
+ mdname(mddev),
+ (unsigned long long)mddev->array_sectors);
if (mddev->queue) {
/* calculate the max read-ahead size.
@@ -464,7 +464,8 @@ static void raid0_make_request(struct mddev *mddev, struct bio *bio)
}
do {
- sector_t sector = bio->bi_iter.bi_sector;
+ sector_t bio_sector = bio->bi_iter.bi_sector;
+ sector_t sector = bio_sector;
unsigned chunk_sects = mddev->chunk_sectors;
unsigned sectors = chunk_sects -
@@ -473,7 +474,7 @@ static void raid0_make_request(struct mddev *mddev, struct bio *bio)
: sector_div(sector, chunk_sects));
/* Restore due to sector_div */
- sector = bio->bi_iter.bi_sector;
+ sector = bio_sector;
if (sectors < bio_sectors(bio)) {
split = bio_split(bio, sectors, GFP_NOIO, fs_bio_set);
@@ -492,8 +493,13 @@ static void raid0_make_request(struct mddev *mddev, struct bio *bio)
!blk_queue_discard(bdev_get_queue(split->bi_bdev)))) {
/* Just ignore it */
bio_endio(split);
- } else
+ } else {
+ if (mddev->gendisk)
+ trace_block_bio_remap(bdev_get_queue(split->bi_bdev),
+ split, disk_devt(mddev->gendisk),
+ bio_sector);
generic_make_request(split);
+ }
} while (split != bio);
}
@@ -509,17 +515,17 @@ static void *raid0_takeover_raid45(struct mddev *mddev)
struct r0conf *priv_conf;
if (mddev->degraded != 1) {
- printk(KERN_ERR "md/raid0:%s: raid5 must be degraded! Degraded disks: %d\n",
- mdname(mddev),
- mddev->degraded);
+ pr_warn("md/raid0:%s: raid5 must be degraded! Degraded disks: %d\n",
+ mdname(mddev),
+ mddev->degraded);
return ERR_PTR(-EINVAL);
}
rdev_for_each(rdev, mddev) {
/* check slot number for a disk */
if (rdev->raid_disk == mddev->raid_disks-1) {
- printk(KERN_ERR "md/raid0:%s: raid5 must have missing parity disk!\n",
- mdname(mddev));
+ pr_warn("md/raid0:%s: raid5 must have missing parity disk!\n",
+ mdname(mddev));
return ERR_PTR(-EINVAL);
}
rdev->sectors = mddev->dev_sectors;
@@ -533,8 +539,11 @@ static void *raid0_takeover_raid45(struct mddev *mddev)
mddev->delta_disks = -1;
/* make sure it will be not marked as dirty */
mddev->recovery_cp = MaxSector;
+ clear_bit(MD_HAS_JOURNAL, &mddev->flags);
+ clear_bit(MD_JOURNAL_CLEAN, &mddev->flags);
create_strip_zones(mddev, &priv_conf);
+
return priv_conf;
}
@@ -549,19 +558,19 @@ static void *raid0_takeover_raid10(struct mddev *mddev)
* - all mirrors must be already degraded
*/
if (mddev->layout != ((1 << 8) + 2)) {
- printk(KERN_ERR "md/raid0:%s:: Raid0 cannot takeover layout: 0x%x\n",
- mdname(mddev),
- mddev->layout);
+		pr_warn("md/raid0:%s: Raid0 cannot takeover layout: 0x%x\n",
+ mdname(mddev),
+ mddev->layout);
return ERR_PTR(-EINVAL);
}
if (mddev->raid_disks & 1) {
- printk(KERN_ERR "md/raid0:%s: Raid0 cannot takeover Raid10 with odd disk number.\n",
- mdname(mddev));
+ pr_warn("md/raid0:%s: Raid0 cannot takeover Raid10 with odd disk number.\n",
+ mdname(mddev));
return ERR_PTR(-EINVAL);
}
if (mddev->degraded != (mddev->raid_disks>>1)) {
- printk(KERN_ERR "md/raid0:%s: All mirrors must be already degraded!\n",
- mdname(mddev));
+ pr_warn("md/raid0:%s: All mirrors must be already degraded!\n",
+ mdname(mddev));
return ERR_PTR(-EINVAL);
}
@@ -574,6 +583,7 @@ static void *raid0_takeover_raid10(struct mddev *mddev)
mddev->degraded = 0;
/* make sure it will be not marked as dirty */
mddev->recovery_cp = MaxSector;
+ clear_bit(MD_FAILFAST_SUPPORTED, &mddev->flags);
create_strip_zones(mddev, &priv_conf);
return priv_conf;
@@ -588,7 +598,7 @@ static void *raid0_takeover_raid1(struct mddev *mddev)
* - (N - 1) mirror drives must be already faulty
*/
if ((mddev->raid_disks - 1) != mddev->degraded) {
- printk(KERN_ERR "md/raid0:%s: (N - 1) mirrors drives must be already faulty!\n",
+		pr_err("md/raid0:%s: (N - 1) mirror drives must be already faulty!\n",
mdname(mddev));
return ERR_PTR(-EINVAL);
}
@@ -616,6 +626,7 @@ static void *raid0_takeover_raid1(struct mddev *mddev)
mddev->raid_disks = 1;
/* make sure it will be not marked as dirty */
mddev->recovery_cp = MaxSector;
+ clear_bit(MD_FAILFAST_SUPPORTED, &mddev->flags);
create_strip_zones(mddev, &priv_conf);
return priv_conf;
@@ -631,8 +642,8 @@ static void *raid0_takeover(struct mddev *mddev)
*/
if (mddev->bitmap) {
- printk(KERN_ERR "md/raid0: %s: cannot takeover array with bitmap\n",
- mdname(mddev));
+ pr_warn("md/raid0: %s: cannot takeover array with bitmap\n",
+ mdname(mddev));
return ERR_PTR(-EBUSY);
}
if (mddev->level == 4)
@@ -642,8 +653,8 @@ static void *raid0_takeover(struct mddev *mddev)
if (mddev->layout == ALGORITHM_PARITY_N)
return raid0_takeover_raid45(mddev);
- printk(KERN_ERR "md/raid0:%s: Raid can only takeover Raid5 with layout: %d\n",
- mdname(mddev), ALGORITHM_PARITY_N);
+ pr_warn("md/raid0:%s: Raid can only takeover Raid5 with layout: %d\n",
+ mdname(mddev), ALGORITHM_PARITY_N);
}
if (mddev->level == 10)
@@ -652,7 +663,7 @@ static void *raid0_takeover(struct mddev *mddev)
if (mddev->level == 1)
return raid0_takeover_raid1(mddev);
- printk(KERN_ERR "Takeover from raid%i to raid0 not supported\n",
+ pr_warn("Takeover from raid%i to raid0 not supported\n",
mddev->level);
return ERR_PTR(-EINVAL);
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 29e2df5cd77b..a1f3fbed9100 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -37,6 +37,7 @@
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/ratelimit.h>
+#include <trace/events/block.h>
#include "md.h"
#include "raid1.h"
#include "bitmap.h"
@@ -70,6 +71,9 @@ static void allow_barrier(struct r1conf *conf, sector_t start_next_window,
sector_t bi_sector);
static void lower_barrier(struct r1conf *conf);
+#define raid1_log(md, fmt, args...) \
+ do { if ((md)->queue) blk_add_trace_msg((md)->queue, "raid1 " fmt, ##args); } while (0)
+
static void * r1bio_pool_alloc(gfp_t gfp_flags, void *data)
{
struct pool_info *pi = data;
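
raid1_log() feeds short wait annotations into the block trace stream whenever the array has a request queue, so the new "wait barrier", "wait freeze", etc. messages later in this file become visible to tracing tools. An illustrative use plus the rough shape of the output (exact blkparse formatting is assumed, not verified):

        raid1_log(mddev, "wait rdev %d blocked", blocked_rdev->raid_disk);

        /* in `blktrace -d /dev/md0 -o - | blkparse -i -` this surfaces as a
         * driver message event, roughly:
         *   9,0    1   42    0.001234567  1234  m   N raid1 wait rdev 2 blocked
         */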
@@ -325,6 +329,11 @@ static void raid1_end_read_request(struct bio *bio)
if (uptodate)
set_bit(R1BIO_Uptodate, &r1_bio->state);
+ else if (test_bit(FailFast, &rdev->flags) &&
+ test_bit(R1BIO_FailFast, &r1_bio->state))
+ /* This was a fail-fast read so we definitely
+ * want to retry */
+ ;
else {
/* If all other devices have failed, we want to return
* the error upwards rather than fail the last device.
@@ -347,13 +356,10 @@ static void raid1_end_read_request(struct bio *bio)
* oops, read error:
*/
char b[BDEVNAME_SIZE];
- printk_ratelimited(
- KERN_ERR "md/raid1:%s: %s: "
- "rescheduling sector %llu\n",
- mdname(conf->mddev),
- bdevname(rdev->bdev,
- b),
- (unsigned long long)r1_bio->sector);
+ pr_err_ratelimited("md/raid1:%s: %s: rescheduling sector %llu\n",
+ mdname(conf->mddev),
+ bdevname(rdev->bdev, b),
+ (unsigned long long)r1_bio->sector);
set_bit(R1BIO_ReadError, &r1_bio->state);
reschedule_retry(r1_bio);
/* don't drop the reference on read_disk yet */
@@ -416,7 +422,24 @@ static void raid1_end_write_request(struct bio *bio)
set_bit(MD_RECOVERY_NEEDED, &
conf->mddev->recovery);
- set_bit(R1BIO_WriteError, &r1_bio->state);
+ if (test_bit(FailFast, &rdev->flags) &&
+ (bio->bi_opf & MD_FAILFAST) &&
+ /* We never try FailFast to WriteMostly devices */
+ !test_bit(WriteMostly, &rdev->flags)) {
+ md_error(r1_bio->mddev, rdev);
+ if (!test_bit(Faulty, &rdev->flags))
+ /* This is the only remaining device,
+ * We need to retry the write without
+ * FailFast
+ */
+ set_bit(R1BIO_WriteError, &r1_bio->state);
+ else {
+ /* Finished with this branch */
+ r1_bio->bios[mirror] = NULL;
+ to_put = bio;
+ }
+ } else
+ set_bit(R1BIO_WriteError, &r1_bio->state);
} else {
/*
* Set R1BIO_Uptodate in our master bio, so that we
@@ -534,6 +557,7 @@ static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sect
best_good_sectors = 0;
has_nonrot_disk = 0;
choose_next_idle = 0;
+ clear_bit(R1BIO_FailFast, &r1_bio->state);
if ((conf->mddev->recovery_cp < this_sector + sectors) ||
(mddev_is_clustered(conf->mddev) &&
@@ -607,6 +631,10 @@ static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sect
} else
best_good_sectors = sectors;
+ if (best_disk >= 0)
+ /* At least two disks to choose from so failfast is OK */
+ set_bit(R1BIO_FailFast, &r1_bio->state);
+
nonrot = blk_queue_nonrot(bdev_get_queue(rdev->bdev));
has_nonrot_disk |= nonrot;
pending = atomic_read(&rdev->nr_pending);
@@ -645,11 +673,6 @@ static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sect
}
break;
}
- /* If device is idle, use it */
- if (pending == 0) {
- best_disk = disk;
- break;
- }
if (choose_next_idle)
continue;
@@ -672,7 +695,7 @@ static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sect
* mixed rotational/non-rotational disks depending on workload.
*/
if (best_disk == -1) {
- if (has_nonrot_disk)
+ if (has_nonrot_disk || min_pending == 0)
best_disk = best_pending_disk;
else
best_disk = best_dist_disk;
@@ -745,9 +768,14 @@ static void flush_pending_writes(struct r1conf *conf)
while (bio) { /* submit pending writes */
struct bio *next = bio->bi_next;
+ struct md_rdev *rdev = (void*)bio->bi_bdev;
bio->bi_next = NULL;
- if (unlikely((bio_op(bio) == REQ_OP_DISCARD) &&
- !blk_queue_discard(bdev_get_queue(bio->bi_bdev))))
+ bio->bi_bdev = rdev->bdev;
+ if (test_bit(Faulty, &rdev->flags)) {
+ bio->bi_error = -EIO;
+ bio_endio(bio);
+ } else if (unlikely((bio_op(bio) == REQ_OP_DISCARD) &&
+ !blk_queue_discard(bdev_get_queue(bio->bi_bdev))))
/* Just ignore it */
bio_endio(bio);
else
@@ -832,7 +860,7 @@ static bool need_to_wait_for_sync(struct r1conf *conf, struct bio *bio)
else if (conf->barrier && bio_data_dir(bio) == WRITE) {
if ((conf->mddev->curr_resync_completed
>= bio_end_sector(bio)) ||
- (conf->next_resync + NEXT_NORMALIO_DISTANCE
+ (conf->start_next_window + NEXT_NORMALIO_DISTANCE
<= bio->bi_iter.bi_sector))
wait = false;
else
@@ -858,6 +886,7 @@ static sector_t wait_barrier(struct r1conf *conf, struct bio *bio)
* that queue to allow conf->start_next_window
* to increase.
*/
+ raid1_log(conf->mddev, "wait barrier");
wait_event_lock_irq(conf->wait_barrier,
!conf->array_frozen &&
(!conf->barrier ||
@@ -937,6 +966,7 @@ static void freeze_array(struct r1conf *conf, int extra)
*/
spin_lock_irq(&conf->resync_lock);
conf->array_frozen = 1;
+ raid1_log(conf->mddev, "wait freeze");
wait_event_lock_irq_cmd(conf->wait_barrier,
conf->nr_pending == conf->nr_queued+extra,
conf->resync_lock,
@@ -1019,9 +1049,14 @@ static void raid1_unplug(struct blk_plug_cb *cb, bool from_schedule)
while (bio) { /* submit pending writes */
struct bio *next = bio->bi_next;
+ struct md_rdev *rdev = (void*)bio->bi_bdev;
bio->bi_next = NULL;
- if (unlikely((bio_op(bio) == REQ_OP_DISCARD) &&
- !blk_queue_discard(bdev_get_queue(bio->bi_bdev))))
+ bio->bi_bdev = rdev->bdev;
+ if (test_bit(Faulty, &rdev->flags)) {
+ bio->bi_error = -EIO;
+ bio_endio(bio);
+ } else if (unlikely((bio_op(bio) == REQ_OP_DISCARD) &&
+ !blk_queue_discard(bdev_get_queue(bio->bi_bdev))))
/* Just ignore it */
bio_endio(bio);
else
@@ -1136,6 +1171,7 @@ read_again:
* take care not to over-take any writes
* that are 'behind'
*/
+ raid1_log(mddev, "wait behind writes");
wait_event(bitmap->behind_wait,
atomic_read(&bitmap->behind_writes) == 0);
}
@@ -1153,8 +1189,16 @@ read_again:
read_bio->bi_bdev = mirror->rdev->bdev;
read_bio->bi_end_io = raid1_end_read_request;
bio_set_op_attrs(read_bio, op, do_sync);
+ if (test_bit(FailFast, &mirror->rdev->flags) &&
+ test_bit(R1BIO_FailFast, &r1_bio->state))
+ read_bio->bi_opf |= MD_FAILFAST;
read_bio->bi_private = r1_bio;
+ if (mddev->gendisk)
+ trace_block_bio_remap(bdev_get_queue(read_bio->bi_bdev),
+ read_bio, disk_devt(mddev->gendisk),
+ r1_bio->sector);
+
if (max_sectors < r1_bio->sectors) {
/* could not read all from this device, so we will
* need another r1_bio.
@@ -1195,6 +1239,7 @@ read_again:
*/
if (conf->pending_count >= max_queued_requests) {
md_wakeup_thread(mddev->thread);
+ raid1_log(mddev, "wait queued");
wait_event(conf->wait_barrier,
conf->pending_count < max_queued_requests);
}
@@ -1286,6 +1331,7 @@ read_again:
rdev_dec_pending(conf->mirrors[j].rdev, mddev);
r1_bio->state = 0;
allow_barrier(conf, start_next_window, bio->bi_iter.bi_sector);
+ raid1_log(mddev, "wait rdev %d blocked", blocked_rdev->raid_disk);
md_wait_for_blocked_rdev(blocked_rdev, mddev);
start_next_window = wait_barrier(conf, bio);
/*
@@ -1363,10 +1409,21 @@ read_again:
mbio->bi_bdev = conf->mirrors[i].rdev->bdev;
mbio->bi_end_io = raid1_end_write_request;
bio_set_op_attrs(mbio, op, do_flush_fua | do_sync);
+ if (test_bit(FailFast, &conf->mirrors[i].rdev->flags) &&
+ !test_bit(WriteMostly, &conf->mirrors[i].rdev->flags) &&
+ conf->raid_disks - mddev->degraded > 1)
+ mbio->bi_opf |= MD_FAILFAST;
mbio->bi_private = r1_bio;
atomic_inc(&r1_bio->remaining);
+ if (mddev->gendisk)
+ trace_block_bio_remap(bdev_get_queue(mbio->bi_bdev),
+ mbio, disk_devt(mddev->gendisk),
+ r1_bio->sector);
+ /* flush_pending_writes() needs access to the rdev so...*/
+ mbio->bi_bdev = (void*)conf->mirrors[i].rdev;
+
cb = blk_check_plugged(raid1_unplug, mddev, sizeof(*plug));
if (cb)
plug = container_of(cb, struct raid1_plug_cb, cb);
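
The write path now smuggles the rdev pointer through bi_bdev until the plugged bio is actually submitted; flush_pending_writes() and raid1_unplug() cast it back, restore the real device, and fail the bio outright if the device went Faulty while queued. The two halves, condensed from this patch:

        /* submission: stash the rdev where the flush path can find it */
        mbio->bi_bdev = (void *)conf->mirrors[i].rdev;

        /* flush_pending_writes() / raid1_unplug(): undo the trick */
        struct md_rdev *rdev = (void *)bio->bi_bdev;

        bio->bi_bdev = rdev->bdev;              /* real device restored */
        if (test_bit(Faulty, &rdev->flags)) {
                bio->bi_error = -EIO;           /* don't issue to a dead device */
                bio_endio(bio);
        } else
                generic_make_request(bio);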
@@ -1436,6 +1493,7 @@ static void raid1_error(struct mddev *mddev, struct md_rdev *rdev)
* next level up know.
* else mark the drive as failed
*/
+ spin_lock_irqsave(&conf->device_lock, flags);
if (test_bit(In_sync, &rdev->flags)
&& (conf->raid_disks - mddev->degraded) == 1) {
/*
@@ -1445,10 +1503,10 @@ static void raid1_error(struct mddev *mddev, struct md_rdev *rdev)
* it is very likely to fail.
*/
conf->recovery_disabled = mddev->recovery_disabled;
+ spin_unlock_irqrestore(&conf->device_lock, flags);
return;
}
set_bit(Blocked, &rdev->flags);
- spin_lock_irqsave(&conf->device_lock, flags);
if (test_and_clear_bit(In_sync, &rdev->flags)) {
mddev->degraded++;
set_bit(Faulty, &rdev->flags);
@@ -1459,36 +1517,35 @@ static void raid1_error(struct mddev *mddev, struct md_rdev *rdev)
* if recovery is running, make sure it aborts.
*/
set_bit(MD_RECOVERY_INTR, &mddev->recovery);
- set_mask_bits(&mddev->flags, 0,
- BIT(MD_CHANGE_DEVS) | BIT(MD_CHANGE_PENDING));
- printk(KERN_ALERT
- "md/raid1:%s: Disk failure on %s, disabling device.\n"
- "md/raid1:%s: Operation continuing on %d devices.\n",
- mdname(mddev), bdevname(rdev->bdev, b),
- mdname(mddev), conf->raid_disks - mddev->degraded);
+ set_mask_bits(&mddev->sb_flags, 0,
+ BIT(MD_SB_CHANGE_DEVS) | BIT(MD_SB_CHANGE_PENDING));
+ pr_crit("md/raid1:%s: Disk failure on %s, disabling device.\n"
+ "md/raid1:%s: Operation continuing on %d devices.\n",
+ mdname(mddev), bdevname(rdev->bdev, b),
+ mdname(mddev), conf->raid_disks - mddev->degraded);
}
static void print_conf(struct r1conf *conf)
{
int i;
- printk(KERN_DEBUG "RAID1 conf printout:\n");
+ pr_debug("RAID1 conf printout:\n");
if (!conf) {
- printk(KERN_DEBUG "(!conf)\n");
+ pr_debug("(!conf)\n");
return;
}
- printk(KERN_DEBUG " --- wd:%d rd:%d\n", conf->raid_disks - conf->mddev->degraded,
- conf->raid_disks);
+ pr_debug(" --- wd:%d rd:%d\n", conf->raid_disks - conf->mddev->degraded,
+ conf->raid_disks);
rcu_read_lock();
for (i = 0; i < conf->raid_disks; i++) {
char b[BDEVNAME_SIZE];
struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
if (rdev)
- printk(KERN_DEBUG " disk %d, wo:%d, o:%d, dev:%s\n",
- i, !test_bit(In_sync, &rdev->flags),
- !test_bit(Faulty, &rdev->flags),
- bdevname(rdev->bdev,b));
+ pr_debug(" disk %d, wo:%d, o:%d, dev:%s\n",
+ i, !test_bit(In_sync, &rdev->flags),
+ !test_bit(Faulty, &rdev->flags),
+ bdevname(rdev->bdev,b));
}
rcu_read_unlock();
}
@@ -1788,12 +1845,24 @@ static int fix_sync_read_error(struct r1bio *r1_bio)
sector_t sect = r1_bio->sector;
int sectors = r1_bio->sectors;
int idx = 0;
+ struct md_rdev *rdev;
+
+ rdev = conf->mirrors[r1_bio->read_disk].rdev;
+ if (test_bit(FailFast, &rdev->flags)) {
+ /* Don't try recovering from here - just fail it
+ * ... unless it is the last working device of course */
+ md_error(mddev, rdev);
+ if (test_bit(Faulty, &rdev->flags))
+ /* Don't try to read from here, but make sure
+			 * put_buf does its thing
+ */
+ bio->bi_end_io = end_sync_write;
+ }
while(sectors) {
int s = sectors;
int d = r1_bio->read_disk;
int success = 0;
- struct md_rdev *rdev;
int start;
if (s > (PAGE_SIZE>>9))
@@ -1825,11 +1894,10 @@ static int fix_sync_read_error(struct r1bio *r1_bio)
* work just disable and interrupt the recovery.
* Don't fail devices as that won't really help.
*/
- printk(KERN_ALERT "md/raid1:%s: %s: unrecoverable I/O read error"
- " for block %llu\n",
- mdname(mddev),
- bdevname(bio->bi_bdev, b),
- (unsigned long long)r1_bio->sector);
+ pr_crit_ratelimited("md/raid1:%s: %s: unrecoverable I/O read error for block %llu\n",
+ mdname(mddev),
+ bdevname(bio->bi_bdev, b),
+ (unsigned long long)r1_bio->sector);
for (d = 0; d < conf->raid_disks * 2; d++) {
rdev = conf->mirrors[d].rdev;
if (!rdev || test_bit(Faulty, &rdev->flags))
@@ -2013,6 +2081,9 @@ static void sync_request_write(struct mddev *mddev, struct r1bio *r1_bio)
continue;
bio_set_op_attrs(wbio, REQ_OP_WRITE, 0);
+ if (test_bit(FailFast, &conf->mirrors[i].rdev->flags))
+ wbio->bi_opf |= MD_FAILFAST;
+
wbio->bi_end_io = end_sync_write;
atomic_inc(&r1_bio->remaining);
md_sync_acct(conf->mirrors[i].rdev->bdev, bio_sectors(wbio));
@@ -2122,13 +2193,11 @@ static void fix_read_error(struct r1conf *conf, int read_disk,
if (r1_sync_page_io(rdev, sect, s,
conf->tmppage, READ)) {
atomic_add(s, &rdev->corrected_errors);
- printk(KERN_INFO
- "md/raid1:%s: read error corrected "
- "(%d sectors at %llu on %s)\n",
- mdname(mddev), s,
- (unsigned long long)(sect +
- rdev->data_offset),
- bdevname(rdev->bdev, b));
+ pr_info("md/raid1:%s: read error corrected (%d sectors at %llu on %s)\n",
+ mdname(mddev), s,
+ (unsigned long long)(sect +
+ rdev->data_offset),
+ bdevname(rdev->bdev, b));
}
rdev_dec_pending(rdev, mddev);
} else
@@ -2287,6 +2356,8 @@ static void handle_read_error(struct r1conf *conf, struct r1bio *r1_bio)
struct bio *bio;
char b[BDEVNAME_SIZE];
struct md_rdev *rdev;
+ dev_t bio_dev;
+ sector_t bio_sector;
clear_bit(R1BIO_ReadError, &r1_bio->state);
/* we got a read error. Maybe the drive is bad. Maybe just
@@ -2300,10 +2371,14 @@ static void handle_read_error(struct r1conf *conf, struct r1bio *r1_bio)
bio = r1_bio->bios[r1_bio->read_disk];
bdevname(bio->bi_bdev, b);
+ bio_dev = bio->bi_bdev->bd_dev;
+ bio_sector = conf->mirrors[r1_bio->read_disk].rdev->data_offset + r1_bio->sector;
bio_put(bio);
r1_bio->bios[r1_bio->read_disk] = NULL;
- if (mddev->ro == 0) {
+ rdev = conf->mirrors[r1_bio->read_disk].rdev;
+ if (mddev->ro == 0
+ && !test_bit(FailFast, &rdev->flags)) {
freeze_array(conf, 1);
fix_read_error(conf, r1_bio->read_disk,
r1_bio->sector, r1_bio->sectors);
@@ -2312,14 +2387,13 @@ static void handle_read_error(struct r1conf *conf, struct r1bio *r1_bio)
r1_bio->bios[r1_bio->read_disk] = IO_BLOCKED;
}
- rdev_dec_pending(conf->mirrors[r1_bio->read_disk].rdev, conf->mddev);
+ rdev_dec_pending(rdev, conf->mddev);
read_more:
disk = read_balance(conf, r1_bio, &max_sectors);
if (disk == -1) {
- printk(KERN_ALERT "md/raid1:%s: %s: unrecoverable I/O"
- " read error for block %llu\n",
- mdname(mddev), b, (unsigned long long)r1_bio->sector);
+ pr_crit_ratelimited("md/raid1:%s: %s: unrecoverable I/O read error for block %llu\n",
+ mdname(mddev), b, (unsigned long long)r1_bio->sector);
raid_end_bio_io(r1_bio);
} else {
const unsigned long do_sync
@@ -2330,16 +2404,17 @@ read_more:
max_sectors);
r1_bio->bios[r1_bio->read_disk] = bio;
rdev = conf->mirrors[disk].rdev;
- printk_ratelimited(KERN_ERR
- "md/raid1:%s: redirecting sector %llu"
- " to other mirror: %s\n",
- mdname(mddev),
- (unsigned long long)r1_bio->sector,
- bdevname(rdev->bdev, b));
+ pr_info_ratelimited("md/raid1:%s: redirecting sector %llu to other mirror: %s\n",
+ mdname(mddev),
+ (unsigned long long)r1_bio->sector,
+ bdevname(rdev->bdev, b));
bio->bi_iter.bi_sector = r1_bio->sector + rdev->data_offset;
bio->bi_bdev = rdev->bdev;
bio->bi_end_io = raid1_end_read_request;
bio_set_op_attrs(bio, REQ_OP_READ, do_sync);
+ if (test_bit(FailFast, &rdev->flags) &&
+ test_bit(R1BIO_FailFast, &r1_bio->state))
+ bio->bi_opf |= MD_FAILFAST;
bio->bi_private = r1_bio;
if (max_sectors < r1_bio->sectors) {
/* Drat - have to split this up more */
@@ -2353,6 +2428,8 @@ read_more:
else
mbio->bi_phys_segments++;
spin_unlock_irq(&conf->device_lock);
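+ /* emit the standard block-layer remap tracepoint so blktrace can
+ * attribute this md-level bio to the member disk it is redirected to */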
+ trace_block_bio_remap(bdev_get_queue(bio->bi_bdev),
+ bio, bio_dev, bio_sector);
generic_make_request(bio);
bio = NULL;
@@ -2367,8 +2444,11 @@ read_more:
sectors_handled;
goto read_more;
- } else
+ } else {
+ trace_block_bio_remap(bdev_get_queue(bio->bi_bdev),
+ bio, bio_dev, bio_sector);
generic_make_request(bio);
+ }
}
}
@@ -2384,10 +2464,10 @@ static void raid1d(struct md_thread *thread)
md_check_recovery(mddev);
if (!list_empty_careful(&conf->bio_end_io_list) &&
- !test_bit(MD_CHANGE_PENDING, &mddev->flags)) {
+ !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)) {
LIST_HEAD(tmp);
spin_lock_irqsave(&conf->device_lock, flags);
- if (!test_bit(MD_CHANGE_PENDING, &mddev->flags)) {
+ if (!test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)) {
while (!list_empty(&conf->bio_end_io_list)) {
list_move(conf->bio_end_io_list.prev, &tmp);
conf->nr_queued--;
@@ -2441,7 +2521,7 @@ static void raid1d(struct md_thread *thread)
generic_make_request(r1_bio->bios[r1_bio->read_disk]);
cond_resched();
- if (mddev->flags & ~(1<<MD_CHANGE_PENDING))
+ if (mddev->sb_flags & ~(1<<MD_SB_CHANGE_PENDING))
md_check_recovery(mddev);
}
blk_finish_plug(&plug);
@@ -2623,6 +2703,8 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr,
bio->bi_iter.bi_sector = sector_nr + rdev->data_offset;
bio->bi_bdev = rdev->bdev;
bio->bi_private = r1_bio;
+ if (test_bit(FailFast, &rdev->flags))
+ bio->bi_opf |= MD_FAILFAST;
}
}
rcu_read_unlock();
@@ -2642,7 +2724,7 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr,
min_bad, 0
) && ok;
}
- set_bit(MD_CHANGE_DEVS, &mddev->flags);
+ set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
*skipped = 1;
put_buf(r1_bio);
@@ -2753,6 +2835,8 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr,
if (bio->bi_end_io == end_sync_read) {
read_targets--;
md_sync_acct(bio->bi_bdev, nr_sectors);
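+ /* only one read source is left, so strip MD_FAILFAST: a transient
+ * error on the last copy must be retried, not failed fast */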
+ if (read_targets == 1)
+ bio->bi_opf &= ~MD_FAILFAST;
generic_make_request(bio);
}
}
@@ -2760,6 +2844,8 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr,
atomic_set(&r1_bio->remaining, 1);
bio = r1_bio->bios[r1_bio->read_disk];
md_sync_acct(bio->bi_bdev, nr_sectors);
+ if (read_targets == 1)
+ bio->bi_opf &= ~MD_FAILFAST;
generic_make_request(bio);
}
@@ -2875,12 +2961,8 @@ static struct r1conf *setup_conf(struct mddev *mddev)
err = -ENOMEM;
conf->thread = md_register_thread(raid1d, mddev, "raid1");
- if (!conf->thread) {
- printk(KERN_ERR
- "md/raid1:%s: couldn't allocate thread\n",
- mdname(mddev));
+ if (!conf->thread)
goto abort;
- }
return conf;
@@ -2905,13 +2987,13 @@ static int raid1_run(struct mddev *mddev)
bool discard_supported = false;
if (mddev->level != 1) {
- printk(KERN_ERR "md/raid1:%s: raid level not set to mirroring (%d)\n",
- mdname(mddev), mddev->level);
+ pr_warn("md/raid1:%s: raid level not set to mirroring (%d)\n",
+ mdname(mddev), mddev->level);
return -EIO;
}
if (mddev->reshape_position != MaxSector) {
- printk(KERN_ERR "md/raid1:%s: reshape_position set but not supported\n",
- mdname(mddev));
+ pr_warn("md/raid1:%s: reshape_position set but not supported\n",
+ mdname(mddev));
return -EIO;
}
/*
@@ -2950,11 +3032,9 @@ static int raid1_run(struct mddev *mddev)
mddev->recovery_cp = MaxSector;
if (mddev->recovery_cp != MaxSector)
- printk(KERN_NOTICE "md/raid1:%s: not clean"
- " -- starting background reconstruction\n",
- mdname(mddev));
- printk(KERN_INFO
- "md/raid1:%s: active with %d out of %d mirrors\n",
+ pr_info("md/raid1:%s: not clean -- starting background reconstruction\n",
+ mdname(mddev));
+ pr_info("md/raid1:%s: active with %d out of %d mirrors\n",
mdname(mddev), mddev->raid_disks - mddev->degraded,
mddev->raid_disks);
@@ -2964,6 +3044,7 @@ static int raid1_run(struct mddev *mddev)
mddev->thread = conf->thread;
conf->thread = NULL;
mddev->private = conf;
+ set_bit(MD_FAILFAST_SUPPORTED, &mddev->flags);
md_set_array_sectors(mddev, raid1_size(mddev, 0, 0));
@@ -3107,9 +3188,8 @@ static int raid1_reshape(struct mddev *mddev)
rdev->raid_disk = d2;
sysfs_unlink_rdev(mddev, rdev);
if (sysfs_link_rdev(mddev, rdev))
- printk(KERN_WARNING
- "md/raid1:%s: cannot register rd%d\n",
- mdname(mddev), rdev->raid_disk);
+ pr_warn("md/raid1:%s: cannot register rd%d\n",
+ mdname(mddev), rdev->raid_disk);
}
if (rdev)
newmirrors[d2++].rdev = rdev;
@@ -3163,9 +3243,12 @@ static void *raid1_takeover(struct mddev *mddev)
mddev->new_layout = 0;
mddev->new_chunk_sectors = 0;
conf = setup_conf(mddev);
- if (!IS_ERR(conf))
+ if (!IS_ERR(conf)) {
/* Array must appear to be quiesced */
conf->array_frozen = 1;
+ clear_bit(MD_HAS_JOURNAL, &mddev->flags);
+ clear_bit(MD_JOURNAL_CLEAN, &mddev->flags);
+ }
return conf;
}
return ERR_PTR(-EINVAL);
diff --git a/drivers/md/raid1.h b/drivers/md/raid1.h
index 61c39b390cd8..c52ef424a24b 100644
--- a/drivers/md/raid1.h
+++ b/drivers/md/raid1.h
@@ -161,14 +161,15 @@ struct r1bio {
};
/* bits for r1bio.state */
-#define R1BIO_Uptodate 0
-#define R1BIO_IsSync 1
-#define R1BIO_Degraded 2
-#define R1BIO_BehindIO 3
+enum r1bio_state {
+ R1BIO_Uptodate,
+ R1BIO_IsSync,
+ R1BIO_Degraded,
+ R1BIO_BehindIO,
/* Set ReadError on bios that experience a read error so that
* raid1d knows what to do with them.
*/
-#define R1BIO_ReadError 4
+ R1BIO_ReadError,
/* For write-behind requests, we call bi_end_io when
* the last non-write-behind device completes, providing
* any write was successful. Otherwise we call when
@@ -176,10 +177,12 @@ struct r1bio {
* with failure when last write completes (and all failed).
* Record that bi_end_io was called with this flag...
*/
-#define R1BIO_Returned 6
+ R1BIO_Returned,
/* If a write for this request means we can clear some
* known-bad-block records, we set this flag
*/
-#define R1BIO_MadeGood 7
-#define R1BIO_WriteError 8
+ R1BIO_MadeGood,
+ R1BIO_WriteError,
+ R1BIO_FailFast,
+};
#endif
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 39fddda2fef2..ab5e86209322 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -25,6 +25,7 @@
#include <linux/seq_file.h>
#include <linux/ratelimit.h>
#include <linux/kthread.h>
+#include <trace/events/block.h>
#include "md.h"
#include "raid10.h"
#include "raid0.h"
@@ -99,12 +100,16 @@ static int max_queued_requests = 1024;
static void allow_barrier(struct r10conf *conf);
static void lower_barrier(struct r10conf *conf);
static int _enough(struct r10conf *conf, int previous, int ignore);
+static int enough(struct r10conf *conf, int ignore);
static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr,
int *skipped);
static void reshape_request_write(struct mddev *mddev, struct r10bio *r10_bio);
static void end_reshape_write(struct bio *bio);
static void end_reshape(struct r10conf *conf);
+#define raid10_log(md, fmt, args...) \
+ do { if ((md)->queue) blk_add_trace_msg((md)->queue, "raid10 " fmt, ##args); } while (0)
+
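+/*
+ * For illustration: raid10_log() is a no-op unless the array has a request
+ * queue; a call such as raid10_log(mddev, "wait barrier") (used below)
+ * surfaces in blktrace output as the message "raid10 wait barrier".
+ */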
static void * r10bio_pool_alloc(gfp_t gfp_flags, void *data)
{
struct r10conf *conf = data;
@@ -404,8 +409,7 @@ static void raid10_end_read_request(struct bio *bio)
* oops, read error - keep the refcount on the rdev
*/
char b[BDEVNAME_SIZE];
- printk_ratelimited(KERN_ERR
- "md/raid10:%s: %s: rescheduling sector %llu\n",
+ pr_err_ratelimited("md/raid10:%s: %s: rescheduling sector %llu\n",
mdname(conf->mddev),
bdevname(rdev->bdev, b),
(unsigned long long)r10_bio->sector);
@@ -447,6 +451,7 @@ static void raid10_end_write_request(struct bio *bio)
struct r10conf *conf = r10_bio->mddev->private;
int slot, repl;
struct md_rdev *rdev = NULL;
+ struct bio *to_put = NULL;
bool discard_error;
discard_error = bio->bi_error && bio_op(bio) == REQ_OP_DISCARD;
@@ -474,8 +479,24 @@ static void raid10_end_write_request(struct bio *bio)
if (!test_and_set_bit(WantReplacement, &rdev->flags))
set_bit(MD_RECOVERY_NEEDED,
&rdev->mddev->recovery);
- set_bit(R10BIO_WriteError, &r10_bio->state);
+
dec_rdev = 0;
+ if (test_bit(FailFast, &rdev->flags) &&
+ (bio->bi_opf & MD_FAILFAST)) {
+ md_error(rdev->mddev, rdev);
+ if (!test_bit(Faulty, &rdev->flags))
+ /* This is the only remaining device,
+ * so we need to retry the write
+ * without FailFast
+ */
+ set_bit(R10BIO_WriteError, &r10_bio->state);
+ else {
+ r10_bio->devs[slot].bio = NULL;
+ to_put = bio;
+ dec_rdev = 1;
+ }
+ } else
+ set_bit(R10BIO_WriteError, &r10_bio->state);
}
} else {
/*
@@ -525,6 +546,8 @@ static void raid10_end_write_request(struct bio *bio)
one_write_done(r10_bio);
if (dec_rdev)
rdev_dec_pending(rdev, conf->mddev);
+ if (to_put)
+ bio_put(to_put);
}
/*
@@ -716,6 +739,7 @@ static struct md_rdev *read_balance(struct r10conf *conf,
best_dist = MaxSector;
best_good_sectors = 0;
do_balance = 1;
+ clear_bit(R10BIO_FailFast, &r10_bio->state);
/*
* Check if we can balance. We can balance on the whole
* device if no resync is going on (recovery is ok), or below
@@ -780,15 +804,18 @@ static struct md_rdev *read_balance(struct r10conf *conf,
if (!do_balance)
break;
+ if (best_slot >= 0)
+ /* At least 2 disks to choose from so failfast is OK */
+ set_bit(R10BIO_FailFast, &r10_bio->state);
/* This optimisation is debatable, and completely destroys
* sequential read speed for 'far copies' arrays. So only
* keep it for 'near' arrays, and review those later.
*/
if (geo->near_copies > 1 && !atomic_read(&rdev->nr_pending))
- break;
+ new_distance = 0;
/* for far > 1 always use the lowest address */
- if (geo->far_copies > 1)
+ else if (geo->far_copies > 1)
new_distance = r10_bio->devs[slot].addr;
else
new_distance = abs(r10_bio->devs[slot].addr -
@@ -859,9 +886,14 @@ static void flush_pending_writes(struct r10conf *conf)
while (bio) { /* submit pending writes */
struct bio *next = bio->bi_next;
+ struct md_rdev *rdev = (void*)bio->bi_bdev;
bio->bi_next = NULL;
- if (unlikely((bio_op(bio) == REQ_OP_DISCARD) &&
- !blk_queue_discard(bdev_get_queue(bio->bi_bdev))))
+ bio->bi_bdev = rdev->bdev;
+ if (test_bit(Faulty, &rdev->flags)) {
+ bio->bi_error = -EIO;
+ bio_endio(bio);
+ } else if (unlikely((bio_op(bio) == REQ_OP_DISCARD) &&
+ !blk_queue_discard(bdev_get_queue(bio->bi_bdev))))
/* Just ignore it */
bio_endio(bio);
else
@@ -937,6 +969,7 @@ static void wait_barrier(struct r10conf *conf)
* that queue to get the nr_pending
* count down.
*/
+ raid10_log(conf->mddev, "wait barrier");
wait_event_lock_irq(conf->wait_barrier,
!conf->barrier ||
(atomic_read(&conf->nr_pending) &&
@@ -1037,9 +1070,14 @@ static void raid10_unplug(struct blk_plug_cb *cb, bool from_schedule)
while (bio) { /* submit pending writes */
struct bio *next = bio->bi_next;
+ struct md_rdev *rdev = (void*)bio->bi_bdev;
bio->bi_next = NULL;
- if (unlikely((bio_op(bio) == REQ_OP_DISCARD) &&
- !blk_queue_discard(bdev_get_queue(bio->bi_bdev))))
+ bio->bi_bdev = rdev->bdev;
+ if (test_bit(Faulty, &rdev->flags)) {
+ bio->bi_error = -EIO;
+ bio_endio(bio);
+ } else if (unlikely((bio_op(bio) == REQ_OP_DISCARD) &&
+ !blk_queue_discard(bdev_get_queue(bio->bi_bdev))))
/* Just ignore it */
bio_endio(bio);
else
@@ -1083,6 +1121,7 @@ static void __make_request(struct mddev *mddev, struct bio *bio)
/* IO spans the reshape position. Need to wait for
* reshape to pass
*/
+ raid10_log(conf->mddev, "wait reshape");
allow_barrier(conf);
wait_event(conf->wait_barrier,
conf->reshape_progress <= bio->bi_iter.bi_sector ||
@@ -1099,11 +1138,12 @@ static void __make_request(struct mddev *mddev, struct bio *bio)
bio->bi_iter.bi_sector < conf->reshape_progress))) {
/* Need to update reshape_position in metadata */
mddev->reshape_position = conf->reshape_progress;
- set_mask_bits(&mddev->flags, 0,
- BIT(MD_CHANGE_DEVS) | BIT(MD_CHANGE_PENDING));
+ set_mask_bits(&mddev->sb_flags, 0,
+ BIT(MD_SB_CHANGE_DEVS) | BIT(MD_SB_CHANGE_PENDING));
md_wakeup_thread(mddev->thread);
+ raid10_log(conf->mddev, "wait reshape metadata");
wait_event(mddev->sb_wait,
- !test_bit(MD_CHANGE_PENDING, &mddev->flags));
+ !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags));
conf->reshape_safe = mddev->reshape_position;
}
@@ -1154,8 +1194,15 @@ read_again:
read_bio->bi_bdev = rdev->bdev;
read_bio->bi_end_io = raid10_end_read_request;
bio_set_op_attrs(read_bio, op, do_sync);
+ if (test_bit(FailFast, &rdev->flags) &&
+ test_bit(R10BIO_FailFast, &r10_bio->state))
+ read_bio->bi_opf |= MD_FAILFAST;
read_bio->bi_private = r10_bio;
+ if (mddev->gendisk)
+ trace_block_bio_remap(bdev_get_queue(read_bio->bi_bdev),
+ read_bio, disk_devt(mddev->gendisk),
+ r10_bio->sector);
if (max_sectors < r10_bio->sectors) {
/* Could not read all from this device, so we will
* need another r10_bio.
@@ -1195,6 +1242,7 @@ read_again:
*/
if (conf->pending_count >= max_queued_requests) {
md_wakeup_thread(mddev->thread);
+ raid10_log(mddev, "wait queued");
wait_event(conf->wait_barrier,
conf->pending_count < max_queued_requests);
}
@@ -1322,6 +1370,7 @@ retry_write:
}
}
allow_barrier(conf);
+ raid10_log(conf->mddev, "wait rdev %d blocked", blocked_rdev->raid_disk);
md_wait_for_blocked_rdev(blocked_rdev, mddev);
wait_barrier(conf);
goto retry_write;
@@ -1361,8 +1410,18 @@ retry_write:
mbio->bi_bdev = rdev->bdev;
mbio->bi_end_io = raid10_end_write_request;
bio_set_op_attrs(mbio, op, do_sync | do_fua);
+ if (test_bit(FailFast, &conf->mirrors[d].rdev->flags) &&
+ enough(conf, d))
+ mbio->bi_opf |= MD_FAILFAST;
mbio->bi_private = r10_bio;
+ if (conf->mddev->gendisk)
+ trace_block_bio_remap(bdev_get_queue(mbio->bi_bdev),
+ mbio, disk_devt(conf->mddev->gendisk),
+ r10_bio->sector);
+ /* flush_pending_writes() needs access to the rdev, so stash it in bi_bdev; it is restored there before submission */
+ mbio->bi_bdev = (void*)rdev;
+
atomic_inc(&r10_bio->remaining);
cb = blk_check_plugged(raid10_unplug, mddev,
@@ -1405,6 +1464,13 @@ retry_write:
bio_set_op_attrs(mbio, op, do_sync | do_fua);
mbio->bi_private = r10_bio;
+ if (conf->mddev->gendisk)
+ trace_block_bio_remap(bdev_get_queue(mbio->bi_bdev),
+ mbio, disk_devt(conf->mddev->gendisk),
+ r10_bio->sector);
+ /* flush_pending_writes() needs access to the rdev, so stash it in bi_bdev; it is restored there before submission */
+ mbio->bi_bdev = (void*)rdev;
+
atomic_inc(&r10_bio->remaining);
spin_lock_irqsave(&conf->device_lock, flags);
bio_list_add(&conf->pending_bio_list, mbio);
@@ -1586,14 +1652,13 @@ static void raid10_error(struct mddev *mddev, struct md_rdev *rdev)
set_bit(MD_RECOVERY_INTR, &mddev->recovery);
set_bit(Blocked, &rdev->flags);
set_bit(Faulty, &rdev->flags);
- set_mask_bits(&mddev->flags, 0,
- BIT(MD_CHANGE_DEVS) | BIT(MD_CHANGE_PENDING));
+ set_mask_bits(&mddev->sb_flags, 0,
+ BIT(MD_SB_CHANGE_DEVS) | BIT(MD_SB_CHANGE_PENDING));
spin_unlock_irqrestore(&conf->device_lock, flags);
- printk(KERN_ALERT
- "md/raid10:%s: Disk failure on %s, disabling device.\n"
- "md/raid10:%s: Operation continuing on %d devices.\n",
- mdname(mddev), bdevname(rdev->bdev, b),
- mdname(mddev), conf->geo.raid_disks - mddev->degraded);
+ pr_crit("md/raid10:%s: Disk failure on %s, disabling device.\n"
+ "md/raid10:%s: Operation continuing on %d devices.\n",
+ mdname(mddev), bdevname(rdev->bdev, b),
+ mdname(mddev), conf->geo.raid_disks - mddev->degraded);
}
static void print_conf(struct r10conf *conf)
@@ -1601,13 +1666,13 @@ static void print_conf(struct r10conf *conf)
int i;
struct md_rdev *rdev;
- printk(KERN_DEBUG "RAID10 conf printout:\n");
+ pr_debug("RAID10 conf printout:\n");
if (!conf) {
- printk(KERN_DEBUG "(!conf)\n");
+ pr_debug("(!conf)\n");
return;
}
- printk(KERN_DEBUG " --- wd:%d rd:%d\n", conf->geo.raid_disks - conf->mddev->degraded,
- conf->geo.raid_disks);
+ pr_debug(" --- wd:%d rd:%d\n", conf->geo.raid_disks - conf->mddev->degraded,
+ conf->geo.raid_disks);
/* This is only called with ->reconfig_mutex held, so
* rcu protection of rdev is not needed */
@@ -1615,10 +1680,10 @@ static void print_conf(struct r10conf *conf)
char b[BDEVNAME_SIZE];
rdev = conf->mirrors[i].rdev;
if (rdev)
- printk(KERN_DEBUG " disk %d, wo:%d, o:%d, dev:%s\n",
- i, !test_bit(In_sync, &rdev->flags),
- !test_bit(Faulty, &rdev->flags),
- bdevname(rdev->bdev,b));
+ pr_debug(" disk %d, wo:%d, o:%d, dev:%s\n",
+ i, !test_bit(In_sync, &rdev->flags),
+ !test_bit(Faulty, &rdev->flags),
+ bdevname(rdev->bdev,b));
}
}
@@ -1953,6 +2018,7 @@ static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio)
/* now find blocks with errors */
for (i=0 ; i < conf->copies ; i++) {
int j, d;
+ struct md_rdev *rdev;
tbio = r10_bio->devs[i].bio;
@@ -1960,6 +2026,8 @@ static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio)
continue;
if (i == first)
continue;
+ d = r10_bio->devs[i].devnum;
+ rdev = conf->mirrors[d].rdev;
if (!r10_bio->devs[i].bio->bi_error) {
/* We know that the bi_io_vec layout is the same for
* both 'first' and 'i', so we just compare them.
@@ -1982,6 +2050,10 @@ static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio)
if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery))
/* Don't fix anything. */
continue;
+ } else if (test_bit(FailFast, &rdev->flags)) {
+ /* Just give up on this device */
+ md_error(rdev->mddev, rdev);
+ continue;
}
/* Ok, we need to write this bio, either to correct an
* inconsistency or to correct an unreadable block.
@@ -1999,11 +2071,12 @@ static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio)
bio_copy_data(tbio, fbio);
- d = r10_bio->devs[i].devnum;
atomic_inc(&conf->mirrors[d].rdev->nr_pending);
atomic_inc(&r10_bio->remaining);
md_sync_acct(conf->mirrors[d].rdev->bdev, bio_sectors(tbio));
+ if (test_bit(FailFast, &conf->mirrors[d].rdev->flags))
+ tbio->bi_opf |= MD_FAILFAST;
tbio->bi_iter.bi_sector += conf->mirrors[d].rdev->data_offset;
tbio->bi_bdev = conf->mirrors[d].rdev->bdev;
generic_make_request(tbio);
@@ -2109,10 +2182,8 @@ static void fix_recovery_read_error(struct r10bio *r10_bio)
ok = rdev_set_badblocks(rdev2, addr, s, 0);
if (!ok) {
/* just abort the recovery */
- printk(KERN_NOTICE
- "md/raid10:%s: recovery aborted"
- " due to read error\n",
- mdname(mddev));
+ pr_notice("md/raid10:%s: recovery aborted due to read error\n",
+ mdname(mddev));
conf->mirrors[dw].recovery_disabled
= mddev->recovery_disabled;
@@ -2259,14 +2330,11 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
char b[BDEVNAME_SIZE];
bdevname(rdev->bdev, b);
- printk(KERN_NOTICE
- "md/raid10:%s: %s: Raid device exceeded "
- "read_error threshold [cur %d:max %d]\n",
- mdname(mddev), b,
- atomic_read(&rdev->read_errors), max_read_errors);
- printk(KERN_NOTICE
- "md/raid10:%s: %s: Failing raid device\n",
- mdname(mddev), b);
+ pr_notice("md/raid10:%s: %s: Raid device exceeded read_error threshold [cur %d:max %d]\n",
+ mdname(mddev), b,
+ atomic_read(&rdev->read_errors), max_read_errors);
+ pr_notice("md/raid10:%s: %s: Failing raid device\n",
+ mdname(mddev), b);
md_error(mddev, rdev);
r10_bio->devs[r10_bio->read_slot].bio = IO_BLOCKED;
return;
@@ -2356,20 +2424,16 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
s, conf->tmppage, WRITE)
== 0) {
/* Well, this device is dead */
- printk(KERN_NOTICE
- "md/raid10:%s: read correction "
- "write failed"
- " (%d sectors at %llu on %s)\n",
- mdname(mddev), s,
- (unsigned long long)(
- sect +
- choose_data_offset(r10_bio,
- rdev)),
- bdevname(rdev->bdev, b));
- printk(KERN_NOTICE "md/raid10:%s: %s: failing "
- "drive\n",
- mdname(mddev),
- bdevname(rdev->bdev, b));
+ pr_notice("md/raid10:%s: read correction write failed (%d sectors at %llu on %s)\n",
+ mdname(mddev), s,
+ (unsigned long long)(
+ sect +
+ choose_data_offset(r10_bio,
+ rdev)),
+ bdevname(rdev->bdev, b));
+ pr_notice("md/raid10:%s: %s: failing drive\n",
+ mdname(mddev),
+ bdevname(rdev->bdev, b));
}
rdev_dec_pending(rdev, mddev);
rcu_read_lock();
@@ -2397,24 +2461,18 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
READ)) {
case 0:
/* Well, this device is dead */
- printk(KERN_NOTICE
- "md/raid10:%s: unable to read back "
- "corrected sectors"
- " (%d sectors at %llu on %s)\n",
+ pr_notice("md/raid10:%s: unable to read back corrected sectors (%d sectors at %llu on %s)\n",
mdname(mddev), s,
(unsigned long long)(
sect +
choose_data_offset(r10_bio, rdev)),
bdevname(rdev->bdev, b));
- printk(KERN_NOTICE "md/raid10:%s: %s: failing "
- "drive\n",
+ pr_notice("md/raid10:%s: %s: failing drive\n",
mdname(mddev),
bdevname(rdev->bdev, b));
break;
case 1:
- printk(KERN_INFO
- "md/raid10:%s: read error corrected"
- " (%d sectors at %llu on %s)\n",
+ pr_info("md/raid10:%s: read error corrected (%d sectors at %llu on %s)\n",
mdname(mddev), s,
(unsigned long long)(
sect +
@@ -2503,6 +2561,8 @@ static void handle_read_error(struct mddev *mddev, struct r10bio *r10_bio)
char b[BDEVNAME_SIZE];
unsigned long do_sync;
int max_sectors;
+ dev_t bio_dev;
+ sector_t bio_last_sector;
/* we got a read error. Maybe the drive is bad. Maybe just
* the block and we can fix it.
@@ -2514,38 +2574,38 @@ static void handle_read_error(struct mddev *mddev, struct r10bio *r10_bio)
*/
bio = r10_bio->devs[slot].bio;
bdevname(bio->bi_bdev, b);
+ bio_dev = bio->bi_bdev->bd_dev;
+ bio_last_sector = r10_bio->devs[slot].addr + rdev->data_offset + r10_bio->sectors;
bio_put(bio);
r10_bio->devs[slot].bio = NULL;
- if (mddev->ro == 0) {
+ if (mddev->ro)
+ r10_bio->devs[slot].bio = IO_BLOCKED;
+ else if (!test_bit(FailFast, &rdev->flags)) {
freeze_array(conf, 1);
fix_read_error(conf, mddev, r10_bio);
unfreeze_array(conf);
} else
- r10_bio->devs[slot].bio = IO_BLOCKED;
+ md_error(mddev, rdev);
rdev_dec_pending(rdev, mddev);
read_more:
rdev = read_balance(conf, r10_bio, &max_sectors);
if (rdev == NULL) {
- printk(KERN_ALERT "md/raid10:%s: %s: unrecoverable I/O"
- " read error for block %llu\n",
- mdname(mddev), b,
- (unsigned long long)r10_bio->sector);
+ pr_crit_ratelimited("md/raid10:%s: %s: unrecoverable I/O read error for block %llu\n",
+ mdname(mddev), b,
+ (unsigned long long)r10_bio->sector);
raid_end_bio_io(r10_bio);
return;
}
do_sync = (r10_bio->master_bio->bi_opf & REQ_SYNC);
slot = r10_bio->read_slot;
- printk_ratelimited(
- KERN_ERR
- "md/raid10:%s: %s: redirecting "
- "sector %llu to another mirror\n",
- mdname(mddev),
- bdevname(rdev->bdev, b),
- (unsigned long long)r10_bio->sector);
+ pr_err_ratelimited("md/raid10:%s: %s: redirecting sector %llu to another mirror\n",
+ mdname(mddev),
+ bdevname(rdev->bdev, b),
+ (unsigned long long)r10_bio->sector);
bio = bio_clone_mddev(r10_bio->master_bio,
GFP_NOIO, mddev);
bio_trim(bio, r10_bio->sector - bio->bi_iter.bi_sector, max_sectors);
@@ -2555,8 +2615,15 @@ read_more:
+ choose_data_offset(r10_bio, rdev);
bio->bi_bdev = rdev->bdev;
bio_set_op_attrs(bio, REQ_OP_READ, do_sync);
+ if (test_bit(FailFast, &rdev->flags) &&
+ test_bit(R10BIO_FailFast, &r10_bio->state))
+ bio->bi_opf |= MD_FAILFAST;
bio->bi_private = r10_bio;
bio->bi_end_io = raid10_end_read_request;
+ trace_block_bio_remap(bdev_get_queue(bio->bi_bdev),
+ bio, bio_dev,
+ bio_last_sector - r10_bio->sectors);
+
if (max_sectors < r10_bio->sectors) {
/* Drat - have to split this up more */
struct bio *mbio = r10_bio->master_bio;
@@ -2694,10 +2761,10 @@ static void raid10d(struct md_thread *thread)
md_check_recovery(mddev);
if (!list_empty_careful(&conf->bio_end_io_list) &&
- !test_bit(MD_CHANGE_PENDING, &mddev->flags)) {
+ !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)) {
LIST_HEAD(tmp);
spin_lock_irqsave(&conf->device_lock, flags);
- if (!test_bit(MD_CHANGE_PENDING, &mddev->flags)) {
+ if (!test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)) {
while (!list_empty(&conf->bio_end_io_list)) {
list_move(conf->bio_end_io_list.prev, &tmp);
conf->nr_queued--;
@@ -2755,7 +2822,7 @@ static void raid10d(struct md_thread *thread)
}
cond_resched();
- if (mddev->flags & ~(1<<MD_CHANGE_PENDING))
+ if (mddev->sb_flags & ~(1<<MD_SB_CHANGE_PENDING))
md_check_recovery(mddev);
}
blk_finish_plug(&plug);
@@ -3072,6 +3139,8 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
bio->bi_private = r10_bio;
bio->bi_end_io = end_sync_read;
bio_set_op_attrs(bio, REQ_OP_READ, 0);
+ if (test_bit(FailFast, &rdev->flags))
+ bio->bi_opf |= MD_FAILFAST;
from_addr = r10_bio->devs[j].addr;
bio->bi_iter.bi_sector = from_addr +
rdev->data_offset;
@@ -3160,8 +3229,7 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
if (!any_working) {
if (!test_and_set_bit(MD_RECOVERY_INTR,
&mddev->recovery))
- printk(KERN_INFO "md/raid10:%s: insufficient "
- "working devices for recovery.\n",
+ pr_warn("md/raid10:%s: insufficient working devices for recovery.\n",
mdname(mddev));
mirror->recovery_disabled
= mddev->recovery_disabled;
@@ -3178,6 +3246,23 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
rdev_dec_pending(mrdev, mddev);
if (mreplace)
rdev_dec_pending(mreplace, mddev);
+ if (r10_bio->devs[0].bio->bi_opf & MD_FAILFAST) {
+ /* Only want this if there is elsewhere to
+ * read from. 'j' is currently the first
+ * readable copy.
+ */
+ int targets = 1;
+ for (; j < conf->copies; j++) {
+ int d = r10_bio->devs[j].devnum;
+ if (conf->mirrors[d].rdev &&
+ test_bit(In_sync,
+ &conf->mirrors[d].rdev->flags))
+ targets++;
+ }
+ if (targets == 1)
+ r10_bio->devs[0].bio->bi_opf
+ &= ~MD_FAILFAST;
+ }
}
if (biolist == NULL) {
while (r10_bio) {
@@ -3256,6 +3341,8 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
bio->bi_private = r10_bio;
bio->bi_end_io = end_sync_read;
bio_set_op_attrs(bio, REQ_OP_READ, 0);
+ if (test_bit(FailFast, &conf->mirrors[d].rdev->flags))
+ bio->bi_opf |= MD_FAILFAST;
bio->bi_iter.bi_sector = sector + rdev->data_offset;
bio->bi_bdev = rdev->bdev;
count++;
@@ -3279,6 +3366,8 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
bio->bi_private = r10_bio;
bio->bi_end_io = end_sync_write;
bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
+ if (test_bit(FailFast, &conf->mirrors[d].rdev->flags))
+ bio->bi_opf |= MD_FAILFAST;
bio->bi_iter.bi_sector = sector + rdev->data_offset;
bio->bi_bdev = rdev->bdev;
count++;
@@ -3489,15 +3578,14 @@ static struct r10conf *setup_conf(struct mddev *mddev)
copies = setup_geo(&geo, mddev, geo_new);
if (copies == -2) {
- printk(KERN_ERR "md/raid10:%s: chunk size must be "
- "at least PAGE_SIZE(%ld) and be a power of 2.\n",
- mdname(mddev), PAGE_SIZE);
+ pr_warn("md/raid10:%s: chunk size must be at least PAGE_SIZE(%ld) and be a power of 2.\n",
+ mdname(mddev), PAGE_SIZE);
goto out;
}
if (copies < 2 || copies > mddev->raid_disks) {
- printk(KERN_ERR "md/raid10:%s: unsupported raid10 layout: 0x%8x\n",
- mdname(mddev), mddev->new_layout);
+ pr_warn("md/raid10:%s: unsupported raid10 layout: 0x%8x\n",
+ mdname(mddev), mddev->new_layout);
goto out;
}
@@ -3557,9 +3645,6 @@ static struct r10conf *setup_conf(struct mddev *mddev)
return conf;
out:
- if (err == -ENOMEM)
- printk(KERN_ERR "md/raid10:%s: couldn't allocate memory.\n",
- mdname(mddev));
if (conf) {
mempool_destroy(conf->r10bio_pool);
kfree(conf->mirrors);
@@ -3656,7 +3741,7 @@ static int raid10_run(struct mddev *mddev)
}
/* need to check that every block has at least one working mirror */
if (!enough(conf, -1)) {
- printk(KERN_ERR "md/raid10:%s: not enough operational mirrors.\n",
+ pr_err("md/raid10:%s: not enough operational mirrors.\n",
mdname(mddev));
goto out_free_conf;
}
@@ -3698,11 +3783,9 @@ static int raid10_run(struct mddev *mddev)
}
if (mddev->recovery_cp != MaxSector)
- printk(KERN_NOTICE "md/raid10:%s: not clean"
- " -- starting background reconstruction\n",
- mdname(mddev));
- printk(KERN_INFO
- "md/raid10:%s: active with %d out of %d devices\n",
+ pr_notice("md/raid10:%s: not clean -- starting background reconstruction\n",
+ mdname(mddev));
+ pr_info("md/raid10:%s: active with %d out of %d devices\n",
mdname(mddev), conf->geo.raid_disks - mddev->degraded,
conf->geo.raid_disks);
/*
@@ -3712,6 +3795,7 @@ static int raid10_run(struct mddev *mddev)
size = raid10_size(mddev, 0, 0);
md_set_array_sectors(mddev, size);
mddev->resync_max_sectors = size;
+ set_bit(MD_FAILFAST_SUPPORTED, &mddev->flags);
if (mddev->queue) {
int stripe = conf->geo.raid_disks *
@@ -3739,7 +3823,7 @@ static int raid10_run(struct mddev *mddev)
if (max(before_length, after_length) > min_offset_diff) {
/* This cannot work */
- printk("md/raid10: offset difference not enough to continue reshape\n");
+ pr_warn("md/raid10: offset difference not enough to continue reshape\n");
goto out_free_conf;
}
conf->offset_diff = min_offset_diff;
@@ -3846,8 +3930,8 @@ static void *raid10_takeover_raid0(struct mddev *mddev, sector_t size, int devs)
struct r10conf *conf;
if (mddev->degraded > 0) {
- printk(KERN_ERR "md/raid10:%s: Error: degraded raid0!\n",
- mdname(mddev));
+ pr_warn("md/raid10:%s: Error: degraded raid0!\n",
+ mdname(mddev));
return ERR_PTR(-EINVAL);
}
sector_div(size, devs);
@@ -3887,9 +3971,8 @@ static void *raid10_takeover(struct mddev *mddev)
/* for raid0 takeover only one zone is supported */
raid0_conf = mddev->private;
if (raid0_conf->nr_strip_zones > 1) {
- printk(KERN_ERR "md/raid10:%s: cannot takeover raid 0"
- " with more than one zone.\n",
- mdname(mddev));
+ pr_warn("md/raid10:%s: cannot takeover raid 0 with more than one zone.\n",
+ mdname(mddev));
return ERR_PTR(-EINVAL);
}
return raid10_takeover_raid0(mddev,
@@ -4078,8 +4161,8 @@ static int raid10_start_reshape(struct mddev *mddev)
sector_t size = raid10_size(mddev, 0, 0);
if (size < mddev->array_sectors) {
spin_unlock_irq(&conf->device_lock);
- printk(KERN_ERR "md/raid10:%s: array size must be reduce before number of disks\n",
- mdname(mddev));
+ pr_warn("md/raid10:%s: array size must be reduce before number of disks\n",
+ mdname(mddev));
return -EINVAL;
}
mddev->resync_max_sectors = size;
@@ -4126,7 +4209,7 @@ static int raid10_start_reshape(struct mddev *mddev)
spin_unlock_irq(&conf->device_lock);
mddev->raid_disks = conf->geo.raid_disks;
mddev->reshape_position = conf->reshape_progress;
- set_bit(MD_CHANGE_DEVS, &mddev->flags);
+ set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
@@ -4321,9 +4404,9 @@ static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr,
else
mddev->curr_resync_completed = conf->reshape_progress;
conf->reshape_checkpoint = jiffies;
- set_bit(MD_CHANGE_DEVS, &mddev->flags);
+ set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
md_wakeup_thread(mddev->thread);
- wait_event(mddev->sb_wait, mddev->flags == 0 ||
+ wait_event(mddev->sb_wait, mddev->sb_flags == 0 ||
test_bit(MD_RECOVERY_INTR, &mddev->recovery));
if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
allow_barrier(conf);
diff --git a/drivers/md/raid10.h b/drivers/md/raid10.h
index 18ec1f7a98bf..3162615e57bd 100644
--- a/drivers/md/raid10.h
+++ b/drivers/md/raid10.h
@@ -156,5 +156,7 @@ enum r10bio_state {
* flag is set
*/
R10BIO_Previous,
+/* failfast devices did receive failfast requests. */
+ R10BIO_FailFast,
};
#endif
diff --git a/drivers/md/raid5-cache.c b/drivers/md/raid5-cache.c
index a227a9f3ee65..d7bfb6fc8aef 100644
--- a/drivers/md/raid5-cache.c
+++ b/drivers/md/raid5-cache.c
@@ -1,5 +1,6 @@
/*
* Copyright (C) 2015 Shaohua Li <shli@fb.com>
+ * Copyright (C) 2016 Song Liu <songliubraving@fb.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -18,8 +19,10 @@
#include <linux/raid/md_p.h>
#include <linux/crc32c.h>
#include <linux/random.h>
+#include <linux/kthread.h>
#include "md.h"
#include "raid5.h"
+#include "bitmap.h"
/*
* metadata/data stored in disk with 4k size unit (a block) regardless
@@ -28,18 +31,70 @@
#define BLOCK_SECTORS (8)
/*
- * reclaim runs every 1/4 disk size or 10G reclaimable space. This can prevent
- * recovery scans a very long log
+ * log->max_free_space is min(1/4 disk size, 10G reclaimable space).
+ *
+ * In write through mode, the reclaim runs every log->max_free_space.
+ * This can prevent the recovery scans for too long
*/
#define RECLAIM_MAX_FREE_SPACE (10 * 1024 * 1024 * 2) /* sector */
#define RECLAIM_MAX_FREE_SPACE_SHIFT (2)
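/*
 * Worked example (hypothetical sizes): on a 16G log device, 1/4 of the
 * device is 4G, so max_free_space = min(4G, 10G) = 4G; on a 100G log
 * device the 10G cap wins instead.
 */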
+/* wake up reclaim thread periodically */
+#define R5C_RECLAIM_WAKEUP_INTERVAL (30 * HZ)
+/* start flush with these full stripes */
+#define R5C_FULL_STRIPE_FLUSH_BATCH 256
+/* reclaim stripes in groups */
+#define R5C_RECLAIM_STRIPE_GROUP (NR_STRIPE_HASH_LOCKS * 2)
+
/*
* We only need 2 bios per I/O unit to make progress, but ensure we
* have a few more available to not get too tight.
*/
#define R5L_POOL_SIZE 4
+/*
+ * r5c journal modes of the array: write-back or write-through.
+ * write-through mode has identical behavior as existing log only
+ * implementation.
+ */
+enum r5c_journal_mode {
+ R5C_JOURNAL_MODE_WRITE_THROUGH = 0,
+ R5C_JOURNAL_MODE_WRITE_BACK = 1,
+};
+
+static char *r5c_journal_mode_str[] = {"write-through",
+ "write-back"};
+/*
+ * raid5 cache state machine
+ *
+ * With the RAID cache, each stripe works in two phases:
+ * - caching phase
+ * - writing-out phase
+ *
+ * These two phases are controlled by bit STRIPE_R5C_CACHING:
+ * if STRIPE_R5C_CACHING == 0, the stripe is in writing-out phase
+ * if STRIPE_R5C_CACHING == 1, the stripe is in caching phase
+ *
+ * When there is no journal, or the journal is in write-through mode,
+ * the stripe is always in writing-out phase.
+ *
+ * For write-back journal, the stripe is sent to caching phase on write
+ * (r5c_try_caching_write). r5c_make_stripe_write_out() kicks off
+ * the write-out phase by clearing STRIPE_R5C_CACHING.
+ *
+ * Stripes in caching phase do not write the raid disks. Instead, all
+ * writes are committed from the log device. Therefore, a stripe in
+ * caching phase handles writes as:
+ * - write to log device
+ * - return IO
+ *
+ * Stripes in writing-out phase handle writes as:
+ * - calculate parity
+ * - write pending data and parity to journal
+ * - write data and parity to raid disks
+ * - return IO for pending writes
+ */
+
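+/*
+ * Illustrative sketch, not part of this patch (the helper name is
+ * hypothetical): the phase of a stripe reduces to a single bit test.
+ */
+static inline bool r5c_stripe_is_caching(struct stripe_head *sh)
+{
+ /* caching phase iff STRIPE_R5C_CACHING is set */
+ return test_bit(STRIPE_R5C_CACHING, &sh->state);
+}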
struct r5l_log {
struct md_rdev *rdev;
@@ -58,7 +113,6 @@ struct r5l_log {
u64 seq; /* log head sequence */
sector_t next_checkpoint;
- u64 next_cp_seq;
struct mutex io_mutex;
struct r5l_io_unit *current_io; /* current io_unit accepting new data */
@@ -96,6 +150,18 @@ struct r5l_log {
spinlock_t no_space_stripes_lock;
bool need_cache_flush;
+
+ /* for r5c_cache */
+ enum r5c_journal_mode r5c_journal_mode;
+
+ /* all stripes in r5cache, in the order of seq at sh->log_start */
+ struct list_head stripe_in_journal_list;
+
+ spinlock_t stripe_in_journal_lock;
+ atomic_t stripe_in_journal_count;
+
+ /* to submit async io_units, to fulfill ordering of flush */
+ struct work_struct deferred_io_work;
};
/*
@@ -122,6 +188,18 @@ struct r5l_io_unit {
int state;
bool need_split_bio;
+ struct bio *split_bio;
+
+ unsigned int has_flush:1; /* include flush request */
+ unsigned int has_fua:1; /* include fua request */
+ unsigned int has_null_flush:1; /* include empty flush request */
+ /*
+ * the io isn't submitted yet; a flush/fua request can only be
+ * dispatched once it is the first IO in the running_ios list
+ */
+ unsigned int io_deferred:1;
+
+ struct bio_list flush_barriers; /* size == 0 flush bios */
};
/* r5l_io_unit state */
@@ -133,6 +211,12 @@ enum r5l_io_unit_state {
IO_UNIT_STRIPE_END = 3, /* stripes data finished writing to raid */
};
+bool r5c_is_writeback(struct r5l_log *log)
+{
+ return (log != NULL &&
+ log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_BACK);
+}
+
static sector_t r5l_ring_add(struct r5l_log *log, sector_t start, sector_t inc)
{
start += inc;
@@ -168,12 +252,235 @@ static void __r5l_set_io_unit_state(struct r5l_io_unit *io,
io->state = state;
}
+static void
+r5c_return_dev_pending_writes(struct r5conf *conf, struct r5dev *dev,
+ struct bio_list *return_bi)
+{
+ struct bio *wbi, *wbi2;
+
+ wbi = dev->written;
+ dev->written = NULL;
+ while (wbi && wbi->bi_iter.bi_sector <
+ dev->sector + STRIPE_SECTORS) {
+ wbi2 = r5_next_bio(wbi, dev->sector);
+ if (!raid5_dec_bi_active_stripes(wbi)) {
+ md_write_end(conf->mddev);
+ bio_list_add(return_bi, wbi);
+ }
+ wbi = wbi2;
+ }
+}
+
+void r5c_handle_cached_data_endio(struct r5conf *conf,
+ struct stripe_head *sh, int disks, struct bio_list *return_bi)
+{
+ int i;
+
+ for (i = sh->disks; i--; ) {
+ if (sh->dev[i].written) {
+ set_bit(R5_UPTODATE, &sh->dev[i].flags);
+ r5c_return_dev_pending_writes(conf, &sh->dev[i],
+ return_bi);
+ bitmap_endwrite(conf->mddev->bitmap, sh->sector,
+ STRIPE_SECTORS,
+ !test_bit(STRIPE_DEGRADED, &sh->state),
+ 0);
+ }
+ }
+}
+
+/* Check whether we should flush some stripes to free up stripe cache */
+void r5c_check_stripe_cache_usage(struct r5conf *conf)
+{
+ int total_cached;
+
+ if (!r5c_is_writeback(conf->log))
+ return;
+
+ total_cached = atomic_read(&conf->r5c_cached_partial_stripes) +
+ atomic_read(&conf->r5c_cached_full_stripes);
+
+ /*
+ * The following condition is true for either of the following:
+ * - stripe cache pressure high:
+ * total_cached > 3/4 min_nr_stripes ||
+ * empty_inactive_list_nr > 0
+ * - stripe cache pressure moderate:
+ * total_cached > 1/2 min_nr_stripes
+ */
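+ /* e.g. with min_nr_stripes == 256 (hypothetical), this fires once
+ * more than 128 stripes are cached or an inactive list runs empty */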
+ if (total_cached > conf->min_nr_stripes * 1 / 2 ||
+ atomic_read(&conf->empty_inactive_list_nr) > 0)
+ r5l_wake_reclaim(conf->log, 0);
+}
+
+/*
+ * flush cache when there are R5C_FULL_STRIPE_FLUSH_BATCH or more full
+ * stripes in the cache
+ */
+void r5c_check_cached_full_stripe(struct r5conf *conf)
+{
+ if (!r5c_is_writeback(conf->log))
+ return;
+
+ /*
+ * wake up reclaim for R5C_FULL_STRIPE_FLUSH_BATCH cached stripes
+ * or a full stripe (chunk size / 4k stripes).
+ */
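+ /* e.g. (hypothetical) a 512 KiB chunk is 1024 sectors; with 4 KiB
+ * stripe pages that is 1024 >> 3 = 128, so min(256, 128) = 128 */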
+ if (atomic_read(&conf->r5c_cached_full_stripes) >=
+ min(R5C_FULL_STRIPE_FLUSH_BATCH,
+ conf->chunk_sectors >> STRIPE_SHIFT))
+ r5l_wake_reclaim(conf->log, 0);
+}
+
+/*
+ * Total log space (in sectors) needed to flush all data in cache
+ *
+ * Currently, writing-out phase automatically includes all pending writes
+ * to the same sector. So the reclaim of each stripe takes up to
+ * (conf->raid_disks + 1) pages of log space.
+ *
+ * To totally avoid deadlock due to log space, the code reserves
+ * (conf->raid_disks + 1) pages for each stripe in cache, which is not
+ * necessary in most cases.
+ *
+ * To improve this, we will need writing-out phase to be able to NOT include
+ * pending writes, which will reduce the requirement to
+ * (conf->max_degraded + 1) pages per stripe in cache.
+ */
+static sector_t r5c_log_required_to_flush_cache(struct r5conf *conf)
+{
+ struct r5l_log *log = conf->log;
+
+ if (!r5c_is_writeback(log))
+ return 0;
+
+ return BLOCK_SECTORS * (conf->raid_disks + 1) *
+ atomic_read(&log->stripe_in_journal_count);
+}
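+/* e.g. with raid_disks == 4 and 100 stripes in the journal (hypothetical
+ * numbers), this reserves 8 * (4 + 1) * 100 = 4000 sectors, about 2 MiB */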
+
+/*
+ * evaluate log space usage and update R5C_LOG_TIGHT and R5C_LOG_CRITICAL
+ *
+ * R5C_LOG_TIGHT is set when free space on the log device is less than 3x of
+ * reclaim_required_space. R5C_LOG_CRITICAL is set when free space on the log
+ * device is less than 2x of reclaim_required_space.
+ */
+static inline void r5c_update_log_state(struct r5l_log *log)
+{
+ struct r5conf *conf = log->rdev->mddev->private;
+ sector_t free_space;
+ sector_t reclaim_space;
+ bool wake_reclaim = false;
+
+ if (!r5c_is_writeback(log))
+ return;
+
+ free_space = r5l_ring_distance(log, log->log_start,
+ log->last_checkpoint);
+ reclaim_space = r5c_log_required_to_flush_cache(conf);
+ if (free_space < 2 * reclaim_space)
+ set_bit(R5C_LOG_CRITICAL, &conf->cache_state);
+ else {
+ if (test_bit(R5C_LOG_CRITICAL, &conf->cache_state))
+ wake_reclaim = true;
+ clear_bit(R5C_LOG_CRITICAL, &conf->cache_state);
+ }
+ if (free_space < 3 * reclaim_space)
+ set_bit(R5C_LOG_TIGHT, &conf->cache_state);
+ else
+ clear_bit(R5C_LOG_TIGHT, &conf->cache_state);
+
+ if (wake_reclaim)
+ r5l_wake_reclaim(log, 0);
+}
+
+/*
+ * Put the stripe into writing-out phase by clearing STRIPE_R5C_CACHING.
+ * This function should only be called in write-back mode.
+ */
+void r5c_make_stripe_write_out(struct stripe_head *sh)
+{
+ struct r5conf *conf = sh->raid_conf;
+ struct r5l_log *log = conf->log;
+
+ BUG_ON(!r5c_is_writeback(log));
+
+ WARN_ON(!test_bit(STRIPE_R5C_CACHING, &sh->state));
+ clear_bit(STRIPE_R5C_CACHING, &sh->state);
+
+ if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
+ atomic_inc(&conf->preread_active_stripes);
+
+ if (test_and_clear_bit(STRIPE_R5C_PARTIAL_STRIPE, &sh->state)) {
+ BUG_ON(atomic_read(&conf->r5c_cached_partial_stripes) == 0);
+ atomic_dec(&conf->r5c_cached_partial_stripes);
+ }
+
+ if (test_and_clear_bit(STRIPE_R5C_FULL_STRIPE, &sh->state)) {
+ BUG_ON(atomic_read(&conf->r5c_cached_full_stripes) == 0);
+ atomic_dec(&conf->r5c_cached_full_stripes);
+ }
+}
+
+static void r5c_handle_data_cached(struct stripe_head *sh)
+{
+ int i;
+
+ for (i = sh->disks; i--; )
+ if (test_and_clear_bit(R5_Wantwrite, &sh->dev[i].flags)) {
+ set_bit(R5_InJournal, &sh->dev[i].flags);
+ clear_bit(R5_LOCKED, &sh->dev[i].flags);
+ }
+ clear_bit(STRIPE_LOG_TRAPPED, &sh->state);
+}
+
+/*
+ * this journal write must contain full parity,
+ * it may also contain some data pages
+ */
+static void r5c_handle_parity_cached(struct stripe_head *sh)
+{
+ int i;
+
+ for (i = sh->disks; i--; )
+ if (test_bit(R5_InJournal, &sh->dev[i].flags))
+ set_bit(R5_Wantwrite, &sh->dev[i].flags);
+}
+
+/*
+ * Setting proper flags after writing (or flushing) data and/or parity to the
+ * log device. This is called from r5l_log_endio() or r5l_log_flush_endio().
+ */
+static void r5c_finish_cache_stripe(struct stripe_head *sh)
+{
+ struct r5l_log *log = sh->raid_conf->log;
+
+ if (log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_THROUGH) {
+ BUG_ON(test_bit(STRIPE_R5C_CACHING, &sh->state));
+ /*
+ * Set R5_InJournal for parity dev[pd_idx]. This means
+ * all data AND parity are in the journal. For RAID 6, it is
+ * NOT necessary to set the flag for dev[qd_idx], as the
+ * two parities are written out together.
+ */
+ set_bit(R5_InJournal, &sh->dev[sh->pd_idx].flags);
+ } else if (test_bit(STRIPE_R5C_CACHING, &sh->state)) {
+ r5c_handle_data_cached(sh);
+ } else {
+ r5c_handle_parity_cached(sh);
+ set_bit(R5_InJournal, &sh->dev[sh->pd_idx].flags);
+ }
+}
+
static void r5l_io_run_stripes(struct r5l_io_unit *io)
{
struct stripe_head *sh, *next;
list_for_each_entry_safe(sh, next, &io->stripe_list, log_list) {
list_del_init(&sh->log_list);
+
+ r5c_finish_cache_stripe(sh);
+
set_bit(STRIPE_HANDLE, &sh->state);
raid5_release_stripe(sh);
}
@@ -209,9 +516,11 @@ static void r5l_move_to_end_ios(struct r5l_log *log)
}
}
+static void __r5l_stripe_write_finished(struct r5l_io_unit *io);
static void r5l_log_endio(struct bio *bio)
{
struct r5l_io_unit *io = bio->bi_private;
+ struct r5l_io_unit *io_deferred;
struct r5l_log *log = io->log;
unsigned long flags;
@@ -227,18 +536,89 @@ static void r5l_log_endio(struct bio *bio)
r5l_move_to_end_ios(log);
else
r5l_log_run_stripes(log);
+ if (!list_empty(&log->running_ios)) {
+ /*
+ * FLUSH/FUA io_unit is deferred because of ordering, now we
+ * can dispatch it
+ */
+ io_deferred = list_first_entry(&log->running_ios,
+ struct r5l_io_unit, log_sibling);
+ if (io_deferred->io_deferred)
+ schedule_work(&log->deferred_io_work);
+ }
+
spin_unlock_irqrestore(&log->io_list_lock, flags);
if (log->need_cache_flush)
md_wakeup_thread(log->rdev->mddev->thread);
+
+ if (io->has_null_flush) {
+ struct bio *bi;
+
+ WARN_ON(bio_list_empty(&io->flush_barriers));
+ while ((bi = bio_list_pop(&io->flush_barriers)) != NULL) {
+ bio_endio(bi);
+ atomic_dec(&io->pending_stripe);
+ }
+ if (atomic_read(&io->pending_stripe) == 0)
+ __r5l_stripe_write_finished(io);
+ }
+}
+
+static void r5l_do_submit_io(struct r5l_log *log, struct r5l_io_unit *io)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&log->io_list_lock, flags);
+ __r5l_set_io_unit_state(io, IO_UNIT_IO_START);
+ spin_unlock_irqrestore(&log->io_list_lock, flags);
+
+ if (io->has_flush)
+ io->current_bio->bi_opf |= REQ_PREFLUSH;
+ if (io->has_fua)
+ io->current_bio->bi_opf |= REQ_FUA;
+ submit_bio(io->current_bio);
+
+ if (!io->split_bio)
+ return;
+
+ if (io->has_flush)
+ io->split_bio->bi_opf |= REQ_PREFLUSH;
+ if (io->has_fua)
+ io->split_bio->bi_opf |= REQ_FUA;
+ submit_bio(io->split_bio);
+}
+
+/* deferred io_unit will be dispatched here */
+static void r5l_submit_io_async(struct work_struct *work)
+{
+ struct r5l_log *log = container_of(work, struct r5l_log,
+ deferred_io_work);
+ struct r5l_io_unit *io = NULL;
+ unsigned long flags;
+
+ spin_lock_irqsave(&log->io_list_lock, flags);
+ if (!list_empty(&log->running_ios)) {
+ io = list_first_entry(&log->running_ios, struct r5l_io_unit,
+ log_sibling);
+ if (!io->io_deferred)
+ io = NULL;
+ else
+ io->io_deferred = 0;
+ }
+ spin_unlock_irqrestore(&log->io_list_lock, flags);
+ if (io)
+ r5l_do_submit_io(log, io);
}
static void r5l_submit_current_io(struct r5l_log *log)
{
struct r5l_io_unit *io = log->current_io;
+ struct bio *bio;
struct r5l_meta_block *block;
unsigned long flags;
u32 crc;
+ bool do_submit = true;
if (!io)
return;
@@ -247,13 +627,20 @@ static void r5l_submit_current_io(struct r5l_log *log)
block->meta_size = cpu_to_le32(io->meta_offset);
crc = crc32c_le(log->uuid_checksum, block, PAGE_SIZE);
block->checksum = cpu_to_le32(crc);
+ bio = io->current_bio;
log->current_io = NULL;
spin_lock_irqsave(&log->io_list_lock, flags);
- __r5l_set_io_unit_state(io, IO_UNIT_IO_START);
+ if (io->has_flush || io->has_fua) {
+ if (io != list_first_entry(&log->running_ios,
+ struct r5l_io_unit, log_sibling)) {
+ io->io_deferred = 1;
+ do_submit = false;
+ }
+ }
spin_unlock_irqrestore(&log->io_list_lock, flags);
-
- submit_bio(io->current_bio);
+ if (do_submit)
+ r5l_do_submit_io(log, io);
}
static struct bio *r5l_bio_alloc(struct r5l_log *log)
@@ -271,6 +658,7 @@ static void r5_reserve_log_entry(struct r5l_log *log, struct r5l_io_unit *io)
{
log->log_start = r5l_ring_add(log, log->log_start, BLOCK_SECTORS);
+ r5c_update_log_state(log);
/*
* If we filled up the log device start from the beginning again,
* which will require a new bio.
@@ -297,6 +685,7 @@ static struct r5l_io_unit *r5l_new_meta(struct r5l_log *log)
io->log = log;
INIT_LIST_HEAD(&io->log_sibling);
INIT_LIST_HEAD(&io->stripe_list);
+ bio_list_init(&io->flush_barriers);
io->state = IO_UNIT_RUNNING;
io->meta_page = mempool_alloc(log->meta_pool, GFP_NOIO);
@@ -367,12 +756,11 @@ static void r5l_append_payload_page(struct r5l_log *log, struct page *page)
struct r5l_io_unit *io = log->current_io;
if (io->need_split_bio) {
- struct bio *prev = io->current_bio;
-
+ BUG_ON(io->split_bio);
+ io->split_bio = io->current_bio;
io->current_bio = r5l_bio_alloc(log);
- bio_chain(io->current_bio, prev);
-
- submit_bio(prev);
+ bio_chain(io->current_bio, io->split_bio);
+ io->need_split_bio = false;
}
if (!bio_add_page(io->current_bio, page, PAGE_SIZE, 0))
@@ -401,50 +789,85 @@ static int r5l_log_stripe(struct r5l_log *log, struct stripe_head *sh,
io = log->current_io;
+ if (test_and_clear_bit(STRIPE_R5C_PREFLUSH, &sh->state))
+ io->has_flush = 1;
+
for (i = 0; i < sh->disks; i++) {
- if (!test_bit(R5_Wantwrite, &sh->dev[i].flags))
+ if (!test_bit(R5_Wantwrite, &sh->dev[i].flags) ||
+ test_bit(R5_InJournal, &sh->dev[i].flags))
continue;
if (i == sh->pd_idx || i == sh->qd_idx)
continue;
+ if (test_bit(R5_WantFUA, &sh->dev[i].flags) &&
+ log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_BACK) {
+ io->has_fua = 1;
+ /*
+ * we need to flush journal to make sure recovery can
+ * reach the data with fua flag
+ */
+ io->has_flush = 1;
+ }
r5l_append_payload_meta(log, R5LOG_PAYLOAD_DATA,
raid5_compute_blocknr(sh, i, 0),
sh->dev[i].log_checksum, 0, false);
r5l_append_payload_page(log, sh->dev[i].page);
}
- if (sh->qd_idx >= 0) {
+ if (parity_pages == 2) {
r5l_append_payload_meta(log, R5LOG_PAYLOAD_PARITY,
sh->sector, sh->dev[sh->pd_idx].log_checksum,
sh->dev[sh->qd_idx].log_checksum, true);
r5l_append_payload_page(log, sh->dev[sh->pd_idx].page);
r5l_append_payload_page(log, sh->dev[sh->qd_idx].page);
- } else {
+ } else if (parity_pages == 1) {
r5l_append_payload_meta(log, R5LOG_PAYLOAD_PARITY,
sh->sector, sh->dev[sh->pd_idx].log_checksum,
0, false);
r5l_append_payload_page(log, sh->dev[sh->pd_idx].page);
- }
+ } else /* Just writing data, not parity, in caching phase */
+ BUG_ON(parity_pages != 0);
list_add_tail(&sh->log_list, &io->stripe_list);
atomic_inc(&io->pending_stripe);
sh->log_io = io;
+ if (log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_THROUGH)
+ return 0;
+
+ if (sh->log_start == MaxSector) {
+ BUG_ON(!list_empty(&sh->r5c));
+ sh->log_start = io->log_start;
+ spin_lock_irq(&log->stripe_in_journal_lock);
+ list_add_tail(&sh->r5c,
+ &log->stripe_in_journal_list);
+ spin_unlock_irq(&log->stripe_in_journal_lock);
+ atomic_inc(&log->stripe_in_journal_count);
+ }
return 0;
}
-static void r5l_wake_reclaim(struct r5l_log *log, sector_t space);
+/* add stripe to no_space_stripes, and then wake up reclaim */
+static inline void r5l_add_no_space_stripe(struct r5l_log *log,
+ struct stripe_head *sh)
+{
+ spin_lock(&log->no_space_stripes_lock);
+ list_add_tail(&sh->log_list, &log->no_space_stripes);
+ spin_unlock(&log->no_space_stripes_lock);
+}
+
/*
* running in raid5d, where reclaim could wait for raid5d too (when it flushes
* data from log to raid disks), so we shouldn't wait for reclaim here
*/
int r5l_write_stripe(struct r5l_log *log, struct stripe_head *sh)
{
+ struct r5conf *conf = sh->raid_conf;
int write_disks = 0;
int data_pages, parity_pages;
- int meta_size;
int reserve;
int i;
int ret = 0;
+ bool wake_reclaim = false;
if (!log)
return -EAGAIN;
@@ -456,11 +879,15 @@ int r5l_write_stripe(struct r5l_log *log, struct stripe_head *sh)
return -EAGAIN;
}
+ WARN_ON(test_bit(STRIPE_R5C_CACHING, &sh->state));
+
for (i = 0; i < sh->disks; i++) {
void *addr;
- if (!test_bit(R5_Wantwrite, &sh->dev[i].flags))
+ if (!test_bit(R5_Wantwrite, &sh->dev[i].flags) ||
+ test_bit(R5_InJournal, &sh->dev[i].flags))
continue;
+
write_disks++;
/* checksum is already calculated in last run */
if (test_bit(STRIPE_LOG_TRAPPED, &sh->state))
@@ -473,15 +900,6 @@ int r5l_write_stripe(struct r5l_log *log, struct stripe_head *sh)
parity_pages = 1 + !!(sh->qd_idx >= 0);
data_pages = write_disks - parity_pages;
- meta_size =
- ((sizeof(struct r5l_payload_data_parity) + sizeof(__le32))
- * data_pages) +
- sizeof(struct r5l_payload_data_parity) +
- sizeof(__le32) * parity_pages;
- /* Doesn't work with very big raid array */
- if (meta_size + sizeof(struct r5l_meta_block) > PAGE_SIZE)
- return -EINVAL;
-
set_bit(STRIPE_LOG_TRAPPED, &sh->state);
/*
* The stripe must enter state machine again to finish the write, so
@@ -493,22 +911,49 @@ int r5l_write_stripe(struct r5l_log *log, struct stripe_head *sh)
mutex_lock(&log->io_mutex);
/* meta + data */
reserve = (1 + write_disks) << (PAGE_SHIFT - 9);
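/* e.g. with 4 KiB pages and write_disks == 3 (hypothetical), reserve =
 * 4 << 3 = 32 sectors: one meta block plus three data/parity pages */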
- if (!r5l_has_free_space(log, reserve)) {
- spin_lock(&log->no_space_stripes_lock);
- list_add_tail(&sh->log_list, &log->no_space_stripes);
- spin_unlock(&log->no_space_stripes_lock);
- r5l_wake_reclaim(log, reserve);
- } else {
- ret = r5l_log_stripe(log, sh, data_pages, parity_pages);
- if (ret) {
- spin_lock_irq(&log->io_list_lock);
- list_add_tail(&sh->log_list, &log->no_mem_stripes);
- spin_unlock_irq(&log->io_list_lock);
+ if (log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_THROUGH) {
+ if (!r5l_has_free_space(log, reserve)) {
+ r5l_add_no_space_stripe(log, sh);
+ wake_reclaim = true;
+ } else {
+ ret = r5l_log_stripe(log, sh, data_pages, parity_pages);
+ if (ret) {
+ spin_lock_irq(&log->io_list_lock);
+ list_add_tail(&sh->log_list,
+ &log->no_mem_stripes);
+ spin_unlock_irq(&log->io_list_lock);
+ }
+ }
+ } else { /* R5C_JOURNAL_MODE_WRITE_BACK */
+ /*
+ * log space critical, do not process stripes that are
+ * not in cache yet (sh->log_start == MaxSector).
+ */
+ if (test_bit(R5C_LOG_CRITICAL, &conf->cache_state) &&
+ sh->log_start == MaxSector) {
+ r5l_add_no_space_stripe(log, sh);
+ wake_reclaim = true;
+ reserve = 0;
+ } else if (!r5l_has_free_space(log, reserve)) {
+ if (sh->log_start == log->last_checkpoint)
+ BUG();
+ else
+ r5l_add_no_space_stripe(log, sh);
+ } else {
+ ret = r5l_log_stripe(log, sh, data_pages, parity_pages);
+ if (ret) {
+ spin_lock_irq(&log->io_list_lock);
+ list_add_tail(&sh->log_list,
+ &log->no_mem_stripes);
+ spin_unlock_irq(&log->io_list_lock);
+ }
}
}
mutex_unlock(&log->io_mutex);
+ if (wake_reclaim)
+ r5l_wake_reclaim(log, reserve);
return 0;
}
@@ -525,17 +970,34 @@ int r5l_handle_flush_request(struct r5l_log *log, struct bio *bio)
{
if (!log)
return -ENODEV;
- /*
- * we flush log disk cache first, then write stripe data to raid disks.
- * So if bio is finished, the log disk cache is flushed already. The
- * recovery guarantees we can recovery the bio from log disk, so we
- * don't need to flush again
- */
- if (bio->bi_iter.bi_size == 0) {
- bio_endio(bio);
- return 0;
+
+ if (log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_THROUGH) {
+ /*
+ * in write through (journal only)
+ * we flush log disk cache first, then write stripe data to
+ * raid disks. So if bio is finished, the log disk cache is
+ * flushed already. Recovery guarantees we can recover
+ * the bio from log disk, so we don't need to flush again
+ */
+ if (bio->bi_iter.bi_size == 0) {
+ bio_endio(bio);
+ return 0;
+ }
+ bio->bi_opf &= ~REQ_PREFLUSH;
+ } else {
+ /* write back (with cache) */
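+ /* a zero-length PREFLUSH bio is queued on the current io_unit as
+ * a flush barrier and completed from r5l_log_endio() once that
+ * io_unit is safely on the log device */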
+ if (bio->bi_iter.bi_size == 0) {
+ mutex_lock(&log->io_mutex);
+ r5l_get_meta(log, 0);
+ bio_list_add(&log->current_io->flush_barriers, bio);
+ log->current_io->has_flush = 1;
+ log->current_io->has_null_flush = 1;
+ atomic_inc(&log->current_io->pending_stripe);
+ r5l_submit_current_io(log);
+ mutex_unlock(&log->io_mutex);
+ return 0;
+ }
}
- bio->bi_opf &= ~REQ_PREFLUSH;
return -EAGAIN;
}
@@ -555,10 +1017,40 @@ static void r5l_run_no_space_stripes(struct r5l_log *log)
spin_unlock(&log->no_space_stripes_lock);
}
+/*
+ * calculate new last_checkpoint
+ * for write through mode, returns log->next_checkpoint
+ * for write back, returns log_start of first sh in stripe_in_journal_list
+ */
+static sector_t r5c_calculate_new_cp(struct r5conf *conf)
+{
+ struct stripe_head *sh;
+ struct r5l_log *log = conf->log;
+ sector_t new_cp;
+ unsigned long flags;
+
+ if (log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_THROUGH)
+ return log->next_checkpoint;
+
+ spin_lock_irqsave(&log->stripe_in_journal_lock, flags);
+ if (list_empty(&conf->log->stripe_in_journal_list)) {
+ /* all stripes flushed */
+ spin_unlock_irqrestore(&log->stripe_in_journal_lock, flags);
+ return log->next_checkpoint;
+ }
+ sh = list_first_entry(&conf->log->stripe_in_journal_list,
+ struct stripe_head, r5c);
+ new_cp = sh->log_start;
+ spin_unlock_irqrestore(&log->stripe_in_journal_lock, flags);
+ return new_cp;
+}
+
static sector_t r5l_reclaimable_space(struct r5l_log *log)
{
+ struct r5conf *conf = log->rdev->mddev->private;
+
return r5l_ring_distance(log, log->last_checkpoint,
- log->next_checkpoint);
+ r5c_calculate_new_cp(conf));
}
static void r5l_run_no_mem_stripe(struct r5l_log *log)
@@ -589,7 +1081,6 @@ static bool r5l_complete_finished_ios(struct r5l_log *log)
break;
log->next_checkpoint = io->log_start;
- log->next_cp_seq = io->seq;
list_del(&io->log_sibling);
mempool_free(io, log->io_pool);
@@ -604,6 +1095,7 @@ static bool r5l_complete_finished_ios(struct r5l_log *log)
static void __r5l_stripe_write_finished(struct r5l_io_unit *io)
{
struct r5l_log *log = io->log;
+ struct r5conf *conf = log->rdev->mddev->private;
unsigned long flags;
spin_lock_irqsave(&log->io_list_lock, flags);
@@ -614,7 +1106,8 @@ static void __r5l_stripe_write_finished(struct r5l_io_unit *io)
return;
}
- if (r5l_reclaimable_space(log) > log->max_free_space)
+ if (r5l_reclaimable_space(log) > log->max_free_space ||
+ test_bit(R5C_LOG_TIGHT, &conf->cache_state))
r5l_wake_reclaim(log, 0);
spin_unlock_irqrestore(&log->io_list_lock, flags);
@@ -685,7 +1178,7 @@ void r5l_flush_stripe_to_raid(struct r5l_log *log)
bio_reset(&log->flush_bio);
log->flush_bio.bi_bdev = log->rdev->bdev;
log->flush_bio.bi_end_io = r5l_log_flush_endio;
- bio_set_op_attrs(&log->flush_bio, REQ_OP_WRITE, WRITE_FLUSH);
+ log->flush_bio.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
submit_bio(&log->flush_bio);
}
@@ -713,8 +1206,8 @@ static void r5l_write_super_and_discard_space(struct r5l_log *log,
* there is a deadlock. We workaround this issue with a trylock.
* FIXME: we could miss discard if we can't take reconfig mutex
*/
- set_mask_bits(&mddev->flags, 0,
- BIT(MD_CHANGE_DEVS) | BIT(MD_CHANGE_PENDING));
+ set_mask_bits(&mddev->sb_flags, 0,
+ BIT(MD_SB_CHANGE_DEVS) | BIT(MD_SB_CHANGE_PENDING));
if (!mddev_trylock(mddev))
return;
md_update_sb(mddev, 1);
@@ -735,15 +1228,148 @@ static void r5l_write_super_and_discard_space(struct r5l_log *log,
}
}
+/*
+ * r5c_flush_stripe moves stripe from cached list to handle_list. When called,
+ * the stripe must be on r5c_cached_full_stripes or r5c_cached_partial_stripes.
+ *
+ * must hold conf->device_lock
+ */
+static void r5c_flush_stripe(struct r5conf *conf, struct stripe_head *sh)
+{
+ BUG_ON(list_empty(&sh->lru));
+ BUG_ON(!test_bit(STRIPE_R5C_CACHING, &sh->state));
+ BUG_ON(test_bit(STRIPE_HANDLE, &sh->state));
+
+ /*
+ * The stripe is not ON_RELEASE_LIST, so it is safe to call
+ * raid5_release_stripe() while holding conf->device_lock
+ */
+ BUG_ON(test_bit(STRIPE_ON_RELEASE_LIST, &sh->state));
+ assert_spin_locked(&conf->device_lock);
+
+ list_del_init(&sh->lru);
+ atomic_inc(&sh->count);
+
+ set_bit(STRIPE_HANDLE, &sh->state);
+ atomic_inc(&conf->active_stripes);
+ r5c_make_stripe_write_out(sh);
+
+ raid5_release_stripe(sh);
+}
+
+/*
+ * if num == 0, flush all full stripes
+ * if num > 0, flush all full stripes. If fewer than num full stripes are
+ * flushed, flush some partial stripes until num stripes in total are
+ * flushed or there are no more cached stripes.
+ */
+void r5c_flush_cache(struct r5conf *conf, int num)
+{
+ int count;
+ struct stripe_head *sh, *next;
+
+ assert_spin_locked(&conf->device_lock);
+ if (!conf->log)
+ return;
+
+ count = 0;
+ list_for_each_entry_safe(sh, next, &conf->r5c_full_stripe_list, lru) {
+ r5c_flush_stripe(conf, sh);
+ count++;
+ }
+
+ if (count >= num)
+ return;
+ list_for_each_entry_safe(sh, next,
+ &conf->r5c_partial_stripe_list, lru) {
+ r5c_flush_stripe(conf, sh);
+ if (++count >= num)
+ break;
+ }
+}
+
+static void r5c_do_reclaim(struct r5conf *conf)
+{
+ struct r5l_log *log = conf->log;
+ struct stripe_head *sh;
+ int count = 0;
+ unsigned long flags;
+ int total_cached;
+ int stripes_to_flush;
+
+ if (!r5c_is_writeback(log))
+ return;
+
+ total_cached = atomic_read(&conf->r5c_cached_partial_stripes) +
+ atomic_read(&conf->r5c_cached_full_stripes);
+
+ if (total_cached > conf->min_nr_stripes * 3 / 4 ||
+ atomic_read(&conf->empty_inactive_list_nr) > 0)
+ /*
+		 * if stripe cache pressure is high, flush all full stripes and
+ * some partial stripes
+ */
+ stripes_to_flush = R5C_RECLAIM_STRIPE_GROUP;
+ else if (total_cached > conf->min_nr_stripes * 1 / 2 ||
+ atomic_read(&conf->r5c_cached_full_stripes) >
+ R5C_FULL_STRIPE_FLUSH_BATCH)
+ /*
+		 * if stripe cache pressure is moderate, or if there are many full
+		 * stripes, flush all full stripes
+ */
+ stripes_to_flush = 0;
+ else
+ /* no need to flush */
+ stripes_to_flush = -1;
+
+ if (stripes_to_flush >= 0) {
+ spin_lock_irqsave(&conf->device_lock, flags);
+ r5c_flush_cache(conf, stripes_to_flush);
+ spin_unlock_irqrestore(&conf->device_lock, flags);
+ }
+
+ /* if log space is tight, flush stripes on stripe_in_journal_list */
+ if (test_bit(R5C_LOG_TIGHT, &conf->cache_state)) {
+ spin_lock_irqsave(&log->stripe_in_journal_lock, flags);
+ spin_lock(&conf->device_lock);
+ list_for_each_entry(sh, &log->stripe_in_journal_list, r5c) {
+ /*
+ * stripes on stripe_in_journal_list could be in any
+ * state of the stripe_cache state machine. In this
+			 * case, we only want to flush stripes on
+ * r5c_cached_full/partial_stripes. The following
+ * condition makes sure the stripe is on one of the
+ * two lists.
+ */
+ if (!list_empty(&sh->lru) &&
+ !test_bit(STRIPE_HANDLE, &sh->state) &&
+ atomic_read(&sh->count) == 0) {
+ r5c_flush_stripe(conf, sh);
+ }
+ if (count++ >= R5C_RECLAIM_STRIPE_GROUP)
+ break;
+ }
+ spin_unlock(&conf->device_lock);
+ spin_unlock_irqrestore(&log->stripe_in_journal_lock, flags);
+ }
+
+ if (!test_bit(R5C_LOG_CRITICAL, &conf->cache_state))
+ r5l_run_no_space_stripes(log);
+
+ md_wakeup_thread(conf->mddev->thread);
+}
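
[Editor's sketch] The threshold policy above can be read as a pure function of cache pressure. A minimal restatement, with the kernel constants stubbed by assumed values and an invented helper name:

#include <stdbool.h>

#define R5C_RECLAIM_STRIPE_GROUP	64	/* assumed value for the sketch */
#define R5C_FULL_STRIPE_FLUSH_BATCH	8	/* assumed value for the sketch */

/* returns the 'num' argument for r5c_flush_cache(), or -1 for no flush */
static int pick_stripes_to_flush(int total_cached, int full_cached,
                                 int min_nr_stripes, bool inactive_empty)
{
	if (total_cached > min_nr_stripes * 3 / 4 || inactive_empty)
		return R5C_RECLAIM_STRIPE_GROUP;	/* high: full + some partial */
	if (total_cached > min_nr_stripes / 2 ||
	    full_cached > R5C_FULL_STRIPE_FLUSH_BATCH)
		return 0;				/* moderate: full stripes only */
	return -1;					/* no need to flush */
}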
static void r5l_do_reclaim(struct r5l_log *log)
{
+ struct r5conf *conf = log->rdev->mddev->private;
sector_t reclaim_target = xchg(&log->reclaim_target, 0);
sector_t reclaimable;
sector_t next_checkpoint;
- u64 next_cp_seq;
+ bool write_super;
spin_lock_irq(&log->io_list_lock);
+ write_super = r5l_reclaimable_space(log) > log->max_free_space ||
+ reclaim_target != 0 || !list_empty(&log->no_space_stripes);
/*
* move proper io_unit to reclaim list. We should not change the order.
* reclaimable/unreclaimable io_unit can be mixed in the list, we
@@ -764,12 +1390,12 @@ static void r5l_do_reclaim(struct r5l_log *log)
log->io_list_lock);
}
- next_checkpoint = log->next_checkpoint;
- next_cp_seq = log->next_cp_seq;
+ next_checkpoint = r5c_calculate_new_cp(conf);
spin_unlock_irq(&log->io_list_lock);
BUG_ON(reclaimable < 0);
- if (reclaimable == 0)
+
+ if (reclaimable == 0 || !write_super)
return;
/*
@@ -781,7 +1407,7 @@ static void r5l_do_reclaim(struct r5l_log *log)
mutex_lock(&log->io_mutex);
log->last_checkpoint = next_checkpoint;
- log->last_cp_seq = next_cp_seq;
+ r5c_update_log_state(log);
mutex_unlock(&log->io_mutex);
r5l_run_no_space_stripes(log);
@@ -795,14 +1421,17 @@ static void r5l_reclaim_thread(struct md_thread *thread)
if (!log)
return;
+ r5c_do_reclaim(conf);
r5l_do_reclaim(log);
}
-static void r5l_wake_reclaim(struct r5l_log *log, sector_t space)
+void r5l_wake_reclaim(struct r5l_log *log, sector_t space)
{
unsigned long target;
unsigned long new = (unsigned long)space; /* overflow in theory */
+ if (!log)
+ return;
do {
target = log->reclaim_target;
if (new < target)
@@ -816,22 +1445,14 @@ void r5l_quiesce(struct r5l_log *log, int state)
struct mddev *mddev;
if (!log || state == 2)
return;
- if (state == 0) {
- /*
- * This is a special case for hotadd. In suspend, the array has
- * no journal. In resume, journal is initialized as well as the
- * reclaim thread.
- */
- if (log->reclaim_thread)
- return;
- log->reclaim_thread = md_register_thread(r5l_reclaim_thread,
- log->rdev->mddev, "reclaim");
- } else if (state == 1) {
+ if (state == 0)
+ kthread_unpark(log->reclaim_thread->tsk);
+ else if (state == 1) {
/* make sure r5l_write_super_and_discard_space exits */
mddev = log->rdev->mddev;
wake_up(&mddev->sb_wait);
- r5l_wake_reclaim(log, -1L);
- md_unregister_thread(&log->reclaim_thread);
+ kthread_park(log->reclaim_thread->tsk);
+ r5l_wake_reclaim(log, MaxSector);
r5l_do_reclaim(log);
}
}
@@ -857,10 +1478,13 @@ struct r5l_recovery_ctx {
sector_t meta_total_blocks; /* total size of current meta and data */
sector_t pos; /* recovery position */
u64 seq; /* recovery position seq */
+ int data_parity_stripes; /* number of data_parity stripes */
+ int data_only_stripes; /* number of data_only stripes */
+ struct list_head cached_list;
};
-static int r5l_read_meta_block(struct r5l_log *log,
- struct r5l_recovery_ctx *ctx)
+static int r5l_recovery_read_meta_block(struct r5l_log *log,
+ struct r5l_recovery_ctx *ctx)
{
struct page *page = ctx->meta_page;
struct r5l_meta_block *mb;
@@ -892,170 +1516,618 @@ static int r5l_read_meta_block(struct r5l_log *log,
return 0;
}
-static int r5l_recovery_flush_one_stripe(struct r5l_log *log,
- struct r5l_recovery_ctx *ctx,
- sector_t stripe_sect,
- int *offset, sector_t *log_offset)
+static void
+r5l_recovery_create_empty_meta_block(struct r5l_log *log,
+ struct page *page,
+ sector_t pos, u64 seq)
{
- struct r5conf *conf = log->rdev->mddev->private;
- struct stripe_head *sh;
- struct r5l_payload_data_parity *payload;
- int disk_index;
+ struct r5l_meta_block *mb;
- sh = raid5_get_active_stripe(conf, stripe_sect, 0, 0, 0);
- while (1) {
- payload = page_address(ctx->meta_page) + *offset;
+ mb = page_address(page);
+ clear_page(mb);
+ mb->magic = cpu_to_le32(R5LOG_MAGIC);
+ mb->version = R5LOG_VERSION;
+ mb->meta_size = cpu_to_le32(sizeof(struct r5l_meta_block));
+ mb->seq = cpu_to_le64(seq);
+ mb->position = cpu_to_le64(pos);
+}
- if (le16_to_cpu(payload->header.type) == R5LOG_PAYLOAD_DATA) {
- raid5_compute_sector(conf,
- le64_to_cpu(payload->location), 0,
- &disk_index, sh);
+static int r5l_log_write_empty_meta_block(struct r5l_log *log, sector_t pos,
+ u64 seq)
+{
+ struct page *page;
+ struct r5l_meta_block *mb;
- sync_page_io(log->rdev, *log_offset, PAGE_SIZE,
- sh->dev[disk_index].page, REQ_OP_READ, 0,
- false);
- sh->dev[disk_index].log_checksum =
- le32_to_cpu(payload->checksum[0]);
- set_bit(R5_Wantwrite, &sh->dev[disk_index].flags);
- ctx->meta_total_blocks += BLOCK_SECTORS;
- } else {
- disk_index = sh->pd_idx;
- sync_page_io(log->rdev, *log_offset, PAGE_SIZE,
- sh->dev[disk_index].page, REQ_OP_READ, 0,
- false);
- sh->dev[disk_index].log_checksum =
- le32_to_cpu(payload->checksum[0]);
- set_bit(R5_Wantwrite, &sh->dev[disk_index].flags);
-
- if (sh->qd_idx >= 0) {
- disk_index = sh->qd_idx;
- sync_page_io(log->rdev,
- r5l_ring_add(log, *log_offset, BLOCK_SECTORS),
- PAGE_SIZE, sh->dev[disk_index].page,
- REQ_OP_READ, 0, false);
- sh->dev[disk_index].log_checksum =
- le32_to_cpu(payload->checksum[1]);
- set_bit(R5_Wantwrite,
- &sh->dev[disk_index].flags);
- }
- ctx->meta_total_blocks += BLOCK_SECTORS * conf->max_degraded;
- }
+ page = alloc_page(GFP_KERNEL);
+ if (!page)
+ return -ENOMEM;
+ r5l_recovery_create_empty_meta_block(log, page, pos, seq);
+ mb = page_address(page);
+ mb->checksum = cpu_to_le32(crc32c_le(log->uuid_checksum,
+ mb, PAGE_SIZE));
+ if (!sync_page_io(log->rdev, pos, PAGE_SIZE, page, REQ_OP_WRITE,
+ REQ_FUA, false)) {
+ __free_page(page);
+ return -EIO;
+ }
+ __free_page(page);
+ return 0;
+}
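
[Editor's note] The checksum above is computed over the whole 4K page while mb->checksum itself is still zero (clear_page() in r5l_recovery_create_empty_meta_block() guarantees that), and only then stored into the block. A reader can therefore verify a meta block by saving the stored value, clearing the field, and recomputing crc32c over the page -- presumably what r5l_recovery_read_meta_block() does on the other side.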
- *log_offset = r5l_ring_add(log, *log_offset,
- le32_to_cpu(payload->size));
- *offset += sizeof(struct r5l_payload_data_parity) +
- sizeof(__le32) *
- (le32_to_cpu(payload->size) >> (PAGE_SHIFT - 9));
- if (le16_to_cpu(payload->header.type) == R5LOG_PAYLOAD_PARITY)
- break;
+/*
+ * r5l_recovery_load_data and r5l_recovery_load_parity use flag R5_Wantwrite
+ * to mark valid (potentially not flushed) data in the journal.
+ *
+ * We already verified checksum in r5l_recovery_verify_data_checksum_for_mb,
+ * so there should not be any mismatch here.
+ */
+static void r5l_recovery_load_data(struct r5l_log *log,
+ struct stripe_head *sh,
+ struct r5l_recovery_ctx *ctx,
+ struct r5l_payload_data_parity *payload,
+ sector_t log_offset)
+{
+ struct mddev *mddev = log->rdev->mddev;
+ struct r5conf *conf = mddev->private;
+ int dd_idx;
+
+ raid5_compute_sector(conf,
+ le64_to_cpu(payload->location), 0,
+ &dd_idx, sh);
+ sync_page_io(log->rdev, log_offset, PAGE_SIZE,
+ sh->dev[dd_idx].page, REQ_OP_READ, 0, false);
+ sh->dev[dd_idx].log_checksum =
+ le32_to_cpu(payload->checksum[0]);
+ ctx->meta_total_blocks += BLOCK_SECTORS;
+
+ set_bit(R5_Wantwrite, &sh->dev[dd_idx].flags);
+ set_bit(STRIPE_R5C_CACHING, &sh->state);
+}
+
+static void r5l_recovery_load_parity(struct r5l_log *log,
+ struct stripe_head *sh,
+ struct r5l_recovery_ctx *ctx,
+ struct r5l_payload_data_parity *payload,
+ sector_t log_offset)
+{
+ struct mddev *mddev = log->rdev->mddev;
+ struct r5conf *conf = mddev->private;
+
+ ctx->meta_total_blocks += BLOCK_SECTORS * conf->max_degraded;
+ sync_page_io(log->rdev, log_offset, PAGE_SIZE,
+ sh->dev[sh->pd_idx].page, REQ_OP_READ, 0, false);
+ sh->dev[sh->pd_idx].log_checksum =
+ le32_to_cpu(payload->checksum[0]);
+ set_bit(R5_Wantwrite, &sh->dev[sh->pd_idx].flags);
+
+ if (sh->qd_idx >= 0) {
+ sync_page_io(log->rdev,
+ r5l_ring_add(log, log_offset, BLOCK_SECTORS),
+ PAGE_SIZE, sh->dev[sh->qd_idx].page,
+ REQ_OP_READ, 0, false);
+ sh->dev[sh->qd_idx].log_checksum =
+ le32_to_cpu(payload->checksum[1]);
+ set_bit(R5_Wantwrite, &sh->dev[sh->qd_idx].flags);
}
+ clear_bit(STRIPE_R5C_CACHING, &sh->state);
+}
- for (disk_index = 0; disk_index < sh->disks; disk_index++) {
- void *addr;
- u32 checksum;
+static void r5l_recovery_reset_stripe(struct stripe_head *sh)
+{
+ int i;
+ sh->state = 0;
+ sh->log_start = MaxSector;
+ for (i = sh->disks; i--; )
+ sh->dev[i].flags = 0;
+}
+
+static void
+r5l_recovery_replay_one_stripe(struct r5conf *conf,
+ struct stripe_head *sh,
+ struct r5l_recovery_ctx *ctx)
+{
+ struct md_rdev *rdev, *rrdev;
+ int disk_index;
+ int data_count = 0;
+
+ for (disk_index = 0; disk_index < sh->disks; disk_index++) {
if (!test_bit(R5_Wantwrite, &sh->dev[disk_index].flags))
continue;
- addr = kmap_atomic(sh->dev[disk_index].page);
- checksum = crc32c_le(log->uuid_checksum, addr, PAGE_SIZE);
- kunmap_atomic(addr);
- if (checksum != sh->dev[disk_index].log_checksum)
- goto error;
+ if (disk_index == sh->qd_idx || disk_index == sh->pd_idx)
+ continue;
+ data_count++;
}
- for (disk_index = 0; disk_index < sh->disks; disk_index++) {
- struct md_rdev *rdev, *rrdev;
+ /*
+ * stripes that only have parity must have been flushed
+ * before the crash that we are now recovering from, so
+	 * there is nothing more to recover.
+ */
+ if (data_count == 0)
+ goto out;
- if (!test_and_clear_bit(R5_Wantwrite,
- &sh->dev[disk_index].flags))
+ for (disk_index = 0; disk_index < sh->disks; disk_index++) {
+ if (!test_bit(R5_Wantwrite, &sh->dev[disk_index].flags))
continue;
/* in case device is broken */
+ rcu_read_lock();
rdev = rcu_dereference(conf->disks[disk_index].rdev);
- if (rdev)
- sync_page_io(rdev, stripe_sect, PAGE_SIZE,
+ if (rdev) {
+ atomic_inc(&rdev->nr_pending);
+ rcu_read_unlock();
+ sync_page_io(rdev, sh->sector, PAGE_SIZE,
sh->dev[disk_index].page, REQ_OP_WRITE, 0,
false);
+ rdev_dec_pending(rdev, rdev->mddev);
+ rcu_read_lock();
+ }
rrdev = rcu_dereference(conf->disks[disk_index].replacement);
- if (rrdev)
- sync_page_io(rrdev, stripe_sect, PAGE_SIZE,
+ if (rrdev) {
+ atomic_inc(&rrdev->nr_pending);
+ rcu_read_unlock();
+ sync_page_io(rrdev, sh->sector, PAGE_SIZE,
sh->dev[disk_index].page, REQ_OP_WRITE, 0,
false);
+ rdev_dec_pending(rrdev, rrdev->mddev);
+ rcu_read_lock();
+ }
+ rcu_read_unlock();
}
- raid5_release_stripe(sh);
+ ctx->data_parity_stripes++;
+out:
+ r5l_recovery_reset_stripe(sh);
+}
+
+static struct stripe_head *
+r5c_recovery_alloc_stripe(struct r5conf *conf,
+ sector_t stripe_sect,
+ sector_t log_start)
+{
+ struct stripe_head *sh;
+
+ sh = raid5_get_active_stripe(conf, stripe_sect, 0, 1, 0);
+ if (!sh)
+ return NULL; /* no more stripe available */
+
+ r5l_recovery_reset_stripe(sh);
+ sh->log_start = log_start;
+
+ return sh;
+}
+
+static struct stripe_head *
+r5c_recovery_lookup_stripe(struct list_head *list, sector_t sect)
+{
+ struct stripe_head *sh;
+
+ list_for_each_entry(sh, list, lru)
+ if (sh->sector == sect)
+ return sh;
+ return NULL;
+}
+
+static void
+r5c_recovery_drop_stripes(struct list_head *cached_stripe_list,
+ struct r5l_recovery_ctx *ctx)
+{
+ struct stripe_head *sh, *next;
+
+ list_for_each_entry_safe(sh, next, cached_stripe_list, lru) {
+ r5l_recovery_reset_stripe(sh);
+ list_del_init(&sh->lru);
+ raid5_release_stripe(sh);
+ }
+}
+
+static void
+r5c_recovery_replay_stripes(struct list_head *cached_stripe_list,
+ struct r5l_recovery_ctx *ctx)
+{
+ struct stripe_head *sh, *next;
+
+ list_for_each_entry_safe(sh, next, cached_stripe_list, lru)
+ if (!test_bit(STRIPE_R5C_CACHING, &sh->state)) {
+ r5l_recovery_replay_one_stripe(sh->raid_conf, sh, ctx);
+ list_del_init(&sh->lru);
+ raid5_release_stripe(sh);
+ }
+}
+
+/* if matches return 0; otherwise return -EINVAL */
+static int
+r5l_recovery_verify_data_checksum(struct r5l_log *log, struct page *page,
+ sector_t log_offset, __le32 log_checksum)
+{
+ void *addr;
+ u32 checksum;
+
+ sync_page_io(log->rdev, log_offset, PAGE_SIZE,
+ page, REQ_OP_READ, 0, false);
+ addr = kmap_atomic(page);
+ checksum = crc32c_le(log->uuid_checksum, addr, PAGE_SIZE);
+ kunmap_atomic(addr);
+ return (le32_to_cpu(log_checksum) == checksum) ? 0 : -EINVAL;
+}
+
+/*
+ * before loading data to the stripe cache, we need to verify checksums for
+ * all data; if there is a mismatch for any data page, we drop all data in
+ * the meta block
+ */
+static int
+r5l_recovery_verify_data_checksum_for_mb(struct r5l_log *log,
+ struct r5l_recovery_ctx *ctx)
+{
+ struct mddev *mddev = log->rdev->mddev;
+ struct r5conf *conf = mddev->private;
+ struct r5l_meta_block *mb = page_address(ctx->meta_page);
+ sector_t mb_offset = sizeof(struct r5l_meta_block);
+ sector_t log_offset = r5l_ring_add(log, ctx->pos, BLOCK_SECTORS);
+ struct page *page;
+ struct r5l_payload_data_parity *payload;
+
+ page = alloc_page(GFP_KERNEL);
+ if (!page)
+ return -ENOMEM;
+
+ while (mb_offset < le32_to_cpu(mb->meta_size)) {
+ payload = (void *)mb + mb_offset;
+
+ if (payload->header.type == R5LOG_PAYLOAD_DATA) {
+ if (r5l_recovery_verify_data_checksum(
+ log, page, log_offset,
+ payload->checksum[0]) < 0)
+ goto mismatch;
+ } else if (payload->header.type == R5LOG_PAYLOAD_PARITY) {
+ if (r5l_recovery_verify_data_checksum(
+ log, page, log_offset,
+ payload->checksum[0]) < 0)
+ goto mismatch;
+ if (conf->max_degraded == 2 && /* q for RAID 6 */
+ r5l_recovery_verify_data_checksum(
+ log, page,
+ r5l_ring_add(log, log_offset,
+ BLOCK_SECTORS),
+ payload->checksum[1]) < 0)
+ goto mismatch;
+ } else /* not R5LOG_PAYLOAD_DATA or R5LOG_PAYLOAD_PARITY */
+ goto mismatch;
+
+ log_offset = r5l_ring_add(log, log_offset,
+ le32_to_cpu(payload->size));
+
+ mb_offset += sizeof(struct r5l_payload_data_parity) +
+ sizeof(__le32) *
+ (le32_to_cpu(payload->size) >> (PAGE_SHIFT - 9));
+ }
+
+ put_page(page);
return 0;
-error:
- for (disk_index = 0; disk_index < sh->disks; disk_index++)
- sh->dev[disk_index].flags = 0;
- raid5_release_stripe(sh);
+mismatch:
+ put_page(page);
return -EINVAL;
}
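
[Editor's example] The stride by which mb_offset advances follows from payload->size being in sectors: size >> (PAGE_SHIFT - 9) converts sectors to pages, and each page contributes one __le32 checksum. For a single 4K data page, size == 8 sectors, so the stride is sizeof(struct r5l_payload_data_parity) + 1 * sizeof(__le32); a RAID6 parity payload covers two pages (P and Q) and therefore carries two checksums, matching the checksum[1] verification in the q branch above.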
-static int r5l_recovery_flush_one_meta(struct r5l_log *log,
- struct r5l_recovery_ctx *ctx)
+/*
+ * Analyze all data/parity pages in one meta block
+ * Returns:
+ * 0 for success
+ * -EINVAL for unknown payload type
+ * -EAGAIN for checksum mismatch of data page
+ * -ENOMEM for run out of memory (alloc_page failed or run out of stripes)
+ */
+static int
+r5c_recovery_analyze_meta_block(struct r5l_log *log,
+ struct r5l_recovery_ctx *ctx,
+ struct list_head *cached_stripe_list)
{
- struct r5conf *conf = log->rdev->mddev->private;
- struct r5l_payload_data_parity *payload;
+ struct mddev *mddev = log->rdev->mddev;
+ struct r5conf *conf = mddev->private;
struct r5l_meta_block *mb;
- int offset;
+ struct r5l_payload_data_parity *payload;
+ int mb_offset;
sector_t log_offset;
- sector_t stripe_sector;
+ sector_t stripe_sect;
+ struct stripe_head *sh;
+ int ret;
+
+ /*
+	 * on a mismatch in data blocks, we will drop all data in this mb, but
+	 * we will still read the next mb for other data with the FLUSH flag, as
+	 * io_units could finish out of order.
+ */
+ ret = r5l_recovery_verify_data_checksum_for_mb(log, ctx);
+ if (ret == -EINVAL)
+ return -EAGAIN;
+ else if (ret)
+		return ret; /* -ENOMEM due to alloc_page() failure */
mb = page_address(ctx->meta_page);
- offset = sizeof(struct r5l_meta_block);
+ mb_offset = sizeof(struct r5l_meta_block);
log_offset = r5l_ring_add(log, ctx->pos, BLOCK_SECTORS);
- while (offset < le32_to_cpu(mb->meta_size)) {
+ while (mb_offset < le32_to_cpu(mb->meta_size)) {
int dd;
- payload = (void *)mb + offset;
- stripe_sector = raid5_compute_sector(conf,
- le64_to_cpu(payload->location), 0, &dd, NULL);
- if (r5l_recovery_flush_one_stripe(log, ctx, stripe_sector,
- &offset, &log_offset))
+ payload = (void *)mb + mb_offset;
+ stripe_sect = (payload->header.type == R5LOG_PAYLOAD_DATA) ?
+ raid5_compute_sector(
+ conf, le64_to_cpu(payload->location), 0, &dd,
+ NULL)
+ : le64_to_cpu(payload->location);
+
+ sh = r5c_recovery_lookup_stripe(cached_stripe_list,
+ stripe_sect);
+
+ if (!sh) {
+ sh = r5c_recovery_alloc_stripe(conf, stripe_sect, ctx->pos);
+ /*
+ * cannot get stripe from raid5_get_active_stripe
+			 * try replaying some stripes
+ */
+ if (!sh) {
+ r5c_recovery_replay_stripes(
+ cached_stripe_list, ctx);
+ sh = r5c_recovery_alloc_stripe(
+ conf, stripe_sect, ctx->pos);
+ }
+ if (!sh) {
+				pr_debug("md/raid:%s: Increasing stripe cache size to %d to recover data on journal.\n",
+ mdname(mddev),
+ conf->min_nr_stripes * 2);
+ raid5_set_cache_size(mddev,
+ conf->min_nr_stripes * 2);
+ sh = r5c_recovery_alloc_stripe(
+ conf, stripe_sect, ctx->pos);
+ }
+ if (!sh) {
+ pr_err("md/raid:%s: Cannot get enough stripes due to memory pressure. Recovery failed.\n",
+ mdname(mddev));
+ return -ENOMEM;
+ }
+ list_add_tail(&sh->lru, cached_stripe_list);
+ }
+
+ if (payload->header.type == R5LOG_PAYLOAD_DATA) {
+ if (!test_bit(STRIPE_R5C_CACHING, &sh->state) &&
+ test_bit(R5_Wantwrite, &sh->dev[sh->pd_idx].flags)) {
+ r5l_recovery_replay_one_stripe(conf, sh, ctx);
+ sh->log_start = ctx->pos;
+ list_move_tail(&sh->lru, cached_stripe_list);
+ }
+ r5l_recovery_load_data(log, sh, ctx, payload,
+ log_offset);
+ } else if (payload->header.type == R5LOG_PAYLOAD_PARITY)
+ r5l_recovery_load_parity(log, sh, ctx, payload,
+ log_offset);
+ else
return -EINVAL;
+
+ log_offset = r5l_ring_add(log, log_offset,
+ le32_to_cpu(payload->size));
+
+ mb_offset += sizeof(struct r5l_payload_data_parity) +
+ sizeof(__le32) *
+ (le32_to_cpu(payload->size) >> (PAGE_SHIFT - 9));
}
+
return 0;
}
-/* copy data/parity from log to raid disks */
-static void r5l_recovery_flush_log(struct r5l_log *log,
- struct r5l_recovery_ctx *ctx)
+/*
+ * Load the stripe into cache. The stripe will be written out later by
+ * the stripe cache state machine.
+ */
+static void r5c_recovery_load_one_stripe(struct r5l_log *log,
+ struct stripe_head *sh)
{
+ struct r5dev *dev;
+ int i;
+
+ for (i = sh->disks; i--; ) {
+ dev = sh->dev + i;
+ if (test_and_clear_bit(R5_Wantwrite, &dev->flags)) {
+ set_bit(R5_InJournal, &dev->flags);
+ set_bit(R5_UPTODATE, &dev->flags);
+ }
+ }
+ list_add_tail(&sh->r5c, &log->stripe_in_journal_list);
+ atomic_inc(&log->stripe_in_journal_count);
+}
+
+/*
+ * Scan through the log for all to-be-flushed data
+ *
+ * For stripes with data and parity, namely Data-Parity stripe
+ * (STRIPE_R5C_CACHING == 0), we simply replay all the writes.
+ *
+ * For stripes with only data, namely Data-Only stripe
+ * (STRIPE_R5C_CACHING == 1), we load them to stripe cache state machine.
+ *
+ * For a stripe, if we see data after parity, we should discard all previous
+ * data and parity for this stripe, as these data are already flushed to
+ * the array.
+ *
+ * At the end of the scan, we return the new journal_tail, which points to
+ * the first data-only stripe on the journal device, or to the next invalid
+ * meta block.
+ */
+static int r5c_recovery_flush_log(struct r5l_log *log,
+ struct r5l_recovery_ctx *ctx)
+{
+ struct stripe_head *sh;
+ int ret = 0;
+
+ /* scan through the log */
while (1) {
- if (r5l_read_meta_block(log, ctx))
- return;
- if (r5l_recovery_flush_one_meta(log, ctx))
- return;
+ if (r5l_recovery_read_meta_block(log, ctx))
+ break;
+
+ ret = r5c_recovery_analyze_meta_block(log, ctx,
+ &ctx->cached_list);
+ /*
+		 * -EAGAIN means a mismatch in a data block; in this case, we still
+		 * try to scan the next meta block
+ */
+ if (ret && ret != -EAGAIN)
+ break; /* ret == -EINVAL or -ENOMEM */
ctx->seq++;
ctx->pos = r5l_ring_add(log, ctx->pos, ctx->meta_total_blocks);
}
+
+ if (ret == -ENOMEM) {
+ r5c_recovery_drop_stripes(&ctx->cached_list, ctx);
+ return ret;
+ }
+
+ /* replay data-parity stripes */
+ r5c_recovery_replay_stripes(&ctx->cached_list, ctx);
+
+ /* load data-only stripes to stripe cache */
+ list_for_each_entry(sh, &ctx->cached_list, lru) {
+ WARN_ON(!test_bit(STRIPE_R5C_CACHING, &sh->state));
+ r5c_recovery_load_one_stripe(log, sh);
+ ctx->data_only_stripes++;
+ }
+
+ return 0;
}
-static int r5l_log_write_empty_meta_block(struct r5l_log *log, sector_t pos,
- u64 seq)
+/*
+ * we did a recovery. Now ctx.pos points to an invalid meta block. The new
+ * log will start here, but we can't let the superblock point to the last
+ * valid meta block. The log might look like:
+ * | meta 1| meta 2| meta 3|
+ * meta 1 is valid, meta 2 is invalid. meta 3 could be valid. If the
+ * superblock points to meta 1, we write a new valid meta 2n. If a crash
+ * happens again, the new recovery will start from meta 1. Since meta 2n is
+ * valid now, recovery will think meta 3 is valid, which is wrong.
+ * The solution is to create a new meta block at meta 2's position with
+ * seq == meta 1's seq + 10000 and let the superblock point to it. The same
+ * recovery will not treat meta 3 as a valid meta block, because its seq
+ * doesn't match
+ */
+
+/*
+ * Before recovery, the log looks like the following
+ *
+ * ---------------------------------------------
+ * | valid log | invalid log |
+ * ---------------------------------------------
+ * ^
+ * |- log->last_checkpoint
+ * |- log->last_cp_seq
+ *
+ * Now we scan through the log until we see invalid entry
+ *
+ * ---------------------------------------------
+ * | valid log | invalid log |
+ * ---------------------------------------------
+ * ^ ^
+ * |- log->last_checkpoint |- ctx->pos
+ * |- log->last_cp_seq |- ctx->seq
+ *
+ * From this point, we need to increase the seq number by 10000 to avoid
+ * confusing the next recovery.
+ *
+ * ---------------------------------------------
+ * | valid log | invalid log |
+ * ---------------------------------------------
+ * ^ ^
+ * |- log->last_checkpoint |- ctx->pos+1
+ * |- log->last_cp_seq |- ctx->seq+10001
+ *
+ * However, it is not safe to start the state machine yet, because the data
+ * in data-only stripes is not yet secured in RAID. To keep this data safe,
+ * we rewrite the data-only stripes starting from seq+10001.
+ *
+ * -----------------------------------------------------------------
+ * | valid log | data only stripes | invalid log |
+ * -----------------------------------------------------------------
+ * ^ ^
+ * |- log->last_checkpoint |- ctx->pos+n
+ * |- log->last_cp_seq |- ctx->seq+10000+n
+ *
+ * If failure happens again during this process, the recovery can safely start
+ * again from log->last_checkpoint.
+ *
+ * Once data only stripes are rewritten to journal, we move log_tail
+ *
+ * -----------------------------------------------------------------
+ * | old log | data only stripes | invalid log |
+ * -----------------------------------------------------------------
+ * ^ ^
+ * |- log->last_checkpoint |- ctx->pos+n
+ * |- log->last_cp_seq |- ctx->seq+10000+n
+ *
+ * Then we can safely start the state machine. If failure happens from this
+ * point on, the recovery will start from new log->last_checkpoint.
+ */
+static int
+r5c_recovery_rewrite_data_only_stripes(struct r5l_log *log,
+ struct r5l_recovery_ctx *ctx)
{
+ struct stripe_head *sh, *next;
+ struct mddev *mddev = log->rdev->mddev;
struct page *page;
- struct r5l_meta_block *mb;
- u32 crc;
- page = alloc_page(GFP_KERNEL | __GFP_ZERO);
- if (!page)
+ page = alloc_page(GFP_KERNEL);
+ if (!page) {
+ pr_err("md/raid:%s: cannot allocate memory to rewrite data only stripes\n",
+ mdname(mddev));
return -ENOMEM;
- mb = page_address(page);
- mb->magic = cpu_to_le32(R5LOG_MAGIC);
- mb->version = R5LOG_VERSION;
- mb->meta_size = cpu_to_le32(sizeof(struct r5l_meta_block));
- mb->seq = cpu_to_le64(seq);
- mb->position = cpu_to_le64(pos);
- crc = crc32c_le(log->uuid_checksum, mb, PAGE_SIZE);
- mb->checksum = cpu_to_le32(crc);
+ }
- if (!sync_page_io(log->rdev, pos, PAGE_SIZE, page, REQ_OP_WRITE,
- WRITE_FUA, false)) {
- __free_page(page);
- return -EIO;
+ list_for_each_entry_safe(sh, next, &ctx->cached_list, lru) {
+ struct r5l_meta_block *mb;
+ int i;
+ int offset;
+ sector_t write_pos;
+
+ WARN_ON(!test_bit(STRIPE_R5C_CACHING, &sh->state));
+ r5l_recovery_create_empty_meta_block(log, page,
+ ctx->pos, ctx->seq);
+ mb = page_address(page);
+ offset = le32_to_cpu(mb->meta_size);
+ write_pos = r5l_ring_add(log, ctx->pos, BLOCK_SECTORS);
+
+ for (i = sh->disks; i--; ) {
+ struct r5dev *dev = &sh->dev[i];
+ struct r5l_payload_data_parity *payload;
+ void *addr;
+
+ if (test_bit(R5_InJournal, &dev->flags)) {
+ payload = (void *)mb + offset;
+ payload->header.type = cpu_to_le16(
+ R5LOG_PAYLOAD_DATA);
+				payload->size = cpu_to_le32(BLOCK_SECTORS);
+ payload->location = cpu_to_le64(
+ raid5_compute_blocknr(sh, i, 0));
+ addr = kmap_atomic(dev->page);
+ payload->checksum[0] = cpu_to_le32(
+ crc32c_le(log->uuid_checksum, addr,
+ PAGE_SIZE));
+ kunmap_atomic(addr);
+ sync_page_io(log->rdev, write_pos, PAGE_SIZE,
+ dev->page, REQ_OP_WRITE, 0, false);
+ write_pos = r5l_ring_add(log, write_pos,
+ BLOCK_SECTORS);
+ offset += sizeof(__le32) +
+ sizeof(struct r5l_payload_data_parity);
+
+ }
+ }
+ mb->meta_size = cpu_to_le32(offset);
+ mb->checksum = cpu_to_le32(crc32c_le(log->uuid_checksum,
+ mb, PAGE_SIZE));
+ sync_page_io(log->rdev, ctx->pos, PAGE_SIZE, page,
+ REQ_OP_WRITE, REQ_FUA, false);
+ sh->log_start = ctx->pos;
+ ctx->pos = write_pos;
+ ctx->seq += 1;
+
+ list_del_init(&sh->lru);
+ raid5_release_stripe(sh);
}
__free_page(page);
return 0;
@@ -1063,45 +2135,60 @@ static int r5l_log_write_empty_meta_block(struct r5l_log *log, sector_t pos,
static int r5l_recovery_log(struct r5l_log *log)
{
+ struct mddev *mddev = log->rdev->mddev;
struct r5l_recovery_ctx ctx;
+ int ret;
+ sector_t pos;
+ struct stripe_head *sh;
ctx.pos = log->last_checkpoint;
ctx.seq = log->last_cp_seq;
ctx.meta_page = alloc_page(GFP_KERNEL);
+ ctx.data_only_stripes = 0;
+ ctx.data_parity_stripes = 0;
+ INIT_LIST_HEAD(&ctx.cached_list);
+
if (!ctx.meta_page)
return -ENOMEM;
- r5l_recovery_flush_log(log, &ctx);
+ ret = r5c_recovery_flush_log(log, &ctx);
__free_page(ctx.meta_page);
- /*
- * we did a recovery. Now ctx.pos points to an invalid meta block. New
- * log will start here. but we can't let superblock point to last valid
- * meta block. The log might looks like:
- * | meta 1| meta 2| meta 3|
- * meta 1 is valid, meta 2 is invalid. meta 3 could be valid. If
- * superblock points to meta 1, we write a new valid meta 2n. if crash
- * happens again, new recovery will start from meta 1. Since meta 2n is
- * valid now, recovery will think meta 3 is valid, which is wrong.
- * The solution is we create a new meta in meta2 with its seq == meta
- * 1's seq + 10 and let superblock points to meta2. The same recovery will
- * not think meta 3 is a valid meta, because its seq doesn't match
- */
- if (ctx.seq > log->last_cp_seq) {
- int ret;
-
- ret = r5l_log_write_empty_meta_block(log, ctx.pos, ctx.seq + 10);
- if (ret)
- return ret;
- log->seq = ctx.seq + 11;
- log->log_start = r5l_ring_add(log, ctx.pos, BLOCK_SECTORS);
- r5l_write_super(log, ctx.pos);
- log->last_checkpoint = ctx.pos;
+ if (ret)
+ return ret;
+
+ pos = ctx.pos;
+ ctx.seq += 10000;
+
+ if (ctx.data_only_stripes == 0) {
log->next_checkpoint = ctx.pos;
+ r5l_log_write_empty_meta_block(log, ctx.pos, ctx.seq++);
+ ctx.pos = r5l_ring_add(log, ctx.pos, BLOCK_SECTORS);
} else {
- log->log_start = ctx.pos;
- log->seq = ctx.seq;
+ sh = list_last_entry(&ctx.cached_list, struct stripe_head, lru);
+ log->next_checkpoint = sh->log_start;
+ }
+
+ if ((ctx.data_only_stripes == 0) && (ctx.data_parity_stripes == 0))
+ pr_debug("md/raid:%s: starting from clean shutdown\n",
+ mdname(mddev));
+ else {
+		pr_debug("md/raid:%s: recovering %d data-only stripes and %d data-parity stripes\n",
+ mdname(mddev), ctx.data_only_stripes,
+ ctx.data_parity_stripes);
+
+ if (ctx.data_only_stripes > 0)
+ if (r5c_recovery_rewrite_data_only_stripes(log, &ctx)) {
+ pr_err("md/raid:%s: failed to rewrite stripes to journal\n",
+ mdname(mddev));
+ return -EIO;
+ }
}
+
+ log->log_start = ctx.pos;
+ log->seq = ctx.seq;
+ log->last_checkpoint = pos;
+ r5l_write_super(log, pos);
return 0;
}
@@ -1110,7 +2197,293 @@ static void r5l_write_super(struct r5l_log *log, sector_t cp)
struct mddev *mddev = log->rdev->mddev;
log->rdev->journal_tail = cp;
- set_bit(MD_CHANGE_DEVS, &mddev->flags);
+ set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
+}
+
+static ssize_t r5c_journal_mode_show(struct mddev *mddev, char *page)
+{
+ struct r5conf *conf = mddev->private;
+ int ret;
+
+ if (!conf->log)
+ return 0;
+
+ switch (conf->log->r5c_journal_mode) {
+ case R5C_JOURNAL_MODE_WRITE_THROUGH:
+ ret = snprintf(
+ page, PAGE_SIZE, "[%s] %s\n",
+ r5c_journal_mode_str[R5C_JOURNAL_MODE_WRITE_THROUGH],
+ r5c_journal_mode_str[R5C_JOURNAL_MODE_WRITE_BACK]);
+ break;
+ case R5C_JOURNAL_MODE_WRITE_BACK:
+ ret = snprintf(
+ page, PAGE_SIZE, "%s [%s]\n",
+ r5c_journal_mode_str[R5C_JOURNAL_MODE_WRITE_THROUGH],
+ r5c_journal_mode_str[R5C_JOURNAL_MODE_WRITE_BACK]);
+ break;
+ default:
+ ret = 0;
+ }
+ return ret;
+}
+
+static ssize_t r5c_journal_mode_store(struct mddev *mddev,
+ const char *page, size_t length)
+{
+ struct r5conf *conf = mddev->private;
+ struct r5l_log *log = conf->log;
+ int val = -1, i;
+ int len = length;
+
+ if (!log)
+ return -ENODEV;
+
+ if (len && page[len - 1] == '\n')
+ len -= 1;
+ for (i = 0; i < ARRAY_SIZE(r5c_journal_mode_str); i++)
+ if (strlen(r5c_journal_mode_str[i]) == len &&
+ strncmp(page, r5c_journal_mode_str[i], len) == 0) {
+ val = i;
+ break;
+ }
+ if (val < R5C_JOURNAL_MODE_WRITE_THROUGH ||
+ val > R5C_JOURNAL_MODE_WRITE_BACK)
+ return -EINVAL;
+
+ mddev_suspend(mddev);
+ conf->log->r5c_journal_mode = val;
+ mddev_resume(mddev);
+
+ pr_debug("md/raid:%s: setting r5c cache mode to %d: %s\n",
+ mdname(mddev), val, r5c_journal_mode_str[val]);
+ return length;
+}
+
+struct md_sysfs_entry
+r5c_journal_mode = __ATTR(journal_mode, 0644,
+ r5c_journal_mode_show, r5c_journal_mode_store);
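
[Editor's note] With this entry in place, the cache mode should be switchable at runtime through the md sysfs directory, e.g. by writing "write-back" to /sys/block/md0/md/journal_mode (assuming the mode strings in r5c_journal_mode_str are "write-through" and "write-back"; that array is not defined in this hunk). The store handler suspends and resumes the array around the change, so the switch is safe on an active array.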
+
+/*
+ * Try to handle the write operation in the caching phase. This function
+ * should only be called in write-back mode.
+ *
+ * If all outstanding writes can be handled in the caching phase, returns 0.
+ * If the writes require the write-out phase, calls r5c_make_stripe_write_out()
+ * and returns -EAGAIN
+ */
+int r5c_try_caching_write(struct r5conf *conf,
+ struct stripe_head *sh,
+ struct stripe_head_state *s,
+ int disks)
+{
+ struct r5l_log *log = conf->log;
+ int i;
+ struct r5dev *dev;
+ int to_cache = 0;
+
+ BUG_ON(!r5c_is_writeback(log));
+
+ if (!test_bit(STRIPE_R5C_CACHING, &sh->state)) {
+ /*
+ * There are two different scenarios here:
+ * 1. The stripe has some data cached, and it is sent to
+ * write-out phase for reclaim
+ * 2. The stripe is clean, and this is the first write
+ *
+ * For 1, return -EAGAIN, so we continue with
+ * handle_stripe_dirtying().
+ *
+ * For 2, set STRIPE_R5C_CACHING and continue with caching
+ * write.
+ */
+
+ /* case 1: anything injournal or anything in written */
+ if (s->injournal > 0 || s->written > 0)
+ return -EAGAIN;
+ /* case 2 */
+ set_bit(STRIPE_R5C_CACHING, &sh->state);
+ }
+
+ for (i = disks; i--; ) {
+ dev = &sh->dev[i];
+ /* if non-overwrite, use writing-out phase */
+ if (dev->towrite && !test_bit(R5_OVERWRITE, &dev->flags) &&
+ !test_bit(R5_InJournal, &dev->flags)) {
+ r5c_make_stripe_write_out(sh);
+ return -EAGAIN;
+ }
+ }
+
+ for (i = disks; i--; ) {
+ dev = &sh->dev[i];
+ if (dev->towrite) {
+ set_bit(R5_Wantwrite, &dev->flags);
+ set_bit(R5_Wantdrain, &dev->flags);
+ set_bit(R5_LOCKED, &dev->flags);
+ to_cache++;
+ }
+ }
+
+ if (to_cache) {
+ set_bit(STRIPE_OP_BIODRAIN, &s->ops_request);
+ /*
+ * set STRIPE_LOG_TRAPPED, which triggers r5c_cache_data()
+ * in ops_run_io(). STRIPE_LOG_TRAPPED will be cleared in
+ * r5c_handle_data_cached()
+ */
+ set_bit(STRIPE_LOG_TRAPPED, &sh->state);
+ }
+
+ return 0;
+}
+
+/*
+ * free extra pages (orig_page) we allocated for prexor
+ */
+void r5c_release_extra_page(struct stripe_head *sh)
+{
+ struct r5conf *conf = sh->raid_conf;
+ int i;
+ bool using_disk_info_extra_page;
+
+ using_disk_info_extra_page =
+ sh->dev[0].orig_page == conf->disks[0].extra_page;
+
+ for (i = sh->disks; i--; )
+ if (sh->dev[i].page != sh->dev[i].orig_page) {
+ struct page *p = sh->dev[i].orig_page;
+
+ sh->dev[i].orig_page = sh->dev[i].page;
+ if (!using_disk_info_extra_page)
+ put_page(p);
+ }
+
+ if (using_disk_info_extra_page) {
+ clear_bit(R5C_EXTRA_PAGE_IN_USE, &conf->cache_state);
+ md_wakeup_thread(conf->mddev->thread);
+ }
+}
+
+void r5c_use_extra_page(struct stripe_head *sh)
+{
+ struct r5conf *conf = sh->raid_conf;
+ int i;
+ struct r5dev *dev;
+
+ for (i = sh->disks; i--; ) {
+ dev = &sh->dev[i];
+ if (dev->orig_page != dev->page)
+ put_page(dev->orig_page);
+ dev->orig_page = conf->disks[i].extra_page;
+ }
+}
+
+/*
+ * clean up the stripe (clear R5_InJournal for dev[pd_idx] etc.) after the
+ * stripe is committed to RAID disks.
+ */
+void r5c_finish_stripe_write_out(struct r5conf *conf,
+ struct stripe_head *sh,
+ struct stripe_head_state *s)
+{
+ int i;
+ int do_wakeup = 0;
+
+ if (!conf->log ||
+ !test_bit(R5_InJournal, &sh->dev[sh->pd_idx].flags))
+ return;
+
+ WARN_ON(test_bit(STRIPE_R5C_CACHING, &sh->state));
+ clear_bit(R5_InJournal, &sh->dev[sh->pd_idx].flags);
+
+ if (conf->log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_THROUGH)
+ return;
+
+ for (i = sh->disks; i--; ) {
+ clear_bit(R5_InJournal, &sh->dev[i].flags);
+ if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
+ do_wakeup = 1;
+ }
+
+ /*
+	 * analyse_stripe() runs before r5c_finish_stripe_write_out().
+	 * We updated R5_InJournal, so we also update s->injournal.
+ */
+ s->injournal = 0;
+
+ if (test_and_clear_bit(STRIPE_FULL_WRITE, &sh->state))
+ if (atomic_dec_and_test(&conf->pending_full_writes))
+ md_wakeup_thread(conf->mddev->thread);
+
+ if (do_wakeup)
+ wake_up(&conf->wait_for_overlap);
+
+ if (conf->log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_THROUGH)
+ return;
+
+ spin_lock_irq(&conf->log->stripe_in_journal_lock);
+ list_del_init(&sh->r5c);
+ spin_unlock_irq(&conf->log->stripe_in_journal_lock);
+ sh->log_start = MaxSector;
+ atomic_dec(&conf->log->stripe_in_journal_count);
+ r5c_update_log_state(conf->log);
+}
+
+int
+r5c_cache_data(struct r5l_log *log, struct stripe_head *sh,
+ struct stripe_head_state *s)
+{
+ struct r5conf *conf = sh->raid_conf;
+ int pages = 0;
+ int reserve;
+ int i;
+ int ret = 0;
+
+ BUG_ON(!log);
+
+ for (i = 0; i < sh->disks; i++) {
+ void *addr;
+
+ if (!test_bit(R5_Wantwrite, &sh->dev[i].flags))
+ continue;
+ addr = kmap_atomic(sh->dev[i].page);
+ sh->dev[i].log_checksum = crc32c_le(log->uuid_checksum,
+ addr, PAGE_SIZE);
+ kunmap_atomic(addr);
+ pages++;
+ }
+ WARN_ON(pages == 0);
+
+ /*
+	 * The stripe must enter the state machine again to call endio, so
+ * don't delay.
+ */
+ clear_bit(STRIPE_DELAYED, &sh->state);
+ atomic_inc(&sh->count);
+
+ mutex_lock(&log->io_mutex);
+ /* meta + data */
+ reserve = (1 + pages) << (PAGE_SHIFT - 9);
+
+ if (test_bit(R5C_LOG_CRITICAL, &conf->cache_state) &&
+ sh->log_start == MaxSector)
+ r5l_add_no_space_stripe(log, sh);
+ else if (!r5l_has_free_space(log, reserve)) {
+ if (sh->log_start == log->last_checkpoint)
+ BUG();
+ else
+ r5l_add_no_space_stripe(log, sh);
+ } else {
+ ret = r5l_log_stripe(log, sh, pages, 0);
+ if (ret) {
+ spin_lock_irq(&log->io_list_lock);
+ list_add_tail(&sh->log_list, &log->no_mem_stripes);
+ spin_unlock_irq(&log->io_list_lock);
+ }
+ }
+
+ mutex_unlock(&log->io_mutex);
+ return 0;
}
static int r5l_load_log(struct r5l_log *log)
@@ -1121,7 +2494,7 @@ static int r5l_load_log(struct r5l_log *log)
sector_t cp = log->rdev->journal_tail;
u32 stored_crc, expected_crc;
bool create_super = false;
- int ret;
+ int ret = 0;
/* Make sure it's valid */
if (cp >= rdev->sectors || round_down(cp, BLOCK_SECTORS) != cp)
@@ -1171,11 +2544,18 @@ create:
if (log->max_free_space > RECLAIM_MAX_FREE_SPACE)
log->max_free_space = RECLAIM_MAX_FREE_SPACE;
log->last_checkpoint = cp;
- log->next_checkpoint = cp;
__free_page(page);
- return r5l_recovery_log(log);
+ if (create_super) {
+ log->log_start = r5l_ring_add(log, cp, BLOCK_SECTORS);
+ log->seq = log->last_cp_seq + 1;
+ log->next_checkpoint = cp;
+ } else
+ ret = r5l_recovery_log(log);
+
+ r5c_update_log_state(log);
+ return ret;
ioerr:
__free_page(page);
return ret;
@@ -1188,6 +2568,22 @@ int r5l_init_log(struct r5conf *conf, struct md_rdev *rdev)
if (PAGE_SIZE != 4096)
return -EINVAL;
+
+ /*
+	 * The PAGE_SIZE must be big enough to hold 1 r5l_meta_block and
+	 * raid_disks r5l_payload_data_parity structures.
+	 *
+	 * The write journal and cache do not work for very big arrays
+	 * (raid_disks > 203).
+ */
+ if (sizeof(struct r5l_meta_block) +
+ ((sizeof(struct r5l_payload_data_parity) + sizeof(__le32)) *
+ conf->raid_disks) > PAGE_SIZE) {
+ pr_err("md/raid:%s: write journal/cache doesn't work for array with %d disks\n",
+ mdname(conf->mddev), conf->raid_disks);
+ return -EINVAL;
+ }
+
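
[Editor's arithmetic] A back-of-envelope check of the 203 figure, assuming a 32-byte struct r5l_meta_block header and a 16-byte struct r5l_payload_data_parity plus one 4-byte __le32 checksum per payload entry (sizes assumed for the sketch, not taken from the headers):

#include <stdio.h>

int main(void)
{
	const int page = 4096, meta_hdr = 32, per_disk = 16 + 4;

	/* (4096 - 32) / 20 = 203 payload slots per 4K meta block */
	printf("max raid_disks: %d\n", (page - meta_hdr) / per_disk);
	return 0;
}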
log = kzalloc(sizeof(*log), GFP_KERNEL);
if (!log)
return -ENOMEM;
@@ -1205,7 +2601,7 @@ int r5l_init_log(struct r5conf *conf, struct md_rdev *rdev)
INIT_LIST_HEAD(&log->io_end_ios);
INIT_LIST_HEAD(&log->flushing_ios);
INIT_LIST_HEAD(&log->finished_ios);
- bio_init(&log->flush_bio);
+ bio_init(&log->flush_bio, NULL, 0);
log->io_kc = KMEM_CACHE(r5l_io_unit, 0);
if (!log->io_kc)
@@ -1227,6 +2623,8 @@ int r5l_init_log(struct r5conf *conf, struct md_rdev *rdev)
log->rdev->mddev, "reclaim");
if (!log->reclaim_thread)
goto reclaim_thread;
+ log->reclaim_thread->timeout = R5C_RECLAIM_WAKEUP_INTERVAL;
+
init_waitqueue_head(&log->iounit_wait);
INIT_LIST_HEAD(&log->no_mem_stripes);
@@ -1234,6 +2632,13 @@ int r5l_init_log(struct r5conf *conf, struct md_rdev *rdev)
INIT_LIST_HEAD(&log->no_space_stripes);
spin_lock_init(&log->no_space_stripes_lock);
+ INIT_WORK(&log->deferred_io_work, r5l_submit_io_async);
+
+ log->r5c_journal_mode = R5C_JOURNAL_MODE_WRITE_THROUGH;
+ INIT_LIST_HEAD(&log->stripe_in_journal_list);
+ spin_lock_init(&log->stripe_in_journal_lock);
+ atomic_set(&log->stripe_in_journal_count, 0);
+
if (r5l_load_log(log))
goto error;
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 92ac251e91e6..06d7279bdd04 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -70,19 +70,6 @@ module_param(devices_handle_discard_safely, bool, 0644);
MODULE_PARM_DESC(devices_handle_discard_safely,
"Set to Y if all devices in each array reliably return zeroes on reads from discarded regions");
static struct workqueue_struct *raid5_wq;
-/*
- * Stripe cache
- */
-
-#define NR_STRIPES 256
-#define STRIPE_SIZE PAGE_SIZE
-#define STRIPE_SHIFT (PAGE_SHIFT - 9)
-#define STRIPE_SECTORS (STRIPE_SIZE>>9)
-#define IO_THRESHOLD 1
-#define BYPASS_THRESHOLD 1
-#define NR_HASH (PAGE_SIZE / sizeof(struct hlist_head))
-#define HASH_MASK (NR_HASH - 1)
-#define MAX_STRIPE_BATCH 8
static inline struct hlist_head *stripe_hash(struct r5conf *conf, sector_t sect)
{
@@ -126,64 +113,6 @@ static inline void unlock_all_device_hash_locks_irq(struct r5conf *conf)
local_irq_enable();
}
-/* bio's attached to a stripe+device for I/O are linked together in bi_sector
- * order without overlap. There may be several bio's per stripe+device, and
- * a bio could span several devices.
- * When walking this list for a particular stripe+device, we must never proceed
- * beyond a bio that extends past this device, as the next bio might no longer
- * be valid.
- * This function is used to determine the 'next' bio in the list, given the sector
- * of the current stripe+device
- */
-static inline struct bio *r5_next_bio(struct bio *bio, sector_t sector)
-{
- int sectors = bio_sectors(bio);
- if (bio->bi_iter.bi_sector + sectors < sector + STRIPE_SECTORS)
- return bio->bi_next;
- else
- return NULL;
-}
-
-/*
- * We maintain a biased count of active stripes in the bottom 16 bits of
- * bi_phys_segments, and a count of processed stripes in the upper 16 bits
- */
-static inline int raid5_bi_processed_stripes(struct bio *bio)
-{
- atomic_t *segments = (atomic_t *)&bio->bi_phys_segments;
- return (atomic_read(segments) >> 16) & 0xffff;
-}
-
-static inline int raid5_dec_bi_active_stripes(struct bio *bio)
-{
- atomic_t *segments = (atomic_t *)&bio->bi_phys_segments;
- return atomic_sub_return(1, segments) & 0xffff;
-}
-
-static inline void raid5_inc_bi_active_stripes(struct bio *bio)
-{
- atomic_t *segments = (atomic_t *)&bio->bi_phys_segments;
- atomic_inc(segments);
-}
-
-static inline void raid5_set_bi_processed_stripes(struct bio *bio,
- unsigned int cnt)
-{
- atomic_t *segments = (atomic_t *)&bio->bi_phys_segments;
- int old, new;
-
- do {
- old = atomic_read(segments);
- new = (old & 0xffff) | (cnt << 16);
- } while (atomic_cmpxchg(segments, old, new) != old);
-}
-
-static inline void raid5_set_bi_stripes(struct bio *bio, unsigned int cnt)
-{
- atomic_t *segments = (atomic_t *)&bio->bi_phys_segments;
- atomic_set(segments, cnt);
-}
-
/* Find first data disk in a raid6 stripe */
static inline int raid6_d0(struct stripe_head *sh)
{
@@ -289,8 +218,27 @@ static void raid5_wakeup_stripe_thread(struct stripe_head *sh)
static void do_release_stripe(struct r5conf *conf, struct stripe_head *sh,
struct list_head *temp_inactive_list)
{
+ int i;
+	int injournal = 0; /* number of data pages with R5_InJournal */
+
BUG_ON(!list_empty(&sh->lru));
BUG_ON(atomic_read(&conf->active_stripes)==0);
+
+ if (r5c_is_writeback(conf->log))
+ for (i = sh->disks; i--; )
+ if (test_bit(R5_InJournal, &sh->dev[i].flags))
+ injournal++;
+ /*
+	 * When quiescing in r5c write-back mode, set STRIPE_HANDLE for stripes
+	 * with data in the journal, so they are not released to the cached lists
+ */
+ if (conf->quiesce && r5c_is_writeback(conf->log) &&
+ !test_bit(STRIPE_HANDLE, &sh->state) && injournal != 0) {
+ if (test_bit(STRIPE_R5C_CACHING, &sh->state))
+ r5c_make_stripe_write_out(sh);
+ set_bit(STRIPE_HANDLE, &sh->state);
+ }
+
if (test_bit(STRIPE_HANDLE, &sh->state)) {
if (test_bit(STRIPE_DELAYED, &sh->state) &&
!test_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
@@ -316,8 +264,30 @@ static void do_release_stripe(struct r5conf *conf, struct stripe_head *sh,
< IO_THRESHOLD)
md_wakeup_thread(conf->mddev->thread);
atomic_dec(&conf->active_stripes);
- if (!test_bit(STRIPE_EXPANDING, &sh->state))
- list_add_tail(&sh->lru, temp_inactive_list);
+ if (!test_bit(STRIPE_EXPANDING, &sh->state)) {
+ if (!r5c_is_writeback(conf->log))
+ list_add_tail(&sh->lru, temp_inactive_list);
+ else {
+ WARN_ON(test_bit(R5_InJournal, &sh->dev[sh->pd_idx].flags));
+ if (injournal == 0)
+ list_add_tail(&sh->lru, temp_inactive_list);
+ else if (injournal == conf->raid_disks - conf->max_degraded) {
+ /* full stripe */
+ if (!test_and_set_bit(STRIPE_R5C_FULL_STRIPE, &sh->state))
+ atomic_inc(&conf->r5c_cached_full_stripes);
+ if (test_and_clear_bit(STRIPE_R5C_PARTIAL_STRIPE, &sh->state))
+ atomic_dec(&conf->r5c_cached_partial_stripes);
+ list_add_tail(&sh->lru, &conf->r5c_full_stripe_list);
+ r5c_check_cached_full_stripe(conf);
+ } else {
+ /* partial stripe */
+ if (!test_and_set_bit(STRIPE_R5C_PARTIAL_STRIPE,
+ &sh->state))
+ atomic_inc(&conf->r5c_cached_partial_stripes);
+ list_add_tail(&sh->lru, &conf->r5c_partial_stripe_list);
+ }
+ }
+ }
}
}
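
[Editor's example] Concretely for the classification above: on an 8-disk RAID6 (max_degraded == 2) a stripe has 6 data devices, so it moves to r5c_full_stripe_list once all 6 data pages carry R5_InJournal (injournal == raid_disks - max_degraded); with 1 to 5 in-journal pages it goes to r5c_partial_stripe_list, and with none it returns to the inactive list as before.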
@@ -541,7 +511,7 @@ retry:
if (dev->toread || dev->read || dev->towrite || dev->written ||
test_bit(R5_LOCKED, &dev->flags)) {
- printk(KERN_ERR "sector=%llx i=%d %p %p %p %p %d\n",
+ pr_err("sector=%llx i=%d %p %p %p %p %d\n",
(unsigned long long)sh->sector, i, dev->toread,
dev->read, dev->towrite, dev->written,
test_bit(R5_LOCKED, &dev->flags));
@@ -680,9 +650,12 @@ raid5_get_active_stripe(struct r5conf *conf, sector_t sector,
}
if (noblock && sh == NULL)
break;
+
+ r5c_check_stripe_cache_usage(conf);
if (!sh) {
set_bit(R5_INACTIVE_BLOCKED,
&conf->cache_state);
+ r5l_wake_reclaim(conf->log, 0);
wait_event_lock_irq(
conf->wait_for_stripe,
!list_empty(conf->inactive_list + hash) &&
@@ -901,8 +874,19 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
might_sleep();
- if (r5l_write_stripe(conf->log, sh) == 0)
- return;
+ if (!test_bit(STRIPE_R5C_CACHING, &sh->state)) {
+ /* writing out phase */
+ if (s->waiting_extra_page)
+ return;
+ if (r5l_write_stripe(conf->log, sh) == 0)
+ return;
+ } else { /* caching phase */
+ if (test_bit(STRIPE_LOG_TRAPPED, &sh->state)) {
+ r5c_cache_data(conf->log, sh, s);
+ return;
+ }
+ }
+
for (i = disks; i--; ) {
int op, op_flags = 0;
int replace_only = 0;
@@ -913,7 +897,7 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
if (test_and_clear_bit(R5_Wantwrite, &sh->dev[i].flags)) {
op = REQ_OP_WRITE;
if (test_and_clear_bit(R5_WantFUA, &sh->dev[i].flags))
- op_flags = WRITE_FUA;
+ op_flags = REQ_FUA;
if (test_bit(R5_Discard, &sh->dev[i].flags))
op = REQ_OP_DISCARD;
} else if (test_and_clear_bit(R5_Wantread, &sh->dev[i].flags))
@@ -977,7 +961,7 @@ again:
if (bad < 0) {
set_bit(BlockedBadBlocks, &rdev->flags);
if (!conf->mddev->external &&
- conf->mddev->flags) {
+ conf->mddev->sb_flags) {
/* It is very unlikely, but we might
* still need to write out the
* bad block log - better give it
@@ -1115,7 +1099,7 @@ again:
static struct dma_async_tx_descriptor *
async_copy_data(int frombio, struct bio *bio, struct page **page,
sector_t sector, struct dma_async_tx_descriptor *tx,
- struct stripe_head *sh)
+ struct stripe_head *sh, int no_skipcopy)
{
struct bio_vec bvl;
struct bvec_iter iter;
@@ -1155,7 +1139,8 @@ async_copy_data(int frombio, struct bio *bio, struct page **page,
if (frombio) {
if (sh->raid_conf->skip_copy &&
b_offset == 0 && page_offset == 0 &&
- clen == STRIPE_SIZE)
+ clen == STRIPE_SIZE &&
+ !no_skipcopy)
*page = bio_page;
else
tx = async_memcpy(*page, bio_page, page_offset,
@@ -1237,7 +1222,7 @@ static void ops_run_biofill(struct stripe_head *sh)
while (rbi && rbi->bi_iter.bi_sector <
dev->sector + STRIPE_SECTORS) {
tx = async_copy_data(0, rbi, &dev->page,
- dev->sector, tx, sh);
+ dev->sector, tx, sh, 0);
rbi = r5_next_bio(rbi, dev->sector);
}
}
@@ -1364,10 +1349,15 @@ static int set_syndrome_sources(struct page **srcs,
if (i == sh->qd_idx || i == sh->pd_idx ||
(srctype == SYNDROME_SRC_ALL) ||
(srctype == SYNDROME_SRC_WANT_DRAIN &&
- test_bit(R5_Wantdrain, &dev->flags)) ||
+ (test_bit(R5_Wantdrain, &dev->flags) ||
+ test_bit(R5_InJournal, &dev->flags))) ||
(srctype == SYNDROME_SRC_WRITTEN &&
- dev->written))
- srcs[slot] = sh->dev[i].page;
+ dev->written)) {
+ if (test_bit(R5_InJournal, &dev->flags))
+ srcs[slot] = sh->dev[i].orig_page;
+ else
+ srcs[slot] = sh->dev[i].page;
+ }
i = raid6_next_disk(i, disks);
} while (i != d0_idx);
@@ -1546,6 +1536,13 @@ static void ops_complete_prexor(void *stripe_head_ref)
pr_debug("%s: stripe %llu\n", __func__,
(unsigned long long)sh->sector);
+
+ if (r5c_is_writeback(sh->raid_conf->log))
+ /*
+ * raid5-cache write back uses orig_page during prexor.
+ * After prexor, it is time to free orig_page
+ */
+ r5c_release_extra_page(sh);
}
static struct dma_async_tx_descriptor *
@@ -1567,7 +1564,9 @@ ops_run_prexor5(struct stripe_head *sh, struct raid5_percpu *percpu,
for (i = disks; i--; ) {
struct r5dev *dev = &sh->dev[i];
/* Only process blocks that are known to be uptodate */
- if (test_bit(R5_Wantdrain, &dev->flags))
+ if (test_bit(R5_InJournal, &dev->flags))
+ xor_srcs[count++] = dev->orig_page;
+ else if (test_bit(R5_Wantdrain, &dev->flags))
xor_srcs[count++] = dev->page;
}
@@ -1601,6 +1600,7 @@ ops_run_prexor6(struct stripe_head *sh, struct raid5_percpu *percpu,
static struct dma_async_tx_descriptor *
ops_run_biodrain(struct stripe_head *sh, struct dma_async_tx_descriptor *tx)
{
+ struct r5conf *conf = sh->raid_conf;
int disks = sh->disks;
int i;
struct stripe_head *head_sh = sh;
@@ -1618,6 +1618,11 @@ ops_run_biodrain(struct stripe_head *sh, struct dma_async_tx_descriptor *tx)
again:
dev = &sh->dev[i];
+ /*
+			 * clear R5_InJournal, so that when rewriting a page in the
+			 * journal, it is not skipped by r5l_log_stripe()
+ */
+ clear_bit(R5_InJournal, &dev->flags);
spin_lock_irq(&sh->stripe_lock);
chosen = dev->towrite;
dev->towrite = NULL;
@@ -1637,8 +1642,10 @@ again:
set_bit(R5_Discard, &dev->flags);
else {
tx = async_copy_data(1, wbi, &dev->page,
- dev->sector, tx, sh);
- if (dev->page != dev->orig_page) {
+ dev->sector, tx, sh,
+ r5c_is_writeback(conf->log));
+ if (dev->page != dev->orig_page &&
+ !r5c_is_writeback(conf->log)) {
set_bit(R5_SkipCopy, &dev->flags);
clear_bit(R5_UPTODATE, &dev->flags);
clear_bit(R5_OVERWRITE, &dev->flags);
@@ -1746,7 +1753,8 @@ again:
xor_dest = xor_srcs[count++] = sh->dev[pd_idx].page;
for (i = disks; i--; ) {
struct r5dev *dev = &sh->dev[i];
- if (head_sh->dev[i].written)
+ if (head_sh->dev[i].written ||
+ test_bit(R5_InJournal, &head_sh->dev[i].flags))
xor_srcs[count++] = dev->page;
}
} else {
@@ -2000,17 +2008,15 @@ static struct stripe_head *alloc_stripe(struct kmem_cache *sc, gfp_t gfp,
spin_lock_init(&sh->batch_lock);
INIT_LIST_HEAD(&sh->batch_list);
INIT_LIST_HEAD(&sh->lru);
+ INIT_LIST_HEAD(&sh->r5c);
+ INIT_LIST_HEAD(&sh->log_list);
atomic_set(&sh->count, 1);
+ sh->log_start = MaxSector;
for (i = 0; i < disks; i++) {
struct r5dev *dev = &sh->dev[i];
- bio_init(&dev->req);
- dev->req.bi_io_vec = &dev->vec;
- dev->req.bi_max_vecs = 1;
-
- bio_init(&dev->rreq);
- dev->rreq.bi_io_vec = &dev->rvec;
- dev->rreq.bi_max_vecs = 1;
+ bio_init(&dev->req, &dev->vec, 1);
+ bio_init(&dev->rreq, &dev->rvec, 1);
}
}
return sh;
@@ -2245,10 +2251,24 @@ static int resize_stripes(struct r5conf *conf, int newsize)
*/
ndisks = kzalloc(newsize * sizeof(struct disk_info), GFP_NOIO);
if (ndisks) {
- for (i=0; i<conf->raid_disks; i++)
+ for (i = 0; i < conf->pool_size; i++)
ndisks[i] = conf->disks[i];
- kfree(conf->disks);
- conf->disks = ndisks;
+
+ for (i = conf->pool_size; i < newsize; i++) {
+ ndisks[i].extra_page = alloc_page(GFP_NOIO);
+ if (!ndisks[i].extra_page)
+ err = -ENOMEM;
+ }
+
+ if (err) {
+ for (i = conf->pool_size; i < newsize; i++)
+ if (ndisks[i].extra_page)
+ put_page(ndisks[i].extra_page);
+ kfree(ndisks);
+ } else {
+ kfree(conf->disks);
+ conf->disks = ndisks;
+ }
} else
err = -ENOMEM;
@@ -2347,10 +2367,8 @@ static void raid5_end_read_request(struct bio * bi)
* replacement device. We just fail those on
* any error
*/
- printk_ratelimited(
- KERN_INFO
- "md/raid:%s: read error corrected"
- " (%lu sectors at %llu on %s)\n",
+ pr_info_ratelimited(
+ "md/raid:%s: read error corrected (%lu sectors at %llu on %s)\n",
mdname(conf->mddev), STRIPE_SECTORS,
(unsigned long long)s,
bdevname(rdev->bdev, b));
@@ -2370,36 +2388,29 @@ static void raid5_end_read_request(struct bio * bi)
clear_bit(R5_UPTODATE, &sh->dev[i].flags);
atomic_inc(&rdev->read_errors);
if (test_bit(R5_ReadRepl, &sh->dev[i].flags))
- printk_ratelimited(
- KERN_WARNING
- "md/raid:%s: read error on replacement device "
- "(sector %llu on %s).\n",
+ pr_warn_ratelimited(
+ "md/raid:%s: read error on replacement device (sector %llu on %s).\n",
mdname(conf->mddev),
(unsigned long long)s,
bdn);
else if (conf->mddev->degraded >= conf->max_degraded) {
set_bad = 1;
- printk_ratelimited(
- KERN_WARNING
- "md/raid:%s: read error not correctable "
- "(sector %llu on %s).\n",
+ pr_warn_ratelimited(
+ "md/raid:%s: read error not correctable (sector %llu on %s).\n",
mdname(conf->mddev),
(unsigned long long)s,
bdn);
} else if (test_bit(R5_ReWrite, &sh->dev[i].flags)) {
/* Oh, no!!! */
set_bad = 1;
- printk_ratelimited(
- KERN_WARNING
- "md/raid:%s: read error NOT corrected!! "
- "(sector %llu on %s).\n",
+ pr_warn_ratelimited(
+ "md/raid:%s: read error NOT corrected!! (sector %llu on %s).\n",
mdname(conf->mddev),
(unsigned long long)s,
bdn);
} else if (atomic_read(&rdev->read_errors)
> conf->max_nr_stripes)
- printk(KERN_WARNING
- "md/raid:%s: Too many read errors, failing device %s.\n",
+ pr_warn("md/raid:%s: Too many read errors, failing device %s.\n",
mdname(conf->mddev), bdn);
else
retry = 1;
@@ -2531,15 +2542,14 @@ static void raid5_error(struct mddev *mddev, struct md_rdev *rdev)
set_bit(Blocked, &rdev->flags);
set_bit(Faulty, &rdev->flags);
- set_mask_bits(&mddev->flags, 0,
- BIT(MD_CHANGE_DEVS) | BIT(MD_CHANGE_PENDING));
- printk(KERN_ALERT
- "md/raid:%s: Disk failure on %s, disabling device.\n"
- "md/raid:%s: Operation continuing on %d devices.\n",
- mdname(mddev),
- bdevname(rdev->bdev, b),
- mdname(mddev),
- conf->raid_disks - mddev->degraded);
+ set_mask_bits(&mddev->sb_flags, 0,
+ BIT(MD_SB_CHANGE_DEVS) | BIT(MD_SB_CHANGE_PENDING));
+ pr_crit("md/raid:%s: Disk failure on %s, disabling device.\n"
+ "md/raid:%s: Operation continuing on %d devices.\n",
+ mdname(mddev),
+ bdevname(rdev->bdev, b),
+ mdname(mddev),
+ conf->raid_disks - mddev->degraded);
}
/*
@@ -2861,8 +2871,8 @@ sector_t raid5_compute_blocknr(struct stripe_head *sh, int i, int previous)
previous, &dummy1, &sh2);
if (check != sh->sector || dummy1 != dd_idx || sh2.pd_idx != sh->pd_idx
|| sh2.qd_idx != sh->qd_idx) {
- printk(KERN_ERR "md/raid:%s: compute_blocknr: map not correct\n",
- mdname(conf->mddev));
+ pr_warn("md/raid:%s: compute_blocknr: map not correct\n",
+ mdname(conf->mddev));
return 0;
}
return r_sector;
@@ -2877,6 +2887,13 @@ schedule_reconstruction(struct stripe_head *sh, struct stripe_head_state *s,
int level = conf->level;
if (rcw) {
+ /*
+ * In some cases, handle_stripe_dirtying initially decides to
+ * run rmw and allocates an extra page for prexor. However, rcw
+ * turns out to be cheaper later on. We need to free the extra
+ * page now, because we won't be able to do that in
+ * ops_complete_prexor().
+ */
+ r5c_release_extra_page(sh);
for (i = disks; i--; ) {
struct r5dev *dev = &sh->dev[i];
@@ -2887,6 +2904,9 @@ schedule_reconstruction(struct stripe_head *sh, struct stripe_head_state *s,
if (!expand)
clear_bit(R5_UPTODATE, &dev->flags);
s->locked++;
+ } else if (test_bit(R5_InJournal, &dev->flags)) {
+ set_bit(R5_LOCKED, &dev->flags);
+ s->locked++;
}
}
/* if we are not expanding this is a proper write request, and
@@ -2926,6 +2946,9 @@ schedule_reconstruction(struct stripe_head *sh, struct stripe_head_state *s,
set_bit(R5_LOCKED, &dev->flags);
clear_bit(R5_UPTODATE, &dev->flags);
s->locked++;
+ } else if (test_bit(R5_InJournal, &dev->flags)) {
+ set_bit(R5_LOCKED, &dev->flags);
+ s->locked++;
}
}
if (!s->locked)
@@ -3569,10 +3592,10 @@ unhash:
break_stripe_batch_list(head_sh, STRIPE_EXPAND_SYNC_FLAGS);
}
-static void handle_stripe_dirtying(struct r5conf *conf,
- struct stripe_head *sh,
- struct stripe_head_state *s,
- int disks)
+static int handle_stripe_dirtying(struct r5conf *conf,
+ struct stripe_head *sh,
+ struct stripe_head_state *s,
+ int disks)
{
int rmw = 0, rcw = 0, i;
sector_t recovery_cp = conf->mddev->recovery_cp;
@@ -3597,9 +3620,12 @@ static void handle_stripe_dirtying(struct r5conf *conf,
} else for (i = disks; i--; ) {
/* would I have to read this buffer for read_modify_write */
struct r5dev *dev = &sh->dev[i];
- if ((dev->towrite || i == sh->pd_idx || i == sh->qd_idx) &&
+ if ((dev->towrite || i == sh->pd_idx || i == sh->qd_idx ||
+ test_bit(R5_InJournal, &dev->flags)) &&
!test_bit(R5_LOCKED, &dev->flags) &&
- !(test_bit(R5_UPTODATE, &dev->flags) ||
+ !((test_bit(R5_UPTODATE, &dev->flags) &&
+ (!test_bit(R5_InJournal, &dev->flags) ||
+ dev->page != dev->orig_page)) ||
test_bit(R5_Wantcompute, &dev->flags))) {
if (test_bit(R5_Insync, &dev->flags))
rmw++;
@@ -3611,13 +3637,15 @@ static void handle_stripe_dirtying(struct r5conf *conf,
i != sh->pd_idx && i != sh->qd_idx &&
!test_bit(R5_LOCKED, &dev->flags) &&
!(test_bit(R5_UPTODATE, &dev->flags) ||
- test_bit(R5_Wantcompute, &dev->flags))) {
+ test_bit(R5_InJournal, &dev->flags) ||
+ test_bit(R5_Wantcompute, &dev->flags))) {
if (test_bit(R5_Insync, &dev->flags))
rcw++;
else
rcw += 2*disks;
}
}
+
pr_debug("for sector %llu, rmw=%d rcw=%d\n",
(unsigned long long)sh->sector, rmw, rcw);
set_bit(STRIPE_HANDLE, &sh->state);
@@ -3629,10 +3657,44 @@ static void handle_stripe_dirtying(struct r5conf *conf,
(unsigned long long)sh->sector, rmw);
for (i = disks; i--; ) {
struct r5dev *dev = &sh->dev[i];
- if ((dev->towrite || i == sh->pd_idx || i == sh->qd_idx) &&
+ if (test_bit(R5_InJournal, &dev->flags) &&
+ dev->page == dev->orig_page &&
+ !test_bit(R5_LOCKED, &sh->dev[sh->pd_idx].flags)) {
+ /* alloc page for prexor */
+ struct page *p = alloc_page(GFP_NOIO);
+
+ if (p) {
+ dev->orig_page = p;
+ continue;
+ }
+
+ /*
+ * alloc_page() failed, try to use
+ * disk_info->extra_page
+ */
+ if (!test_and_set_bit(R5C_EXTRA_PAGE_IN_USE,
+ &conf->cache_state)) {
+ r5c_use_extra_page(sh);
+ break;
+ }
+
+ /* extra_page in use, add to delayed_list */
+ set_bit(STRIPE_DELAYED, &sh->state);
+ s->waiting_extra_page = 1;
+ return -EAGAIN;
+ }
+ }
+
+ for (i = disks; i--; ) {
+ struct r5dev *dev = &sh->dev[i];
+ if ((dev->towrite ||
+ i == sh->pd_idx || i == sh->qd_idx ||
+ test_bit(R5_InJournal, &dev->flags)) &&
!test_bit(R5_LOCKED, &dev->flags) &&
- !(test_bit(R5_UPTODATE, &dev->flags) ||
- test_bit(R5_Wantcompute, &dev->flags)) &&
+ !((test_bit(R5_UPTODATE, &dev->flags) &&
+ (!test_bit(R5_InJournal, &dev->flags) ||
+ dev->page != dev->orig_page)) ||
+ test_bit(R5_Wantcompute, &dev->flags)) &&
test_bit(R5_Insync, &dev->flags)) {
if (test_bit(STRIPE_PREREAD_ACTIVE,
&sh->state)) {
@@ -3658,6 +3720,7 @@ static void handle_stripe_dirtying(struct r5conf *conf,
i != sh->pd_idx && i != sh->qd_idx &&
!test_bit(R5_LOCKED, &dev->flags) &&
!(test_bit(R5_UPTODATE, &dev->flags) ||
+ test_bit(R5_InJournal, &dev->flags) ||
test_bit(R5_Wantcompute, &dev->flags))) {
rcw++;
if (test_bit(R5_Insync, &dev->flags) &&
@@ -3697,8 +3760,9 @@ static void handle_stripe_dirtying(struct r5conf *conf,
*/
if ((s->req_compute || !test_bit(STRIPE_COMPUTE_RUN, &sh->state)) &&
(s->locked == 0 && (rcw == 0 || rmw == 0) &&
- !test_bit(STRIPE_BIT_DELAY, &sh->state)))
+ !test_bit(STRIPE_BIT_DELAY, &sh->state)))
schedule_reconstruction(sh, s, rcw == 0, 0);
+ return 0;
}
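The rmw/rcw weighing in handle_stripe_dirtying() is easiest to see with concrete numbers. On a 5-drive RAID5 stripe, rewriting one data block costs read-modify-write two reads (the old data block plus the old parity) while reconstruct-write costs three (the untouched data blocks), so rmw wins; rewrite three blocks and the counts become four versus one, so rcw wins. A restatement of just that arithmetic (illustrative only; the driver's real accounting also weighs R5_InJournal and R5_Insync state, as the hunk above shows):

	/* Reads needed to update parity on an n-disk RAID5 stripe when
	 * 'dirty' of its n - 1 data blocks are being rewritten.
	 */
	static inline int rmw_reads(int n, int dirty)
	{
		return dirty + 1;		/* old data blocks + old parity */
	}

	static inline int rcw_reads(int n, int dirty)
	{
		return (n - 1) - dirty;		/* the untouched data blocks */
	}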
static void handle_parity_checks5(struct r5conf *conf, struct stripe_head *sh,
@@ -3782,7 +3846,7 @@ static void handle_parity_checks5(struct r5conf *conf, struct stripe_head *sh,
case check_state_compute_run:
break;
default:
- printk(KERN_ERR "%s: unknown check_state: %d sector: %llu\n",
+ pr_err("%s: unknown check_state: %d sector: %llu\n",
__func__, sh->check_state,
(unsigned long long) sh->sector);
BUG();
@@ -3946,9 +4010,9 @@ static void handle_parity_checks6(struct r5conf *conf, struct stripe_head *sh,
case check_state_compute_run:
break;
default:
- printk(KERN_ERR "%s: unknown check_state: %d sector: %llu\n",
- __func__, sh->check_state,
- (unsigned long long) sh->sector);
+ pr_warn("%s: unknown check_state: %d sector: %llu\n",
+ __func__, sh->check_state,
+ (unsigned long long) sh->sector);
BUG();
}
}
@@ -4188,6 +4252,11 @@ static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s)
if (rdev && !test_bit(Faulty, &rdev->flags))
do_recovery = 1;
}
+
+ if (test_bit(R5_InJournal, &dev->flags))
+ s->injournal++;
+ if (test_bit(R5_InJournal, &dev->flags) && dev->written)
+ s->just_cached++;
}
if (test_bit(STRIPE_SYNCING, &sh->state)) {
/* If there is a failed device being replaced,
@@ -4416,7 +4485,8 @@ static void handle_stripe(struct stripe_head *sh)
struct r5dev *dev = &sh->dev[i];
if (test_bit(R5_LOCKED, &dev->flags) &&
(i == sh->pd_idx || i == sh->qd_idx ||
- dev->written)) {
+ dev->written || test_bit(R5_InJournal,
+ &dev->flags))) {
pr_debug("Writing block %d\n", i);
set_bit(R5_Wantwrite, &dev->flags);
if (prexor)
@@ -4456,6 +4526,10 @@ static void handle_stripe(struct stripe_head *sh)
test_bit(R5_Discard, &qdev->flags))))))
handle_stripe_clean_event(conf, sh, disks, &s.return_bi);
+ if (s.just_cached)
+ r5c_handle_cached_data_endio(conf, sh, disks, &s.return_bi);
+ r5l_stripe_write_finished(sh);
+
/* Now we might consider reading some blocks, either to check/generate
* parity, or to satisfy requests
* or to load a block that is being partially written.
@@ -4467,14 +4541,51 @@ static void handle_stripe(struct stripe_head *sh)
|| s.expanding)
handle_stripe_fill(sh, &s, disks);
- /* Now to consider new write requests and what else, if anything
- * should be read. We do not handle new writes when:
+ /*
+ * When the stripe finishes a full journal write cycle (write to the
+ * journal and then to the raid disks), this is the clean-up procedure
+ * so it is ready for the next operation.
+ */
+ r5c_finish_stripe_write_out(conf, sh, &s);
+
+ /*
+ * Now to consider new write requests, cache write back and what else,
+ * if anything should be read. We do not handle new writes when:
* 1/ A 'write' operation (copy+xor) is already in flight.
* 2/ A 'check' operation is in flight, as it may clobber the parity
* block.
+ * 3/ A r5c cache log write is in flight.
*/
- if (s.to_write && !sh->reconstruct_state && !sh->check_state)
- handle_stripe_dirtying(conf, sh, &s, disks);
+
+ if (!sh->reconstruct_state && !sh->check_state && !sh->log_io) {
+ if (!r5c_is_writeback(conf->log)) {
+ if (s.to_write)
+ handle_stripe_dirtying(conf, sh, &s, disks);
+ } else { /* write back cache */
+ int ret = 0;
+
+ /* First, try handle writes in caching phase */
+ if (s.to_write)
+ ret = r5c_try_caching_write(conf, sh, &s,
+ disks);
+ /*
+ * If caching phase failed: ret == -EAGAIN
+ * OR
+ * stripe under reclaim: !caching && injournal
+ *
+ * fall back to handle_stripe_dirtying()
+ */
+ if (ret == -EAGAIN ||
+ /* stripe under reclaim: !caching && injournal */
+ (!test_bit(STRIPE_R5C_CACHING, &sh->state) &&
+ s.injournal > 0)) {
+ ret = handle_stripe_dirtying(conf, sh, &s,
+ disks);
+ if (ret == -EAGAIN)
+ goto finish;
+ }
+ }
+ }
/* maybe we need to check and possibly fix the parity for this stripe
* Any reads will already have been scheduled, so we just see if enough
@@ -4645,9 +4756,7 @@ finish:
}
if (!bio_list_empty(&s.return_bi)) {
- if (test_bit(MD_CHANGE_PENDING, &conf->mddev->flags) &&
- (s.failed <= conf->max_degraded ||
- conf->mddev->external == 0)) {
+ if (test_bit(MD_SB_CHANGE_PENDING, &conf->mddev->sb_flags)) {
spin_lock_irq(&conf->device_lock);
bio_list_merge(&conf->return_bi, &s.return_bi);
spin_unlock_irq(&conf->device_lock);
@@ -4703,6 +4812,10 @@ static int raid5_congested(struct mddev *mddev, int bits)
if (test_bit(R5_INACTIVE_BLOCKED, &conf->cache_state))
return 1;
+
+ /* Also checks whether there is pressure on r5cache log space */
+ if (test_bit(R5C_LOG_TIGHT, &conf->cache_state))
+ return 1;
if (conf->quiesce)
return 1;
if (atomic_read(&conf->empty_inactive_list_nr))
@@ -5172,6 +5285,7 @@ static void raid5_make_request(struct mddev *mddev, struct bio * bi)
int remaining;
DEFINE_WAIT(w);
bool do_prepare;
+ bool do_flush = false;
if (unlikely(bi->bi_opf & REQ_PREFLUSH)) {
int ret = r5l_handle_flush_request(conf->log, bi);
@@ -5183,6 +5297,11 @@ static void raid5_make_request(struct mddev *mddev, struct bio * bi)
return;
}
/* ret == -EAGAIN, fallback */
+ /*
+ * if r5l_handle_flush_request() didn't clear REQ_PREFLUSH,
+ * we need to flush the journal device
+ */
+ do_flush = bi->bi_opf & REQ_PREFLUSH;
}
md_write_start(mddev, bi);
@@ -5193,6 +5312,7 @@ static void raid5_make_request(struct mddev *mddev, struct bio * bi)
* data on failed drives.
*/
if (rw == READ && mddev->degraded == 0 &&
+ !r5c_is_writeback(conf->log) &&
mddev->reshape_position == MaxSector) {
bi = chunk_aligned_read(mddev, bi);
if (!bi)
@@ -5321,6 +5441,12 @@ static void raid5_make_request(struct mddev *mddev, struct bio * bi)
do_prepare = true;
goto retry;
}
+ if (do_flush) {
+ set_bit(STRIPE_R5C_PREFLUSH, &sh->state);
+ /* we only need flush for one stripe */
+ do_flush = false;
+ }
+
set_bit(STRIPE_HANDLE, &sh->state);
clear_bit(STRIPE_DELAYED, &sh->state);
if ((!sh->batch_head || sh == sh->batch_head) &&
@@ -5486,9 +5612,9 @@ static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr, int *sk
mddev->reshape_position = conf->reshape_progress;
mddev->curr_resync_completed = sector_nr;
conf->reshape_checkpoint = jiffies;
- set_bit(MD_CHANGE_DEVS, &mddev->flags);
+ set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
md_wakeup_thread(mddev->thread);
- wait_event(mddev->sb_wait, mddev->flags == 0 ||
+ wait_event(mddev->sb_wait, mddev->sb_flags == 0 ||
test_bit(MD_RECOVERY_INTR, &mddev->recovery));
if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
return 0;
@@ -5584,10 +5710,10 @@ finish:
mddev->reshape_position = conf->reshape_progress;
mddev->curr_resync_completed = sector_nr;
conf->reshape_checkpoint = jiffies;
- set_bit(MD_CHANGE_DEVS, &mddev->flags);
+ set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
md_wakeup_thread(mddev->thread);
wait_event(mddev->sb_wait,
- !test_bit(MD_CHANGE_DEVS, &mddev->flags)
+ !test_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags)
|| test_bit(MD_RECOVERY_INTR, &mddev->recovery));
if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
goto ret;
@@ -5862,10 +5988,10 @@ static void raid5d(struct md_thread *thread)
md_check_recovery(mddev);
if (!bio_list_empty(&conf->return_bi) &&
- !test_bit(MD_CHANGE_PENDING, &mddev->flags)) {
+ !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)) {
struct bio_list tmp = BIO_EMPTY_LIST;
spin_lock_irq(&conf->device_lock);
- if (!test_bit(MD_CHANGE_PENDING, &mddev->flags)) {
+ if (!test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)) {
bio_list_merge(&tmp, &conf->return_bi);
bio_list_init(&conf->return_bi);
}
@@ -5912,7 +6038,7 @@ static void raid5d(struct md_thread *thread)
break;
handled += batch_size;
- if (mddev->flags & ~(1<<MD_CHANGE_PENDING)) {
+ if (mddev->sb_flags & ~(1 << MD_SB_CHANGE_PENDING)) {
spin_unlock_irq(&conf->device_lock);
md_check_recovery(mddev);
spin_lock_irq(&conf->device_lock);
@@ -6242,6 +6368,7 @@ static struct attribute *raid5_attrs[] = {
&raid5_group_thread_cnt.attr,
&raid5_skip_copy.attr,
&raid5_rmw_level.attr,
+ &r5c_journal_mode.attr,
NULL,
};
static struct attribute_group raid5_attrs_group = {
@@ -6368,6 +6495,8 @@ static void raid5_free_percpu(struct r5conf *conf)
static void free_conf(struct r5conf *conf)
{
+ int i;
+
if (conf->log)
r5l_exit_log(conf->log);
if (conf->shrinker.nr_deferred)
@@ -6376,6 +6505,9 @@ static void free_conf(struct r5conf *conf)
free_thread_groups(conf);
shrink_stripes(conf);
raid5_free_percpu(conf);
+ for (i = 0; i < conf->pool_size; i++)
+ if (conf->disks[i].extra_page)
+ put_page(conf->disks[i].extra_page);
kfree(conf->disks);
kfree(conf->stripe_hashtbl);
kfree(conf);
@@ -6387,8 +6519,8 @@ static int raid456_cpu_up_prepare(unsigned int cpu, struct hlist_node *node)
struct raid5_percpu *percpu = per_cpu_ptr(conf->percpu, cpu);
if (alloc_scratch_buffer(conf, percpu)) {
- pr_err("%s: failed memory allocation for cpu%u\n",
- __func__, cpu);
+ pr_warn("%s: failed memory allocation for cpu%u\n",
+ __func__, cpu);
return -ENOMEM;
}
return 0;
@@ -6458,29 +6590,29 @@ static struct r5conf *setup_conf(struct mddev *mddev)
if (mddev->new_level != 5
&& mddev->new_level != 4
&& mddev->new_level != 6) {
- printk(KERN_ERR "md/raid:%s: raid level not set to 4/5/6 (%d)\n",
- mdname(mddev), mddev->new_level);
+ pr_warn("md/raid:%s: raid level not set to 4/5/6 (%d)\n",
+ mdname(mddev), mddev->new_level);
return ERR_PTR(-EIO);
}
if ((mddev->new_level == 5
&& !algorithm_valid_raid5(mddev->new_layout)) ||
(mddev->new_level == 6
&& !algorithm_valid_raid6(mddev->new_layout))) {
- printk(KERN_ERR "md/raid:%s: layout %d not supported\n",
- mdname(mddev), mddev->new_layout);
+ pr_warn("md/raid:%s: layout %d not supported\n",
+ mdname(mddev), mddev->new_layout);
return ERR_PTR(-EIO);
}
if (mddev->new_level == 6 && mddev->raid_disks < 4) {
- printk(KERN_ERR "md/raid:%s: not enough configured devices (%d, minimum 4)\n",
- mdname(mddev), mddev->raid_disks);
+ pr_warn("md/raid:%s: not enough configured devices (%d, minimum 4)\n",
+ mdname(mddev), mddev->raid_disks);
return ERR_PTR(-EINVAL);
}
if (!mddev->new_chunk_sectors ||
(mddev->new_chunk_sectors << 9) % PAGE_SIZE ||
!is_power_of_2(mddev->new_chunk_sectors)) {
- printk(KERN_ERR "md/raid:%s: invalid chunk size %d\n",
- mdname(mddev), mddev->new_chunk_sectors << 9);
+ pr_warn("md/raid:%s: invalid chunk size %d\n",
+ mdname(mddev), mddev->new_chunk_sectors << 9);
return ERR_PTR(-EINVAL);
}
@@ -6522,9 +6654,16 @@ static struct r5conf *setup_conf(struct mddev *mddev)
conf->disks = kzalloc(max_disks * sizeof(struct disk_info),
GFP_KERNEL);
+
if (!conf->disks)
goto abort;
+ for (i = 0; i < max_disks; i++) {
+ conf->disks[i].extra_page = alloc_page(GFP_KERNEL);
+ if (!conf->disks[i].extra_page)
+ goto abort;
+ }
+
conf->mddev = mddev;
if ((conf->stripe_hashtbl = kzalloc(PAGE_SIZE, GFP_KERNEL)) == NULL)
@@ -6545,6 +6684,11 @@ static struct r5conf *setup_conf(struct mddev *mddev)
for (i = 0; i < NR_STRIPE_HASH_LOCKS; i++)
INIT_LIST_HEAD(conf->temp_inactive_list + i);
+ atomic_set(&conf->r5c_cached_full_stripes, 0);
+ INIT_LIST_HEAD(&conf->r5c_full_stripe_list);
+ atomic_set(&conf->r5c_cached_partial_stripes, 0);
+ INIT_LIST_HEAD(&conf->r5c_partial_stripe_list);
+
conf->level = mddev->new_level;
conf->chunk_sectors = mddev->new_chunk_sectors;
if (raid5_alloc_percpu(conf) != 0)
@@ -6571,9 +6715,8 @@ static struct r5conf *setup_conf(struct mddev *mddev)
if (test_bit(In_sync, &rdev->flags)) {
char b[BDEVNAME_SIZE];
- printk(KERN_INFO "md/raid:%s: device %s operational as raid"
- " disk %d\n",
- mdname(mddev), bdevname(rdev->bdev, b), raid_disk);
+ pr_info("md/raid:%s: device %s operational as raid disk %d\n",
+ mdname(mddev), bdevname(rdev->bdev, b), raid_disk);
} else if (rdev->saved_raid_disk != raid_disk)
/* Cannot rely on bitmap to complete recovery */
conf->fullsync = 1;
@@ -6607,21 +6750,18 @@ static struct r5conf *setup_conf(struct mddev *mddev)
((mddev->new_chunk_sectors << 9) / STRIPE_SIZE) * 4);
conf->min_nr_stripes = max(NR_STRIPES, stripes);
if (conf->min_nr_stripes != NR_STRIPES)
- printk(KERN_INFO
- "md/raid:%s: force stripe size %d for reshape\n",
+ pr_info("md/raid:%s: force stripe size %d for reshape\n",
mdname(mddev), conf->min_nr_stripes);
}
memory = conf->min_nr_stripes * (sizeof(struct stripe_head) +
max_disks * ((sizeof(struct bio) + PAGE_SIZE))) / 1024;
atomic_set(&conf->empty_inactive_list_nr, NR_STRIPE_HASH_LOCKS);
if (grow_stripes(conf, conf->min_nr_stripes)) {
- printk(KERN_ERR
- "md/raid:%s: couldn't allocate %dkB for buffers\n",
- mdname(mddev), memory);
+ pr_warn("md/raid:%s: couldn't allocate %dkB for buffers\n",
+ mdname(mddev), memory);
goto abort;
} else
- printk(KERN_INFO "md/raid:%s: allocated %dkB\n",
- mdname(mddev), memory);
+ pr_debug("md/raid:%s: allocated %dkB\n", mdname(mddev), memory);
/*
* Losing a stripe head costs more than the time to refill it,
* it reduces the queue depth and so can hurt throughput.
@@ -6633,18 +6773,16 @@ static struct r5conf *setup_conf(struct mddev *mddev)
conf->shrinker.batch = 128;
conf->shrinker.flags = 0;
if (register_shrinker(&conf->shrinker)) {
- printk(KERN_ERR
- "md/raid:%s: couldn't register shrinker.\n",
- mdname(mddev));
+ pr_warn("md/raid:%s: couldn't register shrinker.\n",
+ mdname(mddev));
goto abort;
}
sprintf(pers_name, "raid%d", mddev->new_level);
conf->thread = md_register_thread(raid5d, mddev, pers_name);
if (!conf->thread) {
- printk(KERN_ERR
- "md/raid:%s: couldn't allocate thread.\n",
- mdname(mddev));
+ pr_warn("md/raid:%s: couldn't allocate thread.\n",
+ mdname(mddev));
goto abort;
}
@@ -6697,9 +6835,8 @@ static int raid5_run(struct mddev *mddev)
int first = 1;
if (mddev->recovery_cp != MaxSector)
- printk(KERN_NOTICE "md/raid:%s: not clean"
- " -- starting background reconstruction\n",
- mdname(mddev));
+ pr_notice("md/raid:%s: not clean -- starting background reconstruction\n",
+ mdname(mddev));
rdev_for_each(rdev, mddev) {
long long diff;
@@ -6742,15 +6879,14 @@ static int raid5_run(struct mddev *mddev)
int new_data_disks;
if (journal_dev) {
- printk(KERN_ERR "md/raid:%s: don't support reshape with journal - aborting.\n",
- mdname(mddev));
+ pr_warn("md/raid:%s: don't support reshape with journal - aborting.\n",
+ mdname(mddev));
return -EINVAL;
}
if (mddev->new_level != mddev->level) {
- printk(KERN_ERR "md/raid:%s: unsupported reshape "
- "required - aborting.\n",
- mdname(mddev));
+ pr_warn("md/raid:%s: unsupported reshape required - aborting.\n",
+ mdname(mddev));
return -EINVAL;
}
old_disks = mddev->raid_disks - mddev->delta_disks;
@@ -6765,8 +6901,8 @@ static int raid5_run(struct mddev *mddev)
chunk_sectors = max(mddev->chunk_sectors, mddev->new_chunk_sectors);
new_data_disks = mddev->raid_disks - max_degraded;
if (sector_div(here_new, chunk_sectors * new_data_disks)) {
- printk(KERN_ERR "md/raid:%s: reshape_position not "
- "on a stripe boundary\n", mdname(mddev));
+ pr_warn("md/raid:%s: reshape_position not on a stripe boundary\n",
+ mdname(mddev));
return -EINVAL;
}
reshape_offset = here_new * chunk_sectors;
@@ -6787,10 +6923,8 @@ static int raid5_run(struct mddev *mddev)
abs(min_offset_diff) >= mddev->new_chunk_sectors)
/* not really in-place - so OK */;
else if (mddev->ro == 0) {
- printk(KERN_ERR "md/raid:%s: in-place reshape "
- "must be started in read-only mode "
- "- aborting\n",
- mdname(mddev));
+ pr_warn("md/raid:%s: in-place reshape must be started in read-only mode - aborting\n",
+ mdname(mddev));
return -EINVAL;
}
} else if (mddev->reshape_backwards
@@ -6799,13 +6933,11 @@ static int raid5_run(struct mddev *mddev)
: (here_new * chunk_sectors >=
here_old * chunk_sectors + (-min_offset_diff))) {
/* Reading from the same stripe as writing to - bad */
- printk(KERN_ERR "md/raid:%s: reshape_position too early for "
- "auto-recovery - aborting.\n",
- mdname(mddev));
+ pr_warn("md/raid:%s: reshape_position too early for auto-recovery - aborting.\n",
+ mdname(mddev));
return -EINVAL;
}
- printk(KERN_INFO "md/raid:%s: reshape will continue\n",
- mdname(mddev));
+ pr_debug("md/raid:%s: reshape will continue\n", mdname(mddev));
/* OK, we should be able to continue; */
} else {
BUG_ON(mddev->level != mddev->new_level);
@@ -6824,8 +6956,8 @@ static int raid5_run(struct mddev *mddev)
if (test_bit(MD_HAS_JOURNAL, &mddev->flags)) {
if (!journal_dev) {
- pr_err("md/raid:%s: journal disk is missing, force array readonly\n",
- mdname(mddev));
+ pr_warn("md/raid:%s: journal disk is missing, force array readonly\n",
+ mdname(mddev));
mddev->ro = 1;
set_disk_ro(mddev->gendisk, 1);
} else if (mddev->recovery_cp == MaxSector)
@@ -6852,8 +6984,7 @@ static int raid5_run(struct mddev *mddev)
if (conf->disks[i].replacement &&
conf->reshape_progress != MaxSector) {
/* replacements and reshape simply do not mix. */
- printk(KERN_ERR "md: cannot handle concurrent "
- "replacement and reshape.\n");
+ pr_warn("md: cannot handle concurrent replacement and reshape.\n");
goto abort;
}
if (test_bit(In_sync, &rdev->flags)) {
@@ -6895,8 +7026,7 @@ static int raid5_run(struct mddev *mddev)
mddev->degraded = calc_degraded(conf);
if (has_failed(conf)) {
- printk(KERN_ERR "md/raid:%s: not enough operational devices"
- " (%d/%d failed)\n",
+ pr_crit("md/raid:%s: not enough operational devices (%d/%d failed)\n",
mdname(mddev), mddev->degraded, conf->raid_disks);
goto abort;
}
@@ -6908,29 +7038,19 @@ static int raid5_run(struct mddev *mddev)
if (mddev->degraded > dirty_parity_disks &&
mddev->recovery_cp != MaxSector) {
if (mddev->ok_start_degraded)
- printk(KERN_WARNING
- "md/raid:%s: starting dirty degraded array"
- " - data corruption possible.\n",
- mdname(mddev));
+ pr_crit("md/raid:%s: starting dirty degraded array - data corruption possible.\n",
+ mdname(mddev));
else {
- printk(KERN_ERR
- "md/raid:%s: cannot start dirty degraded array.\n",
- mdname(mddev));
+ pr_crit("md/raid:%s: cannot start dirty degraded array.\n",
+ mdname(mddev));
goto abort;
}
}
- if (mddev->degraded == 0)
- printk(KERN_INFO "md/raid:%s: raid level %d active with %d out of %d"
- " devices, algorithm %d\n", mdname(mddev), conf->level,
- mddev->raid_disks-mddev->degraded, mddev->raid_disks,
- mddev->new_layout);
- else
- printk(KERN_ALERT "md/raid:%s: raid level %d active with %d"
- " out of %d devices, algorithm %d\n",
- mdname(mddev), conf->level,
- mddev->raid_disks - mddev->degraded,
- mddev->raid_disks, mddev->new_layout);
+ pr_info("md/raid:%s: raid level %d active with %d out of %d devices, algorithm %d\n",
+ mdname(mddev), conf->level,
+ mddev->raid_disks-mddev->degraded, mddev->raid_disks,
+ mddev->new_layout);
print_raid5_conf(conf);
@@ -6950,9 +7070,8 @@ static int raid5_run(struct mddev *mddev)
mddev->to_remove = NULL;
else if (mddev->kobj.sd &&
sysfs_create_group(&mddev->kobj, &raid5_attrs_group))
- printk(KERN_WARNING
- "raid5: failed to create sysfs attributes for %s\n",
- mdname(mddev));
+ pr_warn("raid5: failed to create sysfs attributes for %s\n",
+ mdname(mddev));
md_set_array_sectors(mddev, raid5_size(mddev, 0, 0));
if (mddev->queue) {
@@ -6984,6 +7103,15 @@ static int raid5_run(struct mddev *mddev)
stripe = (stripe | (stripe-1)) + 1;
mddev->queue->limits.discard_alignment = stripe;
mddev->queue->limits.discard_granularity = stripe;
+
+ /*
+ * We use a 16-bit counter of active stripes in bi_phys_segments
+ * (minus one count reserved for the over-loaded initialization),
+ * hence the 0xfffe limit below.
+ */
+ blk_queue_max_hw_sectors(mddev->queue, 0xfffe * STRIPE_SECTORS);
+ blk_queue_max_discard_sectors(mddev->queue,
+ 0xfffe * STRIPE_SECTORS);
+
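Working through the cap above, assuming 4 KiB pages (so STRIPE_SECTORS is 8): 0xfffe stripes per bio allows 65534 * 8 = 524272 sectors, i.e. just under 256 MiB per request. A standalone check of that arithmetic:

	#include <stdio.h>

	int main(void)
	{
		unsigned long stripe_sectors = 4096 >> 9;	/* 8 */
		unsigned long max_sectors = 0xfffeUL * stripe_sectors;

		/* 524272 sectors * 512 bytes = 268427264 bytes (~256 MiB) */
		printf("%lu sectors, %lu bytes\n",
		       max_sectors, max_sectors * 512);
		return 0;
	}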
/*
* unaligned part of discard request will be ignored, so can't
* guarantee discard_zeroes_data
@@ -7040,9 +7168,10 @@ static int raid5_run(struct mddev *mddev)
if (journal_dev) {
char b[BDEVNAME_SIZE];
- printk(KERN_INFO"md/raid:%s: using device %s as journal\n",
- mdname(mddev), bdevname(journal_dev->bdev, b));
- r5l_init_log(conf, journal_dev);
+ pr_debug("md/raid:%s: using device %s as journal\n",
+ mdname(mddev), bdevname(journal_dev->bdev, b));
+ if (r5l_init_log(conf, journal_dev))
+ goto abort;
}
return 0;
@@ -7051,7 +7180,7 @@ abort:
print_raid5_conf(conf);
free_conf(conf);
mddev->private = NULL;
- printk(KERN_ALERT "md/raid:%s: failed to run raid set.\n", mdname(mddev));
+ pr_warn("md/raid:%s: failed to run raid set.\n", mdname(mddev));
return -EIO;
}
@@ -7085,12 +7214,12 @@ static void print_raid5_conf (struct r5conf *conf)
int i;
struct disk_info *tmp;
- printk(KERN_DEBUG "RAID conf printout:\n");
+ pr_debug("RAID conf printout:\n");
if (!conf) {
- printk("(conf==NULL)\n");
+ pr_debug("(conf==NULL)\n");
return;
}
- printk(KERN_DEBUG " --- level:%d rd:%d wd:%d\n", conf->level,
+ pr_debug(" --- level:%d rd:%d wd:%d\n", conf->level,
conf->raid_disks,
conf->raid_disks - conf->mddev->degraded);
@@ -7098,7 +7227,7 @@ static void print_raid5_conf (struct r5conf *conf)
char b[BDEVNAME_SIZE];
tmp = conf->disks + i;
if (tmp->rdev)
- printk(KERN_DEBUG " disk %d, o:%d, dev:%s\n",
+ pr_debug(" disk %d, o:%d, dev:%s\n",
i, !test_bit(Faulty, &tmp->rdev->flags),
bdevname(tmp->rdev->bdev, b));
}
@@ -7246,8 +7375,8 @@ static int raid5_add_disk(struct mddev *mddev, struct md_rdev *rdev)
* write requests running. We should be safe
*/
r5l_init_log(conf, rdev);
- printk(KERN_INFO"md/raid:%s: using device %s as journal\n",
- mdname(mddev), bdevname(rdev->bdev, b));
+ pr_debug("md/raid:%s: using device %s as journal\n",
+ mdname(mddev), bdevname(rdev->bdev, b));
return 0;
}
if (mddev->recovery_disabled == conf->recovery_disabled)
@@ -7351,10 +7480,10 @@ static int check_stripe_cache(struct mddev *mddev)
> conf->min_nr_stripes ||
((mddev->new_chunk_sectors << 9) / STRIPE_SIZE) * 4
> conf->min_nr_stripes) {
- printk(KERN_WARNING "md/raid:%s: reshape: not enough stripes. Needed %lu\n",
- mdname(mddev),
- ((max(mddev->chunk_sectors, mddev->new_chunk_sectors) << 9)
- / STRIPE_SIZE)*4);
+ pr_warn("md/raid:%s: reshape: not enough stripes. Needed %lu\n",
+ mdname(mddev),
+ ((max(mddev->chunk_sectors, mddev->new_chunk_sectors) << 9)
+ / STRIPE_SIZE)*4);
return 0;
}
return 1;
@@ -7435,8 +7564,8 @@ static int raid5_start_reshape(struct mddev *mddev)
*/
if (raid5_size(mddev, 0, conf->raid_disks + mddev->delta_disks)
< mddev->array_sectors) {
- printk(KERN_ERR "md/raid:%s: array size must be reduced "
- "before number of disks\n", mdname(mddev));
+ pr_warn("md/raid:%s: array size must be reduced before number of disks\n",
+ mdname(mddev));
return -EINVAL;
}
@@ -7506,7 +7635,7 @@ static int raid5_start_reshape(struct mddev *mddev)
}
mddev->raid_disks = conf->raid_disks;
mddev->reshape_position = conf->reshape_progress;
- set_bit(MD_CHANGE_DEVS, &mddev->flags);
+ set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
@@ -7624,6 +7753,7 @@ static void raid5_quiesce(struct mddev *mddev, int state)
/* '2' tells resync/reshape to pause so that all
* active stripes can drain
*/
+ r5c_flush_cache(conf, INT_MAX);
conf->quiesce = 2;
wait_event_cmd(conf->wait_for_quiescent,
atomic_read(&conf->active_stripes) == 0 &&
@@ -7654,8 +7784,8 @@ static void *raid45_takeover_raid0(struct mddev *mddev, int level)
/* for raid0 takeover only one zone is supported */
if (raid0_conf->nr_strip_zones > 1) {
- printk(KERN_ERR "md/raid:%s: cannot takeover raid0 with more than one zone.\n",
- mdname(mddev));
+ pr_warn("md/raid:%s: cannot takeover raid0 with more than one zone.\n",
+ mdname(mddev));
return ERR_PTR(-EINVAL);
}
@@ -7676,6 +7806,7 @@ static void *raid45_takeover_raid0(struct mddev *mddev, int level)
static void *raid5_takeover_raid1(struct mddev *mddev)
{
int chunksect;
+ void *ret;
if (mddev->raid_disks != 2 ||
mddev->degraded > 1)
@@ -7697,7 +7828,10 @@ static void *raid5_takeover_raid1(struct mddev *mddev)
mddev->new_layout = ALGORITHM_LEFT_SYMMETRIC;
mddev->new_chunk_sectors = chunksect;
- return setup_conf(mddev);
+ ret = setup_conf(mddev);
+ if (!IS_ERR_VALUE(ret))
+ clear_bit(MD_FAILFAST_SUPPORTED, &mddev->flags);
+ return ret;
}
static void *raid5_takeover_raid6(struct mddev *mddev)
@@ -7767,7 +7901,7 @@ static int raid5_check_reshape(struct mddev *mddev)
conf->chunk_sectors = new_chunk ;
mddev->chunk_sectors = new_chunk;
}
- set_bit(MD_CHANGE_DEVS, &mddev->flags);
+ set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
md_wakeup_thread(mddev->thread);
}
return check_reshape(mddev);
diff --git a/drivers/md/raid5.h b/drivers/md/raid5.h
index 57ec49f0839e..ed8e1362ab36 100644
--- a/drivers/md/raid5.h
+++ b/drivers/md/raid5.h
@@ -226,6 +226,8 @@ struct stripe_head {
struct r5l_io_unit *log_io;
struct list_head log_list;
+ sector_t log_start; /* first meta block on the journal */
+ struct list_head r5c; /* for r5c_cache->stripe_in_journal */
/**
* struct stripe_operations
* @target - STRIPE_OP_COMPUTE_BLK target
@@ -264,6 +266,7 @@ struct stripe_head_state {
int syncing, expanding, expanded, replacing;
int locked, uptodate, to_read, to_write, failed, written;
int to_fill, compute, req_compute, non_overwrite;
+ int injournal, just_cached;
int failed_num[2];
int p_failed, q_failed;
int dec_preread_active;
@@ -273,6 +276,7 @@ struct stripe_head_state {
struct md_rdev *blocked_rdev;
int handle_bad_blocks;
int log_failed;
+ int waiting_extra_page;
};
/* Flags for struct r5dev.flags */
@@ -313,6 +317,11 @@ enum r5dev_flags {
*/
R5_Discard, /* Discard the stripe */
R5_SkipCopy, /* Don't copy data from bio to stripe cache */
+ R5_InJournal, /* data being written is in the journal device.
+ * If R5_InJournal is set for parity pd_idx, all the
+ * data and parity being written are in the journal
+ * device
+ */
};
/*
@@ -345,7 +354,30 @@ enum {
STRIPE_BITMAP_PENDING, /* Being added to bitmap, don't add
* to batch yet.
*/
- STRIPE_LOG_TRAPPED, /* trapped into log */
+ STRIPE_LOG_TRAPPED, /* trapped into log (see raid5-cache.c)
+ * this bit is used in two scenarios:
+ *
+ * 1. write-out phase
+ * set in first entry of r5l_write_stripe
+ * clear in second entry of r5l_write_stripe
+ * used to bypass logic in handle_stripe
+ *
+ * 2. caching phase
+ * set in r5c_try_caching_write()
+ * clear when journal write is done
+ * used to initiate r5c_cache_data()
+ * also used to bypass logic in handle_stripe
+ */
+ STRIPE_R5C_CACHING, /* the stripe is in caching phase
+ * see more detail in the raid5-cache.c
+ */
+ STRIPE_R5C_PARTIAL_STRIPE, /* in r5c cache (to-be/being handled or
+ * in conf->r5c_partial_stripe_list)
+ */
+ STRIPE_R5C_FULL_STRIPE, /* in r5c cache (to-be/being handled or
+ * in conf->r5c_full_stripe_list)
+ */
+ STRIPE_R5C_PREFLUSH, /* need to flush journal device */
};
#define STRIPE_EXPAND_SYNC_FLAGS \
@@ -408,8 +440,86 @@ enum {
struct disk_info {
struct md_rdev *rdev, *replacement;
+ struct page *extra_page; /* extra page to use in prexor */
};
+/*
+ * Stripe cache
+ */
+
+#define NR_STRIPES 256
+#define STRIPE_SIZE PAGE_SIZE
+#define STRIPE_SHIFT (PAGE_SHIFT - 9)
+#define STRIPE_SECTORS (STRIPE_SIZE>>9)
+#define IO_THRESHOLD 1
+#define BYPASS_THRESHOLD 1
+#define NR_HASH (PAGE_SIZE / sizeof(struct hlist_head))
+#define HASH_MASK (NR_HASH - 1)
+#define MAX_STRIPE_BATCH 8
+
+/* Bios attached to a stripe+device for I/O are linked together in bi_sector
+ * order without overlap. There may be several bios per stripe+device, and
+ * a bio could span several devices.
+ * When walking this list for a particular stripe+device, we must never
+ * proceed beyond a bio that extends past this device, as the next bio
+ * might no longer be valid.
+ * This function is used to determine the 'next' bio in the list, given
+ * the sector of the current stripe+device.
+ */
+static inline struct bio *r5_next_bio(struct bio *bio, sector_t sector)
+{
+ int sectors = bio_sectors(bio);
+
+ if (bio->bi_iter.bi_sector + sectors < sector + STRIPE_SECTORS)
+ return bio->bi_next;
+ else
+ return NULL;
+}
+
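r5_next_bio() is what makes these per-device chains walkable: callers start from dev->towrite or dev->toread and follow bi_next until the chain leaves the current stripe. A sketch of the canonical loop (the counting function is illustrative, not part of the patch):

	/* Count the bios covering one stripe+device. */
	static int count_stripe_bios(struct r5dev *dev, sector_t sector)
	{
		struct bio *bio;
		int n = 0;

		for (bio = dev->towrite;
		     bio && bio->bi_iter.bi_sector < sector + STRIPE_SECTORS;
		     bio = r5_next_bio(bio, sector))
			n++;
		return n;
	}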
+/*
+ * We maintain a biased count of active stripes in the bottom 16 bits of
+ * bi_phys_segments, and a count of processed stripes in the upper 16 bits
+ */
+static inline int raid5_bi_processed_stripes(struct bio *bio)
+{
+ atomic_t *segments = (atomic_t *)&bio->bi_phys_segments;
+
+ return (atomic_read(segments) >> 16) & 0xffff;
+}
+
+static inline int raid5_dec_bi_active_stripes(struct bio *bio)
+{
+ atomic_t *segments = (atomic_t *)&bio->bi_phys_segments;
+
+ return atomic_sub_return(1, segments) & 0xffff;
+}
+
+static inline void raid5_inc_bi_active_stripes(struct bio *bio)
+{
+ atomic_t *segments = (atomic_t *)&bio->bi_phys_segments;
+
+ atomic_inc(segments);
+}
+
+static inline void raid5_set_bi_processed_stripes(struct bio *bio,
+ unsigned int cnt)
+{
+ atomic_t *segments = (atomic_t *)&bio->bi_phys_segments;
+ int old, new;
+
+ do {
+ old = atomic_read(segments);
+ new = (old & 0xffff) | (cnt << 16);
+ } while (atomic_cmpxchg(segments, old, new) != old);
+}
+
+static inline void raid5_set_bi_stripes(struct bio *bio, unsigned int cnt)
+{
+ atomic_t *segments = (atomic_t *)&bio->bi_phys_segments;
+
+ atomic_set(segments, cnt);
+}
+
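Taken together, these helpers give a bio a reference count without adding a field: the low 16 bits are decremented once per finished stripe, and the extra count set at initialization keeps the counter from reaching zero while stripes are still being attached. A hedged lifecycle sketch (the demo function is illustrative, not part of the patch):

	static void demo_bio_accounting(struct bio *bio, int nr_stripes)
	{
		int i;

		raid5_set_bi_stripes(bio, 1);	/* bias: can't hit 0 early */
		for (i = 0; i < nr_stripes; i++)
			raid5_inc_bi_active_stripes(bio);

		/* each stripe drops one reference as it completes */
		for (i = 0; i < nr_stripes; i++)
			raid5_dec_bi_active_stripes(bio);

		/* submitter drops the bias; zero means the bio is done */
		if (raid5_dec_bi_active_stripes(bio) == 0)
			bio_endio(bio);
	}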
/* NOTE NR_STRIPE_HASH_LOCKS must remain below 64.
* This is because we sometimes take all the spinlocks
* and creating that much locking depth can cause
@@ -432,6 +542,30 @@ struct r5worker_group {
int stripes_cnt;
};
+enum r5_cache_state {
+ R5_INACTIVE_BLOCKED, /* release of inactive stripes blocked,
+ * waiting for 25% to be free
+ */
+ R5_ALLOC_MORE, /* It might help to allocate another
+ * stripe.
+ */
+ R5_DID_ALLOC, /* A stripe was allocated, don't allocate
+ * more until at least one has been
+ * released. This avoids flooding
+ * the cache.
+ */
+ R5C_LOG_TIGHT, /* log device space tight, need to
+ * prioritize stripes at last_checkpoint
+ */
+ R5C_LOG_CRITICAL, /* log device is running out of space,
+ * only process stripes that are already
+ * occupying the log
+ */
+ R5C_EXTRA_PAGE_IN_USE, /* a stripe is using disk_info.extra_page
+ * for prexor
+ */
+};
+
struct r5conf {
struct hlist_head *stripe_hashtbl;
/* only protect corresponding hash list and inactive_list */
@@ -519,23 +653,18 @@ struct r5conf {
*/
atomic_t active_stripes;
struct list_head inactive_list[NR_STRIPE_HASH_LOCKS];
+
+ atomic_t r5c_cached_full_stripes;
+ struct list_head r5c_full_stripe_list;
+ atomic_t r5c_cached_partial_stripes;
+ struct list_head r5c_partial_stripe_list;
+
atomic_t empty_inactive_list_nr;
struct llist_head released_stripes;
wait_queue_head_t wait_for_quiescent;
wait_queue_head_t wait_for_stripe;
wait_queue_head_t wait_for_overlap;
unsigned long cache_state;
-#define R5_INACTIVE_BLOCKED 1 /* release of inactive stripes blocked,
- * waiting for 25% to be free
- */
-#define R5_ALLOC_MORE 2 /* It might help to allocate another
- * stripe.
- */
-#define R5_DID_ALLOC 4 /* A stripe was allocated, don't allocate
- * more until at least one has been
- * released. This avoids flooding
- * the cache.
- */
struct shrinker shrinker;
int pool_size; /* number of disks in stripeheads in pool */
spinlock_t device_lock;
@@ -633,4 +762,23 @@ extern void r5l_stripe_write_finished(struct stripe_head *sh);
extern int r5l_handle_flush_request(struct r5l_log *log, struct bio *bio);
extern void r5l_quiesce(struct r5l_log *log, int state);
extern bool r5l_log_disk_error(struct r5conf *conf);
+extern bool r5c_is_writeback(struct r5l_log *log);
+extern int
+r5c_try_caching_write(struct r5conf *conf, struct stripe_head *sh,
+ struct stripe_head_state *s, int disks);
+extern void
+r5c_finish_stripe_write_out(struct r5conf *conf, struct stripe_head *sh,
+ struct stripe_head_state *s);
+extern void r5c_release_extra_page(struct stripe_head *sh);
+extern void r5c_use_extra_page(struct stripe_head *sh);
+extern void r5l_wake_reclaim(struct r5l_log *log, sector_t space);
+extern void r5c_handle_cached_data_endio(struct r5conf *conf,
+ struct stripe_head *sh, int disks, struct bio_list *return_bi);
+extern int r5c_cache_data(struct r5l_log *log, struct stripe_head *sh,
+ struct stripe_head_state *s);
+extern void r5c_make_stripe_write_out(struct stripe_head *sh);
+extern void r5c_flush_cache(struct r5conf *conf, int num);
+extern void r5c_check_stripe_cache_usage(struct r5conf *conf);
+extern void r5c_check_cached_full_stripe(struct r5conf *conf);
+extern struct md_sysfs_entry r5c_journal_mode;
#endif
diff --git a/drivers/media/Kconfig b/drivers/media/Kconfig
index 7b8540291217..3512316e7a46 100644
--- a/drivers/media/Kconfig
+++ b/drivers/media/Kconfig
@@ -80,6 +80,22 @@ config MEDIA_RC_SUPPORT
Say Y when you have a TV or an IR device.
+config MEDIA_CEC_SUPPORT
+ bool "HDMI CEC support"
+ select MEDIA_CEC_EDID
+ ---help---
+ Enable support for HDMI CEC (Consumer Electronics Control),
+ which is an optional HDMI feature.
+
+ Say Y when you have an HDMI receiver, transmitter or a USB CEC
+ adapter that supports HDMI CEC.
+
+config MEDIA_CEC_DEBUG
+ bool "HDMI CEC debugfs interface"
+ depends on MEDIA_CEC_SUPPORT && DEBUG_FS
+ ---help---
+ Turns on the DebugFS interface for CEC devices.
+
config MEDIA_CEC_EDID
bool
@@ -99,7 +115,7 @@ config MEDIA_CONTROLLER
config MEDIA_CONTROLLER_DVB
bool "Enable Media controller for DVB (EXPERIMENTAL)"
- depends on MEDIA_CONTROLLER
+ depends on MEDIA_CONTROLLER && DVB_CORE
---help---
Enable the media controller API support for DVB.
diff --git a/drivers/media/Makefile b/drivers/media/Makefile
index 0deaa93efdee..d87ccb8eeabe 100644
--- a/drivers/media/Makefile
+++ b/drivers/media/Makefile
@@ -6,6 +6,10 @@ ifeq ($(CONFIG_MEDIA_CEC_EDID),y)
obj-$(CONFIG_MEDIA_SUPPORT) += cec-edid.o
endif
+ifeq ($(CONFIG_MEDIA_CEC_SUPPORT),y)
+ obj-$(CONFIG_MEDIA_SUPPORT) += cec/
+endif
+
media-objs := media-device.o media-devnode.o media-entity.o
#
diff --git a/drivers/staging/media/cec/Makefile b/drivers/media/cec/Makefile
index bd7f3c593468..d6686337275f 100644
--- a/drivers/staging/media/cec/Makefile
+++ b/drivers/media/cec/Makefile
@@ -1,5 +1,5 @@
cec-objs := cec-core.o cec-adap.o cec-api.o
-ifeq ($(CONFIG_MEDIA_CEC),y)
+ifeq ($(CONFIG_MEDIA_CEC_SUPPORT),y)
obj-$(CONFIG_MEDIA_SUPPORT) += cec.o
endif
diff --git a/drivers/staging/media/cec/cec-adap.c b/drivers/media/cec/cec-adap.c
index 611e07b78bfe..0ea4efb3de66 100644
--- a/drivers/staging/media/cec/cec-adap.c
+++ b/drivers/media/cec/cec-adap.c
@@ -587,7 +587,6 @@ int cec_transmit_msg_fh(struct cec_adapter *adap, struct cec_msg *msg,
msg->tx_nack_cnt = 0;
msg->tx_low_drive_cnt = 0;
msg->tx_error_cnt = 0;
- msg->flags = 0;
msg->sequence = ++adap->sequence;
if (!msg->sequence)
msg->sequence = ++adap->sequence;
@@ -596,6 +595,10 @@ int cec_transmit_msg_fh(struct cec_adapter *adap, struct cec_msg *msg,
/* Make sure the timeout isn't 0. */
msg->timeout = 1000;
}
+ if (msg->timeout)
+ msg->flags &= CEC_MSG_FL_REPLY_TO_FOLLOWERS;
+ else
+ msg->flags = 0;
/* Sanity checks */
if (msg->len == 0 || msg->len > CEC_MAX_MSG_SIZE) {
@@ -763,23 +766,133 @@ EXPORT_SYMBOL_GPL(cec_transmit_msg);
static int cec_receive_notify(struct cec_adapter *adap, struct cec_msg *msg,
bool is_reply);
+#define DIRECTED 0x80
+#define BCAST1_4 0x40
+#define BCAST2_0 0x20 /* broadcast only allowed for >= 2.0 */
+#define BCAST (BCAST1_4 | BCAST2_0)
+#define BOTH (BCAST | DIRECTED)
+
+/*
+ * Specify minimum length and whether the message is directed, broadcast
+ * or both. Messages that do not match the criteria are ignored as per
+ * the CEC specification.
+ */
+static const u8 cec_msg_size[256] = {
+ [CEC_MSG_ACTIVE_SOURCE] = 4 | BCAST,
+ [CEC_MSG_IMAGE_VIEW_ON] = 2 | DIRECTED,
+ [CEC_MSG_TEXT_VIEW_ON] = 2 | DIRECTED,
+ [CEC_MSG_INACTIVE_SOURCE] = 4 | DIRECTED,
+ [CEC_MSG_REQUEST_ACTIVE_SOURCE] = 2 | BCAST,
+ [CEC_MSG_ROUTING_CHANGE] = 6 | BCAST,
+ [CEC_MSG_ROUTING_INFORMATION] = 4 | BCAST,
+ [CEC_MSG_SET_STREAM_PATH] = 4 | BCAST,
+ [CEC_MSG_STANDBY] = 2 | BOTH,
+ [CEC_MSG_RECORD_OFF] = 2 | DIRECTED,
+ [CEC_MSG_RECORD_ON] = 3 | DIRECTED,
+ [CEC_MSG_RECORD_STATUS] = 3 | DIRECTED,
+ [CEC_MSG_RECORD_TV_SCREEN] = 2 | DIRECTED,
+ [CEC_MSG_CLEAR_ANALOGUE_TIMER] = 13 | DIRECTED,
+ [CEC_MSG_CLEAR_DIGITAL_TIMER] = 16 | DIRECTED,
+ [CEC_MSG_CLEAR_EXT_TIMER] = 13 | DIRECTED,
+ [CEC_MSG_SET_ANALOGUE_TIMER] = 13 | DIRECTED,
+ [CEC_MSG_SET_DIGITAL_TIMER] = 16 | DIRECTED,
+ [CEC_MSG_SET_EXT_TIMER] = 13 | DIRECTED,
+ [CEC_MSG_SET_TIMER_PROGRAM_TITLE] = 2 | DIRECTED,
+ [CEC_MSG_TIMER_CLEARED_STATUS] = 3 | DIRECTED,
+ [CEC_MSG_TIMER_STATUS] = 3 | DIRECTED,
+ [CEC_MSG_CEC_VERSION] = 3 | DIRECTED,
+ [CEC_MSG_GET_CEC_VERSION] = 2 | DIRECTED,
+ [CEC_MSG_GIVE_PHYSICAL_ADDR] = 2 | DIRECTED,
+ [CEC_MSG_GET_MENU_LANGUAGE] = 2 | DIRECTED,
+ [CEC_MSG_REPORT_PHYSICAL_ADDR] = 5 | BCAST,
+ [CEC_MSG_SET_MENU_LANGUAGE] = 5 | BCAST,
+ [CEC_MSG_REPORT_FEATURES] = 6 | BCAST,
+ [CEC_MSG_GIVE_FEATURES] = 2 | DIRECTED,
+ [CEC_MSG_DECK_CONTROL] = 3 | DIRECTED,
+ [CEC_MSG_DECK_STATUS] = 3 | DIRECTED,
+ [CEC_MSG_GIVE_DECK_STATUS] = 3 | DIRECTED,
+ [CEC_MSG_PLAY] = 3 | DIRECTED,
+ [CEC_MSG_GIVE_TUNER_DEVICE_STATUS] = 3 | DIRECTED,
+ [CEC_MSG_SELECT_ANALOGUE_SERVICE] = 6 | DIRECTED,
+ [CEC_MSG_SELECT_DIGITAL_SERVICE] = 9 | DIRECTED,
+ [CEC_MSG_TUNER_DEVICE_STATUS] = 7 | DIRECTED,
+ [CEC_MSG_TUNER_STEP_DECREMENT] = 2 | DIRECTED,
+ [CEC_MSG_TUNER_STEP_INCREMENT] = 2 | DIRECTED,
+ [CEC_MSG_DEVICE_VENDOR_ID] = 5 | BCAST,
+ [CEC_MSG_GIVE_DEVICE_VENDOR_ID] = 2 | DIRECTED,
+ [CEC_MSG_VENDOR_COMMAND] = 2 | DIRECTED,
+ [CEC_MSG_VENDOR_COMMAND_WITH_ID] = 5 | BOTH,
+ [CEC_MSG_VENDOR_REMOTE_BUTTON_DOWN] = 2 | BOTH,
+ [CEC_MSG_VENDOR_REMOTE_BUTTON_UP] = 2 | BOTH,
+ [CEC_MSG_SET_OSD_STRING] = 3 | DIRECTED,
+ [CEC_MSG_GIVE_OSD_NAME] = 2 | DIRECTED,
+ [CEC_MSG_SET_OSD_NAME] = 2 | DIRECTED,
+ [CEC_MSG_MENU_REQUEST] = 3 | DIRECTED,
+ [CEC_MSG_MENU_STATUS] = 3 | DIRECTED,
+ [CEC_MSG_USER_CONTROL_PRESSED] = 3 | DIRECTED,
+ [CEC_MSG_USER_CONTROL_RELEASED] = 2 | DIRECTED,
+ [CEC_MSG_GIVE_DEVICE_POWER_STATUS] = 2 | DIRECTED,
+ [CEC_MSG_REPORT_POWER_STATUS] = 3 | DIRECTED | BCAST2_0,
+ [CEC_MSG_FEATURE_ABORT] = 4 | DIRECTED,
+ [CEC_MSG_ABORT] = 2 | DIRECTED,
+ [CEC_MSG_GIVE_AUDIO_STATUS] = 2 | DIRECTED,
+ [CEC_MSG_GIVE_SYSTEM_AUDIO_MODE_STATUS] = 2 | DIRECTED,
+ [CEC_MSG_REPORT_AUDIO_STATUS] = 3 | DIRECTED,
+ [CEC_MSG_REPORT_SHORT_AUDIO_DESCRIPTOR] = 2 | DIRECTED,
+ [CEC_MSG_REQUEST_SHORT_AUDIO_DESCRIPTOR] = 2 | DIRECTED,
+ [CEC_MSG_SET_SYSTEM_AUDIO_MODE] = 3 | BOTH,
+ [CEC_MSG_SYSTEM_AUDIO_MODE_REQUEST] = 2 | DIRECTED,
+ [CEC_MSG_SYSTEM_AUDIO_MODE_STATUS] = 3 | DIRECTED,
+ [CEC_MSG_SET_AUDIO_RATE] = 3 | DIRECTED,
+ [CEC_MSG_INITIATE_ARC] = 2 | DIRECTED,
+ [CEC_MSG_REPORT_ARC_INITIATED] = 2 | DIRECTED,
+ [CEC_MSG_REPORT_ARC_TERMINATED] = 2 | DIRECTED,
+ [CEC_MSG_REQUEST_ARC_INITIATION] = 2 | DIRECTED,
+ [CEC_MSG_REQUEST_ARC_TERMINATION] = 2 | DIRECTED,
+ [CEC_MSG_TERMINATE_ARC] = 2 | DIRECTED,
+ [CEC_MSG_REQUEST_CURRENT_LATENCY] = 4 | BCAST,
+ [CEC_MSG_REPORT_CURRENT_LATENCY] = 7 | BCAST,
+ [CEC_MSG_CDC_MESSAGE] = 2 | BCAST,
+};
+
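Each cec_msg_size[] entry packs the minimum message length into the low five bits and the directed/broadcast rules into the top three, so validating a received opcode comes down to two mask operations. A simplified decode (ignoring the CEC 2.0 BCAST2_0 refinement that the real check below applies):

	static bool cec_msg_len_dir_ok(const struct cec_msg *msg)
	{
		u8 entry = cec_msg_size[msg->msg[1]];
		u8 dir_fl = entry & BOTH;	/* DIRECTED/BCAST bits */
		u8 min_len = entry & 0x1f;	/* low 5 bits */

		if (!entry)			/* unknown opcode: no rule */
			return true;
		if (msg->len < min_len)
			return false;
		if (cec_msg_is_broadcast(msg))
			return dir_fl & BCAST;
		return dir_fl & DIRECTED;
	}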
/* Called by the CEC adapter if a message is received */
void cec_received_msg(struct cec_adapter *adap, struct cec_msg *msg)
{
struct cec_data *data;
u8 msg_init = cec_msg_initiator(msg);
u8 msg_dest = cec_msg_destination(msg);
+ u8 cmd = msg->msg[1];
bool is_reply = false;
bool valid_la = true;
+ u8 min_len = 0;
if (WARN_ON(!msg->len || msg->len > CEC_MAX_MSG_SIZE))
return;
+ /*
+ * Some CEC adapters will receive the messages that they transmitted.
+ * This test filters out those messages by checking if we are the
+ * initiator, and just returning in that case.
+ *
+ * Note that this won't work if this is an Unregistered device.
+ *
+ * It is bad practice for hardware to receive the messages it
+ * transmitted; luckily, most CEC adapters behave correctly in
+ * this respect.
+ */
+ if (msg_init != CEC_LOG_ADDR_UNREGISTERED &&
+ cec_has_log_addr(adap, msg_init))
+ return;
+
msg->rx_ts = ktime_get_ns();
msg->rx_status = CEC_RX_STATUS_OK;
msg->sequence = msg->reply = msg->timeout = 0;
msg->tx_status = 0;
msg->tx_ts = 0;
+ msg->tx_arb_lost_cnt = 0;
+ msg->tx_nack_cnt = 0;
+ msg->tx_low_drive_cnt = 0;
+ msg->tx_error_cnt = 0;
msg->flags = 0;
memset(msg->msg + msg->len, 0, sizeof(msg->msg) - msg->len);
@@ -790,9 +903,71 @@ void cec_received_msg(struct cec_adapter *adap, struct cec_msg *msg)
if (!cec_msg_is_broadcast(msg))
valid_la = cec_has_log_addr(adap, msg_dest);
+ /*
+ * Check whether the length is too short, or whether the message is a
+ * broadcast message where a directed message was expected or vice
+ * versa. If so, the message has to be ignored (as per sections 7.3
+ * and 12.2 of the CEC specification).
+ */
+ if (valid_la && msg->len > 1 && cec_msg_size[cmd]) {
+ u8 dir_fl = cec_msg_size[cmd] & BOTH;
+
+ min_len = cec_msg_size[cmd] & 0x1f;
+ if (msg->len < min_len)
+ valid_la = false;
+ else if (!cec_msg_is_broadcast(msg) && !(dir_fl & DIRECTED))
+ valid_la = false;
+ else if (cec_msg_is_broadcast(msg) && !(dir_fl & BCAST1_4))
+ valid_la = false;
+ else if (cec_msg_is_broadcast(msg) &&
+ adap->log_addrs.cec_version >= CEC_OP_CEC_VERSION_2_0 &&
+ !(dir_fl & BCAST2_0))
+ valid_la = false;
+ }
+ if (valid_la && min_len) {
+ /* These messages have special length requirements */
+ switch (cmd) {
+ case CEC_MSG_TIMER_STATUS:
+ if (msg->msg[2] & 0x10) {
+ switch (msg->msg[2] & 0xf) {
+ case CEC_OP_PROG_INFO_NOT_ENOUGH_SPACE:
+ case CEC_OP_PROG_INFO_MIGHT_NOT_BE_ENOUGH_SPACE:
+ if (msg->len < 5)
+ valid_la = false;
+ break;
+ }
+ } else if ((msg->msg[2] & 0xf) == CEC_OP_PROG_ERROR_DUPLICATE) {
+ if (msg->len < 5)
+ valid_la = false;
+ }
+ break;
+ case CEC_MSG_RECORD_ON:
+ switch (msg->msg[2]) {
+ case CEC_OP_RECORD_SRC_OWN:
+ break;
+ case CEC_OP_RECORD_SRC_DIGITAL:
+ if (msg->len < 10)
+ valid_la = false;
+ break;
+ case CEC_OP_RECORD_SRC_ANALOG:
+ if (msg->len < 7)
+ valid_la = false;
+ break;
+ case CEC_OP_RECORD_SRC_EXT_PLUG:
+ if (msg->len < 4)
+ valid_la = false;
+ break;
+ case CEC_OP_RECORD_SRC_EXT_PHYS_ADDR:
+ if (msg->len < 5)
+ valid_la = false;
+ break;
+ }
+ break;
+ }
+ }
+
/* It's a valid message and not a poll or CDC message */
- if (valid_la && msg->len > 1 && msg->msg[1] != CEC_MSG_CDC_MESSAGE) {
- u8 cmd = msg->msg[1];
+ if (valid_la && msg->len > 1 && cmd != CEC_MSG_CDC_MESSAGE) {
bool abort = cmd == CEC_MSG_FEATURE_ABORT;
/* The aborted command is in msg[2] */
@@ -806,6 +981,18 @@ void cec_received_msg(struct cec_adapter *adap, struct cec_msg *msg)
list_for_each_entry(data, &adap->wait_queue, list) {
struct cec_msg *dst = &data->msg;
+ /*
+ * The *only* CEC message that has two possible replies
+ * is CEC_MSG_INITIATE_ARC.
+ * In this case allow either of the two replies.
+ */
+ if (!abort && dst->msg[1] == CEC_MSG_INITIATE_ARC &&
+ (cmd == CEC_MSG_REPORT_ARC_INITIATED ||
+ cmd == CEC_MSG_REPORT_ARC_TERMINATED) &&
+ (dst->reply == CEC_MSG_REPORT_ARC_INITIATED ||
+ dst->reply == CEC_MSG_REPORT_ARC_TERMINATED))
+ dst->reply = cmd;
+
/* Does the command match? */
if ((abort && cmd != dst->msg[1]) ||
(!abort && cmd != dst->reply))
@@ -823,6 +1010,7 @@ void cec_received_msg(struct cec_adapter *adap, struct cec_msg *msg)
dst->rx_status = msg->rx_status;
if (abort)
dst->rx_status |= CEC_RX_STATUS_FEATURE_ABORT;
+ msg->flags = dst->flags;
/* Remove it from the wait_queue */
list_del_init(&data->list);
@@ -1068,7 +1256,8 @@ configured:
mutex_unlock(&adap->lock);
for (i = 0; i < las->num_log_addrs; i++) {
- if (las->log_addr[i] == CEC_LOG_ADDR_INVALID)
+ if (las->log_addr[i] == CEC_LOG_ADDR_INVALID ||
+ (las->flags & CEC_LOG_ADDRS_FL_CDC_ONLY))
continue;
/*
@@ -1190,6 +1379,29 @@ int __cec_s_log_addrs(struct cec_adapter *adap,
return 0;
}
+ if (log_addrs->flags & CEC_LOG_ADDRS_FL_CDC_ONLY) {
+ /*
+ * Sanitize log_addrs fields if a CDC-Only device is
+ * requested.
+ */
+ log_addrs->num_log_addrs = 1;
+ log_addrs->osd_name[0] = '\0';
+ log_addrs->vendor_id = CEC_VENDOR_ID_NONE;
+ log_addrs->log_addr_type[0] = CEC_LOG_ADDR_TYPE_UNREGISTERED;
+ /*
+ * This is just an internal convention since a CDC-Only device
+ * doesn't have to be a switch. But switches already use
+ * unregistered, so it makes some sense to pick this as the
+ * primary device type. Since a CDC-Only device never sends
+ * any 'normal' CEC messages, this primary device type is never
+ * sent over the CEC bus.
+ */
+ log_addrs->primary_device_type[0] = CEC_OP_PRIM_DEVTYPE_SWITCH;
+ log_addrs->all_device_types[0] = 0;
+ log_addrs->features[0][0] = 0;
+ log_addrs->features[0][1] = 0;
+ }
+
/* Ensure the osd name is 0-terminated */
log_addrs->osd_name[sizeof(log_addrs->osd_name) - 1] = '\0';
@@ -1223,6 +1435,7 @@ int __cec_s_log_addrs(struct cec_adapter *adap,
const u8 feature_sz = ARRAY_SIZE(log_addrs->features[0]);
u8 *features = log_addrs->features[i];
bool op_is_dev_features = false;
+ unsigned j;
log_addrs->log_addr[i] = CEC_LOG_ADDR_INVALID;
if (type_mask & (1 << log_addrs->log_addr_type[i])) {
@@ -1249,19 +1462,19 @@ int __cec_s_log_addrs(struct cec_adapter *adap,
dprintk(1, "unknown logical address type\n");
return -EINVAL;
}
- for (i = 0; i < feature_sz; i++) {
- if ((features[i] & 0x80) == 0) {
+ for (j = 0; j < feature_sz; j++) {
+ if ((features[j] & 0x80) == 0) {
if (op_is_dev_features)
break;
op_is_dev_features = true;
}
}
- if (!op_is_dev_features || i == feature_sz) {
+ if (!op_is_dev_features || j == feature_sz) {
dprintk(1, "malformed features\n");
return -EINVAL;
}
/* Zero unused part of the feature array */
- memset(features + i + 1, 0, feature_sz - i - 1);
+ memset(features + j + 1, 0, feature_sz - j - 1);
}
if (log_addrs->cec_version >= CEC_OP_CEC_VERSION_2_0) {
@@ -1410,6 +1623,11 @@ static int cec_receive_notify(struct cec_adapter *adap, struct cec_msg *msg,
dprintk(1, "cec_receive_notify: %*ph\n", msg->len, msg->msg);
+ /* If this is a CDC-Only device, then ignore any non-CDC messages */
+ if (cec_is_cdc_only(&adap->log_addrs) &&
+ msg->msg[1] != CEC_MSG_CDC_MESSAGE)
+ return 0;
+
if (adap->ops->received) {
/* Allow drivers to process the message first */
if (adap->ops->received(adap, msg) != -ENOMSG)
@@ -1478,7 +1696,8 @@ static int cec_receive_notify(struct cec_adapter *adap, struct cec_msg *msg,
}
case CEC_MSG_USER_CONTROL_PRESSED:
- if (!(adap->capabilities & CEC_CAP_RC))
+ if (!(adap->capabilities & CEC_CAP_RC) ||
+ !(adap->log_addrs.flags & CEC_LOG_ADDRS_FL_ALLOW_RC_PASSTHRU))
break;
#if IS_REACHABLE(CONFIG_RC_CORE)
@@ -1515,7 +1734,8 @@ static int cec_receive_notify(struct cec_adapter *adap, struct cec_msg *msg,
break;
case CEC_MSG_USER_CONTROL_RELEASED:
- if (!(adap->capabilities & CEC_CAP_RC))
+ if (!(adap->capabilities & CEC_CAP_RC) ||
+ !(adap->log_addrs.flags & CEC_LOG_ADDRS_FL_ALLOW_RC_PASSTHRU))
break;
#if IS_REACHABLE(CONFIG_RC_CORE)
rc_keyup(adap->rc);
@@ -1573,8 +1793,8 @@ static int cec_receive_notify(struct cec_adapter *adap, struct cec_msg *msg,
}
skip_processing:
- /* If this was a reply, then we're done */
- if (is_reply)
+ /* If this was a reply, then we're done, unless otherwise specified */
+ if (is_reply && !(msg->flags & CEC_MSG_FL_REPLY_TO_FOLLOWERS))
return 0;
/*
diff --git a/drivers/staging/media/cec/cec-api.c b/drivers/media/cec/cec-api.c
index e274e2f22398..8950b6c9d6a9 100644
--- a/drivers/staging/media/cec/cec-api.c
+++ b/drivers/media/cec/cec-api.c
@@ -88,7 +88,7 @@ static long cec_adap_g_caps(struct cec_adapter *adap,
{
struct cec_caps caps = {};
- strlcpy(caps.driver, adap->devnode.parent->driver->name,
+ strlcpy(caps.driver, adap->devnode.dev.parent->driver->name,
sizeof(caps.driver));
strlcpy(caps.name, adap->name, sizeof(caps.name));
caps.available_log_addrs = adap->available_log_addrs;
@@ -162,7 +162,9 @@ static long cec_adap_s_log_addrs(struct cec_adapter *adap, struct cec_fh *fh,
return -ENOTTY;
if (copy_from_user(&log_addrs, parg, sizeof(log_addrs)))
return -EFAULT;
- log_addrs.flags &= CEC_LOG_ADDRS_FL_ALLOW_UNREG_FALLBACK;
+ log_addrs.flags &= CEC_LOG_ADDRS_FL_ALLOW_UNREG_FALLBACK |
+ CEC_LOG_ADDRS_FL_ALLOW_RC_PASSTHRU |
+ CEC_LOG_ADDRS_FL_CDC_ONLY;
mutex_lock(&adap->lock);
if (!adap->is_configuring &&
(!log_addrs.num_log_addrs || !adap->is_configured) &&
@@ -189,6 +191,12 @@ static long cec_transmit(struct cec_adapter *adap, struct cec_fh *fh,
return -ENOTTY;
if (copy_from_user(&msg, parg, sizeof(msg)))
return -EFAULT;
+
+ /* A CDC-Only device can only send CDC messages */
+ if ((adap->log_addrs.flags & CEC_LOG_ADDRS_FL_CDC_ONLY) &&
+ (msg.len == 1 || msg.msg[1] != CEC_MSG_CDC_MESSAGE))
+ return -EINVAL;
+
mutex_lock(&adap->lock);
if (!adap->is_configured)
err = -ENONET;
@@ -273,6 +281,7 @@ static long cec_receive(struct cec_adapter *adap, struct cec_fh *fh,
err = cec_receive_msg(fh, &msg, block);
if (err)
return err;
+ msg.flags = 0;
if (copy_to_user(parg, &msg, sizeof(msg)))
return -EFAULT;
return 0;
diff --git a/drivers/staging/media/cec/cec-core.c b/drivers/media/cec/cec-core.c
index b0137e247dc9..aca3ab83a8a1 100644
--- a/drivers/staging/media/cec/cec-core.c
+++ b/drivers/media/cec/cec-core.c
@@ -132,7 +132,6 @@ static int __must_check cec_devnode_register(struct cec_devnode *devnode,
devnode->dev.bus = &cec_bus_type;
devnode->dev.devt = MKDEV(MAJOR(cec_dev_t), minor);
devnode->dev.release = cec_devnode_release;
- devnode->dev.parent = devnode->parent;
dev_set_name(&devnode->dev, "cec%d", devnode->minor);
device_initialize(&devnode->dev);
@@ -198,13 +197,11 @@ static void cec_devnode_unregister(struct cec_devnode *devnode)
struct cec_adapter *cec_allocate_adapter(const struct cec_adap_ops *ops,
void *priv, const char *name, u32 caps,
- u8 available_las, struct device *parent)
+ u8 available_las)
{
struct cec_adapter *adap;
int res;
- if (WARN_ON(!parent))
- return ERR_PTR(-EINVAL);
if (WARN_ON(!caps))
return ERR_PTR(-EINVAL);
if (WARN_ON(!ops))
@@ -214,8 +211,6 @@ struct cec_adapter *cec_allocate_adapter(const struct cec_adap_ops *ops,
adap = kzalloc(sizeof(*adap), GFP_KERNEL);
if (!adap)
return ERR_PTR(-ENOMEM);
- adap->owner = parent->driver->owner;
- adap->devnode.parent = parent;
strlcpy(adap->name, name, sizeof(adap->name));
adap->phys_addr = CEC_PHYS_ADDR_INVALID;
adap->log_addrs.cec_version = CEC_OP_CEC_VERSION_2_0;
@@ -264,7 +259,6 @@ struct cec_adapter *cec_allocate_adapter(const struct cec_adap_ops *ops,
adap->rc->input_id.vendor = 0;
adap->rc->input_id.product = 0;
adap->rc->input_id.version = 1;
- adap->rc->dev.parent = parent;
adap->rc->driver_type = RC_DRIVER_SCANCODE;
adap->rc->driver_name = CEC_NAME;
adap->rc->allowed_protocols = RC_BIT_CEC;
@@ -278,14 +272,22 @@ struct cec_adapter *cec_allocate_adapter(const struct cec_adap_ops *ops,
}
EXPORT_SYMBOL_GPL(cec_allocate_adapter);
-int cec_register_adapter(struct cec_adapter *adap)
+int cec_register_adapter(struct cec_adapter *adap,
+ struct device *parent)
{
int res;
if (IS_ERR_OR_NULL(adap))
return 0;
+ if (WARN_ON(!parent))
+ return -EINVAL;
+
+ adap->owner = parent->driver->owner;
+ adap->devnode.dev.parent = parent;
+
#if IS_REACHABLE(CONFIG_RC_CORE)
+ adap->rc->dev.parent = parent;
if (adap->capabilities & CEC_CAP_RC) {
res = rc_register_device(adap->rc);
diff --git a/drivers/staging/media/cec/cec-priv.h b/drivers/media/cec/cec-priv.h
index 70767a7900f2..70767a7900f2 100644
--- a/drivers/staging/media/cec/cec-priv.h
+++ b/drivers/media/cec/cec-priv.h
diff --git a/drivers/media/common/b2c2/flexcop-common.h b/drivers/media/common/b2c2/flexcop-common.h
index 2b2460e9e6b4..2533574c0cf4 100644
--- a/drivers/media/common/b2c2/flexcop-common.h
+++ b/drivers/media/common/b2c2/flexcop-common.h
@@ -14,7 +14,6 @@
#include "dmxdev.h"
#include "dvb_demux.h"
-#include "dvb_filter.h"
#include "dvb_net.h"
#include "dvb_frontend.h"
diff --git a/drivers/media/common/b2c2/flexcop-eeprom.c b/drivers/media/common/b2c2/flexcop-eeprom.c
index a25373a9bd84..844c7836c2a6 100644
--- a/drivers/media/common/b2c2/flexcop-eeprom.c
+++ b/drivers/media/common/b2c2/flexcop-eeprom.c
@@ -136,8 +136,7 @@ int flexcop_eeprom_check_mac_addr(struct flexcop_device *fc, int extended)
if ((ret = flexcop_eeprom_lrc_read(fc,0x3f8,buf,8,4)) == 0) {
if (extended != 0) {
- err("TODO: extended (EUI64) MAC addresses aren't "
- "completely supported yet");
+ err("TODO: extended (EUI64) MAC addresses aren't completely supported yet");
ret = -EINVAL;
} else
memcpy(fc->dvb_adapter.proposed_mac,buf,6);
diff --git a/drivers/media/common/b2c2/flexcop-i2c.c b/drivers/media/common/b2c2/flexcop-i2c.c
index 965d5eb33752..58d39a59fc09 100644
--- a/drivers/media/common/b2c2/flexcop-i2c.c
+++ b/drivers/media/common/b2c2/flexcop-i2c.c
@@ -33,8 +33,8 @@ static int flexcop_i2c_operation(struct flexcop_device *fc,
return -EREMOTEIO;
}
}
- deb_i2c("tried %d times i2c operation, "
- "never finished or too many ack errors.\n", i);
+ deb_i2c("tried %d times i2c operation, never finished or too many ack errors.\n",
+ i);
return -EREMOTEIO;
}
@@ -124,10 +124,10 @@ int flexcop_i2c_request(struct flexcop_i2c_adapter *i2c,
#ifdef DUMP_I2C_MESSAGES
printk(KERN_DEBUG "%d ", i2c->port);
if (op == FC_READ)
- printk("rd(");
+ printk(KERN_CONT "rd(");
else
- printk("wr(");
- printk("%02x): %02x ", chipaddr, addr);
+ printk(KERN_CONT "wr(");
+ printk(KERN_CONT "%02x): %02x ", chipaddr, addr);
#endif
/* in that case addr is the only value ->
@@ -151,7 +151,7 @@ int flexcop_i2c_request(struct flexcop_i2c_adapter *i2c,
#ifdef DUMP_I2C_MESSAGES
for (i = 0; i < bytes_to_transfer; i++)
- printk("%02x ", buf[i]);
+ printk(KERN_CONT "%02x ", buf[i]);
#endif
if (ret < 0)
@@ -163,7 +163,7 @@ int flexcop_i2c_request(struct flexcop_i2c_adapter *i2c,
}
#ifdef DUMP_I2C_MESSAGES
- printk("\n");
+ printk(KERN_CONT "\n");
#endif
return 0;
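The rule behind these conversions, as a standalone sketch (variables invented): a printk() without a level marker starts a new log record at the default loglevel, so the pieces of a line assembled across several calls must carry KERN_CONT to stay on one line.

	printk(KERN_DEBUG "%d ", port);		/* opens the record */
	printk(KERN_CONT "rd(");		/* appends to the same line */
	printk(KERN_CONT "%02x): %02x\n", chipaddr, addr);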
diff --git a/drivers/media/common/b2c2/flexcop-misc.c b/drivers/media/common/b2c2/flexcop-misc.c
index b8eff235367d..bb0d95fe64f9 100644
--- a/drivers/media/common/b2c2/flexcop-misc.c
+++ b/drivers/media/common/b2c2/flexcop-misc.c
@@ -23,18 +23,15 @@ void flexcop_determine_revision(struct flexcop_device *fc)
fc->rev = FLEXCOP_III;
break;
default:
- err("unknown FlexCop Revision: %x. Please report this to "
- "linux-dvb@linuxtv.org.",
+ err("unknown FlexCop Revision: %x. Please report this to linux-dvb@linuxtv.org.",
v.misc_204.Rev_N_sig_revision_hi);
break;
}
if ((fc->has_32_hw_pid_filter = v.misc_204.Rev_N_sig_caps))
- deb_info("this FlexCop has "
- "the additional 32 hardware pid filter.\n");
+ deb_info("this FlexCop has the additional 32 hardware pid filter.\n");
else
- deb_info("this FlexCop has "
- "the 6 basic main hardware pid filter.\n");
+ deb_info("this FlexCop has the 6 basic main hardware pid filter.\n");
/* bus parts have to decide if hw pid filtering is used or not. */
}
diff --git a/drivers/media/common/b2c2/flexcop.c b/drivers/media/common/b2c2/flexcop.c
index 0f5114d406f8..4338ab0043b4 100644
--- a/drivers/media/common/b2c2/flexcop.c
+++ b/drivers/media/common/b2c2/flexcop.c
@@ -46,8 +46,7 @@ int b2c2_flexcop_debug;
EXPORT_SYMBOL_GPL(b2c2_flexcop_debug);
module_param_named(debug, b2c2_flexcop_debug, int, 0644);
MODULE_PARM_DESC(debug,
- "set debug level (1=info,2=tuner,4=i2c,8=ts,"
- "16=sram,32=reg (|-able))."
+ "set debug level (1=info,2=tuner,4=i2c,8=ts,16=sram,32=reg (|-able))."
DEBSTATUS);
#undef DEBSTATUS
diff --git a/drivers/media/common/cx2341x.c b/drivers/media/common/cx2341x.c
index 5e4afa0131e6..2725702eda7b 100644
--- a/drivers/media/common/cx2341x.c
+++ b/drivers/media/common/cx2341x.c
@@ -1190,8 +1190,8 @@ void cx2341x_log_status(const struct cx2341x_mpeg_params *p, const char *prefix)
prefix,
cx2341x_menu_item(p, V4L2_CID_MPEG_STREAM_TYPE));
if (p->stream_insert_nav_packets)
- printk(" (with navigation packets)");
- printk("\n");
+ printk(KERN_CONT " (with navigation packets)");
+ printk(KERN_CONT "\n");
printk(KERN_INFO "%s: VBI Format: %s\n",
prefix,
cx2341x_menu_item(p, V4L2_CID_MPEG_STREAM_VBI_FMT));
@@ -1209,8 +1209,8 @@ void cx2341x_log_status(const struct cx2341x_mpeg_params *p, const char *prefix)
cx2341x_menu_item(p, V4L2_CID_MPEG_VIDEO_BITRATE_MODE),
p->video_bitrate);
if (p->video_bitrate_mode == V4L2_MPEG_VIDEO_BITRATE_MODE_VBR)
- printk(", Peak %d", p->video_bitrate_peak);
- printk("\n");
+ printk(KERN_CONT ", Peak %d", p->video_bitrate_peak);
+ printk(KERN_CONT "\n");
printk(KERN_INFO
"%s: Video: GOP Size %d, %d B-Frames, %sGOP Closure\n",
prefix,
@@ -1232,9 +1232,9 @@ void cx2341x_log_status(const struct cx2341x_mpeg_params *p, const char *prefix)
cx2341x_menu_item(p, V4L2_CID_MPEG_AUDIO_MODE),
p->audio_mute ? " (muted)" : "");
if (p->audio_mode == V4L2_MPEG_AUDIO_MODE_JOINT_STEREO)
- printk(", %s", cx2341x_menu_item(p,
+ printk(KERN_CONT ", %s", cx2341x_menu_item(p,
V4L2_CID_MPEG_AUDIO_MODE_EXTENSION));
- printk(", %s, %s\n",
+ printk(KERN_CONT ", %s, %s\n",
cx2341x_menu_item(p, V4L2_CID_MPEG_AUDIO_EMPHASIS),
cx2341x_menu_item(p, V4L2_CID_MPEG_AUDIO_CRC));
diff --git a/drivers/media/common/saa7146/saa7146_video.c b/drivers/media/common/saa7146/saa7146_video.c
index ea2f3bf7368b..e034bcfcf757 100644
--- a/drivers/media/common/saa7146/saa7146_video.c
+++ b/drivers/media/common/saa7146/saa7146_video.c
@@ -390,6 +390,7 @@ static int video_end(struct saa7146_fh *fh, struct file *file)
{
struct saa7146_dev *dev = fh->dev;
struct saa7146_vv *vv = dev->vv_data;
+ struct saa7146_dmaqueue *q = &vv->video_dmaq;
struct saa7146_format *fmt = NULL;
unsigned long flags;
unsigned int resource;
@@ -428,6 +429,9 @@ static int video_end(struct saa7146_fh *fh, struct file *file)
/* shut down all used video dma transfers */
saa7146_write(dev, MC1, dmas);
+ if (q->curr)
+ saa7146_buffer_finish(dev, q, VIDEOBUF_DONE);
+
spin_unlock_irqrestore(&dev->slock, flags);
vv->video_fh = NULL;
diff --git a/drivers/media/common/siano/smsdvb-main.c b/drivers/media/common/siano/smsdvb-main.c
index 9148e14c9d07..affde1426b7a 100644
--- a/drivers/media/common/siano/smsdvb-main.c
+++ b/drivers/media/common/siano/smsdvb-main.c
@@ -1044,7 +1044,7 @@ static void smsdvb_release(struct dvb_frontend *fe)
/* do nothing */
}
-static struct dvb_frontend_ops smsdvb_fe_ops = {
+static const struct dvb_frontend_ops smsdvb_fe_ops = {
.info = {
.name = "Siano Mobile Digital MDTV Receiver",
.frequency_min = 44250000,
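Constifying the ops table is safe here because drivers copy the template into their own frontend structure rather than writing through the shared pointer; the const simply lets the table live in .rodata. A hypothetical minimal example:

	static const struct dvb_frontend_ops demo_fe_ops = {
		.info = {
			.name = "Demo MDTV frontend",
		},
	};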
diff --git a/drivers/media/common/tveeprom.c b/drivers/media/common/tveeprom.c
index 47da0378cad8..11976031aff8 100644
--- a/drivers/media/common/tveeprom.c
+++ b/drivers/media/common/tveeprom.c
@@ -28,6 +28,7 @@
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/errno.h>
@@ -45,22 +46,9 @@ MODULE_DESCRIPTION("i2c Hauppauge eeprom decoder driver");
MODULE_AUTHOR("John Klar");
MODULE_LICENSE("GPL");
-static int debug;
-module_param(debug, int, 0644);
-MODULE_PARM_DESC(debug, "Debug level (0-1)");
-
#define STRM(array, i) \
(i < sizeof(array) / sizeof(char *) ? array[i] : "unknown")
-#define tveeprom_info(fmt, arg...) \
- v4l_printk(KERN_INFO, "tveeprom", c->adapter, c->addr, fmt , ## arg)
-#define tveeprom_warn(fmt, arg...) \
- v4l_printk(KERN_WARNING, "tveeprom", c->adapter, c->addr, fmt , ## arg)
-#define tveeprom_dbg(fmt, arg...) do { \
- if (debug) \
- v4l_printk(KERN_DEBUG, "tveeprom", \
- c->adapter, c->addr, fmt , ## arg); \
- } while (0)
/*
* The Hauppauge eeprom uses an 8bit field to determine which
@@ -510,19 +498,13 @@ void tveeprom_hauppauge_analog(struct i2c_client *c, struct tveeprom *tvee,
len = eeprom_data[i] & 0x07;
++i;
} else {
- tveeprom_warn("Encountered bad packet header [%02x]. "
- "Corrupt or not a Hauppauge eeprom.\n",
+ pr_warn("Encountered bad packet header [%02x]. Corrupt or not a Hauppauge eeprom.\n",
eeprom_data[i]);
return;
}
- if (debug) {
- tveeprom_info("Tag [%02x] + %d bytes:",
- eeprom_data[i], len - 1);
- for (j = 1; j < len; j++)
- printk(KERN_CONT " %02x", eeprom_data[i + j]);
- printk(KERN_CONT "\n");
- }
+ pr_debug("Tag [%02x] + %d bytes: %*ph\n",
+ eeprom_data[i], len - 1, len, &eeprom_data[i]);
/* process by tag */
tag = eeprom_data[i];
@@ -662,14 +644,14 @@ void tveeprom_hauppauge_analog(struct i2c_client *c, struct tveeprom *tvee,
/* case 0x12: tag 'InfoBits' */
default:
- tveeprom_dbg("Not sure what to do with tag [%02x]\n",
+ pr_debug("Not sure what to do with tag [%02x]\n",
tag);
/* dump the rest of the packet? */
}
}
if (!done) {
- tveeprom_warn("Ran out of data!\n");
+ pr_warn("Ran out of data!\n");
return;
}
@@ -682,8 +664,8 @@ void tveeprom_hauppauge_analog(struct i2c_client *c, struct tveeprom *tvee,
}
if (hasRadioTuner(tuner1) && !tvee->has_radio) {
- tveeprom_info("The eeprom says no radio is present, but the tuner type\n");
- tveeprom_info("indicates otherwise. I will assume that radio is present.\n");
+ pr_info("The eeprom says no radio is present, but the tuner type\n");
+ pr_info("indicates otherwise. I will assume that radio is present.\n");
tvee->has_radio = 1;
}
@@ -718,46 +700,46 @@ void tveeprom_hauppauge_analog(struct i2c_client *c, struct tveeprom *tvee,
}
}
- tveeprom_info("Hauppauge model %d, rev %s, serial# %u\n",
+ pr_info("Hauppauge model %d, rev %s, serial# %u\n",
tvee->model, tvee->rev_str, tvee->serial_number);
if (tvee->has_MAC_address == 1)
- tveeprom_info("MAC address is %pM\n", tvee->MAC_address);
- tveeprom_info("tuner model is %s (idx %d, type %d)\n",
+ pr_info("MAC address is %pM\n", tvee->MAC_address);
+ pr_info("tuner model is %s (idx %d, type %d)\n",
t_name1, tuner1, tvee->tuner_type);
- tveeprom_info("TV standards%s%s%s%s%s%s%s%s (eeprom 0x%02x)\n",
+ pr_info("TV standards%s%s%s%s%s%s%s%s (eeprom 0x%02x)\n",
t_fmt_name1[0], t_fmt_name1[1], t_fmt_name1[2],
t_fmt_name1[3], t_fmt_name1[4], t_fmt_name1[5],
t_fmt_name1[6], t_fmt_name1[7], t_format1);
if (tuner2)
- tveeprom_info("second tuner model is %s (idx %d, type %d)\n",
+ pr_info("second tuner model is %s (idx %d, type %d)\n",
t_name2, tuner2, tvee->tuner2_type);
if (t_format2)
- tveeprom_info("TV standards%s%s%s%s%s%s%s%s (eeprom 0x%02x)\n",
+ pr_info("TV standards%s%s%s%s%s%s%s%s (eeprom 0x%02x)\n",
t_fmt_name2[0], t_fmt_name2[1], t_fmt_name2[2],
t_fmt_name2[3], t_fmt_name2[4], t_fmt_name2[5],
t_fmt_name2[6], t_fmt_name2[7], t_format2);
if (audioic < 0) {
- tveeprom_info("audio processor is unknown (no idx)\n");
+ pr_info("audio processor is unknown (no idx)\n");
tvee->audio_processor = TVEEPROM_AUDPROC_OTHER;
} else {
if (audioic < ARRAY_SIZE(audio_ic))
- tveeprom_info("audio processor is %s (idx %d)\n",
+ pr_info("audio processor is %s (idx %d)\n",
audio_ic[audioic].name, audioic);
else
- tveeprom_info("audio processor is unknown (idx %d)\n",
+ pr_info("audio processor is unknown (idx %d)\n",
audioic);
}
if (tvee->decoder_processor)
- tveeprom_info("decoder processor is %s (idx %d)\n",
+ pr_info("decoder processor is %s (idx %d)\n",
STRM(decoderIC, tvee->decoder_processor),
tvee->decoder_processor);
if (tvee->has_ir)
- tveeprom_info("has %sradio, has %sIR receiver, has %sIR transmitter\n",
+ pr_info("has %sradio, has %sIR receiver, has %sIR transmitter\n",
tvee->has_radio ? "" : "no ",
(tvee->has_ir & 2) ? "" : "no ",
(tvee->has_ir & 4) ? "" : "no ");
else
- tveeprom_info("has %sradio\n",
+ pr_info("has %sradio\n",
tvee->has_radio ? "" : "no ");
}
EXPORT_SYMBOL(tveeprom_hauppauge_analog);
@@ -773,26 +755,17 @@ int tveeprom_read(struct i2c_client *c, unsigned char *eedata, int len)
buf = 0;
err = i2c_master_send(c, &buf, 1);
if (err != 1) {
- tveeprom_info("Huh, no eeprom present (err=%d)?\n", err);
+ pr_info("Huh, no eeprom present (err=%d)?\n", err);
return -1;
}
err = i2c_master_recv(c, eedata, len);
if (err != len) {
- tveeprom_warn("i2c eeprom read error (err=%d)\n", err);
+ pr_warn("i2c eeprom read error (err=%d)\n", err);
return -1;
}
- if (debug) {
- int i;
-
- tveeprom_info("full 256-byte eeprom dump:\n");
- for (i = 0; i < len; i++) {
- if (0 == (i % 16))
- tveeprom_info("%02x:", i);
- printk(KERN_CONT " %02x", eedata[i]);
- if (15 == (i % 16))
- printk(KERN_CONT "\n");
- }
- }
+
+ print_hex_dump_debug("full 256-byte eeprom dump:", DUMP_PREFIX_NONE,
+ 16, 1, eedata, len, true);
return 0;
}
EXPORT_SYMBOL(tveeprom_read);
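Both replacement helpers are stock printk facilities; a standalone sketch with invented buffer contents: the %*ph specifier prints up to 64 bytes as space-separated hex, and print_hex_dump_debug() takes over the larger dumps that previously needed hand-rolled loops.

	u8 buf[8] = { 0x12, 0x34, 0x56, 0x78, 0x9a, 0xbc, 0xde, 0xf0 };

	pr_debug("tag bytes: %*ph\n", 4, buf);	/* prints "12 34 56 78" */
	print_hex_dump_debug("eeprom: ", DUMP_PREFIX_OFFSET, 16, 1,
			     buf, sizeof(buf), true);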
diff --git a/drivers/media/common/v4l2-tpg/v4l2-tpg-core.c b/drivers/media/common/v4l2-tpg/v4l2-tpg-core.c
index 1684810cab83..e47b46e2d26c 100644
--- a/drivers/media/common/v4l2-tpg/v4l2-tpg-core.c
+++ b/drivers/media/common/v4l2-tpg/v4l2-tpg-core.c
@@ -117,6 +117,7 @@ void tpg_init(struct tpg_data *tpg, unsigned w, unsigned h)
tpg_s_fourcc(tpg, V4L2_PIX_FMT_RGB24);
tpg->colorspace = V4L2_COLORSPACE_SRGB;
tpg->perc_fill = 100;
+ tpg->hsv_enc = V4L2_HSV_ENC_180;
}
EXPORT_SYMBOL_GPL(tpg_init);
@@ -234,16 +235,18 @@ bool tpg_s_fourcc(struct tpg_data *tpg, u32 fourcc)
case V4L2_PIX_FMT_XBGR32:
case V4L2_PIX_FMT_ARGB32:
case V4L2_PIX_FMT_ABGR32:
+ tpg->color_enc = TGP_COLOR_ENC_RGB;
+ break;
case V4L2_PIX_FMT_GREY:
case V4L2_PIX_FMT_Y16:
case V4L2_PIX_FMT_Y16_BE:
- tpg->is_yuv = false;
+ tpg->color_enc = TGP_COLOR_ENC_LUMA;
break;
case V4L2_PIX_FMT_YUV444:
case V4L2_PIX_FMT_YUV555:
case V4L2_PIX_FMT_YUV565:
case V4L2_PIX_FMT_YUV32:
- tpg->is_yuv = true;
+ tpg->color_enc = TGP_COLOR_ENC_YCBCR;
break;
case V4L2_PIX_FMT_YUV420M:
case V4L2_PIX_FMT_YVU420M:
@@ -256,7 +259,7 @@ bool tpg_s_fourcc(struct tpg_data *tpg, u32 fourcc)
tpg->hdownsampling[1] = 2;
tpg->hdownsampling[2] = 2;
tpg->planes = 3;
- tpg->is_yuv = true;
+ tpg->color_enc = TGP_COLOR_ENC_YCBCR;
break;
case V4L2_PIX_FMT_YUV422M:
case V4L2_PIX_FMT_YVU422M:
@@ -268,7 +271,7 @@ bool tpg_s_fourcc(struct tpg_data *tpg, u32 fourcc)
tpg->hdownsampling[1] = 2;
tpg->hdownsampling[2] = 2;
tpg->planes = 3;
- tpg->is_yuv = true;
+ tpg->color_enc = TGP_COLOR_ENC_YCBCR;
break;
case V4L2_PIX_FMT_NV16M:
case V4L2_PIX_FMT_NV61M:
@@ -280,7 +283,7 @@ bool tpg_s_fourcc(struct tpg_data *tpg, u32 fourcc)
tpg->hdownsampling[1] = 1;
tpg->hmask[1] = ~1;
tpg->planes = 2;
- tpg->is_yuv = true;
+ tpg->color_enc = TGP_COLOR_ENC_YCBCR;
break;
case V4L2_PIX_FMT_NV12M:
case V4L2_PIX_FMT_NV21M:
@@ -292,7 +295,7 @@ bool tpg_s_fourcc(struct tpg_data *tpg, u32 fourcc)
tpg->hdownsampling[1] = 1;
tpg->hmask[1] = ~1;
tpg->planes = 2;
- tpg->is_yuv = true;
+ tpg->color_enc = TGP_COLOR_ENC_YCBCR;
break;
case V4L2_PIX_FMT_YUV444M:
case V4L2_PIX_FMT_YVU444M:
@@ -302,21 +305,25 @@ bool tpg_s_fourcc(struct tpg_data *tpg, u32 fourcc)
tpg->vdownsampling[2] = 1;
tpg->hdownsampling[1] = 1;
tpg->hdownsampling[2] = 1;
- tpg->is_yuv = true;
+ tpg->color_enc = TGP_COLOR_ENC_YCBCR;
break;
case V4L2_PIX_FMT_NV24:
case V4L2_PIX_FMT_NV42:
tpg->vdownsampling[1] = 1;
tpg->hdownsampling[1] = 1;
tpg->planes = 2;
- tpg->is_yuv = true;
+ tpg->color_enc = TGP_COLOR_ENC_YCBCR;
break;
case V4L2_PIX_FMT_YUYV:
case V4L2_PIX_FMT_UYVY:
case V4L2_PIX_FMT_YVYU:
case V4L2_PIX_FMT_VYUY:
tpg->hmask[0] = ~1;
- tpg->is_yuv = true;
+ tpg->color_enc = TGP_COLOR_ENC_YCBCR;
+ break;
+ case V4L2_PIX_FMT_HSV24:
+ case V4L2_PIX_FMT_HSV32:
+ tpg->color_enc = TGP_COLOR_ENC_HSV;
break;
default:
return false;
@@ -351,6 +358,7 @@ bool tpg_s_fourcc(struct tpg_data *tpg, u32 fourcc)
break;
case V4L2_PIX_FMT_RGB24:
case V4L2_PIX_FMT_BGR24:
+ case V4L2_PIX_FMT_HSV24:
tpg->twopixelsize[0] = 2 * 3;
break;
case V4L2_PIX_FMT_BGR666:
@@ -361,6 +369,7 @@ bool tpg_s_fourcc(struct tpg_data *tpg, u32 fourcc)
case V4L2_PIX_FMT_ARGB32:
case V4L2_PIX_FMT_ABGR32:
case V4L2_PIX_FMT_YUV32:
+ case V4L2_PIX_FMT_HSV32:
tpg->twopixelsize[0] = 2 * 4;
break;
case V4L2_PIX_FMT_NV12:
@@ -490,6 +499,71 @@ static inline int linear_to_rec709(int v)
return tpg_linear_to_rec709[v];
}
+static void color_to_hsv(struct tpg_data *tpg, int r, int g, int b,
+ int *h, int *s, int *v)
+{
+ int max_rgb, min_rgb, diff_rgb;
+ int aux;
+ int third;
+ int third_size;
+
+ r >>= 4;
+ g >>= 4;
+ b >>= 4;
+
+ /* Value */
+ max_rgb = max3(r, g, b);
+ *v = max_rgb;
+ if (!max_rgb) {
+ *h = 0;
+ *s = 0;
+ return;
+ }
+
+ /* Saturation */
+ min_rgb = min3(r, g, b);
+ diff_rgb = max_rgb - min_rgb;
+ aux = 255 * diff_rgb;
+ aux += max_rgb / 2;
+ aux /= max_rgb;
+ *s = aux;
+ if (!aux) {
+ *h = 0;
+ return;
+ }
+
+ third_size = (tpg->real_hsv_enc == V4L2_HSV_ENC_180) ? 60 : 85;
+
+ /* Hue */
+ if (max_rgb == r) {
+ aux = g - b;
+ third = 0;
+ } else if (max_rgb == g) {
+ aux = b - r;
+ third = third_size;
+ } else {
+ aux = r - g;
+ third = third_size * 2;
+ }
+
+ aux *= third_size / 2;
+ aux += diff_rgb / 2;
+ aux /= diff_rgb;
+ aux += third;
+
+ /* Clamp Hue */
+ if (tpg->real_hsv_enc == V4L2_HSV_ENC_180) {
+ if (aux < 0)
+ aux += 180;
+ else if (aux > 180)
+ aux -= 180;
+ } else {
+ aux = aux & 0xff;
+ }
+
+ *h = aux;
+}
+
static void rgb2ycbcr(const int m[3][3], int r, int g, int b,
int y_offset, int *y, int *cb, int *cr)
{
@@ -729,6 +803,8 @@ static void precalculate_color(struct tpg_data *tpg, int k)
int r = tpg_colors[col].r;
int g = tpg_colors[col].g;
int b = tpg_colors[col].b;
+ int y, cb, cr;
+ bool ycbcr_valid = false;
if (k == TPG_COLOR_TEXTBG) {
col = tpg_get_textbg_color(tpg);
@@ -759,9 +835,9 @@ static void precalculate_color(struct tpg_data *tpg, int k)
g <<= 4;
b <<= 4;
}
- if (tpg->qual == TPG_QUAL_GRAY || tpg->fourcc == V4L2_PIX_FMT_GREY ||
- tpg->fourcc == V4L2_PIX_FMT_Y16 ||
- tpg->fourcc == V4L2_PIX_FMT_Y16_BE) {
+
+ if (tpg->qual == TPG_QUAL_GRAY ||
+ tpg->color_enc == TGP_COLOR_ENC_LUMA) {
/* Rec. 709 Luma function */
/* (0.2126, 0.7152, 0.0722) * (255 * 256) */
r = g = b = (13879 * r + 46688 * g + 4713 * b) >> 16;
@@ -775,7 +851,8 @@ static void precalculate_color(struct tpg_data *tpg, int k)
* Remember that r, g and b are still in the 0 - 0xff0 range.
*/
if (tpg->real_rgb_range == V4L2_DV_RGB_RANGE_LIMITED &&
- tpg->rgb_range == V4L2_DV_RGB_RANGE_FULL && !tpg->is_yuv) {
+ tpg->rgb_range == V4L2_DV_RGB_RANGE_FULL &&
+ tpg->color_enc == TGP_COLOR_ENC_RGB) {
/*
* Convert from full range (which is what r, g and b are)
* to limited range (which is the 'real' RGB range), which
@@ -785,7 +862,9 @@ static void precalculate_color(struct tpg_data *tpg, int k)
g = (g * 219) / 255 + (16 << 4);
b = (b * 219) / 255 + (16 << 4);
} else if (tpg->real_rgb_range != V4L2_DV_RGB_RANGE_LIMITED &&
- tpg->rgb_range == V4L2_DV_RGB_RANGE_LIMITED && !tpg->is_yuv) {
+ tpg->rgb_range == V4L2_DV_RGB_RANGE_LIMITED &&
+ tpg->color_enc == TGP_COLOR_ENC_RGB) {
/*
* Clamp r, g and b to the limited range and convert to full
* range since that's what we deliver.
@@ -798,10 +877,10 @@ static void precalculate_color(struct tpg_data *tpg, int k)
b = (b - (16 << 4)) * 255 / 219;
}
- if (tpg->brightness != 128 || tpg->contrast != 128 ||
- tpg->saturation != 128 || tpg->hue) {
+ if ((tpg->brightness != 128 || tpg->contrast != 128 ||
+ tpg->saturation != 128 || tpg->hue) &&
+ tpg->color_enc != TGP_COLOR_ENC_LUMA) {
/* Implement these operations */
- int y, cb, cr;
int tmp_cb, tmp_cr;
/* First convert to YCbCr */
@@ -818,29 +897,45 @@ static void precalculate_color(struct tpg_data *tpg, int k)
cb = (128 << 4) + (tmp_cb * tpg->contrast * tpg->saturation) / (128 * 128);
cr = (128 << 4) + (tmp_cr * tpg->contrast * tpg->saturation) / (128 * 128);
- if (tpg->is_yuv) {
- tpg->colors[k][0] = clamp(y >> 4, 1, 254);
- tpg->colors[k][1] = clamp(cb >> 4, 1, 254);
- tpg->colors[k][2] = clamp(cr >> 4, 1, 254);
- return;
- }
- ycbcr_to_color(tpg, y, cb, cr, &r, &g, &b);
+ if (tpg->color_enc == TGP_COLOR_ENC_YCBCR)
+ ycbcr_valid = true;
+ else
+ ycbcr_to_color(tpg, y, cb, cr, &r, &g, &b);
+ } else if ((tpg->brightness != 128 || tpg->contrast != 128) &&
+ tpg->color_enc == TGP_COLOR_ENC_LUMA) {
+ r = (16 << 4) + ((r - (16 << 4)) * tpg->contrast) / 128;
+ r += (tpg->brightness << 4) - (128 << 4);
}
- if (tpg->is_yuv) {
- /* Convert to YCbCr */
- int y, cb, cr;
+ switch (tpg->color_enc) {
+ case TGP_COLOR_ENC_HSV:
+ {
+ int h, s, v;
- color_to_ycbcr(tpg, r, g, b, &y, &cb, &cr);
+ color_to_hsv(tpg, r, g, b, &h, &s, &v);
+ tpg->colors[k][0] = h;
+ tpg->colors[k][1] = s;
+ tpg->colors[k][2] = v;
+ break;
+ }
+ case TGP_COLOR_ENC_YCBCR:
+ {
+ /* Convert to YCbCr */
+ if (!ycbcr_valid)
+ color_to_ycbcr(tpg, r, g, b, &y, &cb, &cr);
+ y >>= 4;
+ cb >>= 4;
+ cr >>= 4;
if (tpg->real_quantization == V4L2_QUANTIZATION_LIM_RANGE) {
- y = clamp(y, 16 << 4, 235 << 4);
- cb = clamp(cb, 16 << 4, 240 << 4);
- cr = clamp(cr, 16 << 4, 240 << 4);
+ y = clamp(y, 16, 235);
+ cb = clamp(cb, 16, 240);
+ cr = clamp(cr, 16, 240);
+ } else {
+ y = clamp(y, 1, 254);
+ cb = clamp(cb, 1, 254);
+ cr = clamp(cr, 1, 254);
}
- y = clamp(y >> 4, 1, 254);
- cb = clamp(cb >> 4, 1, 254);
- cr = clamp(cr >> 4, 1, 254);
switch (tpg->fourcc) {
case V4L2_PIX_FMT_YUV444:
y >>= 4;
@@ -861,7 +956,15 @@ static void precalculate_color(struct tpg_data *tpg, int k)
tpg->colors[k][0] = y;
tpg->colors[k][1] = cb;
tpg->colors[k][2] = cr;
- } else {
+ break;
+ }
+ case TGP_COLOR_ENC_LUMA:
+ {
+ tpg->colors[k][0] = r >> 4;
+ break;
+ }
+ case TGP_COLOR_ENC_RGB:
+ {
if (tpg->real_quantization == V4L2_QUANTIZATION_LIM_RANGE) {
r = (r * 219) / 255 + (16 << 4);
g = (g * 219) / 255 + (16 << 4);
@@ -911,6 +1014,8 @@ static void precalculate_color(struct tpg_data *tpg, int k)
tpg->colors[k][0] = r;
tpg->colors[k][1] = g;
tpg->colors[k][2] = b;
+ break;
+ }
}
}
@@ -928,7 +1033,7 @@ static void gen_twopix(struct tpg_data *tpg,
{
unsigned offset = odd * tpg->twopixelsize[0] / 2;
u8 alpha = tpg->alpha_component;
- u8 r_y, g_u, b_v;
+ u8 r_y_h, g_u_s, b_v;
if (tpg->alpha_red_only && color != TPG_COLOR_CSC_RED &&
color != TPG_COLOR_100_RED &&
@@ -936,161 +1041,161 @@ static void gen_twopix(struct tpg_data *tpg,
alpha = 0;
if (color == TPG_COLOR_RANDOM)
precalculate_color(tpg, color);
- r_y = tpg->colors[color][0]; /* R or precalculated Y */
- g_u = tpg->colors[color][1]; /* G or precalculated U */
+ r_y_h = tpg->colors[color][0]; /* R or precalculated Y, H */
+ g_u_s = tpg->colors[color][1]; /* G or precalculated U, S */
b_v = tpg->colors[color][2]; /* B or precalculated V */
switch (tpg->fourcc) {
case V4L2_PIX_FMT_GREY:
- buf[0][offset] = r_y;
+ buf[0][offset] = r_y_h;
break;
case V4L2_PIX_FMT_Y16:
/*
- * Ideally both bytes should be set to r_y, but then you won't
+ * Ideally both bytes should be set to r_y_h, but then you won't
* be able to detect endian problems. So keep it 0 except for
- * the corner case where r_y is 0xff so white really will be
+ * the corner case where r_y_h is 0xff so white really will be
* white (0xffff).
*/
- buf[0][offset] = r_y == 0xff ? r_y : 0;
- buf[0][offset+1] = r_y;
+ buf[0][offset] = r_y_h == 0xff ? r_y_h : 0;
+ buf[0][offset+1] = r_y_h;
break;
case V4L2_PIX_FMT_Y16_BE:
/* See comment for V4L2_PIX_FMT_Y16 above */
- buf[0][offset] = r_y;
- buf[0][offset+1] = r_y == 0xff ? r_y : 0;
+ buf[0][offset] = r_y_h;
+ buf[0][offset+1] = r_y_h == 0xff ? r_y_h : 0;
break;
case V4L2_PIX_FMT_YUV422M:
case V4L2_PIX_FMT_YUV422P:
case V4L2_PIX_FMT_YUV420:
case V4L2_PIX_FMT_YUV420M:
- buf[0][offset] = r_y;
+ buf[0][offset] = r_y_h;
if (odd) {
- buf[1][0] = (buf[1][0] + g_u) / 2;
+ buf[1][0] = (buf[1][0] + g_u_s) / 2;
buf[2][0] = (buf[2][0] + b_v) / 2;
buf[1][1] = buf[1][0];
buf[2][1] = buf[2][0];
break;
}
- buf[1][0] = g_u;
+ buf[1][0] = g_u_s;
buf[2][0] = b_v;
break;
case V4L2_PIX_FMT_YVU422M:
case V4L2_PIX_FMT_YVU420:
case V4L2_PIX_FMT_YVU420M:
- buf[0][offset] = r_y;
+ buf[0][offset] = r_y_h;
if (odd) {
buf[1][0] = (buf[1][0] + b_v) / 2;
- buf[2][0] = (buf[2][0] + g_u) / 2;
+ buf[2][0] = (buf[2][0] + g_u_s) / 2;
buf[1][1] = buf[1][0];
buf[2][1] = buf[2][0];
break;
}
buf[1][0] = b_v;
- buf[2][0] = g_u;
+ buf[2][0] = g_u_s;
break;
case V4L2_PIX_FMT_NV12:
case V4L2_PIX_FMT_NV12M:
case V4L2_PIX_FMT_NV16:
case V4L2_PIX_FMT_NV16M:
- buf[0][offset] = r_y;
+ buf[0][offset] = r_y_h;
if (odd) {
- buf[1][0] = (buf[1][0] + g_u) / 2;
+ buf[1][0] = (buf[1][0] + g_u_s) / 2;
buf[1][1] = (buf[1][1] + b_v) / 2;
break;
}
- buf[1][0] = g_u;
+ buf[1][0] = g_u_s;
buf[1][1] = b_v;
break;
case V4L2_PIX_FMT_NV21:
case V4L2_PIX_FMT_NV21M:
case V4L2_PIX_FMT_NV61:
case V4L2_PIX_FMT_NV61M:
- buf[0][offset] = r_y;
+ buf[0][offset] = r_y_h;
if (odd) {
buf[1][0] = (buf[1][0] + b_v) / 2;
- buf[1][1] = (buf[1][1] + g_u) / 2;
+ buf[1][1] = (buf[1][1] + g_u_s) / 2;
break;
}
buf[1][0] = b_v;
- buf[1][1] = g_u;
+ buf[1][1] = g_u_s;
break;
case V4L2_PIX_FMT_YUV444M:
- buf[0][offset] = r_y;
- buf[1][offset] = g_u;
+ buf[0][offset] = r_y_h;
+ buf[1][offset] = g_u_s;
buf[2][offset] = b_v;
break;
case V4L2_PIX_FMT_YVU444M:
- buf[0][offset] = r_y;
+ buf[0][offset] = r_y_h;
buf[1][offset] = b_v;
- buf[2][offset] = g_u;
+ buf[2][offset] = g_u_s;
break;
case V4L2_PIX_FMT_NV24:
- buf[0][offset] = r_y;
- buf[1][2 * offset] = g_u;
+ buf[0][offset] = r_y_h;
+ buf[1][2 * offset] = g_u_s;
buf[1][2 * offset + 1] = b_v;
break;
case V4L2_PIX_FMT_NV42:
- buf[0][offset] = r_y;
+ buf[0][offset] = r_y_h;
buf[1][2 * offset] = b_v;
- buf[1][2 * offset + 1] = g_u;
+ buf[1][2 * offset + 1] = g_u_s;
break;
case V4L2_PIX_FMT_YUYV:
- buf[0][offset] = r_y;
+ buf[0][offset] = r_y_h;
if (odd) {
- buf[0][1] = (buf[0][1] + g_u) / 2;
+ buf[0][1] = (buf[0][1] + g_u_s) / 2;
buf[0][3] = (buf[0][3] + b_v) / 2;
break;
}
- buf[0][1] = g_u;
+ buf[0][1] = g_u_s;
buf[0][3] = b_v;
break;
case V4L2_PIX_FMT_UYVY:
- buf[0][offset + 1] = r_y;
+ buf[0][offset + 1] = r_y_h;
if (odd) {
- buf[0][0] = (buf[0][0] + g_u) / 2;
+ buf[0][0] = (buf[0][0] + g_u_s) / 2;
buf[0][2] = (buf[0][2] + b_v) / 2;
break;
}
- buf[0][0] = g_u;
+ buf[0][0] = g_u_s;
buf[0][2] = b_v;
break;
case V4L2_PIX_FMT_YVYU:
- buf[0][offset] = r_y;
+ buf[0][offset] = r_y_h;
if (odd) {
buf[0][1] = (buf[0][1] + b_v) / 2;
- buf[0][3] = (buf[0][3] + g_u) / 2;
+ buf[0][3] = (buf[0][3] + g_u_s) / 2;
break;
}
buf[0][1] = b_v;
- buf[0][3] = g_u;
+ buf[0][3] = g_u_s;
break;
case V4L2_PIX_FMT_VYUY:
- buf[0][offset + 1] = r_y;
+ buf[0][offset + 1] = r_y_h;
if (odd) {
buf[0][0] = (buf[0][0] + b_v) / 2;
- buf[0][2] = (buf[0][2] + g_u) / 2;
+ buf[0][2] = (buf[0][2] + g_u_s) / 2;
break;
}
buf[0][0] = b_v;
- buf[0][2] = g_u;
+ buf[0][2] = g_u_s;
break;
case V4L2_PIX_FMT_RGB332:
- buf[0][offset] = (r_y << 5) | (g_u << 2) | b_v;
+ buf[0][offset] = (r_y_h << 5) | (g_u_s << 2) | b_v;
break;
case V4L2_PIX_FMT_YUV565:
case V4L2_PIX_FMT_RGB565:
- buf[0][offset] = (g_u << 5) | b_v;
- buf[0][offset + 1] = (r_y << 3) | (g_u >> 3);
+ buf[0][offset] = (g_u_s << 5) | b_v;
+ buf[0][offset + 1] = (r_y_h << 3) | (g_u_s >> 3);
break;
case V4L2_PIX_FMT_RGB565X:
- buf[0][offset] = (r_y << 3) | (g_u >> 3);
- buf[0][offset + 1] = (g_u << 5) | b_v;
+ buf[0][offset] = (r_y_h << 3) | (g_u_s >> 3);
+ buf[0][offset + 1] = (g_u_s << 5) | b_v;
break;
case V4L2_PIX_FMT_RGB444:
case V4L2_PIX_FMT_XRGB444:
@@ -1098,8 +1203,8 @@ static void gen_twopix(struct tpg_data *tpg,
/* fall through */
case V4L2_PIX_FMT_YUV444:
case V4L2_PIX_FMT_ARGB444:
- buf[0][offset] = (g_u << 4) | b_v;
- buf[0][offset + 1] = (alpha & 0xf0) | r_y;
+ buf[0][offset] = (g_u_s << 4) | b_v;
+ buf[0][offset + 1] = (alpha & 0xf0) | r_y_h;
break;
case V4L2_PIX_FMT_RGB555:
case V4L2_PIX_FMT_XRGB555:
@@ -1107,42 +1212,45 @@ static void gen_twopix(struct tpg_data *tpg,
/* fall through */
case V4L2_PIX_FMT_YUV555:
case V4L2_PIX_FMT_ARGB555:
- buf[0][offset] = (g_u << 5) | b_v;
- buf[0][offset + 1] = (alpha & 0x80) | (r_y << 2) | (g_u >> 3);
+ buf[0][offset] = (g_u_s << 5) | b_v;
+ buf[0][offset + 1] = (alpha & 0x80) | (r_y_h << 2)
+ | (g_u_s >> 3);
break;
case V4L2_PIX_FMT_RGB555X:
case V4L2_PIX_FMT_XRGB555X:
alpha = 0;
/* fall through */
case V4L2_PIX_FMT_ARGB555X:
- buf[0][offset] = (alpha & 0x80) | (r_y << 2) | (g_u >> 3);
- buf[0][offset + 1] = (g_u << 5) | b_v;
+ buf[0][offset] = (alpha & 0x80) | (r_y_h << 2) | (g_u_s >> 3);
+ buf[0][offset + 1] = (g_u_s << 5) | b_v;
break;
case V4L2_PIX_FMT_RGB24:
- buf[0][offset] = r_y;
- buf[0][offset + 1] = g_u;
+ case V4L2_PIX_FMT_HSV24:
+ buf[0][offset] = r_y_h;
+ buf[0][offset + 1] = g_u_s;
buf[0][offset + 2] = b_v;
break;
case V4L2_PIX_FMT_BGR24:
buf[0][offset] = b_v;
- buf[0][offset + 1] = g_u;
- buf[0][offset + 2] = r_y;
+ buf[0][offset + 1] = g_u_s;
+ buf[0][offset + 2] = r_y_h;
break;
case V4L2_PIX_FMT_BGR666:
- buf[0][offset] = (b_v << 2) | (g_u >> 4);
- buf[0][offset + 1] = (g_u << 4) | (r_y >> 2);
- buf[0][offset + 2] = r_y << 6;
+ buf[0][offset] = (b_v << 2) | (g_u_s >> 4);
+ buf[0][offset + 1] = (g_u_s << 4) | (r_y_h >> 2);
+ buf[0][offset + 2] = r_y_h << 6;
buf[0][offset + 3] = 0;
break;
case V4L2_PIX_FMT_RGB32:
case V4L2_PIX_FMT_XRGB32:
+ case V4L2_PIX_FMT_HSV32:
alpha = 0;
/* fall through */
case V4L2_PIX_FMT_YUV32:
case V4L2_PIX_FMT_ARGB32:
buf[0][offset] = alpha;
- buf[0][offset + 1] = r_y;
- buf[0][offset + 2] = g_u;
+ buf[0][offset + 1] = r_y_h;
+ buf[0][offset + 2] = g_u_s;
buf[0][offset + 3] = b_v;
break;
case V4L2_PIX_FMT_BGR32:
@@ -1151,87 +1259,87 @@ static void gen_twopix(struct tpg_data *tpg,
/* fall through */
case V4L2_PIX_FMT_ABGR32:
buf[0][offset] = b_v;
- buf[0][offset + 1] = g_u;
- buf[0][offset + 2] = r_y;
+ buf[0][offset + 1] = g_u_s;
+ buf[0][offset + 2] = r_y_h;
buf[0][offset + 3] = alpha;
break;
case V4L2_PIX_FMT_SBGGR8:
- buf[0][offset] = odd ? g_u : b_v;
- buf[1][offset] = odd ? r_y : g_u;
+ buf[0][offset] = odd ? g_u_s : b_v;
+ buf[1][offset] = odd ? r_y_h : g_u_s;
break;
case V4L2_PIX_FMT_SGBRG8:
- buf[0][offset] = odd ? b_v : g_u;
- buf[1][offset] = odd ? g_u : r_y;
+ buf[0][offset] = odd ? b_v : g_u_s;
+ buf[1][offset] = odd ? g_u_s : r_y_h;
break;
case V4L2_PIX_FMT_SGRBG8:
- buf[0][offset] = odd ? r_y : g_u;
- buf[1][offset] = odd ? g_u : b_v;
+ buf[0][offset] = odd ? r_y_h : g_u_s;
+ buf[1][offset] = odd ? g_u_s : b_v;
break;
case V4L2_PIX_FMT_SRGGB8:
- buf[0][offset] = odd ? g_u : r_y;
- buf[1][offset] = odd ? b_v : g_u;
+ buf[0][offset] = odd ? g_u_s : r_y_h;
+ buf[1][offset] = odd ? b_v : g_u_s;
break;
case V4L2_PIX_FMT_SBGGR10:
- buf[0][offset] = odd ? g_u << 2 : b_v << 2;
- buf[0][offset + 1] = odd ? g_u >> 6 : b_v >> 6;
- buf[1][offset] = odd ? r_y << 2 : g_u << 2;
- buf[1][offset + 1] = odd ? r_y >> 6 : g_u >> 6;
+ buf[0][offset] = odd ? g_u_s << 2 : b_v << 2;
+ buf[0][offset + 1] = odd ? g_u_s >> 6 : b_v >> 6;
+ buf[1][offset] = odd ? r_y_h << 2 : g_u_s << 2;
+ buf[1][offset + 1] = odd ? r_y_h >> 6 : g_u_s >> 6;
buf[0][offset] |= (buf[0][offset] >> 2) & 3;
buf[1][offset] |= (buf[1][offset] >> 2) & 3;
break;
case V4L2_PIX_FMT_SGBRG10:
- buf[0][offset] = odd ? b_v << 2 : g_u << 2;
- buf[0][offset + 1] = odd ? b_v >> 6 : g_u >> 6;
- buf[1][offset] = odd ? g_u << 2 : r_y << 2;
- buf[1][offset + 1] = odd ? g_u >> 6 : r_y >> 6;
+ buf[0][offset] = odd ? b_v << 2 : g_u_s << 2;
+ buf[0][offset + 1] = odd ? b_v >> 6 : g_u_s >> 6;
+ buf[1][offset] = odd ? g_u_s << 2 : r_y_h << 2;
+ buf[1][offset + 1] = odd ? g_u_s >> 6 : r_y_h >> 6;
buf[0][offset] |= (buf[0][offset] >> 2) & 3;
buf[1][offset] |= (buf[1][offset] >> 2) & 3;
break;
case V4L2_PIX_FMT_SGRBG10:
- buf[0][offset] = odd ? r_y << 2 : g_u << 2;
- buf[0][offset + 1] = odd ? r_y >> 6 : g_u >> 6;
- buf[1][offset] = odd ? g_u << 2 : b_v << 2;
- buf[1][offset + 1] = odd ? g_u >> 6 : b_v >> 6;
+ buf[0][offset] = odd ? r_y_h << 2 : g_u_s << 2;
+ buf[0][offset + 1] = odd ? r_y_h >> 6 : g_u_s >> 6;
+ buf[1][offset] = odd ? g_u_s << 2 : b_v << 2;
+ buf[1][offset + 1] = odd ? g_u_s >> 6 : b_v >> 6;
buf[0][offset] |= (buf[0][offset] >> 2) & 3;
buf[1][offset] |= (buf[1][offset] >> 2) & 3;
break;
case V4L2_PIX_FMT_SRGGB10:
- buf[0][offset] = odd ? g_u << 2 : r_y << 2;
- buf[0][offset + 1] = odd ? g_u >> 6 : r_y >> 6;
- buf[1][offset] = odd ? b_v << 2 : g_u << 2;
- buf[1][offset + 1] = odd ? b_v >> 6 : g_u >> 6;
+ buf[0][offset] = odd ? g_u_s << 2 : r_y_h << 2;
+ buf[0][offset + 1] = odd ? g_u_s >> 6 : r_y_h >> 6;
+ buf[1][offset] = odd ? b_v << 2 : g_u_s << 2;
+ buf[1][offset + 1] = odd ? b_v >> 6 : g_u_s >> 6;
buf[0][offset] |= (buf[0][offset] >> 2) & 3;
buf[1][offset] |= (buf[1][offset] >> 2) & 3;
break;
case V4L2_PIX_FMT_SBGGR12:
- buf[0][offset] = odd ? g_u << 4 : b_v << 4;
- buf[0][offset + 1] = odd ? g_u >> 4 : b_v >> 4;
- buf[1][offset] = odd ? r_y << 4 : g_u << 4;
- buf[1][offset + 1] = odd ? r_y >> 4 : g_u >> 4;
+ buf[0][offset] = odd ? g_u_s << 4 : b_v << 4;
+ buf[0][offset + 1] = odd ? g_u_s >> 4 : b_v >> 4;
+ buf[1][offset] = odd ? r_y_h << 4 : g_u_s << 4;
+ buf[1][offset + 1] = odd ? r_y_h >> 4 : g_u_s >> 4;
buf[0][offset] |= (buf[0][offset] >> 4) & 0xf;
buf[1][offset] |= (buf[1][offset] >> 4) & 0xf;
break;
case V4L2_PIX_FMT_SGBRG12:
- buf[0][offset] = odd ? b_v << 4 : g_u << 4;
- buf[0][offset + 1] = odd ? b_v >> 4 : g_u >> 4;
- buf[1][offset] = odd ? g_u << 4 : r_y << 4;
- buf[1][offset + 1] = odd ? g_u >> 4 : r_y >> 4;
+ buf[0][offset] = odd ? b_v << 4 : g_u_s << 4;
+ buf[0][offset + 1] = odd ? b_v >> 4 : g_u_s >> 4;
+ buf[1][offset] = odd ? g_u_s << 4 : r_y_h << 4;
+ buf[1][offset + 1] = odd ? g_u_s >> 4 : r_y_h >> 4;
buf[0][offset] |= (buf[0][offset] >> 4) & 0xf;
buf[1][offset] |= (buf[1][offset] >> 4) & 0xf;
break;
case V4L2_PIX_FMT_SGRBG12:
- buf[0][offset] = odd ? r_y << 4 : g_u << 4;
- buf[0][offset + 1] = odd ? r_y >> 4 : g_u >> 4;
- buf[1][offset] = odd ? g_u << 4 : b_v << 4;
- buf[1][offset + 1] = odd ? g_u >> 4 : b_v >> 4;
+ buf[0][offset] = odd ? r_y_h << 4 : g_u_s << 4;
+ buf[0][offset + 1] = odd ? r_y_h >> 4 : g_u_s >> 4;
+ buf[1][offset] = odd ? g_u_s << 4 : b_v << 4;
+ buf[1][offset + 1] = odd ? g_u_s >> 4 : b_v >> 4;
buf[0][offset] |= (buf[0][offset] >> 4) & 0xf;
buf[1][offset] |= (buf[1][offset] >> 4) & 0xf;
break;
case V4L2_PIX_FMT_SRGGB12:
- buf[0][offset] = odd ? g_u << 4 : r_y << 4;
- buf[0][offset + 1] = odd ? g_u >> 4 : r_y >> 4;
- buf[1][offset] = odd ? b_v << 4 : g_u << 4;
- buf[1][offset + 1] = odd ? b_v >> 4 : g_u >> 4;
+ buf[0][offset] = odd ? g_u_s << 4 : r_y_h << 4;
+ buf[0][offset + 1] = odd ? g_u_s >> 4 : r_y_h >> 4;
+ buf[1][offset] = odd ? b_v << 4 : g_u_s << 4;
+ buf[1][offset + 1] = odd ? b_v >> 4 : g_u_s >> 4;
buf[0][offset] |= (buf[0][offset] >> 4) & 0xf;
buf[1][offset] |= (buf[1][offset] >> 4) & 0xf;
break;
@@ -1828,6 +1936,7 @@ static void tpg_recalc(struct tpg_data *tpg)
tpg->recalc_lines = true;
tpg->real_xfer_func = tpg->xfer_func;
tpg->real_ycbcr_enc = tpg->ycbcr_enc;
+ tpg->real_hsv_enc = tpg->hsv_enc;
tpg->real_quantization = tpg->quantization;
if (tpg->xfer_func == V4L2_XFER_FUNC_DEFAULT)
@@ -1840,7 +1949,8 @@ static void tpg_recalc(struct tpg_data *tpg)
if (tpg->quantization == V4L2_QUANTIZATION_DEFAULT)
tpg->real_quantization =
- V4L2_MAP_QUANTIZATION_DEFAULT(!tpg->is_yuv,
+ V4L2_MAP_QUANTIZATION_DEFAULT(
+ tpg->color_enc != TGP_COLOR_ENC_YCBCR,
tpg->colorspace, tpg->real_ycbcr_enc);
tpg_precalculate_colors(tpg);
@@ -1887,11 +1997,28 @@ static int tpg_pattern_avg(const struct tpg_data *tpg,
return -1;
}
+static const char *tpg_color_enc_str(enum tgp_color_enc color_enc)
+{
+ switch (color_enc) {
+ case TGP_COLOR_ENC_HSV:
+ return "HSV";
+ case TGP_COLOR_ENC_YCBCR:
+ return "Y'CbCr";
+ case TGP_COLOR_ENC_LUMA:
+ return "Luma";
+ case TGP_COLOR_ENC_RGB:
+ default:
+ return "R'G'B";
+
+ }
+}
+
void tpg_log_status(struct tpg_data *tpg)
{
pr_info("tpg source WxH: %ux%u (%s)\n",
- tpg->src_width, tpg->src_height,
- tpg->is_yuv ? "YCbCr" : "RGB");
+ tpg->src_width, tpg->src_height,
+ tpg_color_enc_str(tpg->color_enc));
pr_info("tpg field: %u\n", tpg->field);
pr_info("tpg crop: %ux%u@%dx%d\n", tpg->crop.width, tpg->crop.height,
tpg->crop.left, tpg->crop.top);
@@ -1900,6 +2027,7 @@ void tpg_log_status(struct tpg_data *tpg)
pr_info("tpg colorspace: %d\n", tpg->colorspace);
pr_info("tpg transfer function: %d/%d\n", tpg->xfer_func, tpg->real_xfer_func);
pr_info("tpg Y'CbCr encoding: %d/%d\n", tpg->ycbcr_enc, tpg->real_ycbcr_enc);
+ pr_info("tpg HSV encoding: %d/%d\n", tpg->hsv_enc, tpg->real_hsv_enc);
pr_info("tpg quantization: %d/%d\n", tpg->quantization, tpg->real_quantization);
pr_info("tpg RGB range: %d/%d\n", tpg->rgb_range, tpg->real_rgb_range);
}
diff --git a/drivers/media/dvb-core/Kconfig b/drivers/media/dvb-core/Kconfig
index fa7a2490ed5f..eeef94a0c84e 100644
--- a/drivers/media/dvb-core/Kconfig
+++ b/drivers/media/dvb-core/Kconfig
@@ -5,7 +5,7 @@
config DVB_MAX_ADAPTERS
int "maximum number of DVB/ATSC adapters"
depends on DVB_CORE
- default 8
+ default 16
range 1 255
help
Maximum number of DVB/ATSC adapters. Increasing this number
@@ -13,7 +13,7 @@ config DVB_MAX_ADAPTERS
if a much lower number of DVB/ATSC adapters is present.
Only values in the range 4-32 are tested.
- If you are unsure about this, use the default value 8
+ If you are unsure about this, use the default value 16
config DVB_DYNAMIC_MINORS
bool "Dynamic DVB minor allocation"
@@ -27,3 +27,16 @@ config DVB_DYNAMIC_MINORS
will be required to manage the device nodes.
If you are unsure about this, say N here.
+
+config DVB_DEMUX_SECTION_LOSS_LOG
+ bool "Enable DVB demux section packet loss log"
+ depends on DVB_CORE
+ default n
+ help
+ Enable extra log messages meant to detect packet loss
+ inside the kernel.
+
+ Should not be enabled in normal cases, as the logs can
+ be very verbose.
+
+ If you are unsure about this, say N here.
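Further down, dvb_demux.c shows what the new symbol gates: the old edit-the-source toggle becomes an ordinary compile-time block. Schematically (argument names invented, macro names from the patch):

	#ifdef CONFIG_DVB_DEMUX_SECTION_LOSS_LOG
		dprintk("dvb_demux.c section buffer full loss: %d/%d\n",
			lost, total);
	#endif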
diff --git a/drivers/media/dvb-core/Makefile b/drivers/media/dvb-core/Makefile
index 8f22bcd7c1f9..281bc89576e6 100644
--- a/drivers/media/dvb-core/Makefile
+++ b/drivers/media/dvb-core/Makefile
@@ -4,7 +4,7 @@
dvb-net-$(CONFIG_DVB_NET) := dvb_net.o
-dvb-core-objs := dvbdev.o dmxdev.o dvb_demux.o dvb_filter.o \
+dvb-core-objs := dvbdev.o dmxdev.o dvb_demux.o \
dvb_ca_en50221.o dvb_frontend.o \
$(dvb-net-y) dvb_ringbuffer.o dvb_math.o
diff --git a/drivers/media/dvb-core/demux.h b/drivers/media/dvb-core/demux.h
index aeda2b64931c..f8adf4506a45 100644
--- a/drivers/media/dvb-core/demux.h
+++ b/drivers/media/dvb-core/demux.h
@@ -103,7 +103,6 @@ struct dmx_ts_feed {
u16 pid,
int type,
enum dmx_ts_pes pes_type,
- size_t circular_buffer_size,
ktime_t timeout);
int (*start_filtering)(struct dmx_ts_feed *feed);
int (*stop_filtering)(struct dmx_ts_feed *feed);
@@ -181,7 +180,6 @@ struct dmx_section_feed {
/* public: */
int (*set)(struct dmx_section_feed *feed,
u16 pid,
- size_t circular_buffer_size,
int check_crc);
int (*allocate_filter)(struct dmx_section_feed *feed,
struct dmx_section_filter **filter);
@@ -206,8 +204,7 @@ struct dmx_section_feed {
* the &dmx_demux.
* Any TS packets that match the filter settings are copied to a circular
* buffer. The filtered TS packets are delivered to the client using this
- * callback function. The size of the circular buffer is controlled by the
- * circular_buffer_size parameter of the &dmx_ts_feed.@set function.
+ * callback function.
* It is expected that the @buffer1 and @buffer2 callback parameters point to
* addresses within the circular buffer, but other implementations are also
* possible. Note that the called party should not try to free the memory
diff --git a/drivers/media/dvb-core/dmxdev.c b/drivers/media/dvb-core/dmxdev.c
index 7b67e1dd97fd..efe55a3e80d0 100644
--- a/drivers/media/dvb-core/dmxdev.c
+++ b/drivers/media/dvb-core/dmxdev.c
@@ -20,6 +20,8 @@
*
*/
+#define pr_fmt(fmt) "dmxdev: " fmt
+
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
@@ -36,7 +38,11 @@ static int debug;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "Turn on/off debugging (default:off).");
-#define dprintk if (debug) printk
+#define dprintk(fmt, arg...) do { \
+ if (debug) \
+ printk(KERN_DEBUG pr_fmt("%s: " fmt), \
+ __func__, ##arg); \
+} while (0)
static int dvb_dmxdev_buffer_write(struct dvb_ringbuffer *buf,
const u8 *src, size_t len)
@@ -50,7 +56,7 @@ static int dvb_dmxdev_buffer_write(struct dvb_ringbuffer *buf,
free = dvb_ringbuffer_free(buf);
if (len > free) {
- dprintk("dmxdev: buffer overflow\n");
+ dprintk("buffer overflow\n");
return -EOVERFLOW;
}
@@ -126,7 +132,7 @@ static int dvb_dvr_open(struct inode *inode, struct file *file)
struct dmxdev *dmxdev = dvbdev->priv;
struct dmx_frontend *front;
- dprintk("function : %s\n", __func__);
+ dprintk("%s\n", __func__);
if (mutex_lock_interruptible(&dmxdev->mutex))
return -ERESTARTSYS;
@@ -258,7 +264,7 @@ static int dvb_dvr_set_buffer_size(struct dmxdev *dmxdev,
void *newmem;
void *oldmem;
- dprintk("function : %s\n", __func__);
+ dprintk("%s\n", __func__);
if (buf->size == size)
return 0;
@@ -367,7 +373,7 @@ static int dvb_dmxdev_section_callback(const u8 *buffer1, size_t buffer1_len,
return 0;
}
del_timer(&dmxdevfilter->timer);
- dprintk("dmxdev: section callback %*ph\n", 6, buffer1);
+ dprintk("section callback %*ph\n", 6, buffer1);
ret = dvb_dmxdev_buffer_write(&dmxdevfilter->buffer, buffer1,
buffer1_len);
if (ret == buffer1_len) {
@@ -589,7 +595,7 @@ static int dvb_dmxdev_start_feed(struct dmxdev *dmxdev,
tsfeed = feed->ts;
tsfeed->priv = filter;
- ret = tsfeed->set(tsfeed, feed->pid, ts_type, ts_pes, 32768, timeout);
+ ret = tsfeed->set(tsfeed, feed->pid, ts_type, ts_pes, timeout);
if (ret < 0) {
dmxdev->demux->release_ts_feed(dmxdev->demux, tsfeed);
return ret;
@@ -655,15 +661,15 @@ static int dvb_dmxdev_filter_start(struct dmxdev_filter *filter)
secfeed,
dvb_dmxdev_section_callback);
if (ret < 0) {
- printk("DVB (%s): could not alloc feed\n",
+ pr_err("DVB (%s): could not alloc feed\n",
__func__);
return ret;
}
- ret = (*secfeed)->set(*secfeed, para->pid, 32768,
+ ret = (*secfeed)->set(*secfeed, para->pid,
(para->flags & DMX_CHECK_CRC) ? 1 : 0);
if (ret < 0) {
- printk("DVB (%s): could not set feed\n",
+ pr_err("DVB (%s): could not set feed\n",
__func__);
dvb_dmxdev_feed_restart(filter);
return ret;
@@ -844,7 +850,7 @@ static int dvb_dmxdev_filter_set(struct dmxdev *dmxdev,
struct dmxdev_filter *dmxdevfilter,
struct dmx_sct_filter_params *params)
{
- dprintk("function : %s, PID=0x%04x, flags=%02x, timeout=%d\n",
+ dprintk("%s: PID=0x%04x, flags=%02x, timeout=%d\n",
__func__, params->pid, params->flags, params->timeout);
dvb_dmxdev_filter_stop(dmxdevfilter);
@@ -1184,7 +1190,7 @@ static unsigned int dvb_dvr_poll(struct file *file, poll_table *wait)
struct dmxdev *dmxdev = dvbdev->priv;
unsigned int mask = 0;
- dprintk("function : %s\n", __func__);
+ dprintk("%s\n", __func__);
if (dmxdev->exit)
return POLLERR;
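The logging scheme this file converges on, as a self-contained template (the "demo" module name is invented; everything else mirrors the patch): pr_fmt() prefixes every pr_*() call in the translation unit, and the local dprintk() adds the function name whenever the debug module parameter is set.

	#define pr_fmt(fmt) "demo: " fmt	/* must precede the includes */

	#include <linux/module.h>
	#include <linux/printk.h>

	static int debug;
	module_param(debug, int, 0644);
	MODULE_PARM_DESC(debug, "Turn on/off debugging (default:off).");

	#define dprintk(fmt, arg...) do { \
		if (debug) \
			printk(KERN_DEBUG pr_fmt("%s: " fmt), \
			       __func__, ##arg); \
	} while (0)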
diff --git a/drivers/media/dvb-core/dvb-usb-ids.h b/drivers/media/dvb-core/dvb-usb-ids.h
index a7a4674ccc40..779f4224b63e 100644
--- a/drivers/media/dvb-core/dvb-usb-ids.h
+++ b/drivers/media/dvb-core/dvb-usb-ids.h
@@ -262,6 +262,7 @@
#define USB_PID_TECHNOTREND_CONNECT_CT2_4650_CI 0x3012
#define USB_PID_TECHNOTREND_CONNECT_CT2_4650_CI_2 0x3015
#define USB_PID_TECHNOTREND_TVSTICK_CT2_4400 0x3014
+#define USB_PID_TECHNOTREND_CONNECT_S2_4650_CI 0x3017
#define USB_PID_TERRATEC_CINERGY_DT_XS_DIVERSITY 0x005a
#define USB_PID_TERRATEC_CINERGY_DT_XS_DIVERSITY_2 0x0081
#define USB_PID_TERRATEC_CINERGY_HT_USB_XE 0x0058
@@ -411,4 +412,5 @@
#define USB_PID_SVEON_STV27 0xd3af
#define USB_PID_TURBOX_DTT_2000 0xd3a4
#define USB_PID_WINTV_SOLOHD 0x0264
+#define USB_PID_EVOLVEO_XTRATV_STICK 0xa115
#endif
diff --git a/drivers/media/dvb-core/dvb_ca_en50221.c b/drivers/media/dvb-core/dvb_ca_en50221.c
index b5b5b195ea7f..fd893141211c 100644
--- a/drivers/media/dvb-core/dvb_ca_en50221.c
+++ b/drivers/media/dvb-core/dvb_ca_en50221.c
@@ -28,6 +28,8 @@
* Or, point your browser to http://www.gnu.org/copyleft/gpl.html
*/
+#define pr_fmt(fmt) "dvb_ca_en50221: " fmt
+
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/list.h>
@@ -46,7 +48,10 @@ static int dvb_ca_en50221_debug;
module_param_named(cam_debug, dvb_ca_en50221_debug, int, 0644);
MODULE_PARM_DESC(cam_debug, "enable verbose debug messages");
-#define dprintk if (dvb_ca_en50221_debug) printk
+#define dprintk(fmt, arg...) do { \
+ if (dvb_ca_en50221_debug) \
+ printk(KERN_DEBUG pr_fmt("%s: " fmt), __func__, ##arg);\
+} while (0)
#define INIT_TIMEOUT_SECS 10
@@ -166,7 +171,7 @@ static void dvb_ca_private_free(struct dvb_ca_private *ca)
{
unsigned int i;
- dvb_unregister_device(ca->dvbdev);
+ dvb_free_device(ca->dvbdev);
for (i = 0; i < ca->slot_count; i++)
vfree(ca->slot_info[i].rx_buffer.data);
@@ -298,7 +303,8 @@ static int dvb_ca_en50221_wait_if_status(struct dvb_ca_private *ca, int slot,
/* if we got the flags, it was successful! */
if (res & waitfor) {
- dprintk("%s succeeded timeout:%lu\n", __func__, jiffies - start);
+ dprintk("%s succeeded timeout:%lu\n",
+ __func__, jiffies - start);
return 0;
}
@@ -519,8 +525,9 @@ static int dvb_ca_en50221_parse_attributes(struct dvb_ca_private *ca, int slot)
/* is it a version we support? */
if (strncmp(dvb_str + 8, "1.00", 4)) {
- printk("dvb_ca adapter %d: Unsupported DVB CAM module version %c%c%c%c\n",
- ca->dvbdev->adapter->num, dvb_str[8], dvb_str[9], dvb_str[10], dvb_str[11]);
+ pr_err("dvb_ca adapter %d: Unsupported DVB CAM module version %c%c%c%c\n",
+ ca->dvbdev->adapter->num, dvb_str[8], dvb_str[9],
+ dvb_str[10], dvb_str[11]);
return -EINVAL;
}
@@ -557,8 +564,8 @@ static int dvb_ca_en50221_parse_attributes(struct dvb_ca_private *ca, int slot)
break;
default: /* Unknown tuple type - just skip this tuple and move to the next one */
- dprintk("dvb_ca: Skipping unknown tuple type:0x%x length:0x%x\n", tupleType,
- tupleLength);
+ dprintk("dvb_ca: Skipping unknown tuple type:0x%x length:0x%x\n",
+ tupleType, tupleLength);
break;
}
}
@@ -567,7 +574,8 @@ static int dvb_ca_en50221_parse_attributes(struct dvb_ca_private *ca, int slot)
return -EINVAL;
dprintk("Valid DVB CAM detected MANID:%x DEVID:%x CONFIGBASE:0x%x CONFIGOPTION:0x%x\n",
- manfid, devid, ca->slot_info[slot].config_base, ca->slot_info[slot].config_option);
+ manfid, devid, ca->slot_info[slot].config_base,
+ ca->slot_info[slot].config_option);
// success!
return 0;
@@ -661,14 +669,15 @@ static int dvb_ca_en50221_read_data(struct dvb_ca_private *ca, int slot, u8 * eb
/* check it will fit */
if (ebuf == NULL) {
if (bytes_read > ca->slot_info[slot].link_buf_size) {
- printk("dvb_ca adapter %d: CAM tried to send a buffer larger than the link buffer size (%i > %i)!\n",
- ca->dvbdev->adapter->num, bytes_read, ca->slot_info[slot].link_buf_size);
+ pr_err("dvb_ca adapter %d: CAM tried to send a buffer larger than the link buffer size (%i > %i)!\n",
+ ca->dvbdev->adapter->num, bytes_read,
+ ca->slot_info[slot].link_buf_size);
ca->slot_info[slot].slot_state = DVB_CA_SLOTSTATE_LINKINIT;
status = -EIO;
goto exit;
}
if (bytes_read < 2) {
- printk("dvb_ca adapter %d: CAM sent a buffer that was less than 2 bytes!\n",
+ pr_err("dvb_ca adapter %d: CAM sent a buffer that was less than 2 bytes!\n",
ca->dvbdev->adapter->num);
ca->slot_info[slot].slot_state = DVB_CA_SLOTSTATE_LINKINIT;
status = -EIO;
@@ -676,7 +685,7 @@ static int dvb_ca_en50221_read_data(struct dvb_ca_private *ca, int slot, u8 * eb
}
} else {
if (bytes_read > ecount) {
- printk("dvb_ca adapter %d: CAM tried to send a buffer larger than the ecount size!\n",
+ pr_err("dvb_ca adapter %d: CAM tried to send a buffer larger than the ecount size!\n",
ca->dvbdev->adapter->num);
status = -EIO;
goto exit;
@@ -1062,7 +1071,7 @@ static int dvb_ca_en50221_thread(void *data)
case DVB_CA_SLOTSTATE_WAITREADY:
if (time_after(jiffies, ca->slot_info[slot].timeout)) {
- printk("dvb_ca adaptor %d: PC card did not respond :(\n",
+ pr_err("dvb_ca adaptor %d: PC card did not respond :(\n",
ca->dvbdev->adapter->num);
ca->slot_info[slot].slot_state = DVB_CA_SLOTSTATE_INVALID;
dvb_ca_en50221_thread_update_delay(ca);
@@ -1084,14 +1093,14 @@ static int dvb_ca_en50221_thread(void *data)
}
}
- printk("dvb_ca adapter %d: Invalid PC card inserted :(\n",
+ pr_err("dvb_ca adapter %d: Invalid PC card inserted :(\n",
ca->dvbdev->adapter->num);
ca->slot_info[slot].slot_state = DVB_CA_SLOTSTATE_INVALID;
dvb_ca_en50221_thread_update_delay(ca);
break;
}
if (dvb_ca_en50221_set_configoption(ca, slot) != 0) {
- printk("dvb_ca adapter %d: Unable to initialise CAM :(\n",
+ pr_err("dvb_ca adapter %d: Unable to initialise CAM :(\n",
ca->dvbdev->adapter->num);
ca->slot_info[slot].slot_state = DVB_CA_SLOTSTATE_INVALID;
dvb_ca_en50221_thread_update_delay(ca);
@@ -1099,7 +1108,7 @@ static int dvb_ca_en50221_thread(void *data)
}
if (ca->pub->write_cam_control(ca->pub, slot,
CTRLIF_COMMAND, CMDREG_RS) != 0) {
- printk("dvb_ca adapter %d: Unable to reset CAM IF\n",
+ pr_err("dvb_ca adapter %d: Unable to reset CAM IF\n",
ca->dvbdev->adapter->num);
ca->slot_info[slot].slot_state = DVB_CA_SLOTSTATE_INVALID;
dvb_ca_en50221_thread_update_delay(ca);
@@ -1114,7 +1123,7 @@ static int dvb_ca_en50221_thread(void *data)
case DVB_CA_SLOTSTATE_WAITFR:
if (time_after(jiffies, ca->slot_info[slot].timeout)) {
- printk("dvb_ca adapter %d: DVB CAM did not respond :(\n",
+ pr_err("dvb_ca adapter %d: DVB CAM did not respond :(\n",
ca->dvbdev->adapter->num);
ca->slot_info[slot].slot_state = DVB_CA_SLOTSTATE_INVALID;
dvb_ca_en50221_thread_update_delay(ca);
@@ -1141,7 +1150,8 @@ static int dvb_ca_en50221_thread(void *data)
}
}
- printk("dvb_ca adapter %d: DVB CAM link initialisation failed :(\n", ca->dvbdev->adapter->num);
+ pr_err("dvb_ca adapter %d: DVB CAM link initialisation failed :(\n",
+ ca->dvbdev->adapter->num);
ca->slot_info[slot].slot_state = DVB_CA_SLOTSTATE_INVALID;
dvb_ca_en50221_thread_update_delay(ca);
break;
@@ -1150,7 +1160,8 @@ static int dvb_ca_en50221_thread(void *data)
if (ca->slot_info[slot].rx_buffer.data == NULL) {
rxbuf = vmalloc(RX_BUFFER_SIZE);
if (rxbuf == NULL) {
- printk("dvb_ca adapter %d: Unable to allocate CAM rx buffer :(\n", ca->dvbdev->adapter->num);
+ pr_err("dvb_ca adapter %d: Unable to allocate CAM rx buffer :(\n",
+ ca->dvbdev->adapter->num);
ca->slot_info[slot].slot_state = DVB_CA_SLOTSTATE_INVALID;
dvb_ca_en50221_thread_update_delay(ca);
break;
@@ -1161,7 +1172,8 @@ static int dvb_ca_en50221_thread(void *data)
ca->pub->slot_ts_enable(ca->pub, slot);
ca->slot_info[slot].slot_state = DVB_CA_SLOTSTATE_RUNNING;
dvb_ca_en50221_thread_update_delay(ca);
- printk("dvb_ca adapter %d: DVB CAM detected and initialised successfully\n", ca->dvbdev->adapter->num);
+ pr_err("dvb_ca adapter %d: DVB CAM detected and initialised successfully\n",
+ ca->dvbdev->adapter->num);
break;
case DVB_CA_SLOTSTATE_RUNNING:
@@ -1497,7 +1509,8 @@ static ssize_t dvb_ca_en50221_io_read(struct file *file, char __user * buf,
pktlen = 2;
do {
if (idx == -1) {
- printk("dvb_ca adapter %d: BUG: read packet ended before last_fragment encountered\n", ca->dvbdev->adapter->num);
+ pr_err("dvb_ca adapter %d: BUG: read packet ended before last_fragment encountered\n",
+ ca->dvbdev->adapter->num);
status = -EIO;
goto exit;
}
@@ -1755,8 +1768,8 @@ int dvb_ca_en50221_init(struct dvb_adapter *dvb_adapter,
ca->dvbdev->adapter->num, ca->dvbdev->id);
if (IS_ERR(ca->thread)) {
ret = PTR_ERR(ca->thread);
- printk("dvb_ca_init: failed to start kernel_thread (%d)\n",
- ret);
+ pr_err("dvb_ca_init: failed to start kernel_thread (%d)\n",
+ ret);
goto unregister_device;
}
return 0;
@@ -1794,6 +1807,7 @@ void dvb_ca_en50221_release(struct dvb_ca_en50221 *pubca)
for (i = 0; i < ca->slot_count; i++) {
dvb_ca_en50221_slot_shutdown(ca, i);
}
+ dvb_remove_device(ca->dvbdev);
dvb_ca_private_put(ca);
pubca->private = NULL;
}
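Why the teardown is split (sketch; all identifiers appear in the hunks above): the CA node may still be held open when release runs, so the device node is removed immediately while the backing memory is freed only once the last reference is dropped.

	dvb_remove_device(ca->dvbdev);	/* node gone, no new opens */
	dvb_ca_private_put(ca);		/* refcount release eventually runs
					 * dvb_free_device() and frees ca */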
diff --git a/drivers/media/dvb-core/dvb_demux.c b/drivers/media/dvb-core/dvb_demux.c
index a0cf7b0d03e8..3ad0b2cd26b1 100644
--- a/drivers/media/dvb-core/dvb_demux.c
+++ b/drivers/media/dvb-core/dvb_demux.c
@@ -21,6 +21,8 @@
*
*/
+#define pr_fmt(fmt) "dvb_demux: " fmt
+
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
@@ -34,12 +36,6 @@
#include "dvb_demux.h"
-#define NOBUFS
-/*
-** #define DVB_DEMUX_SECTION_LOSS_LOG to monitor payload loss in the syslog
-*/
-// #define DVB_DEMUX_SECTION_LOSS_LOG
-
static int dvb_demux_tscheck;
module_param(dvb_demux_tscheck, int, 0644);
MODULE_PARM_DESC(dvb_demux_tscheck,
@@ -55,10 +51,13 @@ module_param(dvb_demux_feed_err_pkts, int, 0644);
MODULE_PARM_DESC(dvb_demux_feed_err_pkts,
"when set to 0, drop packets with the TEI bit set (1 by default)");
-#define dprintk_tscheck(x...) do { \
- if (dvb_demux_tscheck && printk_ratelimit()) \
- printk(x); \
- } while (0)
+#define dprintk(fmt, arg...) \
+ printk(KERN_DEBUG pr_fmt("%s: " fmt), __func__, ##arg)
+
+#define dprintk_tscheck(x...) do { \
+ if (dvb_demux_tscheck && printk_ratelimit()) \
+ dprintk(x); \
+} while (0)
/******************************************************************************
* static inlined helper functions
@@ -109,21 +108,23 @@ static inline int dvb_dmx_swfilter_payload(struct dvb_demux_feed *feed,
{
int count = payload(buf);
int p;
- //int ccok;
- //u8 cc;
+#ifdef CONFIG_DVB_DEMUX_SECTION_LOSS_LOG
+ int ccok;
+ u8 cc;
+#endif
if (count == 0)
return -1;
p = 188 - count;
- /*
+#ifdef CONFIG_DVB_DEMUX_SECTION_LOSS_LOG
cc = buf[3] & 0x0f;
ccok = ((feed->cc + 1) & 0x0f) == cc;
feed->cc = cc;
if (!ccok)
- printk("missed packet!\n");
- */
+ dprintk("missed packet!\n");
+#endif
if (buf[1] & 0x40) // PUSI ?
feed->peslen = 0xfffa;
@@ -189,7 +190,7 @@ static void dvb_dmx_swfilter_section_new(struct dvb_demux_feed *feed)
{
struct dmx_section_feed *sec = &feed->feed.sec;
-#ifdef DVB_DEMUX_SECTION_LOSS_LOG
+#ifdef CONFIG_DVB_DEMUX_SECTION_LOSS_LOG
if (sec->secbufp < sec->tsfeedp) {
int i, n = sec->tsfeedp - sec->secbufp;
@@ -199,12 +200,12 @@ static void dvb_dmx_swfilter_section_new(struct dvb_demux_feed *feed)
* but just first and last.
*/
if (sec->secbuf[0] != 0xff || sec->secbuf[n - 1] != 0xff) {
- printk("dvb_demux.c section ts padding loss: %d/%d\n",
+ dprintk("dvb_demux.c section ts padding loss: %d/%d\n",
n, sec->tsfeedp);
- printk("dvb_demux.c pad data:");
+ dprintk("dvb_demux.c pad data:");
for (i = 0; i < n; i++)
- printk(" %02x", sec->secbuf[i]);
- printk("\n");
+ pr_cont(" %02x", sec->secbuf[i]);
+ pr_cont("\n");
}
}
#endif
@@ -242,8 +243,8 @@ static int dvb_dmx_swfilter_section_copy_dump(struct dvb_demux_feed *feed,
return 0;
if (sec->tsfeedp + len > DMX_MAX_SECFEED_SIZE) {
-#ifdef DVB_DEMUX_SECTION_LOSS_LOG
- printk("dvb_demux.c section buffer full loss: %d/%d\n",
+#ifdef CONFIG_DVB_DEMUX_SECTION_LOSS_LOG
+ dprintk("dvb_demux.c section buffer full loss: %d/%d\n",
sec->tsfeedp + len - DMX_MAX_SECFEED_SIZE,
DMX_MAX_SECFEED_SIZE);
#endif
@@ -276,9 +277,9 @@ static int dvb_dmx_swfilter_section_copy_dump(struct dvb_demux_feed *feed,
/* dump [secbuf .. secbuf+seclen) */
if (feed->pusi_seen)
dvb_dmx_swfilter_section_feed(feed);
-#ifdef DVB_DEMUX_SECTION_LOSS_LOG
+#ifdef CONFIG_DVB_DEMUX_SECTION_LOSS_LOG
else
- printk("dvb_demux.c pusi not seen, discarding section data\n");
+ dprintk("dvb_demux.c pusi not seen, discarding section data\n");
#endif
sec->secbufp += seclen; /* secbufp and secbuf moving together is */
sec->secbuf += seclen; /* redundant but saves pointer arithmetic */
@@ -312,9 +313,9 @@ static int dvb_dmx_swfilter_section_packet(struct dvb_demux_feed *feed,
}
if (!ccok || dc_i) {
-#ifdef DVB_DEMUX_SECTION_LOSS_LOG
- printk("dvb_demux.c discontinuity detected %d bytes lost\n",
- count);
+#ifdef CONFIG_DVB_DEMUX_SECTION_LOSS_LOG
+ dprintk("dvb_demux.c discontinuity detected %d bytes lost\n",
+ count);
/*
* those bytes under some circumstances will again be reported
* in the following dvb_dmx_swfilter_section_new
@@ -344,9 +345,10 @@ static int dvb_dmx_swfilter_section_packet(struct dvb_demux_feed *feed,
dvb_dmx_swfilter_section_copy_dump(feed, after,
after_len);
}
-#ifdef DVB_DEMUX_SECTION_LOSS_LOG
+#ifdef CONFIG_DVB_DEMUX_SECTION_LOSS_LOG
else if (count > 0)
- printk("dvb_demux.c PUSI=1 but %d bytes lost\n", count);
+ dprintk("dvb_demux.c PUSI=1 but %d bytes lost\n",
+ count);
#endif
} else {
/* PUSI=0 (is not set), no section boundary */
@@ -415,9 +417,9 @@ static void dvb_dmx_swfilter_packet(struct dvb_demux *demux, const u8 *buf)
1024);
speed_timedelta = ktime_ms_delta(cur_time,
demux->speed_last_time);
- printk(KERN_INFO "TS speed %llu Kbits/sec \n",
- div64_u64(speed_bytes,
- speed_timedelta));
+ dprintk("TS speed %llu Kbits/sec \n",
+ div64_u64(speed_bytes,
+ speed_timedelta));
}
demux->speed_last_time = cur_time;
@@ -426,8 +428,7 @@ static void dvb_dmx_swfilter_packet(struct dvb_demux *demux, const u8 *buf)
}
if (buf[1] & 0x80) {
- dprintk_tscheck("TEI detected. "
- "PID=0x%x data1=0x%x\n",
+ dprintk_tscheck("TEI detected. PID=0x%x data1=0x%x\n",
pid, buf[1]);
/* data in this packet can't be trusted - drop it unless
* module option dvb_demux_feed_err_pkts is set */
@@ -635,7 +636,7 @@ static void dvb_demux_feed_add(struct dvb_demux_feed *feed)
{
spin_lock_irq(&feed->demux->lock);
if (dvb_demux_feed_find(feed)) {
- printk(KERN_ERR "%s: feed already in list (type=%x state=%x pid=%x)\n",
+ pr_err("%s: feed already in list (type=%x state=%x pid=%x)\n",
__func__, feed->type, feed->state, feed->pid);
goto out;
}
@@ -649,7 +650,7 @@ static void dvb_demux_feed_del(struct dvb_demux_feed *feed)
{
spin_lock_irq(&feed->demux->lock);
if (!(dvb_demux_feed_find(feed))) {
- printk(KERN_ERR "%s: feed not in list (type=%x state=%x pid=%x)\n",
+ pr_err("%s: feed not in list (type=%x state=%x pid=%x)\n",
__func__, feed->type, feed->state, feed->pid);
goto out;
}
@@ -660,8 +661,7 @@ out:
}
static int dmx_ts_feed_set(struct dmx_ts_feed *ts_feed, u16 pid, int ts_type,
- enum dmx_ts_pes pes_type,
- size_t circular_buffer_size, ktime_t timeout)
+ enum dmx_ts_pes pes_type, ktime_t timeout)
{
struct dvb_demux_feed *feed = (struct dvb_demux_feed *)ts_feed;
struct dvb_demux *demux = feed->demux;
@@ -691,23 +691,10 @@ static int dmx_ts_feed_set(struct dmx_ts_feed *ts_feed, u16 pid, int ts_type,
dvb_demux_feed_add(feed);
feed->pid = pid;
- feed->buffer_size = circular_buffer_size;
feed->timeout = timeout;
feed->ts_type = ts_type;
feed->pes_type = pes_type;
- if (feed->buffer_size) {
-#ifdef NOBUFS
- feed->buffer = NULL;
-#else
- feed->buffer = vmalloc(feed->buffer_size);
- if (!feed->buffer) {
- mutex_unlock(&demux->mutex);
- return -ENOMEM;
- }
-#endif
- }
-
feed->state = DMX_STATE_READY;
mutex_unlock(&demux->mutex);
@@ -796,7 +783,6 @@ static int dvbdmx_allocate_ts_feed(struct dmx_demux *dmx,
feed->demux = demux;
feed->pid = 0xffff;
feed->peslen = 0xfffa;
- feed->buffer = NULL;
(*ts_feed) = &feed->feed.ts;
(*ts_feed)->parent = dmx;
@@ -833,10 +819,6 @@ static int dvbdmx_release_ts_feed(struct dmx_demux *dmx,
mutex_unlock(&demux->mutex);
return -EINVAL;
}
-#ifndef NOBUFS
- vfree(feed->buffer);
- feed->buffer = NULL;
-#endif
feed->state = DMX_STATE_FREE;
feed->filter->state = DMX_STATE_FREE;
@@ -888,8 +870,7 @@ static int dmx_section_feed_allocate_filter(struct dmx_section_feed *feed,
}
static int dmx_section_feed_set(struct dmx_section_feed *feed,
- u16 pid, size_t circular_buffer_size,
- int check_crc)
+ u16 pid, int check_crc)
{
struct dvb_demux_feed *dvbdmxfeed = (struct dvb_demux_feed *)feed;
struct dvb_demux *dvbdmx = dvbdmxfeed->demux;
@@ -903,19 +884,8 @@ static int dmx_section_feed_set(struct dmx_section_feed *feed,
dvb_demux_feed_add(dvbdmxfeed);
dvbdmxfeed->pid = pid;
- dvbdmxfeed->buffer_size = circular_buffer_size;
dvbdmxfeed->feed.sec.check_crc = check_crc;
-#ifdef NOBUFS
- dvbdmxfeed->buffer = NULL;
-#else
- dvbdmxfeed->buffer = vmalloc(dvbdmxfeed->buffer_size);
- if (!dvbdmxfeed->buffer) {
- mutex_unlock(&dvbdmx->mutex);
- return -ENOMEM;
- }
-#endif
-
dvbdmxfeed->state = DMX_STATE_READY;
mutex_unlock(&dvbdmx->mutex);
return 0;
@@ -1074,7 +1044,6 @@ static int dvbdmx_allocate_section_feed(struct dmx_demux *demux,
dvbdmxfeed->feed.sec.secbufp = dvbdmxfeed->feed.sec.seclen = 0;
dvbdmxfeed->feed.sec.tsfeedp = 0;
dvbdmxfeed->filter = NULL;
- dvbdmxfeed->buffer = NULL;
(*feed) = &dvbdmxfeed->feed.sec;
(*feed)->is_filtering = 0;
@@ -1103,10 +1072,6 @@ static int dvbdmx_release_section_feed(struct dmx_demux *demux,
mutex_unlock(&dvbdmx->mutex);
return -EINVAL;
}
-#ifndef NOBUFS
- vfree(dvbdmxfeed->buffer);
- dvbdmxfeed->buffer = NULL;
-#endif
dvbdmxfeed->state = DMX_STATE_FREE;
dvb_demux_feed_del(dvbdmxfeed);
@@ -1268,7 +1233,7 @@ int dvb_dmx_init(struct dvb_demux *dvbdemux)
dvbdemux->cnt_storage = vmalloc(MAX_PID + 1);
if (!dvbdemux->cnt_storage)
- printk(KERN_WARNING "Couldn't allocate memory for TS/TEI check. Disabling it\n");
+ pr_warn("Couldn't allocate memory for TS/TEI check. Disabling it\n");
INIT_LIST_HEAD(&dvbdemux->frontend_list);
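
For reference, the dprintk() calls introduced above depend on a pr_fmt/dprintk pair like the one this series adds to dvb_frontend.c; the exact macro in dvb_demux.c is not shown in these hunks, so the following is a sketch on that assumption:

    #define pr_fmt(fmt) "dvb_demux: " fmt   /* must precede the #includes */

    #define dprintk(fmt, arg...) \
            printk(KERN_DEBUG pr_fmt("%s: " fmt), __func__, ##arg)

With CONFIG_DVB_DEMUX_SECTION_LOSS_LOG now a Kconfig symbol rather than a bare define, the section-loss messages can be enabled from the build configuration instead of by editing the source.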
diff --git a/drivers/media/dvb-core/dvb_demux.h b/drivers/media/dvb-core/dvb_demux.h
index 5ed3cab4ad28..9235b008ea0a 100644
--- a/drivers/media/dvb-core/dvb_demux.h
+++ b/drivers/media/dvb-core/dvb_demux.h
@@ -80,8 +80,6 @@ struct dvb_demux_feed {
int type;
int state;
u16 pid;
- u8 *buffer;
- int buffer_size;
ktime_t timeout;
struct dvb_demux_filter *filter;
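
The buffer/buffer_size pair removed here was allocated (and freed) but, as far as these hunks show, never otherwise referenced, so the demux set() operations lose their circular_buffer_size parameter. A sketch of a hypothetical call site before and after (values illustrative; the real dvb_net.c conversions appear later in this patch):

    /* old: ts_feed->set(ts_feed, pid, TS_PACKET, DMX_PES_OTHER, 32768, timeout); */
    ret = ts_feed->set(ts_feed, pid, TS_PACKET, DMX_PES_OTHER, timeout);

    /* old: secfeed->set(secfeed, pid, 32768, 1); */
    ret = secfeed->set(secfeed, pid, 1 /* check_crc */);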
diff --git a/drivers/media/dvb-core/dvb_filter.c b/drivers/media/dvb-core/dvb_filter.c
deleted file mode 100644
index 772003fb1821..000000000000
--- a/drivers/media/dvb-core/dvb_filter.c
+++ /dev/null
@@ -1,603 +0,0 @@
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/string.h>
-#include "dvb_filter.h"
-
-#if 0
-static unsigned int bitrates[3][16] =
-{{0,32,64,96,128,160,192,224,256,288,320,352,384,416,448,0},
- {0,32,48,56,64,80,96,112,128,160,192,224,256,320,384,0},
- {0,32,40,48,56,64,80,96,112,128,160,192,224,256,320,0}};
-#endif
-
-static u32 freq[4] = {480, 441, 320, 0};
-
-static unsigned int ac3_bitrates[32] =
- {32,40,48,56,64,80,96,112,128,160,192,224,256,320,384,448,512,576,640,
- 0,0,0,0,0,0,0,0,0,0,0,0,0};
-
-static u32 ac3_frames[3][32] =
- {{64,80,96,112,128,160,192,224,256,320,384,448,512,640,768,896,1024,
- 1152,1280,0,0,0,0,0,0,0,0,0,0,0,0,0},
- {69,87,104,121,139,174,208,243,278,348,417,487,557,696,835,975,1114,
- 1253,1393,0,0,0,0,0,0,0,0,0,0,0,0,0},
- {96,120,144,168,192,240,288,336,384,480,576,672,768,960,1152,1344,
- 1536,1728,1920,0,0,0,0,0,0,0,0,0,0,0,0,0}};
-
-
-
-#if 0
-static void setup_ts2pes(ipack *pa, ipack *pv, u16 *pida, u16 *pidv,
- void (*pes_write)(u8 *buf, int count, void *data),
- void *priv)
-{
- dvb_filter_ipack_init(pa, IPACKS, pes_write);
- dvb_filter_ipack_init(pv, IPACKS, pes_write);
- pa->pid = pida;
- pv->pid = pidv;
- pa->data = priv;
- pv->data = priv;
-}
-#endif
-
-#if 0
-static void ts_to_pes(ipack *p, u8 *buf) // don't need count (=188)
-{
- u8 off = 0;
-
- if (!buf || !p ){
- printk("NULL POINTER IDIOT\n");
- return;
- }
- if (buf[1]&PAY_START) {
- if (p->plength == MMAX_PLENGTH-6 && p->found>6){
- p->plength = p->found-6;
- p->found = 0;
- send_ipack(p);
- dvb_filter_ipack_reset(p);
- }
- }
- if (buf[3] & ADAPT_FIELD) { // adaptation field?
- off = buf[4] + 1;
- if (off+4 > 187) return;
- }
- dvb_filter_instant_repack(buf+4+off, TS_SIZE-4-off, p);
-}
-#endif
-
-#if 0
-/* needs 5 byte input, returns picture coding type*/
-static int read_picture_header(u8 *headr, struct mpg_picture *pic, int field, int pr)
-{
- u8 pct;
-
- if (pr) printk( "Pic header: ");
- pic->temporal_reference[field] = (( headr[0] << 2 ) |
- (headr[1] & 0x03) )& 0x03ff;
- if (pr) printk( " temp ref: 0x%04x", pic->temporal_reference[field]);
-
- pct = ( headr[1] >> 2 ) & 0x07;
- pic->picture_coding_type[field] = pct;
- if (pr) {
- switch(pct){
- case I_FRAME:
- printk( " I-FRAME");
- break;
- case B_FRAME:
- printk( " B-FRAME");
- break;
- case P_FRAME:
- printk( " P-FRAME");
- break;
- }
- }
-
-
- pic->vinfo.vbv_delay = (( headr[1] >> 5 ) | ( headr[2] << 3) |
- ( (headr[3] & 0x1F) << 11) ) & 0xffff;
-
- if (pr) printk( " vbv delay: 0x%04x", pic->vinfo.vbv_delay);
-
- pic->picture_header_parameter = ( headr[3] & 0xe0 ) |
- ((headr[4] & 0x80) >> 3);
-
- if ( pct == B_FRAME ){
- pic->picture_header_parameter |= ( headr[4] >> 3 ) & 0x0f;
- }
- if (pr) printk( " pic head param: 0x%x",
- pic->picture_header_parameter);
-
- return pct;
-}
-#endif
-
-#if 0
-/* needs 4 byte input */
-static int read_gop_header(u8 *headr, struct mpg_picture *pic, int pr)
-{
- if (pr) printk("GOP header: ");
-
- pic->time_code = (( headr[0] << 17 ) | ( headr[1] << 9) |
- ( headr[2] << 1 ) | (headr[3] &0x01)) & 0x1ffffff;
-
- if (pr) printk(" time: %d:%d.%d ", (headr[0]>>2)& 0x1F,
- ((headr[0]<<4)& 0x30)| ((headr[1]>>4)& 0x0F),
- ((headr[1]<<3)& 0x38)| ((headr[2]>>5)& 0x0F));
-
- if ( ( headr[3] & 0x40 ) != 0 ){
- pic->closed_gop = 1;
- } else {
- pic->closed_gop = 0;
- }
- if (pr) printk("closed: %d", pic->closed_gop);
-
- if ( ( headr[3] & 0x20 ) != 0 ){
- pic->broken_link = 1;
- } else {
- pic->broken_link = 0;
- }
- if (pr) printk(" broken: %d\n", pic->broken_link);
-
- return 0;
-}
-#endif
-
-#if 0
-/* needs 8 byte input */
-static int read_sequence_header(u8 *headr, struct dvb_video_info *vi, int pr)
-{
- int sw;
- int form = -1;
-
- if (pr) printk("Reading sequence header\n");
-
- vi->horizontal_size = ((headr[1] &0xF0) >> 4) | (headr[0] << 4);
- vi->vertical_size = ((headr[1] &0x0F) << 8) | (headr[2]);
-
- sw = (int)((headr[3]&0xF0) >> 4) ;
-
- switch( sw ){
- case 1:
- if (pr)
- printk("Videostream: ASPECT: 1:1");
- vi->aspect_ratio = 100;
- break;
- case 2:
- if (pr)
- printk("Videostream: ASPECT: 4:3");
- vi->aspect_ratio = 133;
- break;
- case 3:
- if (pr)
- printk("Videostream: ASPECT: 16:9");
- vi->aspect_ratio = 177;
- break;
- case 4:
- if (pr)
- printk("Videostream: ASPECT: 2.21:1");
- vi->aspect_ratio = 221;
- break;
-
- case 5 ... 15:
- if (pr)
- printk("Videostream: ASPECT: reserved");
- vi->aspect_ratio = 0;
- break;
-
- default:
- vi->aspect_ratio = 0;
- return -1;
- }
-
- if (pr)
- printk(" Size = %dx%d",vi->horizontal_size,vi->vertical_size);
-
- sw = (int)(headr[3]&0x0F);
-
- switch ( sw ) {
- case 1:
- if (pr)
- printk(" FRate: 23.976 fps");
- vi->framerate = 23976;
- form = -1;
- break;
- case 2:
- if (pr)
- printk(" FRate: 24 fps");
- vi->framerate = 24000;
- form = -1;
- break;
- case 3:
- if (pr)
- printk(" FRate: 25 fps");
- vi->framerate = 25000;
- form = VIDEO_MODE_PAL;
- break;
- case 4:
- if (pr)
- printk(" FRate: 29.97 fps");
- vi->framerate = 29970;
- form = VIDEO_MODE_NTSC;
- break;
- case 5:
- if (pr)
- printk(" FRate: 30 fps");
- vi->framerate = 30000;
- form = VIDEO_MODE_NTSC;
- break;
- case 6:
- if (pr)
- printk(" FRate: 50 fps");
- vi->framerate = 50000;
- form = VIDEO_MODE_PAL;
- break;
- case 7:
- if (pr)
- printk(" FRate: 60 fps");
- vi->framerate = 60000;
- form = VIDEO_MODE_NTSC;
- break;
- }
-
- vi->bit_rate = (headr[4] << 10) | (headr[5] << 2) | (headr[6] & 0x03);
-
- vi->vbv_buffer_size
- = (( headr[6] & 0xF8) >> 3 ) | (( headr[7] & 0x1F )<< 5);
-
- if (pr){
- printk(" BRate: %d Mbit/s",4*(vi->bit_rate)/10000);
- printk(" vbvbuffer %d",16*1024*(vi->vbv_buffer_size));
- printk("\n");
- }
-
- vi->video_format = form;
-
- return 0;
-}
-#endif
-
-
-#if 0
-static int get_vinfo(u8 *mbuf, int count, struct dvb_video_info *vi, int pr)
-{
- u8 *headr;
- int found = 0;
- int c = 0;
-
- while (found < 4 && c+4 < count){
- u8 *b;
-
- b = mbuf+c;
- if ( b[0] == 0x00 && b[1] == 0x00 && b[2] == 0x01
- && b[3] == 0xb3) found = 4;
- else {
- c++;
- }
- }
-
- if (! found) return -1;
- c += 4;
- if (c+12 >= count) return -1;
- headr = mbuf+c;
- if (read_sequence_header(headr, vi, pr) < 0) return -1;
- vi->off = c-4;
- return 0;
-}
-#endif
-
-
-#if 0
-static int get_ainfo(u8 *mbuf, int count, struct dvb_audio_info *ai, int pr)
-{
- u8 *headr;
- int found = 0;
- int c = 0;
- int fr = 0;
-
- while (found < 2 && c < count){
- u8 b[2];
- memcpy( b, mbuf+c, 2);
-
- if ( b[0] == 0xff && (b[1] & 0xf8) == 0xf8)
- found = 2;
- else {
- c++;
- }
- }
-
- if (!found) return -1;
-
- if (c+3 >= count) return -1;
- headr = mbuf+c;
-
- ai->layer = (headr[1] & 0x06) >> 1;
-
- if (pr)
- printk("Audiostream: Layer: %d", 4-ai->layer);
-
-
- ai->bit_rate = bitrates[(3-ai->layer)][(headr[2] >> 4 )]*1000;
-
- if (pr){
- if (ai->bit_rate == 0)
- printk(" Bit rate: free");
- else if (ai->bit_rate == 0xf)
- printk(" BRate: reserved");
- else
- printk(" BRate: %d kb/s", ai->bit_rate/1000);
- }
-
- fr = (headr[2] & 0x0c ) >> 2;
- ai->frequency = freq[fr]*100;
- if (pr){
- if (ai->frequency == 3)
- printk(" Freq: reserved\n");
- else
- printk(" Freq: %d kHz\n",ai->frequency);
-
- }
- ai->off = c;
- return 0;
-}
-#endif
-
-
-int dvb_filter_get_ac3info(u8 *mbuf, int count, struct dvb_audio_info *ai, int pr)
-{
- u8 *headr;
- int found = 0;
- int c = 0;
- u8 frame = 0;
- int fr = 0;
-
- while ( !found && c < count){
- u8 *b = mbuf+c;
-
- if ( b[0] == 0x0b && b[1] == 0x77 )
- found = 1;
- else {
- c++;
- }
- }
-
- if (!found) return -1;
- if (pr)
- printk("Audiostream: AC3");
-
- ai->off = c;
- if (c+5 >= count) return -1;
-
- ai->layer = 0; // 0 for AC3
- headr = mbuf+c+2;
-
- frame = (headr[2]&0x3f);
- ai->bit_rate = ac3_bitrates[frame >> 1]*1000;
-
- if (pr)
- printk(" BRate: %d kb/s", (int) ai->bit_rate/1000);
-
- ai->frequency = (headr[2] & 0xc0 ) >> 6;
- fr = (headr[2] & 0xc0 ) >> 6;
- ai->frequency = freq[fr]*100;
- if (pr) printk (" Freq: %d Hz\n", (int) ai->frequency);
-
-
- ai->framesize = ac3_frames[fr][frame >> 1];
- if ((frame & 1) && (fr == 1)) ai->framesize++;
- ai->framesize = ai->framesize << 1;
- if (pr) printk (" Framesize %d\n",(int) ai->framesize);
-
-
- return 0;
-}
-EXPORT_SYMBOL(dvb_filter_get_ac3info);
-
-
-#if 0
-static u8 *skip_pes_header(u8 **bufp)
-{
- u8 *inbuf = *bufp;
- u8 *buf = inbuf;
- u8 *pts = NULL;
- int skip = 0;
-
- static const int mpeg1_skip_table[16] = {
- 1, 0xffff, 5, 10, 0xffff, 0xffff, 0xffff, 0xffff,
- 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff
- };
-
-
- if ((inbuf[6] & 0xc0) == 0x80){ /* mpeg2 */
- if (buf[7] & PTS_ONLY)
- pts = buf+9;
- else pts = NULL;
- buf = inbuf + 9 + inbuf[8];
- } else { /* mpeg1 */
- for (buf = inbuf + 6; *buf == 0xff; buf++)
- if (buf == inbuf + 6 + 16) {
- break;
- }
- if ((*buf & 0xc0) == 0x40)
- buf += 2;
- skip = mpeg1_skip_table [*buf >> 4];
- if (skip == 5 || skip == 10) pts = buf;
- else pts = NULL;
-
- buf += mpeg1_skip_table [*buf >> 4];
- }
-
- *bufp = buf;
- return pts;
-}
-#endif
-
-#if 0
-static void initialize_quant_matrix( u32 *matrix )
-{
- int i;
-
- matrix[0] = 0x08101013;
- matrix[1] = 0x10131616;
- matrix[2] = 0x16161616;
- matrix[3] = 0x1a181a1b;
- matrix[4] = 0x1b1b1a1a;
- matrix[5] = 0x1a1a1b1b;
- matrix[6] = 0x1b1d1d1d;
- matrix[7] = 0x2222221d;
- matrix[8] = 0x1d1d1b1b;
- matrix[9] = 0x1d1d2020;
- matrix[10] = 0x22222526;
- matrix[11] = 0x25232322;
- matrix[12] = 0x23262628;
- matrix[13] = 0x28283030;
- matrix[14] = 0x2e2e3838;
- matrix[15] = 0x3a454553;
-
- for ( i = 16 ; i < 32 ; i++ )
- matrix[i] = 0x10101010;
-}
-#endif
-
-#if 0
-static void initialize_mpg_picture(struct mpg_picture *pic)
-{
- int i;
-
- /* set MPEG1 */
- pic->mpeg1_flag = 1;
- pic->profile_and_level = 0x4A ; /* MP@LL */
- pic->progressive_sequence = 1;
- pic->low_delay = 0;
-
- pic->sequence_display_extension_flag = 0;
- for ( i = 0 ; i < 4 ; i++ ){
- pic->frame_centre_horizontal_offset[i] = 0;
- pic->frame_centre_vertical_offset[i] = 0;
- }
- pic->last_frame_centre_horizontal_offset = 0;
- pic->last_frame_centre_vertical_offset = 0;
-
- pic->picture_display_extension_flag[0] = 0;
- pic->picture_display_extension_flag[1] = 0;
- pic->sequence_header_flag = 0;
- pic->gop_flag = 0;
- pic->sequence_end_flag = 0;
-}
-#endif
-
-#if 0
-static void mpg_set_picture_parameter( int32_t field_type, struct mpg_picture *pic )
-{
- int16_t last_h_offset;
- int16_t last_v_offset;
-
- int16_t *p_h_offset;
- int16_t *p_v_offset;
-
- if ( pic->mpeg1_flag ){
- pic->picture_structure[field_type] = VIDEO_FRAME_PICTURE;
- pic->top_field_first = 0;
- pic->repeat_first_field = 0;
- pic->progressive_frame = 1;
- pic->picture_coding_parameter = 0x000010;
- }
-
- /* Reset flag */
- pic->picture_display_extension_flag[field_type] = 0;
-
- last_h_offset = pic->last_frame_centre_horizontal_offset;
- last_v_offset = pic->last_frame_centre_vertical_offset;
- if ( field_type == FIRST_FIELD ){
- p_h_offset = pic->frame_centre_horizontal_offset;
- p_v_offset = pic->frame_centre_vertical_offset;
- *p_h_offset = last_h_offset;
- *(p_h_offset + 1) = last_h_offset;
- *(p_h_offset + 2) = last_h_offset;
- *p_v_offset = last_v_offset;
- *(p_v_offset + 1) = last_v_offset;
- *(p_v_offset + 2) = last_v_offset;
- } else {
- pic->frame_centre_horizontal_offset[3] = last_h_offset;
- pic->frame_centre_vertical_offset[3] = last_v_offset;
- }
-}
-#endif
-
-#if 0
-static void init_mpg_picture( struct mpg_picture *pic, int chan, int32_t field_type)
-{
- pic->picture_header = 0;
- pic->sequence_header_data
- = ( INIT_HORIZONTAL_SIZE << 20 )
- | ( INIT_VERTICAL_SIZE << 8 )
- | ( INIT_ASPECT_RATIO << 4 )
- | ( INIT_FRAME_RATE );
- pic->mpeg1_flag = 0;
- pic->vinfo.horizontal_size
- = INIT_DISP_HORIZONTAL_SIZE;
- pic->vinfo.vertical_size
- = INIT_DISP_VERTICAL_SIZE;
- pic->picture_display_extension_flag[field_type]
- = 0;
- pic->pts_flag[field_type] = 0;
-
- pic->sequence_gop_header = 0;
- pic->picture_header = 0;
- pic->sequence_header_flag = 0;
- pic->gop_flag = 0;
- pic->sequence_end_flag = 0;
- pic->sequence_display_extension_flag = 0;
- pic->last_frame_centre_horizontal_offset = 0;
- pic->last_frame_centre_vertical_offset = 0;
- pic->channel = chan;
-}
-#endif
-
-void dvb_filter_pes2ts_init(struct dvb_filter_pes2ts *p2ts, unsigned short pid,
- dvb_filter_pes2ts_cb_t *cb, void *priv)
-{
- unsigned char *buf=p2ts->buf;
-
- buf[0]=0x47;
- buf[1]=(pid>>8);
- buf[2]=pid&0xff;
- p2ts->cc=0;
- p2ts->cb=cb;
- p2ts->priv=priv;
-}
-EXPORT_SYMBOL(dvb_filter_pes2ts_init);
-
-int dvb_filter_pes2ts(struct dvb_filter_pes2ts *p2ts, unsigned char *pes,
- int len, int payload_start)
-{
- unsigned char *buf=p2ts->buf;
- int ret=0, rest;
-
- //len=6+((pes[4]<<8)|pes[5]);
-
- if (payload_start)
- buf[1]|=0x40;
- else
- buf[1]&=~0x40;
- while (len>=184) {
- buf[3]=0x10|((p2ts->cc++)&0x0f);
- memcpy(buf+4, pes, 184);
- if ((ret=p2ts->cb(p2ts->priv, buf)))
- return ret;
- len-=184; pes+=184;
- buf[1]&=~0x40;
- }
- if (!len)
- return 0;
- buf[3]=0x30|((p2ts->cc++)&0x0f);
- rest=183-len;
- if (rest) {
- buf[5]=0x00;
- if (rest-1)
- memset(buf+6, 0xff, rest-1);
- }
- buf[4]=rest;
- memcpy(buf+5+rest, pes, len);
- return p2ts->cb(p2ts->priv, buf);
-}
-EXPORT_SYMBOL(dvb_filter_pes2ts);
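
Most of the deleted file was #if 0 dead code; the live remainder was the PES-to-TS packetizer and the AC3 info parser. Distilled from the removed dvb_filter_pes2ts() above, the 188-byte TS cell header it emitted reduces to the following sketch (pid and cc as in the removed code):

    u8 buf[TS_SIZE];               /* 188 bytes */

    buf[0] = 0x47;                 /* TS sync byte */
    buf[1] = pid >> 8;             /* 0x40 (PUSI) is OR'ed in for the first cell */
    buf[2] = pid & 0xff;
    buf[3] = 0x10 | (cc++ & 0x0f); /* payload only; 0x30 when the final short
                                      cell needs adaptation-field stuffing */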
diff --git a/drivers/media/dvb-core/dvb_frontend.c b/drivers/media/dvb-core/dvb_frontend.c
index 01511e5a5566..db74cb74d271 100644
--- a/drivers/media/dvb-core/dvb_frontend.c
+++ b/drivers/media/dvb-core/dvb_frontend.c
@@ -28,6 +28,8 @@
/* Enables DVBv3 compatibility bits at the headers */
#define __DVB_CORE__
+#define pr_fmt(fmt) "dvb_frontend: " fmt
+
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/sched.h>
@@ -67,6 +69,9 @@ MODULE_PARM_DESC(dvb_powerdown_on_sleep, "0: do not power down, 1: turn LNB volt
module_param(dvb_mfe_wait_time, int, 0644);
MODULE_PARM_DESC(dvb_mfe_wait_time, "Wait up to <mfe_wait_time> seconds on open() for multi-frontend to become available (default:5 seconds)");
+#define dprintk(fmt, arg...) \
+ printk(KERN_DEBUG pr_fmt("%s: " fmt), __func__, ##arg)
+
#define FESTATE_IDLE 1
#define FESTATE_RETUNE 2
#define FESTATE_TUNING_FAST 4
@@ -99,8 +104,6 @@ MODULE_PARM_DESC(dvb_mfe_wait_time, "Wait up to <mfe_wait_time> seconds on open(
static DEFINE_MUTEX(frontend_mutex);
struct dvb_frontend_private {
- struct kref refcount;
-
/* thread/frontend values */
struct dvb_device *dvbdev;
struct dvb_frontend_parameters parameters_out;
@@ -138,21 +141,30 @@ struct dvb_frontend_private {
#endif
};
-static void dvb_frontend_private_free(struct kref *ref)
+static void dvb_frontend_invoke_release(struct dvb_frontend *fe,
+ void (*release)(struct dvb_frontend *fe));
+
+static void dvb_frontend_free(struct kref *ref)
{
- struct dvb_frontend_private *fepriv =
- container_of(ref, struct dvb_frontend_private, refcount);
+ struct dvb_frontend *fe =
+ container_of(ref, struct dvb_frontend, refcount);
+ struct dvb_frontend_private *fepriv = fe->frontend_priv;
+
+ dvb_free_device(fepriv->dvbdev);
+
+ dvb_frontend_invoke_release(fe, fe->ops.release);
+
kfree(fepriv);
}
-static void dvb_frontend_private_put(struct dvb_frontend_private *fepriv)
+static void dvb_frontend_put(struct dvb_frontend *fe)
{
- kref_put(&fepriv->refcount, dvb_frontend_private_free);
+ kref_put(&fe->refcount, dvb_frontend_free);
}
-static void dvb_frontend_private_get(struct dvb_frontend_private *fepriv)
+static void dvb_frontend_get(struct dvb_frontend *fe)
{
- kref_get(&fepriv->refcount);
+ kref_get(&fe->refcount);
}
static void dvb_frontend_wakeup(struct dvb_frontend *fe);
@@ -1515,12 +1527,8 @@ static int dtv_set_frontend(struct dvb_frontend *fe);
static bool is_dvbv3_delsys(u32 delsys)
{
- bool status;
-
- status = (delsys == SYS_DVBT) || (delsys == SYS_DVBC_ANNEX_A) ||
- (delsys == SYS_DVBS) || (delsys == SYS_ATSC);
-
- return status;
+ return (delsys == SYS_DVBT) || (delsys == SYS_DVBC_ANNEX_A) ||
+ (delsys == SYS_DVBS) || (delsys == SYS_ATSC);
}
/**
@@ -2356,7 +2364,8 @@ static int dvb_frontend_ioctl_legacy(struct file *file,
int i;
u8 last = 1;
if (dvb_frontend_debug)
- printk("%s switch command: 0x%04lx\n", __func__, swcmd);
+ dprintk("%s switch command: 0x%04lx\n",
+ __func__, swcmd);
nexttime = ktime_get_boottime();
if (dvb_frontend_debug)
tv[0] = nexttime;
@@ -2379,10 +2388,10 @@ static int dvb_frontend_ioctl_legacy(struct file *file,
dvb_frontend_sleep_until(&nexttime, 8000);
}
if (dvb_frontend_debug) {
- printk("%s(%d): switch delay (should be 32k followed by all 8k\n",
+ dprintk("%s(%d): switch delay (should be 32k followed by all 8k)\n",
__func__, fe->dvb->num);
for (i = 1; i < 10; i++)
- printk("%d: %d\n", i,
+ pr_info("%d: %d\n", i,
(int) ktime_us_delta(tv[i], tv[i-1]));
}
err = 0;
@@ -2545,7 +2554,7 @@ static int dvb_frontend_open(struct inode *inode, struct file *file)
fepriv->events.eventr = fepriv->events.eventw = 0;
}
- dvb_frontend_private_get(fepriv);
+ dvb_frontend_get(fe);
if (adapter->mfe_shared)
mutex_unlock (&adapter->mfe_lock);
@@ -2595,7 +2604,7 @@ static int dvb_frontend_release(struct inode *inode, struct file *file)
fe->ops.ts_bus_ctrl(fe, 0);
}
- dvb_frontend_private_put(fepriv);
+ dvb_frontend_put(fe);
return ret;
}
@@ -2685,7 +2694,14 @@ int dvb_register_frontend(struct dvb_adapter* dvb,
}
fepriv = fe->frontend_priv;
- kref_init(&fepriv->refcount);
+ kref_init(&fe->refcount);
+
+ /*
+ * After initialization, there need to be two references: one
+ * for dvb_unregister_frontend(), and another one for
+ * dvb_frontend_detach().
+ */
+ dvb_frontend_get(fe);
sema_init(&fepriv->sem, 1);
init_waitqueue_head (&fepriv->wait_queue);
@@ -2720,50 +2736,33 @@ int dvb_unregister_frontend(struct dvb_frontend* fe)
dev_dbg(fe->dvb->device, "%s:\n", __func__);
mutex_lock(&frontend_mutex);
- dvb_frontend_stop (fe);
- dvb_unregister_device (fepriv->dvbdev);
+ dvb_frontend_stop(fe);
+ dvb_remove_device(fepriv->dvbdev);
/* fe is invalid now */
mutex_unlock(&frontend_mutex);
- dvb_frontend_private_put(fepriv);
+ dvb_frontend_put(fe);
return 0;
}
EXPORT_SYMBOL(dvb_unregister_frontend);
-#ifdef CONFIG_MEDIA_ATTACH
-void dvb_frontend_detach(struct dvb_frontend* fe)
+static void dvb_frontend_invoke_release(struct dvb_frontend *fe,
+ void (*release)(struct dvb_frontend *fe))
{
- void *ptr;
-
- if (fe->ops.release_sec) {
- fe->ops.release_sec(fe);
- dvb_detach(fe->ops.release_sec);
- }
- if (fe->ops.tuner_ops.release) {
- fe->ops.tuner_ops.release(fe);
- dvb_detach(fe->ops.tuner_ops.release);
- }
- if (fe->ops.analog_ops.release) {
- fe->ops.analog_ops.release(fe);
- dvb_detach(fe->ops.analog_ops.release);
- }
- ptr = (void*)fe->ops.release;
- if (ptr) {
- fe->ops.release(fe);
- dvb_detach(ptr);
+ if (release) {
+ release(fe);
+#ifdef CONFIG_MEDIA_ATTACH
+ dvb_detach(release);
+#endif
}
}
-#else
+
void dvb_frontend_detach(struct dvb_frontend* fe)
{
- if (fe->ops.release_sec)
- fe->ops.release_sec(fe);
- if (fe->ops.tuner_ops.release)
- fe->ops.tuner_ops.release(fe);
- if (fe->ops.analog_ops.release)
- fe->ops.analog_ops.release(fe);
- if (fe->ops.release)
- fe->ops.release(fe);
+ dvb_frontend_invoke_release(fe, fe->ops.release_sec);
+ dvb_frontend_invoke_release(fe, fe->ops.tuner_ops.release);
+ dvb_frontend_invoke_release(fe, fe->ops.analog_ops.release);
+ dvb_frontend_invoke_release(fe, fe->ops.detach);
+ dvb_frontend_put(fe);
}
-#endif
EXPORT_SYMBOL(dvb_frontend_detach);
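
Taken together, the hunks above give struct dvb_frontend a two-owner lifetime: dvb_register_frontend() leaves the refcount at two, and only the final put runs dvb_frontend_free(), which frees the device node, invokes ->release and frees the private data. Teardown in a typical bridge driver therefore looks like this sketch (ordering as implied by the diff):

    dvb_unregister_frontend(fe); /* stops the thread, removes the device
                                    node, drops the registration ref */
    dvb_frontend_detach(fe);     /* runs ->detach plus the tuner/SEC/analog
                                    release hooks, drops the last ref; the
                                    kref release then calls ->release */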
diff --git a/drivers/media/dvb-core/dvb_frontend.h b/drivers/media/dvb-core/dvb_frontend.h
index fb6e84811504..482912d3b77a 100644
--- a/drivers/media/dvb-core/dvb_frontend.h
+++ b/drivers/media/dvb-core/dvb_frontend.h
@@ -225,7 +225,7 @@ struct dvb_tuner_ops {
struct dvb_tuner_info info;
- int (*release)(struct dvb_frontend *fe);
+ void (*release)(struct dvb_frontend *fe);
int (*init)(struct dvb_frontend *fe);
int (*sleep)(struct dvb_frontend *fe);
int (*suspend)(struct dvb_frontend *fe);
@@ -323,7 +323,11 @@ struct dtv_frontend_properties;
*
* @info: embedded struct dvb_tuner_info with tuner properties
* @delsys: Delivery systems supported by the frontend
- * @release: callback function called when frontend is dettached.
+ * @detach: callback function called when frontend is detached.
+ * drivers should clean up, but not yet free the struct
+ * dvb_frontend allocation.
+ * @release: callback function called when frontend is ready to be
+ * freed.
* drivers should free any allocated memory.
* @release_sec: callback function requesting that the Satellite Equipment
* Control (SEC) driver release and free any memory
@@ -408,6 +412,7 @@ struct dvb_frontend_ops {
u8 delsys[MAX_DELSYS];
+ void (*detach)(struct dvb_frontend *fe);
void (*release)(struct dvb_frontend* fe);
void (*release_sec)(struct dvb_frontend* fe);
@@ -655,6 +660,7 @@ struct dtv_frontend_properties {
*/
struct dvb_frontend {
+ struct kref refcount;
struct dvb_frontend_ops ops;
struct dvb_adapter *dvb;
void *demodulator_priv;
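
Given the @detach/@release split documented above, a demod driver can quiesce on detach and defer freeing until the refcount drops. A hypothetical driver sketch (the mydemod_* names are invented for illustration):

    static void mydemod_detach(struct dvb_frontend *fe)
    {
            /* stop using bus/hardware resources; do not free anything yet */
    }

    static void mydemod_release(struct dvb_frontend *fe)
    {
            kfree(fe->demodulator_priv); /* safe: the last reference is gone */
    }

    static const struct dvb_frontend_ops mydemod_ops = {
            /* ... delsys, info, tuning ops ... */
            .detach  = mydemod_detach,
            .release = mydemod_release,
    };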
diff --git a/drivers/media/dvb-core/dvb_net.c b/drivers/media/dvb-core/dvb_net.c
index 9914f69a4a02..dfc03a95df71 100644
--- a/drivers/media/dvb-core/dvb_net.c
+++ b/drivers/media/dvb-core/dvb_net.c
@@ -54,6 +54,8 @@
*
*/
+#define pr_fmt(fmt) "dvb_net: " fmt
+
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
@@ -309,451 +311,589 @@ static inline void reset_ule( struct dvb_net_priv *p )
* Decode ULE SNDUs according to draft-ietf-ipdvb-ule-03.txt from a sequence of
* TS cells of a single PID.
*/
-static void dvb_net_ule( struct net_device *dev, const u8 *buf, size_t buf_len )
-{
- struct dvb_net_priv *priv = netdev_priv(dev);
- unsigned long skipped = 0L;
- const u8 *ts, *ts_end, *from_where = NULL;
- u8 ts_remain = 0, how_much = 0, new_ts = 1;
- struct ethhdr *ethh = NULL;
- bool error = false;
+struct dvb_net_ule_handle {
+ struct net_device *dev;
+ struct dvb_net_priv *priv;
+ struct ethhdr *ethh;
+ const u8 *buf;
+ size_t buf_len;
+ unsigned long skipped;
+ const u8 *ts, *ts_end, *from_where;
+ u8 ts_remain, how_much, new_ts;
+ bool error;
#ifdef ULE_DEBUG
- /* The code inside ULE_DEBUG keeps a history of the last 100 TS cells processed. */
+ /*
+ * The code inside ULE_DEBUG keeps a history of the
+ * last 100 TS cells processed.
+ */
static unsigned char ule_hist[100*TS_SZ];
static unsigned char *ule_where = ule_hist, ule_dump;
#endif
+};
- /* For all TS cells in current buffer.
- * Appearently, we are called for every single TS cell.
- */
- for (ts = buf, ts_end = buf + buf_len; ts < ts_end; /* no default incr. */ ) {
-
- if (new_ts) {
- /* We are about to process a new TS cell. */
+static int dvb_net_ule_new_ts_cell(struct dvb_net_ule_handle *h)
+{
+ /* We are about to process a new TS cell. */
#ifdef ULE_DEBUG
- if (ule_where >= &ule_hist[100*TS_SZ]) ule_where = ule_hist;
- memcpy( ule_where, ts, TS_SZ );
- if (ule_dump) {
- hexdump( ule_where, TS_SZ );
- ule_dump = 0;
- }
- ule_where += TS_SZ;
+ if (h->ule_where >= &h->ule_hist[100*TS_SZ])
+ h->ule_where = h->ule_hist;
+ memcpy(h->ule_where, h->ts, TS_SZ);
+ if (h->ule_dump) {
+ hexdump(h->ule_where, TS_SZ);
+ h->ule_dump = 0;
+ }
+ h->ule_where += TS_SZ;
#endif
- /* Check TS error conditions: sync_byte, transport_error_indicator, scrambling_control . */
- if ((ts[0] != TS_SYNC) || (ts[1] & TS_TEI) || ((ts[3] & TS_SC) != 0)) {
- printk(KERN_WARNING "%lu: Invalid TS cell: SYNC %#x, TEI %u, SC %#x.\n",
- priv->ts_count, ts[0],
- (ts[1] & TS_TEI) >> 7,
- (ts[3] & TS_SC) >> 6);
-
- /* Drop partly decoded SNDU, reset state, resync on PUSI. */
- if (priv->ule_skb) {
- dev_kfree_skb( priv->ule_skb );
- /* Prepare for next SNDU. */
- dev->stats.rx_errors++;
- dev->stats.rx_frame_errors++;
- }
- reset_ule(priv);
- priv->need_pusi = 1;
+ /*
+	 * Check TS error conditions: sync_byte, transport_error_indicator,
+	 * scrambling_control.
+ */
+ if ((h->ts[0] != TS_SYNC) || (h->ts[1] & TS_TEI) ||
+ ((h->ts[3] & TS_SC) != 0)) {
+ pr_warn("%lu: Invalid TS cell: SYNC %#x, TEI %u, SC %#x.\n",
+ h->priv->ts_count, h->ts[0],
+ (h->ts[1] & TS_TEI) >> 7,
+ (h->ts[3] & TS_SC) >> 6);
+
+ /* Drop partly decoded SNDU, reset state, resync on PUSI. */
+ if (h->priv->ule_skb) {
+ dev_kfree_skb(h->priv->ule_skb);
+ /* Prepare for next SNDU. */
+ h->dev->stats.rx_errors++;
+ h->dev->stats.rx_frame_errors++;
+ }
+ reset_ule(h->priv);
+ h->priv->need_pusi = 1;
- /* Continue with next TS cell. */
- ts += TS_SZ;
- priv->ts_count++;
- continue;
- }
+ /* Continue with next TS cell. */
+ h->ts += TS_SZ;
+ h->priv->ts_count++;
+ return 1;
+ }
- ts_remain = 184;
- from_where = ts + 4;
+ h->ts_remain = 184;
+ h->from_where = h->ts + 4;
+
+ return 0;
+}
+
+static int dvb_net_ule_ts_pusi(struct dvb_net_ule_handle *h)
+{
+ if (h->ts[1] & TS_PUSI) {
+ /* Find beginning of first ULE SNDU in current TS cell. */
+ /* Synchronize continuity counter. */
+ h->priv->tscc = h->ts[3] & 0x0F;
+ /* There is a pointer field here. */
+ if (h->ts[4] > h->ts_remain) {
+ pr_err("%lu: Invalid ULE packet (pointer field %d)\n",
+ h->priv->ts_count, h->ts[4]);
+ h->ts += TS_SZ;
+ h->priv->ts_count++;
+ return 1;
}
- /* Synchronize on PUSI, if required. */
- if (priv->need_pusi) {
- if (ts[1] & TS_PUSI) {
- /* Find beginning of first ULE SNDU in current TS cell. */
- /* Synchronize continuity counter. */
- priv->tscc = ts[3] & 0x0F;
- /* There is a pointer field here. */
- if (ts[4] > ts_remain) {
- printk(KERN_ERR "%lu: Invalid ULE packet "
- "(pointer field %d)\n", priv->ts_count, ts[4]);
- ts += TS_SZ;
- priv->ts_count++;
- continue;
- }
- /* Skip to destination of pointer field. */
- from_where = &ts[5] + ts[4];
- ts_remain -= 1 + ts[4];
- skipped = 0;
- } else {
- skipped++;
- ts += TS_SZ;
- priv->ts_count++;
- continue;
- }
+ /* Skip to destination of pointer field. */
+ h->from_where = &h->ts[5] + h->ts[4];
+ h->ts_remain -= 1 + h->ts[4];
+ h->skipped = 0;
+ } else {
+ h->skipped++;
+ h->ts += TS_SZ;
+ h->priv->ts_count++;
+ return 1;
+ }
+
+ return 0;
+}
+
+static int dvb_net_ule_new_ts(struct dvb_net_ule_handle *h)
+{
+ /* Check continuity counter. */
+ if ((h->ts[3] & 0x0F) == h->priv->tscc)
+ h->priv->tscc = (h->priv->tscc + 1) & 0x0F;
+ else {
+ /* TS discontinuity handling: */
+ pr_warn("%lu: TS discontinuity: got %#x, expected %#x.\n",
+ h->priv->ts_count, h->ts[3] & 0x0F,
+ h->priv->tscc);
+ /* Drop partly decoded SNDU, reset state, resync on PUSI. */
+ if (h->priv->ule_skb) {
+ dev_kfree_skb(h->priv->ule_skb);
+ /* Prepare for next SNDU. */
+ // reset_ule(h->priv); moved to below.
+ h->dev->stats.rx_errors++;
+ h->dev->stats.rx_frame_errors++;
}
+ reset_ule(h->priv);
+ /* skip to next PUSI. */
+ h->priv->need_pusi = 1;
+ return 1;
+ }
+ /*
+ * If we still have an incomplete payload, but PUSI is
+ * set; some TS cells are missing.
+ * This is only possible here, if we missed exactly 16 TS
+ * cells (continuity counter wrap).
+ */
+ if (h->ts[1] & TS_PUSI) {
+ if (!h->priv->need_pusi) {
+ if (!(*h->from_where < (h->ts_remain-1)) ||
+ *h->from_where != h->priv->ule_sndu_remain) {
+ /*
+ * Pointer field is invalid.
+ * Drop this TS cell and any started ULE SNDU.
+ */
+ pr_warn("%lu: Invalid pointer field: %u.\n",
+ h->priv->ts_count,
+ *h->from_where);
- if (new_ts) {
- /* Check continuity counter. */
- if ((ts[3] & 0x0F) == priv->tscc)
- priv->tscc = (priv->tscc + 1) & 0x0F;
- else {
- /* TS discontinuity handling: */
- printk(KERN_WARNING "%lu: TS discontinuity: got %#x, "
- "expected %#x.\n", priv->ts_count, ts[3] & 0x0F, priv->tscc);
- /* Drop partly decoded SNDU, reset state, resync on PUSI. */
- if (priv->ule_skb) {
- dev_kfree_skb( priv->ule_skb );
- /* Prepare for next SNDU. */
- // reset_ule(priv); moved to below.
- dev->stats.rx_errors++;
- dev->stats.rx_frame_errors++;
+ /*
+ * Drop partly decoded SNDU, reset state,
+ * resync on PUSI.
+ */
+ if (h->priv->ule_skb) {
+ h->error = true;
+ dev_kfree_skb(h->priv->ule_skb);
}
- reset_ule(priv);
- /* skip to next PUSI. */
- priv->need_pusi = 1;
- continue;
- }
- /* If we still have an incomplete payload, but PUSI is
- * set; some TS cells are missing.
- * This is only possible here, if we missed exactly 16 TS
- * cells (continuity counter wrap). */
- if (ts[1] & TS_PUSI) {
- if (! priv->need_pusi) {
- if (!(*from_where < (ts_remain-1)) || *from_where != priv->ule_sndu_remain) {
- /* Pointer field is invalid. Drop this TS cell and any started ULE SNDU. */
- printk(KERN_WARNING "%lu: Invalid pointer "
- "field: %u.\n", priv->ts_count, *from_where);
-
- /* Drop partly decoded SNDU, reset state, resync on PUSI. */
- if (priv->ule_skb) {
- error = true;
- dev_kfree_skb(priv->ule_skb);
- }
-
- if (error || priv->ule_sndu_remain) {
- dev->stats.rx_errors++;
- dev->stats.rx_frame_errors++;
- error = false;
- }
-
- reset_ule(priv);
- priv->need_pusi = 1;
- continue;
- }
- /* Skip pointer field (we're processing a
- * packed payload). */
- from_where += 1;
- ts_remain -= 1;
- } else
- priv->need_pusi = 0;
-
- if (priv->ule_sndu_remain > 183) {
- /* Current SNDU lacks more data than there could be available in the
- * current TS cell. */
- dev->stats.rx_errors++;
- dev->stats.rx_length_errors++;
- printk(KERN_WARNING "%lu: Expected %d more SNDU bytes, but "
- "got PUSI (pf %d, ts_remain %d). Flushing incomplete payload.\n",
- priv->ts_count, priv->ule_sndu_remain, ts[4], ts_remain);
- dev_kfree_skb(priv->ule_skb);
- /* Prepare for next SNDU. */
- reset_ule(priv);
- /* Resync: go to where pointer field points to: start of next ULE SNDU. */
- from_where += ts[4];
- ts_remain -= ts[4];
+
+ if (h->error || h->priv->ule_sndu_remain) {
+ h->dev->stats.rx_errors++;
+ h->dev->stats.rx_frame_errors++;
+ h->error = false;
}
+
+ reset_ule(h->priv);
+ h->priv->need_pusi = 1;
+ return 1;
}
+ /*
+ * Skip pointer field (we're processing a
+ * packed payload).
+ */
+ h->from_where += 1;
+ h->ts_remain -= 1;
+ } else
+ h->priv->need_pusi = 0;
+
+ if (h->priv->ule_sndu_remain > 183) {
+ /*
+ * Current SNDU lacks more data than there
+ * could be available in the current TS cell.
+ */
+ h->dev->stats.rx_errors++;
+ h->dev->stats.rx_length_errors++;
+ pr_warn("%lu: Expected %d more SNDU bytes, but got PUSI (pf %d, h->ts_remain %d). Flushing incomplete payload.\n",
+ h->priv->ts_count,
+ h->priv->ule_sndu_remain,
+ h->ts[4], h->ts_remain);
+ dev_kfree_skb(h->priv->ule_skb);
+ /* Prepare for next SNDU. */
+ reset_ule(h->priv);
+ /*
+ * Resync: go to where pointer field points to:
+ * start of next ULE SNDU.
+ */
+ h->from_where += h->ts[4];
+ h->ts_remain -= h->ts[4];
}
+ }
+ return 0;
+}
- /* Check if new payload needs to be started. */
- if (priv->ule_skb == NULL) {
- /* Start a new payload with skb.
- * Find ULE header. It is only guaranteed that the
- * length field (2 bytes) is contained in the current
- * TS.
- * Check ts_remain has to be >= 2 here. */
- if (ts_remain < 2) {
- printk(KERN_WARNING "Invalid payload packing: only %d "
- "bytes left in TS. Resyncing.\n", ts_remain);
- priv->ule_sndu_len = 0;
- priv->need_pusi = 1;
- ts += TS_SZ;
- continue;
- }
- if (! priv->ule_sndu_len) {
- /* Got at least two bytes, thus extrace the SNDU length. */
- priv->ule_sndu_len = from_where[0] << 8 | from_where[1];
- if (priv->ule_sndu_len & 0x8000) {
- /* D-Bit is set: no dest mac present. */
- priv->ule_sndu_len &= 0x7FFF;
- priv->ule_dbit = 1;
- } else
- priv->ule_dbit = 0;
-
- if (priv->ule_sndu_len < 5) {
- printk(KERN_WARNING "%lu: Invalid ULE SNDU length %u. "
- "Resyncing.\n", priv->ts_count, priv->ule_sndu_len);
- dev->stats.rx_errors++;
- dev->stats.rx_length_errors++;
- priv->ule_sndu_len = 0;
- priv->need_pusi = 1;
- new_ts = 1;
- ts += TS_SZ;
- priv->ts_count++;
- continue;
- }
- ts_remain -= 2; /* consume the 2 bytes SNDU length. */
- from_where += 2;
- }
+/*
+ * Start a new payload with skb.
+ * Find ULE header. It is only guaranteed that the
+ * length field (2 bytes) is contained in the current
+ * TS.
+ * h->ts_remain must therefore be >= 2 here, which is checked below.
+ */
+static int dvb_net_ule_new_payload(struct dvb_net_ule_handle *h)
+{
+ if (h->ts_remain < 2) {
+ pr_warn("Invalid payload packing: only %d bytes left in TS. Resyncing.\n",
+ h->ts_remain);
+ h->priv->ule_sndu_len = 0;
+ h->priv->need_pusi = 1;
+ h->ts += TS_SZ;
+ return 1;
+ }
- priv->ule_sndu_remain = priv->ule_sndu_len + 2;
+ if (!h->priv->ule_sndu_len) {
+		/* Got at least two bytes, thus extract the SNDU length. */
+ h->priv->ule_sndu_len = h->from_where[0] << 8 |
+ h->from_where[1];
+ if (h->priv->ule_sndu_len & 0x8000) {
+ /* D-Bit is set: no dest mac present. */
+ h->priv->ule_sndu_len &= 0x7FFF;
+ h->priv->ule_dbit = 1;
+ } else
+ h->priv->ule_dbit = 0;
+
+ if (h->priv->ule_sndu_len < 5) {
+ pr_warn("%lu: Invalid ULE SNDU length %u. Resyncing.\n",
+ h->priv->ts_count,
+ h->priv->ule_sndu_len);
+ h->dev->stats.rx_errors++;
+ h->dev->stats.rx_length_errors++;
+ h->priv->ule_sndu_len = 0;
+ h->priv->need_pusi = 1;
+ h->new_ts = 1;
+ h->ts += TS_SZ;
+ h->priv->ts_count++;
+ return 1;
+ }
+ h->ts_remain -= 2; /* consume the 2 bytes SNDU length. */
+ h->from_where += 2;
+ }
+
+ h->priv->ule_sndu_remain = h->priv->ule_sndu_len + 2;
+ /*
+ * State of current TS:
+ * h->ts_remain (remaining bytes in the current TS cell)
+ * 0 ule_type is not available now, we need the next TS cell
+ * 1 the first byte of the ule_type is present
+ * >=2 full ULE header present, maybe some payload data as well.
+ */
+ switch (h->ts_remain) {
+ case 1:
+ h->priv->ule_sndu_remain--;
+ h->priv->ule_sndu_type = h->from_where[0] << 8;
+
+ /* first byte of ule_type is set. */
+ h->priv->ule_sndu_type_1 = 1;
+ h->ts_remain -= 1;
+ h->from_where += 1;
+ /* fallthrough */
+ case 0:
+ h->new_ts = 1;
+ h->ts += TS_SZ;
+ h->priv->ts_count++;
+ return 1;
+
+ default: /* complete ULE header is present in current TS. */
+ /* Extract ULE type field. */
+ if (h->priv->ule_sndu_type_1) {
+ h->priv->ule_sndu_type_1 = 0;
+ h->priv->ule_sndu_type |= h->from_where[0];
+ h->from_where += 1; /* points to payload start. */
+ h->ts_remain -= 1;
+ } else {
+ /* Complete type is present in new TS. */
+ h->priv->ule_sndu_type = h->from_where[0] << 8 |
+ h->from_where[1];
+ h->from_where += 2; /* points to payload start. */
+ h->ts_remain -= 2;
+ }
+ break;
+ }
+
+ /*
+ * Allocate the skb (decoder target buffer) with the correct size,
+ * as follows:
+ *
+ * prepare for the largest case: bridged SNDU with MAC address
+ * (dbit = 0).
+ */
+ h->priv->ule_skb = dev_alloc_skb(h->priv->ule_sndu_len +
+ ETH_HLEN + ETH_ALEN);
+ if (!h->priv->ule_skb) {
+ pr_notice("%s: Memory squeeze, dropping packet.\n",
+ h->dev->name);
+ h->dev->stats.rx_dropped++;
+ return -1;
+ }
+
+ /* This includes the CRC32 _and_ dest mac, if !dbit. */
+ h->priv->ule_sndu_remain = h->priv->ule_sndu_len;
+ h->priv->ule_skb->dev = h->dev;
+ /*
+ * Leave space for Ethernet or bridged SNDU header
+ * (eth hdr plus one MAC addr).
+ */
+ skb_reserve(h->priv->ule_skb, ETH_HLEN + ETH_ALEN);
+
+ return 0;
+}
+
+
+static int dvb_net_ule_should_drop(struct dvb_net_ule_handle *h)
+{
+ static const u8 bc_addr[ETH_ALEN] = { [0 ... ETH_ALEN - 1] = 0xff };
+
+ /*
+ * The destination MAC address is the next data in the skb. It comes
+ * before any extension headers.
+ *
+ * Check if the payload of this SNDU should be passed up the stack.
+ */
+ if (h->priv->rx_mode == RX_MODE_PROMISC)
+ return 0;
+
+ if (h->priv->ule_skb->data[0] & 0x01) {
+ /* multicast or broadcast */
+ if (!ether_addr_equal(h->priv->ule_skb->data, bc_addr)) {
+ /* multicast */
+ if (h->priv->rx_mode == RX_MODE_MULTI) {
+ int i;
+
+ for (i = 0; i < h->priv->multi_num &&
+ !ether_addr_equal(h->priv->ule_skb->data,
+ h->priv->multi_macs[i]);
+ i++)
+ ;
+ if (i == h->priv->multi_num)
+ return 1;
+ } else if (h->priv->rx_mode != RX_MODE_ALL_MULTI)
+ return 1; /* no broadcast; */
/*
- * State of current TS:
- * ts_remain (remaining bytes in the current TS cell)
- * 0 ule_type is not available now, we need the next TS cell
- * 1 the first byte of the ule_type is present
- * >=2 full ULE header present, maybe some payload data as well.
+ * else:
+ * all multicast mode: accept all multicast packets
*/
- switch (ts_remain) {
- case 1:
- priv->ule_sndu_remain--;
- priv->ule_sndu_type = from_where[0] << 8;
- priv->ule_sndu_type_1 = 1; /* first byte of ule_type is set. */
- ts_remain -= 1; from_where += 1;
- /* Continue w/ next TS. */
- case 0:
- new_ts = 1;
- ts += TS_SZ;
- priv->ts_count++;
- continue;
-
- default: /* complete ULE header is present in current TS. */
- /* Extract ULE type field. */
- if (priv->ule_sndu_type_1) {
- priv->ule_sndu_type_1 = 0;
- priv->ule_sndu_type |= from_where[0];
- from_where += 1; /* points to payload start. */
- ts_remain -= 1;
- } else {
- /* Complete type is present in new TS. */
- priv->ule_sndu_type = from_where[0] << 8 | from_where[1];
- from_where += 2; /* points to payload start. */
- ts_remain -= 2;
- }
- break;
- }
+ }
+ /* else: broadcast */
+ } else if (!ether_addr_equal(h->priv->ule_skb->data, h->dev->dev_addr))
+ return 1;
- /* Allocate the skb (decoder target buffer) with the correct size, as follows:
- * prepare for the largest case: bridged SNDU with MAC address (dbit = 0). */
- priv->ule_skb = dev_alloc_skb( priv->ule_sndu_len + ETH_HLEN + ETH_ALEN );
- if (priv->ule_skb == NULL) {
- printk(KERN_NOTICE "%s: Memory squeeze, dropping packet.\n",
- dev->name);
- dev->stats.rx_dropped++;
- return;
- }
+ return 0;
+}
+
+
+static void dvb_net_ule_check_crc(struct dvb_net_ule_handle *h,
+ u32 ule_crc, u32 expected_crc)
+{
+ u8 dest_addr[ETH_ALEN];
+
+ if (ule_crc != expected_crc) {
+ pr_warn("%lu: CRC32 check FAILED: %08x / %08x, SNDU len %d type %#x, ts_remain %d, next 2: %x.\n",
+ h->priv->ts_count, ule_crc, expected_crc,
+ h->priv->ule_sndu_len, h->priv->ule_sndu_type,
+ h->ts_remain,
+ h->ts_remain > 2 ?
+ *(unsigned short *)h->from_where : 0);
+
+ #ifdef ULE_DEBUG
+ hexdump(iov[0].iov_base, iov[0].iov_len);
+ hexdump(iov[1].iov_base, iov[1].iov_len);
+ hexdump(iov[2].iov_base, iov[2].iov_len);
+
+ if (h->ule_where == h->ule_hist) {
+ hexdump(&h->ule_hist[98*TS_SZ], TS_SZ);
+ hexdump(&h->ule_hist[99*TS_SZ], TS_SZ);
+ } else if (h->ule_where == &h->ule_hist[TS_SZ]) {
+ hexdump(&h->ule_hist[99*TS_SZ], TS_SZ);
+ hexdump(h->ule_hist, TS_SZ);
+ } else {
+ hexdump(h->ule_where - TS_SZ - TS_SZ, TS_SZ);
+ hexdump(h->ule_where - TS_SZ, TS_SZ);
+ }
+ h->ule_dump = 1;
+ #endif
+
+ h->dev->stats.rx_errors++;
+ h->dev->stats.rx_crc_errors++;
+ dev_kfree_skb(h->priv->ule_skb);
+
+ return;
+ }
+
+ /* CRC32 verified OK. */
+
+ /* CRC32 was OK, so remove it from skb. */
+ h->priv->ule_skb->tail -= 4;
+ h->priv->ule_skb->len -= 4;
+
+ if (!h->priv->ule_dbit) {
+ if (dvb_net_ule_should_drop(h)) {
+#ifdef ULE_DEBUG
+ netdev_dbg(h->dev,
+ "Dropping SNDU: MAC destination address does not match: dest addr: %pM, h->dev addr: %pM\n",
+ h->priv->ule_skb->data, h->dev->dev_addr);
+#endif
+ dev_kfree_skb(h->priv->ule_skb);
+ return;
+ }
+
+ skb_copy_from_linear_data(h->priv->ule_skb, dest_addr,
+ ETH_ALEN);
+ skb_pull(h->priv->ule_skb, ETH_ALEN);
+ }
+
+ /* Handle ULE Extension Headers. */
+ if (h->priv->ule_sndu_type < ETH_P_802_3_MIN) {
+ /* There is an extension header. Handle it accordingly. */
+ int l = handle_ule_extensions(h->priv);
+
+ if (l < 0) {
+ /*
+ * Mandatory extension header unknown or TEST SNDU.
+ * Drop it.
+ */
+
+ // pr_warn("Dropping SNDU, extension headers.\n" );
+ dev_kfree_skb(h->priv->ule_skb);
+ return;
+ }
+ skb_pull(h->priv->ule_skb, l);
+ }
+
+ /*
+ * Construct/assure correct ethernet header.
+ * Note: in bridged mode (h->priv->ule_bridged != 0)
+ * we already have the (original) ethernet
+ * header at the start of the payload (after
+ * optional dest. address and any extension
+ * headers).
+ */
+ if (!h->priv->ule_bridged) {
+ skb_push(h->priv->ule_skb, ETH_HLEN);
+ h->ethh = (struct ethhdr *)h->priv->ule_skb->data;
+ if (!h->priv->ule_dbit) {
+ /*
+ * dest_addr buffer is only valid if
+ * h->priv->ule_dbit == 0
+ */
+ memcpy(h->ethh->h_dest, dest_addr, ETH_ALEN);
+ eth_zero_addr(h->ethh->h_source);
+ } else /* zeroize source and dest */
+ memset(h->ethh, 0, ETH_ALEN * 2);
- /* This includes the CRC32 _and_ dest mac, if !dbit. */
- priv->ule_sndu_remain = priv->ule_sndu_len;
- priv->ule_skb->dev = dev;
- /* Leave space for Ethernet or bridged SNDU header (eth hdr plus one MAC addr). */
- skb_reserve( priv->ule_skb, ETH_HLEN + ETH_ALEN );
+ h->ethh->h_proto = htons(h->priv->ule_sndu_type);
+ }
+ /* else: skb is in correct state; nothing to do. */
+ h->priv->ule_bridged = 0;
+
+ /* Stuff into kernel's protocol stack. */
+ h->priv->ule_skb->protocol = dvb_net_eth_type_trans(h->priv->ule_skb,
+ h->dev);
+ /*
+ * If D-bit is set (i.e. destination MAC address not present),
+ * receive the packet anyhow.
+ */
+#if 0
+ if (h->priv->ule_dbit && skb->pkt_type == PACKET_OTHERHOST)
+ h->priv->ule_skb->pkt_type = PACKET_HOST;
+#endif
+ h->dev->stats.rx_packets++;
+ h->dev->stats.rx_bytes += h->priv->ule_skb->len;
+ netif_rx(h->priv->ule_skb);
+}
+
+static void dvb_net_ule(struct net_device *dev, const u8 *buf, size_t buf_len)
+{
+ int ret;
+ struct dvb_net_ule_handle h = {
+		.dev = dev,
+		.priv = netdev_priv(dev),
+ .buf = buf,
+ .buf_len = buf_len,
+ .skipped = 0L,
+ .ts = NULL,
+ .ts_end = NULL,
+ .from_where = NULL,
+ .ts_remain = 0,
+ .how_much = 0,
+ .new_ts = 1,
+ .ethh = NULL,
+ .error = false,
+#ifdef ULE_DEBUG
+ .ule_where = ule_hist,
+#endif
+ };
+
+ /*
+ * For all TS cells in current buffer.
+	 * Apparently, we are called for every single TS cell.
+ */
+ for (h.ts = h.buf, h.ts_end = h.buf + h.buf_len;
+ h.ts < h.ts_end; /* no incr. */) {
+ if (h.new_ts) {
+ /* We are about to process a new TS cell. */
+ if (dvb_net_ule_new_ts_cell(&h))
+ continue;
+ }
+
+ /* Synchronize on PUSI, if required. */
+ if (h.priv->need_pusi) {
+ if (dvb_net_ule_ts_pusi(&h))
+ continue;
+ }
+
+ if (h.new_ts) {
+ if (dvb_net_ule_new_ts(&h))
+ continue;
+ }
+
+ /* Check if new payload needs to be started. */
+ if (h.priv->ule_skb == NULL) {
+ ret = dvb_net_ule_new_payload(&h);
+ if (ret < 0)
+ return;
+ if (ret)
+ continue;
}
/* Copy data into our current skb. */
- how_much = min(priv->ule_sndu_remain, (int)ts_remain);
- memcpy(skb_put(priv->ule_skb, how_much), from_where, how_much);
- priv->ule_sndu_remain -= how_much;
- ts_remain -= how_much;
- from_where += how_much;
+ h.how_much = min(h.priv->ule_sndu_remain, (int)h.ts_remain);
+ memcpy(skb_put(h.priv->ule_skb, h.how_much),
+ h.from_where, h.how_much);
+ h.priv->ule_sndu_remain -= h.how_much;
+ h.ts_remain -= h.how_much;
+ h.from_where += h.how_much;
/* Check for complete payload. */
- if (priv->ule_sndu_remain <= 0) {
+ if (h.priv->ule_sndu_remain <= 0) {
/* Check CRC32, we've got it in our skb already. */
- __be16 ulen = htons(priv->ule_sndu_len);
- __be16 utype = htons(priv->ule_sndu_type);
+ __be16 ulen = htons(h.priv->ule_sndu_len);
+ __be16 utype = htons(h.priv->ule_sndu_type);
const u8 *tail;
struct kvec iov[3] = {
{ &ulen, sizeof ulen },
{ &utype, sizeof utype },
- { priv->ule_skb->data, priv->ule_skb->len - 4 }
+ { h.priv->ule_skb->data,
+ h.priv->ule_skb->len - 4 }
};
u32 ule_crc = ~0L, expected_crc;
- if (priv->ule_dbit) {
+ if (h.priv->ule_dbit) {
/* Set D-bit for CRC32 verification,
* if it was set originally. */
ulen |= htons(0x8000);
}
ule_crc = iov_crc32(ule_crc, iov, 3);
- tail = skb_tail_pointer(priv->ule_skb);
+ tail = skb_tail_pointer(h.priv->ule_skb);
expected_crc = *(tail - 4) << 24 |
*(tail - 3) << 16 |
*(tail - 2) << 8 |
*(tail - 1);
- if (ule_crc != expected_crc) {
- printk(KERN_WARNING "%lu: CRC32 check FAILED: %08x / %08x, SNDU len %d type %#x, ts_remain %d, next 2: %x.\n",
- priv->ts_count, ule_crc, expected_crc, priv->ule_sndu_len, priv->ule_sndu_type, ts_remain, ts_remain > 2 ? *(unsigned short *)from_where : 0);
-#ifdef ULE_DEBUG
- hexdump( iov[0].iov_base, iov[0].iov_len );
- hexdump( iov[1].iov_base, iov[1].iov_len );
- hexdump( iov[2].iov_base, iov[2].iov_len );
-
- if (ule_where == ule_hist) {
- hexdump( &ule_hist[98*TS_SZ], TS_SZ );
- hexdump( &ule_hist[99*TS_SZ], TS_SZ );
- } else if (ule_where == &ule_hist[TS_SZ]) {
- hexdump( &ule_hist[99*TS_SZ], TS_SZ );
- hexdump( ule_hist, TS_SZ );
- } else {
- hexdump( ule_where - TS_SZ - TS_SZ, TS_SZ );
- hexdump( ule_where - TS_SZ, TS_SZ );
- }
- ule_dump = 1;
-#endif
+ dvb_net_ule_check_crc(&h, ule_crc, expected_crc);
- dev->stats.rx_errors++;
- dev->stats.rx_crc_errors++;
- dev_kfree_skb(priv->ule_skb);
- } else {
- /* CRC32 verified OK. */
- u8 dest_addr[ETH_ALEN];
- static const u8 bc_addr[ETH_ALEN] =
- { [ 0 ... ETH_ALEN-1] = 0xff };
-
- /* CRC32 was OK. Remove it from skb. */
- priv->ule_skb->tail -= 4;
- priv->ule_skb->len -= 4;
-
- if (!priv->ule_dbit) {
- /*
- * The destination MAC address is the
- * next data in the skb. It comes
- * before any extension headers.
- *
- * Check if the payload of this SNDU
- * should be passed up the stack.
- */
- register int drop = 0;
- if (priv->rx_mode != RX_MODE_PROMISC) {
- if (priv->ule_skb->data[0] & 0x01) {
- /* multicast or broadcast */
- if (!ether_addr_equal(priv->ule_skb->data, bc_addr)) {
- /* multicast */
- if (priv->rx_mode == RX_MODE_MULTI) {
- int i;
- for(i = 0; i < priv->multi_num &&
- !ether_addr_equal(priv->ule_skb->data,
- priv->multi_macs[i]); i++)
- ;
- if (i == priv->multi_num)
- drop = 1;
- } else if (priv->rx_mode != RX_MODE_ALL_MULTI)
- drop = 1; /* no broadcast; */
- /* else: all multicast mode: accept all multicast packets */
- }
- /* else: broadcast */
- }
- else if (!ether_addr_equal(priv->ule_skb->data, dev->dev_addr))
- drop = 1;
- /* else: destination address matches the MAC address of our receiver device */
- }
- /* else: promiscuous mode; pass everything up the stack */
-
- if (drop) {
-#ifdef ULE_DEBUG
- netdev_dbg(dev, "Dropping SNDU: MAC destination address does not match: dest addr: %pM, dev addr: %pM\n",
- priv->ule_skb->data, dev->dev_addr);
-#endif
- dev_kfree_skb(priv->ule_skb);
- goto sndu_done;
- }
- else
- {
- skb_copy_from_linear_data(priv->ule_skb,
- dest_addr,
- ETH_ALEN);
- skb_pull(priv->ule_skb, ETH_ALEN);
- }
- }
-
- /* Handle ULE Extension Headers. */
- if (priv->ule_sndu_type < ETH_P_802_3_MIN) {
- /* There is an extension header. Handle it accordingly. */
- int l = handle_ule_extensions(priv);
- if (l < 0) {
- /* Mandatory extension header unknown or TEST SNDU. Drop it. */
- // printk( KERN_WARNING "Dropping SNDU, extension headers.\n" );
- dev_kfree_skb(priv->ule_skb);
- goto sndu_done;
- }
- skb_pull(priv->ule_skb, l);
- }
-
- /*
- * Construct/assure correct ethernet header.
- * Note: in bridged mode (priv->ule_bridged !=
- * 0) we already have the (original) ethernet
- * header at the start of the payload (after
- * optional dest. address and any extension
- * headers).
- */
-
- if (!priv->ule_bridged) {
- skb_push(priv->ule_skb, ETH_HLEN);
- ethh = (struct ethhdr *)priv->ule_skb->data;
- if (!priv->ule_dbit) {
- /* dest_addr buffer is only valid if priv->ule_dbit == 0 */
- memcpy(ethh->h_dest, dest_addr, ETH_ALEN);
- eth_zero_addr(ethh->h_source);
- }
- else /* zeroize source and dest */
- memset( ethh, 0, ETH_ALEN*2 );
-
- ethh->h_proto = htons(priv->ule_sndu_type);
- }
- /* else: skb is in correct state; nothing to do. */
- priv->ule_bridged = 0;
-
- /* Stuff into kernel's protocol stack. */
- priv->ule_skb->protocol = dvb_net_eth_type_trans(priv->ule_skb, dev);
- /* If D-bit is set (i.e. destination MAC address not present),
- * receive the packet anyhow. */
- /* if (priv->ule_dbit && skb->pkt_type == PACKET_OTHERHOST)
- priv->ule_skb->pkt_type = PACKET_HOST; */
- dev->stats.rx_packets++;
- dev->stats.rx_bytes += priv->ule_skb->len;
- netif_rx(priv->ule_skb);
- }
- sndu_done:
/* Prepare for next SNDU. */
- reset_ule(priv);
+ reset_ule(h.priv);
}
/* More data in current TS (look at the bytes following the CRC32)? */
- if (ts_remain >= 2 && *((unsigned short *)from_where) != 0xFFFF) {
+ if (h.ts_remain >= 2 && *((unsigned short *)h.from_where) != 0xFFFF) {
/* Next ULE SNDU starts right there. */
- new_ts = 0;
- priv->ule_skb = NULL;
- priv->ule_sndu_type_1 = 0;
- priv->ule_sndu_len = 0;
- // printk(KERN_WARNING "More data in current TS: [%#x %#x %#x %#x]\n",
- // *(from_where + 0), *(from_where + 1),
- // *(from_where + 2), *(from_where + 3));
- // printk(KERN_WARNING "ts @ %p, stopped @ %p:\n", ts, from_where + 0);
- // hexdump(ts, 188);
+ h.new_ts = 0;
+ h.priv->ule_skb = NULL;
+ h.priv->ule_sndu_type_1 = 0;
+ h.priv->ule_sndu_len = 0;
+ // pr_warn("More data in current TS: [%#x %#x %#x %#x]\n",
+ // *(h.from_where + 0), *(h.from_where + 1),
+ // *(h.from_where + 2), *(h.from_where + 3));
+ // pr_warn("h.ts @ %p, stopped @ %p:\n", h.ts, h.from_where + 0);
+ // hexdump(h.ts, 188);
} else {
- new_ts = 1;
- ts += TS_SZ;
- priv->ts_count++;
- if (priv->ule_skb == NULL) {
- priv->need_pusi = 1;
- priv->ule_sndu_type_1 = 0;
- priv->ule_sndu_len = 0;
+ h.new_ts = 1;
+ h.ts += TS_SZ;
+ h.priv->ts_count++;
+ if (h.priv->ule_skb == NULL) {
+ h.priv->need_pusi = 1;
+ h.priv->ule_sndu_type_1 = 0;
+ h.priv->ule_sndu_len = 0;
}
}
} /* for all available TS cells */
@@ -766,10 +906,10 @@ static int dvb_net_ts_callback(const u8 *buffer1, size_t buffer1_len,
struct net_device *dev = feed->priv;
if (buffer2)
- printk(KERN_WARNING "buffer2 not NULL: %p.\n", buffer2);
+ pr_warn("buffer2 not NULL: %p.\n", buffer2);
if (buffer1_len > 32768)
- printk(KERN_WARNING "length > 32k: %zu.\n", buffer1_len);
- /* printk("TS callback: %u bytes, %u TS cells @ %p.\n",
+ pr_warn("length > 32k: %zu.\n", buffer1_len);
+ /* pr_info("TS callback: %u bytes, %u TS cells @ %p.\n",
buffer1_len, buffer1_len / TS_SZ, buffer1); */
dvb_net_ule(dev, buffer1, buffer1_len);
return 0;
@@ -786,7 +926,7 @@ static void dvb_net_sec(struct net_device *dev,
/* note: pkt_len includes a 32bit checksum */
if (pkt_len < 16) {
- printk("%s: IP/MPE packet length = %d too small.\n",
+ pr_warn("%s: IP/MPE packet length = %d too small.\n",
dev->name, pkt_len);
stats->rx_errors++;
stats->rx_length_errors++;
@@ -824,7 +964,7 @@ static void dvb_net_sec(struct net_device *dev,
* 12 byte MPE header; 4 byte checksum; + 2 byte alignment, 8 byte LLC/SNAP
*/
if (!(skb = dev_alloc_skb(pkt_len - 4 - 12 + 14 + 2 - snap))) {
- //printk(KERN_NOTICE "%s: Memory squeeze, dropping packet.\n", dev->name);
+ //pr_notice("%s: Memory squeeze, dropping packet.\n", dev->name);
stats->rx_dropped++;
return;
}
@@ -903,7 +1043,7 @@ static int dvb_net_filter_sec_set(struct net_device *dev,
*secfilter=NULL;
ret = priv->secfeed->allocate_filter(priv->secfeed, secfilter);
if (ret<0) {
- printk("%s: could not get filter\n", dev->name);
+ pr_err("%s: could not get filter\n", dev->name);
return ret;
}
@@ -944,7 +1084,7 @@ static int dvb_net_feed_start(struct net_device *dev)
netdev_dbg(dev, "rx_mode %i\n", priv->rx_mode);
mutex_lock(&priv->mutex);
if (priv->tsfeed || priv->secfeed || priv->secfilter || priv->multi_secfilter[0])
- printk("%s: BUG %d\n", __func__, __LINE__);
+ pr_err("%s: BUG %d\n", __func__, __LINE__);
priv->secfeed=NULL;
priv->secfilter=NULL;
@@ -955,14 +1095,15 @@ static int dvb_net_feed_start(struct net_device *dev)
ret=demux->allocate_section_feed(demux, &priv->secfeed,
dvb_net_sec_callback);
if (ret<0) {
- printk("%s: could not allocate section feed\n", dev->name);
+ pr_err("%s: could not allocate section feed\n",
+ dev->name);
goto error;
}
- ret = priv->secfeed->set(priv->secfeed, priv->pid, 32768, 1);
+ ret = priv->secfeed->set(priv->secfeed, priv->pid, 1);
if (ret<0) {
- printk("%s: could not set section feed\n", dev->name);
+ pr_err("%s: could not set section feed\n", dev->name);
priv->demux->release_section_feed(priv->demux, priv->secfeed);
priv->secfeed=NULL;
goto error;
@@ -1003,7 +1144,7 @@ static int dvb_net_feed_start(struct net_device *dev)
netdev_dbg(dev, "alloc tsfeed\n");
ret = demux->allocate_ts_feed(demux, &priv->tsfeed, dvb_net_ts_callback);
if (ret < 0) {
- printk("%s: could not allocate ts feed\n", dev->name);
+ pr_err("%s: could not allocate ts feed\n", dev->name);
goto error;
}
@@ -1013,12 +1154,11 @@ static int dvb_net_feed_start(struct net_device *dev)
priv->pid, /* pid */
TS_PACKET, /* type */
DMX_PES_OTHER, /* pes type */
- 32768, /* circular buffer size */
timeout /* timeout */
);
if (ret < 0) {
- printk("%s: could not set ts feed\n", dev->name);
+ pr_err("%s: could not set ts feed\n", dev->name);
priv->demux->release_ts_feed(priv->demux, priv->tsfeed);
priv->tsfeed = NULL;
goto error;
@@ -1067,7 +1207,7 @@ static int dvb_net_feed_stop(struct net_device *dev)
priv->demux->release_section_feed(priv->demux, priv->secfeed);
priv->secfeed = NULL;
} else
- printk("%s: no feed to stop\n", dev->name);
+ pr_err("%s: no feed to stop\n", dev->name);
} else if (priv->feedtype == DVB_NET_FEEDTYPE_ULE) {
if (priv->tsfeed) {
if (priv->tsfeed->is_filtering) {
@@ -1078,7 +1218,7 @@ static int dvb_net_feed_stop(struct net_device *dev)
priv->tsfeed = NULL;
}
else
- printk("%s: no ts feed to stop\n", dev->name);
+ pr_err("%s: no ts feed to stop\n", dev->name);
} else
ret = -EINVAL;
mutex_unlock(&priv->mutex);
@@ -1198,7 +1338,6 @@ static const struct net_device_ops dvb_netdev_ops = {
.ndo_start_xmit = dvb_net_tx,
.ndo_set_rx_mode = dvb_net_set_multicast_list,
.ndo_set_mac_address = dvb_net_set_mac,
- .ndo_change_mtu = eth_change_mtu,
.ndo_validate_addr = eth_validate_addr,
};
@@ -1209,6 +1348,7 @@ static void dvb_net_setup(struct net_device *dev)
dev->header_ops = &dvb_header_ops;
dev->netdev_ops = &dvb_netdev_ops;
dev->mtu = 4096;
+ dev->max_mtu = 4096;
dev->flags |= IFF_NOARP;
}
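
Dropping .ndo_change_mtu above is safe because, once dev->max_mtu is set, the networking core's dev_set_mtu() path rejects out-of-range requests itself; the minimum is assumed to keep the driver's ether-style default. In sketch form:

    dev->mtu     = 4096;   /* default MTU */
    dev->max_mtu = 4096;   /* core-enforced ceiling; no ndo callback needed */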
@@ -1279,7 +1419,7 @@ static int dvb_net_add_if(struct dvb_net *dvbnet, u16 pid, u8 feedtype)
free_netdev(net);
return result;
}
- printk("dvb_net: created network interface %s\n", net->name);
+ pr_info("created network interface %s\n", net->name);
return if_num;
}
@@ -1298,7 +1438,7 @@ static int dvb_net_remove_if(struct dvb_net *dvbnet, unsigned long num)
dvb_net_stop(net);
flush_work(&priv->set_multicast_list_wq);
flush_work(&priv->restart_net_feed_wq);
- printk("dvb_net: removed network interface %s\n", net->name);
+ pr_info("removed network interface %s\n", net->name);
unregister_netdev(net);
dvbnet->state[num]=0;
dvbnet->device[num] = NULL;
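
The refactored dvb_net_ule() loop above drives its helpers through a small return-value convention; it is not spelled out in the patch, but the call sites imply:

    ret = dvb_net_ule_new_payload(&h);
    if (ret < 0)    /* skb allocation failed: abandon the whole buffer */
            return;
    if (ret)        /* this TS cell is fully consumed: next cell */
            continue;
    /* ret == 0: keep parsing the current cell */

The dvb_net_ule_new_ts_cell(), _ts_pusi() and _new_ts() helpers use the same 0/1 contract, which is what lets the old single-function state machine become a flat loop without goto.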
diff --git a/drivers/media/dvb-core/dvbdev.c b/drivers/media/dvb-core/dvbdev.c
index 75a3f4b57fd4..38c844667789 100644
--- a/drivers/media/dvb-core/dvbdev.c
+++ b/drivers/media/dvb-core/dvbdev.c
@@ -21,6 +21,8 @@
*
*/
+#define pr_fmt(fmt) "dvbdev: " fmt
+
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/string.h>
@@ -43,7 +45,11 @@ static int dvbdev_debug;
module_param(dvbdev_debug, int, 0644);
MODULE_PARM_DESC(dvbdev_debug, "Turn on/off device debugging (default:off).");
-#define dprintk if (dvbdev_debug) printk
+#define dprintk(fmt, arg...) do { \
+ if (dvbdev_debug) \
+ printk(KERN_DEBUG pr_fmt("%s: " fmt), \
+ __func__, ##arg); \
+} while (0)
static LIST_HEAD(dvb_adapter_list);
static DEFINE_MUTEX(dvbdev_register_lock);
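
The dprintk rework above wraps the body in do { ... } while (0) so the macro expands to exactly one statement and stays safe in an unbraced if/else. A stand-alone user-space illustration of the idiom (all names illustrative):

#include <stdio.h>

static int dvbdev_debug = 1;

/* Same shape as the kernel macro: guarded, but still one statement. */
#define dprintk(fmt, arg...) do { \
	if (dvbdev_debug) \
		printf("dvbdev: %s: " fmt, __func__, ##arg); \
} while (0)

int main(void)
{
	int err = -1;

	if (err)	/* unbraced if/else remains well-formed */
		dprintk("failed with %d\n", err);
	else
		dprintk("ok\n");
	return 0;
}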
@@ -354,7 +360,7 @@ static int dvb_create_media_entity(struct dvb_device *dvbdev,
if (ret)
return ret;
- printk(KERN_DEBUG "%s: media entity '%s' registered.\n",
+ pr_info("%s: media entity '%s' registered.\n",
__func__, dvbdev->entity->name);
return 0;
@@ -438,7 +444,7 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev,
if ((id = dvbdev_get_free_id (adap, type)) < 0){
mutex_unlock(&dvbdev_register_lock);
*pdvbdev = NULL;
- printk(KERN_ERR "%s: couldn't find free device id\n", __func__);
+ pr_err("%s: couldn't find free device id\n", __func__);
return -ENFILE;
}
@@ -493,8 +499,7 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev,
ret = dvb_register_media_device(dvbdev, type, minor, demux_sink_pads);
if (ret) {
- printk(KERN_ERR
- "%s: dvb_register_media_device failed to create the mediagraph\n",
+ pr_err("%s: dvb_register_media_device failed to create the mediagraph\n",
__func__);
dvb_media_device_free(dvbdev);
@@ -511,11 +516,11 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev,
MKDEV(DVB_MAJOR, minor),
dvbdev, "dvb%d.%s%d", adap->num, dnames[type], id);
if (IS_ERR(clsdev)) {
- printk(KERN_ERR "%s: failed to create device dvb%d.%s%d (%ld)\n",
+ pr_err("%s: failed to create device dvb%d.%s%d (%ld)\n",
__func__, adap->num, dnames[type], id, PTR_ERR(clsdev));
return PTR_ERR(clsdev);
}
- dprintk(KERN_DEBUG "DVB: register adapter%d/%s%d @ minor: %i (0x%02x)\n",
+ dprintk("DVB: register adapter%d/%s%d @ minor: %i (0x%02x)\n",
adap->num, dnames[type], id, minor, minor);
return 0;
@@ -523,7 +528,7 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev,
EXPORT_SYMBOL(dvb_register_device);
-void dvb_unregister_device(struct dvb_device *dvbdev)
+void dvb_remove_device(struct dvb_device *dvbdev)
{
if (!dvbdev)
return;
@@ -537,9 +542,26 @@ void dvb_unregister_device(struct dvb_device *dvbdev)
device_destroy(dvb_class, MKDEV(DVB_MAJOR, dvbdev->minor));
list_del (&dvbdev->list_head);
+}
+EXPORT_SYMBOL(dvb_remove_device);
+
+
+void dvb_free_device(struct dvb_device *dvbdev)
+{
+ if (!dvbdev)
+ return;
+
kfree (dvbdev->fops);
kfree (dvbdev);
}
+EXPORT_SYMBOL(dvb_free_device);
+
+
+void dvb_unregister_device(struct dvb_device *dvbdev)
+{
+ dvb_remove_device(dvbdev);
+ dvb_free_device(dvbdev);
+}
EXPORT_SYMBOL(dvb_unregister_device);
@@ -808,7 +830,7 @@ int dvb_register_adapter(struct dvb_adapter *adap, const char *name,
memset (adap, 0, sizeof(struct dvb_adapter));
INIT_LIST_HEAD (&adap->device_list);
- printk(KERN_INFO "DVB: registering new adapter (%s)\n", name);
+ pr_info("DVB: registering new adapter (%s)\n", name);
adap->num = num;
adap->name = name;
@@ -926,13 +948,13 @@ static int __init init_dvbdev(void)
dev_t dev = MKDEV(DVB_MAJOR, 0);
if ((retval = register_chrdev_region(dev, MAX_DVB_MINORS, "DVB")) != 0) {
- printk(KERN_ERR "dvb-core: unable to get major %d\n", DVB_MAJOR);
+ pr_err("dvb-core: unable to get major %d\n", DVB_MAJOR);
return retval;
}
cdev_init(&dvb_device_cdev, &dvb_device_fops);
if ((retval = cdev_add(&dvb_device_cdev, dev, MAX_DVB_MINORS)) != 0) {
- printk(KERN_ERR "dvb-core: unable register character device\n");
+ pr_err("dvb-core: unable register character device\n");
goto error;
}
diff --git a/drivers/media/dvb-core/dvbdev.h b/drivers/media/dvb-core/dvbdev.h
index 4aff7bd3dea8..8c0a7b51555e 100644
--- a/drivers/media/dvb-core/dvbdev.h
+++ b/drivers/media/dvb-core/dvbdev.h
@@ -34,7 +34,7 @@
#if defined(CONFIG_DVB_MAX_ADAPTERS) && CONFIG_DVB_MAX_ADAPTERS > 0
#define DVB_MAX_ADAPTERS CONFIG_DVB_MAX_ADAPTERS
#else
- #define DVB_MAX_ADAPTERS 8
+ #define DVB_MAX_ADAPTERS 16
#endif
#define DVB_UNSET (-1)
@@ -212,8 +212,31 @@ int dvb_register_device(struct dvb_adapter *adap,
int demux_sink_pads);
/**
+ * dvb_remove_device - Remove a registered DVB device
+ *
+ * This does not free memory. To do that, call dvb_free_device().
+ *
+ * @dvbdev: pointer to struct dvb_device
+ */
+void dvb_remove_device(struct dvb_device *dvbdev);
+
+/**
+ * dvb_free_device - Free memory occupied by a DVB device.
+ *
+ * Call dvb_remove_device() before calling this function.
+ *
+ * @dvbdev: pointer to struct dvb_device
+ */
+void dvb_free_device(struct dvb_device *dvbdev);
+
+/**
* dvb_unregister_device - Unregisters a DVB device
*
+ * This is a combination of dvb_remove_device() and dvb_free_device().
+ * Using this function is usually a mistake, and is often an indicator
+ * of a use-after-free bug (when a userspace process keeps a file
+ * handle to a detached device).
+ *
* @dvbdev: pointer to struct dvb_device
*/
void dvb_unregister_device(struct dvb_device *dvbdev);
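
The remove/free split documented above lets a hot-unpluggable driver take the device node away immediately while deferring the actual kfree() until the last file handle is gone. A sketch of the intended call pattern; the driver struct and function names are hypothetical:

struct mydrv {
	struct dvb_device *dvbdev;
};

/* On hardware disconnect: hide the node from userspace, but keep the
 * memory valid for any process still holding an open file descriptor.
 */
static void mydrv_disconnect(struct mydrv *drv)
{
	dvb_remove_device(drv->dvbdev);
}

/* From the driver's last-release path: freeing is now safe. */
static void mydrv_last_close(struct mydrv *drv)
{
	dvb_free_device(drv->dvbdev);
	drv->dvbdev = NULL;
}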
diff --git a/drivers/media/dvb-frontends/Kconfig b/drivers/media/dvb-frontends/Kconfig
index b71b747ee0ba..c841fa1770be 100644
--- a/drivers/media/dvb-frontends/Kconfig
+++ b/drivers/media/dvb-frontends/Kconfig
@@ -642,7 +642,7 @@ config DVB_S5H1409
to support this frontend.
config DVB_AU8522
- depends on I2C
+ depends on DVB_CORE && I2C
tristate
config DVB_AU8522_DTV
@@ -656,7 +656,7 @@ config DVB_AU8522_DTV
config DVB_AU8522_V4L
tristate "Auvitek AU8522 based ATV demod"
- depends on VIDEO_V4L2 && I2C
+ depends on VIDEO_V4L2 && DVB_CORE && I2C
select DVB_AU8522
default m if !MEDIA_SUBDRV_AUTOSELECT
help
@@ -722,7 +722,7 @@ config DVB_PLL
config DVB_TUNER_DIB0070
tristate "DiBcom DiB0070 silicon base-band tuner"
- depends on I2C
+ depends on DVB_CORE && I2C
default m if !MEDIA_SUBDRV_AUTOSELECT
help
A driver for the silicon baseband tuner DiB0070 from DiBcom.
@@ -731,7 +731,7 @@ config DVB_TUNER_DIB0070
config DVB_TUNER_DIB0090
tristate "DiBcom DiB0090 silicon base-band tuner"
- depends on I2C
+ depends on DVB_CORE && I2C
default m if !MEDIA_SUBDRV_AUTOSELECT
help
A driver for the silicon baseband tuner DiB0090 from DiBcom.
@@ -879,5 +879,6 @@ comment "Tools to develop new frontends"
config DVB_DUMMY_FE
tristate "Dummy frontend driver"
+ depends on DVB_CORE
default n
endmenu
diff --git a/drivers/media/dvb-frontends/af9013.c b/drivers/media/dvb-frontends/af9013.c
index 8bcde336ffd7..c6cb3bbc912a 100644
--- a/drivers/media/dvb-frontends/af9013.c
+++ b/drivers/media/dvb-frontends/af9013.c
@@ -1351,7 +1351,7 @@ static void af9013_release(struct dvb_frontend *fe)
kfree(state);
}
-static struct dvb_frontend_ops af9013_ops;
+static const struct dvb_frontend_ops af9013_ops;
static int af9013_download_firmware(struct af9013_state *state)
{
@@ -1516,7 +1516,7 @@ err:
}
EXPORT_SYMBOL(af9013_attach);
-static struct dvb_frontend_ops af9013_ops = {
+static const struct dvb_frontend_ops af9013_ops = {
.delsys = { SYS_DVBT },
.info = {
.name = "Afatech AF9013",
diff --git a/drivers/media/dvb-frontends/af9033.c b/drivers/media/dvb-frontends/af9033.c
index 9a8157a5f49d..f8818028752e 100644
--- a/drivers/media/dvb-frontends/af9033.c
+++ b/drivers/media/dvb-frontends/af9033.c
@@ -1198,7 +1198,7 @@ err:
return ret;
}
-static struct dvb_frontend_ops af9033_ops = {
+static const struct dvb_frontend_ops af9033_ops = {
.delsys = { SYS_DVBT },
.info = {
.name = "Afatech AF9033 (DVB-T)",
diff --git a/drivers/media/dvb-frontends/as102_fe.c b/drivers/media/dvb-frontends/as102_fe.c
index 9412fcd1bddb..98d575f2744c 100644
--- a/drivers/media/dvb-frontends/as102_fe.c
+++ b/drivers/media/dvb-frontends/as102_fe.c
@@ -415,7 +415,7 @@ static void as102_fe_release(struct dvb_frontend *fe)
}
-static struct dvb_frontend_ops as102_fe_ops = {
+static const struct dvb_frontend_ops as102_fe_ops = {
.delsys = { SYS_DVBT },
.info = {
.name = "Abilis AS102 DVB-T",
diff --git a/drivers/media/dvb-frontends/ascot2e.c b/drivers/media/dvb-frontends/ascot2e.c
index ad304eed656d..0ee0df53b91b 100644
--- a/drivers/media/dvb-frontends/ascot2e.c
+++ b/drivers/media/dvb-frontends/ascot2e.c
@@ -254,14 +254,13 @@ static int ascot2e_init(struct dvb_frontend *fe)
return ascot2e_leave_power_save(priv);
}
-static int ascot2e_release(struct dvb_frontend *fe)
+static void ascot2e_release(struct dvb_frontend *fe)
{
struct ascot2e_priv *priv = fe->tuner_priv;
dev_dbg(&priv->i2c->dev, "%s()\n", __func__);
kfree(fe->tuner_priv);
fe->tuner_priv = NULL;
- return 0;
}
static int ascot2e_sleep(struct dvb_frontend *fe)
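
The int-to-void conversion of ascot2e_release() above reflects that the core never checked the release callbacks' return value, so the error path is simply gone. Post-conversion shape of a tuner release, with illustrative names:

/* release() now returns void and just tears down the private data. */
static void example_tuner_release(struct dvb_frontend *fe)
{
	kfree(fe->tuner_priv);
	fe->tuner_priv = NULL;
}

static const struct dvb_tuner_ops example_tuner_ops = {
	.info = { .name = "Example tuner" },
	.release = example_tuner_release,
};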
diff --git a/drivers/media/dvb-frontends/atbm8830.c b/drivers/media/dvb-frontends/atbm8830.c
index 47248b868e38..07ce05578278 100644
--- a/drivers/media/dvb-frontends/atbm8830.c
+++ b/drivers/media/dvb-frontends/atbm8830.c
@@ -428,7 +428,7 @@ static int atbm8830_i2c_gate_ctrl(struct dvb_frontend *fe, int enable)
return atbm8830_write_reg(priv, REG_I2C_GATE, enable ? 1 : 0);
}
-static struct dvb_frontend_ops atbm8830_ops = {
+static const struct dvb_frontend_ops atbm8830_ops = {
.delsys = { SYS_DTMB },
.info = {
.name = "AltoBeam ATBM8830/8831 DMB-TH",
diff --git a/drivers/media/dvb-frontends/au8522_common.c b/drivers/media/dvb-frontends/au8522_common.c
index f135126bc373..cf4ac240a01f 100644
--- a/drivers/media/dvb-frontends/au8522_common.c
+++ b/drivers/media/dvb-frontends/au8522_common.c
@@ -50,8 +50,8 @@ int au8522_writereg(struct au8522_state *state, u16 reg, u8 data)
ret = i2c_transfer(state->i2c, &msg, 1);
if (ret != 1)
- printk("%s: writereg error (reg == 0x%02x, val == 0x%04x, "
- "ret == %i)\n", __func__, reg, data, ret);
+ printk("%s: writereg error (reg == 0x%02x, val == 0x%04x, ret == %i)\n",
+ __func__, reg, data, ret);
return (ret != 1) ? -1 : 0;
}
diff --git a/drivers/media/dvb-frontends/au8522_dig.c b/drivers/media/dvb-frontends/au8522_dig.c
index e676b9461a59..7ed326e43fc4 100644
--- a/drivers/media/dvb-frontends/au8522_dig.c
+++ b/drivers/media/dvb-frontends/au8522_dig.c
@@ -834,7 +834,7 @@ static int au8522_get_tune_settings(struct dvb_frontend *fe,
return 0;
}
-static struct dvb_frontend_ops au8522_ops;
+static const struct dvb_frontend_ops au8522_ops;
static void au8522_release(struct dvb_frontend *fe)
@@ -894,7 +894,7 @@ error:
}
EXPORT_SYMBOL(au8522_attach);
-static struct dvb_frontend_ops au8522_ops = {
+static const struct dvb_frontend_ops au8522_ops = {
.delsys = { SYS_ATSC, SYS_DVBC_ANNEX_B },
.info = {
.name = "Auvitek AU8522 QAM/8VSB Frontend",
diff --git a/drivers/media/dvb-frontends/bcm3510.c b/drivers/media/dvb-frontends/bcm3510.c
index bb698839e477..617c5e29f919 100644
--- a/drivers/media/dvb-frontends/bcm3510.c
+++ b/drivers/media/dvb-frontends/bcm3510.c
@@ -788,7 +788,7 @@ static int bcm3510_init(struct dvb_frontend* fe)
}
-static struct dvb_frontend_ops bcm3510_ops;
+static const struct dvb_frontend_ops bcm3510_ops;
struct dvb_frontend* bcm3510_attach(const struct bcm3510_config *config,
struct i2c_adapter *i2c)
@@ -834,7 +834,7 @@ error:
}
EXPORT_SYMBOL(bcm3510_attach);
-static struct dvb_frontend_ops bcm3510_ops = {
+static const struct dvb_frontend_ops bcm3510_ops = {
.delsys = { SYS_ATSC, SYS_DVBC_ANNEX_B },
.info = {
.name = "Broadcom BCM3510 VSB/QAM frontend",
diff --git a/drivers/media/dvb-frontends/cx22700.c b/drivers/media/dvb-frontends/cx22700.c
index 5cad925609e0..2b629e23ceeb 100644
--- a/drivers/media/dvb-frontends/cx22700.c
+++ b/drivers/media/dvb-frontends/cx22700.c
@@ -380,7 +380,7 @@ static void cx22700_release(struct dvb_frontend* fe)
kfree(state);
}
-static struct dvb_frontend_ops cx22700_ops;
+static const struct dvb_frontend_ops cx22700_ops;
struct dvb_frontend* cx22700_attach(const struct cx22700_config* config,
struct i2c_adapter* i2c)
@@ -408,7 +408,7 @@ error:
return NULL;
}
-static struct dvb_frontend_ops cx22700_ops = {
+static const struct dvb_frontend_ops cx22700_ops = {
.delsys = { SYS_DVBT },
.info = {
.name = "Conexant CX22700 DVB-T",
diff --git a/drivers/media/dvb-frontends/cx24110.c b/drivers/media/dvb-frontends/cx24110.c
index 6cb81ec12847..cf1bc99d1f32 100644
--- a/drivers/media/dvb-frontends/cx24110.c
+++ b/drivers/media/dvb-frontends/cx24110.c
@@ -120,8 +120,8 @@ static int cx24110_writereg (struct cx24110_state* state, int reg, int data)
int err;
if ((err = i2c_transfer(state->i2c, &msg, 1)) != 1) {
- dprintk ("%s: writereg error (err == %i, reg == 0x%02x,"
- " data == 0x%02x)\n", __func__, err, reg, data);
+ dprintk("%s: writereg error (err == %i, reg == 0x%02x, data == 0x%02x)\n",
+ __func__, err, reg, data);
return -EREMOTEIO;
}
@@ -592,7 +592,7 @@ static void cx24110_release(struct dvb_frontend* fe)
kfree(state);
}
-static struct dvb_frontend_ops cx24110_ops;
+static const struct dvb_frontend_ops cx24110_ops;
struct dvb_frontend* cx24110_attach(const struct cx24110_config* config,
struct i2c_adapter* i2c)
@@ -625,7 +625,7 @@ error:
return NULL;
}
-static struct dvb_frontend_ops cx24110_ops = {
+static const struct dvb_frontend_ops cx24110_ops = {
.delsys = { SYS_DVBS },
.info = {
.name = "Conexant CX24110 DVB-S",
diff --git a/drivers/media/dvb-frontends/cx24113.c b/drivers/media/dvb-frontends/cx24113.c
index 3883c3b31aef..db44ebb7c561 100644
--- a/drivers/media/dvb-frontends/cx24113.c
+++ b/drivers/media/dvb-frontends/cx24113.c
@@ -108,8 +108,8 @@ static int cx24113_writereg(struct cx24113_state *state, int reg, int data)
.flags = 0, .buf = buf, .len = 2 };
int err = i2c_transfer(state->i2c, &msg, 1);
if (err != 1) {
- printk(KERN_DEBUG "%s: writereg error(err == %i, reg == 0x%02x,"
- " data == 0x%02x)\n", __func__, err, reg, data);
+ printk(KERN_DEBUG "%s: writereg error(err == %i, reg == 0x%02x, data == 0x%02x)\n",
+ __func__, err, reg, data);
return err;
}
@@ -527,13 +527,12 @@ static int cx24113_get_frequency(struct dvb_frontend *fe, u32 *frequency)
return 0;
}
-static int cx24113_release(struct dvb_frontend *fe)
+static void cx24113_release(struct dvb_frontend *fe)
{
struct cx24113_state *state = fe->tuner_priv;
dprintk("\n");
fe->tuner_priv = NULL;
kfree(state);
- return 0;
}
static const struct dvb_tuner_ops cx24113_tuner_ops = {
diff --git a/drivers/media/dvb-frontends/cx24116.c b/drivers/media/dvb-frontends/cx24116.c
index 8814f36d53fb..e105532bfba8 100644
--- a/drivers/media/dvb-frontends/cx24116.c
+++ b/drivers/media/dvb-frontends/cx24116.c
@@ -209,8 +209,8 @@ static int cx24116_writereg(struct cx24116_state *state, int reg, int data)
err = i2c_transfer(state->i2c, &msg, 1);
if (err != 1) {
- printk(KERN_ERR "%s: writereg error(err == %i, reg == 0x%02x,"
- " value == 0x%02x)\n", __func__, err, reg, data);
+ printk(KERN_ERR "%s: writereg error(err == %i, reg == 0x%02x, value == 0x%02x)\n",
+ __func__, err, reg, data);
return -EREMOTEIO;
}
@@ -498,8 +498,8 @@ static int cx24116_firmware_ondemand(struct dvb_frontend *fe)
printk(KERN_INFO "%s: Waiting for firmware upload(2)...\n",
__func__);
if (ret) {
- printk(KERN_ERR "%s: No firmware uploaded "
- "(timeout or file not found?)\n", __func__);
+ printk(KERN_ERR "%s: No firmware uploaded (timeout or file not found?)\n",
+ __func__);
return ret;
}
@@ -1116,7 +1116,7 @@ static void cx24116_release(struct dvb_frontend *fe)
kfree(state);
}
-static struct dvb_frontend_ops cx24116_ops;
+static const struct dvb_frontend_ops cx24116_ops;
struct dvb_frontend *cx24116_attach(const struct cx24116_config *config,
struct i2c_adapter *i2c)
@@ -1467,7 +1467,7 @@ static int cx24116_get_algo(struct dvb_frontend *fe)
return DVBFE_ALGO_HW;
}
-static struct dvb_frontend_ops cx24116_ops = {
+static const struct dvb_frontend_ops cx24116_ops = {
.delsys = { SYS_DVBS, SYS_DVBS2 },
.info = {
.name = "Conexant CX24116/CX24118",
diff --git a/drivers/media/dvb-frontends/cx24117.c b/drivers/media/dvb-frontends/cx24117.c
index a3f7eb4e609d..d37cb7762bd6 100644
--- a/drivers/media/dvb-frontends/cx24117.c
+++ b/drivers/media/dvb-frontends/cx24117.c
@@ -474,8 +474,8 @@ static int cx24117_firmware_ondemand(struct dvb_frontend *fe)
"%s: Waiting for firmware upload(2)...\n", __func__);
if (ret) {
dev_err(&state->priv->i2c->dev,
- "%s: No firmware uploaded "
- "(timeout or file not found?)\n", __func__);
+ "%s: No firmware uploaded (timeout or file not found?)\n",
+__func__);
return ret;
}
@@ -1164,7 +1164,7 @@ static void cx24117_release(struct dvb_frontend *fe)
kfree(state);
}
-static struct dvb_frontend_ops cx24117_ops;
+static const struct dvb_frontend_ops cx24117_ops;
struct dvb_frontend *cx24117_attach(const struct cx24117_config *config,
struct i2c_adapter *i2c)
@@ -1618,7 +1618,7 @@ static int cx24117_get_frontend(struct dvb_frontend *fe,
return 0;
}
-static struct dvb_frontend_ops cx24117_ops = {
+static const struct dvb_frontend_ops cx24117_ops = {
.delsys = { SYS_DVBS, SYS_DVBS2 },
.info = {
.name = "Conexant CX24117/CX24132",
diff --git a/drivers/media/dvb-frontends/cx24120.c b/drivers/media/dvb-frontends/cx24120.c
index 066ee387bf25..7f11dcc94d85 100644
--- a/drivers/media/dvb-frontends/cx24120.c
+++ b/drivers/media/dvb-frontends/cx24120.c
@@ -267,7 +267,7 @@ out:
return ret;
}
-static struct dvb_frontend_ops cx24120_ops;
+static const struct dvb_frontend_ops cx24120_ops;
struct dvb_frontend *cx24120_attach(const struct cx24120_config *config,
struct i2c_adapter *i2c)
@@ -1154,8 +1154,7 @@ static int cx24120_set_frontend(struct dvb_frontend *fe)
dev_dbg(&state->i2c->dev,
"delivery system(%d) not supported\n",
c->delivery_system);
- ret = -EINVAL;
- break;
+ return -EINVAL;
}
state->dnxt.delsys = c->delivery_system;
@@ -1552,7 +1551,7 @@ static int cx24120_read_ucblocks(struct dvb_frontend *fe, u32 *ucblocks)
return 0;
}
-static struct dvb_frontend_ops cx24120_ops = {
+static const struct dvb_frontend_ops cx24120_ops = {
.delsys = { SYS_DVBS, SYS_DVBS2 },
.info = {
.name = "Conexant CX24120/CX24118",
diff --git a/drivers/media/dvb-frontends/cx24123.c b/drivers/media/dvb-frontends/cx24123.c
index 113b0949408a..8aed8cc9f93d 100644
--- a/drivers/media/dvb-frontends/cx24123.c
+++ b/drivers/media/dvb-frontends/cx24123.c
@@ -255,8 +255,8 @@ static int cx24123_i2c_writereg(struct cx24123_state *state,
err = i2c_transfer(state->i2c, &msg, 1);
if (err != 1) {
- printk("%s: writereg error(err == %i, reg == 0x%02x,"
- " data == 0x%02x)\n", __func__, err, reg, data);
+ printk("%s: writereg error(err == %i, reg == 0x%02x, data == 0x%02x)\n",
+ __func__, err, reg, data);
return err;
}
@@ -1049,7 +1049,7 @@ struct i2c_adapter *
}
EXPORT_SYMBOL(cx24123_get_tuner_i2c_adapter);
-static struct dvb_frontend_ops cx24123_ops;
+static const struct dvb_frontend_ops cx24123_ops;
struct dvb_frontend *cx24123_attach(const struct cx24123_config *config,
struct i2c_adapter *i2c)
@@ -1111,7 +1111,7 @@ error:
}
EXPORT_SYMBOL(cx24123_attach);
-static struct dvb_frontend_ops cx24123_ops = {
+static const struct dvb_frontend_ops cx24123_ops = {
.delsys = { SYS_DVBS },
.info = {
.name = "Conexant CX24123/CX24109",
diff --git a/drivers/media/dvb-frontends/cxd2841er.c b/drivers/media/dvb-frontends/cxd2841er.c
index 5afb9c508f65..614bfb3740f1 100644
--- a/drivers/media/dvb-frontends/cxd2841er.c
+++ b/drivers/media/dvb-frontends/cxd2841er.c
@@ -3719,7 +3719,7 @@ static int cxd2841er_init_tc(struct dvb_frontend *fe)
return 0;
}
-static struct dvb_frontend_ops cxd2841er_dvbs_s2_ops;
+static const struct dvb_frontend_ops cxd2841er_dvbs_s2_ops;
static struct dvb_frontend_ops cxd2841er_t_c_ops;
static struct dvb_frontend *cxd2841er_attach(struct cxd2841er_config *cfg,
@@ -3801,7 +3801,7 @@ struct dvb_frontend *cxd2841er_attach_t_c(struct cxd2841er_config *cfg,
}
EXPORT_SYMBOL(cxd2841er_attach_t_c);
-static struct dvb_frontend_ops cxd2841er_dvbs_s2_ops = {
+static const struct dvb_frontend_ops cxd2841er_dvbs_s2_ops = {
.delsys = { SYS_DVBS, SYS_DVBS2 },
.info = {
.name = "Sony CXD2841ER DVB-S/S2 demodulator",
@@ -3829,7 +3829,7 @@ static struct dvb_frontend_ops cxd2841er_dvbs_s2_ops = {
.tune = cxd2841er_tune_s
};
static struct dvb_frontend_ops cxd2841er_t_c_ops = {
.delsys = { SYS_DVBT, SYS_DVBT2, SYS_DVBC_ANNEX_A },
.info = {
.name = "", /* will set in attach function */
diff --git a/drivers/media/dvb-frontends/dib0070.c b/drivers/media/dvb-frontends/dib0070.c
index ee7d66997ccd..befc8172159d 100644
--- a/drivers/media/dvb-frontends/dib0070.c
+++ b/drivers/media/dvb-frontends/dib0070.c
@@ -24,6 +24,8 @@
*
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/i2c.h>
@@ -38,12 +40,10 @@ static int debug;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "turn on debugging (default: 0)");
-#define dprintk(args...) do { \
- if (debug) { \
- printk(KERN_DEBUG "DiB0070: "); \
- printk(args); \
- printk("\n"); \
- } \
+#define dprintk(fmt, arg...) do { \
+ if (debug) \
+ printk(KERN_DEBUG pr_fmt("%s: " fmt), \
+ __func__, ##arg); \
} while (0)
#define DIB0070_P1D 0x00
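
The pr_fmt definition added at the top of this file gives every pr_*() call one compile-time prefix, so the remaining literal "DiB0070 ..." strings below now print behind a "dib0070: " prefix as well. A user-space sketch of the idiom (macro names mirror the kernel ones):

#include <stdio.h>

/* The module prefix is pasted on by the preprocessor once, instead of
 * being repeated at every call site.
 */
#define MODNAME "dib0070"
#define pr_fmt(fmt) MODNAME ": " fmt
#define pr_warn(fmt, ...) printf(pr_fmt(fmt), ##__VA_ARGS__)

int main(void)
{
	pr_warn("I2C read failed\n");	/* prints "dib0070: I2C read failed" */
	return 0;
}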
@@ -87,7 +87,7 @@ static u16 dib0070_read_reg(struct dib0070_state *state, u8 reg)
u16 ret;
if (mutex_lock_interruptible(&state->i2c_buffer_lock) < 0) {
- dprintk("could not acquire lock");
+ dprintk("could not acquire lock\n");
return 0;
}
@@ -104,7 +104,7 @@ static u16 dib0070_read_reg(struct dib0070_state *state, u8 reg)
state->msg[1].len = 2;
if (i2c_transfer(state->i2c, state->msg, 2) != 2) {
- printk(KERN_WARNING "DiB0070 I2C read failed\n");
+ pr_warn("DiB0070 I2C read failed\n");
ret = 0;
} else
ret = (state->i2c_read_buffer[0] << 8)
@@ -119,7 +119,7 @@ static int dib0070_write_reg(struct dib0070_state *state, u8 reg, u16 val)
int ret;
if (mutex_lock_interruptible(&state->i2c_buffer_lock) < 0) {
- dprintk("could not acquire lock");
+ dprintk("could not acquire lock\n");
return -EINVAL;
}
state->i2c_write_buffer[0] = reg;
@@ -133,7 +133,7 @@ static int dib0070_write_reg(struct dib0070_state *state, u8 reg, u16 val)
state->msg[0].len = 3;
if (i2c_transfer(state->i2c, state->msg, 1) != 1) {
- printk(KERN_WARNING "DiB0070 I2C write failed\n");
+ pr_warn("DiB0070 I2C write failed\n");
ret = -EREMOTEIO;
} else
ret = 0;
@@ -205,7 +205,7 @@ static int dib0070_captrim(struct dib0070_state *state, enum frontend_tune_state
adc = dib0070_read_reg(state, 0x19);
- dprintk("CAPTRIM=%hd; ADC = %hd (ADC) & %dmV", state->captrim, adc, (u32) adc*(u32)1800/(u32)1024);
+ dprintk("CAPTRIM=%hd; ADC = %hd (ADC) & %dmV\n", state->captrim, adc, (u32) adc*(u32)1800/(u32)1024);
if (adc >= 400) {
adc -= 400;
@@ -216,7 +216,7 @@ static int dib0070_captrim(struct dib0070_state *state, enum frontend_tune_state
}
if (adc < state->adc_diff) {
- dprintk("CAPTRIM=%hd is closer to target (%hd/%hd)", state->captrim, adc, state->adc_diff);
+ dprintk("CAPTRIM=%hd is closer to target (%hd/%hd)\n", state->captrim, adc, state->adc_diff);
state->adc_diff = adc;
state->fcaptrim = state->captrim;
}
@@ -241,7 +241,7 @@ static int dib0070_set_ctrl_lo5(struct dvb_frontend *fe, u8 vco_bias_trim, u8 hf
struct dib0070_state *state = fe->tuner_priv;
u16 lo5 = (third_order_filt << 14) | (0 << 13) | (1 << 12) | (3 << 9) | (cp_current << 6) | (hf_div_trim << 3) | (vco_bias_trim << 0);
- dprintk("CTRL_LO5: 0x%x", lo5);
+ dprintk("CTRL_LO5: 0x%x\n", lo5);
return dib0070_write_reg(state, 0x15, lo5);
}
@@ -256,7 +256,7 @@ void dib0070_ctrl_agc_filter(struct dvb_frontend *fe, u8 open)
dib0070_write_reg(state, 0x1b, 0x4112);
if (state->cfg->vga_filter != 0) {
dib0070_write_reg(state, 0x1a, state->cfg->vga_filter);
- dprintk("vga filter register is set to %x", state->cfg->vga_filter);
+ dprintk("vga filter register is set to %x\n", state->cfg->vga_filter);
} else
dib0070_write_reg(state, 0x1a, 0x0009);
}
@@ -380,7 +380,7 @@ static int dib0070_tune_digital(struct dvb_frontend *fe)
}
if (*tune_state == CT_TUNER_START) {
- dprintk("Tuning for Band: %hd (%d kHz)", band, freq);
+ dprintk("Tuning for Band: %hd (%d kHz)\n", band, freq);
if (state->current_rf != freq) {
u8 REFDIV;
u32 FBDiv, Rest, FREF, VCOF_kHz;
@@ -458,12 +458,12 @@ static int dib0070_tune_digital(struct dvb_frontend *fe)
dib0070_write_reg(state, 0x20,
0x0040 | 0x0020 | 0x0010 | 0x0008 | 0x0002 | 0x0001 | state->current_tune_table_index->tuner_enable);
- dprintk("REFDIV: %hd, FREF: %d", REFDIV, FREF);
- dprintk("FBDIV: %d, Rest: %d", FBDiv, Rest);
- dprintk("Num: %hd, Den: %hd, SD: %hd", (u16) Rest, Den, (state->lo4 >> 12) & 0x1);
- dprintk("HFDIV code: %hd", state->current_tune_table_index->hfdiv);
- dprintk("VCO = %hd", state->current_tune_table_index->vco_band);
- dprintk("VCOF: ((%hd*%d) << 1))", state->current_tune_table_index->vco_multi, freq);
+ dprintk("REFDIV: %hd, FREF: %d\n", REFDIV, FREF);
+ dprintk("FBDIV: %d, Rest: %d\n", FBDiv, Rest);
+ dprintk("Num: %hd, Den: %hd, SD: %hd\n", (u16) Rest, Den, (state->lo4 >> 12) & 0x1);
+ dprintk("HFDIV code: %hd\n", state->current_tune_table_index->hfdiv);
+ dprintk("VCO = %hd\n", state->current_tune_table_index->vco_band);
+ dprintk("VCOF: ((%hd*%d) << 1))\n", state->current_tune_table_index->vco_multi, freq);
*tune_state = CT_TUNER_STEP_0;
} else { /* we are already tuned to this frequency - the configuration is correct */
@@ -625,7 +625,7 @@ static void dib0070_wbd_offset_calibration(struct dib0070_state *state)
u8 gain;
for (gain = 6; gain < 8; gain++) {
state->wbd_offset_3_3[gain - 6] = ((dib0070_read_wbd_offset(state, gain) * 8 * 18 / 33 + 1) / 2);
- dprintk("Gain: %d, WBDOffset (3.3V) = %hd", gain, state->wbd_offset_3_3[gain-6]);
+ dprintk("Gain: %d, WBDOffset (3.3V) = %hd\n", gain, state->wbd_offset_3_3[gain-6]);
}
}
@@ -665,10 +665,10 @@ static int dib0070_reset(struct dvb_frontend *fe)
state->revision = DIB0070S_P1A;
/* P1F or not */
- dprintk("Revision: %x", state->revision);
+ dprintk("Revision: %x\n", state->revision);
if (state->revision == DIB0070_P1D) {
- dprintk("Error: this driver is not to be used meant for P1D or earlier");
+ dprintk("Error: this driver is not to be used meant for P1D or earlier\n");
return -EINVAL;
}
@@ -722,11 +722,10 @@ static int dib0070_get_frequency(struct dvb_frontend *fe, u32 *frequency)
return 0;
}
-static int dib0070_release(struct dvb_frontend *fe)
+static void dib0070_release(struct dvb_frontend *fe)
{
kfree(fe->tuner_priv);
fe->tuner_priv = NULL;
- return 0;
}
static const struct dvb_tuner_ops dib0070_ops = {
@@ -761,7 +760,7 @@ struct dvb_frontend *dib0070_attach(struct dvb_frontend *fe, struct i2c_adapter
if (dib0070_reset(fe) != 0)
goto free_mem;
- printk(KERN_INFO "DiB0070: successfully identified\n");
+ pr_info("DiB0070: successfully identified\n");
memcpy(&fe->ops.tuner_ops, &dib0070_ops, sizeof(struct dvb_tuner_ops));
fe->tuner_priv = state;
diff --git a/drivers/media/dvb-frontends/dib0090.c b/drivers/media/dvb-frontends/dib0090.c
index 14c403254fe0..fd3b33296b15 100644
--- a/drivers/media/dvb-frontends/dib0090.c
+++ b/drivers/media/dvb-frontends/dib0090.c
@@ -24,6 +24,8 @@
*
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/i2c.h>
@@ -38,12 +40,10 @@ static int debug;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "turn on debugging (default: 0)");
-#define dprintk(args...) do { \
- if (debug) { \
- printk(KERN_DEBUG "DiB0090: "); \
- printk(args); \
- printk("\n"); \
- } \
+#define dprintk(fmt, arg...) do { \
+ if (debug) \
+ printk(KERN_DEBUG pr_fmt("%s: " fmt), \
+ __func__, ##arg); \
} while (0)
#define CONFIG_SYS_DVBT
@@ -218,7 +218,7 @@ static u16 dib0090_read_reg(struct dib0090_state *state, u8 reg)
u16 ret;
if (mutex_lock_interruptible(&state->i2c_buffer_lock) < 0) {
- dprintk("could not acquire lock");
+ dprintk("could not acquire lock\n");
return 0;
}
@@ -235,7 +235,7 @@ static u16 dib0090_read_reg(struct dib0090_state *state, u8 reg)
state->msg[1].len = 2;
if (i2c_transfer(state->i2c, state->msg, 2) != 2) {
- printk(KERN_WARNING "DiB0090 I2C read failed\n");
+ pr_warn("DiB0090 I2C read failed\n");
ret = 0;
} else
ret = (state->i2c_read_buffer[0] << 8)
@@ -250,7 +250,7 @@ static int dib0090_write_reg(struct dib0090_state *state, u32 reg, u16 val)
int ret;
if (mutex_lock_interruptible(&state->i2c_buffer_lock) < 0) {
- dprintk("could not acquire lock");
+ dprintk("could not acquire lock\n");
return -EINVAL;
}
@@ -265,7 +265,7 @@ static int dib0090_write_reg(struct dib0090_state *state, u32 reg, u16 val)
state->msg[0].len = 3;
if (i2c_transfer(state->i2c, state->msg, 1) != 1) {
- printk(KERN_WARNING "DiB0090 I2C write failed\n");
+ pr_warn("DiB0090 I2C write failed\n");
ret = -EREMOTEIO;
} else
ret = 0;
@@ -279,7 +279,7 @@ static u16 dib0090_fw_read_reg(struct dib0090_fw_state *state, u8 reg)
u16 ret;
if (mutex_lock_interruptible(&state->i2c_buffer_lock) < 0) {
- dprintk("could not acquire lock");
+ dprintk("could not acquire lock\n");
return 0;
}
@@ -291,7 +291,7 @@ static u16 dib0090_fw_read_reg(struct dib0090_fw_state *state, u8 reg)
state->msg.buf = state->i2c_read_buffer;
state->msg.len = 2;
if (i2c_transfer(state->i2c, &state->msg, 1) != 1) {
- printk(KERN_WARNING "DiB0090 I2C read failed\n");
+ pr_warn("DiB0090 I2C read failed\n");
ret = 0;
} else
ret = (state->i2c_read_buffer[0] << 8)
@@ -306,7 +306,7 @@ static int dib0090_fw_write_reg(struct dib0090_fw_state *state, u8 reg, u16 val)
int ret;
if (mutex_lock_interruptible(&state->i2c_buffer_lock) < 0) {
- dprintk("could not acquire lock");
+ dprintk("could not acquire lock\n");
return -EINVAL;
}
@@ -319,7 +319,7 @@ static int dib0090_fw_write_reg(struct dib0090_fw_state *state, u8 reg, u16 val)
state->msg.buf = state->i2c_write_buffer;
state->msg.len = 2;
if (i2c_transfer(state->i2c, &state->msg, 1) != 1) {
- printk(KERN_WARNING "DiB0090 I2C write failed\n");
+ pr_warn("DiB0090 I2C write failed\n");
ret = -EREMOTEIO;
} else
ret = 0;
@@ -351,7 +351,7 @@ static int dib0090_identify(struct dvb_frontend *fe)
identity->p1g = 0;
identity->in_soc = 0;
- dprintk("Tuner identification (Version = 0x%04x)", v);
+ dprintk("Tuner identification (Version = 0x%04x)\n", v);
/* without PLL lock info */
v &= ~KROSUS_PLL_LOCKED;
@@ -366,19 +366,19 @@ static int dib0090_identify(struct dvb_frontend *fe)
identity->in_soc = 1;
switch (identity->version) {
case SOC_8090_P1G_11R1:
- dprintk("SOC 8090 P1-G11R1 Has been detected");
+ dprintk("SOC 8090 P1-G11R1 Has been detected\n");
identity->p1g = 1;
break;
case SOC_8090_P1G_21R1:
- dprintk("SOC 8090 P1-G21R1 Has been detected");
+ dprintk("SOC 8090 P1-G21R1 Has been detected\n");
identity->p1g = 1;
break;
case SOC_7090_P1G_11R1:
- dprintk("SOC 7090 P1-G11R1 Has been detected");
+ dprintk("SOC 7090 P1-G11R1 Has been detected\n");
identity->p1g = 1;
break;
case SOC_7090_P1G_21R1:
- dprintk("SOC 7090 P1-G21R1 Has been detected");
+ dprintk("SOC 7090 P1-G21R1 Has been detected\n");
identity->p1g = 1;
break;
default:
@@ -387,16 +387,16 @@ static int dib0090_identify(struct dvb_frontend *fe)
} else {
switch ((identity->version >> 5) & 0x7) {
case MP001:
- dprintk("MP001 : 9090/8096");
+ dprintk("MP001 : 9090/8096\n");
break;
case MP005:
- dprintk("MP005 : Single Sband");
+ dprintk("MP005 : Single Sband\n");
break;
case MP008:
- dprintk("MP008 : diversity VHF-UHF-LBAND");
+ dprintk("MP008 : diversity VHF-UHF-LBAND\n");
break;
case MP009:
- dprintk("MP009 : diversity 29098 CBAND-UHF-LBAND-SBAND");
+ dprintk("MP009 : diversity 29098 CBAND-UHF-LBAND-SBAND\n");
break;
default:
goto identification_error;
@@ -404,21 +404,21 @@ static int dib0090_identify(struct dvb_frontend *fe)
switch (identity->version & 0x1f) {
case P1G_21R2:
- dprintk("P1G_21R2 detected");
+ dprintk("P1G_21R2 detected\n");
identity->p1g = 1;
break;
case P1G:
- dprintk("P1G detected");
+ dprintk("P1G detected\n");
identity->p1g = 1;
break;
case P1D_E_F:
- dprintk("P1D/E/F detected");
+ dprintk("P1D/E/F detected\n");
break;
case P1C:
- dprintk("P1C detected");
+ dprintk("P1C detected\n");
break;
case P1A_B:
- dprintk("P1-A/B detected: driver is deactivated - not available");
+ dprintk("P1-A/B detected: driver is deactivated - not available\n");
goto identification_error;
break;
default:
@@ -441,7 +441,7 @@ static int dib0090_fw_identify(struct dvb_frontend *fe)
identity->p1g = 0;
identity->in_soc = 0;
- dprintk("FE: Tuner identification (Version = 0x%04x)", v);
+ dprintk("FE: Tuner identification (Version = 0x%04x)\n", v);
/* without PLL lock info */
v &= ~KROSUS_PLL_LOCKED;
@@ -456,19 +456,19 @@ static int dib0090_fw_identify(struct dvb_frontend *fe)
identity->in_soc = 1;
switch (identity->version) {
case SOC_8090_P1G_11R1:
- dprintk("SOC 8090 P1-G11R1 Has been detected");
+ dprintk("SOC 8090 P1-G11R1 Has been detected\n");
identity->p1g = 1;
break;
case SOC_8090_P1G_21R1:
- dprintk("SOC 8090 P1-G21R1 Has been detected");
+ dprintk("SOC 8090 P1-G21R1 Has been detected\n");
identity->p1g = 1;
break;
case SOC_7090_P1G_11R1:
- dprintk("SOC 7090 P1-G11R1 Has been detected");
+ dprintk("SOC 7090 P1-G11R1 Has been detected\n");
identity->p1g = 1;
break;
case SOC_7090_P1G_21R1:
- dprintk("SOC 7090 P1-G21R1 Has been detected");
+ dprintk("SOC 7090 P1-G21R1 Has been detected\n");
identity->p1g = 1;
break;
default:
@@ -477,16 +477,16 @@ static int dib0090_fw_identify(struct dvb_frontend *fe)
} else {
switch ((identity->version >> 5) & 0x7) {
case MP001:
- dprintk("MP001 : 9090/8096");
+ dprintk("MP001 : 9090/8096\n");
break;
case MP005:
- dprintk("MP005 : Single Sband");
+ dprintk("MP005 : Single Sband\n");
break;
case MP008:
- dprintk("MP008 : diversity VHF-UHF-LBAND");
+ dprintk("MP008 : diversity VHF-UHF-LBAND\n");
break;
case MP009:
- dprintk("MP009 : diversity 29098 CBAND-UHF-LBAND-SBAND");
+ dprintk("MP009 : diversity 29098 CBAND-UHF-LBAND-SBAND\n");
break;
default:
goto identification_error;
@@ -494,21 +494,21 @@ static int dib0090_fw_identify(struct dvb_frontend *fe)
switch (identity->version & 0x1f) {
case P1G_21R2:
- dprintk("P1G_21R2 detected");
+ dprintk("P1G_21R2 detected\n");
identity->p1g = 1;
break;
case P1G:
- dprintk("P1G detected");
+ dprintk("P1G detected\n");
identity->p1g = 1;
break;
case P1D_E_F:
- dprintk("P1D/E/F detected");
+ dprintk("P1D/E/F detected\n");
break;
case P1C:
- dprintk("P1C detected");
+ dprintk("P1C detected\n");
break;
case P1A_B:
- dprintk("P1-A/B detected: driver is deactivated - not available");
+ dprintk("P1-A/B detected: driver is deactivated - not available\n");
goto identification_error;
break;
default:
@@ -574,7 +574,7 @@ static void dib0090_reset_digital(struct dvb_frontend *fe, const struct dib0090_
} while (--i);
if (i == 0) {
- dprintk("Pll: Unable to lock Pll");
+ dprintk("Pll: Unable to lock Pll\n");
return;
}
@@ -596,7 +596,7 @@ static int dib0090_fw_reset_digital(struct dvb_frontend *fe, const struct dib009
u16 v;
int i;
- dprintk("fw reset digital");
+ dprintk("fw reset digital\n");
HARD_RESET(state);
dib0090_fw_write_reg(state, 0x24, EN_PLL | EN_CRYSTAL);
@@ -645,7 +645,7 @@ static int dib0090_fw_reset_digital(struct dvb_frontend *fe, const struct dib009
} while (--i);
if (i == 0) {
- dprintk("Pll: Unable to lock Pll");
+ dprintk("Pll: Unable to lock Pll\n");
return -EIO;
}
@@ -922,7 +922,7 @@ static void dib0090_wbd_target(struct dib0090_state *state, u32 rf)
#endif
state->wbd_target = dib0090_wbd_to_db(state, state->wbd_offset + offset);
- dprintk("wbd-target: %d dB", (u32) state->wbd_target);
+ dprintk("wbd-target: %d dB\n", (u32) state->wbd_target);
}
static const int gain_reg_addr[4] = {
@@ -1019,7 +1019,7 @@ static void dib0090_gain_apply(struct dib0090_state *state, s16 gain_delta, s16
gain_reg[3] |= ((bb % 10) * 100) / 125;
#ifdef DEBUG_AGC
- dprintk("GA CALC: DB: %3d(rf) + %3d(bb) = %3d gain_reg[0]=%04x gain_reg[1]=%04x gain_reg[2]=%04x gain_reg[0]=%04x", rf, bb, rf + bb,
+ dprintk("GA CALC: DB: %3d(rf) + %3d(bb) = %3d gain_reg[0]=%04x gain_reg[1]=%04x gain_reg[2]=%04x gain_reg[0]=%04x\n", rf, bb, rf + bb,
gain_reg[0], gain_reg[1], gain_reg[2], gain_reg[3]);
#endif
@@ -1050,7 +1050,7 @@ static void dib0090_set_rframp_pwm(struct dib0090_state *state, const u16 * cfg)
dib0090_write_reg(state, 0x2a, 0xffff);
- dprintk("total RF gain: %ddB, step: %d", (u32) cfg[0], dib0090_read_reg(state, 0x2a));
+ dprintk("total RF gain: %ddB, step: %d\n", (u32) cfg[0], dib0090_read_reg(state, 0x2a));
dib0090_write_regs(state, 0x2c, cfg + 3, 6);
dib0090_write_regs(state, 0x3e, cfg + 9, 2);
@@ -1069,7 +1069,7 @@ static void dib0090_set_bbramp_pwm(struct dib0090_state *state, const u16 * cfg)
dib0090_set_boost(state, cfg[0] > 500); /* we want the boost if the gain is higher that 50dB */
dib0090_write_reg(state, 0x33, 0xffff);
- dprintk("total BB gain: %ddB, step: %d", (u32) cfg[0], dib0090_read_reg(state, 0x33));
+ dprintk("total BB gain: %ddB, step: %d\n", (u32) cfg[0], dib0090_read_reg(state, 0x33));
dib0090_write_regs(state, 0x35, cfg + 3, 4);
}
@@ -1122,7 +1122,7 @@ void dib0090_pwm_gain_reset(struct dvb_frontend *fe)
/* activate the ramp generator using PWM control */
if (state->rf_ramp)
- dprintk("ramp RF gain = %d BAND = %s version = %d",
+ dprintk("ramp RF gain = %d BAND = %s version = %d\n",
state->rf_ramp[0],
(state->current_band == BAND_CBAND) ? "CBAND" : "NOT CBAND",
state->identity.version & 0x1f);
@@ -1130,10 +1130,10 @@ void dib0090_pwm_gain_reset(struct dvb_frontend *fe)
if (rf_ramp && ((state->rf_ramp && state->rf_ramp[0] == 0) ||
(state->current_band == BAND_CBAND &&
(state->identity.version & 0x1f) <= P1D_E_F))) {
- dprintk("DE-Engage mux for direct gain reg control");
+ dprintk("DE-Engage mux for direct gain reg control\n");
en_pwm_rf_mux = 0;
} else
- dprintk("Engage mux for PWM control");
+ dprintk("Engage mux for PWM control\n");
dib0090_write_reg(state, 0x32, (en_pwm_rf_mux << 12) | (en_pwm_rf_mux << 11));
@@ -1352,7 +1352,7 @@ u16 dib0090_get_wbd_target(struct dvb_frontend *fe)
while (f_MHz > wbd->max_freq)
wbd++;
- dprintk("using wbd-table-entry with max freq %d", wbd->max_freq);
+ dprintk("using wbd-table-entry with max freq %d\n", wbd->max_freq);
if (current_temp < 0)
current_temp = 0;
@@ -1373,8 +1373,8 @@ u16 dib0090_get_wbd_target(struct dvb_frontend *fe)
wbd_tcold += ((wbd_thot - wbd_tcold) * current_temp) >> 7;
state->wbd_target = dib0090_wbd_to_db(state, state->wbd_offset + wbd_tcold);
- dprintk("wbd-target: %d dB", (u32) state->wbd_target);
- dprintk("wbd offset applied is %d", wbd_tcold);
+ dprintk("wbd-target: %d dB\n", (u32) state->wbd_target);
+ dprintk("wbd offset applied is %d\n", wbd_tcold);
return state->wbd_offset + wbd_tcold;
}
@@ -1415,7 +1415,7 @@ int dib0090_update_rframp_7090(struct dvb_frontend *fe, u8 cfg_sensitivity)
if ((!state->identity.p1g) || (!state->identity.in_soc)
|| ((state->identity.version != SOC_7090_P1G_21R1)
&& (state->identity.version != SOC_7090_P1G_11R1))) {
- dprintk("%s() function can only be used for dib7090P", __func__);
+ dprintk("%s() function can only be used for dib7090P\n", __func__);
return -ENODEV;
}
@@ -1598,7 +1598,7 @@ static int dib0090_reset(struct dvb_frontend *fe)
dib0090_write_reg(state, 0x14, 1);
else
dib0090_write_reg(state, 0x14, 2);
- dprintk("Pll lock : %d", (dib0090_read_reg(state, 0x1a) >> 11) & 0x1);
+ dprintk("Pll lock : %d\n", (dib0090_read_reg(state, 0x1a) >> 11) & 0x1);
state->calibrate = DC_CAL | WBD_CAL | TEMP_CAL; /* enable iq-offset-calibration and wbd-calibration when tuning next time */
@@ -1711,7 +1711,8 @@ static int dib0090_dc_offset_calibration(struct dib0090_state *state, enum front
/* fall through */
case CT_TUNER_STEP_0:
- dprintk("Start/continue DC calibration for %s path", (state->dc->i == 1) ? "I" : "Q");
+ dprintk("Start/continue DC calibration for %s path\n",
+ (state->dc->i == 1) ? "I" : "Q");
dib0090_write_reg(state, 0x01, state->dc->bb1);
dib0090_write_reg(state, 0x07, state->bb7 | (state->dc->i << 7));
@@ -1733,13 +1734,13 @@ static int dib0090_dc_offset_calibration(struct dib0090_state *state, enum front
break;
case CT_TUNER_STEP_5: /* found an offset */
- dprintk("adc_diff = %d, current step= %d", (u32) state->adc_diff, state->step);
+ dprintk("adc_diff = %d, current step= %d\n", (u32) state->adc_diff, state->step);
if (state->step == 0 && state->adc_diff < 0) {
state->min_adc_diff = -1023;
- dprintk("Change of sign of the minimum adc diff");
+ dprintk("Change of sign of the minimum adc diff\n");
}
- dprintk("adc_diff = %d, min_adc_diff = %d current_step = %d", state->adc_diff, state->min_adc_diff, state->step);
+ dprintk("adc_diff = %d, min_adc_diff = %d current_step = %d\n", state->adc_diff, state->min_adc_diff, state->step);
/* first turn for this frequency */
if (state->step == 0) {
@@ -1758,12 +1759,12 @@ static int dib0090_dc_offset_calibration(struct dib0090_state *state, enum front
} else {
/* the minimum was what we have seen in the step before */
if (ABS(state->adc_diff) > ABS(state->min_adc_diff)) {
- dprintk("Since adc_diff N = %d > adc_diff step N-1 = %d, Come back one step", state->adc_diff, state->min_adc_diff);
+ dprintk("Since adc_diff N = %d > adc_diff step N-1 = %d, Come back one step\n", state->adc_diff, state->min_adc_diff);
state->step--;
}
dib0090_set_trim(state);
- dprintk("BB Offset Cal, BBreg=%hd,Offset=%hd,Value Set=%hd", state->dc->addr, state->adc_diff, state->step);
+ dprintk("BB Offset Cal, BBreg=%hd,Offset=%hd,Value Set=%hd\n", state->dc->addr, state->adc_diff, state->step);
state->dc++;
if (state->dc->addr == 0) /* done */
@@ -1819,7 +1820,7 @@ static int dib0090_wbd_calibration(struct dib0090_state *state, enum frontend_tu
case CT_TUNER_STEP_0:
state->wbd_offset = dib0090_get_slow_adc_val(state);
- dprintk("WBD calibration offset = %d", state->wbd_offset);
+ dprintk("WBD calibration offset = %d\n", state->wbd_offset);
*tune_state = CT_TUNER_START; /* reset done -> real tuning can now begin */
state->calibrate &= ~WBD_CAL;
break;
@@ -2064,7 +2065,7 @@ int dib0090_update_tuning_table_7090(struct dvb_frontend *fe,
if ((!state->identity.p1g) || (!state->identity.in_soc)
|| ((state->identity.version != SOC_7090_P1G_21R1)
&& (state->identity.version != SOC_7090_P1G_11R1))) {
- dprintk("%s() function can only be used for dib7090", __func__);
+ dprintk("%s() function can only be used for dib7090\n", __func__);
return -ENODEV;
}
@@ -2098,7 +2099,8 @@ static int dib0090_captrim_search(struct dib0090_state *state, enum frontend_tun
force_soft_search = 1;
if (*tune_state == CT_TUNER_START) {
- dprintk("Start Captrim search : %s", (force_soft_search == 1) ? "FORCE SOFT SEARCH" : "AUTO");
+ dprintk("Start Captrim search : %s\n",
+ (force_soft_search == 1) ? "FORCE SOFT SEARCH" : "AUTO");
dib0090_write_reg(state, 0x10, 0x2B1);
dib0090_write_reg(state, 0x1e, 0x0032);
@@ -2140,13 +2142,13 @@ static int dib0090_captrim_search(struct dib0090_state *state, enum frontend_tun
dib0090_read_reg(state, 0x40);
state->fcaptrim = dib0090_read_reg(state, 0x18) & 0x7F;
- dprintk("***Final Captrim= 0x%x", state->fcaptrim);
+ dprintk("***Final Captrim= 0x%x\n", state->fcaptrim);
*tune_state = CT_TUNER_STEP_3;
} else {
/* MERGE for all krosus before P1G */
adc = dib0090_get_slow_adc_val(state);
- dprintk("CAPTRIM=%d; ADC = %d (ADC) & %dmV", (u32) state->captrim, (u32) adc, (u32) (adc) * (u32) 1800 / (u32) 1024);
+ dprintk("CAPTRIM=%d; ADC = %d (ADC) & %dmV\n", (u32) state->captrim, (u32) adc, (u32) (adc) * (u32) 1800 / (u32) 1024);
if (state->rest == 0 || state->identity.in_soc) { /* Just for 8090P SOCS where auto captrim HW bug : TO CHECK IN ACI for SOCS !!! if 400 for 8090p SOC => tune issue !!! */
adc_target = 200;
@@ -2162,7 +2164,7 @@ static int dib0090_captrim_search(struct dib0090_state *state, enum frontend_tun
}
if (adc < state->adc_diff) {
- dprintk("CAPTRIM=%d is closer to target (%d/%d)", (u32) state->captrim, (u32) adc, (u32) state->adc_diff);
+ dprintk("CAPTRIM=%d is closer to target (%d/%d)\n", (u32) state->captrim, (u32) adc, (u32) state->adc_diff);
state->adc_diff = adc;
state->fcaptrim = state->captrim;
}
@@ -2216,7 +2218,7 @@ static int dib0090_get_temperature(struct dib0090_state *state, enum frontend_tu
val = dib0090_get_slow_adc_val(state);
state->temperature = ((s16) ((val - state->adc_diff) * 180) >> 8) + 55;
- dprintk("temperature: %d C", state->temperature - 30);
+ dprintk("temperature: %d C\n", state->temperature - 30);
*tune_state = CT_TUNER_STEP_2;
break;
@@ -2478,13 +2480,13 @@ static int dib0090_tune(struct dvb_frontend *fe)
wbd++;
dib0090_write_reg(state, 0x1e, 0x07ff);
- dprintk("Final Captrim: %d", (u32) state->fcaptrim);
- dprintk("HFDIV code: %d", (u32) pll->hfdiv_code);
- dprintk("VCO = %d", (u32) pll->vco_band);
- dprintk("VCOF in kHz: %d ((%d*%d) << 1))", (u32) ((pll->hfdiv * state->rf_request) * 2), (u32) pll->hfdiv, (u32) state->rf_request);
- dprintk("REFDIV: %d, FREF: %d", (u32) 1, (u32) state->config->io.clock_khz);
- dprintk("FBDIV: %d, Rest: %d", (u32) dib0090_read_reg(state, 0x15), (u32) dib0090_read_reg(state, 0x17));
- dprintk("Num: %d, Den: %d, SD: %d", (u32) dib0090_read_reg(state, 0x17), (u32) (dib0090_read_reg(state, 0x16) >> 8),
+ dprintk("Final Captrim: %d\n", (u32) state->fcaptrim);
+ dprintk("HFDIV code: %d\n", (u32) pll->hfdiv_code);
+ dprintk("VCO = %d\n", (u32) pll->vco_band);
+ dprintk("VCOF in kHz: %d ((%d*%d) << 1))\n", (u32) ((pll->hfdiv * state->rf_request) * 2), (u32) pll->hfdiv, (u32) state->rf_request);
+ dprintk("REFDIV: %d, FREF: %d\n", (u32) 1, (u32) state->config->io.clock_khz);
+ dprintk("FBDIV: %d, Rest: %d\n", (u32) dib0090_read_reg(state, 0x15), (u32) dib0090_read_reg(state, 0x17));
+ dprintk("Num: %d, Den: %d, SD: %d\n", (u32) dib0090_read_reg(state, 0x17), (u32) (dib0090_read_reg(state, 0x16) >> 8),
(u32) dib0090_read_reg(state, 0x1c) & 0x3);
#define WBD 0x781 /* 1 1 1 1 0000 0 0 1 */
@@ -2498,7 +2500,7 @@ static int dib0090_tune(struct dvb_frontend *fe)
dib0090_write_reg(state, 0x10, state->wbdmux);
if ((tune->tuner_enable == EN_CAB) && state->identity.p1g) {
- dprintk("P1G : The cable band is selected and lna_tune = %d", tune->lna_tune);
+ dprintk("P1G : The cable band is selected and lna_tune = %d\n", tune->lna_tune);
dib0090_write_reg(state, 0x09, tune->lna_bias);
dib0090_write_reg(state, 0x0b, 0xb800 | (tune->lna_tune << 6) | (tune->switch_trim));
} else
@@ -2524,11 +2526,10 @@ static int dib0090_tune(struct dvb_frontend *fe)
return ret;
}
-static int dib0090_release(struct dvb_frontend *fe)
+static void dib0090_release(struct dvb_frontend *fe)
{
kfree(fe->tuner_priv);
fe->tuner_priv = NULL;
- return 0;
}
enum frontend_tune_state dib0090_get_tune_state(struct dvb_frontend *fe)
@@ -2643,7 +2644,7 @@ struct dvb_frontend *dib0090_register(struct dvb_frontend *fe, struct i2c_adapte
if (dib0090_reset(fe) != 0)
goto free_mem;
- printk(KERN_INFO "DiB0090: successfully identified\n");
+ pr_info("DiB0090: successfully identified\n");
memcpy(&fe->ops.tuner_ops, &dib0090_ops, sizeof(struct dvb_tuner_ops));
return fe;
@@ -2670,7 +2671,7 @@ struct dvb_frontend *dib0090_fw_register(struct dvb_frontend *fe, struct i2c_ada
if (dib0090_fw_reset_digital(fe, st->config) != 0)
goto free_mem;
- dprintk("DiB0090 FW: successfully identified");
+ dprintk("DiB0090 FW: successfully identified\n");
memcpy(&fe->ops.tuner_ops, &dib0090_fw_ops, sizeof(struct dvb_tuner_ops));
return fe;
diff --git a/drivers/media/dvb-frontends/dib3000mb.c b/drivers/media/dvb-frontends/dib3000mb.c
index 6821ecb53d63..068bec104e29 100644
--- a/drivers/media/dvb-frontends/dib3000mb.c
+++ b/drivers/media/dvb-frontends/dib3000mb.c
@@ -21,6 +21,8 @@
*
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
@@ -42,13 +44,13 @@ static int debug;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "set debugging level (1=info,2=xfer,4=setfe,8=getfe (|-able)).");
-#define deb_info(args...) dprintk(0x01,args)
-#define deb_i2c(args...) dprintk(0x02,args)
-#define deb_srch(args...) dprintk(0x04,args)
-#define deb_info(args...) dprintk(0x01,args)
-#define deb_xfer(args...) dprintk(0x02,args)
-#define deb_setf(args...) dprintk(0x04,args)
-#define deb_getf(args...) dprintk(0x08,args)
+#define deb_info(args...) dprintk(0x01, args)
+#define deb_i2c(args...) dprintk(0x02, args)
+#define deb_srch(args...) dprintk(0x04, args)
+#define deb_info(args...) dprintk(0x01, args)
+#define deb_xfer(args...) dprintk(0x02, args)
+#define deb_setf(args...) dprintk(0x04, args)
+#define deb_getf(args...) dprintk(0x08, args)
static int dib3000_read_reg(struct dib3000_state *state, u16 reg)
{
@@ -126,103 +128,96 @@ static int dib3000mb_set_frontend(struct dvb_frontend *fe, int tuner)
fe->ops.tuner_ops.set_params(fe);
if (fe->ops.i2c_gate_ctrl) fe->ops.i2c_gate_ctrl(fe, 0);
- deb_setf("bandwidth: ");
switch (c->bandwidth_hz) {
case 8000000:
- deb_setf("8 MHz\n");
wr_foreach(dib3000mb_reg_timing_freq, dib3000mb_timing_freq[2]);
wr_foreach(dib3000mb_reg_bandwidth, dib3000mb_bandwidth_8mhz);
break;
case 7000000:
- deb_setf("7 MHz\n");
wr_foreach(dib3000mb_reg_timing_freq, dib3000mb_timing_freq[1]);
wr_foreach(dib3000mb_reg_bandwidth, dib3000mb_bandwidth_7mhz);
break;
case 6000000:
- deb_setf("6 MHz\n");
wr_foreach(dib3000mb_reg_timing_freq, dib3000mb_timing_freq[0]);
wr_foreach(dib3000mb_reg_bandwidth, dib3000mb_bandwidth_6mhz);
break;
case 0:
return -EOPNOTSUPP;
default:
- err("unknown bandwidth value.");
+ pr_err("unknown bandwidth value.\n");
return -EINVAL;
}
+ deb_setf("bandwidth: %d MHZ\n", c->bandwidth_hz / 1000000);
}
wr(DIB3000MB_REG_LOCK1_MASK, DIB3000MB_LOCK1_SEARCH_4);
- deb_setf("transmission mode: ");
switch (c->transmission_mode) {
case TRANSMISSION_MODE_2K:
- deb_setf("2k\n");
+ deb_setf("transmission mode: 2k\n");
wr(DIB3000MB_REG_FFT, DIB3000_TRANSMISSION_MODE_2K);
break;
case TRANSMISSION_MODE_8K:
- deb_setf("8k\n");
+ deb_setf("transmission mode: 8k\n");
wr(DIB3000MB_REG_FFT, DIB3000_TRANSMISSION_MODE_8K);
break;
case TRANSMISSION_MODE_AUTO:
- deb_setf("auto\n");
+ deb_setf("transmission mode: auto\n");
break;
default:
return -EINVAL;
}
- deb_setf("guard: ");
switch (c->guard_interval) {
case GUARD_INTERVAL_1_32:
- deb_setf("1_32\n");
+ deb_setf("guard 1_32\n");
wr(DIB3000MB_REG_GUARD_TIME, DIB3000_GUARD_TIME_1_32);
break;
case GUARD_INTERVAL_1_16:
- deb_setf("1_16\n");
+ deb_setf("guard 1_16\n");
wr(DIB3000MB_REG_GUARD_TIME, DIB3000_GUARD_TIME_1_16);
break;
case GUARD_INTERVAL_1_8:
- deb_setf("1_8\n");
+ deb_setf("guard 1_8\n");
wr(DIB3000MB_REG_GUARD_TIME, DIB3000_GUARD_TIME_1_8);
break;
case GUARD_INTERVAL_1_4:
- deb_setf("1_4\n");
+ deb_setf("guard 1_4\n");
wr(DIB3000MB_REG_GUARD_TIME, DIB3000_GUARD_TIME_1_4);
break;
case GUARD_INTERVAL_AUTO:
- deb_setf("auto\n");
+ deb_setf("guard auto\n");
break;
default:
return -EINVAL;
}
- deb_setf("inversion: ");
switch (c->inversion) {
case INVERSION_OFF:
- deb_setf("off\n");
+ deb_setf("inversion off\n");
wr(DIB3000MB_REG_DDS_INV, DIB3000_DDS_INVERSION_OFF);
break;
case INVERSION_AUTO:
- deb_setf("auto ");
+ deb_setf("inversion auto\n");
break;
case INVERSION_ON:
- deb_setf("on\n");
+ deb_setf("inversion on\n");
wr(DIB3000MB_REG_DDS_INV, DIB3000_DDS_INVERSION_ON);
break;
default:
return -EINVAL;
}
- deb_setf("modulation: ");
switch (c->modulation) {
case QPSK:
- deb_setf("qpsk\n");
+ deb_setf("modulation: qpsk\n");
wr(DIB3000MB_REG_QAM, DIB3000_CONSTELLATION_QPSK);
break;
case QAM_16:
- deb_setf("qam16\n");
+ deb_setf("modulation: qam16\n");
wr(DIB3000MB_REG_QAM, DIB3000_CONSTELLATION_16QAM);
break;
case QAM_64:
- deb_setf("qam64\n");
+ deb_setf("modulation: qam64\n");
wr(DIB3000MB_REG_QAM, DIB3000_CONSTELLATION_64QAM);
break;
case QAM_AUTO:
@@ -230,69 +225,64 @@ static int dib3000mb_set_frontend(struct dvb_frontend *fe, int tuner)
default:
return -EINVAL;
}
- deb_setf("hierarchy: ");
switch (c->hierarchy) {
case HIERARCHY_NONE:
- deb_setf("none ");
+ deb_setf("hierarchy: none\n");
/* fall through */
case HIERARCHY_1:
- deb_setf("alpha=1\n");
+ deb_setf("hierarchy: alpha=1\n");
wr(DIB3000MB_REG_VIT_ALPHA, DIB3000_ALPHA_1);
break;
case HIERARCHY_2:
- deb_setf("alpha=2\n");
+ deb_setf("hierarchy: alpha=2\n");
wr(DIB3000MB_REG_VIT_ALPHA, DIB3000_ALPHA_2);
break;
case HIERARCHY_4:
- deb_setf("alpha=4\n");
+ deb_setf("hierarchy: alpha=4\n");
wr(DIB3000MB_REG_VIT_ALPHA, DIB3000_ALPHA_4);
break;
case HIERARCHY_AUTO:
- deb_setf("alpha=auto\n");
+ deb_setf("hierarchy: alpha=auto\n");
break;
default:
return -EINVAL;
}
- deb_setf("hierarchy: ");
if (c->hierarchy == HIERARCHY_NONE) {
- deb_setf("none\n");
wr(DIB3000MB_REG_VIT_HRCH, DIB3000_HRCH_OFF);
wr(DIB3000MB_REG_VIT_HP, DIB3000_SELECT_HP);
fe_cr = c->code_rate_HP;
} else if (c->hierarchy != HIERARCHY_AUTO) {
- deb_setf("on\n");
wr(DIB3000MB_REG_VIT_HRCH, DIB3000_HRCH_ON);
wr(DIB3000MB_REG_VIT_HP, DIB3000_SELECT_LP);
fe_cr = c->code_rate_LP;
}
- deb_setf("fec: ");
switch (fe_cr) {
case FEC_1_2:
- deb_setf("1_2\n");
+ deb_setf("fec: 1_2\n");
wr(DIB3000MB_REG_VIT_CODE_RATE, DIB3000_FEC_1_2);
break;
case FEC_2_3:
- deb_setf("2_3\n");
+ deb_setf("fec: 2_3\n");
wr(DIB3000MB_REG_VIT_CODE_RATE, DIB3000_FEC_2_3);
break;
case FEC_3_4:
- deb_setf("3_4\n");
+ deb_setf("fec: 3_4\n");
wr(DIB3000MB_REG_VIT_CODE_RATE, DIB3000_FEC_3_4);
break;
case FEC_5_6:
- deb_setf("5_6\n");
+ deb_setf("fec: 5_6\n");
wr(DIB3000MB_REG_VIT_CODE_RATE, DIB3000_FEC_5_6);
break;
case FEC_7_8:
- deb_setf("7_8\n");
+ deb_setf("fec: 7_8\n");
wr(DIB3000MB_REG_VIT_CODE_RATE, DIB3000_FEC_7_8);
break;
case FEC_NONE:
- deb_setf("none ");
+ deb_setf("fec: none\n");
break;
case FEC_AUTO:
- deb_setf("auto\n");
+ deb_setf("fec: auto\n");
break;
default:
return -EINVAL;
@@ -357,7 +347,8 @@ static int dib3000mb_set_frontend(struct dvb_frontend *fe, int tuner)
rd(DIB3000MB_REG_LOCK2_VALUE))) < 0 && as_count++ < 100)
msleep(1);
- deb_setf("search_state after autosearch %d after %d checks\n",search_state,as_count);
+ deb_setf("search_state after autosearch %d after %d checks\n",
+ search_state, as_count);
if (search_state == 1) {
if (dib3000mb_get_frontend(fe, c) == 0) {
@@ -464,7 +455,7 @@ static int dib3000mb_get_frontend(struct dvb_frontend* fe,
return 0;
dds_val = ((rd(DIB3000MB_REG_DDS_VALUE_MSB) & 0xff) << 16) + rd(DIB3000MB_REG_DDS_VALUE_LSB);
- deb_getf("DDS_VAL: %x %x %x",dds_val, rd(DIB3000MB_REG_DDS_VALUE_MSB), rd(DIB3000MB_REG_DDS_VALUE_LSB));
+ deb_getf("DDS_VAL: %x %x %x\n", dds_val, rd(DIB3000MB_REG_DDS_VALUE_MSB), rd(DIB3000MB_REG_DDS_VALUE_LSB));
if (dds_val < threshold)
inv_test1 = 0;
else if (dds_val == threshold)
@@ -473,7 +464,7 @@ static int dib3000mb_get_frontend(struct dvb_frontend* fe,
inv_test1 = 2;
dds_val = ((rd(DIB3000MB_REG_DDS_FREQ_MSB) & 0xff) << 16) + rd(DIB3000MB_REG_DDS_FREQ_LSB);
- deb_getf("DDS_FREQ: %x %x %x",dds_val, rd(DIB3000MB_REG_DDS_FREQ_MSB), rd(DIB3000MB_REG_DDS_FREQ_LSB));
+ deb_getf("DDS_FREQ: %x %x %x\n", dds_val, rd(DIB3000MB_REG_DDS_FREQ_MSB), rd(DIB3000MB_REG_DDS_FREQ_LSB));
if (dds_val < threshold)
inv_test2 = 0;
else if (dds_val == threshold)
@@ -490,19 +481,19 @@ static int dib3000mb_get_frontend(struct dvb_frontend* fe,
switch ((tps_val = rd(DIB3000MB_REG_TPS_QAM))) {
case DIB3000_CONSTELLATION_QPSK:
- deb_getf("QPSK ");
+ deb_getf("QPSK\n");
c->modulation = QPSK;
break;
case DIB3000_CONSTELLATION_16QAM:
- deb_getf("QAM16 ");
+ deb_getf("QAM16\n");
c->modulation = QAM_16;
break;
case DIB3000_CONSTELLATION_64QAM:
- deb_getf("QAM64 ");
+ deb_getf("QAM64\n");
c->modulation = QAM_64;
break;
default:
- err("Unexpected constellation returned by TPS (%d)", tps_val);
+ pr_err("Unexpected constellation returned by TPS (%d)\n", tps_val);
break;
}
deb_getf("TPS: %d\n", tps_val);
@@ -513,23 +504,23 @@ static int dib3000mb_get_frontend(struct dvb_frontend* fe,
c->code_rate_HP = FEC_NONE;
switch ((tps_val = rd(DIB3000MB_REG_TPS_VIT_ALPHA))) {
case DIB3000_ALPHA_0:
- deb_getf("HIERARCHY_NONE ");
+ deb_getf("HIERARCHY_NONE\n");
c->hierarchy = HIERARCHY_NONE;
break;
case DIB3000_ALPHA_1:
- deb_getf("HIERARCHY_1 ");
+ deb_getf("HIERARCHY_1\n");
c->hierarchy = HIERARCHY_1;
break;
case DIB3000_ALPHA_2:
- deb_getf("HIERARCHY_2 ");
+ deb_getf("HIERARCHY_2\n");
c->hierarchy = HIERARCHY_2;
break;
case DIB3000_ALPHA_4:
- deb_getf("HIERARCHY_4 ");
+ deb_getf("HIERARCHY_4\n");
c->hierarchy = HIERARCHY_4;
break;
default:
- err("Unexpected ALPHA value returned by TPS (%d)", tps_val);
+ pr_err("Unexpected ALPHA value returned by TPS (%d)\n", tps_val);
break;
}
deb_getf("TPS: %d\n", tps_val);
@@ -546,65 +537,65 @@ static int dib3000mb_get_frontend(struct dvb_frontend* fe,
switch (tps_val) {
case DIB3000_FEC_1_2:
- deb_getf("FEC_1_2 ");
+ deb_getf("FEC_1_2\n");
*cr = FEC_1_2;
break;
case DIB3000_FEC_2_3:
- deb_getf("FEC_2_3 ");
+ deb_getf("FEC_2_3\n");
*cr = FEC_2_3;
break;
case DIB3000_FEC_3_4:
- deb_getf("FEC_3_4 ");
+ deb_getf("FEC_3_4\n");
*cr = FEC_3_4;
break;
case DIB3000_FEC_5_6:
- deb_getf("FEC_5_6 ");
+ deb_getf("FEC_5_6\n");
*cr = FEC_4_5;
break;
case DIB3000_FEC_7_8:
- deb_getf("FEC_7_8 ");
+ deb_getf("FEC_7_8\n");
*cr = FEC_7_8;
break;
default:
- err("Unexpected FEC returned by TPS (%d)", tps_val);
+ pr_err("Unexpected FEC returned by TPS (%d)\n", tps_val);
break;
}
deb_getf("TPS: %d\n",tps_val);
switch ((tps_val = rd(DIB3000MB_REG_TPS_GUARD_TIME))) {
case DIB3000_GUARD_TIME_1_32:
- deb_getf("GUARD_INTERVAL_1_32 ");
+ deb_getf("GUARD_INTERVAL_1_32\n");
c->guard_interval = GUARD_INTERVAL_1_32;
break;
case DIB3000_GUARD_TIME_1_16:
- deb_getf("GUARD_INTERVAL_1_16 ");
+ deb_getf("GUARD_INTERVAL_1_16\n");
c->guard_interval = GUARD_INTERVAL_1_16;
break;
case DIB3000_GUARD_TIME_1_8:
- deb_getf("GUARD_INTERVAL_1_8 ");
+ deb_getf("GUARD_INTERVAL_1_8\n");
c->guard_interval = GUARD_INTERVAL_1_8;
break;
case DIB3000_GUARD_TIME_1_4:
- deb_getf("GUARD_INTERVAL_1_4 ");
+ deb_getf("GUARD_INTERVAL_1_4\n");
c->guard_interval = GUARD_INTERVAL_1_4;
break;
default:
- err("Unexpected Guard Time returned by TPS (%d)", tps_val);
+ pr_err("Unexpected Guard Time returned by TPS (%d)\n", tps_val);
break;
}
deb_getf("TPS: %d\n", tps_val);
switch ((tps_val = rd(DIB3000MB_REG_TPS_FFT))) {
case DIB3000_TRANSMISSION_MODE_2K:
- deb_getf("TRANSMISSION_MODE_2K ");
+ deb_getf("TRANSMISSION_MODE_2K\n");
c->transmission_mode = TRANSMISSION_MODE_2K;
break;
case DIB3000_TRANSMISSION_MODE_8K:
- deb_getf("TRANSMISSION_MODE_8K ");
+ deb_getf("TRANSMISSION_MODE_8K\n");
c->transmission_mode = TRANSMISSION_MODE_8K;
break;
default:
- err("unexpected transmission mode return by TPS (%d)", tps_val);
+ pr_err("unexpected transmission mode return by TPS (%d)\n", tps_val);
break;
}
deb_getf("TPS: %d\n", tps_val);
@@ -751,7 +742,7 @@ static int dib3000mb_tuner_pass_ctrl(struct dvb_frontend *fe, int onoff, u8 pll_
return 0;
}
-static struct dvb_frontend_ops dib3000mb_ops;
+static const struct dvb_frontend_ops dib3000mb_ops;
struct dvb_frontend* dib3000mb_attach(const struct dib3000_config* config,
struct i2c_adapter* i2c, struct dib_fe_xfer_ops *xfer_ops)
@@ -791,7 +782,7 @@ error:
return NULL;
}
-static struct dvb_frontend_ops dib3000mb_ops = {
+static const struct dvb_frontend_ops dib3000mb_ops = {
.delsys = { SYS_DVBT },
.info = {
.name = "DiBcom 3000M-B DVB-T",
diff --git a/drivers/media/dvb-frontends/dib3000mb_priv.h b/drivers/media/dvb-frontends/dib3000mb_priv.h
index 0459d5c84314..ef7f5d136c6b 100644
--- a/drivers/media/dvb-frontends/dib3000mb_priv.h
+++ b/drivers/media/dvb-frontends/dib3000mb_priv.h
@@ -13,20 +13,15 @@
#ifndef __DIB3000MB_PRIV_H_INCLUDED__
#define __DIB3000MB_PRIV_H_INCLUDED__
-/* info and err, taken from usb.h, if there is anything available like by default. */
-#define err(format, arg...) printk(KERN_ERR "dib3000: " format "\n" , ## arg)
-#define info(format, arg...) printk(KERN_INFO "dib3000: " format "\n" , ## arg)
-#define warn(format, arg...) printk(KERN_WARNING "dib3000: " format "\n" , ## arg)
-
/* handy shortcuts */
#define rd(reg) dib3000_read_reg(state,reg)
#define wr(reg,val) if (dib3000_write_reg(state,reg,val)) \
- { err("while sending 0x%04x to 0x%04x.",val,reg); return -EREMOTEIO; }
+ { pr_err("while sending 0x%04x to 0x%04x.", val, reg); return -EREMOTEIO; }
#define wr_foreach(a,v) { int i; \
if (sizeof(a) != sizeof(v)) \
- err("sizeof: %zu %zu is different",sizeof(a),sizeof(v));\
+ pr_err("sizeof: %zu %zu is different", sizeof(a), sizeof(v));\
for (i=0; i < sizeof(a)/sizeof(u16); i++) \
wr(a[i],v[i]); \
}
@@ -37,8 +32,11 @@
/* debug */
-#define dprintk(level,args...) \
- do { if ((debug & level)) { printk(args); } } while (0)
+#define dprintk(level, fmt, arg...) do { \
+ if (debug & level) \
+ printk(KERN_DEBUG pr_fmt("%s: " fmt), \
+ __func__, ##arg); \
+} while (0)
/* mask for enabling a specific pid for the pid_filter */
#define DIB3000_ACTIVATE_PID_FILTERING (0x2000)
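The reworked dprintk() keeps the old bit-mask test against the debug module parameter, but now routes everything through a single printk() at KERN_DEBUG level with a pr_fmt()/__func__ prefix, instead of printk(args) with whatever level the caller happened to embed. A compilable userspace approximation (the prefix string and the 0x04 mask are illustrative, not the driver's):

    #include <stdio.h>

    static int debug = 0x04;                /* stands in for module_param(debug, ...) */

    #define pr_fmt(fmt) "dib3000: " fmt     /* stands in for KBUILD_MODNAME */
    #define dprintk(level, fmt, arg...) do {                        \
            if (debug & (level))                                    \
                    printf(pr_fmt("%s: " fmt), __func__, ##arg);    \
    } while (0)

    int main(void)
    {
            dprintk(0x04, "TPS: %d\n", 42); /* prints: dib3000: main: TPS: 42 */
            dprintk(0x02, "masked out\n");  /* debug bit is clear, no output */
            return 0;
    }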
diff --git a/drivers/media/dvb-frontends/dib3000mc.c b/drivers/media/dvb-frontends/dib3000mc.c
index da0f1dc5aaf7..224283fe100a 100644
--- a/drivers/media/dvb-frontends/dib3000mc.c
+++ b/drivers/media/dvb-frontends/dib3000mc.c
@@ -11,6 +11,8 @@
* published by the Free Software Foundation, version 2.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/i2c.h>
@@ -27,7 +29,11 @@ static int buggy_sfn_workaround;
module_param(buggy_sfn_workaround, int, 0644);
MODULE_PARM_DESC(buggy_sfn_workaround, "Enable work-around for buggy SFNs (default: 0)");
-#define dprintk(args...) do { if (debug) { printk(KERN_DEBUG "DiB3000MC/P:"); printk(args); printk("\n"); } } while (0)
+#define dprintk(fmt, arg...) do { \
+ if (debug) \
+ printk(KERN_DEBUG pr_fmt("%s: " fmt), \
+ __func__, ##arg); \
+} while (0)
struct dib3000mc_state {
struct dvb_frontend demod;
@@ -873,7 +879,7 @@ int dib3000mc_i2c_enumeration(struct i2c_adapter *i2c, int no_of_demods, u8 defa
}
EXPORT_SYMBOL(dib3000mc_i2c_enumeration);
-static struct dvb_frontend_ops dib3000mc_ops;
+static const struct dvb_frontend_ops dib3000mc_ops;
struct dvb_frontend * dib3000mc_attach(struct i2c_adapter *i2c_adap, u8 i2c_addr, struct dib3000mc_config *cfg)
{
@@ -906,7 +912,7 @@ error:
}
EXPORT_SYMBOL(dib3000mc_attach);
-static struct dvb_frontend_ops dib3000mc_ops = {
+static const struct dvb_frontend_ops dib3000mc_ops = {
.delsys = { SYS_DVBT },
.info = {
.name = "DiBcom 3000MC/P",
diff --git a/drivers/media/dvb-frontends/dib7000m.c b/drivers/media/dvb-frontends/dib7000m.c
index b3ddae8885ac..5ce9f93a65c3 100644
--- a/drivers/media/dvb-frontends/dib7000m.c
+++ b/drivers/media/dvb-frontends/dib7000m.c
@@ -8,6 +8,9 @@
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation, version 2.
*/
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/i2c.h>
@@ -21,7 +24,11 @@ static int debug;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "turn on debugging (default: 0)");
-#define dprintk(args...) do { if (debug) { printk(KERN_DEBUG "DiB7000M: "); printk(args); printk("\n"); } } while (0)
+#define dprintk(fmt, arg...) do { \
+ if (debug) \
+ printk(KERN_DEBUG pr_fmt("%s: " fmt), \
+ __func__, ##arg); \
+} while (0)
struct dib7000m_state {
struct dvb_frontend demod;
@@ -74,7 +81,7 @@ static u16 dib7000m_read_word(struct dib7000m_state *state, u16 reg)
u16 ret;
if (mutex_lock_interruptible(&state->i2c_buffer_lock) < 0) {
- dprintk("could not acquire lock");
+ dprintk("could not acquire lock\n");
return 0;
}
@@ -92,7 +99,7 @@ static u16 dib7000m_read_word(struct dib7000m_state *state, u16 reg)
state->msg[1].len = 2;
if (i2c_transfer(state->i2c_adap, state->msg, 2) != 2)
- dprintk("i2c read error on %d",reg);
+ dprintk("i2c read error on %d\n", reg);
ret = (state->i2c_read_buffer[0] << 8) | state->i2c_read_buffer[1];
mutex_unlock(&state->i2c_buffer_lock);
@@ -105,7 +112,7 @@ static int dib7000m_write_word(struct dib7000m_state *state, u16 reg, u16 val)
int ret;
if (mutex_lock_interruptible(&state->i2c_buffer_lock) < 0) {
- dprintk("could not acquire lock");
+ dprintk("could not acquire lock\n");
return -EINVAL;
}
@@ -154,7 +161,7 @@ static int dib7000m_set_output_mode(struct dib7000m_state *state, int mode)
fifo_threshold = 1792;
smo_mode = (dib7000m_read_word(state, 294 + state->reg_offs) & 0x0010) | (1 << 1);
- dprintk( "setting output mode for demod %p to %d", &state->demod, mode);
+ dprintk("setting output mode for demod %p to %d\n", &state->demod, mode);
switch (mode) {
case OUTMODE_MPEG2_PAR_GATED_CLK: // STBs with parallel gated clock
@@ -181,7 +188,7 @@ static int dib7000m_set_output_mode(struct dib7000m_state *state, int mode)
outreg = 0;
break;
default:
- dprintk( "Unhandled output_mode passed to be set for demod %p",&state->demod);
+ dprintk("Unhandled output_mode passed to be set for demod %p\n", &state->demod);
break;
}
@@ -302,7 +309,7 @@ static int dib7000m_set_adc_state(struct dib7000m_state *state, enum dibx000_adc
break;
}
-// dprintk( "913: %x, 914: %x", reg_913, reg_914);
+// dprintk("913: %x, 914: %x\n", reg_913, reg_914);
ret |= dib7000m_write_word(state, 913, reg_913);
ret |= dib7000m_write_word(state, 914, reg_914);
@@ -320,10 +327,10 @@ static int dib7000m_set_bandwidth(struct dib7000m_state *state, u32 bw)
state->current_bandwidth = bw;
if (state->timf == 0) {
- dprintk( "using default timf");
+ dprintk("using default timf\n");
timf = state->timf_default;
} else {
- dprintk( "using updated timf");
+ dprintk("using updated timf\n");
timf = state->timf;
}
@@ -340,7 +347,7 @@ static int dib7000m_set_diversity_in(struct dvb_frontend *demod, int onoff)
struct dib7000m_state *state = demod->demodulator_priv;
if (state->div_force_off) {
- dprintk( "diversity combination deactivated - forced by COFDM parameters");
+ dprintk("diversity combination deactivated - forced by COFDM parameters\n");
onoff = 0;
}
state->div_state = (u8)onoff;
@@ -580,10 +587,10 @@ static int dib7000m_demod_reset(struct dib7000m_state *state)
dib7000mc_reset_pll(state);
if (dib7000m_reset_gpio(state) != 0)
- dprintk( "GPIO reset was not successful.");
+ dprintk("GPIO reset was not successful.\n");
if (dib7000m_set_output_mode(state, OUTMODE_HIGH_Z) != 0)
- dprintk( "OUTPUT_MODE could not be reset.");
+ dprintk("OUTPUT_MODE could not be reset.\n");
/* unforce divstr regardless whether i2c enumeration was done or not */
dib7000m_write_word(state, 1794, dib7000m_read_word(state, 1794) & ~(1 << 1) );
@@ -650,7 +657,7 @@ static int dib7000m_agc_soft_split(struct dib7000m_state *state)
(agc - state->current_agc->split.min_thres) /
(state->current_agc->split.max_thres - state->current_agc->split.min_thres);
- dprintk( "AGC split_offset: %d",split_offset);
+ dprintk("AGC split_offset: %d\n", split_offset);
// P_agc_force_split and P_agc_split_offset
return dib7000m_write_word(state, 103, (dib7000m_read_word(state, 103) & 0xff00) | split_offset);
@@ -687,7 +694,7 @@ static int dib7000m_set_agc_config(struct dib7000m_state *state, u8 band)
}
if (agc == NULL) {
- dprintk( "no valid AGC configuration found for band 0x%02x",band);
+ dprintk("no valid AGC configuration found for band 0x%02x\n", band);
return -EINVAL;
}
@@ -703,7 +710,7 @@ static int dib7000m_set_agc_config(struct dib7000m_state *state, u8 band)
dib7000m_write_word(state, 98, (agc->alpha_mant << 5) | agc->alpha_exp);
dib7000m_write_word(state, 99, (agc->beta_mant << 6) | agc->beta_exp);
- dprintk( "WBD: ref: %d, sel: %d, active: %d, alpha: %d",
+ dprintk("WBD: ref: %d, sel: %d, active: %d, alpha: %d\n",
state->wbd_ref != 0 ? state->wbd_ref : agc->wbd_ref, agc->wbd_sel, !agc->perform_agc_softsplit, agc->wbd_sel);
/* AGC continued */
@@ -724,7 +731,7 @@ static int dib7000m_set_agc_config(struct dib7000m_state *state, u8 band)
if (state->revision > 0x4000) { // settings for the MC
dib7000m_write_word(state, 71, agc->agc1_pt3);
-// dprintk( "929: %x %d %d",
+// dprintk("929: %x %d %d\n",
// (dib7000m_read_word(state, 929) & 0xffe3) | (agc->wbd_inv << 4) | (agc->wbd_sel << 2), agc->wbd_inv, agc->wbd_sel);
dib7000m_write_word(state, 929, (dib7000m_read_word(state, 929) & 0xffe3) | (agc->wbd_inv << 4) | (agc->wbd_sel << 2));
} else {
@@ -742,7 +749,7 @@ static void dib7000m_update_timf(struct dib7000m_state *state)
state->timf = timf * 160 / (state->current_bandwidth / 50);
dib7000m_write_word(state, 23, (u16) (timf >> 16));
dib7000m_write_word(state, 24, (u16) (timf & 0xffff));
- dprintk( "updated timf_frequency: %d (default: %d)",state->timf, state->timf_default);
+ dprintk("updated timf_frequency: %d (default: %d)\n", state->timf, state->timf_default);
}
static int dib7000m_agc_startup(struct dvb_frontend *demod)
@@ -804,7 +811,7 @@ static int dib7000m_agc_startup(struct dvb_frontend *demod)
dib7000m_restart_agc(state);
- dprintk( "SPLIT %p: %hd", demod, agc_split);
+ dprintk("SPLIT %p: %hd\n", demod, agc_split);
(*agc_state)++;
ret = 5;
@@ -1013,12 +1020,12 @@ static int dib7000m_autosearch_irq(struct dib7000m_state *state, u16 reg)
u16 irq_pending = dib7000m_read_word(state, reg);
if (irq_pending & 0x1) { // failed
- dprintk( "autosearch failed");
+ dprintk("autosearch failed\n");
return 1;
}
if (irq_pending & 0x2) { // succeeded
- dprintk( "autosearch succeeded");
+ dprintk("autosearch succeeded\n");
return 2;
}
return 0; // still pending
@@ -1102,7 +1109,7 @@ static int dib7000m_wakeup(struct dvb_frontend *demod)
dib7000m_set_power_mode(state, DIB7000M_POWER_ALL);
if (dib7000m_set_adc_state(state, DIBX000_SLOW_ADC_ON) != 0)
- dprintk( "could not start Slow ADC");
+ dprintk("could not start Slow ADC\n");
return 0;
}
@@ -1121,7 +1128,7 @@ static int dib7000m_identify(struct dib7000m_state *state)
u16 value;
if ((value = dib7000m_read_word(state, 896)) != 0x01b3) {
- dprintk( "wrong Vendor ID (0x%x)",value);
+ dprintk("wrong Vendor ID (0x%x)\n", value);
return -EREMOTEIO;
}
@@ -1130,21 +1137,21 @@ static int dib7000m_identify(struct dib7000m_state *state)
state->revision != 0x4001 &&
state->revision != 0x4002 &&
state->revision != 0x4003) {
- dprintk( "wrong Device ID (0x%x)",value);
+ dprintk("wrong Device ID (0x%x)\n", value);
return -EREMOTEIO;
}
/* protect this driver to be used with 7000PC */
if (state->revision == 0x4000 && dib7000m_read_word(state, 769) == 0x4000) {
- dprintk( "this driver does not work with DiB7000PC");
+ dprintk("this driver does not work with DiB7000PC\n");
return -EREMOTEIO;
}
switch (state->revision) {
- case 0x4000: dprintk( "found DiB7000MA/PA/MB/PB"); break;
- case 0x4001: state->reg_offs = 1; dprintk( "found DiB7000HC"); break;
- case 0x4002: state->reg_offs = 1; dprintk( "found DiB7000MC"); break;
- case 0x4003: state->reg_offs = 1; dprintk( "found DiB9000"); break;
+ case 0x4000: dprintk("found DiB7000MA/PA/MB/PB\n"); break;
+ case 0x4001: state->reg_offs = 1; dprintk("found DiB7000HC\n"); break;
+ case 0x4002: state->reg_offs = 1; dprintk("found DiB7000MC\n"); break;
+ case 0x4003: state->reg_offs = 1; dprintk("found DiB9000\n"); break;
}
return 0;
@@ -1242,7 +1249,7 @@ static int dib7000m_set_frontend(struct dvb_frontend *fe)
found = dib7000m_autosearch_is_irq(fe);
} while (found == 0 && i--);
- dprintk("autosearch returns: %d",found);
+ dprintk("autosearch returns: %d\n", found);
if (found == 0 || found == 1)
return 0; // no channel found
@@ -1330,7 +1337,7 @@ int dib7000m_pid_filter_ctrl(struct dvb_frontend *fe, u8 onoff)
struct dib7000m_state *state = fe->demodulator_priv;
u16 val = dib7000m_read_word(state, 294 + state->reg_offs) & 0xffef;
val |= (onoff & 0x1) << 4;
- dprintk("PID filter enabled %d", onoff);
+ dprintk("PID filter enabled %d\n", onoff);
return dib7000m_write_word(state, 294 + state->reg_offs, val);
}
EXPORT_SYMBOL(dib7000m_pid_filter_ctrl);
@@ -1338,7 +1345,7 @@ EXPORT_SYMBOL(dib7000m_pid_filter_ctrl);
int dib7000m_pid_filter(struct dvb_frontend *fe, u8 id, u16 pid, u8 onoff)
{
struct dib7000m_state *state = fe->demodulator_priv;
- dprintk("PID filter: index %x, PID %d, OnOff %d", id, pid, onoff);
+ dprintk("PID filter: index %x, PID %d, OnOff %d\n", id, pid, onoff);
return dib7000m_write_word(state, 300 + state->reg_offs + id,
onoff ? (1 << 13) | pid : 0);
}
@@ -1362,7 +1369,7 @@ int dib7000m_i2c_enumeration(struct i2c_adapter *i2c, int no_of_demods,
if (dib7000m_identify(&st) != 0) {
st.i2c_addr = default_addr;
if (dib7000m_identify(&st) != 0) {
- dprintk("DiB7000M #%d: not identified", k);
+ dprintk("DiB7000M #%d: not identified\n", k);
return -EIO;
}
}
@@ -1375,7 +1382,7 @@ int dib7000m_i2c_enumeration(struct i2c_adapter *i2c, int no_of_demods,
/* set new i2c address and force divstart */
dib7000m_write_word(&st, 1794, (new_addr << 2) | 0x2);
- dprintk("IC %d initialized (to i2c_address 0x%x)", k, new_addr);
+ dprintk("IC %d initialized (to i2c_address 0x%x)\n", k, new_addr);
}
for (k = 0; k < no_of_demods; k++) {
@@ -1394,7 +1401,7 @@ int dib7000m_i2c_enumeration(struct i2c_adapter *i2c, int no_of_demods,
EXPORT_SYMBOL(dib7000m_i2c_enumeration);
#endif
-static struct dvb_frontend_ops dib7000m_ops;
+static const struct dvb_frontend_ops dib7000m_ops;
struct dvb_frontend * dib7000m_attach(struct i2c_adapter *i2c_adap, u8 i2c_addr, struct dib7000m_config *cfg)
{
struct dvb_frontend *demod;
@@ -1432,7 +1439,7 @@ error:
}
EXPORT_SYMBOL(dib7000m_attach);
-static struct dvb_frontend_ops dib7000m_ops = {
+static const struct dvb_frontend_ops dib7000m_ops = {
.delsys = { SYS_DVBT },
.info = {
.name = "DiBcom 7000MA/MB/PA/PB/MC",
diff --git a/drivers/media/dvb-frontends/dib7000p.c b/drivers/media/dvb-frontends/dib7000p.c
index b861d4437f2a..a27c0001f2d6 100644
--- a/drivers/media/dvb-frontends/dib7000p.c
+++ b/drivers/media/dvb-frontends/dib7000p.c
@@ -7,6 +7,9 @@
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation, version 2.
*/
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/i2c.h>
@@ -26,7 +29,11 @@ static int buggy_sfn_workaround;
module_param(buggy_sfn_workaround, int, 0644);
MODULE_PARM_DESC(buggy_sfn_workaround, "Enable work-around for buggy SFNs (default: 0)");
-#define dprintk(args...) do { if (debug) { printk(KERN_DEBUG "DiB7000P: "); printk(args); printk("\n"); } } while (0)
+#define dprintk(fmt, arg...) do { \
+ if (debug) \
+ printk(KERN_DEBUG pr_fmt("%s: " fmt), \
+ __func__, ##arg); \
+} while (0)
struct i2c_device {
struct i2c_adapter *i2c_adap;
@@ -98,7 +105,7 @@ static u16 dib7000p_read_word(struct dib7000p_state *state, u16 reg)
u16 ret;
if (mutex_lock_interruptible(&state->i2c_buffer_lock) < 0) {
- dprintk("could not acquire lock");
+ dprintk("could not acquire lock\n");
return 0;
}
@@ -116,7 +123,7 @@ static u16 dib7000p_read_word(struct dib7000p_state *state, u16 reg)
state->msg[1].len = 2;
if (i2c_transfer(state->i2c_adap, state->msg, 2) != 2)
- dprintk("i2c read error on %d", reg);
+ dprintk("i2c read error on %d\n", reg);
ret = (state->i2c_read_buffer[0] << 8) | state->i2c_read_buffer[1];
mutex_unlock(&state->i2c_buffer_lock);
@@ -128,7 +135,7 @@ static int dib7000p_write_word(struct dib7000p_state *state, u16 reg, u16 val)
int ret;
if (mutex_lock_interruptible(&state->i2c_buffer_lock) < 0) {
- dprintk("could not acquire lock");
+ dprintk("could not acquire lock\n");
return -EINVAL;
}
@@ -174,7 +181,7 @@ static int dib7000p_set_output_mode(struct dib7000p_state *state, int mode)
fifo_threshold = 1792;
smo_mode = (dib7000p_read_word(state, 235) & 0x0050) | (1 << 1);
- dprintk("setting output mode for demod %p to %d", &state->demod, mode);
+ dprintk("setting output mode for demod %p to %d\n", &state->demod, mode);
switch (mode) {
case OUTMODE_MPEG2_PAR_GATED_CLK:
@@ -204,7 +211,7 @@ static int dib7000p_set_output_mode(struct dib7000p_state *state, int mode)
outreg = 0;
break;
default:
- dprintk("Unhandled output_mode passed to be set for demod %p", &state->demod);
+ dprintk("Unhandled output_mode passed to be set for demod %p\n", &state->demod);
break;
}
@@ -224,7 +231,7 @@ static int dib7000p_set_diversity_in(struct dvb_frontend *demod, int onoff)
struct dib7000p_state *state = demod->demodulator_priv;
if (state->div_force_off) {
- dprintk("diversity combination deactivated - forced by COFDM parameters");
+ dprintk("diversity combination deactivated - forced by COFDM parameters\n");
onoff = 0;
dib7000p_write_word(state, 207, 0);
} else
@@ -374,10 +381,10 @@ static int dib7000p_set_bandwidth(struct dib7000p_state *state, u32 bw)
state->current_bandwidth = bw;
if (state->timf == 0) {
- dprintk("using default timf");
+ dprintk("using default timf\n");
timf = state->cfg.bw->timf;
} else {
- dprintk("using updated timf");
+ dprintk("using updated timf\n");
timf = state->timf;
}
@@ -494,7 +501,7 @@ static int dib7000p_update_pll(struct dvb_frontend *fe, struct dibx000_bandwidth
loopdiv = (reg_1856 >> 6) & 0x3f;
if ((bw != NULL) && (bw->pll_prediv != prediv || bw->pll_ratio != loopdiv)) {
- dprintk("Updating pll (prediv: old = %d new = %d ; loopdiv : old = %d new = %d)", prediv, bw->pll_prediv, loopdiv, bw->pll_ratio);
+ dprintk("Updating pll (prediv: old = %d new = %d ; loopdiv : old = %d new = %d)\n", prediv, bw->pll_prediv, loopdiv, bw->pll_ratio);
reg_1856 &= 0xf000;
reg_1857 = dib7000p_read_word(state, 1857);
dib7000p_write_word(state, 1857, reg_1857 & ~(1 << 15));
@@ -511,7 +518,7 @@ static int dib7000p_update_pll(struct dvb_frontend *fe, struct dibx000_bandwidth
dib7000p_write_word(state, 1857, reg_1857 | (1 << 15));
while (((dib7000p_read_word(state, 1856) >> 15) & 0x1) != 1)
- dprintk("Waiting for PLL to lock");
+ dprintk("Waiting for PLL to lock\n");
return 0;
}
@@ -521,7 +528,7 @@ static int dib7000p_update_pll(struct dvb_frontend *fe, struct dibx000_bandwidth
static int dib7000p_reset_gpio(struct dib7000p_state *st)
{
/* reset the GPIOs */
- dprintk("gpio dir: %x: val: %x, pwm_pos: %x", st->gpio_dir, st->gpio_val, st->cfg.gpio_pwm_pos);
+ dprintk("gpio dir: %x: val: %x, pwm_pos: %x\n", st->gpio_dir, st->gpio_val, st->cfg.gpio_pwm_pos);
dib7000p_write_word(st, 1029, st->gpio_dir);
dib7000p_write_word(st, 1030, st->gpio_val);
@@ -669,7 +676,7 @@ static int dib7000p_demod_reset(struct dib7000p_state *state)
dib7000p_reset_pll(state);
if (dib7000p_reset_gpio(state) != 0)
- dprintk("GPIO reset was not successful.");
+ dprintk("GPIO reset was not successful.\n");
if (state->version == SOC7090) {
dib7000p_write_word(state, 899, 0);
@@ -681,7 +688,7 @@ static int dib7000p_demod_reset(struct dib7000p_state *state)
dib7000p_write_word(state, 273, (0<<6) | 30);
}
if (dib7000p_set_output_mode(state, OUTMODE_HIGH_Z) != 0)
- dprintk("OUTPUT_MODE could not be reset.");
+ dprintk("OUTPUT_MODE could not be reset.\n");
dib7000p_set_adc_state(state, DIBX000_SLOW_ADC_ON);
dib7000p_sad_calib(state);
@@ -759,7 +766,7 @@ static int dib7000p_set_agc_config(struct dib7000p_state *state, u8 band)
}
if (agc == NULL) {
- dprintk("no valid AGC configuration found for band 0x%02x", band);
+ dprintk("no valid AGC configuration found for band 0x%02x\n", band);
return -EINVAL;
}
@@ -776,7 +783,7 @@ static int dib7000p_set_agc_config(struct dib7000p_state *state, u8 band)
dib7000p_write_word(state, 102, (agc->beta_mant << 6) | agc->beta_exp);
/* AGC continued */
- dprintk("WBD: ref: %d, sel: %d, active: %d, alpha: %d",
+ dprintk("WBD: ref: %d, sel: %d, active: %d, alpha: %d\n",
state->wbd_ref != 0 ? state->wbd_ref : agc->wbd_ref, agc->wbd_sel, !agc->perform_agc_softsplit, agc->wbd_sel);
if (state->wbd_ref != 0)
@@ -806,7 +813,7 @@ static void dib7000p_set_dds(struct dib7000p_state *state, s32 offset_khz)
u32 dds = state->cfg.bw->ifreq & 0x1ffffff;
u8 invert = !!(state->cfg.bw->ifreq & (1 << 25));
- dprintk("setting a frequency offset of %dkHz internal freq = %d invert = %d", offset_khz, internal, invert);
+ dprintk("setting a frequency offset of %dkHz internal freq = %d invert = %d\n", offset_khz, internal, invert);
if (offset_khz < 0)
unit_khz_dds_val *= -1;
@@ -902,7 +909,7 @@ static int dib7000p_agc_startup(struct dvb_frontend *demod)
dib7000p_restart_agc(state);
- dprintk("SPLIT %p: %hd", demod, agc_split);
+ dprintk("SPLIT %p: %hd\n", demod, agc_split);
(*agc_state)++;
ret = 5;
@@ -934,7 +941,7 @@ static void dib7000p_update_timf(struct dib7000p_state *state)
state->timf = timf * 160 / (state->current_bandwidth / 50);
dib7000p_write_word(state, 23, (u16) (timf >> 16));
dib7000p_write_word(state, 24, (u16) (timf & 0xffff));
- dprintk("updated timf_frequency: %d (default: %d)", state->timf, state->cfg.bw->timf);
+ dprintk("updated timf_frequency: %d (default: %d)\n", state->timf, state->cfg.bw->timf);
}
@@ -1202,7 +1209,7 @@ static void dib7000p_spur_protect(struct dib7000p_state *state, u32 rf_khz, u32
int bw_khz = bw;
u32 pha;
- dprintk("relative position of the Spur: %dk (RF: %dk, XTAL: %dk)", f_rel, rf_khz, xtal);
+ dprintk("relative position of the Spur: %dk (RF: %dk, XTAL: %dk)\n", f_rel, rf_khz, xtal);
if (f_rel < -bw_khz / 2 || f_rel > bw_khz / 2)
return;
@@ -1252,7 +1259,7 @@ static void dib7000p_spur_protect(struct dib7000p_state *state, u32 rf_khz, u32
coef_im[k] = (1 << 24) - 1;
coef_im[k] /= (1 << 15);
- dprintk("PALF COEF: %d re: %d im: %d", k, coef_re[k], coef_im[k]);
+ dprintk("PALF COEF: %d re: %d im: %d\n", k, coef_re[k], coef_im[k]);
dib7000p_write_word(state, 143, (0 << 14) | (k << 10) | (coef_re[k] & 0x3ff));
dib7000p_write_word(state, 144, coef_im[k] & 0x3ff);
@@ -1280,7 +1287,7 @@ static int dib7000p_tune(struct dvb_frontend *demod)
/* P_ctrl_inh_cor=0, P_ctrl_alpha_cor=4, P_ctrl_inh_isi=0, P_ctrl_alpha_isi=3, P_ctrl_inh_cor4=1, P_ctrl_alpha_cor4=3 */
tmp = (0 << 14) | (4 << 10) | (0 << 9) | (3 << 5) | (1 << 4) | (0x3);
if (state->sfn_workaround_active) {
- dprintk("SFN workaround is active");
+ dprintk("SFN workaround is active\n");
tmp |= (1 << 9);
dib7000p_write_word(state, 166, 0x4000);
} else {
@@ -1390,15 +1397,15 @@ static int dib7000p_sleep(struct dvb_frontend *demod)
static int dib7000p_identify(struct dib7000p_state *st)
{
u16 value;
- dprintk("checking demod on I2C address: %d (%x)", st->i2c_addr, st->i2c_addr);
+ dprintk("checking demod on I2C address: %d (%x)\n", st->i2c_addr, st->i2c_addr);
if ((value = dib7000p_read_word(st, 768)) != 0x01b3) {
- dprintk("wrong Vendor ID (read=0x%x)", value);
+ dprintk("wrong Vendor ID (read=0x%x)\n", value);
return -EREMOTEIO;
}
if ((value = dib7000p_read_word(st, 769)) != 0x4000) {
- dprintk("wrong Device ID (%x)", value);
+ dprintk("wrong Device ID (%x)\n", value);
return -EREMOTEIO;
}
@@ -1536,7 +1543,7 @@ static int dib7000p_set_frontend(struct dvb_frontend *fe)
found = dib7000p_autosearch_is_irq(fe);
} while (found == 0 && i--);
- dprintk("autosearch returns: %d", found);
+ dprintk("autosearch returns: %d\n", found);
if (found == 0 || found == 1)
return 0;
@@ -1951,7 +1958,7 @@ static int dib7000p_get_stats(struct dvb_frontend *demod, enum fe_status stat)
time_us = dib7000p_get_time_us(demod);
state->ber_jiffies_stats = jiffies + msecs_to_jiffies((time_us + 500) / 1000);
- dprintk("Next all layers stats available in %u us.", time_us);
+ dprintk("Next all layers stats available in %u us.\n", time_us);
dib7000p_read_ber(demod, &val);
c->post_bit_error.stat[0].scale = FE_SCALE_COUNTER;
@@ -2019,7 +2026,7 @@ static int dib7000pc_detection(struct i2c_adapter *i2c_adap)
if (i2c_transfer(i2c_adap, msg, 2) == 2)
if (rx[0] == 0x01 && rx[1] == 0xb3) {
- dprintk("-D- DiB7000PC detected");
+ dprintk("-D- DiB7000PC detected\n");
return 1;
}
@@ -2027,11 +2034,11 @@ static int dib7000pc_detection(struct i2c_adapter *i2c_adap)
if (i2c_transfer(i2c_adap, msg, 2) == 2)
if (rx[0] == 0x01 && rx[1] == 0xb3) {
- dprintk("-D- DiB7000PC detected");
+ dprintk("-D- DiB7000PC detected\n");
return 1;
}
- dprintk("-D- DiB7000PC not detected");
+ dprintk("-D- DiB7000PC not detected\n");
kfree(rx);
rx_memory_error:
@@ -2050,14 +2057,14 @@ static int dib7000p_pid_filter_ctrl(struct dvb_frontend *fe, u8 onoff)
struct dib7000p_state *state = fe->demodulator_priv;
u16 val = dib7000p_read_word(state, 235) & 0xffef;
val |= (onoff & 0x1) << 4;
- dprintk("PID filter enabled %d", onoff);
+ dprintk("PID filter enabled %d\n", onoff);
return dib7000p_write_word(state, 235, val);
}
static int dib7000p_pid_filter(struct dvb_frontend *fe, u8 id, u16 pid, u8 onoff)
{
struct dib7000p_state *state = fe->demodulator_priv;
- dprintk("PID filter: index %x, PID %d, OnOff %d", id, pid, onoff);
+ dprintk("PID filter: index %x, PID %d, OnOff %d\n", id, pid, onoff);
return dib7000p_write_word(state, 241 + id, onoff ? (1 << 13) | pid : 0);
}
@@ -2100,7 +2107,7 @@ static int dib7000p_i2c_enumeration(struct i2c_adapter *i2c, int no_of_demods, u
/* set new i2c address and force divstart */
dib7000p_write_word(dpst, 1285, (new_addr << 2) | 0x2);
- dprintk("IC %d initialized (to i2c_address 0x%x)", k, new_addr);
+ dprintk("IC %d initialized (to i2c_address 0x%x)\n", k, new_addr);
}
for (k = 0; k < no_of_demods; k++) {
@@ -2136,21 +2143,21 @@ static s32 dib7000p_get_adc_power(struct dvb_frontend *fe)
buf[0] = dib7000p_read_word(state, 0x184);
buf[1] = dib7000p_read_word(state, 0x185);
pow_i = (buf[0] << 16) | buf[1];
- dprintk("raw pow_i = %d", pow_i);
+ dprintk("raw pow_i = %d\n", pow_i);
tmp_val = pow_i;
while (tmp_val >>= 1)
exp++;
mant = (pow_i * 1000 / (1 << exp));
- dprintk(" mant = %d exp = %d", mant / 1000, exp);
+ dprintk(" mant = %d exp = %d\n", mant / 1000, exp);
ix = (u8) ((mant - 1000) / 100); /* index of the LUT */
- dprintk(" ix = %d", ix);
+ dprintk(" ix = %d\n", ix);
pow_i = (lut_1000ln_mant[ix] + 693 * (exp - 20) - 6908);
pow_i = (pow_i << 8) / 1000;
- dprintk(" pow_i = %d", pow_i);
+ dprintk(" pow_i = %d\n", pow_i);
return pow_i;
}
@@ -2185,7 +2192,7 @@ static int w7090p_tuner_write_serpar(struct i2c_adapter *i2c_adap, struct i2c_ms
n_overflow = (dib7000p_read_word(state, 1984) >> 1) & 0x1;
i--;
if (i == 0)
- dprintk("Tuner ITF: write busy (overflow)");
+ dprintk("Tuner ITF: write busy (overflow)\n");
}
dib7000p_write_word(state, 1985, (1 << 6) | (serpar_num & 0x3f));
dib7000p_write_word(state, 1986, (msg[0].buf[1] << 8) | msg[0].buf[2]);
@@ -2205,7 +2212,7 @@ static int w7090p_tuner_read_serpar(struct i2c_adapter *i2c_adap, struct i2c_msg
n_overflow = (dib7000p_read_word(state, 1984) >> 1) & 0x1;
i--;
if (i == 0)
- dprintk("TunerITF: read busy (overflow)");
+ dprintk("TunerITF: read busy (overflow)\n");
}
dib7000p_write_word(state, 1985, (0 << 6) | (serpar_num & 0x3f));
@@ -2214,7 +2221,7 @@ static int w7090p_tuner_read_serpar(struct i2c_adapter *i2c_adap, struct i2c_msg
n_empty = dib7000p_read_word(state, 1984) & 0x1;
i--;
if (i == 0)
- dprintk("TunerITF: read busy (empty)");
+ dprintk("TunerITF: read busy (empty)\n");
}
read_word = dib7000p_read_word(state, 1987);
msg[1].buf[0] = (read_word >> 8) & 0xff;
@@ -2435,7 +2442,7 @@ static u32 dib7090_calcSyncFreq(u32 P_Kin, u32 P_Kout, u32 insertExtSynchro, u32
static int dib7090_cfg_DibTx(struct dib7000p_state *state, u32 P_Kin, u32 P_Kout, u32 insertExtSynchro, u32 synchroMode, u32 syncWord, u32 syncSize)
{
- dprintk("Configure DibStream Tx");
+ dprintk("Configure DibStream Tx\n");
dib7000p_write_word(state, 1615, 1);
dib7000p_write_word(state, 1603, P_Kin);
@@ -2455,7 +2462,7 @@ static int dib7090_cfg_DibRx(struct dib7000p_state *state, u32 P_Kin, u32 P_Kout
{
u32 syncFreq;
- dprintk("Configure DibStream Rx");
+ dprintk("Configure DibStream Rx\n");
if ((P_Kin != 0) && (P_Kout != 0)) {
syncFreq = dib7090_calcSyncFreq(P_Kin, P_Kout, insertExtSynchro, syncSize);
dib7000p_write_word(state, 1542, syncFreq);
@@ -2492,7 +2499,7 @@ static void dib7090_enMpegMux(struct dib7000p_state *state, int onoff)
static void dib7090_configMpegMux(struct dib7000p_state *state,
u16 pulseWidth, u16 enSerialMode, u16 enSerialClkDiv2)
{
- dprintk("Enable Mpeg mux");
+ dprintk("Enable Mpeg mux\n");
dib7090_enMpegMux(state, 0);
@@ -2513,17 +2520,17 @@ static void dib7090_setDibTxMux(struct dib7000p_state *state, int mode)
switch (mode) {
case MPEG_ON_DIBTX:
- dprintk("SET MPEG ON DIBSTREAM TX");
+ dprintk("SET MPEG ON DIBSTREAM TX\n");
dib7090_cfg_DibTx(state, 8, 5, 0, 0, 0, 0);
reg_1288 |= (1<<9);
break;
case DIV_ON_DIBTX:
- dprintk("SET DIV_OUT ON DIBSTREAM TX");
+ dprintk("SET DIV_OUT ON DIBSTREAM TX\n");
dib7090_cfg_DibTx(state, 5, 5, 0, 0, 0, 0);
reg_1288 |= (1<<8);
break;
case ADC_ON_DIBTX:
- dprintk("SET ADC_OUT ON DIBSTREAM TX");
+ dprintk("SET ADC_OUT ON DIBSTREAM TX\n");
dib7090_cfg_DibTx(state, 20, 5, 10, 0, 0, 0);
reg_1288 |= (1<<7);
break;
@@ -2539,17 +2546,17 @@ static void dib7090_setHostBusMux(struct dib7000p_state *state, int mode)
switch (mode) {
case DEMOUT_ON_HOSTBUS:
- dprintk("SET DEM OUT OLD INTERF ON HOST BUS");
+ dprintk("SET DEM OUT OLD INTERF ON HOST BUS\n");
dib7090_enMpegMux(state, 0);
reg_1288 |= (1<<6);
break;
case DIBTX_ON_HOSTBUS:
- dprintk("SET DIBSTREAM TX ON HOST BUS");
+ dprintk("SET DIBSTREAM TX ON HOST BUS\n");
dib7090_enMpegMux(state, 0);
reg_1288 |= (1<<5);
break;
case MPEG_ON_HOSTBUS:
- dprintk("SET MPEG MUX ON HOST BUS");
+ dprintk("SET MPEG MUX ON HOST BUS\n");
reg_1288 |= (1<<4);
break;
default:
@@ -2565,7 +2572,7 @@ static int dib7090_set_diversity_in(struct dvb_frontend *fe, int onoff)
switch (onoff) {
case 0: /* only use the internal way - not the diversity input */
- dprintk("%s mode OFF : by default Enable Mpeg INPUT", __func__);
+ dprintk("%s mode OFF : by default Enable Mpeg INPUT\n", __func__);
dib7090_cfg_DibRx(state, 8, 5, 0, 0, 0, 8, 0);
/* Do not divide the serial clock of MPEG MUX */
@@ -2581,7 +2588,7 @@ static int dib7090_set_diversity_in(struct dvb_frontend *fe, int onoff)
break;
case 1: /* both ways */
case 2: /* only the diversity input */
- dprintk("%s ON : Enable diversity INPUT", __func__);
+ dprintk("%s ON : Enable diversity INPUT\n", __func__);
dib7090_cfg_DibRx(state, 5, 5, 0, 0, 0, 0, 0);
state->input_mode_mpeg = 0;
break;
@@ -2612,11 +2619,11 @@ static int dib7090_set_output_mode(struct dvb_frontend *fe, int mode)
case OUTMODE_MPEG2_SERIAL:
if (prefer_mpeg_mux_use) {
- dprintk("setting output mode TS_SERIAL using Mpeg Mux");
+ dprintk("setting output mode TS_SERIAL using Mpeg Mux\n");
dib7090_configMpegMux(state, 3, 1, 1);
dib7090_setHostBusMux(state, MPEG_ON_HOSTBUS);
} else {/* Use Smooth block */
- dprintk("setting output mode TS_SERIAL using Smooth bloc");
+ dprintk("setting output mode TS_SERIAL using Smooth bloc\n");
dib7090_setHostBusMux(state, DEMOUT_ON_HOSTBUS);
outreg |= (2<<6) | (0 << 1);
}
@@ -2624,24 +2631,24 @@ static int dib7090_set_output_mode(struct dvb_frontend *fe, int mode)
case OUTMODE_MPEG2_PAR_GATED_CLK:
if (prefer_mpeg_mux_use) {
- dprintk("setting output mode TS_PARALLEL_GATED using Mpeg Mux");
+ dprintk("setting output mode TS_PARALLEL_GATED using Mpeg Mux\n");
dib7090_configMpegMux(state, 2, 0, 0);
dib7090_setHostBusMux(state, MPEG_ON_HOSTBUS);
} else { /* Use Smooth block */
- dprintk("setting output mode TS_PARALLEL_GATED using Smooth block");
+ dprintk("setting output mode TS_PARALLEL_GATED using Smooth block\n");
dib7090_setHostBusMux(state, DEMOUT_ON_HOSTBUS);
outreg |= (0<<6);
}
break;
case OUTMODE_MPEG2_PAR_CONT_CLK: /* Using Smooth block only */
- dprintk("setting output mode TS_PARALLEL_CONT using Smooth block");
+ dprintk("setting output mode TS_PARALLEL_CONT using Smooth block\n");
dib7090_setHostBusMux(state, DEMOUT_ON_HOSTBUS);
outreg |= (1<<6);
break;
case OUTMODE_MPEG2_FIFO: /* Using Smooth block because not supported by new Mpeg Mux bloc */
- dprintk("setting output mode TS_FIFO using Smooth block");
+ dprintk("setting output mode TS_FIFO using Smooth block\n");
dib7090_setHostBusMux(state, DEMOUT_ON_HOSTBUS);
outreg |= (5<<6);
smo_mode |= (3 << 1);
@@ -2649,13 +2656,13 @@ static int dib7090_set_output_mode(struct dvb_frontend *fe, int mode)
break;
case OUTMODE_DIVERSITY:
- dprintk("setting output mode MODE_DIVERSITY");
+ dprintk("setting output mode MODE_DIVERSITY\n");
dib7090_setDibTxMux(state, DIV_ON_DIBTX);
dib7090_setHostBusMux(state, DIBTX_ON_HOSTBUS);
break;
case OUTMODE_ANALOG_ADC:
- dprintk("setting output mode MODE_ANALOG_ADC");
+ dprintk("setting output mode MODE_ANALOG_ADC\n");
dib7090_setDibTxMux(state, ADC_ON_DIBTX);
dib7090_setHostBusMux(state, DIBTX_ON_HOSTBUS);
break;
@@ -2678,7 +2685,7 @@ static int dib7090_tuner_sleep(struct dvb_frontend *fe, int onoff)
struct dib7000p_state *state = fe->demodulator_priv;
u16 en_cur_state;
- dprintk("sleep dib7090: %d", onoff);
+ dprintk("sleep dib7090: %d\n", onoff);
en_cur_state = dib7000p_read_word(state, 1922);
@@ -2714,7 +2721,7 @@ static int dib7090_slave_reset(struct dvb_frontend *fe)
return 0;
}
-static struct dvb_frontend_ops dib7000p_ops;
+static const struct dvb_frontend_ops dib7000p_ops;
static struct dvb_frontend *dib7000p_init(struct i2c_adapter *i2c_adap, u8 i2c_addr, struct dib7000p_config *cfg)
{
struct dvb_frontend *demod;
@@ -2804,7 +2811,7 @@ void *dib7000p_attach(struct dib7000p_ops *ops)
}
EXPORT_SYMBOL(dib7000p_attach);
-static struct dvb_frontend_ops dib7000p_ops = {
+static const struct dvb_frontend_ops dib7000p_ops = {
.delsys = { SYS_DVBT },
.info = {
.name = "DiBcom 7000PC",
diff --git a/drivers/media/dvb-frontends/dib8000.c b/drivers/media/dvb-frontends/dib8000.c
index ddf9c44877a2..e501ec964df1 100644
--- a/drivers/media/dvb-frontends/dib8000.c
+++ b/drivers/media/dvb-frontends/dib8000.c
@@ -7,6 +7,9 @@
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation, version 2.
*/
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/i2c.h>
@@ -31,7 +34,11 @@ static int debug;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "turn on debugging (default: 0)");
-#define dprintk(args...) do { if (debug) { printk(KERN_DEBUG "DiB8000: "); printk(args); printk("\n"); } } while (0)
+#define dprintk(fmt, arg...) do { \
+ if (debug) \
+ printk(KERN_DEBUG pr_fmt("%s: " fmt), \
+ __func__, ##arg); \
+} while (0)
struct i2c_device {
struct i2c_adapter *adap;
@@ -147,7 +154,7 @@ static u16 dib8000_i2c_read16(struct i2c_device *i2c, u16 reg)
};
if (mutex_lock_interruptible(i2c->i2c_buffer_lock) < 0) {
- dprintk("could not acquire lock");
+ dprintk("could not acquire lock\n");
return 0;
}
@@ -157,7 +164,7 @@ static u16 dib8000_i2c_read16(struct i2c_device *i2c, u16 reg)
msg[1].buf = i2c->i2c_read_buffer;
if (i2c_transfer(i2c->adap, msg, 2) != 2)
- dprintk("i2c read error on %d", reg);
+ dprintk("i2c read error on %d\n", reg);
ret = (msg[1].buf[0] << 8) | msg[1].buf[1];
mutex_unlock(i2c->i2c_buffer_lock);
@@ -182,7 +189,7 @@ static u16 __dib8000_read_word(struct dib8000_state *state, u16 reg)
state->msg[1].len = 2;
if (i2c_transfer(state->i2c.adap, state->msg, 2) != 2)
- dprintk("i2c read error on %d", reg);
+ dprintk("i2c read error on %d\n", reg);
ret = (state->i2c_read_buffer[0] << 8) | state->i2c_read_buffer[1];
@@ -194,7 +201,7 @@ static u16 dib8000_read_word(struct dib8000_state *state, u16 reg)
u16 ret;
if (mutex_lock_interruptible(&state->i2c_buffer_lock) < 0) {
- dprintk("could not acquire lock");
+ dprintk("could not acquire lock\n");
return 0;
}
@@ -210,7 +217,7 @@ static u32 dib8000_read32(struct dib8000_state *state, u16 reg)
u16 rw[2];
if (mutex_lock_interruptible(&state->i2c_buffer_lock) < 0) {
- dprintk("could not acquire lock");
+ dprintk("could not acquire lock\n");
return 0;
}
@@ -228,7 +235,7 @@ static int dib8000_i2c_write16(struct i2c_device *i2c, u16 reg, u16 val)
int ret = 0;
if (mutex_lock_interruptible(i2c->i2c_buffer_lock) < 0) {
- dprintk("could not acquire lock");
+ dprintk("could not acquire lock\n");
return -EINVAL;
}
@@ -249,7 +256,7 @@ static int dib8000_write_word(struct dib8000_state *state, u16 reg, u16 val)
int ret;
if (mutex_lock_interruptible(&state->i2c_buffer_lock) < 0) {
- dprintk("could not acquire lock");
+ dprintk("could not acquire lock\n");
return -EINVAL;
}
@@ -395,7 +402,7 @@ static void dib8000_set_acquisition_mode(struct dib8000_state *state)
{
u16 nud = dib8000_read_word(state, 298);
nud |= (1 << 3) | (1 << 0);
- dprintk("acquisition mode activated");
+ dprintk("acquisition mode activated\n");
dib8000_write_word(state, 298, nud);
}
static int dib8000_set_output_mode(struct dvb_frontend *fe, int mode)
@@ -408,7 +415,7 @@ static int dib8000_set_output_mode(struct dvb_frontend *fe, int mode)
fifo_threshold = 1792;
smo_mode = (dib8000_read_word(state, 299) & 0x0050) | (1 << 1);
- dprintk("-I- Setting output mode for demod %p to %d",
+ dprintk("-I- Setting output mode for demod %p to %d\n",
&state->fe[0], mode);
switch (mode) {
@@ -443,7 +450,7 @@ static int dib8000_set_output_mode(struct dvb_frontend *fe, int mode)
break;
default:
- dprintk("Unhandled output_mode passed to be set for demod %p",
+ dprintk("Unhandled output_mode passed to be set for demod %p\n",
&state->fe[0]);
return -EINVAL;
}
@@ -464,7 +471,7 @@ static int dib8000_set_diversity_in(struct dvb_frontend *fe, int onoff)
struct dib8000_state *state = fe->demodulator_priv;
u16 tmp, sync_wait = dib8000_read_word(state, 273) & 0xfff0;
- dprintk("set diversity input to %i", onoff);
+ dprintk("set diversity input to %i\n", onoff);
if (!state->differential_constellation) {
dib8000_write_word(state, 272, 1 << 9); //dvsy_off_lmod4 = 1
dib8000_write_word(state, 273, sync_wait | (1 << 2) | 2); // sync_enable = 1; comb_mode = 2
@@ -531,7 +538,7 @@ static void dib8000_set_power_mode(struct dib8000_state *state, enum dib8000_pow
break;
}
- dprintk("powermode : 774 : %x ; 775 : %x; 776 : %x ; 900 : %x; 1280 : %x", reg_774, reg_775, reg_776, reg_900, reg_1280);
+ dprintk("powermode : 774 : %x ; 775 : %x; 776 : %x ; 900 : %x; 1280 : %x\n", reg_774, reg_775, reg_776, reg_900, reg_1280);
dib8000_write_word(state, 774, reg_774);
dib8000_write_word(state, 775, reg_775);
dib8000_write_word(state, 776, reg_776);
@@ -619,10 +626,10 @@ static int dib8000_set_bandwidth(struct dvb_frontend *fe, u32 bw)
bw = 6000;
if (state->timf == 0) {
- dprintk("using default timf");
+ dprintk("using default timf\n");
timf = state->timf_default;
} else {
- dprintk("using updated timf");
+ dprintk("using updated timf\n");
timf = state->timf;
}
@@ -667,7 +674,7 @@ static int dib8000_set_wbd_ref(struct dvb_frontend *fe, u16 value)
static void dib8000_reset_pll_common(struct dib8000_state *state, const struct dibx000_bandwidth_config *bw)
{
- dprintk("ifreq: %d %x, inversion: %d", bw->ifreq, bw->ifreq, bw->ifreq >> 25);
+ dprintk("ifreq: %d %x, inversion: %d\n", bw->ifreq, bw->ifreq, bw->ifreq >> 25);
if (state->revision != 0x8090) {
dib8000_write_word(state, 23,
(u16) (((bw->internal * 1000) >> 16) & 0xffff));
@@ -704,7 +711,7 @@ static void dib8000_reset_pll(struct dib8000_state *state)
clk_cfg1 = (clk_cfg1 & 0xfff7) | (pll->pll_bypass << 3);
dib8000_write_word(state, 902, clk_cfg1);
- dprintk("clk_cfg1: 0x%04x", clk_cfg1);
+ dprintk("clk_cfg1: 0x%04x\n", clk_cfg1);
/* smpl_cfg: P_refclksel=2, P_ensmplsel=1 nodivsmpl=1 */
if (state->cfg.pll->ADClkSrc == 0)
@@ -754,7 +761,7 @@ static int dib8000_update_pll(struct dvb_frontend *fe,
pll->pll_ratio == loopdiv))
return -EINVAL;
- dprintk("Updating pll (prediv: old = %d new = %d ; loopdiv : old = %d new = %d)", prediv, pll->pll_prediv, loopdiv, pll->pll_ratio);
+ dprintk("Updating pll (prediv: old = %d new = %d ; loopdiv : old = %d new = %d)\n", prediv, pll->pll_prediv, loopdiv, pll->pll_ratio);
if (state->revision == 0x8090) {
reg_1856 &= 0xf000;
reg_1857 = dib8000_read_word(state, 1857);
@@ -767,11 +774,11 @@ static int dib8000_update_pll(struct dvb_frontend *fe,
/* write new system clk into P_sec_len */
internal = dib8000_read32(state, 23) / 1000;
- dprintk("Old Internal = %d", internal);
+ dprintk("Old Internal = %d\n", internal);
xtal = 2 * (internal / loopdiv) * prediv;
internal = 1000 * (xtal/pll->pll_prediv) * pll->pll_ratio;
- dprintk("Xtal = %d , New Fmem = %d New Fdemod = %d, New Fsampling = %d", xtal, internal/1000, internal/2000, internal/8000);
- dprintk("New Internal = %d", internal);
+ dprintk("Xtal = %d , New Fmem = %d New Fdemod = %d, New Fsampling = %d\n", xtal, internal/1000, internal/2000, internal/8000);
+ dprintk("New Internal = %d\n", internal);
dib8000_write_word(state, 23,
(u16) (((internal / 2) >> 16) & 0xffff));
@@ -780,22 +787,22 @@ static int dib8000_update_pll(struct dvb_frontend *fe,
dib8000_write_word(state, 1857, reg_1857 | (1 << 15));
while (((dib8000_read_word(state, 1856)>>15)&0x1) != 1)
- dprintk("Waiting for PLL to lock");
+ dprintk("Waiting for PLL to lock\n");
/* verify */
reg_1856 = dib8000_read_word(state, 1856);
- dprintk("PLL Updated with prediv = %d and loopdiv = %d",
+ dprintk("PLL Updated with prediv = %d and loopdiv = %d\n",
reg_1856&0x3f, (reg_1856>>6)&0x3f);
} else {
if (bw != state->current_demod_bw) {
/** Bandwidth change => force PLL update **/
- dprintk("PLL: Bandwidth Change %d MHz -> %d MHz (prediv: %d->%d)", state->current_demod_bw / 1000, bw / 1000, oldprediv, state->cfg.pll->pll_prediv);
+ dprintk("PLL: Bandwidth Change %d MHz -> %d MHz (prediv: %d->%d)\n", state->current_demod_bw / 1000, bw / 1000, oldprediv, state->cfg.pll->pll_prediv);
if (state->cfg.pll->pll_prediv != oldprediv) {
/** Full PLL change only if prediv is changed **/
/** full update => bypass and reconfigure **/
- dprintk("PLL: New Setting for %d MHz Bandwidth (prediv: %d, ratio: %d)", bw/1000, state->cfg.pll->pll_prediv, state->cfg.pll->pll_ratio);
+ dprintk("PLL: New Setting for %d MHz Bandwidth (prediv: %d, ratio: %d)\n", bw/1000, state->cfg.pll->pll_prediv, state->cfg.pll->pll_ratio);
dib8000_write_word(state, 902, dib8000_read_word(state, 902) | (1<<3)); /* bypass PLL */
dib8000_reset_pll(state);
dib8000_write_word(state, 898, 0x0004); /* sad */
@@ -807,7 +814,7 @@ static int dib8000_update_pll(struct dvb_frontend *fe,
if (ratio != 0) {
/** ratio update => only change ratio **/
- dprintk("PLL: Update ratio (prediv: %d, ratio: %d)", state->cfg.pll->pll_prediv, ratio);
+ dprintk("PLL: Update ratio (prediv: %d, ratio: %d)\n", state->cfg.pll->pll_prediv, ratio);
dib8000_write_word(state, 901, (state->cfg.pll->pll_prediv << 8) | (ratio << 0)); /* only the PLL ratio is updated. */
}
}
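The PLL log lines in the hunk above trace a two-step recalculation: the crystal frequency is first recovered from the old prediv/loopdiv pair and the internal clock read back from registers 23/24, then the new internal clock is derived from the new pair. A worked example with invented divider values, following the driver's unit convention (kHz read back, the final figure scaled by 1000):

    #include <stdio.h>

    int main(void)
    {
            unsigned old_internal = 108000; /* kHz, read back from regs 23/24 */
            unsigned old_prediv = 1, old_loopdiv = 18;
            unsigned new_prediv = 1, new_loopdiv = 20;

            unsigned xtal = 2 * (old_internal / old_loopdiv) * old_prediv;
            unsigned internal = 1000 * (xtal / new_prediv) * new_loopdiv;

            printf("Xtal = %u , New Fmem = %u New Fdemod = %u, New Fsampling = %u\n",
                   xtal, internal / 1000, internal / 2000, internal / 8000);
            return 0;
    }

With these numbers the sketch recovers a 12000 kHz crystal and reports Fmem/Fdemod/Fsampling of 240000/120000/30000, matching the shape of the "Xtal = ... New Fmem = ..." message.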
@@ -841,7 +848,7 @@ static int dib8000_cfg_gpio(struct dib8000_state *st, u8 num, u8 dir, u8 val)
st->cfg.gpio_val |= (val & 0x01) << num; /* set the new value */
dib8000_write_word(st, 1030, st->cfg.gpio_val);
- dprintk("gpio dir: %x: gpio val: %x", st->cfg.gpio_dir, st->cfg.gpio_val);
+ dprintk("gpio dir: %x: gpio val: %x\n", st->cfg.gpio_dir, st->cfg.gpio_val);
return 0;
}
@@ -958,29 +965,29 @@ static u16 dib8000_identify(struct i2c_device *client)
value = dib8000_i2c_read16(client, 896);
if ((value = dib8000_i2c_read16(client, 896)) != 0x01b3) {
- dprintk("wrong Vendor ID (read=0x%x)", value);
+ dprintk("wrong Vendor ID (read=0x%x)\n", value);
return 0;
}
value = dib8000_i2c_read16(client, 897);
if (value != 0x8000 && value != 0x8001 &&
value != 0x8002 && value != 0x8090) {
- dprintk("wrong Device ID (%x)", value);
+ dprintk("wrong Device ID (%x)\n", value);
return 0;
}
switch (value) {
case 0x8000:
- dprintk("found DiB8000A");
+ dprintk("found DiB8000A\n");
break;
case 0x8001:
- dprintk("found DiB8000B");
+ dprintk("found DiB8000B\n");
break;
case 0x8002:
- dprintk("found DiB8000C");
+ dprintk("found DiB8000C\n");
break;
case 0x8090:
- dprintk("found DiB8096P");
+ dprintk("found DiB8096P\n");
break;
}
return value;
@@ -1037,7 +1044,7 @@ static int dib8000_reset(struct dvb_frontend *fe)
dib8000_write_word(state, 1287, 0x0003);
if (state->revision == 0x8000)
- dprintk("error : dib8000 MA not supported");
+ dprintk("error : dib8000 MA not supported\n");
dibx000_reset_i2c_master(&state->i2c_master);
@@ -1069,7 +1076,7 @@ static int dib8000_reset(struct dvb_frontend *fe)
if (state->cfg.drives)
dib8000_write_word(state, 906, state->cfg.drives);
else {
- dprintk("using standard PAD-drive-settings, please adjust settings in config-struct to be optimal.");
+ dprintk("using standard PAD-drive-settings, please adjust settings in config-struct to be optimal.\n");
/* min drive SDRAM - not optimal - adjust */
dib8000_write_word(state, 906, 0x2d98);
}
@@ -1080,11 +1087,11 @@ static int dib8000_reset(struct dvb_frontend *fe)
dib8000_write_word(state, 898, 0x0004);
if (dib8000_reset_gpio(state) != 0)
- dprintk("GPIO reset was not successful.");
+ dprintk("GPIO reset was not successful.\n");
if ((state->revision != 0x8090) &&
(dib8000_set_output_mode(fe, OUTMODE_HIGH_Z) != 0))
- dprintk("OUTPUT_MODE could not be resetted.");
+ dprintk("OUTPUT_MODE could not be resetted.\n");
state->current_agc = NULL;
@@ -1176,7 +1183,7 @@ static int dib8000_set_agc_config(struct dib8000_state *state, u8 band)
}
if (agc == NULL) {
- dprintk("no valid AGC configuration found for band 0x%02x", band);
+ dprintk("no valid AGC configuration found for band 0x%02x\n", band);
return -EINVAL;
}
@@ -1192,7 +1199,7 @@ static int dib8000_set_agc_config(struct dib8000_state *state, u8 band)
dib8000_write_word(state, 102, (agc->alpha_mant << 5) | agc->alpha_exp);
dib8000_write_word(state, 103, (agc->beta_mant << 6) | agc->beta_exp);
- dprintk("WBD: ref: %d, sel: %d, active: %d, alpha: %d",
+ dprintk("WBD: ref: %d, sel: %d, active: %d, alpha: %d\n",
state->wbd_ref != 0 ? state->wbd_ref : agc->wbd_ref, agc->wbd_sel, !agc->perform_agc_softsplit, agc->wbd_sel);
/* AGC continued */
@@ -1251,7 +1258,7 @@ static int dib8000_agc_soft_split(struct dib8000_state *state)
(agc - state->current_agc->split.min_thres) /
(state->current_agc->split.max_thres - state->current_agc->split.min_thres);
- dprintk("AGC split_offset: %d", split_offset);
+ dprintk("AGC split_offset: %d\n", split_offset);
// P_agc_force_split and P_agc_split_offset
dib8000_write_word(state, 107, (dib8000_read_word(state, 107) & 0xff00) | split_offset);
@@ -1395,7 +1402,7 @@ static void dib8096p_cfg_DibTx(struct dib8000_state *state, u32 P_Kin,
u32 P_Kout, u32 insertExtSynchro, u32 synchroMode,
u32 syncWord, u32 syncSize)
{
- dprintk("Configure DibStream Tx");
+ dprintk("Configure DibStream Tx\n");
dib8000_write_word(state, 1615, 1);
dib8000_write_word(state, 1603, P_Kin);
@@ -1414,7 +1421,7 @@ static void dib8096p_cfg_DibRx(struct dib8000_state *state, u32 P_Kin,
{
u32 syncFreq;
- dprintk("Configure DibStream Rx synchroMode = %d", synchroMode);
+ dprintk("Configure DibStream Rx synchroMode = %d\n", synchroMode);
if ((P_Kin != 0) && (P_Kout != 0)) {
syncFreq = dib8096p_calcSyncFreq(P_Kin, P_Kout,
@@ -1456,7 +1463,7 @@ static void dib8096p_configMpegMux(struct dib8000_state *state,
{
u16 reg_1287;
- dprintk("Enable Mpeg mux");
+ dprintk("Enable Mpeg mux\n");
dib8096p_enMpegMux(state, 0);
@@ -1477,15 +1484,15 @@ static void dib8096p_setDibTxMux(struct dib8000_state *state, int mode)
switch (mode) {
case MPEG_ON_DIBTX:
- dprintk("SET MPEG ON DIBSTREAM TX");
+ dprintk("SET MPEG ON DIBSTREAM TX\n");
dib8096p_cfg_DibTx(state, 8, 5, 0, 0, 0, 0);
reg_1288 |= (1 << 9); break;
case DIV_ON_DIBTX:
- dprintk("SET DIV_OUT ON DIBSTREAM TX");
+ dprintk("SET DIV_OUT ON DIBSTREAM TX\n");
dib8096p_cfg_DibTx(state, 5, 5, 0, 0, 0, 0);
reg_1288 |= (1 << 8); break;
case ADC_ON_DIBTX:
- dprintk("SET ADC_OUT ON DIBSTREAM TX");
+ dprintk("SET ADC_OUT ON DIBSTREAM TX\n");
dib8096p_cfg_DibTx(state, 20, 5, 10, 0, 0, 0);
reg_1288 |= (1 << 7); break;
default:
@@ -1500,17 +1507,17 @@ static void dib8096p_setHostBusMux(struct dib8000_state *state, int mode)
switch (mode) {
case DEMOUT_ON_HOSTBUS:
- dprintk("SET DEM OUT OLD INTERF ON HOST BUS");
+ dprintk("SET DEM OUT OLD INTERF ON HOST BUS\n");
dib8096p_enMpegMux(state, 0);
reg_1288 |= (1 << 6);
break;
case DIBTX_ON_HOSTBUS:
- dprintk("SET DIBSTREAM TX ON HOST BUS");
+ dprintk("SET DIBSTREAM TX ON HOST BUS\n");
dib8096p_enMpegMux(state, 0);
reg_1288 |= (1 << 5);
break;
case MPEG_ON_HOSTBUS:
- dprintk("SET MPEG MUX ON HOST BUS");
+ dprintk("SET MPEG MUX ON HOST BUS\n");
reg_1288 |= (1 << 4);
break;
default:
@@ -1526,7 +1533,7 @@ static int dib8096p_set_diversity_in(struct dvb_frontend *fe, int onoff)
switch (onoff) {
case 0: /* only use the internal way - not the diversity input */
- dprintk("%s mode OFF : by default Enable Mpeg INPUT",
+ dprintk("%s mode OFF : by default Enable Mpeg INPUT\n",
__func__);
/* outputRate = 8 */
dib8096p_cfg_DibRx(state, 8, 5, 0, 0, 0, 8, 0);
@@ -1544,7 +1551,7 @@ static int dib8096p_set_diversity_in(struct dvb_frontend *fe, int onoff)
break;
case 1: /* both ways */
case 2: /* only the diversity input */
- dprintk("%s ON : Enable diversity INPUT", __func__);
+ dprintk("%s ON : Enable diversity INPUT\n", __func__);
dib8096p_cfg_DibRx(state, 5, 5, 0, 0, 0, 0, 0);
state->input_mode_mpeg = 0;
break;
@@ -1576,11 +1583,11 @@ static int dib8096p_set_output_mode(struct dvb_frontend *fe, int mode)
case OUTMODE_MPEG2_SERIAL:
if (prefer_mpeg_mux_use) {
- dprintk("dib8096P setting output mode TS_SERIAL using Mpeg Mux");
+ dprintk("dib8096P setting output mode TS_SERIAL using Mpeg Mux\n");
dib8096p_configMpegMux(state, 3, 1, 1);
dib8096p_setHostBusMux(state, MPEG_ON_HOSTBUS);
} else {/* Use Smooth block */
- dprintk("dib8096P setting output mode TS_SERIAL using Smooth bloc");
+ dprintk("dib8096P setting output mode TS_SERIAL using Smooth bloc\n");
dib8096p_setHostBusMux(state,
DEMOUT_ON_HOSTBUS);
outreg |= (2 << 6) | (0 << 1);
@@ -1589,11 +1596,11 @@ static int dib8096p_set_output_mode(struct dvb_frontend *fe, int mode)
case OUTMODE_MPEG2_PAR_GATED_CLK:
if (prefer_mpeg_mux_use) {
- dprintk("dib8096P setting output mode TS_PARALLEL_GATED using Mpeg Mux");
+ dprintk("dib8096P setting output mode TS_PARALLEL_GATED using Mpeg Mux\n");
dib8096p_configMpegMux(state, 2, 0, 0);
dib8096p_setHostBusMux(state, MPEG_ON_HOSTBUS);
} else { /* Use Smooth block */
- dprintk("dib8096P setting output mode TS_PARALLEL_GATED using Smooth block");
+ dprintk("dib8096P setting output mode TS_PARALLEL_GATED using Smooth block\n");
dib8096p_setHostBusMux(state,
DEMOUT_ON_HOSTBUS);
outreg |= (0 << 6);
@@ -1601,7 +1608,7 @@ static int dib8096p_set_output_mode(struct dvb_frontend *fe, int mode)
break;
case OUTMODE_MPEG2_PAR_CONT_CLK: /* Using Smooth block only */
- dprintk("dib8096P setting output mode TS_PARALLEL_CONT using Smooth block");
+ dprintk("dib8096P setting output mode TS_PARALLEL_CONT using Smooth block\n");
dib8096p_setHostBusMux(state, DEMOUT_ON_HOSTBUS);
outreg |= (1 << 6);
break;
@@ -1609,7 +1616,7 @@ static int dib8096p_set_output_mode(struct dvb_frontend *fe, int mode)
case OUTMODE_MPEG2_FIFO:
/* Using Smooth block because not supported
by new Mpeg Mux bloc */
- dprintk("dib8096P setting output mode TS_FIFO using Smooth block");
+ dprintk("dib8096P setting output mode TS_FIFO using Smooth block\n");
dib8096p_setHostBusMux(state, DEMOUT_ON_HOSTBUS);
outreg |= (5 << 6);
smo_mode |= (3 << 1);
@@ -1617,13 +1624,13 @@ static int dib8096p_set_output_mode(struct dvb_frontend *fe, int mode)
break;
case OUTMODE_DIVERSITY:
- dprintk("dib8096P setting output mode MODE_DIVERSITY");
+ dprintk("dib8096P setting output mode MODE_DIVERSITY\n");
dib8096p_setDibTxMux(state, DIV_ON_DIBTX);
dib8096p_setHostBusMux(state, DIBTX_ON_HOSTBUS);
break;
case OUTMODE_ANALOG_ADC:
- dprintk("dib8096P setting output mode MODE_ANALOG_ADC");
+ dprintk("dib8096P setting output mode MODE_ANALOG_ADC\n");
dib8096p_setDibTxMux(state, ADC_ON_DIBTX);
dib8096p_setHostBusMux(state, DIBTX_ON_HOSTBUS);
break;
@@ -1632,7 +1639,7 @@ static int dib8096p_set_output_mode(struct dvb_frontend *fe, int mode)
if (mode != OUTMODE_HIGH_Z)
outreg |= (1<<10);
- dprintk("output_mpeg2_in_188_bytes = %d",
+ dprintk("output_mpeg2_in_188_bytes = %d\n",
state->cfg.output_mpeg2_in_188_bytes);
if (state->cfg.output_mpeg2_in_188_bytes)
smo_mode |= (1 << 5);
@@ -1678,7 +1685,7 @@ static int dib8096p_tuner_write_serpar(struct i2c_adapter *i2c_adap,
n_overflow = (dib8000_read_word(state, 1984) >> 1) & 0x1;
i--;
if (i == 0)
- dprintk("Tuner ITF: write busy (overflow)");
+ dprintk("Tuner ITF: write busy (overflow)\n");
}
dib8000_write_word(state, 1985, (1 << 6) | (serpar_num & 0x3f));
dib8000_write_word(state, 1986, (msg[0].buf[1] << 8) | msg[0].buf[2]);
@@ -1699,7 +1706,7 @@ static int dib8096p_tuner_read_serpar(struct i2c_adapter *i2c_adap,
n_overflow = (dib8000_read_word(state, 1984) >> 1) & 0x1;
i--;
if (i == 0)
- dprintk("TunerITF: read busy (overflow)");
+ dprintk("TunerITF: read busy (overflow)\n");
}
dib8000_write_word(state, 1985, (0<<6) | (serpar_num&0x3f));
@@ -1708,7 +1715,7 @@ static int dib8096p_tuner_read_serpar(struct i2c_adapter *i2c_adap,
n_empty = dib8000_read_word(state, 1984)&0x1;
i--;
if (i == 0)
- dprintk("TunerITF: read busy (empty)");
+ dprintk("TunerITF: read busy (empty)\n");
}
read_word = dib8000_read_word(state, 1987);
@@ -1889,7 +1896,7 @@ static int dib8096p_tuner_sleep(struct dvb_frontend *fe, int onoff)
struct dib8000_state *state = fe->demodulator_priv;
u16 en_cur_state;
- dprintk("sleep dib8096p: %d", onoff);
+ dprintk("sleep dib8096p: %d\n", onoff);
en_cur_state = dib8000_read_word(state, 1922);
@@ -1958,7 +1965,7 @@ static void dib8000_update_timf(struct dib8000_state *state)
dib8000_write_word(state, 29, (u16) (timf >> 16));
dib8000_write_word(state, 30, (u16) (timf & 0xffff));
- dprintk("Updated timing frequency: %d (default: %d)", state->timf, state->timf_default);
+ dprintk("Updated timing frequency: %d (default: %d)\n", state->timf, state->timf_default);
}
static u32 dib8000_ctrl_timf(struct dvb_frontend *fe, uint8_t op, uint32_t timf)
@@ -2118,7 +2125,7 @@ static u16 dib8000_get_init_prbs(struct dib8000_state *state, u16 subchannel)
int sub_channel_prbs_group = 0;
sub_channel_prbs_group = (subchannel / 3) + 1;
- dprintk("sub_channel_prbs_group = %d , subchannel =%d prbs = 0x%04x", sub_channel_prbs_group, subchannel, lut_prbs_8k[sub_channel_prbs_group]);
+ dprintk("sub_channel_prbs_group = %d , subchannel =%d prbs = 0x%04x\n", sub_channel_prbs_group, subchannel, lut_prbs_8k[sub_channel_prbs_group]);
switch (state->fe[0]->dtv_property_cache.transmission_mode) {
case TRANSMISSION_MODE_2K:
@@ -2604,7 +2611,7 @@ static int dib8000_autosearch_start(struct dvb_frontend *fe)
slist = 0;
}
}
- dprintk("Using list for autosearch : %d", slist);
+ dprintk("Using list for autosearch : %d\n", slist);
dib8000_set_isdbt_common_channel(state, slist, 1);
@@ -2638,17 +2645,17 @@ static int dib8000_autosearch_irq(struct dvb_frontend *fe)
if ((state->revision >= 0x8002) &&
(state->autosearch_state == AS_SEARCHING_FFT)) {
if (irq_pending & 0x1) {
- dprintk("dib8000_autosearch_irq: max correlation result available");
+ dprintk("dib8000_autosearch_irq: max correlation result available\n");
return 3;
}
} else {
if (irq_pending & 0x1) { /* failed */
- dprintk("dib8000_autosearch_irq failed");
+ dprintk("dib8000_autosearch_irq failed\n");
return 1;
}
if (irq_pending & 0x2) { /* succeeded */
- dprintk("dib8000_autosearch_irq succeeded");
+ dprintk("dib8000_autosearch_irq succeeded\n");
return 2;
}
}
@@ -2699,7 +2706,7 @@ static void dib8000_set_dds(struct dib8000_state *state, s32 offset_khz)
dds += abs_offset_khz * unit_khz_dds_val;
}
- dprintk("setting a DDS frequency offset of %c%dkHz", invert ? '-' : ' ', dds / unit_khz_dds_val);
+ dprintk("setting a DDS frequency offset of %c%dkHz\n", invert ? '-' : ' ', dds / unit_khz_dds_val);
if (abs_offset_khz <= (state->cfg.pll->internal / ratio)) {
/* Max dds offset is the half of the demod freq */
@@ -2738,7 +2745,7 @@ static void dib8000_set_frequency_offset(struct dib8000_state *state)
}
}
- dprintk("%dkhz tuner offset (frequency = %dHz & current_rf = %dHz) total_dds_offset_hz = %d", c->frequency - current_rf, c->frequency, current_rf, total_dds_offset_khz);
+ dprintk("%dkhz tuner offset (frequency = %dHz & current_rf = %dHz) total_dds_offset_hz = %d\n", c->frequency - current_rf, c->frequency, current_rf, total_dds_offset_khz);
/* apply dds offset now */
dib8000_set_dds(state, total_dds_offset_khz);
@@ -2890,7 +2897,7 @@ static u16 dib8000_read_lock(struct dvb_frontend *fe)
static int dib8090p_init_sdram(struct dib8000_state *state)
{
u16 reg = 0;
- dprintk("init sdram");
+ dprintk("init sdram\n");
reg = dib8000_read_word(state, 274) & 0xfff0;
dib8000_write_word(state, 274, reg | 0x7); /* P_dintlv_delay_ram = 7 because of MobileSdram */
@@ -2931,7 +2938,7 @@ static int is_manual_mode(struct dtv_frontend_properties *c)
* Transmission mode is only detected on auto mode, currently
*/
if (c->transmission_mode == TRANSMISSION_MODE_AUTO) {
- dprintk("transmission mode auto");
+ dprintk("transmission mode auto\n");
return 0;
}
@@ -2939,7 +2946,7 @@ static int is_manual_mode(struct dtv_frontend_properties *c)
* Guard interval is only detected on auto mode, currently
*/
if (c->guard_interval == GUARD_INTERVAL_AUTO) {
- dprintk("guard interval auto");
+ dprintk("guard interval auto\n");
return 0;
}
@@ -2948,7 +2955,7 @@ static int is_manual_mode(struct dtv_frontend_properties *c)
* layer should be enabled
*/
if (!c->isdbt_layer_enabled) {
- dprintk("no layer modulation specified");
+ dprintk("no layer modulation specified\n");
return 0;
}
@@ -2970,7 +2977,7 @@ static int is_manual_mode(struct dtv_frontend_properties *c)
if ((c->layer[i].modulation == QAM_AUTO) ||
(c->layer[i].fec == FEC_AUTO)) {
- dprintk("layer %c has either modulation or FEC auto",
+ dprintk("layer %c has either modulation or FEC auto\n",
'A' + i);
return 0;
}
@@ -2981,7 +2988,7 @@ static int is_manual_mode(struct dtv_frontend_properties *c)
* fallback to auto mode.
*/
if (n_segs == 0 || n_segs > 13) {
- dprintk("number of segments is invalid");
+ dprintk("number of segments is invalid\n");
return 0;
}
@@ -3009,7 +3016,7 @@ static int dib8000_tune(struct dvb_frontend *fe)
#if 0
if (*tune_state < CT_DEMOD_STOP)
- dprintk("IN: context status = %d, TUNE_STATE %d autosearch step = %u jiffies = %lu",
+ dprintk("IN: context status = %d, TUNE_STATE %d autosearch step = %u jiffies = %lu\n",
state->channel_parameters_set, *tune_state, state->autosearch_state, now);
#endif
@@ -3022,7 +3029,7 @@ static int dib8000_tune(struct dvb_frontend *fe)
state->status = FE_STATUS_TUNE_PENDING;
state->channel_parameters_set = is_manual_mode(c);
- dprintk("Tuning channel on %s search mode",
+ dprintk("Tuning channel on %s search mode\n",
state->channel_parameters_set ? "manual" : "auto");
dib8000_viterbi_state(state, 0); /* force chan dec in restart */
@@ -3102,7 +3109,7 @@ static int dib8000_tune(struct dvb_frontend *fe)
corm[1] = (dib8000_read_word(state, 596) << 16) | (dib8000_read_word(state, 597));
corm[0] = (dib8000_read_word(state, 598) << 16) | (dib8000_read_word(state, 599));
}
- /* dprintk("corm fft: %u %u %u", corm[0], corm[1], corm[2]); */
+ /* dprintk("corm fft: %u %u %u\n", corm[0], corm[1], corm[2]); */
max_value = 0;
for (find_index = 1 ; find_index < 3 ; find_index++) {
@@ -3122,7 +3129,7 @@ static int dib8000_tune(struct dvb_frontend *fe)
state->found_nfft = TRANSMISSION_MODE_8K;
break;
}
- /* dprintk("Autosearch FFT has found Mode %d", max_value + 1); */
+ /* dprintk("Autosearch FFT has found Mode %d\n", max_value + 1); */
*tune_state = CT_DEMOD_SEARCH_NEXT;
state->autosearch_state = AS_SEARCHING_GUARD;
@@ -3137,7 +3144,7 @@ static int dib8000_tune(struct dvb_frontend *fe)
state->found_guard = dib8000_read_word(state, 572) & 0x3;
else
state->found_guard = dib8000_read_word(state, 570) & 0x3;
- /* dprintk("guard interval found=%i", state->found_guard); */
+ /* dprintk("guard interval found=%i\n", state->found_guard); */
*tune_state = CT_DEMOD_STEP_3;
break;
@@ -3233,7 +3240,7 @@ static int dib8000_tune(struct dvb_frontend *fe)
/* defines timeout for mpeg lock depending on interleaver length of longest layer */
for (i = 0; i < 3; i++) {
if (c->layer[i].interleaving >= deeper_interleaver) {
- dprintk("layer%i: time interleaver = %d ", i, c->layer[i].interleaving);
+ dprintk("layer%i: time interleaver = %d\n", i, c->layer[i].interleaving);
if (c->layer[i].segment_count > 0) { /* valid layer */
deeper_interleaver = c->layer[0].interleaving;
state->longest_intlv_layer = i;
@@ -3252,7 +3259,7 @@ static int dib8000_tune(struct dvb_frontend *fe)
locks *= 2;
*timeout = now + msecs_to_jiffies(200 * locks); /* give the mpeg lock 800ms if sram is present */
- dprintk("Deeper interleaver mode = %d on layer %d : timeout mult factor = %d => will use timeout = %ld",
+ dprintk("Deeper interleaver mode = %d on layer %d : timeout mult factor = %d => will use timeout = %ld\n",
deeper_interleaver, state->longest_intlv_layer, locks, *timeout);
*tune_state = CT_DEMOD_STEP_10;
@@ -3263,7 +3270,7 @@ static int dib8000_tune(struct dvb_frontend *fe)
case CT_DEMOD_STEP_10: /* 40 */
locks = dib8000_read_lock(fe);
if (locks&(1<<(7-state->longest_intlv_layer))) { /* mpeg lock : check the longest one */
- dprintk("ISDB-T layer locks: Layer A %s, Layer B %s, Layer C %s",
+ dprintk("ISDB-T layer locks: Layer A %s, Layer B %s, Layer C %s\n",
c->layer[0].segment_count ? (locks >> 7) & 0x1 ? "locked" : "NOT LOCKED" : "not enabled",
c->layer[1].segment_count ? (locks >> 6) & 0x1 ? "locked" : "NOT LOCKED" : "not enabled",
c->layer[2].segment_count ? (locks >> 5) & 0x1 ? "locked" : "NOT LOCKED" : "not enabled");
@@ -3283,7 +3290,7 @@ static int dib8000_tune(struct dvb_frontend *fe)
*tune_state = CT_DEMOD_STEP_11;
} else { /* we are done; mpeg of the longest interleaver was not locking, but let's try if another layer has locked in the same time */
if (locks & (0x7 << 5)) {
- dprintk("Not all ISDB-T layers locked in %d ms: Layer A %s, Layer B %s, Layer C %s",
+ dprintk("Not all ISDB-T layers locked in %d ms: Layer A %s, Layer B %s, Layer C %s\n",
jiffies_to_msecs(now - *timeout),
c->layer[0].segment_count ? (locks >> 7) & 0x1 ? "locked" : "NOT LOCKED" : "not enabled",
c->layer[1].segment_count ? (locks >> 6) & 0x1 ? "locked" : "NOT LOCKED" : "not enabled",
@@ -3348,7 +3355,7 @@ static int dib8000_wakeup(struct dvb_frontend *fe)
dib8000_set_power_mode(state, DIB8000_POWER_ALL);
dib8000_set_adc_state(state, DIBX000_ADC_ON);
if (dib8000_set_adc_state(state, DIBX000_SLOW_ADC_ON) != 0)
- dprintk("could not start Slow ADC");
+ dprintk("could not start Slow ADC\n");
if (state->revision == 0x8090)
dib8000_sad_calib(state);
@@ -3401,11 +3408,11 @@ static int dib8000_get_frontend(struct dvb_frontend *fe,
if (!(stat & FE_HAS_SYNC))
return 0;
- dprintk("dib8000_get_frontend: TMCC lock");
+ dprintk("dib8000_get_frontend: TMCC lock\n");
for (index_frontend = 1; (index_frontend < MAX_NUMBER_OF_FRONTENDS) && (state->fe[index_frontend] != NULL); index_frontend++) {
state->fe[index_frontend]->ops.read_status(state->fe[index_frontend], &stat);
if (stat&FE_HAS_SYNC) {
- dprintk("TMCC lock on the slave%i", index_frontend);
+ dprintk("TMCC lock on the slave%i\n", index_frontend);
/* synchronize the cache with the other frontends */
state->fe[index_frontend]->ops.get_frontend(state->fe[index_frontend], c);
for (sub_index_frontend = 0; (sub_index_frontend < MAX_NUMBER_OF_FRONTENDS) && (state->fe[sub_index_frontend] != NULL); sub_index_frontend++) {
@@ -3437,41 +3444,41 @@ static int dib8000_get_frontend(struct dvb_frontend *fe,
switch ((val & 0x30) >> 4) {
case 1:
c->transmission_mode = TRANSMISSION_MODE_2K;
- dprintk("dib8000_get_frontend: transmission mode 2K");
+ dprintk("dib8000_get_frontend: transmission mode 2K\n");
break;
case 2:
c->transmission_mode = TRANSMISSION_MODE_4K;
- dprintk("dib8000_get_frontend: transmission mode 4K");
+ dprintk("dib8000_get_frontend: transmission mode 4K\n");
break;
case 3:
default:
c->transmission_mode = TRANSMISSION_MODE_8K;
- dprintk("dib8000_get_frontend: transmission mode 8K");
+ dprintk("dib8000_get_frontend: transmission mode 8K\n");
break;
}
switch (val & 0x3) {
case 0:
c->guard_interval = GUARD_INTERVAL_1_32;
- dprintk("dib8000_get_frontend: Guard Interval = 1/32 ");
+ dprintk("dib8000_get_frontend: Guard Interval = 1/32\n");
break;
case 1:
c->guard_interval = GUARD_INTERVAL_1_16;
- dprintk("dib8000_get_frontend: Guard Interval = 1/16 ");
+ dprintk("dib8000_get_frontend: Guard Interval = 1/16\n");
break;
case 2:
- dprintk("dib8000_get_frontend: Guard Interval = 1/8 ");
+ dprintk("dib8000_get_frontend: Guard Interval = 1/8\n");
c->guard_interval = GUARD_INTERVAL_1_8;
break;
case 3:
- dprintk("dib8000_get_frontend: Guard Interval = 1/4 ");
+ dprintk("dib8000_get_frontend: Guard Interval = 1/4\n");
c->guard_interval = GUARD_INTERVAL_1_4;
break;
}
val = dib8000_read_word(state, 505);
c->isdbt_partial_reception = val & 1;
- dprintk("dib8000_get_frontend: partial_reception = %d ", c->isdbt_partial_reception);
+ dprintk("dib8000_get_frontend: partial_reception = %d\n", c->isdbt_partial_reception);
for (i = 0; i < 3; i++) {
int show;
@@ -3485,7 +3492,7 @@ static int dib8000_get_frontend(struct dvb_frontend *fe,
show = 1;
if (show)
- dprintk("dib8000_get_frontend: Layer %d segments = %d ",
+ dprintk("dib8000_get_frontend: Layer %d segments = %d\n",
i, c->layer[i].segment_count);
val = dib8000_read_word(state, 499 + i) & 0x3;
@@ -3494,7 +3501,7 @@ static int dib8000_get_frontend(struct dvb_frontend *fe,
val = 4;
c->layer[i].interleaving = val;
if (show)
- dprintk("dib8000_get_frontend: Layer %d time_intlv = %d ",
+ dprintk("dib8000_get_frontend: Layer %d time_intlv = %d\n",
i, c->layer[i].interleaving);
val = dib8000_read_word(state, 481 + i);
@@ -3502,27 +3509,27 @@ static int dib8000_get_frontend(struct dvb_frontend *fe,
case 1:
c->layer[i].fec = FEC_1_2;
if (show)
- dprintk("dib8000_get_frontend: Layer %d Code Rate = 1/2 ", i);
+ dprintk("dib8000_get_frontend: Layer %d Code Rate = 1/2\n", i);
break;
case 2:
c->layer[i].fec = FEC_2_3;
if (show)
- dprintk("dib8000_get_frontend: Layer %d Code Rate = 2/3 ", i);
+ dprintk("dib8000_get_frontend: Layer %d Code Rate = 2/3\n", i);
break;
case 3:
c->layer[i].fec = FEC_3_4;
if (show)
- dprintk("dib8000_get_frontend: Layer %d Code Rate = 3/4 ", i);
+ dprintk("dib8000_get_frontend: Layer %d Code Rate = 3/4\n", i);
break;
case 5:
c->layer[i].fec = FEC_5_6;
if (show)
- dprintk("dib8000_get_frontend: Layer %d Code Rate = 5/6 ", i);
+ dprintk("dib8000_get_frontend: Layer %d Code Rate = 5/6\n", i);
break;
default:
c->layer[i].fec = FEC_7_8;
if (show)
- dprintk("dib8000_get_frontend: Layer %d Code Rate = 7/8 ", i);
+ dprintk("dib8000_get_frontend: Layer %d Code Rate = 7/8\n", i);
break;
}
@@ -3531,23 +3538,23 @@ static int dib8000_get_frontend(struct dvb_frontend *fe,
case 0:
c->layer[i].modulation = DQPSK;
if (show)
- dprintk("dib8000_get_frontend: Layer %d DQPSK ", i);
+ dprintk("dib8000_get_frontend: Layer %d DQPSK\n", i);
break;
case 1:
c->layer[i].modulation = QPSK;
if (show)
- dprintk("dib8000_get_frontend: Layer %d QPSK ", i);
+ dprintk("dib8000_get_frontend: Layer %d QPSK\n", i);
break;
case 2:
c->layer[i].modulation = QAM_16;
if (show)
- dprintk("dib8000_get_frontend: Layer %d QAM16 ", i);
+ dprintk("dib8000_get_frontend: Layer %d QAM16\n", i);
break;
case 3:
default:
c->layer[i].modulation = QAM_64;
if (show)
- dprintk("dib8000_get_frontend: Layer %d QAM64 ", i);
+ dprintk("dib8000_get_frontend: Layer %d QAM64\n", i);
break;
}
}
@@ -3578,12 +3585,12 @@ static int dib8000_set_frontend(struct dvb_frontend *fe)
unsigned long delay, callback_time;
if (c->frequency == 0) {
- dprintk("dib8000: must at least specify frequency ");
+ dprintk("dib8000: must at least specify frequency\n");
return 0;
}
if (c->bandwidth_hz == 0) {
- dprintk("dib8000: no bandwidth specified, set to default ");
+ dprintk("dib8000: no bandwidth specified, set to default\n");
c->bandwidth_hz = 6000000;
}
@@ -3671,7 +3678,7 @@ static int dib8000_set_frontend(struct dvb_frontend *fe)
/* we are in autosearch */
if (state->channel_parameters_set == 0) { /* searching */
if ((dib8000_get_status(state->fe[index_frontend]) == FE_STATUS_DEMOD_SUCCESS) || (dib8000_get_status(state->fe[index_frontend]) == FE_STATUS_FFT_SUCCESS)) {
- dprintk("autosearch succeeded on fe%i", index_frontend);
+ dprintk("autosearch succeeded on fe%i\n", index_frontend);
dib8000_get_frontend(state->fe[index_frontend], c); /* we read the channel parameters from the frontend which was successful */
state->channel_parameters_set = 1;
@@ -3708,11 +3715,11 @@ static int dib8000_set_frontend(struct dvb_frontend *fe)
active = 1;
}
if (active == 0)
- dprintk("tuning done with status %d", dib8000_get_status(state->fe[0]));
+ dprintk("tuning done with status %d\n", dib8000_get_status(state->fe[0]));
}
if ((active == 1) && (callback_time == 0)) {
- dprintk("strange callback time something went wrong");
+ dprintk("strange callback time something went wrong\n");
active = 0;
}
@@ -4172,7 +4179,7 @@ static int dib8000_get_stats(struct dvb_frontend *fe, enum fe_status stat)
time_us = dib8000_get_time_us(fe, -1);
state->ber_jiffies_stats = jiffies + msecs_to_jiffies((time_us + 500) / 1000);
- dprintk("Next all layers stats available in %u us.", time_us);
+ dprintk("Next all layers stats available in %u us.\n", time_us);
dib8000_read_ber(fe, &val);
c->post_bit_error.stat[0].scale = FE_SCALE_COUNTER;
@@ -4239,12 +4246,12 @@ static int dib8000_set_slave_frontend(struct dvb_frontend *fe, struct dvb_fronte
while ((index_frontend < MAX_NUMBER_OF_FRONTENDS) && (state->fe[index_frontend] != NULL))
index_frontend++;
if (index_frontend < MAX_NUMBER_OF_FRONTENDS) {
- dprintk("set slave fe %p to index %i", fe_slave, index_frontend);
+ dprintk("set slave fe %p to index %i\n", fe_slave, index_frontend);
state->fe[index_frontend] = fe_slave;
return 0;
}
- dprintk("too many slave frontend");
+ dprintk("too many slave frontend\n");
return -ENOMEM;
}
@@ -4256,12 +4263,12 @@ static int dib8000_remove_slave_frontend(struct dvb_frontend *fe)
while ((index_frontend < MAX_NUMBER_OF_FRONTENDS) && (state->fe[index_frontend] != NULL))
index_frontend++;
if (index_frontend != 1) {
- dprintk("remove slave fe %p (index %i)", state->fe[index_frontend-1], index_frontend-1);
+ dprintk("remove slave fe %p (index %i)\n", state->fe[index_frontend-1], index_frontend-1);
state->fe[index_frontend] = NULL;
return 0;
}
- dprintk("no frontend to be removed");
+ dprintk("no frontend to be removed\n");
return -ENODEV;
}
@@ -4283,18 +4290,18 @@ static int dib8000_i2c_enumeration(struct i2c_adapter *host, int no_of_demods,
client.i2c_write_buffer = kzalloc(4 * sizeof(u8), GFP_KERNEL);
if (!client.i2c_write_buffer) {
- dprintk("%s: not enough memory", __func__);
+ dprintk("%s: not enough memory\n", __func__);
return -ENOMEM;
}
client.i2c_read_buffer = kzalloc(4 * sizeof(u8), GFP_KERNEL);
if (!client.i2c_read_buffer) {
- dprintk("%s: not enough memory", __func__);
+ dprintk("%s: not enough memory\n", __func__);
ret = -ENOMEM;
goto error_memory_read;
}
client.i2c_buffer_lock = kzalloc(sizeof(struct mutex), GFP_KERNEL);
if (!client.i2c_buffer_lock) {
- dprintk("%s: not enough memory", __func__);
+ dprintk("%s: not enough memory\n", __func__);
ret = -ENOMEM;
goto error_memory_lock;
}
@@ -4313,7 +4320,7 @@ static int dib8000_i2c_enumeration(struct i2c_adapter *host, int no_of_demods,
dib8000_i2c_write16(&client, 1287, 0x0003);
client.addr = default_addr;
if (dib8000_identify(&client) == 0) {
- dprintk("#%d: not identified", k);
+ dprintk("#%d: not identified\n", k);
ret = -EINVAL;
goto error;
}
@@ -4327,7 +4334,7 @@ static int dib8000_i2c_enumeration(struct i2c_adapter *host, int no_of_demods,
client.addr = new_addr;
dib8000_identify(&client);
- dprintk("IC %d initialized (to i2c_address 0x%x)", k, new_addr);
+ dprintk("IC %d initialized (to i2c_address 0x%x)\n", k, new_addr);
}
for (k = 0; k < no_of_demods; k++) {
@@ -4385,14 +4392,14 @@ static int dib8000_pid_filter_ctrl(struct dvb_frontend *fe, u8 onoff)
u16 val = dib8000_read_word(st, 299) & 0xffef;
val |= (onoff & 0x1) << 4;
- dprintk("pid filter enabled %d", onoff);
+ dprintk("pid filter enabled %d\n", onoff);
return dib8000_write_word(st, 299, val);
}
static int dib8000_pid_filter(struct dvb_frontend *fe, u8 id, u16 pid, u8 onoff)
{
struct dib8000_state *st = fe->demodulator_priv;
- dprintk("Index %x, PID %d, OnOff %d", id, pid, onoff);
+ dprintk("Index %x, PID %d, OnOff %d\n", id, pid, onoff);
return dib8000_write_word(st, 305 + id, onoff ? (1 << 13) | pid : 0);
}
@@ -4431,7 +4438,7 @@ static struct dvb_frontend *dib8000_init(struct i2c_adapter *i2c_adap, u8 i2c_ad
struct dvb_frontend *fe;
struct dib8000_state *state;
- dprintk("dib8000_init");
+ dprintk("dib8000_init\n");
state = kzalloc(sizeof(struct dib8000_state), GFP_KERNEL);
if (state == NULL)
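
A note on the pattern behind all the "\n" additions above: dib8000's dprintk() used to print the format string and then emit its own newline, so callers omitted the "\n". The patch rewrites the macro around pr_fmt() (the dib9000 and dibx000_common hunks below show the same rewrite), and the new macro prints exactly what the caller passes, so every format string must now carry its own terminating newline. A minimal standalone sketch of the new shape, assuming a kbuild module (so KBUILD_MODNAME is defined) and the module-local debug parameter these drivers already have:

	#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

	#include <linux/kernel.h>
	#include <linux/module.h>

	static int debug;
	module_param(debug, int, 0644);

	/* New-style debug macro: prefixes the module name (via pr_fmt) and
	 * the calling function, but no longer appends a newline itself. */
	#define dprintk(fmt, arg...) do {				\
		if (debug)						\
			printk(KERN_DEBUG pr_fmt("%s: " fmt),		\
			       __func__, ##arg);			\
	} while (0)

	static void example(void)
	{
		dprintk("tuning done with status %d\n", 0); /* "\n" is now the caller's job */
	}

One consequence: a format string without "\n" leaves the console line open, which is why even the commented-out dprintk() calls in the hunks above gain the newline.
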
diff --git a/drivers/media/dvb-frontends/dib9000.c b/drivers/media/dvb-frontends/dib9000.c
index 5897977d2d00..c95fff4f9582 100644
--- a/drivers/media/dvb-frontends/dib9000.c
+++ b/drivers/media/dvb-frontends/dib9000.c
@@ -7,6 +7,9 @@
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation, version 2.
*/
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/kernel.h>
#include <linux/i2c.h>
#include <linux/mutex.h>
@@ -21,7 +24,12 @@ static int debug;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "turn on debugging (default: 0)");
-#define dprintk(args...) do { if (debug) { printk(KERN_DEBUG "DiB9000: "); printk(args); printk("\n"); } } while (0)
+#define dprintk(fmt, arg...) do { \
+ if (debug) \
+ printk(KERN_DEBUG pr_fmt("%s: " fmt), \
+ __func__, ##arg); \
+} while (0)
+
#define MAX_NUMBER_OF_FRONTENDS 6
struct i2c_device {
@@ -258,7 +266,7 @@ static int dib9000_read16_attr(struct dib9000_state *state, u16 reg, u8 *b, u32
state->msg[1].buf = b;
ret = i2c_transfer(state->i2c.i2c_adap, state->msg, 2) != 2 ? -EREMOTEIO : 0;
if (ret != 0) {
- dprintk("i2c read error on %d", reg);
+ dprintk("i2c read error on %d\n", reg);
return -EREMOTEIO;
}
@@ -285,7 +293,7 @@ static u16 dib9000_i2c_read16(struct i2c_device *i2c, u16 reg)
i2c->i2c_write_buffer[1] = reg & 0xff;
if (i2c_transfer(i2c->i2c_adap, msg, 2) != 2) {
- dprintk("read register %x error", reg);
+ dprintk("read register %x error\n", reg);
return 0;
}
@@ -440,7 +448,7 @@ static int dib9000_risc_mem_read(struct dib9000_state *state, u8 cmd, u8 * b, u1
return -EIO;
if (mutex_lock_interruptible(&state->platform.risc.mem_lock) < 0) {
- dprintk("could not get the lock");
+ dprintk("could not get the lock\n");
return -EINTR;
}
dib9000_risc_mem_setup(state, cmd | 0x80);
@@ -456,7 +464,7 @@ static int dib9000_risc_mem_write(struct dib9000_state *state, u8 cmd, const u8
return -EIO;
if (mutex_lock_interruptible(&state->platform.risc.mem_lock) < 0) {
- dprintk("could not get the lock");
+ dprintk("could not get the lock\n");
return -EINTR;
}
dib9000_risc_mem_setup(state, cmd);
@@ -479,13 +487,13 @@ static int dib9000_firmware_download(struct dib9000_state *state, u8 risc_id, u1
dib9000_write_word(state, 1025 + offs, 0);
dib9000_write_word(state, 1031 + offs, key);
- dprintk("going to download %dB of microcode", len);
+ dprintk("going to download %dB of microcode\n", len);
if (dib9000_write16_noinc(state, 1026 + offs, (u8 *) code, (u16) len) != 0) {
- dprintk("error while downloading microcode for RISC %c", 'A' + risc_id);
+ dprintk("error while downloading microcode for RISC %c\n", 'A' + risc_id);
return -EIO;
}
- dprintk("Microcode for RISC %c loaded", 'A' + risc_id);
+ dprintk("Microcode for RISC %c loaded\n", 'A' + risc_id);
return 0;
}
@@ -511,10 +519,10 @@ static int dib9000_mbx_host_init(struct dib9000_state *state, u8 risc_id)
} while ((reset_reg & 0x8000) && --tries);
if (reset_reg & 0x8000) {
- dprintk("MBX: init ERROR, no response from RISC %c", 'A' + risc_id);
+ dprintk("MBX: init ERROR, no response from RISC %c\n", 'A' + risc_id);
return -EIO;
}
- dprintk("MBX: initialized");
+ dprintk("MBX: initialized\n");
return 0;
}
@@ -531,30 +539,27 @@ static int dib9000_mbx_send_attr(struct dib9000_state *state, u8 id, u16 * data,
return -EINVAL;
if (mutex_lock_interruptible(&state->platform.risc.mbx_if_lock) < 0) {
- dprintk("could not get the lock");
+ dprintk("could not get the lock\n");
return -EINTR;
}
tmp = MAX_MAILBOX_TRY;
do {
size = dib9000_read_word_attr(state, 1043, attr) & 0xff;
if ((size + len + 1) > MBX_MAX_WORDS && --tmp) {
- dprintk("MBX: RISC mbx full, retrying");
+ dprintk("MBX: RISC mbx full, retrying\n");
msleep(100);
} else
break;
} while (1);
- /*dprintk( "MBX: size: %d", size); */
+ /*dprintk( "MBX: size: %d\n", size); */
if (tmp == 0) {
ret = -EINVAL;
goto out;
}
#ifdef DUMP_MSG
- dprintk("--> %02x %d ", id, len + 1);
- for (i = 0; i < len; i++)
- dprintk("%04x ", data[i]);
- dprintk("\n");
+ dprintk("--> %02x %d %*ph\n", id, len + 1, len, data);
#endif
/* byte-order conversion - works on big (where it is not necessary) or little endian */
@@ -596,7 +601,7 @@ static u8 dib9000_mbx_read(struct dib9000_state *state, u16 * data, u8 risc_id,
return 0;
if (mutex_lock_interruptible(&state->platform.risc.mbx_if_lock) < 0) {
- dprintk("could not get the lock");
+ dprintk("could not get the lock\n");
return 0;
}
if (risc_id == 1)
@@ -622,13 +627,13 @@ static u8 dib9000_mbx_read(struct dib9000_state *state, u16 * data, u8 risc_id,
}
#ifdef DUMP_MSG
- dprintk("<-- ");
+ dprintk("<--\n");
for (i = 0; i < size + 1; i++)
- dprintk("%04x ", d[i]);
+ dprintk("%04x\n", d[i]);
dprintk("\n");
#endif
} else {
- dprintk("MBX: message is too big for message cache (%d), flushing message", size);
+ dprintk("MBX: message is too big for message cache (%d), flushing message\n", size);
size--; /* Initial word already read */
while (size--)
dib9000_read16_noinc_attr(state, 1029 + mc_base, (u8 *) data, 2, attr);
@@ -649,9 +654,11 @@ static int dib9000_risc_debug_buf(struct dib9000_state *state, u16 * data, u8 si
b[2 * (size - 2) - 1] = '\0'; /* Bullet proof the buffer */
if (*b == '~') {
b++;
- dprintk("%s", b);
+ dprintk("%s\n", b);
} else
- dprintk("RISC%d: %d.%04d %s", state->fe_id, ts / 10000, ts % 10000, *b ? b : "<empty>");
+ dprintk("RISC%d: %d.%04d %s\n",
+ state->fe_id,
+ ts / 10000, ts % 10000, *b ? b : "<empty>");
return 1;
}
@@ -666,7 +673,7 @@ static int dib9000_mbx_fetch_to_cache(struct dib9000_state *state, u16 attr)
if (*block == 0) {
size = dib9000_mbx_read(state, block, 1, attr);
-/* dprintk( "MBX: fetched %04x message to cache", *block); */
+/* dprintk( "MBX: fetched %04x message to cache\n", *block); */
switch (*block >> 8) {
case IN_MSG_DEBUG_BUF:
@@ -686,7 +693,7 @@ static int dib9000_mbx_fetch_to_cache(struct dib9000_state *state, u16 attr)
return 1;
}
}
- dprintk("MBX: no free cache-slot found for new message...");
+ dprintk("MBX: no free cache-slot found for new message...\n");
return -1;
}
@@ -706,7 +713,7 @@ static int dib9000_mbx_process(struct dib9000_state *state, u16 attr)
return -1;
if (mutex_lock_interruptible(&state->platform.risc.mbx_lock) < 0) {
- dprintk("could not get the lock");
+ dprintk("could not get the lock\n");
return -1;
}
@@ -715,7 +722,7 @@ static int dib9000_mbx_process(struct dib9000_state *state, u16 attr)
dib9000_read_word_attr(state, 1229, attr); /* Clear the IRQ */
/* if (tmp) */
-/* dprintk( "cleared IRQ: %x", tmp); */
+/* dprintk( "cleared IRQ: %x\n", tmp); */
mutex_unlock(&state->platform.risc.mbx_lock);
return ret;
@@ -750,7 +757,7 @@ static int dib9000_mbx_get_message_attr(struct dib9000_state *state, u16 id, u16
} while (--timeout);
if (timeout == 0) {
- dprintk("waiting for message %d timed out", id);
+ dprintk("waiting for message %d timed out\n", id);
return -1;
}
@@ -770,7 +777,7 @@ static int dib9000_risc_check_version(struct dib9000_state *state)
return -EIO;
fw_version = (r[0] << 8) | r[1];
- dprintk("RISC: ver: %d.%02d (IC: %d)", fw_version >> 10, fw_version & 0x3ff, (r[2] << 8) | r[3]);
+ dprintk("RISC: ver: %d.%02d (IC: %d)\n", fw_version >> 10, fw_version & 0x3ff, (r[2] << 8) | r[3]);
if ((fw_version >> 10) != 7)
return -EINVAL;
@@ -850,40 +857,40 @@ static u16 dib9000_identify(struct i2c_device *client)
value = dib9000_i2c_read16(client, 896);
if (value != 0x01b3) {
- dprintk("wrong Vendor ID (0x%x)", value);
+ dprintk("wrong Vendor ID (0x%x)\n", value);
return 0;
}
value = dib9000_i2c_read16(client, 897);
if (value != 0x4000 && value != 0x4001 && value != 0x4002 && value != 0x4003 && value != 0x4004 && value != 0x4005) {
- dprintk("wrong Device ID (0x%x)", value);
+ dprintk("wrong Device ID (0x%x)\n", value);
return 0;
}
/* protect this driver to be used with 7000PC */
if (value == 0x4000 && dib9000_i2c_read16(client, 769) == 0x4000) {
- dprintk("this driver does not work with DiB7000PC");
+ dprintk("this driver does not work with DiB7000PC\n");
return 0;
}
switch (value) {
case 0x4000:
- dprintk("found DiB7000MA/PA/MB/PB");
+ dprintk("found DiB7000MA/PA/MB/PB\n");
break;
case 0x4001:
- dprintk("found DiB7000HC");
+ dprintk("found DiB7000HC\n");
break;
case 0x4002:
- dprintk("found DiB7000MC");
+ dprintk("found DiB7000MC\n");
break;
case 0x4003:
- dprintk("found DiB9000A");
+ dprintk("found DiB9000A\n");
break;
case 0x4004:
- dprintk("found DiB9000H");
+ dprintk("found DiB9000H\n");
break;
case 0x4005:
- dprintk("found DiB9000M");
+ dprintk("found DiB9000M\n");
break;
}
@@ -1013,7 +1020,7 @@ static int dib9000_risc_apb_access_read(struct dib9000_state *state, u32 address
if (address >= 1024 || !state->platform.risc.fw_is_running)
return -EINVAL;
- /* dprintk( "APB access thru rd fw %d %x", address, attribute); */
+ /* dprintk( "APB access thru rd fw %d %x\n", address, attribute); */
mb[0] = (u16) address;
mb[1] = len / 2;
@@ -1043,7 +1050,7 @@ static int dib9000_risc_apb_access_write(struct dib9000_state *state, u32 addres
if (len > 18)
return -EINVAL;
- /* dprintk( "APB access thru wr fw %d %x", address, attribute); */
+ /* dprintk( "APB access thru wr fw %d %x\n", address, attribute); */
mb[0] = (u16)address;
for (i = 0; i + 1 < len; i += 2)
@@ -1191,7 +1198,7 @@ static int dib9000_fw_get_channel(struct dvb_frontend *fe)
int ret = 0;
if (mutex_lock_interruptible(&state->platform.risc.mem_mbx_lock) < 0) {
- dprintk("could not get the lock");
+ dprintk("could not get the lock\n");
return -EINTR;
}
if (dib9000_fw_memmbx_sync(state, FE_SYNC_CHANNEL) < 0) {
@@ -1534,7 +1541,7 @@ static int dib9000_fw_set_output_mode(struct dvb_frontend *fe, int mode)
struct dib9000_state *state = fe->demodulator_priv;
u16 outreg, smo_mode;
- dprintk("setting output mode for demod %p to %d", fe, mode);
+ dprintk("setting output mode for demod %p to %d\n", fe, mode);
switch (mode) {
case OUTMODE_MPEG2_PAR_GATED_CLK:
@@ -1556,7 +1563,7 @@ static int dib9000_fw_set_output_mode(struct dvb_frontend *fe, int mode)
outreg = 0;
break;
default:
- dprintk("Unhandled output_mode passed to be set for demod %p", &state->fe[0]);
+ dprintk("Unhandled output_mode passed to be set for demod %p\n", &state->fe[0]);
return -EINVAL;
}
@@ -1590,7 +1597,7 @@ static int dib9000_tuner_xfer(struct i2c_adapter *i2c_adap, struct i2c_msg msg[]
len = 16;
if (dib9000_read_word(state, 790) != 0)
- dprintk("TunerITF: read busy");
+ dprintk("TunerITF: read busy\n");
dib9000_write_word(state, 784, (u16) (msg[index_msg].addr));
dib9000_write_word(state, 787, (len / 2) - 1);
@@ -1601,7 +1608,7 @@ static int dib9000_tuner_xfer(struct i2c_adapter *i2c_adap, struct i2c_msg msg[]
i--;
if (i == 0)
- dprintk("TunerITF: read failed");
+ dprintk("TunerITF: read failed\n");
for (i = 0; i < len; i += 2) {
t = dib9000_read_word(state, 785);
@@ -1609,13 +1616,13 @@ static int dib9000_tuner_xfer(struct i2c_adapter *i2c_adap, struct i2c_msg msg[]
msg[index_msg].buf[i + 1] = (t) & 0xff;
}
if (dib9000_read_word(state, 790) != 0)
- dprintk("TunerITF: read more data than expected");
+ dprintk("TunerITF: read more data than expected\n");
} else {
i = 1000;
while (dib9000_read_word(state, 789) && i)
i--;
if (i == 0)
- dprintk("TunerITF: write busy");
+ dprintk("TunerITF: write busy\n");
len = msg[index_msg].len;
if (len > 16)
@@ -1631,7 +1638,7 @@ static int dib9000_tuner_xfer(struct i2c_adapter *i2c_adap, struct i2c_msg msg[]
while (dib9000_read_word(state, 791) > 0 && i)
i--;
if (i == 0)
- dprintk("TunerITF: write failed");
+ dprintk("TunerITF: write failed\n");
}
}
return num;
@@ -1676,7 +1683,7 @@ static int dib9000_fw_component_bus_xfer(struct i2c_adapter *i2c_adap, struct i2
}
if (mutex_lock_interruptible(&state->platform.risc.mem_mbx_lock) < 0) {
- dprintk("could not get the lock");
+ dprintk("could not get the lock\n");
return 0;
}
@@ -1759,7 +1766,7 @@ static int dib9000_cfg_gpio(struct dib9000_state *st, u8 num, u8 dir, u8 val)
st->gpio_val |= (val & 0x01) << num; /* set the new value */
dib9000_write_word(st, 774, st->gpio_val);
- dprintk("gpio dir: %04x: gpio val: %04x", st->gpio_dir, st->gpio_val);
+ dprintk("gpio dir: %04x: gpio val: %04x\n", st->gpio_dir, st->gpio_val);
return 0;
}
@@ -1779,7 +1786,7 @@ int dib9000_fw_pid_filter_ctrl(struct dvb_frontend *fe, u8 onoff)
if ((state->pid_ctrl_index != -2) && (state->pid_ctrl_index < 9)) {
/* postpone the pid filtering cmd */
- dprintk("pid filter cmd postpone");
+ dprintk("pid filter cmd postpone\n");
state->pid_ctrl_index++;
state->pid_ctrl[state->pid_ctrl_index].cmd = DIB9000_PID_FILTER_CTRL;
state->pid_ctrl[state->pid_ctrl_index].onoff = onoff;
@@ -1787,14 +1794,14 @@ int dib9000_fw_pid_filter_ctrl(struct dvb_frontend *fe, u8 onoff)
}
if (mutex_lock_interruptible(&state->demod_lock) < 0) {
- dprintk("could not get the lock");
+ dprintk("could not get the lock\n");
return -EINTR;
}
val = dib9000_read_word(state, 294 + 1) & 0xffef;
val |= (onoff & 0x1) << 4;
- dprintk("PID filter enabled %d", onoff);
+ dprintk("PID filter enabled %d\n", onoff);
ret = dib9000_write_word(state, 294 + 1, val);
mutex_unlock(&state->demod_lock);
return ret;
@@ -1809,7 +1816,7 @@ int dib9000_fw_pid_filter(struct dvb_frontend *fe, u8 id, u16 pid, u8 onoff)
if (state->pid_ctrl_index != -2) {
/* postpone the pid filtering cmd */
- dprintk("pid filter postpone");
+ dprintk("pid filter postpone\n");
if (state->pid_ctrl_index < 9) {
state->pid_ctrl_index++;
state->pid_ctrl[state->pid_ctrl_index].cmd = DIB9000_PID_FILTER;
@@ -1817,15 +1824,15 @@ int dib9000_fw_pid_filter(struct dvb_frontend *fe, u8 id, u16 pid, u8 onoff)
state->pid_ctrl[state->pid_ctrl_index].pid = pid;
state->pid_ctrl[state->pid_ctrl_index].onoff = onoff;
} else
- dprintk("can not add any more pid ctrl cmd");
+ dprintk("can not add any more pid ctrl cmd\n");
return 0;
}
if (mutex_lock_interruptible(&state->demod_lock) < 0) {
- dprintk("could not get the lock");
+ dprintk("could not get the lock\n");
return -EINTR;
}
- dprintk("Index %x, PID %d, OnOff %d", id, pid, onoff);
+ dprintk("Index %x, PID %d, OnOff %d\n", id, pid, onoff);
ret = dib9000_write_word(state, 300 + 1 + id,
onoff ? (1 << 13) | pid : 0);
mutex_unlock(&state->demod_lock);
@@ -1868,7 +1875,7 @@ static int dib9000_sleep(struct dvb_frontend *fe)
int ret = 0;
if (mutex_lock_interruptible(&state->demod_lock) < 0) {
- dprintk("could not get the lock");
+ dprintk("could not get the lock\n");
return -EINTR;
}
for (index_frontend = 1; (index_frontend < MAX_NUMBER_OF_FRONTENDS) && (state->fe[index_frontend] != NULL); index_frontend++) {
@@ -1899,7 +1906,7 @@ static int dib9000_get_frontend(struct dvb_frontend *fe,
if (state->get_frontend_internal == 0) {
if (mutex_lock_interruptible(&state->demod_lock) < 0) {
- dprintk("could not get the lock");
+ dprintk("could not get the lock\n");
return -EINTR;
}
}
@@ -1907,7 +1914,7 @@ static int dib9000_get_frontend(struct dvb_frontend *fe,
for (index_frontend = 1; (index_frontend < MAX_NUMBER_OF_FRONTENDS) && (state->fe[index_frontend] != NULL); index_frontend++) {
state->fe[index_frontend]->ops.read_status(state->fe[index_frontend], &stat);
if (stat & FE_HAS_SYNC) {
- dprintk("TPS lock on the slave%i", index_frontend);
+ dprintk("TPS lock on the slave%i\n", index_frontend);
/* synchronize the cache with the other frontends */
state->fe[index_frontend]->ops.get_frontend(state->fe[index_frontend], c);
@@ -1995,18 +2002,18 @@ static int dib9000_set_frontend(struct dvb_frontend *fe)
/* check that the correct parameters are set */
if (state->fe[0]->dtv_property_cache.frequency == 0) {
- dprintk("dib9000: must specify frequency ");
+ dprintk("dib9000: must specify frequency\n");
return 0;
}
if (state->fe[0]->dtv_property_cache.bandwidth_hz == 0) {
- dprintk("dib9000: must specify bandwidth ");
+ dprintk("dib9000: must specify bandwidth\n");
return 0;
}
state->pid_ctrl_index = -1; /* postpone the pid filtering cmd */
if (mutex_lock_interruptible(&state->demod_lock) < 0) {
- dprintk("could not get the lock");
+ dprintk("could not get the lock\n");
return 0;
}
@@ -2073,14 +2080,14 @@ static int dib9000_set_frontend(struct dvb_frontend *fe)
/* check the tune result */
if (exit_condition == 1) { /* tune failed */
- dprintk("tune failed");
+ dprintk("tune failed\n");
mutex_unlock(&state->demod_lock);
/* tune failed; put all the pid filtering cmd to junk */
state->pid_ctrl_index = -1;
return 0;
}
- dprintk("tune success on frontend%i", index_frontend_success);
+ dprintk("tune success on frontend%i\n", index_frontend_success);
/* synchronize all the channel cache */
state->get_frontend_internal = 1;
@@ -2169,7 +2176,7 @@ static int dib9000_read_status(struct dvb_frontend *fe, enum fe_status *stat)
u16 lock = 0, lock_slave = 0;
if (mutex_lock_interruptible(&state->demod_lock) < 0) {
- dprintk("could not get the lock");
+ dprintk("could not get the lock\n");
return -EINTR;
}
for (index_frontend = 1; (index_frontend < MAX_NUMBER_OF_FRONTENDS) && (state->fe[index_frontend] != NULL); index_frontend++)
@@ -2202,11 +2209,11 @@ static int dib9000_read_ber(struct dvb_frontend *fe, u32 * ber)
int ret = 0;
if (mutex_lock_interruptible(&state->demod_lock) < 0) {
- dprintk("could not get the lock");
+ dprintk("could not get the lock\n");
return -EINTR;
}
if (mutex_lock_interruptible(&state->platform.risc.mem_mbx_lock) < 0) {
- dprintk("could not get the lock");
+ dprintk("could not get the lock\n");
ret = -EINTR;
goto error;
}
@@ -2237,7 +2244,7 @@ static int dib9000_read_signal_strength(struct dvb_frontend *fe, u16 * strength)
int ret = 0;
if (mutex_lock_interruptible(&state->demod_lock) < 0) {
- dprintk("could not get the lock");
+ dprintk("could not get the lock\n");
return -EINTR;
}
*strength = 0;
@@ -2250,7 +2257,7 @@ static int dib9000_read_signal_strength(struct dvb_frontend *fe, u16 * strength)
}
if (mutex_lock_interruptible(&state->platform.risc.mem_mbx_lock) < 0) {
- dprintk("could not get the lock");
+ dprintk("could not get the lock\n");
ret = -EINTR;
goto error;
}
@@ -2281,7 +2288,7 @@ static u32 dib9000_get_snr(struct dvb_frontend *fe)
u16 val;
if (mutex_lock_interruptible(&state->platform.risc.mem_mbx_lock) < 0) {
- dprintk("could not get the lock");
+ dprintk("could not get the lock\n");
return 0;
}
if (dib9000_fw_memmbx_sync(state, FE_SYNC_CHANNEL) < 0) {
@@ -2320,7 +2327,7 @@ static int dib9000_read_snr(struct dvb_frontend *fe, u16 * snr)
u32 snr_master;
if (mutex_lock_interruptible(&state->demod_lock) < 0) {
- dprintk("could not get the lock");
+ dprintk("could not get the lock\n");
return -EINTR;
}
snr_master = dib9000_get_snr(fe);
@@ -2345,11 +2352,11 @@ static int dib9000_read_unc_blocks(struct dvb_frontend *fe, u32 * unc)
int ret = 0;
if (mutex_lock_interruptible(&state->demod_lock) < 0) {
- dprintk("could not get the lock");
+ dprintk("could not get the lock\n");
return -EINTR;
}
if (mutex_lock_interruptible(&state->platform.risc.mem_mbx_lock) < 0) {
- dprintk("could not get the lock");
+ dprintk("could not get the lock\n");
ret = -EINTR;
goto error;
}
@@ -2376,12 +2383,12 @@ int dib9000_i2c_enumeration(struct i2c_adapter *i2c, int no_of_demods, u8 defaul
client.i2c_write_buffer = kzalloc(4 * sizeof(u8), GFP_KERNEL);
if (!client.i2c_write_buffer) {
- dprintk("%s: not enough memory", __func__);
+ dprintk("%s: not enough memory\n", __func__);
return -ENOMEM;
}
client.i2c_read_buffer = kzalloc(4 * sizeof(u8), GFP_KERNEL);
if (!client.i2c_read_buffer) {
- dprintk("%s: not enough memory", __func__);
+ dprintk("%s: not enough memory\n", __func__);
ret = -ENOMEM;
goto error_memory;
}
@@ -2408,7 +2415,7 @@ int dib9000_i2c_enumeration(struct i2c_adapter *i2c, int no_of_demods, u8 defaul
if (dib9000_identify(&client) == 0) {
client.i2c_addr = default_addr;
if (dib9000_identify(&client) == 0) {
- dprintk("DiB9000 #%d: not identified", k);
+ dprintk("DiB9000 #%d: not identified\n", k);
ret = -EIO;
goto error;
}
@@ -2417,7 +2424,7 @@ int dib9000_i2c_enumeration(struct i2c_adapter *i2c, int no_of_demods, u8 defaul
dib9000_i2c_write16(&client, 1795, (1 << 10) | (4 << 6));
dib9000_i2c_write16(&client, 1794, (new_addr << 2) | 2);
- dprintk("IC %d initialized (to i2c_address 0x%x)", k, new_addr);
+ dprintk("IC %d initialized (to i2c_address 0x%x)\n", k, new_addr);
}
for (k = 0; k < no_of_demods; k++) {
@@ -2445,12 +2452,12 @@ int dib9000_set_slave_frontend(struct dvb_frontend *fe, struct dvb_frontend *fe_
while ((index_frontend < MAX_NUMBER_OF_FRONTENDS) && (state->fe[index_frontend] != NULL))
index_frontend++;
if (index_frontend < MAX_NUMBER_OF_FRONTENDS) {
- dprintk("set slave fe %p to index %i", fe_slave, index_frontend);
+ dprintk("set slave fe %p to index %i\n", fe_slave, index_frontend);
state->fe[index_frontend] = fe_slave;
return 0;
}
- dprintk("too many slave frontend");
+ dprintk("too many slave frontend\n");
return -ENOMEM;
}
EXPORT_SYMBOL(dib9000_set_slave_frontend);
@@ -2463,12 +2470,12 @@ int dib9000_remove_slave_frontend(struct dvb_frontend *fe)
while ((index_frontend < MAX_NUMBER_OF_FRONTENDS) && (state->fe[index_frontend] != NULL))
index_frontend++;
if (index_frontend != 1) {
- dprintk("remove slave fe %p (index %i)", state->fe[index_frontend - 1], index_frontend - 1);
+ dprintk("remove slave fe %p (index %i)\n", state->fe[index_frontend - 1], index_frontend - 1);
state->fe[index_frontend] = NULL;
return 0;
}
- dprintk("no frontend to be removed");
+ dprintk("no frontend to be removed\n");
return -ENODEV;
}
EXPORT_SYMBOL(dib9000_remove_slave_frontend);
@@ -2483,7 +2490,7 @@ struct dvb_frontend *dib9000_get_slave_frontend(struct dvb_frontend *fe, int sla
}
EXPORT_SYMBOL(dib9000_get_slave_frontend);
-static struct dvb_frontend_ops dib9000_ops;
+static const struct dvb_frontend_ops dib9000_ops;
struct dvb_frontend *dib9000_attach(struct i2c_adapter *i2c_adap, u8 i2c_addr, const struct dib9000_config *cfg)
{
struct dvb_frontend *fe;
@@ -2560,7 +2567,7 @@ error:
}
EXPORT_SYMBOL(dib9000_attach);
-static struct dvb_frontend_ops dib9000_ops = {
+static const struct dvb_frontend_ops dib9000_ops = {
.delsys = { SYS_DVBT },
.info = {
.name = "DiBcom 9000",
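
The DUMP_MSG hunk above collapses a print-one-word-per-iteration loop into a single dprintk using the kernel's %*ph format extension, which renders a small buffer as space-separated hex in one call. A short sketch of the specifier on its own; dump_payload, buf and len are illustrative names:

	#include <linux/printk.h>
	#include <linux/types.h>

	static void dump_payload(const u8 *buf, int len)
	{
		/* %*ph consumes (int length, const void *) and prints
		 * e.g. "00 01 02 03"; it is capped at 64 bytes */
		printk(KERN_DEBUG "payload: %*ph\n", len, buf);
	}
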
diff --git a/drivers/media/dvb-frontends/dibx000_common.c b/drivers/media/dvb-frontends/dibx000_common.c
index 723358d7ca84..bc28184c7fb0 100644
--- a/drivers/media/dvb-frontends/dibx000_common.c
+++ b/drivers/media/dvb-frontends/dibx000_common.c
@@ -1,3 +1,5 @@
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/i2c.h>
#include <linux/mutex.h>
#include <linux/module.h>
@@ -8,14 +10,18 @@ static int debug;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "turn on debugging (default: 0)");
-#define dprintk(args...) do { if (debug) { printk(KERN_DEBUG "DiBX000: "); printk(args); printk("\n"); } } while (0)
+#define dprintk(fmt, arg...) do { \
+ if (debug) \
+ printk(KERN_DEBUG pr_fmt("%s: " fmt), \
+ __func__, ##arg); \
+} while (0)
static int dibx000_write_word(struct dibx000_i2c_master *mst, u16 reg, u16 val)
{
int ret;
if (mutex_lock_interruptible(&mst->i2c_buffer_lock) < 0) {
- dprintk("could not acquire lock");
+ dprintk("could not acquire lock\n");
return -EINVAL;
}
@@ -41,7 +47,7 @@ static u16 dibx000_read_word(struct dibx000_i2c_master *mst, u16 reg)
u16 ret;
if (mutex_lock_interruptible(&mst->i2c_buffer_lock) < 0) {
- dprintk("could not acquire lock");
+ dprintk("could not acquire lock\n");
return 0;
}
@@ -59,7 +65,7 @@ static u16 dibx000_read_word(struct dibx000_i2c_master *mst, u16 reg)
mst->msg[1].len = 2;
if (i2c_transfer(mst->i2c_adap, mst->msg, 2) != 2)
- dprintk("i2c read error on %d", reg);
+ dprintk("i2c read error on %d\n", reg);
ret = (mst->i2c_read_buffer[0] << 8) | mst->i2c_read_buffer[1];
mutex_unlock(&mst->i2c_buffer_lock);
@@ -192,7 +198,7 @@ static int dibx000_i2c_select_interface(struct dibx000_i2c_master *mst,
enum dibx000_i2c_interface intf)
{
if (mst->device_rev > DIB3000MC && mst->selected_interface != intf) {
- dprintk("selecting interface: %d", intf);
+ dprintk("selecting interface: %d\n", intf);
mst->selected_interface = intf;
return dibx000_write_word(mst, mst->base_reg + 4, intf);
}
@@ -290,7 +296,7 @@ static int dibx000_i2c_gated_gpio67_xfer(struct i2c_adapter *i2c_adap,
dibx000_i2c_select_interface(mst, DIBX000_I2C_INTERFACE_GPIO_6_7);
if (mutex_lock_interruptible(&mst->i2c_buffer_lock) < 0) {
- dprintk("could not acquire lock");
+ dprintk("could not acquire lock\n");
return -EINVAL;
}
@@ -337,7 +343,7 @@ static int dibx000_i2c_gated_tuner_xfer(struct i2c_adapter *i2c_adap,
dibx000_i2c_select_interface(mst, DIBX000_I2C_INTERFACE_TUNER);
if (mutex_lock_interruptible(&mst->i2c_buffer_lock) < 0) {
- dprintk("could not acquire lock");
+ dprintk("could not acquire lock\n");
return -EINVAL;
}
memset(mst->msg, 0, sizeof(struct i2c_msg) * (2 + num));
@@ -391,7 +397,7 @@ struct i2c_adapter *dibx000_get_i2c_adapter(struct dibx000_i2c_master *mst,
i2c = &mst->master_i2c_adap_gpio67;
break;
default:
- printk(KERN_ERR "DiBX000: incorrect I2C interface selected\n");
+ pr_err("incorrect I2C interface selected\n");
break;
}
@@ -434,7 +440,7 @@ int dibx000_init_i2c_master(struct dibx000_i2c_master *mst, u16 device_rev,
mutex_init(&mst->i2c_buffer_lock);
if (mutex_lock_interruptible(&mst->i2c_buffer_lock) < 0) {
- dprintk("could not acquire lock");
+ dprintk("could not acquire lock\n");
return -EINVAL;
}
memset(mst->msg, 0, sizeof(struct i2c_msg));
@@ -456,29 +462,25 @@ int dibx000_init_i2c_master(struct dibx000_i2c_master *mst, u16 device_rev,
if (i2c_adapter_init
(&mst->gated_tuner_i2c_adap, &dibx000_i2c_gated_tuner_algo,
"DiBX000 tuner I2C bus", mst) != 0)
- printk(KERN_ERR
- "DiBX000: could not initialize the tuner i2c_adapter\n");
+ pr_err("could not initialize the tuner i2c_adapter\n");
mst->master_i2c_adap_gpio12.dev.parent = mst->i2c_adap->dev.parent;
if (i2c_adapter_init
(&mst->master_i2c_adap_gpio12, &dibx000_i2c_master_gpio12_xfer_algo,
"DiBX000 master GPIO12 I2C bus", mst) != 0)
- printk(KERN_ERR
- "DiBX000: could not initialize the master i2c_adapter\n");
+ pr_err("could not initialize the master i2c_adapter\n");
mst->master_i2c_adap_gpio34.dev.parent = mst->i2c_adap->dev.parent;
if (i2c_adapter_init
(&mst->master_i2c_adap_gpio34, &dibx000_i2c_master_gpio34_xfer_algo,
"DiBX000 master GPIO34 I2C bus", mst) != 0)
- printk(KERN_ERR
- "DiBX000: could not initialize the master i2c_adapter\n");
+ pr_err("could not initialize the master i2c_adapter\n");
mst->master_i2c_adap_gpio67.dev.parent = mst->i2c_adap->dev.parent;
if (i2c_adapter_init
(&mst->master_i2c_adap_gpio67, &dibx000_i2c_gated_gpio67_algo,
"DiBX000 master GPIO67 I2C bus", mst) != 0)
- printk(KERN_ERR
- "DiBX000: could not initialize the master i2c_adapter\n");
+ pr_err("could not initialize the master i2c_adapter\n");
/* initialize the i2c-master by closing the gate */
dibx000_i2c_gate_ctrl(mst, mst->i2c_write_buffer, 0, 0);
@@ -500,16 +502,6 @@ void dibx000_exit_i2c_master(struct dibx000_i2c_master *mst)
}
EXPORT_SYMBOL(dibx000_exit_i2c_master);
-
-u32 systime(void)
-{
- struct timespec t;
-
- t = current_kernel_time();
- return (t.tv_sec * 10000) + (t.tv_nsec / 100000);
-}
-EXPORT_SYMBOL(systime);
-
MODULE_AUTHOR("Patrick Boettcher <patrick.boettcher@posteo.de>");
MODULE_DESCRIPTION("Common functions for the DiBcom demodulator family");

MODULE_LICENSE("GPL");
diff --git a/drivers/media/dvb-frontends/dibx000_common.h b/drivers/media/dvb-frontends/dibx000_common.h
index b538e0555c95..61f4152f24ee 100644
--- a/drivers/media/dvb-frontends/dibx000_common.h
+++ b/drivers/media/dvb-frontends/dibx000_common.h
@@ -47,8 +47,6 @@ extern void dibx000_exit_i2c_master(struct dibx000_i2c_master *mst);
extern void dibx000_reset_i2c_master(struct dibx000_i2c_master *mst);
extern int dibx000_i2c_set_speed(struct i2c_adapter *i2c_adap, u16 speed);
-extern u32 systime(void);
-
#define BAND_LBAND 0x01
#define BAND_UHF 0x02
#define BAND_VHF 0x04
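
From here on, most hunks make one repeated change: static struct dvb_frontend_ops tables become const. The table is forward-declared so the attach function can reference it, and attach() only ever copies it into the frontend, so nothing writes through the definition and it can live in read-only memory. A sketch of the idiom under those assumptions; example_state, example_ops and the DVB-T values are illustrative, not from any one driver:

	#include <linux/slab.h>
	#include <linux/string.h>
	#include "dvb_frontend.h"	/* dvb-core header, as in these drivers */

	struct example_state {
		struct dvb_frontend frontend;
	};

	/* forward declaration so attach() can reference the table below */
	static const struct dvb_frontend_ops example_ops;

	struct dvb_frontend *example_attach(void)
	{
		struct example_state *state = kzalloc(sizeof(*state), GFP_KERNEL);

		if (!state)
			return NULL;
		/* the table is only copied here, never written through,
		 * so the definition can be const (read-only data) */
		memcpy(&state->frontend.ops, &example_ops, sizeof(example_ops));
		state->frontend.demodulator_priv = state;
		return &state->frontend;
	}

	static const struct dvb_frontend_ops example_ops = {
		.delsys = { SYS_DVBT },
		.info = { .name = "Example demod" },
	};
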
diff --git a/drivers/media/dvb-frontends/drx39xyj/drxj.c b/drivers/media/dvb-frontends/drx39xyj/drxj.c
index bd6d2ee0f7c9..f1c3e3b09b65 100644
--- a/drivers/media/dvb-frontends/drx39xyj/drxj.c
+++ b/drivers/media/dvb-frontends/drx39xyj/drxj.c
@@ -12264,7 +12264,7 @@ static void drx39xxj_release(struct dvb_frontend *fe)
kfree(state);
}
-static struct dvb_frontend_ops drx39xxj_ops;
+static const struct dvb_frontend_ops drx39xxj_ops;
struct dvb_frontend *drx39xxj_attach(struct i2c_adapter *i2c)
{
@@ -12363,7 +12363,7 @@ error:
}
EXPORT_SYMBOL(drx39xxj_attach);
-static struct dvb_frontend_ops drx39xxj_ops = {
+static const struct dvb_frontend_ops drx39xxj_ops = {
.delsys = { SYS_ATSC, SYS_DVBC_ANNEX_B },
.info = {
.name = "Micronas DRX39xxj family Frontend",
diff --git a/drivers/media/dvb-frontends/drxd_hard.c b/drivers/media/dvb-frontends/drxd_hard.c
index 445a15c2714f..4143f0326684 100644
--- a/drivers/media/dvb-frontends/drxd_hard.c
+++ b/drivers/media/dvb-frontends/drxd_hard.c
@@ -2912,7 +2912,7 @@ static void drxd_release(struct dvb_frontend *fe)
kfree(state);
}
-static struct dvb_frontend_ops drxd_ops = {
+static const struct dvb_frontend_ops drxd_ops = {
.delsys = { SYS_DVBT},
.info = {
.name = "Micronas DRXD DVB-T",
diff --git a/drivers/media/dvb-frontends/drxk_hard.c b/drivers/media/dvb-frontends/drxk_hard.c
index c595adc61c6f..146edf344dd8 100644
--- a/drivers/media/dvb-frontends/drxk_hard.c
+++ b/drivers/media/dvb-frontends/drxk_hard.c
@@ -6737,7 +6737,7 @@ static int drxk_get_tune_settings(struct dvb_frontend *fe,
}
}
-static struct dvb_frontend_ops drxk_ops = {
+static const struct dvb_frontend_ops drxk_ops = {
/* .delsys will be filled dynamically */
.info = {
.name = "DRXK",
diff --git a/drivers/media/dvb-frontends/ds3000.c b/drivers/media/dvb-frontends/ds3000.c
index 447b518e287a..0b17a45c5640 100644
--- a/drivers/media/dvb-frontends/ds3000.c
+++ b/drivers/media/dvb-frontends/ds3000.c
@@ -248,8 +248,8 @@ static int ds3000_writereg(struct ds3000_state *state, int reg, int data)
err = i2c_transfer(state->i2c, &msg, 1);
if (err != 1) {
- printk(KERN_ERR "%s: writereg error(err == %i, reg == 0x%02x,"
- " value == 0x%02x)\n", __func__, err, reg, data);
+ printk(KERN_ERR "%s: writereg error(err == %i, reg == 0x%02x, value == 0x%02x)\n",
+ __func__, err, reg, data);
return -EREMOTEIO;
}
@@ -296,8 +296,8 @@ static int ds3000_writeFW(struct ds3000_state *state, int reg,
ret = i2c_transfer(state->i2c, &msg, 1);
if (ret != 1) {
- printk(KERN_ERR "%s: write error(err == %i, "
- "reg == 0x%02x\n", __func__, ret, reg);
+ printk(KERN_ERR "%s: write error(err == %i, reg == 0x%02x\n",
+ __func__, ret, reg);
ret = -EREMOTEIO;
goto error;
}
@@ -364,8 +364,8 @@ static int ds3000_firmware_ondemand(struct dvb_frontend *fe)
state->i2c->dev.parent);
printk(KERN_INFO "%s: Waiting for firmware upload(2)...\n", __func__);
if (ret) {
- printk(KERN_ERR "%s: No firmware uploaded (timeout or file not "
- "found?)\n", __func__);
+ printk(KERN_ERR "%s: No firmware uploaded (timeout or file not found?)\n",
+ __func__);
return ret;
}
@@ -830,7 +830,7 @@ static void ds3000_release(struct dvb_frontend *fe)
kfree(state);
}
-static struct dvb_frontend_ops ds3000_ops;
+static const struct dvb_frontend_ops ds3000_ops;
struct dvb_frontend *ds3000_attach(const struct ds3000_config *config,
struct i2c_adapter *i2c)
@@ -1104,7 +1104,7 @@ static int ds3000_initfe(struct dvb_frontend *fe)
return 0;
}
-static struct dvb_frontend_ops ds3000_ops = {
+static const struct dvb_frontend_ops ds3000_ops = {
.delsys = { SYS_DVBS, SYS_DVBS2 },
.info = {
.name = "Montage Technology DS3000",
@@ -1144,8 +1144,7 @@ static struct dvb_frontend_ops ds3000_ops = {
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "Activates frontend debugging (default:0)");
-MODULE_DESCRIPTION("DVB Frontend module for Montage Technology "
- "DS3000 hardware");
+MODULE_DESCRIPTION("DVB Frontend module for Montage Technology DS3000 hardware");
MODULE_AUTHOR("Konstantin Dimitrov <kosio.dimitrov@gmail.com>");
MODULE_LICENSE("GPL");
MODULE_FIRMWARE(DS3000_DEFAULT_FIRMWARE);
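
The ds3000 hunks above are pure string joins: kernel coding style says never to break user-visible strings across source lines, even when that exceeds 80 columns, because a split string defeats grepping for the message, and checkpatch flags the split form. Before and after, reduced to the essentials:

	/* grep-hostile: the message only exists split across two lines */
	printk(KERN_ERR "%s: writereg error(err == %i, reg == 0x%02x,"
	       " value == 0x%02x)\n", __func__, err, reg, data);

	/* preferred: one searchable string; wrap the argument list instead */
	printk(KERN_ERR "%s: writereg error(err == %i, reg == 0x%02x, value == 0x%02x)\n",
	       __func__, err, reg, data);
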
diff --git a/drivers/media/dvb-frontends/dvb-pll.c b/drivers/media/dvb-frontends/dvb-pll.c
index 735a96662022..ef976eb23344 100644
--- a/drivers/media/dvb-frontends/dvb-pll.c
+++ b/drivers/media/dvb-frontends/dvb-pll.c
@@ -18,6 +18,8 @@
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/dvb/frontend.h>
@@ -25,6 +27,9 @@
#include "dvb-pll.h"
+#define dprintk(fmt, arg...) \
+ printk(KERN_DEBUG pr_fmt("%s: " fmt), __func__, ##arg)
+
struct dvb_pll_priv {
/* pll number */
int nr;
@@ -362,7 +367,7 @@ static void opera1_bw(struct dvb_frontend *fe, u8 *buf)
result = i2c_transfer(priv->i2c, &msg, 1);
if (result != 1)
- printk(KERN_ERR "%s: i2c_transfer failed:%d",
+ pr_err("%s: i2c_transfer failed:%d",
__func__, result);
if (b_w <= 10000)
@@ -432,7 +437,7 @@ static void samsung_dtos403ih102a_set(struct dvb_frontend *fe, u8 *buf)
result = i2c_transfer(priv->i2c, &msg, 1);
if (result != 1)
- printk(KERN_ERR "%s: i2c_transfer failed:%d",
+ pr_err("%s: i2c_transfer failed:%d",
__func__, result);
buf[2] = 0x9e;
@@ -578,7 +583,7 @@ static int dvb_pll_configure(struct dvb_frontend *fe, u8 *buf,
}
if (debug)
- printk("pll: %s: freq=%d | i=%d/%d\n", desc->name,
+ dprintk("pll: %s: freq=%d | i=%d/%d\n", desc->name,
frequency, i, desc->count);
if (i == desc->count)
return -EINVAL;
@@ -594,18 +599,17 @@ static int dvb_pll_configure(struct dvb_frontend *fe, u8 *buf,
desc->set(fe, buf);
if (debug)
- printk("pll: %s: div=%d | buf=0x%02x,0x%02x,0x%02x,0x%02x\n",
+ dprintk("pll: %s: div=%d | buf=0x%02x,0x%02x,0x%02x,0x%02x\n",
desc->name, div, buf[0], buf[1], buf[2], buf[3]);
// calculate the frequency we set it to
return (div * desc->entries[i].stepsize) - desc->iffreq;
}
-static int dvb_pll_release(struct dvb_frontend *fe)
+static void dvb_pll_release(struct dvb_frontend *fe)
{
kfree(fe->tuner_priv);
fe->tuner_priv = NULL;
- return 0;
}
static int dvb_pll_sleep(struct dvb_frontend *fe)
@@ -803,10 +807,10 @@ struct dvb_frontend *dvb_pll_attach(struct dvb_frontend *fe, int pll_addr,
fe->tuner_priv = priv;
if ((debug) || (id[priv->nr] == pll_desc_id)) {
- printk("dvb-pll[%d]", priv->nr);
+ dprintk("dvb-pll[%d]", priv->nr);
if (i2c != NULL)
- printk(" %d-%04x", i2c_adapter_id(i2c), pll_addr);
- printk(": id# %d (%s) attached, %s\n", pll_desc_id, desc->name,
+ pr_cont(" %d-%04x", i2c_adapter_id(i2c), pll_addr);
+ pr_cont(": id# %d (%s) attached, %s\n", pll_desc_id, desc->name,
id[priv->nr] == pll_desc_id ?
"insmod option" : "autodetected");
}
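
dvb-pll's attach message is assembled from several printk calls on one logical console line; once pr_fmt() is in effect, only the first call should carry a log level and prefix, and the continuations switch to pr_cont() so they are appended to the open line rather than re-prefixed. Roughly, with illustrative values:

	static void report_attach(int nr, int adap_id, int addr)
	{
		/* opens the line: gets KERN_DEBUG and the pr_fmt prefix */
		printk(KERN_DEBUG pr_fmt("dvb-pll[%d]"), nr);
		/* appended to the same line, no new prefix */
		pr_cont(" %d-%04x", adap_id, addr);
		pr_cont(": attached\n");	/* terminates the line */
	}
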
diff --git a/drivers/media/dvb-frontends/dvb_dummy_fe.c b/drivers/media/dvb-frontends/dvb_dummy_fe.c
index e5bd8c62ad3a..efc3c31a7635 100644
--- a/drivers/media/dvb-frontends/dvb_dummy_fe.c
+++ b/drivers/media/dvb-frontends/dvb_dummy_fe.c
@@ -119,7 +119,7 @@ static void dvb_dummy_fe_release(struct dvb_frontend* fe)
kfree(state);
}
-static struct dvb_frontend_ops dvb_dummy_fe_ofdm_ops;
+static const struct dvb_frontend_ops dvb_dummy_fe_ofdm_ops;
struct dvb_frontend* dvb_dummy_fe_ofdm_attach(void)
{
@@ -136,7 +136,7 @@ struct dvb_frontend* dvb_dummy_fe_ofdm_attach(void)
return &state->frontend;
}
-static struct dvb_frontend_ops dvb_dummy_fe_qpsk_ops;
+static const struct dvb_frontend_ops dvb_dummy_fe_qpsk_ops;
struct dvb_frontend *dvb_dummy_fe_qpsk_attach(void)
{
@@ -153,7 +153,7 @@ struct dvb_frontend *dvb_dummy_fe_qpsk_attach(void)
return &state->frontend;
}
-static struct dvb_frontend_ops dvb_dummy_fe_qam_ops;
+static const struct dvb_frontend_ops dvb_dummy_fe_qam_ops;
struct dvb_frontend *dvb_dummy_fe_qam_attach(void)
{
@@ -170,7 +170,7 @@ struct dvb_frontend *dvb_dummy_fe_qam_attach(void)
return &state->frontend;
}
-static struct dvb_frontend_ops dvb_dummy_fe_ofdm_ops = {
+static const struct dvb_frontend_ops dvb_dummy_fe_ofdm_ops = {
.delsys = { SYS_DVBT },
.info = {
.name = "Dummy DVB-T",
@@ -201,7 +201,7 @@ static struct dvb_frontend_ops dvb_dummy_fe_ofdm_ops = {
.read_ucblocks = dvb_dummy_fe_read_ucblocks,
};
-static struct dvb_frontend_ops dvb_dummy_fe_qam_ops = {
+static const struct dvb_frontend_ops dvb_dummy_fe_qam_ops = {
.delsys = { SYS_DVBC_ANNEX_A },
.info = {
.name = "Dummy DVB-C",
@@ -230,7 +230,7 @@ static struct dvb_frontend_ops dvb_dummy_fe_qam_ops = {
.read_ucblocks = dvb_dummy_fe_read_ucblocks,
};
-static struct dvb_frontend_ops dvb_dummy_fe_qpsk_ops = {
+static const struct dvb_frontend_ops dvb_dummy_fe_qpsk_ops = {
.delsys = { SYS_DVBS },
.info = {
.name = "Dummy DVB-S",
diff --git a/drivers/media/dvb-frontends/ec100.c b/drivers/media/dvb-frontends/ec100.c
index c9012e677cd1..d97ce21e26e1 100644
--- a/drivers/media/dvb-frontends/ec100.c
+++ b/drivers/media/dvb-frontends/ec100.c
@@ -280,7 +280,7 @@ static void ec100_release(struct dvb_frontend *fe)
kfree(state);
}
-static struct dvb_frontend_ops ec100_ops;
+static const struct dvb_frontend_ops ec100_ops;
struct dvb_frontend *ec100_attach(const struct ec100_config *config,
struct i2c_adapter *i2c)
@@ -315,7 +315,7 @@ error:
}
EXPORT_SYMBOL(ec100_attach);
-static struct dvb_frontend_ops ec100_ops = {
+static const struct dvb_frontend_ops ec100_ops = {
.delsys = { SYS_DVBT },
.info = {
.name = "E3C EC100 DVB-T",
diff --git a/drivers/media/dvb-frontends/gp8psk-fe.c b/drivers/media/dvb-frontends/gp8psk-fe.c
index 93f59bfea092..efe015df7f1d 100644
--- a/drivers/media/dvb-frontends/gp8psk-fe.c
+++ b/drivers/media/dvb-frontends/gp8psk-fe.c
@@ -323,7 +323,7 @@ static void gp8psk_fe_release(struct dvb_frontend* fe)
kfree(st);
}
-static struct dvb_frontend_ops gp8psk_fe_ops;
+static const struct dvb_frontend_ops gp8psk_fe_ops;
struct dvb_frontend *gp8psk_fe_attach(const struct gp8psk_fe_ops *ops,
void *priv, bool is_rev1)
@@ -351,7 +351,7 @@ struct dvb_frontend *gp8psk_fe_attach(const struct gp8psk_fe_ops *ops,
}
EXPORT_SYMBOL_GPL(gp8psk_fe_attach);
-static struct dvb_frontend_ops gp8psk_fe_ops = {
+static const struct dvb_frontend_ops gp8psk_fe_ops = {
.delsys = { SYS_DVBS },
.info = {
.name = "Genpix DVB-S",
diff --git a/drivers/media/dvb-frontends/hd29l2.c b/drivers/media/dvb-frontends/hd29l2.c
index 1c7eb477e2cd..8b53633cf325 100644
--- a/drivers/media/dvb-frontends/hd29l2.c
+++ b/drivers/media/dvb-frontends/hd29l2.c
@@ -793,7 +793,7 @@ static void hd29l2_release(struct dvb_frontend *fe)
kfree(priv);
}
-static struct dvb_frontend_ops hd29l2_ops;
+static const struct dvb_frontend_ops hd29l2_ops;
struct dvb_frontend *hd29l2_attach(const struct hd29l2_config *config,
struct i2c_adapter *i2c)
@@ -828,7 +828,7 @@ err:
}
EXPORT_SYMBOL(hd29l2_attach);
-static struct dvb_frontend_ops hd29l2_ops = {
+static const struct dvb_frontend_ops hd29l2_ops = {
.delsys = { SYS_DVBT },
.info = {
.name = "HDIC HD29L2 DMB-TH",
diff --git a/drivers/media/dvb-frontends/helene.c b/drivers/media/dvb-frontends/helene.c
index dc43c5f6d0ea..ef35c2b30ea3 100644
--- a/drivers/media/dvb-frontends/helene.c
+++ b/drivers/media/dvb-frontends/helene.c
@@ -434,14 +434,13 @@ static int helene_init(struct dvb_frontend *fe)
return helene_leave_power_save(priv);
}
-static int helene_release(struct dvb_frontend *fe)
+static void helene_release(struct dvb_frontend *fe)
{
struct helene_priv *priv = fe->tuner_priv;
dev_dbg(&priv->i2c->dev, "%s()\n", __func__);
kfree(fe->tuner_priv);
fe->tuner_priv = NULL;
- return 0;
}
static int helene_sleep(struct dvb_frontend *fe)
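The helene hunk above, and the horus3a, itd1000, ix2505v, stb6000, stb6100, stv6110 and stv6110x hunks that follow, all apply the same signature change: the tuner release callback's return value was never checked by the core, so it becomes void and the `return 0;` is dropped. The resulting shape, sketched with a hypothetical demo_ prefix:

#include <linux/slab.h>
#include "dvb_frontend.h"

/* New-style tuner release: void return, matching the updated
 * dvb_tuner_ops release prototype; there is nothing to report upward. */
static void demo_tuner_release(struct dvb_frontend *fe)
{
	kfree(fe->tuner_priv);
	fe->tuner_priv = NULL;
}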
diff --git a/drivers/media/dvb-frontends/horus3a.c b/drivers/media/dvb-frontends/horus3a.c
index 0c089b5986a1..94bb4f7a2298 100644
--- a/drivers/media/dvb-frontends/horus3a.c
+++ b/drivers/media/dvb-frontends/horus3a.c
@@ -151,14 +151,13 @@ static int horus3a_init(struct dvb_frontend *fe)
return 0;
}
-static int horus3a_release(struct dvb_frontend *fe)
+static void horus3a_release(struct dvb_frontend *fe)
{
struct horus3a_priv *priv = fe->tuner_priv;
dev_dbg(&priv->i2c->dev, "%s()\n", __func__);
kfree(fe->tuner_priv);
fe->tuner_priv = NULL;
- return 0;
}
static int horus3a_sleep(struct dvb_frontend *fe)
diff --git a/drivers/media/dvb-frontends/itd1000.c b/drivers/media/dvb-frontends/itd1000.c
index cadcae4cff89..475525134327 100644
--- a/drivers/media/dvb-frontends/itd1000.c
+++ b/drivers/media/dvb-frontends/itd1000.c
@@ -348,11 +348,10 @@ static int itd1000_sleep(struct dvb_frontend *fe)
return 0;
}
-static int itd1000_release(struct dvb_frontend *fe)
+static void itd1000_release(struct dvb_frontend *fe)
{
kfree(fe->tuner_priv);
fe->tuner_priv = NULL;
- return 0;
}
static const struct dvb_tuner_ops itd1000_tuner_ops = {
diff --git a/drivers/media/dvb-frontends/ix2505v.c b/drivers/media/dvb-frontends/ix2505v.c
index 2826bbb36b73..ca371680a69f 100644
--- a/drivers/media/dvb-frontends/ix2505v.c
+++ b/drivers/media/dvb-frontends/ix2505v.c
@@ -94,14 +94,13 @@ static int ix2505v_write(struct ix2505v_state *state, u8 buf[], u8 count)
return 0;
}
-static int ix2505v_release(struct dvb_frontend *fe)
+static void ix2505v_release(struct dvb_frontend *fe)
{
struct ix2505v_state *state = fe->tuner_priv;
fe->tuner_priv = NULL;
kfree(state);
- return 0;
}
/**
diff --git a/drivers/media/dvb-frontends/l64781.c b/drivers/media/dvb-frontends/l64781.c
index 2f3d0519e19b..68923c84679a 100644
--- a/drivers/media/dvb-frontends/l64781.c
+++ b/drivers/media/dvb-frontends/l64781.c
@@ -496,7 +496,7 @@ static void l64781_release(struct dvb_frontend* fe)
kfree(state);
}
-static struct dvb_frontend_ops l64781_ops;
+static const struct dvb_frontend_ops l64781_ops;
struct dvb_frontend* l64781_attach(const struct l64781_config* config,
struct i2c_adapter* i2c)
@@ -571,7 +571,7 @@ error:
return NULL;
}
-static struct dvb_frontend_ops l64781_ops = {
+static const struct dvb_frontend_ops l64781_ops = {
.delsys = { SYS_DVBT },
.info = {
.name = "LSI L64781 DVB-T",
diff --git a/drivers/media/dvb-frontends/lg2160.c b/drivers/media/dvb-frontends/lg2160.c
index f51a3a0b3949..3b31e5f20f46 100644
--- a/drivers/media/dvb-frontends/lg2160.c
+++ b/drivers/media/dvb-frontends/lg2160.c
@@ -1359,7 +1359,7 @@ static void lg216x_release(struct dvb_frontend *fe)
kfree(state);
}
-static struct dvb_frontend_ops lg2160_ops = {
+static const struct dvb_frontend_ops lg2160_ops = {
.delsys = { SYS_ATSCMH },
.info = {
.name = "LG Electronics LG2160 ATSC/MH Frontend",
@@ -1387,7 +1387,7 @@ static struct dvb_frontend_ops lg2160_ops = {
.release = lg216x_release,
};
-static struct dvb_frontend_ops lg2161_ops = {
+static const struct dvb_frontend_ops lg2161_ops = {
.delsys = { SYS_ATSCMH },
.info = {
.name = "LG Electronics LG2161 ATSC/MH Frontend",
diff --git a/drivers/media/dvb-frontends/lgdt3305.c b/drivers/media/dvb-frontends/lgdt3305.c
index 4503e8852fd1..9f5d9380bf5f 100644
--- a/drivers/media/dvb-frontends/lgdt3305.c
+++ b/drivers/media/dvb-frontends/lgdt3305.c
@@ -1103,8 +1103,8 @@ static void lgdt3305_release(struct dvb_frontend *fe)
kfree(state);
}
-static struct dvb_frontend_ops lgdt3304_ops;
-static struct dvb_frontend_ops lgdt3305_ops;
+static const struct dvb_frontend_ops lgdt3304_ops;
+static const struct dvb_frontend_ops lgdt3305_ops;
struct dvb_frontend *lgdt3305_attach(const struct lgdt3305_config *config,
struct i2c_adapter *i2c_adap)
@@ -1164,7 +1164,7 @@ fail:
}
EXPORT_SYMBOL(lgdt3305_attach);
-static struct dvb_frontend_ops lgdt3304_ops = {
+static const struct dvb_frontend_ops lgdt3304_ops = {
.delsys = { SYS_ATSC, SYS_DVBC_ANNEX_B },
.info = {
.name = "LG Electronics LGDT3304 VSB/QAM Frontend",
@@ -1187,7 +1187,7 @@ static struct dvb_frontend_ops lgdt3304_ops = {
.release = lgdt3305_release,
};
-static struct dvb_frontend_ops lgdt3305_ops = {
+static const struct dvb_frontend_ops lgdt3305_ops = {
.delsys = { SYS_ATSC, SYS_DVBC_ANNEX_B },
.info = {
.name = "LG Electronics LGDT3305 VSB/QAM Frontend",
diff --git a/drivers/media/dvb-frontends/lgdt3306a.c b/drivers/media/dvb-frontends/lgdt3306a.c
index 0ca4e810e9d8..19dca46b1171 100644
--- a/drivers/media/dvb-frontends/lgdt3306a.c
+++ b/drivers/media/dvb-frontends/lgdt3306a.c
@@ -1767,7 +1767,7 @@ static void lgdt3306a_release(struct dvb_frontend *fe)
kfree(state);
}
-static struct dvb_frontend_ops lgdt3306a_ops;
+static const struct dvb_frontend_ops lgdt3306a_ops;
struct dvb_frontend *lgdt3306a_attach(const struct lgdt3306a_config *config,
struct i2c_adapter *i2c_adap)
@@ -2103,7 +2103,7 @@ static void lgdt3306a_DumpRegs(struct lgdt3306a_state *state)
-static struct dvb_frontend_ops lgdt3306a_ops = {
+static const struct dvb_frontend_ops lgdt3306a_ops = {
.delsys = { SYS_ATSC, SYS_DVBC_ANNEX_B },
.info = {
.name = "LG Electronics LGDT3306A VSB/QAM Frontend",
diff --git a/drivers/media/dvb-frontends/lgdt330x.c b/drivers/media/dvb-frontends/lgdt330x.c
index 96bf254da21e..2f4a0316f89c 100644
--- a/drivers/media/dvb-frontends/lgdt330x.c
+++ b/drivers/media/dvb-frontends/lgdt330x.c
@@ -405,8 +405,7 @@ static int lgdt330x_set_parameters(struct dvb_frontend *fe)
return -1;
}
if (err < 0)
- printk(KERN_WARNING "lgdt330x: %s: error blasting "
- "bytes to lgdt3303 for modulation type(%d)\n",
+ printk(KERN_WARNING "lgdt330x: %s: error blasting bytes to lgdt3303 for modulation type(%d)\n",
__func__, p->modulation);
/*
@@ -729,8 +728,8 @@ static void lgdt330x_release(struct dvb_frontend* fe)
kfree(state);
}
-static struct dvb_frontend_ops lgdt3302_ops;
-static struct dvb_frontend_ops lgdt3303_ops;
+static const struct dvb_frontend_ops lgdt3302_ops;
+static const struct dvb_frontend_ops lgdt3303_ops;
struct dvb_frontend* lgdt330x_attach(const struct lgdt330x_config* config,
struct i2c_adapter* i2c)
@@ -775,7 +774,7 @@ error:
return NULL;
}
-static struct dvb_frontend_ops lgdt3302_ops = {
+static const struct dvb_frontend_ops lgdt3302_ops = {
.delsys = { SYS_ATSC, SYS_DVBC_ANNEX_B },
.info = {
.name= "LG Electronics LGDT3302 VSB/QAM Frontend",
@@ -798,7 +797,7 @@ static struct dvb_frontend_ops lgdt3302_ops = {
.release = lgdt330x_release,
};
-static struct dvb_frontend_ops lgdt3303_ops = {
+static const struct dvb_frontend_ops lgdt3303_ops = {
.delsys = { SYS_ATSC, SYS_DVBC_ANNEX_B },
.info = {
.name= "LG Electronics LGDT3303 VSB/QAM Frontend",
diff --git a/drivers/media/dvb-frontends/lgs8gl5.c b/drivers/media/dvb-frontends/lgs8gl5.c
index fbfd87b5b803..970e42fdbc1b 100644
--- a/drivers/media/dvb-frontends/lgs8gl5.c
+++ b/drivers/media/dvb-frontends/lgs8gl5.c
@@ -376,7 +376,7 @@ lgs8gl5_release(struct dvb_frontend *fe)
}
-static struct dvb_frontend_ops lgs8gl5_ops;
+static const struct dvb_frontend_ops lgs8gl5_ops;
struct dvb_frontend*
@@ -412,7 +412,7 @@ error:
EXPORT_SYMBOL(lgs8gl5_attach);
-static struct dvb_frontend_ops lgs8gl5_ops = {
+static const struct dvb_frontend_ops lgs8gl5_ops = {
.delsys = { SYS_DTMB },
.info = {
.name = "Legend Silicon LGS-8GL5 DMB-TH",
diff --git a/drivers/media/dvb-frontends/lgs8gxx.c b/drivers/media/dvb-frontends/lgs8gxx.c
index 919daeb96747..6d2e62469d58 100644
--- a/drivers/media/dvb-frontends/lgs8gxx.c
+++ b/drivers/media/dvb-frontends/lgs8gxx.c
@@ -985,7 +985,7 @@ static int lgs8gxx_i2c_gate_ctrl(struct dvb_frontend *fe, int enable)
return lgs8gxx_write_reg(priv, 0x01, 0);
}
-static struct dvb_frontend_ops lgs8gxx_ops = {
+static const struct dvb_frontend_ops lgs8gxx_ops = {
.delsys = { SYS_DTMB },
.info = {
.name = "Legend Silicon LGS8913/LGS8GXX DMB-TH",
diff --git a/drivers/media/dvb-frontends/m88ds3103.c b/drivers/media/dvb-frontends/m88ds3103.c
index e0fe5bc9dbce..50bce68ffd66 100644
--- a/drivers/media/dvb-frontends/m88ds3103.c
+++ b/drivers/media/dvb-frontends/m88ds3103.c
@@ -16,7 +16,7 @@
#include "m88ds3103_priv.h"
-static struct dvb_frontend_ops m88ds3103_ops;
+static const struct dvb_frontend_ops m88ds3103_ops;
/* write single register with mask */
static int m88ds3103_update_bits(struct m88ds3103_dev *dev,
@@ -1295,7 +1295,7 @@ struct dvb_frontend *m88ds3103_attach(const struct m88ds3103_config *cfg,
}
EXPORT_SYMBOL(m88ds3103_attach);
-static struct dvb_frontend_ops m88ds3103_ops = {
+static const struct dvb_frontend_ops m88ds3103_ops = {
.delsys = {SYS_DVBS, SYS_DVBS2},
.info = {
.name = "Montage Technology M88DS3103",
diff --git a/drivers/media/dvb-frontends/m88rs2000.c b/drivers/media/dvb-frontends/m88rs2000.c
index ef79a4ec31e2..ce6c21d405ee 100644
--- a/drivers/media/dvb-frontends/m88rs2000.c
+++ b/drivers/media/dvb-frontends/m88rs2000.c
@@ -75,8 +75,8 @@ static int m88rs2000_writereg(struct m88rs2000_state *state,
ret = i2c_transfer(state->i2c, &msg, 1);
if (ret != 1)
- deb_info("%s: writereg error (reg == 0x%02x, val == 0x%02x, "
- "ret == %i)\n", __func__, reg, data, ret);
+ deb_info("%s: writereg error (reg == 0x%02x, val == 0x%02x, ret == %i)\n",
+ __func__, reg, data, ret);
return (ret != 1) ? -EREMOTEIO : 0;
}
@@ -618,10 +618,9 @@ static int m88rs2000_set_frontend(struct dvb_frontend *fe)
state->no_lock_count = 0;
if (c->delivery_system != SYS_DVBS) {
- deb_info("%s: unsupported delivery "
- "system selected (%d)\n",
- __func__, c->delivery_system);
- return -EOPNOTSUPP;
+ deb_info("%s: unsupported delivery system selected (%d)\n",
+ __func__, c->delivery_system);
+ return -EOPNOTSUPP;
}
/* Set Tuner */
@@ -753,7 +752,7 @@ static void m88rs2000_release(struct dvb_frontend *fe)
kfree(state);
}
-static struct dvb_frontend_ops m88rs2000_ops = {
+static const struct dvb_frontend_ops m88rs2000_ops = {
.delsys = { SYS_DVBS },
.info = {
.name = "M88RS2000 DVB-S",
diff --git a/drivers/media/dvb-frontends/mb86a16.c b/drivers/media/dvb-frontends/mb86a16.c
index 79bc671e8769..9bb122c39c1b 100644
--- a/drivers/media/dvb-frontends/mb86a16.c
+++ b/drivers/media/dvb-frontends/mb86a16.c
@@ -1816,7 +1816,7 @@ static enum dvbfe_algo mb86a16_frontend_algo(struct dvb_frontend *fe)
return DVBFE_ALGO_CUSTOM;
}
-static struct dvb_frontend_ops mb86a16_ops = {
+static const struct dvb_frontend_ops mb86a16_ops = {
.delsys = { SYS_DVBS },
.info = {
.name = "Fujitsu MB86A16 DVB-S",
diff --git a/drivers/media/dvb-frontends/mb86a20s.c b/drivers/media/dvb-frontends/mb86a20s.c
index fe79358b035e..e8ac8c3e2ec0 100644
--- a/drivers/media/dvb-frontends/mb86a20s.c
+++ b/drivers/media/dvb-frontends/mb86a20s.c
@@ -1967,6 +1967,7 @@ static int mb86a20s_read_status_and_stats(struct dvb_frontend *fe,
if (status_nr < 0) {
dev_err(&state->i2c->dev,
"%s: Can't read frontend lock status\n", __func__);
+ rc = status_nr;
goto error;
}
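Besides the constification, the mb86a20s hunk just above carries a real bug fix: on a lock-status read failure the code jumped to the error label without storing the failure in rc, so a stale value (often 0, i.e. success) could be returned. The pattern, reduced to a sketch with a hypothetical helper:

struct demo_state;
static int demo_read_lock(struct demo_state *state);	/* hypothetical */

static int demo_read_status(struct demo_state *state)
{
	int rc = 0;
	int status_nr = demo_read_lock(state);

	if (status_nr < 0) {
		rc = status_nr;	/* without this, rc may still read 0 */
		goto error;
	}

	/* ... normal processing, rc updated along the way ... */

error:
	return rc;
}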
@@ -2059,7 +2060,7 @@ static int mb86a20s_get_frontend_algo(struct dvb_frontend *fe)
return DVBFE_ALGO_HW;
}
-static struct dvb_frontend_ops mb86a20s_ops;
+static const struct dvb_frontend_ops mb86a20s_ops;
struct dvb_frontend *mb86a20s_attach(const struct mb86a20s_config *config,
struct i2c_adapter *i2c)
@@ -2107,7 +2108,7 @@ error:
}
EXPORT_SYMBOL(mb86a20s_attach);
-static struct dvb_frontend_ops mb86a20s_ops = {
+static const struct dvb_frontend_ops mb86a20s_ops = {
.delsys = { SYS_ISDBT },
/* Use dib8000 values per default */
.info = {
diff --git a/drivers/media/dvb-frontends/mn88472.c b/drivers/media/dvb-frontends/mn88472.c
index 18fb2df1e2bd..29dd13b36c28 100644
--- a/drivers/media/dvb-frontends/mn88472.c
+++ b/drivers/media/dvb-frontends/mn88472.c
@@ -411,7 +411,7 @@ err:
return ret;
}
-static struct dvb_frontend_ops mn88472_ops = {
+static const struct dvb_frontend_ops mn88472_ops = {
.delsys = {SYS_DVBT, SYS_DVBT2, SYS_DVBC_ANNEX_A},
.info = {
.name = "Panasonic MN88472",
@@ -488,18 +488,6 @@ static int mn88472_probe(struct i2c_client *client,
goto err_kfree;
}
- /* Check demod answers with correct chip id */
- ret = regmap_read(dev->regmap[0], 0xff, &utmp);
- if (ret)
- goto err_regmap_0_regmap_exit;
-
- dev_dbg(&client->dev, "chip id=%02x\n", utmp);
-
- if (utmp != 0x02) {
- ret = -ENODEV;
- goto err_regmap_0_regmap_exit;
- }
-
/*
* Chip has three I2C addresses for different register banks. Used
* addresses are 0x18, 0x1a and 0x1c. We register two dummy clients,
@@ -536,6 +524,18 @@ static int mn88472_probe(struct i2c_client *client,
}
i2c_set_clientdata(dev->client[2], dev);
+ /* Check demod answers with correct chip id */
+ ret = regmap_read(dev->regmap[2], 0xff, &utmp);
+ if (ret)
+ goto err_regmap_2_regmap_exit;
+
+ dev_dbg(&client->dev, "chip id=%02x\n", utmp);
+
+ if (utmp != 0x02) {
+ ret = -ENODEV;
+ goto err_regmap_2_regmap_exit;
+ }
+
/* Sleep because chip is active by default */
ret = regmap_write(dev->regmap[2], 0x05, 0x3e);
if (ret)
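The mn88472 probe reorder above (mirrored for mn88473 below, with expected id 0x03) moves the chip-id sanity check from before to after the dummy I2C clients are registered, and reads it through regmap[2], the register bank that actually answers with the id, instead of regmap[0]. A standalone sketch of the check, keeping register 0xff and the mn88472 id 0x02 from the hunk:

#include <linux/device.h>
#include <linux/errno.h>
#include <linux/regmap.h>

/* Runs only once all three register banks are mapped. */
static int demo_verify_chip_id(struct regmap *bank2, struct device *dev)
{
	unsigned int id;
	int ret;

	ret = regmap_read(bank2, 0xff, &id);
	if (ret)
		return ret;

	dev_dbg(dev, "chip id=%02x\n", id);
	return id == 0x02 ? 0 : -ENODEV;
}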
diff --git a/drivers/media/dvb-frontends/mn88473.c b/drivers/media/dvb-frontends/mn88473.c
index 451974a1d7ed..c221c7d2ac3e 100644
--- a/drivers/media/dvb-frontends/mn88473.c
+++ b/drivers/media/dvb-frontends/mn88473.c
@@ -239,62 +239,68 @@ static int mn88473_read_status(struct dvb_frontend *fe, enum fe_status *status)
struct i2c_client *client = fe->demodulator_priv;
struct mn88473_dev *dev = i2c_get_clientdata(client);
struct dtv_frontend_properties *c = &fe->dtv_property_cache;
- int ret;
- unsigned int uitmp;
+ int ret, i, stmp;
+ unsigned int utmp, utmp1, utmp2;
+ u8 buf[5];
if (!dev->active) {
ret = -EAGAIN;
goto err;
}
- *status = 0;
-
+ /* Lock detection */
switch (c->delivery_system) {
case SYS_DVBT:
- ret = regmap_read(dev->regmap[0], 0x62, &uitmp);
+ ret = regmap_read(dev->regmap[0], 0x62, &utmp);
if (ret)
goto err;
- if (!(uitmp & 0xa0)) {
- if ((uitmp & 0x0f) >= 0x09)
+ if (!(utmp & 0xa0)) {
+ if ((utmp & 0x0f) >= 0x09)
*status = FE_HAS_SIGNAL | FE_HAS_CARRIER |
FE_HAS_VITERBI | FE_HAS_SYNC |
FE_HAS_LOCK;
- else if ((uitmp & 0x0f) >= 0x03)
+ else if ((utmp & 0x0f) >= 0x03)
*status = FE_HAS_SIGNAL | FE_HAS_CARRIER;
+ } else {
+ *status = 0;
}
break;
case SYS_DVBT2:
- ret = regmap_read(dev->regmap[2], 0x8b, &uitmp);
+ ret = regmap_read(dev->regmap[2], 0x8b, &utmp);
if (ret)
goto err;
- if (!(uitmp & 0x40)) {
- if ((uitmp & 0x0f) >= 0x0d)
+ if (!(utmp & 0x40)) {
+ if ((utmp & 0x0f) >= 0x0d)
*status = FE_HAS_SIGNAL | FE_HAS_CARRIER |
FE_HAS_VITERBI | FE_HAS_SYNC |
FE_HAS_LOCK;
- else if ((uitmp & 0x0f) >= 0x0a)
+ else if ((utmp & 0x0f) >= 0x0a)
*status = FE_HAS_SIGNAL | FE_HAS_CARRIER |
FE_HAS_VITERBI;
- else if ((uitmp & 0x0f) >= 0x07)
+ else if ((utmp & 0x0f) >= 0x07)
*status = FE_HAS_SIGNAL | FE_HAS_CARRIER;
+ } else {
+ *status = 0;
}
break;
case SYS_DVBC_ANNEX_A:
- ret = regmap_read(dev->regmap[1], 0x85, &uitmp);
+ ret = regmap_read(dev->regmap[1], 0x85, &utmp);
if (ret)
goto err;
- if (!(uitmp & 0x40)) {
- ret = regmap_read(dev->regmap[1], 0x89, &uitmp);
+ if (!(utmp & 0x40)) {
+ ret = regmap_read(dev->regmap[1], 0x89, &utmp);
if (ret)
goto err;
- if (uitmp & 0x01)
+ if (utmp & 0x01)
*status = FE_HAS_SIGNAL | FE_HAS_CARRIER |
- FE_HAS_VITERBI | FE_HAS_SYNC |
- FE_HAS_LOCK;
+ FE_HAS_VITERBI | FE_HAS_SYNC |
+ FE_HAS_LOCK;
+ } else {
+ *status = 0;
}
break;
default:
@@ -302,6 +308,148 @@ static int mn88473_read_status(struct dvb_frontend *fe, enum fe_status *status)
goto err;
}
+ /* Signal strength */
+ if (*status & FE_HAS_SIGNAL) {
+ for (i = 0; i < 2; i++) {
+ ret = regmap_bulk_read(dev->regmap[2], 0x86 + i,
+ &buf[i], 1);
+ if (ret)
+ goto err;
+ }
+
+ /* AGCRD[15:6] gives us a 10bit value ([5:0] are always 0) */
+ utmp1 = buf[0] << 8 | buf[1] << 0 | buf[0] >> 2;
+ dev_dbg(&client->dev, "strength=%u\n", utmp1);
+
+ c->strength.stat[0].scale = FE_SCALE_RELATIVE;
+ c->strength.stat[0].uvalue = utmp1;
+ } else {
+ c->strength.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+ }
+
+ /* CNR */
+ if (*status & FE_HAS_VITERBI && c->delivery_system == SYS_DVBT) {
+ /* DVB-T CNR */
+ ret = regmap_bulk_read(dev->regmap[0], 0x8f, buf, 2);
+ if (ret)
+ goto err;
+
+ utmp = buf[0] << 8 | buf[1] << 0;
+ if (utmp) {
+ /* CNR[dB]: 10 * (log10(65536 / value) + 0.2) */
+ /* log10(65536) = 80807124, 0.2 = 3355443 */
+ stmp = div_u64(((u64)80807124 - intlog10(utmp)
+ + 3355443) * 10000, 1 << 24);
+ dev_dbg(&client->dev, "cnr=%d value=%u\n", stmp, utmp);
+ } else {
+ stmp = 0;
+ }
+
+ c->cnr.stat[0].svalue = stmp;
+ c->cnr.stat[0].scale = FE_SCALE_DECIBEL;
+ } else if (*status & FE_HAS_VITERBI &&
+ c->delivery_system == SYS_DVBT2) {
+ /* DVB-T2 CNR */
+ for (i = 0; i < 3; i++) {
+ ret = regmap_bulk_read(dev->regmap[2], 0xb7 + i,
+ &buf[i], 1);
+ if (ret)
+ goto err;
+ }
+
+ utmp = buf[1] << 8 | buf[2] << 0;
+ utmp1 = (buf[0] >> 2) & 0x01; /* 0=SISO, 1=MISO */
+ if (utmp) {
+ if (utmp1) {
+ /* CNR[dB]: 10 * (log10(16384 / value) - 0.6) */
+ /* log10(16384) = 70706234, 0.6 = 10066330 */
+ stmp = div_u64(((u64)70706234 - intlog10(utmp)
+ - 10066330) * 10000, 1 << 24);
+ dev_dbg(&client->dev, "cnr=%d value=%u MISO\n",
+ stmp, utmp);
+ } else {
+ /* CNR[dB]: 10 * (log10(65536 / value) + 0.2) */
+ /* log10(65536) = 80807124, 0.2 = 3355443 */
+ stmp = div_u64(((u64)80807124 - intlog10(utmp)
+ + 3355443) * 10000, 1 << 24);
+ dev_dbg(&client->dev, "cnr=%d value=%u SISO\n",
+ stmp, utmp);
+ }
+ } else {
+ stmp = 0;
+ }
+
+ c->cnr.stat[0].svalue = stmp;
+ c->cnr.stat[0].scale = FE_SCALE_DECIBEL;
+ } else if (*status & FE_HAS_VITERBI &&
+ c->delivery_system == SYS_DVBC_ANNEX_A) {
+ /* DVB-C CNR */
+ ret = regmap_bulk_read(dev->regmap[1], 0xa1, buf, 4);
+ if (ret)
+ goto err;
+
+ utmp1 = buf[0] << 8 | buf[1] << 0; /* signal */
+ utmp2 = buf[2] << 8 | buf[3] << 0; /* noise */
+ if (utmp1 && utmp2) {
+ /* CNR[dB]: 10 * log10(8 * (signal / noise)) */
+ /* log10(8) = 15151336 */
+ stmp = div_u64(((u64)15151336 + intlog10(utmp1)
+ - intlog10(utmp2)) * 10000, 1 << 24);
+ dev_dbg(&client->dev, "cnr=%d signal=%u noise=%u\n",
+ stmp, utmp1, utmp2);
+ } else {
+ stmp = 0;
+ }
+
+ c->cnr.stat[0].svalue = stmp;
+ c->cnr.stat[0].scale = FE_SCALE_DECIBEL;
+ } else {
+ c->cnr.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+ }
+
+ /* BER */
+ if (*status & FE_HAS_LOCK && (c->delivery_system == SYS_DVBT ||
+ c->delivery_system == SYS_DVBC_ANNEX_A)) {
+ /* DVB-T & DVB-C BER */
+ ret = regmap_bulk_read(dev->regmap[0], 0x92, buf, 5);
+ if (ret)
+ goto err;
+
+ utmp1 = buf[0] << 16 | buf[1] << 8 | buf[2] << 0;
+ utmp2 = buf[3] << 8 | buf[4] << 0;
+ utmp2 = utmp2 * 8 * 204;
+ dev_dbg(&client->dev, "post_bit_error=%u post_bit_count=%u\n",
+ utmp1, utmp2);
+
+ c->post_bit_error.stat[0].scale = FE_SCALE_COUNTER;
+ c->post_bit_error.stat[0].uvalue += utmp1;
+ c->post_bit_count.stat[0].scale = FE_SCALE_COUNTER;
+ c->post_bit_count.stat[0].uvalue += utmp2;
+ } else {
+ c->post_bit_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+ c->post_bit_count.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+ }
+
+ /* PER */
+ if (*status & FE_HAS_LOCK) {
+ ret = regmap_bulk_read(dev->regmap[0], 0xdd, buf, 4);
+ if (ret)
+ goto err;
+
+ utmp1 = buf[0] << 8 | buf[1] << 0;
+ utmp2 = buf[2] << 8 | buf[3] << 0;
+ dev_dbg(&client->dev, "block_error=%u block_count=%u\n",
+ utmp1, utmp2);
+
+ c->block_error.stat[0].scale = FE_SCALE_COUNTER;
+ c->block_error.stat[0].uvalue += utmp1;
+ c->block_count.stat[0].scale = FE_SCALE_COUNTER;
+ c->block_count.stat[0].uvalue += utmp2;
+ } else {
+ c->block_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+ c->block_count.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+ }
+
return 0;
err:
dev_dbg(&client->dev, "failed=%d\n", ret);
@@ -312,6 +460,7 @@ static int mn88473_init(struct dvb_frontend *fe)
{
struct i2c_client *client = fe->demodulator_priv;
struct mn88473_dev *dev = i2c_get_clientdata(client);
+ struct dtv_frontend_properties *c = &fe->dtv_property_cache;
int ret, len, remain;
unsigned int uitmp;
const struct firmware *fw;
@@ -378,6 +527,20 @@ warm:
dev->active = true;
+ /* init stats here to indicate which stats are supported */
+ c->strength.len = 1;
+ c->strength.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+ c->cnr.len = 1;
+ c->cnr.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+ c->post_bit_error.len = 1;
+ c->post_bit_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+ c->post_bit_count.len = 1;
+ c->post_bit_count.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+ c->block_error.len = 1;
+ c->block_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+ c->block_count.len = 1;
+ c->block_count.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+
return 0;
err_release_firmware:
release_firmware(fw);
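The init hunk just above establishes the DVBv5 statistics contract: at init time the driver declares one slot per statistic and marks each FE_SCALE_NOT_AVAILABLE, and read_status() then upgrades the scale and fills in values according to reception state. A reduced sketch of the two halves, with hypothetical demo_ names; note that the counter stats accumulate with += rather than overwrite:

#include <linux/types.h>
#include "dvb_frontend.h"

/* At init: advertise which stats exist, but mark them unavailable. */
static void demo_init_stats(struct dtv_frontend_properties *c)
{
	c->cnr.len = 1;
	c->cnr.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
	c->post_bit_error.len = 1;
	c->post_bit_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
	c->post_bit_count.len = 1;
	c->post_bit_count.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
}

/* In read_status(), once locked: counters are running totals. */
static void demo_update_ber(struct dtv_frontend_properties *c,
			    u32 bit_errors, u32 bit_count)
{
	c->post_bit_error.stat[0].scale = FE_SCALE_COUNTER;
	c->post_bit_error.stat[0].uvalue += bit_errors;
	c->post_bit_count.stat[0].scale = FE_SCALE_COUNTER;
	c->post_bit_count.stat[0].uvalue += bit_count;
}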
@@ -485,18 +648,6 @@ static int mn88473_probe(struct i2c_client *client,
goto err_kfree;
}
- /* Check demod answers with correct chip id */
- ret = regmap_read(dev->regmap[0], 0xff, &uitmp);
- if (ret)
- goto err_regmap_0_regmap_exit;
-
- dev_dbg(&client->dev, "chip id=%02x\n", uitmp);
-
- if (uitmp != 0x03) {
- ret = -ENODEV;
- goto err_regmap_0_regmap_exit;
- }
-
/*
* Chip has three I2C addresses for different register banks. Used
* addresses are 0x18, 0x1a and 0x1c. We register two dummy clients,
@@ -533,6 +684,18 @@ static int mn88473_probe(struct i2c_client *client,
}
i2c_set_clientdata(dev->client[2], dev);
+ /* Check demod answers with correct chip id */
+ ret = regmap_read(dev->regmap[2], 0xff, &uitmp);
+ if (ret)
+ goto err_regmap_2_regmap_exit;
+
+ dev_dbg(&client->dev, "chip id=%02x\n", uitmp);
+
+ if (uitmp != 0x03) {
+ ret = -ENODEV;
+ goto err_regmap_2_regmap_exit;
+ }
+
/* Sleep because chip is active by default */
ret = regmap_write(dev->regmap[2], 0x05, 0x3e);
if (ret)
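The CNR hunks in mn88473_read_status() above lean on intlog10() from dvb_math.h, which returns log10(x) as a fixed-point value scaled by 2^24. That explains the magic constants: 80807124 is log10(65536) * 2^24, 3355443 is 0.2 * 2^24, and multiplying by 10000 before the divide by 2^24 yields the result directly in the 0.001 dB units used for FE_SCALE_DECIBEL svalues. A host-side check of the DVB-T formula, written as ordinary userspace C under those assumptions (build with -lm):

#include <stdio.h>
#include <stdint.h>
#include <math.h>

/* Emulate the kernel's intlog10(): log10(x) scaled by 2^24. */
static uint64_t intlog10(uint32_t x)
{
	return (uint64_t)(log10((double)x) * (1 << 24));
}

int main(void)
{
	uint32_t value = 6554;	/* example register reading, ~65536/10 */
	int64_t svalue;

	/* CNR[dB] = 10 * (log10(65536 / value) + 0.2)
	 * 80807124 = log10(65536) * 2^24, 3355443 = 0.2 * 2^24 */
	svalue = (int64_t)(((uint64_t)80807124 - intlog10(value)
			    + 3355443) * 10000 >> 24);

	/* Expect ~12000, i.e. 12.000 dB in 0.001 dB units. */
	printf("cnr = %lld mdB\n", (long long)svalue);
	return 0;
}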
diff --git a/drivers/media/dvb-frontends/mn88473_priv.h b/drivers/media/dvb-frontends/mn88473_priv.h
index e6c65893e451..5fc463d147c8 100644
--- a/drivers/media/dvb-frontends/mn88473_priv.h
+++ b/drivers/media/dvb-frontends/mn88473_priv.h
@@ -18,7 +18,9 @@
#define MN88473_PRIV_H
#include "dvb_frontend.h"
+#include "dvb_math.h"
#include "mn88473.h"
+#include <linux/math64.h>
#include <linux/firmware.h>
#include <linux/regmap.h>
diff --git a/drivers/media/dvb-frontends/mt312.c b/drivers/media/dvb-frontends/mt312.c
index fc08429c99b7..961b9a2508e0 100644
--- a/drivers/media/dvb-frontends/mt312.c
+++ b/drivers/media/dvb-frontends/mt312.c
@@ -457,8 +457,8 @@ static int mt312_read_status(struct dvb_frontend *fe, enum fe_status *s)
if (ret < 0)
return ret;
- dprintk("QPSK_STAT_H: 0x%02x, QPSK_STAT_L: 0x%02x,"
- " FEC_STATUS: 0x%02x\n", status[0], status[1], status[2]);
+ dprintk("QPSK_STAT_H: 0x%02x, QPSK_STAT_L: 0x%02x, FEC_STATUS: 0x%02x\n",
+ status[0], status[1], status[2]);
if (status[0] & 0xc0)
*s |= FE_HAS_SIGNAL; /* signal noise ratio */
@@ -748,7 +748,7 @@ static void mt312_release(struct dvb_frontend *fe)
}
#define MT312_SYS_CLK 90000000UL /* 90 MHz */
-static struct dvb_frontend_ops mt312_ops = {
+static const struct dvb_frontend_ops mt312_ops = {
.delsys = { SYS_DVBS },
.info = {
.name = "Zarlink ???? DVB-S",
@@ -827,8 +827,7 @@ struct dvb_frontend *mt312_attach(const struct mt312_config *config,
state->freq_mult = 9;
break;
default:
- printk(KERN_WARNING "Only Zarlink VP310/MT312/ZL10313"
- " are supported chips.\n");
+ printk(KERN_WARNING "Only Zarlink VP310/MT312/ZL10313 are supported chips.\n");
goto error;
}
diff --git a/drivers/media/dvb-frontends/mt352.c b/drivers/media/dvb-frontends/mt352.c
index c0bb6328956b..48ea0408f02a 100644
--- a/drivers/media/dvb-frontends/mt352.c
+++ b/drivers/media/dvb-frontends/mt352.c
@@ -538,7 +538,7 @@ static void mt352_release(struct dvb_frontend* fe)
kfree(state);
}
-static struct dvb_frontend_ops mt352_ops;
+static const struct dvb_frontend_ops mt352_ops;
struct dvb_frontend* mt352_attach(const struct mt352_config* config,
struct i2c_adapter* i2c)
@@ -566,7 +566,7 @@ error:
return NULL;
}
-static struct dvb_frontend_ops mt352_ops = {
+static const struct dvb_frontend_ops mt352_ops = {
.delsys = { SYS_DVBT },
.info = {
.name = "Zarlink MT352 DVB-T",
diff --git a/drivers/media/dvb-frontends/nxt200x.c b/drivers/media/dvb-frontends/nxt200x.c
index 79c3040912ab..2fe40372ca07 100644
--- a/drivers/media/dvb-frontends/nxt200x.c
+++ b/drivers/media/dvb-frontends/nxt200x.c
@@ -289,8 +289,7 @@ static void nxt200x_microcontroller_stop (struct nxt200x_state* state)
counter++;
}
- pr_warn("Timeout waiting for nxt200x to stop. This is ok after "
- "firmware upload.\n");
+ pr_warn("Timeout waiting for nxt200x to stop. This is ok after firmware upload.\n");
return;
}
@@ -893,8 +892,8 @@ static int nxt2002_init(struct dvb_frontend* fe)
state->i2c->dev.parent);
pr_debug("%s: Waiting for firmware upload(2)...\n", __func__);
if (ret) {
- pr_err("%s: No firmware uploaded (timeout or file not found?)"
- "\n", __func__);
+ pr_err("%s: No firmware uploaded (timeout or file not found?)\n",
+ __func__);
return ret;
}
@@ -960,8 +959,8 @@ static int nxt2004_init(struct dvb_frontend* fe)
state->i2c->dev.parent);
pr_debug("%s: Waiting for firmware upload(2)...\n", __func__);
if (ret) {
- pr_err("%s: No firmware uploaded (timeout or file not found?)"
- "\n", __func__);
+ pr_err("%s: No firmware uploaded (timeout or file not found?)\n",
+ __func__);
return ret;
}
@@ -1150,7 +1149,7 @@ static void nxt200x_release(struct dvb_frontend* fe)
kfree(state);
}
-static struct dvb_frontend_ops nxt200x_ops;
+static const struct dvb_frontend_ops nxt200x_ops;
struct dvb_frontend* nxt200x_attach(const struct nxt200x_config* config,
struct i2c_adapter* i2c)
@@ -1213,7 +1212,7 @@ error:
return NULL;
}
-static struct dvb_frontend_ops nxt200x_ops = {
+static const struct dvb_frontend_ops nxt200x_ops = {
.delsys = { SYS_ATSC, SYS_DVBC_ANNEX_B },
.info = {
.name = "Nextwave NXT200X VSB/QAM frontend",
diff --git a/drivers/media/dvb-frontends/nxt6000.c b/drivers/media/dvb-frontends/nxt6000.c
index 73f9505367ac..1ce5ea28489b 100644
--- a/drivers/media/dvb-frontends/nxt6000.c
+++ b/drivers/media/dvb-frontends/nxt6000.c
@@ -19,6 +19,8 @@
Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
@@ -39,7 +41,11 @@ struct nxt6000_state {
};
static int debug;
-#define dprintk if (debug) printk
+#define dprintk(fmt, arg...) do { \
+ if (debug) \
+ printk(KERN_DEBUG pr_fmt("%s: " fmt), \
+ __func__, ##arg); \
+} while (0)
static int nxt6000_writereg(struct nxt6000_state* state, u8 reg, u8 data)
{
@@ -215,119 +221,129 @@ static void nxt6000_dump_status(struct nxt6000_state *state)
{
u8 val;
-/*
- printk("RS_COR_STAT: 0x%02X\n", nxt6000_readreg(fe, RS_COR_STAT));
- printk("VIT_SYNC_STATUS: 0x%02X\n", nxt6000_readreg(fe, VIT_SYNC_STATUS));
- printk("OFDM_COR_STAT: 0x%02X\n", nxt6000_readreg(fe, OFDM_COR_STAT));
- printk("OFDM_SYR_STAT: 0x%02X\n", nxt6000_readreg(fe, OFDM_SYR_STAT));
- printk("OFDM_TPS_RCVD_1: 0x%02X\n", nxt6000_readreg(fe, OFDM_TPS_RCVD_1));
- printk("OFDM_TPS_RCVD_2: 0x%02X\n", nxt6000_readreg(fe, OFDM_TPS_RCVD_2));
- printk("OFDM_TPS_RCVD_3: 0x%02X\n", nxt6000_readreg(fe, OFDM_TPS_RCVD_3));
- printk("OFDM_TPS_RCVD_4: 0x%02X\n", nxt6000_readreg(fe, OFDM_TPS_RCVD_4));
- printk("OFDM_TPS_RESERVED_1: 0x%02X\n", nxt6000_readreg(fe, OFDM_TPS_RESERVED_1));
- printk("OFDM_TPS_RESERVED_2: 0x%02X\n", nxt6000_readreg(fe, OFDM_TPS_RESERVED_2));
-*/
- printk("NXT6000 status:");
+#if 0
+ pr_info("RS_COR_STAT: 0x%02X\n",
+ nxt6000_readreg(fe, RS_COR_STAT));
+ pr_info("VIT_SYNC_STATUS: 0x%02X\n",
+ nxt6000_readreg(fe, VIT_SYNC_STATUS));
+ pr_info("OFDM_COR_STAT: 0x%02X\n",
+ nxt6000_readreg(fe, OFDM_COR_STAT));
+ pr_info("OFDM_SYR_STAT: 0x%02X\n",
+ nxt6000_readreg(fe, OFDM_SYR_STAT));
+ pr_info("OFDM_TPS_RCVD_1: 0x%02X\n",
+ nxt6000_readreg(fe, OFDM_TPS_RCVD_1));
+ pr_info("OFDM_TPS_RCVD_2: 0x%02X\n",
+ nxt6000_readreg(fe, OFDM_TPS_RCVD_2));
+ pr_info("OFDM_TPS_RCVD_3: 0x%02X\n",
+ nxt6000_readreg(fe, OFDM_TPS_RCVD_3));
+ pr_info("OFDM_TPS_RCVD_4: 0x%02X\n",
+ nxt6000_readreg(fe, OFDM_TPS_RCVD_4));
+ pr_info("OFDM_TPS_RESERVED_1: 0x%02X\n",
+ nxt6000_readreg(fe, OFDM_TPS_RESERVED_1));
+ pr_info("OFDM_TPS_RESERVED_2: 0x%02X\n",
+ nxt6000_readreg(fe, OFDM_TPS_RESERVED_2));
+#endif
+ pr_info("NXT6000 status:");
val = nxt6000_readreg(state, RS_COR_STAT);
- printk(" DATA DESCR LOCK: %d,", val & 0x01);
- printk(" DATA SYNC LOCK: %d,", (val >> 1) & 0x01);
+ pr_cont(" DATA DESCR LOCK: %d,", val & 0x01);
+ pr_cont(" DATA SYNC LOCK: %d,", (val >> 1) & 0x01);
val = nxt6000_readreg(state, VIT_SYNC_STATUS);
- printk(" VITERBI LOCK: %d,", (val >> 7) & 0x01);
+ pr_cont(" VITERBI LOCK: %d,", (val >> 7) & 0x01);
switch ((val >> 4) & 0x07) {
case 0x00:
- printk(" VITERBI CODERATE: 1/2,");
+ pr_cont(" VITERBI CODERATE: 1/2,");
break;
case 0x01:
- printk(" VITERBI CODERATE: 2/3,");
+ pr_cont(" VITERBI CODERATE: 2/3,");
break;
case 0x02:
- printk(" VITERBI CODERATE: 3/4,");
+ pr_cont(" VITERBI CODERATE: 3/4,");
break;
case 0x03:
- printk(" VITERBI CODERATE: 5/6,");
+ pr_cont(" VITERBI CODERATE: 5/6,");
break;
case 0x04:
- printk(" VITERBI CODERATE: 7/8,");
+ pr_cont(" VITERBI CODERATE: 7/8,");
break;
default:
- printk(" VITERBI CODERATE: Reserved,");
+ pr_cont(" VITERBI CODERATE: Reserved,");
}
val = nxt6000_readreg(state, OFDM_COR_STAT);
- printk(" CHCTrack: %d,", (val >> 7) & 0x01);
- printk(" TPSLock: %d,", (val >> 6) & 0x01);
- printk(" SYRLock: %d,", (val >> 5) & 0x01);
- printk(" AGCLock: %d,", (val >> 4) & 0x01);
+ pr_cont(" CHCTrack: %d,", (val >> 7) & 0x01);
+ pr_cont(" TPSLock: %d,", (val >> 6) & 0x01);
+ pr_cont(" SYRLock: %d,", (val >> 5) & 0x01);
+ pr_cont(" AGCLock: %d,", (val >> 4) & 0x01);
switch (val & 0x0F) {
case 0x00:
- printk(" CoreState: IDLE,");
+ pr_cont(" CoreState: IDLE,");
break;
case 0x02:
- printk(" CoreState: WAIT_AGC,");
+ pr_cont(" CoreState: WAIT_AGC,");
break;
case 0x03:
- printk(" CoreState: WAIT_SYR,");
+ pr_cont(" CoreState: WAIT_SYR,");
break;
case 0x04:
- printk(" CoreState: WAIT_PPM,");
+ pr_cont(" CoreState: WAIT_PPM,");
break;
case 0x01:
- printk(" CoreState: WAIT_TRL,");
+ pr_cont(" CoreState: WAIT_TRL,");
break;
case 0x05:
- printk(" CoreState: WAIT_TPS,");
+ pr_cont(" CoreState: WAIT_TPS,");
break;
case 0x06:
- printk(" CoreState: MONITOR_TPS,");
+ pr_cont(" CoreState: MONITOR_TPS,");
break;
default:
- printk(" CoreState: Reserved,");
+ pr_cont(" CoreState: Reserved,");
}
val = nxt6000_readreg(state, OFDM_SYR_STAT);
- printk(" SYRLock: %d,", (val >> 4) & 0x01);
- printk(" SYRMode: %s,", (val >> 2) & 0x01 ? "8K" : "2K");
+ pr_cont(" SYRLock: %d,", (val >> 4) & 0x01);
+ pr_cont(" SYRMode: %s,", (val >> 2) & 0x01 ? "8K" : "2K");
switch ((val >> 4) & 0x03) {
case 0x00:
- printk(" SYRGuard: 1/32,");
+ pr_cont(" SYRGuard: 1/32,");
break;
case 0x01:
- printk(" SYRGuard: 1/16,");
+ pr_cont(" SYRGuard: 1/16,");
break;
case 0x02:
- printk(" SYRGuard: 1/8,");
+ pr_cont(" SYRGuard: 1/8,");
break;
case 0x03:
- printk(" SYRGuard: 1/4,");
+ pr_cont(" SYRGuard: 1/4,");
break;
}
@@ -336,77 +352,77 @@ static void nxt6000_dump_status(struct nxt6000_state *state)
switch ((val >> 4) & 0x07) {
case 0x00:
- printk(" TPSLP: 1/2,");
+ pr_cont(" TPSLP: 1/2,");
break;
case 0x01:
- printk(" TPSLP: 2/3,");
+ pr_cont(" TPSLP: 2/3,");
break;
case 0x02:
- printk(" TPSLP: 3/4,");
+ pr_cont(" TPSLP: 3/4,");
break;
case 0x03:
- printk(" TPSLP: 5/6,");
+ pr_cont(" TPSLP: 5/6,");
break;
case 0x04:
- printk(" TPSLP: 7/8,");
+ pr_cont(" TPSLP: 7/8,");
break;
default:
- printk(" TPSLP: Reserved,");
+ pr_cont(" TPSLP: Reserved,");
}
switch (val & 0x07) {
case 0x00:
- printk(" TPSHP: 1/2,");
+ pr_cont(" TPSHP: 1/2,");
break;
case 0x01:
- printk(" TPSHP: 2/3,");
+ pr_cont(" TPSHP: 2/3,");
break;
case 0x02:
- printk(" TPSHP: 3/4,");
+ pr_cont(" TPSHP: 3/4,");
break;
case 0x03:
- printk(" TPSHP: 5/6,");
+ pr_cont(" TPSHP: 5/6,");
break;
case 0x04:
- printk(" TPSHP: 7/8,");
+ pr_cont(" TPSHP: 7/8,");
break;
default:
- printk(" TPSHP: Reserved,");
+ pr_cont(" TPSHP: Reserved,");
}
val = nxt6000_readreg(state, OFDM_TPS_RCVD_4);
- printk(" TPSMode: %s,", val & 0x01 ? "8K" : "2K");
+ pr_cont(" TPSMode: %s,", val & 0x01 ? "8K" : "2K");
switch ((val >> 4) & 0x03) {
case 0x00:
- printk(" TPSGuard: 1/32,");
+ pr_cont(" TPSGuard: 1/32,");
break;
case 0x01:
- printk(" TPSGuard: 1/16,");
+ pr_cont(" TPSGuard: 1/16,");
break;
case 0x02:
- printk(" TPSGuard: 1/8,");
+ pr_cont(" TPSGuard: 1/8,");
break;
case 0x03:
- printk(" TPSGuard: 1/4,");
+ pr_cont(" TPSGuard: 1/4,");
break;
}
@@ -416,8 +432,8 @@ static void nxt6000_dump_status(struct nxt6000_state *state)
val = nxt6000_readreg(state, RF_AGC_STATUS);
val = nxt6000_readreg(state, RF_AGC_STATUS);
- printk(" RF AGC LOCK: %d,", (val >> 4) & 0x01);
- printk("\n");
+ pr_cont(" RF AGC LOCK: %d,", (val >> 4) & 0x01);
+ pr_cont("\n");
}
static int nxt6000_read_status(struct dvb_frontend *fe, enum fe_status *status)
@@ -548,7 +564,7 @@ static int nxt6000_i2c_gate_ctrl(struct dvb_frontend* fe, int enable)
}
}
-static struct dvb_frontend_ops nxt6000_ops;
+static const struct dvb_frontend_ops nxt6000_ops;
struct dvb_frontend* nxt6000_attach(const struct nxt6000_config* config,
struct i2c_adapter* i2c)
@@ -576,7 +592,7 @@ error:
return NULL;
}
-static struct dvb_frontend_ops nxt6000_ops = {
+static const struct dvb_frontend_ops nxt6000_ops = {
.delsys = { SYS_DVBT },
.info = {
.name = "NxtWave NXT6000 DVB-T",
diff --git a/drivers/media/dvb-frontends/or51132.c b/drivers/media/dvb-frontends/or51132.c
index a165af990672..17bdadd7d0e1 100644
--- a/drivers/media/dvb-frontends/or51132.c
+++ b/drivers/media/dvb-frontends/or51132.c
@@ -342,15 +342,13 @@ static int or51132_set_parameters(struct dvb_frontend *fe)
fwname);
ret = request_firmware(&fw, fwname, state->i2c->dev.parent);
if (ret) {
- printk(KERN_WARNING "or51132: No firmware up"
- "loaded(timeout or file not found?)\n");
+ printk(KERN_WARNING "or51132: No firmware uploaded(timeout or file not found?)\n");
return ret;
}
ret = or51132_load_firmware(fe, fw);
release_firmware(fw);
if (ret) {
- printk(KERN_WARNING "or51132: Writing firmware to "
- "device failed!\n");
+ printk(KERN_WARNING "or51132: Writing firmware to device failed!\n");
return ret;
}
printk("or51132: Firmware upload complete.\n");
@@ -561,7 +559,7 @@ static void or51132_release(struct dvb_frontend* fe)
kfree(state);
}
-static struct dvb_frontend_ops or51132_ops;
+static const struct dvb_frontend_ops or51132_ops;
struct dvb_frontend* or51132_attach(const struct or51132_config* config,
struct i2c_adapter* i2c)
@@ -585,7 +583,7 @@ struct dvb_frontend* or51132_attach(const struct or51132_config* config,
return &state->frontend;
}
-static struct dvb_frontend_ops or51132_ops = {
+static const struct dvb_frontend_ops or51132_ops = {
.delsys = { SYS_ATSC, SYS_DVBC_ANNEX_B },
.info = {
.name = "Oren OR51132 VSB/QAM Frontend",
diff --git a/drivers/media/dvb-frontends/or51211.c b/drivers/media/dvb-frontends/or51211.c
index e82413b975e6..27eb73aa4f62 100644
--- a/drivers/media/dvb-frontends/or51211.c
+++ b/drivers/media/dvb-frontends/or51211.c
@@ -377,8 +377,7 @@ static int or51211_init(struct dvb_frontend* fe)
OR51211_DEFAULT_FIRMWARE);
pr_info("Got Hotplug firmware\n");
if (ret) {
- pr_warn("No firmware uploaded "
- "(timeout or file not found?)\n");
+ pr_warn("No firmware uploaded (timeout or file not found?)\n");
return ret;
}
@@ -508,7 +507,7 @@ static void or51211_release(struct dvb_frontend* fe)
kfree(state);
}
-static struct dvb_frontend_ops or51211_ops;
+static const struct dvb_frontend_ops or51211_ops;
struct dvb_frontend* or51211_attach(const struct or51211_config* config,
struct i2c_adapter* i2c)
@@ -532,7 +531,7 @@ struct dvb_frontend* or51211_attach(const struct or51211_config* config,
return &state->frontend;
}
-static struct dvb_frontend_ops or51211_ops = {
+static const struct dvb_frontend_ops or51211_ops = {
.delsys = { SYS_ATSC, SYS_DVBC_ANNEX_B },
.info = {
.name = "Oren OR51211 VSB Frontend",
diff --git a/drivers/media/dvb-frontends/rtl2830.c b/drivers/media/dvb-frontends/rtl2830.c
index 87226056f226..7bbfe11d11ed 100644
--- a/drivers/media/dvb-frontends/rtl2830.c
+++ b/drivers/media/dvb-frontends/rtl2830.c
@@ -548,7 +548,7 @@ static int rtl2830_read_signal_strength(struct dvb_frontend *fe, u16 *strength)
return 0;
}
-static struct dvb_frontend_ops rtl2830_ops = {
+static const struct dvb_frontend_ops rtl2830_ops = {
.delsys = {SYS_DVBT},
.info = {
.name = "Realtek RTL2830 (DVB-T)",
diff --git a/drivers/media/dvb-frontends/rtl2832.c b/drivers/media/dvb-frontends/rtl2832.c
index 0ced01f1012e..94bf5b7d6f3f 100644
--- a/drivers/media/dvb-frontends/rtl2832.c
+++ b/drivers/media/dvb-frontends/rtl2832.c
@@ -837,7 +837,7 @@ static int rtl2832_deselect(struct i2c_mux_core *muxc, u32 chan_id)
return 0;
}
-static struct dvb_frontend_ops rtl2832_ops = {
+static const struct dvb_frontend_ops rtl2832_ops = {
.delsys = { SYS_DVBT },
.info = {
.name = "Realtek RTL2832 (DVB-T)",
diff --git a/drivers/media/dvb-frontends/s5h1409.c b/drivers/media/dvb-frontends/s5h1409.c
index c68965ad97c0..f370c6df0a8b 100644
--- a/drivers/media/dvb-frontends/s5h1409.c
+++ b/drivers/media/dvb-frontends/s5h1409.c
@@ -321,8 +321,8 @@ static int s5h1409_writereg(struct s5h1409_state *state, u8 reg, u16 data)
ret = i2c_transfer(state->i2c, &msg, 1);
if (ret != 1)
- printk(KERN_ERR "%s: error (reg == 0x%02x, val == 0x%04x, "
- "ret == %i)\n", __func__, reg, data, ret);
+ printk(KERN_ERR "%s: error (reg == 0x%02x, val == 0x%04x, ret == %i)\n",
+ __func__, reg, data, ret);
return (ret != 1) ? -1 : 0;
}
@@ -949,7 +949,7 @@ static void s5h1409_release(struct dvb_frontend *fe)
kfree(state);
}
-static struct dvb_frontend_ops s5h1409_ops;
+static const struct dvb_frontend_ops s5h1409_ops;
struct dvb_frontend *s5h1409_attach(const struct s5h1409_config *config,
struct i2c_adapter *i2c)
@@ -995,7 +995,7 @@ error:
}
EXPORT_SYMBOL(s5h1409_attach);
-static struct dvb_frontend_ops s5h1409_ops = {
+static const struct dvb_frontend_ops s5h1409_ops = {
.delsys = { SYS_ATSC, SYS_DVBC_ANNEX_B },
.info = {
.name = "Samsung S5H1409 QAM/8VSB Frontend",
diff --git a/drivers/media/dvb-frontends/s5h1411.c b/drivers/media/dvb-frontends/s5h1411.c
index 90f86e82b087..f29750a96196 100644
--- a/drivers/media/dvb-frontends/s5h1411.c
+++ b/drivers/media/dvb-frontends/s5h1411.c
@@ -350,8 +350,8 @@ static int s5h1411_writereg(struct s5h1411_state *state,
ret = i2c_transfer(state->i2c, &msg, 1);
if (ret != 1)
- printk(KERN_ERR "%s: writereg error 0x%02x 0x%02x 0x%04x, "
- "ret == %i)\n", __func__, addr, reg, data, ret);
+ printk(KERN_ERR "%s: writereg error 0x%02x 0x%02x 0x%04x, ret == %i)\n",
+ __func__, addr, reg, data, ret);
return (ret != 1) ? -1 : 0;
}
@@ -864,7 +864,7 @@ static void s5h1411_release(struct dvb_frontend *fe)
kfree(state);
}
-static struct dvb_frontend_ops s5h1411_ops;
+static const struct dvb_frontend_ops s5h1411_ops;
struct dvb_frontend *s5h1411_attach(const struct s5h1411_config *config,
struct i2c_adapter *i2c)
@@ -914,7 +914,7 @@ error:
}
EXPORT_SYMBOL(s5h1411_attach);
-static struct dvb_frontend_ops s5h1411_ops = {
+static const struct dvb_frontend_ops s5h1411_ops = {
.delsys = { SYS_ATSC, SYS_DVBC_ANNEX_B },
.info = {
.name = "Samsung S5H1411 QAM/8VSB Frontend",
diff --git a/drivers/media/dvb-frontends/s5h1420.c b/drivers/media/dvb-frontends/s5h1420.c
index d7d0b7d57ad7..f9a18fe94d88 100644
--- a/drivers/media/dvb-frontends/s5h1420.c
+++ b/drivers/media/dvb-frontends/s5h1420.c
@@ -880,7 +880,7 @@ struct i2c_adapter *s5h1420_get_tuner_i2c_adapter(struct dvb_frontend *fe)
}
EXPORT_SYMBOL(s5h1420_get_tuner_i2c_adapter);
-static struct dvb_frontend_ops s5h1420_ops;
+static const struct dvb_frontend_ops s5h1420_ops;
struct dvb_frontend *s5h1420_attach(const struct s5h1420_config *config,
struct i2c_adapter *i2c)
@@ -934,7 +934,7 @@ error:
}
EXPORT_SYMBOL(s5h1420_attach);
-static struct dvb_frontend_ops s5h1420_ops = {
+static const struct dvb_frontend_ops s5h1420_ops = {
.delsys = { SYS_DVBS },
.info = {
.name = "Samsung S5H1420/PnpNetwork PN1010 DVB-S",
diff --git a/drivers/media/dvb-frontends/s5h1432.c b/drivers/media/dvb-frontends/s5h1432.c
index 4215652f8eb7..a32fd9bc51a9 100644
--- a/drivers/media/dvb-frontends/s5h1432.c
+++ b/drivers/media/dvb-frontends/s5h1432.c
@@ -63,8 +63,8 @@ static int s5h1432_writereg(struct s5h1432_state *state,
ret = i2c_transfer(state->i2c, &msg, 1);
if (ret != 1)
- printk(KERN_ERR "%s: writereg error 0x%02x 0x%02x 0x%04x, "
- "ret == %i)\n", __func__, addr, reg, data, ret);
+ printk(KERN_ERR "%s: writereg error 0x%02x 0x%02x 0x%04x, ret == %i)\n",
+ __func__, addr, reg, data, ret);
return (ret != 1) ? -1 : 0;
}
@@ -341,7 +341,7 @@ static void s5h1432_release(struct dvb_frontend *fe)
kfree(state);
}
-static struct dvb_frontend_ops s5h1432_ops;
+static const struct dvb_frontend_ops s5h1432_ops;
struct dvb_frontend *s5h1432_attach(const struct s5h1432_config *config,
struct i2c_adapter *i2c)
@@ -370,7 +370,7 @@ struct dvb_frontend *s5h1432_attach(const struct s5h1432_config *config,
}
EXPORT_SYMBOL(s5h1432_attach);
-static struct dvb_frontend_ops s5h1432_ops = {
+static const struct dvb_frontend_ops s5h1432_ops = {
.delsys = { SYS_DVBT },
.info = {
.name = "Samsung s5h1432 DVB-T Frontend",
diff --git a/drivers/media/dvb-frontends/s921.c b/drivers/media/dvb-frontends/s921.c
index b5e3d90eba5e..274544a3ae0e 100644
--- a/drivers/media/dvb-frontends/s921.c
+++ b/drivers/media/dvb-frontends/s921.c
@@ -214,8 +214,8 @@ static int s921_i2c_writereg(struct s921_state *state,
rc = i2c_transfer(state->i2c, &msg, 1);
if (rc != 1) {
- printk("%s: writereg rcor(rc == %i, reg == 0x%02x,"
- " data == 0x%02x)\n", __func__, rc, reg, data);
+ printk("%s: writereg rcor(rc == %i, reg == 0x%02x, data == 0x%02x)\n",
+ __func__, rc, reg, data);
return rc;
}
@@ -477,7 +477,7 @@ static void s921_release(struct dvb_frontend *fe)
kfree(state);
}
-static struct dvb_frontend_ops s921_ops;
+static const struct dvb_frontend_ops s921_ops;
struct dvb_frontend *s921_attach(const struct s921_config *config,
struct i2c_adapter *i2c)
@@ -505,7 +505,7 @@ struct dvb_frontend *s921_attach(const struct s921_config *config,
}
EXPORT_SYMBOL(s921_attach);
-static struct dvb_frontend_ops s921_ops = {
+static const struct dvb_frontend_ops s921_ops = {
.delsys = { SYS_ISDBT },
/* Use dib8000 values per default */
.info = {
diff --git a/drivers/media/dvb-frontends/si2165.c b/drivers/media/dvb-frontends/si2165.c
index 78669ea68c61..528b82a5dd46 100644
--- a/drivers/media/dvb-frontends/si2165.c
+++ b/drivers/media/dvb-frontends/si2165.c
@@ -978,7 +978,7 @@ static int si2165_set_frontend(struct dvb_frontend *fe)
return 0;
}
-static struct dvb_frontend_ops si2165_ops = {
+static const struct dvb_frontend_ops si2165_ops = {
.info = {
.name = "Silicon Labs ",
/* For DVB-C */
diff --git a/drivers/media/dvb-frontends/si21xx.c b/drivers/media/dvb-frontends/si21xx.c
index 62ad7a7be9f8..4e8c3ac4303f 100644
--- a/drivers/media/dvb-frontends/si21xx.c
+++ b/drivers/media/dvb-frontends/si21xx.c
@@ -245,8 +245,8 @@ static int si21_writeregs(struct si21xx_state *state, u8 reg1,
ret = i2c_transfer(state->i2c, &msg, 1);
if (ret != 1)
- dprintk("%s: writereg error (reg1 == 0x%02x, data == 0x%02x, "
- "ret == %i)\n", __func__, reg1, data[0], ret);
+ dprintk("%s: writereg error (reg1 == 0x%02x, data == 0x%02x, ret == %i)\n",
+ __func__, reg1, data[0], ret);
return (ret != 1) ? -EREMOTEIO : 0;
}
@@ -265,8 +265,8 @@ static int si21_writereg(struct si21xx_state *state, u8 reg, u8 data)
ret = i2c_transfer(state->i2c, &msg, 1);
if (ret != 1)
- dprintk("%s: writereg error (reg == 0x%02x, data == 0x%02x, "
- "ret == %i)\n", __func__, reg, data, ret);
+ dprintk("%s: writereg error (reg == 0x%02x, data == 0x%02x, ret == %i)\n",
+ __func__, reg, data, ret);
return (ret != 1) ? -EREMOTEIO : 0;
}
@@ -866,7 +866,7 @@ static void si21xx_release(struct dvb_frontend *fe)
kfree(state);
}
-static struct dvb_frontend_ops si21xx_ops = {
+static const struct dvb_frontend_ops si21xx_ops = {
.delsys = { SYS_DVBS },
.info = {
.name = "SL SI21XX DVB-S",
diff --git a/drivers/media/dvb-frontends/sp8870.c b/drivers/media/dvb-frontends/sp8870.c
index e87ac30d7fb8..04454cb78467 100644
--- a/drivers/media/dvb-frontends/sp8870.c
+++ b/drivers/media/dvb-frontends/sp8870.c
@@ -551,7 +551,7 @@ static void sp8870_release(struct dvb_frontend* fe)
kfree(state);
}
-static struct dvb_frontend_ops sp8870_ops;
+static const struct dvb_frontend_ops sp8870_ops;
struct dvb_frontend* sp8870_attach(const struct sp8870_config* config,
struct i2c_adapter* i2c)
@@ -580,7 +580,7 @@ error:
return NULL;
}
-static struct dvb_frontend_ops sp8870_ops = {
+static const struct dvb_frontend_ops sp8870_ops = {
.delsys = { SYS_DVBT },
.info = {
.name = "Spase SP8870 DVB-T",
diff --git a/drivers/media/dvb-frontends/sp887x.c b/drivers/media/dvb-frontends/sp887x.c
index 4378fe1b978e..7c511c3cd4ca 100644
--- a/drivers/media/dvb-frontends/sp887x.c
+++ b/drivers/media/dvb-frontends/sp887x.c
@@ -63,8 +63,7 @@ static int sp887x_writereg (struct sp887x_state* state, u16 reg, u16 data)
if (!(reg == 0xf1a && data == 0x000 &&
(ret == -EREMOTEIO || ret == -EFAULT)))
{
- printk("%s: writereg error "
- "(reg %03x, data %03x, ret == %i)\n",
+ printk("%s: writereg error (reg %03x, data %03x, ret == %i)\n",
__func__, reg & 0xffff, data & 0xffff, ret);
return ret;
}
@@ -562,7 +561,7 @@ static void sp887x_release(struct dvb_frontend* fe)
kfree(state);
}
-static struct dvb_frontend_ops sp887x_ops;
+static const struct dvb_frontend_ops sp887x_ops;
struct dvb_frontend* sp887x_attach(const struct sp887x_config* config,
struct i2c_adapter* i2c)
@@ -591,7 +590,7 @@ error:
return NULL;
}
-static struct dvb_frontend_ops sp887x_ops = {
+static const struct dvb_frontend_ops sp887x_ops = {
.delsys = { SYS_DVBT },
.info = {
.name = "Spase SP887x DVB-T",
diff --git a/drivers/media/dvb-frontends/stb0899_drv.c b/drivers/media/dvb-frontends/stb0899_drv.c
index 3d171b0e00c2..02347598277a 100644
--- a/drivers/media/dvb-frontends/stb0899_drv.c
+++ b/drivers/media/dvb-frontends/stb0899_drv.c
@@ -485,15 +485,8 @@ int stb0899_read_regs(struct stb0899_state *state, unsigned int reg, u8 *buf, u3
(((reg & 0xff00) == 0xf200) || ((reg & 0xff00) == 0xf600)))
_stb0899_read_reg(state, (reg | 0x00ff));
- if (unlikely(*state->verbose >= FE_DEBUGREG)) {
- int i;
-
- printk(KERN_DEBUG "%s [0x%04x]:", __func__, reg);
- for (i = 0; i < count; i++) {
- printk(" %02x", buf[i]);
- }
- printk("\n");
- }
+ dprintk(state->verbose, FE_DEBUGREG, 1,
+ "%s [0x%04x]: %*ph", __func__, reg, count, buf);
return 0;
err:
@@ -522,14 +515,8 @@ int stb0899_write_regs(struct stb0899_state *state, unsigned int reg, u8 *data,
buf[1] = reg & 0xff;
memcpy(&buf[2], data, count);
- if (unlikely(*state->verbose >= FE_DEBUGREG)) {
- int i;
-
- printk(KERN_DEBUG "%s [0x%04x]:", __func__, reg);
- for (i = 0; i < count; i++)
- printk(" %02x", data[i]);
- printk("\n");
- }
+ dprintk(state->verbose, FE_DEBUGREG, 1,
+ "%s [0x%04x]: %*ph", __func__, reg, count, data);
ret = i2c_transfer(state->i2c, &i2c_msg, 1);
/*
@@ -614,13 +601,19 @@ static int stb0899_postproc(struct stb0899_state *state, u8 ctl, int enable)
return 0;
}
-static void stb0899_release(struct dvb_frontend *fe)
+static void stb0899_detach(struct dvb_frontend *fe)
{
struct stb0899_state *state = fe->demodulator_priv;
- dprintk(state->verbose, FE_DEBUG, 1, "Release Frontend");
/* post process event */
stb0899_postproc(state, STB0899_POSTPROC_GPIO_POWER, 0);
+}
+
+static void stb0899_release(struct dvb_frontend *fe)
+{
+ struct stb0899_state *state = fe->demodulator_priv;
+
+ dprintk(state->verbose, FE_DEBUG, 1, "Release Frontend");
kfree(state);
}
@@ -1586,7 +1579,7 @@ static enum dvbfe_algo stb0899_frontend_algo(struct dvb_frontend *fe)
return DVBFE_ALGO_CUSTOM;
}
-static struct dvb_frontend_ops stb0899_ops = {
+static const struct dvb_frontend_ops stb0899_ops = {
.delsys = { SYS_DVBS, SYS_DVBS2, SYS_DSS },
.info = {
.name = "STB0899 Multistandard",
@@ -1603,6 +1596,7 @@ static struct dvb_frontend_ops stb0899_ops = {
FE_CAN_QPSK
},
+ .detach = stb0899_detach,
.release = stb0899_release,
.init = stb0899_init,
.sleep = stb0899_sleep,
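The stb0899 change just above is more than churn: teardown is split so that the GPIO power post-processing runs from the new .detach callback at the point the frontend is detached, while .release keeps only the kfree(). A sketch of the resulting pair, with hypothetical demo_ names; demo_power_off stands in for the post-processing:

#include <linux/slab.h>
#include "dvb_frontend.h"

struct demo_state;
static void demo_power_off(struct demo_state *state);	/* hypothetical */

/* Hardware quiescing at detach time... */
static void demo_detach(struct dvb_frontend *fe)
{
	struct demo_state *state = fe->demodulator_priv;

	demo_power_off(state);
}

/* ...pure memory teardown at release time. */
static void demo_release(struct dvb_frontend *fe)
{
	kfree(fe->demodulator_priv);
}

static const struct dvb_frontend_ops demo_ops = {
	/* ... */
	.detach		= demo_detach,
	.release	= demo_release,
};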
diff --git a/drivers/media/dvb-frontends/stb6000.c b/drivers/media/dvb-frontends/stb6000.c
index 73347d51f340..69c03892f2da 100644
--- a/drivers/media/dvb-frontends/stb6000.c
+++ b/drivers/media/dvb-frontends/stb6000.c
@@ -41,11 +41,10 @@ struct stb6000_priv {
u32 frequency;
};
-static int stb6000_release(struct dvb_frontend *fe)
+static void stb6000_release(struct dvb_frontend *fe)
{
kfree(fe->tuner_priv);
fe->tuner_priv = NULL;
- return 0;
}
static int stb6000_sleep(struct dvb_frontend *fe)
diff --git a/drivers/media/dvb-frontends/stb6100.c b/drivers/media/dvb-frontends/stb6100.c
index 5add1182c3ca..17a955d0031b 100644
--- a/drivers/media/dvb-frontends/stb6100.c
+++ b/drivers/media/dvb-frontends/stb6100.c
@@ -61,7 +61,7 @@ struct stb6100_lkup {
u8 reg;
};
-static int stb6100_release(struct dvb_frontend *fe);
+static void stb6100_release(struct dvb_frontend *fe);
static const struct stb6100_lkup lkup[] = {
{ 0, 950000, 0x0a },
@@ -560,14 +560,12 @@ struct dvb_frontend *stb6100_attach(struct dvb_frontend *fe,
return fe;
}
-static int stb6100_release(struct dvb_frontend *fe)
+static void stb6100_release(struct dvb_frontend *fe)
{
struct stb6100_state *state = fe->tuner_priv;
fe->tuner_priv = NULL;
kfree(state);
-
- return 0;
}
EXPORT_SYMBOL(stb6100_attach);
diff --git a/drivers/media/dvb-frontends/stv0288.c b/drivers/media/dvb-frontends/stv0288.c
index c93d9a45f7f7..45cbc898ad25 100644
--- a/drivers/media/dvb-frontends/stv0288.c
+++ b/drivers/media/dvb-frontends/stv0288.c
@@ -74,8 +74,8 @@ static int stv0288_writeregI(struct stv0288_state *state, u8 reg, u8 data)
ret = i2c_transfer(state->i2c, &msg, 1);
if (ret != 1)
- dprintk("%s: writereg error (reg == 0x%02x, val == 0x%02x, "
- "ret == %i)\n", __func__, reg, data, ret);
+ dprintk("%s: writereg error (reg == 0x%02x, val == 0x%02x, ret == %i)\n",
+ __func__, reg, data, ret);
return (ret != 1) ? -EREMOTEIO : 0;
}
@@ -465,10 +465,9 @@ static int stv0288_set_frontend(struct dvb_frontend *fe)
dprintk("%s : FE_SET_FRONTEND\n", __func__);
if (c->delivery_system != SYS_DVBS) {
- dprintk("%s: unsupported delivery "
- "system selected (%d)\n",
- __func__, c->delivery_system);
- return -EOPNOTSUPP;
+ dprintk("%s: unsupported delivery system selected (%d)\n",
+ __func__, c->delivery_system);
+ return -EOPNOTSUPP;
}
if (state->config->set_ts_params)
@@ -536,7 +535,7 @@ static void stv0288_release(struct dvb_frontend *fe)
kfree(state);
}
-static struct dvb_frontend_ops stv0288_ops = {
+static const struct dvb_frontend_ops stv0288_ops = {
.delsys = { SYS_DVBS },
.info = {
.name = "ST STV0288 DVB-S",
diff --git a/drivers/media/dvb-frontends/stv0297.c b/drivers/media/dvb-frontends/stv0297.c
index 81b27b7c0c96..db94d4d109f9 100644
--- a/drivers/media/dvb-frontends/stv0297.c
+++ b/drivers/media/dvb-frontends/stv0297.c
@@ -57,8 +57,8 @@ static int stv0297_writereg(struct stv0297_state *state, u8 reg, u8 data)
ret = i2c_transfer(state->i2c, &msg, 1);
if (ret != 1)
- dprintk("%s: writereg error (reg == 0x%02x, val == 0x%02x, "
- "ret == %i)\n", __func__, reg, data, ret);
+ dprintk("%s: writereg error (reg == 0x%02x, val == 0x%02x, ret == %i)\n",
+ __func__, reg, data, ret);
return (ret != 1) ? -1 : 0;
}
@@ -658,7 +658,7 @@ static void stv0297_release(struct dvb_frontend *fe)
kfree(state);
}
-static struct dvb_frontend_ops stv0297_ops;
+static const struct dvb_frontend_ops stv0297_ops;
struct dvb_frontend *stv0297_attach(const struct stv0297_config *config,
struct i2c_adapter *i2c)
@@ -690,7 +690,7 @@ error:
return NULL;
}
-static struct dvb_frontend_ops stv0297_ops = {
+static const struct dvb_frontend_ops stv0297_ops = {
.delsys = { SYS_DVBC_ANNEX_A },
.info = {
.name = "ST STV0297 DVB-C",
diff --git a/drivers/media/dvb-frontends/stv0299.c b/drivers/media/dvb-frontends/stv0299.c
index 7927fa925f2f..b36b21a13201 100644
--- a/drivers/media/dvb-frontends/stv0299.c
+++ b/drivers/media/dvb-frontends/stv0299.c
@@ -88,8 +88,8 @@ static int stv0299_writeregI (struct stv0299_state* state, u8 reg, u8 data)
ret = i2c_transfer (state->i2c, &msg, 1);
if (ret != 1)
- dprintk("%s: writereg error (reg == 0x%02x, val == 0x%02x, "
- "ret == %i)\n", __func__, reg, data, ret);
+ dprintk("%s: writereg error (reg == 0x%02x, val == 0x%02x, ret == %i)\n",
+ __func__, reg, data, ret);
return (ret != 1) ? -EREMOTEIO : 0;
}
@@ -673,7 +673,7 @@ static void stv0299_release(struct dvb_frontend* fe)
kfree(state);
}
-static struct dvb_frontend_ops stv0299_ops;
+static const struct dvb_frontend_ops stv0299_ops;
struct dvb_frontend* stv0299_attach(const struct stv0299_config* config,
struct i2c_adapter* i2c)
@@ -713,7 +713,7 @@ error:
return NULL;
}
-static struct dvb_frontend_ops stv0299_ops = {
+static const struct dvb_frontend_ops stv0299_ops = {
.delsys = { SYS_DVBS },
.info = {
.name = "ST STV0299 DVB-S",
@@ -761,8 +761,7 @@ module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "Turn on/off frontend debugging (default:off).");
MODULE_DESCRIPTION("ST STV0299 DVB Demodulator driver");
-MODULE_AUTHOR("Ralph Metzler, Holger Waechtler, Peter Schildmann, Felix Domke, "
- "Andreas Oberritter, Andrew de Quincey, Kenneth Aafly");
+MODULE_AUTHOR("Ralph Metzler, Holger Waechtler, Peter Schildmann, Felix Domke, Andreas Oberritter, Andrew de Quincey, Kenneth Aafly");
MODULE_LICENSE("GPL");
EXPORT_SYMBOL(stv0299_attach);
diff --git a/drivers/media/dvb-frontends/stv0367.c b/drivers/media/dvb-frontends/stv0367.c
index abc379aea713..4ac1ce2831ba 100644
--- a/drivers/media/dvb-frontends/stv0367.c
+++ b/drivers/media/dvb-frontends/stv0367.c
@@ -2272,7 +2272,7 @@ static void stv0367_release(struct dvb_frontend *fe)
kfree(state);
}
-static struct dvb_frontend_ops stv0367ter_ops = {
+static const struct dvb_frontend_ops stv0367ter_ops = {
.delsys = { SYS_DVBT },
.info = {
.name = "ST STV0367 DVB-T",
@@ -3390,7 +3390,7 @@ static int stv0367cab_read_ucblcks(struct dvb_frontend *fe, u32 *ucblocks)
return 0;
};
-static struct dvb_frontend_ops stv0367cab_ops = {
+static const struct dvb_frontend_ops stv0367cab_ops = {
.delsys = { SYS_DVBC_ANNEX_A },
.info = {
.name = "ST STV0367 DVB-C",
diff --git a/drivers/media/dvb-frontends/stv0900_core.c b/drivers/media/dvb-frontends/stv0900_core.c
index f667005a6661..43a0f69b4b14 100644
--- a/drivers/media/dvb-frontends/stv0900_core.c
+++ b/drivers/media/dvb-frontends/stv0900_core.c
@@ -1875,7 +1875,7 @@ static int stv0900_get_frontend(struct dvb_frontend *fe,
return 0;
}
-static struct dvb_frontend_ops stv0900_ops = {
+static const struct dvb_frontend_ops stv0900_ops = {
.delsys = { SYS_DVBS, SYS_DVBS2, SYS_DSS },
.info = {
.name = "STV0900 frontend",
diff --git a/drivers/media/dvb-frontends/stv0900_sw.c b/drivers/media/dvb-frontends/stv0900_sw.c
index fa63a9e929ce..bded82774f4b 100644
--- a/drivers/media/dvb-frontends/stv0900_sw.c
+++ b/drivers/media/dvb-frontends/stv0900_sw.c
@@ -1485,8 +1485,7 @@ static u32 stv0900_search_srate_coarse(struct dvb_frontend *fe)
current_step++;
direction *= -1;
- dprintk("lock: I2C_DEMOD_MODE_FIELD =0. Search started."
- " tuner freq=%d agc2=0x%x srate_coarse=%d tmg_cpt=%d\n",
+ dprintk("lock: I2C_DEMOD_MODE_FIELD =0. Search started. tuner freq=%d agc2=0x%x srate_coarse=%d tmg_cpt=%d\n",
tuner_freq, agc2_integr, coarse_srate, timingcpt);
if ((timingcpt >= 5) &&
diff --git a/drivers/media/dvb-frontends/stv090x.c b/drivers/media/dvb-frontends/stv090x.c
index 25bdf6e0f963..7ef469c0c866 100644
--- a/drivers/media/dvb-frontends/stv090x.c
+++ b/drivers/media/dvb-frontends/stv090x.c
@@ -739,14 +739,8 @@ static int stv090x_write_regs(struct stv090x_state *state, unsigned int reg, u8
buf[1] = reg & 0xff;
memcpy(&buf[2], data, count);
- if (unlikely(*state->verbose >= FE_DEBUGREG)) {
- int i;
-
- printk(KERN_DEBUG "%s [0x%04x]:", __func__, reg);
- for (i = 0; i < count; i++)
- printk(" %02x", data[i]);
- printk("\n");
- }
+ dprintk(FE_DEBUGREG, 1, "%s [0x%04x]: %*ph",
+ __func__, reg, count, data);
ret = i2c_transfer(state->i2c, &i2c_msg, 1);
if (ret != 1) {
@@ -3698,9 +3692,12 @@ static int stv090x_read_cnr(struct dvb_frontend *fe, u16 *cnr)
}
val /= 16;
last = ARRAY_SIZE(stv090x_s2cn_tab) - 1;
- div = stv090x_s2cn_tab[0].read -
- stv090x_s2cn_tab[last].read;
- *cnr = 0xFFFF - ((val * 0xFFFF) / div);
+ div = stv090x_s2cn_tab[last].real -
+ stv090x_s2cn_tab[3].real;
+ val = stv090x_table_lookup(stv090x_s2cn_tab, last, val);
+ if (val < 0)
+ val = 0;
+ *cnr = val * 0xFFFF / div;
}
break;
@@ -3720,9 +3717,10 @@ static int stv090x_read_cnr(struct dvb_frontend *fe, u16 *cnr)
}
val /= 16;
last = ARRAY_SIZE(stv090x_s1cn_tab) - 1;
- div = stv090x_s1cn_tab[0].read -
- stv090x_s1cn_tab[last].read;
- *cnr = 0xFFFF - ((val * 0xFFFF) / div);
+ div = stv090x_s1cn_tab[last].real -
+ stv090x_s1cn_tab[0].real;
+ val = stv090x_table_lookup(stv090x_s1cn_tab, last, val);
+ *cnr = val * 0xFFFF / div;
}
break;
default:
@@ -4886,7 +4884,7 @@ static int stv090x_set_gpio(struct dvb_frontend *fe, u8 gpio, u8 dir,
return stv090x_write_reg(state, STV090x_GPIOxCFG(gpio), reg);
}
-static struct dvb_frontend_ops stv090x_ops = {
+static const struct dvb_frontend_ops stv090x_ops = {
.delsys = { SYS_DVBS, SYS_DVBS2, SYS_DSS },
.info = {
.name = "STV090x Multistandard",
diff --git a/drivers/media/dvb-frontends/stv6110.c b/drivers/media/dvb-frontends/stv6110.c
index 66a5a7f2295c..6a72d0be2ec5 100644
--- a/drivers/media/dvb-frontends/stv6110.c
+++ b/drivers/media/dvb-frontends/stv6110.c
@@ -59,11 +59,10 @@ static s32 abssub(s32 a, s32 b)
return b - a;
};
-static int stv6110_release(struct dvb_frontend *fe)
+static void stv6110_release(struct dvb_frontend *fe)
{
kfree(fe->tuner_priv);
fe->tuner_priv = NULL;
- return 0;
}
static int stv6110_write_regs(struct dvb_frontend *fe, u8 buf[],
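
The release-callback hunks in this and the following tuner drivers all track one prototype change: dvb_frontend's release hooks used to return int, but no caller can act on a failure during teardown, so the hook now returns void and the boilerplate `return 0;` disappears. The resulting shape, sketched:

static void demo_release(struct dvb_frontend *fe)
{
	kfree(fe->tuner_priv);
	fe->tuner_priv = NULL;	/* nothing useful to report on release */
}
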
diff --git a/drivers/media/dvb-frontends/stv6110x.c b/drivers/media/dvb-frontends/stv6110x.c
index c611ad210b5c..66eba38f1014 100644
--- a/drivers/media/dvb-frontends/stv6110x.c
+++ b/drivers/media/dvb-frontends/stv6110x.c
@@ -335,14 +335,12 @@ static int stv6110x_get_status(struct dvb_frontend *fe, u32 *status)
}
-static int stv6110x_release(struct dvb_frontend *fe)
+static void stv6110x_release(struct dvb_frontend *fe)
{
struct stv6110x_state *stv6110x = fe->tuner_priv;
fe->tuner_priv = NULL;
kfree(stv6110x);
-
- return 0;
}
static const struct dvb_tuner_ops stv6110x_ops = {
diff --git a/drivers/media/dvb-frontends/tc90522.c b/drivers/media/dvb-frontends/tc90522.c
index 31cd32532387..4687e1546af2 100644
--- a/drivers/media/dvb-frontends/tc90522.c
+++ b/drivers/media/dvb-frontends/tc90522.c
@@ -656,7 +656,7 @@ tc90522_master_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
for (i = 0; i < num; i++)
if (msgs[i].flags & I2C_M_RD)
rd_num++;
- new_msgs = kmalloc(sizeof(*new_msgs) * (num + rd_num), GFP_KERNEL);
+ new_msgs = kmalloc_array(num + rd_num, sizeof(*new_msgs), GFP_KERNEL);
if (!new_msgs)
return -ENOMEM;
@@ -794,14 +794,13 @@ static int tc90522_probe(struct i2c_client *client,
i2c_set_adapdata(adap, state);
ret = i2c_add_adapter(adap);
if (ret < 0)
- goto err;
+ goto free_state;
cfg->tuner_i2c = state->cfg.tuner_i2c = adap;
i2c_set_clientdata(client, &state->cfg);
dev_info(&client->dev, "Toshiba TC90522 attached.\n");
return 0;
-
-err:
+free_state:
kfree(state);
return ret;
}
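
Two small hardening idioms in the tc90522 hunks: kmalloc_array() checks the element-count multiplication for overflow before allocating, where the multiplication inside a bare kmalloc() can silently wrap; and the error label is renamed from a generic `err:` to `free_state:`, following the kernel convention of naming labels after the cleanup they perform. The allocation side:

new_msgs = kmalloc_array(num + rd_num, sizeof(*new_msgs), GFP_KERNEL);
if (!new_msgs)
	return -ENOMEM;	/* an overflowing n * size yields NULL, not a wrap */
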
diff --git a/drivers/media/dvb-frontends/tda10021.c b/drivers/media/dvb-frontends/tda10021.c
index 806c56691ca5..32ba8401e743 100644
--- a/drivers/media/dvb-frontends/tda10021.c
+++ b/drivers/media/dvb-frontends/tda10021.c
@@ -77,8 +77,7 @@ static int _tda10021_writereg (struct tda10021_state* state, u8 reg, u8 data)
ret = i2c_transfer (state->i2c, &msg, 1);
if (ret != 1)
- printk("DVB: TDA10021(%d): %s, writereg error "
- "(reg == 0x%02x, val == 0x%02x, ret == %i)\n",
+ printk("DVB: TDA10021(%d): %s, writereg error (reg == 0x%02x, val == 0x%02x, ret == %i)\n",
state->frontend.dvb->num, __func__, reg, data, ret);
msleep(10);
@@ -444,7 +443,7 @@ static void tda10021_release(struct dvb_frontend* fe)
kfree(state);
}
-static struct dvb_frontend_ops tda10021_ops;
+static const struct dvb_frontend_ops tda10021_ops;
struct dvb_frontend* tda10021_attach(const struct tda1002x_config* config,
struct i2c_adapter* i2c,
@@ -484,7 +483,7 @@ error:
return NULL;
}
-static struct dvb_frontend_ops tda10021_ops = {
+static const struct dvb_frontend_ops tda10021_ops = {
.delsys = { SYS_DVBC_ANNEX_A, SYS_DVBC_ANNEX_C },
.info = {
.name = "Philips TDA10021 DVB-C",
diff --git a/drivers/media/dvb-frontends/tda10023.c b/drivers/media/dvb-frontends/tda10023.c
index 3b8c7e499d0d..8028007c68eb 100644
--- a/drivers/media/dvb-frontends/tda10023.c
+++ b/drivers/media/dvb-frontends/tda10023.c
@@ -72,8 +72,7 @@ static u8 tda10023_readreg (struct tda10023_state* state, u8 reg)
ret = i2c_transfer (state->i2c, msg, 2);
if (ret != 2) {
int num = state->frontend.dvb ? state->frontend.dvb->num : -1;
- printk(KERN_ERR "DVB: TDA10023(%d): %s: readreg error "
- "(reg == 0x%02x, ret == %i)\n",
+ printk(KERN_ERR "DVB: TDA10023(%d): %s: readreg error (reg == 0x%02x, ret == %i)\n",
num, __func__, reg, ret);
}
return b1[0];
@@ -88,8 +87,7 @@ static int tda10023_writereg (struct tda10023_state* state, u8 reg, u8 data)
ret = i2c_transfer (state->i2c, &msg, 1);
if (ret != 1) {
int num = state->frontend.dvb ? state->frontend.dvb->num : -1;
- printk(KERN_ERR "DVB: TDA10023(%d): %s, writereg error "
- "(reg == 0x%02x, val == 0x%02x, ret == %i)\n",
+ printk(KERN_ERR "DVB: TDA10023(%d): %s, writereg error (reg == 0x%02x, val == 0x%02x, ret == %i)\n",
num, __func__, reg, data, ret);
}
return (ret != 1) ? -EREMOTEIO : 0;
@@ -516,7 +514,7 @@ static void tda10023_release(struct dvb_frontend* fe)
kfree(state);
}
-static struct dvb_frontend_ops tda10023_ops;
+static const struct dvb_frontend_ops tda10023_ops;
struct dvb_frontend *tda10023_attach(const struct tda10023_config *config,
struct i2c_adapter *i2c,
@@ -573,7 +571,7 @@ error:
return NULL;
}
-static struct dvb_frontend_ops tda10023_ops = {
+static const struct dvb_frontend_ops tda10023_ops = {
.delsys = { SYS_DVBC_ANNEX_A, SYS_DVBC_ANNEX_C },
.info = {
.name = "Philips TDA10023 DVB-C",
diff --git a/drivers/media/dvb-frontends/tda10048.c b/drivers/media/dvb-frontends/tda10048.c
index c2bf89d0b0b0..92ab34c3e0be 100644
--- a/drivers/media/dvb-frontends/tda10048.c
+++ b/drivers/media/dvb-frontends/tda10048.c
@@ -1063,38 +1063,34 @@ static void tda10048_establish_defaults(struct dvb_frontend *fe)
/* Validate/default the config */
if (config->dtv6_if_freq_khz == 0) {
config->dtv6_if_freq_khz = TDA10048_IF_4300;
- printk(KERN_WARNING "%s() tda10048_config.dtv6_if_freq_khz "
- "is not set (defaulting to %d)\n",
+ printk(KERN_WARNING "%s() tda10048_config.dtv6_if_freq_khz is not set (defaulting to %d)\n",
__func__,
config->dtv6_if_freq_khz);
}
if (config->dtv7_if_freq_khz == 0) {
config->dtv7_if_freq_khz = TDA10048_IF_4300;
- printk(KERN_WARNING "%s() tda10048_config.dtv7_if_freq_khz "
- "is not set (defaulting to %d)\n",
+ printk(KERN_WARNING "%s() tda10048_config.dtv7_if_freq_khz is not set (defaulting to %d)\n",
__func__,
config->dtv7_if_freq_khz);
}
if (config->dtv8_if_freq_khz == 0) {
config->dtv8_if_freq_khz = TDA10048_IF_4300;
- printk(KERN_WARNING "%s() tda10048_config.dtv8_if_freq_khz "
- "is not set (defaulting to %d)\n",
+ printk(KERN_WARNING "%s() tda10048_config.dtv8_if_freq_khz is not set (defaulting to %d)\n",
__func__,
config->dtv8_if_freq_khz);
}
if (config->clk_freq_khz == 0) {
config->clk_freq_khz = TDA10048_CLK_16000;
- printk(KERN_WARNING "%s() tda10048_config.clk_freq_khz "
- "is not set (defaulting to %d)\n",
+ printk(KERN_WARNING "%s() tda10048_config.clk_freq_khz is not set (defaulting to %d)\n",
__func__,
config->clk_freq_khz);
}
}
-static struct dvb_frontend_ops tda10048_ops;
+static const struct dvb_frontend_ops tda10048_ops;
struct dvb_frontend *tda10048_attach(const struct tda10048_config *config,
struct i2c_adapter *i2c)
@@ -1156,7 +1152,7 @@ error:
}
EXPORT_SYMBOL(tda10048_attach);
-static struct dvb_frontend_ops tda10048_ops = {
+static const struct dvb_frontend_ops tda10048_ops = {
.delsys = { SYS_DVBT },
.info = {
.name = "NXP TDA10048HN DVB-T",
diff --git a/drivers/media/dvb-frontends/tda1004x.c b/drivers/media/dvb-frontends/tda1004x.c
index b89848313fb9..e674508c349c 100644
--- a/drivers/media/dvb-frontends/tda1004x.c
+++ b/drivers/media/dvb-frontends/tda1004x.c
@@ -1245,7 +1245,7 @@ static void tda1004x_release(struct dvb_frontend* fe)
kfree(state);
}
-static struct dvb_frontend_ops tda10045_ops = {
+static const struct dvb_frontend_ops tda10045_ops = {
.delsys = { SYS_DVBT },
.info = {
.name = "Philips TDA10045H DVB-T",
@@ -1315,7 +1315,7 @@ struct dvb_frontend* tda10045_attach(const struct tda1004x_config* config,
return &state->frontend;
}
-static struct dvb_frontend_ops tda10046_ops = {
+static const struct dvb_frontend_ops tda10046_ops = {
.delsys = { SYS_DVBT },
.info = {
.name = "Philips TDA10046H DVB-T",
diff --git a/drivers/media/dvb-frontends/tda10071.c b/drivers/media/dvb-frontends/tda10071.c
index 37ebeef2bbd0..a59f4fd09df6 100644
--- a/drivers/media/dvb-frontends/tda10071.c
+++ b/drivers/media/dvb-frontends/tda10071.c
@@ -20,7 +20,7 @@
#include "tda10071_priv.h"
-static struct dvb_frontend_ops tda10071_ops;
+static const struct dvb_frontend_ops tda10071_ops;
/*
* XXX: regmap_update_bits() does not fit our needs as it does not support
@@ -1102,7 +1102,7 @@ static int tda10071_get_tune_settings(struct dvb_frontend *fe,
return 0;
}
-static struct dvb_frontend_ops tda10071_ops = {
+static const struct dvb_frontend_ops tda10071_ops = {
.delsys = { SYS_DVBS, SYS_DVBS2 },
.info = {
.name = "NXP TDA10071",
diff --git a/drivers/media/dvb-frontends/tda10086.c b/drivers/media/dvb-frontends/tda10086.c
index 31d0acb54fe8..b6d16c05904d 100644
--- a/drivers/media/dvb-frontends/tda10086.c
+++ b/drivers/media/dvb-frontends/tda10086.c
@@ -706,7 +706,7 @@ static void tda10086_release(struct dvb_frontend* fe)
kfree(state);
}
-static struct dvb_frontend_ops tda10086_ops = {
+static const struct dvb_frontend_ops tda10086_ops = {
.delsys = { SYS_DVBS },
.info = {
.name = "Philips TDA10086 DVB-S",
diff --git a/drivers/media/dvb-frontends/tda18271c2dd.c b/drivers/media/dvb-frontends/tda18271c2dd.c
index bc247f9b553a..6859fa5d5a85 100644
--- a/drivers/media/dvb-frontends/tda18271c2dd.c
+++ b/drivers/media/dvb-frontends/tda18271c2dd.c
@@ -1126,11 +1126,10 @@ static int init(struct dvb_frontend *fe)
return 0;
}
-static int release(struct dvb_frontend *fe)
+static void release(struct dvb_frontend *fe)
{
kfree(fe->tuner_priv);
fe->tuner_priv = NULL;
- return 0;
}
diff --git a/drivers/media/dvb-frontends/tda665x.c b/drivers/media/dvb-frontends/tda665x.c
index 7ca965987f40..a63dec44295b 100644
--- a/drivers/media/dvb-frontends/tda665x.c
+++ b/drivers/media/dvb-frontends/tda665x.c
@@ -197,13 +197,12 @@ static int tda665x_set_params(struct dvb_frontend *fe)
return 0;
}
-static int tda665x_release(struct dvb_frontend *fe)
+static void tda665x_release(struct dvb_frontend *fe)
{
struct tda665x_state *state = fe->tuner_priv;
fe->tuner_priv = NULL;
kfree(state);
- return 0;
}
static const struct dvb_tuner_ops tda665x_ops = {
diff --git a/drivers/media/dvb-frontends/tda8083.c b/drivers/media/dvb-frontends/tda8083.c
index 9072d6463094..aa3200d3c352 100644
--- a/drivers/media/dvb-frontends/tda8083.c
+++ b/drivers/media/dvb-frontends/tda8083.c
@@ -421,7 +421,7 @@ static void tda8083_release(struct dvb_frontend* fe)
kfree(state);
}
-static struct dvb_frontend_ops tda8083_ops;
+static const struct dvb_frontend_ops tda8083_ops;
struct dvb_frontend* tda8083_attach(const struct tda8083_config* config,
struct i2c_adapter* i2c)
@@ -449,7 +449,7 @@ error:
return NULL;
}
-static struct dvb_frontend_ops tda8083_ops = {
+static const struct dvb_frontend_ops tda8083_ops = {
.delsys = { SYS_DVBS },
.info = {
.name = "Philips TDA8083 DVB-S",
diff --git a/drivers/media/dvb-frontends/tda8261.c b/drivers/media/dvb-frontends/tda8261.c
index e0df93191b9e..4eb294f330bc 100644
--- a/drivers/media/dvb-frontends/tda8261.c
+++ b/drivers/media/dvb-frontends/tda8261.c
@@ -152,13 +152,12 @@ static int tda8261_set_params(struct dvb_frontend *fe)
return 0;
}
-static int tda8261_release(struct dvb_frontend *fe)
+static void tda8261_release(struct dvb_frontend *fe)
{
struct tda8261_state *state = fe->tuner_priv;
fe->tuner_priv = NULL;
kfree(state);
- return 0;
}
static const struct dvb_tuner_ops tda8261_ops = {
diff --git a/drivers/media/dvb-frontends/tda826x.c b/drivers/media/dvb-frontends/tda826x.c
index 2ec671df1441..da427b4c2aaa 100644
--- a/drivers/media/dvb-frontends/tda826x.c
+++ b/drivers/media/dvb-frontends/tda826x.c
@@ -41,11 +41,10 @@ struct tda826x_priv {
u32 frequency;
};
-static int tda826x_release(struct dvb_frontend *fe)
+static void tda826x_release(struct dvb_frontend *fe)
{
kfree(fe->tuner_priv);
fe->tuner_priv = NULL;
- return 0;
}
static int tda826x_sleep(struct dvb_frontend *fe)
diff --git a/drivers/media/dvb-frontends/ts2020.c b/drivers/media/dvb-frontends/ts2020.c
index a9f6bbea6df3..931e5c98da8a 100644
--- a/drivers/media/dvb-frontends/ts2020.c
+++ b/drivers/media/dvb-frontends/ts2020.c
@@ -56,7 +56,7 @@ struct ts2020_reg_val {
static void ts2020_stat_work(struct work_struct *work);
-static int ts2020_release(struct dvb_frontend *fe)
+static void ts2020_release(struct dvb_frontend *fe)
{
struct ts2020_priv *priv = fe->tuner_priv;
struct i2c_client *client = priv->client;
@@ -64,7 +64,6 @@ static int ts2020_release(struct dvb_frontend *fe)
dev_dbg(&client->dev, "\n");
i2c_unregister_device(client);
- return 0;
}
static int ts2020_sleep(struct dvb_frontend *fe)
diff --git a/drivers/media/dvb-frontends/tua6100.c b/drivers/media/dvb-frontends/tua6100.c
index 6da12b9e55eb..05ee16d29851 100644
--- a/drivers/media/dvb-frontends/tua6100.c
+++ b/drivers/media/dvb-frontends/tua6100.c
@@ -42,11 +42,10 @@ struct tua6100_priv {
u32 frequency;
};
-static int tua6100_release(struct dvb_frontend *fe)
+static void tua6100_release(struct dvb_frontend *fe)
{
kfree(fe->tuner_priv);
fe->tuner_priv = NULL;
- return 0;
}
static int tua6100_sleep(struct dvb_frontend *fe)
diff --git a/drivers/media/dvb-frontends/ves1820.c b/drivers/media/dvb-frontends/ves1820.c
index b09fe88c40f8..178363704bd4 100644
--- a/drivers/media/dvb-frontends/ves1820.c
+++ b/drivers/media/dvb-frontends/ves1820.c
@@ -65,8 +65,8 @@ static int ves1820_writereg(struct ves1820_state *state, u8 reg, u8 data)
ret = i2c_transfer(state->i2c, &msg, 1);
if (ret != 1)
- printk("ves1820: %s(): writereg error (reg == 0x%02x, "
- "val == 0x%02x, ret == %i)\n", __func__, reg, data, ret);
+ printk("ves1820: %s(): writereg error (reg == 0x%02x, val == 0x%02x, ret == %i)\n",
+ __func__, reg, data, ret);
return (ret != 1) ? -EREMOTEIO : 0;
}
@@ -84,8 +84,8 @@ static u8 ves1820_readreg(struct ves1820_state *state, u8 reg)
ret = i2c_transfer(state->i2c, msg, 2);
if (ret != 2)
- printk("ves1820: %s(): readreg error (reg == 0x%02x, "
- "ret == %i)\n", __func__, reg, ret);
+ printk("ves1820: %s(): readreg error (reg == 0x%02x, ret == %i)\n",
+ __func__, reg, ret);
return b1[0];
}
@@ -369,7 +369,7 @@ static void ves1820_release(struct dvb_frontend* fe)
kfree(state);
}
-static struct dvb_frontend_ops ves1820_ops;
+static const struct dvb_frontend_ops ves1820_ops;
struct dvb_frontend* ves1820_attach(const struct ves1820_config* config,
struct i2c_adapter* i2c,
@@ -408,7 +408,7 @@ error:
return NULL;
}
-static struct dvb_frontend_ops ves1820_ops = {
+static const struct dvb_frontend_ops ves1820_ops = {
.delsys = { SYS_DVBC_ANNEX_A },
.info = {
.name = "VLSI VES1820 DVB-C",
diff --git a/drivers/media/dvb-frontends/ves1x93.c b/drivers/media/dvb-frontends/ves1x93.c
index ed113e216e14..d0ee52f66a8e 100644
--- a/drivers/media/dvb-frontends/ves1x93.c
+++ b/drivers/media/dvb-frontends/ves1x93.c
@@ -454,7 +454,7 @@ static int ves1x93_i2c_gate_ctrl(struct dvb_frontend* fe, int enable)
}
}
-static struct dvb_frontend_ops ves1x93_ops;
+static const struct dvb_frontend_ops ves1x93_ops;
struct dvb_frontend* ves1x93_attach(const struct ves1x93_config* config,
struct i2c_adapter* i2c)
@@ -512,7 +512,7 @@ error:
return NULL;
}
-static struct dvb_frontend_ops ves1x93_ops = {
+static const struct dvb_frontend_ops ves1x93_ops = {
.delsys = { SYS_DVBS },
.info = {
.name = "VLSI VES1x93 DVB-S",
diff --git a/drivers/media/dvb-frontends/zl10036.c b/drivers/media/dvb-frontends/zl10036.c
index 7ed81315965f..a6d020fe9b8b 100644
--- a/drivers/media/dvb-frontends/zl10036.c
+++ b/drivers/media/dvb-frontends/zl10036.c
@@ -85,8 +85,8 @@ static int zl10036_read_status_reg(struct zl10036_state *state)
deb_i2c("R(status): %02x [FL=%d]\n", status,
(status & STATUS_FL) ? 1 : 0);
if (status & STATUS_POR)
- deb_info("%s: Power-On-Reset bit enabled - "
- "need to initialize the tuner\n", __func__);
+ deb_info("%s: Power-On-Reset bit enabled - need to initialize the tuner\n",
+ __func__);
return status;
}
@@ -134,14 +134,12 @@ static int zl10036_write(struct zl10036_state *state, u8 buf[], u8 count)
return 0;
}
-static int zl10036_release(struct dvb_frontend *fe)
+static void zl10036_release(struct dvb_frontend *fe)
{
struct zl10036_state *state = fe->tuner_priv;
fe->tuner_priv = NULL;
kfree(state);
-
- return 0;
}
static int zl10036_sleep(struct dvb_frontend *fe)
diff --git a/drivers/media/dvb-frontends/zl10039.c b/drivers/media/dvb-frontends/zl10039.c
index f8c271be196c..60a2954f8ff8 100644
--- a/drivers/media/dvb-frontends/zl10039.c
+++ b/drivers/media/dvb-frontends/zl10039.c
@@ -152,8 +152,7 @@ static int zl10039_init(struct dvb_frontend *fe)
/* Reset logic */
ret = zl10039_writereg(state, GENERAL, 0x40);
if (ret < 0) {
- dprintk("Note: i2c write error normal when resetting the "
- "tuner\n");
+ dprintk("Note: i2c write error normal when resetting the tuner\n");
}
/* Wake up */
ret = zl10039_writereg(state, GENERAL, 0x01);
@@ -245,14 +244,13 @@ error:
return ret;
}
-static int zl10039_release(struct dvb_frontend *fe)
+static void zl10039_release(struct dvb_frontend *fe)
{
struct zl10039_state *state = fe->tuner_priv;
dprintk("%s\n", __func__);
kfree(state);
fe->tuner_priv = NULL;
- return 0;
}
static const struct dvb_tuner_ops zl10039_ops = {
diff --git a/drivers/media/dvb-frontends/zl10353.c b/drivers/media/dvb-frontends/zl10353.c
index 3b08176d7bec..4f3ff3e853ac 100644
--- a/drivers/media/dvb-frontends/zl10353.c
+++ b/drivers/media/dvb-frontends/zl10353.c
@@ -602,7 +602,7 @@ static void zl10353_release(struct dvb_frontend *fe)
kfree(state);
}
-static struct dvb_frontend_ops zl10353_ops;
+static const struct dvb_frontend_ops zl10353_ops;
struct dvb_frontend *zl10353_attach(const struct zl10353_config *config,
struct i2c_adapter *i2c)
@@ -634,7 +634,7 @@ error:
return NULL;
}
-static struct dvb_frontend_ops zl10353_ops = {
+static const struct dvb_frontend_ops zl10353_ops = {
.delsys = { SYS_DVBT },
.info = {
.name = "Zarlink ZL10353 DVB-T",
diff --git a/drivers/media/firewire/firedtv-avc.c b/drivers/media/firewire/firedtv-avc.c
index 251a556112a9..5bde6c209cd7 100644
--- a/drivers/media/firewire/firedtv-avc.c
+++ b/drivers/media/firewire/firedtv-avc.c
@@ -1181,8 +1181,8 @@ int avc_ca_pmt(struct firedtv *fdtv, char *msg, int length)
if (es_info_length > 0) {
pmt_cmd_id = msg[read_pos++];
if (pmt_cmd_id != 1 && pmt_cmd_id != 4)
- dev_err(fdtv->device, "invalid pmt_cmd_id %d "
- "at stream level\n", pmt_cmd_id);
+ dev_err(fdtv->device, "invalid pmt_cmd_id %d at stream level\n",
+ pmt_cmd_id);
if (es_info_length > sizeof(c->operand) - 4 -
write_pos) {
diff --git a/drivers/media/firewire/firedtv-rc.c b/drivers/media/firewire/firedtv-rc.c
index f82d4a93feb3..04dea2aac583 100644
--- a/drivers/media/firewire/firedtv-rc.c
+++ b/drivers/media/firewire/firedtv-rc.c
@@ -184,8 +184,9 @@ void fdtv_handle_rc(struct firedtv *fdtv, unsigned int code)
else if (code >= 0x4540 && code <= 0x4542)
code = oldtable[code - 0x4521];
else {
- printk(KERN_DEBUG "firedtv: invalid key code 0x%04x "
- "from remote control\n", code);
+ dev_dbg(fdtv->device,
+ "invalid key code 0x%04x from remote control\n",
+ code);
return;
}
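
The firedtv hunk above swaps a bare printk(KERN_DEBUG ...) for dev_dbg(), which prefixes the message with the driver and device name and participates in dynamic debug, so it can be toggled at runtime (e.g. echo 'file firedtv-rc.c +p' > /sys/kernel/debug/dynamic_debug/control). The general shape:

/* device-aware and dynamic-debug controlled, unlike raw printk() */
dev_dbg(dev, "invalid key code 0x%04x from remote control\n", code);
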
diff --git a/drivers/media/i2c/Kconfig b/drivers/media/i2c/Kconfig
index 2669b4bad910..b31fa6fae009 100644
--- a/drivers/media/i2c/Kconfig
+++ b/drivers/media/i2c/Kconfig
@@ -221,7 +221,7 @@ config VIDEO_ADV7604
config VIDEO_ADV7604_CEC
bool "Enable Analog Devices ADV7604 CEC support"
- depends on VIDEO_ADV7604 && MEDIA_CEC
+ depends on VIDEO_ADV7604 && MEDIA_CEC_SUPPORT
---help---
When selected the adv7604 will support the optional
HDMI CEC feature.
@@ -242,7 +242,7 @@ config VIDEO_ADV7842
config VIDEO_ADV7842_CEC
bool "Enable Analog Devices ADV7842 CEC support"
- depends on VIDEO_ADV7842 && MEDIA_CEC
+ depends on VIDEO_ADV7842 && MEDIA_CEC_SUPPORT
---help---
When selected the adv7842 will support the optional
HDMI CEC feature.
@@ -481,7 +481,7 @@ config VIDEO_ADV7511
config VIDEO_ADV7511_CEC
bool "Enable Analog Devices ADV7511 CEC support"
- depends on VIDEO_ADV7511 && MEDIA_CEC
+ depends on VIDEO_ADV7511 && MEDIA_CEC_SUPPORT
---help---
When selected the adv7511 will support the optional
HDMI CEC feature.
diff --git a/drivers/media/i2c/ad5820.c b/drivers/media/i2c/ad5820.c
index beab2f381b81..a9026a91855e 100644
--- a/drivers/media/i2c/ad5820.c
+++ b/drivers/media/i2c/ad5820.c
@@ -65,16 +65,17 @@ static int ad5820_write(struct ad5820_device *coil, u16 data)
{
struct i2c_client *client = v4l2_get_subdevdata(&coil->subdev);
struct i2c_msg msg;
+ __be16 be_data;
int r;
if (!client->adapter)
return -ENODEV;
- data = cpu_to_be16(data);
+ be_data = cpu_to_be16(data);
msg.addr = client->addr;
msg.flags = 0;
msg.len = 2;
- msg.buf = (u8 *)&data;
+ msg.buf = (u8 *)&be_data;
r = i2c_transfer(client->adapter, &msg, 1);
if (r < 0) {
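
The ad5820 fix above is an endianness-typing correction: assigning cpu_to_be16() back into the u16 parameter stores a big-endian value in a cpu-order variable, which sparse's __bitwise checks flag and which corrupts `data` for any later use. Keeping the converted value in a dedicated __be16 local fixes both. A self-contained sketch of the pattern, with demo names:

#include <linux/string.h>
#include <linux/types.h>
#include <asm/byteorder.h>

static void demo_pack_be16(u16 data, u8 buf[2])
{
	__be16 be_data = cpu_to_be16(data);	/* typed as big-endian */

	memcpy(buf, &be_data, sizeof(be_data));	/* MSB goes out first */
}
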
diff --git a/drivers/media/i2c/adv7511.c b/drivers/media/i2c/adv7511.c
index 5ba0f21bcfe4..8c9e28949ab1 100644
--- a/drivers/media/i2c/adv7511.c
+++ b/drivers/media/i2c/adv7511.c
@@ -1732,9 +1732,10 @@ static bool adv7511_check_edid_status(struct v4l2_subdev *sd)
static int adv7511_registered(struct v4l2_subdev *sd)
{
struct adv7511_state *state = get_adv7511_state(sd);
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
int err;
- err = cec_register_adapter(state->cec_adap);
+ err = cec_register_adapter(state->cec_adap, &client->dev);
if (err)
cec_delete_adapter(state->cec_adap);
return err;
@@ -1928,7 +1929,7 @@ static int adv7511_probe(struct i2c_client *client, const struct i2c_device_id *
state->cec_adap = cec_allocate_adapter(&adv7511_cec_adap_ops,
state, dev_name(&client->dev), CEC_CAP_TRANSMIT |
CEC_CAP_LOG_ADDRS | CEC_CAP_PASSTHROUGH | CEC_CAP_RC,
- ADV7511_MAX_ADDRS, &client->dev);
+ ADV7511_MAX_ADDRS);
err = PTR_ERR_OR_ZERO(state->cec_adap);
if (err) {
destroy_workqueue(state->work_queue);
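
The adv7511/adv76xx/adv7842 hunks all follow one CEC framework change: the parent struct device argument moved from cec_allocate_adapter() to cec_register_adapter(), so an adapter can be allocated before its parent device is fully known. A sketch of the resulting call sequence (demo_cec_ops and priv are placeholders):

adap = cec_allocate_adapter(&demo_cec_ops, priv, "demo",
			    CEC_CAP_TRANSMIT | CEC_CAP_LOG_ADDRS, 1);
if (IS_ERR(adap))
	return PTR_ERR(adap);

err = cec_register_adapter(adap, &client->dev);	/* device given here */
if (err)
	cec_delete_adapter(adap);
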
diff --git a/drivers/media/i2c/adv7604.c b/drivers/media/i2c/adv7604.c
index 4003831de712..d0375cac6a05 100644
--- a/drivers/media/i2c/adv7604.c
+++ b/drivers/media/i2c/adv7604.c
@@ -1566,10 +1566,24 @@ static int adv76xx_query_dv_timings(struct v4l2_subdev *sd,
V4L2_DV_INTERLACED : V4L2_DV_PROGRESSIVE;
if (is_digital_input(sd)) {
+ bool hdmi_signal = hdmi_read(sd, 0x05) & 0x80;
+ u8 vic = 0;
+ u32 w, h;
+
+ w = hdmi_read16(sd, 0x07, info->linewidth_mask);
+ h = hdmi_read16(sd, 0x09, info->field0_height_mask);
+
+ if (hdmi_signal && (io_read(sd, 0x60) & 1))
+ vic = infoframe_read(sd, 0x04);
+
+ if (vic && v4l2_find_dv_timings_cea861_vic(timings, vic) &&
+ bt->width == w && bt->height == h)
+ goto found;
+
timings->type = V4L2_DV_BT_656_1120;
- bt->width = hdmi_read16(sd, 0x07, info->linewidth_mask);
- bt->height = hdmi_read16(sd, 0x09, info->field0_height_mask);
+ bt->width = w;
+ bt->height = h;
bt->pixelclock = info->read_hdmi_pixelclock(sd);
bt->hfrontporch = hdmi_read16(sd, 0x20, info->hfrontporch_mask);
bt->hsync = hdmi_read16(sd, 0x22, info->hsync_mask);
@@ -2617,9 +2631,10 @@ static int adv76xx_subscribe_event(struct v4l2_subdev *sd,
static int adv76xx_registered(struct v4l2_subdev *sd)
{
struct adv76xx_state *state = to_state(sd);
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
int err;
- err = cec_register_adapter(state->cec_adap);
+ err = cec_register_adapter(state->cec_adap, &client->dev);
if (err)
cec_delete_adapter(state->cec_adap);
return err;
@@ -3074,13 +3089,13 @@ static int adv76xx_parse_dt(struct adv76xx_state *state)
return ret;
}
- if (!of_property_read_u32(endpoint, "default-input", &v))
+ of_node_put(endpoint);
+
+ if (!of_property_read_u32(np, "default-input", &v))
state->pdata.default_input = v;
else
state->pdata.default_input = -1;
- of_node_put(endpoint);
-
flags = bus_cfg.bus.parallel.flags;
if (flags & V4L2_MBUS_HSYNC_ACTIVE_HIGH)
@@ -3497,8 +3512,7 @@ static int adv76xx_probe(struct i2c_client *client,
state->cec_adap = cec_allocate_adapter(&adv76xx_cec_adap_ops,
state, dev_name(&client->dev),
CEC_CAP_TRANSMIT | CEC_CAP_LOG_ADDRS |
- CEC_CAP_PASSTHROUGH | CEC_CAP_RC, ADV76XX_MAX_ADDRS,
- &client->dev);
+ CEC_CAP_PASSTHROUGH | CEC_CAP_RC, ADV76XX_MAX_ADDRS);
err = PTR_ERR_OR_ZERO(state->cec_adap);
if (err)
goto err_entity;
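
Besides the CEC change and the CEA-861 VIC shortcut in query_dv_timings, the adv7604 diff fixes two DT issues: the endpoint node reference is dropped with of_node_put() as soon as endpoint parsing is done, and the "default-input" property is read from the device node np (where the binding documents it) rather than from the endpoint. Approximately, as a fragment:

endpoint = of_graph_get_next_endpoint(np, NULL);
if (!endpoint)
	return -EINVAL;

ret = v4l2_of_parse_endpoint(endpoint, &bus_cfg);
of_node_put(endpoint);		/* done with the endpoint reference */
if (ret)
	return ret;

if (!of_property_read_u32(np, "default-input", &v))
	default_input = v;	/* property lives on the device node */
else
	default_input = -1;
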
diff --git a/drivers/media/i2c/adv7842.c b/drivers/media/i2c/adv7842.c
index 8c2a52e280af..2d61f0cc2b5b 100644
--- a/drivers/media/i2c/adv7842.c
+++ b/drivers/media/i2c/adv7842.c
@@ -3250,9 +3250,10 @@ static int adv7842_subscribe_event(struct v4l2_subdev *sd,
static int adv7842_registered(struct v4l2_subdev *sd)
{
struct adv7842_state *state = to_state(sd);
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
int err;
- err = cec_register_adapter(state->cec_adap);
+ err = cec_register_adapter(state->cec_adap, &client->dev);
if (err)
cec_delete_adapter(state->cec_adap);
return err;
@@ -3568,8 +3569,7 @@ static int adv7842_probe(struct i2c_client *client,
state->cec_adap = cec_allocate_adapter(&adv7842_cec_adap_ops,
state, dev_name(&client->dev),
CEC_CAP_TRANSMIT | CEC_CAP_LOG_ADDRS |
- CEC_CAP_PASSTHROUGH | CEC_CAP_RC, ADV7842_MAX_ADDRS,
- &client->dev);
+ CEC_CAP_PASSTHROUGH | CEC_CAP_RC, ADV7842_MAX_ADDRS);
err = PTR_ERR_OR_ZERO(state->cec_adap);
if (err)
goto err_entity;
diff --git a/drivers/media/i2c/cx25840/cx25840-core.c b/drivers/media/i2c/cx25840/cx25840-core.c
index 142ae28803bb..0dcf450052ac 100644
--- a/drivers/media/i2c/cx25840/cx25840-core.c
+++ b/drivers/media/i2c/cx25840/cx25840-core.c
@@ -873,10 +873,7 @@ void cx25840_std_setup(struct i2c_client *client)
"Chroma sub-carrier freq = %d.%06d MHz\n",
fsc / 1000000, fsc % 1000000);
- v4l_dbg(1, cx25840_debug, client, "hblank %i, hactive %i, "
- "vblank %i, vactive %i, vblank656 %i, src_dec %i, "
- "burst 0x%02x, luma_lpf %i, uv_lpf %i, comb 0x%02x, "
- "sc 0x%06x\n",
+ v4l_dbg(1, cx25840_debug, client, "hblank %i, hactive %i, vblank %i, vactive %i, vblank656 %i, src_dec %i, burst 0x%02x, luma_lpf %i, uv_lpf %i, comb 0x%02x, sc 0x%06x\n",
hblank, hactive, vblank, vactive, vblank656,
src_decimation, burst, luma_lpf, uv_lpf, comb, sc);
}
@@ -5169,11 +5166,9 @@ static int cx25840_probe(struct i2c_client *client,
id = CX2310X_AV;
} else if ((device_id & 0xff) == (device_id >> 8)) {
v4l_err(client,
- "likely a confused/unresponsive cx2388[578] A/V decoder"
- " found @ 0x%x (%s)\n",
+ "likely a confused/unresponsive cx2388[578] A/V decoder found @ 0x%x (%s)\n",
client->addr << 1, client->adapter->name);
- v4l_err(client, "A method to reset it from the cx25840 driver"
- " software is not known at this time\n");
+ v4l_err(client, "A method to reset it from the cx25840 driver software is not known at this time\n");
return -ENODEV;
} else {
v4l_dbg(1, cx25840_debug, client, "cx25840 not found\n");
diff --git a/drivers/media/i2c/cx25840/cx25840-ir.c b/drivers/media/i2c/cx25840/cx25840-ir.c
index 4b782012cadc..15fbd9607cee 100644
--- a/drivers/media/i2c/cx25840/cx25840-ir.c
+++ b/drivers/media/i2c/cx25840/cx25840-ir.c
@@ -1113,8 +1113,8 @@ int cx25840_ir_log_status(struct v4l2_subdev *sd)
j = 0;
break;
}
- v4l2_info(sd, "\tNext carrier edge window: 16 clocks "
- "-%1d/+%1d, %u to %u Hz\n", i, j,
+ v4l2_info(sd, "\tNext carrier edge window: 16 clocks -%1d/+%1d, %u to %u Hz\n",
+ i, j,
clock_divider_to_freq(rxclk, 16 + j),
clock_divider_to_freq(rxclk, 16 - i));
}
@@ -1124,8 +1124,7 @@ int cx25840_ir_log_status(struct v4l2_subdev *sd)
v4l2_info(sd, "\tLow pass filter: %s\n",
filtr ? "enabled" : "disabled");
if (filtr)
- v4l2_info(sd, "\tMin acceptable pulse width (LPF): %u us, "
- "%u ns\n",
+ v4l2_info(sd, "\tMin acceptable pulse width (LPF): %u us, %u ns\n",
lpf_count_to_us(filtr),
lpf_count_to_ns(filtr));
v4l2_info(sd, "\tPulse width timer timed-out: %s\n",
diff --git a/drivers/media/i2c/msp3400-driver.c b/drivers/media/i2c/msp3400-driver.c
index 503b7c4f0a9b..201a9800ea52 100644
--- a/drivers/media/i2c/msp3400-driver.c
+++ b/drivers/media/i2c/msp3400-driver.c
@@ -146,11 +146,11 @@ int msp_reset(struct i2c_client *client)
},
};
- v4l_dbg(3, msp_debug, client, "msp_reset\n");
+ dev_dbg_lvl(&client->dev, 3, msp_debug, "msp_reset\n");
if (i2c_transfer(client->adapter, &reset[0], 1) != 1 ||
i2c_transfer(client->adapter, &reset[1], 1) != 1 ||
i2c_transfer(client->adapter, test, 2) != 2) {
- v4l_err(client, "chip reset failed\n");
+ dev_err(&client->dev, "chip reset failed\n");
return -1;
}
return 0;
@@ -182,17 +182,17 @@ static int msp_read(struct i2c_client *client, int dev, int addr)
for (err = 0; err < 3; err++) {
if (i2c_transfer(client->adapter, msgs, 2) == 2)
break;
- v4l_warn(client, "I/O error #%d (read 0x%02x/0x%02x)\n", err,
+ dev_warn(&client->dev, "I/O error #%d (read 0x%02x/0x%02x)\n", err,
dev, addr);
schedule_timeout_interruptible(msecs_to_jiffies(10));
}
if (err == 3) {
- v4l_warn(client, "resetting chip, sound will go off.\n");
+ dev_warn(&client->dev, "resetting chip, sound will go off.\n");
msp_reset(client);
return -1;
}
retval = read[0] << 8 | read[1];
- v4l_dbg(3, msp_debug, client, "msp_read(0x%x, 0x%x): 0x%x\n",
+ dev_dbg_lvl(&client->dev, 3, msp_debug, "msp_read(0x%x, 0x%x): 0x%x\n",
dev, addr, retval);
return retval;
}
@@ -218,17 +218,17 @@ static int msp_write(struct i2c_client *client, int dev, int addr, int val)
buffer[3] = val >> 8;
buffer[4] = val & 0xff;
- v4l_dbg(3, msp_debug, client, "msp_write(0x%x, 0x%x, 0x%x)\n",
+ dev_dbg_lvl(&client->dev, 3, msp_debug, "msp_write(0x%x, 0x%x, 0x%x)\n",
dev, addr, val);
for (err = 0; err < 3; err++) {
if (i2c_master_send(client, buffer, 5) == 5)
break;
- v4l_warn(client, "I/O error #%d (write 0x%02x/0x%02x)\n", err,
+ dev_warn(&client->dev, "I/O error #%d (write 0x%02x/0x%02x)\n", err,
dev, addr);
schedule_timeout_interruptible(msecs_to_jiffies(10));
}
if (err == 3) {
- v4l_warn(client, "resetting chip, sound will go off.\n");
+ dev_warn(&client->dev, "resetting chip, sound will go off.\n");
msp_reset(client);
return -1;
}
@@ -301,7 +301,7 @@ void msp_set_scart(struct i2c_client *client, int in, int out)
} else
state->acb = 0xf60; /* Mute Input and SCART 1 Output */
- v4l_dbg(1, msp_debug, client, "scart switch: %s => %d (ACB=0x%04x)\n",
+ dev_dbg_lvl(&client->dev, 1, msp_debug, "scart switch: %s => %d (ACB=0x%04x)\n",
scart_names[in], out, state->acb);
msp_write_dsp(client, 0x13, state->acb);
@@ -359,7 +359,7 @@ static int msp_s_ctrl(struct v4l2_ctrl *ctrl)
if (!reallymuted)
val = (val * 0x7f / 65535) << 8;
- v4l_dbg(1, msp_debug, client, "mute=%s scanning=%s volume=%d\n",
+ dev_dbg_lvl(&client->dev, 1, msp_debug, "mute=%s scanning=%s volume=%d\n",
state->muted->val ? "on" : "off",
state->scan_in_progress ? "yes" : "no",
state->volume->val);
@@ -426,7 +426,7 @@ static int msp_s_radio(struct v4l2_subdev *sd)
if (state->radio)
return 0;
state->radio = 1;
- v4l_dbg(1, msp_debug, client, "switching to radio mode\n");
+ dev_dbg_lvl(&client->dev, 1, msp_debug, "switching to radio mode\n");
state->watch_stereo = 0;
switch (state->opmode) {
case OPMODE_MANUAL:
@@ -461,7 +461,7 @@ static int msp_querystd(struct v4l2_subdev *sd, v4l2_std_id *id)
*id &= state->detected_std;
- v4l_dbg(2, msp_debug, client,
+ dev_dbg_lvl(&client->dev, 2, msp_debug,
"detected standard: %s(0x%08Lx)\n",
msp_standard_std_name(state->std), state->detected_std);
@@ -555,7 +555,7 @@ static int msp_s_i2s_clock_freq(struct v4l2_subdev *sd, u32 freq)
struct msp_state *state = to_state(sd);
struct i2c_client *client = v4l2_get_subdevdata(sd);
- v4l_dbg(1, msp_debug, client, "Setting I2S speed to %d\n", freq);
+ dev_dbg_lvl(&client->dev, 1, msp_debug, "Setting I2S speed to %d\n", freq);
switch (freq) {
case 1024000:
@@ -579,7 +579,7 @@ static int msp_log_status(struct v4l2_subdev *sd)
if (state->opmode == OPMODE_AUTOSELECT)
msp_detect_stereo(client);
- v4l_info(client, "%s rev1 = 0x%04x rev2 = 0x%04x\n",
+ dev_info(&client->dev, "%s rev1 = 0x%04x rev2 = 0x%04x\n",
client->name, state->rev1, state->rev2);
snprintf(prefix, sizeof(prefix), "%s: Audio: ", sd->name);
v4l2_ctrl_handler_log_status(&state->hdl, prefix);
@@ -596,23 +596,23 @@ static int msp_log_status(struct v4l2_subdev *sd)
default: p = "unknown"; break;
}
if (state->mode == MSP_MODE_EXTERN) {
- v4l_info(client, "Mode: %s\n", p);
+ dev_info(&client->dev, "Mode: %s\n", p);
} else if (state->opmode == OPMODE_MANUAL) {
- v4l_info(client, "Mode: %s (%s%s)\n", p,
+ dev_info(&client->dev, "Mode: %s (%s%s)\n", p,
(state->rxsubchans & V4L2_TUNER_SUB_STEREO) ? "stereo" : "mono",
(state->rxsubchans & V4L2_TUNER_SUB_LANG2) ? ", dual" : "");
} else {
if (state->opmode == OPMODE_AUTODETECT)
- v4l_info(client, "Mode: %s\n", p);
- v4l_info(client, "Standard: %s (%s%s)\n",
+ dev_info(&client->dev, "Mode: %s\n", p);
+ dev_info(&client->dev, "Standard: %s (%s%s)\n",
msp_standard_std_name(state->std),
(state->rxsubchans & V4L2_TUNER_SUB_STEREO) ? "stereo" : "mono",
(state->rxsubchans & V4L2_TUNER_SUB_LANG2) ? ", dual" : "");
}
- v4l_info(client, "Audmode: 0x%04x\n", state->audmode);
- v4l_info(client, "Routing: 0x%08x (input) 0x%08x (output)\n",
+ dev_info(&client->dev, "Audmode: 0x%04x\n", state->audmode);
+ dev_info(&client->dev, "Routing: 0x%08x (input) 0x%08x (output)\n",
state->route_in, state->route_out);
- v4l_info(client, "ACB: 0x%04x\n", state->acb);
+ dev_info(&client->dev, "ACB: 0x%04x\n", state->acb);
return 0;
}
@@ -620,7 +620,7 @@ static int msp_log_status(struct v4l2_subdev *sd)
static int msp_suspend(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
- v4l_dbg(1, msp_debug, client, "suspend\n");
+ dev_dbg_lvl(&client->dev, 1, msp_debug, "suspend\n");
msp_reset(client);
return 0;
}
@@ -628,7 +628,7 @@ static int msp_suspend(struct device *dev)
static int msp_resume(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
- v4l_dbg(1, msp_debug, client, "resume\n");
+ dev_dbg_lvl(&client->dev, 1, msp_debug, "resume\n");
msp_wake_thread(client);
return 0;
}
@@ -670,6 +670,13 @@ static const struct v4l2_subdev_ops msp_ops = {
/* ----------------------------------------------------------------------- */
+
+static const char * const opmode_str[] = {
+ [OPMODE_MANUAL] = "manual",
+ [OPMODE_AUTODETECT] = "autodetect",
+ [OPMODE_AUTOSELECT] = "autodetect and autoselect",
+};
+
static int msp_probe(struct i2c_client *client, const struct i2c_device_id *id)
{
struct msp_state *state;
@@ -689,7 +696,7 @@ static int msp_probe(struct i2c_client *client, const struct i2c_device_id *id)
strlcpy(client->name, "msp3400", sizeof(client->name));
if (msp_reset(client) == -1) {
- v4l_dbg(1, msp_debug, client, "msp3400 not found\n");
+ dev_dbg_lvl(&client->dev, 1, msp_debug, "msp3400 not found\n");
return -ENODEV;
}
@@ -724,10 +731,10 @@ static int msp_probe(struct i2c_client *client, const struct i2c_device_id *id)
state->rev1 = msp_read_dsp(client, 0x1e);
if (state->rev1 != -1)
state->rev2 = msp_read_dsp(client, 0x1f);
- v4l_dbg(1, msp_debug, client, "rev1=0x%04x, rev2=0x%04x\n",
+ dev_dbg_lvl(&client->dev, 1, msp_debug, "rev1=0x%04x, rev2=0x%04x\n",
state->rev1, state->rev2);
if (state->rev1 == -1 || (state->rev1 == 0 && state->rev2 == 0)) {
- v4l_dbg(1, msp_debug, client,
+ dev_dbg_lvl(&client->dev, 1, msp_debug,
"not an msp3400 (cannot read chip version)\n");
return -ENODEV;
}
@@ -791,7 +798,8 @@ static int msp_probe(struct i2c_client *client, const struct i2c_device_id *id)
msp_family == 3 && msp_revision == 'G' && msp_prod_hi == 3;
state->opmode = opmode;
- if (state->opmode == OPMODE_AUTO) {
+ if (state->opmode < OPMODE_MANUAL
+ || state->opmode > OPMODE_AUTOSELECT) {
/* MSP revision G and up have both autodetect and autoselect */
if (msp_revision >= 'G')
state->opmode = OPMODE_AUTOSELECT;
@@ -829,43 +837,35 @@ static int msp_probe(struct i2c_client *client, const struct i2c_device_id *id)
v4l2_ctrl_cluster(2, &state->volume);
v4l2_ctrl_handler_setup(hdl);
- /* hello world :-) */
- v4l_info(client, "MSP%d4%02d%c-%c%d found @ 0x%x (%s)\n",
- msp_family, msp_product,
- msp_revision, msp_hard, msp_rom,
- client->addr << 1, client->adapter->name);
- v4l_info(client, "%s ", client->name);
- if (state->has_nicam && state->has_radio)
- printk(KERN_CONT "supports nicam and radio, ");
- else if (state->has_nicam)
- printk(KERN_CONT "supports nicam, ");
- else if (state->has_radio)
- printk(KERN_CONT "supports radio, ");
- printk(KERN_CONT "mode is ");
+ dev_info(&client->dev,
+ "MSP%d4%02d%c-%c%d found on %s: supports %s%s%s, mode is %s\n",
+ msp_family, msp_product,
+ msp_revision, msp_hard, msp_rom,
+ client->adapter->name,
+ (state->has_nicam) ? "nicam" : "",
+ (state->has_nicam && state->has_radio) ? " and " : "",
+ (state->has_radio) ? "radio" : "",
+ opmode_str[state->opmode]);
/* version-specific initialization */
switch (state->opmode) {
case OPMODE_MANUAL:
- printk(KERN_CONT "manual");
thread_func = msp3400c_thread;
break;
case OPMODE_AUTODETECT:
- printk(KERN_CONT "autodetect");
thread_func = msp3410d_thread;
break;
case OPMODE_AUTOSELECT:
- printk(KERN_CONT "autodetect and autoselect");
thread_func = msp34xxg_thread;
break;
}
- printk(KERN_CONT "\n");
/* startup control thread if needed */
if (thread_func) {
state->kthread = kthread_run(thread_func, client, "msp34xx");
if (IS_ERR(state->kthread))
- v4l_warn(client, "kernel_thread() failed\n");
+ dev_warn(&client->dev, "kernel_thread() failed\n");
msp_wake_thread(client);
}
return 0;
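
The msp3400 probe rework above bundles three things: the v4l_dbg()/v4l_info() wrappers become plain device-based logging, the module-parameter check is widened from `opmode == OPMODE_AUTO` to a full range check so any out-of-range value falls back to autodetection, and the old KERN_CONT chain is collapsed into a single dev_info(). Continuation printks can interleave with other CPUs' output, so building the whole line in one call is the safer idiom; a generic sketch (demo names only):

static const char * const demo_feat[] = {
	"", "nicam", "radio", "nicam and radio",
};

dev_info(&client->dev, "chip found, supports %s, mode is %s\n",
	 demo_feat[(has_nicam ? 1 : 0) | (has_radio ? 2 : 0)],
	 opmode_str[opmode]);
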
diff --git a/drivers/media/i2c/msp3400-kthreads.c b/drivers/media/i2c/msp3400-kthreads.c
index 17120804fab7..eec7aa4c6f98 100644
--- a/drivers/media/i2c/msp3400-kthreads.c
+++ b/drivers/media/i2c/msp3400-kthreads.c
@@ -220,7 +220,7 @@ void msp3400c_set_mode(struct i2c_client *client, int mode)
int tuner = (state->route_in >> 3) & 1;
int i;
- v4l_dbg(1, msp_debug, client, "set_mode: %d\n", mode);
+ dev_dbg_lvl(&client->dev, 1, msp_debug, "set_mode: %d\n", mode);
state->mode = mode;
state->rxsubchans = V4L2_TUNER_SUB_MONO;
@@ -266,7 +266,7 @@ static void msp3400c_set_audmode(struct i2c_client *client)
/* this method would break everything, let's make sure
* it's never called
*/
- v4l_dbg(1, msp_debug, client,
+ dev_dbg_lvl(&client->dev, 1, msp_debug,
"set_audmode called with mode=%d instead of set_source (ignored)\n",
state->audmode);
return;
@@ -295,7 +295,7 @@ static void msp3400c_set_audmode(struct i2c_client *client)
/* switch demodulator */
switch (state->mode) {
case MSP_MODE_FM_TERRA:
- v4l_dbg(1, msp_debug, client, "FM set_audmode: %s\n", modestr);
+ dev_dbg_lvl(&client->dev, 1, msp_debug, "FM set_audmode: %s\n", modestr);
switch (audmode) {
case V4L2_TUNER_MODE_STEREO:
msp_write_dsp(client, 0x000e, 0x3001);
@@ -309,7 +309,7 @@ static void msp3400c_set_audmode(struct i2c_client *client)
}
break;
case MSP_MODE_FM_SAT:
- v4l_dbg(1, msp_debug, client, "SAT set_audmode: %s\n", modestr);
+ dev_dbg_lvl(&client->dev, 1, msp_debug, "SAT set_audmode: %s\n", modestr);
switch (audmode) {
case V4L2_TUNER_MODE_MONO:
msp3400c_set_carrier(client, MSP_CARRIER(6.5), MSP_CARRIER(6.5));
@@ -329,31 +329,31 @@ static void msp3400c_set_audmode(struct i2c_client *client)
case MSP_MODE_FM_NICAM1:
case MSP_MODE_FM_NICAM2:
case MSP_MODE_AM_NICAM:
- v4l_dbg(1, msp_debug, client,
+ dev_dbg_lvl(&client->dev, 1, msp_debug,
"NICAM set_audmode: %s\n", modestr);
if (state->nicam_on)
src = 0x0100; /* NICAM */
break;
case MSP_MODE_BTSC:
- v4l_dbg(1, msp_debug, client,
+ dev_dbg_lvl(&client->dev, 1, msp_debug,
"BTSC set_audmode: %s\n", modestr);
break;
case MSP_MODE_EXTERN:
- v4l_dbg(1, msp_debug, client,
+ dev_dbg_lvl(&client->dev, 1, msp_debug,
"extern set_audmode: %s\n", modestr);
src = 0x0200; /* SCART */
break;
case MSP_MODE_FM_RADIO:
- v4l_dbg(1, msp_debug, client,
+ dev_dbg_lvl(&client->dev, 1, msp_debug,
"FM-Radio set_audmode: %s\n", modestr);
break;
default:
- v4l_dbg(1, msp_debug, client, "mono set_audmode\n");
+ dev_dbg_lvl(&client->dev, 1, msp_debug, "mono set_audmode\n");
return;
}
/* switch audio */
- v4l_dbg(1, msp_debug, client, "set audmode %d\n", audmode);
+ dev_dbg_lvl(&client->dev, 1, msp_debug, "set audmode %d\n", audmode);
switch (audmode) {
case V4L2_TUNER_MODE_STEREO:
case V4L2_TUNER_MODE_LANG1_LANG2:
@@ -361,7 +361,7 @@ static void msp3400c_set_audmode(struct i2c_client *client)
break;
case V4L2_TUNER_MODE_MONO:
if (state->mode == MSP_MODE_AM_NICAM) {
- v4l_dbg(1, msp_debug, client, "switching to AM mono\n");
+ dev_dbg_lvl(&client->dev, 1, msp_debug, "switching to AM mono\n");
/* AM mono decoding is handled by tuner, not MSP chip */
/* SCART switching control register */
msp_set_scart(client, SCART_MONO, 0);
@@ -377,7 +377,7 @@ static void msp3400c_set_audmode(struct i2c_client *client)
src |= 0x0010;
break;
}
- v4l_dbg(1, msp_debug, client,
+ dev_dbg_lvl(&client->dev, 1, msp_debug,
"set_audmode final source/matrix = 0x%x\n", src);
msp_set_source(client, src);
@@ -388,23 +388,23 @@ static void msp3400c_print_mode(struct i2c_client *client)
struct msp_state *state = to_state(i2c_get_clientdata(client));
if (state->main == state->second)
- v4l_dbg(1, msp_debug, client,
+ dev_dbg_lvl(&client->dev, 1, msp_debug,
"mono sound carrier: %d.%03d MHz\n",
state->main / 910000, (state->main / 910) % 1000);
else
- v4l_dbg(1, msp_debug, client,
+ dev_dbg_lvl(&client->dev, 1, msp_debug,
"main sound carrier: %d.%03d MHz\n",
state->main / 910000, (state->main / 910) % 1000);
if (state->mode == MSP_MODE_FM_NICAM1 || state->mode == MSP_MODE_FM_NICAM2)
- v4l_dbg(1, msp_debug, client,
+ dev_dbg_lvl(&client->dev, 1, msp_debug,
"NICAM/FM carrier : %d.%03d MHz\n",
state->second / 910000, (state->second/910) % 1000);
if (state->mode == MSP_MODE_AM_NICAM)
- v4l_dbg(1, msp_debug, client,
+ dev_dbg_lvl(&client->dev, 1, msp_debug,
"NICAM/AM carrier : %d.%03d MHz\n",
state->second / 910000, (state->second / 910) % 1000);
if (state->mode == MSP_MODE_FM_TERRA && state->main != state->second) {
- v4l_dbg(1, msp_debug, client,
+ dev_dbg_lvl(&client->dev, 1, msp_debug,
"FM-stereo carrier : %d.%03d MHz\n",
state->second / 910000, (state->second / 910) % 1000);
}
@@ -425,7 +425,7 @@ static int msp3400c_detect_stereo(struct i2c_client *client)
val = msp_read_dsp(client, 0x18);
if (val > 32767)
val -= 65536;
- v4l_dbg(2, msp_debug, client,
+ dev_dbg_lvl(&client->dev, 2, msp_debug,
"stereo detect register: %d\n", val);
if (val > 8192) {
rxsubchans = V4L2_TUNER_SUB_STEREO;
@@ -440,7 +440,7 @@ static int msp3400c_detect_stereo(struct i2c_client *client)
case MSP_MODE_FM_NICAM2:
case MSP_MODE_AM_NICAM:
val = msp_read_dem(client, 0x23);
- v4l_dbg(2, msp_debug, client, "nicam sync=%d, mode=%d\n",
+ dev_dbg_lvl(&client->dev, 2, msp_debug, "nicam sync=%d, mode=%d\n",
val & 1, (val & 0x1e) >> 1);
if (val & 1) {
@@ -471,14 +471,14 @@ static int msp3400c_detect_stereo(struct i2c_client *client)
}
if (rxsubchans != state->rxsubchans) {
update = 1;
- v4l_dbg(1, msp_debug, client,
+ dev_dbg_lvl(&client->dev, 1, msp_debug,
"watch: rxsubchans %02x => %02x\n",
state->rxsubchans, rxsubchans);
state->rxsubchans = rxsubchans;
}
if (newnicam != state->nicam_on) {
update = 1;
- v4l_dbg(1, msp_debug, client, "watch: nicam %d => %d\n",
+ dev_dbg_lvl(&client->dev, 1, msp_debug, "watch: nicam %d => %d\n",
state->nicam_on, newnicam);
state->nicam_on = newnicam;
}
@@ -508,23 +508,23 @@ int msp3400c_thread(void *data)
struct msp3400c_carrier_detect *cd;
int count, max1, max2, val1, val2, val, i;
- v4l_dbg(1, msp_debug, client, "msp3400 daemon started\n");
+ dev_dbg_lvl(&client->dev, 1, msp_debug, "msp3400 daemon started\n");
state->detected_std = V4L2_STD_ALL;
set_freezable();
for (;;) {
- v4l_dbg(2, msp_debug, client, "msp3400 thread: sleep\n");
+ dev_dbg_lvl(&client->dev, 2, msp_debug, "msp3400 thread: sleep\n");
msp_sleep(state, -1);
- v4l_dbg(2, msp_debug, client, "msp3400 thread: wakeup\n");
+ dev_dbg_lvl(&client->dev, 2, msp_debug, "msp3400 thread: wakeup\n");
restart:
- v4l_dbg(2, msp_debug, client, "thread: restart scan\n");
+ dev_dbg_lvl(&client->dev, 2, msp_debug, "thread: restart scan\n");
state->restart = 0;
if (kthread_should_stop())
break;
if (state->radio || MSP_MODE_EXTERN == state->mode) {
/* no carrier scan, just unmute */
- v4l_dbg(1, msp_debug, client,
+ dev_dbg_lvl(&client->dev, 1, msp_debug,
"thread: no carrier scan\n");
state->scan_in_progress = 0;
msp_update_volume(state);
@@ -553,7 +553,7 @@ restart:
/* autodetect doesn't work well with AM ... */
max1 = 3;
count = 0;
- v4l_dbg(1, msp_debug, client, "AM sound override\n");
+ dev_dbg_lvl(&client->dev, 1, msp_debug, "AM sound override\n");
}
for (i = 0; i < count; i++) {
@@ -565,7 +565,7 @@ restart:
val -= 65536;
if (val1 < val)
val1 = val, max1 = i;
- v4l_dbg(1, msp_debug, client,
+ dev_dbg_lvl(&client->dev, 1, msp_debug,
"carrier1 val: %5d / %s\n", val, cd[i].name);
}
@@ -602,7 +602,7 @@ restart:
val -= 65536;
if (val2 < val)
val2 = val, max2 = i;
- v4l_dbg(1, msp_debug, client,
+ dev_dbg_lvl(&client->dev, 1, msp_debug,
"carrier2 val: %5d / %s\n", val, cd[i].name);
}
@@ -687,7 +687,7 @@ no_second:
watch_stereo(client);
}
}
- v4l_dbg(1, msp_debug, client, "thread: exit\n");
+ dev_dbg_lvl(&client->dev, 1, msp_debug, "thread: exit\n");
return 0;
}
@@ -698,23 +698,23 @@ int msp3410d_thread(void *data)
struct msp_state *state = to_state(i2c_get_clientdata(client));
int val, i, std, count;
- v4l_dbg(1, msp_debug, client, "msp3410 daemon started\n");
+ dev_dbg_lvl(&client->dev, 1, msp_debug, "msp3410 daemon started\n");
state->detected_std = V4L2_STD_ALL;
set_freezable();
for (;;) {
- v4l_dbg(2, msp_debug, client, "msp3410 thread: sleep\n");
+ dev_dbg_lvl(&client->dev, 2, msp_debug, "msp3410 thread: sleep\n");
msp_sleep(state, -1);
- v4l_dbg(2, msp_debug, client, "msp3410 thread: wakeup\n");
+ dev_dbg_lvl(&client->dev, 2, msp_debug, "msp3410 thread: wakeup\n");
restart:
- v4l_dbg(2, msp_debug, client, "thread: restart scan\n");
+ dev_dbg_lvl(&client->dev, 2, msp_debug, "thread: restart scan\n");
state->restart = 0;
if (kthread_should_stop())
break;
if (state->mode == MSP_MODE_EXTERN) {
/* no carrier scan needed, just unmute */
- v4l_dbg(1, msp_debug, client,
+ dev_dbg_lvl(&client->dev, 1, msp_debug,
"thread: no carrier scan\n");
state->scan_in_progress = 0;
msp_update_volume(state);
@@ -740,7 +740,7 @@ restart:
goto restart;
if (msp_debug)
- v4l_dbg(2, msp_debug, client,
+ dev_dbg_lvl(&client->dev, 2, msp_debug,
"setting standard: %s (0x%04x)\n",
msp_standard_std_name(std), std);
@@ -758,14 +758,14 @@ restart:
val = msp_read_dem(client, 0x7e);
if (val < 0x07ff)
break;
- v4l_dbg(2, msp_debug, client,
+ dev_dbg_lvl(&client->dev, 2, msp_debug,
"detection still in progress\n");
}
}
for (i = 0; msp_stdlist[i].name != NULL; i++)
if (msp_stdlist[i].retval == val)
break;
- v4l_dbg(1, msp_debug, client, "current standard: %s (0x%04x)\n",
+ dev_dbg_lvl(&client->dev, 1, msp_debug, "current standard: %s (0x%04x)\n",
msp_standard_std_name(val), val);
state->main = msp_stdlist[i].main;
state->second = msp_stdlist[i].second;
@@ -775,8 +775,7 @@ restart:
if (msp_amsound && !state->radio &&
(state->v4l2_std & V4L2_STD_SECAM) && (val != 0x0009)) {
/* autodetection has failed, let backup */
- v4l_dbg(1, msp_debug, client, "autodetection failed,"
- " switching to backup standard: %s (0x%04x)\n",
+ dev_dbg_lvl(&client->dev, 1, msp_debug, "autodetection failed, switching to backup standard: %s (0x%04x)\n",
msp_stdlist[8].name ?
msp_stdlist[8].name : "unknown", val);
state->std = val = 0x0009;
@@ -850,7 +849,7 @@ restart:
watch_stereo(client);
}
}
- v4l_dbg(1, msp_debug, client, "thread: exit\n");
+ dev_dbg_lvl(&client->dev, 1, msp_debug, "thread: exit\n");
return 0;
}
@@ -867,23 +866,23 @@ static int msp34xxg_modus(struct i2c_client *client)
struct msp_state *state = to_state(i2c_get_clientdata(client));
if (state->radio) {
- v4l_dbg(1, msp_debug, client, "selected radio modus\n");
+ dev_dbg_lvl(&client->dev, 1, msp_debug, "selected radio modus\n");
return 0x0001;
}
if (state->v4l2_std == V4L2_STD_NTSC_M_JP) {
- v4l_dbg(1, msp_debug, client, "selected M (EIA-J) modus\n");
+ dev_dbg_lvl(&client->dev, 1, msp_debug, "selected M (EIA-J) modus\n");
return 0x4001;
}
if (state->v4l2_std == V4L2_STD_NTSC_M_KR) {
- v4l_dbg(1, msp_debug, client, "selected M (A2) modus\n");
+ dev_dbg_lvl(&client->dev, 1, msp_debug, "selected M (A2) modus\n");
return 0x0001;
}
if (state->v4l2_std == V4L2_STD_SECAM_L) {
- v4l_dbg(1, msp_debug, client, "selected SECAM-L modus\n");
+ dev_dbg_lvl(&client->dev, 1, msp_debug, "selected SECAM-L modus\n");
return 0x6001;
}
if (state->v4l2_std & V4L2_STD_MN) {
- v4l_dbg(1, msp_debug, client, "selected M (BTSC) modus\n");
+ dev_dbg_lvl(&client->dev, 1, msp_debug, "selected M (BTSC) modus\n");
return 0x2001;
}
return 0x7001;
@@ -927,7 +926,7 @@ static void msp34xxg_set_source(struct i2c_client *client, u16 reg, int in)
else
source = (in << 8) | matrix;
- v4l_dbg(1, msp_debug, client,
+ dev_dbg_lvl(&client->dev, 1, msp_debug,
"set source to %d (0x%x) for output %02x\n", in, source, reg);
msp_write_dsp(client, reg, source);
}
@@ -996,23 +995,23 @@ int msp34xxg_thread(void *data)
struct msp_state *state = to_state(i2c_get_clientdata(client));
int val, i;
- v4l_dbg(1, msp_debug, client, "msp34xxg daemon started\n");
+ dev_dbg_lvl(&client->dev, 1, msp_debug, "msp34xxg daemon started\n");
state->detected_std = V4L2_STD_ALL;
set_freezable();
for (;;) {
- v4l_dbg(2, msp_debug, client, "msp34xxg thread: sleep\n");
+ dev_dbg_lvl(&client->dev, 2, msp_debug, "msp34xxg thread: sleep\n");
msp_sleep(state, -1);
- v4l_dbg(2, msp_debug, client, "msp34xxg thread: wakeup\n");
+ dev_dbg_lvl(&client->dev, 2, msp_debug, "msp34xxg thread: wakeup\n");
restart:
- v4l_dbg(1, msp_debug, client, "thread: restart scan\n");
+ dev_dbg_lvl(&client->dev, 1, msp_debug, "thread: restart scan\n");
state->restart = 0;
if (kthread_should_stop())
break;
if (state->mode == MSP_MODE_EXTERN) {
/* no carrier scan needed, just unmute */
- v4l_dbg(1, msp_debug, client,
+ dev_dbg_lvl(&client->dev, 1, msp_debug,
"thread: no carrier scan\n");
state->scan_in_progress = 0;
msp_update_volume(state);
@@ -1029,7 +1028,7 @@ restart:
goto unmute;
/* watch autodetect */
- v4l_dbg(1, msp_debug, client,
+ dev_dbg_lvl(&client->dev, 1, msp_debug,
"started autodetect, waiting for result\n");
for (i = 0; i < 10; i++) {
if (msp_sleep(state, 100))
@@ -1041,17 +1040,17 @@ restart:
state->std = val;
break;
}
- v4l_dbg(2, msp_debug, client,
+ dev_dbg_lvl(&client->dev, 2, msp_debug,
"detection still in progress\n");
}
if (state->std == 1) {
- v4l_dbg(1, msp_debug, client,
+ dev_dbg_lvl(&client->dev, 1, msp_debug,
"detection still in progress after 10 tries. giving up.\n");
continue;
}
unmute:
- v4l_dbg(1, msp_debug, client,
+ dev_dbg_lvl(&client->dev, 1, msp_debug,
"detected standard: %s (0x%04x)\n",
msp_standard_std_name(state->std), state->std);
state->detected_std = msp_standard_std(state->std);
@@ -1084,7 +1083,7 @@ unmute:
goto restart;
}
}
- v4l_dbg(1, msp_debug, client, "thread: exit\n");
+ dev_dbg_lvl(&client->dev, 1, msp_debug, "thread: exit\n");
return 0;
}
@@ -1111,7 +1110,7 @@ static int msp34xxg_detect_stereo(struct i2c_client *client)
state->rxsubchans =
V4L2_TUNER_SUB_LANG1 | V4L2_TUNER_SUB_LANG2;
}
- v4l_dbg(1, msp_debug, client,
+ dev_dbg_lvl(&client->dev, 1, msp_debug,
"status=0x%x, stereo=%d, bilingual=%d -> rxsubchans=%d\n",
status, is_stereo, is_bilingual, state->rxsubchans);
return (oldrx != state->rxsubchans);
diff --git a/drivers/media/i2c/smiapp-pll.c b/drivers/media/i2c/smiapp-pll.c
index e3348db56c46..771db56332b2 100644
--- a/drivers/media/i2c/smiapp-pll.c
+++ b/drivers/media/i2c/smiapp-pll.c
@@ -479,7 +479,8 @@ int smiapp_pll_calculate(struct device *dev,
return 0;
}
- dev_info(dev, "unable to compute pre_pll divisor\n");
+ dev_dbg(dev, "unable to compute pre_pll divisor\n");
+
return rval;
}
EXPORT_SYMBOL_GPL(smiapp_pll_calculate);
diff --git a/drivers/media/i2c/smiapp/smiapp-core.c b/drivers/media/i2c/smiapp/smiapp-core.c
index 44f8c7e10a35..59872b31f832 100644
--- a/drivers/media/i2c/smiapp/smiapp-core.c
+++ b/drivers/media/i2c/smiapp/smiapp-core.c
@@ -26,6 +26,7 @@
#include <linux/gpio.h>
#include <linux/gpio/consumer.h>
#include <linux/module.h>
+#include <linux/pm_runtime.h>
#include <linux/regulator/consumer.h>
#include <linux/slab.h>
#include <linux/smiapp.h>
@@ -68,10 +69,9 @@ static int smiapp_read_frame_fmt(struct smiapp_sensor *sensor)
struct i2c_client *client = v4l2_get_subdevdata(&sensor->src->sd);
u32 fmt_model_type, fmt_model_subtype, ncol_desc, nrow_desc;
unsigned int i;
- int rval;
+ int pixel_count = 0;
int line_count = 0;
- int embedded_start = -1, embedded_end = -1;
- int image_start = 0;
+ int rval;
rval = smiapp_read(sensor, SMIAPP_REG_U8_FRAME_FORMAT_MODEL_TYPE,
&fmt_model_type);
@@ -101,12 +101,11 @@ static int smiapp_read_frame_fmt(struct smiapp_sensor *sensor)
u32 pixels;
char *which;
char *what;
+ u32 reg;
if (fmt_model_type == SMIAPP_FRAME_FORMAT_MODEL_TYPE_2BYTE) {
- rval = smiapp_read(
- sensor,
- SMIAPP_REG_U16_FRAME_FORMAT_DESCRIPTOR_2(i),
- &desc);
+ reg = SMIAPP_REG_U16_FRAME_FORMAT_DESCRIPTOR_2(i);
+ rval = smiapp_read(sensor, reg, &desc);
if (rval)
return rval;
@@ -117,10 +116,8 @@ static int smiapp_read_frame_fmt(struct smiapp_sensor *sensor)
pixels = desc & SMIAPP_FRAME_FORMAT_DESC_2_PIXELS_MASK;
} else if (fmt_model_type
== SMIAPP_FRAME_FORMAT_MODEL_TYPE_4BYTE) {
- rval = smiapp_read(
- sensor,
- SMIAPP_REG_U32_FRAME_FORMAT_DESCRIPTOR_4(i),
- &desc);
+ reg = SMIAPP_REG_U32_FRAME_FORMAT_DESCRIPTOR_4(i);
+ rval = smiapp_read(sensor, reg, &desc);
if (rval)
return rval;
@@ -159,40 +156,47 @@ static int smiapp_read_frame_fmt(struct smiapp_sensor *sensor)
break;
default:
what = "invalid";
- dev_dbg(&client->dev, "pixelcode %d\n", pixelcode);
break;
}
- dev_dbg(&client->dev, "%s pixels: %d %s\n",
- what, pixels, which);
-
- if (i < ncol_desc)
+ dev_dbg(&client->dev,
+ "0x%8.8x %s pixels: %d %s (pixelcode %u)\n", reg,
+ what, pixels, which, pixelcode);
+
+ if (i < ncol_desc) {
+ if (pixelcode ==
+ SMIAPP_FRAME_FORMAT_DESC_PIXELCODE_VISIBLE)
+ sensor->visible_pixel_start = pixel_count;
+ pixel_count += pixels;
continue;
+ }
/* Handle row descriptors */
- if (pixelcode
- == SMIAPP_FRAME_FORMAT_DESC_PIXELCODE_EMBEDDED) {
- embedded_start = line_count;
- } else {
- if (pixelcode == SMIAPP_FRAME_FORMAT_DESC_PIXELCODE_VISIBLE
- || pixels >= sensor->limits[SMIAPP_LIMIT_MIN_FRAME_LENGTH_LINES] / 2)
- image_start = line_count;
- if (embedded_start != -1 && embedded_end == -1)
- embedded_end = line_count;
+ switch (pixelcode) {
+ case SMIAPP_FRAME_FORMAT_DESC_PIXELCODE_EMBEDDED:
+ if (sensor->embedded_end)
+ break;
+ sensor->embedded_start = line_count;
+ sensor->embedded_end = line_count + pixels;
+ break;
+ case SMIAPP_FRAME_FORMAT_DESC_PIXELCODE_VISIBLE:
+ sensor->image_start = line_count;
+ break;
}
line_count += pixels;
}
- if (embedded_start == -1 || embedded_end == -1) {
- embedded_start = 0;
- embedded_end = 0;
+ if (sensor->embedded_end > sensor->image_start) {
+ dev_dbg(&client->dev,
+ "adjusting image start line to %u (was %u)\n",
+ sensor->embedded_end, sensor->image_start);
+ sensor->image_start = sensor->embedded_end;
}
- sensor->image_start = image_start;
-
dev_dbg(&client->dev, "embedded data from lines %d to %d\n",
- embedded_start, embedded_end);
- dev_dbg(&client->dev, "image data starts at line %d\n", image_start);
+ sensor->embedded_start, sensor->embedded_end);
+ dev_dbg(&client->dev, "image data starts at line %d\n",
+ sensor->image_start);
return 0;
}
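
The frame-format rework above changes bookkeeping rather than parsing: column descriptors now accumulate a pixel count and record where visible pixels start, row descriptors record the first embedded-data span directly in the sensor state, and the image start line is clamped past the embedded lines. A rough model of the row-side logic (all names illustrative, not the driver's):

line = 0;
for (i = 0; i < nrows; i++) {
	switch (desc[i].code) {
	case DEMO_EMBEDDED:
		if (!embedded_end) {		/* first span wins */
			embedded_start = line;
			embedded_end = line + desc[i].lines;
		}
		break;
	case DEMO_VISIBLE:
		image_start = line;
		break;
	}
	line += desc[i].lines;
}
if (embedded_end > image_start)	/* image can't start inside embedded data */
	image_start = embedded_end;
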
@@ -443,8 +447,7 @@ static int smiapp_set_ctrl(struct v4l2_ctrl *ctrl)
orient |= SMIAPP_IMAGE_ORIENTATION_VFLIP;
orient ^= sensor->hvflip_inv_mask;
- rval = smiapp_write(sensor,
- SMIAPP_REG_U8_IMAGE_ORIENTATION,
+ rval = smiapp_write(sensor, SMIAPP_REG_U8_IMAGE_ORIENTATION,
orient);
if (rval < 0)
return rval;
@@ -459,10 +462,8 @@ static int smiapp_set_ctrl(struct v4l2_ctrl *ctrl)
__smiapp_update_exposure_limits(sensor);
if (exposure > sensor->exposure->maximum) {
- sensor->exposure->val =
- sensor->exposure->maximum;
- rval = smiapp_set_ctrl(
- sensor->exposure);
+ sensor->exposure->val = sensor->exposure->maximum;
+ rval = smiapp_set_ctrl(sensor->exposure);
if (rval < 0)
return rval;
}
@@ -621,7 +622,7 @@ static int smiapp_init_controls(struct smiapp_sensor *sensor)
static int smiapp_init_late_controls(struct smiapp_sensor *sensor)
{
unsigned long *valid_link_freqs = &sensor->valid_link_freqs[
- sensor->csi_format->compressed - SMIAPP_COMPRESSED_BASE];
+ sensor->csi_format->compressed - sensor->compressed_min_bpp];
unsigned int max, i;
for (i = 0; i < ARRAY_SIZE(sensor->test_data); i++) {
@@ -754,6 +755,7 @@ static int smiapp_get_mbus_formats(struct smiapp_sensor *sensor)
{
struct i2c_client *client = v4l2_get_subdevdata(&sensor->src->sd);
struct smiapp_pll *pll = &sensor->pll;
+ u8 compressed_max_bpp = 0;
unsigned int type, n;
unsigned int i, pixel_order;
int rval;
@@ -826,16 +828,27 @@ static int smiapp_get_mbus_formats(struct smiapp_sensor *sensor)
pll->scale_m = sensor->scale_m;
for (i = 0; i < ARRAY_SIZE(smiapp_csi_data_formats); i++) {
+ sensor->compressed_min_bpp =
+ min(smiapp_csi_data_formats[i].compressed,
+ sensor->compressed_min_bpp);
+ compressed_max_bpp =
+ max(smiapp_csi_data_formats[i].compressed,
+ compressed_max_bpp);
+ }
+
+ sensor->valid_link_freqs = devm_kcalloc(
+ &client->dev,
+ compressed_max_bpp - sensor->compressed_min_bpp + 1,
+		sizeof(*sensor->valid_link_freqs), GFP_KERNEL);
+	if (!sensor->valid_link_freqs)
+		return -ENOMEM;
+
+ for (i = 0; i < ARRAY_SIZE(smiapp_csi_data_formats); i++) {
const struct smiapp_csi_data_format *f =
&smiapp_csi_data_formats[i];
unsigned long *valid_link_freqs =
&sensor->valid_link_freqs[
- f->compressed - SMIAPP_COMPRESSED_BASE];
+ f->compressed - sensor->compressed_min_bpp];
unsigned int j;
- BUG_ON(f->compressed < SMIAPP_COMPRESSED_BASE);
- BUG_ON(f->compressed > SMIAPP_COMPRESSED_MAX);
-
if (!(sensor->default_mbus_frame_fmts & 1 << i))
continue;
@@ -914,12 +927,6 @@ static int smiapp_update_mode(struct smiapp_sensor *sensor)
unsigned int binning_mode;
int rval;
- dev_dbg(&client->dev, "frame size: %dx%d\n",
- sensor->src->crop[SMIAPP_PAD_SRC].width,
- sensor->src->crop[SMIAPP_PAD_SRC].height);
- dev_dbg(&client->dev, "csi format width: %d\n",
- sensor->csi_format->width);
-
/* Binning has to be set up here; it affects limits */
if (sensor->binning_horizontal == 1 &&
sensor->binning_vertical == 1) {
@@ -1196,9 +1203,17 @@ out:
* Power management
*/
-static int smiapp_power_on(struct smiapp_sensor *sensor)
+static int smiapp_power_on(struct device *dev)
{
- struct i2c_client *client = v4l2_get_subdevdata(&sensor->src->sd);
+ struct i2c_client *client = to_i2c_client(dev);
+ struct v4l2_subdev *subdev = i2c_get_clientdata(client);
+ struct smiapp_subdev *ssd = to_smiapp_subdev(subdev);
+ /*
+ * The sub-device related to the I2C device is always the
+ * source one, i.e. ssds[0].
+ */
+ struct smiapp_sensor *sensor =
+ container_of(ssd, struct smiapp_sensor, ssds[0]);
unsigned int sleep;
int rval;
@@ -1307,8 +1322,7 @@ static int smiapp_power_on(struct smiapp_sensor *sensor)
if (!sensor->pixel_array)
return 0;
- rval = v4l2_ctrl_handler_setup(
- &sensor->pixel_array->ctrl_handler);
+ rval = v4l2_ctrl_handler_setup(&sensor->pixel_array->ctrl_handler);
if (rval)
goto out_cci_addr_fail;
@@ -1325,16 +1339,24 @@ static int smiapp_power_on(struct smiapp_sensor *sensor)
return 0;
out_cci_addr_fail:
+
gpiod_set_value(sensor->xshutdown, 0);
clk_disable_unprepare(sensor->ext_clk);
out_xclk_fail:
regulator_disable(sensor->vana);
+
return rval;
}
-static void smiapp_power_off(struct smiapp_sensor *sensor)
+static int smiapp_power_off(struct device *dev)
{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct v4l2_subdev *subdev = i2c_get_clientdata(client);
+ struct smiapp_subdev *ssd = to_smiapp_subdev(subdev);
+ struct smiapp_sensor *sensor =
+ container_of(ssd, struct smiapp_sensor, ssds[0]);
+
/*
* Currently power/clock to lens are enabled/disabled separately
* but they are essentially the same signals. So if the sensor is
@@ -1352,31 +1374,31 @@ static void smiapp_power_off(struct smiapp_sensor *sensor)
usleep_range(5000, 5000);
regulator_disable(sensor->vana);
sensor->streaming = false;
+
+ return 0;
}
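/*
 * Hypothetical helper (not in the patch) capturing the dev-to-sensor
 * lookup that both runtime PM callbacks above open-code: the struct
 * device passed in is the I2C client's, and ssds[0] is always the
 * source sub-device.
 */
static struct smiapp_sensor *example_dev_to_sensor(struct device *dev)
{
	struct v4l2_subdev *subdev =
		i2c_get_clientdata(to_i2c_client(dev));
	struct smiapp_subdev *ssd = to_smiapp_subdev(subdev);

	return container_of(ssd, struct smiapp_sensor, ssds[0]);
}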
static int smiapp_set_power(struct v4l2_subdev *subdev, int on)
{
- struct smiapp_sensor *sensor = to_smiapp_sensor(subdev);
- int ret = 0;
+ int rval;
- mutex_lock(&sensor->power_mutex);
+ if (!on) {
+ pm_runtime_mark_last_busy(subdev->dev);
+ pm_runtime_put_autosuspend(subdev->dev);
- if (on && !sensor->power_count) {
- /* Power on and perform initialisation. */
- ret = smiapp_power_on(sensor);
- if (ret < 0)
- goto out;
- } else if (!on && sensor->power_count == 1) {
- smiapp_power_off(sensor);
+ return 0;
}
- /* Update the power count. */
- sensor->power_count += on ? 1 : -1;
- WARN_ON(sensor->power_count < 0);
+ rval = pm_runtime_get_sync(subdev->dev);
+ if (rval >= 0)
+ return 0;
-out:
- mutex_unlock(&sensor->power_mutex);
- return ret;
+ if (rval != -EBUSY && rval != -EAGAIN)
+ pm_runtime_set_active(subdev->dev);
+
+ pm_runtime_put(subdev->dev);
+
+ return rval;
}
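/*
 * Sketch of the generic runtime PM idiom behind the error path above
 * (simplified; requires <linux/pm_runtime.h>): pm_runtime_get_sync()
 * raises the usage count even when it fails, so the reference must be
 * dropped again before propagating the error.
 */
static int example_resume_and_use(struct device *dev)
{
	int rval = pm_runtime_get_sync(dev);

	if (rval < 0) {
		pm_runtime_put_noidle(dev);	/* balance the failed get */
		return rval;
	}

	/* ... access the powered-up device here ... */

	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);
	return 0;
}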
/* -----------------------------------------------------------------------------
@@ -1614,7 +1636,8 @@ static int __smiapp_get_format(struct v4l2_subdev *subdev,
struct smiapp_subdev *ssd = to_smiapp_subdev(subdev);
if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
- fmt->format = *v4l2_subdev_get_try_format(subdev, cfg, fmt->pad);
+ fmt->format = *v4l2_subdev_get_try_format(subdev, cfg,
+ fmt->pad);
} else {
struct v4l2_rect *r;
@@ -1714,7 +1737,6 @@ static void smiapp_propagate(struct v4l2_subdev *subdev,
static const struct smiapp_csi_data_format
*smiapp_validate_csi_data_format(struct smiapp_sensor *sensor, u32 code)
{
- const struct smiapp_csi_data_format *csi_format = sensor->csi_format;
unsigned int i;
for (i = 0; i < ARRAY_SIZE(smiapp_csi_data_formats); i++) {
@@ -1723,7 +1745,7 @@ static const struct smiapp_csi_data_format
return &smiapp_csi_data_formats[i];
}
- return csi_format;
+ return sensor->csi_format;
}
static int smiapp_set_format_source(struct v4l2_subdev *subdev,
@@ -1769,7 +1791,7 @@ static int smiapp_set_format_source(struct v4l2_subdev *subdev,
valid_link_freqs =
&sensor->valid_link_freqs[sensor->csi_format->compressed
- - SMIAPP_COMPRESSED_BASE];
+ - sensor->compressed_min_bpp];
__v4l2_ctrl_modify_range(
sensor->link_freq, 0,
@@ -2057,8 +2079,7 @@ static int smiapp_set_compose(struct v4l2_subdev *subdev,
smiapp_set_compose_scaler(subdev, cfg, sel, crops, comp);
*comp = sel->r;
- smiapp_propagate(subdev, cfg, sel->which,
- V4L2_SEL_TGT_COMPOSE);
+ smiapp_propagate(subdev, cfg, sel->which, V4L2_SEL_TGT_COMPOSE);
if (sel->which == V4L2_SUBDEV_FORMAT_ACTIVE)
return smiapp_update_mode(sensor);
@@ -2135,9 +2156,8 @@ static int smiapp_set_crop(struct v4l2_subdev *subdev,
->height;
src_size = &_r;
} else {
- src_size =
- v4l2_subdev_get_try_compose(
- subdev, cfg, ssd->sink_pad);
+ src_size = v4l2_subdev_get_try_compose(
+ subdev, cfg, ssd->sink_pad);
}
}
@@ -2161,6 +2181,15 @@ static int smiapp_set_crop(struct v4l2_subdev *subdev,
return 0;
}
+static void smiapp_get_native_size(struct smiapp_subdev *ssd,
+ struct v4l2_rect *r)
+{
+ r->top = 0;
+ r->left = 0;
+ r->width = ssd->sensor->limits[SMIAPP_LIMIT_X_ADDR_MAX] + 1;
+ r->height = ssd->sensor->limits[SMIAPP_LIMIT_Y_ADDR_MAX] + 1;
+}
+
static int __smiapp_get_selection(struct v4l2_subdev *subdev,
struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_selection *sel)
@@ -2192,17 +2221,12 @@ static int __smiapp_get_selection(struct v4l2_subdev *subdev,
switch (sel->target) {
case V4L2_SEL_TGT_CROP_BOUNDS:
case V4L2_SEL_TGT_NATIVE_SIZE:
- if (ssd == sensor->pixel_array) {
- sel->r.left = sel->r.top = 0;
- sel->r.width =
- sensor->limits[SMIAPP_LIMIT_X_ADDR_MAX] + 1;
- sel->r.height =
- sensor->limits[SMIAPP_LIMIT_Y_ADDR_MAX] + 1;
- } else if (sel->pad == ssd->sink_pad) {
+ if (ssd == sensor->pixel_array)
+ smiapp_get_native_size(ssd, &sel->r);
+ else if (sel->pad == ssd->sink_pad)
sel->r = sink_fmt;
- } else {
+ else
sel->r = *comp;
- }
break;
case V4L2_SEL_TGT_CROP:
case V4L2_SEL_TGT_COMPOSE_BOUNDS:
@@ -2303,15 +2327,26 @@ smiapp_sysfs_nvm_read(struct device *dev, struct device_attribute *attr,
return -EBUSY;
if (!sensor->nvm_size) {
+ int rval;
+
/* NVM not read yet - read it now */
sensor->nvm_size = sensor->hwcfg->nvm_size;
- if (smiapp_set_power(subdev, 1) < 0)
+
+ rval = pm_runtime_get_sync(&client->dev);
+ if (rval < 0) {
+ if (rval != -EBUSY && rval != -EAGAIN)
+ pm_runtime_set_active(&client->dev);
+ pm_runtime_put(&client->dev);
return -ENODEV;
+ }
+
if (smiapp_read_nvm(sensor, sensor->nvm)) {
dev_err(&client->dev, "nvm read failed\n");
return -ENODEV;
}
- smiapp_set_power(subdev, 0);
+
+ pm_runtime_mark_last_busy(&client->dev);
+ pm_runtime_put_autosuspend(&client->dev);
}
/*
* NVM is still way below a PAGE_SIZE, so we can safely
@@ -2475,383 +2510,160 @@ static const struct v4l2_subdev_ops smiapp_ops;
static const struct v4l2_subdev_internal_ops smiapp_internal_ops;
static const struct media_entity_operations smiapp_entity_ops;
-static int smiapp_register_subdevs(struct smiapp_sensor *sensor)
+static int smiapp_register_subdev(struct smiapp_sensor *sensor,
+ struct smiapp_subdev *ssd,
+ struct smiapp_subdev *sink_ssd,
+ u16 source_pad, u16 sink_pad, u32 link_flags)
{
struct i2c_client *client = v4l2_get_subdevdata(&sensor->src->sd);
- struct smiapp_subdev *ssds[] = {
- sensor->scaler,
- sensor->binner,
- sensor->pixel_array,
- };
- unsigned int i;
int rval;
- for (i = 0; i < SMIAPP_SUBDEVS - 1; i++) {
- struct smiapp_subdev *this = ssds[i + 1];
- struct smiapp_subdev *last = ssds[i];
-
- if (!last)
- continue;
+ if (!sink_ssd)
+ return 0;
- rval = media_entity_pads_init(&this->sd.entity,
- this->npads, this->pads);
- if (rval) {
- dev_err(&client->dev,
- "media_entity_pads_init failed\n");
- return rval;
- }
+ rval = media_entity_pads_init(&ssd->sd.entity,
+ ssd->npads, ssd->pads);
+ if (rval) {
+ dev_err(&client->dev,
+ "media_entity_pads_init failed\n");
+ return rval;
+ }
- rval = v4l2_device_register_subdev(sensor->src->sd.v4l2_dev,
- &this->sd);
- if (rval) {
- dev_err(&client->dev,
- "v4l2_device_register_subdev failed\n");
- return rval;
- }
+ rval = v4l2_device_register_subdev(sensor->src->sd.v4l2_dev,
+ &ssd->sd);
+ if (rval) {
+ dev_err(&client->dev,
+ "v4l2_device_register_subdev failed\n");
+ return rval;
+ }
- rval = media_create_pad_link(&this->sd.entity,
- this->source_pad,
- &last->sd.entity,
- last->sink_pad,
- MEDIA_LNK_FL_ENABLED |
- MEDIA_LNK_FL_IMMUTABLE);
- if (rval) {
- dev_err(&client->dev,
- "media_create_pad_link failed\n");
- return rval;
- }
+ rval = media_create_pad_link(&ssd->sd.entity, source_pad,
+ &sink_ssd->sd.entity, sink_pad,
+ link_flags);
+ if (rval) {
+ dev_err(&client->dev,
+ "media_create_pad_link failed\n");
+ v4l2_device_unregister_subdev(&ssd->sd);
+ return rval;
}
return 0;
}
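/*
 * (Unlike the removed loop, the helper also unregisters the
 * sub-device again when media_create_pad_link() fails, so
 * smiapp_registered() below only needs to unwind registrations that
 * completed.)
 */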
-static void smiapp_cleanup(struct smiapp_sensor *sensor)
+static void smiapp_unregistered(struct v4l2_subdev *subdev)
{
- struct i2c_client *client = v4l2_get_subdevdata(&sensor->src->sd);
-
- device_remove_file(&client->dev, &dev_attr_nvm);
- device_remove_file(&client->dev, &dev_attr_ident);
+ struct smiapp_sensor *sensor = to_smiapp_sensor(subdev);
+ unsigned int i;
- smiapp_free_controls(sensor);
+ for (i = 1; i < sensor->ssds_used; i++)
+ v4l2_device_unregister_subdev(&sensor->ssds[i].sd);
}
-static int smiapp_init(struct smiapp_sensor *sensor)
+static int smiapp_registered(struct v4l2_subdev *subdev)
{
- struct i2c_client *client = v4l2_get_subdevdata(&sensor->src->sd);
- struct smiapp_pll *pll = &sensor->pll;
- struct smiapp_subdev *last = NULL;
- unsigned int i;
+ struct smiapp_sensor *sensor = to_smiapp_sensor(subdev);
int rval;
- sensor->vana = devm_regulator_get(&client->dev, "vana");
- if (IS_ERR(sensor->vana)) {
- dev_err(&client->dev, "could not get regulator for vana\n");
- return PTR_ERR(sensor->vana);
- }
-
- sensor->ext_clk = devm_clk_get(&client->dev, NULL);
- if (IS_ERR(sensor->ext_clk)) {
- dev_err(&client->dev, "could not get clock (%ld)\n",
- PTR_ERR(sensor->ext_clk));
- return -EPROBE_DEFER;
- }
-
- rval = clk_set_rate(sensor->ext_clk,
- sensor->hwcfg->ext_clk);
- if (rval < 0) {
- dev_err(&client->dev,
- "unable to set clock freq to %u\n",
- sensor->hwcfg->ext_clk);
- return rval;
+ if (sensor->scaler) {
+ rval = smiapp_register_subdev(
+ sensor, sensor->binner, sensor->scaler,
+ SMIAPP_PAD_SRC, SMIAPP_PAD_SINK,
+ MEDIA_LNK_FL_ENABLED | MEDIA_LNK_FL_IMMUTABLE);
+ if (rval < 0)
+ return rval;
}
- sensor->xshutdown = devm_gpiod_get_optional(&client->dev, "xshutdown",
- GPIOD_OUT_LOW);
- if (IS_ERR(sensor->xshutdown))
- return PTR_ERR(sensor->xshutdown);
-
- rval = smiapp_power_on(sensor);
+ rval = smiapp_register_subdev(
+ sensor, sensor->pixel_array, sensor->binner,
+ SMIAPP_PA_PAD_SRC, SMIAPP_PAD_SINK,
+ MEDIA_LNK_FL_ENABLED | MEDIA_LNK_FL_IMMUTABLE);
if (rval)
- return -ENODEV;
-
- rval = smiapp_identify_module(sensor);
- if (rval) {
- rval = -ENODEV;
- goto out_power_off;
- }
-
- rval = smiapp_get_all_limits(sensor);
- if (rval) {
- rval = -ENODEV;
- goto out_power_off;
- }
-
- /*
- * Handle Sensor Module orientation on the board.
- *
- * The application of H-FLIP and V-FLIP on the sensor is modified by
- * the sensor orientation on the board.
- *
- * For SMIAPP_BOARD_SENSOR_ORIENT_180 the default behaviour is to set
- * both H-FLIP and V-FLIP for normal operation which also implies
- * that a set/unset operation for user space HFLIP and VFLIP v4l2
- * controls will need to be internally inverted.
- *
- * Rotation also changes the bayer pattern.
- */
- if (sensor->hwcfg->module_board_orient ==
- SMIAPP_MODULE_BOARD_ORIENT_180)
- sensor->hvflip_inv_mask = SMIAPP_IMAGE_ORIENTATION_HFLIP |
- SMIAPP_IMAGE_ORIENTATION_VFLIP;
-
- rval = smiapp_call_quirk(sensor, limits);
- if (rval) {
- dev_err(&client->dev, "limits quirks failed\n");
- goto out_power_off;
- }
-
- if (sensor->limits[SMIAPP_LIMIT_BINNING_CAPABILITY]) {
- u32 val;
-
- rval = smiapp_read(sensor,
- SMIAPP_REG_U8_BINNING_SUBTYPES, &val);
- if (rval < 0) {
- rval = -ENODEV;
- goto out_power_off;
- }
- sensor->nbinning_subtypes = min_t(u8, val,
- SMIAPP_BINNING_SUBTYPES);
-
- for (i = 0; i < sensor->nbinning_subtypes; i++) {
- rval = smiapp_read(
- sensor, SMIAPP_REG_U8_BINNING_TYPE_n(i), &val);
- if (rval < 0) {
- rval = -ENODEV;
- goto out_power_off;
- }
- sensor->binning_subtypes[i] =
- *(struct smiapp_binning_subtype *)&val;
+ goto out_err;
- dev_dbg(&client->dev, "binning %xx%x\n",
- sensor->binning_subtypes[i].horizontal,
- sensor->binning_subtypes[i].vertical);
- }
- }
- sensor->binning_horizontal = 1;
- sensor->binning_vertical = 1;
+ return 0;
- if (device_create_file(&client->dev, &dev_attr_ident) != 0) {
- dev_err(&client->dev, "sysfs ident entry creation failed\n");
- rval = -ENOENT;
- goto out_power_off;
- }
- /* SMIA++ NVM initialization - it will be read from the sensor
- * when it is first requested by userspace.
- */
- if (sensor->minfo.smiapp_version && sensor->hwcfg->nvm_size) {
- sensor->nvm = devm_kzalloc(&client->dev,
- sensor->hwcfg->nvm_size, GFP_KERNEL);
- if (sensor->nvm == NULL) {
- dev_err(&client->dev, "nvm buf allocation failed\n");
- rval = -ENOMEM;
- goto out_cleanup;
- }
+out_err:
+ smiapp_unregistered(subdev);
- if (device_create_file(&client->dev, &dev_attr_nvm) != 0) {
- dev_err(&client->dev, "sysfs nvm entry failed\n");
- rval = -EBUSY;
- goto out_cleanup;
- }
- }
+ return rval;
+}
- /* We consider this as profile 0 sensor if any of these are zero. */
- if (!sensor->limits[SMIAPP_LIMIT_MIN_OP_SYS_CLK_DIV] ||
- !sensor->limits[SMIAPP_LIMIT_MAX_OP_SYS_CLK_DIV] ||
- !sensor->limits[SMIAPP_LIMIT_MIN_OP_PIX_CLK_DIV] ||
- !sensor->limits[SMIAPP_LIMIT_MAX_OP_PIX_CLK_DIV]) {
- sensor->minfo.smiapp_profile = SMIAPP_PROFILE_0;
- } else if (sensor->limits[SMIAPP_LIMIT_SCALING_CAPABILITY]
- != SMIAPP_SCALING_CAPABILITY_NONE) {
- if (sensor->limits[SMIAPP_LIMIT_SCALING_CAPABILITY]
- == SMIAPP_SCALING_CAPABILITY_HORIZONTAL)
- sensor->minfo.smiapp_profile = SMIAPP_PROFILE_1;
- else
- sensor->minfo.smiapp_profile = SMIAPP_PROFILE_2;
- sensor->scaler = &sensor->ssds[sensor->ssds_used];
- sensor->ssds_used++;
- } else if (sensor->limits[SMIAPP_LIMIT_DIGITAL_CROP_CAPABILITY]
- == SMIAPP_DIGITAL_CROP_CAPABILITY_INPUT_CROP) {
- sensor->scaler = &sensor->ssds[sensor->ssds_used];
- sensor->ssds_used++;
- }
- sensor->binner = &sensor->ssds[sensor->ssds_used];
- sensor->ssds_used++;
- sensor->pixel_array = &sensor->ssds[sensor->ssds_used];
- sensor->ssds_used++;
+static void smiapp_cleanup(struct smiapp_sensor *sensor)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&sensor->src->sd);
- sensor->scale_m = sensor->limits[SMIAPP_LIMIT_SCALER_N_MIN];
+ device_remove_file(&client->dev, &dev_attr_nvm);
+ device_remove_file(&client->dev, &dev_attr_ident);
- /* prepare PLL configuration input values */
- pll->bus_type = SMIAPP_PLL_BUS_TYPE_CSI2;
- pll->csi2.lanes = sensor->hwcfg->lanes;
- pll->ext_clk_freq_hz = sensor->hwcfg->ext_clk;
- pll->scale_n = sensor->limits[SMIAPP_LIMIT_SCALER_N_MIN];
- /* Profile 0 sensors have no separate OP clock branch. */
- if (sensor->minfo.smiapp_profile == SMIAPP_PROFILE_0)
- pll->flags |= SMIAPP_PLL_FLAG_NO_OP_CLOCKS;
-
- for (i = 0; i < SMIAPP_SUBDEVS; i++) {
- struct {
- struct smiapp_subdev *ssd;
- char *name;
- } const __this[] = {
- { sensor->scaler, "scaler", },
- { sensor->binner, "binner", },
- { sensor->pixel_array, "pixel array", },
- }, *_this = &__this[i];
- struct smiapp_subdev *this = _this->ssd;
-
- if (!this)
- continue;
+ smiapp_free_controls(sensor);
+}
- if (this != sensor->src)
- v4l2_subdev_init(&this->sd, &smiapp_ops);
+static void smiapp_create_subdev(struct smiapp_sensor *sensor,
+ struct smiapp_subdev *ssd, const char *name,
+ unsigned short num_pads)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&sensor->src->sd);
- this->sensor = sensor;
+ if (!ssd)
+ return;
- if (this == sensor->pixel_array) {
- this->npads = 1;
- } else {
- this->npads = 2;
- this->source_pad = 1;
- }
+ if (ssd != sensor->src)
+ v4l2_subdev_init(&ssd->sd, &smiapp_ops);
- snprintf(this->sd.name,
- sizeof(this->sd.name), "%s %s %d-%4.4x",
- sensor->minfo.name, _this->name,
- i2c_adapter_id(client->adapter), client->addr);
-
- this->sink_fmt.width =
- sensor->limits[SMIAPP_LIMIT_X_ADDR_MAX] + 1;
- this->sink_fmt.height =
- sensor->limits[SMIAPP_LIMIT_Y_ADDR_MAX] + 1;
- this->compose.width = this->sink_fmt.width;
- this->compose.height = this->sink_fmt.height;
- this->crop[this->source_pad] = this->compose;
- this->pads[this->source_pad].flags = MEDIA_PAD_FL_SOURCE;
- if (this != sensor->pixel_array) {
- this->crop[this->sink_pad] = this->compose;
- this->pads[this->sink_pad].flags = MEDIA_PAD_FL_SINK;
- }
+ ssd->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+ ssd->sensor = sensor;
- this->sd.entity.ops = &smiapp_entity_ops;
+ ssd->npads = num_pads;
+ ssd->source_pad = num_pads - 1;
- if (last == NULL) {
- last = this;
- continue;
- }
+ snprintf(ssd->sd.name,
+ sizeof(ssd->sd.name), "%s %s %d-%4.4x", sensor->minfo.name,
+ name, i2c_adapter_id(client->adapter), client->addr);
- this->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
- this->sd.internal_ops = &smiapp_internal_ops;
- this->sd.owner = THIS_MODULE;
- v4l2_set_subdevdata(&this->sd, client);
+ smiapp_get_native_size(ssd, &ssd->sink_fmt);
- last = this;
+ ssd->compose.width = ssd->sink_fmt.width;
+ ssd->compose.height = ssd->sink_fmt.height;
+ ssd->crop[ssd->source_pad] = ssd->compose;
+ ssd->pads[ssd->source_pad].flags = MEDIA_PAD_FL_SOURCE;
+ if (ssd != sensor->pixel_array) {
+ ssd->crop[ssd->sink_pad] = ssd->compose;
+ ssd->pads[ssd->sink_pad].flags = MEDIA_PAD_FL_SINK;
}
- dev_dbg(&client->dev, "profile %d\n", sensor->minfo.smiapp_profile);
+ ssd->sd.entity.ops = &smiapp_entity_ops;
- sensor->pixel_array->sd.entity.function = MEDIA_ENT_F_CAM_SENSOR;
-
- /* final steps */
- smiapp_read_frame_fmt(sensor);
- rval = smiapp_init_controls(sensor);
- if (rval < 0)
- goto out_cleanup;
-
- rval = smiapp_call_quirk(sensor, init);
- if (rval)
- goto out_cleanup;
+ if (ssd == sensor->src)
+ return;
- rval = smiapp_get_mbus_formats(sensor);
- if (rval) {
- rval = -ENODEV;
- goto out_cleanup;
- }
-
- rval = smiapp_init_late_controls(sensor);
- if (rval) {
- rval = -ENODEV;
- goto out_cleanup;
- }
-
- mutex_lock(&sensor->mutex);
- rval = smiapp_update_mode(sensor);
- mutex_unlock(&sensor->mutex);
- if (rval) {
- dev_err(&client->dev, "update mode failed\n");
- goto out_cleanup;
- }
-
- sensor->streaming = false;
- sensor->dev_init_done = true;
-
- smiapp_power_off(sensor);
-
- return 0;
-
-out_cleanup:
- smiapp_cleanup(sensor);
-
-out_power_off:
- smiapp_power_off(sensor);
- return rval;
-}
-
-static int smiapp_registered(struct v4l2_subdev *subdev)
-{
- struct smiapp_sensor *sensor = to_smiapp_sensor(subdev);
- struct i2c_client *client = v4l2_get_subdevdata(subdev);
- int rval;
-
- if (!client->dev.of_node) {
- rval = smiapp_init(sensor);
- if (rval)
- return rval;
- }
-
- rval = smiapp_register_subdevs(sensor);
- if (rval)
- smiapp_cleanup(sensor);
-
- return rval;
+ ssd->sd.internal_ops = &smiapp_internal_ops;
+ ssd->sd.owner = THIS_MODULE;
+ ssd->sd.dev = &client->dev;
+ v4l2_set_subdevdata(&ssd->sd, client);
}
static int smiapp_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
{
struct smiapp_subdev *ssd = to_smiapp_subdev(sd);
struct smiapp_sensor *sensor = ssd->sensor;
- u32 mbus_code =
- smiapp_csi_data_formats[smiapp_pixel_order(sensor)].code;
unsigned int i;
+ int rval;
mutex_lock(&sensor->mutex);
for (i = 0; i < ssd->npads; i++) {
struct v4l2_mbus_framefmt *try_fmt =
v4l2_subdev_get_try_format(sd, fh->pad, i);
- struct v4l2_rect *try_crop = v4l2_subdev_get_try_crop(sd, fh->pad, i);
+ struct v4l2_rect *try_crop =
+ v4l2_subdev_get_try_crop(sd, fh->pad, i);
struct v4l2_rect *try_comp;
- try_fmt->width = sensor->limits[SMIAPP_LIMIT_X_ADDR_MAX] + 1;
- try_fmt->height = sensor->limits[SMIAPP_LIMIT_Y_ADDR_MAX] + 1;
- try_fmt->code = mbus_code;
- try_fmt->field = V4L2_FIELD_NONE;
+ smiapp_get_native_size(ssd, try_crop);
- try_crop->top = 0;
- try_crop->left = 0;
- try_crop->width = try_fmt->width;
- try_crop->height = try_fmt->height;
+ try_fmt->width = try_crop->width;
+ try_fmt->height = try_crop->height;
+ try_fmt->code = sensor->internal_csi_format->code;
+ try_fmt->field = V4L2_FIELD_NONE;
if (ssd != sensor->pixel_array)
continue;
@@ -2862,12 +2674,23 @@ static int smiapp_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
mutex_unlock(&sensor->mutex);
- return smiapp_set_power(sd, 1);
+ rval = pm_runtime_get_sync(sd->dev);
+ if (rval >= 0)
+ return 0;
+
+ if (rval != -EBUSY && rval != -EAGAIN)
+ pm_runtime_set_active(sd->dev);
+ pm_runtime_put(sd->dev);
+
+ return rval;
}
static int smiapp_close(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
{
- return smiapp_set_power(sd, 0);
+ pm_runtime_mark_last_busy(sd->dev);
+ pm_runtime_put_autosuspend(sd->dev);
+
+ return 0;
}
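/*
 * Note on the pairing above: smiapp_open() takes a runtime PM
 * reference with pm_runtime_get_sync(), and smiapp_close() releases
 * it through the autosuspend path, so a quick close/re-open does not
 * power-cycle the sensor (the 1000 ms autosuspend delay is configured
 * in smiapp_probe() below).
 */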
static const struct v4l2_subdev_video_ops smiapp_video_ops = {
@@ -2904,6 +2727,7 @@ static const struct media_entity_operations smiapp_entity_ops = {
static const struct v4l2_subdev_internal_ops smiapp_internal_src_ops = {
.registered = smiapp_registered,
+ .unregistered = smiapp_unregistered,
.open = smiapp_open,
.close = smiapp_close,
};
@@ -2924,20 +2748,20 @@ static int smiapp_suspend(struct device *dev)
struct i2c_client *client = to_i2c_client(dev);
struct v4l2_subdev *subdev = i2c_get_clientdata(client);
struct smiapp_sensor *sensor = to_smiapp_sensor(subdev);
- bool streaming;
-
- BUG_ON(mutex_is_locked(&sensor->mutex));
+ bool streaming = sensor->streaming;
+ int rval;
- if (sensor->power_count == 0)
- return 0;
+ rval = pm_runtime_get_sync(dev);
+ if (rval < 0) {
+ if (rval != -EBUSY && rval != -EAGAIN)
+ pm_runtime_set_active(&client->dev);
+ pm_runtime_put(dev);
+ return -EAGAIN;
+ }
if (sensor->streaming)
smiapp_stop_streaming(sensor);
- streaming = sensor->streaming;
-
- smiapp_power_off(sensor);
-
/* save state for resume */
sensor->streaming = streaming;
@@ -2949,14 +2773,9 @@ static int smiapp_resume(struct device *dev)
struct i2c_client *client = to_i2c_client(dev);
struct v4l2_subdev *subdev = i2c_get_clientdata(client);
struct smiapp_sensor *sensor = to_smiapp_sensor(subdev);
- int rval;
-
- if (sensor->power_count == 0)
- return 0;
+ int rval = 0;
- rval = smiapp_power_on(sensor);
- if (rval)
- return rval;
+ pm_runtime_put(dev);
if (sensor->streaming)
rval = smiapp_start_streaming(sensor);
@@ -3051,6 +2870,7 @@ static int smiapp_probe(struct i2c_client *client,
{
struct smiapp_sensor *sensor;
struct smiapp_hwconfig *hwcfg = smiapp_get_hwconfig(&client->dev);
+ unsigned int i;
int rval;
if (hwcfg == NULL)
@@ -3062,35 +2882,240 @@ static int smiapp_probe(struct i2c_client *client,
sensor->hwcfg = hwcfg;
mutex_init(&sensor->mutex);
- mutex_init(&sensor->power_mutex);
sensor->src = &sensor->ssds[sensor->ssds_used];
v4l2_i2c_subdev_init(&sensor->src->sd, client, &smiapp_ops);
sensor->src->sd.internal_ops = &smiapp_internal_src_ops;
- sensor->src->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
- sensor->src->sensor = sensor;
- sensor->src->pads[0].flags = MEDIA_PAD_FL_SOURCE;
- rval = media_entity_pads_init(&sensor->src->sd.entity, 2,
- sensor->src->pads);
- if (rval < 0)
+ sensor->vana = devm_regulator_get(&client->dev, "vana");
+ if (IS_ERR(sensor->vana)) {
+ dev_err(&client->dev, "could not get regulator for vana\n");
+ return PTR_ERR(sensor->vana);
+ }
+
+ sensor->ext_clk = devm_clk_get(&client->dev, NULL);
+ if (IS_ERR(sensor->ext_clk)) {
+ dev_err(&client->dev, "could not get clock (%ld)\n",
+ PTR_ERR(sensor->ext_clk));
+ return -EPROBE_DEFER;
+ }
+
+ rval = clk_set_rate(sensor->ext_clk, sensor->hwcfg->ext_clk);
+ if (rval < 0) {
+ dev_err(&client->dev,
+ "unable to set clock freq to %u\n",
+ sensor->hwcfg->ext_clk);
return rval;
+ }
- if (client->dev.of_node) {
- rval = smiapp_init(sensor);
- if (rval)
- goto out_media_entity_cleanup;
+ sensor->xshutdown = devm_gpiod_get_optional(&client->dev, "xshutdown",
+ GPIOD_OUT_LOW);
+ if (IS_ERR(sensor->xshutdown))
+ return PTR_ERR(sensor->xshutdown);
+
+ pm_runtime_enable(&client->dev);
+
+ rval = pm_runtime_get_sync(&client->dev);
+ if (rval < 0) {
+ rval = -ENODEV;
+ goto out_power_off;
+ }
+
+ rval = smiapp_identify_module(sensor);
+ if (rval) {
+ rval = -ENODEV;
+ goto out_power_off;
+ }
+
+ rval = smiapp_get_all_limits(sensor);
+ if (rval) {
+ rval = -ENODEV;
+ goto out_power_off;
}
+ rval = smiapp_read_frame_fmt(sensor);
+ if (rval) {
+ rval = -ENODEV;
+ goto out_power_off;
+ }
+
+ /*
+ * Handle Sensor Module orientation on the board.
+ *
+ * The application of H-FLIP and V-FLIP on the sensor is modified by
+ * the sensor orientation on the board.
+ *
+ * For SMIAPP_BOARD_SENSOR_ORIENT_180 the default behaviour is to set
+ * both H-FLIP and V-FLIP for normal operation which also implies
+ * that a set/unset operation for user space HFLIP and VFLIP v4l2
+ * controls will need to be internally inverted.
+ *
+ * Rotation also changes the bayer pattern.
+ */
+ if (sensor->hwcfg->module_board_orient ==
+ SMIAPP_MODULE_BOARD_ORIENT_180)
+ sensor->hvflip_inv_mask = SMIAPP_IMAGE_ORIENTATION_HFLIP |
+ SMIAPP_IMAGE_ORIENTATION_VFLIP;
+
+ rval = smiapp_call_quirk(sensor, limits);
+ if (rval) {
+ dev_err(&client->dev, "limits quirks failed\n");
+ goto out_power_off;
+ }
+
+ if (sensor->limits[SMIAPP_LIMIT_BINNING_CAPABILITY]) {
+ u32 val;
+
+ rval = smiapp_read(sensor,
+ SMIAPP_REG_U8_BINNING_SUBTYPES, &val);
+ if (rval < 0) {
+ rval = -ENODEV;
+ goto out_power_off;
+ }
+ sensor->nbinning_subtypes = min_t(u8, val,
+ SMIAPP_BINNING_SUBTYPES);
+
+ for (i = 0; i < sensor->nbinning_subtypes; i++) {
+ rval = smiapp_read(
+ sensor, SMIAPP_REG_U8_BINNING_TYPE_n(i), &val);
+ if (rval < 0) {
+ rval = -ENODEV;
+ goto out_power_off;
+ }
+ sensor->binning_subtypes[i] =
+ *(struct smiapp_binning_subtype *)&val;
+
+ dev_dbg(&client->dev, "binning %xx%x\n",
+ sensor->binning_subtypes[i].horizontal,
+ sensor->binning_subtypes[i].vertical);
+ }
+ }
+ sensor->binning_horizontal = 1;
+ sensor->binning_vertical = 1;
+
+ if (device_create_file(&client->dev, &dev_attr_ident) != 0) {
+ dev_err(&client->dev, "sysfs ident entry creation failed\n");
+ rval = -ENOENT;
+ goto out_power_off;
+ }
+ /* SMIA++ NVM initialization - it will be read from the sensor
+ * when it is first requested by userspace.
+ */
+ if (sensor->minfo.smiapp_version && sensor->hwcfg->nvm_size) {
+ sensor->nvm = devm_kzalloc(&client->dev,
+ sensor->hwcfg->nvm_size, GFP_KERNEL);
+ if (sensor->nvm == NULL) {
+ rval = -ENOMEM;
+ goto out_cleanup;
+ }
+
+ if (device_create_file(&client->dev, &dev_attr_nvm) != 0) {
+ dev_err(&client->dev, "sysfs nvm entry failed\n");
+ rval = -EBUSY;
+ goto out_cleanup;
+ }
+ }
+
+ /* We consider this as profile 0 sensor if any of these are zero. */
+ if (!sensor->limits[SMIAPP_LIMIT_MIN_OP_SYS_CLK_DIV] ||
+ !sensor->limits[SMIAPP_LIMIT_MAX_OP_SYS_CLK_DIV] ||
+ !sensor->limits[SMIAPP_LIMIT_MIN_OP_PIX_CLK_DIV] ||
+ !sensor->limits[SMIAPP_LIMIT_MAX_OP_PIX_CLK_DIV]) {
+ sensor->minfo.smiapp_profile = SMIAPP_PROFILE_0;
+ } else if (sensor->limits[SMIAPP_LIMIT_SCALING_CAPABILITY]
+ != SMIAPP_SCALING_CAPABILITY_NONE) {
+ if (sensor->limits[SMIAPP_LIMIT_SCALING_CAPABILITY]
+ == SMIAPP_SCALING_CAPABILITY_HORIZONTAL)
+ sensor->minfo.smiapp_profile = SMIAPP_PROFILE_1;
+ else
+ sensor->minfo.smiapp_profile = SMIAPP_PROFILE_2;
+ sensor->scaler = &sensor->ssds[sensor->ssds_used];
+ sensor->ssds_used++;
+ } else if (sensor->limits[SMIAPP_LIMIT_DIGITAL_CROP_CAPABILITY]
+ == SMIAPP_DIGITAL_CROP_CAPABILITY_INPUT_CROP) {
+ sensor->scaler = &sensor->ssds[sensor->ssds_used];
+ sensor->ssds_used++;
+ }
+ sensor->binner = &sensor->ssds[sensor->ssds_used];
+ sensor->ssds_used++;
+ sensor->pixel_array = &sensor->ssds[sensor->ssds_used];
+ sensor->ssds_used++;
+
+ sensor->scale_m = sensor->limits[SMIAPP_LIMIT_SCALER_N_MIN];
+
+ /* prepare PLL configuration input values */
+ sensor->pll.bus_type = SMIAPP_PLL_BUS_TYPE_CSI2;
+ sensor->pll.csi2.lanes = sensor->hwcfg->lanes;
+ sensor->pll.ext_clk_freq_hz = sensor->hwcfg->ext_clk;
+ sensor->pll.scale_n = sensor->limits[SMIAPP_LIMIT_SCALER_N_MIN];
+ /* Profile 0 sensors have no separate OP clock branch. */
+ if (sensor->minfo.smiapp_profile == SMIAPP_PROFILE_0)
+ sensor->pll.flags |= SMIAPP_PLL_FLAG_NO_OP_CLOCKS;
+
+ smiapp_create_subdev(sensor, sensor->scaler, "scaler", 2);
+ smiapp_create_subdev(sensor, sensor->binner, "binner", 2);
+ smiapp_create_subdev(sensor, sensor->pixel_array, "pixel_array", 1);
+
+ dev_dbg(&client->dev, "profile %d\n", sensor->minfo.smiapp_profile);
+
+ sensor->pixel_array->sd.entity.function = MEDIA_ENT_F_CAM_SENSOR;
+
+ rval = smiapp_init_controls(sensor);
+ if (rval < 0)
+ goto out_cleanup;
+
+ rval = smiapp_call_quirk(sensor, init);
+ if (rval)
+ goto out_cleanup;
+
+ rval = smiapp_get_mbus_formats(sensor);
+ if (rval) {
+ rval = -ENODEV;
+ goto out_cleanup;
+ }
+
+ rval = smiapp_init_late_controls(sensor);
+ if (rval) {
+ rval = -ENODEV;
+ goto out_cleanup;
+ }
+
+ mutex_lock(&sensor->mutex);
+ rval = smiapp_update_mode(sensor);
+ mutex_unlock(&sensor->mutex);
+ if (rval) {
+ dev_err(&client->dev, "update mode failed\n");
+ goto out_cleanup;
+ }
+
+ sensor->streaming = false;
+ sensor->dev_init_done = true;
+
+ rval = media_entity_pads_init(&sensor->src->sd.entity, 2,
+ sensor->src->pads);
+ if (rval < 0)
+ goto out_media_entity_cleanup;
+
rval = v4l2_async_register_subdev(&sensor->src->sd);
if (rval < 0)
goto out_media_entity_cleanup;
+ pm_runtime_set_autosuspend_delay(&client->dev, 1000);
+ pm_runtime_use_autosuspend(&client->dev);
+ pm_runtime_put_autosuspend(&client->dev);
+
return 0;
out_media_entity_cleanup:
media_entity_cleanup(&sensor->src->sd.entity);
+out_cleanup:
+ smiapp_cleanup(sensor);
+
+out_power_off:
+ pm_runtime_put(&client->dev);
+ pm_runtime_disable(&client->dev);
+
return rval;
}
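/*
 * Condensed sketch (hypothetical driver, most error handling elided,
 * only <linux/pm_runtime.h> calls) of the probe-time runtime PM flow
 * above: enable, power up to probe the hardware, then hand control to
 * autosuspend.
 */
static int example_probe_pm_flow(struct device *dev)
{
	int rval;

	pm_runtime_enable(dev);

	rval = pm_runtime_get_sync(dev);	/* power up for probing */
	if (rval < 0) {
		pm_runtime_put_noidle(dev);
		pm_runtime_disable(dev);
		return rval;
	}

	/* ... identify and initialise the hardware here ... */

	pm_runtime_set_autosuspend_delay(dev, 1000);	/* 1 s idle delay */
	pm_runtime_use_autosuspend(dev);
	pm_runtime_put_autosuspend(dev);	/* suspend when idle */

	return 0;
}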
@@ -3102,11 +3127,8 @@ static int smiapp_remove(struct i2c_client *client)
v4l2_async_unregister_subdev(subdev);
- if (sensor->power_count) {
- gpiod_set_value(sensor->xshutdown, 0);
- clk_disable_unprepare(sensor->ext_clk);
- sensor->power_count = 0;
- }
+ pm_runtime_suspend(&client->dev);
+ pm_runtime_disable(&client->dev);
for (i = 0; i < sensor->ssds_used; i++) {
v4l2_device_unregister_subdev(&sensor->ssds[i].sd);
@@ -3130,8 +3152,8 @@ static const struct i2c_device_id smiapp_id_table[] = {
MODULE_DEVICE_TABLE(i2c, smiapp_id_table);
static const struct dev_pm_ops smiapp_pm_ops = {
- .suspend = smiapp_suspend,
- .resume = smiapp_resume,
+ SET_SYSTEM_SLEEP_PM_OPS(smiapp_suspend, smiapp_resume)
+ SET_RUNTIME_PM_OPS(smiapp_power_off, smiapp_power_on, NULL)
};
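/*
 * Roughly what the two macros above expand to with CONFIG_PM enabled
 * (simplified: SET_SYSTEM_SLEEP_PM_OPS() also wires
 * freeze/thaw/poweroff/restore to the same pair):
 *
 *	.suspend	 = smiapp_suspend,
 *	.resume		 = smiapp_resume,
 *	.runtime_suspend = smiapp_power_off,
 *	.runtime_resume	 = smiapp_power_on,
 *
 * which is why smiapp_power_on()/smiapp_power_off() were converted
 * earlier in this patch to take a struct device.
 */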
static struct i2c_driver smiapp_i2c_driver = {
diff --git a/drivers/media/i2c/smiapp/smiapp-regs.c b/drivers/media/i2c/smiapp/smiapp-regs.c
index 1e501c06d18c..d6779e35d36f 100644
--- a/drivers/media/i2c/smiapp/smiapp-regs.c
+++ b/drivers/media/i2c/smiapp/smiapp-regs.c
@@ -268,8 +268,8 @@ int smiapp_write_no_quirk(struct smiapp_sensor *sensor, u32 reg, u32 val)
if (r == 1) {
if (retries)
dev_err(&client->dev,
- "sensor i2c stall encountered. "
- "retries: %d\n", retries);
+ "sensor i2c stall encountered. retries: %d\n",
+ retries);
return 0;
}
diff --git a/drivers/media/i2c/smiapp/smiapp.h b/drivers/media/i2c/smiapp/smiapp.h
index aae72bc87bf7..f74d695018b9 100644
--- a/drivers/media/i2c/smiapp/smiapp.h
+++ b/drivers/media/i2c/smiapp/smiapp.h
@@ -150,11 +150,6 @@ struct smiapp_csi_data_format {
#define SMIAPP_PAD_SRC 1
#define SMIAPP_PADS 2
-#define SMIAPP_COMPRESSED_BASE 8
-#define SMIAPP_COMPRESSED_MAX 16
-#define SMIAPP_NR_OF_COMPRESSED (SMIAPP_COMPRESSED_MAX - \
- SMIAPP_COMPRESSED_BASE + 1)
-
struct smiapp_binning_subtype {
u8 horizontal:4;
u8 vertical:4;
@@ -162,9 +157,9 @@ struct smiapp_binning_subtype {
struct smiapp_subdev {
struct v4l2_subdev sd;
- struct media_pad pads[2];
+ struct media_pad pads[SMIAPP_PADS];
struct v4l2_rect sink_fmt;
- struct v4l2_rect crop[2];
+ struct v4l2_rect crop[SMIAPP_PADS];
struct v4l2_rect compose; /* compose on sink */
unsigned short sink_pad;
unsigned short source_pad;
@@ -181,16 +176,9 @@ struct smiapp_sensor {
* "mutex" is used to serialise access to all fields here
* except v4l2_ctrls at the end of the struct. "mutex" is also
* used to serialise access to file handle specific
- * information. The exception to this rule is the power_mutex
- * below.
+ * information.
*/
struct mutex mutex;
- /*
- * power_mutex is used to serialise power management related
- * activities. Acquiring "mutex" at that time isn't necessary
- * since there are no other users anyway.
- */
- struct mutex power_mutex;
struct smiapp_subdev ssds[SMIAPP_SUBDEVS];
u32 ssds_used;
struct smiapp_subdev *src;
@@ -218,12 +206,14 @@ struct smiapp_sensor {
u8 hvflip_inv_mask; /* H/VFLIP inversion due to sensor orientation */
u8 frame_skip;
- u16 image_start; /* Offset to first line after metadata lines */
-
- int power_count;
+ u16 embedded_start; /* embedded data start line */
+ u16 embedded_end;
+ u16 image_start; /* image data start line */
+ u16 visible_pixel_start; /* start pixel of the visible image */
bool streaming;
bool dev_init_done;
+ u8 compressed_min_bpp;
u8 *nvm; /* nvm memory buffer */
unsigned int nvm_size; /* bytes */
@@ -233,7 +223,7 @@ struct smiapp_sensor {
struct smiapp_pll pll;
/* Is a default format supported for a given BPP? */
- unsigned long valid_link_freqs[SMIAPP_NR_OF_COMPRESSED];
+ unsigned long *valid_link_freqs;
/* Pixel array controls */
struct v4l2_ctrl *analog_gain;
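/*
 * (The valid_link_freqs pointer above replaces the fixed
 * SMIAPP_NR_OF_COMPRESSED-sized array: one bitmap per supported
 * compressed bits-per-pixel value, indexed by
 * "compressed - compressed_min_bpp" and allocated to match the actual
 * range in smiapp_get_mbus_formats().)
 */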
diff --git a/drivers/media/i2c/soc_camera/ov772x.c b/drivers/media/i2c/soc_camera/ov772x.c
index 7e68762b3a4b..985a3672b243 100644
--- a/drivers/media/i2c/soc_camera/ov772x.c
+++ b/drivers/media/i2c/soc_camera/ov772x.c
@@ -1064,8 +1064,7 @@ static int ov772x_probe(struct i2c_client *client,
if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) {
dev_err(&adapter->dev,
- "I2C-Adapter doesn't support "
- "I2C_FUNC_SMBUS_BYTE_DATA\n");
+ "I2C-Adapter doesn't support I2C_FUNC_SMBUS_BYTE_DATA\n");
return -EIO;
}
diff --git a/drivers/media/i2c/soc_camera/ov9740.c b/drivers/media/i2c/soc_camera/ov9740.c
index 0da632d7d33a..f11f76cdacad 100644
--- a/drivers/media/i2c/soc_camera/ov9740.c
+++ b/drivers/media/i2c/soc_camera/ov9740.c
@@ -881,8 +881,7 @@ static int ov9740_video_probe(struct i2c_client *client)
goto done;
}
- dev_info(&client->dev, "ov9740 Model ID 0x%04x, Revision 0x%02x, "
- "Manufacturer 0x%02x, SMIA Version 0x%02x\n",
+ dev_info(&client->dev, "ov9740 Model ID 0x%04x, Revision 0x%02x, Manufacturer 0x%02x, SMIA Version 0x%02x\n",
priv->model, priv->revision, priv->manid, priv->smiaver);
ret = v4l2_ctrl_handler_setup(&priv->hdl);
diff --git a/drivers/media/i2c/soc_camera/tw9910.c b/drivers/media/i2c/soc_camera/tw9910.c
index 4002c07f3857..c9c49ed707b8 100644
--- a/drivers/media/i2c/soc_camera/tw9910.c
+++ b/drivers/media/i2c/soc_camera/tw9910.c
@@ -947,8 +947,7 @@ static int tw9910_probe(struct i2c_client *client,
if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) {
dev_err(&client->dev,
- "I2C-Adapter doesn't support "
- "I2C_FUNC_SMBUS_BYTE_DATA\n");
+ "I2C-Adapter doesn't support I2C_FUNC_SMBUS_BYTE_DATA\n");
return -EIO;
}
diff --git a/drivers/media/i2c/tvaudio.c b/drivers/media/i2c/tvaudio.c
index 42d1e26e581c..ce86534450ac 100644
--- a/drivers/media/i2c/tvaudio.c
+++ b/drivers/media/i2c/tvaudio.c
@@ -1894,8 +1894,9 @@ static int tvaudio_probe(struct i2c_client *client, const struct i2c_device_id *
printk(KERN_INFO "tvaudio: TV audio decoder + audio/video mux driver\n");
printk(KERN_INFO "tvaudio: known chips: ");
for (desc = chiplist; desc->name != NULL; desc++)
- printk("%s%s", (desc == chiplist) ? "" : ", ", desc->name);
- printk("\n");
+ printk(KERN_CONT "%s%s",
+ (desc == chiplist) ? "" : ", ", desc->name);
+ printk(KERN_CONT "\n");
}
chip = devm_kzalloc(&client->dev, sizeof(*chip), GFP_KERNEL);
diff --git a/drivers/media/i2c/tvp514x.c b/drivers/media/i2c/tvp514x.c
index d5c9347f4c6d..0c62899c3667 100644
--- a/drivers/media/i2c/tvp514x.c
+++ b/drivers/media/i2c/tvp514x.c
@@ -894,7 +894,7 @@ static int tvp514x_enum_mbus_code(struct v4l2_subdev *sd,
if (index != 0)
return -EINVAL;
- code->code = MEDIA_BUS_FMT_YUYV8_2X8;
+ code->code = MEDIA_BUS_FMT_UYVY8_2X8;
return 0;
}
@@ -922,7 +922,7 @@ static int tvp514x_get_pad_format(struct v4l2_subdev *sd,
return 0;
}
- format->format.code = MEDIA_BUS_FMT_YUYV8_2X8;
+ format->format.code = MEDIA_BUS_FMT_UYVY8_2X8;
format->format.width = tvp514x_std_list[decoder->current_std].width;
format->format.height = tvp514x_std_list[decoder->current_std].height;
format->format.colorspace = V4L2_COLORSPACE_SMPTE170M;
@@ -946,7 +946,7 @@ static int tvp514x_set_pad_format(struct v4l2_subdev *sd,
struct tvp514x_decoder *decoder = to_decoder(sd);
if (fmt->format.field != V4L2_FIELD_INTERLACED ||
- fmt->format.code != MEDIA_BUS_FMT_YUYV8_2X8 ||
+ fmt->format.code != MEDIA_BUS_FMT_UYVY8_2X8 ||
fmt->format.colorspace != V4L2_COLORSPACE_SMPTE170M ||
fmt->format.width != tvp514x_std_list[decoder->current_std].width ||
fmt->format.height != tvp514x_std_list[decoder->current_std].height)
diff --git a/drivers/media/i2c/tvp5150.c b/drivers/media/i2c/tvp5150.c
index 4740da39d698..3a0fe8cc64e9 100644
--- a/drivers/media/i2c/tvp5150.c
+++ b/drivers/media/i2c/tvp5150.c
@@ -36,6 +36,8 @@ static int debug;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "Debug level (0-2)");
+#define dprintk0(__dev, __arg...) dev_dbg_lvl(__dev, 0, 0, __arg)
+
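/*
 * Assuming dev_dbg_lvl(dev, level, debug, ...) emits its message when
 * "debug >= level", dprintk0() passes 0 for both and therefore always
 * logs (still subject to dynamic debug control), so the log_status
 * and register-dump output below no longer depends on the "debug"
 * module parameter.
 */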
struct tvp5150 {
struct v4l2_subdev sd;
#ifdef CONFIG_MEDIA_CONTROLLER
@@ -74,11 +76,11 @@ static int tvp5150_read(struct v4l2_subdev *sd, unsigned char addr)
rc = i2c_smbus_read_byte_data(c, addr);
if (rc < 0) {
- v4l2_err(sd, "i2c i/o error: rc == %d\n", rc);
+ dev_err(sd->dev, "i2c i/o error: rc == %d\n", rc);
return rc;
}
- v4l2_dbg(2, debug, sd, "tvp5150: read 0x%02x = 0x%02x\n", addr, rc);
+ dev_dbg_lvl(sd->dev, 2, debug, "tvp5150: read 0x%02x = %02x\n", addr, rc);
return rc;
}
@@ -89,10 +91,10 @@ static int tvp5150_write(struct v4l2_subdev *sd, unsigned char addr,
struct i2c_client *c = v4l2_get_subdevdata(sd);
int rc;
- v4l2_dbg(2, debug, sd, "tvp5150: writing 0x%02x 0x%02x\n", addr, value);
+ dev_dbg_lvl(sd->dev, 2, debug, "tvp5150: writing %02x %02x\n", addr, value);
rc = i2c_smbus_write_byte_data(c, addr, value);
if (rc < 0)
- v4l2_err(sd, "i2c i/o error: rc == %d\n", rc);
+ dev_err(sd->dev, "i2c i/o error: rc == %d\n", rc);
return rc;
}
@@ -100,138 +102,140 @@ static int tvp5150_write(struct v4l2_subdev *sd, unsigned char addr,
static void dump_reg_range(struct v4l2_subdev *sd, char *s, u8 init,
const u8 end, int max_line)
{
- int i = 0;
+ u8 buf[16];
+ int i = 0, j, len;
- while (init != (u8)(end + 1)) {
- if ((i % max_line) == 0) {
- if (i > 0)
- printk("\n");
- printk("tvp5150: %s reg 0x%02x = ", s, init);
- }
- printk("%02x ", tvp5150_read(sd, init));
+ if (max_line > 16) {
+ dprintk0(sd->dev, "too much data to dump\n");
+ return;
+ }
+
+ for (i = init; i < end; i += max_line) {
+ len = (end - i > max_line) ? max_line : end - i;
+
+ for (j = 0; j < len; j++)
+ buf[j] = tvp5150_read(sd, i + j);
- init++;
- i++;
+ dprintk0(sd->dev, "%s reg %02x = %*ph\n", s, i, len, buf);
}
- printk("\n");
}
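/*
 * Usage sketch for the "%*ph" specifier relied on above (a standard
 * kernel vsprintf extension): the field width gives the byte count,
 * so one call replaces the old per-byte printk() loop.
 */
static void example_hex_line(struct device *dev)
{
	u8 buf[4] = { 0xde, 0xad, 0xbe, 0xef };

	dev_dbg(dev, "dump: %*ph\n", (int)sizeof(buf), buf);
	/* -> "dump: de ad be ef" */
}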
static int tvp5150_log_status(struct v4l2_subdev *sd)
{
- printk("tvp5150: Video input source selection #1 = 0x%02x\n",
- tvp5150_read(sd, TVP5150_VD_IN_SRC_SEL_1));
- printk("tvp5150: Analog channel controls = 0x%02x\n",
- tvp5150_read(sd, TVP5150_ANAL_CHL_CTL));
- printk("tvp5150: Operation mode controls = 0x%02x\n",
- tvp5150_read(sd, TVP5150_OP_MODE_CTL));
- printk("tvp5150: Miscellaneous controls = 0x%02x\n",
- tvp5150_read(sd, TVP5150_MISC_CTL));
- printk("tvp5150: Autoswitch mask= 0x%02x\n",
- tvp5150_read(sd, TVP5150_AUTOSW_MSK));
- printk("tvp5150: Color killer threshold control = 0x%02x\n",
- tvp5150_read(sd, TVP5150_COLOR_KIL_THSH_CTL));
- printk("tvp5150: Luminance processing controls #1 #2 and #3 = %02x %02x %02x\n",
- tvp5150_read(sd, TVP5150_LUMA_PROC_CTL_1),
- tvp5150_read(sd, TVP5150_LUMA_PROC_CTL_2),
- tvp5150_read(sd, TVP5150_LUMA_PROC_CTL_3));
- printk("tvp5150: Brightness control = 0x%02x\n",
- tvp5150_read(sd, TVP5150_BRIGHT_CTL));
- printk("tvp5150: Color saturation control = 0x%02x\n",
- tvp5150_read(sd, TVP5150_SATURATION_CTL));
- printk("tvp5150: Hue control = 0x%02x\n",
- tvp5150_read(sd, TVP5150_HUE_CTL));
- printk("tvp5150: Contrast control = 0x%02x\n",
- tvp5150_read(sd, TVP5150_CONTRAST_CTL));
- printk("tvp5150: Outputs and data rates select = 0x%02x\n",
- tvp5150_read(sd, TVP5150_DATA_RATE_SEL));
- printk("tvp5150: Configuration shared pins = 0x%02x\n",
- tvp5150_read(sd, TVP5150_CONF_SHARED_PIN));
- printk("tvp5150: Active video cropping start = 0x%02x%02x\n",
- tvp5150_read(sd, TVP5150_ACT_VD_CROP_ST_MSB),
- tvp5150_read(sd, TVP5150_ACT_VD_CROP_ST_LSB));
- printk("tvp5150: Active video cropping stop = 0x%02x%02x\n",
- tvp5150_read(sd, TVP5150_ACT_VD_CROP_STP_MSB),
- tvp5150_read(sd, TVP5150_ACT_VD_CROP_STP_LSB));
- printk("tvp5150: Genlock/RTC = 0x%02x\n",
- tvp5150_read(sd, TVP5150_GENLOCK));
- printk("tvp5150: Horizontal sync start = 0x%02x\n",
- tvp5150_read(sd, TVP5150_HORIZ_SYNC_START));
- printk("tvp5150: Vertical blanking start = 0x%02x\n",
- tvp5150_read(sd, TVP5150_VERT_BLANKING_START));
- printk("tvp5150: Vertical blanking stop = 0x%02x\n",
- tvp5150_read(sd, TVP5150_VERT_BLANKING_STOP));
- printk("tvp5150: Chrominance processing control #1 and #2 = %02x %02x\n",
- tvp5150_read(sd, TVP5150_CHROMA_PROC_CTL_1),
- tvp5150_read(sd, TVP5150_CHROMA_PROC_CTL_2));
- printk("tvp5150: Interrupt reset register B = 0x%02x\n",
- tvp5150_read(sd, TVP5150_INT_RESET_REG_B));
- printk("tvp5150: Interrupt enable register B = 0x%02x\n",
- tvp5150_read(sd, TVP5150_INT_ENABLE_REG_B));
- printk("tvp5150: Interrupt configuration register B = 0x%02x\n",
- tvp5150_read(sd, TVP5150_INTT_CONFIG_REG_B));
- printk("tvp5150: Video standard = 0x%02x\n",
- tvp5150_read(sd, TVP5150_VIDEO_STD));
- printk("tvp5150: Chroma gain factor: Cb=0x%02x Cr=0x%02x\n",
- tvp5150_read(sd, TVP5150_CB_GAIN_FACT),
- tvp5150_read(sd, TVP5150_CR_GAIN_FACTOR));
- printk("tvp5150: Macrovision on counter = 0x%02x\n",
- tvp5150_read(sd, TVP5150_MACROVISION_ON_CTR));
- printk("tvp5150: Macrovision off counter = 0x%02x\n",
- tvp5150_read(sd, TVP5150_MACROVISION_OFF_CTR));
- printk("tvp5150: ITU-R BT.656.%d timing(TVP5150AM1 only)\n",
- (tvp5150_read(sd, TVP5150_REV_SELECT) & 1) ? 3 : 4);
- printk("tvp5150: Device ID = %02x%02x\n",
- tvp5150_read(sd, TVP5150_MSB_DEV_ID),
- tvp5150_read(sd, TVP5150_LSB_DEV_ID));
- printk("tvp5150: ROM version = (hex) %02x.%02x\n",
- tvp5150_read(sd, TVP5150_ROM_MAJOR_VER),
- tvp5150_read(sd, TVP5150_ROM_MINOR_VER));
- printk("tvp5150: Vertical line count = 0x%02x%02x\n",
- tvp5150_read(sd, TVP5150_VERT_LN_COUNT_MSB),
- tvp5150_read(sd, TVP5150_VERT_LN_COUNT_LSB));
- printk("tvp5150: Interrupt status register B = 0x%02x\n",
- tvp5150_read(sd, TVP5150_INT_STATUS_REG_B));
- printk("tvp5150: Interrupt active register B = 0x%02x\n",
- tvp5150_read(sd, TVP5150_INT_ACTIVE_REG_B));
- printk("tvp5150: Status regs #1 to #5 = %02x %02x %02x %02x %02x\n",
- tvp5150_read(sd, TVP5150_STATUS_REG_1),
- tvp5150_read(sd, TVP5150_STATUS_REG_2),
- tvp5150_read(sd, TVP5150_STATUS_REG_3),
- tvp5150_read(sd, TVP5150_STATUS_REG_4),
- tvp5150_read(sd, TVP5150_STATUS_REG_5));
+ dprintk0(sd->dev, "tvp5150: Video input source selection #1 = 0x%02x\n",
+ tvp5150_read(sd, TVP5150_VD_IN_SRC_SEL_1));
+ dprintk0(sd->dev, "tvp5150: Analog channel controls = 0x%02x\n",
+ tvp5150_read(sd, TVP5150_ANAL_CHL_CTL));
+ dprintk0(sd->dev, "tvp5150: Operation mode controls = 0x%02x\n",
+ tvp5150_read(sd, TVP5150_OP_MODE_CTL));
+ dprintk0(sd->dev, "tvp5150: Miscellaneous controls = 0x%02x\n",
+ tvp5150_read(sd, TVP5150_MISC_CTL));
+ dprintk0(sd->dev, "tvp5150: Autoswitch mask= 0x%02x\n",
+ tvp5150_read(sd, TVP5150_AUTOSW_MSK));
+ dprintk0(sd->dev, "tvp5150: Color killer threshold control = 0x%02x\n",
+ tvp5150_read(sd, TVP5150_COLOR_KIL_THSH_CTL));
+ dprintk0(sd->dev, "tvp5150: Luminance processing controls #1 #2 and #3 = %02x %02x %02x\n",
+ tvp5150_read(sd, TVP5150_LUMA_PROC_CTL_1),
+ tvp5150_read(sd, TVP5150_LUMA_PROC_CTL_2),
+ tvp5150_read(sd, TVP5150_LUMA_PROC_CTL_3));
+ dprintk0(sd->dev, "tvp5150: Brightness control = 0x%02x\n",
+ tvp5150_read(sd, TVP5150_BRIGHT_CTL));
+ dprintk0(sd->dev, "tvp5150: Color saturation control = 0x%02x\n",
+ tvp5150_read(sd, TVP5150_SATURATION_CTL));
+ dprintk0(sd->dev, "tvp5150: Hue control = 0x%02x\n",
+ tvp5150_read(sd, TVP5150_HUE_CTL));
+ dprintk0(sd->dev, "tvp5150: Contrast control = 0x%02x\n",
+ tvp5150_read(sd, TVP5150_CONTRAST_CTL));
+ dprintk0(sd->dev, "tvp5150: Outputs and data rates select = 0x%02x\n",
+ tvp5150_read(sd, TVP5150_DATA_RATE_SEL));
+ dprintk0(sd->dev, "tvp5150: Configuration shared pins = 0x%02x\n",
+ tvp5150_read(sd, TVP5150_CONF_SHARED_PIN));
+ dprintk0(sd->dev, "tvp5150: Active video cropping start = 0x%02x%02x\n",
+ tvp5150_read(sd, TVP5150_ACT_VD_CROP_ST_MSB),
+ tvp5150_read(sd, TVP5150_ACT_VD_CROP_ST_LSB));
+ dprintk0(sd->dev, "tvp5150: Active video cropping stop = 0x%02x%02x\n",
+ tvp5150_read(sd, TVP5150_ACT_VD_CROP_STP_MSB),
+ tvp5150_read(sd, TVP5150_ACT_VD_CROP_STP_LSB));
+ dprintk0(sd->dev, "tvp5150: Genlock/RTC = 0x%02x\n",
+ tvp5150_read(sd, TVP5150_GENLOCK));
+ dprintk0(sd->dev, "tvp5150: Horizontal sync start = 0x%02x\n",
+ tvp5150_read(sd, TVP5150_HORIZ_SYNC_START));
+ dprintk0(sd->dev, "tvp5150: Vertical blanking start = 0x%02x\n",
+ tvp5150_read(sd, TVP5150_VERT_BLANKING_START));
+ dprintk0(sd->dev, "tvp5150: Vertical blanking stop = 0x%02x\n",
+ tvp5150_read(sd, TVP5150_VERT_BLANKING_STOP));
+ dprintk0(sd->dev, "tvp5150: Chrominance processing control #1 and #2 = %02x %02x\n",
+ tvp5150_read(sd, TVP5150_CHROMA_PROC_CTL_1),
+ tvp5150_read(sd, TVP5150_CHROMA_PROC_CTL_2));
+ dprintk0(sd->dev, "tvp5150: Interrupt reset register B = 0x%02x\n",
+ tvp5150_read(sd, TVP5150_INT_RESET_REG_B));
+ dprintk0(sd->dev, "tvp5150: Interrupt enable register B = 0x%02x\n",
+ tvp5150_read(sd, TVP5150_INT_ENABLE_REG_B));
+ dprintk0(sd->dev, "tvp5150: Interrupt configuration register B = 0x%02x\n",
+ tvp5150_read(sd, TVP5150_INTT_CONFIG_REG_B));
+ dprintk0(sd->dev, "tvp5150: Video standard = 0x%02x\n",
+ tvp5150_read(sd, TVP5150_VIDEO_STD));
+ dprintk0(sd->dev, "tvp5150: Chroma gain factor: Cb=0x%02x Cr=0x%02x\n",
+ tvp5150_read(sd, TVP5150_CB_GAIN_FACT),
+ tvp5150_read(sd, TVP5150_CR_GAIN_FACTOR));
+ dprintk0(sd->dev, "tvp5150: Macrovision on counter = 0x%02x\n",
+ tvp5150_read(sd, TVP5150_MACROVISION_ON_CTR));
+ dprintk0(sd->dev, "tvp5150: Macrovision off counter = 0x%02x\n",
+ tvp5150_read(sd, TVP5150_MACROVISION_OFF_CTR));
+ dprintk0(sd->dev, "tvp5150: ITU-R BT.656.%d timing(TVP5150AM1 only)\n",
+ (tvp5150_read(sd, TVP5150_REV_SELECT) & 1) ? 3 : 4);
+ dprintk0(sd->dev, "tvp5150: Device ID = %02x%02x\n",
+ tvp5150_read(sd, TVP5150_MSB_DEV_ID),
+ tvp5150_read(sd, TVP5150_LSB_DEV_ID));
+ dprintk0(sd->dev, "tvp5150: ROM version = (hex) %02x.%02x\n",
+ tvp5150_read(sd, TVP5150_ROM_MAJOR_VER),
+ tvp5150_read(sd, TVP5150_ROM_MINOR_VER));
+ dprintk0(sd->dev, "tvp5150: Vertical line count = 0x%02x%02x\n",
+ tvp5150_read(sd, TVP5150_VERT_LN_COUNT_MSB),
+ tvp5150_read(sd, TVP5150_VERT_LN_COUNT_LSB));
+ dprintk0(sd->dev, "tvp5150: Interrupt status register B = 0x%02x\n",
+ tvp5150_read(sd, TVP5150_INT_STATUS_REG_B));
+ dprintk0(sd->dev, "tvp5150: Interrupt active register B = 0x%02x\n",
+ tvp5150_read(sd, TVP5150_INT_ACTIVE_REG_B));
+ dprintk0(sd->dev, "tvp5150: Status regs #1 to #5 = %02x %02x %02x %02x %02x\n",
+ tvp5150_read(sd, TVP5150_STATUS_REG_1),
+ tvp5150_read(sd, TVP5150_STATUS_REG_2),
+ tvp5150_read(sd, TVP5150_STATUS_REG_3),
+ tvp5150_read(sd, TVP5150_STATUS_REG_4),
+ tvp5150_read(sd, TVP5150_STATUS_REG_5));
dump_reg_range(sd, "Teletext filter 1", TVP5150_TELETEXT_FIL1_INI,
TVP5150_TELETEXT_FIL1_END, 8);
dump_reg_range(sd, "Teletext filter 2", TVP5150_TELETEXT_FIL2_INI,
TVP5150_TELETEXT_FIL2_END, 8);
- printk("tvp5150: Teletext filter enable = 0x%02x\n",
- tvp5150_read(sd, TVP5150_TELETEXT_FIL_ENA));
- printk("tvp5150: Interrupt status register A = 0x%02x\n",
- tvp5150_read(sd, TVP5150_INT_STATUS_REG_A));
- printk("tvp5150: Interrupt enable register A = 0x%02x\n",
- tvp5150_read(sd, TVP5150_INT_ENABLE_REG_A));
- printk("tvp5150: Interrupt configuration = 0x%02x\n",
- tvp5150_read(sd, TVP5150_INT_CONF));
- printk("tvp5150: VDP status register = 0x%02x\n",
- tvp5150_read(sd, TVP5150_VDP_STATUS_REG));
- printk("tvp5150: FIFO word count = 0x%02x\n",
- tvp5150_read(sd, TVP5150_FIFO_WORD_COUNT));
- printk("tvp5150: FIFO interrupt threshold = 0x%02x\n",
- tvp5150_read(sd, TVP5150_FIFO_INT_THRESHOLD));
- printk("tvp5150: FIFO reset = 0x%02x\n",
- tvp5150_read(sd, TVP5150_FIFO_RESET));
- printk("tvp5150: Line number interrupt = 0x%02x\n",
- tvp5150_read(sd, TVP5150_LINE_NUMBER_INT));
- printk("tvp5150: Pixel alignment register = 0x%02x%02x\n",
- tvp5150_read(sd, TVP5150_PIX_ALIGN_REG_HIGH),
- tvp5150_read(sd, TVP5150_PIX_ALIGN_REG_LOW));
- printk("tvp5150: FIFO output control = 0x%02x\n",
- tvp5150_read(sd, TVP5150_FIFO_OUT_CTRL));
- printk("tvp5150: Full field enable = 0x%02x\n",
- tvp5150_read(sd, TVP5150_FULL_FIELD_ENA));
- printk("tvp5150: Full field mode register = 0x%02x\n",
- tvp5150_read(sd, TVP5150_FULL_FIELD_MODE_REG));
+ dprintk0(sd->dev, "tvp5150: Teletext filter enable = 0x%02x\n",
+ tvp5150_read(sd, TVP5150_TELETEXT_FIL_ENA));
+ dprintk0(sd->dev, "tvp5150: Interrupt status register A = 0x%02x\n",
+ tvp5150_read(sd, TVP5150_INT_STATUS_REG_A));
+ dprintk0(sd->dev, "tvp5150: Interrupt enable register A = 0x%02x\n",
+ tvp5150_read(sd, TVP5150_INT_ENABLE_REG_A));
+ dprintk0(sd->dev, "tvp5150: Interrupt configuration = 0x%02x\n",
+ tvp5150_read(sd, TVP5150_INT_CONF));
+ dprintk0(sd->dev, "tvp5150: VDP status register = 0x%02x\n",
+ tvp5150_read(sd, TVP5150_VDP_STATUS_REG));
+ dprintk0(sd->dev, "tvp5150: FIFO word count = 0x%02x\n",
+ tvp5150_read(sd, TVP5150_FIFO_WORD_COUNT));
+ dprintk0(sd->dev, "tvp5150: FIFO interrupt threshold = 0x%02x\n",
+ tvp5150_read(sd, TVP5150_FIFO_INT_THRESHOLD));
+ dprintk0(sd->dev, "tvp5150: FIFO reset = 0x%02x\n",
+ tvp5150_read(sd, TVP5150_FIFO_RESET));
+ dprintk0(sd->dev, "tvp5150: Line number interrupt = 0x%02x\n",
+ tvp5150_read(sd, TVP5150_LINE_NUMBER_INT));
+ dprintk0(sd->dev, "tvp5150: Pixel alignment register = 0x%02x%02x\n",
+ tvp5150_read(sd, TVP5150_PIX_ALIGN_REG_HIGH),
+ tvp5150_read(sd, TVP5150_PIX_ALIGN_REG_LOW));
+ dprintk0(sd->dev, "tvp5150: FIFO output control = 0x%02x\n",
+ tvp5150_read(sd, TVP5150_FIFO_OUT_CTRL));
+ dprintk0(sd->dev, "tvp5150: Full field enable = 0x%02x\n",
+ tvp5150_read(sd, TVP5150_FULL_FIELD_ENA));
+ dprintk0(sd->dev, "tvp5150: Full field mode register = 0x%02x\n",
+ tvp5150_read(sd, TVP5150_FULL_FIELD_MODE_REG));
dump_reg_range(sd, "CC data", TVP5150_CC_DATA_INI,
TVP5150_CC_DATA_END, 8);
@@ -254,7 +258,7 @@ static int tvp5150_log_status(struct v4l2_subdev *sd)
Basic functions
****************************************************************************/
-static inline void tvp5150_selmux(struct v4l2_subdev *sd)
+static void tvp5150_selmux(struct v4l2_subdev *sd)
{
int opmode = 0;
struct tvp5150 *decoder = to_tvp5150(sd);
@@ -280,8 +284,7 @@ static inline void tvp5150_selmux(struct v4l2_subdev *sd)
break;
}
- v4l2_dbg(1, debug, sd, "Selecting video route: route input=%i, output=%i "
- "=> tvp5150 input=%i, opmode=%i\n",
+ dev_dbg_lvl(sd->dev, 1, debug, "Selecting video route: route input=%i, output=%i => tvp5150 input=%i, opmode=%i\n",
decoder->input, decoder->output,
input, opmode);
@@ -293,7 +296,7 @@ static inline void tvp5150_selmux(struct v4l2_subdev *sd)
*/
val = tvp5150_read(sd, TVP5150_MISC_CTL);
if (val < 0) {
- v4l2_err(sd, "%s: failed with error = %d\n", __func__, val);
+ dev_err(sd->dev, "%s: failed with error = %d\n", __func__, val);
return;
}
@@ -611,7 +614,7 @@ static int tvp5150_g_sliced_vbi_cap(struct v4l2_subdev *sd,
const struct i2c_vbi_ram_value *regs = vbi_ram_default;
int line;
- v4l2_dbg(1, debug, sd, "g_sliced_vbi_cap\n");
+ dev_dbg_lvl(sd->dev, 1, debug, "g_sliced_vbi_cap\n");
memset(cap, 0, sizeof *cap);
while (regs->reg != (u16)-1) {
@@ -649,7 +652,7 @@ static int tvp5150_set_vbi(struct v4l2_subdev *sd,
int pos=0;
if (std == V4L2_STD_ALL) {
- v4l2_err(sd, "VBI can't be configured without knowing number of lines\n");
+ dev_err(sd->dev, "VBI can't be configured without knowing number of lines\n");
return 0;
} else if (std & V4L2_STD_625_50) {
/* Don't follow NTSC line number convention */
@@ -697,7 +700,7 @@ static int tvp5150_get_vbi(struct v4l2_subdev *sd,
int i, ret = 0;
if (std == V4L2_STD_ALL) {
- v4l2_err(sd, "VBI can't be configured without knowing number of lines\n");
+ dev_err(sd->dev, "VBI can't be configured without knowing number of lines\n");
return 0;
} else if (std & V4L2_STD_625_50) {
/* Don't follow NTSC line number convention */
@@ -712,7 +715,7 @@ static int tvp5150_get_vbi(struct v4l2_subdev *sd,
for (i = 0; i <= 1; i++) {
ret = tvp5150_read(sd, reg + i);
if (ret < 0) {
- v4l2_err(sd, "%s: failed with error = %d\n",
+ dev_err(sd->dev, "%s: failed with error = %d\n",
__func__, ret);
return 0;
}
@@ -749,7 +752,7 @@ static int tvp5150_set_std(struct v4l2_subdev *sd, v4l2_std_id std)
fmt = VIDEO_STD_SECAM_BIT;
}
- v4l2_dbg(1, debug, sd, "Set video std register to %d.\n", fmt);
+ dev_dbg_lvl(sd->dev, 1, debug, "Set video std register to %d.\n", fmt);
tvp5150_write(sd, TVP5150_VIDEO_STD, fmt);
return 0;
}
@@ -815,6 +818,7 @@ static int tvp5150_s_ctrl(struct v4l2_ctrl *ctrl)
return 0;
case V4L2_CID_HUE:
tvp5150_write(sd, TVP5150_HUE_CTL, ctrl->val);
+ break;
case V4L2_CID_TEST_PATTERN:
decoder->enable = ctrl->val ? false : true;
tvp5150_selmux(sd);
@@ -866,7 +870,7 @@ static int tvp5150_fill_fmt(struct v4l2_subdev *sd,
f->field = V4L2_FIELD_ALTERNATE;
f->colorspace = V4L2_COLORSPACE_SMPTE170M;
- v4l2_dbg(1, debug, sd, "width = %d, height = %d\n", f->width,
+ dev_dbg_lvl(sd->dev, 1, debug, "width = %d, height = %d\n", f->width,
f->height);
return 0;
}
@@ -884,7 +888,7 @@ static int tvp5150_set_selection(struct v4l2_subdev *sd,
sel->target != V4L2_SEL_TGT_CROP)
return -EINVAL;
- v4l2_dbg(1, debug, sd, "%s left=%d, top=%d, width=%d, height=%d\n",
+ dev_dbg_lvl(sd->dev, 1, debug, "%s left=%d, top=%d, width=%d, height=%d\n",
__func__, rect.left, rect.top, rect.width, rect.height);
/* tvp5150 has some special limits */
@@ -1010,11 +1014,11 @@ static int tvp5150_enum_frame_size(struct v4l2_subdev *sd,
Media entity ops
****************************************************************************/
+#ifdef CONFIG_MEDIA_CONTROLLER
static int tvp5150_link_setup(struct media_entity *entity,
const struct media_pad *local,
const struct media_pad *remote, u32 flags)
{
-#ifdef CONFIG_MEDIA_CONTROLLER
struct v4l2_subdev *sd = media_entity_to_v4l2_subdev(entity);
struct tvp5150 *decoder = to_tvp5150(sd);
int i;
@@ -1031,7 +1035,6 @@ static int tvp5150_link_setup(struct media_entity *entity,
decoder->input = i;
tvp5150_selmux(sd);
-#endif
return 0;
}
@@ -1039,6 +1042,7 @@ static int tvp5150_link_setup(struct media_entity *entity,
static const struct media_entity_operations tvp5150_sd_media_ops = {
.link_setup = tvp5150_link_setup,
};
+#endif
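/*
 * (The CONFIG_MEDIA_CONTROLLER guard now wraps both the helper and
 * its media_entity_operations, so non-Media-Controller builds carry
 * no stub tvp5150_link_setup() at all.)
 */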
/****************************************************************************
I2C Command
@@ -1148,7 +1152,7 @@ static int tvp5150_g_register(struct v4l2_subdev *sd, struct v4l2_dbg_register *
res = tvp5150_read(sd, reg->reg & 0xff);
if (res < 0) {
- v4l2_err(sd, "%s: failed with error = %d\n", __func__, res);
+ dev_err(sd->dev, "%s: failed with error = %d\n", __func__, res);
return res;
}
@@ -1288,21 +1292,21 @@ static int tvp5150_detect_version(struct tvp5150 *core)
core->dev_id = (regs[0] << 8) | regs[1];
core->rom_ver = (regs[2] << 8) | regs[3];
- v4l2_info(sd, "tvp%04x (%u.%u) chip found @ 0x%02x (%s)\n",
+ dev_info(sd->dev, "tvp%04x (%u.%u) chip found @ 0x%02x (%s)\n",
core->dev_id, regs[2], regs[3], c->addr << 1,
c->adapter->name);
if (core->dev_id == 0x5150 && core->rom_ver == 0x0321) {
- v4l2_info(sd, "tvp5150a detected.\n");
+ dev_info(sd->dev, "tvp5150a detected.\n");
} else if (core->dev_id == 0x5150 && core->rom_ver == 0x0400) {
- v4l2_info(sd, "tvp5150am1 detected.\n");
+ dev_info(sd->dev, "tvp5150am1 detected.\n");
/* ITU-T BT.656.4 timing */
tvp5150_write(sd, TVP5150_REV_SELECT, 0);
} else if (core->dev_id == 0x5151 && core->rom_ver == 0x0100) {
- v4l2_info(sd, "tvp5151 detected.\n");
+ dev_info(sd->dev, "tvp5151 detected.\n");
} else {
- v4l2_info(sd, "*** unknown tvp%04x chip detected.\n",
+ dev_info(sd->dev, "*** unknown tvp%04x chip detected.\n",
core->dev_id);
}
@@ -1381,7 +1385,7 @@ static int tvp5150_parse_dt(struct tvp5150 *decoder, struct device_node *np)
for_each_available_child_of_node(connectors, child) {
ret = of_property_read_u32(child, "input", &input_type);
if (ret) {
- v4l2_err(&decoder->sd,
+ dev_err(decoder->sd.dev,
"missing type property in node %s\n",
child->name);
goto err_connector;
@@ -1396,7 +1400,7 @@ static int tvp5150_parse_dt(struct tvp5150 *decoder, struct device_node *np)
/* Each input connector can only be defined once */
if (input->name) {
- v4l2_err(&decoder->sd,
+ dev_err(decoder->sd.dev,
"input %s with same type already exists\n",
input->name);
ret = -EINVAL;
@@ -1417,7 +1421,7 @@ static int tvp5150_parse_dt(struct tvp5150 *decoder, struct device_node *np)
ret = of_property_read_string(child, "label", &name);
if (ret < 0) {
- v4l2_err(&decoder->sd,
+ dev_err(decoder->sd.dev,
"missing label property in node %s\n",
child->name);
goto err_connector;
@@ -1465,7 +1469,7 @@ static int tvp5150_probe(struct i2c_client *c,
if (IS_ENABLED(CONFIG_OF) && np) {
res = tvp5150_parse_dt(core, np);
if (res) {
- v4l2_err(sd, "DT parsing error: %d\n", res);
+ dev_err(sd->dev, "DT parsing error: %d\n", res);
return res;
}
} else {
@@ -1549,7 +1553,7 @@ static int tvp5150_remove(struct i2c_client *c)
struct v4l2_subdev *sd = i2c_get_clientdata(c);
struct tvp5150 *decoder = to_tvp5150(sd);
- v4l2_dbg(1, debug, sd,
+ dev_dbg_lvl(sd->dev, 1, debug,
"tvp5150.c: removing tvp5150 adapter on address 0x%x\n",
c->addr << 1);
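Across the tvp5150 hunks the v4l2_err/v4l2_info/v4l2_dbg wrappers are replaced
by plain dev_* calls on sd->dev, with dev_dbg_lvl() standing in for the
level-checked v4l2_dbg(). A minimal sketch of what such a helper could look
like, assuming it simply gates dev_dbg() on the driver's "debug" module
parameter (the real definition may differ):

	#define dev_dbg_lvl(dev, level, debug, fmt, arg...)	\
		do {						\
			if ((debug) >= (level))			\
				dev_dbg(dev, fmt, ##arg);	\
		} while (0)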
diff --git a/drivers/media/media-device.c b/drivers/media/media-device.c
index 2783531f9fc0..8756275e9fc4 100644
--- a/drivers/media/media-device.c
+++ b/drivers/media/media-device.c
@@ -801,9 +801,13 @@ void media_device_unregister(struct media_device *mdev)
/* Remove all interfaces from the media device */
list_for_each_entry_safe(intf, tmp_intf, &mdev->interfaces,
graph_obj.list) {
+ /*
+ * Unlink the interface, but don't free it here; the
+ * module which created it is responsible for freeing
+ * it
+ */
__media_remove_intf_links(intf);
media_gobj_destroy(&intf->graph_obj);
- kfree(intf);
}
mutex_unlock(&mdev->graph_mutex);
@@ -817,32 +821,6 @@ void media_device_unregister(struct media_device *mdev)
}
EXPORT_SYMBOL_GPL(media_device_unregister);
-static void media_device_release_devres(struct device *dev, void *res)
-{
-}
-
-struct media_device *media_device_get_devres(struct device *dev)
-{
- struct media_device *mdev;
-
- mdev = devres_find(dev, media_device_release_devres, NULL, NULL);
- if (mdev)
- return mdev;
-
- mdev = devres_alloc(media_device_release_devres,
- sizeof(struct media_device), GFP_KERNEL);
- if (!mdev)
- return NULL;
- return devres_get(dev, mdev, NULL, NULL);
-}
-EXPORT_SYMBOL_GPL(media_device_get_devres);
-
-struct media_device *media_device_find_devres(struct device *dev)
-{
- return devres_find(dev, media_device_release_devres, NULL, NULL);
-}
-EXPORT_SYMBOL_GPL(media_device_find_devres);
-
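With media_device_get_devres() and media_device_find_devres() removed, a
media_device's lifetime is no longer tied to a struct device through the
devres list; matching the ownership comment added to media_device_unregister()
above, each bridge driver now owns the allocation outright. A hedged sketch of
the expected pattern, where every name is illustrative rather than taken from
this patch:

	#include <linux/device.h>
	#include <media/media-device.h>

	struct my_bridge {
		struct media_device mdev;	/* embedded, freed with the driver state */
	};

	static int my_bridge_probe(struct device *dev)
	{
		struct my_bridge *br = devm_kzalloc(dev, sizeof(*br), GFP_KERNEL);

		if (!br)
			return -ENOMEM;
		media_device_init(&br->mdev);	/* the driver, not devres, releases it */
		return 0;
	}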
#if IS_ENABLED(CONFIG_PCI)
void media_device_pci_init(struct media_device *mdev,
struct pci_dev *pci_dev,
diff --git a/drivers/media/media-entity.c b/drivers/media/media-entity.c
index c68239e60487..f9f723f5e4f0 100644
--- a/drivers/media/media-entity.c
+++ b/drivers/media/media-entity.c
@@ -205,10 +205,16 @@ void media_gobj_destroy(struct media_gobj *gobj)
{
dev_dbg_obj(__func__, gobj);
+ /* Do nothing if the object is not linked. */
+ if (gobj->mdev == NULL)
+ return;
+
gobj->mdev->topology_version++;
/* Remove the object from mdev list */
list_del(&gobj->list);
+
+ gobj->mdev = NULL;
}
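Checking and then clearing gobj->mdev makes media_gobj_destroy() idempotent,
which is what lets media_device_unregister() unlink interfaces without freeing
them: when the owning module later destroys the interface itself, the second
call finds mdev already NULL and returns early. Illustrative only:

	media_gobj_destroy(&intf->graph_obj);	/* unlinks, sets gobj->mdev = NULL */
	media_gobj_destroy(&intf->graph_obj);	/* now a harmless no-op */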
int media_entity_pads_init(struct media_entity *entity, u16 num_pads,
diff --git a/drivers/media/pci/b2c2/flexcop-dma.c b/drivers/media/pci/b2c2/flexcop-dma.c
index 2881e0d956ad..913dc97f8b49 100644
--- a/drivers/media/pci/b2c2/flexcop-dma.c
+++ b/drivers/media/pci/b2c2/flexcop-dma.c
@@ -57,8 +57,7 @@ int flexcop_dma_config(struct flexcop_device *fc,
fc->write_ibi_reg(fc,dma2_014,v0x4);
fc->write_ibi_reg(fc,dma2_01c,v0xc);
} else {
- err("either DMA1 or DMA2 can be configured within one "
- "flexcop_dma_config call.");
+ err("either DMA1 or DMA2 can be configured within one flexcop_dma_config call.");
return -EINVAL;
}
@@ -82,8 +81,7 @@ int flexcop_dma_xfer_control(struct flexcop_device *fc,
r0x0 = dma2_010;
r0xc = dma2_01c;
} else {
- err("either transfer DMA1 or DMA2 can be started within one "
- "flexcop_dma_xfer_control call.");
+ err("either transfer DMA1 or DMA2 can be started within one flexcop_dma_xfer_control call.");
return -EINVAL;
}
diff --git a/drivers/media/pci/b2c2/flexcop-pci.c b/drivers/media/pci/b2c2/flexcop-pci.c
index 4cac1fc233f2..99ce28442a75 100644
--- a/drivers/media/pci/b2c2/flexcop-pci.c
+++ b/drivers/media/pci/b2c2/flexcop-pci.c
@@ -185,8 +185,7 @@ static irqreturn_t flexcop_pci_isr(int irq, void *dev_id)
fc->read_ibi_reg(fc,dma1_008).dma_0x8.dma_cur_addr << 2;
u32 cur_pos = cur_addr - fc_pci->dma[0].dma_addr0;
- deb_irq("%u irq: %08x cur_addr: %llx: cur_pos: %08x, "
- "last_cur_pos: %08x ",
+ deb_irq("%u irq: %08x cur_addr: %llx: cur_pos: %08x, last_cur_pos: %08x ",
jiffies_to_usecs(jiffies - fc_pci->last_irq),
v.raw, (unsigned long long)cur_addr, cur_pos,
fc_pci->last_dma1_cur_pos);
@@ -220,8 +219,8 @@ static irqreturn_t flexcop_pci_isr(int irq, void *dev_id)
fc_pci->last_dma1_cur_pos = cur_pos;
fc_pci->count++;
} else {
- deb_irq("isr for flexcop called, "
- "apparently without reason (%08x)\n", v.raw);
+ deb_irq("isr for flexcop called, apparently without reason (%08x)\n",
+ v.raw);
ret = IRQ_NONE;
}
diff --git a/drivers/media/pci/bt8xx/btcx-risc.c b/drivers/media/pci/bt8xx/btcx-risc.c
index 57c7f58c3af2..70bdf93fc020 100644
--- a/drivers/media/pci/bt8xx/btcx-risc.c
+++ b/drivers/media/pci/bt8xx/btcx-risc.c
@@ -22,6 +22,8 @@
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/init.h>
#include <linux/pci.h>
@@ -36,6 +38,13 @@ static unsigned int btcx_debug;
module_param(btcx_debug, int, 0644);
MODULE_PARM_DESC(btcx_debug,"debug messages, default is 0 (no)");
+#define dprintk(fmt, arg...) do { \
+ if (btcx_debug) \
+ printk(KERN_DEBUG pr_fmt("%s: " fmt), \
+ __func__, ##arg); \
+} while (0)
+
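With pr_fmt() set to KBUILD_MODNAME and the btcx_debug test folded into
dprintk(), each call site shrinks to one line and the hand-written "btcx: "
prefixes become redundant. Assuming the module name resolves to btcx_risc, a
converted call expands roughly as sketched in the comment:

	dprintk("riscmem free [%d] dma=%lx\n", memcnt, (unsigned long)risc->dma);
	/* ~ if (btcx_debug)
	 *       printk(KERN_DEBUG "btcx_risc: btcx_riscmem_free: riscmem free [%d] dma=%lx\n", ...);
	 */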
/* ---------------------------------------------------------- */
/* allocate/free risc memory */
@@ -46,11 +55,11 @@ void btcx_riscmem_free(struct pci_dev *pci,
{
if (NULL == risc->cpu)
return;
- if (btcx_debug) {
- memcnt--;
- printk("btcx: riscmem free [%d] dma=%lx\n",
- memcnt, (unsigned long)risc->dma);
- }
+
+ memcnt--;
+ dprintk("btcx: riscmem free [%d] dma=%lx\n",
+ memcnt, (unsigned long)risc->dma);
+
pci_free_consistent(pci, risc->size, risc->cpu, risc->dma);
memset(risc,0,sizeof(*risc));
}
@@ -71,11 +80,10 @@ int btcx_riscmem_alloc(struct pci_dev *pci,
risc->cpu = cpu;
risc->dma = dma;
risc->size = size;
- if (btcx_debug) {
- memcnt++;
- printk("btcx: riscmem alloc [%d] dma=%lx cpu=%p size=%d\n",
- memcnt, (unsigned long)dma, cpu, size);
- }
+
+ memcnt++;
+ dprintk("btcx: riscmem alloc [%d] dma=%lx cpu=%p size=%d\n",
+ memcnt, (unsigned long)dma, cpu, size);
}
memset(risc->cpu,0,risc->size);
return 0;
@@ -137,9 +145,8 @@ btcx_align(struct v4l2_rect *win, struct v4l2_clip *clips, unsigned int n, int m
dx = nx - win->left;
win->left = nx;
win->width = nw;
- if (btcx_debug)
- printk(KERN_DEBUG "btcx: window align %dx%d+%d+%d [dx=%d]\n",
- win->width, win->height, win->left, win->top, dx);
+ dprintk("btcx: window align %dx%d+%d+%d [dx=%d]\n",
+ win->width, win->height, win->left, win->top, dx);
/* fixup clips */
for (i = 0; i < n; i++) {
@@ -149,10 +156,9 @@ btcx_align(struct v4l2_rect *win, struct v4l2_clip *clips, unsigned int n, int m
nw += mask+1;
clips[i].c.left = nx;
clips[i].c.width = nw;
- if (btcx_debug)
- printk(KERN_DEBUG "btcx: clip align %dx%d+%d+%d\n",
- clips[i].c.width, clips[i].c.height,
- clips[i].c.left, clips[i].c.top);
+ dprintk("btcx: clip align %dx%d+%d+%d\n",
+ clips[i].c.width, clips[i].c.height,
+ clips[i].c.left, clips[i].c.top);
}
return 0;
}
@@ -228,10 +234,10 @@ btcx_calc_skips(int line, int width, int *maxy,
*maxy = maxline;
if (btcx_debug) {
- printk(KERN_DEBUG "btcx: skips line %d-%d:",line,maxline);
+ dprintk("btcx: skips line %d-%d:", line, maxline);
for (skip = 0; skip < *nskips; skip++) {
- printk(" %d-%d",skips[skip].start,skips[skip].end);
+ pr_cont(" %d-%d", skips[skip].start, skips[skip].end);
}
- printk("\n");
+ pr_cont("\n");
}
}
diff --git a/drivers/media/pci/bt8xx/bttv-cards.c b/drivers/media/pci/bt8xx/bttv-cards.c
index 8a17cc0bfa07..a1b0f3193bc0 100644
--- a/drivers/media/pci/bt8xx/bttv-cards.c
+++ b/drivers/media/pci/bt8xx/bttv-cards.c
@@ -125,10 +125,8 @@ module_param_array(remote, int, NULL, 0444);
module_param_array(audiodev, int, NULL, 0444);
module_param_array(audiomux, int, NULL, 0444);
-MODULE_PARM_DESC(triton1,"set ETBF pci config bit "
- "[enable bug compatibility for triton1 + others]");
-MODULE_PARM_DESC(vsfx,"set VSFX pci config bit "
- "[yet another chipset flaw workaround]");
+MODULE_PARM_DESC(triton1, "set ETBF pci config bit [enable bug compatibility for triton1 + others]");
+MODULE_PARM_DESC(vsfx, "set VSFX pci config bit [yet another chipset flaw workaround]");
MODULE_PARM_DESC(latency,"pci latency timer");
MODULE_PARM_DESC(card,"specify TV/grabber card model, see CARDLIST file for a list");
MODULE_PARM_DESC(pll, "specify installed crystal (0=none, 28=28 MHz, 35=35 MHz, 14=14 MHz)");
@@ -141,8 +139,7 @@ MODULE_PARM_DESC(audiodev, "specify audio device:\n"
"\t\t 2 = tda7432\n"
"\t\t 3 = tvaudio");
MODULE_PARM_DESC(saa6588, "if 1, then load the saa6588 RDS module, default (0) is to use the card definition.");
-MODULE_PARM_DESC(no_overlay,"allow override overlay default (0 disables, 1 enables)"
- " [some VIA/SIS chipsets are known to have problem with overlay]");
+MODULE_PARM_DESC(no_overlay, "allow override overlay default (0 disables, 1 enables) [some VIA/SIS chipsets are known to have problem with overlay]");
/* ----------------------------------------------------------------------- */
/* list of card IDs for bt878+ cards */
diff --git a/drivers/media/pci/bt8xx/bttv-driver.c b/drivers/media/pci/bt8xx/bttv-driver.c
index 97b91a9f9fa9..fb4aefbcc8f8 100644
--- a/drivers/media/pci/bt8xx/bttv-driver.c
+++ b/drivers/media/pci/bt8xx/bttv-driver.c
@@ -148,8 +148,7 @@ MODULE_PARM_DESC(irq_debug, "irq handler debug messages, default is 0 (no)");
MODULE_PARM_DESC(disable_ir, "disable infrared remote support");
MODULE_PARM_DESC(gbuffers, "number of capture buffers. range 2-32, default 8");
MODULE_PARM_DESC(gbufsize, "size of the capture buffers, default is 0x208000");
-MODULE_PARM_DESC(reset_crop, "reset cropping parameters at open(), default "
- "is 1 (yes) for compatibility with older applications");
+MODULE_PARM_DESC(reset_crop, "reset cropping parameters at open(), default is 1 (yes) for compatibility with older applications");
MODULE_PARM_DESC(automute, "mute audio on bad/missing video signal, default is 1 (yes)");
MODULE_PARM_DESC(chroma_agc, "enables the AGC of chroma signal, default is 0 (no)");
MODULE_PARM_DESC(agc_crush, "enables the luminance AGC crush, default is 1 (yes)");
@@ -3506,8 +3505,7 @@ static void bttv_irq_debug_low_latency(struct bttv *btv, u32 rc)
(unsigned long)rc);
if (0 == (btread(BT848_DSTATUS) & BT848_DSTATUS_HLOC)) {
- pr_notice("%d: Oh, there (temporarily?) is no input signal. "
- "Ok, then this is harmless, don't worry ;)\n",
+ pr_notice("%d: Oh, there (temporarily?) is no input signal. Ok, then this is harmless, don't worry ;)\n",
btv->c.nr);
return;
}
diff --git a/drivers/media/pci/bt8xx/bttv-i2c.c b/drivers/media/pci/bt8xx/bttv-i2c.c
index d43911deb617..274fd036b306 100644
--- a/drivers/media/pci/bt8xx/bttv-i2c.c
+++ b/drivers/media/pci/bt8xx/bttv-i2c.c
@@ -44,15 +44,13 @@ static int i2c_scan;
module_param(i2c_debug, int, 0644);
MODULE_PARM_DESC(i2c_debug, "configure i2c debug level");
module_param(i2c_hw, int, 0444);
-MODULE_PARM_DESC(i2c_hw,"force use of hardware i2c support, "
- "instead of software bitbang");
+MODULE_PARM_DESC(i2c_hw, "force use of hardware i2c support, instead of software bitbang");
module_param(i2c_scan, int, 0444);
MODULE_PARM_DESC(i2c_scan,"scan i2c bus at insmod time");
static unsigned int i2c_udelay = 5;
module_param(i2c_udelay, int, 0444);
-MODULE_PARM_DESC(i2c_udelay,"soft i2c delay at insmod time, in usecs "
- "(should be 5 or higher). Lower value means higher bus speed.");
+MODULE_PARM_DESC(i2c_udelay, "soft i2c delay at insmod time, in usecs (should be 5 or higher). Lower value means higher bus speed.");
/* ----------------------------------------------------------------------- */
/* I2C functions - bitbanging adapter (software i2c) */
diff --git a/drivers/media/pci/bt8xx/bttv-input.c b/drivers/media/pci/bt8xx/bttv-input.c
index a75c53da224a..4da720e4867e 100644
--- a/drivers/media/pci/bt8xx/bttv-input.c
+++ b/drivers/media/pci/bt8xx/bttv-input.c
@@ -185,8 +185,8 @@ static u32 bttv_rc5_decode(unsigned int code)
return 0;
}
}
- dprintk("code=%x, rc5=%x, start=%x, toggle=%x, address=%x, "
- "instr=%x\n", rc5, org_code, RC5_START(rc5),
+ dprintk("code=%x, rc5=%x, start=%x, toggle=%x, address=%x, instr=%x\n",
+ rc5, org_code, RC5_START(rc5),
RC5_TOGGLE(rc5), RC5_ADDR(rc5), RC5_INSTR(rc5));
return rc5;
}
diff --git a/drivers/media/pci/bt8xx/dst.c b/drivers/media/pci/bt8xx/dst.c
index 35bc9b2287b4..7166d2279465 100644
--- a/drivers/media/pci/bt8xx/dst.c
+++ b/drivers/media/pci/bt8xx/dst.c
@@ -18,6 +18,8 @@
Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
@@ -30,9 +32,9 @@
#include "dst_priv.h"
#include "dst_common.h"
-static unsigned int verbose = 1;
+static unsigned int verbose;
module_param(verbose, int, 0644);
-MODULE_PARM_DESC(verbose, "verbose startup messages, default is 1 (yes)");
+MODULE_PARM_DESC(verbose, "verbosity level (0 to 3)");
static unsigned int dst_addons;
module_param(dst_addons, int, 0644);
@@ -46,29 +48,10 @@ MODULE_PARM_DESC(dst_algo, "tuning algo: default is 0=(SW), 1=(HW)");
#define ATTEMPT_TUNE 2
#define HAS_POWER 4
-#define DST_ERROR 0
-#define DST_NOTICE 1
-#define DST_INFO 2
-#define DST_DEBUG 3
-
-#define dprintk(x, y, z, format, arg...) do { \
- if (z) { \
- if ((x > DST_ERROR) && (x > y)) \
- printk(KERN_ERR "dst(%d) %s: " format "\n", \
- state->bt->nr, __func__ , ##arg); \
- else if ((x > DST_NOTICE) && (x > y)) \
- printk(KERN_NOTICE "dst(%d) %s: " format "\n", \
- state->bt->nr, __func__ , ##arg); \
- else if ((x > DST_INFO) && (x > y)) \
- printk(KERN_INFO "dst(%d) %s: " format "\n", \
- state->bt->nr, __func__ , ##arg); \
- else if ((x > DST_DEBUG) && (x > y)) \
- printk(KERN_DEBUG "dst(%d) %s: " format "\n", \
- state->bt->nr, __func__ , ##arg); \
- } else { \
- if (x > y) \
- printk(format, ##arg); \
- } \
+#define dprintk(level, fmt, arg...) do { \
+ if (verbose >= level) \
+ printk(KERN_DEBUG pr_fmt("%s: " fmt), \
+ __func__, ##arg); \
} while(0)
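The rewritten macro collapses the old four-level dispatch into a single
KERN_DEBUG print gated on the verbose module parameter, with levels 1/2/3
standing in for the old DST_NOTICE/DST_INFO/DST_DEBUG (errors move to an
unconditional pr_err()). A usage sketch under that assumption, supposing the
module was loaded with verbose=2:

	dprintk(1, "dst wait NOT ready after %d\n", i);	/* printed: 2 >= 1 */
	dprintk(3, "reply is %*ph\n", len, ret);	/* suppressed: 2 < 3 */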
static int dst_command(struct dst_state *state, u8 *data, u8 len);
@@ -91,9 +74,11 @@ static int dst_gpio_outb(struct dst_state *state, u32 mask, u32 enbb,
enb.enb.mask = mask;
enb.enb.enable = enbb;
- dprintk(verbose, DST_INFO, 1, "mask=[%04x], enbb=[%04x], outhigh=[%04x]", mask, enbb, outhigh);
+ dprintk(2, "mask=[%04x], enbb=[%04x], outhigh=[%04x]\n",
+ mask, enbb, outhigh);
if ((err = bt878_device_control(state->bt, DST_IG_ENABLE, &enb)) < 0) {
- dprintk(verbose, DST_INFO, 1, "dst_gpio_enb error (err == %i, mask == %02x, enb == %02x)", err, mask, enbb);
+ dprintk(2, "dst_gpio_enb error (err == %i, mask == %02x, enb == %02x)\n",
+ err, mask, enbb);
return -EREMOTEIO;
}
udelay(1000);
@@ -105,7 +90,8 @@ static int dst_gpio_outb(struct dst_state *state, u32 mask, u32 enbb,
bits.outp.mask = enbb;
bits.outp.highvals = outhigh;
if ((err = bt878_device_control(state->bt, DST_IG_WRITE, &bits)) < 0) {
- dprintk(verbose, DST_INFO, 1, "dst_gpio_outb error (err == %i, enbb == %02x, outhigh == %02x)", err, enbb, outhigh);
+ dprintk(2, "dst_gpio_outb error (err == %i, enbb == %02x, outhigh == %02x)\n",
+ err, enbb, outhigh);
return -EREMOTEIO;
}
@@ -119,7 +105,7 @@ static int dst_gpio_inb(struct dst_state *state, u8 *result)
*result = 0;
if ((err = bt878_device_control(state->bt, DST_IG_READ, &rd_packet)) < 0) {
- dprintk(verbose, DST_ERROR, 1, "dst_gpio_inb error (err == %i)", err);
+ pr_err("dst_gpio_inb error (err == %i)\n", err);
return -EREMOTEIO;
}
*result = (u8) rd_packet.rd.value;
@@ -129,14 +115,14 @@ static int dst_gpio_inb(struct dst_state *state, u8 *result)
int rdc_reset_state(struct dst_state *state)
{
- dprintk(verbose, DST_INFO, 1, "Resetting state machine");
+ dprintk(2, "Resetting state machine\n");
if (dst_gpio_outb(state, RDC_8820_INT, RDC_8820_INT, 0, NO_DELAY) < 0) {
- dprintk(verbose, DST_ERROR, 1, "dst_gpio_outb ERROR !");
+ pr_err("dst_gpio_outb ERROR !\n");
return -1;
}
msleep(10);
if (dst_gpio_outb(state, RDC_8820_INT, RDC_8820_INT, RDC_8820_INT, NO_DELAY) < 0) {
- dprintk(verbose, DST_ERROR, 1, "dst_gpio_outb ERROR !");
+ pr_err("dst_gpio_outb ERROR !\n");
msleep(10);
return -1;
}
@@ -147,14 +133,14 @@ EXPORT_SYMBOL(rdc_reset_state);
static int rdc_8820_reset(struct dst_state *state)
{
- dprintk(verbose, DST_DEBUG, 1, "Resetting DST");
+ dprintk(3, "Resetting DST\n");
if (dst_gpio_outb(state, RDC_8820_RESET, RDC_8820_RESET, 0, NO_DELAY) < 0) {
- dprintk(verbose, DST_ERROR, 1, "dst_gpio_outb ERROR !");
+ pr_err("dst_gpio_outb ERROR !\n");
return -1;
}
udelay(1000);
if (dst_gpio_outb(state, RDC_8820_RESET, RDC_8820_RESET, RDC_8820_RESET, DELAY) < 0) {
- dprintk(verbose, DST_ERROR, 1, "dst_gpio_outb ERROR !");
+ pr_err("dst_gpio_outb ERROR !\n");
return -1;
}
@@ -164,7 +150,7 @@ static int rdc_8820_reset(struct dst_state *state)
static int dst_pio_enable(struct dst_state *state)
{
if (dst_gpio_outb(state, ~0, RDC_8820_PIO_0_ENABLE, 0, NO_DELAY) < 0) {
- dprintk(verbose, DST_ERROR, 1, "dst_gpio_outb ERROR !");
+ pr_err("dst_gpio_outb ERROR !\n");
return -1;
}
udelay(1000);
@@ -175,7 +161,7 @@ static int dst_pio_enable(struct dst_state *state)
int dst_pio_disable(struct dst_state *state)
{
if (dst_gpio_outb(state, ~0, RDC_8820_PIO_0_DISABLE, RDC_8820_PIO_0_DISABLE, NO_DELAY) < 0) {
- dprintk(verbose, DST_ERROR, 1, "dst_gpio_outb ERROR !");
+ pr_err("dst_gpio_outb ERROR !\n");
return -1;
}
if (state->type_flags & DST_TYPE_HAS_FW_1)
@@ -192,16 +178,16 @@ int dst_wait_dst_ready(struct dst_state *state, u8 delay_mode)
for (i = 0; i < 200; i++) {
if (dst_gpio_inb(state, &reply) < 0) {
- dprintk(verbose, DST_ERROR, 1, "dst_gpio_inb ERROR !");
+ pr_err("dst_gpio_inb ERROR !\n");
return -1;
}
if ((reply & RDC_8820_PIO_0_ENABLE) == 0) {
- dprintk(verbose, DST_INFO, 1, "dst wait ready after %d", i);
+ dprintk(2, "dst wait ready after %d\n", i);
return 1;
}
msleep(10);
}
- dprintk(verbose, DST_NOTICE, 1, "dst wait NOT ready after %d", i);
+ dprintk(1, "dst wait NOT ready after %d\n", i);
return 0;
}
@@ -209,7 +195,7 @@ EXPORT_SYMBOL(dst_wait_dst_ready);
int dst_error_recovery(struct dst_state *state)
{
- dprintk(verbose, DST_NOTICE, 1, "Trying to return from previous errors.");
+ dprintk(1, "Trying to return from previous errors.\n");
dst_pio_disable(state);
msleep(10);
dst_pio_enable(state);
@@ -221,7 +207,7 @@ EXPORT_SYMBOL(dst_error_recovery);
int dst_error_bailout(struct dst_state *state)
{
- dprintk(verbose, DST_INFO, 1, "Trying to bailout from previous error.");
+ dprintk(2, "Trying to bailout from previous error.\n");
rdc_8820_reset(state);
dst_pio_disable(state);
msleep(10);
@@ -232,13 +218,13 @@ EXPORT_SYMBOL(dst_error_bailout);
int dst_comm_init(struct dst_state *state)
{
- dprintk(verbose, DST_INFO, 1, "Initializing DST.");
+ dprintk(2, "Initializing DST.\n");
if ((dst_pio_enable(state)) < 0) {
- dprintk(verbose, DST_ERROR, 1, "PIO Enable Failed");
+ pr_err("PIO Enable Failed\n");
return -1;
}
if ((rdc_reset_state(state)) < 0) {
- dprintk(verbose, DST_ERROR, 1, "RDC 8820 State RESET Failed.");
+ pr_err("RDC 8820 State RESET Failed.\n");
return -1;
}
if (state->type_flags & DST_TYPE_HAS_FW_1)
@@ -260,23 +246,21 @@ int write_dst(struct dst_state *state, u8 *data, u8 len)
};
int err;
- u8 cnt, i;
+ u8 cnt;
- dprintk(verbose, DST_NOTICE, 0, "writing [ ");
- for (i = 0; i < len; i++)
- dprintk(verbose, DST_NOTICE, 0, "%02x ", data[i]);
- dprintk(verbose, DST_NOTICE, 0, "]\n");
+ dprintk(1, "writing [ %*ph ]\n", len, data);
for (cnt = 0; cnt < 2; cnt++) {
if ((err = i2c_transfer(state->i2c, &msg, 1)) < 0) {
- dprintk(verbose, DST_INFO, 1, "_write_dst error (err == %i, len == 0x%02x, b0 == 0x%02x)", err, len, data[0]);
+ dprintk(2, "_write_dst error (err == %i, len == 0x%02x, b0 == 0x%02x)\n",
+ err, len, data[0]);
dst_error_recovery(state);
continue;
} else
break;
}
if (cnt >= 2) {
- dprintk(verbose, DST_INFO, 1, "RDC 8820 RESET");
+ dprintk(2, "RDC 8820 RESET\n");
dst_error_bailout(state);
return -1;
@@ -300,23 +284,20 @@ int read_dst(struct dst_state *state, u8 *ret, u8 len)
for (cnt = 0; cnt < 2; cnt++) {
if ((err = i2c_transfer(state->i2c, &msg, 1)) < 0) {
- dprintk(verbose, DST_INFO, 1, "read_dst error (err == %i, len == 0x%02x, b0 == 0x%02x)", err, len, ret[0]);
+ dprintk(2, "read_dst error (err == %i, len == 0x%02x, b0 == 0x%02x)\n",
+ err, len, ret[0]);
dst_error_recovery(state);
continue;
} else
break;
}
if (cnt >= 2) {
- dprintk(verbose, DST_INFO, 1, "RDC 8820 RESET");
+ dprintk(2, "RDC 8820 RESET\n");
dst_error_bailout(state);
return -1;
}
- dprintk(verbose, DST_DEBUG, 1, "reply is 0x%x", ret[0]);
- for (err = 1; err < len; err++)
- dprintk(verbose, DST_DEBUG, 0, " 0x%x", ret[err]);
- if (err > 1)
- dprintk(verbose, DST_DEBUG, 0, "\n");
+ dprintk(3, "reply is %*ph\n", len, ret);
return 0;
}
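The %*ph conversions above replace hand-rolled byte loops with the kernel's
hex-dump printf extension: the field width argument gives the byte count (up
to 64) and the bytes print space-separated. A self-contained illustration:

	u8 buf[4] = { 0xde, 0xad, 0xbe, 0xef };

	pr_debug("reply is %*ph\n", 4, buf);	/* "reply is de ad be ef" */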
@@ -326,11 +307,11 @@ static int dst_set_polarization(struct dst_state *state)
{
switch (state->voltage) {
case SEC_VOLTAGE_13: /* Vertical */
- dprintk(verbose, DST_INFO, 1, "Polarization=[Vertical]");
+ dprintk(2, "Polarization=[Vertical]\n");
state->tx_tuna[8] &= ~0x40;
break;
case SEC_VOLTAGE_18: /* Horizontal */
- dprintk(verbose, DST_INFO, 1, "Polarization=[Horizontal]");
+ dprintk(2, "Polarization=[Horizontal]\n");
state->tx_tuna[8] |= 0x40;
break;
case SEC_VOLTAGE_OFF:
@@ -343,7 +324,7 @@ static int dst_set_polarization(struct dst_state *state)
static int dst_set_freq(struct dst_state *state, u32 freq)
{
state->frequency = freq;
- dprintk(verbose, DST_INFO, 1, "set Frequency %u", freq);
+ dprintk(2, "set Frequency %u\n", freq);
if (state->dst_type == DST_TYPE_IS_SAT) {
freq = freq / 1000;
@@ -463,7 +444,7 @@ static int dst_set_symbolrate(struct dst_state *state, u32 srate)
if (state->dst_type == DST_TYPE_IS_TERR) {
return -EOPNOTSUPP;
}
- dprintk(verbose, DST_INFO, 1, "set symrate %u", srate);
+ dprintk(2, "set symrate %u\n", srate);
srate /= 1000;
if (state->dst_type == DST_TYPE_IS_SAT) {
if (state->type_flags & DST_TYPE_HAS_SYMDIV) {
@@ -471,7 +452,7 @@ static int dst_set_symbolrate(struct dst_state *state, u32 srate)
sval <<= 20;
do_div(sval, 88000);
symcalc = (u32) sval;
- dprintk(verbose, DST_INFO, 1, "set symcalc %u", symcalc);
+ dprintk(2, "set symcalc %u\n", symcalc);
state->tx_tuna[5] = (u8) (symcalc >> 12);
state->tx_tuna[6] = (u8) (symcalc >> 4);
state->tx_tuna[7] = (u8) (symcalc << 4);
@@ -486,7 +467,7 @@ static int dst_set_symbolrate(struct dst_state *state, u32 srate)
state->tx_tuna[8] |= 0x20;
}
} else if (state->dst_type == DST_TYPE_IS_CABLE) {
- dprintk(verbose, DST_DEBUG, 1, "%s", state->fw_name);
+ dprintk(3, "%s\n", state->fw_name);
if (!strncmp(state->fw_name, "DCTNEW", 6)) {
state->tx_tuna[5] = (u8) (srate >> 8);
state->tx_tuna[6] = (u8) srate;
@@ -561,24 +542,24 @@ static void dst_type_flags_print(struct dst_state *state)
{
u32 type_flags = state->type_flags;
- dprintk(verbose, DST_ERROR, 0, "DST type flags :");
+ pr_err("DST type flags :\n");
if (type_flags & DST_TYPE_HAS_TS188)
- dprintk(verbose, DST_ERROR, 0, " 0x%x newtuner", DST_TYPE_HAS_TS188);
+ pr_err(" 0x%x newtuner\n", DST_TYPE_HAS_TS188);
if (type_flags & DST_TYPE_HAS_NEWTUNE_2)
- dprintk(verbose, DST_ERROR, 0, " 0x%x newtuner 2", DST_TYPE_HAS_NEWTUNE_2);
+ pr_err(" 0x%x newtuner 2\n", DST_TYPE_HAS_NEWTUNE_2);
if (type_flags & DST_TYPE_HAS_TS204)
- dprintk(verbose, DST_ERROR, 0, " 0x%x ts204", DST_TYPE_HAS_TS204);
+ pr_err(" 0x%x ts204\n", DST_TYPE_HAS_TS204);
if (type_flags & DST_TYPE_HAS_VLF)
- dprintk(verbose, DST_ERROR, 0, " 0x%x VLF", DST_TYPE_HAS_VLF);
+ pr_err(" 0x%x VLF\n", DST_TYPE_HAS_VLF);
if (type_flags & DST_TYPE_HAS_SYMDIV)
- dprintk(verbose, DST_ERROR, 0, " 0x%x symdiv", DST_TYPE_HAS_SYMDIV);
+ pr_err(" 0x%x symdiv\n", DST_TYPE_HAS_SYMDIV);
if (type_flags & DST_TYPE_HAS_FW_1)
- dprintk(verbose, DST_ERROR, 0, " 0x%x firmware version = 1", DST_TYPE_HAS_FW_1);
+ pr_err(" 0x%x firmware version = 1\n", DST_TYPE_HAS_FW_1);
if (type_flags & DST_TYPE_HAS_FW_2)
- dprintk(verbose, DST_ERROR, 0, " 0x%x firmware version = 2", DST_TYPE_HAS_FW_2);
+ pr_err(" 0x%x firmware version = 2\n", DST_TYPE_HAS_FW_2);
if (type_flags & DST_TYPE_HAS_FW_3)
- dprintk(verbose, DST_ERROR, 0, " 0x%x firmware version = 3", DST_TYPE_HAS_FW_3);
- dprintk(verbose, DST_ERROR, 0, "\n");
+ pr_err(" 0x%x firmware version = 3\n", DST_TYPE_HAS_FW_3);
+ pr_err("\n");
}
@@ -603,10 +584,10 @@ static int dst_type_print(struct dst_state *state, u8 type)
break;
default:
- dprintk(verbose, DST_INFO, 1, "invalid dst type %d", type);
+ dprintk(2, "invalid dst type %d\n", type);
return -EINVAL;
}
- dprintk(verbose, DST_INFO, 1, "DST type: %s", otype);
+ dprintk(2, "DST type: %s\n", otype);
return 0;
}
@@ -914,12 +895,12 @@ static int dst_get_mac(struct dst_state *state)
u8 get_mac[] = { 0x00, 0x0a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
get_mac[7] = dst_check_sum(get_mac, 7);
if (dst_command(state, get_mac, 8) < 0) {
- dprintk(verbose, DST_INFO, 1, "Unsupported Command");
+ dprintk(2, "Unsupported Command\n");
return -1;
}
memset(&state->mac_address, '\0', 8);
memcpy(&state->mac_address, &state->rxbuffer, 6);
- dprintk(verbose, DST_ERROR, 1, "MAC Address=[%pM]", state->mac_address);
+ pr_err("MAC Address=[%pM]\n", state->mac_address);
return 0;
}
@@ -929,11 +910,11 @@ static int dst_fw_ver(struct dst_state *state)
u8 get_ver[] = { 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
get_ver[7] = dst_check_sum(get_ver, 7);
if (dst_command(state, get_ver, 8) < 0) {
- dprintk(verbose, DST_INFO, 1, "Unsupported Command");
+ dprintk(2, "Unsupported Command\n");
return -1;
}
memcpy(&state->fw_version, &state->rxbuffer, 8);
- dprintk(verbose, DST_ERROR, 1, "Firmware Ver = %x.%x Build = %02x, on %x:%x, %x-%x-20%02x",
+ pr_err("Firmware Ver = %x.%x Build = %02x, on %x:%x, %x-%x-20%02x\n",
state->fw_version[0] >> 4, state->fw_version[0] & 0x0f,
state->fw_version[1],
state->fw_version[5], state->fw_version[6],
@@ -950,17 +931,17 @@ static int dst_card_type(struct dst_state *state)
u8 get_type[] = { 0x00, 0x11, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
get_type[7] = dst_check_sum(get_type, 7);
if (dst_command(state, get_type, 8) < 0) {
- dprintk(verbose, DST_INFO, 1, "Unsupported Command");
+ dprintk(2, "Unsupported Command\n");
return -1;
}
memset(&state->card_info, '\0', 8);
memcpy(&state->card_info, &state->rxbuffer, 7);
- dprintk(verbose, DST_ERROR, 1, "Device Model=[%s]", &state->card_info[0]);
+ pr_err("Device Model=[%s]\n", &state->card_info[0]);
for (j = 0, p_tuner_list = tuner_list; j < ARRAY_SIZE(tuner_list); j++, p_tuner_list++) {
if (!strcmp(&state->card_info[0], p_tuner_list->board_name)) {
state->tuner_type = p_tuner_list->tuner_type;
- dprintk(verbose, DST_ERROR, 1, "DST has [%s] tuner, tuner type=[%d]",
+ pr_err("DST has [%s] tuner, tuner type=[%d]\n",
p_tuner_list->tuner_name, p_tuner_list->tuner_type);
}
}
@@ -973,26 +954,19 @@ static int dst_get_vendor(struct dst_state *state)
u8 get_vendor[] = { 0x00, 0x12, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
get_vendor[7] = dst_check_sum(get_vendor, 7);
if (dst_command(state, get_vendor, 8) < 0) {
- dprintk(verbose, DST_INFO, 1, "Unsupported Command");
+ dprintk(2, "Unsupported Command\n");
return -1;
}
memset(&state->vendor, '\0', 8);
memcpy(&state->vendor, &state->rxbuffer, 7);
- dprintk(verbose, DST_ERROR, 1, "Vendor=[%s]", &state->vendor[0]);
+ pr_err("Vendor=[%s]\n", &state->vendor[0]);
return 0;
}
static void debug_dst_buffer(struct dst_state *state)
{
- int i;
-
- if (verbose > 2) {
- printk("%s: [", __func__);
- for (i = 0; i < 8; i++)
- printk(" %02x", state->rxbuffer[i]);
- printk("]\n");
- }
+ dprintk(3, "%s: [ %*ph ]\n", __func__, 8, state->rxbuffer);
}
static int dst_check_stv0299(struct dst_state *state)
@@ -1001,13 +975,13 @@ static int dst_check_stv0299(struct dst_state *state)
check_stv0299[7] = dst_check_sum(check_stv0299, 7);
if (dst_command(state, check_stv0299, 8) < 0) {
- dprintk(verbose, DST_ERROR, 1, "Cmd=[0x04] failed");
+ pr_err("Cmd=[0x04] failed\n");
return -1;
}
debug_dst_buffer(state);
if (memcmp(&check_stv0299, &state->rxbuffer, 8)) {
- dprintk(verbose, DST_ERROR, 1, "Found a STV0299 NIM");
+ pr_err("Found a STV0299 NIM\n");
state->tuner_type = TUNER_TYPE_STV0299;
return 0;
}
@@ -1021,13 +995,13 @@ static int dst_check_mb86a15(struct dst_state *state)
check_mb86a15[7] = dst_check_sum(check_mb86a15, 7);
if (dst_command(state, check_mb86a15, 8) < 0) {
- dprintk(verbose, DST_ERROR, 1, "Cmd=[0x10], failed");
+ pr_err("Cmd=[0x10], failed\n");
return -1;
}
debug_dst_buffer(state);
if (memcmp(&check_mb86a15, &state->rxbuffer, 8) < 0) {
- dprintk(verbose, DST_ERROR, 1, "Found a MB86A15 NIM");
+ pr_err("Found a MB86A15 NIM\n");
state->tuner_type = TUNER_TYPE_MB86A15;
return 0;
}
@@ -1042,21 +1016,21 @@ static int dst_get_tuner_info(struct dst_state *state)
get_tuner_1[7] = dst_check_sum(get_tuner_1, 7);
get_tuner_2[7] = dst_check_sum(get_tuner_2, 7);
- dprintk(verbose, DST_ERROR, 1, "DST TYpe = MULTI FE");
+ pr_err("DST TYpe = MULTI FE\n");
if (state->type_flags & DST_TYPE_HAS_MULTI_FE) {
if (dst_command(state, get_tuner_1, 8) < 0) {
- dprintk(verbose, DST_INFO, 1, "Cmd=[0x13], Unsupported");
+ dprintk(2, "Cmd=[0x13], Unsupported\n");
goto force;
}
} else {
if (dst_command(state, get_tuner_2, 8) < 0) {
- dprintk(verbose, DST_INFO, 1, "Cmd=[0xb], Unsupported");
+ dprintk(2, "Cmd=[0xb], Unsupported\n");
goto force;
}
}
memcpy(&state->board_info, &state->rxbuffer, 8);
if (state->type_flags & DST_TYPE_HAS_MULTI_FE) {
- dprintk(verbose, DST_ERROR, 1, "DST type has TS=188");
+ pr_err("DST type has TS=188\n");
}
if (state->board_info[0] == 0xbc) {
if (state->dst_type != DST_TYPE_IS_ATSC)
@@ -1066,7 +1040,7 @@ static int dst_get_tuner_info(struct dst_state *state)
if (state->board_info[1] == 0x01) {
state->dst_hw_cap |= DST_TYPE_HAS_DBOARD;
- dprintk(verbose, DST_ERROR, 1, "DST has Daughterboard");
+ pr_err("DST has Daughterboard\n");
}
}
@@ -1074,7 +1048,7 @@ static int dst_get_tuner_info(struct dst_state *state)
force:
if (!strncmp(state->fw_name, "DCT-CI", 6)) {
state->type_flags |= DST_TYPE_HAS_TS204;
- dprintk(verbose, DST_ERROR, 1, "Forcing [%s] to TS188", state->fw_name);
+ pr_err("Forcing [%s] to TS188\n", state->fw_name);
}
return -1;
@@ -1103,7 +1077,7 @@ static int dst_get_device_id(struct dst_state *state)
if (read_dst(state, &reply, GET_ACK))
return -1; /* Read failure */
if (reply != ACK) {
- dprintk(verbose, DST_INFO, 1, "Write not Acknowledged! [Reply=0x%02x]", reply);
+ dprintk(2, "Write not Acknowledged! [Reply=0x%02x]\n", reply);
return -1; /* Unack'd write */
}
if (!dst_wait_dst_ready(state, DEVICE_INIT))
@@ -1113,7 +1087,7 @@ static int dst_get_device_id(struct dst_state *state)
dst_pio_disable(state);
if (state->rxbuffer[7] != dst_check_sum(state->rxbuffer, 7)) {
- dprintk(verbose, DST_INFO, 1, "Checksum failure!");
+ dprintk(2, "Checksum failure!\n");
return -1; /* Checksum failure */
}
state->rxbuffer[7] = '\0';
@@ -1125,7 +1099,7 @@ static int dst_get_device_id(struct dst_state *state)
/* Card capabilities */
state->dst_hw_cap = p_dst_type->dst_feature;
- dprintk(verbose, DST_ERROR, 1, "Recognise [%s]", p_dst_type->device_id);
+ pr_err("Recognise [%s]\n", p_dst_type->device_id);
strncpy(&state->fw_name[0], p_dst_type->device_id, 6);
/* Multiple tuners */
if (p_dst_type->tuner_type & TUNER_TYPE_MULTI) {
@@ -1133,7 +1107,7 @@ static int dst_get_device_id(struct dst_state *state)
case DST_TYPE_IS_SAT:
/* STV0299 check */
if (dst_check_stv0299(state) < 0) {
- dprintk(verbose, DST_ERROR, 1, "Unsupported");
+ pr_err("Unsupported\n");
state->tuner_type = TUNER_TYPE_MB86A15;
}
break;
@@ -1141,7 +1115,7 @@ static int dst_get_device_id(struct dst_state *state)
break;
}
if (dst_check_mb86a15(state) < 0)
- dprintk(verbose, DST_ERROR, 1, "Unsupported");
+ pr_err("Unsupported\n");
/* Single tuner */
} else {
state->tuner_type = p_dst_type->tuner_type;
@@ -1149,7 +1123,7 @@ static int dst_get_device_id(struct dst_state *state)
for (j = 0, p_tuner_list = tuner_list; j < ARRAY_SIZE(tuner_list); j++, p_tuner_list++) {
if (!(strncmp(p_dst_type->device_id, p_tuner_list->fw_name, 7)) &&
p_tuner_list->tuner_type == state->tuner_type) {
- dprintk(verbose, DST_ERROR, 1, "[%s] has a [%s]",
+ pr_err("[%s] has a [%s]\n",
p_dst_type->device_id, p_tuner_list->tuner_name);
}
}
@@ -1158,8 +1132,8 @@ static int dst_get_device_id(struct dst_state *state)
}
if (i >= ARRAY_SIZE(dst_tlist)) {
- dprintk(verbose, DST_ERROR, 1, "Unable to recognize %s or %s", &state->rxbuffer[0], &state->rxbuffer[1]);
- dprintk(verbose, DST_ERROR, 1, "please email linux-dvb@linuxtv.org with this type in");
+ pr_err("Unable to recognize %s or %s\n", &state->rxbuffer[0], &state->rxbuffer[1]);
+ pr_err("please email linux-dvb@linuxtv.org with this type in");
use_dst_type = DST_TYPE_IS_SAT;
use_type_flags = DST_TYPE_HAS_SYMDIV;
}
@@ -1176,7 +1150,7 @@ static int dst_probe(struct dst_state *state)
mutex_init(&state->dst_mutex);
if (dst_addons & DST_TYPE_HAS_CA) {
if ((rdc_8820_reset(state)) < 0) {
- dprintk(verbose, DST_ERROR, 1, "RDC 8820 RESET Failed.");
+ pr_err("RDC 8820 RESET Failed.\n");
return -1;
}
msleep(4000);
@@ -1184,35 +1158,35 @@ static int dst_probe(struct dst_state *state)
msleep(100);
}
if ((dst_comm_init(state)) < 0) {
- dprintk(verbose, DST_ERROR, 1, "DST Initialization Failed.");
+ pr_err("DST Initialization Failed.\n");
return -1;
}
msleep(100);
if (dst_get_device_id(state) < 0) {
- dprintk(verbose, DST_ERROR, 1, "unknown device.");
+ pr_err("unknown device.\n");
return -1;
}
if (dst_get_mac(state) < 0) {
- dprintk(verbose, DST_INFO, 1, "MAC: Unsupported command");
+ dprintk(2, "MAC: Unsupported command\n");
}
if ((state->type_flags & DST_TYPE_HAS_MULTI_FE) || (state->type_flags & DST_TYPE_HAS_FW_BUILD)) {
if (dst_get_tuner_info(state) < 0)
- dprintk(verbose, DST_INFO, 1, "Tuner: Unsupported command");
+ dprintk(2, "Tuner: Unsupported command\n");
}
if (state->type_flags & DST_TYPE_HAS_TS204) {
dst_packsize(state, 204);
}
if (state->type_flags & DST_TYPE_HAS_FW_BUILD) {
if (dst_fw_ver(state) < 0) {
- dprintk(verbose, DST_INFO, 1, "FW: Unsupported command");
+ dprintk(2, "FW: Unsupported command\n");
return 0;
}
if (dst_card_type(state) < 0) {
- dprintk(verbose, DST_INFO, 1, "Card: Unsupported command");
+ dprintk(2, "Card: Unsupported command\n");
return 0;
}
if (dst_get_vendor(state) < 0) {
- dprintk(verbose, DST_INFO, 1, "Vendor: Unsupported command");
+ dprintk(2, "Vendor: Unsupported command\n");
return 0;
}
}
@@ -1226,33 +1200,33 @@ static int dst_command(struct dst_state *state, u8 *data, u8 len)
mutex_lock(&state->dst_mutex);
if ((dst_comm_init(state)) < 0) {
- dprintk(verbose, DST_NOTICE, 1, "DST Communication Initialization Failed.");
+ dprintk(1, "DST Communication Initialization Failed.\n");
goto error;
}
if (write_dst(state, data, len)) {
- dprintk(verbose, DST_INFO, 1, "Trying to recover.. ");
+ dprintk(2, "Trying to recover..\n");
if ((dst_error_recovery(state)) < 0) {
- dprintk(verbose, DST_ERROR, 1, "Recovery Failed.");
+ pr_err("Recovery Failed.\n");
goto error;
}
goto error;
}
if ((dst_pio_disable(state)) < 0) {
- dprintk(verbose, DST_ERROR, 1, "PIO Disable Failed.");
+ pr_err("PIO Disable Failed.\n");
goto error;
}
if (state->type_flags & DST_TYPE_HAS_FW_1)
mdelay(3);
if (read_dst(state, &reply, GET_ACK)) {
- dprintk(verbose, DST_DEBUG, 1, "Trying to recover.. ");
+ dprintk(3, "Trying to recover..\n");
if ((dst_error_recovery(state)) < 0) {
- dprintk(verbose, DST_INFO, 1, "Recovery Failed.");
+ dprintk(2, "Recovery Failed.\n");
goto error;
}
goto error;
}
if (reply != ACK) {
- dprintk(verbose, DST_INFO, 1, "write not acknowledged 0x%02x ", reply);
+ dprintk(2, "write not acknowledged 0x%02x\n", reply);
goto error;
}
if (len >= 2 && data[0] == 0 && (data[1] == 1 || data[1] == 3))
@@ -1264,15 +1238,15 @@ static int dst_command(struct dst_state *state, u8 *data, u8 len)
if (!dst_wait_dst_ready(state, NO_DELAY))
goto error;
if (read_dst(state, state->rxbuffer, FIXED_COMM)) {
- dprintk(verbose, DST_DEBUG, 1, "Trying to recover.. ");
+ dprintk(3, "Trying to recover..\n");
if ((dst_error_recovery(state)) < 0) {
- dprintk(verbose, DST_INFO, 1, "Recovery failed.");
+ dprintk(2, "Recovery failed.\n");
goto error;
}
goto error;
}
if (state->rxbuffer[7] != dst_check_sum(state->rxbuffer, 7)) {
- dprintk(verbose, DST_INFO, 1, "checksum failure");
+ dprintk(2, "checksum failure\n");
goto error;
}
mutex_unlock(&state->dst_mutex);
@@ -1348,19 +1322,19 @@ static int dst_get_tuna(struct dst_state *state)
else
retval = read_dst(state, &state->rx_tuna[2], FIXED_COMM);
if (retval < 0) {
- dprintk(verbose, DST_DEBUG, 1, "read not successful");
+ dprintk(3, "read not successful\n");
return retval;
}
if ((state->type_flags & DST_TYPE_HAS_VLF) &&
!(state->dst_type == DST_TYPE_IS_ATSC)) {
if (state->rx_tuna[9] != dst_check_sum(&state->rx_tuna[0], 9)) {
- dprintk(verbose, DST_INFO, 1, "checksum failure ? ");
+ dprintk(2, "checksum failure ?\n");
return -EIO;
}
} else {
if (state->rx_tuna[9] != dst_check_sum(&state->rx_tuna[2], 7)) {
- dprintk(verbose, DST_INFO, 1, "checksum failure? ");
+ dprintk(2, "checksum failure?\n");
return -EIO;
}
}
@@ -1387,7 +1361,7 @@ static int dst_write_tuna(struct dvb_frontend *fe)
int retval;
u8 reply;
- dprintk(verbose, DST_INFO, 1, "type_flags 0x%x ", state->type_flags);
+ dprintk(2, "type_flags 0x%x\n", state->type_flags);
state->decode_freq = 0;
state->decode_lock = state->decode_strength = state->decode_snr = 0;
if (state->dst_type == DST_TYPE_IS_SAT) {
@@ -1397,7 +1371,7 @@ static int dst_write_tuna(struct dvb_frontend *fe)
state->diseq_flags &= ~(HAS_LOCK | ATTEMPT_TUNE);
mutex_lock(&state->dst_mutex);
if ((dst_comm_init(state)) < 0) {
- dprintk(verbose, DST_DEBUG, 1, "DST Communication initialization failed.");
+ dprintk(3, "DST Communication initialization failed.\n");
goto error;
}
// if (state->type_flags & DST_TYPE_HAS_NEWTUNE) {
@@ -1412,19 +1386,19 @@ static int dst_write_tuna(struct dvb_frontend *fe)
}
if (retval < 0) {
dst_pio_disable(state);
- dprintk(verbose, DST_DEBUG, 1, "write not successful");
+ dprintk(3, "write not successful\n");
goto werr;
}
if ((dst_pio_disable(state)) < 0) {
- dprintk(verbose, DST_DEBUG, 1, "DST PIO disable failed !");
+ dprintk(3, "DST PIO disable failed !\n");
goto error;
}
if ((read_dst(state, &reply, GET_ACK) < 0)) {
- dprintk(verbose, DST_DEBUG, 1, "read verify not successful.");
+ dprintk(3, "read verify not successful.\n");
goto error;
}
if (reply != ACK) {
- dprintk(verbose, DST_DEBUG, 1, "write not acknowledged 0x%02x ", reply);
+ dprintk(3, "write not acknowledged 0x%02x\n", reply);
goto error;
}
state->diseq_flags |= ATTEMPT_TUNE;
@@ -1622,7 +1596,7 @@ static int dst_set_frontend(struct dvb_frontend *fe)
retval = dst_set_freq(state, p->frequency);
if(retval != 0)
return retval;
- dprintk(verbose, DST_DEBUG, 1, "Set Frequency=[%d]", p->frequency);
+ dprintk(3, "Set Frequency=[%d]\n", p->frequency);
if (state->dst_type == DST_TYPE_IS_SAT) {
if (state->type_flags & DST_TYPE_HAS_OBS_REGS)
@@ -1630,7 +1604,7 @@ static int dst_set_frontend(struct dvb_frontend *fe)
dst_set_fec(state, p->fec_inner);
dst_set_symbolrate(state, p->symbol_rate);
dst_set_polarization(state);
- dprintk(verbose, DST_DEBUG, 1, "Set Symbolrate=[%d]", p->symbol_rate);
+ dprintk(3, "Set Symbolrate=[%d]\n", p->symbol_rate);
} else if (state->dst_type == DST_TYPE_IS_TERR)
dst_set_bandwidth(state, p->bandwidth_hz);
@@ -1656,7 +1630,7 @@ static int dst_tune_frontend(struct dvb_frontend* fe,
if (re_tune) {
dst_set_freq(state, p->frequency);
- dprintk(verbose, DST_DEBUG, 1, "Set Frequency=[%d]", p->frequency);
+ dprintk(3, "Set Frequency=[%d]\n", p->frequency);
if (state->dst_type == DST_TYPE_IS_SAT) {
if (state->type_flags & DST_TYPE_HAS_OBS_REGS)
@@ -1664,7 +1638,7 @@ static int dst_tune_frontend(struct dvb_frontend* fe,
dst_set_fec(state, p->fec_inner);
dst_set_symbolrate(state, p->symbol_rate);
dst_set_polarization(state);
- dprintk(verbose, DST_DEBUG, 1, "Set Symbolrate=[%d]", p->symbol_rate);
+ dprintk(3, "Set Symbolrate=[%d]\n", p->symbol_rate);
} else if (state->dst_type == DST_TYPE_IS_TERR)
dst_set_bandwidth(state, p->bandwidth_hz);
@@ -1722,10 +1696,10 @@ static void bt8xx_dst_release(struct dvb_frontend *fe)
kfree(state);
}
-static struct dvb_frontend_ops dst_dvbt_ops;
-static struct dvb_frontend_ops dst_dvbs_ops;
-static struct dvb_frontend_ops dst_dvbc_ops;
-static struct dvb_frontend_ops dst_atsc_ops;
+static const struct dvb_frontend_ops dst_dvbt_ops;
+static const struct dvb_frontend_ops dst_dvbs_ops;
+static const struct dvb_frontend_ops dst_dvbc_ops;
+static const struct dvb_frontend_ops dst_atsc_ops;
struct dst_state *dst_attach(struct dst_state *state, struct dvb_adapter *dvb_adapter)
{
@@ -1750,7 +1724,7 @@ struct dst_state *dst_attach(struct dst_state *state, struct dvb_adapter *dvb_ad
memcpy(&state->frontend.ops, &dst_atsc_ops, sizeof(struct dvb_frontend_ops));
break;
default:
- dprintk(verbose, DST_ERROR, 1, "unknown DST type. please report to the LinuxTV.org DVB mailinglist.");
+ pr_err("unknown DST type. please report to the LinuxTV.org DVB mailinglist.\n");
kfree(state);
return NULL;
}
@@ -1761,7 +1735,7 @@ struct dst_state *dst_attach(struct dst_state *state, struct dvb_adapter *dvb_ad
EXPORT_SYMBOL(dst_attach);
-static struct dvb_frontend_ops dst_dvbt_ops = {
+static const struct dvb_frontend_ops dst_dvbt_ops = {
.delsys = { SYS_DVBT },
.info = {
.name = "DST DVB-T",
@@ -1790,7 +1764,7 @@ static struct dvb_frontend_ops dst_dvbt_ops = {
.read_snr = dst_read_snr,
};
-static struct dvb_frontend_ops dst_dvbs_ops = {
+static const struct dvb_frontend_ops dst_dvbs_ops = {
.delsys = { SYS_DVBS },
.info = {
.name = "DST DVB-S",
@@ -1819,7 +1793,7 @@ static struct dvb_frontend_ops dst_dvbs_ops = {
.set_tone = dst_set_tone,
};
-static struct dvb_frontend_ops dst_dvbc_ops = {
+static const struct dvb_frontend_ops dst_dvbc_ops = {
.delsys = { SYS_DVBC_ANNEX_A },
.info = {
.name = "DST DVB-C",
@@ -1848,7 +1822,7 @@ static struct dvb_frontend_ops dst_dvbc_ops = {
.read_snr = dst_read_snr,
};
-static struct dvb_frontend_ops dst_atsc_ops = {
+static const struct dvb_frontend_ops dst_atsc_ops = {
.delsys = { SYS_ATSC },
.info = {
.name = "DST ATSC",
diff --git a/drivers/media/pci/bt8xx/dvb-bt8xx.c b/drivers/media/pci/bt8xx/dvb-bt8xx.c
index e69d338ab9be..6100fa71ece8 100644
--- a/drivers/media/pci/bt8xx/dvb-bt8xx.c
+++ b/drivers/media/pci/bt8xx/dvb-bt8xx.c
@@ -19,7 +19,7 @@
*
*/
-#define pr_fmt(fmt) "dvb_bt8xx: " fmt
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/bitops.h>
#include <linux/module.h>
@@ -44,10 +44,12 @@ MODULE_PARM_DESC(debug, "Turn on/off debugging (default:off).");
DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);
-#define dprintk( args... ) \
- do { \
- if (debug) printk(KERN_DEBUG args); \
- } while (0)
+#define dprintk(fmt, arg...) do { \
+ if (debug) \
+ printk(KERN_DEBUG pr_fmt("%s: " fmt), \
+ __func__, ##arg); \
+} while (0)
+
#define IF_FREQUENCYx6 217 /* 6 * 36.16666666667MHz */
@@ -55,7 +57,7 @@ static void dvb_bt8xx_task(unsigned long data)
{
struct dvb_bt8xx_card *card = (struct dvb_bt8xx_card *)data;
- //printk("%d ", card->bt->finished_block);
+ dprintk("%d\n", card->bt->finished_block);
while (card->bt->last_block != card->bt->finished_block) {
(card->bt->TS_Size ? dvb_dmx_swfilter_204 : dvb_dmx_swfilter)
@@ -443,7 +445,7 @@ static void or51211_reset(struct dvb_frontend * fe)
/* reset & PRM1,2&4 are outputs */
int ret = bttv_gpio_enable(bt->bttv_nr, 0x001F, 0x001F);
if (ret != 0)
- printk(KERN_WARNING "or51211: Init Error - Can't Reset DVR (%i)\n", ret);
+ pr_warn("or51211: Init Error - Can't Reset DVR (%i)\n", ret);
bttv_write_gpio(bt->bttv_nr, 0x001F, 0x0000); /* Reset */
msleep(20);
/* Now set for normal operation */
@@ -560,7 +562,8 @@ static void digitv_alps_tded4_reset(struct dvb_bt8xx_card *bt)
int ret = bttv_gpio_enable(bt->bttv_nr, 0x08, 0x08);
if (ret != 0)
- printk(KERN_WARNING "digitv_alps_tded4: Init Error - Can't Reset DVR (%i)\n", ret);
+ pr_warn("digitv_alps_tded4: Init Error - Can't Reset DVR (%i)\n",
+ ret);
/* Pulse the reset line */
bttv_write_gpio(bt->bttv_nr, 0x08, 0x08); /* High */
@@ -620,7 +623,7 @@ static void frontend_init(struct dvb_bt8xx_card *card, u32 type)
dvb_attach(simple_tuner_attach, card->fe,
card->i2c_adapter, 0x61,
TUNER_LG_TDVS_H06XF);
- dprintk ("dvb_bt8xx: lgdt330x detected\n");
+ dprintk("dvb_bt8xx: lgdt330x detected\n");
}
break;
@@ -635,7 +638,7 @@ static void frontend_init(struct dvb_bt8xx_card *card, u32 type)
card->fe = dvb_attach(nxt6000_attach, &vp3021_alps_tded4_config, card->i2c_adapter);
if (card->fe != NULL) {
card->fe->ops.tuner_ops.set_params = vp3021_alps_tded4_tuner_set_params;
- dprintk ("dvb_bt8xx: an nxt6000 was detected on your digitv card\n");
+ dprintk("dvb_bt8xx: an nxt6000 was detected on your digitv card\n");
break;
}
@@ -645,7 +648,7 @@ static void frontend_init(struct dvb_bt8xx_card *card, u32 type)
if (card->fe != NULL) {
card->fe->ops.tuner_ops.calc_regs = digitv_alps_tded4_tuner_calc_regs;
- dprintk ("dvb_bt8xx: an mt352 was detected on your digitv card\n");
+ dprintk("dvb_bt8xx: an mt352 was detected on your digitv card\n");
}
break;
diff --git a/drivers/media/pci/cobalt/cobalt-v4l2.c b/drivers/media/pci/cobalt/cobalt-v4l2.c
index 5c76637900d0..def4a3b37084 100644
--- a/drivers/media/pci/cobalt/cobalt-v4l2.c
+++ b/drivers/media/pci/cobalt/cobalt-v4l2.c
@@ -527,7 +527,7 @@ static void cobalt_video_input_status_show(struct cobalt_stream *s)
cvi_ctrl = ioread32(&cvi->control);
cvi_stat = ioread32(&cvi->status);
vmr_ctrl = ioread32(&vmr->control);
- vmr_stat = ioread32(&vmr->control);
+ vmr_stat = ioread32(&vmr->status);
cobalt_info("rx%d: cvi resolution: %dx%d\n", rx,
ioread32(&cvi->frame_width), ioread32(&cvi->frame_height));
cobalt_info("rx%d: cvi control: %s%s%s\n", rx,
@@ -1084,12 +1084,33 @@ static int cobalt_g_parm(struct file *file, void *fh, struct v4l2_streamparm *a)
return 0;
}
+static int cobalt_cropcap(struct file *file, void *fh, struct v4l2_cropcap *cc)
+{
+ struct cobalt_stream *s = video_drvdata(file);
+ struct v4l2_dv_timings timings;
+ int err = 0;
+
+ if (cc->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ return -EINVAL;
+ if (s->input == 1)
+ timings = cea1080p60;
+ else
+ err = v4l2_subdev_call(s->sd, video, g_dv_timings, &timings);
+ if (!err) {
+ cc->bounds.width = cc->defrect.width = timings.bt.width;
+ cc->bounds.height = cc->defrect.height = timings.bt.height;
+ cc->pixelaspect = v4l2_dv_timings_aspect_ratio(&timings);
+ }
+ return err;
+}
+
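The new handler fills bounds and defrect from the active DV timings and
derives the pixel aspect with v4l2_dv_timings_aspect_ratio(). Userspace
reaches it through the standard ioctl; a hedged sketch, assuming fd is an
already-open cobalt capture node:

	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <linux/videodev2.h>

	struct v4l2_cropcap cc = { .type = V4L2_BUF_TYPE_VIDEO_CAPTURE };

	if (ioctl(fd, VIDIOC_CROPCAP, &cc) == 0)
		printf("%ux%u, pixel aspect %u/%u\n",
		       cc.bounds.width, cc.bounds.height,
		       cc.pixelaspect.numerator, cc.pixelaspect.denominator);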
static const struct v4l2_ioctl_ops cobalt_ioctl_ops = {
.vidioc_querycap = cobalt_querycap,
.vidioc_g_parm = cobalt_g_parm,
.vidioc_log_status = cobalt_log_status,
.vidioc_streamon = vb2_ioctl_streamon,
.vidioc_streamoff = vb2_ioctl_streamoff,
+ .vidioc_cropcap = cobalt_cropcap,
.vidioc_enum_input = cobalt_enum_input,
.vidioc_g_input = cobalt_g_input,
.vidioc_s_input = cobalt_s_input,
diff --git a/drivers/media/pci/cx18/cx18-alsa-main.c b/drivers/media/pci/cx18/cx18-alsa-main.c
index 0b0e8015ad34..9fb7f5978c8b 100644
--- a/drivers/media/pci/cx18/cx18-alsa-main.c
+++ b/drivers/media/pci/cx18/cx18-alsa-main.c
@@ -217,8 +217,8 @@ static int cx18_alsa_load(struct cx18 *cx)
s = &cx->streams[CX18_ENC_STREAM_TYPE_PCM];
if (s->video_dev.v4l2_dev == NULL) {
- CX18_DEBUG_ALSA_INFO("%s: PCM stream for card is disabled - "
- "skipping\n", __func__);
+ CX18_DEBUG_ALSA_INFO("%s: PCM stream for card is disabled - skipping\n",
+ __func__);
return 0;
}
@@ -232,8 +232,8 @@ static int cx18_alsa_load(struct cx18 *cx)
CX18_ALSA_ERR("%s: failed to create struct snd_cx18_card\n",
__func__);
} else {
- CX18_DEBUG_ALSA_INFO("%s: created cx18 ALSA interface instance "
- "\n", __func__);
+ CX18_DEBUG_ALSA_INFO("%s: created cx18 ALSA interface instance\n",
+ __func__);
}
return 0;
}
diff --git a/drivers/media/pci/cx18/cx18-av-core.c b/drivers/media/pci/cx18/cx18-av-core.c
index 30bbe8d1ea55..7f7306fd9a7f 100644
--- a/drivers/media/pci/cx18/cx18-av-core.c
+++ b/drivers/media/pci/cx18/cx18-av-core.c
@@ -468,21 +468,19 @@ void cx18_av_std_setup(struct cx18 *cx)
CX18_DEBUG_INFO_DEV(sd, "Pixel rate = %d.%06d Mpixel/sec\n",
pll / 8000000, (pll / 8) % 1000000);
- CX18_DEBUG_INFO_DEV(sd, "ADC XTAL/pixel clock decimation ratio "
- "= %d.%03d\n", src_decimation / 256,
+ CX18_DEBUG_INFO_DEV(sd, "ADC XTAL/pixel clock decimation ratio = %d.%03d\n",
+ src_decimation / 256,
((src_decimation % 256) * 1000) / 256);
tmp = 28636360 * (u64) sc;
do_div(tmp, src_decimation);
fsc = tmp >> 13;
CX18_DEBUG_INFO_DEV(sd,
- "Chroma sub-carrier initial freq = %d.%06d "
- "MHz\n", fsc / 1000000, fsc % 1000000);
+ "Chroma sub-carrier initial freq = %d.%06d MHz\n",
+ fsc / 1000000, fsc % 1000000);
- CX18_DEBUG_INFO_DEV(sd, "hblank %i, hactive %i, vblank %i, "
- "vactive %i, vblank656 %i, src_dec %i, "
- "burst 0x%02x, luma_lpf %i, uv_lpf %i, "
- "comb 0x%02x, sc 0x%06x\n",
+ CX18_DEBUG_INFO_DEV(sd,
+ "hblank %i, hactive %i, vblank %i, vactive %i, vblank656 %i, src_dec %i, burst 0x%02x, luma_lpf %i, uv_lpf %i, comb 0x%02x, sc 0x%06x\n",
hblank, hactive, vblank, vactive, vblank656,
src_decimation, burst, luma_lpf, uv_lpf,
comb, sc);
@@ -1069,8 +1067,7 @@ static void log_video_status(struct cx18 *cx)
CX18_INFO_DEV(sd, "Specified video input: Composite %d\n",
vid_input - CX18_AV_COMPOSITE1 + 1);
} else {
- CX18_INFO_DEV(sd, "Specified video input: "
- "S-Video (Luma In%d, Chroma In%d)\n",
+ CX18_INFO_DEV(sd, "Specified video input: S-Video (Luma In%d, Chroma In%d)\n",
(vid_input & 0xf0) >> 4,
(vid_input & 0xf00) >> 8);
}
diff --git a/drivers/media/pci/cx18/cx18-av-firmware.c b/drivers/media/pci/cx18/cx18-av-firmware.c
index a34fd082b76e..160e2e53383f 100644
--- a/drivers/media/pci/cx18/cx18-av-firmware.c
+++ b/drivers/media/pci/cx18/cx18-av-firmware.c
@@ -61,8 +61,7 @@ static int cx18_av_verifyfw(struct cx18 *cx, const struct firmware *fw)
dl_control &= 0xffff3fff; /* ignore top 2 bits of address */
expected = 0x0f000000 | ((u32)data[addr] << 16) | addr;
if (expected != dl_control) {
- CX18_ERR_DEV(sd, "verification of %s firmware load "
- "failed: expected %#010x got %#010x\n",
+ CX18_ERR_DEV(sd, "verification of %s firmware load failed: expected %#010x got %#010x\n",
FWFILE, expected, dl_control);
ret = -EIO;
break;
diff --git a/drivers/media/pci/cx18/cx18-controls.c b/drivers/media/pci/cx18/cx18-controls.c
index adb5a8c72c06..812a2507945a 100644
--- a/drivers/media/pci/cx18/cx18-controls.c
+++ b/drivers/media/pci/cx18/cx18-controls.c
@@ -44,8 +44,7 @@ static int cx18_s_stream_vbi_fmt(struct cx2341x_handler *cxhdl, u32 fmt)
type == V4L2_MPEG_STREAM_TYPE_MPEG2_SVCD)) {
/* Only IVTV fmt VBI insertion & only MPEG-2 PS type streams */
cx->vbi.insert_mpeg = V4L2_MPEG_STREAM_VBI_FMT_NONE;
- CX18_DEBUG_INFO("disabled insertion of sliced VBI data into "
- "the MPEG stream\n");
+ CX18_DEBUG_INFO("disabled insertion of sliced VBI data into the MPEG stream\n");
return 0;
}
@@ -63,16 +62,14 @@ static int cx18_s_stream_vbi_fmt(struct cx2341x_handler *cxhdl, u32 fmt)
}
cx->vbi.insert_mpeg =
V4L2_MPEG_STREAM_VBI_FMT_NONE;
- CX18_WARN("Unable to allocate buffers for "
- "sliced VBI data insertion\n");
+ CX18_WARN("Unable to allocate buffers for sliced VBI data insertion\n");
return -ENOMEM;
}
}
}
cx->vbi.insert_mpeg = fmt;
- CX18_DEBUG_INFO("enabled insertion of sliced VBI data into the MPEG PS,"
- "when sliced VBI is enabled\n");
+ CX18_DEBUG_INFO("enabled insertion of sliced VBI data into the MPEG PS,when sliced VBI is enabled\n");
/*
* If our current settings have no lines set for capture, store a valid,
diff --git a/drivers/media/pci/cx18/cx18-driver.c b/drivers/media/pci/cx18/cx18-driver.c
index 2f23b26b16c0..b8eedbe51c8f 100644
--- a/drivers/media/pci/cx18/cx18-driver.c
+++ b/drivers/media/pci/cx18/cx18-driver.c
@@ -405,8 +405,8 @@ static void cx18_process_eeprom(struct cx18 *cx)
CX18_ERR("Invalid EEPROM\n");
return;
default:
- CX18_ERR("Unknown model %d, defaulting to original HVR-1600 "
- "(cardtype=1)\n", tv.model);
+ CX18_ERR("Unknown model %d, defaulting to original HVR-1600 (cardtype=1)\n",
+ tv.model);
cx->card = cx18_get_card(CX18_CARD_HVR_1600_ESMT);
break;
}
@@ -635,8 +635,8 @@ static void cx18_process_options(struct cx18 *cx)
/* convert from kB to bytes */
cx->stream_buf_size[i] *= 1024;
}
- CX18_DEBUG_INFO("Stream type %d options: %d MB, %d buffers, "
- "%d bytes\n", i, cx->options.megabytes[i],
+ CX18_DEBUG_INFO("Stream type %d options: %d MB, %d buffers, %d bytes\n",
+ i, cx->options.megabytes[i],
cx->stream_buffers[i], cx->stream_buf_size[i]);
}
@@ -838,14 +838,13 @@ static int cx18_setup_pci(struct cx18 *cx, struct pci_dev *pci_dev,
pci_read_config_byte(pci_dev, PCI_LATENCY_TIMER, &pci_latency);
if (pci_latency < 64 && cx18_pci_latency) {
- CX18_INFO("Unreasonably low latency timer, "
- "setting to 64 (was %d)\n", pci_latency);
+ CX18_INFO("Unreasonably low latency timer, setting to 64 (was %d)\n",
+ pci_latency);
pci_write_config_byte(pci_dev, PCI_LATENCY_TIMER, 64);
pci_read_config_byte(pci_dev, PCI_LATENCY_TIMER, &pci_latency);
}
- CX18_DEBUG_INFO("cx%d (rev %d) at %02x:%02x.%x, "
- "irq: %d, latency: %d, memory: 0x%llx\n",
+ CX18_DEBUG_INFO("cx%d (rev %d) at %02x:%02x.%x, irq: %d, latency: %d, memory: 0x%llx\n",
cx->pci_dev->device, cx->card_rev, pci_dev->bus->number,
PCI_SLOT(pci_dev->devfn), PCI_FUNC(pci_dev->devfn),
cx->pci_dev->irq, pci_latency, (u64)cx->base_addr);
@@ -910,8 +909,8 @@ static int cx18_probe(struct pci_dev *pci_dev,
/* FIXME - module parameter arrays constrain max instances */
i = atomic_inc_return(&cx18_instance) - 1;
if (i >= CX18_MAX_CARDS) {
- printk(KERN_ERR "cx18: cannot manage card %d, driver has a "
- "limit of 0 - %d\n", i, CX18_MAX_CARDS - 1);
+ printk(KERN_ERR "cx18: cannot manage card %d, driver has a limit of 0 - %d\n",
+ i, CX18_MAX_CARDS - 1);
return -ENOMEM;
}
@@ -926,8 +925,8 @@ static int cx18_probe(struct pci_dev *pci_dev,
retval = v4l2_device_register(&pci_dev->dev, &cx->v4l2_dev);
if (retval) {
- printk(KERN_ERR "cx18: v4l2_device_register of card %d failed"
- "\n", cx->instance);
+ printk(KERN_ERR "cx18: v4l2_device_register of card %d failed\n",
+ cx->instance);
kfree(cx);
return retval;
}
@@ -958,13 +957,10 @@ static int cx18_probe(struct pci_dev *pci_dev,
cx->enc_mem = ioremap_nocache(cx->base_addr + CX18_MEM_OFFSET,
CX18_MEM_SIZE);
if (!cx->enc_mem) {
- CX18_ERR("ioremap failed. Can't get a window into CX23418 "
- "memory and register space\n");
- CX18_ERR("Each capture card with a CX23418 needs 64 MB of "
- "vmalloc address space for the window\n");
+ CX18_ERR("ioremap failed. Can't get a window into CX23418 memory and register space\n");
+ CX18_ERR("Each capture card with a CX23418 needs 64 MB of vmalloc address space for the window\n");
CX18_ERR("Check the output of 'grep Vmalloc /proc/meminfo'\n");
- CX18_ERR("Use the vmalloc= kernel command line option to set "
- "VmallocTotal to a larger value\n");
+ CX18_ERR("Use the vmalloc= kernel command line option to set VmallocTotal to a larger value\n");
retval = -ENOMEM;
goto free_mem;
}
@@ -1000,8 +996,7 @@ static int cx18_probe(struct pci_dev *pci_dev,
/* Initialize GPIO Reset Controller to do chip resets during i2c init */
if (cx->card->hw_all & CX18_HW_GPIO_RESET_CTRL) {
if (cx18_gpio_register(cx, CX18_HW_GPIO_RESET_CTRL) != 0)
- CX18_WARN("Could not register GPIO reset controller"
- "subdevice; proceeding anyway.\n");
+ CX18_WARN("Could not register GPIO reset controllersubdevice; proceeding anyway.\n");
else
cx->hw_flags |= CX18_HW_GPIO_RESET_CTRL;
}
diff --git a/drivers/media/pci/cx18/cx18-dvb.c b/drivers/media/pci/cx18/cx18-dvb.c
index 3eac59c51231..03d0478170a7 100644
--- a/drivers/media/pci/cx18/cx18-dvb.c
+++ b/drivers/media/pci/cx18/cx18-dvb.c
@@ -155,10 +155,8 @@ static int yuan_mpc718_mt352_reqfw(struct cx18_stream *stream,
}
if (ret) {
- CX18_ERR("The MPC718 board variant with the MT352 DVB-T"
- "demodualtor will not work without it\n");
- CX18_ERR("Run 'linux/Documentation/dvb/get_dvb_firmware "
- "mpc718' if you need the firmware\n");
+ CX18_ERR("The MPC718 board variant with the MT352 DVB-Tdemodualtor will not work without it\n");
+ CX18_ERR("Run 'linux/Documentation/dvb/get_dvb_firmware mpc718' if you need the firmware\n");
}
return ret;
}
diff --git a/drivers/media/pci/cx18/cx18-fileops.c b/drivers/media/pci/cx18/cx18-fileops.c
index df837408efd5..78b399b8613e 100644
--- a/drivers/media/pci/cx18/cx18-fileops.c
+++ b/drivers/media/pci/cx18/cx18-fileops.c
@@ -49,8 +49,7 @@ int cx18_claim_stream(struct cx18_open_id *id, int type)
/* Nothing should ever try to directly claim the IDX stream */
if (type == CX18_ENC_STREAM_TYPE_IDX) {
- CX18_WARN("MPEG Index stream cannot be claimed "
- "directly, but something tried.\n");
+ CX18_WARN("MPEG Index stream cannot be claimed directly, but something tried.\n");
return -EINVAL;
}
@@ -728,8 +727,7 @@ void cx18_stop_capture(struct cx18_open_id *id, int gop_end)
/* Stop internal use associated VBI and IDX streams */
if (test_bit(CX18_F_S_STREAMING, &s_vbi->s_flags) &&
!test_bit(CX18_F_S_APPL_IO, &s_vbi->s_flags)) {
- CX18_DEBUG_INFO("close stopping embedded VBI "
- "capture\n");
+ CX18_DEBUG_INFO("close stopping embedded VBI capture\n");
cx18_stop_v4l2_encode_stream(s_vbi, 0);
}
if (test_bit(CX18_F_S_STREAMING, &s_idx->s_flags)) {
diff --git a/drivers/media/pci/cx18/cx18-ioctl.c b/drivers/media/pci/cx18/cx18-ioctl.c
index fecca2a63891..0faeb979ceb9 100644
--- a/drivers/media/pci/cx18/cx18-ioctl.c
+++ b/drivers/media/pci/cx18/cx18-ioctl.c
@@ -951,8 +951,7 @@ static int cx18_encoder_cmd(struct file *file, void *fh,
return 0;
h = cx18_find_handle(cx);
if (h == CX18_INVALID_TASK_HANDLE) {
- CX18_ERR("Can't find valid task handle for "
- "V4L2_ENC_CMD_PAUSE\n");
+ CX18_ERR("Can't find valid task handle for V4L2_ENC_CMD_PAUSE\n");
return -EBADFD;
}
cx18_mute(cx);
@@ -968,8 +967,7 @@ static int cx18_encoder_cmd(struct file *file, void *fh,
return 0;
h = cx18_find_handle(cx);
if (h == CX18_INVALID_TASK_HANDLE) {
- CX18_ERR("Can't find valid task handle for "
- "V4L2_ENC_CMD_RESUME\n");
+ CX18_ERR("Can't find valid task handle for V4L2_ENC_CMD_RESUME\n");
return -EBADFD;
}
cx18_vapi(cx, CX18_CPU_CAPTURE_RESUME, 1, h);
diff --git a/drivers/media/pci/cx18/cx18-irq.c b/drivers/media/pci/cx18/cx18-irq.c
index 80edfe93a3d8..361426485e98 100644
--- a/drivers/media/pci/cx18/cx18-irq.c
+++ b/drivers/media/pci/cx18/cx18-irq.c
@@ -59,8 +59,8 @@ irqreturn_t cx18_irq_handler(int irq, void *dev_id)
cx18_write_reg_expect(cx, hw2, HW2_INT_CLR_STATUS, ~hw2, hw2);
if (sw1 || sw2 || hw2)
- CX18_DEBUG_HI_IRQ("received interrupts "
- "SW1: %x SW2: %x HW2: %x\n", sw1, sw2, hw2);
+ CX18_DEBUG_HI_IRQ("received interrupts SW1: %x SW2: %x HW2: %x\n",
+ sw1, sw2, hw2);
/*
* SW1 responses have to happen first. The sending XPU times out the
diff --git a/drivers/media/pci/cx18/cx18-mailbox.c b/drivers/media/pci/cx18/cx18-mailbox.c
index 1f8aa9a749a1..d3cf3588879f 100644
--- a/drivers/media/pci/cx18/cx18-mailbox.c
+++ b/drivers/media/pci/cx18/cx18-mailbox.c
@@ -123,8 +123,8 @@ static void dump_mb(struct cx18 *cx, struct cx18_mailbox *mb, char *name)
if (!(cx18_debug & CX18_DBGFLG_API))
return;
- CX18_DEBUG_API("%s: req %#010x ack %#010x cmd %#010x err %#010x args%s"
- "\n", name, mb->request, mb->ack, mb->cmd, mb->error,
+ CX18_DEBUG_API("%s: req %#010x ack %#010x cmd %#010x err %#010x args%s\n",
+ name, mb->request, mb->ack, mb->cmd, mb->error,
u32arr2hex(mb->args, MAX_MB_ARGUMENTS, argstr));
}
@@ -255,8 +255,8 @@ static void epu_dma_done(struct cx18 *cx, struct cx18_in_work_order *order)
s = cx18_handle_to_stream(cx, handle);
if (s == NULL) {
- CX18_WARN("Got DMA done notification for unknown/inactive"
- " handle %d, %s mailbox seq no %d\n", handle,
+ CX18_WARN("Got DMA done notification for unknown/inactive handle %d, %s mailbox seq no %d\n",
+ handle,
(order->flags & CX18_F_EWO_MB_STALE_UPON_RECEIPT) ?
"stale" : "good", mb->request);
return;
@@ -290,9 +290,8 @@ static void epu_dma_done(struct cx18 *cx, struct cx18_in_work_order *order)
if ((order->flags & CX18_F_EWO_MB_STALE_UPON_RECEIPT) &&
!(id >= s->mdl_base_idx &&
id < (s->mdl_base_idx + s->buffers))) {
- CX18_WARN("Fell behind! Ignoring stale mailbox with "
- " inconsistent data. Lost MDL for mailbox "
- "seq no %d\n", mb->request);
+ CX18_WARN("Fell behind! Ignoring stale mailbox with inconsistent data. Lost MDL for mailbox seq no %d\n",
+ mb->request);
break;
}
mdl = cx18_queue_get_mdl(s, id, mdl_ack->data_used);
@@ -418,9 +417,7 @@ static void mb_ack_irq(struct cx18 *cx, struct cx18_in_work_order *order)
/* Don't ack if the RPU has gotten impatient and timed us out */
if (req != cx18_readl(cx, &ack_mb->request) ||
req == cx18_readl(cx, &ack_mb->ack)) {
- CX18_DEBUG_WARN("Possibly falling behind: %s self-ack'ed our "
- "incoming %s to EPU mailbox (sequence no. %u) "
- "while processing\n",
+ CX18_DEBUG_WARN("Possibly falling behind: %s self-ack'ed our incoming %s to EPU mailbox (sequence no. %u) while processing\n",
rpu_str[order->rpu], rpu_str[order->rpu], req);
order->flags |= CX18_F_EWO_MB_STALE_WHILE_PROC;
return;
@@ -555,8 +552,7 @@ void cx18_api_epu_cmd_irq(struct cx18 *cx, int rpu)
order = alloc_in_work_order_irq(cx);
if (order == NULL) {
- CX18_WARN("Unable to find blank work order form to schedule "
- "incoming mailbox command processing\n");
+ CX18_WARN("Unable to find blank work order form to schedule incoming mailbox command processing\n");
return;
}
@@ -573,9 +569,7 @@ void cx18_api_epu_cmd_irq(struct cx18 *cx, int rpu)
(&order_mb->request)[i] = cx18_readl(cx, &mb->request + i);
if (order_mb->request == order_mb->ack) {
- CX18_DEBUG_WARN("Possibly falling behind: %s self-ack'ed our "
- "incoming %s to EPU mailbox (sequence no. %u)"
- "\n",
+ CX18_DEBUG_WARN("Possibly falling behind: %s self-ack'ed our incoming %s to EPU mailbox (sequence no. %u)\n",
rpu_str[rpu], rpu_str[rpu], order_mb->request);
if (cx18_debug & CX18_DBGFLG_WARN)
dump_mb(cx, order_mb, "incoming");
@@ -663,8 +657,8 @@ static int cx18_api_call(struct cx18 *cx, u32 cmd, int args, u32 data[])
if (req != ack) {
/* waited long enough, make the mbox "not busy" from our end */
cx18_writel(cx, req, &mb->ack);
- CX18_ERR("mbox was found stuck busy when setting up for %s; "
- "clearing busy and trying to proceed\n", info->name);
+ CX18_ERR("mbox was found stuck busy when setting up for %s; clearing busy and trying to proceed\n",
+ info->name);
} else if (ret != timeout)
CX18_DEBUG_API("waited %u msecs for busy mbox to be acked\n",
jiffies_to_msecs(timeout-ret));
@@ -707,14 +701,10 @@ static int cx18_api_call(struct cx18 *cx, u32 cmd, int args, u32 data[])
mutex_unlock(mb_lock);
if (ret >= timeout) {
/* Timed out */
- CX18_DEBUG_WARN("sending %s timed out waiting %d msecs "
- "for RPU acknowledgement\n",
+ CX18_DEBUG_WARN("sending %s timed out waiting %d msecs for RPU acknowledgment\n",
info->name, jiffies_to_msecs(ret));
} else {
- CX18_DEBUG_WARN("woken up before mailbox ack was ready "
- "after submitting %s to RPU. only "
- "waited %d msecs on req %u but awakened"
- " with unmatched ack %u\n",
+ CX18_DEBUG_WARN("woken up before mailbox ack was ready after submitting %s to RPU. only waited %d msecs on req %u but awakened with unmatched ack %u\n",
info->name,
jiffies_to_msecs(ret),
req, ack);
@@ -723,8 +713,7 @@ static int cx18_api_call(struct cx18 *cx, u32 cmd, int args, u32 data[])
}
if (ret >= timeout)
- CX18_DEBUG_WARN("failed to be awakened upon RPU acknowledgment "
- "sending %s; timed out waiting %d msecs\n",
+ CX18_DEBUG_WARN("failed to be awakened upon RPU acknowledgment sending %s; timed out waiting %d msecs\n",
info->name, jiffies_to_msecs(ret));
else
CX18_DEBUG_HI_API("waited %u msecs for %s to be acked\n",
diff --git a/drivers/media/pci/cx18/cx18-queue.c b/drivers/media/pci/cx18/cx18-queue.c
index 2a247d264b87..13e96d6055eb 100644
--- a/drivers/media/pci/cx18/cx18-queue.c
+++ b/drivers/media/pci/cx18/cx18-queue.c
@@ -164,9 +164,8 @@ struct cx18_mdl *cx18_queue_get_mdl(struct cx18_stream *s, u32 id,
mdl->skipped++;
if (mdl->skipped >= atomic_read(&s->q_busy.depth)-1) {
/* mdl must have fallen out of rotation */
- CX18_WARN("Skipped %s, MDL %d, %d "
- "times - it must have dropped out of "
- "rotation\n", s->name, mdl->id,
+ CX18_WARN("Skipped %s, MDL %d, %d times - it must have dropped out of rotation\n",
+ s->name, mdl->id,
mdl->skipped);
/* Sweep it up to put it back into rotation */
list_move_tail(&mdl->list, &sweep_up);
@@ -352,8 +351,7 @@ int cx18_stream_alloc(struct cx18_stream *s)
if (s->buffers == 0)
return 0;
- CX18_DEBUG_INFO("Allocate %s stream: %d x %d buffers "
- "(%d.%02d kB total)\n",
+ CX18_DEBUG_INFO("Allocate %s stream: %d x %d buffers (%d.%02d kB total)\n",
s->name, s->buffers, s->buf_size,
s->buffers * s->buf_size / 1024,
(s->buffers * s->buf_size * 100 / 1024) % 100);
diff --git a/drivers/media/pci/cx18/cx18-streams.c b/drivers/media/pci/cx18/cx18-streams.c
index f3802ec1b383..7f699f0ee76c 100644
--- a/drivers/media/pci/cx18/cx18-streams.c
+++ b/drivers/media/pci/cx18/cx18-streams.c
@@ -353,8 +353,8 @@ static int cx18_prep_dev(struct cx18 *cx, int type)
if (cx->card->hw_all & CX18_HW_DVB) {
s->dvb = kzalloc(sizeof(struct cx18_dvb), GFP_KERNEL);
if (s->dvb == NULL) {
- CX18_ERR("Couldn't allocate cx18_dvb structure"
- " for %s\n", s->name);
+ CX18_ERR("Couldn't allocate cx18_dvb structure for %s\n",
+ s->name);
return -ENOMEM;
}
} else {
@@ -462,8 +462,7 @@ static int cx18_reg_dev(struct cx18 *cx, int type)
case VFL_TYPE_VBI:
if (cx->stream_buffers[type])
- CX18_INFO("Registered device %s for %s "
- "(%d x %d bytes)\n",
+ CX18_INFO("Registered device %s for %s (%d x %d bytes)\n",
name, s->name, cx->stream_buffers[type],
cx->stream_buf_size[type]);
else
diff --git a/drivers/media/pci/cx23885/altera-ci.c b/drivers/media/pci/cx23885/altera-ci.c
index aaf4e46ff3e9..5c94e312cba3 100644
--- a/drivers/media/pci/cx23885/altera-ci.c
+++ b/drivers/media/pci/cx23885/altera-ci.c
@@ -48,6 +48,9 @@
* | DATA7| DATA6| DATA5| DATA4| DATA3| DATA2| DATA1| DATA0|
* +-------+-------+-------+-------+-------+-------+-------+-------+
*/
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <dvb_demux.h>
#include <dvb_frontend.h>
#include "altera-ci.h"
@@ -84,16 +87,18 @@ MODULE_DESCRIPTION("altera FPGA CI module");
MODULE_AUTHOR("Igor M. Liplianin <liplianin@netup.ru>");
MODULE_LICENSE("GPL");
-#define ci_dbg_print(args...) \
+#define ci_dbg_print(fmt, args...) \
do { \
if (ci_dbg) \
- printk(KERN_DEBUG args); \
+ printk(KERN_DEBUG pr_fmt("%s: " fmt), \
+ __func__, ##args); \
} while (0)
-#define pid_dbg_print(args...) \
+#define pid_dbg_print(fmt, args...) \
do { \
if (pid_dbg) \
- printk(KERN_DEBUG args); \
+ printk(KERN_DEBUG pr_fmt("%s: " fmt), \
+ __func__, ##args); \
} while (0)
struct altera_ci_state;
@@ -718,7 +723,7 @@ int altera_ci_init(struct altera_ci_config *config, int ci_nr)
if (temp_int != NULL) {
inter = temp_int->internal;
(inter->cis_used)++;
- inter->fpga_rw = config->fpga_rw;
+ inter->fpga_rw = config->fpga_rw;
ci_dbg_print("%s: Find Internal Structure!\n", __func__);
} else {
inter = kzalloc(sizeof(struct fpga_internal), GFP_KERNEL);
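
The ci_dbg_print() rework above is the standard way to give an ad-hoc debug macro a pr_fmt() prefix: the format string becomes a named macro parameter so pr_fmt() can wrap it, and GNU cpp's ##args swallows the trailing comma when a call supplies no varargs. A sketch of the expansion (the function name in the sample output is assumed):

	#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

	#define ci_dbg_print(fmt, args...) \
	do { \
		if (ci_dbg) \
			printk(KERN_DEBUG pr_fmt("%s: " fmt), \
			       __func__, ##args); \
	} while (0)

	/* ci_dbg_print("CAM present\n") expands, after string pasting, to
	 *   printk(KERN_DEBUG "altera_ci" ": " "%s: " "CAM present\n", __func__);
	 * printing something like "altera_ci: altera_ci_slot_status: CAM present". */

One side effect: callers that still pass __func__ themselves, like the "Find Internal Structure!" call above, now print the function name twice.
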
diff --git a/drivers/media/pci/cx23885/altera-ci.h b/drivers/media/pci/cx23885/altera-ci.h
index 57a40c84b46e..ababd80fee93 100644
--- a/drivers/media/pci/cx23885/altera-ci.h
+++ b/drivers/media/pci/cx23885/altera-ci.h
@@ -48,24 +48,24 @@ extern int altera_ci_tuner_reset(void *dev, int ci_nr);
static inline int altera_ci_init(struct altera_ci_config *config, int ci_nr)
{
- printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
+ pr_warn("%s: driver disabled by Kconfig\n", __func__);
return 0;
}
static inline void altera_ci_release(void *dev, int ci_nr)
{
- printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
+ pr_warn("%s: driver disabled by Kconfig\n", __func__);
}
static inline int altera_ci_irq(void *dev)
{
- printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
+ pr_warn("%s: driver disabled by Kconfig\n", __func__);
return 0;
}
static inline int altera_ci_tuner_reset(void *dev, int ci_nr)
{
- printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
+ pr_warn("%s: driver disabled by Kconfig\n", __func__);
return 0;
}
@@ -74,19 +74,19 @@ static inline int altera_ci_tuner_reset(void *dev, int ci_nr)
static inline int altera_hw_filt_init(struct altera_ci_config *config,
int hw_filt_nr)
{
- printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
+ pr_warn("%s: driver disabled by Kconfig\n", __func__);
return 0;
}
static inline void altera_hw_filt_release(void *dev, int filt_nr)
{
- printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
+ pr_warn("%s: driver disabled by Kconfig\n", __func__);
}
static inline int altera_pid_feed_control(void *dev, int filt_nr,
struct dvb_demux_feed *dvbdmxfeed, int onoff)
{
- printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
+ pr_warn("%s: driver disabled by Kconfig\n", __func__);
return 0;
}
diff --git a/drivers/media/pci/cx23885/cimax2.c b/drivers/media/pci/cx23885/cimax2.c
index 631e4f24aea6..5e8e134d81c2 100644
--- a/drivers/media/pci/cx23885/cimax2.c
+++ b/drivers/media/pci/cx23885/cimax2.c
@@ -65,10 +65,11 @@ static unsigned int ci_irq_enable;
module_param(ci_irq_enable, int, 0644);
MODULE_PARM_DESC(ci_irq_enable, "Enable IRQ from CAM");
-#define ci_dbg_print(args...) \
+#define ci_dbg_print(fmt, args...) \
do { \
if (ci_dbg) \
- printk(KERN_DEBUG args); \
+ printk(KERN_DEBUG pr_fmt("%s: " fmt), \
+ __func__, ##args); \
} while (0)
#define ci_irq_flags() (ci_irq_enable ? NETUP_IRQ_IRQAM : 0)
@@ -135,8 +136,7 @@ static int netup_write_i2c(struct i2c_adapter *i2c_adap, u8 addr, u8 reg,
};
if (1 + len > sizeof(buffer)) {
- printk(KERN_WARNING
- "%s: i2c wr reg=%04x: len=%d is too big!\n",
+ pr_warn("%s: i2c wr reg=%04x: len=%d is too big!\n",
KBUILD_MODNAME, reg, len);
return -EINVAL;
}
@@ -365,11 +365,8 @@ static void netup_read_ci_status(struct work_struct *work)
if (ret != 0)
return;
- ci_dbg_print("%s: Slot Status Addr=[0x%04x], "
- "Reg=[0x%02x], data=%02x, "
- "TS config = %02x\n", __func__,
- state->ci_i2c_addr, 0, buf[0],
- buf[0]);
+ ci_dbg_print("%s: Slot Status Addr=[0x%04x], Reg=[0x%02x], data=%02x, TS config = %02x\n",
+ __func__, state->ci_i2c_addr, 0, buf[0], buf[0]);
if (buf[0] & 1)
diff --git a/drivers/media/pci/cx23885/cx23885-417.c b/drivers/media/pci/cx23885/cx23885-417.c
index da892f3e3c29..2ff1d1e274be 100644
--- a/drivers/media/pci/cx23885/cx23885-417.c
+++ b/drivers/media/pci/cx23885/cx23885-417.c
@@ -20,6 +20,9 @@
* GNU General Public License for more details.
*/
+#include "cx23885.h"
+#include "cx23885-ioctl.h"
+
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
@@ -32,9 +35,6 @@
#include <media/v4l2-ioctl.h>
#include <media/drv-intf/cx2341x.h>
-#include "cx23885.h"
-#include "cx23885-ioctl.h"
-
#define CX23885_FIRM_IMAGE_SIZE 376836
#define CX23885_FIRM_IMAGE_NAME "v4l-cx23885-enc.fw"
@@ -55,8 +55,8 @@ MODULE_PARM_DESC(v4l_debug, "enable V4L debug messages");
#define dprintk(level, fmt, arg...)\
do { if (v4l_debug >= level) \
- printk(KERN_DEBUG "%s: " fmt, \
- (dev) ? dev->name : "cx23885[?]", ## arg); \
+ printk(KERN_DEBUG pr_fmt("%s: 417:" fmt), \
+ __func__, ##arg); \
} while (0)
static struct cx23885_tvnorm cx23885_tvnorms[] = {
@@ -769,10 +769,8 @@ static int cx23885_mbox_func(void *priv,
without side effects */
mc417_memory_read(dev, dev->cx23417_mailbox - 4, &value);
if (value != 0x12345678) {
- printk(KERN_ERR
- "Firmware and/or mailbox pointer not initialized "
- "or corrupted, signature = 0x%x, cmd = %s\n", value,
- cmd_to_str(command));
+ pr_err("Firmware and/or mailbox pointer not initialized or corrupted, signature = 0x%x, cmd = %s\n",
+ value, cmd_to_str(command));
return -1;
}
@@ -781,8 +779,8 @@ static int cx23885_mbox_func(void *priv,
*/
mc417_memory_read(dev, dev->cx23417_mailbox, &flag);
if (flag) {
- printk(KERN_ERR "ERROR: Mailbox appears to be in use "
- "(%x), cmd = %s\n", flag, cmd_to_str(command));
+ pr_err("ERROR: Mailbox appears to be in use (%x), cmd = %s\n",
+ flag, cmd_to_str(command));
return -1;
}
@@ -811,7 +809,7 @@ static int cx23885_mbox_func(void *priv,
if (0 != (flag & 4))
break;
if (time_after(jiffies, timeout)) {
- printk(KERN_ERR "ERROR: API Mailbox timeout\n");
+ pr_err("ERROR: API Mailbox timeout\n");
return -1;
}
udelay(10);
@@ -888,7 +886,7 @@ static int cx23885_find_mailbox(struct cx23885_dev *dev)
return i+1;
}
}
- printk(KERN_ERR "Mailbox signature values not found!\n");
+ pr_err("Mailbox signature values not found!\n");
return -1;
}
@@ -923,7 +921,7 @@ static int cx23885_load_firmware(struct cx23885_dev *dev)
IVTV_REG_APU, 0);
if (retval != 0) {
- printk(KERN_ERR "%s: Error with mc417_register_write\n",
+ pr_err("%s: Error with mc417_register_write\n",
__func__);
return -1;
}
@@ -932,25 +930,21 @@ static int cx23885_load_firmware(struct cx23885_dev *dev)
&dev->pci->dev);
if (retval != 0) {
- printk(KERN_ERR
- "ERROR: Hotplug firmware request failed (%s).\n",
- CX23885_FIRM_IMAGE_NAME);
- printk(KERN_ERR "Please fix your hotplug setup, the board will "
- "not work without firmware loaded!\n");
+ pr_err("ERROR: Hotplug firmware request failed (%s).\n",
+ CX23885_FIRM_IMAGE_NAME);
+ pr_err("Please fix your hotplug setup, the board will not work without firmware loaded!\n");
return -1;
}
if (firmware->size != CX23885_FIRM_IMAGE_SIZE) {
- printk(KERN_ERR "ERROR: Firmware size mismatch "
- "(have %zu, expected %d)\n",
- firmware->size, CX23885_FIRM_IMAGE_SIZE);
+ pr_err("ERROR: Firmware size mismatch (have %zu, expected %d)\n",
+ firmware->size, CX23885_FIRM_IMAGE_SIZE);
release_firmware(firmware);
return -1;
}
if (0 != memcmp(firmware->data, magic, 8)) {
- printk(KERN_ERR
- "ERROR: Firmware magic mismatch, wrong file?\n");
+ pr_err("ERROR: Firmware magic mismatch, wrong file?\n");
release_firmware(firmware);
return -1;
}
@@ -962,7 +956,7 @@ static int cx23885_load_firmware(struct cx23885_dev *dev)
value = *dataptr;
checksum += ~value;
if (mc417_memory_write(dev, i, value) != 0) {
- printk(KERN_ERR "ERROR: Loading firmware failed!\n");
+ pr_err("ERROR: Loading firmware failed!\n");
release_firmware(firmware);
return -1;
}
@@ -973,15 +967,14 @@ static int cx23885_load_firmware(struct cx23885_dev *dev)
dprintk(1, "Verifying firmware ...\n");
for (i--; i >= 0; i--) {
if (mc417_memory_read(dev, i, &value) != 0) {
- printk(KERN_ERR "ERROR: Reading firmware failed!\n");
+ pr_err("ERROR: Reading firmware failed!\n");
release_firmware(firmware);
return -1;
}
checksum -= ~value;
}
if (checksum) {
- printk(KERN_ERR
- "ERROR: Firmware load failed (checksum mismatch).\n");
+ pr_err("ERROR: Firmware load failed (checksum mismatch).\n");
release_firmware(firmware);
return -1;
}
@@ -1006,7 +999,7 @@ static int cx23885_load_firmware(struct cx23885_dev *dev)
mc417_register_read(dev, 0x900C, &gpio_value);
if (retval < 0)
- printk(KERN_ERR "%s: Error with mc417_register_write\n",
+ pr_err("%s: Error with mc417_register_write\n",
__func__);
return 0;
}
@@ -1058,27 +1051,25 @@ static int cx23885_initialize_codec(struct cx23885_dev *dev, int startencoder)
dprintk(2, "%s() PING OK\n", __func__);
retval = cx23885_load_firmware(dev);
if (retval < 0) {
- printk(KERN_ERR "%s() f/w load failed\n", __func__);
+ pr_err("%s() f/w load failed\n", __func__);
return retval;
}
retval = cx23885_find_mailbox(dev);
if (retval < 0) {
- printk(KERN_ERR "%s() mailbox < 0, error\n",
+ pr_err("%s() mailbox < 0, error\n",
__func__);
return -1;
}
dev->cx23417_mailbox = retval;
retval = cx23885_api_cmd(dev, CX2341X_ENC_PING_FW, 0, 0);
if (retval < 0) {
- printk(KERN_ERR
- "ERROR: cx23417 firmware ping failed!\n");
+ pr_err("ERROR: cx23417 firmware ping failed!\n");
return -1;
}
retval = cx23885_api_cmd(dev, CX2341X_ENC_GET_VERSION, 0, 1,
&version);
if (retval < 0) {
- printk(KERN_ERR "ERROR: cx23417 firmware get encoder :"
- "version failed!\n");
+ pr_err("ERROR: cx23417 firmware get encoder :version failed!\n");
return -1;
}
dprintk(1, "cx23417 firmware version is 0x%08x\n", version);
@@ -1563,11 +1554,11 @@ int cx23885_417_register(struct cx23885_dev *dev)
err = video_register_device(dev->v4l_device,
VFL_TYPE_GRABBER, -1);
if (err < 0) {
- printk(KERN_INFO "%s: can't register mpeg device\n", dev->name);
+ pr_info("%s: can't register mpeg device\n", dev->name);
return err;
}
- printk(KERN_INFO "%s: registered device %s [mpeg]\n",
+ pr_info("%s: registered device %s [mpeg]\n",
dev->name, video_device_node_name(dev->v4l_device));
/* ST: Configure the encoder parameters, but don't begin
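
The 417 dprintk() rewrite above follows the same shape: gated on a module parameter, prefixed through pr_fmt(), and keyed on __func__ instead of the old (dev) ? dev->name : "cx23885[?]" expression. A condensed sketch, with the trade-off spelled out in the comment (output lines are approximate):

	static unsigned int v4l_debug;
	module_param(v4l_debug, int, 0644);

	#define dprintk(level, fmt, arg...)				\
	do {								\
		if (v4l_debug >= level)					\
			printk(KERN_DEBUG pr_fmt("%s: 417: " fmt),	\
			       __func__, ##arg);			\
	} while (0)

	/* Old output:  cx23885[0]: Verifying firmware ...
	 * New output:  cx23885: cx23885_load_firmware: 417: Verifying firmware ...
	 * The function name is gained; the per-card tag is lost, so on
	 * multi-card systems the messages no longer say which device spoke. */
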
diff --git a/drivers/media/pci/cx23885/cx23885-alsa.c b/drivers/media/pci/cx23885/cx23885-alsa.c
index 6115d4e148ba..c148f9a4a9ac 100644
--- a/drivers/media/pci/cx23885/cx23885-alsa.c
+++ b/drivers/media/pci/cx23885/cx23885-alsa.c
@@ -17,6 +17,9 @@
* GNU General Public License for more details.
*/
+#include "cx23885.h"
+#include "cx23885-reg.h"
+
#include <linux/module.h>
#include <linux/init.h>
#include <linux/device.h>
@@ -35,20 +38,14 @@
#include <sound/tlv.h>
-
-#include "cx23885.h"
-#include "cx23885-reg.h"
-
#define AUDIO_SRAM_CHANNEL SRAM_CH07
#define dprintk(level, fmt, arg...) do { \
if (audio_debug + 1 > level) \
- printk(KERN_INFO "%s: " fmt, chip->dev->name , ## arg); \
+ printk(KERN_DEBUG pr_fmt("%s: alsa: " fmt), \
+ chip->dev->name, ##arg); \
} while(0)
-#define dprintk_core(level, fmt, arg...) if (audio_debug >= level) \
- printk(KERN_DEBUG "%s: " fmt, chip->dev->name , ## arg)
-
/****************************************************************************
Module global static vars
****************************************************************************/
@@ -186,8 +183,8 @@ static int cx23885_start_audio_dma(struct cx23885_audio_dev *chip)
cx_write(AUD_INT_A_GPCNT_CTL, GP_COUNT_CONTROL_RESET);
atomic_set(&chip->count, 0);
- dprintk(1, "Start audio DMA, %d B/line, %d lines/FIFO, %d periods, %d "
- "byte buffer\n", buf->bpl, cx_read(audio_ch->cmds_start+12)>>1,
+ dprintk(1, "Start audio DMA, %d B/line, %d lines/FIFO, %d periods, %d byte buffer\n",
+ buf->bpl, cx_read(audio_ch->cmds_start+12)>>1,
chip->num_periods, buf->bpl * chip->num_periods);
/* Enables corresponding bits at AUD_INT_STAT */
@@ -247,7 +244,7 @@ int cx23885_audio_irq(struct cx23885_dev *dev, u32 status, u32 mask)
/* risc op code error */
if (status & AUD_INT_OPC_ERR) {
- printk(KERN_WARNING "%s/1: Audio risc op code error\n",
+ pr_warn("%s/1: Audio risc op code error\n",
dev->name);
cx_clear(AUD_INT_DMA_CTL, 0x11);
cx23885_sram_channel_dump(dev,
@@ -327,8 +324,7 @@ static int snd_cx23885_pcm_open(struct snd_pcm_substream *substream)
int err;
if (!chip) {
- printk(KERN_ERR "BUG: cx23885 can't find device struct."
- " Can't proceed with open\n");
+ pr_err("BUG: cx23885 can't find device struct. Can't proceed with open\n");
return -ENODEV;
}
@@ -555,8 +551,8 @@ struct cx23885_audio_dev *cx23885_audio_register(struct cx23885_dev *dev)
return NULL;
if (dev->sram_channels[AUDIO_SRAM_CHANNEL].cmds_start == 0) {
- printk(KERN_WARNING "%s(): Missing SRAM channel configuration "
- "for analog TV Audio\n", __func__);
+ pr_warn("%s(): Missing SRAM channel configuration for analog TV Audio\n",
+ __func__);
return NULL;
}
@@ -590,8 +586,8 @@ struct cx23885_audio_dev *cx23885_audio_register(struct cx23885_dev *dev)
error:
snd_card_free(card);
- printk(KERN_ERR "%s(): Failed to register analog "
- "audio adapter\n", __func__);
+ pr_err("%s(): Failed to register analog audio adapter\n",
+ __func__);
return NULL;
}
diff --git a/drivers/media/pci/cx23885/cx23885-cards.c b/drivers/media/pci/cx23885/cx23885-cards.c
index 99ba8d6328f0..0350f13c5a9f 100644
--- a/drivers/media/pci/cx23885/cx23885-cards.c
+++ b/drivers/media/pci/cx23885/cx23885-cards.c
@@ -15,6 +15,8 @@
* GNU General Public License for more details.
*/
+#include "cx23885.h"
+
#include <linux/init.h>
#include <linux/module.h>
#include <linux/pci.h>
@@ -23,7 +25,6 @@
#include <linux/firmware.h>
#include <misc/altera.h>
-#include "cx23885.h"
#include "tuner-xc2028.h"
#include "netup-eeprom.h"
#include "netup-init.h"
@@ -1096,26 +1097,24 @@ void cx23885_card_list(struct cx23885_dev *dev)
if (0 == dev->pci->subsystem_vendor &&
0 == dev->pci->subsystem_device) {
- printk(KERN_INFO
- "%s: Board has no valid PCIe Subsystem ID and can't\n"
- "%s: be autodetected. Pass card=<n> insmod option\n"
- "%s: to workaround that. Redirect complaints to the\n"
- "%s: vendor of the TV card. Best regards,\n"
- "%s: -- tux\n",
- dev->name, dev->name, dev->name, dev->name, dev->name);
+ pr_info("%s: Board has no valid PCIe Subsystem ID and can't\n"
+ "%s: be autodetected. Pass card=<n> insmod option\n"
+ "%s: to workaround that. Redirect complaints to the\n"
+ "%s: vendor of the TV card. Best regards,\n"
+ "%s: -- tux\n",
+ dev->name, dev->name, dev->name, dev->name, dev->name);
} else {
- printk(KERN_INFO
- "%s: Your board isn't known (yet) to the driver.\n"
- "%s: Try to pick one of the existing card configs via\n"
- "%s: card=<n> insmod option. Updating to the latest\n"
- "%s: version might help as well.\n",
- dev->name, dev->name, dev->name, dev->name);
+ pr_info("%s: Your board isn't known (yet) to the driver.\n"
+ "%s: Try to pick one of the existing card configs via\n"
+ "%s: card=<n> insmod option. Updating to the latest\n"
+ "%s: version might help as well.\n",
+ dev->name, dev->name, dev->name, dev->name);
}
- printk(KERN_INFO "%s: Here is a list of valid choices for the card=<n> insmod option:\n",
+ pr_info("%s: Here is a list of valid choices for the card=<n> insmod option:\n",
dev->name);
for (i = 0; i < cx23885_bcount; i++)
- printk(KERN_INFO "%s: card=%d -> %s\n",
- dev->name, i, cx23885_boards[i].name);
+ pr_info("%s: card=%d -> %s\n",
+ dev->name, i, cx23885_boards[i].name);
}
static void viewcast_eeprom(struct cx23885_dev *dev, u8 *eeprom_data)
@@ -1304,14 +1303,13 @@ static void hauppauge_eeprom(struct cx23885_dev *dev, u8 *eeprom_data)
*/
break;
default:
- printk(KERN_WARNING "%s: warning: "
- "unknown hauppauge model #%d\n",
+ pr_warn("%s: warning: unknown hauppauge model #%d\n",
dev->name, tv.model);
break;
}
- printk(KERN_INFO "%s: hauppauge eeprom: model=%d\n",
- dev->name, tv.model);
+ pr_info("%s: hauppauge eeprom: model=%d\n",
+ dev->name, tv.model);
}
/* Some TBS cards require initing a chip using a bitbanged SPI attached
@@ -1353,8 +1351,8 @@ int cx23885_tuner_callback(void *priv, int component, int command, int arg)
return 0;
if (command != 0) {
- printk(KERN_ERR "%s(): Unknown command 0x%x.\n",
- __func__, command);
+ pr_err("%s(): Unknown command 0x%x.\n",
+ __func__, command);
return -EINVAL;
}
@@ -2337,14 +2335,13 @@ void cx23885_card_setup(struct cx23885_dev *dev)
filename = "dvb-netup-altera-01.fw";
break;
}
- printk(KERN_INFO "NetUP card rev=0x%x fw_filename=%s\n",
- cinfo.rev, filename);
+ pr_info("NetUP card rev=0x%x fw_filename=%s\n",
+ cinfo.rev, filename);
ret = request_firmware(&fw, filename, &dev->pci->dev);
if (ret != 0)
- printk(KERN_ERR "did not find the firmware file. (%s) "
- "Please see linux/Documentation/dvb/ for more details "
- "on firmware-problems.", filename);
+ pr_err("did not find the firmware file. (%s) Please see linux/Documentation/dvb/ for more details on firmware-problems.",
+ filename);
else
altera_init(&netup_config, fw);
diff --git a/drivers/media/pci/cx23885/cx23885-core.c b/drivers/media/pci/cx23885/cx23885-core.c
index c86b1093ab99..02b5ec549369 100644
--- a/drivers/media/pci/cx23885/cx23885-core.c
+++ b/drivers/media/pci/cx23885/cx23885-core.c
@@ -15,6 +15,8 @@
* GNU General Public License for more details.
*/
+#include "cx23885.h"
+
#include <linux/init.h>
#include <linux/list.h>
#include <linux/module.h>
@@ -27,7 +29,6 @@
#include <asm/div64.h>
#include <linux/firmware.h>
-#include "cx23885.h"
#include "cimax2.h"
#include "altera-ci.h"
#include "cx23888-ir.h"
@@ -50,7 +51,8 @@ MODULE_PARM_DESC(card, "card type");
#define dprintk(level, fmt, arg...)\
do { if (debug >= level)\
- printk(KERN_DEBUG "%s: " fmt, dev->name, ## arg);\
+ printk(KERN_DEBUG pr_fmt("%s: " fmt), \
+ __func__, ##arg); \
} while (0)
static unsigned int cx23885_devcount;
@@ -407,19 +409,18 @@ static int cx23885_risc_decode(u32 risc)
};
int i;
- printk("0x%08x [ %s", risc,
+ printk(KERN_DEBUG "0x%08x [ %s", risc,
instr[risc >> 28] ? instr[risc >> 28] : "INVALID");
for (i = ARRAY_SIZE(bits) - 1; i >= 0; i--)
if (risc & (1 << (i + 12)))
- printk(" %s", bits[i]);
- printk(" count=%d ]\n", risc & 0xfff);
+ pr_cont(" %s", bits[i]);
+ pr_cont(" count=%d ]\n", risc & 0xfff);
return incr[risc >> 28] ? incr[risc >> 28] : 1;
}
static void cx23885_wakeup(struct cx23885_tsport *port,
struct cx23885_dmaqueue *q, u32 count)
{
- struct cx23885_dev *dev = port->dev;
struct cx23885_buffer *buf;
if (list_empty(&q->active))
@@ -530,44 +531,44 @@ void cx23885_sram_channel_dump(struct cx23885_dev *dev,
u32 risc;
unsigned int i, j, n;
- printk(KERN_WARNING "%s: %s - dma channel status dump\n",
- dev->name, ch->name);
+ pr_warn("%s: %s - dma channel status dump\n",
+ dev->name, ch->name);
for (i = 0; i < ARRAY_SIZE(name); i++)
- printk(KERN_WARNING "%s: cmds: %-15s: 0x%08x\n",
- dev->name, name[i],
- cx_read(ch->cmds_start + 4*i));
+ pr_warn("%s: cmds: %-15s: 0x%08x\n",
+ dev->name, name[i],
+ cx_read(ch->cmds_start + 4*i));
for (i = 0; i < 4; i++) {
risc = cx_read(ch->cmds_start + 4 * (i + 14));
- printk(KERN_WARNING "%s: risc%d: ", dev->name, i);
+ pr_warn("%s: risc%d: ", dev->name, i);
cx23885_risc_decode(risc);
}
for (i = 0; i < (64 >> 2); i += n) {
risc = cx_read(ch->ctrl_start + 4 * i);
/* No consideration for bits 63-32 */
- printk(KERN_WARNING "%s: (0x%08x) iq %x: ", dev->name,
- ch->ctrl_start + 4 * i, i);
+ pr_warn("%s: (0x%08x) iq %x: ", dev->name,
+ ch->ctrl_start + 4 * i, i);
n = cx23885_risc_decode(risc);
for (j = 1; j < n; j++) {
risc = cx_read(ch->ctrl_start + 4 * (i + j));
- printk(KERN_WARNING "%s: iq %x: 0x%08x [ arg #%d ]\n",
- dev->name, i+j, risc, j);
+ pr_warn("%s: iq %x: 0x%08x [ arg #%d ]\n",
+ dev->name, i+j, risc, j);
}
}
- printk(KERN_WARNING "%s: fifo: 0x%08x -> 0x%x\n",
- dev->name, ch->fifo_start, ch->fifo_start+ch->fifo_size);
- printk(KERN_WARNING "%s: ctrl: 0x%08x -> 0x%x\n",
- dev->name, ch->ctrl_start, ch->ctrl_start + 6*16);
- printk(KERN_WARNING "%s: ptr1_reg: 0x%08x\n",
- dev->name, cx_read(ch->ptr1_reg));
- printk(KERN_WARNING "%s: ptr2_reg: 0x%08x\n",
- dev->name, cx_read(ch->ptr2_reg));
- printk(KERN_WARNING "%s: cnt1_reg: 0x%08x\n",
- dev->name, cx_read(ch->cnt1_reg));
- printk(KERN_WARNING "%s: cnt2_reg: 0x%08x\n",
- dev->name, cx_read(ch->cnt2_reg));
+ pr_warn("%s: fifo: 0x%08x -> 0x%x\n",
+ dev->name, ch->fifo_start, ch->fifo_start+ch->fifo_size);
+ pr_warn("%s: ctrl: 0x%08x -> 0x%x\n",
+ dev->name, ch->ctrl_start, ch->ctrl_start + 6*16);
+ pr_warn("%s: ptr1_reg: 0x%08x\n",
+ dev->name, cx_read(ch->ptr1_reg));
+ pr_warn("%s: ptr2_reg: 0x%08x\n",
+ dev->name, cx_read(ch->ptr2_reg));
+ pr_warn("%s: cnt1_reg: 0x%08x\n",
+ dev->name, cx_read(ch->cnt1_reg));
+ pr_warn("%s: cnt2_reg: 0x%08x\n",
+ dev->name, cx_read(ch->cnt2_reg));
}
static void cx23885_risc_disasm(struct cx23885_tsport *port,
@@ -576,14 +577,14 @@ static void cx23885_risc_disasm(struct cx23885_tsport *port,
struct cx23885_dev *dev = port->dev;
unsigned int i, j, n;
- printk(KERN_INFO "%s: risc disasm: %p [dma=0x%08lx]\n",
+ pr_info("%s: risc disasm: %p [dma=0x%08lx]\n",
dev->name, risc->cpu, (unsigned long)risc->dma);
for (i = 0; i < (risc->size >> 2); i += n) {
- printk(KERN_INFO "%s: %04d: ", dev->name, i);
+ pr_info("%s: %04d: ", dev->name, i);
n = cx23885_risc_decode(le32_to_cpu(risc->cpu[i]));
for (j = 1; j < n; j++)
- printk(KERN_INFO "%s: %04d: 0x%08x [ arg #%d ]\n",
- dev->name, i + j, risc->cpu[i + j], j);
+ pr_info("%s: %04d: 0x%08x [ arg #%d ]\n",
+ dev->name, i + j, risc->cpu[i + j], j);
if (risc->cpu[i] == cpu_to_le32(RISC_JUMP))
break;
}
@@ -674,8 +675,8 @@ static int get_resources(struct cx23885_dev *dev)
dev->name))
return 0;
- printk(KERN_ERR "%s: can't get MMIO memory @ 0x%llx\n",
- dev->name, (unsigned long long)pci_resource_start(dev->pci, 0));
+ pr_err("%s: can't get MMIO memory @ 0x%llx\n",
+ dev->name, (unsigned long long)pci_resource_start(dev->pci, 0));
return -EBUSY;
}
@@ -793,15 +794,15 @@ static void cx23885_dev_checkrevision(struct cx23885_dev *dev)
dev->hwrevision = 0xb1;
break;
default:
- printk(KERN_ERR "%s() New hardware revision found 0x%x\n",
- __func__, dev->hwrevision);
+ pr_err("%s() New hardware revision found 0x%x\n",
+ __func__, dev->hwrevision);
}
if (dev->hwrevision)
- printk(KERN_INFO "%s() Hardware revision = 0x%02x\n",
+ pr_info("%s() Hardware revision = 0x%02x\n",
__func__, dev->hwrevision);
else
- printk(KERN_ERR "%s() Hardware revision unknown 0x%x\n",
- __func__, dev->hwrevision);
+ pr_err("%s() Hardware revision unknown 0x%x\n",
+ __func__, dev->hwrevision);
}
/* Find the first v4l2_subdev member of the group id in hw */
@@ -915,8 +916,7 @@ static int cx23885_dev_setup(struct cx23885_dev *dev)
cx23885_init_tsport(dev, &dev->ts2, 2);
if (get_resources(dev) < 0) {
- printk(KERN_ERR "CORE %s No more PCIe resources for "
- "subsystem: %04x:%04x\n",
+ pr_err("CORE %s No more PCIe resources for subsystem: %04x:%04x\n",
dev->name, dev->pci->subsystem_vendor,
dev->pci->subsystem_device);
@@ -930,11 +930,11 @@ static int cx23885_dev_setup(struct cx23885_dev *dev)
dev->bmmio = (u8 __iomem *)dev->lmmio;
- printk(KERN_INFO "CORE %s: subsystem: %04x:%04x, board: %s [card=%d,%s]\n",
- dev->name, dev->pci->subsystem_vendor,
- dev->pci->subsystem_device, cx23885_boards[dev->board].name,
- dev->board, card[dev->nr] == dev->board ?
- "insmod option" : "autodetected");
+ pr_info("CORE %s: subsystem: %04x:%04x, board: %s [card=%d,%s]\n",
+ dev->name, dev->pci->subsystem_vendor,
+ dev->pci->subsystem_device, cx23885_boards[dev->board].name,
+ dev->board, card[dev->nr] == dev->board ?
+ "insmod option" : "autodetected");
cx23885_pci_quirks(dev);
@@ -980,8 +980,8 @@ static int cx23885_dev_setup(struct cx23885_dev *dev)
if (cx23885_boards[dev->board].porta == CX23885_ANALOG_VIDEO) {
if (cx23885_video_register(dev) < 0) {
- printk(KERN_ERR "%s() Failed to register analog "
- "video adapters on VID_A\n", __func__);
+ pr_err("%s() Failed to register analog video adapters on VID_A\n",
+ __func__);
}
}
@@ -990,14 +990,13 @@ static int cx23885_dev_setup(struct cx23885_dev *dev)
dev->ts1.num_frontends =
cx23885_boards[dev->board].num_fds_portb;
if (cx23885_dvb_register(&dev->ts1) < 0) {
- printk(KERN_ERR "%s() Failed to register dvb adapters on VID_B\n",
+ pr_err("%s() Failed to register dvb adapters on VID_B\n",
__func__);
}
} else
if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER) {
if (cx23885_417_register(dev) < 0) {
- printk(KERN_ERR
- "%s() Failed to register 417 on VID_B\n",
+ pr_err("%s() Failed to register 417 on VID_B\n",
__func__);
}
}
@@ -1007,15 +1006,13 @@ static int cx23885_dev_setup(struct cx23885_dev *dev)
dev->ts2.num_frontends =
cx23885_boards[dev->board].num_fds_portc;
if (cx23885_dvb_register(&dev->ts2) < 0) {
- printk(KERN_ERR
- "%s() Failed to register dvb on VID_C\n",
+ pr_err("%s() Failed to register dvb on VID_C\n",
__func__);
}
} else
if (cx23885_boards[dev->board].portc == CX23885_MPEG_ENCODER) {
if (cx23885_417_register(dev) < 0) {
- printk(KERN_ERR
- "%s() Failed to register 417 on VID_C\n",
+ pr_err("%s() Failed to register 417 on VID_C\n",
__func__);
}
}
@@ -1344,7 +1341,7 @@ int cx23885_start_dma(struct cx23885_tsport *port,
if ((!(cx23885_boards[dev->board].portb & CX23885_MPEG_DVB)) &&
(!(cx23885_boards[dev->board].portc & CX23885_MPEG_DVB))) {
- printk("%s() Unsupported .portb/c (0x%08x)/(0x%08x)\n",
+ pr_err("%s() Unsupported .portb/c (0x%08x)/(0x%08x)\n",
__func__,
cx23885_boards[dev->board].portb,
cx23885_boards[dev->board].portc);
@@ -1531,7 +1528,6 @@ void cx23885_buf_queue(struct cx23885_tsport *port, struct cx23885_buffer *buf)
static void do_cancel_buffers(struct cx23885_tsport *port, char *reason)
{
- struct cx23885_dev *dev = port->dev;
struct cx23885_dmaqueue *q = &port->mpegq;
struct cx23885_buffer *buf;
unsigned long flags;
@@ -1551,8 +1547,6 @@ static void do_cancel_buffers(struct cx23885_tsport *port, char *reason)
void cx23885_cancel_buffers(struct cx23885_tsport *port)
{
- struct cx23885_dev *dev = port->dev;
-
dprintk(1, "%s()\n", __func__);
cx23885_stop_dma(port);
do_cancel_buffers(port, "cancel");
@@ -1579,8 +1573,8 @@ int cx23885_irq_417(struct cx23885_dev *dev, u32 status)
(status & VID_B_MSK_VBI_SYNC) ||
(status & VID_B_MSK_OF) ||
(status & VID_B_MSK_VBI_OF)) {
- printk(KERN_ERR "%s: V4L mpeg risc op code error, status "
- "= 0x%x\n", dev->name, status);
+ pr_err("%s: V4L mpeg risc op code error, status = 0x%x\n",
+ dev->name, status);
if (status & VID_B_MSK_BAD_PKT)
dprintk(1, " VID_B_MSK_BAD_PKT\n");
if (status & VID_B_MSK_OPC_ERR)
@@ -1641,7 +1635,7 @@ static int cx23885_irq_ts(struct cx23885_tsport *port, u32 status)
dprintk(7, " (VID_BC_MSK_OF 0x%08x)\n",
VID_BC_MSK_OF);
- printk(KERN_ERR "%s: mpeg risc op code error\n", dev->name);
+ pr_err("%s: mpeg risc op code error\n", dev->name);
cx_clear(port->reg_dma_ctl, port->dma_ctl_val);
cx23885_sram_channel_dump(dev,
@@ -1881,15 +1875,14 @@ void cx23885_gpio_set(struct cx23885_dev *dev, u32 mask)
if (mask & 0x0007fff8) {
if (encoder_on_portb(dev) || encoder_on_portc(dev))
- printk(KERN_ERR
- "%s: Setting GPIO on encoder ports\n",
+ pr_err("%s: Setting GPIO on encoder ports\n",
dev->name);
cx_set(MC417_RWD, (mask & 0x0007fff8) >> 3);
}
/* TODO: 23-19 */
if (mask & 0x00f80000)
- printk(KERN_INFO "%s: Unsupported\n", dev->name);
+ pr_info("%s: Unsupported\n", dev->name);
}
void cx23885_gpio_clear(struct cx23885_dev *dev, u32 mask)
@@ -1899,15 +1892,14 @@ void cx23885_gpio_clear(struct cx23885_dev *dev, u32 mask)
if (mask & 0x0007fff8) {
if (encoder_on_portb(dev) || encoder_on_portc(dev))
- printk(KERN_ERR
- "%s: Clearing GPIO moving on encoder ports\n",
+ pr_err("%s: Clearing GPIO moving on encoder ports\n",
dev->name);
cx_clear(MC417_RWD, (mask & 0x7fff8) >> 3);
}
/* TODO: 23-19 */
if (mask & 0x00f80000)
- printk(KERN_INFO "%s: Unsupported\n", dev->name);
+ pr_info("%s: Unsupported\n", dev->name);
}
u32 cx23885_gpio_get(struct cx23885_dev *dev, u32 mask)
@@ -1917,15 +1909,14 @@ u32 cx23885_gpio_get(struct cx23885_dev *dev, u32 mask)
if (mask & 0x0007fff8) {
if (encoder_on_portb(dev) || encoder_on_portc(dev))
- printk(KERN_ERR
- "%s: Reading GPIO moving on encoder ports\n",
+ pr_err("%s: Reading GPIO moving on encoder ports\n",
dev->name);
return (cx_read(MC417_RWD) & ((mask & 0x7fff8) >> 3)) << 3;
}
/* TODO: 23-19 */
if (mask & 0x00f80000)
- printk(KERN_INFO "%s: Unsupported\n", dev->name);
+ pr_info("%s: Unsupported\n", dev->name);
return 0;
}
@@ -1939,8 +1930,7 @@ void cx23885_gpio_enable(struct cx23885_dev *dev, u32 mask, int asoutput)
if (mask & 0x0007fff8) {
if (encoder_on_portb(dev) || encoder_on_portc(dev))
- printk(KERN_ERR
- "%s: Enabling GPIO on encoder ports\n",
+ pr_err("%s: Enabling GPIO on encoder ports\n",
dev->name);
}
@@ -1995,8 +1985,8 @@ static int cx23885_initdev(struct pci_dev *pci_dev,
/* print pci info */
dev->pci_rev = pci_dev->revision;
pci_read_config_byte(pci_dev, PCI_LATENCY_TIMER, &dev->pci_lat);
- printk(KERN_INFO "%s/0: found at %s, rev: %d, irq: %d, "
- "latency: %d, mmio: 0x%llx\n", dev->name,
+ pr_info("%s/0: found at %s, rev: %d, irq: %d, latency: %d, mmio: 0x%llx\n",
+ dev->name,
pci_name(pci_dev), dev->pci_rev, pci_dev->irq,
dev->pci_lat,
(unsigned long long)pci_resource_start(pci_dev, 0));
@@ -2004,14 +1994,14 @@ static int cx23885_initdev(struct pci_dev *pci_dev,
pci_set_master(pci_dev);
err = pci_set_dma_mask(pci_dev, 0xffffffff);
if (err) {
- printk("%s/0: Oops: no 32bit PCI DMA ???\n", dev->name);
+ pr_err("%s/0: Oops: no 32bit PCI DMA ???\n", dev->name);
goto fail_ctrl;
}
err = request_irq(pci_dev->irq, cx23885_irq,
IRQF_SHARED, dev->name, dev);
if (err < 0) {
- printk(KERN_ERR "%s: can't get IRQ %d\n",
+ pr_err("%s: can't get IRQ %d\n",
dev->name, pci_dev->irq);
goto fail_irq;
}
@@ -2097,7 +2087,7 @@ static struct pci_driver cx23885_pci_driver = {
static int __init cx23885_init(void)
{
- printk(KERN_INFO "cx23885 driver version %s loaded\n",
+ pr_info("cx23885 driver version %s loaded\n",
CX23885_VERSION);
return pci_register_driver(&cx23885_pci_driver);
}
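
cx23885_risc_decode() above is the one spot in this file where a single log line is assembled from several calls. The first fragment gets an explicit KERN_DEBUG level; the rest go through pr_cont(), which expands to printk(KERN_CONT ...) and tells the logging core to append to the previous record instead of opening an unleveled new one. Condensed from the hunk:

	printk(KERN_DEBUG "0x%08x [ %s", risc,
	       instr[risc >> 28] ? instr[risc >> 28] : "INVALID");
	for (i = ARRAY_SIZE(bits) - 1; i >= 0; i--)
		if (risc & (1 << (i + 12)))
			pr_cont(" %s", bits[i]);
	pr_cont(" count=%d ]\n", risc & 0xfff);
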
diff --git a/drivers/media/pci/cx23885/cx23885-dvb.c b/drivers/media/pci/cx23885/cx23885-dvb.c
index 818f3c2fc98d..589a168d1df4 100644
--- a/drivers/media/pci/cx23885/cx23885-dvb.c
+++ b/drivers/media/pci/cx23885/cx23885-dvb.c
@@ -15,6 +15,8 @@
* GNU General Public License for more details.
*/
+#include "cx23885.h"
+
#include <linux/module.h>
#include <linux/init.h>
#include <linux/device.h>
@@ -23,7 +25,6 @@
#include <linux/file.h>
#include <linux/suspend.h>
-#include "cx23885.h"
#include <media/v4l2-common.h>
#include "dvb_ca_en50221.h"
@@ -80,7 +81,8 @@ static unsigned int debug;
#define dprintk(level, fmt, arg...)\
do { if (debug >= level)\
- printk(KERN_DEBUG "%s/0: " fmt, dev->name, ## arg);\
+ printk(KERN_DEBUG pr_fmt("%s dvb: " fmt), \
+ __func__, ##arg); \
} while (0)
/* ------------------------------------------------------------------ */
@@ -1101,7 +1103,7 @@ static int dvb_register_ci_mac(struct cx23885_tsport *port)
netup_get_card_info(&dev->i2c_bus[0].i2c_adap, &cinfo);
memcpy(port->frontends.adapter.proposed_mac,
cinfo.port[port->nr - 1].mac, 6);
- printk(KERN_INFO "NetUP Dual DVB-S2 CI card port%d MAC=%pM\n",
+ pr_info("NetUP Dual DVB-S2 CI card port%d MAC=%pM\n",
port->nr, port->frontends.adapter.proposed_mac);
netup_ci_init(port);
@@ -1127,7 +1129,7 @@ static int dvb_register_ci_mac(struct cx23885_tsport *port)
/* Read entire EEPROM */
dev->i2c_bus[0].i2c_client.addr = 0xa0 >> 1;
tveeprom_read(&dev->i2c_bus[0].i2c_client, eeprom, sizeof(eeprom));
- printk(KERN_INFO "TeVii S470 MAC= %pM\n", eeprom + 0xa0);
+ pr_info("TeVii S470 MAC= %pM\n", eeprom + 0xa0);
memcpy(port->frontends.adapter.proposed_mac, eeprom + 0xa0, 6);
return 0;
}
@@ -1144,7 +1146,7 @@ static int dvb_register_ci_mac(struct cx23885_tsport *port)
dev->i2c_bus[0].i2c_client.addr = 0xa0 >> 1;
tveeprom_read(&dev->i2c_bus[0].i2c_client, eeprom,
sizeof(eeprom));
- printk(KERN_INFO "%s port %d MAC address: %pM\n",
+ pr_info("%s port %d MAC address: %pM\n",
cx23885_boards[dev->board].name, port->nr,
eeprom + 0xc0 + (port->nr-1) * 8);
memcpy(port->frontends.adapter.proposed_mac, eeprom + 0xc0 +
@@ -1185,7 +1187,7 @@ static int dvb_register_ci_mac(struct cx23885_tsport *port)
dev->i2c_bus[0].i2c_client.addr = 0xa0 >> 1;
tveeprom_read(&dev->i2c_bus[0].i2c_client, eeprom,
sizeof(eeprom));
- printk(KERN_INFO "%s MAC address: %pM\n",
+ pr_info("%s MAC address: %pM\n",
cx23885_boards[dev->board].name, eeprom + 0xc0);
memcpy(port->frontends.adapter.proposed_mac, eeprom + 0xc0, 6);
return 0;
@@ -1464,7 +1466,7 @@ static int dvb_register(struct cx23885_tsport *port)
return -ENODEV;
if (dib7000p_ops.i2c_enumeration(&i2c_bus->i2c_adap, 1, 0x12, &dib7070p_dib7000p_config) < 0) {
- printk(KERN_WARNING "Unable to enumerate dib7000p\n");
+ pr_warn("Unable to enumerate dib7000p\n");
return -ENODEV;
}
fe0->dvb.frontend = dib7000p_ops.init(&i2c_bus->i2c_adap, 0x80, &dib7070p_dib7000p_config);
@@ -1524,7 +1526,7 @@ static int dvb_register(struct cx23885_tsport *port)
fe = dvb_attach(xc4000_attach, fe0->dvb.frontend,
&dev->i2c_bus[1].i2c_adap, &cfg);
if (!fe) {
- printk(KERN_ERR "%s/2: xc4000 attach failed\n",
+ pr_err("%s/2: xc4000 attach failed\n",
dev->name);
goto frontend_detach;
}
@@ -1597,8 +1599,7 @@ static int dvb_register(struct cx23885_tsport *port)
&i2c_bus->i2c_adap,
LNBH24_PCL | LNBH24_TTX,
LNBH24_TEN, 0x09))
- printk(KERN_ERR
- "No LNBH24 found!\n");
+ pr_err("No LNBH24 found!\n");
}
}
@@ -1618,8 +1619,7 @@ static int dvb_register(struct cx23885_tsport *port)
&i2c_bus->i2c_adap,
LNBH24_PCL | LNBH24_TTX,
LNBH24_TEN, 0x0a))
- printk(KERN_ERR
- "No LNBH24 found!\n");
+ pr_err("No LNBH24 found!\n");
}
}
@@ -2482,14 +2482,13 @@ static int dvb_register(struct cx23885_tsport *port)
break;
default:
- printk(KERN_INFO "%s: The frontend of your DVB/ATSC card "
- " isn't supported yet\n",
- dev->name);
+ pr_info("%s: The frontend of your DVB/ATSC card isn't supported yet\n",
+ dev->name);
break;
}
if ((NULL == fe0->dvb.frontend) || (fe1 && NULL == fe1->dvb.frontend)) {
- printk(KERN_ERR "%s: frontend initialization failed\n",
+ pr_err("%s: frontend initialization failed\n",
dev->name);
goto frontend_detach;
}
@@ -2570,7 +2569,7 @@ int cx23885_dvb_register(struct cx23885_tsport *port)
* are for safety, and should provide a good foundation for the
* future addition of any multi-frontend cx23885 based boards.
*/
- printk(KERN_INFO "%s() allocating %d frontend(s)\n", __func__,
+ pr_info("%s() allocating %d frontend(s)\n", __func__,
port->num_frontends);
for (i = 1; i <= port->num_frontends; i++) {
@@ -2578,7 +2577,7 @@ int cx23885_dvb_register(struct cx23885_tsport *port)
if (vb2_dvb_alloc_frontend(
&port->frontends, i) == NULL) {
- printk(KERN_ERR "%s() failed to alloc\n", __func__);
+ pr_err("%s() failed to alloc\n", __func__);
return -ENOMEM;
}
@@ -2597,7 +2596,7 @@ int cx23885_dvb_register(struct cx23885_tsport *port)
/* dvb stuff */
/* We have to init the queue for each frontend on a port. */
- printk(KERN_INFO "%s: cx23885 based dvb card\n", dev->name);
+ pr_info("%s: cx23885 based dvb card\n", dev->name);
q = &fe0->dvb.dvbq;
q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
q->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF | VB2_READ;
@@ -2617,8 +2616,8 @@ int cx23885_dvb_register(struct cx23885_tsport *port)
}
err = dvb_register(port);
if (err != 0)
- printk(KERN_ERR "%s() dvb_register failed err = %d\n",
- __func__, err);
+ pr_err("%s() dvb_register failed err = %d\n",
+ __func__, err);
return err;
}
diff --git a/drivers/media/pci/cx23885/cx23885-f300.c b/drivers/media/pci/cx23885/cx23885-f300.c
index a6c45eb0a105..460cb8f314b2 100644
--- a/drivers/media/pci/cx23885/cx23885-f300.c
+++ b/drivers/media/pci/cx23885/cx23885-f300.c
@@ -122,7 +122,7 @@ static u8 f300_xfer(struct dvb_frontend *fe, u8 *buf)
}
if (i > 7) {
- printk(KERN_ERR "%s: timeout, the slave no response\n",
+ pr_err("%s: timeout, the slave no response\n",
__func__);
ret = 1; /* timeout, no response from the slave */
} else { /* the slave not busy, prepare for getting data */
diff --git a/drivers/media/pci/cx23885/cx23885-i2c.c b/drivers/media/pci/cx23885/cx23885-i2c.c
index 61591225be9a..8528032090f2 100644
--- a/drivers/media/pci/cx23885/cx23885-i2c.c
+++ b/drivers/media/pci/cx23885/cx23885-i2c.c
@@ -15,14 +15,14 @@
* GNU General Public License for more details.
*/
+#include "cx23885.h"
+
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <asm/io.h>
-#include "cx23885.h"
-
#include <media/v4l2-common.h>
static unsigned int i2c_debug;
@@ -35,7 +35,8 @@ MODULE_PARM_DESC(i2c_scan, "scan i2c bus at insmod time");
#define dprintk(level, fmt, arg...)\
do { if (i2c_debug >= level)\
- printk(KERN_DEBUG "%s/0: " fmt, dev->name, ## arg);\
+ printk(KERN_DEBUG pr_fmt("%s: i2c:" fmt), \
+ __func__, ##arg); \
} while (0)
#define I2C_WAIT_DELAY 32
@@ -119,9 +120,9 @@ static int i2c_sendbytes(struct i2c_adapter *i2c_adap,
if (!i2c_wait_done(i2c_adap))
goto eio;
if (i2c_debug) {
- printk(" <W %02x %02x", msg->addr << 1, msg->buf[0]);
+ printk(KERN_DEBUG " <W %02x %02x", msg->addr << 1, msg->buf[0]);
if (!(ctrl & I2C_NOSTOP))
- printk(" >\n");
+ pr_cont(" >\n");
}
for (cnt = 1; cnt < msg->len; cnt++) {
@@ -141,9 +142,9 @@ static int i2c_sendbytes(struct i2c_adapter *i2c_adap,
if (!i2c_wait_done(i2c_adap))
goto eio;
if (i2c_debug) {
- dprintk(1, " %02x", msg->buf[cnt]);
+ pr_cont(" %02x", msg->buf[cnt]);
if (!(ctrl & I2C_NOSTOP))
- dprintk(1, " >\n");
+ pr_cont(" >\n");
}
}
return msg->len;
@@ -151,7 +152,7 @@ static int i2c_sendbytes(struct i2c_adapter *i2c_adap,
eio:
retval = -EIO;
if (i2c_debug)
- printk(KERN_ERR " ERR: %d\n", retval);
+ pr_err(" ERR: %d\n", retval);
return retval;
}
@@ -212,15 +213,13 @@ static int i2c_readbytes(struct i2c_adapter *i2c_adap,
eio:
retval = -EIO;
if (i2c_debug)
- printk(KERN_ERR " ERR: %d\n", retval);
+ pr_err(" ERR: %d\n", retval);
return retval;
}
static int i2c_xfer(struct i2c_adapter *i2c_adap,
struct i2c_msg *msgs, int num)
{
- struct cx23885_i2c *bus = i2c_adap->algo_data;
- struct cx23885_dev *dev = bus->dev;
int i, retval = 0;
dprintk(1, "%s(num = %d)\n", __func__, num);
@@ -302,7 +301,7 @@ static void do_i2c_scan(char *name, struct i2c_client *c)
rc = i2c_master_recv(c, &buf, 0);
if (rc < 0)
continue;
- printk(KERN_INFO "%s: i2c scan: found device @ 0x%04x [%s]\n",
+ pr_info("%s: i2c scan: found device @ 0x%04x [%s]\n",
name, i, i2c_devs[i] ? i2c_devs[i] : "???");
}
}
@@ -330,12 +329,12 @@ int cx23885_i2c_register(struct cx23885_i2c *bus)
if (0 == bus->i2c_rc) {
dprintk(1, "%s: i2c bus %d registered\n", dev->name, bus->nr);
if (i2c_scan) {
- printk(KERN_INFO "%s: scan bus %d:\n",
+ pr_info("%s: scan bus %d:\n",
dev->name, bus->nr);
do_i2c_scan(dev->name, &bus->i2c_client);
}
} else
- printk(KERN_WARNING "%s: i2c bus %d register FAILED\n",
+ pr_warn("%s: i2c bus %d register FAILED\n",
dev->name, bus->nr);
/* Instantiate the IR receiver device, if present */
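
A quieter consequence of routing dprintk() through __func__ shows up in i2c_xfer() above and in the cx23885-core.c hunks (cx23885_wakeup(), do_cancel_buffers(), cx23885_cancel_buffers()): once the macro body no longer references dev, locals that existed only to feed the old format string are dead and have to go, or the build picks up -Wunused-variable warnings. In sketch form, with the transfer loop elided:

	static int i2c_xfer(struct i2c_adapter *i2c_adap,
			    struct i2c_msg *msgs, int num)
	{
		int retval = 0;

		/* Previously declared here, now deleted:
		 *   struct cx23885_i2c *bus = i2c_adap->algo_data;
		 *   struct cx23885_dev *dev = bus->dev;
		 * dprintk() prints __func__ rather than dev->name, so
		 * both locals would only draw unused-variable warnings. */
		dprintk(1, "%s(num = %d)\n", __func__, num);

		/* ... per-message transfer loop, unchanged ... */
		return retval;
	}
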
diff --git a/drivers/media/pci/cx23885/cx23885-input.c b/drivers/media/pci/cx23885/cx23885-input.c
index 410c3141c163..1f092febdbd1 100644
--- a/drivers/media/pci/cx23885/cx23885-input.c
+++ b/drivers/media/pci/cx23885/cx23885-input.c
@@ -30,13 +30,13 @@
* GNU General Public License for more details.
*/
+#include "cx23885.h"
+#include "cx23885-input.h"
+
#include <linux/slab.h>
#include <media/rc-core.h>
#include <media/v4l2-subdev.h>
-#include "cx23885.h"
-#include "cx23885-input.h"
-
#define MODULE_NAME "cx23885"
static void cx23885_input_process_measurements(struct cx23885_dev *dev,
diff --git a/drivers/media/pci/cx23885/cx23885-ir.c b/drivers/media/pci/cx23885/cx23885-ir.c
index 89dc4cc3e1ce..2cd5ac41ab75 100644
--- a/drivers/media/pci/cx23885/cx23885-ir.c
+++ b/drivers/media/pci/cx23885/cx23885-ir.c
@@ -16,12 +16,12 @@
* GNU General Public License for more details.
*/
-#include <media/v4l2-device.h>
-
#include "cx23885.h"
#include "cx23885-ir.h"
#include "cx23885-input.h"
+#include <media/v4l2-device.h>
+
#define CX23885_IR_RX_FIFO_SERVICE_REQ 0
#define CX23885_IR_RX_END_OF_RX_DETECTED 1
#define CX23885_IR_RX_HW_FIFO_OVERRUN 2
diff --git a/drivers/media/pci/cx23885/cx23885-vbi.c b/drivers/media/pci/cx23885/cx23885-vbi.c
index 75e7fa7b1121..369e545cac04 100644
--- a/drivers/media/pci/cx23885/cx23885-vbi.c
+++ b/drivers/media/pci/cx23885/cx23885-vbi.c
@@ -15,13 +15,13 @@
* GNU General Public License for more details.
*/
+#include "cx23885.h"
+
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
-#include "cx23885.h"
-
static unsigned int vbibufs = 4;
module_param(vbibufs, int, 0644);
MODULE_PARM_DESC(vbibufs, "number of vbi buffers, range 2-32");
@@ -32,7 +32,8 @@ MODULE_PARM_DESC(vbi_debug, "enable debug messages [vbi]");
#define dprintk(level, fmt, arg...)\
do { if (vbi_debug >= level)\
- printk(KERN_DEBUG "%s/0: " fmt, dev->name, ## arg);\
+ printk(KERN_DEBUG pr_fmt("%s: vbi:" fmt), \
+ __func__, ##arg); \
} while (0)
/* ------------------------------------------------------------------ */
diff --git a/drivers/media/pci/cx23885/cx23885-video.c b/drivers/media/pci/cx23885/cx23885-video.c
index 33d168ef278d..ecc580af0148 100644
--- a/drivers/media/pci/cx23885/cx23885-video.c
+++ b/drivers/media/pci/cx23885/cx23885-video.c
@@ -15,6 +15,9 @@
* GNU General Public License for more details.
*/
+#include "cx23885.h"
+#include "cx23885-video.h"
+
#include <linux/init.h>
#include <linux/list.h>
#include <linux/module.h>
@@ -27,8 +30,6 @@
#include <linux/kthread.h>
#include <asm/div64.h>
-#include "cx23885.h"
-#include "cx23885-video.h"
#include <media/v4l2-common.h>
#include <media/v4l2-ioctl.h>
#include <media/v4l2-event.h>
@@ -66,7 +67,8 @@ MODULE_PARM_DESC(vid_limit, "capture memory limit in megabytes");
#define dprintk(level, fmt, arg...)\
do { if (video_debug >= level)\
- printk(KERN_DEBUG "%s: " fmt, dev->name, ## arg);\
+ printk(KERN_DEBUG pr_fmt("%s: video:" fmt), \
+ __func__, ##arg); \
} while (0)
/* ------------------------------------------------------------------- */
@@ -194,7 +196,7 @@ u8 cx23885_flatiron_read(struct cx23885_dev *dev, u8 reg)
ret = i2c_transfer(&dev->i2c_bus[2].i2c_adap, &msg[0], 2);
if (ret != 2)
- printk(KERN_ERR "%s() error\n", __func__);
+ pr_err("%s() error\n", __func__);
return b1[0];
}
@@ -811,7 +813,6 @@ static int vidioc_log_status(struct file *file, void *priv)
static int cx23885_query_audinput(struct file *file, void *priv,
struct v4l2_audio *i)
{
- struct cx23885_dev *dev = video_drvdata(file);
static const char *iname[] = {
[0] = "Baseband L/R 1",
[1] = "Baseband L/R 2",
@@ -1000,7 +1001,7 @@ static int cx23885_set_freq_via_ops(struct cx23885_dev *dev,
fe->ops.tuner_ops.set_analog_params(fe, &params);
}
else
- printk(KERN_ERR "%s() No analog tuner, aborting\n", __func__);
+ pr_err("%s() No analog tuner, aborting\n", __func__);
/* When changing channels it is required to reset TVAUDIO */
msleep(100);
@@ -1058,15 +1059,14 @@ int cx23885_video_irq(struct cx23885_dev *dev, u32 status)
if (status & VID_BC_MSK_OPC_ERR) {
dprintk(7, " (VID_BC_MSK_OPC_ERR 0x%08x)\n",
VID_BC_MSK_OPC_ERR);
- printk(KERN_WARNING "%s: video risc op code error\n",
+ pr_warn("%s: video risc op code error\n",
dev->name);
cx23885_sram_channel_dump(dev,
&dev->sram_channels[SRAM_CH01]);
}
if (status & VID_BC_MSK_SYNC)
- dprintk(7, " (VID_BC_MSK_SYNC 0x%08x) "
- "video lines miss-match\n",
+ dprintk(7, " (VID_BC_MSK_SYNC 0x%08x) video lines miss-match\n",
VID_BC_MSK_SYNC);
if (status & VID_BC_MSK_OF)
@@ -1297,11 +1297,11 @@ int cx23885_video_register(struct cx23885_dev *dev)
err = video_register_device(dev->video_dev, VFL_TYPE_GRABBER,
video_nr[dev->nr]);
if (err < 0) {
- printk(KERN_INFO "%s: can't register video device\n",
+ pr_info("%s: can't register video device\n",
dev->name);
goto fail_unreg;
}
- printk(KERN_INFO "%s: registered device %s [v4l2]\n",
+ pr_info("%s: registered device %s [v4l2]\n",
dev->name, video_device_node_name(dev->video_dev));
/* register VBI device */
@@ -1311,11 +1311,11 @@ int cx23885_video_register(struct cx23885_dev *dev)
err = video_register_device(dev->vbi_dev, VFL_TYPE_VBI,
vbi_nr[dev->nr]);
if (err < 0) {
- printk(KERN_INFO "%s: can't register vbi device\n",
+ pr_info("%s: can't register vbi device\n",
dev->name);
goto fail_unreg;
}
- printk(KERN_INFO "%s: registered device %s\n",
+ pr_info("%s: registered device %s\n",
dev->name, video_device_node_name(dev->vbi_dev));
/* Register ALSA audio device */
diff --git a/drivers/media/pci/cx23885/cx23885.h b/drivers/media/pci/cx23885/cx23885.h
index a6735afe2269..cb714ab60d69 100644
--- a/drivers/media/pci/cx23885/cx23885.h
+++ b/drivers/media/pci/cx23885/cx23885.h
@@ -15,6 +15,8 @@
* GNU General Public License for more details.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/pci.h>
#include <linux/i2c.h>
#include <linux/kdev_t.h>
diff --git a/drivers/media/pci/cx23885/cx23888-ir.c b/drivers/media/pci/cx23885/cx23888-ir.c
index c1aa888af705..040323b0f945 100644
--- a/drivers/media/pci/cx23885/cx23888-ir.c
+++ b/drivers/media/pci/cx23885/cx23888-ir.c
@@ -16,15 +16,15 @@
* GNU General Public License for more details.
*/
+#include "cx23885.h"
+#include "cx23888-ir.h"
+
#include <linux/kfifo.h>
#include <linux/slab.h>
#include <media/v4l2-device.h>
#include <media/rc-core.h>
-#include "cx23885.h"
-#include "cx23888-ir.h"
-
static unsigned int ir_888_debug;
module_param(ir_888_debug, int, 0644);
MODULE_PARM_DESC(ir_888_debug, "enable debug messages [CX23888 IR controller]");
@@ -1015,8 +1015,8 @@ static int cx23888_ir_log_status(struct v4l2_subdev *sd)
j = 0;
break;
}
- v4l2_info(sd, "\tNext carrier edge window: 16 clocks "
- "-%1d/+%1d, %u to %u Hz\n", i, j,
+ v4l2_info(sd, "\tNext carrier edge window: 16 clocks -%1d/+%1d, %u to %u Hz\n",
+ i, j,
clock_divider_to_freq(rxclk, 16 + j),
clock_divider_to_freq(rxclk, 16 - i));
}
@@ -1026,8 +1026,7 @@ static int cx23888_ir_log_status(struct v4l2_subdev *sd)
v4l2_info(sd, "\tLow pass filter: %s\n",
filtr ? "enabled" : "disabled");
if (filtr)
- v4l2_info(sd, "\tMin acceptable pulse width (LPF): %u us, "
- "%u ns\n",
+ v4l2_info(sd, "\tMin acceptable pulse width (LPF): %u us, %u ns\n",
lpf_count_to_us(filtr),
lpf_count_to_ns(filtr));
v4l2_info(sd, "\tPulse width timer timed-out: %s\n",
diff --git a/drivers/media/pci/cx23885/netup-eeprom.c b/drivers/media/pci/cx23885/netup-eeprom.c
index b6542ee4385b..6384c12aa38e 100644
--- a/drivers/media/pci/cx23885/netup-eeprom.c
+++ b/drivers/media/pci/cx23885/netup-eeprom.c
@@ -52,7 +52,7 @@ int netup_eeprom_read(struct i2c_adapter *i2c_adap, u8 addr)
ret = i2c_transfer(i2c_adap, msg, 2);
if (ret != 2) {
- printk(KERN_ERR "eeprom i2c read error, status=%d\n", ret);
+ pr_err("eeprom i2c read error, status=%d\n", ret);
return -1;
}
@@ -80,7 +80,7 @@ int netup_eeprom_write(struct i2c_adapter *i2c_adap, u8 addr, u8 data)
ret = i2c_transfer(i2c_adap, msg, 1);
if (ret != 1) {
- printk(KERN_ERR "eeprom i2c write error, status=%d\n", ret);
+ pr_err("eeprom i2c write error, status=%d\n", ret);
return -1;
}
diff --git a/drivers/media/pci/cx23885/netup-init.c b/drivers/media/pci/cx23885/netup-init.c
index 76d9487aafc8..6a27ef5d9ec2 100644
--- a/drivers/media/pci/cx23885/netup-init.c
+++ b/drivers/media/pci/cx23885/netup-init.c
@@ -40,7 +40,7 @@ static void i2c_av_write(struct i2c_adapter *i2c, u16 reg, u8 val)
ret = i2c_transfer(i2c, &msg, 1);
if (ret != 1)
- printk(KERN_ERR "%s: i2c write error!\n", __func__);
+ pr_err("%s: i2c write error!\n", __func__);
}
static void i2c_av_write4(struct i2c_adapter *i2c, u16 reg, u32 val)
@@ -64,7 +64,7 @@ static void i2c_av_write4(struct i2c_adapter *i2c, u16 reg, u32 val)
ret = i2c_transfer(i2c, &msg, 1);
if (ret != 1)
- printk(KERN_ERR "%s: i2c write error!\n", __func__);
+ pr_err("%s: i2c write error!\n", __func__);
}
static u8 i2c_av_read(struct i2c_adapter *i2c, u16 reg)
@@ -84,7 +84,7 @@ static u8 i2c_av_read(struct i2c_adapter *i2c, u16 reg)
ret = i2c_transfer(i2c, &msg, 1);
if (ret != 1)
- printk(KERN_ERR "%s: i2c write error!\n", __func__);
+ pr_err("%s: i2c write error!\n", __func__);
msg.flags = I2C_M_RD;
msg.len = 1;
@@ -92,7 +92,7 @@ static u8 i2c_av_read(struct i2c_adapter *i2c, u16 reg)
ret = i2c_transfer(i2c, &msg, 1);
if (ret != 1)
- printk(KERN_ERR "%s: i2c read error!\n", __func__);
+ pr_err("%s: i2c read error!\n", __func__);
return buf[0];
}
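
The netup hunks above all share one error-handling shape: fill in a struct i2c_msg, hand it to i2c_transfer(), and treat any return value other than the number of messages submitted as a failure to be reported via pr_err(). A condensed sketch of that shape; the helper name and register layout are illustrative, not taken from the driver:

    #include <linux/i2c.h>
    #include <linux/printk.h>

    /* Illustrative only: mirrors the i2c_transfer() checking pattern. */
    static int example_i2c_write8(struct i2c_adapter *i2c, u16 addr,
    			      u8 reg, u8 val)
    {
    	u8 buf[2] = { reg, val };
    	struct i2c_msg msg = {
    		.addr  = addr,
    		.flags = 0,	/* plain write */
    		.buf   = buf,
    		.len   = 2,
    	};
    	int ret;

    	/* i2c_transfer() returns the number of messages transferred */
    	ret = i2c_transfer(i2c, &msg, 1);
    	if (ret != 1) {
    		pr_err("%s: i2c write error, status=%d\n", __func__, ret);
    		return ret < 0 ? ret : -EIO;
    	}
    	return 0;
    }
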
diff --git a/drivers/media/pci/cx88/cx88-alsa.c b/drivers/media/pci/cx88/cx88-alsa.c
index 723f06462104..c81fe4681d14 100644
--- a/drivers/media/pci/cx88/cx88-alsa.c
+++ b/drivers/media/pci/cx88/cx88-alsa.c
@@ -1,5 +1,4 @@
/*
- *
* Support for audio capture
* PCI function #1 of the cx2388x.
*
@@ -18,14 +17,14 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
+#include "cx88.h"
+#include "cx88-reg.h"
+
#include <linux/module.h>
#include <linux/init.h>
+#include <linux/delay.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/vmalloc.h>
@@ -33,7 +32,6 @@
#include <linux/pci.h>
#include <linux/slab.h>
-#include <asm/delay.h>
#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
@@ -42,22 +40,15 @@
#include <sound/tlv.h>
#include <media/i2c/wm8775.h>
-#include "cx88.h"
-#include "cx88-reg.h"
-
#define dprintk(level, fmt, arg...) do { \
if (debug + 1 > level) \
- printk(KERN_INFO "%s/1: " fmt, chip->core->name , ## arg);\
-} while(0)
-
-#define dprintk_core(level, fmt, arg...) do { \
- if (debug + 1 > level) \
- printk(KERN_DEBUG "%s/1: " fmt, chip->core->name , ## arg);\
-} while(0)
+ printk(KERN_DEBUG pr_fmt("%s: alsa: " fmt), \
+ chip->core->name, ##arg); \
+} while (0)
-/****************************************************************************
- Data type declarations - Can be moded to a header file later
- ****************************************************************************/
+/*
+ * Data type declarations - Can be moved to a header file later
+ */
struct cx88_audio_buffer {
unsigned int bpl;
@@ -91,13 +82,10 @@ struct cx88_audio_dev {
struct snd_pcm_substream *substream;
};
-typedef struct cx88_audio_dev snd_cx88_card_t;
-
-
-/****************************************************************************
- Module global static vars
- ****************************************************************************/
+/*
+ * Module global static vars
+ */
static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX; /* Index 0-MAX */
static const char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR; /* ID for this card */
@@ -109,10 +97,9 @@ MODULE_PARM_DESC(enable, "Enable cx88x soundcard. default enabled.");
module_param_array(index, int, NULL, 0444);
MODULE_PARM_DESC(index, "Index value for cx88x capture interface(s).");
-
-/****************************************************************************
- Module macros
- ****************************************************************************/
+/*
+ * Module macros
+ */
MODULE_DESCRIPTION("ALSA driver module for cx2388x based TV cards");
MODULE_AUTHOR("Ricardo Cerqueira");
@@ -120,25 +107,23 @@ MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@infradead.org>");
MODULE_LICENSE("GPL");
MODULE_VERSION(CX88_VERSION);
-MODULE_SUPPORTED_DEVICE("{{Conexant,23881},"
- "{{Conexant,23882},"
- "{{Conexant,23883}");
+MODULE_SUPPORTED_DEVICE("{{Conexant,23881},{Conexant,23882},{Conexant,23883}}");
static unsigned int debug;
-module_param(debug,int,0644);
-MODULE_PARM_DESC(debug,"enable debug messages");
+module_param(debug, int, 0644);
+MODULE_PARM_DESC(debug, "enable debug messages");
-/****************************************************************************
- Module specific funtions
- ****************************************************************************/
+/*
+ * Module specific functions
+ */
/*
* BOARD Specific: Sets audio DMA
*/
-static int _cx88_start_audio_dma(snd_cx88_card_t *chip)
+static int _cx88_start_audio_dma(struct cx88_audio_dev *chip)
{
struct cx88_audio_buffer *buf = chip->buf;
- struct cx88_core *core=chip->core;
+ struct cx88_core *core = chip->core;
const struct sram_channel *audio_ch = &cx88_sram_channels[SRAM_CH25];
/* Make sure RISC/FIFO are off before changing FIFO/RISC settings */
@@ -154,8 +139,9 @@ static int _cx88_start_audio_dma(snd_cx88_card_t *chip)
cx_write(MO_AUDD_GPCNTRL, GP_COUNT_CONTROL_RESET);
atomic_set(&chip->count, 0);
- dprintk(1, "Start audio DMA, %d B/line, %d lines/FIFO, %d periods, %d "
- "byte buffer\n", buf->bpl, cx_read(audio_ch->cmds_start + 8)>>1,
+ dprintk(1,
+ "Start audio DMA, %d B/line, %d lines/FIFO, %d periods, %d byte buffer\n",
+ buf->bpl, cx_read(audio_ch->cmds_start + 8) >> 1,
chip->num_periods, buf->bpl * chip->num_periods);
/* Enables corresponding bits at AUD_INT_STAT */
@@ -169,8 +155,11 @@ static int _cx88_start_audio_dma(snd_cx88_card_t *chip)
cx_set(MO_PCI_INTMSK, chip->core->pci_irqmask | PCI_INT_AUDINT);
/* start dma */
- cx_set(MO_DEV_CNTRL2, (1<<5)); /* Enables Risc Processor */
- cx_set(MO_AUD_DMACNTRL, 0x11); /* audio downstream FIFO and RISC enable */
+
+ /* Enables Risc Processor */
+ cx_set(MO_DEV_CNTRL2, (1 << 5));
+ /* audio downstream FIFO and RISC enable */
+ cx_set(MO_AUD_DMACNTRL, 0x11);
if (debug)
cx88_sram_channel_dump(chip->core, audio_ch);
@@ -181,9 +170,10 @@ static int _cx88_start_audio_dma(snd_cx88_card_t *chip)
/*
* BOARD Specific: Resets audio DMA
*/
-static int _cx88_stop_audio_dma(snd_cx88_card_t *chip)
+static int _cx88_stop_audio_dma(struct cx88_audio_dev *chip)
{
- struct cx88_core *core=chip->core;
+ struct cx88_core *core = chip->core;
+
dprintk(1, "Stopping audio DMA\n");
/* stop dma */
@@ -195,7 +185,8 @@ static int _cx88_stop_audio_dma(snd_cx88_card_t *chip)
AUD_INT_DN_RISCI2 | AUD_INT_DN_RISCI1);
if (debug)
- cx88_sram_channel_dump(chip->core, &cx88_sram_channels[SRAM_CH25]);
+ cx88_sram_channel_dump(chip->core,
+ &cx88_sram_channels[SRAM_CH25]);
return 0;
}
@@ -221,7 +212,7 @@ static const char *cx88_aud_irqs[32] = {
/*
* BOARD Specific: Handles audio-specific IRQ calls
*/
-static void cx8801_aud_irq(snd_cx88_card_t *chip)
+static void cx8801_aud_irq(struct cx88_audio_dev *chip)
{
struct cx88_core *core = chip->core;
u32 status, mask;
@@ -232,12 +223,12 @@ static void cx8801_aud_irq(snd_cx88_card_t *chip)
return;
cx_write(MO_AUD_INTSTAT, status);
if (debug > 1 || (status & mask & ~0xff))
- cx88_print_irqbits(core->name, "irq aud",
+ cx88_print_irqbits("irq aud",
cx88_aud_irqs, ARRAY_SIZE(cx88_aud_irqs),
status, mask);
/* risc op code error */
if (status & AUD_INT_OPC_ERR) {
- printk(KERN_WARNING "%s/1: Audio risc op code error\n",core->name);
+ pr_warn("Audio risc op code error\n");
cx_clear(MO_AUD_DMACNTRL, 0x11);
cx88_sram_channel_dump(core, &cx88_sram_channels[SRAM_CH25]);
}
@@ -259,7 +250,7 @@ static void cx8801_aud_irq(snd_cx88_card_t *chip)
*/
static irqreturn_t cx8801_irq(int irq, void *dev_id)
{
- snd_cx88_card_t *chip = dev_id;
+ struct cx88_audio_dev *chip = dev_id;
struct cx88_core *core = chip->core;
u32 status;
int loop, handled = 0;
@@ -267,7 +258,7 @@ static irqreturn_t cx8801_irq(int irq, void *dev_id)
for (loop = 0; loop < MAX_IRQ_LOOP; loop++) {
status = cx_read(MO_PCI_INTSTAT) &
(core->pci_irqmask | PCI_INT_AUDINT);
- if (0 == status)
+ if (status == 0)
goto out;
dprintk(3, "cx8801_irq loop %d/%d, status %x\n",
loop, MAX_IRQ_LOOP, status);
@@ -280,10 +271,8 @@ static irqreturn_t cx8801_irq(int irq, void *dev_id)
cx8801_aud_irq(chip);
}
- if (MAX_IRQ_LOOP == loop) {
- printk(KERN_ERR
- "%s/1: IRQ loop detected, disabling interrupts\n",
- core->name);
+ if (loop == MAX_IRQ_LOOP) {
+ pr_err("IRQ loop detected, disabling interrupts\n");
cx_clear(MO_PCI_INTMSK, PCI_INT_AUDINT);
}
@@ -298,26 +287,25 @@ static int cx88_alsa_dma_init(struct cx88_audio_dev *chip, int nr_pages)
int i;
buf->vaddr = vmalloc_32(nr_pages << PAGE_SHIFT);
- if (NULL == buf->vaddr) {
+ if (!buf->vaddr) {
dprintk(1, "vmalloc_32(%d pages) failed\n", nr_pages);
return -ENOMEM;
}
dprintk(1, "vmalloc is at addr 0x%08lx, size=%d\n",
- (unsigned long)buf->vaddr,
- nr_pages << PAGE_SHIFT);
+ (unsigned long)buf->vaddr, nr_pages << PAGE_SHIFT);
memset(buf->vaddr, 0, nr_pages << PAGE_SHIFT);
buf->nr_pages = nr_pages;
buf->sglist = vzalloc(buf->nr_pages * sizeof(*buf->sglist));
- if (NULL == buf->sglist)
+ if (!buf->sglist)
goto vzalloc_err;
sg_init_table(buf->sglist, buf->nr_pages);
for (i = 0; i < buf->nr_pages; i++) {
pg = vmalloc_to_page(buf->vaddr + i * PAGE_SIZE);
- if (NULL == pg)
+ if (!pg)
goto vmalloc_to_page_err;
sg_set_page(&buf->sglist[i], pg, PAGE_SIZE, 0);
}
@@ -339,7 +327,7 @@ static int cx88_alsa_dma_map(struct cx88_audio_dev *dev)
buf->sglen = dma_map_sg(&dev->pci->dev, buf->sglist,
buf->nr_pages, PCI_DMA_FROMDEVICE);
- if (0 == buf->sglen) {
+ if (buf->sglen == 0) {
pr_warn("%s: cx88_alsa_map_sg failed\n", __func__);
return -ENOMEM;
}
@@ -353,7 +341,8 @@ static int cx88_alsa_dma_unmap(struct cx88_audio_dev *dev)
if (!buf->sglen)
return 0;
- dma_unmap_sg(&dev->pci->dev, buf->sglist, buf->sglen, PCI_DMA_FROMDEVICE);
+ dma_unmap_sg(&dev->pci->dev, buf->sglist, buf->sglen,
+ PCI_DMA_FROMDEVICE);
buf->sglen = 0;
return 0;
}
@@ -367,18 +356,18 @@ static int cx88_alsa_dma_free(struct cx88_audio_buffer *buf)
return 0;
}
-
-static int dsp_buffer_free(snd_cx88_card_t *chip)
+static int dsp_buffer_free(struct cx88_audio_dev *chip)
{
struct cx88_riscmem *risc = &chip->buf->risc;
- BUG_ON(!chip->dma_size);
+ WARN_ON(!chip->dma_size);
- dprintk(2,"Freeing buffer\n");
+ dprintk(2, "Freeing buffer\n");
cx88_alsa_dma_unmap(chip);
cx88_alsa_dma_free(chip->buf);
if (risc->cpu)
- pci_free_consistent(chip->pci, risc->size, risc->cpu, risc->dma);
+ pci_free_consistent(chip->pci, risc->size,
+ risc->cpu, risc->dma);
kfree(chip->buf);
chip->buf = NULL;
@@ -386,9 +375,9 @@ static int dsp_buffer_free(snd_cx88_card_t *chip)
return 0;
}
-/****************************************************************************
- ALSA PCM Interface
- ****************************************************************************/
+/*
+ * ALSA PCM Interface
+ */
/*
* Digital hardware definition
@@ -406,13 +395,15 @@ static const struct snd_pcm_hardware snd_cx88_digital_hw = {
.rate_max = 48000,
.channels_min = 2,
.channels_max = 2,
- /* Analog audio output will be full of clicks and pops if there
- are not exactly four lines in the SRAM FIFO buffer. */
- .period_bytes_min = DEFAULT_FIFO_SIZE/4,
- .period_bytes_max = DEFAULT_FIFO_SIZE/4,
+ /*
+ * Analog audio output will be full of clicks and pops if there
+ * are not exactly four lines in the SRAM FIFO buffer.
+ */
+ .period_bytes_min = DEFAULT_FIFO_SIZE / 4,
+ .period_bytes_max = DEFAULT_FIFO_SIZE / 4,
.periods_min = 1,
.periods_max = 1024,
- .buffer_bytes_max = (1024*1024),
+ .buffer_bytes_max = (1024 * 1024),
};
/*
@@ -420,17 +411,17 @@ static const struct snd_pcm_hardware snd_cx88_digital_hw = {
*/
static int snd_cx88_pcm_open(struct snd_pcm_substream *substream)
{
- snd_cx88_card_t *chip = snd_pcm_substream_chip(substream);
+ struct cx88_audio_dev *chip = snd_pcm_substream_chip(substream);
struct snd_pcm_runtime *runtime = substream->runtime;
int err;
if (!chip) {
- printk(KERN_ERR "BUG: cx88 can't find device struct."
- " Can't proceed with open\n");
+ pr_err("BUG: cx88 can't find device struct. Can't proceed with open\n");
return -ENODEV;
}
- err = snd_pcm_hw_constraint_pow2(runtime, 0, SNDRV_PCM_HW_PARAM_PERIODS);
+ err = snd_pcm_hw_constraint_pow2(runtime, 0,
+ SNDRV_PCM_HW_PARAM_PERIODS);
if (err < 0)
goto _error;
@@ -440,6 +431,7 @@ static int snd_cx88_pcm_open(struct snd_pcm_substream *substream)
if (cx88_sram_channels[SRAM_CH25].fifo_size != DEFAULT_FIFO_SIZE) {
unsigned int bpl = cx88_sram_channels[SRAM_CH25].fifo_size / 4;
+
bpl &= ~7; /* must be multiple of 8 */
runtime->hw.period_bytes_min = bpl;
runtime->hw.period_bytes_max = bpl;
@@ -447,7 +439,7 @@ static int snd_cx88_pcm_open(struct snd_pcm_substream *substream)
return 0;
_error:
- dprintk(1,"Error opening PCM!\n");
+ dprintk(1, "Error opening PCM!\n");
return err;
}
@@ -462,10 +454,10 @@ static int snd_cx88_close(struct snd_pcm_substream *substream)
/*
* hw_params callback
*/
-static int snd_cx88_hw_params(struct snd_pcm_substream * substream,
- struct snd_pcm_hw_params * hw_params)
+static int snd_cx88_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *hw_params)
{
- snd_cx88_card_t *chip = snd_pcm_substream_chip(substream);
+ struct cx88_audio_dev *chip = snd_pcm_substream_chip(substream);
struct cx88_audio_buffer *buf;
int ret;
@@ -479,18 +471,18 @@ static int snd_cx88_hw_params(struct snd_pcm_substream * substream,
chip->num_periods = params_periods(hw_params);
chip->dma_size = chip->period_size * params_periods(hw_params);
- BUG_ON(!chip->dma_size);
- BUG_ON(chip->num_periods & (chip->num_periods-1));
+ WARN_ON(!chip->dma_size);
+ WARN_ON(chip->num_periods & (chip->num_periods - 1));
buf = kzalloc(sizeof(*buf), GFP_KERNEL);
- if (NULL == buf)
+ if (!buf)
return -ENOMEM;
chip->buf = buf;
buf->bpl = chip->period_size;
ret = cx88_alsa_dma_init(chip,
- (PAGE_ALIGN(chip->dma_size) >> PAGE_SHIFT));
+ (PAGE_ALIGN(chip->dma_size) >> PAGE_SHIFT));
if (ret < 0)
goto error;
@@ -504,7 +496,7 @@ static int snd_cx88_hw_params(struct snd_pcm_substream * substream,
goto error;
/* Loop back to start of program */
- buf->risc.jmp[0] = cpu_to_le32(RISC_JUMP|RISC_IRQ1|RISC_CNT_INC);
+ buf->risc.jmp[0] = cpu_to_le32(RISC_JUMP | RISC_IRQ1 | RISC_CNT_INC);
buf->risc.jmp[1] = cpu_to_le32(buf->risc.dma);
substream->runtime->dma_area = chip->buf->vaddr;
@@ -520,10 +512,9 @@ error:
/*
* hw free callback
*/
-static int snd_cx88_hw_free(struct snd_pcm_substream * substream)
+static int snd_cx88_hw_free(struct snd_pcm_substream *substream)
{
-
- snd_cx88_card_t *chip = snd_pcm_substream_chip(substream);
+ struct cx88_audio_dev *chip = snd_pcm_substream_chip(substream);
if (substream->runtime->dma_area) {
dsp_buffer_free(chip);
@@ -546,7 +537,7 @@ static int snd_cx88_prepare(struct snd_pcm_substream *substream)
*/
static int snd_cx88_card_trigger(struct snd_pcm_substream *substream, int cmd)
{
- snd_cx88_card_t *chip = snd_pcm_substream_chip(substream);
+ struct cx88_audio_dev *chip = snd_pcm_substream_chip(substream);
int err;
/* Local interrupts are already disabled by ALSA */
@@ -554,13 +545,13 @@ static int snd_cx88_card_trigger(struct snd_pcm_substream *substream, int cmd)
switch (cmd) {
case SNDRV_PCM_TRIGGER_START:
- err=_cx88_start_audio_dma(chip);
+ err = _cx88_start_audio_dma(chip);
break;
case SNDRV_PCM_TRIGGER_STOP:
- err=_cx88_stop_audio_dma(chip);
+ err = _cx88_stop_audio_dma(chip);
break;
default:
- err=-EINVAL;
+ err = -EINVAL;
break;
}
@@ -574,7 +565,7 @@ static int snd_cx88_card_trigger(struct snd_pcm_substream *substream, int cmd)
*/
static snd_pcm_uframes_t snd_cx88_pointer(struct snd_pcm_substream *substream)
{
- snd_cx88_card_t *chip = snd_pcm_substream_chip(substream);
+ struct cx88_audio_dev *chip = snd_pcm_substream_chip(substream);
struct snd_pcm_runtime *runtime = substream->runtime;
u16 count;
@@ -583,16 +574,17 @@ static snd_pcm_uframes_t snd_cx88_pointer(struct snd_pcm_substream *substream)
// dprintk(2, "%s - count %d (+%u), period %d, frame %lu\n", __func__,
// count, new, count & (runtime->periods-1),
// runtime->period_size * (count & (runtime->periods-1)));
- return runtime->period_size * (count & (runtime->periods-1));
+ return runtime->period_size * (count & (runtime->periods - 1));
}
/*
* page callback (needed for mmap)
*/
static struct page *snd_cx88_page(struct snd_pcm_substream *substream,
- unsigned long offset)
+ unsigned long offset)
{
void *pageptr = substream->runtime->dma_area + offset;
+
return vmalloc_to_page(pageptr);
}
@@ -614,7 +606,8 @@ static const struct snd_pcm_ops snd_cx88_pcm_ops = {
/*
* create a PCM device
*/
-static int snd_cx88_pcm(snd_cx88_card_t *chip, int device, const char *name)
+static int snd_cx88_pcm(struct cx88_audio_dev *chip, int device,
+ const char *name)
{
int err;
struct snd_pcm *pcm;
@@ -629,9 +622,9 @@ static int snd_cx88_pcm(snd_cx88_card_t *chip, int device, const char *name)
return 0;
}
-/****************************************************************************
- CONTROL INTERFACE
- ****************************************************************************/
+/*
+ * CONTROL INTERFACE
+ */
static int snd_cx88_volume_info(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_info *info)
{
@@ -646,8 +639,8 @@ static int snd_cx88_volume_info(struct snd_kcontrol *kcontrol,
static int snd_cx88_volume_get(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *value)
{
- snd_cx88_card_t *chip = snd_kcontrol_chip(kcontrol);
- struct cx88_core *core=chip->core;
+ struct cx88_audio_dev *chip = snd_kcontrol_chip(kcontrol);
+ struct cx88_core *core = chip->core;
int vol = 0x3f - (cx_read(AUD_VOL_CTL) & 0x3f),
bal = cx_read(AUD_BAL_CTL);
@@ -659,9 +652,9 @@ static int snd_cx88_volume_get(struct snd_kcontrol *kcontrol,
}
static void snd_cx88_wm8775_volume_put(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *value)
+ struct snd_ctl_elem_value *value)
{
- snd_cx88_card_t *chip = snd_kcontrol_chip(kcontrol);
+ struct cx88_audio_dev *chip = snd_kcontrol_chip(kcontrol);
struct cx88_core *core = chip->core;
int left = value->value.integer.value[0];
int right = value->value.integer.value[1];
@@ -683,8 +676,8 @@ static void snd_cx88_wm8775_volume_put(struct snd_kcontrol *kcontrol,
static int snd_cx88_volume_put(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *value)
{
- snd_cx88_card_t *chip = snd_kcontrol_chip(kcontrol);
- struct cx88_core *core=chip->core;
+ struct cx88_audio_dev *chip = snd_kcontrol_chip(kcontrol);
+ struct cx88_core *core = chip->core;
int left, right, v, b;
int changed = 0;
u32 old;
@@ -733,7 +726,7 @@ static const struct snd_kcontrol_new snd_cx88_volume = {
static int snd_cx88_switch_get(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *value)
{
- snd_cx88_card_t *chip = snd_kcontrol_chip(kcontrol);
+ struct cx88_audio_dev *chip = snd_kcontrol_chip(kcontrol);
struct cx88_core *core = chip->core;
u32 bit = kcontrol->private_value;
@@ -742,9 +735,9 @@ static int snd_cx88_switch_get(struct snd_kcontrol *kcontrol,
}
static int snd_cx88_switch_put(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *value)
+ struct snd_ctl_elem_value *value)
{
- snd_cx88_card_t *chip = snd_kcontrol_chip(kcontrol);
+ struct cx88_audio_dev *chip = snd_kcontrol_chip(kcontrol);
struct cx88_core *core = chip->core;
u32 bit = kcontrol->private_value;
int ret = 0;
@@ -756,8 +749,9 @@ static int snd_cx88_switch_put(struct snd_kcontrol *kcontrol,
vol ^= bit;
cx_swrite(SHADOW_AUD_VOL_CTL, AUD_VOL_CTL, vol);
/* Pass mute onto any WM8775 */
- if (core->sd_wm8775 && ((1<<6) == bit))
- wm8775_s_ctrl(core, V4L2_CID_AUDIO_MUTE, 0 != (vol & bit));
+ if (core->sd_wm8775 && ((1 << 6) == bit))
+ wm8775_s_ctrl(core,
+ V4L2_CID_AUDIO_MUTE, 0 != (vol & bit));
ret = 1;
}
spin_unlock_irq(&chip->reg_lock);
@@ -770,7 +764,7 @@ static const struct snd_kcontrol_new snd_cx88_dac_switch = {
.info = snd_ctl_boolean_mono_info,
.get = snd_cx88_switch_get,
.put = snd_cx88_switch_put,
- .private_value = (1<<8),
+ .private_value = (1 << 8),
};
static const struct snd_kcontrol_new snd_cx88_source_switch = {
@@ -779,13 +773,13 @@ static const struct snd_kcontrol_new snd_cx88_source_switch = {
.info = snd_ctl_boolean_mono_info,
.get = snd_cx88_switch_get,
.put = snd_cx88_switch_put,
- .private_value = (1<<6),
+ .private_value = (1 << 6),
};
static int snd_cx88_alc_get(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *value)
+ struct snd_ctl_elem_value *value)
{
- snd_cx88_card_t *chip = snd_kcontrol_chip(kcontrol);
+ struct cx88_audio_dev *chip = snd_kcontrol_chip(kcontrol);
struct cx88_core *core = chip->core;
s32 val;
@@ -795,9 +789,9 @@ static int snd_cx88_alc_get(struct snd_kcontrol *kcontrol,
}
static int snd_cx88_alc_put(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *value)
+ struct snd_ctl_elem_value *value)
{
- snd_cx88_card_t *chip = snd_kcontrol_chip(kcontrol);
+ struct cx88_audio_dev *chip = snd_kcontrol_chip(kcontrol);
struct cx88_core *core = chip->core;
wm8775_s_ctrl(core, V4L2_CID_AUDIO_LOUDNESS,
@@ -813,9 +807,9 @@ static struct snd_kcontrol_new snd_cx88_alc_switch = {
.put = snd_cx88_alc_put,
};
-/****************************************************************************
- Basic Flow for Sound Devices
- ****************************************************************************/
+/*
+ * Basic Flow for Sound Devices
+ */
/*
* PCI ID Table - 14f1:8801 and 14f1:8811 means function 1: Audio
@@ -823,8 +817,8 @@ static struct snd_kcontrol_new snd_cx88_alc_switch = {
*/
static const struct pci_device_id cx88_audio_pci_tbl[] = {
- {0x14f1,0x8801,PCI_ANY_ID,PCI_ANY_ID,0,0,0},
- {0x14f1,0x8811,PCI_ANY_ID,PCI_ANY_ID,0,0,0},
+ {0x14f1, 0x8801, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
+ {0x14f1, 0x8811, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
{0, }
};
MODULE_DEVICE_TABLE(pci, cx88_audio_pci_tbl);
@@ -833,13 +827,12 @@ MODULE_DEVICE_TABLE(pci, cx88_audio_pci_tbl);
* Chip-specific destructor
*/
-static int snd_cx88_free(snd_cx88_card_t *chip)
+static int snd_cx88_free(struct cx88_audio_dev *chip)
{
-
if (chip->irq >= 0)
free_irq(chip->irq, chip);
- cx88_core_put(chip->core,chip->pci);
+ cx88_core_put(chip->core, chip->pci);
pci_disable_device(chip->pci);
return 0;
@@ -848,27 +841,26 @@ static int snd_cx88_free(snd_cx88_card_t *chip)
/*
* Component Destructor
*/
-static void snd_cx88_dev_free(struct snd_card * card)
+static void snd_cx88_dev_free(struct snd_card *card)
{
- snd_cx88_card_t *chip = card->private_data;
+ struct cx88_audio_dev *chip = card->private_data;
snd_cx88_free(chip);
}
-
/*
* Alsa Constructor - Component probe
*/
static int devno;
static int snd_cx88_create(struct snd_card *card, struct pci_dev *pci,
- snd_cx88_card_t **rchip,
+ struct cx88_audio_dev **rchip,
struct cx88_core **core_ptr)
{
- snd_cx88_card_t *chip;
- struct cx88_core *core;
- int err;
- unsigned char pci_lat;
+ struct cx88_audio_dev *chip;
+ struct cx88_core *core;
+ int err;
+ unsigned char pci_lat;
*rchip = NULL;
@@ -881,19 +873,18 @@ static int snd_cx88_create(struct snd_card *card, struct pci_dev *pci,
chip = card->private_data;
core = cx88_core_get(pci);
- if (NULL == core) {
+ if (!core) {
err = -EINVAL;
return err;
}
- err = pci_set_dma_mask(pci,DMA_BIT_MASK(32));
+ err = pci_set_dma_mask(pci, DMA_BIT_MASK(32));
if (err) {
- dprintk(0, "%s/1: Oops: no 32bit PCI DMA ???\n",core->name);
+ dprintk(0, "%s/1: Oops: no 32bit PCI DMA ???\n", core->name);
cx88_core_put(core, pci);
return err;
}
-
/* pci init */
chip->card = card;
chip->pci = pci;
@@ -907,17 +898,18 @@ static int snd_cx88_create(struct snd_card *card, struct pci_dev *pci,
IRQF_SHARED, chip->core->name, chip);
if (err < 0) {
dprintk(0, "%s: can't get IRQ %d\n",
- chip->core->name, chip->pci->irq);
+ chip->core->name, chip->pci->irq);
return err;
}
/* print pci info */
pci_read_config_byte(pci, PCI_LATENCY_TIMER, &pci_lat);
- dprintk(1,"ALSA %s/%i: found at %s, rev: %d, irq: %d, "
- "latency: %d, mmio: 0x%llx\n", core->name, devno,
- pci_name(pci), pci->revision, pci->irq,
- pci_lat, (unsigned long long)pci_resource_start(pci,0));
+ dprintk(1,
+ "ALSA %s/%i: found at %s, rev: %d, irq: %d, latency: %d, mmio: 0x%llx\n",
+ core->name, devno,
+ pci_name(pci), pci->revision, pci->irq,
+ pci_lat, (unsigned long long)pci_resource_start(pci, 0));
chip->irq = pci->irq;
synchronize_irq(chip->irq);
@@ -931,10 +923,10 @@ static int snd_cx88_create(struct snd_card *card, struct pci_dev *pci,
static int cx88_audio_initdev(struct pci_dev *pci,
const struct pci_device_id *pci_id)
{
- struct snd_card *card;
- snd_cx88_card_t *chip;
- struct cx88_core *core = NULL;
- int err;
+ struct snd_card *card;
+ struct cx88_audio_dev *chip;
+ struct cx88_core *core = NULL;
+ int err;
if (devno >= SNDRV_CARDS)
return (-ENODEV);
@@ -945,7 +937,7 @@ static int cx88_audio_initdev(struct pci_dev *pci,
}
err = snd_card_new(&pci->dev, index[devno], id[devno], THIS_MODULE,
- sizeof(snd_cx88_card_t), &card);
+ sizeof(struct cx88_audio_dev), &card);
if (err < 0)
return err;
@@ -973,19 +965,20 @@ static int cx88_audio_initdev(struct pci_dev *pci,
if (core->sd_wm8775)
snd_ctl_add(card, snd_ctl_new1(&snd_cx88_alc_switch, chip));
- strcpy (card->driver, "CX88x");
+ strcpy(card->driver, "CX88x");
sprintf(card->shortname, "Conexant CX%x", pci->device);
sprintf(card->longname, "%s at %#llx",
- card->shortname,(unsigned long long)pci_resource_start(pci, 0));
- strcpy (card->mixername, "CX88");
+ card->shortname,
+ (unsigned long long)pci_resource_start(pci, 0));
+ strcpy(card->mixername, "CX88");
- dprintk (0, "%s/%i: ALSA support for cx2388x boards\n",
- card->driver,devno);
+ dprintk(0, "%s/%i: ALSA support for cx2388x boards\n",
+ card->driver, devno);
err = snd_card_register(card);
if (err < 0)
goto error;
- pci_set_drvdata(pci,card);
+ pci_set_drvdata(pci, card);
devno++;
return 0;
@@ -994,6 +987,7 @@ error:
snd_card_free(card);
return err;
}
+
/*
* ALSA destructor
*/
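
The cx88_alsa_dma_init() and mapping hunks in this file keep the driver's long-standing buffer scheme: the audio ring is allocated with vmalloc_32() so every backing page is 32-bit DMA addressable, and one scatterlist entry is built per page before dma_map_sg() hands the list to the device. A reduced sketch of that scheme, with illustrative names, assuming the standard vmalloc/scatterlist APIs:

    #include <linux/vmalloc.h>
    #include <linux/scatterlist.h>

    /* Sketch of the vmalloc-backed scatter-gather setup; simplified. */
    static int example_sg_from_vmalloc(struct scatterlist **sglist,
    				   void **vaddr, int nr_pages)
    {
    	struct page *pg;
    	int i;

    	*vaddr = vmalloc_32(nr_pages << PAGE_SHIFT); /* 32-bit addressable */
    	if (!*vaddr)
    		return -ENOMEM;

    	*sglist = vzalloc(nr_pages * sizeof(**sglist));
    	if (!*sglist) {
    		vfree(*vaddr);
    		return -ENOMEM;
    	}

    	sg_init_table(*sglist, nr_pages);
    	for (i = 0; i < nr_pages; i++) {
    		/* vmalloc memory is not contiguous: resolve each page */
    		pg = vmalloc_to_page(*vaddr + i * PAGE_SIZE);
    		if (!pg) {
    			vfree(*sglist);
    			vfree(*vaddr);
    			return -ENOMEM;
    		}
    		sg_set_page(&(*sglist)[i], pg, PAGE_SIZE, 0);
    	}
    	return 0;
    }
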
diff --git a/drivers/media/pci/cx88/cx88-blackbird.c b/drivers/media/pci/cx88/cx88-blackbird.c
index b532e49e8f33..aa49c9597d9c 100644
--- a/drivers/media/pci/cx88/cx88-blackbird.c
+++ b/drivers/media/pci/cx88/cx88-blackbird.c
@@ -1,5 +1,4 @@
/*
- *
* Support for a cx23416 mpeg encoder via cx2388x host port.
* "blackbird" reference design.
*
@@ -20,12 +19,10 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
+#include "cx88.h"
+
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
@@ -38,21 +35,20 @@
#include <media/v4l2-event.h>
#include <media/drv-intf/cx2341x.h>
-#include "cx88.h"
-
MODULE_DESCRIPTION("driver for cx2388x/cx23416 based mpeg encoder cards");
MODULE_AUTHOR("Jelle Foks <jelle@foks.us>, Gerd Knorr <kraxel@bytesex.org> [SuSE Labs]");
MODULE_LICENSE("GPL");
MODULE_VERSION(CX88_VERSION);
static unsigned int debug;
-module_param(debug,int,0644);
-MODULE_PARM_DESC(debug,"enable debug messages [blackbird]");
+module_param(debug, int, 0644);
+MODULE_PARM_DESC(debug, "enable debug messages [blackbird]");
-#define dprintk(level, fmt, arg...) do { \
- if (debug + 1 > level) \
- printk(KERN_DEBUG "%s/2-bb: " fmt, dev->core->name , ## arg); \
-} while(0)
+#define dprintk(level, fmt, arg...) do { \
+ if (debug + 1 > level) \
+ printk(KERN_DEBUG pr_fmt("%s: blackbird: " fmt), \
+ __func__, ##arg); \
+} while (0)
/* ------------------------------------------------------------------ */
@@ -70,6 +66,7 @@ enum blackbird_capture_type {
BLACKBIRD_RAW_CAPTURE,
BLACKBIRD_RAW_PASSTHRU_CAPTURE
};
+
enum blackbird_capture_bits {
BLACKBIRD_RAW_BITS_NONE = 0x00,
BLACKBIRD_RAW_BITS_YUV_CAPTURE = 0x01,
@@ -78,33 +75,40 @@ enum blackbird_capture_bits {
BLACKBIRD_RAW_BITS_PASSTHRU_CAPTURE = 0x08,
BLACKBIRD_RAW_BITS_TO_HOST_CAPTURE = 0x10
};
+
enum blackbird_capture_end {
BLACKBIRD_END_AT_GOP, /* stop at the end of gop, generate irq */
BLACKBIRD_END_NOW, /* stop immediately, no irq */
};
+
enum blackbird_framerate {
BLACKBIRD_FRAMERATE_NTSC_30, /* NTSC: 30fps */
BLACKBIRD_FRAMERATE_PAL_25 /* PAL: 25fps */
};
+
enum blackbird_stream_port {
BLACKBIRD_OUTPUT_PORT_MEMORY,
BLACKBIRD_OUTPUT_PORT_STREAMING,
BLACKBIRD_OUTPUT_PORT_SERIAL
};
+
enum blackbird_data_xfer_status {
BLACKBIRD_MORE_BUFFERS_FOLLOW,
BLACKBIRD_LAST_BUFFER,
};
+
enum blackbird_picture_mask {
BLACKBIRD_PICTURE_MASK_NONE,
BLACKBIRD_PICTURE_MASK_I_FRAMES,
BLACKBIRD_PICTURE_MASK_I_P_FRAMES = 0x3,
BLACKBIRD_PICTURE_MASK_ALL_FRAMES = 0x7,
};
+
enum blackbird_vbi_mode_bits {
BLACKBIRD_VBI_BITS_SLICED,
BLACKBIRD_VBI_BITS_RAW,
};
+
enum blackbird_vbi_insertion_bits {
BLACKBIRD_VBI_BITS_INSERT_IN_XTENSION_USR_DATA,
BLACKBIRD_VBI_BITS_INSERT_IN_PRIVATE_PACKETS = 0x1 << 1,
@@ -112,56 +116,69 @@ enum blackbird_vbi_insertion_bits {
BLACKBIRD_VBI_BITS_SEPARATE_STREAM_USR_DATA = 0x4 << 1,
BLACKBIRD_VBI_BITS_SEPARATE_STREAM_PRV_DATA = 0x5 << 1,
};
+
enum blackbird_dma_unit {
BLACKBIRD_DMA_BYTES,
BLACKBIRD_DMA_FRAMES,
};
+
enum blackbird_dma_transfer_status_bits {
BLACKBIRD_DMA_TRANSFER_BITS_DONE = 0x01,
BLACKBIRD_DMA_TRANSFER_BITS_ERROR = 0x04,
BLACKBIRD_DMA_TRANSFER_BITS_LL_ERROR = 0x10,
};
+
enum blackbird_pause {
BLACKBIRD_PAUSE_ENCODING,
BLACKBIRD_RESUME_ENCODING,
};
+
enum blackbird_copyright {
BLACKBIRD_COPYRIGHT_OFF,
BLACKBIRD_COPYRIGHT_ON,
};
+
enum blackbird_notification_type {
BLACKBIRD_NOTIFICATION_REFRESH,
};
+
enum blackbird_notification_status {
BLACKBIRD_NOTIFICATION_OFF,
BLACKBIRD_NOTIFICATION_ON,
};
+
enum blackbird_notification_mailbox {
BLACKBIRD_NOTIFICATION_NO_MAILBOX = -1,
};
+
enum blackbird_field1_lines {
BLACKBIRD_FIELD1_SAA7114 = 0x00EF, /* 239 */
BLACKBIRD_FIELD1_SAA7115 = 0x00F0, /* 240 */
BLACKBIRD_FIELD1_MICRONAS = 0x0105, /* 261 */
};
+
enum blackbird_field2_lines {
BLACKBIRD_FIELD2_SAA7114 = 0x00EF, /* 239 */
BLACKBIRD_FIELD2_SAA7115 = 0x00F0, /* 240 */
BLACKBIRD_FIELD2_MICRONAS = 0x0106, /* 262 */
};
+
enum blackbird_custom_data_type {
BLACKBIRD_CUSTOM_EXTENSION_USR_DATA,
BLACKBIRD_CUSTOM_PRIVATE_PACKET,
};
+
enum blackbird_mute {
BLACKBIRD_UNMUTE,
BLACKBIRD_MUTE,
};
+
enum blackbird_mute_video_mask {
BLACKBIRD_MUTE_VIDEO_V_MASK = 0x0000FF00,
BLACKBIRD_MUTE_VIDEO_U_MASK = 0x00FF0000,
BLACKBIRD_MUTE_VIDEO_Y_MASK = 0xFF000000,
};
+
enum blackbird_mute_video_shift {
BLACKBIRD_MUTE_VIDEO_V_SHIFT = 8,
BLACKBIRD_MUTE_VIDEO_U_SHIFT = 16,
@@ -215,14 +232,14 @@ static void host_setup(struct cx88_core *core)
static int wait_ready_gpio0_bit1(struct cx88_core *core, u32 state)
{
unsigned long timeout = jiffies + msecs_to_jiffies(1);
- u32 gpio0,need;
+ u32 gpio0, need;
need = state ? 2 : 0;
for (;;) {
gpio0 = cx_read(MO_GP0_IO) & 2;
if (need == gpio0)
return 0;
- if (time_after(jiffies,timeout))
+ if (time_after(jiffies, timeout))
return -1;
udelay(1);
}
@@ -241,7 +258,7 @@ static int memory_write(struct cx88_core *core, u32 address, u32 value)
cx_read(P1_MDATA0);
cx_read(P1_MADDR0);
- return wait_ready_gpio0_bit1(core,1);
+ return wait_ready_gpio0_bit1(core, 1);
}
static int memory_read(struct cx88_core *core, u32 address, u32 *value)
@@ -255,7 +272,7 @@ static int memory_read(struct cx88_core *core, u32 address, u32 *value)
cx_writeb(P1_MADDR0, (unsigned int)address);
cx_read(P1_MADDR0);
- retval = wait_ready_gpio0_bit1(core,1);
+ retval = wait_ready_gpio0_bit1(core, 1);
cx_writeb(P1_MDATA3, 0);
val = (unsigned char)cx_read(P1_MDATA3) << 24;
@@ -282,10 +299,9 @@ static int register_write(struct cx88_core *core, u32 address, u32 value)
cx_read(P1_RDATA0);
cx_read(P1_RADDR0);
- return wait_ready_gpio0_bit1(core,1);
+ return wait_ready_gpio0_bit1(core, 1);
}
-
static int register_read(struct cx88_core *core, u32 address, u32 *value)
{
int retval;
@@ -296,7 +312,7 @@ static int register_read(struct cx88_core *core, u32 address, u32 *value)
cx_writeb(P1_RRDWR, 0);
cx_read(P1_RADDR0);
- retval = wait_ready_gpio0_bit1(core,1);
+ retval = wait_ready_gpio0_bit1(core, 1);
val = (unsigned char)cx_read(P1_RDATA0);
val |= (unsigned char)cx_read(P1_RDATA1) << 8;
val |= (unsigned char)cx_read(P1_RDATA2) << 16;
@@ -308,20 +324,24 @@ static int register_read(struct cx88_core *core, u32 address, u32 *value)
/* ------------------------------------------------------------------ */
-static int blackbird_mbox_func(void *priv, u32 command, int in, int out, u32 data[CX2341X_MBOX_MAX_DATA])
+static int blackbird_mbox_func(void *priv, u32 command, int in,
+ int out, u32 data[CX2341X_MBOX_MAX_DATA])
{
struct cx8802_dev *dev = priv;
unsigned long timeout;
u32 value, flag, retval;
int i;
- dprintk(1,"%s: 0x%X\n", __func__, command);
+ dprintk(1, "%s: 0x%X\n", __func__, command);
- /* this may not be 100% safe if we can't read any memory location
- without side effects */
+ /*
+ * this may not be 100% safe if we can't read any memory location
+ * without side effects
+ */
memory_read(dev->core, dev->mailbox - 4, &value);
if (value != 0x12345678) {
- dprintk(0, "Firmware and/or mailbox pointer not initialized or corrupted\n");
+ dprintk(0,
+ "Firmware and/or mailbox pointer not initialized or corrupted\n");
return -EIO;
}
@@ -336,7 +356,8 @@ static int blackbird_mbox_func(void *priv, u32 command, int in, int out, u32 dat
/* write command + args + fill remaining with zeros */
memory_write(dev->core, dev->mailbox + 1, command); /* command code */
- memory_write(dev->core, dev->mailbox + 3, IVTV_API_STD_TIMEOUT); /* timeout */
+ /* timeout */
+ memory_write(dev->core, dev->mailbox + 3, IVTV_API_STD_TIMEOUT);
for (i = 0; i < in; i++) {
memory_write(dev->core, dev->mailbox + 4 + i, data[i]);
dprintk(1, "API Input %d = %d\n", i, data[i]);
@@ -353,7 +374,7 @@ static int blackbird_mbox_func(void *priv, u32 command, int in, int out, u32 dat
memory_read(dev->core, dev->mailbox, &flag);
if (0 != (flag & 4))
break;
- if (time_after(jiffies,timeout)) {
+ if (time_after(jiffies, timeout)) {
dprintk(0, "ERROR: API Mailbox timeout %x\n", command);
return -EIO;
}
@@ -367,15 +388,19 @@ static int blackbird_mbox_func(void *priv, u32 command, int in, int out, u32 dat
}
memory_read(dev->core, dev->mailbox + 2, &retval);
- dprintk(1, "API result = %d\n",retval);
+ dprintk(1, "API result = %d\n", retval);
flag = 0;
memory_write(dev->core, dev->mailbox, flag);
return retval;
}
+
/* ------------------------------------------------------------------ */
-/* We don't need to call the API often, so using just one mailbox will probably suffice */
+/*
+ * We don't need to call the API often, so using just one mailbox
+ * will probably suffice
+ */
static int blackbird_api_cmd(struct cx8802_dev *dev, u32 command,
u32 inputcnt, u32 outputcnt, ...)
{
@@ -385,9 +410,9 @@ static int blackbird_api_cmd(struct cx8802_dev *dev, u32 command,
va_start(vargs, outputcnt);
- for (i = 0; i < inputcnt; i++) {
+ for (i = 0; i < inputcnt; i++)
data[i] = va_arg(vargs, int);
- }
+
err = blackbird_mbox_func(dev, command, inputcnt, outputcnt, data);
for (i = 0; i < outputcnt; i++) {
int *vptr = va_arg(vargs, int *);
@@ -399,8 +424,8 @@ static int blackbird_api_cmd(struct cx8802_dev *dev, u32 command,
static int blackbird_find_mailbox(struct cx8802_dev *dev)
{
- u32 signature[4]={0x12345678, 0x34567812, 0x56781234, 0x78123456};
- int signaturecnt=0;
+ u32 signature[4] = {0x12345678, 0x34567812, 0x56781234, 0x78123456};
+ int signaturecnt = 0;
u32 value;
int i;
@@ -410,9 +435,9 @@ static int blackbird_find_mailbox(struct cx8802_dev *dev)
signaturecnt++;
else
signaturecnt = 0;
- if (4 == signaturecnt) {
+ if (signaturecnt == 4) {
dprintk(1, "Mailbox signature found\n");
- return i+1;
+ return i + 1;
}
}
dprintk(0, "Mailbox signature values not found!\n");
@@ -431,10 +456,13 @@ static int blackbird_load_firmware(struct cx8802_dev *dev)
__le32 *dataptr;
retval = register_write(dev->core, IVTV_REG_VPU, 0xFFFFFFED);
- retval |= register_write(dev->core, IVTV_REG_HW_BLOCKS, IVTV_CMD_HW_BLOCKS_RST);
- retval |= register_write(dev->core, IVTV_REG_ENC_SDRAM_REFRESH, 0x80000640);
- retval |= register_write(dev->core, IVTV_REG_ENC_SDRAM_PRECHARGE, 0x1A);
- msleep(1);
+ retval |= register_write(dev->core, IVTV_REG_HW_BLOCKS,
+ IVTV_CMD_HW_BLOCKS_RST);
+ retval |= register_write(dev->core, IVTV_REG_ENC_SDRAM_REFRESH,
+ 0x80000640);
+ retval |= register_write(dev->core, IVTV_REG_ENC_SDRAM_PRECHARGE,
+ 0x1A);
+ usleep_range(10000, 20000);
retval |= register_write(dev->core, IVTV_REG_APU, 0);
if (retval < 0)
@@ -443,29 +471,28 @@ static int blackbird_load_firmware(struct cx8802_dev *dev)
retval = request_firmware(&firmware, CX2341X_FIRM_ENC_FILENAME,
&dev->pci->dev);
-
if (retval != 0) {
pr_err("Hotplug firmware request failed (%s).\n",
- CX2341X_FIRM_ENC_FILENAME);
+ CX2341X_FIRM_ENC_FILENAME);
pr_err("Please fix your hotplug setup, the board will not work without firmware loaded!\n");
return -EIO;
}
if (firmware->size != BLACKBIRD_FIRM_IMAGE_SIZE) {
pr_err("Firmware size mismatch (have %zd, expected %d)\n",
- firmware->size, BLACKBIRD_FIRM_IMAGE_SIZE);
+ firmware->size, BLACKBIRD_FIRM_IMAGE_SIZE);
release_firmware(firmware);
return -EINVAL;
}
- if (0 != memcmp(firmware->data, magic, 8)) {
+ if (memcmp(firmware->data, magic, 8) != 0) {
pr_err("Firmware magic mismatch, wrong file?\n");
release_firmware(firmware);
return -EINVAL;
}
/* transfer to the chip */
- dprintk(1,"Loading firmware ...\n");
+ dprintk(1, "Loading firmware ...\n");
dataptr = (__le32 *)firmware->data;
for (i = 0; i < (firmware->size >> 2); i++) {
value = le32_to_cpu(*dataptr);
@@ -486,10 +513,11 @@ static int blackbird_load_firmware(struct cx8802_dev *dev)
}
dprintk(0, "Firmware upload successful.\n");
- retval |= register_write(dev->core, IVTV_REG_HW_BLOCKS, IVTV_CMD_HW_BLOCKS_RST);
+ retval |= register_write(dev->core, IVTV_REG_HW_BLOCKS,
+ IVTV_CMD_HW_BLOCKS_RST);
retval |= register_read(dev->core, IVTV_REG_SPU, &value);
retval |= register_write(dev->core, IVTV_REG_SPU, value & 0xFFFFFFFE);
- msleep(1);
+ usleep_range(10000, 20000);
retval |= register_read(dev->core, IVTV_REG_VPU, &value);
retval |= register_write(dev->core, IVTV_REG_VPU, value & 0xFFFFFFE8);
@@ -499,19 +527,19 @@ static int blackbird_load_firmware(struct cx8802_dev *dev)
return 0;
}
-/**
- Settings used by the windows tv app for PVR2000:
-=================================================================================================================
-Profile | Codec | Resolution | CBR/VBR | Video Qlty | V. Bitrate | Frmrate | Audio Codec | A. Bitrate | A. Mode
------------------------------------------------------------------------------------------------------------------
-MPEG-1 | MPEG1 | 352x288PAL | (CBR) | 1000:Optimal | 2000 Kbps | 25fps | MPG1 Layer2 | 224kbps | Stereo
-MPEG-2 | MPEG2 | 720x576PAL | VBR | 600 :Good | 4000 Kbps | 25fps | MPG1 Layer2 | 224kbps | Stereo
-VCD | MPEG1 | 352x288PAL | (CBR) | 1000:Optimal | 1150 Kbps | 25fps | MPG1 Layer2 | 224kbps | Stereo
-DVD | MPEG2 | 720x576PAL | VBR | 600 :Good | 6000 Kbps | 25fps | MPG1 Layer2 | 224kbps | Stereo
-DB* DVD | MPEG2 | 720x576PAL | CBR | 600 :Good | 6000 Kbps | 25fps | MPG1 Layer2 | 224kbps | Stereo
-=================================================================================================================
-*DB: "DirectBurn"
-*/
+/*
+ * Settings used by the Windows TV app for PVR2000:
+ * =================================================================================================================
+ * Profile | Codec | Resolution | CBR/VBR | Video Qlty | V. Bitrate | Frmrate | Audio Codec | A. Bitrate | A. Mode
+ * -----------------------------------------------------------------------------------------------------------------
+ * MPEG-1 | MPEG1 | 352x288PAL | (CBR) | 1000:Optimal | 2000 Kbps | 25fps | MPG1 Layer2 | 224kbps | Stereo
+ * MPEG-2 | MPEG2 | 720x576PAL | VBR | 600 :Good | 4000 Kbps | 25fps | MPG1 Layer2 | 224kbps | Stereo
+ * VCD | MPEG1 | 352x288PAL | (CBR) | 1000:Optimal | 1150 Kbps | 25fps | MPG1 Layer2 | 224kbps | Stereo
+ * DVD | MPEG2 | 720x576PAL | VBR | 600 :Good | 6000 Kbps | 25fps | MPG1 Layer2 | 224kbps | Stereo
+ * DB* DVD | MPEG2 | 720x576PAL | CBR | 600 :Good | 6000 Kbps | 25fps | MPG1 Layer2 | 224kbps | Stereo
+ * =================================================================================================================
+ * [*] DB: "DirectBurn"
+ */
static void blackbird_codec_settings(struct cx8802_dev *dev)
{
@@ -519,11 +547,12 @@ static void blackbird_codec_settings(struct cx8802_dev *dev)
/* assign frame size */
blackbird_api_cmd(dev, CX2341X_ENC_SET_FRAME_SIZE, 2, 0,
- core->height, core->width);
+ core->height, core->width);
dev->cxhdl.width = core->width;
dev->cxhdl.height = core->height;
- cx2341x_handler_set_50hz(&dev->cxhdl, dev->core->tvnorm & V4L2_STD_625_50);
+ cx2341x_handler_set_50hz(&dev->cxhdl,
+ dev->core->tvnorm & V4L2_STD_625_50);
cx2341x_handler_setup(&dev->cxhdl);
}
@@ -533,7 +562,7 @@ static int blackbird_initialize_codec(struct cx8802_dev *dev)
int version;
int retval;
- dprintk(1,"Initialize codec\n");
+ dprintk(1, "Initialize codec\n");
retval = blackbird_api_cmd(dev, CX2341X_ENC_PING_FW, 0, 0); /* ping */
if (retval < 0) {
/* ping was not successful, reset and upload firmware */
@@ -549,15 +578,18 @@ static int blackbird_initialize_codec(struct cx8802_dev *dev)
dev->mailbox = retval;
- retval = blackbird_api_cmd(dev, CX2341X_ENC_PING_FW, 0, 0); /* ping */
+ /* ping */
+ retval = blackbird_api_cmd(dev, CX2341X_ENC_PING_FW, 0, 0);
if (retval < 0) {
dprintk(0, "ERROR: Firmware ping failed!\n");
return -1;
}
- retval = blackbird_api_cmd(dev, CX2341X_ENC_GET_VERSION, 0, 1, &version);
+ retval = blackbird_api_cmd(dev, CX2341X_ENC_GET_VERSION,
+ 0, 1, &version);
if (retval < 0) {
- dprintk(0, "ERROR: Firmware get encoder version failed!\n");
+ dprintk(0,
+ "ERROR: Firmware get encoder version failed!\n");
return -1;
}
dprintk(0, "Firmware version is 0x%08x\n", version);
@@ -571,13 +603,11 @@ static int blackbird_initialize_codec(struct cx8802_dev *dev)
blackbird_codec_settings(dev);
blackbird_api_cmd(dev, CX2341X_ENC_SET_NUM_VSYNC_LINES, 2, 0,
- BLACKBIRD_FIELD1_SAA7115,
- BLACKBIRD_FIELD2_SAA7115
- );
+ BLACKBIRD_FIELD1_SAA7115, BLACKBIRD_FIELD2_SAA7115);
blackbird_api_cmd(dev, CX2341X_ENC_SET_PLACEHOLDER, 12, 0,
- BLACKBIRD_CUSTOM_EXTENSION_USR_DATA,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
+ BLACKBIRD_CUSTOM_EXTENSION_USR_DATA,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
return 0;
}
@@ -615,9 +645,7 @@ static int blackbird_start_codec(struct cx8802_dev *dev)
/* start capturing to the host interface */
blackbird_api_cmd(dev, CX2341X_ENC_START_CAPTURE, 2, 0,
- BLACKBIRD_MPEG_CAPTURE,
- BLACKBIRD_RAW_BITS_NONE
- );
+ BLACKBIRD_MPEG_CAPTURE, BLACKBIRD_RAW_BITS_NONE);
return 0;
}
@@ -625,10 +653,9 @@ static int blackbird_start_codec(struct cx8802_dev *dev)
static int blackbird_stop_codec(struct cx8802_dev *dev)
{
blackbird_api_cmd(dev, CX2341X_ENC_STOP_CAPTURE, 3, 0,
- BLACKBIRD_END_NOW,
- BLACKBIRD_MPEG_CAPTURE,
- BLACKBIRD_RAW_BITS_NONE
- );
+ BLACKBIRD_END_NOW,
+ BLACKBIRD_MPEG_CAPTURE,
+ BLACKBIRD_RAW_BITS_NONE);
cx2341x_handler_set_busy(&dev->cxhdl, 0);
@@ -638,8 +665,8 @@ static int blackbird_stop_codec(struct cx8802_dev *dev)
/* ------------------------------------------------------------------ */
static int queue_setup(struct vb2_queue *q,
- unsigned int *num_buffers, unsigned int *num_planes,
- unsigned int sizes[], struct device *alloc_devs[])
+ unsigned int *num_buffers, unsigned int *num_planes,
+ unsigned int sizes[], struct device *alloc_devs[])
{
struct cx8802_dev *dev = q->drv_priv;
@@ -699,7 +726,8 @@ static int start_streaming(struct vb2_queue *q, unsigned int count)
err = drv->request_acquire(drv);
if (err != 0) {
- dprintk(1, "%s: Unable to acquire hardware, %d\n", __func__, err);
+ dprintk(1, "%s: Unable to acquire hardware, %d\n", __func__,
+ err);
goto fail;
}
@@ -770,7 +798,7 @@ static const struct vb2_ops blackbird_qops = {
/* ------------------------------------------------------------------ */
static int vidioc_querycap(struct file *file, void *priv,
- struct v4l2_capability *cap)
+ struct v4l2_capability *cap)
{
struct cx8802_dev *dev = video_drvdata(file);
struct cx88_core *core = dev->core;
@@ -781,8 +809,8 @@ static int vidioc_querycap(struct file *file, void *priv,
return 0;
}
-static int vidioc_enum_fmt_vid_cap (struct file *file, void *priv,
- struct v4l2_fmtdesc *f)
+static int vidioc_enum_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
{
if (f->index != 0)
return -EINVAL;
@@ -794,7 +822,7 @@ static int vidioc_enum_fmt_vid_cap (struct file *file, void *priv,
}
static int vidioc_g_fmt_vid_cap(struct file *file, void *priv,
- struct v4l2_format *f)
+ struct v4l2_format *f)
{
struct cx8802_dev *dev = video_drvdata(file);
struct cx88_core *core = dev->core;
@@ -810,11 +838,11 @@ static int vidioc_g_fmt_vid_cap(struct file *file, void *priv,
}
static int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
- struct v4l2_format *f)
+ struct v4l2_format *f)
{
struct cx8802_dev *dev = video_drvdata(file);
struct cx88_core *core = dev->core;
- unsigned maxw, maxh;
+ unsigned int maxw, maxh;
enum v4l2_field field;
f->fmt.pix.pixelformat = V4L2_PIX_FMT_MPEG;
@@ -850,7 +878,7 @@ static int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
}
static int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
- struct v4l2_format *f)
+ struct v4l2_format *f)
{
struct cx8802_dev *dev = video_drvdata(file);
struct cx88_core *core = dev->core;
@@ -864,20 +892,21 @@ static int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
core->width = f->fmt.pix.width;
core->height = f->fmt.pix.height;
core->field = f->fmt.pix.field;
- cx88_set_scale(core, f->fmt.pix.width, f->fmt.pix.height, f->fmt.pix.field);
+ cx88_set_scale(core, f->fmt.pix.width, f->fmt.pix.height,
+ f->fmt.pix.field);
blackbird_api_cmd(dev, CX2341X_ENC_SET_FRAME_SIZE, 2, 0,
- f->fmt.pix.height, f->fmt.pix.width);
+ f->fmt.pix.height, f->fmt.pix.width);
return 0;
}
-static int vidioc_s_frequency (struct file *file, void *priv,
- const struct v4l2_frequency *f)
+static int vidioc_s_frequency(struct file *file, void *priv,
+ const struct v4l2_frequency *f)
{
struct cx8802_dev *dev = video_drvdata(file);
struct cx88_core *core = dev->core;
bool streaming;
- if (unlikely(UNSET == core->board.tuner_type))
+ if (unlikely(core->board.tuner_type == UNSET))
return -EINVAL;
if (unlikely(f->tuner != 0))
return -EINVAL;
@@ -885,16 +914,15 @@ static int vidioc_s_frequency (struct file *file, void *priv,
if (streaming)
blackbird_stop_codec(dev);
- cx88_set_freq (core,f);
+ cx88_set_freq(core, f);
blackbird_initialize_codec(dev);
- cx88_set_scale(core, core->width, core->height,
- core->field);
+ cx88_set_scale(core, core->width, core->height, core->field);
if (streaming)
blackbird_start_codec(dev);
return 0;
}
-static int vidioc_log_status (struct file *file, void *priv)
+static int vidioc_log_status(struct file *file, void *priv)
{
struct cx8802_dev *dev = video_drvdata(file);
struct cx88_core *core = dev->core;
@@ -906,21 +934,22 @@ static int vidioc_log_status (struct file *file, void *priv)
return 0;
}
-static int vidioc_enum_input (struct file *file, void *priv,
- struct v4l2_input *i)
+static int vidioc_enum_input(struct file *file, void *priv,
+ struct v4l2_input *i)
{
struct cx8802_dev *dev = video_drvdata(file);
struct cx88_core *core = dev->core;
- return cx88_enum_input (core,i);
+
+ return cx88_enum_input(core, i);
}
-static int vidioc_g_frequency (struct file *file, void *priv,
- struct v4l2_frequency *f)
+static int vidioc_g_frequency(struct file *file, void *priv,
+ struct v4l2_frequency *f)
{
struct cx8802_dev *dev = video_drvdata(file);
struct cx88_core *core = dev->core;
- if (unlikely(UNSET == core->board.tuner_type))
+ if (unlikely(core->board.tuner_type == UNSET))
return -EINVAL;
if (unlikely(f->tuner != 0))
return -EINVAL;
@@ -931,7 +960,7 @@ static int vidioc_g_frequency (struct file *file, void *priv,
return 0;
}
-static int vidioc_g_input (struct file *file, void *priv, unsigned int *i)
+static int vidioc_g_input(struct file *file, void *priv, unsigned int *i)
{
struct cx8802_dev *dev = video_drvdata(file);
struct cx88_core *core = dev->core;
@@ -940,31 +969,31 @@ static int vidioc_g_input (struct file *file, void *priv, unsigned int *i)
return 0;
}
-static int vidioc_s_input (struct file *file, void *priv, unsigned int i)
+static int vidioc_s_input(struct file *file, void *priv, unsigned int i)
{
struct cx8802_dev *dev = video_drvdata(file);
struct cx88_core *core = dev->core;
if (i >= 4)
return -EINVAL;
- if (0 == INPUT(i).type)
+ if (!INPUT(i).type)
return -EINVAL;
cx88_newstation(core);
- cx88_video_mux(core,i);
+ cx88_video_mux(core, i);
return 0;
}
-static int vidioc_g_tuner (struct file *file, void *priv,
- struct v4l2_tuner *t)
+static int vidioc_g_tuner(struct file *file, void *priv,
+ struct v4l2_tuner *t)
{
struct cx8802_dev *dev = video_drvdata(file);
struct cx88_core *core = dev->core;
u32 reg;
- if (unlikely(UNSET == core->board.tuner_type))
+ if (unlikely(core->board.tuner_type == UNSET))
return -EINVAL;
- if (0 != t->index)
+ if (t->index != 0)
return -EINVAL;
strcpy(t->name, "Television");
@@ -972,21 +1001,21 @@ static int vidioc_g_tuner (struct file *file, void *priv,
t->rangehigh = 0xffffffffUL;
call_all(core, tuner, g_tuner, t);
- cx88_get_stereo(core ,t);
+ cx88_get_stereo(core, t);
reg = cx_read(MO_DEVICE_STATUS);
- t->signal = (reg & (1<<5)) ? 0xffff : 0x0000;
+ t->signal = (reg & (1 << 5)) ? 0xffff : 0x0000;
return 0;
}
-static int vidioc_s_tuner (struct file *file, void *priv,
- const struct v4l2_tuner *t)
+static int vidioc_s_tuner(struct file *file, void *priv,
+ const struct v4l2_tuner *t)
{
struct cx8802_dev *dev = video_drvdata(file);
struct cx88_core *core = dev->core;
- if (UNSET == core->board.tuner_type)
+ if (core->board.tuner_type == UNSET)
return -EINVAL;
- if (0 != t->index)
+ if (t->index != 0)
return -EINVAL;
cx88_set_stereo(core, t->audmode, 1);
@@ -1010,8 +1039,7 @@ static int vidioc_s_std(struct file *file, void *priv, v4l2_std_id id)
return cx88_set_tvnorm(core, id);
}
-static const struct v4l2_file_operations mpeg_fops =
-{
+static const struct v4l2_file_operations mpeg_fops = {
.owner = THIS_MODULE,
.open = v4l2_fh_open,
.release = vb2_fop_release,
@@ -1050,7 +1078,7 @@ static const struct v4l2_ioctl_ops mpeg_ioctl_ops = {
static struct video_device cx8802_mpeg_template = {
.name = "cx8802",
.fops = &mpeg_fops,
- .ioctl_ops = &mpeg_ioctl_ops,
+ .ioctl_ops = &mpeg_ioctl_ops,
.tvnorms = CX88_NORMS,
};
@@ -1064,7 +1092,9 @@ static int cx8802_blackbird_advise_acquire(struct cx8802_driver *drv)
switch (core->boardnr) {
case CX88_BOARD_HAUPPAUGE_HVR1300:
- /* By default, core setup will leave the cx22702 out of reset, on the bus.
+ /*
+ * By default, core setup will leave the cx22702 out of reset,
+ * on the bus.
* We left the hardware on power up with the cx22702 active.
* We're being given access to re-arrange the GPIOs.
* Take the bus off the cx22702 and put the cx23416 on it.
@@ -1118,12 +1148,11 @@ static int blackbird_register_video(struct cx8802_dev *dev)
dev->mpeg_dev.queue = &dev->vb2_mpegq;
err = video_register_device(&dev->mpeg_dev, VFL_TYPE_GRABBER, -1);
if (err < 0) {
- printk(KERN_INFO "%s/2: can't register mpeg device\n",
- dev->core->name);
+ pr_info("can't register mpeg device\n");
return err;
}
- printk(KERN_INFO "%s/2: registered device %s [mpeg]\n",
- dev->core->name, video_device_node_name(&dev->mpeg_dev));
+ pr_info("registered device %s [mpeg]\n",
+ video_device_node_name(&dev->mpeg_dev));
return 0;
}
@@ -1136,8 +1165,8 @@ static int cx8802_blackbird_probe(struct cx8802_driver *drv)
struct vb2_queue *q;
int err;
- dprintk( 1, "%s\n", __func__);
- dprintk( 1, " ->being probed by Card=%d Name=%s, PCI %02x:%02x\n",
+ dprintk(1, "%s\n", __func__);
+ dprintk(1, " ->being probed by Card=%d Name=%s, PCI %02x:%02x\n",
core->boardnr,
core->name,
core->pci_bus,
@@ -1158,16 +1187,15 @@ static int cx8802_blackbird_probe(struct cx8802_driver *drv)
v4l2_ctrl_add_handler(&dev->cxhdl.hdl, &core->video_hdl, NULL);
/* blackbird stuff */
- printk("%s/2: cx23416 based mpeg encoder (blackbird reference design)\n",
- core->name);
+ pr_info("cx23416 based mpeg encoder (blackbird reference design)\n");
host_setup(dev->core);
blackbird_initialize_codec(dev);
/* initial device configuration: needed ? */
// init_controls(core);
- cx88_set_tvnorm(core,core->tvnorm);
- cx88_video_mux(core,0);
+ cx88_set_tvnorm(core, core->tvnorm);
+ cx88_video_mux(core, 0);
cx2341x_handler_set_50hz(&dev->cxhdl, core->height == 576);
cx2341x_handler_setup(&dev->cxhdl);
@@ -1219,8 +1247,8 @@ static struct cx8802_driver cx8802_blackbird_driver = {
static int __init blackbird_init(void)
{
- printk(KERN_INFO "cx2388x blackbird driver version %s loaded\n",
- CX88_VERSION);
+ pr_info("cx2388x blackbird driver version %s loaded\n",
+ CX88_VERSION);
return cx8802_register_driver(&cx8802_blackbird_driver);
}
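
blackbird_mbox_func(), reworked above, drives a simple poll-for-completion mailbox protocol: the command and its arguments are written into encoder memory, the "command ready" flag is raised, and the host then spins on the status word against a jiffies deadline until the firmware sets the completion bit. A reduced sketch of that polling loop, assuming the driver's memory_read() accessor; the helper name and delay are illustrative:

    #include <linux/jiffies.h>
    #include <linux/delay.h>

    /*
     * Illustrative only: condenses the completion poll in
     * blackbird_mbox_func(); bit 2 of the mailbox status word is
     * treated as "firmware finished the command".
     */
    static int example_wait_mailbox_done(struct cx8802_dev *dev,
    				     unsigned int timeout_ms)
    {
    	unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms);
    	u32 flag;

    	for (;;) {
    		memory_read(dev->core, dev->mailbox, &flag);
    		if (flag & 4)
    			return 0;
    		if (time_after(jiffies, timeout))
    			return -EIO;	/* firmware never completed */
    		udelay(10);
    	}
    }
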
diff --git a/drivers/media/pci/cx88/cx88-cards.c b/drivers/media/pci/cx88/cx88-cards.c
index 8f2556ec3971..cdfbde277b8b 100644
--- a/drivers/media/pci/cx88/cx88-cards.c
+++ b/drivers/media/pci/cx88/cx88-cards.c
@@ -1,5 +1,4 @@
/*
- *
* device driver for Conexant 2388x based TV cards
* card-specific stuff.
*
@@ -14,22 +13,18 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
+#include "cx88.h"
+#include "tea5767.h"
+#include "xc4000.h"
+
#include <linux/init.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/slab.h>
-#include "cx88.h"
-#include "tea5767.h"
-#include "xc4000.h"
-
static unsigned int tuner[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
static unsigned int radio[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
static unsigned int card[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
@@ -38,32 +33,23 @@ module_param_array(tuner, int, NULL, 0444);
module_param_array(radio, int, NULL, 0444);
module_param_array(card, int, NULL, 0444);
-MODULE_PARM_DESC(tuner,"tuner type");
-MODULE_PARM_DESC(radio,"radio tuner type");
-MODULE_PARM_DESC(card,"card type");
+MODULE_PARM_DESC(tuner, "tuner type");
+MODULE_PARM_DESC(radio, "radio tuner type");
+MODULE_PARM_DESC(card, "card type");
static unsigned int latency = UNSET;
-module_param(latency,int,0444);
-MODULE_PARM_DESC(latency,"pci latency timer");
+module_param(latency, int, 0444);
+MODULE_PARM_DESC(latency, "pci latency timer");
static int disable_ir;
module_param(disable_ir, int, 0444);
MODULE_PARM_DESC(disable_ir, "Disable IR support");
-#define info_printk(core, fmt, arg...) \
- printk(KERN_INFO "%s: " fmt, core->name , ## arg)
-
-#define warn_printk(core, fmt, arg...) \
- printk(KERN_WARNING "%s: " fmt, core->name , ## arg)
-
-#define err_printk(core, fmt, arg...) \
- printk(KERN_ERR "%s: " fmt, core->name , ## arg)
-
-#define dprintk(level,fmt, arg...) do { \
+#define dprintk(level, fmt, arg...) do { \
if (cx88_core_debug >= level) \
- printk(KERN_DEBUG "%s: " fmt, core->name , ## arg); \
- } while(0)
-
+ printk(KERN_DEBUG pr_fmt("%s: core: " fmt), \
+ __func__, ##arg); \
+} while (0)
/* ------------------------------------------------------------------ */
/* board config info */
@@ -291,7 +277,6 @@ static const struct cx88_board cx88_boards[] = {
.gpio2 = 0x0035e700,
.gpio3 = 0x02000000,
}, {
-
.type = CX88_VMUX_COMPOSITE1,
.vmux = 1,
.gpio0 = 0x0035c700,
@@ -505,22 +490,22 @@ static const struct cx88_board cx88_boards[] = {
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
/*
- GPIO[0] resets DT3302 DTV receiver
- 0 - reset asserted
- 1 - normal operation
- GPIO[1] mutes analog audio output connector
- 0 - enable selected source
- 1 - mute
- GPIO[2] selects source for analog audio output connector
- 0 - analog audio input connector on tab
- 1 - analog DAC output from CX23881 chip
- GPIO[3] selects RF input connector on tuner module
- 0 - RF connector labeled CABLE
- 1 - RF connector labeled ANT
- GPIO[4] selects high RF for QAM256 mode
- 0 - normal RF
- 1 - high RF
- */
+ * GPIO[0] resets DT3302 DTV receiver
+ * 0 - reset asserted
+ * 1 - normal operation
+ * GPIO[1] mutes analog audio output connector
+ * 0 - enable selected source
+ * 1 - mute
+ * GPIO[2] selects source for analog audio output connector
+ * 0 - analog audio input connector on tab
+ * 1 - analog DAC output from CX23881 chip
+ * GPIO[3] selects RF input connector on tuner module
+ * 0 - RF connector labeled CABLE
+ * 1 - RF connector labeled ANT
+ * GPIO[4] selects high RF for QAM256 mode
+ * 0 - normal RF
+ * 1 - high RF
+ */
.input = { {
.type = CX88_VMUX_TELEVISION,
.vmux = 0,
@@ -743,7 +728,10 @@ static const struct cx88_board cx88_boards[] = {
.radio_type = UNSET,
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
- /* Some variants use a tda9874 and so need the tvaudio module. */
+ /*
+ * Some variants use a tda9874 and so need the
+ * tvaudio module.
+ */
.audio_chip = CX88_AUDIO_TVAUDIO,
.input = { {
.type = CX88_VMUX_TELEVISION,
@@ -1209,8 +1197,10 @@ static const struct cx88_board cx88_boards[] = {
.mpeg = CX88_MPEG_DVB,
},
[CX88_BOARD_KWORLD_MCE200_DELUXE] = {
- /* FIXME: tested TV input only, disabled composite,
- svideo and radio until they can be tested also. */
+ /*
+ * FIXME: tested TV input only, disabled composite,
+ * svideo and radio until they can be tested also.
+ */
.name = "Kworld MCE 200 Deluxe",
.tuner_type = TUNER_TENA_9533_DI,
.radio_type = UNSET,
@@ -1721,16 +1711,24 @@ static const struct cx88_board cx88_boards[] = {
},
},
[CX88_BOARD_POWERCOLOR_REAL_ANGEL] = {
- .name = "PowerColor RA330", /* Long names may confuse LIRC. */
+ /* Long names may confuse LIRC. */
+ .name = "PowerColor RA330",
.tuner_type = TUNER_XC2028,
.tuner_addr = 0x61,
.input = { {
+ /*
+ * Due to the way the cx88 driver is written,
+ * there is no way to deactivate audio pass-
+ * through without this entry. Furthermore, if
+ * the TV mux entry is first, you get audio
+ * from the tuner on boot for a little while.
+ */
.type = CX88_VMUX_DEBUG,
- .vmux = 3, /* Due to the way the cx88 driver is written, */
- .gpio0 = 0x00ff, /* there is no way to deactivate audio pass- */
- .gpio1 = 0xf39d, /* through without this entry. Furthermore, if */
- .gpio3 = 0x0000, /* the TV mux entry is first, you get audio */
- }, { /* from the tuner on boot for a little while. */
+ .vmux = 3,
+ .gpio0 = 0x00ff,
+ .gpio1 = 0xf39d,
+ .gpio3 = 0x0000,
+ }, {
.type = CX88_VMUX_TELEVISION,
.vmux = 0,
.gpio0 = 0x00ff,
@@ -1883,11 +1881,12 @@ static const struct cx88_board cx88_boards[] = {
.gpio2 = 0x0cf7,
},
},
- /* Both radio, analog and ATSC work with this board.
- However, for analog to work, s5h1409 gate should be open,
- otherwise, tuner-xc3028 won't be detected.
- A proper fix require using the newer i2c methods to add
- tuner-xc3028 without doing an i2c probe.
+ /*
+ * Both radio, analog and ATSC work with this board.
+ * However, for analog to work, s5h1409 gate should be open,
+ * otherwise, tuner-xc3028 won't be detected.
+	 * A proper fix requires using the newer i2c methods to add
+ * tuner-xc3028 without doing an i2c probe.
*/
[CX88_BOARD_KWORLD_ATSC_120] = {
.name = "Kworld PlusTV HD PCI 120 (ATSC 120)",
@@ -2821,15 +2820,15 @@ static const struct cx88_subid cx88_subids[] = {
},
};
-/* ----------------------------------------------------------------------- */
-/* some leadtek specific stuff */
-
+/*
+ * some leadtek specific stuff
+ */
static void leadtek_eeprom(struct cx88_core *core, u8 *eeprom_data)
{
if (eeprom_data[4] != 0x7d ||
eeprom_data[5] != 0x10 ||
eeprom_data[7] != 0x66) {
- warn_printk(core, "Leadtek eeprom invalid.\n");
+ pr_warn("Leadtek eeprom invalid.\n");
return;
}
@@ -2847,9 +2846,8 @@ static void leadtek_eeprom(struct cx88_core *core, u8 *eeprom_data)
break;
}
- info_printk(core, "Leadtek Winfast 2000XP Expert config: "
- "tuner=%d, eeprom[0]=0x%02x\n",
- core->board.tuner_type, eeprom_data[0]);
+ pr_info("Leadtek Winfast 2000XP Expert config: tuner=%d, eeprom[0]=0x%02x\n",
+ core->board.tuner_type, eeprom_data[0]);
}
static void hauppauge_eeprom(struct cx88_core *core, u8 *eeprom_data)
@@ -2863,8 +2861,7 @@ static void hauppauge_eeprom(struct cx88_core *core, u8 *eeprom_data)
core->model = tv.model;
/* Make sure we support the board model */
- switch (tv.model)
- {
+ switch (tv.model) {
case 14009: /* WinTV-HVR3000 (Retail, IR, b/panel video, 3.5mm audio in) */
case 14019: /* WinTV-HVR3000 (Retail, IR Blaster, b/panel video, 3.5mm audio in) */
case 14029: /* WinTV-HVR3000 (Retail, IR, b/panel video, 3.5mm audio in - 880 bridge) */
@@ -2905,50 +2902,50 @@ static void hauppauge_eeprom(struct cx88_core *core, u8 *eeprom_data)
cx_set(MO_GP0_IO, 0x008989FF);
break;
default:
- warn_printk(core, "warning: unknown hauppauge model #%d\n",
- tv.model);
+ pr_warn("warning: unknown hauppauge model #%d\n", tv.model);
break;
}
- info_printk(core, "hauppauge eeprom: model=%d\n", tv.model);
+ pr_info("hauppauge eeprom: model=%d\n", tv.model);
}
-/* ----------------------------------------------------------------------- */
-/* some GDI (was: Modular Technology) specific stuff */
+/*
+ * some GDI (was: Modular Technology) specific stuff
+ */
static const struct {
int id;
int fm;
const char *name;
} gdi_tuner[] = {
- [ 0x01 ] = { .id = UNSET,
- .name = "NTSC_M" },
- [ 0x02 ] = { .id = UNSET,
- .name = "PAL_B" },
- [ 0x03 ] = { .id = UNSET,
- .name = "PAL_I" },
- [ 0x04 ] = { .id = UNSET,
- .name = "PAL_D" },
- [ 0x05 ] = { .id = UNSET,
- .name = "SECAM" },
-
- [ 0x10 ] = { .id = UNSET,
- .fm = 1,
- .name = "TEMIC_4049" },
- [ 0x11 ] = { .id = TUNER_TEMIC_4136FY5,
- .name = "TEMIC_4136" },
- [ 0x12 ] = { .id = UNSET,
- .name = "TEMIC_4146" },
-
- [ 0x20 ] = { .id = TUNER_PHILIPS_FQ1216ME,
- .fm = 1,
- .name = "PHILIPS_FQ1216_MK3" },
- [ 0x21 ] = { .id = UNSET, .fm = 1,
- .name = "PHILIPS_FQ1236_MK3" },
- [ 0x22 ] = { .id = UNSET,
- .name = "PHILIPS_FI1236_MK3" },
- [ 0x23 ] = { .id = UNSET,
- .name = "PHILIPS_FI1216_MK3" },
+ [0x01] = { .id = UNSET,
+ .name = "NTSC_M" },
+ [0x02] = { .id = UNSET,
+ .name = "PAL_B" },
+ [0x03] = { .id = UNSET,
+ .name = "PAL_I" },
+ [0x04] = { .id = UNSET,
+ .name = "PAL_D" },
+ [0x05] = { .id = UNSET,
+ .name = "SECAM" },
+
+ [0x10] = { .id = UNSET,
+ .fm = 1,
+ .name = "TEMIC_4049" },
+ [0x11] = { .id = TUNER_TEMIC_4136FY5,
+ .name = "TEMIC_4136" },
+ [0x12] = { .id = UNSET,
+ .name = "TEMIC_4146" },
+
+ [0x20] = { .id = TUNER_PHILIPS_FQ1216ME,
+ .fm = 1,
+ .name = "PHILIPS_FQ1216_MK3" },
+ [0x21] = { .id = UNSET, .fm = 1,
+ .name = "PHILIPS_FQ1236_MK3" },
+ [0x22] = { .id = UNSET,
+ .name = "PHILIPS_FI1236_MK3" },
+ [0x23] = { .id = UNSET,
+ .name = "PHILIPS_FI1216_MK3" },
};
static void gdi_eeprom(struct cx88_core *core, u8 *eeprom_data)
@@ -2956,16 +2953,17 @@ static void gdi_eeprom(struct cx88_core *core, u8 *eeprom_data)
const char *name = (eeprom_data[0x0d] < ARRAY_SIZE(gdi_tuner))
? gdi_tuner[eeprom_data[0x0d]].name : NULL;
- info_printk(core, "GDI: tuner=%s\n", name ? name : "unknown");
- if (NULL == name)
+ pr_info("GDI: tuner=%s\n", name ? name : "unknown");
+ if (!name)
return;
core->board.tuner_type = gdi_tuner[eeprom_data[0x0d]].id;
core->board.radio.type = gdi_tuner[eeprom_data[0x0d]].fm ?
CX88_RADIO : 0;
}
-/* ------------------------------------------------------------------- */
-/* some Divco specific stuff */
+/*
+ * some DViCO specific stuff
+ */
static int cx88_dvico_xc2028_callback(struct cx88_core *core,
int command, int arg)
{
@@ -2994,9 +2992,9 @@ static int cx88_dvico_xc2028_callback(struct cx88_core *core,
return 0;
}
-
-/* ----------------------------------------------------------------------- */
-/* some Geniatech specific stuff */
+/*
+ * some Geniatech specific stuff
+ */
static int cx88_xc3028_geniatech_tuner_callback(struct cx88_core *core,
int command, int mode)
@@ -3059,8 +3057,9 @@ static int cx88_xc4000_winfast2000h_plus_callback(struct cx88_core *core,
return -EINVAL;
}
-/* ------------------------------------------------------------------- */
-/* some Divco specific stuff */
+/*
+ * some DViCO specific stuff
+ */
static int cx88_pv_8000gt_callback(struct cx88_core *core,
int command, int arg)
{
@@ -3079,8 +3078,9 @@ static int cx88_pv_8000gt_callback(struct cx88_core *core,
return 0;
}
-/* ----------------------------------------------------------------------- */
-/* some DViCO specific stuff */
+/*
+ * some DViCO specific stuff
+ */
static void dvico_fusionhdtv_hybrid_init(struct cx88_core *core)
{
@@ -3107,8 +3107,8 @@ static void dvico_fusionhdtv_hybrid_init(struct cx88_core *core)
msg.len = (i != 12 ? 5 : 2);
err = i2c_transfer(&core->i2c_adap, &msg, 1);
if (err != 1) {
- warn_printk(core, "dvico_fusionhdtv_hybrid_init buf %d "
- "failed (err = %d)!\n", i, err);
+ pr_warn("dvico_fusionhdtv_hybrid_init buf %d failed (err = %d)!\n",
+ i, err);
return;
}
}
@@ -3176,11 +3176,11 @@ static int cx88_xc4000_tuner_callback(struct cx88_core *core,
return -EINVAL;
}
-/* ----------------------------------------------------------------------- */
-/* Tuner callback function. Currently only needed for the Pinnacle *
- * PCTV HD 800i with an xc5000 sillicon tuner. This is used for both *
- * analog tuner attach (tuner-core.c) and dvb tuner attach (cx88-dvb.c) */
-
+/*
+ * Tuner callback function. Currently only needed for the Pinnacle
+ * PCTV HD 800i with an xc5000 silicon tuner. This is used for both
+ * analog tuner attach (tuner-core.c) and dvb tuner attach (cx88-dvb.c)
+ */
static int cx88_xc5000_tuner_callback(struct cx88_core *core,
int command, int arg)
{
@@ -3188,38 +3188,38 @@ static int cx88_xc5000_tuner_callback(struct cx88_core *core,
case CX88_BOARD_PINNACLE_PCTV_HD_800i:
if (command == 0) { /* This is the reset command from xc5000 */
- /* djh - According to the engineer at PCTV Systems,
- the xc5000 reset pin is supposed to be on GPIO12.
- However, despite three nights of effort, pulling
- that GPIO low didn't reset the xc5000. While
- pulling MO_SRST_IO low does reset the xc5000, this
- also resets in the s5h1409 being reset as well.
- This causes tuning to always fail since the internal
- state of the s5h1409 does not match the driver's
- state. Given that the only two conditions in which
- the driver performs a reset is during firmware load
- and powering down the chip, I am taking out the
- reset. We know that the chip is being reset
- when the cx88 comes online, and not being able to
- do power management for this board is worse than
- not having any tuning at all. */
+ /*
+ * djh - According to the engineer at PCTV Systems,
+ * the xc5000 reset pin is supposed to be on GPIO12.
+ * However, despite three nights of effort, pulling
+ * that GPIO low didn't reset the xc5000. While
+ * pulling MO_SRST_IO low does reset the xc5000, this
+	 * also results in the s5h1409 being reset as well.
+ * This causes tuning to always fail since the internal
+ * state of the s5h1409 does not match the driver's
+ * state. Given that the only two conditions in which
+ * the driver performs a reset is during firmware load
+ * and powering down the chip, I am taking out the
+ * reset. We know that the chip is being reset
+ * when the cx88 comes online, and not being able to
+ * do power management for this board is worse than
+ * not having any tuning at all.
+ */
return 0;
- } else {
- dprintk(1, "xc5000: unknown tuner callback command.\n");
- return -EINVAL;
}
- break;
+
+ dprintk(1, "xc5000: unknown tuner callback command.\n");
+ return -EINVAL;
case CX88_BOARD_DVICO_FUSIONHDTV_7_GOLD:
if (command == 0) { /* This is the reset command from xc5000 */
cx_clear(MO_GP0_IO, 0x00000010);
- msleep(10);
+ usleep_range(10000, 20000);
cx_set(MO_GP0_IO, 0x00000010);
return 0;
- } else {
- dprintk(1, "xc5000: unknown tuner callback command.\n");
- return -EINVAL;
}
- break;
+
+ dprintk(1, "xc5000: unknown tuner callback command.\n");
+ return -EINVAL;
}
return 0; /* Should never be here */
}
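
The msleep() to usleep_range() conversions in this function follow the usual advice for sub-20 ms delays: msleep() rounds up to jiffies and may oversleep by several milliseconds at low HZ, while usleep_range() is hrtimer based and gives the scheduler a coalescing window. A sketch of the reset pulse, reusing the GPIO bit from the code above:

    cx_clear(MO_GP0_IO, 0x00000010);   /* assert xc5000 reset */
    usleep_range(10000, 20000);        /* hold 10-20 ms */
    cx_set(MO_GP0_IO, 0x00000010);     /* release reset */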
@@ -3230,14 +3230,14 @@ int cx88_tuner_callback(void *priv, int component, int command, int arg)
struct cx88_core *core;
if (!i2c_algo) {
- printk(KERN_ERR "cx88: Error - i2c private data undefined.\n");
+ pr_err("Error - i2c private data undefined.\n");
return -EINVAL;
}
core = i2c_algo->data;
if (!core) {
- printk(KERN_ERR "cx88: Error - device struct undefined.\n");
+ pr_err("Error - device struct undefined.\n");
return -EINVAL;
}
@@ -3245,18 +3245,18 @@ int cx88_tuner_callback(void *priv, int component, int command, int arg)
return -EINVAL;
switch (core->board.tuner_type) {
- case TUNER_XC2028:
- dprintk(1, "Calling XC2028/3028 callback\n");
- return cx88_xc2028_tuner_callback(core, command, arg);
- case TUNER_XC4000:
- dprintk(1, "Calling XC4000 callback\n");
- return cx88_xc4000_tuner_callback(core, command, arg);
- case TUNER_XC5000:
- dprintk(1, "Calling XC5000 callback\n");
- return cx88_xc5000_tuner_callback(core, command, arg);
+ case TUNER_XC2028:
+ dprintk(1, "Calling XC2028/3028 callback\n");
+ return cx88_xc2028_tuner_callback(core, command, arg);
+ case TUNER_XC4000:
+ dprintk(1, "Calling XC4000 callback\n");
+ return cx88_xc4000_tuner_callback(core, command, arg);
+ case TUNER_XC5000:
+ dprintk(1, "Calling XC5000 callback\n");
+ return cx88_xc5000_tuner_callback(core, command, arg);
}
- err_printk(core, "Error: Calling callback for tuner %d\n",
- core->board.tuner_type);
+ pr_err("Error: Calling callback for tuner %d\n",
+ core->board.tuner_type);
return -EINVAL;
}
EXPORT_SYMBOL(cx88_tuner_callback);
@@ -3267,28 +3267,20 @@ static void cx88_card_list(struct cx88_core *core, struct pci_dev *pci)
{
int i;
- if (0 == pci->subsystem_vendor &&
- 0 == pci->subsystem_device) {
- printk(KERN_ERR
- "%s: Your board has no valid PCI Subsystem ID and thus can't\n"
- "%s: be autodetected. Please pass card=<n> insmod option to\n"
- "%s: workaround that. Redirect complaints to the vendor of\n"
- "%s: the TV card. Best regards,\n"
- "%s: -- tux\n",
- core->name,core->name,core->name,core->name,core->name);
+ if (!pci->subsystem_vendor && !pci->subsystem_device) {
+ pr_err("Your board has no valid PCI Subsystem ID and thus can't\n");
+ pr_err("be autodetected. Please pass card=<n> insmod option to\n");
+		pr_err("work around that. Redirect complaints to the vendor of\n");
+ pr_err("the TV card\n");
} else {
- printk(KERN_ERR
- "%s: Your board isn't known (yet) to the driver. You can\n"
- "%s: try to pick one of the existing card configs via\n"
- "%s: card=<n> insmod option. Updating to the latest\n"
- "%s: version might help as well.\n",
- core->name,core->name,core->name,core->name);
+ pr_err("Your board isn't known (yet) to the driver. You can\n");
+ pr_err("try to pick one of the existing card configs via\n");
+ pr_err("card=<n> insmod option. Updating to the latest\n");
+ pr_err("version might help as well.\n");
}
- err_printk(core, "Here is a list of valid choices for the card=<n> "
- "insmod option:\n");
+ pr_err("Here is a list of valid choices for the card=<n> insmod option:\n");
for (i = 0; i < ARRAY_SIZE(cx88_boards); i++)
- printk(KERN_ERR "%s: card=%d -> %s\n",
- core->name, i, cx88_boards[i].name);
+ pr_err(" card=%d -> %s\n", i, cx88_boards[i].name);
}
static void cx88_card_setup_pre_i2c(struct cx88_core *core)
@@ -3296,7 +3288,9 @@ static void cx88_card_setup_pre_i2c(struct cx88_core *core)
switch (core->boardnr) {
case CX88_BOARD_HAUPPAUGE_HVR1300:
/*
- * Bring the 702 demod up before i2c scanning/attach or devices are hidden
+ * Bring the 702 demod up before i2c scanning/attach or
+ * devices are hidden.
+ *
* We leave here with the 702 on the bus
*
* "reset the IR receiver on GPIO[3]"
@@ -3317,7 +3311,7 @@ static void cx88_card_setup_pre_i2c(struct cx88_core *core)
cx_write(MO_GP2_IO, 0xef5);
mdelay(50);
cx_write(MO_GP2_IO, 0xcf7);
- msleep(10);
+ usleep_range(10000, 20000);
break;
case CX88_BOARD_DVICO_FUSIONHDTV_7_GOLD:
@@ -3353,7 +3347,7 @@ static void cx88_card_setup_pre_i2c(struct cx88_core *core)
case CX88_BOARD_TWINHAN_VP1027_DVBS:
cx_write(MO_GP0_IO, 0x00003230);
cx_write(MO_GP0_IO, 0x00003210);
- msleep(1);
+ usleep_range(10000, 20000);
cx_write(MO_GP0_IO, 0x00001230);
break;
}
@@ -3384,11 +3378,13 @@ void cx88_setup_xc3028(struct cx88_core *core, struct xc2028_ctrl *ctl)
ctl->demod = XC3028_FE_OREN538;
break;
case CX88_BOARD_GENIATECH_X8000_MT:
- /* FIXME: For this board, the xc3028 never recovers after being
- powered down (the reset GPIO probably is not set properly).
- We don't have access to the hardware so we cannot determine
- which GPIO is used for xc3028, so just disable power xc3028
- power management for now */
+ /*
+ * FIXME: For this board, the xc3028 never recovers after being
+ * powered down (the reset GPIO probably is not set properly).
+ * We don't have access to the hardware so we cannot determine
+	 * which GPIO is used for xc3028, so just disable xc3028
+ * power management for now
+ */
ctl->disable_power_mgmt = 1;
break;
case CX88_BOARD_WINFAST_TV2000_XP_GLOBAL:
@@ -3418,7 +3414,7 @@ static void cx88_card_setup(struct cx88_core *core)
memset(&tun_setup, 0, sizeof(tun_setup));
- if (0 == core->i2c_rc) {
+ if (!core->i2c_rc) {
core->i2c_client.addr = 0xa0 >> 1;
tveeprom_read(&core->i2c_client, eeprom, sizeof(eeprom));
}
@@ -3426,17 +3422,17 @@ static void cx88_card_setup(struct cx88_core *core)
switch (core->boardnr) {
case CX88_BOARD_HAUPPAUGE:
case CX88_BOARD_HAUPPAUGE_ROSLYN:
- if (0 == core->i2c_rc)
- hauppauge_eeprom(core, eeprom+8);
+ if (!core->i2c_rc)
+ hauppauge_eeprom(core, eeprom + 8);
break;
case CX88_BOARD_GDI:
- if (0 == core->i2c_rc)
+ if (!core->i2c_rc)
gdi_eeprom(core, eeprom);
break;
case CX88_BOARD_LEADTEK_PVR2000:
case CX88_BOARD_WINFAST_DV2000:
case CX88_BOARD_WINFAST2000XP_EXPERT:
- if (0 == core->i2c_rc)
+ if (!core->i2c_rc)
leadtek_eeprom(core, eeprom);
break;
case CX88_BOARD_HAUPPAUGE_NOVASPLUS_S1:
@@ -3449,7 +3445,7 @@ static void cx88_card_setup(struct cx88_core *core)
case CX88_BOARD_HAUPPAUGE_HVR4000:
case CX88_BOARD_HAUPPAUGE_HVR4000LITE:
case CX88_BOARD_HAUPPAUGE_IRONLY:
- if (0 == core->i2c_rc)
+ if (!core->i2c_rc)
hauppauge_eeprom(core, eeprom);
break;
case CX88_BOARD_KWORLD_DVBS_100:
@@ -3460,7 +3456,7 @@ static void cx88_card_setup(struct cx88_core *core)
/* GPIO0:0 is hooked to demod reset */
/* GPIO0:4 is hooked to xc3028 reset */
cx_write(MO_GP0_IO, 0x00111100);
- msleep(1);
+ usleep_range(10000, 20000);
cx_write(MO_GP0_IO, 0x00111111);
break;
case CX88_BOARD_DVICO_FUSIONHDTV_DVB_T_DUAL:
@@ -3476,9 +3472,9 @@ static void cx88_card_setup(struct cx88_core *core)
/* GPIO0:0 is hooked to mt352 reset pin */
cx_set(MO_GP0_IO, 0x00000101);
cx_clear(MO_GP0_IO, 0x00000001);
- msleep(1);
+ usleep_range(10000, 20000);
cx_set(MO_GP0_IO, 0x00000101);
- if (0 == core->i2c_rc &&
+ if (!core->i2c_rc &&
core->boardnr == CX88_BOARD_DVICO_FUSIONHDTV_DVB_T_HYBRID)
dvico_fusionhdtv_hybrid_init(core);
break;
@@ -3487,7 +3483,7 @@ static void cx88_card_setup(struct cx88_core *core)
cx_set(MO_GP0_IO, 0x00000707);
cx_set(MO_GP2_IO, 0x00000101);
cx_clear(MO_GP2_IO, 0x00000001);
- msleep(1);
+ usleep_range(10000, 20000);
cx_clear(MO_GP0_IO, 0x00000007);
cx_set(MO_GP2_IO, 0x00000101);
break;
@@ -3495,23 +3491,23 @@ static void cx88_card_setup(struct cx88_core *core)
cx_write(MO_GP0_IO, 0x00080808);
break;
case CX88_BOARD_ATI_HDTVWONDER:
- if (0 == core->i2c_rc) {
+ if (!core->i2c_rc) {
/* enable tuner */
int i;
- static const u8 buffer [][2] = {
- {0x10,0x12},
- {0x13,0x04},
- {0x16,0x00},
- {0x14,0x04},
- {0x17,0x00}
+ static const u8 buffer[][2] = {
+ {0x10, 0x12},
+ {0x13, 0x04},
+ {0x16, 0x00},
+ {0x14, 0x04},
+ {0x17, 0x00}
};
core->i2c_client.addr = 0x0a;
for (i = 0; i < ARRAY_SIZE(buffer); i++)
- if (2 != i2c_master_send(&core->i2c_client,
- buffer[i],2))
- warn_printk(core, "Unable to enable "
- "tuner(%i).\n", i);
+ if (i2c_master_send(&core->i2c_client,
+ buffer[i], 2) != 2)
+ pr_warn("Unable to enable tuner(%i).\n",
+ i);
}
break;
case CX88_BOARD_MSI_TVANYWHERE_MASTER:
@@ -3545,7 +3541,7 @@ static void cx88_card_setup(struct cx88_core *core)
cx_write(MO_GP0_IO, 0x8000);
msleep(100);
cx_write(MO_SRST_IO, 0);
- msleep(10);
+ usleep_range(10000, 20000);
cx_write(MO_GP0_IO, 0x8080);
msleep(100);
cx_write(MO_SRST_IO, 1);
@@ -3553,9 +3549,8 @@ static void cx88_card_setup(struct cx88_core *core)
break;
} /*end switch() */
-
/* Setup tuners */
- if ((core->board.radio_type != UNSET)) {
+ if (core->board.radio_type != UNSET) {
tun_setup.mode_mask = T_RADIO;
tun_setup.type = core->board.radio_type;
tun_setup.addr = core->board.radio_addr;
@@ -3610,35 +3605,30 @@ static int cx88_pci_quirks(const char *name, struct pci_dev *pci)
/* check pci quirks */
if (pci_pci_problems & PCIPCI_TRITON) {
- printk(KERN_INFO "%s: quirk: PCIPCI_TRITON -- set TBFX\n",
- name);
+ pr_info("quirk: PCIPCI_TRITON -- set TBFX\n");
ctrl |= CX88X_EN_TBFX;
}
if (pci_pci_problems & PCIPCI_NATOMA) {
- printk(KERN_INFO "%s: quirk: PCIPCI_NATOMA -- set TBFX\n",
- name);
+ pr_info("quirk: PCIPCI_NATOMA -- set TBFX\n");
ctrl |= CX88X_EN_TBFX;
}
if (pci_pci_problems & PCIPCI_VIAETBF) {
- printk(KERN_INFO "%s: quirk: PCIPCI_VIAETBF -- set TBFX\n",
- name);
+ pr_info("quirk: PCIPCI_VIAETBF -- set TBFX\n");
ctrl |= CX88X_EN_TBFX;
}
if (pci_pci_problems & PCIPCI_VSFX) {
- printk(KERN_INFO "%s: quirk: PCIPCI_VSFX -- set VSFX\n",
- name);
+ pr_info("quirk: PCIPCI_VSFX -- set VSFX\n");
ctrl |= CX88X_EN_VSFX;
}
#ifdef PCIPCI_ALIMAGIK
if (pci_pci_problems & PCIPCI_ALIMAGIK) {
- printk(KERN_INFO "%s: quirk: PCIPCI_ALIMAGIK -- latency fixup\n",
- name);
+ pr_info("quirk: PCIPCI_ALIMAGIK -- latency fixup\n");
lat = 0x0A;
}
#endif
/* check insmod options */
- if (UNSET != latency)
+ if (latency != UNSET)
lat = latency;
/* apply stuff */
@@ -3647,9 +3637,8 @@ static int cx88_pci_quirks(const char *name, struct pci_dev *pci)
value |= ctrl;
pci_write_config_byte(pci, CX88X_DEVCTRL, value);
}
- if (UNSET != lat) {
- printk(KERN_INFO "%s: setting pci latency timer to %d\n",
- name, latency);
+ if (lat != UNSET) {
+ pr_info("setting pci latency timer to %d\n", latency);
pci_write_config_byte(pci, PCI_LATENCY_TIMER, latency);
}
return 0;
@@ -3657,27 +3646,28 @@ static int cx88_pci_quirks(const char *name, struct pci_dev *pci)
int cx88_get_resources(const struct cx88_core *core, struct pci_dev *pci)
{
- if (request_mem_region(pci_resource_start(pci,0),
- pci_resource_len(pci,0),
+ if (request_mem_region(pci_resource_start(pci, 0),
+ pci_resource_len(pci, 0),
core->name))
return 0;
- printk(KERN_ERR
- "%s/%d: Can't get MMIO memory @ 0x%llx, subsystem: %04x:%04x\n",
- core->name, PCI_FUNC(pci->devfn),
+ pr_err("func %d: Can't get MMIO memory @ 0x%llx, subsystem: %04x:%04x\n",
+ PCI_FUNC(pci->devfn),
(unsigned long long)pci_resource_start(pci, 0),
pci->subsystem_vendor, pci->subsystem_device);
return -EBUSY;
}
-/* Allocate and initialize the cx88 core struct. One should hold the
- * devlist mutex before calling this. */
+/*
+ * Allocate and initialize the cx88 core struct. One should hold the
+ * devlist mutex before calling this.
+ */
struct cx88_core *cx88_core_create(struct pci_dev *pci, int nr)
{
struct cx88_core *core;
int i;
core = kzalloc(sizeof(*core), GFP_KERNEL);
- if (core == NULL)
+ if (!core)
return NULL;
atomic_inc(&core->refcount);
@@ -3715,7 +3705,7 @@ struct cx88_core *cx88_core_create(struct pci_dev *pci, int nr)
return NULL;
}
- if (0 != cx88_get_resources(core, pci)) {
+ if (cx88_get_resources(core, pci) != 0) {
v4l2_ctrl_handler_free(&core->video_hdl);
v4l2_ctrl_handler_free(&core->audio_hdl);
v4l2_device_unregister(&core->v4l2_dev);
@@ -3729,9 +3719,9 @@ struct cx88_core *cx88_core_create(struct pci_dev *pci, int nr)
pci_resource_len(pci, 0));
core->bmmio = (u8 __iomem *)core->lmmio;
- if (core->lmmio == NULL) {
+ if (!core->lmmio) {
release_mem_region(pci_resource_start(pci, 0),
- pci_resource_len(pci, 0));
+ pci_resource_len(pci, 0));
v4l2_ctrl_handler_free(&core->video_hdl);
v4l2_ctrl_handler_free(&core->audio_hdl);
v4l2_device_unregister(&core->v4l2_dev);
@@ -3743,11 +3733,11 @@ struct cx88_core *cx88_core_create(struct pci_dev *pci, int nr)
core->boardnr = UNSET;
if (card[core->nr] < ARRAY_SIZE(cx88_boards))
core->boardnr = card[core->nr];
- for (i = 0; UNSET == core->boardnr && i < ARRAY_SIZE(cx88_subids); i++)
+ for (i = 0; core->boardnr == UNSET && i < ARRAY_SIZE(cx88_subids); i++)
if (pci->subsystem_vendor == cx88_subids[i].subvendor &&
pci->subsystem_device == cx88_subids[i].subdevice)
core->boardnr = cx88_subids[i].card;
- if (UNSET == core->boardnr) {
+ if (core->boardnr == UNSET) {
core->boardnr = CX88_BOARD_UNKNOWN;
cx88_card_list(core, pci);
}
@@ -3757,7 +3747,7 @@ struct cx88_core *cx88_core_create(struct pci_dev *pci, int nr)
if (!core->board.num_frontends && (core->board.mpeg & CX88_MPEG_DVB))
core->board.num_frontends = 1;
- info_printk(core, "subsystem: %04x:%04x, board: %s [card=%d,%s], frontend(s): %d\n",
+ pr_info("subsystem: %04x:%04x, board: %s [card=%d,%s], frontend(s): %d\n",
pci->subsystem_vendor, pci->subsystem_device, core->board.name,
core->boardnr, card[core->nr] == core->boardnr ?
"insmod option" : "autodetected",
@@ -3777,10 +3767,12 @@ struct cx88_core *cx88_core_create(struct pci_dev *pci, int nr)
cx88_i2c_init(core, pci);
/* load tuner module, if needed */
- if (UNSET != core->board.tuner_type) {
- /* Ignore 0x6b and 0x6f on cx88 boards.
+ if (core->board.tuner_type != UNSET) {
+ /*
+ * Ignore 0x6b and 0x6f on cx88 boards.
* FusionHDTV5 RT Gold has an ir receiver at 0x6b
- * and an RTC at 0x6f which can get corrupted if probed. */
+ * and an RTC at 0x6f which can get corrupted if probed.
+ */
static const unsigned short tv_addrs[] = {
0x42, 0x43, 0x4a, 0x4b, /* tda8290 */
0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67,
@@ -3789,24 +3781,27 @@ struct cx88_core *cx88_core_create(struct pci_dev *pci, int nr)
};
int has_demod = (core->board.tda9887_conf & TDA9887_PRESENT);
- /* I don't trust the radio_type as is stored in the card
- definitions, so we just probe for it.
- The radio_type is sometimes missing, or set to UNSET but
- later code configures a tea5767.
+ /*
+	 * I don't trust the radio_type as stored in the card
+ * definitions, so we just probe for it.
+ * The radio_type is sometimes missing, or set to UNSET but
+ * later code configures a tea5767.
*/
v4l2_i2c_new_subdev(&core->v4l2_dev, &core->i2c_adap,
- "tuner", 0, v4l2_i2c_tuner_addrs(ADDRS_RADIO));
+ "tuner", 0,
+ v4l2_i2c_tuner_addrs(ADDRS_RADIO));
if (has_demod)
v4l2_i2c_new_subdev(&core->v4l2_dev,
- &core->i2c_adap, "tuner",
+ &core->i2c_adap, "tuner",
0, v4l2_i2c_tuner_addrs(ADDRS_DEMOD));
if (core->board.tuner_addr == ADDR_UNSET) {
v4l2_i2c_new_subdev(&core->v4l2_dev,
- &core->i2c_adap, "tuner",
+ &core->i2c_adap, "tuner",
0, has_demod ? tv_addrs + 4 : tv_addrs);
} else {
v4l2_i2c_new_subdev(&core->v4l2_dev, &core->i2c_adap,
- "tuner", core->board.tuner_addr, NULL);
+ "tuner", core->board.tuner_addr,
+ NULL);
}
}
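
To make the tv_addrs + 4 arithmetic above concrete (a reading of the table a few lines up, not new behavior):

    /*
     * tv_addrs     -> { 0x42, 0x43, 0x4a, 0x4b, 0x60, ... }  full probe list
     * tv_addrs + 4 -> { 0x60, 0x61, ... }  skips the four tda8290 addresses
     */

When a tda9887-style demod has already been instantiated, those first four addresses are spoken for, so the probe starts at the plain tuner range instead.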
diff --git a/drivers/media/pci/cx88/cx88-core.c b/drivers/media/pci/cx88/cx88-core.c
index 46fe8c1eb9d4..973a9cd4c635 100644
--- a/drivers/media/pci/cx88/cx88-core.c
+++ b/drivers/media/pci/cx88/cx88-core.c
@@ -1,5 +1,4 @@
/*
- *
* device driver for Conexant 2388x based TV cards
* driver core
*
@@ -19,12 +18,10 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
+#include "cx88.h"
+
#include <linux/init.h>
#include <linux/list.h>
#include <linux/module.h>
@@ -38,7 +35,6 @@
#include <linux/videodev2.h>
#include <linux/mutex.h>
-#include "cx88.h"
#include <media/v4l2-common.h>
#include <media/v4l2-ioctl.h>
@@ -53,17 +49,22 @@ module_param_named(core_debug, cx88_core_debug, int, 0644);
MODULE_PARM_DESC(core_debug, "enable debug messages [core]");
static unsigned int nicam;
-module_param(nicam,int,0644);
-MODULE_PARM_DESC(nicam,"tv audio is nicam");
+module_param(nicam, int, 0644);
+MODULE_PARM_DESC(nicam, "tv audio is nicam");
static unsigned int nocomb;
-module_param(nocomb,int,0644);
-MODULE_PARM_DESC(nocomb,"disable comb filter");
+module_param(nocomb, int, 0644);
+MODULE_PARM_DESC(nocomb, "disable comb filter");
+
+#define dprintk0(fmt, arg...) \
+ printk(KERN_DEBUG pr_fmt("%s: core:" fmt), \
+		__func__, ##arg)
-#define dprintk(level,fmt, arg...) do { \
- if (cx88_core_debug >= level) \
- printk(KERN_DEBUG "%s: " fmt, core->name , ## arg); \
- } while(0)
+#define dprintk(level, fmt, arg...) do { \
+ if (cx88_core_debug >= level) \
+ printk(KERN_DEBUG pr_fmt("%s: core:" fmt), \
+ __func__, ##arg); \
+} while (0)
static unsigned int cx88_devcount;
static LIST_HEAD(cx88_devlist);
@@ -71,15 +72,17 @@ static DEFINE_MUTEX(devlist);
#define NO_SYNC_LINE (-1U)
-/* @lpi: lines per IRQ, or 0 to not generate irqs. Note: IRQ to be
- generated _after_ lpi lines are transferred. */
-static __le32* cx88_risc_field(__le32 *rp, struct scatterlist *sglist,
- unsigned int offset, u32 sync_line,
- unsigned int bpl, unsigned int padding,
- unsigned int lines, unsigned int lpi, bool jump)
+/*
+ * @lpi: lines per IRQ, or 0 to not generate irqs. Note: IRQ to be
+ * generated _after_ lpi lines are transferred.
+ */
+static __le32 *cx88_risc_field(__le32 *rp, struct scatterlist *sglist,
+ unsigned int offset, u32 sync_line,
+ unsigned int bpl, unsigned int padding,
+ unsigned int lines, unsigned int lpi, bool jump)
{
struct scatterlist *sg;
- unsigned int line,todo,sol;
+ unsigned int line, todo, sol;
if (jump) {
(*rp++) = cpu_to_le32(RISC_JUMP);
@@ -97,33 +100,34 @@ static __le32* cx88_risc_field(__le32 *rp, struct scatterlist *sglist,
offset -= sg_dma_len(sg);
sg = sg_next(sg);
}
- if (lpi && line>0 && !(line % lpi))
+ if (lpi && line > 0 && !(line % lpi))
sol = RISC_SOL | RISC_IRQ1 | RISC_CNT_INC;
else
sol = RISC_SOL;
- if (bpl <= sg_dma_len(sg)-offset) {
+ if (bpl <= sg_dma_len(sg) - offset) {
/* fits into current chunk */
- *(rp++)=cpu_to_le32(RISC_WRITE|sol|RISC_EOL|bpl);
- *(rp++)=cpu_to_le32(sg_dma_address(sg)+offset);
- offset+=bpl;
+ *(rp++) = cpu_to_le32(RISC_WRITE | sol |
+ RISC_EOL | bpl);
+ *(rp++) = cpu_to_le32(sg_dma_address(sg) + offset);
+ offset += bpl;
} else {
/* scanline needs to be split */
todo = bpl;
- *(rp++)=cpu_to_le32(RISC_WRITE|sol|
- (sg_dma_len(sg)-offset));
- *(rp++)=cpu_to_le32(sg_dma_address(sg)+offset);
- todo -= (sg_dma_len(sg)-offset);
+ *(rp++) = cpu_to_le32(RISC_WRITE | sol |
+ (sg_dma_len(sg) - offset));
+ *(rp++) = cpu_to_le32(sg_dma_address(sg) + offset);
+ todo -= (sg_dma_len(sg) - offset);
offset = 0;
sg = sg_next(sg);
while (todo > sg_dma_len(sg)) {
- *(rp++)=cpu_to_le32(RISC_WRITE|
- sg_dma_len(sg));
- *(rp++)=cpu_to_le32(sg_dma_address(sg));
+ *(rp++) = cpu_to_le32(RISC_WRITE |
+ sg_dma_len(sg));
+ *(rp++) = cpu_to_le32(sg_dma_address(sg));
todo -= sg_dma_len(sg);
sg = sg_next(sg);
}
- *(rp++)=cpu_to_le32(RISC_WRITE|RISC_EOL|todo);
- *(rp++)=cpu_to_le32(sg_dma_address(sg));
+ *(rp++) = cpu_to_le32(RISC_WRITE | RISC_EOL | todo);
+ *(rp++) = cpu_to_le32(sg_dma_address(sg));
offset += todo;
}
offset += padding;
@@ -137,41 +141,46 @@ int cx88_risc_buffer(struct pci_dev *pci, struct cx88_riscmem *risc,
unsigned int top_offset, unsigned int bottom_offset,
unsigned int bpl, unsigned int padding, unsigned int lines)
{
- u32 instructions,fields;
+ u32 instructions, fields;
__le32 *rp;
fields = 0;
- if (UNSET != top_offset)
+ if (top_offset != UNSET)
fields++;
- if (UNSET != bottom_offset)
+ if (bottom_offset != UNSET)
fields++;
- /* estimate risc mem: worst case is one write per page border +
- one write per scan line + syncs + jump (all 2 dwords). Padding
- can cause next bpl to start close to a page border. First DMA
- region may be smaller than PAGE_SIZE */
- instructions = fields * (1 + ((bpl + padding) * lines) / PAGE_SIZE + lines);
+ /*
+ * estimate risc mem: worst case is one write per page border +
+ * one write per scan line + syncs + jump (all 2 dwords). Padding
+ * can cause next bpl to start close to a page border. First DMA
+ * region may be smaller than PAGE_SIZE
+ */
+ instructions = fields * (1 + ((bpl + padding) * lines) /
+ PAGE_SIZE + lines);
instructions += 4;
risc->size = instructions * 8;
risc->dma = 0;
risc->cpu = pci_zalloc_consistent(pci, risc->size, &risc->dma);
- if (NULL == risc->cpu)
+ if (!risc->cpu)
return -ENOMEM;
/* write risc instructions */
rp = risc->cpu;
- if (UNSET != top_offset)
+ if (top_offset != UNSET)
rp = cx88_risc_field(rp, sglist, top_offset, 0,
bpl, padding, lines, 0, true);
- if (UNSET != bottom_offset)
+ if (bottom_offset != UNSET)
rp = cx88_risc_field(rp, sglist, bottom_offset, 0x200,
- bpl, padding, lines, 0, top_offset == UNSET);
+ bpl, padding, lines, 0,
+ top_offset == UNSET);
/* save pointer to jmp instruction address */
risc->jmp = rp;
- BUG_ON((risc->jmp - risc->cpu + 2) * sizeof (*risc->cpu) > risc->size);
+ WARN_ON((risc->jmp - risc->cpu + 2) * sizeof(*risc->cpu) > risc->size);
return 0;
}
+EXPORT_SYMBOL(cx88_risc_buffer);
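
A worked case for the instruction estimate (hypothetical PAL YUYV capture, both fields; the numbers are illustrative, not taken from this patch):

    /* bpl = 720 * 2 = 1440, padding = 0, lines = 288, PAGE_SIZE = 4096 */
    instructions  = 2 * (1 + (1440 * 288) / 4096 + 288);  /* 2 * 390 = 780 */
    instructions += 4;                                    /* -> 784        */
    risc->size    = instructions * 8;                     /* 6272 bytes    */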
int cx88_risc_databuffer(struct pci_dev *pci, struct cx88_riscmem *risc,
struct scatterlist *sglist, unsigned int bpl,
@@ -180,32 +189,38 @@ int cx88_risc_databuffer(struct pci_dev *pci, struct cx88_riscmem *risc,
u32 instructions;
__le32 *rp;
- /* estimate risc mem: worst case is one write per page border +
- one write per scan line + syncs + jump (all 2 dwords). Here
- there is no padding and no sync. First DMA region may be smaller
- than PAGE_SIZE */
+ /*
+ * estimate risc mem: worst case is one write per page border +
+ * one write per scan line + syncs + jump (all 2 dwords). Here
+ * there is no padding and no sync. First DMA region may be smaller
+ * than PAGE_SIZE
+ */
instructions = 1 + (bpl * lines) / PAGE_SIZE + lines;
instructions += 3;
risc->size = instructions * 8;
risc->dma = 0;
risc->cpu = pci_zalloc_consistent(pci, risc->size, &risc->dma);
- if (NULL == risc->cpu)
+ if (!risc->cpu)
return -ENOMEM;
/* write risc instructions */
rp = risc->cpu;
- rp = cx88_risc_field(rp, sglist, 0, NO_SYNC_LINE, bpl, 0, lines, lpi, !lpi);
+ rp = cx88_risc_field(rp, sglist, 0, NO_SYNC_LINE, bpl, 0,
+ lines, lpi, !lpi);
/* save pointer to jmp instruction address */
risc->jmp = rp;
- BUG_ON((risc->jmp - risc->cpu + 2) * sizeof (*risc->cpu) > risc->size);
+ WARN_ON((risc->jmp - risc->cpu + 2) * sizeof(*risc->cpu) > risc->size);
return 0;
}
+EXPORT_SYMBOL(cx88_risc_databuffer);
-/* ------------------------------------------------------------------ */
-/* our SRAM memory layout */
+/*
+ * our SRAM memory layout
+ */
-/* we are going to put all thr risc programs into host memory, so we
+/*
+ * we are going to put all the risc programs into host memory, so we
* can use the whole SDRAM for the DMA fifos. To simplify things, we
* use a static memory layout. That surely will waste memory in case
* we don't use all DMA channels at the same time (which will be the
@@ -329,12 +344,13 @@ const struct sram_channel cx88_sram_channels[] = {
.cnt2_reg = MO_DMA27_CNT2,
},
};
+EXPORT_SYMBOL(cx88_sram_channels);
int cx88_sram_channel_setup(struct cx88_core *core,
const struct sram_channel *ch,
unsigned int bpl, u32 risc)
{
- unsigned int i,lines;
+ unsigned int i, lines;
u32 cdt;
bpl = (bpl + 7) & ~7; /* alignment */
@@ -342,16 +358,16 @@ int cx88_sram_channel_setup(struct cx88_core *core,
lines = ch->fifo_size / bpl;
if (lines > 6)
lines = 6;
- BUG_ON(lines < 2);
+ WARN_ON(lines < 2);
/* write CDT */
for (i = 0; i < lines; i++)
- cx_write(cdt + 16*i, ch->fifo_start + bpl*i);
+ cx_write(cdt + 16 * i, ch->fifo_start + bpl * i);
/* write CMDS */
cx_write(ch->cmds_start + 0, risc);
cx_write(ch->cmds_start + 4, cdt);
- cx_write(ch->cmds_start + 8, (lines*16) >> 3);
+ cx_write(ch->cmds_start + 8, (lines * 16) >> 3);
cx_write(ch->cmds_start + 12, ch->ctrl_start);
cx_write(ch->cmds_start + 16, 64 >> 2);
for (i = 20; i < 64; i += 4)
@@ -360,12 +376,13 @@ int cx88_sram_channel_setup(struct cx88_core *core,
/* fill registers */
cx_write(ch->ptr1_reg, ch->fifo_start);
cx_write(ch->ptr2_reg, cdt);
- cx_write(ch->cnt1_reg, (bpl >> 3) -1);
- cx_write(ch->cnt2_reg, (lines*16) >> 3);
+ cx_write(ch->cnt1_reg, (bpl >> 3) - 1);
+ cx_write(ch->cnt2_reg, (lines * 16) >> 3);
- dprintk(2,"sram setup %s: bpl=%d lines=%d\n", ch->name, bpl, lines);
+ dprintk(2, "sram setup %s: bpl=%d lines=%d\n", ch->name, bpl, lines);
return 0;
}
+EXPORT_SYMBOL(cx88_sram_channel_setup);
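
A hypothetical worked case for the setup above (the fifo_size is made up purely for illustration):

    bpl   = (1437 + 7) & ~7;        /* -> 1440, 8-byte aligned */
    lines = 15872 / 1440;           /* -> 11, then capped to 6 */
    /* six CDT entries at fifo_start, fifo_start + 1440, ..., and
     * cnt2 is programmed with (6 * 16) >> 3 = 12 qwords */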
/* ------------------------------------------------------------------ */
/* debug helper code */
@@ -373,23 +390,23 @@ int cx88_sram_channel_setup(struct cx88_core *core,
static int cx88_risc_decode(u32 risc)
{
static const char * const instr[16] = {
- [ RISC_SYNC >> 28 ] = "sync",
- [ RISC_WRITE >> 28 ] = "write",
- [ RISC_WRITEC >> 28 ] = "writec",
- [ RISC_READ >> 28 ] = "read",
- [ RISC_READC >> 28 ] = "readc",
- [ RISC_JUMP >> 28 ] = "jump",
- [ RISC_SKIP >> 28 ] = "skip",
- [ RISC_WRITERM >> 28 ] = "writerm",
- [ RISC_WRITECM >> 28 ] = "writecm",
- [ RISC_WRITECR >> 28 ] = "writecr",
+ [RISC_SYNC >> 28] = "sync",
+ [RISC_WRITE >> 28] = "write",
+ [RISC_WRITEC >> 28] = "writec",
+ [RISC_READ >> 28] = "read",
+ [RISC_READC >> 28] = "readc",
+ [RISC_JUMP >> 28] = "jump",
+ [RISC_SKIP >> 28] = "skip",
+ [RISC_WRITERM >> 28] = "writerm",
+ [RISC_WRITECM >> 28] = "writecm",
+ [RISC_WRITECR >> 28] = "writecr",
};
static int const incr[16] = {
- [ RISC_WRITE >> 28 ] = 2,
- [ RISC_JUMP >> 28 ] = 2,
- [ RISC_WRITERM >> 28 ] = 3,
- [ RISC_WRITECM >> 28 ] = 3,
- [ RISC_WRITECR >> 28 ] = 4,
+ [RISC_WRITE >> 28] = 2,
+ [RISC_JUMP >> 28] = 2,
+ [RISC_WRITERM >> 28] = 3,
+ [RISC_WRITECM >> 28] = 3,
+ [RISC_WRITECR >> 28] = 4,
};
static const char * const bits[] = {
"12", "13", "14", "resync",
@@ -399,16 +416,15 @@ static int cx88_risc_decode(u32 risc)
};
int i;
- printk("0x%08x [ %s", risc,
- instr[risc >> 28] ? instr[risc >> 28] : "INVALID");
- for (i = ARRAY_SIZE(bits)-1; i >= 0; i--)
+ dprintk0("0x%08x [ %s", risc,
+ instr[risc >> 28] ? instr[risc >> 28] : "INVALID");
+ for (i = ARRAY_SIZE(bits) - 1; i >= 0; i--)
if (risc & (1 << (i + 12)))
- printk(" %s",bits[i]);
- printk(" count=%d ]\n", risc & 0xfff);
+ pr_cont(" %s", bits[i]);
+ pr_cont(" count=%d ]\n", risc & 0xfff);
return incr[risc >> 28] ? incr[risc >> 28] : 1;
}
-
void cx88_sram_channel_dump(struct cx88_core *core,
const struct sram_channel *ch)
{
@@ -426,46 +442,41 @@ void cx88_sram_channel_dump(struct cx88_core *core,
"line / byte",
};
u32 risc;
- unsigned int i,j,n;
+ unsigned int i, j, n;
- printk("%s: %s - dma channel status dump\n",
- core->name,ch->name);
+ dprintk0("%s - dma channel status dump\n", ch->name);
for (i = 0; i < ARRAY_SIZE(name); i++)
- printk("%s: cmds: %-12s: 0x%08x\n",
- core->name,name[i],
- cx_read(ch->cmds_start + 4*i));
+ dprintk0(" cmds: %-12s: 0x%08x\n",
+ name[i], cx_read(ch->cmds_start + 4 * i));
for (n = 1, i = 0; i < 4; i++) {
- risc = cx_read(ch->cmds_start + 4 * (i+11));
- printk("%s: risc%d: ", core->name, i);
+ risc = cx_read(ch->cmds_start + 4 * (i + 11));
+ pr_cont(" risc%d: ", i);
if (--n)
- printk("0x%08x [ arg #%d ]\n", risc, n);
+ pr_cont("0x%08x [ arg #%d ]\n", risc, n);
else
n = cx88_risc_decode(risc);
}
for (i = 0; i < 16; i += n) {
risc = cx_read(ch->ctrl_start + 4 * i);
- printk("%s: iq %x: ", core->name, i);
+ dprintk0(" iq %x: ", i);
n = cx88_risc_decode(risc);
for (j = 1; j < n; j++) {
- risc = cx_read(ch->ctrl_start + 4 * (i+j));
- printk("%s: iq %x: 0x%08x [ arg #%d ]\n",
- core->name, i+j, risc, j);
+ risc = cx_read(ch->ctrl_start + 4 * (i + j));
+ pr_cont(" iq %x: 0x%08x [ arg #%d ]\n",
+ i + j, risc, j);
}
}
- printk("%s: fifo: 0x%08x -> 0x%x\n",
- core->name, ch->fifo_start, ch->fifo_start+ch->fifo_size);
- printk("%s: ctrl: 0x%08x -> 0x%x\n",
- core->name, ch->ctrl_start, ch->ctrl_start+6*16);
- printk("%s: ptr1_reg: 0x%08x\n",
- core->name,cx_read(ch->ptr1_reg));
- printk("%s: ptr2_reg: 0x%08x\n",
- core->name,cx_read(ch->ptr2_reg));
- printk("%s: cnt1_reg: 0x%08x\n",
- core->name,cx_read(ch->cnt1_reg));
- printk("%s: cnt2_reg: 0x%08x\n",
- core->name,cx_read(ch->cnt2_reg));
+ dprintk0("fifo: 0x%08x -> 0x%x\n",
+ ch->fifo_start, ch->fifo_start + ch->fifo_size);
+ dprintk0("ctrl: 0x%08x -> 0x%x\n",
+ ch->ctrl_start, ch->ctrl_start + 6 * 16);
+ dprintk0(" ptr1_reg: 0x%08x\n", cx_read(ch->ptr1_reg));
+ dprintk0(" ptr2_reg: 0x%08x\n", cx_read(ch->ptr2_reg));
+ dprintk0(" cnt1_reg: 0x%08x\n", cx_read(ch->cnt1_reg));
+ dprintk0(" cnt2_reg: 0x%08x\n", cx_read(ch->cnt2_reg));
}
+EXPORT_SYMBOL(cx88_sram_channel_dump);
static const char *cx88_pci_irqs[32] = {
"vid", "aud", "ts", "vip", "hst", "5", "6", "tm1",
@@ -474,25 +485,26 @@ static const char *cx88_pci_irqs[32] = {
"i2c", "i2c_rack", "ir_smp", "gpio0", "gpio1"
};
-void cx88_print_irqbits(const char *name, const char *tag, const char *strings[],
+void cx88_print_irqbits(const char *tag, const char *strings[],
int len, u32 bits, u32 mask)
{
unsigned int i;
- printk(KERN_DEBUG "%s: %s [0x%x]", name, tag, bits);
+ dprintk0("%s [0x%x]", tag, bits);
for (i = 0; i < len; i++) {
if (!(bits & (1 << i)))
continue;
if (strings[i])
- printk(" %s", strings[i]);
+ pr_cont(" %s", strings[i]);
else
- printk(" %d", i);
+ pr_cont(" %d", i);
if (!(mask & (1 << i)))
continue;
- printk("*");
+ pr_cont("*");
}
- printk("\n");
+ pr_cont("\n");
}
+EXPORT_SYMBOL(cx88_print_irqbits);
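
A hypothetical call and the single line it now builds with dprintk0() plus pr_cont() (the exact prefix depends on pr_fmt(); bit 0 is also set in the mask, hence the asterisk):

    cx88_print_irqbits("irq pci", cx88_pci_irqs,
                       ARRAY_SIZE(cx88_pci_irqs), 0x10001, 0x1);
    /* -> "cx88_print_irqbits: core:irq pci [0x10001] vid* i2c" */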
/* ------------------------------------------------------------------ */
@@ -505,11 +517,12 @@ int cx88_core_irq(struct cx88_core *core, u32 status)
handled++;
}
if (!handled)
- cx88_print_irqbits(core->name, "irq pci",
+ cx88_print_irqbits("irq pci",
cx88_pci_irqs, ARRAY_SIZE(cx88_pci_irqs),
status, core->pci_irqmask);
return handled;
}
+EXPORT_SYMBOL(cx88_core_irq);
void cx88_wakeup(struct cx88_core *core,
struct cx88_dmaqueue *q, u32 count)
@@ -524,6 +537,7 @@ void cx88_wakeup(struct cx88_core *core,
list_del(&buf->list);
vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_DONE);
}
+EXPORT_SYMBOL(cx88_wakeup);
void cx88_shutdown(struct cx88_core *core)
{
@@ -548,10 +562,11 @@ void cx88_shutdown(struct cx88_core *core)
/* stop capturing */
cx_write(VID_CAPTURE_CONTROL, 0);
}
+EXPORT_SYMBOL(cx88_shutdown);
int cx88_reset(struct cx88_core *core)
{
- dprintk(1,"%s\n",__func__);
+	dprintk(1, "\n");
cx88_shutdown(core);
/* clear irq status */
@@ -563,13 +578,15 @@ int cx88_reset(struct cx88_core *core)
msleep(100);
/* init sram */
- cx88_sram_channel_setup(core, &cx88_sram_channels[SRAM_CH21], 720*4, 0);
+ cx88_sram_channel_setup(core, &cx88_sram_channels[SRAM_CH21],
+ 720 * 4, 0);
cx88_sram_channel_setup(core, &cx88_sram_channels[SRAM_CH22], 128, 0);
cx88_sram_channel_setup(core, &cx88_sram_channels[SRAM_CH23], 128, 0);
cx88_sram_channel_setup(core, &cx88_sram_channels[SRAM_CH24], 128, 0);
cx88_sram_channel_setup(core, &cx88_sram_channels[SRAM_CH25], 128, 0);
cx88_sram_channel_setup(core, &cx88_sram_channels[SRAM_CH26], 128, 0);
- cx88_sram_channel_setup(core, &cx88_sram_channels[SRAM_CH28], 188*4, 0);
+ cx88_sram_channel_setup(core, &cx88_sram_channels[SRAM_CH28],
+ 188 * 4, 0);
cx88_sram_channel_setup(core, &cx88_sram_channels[SRAM_CH27], 128, 0);
/* misc init ... */
@@ -597,11 +614,12 @@ int cx88_reset(struct cx88_core *core)
/* Reset on-board parts */
cx_write(MO_SRST_IO, 0);
- msleep(10);
+ usleep_range(10000, 20000);
cx_write(MO_SRST_IO, 1);
return 0;
}
+EXPORT_SYMBOL(cx88_reset);
/* ------------------------------------------------------------------ */
@@ -631,10 +649,11 @@ static inline unsigned int norm_fsc8(v4l2_std_id norm)
if (norm & V4L2_STD_NTSC) // All NTSC/M and variants
return 28636360; // 3.57954545 MHz +/- 10 Hz
- /* SECAM have also different sub carrier for chroma,
- but step_db and step_dr, at cx88_set_tvnorm already handles that.
-
- The same FSC applies to PAL/BGDKIH, PAL/60, NTSC/4.43 and PAL/N
+ /*
+	 * SECAM also has a different subcarrier for chroma,
+	 * but step_db and step_dr at cx88_set_tvnorm already handle that.
+ *
+ * The same FSC applies to PAL/BGDKIH, PAL/60, NTSC/4.43 and PAL/N
*/
return 35468950; // 4.43361875 MHz +/- 5 Hz
@@ -642,13 +661,12 @@ static inline unsigned int norm_fsc8(v4l2_std_id norm)
static inline unsigned int norm_htotal(v4l2_std_id norm)
{
-
- unsigned int fsc4=norm_fsc8(norm)/2;
+ unsigned int fsc4 = norm_fsc8(norm) / 2;
/* returns 4*FSC / vtotal / frames per seconds */
return (norm & V4L2_STD_625_50) ?
- ((fsc4+312)/625+12)/25 :
- ((fsc4+262)/525*1001+15000)/30000;
+ ((fsc4 + 312) / 625 + 12) / 25 :
+ ((fsc4 + 262) / 525 * 1001 + 15000) / 30000;
}
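
The integer arithmetic lands exactly on the classic 4*FSC samples-per-line counts, which is a handy sanity check (integer division throughout):

    /* NTSC: fsc4 = 28636360 / 2 = 14318180
     *   ((14318180 + 262) / 525 * 1001 + 15000) / 30000 = 910
     * PAL:  fsc4 = 35468950 / 2 = 17734475
     *   ((17734475 + 312) / 625 + 12) / 25 = 1135
     */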
static inline unsigned int norm_vbipack(v4l2_std_id norm)
@@ -656,14 +674,14 @@ static inline unsigned int norm_vbipack(v4l2_std_id norm)
return (norm & V4L2_STD_625_50) ? 511 : 400;
}
-int cx88_set_scale(struct cx88_core *core, unsigned int width, unsigned int height,
- enum v4l2_field field)
+int cx88_set_scale(struct cx88_core *core, unsigned int width,
+ unsigned int height, enum v4l2_field field)
{
unsigned int swidth = norm_swidth(core->tvnorm);
unsigned int sheight = norm_maxh(core->tvnorm);
u32 value;
- dprintk(1,"set_scale: %dx%d [%s%s,%s]\n", width, height,
+ dprintk(1, "set_scale: %dx%d [%s%s,%s]\n", width, height,
V4L2_FIELD_HAS_TOP(field) ? "T" : "",
V4L2_FIELD_HAS_BOTTOM(field) ? "B" : "",
v4l2_norm_to_name(core->tvnorm));
@@ -675,30 +693,30 @@ int cx88_set_scale(struct cx88_core *core, unsigned int width, unsigned int heig
value &= 0x3fe;
cx_write(MO_HDELAY_EVEN, value);
cx_write(MO_HDELAY_ODD, value);
- dprintk(1,"set_scale: hdelay 0x%04x (width %d)\n", value,swidth);
+ dprintk(1, "set_scale: hdelay 0x%04x (width %d)\n", value, swidth);
value = (swidth * 4096 / width) - 4096;
cx_write(MO_HSCALE_EVEN, value);
cx_write(MO_HSCALE_ODD, value);
- dprintk(1,"set_scale: hscale 0x%04x\n", value);
+ dprintk(1, "set_scale: hscale 0x%04x\n", value);
cx_write(MO_HACTIVE_EVEN, width);
cx_write(MO_HACTIVE_ODD, width);
- dprintk(1,"set_scale: hactive 0x%04x\n", width);
+ dprintk(1, "set_scale: hactive 0x%04x\n", width);
// recalc V scale Register (delay is constant)
cx_write(MO_VDELAY_EVEN, norm_vdelay(core->tvnorm));
cx_write(MO_VDELAY_ODD, norm_vdelay(core->tvnorm));
- dprintk(1,"set_scale: vdelay 0x%04x\n", norm_vdelay(core->tvnorm));
+ dprintk(1, "set_scale: vdelay 0x%04x\n", norm_vdelay(core->tvnorm));
value = (0x10000 - (sheight * 512 / height - 512)) & 0x1fff;
cx_write(MO_VSCALE_EVEN, value);
cx_write(MO_VSCALE_ODD, value);
- dprintk(1,"set_scale: vscale 0x%04x\n", value);
+ dprintk(1, "set_scale: vscale 0x%04x\n", value);
cx_write(MO_VACTIVE_EVEN, sheight);
cx_write(MO_VACTIVE_ODD, sheight);
- dprintk(1,"set_scale: vactive 0x%04x\n", sheight);
+ dprintk(1, "set_scale: vactive 0x%04x\n", sheight);
// setup filters
value = 0;
@@ -709,7 +727,7 @@ int cx88_set_scale(struct cx88_core *core, unsigned int width, unsigned int heig
}
if (INPUT(core->input).type == CX88_VMUX_SVIDEO)
value |= (1 << 13) | (1 << 5);
- if (V4L2_FIELD_INTERLACED == field)
+ if (field == V4L2_FIELD_INTERLACED)
value |= (1 << 3); // VINT (interlaced vertical scaling)
if (width < 385)
value |= (1 << 0); // 3-tap interpolation
@@ -720,10 +738,11 @@ int cx88_set_scale(struct cx88_core *core, unsigned int width, unsigned int heig
cx_andor(MO_FILTER_EVEN, 0x7ffc7f, value); /* preserve PEAKEN, PSEL */
cx_andor(MO_FILTER_ODD, 0x7ffc7f, value);
- dprintk(1,"set_scale: filter 0x%04x\n", value);
+ dprintk(1, "set_scale: filter 0x%04x\n", value);
return 0;
}
+EXPORT_SYMBOL(cx88_set_scale);
static const u32 xtal = 28636363;
@@ -740,36 +759,36 @@ static int set_pll(struct cx88_core *core, int prescale, u32 ofreq)
prescale = 5;
pll = ofreq * 8 * prescale * (u64)(1 << 20);
- do_div(pll,xtal);
+ do_div(pll, xtal);
reg = (pll & 0x3ffffff) | (pre[prescale] << 26);
if (((reg >> 20) & 0x3f) < 14) {
- printk("%s/0: pll out of range\n",core->name);
+ pr_err("pll out of range\n");
return -1;
}
- dprintk(1,"set_pll: MO_PLL_REG 0x%08x [old=0x%08x,freq=%d]\n",
+ dprintk(1, "set_pll: MO_PLL_REG 0x%08x [old=0x%08x,freq=%d]\n",
reg, cx_read(MO_PLL_REG), ofreq);
cx_write(MO_PLL_REG, reg);
for (i = 0; i < 100; i++) {
reg = cx_read(MO_DEVICE_STATUS);
- if (reg & (1<<2)) {
- dprintk(1,"pll locked [pre=%d,ofreq=%d]\n",
- prescale,ofreq);
+ if (reg & (1 << 2)) {
+ dprintk(1, "pll locked [pre=%d,ofreq=%d]\n",
+ prescale, ofreq);
return 0;
}
- dprintk(1,"pll not locked yet, waiting ...\n");
- msleep(10);
+ dprintk(1, "pll not locked yet, waiting ...\n");
+ usleep_range(10000, 20000);
}
- dprintk(1,"pll NOT locked [pre=%d,ofreq=%d]\n",prescale,ofreq);
+ dprintk(1, "pll NOT locked [pre=%d,ofreq=%d]\n", prescale, ofreq);
return -1;
}
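
An illustrative pass through the range check, for a hypothetical NTSC call set_pll(core, 2, 28636360):

    /* pll = 28636360 * 8 * 2 * 2^20 / 28636363 ~= 16777214 (0x00fffffe);
     * (pll >> 20) & 0x3f = 15, so the "< 14" rejection does not trigger
     * and the divider is written to MO_PLL_REG.
     */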
int cx88_start_audio_dma(struct cx88_core *core)
{
/* constant 128 made buzz in analog Nicam-stereo for bigger fifo_size */
- int bpl = cx88_sram_channels[SRAM_CH25].fifo_size/4;
+ int bpl = cx88_sram_channels[SRAM_CH25].fifo_size / 4;
- int rds_bpl = cx88_sram_channels[SRAM_CH27].fifo_size/AUD_RDS_LINES;
+ int rds_bpl = cx88_sram_channels[SRAM_CH27].fifo_size / AUD_RDS_LINES;
/* If downstream RISC is enabled, bail out; ALSA is managing DMA */
if (cx_read(MO_AUD_DMACNTRL) & 0x10)
@@ -806,8 +825,8 @@ static int set_tvaudio(struct cx88_core *core)
{
v4l2_std_id norm = core->tvnorm;
- if (CX88_VMUX_TELEVISION != INPUT(core->input).type &&
- CX88_VMUX_CABLE != INPUT(core->input).type)
+ if (INPUT(core->input).type != CX88_VMUX_TELEVISION &&
+ INPUT(core->input).type != CX88_VMUX_CABLE)
return 0;
if (V4L2_STD_PAL_BG & norm) {
@@ -822,7 +841,8 @@ static int set_tvaudio(struct cx88_core *core)
} else if (V4L2_STD_SECAM_L & norm) {
core->tvaudio = WW_L;
- } else if ((V4L2_STD_SECAM_B | V4L2_STD_SECAM_G | V4L2_STD_SECAM_H) & norm) {
+ } else if ((V4L2_STD_SECAM_B | V4L2_STD_SECAM_G | V4L2_STD_SECAM_H) &
+ norm) {
core->tvaudio = WW_BG;
} else if (V4L2_STD_SECAM_DK & norm) {
@@ -836,8 +856,8 @@ static int set_tvaudio(struct cx88_core *core)
core->tvaudio = WW_EIAJ;
} else {
- printk("%s/0: tvaudio support needs work for this tv norm [%s], sorry\n",
- core->name, v4l2_norm_to_name(core->tvnorm));
+ pr_info("tvaudio support needs work for this tv norm [%s], sorry\n",
+ v4l2_norm_to_name(core->tvnorm));
core->tvaudio = WW_NONE;
return 0;
}
@@ -847,23 +867,21 @@ static int set_tvaudio(struct cx88_core *core)
/* cx88_set_stereo(dev,V4L2_TUNER_MODE_STEREO); */
/*
- This should be needed only on cx88-alsa. It seems that some cx88 chips have
- bugs and does require DMA enabled for it to work.
+ * This should be needed only on cx88-alsa. It seems that some cx88 chips have
+ * bugs and do require DMA enabled for it to work.
*/
cx88_start_audio_dma(core);
return 0;
}
-
-
int cx88_set_tvnorm(struct cx88_core *core, v4l2_std_id norm)
{
u32 fsc8;
u32 adc_clock;
u32 vdec_clock;
- u32 step_db,step_dr;
+ u32 step_db, step_dr;
u64 tmp64;
- u32 bdelay,agcdelay,htotal;
+ u32 bdelay, agcdelay, htotal;
u32 cxiformat, cxoformat;
if (norm == core->tvnorm)
@@ -912,62 +930,67 @@ int cx88_set_tvnorm(struct cx88_core *core, v4l2_std_id norm)
cxoformat = 0x181f0008;
}
- dprintk(1,"set_tvnorm: \"%s\" fsc8=%d adc=%d vdec=%d db/dr=%d/%d\n",
+ dprintk(1, "set_tvnorm: \"%s\" fsc8=%d adc=%d vdec=%d db/dr=%d/%d\n",
v4l2_norm_to_name(core->tvnorm), fsc8, adc_clock, vdec_clock,
step_db, step_dr);
- set_pll(core,2,vdec_clock);
+ set_pll(core, 2, vdec_clock);
- dprintk(1,"set_tvnorm: MO_INPUT_FORMAT 0x%08x [old=0x%08x]\n",
+ dprintk(1, "set_tvnorm: MO_INPUT_FORMAT 0x%08x [old=0x%08x]\n",
cxiformat, cx_read(MO_INPUT_FORMAT) & 0x0f);
- /* Chroma AGC must be disabled if SECAM is used, we enable it
- by default on PAL and NTSC */
+ /*
+ * Chroma AGC must be disabled if SECAM is used, we enable it
+ * by default on PAL and NTSC
+ */
cx_andor(MO_INPUT_FORMAT, 0x40f,
norm & V4L2_STD_SECAM ? cxiformat : cxiformat | 0x400);
// FIXME: as-is from DScaler
- dprintk(1,"set_tvnorm: MO_OUTPUT_FORMAT 0x%08x [old=0x%08x]\n",
+ dprintk(1, "set_tvnorm: MO_OUTPUT_FORMAT 0x%08x [old=0x%08x]\n",
cxoformat, cx_read(MO_OUTPUT_FORMAT));
cx_write(MO_OUTPUT_FORMAT, cxoformat);
// MO_SCONV_REG = adc clock / video dec clock * 2^17
tmp64 = adc_clock * (u64)(1 << 17);
do_div(tmp64, vdec_clock);
- dprintk(1,"set_tvnorm: MO_SCONV_REG 0x%08x [old=0x%08x]\n",
+ dprintk(1, "set_tvnorm: MO_SCONV_REG 0x%08x [old=0x%08x]\n",
(u32)tmp64, cx_read(MO_SCONV_REG));
cx_write(MO_SCONV_REG, (u32)tmp64);
// MO_SUB_STEP = 8 * fsc / video dec clock * 2^22
tmp64 = step_db * (u64)(1 << 22);
do_div(tmp64, vdec_clock);
- dprintk(1,"set_tvnorm: MO_SUB_STEP 0x%08x [old=0x%08x]\n",
+ dprintk(1, "set_tvnorm: MO_SUB_STEP 0x%08x [old=0x%08x]\n",
(u32)tmp64, cx_read(MO_SUB_STEP));
cx_write(MO_SUB_STEP, (u32)tmp64);
// MO_SUB_STEP_DR = 8 * 4406250 / video dec clock * 2^22
tmp64 = step_dr * (u64)(1 << 22);
do_div(tmp64, vdec_clock);
- dprintk(1,"set_tvnorm: MO_SUB_STEP_DR 0x%08x [old=0x%08x]\n",
+ dprintk(1, "set_tvnorm: MO_SUB_STEP_DR 0x%08x [old=0x%08x]\n",
(u32)tmp64, cx_read(MO_SUB_STEP_DR));
cx_write(MO_SUB_STEP_DR, (u32)tmp64);
// bdelay + agcdelay
bdelay = vdec_clock * 65 / 20000000 + 21;
agcdelay = vdec_clock * 68 / 20000000 + 15;
- dprintk(1,"set_tvnorm: MO_AGC_BURST 0x%08x [old=0x%08x,bdelay=%d,agcdelay=%d]\n",
- (bdelay << 8) | agcdelay, cx_read(MO_AGC_BURST), bdelay, agcdelay);
+ dprintk(1,
+ "set_tvnorm: MO_AGC_BURST 0x%08x [old=0x%08x,bdelay=%d,agcdelay=%d]\n",
+ (bdelay << 8) | agcdelay, cx_read(MO_AGC_BURST),
+ bdelay, agcdelay);
cx_write(MO_AGC_BURST, (bdelay << 8) | agcdelay);
// htotal
tmp64 = norm_htotal(norm) * (u64)vdec_clock;
do_div(tmp64, fsc8);
htotal = (u32)tmp64;
- dprintk(1,"set_tvnorm: MO_HTOTAL 0x%08x [old=0x%08x,htotal=%d]\n",
+ dprintk(1,
+ "set_tvnorm: MO_HTOTAL 0x%08x [old=0x%08x,htotal=%d]\n",
htotal, cx_read(MO_HTOTAL), (u32)tmp64);
cx_andor(MO_HTOTAL, 0x07ff, htotal);
// vbi stuff, set vbi offset to 10 (for 20 Clk*2 pixels), this makes
// the effective vbi offset ~244 samples, the same as the Bt8x8
- cx_write(MO_VBI_PACKET, (10<<11) | norm_vbipack(norm));
+ cx_write(MO_VBI_PACKET, (10 << 11) | norm_vbipack(norm));
// this is needed as well to set all tvnorm parameter
cx88_set_scale(core, 320, 240, V4L2_FIELD_INTERLACED);
@@ -978,12 +1001,16 @@ int cx88_set_tvnorm(struct cx88_core *core, v4l2_std_id norm)
// tell i2c chips
call_all(core, video, s_std, norm);
- /* The chroma_agc control should be inaccessible if the video format is SECAM */
+ /*
+ * The chroma_agc control should be inaccessible
+ * if the video format is SECAM
+ */
v4l2_ctrl_grab(core->chroma_agc, cxiformat == VideoFormatSECAM);
// done
return 0;
}
+EXPORT_SYMBOL(cx88_set_tvnorm);
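
Worked NTSC values for the register math above, assuming adc_clock = xtal = 28636363 and vdec_clock = fsc8 = 28636360 (what the surrounding code suggests for NTSC; back-of-the-envelope checks, not values from the patch):

    /* MO_SCONV_REG = 28636363 * 2^17 / 28636360 = 131072  (0x20000, ~1.0)
     * bdelay       = 28636360 * 65 / 20000000 + 21 = 114
     * agcdelay     = 28636360 * 68 / 20000000 + 15 = 112
     * MO_AGC_BURST = (114 << 8) | 112 = 0x7270
     * MO_HTOTAL    = 910 * 28636360 / 28636360 = 910
     */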
/* ------------------------------------------------------------------ */
@@ -1008,8 +1035,9 @@ void cx88_vdev_init(struct cx88_core *core,
snprintf(vfd->name, sizeof(vfd->name), "%s %s (%s)",
core->name, type, core->board.name);
}
+EXPORT_SYMBOL(cx88_vdev_init);
-struct cx88_core* cx88_core_get(struct pci_dev *pci)
+struct cx88_core *cx88_core_get(struct pci_dev *pci)
{
struct cx88_core *core;
@@ -1020,7 +1048,7 @@ struct cx88_core* cx88_core_get(struct pci_dev *pci)
if (PCI_SLOT(pci->devfn) != core->pci_slot)
continue;
- if (0 != cx88_get_resources(core, pci)) {
+ if (cx88_get_resources(core, pci) != 0) {
mutex_unlock(&devlist);
return NULL;
}
@@ -1030,7 +1058,7 @@ struct cx88_core* cx88_core_get(struct pci_dev *pci)
}
core = cx88_core_create(pci, cx88_devcount);
- if (NULL != core) {
+ if (core) {
cx88_devcount++;
list_add_tail(&core->devlist, &cx88_devlist);
}
@@ -1038,18 +1066,19 @@ struct cx88_core* cx88_core_get(struct pci_dev *pci)
mutex_unlock(&devlist);
return core;
}
+EXPORT_SYMBOL(cx88_core_get);
void cx88_core_put(struct cx88_core *core, struct pci_dev *pci)
{
- release_mem_region(pci_resource_start(pci,0),
- pci_resource_len(pci,0));
+ release_mem_region(pci_resource_start(pci, 0),
+ pci_resource_len(pci, 0));
if (!atomic_dec_and_test(&core->refcount))
return;
mutex_lock(&devlist);
cx88_ir_fini(core);
- if (0 == core->i2c_rc) {
+ if (core->i2c_rc == 0) {
if (core->i2c_rtc)
i2c_unregister_device(core->i2c_rtc);
i2c_del_adapter(&core->i2c_adap);
@@ -1063,29 +1092,4 @@ void cx88_core_put(struct cx88_core *core, struct pci_dev *pci)
v4l2_device_unregister(&core->v4l2_dev);
kfree(core);
}
-
-/* ------------------------------------------------------------------ */
-
-EXPORT_SYMBOL(cx88_print_irqbits);
-
-EXPORT_SYMBOL(cx88_core_irq);
-EXPORT_SYMBOL(cx88_wakeup);
-EXPORT_SYMBOL(cx88_reset);
-EXPORT_SYMBOL(cx88_shutdown);
-
-EXPORT_SYMBOL(cx88_risc_buffer);
-EXPORT_SYMBOL(cx88_risc_databuffer);
-
-EXPORT_SYMBOL(cx88_sram_channels);
-EXPORT_SYMBOL(cx88_sram_channel_setup);
-EXPORT_SYMBOL(cx88_sram_channel_dump);
-
-EXPORT_SYMBOL(cx88_set_tvnorm);
-EXPORT_SYMBOL(cx88_set_scale);
-
-EXPORT_SYMBOL(cx88_vdev_init);
-EXPORT_SYMBOL(cx88_core_get);
EXPORT_SYMBOL(cx88_core_put);
-
-EXPORT_SYMBOL(cx88_ir_start);
-EXPORT_SYMBOL(cx88_ir_stop);
diff --git a/drivers/media/pci/cx88/cx88-dsp.c b/drivers/media/pci/cx88/cx88-dsp.c
index a9907265ff66..105029088120 100644
--- a/drivers/media/pci/cx88/cx88-dsp.c
+++ b/drivers/media/pci/cx88/cx88-dsp.c
@@ -1,5 +1,4 @@
/*
- *
* Stereo and SAP detection for cx88
*
* Copyright (c) 2009 Marton Balint <cus@fazekas.hu>
@@ -13,41 +12,41 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
+#include "cx88.h"
+#include "cx88-reg.h"
+
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/jiffies.h>
#include <asm/div64.h>
-#include "cx88.h"
-#include "cx88-reg.h"
-
#define INT_PI ((s32)(3.141592653589 * 32768.0))
#define compat_remainder(a, b) \
- ((float)(((s32)((a)*100))%((s32)((b)*100)))/100.0)
+ ((float)(((s32)((a) * 100)) % ((s32)((b) * 100))) / 100.0)
#define baseband_freq(carrier, srate, tone) ((s32)( \
(compat_remainder(carrier + tone, srate)) / srate * 2 * INT_PI))
-/* We calculate the baseband frequencies of the carrier and the pilot tones
- * based on the the sampling rate of the audio rds fifo. */
+/*
+ * We calculate the baseband frequencies of the carrier and the pilot tones
+ * based on the sampling rate of the audio rds fifo.
+ */
#define FREQ_A2_CARRIER baseband_freq(54687.5, 2689.36, 0.0)
#define FREQ_A2_DUAL baseband_freq(54687.5, 2689.36, 274.1)
#define FREQ_A2_STEREO baseband_freq(54687.5, 2689.36, 117.5)
-/* The frequencies below are from the reference driver. They probably need
+/*
+ * The frequencies below are from the reference driver. They probably need
* further adjustments, because they are not tested at all. You may even need
* to play a bit with the registers of the chip to select the proper signal
* for the input of the audio rds fifo, and measure it's sampling rate to
- * calculate the proper baseband frequencies... */
+ * calculate the proper baseband frequencies...
+ */
#define FREQ_A2M_CARRIER ((s32)(2.114516 * 32768.0))
#define FREQ_A2M_DUAL ((s32)(2.754916 * 32768.0))
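
The macros above do compile-time fixed-point trigonometry: INT_PI maps pi
to 102943 (pi * 32768), and baseband_freq() folds a carrier down to a
phase increment in radians per FIFO sample, expressed on that same scale.
A user-space re-evaluation of the A2 constants, assuming nothing beyond
what the macros themselves state:

#include <stdio.h>

#define INT_PI ((int)(3.141592653589 * 32768.0))
#define compat_remainder(a, b) \
        ((float)(((int)((a) * 100)) % ((int)((b) * 100))) / 100.0)
#define baseband_freq(carrier, srate, tone) ((int)( \
        (compat_remainder(carrier + tone, srate)) / srate * 2 * INT_PI))

int main(void)
{
        /* same operands as the FREQ_A2_* constants in the driver */
        printf("INT_PI=%d carrier=%d stereo=%d dual=%d\n", INT_PI,
               baseband_freq(54687.5, 2689.36, 0.0),
               baseband_freq(54687.5, 2689.36, 117.5),
               baseband_freq(54687.5, 2689.36, 274.1));
        return 0;
}
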
@@ -71,43 +70,52 @@ static unsigned int dsp_debug;
module_param(dsp_debug, int, 0644);
MODULE_PARM_DESC(dsp_debug, "enable audio dsp debug messages");
-#define dprintk(level, fmt, arg...) if (dsp_debug >= level) \
- printk(KERN_DEBUG "%s/0: " fmt, core->name , ## arg)
+#define dprintk(level, fmt, arg...) do { \
+ if (dsp_debug >= level) \
+ printk(KERN_DEBUG pr_fmt("%s: dsp:" fmt), \
+ __func__, ##arg); \
+} while (0)
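
Two things change in this macro: the body gains a do { ... } while (0)
wrapper, and the text is routed through pr_fmt() so messages pick up the
module prefix. The wrapper is what lets the macro behave as a single
statement; a standalone sketch of the hazard it avoids (hypothetical LOG
macro, GNU-style variadic arguments as the kernel uses):

#include <stdio.h>

static int debug = 1;

/*
 * Without do/while(0), the expansion would end at the macro's own
 * semicolon and a following 'else' would no longer compile.
 */
#define LOG(fmt, arg...) do { \
        if (debug) \
                printf(fmt, ##arg); \
} while (0)

int main(void)
{
        int err = 0;

        if (err)
                LOG("failed: %d\n", err);
        else    /* pairs correctly because LOG is one statement */
                LOG("ok\n");
        return 0;
}
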
static s32 int_cos(u32 x)
{
u32 t2, t4, t6, t8;
s32 ret;
u16 period = x / INT_PI;
+
if (period % 2)
return -int_cos(x - INT_PI);
x = x % INT_PI;
- if (x > INT_PI/2)
- return -int_cos(INT_PI/2 - (x % (INT_PI/2)));
- /* Now x is between 0 and INT_PI/2.
- * To calculate cos(x) we use it's Taylor polinom. */
- t2 = x*x/32768/2;
- t4 = t2*x/32768*x/32768/3/4;
- t6 = t4*x/32768*x/32768/5/6;
- t8 = t6*x/32768*x/32768/7/8;
- ret = 32768-t2+t4-t6+t8;
+ if (x > INT_PI / 2)
+ return -int_cos(INT_PI / 2 - (x % (INT_PI / 2)));
+ /*
+ * Now x is between 0 and INT_PI/2.
+ * To calculate cos(x) we use its Taylor polynomial.
+ */
+ t2 = x * x / 32768 / 2;
+ t4 = t2 * x / 32768 * x / 32768 / 3 / 4;
+ t6 = t4 * x / 32768 * x / 32768 / 5 / 6;
+ t8 = t6 * x / 32768 * x / 32768 / 7 / 8;
+ ret = 32768 - t2 + t4 - t6 + t8;
return ret;
}
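
int_cos() works on a Q15-like scale where 32768 represents 1.0 and INT_PI
represents pi; the folding above reduces the argument to [0, pi/2], where
the four-term Taylor series is accurate. A user-space copy of the routine,
checked against libm (a sanity-check sketch, not driver code):

#include <stdio.h>
#include <stdint.h>
#include <math.h>

#define INT_PI ((int32_t)(3.141592653589 * 32768.0))

static int32_t int_cos(uint32_t x)
{
        uint32_t t2, t4, t6, t8;
        uint16_t period = x / INT_PI;

        if (period % 2)
                return -int_cos(x - INT_PI);
        x = x % INT_PI;
        if (x > INT_PI / 2)
                return -int_cos(INT_PI / 2 - (x % (INT_PI / 2)));
        /* Taylor terms; each divide keeps the running value in Q15 */
        t2 = x * x / 32768 / 2;
        t4 = t2 * x / 32768 * x / 32768 / 3 / 4;
        t6 = t4 * x / 32768 * x / 32768 / 5 / 6;
        t8 = t6 * x / 32768 * x / 32768 / 7 / 8;
        return 32768 - t2 + t4 - t6 + t8;
}

int main(void)
{
        double a;

        for (a = 0.0; a < 6.2; a += 0.7)        /* build with: cc x.c -lm */
                printf("a=%.1f fixed=%+.4f libm=%+.4f\n", a,
                       int_cos((uint32_t)(a * 32768.0)) / 32768.0, cos(a));
        return 0;
}
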
static u32 int_goertzel(s16 x[], u32 N, u32 freq)
{
- /* We use the Goertzel algorithm to determine the power of the
- * given frequency in the signal */
+ /*
+ * We use the Goertzel algorithm to determine the power of the
+ * given frequency in the signal
+ */
s32 s_prev = 0;
s32 s_prev2 = 0;
- s32 coeff = 2*int_cos(freq);
+ s32 coeff = 2 * int_cos(freq);
u32 i;
u64 tmp;
u32 divisor;
for (i = 0; i < N; i++) {
- s32 s = x[i] + ((s64)coeff*s_prev/32768) - s_prev2;
+ s32 s = x[i] + ((s64)coeff * s_prev / 32768) - s_prev2;
+
s_prev2 = s_prev;
s_prev = s;
}
@@ -115,17 +123,20 @@ static u32 int_goertzel(s16 x[], u32 N, u32 freq)
tmp = (s64)s_prev2 * s_prev2 + (s64)s_prev * s_prev -
(s64)coeff * s_prev2 * s_prev / 32768;
- /* XXX: N must be low enough so that N*N fits in s32.
- * Else we need two divisions. */
+ /*
+ * XXX: N must be low enough so that N*N fits in s32.
+ * Else we need two divisions.
+ */
divisor = N * N;
do_div(tmp, divisor);
- return (u32) tmp;
+ return (u32)tmp;
}
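
The recurrence above is the Goertzel algorithm: it evaluates a single DFT
bin in O(N) with one multiply per sample, which is why it beats a full
FFT when only a handful of pilot frequencies need probing. A
floating-point reference with a synthetic test tone (illustrative only):

#include <stdio.h>
#include <math.h>

/* power of one frequency bin; w is in radians per sample */
static double goertzel_power(const short *x, int n, double w)
{
        double coeff = 2.0 * cos(w);
        double s_prev = 0.0, s_prev2 = 0.0;
        int i;

        for (i = 0; i < n; i++) {
                double s = x[i] + coeff * s_prev - s_prev2;

                s_prev2 = s_prev;
                s_prev = s;
        }
        /* |X(w)|^2, scaled down by n^2 exactly as the driver does */
        return (s_prev2 * s_prev2 + s_prev * s_prev -
                coeff * s_prev2 * s_prev) / ((double)n * n);
}

int main(void)
{
        short x[256];
        int i;

        for (i = 0; i < 256; i++)       /* 1 kHz tone sampled at 8 kHz */
                x[i] = (short)(10000.0 * cos(2.0 * M_PI * i * 1000.0 / 8000.0));

        printf("power at 1 kHz: %.0f\n",
               goertzel_power(x, 256, 2.0 * M_PI * 1000.0 / 8000.0));
        printf("power at 2 kHz: %.0f\n",        /* orders of magnitude lower */
               goertzel_power(x, 256, 2.0 * M_PI * 2000.0 / 8000.0));
        return 0;
}
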
static u32 freq_magnitude(s16 x[], u32 N, u32 freq)
{
u32 sum = int_goertzel(x, N, freq);
+
return (u32)int_sqrt(sum);
}
@@ -138,7 +149,7 @@ static u32 noise_magnitude(s16 x[], u32 N, u32 freq_start, u32 freq_end)
if (N > 192) {
/* The last 192 samples are enough for noise detection */
- x += (N-192);
+ x += (N - 192);
N = 192;
}
@@ -176,8 +187,8 @@ static s32 detect_a2_a2m_eiaj(struct cx88_core *core, s16 x[], u32 N)
dual_freq = FREQ_EIAJ_DUAL;
break;
default:
- printk(KERN_WARNING "%s/0: unsupported audio mode %d for %s\n",
- core->name, core->tvaudio, __func__);
+ pr_warn("unsupported audio mode %d for %s\n",
+ core->tvaudio, __func__);
return UNSET;
}
@@ -186,8 +197,9 @@ static s32 detect_a2_a2m_eiaj(struct cx88_core *core, s16 x[], u32 N)
dual = freq_magnitude(x, N, dual_freq);
noise = noise_magnitude(x, N, FREQ_NOISE_START, FREQ_NOISE_END);
- dprintk(1, "detect a2/a2m/eiaj: carrier=%d, stereo=%d, dual=%d, "
- "noise=%d\n", carrier, stereo, dual, noise);
+ dprintk(1,
+ "detect a2/a2m/eiaj: carrier=%d, stereo=%d, dual=%d, noise=%d\n",
+ carrier, stereo, dual, noise);
if (stereo > dual)
ret = V4L2_TUNER_SUB_STEREO;
@@ -196,20 +208,22 @@ static s32 detect_a2_a2m_eiaj(struct cx88_core *core, s16 x[], u32 N)
if (core->tvaudio == WW_EIAJ) {
/* EIAJ checks may need adjustments */
- if ((carrier > max(stereo, dual)*2) &&
- (carrier < max(stereo, dual)*6) &&
+ if ((carrier > max(stereo, dual) * 2) &&
+ (carrier < max(stereo, dual) * 6) &&
(carrier > 20 && carrier < 200) &&
(max(stereo, dual) > min(stereo, dual))) {
- /* For EIAJ the carrier is always present,
- so we probably don't need noise detection */
+ /*
+ * For EIAJ the carrier is always present,
+ * so we probably don't need noise detection
+ */
return ret;
}
} else {
- if ((carrier > max(stereo, dual)*2) &&
- (carrier < max(stereo, dual)*8) &&
+ if ((carrier > max(stereo, dual) * 2) &&
+ (carrier < max(stereo, dual) * 8) &&
(carrier > 20 && carrier < 200) &&
(noise < 10) &&
- (max(stereo, dual) > min(stereo, dual)*2)) {
+ (max(stereo, dual) > min(stereo, dual) * 2)) {
return ret;
}
}
@@ -222,8 +236,9 @@ static s32 detect_btsc(struct cx88_core *core, s16 x[], u32 N)
s32 sap = freq_magnitude(x, N, FREQ_BTSC_SAP);
s32 dual_ref = freq_magnitude(x, N, FREQ_BTSC_DUAL_REF);
s32 dual = freq_magnitude(x, N, FREQ_BTSC_DUAL);
- dprintk(1, "detect btsc: dual_ref=%d, dual=%d, sap_ref=%d, sap=%d"
- "\n", dual_ref, dual, sap_ref, sap);
+
+ dprintk(1, "detect btsc: dual_ref=%d, dual=%d, sap_ref=%d, sap=%d\n",
+ dual_ref, dual, sap_ref, sap);
/* FIXME: Currently not supported */
return UNSET;
}
@@ -234,36 +249,31 @@ static s16 *read_rds_samples(struct cx88_core *core, u32 *N)
s16 *samples;
unsigned int i;
- unsigned int bpl = srch->fifo_size/AUD_RDS_LINES;
- unsigned int spl = bpl/4;
- unsigned int sample_count = spl*(AUD_RDS_LINES-1);
+ unsigned int bpl = srch->fifo_size / AUD_RDS_LINES;
+ unsigned int spl = bpl / 4;
+ unsigned int sample_count = spl * (AUD_RDS_LINES - 1);
u32 current_address = cx_read(srch->ptr1_reg);
u32 offset = (current_address - srch->fifo_start + bpl);
- dprintk(1, "read RDS samples: current_address=%08x (offset=%08x), "
- "sample_count=%d, aud_intstat=%08x\n", current_address,
+ dprintk(1,
+ "read RDS samples: current_address=%08x (offset=%08x), sample_count=%d, aud_intstat=%08x\n",
+ current_address,
current_address - srch->fifo_start, sample_count,
cx_read(MO_AUD_INTSTAT));
-
- samples = kmalloc(sizeof(s16)*sample_count, GFP_KERNEL);
+ samples = kmalloc_array(sample_count, sizeof(*samples), GFP_KERNEL);
if (!samples)
return NULL;
*N = sample_count;
for (i = 0; i < sample_count; i++) {
- offset = offset % (AUD_RDS_LINES*bpl);
+ offset = offset % (AUD_RDS_LINES * bpl);
samples[i] = cx_read(srch->fifo_start + offset);
offset += 4;
}
- if (dsp_debug >= 2) {
- dprintk(2, "RDS samples dump: ");
- for (i = 0; i < sample_count; i++)
- printk("%hd ", samples[i]);
- printk(".\n");
- }
+ dprintk(2, "RDS samples dump: %*ph\n", sample_count, samples);
return samples;
}
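
Two small hardening points in this hunk: kmalloc_array() rejects requests
whose count * size multiplication would overflow, which the open-coded
sizeof(s16) * sample_count allocation did not, and the sample dump now
uses the %*ph specifier (which prints at most 64 bytes, so it shows a
prefix of the buffer rather than every sample). A user-space sketch of
the overflow check kmalloc_array() performs:

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

/* illustrative stand-in for kmalloc_array(n, size, GFP_KERNEL) */
static void *alloc_array(size_t n, size_t size)
{
        if (size != 0 && n > SIZE_MAX / size)
                return NULL;    /* n * size would wrap around */
        return malloc(n * size);
}

int main(void)
{
        int16_t *ok = alloc_array(4096, sizeof(*ok));
        void *bad = alloc_array(SIZE_MAX / 2 + 1, sizeof(int16_t));

        printf("ok=%p bad=%p\n", (void *)ok, bad);      /* bad is NULL */
        free(ok);
        return 0;
}
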
@@ -310,11 +320,11 @@ s32 cx88_dsp_detect_stereo_sap(struct cx88_core *core)
kfree(samples);
- if (UNSET != ret)
+ if (ret != UNSET)
dprintk(1, "stereo/sap detection result:%s%s%s\n",
- (ret & V4L2_TUNER_SUB_MONO) ? " mono" : "",
- (ret & V4L2_TUNER_SUB_STEREO) ? " stereo" : "",
- (ret & V4L2_TUNER_SUB_LANG2) ? " dual" : "");
+ (ret & V4L2_TUNER_SUB_MONO) ? " mono" : "",
+ (ret & V4L2_TUNER_SUB_STEREO) ? " stereo" : "",
+ (ret & V4L2_TUNER_SUB_LANG2) ? " dual" : "");
return ret;
}
diff --git a/drivers/media/pci/cx88/cx88-dvb.c b/drivers/media/pci/cx88/cx88-dvb.c
index ac2392d8887a..ddf90678df34 100644
--- a/drivers/media/pci/cx88/cx88-dvb.c
+++ b/drivers/media/pci/cx88/cx88-dvb.c
@@ -1,5 +1,4 @@
/*
- *
* device driver for Conexant 2388x based TV cards
* MPEG Transport Stream (DVB) routines
*
@@ -15,12 +14,11 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
+#include "cx88.h"
+#include "dvb-pll.h"
+
#include <linux/module.h>
#include <linux/init.h>
#include <linux/device.h>
@@ -29,8 +27,6 @@
#include <linux/file.h>
#include <linux/suspend.h>
-#include "cx88.h"
-#include "dvb-pll.h"
#include <media/v4l2-common.h>
#include "mt352.h"
@@ -69,7 +65,7 @@ MODULE_VERSION(CX88_VERSION);
static unsigned int debug;
module_param(debug, int, 0644);
-MODULE_PARM_DESC(debug,"enable debug messages [dvb]");
+MODULE_PARM_DESC(debug, "enable debug messages [dvb]");
static unsigned int dvb_buf_tscnt = 32;
module_param(dvb_buf_tscnt, int, 0644);
@@ -77,14 +73,17 @@ MODULE_PARM_DESC(dvb_buf_tscnt, "DVB Buffer TS count [dvb]");
DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);
-#define dprintk(level,fmt, arg...) if (debug >= level) \
- printk(KERN_DEBUG "%s/2-dvb: " fmt, core->name, ## arg)
+#define dprintk(level, fmt, arg...) do { \
+ if (debug >= level) \
+ printk(KERN_DEBUG pr_fmt("%s: dvb:" fmt), \
+ __func__, ##arg); \
+} while (0)
/* ------------------------------------------------------------------ */
static int queue_setup(struct vb2_queue *q,
- unsigned int *num_buffers, unsigned int *num_planes,
- unsigned int sizes[], struct device *alloc_devs[])
+ unsigned int *num_buffers, unsigned int *num_planes,
+ unsigned int sizes[], struct device *alloc_devs[])
{
struct cx8802_dev *dev = q->drv_priv;
@@ -169,23 +168,23 @@ static const struct vb2_ops dvb_qops = {
/* ------------------------------------------------------------------ */
-static int cx88_dvb_bus_ctrl(struct dvb_frontend* fe, int acquire)
+static int cx88_dvb_bus_ctrl(struct dvb_frontend *fe, int acquire)
{
- struct cx8802_dev *dev= fe->dvb->priv;
+ struct cx8802_dev *dev = fe->dvb->priv;
struct cx8802_driver *drv = NULL;
int ret = 0;
int fe_id;
fe_id = vb2_dvb_find_frontend(&dev->frontends, fe);
if (!fe_id) {
- printk(KERN_ERR "%s() No frontend found\n", __func__);
+ pr_err("%s() No frontend found\n", __func__);
return -EINVAL;
}
mutex_lock(&dev->core->lock);
drv = cx8802_get_driver(dev, CX88_MPEG_DVB);
if (drv) {
- if (acquire){
+ if (acquire) {
dev->frontends.active_fe_id = fe_id;
ret = drv->request_acquire(drv);
} else {
@@ -222,13 +221,13 @@ static void cx88_dvb_gate_ctrl(struct cx88_core *core, int open)
/* ------------------------------------------------------------------ */
-static int dvico_fusionhdtv_demod_init(struct dvb_frontend* fe)
+static int dvico_fusionhdtv_demod_init(struct dvb_frontend *fe)
{
- static const u8 clock_config [] = { CLOCK_CTL, 0x38, 0x39 };
- static const u8 reset [] = { RESET, 0x80 };
- static const u8 adc_ctl_1_cfg [] = { ADC_CTL_1, 0x40 };
- static const u8 agc_cfg [] = { AGC_TARGET, 0x24, 0x20 };
- static const u8 gpp_ctl_cfg [] = { GPP_CTL, 0x33 };
+ static const u8 clock_config[] = { CLOCK_CTL, 0x38, 0x39 };
+ static const u8 reset[] = { RESET, 0x80 };
+ static const u8 adc_ctl_1_cfg[] = { ADC_CTL_1, 0x40 };
+ static const u8 agc_cfg[] = { AGC_TARGET, 0x24, 0x20 };
+ static const u8 gpp_ctl_cfg[] = { GPP_CTL, 0x33 };
static const u8 capt_range_cfg[] = { CAPT_RANGE, 0x32 };
mt352_write(fe, clock_config, sizeof(clock_config));
@@ -244,11 +243,11 @@ static int dvico_fusionhdtv_demod_init(struct dvb_frontend* fe)
static int dvico_dual_demod_init(struct dvb_frontend *fe)
{
- static const u8 clock_config [] = { CLOCK_CTL, 0x38, 0x38 };
- static const u8 reset [] = { RESET, 0x80 };
- static const u8 adc_ctl_1_cfg [] = { ADC_CTL_1, 0x40 };
- static const u8 agc_cfg [] = { AGC_TARGET, 0x28, 0x20 };
- static const u8 gpp_ctl_cfg [] = { GPP_CTL, 0x33 };
+ static const u8 clock_config[] = { CLOCK_CTL, 0x38, 0x38 };
+ static const u8 reset[] = { RESET, 0x80 };
+ static const u8 adc_ctl_1_cfg[] = { ADC_CTL_1, 0x40 };
+ static const u8 agc_cfg[] = { AGC_TARGET, 0x28, 0x20 };
+ static const u8 gpp_ctl_cfg[] = { GPP_CTL, 0x33 };
static const u8 capt_range_cfg[] = { CAPT_RANGE, 0x32 };
mt352_write(fe, clock_config, sizeof(clock_config));
@@ -263,12 +262,12 @@ static int dvico_dual_demod_init(struct dvb_frontend *fe)
return 0;
}
-static int dntv_live_dvbt_demod_init(struct dvb_frontend* fe)
+static int dntv_live_dvbt_demod_init(struct dvb_frontend *fe)
{
- static const u8 clock_config [] = { 0x89, 0x38, 0x39 };
- static const u8 reset [] = { 0x50, 0x80 };
- static const u8 adc_ctl_1_cfg [] = { 0x8E, 0x40 };
- static const u8 agc_cfg [] = { 0x67, 0x10, 0x23, 0x00, 0xFF, 0xFF,
+ static const u8 clock_config[] = { 0x89, 0x38, 0x39 };
+ static const u8 reset[] = { 0x50, 0x80 };
+ static const u8 adc_ctl_1_cfg[] = { 0x8E, 0x40 };
+ static const u8 agc_cfg[] = { 0x67, 0x10, 0x23, 0x00, 0xFF, 0xFF,
0x00, 0xFF, 0x00, 0x40, 0x40 };
static const u8 dntv_extra[] = { 0xB5, 0x7A };
static const u8 capt_range_cfg[] = { 0x75, 0x32 };
@@ -312,12 +311,12 @@ static struct mb86a16_config twinhan_vp1027 = {
};
#if IS_ENABLED(CONFIG_VIDEO_CX88_VP3054)
-static int dntv_live_dvbt_pro_demod_init(struct dvb_frontend* fe)
+static int dntv_live_dvbt_pro_demod_init(struct dvb_frontend *fe)
{
- static const u8 clock_config [] = { 0x89, 0x38, 0x38 };
- static const u8 reset [] = { 0x50, 0x80 };
- static const u8 adc_ctl_1_cfg [] = { 0x8E, 0x40 };
- static const u8 agc_cfg [] = { 0x67, 0x10, 0x20, 0x00, 0xFF, 0xFF,
+ static const u8 clock_config[] = { 0x89, 0x38, 0x38 };
+ static const u8 reset[] = { 0x50, 0x80 };
+ static const u8 adc_ctl_1_cfg[] = { 0x8E, 0x40 };
+ static const u8 agc_cfg[] = { 0x67, 0x10, 0x20, 0x00, 0xFF, 0xFF,
0x00, 0xFF, 0x00, 0x40, 0x40 };
static const u8 dntv_extra[] = { 0xB5, 0x7A };
static const u8 capt_range_cfg[] = { 0x75, 0x32 };
@@ -374,9 +373,10 @@ static const struct cx22702_config hauppauge_hvr_config = {
.output_mode = CX22702_SERIAL_OUTPUT,
};
-static int or51132_set_ts_param(struct dvb_frontend* fe, int is_punctured)
+static int or51132_set_ts_param(struct dvb_frontend *fe, int is_punctured)
{
- struct cx8802_dev *dev= fe->dvb->priv;
+ struct cx8802_dev *dev = fe->dvb->priv;
+
dev->ts_gen_cntrl = is_punctured ? 0x04 : 0x00;
return 0;
}
@@ -386,9 +386,9 @@ static const struct or51132_config pchdtv_hd3000 = {
.set_ts_params = or51132_set_ts_param,
};
-static int lgdt330x_pll_rf_set(struct dvb_frontend* fe, int index)
+static int lgdt330x_pll_rf_set(struct dvb_frontend *fe, int index)
{
- struct cx8802_dev *dev= fe->dvb->priv;
+ struct cx8802_dev *dev = fe->dvb->priv;
struct cx88_core *core = dev->core;
dprintk(1, "%s: index = %d\n", __func__, index);
@@ -399,9 +399,10 @@ static int lgdt330x_pll_rf_set(struct dvb_frontend* fe, int index)
return 0;
}
-static int lgdt330x_set_ts_param(struct dvb_frontend* fe, int is_punctured)
+static int lgdt330x_set_ts_param(struct dvb_frontend *fe, int is_punctured)
{
- struct cx8802_dev *dev= fe->dvb->priv;
+ struct cx8802_dev *dev = fe->dvb->priv;
+
if (is_punctured)
dev->ts_gen_cntrl |= 0x04;
else
@@ -430,9 +431,10 @@ static const struct lgdt330x_config pchdtv_hd5500 = {
.set_ts_params = lgdt330x_set_ts_param,
};
-static int nxt200x_set_ts_param(struct dvb_frontend* fe, int is_punctured)
+static int nxt200x_set_ts_param(struct dvb_frontend *fe, int is_punctured)
{
- struct cx8802_dev *dev= fe->dvb->priv;
+ struct cx8802_dev *dev = fe->dvb->priv;
+
dev->ts_gen_cntrl = is_punctured ? 0x04 : 0x00;
return 0;
}
@@ -442,18 +444,19 @@ static const struct nxt200x_config ati_hdtvwonder = {
.set_ts_params = nxt200x_set_ts_param,
};
-static int cx24123_set_ts_param(struct dvb_frontend* fe,
- int is_punctured)
+static int cx24123_set_ts_param(struct dvb_frontend *fe,
+ int is_punctured)
{
- struct cx8802_dev *dev= fe->dvb->priv;
+ struct cx8802_dev *dev = fe->dvb->priv;
+
dev->ts_gen_cntrl = 0x02;
return 0;
}
-static int kworld_dvbs_100_set_voltage(struct dvb_frontend* fe,
+static int kworld_dvbs_100_set_voltage(struct dvb_frontend *fe,
enum fe_sec_voltage voltage)
{
- struct cx8802_dev *dev= fe->dvb->priv;
+ struct cx8802_dev *dev = fe->dvb->priv;
struct cx88_core *core = dev->core;
if (voltage == SEC_VOLTAGE_OFF)
@@ -469,11 +472,11 @@ static int kworld_dvbs_100_set_voltage(struct dvb_frontend* fe,
static int geniatech_dvbs_set_voltage(struct dvb_frontend *fe,
enum fe_sec_voltage voltage)
{
- struct cx8802_dev *dev= fe->dvb->priv;
+ struct cx8802_dev *dev = fe->dvb->priv;
struct cx88_core *core = dev->core;
if (voltage == SEC_VOLTAGE_OFF) {
- dprintk(1,"LNB Voltage OFF\n");
+ dprintk(1, "LNB Voltage OFF\n");
cx_write(MO_GP0_IO, 0x0000efff);
}
@@ -485,7 +488,7 @@ static int geniatech_dvbs_set_voltage(struct dvb_frontend *fe,
static int tevii_dvbs_set_voltage(struct dvb_frontend *fe,
enum fe_sec_voltage voltage)
{
- struct cx8802_dev *dev= fe->dvb->priv;
+ struct cx8802_dev *dev = fe->dvb->priv;
struct cx88_core *core = dev->core;
cx_set(MO_GP0_IO, 0x6040);
@@ -625,9 +628,7 @@ static int attach_xc3028(u8 addr, struct cx8802_dev *dev)
return -EINVAL;
if (!fe0->dvb.frontend) {
- printk(KERN_ERR "%s/2: dvb frontend not attached. "
- "Can't attach xc3028\n",
- dev->core->name);
+ pr_err("dvb frontend not attached. Can't attach xc3028\n");
return -EINVAL;
}
@@ -640,16 +641,14 @@ static int attach_xc3028(u8 addr, struct cx8802_dev *dev)
fe = dvb_attach(xc2028_attach, fe0->dvb.frontend, &cfg);
if (!fe) {
- printk(KERN_ERR "%s/2: xc3028 attach failed\n",
- dev->core->name);
+ pr_err("xc3028 attach failed\n");
dvb_frontend_detach(fe0->dvb.frontend);
dvb_unregister_frontend(fe0->dvb.frontend);
fe0->dvb.frontend = NULL;
return -EINVAL;
}
- printk(KERN_INFO "%s/2: xc3028 attached\n",
- dev->core->name);
+ pr_info("xc3028 attached\n");
return 0;
}
@@ -665,41 +664,40 @@ static int attach_xc4000(struct cx8802_dev *dev, struct xc4000_config *cfg)
return -EINVAL;
if (!fe0->dvb.frontend) {
- printk(KERN_ERR "%s/2: dvb frontend not attached. "
- "Can't attach xc4000\n",
- dev->core->name);
+ pr_err("dvb frontend not attached. Can't attach xc4000\n");
return -EINVAL;
}
fe = dvb_attach(xc4000_attach, fe0->dvb.frontend, &dev->core->i2c_adap,
cfg);
if (!fe) {
- printk(KERN_ERR "%s/2: xc4000 attach failed\n",
- dev->core->name);
+ pr_err("xc4000 attach failed\n");
dvb_frontend_detach(fe0->dvb.frontend);
dvb_unregister_frontend(fe0->dvb.frontend);
fe0->dvb.frontend = NULL;
return -EINVAL;
}
- printk(KERN_INFO "%s/2: xc4000 attached\n", dev->core->name);
+ pr_info("xc4000 attached\n");
return 0;
}
static int cx24116_set_ts_param(struct dvb_frontend *fe,
- int is_punctured)
+ int is_punctured)
{
struct cx8802_dev *dev = fe->dvb->priv;
+
dev->ts_gen_cntrl = 0x2;
return 0;
}
static int stv0900_set_ts_param(struct dvb_frontend *fe,
- int is_punctured)
+ int is_punctured)
{
struct cx8802_dev *dev = fe->dvb->priv;
+
dev->ts_gen_cntrl = 0;
return 0;
@@ -713,10 +711,10 @@ static int cx24116_reset_device(struct dvb_frontend *fe)
/* Reset the part */
/* Put the cx24116 into reset */
cx_write(MO_SRST_IO, 0);
- msleep(10);
+ usleep_range(10000, 20000);
/* Take the cx24116 out of reset */
cx_write(MO_SRST_IO, 1);
- msleep(10);
+ usleep_range(10000, 20000);
return 0;
}
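
msleep() rounds its argument up to scheduler ticks, so on a HZ=100 kernel
msleep(10) can take 20 ms; usleep_range() runs off high-resolution timers
and the min/max pair gives the scheduler an explicit window for coalescing
wakeups. Documentation/timers/timers-howto.txt recommends usleep_range()
for delays roughly in the 10 us to 20 ms range. A user-space illustration
that nominal short sleeps overshoot their request (different mechanism,
same granularity point):

#include <stdio.h>
#include <time.h>

int main(void)
{
        struct timespec req = { 0, 10 * 1000 * 1000 };  /* ask for 10 ms */
        struct timespec t0, t1;

        clock_gettime(CLOCK_MONOTONIC, &t0);
        nanosleep(&req, NULL);
        clock_gettime(CLOCK_MONOTONIC, &t1);

        printf("slept %.3f ms\n",       /* typically a bit more than 10 */
               (t1.tv_sec - t0.tv_sec) * 1e3 +
               (t1.tv_nsec - t0.tv_nsec) / 1e6);
        return 0;
}
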
@@ -734,9 +732,10 @@ static const struct cx24116_config tevii_s460_config = {
};
static int ds3000_set_ts_param(struct dvb_frontend *fe,
- int is_punctured)
+ int is_punctured)
{
struct cx8802_dev *dev = fe->dvb->priv;
+
dev->ts_gen_cntrl = 4;
return 0;
@@ -800,12 +799,12 @@ static int cx8802_alloc_frontends(struct cx8802_dev *dev)
if (!core->board.num_frontends)
return -ENODEV;
- printk(KERN_INFO "%s() allocating %d frontend(s)\n", __func__,
- core->board.num_frontends);
+ pr_info("%s: allocating %d frontend(s)\n", __func__,
+ core->board.num_frontends);
for (i = 1; i <= core->board.num_frontends; i++) {
fe = vb2_dvb_alloc_frontend(&dev->frontends, i);
if (!fe) {
- printk(KERN_ERR "%s() failed to alloc\n", __func__);
+ pr_err("%s() failed to alloc\n", __func__);
vb2_dvb_dealloc_frontends(&dev->frontends);
return -ENOMEM;
}
@@ -813,8 +812,6 @@ static int cx8802_alloc_frontends(struct cx8802_dev *dev)
return 0;
}
-
-
static const u8 samsung_smt_7020_inittab[] = {
0x01, 0x15,
0x02, 0x00,
@@ -866,7 +863,6 @@ static const u8 samsung_smt_7020_inittab[] = {
0xff, 0xff,
};
-
static int samsung_smt_7020_tuner_set_params(struct dvb_frontend *fe)
{
struct dtv_frontend_properties *c = &fe->dtv_property_cache;
@@ -899,7 +895,7 @@ static int samsung_smt_7020_tuner_set_params(struct dvb_frontend *fe)
}
static int samsung_smt_7020_set_tone(struct dvb_frontend *fe,
- enum fe_sec_tone_mode tone)
+ enum fe_sec_tone_mode tone)
{
struct cx8802_dev *dev = fe->dvb->priv;
struct cx88_core *core = dev->core;
@@ -954,7 +950,7 @@ static int samsung_smt_7020_set_voltage(struct dvb_frontend *fe,
}
static int samsung_smt_7020_stv0299_set_symbol_rate(struct dvb_frontend *fe,
- u32 srate, u32 ratio)
+ u32 srate, u32 ratio)
{
u8 aclk = 0;
u8 bclk = 0;
@@ -988,7 +984,6 @@ static int samsung_smt_7020_stv0299_set_symbol_rate(struct dvb_frontend *fe,
return 0;
}
-
static const struct stv0299_config samsung_stv0299_config = {
.demod_address = 0x68,
.inittab = samsung_smt_7020_inittab,
@@ -1008,8 +1003,8 @@ static int dvb_register(struct cx8802_dev *dev)
int mfe_shared = 0; /* bus not shared by default */
int res = -EINVAL;
- if (0 != core->i2c_rc) {
- printk(KERN_ERR "%s/2: no i2c-bus available, cannot attach dvb drivers\n", core->name);
+ if (core->i2c_rc != 0) {
+ pr_err("no i2c-bus available, cannot attach dvb drivers\n");
goto frontend_detach;
}
@@ -1030,7 +1025,7 @@ static int dvb_register(struct cx8802_dev *dev)
fe0->dvb.frontend = dvb_attach(cx22702_attach,
&connexant_refboard_config,
&core->i2c_adap);
- if (fe0->dvb.frontend != NULL) {
+ if (fe0->dvb.frontend) {
if (!dvb_attach(dvb_pll_attach, fe0->dvb.frontend,
0x61, &core->i2c_adap,
DVB_PLL_THOMSON_DTT759X))
@@ -1044,7 +1039,7 @@ static int dvb_register(struct cx8802_dev *dev)
fe0->dvb.frontend = dvb_attach(cx22702_attach,
&connexant_refboard_config,
&core->i2c_adap);
- if (fe0->dvb.frontend != NULL) {
+ if (fe0->dvb.frontend) {
if (!dvb_attach(dvb_pll_attach, fe0->dvb.frontend,
0x60, &core->i2c_adap,
DVB_PLL_THOMSON_DTT7579))
@@ -1058,10 +1053,10 @@ static int dvb_register(struct cx8802_dev *dev)
fe0->dvb.frontend = dvb_attach(cx22702_attach,
&hauppauge_hvr_config,
&core->i2c_adap);
- if (fe0->dvb.frontend != NULL) {
+ if (fe0->dvb.frontend) {
if (!dvb_attach(simple_tuner_attach, fe0->dvb.frontend,
- &core->i2c_adap, 0x61,
- TUNER_PHILIPS_FMD1216ME_MK3))
+ &core->i2c_adap, 0x61,
+ TUNER_PHILIPS_FMD1216ME_MK3))
goto frontend_detach;
}
break;
@@ -1069,10 +1064,10 @@ static int dvb_register(struct cx8802_dev *dev)
fe0->dvb.frontend = dvb_attach(cx22702_attach,
&hauppauge_hvr_config,
&core->i2c_adap);
- if (fe0->dvb.frontend != NULL) {
+ if (fe0->dvb.frontend) {
if (!dvb_attach(simple_tuner_attach, fe0->dvb.frontend,
- &core->i2c_adap, 0x61,
- TUNER_PHILIPS_FMD1216MEX_MK3))
+ &core->i2c_adap, 0x61,
+ TUNER_PHILIPS_FMD1216MEX_MK3))
goto frontend_detach;
}
break;
@@ -1082,8 +1077,8 @@ static int dvb_register(struct cx8802_dev *dev)
dev->frontends.gate = 2;
/* DVB-S init */
fe0->dvb.frontend = dvb_attach(cx24123_attach,
- &hauppauge_novas_config,
- &dev->core->i2c_adap);
+ &hauppauge_novas_config,
+ &dev->core->i2c_adap);
if (fe0->dvb.frontend) {
if (!dvb_attach(isl6421_attach,
fe0->dvb.frontend,
@@ -1097,8 +1092,8 @@ static int dvb_register(struct cx8802_dev *dev)
goto frontend_detach;
/* DVB-T init */
fe1->dvb.frontend = dvb_attach(cx22702_attach,
- &hauppauge_hvr_config,
- &dev->core->i2c_adap);
+ &hauppauge_hvr_config,
+ &dev->core->i2c_adap);
if (fe1->dvb.frontend) {
fe1->dvb.frontend->id = 1;
if (!dvb_attach(simple_tuner_attach,
@@ -1112,7 +1107,7 @@ static int dvb_register(struct cx8802_dev *dev)
fe0->dvb.frontend = dvb_attach(mt352_attach,
&dvico_fusionhdtv,
&core->i2c_adap);
- if (fe0->dvb.frontend != NULL) {
+ if (fe0->dvb.frontend) {
if (!dvb_attach(dvb_pll_attach, fe0->dvb.frontend,
0x60, NULL, DVB_PLL_THOMSON_DTT7579))
goto frontend_detach;
@@ -1122,19 +1117,21 @@ static int dvb_register(struct cx8802_dev *dev)
fe0->dvb.frontend = dvb_attach(zl10353_attach,
&dvico_fusionhdtv_plus_v1_1,
&core->i2c_adap);
- if (fe0->dvb.frontend != NULL) {
+ if (fe0->dvb.frontend) {
if (!dvb_attach(dvb_pll_attach, fe0->dvb.frontend,
0x60, NULL, DVB_PLL_THOMSON_DTT7579))
goto frontend_detach;
}
break;
case CX88_BOARD_DVICO_FUSIONHDTV_DVB_T_DUAL:
- /* The tin box says DEE1601, but it seems to be DTT7579
- * compatible, with a slightly different MT352 AGC gain. */
+ /*
+ * The tin box says DEE1601, but it seems to be DTT7579
+ * compatible, with a slightly different MT352 AGC gain.
+ */
fe0->dvb.frontend = dvb_attach(mt352_attach,
&dvico_fusionhdtv_dual,
&core->i2c_adap);
- if (fe0->dvb.frontend != NULL) {
+ if (fe0->dvb.frontend) {
if (!dvb_attach(dvb_pll_attach, fe0->dvb.frontend,
0x61, NULL, DVB_PLL_THOMSON_DTT7579))
goto frontend_detach;
@@ -1144,7 +1141,7 @@ static int dvb_register(struct cx8802_dev *dev)
fe0->dvb.frontend = dvb_attach(zl10353_attach,
&dvico_fusionhdtv_plus_v1_1,
&core->i2c_adap);
- if (fe0->dvb.frontend != NULL) {
+ if (fe0->dvb.frontend) {
if (!dvb_attach(dvb_pll_attach, fe0->dvb.frontend,
0x61, NULL, DVB_PLL_THOMSON_DTT7579))
goto frontend_detach;
@@ -1154,7 +1151,7 @@ static int dvb_register(struct cx8802_dev *dev)
fe0->dvb.frontend = dvb_attach(mt352_attach,
&dvico_fusionhdtv,
&core->i2c_adap);
- if (fe0->dvb.frontend != NULL) {
+ if (fe0->dvb.frontend) {
if (!dvb_attach(dvb_pll_attach, fe0->dvb.frontend,
0x61, NULL, DVB_PLL_LG_Z201))
goto frontend_detach;
@@ -1166,7 +1163,7 @@ static int dvb_register(struct cx8802_dev *dev)
fe0->dvb.frontend = dvb_attach(mt352_attach,
&dntv_live_dvbt_config,
&core->i2c_adap);
- if (fe0->dvb.frontend != NULL) {
+ if (fe0->dvb.frontend) {
if (!dvb_attach(dvb_pll_attach, fe0->dvb.frontend,
0x61, NULL, DVB_PLL_UNKNOWN_1))
goto frontend_detach;
@@ -1175,27 +1172,27 @@ static int dvb_register(struct cx8802_dev *dev)
case CX88_BOARD_DNTV_LIVE_DVB_T_PRO:
#if IS_ENABLED(CONFIG_VIDEO_CX88_VP3054)
/* MT352 is on a secondary I2C bus made from some GPIO lines */
- fe0->dvb.frontend = dvb_attach(mt352_attach, &dntv_live_dvbt_pro_config,
+ fe0->dvb.frontend = dvb_attach(mt352_attach,
+ &dntv_live_dvbt_pro_config,
&dev->vp3054->adap);
- if (fe0->dvb.frontend != NULL) {
+ if (fe0->dvb.frontend) {
if (!dvb_attach(simple_tuner_attach, fe0->dvb.frontend,
&core->i2c_adap, 0x61,
TUNER_PHILIPS_FMD1216ME_MK3))
goto frontend_detach;
}
#else
- printk(KERN_ERR "%s/2: built without vp3054 support\n",
- core->name);
+ pr_err("built without vp3054 support\n");
#endif
break;
case CX88_BOARD_DVICO_FUSIONHDTV_DVB_T_HYBRID:
fe0->dvb.frontend = dvb_attach(zl10353_attach,
&dvico_fusionhdtv_hybrid,
&core->i2c_adap);
- if (fe0->dvb.frontend != NULL) {
+ if (fe0->dvb.frontend) {
if (!dvb_attach(simple_tuner_attach, fe0->dvb.frontend,
- &core->i2c_adap, 0x61,
- TUNER_THOMSON_FE6600))
+ &core->i2c_adap, 0x61,
+ TUNER_THOMSON_FE6600))
goto frontend_detach;
}
break;
@@ -1203,7 +1200,7 @@ static int dvb_register(struct cx8802_dev *dev)
fe0->dvb.frontend = dvb_attach(zl10353_attach,
&dvico_fusionhdtv_xc3028,
&core->i2c_adap);
- if (fe0->dvb.frontend == NULL)
+ if (!fe0->dvb.frontend)
fe0->dvb.frontend = dvb_attach(mt352_attach,
&dvico_fusionhdtv_mt352_xc3028,
&core->i2c_adap);
@@ -1220,7 +1217,7 @@ static int dvb_register(struct cx8802_dev *dev)
case CX88_BOARD_PCHDTV_HD3000:
fe0->dvb.frontend = dvb_attach(or51132_attach, &pchdtv_hd3000,
&core->i2c_adap);
- if (fe0->dvb.frontend != NULL) {
+ if (fe0->dvb.frontend) {
if (!dvb_attach(simple_tuner_attach, fe0->dvb.frontend,
&core->i2c_adap, 0x61,
TUNER_THOMSON_DTT761X))
@@ -1241,7 +1238,7 @@ static int dvb_register(struct cx8802_dev *dev)
fe0->dvb.frontend = dvb_attach(lgdt330x_attach,
&fusionhdtv_3_gold,
&core->i2c_adap);
- if (fe0->dvb.frontend != NULL) {
+ if (fe0->dvb.frontend) {
if (!dvb_attach(simple_tuner_attach, fe0->dvb.frontend,
&core->i2c_adap, 0x61,
TUNER_MICROTUNE_4042FI5))
@@ -1259,7 +1256,7 @@ static int dvb_register(struct cx8802_dev *dev)
fe0->dvb.frontend = dvb_attach(lgdt330x_attach,
&fusionhdtv_3_gold,
&core->i2c_adap);
- if (fe0->dvb.frontend != NULL) {
+ if (fe0->dvb.frontend) {
if (!dvb_attach(simple_tuner_attach, fe0->dvb.frontend,
&core->i2c_adap, 0x61,
TUNER_THOMSON_DTT761X))
@@ -1277,13 +1274,13 @@ static int dvb_register(struct cx8802_dev *dev)
fe0->dvb.frontend = dvb_attach(lgdt330x_attach,
&fusionhdtv_5_gold,
&core->i2c_adap);
- if (fe0->dvb.frontend != NULL) {
+ if (fe0->dvb.frontend) {
if (!dvb_attach(simple_tuner_attach, fe0->dvb.frontend,
&core->i2c_adap, 0x61,
TUNER_LG_TDVS_H06XF))
goto frontend_detach;
if (!dvb_attach(tda9887_attach, fe0->dvb.frontend,
- &core->i2c_adap, 0x43))
+ &core->i2c_adap, 0x43))
goto frontend_detach;
}
break;
@@ -1298,13 +1295,13 @@ static int dvb_register(struct cx8802_dev *dev)
fe0->dvb.frontend = dvb_attach(lgdt330x_attach,
&pchdtv_hd5500,
&core->i2c_adap);
- if (fe0->dvb.frontend != NULL) {
+ if (fe0->dvb.frontend) {
if (!dvb_attach(simple_tuner_attach, fe0->dvb.frontend,
&core->i2c_adap, 0x61,
TUNER_LG_TDVS_H06XF))
goto frontend_detach;
if (!dvb_attach(tda9887_attach, fe0->dvb.frontend,
- &core->i2c_adap, 0x43))
+ &core->i2c_adap, 0x43))
goto frontend_detach;
}
break;
@@ -1312,7 +1309,7 @@ static int dvb_register(struct cx8802_dev *dev)
fe0->dvb.frontend = dvb_attach(nxt200x_attach,
&ati_hdtvwonder,
&core->i2c_adap);
- if (fe0->dvb.frontend != NULL) {
+ if (fe0->dvb.frontend) {
if (!dvb_attach(simple_tuner_attach, fe0->dvb.frontend,
&core->i2c_adap, 0x61,
TUNER_PHILIPS_TUV1236D))
@@ -1333,8 +1330,8 @@ static int dvb_register(struct cx8802_dev *dev)
override_tone = false;
if (!dvb_attach(isl6421_attach, fe0->dvb.frontend,
- &core->i2c_adap, 0x08, ISL6421_DCL, 0x00,
- override_tone))
+ &core->i2c_adap, 0x08, ISL6421_DCL,
+ 0x00, override_tone))
goto frontend_detach;
}
break;
@@ -1360,7 +1357,7 @@ static int dvb_register(struct cx8802_dev *dev)
fe0->dvb.frontend = dvb_attach(s5h1409_attach,
&pinnacle_pctv_hd_800i_config,
&core->i2c_adap);
- if (fe0->dvb.frontend != NULL) {
+ if (fe0->dvb.frontend) {
if (!dvb_attach(xc5000_attach, fe0->dvb.frontend,
&core->i2c_adap,
&pinnacle_pctv_hd_800i_tuner_config))
@@ -1369,9 +1366,9 @@ static int dvb_register(struct cx8802_dev *dev)
break;
case CX88_BOARD_DVICO_FUSIONHDTV_5_PCI_NANO:
fe0->dvb.frontend = dvb_attach(s5h1409_attach,
- &dvico_hdtv5_pci_nano_config,
- &core->i2c_adap);
- if (fe0->dvb.frontend != NULL) {
+ &dvico_hdtv5_pci_nano_config,
+ &core->i2c_adap);
+ if (fe0->dvb.frontend) {
struct dvb_frontend *fe;
struct xc2028_config cfg = {
.i2c_adap = &core->i2c_adap,
@@ -1385,7 +1382,7 @@ static int dvb_register(struct cx8802_dev *dev)
fe = dvb_attach(xc2028_attach,
fe0->dvb.frontend, &cfg);
- if (fe != NULL && fe->ops.tuner_ops.set_config != NULL)
+ if (fe && fe->ops.tuner_ops.set_config)
fe->ops.tuner_ops.set_config(fe, &ctl);
}
break;
@@ -1427,7 +1424,7 @@ static int dvb_register(struct cx8802_dev *dev)
if (attach_xc3028(0x61, dev) < 0)
goto frontend_detach;
break;
- case CX88_BOARD_KWORLD_ATSC_120:
+ case CX88_BOARD_KWORLD_ATSC_120:
fe0->dvb.frontend = dvb_attach(s5h1409_attach,
&kworld_atsc_120_config,
&core->i2c_adap);
@@ -1438,7 +1435,7 @@ static int dvb_register(struct cx8802_dev *dev)
fe0->dvb.frontend = dvb_attach(s5h1411_attach,
&dvico_fusionhdtv7_config,
&core->i2c_adap);
- if (fe0->dvb.frontend != NULL) {
+ if (fe0->dvb.frontend) {
if (!dvb_attach(xc5000_attach, fe0->dvb.frontend,
&core->i2c_adap,
&dvico_fusionhdtv7_tuner_config))
@@ -1451,8 +1448,8 @@ static int dvb_register(struct cx8802_dev *dev)
dev->frontends.gate = 2;
/* DVB-S/S2 Init */
fe0->dvb.frontend = dvb_attach(cx24116_attach,
- &hauppauge_hvr4000_config,
- &dev->core->i2c_adap);
+ &hauppauge_hvr4000_config,
+ &dev->core->i2c_adap);
if (fe0->dvb.frontend) {
if (!dvb_attach(isl6421_attach,
fe0->dvb.frontend,
@@ -1466,8 +1463,8 @@ static int dvb_register(struct cx8802_dev *dev)
goto frontend_detach;
/* DVB-T Init */
fe1->dvb.frontend = dvb_attach(cx22702_attach,
- &hauppauge_hvr_config,
- &dev->core->i2c_adap);
+ &hauppauge_hvr_config,
+ &dev->core->i2c_adap);
if (fe1->dvb.frontend) {
fe1->dvb.frontend->id = 1;
if (!dvb_attach(simple_tuner_attach,
@@ -1479,8 +1476,8 @@ static int dvb_register(struct cx8802_dev *dev)
break;
case CX88_BOARD_HAUPPAUGE_HVR4000LITE:
fe0->dvb.frontend = dvb_attach(cx24116_attach,
- &hauppauge_hvr4000_config,
- &dev->core->i2c_adap);
+ &hauppauge_hvr4000_config,
+ &dev->core->i2c_adap);
if (fe0->dvb.frontend) {
if (!dvb_attach(isl6421_attach,
fe0->dvb.frontend,
@@ -1495,7 +1492,7 @@ static int dvb_register(struct cx8802_dev *dev)
fe0->dvb.frontend = dvb_attach(stv0299_attach,
&tevii_tuner_sharp_config,
&core->i2c_adap);
- if (fe0->dvb.frontend != NULL) {
+ if (fe0->dvb.frontend) {
if (!dvb_attach(dvb_pll_attach, fe0->dvb.frontend, 0x60,
&core->i2c_adap, DVB_PLL_OPERA1))
goto frontend_detach;
@@ -1506,8 +1503,9 @@ static int dvb_register(struct cx8802_dev *dev)
fe0->dvb.frontend = dvb_attach(stv0288_attach,
&tevii_tuner_earda_config,
&core->i2c_adap);
- if (fe0->dvb.frontend != NULL) {
- if (!dvb_attach(stb6000_attach, fe0->dvb.frontend, 0x61,
+ if (fe0->dvb.frontend) {
+ if (!dvb_attach(stb6000_attach,
+ fe0->dvb.frontend, 0x61,
&core->i2c_adap))
goto frontend_detach;
core->prev_set_voltage = fe0->dvb.frontend->ops.set_voltage;
@@ -1519,16 +1517,16 @@ static int dvb_register(struct cx8802_dev *dev)
fe0->dvb.frontend = dvb_attach(cx24116_attach,
&tevii_s460_config,
&core->i2c_adap);
- if (fe0->dvb.frontend != NULL)
+ if (fe0->dvb.frontend)
fe0->dvb.frontend->ops.set_voltage = tevii_dvbs_set_voltage;
break;
case CX88_BOARD_TEVII_S464:
fe0->dvb.frontend = dvb_attach(ds3000_attach,
&tevii_ds3000_config,
&core->i2c_adap);
- if (fe0->dvb.frontend != NULL) {
+ if (fe0->dvb.frontend) {
dvb_attach(ts2020_attach, fe0->dvb.frontend,
- &tevii_ts2020_config, &core->i2c_adap);
+ &tevii_ts2020_config, &core->i2c_adap);
fe0->dvb.frontend->ops.set_voltage =
tevii_dvbs_set_voltage;
}
@@ -1540,7 +1538,7 @@ static int dvb_register(struct cx8802_dev *dev)
fe0->dvb.frontend = dvb_attach(cx24116_attach,
&hauppauge_hvr4000_config,
&core->i2c_adap);
- if (fe0->dvb.frontend != NULL)
+ if (fe0->dvb.frontend)
fe0->dvb.frontend->ops.set_voltage = tevii_dvbs_set_voltage;
break;
case CX88_BOARD_TERRATEC_CINERGY_HT_PCI_MKII:
@@ -1557,9 +1555,9 @@ static int dvb_register(struct cx8802_dev *dev)
struct dvb_tuner_ops *tuner_ops = NULL;
fe0->dvb.frontend = dvb_attach(stv0900_attach,
- &prof_7301_stv0900_config,
- &core->i2c_adap, 0);
- if (fe0->dvb.frontend != NULL) {
+ &prof_7301_stv0900_config,
+ &core->i2c_adap, 0);
+ if (fe0->dvb.frontend) {
if (!dvb_attach(stb6100_attach, fe0->dvb.frontend,
&prof_7301_stb6100_config,
&core->i2c_adap))
@@ -1589,8 +1587,8 @@ static int dvb_register(struct cx8802_dev *dev)
mdelay(200);
fe0->dvb.frontend = dvb_attach(stv0299_attach,
- &samsung_stv0299_config,
- &dev->core->i2c_adap);
+ &samsung_stv0299_config,
+ &dev->core->i2c_adap);
if (fe0->dvb.frontend) {
fe0->dvb.frontend->ops.tuner_ops.set_params =
samsung_smt_7020_tuner_set_params;
@@ -1606,8 +1604,8 @@ static int dvb_register(struct cx8802_dev *dev)
case CX88_BOARD_TWINHAN_VP1027_DVBS:
dev->ts_gen_cntrl = 0x00;
fe0->dvb.frontend = dvb_attach(mb86a16_attach,
- &twinhan_vp1027,
- &core->i2c_adap);
+ &twinhan_vp1027,
+ &core->i2c_adap);
if (fe0->dvb.frontend) {
core->prev_set_voltage =
fe0->dvb.frontend->ops.set_voltage;
@@ -1617,15 +1615,12 @@ static int dvb_register(struct cx8802_dev *dev)
break;
default:
- printk(KERN_ERR "%s/2: The frontend of your DVB/ATSC card isn't supported yet\n",
- core->name);
+ pr_err("The frontend of your DVB/ATSC card isn't supported yet\n");
break;
}
- if ( (NULL == fe0->dvb.frontend) || (fe1 && NULL == fe1->dvb.frontend) ) {
- printk(KERN_ERR
- "%s/2: frontend initialization failed\n",
- core->name);
+ if (!fe0->dvb.frontend || (fe1 && !fe1->dvb.frontend)) {
+ pr_err("frontend initialization failed\n");
goto frontend_detach;
}
/* define general-purpose callback pointer */
@@ -1660,7 +1655,8 @@ static int cx8802_dvb_advise_acquire(struct cx8802_driver *drv)
{
struct cx88_core *core = drv->core;
int err = 0;
- dprintk( 1, "%s\n", __func__);
+
+ dprintk(1, "%s\n", __func__);
switch (core->boardnr) {
case CX88_BOARD_HAUPPAUGE_HVR1300:
@@ -1724,7 +1720,8 @@ static int cx8802_dvb_advise_release(struct cx8802_driver *drv)
{
struct cx88_core *core = drv->core;
int err = 0;
- dprintk( 1, "%s\n", __func__);
+
+ dprintk(1, "%s\n", __func__);
switch (core->boardnr) {
case CX88_BOARD_HAUPPAUGE_HVR1300:
@@ -1747,8 +1744,8 @@ static int cx8802_dvb_probe(struct cx8802_driver *drv)
struct vb2_dvb_frontend *fe;
int i;
- dprintk( 1, "%s\n", __func__);
- dprintk( 1, " ->being probed by Card=%d Name=%s, PCI %02x:%02x\n",
+ dprintk(1, "%s\n", __func__);
+ dprintk(1, " ->being probed by Card=%d Name=%s, PCI %02x:%02x\n",
core->boardnr,
core->name,
core->pci_bus,
@@ -1760,25 +1757,25 @@ static int cx8802_dvb_probe(struct cx8802_driver *drv)
/* If vp3054 isn't enabled, a stub will just return 0 */
err = vp3054_i2c_probe(dev);
- if (0 != err)
+ if (err != 0)
goto fail_core;
/* dvb stuff */
- printk(KERN_INFO "%s/2: cx2388x based DVB/ATSC card\n", core->name);
+ pr_info("cx2388x based DVB/ATSC card\n");
dev->ts_gen_cntrl = 0x0c;
err = cx8802_alloc_frontends(dev);
if (err)
goto fail_core;
- err = -ENODEV;
for (i = 1; i <= core->board.num_frontends; i++) {
struct vb2_queue *q;
fe = vb2_dvb_get_frontend(&core->dvbdev->frontends, i);
- if (fe == NULL) {
- printk(KERN_ERR "%s() failed to get frontend(%d)\n",
- __func__, i);
+ if (!fe) {
+ pr_err("%s() failed to get frontend(%d)\n",
+ __func__, i);
+ err = -ENODEV;
goto fail_probe;
}
q = &fe->dvb.dvbq;
@@ -1805,8 +1802,7 @@ static int cx8802_dvb_probe(struct cx8802_driver *drv)
err = dvb_register(dev);
if (err)
/* frontends/adapter de-allocated in dvb_register */
- printk(KERN_ERR "%s/2: dvb_register failed (err = %d)\n",
- core->name, err);
+ pr_err("dvb_register failed (err = %d)\n", err);
return err;
fail_probe:
vb2_dvb_dealloc_frontends(&core->dvbdev->frontends);
@@ -1819,7 +1815,7 @@ static int cx8802_dvb_remove(struct cx8802_driver *drv)
struct cx88_core *core = drv->core;
struct cx8802_dev *dev = drv->core->dvbdev;
- dprintk( 1, "%s\n", __func__);
+ dprintk(1, "%s\n", __func__);
vb2_dvb_unregister_bus(&dev->frontends);
@@ -1841,8 +1837,7 @@ static struct cx8802_driver cx8802_dvb_driver = {
static int __init dvb_init(void)
{
- printk(KERN_INFO "cx88/2: cx2388x dvb driver version %s loaded\n",
- CX88_VERSION);
+ pr_info("cx2388x dvb driver version %s loaded\n", CX88_VERSION);
return cx8802_register_driver(&cx8802_dvb_driver);
}
diff --git a/drivers/media/pci/cx88/cx88-i2c.c b/drivers/media/pci/cx88/cx88-i2c.c
index cf2d69615838..f7692775fb5a 100644
--- a/drivers/media/pci/cx88/cx88-i2c.c
+++ b/drivers/media/pci/cx88/cx88-i2c.c
@@ -1,55 +1,53 @@
/*
+ * cx88-i2c.c -- all the i2c code is here
+ *
+ * Copyright (C) 1996,97,98 Ralph Metzler (rjkm@thp.uni-koeln.de)
+ * & Marcus Metzler (mocm@thp.uni-koeln.de)
+ * (c) 2002 Yurij Sysoev <yurij@naturesoft.net>
+ * (c) 1999-2003 Gerd Knorr <kraxel@bytesex.org>
+ *
+ * (c) 2005 Mauro Carvalho Chehab <mchehab@infradead.org>
+ * - Multituner support and i2c address binding
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
- cx88-i2c.c -- all the i2c code is here
-
- Copyright (C) 1996,97,98 Ralph Metzler (rjkm@thp.uni-koeln.de)
- & Marcus Metzler (mocm@thp.uni-koeln.de)
- (c) 2002 Yurij Sysoev <yurij@naturesoft.net>
- (c) 1999-2003 Gerd Knorr <kraxel@bytesex.org>
-
- (c) 2005 Mauro Carvalho Chehab <mchehab@infradead.org>
- - Multituner support and i2c address binding
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-
-*/
+#include "cx88.h"
-#include <linux/module.h>
#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/module.h>
-#include <asm/io.h>
-
-#include "cx88.h"
#include <media/v4l2-common.h>
static unsigned int i2c_debug;
module_param(i2c_debug, int, 0644);
-MODULE_PARM_DESC(i2c_debug,"enable debug messages [i2c]");
+MODULE_PARM_DESC(i2c_debug, "enable debug messages [i2c]");
static unsigned int i2c_scan;
module_param(i2c_scan, int, 0444);
-MODULE_PARM_DESC(i2c_scan,"scan i2c bus at insmod time");
+MODULE_PARM_DESC(i2c_scan, "scan i2c bus at insmod time");
static unsigned int i2c_udelay = 5;
module_param(i2c_udelay, int, 0644);
-MODULE_PARM_DESC(i2c_udelay,"i2c delay at insmod time, in usecs "
- "(should be 5 or higher). Lower value means higher bus speed.");
+MODULE_PARM_DESC(i2c_udelay,
+ "i2c delay at insmod time, in usecs (should be 5 or higher). Lower value means higher bus speed.");
-#define dprintk(level,fmt, arg...) if (i2c_debug >= level) \
- printk(KERN_DEBUG "%s: " fmt, core->name , ## arg)
+#define dprintk(level, fmt, arg...) do { \
+ if (i2c_debug >= level) \
+ printk(KERN_DEBUG pr_fmt("%s: i2c:" fmt), \
+ __func__, ##arg); \
+} while (0)
/* ----------------------------------------------------------------------- */
@@ -109,26 +107,26 @@ static const struct i2c_algo_bit_data cx8800_i2c_algo_template = {
/* ----------------------------------------------------------------------- */
static const char * const i2c_devs[128] = {
- [ 0x1c >> 1 ] = "lgdt330x",
- [ 0x86 >> 1 ] = "tda9887/cx22702",
- [ 0xa0 >> 1 ] = "eeprom",
- [ 0xc0 >> 1 ] = "tuner (analog)",
- [ 0xc2 >> 1 ] = "tuner (analog/dvb)",
- [ 0xc8 >> 1 ] = "xc5000",
+ [0x1c >> 1] = "lgdt330x",
+ [0x86 >> 1] = "tda9887/cx22702",
+ [0xa0 >> 1] = "eeprom",
+ [0xc0 >> 1] = "tuner (analog)",
+ [0xc2 >> 1] = "tuner (analog/dvb)",
+ [0xc8 >> 1] = "xc5000",
};
static void do_i2c_scan(const char *name, struct i2c_client *c)
{
unsigned char buf;
- int i,rc;
+ int i, rc;
for (i = 0; i < ARRAY_SIZE(i2c_devs); i++) {
c->addr = i;
- rc = i2c_master_recv(c,&buf,0);
+ rc = i2c_master_recv(c, &buf, 0);
if (rc < 0)
continue;
- printk("%s: i2c scan: found device @ 0x%x [%s]\n",
- name, i << 1, i2c_devs[i] ? i2c_devs[i] : "???");
+ pr_info("i2c scan: found device @ 0x%x [%s]\n",
+ i << 1, i2c_devs[i] ? i2c_devs[i] : "???");
}
}
@@ -136,14 +134,13 @@ static void do_i2c_scan(const char *name, struct i2c_client *c)
int cx88_i2c_init(struct cx88_core *core, struct pci_dev *pci)
{
/* Prevents usage of invalid delay values */
- if (i2c_udelay<5)
- i2c_udelay=5;
+ if (i2c_udelay < 5)
+ i2c_udelay = 5;
core->i2c_algo = cx8800_i2c_algo_template;
-
core->i2c_adap.dev.parent = &pci->dev;
- strlcpy(core->i2c_adap.name,core->name,sizeof(core->i2c_adap.name));
+ strlcpy(core->i2c_adap.name, core->name, sizeof(core->i2c_adap.name));
core->i2c_adap.owner = THIS_MODULE;
core->i2c_algo.udelay = i2c_udelay;
core->i2c_algo.data = core;
@@ -152,32 +149,35 @@ int cx88_i2c_init(struct cx88_core *core, struct pci_dev *pci)
core->i2c_client.adapter = &core->i2c_adap;
strlcpy(core->i2c_client.name, "cx88xx internal", I2C_NAME_SIZE);
- cx8800_bit_setscl(core,1);
- cx8800_bit_setsda(core,1);
+ cx8800_bit_setscl(core, 1);
+ cx8800_bit_setsda(core, 1);
core->i2c_rc = i2c_bit_add_bus(&core->i2c_adap);
- if (0 == core->i2c_rc) {
- static u8 tuner_data[] =
- { 0x0b, 0xdc, 0x86, 0x52 };
- static struct i2c_msg tuner_msg =
- { .flags = 0, .addr = 0xc2 >> 1, .buf = tuner_data, .len = 4 };
+ if (core->i2c_rc == 0) {
+ static u8 tuner_data[] = {
+ 0x0b, 0xdc, 0x86, 0x52 };
+ static struct i2c_msg tuner_msg = {
+ .flags = 0,
+ .addr = 0xc2 >> 1,
+ .buf = tuner_data,
+ .len = 4
+ };
dprintk(1, "i2c register ok\n");
- switch( core->boardnr ) {
- case CX88_BOARD_HAUPPAUGE_HVR1300:
- case CX88_BOARD_HAUPPAUGE_HVR3000:
- case CX88_BOARD_HAUPPAUGE_HVR4000:
- printk("%s: i2c init: enabling analog demod on HVR1300/3000/4000 tuner\n",
- core->name);
- i2c_transfer(core->i2c_client.adapter, &tuner_msg, 1);
- break;
- default:
- break;
+ switch (core->boardnr) {
+ case CX88_BOARD_HAUPPAUGE_HVR1300:
+ case CX88_BOARD_HAUPPAUGE_HVR3000:
+ case CX88_BOARD_HAUPPAUGE_HVR4000:
+ pr_info("i2c init: enabling analog demod on HVR1300/3000/4000 tuner\n");
+ i2c_transfer(core->i2c_client.adapter, &tuner_msg, 1);
+ break;
+ default:
+ break;
}
if (i2c_scan)
- do_i2c_scan(core->name,&core->i2c_client);
+ do_i2c_scan(core->name, &core->i2c_client);
} else
- printk("%s: i2c register FAILED\n", core->name);
+ pr_err("i2c register FAILED\n");
return core->i2c_rc;
}
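
cx88_i2c_init() registers a bit-banged bus: the driver only supplies the
SCL/SDA callbacks and the udelay value, and i2c_bit_add_bus() synthesizes
the waveform. A compileable sketch of the signalling that layer produces
(hypothetical callbacks, delays omitted; not the kernel API):

#include <stdio.h>
#include <stdint.h>

/* hypothetical callbacks; in the driver these poke GPIO bits */
static void set_scl(int v) { printf("SCL=%d ", v); }
static void set_sda(int v) { printf("SDA=%d ", v); }
static int read_sda(void) { return 0; }         /* pretend the slave ACKs */

static void i2c_start(void)
{
        set_sda(1); set_scl(1);
        set_sda(0);             /* START: SDA falls while SCL is high */
        set_scl(0);
        printf("<start>\n");
}

static int i2c_write_byte(uint8_t byte)
{
        int i, ack;

        for (i = 7; i >= 0; i--) {      /* MSB first, data valid while SCL high */
                set_sda((byte >> i) & 1);
                set_scl(1);
                set_scl(0);
        }
        set_sda(1);             /* release SDA for the ACK slot */
        set_scl(1);
        ack = !read_sda();      /* slave pulls SDA low to acknowledge */
        set_scl(0);
        printf("<ack=%d>\n", ack);
        return ack;
}

int main(void)
{
        i2c_start();
        i2c_write_byte(0xc2);   /* the tuner address byte used in tuner_msg */
        return 0;
}
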
diff --git a/drivers/media/pci/cx88/cx88-input.c b/drivers/media/pci/cx88/cx88-input.c
index cd7687183381..dcfea3502e42 100644
--- a/drivers/media/pci/cx88/cx88-input.c
+++ b/drivers/media/pci/cx88/cx88-input.c
@@ -16,19 +16,16 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
+#include "cx88.h"
+
#include <linux/init.h>
#include <linux/hrtimer.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/module.h>
-#include "cx88.h"
#include <media/rc-core.h>
#define MODULE_NAME "cx88xx"
@@ -57,7 +54,7 @@ struct cx88_IR {
u32 mask_keyup;
};
-static unsigned ir_samplerate = 4;
+static unsigned int ir_samplerate = 4;
module_param(ir_samplerate, uint, 0444);
MODULE_PARM_DESC(ir_samplerate, "IR samplerate in kHz, 1 - 20, default 4");
@@ -65,11 +62,15 @@ static int ir_debug;
module_param(ir_debug, int, 0644); /* debug level [IR] */
MODULE_PARM_DESC(ir_debug, "enable debug messages [IR]");
-#define ir_dprintk(fmt, arg...) if (ir_debug) \
- printk(KERN_DEBUG "%s IR: " fmt , ir->core->name , ##arg)
+#define ir_dprintk(fmt, arg...) do { \
+ if (ir_debug) \
+ printk(KERN_DEBUG "%s IR: " fmt, ir->core->name, ##arg);\
+} while (0)
-#define dprintk(fmt, arg...) if (ir_debug) \
- printk(KERN_DEBUG "cx88 IR: " fmt , ##arg)
+#define dprintk(fmt, arg...) do { \
+ if (ir_debug) \
+ printk(KERN_DEBUG "cx88 IR: " fmt, ##arg); \
+} while (0)
/* ---------------------------------------------------------------------- */
@@ -82,21 +83,22 @@ static void cx88_ir_handle_key(struct cx88_IR *ir)
gpio = cx_read(ir->gpio_addr);
switch (core->boardnr) {
case CX88_BOARD_NPGTECH_REALTV_TOP10FM:
- /* This board apparently uses a combination of 2 GPIO
- to represent the keys. Additionally, the second GPIO
- can be used for parity.
-
- Example:
-
- for key "5"
- gpio = 0x758, auxgpio = 0xe5 or 0xf5
- for key "Power"
- gpio = 0x758, auxgpio = 0xed or 0xfd
+ /*
+ * This board apparently uses a combination of 2 GPIOs
+ * to represent the keys. Additionally, the second GPIO
+ * can be used for parity.
+ *
+ * Example:
+ *
+ * for key "5"
+ * gpio = 0x758, auxgpio = 0xe5 or 0xf5
+ * for key "Power"
+ * gpio = 0x758, auxgpio = 0xed or 0xfd
*/
auxgpio = cx_read(MO_GP1_IO);
/* Take out the parity part */
- gpio=(gpio & 0x7fd) + (auxgpio & 0xef);
+ gpio = (gpio & 0x7fd) + (auxgpio & 0xef);
break;
case CX88_BOARD_WINFAST_DTV1000:
case CX88_BOARD_WINFAST_DTV1800H:
@@ -145,7 +147,7 @@ static void cx88_ir_handle_key(struct cx88_IR *ir)
if (0 == (gpio & ir->mask_keyup))
rc_keydown_notimeout(ir->dev, RC_TYPE_NECX, scancode,
- 0);
+ 0);
else
rc_keyup(ir->dev);
@@ -234,12 +236,14 @@ int cx88_ir_start(struct cx88_core *core)
return 0;
}
+EXPORT_SYMBOL(cx88_ir_start);
void cx88_ir_stop(struct cx88_core *core)
{
if (core->ir->users)
__cx88_ir_stop(core);
}
+EXPORT_SYMBOL(cx88_ir_stop);
static int cx88_ir_open(struct rc_dev *rc)
{
@@ -511,7 +515,7 @@ int cx88_ir_fini(struct cx88_core *core)
struct cx88_IR *ir = core->ir;
/* skip detach on non attached boards */
- if (NULL == ir)
+ if (!ir)
return 0;
cx88_ir_stop(core);
@@ -529,7 +533,7 @@ void cx88_ir_irq(struct cx88_core *core)
{
struct cx88_IR *ir = core->ir;
u32 samples;
- unsigned todo, bits;
+ unsigned int todo, bits;
struct ir_raw_event ev;
if (!ir || !ir->sampling)
@@ -579,7 +583,7 @@ static int get_key_pvr2000(struct IR_i2c *ir, enum rc_type *protocol,
}
dprintk("IR Key/Flags: (0x%02x/0x%02x)\n",
- code & 0xff, flags & 0xff);
+ code & 0xff, flags & 0xff);
*protocol = RC_TYPE_UNKNOWN;
*scancode = code & 0xff;
@@ -601,7 +605,7 @@ void cx88_i2c_init_ir(struct cx88_core *core)
const unsigned short *addr_list = default_addr_list;
const unsigned short *addrp;
/* Instantiate the IR receiver device, if present */
- if (0 != core->i2c_rc)
+ if (core->i2c_rc != 0)
return;
memset(&info, 0, sizeof(struct i2c_board_info));
@@ -639,8 +643,8 @@ void cx88_i2c_init_ir(struct cx88_core *core)
info.platform_data = &core->init_data;
}
if (i2c_smbus_xfer(&core->i2c_adap, *addrp, 0,
- I2C_SMBUS_READ, 0,
- I2C_SMBUS_QUICK, NULL) >= 0) {
+ I2C_SMBUS_READ, 0,
+ I2C_SMBUS_QUICK, NULL) >= 0) {
info.addr = *addrp;
i2c_new_device(&core->i2c_adap, &info);
break;
diff --git a/drivers/media/pci/cx88/cx88-mpeg.c b/drivers/media/pci/cx88/cx88-mpeg.c
index 245357adbc25..52ff00ebd4bd 100644
--- a/drivers/media/pci/cx88/cx88-mpeg.c
+++ b/drivers/media/pci/cx88/cx88-mpeg.c
@@ -16,21 +16,17 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
+#include "cx88.h"
+
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
-#include <asm/delay.h>
-
-#include "cx88.h"
+#include <linux/delay.h>
/* ------------------------------------------------------------------ */
@@ -42,23 +38,20 @@ MODULE_LICENSE("GPL");
MODULE_VERSION(CX88_VERSION);
static unsigned int debug;
-module_param(debug,int,0644);
-MODULE_PARM_DESC(debug,"enable debug messages [mpeg]");
+module_param(debug, int, 0644);
+MODULE_PARM_DESC(debug, "enable debug messages [mpeg]");
-#define dprintk(level, fmt, arg...) do { \
- if (debug + 1 > level) \
- printk(KERN_DEBUG "%s/2-mpeg: " fmt, dev->core->name, ## arg); \
-} while(0)
-
-#define mpeg_dbg(level, fmt, arg...) do { \
- if (debug + 1 > level) \
- printk(KERN_DEBUG "%s/2-mpeg: " fmt, core->name, ## arg); \
-} while(0)
+#define dprintk(level, fmt, arg...) do { \
+ if (debug + 1 > level) \
+ printk(KERN_DEBUG pr_fmt("%s: mpeg:" fmt), \
+ __func__, ##arg); \
+} while (0)
#if defined(CONFIG_MODULES) && defined(MODULE)
static void request_module_async(struct work_struct *work)
{
- struct cx8802_dev *dev=container_of(work, struct cx8802_dev, request_module_wk);
+ struct cx8802_dev *dev = container_of(work, struct cx8802_dev,
+ request_module_wk);
if (dev->core->board.mpeg & CX88_MPEG_DVB)
request_module("cx88-dvb");
@@ -81,18 +74,17 @@ static void flush_request_modules(struct cx8802_dev *dev)
#define flush_request_modules(dev)
#endif /* CONFIG_MODULES */
-
static LIST_HEAD(cx8802_devlist);
static DEFINE_MUTEX(cx8802_mutex);
/* ------------------------------------------------------------------ */
int cx8802_start_dma(struct cx8802_dev *dev,
- struct cx88_dmaqueue *q,
- struct cx88_buffer *buf)
+ struct cx88_dmaqueue *q,
+ struct cx88_buffer *buf)
{
struct cx88_core *core = dev->core;
- dprintk(1, "cx8802_start_dma w: %d, h: %d, f: %d\n",
+ dprintk(1, "w: %d, h: %d, f: %d\n",
core->width, core->height, core->field);
/* setup fifo + format */
@@ -102,33 +94,35 @@ int cx8802_start_dma(struct cx8802_dev *dev,
/* write TS length to chip */
cx_write(MO_TS_LNGTH, dev->ts_packet_size);
- /* FIXME: this needs a review.
- * also: move to cx88-blackbird + cx88-dvb source files? */
+ /*
+ * FIXME: this needs a review.
+ * also: move to cx88-blackbird + cx88-dvb source files?
+ */
- dprintk( 1, "core->active_type_id = 0x%08x\n", core->active_type_id);
+ dprintk(1, "core->active_type_id = 0x%08x\n", core->active_type_id);
- if ( (core->active_type_id == CX88_MPEG_DVB) &&
- (core->board.mpeg & CX88_MPEG_DVB) ) {
-
- dprintk( 1, "cx8802_start_dma doing .dvb\n");
+ if ((core->active_type_id == CX88_MPEG_DVB) &&
+ (core->board.mpeg & CX88_MPEG_DVB)) {
+ dprintk(1, "cx8802_start_dma doing .dvb\n");
/* negedge driven & software reset */
cx_write(TS_GEN_CNTRL, 0x0040 | dev->ts_gen_cntrl);
udelay(100);
cx_write(MO_PINMUX_IO, 0x00);
- cx_write(TS_HW_SOP_CNTRL, 0x47<<16|188<<4|0x01);
+ cx_write(TS_HW_SOP_CNTRL, 0x47 << 16 | 188 << 4 | 0x01);
switch (core->boardnr) {
case CX88_BOARD_DVICO_FUSIONHDTV_3_GOLD_Q:
case CX88_BOARD_DVICO_FUSIONHDTV_3_GOLD_T:
case CX88_BOARD_DVICO_FUSIONHDTV_5_GOLD:
case CX88_BOARD_PCHDTV_HD5500:
- cx_write(TS_SOP_STAT, 1<<13);
+ cx_write(TS_SOP_STAT, 1 << 13);
break;
case CX88_BOARD_SAMSUNG_SMT_7020:
cx_write(TS_SOP_STAT, 0x00);
break;
case CX88_BOARD_HAUPPAUGE_NOVASPLUS_S1:
case CX88_BOARD_HAUPPAUGE_NOVASE2_S1:
- cx_write(MO_PINMUX_IO, 0x88); /* Enable MPEG parallel IO and video signal pins */
+ /* Enable MPEG parallel IO and video signal pins */
+ cx_write(MO_PINMUX_IO, 0x88);
udelay(100);
break;
case CX88_BOARD_HAUPPAUGE_HVR1300:
@@ -152,22 +146,24 @@ int cx8802_start_dma(struct cx8802_dev *dev,
}
cx_write(TS_GEN_CNTRL, dev->ts_gen_cntrl);
udelay(100);
- } else if ( (core->active_type_id == CX88_MPEG_BLACKBIRD) &&
- (core->board.mpeg & CX88_MPEG_BLACKBIRD) ) {
- dprintk( 1, "cx8802_start_dma doing .blackbird\n");
+ } else if ((core->active_type_id == CX88_MPEG_BLACKBIRD) &&
+ (core->board.mpeg & CX88_MPEG_BLACKBIRD)) {
+ dprintk(1, "cx8802_start_dma doing .blackbird\n");
cx_write(MO_PINMUX_IO, 0x88); /* enable MPEG parallel IO */
- cx_write(TS_GEN_CNTRL, 0x46); /* punctured clock TS & posedge driven & software reset */
+ /* punctured clock TS & posedge driven & software reset */
+ cx_write(TS_GEN_CNTRL, 0x46);
udelay(100);
cx_write(TS_HW_SOP_CNTRL, 0x408); /* mpeg start byte */
cx_write(TS_VALERR_CNTRL, 0x2000);
- cx_write(TS_GEN_CNTRL, 0x06); /* punctured clock TS & posedge driven */
+ /* punctured clock TS & posedge driven */
+ cx_write(TS_GEN_CNTRL, 0x06);
udelay(100);
} else {
- printk( "%s() Failed. Unsupported value in .mpeg (0x%08x)\n", __func__,
- core->board.mpeg );
+ pr_err("%s() Failed. Unsupported value in .mpeg (0x%08x)\n",
+ __func__, core->board.mpeg);
return -EINVAL;
}
@@ -176,20 +172,22 @@ int cx8802_start_dma(struct cx8802_dev *dev,
q->count = 0;
/* enable irqs */
- dprintk( 1, "setting the interrupt mask\n" );
+ dprintk(1, "setting the interrupt mask\n");
cx_set(MO_PCI_INTMSK, core->pci_irqmask | PCI_INT_TSINT);
cx_set(MO_TS_INTMSK, 0x1f0011);
/* start dma */
- cx_set(MO_DEV_CNTRL2, (1<<5));
+ cx_set(MO_DEV_CNTRL2, (1 << 5));
cx_set(MO_TS_DMACNTRL, 0x11);
return 0;
}
+EXPORT_SYMBOL(cx8802_start_dma);
static int cx8802_stop_dma(struct cx8802_dev *dev)
{
struct cx88_core *core = dev->core;
- dprintk( 1, "cx8802_stop_dma\n" );
+
+ dprintk(1, "\n");
/* stop dma */
cx_clear(MO_TS_DMACNTRL, 0x11);
@@ -208,12 +206,12 @@ static int cx8802_restart_queue(struct cx8802_dev *dev,
{
struct cx88_buffer *buf;
- dprintk( 1, "cx8802_restart_queue\n" );
+ dprintk(1, "\n");
if (list_empty(&q->active))
return 0;
buf = list_entry(q->active.next, struct cx88_buffer, list);
- dprintk(2,"restart_queue [%p/%d]: restart dma\n",
+ dprintk(2, "restart_queue [%p/%d]: restart dma\n",
buf, buf->vb.vb2_buf.index);
cx8802_start_dma(dev, q, buf);
return 0;
@@ -222,7 +220,7 @@ static int cx8802_restart_queue(struct cx8802_dev *dev,
/* ------------------------------------------------------------------ */
int cx8802_buf_prepare(struct vb2_queue *q, struct cx8802_dev *dev,
- struct cx88_buffer *buf)
+ struct cx88_buffer *buf)
{
int size = dev->ts_packet_size * dev->ts_packet_count;
struct sg_table *sgt = vb2_dma_sg_plane_desc(&buf->vb.vb2_buf, 0);
@@ -234,43 +232,46 @@ int cx8802_buf_prepare(struct vb2_queue *q, struct cx8802_dev *dev,
vb2_set_plane_payload(&buf->vb.vb2_buf, 0, size);
rc = cx88_risc_databuffer(dev->pci, risc, sgt->sgl,
- dev->ts_packet_size, dev->ts_packet_count, 0);
+ dev->ts_packet_size, dev->ts_packet_count, 0);
if (rc) {
if (risc->cpu)
- pci_free_consistent(dev->pci, risc->size, risc->cpu, risc->dma);
+ pci_free_consistent(dev->pci, risc->size,
+ risc->cpu, risc->dma);
memset(risc, 0, sizeof(*risc));
return rc;
}
return 0;
}
+EXPORT_SYMBOL(cx8802_buf_prepare);
void cx8802_buf_queue(struct cx8802_dev *dev, struct cx88_buffer *buf)
{
struct cx88_buffer *prev;
struct cx88_dmaqueue *cx88q = &dev->mpegq;
- dprintk( 1, "cx8802_buf_queue\n" );
+ dprintk(1, "\n");
/* add jump to start */
buf->risc.cpu[1] = cpu_to_le32(buf->risc.dma + 8);
buf->risc.jmp[0] = cpu_to_le32(RISC_JUMP | RISC_CNT_INC);
buf->risc.jmp[1] = cpu_to_le32(buf->risc.dma + 8);
if (list_empty(&cx88q->active)) {
- dprintk( 1, "queue is empty - first active\n" );
+ dprintk(1, "queue is empty - first active\n");
list_add_tail(&buf->list, &cx88q->active);
- dprintk(1,"[%p/%d] %s - first active\n",
+ dprintk(1, "[%p/%d] %s - first active\n",
buf, buf->vb.vb2_buf.index, __func__);
} else {
buf->risc.cpu[0] |= cpu_to_le32(RISC_IRQ1);
- dprintk( 1, "queue is not empty - append to active\n" );
+ dprintk(1, "queue is not empty - append to active\n");
prev = list_entry(cx88q->active.prev, struct cx88_buffer, list);
list_add_tail(&buf->list, &cx88q->active);
prev->risc.jmp[1] = cpu_to_le32(buf->risc.dma);
- dprintk( 1, "[%p/%d] %s - append to active\n",
+ dprintk(1, "[%p/%d] %s - append to active\n",
buf, buf->vb.vb2_buf.index, __func__);
}
}
+EXPORT_SYMBOL(cx8802_buf_queue);
/* ----------------------------------------------------------- */
@@ -280,23 +281,24 @@ static void do_cancel_buffers(struct cx8802_dev *dev)
struct cx88_buffer *buf;
unsigned long flags;
- spin_lock_irqsave(&dev->slock,flags);
+ spin_lock_irqsave(&dev->slock, flags);
while (!list_empty(&q->active)) {
buf = list_entry(q->active.next, struct cx88_buffer, list);
list_del(&buf->list);
vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
}
- spin_unlock_irqrestore(&dev->slock,flags);
+ spin_unlock_irqrestore(&dev->slock, flags);
}
void cx8802_cancel_buffers(struct cx8802_dev *dev)
{
- dprintk( 1, "cx8802_cancel_buffers" );
+ dprintk(1, "\n");
cx8802_stop_dma(dev);
do_cancel_buffers(dev);
}
+EXPORT_SYMBOL(cx8802_cancel_buffers);
-static const char * cx88_mpeg_irqs[32] = {
+static const char *cx88_mpeg_irqs[32] = {
"ts_risci1", NULL, NULL, NULL,
"ts_risci2", NULL, NULL, NULL,
"ts_oflow", NULL, NULL, NULL,
@@ -310,7 +312,7 @@ static void cx8802_mpeg_irq(struct cx8802_dev *dev)
struct cx88_core *core = dev->core;
u32 status, mask, count;
- dprintk( 1, "cx8802_mpeg_irq\n" );
+ dprintk(1, "\n");
status = cx_read(MO_TS_INTSTAT);
mask = cx_read(MO_TS_INTMSK);
if (0 == (status & mask))
@@ -319,20 +321,21 @@ static void cx8802_mpeg_irq(struct cx8802_dev *dev)
cx_write(MO_TS_INTSTAT, status);
if (debug || (status & mask & ~0xff))
- cx88_print_irqbits(core->name, "irq mpeg ",
+ cx88_print_irqbits("irq mpeg ",
cx88_mpeg_irqs, ARRAY_SIZE(cx88_mpeg_irqs),
status, mask);
/* risc op code error */
if (status & (1 << 16)) {
- printk(KERN_WARNING "%s: mpeg risc op code error\n",core->name);
+ pr_warn("mpeg risc op code error\n");
cx_clear(MO_TS_DMACNTRL, 0x11);
- cx88_sram_channel_dump(dev->core, &cx88_sram_channels[SRAM_CH28]);
+ cx88_sram_channel_dump(dev->core,
+ &cx88_sram_channels[SRAM_CH28]);
}
/* risc1 y */
if (status & 0x01) {
- dprintk( 1, "wake up\n" );
+ dprintk(1, "wake up\n");
spin_lock(&dev->slock);
count = cx_read(MO_TS_GPCNT);
cx88_wakeup(dev->core, &dev->mpegq, count);
@@ -341,7 +344,7 @@ static void cx8802_mpeg_irq(struct cx8802_dev *dev)
/* other general errors */
if (status & 0x1f0100) {
- dprintk( 0, "general errors: 0x%08x\n", status & 0x1f0100 );
+ dprintk(0, "general errors: 0x%08x\n", status & 0x1f0100);
spin_lock(&dev->slock);
cx8802_stop_dma(dev);
spin_unlock(&dev->slock);
@@ -360,24 +363,23 @@ static irqreturn_t cx8802_irq(int irq, void *dev_id)
for (loop = 0; loop < MAX_IRQ_LOOP; loop++) {
status = cx_read(MO_PCI_INTSTAT) &
(core->pci_irqmask | PCI_INT_TSINT);
- if (0 == status)
+ if (status == 0)
goto out;
- dprintk( 1, "cx8802_irq\n" );
- dprintk( 1, " loop: %d/%d\n", loop, MAX_IRQ_LOOP );
- dprintk( 1, " status: %d\n", status );
+ dprintk(1, "cx8802_irq\n");
+ dprintk(1, " loop: %d/%d\n", loop, MAX_IRQ_LOOP);
+ dprintk(1, " status: %d\n", status);
handled = 1;
cx_write(MO_PCI_INTSTAT, status);
if (status & core->pci_irqmask)
- cx88_core_irq(core,status);
+ cx88_core_irq(core, status);
if (status & PCI_INT_TSINT)
cx8802_mpeg_irq(dev);
}
- if (MAX_IRQ_LOOP == loop) {
- dprintk( 0, "clearing mask\n" );
- printk(KERN_WARNING "%s/0: irq loop -- clearing mask\n",
- core->name);
- cx_write(MO_PCI_INTMSK,0);
+ if (loop == MAX_IRQ_LOOP) {
+ dprintk(0, "clearing mask\n");
+ pr_warn("irq loop -- clearing mask\n");
+ cx_write(MO_PCI_INTMSK, 0);
}
out:
@@ -393,18 +395,18 @@ static int cx8802_init_common(struct cx8802_dev *dev)
if (pci_enable_device(dev->pci))
return -EIO;
pci_set_master(dev->pci);
- err = pci_set_dma_mask(dev->pci,DMA_BIT_MASK(32));
+ err = pci_set_dma_mask(dev->pci, DMA_BIT_MASK(32));
if (err) {
- printk("%s/2: Oops: no 32bit PCI DMA ???\n",dev->core->name);
+ pr_err("Oops: no 32bit PCI DMA ???\n");
return -EIO;
}
dev->pci_rev = dev->pci->revision;
pci_read_config_byte(dev->pci, PCI_LATENCY_TIMER, &dev->pci_lat);
- printk(KERN_INFO "%s/2: found at %s, rev: %d, irq: %d, "
- "latency: %d, mmio: 0x%llx\n", dev->core->name,
- pci_name(dev->pci), dev->pci_rev, dev->pci->irq,
- dev->pci_lat,(unsigned long long)pci_resource_start(dev->pci,0));
+ pr_info("found at %s, rev: %d, irq: %d, latency: %d, mmio: 0x%llx\n",
+ pci_name(dev->pci), dev->pci_rev, dev->pci->irq,
+ dev->pci_lat,
+ (unsigned long long)pci_resource_start(dev->pci, 0));
/* initialize driver struct */
spin_lock_init(&dev->slock);
@@ -416,20 +418,19 @@ static int cx8802_init_common(struct cx8802_dev *dev)
err = request_irq(dev->pci->irq, cx8802_irq,
IRQF_SHARED, dev->core->name, dev);
if (err < 0) {
- printk(KERN_ERR "%s: can't get IRQ %d\n",
- dev->core->name, dev->pci->irq);
+ pr_err("can't get IRQ %d\n", dev->pci->irq);
return err;
}
cx_set(MO_PCI_INTMSK, core->pci_irqmask);
/* everything worked */
- pci_set_drvdata(dev->pci,dev);
+ pci_set_drvdata(dev->pci, dev);
return 0;
}
static void cx8802_fini_common(struct cx8802_dev *dev)
{
- dprintk( 2, "cx8802_fini_common\n" );
+ dprintk(2, "\n");
cx8802_stop_dma(dev);
pci_disable_device(dev->pci);
@@ -442,14 +443,13 @@ static void cx8802_fini_common(struct cx8802_dev *dev)
static int cx8802_suspend_common(struct pci_dev *pci_dev, pm_message_t state)
{
struct cx8802_dev *dev = pci_get_drvdata(pci_dev);
- struct cx88_core *core = dev->core;
unsigned long flags;
/* stop mpeg dma */
spin_lock_irqsave(&dev->slock, flags);
if (!list_empty(&dev->mpegq.active)) {
- dprintk( 2, "suspend\n" );
- printk("%s: suspend mpeg\n", core->name);
+ dprintk(2, "suspend\n");
+ pr_info("suspend mpeg\n");
cx8802_stop_dma(dev);
}
spin_unlock_irqrestore(&dev->slock, flags);
@@ -458,7 +458,8 @@ static int cx8802_suspend_common(struct pci_dev *pci_dev, pm_message_t state)
cx88_shutdown(dev->core);
pci_save_state(pci_dev);
- if (0 != pci_set_power_state(pci_dev, pci_choose_state(pci_dev, state))) {
+ if (pci_set_power_state(pci_dev,
+ pci_choose_state(pci_dev, state)) != 0) {
pci_disable_device(pci_dev);
dev->state.disabled = 1;
}
@@ -468,23 +469,20 @@ static int cx8802_suspend_common(struct pci_dev *pci_dev, pm_message_t state)
static int cx8802_resume_common(struct pci_dev *pci_dev)
{
struct cx8802_dev *dev = pci_get_drvdata(pci_dev);
- struct cx88_core *core = dev->core;
unsigned long flags;
int err;
if (dev->state.disabled) {
- err=pci_enable_device(pci_dev);
+ err = pci_enable_device(pci_dev);
if (err) {
- printk(KERN_ERR "%s: can't enable device\n",
- dev->core->name);
+ pr_err("can't enable device\n");
return err;
}
dev->state.disabled = 0;
}
- err=pci_set_power_state(pci_dev, PCI_D0);
+ err = pci_set_power_state(pci_dev, PCI_D0);
if (err) {
- printk(KERN_ERR "%s: can't enable device\n",
- dev->core->name);
+ pr_err("can't enable device\n");
pci_disable_device(pci_dev);
dev->state.disabled = 1;
@@ -498,15 +496,16 @@ static int cx8802_resume_common(struct pci_dev *pci_dev)
/* restart video+vbi capture */
spin_lock_irqsave(&dev->slock, flags);
if (!list_empty(&dev->mpegq.active)) {
- printk("%s: resume mpeg\n", core->name);
- cx8802_restart_queue(dev,&dev->mpegq);
+ pr_info("resume mpeg\n");
+ cx8802_restart_queue(dev, &dev->mpegq);
}
spin_unlock_irqrestore(&dev->slock, flags);
return 0;
}
-struct cx8802_driver * cx8802_get_driver(struct cx8802_dev *dev, enum cx88_board_type btype)
+struct cx8802_driver *cx8802_get_driver(struct cx8802_dev *dev,
+ enum cx88_board_type btype)
{
struct cx8802_driver *d;
@@ -516,6 +515,7 @@ struct cx8802_driver * cx8802_get_driver(struct cx8802_dev *dev, enum cx88_board
return NULL;
}
+EXPORT_SYMBOL(cx8802_get_driver);
/* Driver asked for hardware access. */
static int cx8802_request_acquire(struct cx8802_driver *drv)
@@ -533,7 +533,8 @@ static int cx8802_request_acquire(struct cx8802_driver *drv)
core->last_analog_input = core->input;
core->input = 0;
for (i = 0;
- i < (sizeof(core->board.input) / sizeof(struct cx88_input));
+ i < (sizeof(core->board.input) /
+ sizeof(struct cx88_input));
i++) {
if (core->board.input[i].type == CX88_VMUX_DVB) {
core->input = i;
@@ -542,15 +543,14 @@ static int cx8802_request_acquire(struct cx8802_driver *drv)
}
}
- if (drv->advise_acquire)
- {
+ if (drv->advise_acquire) {
core->active_ref++;
if (core->active_type_id == CX88_BOARD_NONE) {
core->active_type_id = drv->type_id;
drv->advise_acquire(drv);
}
- mpeg_dbg(1,"%s() Post acquire GPIO=%x\n", __func__, cx_read(MO_GP0_IO));
+ dprintk(1, "Post acquire GPIO=%x\n", cx_read(MO_GP0_IO));
}
return 0;
@@ -561,17 +561,18 @@ static int cx8802_request_release(struct cx8802_driver *drv)
{
struct cx88_core *core = drv->core;
- if (drv->advise_release && --core->active_ref == 0)
- {
+ if (drv->advise_release && --core->active_ref == 0) {
if (drv->type_id == CX88_MPEG_DVB) {
- /* If the DVB driver is releasing, reset the input
- state to the last configured analog input */
+ /*
+ * If the DVB driver is releasing, reset the input
+ * state to the last configured analog input
+ */
core->input = core->last_analog_input;
}
drv->advise_release(drv);
core->active_type_id = CX88_BOARD_NONE;
- mpeg_dbg(1,"%s() Post release GPIO=%x\n", __func__, cx_read(MO_GP0_IO));
+ dprintk(1, "Post release GPIO=%x\n", cx_read(MO_GP0_IO));
}
return 0;
@@ -579,21 +580,21 @@ static int cx8802_request_release(struct cx8802_driver *drv)
static int cx8802_check_driver(struct cx8802_driver *drv)
{
- if (drv == NULL)
+ if (!drv)
return -ENODEV;
if ((drv->type_id != CX88_MPEG_DVB) &&
- (drv->type_id != CX88_MPEG_BLACKBIRD))
+ (drv->type_id != CX88_MPEG_BLACKBIRD))
return -EINVAL;
if ((drv->hw_access != CX8802_DRVCTL_SHARED) &&
- (drv->hw_access != CX8802_DRVCTL_EXCLUSIVE))
+ (drv->hw_access != CX8802_DRVCTL_EXCLUSIVE))
return -EINVAL;
- if ((drv->probe == NULL) ||
- (drv->remove == NULL) ||
- (drv->advise_acquire == NULL) ||
- (drv->advise_release == NULL))
+ if ((!drv->probe) ||
+ (!drv->remove) ||
+ (!drv->advise_acquire) ||
+ (!drv->advise_release))
return -EINVAL;
return 0;
@@ -605,28 +606,28 @@ int cx8802_register_driver(struct cx8802_driver *drv)
struct cx8802_driver *driver;
int err, i = 0;
- printk(KERN_INFO
- "cx88/2: registering cx8802 driver, type: %s access: %s\n",
- drv->type_id == CX88_MPEG_DVB ? "dvb" : "blackbird",
- drv->hw_access == CX8802_DRVCTL_SHARED ? "shared" : "exclusive");
+ pr_info("registering cx8802 driver, type: %s access: %s\n",
+ drv->type_id == CX88_MPEG_DVB ? "dvb" : "blackbird",
+ drv->hw_access == CX8802_DRVCTL_SHARED ?
+ "shared" : "exclusive");
- if ((err = cx8802_check_driver(drv)) != 0) {
- printk(KERN_ERR "cx88/2: cx8802_driver is invalid\n");
+ err = cx8802_check_driver(drv);
+ if (err) {
+ pr_err("cx8802_driver is invalid\n");
return err;
}
mutex_lock(&cx8802_mutex);
list_for_each_entry(dev, &cx8802_devlist, devlist) {
- printk(KERN_INFO
- "%s/2: subsystem: %04x:%04x, board: %s [card=%d]\n",
- dev->core->name, dev->pci->subsystem_vendor,
- dev->pci->subsystem_device, dev->core->board.name,
- dev->core->boardnr);
+ pr_info("subsystem: %04x:%04x, board: %s [card=%d]\n",
+ dev->pci->subsystem_vendor,
+ dev->pci->subsystem_device, dev->core->board.name,
+ dev->core->boardnr);
/* Bring up a new struct for each driver instance */
- driver = kzalloc(sizeof(*drv),GFP_KERNEL);
- if (driver == NULL) {
+ driver = kzalloc(sizeof(*drv), GFP_KERNEL);
+ if (!driver) {
err = -ENOMEM;
goto out;
}
@@ -645,9 +646,7 @@ int cx8802_register_driver(struct cx8802_driver *drv)
i++;
list_add_tail(&driver->drvlist, &dev->drvlist);
} else {
- printk(KERN_ERR
- "%s/2: cx8802 probe failed, err = %d\n",
- dev->core->name, err);
+ pr_err("cx8802 probe failed, err = %d\n", err);
}
mutex_unlock(&drv->core->lock);
}
@@ -657,6 +656,7 @@ out:
mutex_unlock(&cx8802_mutex);
return err;
}
+EXPORT_SYMBOL(cx8802_register_driver);
int cx8802_unregister_driver(struct cx8802_driver *drv)
{
@@ -664,19 +664,18 @@ int cx8802_unregister_driver(struct cx8802_driver *drv)
struct cx8802_driver *d, *dtmp;
int err = 0;
- printk(KERN_INFO
- "cx88/2: unregistering cx8802 driver, type: %s access: %s\n",
- drv->type_id == CX88_MPEG_DVB ? "dvb" : "blackbird",
- drv->hw_access == CX8802_DRVCTL_SHARED ? "shared" : "exclusive");
+ pr_info("unregistering cx8802 driver, type: %s access: %s\n",
+ drv->type_id == CX88_MPEG_DVB ? "dvb" : "blackbird",
+ drv->hw_access == CX8802_DRVCTL_SHARED ?
+ "shared" : "exclusive");
mutex_lock(&cx8802_mutex);
list_for_each_entry(dev, &cx8802_devlist, devlist) {
- printk(KERN_INFO
- "%s/2: subsystem: %04x:%04x, board: %s [card=%d]\n",
- dev->core->name, dev->pci->subsystem_vendor,
- dev->pci->subsystem_device, dev->core->board.name,
- dev->core->boardnr);
+ pr_info("subsystem: %04x:%04x, board: %s [card=%d]\n",
+ dev->pci->subsystem_vendor,
+ dev->pci->subsystem_device, dev->core->board.name,
+ dev->core->boardnr);
mutex_lock(&dev->core->lock);
@@ -690,8 +689,8 @@ int cx8802_unregister_driver(struct cx8802_driver *drv)
list_del(&d->drvlist);
kfree(d);
} else
- printk(KERN_ERR "%s/2: cx8802 driver remove "
- "failed (%d)\n", dev->core->name, err);
+ pr_err("cx8802 driver remove failed (%d)\n",
+ err);
}
mutex_unlock(&dev->core->lock);
@@ -701,6 +700,7 @@ int cx8802_unregister_driver(struct cx8802_driver *drv)
return err;
}
+EXPORT_SYMBOL(cx8802_unregister_driver);
/* ----------------------------------------------------------- */
static int cx8802_probe(struct pci_dev *pci_dev,
@@ -712,18 +712,18 @@ static int cx8802_probe(struct pci_dev *pci_dev,
/* general setup */
core = cx88_core_get(pci_dev);
- if (NULL == core)
+ if (!core)
return -EINVAL;
- printk("%s/2: cx2388x 8802 Driver Manager\n", core->name);
+ pr_info("cx2388x 8802 Driver Manager\n");
err = -ENODEV;
if (!core->board.mpeg)
goto fail_core;
err = -ENOMEM;
- dev = kzalloc(sizeof(*dev),GFP_KERNEL);
- if (NULL == dev)
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ if (!dev)
goto fail_core;
dev->pci = pci_dev;
dev->core = core;
@@ -737,7 +737,7 @@ static int cx8802_probe(struct pci_dev *pci_dev,
INIT_LIST_HEAD(&dev->drvlist);
mutex_lock(&cx8802_mutex);
- list_add_tail(&dev->devlist,&cx8802_devlist);
+ list_add_tail(&dev->devlist, &cx8802_devlist);
mutex_unlock(&cx8802_mutex);
/* now autoload cx88-dvb or cx88-blackbird */
@@ -748,7 +748,7 @@ static int cx8802_probe(struct pci_dev *pci_dev,
kfree(dev);
fail_core:
core->dvbdev = NULL;
- cx88_core_put(core,pci_dev);
+ cx88_core_put(core, pci_dev);
return err;
}
@@ -758,7 +758,7 @@ static void cx8802_remove(struct pci_dev *pci_dev)
dev = pci_get_drvdata(pci_dev);
- dprintk( 1, "%s\n", __func__);
+ dprintk(1, "%s\n", __func__);
flush_request_modules(dev);
@@ -768,17 +768,15 @@ static void cx8802_remove(struct pci_dev *pci_dev)
struct cx8802_driver *drv, *tmp;
int err;
- printk(KERN_WARNING "%s/2: Trying to remove cx8802 driver "
- "while cx8802 sub-drivers still loaded?!\n",
- dev->core->name);
+ pr_warn("Trying to remove cx8802 driver while cx8802 sub-drivers still loaded?!\n");
list_for_each_entry_safe(drv, tmp, &dev->drvlist, drvlist) {
err = drv->remove(drv);
if (err == 0) {
list_del(&drv->drvlist);
} else
- printk(KERN_ERR "%s/2: cx8802 driver remove "
- "failed (%d)\n", dev->core->name, err);
+ pr_err("cx8802 driver remove failed (%d)\n",
+ err);
kfree(drv);
}
}
@@ -790,7 +788,7 @@ static void cx8802_remove(struct pci_dev *pci_dev)
/* common */
cx8802_fini_common(dev);
- cx88_core_put(dev->core,dev->pci);
+ cx88_core_put(dev->core, dev->pci);
kfree(dev);
}
@@ -800,7 +798,7 @@ static const struct pci_device_id cx8802_pci_tbl[] = {
.device = 0x8802,
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,
- },{
+ }, {
/* --- end of list --- */
}
};
@@ -814,12 +812,3 @@ static struct pci_driver cx8802_pci_driver = {
};
module_pci_driver(cx8802_pci_driver);
-
-EXPORT_SYMBOL(cx8802_buf_prepare);
-EXPORT_SYMBOL(cx8802_buf_queue);
-EXPORT_SYMBOL(cx8802_cancel_buffers);
-EXPORT_SYMBOL(cx8802_start_dma);
-
-EXPORT_SYMBOL(cx8802_register_driver);
-EXPORT_SYMBOL(cx8802_unregister_driver);
-EXPORT_SYMBOL(cx8802_get_driver);
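A note on the EXPORT_SYMBOL() churn above: the patch moves each export from the block at the bottom of the file to the line directly after the function it exports, which is what kernel style (and checkpatch.pl's "EXPORT_SYMBOL(foo); should immediately follow its function" warning) asks for. A minimal sketch of the convention, with a hypothetical function name:

	#include <linux/module.h>

	/* The export sits right under the definition it belongs to. */
	int cx_example_op(int val)
	{
		return val * 2;
	}
	EXPORT_SYMBOL(cx_example_op);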
diff --git a/drivers/media/pci/cx88/cx88-reg.h b/drivers/media/pci/cx88/cx88-reg.h
index 2ec52d1cdea0..f1e1dd634a72 100644
--- a/drivers/media/pci/cx88/cx88-reg.h
+++ b/drivers/media/pci/cx88/cx88-reg.h
@@ -1,32 +1,28 @@
/*
-
- cx88x-hw.h - CX2388x register offsets
-
- Copyright (C) 1996,97,98 Ralph Metzler (rjkm@thp.uni-koeln.de)
- 2001 Michael Eskin
- 2002 Yurij Sysoev <yurij@naturesoft.net>
- 2003 Gerd Knorr <kraxel@bytesex.org>
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-*/
+ * cx88x-hw.h - CX2388x register offsets
+ *
+ * Copyright (C) 1996,97,98 Ralph Metzler (rjkm@thp.uni-koeln.de)
+ * 2001 Michael Eskin
+ * 2002 Yurij Sysoev <yurij@naturesoft.net>
+ * 2003 Gerd Knorr <kraxel@bytesex.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
#ifndef _CX88_REG_H_
#define _CX88_REG_H_
-/* ---------------------------------------------------------------------- */
-/* PCI IDs and config space */
+/*
+ * PCI IDs and config space
+ */
#ifndef PCI_VENDOR_ID_CONEXANT
# define PCI_VENDOR_ID_CONEXANT 0x14F1
@@ -39,8 +35,9 @@
#define CX88X_EN_TBFX 0x02
#define CX88X_EN_VSFX 0x04
-/* ---------------------------------------------------------------------- */
-/* PCI controller registers */
+/*
+ * PCI controller registers
+ */
/* Command and Status Register */
#define F0_CMD_STAT_MM 0x2f0004
@@ -63,8 +60,9 @@
#define F3_BAR0_MM 0x2f0310
#define F4_BAR0_MM 0x2f0410
-/* ---------------------------------------------------------------------- */
-/* DMA Controller registers */
+/*
+ * DMA Controller registers
+ */
#define MO_PDMA_STHRSH 0x200000 // Source threshold
#define MO_PDMA_STADRS 0x200004 // Source target address
@@ -157,9 +155,9 @@
#define MO_DMA31_CNT2 0x300168 // {11}RW* DMA Table Size : Ch#31
#define MO_DMA32_CNT2 0x30016C // {11}RW* DMA Table Size : Ch#32
-
-/* ---------------------------------------------------------------------- */
-/* Video registers */
+/*
+ * Video registers
+ */
#define MO_VIDY_DMA 0x310000 // {64}RWp Video Y
#define MO_VIDU_DMA 0x310008 // {64}RWp Video U
@@ -217,9 +215,9 @@
#define MO_VID_DMACNTRL 0x31C040 // {8}RW Video DMA control
#define MO_VID_XFR_STAT 0x31C044 // {1}RO Video transfer status
-
-/* ---------------------------------------------------------------------- */
-/* audio registers */
+/*
+ * audio registers
+ */
#define MO_AUDD_DMA 0x320000 // {64}RWp Audio downstream
#define MO_AUDU_DMA 0x320008 // {64}RWp Audio upstream
@@ -437,9 +435,9 @@
#define AUD_PHACC_FREQ_8LSB 0x320d2b
#define AUD_QAM_MODE 0x320d04
-
-/* ---------------------------------------------------------------------- */
-/* transport stream registers */
+/*
+ * transport stream registers
+ */
#define MO_TS_DMA 0x330000 // {64}RWp Transport stream downstream
#define MO_TS_GPCNT 0x33C020 // {16}RO TS general purpose counter
@@ -455,9 +453,9 @@
#define TS_FIFO_OVFL_STAT 0x33C05C
#define TS_VALERR_CNTRL 0x33C060
-
-/* ---------------------------------------------------------------------- */
-/* VIP registers */
+/*
+ * VIP registers
+ */
#define MO_VIPD_DMA 0x340000 // {64}RWp VIP downstream
#define MO_VIPU_DMA 0x340008 // {64}RWp VIP upstream
@@ -475,9 +473,9 @@
#define MO_VIP_INTCNTRL 0x34C05C // VIP Interrupt Control
#define MO_VIP_XFTERM 0x340060 // VIP transfer terminate
-
-/* ---------------------------------------------------------------------- */
-/* misc registers */
+/*
+ * misc registers
+ */
#define MO_M2M_DMA 0x350000 // {64}RWp Mem2Mem DMA Bfr
#define MO_GP0_IO 0x350010 // {32}RW* GPIO output enables / data I/O
@@ -509,9 +507,9 @@
#define MO_INT1_STAT 0x35C064 // DMA RISC interrupt status
#define MO_INT1_MSTAT 0x35C068 // DMA RISC interrupt masked status
-
-/* ---------------------------------------------------------------------- */
-/* i2c bus registers */
+/*
+ * i2c bus registers
+ */
#define MO_I2C 0x368000 // I2C data/control
#define MO_I2C_DIV (0xf<<4)
@@ -521,9 +519,11 @@
#define MO_I2C_SDA (1<<0)
-/* ---------------------------------------------------------------------- */
-/* general purpose host registers */
-/* FIXME: tyops? s/0x35/0x38/ ?? */
+/*
+ * general purpose host registers
+ *
+ * FIXME: typos? s/0x35/0x38/ ??
+ */
#define MO_GPHSTD_DMA 0x350000 // {64}RWp Host downstream
#define MO_GPHSTU_DMA 0x350008 // {64}RWp Host upstream
@@ -545,9 +545,9 @@
#define MO_GPHST_XFR_STAT 0x38C044 // Host transfer status
#define MO_GPHST_SOFT_RST 0x38C06C // Host software reset
-
-/* ---------------------------------------------------------------------- */
-/* RISC instructions */
+/*
+ * RISC instructions
+ */
#define RISC_SYNC 0x80000000
#define RISC_SYNC_ODD 0x80000000
@@ -576,11 +576,11 @@
#define RISC_CNT_INC 0x00010000
#define RISC_CNT_RSVR 0x00020000
#define RISC_CNT_RESET 0x00030000
-#define RISC_JMP_SRP 0x01
+#define RISC_JMP_SRP 0x01
-
-/* ---------------------------------------------------------------------- */
-/* various constants */
+/*
+ * various constants
+ */
// DMA
/* Interrupt mask/status */
@@ -822,15 +822,4 @@
#define DEFAULT_SAT_U_NTSC 0x7F
#define DEFAULT_SAT_V_NTSC 0x5A
-typedef enum
-{
- SOURCE_TUNER = 0,
- SOURCE_COMPOSITE,
- SOURCE_SVIDEO,
- SOURCE_OTHER1,
- SOURCE_OTHER2,
- SOURCE_COMPVIASVIDEO,
- SOURCE_CCIR656
-} VIDEOSOURCETYPE;
-
#endif /* _CX88_REG_H_ */
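The transport-stream registers above pair with the cx88-mpeg.c hunk that writes cx_write(TS_HW_SOP_CNTRL, 0x47 << 16 | 188 << 4 | 0x01): 0x47 is the MPEG-TS sync byte and 188 bytes is the standard TS packet length, so the magic number decomposes cleanly. A hedged sketch with invented field names (cx88-reg.h does not document the layout; the shifts only mirror the driver's usage):

	/* Hypothetical macros, inferred from cx88-mpeg.c; not in cx88-reg.h. */
	#define TS_SOP_SYNC_BYTE(x)	((x) << 16)	/* start-of-packet byte */
	#define TS_SOP_PKT_LEN(x)	((x) << 4)	/* packet length, bytes */
	#define TS_SOP_ENABLE		0x01

		cx_write(TS_HW_SOP_CNTRL,
			 TS_SOP_SYNC_BYTE(0x47) |	/* MPEG-TS sync byte */
			 TS_SOP_PKT_LEN(188) |		/* standard packet size */
			 TS_SOP_ENABLE);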
diff --git a/drivers/media/pci/cx88/cx88-tvaudio.c b/drivers/media/pci/cx88/cx88-tvaudio.c
index 6bbce6ad6295..545ad4c4d1c7 100644
--- a/drivers/media/pci/cx88/cx88-tvaudio.c
+++ b/drivers/media/pci/cx88/cx88-tvaudio.c
@@ -1,39 +1,36 @@
/*
+ * cx88x-audio.c - Conexant CX23880/23881 audio downstream driver
+ *
+ * (c) 2001 Michael Eskin, Tom Zakrajsek [Windows version]
+ * (c) 2002 Yurij Sysoev <yurij@naturesoft.net>
+ * (c) 2003 Gerd Knorr <kraxel@bytesex.org>
+ *
+ * -----------------------------------------------------------------------
+ *
+ * A lot of voodoo here. Even the data sheet doesn't help to
+ * understand what is going on here; the documentation for the audio
+ * part of the cx2388x chip is *very* bad.
+ *
+ * Some of this comes from partly done Linux driver sources I got from
+ * [undocumented].
+ *
+ * Some comes from the dscaler sources; one of the dscaler driver guys works
+ * for Conexant ...
+ *
+ * -----------------------------------------------------------------------
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
- cx88x-audio.c - Conexant CX23880/23881 audio downstream driver driver
-
- (c) 2001 Michael Eskin, Tom Zakrajsek [Windows version]
- (c) 2002 Yurij Sysoev <yurij@naturesoft.net>
- (c) 2003 Gerd Knorr <kraxel@bytesex.org>
-
- -----------------------------------------------------------------------
-
- Lot of voodoo here. Even the data sheet doesn't help to
- understand what is going on here, the documentation for the audio
- part of the cx2388x chip is *very* bad.
-
- Some of this comes from party done linux driver sources I got from
- [undocumented].
-
- Some comes from the dscaler sources, one of the dscaler driver guy works
- for Conexant ...
-
- -----------------------------------------------------------------------
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-*/
+#include "cx88.h"
#include <linux/module.h>
#include <linux/errno.h>
@@ -50,24 +47,24 @@
#include <linux/delay.h>
#include <linux/kthread.h>
-#include "cx88.h"
-
static unsigned int audio_debug;
module_param(audio_debug, int, 0644);
MODULE_PARM_DESC(audio_debug, "enable debug messages [audio]");
static unsigned int always_analog;
-module_param(always_analog,int,0644);
-MODULE_PARM_DESC(always_analog,"force analog audio out");
+module_param(always_analog, int, 0644);
+MODULE_PARM_DESC(always_analog, "force analog audio out");
static unsigned int radio_deemphasis;
-module_param(radio_deemphasis,int,0644);
-MODULE_PARM_DESC(radio_deemphasis, "Radio deemphasis time constant, "
- "0=None, 1=50us (elsewhere), 2=75us (USA)");
-
-#define dprintk(fmt, arg...) if (audio_debug) \
- printk(KERN_DEBUG "%s/0: " fmt, core->name , ## arg)
-
+module_param(radio_deemphasis, int, 0644);
+MODULE_PARM_DESC(radio_deemphasis,
+ "Radio deemphasis time constant, 0=None, 1=50us (elsewhere), 2=75us (USA)");
+
+#define dprintk(fmt, arg...) do { \
+ if (audio_debug) \
+ printk(KERN_DEBUG pr_fmt("%s: tvaudio:" fmt), \
+ __func__, ##arg); \
+} while (0)
/* ----------------------------------------------------------- */
static const char * const aud_ctl_names[64] = {
@@ -145,7 +142,10 @@ static void set_audio_finish(struct cx88_core *core, u32 ctl)
if (core->board.mpeg & CX88_MPEG_BLACKBIRD) {
cx_write(AUD_I2SINPUTCNTL, 4);
cx_write(AUD_BAUDRATE, 1);
- /* 'pass-thru mode': this enables the i2s output to the mpeg encoder */
+ /*
+ * 'pass-thru mode': this enables the i2s
+ * output to the mpeg encoder
+ */
cx_set(AUD_CTL, EN_I2SOUT_ENABLE);
cx_write(AUD_I2SOUTPUTCNTL, 1);
cx_write(AUD_I2SCNTL, 0);
@@ -349,7 +349,7 @@ static void set_audio_standard_NICAM(struct cx88_core *core, u32 mode)
{ /* end of list */ },
};
- set_audio_start(core,SEL_NICAM);
+ set_audio_start(core, SEL_NICAM);
switch (core->tvaudio) {
case WW_L:
dprintk("%s SECAM-L NICAM (status: devel)\n", __func__);
@@ -638,7 +638,6 @@ static void set_audio_standard_A2(struct cx88_core *core, u32 mode)
case WW_M:
dprintk("%s Warning: wrong value\n", __func__);
return;
- break;
}
mode |= EN_FMRADIO_EN_RDS | EN_DMTRX_SUMDIFF;
@@ -695,13 +694,15 @@ static void set_audio_standard_FM(struct cx88_core *core,
{ /* end of list */ },
};
- /* It is enough to leave default values? */
- /* No, it's not! The deemphasis registers are reset to the 75us
+ /*
+ * Is it enough to leave the default values?
+ *
+ * No, it's not! The deemphasis registers are reset to the 75us
* values by default. Analyzing the spectrum of the decoded audio
* reveals that "no deemphasis" is the same as 75 us, while the 50 us
- * setting results in less deemphasis. */
+ * setting results in less deemphasis.
+ */
static const struct rlist fm_no_deemph[] = {
-
{AUD_POLYPH80SCALEFAC, 0x0003},
{ /* end of list */ },
};
@@ -745,7 +746,7 @@ static int cx88_detect_nicam(struct cx88_core *core)
}
/* wait a little bit for next reading status */
- msleep(10);
+ usleep_range(10000, 20000);
}
dprintk("nicam is not detected.\n");
@@ -766,10 +767,12 @@ void cx88_set_tvaudio(struct cx88_core *core)
/* prepare all dsp registers */
set_audio_standard_A2(core, EN_A2_FORCE_MONO1);
- /* set nicam mode - otherwise
- AUD_NICAM_STATUS2 contains wrong values */
+ /*
+ * set nicam mode - otherwise
+ * AUD_NICAM_STATUS2 contains wrong values
+ */
set_audio_standard_NICAM(core, EN_NICAM_AUTO_STEREO);
- if (0 == cx88_detect_nicam(core)) {
+ if (cx88_detect_nicam(core) == 0) {
/* fall back to fm / am mono */
set_audio_standard_A2(core, EN_A2_FORCE_MONO1);
core->audiomode_current = V4L2_TUNER_MODE_MONO;
@@ -798,23 +801,25 @@ void cx88_set_tvaudio(struct cx88_core *core)
break;
case WW_NONE:
case WW_I2SPT:
- printk("%s/0: unknown tv audio mode [%d]\n",
- core->name, core->tvaudio);
+ pr_info("unknown tv audio mode [%d]\n", core->tvaudio);
break;
}
- return;
}
+EXPORT_SYMBOL(cx88_set_tvaudio);
void cx88_newstation(struct cx88_core *core)
{
core->audiomode_manual = UNSET;
core->last_change = jiffies;
}
+EXPORT_SYMBOL(cx88_newstation);
void cx88_get_stereo(struct cx88_core *core, struct v4l2_tuner *t)
{
- static const char * const m[] = { "stereo", "dual mono", "mono", "sap" };
- static const char * const p[] = { "no pilot", "pilot c1", "pilot c2", "?" };
+ static const char * const m[] = { "stereo", "dual mono",
+ "mono", "sap" };
+ static const char * const p[] = { "no pilot", "pilot c1",
+ "pilot c2", "?" };
u32 reg, mode, pilot;
reg = cx_read(AUD_STATUS);
@@ -869,15 +874,18 @@ void cx88_get_stereo(struct cx88_core *core, struct v4l2_tuner *t)
}
/* If software stereo detection is not supported... */
- if (UNSET == t->rxsubchans) {
+ if (t->rxsubchans == UNSET) {
t->rxsubchans = V4L2_TUNER_SUB_MONO;
- /* If the hardware itself detected stereo, also return
- stereo as an available subchannel */
- if (V4L2_TUNER_MODE_STEREO == t->audmode)
+ /*
+ * If the hardware itself detected stereo, also return
+ * stereo as an available subchannel
+ */
+ if (t->audmode == V4L2_TUNER_MODE_STEREO)
t->rxsubchans |= V4L2_TUNER_SUB_STEREO;
}
- return;
}
+EXPORT_SYMBOL(cx88_get_stereo);
+
void cx88_set_stereo(struct cx88_core *core, u32 mode, int manual)
{
@@ -887,7 +895,7 @@ void cx88_set_stereo(struct cx88_core *core, u32 mode, int manual)
if (manual) {
core->audiomode_manual = mode;
} else {
- if (UNSET != core->audiomode_manual)
+ if (core->audiomode_manual != UNSET)
return;
}
core->audiomode_current = mode;
@@ -915,7 +923,7 @@ void cx88_set_stereo(struct cx88_core *core, u32 mode, int manual)
case WW_M:
case WW_I:
case WW_L:
- if (1 == core->use_nicam) {
+ if (core->use_nicam == 1) {
switch (mode) {
case V4L2_TUNER_MODE_MONO:
case V4L2_TUNER_MODE_LANG1:
@@ -933,7 +941,8 @@ void cx88_set_stereo(struct cx88_core *core, u32 mode, int manual)
break;
}
} else {
- if ((core->tvaudio == WW_I) || (core->tvaudio == WW_L)) {
+ if ((core->tvaudio == WW_I) ||
+ (core->tvaudio == WW_L)) {
/* fall back to fm / am mono */
set_audio_standard_A2(core, EN_A2_FORCE_MONO1);
} else {
@@ -975,15 +984,14 @@ void cx88_set_stereo(struct cx88_core *core, u32 mode, int manual)
break;
}
- if (UNSET != ctl) {
- dprintk("cx88_set_stereo: mask 0x%x, ctl 0x%x "
- "[status=0x%x,ctl=0x%x,vol=0x%x]\n",
+ if (ctl != UNSET) {
+ dprintk("cx88_set_stereo: mask 0x%x, ctl 0x%x [status=0x%x,ctl=0x%x,vol=0x%x]\n",
mask, ctl, cx_read(AUD_STATUS),
cx_read(AUD_CTL), cx_sread(SHADOW_AUD_VOL_CTL));
cx_andor(AUD_CTL, mask, ctl);
}
- return;
}
+EXPORT_SYMBOL(cx88_set_stereo);
int cx88_audio_thread(void *data)
{
@@ -1012,7 +1020,7 @@ int cx88_audio_thread(void *data)
memset(&t, 0, sizeof(t));
cx88_get_stereo(core, &t);
- if (UNSET != core->audiomode_manual)
+ if (core->audiomode_manual != UNSET)
/* manually set, don't do anything. */
continue;
@@ -1033,8 +1041,10 @@ int cx88_audio_thread(void *data)
case WW_FM:
case WW_I2SADC:
hw_autodetect:
- /* stereo autodetection is supported by hardware so
- we don't need to do it manually. Do nothing. */
+ /*
+ * stereo autodetection is supported by hardware so
+ * we don't need to do it manually. Do nothing.
+ */
break;
}
}
@@ -1042,11 +1052,4 @@ hw_autodetect:
dprintk("cx88: tvaudio thread exiting\n");
return 0;
}
-
-/* ----------------------------------------------------------- */
-
-EXPORT_SYMBOL(cx88_set_tvaudio);
-EXPORT_SYMBOL(cx88_newstation);
-EXPORT_SYMBOL(cx88_set_stereo);
-EXPORT_SYMBOL(cx88_get_stereo);
EXPORT_SYMBOL(cx88_audio_thread);
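On the msleep(10) -> usleep_range(10000, 20000) conversions in this patch: msleep() is jiffies-based and rounds up, so a nominal 10 ms sleep can stretch to roughly 20 ms at HZ=100, while usleep_range() uses hrtimers and its min/max window lets the scheduler coalesce the wakeup with nearby timers. Documentation/timers/timers-howto recommends usleep_range() for sleeps up to about 20 ms. The substitution pattern, as a sketch:

	#include <linux/delay.h>

		/* Before: jiffies-based, may oversleep well past 10 ms. */
		msleep(10);

		/* After: at least 10 ms, at most 20 ms; the slack lets the
		 * kernel batch this wakeup with others. */
		usleep_range(10000, 20000);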
diff --git a/drivers/media/pci/cx88/cx88-vbi.c b/drivers/media/pci/cx88/cx88-vbi.c
index d3237cf8ffa3..2d0ef19e6d65 100644
--- a/drivers/media/pci/cx88/cx88-vbi.c
+++ b/drivers/media/pci/cx88/cx88-vbi.c
@@ -1,22 +1,26 @@
/*
*/
+
+#include "cx88.h"
+
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
-#include "cx88.h"
-
static unsigned int vbi_debug;
-module_param(vbi_debug,int,0644);
-MODULE_PARM_DESC(vbi_debug,"enable debug messages [vbi]");
+module_param(vbi_debug, int, 0644);
+MODULE_PARM_DESC(vbi_debug, "enable debug messages [vbi]");
-#define dprintk(level,fmt, arg...) if (vbi_debug >= level) \
- printk(KERN_DEBUG "%s: " fmt, dev->core->name , ## arg)
+#define dprintk(level, fmt, arg...) do { \
+ if (vbi_debug >= level) \
+ printk(KERN_DEBUG pr_fmt("%s: vbi:" fmt), \
+ __func__, ##arg); \
+} while (0)
/* ------------------------------------------------------------------ */
-int cx8800_vbi_fmt (struct file *file, void *priv,
- struct v4l2_format *f)
+int cx8800_vbi_fmt(struct file *file, void *priv,
+ struct v4l2_format *f)
{
struct cx8800_dev *dev = video_drvdata(file);
@@ -44,8 +48,8 @@ int cx8800_vbi_fmt (struct file *file, void *priv,
}
static int cx8800_start_vbi_dma(struct cx8800_dev *dev,
- struct cx88_dmaqueue *q,
- struct cx88_buffer *buf)
+ struct cx88_dmaqueue *q,
+ struct cx88_buffer *buf)
{
struct cx88_core *core = dev->core;
@@ -53,9 +57,9 @@ static int cx8800_start_vbi_dma(struct cx8800_dev *dev,
cx88_sram_channel_setup(dev->core, &cx88_sram_channels[SRAM_CH24],
VBI_LINE_LENGTH, buf->risc.dma);
- cx_write(MO_VBOS_CONTROL, ( (1 << 18) | // comb filter delay fixup
- (1 << 15) | // enable vbi capture
- (1 << 11) ));
+ cx_write(MO_VBOS_CONTROL, (1 << 18) | /* comb filter delay fixup */
+ (1 << 15) | /* enable vbi capture */
+ (1 << 11));
/* reset counter */
cx_write(MO_VBI_GPCNTRL, GP_COUNT_CONTROL_RESET);
@@ -66,10 +70,10 @@ static int cx8800_start_vbi_dma(struct cx8800_dev *dev,
cx_set(MO_VID_INTMSK, 0x0f0088);
/* enable capture */
- cx_set(VID_CAPTURE_CONTROL,0x18);
+ cx_set(VID_CAPTURE_CONTROL, 0x18);
/* start dma */
- cx_set(MO_DEV_CNTRL2, (1<<5));
+ cx_set(MO_DEV_CNTRL2, (1 << 5));
cx_set(MO_VID_DMACNTRL, 0x88);
return 0;
@@ -83,7 +87,7 @@ void cx8800_stop_vbi_dma(struct cx8800_dev *dev)
cx_clear(MO_VID_DMACNTRL, 0x88);
/* disable capture */
- cx_clear(VID_CAPTURE_CONTROL,0x18);
+ cx_clear(VID_CAPTURE_CONTROL, 0x18);
/* disable irqs */
cx_clear(MO_PCI_INTMSK, PCI_INT_VIDINT);
@@ -99,7 +103,7 @@ int cx8800_restart_vbi_queue(struct cx8800_dev *dev,
return 0;
buf = list_entry(q->active.next, struct cx88_buffer, list);
- dprintk(2,"restart_queue [%p/%d]: restart dma\n",
+ dprintk(2, "restart_queue [%p/%d]: restart dma\n",
buf, buf->vb.vb2_buf.index);
cx8800_start_vbi_dma(dev, q, buf);
return 0;
@@ -108,8 +112,8 @@ int cx8800_restart_vbi_queue(struct cx8800_dev *dev,
/* ------------------------------------------------------------------ */
static int queue_setup(struct vb2_queue *q,
- unsigned int *num_buffers, unsigned int *num_planes,
- unsigned int sizes[], struct device *alloc_devs[])
+ unsigned int *num_buffers, unsigned int *num_planes,
+ unsigned int sizes[], struct device *alloc_devs[])
{
struct cx8800_dev *dev = q->drv_priv;
@@ -121,7 +125,6 @@ static int queue_setup(struct vb2_queue *q,
return 0;
}
-
static int buffer_prepare(struct vb2_buffer *vb)
{
struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
@@ -175,7 +178,7 @@ static void buffer_queue(struct vb2_buffer *vb)
if (list_empty(&q->active)) {
list_add_tail(&buf->list, &q->active);
cx8800_start_vbi_dma(dev, q, buf);
- dprintk(2,"[%p/%d] vbi_queue - first active\n",
+ dprintk(2, "[%p/%d] vbi_queue - first active\n",
buf, buf->vb.vb2_buf.index);
} else {
@@ -183,7 +186,7 @@ static void buffer_queue(struct vb2_buffer *vb)
prev = list_entry(q->active.prev, struct cx88_buffer, list);
list_add_tail(&buf->list, &q->active);
prev->risc.jmp[1] = cpu_to_le32(buf->risc.dma);
- dprintk(2,"[%p/%d] buffer_queue - append to active\n",
+ dprintk(2, "[%p/%d] buffer_queue - append to active\n",
buf, buf->vb.vb2_buf.index);
}
}
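Both new dprintk() macros in this patch (here and in cx88-tvaudio.c) wrap the conditional printk in do { ... } while (0). Without the wrapper, a macro whose body is a bare if breaks silently under an unbraced if/else at the call site. A standalone sketch of the failure mode, compilable as plain C (names are illustrative only):

	#include <stdio.h>

	static int debug;

	#define BAD_LOG(fmt, ...) \
		if (debug) printf(fmt, ##__VA_ARGS__)

	#define GOOD_LOG(fmt, ...) do { \
		if (debug) \
			printf(fmt, ##__VA_ARGS__); \
	} while (0)

	int main(void)
	{
		int ready = 0;

		if (ready)
			BAD_LOG("ready\n");
		else			/* binds to BAD_LOG's hidden if! */
			printf("bad: not ready\n");	/* skipped when ready == 0 */

		if (ready)
			GOOD_LOG("ready\n");
		else			/* binds to the visible if, as intended */
			printf("good: not ready\n");

		return 0;
	}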
diff --git a/drivers/media/pci/cx88/cx88-video.c b/drivers/media/pci/cx88/cx88-video.c
index d83eb3b10f54..c7d4e87ccb64 100644
--- a/drivers/media/pci/cx88/cx88-video.c
+++ b/drivers/media/pci/cx88/cx88-video.c
@@ -19,12 +19,10 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
+#include "cx88.h"
+
#include <linux/init.h>
#include <linux/list.h>
#include <linux/module.h>
@@ -37,7 +35,6 @@
#include <linux/kthread.h>
#include <asm/div64.h>
-#include "cx88.h"
#include <media/v4l2-common.h>
#include <media/v4l2-ioctl.h>
#include <media/v4l2-event.h>
@@ -58,20 +55,23 @@ module_param_array(video_nr, int, NULL, 0444);
module_param_array(vbi_nr, int, NULL, 0444);
module_param_array(radio_nr, int, NULL, 0444);
-MODULE_PARM_DESC(video_nr,"video device numbers");
-MODULE_PARM_DESC(vbi_nr,"vbi device numbers");
-MODULE_PARM_DESC(radio_nr,"radio device numbers");
+MODULE_PARM_DESC(video_nr, "video device numbers");
+MODULE_PARM_DESC(vbi_nr, "vbi device numbers");
+MODULE_PARM_DESC(radio_nr, "radio device numbers");
static unsigned int video_debug;
-module_param(video_debug,int,0644);
-MODULE_PARM_DESC(video_debug,"enable debug messages [video]");
+module_param(video_debug, int, 0644);
+MODULE_PARM_DESC(video_debug, "enable debug messages [video]");
static unsigned int irq_debug;
-module_param(irq_debug,int,0644);
-MODULE_PARM_DESC(irq_debug,"enable debug messages [IRQ handler]");
+module_param(irq_debug, int, 0644);
+MODULE_PARM_DESC(irq_debug, "enable debug messages [IRQ handler]");
-#define dprintk(level,fmt, arg...) if (video_debug >= level) \
- printk(KERN_DEBUG "%s/0: " fmt, core->name , ## arg)
+#define dprintk(level, fmt, arg...) do { \
+ if (video_debug >= level) \
+ printk(KERN_DEBUG pr_fmt("%s: video:" fmt), \
+ __func__, ##arg); \
+} while (0)
/* ------------------------------------------------------------------- */
/* static data */
@@ -83,55 +83,56 @@ static const struct cx8800_fmt formats[] = {
.cxformat = ColorFormatY8,
.depth = 8,
.flags = FORMAT_FLAGS_PACKED,
- },{
+ }, {
.name = "15 bpp RGB, le",
.fourcc = V4L2_PIX_FMT_RGB555,
.cxformat = ColorFormatRGB15,
.depth = 16,
.flags = FORMAT_FLAGS_PACKED,
- },{
+ }, {
.name = "15 bpp RGB, be",
.fourcc = V4L2_PIX_FMT_RGB555X,
.cxformat = ColorFormatRGB15 | ColorFormatBSWAP,
.depth = 16,
.flags = FORMAT_FLAGS_PACKED,
- },{
+ }, {
.name = "16 bpp RGB, le",
.fourcc = V4L2_PIX_FMT_RGB565,
.cxformat = ColorFormatRGB16,
.depth = 16,
.flags = FORMAT_FLAGS_PACKED,
- },{
+ }, {
.name = "16 bpp RGB, be",
.fourcc = V4L2_PIX_FMT_RGB565X,
.cxformat = ColorFormatRGB16 | ColorFormatBSWAP,
.depth = 16,
.flags = FORMAT_FLAGS_PACKED,
- },{
+ }, {
.name = "24 bpp RGB, le",
.fourcc = V4L2_PIX_FMT_BGR24,
.cxformat = ColorFormatRGB24,
.depth = 24,
.flags = FORMAT_FLAGS_PACKED,
- },{
+ }, {
.name = "32 bpp RGB, le",
.fourcc = V4L2_PIX_FMT_BGR32,
.cxformat = ColorFormatRGB32,
.depth = 32,
.flags = FORMAT_FLAGS_PACKED,
- },{
+ }, {
.name = "32 bpp RGB, be",
.fourcc = V4L2_PIX_FMT_RGB32,
- .cxformat = ColorFormatRGB32 | ColorFormatBSWAP | ColorFormatWSWAP,
+ .cxformat = ColorFormatRGB32 | ColorFormatBSWAP |
+ ColorFormatWSWAP,
.depth = 32,
.flags = FORMAT_FLAGS_PACKED,
- },{
+ }, {
.name = "4:2:2, packed, YUYV",
.fourcc = V4L2_PIX_FMT_YUYV,
.cxformat = ColorFormatYUY2,
.depth = 16,
.flags = FORMAT_FLAGS_PACKED,
- },{
+ }, {
.name = "4:2:2, packed, UYVY",
.fourcc = V4L2_PIX_FMT_UYVY,
.cxformat = ColorFormatYUY2 | ColorFormatBSWAP,
@@ -140,13 +141,13 @@ static const struct cx8800_fmt formats[] = {
},
};
-static const struct cx8800_fmt* format_by_fourcc(unsigned int fourcc)
+static const struct cx8800_fmt *format_by_fourcc(unsigned int fourcc)
{
unsigned int i;
for (i = 0; i < ARRAY_SIZE(formats); i++)
if (formats[i].fourcc == fourcc)
- return formats+i;
+ return formats + i;
return NULL;
}
@@ -180,7 +181,7 @@ static const struct cx88_ctrl cx8800_vid_ctls[] = {
.reg = MO_CONTR_BRIGHT,
.mask = 0x00ff,
.shift = 0,
- },{
+ }, {
.id = V4L2_CID_CONTRAST,
.minimum = 0,
.maximum = 0xff,
@@ -190,7 +191,7 @@ static const struct cx88_ctrl cx8800_vid_ctls[] = {
.reg = MO_CONTR_BRIGHT,
.mask = 0xff00,
.shift = 8,
- },{
+ }, {
.id = V4L2_CID_HUE,
.minimum = 0,
.maximum = 0xff,
@@ -200,7 +201,7 @@ static const struct cx88_ctrl cx8800_vid_ctls[] = {
.reg = MO_HUE,
.mask = 0x00ff,
.shift = 0,
- },{
+ }, {
/* strictly, this only describes U saturation.
* V saturation is handled specially through code.
*/
@@ -220,8 +221,10 @@ static const struct cx88_ctrl cx8800_vid_ctls[] = {
.step = 1,
.default_value = 0x0,
.off = 0,
- /* NOTE: the value is converted and written to both even
- and odd registers in the code */
+ /*
+ * NOTE: the value is converted and written to both even
+ * and odd registers in the code
+ */
.reg = MO_FILTER_ODD,
.mask = 7 << 7,
.shift = 7,
@@ -265,7 +268,7 @@ static const struct cx88_ctrl cx8800_aud_ctls[] = {
.sreg = SHADOW_AUD_VOL_CTL,
.mask = (1 << 6),
.shift = 6,
- },{
+ }, {
.id = V4L2_CID_AUDIO_VOLUME,
.minimum = 0,
.maximum = 0x3f,
@@ -275,7 +278,7 @@ static const struct cx88_ctrl cx8800_aud_ctls[] = {
.sreg = SHADOW_AUD_VOL_CTL,
.mask = 0x3f,
.shift = 0,
- },{
+ }, {
.id = V4L2_CID_AUDIO_BALANCE,
.minimum = 0,
.maximum = 0x7f,
@@ -299,10 +302,10 @@ int cx88_video_mux(struct cx88_core *core, unsigned int input)
{
/* struct cx88_core *core = dev->core; */
- dprintk(1,"video_mux: %d [vmux=%d,gpio=0x%x,0x%x,0x%x,0x%x]\n",
+ dprintk(1, "video_mux: %d [vmux=%d,gpio=0x%x,0x%x,0x%x,0x%x]\n",
input, INPUT(input).vmux,
- INPUT(input).gpio0,INPUT(input).gpio1,
- INPUT(input).gpio2,INPUT(input).gpio3);
+ INPUT(input).gpio0, INPUT(input).gpio1,
+ INPUT(input).gpio2, INPUT(input).gpio3);
core->input = input;
cx_andor(MO_INPUT_FORMAT, 0x03 << 14, INPUT(input).vmux << 14);
cx_write(MO_GP3_IO, INPUT(input).gpio3);
@@ -325,19 +328,25 @@ int cx88_video_mux(struct cx88_core *core, unsigned int input)
break;
}
- /* if there are audioroutes defined, we have an external
- ADC to deal with audio */
+ /*
+ * if there are audioroutes defined, we have an external
+ * ADC to deal with audio
+ */
if (INPUT(input).audioroute) {
- /* The wm8775 module has the "2" route hardwired into
- the initialization. Some boards may use different
- routes for different inputs. HVR-1300 surely does */
+ /*
+ * The wm8775 module has the "2" route hardwired into
+ * the initialization. Some boards may use different
+ * routes for different inputs. HVR-1300 surely does
+ */
if (core->sd_wm8775) {
call_all(core, audio, s_routing,
INPUT(input).audioroute, 0, 0);
}
- /* cx2388's C-ADC is connected to the tuner only.
- When used with S-Video, that ADC is busy dealing with
- chroma, so an external must be used for baseband audio */
+ /*
+ * cx2388's C-ADC is connected to the tuner only.
+ * When used with S-Video, that ADC is busy dealing with
+ * chroma, so an external ADC must be used for baseband audio
+ */
if (INPUT(input).type != CX88_VMUX_TELEVISION &&
INPUT(input).type != CX88_VMUX_CABLE) {
/* "I2S ADC mode" */
@@ -369,26 +378,27 @@ static int start_video_dma(struct cx8800_dev *dev,
cx_write(MO_COLOR_CTRL, dev->fmt->cxformat | ColorFormatGamma);
/* reset counter */
- cx_write(MO_VIDY_GPCNTRL,GP_COUNT_CONTROL_RESET);
+ cx_write(MO_VIDY_GPCNTRL, GP_COUNT_CONTROL_RESET);
q->count = 0;
/* enable irqs */
cx_set(MO_PCI_INTMSK, core->pci_irqmask | PCI_INT_VIDINT);
- /* Enables corresponding bits at PCI_INT_STAT:
- bits 0 to 4: video, audio, transport stream, VIP, Host
- bit 7: timer
- bits 8 and 9: DMA complete for: SRC, DST
- bits 10 and 11: BERR signal asserted for RISC: RD, WR
- bits 12 to 15: BERR signal asserted for: BRDG, SRC, DST, IPB
+ /*
+ * Enables corresponding bits at PCI_INT_STAT:
+ * bits 0 to 4: video, audio, transport stream, VIP, Host
+ * bit 7: timer
+ * bits 8 and 9: DMA complete for: SRC, DST
+ * bits 10 and 11: BERR signal asserted for RISC: RD, WR
+ * bits 12 to 15: BERR signal asserted for: BRDG, SRC, DST, IPB
*/
cx_set(MO_VID_INTMSK, 0x0f0011);
/* enable capture */
- cx_set(VID_CAPTURE_CONTROL,0x06);
+ cx_set(VID_CAPTURE_CONTROL, 0x06);
/* start dma */
- cx_set(MO_DEV_CNTRL2, (1<<5));
+ cx_set(MO_DEV_CNTRL2, (1 << 5));
cx_set(MO_VID_DMACNTRL, 0x11); /* Planar Y and packed FIFO and RISC enable */
return 0;
@@ -403,7 +413,7 @@ static int stop_video_dma(struct cx8800_dev *dev)
cx_clear(MO_VID_DMACNTRL, 0x11);
/* disable capture */
- cx_clear(VID_CAPTURE_CONTROL,0x06);
+ cx_clear(VID_CAPTURE_CONTROL, 0x06);
/* disable irqs */
cx_clear(MO_PCI_INTMSK, PCI_INT_VIDINT);
@@ -414,12 +424,11 @@ static int stop_video_dma(struct cx8800_dev *dev)
static int restart_video_queue(struct cx8800_dev *dev,
struct cx88_dmaqueue *q)
{
- struct cx88_core *core = dev->core;
struct cx88_buffer *buf;
if (!list_empty(&q->active)) {
buf = list_entry(q->active.next, struct cx88_buffer, list);
- dprintk(2,"restart_queue [%p/%d]: restart dma\n",
+ dprintk(2, "restart_queue [%p/%d]: restart dma\n",
buf, buf->vb.vb2_buf.index);
start_video_dma(dev, q, buf);
}
@@ -430,8 +439,8 @@ static int restart_video_queue(struct cx8800_dev *dev,
/* ------------------------------------------------------------------ */
static int queue_setup(struct vb2_queue *q,
- unsigned int *num_buffers, unsigned int *num_planes,
- unsigned int sizes[], struct device *alloc_devs[])
+ unsigned int *num_buffers, unsigned int *num_planes,
+ unsigned int sizes[], struct device *alloc_devs[])
{
struct cx8800_dev *dev = q->drv_priv;
struct cx88_core *core = dev->core;
@@ -488,7 +497,8 @@ static int buffer_prepare(struct vb2_buffer *vb)
core->height >> 1);
break;
}
- dprintk(2,"[%p/%d] buffer_prepare - %dx%d %dbpp \"%s\" - dma=0x%08lx\n",
+ dprintk(2,
+ "[%p/%d] buffer_prepare - %dx%d %dbpp \"%s\" - dma=0x%08lx\n",
buf, buf->vb.vb2_buf.index,
core->width, core->height, dev->fmt->depth, dev->fmt->name,
(unsigned long)buf->risc.dma);
@@ -513,7 +523,6 @@ static void buffer_queue(struct vb2_buffer *vb)
struct cx8800_dev *dev = vb->vb2_queue->drv_priv;
struct cx88_buffer *buf = container_of(vbuf, struct cx88_buffer, vb);
struct cx88_buffer *prev;
- struct cx88_core *core = dev->core;
struct cx88_dmaqueue *q = &dev->vidq;
/* add jump to start */
@@ -523,7 +532,7 @@ static void buffer_queue(struct vb2_buffer *vb)
if (list_empty(&q->active)) {
list_add_tail(&buf->list, &q->active);
- dprintk(2,"[%p/%d] buffer_queue - first active\n",
+ dprintk(2, "[%p/%d] buffer_queue - first active\n",
buf, buf->vb.vb2_buf.index);
} else {
@@ -596,7 +605,7 @@ static int radio_open(struct file *file)
if (core->board.radio.audioroute) {
if (core->sd_wm8775) {
call_all(core, audio, s_routing,
- core->board.radio.audioroute, 0, 0);
+ core->board.radio.audioroute, 0, 0);
}
/* "I2S ADC mode" */
core->tvaudio = WW_I2SADC;
@@ -650,9 +659,10 @@ static int cx8800_s_vid_ctrl(struct v4l2_ctrl *ctrl)
value = ((ctrl->val - cc->off) << cc->shift) & cc->mask;
break;
}
- dprintk(1, "set_control id=0x%X(%s) ctrl=0x%02x, reg=0x%02x val=0x%02x (mask 0x%02x)%s\n",
- ctrl->id, ctrl->name, ctrl->val, cc->reg, value,
- mask, cc->sreg ? " [shadowed]" : "");
+ dprintk(1,
+ "set_control id=0x%X(%s) ctrl=0x%02x, reg=0x%02x val=0x%02x (mask 0x%02x)%s\n",
+ ctrl->id, ctrl->name, ctrl->val, cc->reg, value,
+ mask, cc->sreg ? " [shadowed]" : "");
if (cc->sreg)
cx_sandor(cc->sreg, cc->reg, mask, value);
else
@@ -665,7 +675,7 @@ static int cx8800_s_aud_ctrl(struct v4l2_ctrl *ctrl)
struct cx88_core *core =
container_of(ctrl->handler, struct cx88_core, audio_hdl);
const struct cx88_ctrl *cc = ctrl->priv;
- u32 value,mask;
+ u32 value, mask;
/* Pass changes onto any WM8775 */
if (core->sd_wm8775) {
@@ -688,7 +698,8 @@ static int cx8800_s_aud_ctrl(struct v4l2_ctrl *ctrl)
mask = cc->mask;
switch (ctrl->id) {
case V4L2_CID_AUDIO_BALANCE:
- value = (ctrl->val < 0x40) ? (0x7f - ctrl->val) : (ctrl->val - 0x40);
+ value = (ctrl->val < 0x40) ?
+ (0x7f - ctrl->val) : (ctrl->val - 0x40);
break;
case V4L2_CID_AUDIO_VOLUME:
value = 0x3f - (ctrl->val & 0x3f);
@@ -697,9 +708,10 @@ static int cx8800_s_aud_ctrl(struct v4l2_ctrl *ctrl)
value = ((ctrl->val - cc->off) << cc->shift) & cc->mask;
break;
}
- dprintk(1,"set_control id=0x%X(%s) ctrl=0x%02x, reg=0x%02x val=0x%02x (mask 0x%02x)%s\n",
- ctrl->id, ctrl->name, ctrl->val, cc->reg, value,
- mask, cc->sreg ? " [shadowed]" : "");
+ dprintk(1,
+ "set_control id=0x%X(%s) ctrl=0x%02x, reg=0x%02x val=0x%02x (mask 0x%02x)%s\n",
+ ctrl->id, ctrl->name, ctrl->val, cc->reg, value,
+ mask, cc->sreg ? " [shadowed]" : "");
if (cc->sreg)
cx_sandor(cc->sreg, cc->reg, mask, value);
else
@@ -711,7 +723,7 @@ static int cx8800_s_aud_ctrl(struct v4l2_ctrl *ctrl)
/* VIDEO IOCTLS */
static int vidioc_g_fmt_vid_cap(struct file *file, void *priv,
- struct v4l2_format *f)
+ struct v4l2_format *f)
{
struct cx8800_dev *dev = video_drvdata(file);
struct cx88_core *core = dev->core;
@@ -729,7 +741,7 @@ static int vidioc_g_fmt_vid_cap(struct file *file, void *priv,
}
static int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
- struct v4l2_format *f)
+ struct v4l2_format *f)
{
struct cx8800_dev *dev = video_drvdata(file);
struct cx88_core *core = dev->core;
@@ -738,7 +750,7 @@ static int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
unsigned int maxw, maxh;
fmt = format_by_fourcc(f->fmt.pix.pixelformat);
- if (NULL == fmt)
+ if (!fmt)
return -EINVAL;
maxw = norm_maxw(core->tvnorm);
@@ -775,13 +787,13 @@ static int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
}
static int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
- struct v4l2_format *f)
+ struct v4l2_format *f)
{
struct cx8800_dev *dev = video_drvdata(file);
struct cx88_core *core = dev->core;
- int err = vidioc_try_fmt_vid_cap (file,priv,f);
+ int err = vidioc_try_fmt_vid_cap(file, priv, f);
- if (0 != err)
+ if (err != 0)
return err;
if (vb2_is_busy(&dev->vb2_vidq) || vb2_is_busy(&dev->vb2_vbiq))
return -EBUSY;
@@ -795,13 +807,13 @@ static int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
}
void cx88_querycap(struct file *file, struct cx88_core *core,
- struct v4l2_capability *cap)
+ struct v4l2_capability *cap)
{
struct video_device *vdev = video_devdata(file);
strlcpy(cap->card, core->board.name, sizeof(cap->card));
cap->device_caps = V4L2_CAP_READWRITE | V4L2_CAP_STREAMING;
- if (UNSET != core->board.tuner_type)
+ if (core->board.tuner_type != UNSET)
cap->device_caps |= V4L2_CAP_TUNER;
switch (vdev->vfl_type) {
case VFL_TYPE_RADIO:
@@ -822,7 +834,7 @@ void cx88_querycap(struct file *file, struct cx88_core *core,
EXPORT_SYMBOL(cx88_querycap);
static int vidioc_querycap(struct file *file, void *priv,
- struct v4l2_capability *cap)
+ struct v4l2_capability *cap)
{
struct cx8800_dev *dev = video_drvdata(file);
struct cx88_core *core = dev->core;
@@ -833,13 +845,13 @@ static int vidioc_querycap(struct file *file, void *priv,
return 0;
}
-static int vidioc_enum_fmt_vid_cap (struct file *file, void *priv,
- struct v4l2_fmtdesc *f)
+static int vidioc_enum_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
{
if (unlikely(f->index >= ARRAY_SIZE(formats)))
return -EINVAL;
- strlcpy(f->description,formats[f->index].name,sizeof(f->description));
+ strlcpy(f->description, formats[f->index].name, sizeof(f->description));
f->pixelformat = formats[f->index].fourcc;
return 0;
@@ -863,45 +875,46 @@ static int vidioc_s_std(struct file *file, void *priv, v4l2_std_id tvnorms)
}
/* only one input in this sample driver */
-int cx88_enum_input (struct cx88_core *core,struct v4l2_input *i)
+int cx88_enum_input(struct cx88_core *core, struct v4l2_input *i)
{
static const char * const iname[] = {
- [ CX88_VMUX_COMPOSITE1 ] = "Composite1",
- [ CX88_VMUX_COMPOSITE2 ] = "Composite2",
- [ CX88_VMUX_COMPOSITE3 ] = "Composite3",
- [ CX88_VMUX_COMPOSITE4 ] = "Composite4",
- [ CX88_VMUX_SVIDEO ] = "S-Video",
- [ CX88_VMUX_TELEVISION ] = "Television",
- [ CX88_VMUX_CABLE ] = "Cable TV",
- [ CX88_VMUX_DVB ] = "DVB",
- [ CX88_VMUX_DEBUG ] = "for debug only",
+ [CX88_VMUX_COMPOSITE1] = "Composite1",
+ [CX88_VMUX_COMPOSITE2] = "Composite2",
+ [CX88_VMUX_COMPOSITE3] = "Composite3",
+ [CX88_VMUX_COMPOSITE4] = "Composite4",
+ [CX88_VMUX_SVIDEO] = "S-Video",
+ [CX88_VMUX_TELEVISION] = "Television",
+ [CX88_VMUX_CABLE] = "Cable TV",
+ [CX88_VMUX_DVB] = "DVB",
+ [CX88_VMUX_DEBUG] = "for debug only",
};
unsigned int n = i->index;
if (n >= 4)
return -EINVAL;
- if (0 == INPUT(n).type)
+ if (!INPUT(n).type)
return -EINVAL;
i->type = V4L2_INPUT_TYPE_CAMERA;
- strcpy(i->name,iname[INPUT(n).type]);
- if ((CX88_VMUX_TELEVISION == INPUT(n).type) ||
- (CX88_VMUX_CABLE == INPUT(n).type)) {
+ strcpy(i->name, iname[INPUT(n).type]);
+ if ((INPUT(n).type == CX88_VMUX_TELEVISION) ||
+ (INPUT(n).type == CX88_VMUX_CABLE))
i->type = V4L2_INPUT_TYPE_TUNER;
- }
+
i->std = CX88_NORMS;
return 0;
}
EXPORT_SYMBOL(cx88_enum_input);
-static int vidioc_enum_input (struct file *file, void *priv,
- struct v4l2_input *i)
+static int vidioc_enum_input(struct file *file, void *priv,
+ struct v4l2_input *i)
{
struct cx8800_dev *dev = video_drvdata(file);
struct cx88_core *core = dev->core;
- return cx88_enum_input (core,i);
+
+ return cx88_enum_input(core, i);
}
-static int vidioc_g_input (struct file *file, void *priv, unsigned int *i)
+static int vidioc_g_input(struct file *file, void *priv, unsigned int *i)
{
struct cx8800_dev *dev = video_drvdata(file);
struct cx88_core *core = dev->core;
@@ -910,31 +923,31 @@ static int vidioc_g_input (struct file *file, void *priv, unsigned int *i)
return 0;
}
-static int vidioc_s_input (struct file *file, void *priv, unsigned int i)
+static int vidioc_s_input(struct file *file, void *priv, unsigned int i)
{
struct cx8800_dev *dev = video_drvdata(file);
struct cx88_core *core = dev->core;
if (i >= 4)
return -EINVAL;
- if (0 == INPUT(i).type)
+ if (!INPUT(i).type)
return -EINVAL;
cx88_newstation(core);
- cx88_video_mux(core,i);
+ cx88_video_mux(core, i);
return 0;
}
-static int vidioc_g_tuner (struct file *file, void *priv,
- struct v4l2_tuner *t)
+static int vidioc_g_tuner(struct file *file, void *priv,
+ struct v4l2_tuner *t)
{
struct cx8800_dev *dev = video_drvdata(file);
struct cx88_core *core = dev->core;
u32 reg;
- if (unlikely(UNSET == core->board.tuner_type))
+ if (unlikely(core->board.tuner_type == UNSET))
return -EINVAL;
- if (0 != t->index)
+ if (t->index != 0)
return -EINVAL;
strcpy(t->name, "Television");
@@ -942,34 +955,34 @@ static int vidioc_g_tuner (struct file *file, void *priv,
t->rangehigh = 0xffffffffUL;
call_all(core, tuner, g_tuner, t);
- cx88_get_stereo(core ,t);
+ cx88_get_stereo(core, t);
reg = cx_read(MO_DEVICE_STATUS);
- t->signal = (reg & (1<<5)) ? 0xffff : 0x0000;
+ t->signal = (reg & (1 << 5)) ? 0xffff : 0x0000;
return 0;
}
-static int vidioc_s_tuner (struct file *file, void *priv,
- const struct v4l2_tuner *t)
+static int vidioc_s_tuner(struct file *file, void *priv,
+ const struct v4l2_tuner *t)
{
struct cx8800_dev *dev = video_drvdata(file);
struct cx88_core *core = dev->core;
- if (UNSET == core->board.tuner_type)
+ if (core->board.tuner_type == UNSET)
return -EINVAL;
- if (0 != t->index)
+ if (t->index != 0)
return -EINVAL;
cx88_set_stereo(core, t->audmode, 1);
return 0;
}
-static int vidioc_g_frequency (struct file *file, void *priv,
- struct v4l2_frequency *f)
+static int vidioc_g_frequency(struct file *file, void *priv,
+ struct v4l2_frequency *f)
{
struct cx8800_dev *dev = video_drvdata(file);
struct cx88_core *core = dev->core;
- if (unlikely(UNSET == core->board.tuner_type))
+ if (unlikely(core->board.tuner_type == UNSET))
return -EINVAL;
if (f->tuner)
return -EINVAL;
@@ -981,12 +994,12 @@ static int vidioc_g_frequency (struct file *file, void *priv,
return 0;
}
-int cx88_set_freq (struct cx88_core *core,
- const struct v4l2_frequency *f)
+int cx88_set_freq(struct cx88_core *core,
+ const struct v4l2_frequency *f)
{
struct v4l2_frequency new_freq = *f;
- if (unlikely(UNSET == core->board.tuner_type))
+ if (unlikely(core->board.tuner_type == UNSET))
return -EINVAL;
if (unlikely(f->tuner != 0))
return -EINVAL;
@@ -997,15 +1010,15 @@ int cx88_set_freq (struct cx88_core *core,
core->freq = new_freq.frequency;
/* When changing channels it is required to reset TVAUDIO */
- msleep (10);
+ usleep_range(10000, 20000);
cx88_set_tvaudio(core);
return 0;
}
EXPORT_SYMBOL(cx88_set_freq);
-static int vidioc_s_frequency (struct file *file, void *priv,
- const struct v4l2_frequency *f)
+static int vidioc_s_frequency(struct file *file, void *priv,
+ const struct v4l2_frequency *f)
{
struct cx8800_dev *dev = video_drvdata(file);
struct cx88_core *core = dev->core;
@@ -1014,8 +1027,8 @@ static int vidioc_s_frequency (struct file *file, void *priv,
}
#ifdef CONFIG_VIDEO_ADV_DEBUG
-static int vidioc_g_register (struct file *file, void *fh,
- struct v4l2_dbg_register *reg)
+static int vidioc_g_register(struct file *file, void *fh,
+ struct v4l2_dbg_register *reg)
{
struct cx8800_dev *dev = video_drvdata(file);
struct cx88_core *core = dev->core;
@@ -1026,8 +1039,8 @@ static int vidioc_g_register (struct file *file, void *fh,
return 0;
}
-static int vidioc_s_register (struct file *file, void *fh,
- const struct v4l2_dbg_register *reg)
+static int vidioc_s_register(struct file *file, void *fh,
+ const struct v4l2_dbg_register *reg)
{
struct cx8800_dev *dev = video_drvdata(file);
struct cx88_core *core = dev->core;
@@ -1041,8 +1054,8 @@ static int vidioc_s_register (struct file *file, void *fh,
/* RADIO SPECIFIC IOCTLS */
/* ----------------------------------------------------------- */
-static int radio_g_tuner (struct file *file, void *priv,
- struct v4l2_tuner *t)
+static int radio_g_tuner(struct file *file, void *priv,
+ struct v4l2_tuner *t)
{
struct cx8800_dev *dev = video_drvdata(file);
struct cx88_core *core = dev->core;
@@ -1056,13 +1069,13 @@ static int radio_g_tuner (struct file *file, void *priv,
return 0;
}
-static int radio_s_tuner (struct file *file, void *priv,
- const struct v4l2_tuner *t)
+static int radio_s_tuner(struct file *file, void *priv,
+ const struct v4l2_tuner *t)
{
struct cx8800_dev *dev = video_drvdata(file);
struct cx88_core *core = dev->core;
- if (0 != t->index)
+ if (t->index != 0)
return -EINVAL;
call_all(core, tuner, s_tuner, t);
@@ -1090,13 +1103,13 @@ static void cx8800_vid_irq(struct cx8800_dev *dev)
return;
cx_write(MO_VID_INTSTAT, status);
if (irq_debug || (status & mask & ~0xff))
- cx88_print_irqbits(core->name, "irq vid",
+ cx88_print_irqbits("irq vid",
cx88_vid_irqs, ARRAY_SIZE(cx88_vid_irqs),
status, mask);
/* risc op code error */
if (status & (1 << 16)) {
- printk(KERN_WARNING "%s/0: video risc op code error\n",core->name);
+ pr_warn("video risc op code error\n");
cx_clear(MO_VID_DMACNTRL, 0x11);
cx_clear(VID_CAPTURE_CONTROL, 0x06);
cx88_sram_channel_dump(core, &cx88_sram_channels[SRAM_CH21]);
@@ -1129,20 +1142,19 @@ static irqreturn_t cx8800_irq(int irq, void *dev_id)
for (loop = 0; loop < 10; loop++) {
status = cx_read(MO_PCI_INTSTAT) &
(core->pci_irqmask | PCI_INT_VIDINT);
- if (0 == status)
+ if (status == 0)
goto out;
cx_write(MO_PCI_INTSTAT, status);
handled = 1;
if (status & core->pci_irqmask)
- cx88_core_irq(core,status);
+ cx88_core_irq(core, status);
if (status & PCI_INT_VIDINT)
cx8800_vid_irq(dev);
}
- if (10 == loop) {
- printk(KERN_WARNING "%s/0: irq loop -- clearing mask\n",
- core->name);
- cx_write(MO_PCI_INTMSK,0);
+ if (loop == 10) {
+ pr_warn("irq loop -- clearing mask\n");
+ cx_write(MO_PCI_INTMSK, 0);
}
out:
@@ -1152,8 +1164,7 @@ static irqreturn_t cx8800_irq(int irq, void *dev_id)
/* ----------------------------------------------------------- */
/* exported stuff */
-static const struct v4l2_file_operations video_fops =
-{
+static const struct v4l2_file_operations video_fops = {
.owner = THIS_MODULE,
.open = v4l2_fh_open,
.release = vb2_fop_release,
@@ -1195,7 +1206,7 @@ static const struct v4l2_ioctl_ops video_ioctl_ops = {
static const struct video_device cx8800_video_template = {
.name = "cx8800-video",
.fops = &video_fops,
- .ioctl_ops = &video_ioctl_ops,
+ .ioctl_ops = &video_ioctl_ops,
.tvnorms = CX88_NORMS,
};
@@ -1232,8 +1243,7 @@ static const struct video_device cx8800_vbi_template = {
.tvnorms = CX88_NORMS,
};
-static const struct v4l2_file_operations radio_fops =
-{
+static const struct v4l2_file_operations radio_fops = {
.owner = THIS_MODULE,
.open = radio_open,
.poll = v4l2_ctrl_poll,
@@ -1258,7 +1268,7 @@ static const struct v4l2_ioctl_ops radio_ioctl_ops = {
static const struct video_device cx8800_radio_template = {
.name = "cx8800-radio",
.fops = &radio_fops,
- .ioctl_ops = &radio_ioctl_ops,
+ .ioctl_ops = &radio_ioctl_ops,
};
static const struct v4l2_ctrl_ops cx8800_ctrl_vid_ops = {
@@ -1287,8 +1297,8 @@ static int cx8800_initdev(struct pci_dev *pci_dev,
int err;
int i;
- dev = kzalloc(sizeof(*dev),GFP_KERNEL);
- if (NULL == dev)
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ if (!dev)
return -ENOMEM;
/* pci init */
@@ -1298,7 +1308,7 @@ static int cx8800_initdev(struct pci_dev *pci_dev,
goto fail_free;
}
core = cx88_core_get(dev->pci);
- if (NULL == core) {
+ if (!core) {
err = -EINVAL;
goto fail_free;
}
@@ -1307,15 +1317,15 @@ static int cx8800_initdev(struct pci_dev *pci_dev,
/* print pci info */
dev->pci_rev = pci_dev->revision;
pci_read_config_byte(pci_dev, PCI_LATENCY_TIMER, &dev->pci_lat);
- printk(KERN_INFO "%s/0: found at %s, rev: %d, irq: %d, "
- "latency: %d, mmio: 0x%llx\n", core->name,
- pci_name(pci_dev), dev->pci_rev, pci_dev->irq,
- dev->pci_lat,(unsigned long long)pci_resource_start(pci_dev,0));
+ pr_info("found at %s, rev: %d, irq: %d, latency: %d, mmio: 0x%llx\n",
+ pci_name(pci_dev), dev->pci_rev, pci_dev->irq,
+ dev->pci_lat,
+ (unsigned long long)pci_resource_start(pci_dev, 0));
pci_set_master(pci_dev);
- err = pci_set_dma_mask(pci_dev,DMA_BIT_MASK(32));
+ err = pci_set_dma_mask(pci_dev, DMA_BIT_MASK(32));
if (err) {
- printk("%s/0: Oops: no 32bit PCI DMA ???\n",core->name);
+ pr_err("Oops: no 32bit PCI DMA ???\n");
goto fail_core;
}
@@ -1332,8 +1342,7 @@ static int cx8800_initdev(struct pci_dev *pci_dev,
err = request_irq(pci_dev->irq, cx8800_irq,
IRQF_SHARED, core->name, dev);
if (err < 0) {
- printk(KERN_ERR "%s/0: can't get IRQ %d\n",
- core->name,pci_dev->irq);
+ pr_err("can't get IRQ %d\n", pci_dev->irq);
goto fail_core;
}
cx_set(MO_PCI_INTMSK, core->pci_irqmask);
@@ -1343,8 +1352,9 @@ static int cx8800_initdev(struct pci_dev *pci_dev,
struct v4l2_ctrl *vc;
vc = v4l2_ctrl_new_std(&core->audio_hdl, &cx8800_ctrl_aud_ops,
- cc->id, cc->minimum, cc->maximum, cc->step, cc->default_value);
- if (vc == NULL) {
+ cc->id, cc->minimum, cc->maximum,
+ cc->step, cc->default_value);
+ if (!vc) {
err = core->audio_hdl.error;
goto fail_core;
}
@@ -1356,8 +1366,9 @@ static int cx8800_initdev(struct pci_dev *pci_dev,
struct v4l2_ctrl *vc;
vc = v4l2_ctrl_new_std(&core->video_hdl, &cx8800_ctrl_vid_ops,
- cc->id, cc->minimum, cc->maximum, cc->step, cc->default_value);
- if (vc == NULL) {
+ cc->id, cc->minimum, cc->maximum,
+ cc->step, cc->default_value);
+ if (!vc) {
err = core->video_hdl.error;
goto fail_core;
}
@@ -1383,18 +1394,20 @@ static int cx8800_initdev(struct pci_dev *pci_dev,
core->wm8775_data.is_nova_s = false;
sd = v4l2_i2c_new_subdev_board(&core->v4l2_dev, &core->i2c_adap,
- &wm8775_info, NULL);
- if (sd != NULL) {
+ &wm8775_info, NULL);
+ if (sd) {
core->sd_wm8775 = sd;
sd->grp_id = WM8775_GID;
}
}
if (core->board.audio_chip == CX88_AUDIO_TVAUDIO) {
- /* This probes for a tda9874 as is used on some
- Pixelview Ultra boards. */
+ /*
+ * This probes for a tda9874 as is used on some
+ * Pixelview Ultra boards.
+ */
v4l2_i2c_new_subdev(&core->v4l2_dev, &core->i2c_adap,
- "tvaudio", 0, I2C_ADDRS(0xb0 >> 1));
+ "tvaudio", 0, I2C_ADDRS(0xb0 >> 1));
}
switch (core->boardnr) {
@@ -1470,12 +1483,11 @@ static int cx8800_initdev(struct pci_dev *pci_dev,
err = video_register_device(&dev->video_dev, VFL_TYPE_GRABBER,
video_nr[core->nr]);
if (err < 0) {
- printk(KERN_ERR "%s/0: can't register video device\n",
- core->name);
+ pr_err("can't register video device\n");
goto fail_unreg;
}
- printk(KERN_INFO "%s/0: registered device %s [v4l2]\n",
- core->name, video_device_node_name(&dev->video_dev));
+ pr_info("registered device %s [v4l2]\n",
+ video_device_node_name(&dev->video_dev));
cx88_vdev_init(core, dev->pci, &dev->vbi_dev,
&cx8800_vbi_template, "vbi");
@@ -1484,12 +1496,11 @@ static int cx8800_initdev(struct pci_dev *pci_dev,
err = video_register_device(&dev->vbi_dev, VFL_TYPE_VBI,
vbi_nr[core->nr]);
if (err < 0) {
- printk(KERN_ERR "%s/0: can't register vbi device\n",
- core->name);
+ pr_err("can't register vbi device\n");
goto fail_unreg;
}
- printk(KERN_INFO "%s/0: registered device %s\n",
- core->name, video_device_node_name(&dev->vbi_dev));
+ pr_info("registered device %s\n",
+ video_device_node_name(&dev->vbi_dev));
if (core->board.radio.type == CX88_RADIO) {
cx88_vdev_init(core, dev->pci, &dev->radio_dev,
@@ -1499,21 +1510,21 @@ static int cx8800_initdev(struct pci_dev *pci_dev,
err = video_register_device(&dev->radio_dev, VFL_TYPE_RADIO,
radio_nr[core->nr]);
if (err < 0) {
- printk(KERN_ERR "%s/0: can't register radio device\n",
- core->name);
+ pr_err("can't register radio device\n");
goto fail_unreg;
}
- printk(KERN_INFO "%s/0: registered device %s\n",
- core->name, video_device_node_name(&dev->radio_dev));
+ pr_info("registered device %s\n",
+ video_device_node_name(&dev->radio_dev));
}
/* start tvaudio thread */
if (core->board.tuner_type != UNSET) {
- core->kthread = kthread_run(cx88_audio_thread, core, "cx88 tvaudio");
+ core->kthread = kthread_run(cx88_audio_thread,
+ core, "cx88 tvaudio");
if (IS_ERR(core->kthread)) {
err = PTR_ERR(core->kthread);
- printk(KERN_ERR "%s/0: failed to create cx88 audio thread, err=%d\n",
- core->name, err);
+ pr_err("failed to create cx88 audio thread, err=%d\n",
+ err);
}
}
mutex_unlock(&core->lock);
@@ -1526,7 +1537,7 @@ fail_unreg:
mutex_unlock(&core->lock);
fail_core:
core->v4ldev = NULL;
- cx88_core_put(core,dev->pci);
+ cx88_core_put(core, dev->pci);
fail_free:
kfree(dev);
return err;
@@ -1557,7 +1568,7 @@ static void cx8800_finidev(struct pci_dev *pci_dev)
core->v4ldev = NULL;
/* free memory */
- cx88_core_put(core,dev->pci);
+ cx88_core_put(core, dev->pci);
kfree(dev);
}
@@ -1571,11 +1582,11 @@ static int cx8800_suspend(struct pci_dev *pci_dev, pm_message_t state)
/* stop video+vbi capture */
spin_lock_irqsave(&dev->slock, flags);
if (!list_empty(&dev->vidq.active)) {
- printk("%s/0: suspend video\n", core->name);
+ pr_info("suspend video\n");
stop_video_dma(dev);
}
if (!list_empty(&dev->vbiq.active)) {
- printk("%s/0: suspend vbi\n", core->name);
+ pr_info("suspend vbi\n");
cx8800_stop_vbi_dma(dev);
}
spin_unlock_irqrestore(&dev->slock, flags);
@@ -1586,7 +1597,8 @@ static int cx8800_suspend(struct pci_dev *pci_dev, pm_message_t state)
cx88_shutdown(core);
pci_save_state(pci_dev);
- if (0 != pci_set_power_state(pci_dev, pci_choose_state(pci_dev, state))) {
+ if (pci_set_power_state(pci_dev,
+ pci_choose_state(pci_dev, state)) != 0) {
pci_disable_device(pci_dev);
dev->state.disabled = 1;
}
@@ -1601,18 +1613,17 @@ static int cx8800_resume(struct pci_dev *pci_dev)
int err;
if (dev->state.disabled) {
- err=pci_enable_device(pci_dev);
+ err = pci_enable_device(pci_dev);
if (err) {
- printk(KERN_ERR "%s/0: can't enable device\n",
- core->name);
+ pr_err("can't enable device\n");
return err;
}
dev->state.disabled = 0;
}
- err= pci_set_power_state(pci_dev, PCI_D0);
+ err = pci_set_power_state(pci_dev, PCI_D0);
if (err) {
- printk(KERN_ERR "%s/0: can't set power state\n", core->name);
+ pr_err("can't set power state\n");
pci_disable_device(pci_dev);
dev->state.disabled = 1;
@@ -1630,12 +1641,12 @@ static int cx8800_resume(struct pci_dev *pci_dev)
/* restart video+vbi capture */
spin_lock_irqsave(&dev->slock, flags);
if (!list_empty(&dev->vidq.active)) {
- printk("%s/0: resume video\n", core->name);
- restart_video_queue(dev,&dev->vidq);
+ pr_info("resume video\n");
+ restart_video_queue(dev, &dev->vidq);
}
if (!list_empty(&dev->vbiq.active)) {
- printk("%s/0: resume vbi\n", core->name);
- cx8800_restart_vbi_queue(dev,&dev->vbiq);
+ pr_info("resume vbi\n");
+ cx8800_restart_vbi_queue(dev, &dev->vbiq);
}
spin_unlock_irqrestore(&dev->slock, flags);
@@ -1651,7 +1662,7 @@ static const struct pci_device_id cx8800_pci_tbl[] = {
.device = 0x8800,
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,
- },{
+ }, {
/* --- end of list --- */
}
};
diff --git a/drivers/media/pci/cx88/cx88-vp3054-i2c.c b/drivers/media/pci/cx88/cx88-vp3054-i2c.c
index deede6e25d94..92876de3841c 100644
--- a/drivers/media/pci/cx88/cx88-vp3054-i2c.c
+++ b/drivers/media/pci/cx88/cx88-vp3054-i2c.c
@@ -1,35 +1,28 @@
/*
+ * cx88-vp3054-i2c.c -- support for the secondary I2C bus of the
+ * DNTV Live! DVB-T Pro (VP-3054), wired as:
+ * GPIO[0] -> SCL, GPIO[1] -> SDA
+ *
+ * (c) 2005 Chris Pascoe <c.pascoe@itee.uq.edu.au>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
- cx88-vp3054-i2c.c -- support for the secondary I2C bus of the
- DNTV Live! DVB-T Pro (VP-3054), wired as:
- GPIO[0] -> SCL, GPIO[1] -> SDA
-
- (c) 2005 Chris Pascoe <c.pascoe@itee.uq.edu.au>
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-
-*/
+#include "cx88.h"
+#include "cx88-vp3054-i2c.h"
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
-
-#include <asm/io.h>
-
-#include "cx88.h"
-#include "cx88-vp3054-i2c.h"
+#include <linux/io.h>
MODULE_DESCRIPTION("driver for cx2388x VP3054 design");
MODULE_AUTHOR("Chris Pascoe <c.pascoe@itee.uq.edu.au>");
@@ -114,7 +107,7 @@ int vp3054_i2c_probe(struct cx8802_dev *dev)
return 0;
vp3054_i2c = kzalloc(sizeof(*vp3054_i2c), GFP_KERNEL);
- if (vp3054_i2c == NULL)
+ if (!vp3054_i2c)
return -ENOMEM;
dev->vp3054 = vp3054_i2c;
@@ -128,12 +121,12 @@ int vp3054_i2c_probe(struct cx8802_dev *dev)
i2c_set_adapdata(&vp3054_i2c->adap, dev);
vp3054_i2c->adap.algo_data = &vp3054_i2c->algo;
- vp3054_bit_setscl(dev,1);
- vp3054_bit_setsda(dev,1);
+ vp3054_bit_setscl(dev, 1);
+ vp3054_bit_setsda(dev, 1);
rc = i2c_bit_add_bus(&vp3054_i2c->adap);
- if (0 != rc) {
- printk("%s: vp3054_i2c register FAILED\n", core->name);
+ if (rc != 0) {
+ pr_err("vp3054_i2c register FAILED\n");
kfree(dev->vp3054);
dev->vp3054 = NULL;
@@ -141,18 +134,17 @@ int vp3054_i2c_probe(struct cx8802_dev *dev)
return rc;
}
+EXPORT_SYMBOL(vp3054_i2c_probe);
void vp3054_i2c_remove(struct cx8802_dev *dev)
{
struct vp3054_i2c_state *vp3054_i2c = dev->vp3054;
- if (vp3054_i2c == NULL ||
+ if (!vp3054_i2c ||
dev->core->boardnr != CX88_BOARD_DNTV_LIVE_DVB_T_PRO)
return;
i2c_del_adapter(&vp3054_i2c->adap);
kfree(vp3054_i2c);
}
-
-EXPORT_SYMBOL(vp3054_i2c_probe);
EXPORT_SYMBOL(vp3054_i2c_remove);
diff --git a/drivers/media/pci/cx88/cx88-vp3054-i2c.h b/drivers/media/pci/cx88/cx88-vp3054-i2c.h
index 95d0c60a35e1..ec19bea8f1e2 100644
--- a/drivers/media/pci/cx88/cx88-vp3054-i2c.h
+++ b/drivers/media/pci/cx88/cx88-vp3054-i2c.h
@@ -1,26 +1,20 @@
/*
-
- cx88-vp3054-i2c.h -- support for the secondary I2C bus of the
- DNTV Live! DVB-T Pro (VP-3054), wired as:
- GPIO[0] -> SCL, GPIO[1] -> SDA
-
- (c) 2005 Chris Pascoe <c.pascoe@itee.uq.edu.au>
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-
-*/
+ * cx88-vp3054-i2c.h -- support for the secondary I2C bus of the
+ * DNTV Live! DVB-T Pro (VP-3054), wired as:
+ * GPIO[0] -> SCL, GPIO[1] -> SDA
+ *
+ * (c) 2005 Chris Pascoe <c.pascoe@itee.uq.edu.au>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
/* ----------------------------------------------------------------------- */
struct vp3054_i2c_state {
diff --git a/drivers/media/pci/cx88/cx88.h b/drivers/media/pci/cx88/cx88.h
index ecd4b7bece99..115414cf520f 100644
--- a/drivers/media/pci/cx88/cx88.h
+++ b/drivers/media/pci/cx88/cx88.h
@@ -1,5 +1,4 @@
/*
- *
* v4l2 device driver for cx2388x based TV cards
*
* (c) 2003,04 Gerd Knorr <kraxel@bytesex.org> [SUSE Labs]
@@ -13,12 +12,13 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
+#ifndef CX88_H
+#define CX88_H
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/pci.h>
#include <linux/i2c.h>
#include <linux/i2c-algo-bit.h>
@@ -53,7 +53,7 @@
/* defines and enums */
/* Currently unsupported by the driver: PAL/H, NTSC/Kr, SECAM/LC */
-#define CX88_NORMS (V4L2_STD_ALL \
+#define CX88_NORMS (V4L2_STD_ALL \
& ~V4L2_STD_PAL_H \
& ~V4L2_STD_NTSC_M_KR \
& ~V4L2_STD_SECAM_LC)
@@ -98,7 +98,6 @@ static inline unsigned int norm_maxw(v4l2_std_id norm)
return 720;
}
-
static inline unsigned int norm_maxh(v4l2_std_id norm)
{
return (norm & V4L2_STD_525_60) ? 480 : 576;
@@ -140,6 +139,7 @@ struct sram_channel {
u32 cnt1_reg;
u32 cnt2_reg;
};
+
extern const struct sram_channel cx88_sram_channels[];
/* ----------------------------------------------------------- */
@@ -361,12 +361,12 @@ struct cx88_core {
u32 i2c_state, i2c_rc;
/* config info -- analog */
- struct v4l2_device v4l2_dev;
+ struct v4l2_device v4l2_dev;
struct v4l2_ctrl_handler video_hdl;
struct v4l2_ctrl *chroma_agc;
struct v4l2_ctrl_handler audio_hdl;
struct v4l2_subdev *sd_wm8775;
- struct i2c_client *i2c_rtc;
+ struct i2c_client *i2c_rtc;
unsigned int boardnr;
struct cx88_board board;
@@ -383,8 +383,8 @@ struct cx88_core {
/* state info */
struct task_struct *kthread;
v4l2_std_id tvnorm;
- unsigned width, height;
- unsigned field;
+ unsigned int width, height;
+ unsigned int field;
enum cx88_tvaudio tvaudio;
u32 audiomode_manual;
u32 audiomode_current;
@@ -427,7 +427,8 @@ static inline struct cx88_core *to_core(struct v4l2_device *v4l2_dev)
if (!core->i2c_rc) { \
if (core->gate_ctrl) \
core->gate_ctrl(core, 1); \
- v4l2_device_call_all(&core->v4l2_dev, grpid, o, f, ##args); \
+ v4l2_device_call_all(&core->v4l2_dev, \
+ grpid, o, f, ##args); \
if (core->gate_ctrl) \
core->gate_ctrl(core, 0); \
} \
@@ -438,31 +439,31 @@ static inline struct cx88_core *to_core(struct v4l2_device *v4l2_dev)
#define WM8775_GID (1 << 0)
#define wm8775_s_ctrl(core, id, val) \
- do { \
- struct v4l2_ctrl *ctrl_ = \
- v4l2_ctrl_find(core->sd_wm8775->ctrl_handler, id); \
- if (ctrl_ && !core->i2c_rc) { \
- if (core->gate_ctrl) \
- core->gate_ctrl(core, 1); \
- v4l2_ctrl_s_ctrl(ctrl_, val); \
- if (core->gate_ctrl) \
- core->gate_ctrl(core, 0); \
- } \
+ do { \
+ struct v4l2_ctrl *ctrl_ = \
+ v4l2_ctrl_find(core->sd_wm8775->ctrl_handler, id);\
+ if (ctrl_ && !core->i2c_rc) { \
+ if (core->gate_ctrl) \
+ core->gate_ctrl(core, 1); \
+ v4l2_ctrl_s_ctrl(ctrl_, val); \
+ if (core->gate_ctrl) \
+ core->gate_ctrl(core, 0); \
+ } \
} while (0)
#define wm8775_g_ctrl(core, id) \
- ({ \
- struct v4l2_ctrl *ctrl_ = \
- v4l2_ctrl_find(core->sd_wm8775->ctrl_handler, id); \
- s32 val = 0; \
- if (ctrl_ && !core->i2c_rc) { \
- if (core->gate_ctrl) \
- core->gate_ctrl(core, 1); \
- val = v4l2_ctrl_g_ctrl(ctrl_); \
- if (core->gate_ctrl) \
- core->gate_ctrl(core, 0); \
- } \
- val; \
+ ({ \
+ struct v4l2_ctrl *ctrl_ = \
+ v4l2_ctrl_find(core->sd_wm8775->ctrl_handler, id);\
+ s32 val = 0; \
+ if (ctrl_ && !core->i2c_rc) { \
+ if (core->gate_ctrl) \
+ core->gate_ctrl(core, 1); \
+ val = v4l2_ctrl_g_ctrl(ctrl_); \
+ if (core->gate_ctrl) \
+ core->gate_ctrl(core, 0); \
+ } \
+ val; \
})
/* ----------------------------------------------------------- */
@@ -484,7 +485,7 @@ struct cx8800_dev {
/* pci i/o */
struct pci_dev *pci;
- unsigned char pci_rev,pci_lat;
+ unsigned char pci_rev, pci_lat;
const struct cx8800_fmt *fmt;
@@ -504,7 +505,6 @@ struct cx8800_dev {
/* function 1: audio/alsa stuff */
/* =============> moved to cx88-alsa.c <====================== */
-
/* ----------------------------------------------------------- */
/* function 2: mpeg stuff */
@@ -547,7 +547,7 @@ struct cx8802_dev {
/* pci i/o */
struct pci_dev *pci;
- unsigned char pci_rev,pci_lat;
+ unsigned char pci_rev, pci_lat;
/* dma queues */
struct cx88_dmaqueue mpegq;
@@ -566,6 +566,7 @@ struct cx8802_dev {
/* mpeg params */
struct cx2341x_handler cxhdl;
+
#endif
#if IS_ENABLED(CONFIG_VIDEO_CX88_DVB)
@@ -588,40 +589,42 @@ struct cx8802_dev {
/* ----------------------------------------------------------- */
-#define cx_read(reg) readl(core->lmmio + ((reg)>>2))
-#define cx_write(reg,value) writel((value), core->lmmio + ((reg)>>2))
-#define cx_writeb(reg,value) writeb((value), core->bmmio + (reg))
+#define cx_read(reg) readl(core->lmmio + ((reg) >> 2))
+#define cx_write(reg, value) writel((value), core->lmmio + ((reg) >> 2))
+#define cx_writeb(reg, value) writeb((value), core->bmmio + (reg))
-#define cx_andor(reg,mask,value) \
- writel((readl(core->lmmio+((reg)>>2)) & ~(mask)) |\
- ((value) & (mask)), core->lmmio+((reg)>>2))
-#define cx_set(reg,bit) cx_andor((reg),(bit),(bit))
-#define cx_clear(reg,bit) cx_andor((reg),(bit),0)
+#define cx_andor(reg, mask, value) \
+ writel((readl(core->lmmio + ((reg) >> 2)) & ~(mask)) |\
+ ((value) & (mask)), core->lmmio + ((reg) >> 2))
+#define cx_set(reg, bit) cx_andor((reg), (bit), (bit))
+#define cx_clear(reg, bit) cx_andor((reg), (bit), 0)
#define cx_wait(d) { if (need_resched()) schedule(); else udelay(d); }
/* shadow registers */
#define cx_sread(sreg) (core->shadow[sreg])
-#define cx_swrite(sreg,reg,value) \
- (core->shadow[sreg] = value, \
- writel(core->shadow[sreg], core->lmmio + ((reg)>>2)))
-#define cx_sandor(sreg,reg,mask,value) \
- (core->shadow[sreg] = (core->shadow[sreg] & ~(mask)) | ((value) & (mask)), \
- writel(core->shadow[sreg], core->lmmio + ((reg)>>2)))
+#define cx_swrite(sreg, reg, value) \
+ (core->shadow[sreg] = value, \
+ writel(core->shadow[sreg], core->lmmio + ((reg) >> 2)))
+#define cx_sandor(sreg, reg, mask, value) \
+ (core->shadow[sreg] = (core->shadow[sreg] & ~(mask)) | \
+ ((value) & (mask)), \
+ writel(core->shadow[sreg], \
+ core->lmmio + ((reg) >> 2)))
/* ----------------------------------------------------------- */
/* cx88-core.c */
extern unsigned int cx88_core_debug;
-extern void cx88_print_irqbits(const char *name, const char *tag, const char *strings[],
- int len, u32 bits, u32 mask);
+void cx88_print_irqbits(const char *tag, const char *strings[],
+ int len, u32 bits, u32 mask);
-extern int cx88_core_irq(struct cx88_core *core, u32 status);
-extern void cx88_wakeup(struct cx88_core *core,
- struct cx88_dmaqueue *q, u32 count);
-extern void cx88_shutdown(struct cx88_core *core);
-extern int cx88_reset(struct cx88_core *core);
+int cx88_core_irq(struct cx88_core *core, u32 status);
+void cx88_wakeup(struct cx88_core *core,
+ struct cx88_dmaqueue *q, u32 count);
+void cx88_shutdown(struct cx88_core *core);
+int cx88_reset(struct cx88_core *core);
extern int
cx88_risc_buffer(struct pci_dev *pci, struct cx88_riscmem *risc,
@@ -633,43 +636,37 @@ cx88_risc_databuffer(struct pci_dev *pci, struct cx88_riscmem *risc,
struct scatterlist *sglist, unsigned int bpl,
unsigned int lines, unsigned int lpi);
-extern void cx88_risc_disasm(struct cx88_core *core,
- struct cx88_riscmem *risc);
-extern int cx88_sram_channel_setup(struct cx88_core *core,
- const struct sram_channel *ch,
- unsigned int bpl, u32 risc);
-extern void cx88_sram_channel_dump(struct cx88_core *core,
- const struct sram_channel *ch);
-
-extern int cx88_set_scale(struct cx88_core *core, unsigned int width,
- unsigned int height, enum v4l2_field field);
-extern int cx88_set_tvnorm(struct cx88_core *core, v4l2_std_id norm);
-
-extern void cx88_vdev_init(struct cx88_core *core,
- struct pci_dev *pci,
- struct video_device *vfd,
- const struct video_device *template_,
- const char *type);
-extern struct cx88_core *cx88_core_get(struct pci_dev *pci);
-extern void cx88_core_put(struct cx88_core *core,
- struct pci_dev *pci);
-
-extern int cx88_start_audio_dma(struct cx88_core *core);
-extern int cx88_stop_audio_dma(struct cx88_core *core);
-
+void cx88_risc_disasm(struct cx88_core *core,
+ struct cx88_riscmem *risc);
+int cx88_sram_channel_setup(struct cx88_core *core,
+ const struct sram_channel *ch,
+ unsigned int bpl, u32 risc);
+void cx88_sram_channel_dump(struct cx88_core *core,
+ const struct sram_channel *ch);
+
+int cx88_set_scale(struct cx88_core *core, unsigned int width,
+ unsigned int height, enum v4l2_field field);
+int cx88_set_tvnorm(struct cx88_core *core, v4l2_std_id norm);
+
+void cx88_vdev_init(struct cx88_core *core,
+ struct pci_dev *pci,
+ struct video_device *vfd,
+ const struct video_device *template_,
+ const char *type);
+struct cx88_core *cx88_core_get(struct pci_dev *pci);
+void cx88_core_put(struct cx88_core *core,
+ struct pci_dev *pci);
+
+int cx88_start_audio_dma(struct cx88_core *core);
+int cx88_stop_audio_dma(struct cx88_core *core);
/* ----------------------------------------------------------- */
/* cx88-vbi.c */
/* Can be used as g_vbi_fmt, try_vbi_fmt and s_vbi_fmt */
-int cx8800_vbi_fmt (struct file *file, void *priv,
- struct v4l2_format *f);
+int cx8800_vbi_fmt(struct file *file, void *priv,
+ struct v4l2_format *f);
-/*
-int cx8800_start_vbi_dma(struct cx8800_dev *dev,
- struct cx88_dmaqueue *q,
- struct cx88_buffer *buf);
-*/
void cx8800_stop_vbi_dma(struct cx8800_dev *dev);
int cx8800_restart_vbi_queue(struct cx8800_dev *dev, struct cx88_dmaqueue *q);
@@ -678,17 +675,16 @@ extern const struct vb2_ops cx8800_vbi_qops;
/* ----------------------------------------------------------- */
/* cx88-i2c.c */
-extern int cx88_i2c_init(struct cx88_core *core, struct pci_dev *pci);
-
+int cx88_i2c_init(struct cx88_core *core, struct pci_dev *pci);
/* ----------------------------------------------------------- */
/* cx88-cards.c */
-extern int cx88_tuner_callback(void *dev, int component, int command, int arg);
-extern int cx88_get_resources(const struct cx88_core *core,
- struct pci_dev *pci);
-extern struct cx88_core *cx88_core_create(struct pci_dev *pci, int nr);
-extern void cx88_setup_xc3028(struct cx88_core *core, struct xc2028_ctrl *ctl);
+int cx88_tuner_callback(void *dev, int component, int command, int arg);
+int cx88_get_resources(const struct cx88_core *core,
+ struct pci_dev *pci);
+struct cx88_core *cx88_core_create(struct pci_dev *pci, int nr);
+void cx88_setup_xc3028(struct cx88_core *core, struct xc2028_ctrl *ctl);
/* ----------------------------------------------------------- */
/* cx88-tvaudio.c */
@@ -703,7 +699,8 @@ int cx8802_register_driver(struct cx8802_driver *drv);
int cx8802_unregister_driver(struct cx8802_driver *drv);
/* Caller must hold core->lock */
-struct cx8802_driver * cx8802_get_driver(struct cx8802_dev *dev, enum cx88_board_type btype);
+struct cx8802_driver *cx8802_get_driver(struct cx8802_dev *dev,
+ enum cx88_board_type btype);
/* ----------------------------------------------------------- */
/* cx88-dsp.c */
@@ -718,18 +715,18 @@ int cx88_ir_fini(struct cx88_core *core);
void cx88_ir_irq(struct cx88_core *core);
int cx88_ir_start(struct cx88_core *core);
void cx88_ir_stop(struct cx88_core *core);
-extern void cx88_i2c_init_ir(struct cx88_core *core);
+void cx88_i2c_init_ir(struct cx88_core *core);
/* ----------------------------------------------------------- */
/* cx88-mpeg.c */
int cx8802_buf_prepare(struct vb2_queue *q, struct cx8802_dev *dev,
- struct cx88_buffer *buf);
+ struct cx88_buffer *buf);
void cx8802_buf_queue(struct cx8802_dev *dev, struct cx88_buffer *buf);
void cx8802_cancel_buffers(struct cx8802_dev *dev);
int cx8802_start_dma(struct cx8802_dev *dev,
- struct cx88_dmaqueue *q,
- struct cx88_buffer *buf);
+ struct cx88_dmaqueue *q,
+ struct cx88_buffer *buf);
/* ----------------------------------------------------------- */
/* cx88-video.c*/
@@ -737,4 +734,6 @@ int cx88_enum_input(struct cx88_core *core, struct v4l2_input *i);
int cx88_set_freq(struct cx88_core *core, const struct v4l2_frequency *f);
int cx88_video_mux(struct cx88_core *core, unsigned int input);
void cx88_querycap(struct file *file, struct cx88_core *core,
- struct v4l2_capability *cap);
+ struct v4l2_capability *cap);
+
+#endif
diff --git a/drivers/media/pci/ddbridge/ddbridge-core.c b/drivers/media/pci/ddbridge/ddbridge-core.c
index 18e3a4deee64..a6c9fe235974 100644
--- a/drivers/media/pci/ddbridge/ddbridge-core.c
+++ b/drivers/media/pci/ddbridge/ddbridge-core.c
@@ -824,8 +824,7 @@ static int dvb_input_attach(struct ddb_input *input)
&input->port->dev->pdev->dev,
adapter_nr);
if (ret < 0) {
- printk(KERN_ERR "ddbridge: Could not register adapter."
- "Check if you enabled enough adapters in dvb-core!\n");
+ printk(KERN_ERR "ddbridge: Could not register adapter.Check if you enabled enough adapters in dvb-core!\n");
return ret;
}
input->attached = 1;
@@ -1730,8 +1729,7 @@ static __init int module_init_ddbridge(void)
{
int ret;
- printk(KERN_INFO "Digital Devices PCIE bridge driver, "
- "Copyright (C) 2010-11 Digital Devices GmbH\n");
+ printk(KERN_INFO "Digital Devices PCIE bridge driver, Copyright (C) 2010-11 Digital Devices GmbH\n");
ret = ddb_class_create();
if (ret < 0)
diff --git a/drivers/media/pci/dm1105/dm1105.c b/drivers/media/pci/dm1105/dm1105.c
index 5dd504741b12..a589aa78d1d9 100644
--- a/drivers/media/pci/dm1105/dm1105.c
+++ b/drivers/media/pci/dm1105/dm1105.c
@@ -315,8 +315,7 @@ static void dm1105_card_list(struct pci_dev *pci)
"dm1105: Updating to the latest version might help\n"
"dm1105: as well.\n");
}
- printk(KERN_ERR "Here is a list of valid choices for the card=<n> "
- "insmod option:\n");
+ printk(KERN_ERR "Here is a list of valid choices for the card=<n> insmod option:\n");
for (i = 0; i < ARRAY_SIZE(dm1105_boards); i++)
printk(KERN_ERR "dm1105: card=%d -> %s\n",
i, dm1105_boards[i].name);
diff --git a/drivers/media/pci/ivtv/ivtv-alsa-main.c b/drivers/media/pci/ivtv/ivtv-alsa-main.c
index 8a86b61a896d..374f45f81ab3 100644
--- a/drivers/media/pci/ivtv/ivtv-alsa-main.c
+++ b/drivers/media/pci/ivtv/ivtv-alsa-main.c
@@ -177,8 +177,8 @@ static int snd_ivtv_init(struct v4l2_device *v4l2_dev)
#if 0
ret = snd_ivtv_mixer_create(itvsc);
if (ret) {
- IVTV_ALSA_WARN("%s: snd_ivtv_mixer_create() failed with err %d:"
- " proceeding anyway\n", __func__, ret);
+ IVTV_ALSA_WARN("%s: snd_ivtv_mixer_create() failed with err %d: proceeding anyway\n",
+ __func__, ret);
}
#endif
@@ -235,8 +235,8 @@ static int ivtv_alsa_load(struct ivtv *itv)
s = &itv->streams[IVTV_ENC_STREAM_TYPE_PCM];
if (s->vdev.v4l2_dev == NULL) {
- IVTV_DEBUG_ALSA_INFO("%s: PCM stream for card is disabled - "
- "skipping\n", __func__);
+ IVTV_DEBUG_ALSA_INFO("%s: PCM stream for card is disabled - skipping\n",
+ __func__);
return 0;
}
@@ -250,8 +250,8 @@ static int ivtv_alsa_load(struct ivtv *itv)
IVTV_ALSA_ERR("%s: failed to create struct snd_ivtv_card\n",
__func__);
} else {
- IVTV_DEBUG_ALSA_INFO("%s: created ivtv ALSA interface instance "
- "\n", __func__);
+ IVTV_DEBUG_ALSA_INFO("%s: created ivtv ALSA interface instance\n",
+ __func__);
}
return 0;
}
diff --git a/drivers/media/pci/ivtv/ivtv-driver.c b/drivers/media/pci/ivtv/ivtv-driver.c
index ee48c3e09de4..0a3b80a4bd69 100644
--- a/drivers/media/pci/ivtv/ivtv-driver.c
+++ b/drivers/media/pci/ivtv/ivtv-driver.c
@@ -885,8 +885,8 @@ static int ivtv_setup_pci(struct ivtv *itv, struct pci_dev *pdev,
pci_read_config_byte(pdev, PCI_LATENCY_TIMER, &pci_latency);
if (pci_latency < 64 && ivtv_pci_latency) {
- IVTV_INFO("Unreasonably low latency timer, "
- "setting to 64 (was %d)\n", pci_latency);
+ IVTV_INFO("Unreasonably low latency timer, setting to 64 (was %d)\n",
+ pci_latency);
pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 64);
pci_read_config_byte(pdev, PCI_LATENCY_TIMER, &pci_latency);
}
@@ -896,8 +896,7 @@ static int ivtv_setup_pci(struct ivtv *itv, struct pci_dev *pdev,
these problems. */
pci_write_config_dword(pdev, 0x40, 0xffff);
- IVTV_DEBUG_INFO("%d (rev %d) at %02x:%02x.%x, "
- "irq: %d, latency: %d, memory: 0x%llx\n",
+ IVTV_DEBUG_INFO("%d (rev %d) at %02x:%02x.%x, irq: %d, latency: %d, memory: 0x%llx\n",
pdev->device, pdev->revision, pdev->bus->number,
PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn),
pdev->irq, pci_latency, (u64)itv->base_addr);
@@ -1047,13 +1046,10 @@ static int ivtv_probe(struct pci_dev *pdev, const struct pci_device_id *pci_id)
itv->enc_mem = ioremap_nocache(itv->base_addr + IVTV_ENCODER_OFFSET,
IVTV_ENCODER_SIZE);
if (!itv->enc_mem) {
- IVTV_ERR("ioremap failed. Can't get a window into CX23415/6 "
- "encoder memory\n");
- IVTV_ERR("Each capture card with a CX23415/6 needs 8 MB of "
- "vmalloc address space for this window\n");
+ IVTV_ERR("ioremap failed. Can't get a window into CX23415/6 encoder memory\n");
+ IVTV_ERR("Each capture card with a CX23415/6 needs 8 MB of vmalloc address space for this window\n");
IVTV_ERR("Check the output of 'grep Vmalloc /proc/meminfo'\n");
- IVTV_ERR("Use the vmalloc= kernel command line option to set "
- "VmallocTotal to a larger value\n");
+ IVTV_ERR("Use the vmalloc= kernel command line option to set VmallocTotal to a larger value\n");
retval = -ENOMEM;
goto free_mem;
}
@@ -1064,14 +1060,10 @@ static int ivtv_probe(struct pci_dev *pdev, const struct pci_device_id *pci_id)
itv->dec_mem = ioremap_nocache(itv->base_addr + IVTV_DECODER_OFFSET,
IVTV_DECODER_SIZE);
if (!itv->dec_mem) {
- IVTV_ERR("ioremap failed. Can't get a window into "
- "CX23415 decoder memory\n");
- IVTV_ERR("Each capture card with a CX23415 needs 8 MB "
- "of vmalloc address space for this window\n");
- IVTV_ERR("Check the output of 'grep Vmalloc "
- "/proc/meminfo'\n");
- IVTV_ERR("Use the vmalloc= kernel command line option "
- "to set VmallocTotal to a larger value\n");
+ IVTV_ERR("ioremap failed. Can't get a window into CX23415 decoder memory\n");
+ IVTV_ERR("Each capture card with a CX23415 needs 8 MB of vmalloc address space for this window\n");
+ IVTV_ERR("Check the output of 'grep Vmalloc /proc/meminfo'\n");
+ IVTV_ERR("Use the vmalloc= kernel command line option to set VmallocTotal to a larger value\n");
retval = -ENOMEM;
goto free_mem;
}
@@ -1086,13 +1078,10 @@ static int ivtv_probe(struct pci_dev *pdev, const struct pci_device_id *pci_id)
itv->reg_mem =
ioremap_nocache(itv->base_addr + IVTV_REG_OFFSET, IVTV_REG_SIZE);
if (!itv->reg_mem) {
- IVTV_ERR("ioremap failed. Can't get a window into CX23415/6 "
- "register space\n");
- IVTV_ERR("Each capture card with a CX23415/6 needs 64 kB of "
- "vmalloc address space for this window\n");
+ IVTV_ERR("ioremap failed. Can't get a window into CX23415/6 register space\n");
+ IVTV_ERR("Each capture card with a CX23415/6 needs 64 kB of vmalloc address space for this window\n");
IVTV_ERR("Check the output of 'grep Vmalloc /proc/meminfo'\n");
- IVTV_ERR("Use the vmalloc= kernel command line option to set "
- "VmallocTotal to a larger value\n");
+ IVTV_ERR("Use the vmalloc= kernel command line option to set VmallocTotal to a larger value\n");
retval = -ENOMEM;
goto free_io;
}
diff --git a/drivers/media/pci/ivtv/ivtv-firmware.c b/drivers/media/pci/ivtv/ivtv-firmware.c
index 5b3095f65dce..ba279fdb3df8 100644
--- a/drivers/media/pci/ivtv/ivtv-firmware.c
+++ b/drivers/media/pci/ivtv/ivtv-firmware.c
@@ -376,8 +376,8 @@ int ivtv_firmware_check(struct ivtv *itv, char *where)
/* If something failed & currently idle, try to reload */
if (res && !atomic_read(&itv->capturing) &&
!atomic_read(&itv->decoding)) {
- IVTV_INFO("Detected in %s that firmware had failed - "
- "Reloading\n", where);
+ IVTV_INFO("Detected in %s that firmware had failed - Reloading\n",
+ where);
res = ivtv_firmware_restart(itv);
/*
* Even if restarted ok, still signal a problem had occurred.
diff --git a/drivers/media/pci/ivtv/ivtv-yuv.c b/drivers/media/pci/ivtv/ivtv-yuv.c
index f7299d3d8244..44936d6d7c39 100644
--- a/drivers/media/pci/ivtv/ivtv-yuv.c
+++ b/drivers/media/pci/ivtv/ivtv-yuv.c
@@ -89,8 +89,8 @@ static int ivtv_yuv_prep_user_dma(struct ivtv *itv, struct ivtv_user_dma *dma,
if (y_pages == y_dma.page_count) {
IVTV_DEBUG_WARN
- ("failed to map uv user pages, returned %d "
- "expecting %d\n", uv_pages, uv_dma.page_count);
+ ("failed to map uv user pages, returned %d expecting %d\n",
+ uv_pages, uv_dma.page_count);
if (uv_pages >= 0) {
for (i = 0; i < uv_pages; i++)
@@ -101,8 +101,8 @@ static int ivtv_yuv_prep_user_dma(struct ivtv *itv, struct ivtv_user_dma *dma,
}
} else {
IVTV_DEBUG_WARN
- ("failed to map y user pages, returned %d "
- "expecting %d\n", y_pages, y_dma.page_count);
+ ("failed to map y user pages, returned %d expecting %d\n",
+ y_pages, y_dma.page_count);
}
if (y_pages >= 0) {
for (i = 0; i < y_pages; i++)
diff --git a/drivers/media/pci/ivtv/ivtvfb.c b/drivers/media/pci/ivtv/ivtvfb.c
index 8b95eefb610b..612a8402cf4d 100644
--- a/drivers/media/pci/ivtv/ivtvfb.c
+++ b/drivers/media/pci/ivtv/ivtvfb.c
@@ -293,8 +293,7 @@ static int ivtvfb_prep_dec_dma_to_device(struct ivtv *itv,
/* Map User DMA */
if (ivtv_udma_setup(itv, ivtv_dest_addr, userbuf, size_in_bytes) <= 0) {
mutex_unlock(&itv->udma.lock);
- IVTVFB_WARN("ivtvfb_prep_dec_dma_to_device, "
- "Error with get_user_pages: %d bytes, %d pages returned\n",
+ IVTVFB_WARN("ivtvfb_prep_dec_dma_to_device, Error with get_user_pages: %d bytes, %d pages returned\n",
size_in_bytes, itv->udma.page_count);
/* get_user_pages must have failed completely */
diff --git a/drivers/media/pci/meye/meye.c b/drivers/media/pci/meye/meye.c
index ba887e8e1b17..e825bc93ea7a 100644
--- a/drivers/media/pci/meye/meye.c
+++ b/drivers/media/pci/meye/meye.c
@@ -60,8 +60,7 @@ MODULE_PARM_DESC(gbuffers, "number of capture buffers, default is 2 (32 max)");
/* size of a grab buffer */
static unsigned int gbufsize = MEYE_MAX_BUFSIZE;
module_param(gbufsize, int, 0444);
-MODULE_PARM_DESC(gbufsize, "size of the capture buffers, default is 614400"
- " (will be rounded up to a page multiple)");
+MODULE_PARM_DESC(gbufsize, "size of the capture buffers, default is 614400 (will be rounded up to a page multiple)");
/* /dev/videoX registration number */
static int video_nr = -1;
@@ -587,10 +586,7 @@ static void mchip_hic_stop(void)
/* get the next ready frame from the dma engine */
static u32 mchip_get_frame(void)
{
- u32 v;
-
- v = mchip_read(MCHIP_MM_FIR(meye.mchip_fnum));
- return v;
+ return mchip_read(MCHIP_MM_FIR(meye.mchip_fnum));
}
/* frees the current frame from the dma engine */
@@ -1261,8 +1257,7 @@ static int vidioc_reqbufs(struct file *file, void *fh,
meye.grab_fbuffer = rvmalloc(gbuffers * gbufsize);
if (!meye.grab_fbuffer) {
- printk(KERN_ERR "meye: v4l framebuffer allocation"
- " failed\n");
+ printk(KERN_ERR "meye: v4l framebuffer allocation failed\n");
mutex_unlock(&meye.lock);
return -ENOMEM;
}
@@ -1659,8 +1654,7 @@ static int meye_probe(struct pci_dev *pcidev, const struct pci_device_id *ent)
ret = -EIO;
if ((ret = sony_pic_camera_command(SONY_PIC_COMMAND_SETCAMERA, 1))) {
v4l2_err(v4l2_dev, "meye: unable to power on the camera\n");
- v4l2_err(v4l2_dev, "meye: did you enable the camera in "
- "sonypi using the module options ?\n");
+ v4l2_err(v4l2_dev, "meye: did you enable the camera in sonypi using the module options ?\n");
goto outsonypienable;
}
@@ -1834,8 +1828,7 @@ static int __init meye_init(void)
if (gbufsize > MEYE_MAX_BUFSIZE)
gbufsize = MEYE_MAX_BUFSIZE;
gbufsize = PAGE_ALIGN(gbufsize);
- printk(KERN_INFO "meye: using %d buffers with %dk (%dk total) "
- "for capture\n",
+ printk(KERN_INFO "meye: using %d buffers with %dk (%dk total) for capture\n",
gbuffers,
gbufsize / 1024, gbuffers * gbufsize / 1024);
return pci_register_driver(&meye_driver);
diff --git a/drivers/media/pci/netup_unidvb/netup_unidvb_core.c b/drivers/media/pci/netup_unidvb/netup_unidvb_core.c
index b078ac2a682c..191bd8299dc3 100644
--- a/drivers/media/pci/netup_unidvb/netup_unidvb_core.c
+++ b/drivers/media/pci/netup_unidvb/netup_unidvb_core.c
@@ -1030,15 +1030,4 @@ static struct pci_driver netup_unidvb_pci_driver = {
.resume = NULL,
};
-static int __init netup_unidvb_init(void)
-{
- return pci_register_driver(&netup_unidvb_pci_driver);
-}
-
-static void __exit netup_unidvb_fini(void)
-{
- pci_unregister_driver(&netup_unidvb_pci_driver);
-}
-
-module_init(netup_unidvb_init);
-module_exit(netup_unidvb_fini);
+module_pci_driver(netup_unidvb_pci_driver);
diff --git a/drivers/media/pci/pluto2/pluto2.c b/drivers/media/pci/pluto2/pluto2.c
index 655d6854a8d7..65afb71ff79f 100644
--- a/drivers/media/pci/pluto2/pluto2.c
+++ b/drivers/media/pci/pluto2/pluto2.c
@@ -577,12 +577,12 @@ static int pluto_read_serial(struct pluto *pluto)
for (j = 0; j < 32; j += 8) {
if ((val & 0xff) == 0xff)
goto out;
- printk("%c", val & 0xff);
+ printk(KERN_CONT "%c", val & 0xff);
val >>= 8;
}
}
out:
- printk("\n");
+ printk(KERN_CONT "\n");
pci_iounmap(pdev, cis);
return 0;
diff --git a/drivers/media/pci/pt1/pt1.c b/drivers/media/pci/pt1/pt1.c
index e7e4428109c3..d5ee82aee9e8 100644
--- a/drivers/media/pci/pt1/pt1.c
+++ b/drivers/media/pci/pt1/pt1.c
@@ -282,13 +282,12 @@ static int pt1_filter(struct pt1 *pt1, struct pt1_buffer_page *page)
continue;
if (upacket >> 24 & 1)
- printk_ratelimited(KERN_INFO "earth-pt1: device "
- "buffer overflowing. table[%d] buf[%d]\n",
+ printk_ratelimited(KERN_INFO "earth-pt1: device buffer overflowing. table[%d] buf[%d]\n",
pt1->table_index, pt1->buf_index);
sc = upacket >> 26 & 0x7;
if (adap->st_count != -1 && sc != ((adap->st_count + 1) & 0x7))
- printk_ratelimited(KERN_INFO "earth-pt1: data loss"
- " in streamID(adapter)[%d]\n", index);
+ printk_ratelimited(KERN_INFO "earth-pt1: data loss in streamID(adapter)[%d]\n",
+ index);
adap->st_count = sc;
buf = adap->buf;
diff --git a/drivers/media/pci/pt1/va1j5jf8007s.c b/drivers/media/pci/pt1/va1j5jf8007s.c
index d0e70dc0e16f..249273b2e0f2 100644
--- a/drivers/media/pci/pt1/va1j5jf8007s.c
+++ b/drivers/media/pci/pt1/va1j5jf8007s.c
@@ -578,7 +578,7 @@ static void va1j5jf8007s_release(struct dvb_frontend *fe)
kfree(state);
}
-static struct dvb_frontend_ops va1j5jf8007s_ops = {
+static const struct dvb_frontend_ops va1j5jf8007s_ops = {
.delsys = { SYS_ISDBS },
.info = {
.name = "VA1J5JF8007/VA1J5JF8011 ISDB-S",
diff --git a/drivers/media/pci/pt1/va1j5jf8007t.c b/drivers/media/pci/pt1/va1j5jf8007t.c
index 0268f20b8097..e0766e69a370 100644
--- a/drivers/media/pci/pt1/va1j5jf8007t.c
+++ b/drivers/media/pci/pt1/va1j5jf8007t.c
@@ -427,7 +427,7 @@ static void va1j5jf8007t_release(struct dvb_frontend *fe)
kfree(state);
}
-static struct dvb_frontend_ops va1j5jf8007t_ops = {
+static const struct dvb_frontend_ops va1j5jf8007t_ops = {
.delsys = { SYS_ISDBT },
.info = {
.name = "VA1J5JF8007/VA1J5JF8011 ISDB-T",
diff --git a/drivers/media/pci/saa7134/saa7134-alsa.c b/drivers/media/pci/saa7134/saa7134-alsa.c
index dc0e2fc5f68b..8a35ecfb75e3 100644
--- a/drivers/media/pci/saa7134/saa7134-alsa.c
+++ b/drivers/media/pci/saa7134/saa7134-alsa.c
@@ -813,8 +813,7 @@ static int snd_card_saa7134_capture_open(struct snd_pcm_substream * substream)
int amux, err;
if (!saa7134) {
- pr_err("BUG: saa7134 can't find device struct."
- " Can't proceed with open\n");
+ pr_err("BUG: saa7134 can't find device struct. Can't proceed with open\n");
return -ENODEV;
}
dev = saa7134->dev;
diff --git a/drivers/media/pci/saa7134/saa7134-cards.c b/drivers/media/pci/saa7134/saa7134-cards.c
index c480a7e87593..2b60af493de4 100644
--- a/drivers/media/pci/saa7134/saa7134-cards.c
+++ b/drivers/media/pci/saa7134/saa7134-cards.c
@@ -7341,8 +7341,8 @@ static void hauppauge_eeprom(struct saa7134_dev *dev, u8 *eeprom_data)
case 67659: /* WinTV-HVR1110 (OEM, no IR, hybrid, FM, SVid/Comp, RCA aud) */
break;
default:
- pr_warn("%s: warning: "
- "unknown hauppauge model #%d\n", dev->name, tv.model);
+ pr_warn("%s: warning: unknown hauppauge model #%d\n",
+ dev->name, tv.model);
break;
}
@@ -7920,8 +7920,8 @@ int saa7134_board_init2(struct saa7134_dev *dev)
msg.addr = 0x0b;
msg.len = 1;
if (1 != i2c_transfer(&dev->i2c_adap, &msg, 1)) {
- pr_warn("%s: send wake up byte to pic16C505"
- "(IR chip) failed\n", dev->name);
+ pr_warn("%s: send wake up byte to pic16C505(IR chip) failed\n",
+ dev->name);
} else {
msg.flags = I2C_M_RD;
rc = i2c_transfer(&dev->i2c_adap, &msg, 1);
diff --git a/drivers/media/pci/saa7134/saa7134-core.c b/drivers/media/pci/saa7134/saa7134-core.c
index ffb66a9ae23e..7d6bb5c9343f 100644
--- a/drivers/media/pci/saa7134/saa7134-core.c
+++ b/drivers/media/pci/saa7134/saa7134-core.c
@@ -66,8 +66,7 @@ MODULE_PARM_DESC(latency,"pci latency timer");
int saa7134_no_overlay=-1;
module_param_named(no_overlay, saa7134_no_overlay, int, 0444);
-MODULE_PARM_DESC(no_overlay,"allow override overlay default (0 disables, 1 enables)"
- " [some VIA/SIS chipsets are known to have problem with overlay]");
+MODULE_PARM_DESC(no_overlay, "allow override overlay default (0 disables, 1 enables) [some VIA/SIS chipsets are known to have problems with overlay]");
bool saa7134_userptr;
module_param(saa7134_userptr, bool, 0644);
@@ -619,25 +618,25 @@ static irqreturn_t saa7134_irq(int irq, void *dev_id)
print_irqstatus(dev,loop,report,status);
if (report & SAA7134_IRQ_REPORT_PE) {
/* disable all parity error */
- pr_warn("%s/irq: looping -- "
- "clearing PE (parity error!) enable bit\n",dev->name);
+ pr_warn("%s/irq: looping -- clearing PE (parity error!) enable bit\n",
+ dev->name);
saa_clearl(SAA7134_IRQ2,SAA7134_IRQ2_INTE_PE);
} else if (report & SAA7134_IRQ_REPORT_GPIO16) {
/* disable gpio16 IRQ */
- pr_warn("%s/irq: looping -- "
- "clearing GPIO16 enable bit\n",dev->name);
+ pr_warn("%s/irq: looping -- clearing GPIO16 enable bit\n",
+ dev->name);
saa_clearl(SAA7134_IRQ2, SAA7134_IRQ2_INTE_GPIO16_P);
saa_clearl(SAA7134_IRQ2, SAA7134_IRQ2_INTE_GPIO16_N);
} else if (report & SAA7134_IRQ_REPORT_GPIO18) {
/* disable gpio18 IRQs */
- pr_warn("%s/irq: looping -- "
- "clearing GPIO18 enable bit\n",dev->name);
+ pr_warn("%s/irq: looping -- clearing GPIO18 enable bit\n",
+ dev->name);
saa_clearl(SAA7134_IRQ2, SAA7134_IRQ2_INTE_GPIO18_P);
saa_clearl(SAA7134_IRQ2, SAA7134_IRQ2_INTE_GPIO18_N);
} else {
/* disable all irqs */
- pr_warn("%s/irq: looping -- "
- "clearing all enable bits\n",dev->name);
+ pr_warn("%s/irq: looping -- clearing all enable bits\n",
+ dev->name);
saa_writel(SAA7134_IRQ1,0);
saa_writel(SAA7134_IRQ2,0);
}
@@ -1081,18 +1080,14 @@ static int saa7134_initdev(struct pci_dev *pci_dev,
}
#endif
if (pci_pci_problems & (PCIPCI_FAIL|PCIAGP_FAIL)) {
- pr_info("%s: quirk: this driver and your "
- "chipset may not work together"
- " in overlay mode.\n",dev->name);
+ pr_info("%s: quirk: this driver and your chipset may not work together in overlay mode.\n",
+ dev->name);
if (!saa7134_no_overlay) {
- pr_info("%s: quirk: overlay "
- "mode will be disabled.\n",
+ pr_info("%s: quirk: overlay mode will be disabled.\n",
dev->name);
saa7134_no_overlay = 1;
} else {
- pr_info("%s: quirk: overlay "
- "mode will be forced. Use this"
- " option at your own risk.\n",
+ pr_info("%s: quirk: overlay mode will be forced. Use this option at your own risk.\n",
dev->name);
}
}
@@ -1106,10 +1101,10 @@ static int saa7134_initdev(struct pci_dev *pci_dev,
/* print pci info */
dev->pci_rev = pci_dev->revision;
pci_read_config_byte(pci_dev, PCI_LATENCY_TIMER, &dev->pci_lat);
- pr_info("%s: found at %s, rev: %d, irq: %d, "
- "latency: %d, mmio: 0x%llx\n", dev->name,
- pci_name(pci_dev), dev->pci_rev, pci_dev->irq,
- dev->pci_lat,(unsigned long long)pci_resource_start(pci_dev,0));
+ pr_info("%s: found at %s, rev: %d, irq: %d, latency: %d, mmio: 0x%llx\n",
+ dev->name, pci_name(pci_dev), dev->pci_rev, pci_dev->irq,
+ dev->pci_lat,
+ (unsigned long long)pci_resource_start(pci_dev, 0));
pci_set_master(pci_dev);
err = pci_set_dma_mask(pci_dev, DMA_BIT_MASK(32));
if (err) {
diff --git a/drivers/media/pci/saa7134/saa7134-dvb.c b/drivers/media/pci/saa7134/saa7134-dvb.c
index 59a4b5f7724e..598b8bbfe726 100644
--- a/drivers/media/pci/saa7134/saa7134-dvb.c
+++ b/drivers/media/pci/saa7134/saa7134-dvb.c
@@ -1449,8 +1449,8 @@ static int dvb_init(struct saa7134_dev *dev)
if (dvb_attach(tda826x_attach, fe0->dvb.frontend,
0x60, &dev->i2c_adap, 0) == NULL) {
- pr_warn("%s: Medion Quadro, no tda826x "
- "found !\n", __func__);
+ pr_warn("%s: Medion Quadro, no tda826x found !\n",
+ __func__);
goto detach_frontend;
}
if (dev_id != 0x08) {
@@ -1458,8 +1458,8 @@ static int dvb_init(struct saa7134_dev *dev)
fe->ops.i2c_gate_ctrl(fe, 1);
if (dvb_attach(isl6405_attach, fe,
&dev->i2c_adap, 0x08, 0, 0) == NULL) {
- pr_warn("%s: Medion Quadro, no ISL6405 "
- "found !\n", __func__);
+ pr_warn("%s: Medion Quadro, no ISL6405 found !\n",
+ __func__);
goto detach_frontend;
}
if (dev_id == 0x07) {
@@ -1629,8 +1629,8 @@ static int dvb_init(struct saa7134_dev *dev)
struct dvb_frontend *fe;
if (dvb_attach(dvb_pll_attach, fe0->dvb.frontend, 0x60,
&dev->i2c_adap, DVB_PLL_PHILIPS_SD1878_TDA8261) == NULL) {
- pr_warn("%s: MD7134 DVB-S, no SD1878 "
- "found !\n", __func__);
+ pr_warn("%s: MD7134 DVB-S, no SD1878 found !\n",
+ __func__);
goto detach_frontend;
}
/* we need to open the i2c gate (we know it exists) */
@@ -1638,8 +1638,8 @@ static int dvb_init(struct saa7134_dev *dev)
fe->ops.i2c_gate_ctrl(fe, 1);
if (dvb_attach(isl6405_attach, fe,
&dev->i2c_adap, 0x08, 0, 0) == NULL) {
- pr_warn("%s: MD7134 DVB-S, no ISL6405 "
- "found !\n", __func__);
+ pr_warn("%s: MD7134 DVB-S, no ISL6405 found !\n",
+ __func__);
goto detach_frontend;
}
fe->ops.i2c_gate_ctrl(fe, 0);
@@ -1670,14 +1670,14 @@ static int dvb_init(struct saa7134_dev *dev)
if (dvb_attach(tda826x_attach,
fe0->dvb.frontend, 0x60,
&dev->i2c_adap, 0) == NULL) {
- pr_warn("%s: Asus Tiger 3in1, no "
- "tda826x found!\n", __func__);
+ pr_warn("%s: Asus Tiger 3in1, no tda826x found!\n",
+ __func__);
goto detach_frontend;
}
if (dvb_attach(lnbp21_attach, fe0->dvb.frontend,
&dev->i2c_adap, 0, 0) == NULL) {
- pr_warn("%s: Asus Tiger 3in1, no lnbp21"
- " found!\n", __func__);
+ pr_warn("%s: Asus Tiger 3in1, no lnbp21 found!\n",
+ __func__);
goto detach_frontend;
}
}
@@ -1695,14 +1695,14 @@ static int dvb_init(struct saa7134_dev *dev)
if (dvb_attach(tda826x_attach,
fe0->dvb.frontend, 0x60,
&dev->i2c_adap, 0) == NULL) {
- pr_warn("%s: Asus My Cinema PS3-100, no "
- "tda826x found!\n", __func__);
+ pr_warn("%s: Asus My Cinema PS3-100, no tda826x found!\n",
+ __func__);
goto detach_frontend;
}
if (dvb_attach(lnbp21_attach, fe0->dvb.frontend,
&dev->i2c_adap, 0, 0) == NULL) {
- pr_warn("%s: Asus My Cinema PS3-100, no lnbp21"
- " found!\n", __func__);
+ pr_warn("%s: Asus My Cinema PS3-100, no lnbp21 found!\n",
+ __func__);
goto detach_frontend;
}
}
diff --git a/drivers/media/pci/saa7134/saa7134-i2c.c b/drivers/media/pci/saa7134/saa7134-i2c.c
index 2dac48fa1386..dca0592c5f47 100644
--- a/drivers/media/pci/saa7134/saa7134-i2c.c
+++ b/drivers/media/pci/saa7134/saa7134-i2c.c
@@ -355,12 +355,43 @@ static struct i2c_client saa7134_client_template = {
/* ----------------------------------------------------------- */
+/* On Medion 7134 reading EEPROM needs DVB-T demod i2c gate open */
+static void saa7134_i2c_eeprom_md7134_gate(struct saa7134_dev *dev)
+{
+ u8 subaddr = 0x7, dmdregval;
+ u8 data[2];
+ int ret;
+ struct i2c_msg i2cgatemsg_r[] = { {.addr = 0x08, .flags = 0,
+ .buf = &subaddr, .len = 1},
+ {.addr = 0x08,
+ .flags = I2C_M_RD,
+ .buf = &dmdregval, .len = 1}
+ };
+ struct i2c_msg i2cgatemsg_w[] = { {.addr = 0x08, .flags = 0,
+ .buf = data, .len = 2} };
+
+ ret = i2c_transfer(&dev->i2c_adap, i2cgatemsg_r, 2);
+ if ((ret == 2) && (dmdregval & 0x2)) {
+ pr_debug("%s: DVB-T demod i2c gate was left closed\n",
+ dev->name);
+
+ data[0] = subaddr;
+ data[1] = (dmdregval & ~0x2);
+ if (i2c_transfer(&dev->i2c_adap, i2cgatemsg_w, 1) != 1)
+ pr_err("%s: EEPROM i2c gate open failure\n",
+ dev->name);
+ }
+}
+
static int
saa7134_i2c_eeprom(struct saa7134_dev *dev, unsigned char *eedata, int len)
{
unsigned char buf;
int i,err;
+ if (dev->board == SAA7134_BOARD_MD7134)
+ saa7134_i2c_eeprom_md7134_gate(dev);
+
dev->i2c_client.addr = 0xa0 >> 1;
buf = 0;
if (1 != (err = i2c_master_send(&dev->i2c_client,&buf,1))) {
diff --git a/drivers/media/pci/saa7134/saa7134-input.c b/drivers/media/pci/saa7134/saa7134-input.c
index eff52bbbfd66..823b75ed47e1 100644
--- a/drivers/media/pci/saa7134/saa7134-input.c
+++ b/drivers/media/pci/saa7134/saa7134-input.c
@@ -123,8 +123,7 @@ static int get_key_flydvb_trio(struct IR_i2c *ir, enum rc_type *protocol,
struct saa7134_dev *dev = ir->c->adapter->algo_data;
if (dev == NULL) {
- ir_dbg(ir, "get_key_flydvb_trio: "
- "ir->c->adapter->algo_data is NULL!\n");
+ ir_dbg(ir, "get_key_flydvb_trio: ir->c->adapter->algo_data is NULL!\n");
return -EIO;
}
@@ -150,8 +149,8 @@ static int get_key_flydvb_trio(struct IR_i2c *ir, enum rc_type *protocol,
msleep(10);
continue;
}
- ir_dbg(ir, "send wake up byte to pic16C505 (IR chip)"
- "failed %dx\n", attempt);
+		ir_dbg(ir, "send wake up byte to pic16C505 (IR chip) failed %dx\n",
+ attempt);
return -EIO;
}
if (1 != i2c_master_recv(ir->c, &b, 1)) {
@@ -174,8 +173,7 @@ static int get_key_msi_tvanywhere_plus(struct IR_i2c *ir, enum rc_type *protocol
/* <dev> is needed to access GPIO. Used by the saa_readl macro. */
struct saa7134_dev *dev = ir->c->adapter->algo_data;
if (dev == NULL) {
- ir_dbg(ir, "get_key_msi_tvanywhere_plus: "
- "ir->c->adapter->algo_data is NULL!\n");
+ ir_dbg(ir, "get_key_msi_tvanywhere_plus: ir->c->adapter->algo_data is NULL!\n");
return -EIO;
}
@@ -223,8 +221,7 @@ static int get_key_kworld_pc150u(struct IR_i2c *ir, enum rc_type *protocol,
/* <dev> is needed to access GPIO. Used by the saa_readl macro. */
struct saa7134_dev *dev = ir->c->adapter->algo_data;
if (dev == NULL) {
- ir_dbg(ir, "get_key_kworld_pc150u: "
- "ir->c->adapter->algo_data is NULL!\n");
+ ir_dbg(ir, "get_key_kworld_pc150u: ir->c->adapter->algo_data is NULL!\n");
return -EIO;
}
diff --git a/drivers/media/pci/saa7164/saa7164-buffer.c b/drivers/media/pci/saa7164/saa7164-buffer.c
index f30758e24f5d..62c34504199d 100644
--- a/drivers/media/pci/saa7164/saa7164-buffer.c
+++ b/drivers/media/pci/saa7164/saa7164-buffer.c
@@ -218,8 +218,7 @@ int saa7164_buffer_activate(struct saa7164_buffer *buf, int i)
saa7164_writel(port->bufptr32h + ((sizeof(u32) * 2) * i), buf->pt_dma);
saa7164_writel(port->bufptr32l + ((sizeof(u32) * 2) * i), 0);
- dprintk(DBGLVL_BUF, " buf[%d] offset 0x%llx (0x%x) "
- "buf 0x%llx/%llx (0x%x/%x) nr=%d\n",
+ dprintk(DBGLVL_BUF, " buf[%d] offset 0x%llx (0x%x) buf 0x%llx/%llx (0x%x/%x) nr=%d\n",
buf->idx,
(u64)port->bufoffset + (i * sizeof(u32)),
saa7164_readl(port->bufoffset + (sizeof(u32) * i)),
diff --git a/drivers/media/pci/saa7164/saa7164-bus.c b/drivers/media/pci/saa7164/saa7164-bus.c
index a18fe5d47238..e305c02f9dc9 100644
--- a/drivers/media/pci/saa7164/saa7164-bus.c
+++ b/drivers/media/pci/saa7164/saa7164-bus.c
@@ -427,8 +427,8 @@ int saa7164_bus_get(struct saa7164_dev *dev, struct tmComResInfo* msg,
write_distance = curr_gwp + bus->m_dwSizeGetRing - curr_grp;
if (bytes_to_read > write_distance) {
- printk(KERN_ERR "%s() Invalid bus state, missing msg "
- "or mangled ring, faulty H/W / bad code?\n", __func__);
+ printk(KERN_ERR "%s() Invalid bus state, missing msg or mangled ring, faulty H/W / bad code?\n",
+ __func__);
ret = SAA_ERR_INVALID_COMMAND;
goto out;
}
diff --git a/drivers/media/pci/saa7164/saa7164-cards.c b/drivers/media/pci/saa7164/saa7164-cards.c
index c2b738227f58..15a98c638c55 100644
--- a/drivers/media/pci/saa7164/saa7164-cards.c
+++ b/drivers/media/pci/saa7164/saa7164-cards.c
@@ -726,8 +726,8 @@ void saa7164_card_list(struct saa7164_dev *dev)
dev->name, dev->name, dev->name, dev->name);
}
- printk(KERN_ERR "%s: Here are valid choices for the card=<n> insmod "
- "option:\n", dev->name);
+ printk(KERN_ERR "%s: Here are valid choices for the card=<n> insmod option:\n",
+ dev->name);
for (i = 0; i < saa7164_bcount; i++)
printk(KERN_ERR "%s: card=%d -> %s\n",
diff --git a/drivers/media/pci/saa7164/saa7164-cmd.c b/drivers/media/pci/saa7164/saa7164-cmd.c
index 3285c37b4583..45951b3cc251 100644
--- a/drivers/media/pci/saa7164/saa7164-cmd.c
+++ b/drivers/media/pci/saa7164/saa7164-cmd.c
@@ -301,8 +301,8 @@ static int saa7164_cmd_wait(struct saa7164_dev *dev, u8 seqno)
else
saa7164_cmd_timeout_seqno(dev, seqno);
- dprintk(DBGLVL_CMD, "%s(seqno=%d) Waiting res = %d "
- "(signalled=%d)\n", __func__, seqno, r,
+ dprintk(DBGLVL_CMD, "%s(seqno=%d) Waiting res = %d (signalled=%d)\n",
+ __func__, seqno, r,
dev->cmds[seqno].signalled);
} else
ret = SAA_OK;
@@ -353,8 +353,8 @@ int saa7164_cmd_send(struct saa7164_dev *dev, u8 id, enum tmComResCmd command,
int ret;
int safety = 0;
- dprintk(DBGLVL_CMD, "%s(unitid = %s (%d) , command = 0x%x, "
- "sel = 0x%x)\n", __func__, saa7164_unitid_name(dev, id), id,
+ dprintk(DBGLVL_CMD, "%s(unitid = %s (%d) , command = 0x%x, sel = 0x%x)\n",
+ __func__, saa7164_unitid_name(dev, id), id,
command, controlselector);
if ((size == 0) || (buf == NULL)) {
@@ -452,9 +452,7 @@ int saa7164_cmd_send(struct saa7164_dev *dev, u8 id, enum tmComResCmd command,
if (presponse_t->seqno != pcommand_t->seqno) {
dprintk(DBGLVL_CMD,
- "wrong event: seqno = %d, "
- "expected seqno = %d, "
- "will dequeue regardless\n",
+ "wrong event: seqno = %d, expected seqno = %d, will dequeue regardless\n",
presponse_t->seqno, pcommand_t->seqno);
ret = saa7164_cmd_dequeue(dev);
diff --git a/drivers/media/pci/saa7164/saa7164-core.c b/drivers/media/pci/saa7164/saa7164-core.c
index 8bbd092fbe1d..03a1511a92be 100644
--- a/drivers/media/pci/saa7164/saa7164-core.c
+++ b/drivers/media/pci/saa7164/saa7164-core.c
@@ -710,9 +710,7 @@ static irqreturn_t saa7164_irq(int irq, void *dev_id)
} else {
/* Find the function */
dprintk(DBGLVL_IRQ,
- "%s() unhandled interrupt "
- "reg 0x%x bit 0x%x "
- "intid = 0x%x\n",
+ "%s() unhandled interrupt reg 0x%x bit 0x%x intid = 0x%x\n",
__func__, i, bit, intid);
}
}
@@ -767,13 +765,11 @@ void saa7164_dumpregs(struct saa7164_dev *dev, u32 addr)
{
int i;
- dprintk(1, "--------------------> "
- "00 01 02 03 04 05 06 07 08 09 0a 0b 0c 0d 0e 0f\n");
+ dprintk(1, "--------------------> 00 01 02 03 04 05 06 07 08 09 0a 0b 0c 0d 0e 0f\n");
for (i = 0; i < 0x100; i += 16)
- dprintk(1, "region0[0x%08x] = "
- "%02x %02x %02x %02x %02x %02x %02x %02x"
- " %02x %02x %02x %02x %02x %02x %02x %02x\n", i,
+ dprintk(1, "region0[0x%08x] = %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x\n",
+ i,
(u8)saa7164_readb(addr + i + 0),
(u8)saa7164_readb(addr + i + 1),
(u8)saa7164_readb(addr + i + 2),
@@ -825,8 +821,7 @@ static void saa7164_dump_hwdesc(struct saa7164_dev *dev)
static void saa7164_dump_intfdesc(struct saa7164_dev *dev)
{
- dprintk(1, "@0x%p intfdesc "
- "sizeof(struct tmComResInterfaceDescr) = %d bytes\n",
+ dprintk(1, "@0x%p intfdesc sizeof(struct tmComResInterfaceDescr) = %d bytes\n",
&dev->intfdesc, (u32)sizeof(struct tmComResInterfaceDescr));
dprintk(1, " .bLength = 0x%x\n", dev->intfdesc.bLength);
@@ -1011,8 +1006,7 @@ static int saa7164_dev_setup(struct saa7164_dev *dev)
saa7164_port_init(dev, SAA7164_PORT_VBI2);
if (get_resources(dev) < 0) {
- printk(KERN_ERR "CORE %s No more PCIe resources for "
- "subsystem: %04x:%04x\n",
+ printk(KERN_ERR "CORE %s No more PCIe resources for subsystem: %04x:%04x\n",
dev->name, dev->pci->subsystem_vendor,
dev->pci->subsystem_device);
@@ -1204,8 +1198,8 @@ static bool saa7164_enable_msi(struct pci_dev *pci_dev, struct saa7164_dev *dev)
err = pci_enable_msi(pci_dev);
if (err) {
- printk(KERN_ERR "%s() Failed to enable MSI interrupt."
- " Falling back to a shared IRQ\n", __func__);
+ printk(KERN_ERR "%s() Failed to enable MSI interrupt. Falling back to a shared IRQ\n",
+ __func__);
return false;
}
@@ -1215,8 +1209,8 @@ static bool saa7164_enable_msi(struct pci_dev *pci_dev, struct saa7164_dev *dev)
if (err) {
/* fall back to legacy interrupt */
- printk(KERN_ERR "%s() Failed to get an MSI interrupt."
- " Falling back to a shared IRQ\n", __func__);
+ printk(KERN_ERR "%s() Failed to get an MSI interrupt. Falling back to a shared IRQ\n",
+ __func__);
pci_disable_msi(pci_dev);
return false;
}
@@ -1256,8 +1250,8 @@ static int saa7164_initdev(struct pci_dev *pci_dev,
/* print pci info */
dev->pci_rev = pci_dev->revision;
pci_read_config_byte(pci_dev, PCI_LATENCY_TIMER, &dev->pci_lat);
- printk(KERN_INFO "%s/0: found at %s, rev: %d, irq: %d, "
- "latency: %d, mmio: 0x%llx\n", dev->name,
+ printk(KERN_INFO "%s/0: found at %s, rev: %d, irq: %d, latency: %d, mmio: 0x%llx\n",
+ dev->name,
pci_name(pci_dev), dev->pci_rev, pci_dev->irq,
dev->pci_lat,
(unsigned long long)pci_resource_start(pci_dev, 0));
@@ -1307,8 +1301,7 @@ static int saa7164_initdev(struct pci_dev *pci_dev,
err = saa7164_downloadfirmware(dev);
if (err < 0) {
printk(KERN_ERR
- "Failed to boot firmware, no features "
- "registered\n");
+ "Failed to boot firmware, no features registered\n");
goto fail_fw;
}
@@ -1327,8 +1320,7 @@ static int saa7164_initdev(struct pci_dev *pci_dev,
*/
version = 0;
if (saa7164_api_get_fw_version(dev, &version) == SAA_OK)
- dprintk(1, "Bus is operating correctly using "
- "version %d.%d.%d.%d (0x%x)\n",
+ dprintk(1, "Bus is operating correctly using version %d.%d.%d.%d (0x%x)\n",
(version & 0x0000fc00) >> 10,
(version & 0x000003e0) >> 5,
(version & 0x0000001f),
@@ -1356,45 +1348,43 @@ static int saa7164_initdev(struct pci_dev *pci_dev,
/* Begin to create the video sub-systems and register funcs */
if (saa7164_boards[dev->board].porta == SAA7164_MPEG_DVB) {
if (saa7164_dvb_register(&dev->ports[SAA7164_PORT_TS1]) < 0) {
- printk(KERN_ERR "%s() Failed to register "
- "dvb adapters on porta\n",
+ printk(KERN_ERR "%s() Failed to register dvb adapters on porta\n",
__func__);
}
}
if (saa7164_boards[dev->board].portb == SAA7164_MPEG_DVB) {
if (saa7164_dvb_register(&dev->ports[SAA7164_PORT_TS2]) < 0) {
- printk(KERN_ERR"%s() Failed to register "
- "dvb adapters on portb\n",
+ printk(KERN_ERR"%s() Failed to register dvb adapters on portb\n",
__func__);
}
}
if (saa7164_boards[dev->board].portc == SAA7164_MPEG_ENCODER) {
if (saa7164_encoder_register(&dev->ports[SAA7164_PORT_ENC1]) < 0) {
- printk(KERN_ERR"%s() Failed to register "
- "mpeg encoder\n", __func__);
+ printk(KERN_ERR"%s() Failed to register mpeg encoder\n",
+ __func__);
}
}
if (saa7164_boards[dev->board].portd == SAA7164_MPEG_ENCODER) {
if (saa7164_encoder_register(&dev->ports[SAA7164_PORT_ENC2]) < 0) {
- printk(KERN_ERR"%s() Failed to register "
- "mpeg encoder\n", __func__);
+ printk(KERN_ERR"%s() Failed to register mpeg encoder\n",
+ __func__);
}
}
if (saa7164_boards[dev->board].porte == SAA7164_MPEG_VBI) {
if (saa7164_vbi_register(&dev->ports[SAA7164_PORT_VBI1]) < 0) {
- printk(KERN_ERR"%s() Failed to register "
- "vbi device\n", __func__);
+ printk(KERN_ERR"%s() Failed to register vbi device\n",
+ __func__);
}
}
if (saa7164_boards[dev->board].portf == SAA7164_MPEG_VBI) {
if (saa7164_vbi_register(&dev->ports[SAA7164_PORT_VBI2]) < 0) {
- printk(KERN_ERR"%s() Failed to register "
- "vbi device\n", __func__);
+ printk(KERN_ERR"%s() Failed to register vbi device\n",
+ __func__);
}
}
saa7164_api_set_debug(dev, fw_debug);
@@ -1404,15 +1394,15 @@ static int saa7164_initdev(struct pci_dev *pci_dev,
"saa7164 debug");
if (IS_ERR(dev->kthread)) {
dev->kthread = NULL;
- printk(KERN_ERR "%s() Failed to create "
- "debug kernel thread\n", __func__);
+ printk(KERN_ERR "%s() Failed to create debug kernel thread\n",
+ __func__);
}
}
} /* != BOARD_UNKNOWN */
else
- printk(KERN_ERR "%s() Unsupported board detected, "
- "registering without firmware\n", __func__);
+ printk(KERN_ERR "%s() Unsupported board detected, registering without firmware\n",
+ __func__);
dprintk(1, "%s() parameter debug = %d\n", __func__, saa_debug);
dprintk(1, "%s() parameter waitsecs = %d\n", __func__, waitsecs);
diff --git a/drivers/media/pci/saa7164/saa7164-dvb.c b/drivers/media/pci/saa7164/saa7164-dvb.c
index e9a783b71b45..cd3eeda5250b 100644
--- a/drivers/media/pci/saa7164/saa7164-dvb.c
+++ b/drivers/media/pci/saa7164/saa7164-dvb.c
@@ -244,8 +244,8 @@ static int saa7164_dvb_start_port(struct saa7164_port *port)
/* Stop the hardware, regardless */
result = saa7164_api_transition_port(port, SAA_DMASTATE_STOP);
if ((result != SAA_OK) && (result != SAA_ERR_ALREADY_STOPPED)) {
- printk(KERN_ERR "%s() acquire/forced stop transition "
- "failed, res = 0x%x\n", __func__, result);
+ printk(KERN_ERR "%s() acquire/forced stop transition failed, res = 0x%x\n",
+ __func__, result);
}
ret = -EIO;
goto out;
@@ -261,8 +261,8 @@ static int saa7164_dvb_start_port(struct saa7164_port *port)
/* Stop the hardware, regardless */
result = saa7164_api_transition_port(port, SAA_DMASTATE_STOP);
if ((result != SAA_OK) && (result != SAA_ERR_ALREADY_STOPPED)) {
- printk(KERN_ERR "%s() pause/forced stop transition "
- "failed, res = 0x%x\n", __func__, result);
+ printk(KERN_ERR "%s() pause/forced stop transition failed, res = 0x%x\n",
+ __func__, result);
}
ret = -EIO;
@@ -279,8 +279,8 @@ static int saa7164_dvb_start_port(struct saa7164_port *port)
/* Stop the hardware, regardless */
result = saa7164_api_transition_port(port, SAA_DMASTATE_STOP);
if ((result != SAA_OK) && (result != SAA_ERR_ALREADY_STOPPED)) {
- printk(KERN_ERR "%s() run/forced stop transition "
- "failed, res = 0x%x\n", __func__, result);
+ printk(KERN_ERR "%s() run/forced stop transition failed, res = 0x%x\n",
+ __func__, result);
}
ret = -EIO;
@@ -357,8 +357,7 @@ static int dvb_register(struct saa7164_port *port)
/* Sanity check that the PCI configuration space is active */
if (port->hwcfg.BARLocation == 0) {
result = -ENOMEM;
- printk(KERN_ERR "%s: dvb_register_adapter failed "
- "(errno = %d), NO PCI configuration\n",
+ printk(KERN_ERR "%s: dvb_register_adapter failed (errno = %d), NO PCI configuration\n",
DRIVER_NAME, result);
goto fail_adapter;
}
@@ -386,8 +385,7 @@ static int dvb_register(struct saa7164_port *port)
if (!buf) {
result = -ENOMEM;
- printk(KERN_ERR "%s: dvb_register_adapter failed "
- "(errno = %d), unable to allocate buffers\n",
+ printk(KERN_ERR "%s: dvb_register_adapter failed (errno = %d), unable to allocate buffers\n",
DRIVER_NAME, result);
goto fail_adapter;
}
@@ -401,8 +399,8 @@ static int dvb_register(struct saa7164_port *port)
result = dvb_register_adapter(&dvb->adapter, DRIVER_NAME, THIS_MODULE,
&dev->pci->dev, adapter_nr);
if (result < 0) {
- printk(KERN_ERR "%s: dvb_register_adapter failed "
- "(errno = %d)\n", DRIVER_NAME, result);
+ printk(KERN_ERR "%s: dvb_register_adapter failed (errno = %d)\n",
+ DRIVER_NAME, result);
goto fail_adapter;
}
dvb->adapter.priv = port;
@@ -410,8 +408,8 @@ static int dvb_register(struct saa7164_port *port)
/* register frontend */
result = dvb_register_frontend(&dvb->adapter, dvb->frontend);
if (result < 0) {
- printk(KERN_ERR "%s: dvb_register_frontend failed "
- "(errno = %d)\n", DRIVER_NAME, result);
+ printk(KERN_ERR "%s: dvb_register_frontend failed (errno = %d)\n",
+ DRIVER_NAME, result);
goto fail_frontend;
}
@@ -444,16 +442,16 @@ static int dvb_register(struct saa7164_port *port)
dvb->fe_hw.source = DMX_FRONTEND_0;
result = dvb->demux.dmx.add_frontend(&dvb->demux.dmx, &dvb->fe_hw);
if (result < 0) {
- printk(KERN_ERR "%s: add_frontend failed "
- "(DMX_FRONTEND_0, errno = %d)\n", DRIVER_NAME, result);
+ printk(KERN_ERR "%s: add_frontend failed (DMX_FRONTEND_0, errno = %d)\n",
+ DRIVER_NAME, result);
goto fail_fe_hw;
}
dvb->fe_mem.source = DMX_MEMORY_FE;
result = dvb->demux.dmx.add_frontend(&dvb->demux.dmx, &dvb->fe_mem);
if (result < 0) {
- printk(KERN_ERR "%s: add_frontend failed "
- "(DMX_MEMORY_FE, errno = %d)\n", DRIVER_NAME, result);
+ printk(KERN_ERR "%s: add_frontend failed (DMX_MEMORY_FE, errno = %d)\n",
+ DRIVER_NAME, result);
goto fail_fe_mem;
}
diff --git a/drivers/media/pci/saa7164/saa7164-encoder.c b/drivers/media/pci/saa7164/saa7164-encoder.c
index 32a353d162e7..68124ce7ebc3 100644
--- a/drivers/media/pci/saa7164/saa7164-encoder.c
+++ b/drivers/media/pci/saa7164/saa7164-encoder.c
@@ -157,8 +157,7 @@ static int saa7164_encoder_buffers_alloc(struct saa7164_port *port)
params->pitch);
if (!buf) {
- printk(KERN_ERR "%s() failed "
- "(errno = %d), unable to allocate buffer\n",
+ printk(KERN_ERR "%s() failed (errno = %d), unable to allocate buffer\n",
__func__, result);
result = -ENOMEM;
goto failed;
@@ -681,8 +680,8 @@ static int saa7164_encoder_start_streaming(struct saa7164_port *port)
/* Stop the hardware, regardless */
result = saa7164_api_transition_port(port, SAA_DMASTATE_STOP);
if ((result != SAA_OK) && (result != SAA_ERR_ALREADY_STOPPED)) {
- printk(KERN_ERR "%s() acquire/forced stop transition "
- "failed, res = 0x%x\n", __func__, result);
+ printk(KERN_ERR "%s() acquire/forced stop transition failed, res = 0x%x\n",
+ __func__, result);
}
ret = -EIO;
goto out;
@@ -698,8 +697,8 @@ static int saa7164_encoder_start_streaming(struct saa7164_port *port)
/* Stop the hardware, regardless */
result = saa7164_api_transition_port(port, SAA_DMASTATE_STOP);
if ((result != SAA_OK) && (result != SAA_ERR_ALREADY_STOPPED)) {
- printk(KERN_ERR "%s() pause/forced stop transition "
- "failed, res = 0x%x\n", __func__, result);
+ printk(KERN_ERR "%s() pause/forced stop transition failed, res = 0x%x\n",
+ __func__, result);
}
ret = -EIO;
@@ -716,8 +715,8 @@ static int saa7164_encoder_start_streaming(struct saa7164_port *port)
/* Stop the hardware, regardless */
result = saa7164_api_transition_port(port, SAA_DMASTATE_STOP);
if ((result != SAA_OK) && (result != SAA_ERR_ALREADY_STOPPED)) {
- printk(KERN_ERR "%s() run/forced stop transition "
- "failed, res = 0x%x\n", __func__, result);
+ printk(KERN_ERR "%s() run/forced stop transition failed, res = 0x%x\n",
+ __func__, result);
}
ret = -EIO;
@@ -1026,8 +1025,7 @@ int saa7164_encoder_register(struct saa7164_port *port)
/* Sanity check that the PCI configuration space is active */
if (port->hwcfg.BARLocation == 0) {
- printk(KERN_ERR "%s() failed "
- "(errno = %d), NO PCI configuration\n",
+ printk(KERN_ERR "%s() failed (errno = %d), NO PCI configuration\n",
__func__, result);
result = -ENOMEM;
goto failed;
diff --git a/drivers/media/pci/saa7164/saa7164-fw.c b/drivers/media/pci/saa7164/saa7164-fw.c
index 269e0782c7b6..8568adfd7ece 100644
--- a/drivers/media/pci/saa7164/saa7164-fw.c
+++ b/drivers/media/pci/saa7164/saa7164-fw.c
@@ -421,8 +421,8 @@ int saa7164_downloadfirmware(struct saa7164_dev *dev)
ret = request_firmware(&fw, fwname, &dev->pci->dev);
if (ret) {
- printk(KERN_ERR "%s() Upload failed. "
- "(file not found?)\n", __func__);
+ printk(KERN_ERR "%s() Upload failed. (file not found?)\n",
+ __func__);
return -ENOMEM;
}
@@ -478,15 +478,13 @@ int saa7164_downloadfirmware(struct saa7164_dev *dev)
0x03) && (saa7164_readl(SAA_DATAREADY_FLAG_ACK)
== 0x00) && (version == 0x00)) {
- dprintk(DBGLVL_FW, "BootLoader version in "
- "rom %d.%d.%d.%d\n",
+ dprintk(DBGLVL_FW, "BootLoader version in rom %d.%d.%d.%d\n",
(bootloaderversion & 0x0000fc00) >> 10,
(bootloaderversion & 0x000003e0) >> 5,
(bootloaderversion & 0x0000001f),
(bootloaderversion & 0xffff0000) >> 16
);
- dprintk(DBGLVL_FW, "BootLoader version "
- "in file %d.%d.%d.%d\n",
+ dprintk(DBGLVL_FW, "BootLoader version in file %d.%d.%d.%d\n",
(boothdr->version & 0x0000fc00) >> 10,
(boothdr->version & 0x000003e0) >> 5,
(boothdr->version & 0x0000001f),
diff --git a/drivers/media/pci/saa7164/saa7164-vbi.c b/drivers/media/pci/saa7164/saa7164-vbi.c
index ee54491459a6..e5dcb81029d3 100644
--- a/drivers/media/pci/saa7164/saa7164-vbi.c
+++ b/drivers/media/pci/saa7164/saa7164-vbi.c
@@ -110,8 +110,7 @@ static int saa7164_vbi_buffers_alloc(struct saa7164_port *port)
params->pitch);
if (!buf) {
- printk(KERN_ERR "%s() failed "
- "(errno = %d), unable to allocate buffer\n",
+ printk(KERN_ERR "%s() failed (errno = %d), unable to allocate buffer\n",
__func__, result);
result = -ENOMEM;
goto failed;
@@ -384,8 +383,8 @@ static int saa7164_vbi_start_streaming(struct saa7164_port *port)
/* Stop the hardware, regardless */
result = saa7164_vbi_stop_port(port);
if (result != SAA_OK) {
- printk(KERN_ERR "%s() pause/forced stop transition "
- "failed, res = 0x%x\n", __func__, result);
+ printk(KERN_ERR "%s() pause/forced stop transition failed, res = 0x%x\n",
+ __func__, result);
}
ret = -EIO;
@@ -403,8 +402,8 @@ static int saa7164_vbi_start_streaming(struct saa7164_port *port)
result = saa7164_vbi_acquire_port(port);
result = saa7164_vbi_stop_port(port);
if (result != SAA_OK) {
- printk(KERN_ERR "%s() run/forced stop transition "
- "failed, res = 0x%x\n", __func__, result);
+ printk(KERN_ERR "%s() run/forced stop transition failed, res = 0x%x\n",
+ __func__, result);
}
ret = -EIO;
@@ -728,8 +727,7 @@ int saa7164_vbi_register(struct saa7164_port *port)
/* Sanity check that the PCI configuration space is active */
if (port->hwcfg.BARLocation == 0) {
- printk(KERN_ERR "%s() failed "
- "(errno = %d), NO PCI configuration\n",
+ printk(KERN_ERR "%s() failed (errno = %d), NO PCI configuration\n",
__func__, result);
result = -ENOMEM;
goto failed;
diff --git a/drivers/media/pci/solo6x10/solo6x10-v4l2.c b/drivers/media/pci/solo6x10/solo6x10-v4l2.c
index b4be47969b6b..896bec6627aa 100644
--- a/drivers/media/pci/solo6x10/solo6x10-v4l2.c
+++ b/drivers/media/pci/solo6x10/solo6x10-v4l2.c
@@ -702,8 +702,8 @@ int solo_v4l2_init(struct solo_dev *solo_dev, unsigned nr)
snprintf(solo_dev->vfd->name, sizeof(solo_dev->vfd->name), "%s (%i)",
SOLO6X10_NAME, solo_dev->vfd->num);
- dev_info(&solo_dev->pdev->dev, "Display as /dev/video%d with "
- "%d inputs (%d extended)\n", solo_dev->vfd->num,
+ dev_info(&solo_dev->pdev->dev, "Display as /dev/video%d with %d inputs (%d extended)\n",
+ solo_dev->vfd->num,
solo_dev->nr_chans, solo_dev->nr_ext);
return 0;
diff --git a/drivers/media/pci/solo6x10/solo6x10.h b/drivers/media/pci/solo6x10/solo6x10.h
index 5bd498735a66..3f8da5e8c430 100644
--- a/drivers/media/pci/solo6x10/solo6x10.h
+++ b/drivers/media/pci/solo6x10/solo6x10.h
@@ -284,7 +284,10 @@ static inline u32 solo_reg_read(struct solo_dev *solo_dev, int reg)
static inline void solo_reg_write(struct solo_dev *solo_dev, int reg,
u32 data)
{
+ u16 val;
+
writel(data, solo_dev->reg_base + reg);
+ pci_read_config_word(solo_dev->pdev, PCI_STATUS, &val);
}
static inline void solo_irq_on(struct solo_dev *dev, u32 mask)
diff --git a/drivers/media/pci/ttpci/Makefile b/drivers/media/pci/ttpci/Makefile
index 49f71b1eaf14..3cf617737f7c 100644
--- a/drivers/media/pci/ttpci/Makefile
+++ b/drivers/media/pci/ttpci/Makefile
@@ -3,7 +3,7 @@
# and the AV7110 DVB device driver
#
-dvb-ttpci-objs := av7110_hw.o av7110_v4l.o av7110_av.o av7110_ca.o av7110.o av7110_ipack.o
+dvb-ttpci-objs := av7110_hw.o av7110_v4l.o av7110_av.o av7110_ca.o av7110.o av7110_ipack.o dvb_filter.o
ifdef CONFIG_DVB_AV7110_IR
dvb-ttpci-objs += av7110_ir.o
diff --git a/drivers/media/pci/ttpci/av7110.c b/drivers/media/pci/ttpci/av7110.c
index 382caf200ba1..6e63949d6ad0 100644
--- a/drivers/media/pci/ttpci/av7110.c
+++ b/drivers/media/pci/ttpci/av7110.c
@@ -100,8 +100,7 @@ MODULE_PARM_DESC(adac,"audio DAC type: 0 TI, 1 CRYSTAL, 2 MSP (use if autodetect
module_param(hw_sections, int, 0444);
MODULE_PARM_DESC(hw_sections, "0 use software section filter, 1 use hardware");
module_param(rgb_on, int, 0444);
-MODULE_PARM_DESC(rgb_on, "For Siemens DVB-C cards only: Enable RGB control"
- " signal on SCART pin 16 to switch SCART video mode from CVBS to RGB");
+MODULE_PARM_DESC(rgb_on, "For Siemens DVB-C cards only: Enable RGB control signal on SCART pin 16 to switch SCART video mode from CVBS to RGB");
module_param(volume, int, 0444);
MODULE_PARM_DESC(volume, "initial volume: default 255 (range 0-255)");
module_param(budgetpatch, int, 0444);
@@ -444,21 +443,6 @@ static void debiirq(unsigned long cookie)
case DATA_COMMON_INTERFACE:
CI_handle(av7110, (u8 *)av7110->debi_virt, av7110->debilen);
-#if 0
- {
- int i;
-
- printk("av7110%d: ", av7110->num);
- printk("%02x ", *(u8 *)av7110->debi_virt);
- printk("%02x ", *(1+(u8 *)av7110->debi_virt));
- for (i = 2; i < av7110->debilen; i++)
- printk("%02x ", (*(i+(unsigned char *)av7110->debi_virt)));
- for (i = 2; i < av7110->debilen; i++)
- printk("%c", chtrans(*(i+(unsigned char *)av7110->debi_virt)));
-
- printk("\n");
- }
-#endif
xfer = RX_BUFF;
break;
@@ -833,8 +817,7 @@ static int StartHWFilter(struct dvb_demux_filter *dvbdmxfilter)
ret = av7110_fw_request(av7110, buf, 20, &handle, 1);
if (ret != 0 || handle >= 32) {
- printk("dvb-ttpci: %s error buf %04x %04x %04x %04x "
- "ret %d handle %04x\n",
+ printk(KERN_ERR "dvb-ttpci: %s error buf %04x %04x %04x %04x ret %d handle %04x\n",
__func__, buf[0], buf[1], buf[2], buf[3],
ret, handle);
dvbdmxfilter->hw_handle = 0xffff;
@@ -876,8 +859,7 @@ static int StopHWFilter(struct dvb_demux_filter *dvbdmxfilter)
buf[2] = handle;
ret = av7110_fw_request(av7110, buf, 3, answ, 2);
if (ret != 0 || answ[1] != handle) {
- printk("dvb-ttpci: %s error cmd %04x %04x %04x ret %x "
- "resp %04x %04x pid %d\n",
+ printk(KERN_ERR "dvb-ttpci: %s error cmd %04x %04x %04x ret %x resp %04x %04x pid %d\n",
__func__, buf[0], buf[1], buf[2], ret,
answ[0], answ[1], dvbdmxfilter->feed->pid);
if (!ret)
@@ -1532,15 +1514,12 @@ static int get_firmware(struct av7110* av7110)
ret = request_firmware(&fw, "dvb-ttpci-01.fw", &av7110->dev->pci->dev);
if (ret) {
if (ret == -ENOENT) {
- printk(KERN_ERR "dvb-ttpci: could not load firmware,"
- " file not found: dvb-ttpci-01.fw\n");
- printk(KERN_ERR "dvb-ttpci: usually this should be in "
- "/usr/lib/hotplug/firmware or /lib/firmware\n");
- printk(KERN_ERR "dvb-ttpci: and can be downloaded from"
- " https://linuxtv.org/download/dvb/firmware/\n");
+ printk(KERN_ERR "dvb-ttpci: could not load firmware, file not found: dvb-ttpci-01.fw\n");
+ printk(KERN_ERR "dvb-ttpci: usually this should be in /usr/lib/hotplug/firmware or /lib/firmware\n");
+ printk(KERN_ERR "dvb-ttpci: and can be downloaded from https://linuxtv.org/download/dvb/firmware/\n");
} else
- printk(KERN_ERR "dvb-ttpci: cannot request firmware"
- " (error %i)\n", ret);
+ printk(KERN_ERR "dvb-ttpci: cannot request firmware (error %i)\n",
+ ret);
return -EINVAL;
}
@@ -2700,8 +2679,9 @@ static int av7110_attach(struct saa7146_dev* dev,
goto err_stop_arm_9;
if (FW_VERSION(av7110->arm_app)<0x2501)
- printk ("dvb-ttpci: Warning, firmware version 0x%04x is too old. "
- "System might be unstable!\n", FW_VERSION(av7110->arm_app));
+ printk(KERN_WARNING
+ "dvb-ttpci: Warning, firmware version 0x%04x is too old. System might be unstable!\n",
+ FW_VERSION(av7110->arm_app));
thread = kthread_run(arm_thread, (void *) av7110, "arm_mon");
if (IS_ERR(thread)) {
@@ -2930,9 +2910,7 @@ static struct saa7146_extension av7110_extension_driver = {
static int __init av7110_init(void)
{
- int retval;
- retval = saa7146_register_extension(&av7110_extension_driver);
- return retval;
+ return saa7146_register_extension(&av7110_extension_driver);
}
@@ -2944,7 +2922,6 @@ static void __exit av7110_exit(void)
module_init(av7110_init);
module_exit(av7110_exit);
-MODULE_DESCRIPTION("driver for the SAA7146 based AV110 PCI DVB cards by "
- "Siemens, Technotrend, Hauppauge");
+MODULE_DESCRIPTION("driver for the SAA7146 based AV7110 PCI DVB cards by Siemens, Technotrend, Hauppauge");
MODULE_AUTHOR("Ralph Metzler, Marcus Metzler, others");
MODULE_LICENSE("GPL");
diff --git a/drivers/media/pci/ttpci/av7110.h b/drivers/media/pci/ttpci/av7110.h
index 3707ccd02732..824c1e262fbb 100644
--- a/drivers/media/pci/ttpci/av7110.h
+++ b/drivers/media/pci/ttpci/av7110.h
@@ -40,8 +40,11 @@
extern int av7110_debug;
-#define dprintk(level,args...) \
- do { if ((av7110_debug & level)) { printk("dvb-ttpci: %s(): ", __func__); printk(args); } } while (0)
+#define dprintk(level, fmt, arg...) do { \
+ if (level & av7110_debug) \
+ printk(KERN_DEBUG KBUILD_MODNAME ": %s(): " fmt, \
+ __func__, ##arg); \
+} while (0)
#define MAXFILT 32
diff --git a/drivers/media/pci/ttpci/av7110_hw.c b/drivers/media/pci/ttpci/av7110_hw.c
index 0583d56ef5ef..520414cbe087 100644
--- a/drivers/media/pci/ttpci/av7110_hw.c
+++ b/drivers/media/pci/ttpci/av7110_hw.c
@@ -235,8 +235,7 @@ int av7110_bootarm(struct av7110 *av7110)
iwdebi(av7110, DEBISWAP, DPRAM_BASE, 0x76543210, 4);
if ((ret=irdebi(av7110, DEBINOSWAP, DPRAM_BASE, 0, 4)) != 0x10325476) {
- printk(KERN_ERR "dvb-ttpci: debi test in av7110_bootarm() failed: "
- "%08x != %08x (check your BIOS 'Plug&Play OS' settings)\n",
+ printk(KERN_ERR "dvb-ttpci: debi test in av7110_bootarm() failed: %08x != %08x (check your BIOS 'Plug&Play OS' settings)\n",
ret, 0x10325476);
return -1;
}
@@ -262,8 +261,7 @@ int av7110_bootarm(struct av7110 *av7110)
iwdebi(av7110, DEBINOSWAP, AV7110_BOOT_STATE, BOOTSTATE_BUFFER_FULL, 2);
if (saa7146_wait_for_debi_done(av7110->dev, 1)) {
- printk(KERN_ERR "dvb-ttpci: av7110_bootarm(): "
- "saa7146_wait_for_debi_done() timed out\n");
+ printk(KERN_ERR "dvb-ttpci: av7110_bootarm(): saa7146_wait_for_debi_done() timed out\n");
return -ETIMEDOUT;
}
saa7146_setgpio(dev, RESET_LINE, SAA7146_GPIO_OUTHI);
@@ -271,8 +269,7 @@ int av7110_bootarm(struct av7110 *av7110)
dprintk(1, "load dram code\n");
if (load_dram(av7110, (u32 *)av7110->bin_root, av7110->size_root) < 0) {
- printk(KERN_ERR "dvb-ttpci: av7110_bootarm(): "
- "load_dram() failed\n");
+ printk(KERN_ERR "dvb-ttpci: av7110_bootarm(): load_dram() failed\n");
return -1;
}
@@ -283,8 +280,7 @@ int av7110_bootarm(struct av7110 *av7110)
mwdebi(av7110, DEBISWAB, DPRAM_BASE, av7110->bin_dpram, av7110->size_dpram);
if (saa7146_wait_for_debi_done(av7110->dev, 1)) {
- printk(KERN_ERR "dvb-ttpci: av7110_bootarm(): "
- "saa7146_wait_for_debi_done() timed out after loading DRAM\n");
+ printk(KERN_ERR "dvb-ttpci: av7110_bootarm(): saa7146_wait_for_debi_done() timed out after loading DRAM\n");
return -ETIMEDOUT;
}
saa7146_setgpio(dev, RESET_LINE, SAA7146_GPIO_OUTHI);
diff --git a/drivers/media/pci/ttpci/budget-av.c b/drivers/media/pci/ttpci/budget-av.c
index 6f0d0161970e..896c66d4b3ae 100644
--- a/drivers/media/pci/ttpci/budget-av.c
+++ b/drivers/media/pci/ttpci/budget-av.c
@@ -1636,5 +1636,4 @@ module_exit(budget_av_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Ralph Metzler, Marcus Metzler, Michael Hunold, others");
-MODULE_DESCRIPTION("driver for the SAA7146 based so-called "
- "budget PCI DVB w/ analog input and CI-module (e.g. the KNC cards)");
+MODULE_DESCRIPTION("driver for the SAA7146 based so-called budget PCI DVB w/ analog input and CI-module (e.g. the KNC cards)");
diff --git a/drivers/media/pci/ttpci/budget-ci.c b/drivers/media/pci/ttpci/budget-ci.c
index 7b27af4d9658..20ad93bf0f54 100644
--- a/drivers/media/pci/ttpci/budget-ci.c
+++ b/drivers/media/pci/ttpci/budget-ci.c
@@ -1586,6 +1586,4 @@ module_exit(budget_ci_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Michael Hunold, Jack Thomasson, Andrew de Quincey, others");
-MODULE_DESCRIPTION("driver for the SAA7146 based so-called "
- "budget PCI DVB cards w/ CI-module produced by "
- "Siemens, Technotrend, Hauppauge");
+MODULE_DESCRIPTION("driver for the SAA7146 based so-called budget PCI DVB cards w/ CI-module produced by Siemens, Technotrend, Hauppauge");
diff --git a/drivers/media/pci/ttpci/budget-patch.c b/drivers/media/pci/ttpci/budget-patch.c
index 591dbdfa2a13..f152eda0123a 100644
--- a/drivers/media/pci/ttpci/budget-patch.c
+++ b/drivers/media/pci/ttpci/budget-patch.c
@@ -679,5 +679,4 @@ module_exit(budget_patch_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Emard, Roberto Deza, Holger Waechtler, Michael Hunold, others");
-MODULE_DESCRIPTION("Driver for full TS modified DVB-S SAA7146+AV7110 "
- "based so-called Budget Patch cards");
+MODULE_DESCRIPTION("Driver for full TS modified DVB-S SAA7146+AV7110 based so-called Budget Patch cards");
diff --git a/drivers/media/pci/ttpci/budget.c b/drivers/media/pci/ttpci/budget.c
index fb8ede5a1531..3091b480ce22 100644
--- a/drivers/media/pci/ttpci/budget.c
+++ b/drivers/media/pci/ttpci/budget.c
@@ -897,5 +897,4 @@ module_exit(budget_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Ralph Metzler, Marcus Metzler, Michael Hunold, others");
-MODULE_DESCRIPTION("driver for the SAA7146 based so-called "
- "budget PCI DVB cards by Siemens, Technotrend, Hauppauge");
+MODULE_DESCRIPTION("driver for the SAA7146 based so-called budget PCI DVB cards by Siemens, Technotrend, Hauppauge");
diff --git a/drivers/media/pci/ttpci/budget.h b/drivers/media/pci/ttpci/budget.h
index 655eef5236ca..d5ae4438153e 100644
--- a/drivers/media/pci/ttpci/budget.h
+++ b/drivers/media/pci/ttpci/budget.h
@@ -21,8 +21,12 @@ extern int budget_debug;
#undef dprintk
#endif
-#define dprintk(level,args...) \
- do { if ((budget_debug & level)) { printk("%s: %s(): ", KBUILD_MODNAME, __func__); printk(args); } } while (0)
+#define dprintk(level, fmt, arg...) do { \
+ if (level & budget_debug) \
+ printk(KERN_DEBUG KBUILD_MODNAME ": %s(): " fmt, \
+ __func__, ##arg); \
+} while (0)
+
struct budget_info {
char *name;
diff --git a/drivers/media/pci/ttpci/dvb_filter.c b/drivers/media/pci/ttpci/dvb_filter.c
new file mode 100644
index 000000000000..b67127b67d4e
--- /dev/null
+++ b/drivers/media/pci/ttpci/dvb_filter.c
@@ -0,0 +1,114 @@
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/string.h>
+#include "dvb_filter.h"
+
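+/* AC3 sample rates in units of 100 Hz: 48.0, 44.1, 32.0 kHz (index 3 reserved) */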
+static u32 freq[4] = {480, 441, 320, 0};
+
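+/* AC3 bit rates in kb/s, indexed by frmsizecod >> 1 */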
+static unsigned int ac3_bitrates[32] =
+ {32,40,48,56,64,80,96,112,128,160,192,224,256,320,384,448,512,576,640,
+ 0,0,0,0,0,0,0,0,0,0,0,0,0};
+
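+/* AC3 frame lengths in 16-bit words, per [sample rate code][frmsizecod >> 1] */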
+static u32 ac3_frames[3][32] =
+ {{64,80,96,112,128,160,192,224,256,320,384,448,512,640,768,896,1024,
+ 1152,1280,0,0,0,0,0,0,0,0,0,0,0,0,0},
+ {69,87,104,121,139,174,208,243,278,348,417,487,557,696,835,975,1114,
+ 1253,1393,0,0,0,0,0,0,0,0,0,0,0,0,0},
+ {96,120,144,168,192,240,288,336,384,480,576,672,768,960,1152,1344,
+ 1536,1728,1920,0,0,0,0,0,0,0,0,0,0,0,0,0}};
+
+int dvb_filter_get_ac3info(u8 *mbuf, int count, struct dvb_audio_info *ai, int pr)
+{
+ u8 *headr;
+ int found = 0;
+ int c = 0;
+ u8 frame = 0;
+ int fr = 0;
+
+ while ( !found && c < count){
+ u8 *b = mbuf+c;
+
+ if ( b[0] == 0x0b && b[1] == 0x77 )
+ found = 1;
+ else {
+ c++;
+ }
+ }
+
+ if (!found) return -1;
+ if (pr)
+ printk(KERN_DEBUG "Audiostream: AC3");
+
+ ai->off = c;
+ if (c+5 >= count) return -1;
+
+ ai->layer = 0; // 0 for AC3
+ headr = mbuf+c+2;
+
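+	/* headr[2] carries fscod in the top two bits and frmsizecod in the low six */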
+ frame = (headr[2]&0x3f);
+ ai->bit_rate = ac3_bitrates[frame >> 1]*1000;
+
+ if (pr)
+ printk(KERN_CONT " BRate: %d kb/s", (int) ai->bit_rate/1000);
+
+	fr = (headr[2] & 0xc0) >> 6;
+	ai->frequency = freq[fr]*100;
+ if (pr)
+ printk(KERN_CONT " Freq: %d Hz\n", (int) ai->frequency);
+
+ ai->framesize = ac3_frames[fr][frame >> 1];
+ if ((frame & 1) && (fr == 1)) ai->framesize++;
+ ai->framesize = ai->framesize << 1;
+ if (pr)
+ printk(KERN_DEBUG " Framesize %d\n", (int) ai->framesize);
+
+ return 0;
+}
+
+void dvb_filter_pes2ts_init(struct dvb_filter_pes2ts *p2ts, unsigned short pid,
+ dvb_filter_pes2ts_cb_t *cb, void *priv)
+{
+ unsigned char *buf=p2ts->buf;
+
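+	/* pre-build the constant TS header: sync byte 0x47 followed by the 13-bit PID */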
+ buf[0]=0x47;
+ buf[1]=(pid>>8);
+ buf[2]=pid&0xff;
+ p2ts->cc=0;
+ p2ts->cb=cb;
+ p2ts->priv=priv;
+}
+
+int dvb_filter_pes2ts(struct dvb_filter_pes2ts *p2ts, unsigned char *pes,
+ int len, int payload_start)
+{
+ unsigned char *buf=p2ts->buf;
+ int ret=0, rest;
+
+ //len=6+((pes[4]<<8)|pes[5]);
+
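+	/* bit 6 of the second header byte is the payload_unit_start_indicator */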
+ if (payload_start)
+ buf[1]|=0x40;
+ else
+ buf[1]&=~0x40;
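+	/* full packets: 4 header bytes plus 184 payload bytes, continuity counter in the low nibble of buf[3] */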
+ while (len>=184) {
+ buf[3]=0x10|((p2ts->cc++)&0x0f);
+ memcpy(buf+4, pes, 184);
+ if ((ret=p2ts->cb(p2ts->priv, buf)))
+ return ret;
+ len-=184; pes+=184;
+ buf[1]&=~0x40;
+ }
+ if (!len)
+ return 0;
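+	/* short final packet: signal an adaptation field (0x30) and pad it with 0xff stuffing */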
+ buf[3]=0x30|((p2ts->cc++)&0x0f);
+ rest=183-len;
+ if (rest) {
+ buf[5]=0x00;
+ if (rest-1)
+ memset(buf+6, 0xff, rest-1);
+ }
+ buf[4]=rest;
+ memcpy(buf+5+rest, pes, len);
+ return p2ts->cb(p2ts->priv, buf);
+}
diff --git a/drivers/media/dvb-core/dvb_filter.h b/drivers/media/pci/ttpci/dvb_filter.h
index 375e3be184b1..375e3be184b1 100644
--- a/drivers/media/dvb-core/dvb_filter.h
+++ b/drivers/media/pci/ttpci/dvb_filter.h
diff --git a/drivers/media/pci/ttpci/ttpci-eeprom.c b/drivers/media/pci/ttpci/ttpci-eeprom.c
index 079ee098b7e3..9534f29c1ffd 100644
--- a/drivers/media/pci/ttpci/ttpci-eeprom.c
+++ b/drivers/media/pci/ttpci/ttpci-eeprom.c
@@ -171,5 +171,4 @@ EXPORT_SYMBOL(ttpci_eeprom_parse_mac);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Ralph Metzler, Marcus Metzler, others");
-MODULE_DESCRIPTION("Decode dvb_net MAC address from EEPROM of PCI DVB cards "
- "made by Siemens, Technotrend, Hauppauge");
+MODULE_DESCRIPTION("Decode dvb_net MAC address from EEPROM of PCI DVB cards made by Siemens, Technotrend, Hauppauge");
diff --git a/drivers/media/pci/tw5864/tw5864-reg.h b/drivers/media/pci/tw5864/tw5864-reg.h
index 92a1b077ef8a..30ac14210e91 100644
--- a/drivers/media/pci/tw5864/tw5864-reg.h
+++ b/drivers/media/pci/tw5864/tw5864-reg.h
@@ -1879,6 +1879,14 @@
#define TW5864_INDIR_IN_PIC_HEIGHT(channel) (0x201 + 4 * channel)
#define TW5864_INDIR_OUT_PIC_WIDTH(channel) (0x202 + 4 * channel)
#define TW5864_INDIR_OUT_PIC_HEIGHT(channel) (0x203 + 4 * channel)
+
+/* Some registers skipped */
+
+#define TW5864_INDIR_CROP_ETC 0x260
+/* Define controls in register TW5864_INDIR_CROP_ETC */
+/* Enable cropping from 720 to 704 */
+#define TW5864_INDIR_CROP_ETC_CROP_EN 0x4
+
/*
* Interrupt status register from the front-end. Write "1" to each bit to clear
* the interrupt
diff --git a/drivers/media/pci/tw5864/tw5864-video.c b/drivers/media/pci/tw5864/tw5864-video.c
index 652a059b2e0a..9421216bb942 100644
--- a/drivers/media/pci/tw5864/tw5864-video.c
+++ b/drivers/media/pci/tw5864/tw5864-video.c
@@ -330,6 +330,15 @@ static int tw5864_enable_input(struct tw5864_input *input)
tw_indir_writeb(TW5864_INDIR_OUT_PIC_WIDTH(nr), input->width / 4);
tw_indir_writeb(TW5864_INDIR_OUT_PIC_HEIGHT(nr), input->height / 4);
+ /*
+ * Crop width from 720 to 704.
+ * Above register settings need value 720 involved.
+ */
+ input->width = 704;
+ tw_indir_writeb(TW5864_INDIR_CROP_ETC,
+ tw_indir_readb(TW5864_INDIR_CROP_ETC) |
+ TW5864_INDIR_CROP_ETC_CROP_EN);
+
tw_writel(TW5864_DSP_PIC_MAX_MB,
((input->width / 16) << 8) | (input->height / 16));
@@ -532,7 +541,7 @@ static int tw5864_fmt_vid_cap(struct file *file, void *priv,
{
struct tw5864_input *input = video_drvdata(file);
- f->fmt.pix.width = 720;
+ f->fmt.pix.width = 704;
switch (input->std) {
default:
WARN_ON_ONCE(1);
@@ -738,7 +747,7 @@ static int tw5864_enum_framesizes(struct file *file, void *priv,
return -EINVAL;
fsize->type = V4L2_FRMSIZE_TYPE_DISCRETE;
- fsize->discrete.width = 720;
+ fsize->discrete.width = 704;
fsize->discrete.height = input->std == STD_NTSC ? 480 : 576;
return 0;
diff --git a/drivers/media/pci/tw68/tw68-video.c b/drivers/media/pci/tw68/tw68-video.c
index a45e02367321..58c4dd75bfa1 100644
--- a/drivers/media/pci/tw68/tw68-video.c
+++ b/drivers/media/pci/tw68/tw68-video.c
@@ -279,9 +279,8 @@ static int tw68_set_scale(struct tw68_dev *dev, unsigned int width,
height /= 2; /* we must set for 1-frame */
pr_debug("%s: width=%d, height=%d, both=%d\n"
- " tvnorm h_delay=%d, h_start=%d, h_stop=%d, "
- "v_delay=%d, v_start=%d, v_stop=%d\n" , __func__,
- width, height, V4L2_FIELD_HAS_BOTH(field),
+ " tvnorm h_delay=%d, h_start=%d, h_stop=%d, v_delay=%d, v_start=%d, v_stop=%d\n",
+ __func__, width, height, V4L2_FIELD_HAS_BOTH(field),
norm->h_delay, norm->h_start, norm->h_stop,
norm->v_delay, norm->video_v_start,
norm->video_v_stop);
@@ -309,16 +308,15 @@ static int tw68_set_scale(struct tw68_dev *dev, unsigned int width,
V4L2_FIELD_HAS_TOP(field) ? "T" : "",
V4L2_FIELD_HAS_BOTTOM(field) ? "B" : "",
v4l2_norm_to_name(dev->tvnorm->id));
- pr_debug("%s: hactive=%d, hdelay=%d, hscale=%d; "
- "vactive=%d, vdelay=%d, vscale=%d\n", __func__,
+ pr_debug("%s: hactive=%d, hdelay=%d, hscale=%d; vactive=%d, vdelay=%d, vscale=%d\n",
+ __func__,
hactive, hdelay, hscale, vactive, vdelay, vscale);
comb = ((vdelay & 0x300) >> 2) |
((vactive & 0x300) >> 4) |
((hdelay & 0x300) >> 6) |
((hactive & 0x300) >> 8);
- pr_debug("%s: setting CROP_HI=%02x, VDELAY_LO=%02x, "
- "VACTIVE_LO=%02x, HDELAY_LO=%02x, HACTIVE_LO=%02x\n",
+ pr_debug("%s: setting CROP_HI=%02x, VDELAY_LO=%02x, VACTIVE_LO=%02x, HDELAY_LO=%02x, HACTIVE_LO=%02x\n",
__func__, comb, vdelay, vactive, hdelay, hactive);
tw_writeb(TW68_CROP_HI, comb);
tw_writeb(TW68_VDELAY_LO, vdelay & 0xff);
@@ -327,8 +325,8 @@ static int tw68_set_scale(struct tw68_dev *dev, unsigned int width,
tw_writeb(TW68_HACTIVE_LO, hactive & 0xff);
comb = ((vscale & 0xf00) >> 4) | ((hscale & 0xf00) >> 8);
- pr_debug("%s: setting SCALE_HI=%02x, VSCALE_LO=%02x, "
- "HSCALE_LO=%02x\n", __func__, comb, vscale, hscale);
+ pr_debug("%s: setting SCALE_HI=%02x, VSCALE_LO=%02x, HSCALE_LO=%02x\n",
+ __func__, comb, vscale, hscale);
tw_writeb(TW68_SCALE_HI, comb);
tw_writeb(TW68_VSCALE_LO, vscale);
tw_writeb(TW68_HSCALE_LO, hscale);
diff --git a/drivers/media/pci/zoran/zoran_device.c b/drivers/media/pci/zoran/zoran_device.c
index 4d47ddac97dc..35b552c178da 100644
--- a/drivers/media/pci/zoran/zoran_device.c
+++ b/drivers/media/pci/zoran/zoran_device.c
@@ -173,12 +173,8 @@ dump_guests (struct zoran *zr)
guest[i] = post_office_read(zr, i, 0);
}
- printk(KERN_INFO "%s: Guests:", ZR_DEVNAME(zr));
-
- for (i = 1; i < 8; i++) {
- printk(" 0x%02x", guest[i]);
- }
- printk("\n");
+ printk(KERN_INFO "%s: Guests: %*ph\n",
+ ZR_DEVNAME(zr), 8, guest);
}
}
@@ -216,12 +212,9 @@ detect_guest_activity (struct zoran *zr)
if (j >= 8)
break;
}
- printk(KERN_INFO "%s: Guests:", ZR_DEVNAME(zr));
- for (i = 1; i < 8; i++) {
- printk(" 0x%02x", guest0[i]);
- }
- printk("\n");
+ printk(KERN_INFO "%s: Guests: %*ph\n", ZR_DEVNAME(zr), 8, guest0);
+
if (j == 0) {
printk(KERN_INFO "%s: No activity detected.\n", ZR_DEVNAME(zr));
return;
@@ -822,39 +815,39 @@ print_interrupts (struct zoran *zr)
printk(KERN_INFO "%s: interrupts received:", ZR_DEVNAME(zr));
if ((res = zr->field_counter) < -1 || res > 1) {
- printk(" FD:%d", res);
+ printk(KERN_CONT " FD:%d", res);
}
if ((res = zr->intr_counter_GIRQ1) != 0) {
- printk(" GIRQ1:%d", res);
+ printk(KERN_CONT " GIRQ1:%d", res);
noerr++;
}
if ((res = zr->intr_counter_GIRQ0) != 0) {
- printk(" GIRQ0:%d", res);
+ printk(KERN_CONT " GIRQ0:%d", res);
noerr++;
}
if ((res = zr->intr_counter_CodRepIRQ) != 0) {
- printk(" CodRepIRQ:%d", res);
+ printk(KERN_CONT " CodRepIRQ:%d", res);
noerr++;
}
if ((res = zr->intr_counter_JPEGRepIRQ) != 0) {
- printk(" JPEGRepIRQ:%d", res);
+ printk(KERN_CONT " JPEGRepIRQ:%d", res);
noerr++;
}
if (zr->JPEG_max_missed) {
- printk(" JPEG delays: max=%d min=%d", zr->JPEG_max_missed,
+ printk(KERN_CONT " JPEG delays: max=%d min=%d", zr->JPEG_max_missed,
zr->JPEG_min_missed);
}
if (zr->END_event_missed) {
- printk(" ENDs missed: %d", zr->END_event_missed);
+ printk(KERN_CONT " ENDs missed: %d", zr->END_event_missed);
}
//if (zr->jpg_queued_num) {
- printk(" queue_state=%ld/%ld/%ld/%ld", zr->jpg_que_tail,
+ printk(KERN_CONT " queue_state=%ld/%ld/%ld/%ld", zr->jpg_que_tail,
zr->jpg_dma_tail, zr->jpg_dma_head, zr->jpg_que_head);
//}
if (!noerr) {
- printk(": no interrupts detected.");
+ printk(KERN_CONT ": no interrupts detected.");
}
- printk("\n");
+ printk(KERN_CONT "\n");
}
void
diff --git a/drivers/media/pci/zoran/zoran_driver.c b/drivers/media/pci/zoran/zoran_driver.c
index d6b631add216..2170e174c335 100644
--- a/drivers/media/pci/zoran/zoran_driver.c
+++ b/drivers/media/pci/zoran/zoran_driver.c
@@ -1488,7 +1488,7 @@ zoran_set_input (struct zoran *zr,
if (input < 0 || input >= zr->card.inputs) {
dprintk(1,
KERN_ERR
- "%s: %s - unnsupported input %d\n",
+ "%s: %s - unsupported input %d\n",
ZR_DEVNAME(zr), __func__, input);
return -EINVAL;
}
diff --git a/drivers/media/platform/Kconfig b/drivers/media/platform/Kconfig
index ce4a96fccc43..d944421e392d 100644
--- a/drivers/media/platform/Kconfig
+++ b/drivers/media/platform/Kconfig
@@ -93,7 +93,7 @@ config VIDEO_OMAP3_DEBUG
config VIDEO_PXA27x
tristate "PXA27x Quick Capture Interface driver"
- depends on VIDEO_DEV && HAS_DMA
+ depends on VIDEO_DEV && VIDEO_V4L2 && HAS_DMA
depends on PXA27x || COMPILE_TEST
select VIDEOBUF2_DMA_SG
select SG_SPLIT
@@ -175,6 +175,23 @@ config VIDEO_MEDIATEK_VPU
To compile this driver as a module, choose M here: the
module will be called mtk-vpu.
+config VIDEO_MEDIATEK_MDP
+ tristate "Mediatek MDP driver"
+ depends on MTK_IOMMU || COMPILE_TEST
+ depends on VIDEO_DEV && VIDEO_V4L2
+ depends on ARCH_MEDIATEK || COMPILE_TEST
+ depends on HAS_DMA
+ select VIDEOBUF2_DMA_CONTIG
+ select V4L2_MEM2MEM_DEV
+ select VIDEO_MEDIATEK_VPU
+ default n
+ ---help---
+	  It is a v4l2 driver present in Mediatek MT8173 SoCs. The
+	  driver supports scaling and color space conversion.
+
+ To compile this driver as a module, choose M here: the
+ module will be called mtk-mdp.
+
config VIDEO_MEDIATEK_VCODEC
tristate "Mediatek Video Codec driver"
depends on MTK_IOMMU || COMPILE_TEST
@@ -249,7 +266,7 @@ config VIDEO_MX2_EMMAPRP
config VIDEO_SAMSUNG_EXYNOS_GSC
tristate "Samsung Exynos G-Scaler driver"
depends on VIDEO_DEV && VIDEO_V4L2
- depends on ARCH_EXYNOS5 || COMPILE_TEST
+ depends on ARCH_EXYNOS || COMPILE_TEST
depends on HAS_DMA
select VIDEOBUF2_DMA_CONTIG
select V4L2_MEM2MEM_DEV
@@ -290,6 +307,20 @@ config VIDEO_SH_VEU
Support for the Video Engine Unit (VEU) on SuperH and
SH-Mobile SoCs.
+config VIDEO_RENESAS_FDP1
+ tristate "Renesas Fine Display Processor"
+ depends on VIDEO_DEV && VIDEO_V4L2 && HAS_DMA
+ depends on ARCH_SHMOBILE || COMPILE_TEST
+ depends on (!ARCH_RENESAS && !VIDEO_RENESAS_FCP) || VIDEO_RENESAS_FCP
+ select VIDEOBUF2_DMA_CONTIG
+ select V4L2_MEM2MEM_DEV
+ ---help---
+ This is a V4L2 driver for the Renesas Fine Display Processor
+	  providing colour space conversion and de-interlacing features.
+
+ To compile this driver as a module, choose M here: the module
+ will be called rcar_fdp1.
+
config VIDEO_RENESAS_JPU
tristate "Renesas JPEG Processing Unit"
depends on VIDEO_DEV && VIDEO_V4L2 && HAS_DMA
@@ -334,6 +365,9 @@ config VIDEO_TI_VPE
depends on HAS_DMA
select VIDEOBUF2_DMA_CONTIG
select V4L2_MEM2MEM_DEV
+ select VIDEO_TI_VPDMA
+ select VIDEO_TI_SC
+ select VIDEO_TI_CSC
default n
---help---
Support for the TI VPE(Video Processing Engine) block
@@ -347,6 +381,17 @@ config VIDEO_TI_VPE_DEBUG
endif # V4L_MEM2MEM_DRIVERS
+# TI VIDEO PORT Helper Modules
+# These will be selected by VPE and VIP
+config VIDEO_TI_VPDMA
+ tristate
+
+config VIDEO_TI_SC
+ tristate
+
+config VIDEO_TI_CSC
+ tristate
+
menuconfig V4L_TEST_DRIVERS
bool "Media test drivers"
depends on MEDIA_CAMERA_SUPPORT
diff --git a/drivers/media/platform/Makefile b/drivers/media/platform/Makefile
index 40b18d12726e..5b3cb271d2b8 100644
--- a/drivers/media/platform/Makefile
+++ b/drivers/media/platform/Makefile
@@ -48,6 +48,7 @@ obj-$(CONFIG_VIDEO_SH_VOU) += sh_vou.o
obj-$(CONFIG_SOC_CAMERA) += soc_camera/
obj-$(CONFIG_VIDEO_RENESAS_FCP) += rcar-fcp.o
+obj-$(CONFIG_VIDEO_RENESAS_FDP1) += rcar_fdp1.o
obj-$(CONFIG_VIDEO_RENESAS_JPU) += rcar_jpu.o
obj-$(CONFIG_VIDEO_RENESAS_VSP1) += vsp1/
@@ -66,3 +67,5 @@ ccflags-y += -I$(srctree)/drivers/media/i2c
obj-$(CONFIG_VIDEO_MEDIATEK_VPU) += mtk-vpu/
obj-$(CONFIG_VIDEO_MEDIATEK_VCODEC) += mtk-vcodec/
+
+obj-$(CONFIG_VIDEO_MEDIATEK_MDP) += mtk-mdp/
diff --git a/drivers/media/platform/atmel/atmel-isc.c b/drivers/media/platform/atmel/atmel-isc.c
index ccfe13b7d3f8..fa68fe912c95 100644
--- a/drivers/media/platform/atmel/atmel-isc.c
+++ b/drivers/media/platform/atmel/atmel-isc.c
@@ -617,7 +617,13 @@ static void isc_buffer_queue(struct vb2_buffer *vb)
unsigned long flags;
spin_lock_irqsave(&isc->dma_queue_lock, flags);
- list_add_tail(&buf->list, &isc->dma_queue);
+ if (!isc->cur_frm && list_empty(&isc->dma_queue) &&
+ vb2_is_streaming(vb->vb2_queue)) {
+ isc->cur_frm = buf;
+ isc_start_dma(isc->regmap, isc->cur_frm,
+ isc->current_fmt->reg_dctrl_dview);
+ } else
+ list_add_tail(&buf->list, &isc->dma_queue);
spin_unlock_irqrestore(&isc->dma_queue_lock, flags);
}
@@ -1418,6 +1424,7 @@ static int atmel_isc_probe(struct platform_device *pdev)
if (list_empty(&isc->subdev_entities)) {
dev_err(dev, "no subdev found\n");
+ ret = -ENODEV;
goto unregister_v4l2_device;
}
diff --git a/drivers/media/platform/blackfin/bfin_capture.c b/drivers/media/platform/blackfin/bfin_capture.c
index 8eb03397d736..2e6edc09b58f 100644
--- a/drivers/media/platform/blackfin/bfin_capture.c
+++ b/drivers/media/platform/blackfin/bfin_capture.c
@@ -169,7 +169,7 @@ static int bcap_init_sensor_formats(struct bcap_device *bcap_dev)
if (!num_formats)
return -ENXIO;
- sf = kzalloc(num_formats * sizeof(*sf), GFP_KERNEL);
+ sf = kcalloc(num_formats, sizeof(*sf), GFP_KERNEL);
if (!sf)
return -ENOMEM;
@@ -802,10 +802,8 @@ static int bcap_probe(struct platform_device *pdev)
}
bcap_dev = kzalloc(sizeof(*bcap_dev), GFP_KERNEL);
- if (!bcap_dev) {
- v4l2_err(pdev->dev.driver, "Unable to alloc bcap_dev\n");
+ if (!bcap_dev)
return -ENOMEM;
- }
bcap_dev->cfg = config;
diff --git a/drivers/media/platform/blackfin/ppi.c b/drivers/media/platform/blackfin/ppi.c
index cff63e511e6d..b8f3d9fa66e9 100644
--- a/drivers/media/platform/blackfin/ppi.c
+++ b/drivers/media/platform/blackfin/ppi.c
@@ -214,6 +214,8 @@ static int ppi_set_params(struct ppi_if *ppi, struct ppi_params *params)
if (params->dlen > 24 || params->dlen <= 0)
return -EINVAL;
pctrl = devm_pinctrl_get(ppi->dev);
+ if (IS_ERR(pctrl))
+ return PTR_ERR(pctrl);
pstate = pinctrl_lookup_state(pctrl,
pin_state[(params->dlen + 7) / 8 - 1]);
if (pinctrl_select_state(pctrl, pstate))
diff --git a/drivers/media/platform/coda/coda-common.c b/drivers/media/platform/coda/coda-common.c
index c39718a63e5e..9e6bdafa16f5 100644
--- a/drivers/media/platform/coda/coda-common.c
+++ b/drivers/media/platform/coda/coda-common.c
@@ -2295,8 +2295,13 @@ static int coda_probe(struct platform_device *pdev)
pm_runtime_set_active(&pdev->dev);
pm_runtime_enable(&pdev->dev);
- return coda_firmware_request(dev);
+ ret = coda_firmware_request(dev);
+ if (ret)
+ goto err_alloc_workqueue;
+ return 0;
+err_alloc_workqueue:
+ destroy_workqueue(dev->workqueue);
err_v4l2_register:
v4l2_device_unregister(&dev->v4l2_dev);
return ret;
diff --git a/drivers/media/platform/coda/coda-h264.c b/drivers/media/platform/coda/coda-h264.c
index 456773af1f1d..09dfcca7cc50 100644
--- a/drivers/media/platform/coda/coda-h264.c
+++ b/drivers/media/platform/coda/coda-h264.c
@@ -13,6 +13,7 @@
#include <linux/kernel.h>
#include <linux/string.h>
+#include <coda.h>
static const u8 coda_filler_nal[14] = { 0x00, 0x00, 0x00, 0x01, 0x0c, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x80 };
diff --git a/drivers/media/platform/davinci/dm355_ccdc.c b/drivers/media/platform/davinci/dm355_ccdc.c
index c90b9a4f0c24..65c2973167c6 100644
--- a/drivers/media/platform/davinci/dm355_ccdc.c
+++ b/drivers/media/platform/davinci/dm355_ccdc.c
@@ -334,8 +334,8 @@ static int ccdc_set_params(void __user *params)
x = copy_from_user(&ccdc_raw_params, params, sizeof(ccdc_raw_params));
if (x) {
- dev_dbg(ccdc_cfg.dev, "ccdc_set_params: error in copying ccdc"
- "params, %d\n", x);
+ dev_dbg(ccdc_cfg.dev, "ccdc_set_params: error in copying ccdcparams, %d\n",
+ x);
return -EFAULT;
}
diff --git a/drivers/media/platform/davinci/dm644x_ccdc.c b/drivers/media/platform/davinci/dm644x_ccdc.c
index 6fba32bec974..c7523a7e0594 100644
--- a/drivers/media/platform/davinci/dm644x_ccdc.c
+++ b/drivers/media/platform/davinci/dm644x_ccdc.c
@@ -354,8 +354,8 @@ static int ccdc_set_params(void __user *params)
x = copy_from_user(&ccdc_raw_params, params, sizeof(ccdc_raw_params));
if (x) {
- dev_dbg(ccdc_cfg.dev, "ccdc_set_params: error in copying"
- "ccdc params, %d\n", x);
+ dev_dbg(ccdc_cfg.dev, "ccdc_set_params: error in copyingccdc params, %d\n",
+ x);
return -EFAULT;
}
diff --git a/drivers/media/platform/davinci/vpbe.c b/drivers/media/platform/davinci/vpbe.c
index 9a6c2cc38acb..8c8cbeb7d90f 100644
--- a/drivers/media/platform/davinci/vpbe.c
+++ b/drivers/media/platform/davinci/vpbe.c
@@ -107,7 +107,7 @@ static int vpbe_find_encoder_sd_index(struct vpbe_config *cfg,
static int vpbe_g_cropcap(struct vpbe_device *vpbe_dev,
struct v4l2_cropcap *cropcap)
{
- if (NULL == cropcap)
+ if (!cropcap)
return -EINVAL;
cropcap->bounds.left = 0;
cropcap->bounds.top = 0;
@@ -149,7 +149,7 @@ static int vpbe_get_mode_info(struct vpbe_device *vpbe_dev, char *mode,
int curr_output = output_index;
int i;
- if (NULL == mode)
+ if (!mode)
return -EINVAL;
for (i = 0; i < cfg->outputs[curr_output].num_modes; i++) {
@@ -166,7 +166,7 @@ static int vpbe_get_mode_info(struct vpbe_device *vpbe_dev, char *mode,
static int vpbe_get_current_mode_info(struct vpbe_device *vpbe_dev,
struct vpbe_enc_mode_info *mode_info)
{
- if (NULL == mode_info)
+ if (!mode_info)
return -EINVAL;
*mode_info = vpbe_dev->current_timings;
@@ -227,10 +227,9 @@ static int vpbe_set_output(struct vpbe_device *vpbe_dev, int index)
vpbe_current_encoder_info(vpbe_dev);
struct vpbe_config *cfg = vpbe_dev->cfg;
struct venc_platform_data *venc_device = vpbe_dev->venc_device;
- u32 if_params;
int enc_out_index;
int sd_index;
- int ret = 0;
+ int ret;
if (index >= cfg->num_outputs)
return -EINVAL;
@@ -254,20 +253,19 @@ static int vpbe_set_output(struct vpbe_device *vpbe_dev, int index)
sd_index = vpbe_find_encoder_sd_index(cfg, index);
if (sd_index < 0) {
ret = -EINVAL;
- goto out;
+ goto unlock;
}
- if_params = cfg->outputs[index].if_params;
- venc_device->setup_if_config(if_params);
+ ret = venc_device->setup_if_config(cfg->outputs[index].if_params);
if (ret)
- goto out;
+ goto unlock;
}
/* Set output at the encoder */
ret = v4l2_subdev_call(vpbe_dev->encoders[sd_index], video,
s_routing, 0, enc_out_index, 0);
if (ret)
- goto out;
+ goto unlock;
/*
	 * It is assumed that venc or external encoder will set a default
@@ -289,7 +287,7 @@ static int vpbe_set_output(struct vpbe_device *vpbe_dev, int index)
vpbe_dev->current_sd_index = sd_index;
vpbe_dev->current_out_index = index;
}
-out:
+unlock:
mutex_unlock(&vpbe_dev->lock);
return ret;
}
@@ -297,19 +295,19 @@ out:
static int vpbe_set_default_output(struct vpbe_device *vpbe_dev)
{
struct vpbe_config *cfg = vpbe_dev->cfg;
- int ret = 0;
int i;
for (i = 0; i < cfg->num_outputs; i++) {
if (!strcmp(def_output,
cfg->outputs[i].output.name)) {
- ret = vpbe_set_output(vpbe_dev, i);
+ int ret = vpbe_set_output(vpbe_dev, i);
+
if (!ret)
vpbe_dev->current_out_index = i;
return ret;
}
}
- return ret;
+ return 0;
}
/**
@@ -356,7 +354,7 @@ static int vpbe_s_dv_timings(struct vpbe_device *vpbe_dev,
ret = v4l2_subdev_call(vpbe_dev->encoders[sd_index], video,
s_dv_timings, dv_timings);
- if (!ret && (vpbe_dev->amp != NULL)) {
+ if (!ret && vpbe_dev->amp) {
/* Call amplifier subdevice */
ret = v4l2_subdev_call(vpbe_dev->amp, video,
s_dv_timings, dv_timings);
@@ -509,10 +507,9 @@ static int vpbe_set_mode(struct vpbe_device *vpbe_dev,
struct v4l2_dv_timings dv_timings;
struct osd_state *osd_device;
int out_index = vpbe_dev->current_out_index;
- int ret = 0;
int i;
- if ((NULL == mode_info) || (NULL == mode_info->name))
+ if (!mode_info || !mode_info->name)
return -EINVAL;
for (i = 0; i < cfg->outputs[out_index].num_modes; i++) {
@@ -536,7 +533,7 @@ static int vpbe_set_mode(struct vpbe_device *vpbe_dev,
}
/* Only custom timing should reach here */
- if (preset_mode == NULL)
+ if (!preset_mode)
return -EINVAL;
mutex_lock(&vpbe_dev->lock);
@@ -549,8 +546,7 @@ static int vpbe_set_mode(struct vpbe_device *vpbe_dev,
vpbe_dev->current_timings.upper_margin);
mutex_unlock(&vpbe_dev->lock);
-
- return ret;
+ return 0;
}
static int vpbe_set_default_mode(struct vpbe_device *vpbe_dev)
@@ -570,9 +566,9 @@ static int platform_device_get(struct device *dev, void *data)
struct platform_device *pdev = to_platform_device(dev);
struct vpbe_device *vpbe_dev = data;
- if (strstr(pdev->name, "vpbe-osd") != NULL)
+ if (strstr(pdev->name, "vpbe-osd"))
vpbe_dev->osd_device = platform_get_drvdata(pdev);
- if (strstr(pdev->name, "vpbe-venc") != NULL)
+ if (strstr(pdev->name, "vpbe-venc"))
vpbe_dev->venc_device = dev_get_platdata(&pdev->dev);
return 0;
@@ -606,7 +602,7 @@ static int vpbe_initialize(struct device *dev, struct vpbe_device *vpbe_dev)
* from the platform device by iteration of platform drivers and
* matching with device name
*/
- if (NULL == vpbe_dev || NULL == dev) {
+ if (!vpbe_dev || !dev) {
printk(KERN_ERR "Null device pointers.\n");
return -ENODEV;
}
@@ -652,7 +648,7 @@ static int vpbe_initialize(struct device *dev, struct vpbe_device *vpbe_dev)
vpbe_dev->venc = venc_sub_dev_init(&vpbe_dev->v4l2_dev,
vpbe_dev->cfg->venc.module_name);
/* register venc sub device */
- if (vpbe_dev->venc == NULL) {
+ if (!vpbe_dev->venc) {
v4l2_err(&vpbe_dev->v4l2_dev,
"vpbe unable to init venc sub device\n");
ret = -ENODEV;
@@ -660,8 +656,7 @@ static int vpbe_initialize(struct device *dev, struct vpbe_device *vpbe_dev)
}
/* initialize osd device */
osd_device = vpbe_dev->osd_device;
-
- if (NULL != osd_device->ops.initialize) {
+ if (osd_device->ops.initialize) {
err = osd_device->ops.initialize(osd_device);
if (err) {
v4l2_err(&vpbe_dev->v4l2_dev,
@@ -676,12 +671,10 @@ static int vpbe_initialize(struct device *dev, struct vpbe_device *vpbe_dev)
* store venc sd index.
*/
num_encoders = vpbe_dev->cfg->num_ext_encoders + 1;
- vpbe_dev->encoders = kmalloc(
- sizeof(struct v4l2_subdev *)*num_encoders,
- GFP_KERNEL);
- if (NULL == vpbe_dev->encoders) {
- v4l2_err(&vpbe_dev->v4l2_dev,
- "unable to allocate memory for encoders sub devices");
+ vpbe_dev->encoders = kmalloc_array(num_encoders,
+ sizeof(*vpbe_dev->encoders),
+ GFP_KERNEL);
+ if (!vpbe_dev->encoders) {
ret = -ENOMEM;
goto fail_dev_unregister;
}
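The allocation is converted from an open-coded kmalloc(sizeof(ptr) * n) to kmalloc_array(), which returns NULL if n * size would overflow instead of silently wrapping, and the redundant out-of-memory message is dropped because the allocator already logs a warning. A minimal sketch of the idiom (demo_dev is hypothetical):

    #include <linux/slab.h>

    struct v4l2_subdev;

    struct demo_dev {                   /* stands in for struct vpbe_device */
            struct v4l2_subdev **encoders;
    };

    static int demo_alloc_encoders(struct demo_dev *d, size_t num)
    {
            /*
             * Overflow-checked n * size; sizeof(*d->encoders) tracks the
             * element type automatically if the field ever changes.
             */
            d->encoders = kmalloc_array(num, sizeof(*d->encoders), GFP_KERNEL);
            if (!d->encoders)
                    return -ENOMEM; /* no print needed: kmalloc already warns */
            return 0;
    }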
@@ -705,19 +698,17 @@ static int vpbe_initialize(struct device *dev, struct vpbe_device *vpbe_dev)
"v4l2 sub device %s registered\n",
enc_info->module_name);
else {
- v4l2_err(&vpbe_dev->v4l2_dev, "encoder %s"
- " failed to register",
+ v4l2_err(&vpbe_dev->v4l2_dev, "encoder %s failed to register",
enc_info->module_name);
ret = -ENODEV;
goto fail_kfree_encoders;
}
} else
- v4l2_warn(&vpbe_dev->v4l2_dev, "non-i2c encoders"
- " currently not supported");
+ v4l2_warn(&vpbe_dev->v4l2_dev, "non-i2c encoders currently not supported");
}
/* Add amplifier subdevice for dm365 */
if ((strcmp(vpbe_dev->cfg->module_name, "dm365-vpbe-display") == 0) &&
- vpbe_dev->cfg->amp != NULL) {
+ vpbe_dev->cfg->amp) {
amp_info = vpbe_dev->cfg->amp;
if (amp_info->is_i2c) {
vpbe_dev->amp = v4l2_i2c_new_subdev_board(
@@ -735,8 +726,7 @@ static int vpbe_initialize(struct device *dev, struct vpbe_device *vpbe_dev)
amp_info->module_name);
} else {
vpbe_dev->amp = NULL;
- v4l2_warn(&vpbe_dev->v4l2_dev, "non-i2c amplifiers"
- " currently not supported");
+ v4l2_warn(&vpbe_dev->v4l2_dev, "non-i2c amplifiers currently not supported");
}
} else {
vpbe_dev->amp = NULL;
@@ -824,9 +814,8 @@ static int vpbe_probe(struct platform_device *pdev)
{
struct vpbe_device *vpbe_dev;
struct vpbe_config *cfg;
- int ret = -EINVAL;
- if (pdev->dev.platform_data == NULL) {
+ if (!pdev->dev.platform_data) {
v4l2_err(pdev->dev.driver, "No platform data\n");
return -ENODEV;
}
@@ -835,17 +824,14 @@ static int vpbe_probe(struct platform_device *pdev)
if (!cfg->module_name[0] ||
!cfg->osd.module_name[0] ||
!cfg->venc.module_name[0]) {
- v4l2_err(pdev->dev.driver, "vpbe display module names not"
- " defined\n");
- return ret;
+ v4l2_err(pdev->dev.driver, "vpbe display module names not defined\n");
+ return -EINVAL;
}
vpbe_dev = kzalloc(sizeof(*vpbe_dev), GFP_KERNEL);
- if (vpbe_dev == NULL) {
- v4l2_err(pdev->dev.driver, "Unable to allocate memory"
- " for vpbe_device\n");
+ if (!vpbe_dev)
return -ENOMEM;
- }
+
vpbe_dev->cfg = cfg;
vpbe_dev->ops = vpbe_dev_ops;
vpbe_dev->pdev = &pdev->dev;
diff --git a/drivers/media/platform/davinci/vpfe_capture.c b/drivers/media/platform/davinci/vpfe_capture.c
index 6efb2f1631c4..ee1cd79739c8 100644
--- a/drivers/media/platform/davinci/vpfe_capture.c
+++ b/drivers/media/platform/davinci/vpfe_capture.c
@@ -229,7 +229,7 @@ int vpfe_register_ccdc_device(struct ccdc_hw_device *dev)
BUG_ON(!dev->hw_ops.getfid);
mutex_lock(&ccdc_lock);
- if (NULL == ccdc_cfg) {
+ if (!ccdc_cfg) {
/*
* TODO. Will this ever happen? If so, we need to fix it.
* Probably we need to add the request to a linked list and
@@ -265,7 +265,7 @@ EXPORT_SYMBOL(vpfe_register_ccdc_device);
*/
void vpfe_unregister_ccdc_device(struct ccdc_hw_device *dev)
{
- if (NULL == dev) {
+ if (!dev) {
printk(KERN_ERR "invalid ccdc device ptr\n");
return;
}
@@ -281,7 +281,6 @@ void vpfe_unregister_ccdc_device(struct ccdc_hw_device *dev)
mutex_lock(&ccdc_lock);
ccdc_dev = NULL;
mutex_unlock(&ccdc_lock);
- return;
}
EXPORT_SYMBOL(vpfe_unregister_ccdc_device);
@@ -384,7 +383,7 @@ static int vpfe_config_image_format(struct vpfe_device *vpfe_dev,
};
struct v4l2_mbus_framefmt *mbus_fmt = &fmt.format;
struct v4l2_pix_format *pix = &vpfe_dev->fmt.fmt.pix;
- int i, ret = 0;
+ int i, ret;
for (i = 0; i < ARRAY_SIZE(vpfe_standards); i++) {
if (vpfe_standards[i].std_id & std_id) {
@@ -453,7 +452,7 @@ static int vpfe_config_image_format(struct vpfe_device *vpfe_dev,
static int vpfe_initialize_device(struct vpfe_device *vpfe_dev)
{
- int ret = 0;
+ int ret;
/* set first input of current subdevice as the current input */
vpfe_dev->current_input = 0;
@@ -469,7 +468,7 @@ static int vpfe_initialize_device(struct vpfe_device *vpfe_dev)
/* now open the ccdc device to initialize it */
mutex_lock(&ccdc_lock);
- if (NULL == ccdc_dev) {
+ if (!ccdc_dev) {
v4l2_err(&vpfe_dev->v4l2_dev, "ccdc device not registered\n");
ret = -ENODEV;
goto unlock;
@@ -511,12 +510,10 @@ static int vpfe_open(struct file *file)
}
/* Allocate memory for the file handle object */
- fh = kmalloc(sizeof(struct vpfe_fh), GFP_KERNEL);
- if (NULL == fh) {
- v4l2_err(&vpfe_dev->v4l2_dev,
- "unable to allocate memory for file handle object\n");
+ fh = kmalloc(sizeof(*fh), GFP_KERNEL);
+ if (!fh)
return -ENOMEM;
- }
+
/* store pointer to fh in private_data member of file */
file->private_data = fh;
fh->vpfe_dev = vpfe_dev;
@@ -584,7 +581,7 @@ static irqreturn_t vpfe_isr(int irq, void *dev_id)
goto clear_intr;
/* only for 6446 this will be applicable */
- if (NULL != ccdc_dev->hw_ops.reset)
+ if (ccdc_dev->hw_ops.reset)
ccdc_dev->hw_ops.reset();
if (field == V4L2_FIELD_NONE) {
@@ -617,9 +614,8 @@ static irqreturn_t vpfe_isr(int irq, void *dev_id)
* interleaved or separately in memory, reconfigure
* the CCDC memory address
*/
- if (field == V4L2_FIELD_SEQ_TB) {
+ if (field == V4L2_FIELD_SEQ_TB)
vpfe_schedule_bottom_field(vpfe_dev);
- }
goto clear_intr;
}
/*
@@ -824,7 +820,7 @@ static const struct vpfe_pixel_format *
int temp, found;
vpfe_pix_fmt = vpfe_lookup_pix_format(pixfmt->pixelformat);
- if (NULL == vpfe_pix_fmt) {
+ if (!vpfe_pix_fmt) {
/*
* use current pixel format in the vpfe device. We
* will find this pix format in the table
@@ -919,8 +915,7 @@ static const struct vpfe_pixel_format *
else
pixfmt->sizeimage = pixfmt->bytesperline * pixfmt->height;
- v4l2_info(&vpfe_dev->v4l2_dev, "adjusted width = %d, height ="
- " %d, bpp = %d, bytesperline = %d, sizeimage = %d\n",
+ v4l2_info(&vpfe_dev->v4l2_dev, "adjusted width = %d, height = %d, bpp = %d, bytesperline = %d, sizeimage = %d\n",
pixfmt->width, pixfmt->height, vpfe_pix_fmt->bpp,
pixfmt->bytesperline, pixfmt->sizeimage);
return vpfe_pix_fmt;
@@ -967,7 +962,7 @@ static int vpfe_enum_fmt_vid_cap(struct file *file, void *priv,
/* Fill in the information about format */
pix_fmt = vpfe_lookup_pix_format(pix);
- if (NULL != pix_fmt) {
+ if (pix_fmt) {
temp_index = fmt->index;
*fmt = pix_fmt->fmtdesc;
fmt->index = temp_index;
@@ -981,7 +976,7 @@ static int vpfe_s_fmt_vid_cap(struct file *file, void *priv,
{
struct vpfe_device *vpfe_dev = video_drvdata(file);
const struct vpfe_pixel_format *pix_fmts;
- int ret = 0;
+ int ret;
v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_s_fmt_vid_cap\n");
@@ -993,8 +988,7 @@ static int vpfe_s_fmt_vid_cap(struct file *file, void *priv,
/* Check for valid frame format */
pix_fmts = vpfe_check_format(vpfe_dev, &fmt->fmt.pix);
-
- if (NULL == pix_fmts)
+ if (!pix_fmts)
return -EINVAL;
/* store the pixel format in the device object */
@@ -1020,7 +1014,7 @@ static int vpfe_try_fmt_vid_cap(struct file *file, void *priv,
v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_try_fmt_vid_cap\n");
pix_fmts = vpfe_check_format(vpfe_dev, &f->fmt.pix);
- if (NULL == pix_fmts)
+ if (!pix_fmts)
return -EINVAL;
return 0;
}
@@ -1088,12 +1082,11 @@ static int vpfe_enum_input(struct file *file, void *priv,
&subdev,
&index,
inp->index) < 0) {
- v4l2_err(&vpfe_dev->v4l2_dev, "input information not found"
- " for the subdev\n");
+ v4l2_err(&vpfe_dev->v4l2_dev, "input information not found for the subdev\n");
return -EINVAL;
}
sdinfo = &vpfe_dev->cfg->sub_devs[subdev];
- memcpy(inp, &sdinfo->inputs[index], sizeof(struct v4l2_input));
+ *inp = sdinfo->inputs[index];
return 0;
}
@@ -1114,8 +1107,8 @@ static int vpfe_s_input(struct file *file, void *priv, unsigned int index)
struct vpfe_subdev_info *sdinfo;
int subdev_index, inp_index;
struct vpfe_route *route;
- u32 input = 0, output = 0;
- int ret = -EINVAL;
+ u32 input, output;
+ int ret;
v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_s_input\n");
@@ -1147,6 +1140,9 @@ static int vpfe_s_input(struct file *file, void *priv, unsigned int index)
if (route && sdinfo->can_route) {
input = route->input;
output = route->output;
+ } else {
+ input = 0;
+ output = 0;
}
if (sd)
@@ -1181,7 +1177,7 @@ static int vpfe_querystd(struct file *file, void *priv, v4l2_std_id *std_id)
{
struct vpfe_device *vpfe_dev = video_drvdata(file);
struct vpfe_subdev_info *sdinfo;
- int ret = 0;
+ int ret;
v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_querystd\n");
@@ -1200,7 +1196,7 @@ static int vpfe_s_std(struct file *file, void *priv, v4l2_std_id std_id)
{
struct vpfe_device *vpfe_dev = video_drvdata(file);
struct vpfe_subdev_info *sdinfo;
- int ret = 0;
+ int ret;
v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_s_std\n");
@@ -1349,7 +1345,7 @@ static int vpfe_reqbufs(struct file *file, void *priv,
{
struct vpfe_device *vpfe_dev = video_drvdata(file);
struct vpfe_fh *fh = file->private_data;
- int ret = 0;
+ int ret;
v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_reqbufs\n");
@@ -1481,7 +1477,7 @@ static int vpfe_streamon(struct file *file, void *priv,
struct vpfe_fh *fh = file->private_data;
struct vpfe_subdev_info *sdinfo;
unsigned long addr;
- int ret = 0;
+ int ret;
v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_streamon\n");
@@ -1564,7 +1560,7 @@ static int vpfe_streamoff(struct file *file, void *priv,
struct vpfe_device *vpfe_dev = video_drvdata(file);
struct vpfe_fh *fh = file->private_data;
struct vpfe_subdev_info *sdinfo;
- int ret = 0;
+ int ret;
v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_streamoff\n");
@@ -1650,7 +1646,7 @@ static int vpfe_s_selection(struct file *file, void *priv,
{
struct vpfe_device *vpfe_dev = video_drvdata(file);
struct v4l2_rect rect = sel->r;
- int ret = 0;
+ int ret;
v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_s_selection\n");
@@ -1708,7 +1704,7 @@ static long vpfe_param_handler(struct file *file, void *priv,
bool valid_prio, unsigned int cmd, void *param)
{
struct vpfe_device *vpfe_dev = video_drvdata(file);
- int ret = 0;
+ int ret;
v4l2_dbg(2, debug, &vpfe_dev->v4l2_dev, "vpfe_param_handler\n");
@@ -1821,7 +1817,7 @@ static int vpfe_probe(struct platform_device *pdev)
struct vpfe_device *vpfe_dev;
struct i2c_adapter *i2c_adap;
struct video_device *vfd;
- int ret = -ENOMEM, i, j;
+ int ret, i, j;
int num_subdevs = 0;
/* Get the pointer to the device object */
@@ -1830,12 +1826,12 @@ static int vpfe_probe(struct platform_device *pdev)
if (!vpfe_dev) {
v4l2_err(pdev->dev.driver,
"Failed to allocate memory for vpfe_dev\n");
- return ret;
+ return -ENOMEM;
}
vpfe_dev->pdev = &pdev->dev;
- if (NULL == pdev->dev.platform_data) {
+ if (!pdev->dev.platform_data) {
v4l2_err(pdev->dev.driver, "Unable to get vpfe config\n");
ret = -ENODEV;
goto probe_free_dev_mem;
@@ -1843,19 +1839,16 @@ static int vpfe_probe(struct platform_device *pdev)
vpfe_cfg = pdev->dev.platform_data;
vpfe_dev->cfg = vpfe_cfg;
- if (NULL == vpfe_cfg->ccdc ||
- NULL == vpfe_cfg->card_name ||
- NULL == vpfe_cfg->sub_devs) {
+ if (!vpfe_cfg->ccdc || !vpfe_cfg->card_name || !vpfe_cfg->sub_devs) {
v4l2_err(pdev->dev.driver, "null ptr in vpfe_cfg\n");
ret = -ENOENT;
goto probe_free_dev_mem;
}
/* Allocate memory for ccdc configuration */
- ccdc_cfg = kmalloc(sizeof(struct ccdc_config), GFP_KERNEL);
- if (NULL == ccdc_cfg) {
- v4l2_err(pdev->dev.driver,
- "Memory allocation failed for ccdc_cfg\n");
+ ccdc_cfg = kmalloc(sizeof(*ccdc_cfg), GFP_KERNEL);
+ if (!ccdc_cfg) {
+ ret = -ENOMEM;
goto probe_free_dev_mem;
}
@@ -1940,11 +1933,10 @@ static int vpfe_probe(struct platform_device *pdev)
video_set_drvdata(&vpfe_dev->video_dev, vpfe_dev);
i2c_adap = i2c_get_adapter(vpfe_cfg->i2c_adapter_id);
num_subdevs = vpfe_cfg->num_subdevs;
- vpfe_dev->sd = kmalloc(sizeof(struct v4l2_subdev *) * num_subdevs,
- GFP_KERNEL);
- if (NULL == vpfe_dev->sd) {
- v4l2_err(&vpfe_dev->v4l2_dev,
- "unable to allocate memory for subdevice pointers\n");
+ vpfe_dev->sd = kmalloc_array(num_subdevs,
+ sizeof(*vpfe_dev->sd),
+ GFP_KERNEL);
+ if (!vpfe_dev->sd) {
ret = -ENOMEM;
goto probe_out_video_unregister;
}
@@ -1974,6 +1966,7 @@ static int vpfe_probe(struct platform_device *pdev)
v4l2_info(&vpfe_dev->v4l2_dev,
"v4l2 sub device %s register fails\n",
sdinfo->name);
+ ret = -ENXIO;
goto probe_sd_out;
}
}
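Note the companion fix at the end of vpfe_probe(): once the blanket int ret = -ENOMEM initializer is gone, the sub-device registration failure path must assign ret (here -ENXIO) before jumping into the unwind ladder, or the function would return whatever the last call left in it. The rule in schematic form (the step/undo helpers are hypothetical):

    #include <linux/errno.h>

    static int demo_step(void)  { return -1; }  /* hypothetical */
    static void demo_undo(void) { }             /* hypothetical */

    static int demo_probe(void)
    {
            int ret;                    /* deliberately uninitialized */

            if (demo_step()) {
                    ret = -ENXIO;       /* assign before every goto */
                    goto err_undo;
            }
            return 0;

    err_undo:
            demo_undo();
            return ret;
    }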
diff --git a/drivers/media/platform/davinci/vpif_capture.c b/drivers/media/platform/davinci/vpif_capture.c
index 5104cc0ee40e..f791f5c402bf 100644
--- a/drivers/media/platform/davinci/vpif_capture.c
+++ b/drivers/media/platform/davinci/vpif_capture.c
@@ -291,10 +291,10 @@ static void vpif_stop_streaming(struct vb2_queue *vq)
vb2_buffer_done(&common->cur_frm->vb.vb2_buf,
VB2_BUF_STATE_ERROR);
} else {
- if (common->cur_frm != NULL)
+ if (common->cur_frm)
vb2_buffer_done(&common->cur_frm->vb.vb2_buf,
VB2_BUF_STATE_ERROR);
- if (common->next_frm != NULL)
+ if (common->next_frm)
vb2_buffer_done(&common->next_frm->vb.vb2_buf,
VB2_BUF_STATE_ERROR);
}
@@ -375,7 +375,7 @@ static irqreturn_t vpif_channel_isr(int irq, void *dev_id)
struct vpif_device *dev = &vpif_obj;
struct common_obj *common;
struct channel_obj *ch;
- int channel_id = 0;
+ int channel_id;
int fid = -1, i;
channel_id = *(int *)(dev_id);
@@ -648,7 +648,7 @@ static int vpif_input_to_subdev(
vpif_dbg(2, debug, "vpif_input_to_subdev\n");
subdev_name = chan_cfg->inputs[input_index].subdev_name;
- if (subdev_name == NULL)
+ if (!subdev_name)
return -1;
/* loop through the sub device list to get the sub device info */
@@ -731,7 +731,7 @@ static int vpif_querystd(struct file *file, void *priv, v4l2_std_id *std_id)
{
struct video_device *vdev = video_devdata(file);
struct channel_obj *ch = video_get_drvdata(vdev);
- int ret = 0;
+ int ret;
vpif_dbg(2, debug, "vpif_querystd\n");
@@ -764,7 +764,7 @@ static int vpif_g_std(struct file *file, void *priv, v4l2_std_id *std)
vpif_dbg(2, debug, "vpif_g_std\n");
- if (config->chan_config[ch->channel_id].inputs == NULL)
+ if (!config->chan_config[ch->channel_id].inputs)
return -ENODATA;
chan_cfg = &config->chan_config[ch->channel_id];
@@ -794,7 +794,7 @@ static int vpif_s_std(struct file *file, void *priv, v4l2_std_id std_id)
vpif_dbg(2, debug, "vpif_s_std\n");
- if (config->chan_config[ch->channel_id].inputs == NULL)
+ if (!config->chan_config[ch->channel_id].inputs)
return -ENODATA;
chan_cfg = &config->chan_config[ch->channel_id];
@@ -1050,7 +1050,7 @@ vpif_enum_dv_timings(struct file *file, void *priv,
struct v4l2_input input;
int ret;
- if (config->chan_config[ch->channel_id].inputs == NULL)
+ if (!config->chan_config[ch->channel_id].inputs)
return -ENODATA;
chan_cfg = &config->chan_config[ch->channel_id];
@@ -1084,7 +1084,7 @@ vpif_query_dv_timings(struct file *file, void *priv,
struct v4l2_input input;
int ret;
- if (config->chan_config[ch->channel_id].inputs == NULL)
+ if (!config->chan_config[ch->channel_id].inputs)
return -ENODATA;
chan_cfg = &config->chan_config[ch->channel_id];
@@ -1120,7 +1120,7 @@ static int vpif_s_dv_timings(struct file *file, void *priv,
struct v4l2_input input;
int ret;
- if (config->chan_config[ch->channel_id].inputs == NULL)
+ if (!config->chan_config[ch->channel_id].inputs)
return -ENODATA;
chan_cfg = &config->chan_config[ch->channel_id];
@@ -1152,11 +1152,7 @@ static int vpif_s_dv_timings(struct file *file, void *priv,
timings->bt.vfrontporch &&
(timings->bt.vbackporch ||
timings->bt.vsync))) {
- vpif_dbg(2, debug, "Timings for width, height, "
- "horizontal back porch, horizontal sync, "
- "horizontal front porch, vertical back porch, "
- "vertical sync and vertical back porch "
- "must be defined\n");
+ vpif_dbg(2, debug, "Timings for width, height, horizontal back porch, horizontal sync, horizontal front porch, vertical back porch, vertical sync and vertical back porch must be defined\n");
return -EINVAL;
}
@@ -1181,8 +1177,7 @@ static int vpif_s_dv_timings(struct file *file, void *priv,
std_info->l11 = std_info->vsize -
(bt->il_vfrontporch - 1);
} else {
- vpif_dbg(2, debug, "Required timing values for "
- "interlaced BT format missing\n");
+ vpif_dbg(2, debug, "Required timing values for interlaced BT format missing\n");
return -EINVAL;
}
} else {
@@ -1218,7 +1213,7 @@ static int vpif_g_dv_timings(struct file *file, void *priv,
struct vpif_capture_chan_config *chan_cfg;
struct v4l2_input input;
- if (config->chan_config[ch->channel_id].inputs == NULL)
+ if (!config->chan_config[ch->channel_id].inputs)
return -ENODATA;
chan_cfg = &config->chan_config[ch->channel_id];
@@ -1464,10 +1459,8 @@ static __init int vpif_probe(struct platform_device *pdev)
vpif_obj.config = pdev->dev.platform_data;
subdev_count = vpif_obj.config->subdev_count;
- vpif_obj.sd = kzalloc(sizeof(struct v4l2_subdev *) * subdev_count,
- GFP_KERNEL);
- if (vpif_obj.sd == NULL) {
- vpif_err("unable to allocate memory for subdevice pointers\n");
+ vpif_obj.sd = kcalloc(subdev_count, sizeof(*vpif_obj.sd), GFP_KERNEL);
+ if (!vpif_obj.sd) {
err = -ENOMEM;
goto vpif_unregister;
}
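Here the subdevice pointer array moves from kzalloc(sizeof(ptr) * n) to kcalloc(), which adds the same multiplication overflow check as kmalloc_array() while keeping the zero-fill that lets cleanup code treat unset slots as NULL. A sketch of why the zero-fill matters (demo_sd_exit() is hypothetical):

    #include <linux/slab.h>

    struct v4l2_subdev;
    static void demo_sd_exit(struct v4l2_subdev *sd) { }  /* hypothetical */

    static void demo_cleanup(struct v4l2_subdev **sd, size_t count)
    {
            size_t i;

            /* kcalloc zero-filled the array, so unset slots are NULL */
            for (i = 0; i < count; i++)
                    if (sd[i])
                            demo_sd_exit(sd[i]);
            kfree(sd);
    }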
diff --git a/drivers/media/platform/davinci/vpif_display.c b/drivers/media/platform/davinci/vpif_display.c
index 75b27233ec2f..e5f18448dbf7 100644
--- a/drivers/media/platform/davinci/vpif_display.c
+++ b/drivers/media/platform/davinci/vpif_display.c
@@ -271,10 +271,10 @@ static void vpif_stop_streaming(struct vb2_queue *vq)
vb2_buffer_done(&common->cur_frm->vb.vb2_buf,
VB2_BUF_STATE_ERROR);
} else {
- if (common->cur_frm != NULL)
+ if (common->cur_frm)
vb2_buffer_done(&common->cur_frm->vb.vb2_buf,
VB2_BUF_STATE_ERROR);
- if (common->next_frm != NULL)
+ if (common->next_frm)
vb2_buffer_done(&common->next_frm->vb.vb2_buf,
VB2_BUF_STATE_ERROR);
}
@@ -301,7 +301,7 @@ static struct vb2_ops video_qops = {
static void process_progressive_mode(struct common_obj *common)
{
- unsigned long addr = 0;
+ unsigned long addr;
spin_lock(&common->irqlock);
/* Get the next buffer from buffer queue */
@@ -363,7 +363,7 @@ static irqreturn_t vpif_channel_isr(int irq, void *dev_id)
struct channel_obj *ch;
struct common_obj *common;
int fid = -1, i;
- int channel_id = 0;
+ int channel_id;
channel_id = *(int *)(dev_id);
if (!vpif_intr_status(channel_id + 2))
@@ -686,7 +686,7 @@ static int vpif_s_std(struct file *file, void *priv, v4l2_std_id std_id)
struct v4l2_output output;
int ret;
- if (config->chan_config[ch->channel_id].outputs == NULL)
+ if (!config->chan_config[ch->channel_id].outputs)
return -ENODATA;
chan_cfg = &config->chan_config[ch->channel_id];
@@ -732,7 +732,7 @@ static int vpif_g_std(struct file *file, void *priv, v4l2_std_id *std)
struct vpif_display_chan_config *chan_cfg;
struct v4l2_output output;
- if (config->chan_config[ch->channel_id].outputs == NULL)
+ if (!config->chan_config[ch->channel_id].outputs)
return -ENODATA;
chan_cfg = &config->chan_config[ch->channel_id];
@@ -783,11 +783,11 @@ vpif_output_to_subdev(struct vpif_display_config *vpif_cfg,
vpif_dbg(2, debug, "vpif_output_to_subdev\n");
- if (chan_cfg->outputs == NULL)
+ if (!chan_cfg->outputs)
return -1;
subdev_name = chan_cfg->outputs[index].subdev_name;
- if (subdev_name == NULL)
+ if (!subdev_name)
return -1;
/* loop through the sub device list to get the sub device info */
@@ -833,7 +833,7 @@ static int vpif_set_output(struct vpif_display_config *vpif_cfg,
}
ch->output_idx = index;
ch->sd = sd;
- if (chan_cfg->outputs != NULL)
+ if (chan_cfg->outputs)
/* update tvnorms from the sub device output info */
ch->video_dev.tvnorms = chan_cfg->outputs[index].output.std;
return 0;
@@ -885,7 +885,7 @@ vpif_enum_dv_timings(struct file *file, void *priv,
struct v4l2_output output;
int ret;
- if (config->chan_config[ch->channel_id].outputs == NULL)
+ if (!config->chan_config[ch->channel_id].outputs)
return -ENODATA;
chan_cfg = &config->chan_config[ch->channel_id];
@@ -922,7 +922,7 @@ static int vpif_s_dv_timings(struct file *file, void *priv,
struct v4l2_output output;
int ret;
- if (config->chan_config[ch->channel_id].outputs == NULL)
+ if (!config->chan_config[ch->channel_id].outputs)
return -ENODATA;
chan_cfg = &config->chan_config[ch->channel_id];
@@ -954,11 +954,7 @@ static int vpif_s_dv_timings(struct file *file, void *priv,
timings->bt.vfrontporch &&
(timings->bt.vbackporch ||
timings->bt.vsync))) {
- vpif_dbg(2, debug, "Timings for width, height, "
- "horizontal back porch, horizontal sync, "
- "horizontal front porch, vertical back porch, "
- "vertical sync and vertical back porch "
- "must be defined\n");
+ vpif_dbg(2, debug, "Timings for width, height, horizontal back porch, horizontal sync, horizontal front porch, vertical back porch, vertical sync and vertical back porch must be defined\n");
return -EINVAL;
}
@@ -983,8 +979,7 @@ static int vpif_s_dv_timings(struct file *file, void *priv,
std_info->l11 = std_info->vsize -
(bt->il_vfrontporch - 1);
} else {
- vpif_dbg(2, debug, "Required timing values for "
- "interlaced BT format missing\n");
+ vpif_dbg(2, debug, "Required timing values for interlaced BT format missing\n");
return -EINVAL;
}
} else {
@@ -1021,7 +1016,7 @@ static int vpif_g_dv_timings(struct file *file, void *priv,
struct video_obj *vid_ch = &ch->video;
struct v4l2_output output;
- if (config->chan_config[ch->channel_id].outputs == NULL)
+ if (!config->chan_config[ch->channel_id].outputs)
goto error;
chan_cfg = &config->chan_config[ch->channel_id];
@@ -1279,10 +1274,8 @@ static __init int vpif_probe(struct platform_device *pdev)
vpif_obj.config = pdev->dev.platform_data;
subdev_count = vpif_obj.config->subdev_count;
subdevdata = vpif_obj.config->subdevinfo;
- vpif_obj.sd = kzalloc(sizeof(struct v4l2_subdev *) * subdev_count,
- GFP_KERNEL);
- if (vpif_obj.sd == NULL) {
- vpif_err("unable to allocate memory for subdevice pointers\n");
+ vpif_obj.sd = kcalloc(subdev_count, sizeof(*vpif_obj.sd), GFP_KERNEL);
+ if (!vpif_obj.sd) {
err = -ENOMEM;
goto vpif_unregister;
}
diff --git a/drivers/media/platform/davinci/vpss.c b/drivers/media/platform/davinci/vpss.c
index fce86f17dffc..373b796132f2 100644
--- a/drivers/media/platform/davinci/vpss.c
+++ b/drivers/media/platform/davinci/vpss.c
@@ -261,8 +261,8 @@ static int dm355_enable_clock(enum vpss_clock_sel clock_sel, int en)
shift = 6;
break;
default:
- printk(KERN_ERR "dm355_enable_clock:"
- " Invalid selector: %d\n", clock_sel);
+ printk(KERN_ERR "dm355_enable_clock: Invalid selector: %d\n",
+ clock_sel);
return -EINVAL;
}
@@ -421,8 +421,7 @@ static int vpss_probe(struct platform_device *pdev)
else if (!strcmp(platform_name, "dm644x_vpss"))
oper_cfg.platform = DM644X;
else {
- dev_err(&pdev->dev, "vpss driver not supported on"
- " this platform\n");
+ dev_err(&pdev->dev, "vpss driver not supported on this platform\n");
return -ENODEV;
}
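The vpss hunks show the change that dominates this series: user-visible format strings are joined onto a single source line even when that exceeds 80 columns, since checkpatch exempts quoted strings and a split string cannot be grepped back from a kernel log. An illustrative contrast (not part of the patch):

    #include <linux/device.h>

    static void demo_report(struct device *dev)
    {
            /* Hard to grep: the message exists on no single source line. */
            dev_err(dev, "vpss driver not supported on"
                         " this platform\n");

            /* Greppable: one quoted string, long line tolerated. */
            dev_err(dev, "vpss driver not supported on this platform\n");
    }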
diff --git a/drivers/media/platform/exynos-gsc/gsc-core.c b/drivers/media/platform/exynos-gsc/gsc-core.c
index 787bd16c19e5..cbf75b6194b4 100644
--- a/drivers/media/platform/exynos-gsc/gsc-core.c
+++ b/drivers/media/platform/exynos-gsc/gsc-core.c
@@ -24,12 +24,11 @@
#include <linux/slab.h>
#include <linux/clk.h>
#include <linux/of.h>
+#include <linux/of_device.h>
#include <media/v4l2-ioctl.h>
#include "gsc-core.h"
-#define GSC_CLOCK_GATE_NAME "gscl"
-
static const struct gsc_fmt gsc_formats[] = {
{
.name = "RGB565",
@@ -39,8 +38,8 @@ static const struct gsc_fmt gsc_formats[] = {
.num_planes = 1,
.num_comp = 1,
}, {
- .name = "XRGB-8-8-8-8, 32 bpp",
- .pixelformat = V4L2_PIX_FMT_RGB32,
+ .name = "BGRX-8-8-8-8, 32 bpp",
+ .pixelformat = V4L2_PIX_FMT_BGR32,
.depth = { 32 },
.color = GSC_RGB,
.num_planes = 1,
@@ -441,7 +440,7 @@ int gsc_try_fmt_mplane(struct gsc_ctx *ctx, struct v4l2_format *f)
v4l_bound_align_image(&pix_mp->width, min_w, max_w, mod_x,
&pix_mp->height, min_h, max_h, mod_y, 0);
if (tmp_w != pix_mp->width || tmp_h != pix_mp->height)
- pr_info("Image size has been modified from %dx%d to %dx%d",
+ pr_debug("Image size has been modified from %dx%d to %dx%d\n",
tmp_w, tmp_h, pix_mp->width, pix_mp->height);
pix_mp->num_planes = fmt->num_planes;
@@ -451,12 +450,25 @@ int gsc_try_fmt_mplane(struct gsc_ctx *ctx, struct v4l2_format *f)
else /* SD */
pix_mp->colorspace = V4L2_COLORSPACE_SMPTE170M;
-
for (i = 0; i < pix_mp->num_planes; ++i) {
- int bpl = (pix_mp->width * fmt->depth[i]) >> 3;
- pix_mp->plane_fmt[i].bytesperline = bpl;
- pix_mp->plane_fmt[i].sizeimage = bpl * pix_mp->height;
+ struct v4l2_plane_pix_format *plane_fmt = &pix_mp->plane_fmt[i];
+ u32 bpl = plane_fmt->bytesperline;
+
+ if (fmt->num_comp == 1 && /* Packed */
+ (bpl == 0 || (bpl * 8 / fmt->depth[i]) < pix_mp->width))
+ bpl = pix_mp->width * fmt->depth[i] / 8;
+
+ if (fmt->num_comp > 1 && /* Planar */
+ (bpl == 0 || bpl < pix_mp->width))
+ bpl = pix_mp->width;
+
+ if (i != 0 && fmt->num_comp == 3)
+ bpl /= 2;
+ plane_fmt->bytesperline = bpl;
+ plane_fmt->sizeimage = max(pix_mp->width * pix_mp->height *
+ fmt->depth[i] / 8,
+ plane_fmt->sizeimage);
pr_debug("[%d]: bpl: %d, sizeimage: %d",
i, bpl, pix_mp->plane_fmt[i].sizeimage);
}
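The rewritten loop stops overwriting the caller's bytesperline unconditionally and instead validates it per plane: packed formats (num_comp == 1) need bpl to cover width pixels at the plane's bit depth, planar formats need at least width bytes, and the chroma planes of 3-component formats use half the luma stride. The per-plane minimum, reduced to a helper with worked numbers (values hypothetical):

    #include <linux/types.h>

    static u32 demo_min_bpl(u32 width, u32 depth_bits, u32 user_bpl, bool packed)
    {
            u32 bpl = user_bpl;

            if (packed && (bpl == 0 || bpl * 8 / depth_bits < width))
                    bpl = width * depth_bits / 8;  /* RGB565 640px -> 1280 */
            else if (!packed && (bpl == 0 || bpl < width))
                    bpl = width;                   /* planar: width bytes */
            return bpl;  /* a larger user stride passes through untouched */
    }

sizeimage is likewise only ever raised, via max(), to the width * height * depth / 8 minimum; a larger caller-supplied value survives.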
@@ -964,7 +976,19 @@ static struct gsc_driverdata gsc_v_100_drvdata = {
[3] = &gsc_v_100_variant,
},
.num_entities = 4,
- .lclk_frequency = 266000000UL,
+ .clk_names = { "gscl" },
+ .num_clocks = 1,
+};
+
+static struct gsc_driverdata gsc_5433_drvdata = {
+ .variant = {
+ [0] = &gsc_v_100_variant,
+ [1] = &gsc_v_100_variant,
+ [2] = &gsc_v_100_variant,
+ },
+ .num_entities = 3,
+ .clk_names = { "pclk", "aclk", "aclk_xiu", "aclk_gsclbend" },
+ .num_clocks = 4,
};
static const struct of_device_id exynos_gsc_match[] = {
@@ -972,98 +996,22 @@ static const struct of_device_id exynos_gsc_match[] = {
.compatible = "samsung,exynos5-gsc",
.data = &gsc_v_100_drvdata,
},
+ {
+ .compatible = "samsung,exynos5433-gsc",
+ .data = &gsc_5433_drvdata,
+ },
{},
};
MODULE_DEVICE_TABLE(of, exynos_gsc_match);
-static void *gsc_get_drv_data(struct platform_device *pdev)
-{
- struct gsc_driverdata *driver_data = NULL;
- const struct of_device_id *match;
-
- match = of_match_node(exynos_gsc_match, pdev->dev.of_node);
- if (match)
- driver_data = (struct gsc_driverdata *)match->data;
-
- return driver_data;
-}
-
-static void gsc_clk_put(struct gsc_dev *gsc)
-{
- if (!IS_ERR(gsc->clock))
- clk_unprepare(gsc->clock);
-}
-
-static int gsc_clk_get(struct gsc_dev *gsc)
-{
- int ret;
-
- dev_dbg(&gsc->pdev->dev, "gsc_clk_get Called\n");
-
- gsc->clock = devm_clk_get(&gsc->pdev->dev, GSC_CLOCK_GATE_NAME);
- if (IS_ERR(gsc->clock)) {
- dev_err(&gsc->pdev->dev, "failed to get clock~~~: %s\n",
- GSC_CLOCK_GATE_NAME);
- return PTR_ERR(gsc->clock);
- }
-
- ret = clk_prepare(gsc->clock);
- if (ret < 0) {
- dev_err(&gsc->pdev->dev, "clock prepare failed for clock: %s\n",
- GSC_CLOCK_GATE_NAME);
- gsc->clock = ERR_PTR(-EINVAL);
- return ret;
- }
-
- return 0;
-}
-
-static int gsc_m2m_suspend(struct gsc_dev *gsc)
-{
- unsigned long flags;
- int timeout;
-
- spin_lock_irqsave(&gsc->slock, flags);
- if (!gsc_m2m_pending(gsc)) {
- spin_unlock_irqrestore(&gsc->slock, flags);
- return 0;
- }
- clear_bit(ST_M2M_SUSPENDED, &gsc->state);
- set_bit(ST_M2M_SUSPENDING, &gsc->state);
- spin_unlock_irqrestore(&gsc->slock, flags);
-
- timeout = wait_event_timeout(gsc->irq_queue,
- test_bit(ST_M2M_SUSPENDED, &gsc->state),
- GSC_SHUTDOWN_TIMEOUT);
-
- clear_bit(ST_M2M_SUSPENDING, &gsc->state);
- return timeout == 0 ? -EAGAIN : 0;
-}
-
-static int gsc_m2m_resume(struct gsc_dev *gsc)
-{
- struct gsc_ctx *ctx;
- unsigned long flags;
-
- spin_lock_irqsave(&gsc->slock, flags);
- /* Clear for full H/W setup in first run after resume */
- ctx = gsc->m2m.ctx;
- gsc->m2m.ctx = NULL;
- spin_unlock_irqrestore(&gsc->slock, flags);
-
- if (test_and_clear_bit(ST_M2M_SUSPENDED, &gsc->state))
- gsc_m2m_job_finish(ctx, VB2_BUF_STATE_ERROR);
-
- return 0;
-}
-
static int gsc_probe(struct platform_device *pdev)
{
struct gsc_dev *gsc;
struct resource *res;
- struct gsc_driverdata *drv_data = gsc_get_drv_data(pdev);
struct device *dev = &pdev->dev;
+ const struct gsc_driverdata *drv_data = of_device_get_match_data(dev);
int ret;
+ int i;
gsc = devm_kzalloc(dev, sizeof(struct gsc_dev), GFP_KERNEL);
if (!gsc)
@@ -1079,13 +1027,13 @@ static int gsc_probe(struct platform_device *pdev)
return -EINVAL;
}
+ gsc->num_clocks = drv_data->num_clocks;
gsc->variant = drv_data->variant[gsc->id];
gsc->pdev = pdev;
init_waitqueue_head(&gsc->irq_queue);
spin_lock_init(&gsc->slock);
mutex_init(&gsc->lock);
- gsc->clock = ERR_PTR(-EINVAL);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
gsc->regs = devm_ioremap_resource(dev, res);
@@ -1098,9 +1046,25 @@ static int gsc_probe(struct platform_device *pdev)
return -ENXIO;
}
- ret = gsc_clk_get(gsc);
- if (ret)
- return ret;
+ for (i = 0; i < gsc->num_clocks; i++) {
+ gsc->clock[i] = devm_clk_get(dev, drv_data->clk_names[i]);
+ if (IS_ERR(gsc->clock[i])) {
+ dev_err(dev, "failed to get clock: %s\n",
+ drv_data->clk_names[i]);
+ return PTR_ERR(gsc->clock[i]);
+ }
+ }
+
+ for (i = 0; i < gsc->num_clocks; i++) {
+ ret = clk_prepare_enable(gsc->clock[i]);
+ if (ret) {
+ dev_err(dev, "clock prepare failed for clock: %s\n",
+ drv_data->clk_names[i]);
+ while (--i >= 0)
+ clk_disable_unprepare(gsc->clock[i]);
+ return ret;
+ }
+ }
ret = devm_request_irq(dev, res->start, gsc_irq_handler,
0, pdev->name, gsc);
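Both this probe path and gsc_runtime_resume() below use the same partial-failure rollback: if enabling clock i fails, clocks i-1..0 are disabled in reverse order before returning. The idiom in isolation (demo_on()/demo_off() are hypothetical stand-ins for clk_prepare_enable()/clk_disable_unprepare()):

    static int demo_on(int i)   { return 0; }  /* hypothetical */
    static void demo_off(int i) { }            /* hypothetical */

    static int demo_enable_all(int n)
    {
            int i, ret;

            for (i = 0; i < n; i++) {
                    ret = demo_on(i);
                    if (ret) {
                            while (--i >= 0)   /* roll back successes */
                                    demo_off(i);
                            return ret;
                    }
            }
            return 0;
    }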
@@ -1118,114 +1082,131 @@ static int gsc_probe(struct platform_device *pdev)
goto err_v4l2;
platform_set_drvdata(pdev, gsc);
- pm_runtime_enable(dev);
- ret = pm_runtime_get_sync(&pdev->dev);
- if (ret < 0)
- goto err_m2m;
+
+ gsc_hw_set_sw_reset(gsc);
+ gsc_wait_reset(gsc);
vb2_dma_contig_set_max_seg_size(dev, DMA_BIT_MASK(32));
dev_dbg(dev, "gsc-%d registered successfully\n", gsc->id);
- pm_runtime_put(dev);
+ pm_runtime_set_active(dev);
+ pm_runtime_enable(dev);
+
return 0;
-err_m2m:
- gsc_unregister_m2m_device(gsc);
err_v4l2:
v4l2_device_unregister(&gsc->v4l2_dev);
err_clk:
- gsc_clk_put(gsc);
+ for (i = gsc->num_clocks - 1; i >= 0; i--)
+ clk_disable_unprepare(gsc->clock[i]);
return ret;
}
static int gsc_remove(struct platform_device *pdev)
{
struct gsc_dev *gsc = platform_get_drvdata(pdev);
+ int i;
+
+ pm_runtime_get_sync(&pdev->dev);
gsc_unregister_m2m_device(gsc);
v4l2_device_unregister(&gsc->v4l2_dev);
vb2_dma_contig_clear_max_seg_size(&pdev->dev);
- pm_runtime_disable(&pdev->dev);
- gsc_clk_put(gsc);
+ for (i = 0; i < gsc->num_clocks; i++)
+ clk_disable_unprepare(gsc->clock[i]);
+
+ pm_runtime_put_noidle(&pdev->dev);
dev_dbg(&pdev->dev, "%s driver unloaded\n", pdev->name);
return 0;
}
-static int gsc_runtime_resume(struct device *dev)
+#ifdef CONFIG_PM
+static int gsc_m2m_suspend(struct gsc_dev *gsc)
{
- struct gsc_dev *gsc = dev_get_drvdata(dev);
- int ret = 0;
-
- pr_debug("gsc%d: state: 0x%lx", gsc->id, gsc->state);
+ unsigned long flags;
+ int timeout;
- ret = clk_enable(gsc->clock);
- if (ret)
- return ret;
+ spin_lock_irqsave(&gsc->slock, flags);
+ if (!gsc_m2m_pending(gsc)) {
+ spin_unlock_irqrestore(&gsc->slock, flags);
+ return 0;
+ }
+ clear_bit(ST_M2M_SUSPENDED, &gsc->state);
+ set_bit(ST_M2M_SUSPENDING, &gsc->state);
+ spin_unlock_irqrestore(&gsc->slock, flags);
- gsc_hw_set_sw_reset(gsc);
- gsc_wait_reset(gsc);
+ timeout = wait_event_timeout(gsc->irq_queue,
+ test_bit(ST_M2M_SUSPENDED, &gsc->state),
+ GSC_SHUTDOWN_TIMEOUT);
- return gsc_m2m_resume(gsc);
+ clear_bit(ST_M2M_SUSPENDING, &gsc->state);
+ return timeout == 0 ? -EAGAIN : 0;
}
-static int gsc_runtime_suspend(struct device *dev)
+static void gsc_m2m_resume(struct gsc_dev *gsc)
{
- struct gsc_dev *gsc = dev_get_drvdata(dev);
- int ret = 0;
+ struct gsc_ctx *ctx;
+ unsigned long flags;
- ret = gsc_m2m_suspend(gsc);
- if (!ret)
- clk_disable(gsc->clock);
+ spin_lock_irqsave(&gsc->slock, flags);
+ /* Clear for full H/W setup in first run after resume */
+ ctx = gsc->m2m.ctx;
+ gsc->m2m.ctx = NULL;
+ spin_unlock_irqrestore(&gsc->slock, flags);
- pr_debug("gsc%d: state: 0x%lx", gsc->id, gsc->state);
- return ret;
+ if (test_and_clear_bit(ST_M2M_SUSPENDED, &gsc->state))
+ gsc_m2m_job_finish(ctx, VB2_BUF_STATE_ERROR);
}
-static int gsc_resume(struct device *dev)
+static int gsc_runtime_resume(struct device *dev)
{
struct gsc_dev *gsc = dev_get_drvdata(dev);
- unsigned long flags;
+ int ret = 0;
+ int i;
- pr_debug("gsc%d: state: 0x%lx", gsc->id, gsc->state);
+ pr_debug("gsc%d: state: 0x%lx\n", gsc->id, gsc->state);
- /* Do not resume if the device was idle before system suspend */
- spin_lock_irqsave(&gsc->slock, flags);
- if (!test_and_clear_bit(ST_SUSPEND, &gsc->state) ||
- !gsc_m2m_opened(gsc)) {
- spin_unlock_irqrestore(&gsc->slock, flags);
- return 0;
+ for (i = 0; i < gsc->num_clocks; i++) {
+ ret = clk_prepare_enable(gsc->clock[i]);
+ if (ret) {
+ while (--i >= 0)
+ clk_disable_unprepare(gsc->clock[i]);
+ return ret;
+ }
}
- spin_unlock_irqrestore(&gsc->slock, flags);
- if (!pm_runtime_suspended(dev))
- return gsc_runtime_resume(dev);
+ gsc_hw_set_sw_reset(gsc);
+ gsc_wait_reset(gsc);
+ gsc_m2m_resume(gsc);
return 0;
}
-static int gsc_suspend(struct device *dev)
+static int gsc_runtime_suspend(struct device *dev)
{
struct gsc_dev *gsc = dev_get_drvdata(dev);
+ int ret = 0;
+ int i;
- pr_debug("gsc%d: state: 0x%lx", gsc->id, gsc->state);
-
- if (test_and_set_bit(ST_SUSPEND, &gsc->state))
- return 0;
+ ret = gsc_m2m_suspend(gsc);
+ if (ret)
+ return ret;
- if (!pm_runtime_suspended(dev))
- return gsc_runtime_suspend(dev);
+ for (i = gsc->num_clocks - 1; i >= 0; i--)
+ clk_disable_unprepare(gsc->clock[i]);
- return 0;
+ pr_debug("gsc%d: state: 0x%lx\n", gsc->id, gsc->state);
+ return ret;
}
+#endif
static const struct dev_pm_ops gsc_pm_ops = {
- .suspend = gsc_suspend,
- .resume = gsc_resume,
- .runtime_suspend = gsc_runtime_suspend,
- .runtime_resume = gsc_runtime_resume,
+ SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
+ SET_RUNTIME_PM_OPS(gsc_runtime_suspend, gsc_runtime_resume, NULL)
};
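The hand-written gsc_suspend()/gsc_resume() pair (and the ST_SUSPEND flag it needed) is replaced by pointing the system-sleep hooks at pm_runtime_force_suspend()/pm_runtime_force_resume(), PM-core helpers that run the device's own runtime-PM callbacks and remember whether the device was already runtime-suspended. Under CONFIG_PM_SLEEP, SET_SYSTEM_SLEEP_PM_OPS() wires one function pair into all six sleep transitions, roughly:

    #include <linux/pm.h>
    #include <linux/pm_runtime.h>

    /* Approximate expansion, for orientation only: */
    static const struct dev_pm_ops demo_pm_ops = {
            .suspend  = pm_runtime_force_suspend,
            .freeze   = pm_runtime_force_suspend,
            .poweroff = pm_runtime_force_suspend,
            .resume   = pm_runtime_force_resume,
            .thaw     = pm_runtime_force_resume,
            .restore  = pm_runtime_force_resume,
    };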
static struct platform_driver gsc_driver = {
diff --git a/drivers/media/platform/exynos-gsc/gsc-core.h b/drivers/media/platform/exynos-gsc/gsc-core.h
index 7ad7b9dc2243..696217e9af66 100644
--- a/drivers/media/platform/exynos-gsc/gsc-core.h
+++ b/drivers/media/platform/exynos-gsc/gsc-core.h
@@ -33,6 +33,7 @@
#define GSC_SHUTDOWN_TIMEOUT ((100*HZ)/1000)
#define GSC_MAX_DEVS 4
+#define GSC_MAX_CLOCKS 4
#define GSC_M2M_BUF_NUM 0
#define GSC_MAX_CTRL_NUM 10
#define GSC_SC_ALIGN_4 4
@@ -48,9 +49,6 @@
#define GSC_CTX_ABORT (1 << 7)
enum gsc_dev_flags {
- /* for global */
- ST_SUSPEND,
-
/* for m2m node */
ST_M2M_OPEN,
ST_M2M_RUN,
@@ -306,12 +304,12 @@ struct gsc_variant {
* struct gsc_driverdata - per device type driver data for init time.
*
* @variant: the variant information for this driver.
- * @lclk_frequency: G-Scaler clock frequency
* @num_entities: the number of g-scalers
*/
struct gsc_driverdata {
struct gsc_variant *variant[GSC_MAX_DEVS];
- unsigned long lclk_frequency;
+ const char *clk_names[GSC_MAX_CLOCKS];
+ int num_clocks;
int num_entities;
};
@@ -335,7 +333,8 @@ struct gsc_dev {
struct platform_device *pdev;
struct gsc_variant *variant;
u16 id;
- struct clk *clock;
+ int num_clocks;
+ struct clk *clock[GSC_MAX_CLOCKS];
void __iomem *regs;
wait_queue_head_t irq_queue;
struct gsc_m2m_device m2m;
diff --git a/drivers/media/platform/exynos-gsc/gsc-m2m.c b/drivers/media/platform/exynos-gsc/gsc-m2m.c
index 9f03b791b711..f49f24b4462a 100644
--- a/drivers/media/platform/exynos-gsc/gsc-m2m.c
+++ b/drivers/media/platform/exynos-gsc/gsc-m2m.c
@@ -66,12 +66,29 @@ static int gsc_m2m_start_streaming(struct vb2_queue *q, unsigned int count)
return ret > 0 ? 0 : ret;
}
+static void __gsc_m2m_cleanup_queue(struct gsc_ctx *ctx)
+{
+ struct vb2_v4l2_buffer *src_vb, *dst_vb;
+
+ while (v4l2_m2m_num_src_bufs_ready(ctx->m2m_ctx) > 0) {
+ src_vb = v4l2_m2m_src_buf_remove(ctx->m2m_ctx);
+ v4l2_m2m_buf_done(src_vb, VB2_BUF_STATE_ERROR);
+ }
+
+ while (v4l2_m2m_num_dst_bufs_ready(ctx->m2m_ctx) > 0) {
+ dst_vb = v4l2_m2m_dst_buf_remove(ctx->m2m_ctx);
+ v4l2_m2m_buf_done(dst_vb, VB2_BUF_STATE_ERROR);
+ }
+}
+
static void gsc_m2m_stop_streaming(struct vb2_queue *q)
{
struct gsc_ctx *ctx = q->drv_priv;
__gsc_m2m_job_abort(ctx);
+ __gsc_m2m_cleanup_queue(ctx);
+
pm_runtime_put(&ctx->gsc_dev->pdev->dev);
}
@@ -365,14 +382,8 @@ static int gsc_m2m_reqbufs(struct file *file, void *fh,
max_cnt = (reqbufs->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) ?
gsc->variant->in_buf_cnt : gsc->variant->out_buf_cnt;
- if (reqbufs->count > max_cnt) {
+ if (reqbufs->count > max_cnt)
return -EINVAL;
- } else if (reqbufs->count == 0) {
- if (reqbufs->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
- gsc_ctx_state_lock_clear(GSC_SRC_FMT, ctx);
- else
- gsc_ctx_state_lock_clear(GSC_DST_FMT, ctx);
- }
return v4l2_m2m_reqbufs(file, ctx->m2m_ctx, reqbufs);
}
@@ -766,30 +777,29 @@ int gsc_register_m2m_device(struct gsc_dev *gsc)
gsc->m2m.m2m_dev = v4l2_m2m_init(&gsc_m2m_ops);
if (IS_ERR(gsc->m2m.m2m_dev)) {
dev_err(&pdev->dev, "failed to initialize v4l2-m2m device\n");
- ret = PTR_ERR(gsc->m2m.m2m_dev);
- goto err_m2m_r1;
+ return PTR_ERR(gsc->m2m.m2m_dev);
}
ret = video_register_device(&gsc->vdev, VFL_TYPE_GRABBER, -1);
if (ret) {
dev_err(&pdev->dev,
"%s(): failed to register video device\n", __func__);
- goto err_m2m_r2;
+ goto err_m2m_release;
}
pr_debug("gsc m2m driver registered as /dev/video%d", gsc->vdev.num);
return 0;
-err_m2m_r2:
+err_m2m_release:
v4l2_m2m_release(gsc->m2m.m2m_dev);
-err_m2m_r1:
- video_device_release(gsc->m2m.vfd);
return ret;
}
void gsc_unregister_m2m_device(struct gsc_dev *gsc)
{
- if (gsc)
+ if (gsc) {
v4l2_m2m_release(gsc->m2m.m2m_dev);
+ video_unregister_device(&gsc->vdev);
+ }
}
diff --git a/drivers/media/platform/exynos4-is/fimc-core.c b/drivers/media/platform/exynos4-is/fimc-core.c
index 8f89ca21b631..099c735a39b7 100644
--- a/drivers/media/platform/exynos4-is/fimc-core.c
+++ b/drivers/media/platform/exynos4-is/fimc-core.c
@@ -736,6 +736,7 @@ void fimc_adjust_mplane_format(struct fimc_fmt *fmt, u32 width, u32 height,
for (i = 0; i < pix->num_planes; ++i) {
struct v4l2_plane_pix_format *plane_fmt = &pix->plane_fmt[i];
u32 bpl = plane_fmt->bytesperline;
+ u32 sizeimage;
if (fmt->colplanes > 1 && (bpl == 0 || bpl < pix->width))
bpl = pix->width; /* Planar */
@@ -755,8 +756,17 @@ void fimc_adjust_mplane_format(struct fimc_fmt *fmt, u32 width, u32 height,
bytesperline /= 2;
plane_fmt->bytesperline = bytesperline;
- plane_fmt->sizeimage = max((pix->width * pix->height *
- fmt->depth[i]) / 8, plane_fmt->sizeimage);
+ sizeimage = pix->width * pix->height * fmt->depth[i] / 8;
+
+ /* Ensure full last row for tiled formats */
+ if (tiled_fmt(fmt)) {
+ /* 64 * 32 * plane_fmt->bytesperline / 64 */
+ u32 row_size = plane_fmt->bytesperline * 32;
+
+ sizeimage = roundup(sizeimage, row_size);
+ }
+
+ plane_fmt->sizeimage = max(sizeimage, plane_fmt->sizeimage);
}
}
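For tiled formats the plane size is now padded so the buffer always covers whole tile rows; with 64x32-pixel tiles, bytesperline * 32 bytes is one row of tiles, and sizeimage is rounded up to a multiple of it. A worked example under assumed parameters (1920x1080, 8 bits on this plane):

    #include <linux/kernel.h>   /* roundup() */

    static u32 demo_tiled_sizeimage(void)
    {
            u32 bytesperline = 1920;
            u32 row_size = bytesperline * 32;  /* 61440 */
            u32 raw = 1920 * 1080;             /* 2073600 */

            /* 2073600 / 61440 = 33.75 -> pad to 34 tile rows = 2088960 */
            return roundup(raw, row_size);
    }

Without the padding, hardware walking the final, partially covered tile row would step past the end of the buffer.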
diff --git a/drivers/media/platform/exynos4-is/media-dev.c b/drivers/media/platform/exynos4-is/media-dev.c
index 1a1154a9dfa4..e3a8709138fa 100644
--- a/drivers/media/platform/exynos4-is/media-dev.c
+++ b/drivers/media/platform/exynos4-is/media-dev.c
@@ -938,8 +938,7 @@ static int fimc_md_create_links(struct fimc_md *fmd)
csis = fmd->csis[pdata->mux_id].sd;
if (WARN(csis == NULL,
- "MIPI-CSI interface specified "
- "but s5p-csis module is not loaded!\n"))
+ "MIPI-CSI interface specified but s5p-csis module is not loaded!\n"))
return -EINVAL;
pad = sensor->entity.num_pads - 1;
diff --git a/drivers/media/platform/marvell-ccic/mcam-core.c b/drivers/media/platform/marvell-ccic/mcam-core.c
index af59bf4dca2d..a8bda6679422 100644
--- a/drivers/media/platform/marvell-ccic/mcam-core.c
+++ b/drivers/media/platform/marvell-ccic/mcam-core.c
@@ -49,24 +49,17 @@
static bool alloc_bufs_at_read;
module_param(alloc_bufs_at_read, bool, 0444);
MODULE_PARM_DESC(alloc_bufs_at_read,
- "Non-zero value causes DMA buffers to be allocated when the "
- "video capture device is read, rather than at module load "
- "time. This saves memory, but decreases the chances of "
- "successfully getting those buffers. This parameter is "
- "only used in the vmalloc buffer mode");
+ "Non-zero value causes DMA buffers to be allocated when the video capture device is read, rather than at module load time. This saves memory, but decreases the chances of successfully getting those buffers. This parameter is only used in the vmalloc buffer mode");
static int n_dma_bufs = 3;
module_param(n_dma_bufs, uint, 0644);
MODULE_PARM_DESC(n_dma_bufs,
- "The number of DMA buffers to allocate. Can be either two "
- "(saves memory, makes timing tighter) or three.");
+ "The number of DMA buffers to allocate. Can be either two (saves memory, makes timing tighter) or three.");
static int dma_buf_size = VGA_WIDTH * VGA_HEIGHT * 2; /* Worst case */
module_param(dma_buf_size, uint, 0444);
MODULE_PARM_DESC(dma_buf_size,
- "The size of the allocated DMA buffers. If actual operating "
- "parameters require larger buffers, an attempt to reallocate "
- "will be made.");
+ "The size of the allocated DMA buffers. If actual operating parameters require larger buffers, an attempt to reallocate will be made.");
#else /* MCAM_MODE_VMALLOC */
static const bool alloc_bufs_at_read;
static const int n_dma_bufs = 3; /* Used by S/G_PARM */
@@ -75,15 +68,12 @@ static const int n_dma_bufs = 3; /* Used by S/G_PARM */
static bool flip;
module_param(flip, bool, 0444);
MODULE_PARM_DESC(flip,
- "If set, the sensor will be instructed to flip the image "
- "vertically.");
+ "If set, the sensor will be instructed to flip the image vertically.");
static int buffer_mode = -1;
module_param(buffer_mode, int, 0444);
MODULE_PARM_DESC(buffer_mode,
- "Set the buffer mode to be used; default is to go with what "
- "the platform driver asks for. Set to 0 for vmalloc, 1 for "
- "DMA contiguous.");
+ "Set the buffer mode to be used; default is to go with what the platform driver asks for. Set to 0 for vmalloc, 1 for DMA contiguous.");
/*
* Status flags. Always manipulated with bit operations.
@@ -1759,8 +1749,7 @@ int mccic_register(struct mcam_camera *cam)
cam->buffer_mode = buffer_mode;
if (cam->buffer_mode == B_DMA_sg &&
cam->chip_id == MCAM_CAFE) {
- printk(KERN_ERR "marvell-cam: Cafe can't do S/G I/O, "
- "attempting vmalloc mode instead\n");
+ printk(KERN_ERR "marvell-cam: Cafe can't do S/G I/O, attempting vmalloc mode instead\n");
cam->buffer_mode = B_vmalloc;
}
if (!mcam_buffer_mode_supported(cam->buffer_mode)) {
@@ -1828,8 +1817,7 @@ int mccic_register(struct mcam_camera *cam)
*/
if (cam->buffer_mode == B_vmalloc && !alloc_bufs_at_read) {
if (mcam_alloc_dma_bufs(cam, 1))
- cam_warn(cam, "Unable to alloc DMA buffers at load"
- " will try again later.");
+ cam_warn(cam, "Unable to alloc DMA buffers at load; will try again later.");
}
mutex_unlock(&cam->s_mutex);
diff --git a/drivers/media/platform/mtk-mdp/Makefile b/drivers/media/platform/mtk-mdp/Makefile
new file mode 100644
index 000000000000..f8025699af99
--- /dev/null
+++ b/drivers/media/platform/mtk-mdp/Makefile
@@ -0,0 +1,9 @@
+mtk-mdp-y += mtk_mdp_core.o
+mtk-mdp-y += mtk_mdp_comp.o
+mtk-mdp-y += mtk_mdp_m2m.o
+mtk-mdp-y += mtk_mdp_regs.o
+mtk-mdp-y += mtk_mdp_vpu.o
+
+obj-$(CONFIG_VIDEO_MEDIATEK_MDP) += mtk-mdp.o
+
+ccflags-y += -I$(srctree)/drivers/media/platform/mtk-vpu
diff --git a/drivers/media/platform/mtk-mdp/mtk_mdp_comp.c b/drivers/media/platform/mtk-mdp/mtk_mdp_comp.c
new file mode 100644
index 000000000000..aa8f9fd1f1a2
--- /dev/null
+++ b/drivers/media/platform/mtk-mdp/mtk_mdp_comp.c
@@ -0,0 +1,159 @@
+/*
+ * Copyright (c) 2016 MediaTek Inc.
+ * Author: Ming Hsiu Tsai <minghsiu.tsai@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+#include <soc/mediatek/smi.h>
+
+#include "mtk_mdp_comp.h"
+
+
+static const char * const mtk_mdp_comp_stem[MTK_MDP_COMP_TYPE_MAX] = {
+ "mdp_rdma",
+ "mdp_rsz",
+ "mdp_wdma",
+ "mdp_wrot",
+};
+
+struct mtk_mdp_comp_match {
+ enum mtk_mdp_comp_type type;
+ int alias_id;
+};
+
+static const struct mtk_mdp_comp_match mtk_mdp_matches[MTK_MDP_COMP_ID_MAX] = {
+ { MTK_MDP_RDMA, 0 },
+ { MTK_MDP_RDMA, 1 },
+ { MTK_MDP_RSZ, 0 },
+ { MTK_MDP_RSZ, 1 },
+ { MTK_MDP_RSZ, 2 },
+ { MTK_MDP_WDMA, 0 },
+ { MTK_MDP_WROT, 0 },
+ { MTK_MDP_WROT, 1 },
+};
+
+int mtk_mdp_comp_get_id(struct device *dev, struct device_node *node,
+ enum mtk_mdp_comp_type comp_type)
+{
+ int id = of_alias_get_id(node, mtk_mdp_comp_stem[comp_type]);
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(mtk_mdp_matches); i++) {
+ if (comp_type == mtk_mdp_matches[i].type &&
+ id == mtk_mdp_matches[i].alias_id)
+ return i;
+ }
+
+ dev_err(dev, "Failed to get id. type: %d, id: %d\n", comp_type, id);
+
+ return -EINVAL;
+}
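The component ID is therefore a function of (component type, DT alias index): the alias number distinguishes multiple instances of one block type, and the static table maps the pair onto the flat mtk_mdp_comp_id space. For example, given the table above, a node aliased "mdp_rsz1":

    /* of_alias_get_id(node, "mdp_rsz") returns 1 for alias "mdp_rsz1",
     * and { MTK_MDP_RSZ, 1 } sits at index 3 of mtk_mdp_matches[],
     * so the call resolves to MTK_MDP_COMP_RSZ1 (= 3):
     */
    int id = mtk_mdp_comp_get_id(dev, node, MTK_MDP_RSZ);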
+
+void mtk_mdp_comp_clock_on(struct device *dev, struct mtk_mdp_comp *comp)
+{
+ int i, err;
+
+ if (comp->larb_dev) {
+ err = mtk_smi_larb_get(comp->larb_dev);
+ if (err)
+ dev_err(dev,
+ "failed to get larb, err %d. type:%d id:%d\n",
+ err, comp->type, comp->id);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(comp->clk); i++) {
+ if (!comp->clk[i])
+ continue;
+ err = clk_prepare_enable(comp->clk[i]);
+ if (err)
+ dev_err(dev,
+ "failed to enable clock, err %d. type:%d id:%d i:%d\n",
+ err, comp->type, comp->id, i);
+ }
+}
+
+void mtk_mdp_comp_clock_off(struct device *dev, struct mtk_mdp_comp *comp)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(comp->clk); i++) {
+ if (!comp->clk[i])
+ continue;
+ clk_disable_unprepare(comp->clk[i]);
+ }
+
+ if (comp->larb_dev)
+ mtk_smi_larb_put(comp->larb_dev);
+}
+
+int mtk_mdp_comp_init(struct device *dev, struct device_node *node,
+ struct mtk_mdp_comp *comp, enum mtk_mdp_comp_id comp_id)
+{
+ struct device_node *larb_node;
+ struct platform_device *larb_pdev;
+ int i;
+
+ if (comp_id < 0 || comp_id >= MTK_MDP_COMP_ID_MAX) {
+ dev_err(dev, "Invalid comp_id %d\n", comp_id);
+ return -EINVAL;
+ }
+
+ comp->dev_node = of_node_get(node);
+ comp->id = comp_id;
+ comp->type = mtk_mdp_matches[comp_id].type;
+ comp->regs = of_iomap(node, 0);
+
+ for (i = 0; i < ARRAY_SIZE(comp->clk); i++) {
+ comp->clk[i] = of_clk_get(node, i);
+
+ /* Only RDMA needs two clocks */
+ if (comp->type != MTK_MDP_RDMA)
+ break;
+ }
+
+ /* Only DMA capable components need the LARB property */
+ comp->larb_dev = NULL;
+ if (comp->type != MTK_MDP_RDMA &&
+ comp->type != MTK_MDP_WDMA &&
+ comp->type != MTK_MDP_WROT)
+ return 0;
+
+ larb_node = of_parse_phandle(node, "mediatek,larb", 0);
+ if (!larb_node) {
+ dev_err(dev,
+ "Missing mediadek,larb phandle in %s node\n",
+ node->full_name);
+ return -EINVAL;
+ }
+
+ larb_pdev = of_find_device_by_node(larb_node);
+ if (!larb_pdev) {
+ dev_warn(dev, "Waiting for larb device %s\n",
+ larb_node->full_name);
+ of_node_put(larb_node);
+ return -EPROBE_DEFER;
+ }
+ of_node_put(larb_node);
+
+ comp->larb_dev = &larb_pdev->dev;
+
+ return 0;
+}
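The larb lookup ends with the standard deferral pattern: a dependency that is described in DT but whose driver has not probed yet is reported as -EPROBE_DEFER, which makes the driver core retry this probe later instead of failing it; note the of_node_put() on both outcomes so the phandle reference never leaks. Reduced to its core (the property name is hypothetical):

    #include <linux/errno.h>
    #include <linux/of.h>
    #include <linux/of_platform.h>

    static int demo_get_dep(struct device_node *np)
    {
            struct device_node *dep = of_parse_phandle(np, "demo,dep", 0);
            struct platform_device *pdev;

            if (!dep)
                    return -EINVAL;        /* malformed DT: hard error */

            pdev = of_find_device_by_node(dep);
            of_node_put(dep);              /* drop the ref on every path */
            if (!pdev)
                    return -EPROBE_DEFER;  /* not an error, just "not yet" */
            return 0;
    }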
+
+void mtk_mdp_comp_deinit(struct device *dev, struct mtk_mdp_comp *comp)
+{
+ of_node_put(comp->dev_node);
+}
diff --git a/drivers/media/platform/mtk-mdp/mtk_mdp_comp.h b/drivers/media/platform/mtk-mdp/mtk_mdp_comp.h
new file mode 100644
index 000000000000..63b3983ef1a4
--- /dev/null
+++ b/drivers/media/platform/mtk-mdp/mtk_mdp_comp.h
@@ -0,0 +1,72 @@
+/*
+ * Copyright (c) 2016 MediaTek Inc.
+ * Author: Ming Hsiu Tsai <minghsiu.tsai@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MTK_MDP_COMP_H__
+#define __MTK_MDP_COMP_H__
+
+/**
+ * enum mtk_mdp_comp_type - the MDP component
+ * @MTK_MDP_RDMA: Read DMA
+ * @MTK_MDP_RSZ: Resizer
+ * @MTK_MDP_WDMA: Write DMA
+ * @MTK_MDP_WROT: Write DMA with rotation
+ */
+enum mtk_mdp_comp_type {
+ MTK_MDP_RDMA,
+ MTK_MDP_RSZ,
+ MTK_MDP_WDMA,
+ MTK_MDP_WROT,
+ MTK_MDP_COMP_TYPE_MAX,
+};
+
+enum mtk_mdp_comp_id {
+ MTK_MDP_COMP_RDMA0,
+ MTK_MDP_COMP_RDMA1,
+ MTK_MDP_COMP_RSZ0,
+ MTK_MDP_COMP_RSZ1,
+ MTK_MDP_COMP_RSZ2,
+ MTK_MDP_COMP_WDMA,
+ MTK_MDP_COMP_WROT0,
+ MTK_MDP_COMP_WROT1,
+ MTK_MDP_COMP_ID_MAX,
+};
+
+/**
+ * struct mtk_mdp_comp - the MDP's function component data
+ * @dev_node: component device node
+ * @clk: clocks required for component
+ * @regs: Mapped address of component registers.
+ * @larb_dev: SMI device required for component
+ * @type: component type
+ * @id: component ID
+ */
+struct mtk_mdp_comp {
+ struct device_node *dev_node;
+ struct clk *clk[2];
+ void __iomem *regs;
+ struct device *larb_dev;
+ enum mtk_mdp_comp_type type;
+ enum mtk_mdp_comp_id id;
+};
+
+int mtk_mdp_comp_init(struct device *dev, struct device_node *node,
+ struct mtk_mdp_comp *comp, enum mtk_mdp_comp_id comp_id);
+void mtk_mdp_comp_deinit(struct device *dev, struct mtk_mdp_comp *comp);
+int mtk_mdp_comp_get_id(struct device *dev, struct device_node *node,
+ enum mtk_mdp_comp_type comp_type);
+void mtk_mdp_comp_clock_on(struct device *dev, struct mtk_mdp_comp *comp);
+void mtk_mdp_comp_clock_off(struct device *dev, struct mtk_mdp_comp *comp);
+
+
+#endif /* __MTK_MDP_COMP_H__ */
diff --git a/drivers/media/platform/mtk-mdp/mtk_mdp_core.c b/drivers/media/platform/mtk-mdp/mtk_mdp_core.c
new file mode 100644
index 000000000000..9e4eb7dcc424
--- /dev/null
+++ b/drivers/media/platform/mtk-mdp/mtk_mdp_core.c
@@ -0,0 +1,290 @@
+/*
+ * Copyright (c) 2015-2016 MediaTek Inc.
+ * Author: Houlong Wei <houlong.wei@mediatek.com>
+ * Ming Hsiu Tsai <minghsiu.tsai@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/workqueue.h>
+#include <soc/mediatek/smi.h>
+
+#include "mtk_mdp_core.h"
+#include "mtk_mdp_m2m.h"
+#include "mtk_vpu.h"
+
+/* MDP debug log level (0-3). 3 shows all the logs. */
+int mtk_mdp_dbg_level;
+EXPORT_SYMBOL(mtk_mdp_dbg_level);
+
+module_param(mtk_mdp_dbg_level, int, 0644);
+
+static const struct of_device_id mtk_mdp_comp_dt_ids[] = {
+ {
+ .compatible = "mediatek,mt8173-mdp-rdma",
+ .data = (void *)MTK_MDP_RDMA
+ }, {
+ .compatible = "mediatek,mt8173-mdp-rsz",
+ .data = (void *)MTK_MDP_RSZ
+ }, {
+ .compatible = "mediatek,mt8173-mdp-wdma",
+ .data = (void *)MTK_MDP_WDMA
+ }, {
+ .compatible = "mediatek,mt8173-mdp-wrot",
+ .data = (void *)MTK_MDP_WROT
+ },
+ { },
+};
+
+static const struct of_device_id mtk_mdp_of_ids[] = {
+ { .compatible = "mediatek,mt8173-mdp", },
+ { },
+};
+MODULE_DEVICE_TABLE(of, mtk_mdp_of_ids);
+
+static void mtk_mdp_clock_on(struct mtk_mdp_dev *mdp)
+{
+ struct device *dev = &mdp->pdev->dev;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(mdp->comp); i++)
+ mtk_mdp_comp_clock_on(dev, mdp->comp[i]);
+}
+
+static void mtk_mdp_clock_off(struct mtk_mdp_dev *mdp)
+{
+ struct device *dev = &mdp->pdev->dev;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(mdp->comp); i++)
+ mtk_mdp_comp_clock_off(dev, mdp->comp[i]);
+}
+
+static void mtk_mdp_wdt_worker(struct work_struct *work)
+{
+ struct mtk_mdp_dev *mdp =
+ container_of(work, struct mtk_mdp_dev, wdt_work);
+ struct mtk_mdp_ctx *ctx;
+
+ mtk_mdp_err("Watchdog timeout");
+
+ list_for_each_entry(ctx, &mdp->ctx_list, list) {
+ mtk_mdp_dbg(0, "[%d] Changing to error state", ctx->id);
+ mtk_mdp_ctx_state_lock_set(ctx, MTK_MDP_CTX_ERROR);
+ }
+}
+
+static void mtk_mdp_reset_handler(void *priv)
+{
+ struct mtk_mdp_dev *mdp = priv;
+
+ queue_work(mdp->wdt_wq, &mdp->wdt_work);
+}
+
+static int mtk_mdp_probe(struct platform_device *pdev)
+{
+ struct mtk_mdp_dev *mdp;
+ struct device *dev = &pdev->dev;
+ struct device_node *node;
+ int i, ret = 0;
+
+ mdp = devm_kzalloc(dev, sizeof(*mdp), GFP_KERNEL);
+ if (!mdp)
+ return -ENOMEM;
+
+ mdp->id = pdev->id;
+ mdp->pdev = pdev;
+ INIT_LIST_HEAD(&mdp->ctx_list);
+
+ mutex_init(&mdp->lock);
+ mutex_init(&mdp->vpulock);
+
+ /* Iterate over sibling MDP function blocks */
+ for_each_child_of_node(dev->of_node, node) {
+ const struct of_device_id *of_id;
+ enum mtk_mdp_comp_type comp_type;
+ int comp_id;
+ struct mtk_mdp_comp *comp;
+
+ of_id = of_match_node(mtk_mdp_comp_dt_ids, node);
+ if (!of_id)
+ continue;
+
+ if (!of_device_is_available(node)) {
+ dev_err(dev, "Skipping disabled component %s\n",
+ node->full_name);
+ continue;
+ }
+
+ comp_type = (enum mtk_mdp_comp_type)of_id->data;
+ comp_id = mtk_mdp_comp_get_id(dev, node, comp_type);
+ if (comp_id < 0) {
+ dev_warn(dev, "Skipping unknown component %s\n",
+ node->full_name);
+ continue;
+ }
+
+ comp = devm_kzalloc(dev, sizeof(*comp), GFP_KERNEL);
+ if (!comp) {
+ ret = -ENOMEM;
+ goto err_comp;
+ }
+ mdp->comp[comp_id] = comp;
+
+ ret = mtk_mdp_comp_init(dev, node, comp, comp_id);
+ if (ret)
+ goto err_comp;
+ }
+
+ mdp->job_wq = create_singlethread_workqueue(MTK_MDP_MODULE_NAME);
+ if (!mdp->job_wq) {
+ dev_err(&pdev->dev, "unable to alloc job workqueue\n");
+ ret = -ENOMEM;
+ goto err_alloc_job_wq;
+ }
+
+ mdp->wdt_wq = create_singlethread_workqueue("mdp_wdt_wq");
+ if (!mdp->wdt_wq) {
+ dev_err(&pdev->dev, "unable to alloc wdt workqueue\n");
+ ret = -ENOMEM;
+ goto err_alloc_wdt_wq;
+ }
+ INIT_WORK(&mdp->wdt_work, mtk_mdp_wdt_worker);
+
+ ret = v4l2_device_register(dev, &mdp->v4l2_dev);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to register v4l2 device\n");
+ ret = -EINVAL;
+ goto err_dev_register;
+ }
+
+ ret = mtk_mdp_register_m2m_device(mdp);
+ if (ret) {
+ v4l2_err(&mdp->v4l2_dev, "Failed to init mem2mem device\n");
+ goto err_m2m_register;
+ }
+
+ mdp->vpu_dev = vpu_get_plat_device(pdev);
+ vpu_wdt_reg_handler(mdp->vpu_dev, mtk_mdp_reset_handler, mdp,
+ VPU_RST_MDP);
+
+ platform_set_drvdata(pdev, mdp);
+
+ vb2_dma_contig_set_max_seg_size(&pdev->dev, DMA_BIT_MASK(32));
+
+ pm_runtime_enable(dev);
+ dev_dbg(dev, "mdp-%d registered successfully\n", mdp->id);
+
+ return 0;
+
+err_m2m_register:
+ v4l2_device_unregister(&mdp->v4l2_dev);
+
+err_dev_register:
+ destroy_workqueue(mdp->wdt_wq);
+
+err_alloc_wdt_wq:
+ destroy_workqueue(mdp->job_wq);
+
+err_alloc_job_wq:
+
+err_comp:
+ for (i = 0; i < ARRAY_SIZE(mdp->comp); i++)
+ mtk_mdp_comp_deinit(dev, mdp->comp[i]);
+
+ dev_dbg(dev, "err %d\n", ret);
+ return ret;
+}
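mtk_mdp_probe() uses the conventional kernel unwind ladder: each label undoes the step acquired just before the failed one, and labels fall through so everything earlier is undone in reverse acquisition order (err_alloc_job_wq is deliberately empty and falls into err_comp). The shape, abstracted (the step/undo helpers are hypothetical):

    static int step_a(void)  { return 0; }   /* hypothetical */
    static int step_b(void)  { return 0; }
    static int step_c(void)  { return -1; }
    static void undo_a(void) { }
    static void undo_b(void) { }

    static int demo_probe(void)
    {
            int ret;

            ret = step_a();
            if (ret)
                    return ret;         /* nothing to undo yet */
            ret = step_b();
            if (ret)
                    goto err_a;
            ret = step_c();
            if (ret)
                    goto err_b;
            return 0;

    err_b:
            undo_b();
    err_a:                              /* fall-through: reverse order */
            undo_a();
            return ret;
    }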
+
+static int mtk_mdp_remove(struct platform_device *pdev)
+{
+ struct mtk_mdp_dev *mdp = platform_get_drvdata(pdev);
+ int i;
+
+ pm_runtime_disable(&pdev->dev);
+ vb2_dma_contig_clear_max_seg_size(&pdev->dev);
+ mtk_mdp_unregister_m2m_device(mdp);
+ v4l2_device_unregister(&mdp->v4l2_dev);
+
+ flush_workqueue(mdp->job_wq);
+ destroy_workqueue(mdp->job_wq);
+
+ for (i = 0; i < ARRAY_SIZE(mdp->comp); i++)
+ mtk_mdp_comp_deinit(&pdev->dev, mdp->comp[i]);
+
+ dev_dbg(&pdev->dev, "%s driver unloaded\n", pdev->name);
+ return 0;
+}
+
+static int __maybe_unused mtk_mdp_pm_suspend(struct device *dev)
+{
+ struct mtk_mdp_dev *mdp = dev_get_drvdata(dev);
+
+ mtk_mdp_clock_off(mdp);
+
+ return 0;
+}
+
+static int __maybe_unused mtk_mdp_pm_resume(struct device *dev)
+{
+ struct mtk_mdp_dev *mdp = dev_get_drvdata(dev);
+
+ mtk_mdp_clock_on(mdp);
+
+ return 0;
+}
+
+static int __maybe_unused mtk_mdp_suspend(struct device *dev)
+{
+ if (pm_runtime_suspended(dev))
+ return 0;
+
+ return mtk_mdp_pm_suspend(dev);
+}
+
+static int __maybe_unused mtk_mdp_resume(struct device *dev)
+{
+ if (pm_runtime_suspended(dev))
+ return 0;
+
+ return mtk_mdp_pm_resume(dev);
+}
+
+static const struct dev_pm_ops mtk_mdp_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(mtk_mdp_suspend, mtk_mdp_resume)
+ SET_RUNTIME_PM_OPS(mtk_mdp_pm_suspend, mtk_mdp_pm_resume, NULL)
+};
+
+static struct platform_driver mtk_mdp_driver = {
+ .probe = mtk_mdp_probe,
+ .remove = mtk_mdp_remove,
+ .driver = {
+ .name = MTK_MDP_MODULE_NAME,
+ .pm = &mtk_mdp_pm_ops,
+ .of_match_table = mtk_mdp_of_ids,
+ }
+};
+
+module_platform_driver(mtk_mdp_driver);
+
+MODULE_AUTHOR("Houlong Wei <houlong.wei@mediatek.com>");
+MODULE_DESCRIPTION("Mediatek image processor driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/mtk-mdp/mtk_mdp_core.h b/drivers/media/platform/mtk-mdp/mtk_mdp_core.h
new file mode 100644
index 000000000000..ad1cff306efd
--- /dev/null
+++ b/drivers/media/platform/mtk-mdp/mtk_mdp_core.h
@@ -0,0 +1,260 @@
+/*
+ * Copyright (c) 2015-2016 MediaTek Inc.
+ * Author: Houlong Wei <houlong.wei@mediatek.com>
+ * Ming Hsiu Tsai <minghsiu.tsai@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MTK_MDP_CORE_H__
+#define __MTK_MDP_CORE_H__
+
+#include <linux/videodev2.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-mem2mem.h>
+#include <media/videobuf2-core.h>
+#include <media/videobuf2-dma-contig.h>
+
+#include "mtk_mdp_vpu.h"
+#include "mtk_mdp_comp.h"
+
+
+#define MTK_MDP_MODULE_NAME "mtk-mdp"
+
+#define MTK_MDP_SHUTDOWN_TIMEOUT ((100*HZ)/1000) /* 100ms */
+#define MTK_MDP_MAX_CTRL_NUM 10
+
+#define MTK_MDP_FMT_FLAG_OUTPUT BIT(0)
+#define MTK_MDP_FMT_FLAG_CAPTURE BIT(1)
+
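+/*
+ * Per-context state bits kept in mtk_mdp_ctx.state; they are updated under
+ * mtk_mdp_ctx.slock via the mtk_mdp_ctx_state_lock_*() helpers.
+ */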
+#define MTK_MDP_VPU_INIT BIT(0)
+#define MTK_MDP_SRC_FMT BIT(1)
+#define MTK_MDP_DST_FMT BIT(2)
+#define MTK_MDP_CTX_ERROR BIT(5)
+
+/**
+ * struct mtk_mdp_pix_align - alignment of image
+ * @org_w: source alignment of width
+ * @org_h: source alignment of height
+ * @target_w: dst alignment of width
+ * @target_h: dst alignment of height
+ */
+struct mtk_mdp_pix_align {
+ u16 org_w;
+ u16 org_h;
+ u16 target_w;
+ u16 target_h;
+};
+
+/**
+ * struct mtk_mdp_fmt - the driver's internal color format data
+ * @pixelformat: the fourcc code for this format, 0 if not applicable
+ * @num_planes: number of physically non-contiguous data planes
+ * @num_comp: number of logical data planes
+ * @depth: per plane driver's private 'number of bits per pixel'
+ * @row_depth: per plane driver's private 'number of bits per pixel per row'
+ * @flags: flags indicating which operation modes the format applies to:
+ *         MTK_MDP_FMT_FLAG_OUTPUT is used for the OUTPUT stream,
+ *         MTK_MDP_FMT_FLAG_CAPTURE is used for the CAPTURE stream
+ * @align: pointer to a pixel alignment struct, NULL if using default value
+ */
+struct mtk_mdp_fmt {
+ u32 pixelformat;
+ u16 num_planes;
+ u16 num_comp;
+ u8 depth[VIDEO_MAX_PLANES];
+ u8 row_depth[VIDEO_MAX_PLANES];
+ u32 flags;
+ struct mtk_mdp_pix_align *align;
+};
+
+/**
+ * struct mtk_mdp_addr - the image processor physical address set
+ * @addr: address of planes
+ */
+struct mtk_mdp_addr {
+ dma_addr_t addr[MTK_MDP_MAX_NUM_PLANE];
+};
+
+/**
+ * struct mtk_mdp_ctrls - the image processor control set
+ * @rotate: rotation degree
+ * @hflip: horizontal flip
+ * @vflip: vertical flip
+ * @global_alpha: the alpha value of current frame
+ */
+struct mtk_mdp_ctrls {
+ struct v4l2_ctrl *rotate;
+ struct v4l2_ctrl *hflip;
+ struct v4l2_ctrl *vflip;
+ struct v4l2_ctrl *global_alpha;
+};
+
+/**
+ * struct mtk_mdp_frame - source/target frame properties
+ * @width: SRC : SRCIMG_WIDTH, DST : OUTPUTDMA_WHOLE_IMG_WIDTH
+ * @height: SRC : SRCIMG_HEIGHT, DST : OUTPUTDMA_WHOLE_IMG_HEIGHT
+ * @crop: cropped(source)/scaled(destination) size
+ * @payload: image size in bytes (w x h x bpp)
+ * @pitch: bytes per line of image in memory
+ * @addr: image frame buffer physical addresses
+ * @fmt: color format pointer
+ * @alpha: frame's alpha value
+ */
+struct mtk_mdp_frame {
+ u32 width;
+ u32 height;
+ struct v4l2_rect crop;
+ unsigned long payload[VIDEO_MAX_PLANES];
+ unsigned int pitch[VIDEO_MAX_PLANES];
+ struct mtk_mdp_addr addr;
+ const struct mtk_mdp_fmt *fmt;
+ u8 alpha;
+};
+
+/**
+ * struct mtk_mdp_variant - image processor variant information
+ * @pix_max: maximum limit of image size
+ * @pix_min: minimum limit of image size
+ * @pix_align: alignment of image
+ * @h_scale_up_max: maximum horizontal scale-up ratio
+ * @v_scale_up_max: maximum vertical scale-up ratio
+ * @h_scale_down_max: maximum horizontal scale-down ratio
+ * @v_scale_down_max: maximum vertical scale-down ratio
+ */
+struct mtk_mdp_variant {
+ struct mtk_mdp_pix_limit *pix_max;
+ struct mtk_mdp_pix_limit *pix_min;
+ struct mtk_mdp_pix_align *pix_align;
+ u16 h_scale_up_max;
+ u16 v_scale_up_max;
+ u16 h_scale_down_max;
+ u16 v_scale_down_max;
+};
+
+/**
+ * struct mtk_mdp_dev - abstraction for image processor entity
+ * @lock: the mutex protecting this data structure
+ * @vpulock: the mutex protecting the communication with VPU
+ * @pdev: pointer to the image processor platform device
+ * @variant: the IP variant information
+ * @id: image processor device index (0..MTK_MDP_MAX_DEVS)
+ * @comp: MDP function components
+ * @m2m_dev: v4l2 memory-to-memory device data
+ * @ctx_list: list of struct mtk_mdp_ctx
+ * @vdev: video device for image processor driver
+ * @v4l2_dev: V4L2 device to register video devices for.
+ * @job_wq: processor work queue
+ * @vpu_dev: VPU platform device
+ * @ctx_num: counter of active MTK MDP context
+ * @id_counter: An integer id given to the next opened context
+ * @wdt_wq: work queue for VPU watchdog
+ * @wdt_work: worker for VPU watchdog
+ */
+struct mtk_mdp_dev {
+ struct mutex lock;
+ struct mutex vpulock;
+ struct platform_device *pdev;
+ struct mtk_mdp_variant *variant;
+ u16 id;
+ struct mtk_mdp_comp *comp[MTK_MDP_COMP_ID_MAX];
+ struct v4l2_m2m_dev *m2m_dev;
+ struct list_head ctx_list;
+ struct video_device *vdev;
+ struct v4l2_device v4l2_dev;
+ struct workqueue_struct *job_wq;
+ struct platform_device *vpu_dev;
+ int ctx_num;
+ unsigned long id_counter;
+ struct workqueue_struct *wdt_wq;
+ struct work_struct wdt_work;
+};
+
+/**
+ * struct mtk_mdp_ctx - the device context data
+ * @list: link to ctx_list of mtk_mdp_dev
+ * @s_frame: source frame properties
+ * @d_frame: destination frame properties
+ * @id: index of the context that this structure describes
+ * @flags: additional flags for image conversion
+ * @state: flags to keep track of the user configuration;
+ *         protected by @slock
+ * @rotation: rotates the image by specified angle
+ * @hflip: mirror the picture horizontally
+ * @vflip: mirror the picture vertically
+ * @mdp_dev: the image processor device this context applies to
+ * @m2m_ctx: memory-to-memory device context
+ * @fh: v4l2 file handle
+ * @ctrl_handler: v4l2 controls handler
+ * @ctrls: image processor control set
+ * @ctrls_rdy: true if the control handler is initialized
+ * @colorspace: enum v4l2_colorspace; supplemental to pixelformat
+ * @ycbcr_enc: enum v4l2_ycbcr_encoding, Y'CbCr encoding
+ * @xfer_func: enum v4l2_xfer_func, colorspace transfer function
+ * @quant: enum v4l2_quantization, colorspace quantization
+ * @vpu: VPU instance
+ * @slock: the mutex protecting mtk_mdp_ctx.state
+ * @work: worker for image processing
+ */
+struct mtk_mdp_ctx {
+ struct list_head list;
+ struct mtk_mdp_frame s_frame;
+ struct mtk_mdp_frame d_frame;
+ u32 flags;
+ u32 state;
+ int id;
+ int rotation;
+ u32 hflip:1;
+ u32 vflip:1;
+ struct mtk_mdp_dev *mdp_dev;
+ struct v4l2_m2m_ctx *m2m_ctx;
+ struct v4l2_fh fh;
+ struct v4l2_ctrl_handler ctrl_handler;
+ struct mtk_mdp_ctrls ctrls;
+ bool ctrls_rdy;
+ enum v4l2_colorspace colorspace;
+ enum v4l2_ycbcr_encoding ycbcr_enc;
+ enum v4l2_xfer_func xfer_func;
+ enum v4l2_quantization quant;
+
+ struct mtk_mdp_vpu vpu;
+ struct mutex slock;
+ struct work_struct work;
+};
+
+extern int mtk_mdp_dbg_level;
+
+#if defined(DEBUG)
+
+#define mtk_mdp_dbg(level, fmt, args...) \
+ do { \
+ if (mtk_mdp_dbg_level >= level) \
+ pr_info("[MTK_MDP] level=%d %s(),%d: " fmt "\n", \
+ level, __func__, __LINE__, ##args); \
+ } while (0)
+
+#define mtk_mdp_err(fmt, args...) \
+ pr_err("[MTK_MDP][ERROR] %s:%d: " fmt "\n", __func__, __LINE__, \
+ ##args)
+
+
+#define mtk_mdp_dbg_enter() mtk_mdp_dbg(3, "+")
+#define mtk_mdp_dbg_leave() mtk_mdp_dbg(3, "-")
+
+#else
+
+#define mtk_mdp_dbg(level, fmt, args...) do {} while (0)
+#define mtk_mdp_err(fmt, args...) do {} while (0)
+#define mtk_mdp_dbg_enter() do {} while (0)
+#define mtk_mdp_dbg_leave() do {} while (0)
+
+#endif
+
+#endif /* __MTK_MDP_CORE_H__ */
diff --git a/drivers/media/platform/mtk-mdp/mtk_mdp_ipi.h b/drivers/media/platform/mtk-mdp/mtk_mdp_ipi.h
new file mode 100644
index 000000000000..78e2cc0dead1
--- /dev/null
+++ b/drivers/media/platform/mtk-mdp/mtk_mdp_ipi.h
@@ -0,0 +1,126 @@
+/*
+ * Copyright (c) 2015-2016 MediaTek Inc.
+ * Author: Houlong Wei <houlong.wei@mediatek.com>
+ * Ming Hsiu Tsai <minghsiu.tsai@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MTK_MDP_IPI_H__
+#define __MTK_MDP_IPI_H__
+
+#define MTK_MDP_MAX_NUM_PLANE 3
+
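+/*
+ * IPI message IDs: the 0xd000 range is sent from the AP (kernel) to the
+ * VPU; the VPU answers with the matching 0xe000-range ACK carrying a
+ * status code.
+ */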
+enum mdp_ipi_msgid {
+ AP_MDP_INIT = 0xd000,
+ AP_MDP_DEINIT = 0xd001,
+ AP_MDP_PROCESS = 0xd002,
+
+ VPU_MDP_INIT_ACK = 0xe000,
+ VPU_MDP_DEINIT_ACK = 0xe001,
+ VPU_MDP_PROCESS_ACK = 0xe002
+};
+
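+/*
+ * All structures below are shared with the VPU firmware, so force 4-byte
+ * packing to keep the layout identical on both sides of the IPI channel.
+ */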
+#pragma pack(push, 4)
+
+/**
+ * struct mdp_ipi_init - for AP_MDP_INIT
+ * @msg_id : AP_MDP_INIT
+ * @ipi_id : IPI_MDP
+ * @ap_inst : AP mtk_mdp_vpu address
+ */
+struct mdp_ipi_init {
+ uint32_t msg_id;
+ uint32_t ipi_id;
+ uint64_t ap_inst;
+};
+
+/**
+ * struct mdp_ipi_comm - for AP_MDP_PROCESS, AP_MDP_DEINIT
+ * @msg_id : AP_MDP_PROCESS, AP_MDP_DEINIT
+ * @ipi_id : IPI_MDP
+ * @ap_inst : AP mtk_mdp_vpu address
+ * @vpu_inst_addr : VPU MDP instance address
+ */
+struct mdp_ipi_comm {
+ uint32_t msg_id;
+ uint32_t ipi_id;
+ uint64_t ap_inst;
+ uint32_t vpu_inst_addr;
+};
+
+/**
+ * struct mdp_ipi_comm_ack - for VPU_MDP_DEINIT_ACK, VPU_MDP_PROCESS_ACK
+ * @msg_id : VPU_MDP_DEINIT_ACK, VPU_MDP_PROCESS_ACK
+ * @ipi_id : IPI_MDP
+ * @ap_inst : AP mtk_mdp_vpu address
+ * @vpu_inst_addr : VPU MDP instance address
+ * @status : VPU execution result
+ */
+struct mdp_ipi_comm_ack {
+ uint32_t msg_id;
+ uint32_t ipi_id;
+ uint64_t ap_inst;
+ uint32_t vpu_inst_addr;
+ int32_t status;
+};
+
+/**
+ * struct mdp_config - configuration of the source/destination image
+ * @x : left
+ * @y : top
+ * @w : width
+ * @h : height
+ * @w_stride : horizontal stride in bytes
+ * @h_stride : vertical stride in bytes
+ * @crop_x : cropped left
+ * @crop_y : cropped top
+ * @crop_w : cropped width
+ * @crop_h : cropped height
+ * @format : color format
+ */
+struct mdp_config {
+ int32_t x;
+ int32_t y;
+ int32_t w;
+ int32_t h;
+ int32_t w_stride;
+ int32_t h_stride;
+ int32_t crop_x;
+ int32_t crop_y;
+ int32_t crop_w;
+ int32_t crop_h;
+ int32_t format;
+};
+
+struct mdp_buffer {
+ uint64_t addr_mva[MTK_MDP_MAX_NUM_PLANE];
+ int32_t plane_size[MTK_MDP_MAX_NUM_PLANE];
+ int32_t plane_num;
+};
+
+struct mdp_config_misc {
+ int32_t orientation; /* 0, 90, 180, 270 */
+ int32_t hflip; /* 1 will enable the flip */
+ int32_t vflip; /* 1 will enable the flip */
+ int32_t alpha; /* global alpha */
+};
+
+struct mdp_process_vsi {
+ struct mdp_config src_config;
+ struct mdp_buffer src_buffer;
+ struct mdp_config dst_config;
+ struct mdp_buffer dst_buffer;
+ struct mdp_config_misc misc;
+};
+
+#pragma pack(pop)
+
+#endif /* __MTK_MDP_IPI_H__ */
diff --git a/drivers/media/platform/mtk-mdp/mtk_mdp_m2m.c b/drivers/media/platform/mtk-mdp/mtk_mdp_m2m.c
new file mode 100644
index 000000000000..13afe48b9dc5
--- /dev/null
+++ b/drivers/media/platform/mtk-mdp/mtk_mdp_m2m.c
@@ -0,0 +1,1286 @@
+/*
+ * Copyright (c) 2015-2016 MediaTek Inc.
+ * Author: Houlong Wei <houlong.wei@mediatek.com>
+ * Ming Hsiu Tsai <minghsiu.tsai@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/pm_runtime.h>
+#include <linux/slab.h>
+#include <linux/workqueue.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-ioctl.h>
+
+#include "mtk_mdp_core.h"
+#include "mtk_mdp_m2m.h"
+#include "mtk_mdp_regs.h"
+#include "mtk_vpu.h"
+
+
+/**
+ * struct mtk_mdp_pix_limit - image pixel size limits
+ * @org_w: source pixel width
+ * @org_h: source pixel height
+ * @target_rot_dis_w: destination scaled width when the rotator is off
+ * @target_rot_dis_h: destination scaled height when the rotator is off
+ * @target_rot_en_w: destination scaled width when the rotator is on
+ * @target_rot_en_h: destination scaled height when the rotator is on
+ */
+struct mtk_mdp_pix_limit {
+ u16 org_w;
+ u16 org_h;
+ u16 target_rot_dis_w;
+ u16 target_rot_dis_h;
+ u16 target_rot_en_w;
+ u16 target_rot_en_h;
+};
+
+static struct mtk_mdp_pix_align mtk_mdp_size_align = {
+ .org_w = 16,
+ .org_h = 16,
+ .target_w = 2,
+ .target_h = 2,
+};
+
+static const struct mtk_mdp_fmt mtk_mdp_formats[] = {
+ {
+ .pixelformat = V4L2_PIX_FMT_MT21C,
+ .depth = { 8, 4 },
+ .row_depth = { 8, 8 },
+ .num_planes = 2,
+ .num_comp = 2,
+ .align = &mtk_mdp_size_align,
+ .flags = MTK_MDP_FMT_FLAG_OUTPUT,
+ }, {
+ .pixelformat = V4L2_PIX_FMT_NV12M,
+ .depth = { 8, 4 },
+ .row_depth = { 8, 8 },
+ .num_planes = 2,
+ .num_comp = 2,
+ .flags = MTK_MDP_FMT_FLAG_OUTPUT |
+ MTK_MDP_FMT_FLAG_CAPTURE,
+ }, {
+ .pixelformat = V4L2_PIX_FMT_YUV420M,
+ .depth = { 8, 2, 2 },
+ .row_depth = { 8, 4, 4 },
+ .num_planes = 3,
+ .num_comp = 3,
+ .flags = MTK_MDP_FMT_FLAG_OUTPUT |
+ MTK_MDP_FMT_FLAG_CAPTURE,
+ }, {
+ .pixelformat = V4L2_PIX_FMT_YVU420,
+ .depth = { 12 },
+ .row_depth = { 8 },
+ .num_planes = 1,
+ .num_comp = 3,
+ .flags = MTK_MDP_FMT_FLAG_OUTPUT |
+ MTK_MDP_FMT_FLAG_CAPTURE,
+ }
+};
+
+static struct mtk_mdp_pix_limit mtk_mdp_size_max = {
+ .target_rot_dis_w = 4096,
+ .target_rot_dis_h = 4096,
+ .target_rot_en_w = 4096,
+ .target_rot_en_h = 4096,
+};
+
+static struct mtk_mdp_pix_limit mtk_mdp_size_min = {
+ .org_w = 16,
+ .org_h = 16,
+ .target_rot_dis_w = 16,
+ .target_rot_dis_h = 16,
+ .target_rot_en_w = 16,
+ .target_rot_en_h = 16,
+};
+
+/* align size for normal raster scan pixel format */
+static struct mtk_mdp_pix_align mtk_mdp_rs_align = {
+ .org_w = 2,
+ .org_h = 2,
+ .target_w = 2,
+ .target_h = 2,
+};
+
+static struct mtk_mdp_variant mtk_mdp_default_variant = {
+ .pix_max = &mtk_mdp_size_max,
+ .pix_min = &mtk_mdp_size_min,
+ .pix_align = &mtk_mdp_rs_align,
+ .h_scale_up_max = 32,
+ .v_scale_up_max = 32,
+ .h_scale_down_max = 32,
+ .v_scale_down_max = 128,
+};
+
+static const struct mtk_mdp_fmt *mtk_mdp_find_fmt(u32 pixelformat, u32 type)
+{
+ u32 i, flag;
+
+ flag = V4L2_TYPE_IS_OUTPUT(type) ? MTK_MDP_FMT_FLAG_OUTPUT :
+ MTK_MDP_FMT_FLAG_CAPTURE;
+
+ for (i = 0; i < ARRAY_SIZE(mtk_mdp_formats); ++i) {
+ if (!(mtk_mdp_formats[i].flags & flag))
+ continue;
+ if (mtk_mdp_formats[i].pixelformat == pixelformat)
+ return &mtk_mdp_formats[i];
+ }
+ return NULL;
+}
+
+static const struct mtk_mdp_fmt *mtk_mdp_find_fmt_by_index(u32 index, u32 type)
+{
+ u32 i, flag, num = 0;
+
+ flag = V4L2_TYPE_IS_OUTPUT(type) ? MTK_MDP_FMT_FLAG_OUTPUT :
+ MTK_MDP_FMT_FLAG_CAPTURE;
+
+ for (i = 0; i < ARRAY_SIZE(mtk_mdp_formats); ++i) {
+ if (!(mtk_mdp_formats[i].flags & flag))
+ continue;
+ if (index == num)
+ return &mtk_mdp_formats[i];
+ num++;
+ }
+ return NULL;
+}
+
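+/*
+ * Clamp and align the requested width/height with v4l_bound_align_image(),
+ * then round the result back up by one alignment step if it was rounded
+ * down below the originally requested size.
+ */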
+static void mtk_mdp_bound_align_image(u32 *w, unsigned int wmin,
+ unsigned int wmax, unsigned int align_w,
+ u32 *h, unsigned int hmin,
+ unsigned int hmax, unsigned int align_h)
+{
+ int org_w, org_h, step_w, step_h;
+ int walign, halign;
+
+ org_w = *w;
+ org_h = *h;
+ walign = ffs(align_w) - 1;
+ halign = ffs(align_h) - 1;
+ v4l_bound_align_image(w, wmin, wmax, walign, h, hmin, hmax, halign, 0);
+
+ step_w = 1 << walign;
+ step_h = 1 << halign;
+ if (*w < org_w && (*w + step_w) <= wmax)
+ *w += step_w;
+ if (*h < org_h && (*h + step_h) <= hmax)
+ *h += step_h;
+}
+
+static const struct mtk_mdp_fmt *mtk_mdp_try_fmt_mplane(struct mtk_mdp_ctx *ctx,
+ struct v4l2_format *f)
+{
+ struct mtk_mdp_dev *mdp = ctx->mdp_dev;
+ struct mtk_mdp_variant *variant = mdp->variant;
+ struct v4l2_pix_format_mplane *pix_mp = &f->fmt.pix_mp;
+ const struct mtk_mdp_fmt *fmt;
+ u32 max_w, max_h, align_w, align_h;
+ u32 min_w, min_h, org_w, org_h;
+ int i;
+
+ fmt = mtk_mdp_find_fmt(pix_mp->pixelformat, f->type);
+ if (!fmt)
+ fmt = mtk_mdp_find_fmt_by_index(0, f->type);
+ if (!fmt) {
+ dev_dbg(&ctx->mdp_dev->pdev->dev,
+ "pixelformat format 0x%X invalid\n",
+ pix_mp->pixelformat);
+ return NULL;
+ }
+
+ pix_mp->field = V4L2_FIELD_NONE;
+ pix_mp->pixelformat = fmt->pixelformat;
+ if (!V4L2_TYPE_IS_OUTPUT(f->type)) {
+ pix_mp->colorspace = ctx->colorspace;
+ pix_mp->xfer_func = ctx->xfer_func;
+ pix_mp->ycbcr_enc = ctx->ycbcr_enc;
+ pix_mp->quantization = ctx->quant;
+ }
+ memset(pix_mp->reserved, 0, sizeof(pix_mp->reserved));
+
+ max_w = variant->pix_max->target_rot_dis_w;
+ max_h = variant->pix_max->target_rot_dis_h;
+
+ if (fmt->align == NULL) {
+ /* use default alignment */
+ align_w = variant->pix_align->org_w;
+ align_h = variant->pix_align->org_h;
+ } else {
+ align_w = fmt->align->org_w;
+ align_h = fmt->align->org_h;
+ }
+
+ if (V4L2_TYPE_IS_OUTPUT(f->type)) {
+ min_w = variant->pix_min->org_w;
+ min_h = variant->pix_min->org_h;
+ } else {
+ min_w = variant->pix_min->target_rot_dis_w;
+ min_h = variant->pix_min->target_rot_dis_h;
+ }
+
+ mtk_mdp_dbg(2, "[%d] type:%d, wxh:%ux%u, align:%ux%u, max:%ux%u",
+ ctx->id, f->type, pix_mp->width, pix_mp->height,
+ align_w, align_h, max_w, max_h);
+ /*
+ * Save the requested size so we can report whether it had to be
+ * adjusted to fit the hardware capabilities.
+ */
+ org_w = pix_mp->width;
+ org_h = pix_mp->height;
+
+ mtk_mdp_bound_align_image(&pix_mp->width, min_w, max_w, align_w,
+ &pix_mp->height, min_h, max_h, align_h);
+
+ if (org_w != pix_mp->width || org_h != pix_mp->height)
+ mtk_mdp_dbg(1, "[%d] size change:%ux%u to %ux%u", ctx->id,
+ org_w, org_h, pix_mp->width, pix_mp->height);
+ pix_mp->num_planes = fmt->num_planes;
+
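+ /*
+ * Per plane: bytesperline is derived from row_depth (bits per pixel
+ * per row) and the minimum sizeimage from depth (total bits per
+ * pixel); a larger user-supplied sizeimage is preserved.
+ */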
+ for (i = 0; i < pix_mp->num_planes; ++i) {
+ int bpl = (pix_mp->width * fmt->row_depth[i]) / 8;
+ int sizeimage = (pix_mp->width * pix_mp->height *
+ fmt->depth[i]) / 8;
+
+ pix_mp->plane_fmt[i].bytesperline = bpl;
+ if (pix_mp->plane_fmt[i].sizeimage < sizeimage)
+ pix_mp->plane_fmt[i].sizeimage = sizeimage;
+ memset(pix_mp->plane_fmt[i].reserved, 0,
+ sizeof(pix_mp->plane_fmt[i].reserved));
+ mtk_mdp_dbg(2, "[%d] p%d, bpl:%d, sizeimage:%u (%u)", ctx->id,
+ i, bpl, pix_mp->plane_fmt[i].sizeimage, sizeimage);
+ }
+
+ return fmt;
+}
+
+static struct mtk_mdp_frame *mtk_mdp_ctx_get_frame(struct mtk_mdp_ctx *ctx,
+ enum v4l2_buf_type type)
+{
+ if (V4L2_TYPE_IS_OUTPUT(type))
+ return &ctx->s_frame;
+ return &ctx->d_frame;
+}
+
+static void mtk_mdp_check_crop_change(u32 new_w, u32 new_h, u32 *w, u32 *h)
+{
+ if (new_w != *w || new_h != *h) {
+ mtk_mdp_dbg(1, "size change:%dx%d to %dx%d",
+ *w, *h, new_w, new_h);
+
+ *w = new_w;
+ *h = new_h;
+ }
+}
+
+static int mtk_mdp_try_crop(struct mtk_mdp_ctx *ctx, u32 type,
+ struct v4l2_rect *r)
+{
+ struct mtk_mdp_frame *frame;
+ struct mtk_mdp_dev *mdp = ctx->mdp_dev;
+ struct mtk_mdp_variant *variant = mdp->variant;
+ u32 align_w, align_h, new_w, new_h;
+ u32 min_w, min_h, max_w, max_h;
+
+ if (r->top < 0 || r->left < 0) {
+ dev_err(&ctx->mdp_dev->pdev->dev,
+ "doesn't support negative values for top & left\n");
+ return -EINVAL;
+ }
+
+ mtk_mdp_dbg(2, "[%d] type:%d, set wxh:%dx%d", ctx->id, type,
+ r->width, r->height);
+
+ frame = mtk_mdp_ctx_get_frame(ctx, type);
+ max_w = frame->width;
+ max_h = frame->height;
+ new_w = r->width;
+ new_h = r->height;
+
+ if (V4L2_TYPE_IS_OUTPUT(type)) {
+ align_w = 1;
+ align_h = 1;
+ min_w = 64;
+ min_h = 32;
+ } else {
+ align_w = variant->pix_align->target_w;
+ align_h = variant->pix_align->target_h;
+ if (ctx->ctrls.rotate->val == 90 ||
+ ctx->ctrls.rotate->val == 270) {
+ max_w = frame->height;
+ max_h = frame->width;
+ min_w = variant->pix_min->target_rot_en_w;
+ min_h = variant->pix_min->target_rot_en_h;
+ new_w = r->height;
+ new_h = r->width;
+ } else {
+ min_w = variant->pix_min->target_rot_dis_w;
+ min_h = variant->pix_min->target_rot_dis_h;
+ }
+ }
+
+ mtk_mdp_dbg(2, "[%d] align:%dx%d, min:%dx%d, new:%dx%d", ctx->id,
+ align_w, align_h, min_w, min_h, new_w, new_h);
+
+ mtk_mdp_bound_align_image(&new_w, min_w, max_w, align_w,
+ &new_h, min_h, max_h, align_h);
+
+ if (!V4L2_TYPE_IS_OUTPUT(type) &&
+ (ctx->ctrls.rotate->val == 90 ||
+ ctx->ctrls.rotate->val == 270))
+ mtk_mdp_check_crop_change(new_h, new_w,
+ &r->width, &r->height);
+ else
+ mtk_mdp_check_crop_change(new_w, new_h,
+ &r->width, &r->height);
+
+ /*
+ * Adjust left/top if the cropping rectangle is out of bounds and
+ * align the left offset down to a multiple of 2.
+ */
+ if (r->left + new_w > max_w)
+ r->left = max_w - new_w;
+ if (r->top + new_h > max_h)
+ r->top = max_h - new_h;
+
+ if (r->left & 1)
+ r->left -= 1;
+
+ mtk_mdp_dbg(2, "[%d] crop l,t,w,h:%d,%d,%d,%d, max:%dx%d", ctx->id,
+ r->left, r->top, r->width,
+ r->height, max_w, max_h);
+ return 0;
+}
+
+static inline struct mtk_mdp_ctx *fh_to_ctx(struct v4l2_fh *fh)
+{
+ return container_of(fh, struct mtk_mdp_ctx, fh);
+}
+
+static inline struct mtk_mdp_ctx *ctrl_to_ctx(struct v4l2_ctrl *ctrl)
+{
+ return container_of(ctrl->handler, struct mtk_mdp_ctx, ctrl_handler);
+}
+
+void mtk_mdp_ctx_state_lock_set(struct mtk_mdp_ctx *ctx, u32 state)
+{
+ mutex_lock(&ctx->slock);
+ ctx->state |= state;
+ mutex_unlock(&ctx->slock);
+}
+
+static void mtk_mdp_ctx_state_lock_clear(struct mtk_mdp_ctx *ctx, u32 state)
+{
+ mutex_lock(&ctx->slock);
+ ctx->state &= ~state;
+ mutex_unlock(&ctx->slock);
+}
+
+static bool mtk_mdp_ctx_state_is_set(struct mtk_mdp_ctx *ctx, u32 mask)
+{
+ bool ret;
+
+ mutex_lock(&ctx->slock);
+ ret = (ctx->state & mask) == mask;
+ mutex_unlock(&ctx->slock);
+ return ret;
+}
+
+static void mtk_mdp_ctx_lock(struct vb2_queue *vq)
+{
+ struct mtk_mdp_ctx *ctx = vb2_get_drv_priv(vq);
+
+ mutex_lock(&ctx->mdp_dev->lock);
+}
+
+static void mtk_mdp_ctx_unlock(struct vb2_queue *vq)
+{
+ struct mtk_mdp_ctx *ctx = vb2_get_drv_priv(vq);
+
+ mutex_unlock(&ctx->mdp_dev->lock);
+}
+
+static void mtk_mdp_set_frame_size(struct mtk_mdp_frame *frame, int width,
+ int height)
+{
+ frame->width = width;
+ frame->height = height;
+ frame->crop.width = width;
+ frame->crop.height = height;
+ frame->crop.left = 0;
+ frame->crop.top = 0;
+}
+
+static int mtk_mdp_m2m_start_streaming(struct vb2_queue *q, unsigned int count)
+{
+ struct mtk_mdp_ctx *ctx = q->drv_priv;
+ int ret;
+
+ ret = pm_runtime_get_sync(&ctx->mdp_dev->pdev->dev);
+ if (ret < 0)
+ mtk_mdp_dbg(1, "[%d] pm_runtime_get_sync failed:%d",
+ ctx->id, ret);
+
+ return 0;
+}
+
+static void *mtk_mdp_m2m_buf_remove(struct mtk_mdp_ctx *ctx,
+ enum v4l2_buf_type type)
+{
+ if (V4L2_TYPE_IS_OUTPUT(type))
+ return v4l2_m2m_src_buf_remove(ctx->m2m_ctx);
+ else
+ return v4l2_m2m_dst_buf_remove(ctx->m2m_ctx);
+}
+
+static void mtk_mdp_m2m_stop_streaming(struct vb2_queue *q)
+{
+ struct mtk_mdp_ctx *ctx = q->drv_priv;
+ struct vb2_buffer *vb;
+
+ vb = mtk_mdp_m2m_buf_remove(ctx, q->type);
+ while (vb != NULL) {
+ v4l2_m2m_buf_done(to_vb2_v4l2_buffer(vb), VB2_BUF_STATE_ERROR);
+ vb = mtk_mdp_m2m_buf_remove(ctx, q->type);
+ }
+
+ pm_runtime_put(&ctx->mdp_dev->pdev->dev);
+}
+
+static void mtk_mdp_m2m_job_abort(void *priv)
+{
+}
+
+/* The color format (num_planes) must already be configured. */
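+/*
+ * For single-plane V4L2_PIX_FMT_YVU420 only the first (luma) address comes
+ * from vb2; the two chroma plane addresses are derived at luma + w*h and
+ * luma + w*h + w*h/4, i.e. the contiguous YV12 layout.
+ */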
+static void mtk_mdp_prepare_addr(struct mtk_mdp_ctx *ctx,
+ struct vb2_buffer *vb,
+ struct mtk_mdp_frame *frame,
+ struct mtk_mdp_addr *addr)
+{
+ u32 pix_size, planes, i;
+
+ pix_size = frame->width * frame->height;
+ planes = min_t(u32, frame->fmt->num_planes, ARRAY_SIZE(addr->addr));
+ for (i = 0; i < planes; i++)
+ addr->addr[i] = vb2_dma_contig_plane_dma_addr(vb, i);
+
+ if (planes == 1) {
+ if (frame->fmt->pixelformat == V4L2_PIX_FMT_YVU420) {
+ addr->addr[1] = (dma_addr_t)(addr->addr[0] + pix_size);
+ addr->addr[2] = (dma_addr_t)(addr->addr[1] +
+ (pix_size >> 2));
+ } else {
+ dev_err(&ctx->mdp_dev->pdev->dev,
+ "Invalid pixelformat:0x%x\n",
+ frame->fmt->pixelformat);
+ }
+ }
+ mtk_mdp_dbg(3, "[%d] planes:%d, size:%d, addr:%p,%p,%p",
+ ctx->id, planes, pix_size, (void *)addr->addr[0],
+ (void *)addr->addr[1], (void *)addr->addr[2]);
+}
+
+static void mtk_mdp_m2m_get_bufs(struct mtk_mdp_ctx *ctx)
+{
+ struct mtk_mdp_frame *s_frame, *d_frame;
+ struct vb2_buffer *src_vb, *dst_vb;
+ struct vb2_v4l2_buffer *src_vbuf, *dst_vbuf;
+
+ s_frame = &ctx->s_frame;
+ d_frame = &ctx->d_frame;
+
+ src_vb = v4l2_m2m_next_src_buf(ctx->m2m_ctx);
+ mtk_mdp_prepare_addr(ctx, src_vb, s_frame, &s_frame->addr);
+
+ dst_vb = v4l2_m2m_next_dst_buf(ctx->m2m_ctx);
+ mtk_mdp_prepare_addr(ctx, dst_vb, d_frame, &d_frame->addr);
+
+ src_vbuf = to_vb2_v4l2_buffer(src_vb);
+ dst_vbuf = to_vb2_v4l2_buffer(dst_vb);
+ dst_vbuf->vb2_buf.timestamp = src_vbuf->vb2_buf.timestamp;
+}
+
+static void mtk_mdp_process_done(void *priv, int vb_state)
+{
+ struct mtk_mdp_dev *mdp = priv;
+ struct mtk_mdp_ctx *ctx;
+ struct vb2_buffer *src_vb, *dst_vb;
+ struct vb2_v4l2_buffer *src_vbuf = NULL, *dst_vbuf = NULL;
+
+ ctx = v4l2_m2m_get_curr_priv(mdp->m2m_dev);
+ if (!ctx)
+ return;
+
+ src_vb = v4l2_m2m_src_buf_remove(ctx->m2m_ctx);
+ src_vbuf = to_vb2_v4l2_buffer(src_vb);
+ dst_vb = v4l2_m2m_dst_buf_remove(ctx->m2m_ctx);
+ dst_vbuf = to_vb2_v4l2_buffer(dst_vb);
+
+ dst_vbuf->vb2_buf.timestamp = src_vbuf->vb2_buf.timestamp;
+ dst_vbuf->timecode = src_vbuf->timecode;
+ dst_vbuf->flags &= ~V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
+ dst_vbuf->flags |= src_vbuf->flags & V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
+
+ v4l2_m2m_buf_done(src_vbuf, vb_state);
+ v4l2_m2m_buf_done(dst_vbuf, vb_state);
+ v4l2_m2m_job_finish(ctx->mdp_dev->m2m_dev, ctx->m2m_ctx);
+}
+
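+/*
+ * Runs in mdp->job_wq context: copies the frame parameters into the shared
+ * VSI, kicks the VPU synchronously via mtk_mdp_vpu_process() and completes
+ * both buffers with DONE or ERROR depending on the result.
+ */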
+static void mtk_mdp_m2m_worker(struct work_struct *work)
+{
+ struct mtk_mdp_ctx *ctx =
+ container_of(work, struct mtk_mdp_ctx, work);
+ struct mtk_mdp_dev *mdp = ctx->mdp_dev;
+ enum vb2_buffer_state buf_state = VB2_BUF_STATE_ERROR;
+ int ret;
+
+ if (mtk_mdp_ctx_state_is_set(ctx, MTK_MDP_CTX_ERROR)) {
+ dev_err(&mdp->pdev->dev, "ctx is in error state");
+ goto worker_end;
+ }
+
+ mtk_mdp_m2m_get_bufs(ctx);
+
+ mtk_mdp_hw_set_input_addr(ctx, &ctx->s_frame.addr);
+ mtk_mdp_hw_set_output_addr(ctx, &ctx->d_frame.addr);
+
+ mtk_mdp_hw_set_in_size(ctx);
+ mtk_mdp_hw_set_in_image_format(ctx);
+
+ mtk_mdp_hw_set_out_size(ctx);
+ mtk_mdp_hw_set_out_image_format(ctx);
+
+ mtk_mdp_hw_set_rotation(ctx);
+ mtk_mdp_hw_set_global_alpha(ctx);
+
+ ret = mtk_mdp_vpu_process(&ctx->vpu);
+ if (ret) {
+ dev_err(&mdp->pdev->dev, "processing failed: %d", ret);
+ goto worker_end;
+ }
+
+ buf_state = VB2_BUF_STATE_DONE;
+
+worker_end:
+ mtk_mdp_process_done(mdp, buf_state);
+}
+
+static void mtk_mdp_m2m_device_run(void *priv)
+{
+ struct mtk_mdp_ctx *ctx = priv;
+
+ queue_work(ctx->mdp_dev->job_wq, &ctx->work);
+}
+
+static int mtk_mdp_m2m_queue_setup(struct vb2_queue *vq,
+ unsigned int *num_buffers, unsigned int *num_planes,
+ unsigned int sizes[], struct device *alloc_devs[])
+{
+ struct mtk_mdp_ctx *ctx = vb2_get_drv_priv(vq);
+ struct mtk_mdp_frame *frame;
+ int i;
+
+ frame = mtk_mdp_ctx_get_frame(ctx, vq->type);
+ *num_planes = frame->fmt->num_planes;
+ for (i = 0; i < frame->fmt->num_planes; i++)
+ sizes[i] = frame->payload[i];
+ mtk_mdp_dbg(2, "[%d] type:%d, planes:%d, buffers:%d, size:%u,%u",
+ ctx->id, vq->type, *num_planes, *num_buffers,
+ sizes[0], sizes[1]);
+ return 0;
+}
+
+static int mtk_mdp_m2m_buf_prepare(struct vb2_buffer *vb)
+{
+ struct mtk_mdp_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
+ struct mtk_mdp_frame *frame;
+ int i;
+
+ frame = mtk_mdp_ctx_get_frame(ctx, vb->vb2_queue->type);
+
+ if (!V4L2_TYPE_IS_OUTPUT(vb->vb2_queue->type)) {
+ for (i = 0; i < frame->fmt->num_planes; i++)
+ vb2_set_plane_payload(vb, i, frame->payload[i]);
+ }
+
+ return 0;
+}
+
+static void mtk_mdp_m2m_buf_queue(struct vb2_buffer *vb)
+{
+ struct mtk_mdp_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
+
+ v4l2_m2m_buf_queue(ctx->m2m_ctx, to_vb2_v4l2_buffer(vb));
+}
+
+static struct vb2_ops mtk_mdp_m2m_qops = {
+ .queue_setup = mtk_mdp_m2m_queue_setup,
+ .buf_prepare = mtk_mdp_m2m_buf_prepare,
+ .buf_queue = mtk_mdp_m2m_buf_queue,
+ .wait_prepare = mtk_mdp_ctx_unlock,
+ .wait_finish = mtk_mdp_ctx_lock,
+ .stop_streaming = mtk_mdp_m2m_stop_streaming,
+ .start_streaming = mtk_mdp_m2m_start_streaming,
+};
+
+static int mtk_mdp_m2m_querycap(struct file *file, void *fh,
+ struct v4l2_capability *cap)
+{
+ struct mtk_mdp_ctx *ctx = fh_to_ctx(fh);
+ struct mtk_mdp_dev *mdp = ctx->mdp_dev;
+
+ strlcpy(cap->driver, MTK_MDP_MODULE_NAME, sizeof(cap->driver));
+ strlcpy(cap->card, mdp->pdev->name, sizeof(cap->card));
+ strlcpy(cap->bus_info, "platform:mt8173", sizeof(cap->bus_info));
+
+ return 0;
+}
+
+static int mtk_mdp_enum_fmt_mplane(struct v4l2_fmtdesc *f, u32 type)
+{
+ const struct mtk_mdp_fmt *fmt;
+
+ fmt = mtk_mdp_find_fmt_by_index(f->index, type);
+ if (!fmt)
+ return -EINVAL;
+
+ f->pixelformat = fmt->pixelformat;
+
+ return 0;
+}
+
+static int mtk_mdp_m2m_enum_fmt_mplane_vid_cap(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
+{
+ return mtk_mdp_enum_fmt_mplane(f, V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);
+}
+
+static int mtk_mdp_m2m_enum_fmt_mplane_vid_out(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
+{
+ return mtk_mdp_enum_fmt_mplane(f, V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
+}
+
+static int mtk_mdp_m2m_g_fmt_mplane(struct file *file, void *fh,
+ struct v4l2_format *f)
+{
+ struct mtk_mdp_ctx *ctx = fh_to_ctx(fh);
+ struct mtk_mdp_frame *frame;
+ struct v4l2_pix_format_mplane *pix_mp;
+ int i;
+
+ mtk_mdp_dbg(2, "[%d] type:%d", ctx->id, f->type);
+
+ frame = mtk_mdp_ctx_get_frame(ctx, f->type);
+ pix_mp = &f->fmt.pix_mp;
+
+ pix_mp->width = frame->width;
+ pix_mp->height = frame->height;
+ pix_mp->field = V4L2_FIELD_NONE;
+ pix_mp->pixelformat = frame->fmt->pixelformat;
+ pix_mp->num_planes = frame->fmt->num_planes;
+ pix_mp->colorspace = ctx->colorspace;
+ pix_mp->xfer_func = ctx->xfer_func;
+ pix_mp->ycbcr_enc = ctx->ycbcr_enc;
+ pix_mp->quantization = ctx->quant;
+ mtk_mdp_dbg(2, "[%d] wxh:%dx%d", ctx->id,
+ pix_mp->width, pix_mp->height);
+
+ for (i = 0; i < pix_mp->num_planes; ++i) {
+ pix_mp->plane_fmt[i].bytesperline = (frame->width *
+ frame->fmt->row_depth[i]) / 8;
+ pix_mp->plane_fmt[i].sizeimage = (frame->width *
+ frame->height * frame->fmt->depth[i]) / 8;
+
+ mtk_mdp_dbg(2, "[%d] p%d, bpl:%d, sizeimage:%d", ctx->id, i,
+ pix_mp->plane_fmt[i].bytesperline,
+ pix_mp->plane_fmt[i].sizeimage);
+ }
+
+ return 0;
+}
+
+static int mtk_mdp_m2m_try_fmt_mplane(struct file *file, void *fh,
+ struct v4l2_format *f)
+{
+ struct mtk_mdp_ctx *ctx = fh_to_ctx(fh);
+
+ if (!mtk_mdp_try_fmt_mplane(ctx, f))
+ return -EINVAL;
+ return 0;
+}
+
+static int mtk_mdp_m2m_s_fmt_mplane(struct file *file, void *fh,
+ struct v4l2_format *f)
+{
+ struct mtk_mdp_ctx *ctx = fh_to_ctx(fh);
+ struct vb2_queue *vq;
+ struct mtk_mdp_frame *frame;
+ struct v4l2_pix_format_mplane *pix_mp;
+ const struct mtk_mdp_fmt *fmt;
+ int i;
+
+ mtk_mdp_dbg(2, "[%d] type:%d", ctx->id, f->type);
+
+ frame = mtk_mdp_ctx_get_frame(ctx, f->type);
+ fmt = mtk_mdp_try_fmt_mplane(ctx, f);
+ if (!fmt) {
+ mtk_mdp_err("[%d] try_fmt failed, type:%d", ctx->id, f->type);
+ return -EINVAL;
+ }
+ frame->fmt = fmt;
+
+ vq = v4l2_m2m_get_vq(ctx->m2m_ctx, f->type);
+ if (vb2_is_streaming(vq)) {
+ dev_info(&ctx->mdp_dev->pdev->dev, "queue %d busy\n", f->type);
+ return -EBUSY;
+ }
+
+ pix_mp = &f->fmt.pix_mp;
+ for (i = 0; i < frame->fmt->num_planes; i++) {
+ frame->payload[i] = pix_mp->plane_fmt[i].sizeimage;
+ frame->pitch[i] = pix_mp->plane_fmt[i].bytesperline;
+ }
+
+ mtk_mdp_set_frame_size(frame, pix_mp->width, pix_mp->height);
+ if (V4L2_TYPE_IS_OUTPUT(f->type)) {
+ ctx->colorspace = pix_mp->colorspace;
+ ctx->xfer_func = pix_mp->xfer_func;
+ ctx->ycbcr_enc = pix_mp->ycbcr_enc;
+ ctx->quant = pix_mp->quantization;
+ }
+
+ if (V4L2_TYPE_IS_OUTPUT(f->type))
+ mtk_mdp_ctx_state_lock_set(ctx, MTK_MDP_SRC_FMT);
+ else
+ mtk_mdp_ctx_state_lock_set(ctx, MTK_MDP_DST_FMT);
+
+ mtk_mdp_dbg(2, "[%d] type:%d, frame:%dx%d", ctx->id, f->type,
+ frame->width, frame->height);
+
+ return 0;
+}
+
+static int mtk_mdp_m2m_reqbufs(struct file *file, void *fh,
+ struct v4l2_requestbuffers *reqbufs)
+{
+ struct mtk_mdp_ctx *ctx = fh_to_ctx(fh);
+
+ if (reqbufs->count == 0) {
+ if (reqbufs->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
+ mtk_mdp_ctx_state_lock_clear(ctx, MTK_MDP_SRC_FMT);
+ else
+ mtk_mdp_ctx_state_lock_clear(ctx, MTK_MDP_DST_FMT);
+ }
+
+ return v4l2_m2m_reqbufs(file, ctx->m2m_ctx, reqbufs);
+}
+
+static int mtk_mdp_m2m_streamon(struct file *file, void *fh,
+ enum v4l2_buf_type type)
+{
+ struct mtk_mdp_ctx *ctx = fh_to_ctx(fh);
+ int ret;
+
+ /* The source and target color formats need to be set */
+ if (V4L2_TYPE_IS_OUTPUT(type)) {
+ if (!mtk_mdp_ctx_state_is_set(ctx, MTK_MDP_SRC_FMT))
+ return -EINVAL;
+ } else if (!mtk_mdp_ctx_state_is_set(ctx, MTK_MDP_DST_FMT)) {
+ return -EINVAL;
+ }
+
+ if (!mtk_mdp_ctx_state_is_set(ctx, MTK_MDP_VPU_INIT)) {
+ ret = mtk_mdp_vpu_init(&ctx->vpu);
+ if (ret < 0) {
+ dev_err(&ctx->mdp_dev->pdev->dev,
+ "vpu init failed %d\n",
+ ret);
+ return -EINVAL;
+ }
+ mtk_mdp_ctx_state_lock_set(ctx, MTK_MDP_VPU_INIT);
+ }
+
+ return v4l2_m2m_streamon(file, ctx->m2m_ctx, type);
+}
+
+static inline bool mtk_mdp_is_target_compose(u32 target)
+{
+ if (target == V4L2_SEL_TGT_COMPOSE_DEFAULT
+ || target == V4L2_SEL_TGT_COMPOSE_BOUNDS
+ || target == V4L2_SEL_TGT_COMPOSE)
+ return true;
+ return false;
+}
+
+static inline bool mtk_mdp_is_target_crop(u32 target)
+{
+ if (target == V4L2_SEL_TGT_CROP_DEFAULT
+ || target == V4L2_SEL_TGT_CROP_BOUNDS
+ || target == V4L2_SEL_TGT_CROP)
+ return true;
+ return false;
+}
+
+static int mtk_mdp_m2m_g_selection(struct file *file, void *fh,
+ struct v4l2_selection *s)
+{
+ struct mtk_mdp_frame *frame;
+ struct mtk_mdp_ctx *ctx = fh_to_ctx(fh);
+ bool valid = false;
+
+ if (s->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) {
+ if (mtk_mdp_is_target_compose(s->target))
+ valid = true;
+ } else if (s->type == V4L2_BUF_TYPE_VIDEO_OUTPUT) {
+ if (mtk_mdp_is_target_crop(s->target))
+ valid = true;
+ }
+ if (!valid) {
+ mtk_mdp_dbg(1, "[%d] invalid type:%d,%u", ctx->id, s->type,
+ s->target);
+ return -EINVAL;
+ }
+
+ frame = mtk_mdp_ctx_get_frame(ctx, s->type);
+
+ switch (s->target) {
+ case V4L2_SEL_TGT_COMPOSE_DEFAULT:
+ case V4L2_SEL_TGT_COMPOSE_BOUNDS:
+ case V4L2_SEL_TGT_CROP_BOUNDS:
+ case V4L2_SEL_TGT_CROP_DEFAULT:
+ s->r.left = 0;
+ s->r.top = 0;
+ s->r.width = frame->width;
+ s->r.height = frame->height;
+ return 0;
+
+ case V4L2_SEL_TGT_COMPOSE:
+ case V4L2_SEL_TGT_CROP:
+ s->r.left = frame->crop.left;
+ s->r.top = frame->crop.top;
+ s->r.width = frame->crop.width;
+ s->r.height = frame->crop.height;
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
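+/*
+ * Reject the request when the integer down-scale or up-scale ratio between
+ * the source and the (rotation-adjusted) destination exceeds the variant
+ * limits.
+ */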
+static int mtk_mdp_check_scaler_ratio(struct mtk_mdp_variant *var, int src_w,
+ int src_h, int dst_w, int dst_h, int rot)
+{
+ int tmp_w, tmp_h;
+
+ if (rot == 90 || rot == 270) {
+ tmp_w = dst_h;
+ tmp_h = dst_w;
+ } else {
+ tmp_w = dst_w;
+ tmp_h = dst_h;
+ }
+
+ if ((src_w / tmp_w) > var->h_scale_down_max ||
+ (src_h / tmp_h) > var->v_scale_down_max ||
+ (tmp_w / src_w) > var->h_scale_up_max ||
+ (tmp_h / src_h) > var->v_scale_up_max)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int mtk_mdp_m2m_s_selection(struct file *file, void *fh,
+ struct v4l2_selection *s)
+{
+ struct mtk_mdp_frame *frame;
+ struct mtk_mdp_ctx *ctx = fh_to_ctx(fh);
+ struct v4l2_rect new_r;
+ struct mtk_mdp_variant *variant = ctx->mdp_dev->variant;
+ int ret;
+ bool valid = false;
+
+ if (s->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) {
+ if (s->target == V4L2_SEL_TGT_COMPOSE)
+ valid = true;
+ } else if (s->type == V4L2_BUF_TYPE_VIDEO_OUTPUT) {
+ if (s->target == V4L2_SEL_TGT_CROP)
+ valid = true;
+ }
+ if (!valid) {
+ mtk_mdp_dbg(1, "[%d] invalid type:%d,%u", ctx->id, s->type,
+ s->target);
+ return -EINVAL;
+ }
+
+ new_r = s->r;
+ ret = mtk_mdp_try_crop(ctx, s->type, &new_r);
+ if (ret)
+ return ret;
+
+ if (mtk_mdp_is_target_crop(s->target))
+ frame = &ctx->s_frame;
+ else
+ frame = &ctx->d_frame;
+
+ /* Check to see if scaling ratio is within supported range */
+ if (mtk_mdp_ctx_state_is_set(ctx, MTK_MDP_DST_FMT | MTK_MDP_SRC_FMT)) {
+ if (V4L2_TYPE_IS_OUTPUT(s->type)) {
+ ret = mtk_mdp_check_scaler_ratio(variant, new_r.width,
+ new_r.height, ctx->d_frame.crop.width,
+ ctx->d_frame.crop.height,
+ ctx->ctrls.rotate->val);
+ } else {
+ ret = mtk_mdp_check_scaler_ratio(variant,
+ ctx->s_frame.crop.width,
+ ctx->s_frame.crop.height, new_r.width,
+ new_r.height, ctx->ctrls.rotate->val);
+ }
+
+ if (ret) {
+ dev_info(&ctx->mdp_dev->pdev->dev,
+ "Out of scaler range");
+ return -EINVAL;
+ }
+ }
+
+ s->r = new_r;
+ frame->crop = new_r;
+
+ return 0;
+}
+
+static const struct v4l2_ioctl_ops mtk_mdp_m2m_ioctl_ops = {
+ .vidioc_querycap = mtk_mdp_m2m_querycap,
+ .vidioc_enum_fmt_vid_cap_mplane = mtk_mdp_m2m_enum_fmt_mplane_vid_cap,
+ .vidioc_enum_fmt_vid_out_mplane = mtk_mdp_m2m_enum_fmt_mplane_vid_out,
+ .vidioc_g_fmt_vid_cap_mplane = mtk_mdp_m2m_g_fmt_mplane,
+ .vidioc_g_fmt_vid_out_mplane = mtk_mdp_m2m_g_fmt_mplane,
+ .vidioc_try_fmt_vid_cap_mplane = mtk_mdp_m2m_try_fmt_mplane,
+ .vidioc_try_fmt_vid_out_mplane = mtk_mdp_m2m_try_fmt_mplane,
+ .vidioc_s_fmt_vid_cap_mplane = mtk_mdp_m2m_s_fmt_mplane,
+ .vidioc_s_fmt_vid_out_mplane = mtk_mdp_m2m_s_fmt_mplane,
+ .vidioc_reqbufs = mtk_mdp_m2m_reqbufs,
+ .vidioc_create_bufs = v4l2_m2m_ioctl_create_bufs,
+ .vidioc_expbuf = v4l2_m2m_ioctl_expbuf,
+ .vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
+ .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
+ .vidioc_querybuf = v4l2_m2m_ioctl_querybuf,
+ .vidioc_qbuf = v4l2_m2m_ioctl_qbuf,
+ .vidioc_dqbuf = v4l2_m2m_ioctl_dqbuf,
+ .vidioc_streamon = mtk_mdp_m2m_streamon,
+ .vidioc_streamoff = v4l2_m2m_ioctl_streamoff,
+ .vidioc_g_selection = mtk_mdp_m2m_g_selection,
+ .vidioc_s_selection = mtk_mdp_m2m_s_selection
+};
+
+static int mtk_mdp_m2m_queue_init(void *priv, struct vb2_queue *src_vq,
+ struct vb2_queue *dst_vq)
+{
+ struct mtk_mdp_ctx *ctx = priv;
+ int ret;
+
+ memset(src_vq, 0, sizeof(*src_vq));
+ src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
+ src_vq->io_modes = VB2_MMAP | VB2_DMABUF;
+ src_vq->drv_priv = ctx;
+ src_vq->ops = &mtk_mdp_m2m_qops;
+ src_vq->mem_ops = &vb2_dma_contig_memops;
+ src_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
+ src_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+ src_vq->dev = &ctx->mdp_dev->pdev->dev;
+
+ ret = vb2_queue_init(src_vq);
+ if (ret)
+ return ret;
+
+ memset(dst_vq, 0, sizeof(*dst_vq));
+ dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
+ dst_vq->io_modes = VB2_MMAP | VB2_DMABUF;
+ dst_vq->drv_priv = ctx;
+ dst_vq->ops = &mtk_mdp_m2m_qops;
+ dst_vq->mem_ops = &vb2_dma_contig_memops;
+ dst_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
+ dst_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+ dst_vq->dev = &ctx->mdp_dev->pdev->dev;
+
+ return vb2_queue_init(dst_vq);
+}
+
+static int mtk_mdp_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct mtk_mdp_ctx *ctx = ctrl_to_ctx(ctrl);
+ struct mtk_mdp_dev *mdp = ctx->mdp_dev;
+ struct mtk_mdp_variant *variant = mdp->variant;
+ u32 state = MTK_MDP_DST_FMT | MTK_MDP_SRC_FMT;
+ int ret = 0;
+
+ if (ctrl->flags & V4L2_CTRL_FLAG_INACTIVE)
+ return 0;
+
+ switch (ctrl->id) {
+ case V4L2_CID_HFLIP:
+ ctx->hflip = ctrl->val;
+ break;
+ case V4L2_CID_VFLIP:
+ ctx->vflip = ctrl->val;
+ break;
+ case V4L2_CID_ROTATE:
+ if (mtk_mdp_ctx_state_is_set(ctx, state)) {
+ ret = mtk_mdp_check_scaler_ratio(variant,
+ ctx->s_frame.crop.width,
+ ctx->s_frame.crop.height,
+ ctx->d_frame.crop.width,
+ ctx->d_frame.crop.height,
+ ctx->ctrls.rotate->val);
+
+ if (ret)
+ return -EINVAL;
+ }
+
+ ctx->rotation = ctrl->val;
+ break;
+ case V4L2_CID_ALPHA_COMPONENT:
+ ctx->d_frame.alpha = ctrl->val;
+ break;
+ }
+
+ return 0;
+}
+
+static const struct v4l2_ctrl_ops mtk_mdp_ctrl_ops = {
+ .s_ctrl = mtk_mdp_s_ctrl,
+};
+
+static int mtk_mdp_ctrls_create(struct mtk_mdp_ctx *ctx)
+{
+ v4l2_ctrl_handler_init(&ctx->ctrl_handler, MTK_MDP_MAX_CTRL_NUM);
+
+ ctx->ctrls.rotate = v4l2_ctrl_new_std(&ctx->ctrl_handler,
+ &mtk_mdp_ctrl_ops, V4L2_CID_ROTATE, 0, 270, 90, 0);
+ ctx->ctrls.hflip = v4l2_ctrl_new_std(&ctx->ctrl_handler,
+ &mtk_mdp_ctrl_ops,
+ V4L2_CID_HFLIP,
+ 0, 1, 1, 0);
+ ctx->ctrls.vflip = v4l2_ctrl_new_std(&ctx->ctrl_handler,
+ &mtk_mdp_ctrl_ops,
+ V4L2_CID_VFLIP,
+ 0, 1, 1, 0);
+ ctx->ctrls.global_alpha = v4l2_ctrl_new_std(&ctx->ctrl_handler,
+ &mtk_mdp_ctrl_ops,
+ V4L2_CID_ALPHA_COMPONENT,
+ 0, 255, 1, 0);
+ ctx->ctrls_rdy = ctx->ctrl_handler.error == 0;
+
+ if (ctx->ctrl_handler.error) {
+ int err = ctx->ctrl_handler.error;
+
+ v4l2_ctrl_handler_free(&ctx->ctrl_handler);
+ dev_err(&ctx->mdp_dev->pdev->dev,
+ "Failed to create control handlers\n");
+ return err;
+ }
+
+ return 0;
+}
+
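+/*
+ * Both index-0 formats (OUTPUT and CAPTURE) are two-plane 4:2:0 layouts,
+ * hence the chroma payload defaults to half of the luma payload.
+ */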
+static void mtk_mdp_set_default_params(struct mtk_mdp_ctx *ctx)
+{
+ struct mtk_mdp_dev *mdp = ctx->mdp_dev;
+ struct mtk_mdp_frame *frame;
+
+ frame = mtk_mdp_ctx_get_frame(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
+ frame->fmt = mtk_mdp_find_fmt_by_index(0,
+ V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
+ frame->width = mdp->variant->pix_min->org_w;
+ frame->height = mdp->variant->pix_min->org_h;
+ frame->payload[0] = frame->width * frame->height;
+ frame->payload[1] = frame->payload[0] / 2;
+
+ frame = mtk_mdp_ctx_get_frame(ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);
+ frame->fmt = mtk_mdp_find_fmt_by_index(0,
+ V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);
+ frame->width = mdp->variant->pix_min->target_rot_dis_w;
+ frame->height = mdp->variant->pix_min->target_rot_dis_h;
+ frame->payload[0] = frame->width * frame->height;
+ frame->payload[1] = frame->payload[0] / 2;
+
+}
+
+static int mtk_mdp_m2m_open(struct file *file)
+{
+ struct mtk_mdp_dev *mdp = video_drvdata(file);
+ struct video_device *vfd = video_devdata(file);
+ struct mtk_mdp_ctx *ctx = NULL;
+ int ret;
+
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ if (mutex_lock_interruptible(&mdp->lock)) {
+ ret = -ERESTARTSYS;
+ goto err_lock;
+ }
+
+ mutex_init(&ctx->slock);
+ ctx->id = mdp->id_counter++;
+ v4l2_fh_init(&ctx->fh, vfd);
+ file->private_data = &ctx->fh;
+ ret = mtk_mdp_ctrls_create(ctx);
+ if (ret)
+ goto error_ctrls;
+
+ /* Use separate control handler per file handle */
+ ctx->fh.ctrl_handler = &ctx->ctrl_handler;
+ v4l2_fh_add(&ctx->fh);
+ INIT_LIST_HEAD(&ctx->list);
+
+ ctx->mdp_dev = mdp;
+ mtk_mdp_set_default_params(ctx);
+
+ INIT_WORK(&ctx->work, mtk_mdp_m2m_worker);
+ ctx->m2m_ctx = v4l2_m2m_ctx_init(mdp->m2m_dev, ctx,
+ mtk_mdp_m2m_queue_init);
+ if (IS_ERR(ctx->m2m_ctx)) {
+ dev_err(&mdp->pdev->dev, "Failed to initialize m2m context");
+ ret = PTR_ERR(ctx->m2m_ctx);
+ goto error_m2m_ctx;
+ }
+ ctx->fh.m2m_ctx = ctx->m2m_ctx;
+ if (mdp->ctx_num++ == 0) {
+ ret = vpu_load_firmware(mdp->vpu_dev);
+ if (ret < 0) {
+ dev_err(&mdp->pdev->dev,
+ "vpu_load_firmware failed %d\n", ret);
+ goto err_load_vpu;
+ }
+
+ ret = mtk_mdp_vpu_register(mdp->pdev);
+ if (ret < 0) {
+ dev_err(&mdp->pdev->dev,
+ "mdp_vpu register failed %d\n", ret);
+ goto err_load_vpu;
+ }
+ }
+
+ list_add(&ctx->list, &mdp->ctx_list);
+ mutex_unlock(&mdp->lock);
+
+ mtk_mdp_dbg(0, "%s [%d]", dev_name(&mdp->pdev->dev), ctx->id);
+
+ return 0;
+
+err_load_vpu:
+ mdp->ctx_num--;
+ v4l2_m2m_ctx_release(ctx->m2m_ctx);
+error_m2m_ctx:
+ v4l2_ctrl_handler_free(&ctx->ctrl_handler);
+error_ctrls:
+ v4l2_fh_del(&ctx->fh);
+ v4l2_fh_exit(&ctx->fh);
+ mutex_unlock(&mdp->lock);
+err_lock:
+ kfree(ctx);
+
+ return ret;
+}
+
+static int mtk_mdp_m2m_release(struct file *file)
+{
+ struct mtk_mdp_ctx *ctx = fh_to_ctx(file->private_data);
+ struct mtk_mdp_dev *mdp = ctx->mdp_dev;
+
+ flush_workqueue(mdp->job_wq);
+ mutex_lock(&mdp->lock);
+ v4l2_m2m_ctx_release(ctx->m2m_ctx);
+ v4l2_ctrl_handler_free(&ctx->ctrl_handler);
+ v4l2_fh_del(&ctx->fh);
+ v4l2_fh_exit(&ctx->fh);
+ mtk_mdp_vpu_deinit(&ctx->vpu);
+ mdp->ctx_num--;
+ list_del_init(&ctx->list);
+
+ mtk_mdp_dbg(0, "%s [%d]", dev_name(&mdp->pdev->dev), ctx->id);
+
+ mutex_unlock(&mdp->lock);
+ kfree(ctx);
+
+ return 0;
+}
+
+static const struct v4l2_file_operations mtk_mdp_m2m_fops = {
+ .owner = THIS_MODULE,
+ .open = mtk_mdp_m2m_open,
+ .release = mtk_mdp_m2m_release,
+ .poll = v4l2_m2m_fop_poll,
+ .unlocked_ioctl = video_ioctl2,
+ .mmap = v4l2_m2m_fop_mmap,
+};
+
+static struct v4l2_m2m_ops mtk_mdp_m2m_ops = {
+ .device_run = mtk_mdp_m2m_device_run,
+ .job_abort = mtk_mdp_m2m_job_abort,
+};
+
+int mtk_mdp_register_m2m_device(struct mtk_mdp_dev *mdp)
+{
+ struct device *dev = &mdp->pdev->dev;
+ int ret;
+
+ mdp->variant = &mtk_mdp_default_variant;
+ mdp->vdev = video_device_alloc();
+ if (!mdp->vdev) {
+ dev_err(dev, "failed to allocate video device\n");
+ ret = -ENOMEM;
+ goto err_video_alloc;
+ }
+ mdp->vdev->device_caps = V4L2_CAP_VIDEO_M2M_MPLANE | V4L2_CAP_STREAMING;
+ mdp->vdev->fops = &mtk_mdp_m2m_fops;
+ mdp->vdev->ioctl_ops = &mtk_mdp_m2m_ioctl_ops;
+ mdp->vdev->release = video_device_release;
+ mdp->vdev->lock = &mdp->lock;
+ mdp->vdev->vfl_dir = VFL_DIR_M2M;
+ mdp->vdev->v4l2_dev = &mdp->v4l2_dev;
+ snprintf(mdp->vdev->name, sizeof(mdp->vdev->name), "%s:m2m",
+ MTK_MDP_MODULE_NAME);
+ video_set_drvdata(mdp->vdev, mdp);
+
+ mdp->m2m_dev = v4l2_m2m_init(&mtk_mdp_m2m_ops);
+ if (IS_ERR(mdp->m2m_dev)) {
+ dev_err(dev, "failed to initialize v4l2-m2m device\n");
+ ret = PTR_ERR(mdp->m2m_dev);
+ goto err_m2m_init;
+ }
+
+ ret = video_register_device(mdp->vdev, VFL_TYPE_GRABBER, 2);
+ if (ret) {
+ dev_err(dev, "failed to register video device\n");
+ goto err_vdev_register;
+ }
+
+ v4l2_info(&mdp->v4l2_dev, "driver registered as /dev/video%d\n",
+ mdp->vdev->num);
+ return 0;
+
+err_vdev_register:
+ v4l2_m2m_release(mdp->m2m_dev);
+err_m2m_init:
+ video_device_release(mdp->vdev);
+err_video_alloc:
+
+ return ret;
+}
+
+void mtk_mdp_unregister_m2m_device(struct mtk_mdp_dev *mdp)
+{
+ video_unregister_device(mdp->vdev);
+ v4l2_m2m_release(mdp->m2m_dev);
+}
diff --git a/drivers/media/platform/mtk-mdp/mtk_mdp_m2m.h b/drivers/media/platform/mtk-mdp/mtk_mdp_m2m.h
new file mode 100644
index 000000000000..45afd3655817
--- /dev/null
+++ b/drivers/media/platform/mtk-mdp/mtk_mdp_m2m.h
@@ -0,0 +1,22 @@
+/*
+ * Copyright (c) 2016 MediaTek Inc.
+ * Author: Ming Hsiu Tsai <minghsiu.tsai@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MTK_MDP_M2M_H__
+#define __MTK_MDP_M2M_H__
+
+void mtk_mdp_ctx_state_lock_set(struct mtk_mdp_ctx *ctx, u32 state);
+int mtk_mdp_register_m2m_device(struct mtk_mdp_dev *mdp);
+void mtk_mdp_unregister_m2m_device(struct mtk_mdp_dev *mdp);
+
+#endif /* __MTK_MDP_M2M_H__ */
diff --git a/drivers/media/platform/mtk-mdp/mtk_mdp_regs.c b/drivers/media/platform/mtk-mdp/mtk_mdp_regs.c
new file mode 100644
index 000000000000..86d57f380c97
--- /dev/null
+++ b/drivers/media/platform/mtk-mdp/mtk_mdp_regs.c
@@ -0,0 +1,156 @@
+/*
+ * Copyright (c) 2015-2016 MediaTek Inc.
+ * Author: Houlong Wei <houlong.wei@mediatek.com>
+ * Ming Hsiu Tsai <minghsiu.tsai@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/platform_device.h>
+
+#include "mtk_mdp_core.h"
+#include "mtk_mdp_regs.h"
+
+
+#define MDP_COLORFMT_PACK(VIDEO, PLANE, COPLANE, HF, VF, BITS, GROUP, SWAP, ID)\
+ (((VIDEO) << 27) | ((PLANE) << 24) | ((COPLANE) << 22) |\
+ ((HF) << 20) | ((VF) << 18) | ((BITS) << 8) | ((GROUP) << 6) |\
+ ((SWAP) << 5) | ((ID) << 0))
+
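+/*
+ * Each MDP color is a single 32-bit ID packing the macro arguments:
+ * VIDEO at bit 27, PLANE at 24, COPLANE at 22, HF at 20, VF at 18,
+ * BITS at 8, GROUP at 6, SWAP at 5 and a unique format ID in the low bits.
+ */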
+enum MDP_COLOR_ENUM {
+ MDP_COLOR_UNKNOWN = 0,
+ MDP_COLOR_NV12 = MDP_COLORFMT_PACK(0, 2, 1, 1, 1, 8, 1, 0, 12),
+ MDP_COLOR_I420 = MDP_COLORFMT_PACK(0, 3, 0, 1, 1, 8, 1, 0, 8),
+ MDP_COLOR_YV12 = MDP_COLORFMT_PACK(0, 3, 0, 1, 1, 8, 1, 1, 8),
+ /* MediaTek proprietary format */
+ MDP_COLOR_420_MT21 = MDP_COLORFMT_PACK(5, 2, 1, 1, 1, 256, 1, 0, 12),
+};
+
+static int32_t mtk_mdp_map_color_format(int v4l2_format)
+{
+ switch (v4l2_format) {
+ case V4L2_PIX_FMT_NV12M:
+ case V4L2_PIX_FMT_NV12:
+ return MDP_COLOR_NV12;
+ case V4L2_PIX_FMT_MT21C:
+ return MDP_COLOR_420_MT21;
+ case V4L2_PIX_FMT_YUV420M:
+ case V4L2_PIX_FMT_YUV420:
+ return MDP_COLOR_I420;
+ case V4L2_PIX_FMT_YVU420:
+ return MDP_COLOR_YV12;
+ }
+
+ mtk_mdp_err("Unknown format 0x%x", v4l2_format);
+
+ return MDP_COLOR_UNKNOWN;
+}
+
+void mtk_mdp_hw_set_input_addr(struct mtk_mdp_ctx *ctx,
+ struct mtk_mdp_addr *addr)
+{
+ struct mdp_buffer *src_buf = &ctx->vpu.vsi->src_buffer;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(addr->addr); i++)
+ src_buf->addr_mva[i] = (uint64_t)addr->addr[i];
+}
+
+void mtk_mdp_hw_set_output_addr(struct mtk_mdp_ctx *ctx,
+ struct mtk_mdp_addr *addr)
+{
+ struct mdp_buffer *dst_buf = &ctx->vpu.vsi->dst_buffer;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(addr->addr); i++)
+ dst_buf->addr_mva[i] = (uint64_t)addr->addr[i];
+}
+
+void mtk_mdp_hw_set_in_size(struct mtk_mdp_ctx *ctx)
+{
+ struct mtk_mdp_frame *frame = &ctx->s_frame;
+ struct mdp_config *config = &ctx->vpu.vsi->src_config;
+
+ /* Set input pixel offset */
+ config->crop_x = frame->crop.left;
+ config->crop_y = frame->crop.top;
+
+ /* Set input cropped size */
+ config->crop_w = frame->crop.width;
+ config->crop_h = frame->crop.height;
+
+ /* Set input original size */
+ config->x = 0;
+ config->y = 0;
+ config->w = frame->width;
+ config->h = frame->height;
+}
+
+void mtk_mdp_hw_set_in_image_format(struct mtk_mdp_ctx *ctx)
+{
+ unsigned int i;
+ struct mtk_mdp_frame *frame = &ctx->s_frame;
+ struct mdp_config *config = &ctx->vpu.vsi->src_config;
+ struct mdp_buffer *src_buf = &ctx->vpu.vsi->src_buffer;
+
+ src_buf->plane_num = frame->fmt->num_comp;
+ config->format = mtk_mdp_map_color_format(frame->fmt->pixelformat);
+ config->w_stride = 0; /* MDP will calculate it by color format. */
+ config->h_stride = 0; /* MDP will calculate it by color format. */
+
+ for (i = 0; i < src_buf->plane_num; i++)
+ src_buf->plane_size[i] = frame->payload[i];
+}
+
+void mtk_mdp_hw_set_out_size(struct mtk_mdp_ctx *ctx)
+{
+ struct mtk_mdp_frame *frame = &ctx->d_frame;
+ struct mdp_config *config = &ctx->vpu.vsi->dst_config;
+
+ config->crop_x = frame->crop.left;
+ config->crop_y = frame->crop.top;
+ config->crop_w = frame->crop.width;
+ config->crop_h = frame->crop.height;
+ config->x = 0;
+ config->y = 0;
+ config->w = frame->width;
+ config->h = frame->height;
+}
+
+void mtk_mdp_hw_set_out_image_format(struct mtk_mdp_ctx *ctx)
+{
+ unsigned int i;
+ struct mtk_mdp_frame *frame = &ctx->d_frame;
+ struct mdp_config *config = &ctx->vpu.vsi->dst_config;
+ struct mdp_buffer *dst_buf = &ctx->vpu.vsi->dst_buffer;
+
+ dst_buf->plane_num = frame->fmt->num_comp;
+ config->format = mtk_mdp_map_color_format(frame->fmt->pixelformat);
+ config->w_stride = 0; /* MDP will calculate it by color format. */
+ config->h_stride = 0; /* MDP will calculate it by color format. */
+ for (i = 0; i < dst_buf->plane_num; i++)
+ dst_buf->plane_size[i] = frame->payload[i];
+}
+
+void mtk_mdp_hw_set_rotation(struct mtk_mdp_ctx *ctx)
+{
+ struct mdp_config_misc *misc = &ctx->vpu.vsi->misc;
+
+ misc->orientation = ctx->ctrls.rotate->val;
+ misc->hflip = ctx->ctrls.hflip->val;
+ misc->vflip = ctx->ctrls.vflip->val;
+}
+
+void mtk_mdp_hw_set_global_alpha(struct mtk_mdp_ctx *ctx)
+{
+ struct mdp_config_misc *misc = &ctx->vpu.vsi->misc;
+
+ misc->alpha = ctx->ctrls.global_alpha->val;
+}
diff --git a/drivers/media/platform/mtk-mdp/mtk_mdp_regs.h b/drivers/media/platform/mtk-mdp/mtk_mdp_regs.h
new file mode 100644
index 000000000000..42bd057e76cc
--- /dev/null
+++ b/drivers/media/platform/mtk-mdp/mtk_mdp_regs.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 2016 MediaTek Inc.
+ * Author: Ming Hsiu Tsai <minghsiu.tsai@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MTK_MDP_REGS_H__
+#define __MTK_MDP_REGS_H__
+
+
+void mtk_mdp_hw_set_input_addr(struct mtk_mdp_ctx *ctx,
+ struct mtk_mdp_addr *addr);
+void mtk_mdp_hw_set_output_addr(struct mtk_mdp_ctx *ctx,
+ struct mtk_mdp_addr *addr);
+void mtk_mdp_hw_set_in_size(struct mtk_mdp_ctx *ctx);
+void mtk_mdp_hw_set_in_image_format(struct mtk_mdp_ctx *ctx);
+void mtk_mdp_hw_set_out_size(struct mtk_mdp_ctx *ctx);
+void mtk_mdp_hw_set_out_image_format(struct mtk_mdp_ctx *ctx);
+void mtk_mdp_hw_set_rotation(struct mtk_mdp_ctx *ctx);
+void mtk_mdp_hw_set_global_alpha(struct mtk_mdp_ctx *ctx);
+
+
+#endif /* __MTK_MDP_REGS_H__ */
diff --git a/drivers/media/platform/mtk-mdp/mtk_mdp_vpu.c b/drivers/media/platform/mtk-mdp/mtk_mdp_vpu.c
new file mode 100644
index 000000000000..4893825aa5dd
--- /dev/null
+++ b/drivers/media/platform/mtk-mdp/mtk_mdp_vpu.c
@@ -0,0 +1,145 @@
+/*
+ * Copyright (c) 2015-2016 MediaTek Inc.
+ * Author: Houlong Wei <houlong.wei@mediatek.com>
+ * Ming Hsiu Tsai <minghsiu.tsai@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "mtk_mdp_core.h"
+#include "mtk_mdp_vpu.h"
+#include "mtk_vpu.h"
+
+
+static inline struct mtk_mdp_ctx *vpu_to_ctx(struct mtk_mdp_vpu *vpu)
+{
+ return container_of(vpu, struct mtk_mdp_ctx, vpu);
+}
+
+static void mtk_mdp_vpu_handle_init_ack(struct mdp_ipi_comm_ack *msg)
+{
+ struct mtk_mdp_vpu *vpu = (struct mtk_mdp_vpu *)
+ (unsigned long)msg->ap_inst;
+
+ /* mapping VPU address to kernel virtual address */
+ vpu->vsi = (struct mdp_process_vsi *)
+ vpu_mapping_dm_addr(vpu->pdev, msg->vpu_inst_addr);
+ vpu->inst_addr = msg->vpu_inst_addr;
+}
+
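+/*
+ * IPI receive callback: ap_inst round-trips the kernel mtk_mdp_vpu pointer
+ * that was sent in the request, so the ACK can be routed back to its
+ * context.
+ */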
+static void mtk_mdp_vpu_ipi_handler(void *data, unsigned int len, void *priv)
+{
+ unsigned int msg_id = *(unsigned int *)data;
+ struct mdp_ipi_comm_ack *msg = (struct mdp_ipi_comm_ack *)data;
+ struct mtk_mdp_vpu *vpu = (struct mtk_mdp_vpu *)
+ (unsigned long)msg->ap_inst;
+ struct mtk_mdp_ctx *ctx;
+
+ vpu->failure = msg->status;
+ if (!vpu->failure) {
+ switch (msg_id) {
+ case VPU_MDP_INIT_ACK:
+ mtk_mdp_vpu_handle_init_ack(data);
+ break;
+ case VPU_MDP_DEINIT_ACK:
+ case VPU_MDP_PROCESS_ACK:
+ break;
+ default:
+ ctx = vpu_to_ctx(vpu);
+ dev_err(&ctx->mdp_dev->pdev->dev,
+ "handle unknown ipi msg:0x%x\n",
+ msg_id);
+ break;
+ }
+ } else {
+ ctx = vpu_to_ctx(vpu);
+ mtk_mdp_dbg(0, "[%d]:msg 0x%x, failure:%d", ctx->id,
+ msg_id, vpu->failure);
+ }
+}
+
+int mtk_mdp_vpu_register(struct platform_device *pdev)
+{
+ struct mtk_mdp_dev *mdp = platform_get_drvdata(pdev);
+ int err;
+
+ err = vpu_ipi_register(mdp->vpu_dev, IPI_MDP,
+ mtk_mdp_vpu_ipi_handler, "mdp_vpu", NULL);
+ if (err)
+ dev_err(&mdp->pdev->dev,
+ "vpu_ipi_registration fail status=%d\n", err);
+
+ return err;
+}
+
+static int mtk_mdp_vpu_send_msg(void *msg, int len, struct mtk_mdp_vpu *vpu,
+ int id)
+{
+ struct mtk_mdp_ctx *ctx = vpu_to_ctx(vpu);
+ int err;
+
+ if (!vpu->pdev) {
+ mtk_mdp_dbg(1, "[%d]:vpu pdev is NULL", ctx->id);
+ return -EINVAL;
+ }
+
+ mutex_lock(&ctx->mdp_dev->vpulock);
+ err = vpu_ipi_send(vpu->pdev, (enum ipi_id)id, msg, len);
+ if (err)
+ dev_err(&ctx->mdp_dev->pdev->dev,
+ "vpu_ipi_send fail status %d\n", err);
+ mutex_unlock(&ctx->mdp_dev->vpulock);
+
+ return err;
+}
+
+static int mtk_mdp_vpu_send_ap_ipi(struct mtk_mdp_vpu *vpu, uint32_t msg_id)
+{
+ int err;
+ struct mdp_ipi_comm msg;
+
+ msg.msg_id = msg_id;
+ msg.ipi_id = IPI_MDP;
+ msg.vpu_inst_addr = vpu->inst_addr;
+ msg.ap_inst = (unsigned long)vpu;
+ err = mtk_mdp_vpu_send_msg((void *)&msg, sizeof(msg), vpu, IPI_MDP);
+ if (!err && vpu->failure)
+ err = -EINVAL;
+
+ return err;
+}
+
+int mtk_mdp_vpu_init(struct mtk_mdp_vpu *vpu)
+{
+ int err;
+ struct mdp_ipi_init msg;
+ struct mtk_mdp_ctx *ctx = vpu_to_ctx(vpu);
+
+ vpu->pdev = ctx->mdp_dev->vpu_dev;
+
+ msg.msg_id = AP_MDP_INIT;
+ msg.ipi_id = IPI_MDP;
+ msg.ap_inst = (unsigned long)vpu;
+ err = mtk_mdp_vpu_send_msg((void *)&msg, sizeof(msg), vpu, IPI_MDP);
+ if (!err && vpu->failure)
+ err = -EINVAL;
+
+ return err;
+}
+
+int mtk_mdp_vpu_deinit(struct mtk_mdp_vpu *vpu)
+{
+ return mtk_mdp_vpu_send_ap_ipi(vpu, AP_MDP_DEINIT);
+}
+
+int mtk_mdp_vpu_process(struct mtk_mdp_vpu *vpu)
+{
+ return mtk_mdp_vpu_send_ap_ipi(vpu, AP_MDP_PROCESS);
+}
diff --git a/drivers/media/platform/mtk-mdp/mtk_mdp_vpu.h b/drivers/media/platform/mtk-mdp/mtk_mdp_vpu.h
new file mode 100644
index 000000000000..df4bddaa438e
--- /dev/null
+++ b/drivers/media/platform/mtk-mdp/mtk_mdp_vpu.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright (c) 2015-2016 MediaTek Inc.
+ * Author: Houlong Wei <houlong.wei@mediatek.com>
+ * Ming Hsiu Tsai <minghsiu.tsai@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MTK_MDP_VPU_H__
+#define __MTK_MDP_VPU_H__
+
+#include "mtk_mdp_ipi.h"
+
+
+/**
+ * struct mtk_mdp_vpu - VPU instance for MDP
+ * @pdev : pointer to the VPU platform device
+ * @inst_addr : VPU MDP instance address
+ * @failure : VPU execution result status
+ * @vsi : VPU shared information
+ */
+struct mtk_mdp_vpu {
+ struct platform_device *pdev;
+ uint32_t inst_addr;
+ int32_t failure;
+ struct mdp_process_vsi *vsi;
+};
+
+int mtk_mdp_vpu_register(struct platform_device *pdev);
+int mtk_mdp_vpu_init(struct mtk_mdp_vpu *vpu);
+int mtk_mdp_vpu_deinit(struct mtk_mdp_vpu *vpu);
+int mtk_mdp_vpu_process(struct mtk_mdp_vpu *vpu);
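+
+/*
+ * Typical call order (a sketch; error handling omitted, and the
+ * embedding of struct mtk_mdp_vpu in a ctx follows vpu_to_ctx() in
+ * mtk_mdp_vpu.c):
+ *
+ *   mtk_mdp_vpu_register(pdev);      - once at probe time
+ *   mtk_mdp_vpu_init(&ctx->vpu);     - per context, maps the shared VSI
+ *   mtk_mdp_vpu_process(&ctx->vpu);  - per frame
+ *   mtk_mdp_vpu_deinit(&ctx->vpu);   - on context teardown
+ */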
+
+#endif /* __MTK_MDP_VPU_H__ */
diff --git a/drivers/media/platform/mtk-vcodec/Makefile b/drivers/media/platform/mtk-vcodec/Makefile
index dc5cb006d600..852d9697ccfa 100644
--- a/drivers/media/platform/mtk-vcodec/Makefile
+++ b/drivers/media/platform/mtk-vcodec/Makefile
@@ -1,7 +1,16 @@
-
-obj-$(CONFIG_VIDEO_MEDIATEK_VCODEC) += mtk-vcodec-enc.o mtk-vcodec-common.o
-
+obj-$(CONFIG_VIDEO_MEDIATEK_VCODEC) += mtk-vcodec-dec.o \
+ mtk-vcodec-enc.o \
+ mtk-vcodec-common.o
+
+mtk-vcodec-dec-y := vdec/vdec_h264_if.o \
+ vdec/vdec_vp8_if.o \
+ vdec/vdec_vp9_if.o \
+ mtk_vcodec_dec_drv.o \
+ vdec_drv_if.o \
+ vdec_vpu_if.o \
+ mtk_vcodec_dec.o \
+ mtk_vcodec_dec_pm.o
+
mtk-vcodec-enc-y := venc/venc_vp8_if.o \
diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec.c b/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec.c
new file mode 100644
index 000000000000..074659227864
--- /dev/null
+++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec.c
@@ -0,0 +1,1451 @@
+/*
+ * Copyright (c) 2016 MediaTek Inc.
+ * Author: PC Chen <pc.chen@mediatek.com>
+ * Tiffany Lin <tiffany.lin@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <media/v4l2-event.h>
+#include <media/v4l2-mem2mem.h>
+#include <media/videobuf2-dma-contig.h>
+
+#include "mtk_vcodec_drv.h"
+#include "mtk_vcodec_dec.h"
+#include "mtk_vcodec_intr.h"
+#include "mtk_vcodec_util.h"
+#include "vdec_drv_if.h"
+#include "mtk_vcodec_dec_pm.h"
+
+#define OUT_FMT_IDX 0
+#define CAP_FMT_IDX 3
+
+#define MTK_VDEC_MIN_W 64U
+#define MTK_VDEC_MIN_H 64U
+#define DFT_CFG_WIDTH MTK_VDEC_MIN_W
+#define DFT_CFG_HEIGHT MTK_VDEC_MIN_H
+
+static struct mtk_video_fmt mtk_video_formats[] = {
+ {
+ .fourcc = V4L2_PIX_FMT_H264,
+ .type = MTK_FMT_DEC,
+ .num_planes = 1,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_VP8,
+ .type = MTK_FMT_DEC,
+ .num_planes = 1,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_VP9,
+ .type = MTK_FMT_DEC,
+ .num_planes = 1,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_MT21C,
+ .type = MTK_FMT_FRAME,
+ .num_planes = 2,
+ },
+};
+
+static const struct mtk_codec_framesizes mtk_vdec_framesizes[] = {
+ {
+ .fourcc = V4L2_PIX_FMT_H264,
+ .stepwise = { MTK_VDEC_MIN_W, MTK_VDEC_MAX_W, 16,
+ MTK_VDEC_MIN_H, MTK_VDEC_MAX_H, 16 },
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_VP8,
+ .stepwise = { MTK_VDEC_MIN_W, MTK_VDEC_MAX_W, 16,
+ MTK_VDEC_MIN_H, MTK_VDEC_MAX_H, 16 },
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_VP9,
+ .stepwise = { MTK_VDEC_MIN_W, MTK_VDEC_MAX_W, 16,
+ MTK_VDEC_MIN_H, MTK_VDEC_MAX_H, 16 },
+ },
+};
+
+#define NUM_SUPPORTED_FRAMESIZE ARRAY_SIZE(mtk_vdec_framesizes)
+#define NUM_FORMATS ARRAY_SIZE(mtk_video_formats)
+
+static struct mtk_video_fmt *mtk_vdec_find_format(struct v4l2_format *f)
+{
+ struct mtk_video_fmt *fmt;
+ unsigned int k;
+
+ for (k = 0; k < NUM_FORMATS; k++) {
+ fmt = &mtk_video_formats[k];
+ if (fmt->fourcc == f->fmt.pix_mp.pixelformat)
+ return fmt;
+ }
+
+ return NULL;
+}
+
+static struct mtk_q_data *mtk_vdec_get_q_data(struct mtk_vcodec_ctx *ctx,
+ enum v4l2_buf_type type)
+{
+ if (V4L2_TYPE_IS_OUTPUT(type))
+ return &ctx->q_data[MTK_Q_DATA_SRC];
+
+ return &ctx->q_data[MTK_Q_DATA_DST];
+}
+
+/*
+ * This function tries to clean all display buffers; the buffers are
+ * returned in display order.
+ * Note that buffers returned by the codec driver may still be in the
+ * driver's reference list.
+ */
+static struct vb2_buffer *get_display_buffer(struct mtk_vcodec_ctx *ctx)
+{
+ struct vdec_fb *disp_frame_buffer = NULL;
+ struct mtk_video_dec_buf *dstbuf;
+
+ mtk_v4l2_debug(3, "[%d]", ctx->id);
+ if (vdec_if_get_param(ctx,
+ GET_PARAM_DISP_FRAME_BUFFER,
+ &disp_frame_buffer)) {
+ mtk_v4l2_err("[%d]Cannot get param : GET_PARAM_DISP_FRAME_BUFFER",
+ ctx->id);
+ return NULL;
+ }
+
+ if (disp_frame_buffer == NULL) {
+ mtk_v4l2_debug(3, "No display frame buffer");
+ return NULL;
+ }
+
+ dstbuf = container_of(disp_frame_buffer, struct mtk_video_dec_buf,
+ frame_buffer);
+ mutex_lock(&ctx->lock);
+ if (dstbuf->used) {
+ vb2_set_plane_payload(&dstbuf->vb.vb2_buf, 0,
+ ctx->picinfo.y_bs_sz);
+ vb2_set_plane_payload(&dstbuf->vb.vb2_buf, 1,
+ ctx->picinfo.c_bs_sz);
+
+ dstbuf->ready_to_display = true;
+
+ mtk_v4l2_debug(2,
+ "[%d]status=%x queue id=%d to done_list %d",
+ ctx->id, disp_frame_buffer->status,
+ dstbuf->vb.vb2_buf.index,
+ dstbuf->queued_in_vb2);
+
+ v4l2_m2m_buf_done(&dstbuf->vb, VB2_BUF_STATE_DONE);
+ ctx->decoded_frame_cnt++;
+ }
+ mutex_unlock(&ctx->lock);
+ return &dstbuf->vb.vb2_buf;
+}
+
+/*
+ * This function tries to clean all capture buffers that the codec driver
+ * no longer uses as reference buffers.
+ * Such a buffer is re-queued to vb2 if user space has already returned
+ * it to v4l2, or if it is merely the output of a previous sps/pps/
+ * resolution-change decode; nothing is done if user space still owns it.
+ */
+static struct vb2_buffer *get_free_buffer(struct mtk_vcodec_ctx *ctx)
+{
+ struct mtk_video_dec_buf *dstbuf;
+ struct vdec_fb *free_frame_buffer = NULL;
+
+ if (vdec_if_get_param(ctx,
+ GET_PARAM_FREE_FRAME_BUFFER,
+ &free_frame_buffer)) {
+ mtk_v4l2_err("[%d] Error!! Cannot get param", ctx->id);
+ return NULL;
+ }
+ if (free_frame_buffer == NULL) {
+ mtk_v4l2_debug(3, " No free frame buffer");
+ return NULL;
+ }
+
+ mtk_v4l2_debug(3, "[%d] tmp_frame_addr = 0x%p",
+ ctx->id, free_frame_buffer);
+
+ dstbuf = container_of(free_frame_buffer, struct mtk_video_dec_buf,
+ frame_buffer);
+
+ mutex_lock(&ctx->lock);
+ if (dstbuf->used) {
+ if ((dstbuf->queued_in_vb2) &&
+ (dstbuf->queued_in_v4l2) &&
+ (free_frame_buffer->status == FB_ST_FREE)) {
+ /*
+ * After decoding sps/pps or a non-display buffer, we
+ * don't need to return the capture buffer to user
+ * space; just re-queue it to the vb2 queue. This
+ * avoids the overhead of dequeuing/queuing an unused
+ * capture buffer. In this case, queued_in_vb2 = true.
+ */
+ mtk_v4l2_debug(2,
+ "[%d]status=%x queue id=%d to rdy_queue %d",
+ ctx->id, free_frame_buffer->status,
+ dstbuf->vb.vb2_buf.index,
+ dstbuf->queued_in_vb2);
+ v4l2_m2m_buf_queue(ctx->m2m_ctx, &dstbuf->vb);
+ } else if ((dstbuf->queued_in_vb2 == false) &&
+ (dstbuf->queued_in_v4l2 == true)) {
+ /*
+ * If the buffer is in the v4l2 driver but not yet in
+ * the vb2 queue, and we got it from the free list, the
+ * codec driver no longer uses it as a reference
+ * buffer. Queue it to vb2 so the worker thread can
+ * later pick it up for decoding. In this case,
+ * queued_in_vb2 = false means this buffer is not the
+ * output of a previous decode.
+ */
+ mtk_v4l2_debug(2,
+ "[%d]status=%x queue id=%d to rdy_queue",
+ ctx->id, free_frame_buffer->status,
+ dstbuf->vb.vb2_buf.index);
+ v4l2_m2m_buf_queue(ctx->m2m_ctx, &dstbuf->vb);
+ dstbuf->queued_in_vb2 = true;
+ } else {
+ /*
+ * The codec driver no longer references this capture
+ * buffer and the buffer is not in the v4l2 driver, so
+ * nothing needs to be done; just log it for debugging
+ * the buffer flow. When user space queues this buffer
+ * again, it can go directly into the vb2 queue.
+ */
+ mtk_v4l2_debug(3, "[%d]status=%x err queue id=%d %d %d",
+ ctx->id, free_frame_buffer->status,
+ dstbuf->vb.vb2_buf.index,
+ dstbuf->queued_in_vb2,
+ dstbuf->queued_in_v4l2);
+ }
+ dstbuf->used = false;
+ }
+ mutex_unlock(&ctx->lock);
+ return &dstbuf->vb.vb2_buf;
+}
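+
+/*
+ * The decision in get_free_buffer(), condensed (a reading aid only;
+ * the flags are the ones documented in mtk_vcodec_dec.h):
+ *
+ *   queued_in_vb2 && queued_in_v4l2 && FB_ST_FREE  -> re-queue to vb2
+ *   !queued_in_vb2 && queued_in_v4l2               -> queue to vb2 now
+ *   otherwise                                      -> just log it
+ */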
+
+static void clean_display_buffer(struct mtk_vcodec_ctx *ctx)
+{
+ struct vb2_buffer *framptr;
+
+ do {
+ framptr = get_display_buffer(ctx);
+ } while (framptr);
+}
+
+static void clean_free_buffer(struct mtk_vcodec_ctx *ctx)
+{
+ struct vb2_buffer *framptr;
+
+ do {
+ framptr = get_free_buffer(ctx);
+ } while (framptr);
+}
+
+static void mtk_vdec_queue_res_chg_event(struct mtk_vcodec_ctx *ctx)
+{
+ static const struct v4l2_event ev_src_ch = {
+ .type = V4L2_EVENT_SOURCE_CHANGE,
+ .u.src_change.changes =
+ V4L2_EVENT_SRC_CH_RESOLUTION,
+ };
+
+ mtk_v4l2_debug(1, "[%d]", ctx->id);
+ v4l2_event_queue_fh(&ctx->fh, &ev_src_ch);
+}
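+
+/*
+ * What a client is expected to do with this event (a sketch of the
+ * standard V4L2 source-change sequence; nothing here is specific to
+ * this driver):
+ *
+ *   VIDIOC_DQEVENT                  -> V4L2_EVENT_SOURCE_CHANGE
+ *   dequeue remaining CAPTURE buffers decoded at the old resolution
+ *   VIDIOC_STREAMOFF(CAPTURE)
+ *   VIDIOC_G_FMT + VIDIOC_REQBUFS   -> re-negotiate new buffers
+ *   VIDIOC_STREAMON(CAPTURE)
+ */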
+
+static void mtk_vdec_flush_decoder(struct mtk_vcodec_ctx *ctx)
+{
+ bool res_chg;
+ int ret = 0;
+
+ ret = vdec_if_decode(ctx, NULL, NULL, &res_chg);
+ if (ret)
+ mtk_v4l2_err("DecodeFinal failed, ret=%d", ret);
+
+ clean_display_buffer(ctx);
+ clean_free_buffer(ctx);
+}
+
+static void mtk_vdec_pic_info_update(struct mtk_vcodec_ctx *ctx)
+{
+ unsigned int dpbsize = 0;
+ int ret;
+
+ if (vdec_if_get_param(ctx,
+ GET_PARAM_PIC_INFO,
+ &ctx->last_decoded_picinfo)) {
+ mtk_v4l2_err("[%d]Error!! Cannot get param : GET_PARAM_PICTURE_INFO ERR",
+ ctx->id);
+ return;
+ }
+
+ if (ctx->last_decoded_picinfo.pic_w == 0 ||
+ ctx->last_decoded_picinfo.pic_h == 0 ||
+ ctx->last_decoded_picinfo.buf_w == 0 ||
+ ctx->last_decoded_picinfo.buf_h == 0) {
+ mtk_v4l2_err("Cannot get correct pic info");
+ return;
+ }
+
+ if ((ctx->last_decoded_picinfo.pic_w == ctx->picinfo.pic_w) ||
+ (ctx->last_decoded_picinfo.pic_h == ctx->picinfo.pic_h))
+ return;
+
+ mtk_v4l2_debug(1,
+ "[%d]-> new(%d,%d), old(%d,%d), real(%d,%d)",
+ ctx->id, ctx->last_decoded_picinfo.pic_w,
+ ctx->last_decoded_picinfo.pic_h,
+ ctx->picinfo.pic_w, ctx->picinfo.pic_h,
+ ctx->last_decoded_picinfo.buf_w,
+ ctx->last_decoded_picinfo.buf_h);
+
+ ret = vdec_if_get_param(ctx, GET_PARAM_DPB_SIZE, &dpbsize);
+ if (dpbsize == 0)
+ mtk_v4l2_err("Incorrect dpb size, ret=%d", ret);
+
+ ctx->dpb_size = dpbsize;
+}
+
+static void mtk_vdec_worker(struct work_struct *work)
+{
+ struct mtk_vcodec_ctx *ctx = container_of(work, struct mtk_vcodec_ctx,
+ decode_work);
+ struct mtk_vcodec_dev *dev = ctx->dev;
+ struct vb2_buffer *src_buf, *dst_buf;
+ struct mtk_vcodec_mem buf;
+ struct vdec_fb *pfb;
+ bool res_chg = false;
+ int ret;
+ struct mtk_video_dec_buf *dst_buf_info, *src_buf_info;
+ struct vb2_v4l2_buffer *dst_vb2_v4l2, *src_vb2_v4l2;
+
+ src_buf = v4l2_m2m_next_src_buf(ctx->m2m_ctx);
+ if (src_buf == NULL) {
+ v4l2_m2m_job_finish(dev->m2m_dev_dec, ctx->m2m_ctx);
+ mtk_v4l2_debug(1, "[%d] src_buf empty!!", ctx->id);
+ return;
+ }
+
+ dst_buf = v4l2_m2m_next_dst_buf(ctx->m2m_ctx);
+ if (dst_buf == NULL) {
+ v4l2_m2m_job_finish(dev->m2m_dev_dec, ctx->m2m_ctx);
+ mtk_v4l2_debug(1, "[%d] dst_buf empty!!", ctx->id);
+ return;
+ }
+
+ src_vb2_v4l2 = container_of(src_buf, struct vb2_v4l2_buffer, vb2_buf);
+ src_buf_info = container_of(src_vb2_v4l2, struct mtk_video_dec_buf, vb);
+
+ dst_vb2_v4l2 = container_of(dst_buf, struct vb2_v4l2_buffer, vb2_buf);
+ dst_buf_info = container_of(dst_vb2_v4l2, struct mtk_video_dec_buf, vb);
+
+ buf.va = vb2_plane_vaddr(src_buf, 0);
+ buf.dma_addr = vb2_dma_contig_plane_dma_addr(src_buf, 0);
+ buf.size = (size_t)src_buf->planes[0].bytesused;
+ if (!buf.va) {
+ v4l2_m2m_job_finish(dev->m2m_dev_dec, ctx->m2m_ctx);
+ mtk_v4l2_err("[%d] id=%d src_addr is NULL!!",
+ ctx->id, src_buf->index);
+ return;
+ }
+
+ pfb = &dst_buf_info->frame_buffer;
+ pfb->base_y.va = vb2_plane_vaddr(dst_buf, 0);
+ pfb->base_y.dma_addr = vb2_dma_contig_plane_dma_addr(dst_buf, 0);
+ pfb->base_y.size = ctx->picinfo.y_bs_sz + ctx->picinfo.y_len_sz;
+
+ pfb->base_c.va = vb2_plane_vaddr(dst_buf, 1);
+ pfb->base_c.dma_addr = vb2_dma_contig_plane_dma_addr(dst_buf, 1);
+ pfb->base_c.size = ctx->picinfo.c_bs_sz + ctx->picinfo.c_len_sz;
+ pfb->status = 0;
+ mtk_v4l2_debug(3, "===>[%d] vdec_if_decode() ===>", ctx->id);
+ mtk_v4l2_debug(3, "[%d] Bitstream VA=%p DMA=%pad Size=%zx vb=%p",
+ ctx->id, buf.va, &buf.dma_addr, buf.size, src_buf);
+
+ mtk_v4l2_debug(3,
+ "id=%d Framebuf pfb=%p VA=%p Y_DMA=%pad C_DMA=%pad Size=%zx",
+ dst_buf->index, pfb,
+ pfb->base_y.va, &pfb->base_y.dma_addr,
+ &pfb->base_c.dma_addr, pfb->base_y.size);
+
+ if (src_buf_info->lastframe) {
+ /* update src buf status */
+ src_buf = v4l2_m2m_src_buf_remove(ctx->m2m_ctx);
+ src_buf_info->lastframe = false;
+ v4l2_m2m_buf_done(&src_buf_info->vb, VB2_BUF_STATE_DONE);
+
+ /* update dst buf status */
+ dst_buf = v4l2_m2m_dst_buf_remove(ctx->m2m_ctx);
+ dst_buf_info->used = false;
+
+ vdec_if_decode(ctx, NULL, NULL, &res_chg);
+ clean_display_buffer(ctx);
+ vb2_set_plane_payload(&dst_buf_info->vb.vb2_buf, 0, 0);
+ vb2_set_plane_payload(&dst_buf_info->vb.vb2_buf, 1, 0);
+ v4l2_m2m_buf_done(&dst_buf_info->vb, VB2_BUF_STATE_DONE);
+ clean_free_buffer(ctx);
+ v4l2_m2m_job_finish(dev->m2m_dev_dec, ctx->m2m_ctx);
+ return;
+ }
+ dst_buf_info->vb.vb2_buf.timestamp
+ = src_buf_info->vb.vb2_buf.timestamp;
+ dst_buf_info->vb.timecode
+ = src_buf_info->vb.timecode;
+ mutex_lock(&ctx->lock);
+ dst_buf_info->used = true;
+ mutex_unlock(&ctx->lock);
+ src_buf_info->used = true;
+
+ ret = vdec_if_decode(ctx, &buf, pfb, &res_chg);
+
+ if (ret) {
+ mtk_v4l2_err(
+ " <===[%d], src_buf[%d]%d sz=0x%zx pts=%llu dst_buf[%d] vdec_if_decode() ret=%d res_chg=%d===>",
+ ctx->id,
+ src_buf->index,
+ src_buf_info->lastframe,
+ buf.size,
+ src_buf_info->vb.vb2_buf.timestamp,
+ dst_buf->index,
+ ret, res_chg);
+ src_buf = v4l2_m2m_src_buf_remove(ctx->m2m_ctx);
+ v4l2_m2m_buf_done(&src_buf_info->vb, VB2_BUF_STATE_ERROR);
+ } else if (res_chg == false) {
+ /*
+ * we only return src buffer with VB2_BUF_STATE_DONE
+ * when decode success without resolution change
+ */
+ src_buf = v4l2_m2m_src_buf_remove(ctx->m2m_ctx);
+ v4l2_m2m_buf_done(&src_buf_info->vb, VB2_BUF_STATE_DONE);
+ }
+
+ dst_buf = v4l2_m2m_dst_buf_remove(ctx->m2m_ctx);
+ clean_display_buffer(ctx);
+ clean_free_buffer(ctx);
+
+ if (!ret && res_chg) {
+ mtk_vdec_pic_info_update(ctx);
+ /*
+ * On encountering a resolution change in the stream,
+ * the driver must first process and decode all
+ * remaining buffers from before the resolution change
+ * point, so flush the decoder here.
+ */
+ mtk_vdec_flush_decoder(ctx);
+ /*
+ * Once all buffers containing decoded frames from
+ * before the resolution change point are ready to be
+ * dequeued on the CAPTURE queue, the driver sends a
+ * V4L2_EVENT_SOURCE_CHANGE event with source change
+ * type V4L2_EVENT_SRC_CH_RESOLUTION.
+ */
+ mtk_vdec_queue_res_chg_event(ctx);
+ }
+ v4l2_m2m_job_finish(dev->m2m_dev_dec, ctx->m2m_ctx);
+}
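+
+/*
+ * Worker summary: each invocation takes one src/dst buffer pair, wraps
+ * the dst planes in a struct vdec_fb, calls vdec_if_decode(), then
+ * recycles display/free buffers. On a resolution change the src buffer
+ * is deliberately not removed, so the same bitstream buffer is decoded
+ * again once the flush above has drained the old-resolution frames.
+ */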
+
+void mtk_vdec_unlock(struct mtk_vcodec_ctx *ctx)
+{
+ mutex_unlock(&ctx->dev->dec_mutex);
+}
+
+void mtk_vdec_lock(struct mtk_vcodec_ctx *ctx)
+{
+ mutex_lock(&ctx->dev->dec_mutex);
+}
+
+void mtk_vcodec_dec_release(struct mtk_vcodec_ctx *ctx)
+{
+ vdec_if_deinit(ctx);
+ ctx->state = MTK_STATE_FREE;
+}
+
+void mtk_vcodec_dec_set_default_params(struct mtk_vcodec_ctx *ctx)
+{
+ struct mtk_q_data *q_data;
+
+ ctx->m2m_ctx->q_lock = &ctx->dev->dev_mutex;
+ ctx->fh.m2m_ctx = ctx->m2m_ctx;
+ ctx->fh.ctrl_handler = &ctx->ctrl_hdl;
+ INIT_WORK(&ctx->decode_work, mtk_vdec_worker);
+ ctx->colorspace = V4L2_COLORSPACE_REC709;
+ ctx->ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT;
+ ctx->quantization = V4L2_QUANTIZATION_DEFAULT;
+ ctx->xfer_func = V4L2_XFER_FUNC_DEFAULT;
+
+ q_data = &ctx->q_data[MTK_Q_DATA_SRC];
+ memset(q_data, 0, sizeof(struct mtk_q_data));
+ q_data->visible_width = DFT_CFG_WIDTH;
+ q_data->visible_height = DFT_CFG_HEIGHT;
+ q_data->fmt = &mtk_video_formats[OUT_FMT_IDX];
+ q_data->field = V4L2_FIELD_NONE;
+
+ q_data->sizeimage[0] = DFT_CFG_WIDTH * DFT_CFG_HEIGHT;
+ q_data->bytesperline[0] = 0;
+
+ q_data = &ctx->q_data[MTK_Q_DATA_DST];
+ memset(q_data, 0, sizeof(struct mtk_q_data));
+ q_data->visible_width = DFT_CFG_WIDTH;
+ q_data->visible_height = DFT_CFG_HEIGHT;
+ q_data->coded_width = DFT_CFG_WIDTH;
+ q_data->coded_height = DFT_CFG_HEIGHT;
+ q_data->fmt = &mtk_video_formats[CAP_FMT_IDX];
+ q_data->field = V4L2_FIELD_NONE;
+
+ v4l_bound_align_image(&q_data->coded_width,
+ MTK_VDEC_MIN_W,
+ MTK_VDEC_MAX_W, 4,
+ &q_data->coded_height,
+ MTK_VDEC_MIN_H,
+ MTK_VDEC_MAX_H, 5, 6);
+
+ q_data->sizeimage[0] = q_data->coded_width * q_data->coded_height;
+ q_data->bytesperline[0] = q_data->coded_width;
+ q_data->sizeimage[1] = q_data->sizeimage[0] / 2;
+ q_data->bytesperline[1] = q_data->coded_width;
+}
+
+static int vidioc_vdec_qbuf(struct file *file, void *priv,
+ struct v4l2_buffer *buf)
+{
+ struct mtk_vcodec_ctx *ctx = fh_to_ctx(priv);
+ struct vb2_queue *vq;
+ struct vb2_buffer *vb;
+ struct mtk_video_dec_buf *mtkbuf;
+ struct vb2_v4l2_buffer *vb2_v4l2;
+
+ if (ctx->state == MTK_STATE_ABORT) {
+ mtk_v4l2_err("[%d] Call on QBUF after unrecoverable error",
+ ctx->id);
+ return -EIO;
+ }
+
+ vq = v4l2_m2m_get_vq(ctx->m2m_ctx, buf->type);
+ if (buf->index >= vq->num_buffers) {
+ mtk_v4l2_debug(1, "buffer index %d out of range", buf->index);
+ return -EINVAL;
+ }
+ vb = vq->bufs[buf->index];
+ vb2_v4l2 = container_of(vb, struct vb2_v4l2_buffer, vb2_buf);
+ mtkbuf = container_of(vb2_v4l2, struct mtk_video_dec_buf, vb);
+
+ if ((buf->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) &&
+ (buf->m.planes[0].bytesused == 0)) {
+ mtkbuf->lastframe = true;
+ mtk_v4l2_debug(1, "[%d] (%d) id=%d lastframe=%d (%d,%d, %d) vb=%p",
+ ctx->id, buf->type, buf->index,
+ mtkbuf->lastframe, buf->bytesused,
+ buf->m.planes[0].bytesused, buf->length,
+ vb);
+ }
+
+ return v4l2_m2m_qbuf(file, ctx->m2m_ctx, buf);
+}
+
+static int vidioc_vdec_dqbuf(struct file *file, void *priv,
+ struct v4l2_buffer *buf)
+{
+ struct mtk_vcodec_ctx *ctx = fh_to_ctx(priv);
+
+ if (ctx->state == MTK_STATE_ABORT) {
+ mtk_v4l2_err("[%d] Call on DQBUF after unrecoverable error",
+ ctx->id);
+ return -EIO;
+ }
+
+ return v4l2_m2m_dqbuf(file, ctx->m2m_ctx, buf);
+}
+
+static int vidioc_vdec_querycap(struct file *file, void *priv,
+ struct v4l2_capability *cap)
+{
+ strlcpy(cap->driver, MTK_VCODEC_DEC_NAME, sizeof(cap->driver));
+ strlcpy(cap->bus_info, MTK_PLATFORM_STR, sizeof(cap->bus_info));
+ strlcpy(cap->card, MTK_PLATFORM_STR, sizeof(cap->card));
+
+ return 0;
+}
+
+static int vidioc_vdec_subscribe_evt(struct v4l2_fh *fh,
+ const struct v4l2_event_subscription *sub)
+{
+ switch (sub->type) {
+ case V4L2_EVENT_EOS:
+ return v4l2_event_subscribe(fh, sub, 2, NULL);
+ case V4L2_EVENT_SOURCE_CHANGE:
+ return v4l2_src_change_event_subscribe(fh, sub);
+ default:
+ return v4l2_ctrl_subscribe_event(fh, sub);
+ }
+}
+
+static int vidioc_try_fmt(struct v4l2_format *f, struct mtk_video_fmt *fmt)
+{
+ struct v4l2_pix_format_mplane *pix_fmt_mp = &f->fmt.pix_mp;
+ int i;
+
+ pix_fmt_mp->field = V4L2_FIELD_NONE;
+
+ if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+ pix_fmt_mp->num_planes = 1;
+ pix_fmt_mp->plane_fmt[0].bytesperline = 0;
+ } else if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
+ int tmp_w, tmp_h;
+
+ pix_fmt_mp->height = clamp(pix_fmt_mp->height,
+ MTK_VDEC_MIN_H,
+ MTK_VDEC_MAX_H);
+ pix_fmt_mp->width = clamp(pix_fmt_mp->width,
+ MTK_VDEC_MIN_W,
+ MTK_VDEC_MAX_W);
+
+ /*
+ * Find the next closest rectangle with width and height
+ * aligned to 64.
+ * Note: this only produces a default value; the value the HW
+ * really needs is only available once the ctx is in the
+ * MTK_STATE_HEADER state.
+ */
+ tmp_w = pix_fmt_mp->width;
+ tmp_h = pix_fmt_mp->height;
+ v4l_bound_align_image(&pix_fmt_mp->width,
+ MTK_VDEC_MIN_W,
+ MTK_VDEC_MAX_W, 6,
+ &pix_fmt_mp->height,
+ MTK_VDEC_MIN_H,
+ MTK_VDEC_MAX_H, 6, 9);
+
+ if (pix_fmt_mp->width < tmp_w &&
+ (pix_fmt_mp->width + 64) <= MTK_VDEC_MAX_W)
+ pix_fmt_mp->width += 64;
+ if (pix_fmt_mp->height < tmp_h &&
+ (pix_fmt_mp->height + 64) <= MTK_VDEC_MAX_H)
+ pix_fmt_mp->height += 64;
+
+ mtk_v4l2_debug(0,
+ "before resize width=%d, height=%d, after resize width=%d, height=%d, sizeimage=%d",
+ tmp_w, tmp_h, pix_fmt_mp->width,
+ pix_fmt_mp->height,
+ pix_fmt_mp->width * pix_fmt_mp->height);
+
+ pix_fmt_mp->num_planes = fmt->num_planes;
+ pix_fmt_mp->plane_fmt[0].sizeimage =
+ pix_fmt_mp->width * pix_fmt_mp->height;
+ pix_fmt_mp->plane_fmt[0].bytesperline = pix_fmt_mp->width;
+
+ if (pix_fmt_mp->num_planes == 2) {
+ pix_fmt_mp->plane_fmt[1].sizeimage =
+ (pix_fmt_mp->width * pix_fmt_mp->height) / 2;
+ pix_fmt_mp->plane_fmt[1].bytesperline =
+ pix_fmt_mp->width;
+ }
+ }
+
+ for (i = 0; i < pix_fmt_mp->num_planes; i++)
+ memset(&(pix_fmt_mp->plane_fmt[i].reserved[0]), 0x0,
+ sizeof(pix_fmt_mp->plane_fmt[0].reserved));
+
+ pix_fmt_mp->flags = 0;
+ memset(&pix_fmt_mp->reserved, 0x0, sizeof(pix_fmt_mp->reserved));
+ return 0;
+}
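+
+/*
+ * A worked example of the CAPTURE alignment above (plain arithmetic,
+ * no extra API assumed): for a 720x482 request, v4l_bound_align_image()
+ * rounds each side to the nearest multiple of 64 (walign = halign = 6),
+ * giving 704x512. The width fell below the request, and 704 + 64 is
+ * still <= MTK_VDEC_MAX_W, so it is bumped to 768; the height already
+ * grew, so it stays. Result: 768x512, with plane 0 sizeimage
+ * 768 * 512 and plane 1 half of that.
+ */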
+
+static int vidioc_try_fmt_vid_cap_mplane(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct mtk_video_fmt *fmt;
+
+ fmt = mtk_vdec_find_format(f);
+ if (!fmt) {
+ f->fmt.pix.pixelformat = mtk_video_formats[CAP_FMT_IDX].fourcc;
+ fmt = mtk_vdec_find_format(f);
+ }
+
+ return vidioc_try_fmt(f, fmt);
+}
+
+static int vidioc_try_fmt_vid_out_mplane(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct v4l2_pix_format_mplane *pix_fmt_mp = &f->fmt.pix_mp;
+ struct mtk_video_fmt *fmt;
+
+ fmt = mtk_vdec_find_format(f);
+ if (!fmt) {
+ f->fmt.pix.pixelformat = mtk_video_formats[OUT_FMT_IDX].fourcc;
+ fmt = mtk_vdec_find_format(f);
+ }
+
+ if (pix_fmt_mp->plane_fmt[0].sizeimage == 0) {
+ mtk_v4l2_err("sizeimage of output format must be given");
+ return -EINVAL;
+ }
+
+ return vidioc_try_fmt(f, fmt);
+}
+
+static int vidioc_vdec_g_selection(struct file *file, void *priv,
+ struct v4l2_selection *s)
+{
+ struct mtk_vcodec_ctx *ctx = fh_to_ctx(priv);
+ struct mtk_q_data *q_data;
+
+ if (s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ return -EINVAL;
+
+ q_data = &ctx->q_data[MTK_Q_DATA_DST];
+
+ switch (s->target) {
+ case V4L2_SEL_TGT_COMPOSE_DEFAULT:
+ s->r.left = 0;
+ s->r.top = 0;
+ s->r.width = ctx->picinfo.pic_w;
+ s->r.height = ctx->picinfo.pic_h;
+ break;
+ case V4L2_SEL_TGT_COMPOSE_BOUNDS:
+ s->r.left = 0;
+ s->r.top = 0;
+ s->r.width = ctx->picinfo.buf_w;
+ s->r.height = ctx->picinfo.buf_h;
+ break;
+ case V4L2_SEL_TGT_COMPOSE:
+ if (vdec_if_get_param(ctx, GET_PARAM_CROP_INFO, &(s->r))) {
+ /* set to default value if header info is not ready yet */
+ s->r.left = 0;
+ s->r.top = 0;
+ s->r.width = q_data->visible_width;
+ s->r.height = q_data->visible_height;
+ }
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (ctx->state < MTK_STATE_HEADER) {
+ /* set to default value if header info is not ready yet */
+ s->r.left = 0;
+ s->r.top = 0;
+ s->r.width = q_data->visible_width;
+ s->r.height = q_data->visible_height;
+ return 0;
+ }
+
+ return 0;
+}
+
+static int vidioc_vdec_s_selection(struct file *file, void *priv,
+ struct v4l2_selection *s)
+{
+ struct mtk_vcodec_ctx *ctx = fh_to_ctx(priv);
+
+ if (s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ return -EINVAL;
+
+ switch (s->target) {
+ case V4L2_SEL_TGT_COMPOSE:
+ s->r.left = 0;
+ s->r.top = 0;
+ s->r.width = ctx->picinfo.pic_w;
+ s->r.height = ctx->picinfo.pic_h;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int vidioc_vdec_s_fmt(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct mtk_vcodec_ctx *ctx = fh_to_ctx(priv);
+ struct v4l2_pix_format_mplane *pix_mp;
+ struct mtk_q_data *q_data;
+ int ret = 0;
+ struct mtk_video_fmt *fmt;
+
+ mtk_v4l2_debug(3, "[%d]", ctx->id);
+
+ q_data = mtk_vdec_get_q_data(ctx, f->type);
+ if (!q_data)
+ return -EINVAL;
+
+ pix_mp = &f->fmt.pix_mp;
+ if ((f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) &&
+ vb2_is_busy(&ctx->m2m_ctx->out_q_ctx.q)) {
+ mtk_v4l2_err("out_q_ctx buffers already requested");
+ ret = -EBUSY;
+ }
+
+ if ((f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) &&
+ vb2_is_busy(&ctx->m2m_ctx->cap_q_ctx.q)) {
+ mtk_v4l2_err("cap_q_ctx buffers already requested");
+ ret = -EBUSY;
+ }
+
+ fmt = mtk_vdec_find_format(f);
+ if (fmt == NULL) {
+ if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+ f->fmt.pix.pixelformat =
+ mtk_video_formats[OUT_FMT_IDX].fourcc;
+ fmt = mtk_vdec_find_format(f);
+ } else if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
+ f->fmt.pix.pixelformat =
+ mtk_video_formats[CAP_FMT_IDX].fourcc;
+ fmt = mtk_vdec_find_format(f);
+ }
+ }
+
+ q_data->fmt = fmt;
+ vidioc_try_fmt(f, q_data->fmt);
+ if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+ q_data->sizeimage[0] = pix_mp->plane_fmt[0].sizeimage;
+ q_data->coded_width = pix_mp->width;
+ q_data->coded_height = pix_mp->height;
+
+ ctx->colorspace = f->fmt.pix_mp.colorspace;
+ ctx->ycbcr_enc = f->fmt.pix_mp.ycbcr_enc;
+ ctx->quantization = f->fmt.pix_mp.quantization;
+ ctx->xfer_func = f->fmt.pix_mp.xfer_func;
+
+ if (ctx->state == MTK_STATE_FREE) {
+ ret = vdec_if_init(ctx, q_data->fmt->fourcc);
+ if (ret) {
+ mtk_v4l2_err("[%d]: vdec_if_init() fail ret=%d",
+ ctx->id, ret);
+ return -EINVAL;
+ }
+ ctx->state = MTK_STATE_INIT;
+ }
+ }
+
+ return 0;
+}
+
+static int vidioc_enum_framesizes(struct file *file, void *priv,
+ struct v4l2_frmsizeenum *fsize)
+{
+ int i = 0;
+ struct mtk_vcodec_ctx *ctx = fh_to_ctx(priv);
+
+ if (fsize->index != 0)
+ return -EINVAL;
+
+ for (i = 0; i < NUM_SUPPORTED_FRAMESIZE; ++i) {
+ if (fsize->pixel_format != mtk_vdec_framesizes[i].fourcc)
+ continue;
+
+ fsize->type = V4L2_FRMSIZE_TYPE_STEPWISE;
+ fsize->stepwise = mtk_vdec_framesizes[i].stepwise;
+ if (!(ctx->dev->dec_capability &
+ VCODEC_CAPABILITY_4K_DISABLED)) {
+ mtk_v4l2_debug(3, "4K is enabled");
+ fsize->stepwise.max_width =
+ VCODEC_DEC_4K_CODED_WIDTH;
+ fsize->stepwise.max_height =
+ VCODEC_DEC_4K_CODED_HEIGHT;
+ }
+ mtk_v4l2_debug(1, "%x, %d %d %d %d %d %d",
+ ctx->dev->dec_capability,
+ fsize->stepwise.min_width,
+ fsize->stepwise.max_width,
+ fsize->stepwise.step_width,
+ fsize->stepwise.min_height,
+ fsize->stepwise.max_height,
+ fsize->stepwise.step_height);
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static int vidioc_enum_fmt(struct v4l2_fmtdesc *f, bool output_queue)
+{
+ struct mtk_video_fmt *fmt;
+ int i, j = 0;
+
+ for (i = 0; i < NUM_FORMATS; i++) {
+ if (output_queue && (mtk_video_formats[i].type != MTK_FMT_DEC))
+ continue;
+ if (!output_queue &&
+ (mtk_video_formats[i].type != MTK_FMT_FRAME))
+ continue;
+
+ if (j == f->index)
+ break;
+ ++j;
+ }
+
+ if (i == NUM_FORMATS)
+ return -EINVAL;
+
+ fmt = &mtk_video_formats[i];
+ f->pixelformat = fmt->fourcc;
+
+ return 0;
+}
+
+static int vidioc_vdec_enum_fmt_vid_cap_mplane(struct file *file, void *pirv,
+ struct v4l2_fmtdesc *f)
+{
+ return vidioc_enum_fmt(f, false);
+}
+
+static int vidioc_vdec_enum_fmt_vid_out_mplane(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
+{
+ return vidioc_enum_fmt(f, true);
+}
+
+static int vidioc_vdec_g_fmt(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct mtk_vcodec_ctx *ctx = fh_to_ctx(priv);
+ struct v4l2_pix_format_mplane *pix_mp = &f->fmt.pix_mp;
+ struct vb2_queue *vq;
+ struct mtk_q_data *q_data;
+
+ vq = v4l2_m2m_get_vq(ctx->m2m_ctx, f->type);
+ if (!vq) {
+ mtk_v4l2_err("no vb2 queue for type=%d", f->type);
+ return -EINVAL;
+ }
+
+ q_data = mtk_vdec_get_q_data(ctx, f->type);
+
+ pix_mp->field = V4L2_FIELD_NONE;
+ pix_mp->colorspace = ctx->colorspace;
+ pix_mp->ycbcr_enc = ctx->ycbcr_enc;
+ pix_mp->quantization = ctx->quantization;
+ pix_mp->xfer_func = ctx->xfer_func;
+
+ if ((f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) &&
+ (ctx->state >= MTK_STATE_HEADER)) {
+ /* Until STREAMOFF is called on the CAPTURE queue
+ * (acknowledging the event), the driver operates as if
+ * the resolution hasn't changed yet.
+ * So we just return the current picinfo here and update
+ * it in the stop_streaming hook.
+ */
+ q_data->sizeimage[0] = ctx->picinfo.y_bs_sz +
+ ctx->picinfo.y_len_sz;
+ q_data->sizeimage[1] = ctx->picinfo.c_bs_sz +
+ ctx->picinfo.c_len_sz;
+ q_data->bytesperline[0] = ctx->last_decoded_picinfo.buf_w;
+ q_data->bytesperline[1] = ctx->last_decoded_picinfo.buf_w;
+ q_data->coded_width = ctx->picinfo.buf_w;
+ q_data->coded_height = ctx->picinfo.buf_h;
+
+ /*
+ * Width and height are set to the dimensions of
+ * the video; the buffer is larger, and further
+ * processing stages should crop to this
+ * rectangle.
+ */
+ pix_mp->width = q_data->coded_width;
+ pix_mp->height = q_data->coded_height;
+
+ /*
+ * Set pixelformat to the format in which mt vcodec
+ * outputs the decoded frame
+ */
+ pix_mp->num_planes = q_data->fmt->num_planes;
+ pix_mp->pixelformat = q_data->fmt->fourcc;
+ pix_mp->plane_fmt[0].bytesperline = q_data->bytesperline[0];
+ pix_mp->plane_fmt[0].sizeimage = q_data->sizeimage[0];
+ pix_mp->plane_fmt[1].bytesperline = q_data->bytesperline[1];
+ pix_mp->plane_fmt[1].sizeimage = q_data->sizeimage[1];
+
+ } else if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+ /*
+ * This is run on OUTPUT
+ * The buffer contains compressed image
+ * so width and height have no meaning.
+ * Assign value here to pass v4l2-compliance test
+ */
+ pix_mp->width = q_data->visible_width;
+ pix_mp->height = q_data->visible_height;
+ pix_mp->plane_fmt[0].bytesperline = q_data->bytesperline[0];
+ pix_mp->plane_fmt[0].sizeimage = q_data->sizeimage[0];
+ pix_mp->pixelformat = q_data->fmt->fourcc;
+ pix_mp->num_planes = q_data->fmt->num_planes;
+ } else {
+ pix_mp->width = q_data->coded_width;
+ pix_mp->height = q_data->coded_height;
+ pix_mp->num_planes = q_data->fmt->num_planes;
+ pix_mp->pixelformat = q_data->fmt->fourcc;
+ pix_mp->plane_fmt[0].bytesperline = q_data->bytesperline[0];
+ pix_mp->plane_fmt[0].sizeimage = q_data->sizeimage[0];
+ pix_mp->plane_fmt[1].bytesperline = q_data->bytesperline[1];
+ pix_mp->plane_fmt[1].sizeimage = q_data->sizeimage[1];
+
+ mtk_v4l2_debug(1, "[%d] type=%d state=%d Format information could not be read, not ready yet!",
+ ctx->id, f->type, ctx->state);
+ }
+
+ return 0;
+}
+
+static int vb2ops_vdec_queue_setup(struct vb2_queue *vq,
+ unsigned int *nbuffers,
+ unsigned int *nplanes,
+ unsigned int sizes[],
+ struct device *alloc_devs[])
+{
+ struct mtk_vcodec_ctx *ctx = vb2_get_drv_priv(vq);
+ struct mtk_q_data *q_data;
+ unsigned int i;
+
+ q_data = mtk_vdec_get_q_data(ctx, vq->type);
+
+ if (q_data == NULL) {
+ mtk_v4l2_err("vq->type=%d err\n", vq->type);
+ return -EINVAL;
+ }
+
+ if (*nplanes) {
+ for (i = 0; i < *nplanes; i++) {
+ if (sizes[i] < q_data->sizeimage[i])
+ return -EINVAL;
+ }
+ } else {
+ if (vq->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
+ *nplanes = 2;
+ else
+ *nplanes = 1;
+
+ for (i = 0; i < *nplanes; i++)
+ sizes[i] = q_data->sizeimage[i];
+ }
+
+ mtk_v4l2_debug(1,
+ "[%d]\t type = %d, get %d plane(s), %d buffer(s) of size 0x%x 0x%x ",
+ ctx->id, vq->type, *nplanes, *nbuffers,
+ sizes[0], sizes[1]);
+
+ return 0;
+}
+
+static int vb2ops_vdec_buf_prepare(struct vb2_buffer *vb)
+{
+ struct mtk_vcodec_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
+ struct mtk_q_data *q_data;
+ int i;
+
+ mtk_v4l2_debug(3, "[%d] (%d) id=%d",
+ ctx->id, vb->vb2_queue->type, vb->index);
+
+ q_data = mtk_vdec_get_q_data(ctx, vb->vb2_queue->type);
+
+ for (i = 0; i < q_data->fmt->num_planes; i++) {
+ if (vb2_plane_size(vb, i) < q_data->sizeimage[i]) {
+ mtk_v4l2_err("data will not fit into plane %d (%lu < %d)",
+ i, vb2_plane_size(vb, i),
+ q_data->sizeimage[i]);
+ }
+ }
+
+ return 0;
+}
+
+static void vb2ops_vdec_buf_queue(struct vb2_buffer *vb)
+{
+ struct vb2_buffer *src_buf;
+ struct mtk_vcodec_mem src_mem;
+ bool res_chg = false;
+ int ret = 0;
+ unsigned int dpbsize = 1;
+ struct mtk_vcodec_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
+ struct vb2_v4l2_buffer *vb2_v4l2 = container_of(vb,
+ struct vb2_v4l2_buffer, vb2_buf);
+ struct mtk_video_dec_buf *buf = container_of(vb2_v4l2,
+ struct mtk_video_dec_buf, vb);
+
+ mtk_v4l2_debug(3, "[%d] (%d) id=%d, vb=%p",
+ ctx->id, vb->vb2_queue->type,
+ vb->index, vb);
+ /*
+ * check if this buffer is ready to be used after decode
+ */
+ if (vb->vb2_queue->type != V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+ mutex_lock(&ctx->lock);
+ if (buf->used == false) {
+ v4l2_m2m_buf_queue(ctx->m2m_ctx,
+ to_vb2_v4l2_buffer(vb));
+ buf->queued_in_vb2 = true;
+ buf->queued_in_v4l2 = true;
+ buf->ready_to_display = false;
+ } else {
+ buf->queued_in_vb2 = false;
+ buf->queued_in_v4l2 = true;
+ buf->ready_to_display = false;
+ }
+ mutex_unlock(&ctx->lock);
+ return;
+ }
+
+ v4l2_m2m_buf_queue(ctx->m2m_ctx, vb2_v4l2);
+
+ if (ctx->state != MTK_STATE_INIT) {
+ mtk_v4l2_debug(3, "[%d] already init driver %d",
+ ctx->id, ctx->state);
+ return;
+ }
+
+ src_buf = v4l2_m2m_next_src_buf(ctx->m2m_ctx);
+ if (!src_buf) {
+ mtk_v4l2_err("No src buffer");
+ return;
+ }
+
+ src_mem.va = vb2_plane_vaddr(src_buf, 0);
+ src_mem.dma_addr = vb2_dma_contig_plane_dma_addr(src_buf, 0);
+ src_mem.size = (size_t)src_buf->planes[0].bytesused;
+ mtk_v4l2_debug(2,
+ "[%d] buf id=%d va=%p dma=%pad size=%zx",
+ ctx->id, src_buf->index,
+ src_mem.va, &src_mem.dma_addr,
+ src_mem.size);
+
+ ret = vdec_if_decode(ctx, &src_mem, NULL, &res_chg);
+ if (ret || !res_chg) {
+ /*
+ * fb == NULL means we only parse the SPS/PPS header or
+ * resolution info in src_mem. Decoding can fail if
+ * there is no SPS header or picture info in the
+ * bitstream.
+ */
+ int log_level = ret ? 0 : 1;
+
+ src_buf = v4l2_m2m_src_buf_remove(ctx->m2m_ctx);
+ v4l2_m2m_buf_done(to_vb2_v4l2_buffer(src_buf),
+ VB2_BUF_STATE_DONE);
+ mtk_v4l2_debug(log_level,
+ "[%d] vdec_if_decode() src_buf=%d, size=%zu, fail=%d, res_chg=%d",
+ ctx->id, src_buf->index,
+ src_mem.size, ret, res_chg);
+ return;
+ }
+
+ if (vdec_if_get_param(ctx, GET_PARAM_PIC_INFO, &ctx->picinfo)) {
+ mtk_v4l2_err("[%d]Error!! Cannot get param : GET_PARAM_PICTURE_INFO ERR",
+ ctx->id);
+ return;
+ }
+
+ ctx->last_decoded_picinfo = ctx->picinfo;
+ ctx->q_data[MTK_Q_DATA_DST].sizeimage[0] =
+ ctx->picinfo.y_bs_sz +
+ ctx->picinfo.y_len_sz;
+ ctx->q_data[MTK_Q_DATA_DST].bytesperline[0] =
+ ctx->picinfo.buf_w;
+ ctx->q_data[MTK_Q_DATA_DST].sizeimage[1] =
+ ctx->picinfo.c_bs_sz +
+ ctx->picinfo.c_len_sz;
+ ctx->q_data[MTK_Q_DATA_DST].bytesperline[1] = ctx->picinfo.buf_w;
+ mtk_v4l2_debug(2, "[%d] vdec_if_init() OK wxh=%dx%d pic wxh=%dx%d sz[0]=0x%x sz[1]=0x%x",
+ ctx->id,
+ ctx->picinfo.buf_w, ctx->picinfo.buf_h,
+ ctx->picinfo.pic_w, ctx->picinfo.pic_h,
+ ctx->q_data[MTK_Q_DATA_DST].sizeimage[0],
+ ctx->q_data[MTK_Q_DATA_DST].sizeimage[1]);
+
+ ret = vdec_if_get_param(ctx, GET_PARAM_DPB_SIZE, &dpbsize);
+ if (dpbsize == 0)
+ mtk_v4l2_err("[%d] GET_PARAM_DPB_SIZE fail=%d", ctx->id, ret);
+
+ ctx->dpb_size = dpbsize;
+ ctx->state = MTK_STATE_HEADER;
+ mtk_v4l2_debug(1, "[%d] dpbsize=%d", ctx->id, ctx->dpb_size);
+}
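+
+/*
+ * To summarize the OUTPUT path above: the first queued bitstream buffer
+ * is fed to vdec_if_decode() with fb == NULL purely to parse the header;
+ * on success the driver caches picinfo and the DPB size and moves the
+ * context to MTK_STATE_HEADER, which is exactly the state
+ * m2mops_vdec_job_ready() requires before scheduling decode jobs.
+ */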
+
+static void vb2ops_vdec_buf_finish(struct vb2_buffer *vb)
+{
+ struct mtk_vcodec_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
+ struct vb2_v4l2_buffer *vb2_v4l2;
+ struct mtk_video_dec_buf *buf;
+
+ if (vb->vb2_queue->type != V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
+ return;
+
+ vb2_v4l2 = container_of(vb, struct vb2_v4l2_buffer, vb2_buf);
+ buf = container_of(vb2_v4l2, struct mtk_video_dec_buf, vb);
+ mutex_lock(&ctx->lock);
+ buf->queued_in_v4l2 = false;
+ buf->queued_in_vb2 = false;
+ mutex_unlock(&ctx->lock);
+}
+
+static int vb2ops_vdec_buf_init(struct vb2_buffer *vb)
+{
+ struct vb2_v4l2_buffer *vb2_v4l2 = container_of(vb,
+ struct vb2_v4l2_buffer, vb2_buf);
+ struct mtk_video_dec_buf *buf = container_of(vb2_v4l2,
+ struct mtk_video_dec_buf, vb);
+
+ if (vb->vb2_queue->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
+ buf->used = false;
+ buf->ready_to_display = false;
+ buf->queued_in_v4l2 = false;
+ } else {
+ buf->lastframe = false;
+ }
+
+ return 0;
+}
+
+static int vb2ops_vdec_start_streaming(struct vb2_queue *q, unsigned int count)
+{
+ struct mtk_vcodec_ctx *ctx = vb2_get_drv_priv(q);
+
+ if (ctx->state == MTK_STATE_FLUSH)
+ ctx->state = MTK_STATE_HEADER;
+
+ return 0;
+}
+
+static void vb2ops_vdec_stop_streaming(struct vb2_queue *q)
+{
+ struct vb2_buffer *src_buf = NULL, *dst_buf = NULL;
+ struct mtk_vcodec_ctx *ctx = vb2_get_drv_priv(q);
+
+ mtk_v4l2_debug(3, "[%d] (%d) state=(%x) ctx->decoded_frame_cnt=%d",
+ ctx->id, q->type, ctx->state, ctx->decoded_frame_cnt);
+
+ if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+ while ((src_buf = v4l2_m2m_src_buf_remove(ctx->m2m_ctx)))
+ v4l2_m2m_buf_done(to_vb2_v4l2_buffer(src_buf),
+ VB2_BUF_STATE_ERROR);
+ return;
+ }
+
+ if (ctx->state >= MTK_STATE_HEADER) {
+
+ /* Until STREAMOFF is called on the CAPTURE queue
+ * (acknowledging the event), the driver operates
+ * as if the resolution hasn't changed yet, i.e.
+ * VIDIOC_G_FMT, etc. return the previous resolution.
+ * So we update picinfo here.
+ */
+ ctx->picinfo = ctx->last_decoded_picinfo;
+
+ mtk_v4l2_debug(2,
+ "[%d]-> new(%d,%d), old(%d,%d), real(%d,%d)",
+ ctx->id, ctx->last_decoded_picinfo.pic_w,
+ ctx->last_decoded_picinfo.pic_h,
+ ctx->picinfo.pic_w, ctx->picinfo.pic_h,
+ ctx->last_decoded_picinfo.buf_w,
+ ctx->last_decoded_picinfo.buf_h);
+
+ mtk_vdec_flush_decoder(ctx);
+ }
+ ctx->state = MTK_STATE_FLUSH;
+
+ while ((dst_buf = v4l2_m2m_dst_buf_remove(ctx->m2m_ctx))) {
+ vb2_set_plane_payload(dst_buf, 0, 0);
+ vb2_set_plane_payload(dst_buf, 1, 0);
+ v4l2_m2m_buf_done(to_vb2_v4l2_buffer(dst_buf),
+ VB2_BUF_STATE_ERROR);
+ }
+
+}
+
+static void m2mops_vdec_device_run(void *priv)
+{
+ struct mtk_vcodec_ctx *ctx = priv;
+ struct mtk_vcodec_dev *dev = ctx->dev;
+
+ queue_work(dev->decode_workqueue, &ctx->decode_work);
+}
+
+static int m2mops_vdec_job_ready(void *m2m_priv)
+{
+ struct mtk_vcodec_ctx *ctx = m2m_priv;
+
+ mtk_v4l2_debug(3, "[%d]", ctx->id);
+
+ if (ctx->state == MTK_STATE_ABORT)
+ return 0;
+
+ if ((ctx->last_decoded_picinfo.pic_w != ctx->picinfo.pic_w) ||
+ (ctx->last_decoded_picinfo.pic_h != ctx->picinfo.pic_h))
+ return 0;
+
+ if (ctx->state != MTK_STATE_HEADER)
+ return 0;
+
+ return 1;
+}
+
+static void m2mops_vdec_job_abort(void *priv)
+{
+ struct mtk_vcodec_ctx *ctx = priv;
+
+ ctx->state = MTK_STATE_ABORT;
+}
+
+static int mtk_vdec_g_v_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct mtk_vcodec_ctx *ctx = ctrl_to_ctx(ctrl);
+ int ret = 0;
+
+ switch (ctrl->id) {
+ case V4L2_CID_MIN_BUFFERS_FOR_CAPTURE:
+ if (ctx->state >= MTK_STATE_HEADER) {
+ ctrl->val = ctx->dpb_size;
+ } else {
+ mtk_v4l2_debug(0, "Seqinfo not ready");
+ ctrl->val = 0;
+ }
+ break;
+ default:
+ ret = -EINVAL;
+ }
+ return ret;
+}
+
+static const struct v4l2_ctrl_ops mtk_vcodec_dec_ctrl_ops = {
+ .g_volatile_ctrl = mtk_vdec_g_v_ctrl,
+};
+
+int mtk_vcodec_dec_ctrls_setup(struct mtk_vcodec_ctx *ctx)
+{
+ struct v4l2_ctrl *ctrl;
+
+ v4l2_ctrl_handler_init(&ctx->ctrl_hdl, 1);
+
+ ctrl = v4l2_ctrl_new_std(&ctx->ctrl_hdl,
+ &mtk_vcodec_dec_ctrl_ops,
+ V4L2_CID_MIN_BUFFERS_FOR_CAPTURE,
+ 0, 32, 1, 1);
+ ctrl->flags |= V4L2_CTRL_FLAG_VOLATILE;
+
+ if (ctx->ctrl_hdl.error) {
+ mtk_v4l2_err("Adding control failed %d",
+ ctx->ctrl_hdl.error);
+ return ctx->ctrl_hdl.error;
+ }
+
+ v4l2_ctrl_handler_setup(&ctx->ctrl_hdl);
+ return 0;
+}
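+
+/*
+ * How a client would use this control (a userspace sketch built only
+ * on the standard V4L2 uAPI; "extra" is the application's own margin,
+ * not anything defined here):
+ *
+ *   struct v4l2_control c = { .id = V4L2_CID_MIN_BUFFERS_FOR_CAPTURE };
+ *   ioctl(fd, VIDIOC_G_CTRL, &c);    - yields dpb_size once the ctx
+ *                                      reaches MTK_STATE_HEADER
+ *   reqbufs.count = c.value + extra; - then size the CAPTURE queue
+ */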
+
+static void m2mops_vdec_lock(void *m2m_priv)
+{
+ struct mtk_vcodec_ctx *ctx = m2m_priv;
+
+ mtk_v4l2_debug(3, "[%d]", ctx->id);
+ mutex_lock(&ctx->dev->dev_mutex);
+}
+
+static void m2mops_vdec_unlock(void *m2m_priv)
+{
+ struct mtk_vcodec_ctx *ctx = m2m_priv;
+
+ mtk_v4l2_debug(3, "[%d]", ctx->id);
+ mutex_unlock(&ctx->dev->dev_mutex);
+}
+
+const struct v4l2_m2m_ops mtk_vdec_m2m_ops = {
+ .device_run = m2mops_vdec_device_run,
+ .job_ready = m2mops_vdec_job_ready,
+ .job_abort = m2mops_vdec_job_abort,
+ .lock = m2mops_vdec_lock,
+ .unlock = m2mops_vdec_unlock,
+};
+
+static const struct vb2_ops mtk_vdec_vb2_ops = {
+ .queue_setup = vb2ops_vdec_queue_setup,
+ .buf_prepare = vb2ops_vdec_buf_prepare,
+ .buf_queue = vb2ops_vdec_buf_queue,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
+ .buf_init = vb2ops_vdec_buf_init,
+ .buf_finish = vb2ops_vdec_buf_finish,
+ .start_streaming = vb2ops_vdec_start_streaming,
+ .stop_streaming = vb2ops_vdec_stop_streaming,
+};
+
+const struct v4l2_ioctl_ops mtk_vdec_ioctl_ops = {
+ .vidioc_streamon = v4l2_m2m_ioctl_streamon,
+ .vidioc_streamoff = v4l2_m2m_ioctl_streamoff,
+ .vidioc_reqbufs = v4l2_m2m_ioctl_reqbufs,
+ .vidioc_querybuf = v4l2_m2m_ioctl_querybuf,
+ .vidioc_expbuf = v4l2_m2m_ioctl_expbuf,
+
+ .vidioc_qbuf = vidioc_vdec_qbuf,
+ .vidioc_dqbuf = vidioc_vdec_dqbuf,
+
+ .vidioc_try_fmt_vid_cap_mplane = vidioc_try_fmt_vid_cap_mplane,
+ .vidioc_try_fmt_vid_out_mplane = vidioc_try_fmt_vid_out_mplane,
+
+ .vidioc_s_fmt_vid_cap_mplane = vidioc_vdec_s_fmt,
+ .vidioc_s_fmt_vid_out_mplane = vidioc_vdec_s_fmt,
+ .vidioc_g_fmt_vid_cap_mplane = vidioc_vdec_g_fmt,
+ .vidioc_g_fmt_vid_out_mplane = vidioc_vdec_g_fmt,
+
+ .vidioc_create_bufs = v4l2_m2m_ioctl_create_bufs,
+
+ .vidioc_enum_fmt_vid_cap_mplane = vidioc_vdec_enum_fmt_vid_cap_mplane,
+ .vidioc_enum_fmt_vid_out_mplane = vidioc_vdec_enum_fmt_vid_out_mplane,
+ .vidioc_enum_framesizes = vidioc_enum_framesizes,
+
+ .vidioc_querycap = vidioc_vdec_querycap,
+ .vidioc_subscribe_event = vidioc_vdec_subscribe_evt,
+ .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
+ .vidioc_g_selection = vidioc_vdec_g_selection,
+ .vidioc_s_selection = vidioc_vdec_s_selection,
+};
+
+int mtk_vcodec_dec_queue_init(void *priv, struct vb2_queue *src_vq,
+ struct vb2_queue *dst_vq)
+{
+ struct mtk_vcodec_ctx *ctx = priv;
+ int ret = 0;
+
+ mtk_v4l2_debug(3, "[%d]", ctx->id);
+
+ src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
+ src_vq->io_modes = VB2_DMABUF | VB2_MMAP;
+ src_vq->drv_priv = ctx;
+ src_vq->buf_struct_size = sizeof(struct mtk_video_dec_buf);
+ src_vq->ops = &mtk_vdec_vb2_ops;
+ src_vq->mem_ops = &vb2_dma_contig_memops;
+ src_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+ src_vq->lock = &ctx->dev->dev_mutex;
+ src_vq->dev = &ctx->dev->plat_dev->dev;
+
+ ret = vb2_queue_init(src_vq);
+ if (ret) {
+ mtk_v4l2_err("Failed to initialize videobuf2 queue(output)");
+ return ret;
+ }
+ dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
+ dst_vq->io_modes = VB2_DMABUF | VB2_MMAP;
+ dst_vq->drv_priv = ctx;
+ dst_vq->buf_struct_size = sizeof(struct mtk_video_dec_buf);
+ dst_vq->ops = &mtk_vdec_vb2_ops;
+ dst_vq->mem_ops = &vb2_dma_contig_memops;
+ dst_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+ dst_vq->lock = &ctx->dev->dev_mutex;
+ dst_vq->dev = &ctx->dev->plat_dev->dev;
+
+ ret = vb2_queue_init(dst_vq);
+ if (ret) {
+ vb2_queue_release(src_vq);
+ mtk_v4l2_err("Failed to initialize videobuf2 queue(capture)");
+ }
+
+ return ret;
+}
diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec.h b/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec.h
new file mode 100644
index 000000000000..362f5a85762e
--- /dev/null
+++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec.h
@@ -0,0 +1,88 @@
+/*
+ * Copyright (c) 2016 MediaTek Inc.
+ * Author: PC Chen <pc.chen@mediatek.com>
+ * Tiffany Lin <tiffany.lin@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _MTK_VCODEC_DEC_H_
+#define _MTK_VCODEC_DEC_H_
+
+#include <media/videobuf2-core.h>
+#include <media/videobuf2-v4l2.h>
+
+#define VCODEC_CAPABILITY_4K_DISABLED 0x10
+#define VCODEC_DEC_4K_CODED_WIDTH 4096U
+#define VCODEC_DEC_4K_CODED_HEIGHT 2304U
+#define MTK_VDEC_MAX_W 2048U
+#define MTK_VDEC_MAX_H 1088U
+
+#define MTK_VDEC_IRQ_STATUS_DEC_SUCCESS 0x10000
+
+/**
+ * struct vdec_fb - decoder frame buffer
+ * @base_y : Y plane memory info
+ * @base_c : C plane memory info
+ * @status : frame buffer status (vdec_fb_status)
+ */
+struct vdec_fb {
+ struct mtk_vcodec_mem base_y;
+ struct mtk_vcodec_mem base_c;
+ unsigned int status;
+};
+
+/**
+ * struct mtk_video_dec_buf - Private data related to each VB2 buffer.
+ * @vb: VB2 buffer
+ * @list: link list
+ * @used: Capture buffer contains decoded frame data and is kept in the
+ * codec data structure
+ * @ready_to_display: Capture buffer not displayed yet
+ * @queued_in_vb2: Capture buffer is queued in vb2
+ * @queued_in_v4l2: Capture buffer is in the v4l2 driver, but not in the
+ * vb2 queue yet
+ * @lastframe: Input buffer is the last buffer - EOS
+ * @frame_buffer: Decode status and buffer information of the capture buffer
+ *
+ * Note: this status information helps us track and debug the buffer state
+ */
+struct mtk_video_dec_buf {
+ struct vb2_v4l2_buffer vb;
+ struct list_head list;
+
+ bool used;
+ bool ready_to_display;
+ bool queued_in_vb2;
+ bool queued_in_v4l2;
+ bool lastframe;
+ struct vdec_fb frame_buffer;
+};
+
+extern const struct v4l2_ioctl_ops mtk_vdec_ioctl_ops;
+extern const struct v4l2_m2m_ops mtk_vdec_m2m_ops;
+
+
+/*
+ * mtk_vdec_lock/mtk_vdec_unlock are for a ctx instance to
+ * get/release the lock before/after accessing the decoder hw.
+ * mtk_vdec_lock takes the decoder hw lock and sets curr_ctx
+ * to the ctx instance that holds the lock.
+ */
+void mtk_vdec_unlock(struct mtk_vcodec_ctx *ctx);
+void mtk_vdec_lock(struct mtk_vcodec_ctx *ctx);
+int mtk_vcodec_dec_queue_init(void *priv, struct vb2_queue *src_vq,
+ struct vb2_queue *dst_vq);
+void mtk_vcodec_dec_set_default_params(struct mtk_vcodec_ctx *ctx);
+void mtk_vcodec_dec_release(struct mtk_vcodec_ctx *ctx);
+int mtk_vcodec_dec_ctrls_setup(struct mtk_vcodec_ctx *ctx);
+
+
+#endif /* _MTK_VCODEC_DEC_H_ */
diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_drv.c b/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_drv.c
new file mode 100644
index 000000000000..d48287c727f4
--- /dev/null
+++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_drv.c
@@ -0,0 +1,394 @@
+/*
+ * Copyright (c) 2016 MediaTek Inc.
+ * Author: PC Chen <pc.chen@mediatek.com>
+ * Tiffany Lin <tiffany.lin@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/of.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-mem2mem.h>
+#include <media/videobuf2-dma-contig.h>
+
+#include "mtk_vcodec_drv.h"
+#include "mtk_vcodec_dec.h"
+#include "mtk_vcodec_dec_pm.h"
+#include "mtk_vcodec_intr.h"
+#include "mtk_vcodec_util.h"
+#include "mtk_vpu.h"
+
+#define VDEC_HW_ACTIVE 0x10
+#define VDEC_IRQ_CFG 0x11
+#define VDEC_IRQ_CLR 0x10
+#define VDEC_IRQ_CFG_REG 0xa4
+
+module_param(mtk_v4l2_dbg_level, int, 0644);
+module_param(mtk_vcodec_dbg, bool, 0644);
+
+/* Wake up context wait_queue */
+static void wake_up_ctx(struct mtk_vcodec_ctx *ctx)
+{
+ ctx->int_cond = 1;
+ wake_up_interruptible(&ctx->queue);
+}
+
+static irqreturn_t mtk_vcodec_dec_irq_handler(int irq, void *priv)
+{
+ struct mtk_vcodec_dev *dev = priv;
+ struct mtk_vcodec_ctx *ctx;
+ u32 cg_status = 0;
+ unsigned int dec_done_status = 0;
+ void __iomem *vdec_misc_addr = dev->reg_base[VDEC_MISC] +
+ VDEC_IRQ_CFG_REG;
+
+ ctx = mtk_vcodec_get_curr_ctx(dev);
+
+ /* check if HW active or not */
+ cg_status = readl(dev->reg_base[0]);
+ if ((cg_status & VDEC_HW_ACTIVE) != 0) {
+ mtk_v4l2_err("DEC ISR, VDEC active is not 0x0 (0x%08x)",
+ cg_status);
+ return IRQ_HANDLED;
+ }
+
+ dec_done_status = readl(vdec_misc_addr);
+ ctx->irq_status = dec_done_status;
+ if ((dec_done_status & MTK_VDEC_IRQ_STATUS_DEC_SUCCESS) !=
+ MTK_VDEC_IRQ_STATUS_DEC_SUCCESS)
+ return IRQ_HANDLED;
+
+ /* clear interrupt */
+ writel((readl(vdec_misc_addr) | VDEC_IRQ_CFG),
+ dev->reg_base[VDEC_MISC] + VDEC_IRQ_CFG_REG);
+ writel((readl(vdec_misc_addr) & ~VDEC_IRQ_CLR),
+ dev->reg_base[VDEC_MISC] + VDEC_IRQ_CFG_REG);
+
+ wake_up_ctx(ctx);
+
+ mtk_v4l2_debug(3,
+ "mtk_vcodec_dec_irq_handler :wake up ctx %d, dec_done_status=%x",
+ ctx->id, dec_done_status);
+
+ return IRQ_HANDLED;
+}
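+
+/*
+ * Note on the acknowledge sequence above: the MISC status register is
+ * written back twice, first with VDEC_IRQ_CFG set and then with
+ * VDEC_IRQ_CLR cleared, which is how the interrupt line is de-asserted
+ * on this hardware (as implemented here; the bit-level semantics are
+ * not documented beyond that).
+ */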
+
+static void mtk_vcodec_dec_reset_handler(void *priv)
+{
+ struct mtk_vcodec_dev *dev = priv;
+ struct mtk_vcodec_ctx *ctx;
+
+ mtk_v4l2_err("Watchdog timeout!!");
+
+ mutex_lock(&dev->dev_mutex);
+ list_for_each_entry(ctx, &dev->ctx_list, list) {
+ ctx->state = MTK_STATE_ABORT;
+ mtk_v4l2_debug(0, "[%d] Change to state MTK_STATE_ERROR",
+ ctx->id);
+ }
+ mutex_unlock(&dev->dev_mutex);
+}
+
+static int fops_vcodec_open(struct file *file)
+{
+ struct mtk_vcodec_dev *dev = video_drvdata(file);
+ struct mtk_vcodec_ctx *ctx = NULL;
+ int ret = 0;
+
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ mutex_lock(&dev->dev_mutex);
+ ctx->id = dev->id_counter++;
+ v4l2_fh_init(&ctx->fh, video_devdata(file));
+ file->private_data = &ctx->fh;
+ v4l2_fh_add(&ctx->fh);
+ INIT_LIST_HEAD(&ctx->list);
+ ctx->dev = dev;
+ init_waitqueue_head(&ctx->queue);
+ mutex_init(&ctx->lock);
+
+ ctx->type = MTK_INST_DECODER;
+ ret = mtk_vcodec_dec_ctrls_setup(ctx);
+ if (ret) {
+ mtk_v4l2_err("Failed to setup mt vcodec controls");
+ goto err_ctrls_setup;
+ }
+ ctx->m2m_ctx = v4l2_m2m_ctx_init(dev->m2m_dev_dec, ctx,
+ &mtk_vcodec_dec_queue_init);
+ if (IS_ERR((__force void *)ctx->m2m_ctx)) {
+ ret = PTR_ERR((__force void *)ctx->m2m_ctx);
+ mtk_v4l2_err("Failed to v4l2_m2m_ctx_init() (%d)",
+ ret);
+ goto err_m2m_ctx_init;
+ }
+ mtk_vcodec_dec_set_default_params(ctx);
+
+ if (v4l2_fh_is_singular(&ctx->fh)) {
+ mtk_vcodec_dec_pw_on(&dev->pm);
+ /*
+ * vpu_load_firmware checks if it was loaded already and
+ * does nothing in that case
+ */
+ ret = vpu_load_firmware(dev->vpu_plat_dev);
+ if (ret < 0) {
+ /*
+ * vpu_load_firmware returns 0 on success;
+ * a negative value means the download failed
+ */
+ mtk_v4l2_err("vpu_load_firmware failed!");
+ goto err_load_fw;
+ }
+
+ dev->dec_capability =
+ vpu_get_vdec_hw_capa(dev->vpu_plat_dev);
+ mtk_v4l2_debug(0, "decoder capability %x", dev->dec_capability);
+ }
+
+ list_add(&ctx->list, &dev->ctx_list);
+
+ mutex_unlock(&dev->dev_mutex);
+ mtk_v4l2_debug(0, "%s decoder [%d]", dev_name(&dev->plat_dev->dev),
+ ctx->id);
+ return ret;
+
+ /* Deinit when failure occurred */
+err_load_fw:
+ v4l2_m2m_ctx_release(ctx->m2m_ctx);
+err_m2m_ctx_init:
+ v4l2_ctrl_handler_free(&ctx->ctrl_hdl);
+err_ctrls_setup:
+ v4l2_fh_del(&ctx->fh);
+ v4l2_fh_exit(&ctx->fh);
+ kfree(ctx);
+ mutex_unlock(&dev->dev_mutex);
+
+ return ret;
+}
+
+static int fops_vcodec_release(struct file *file)
+{
+ struct mtk_vcodec_dev *dev = video_drvdata(file);
+ struct mtk_vcodec_ctx *ctx = fh_to_ctx(file->private_data);
+
+ mtk_v4l2_debug(0, "[%d] decoder", ctx->id);
+ mutex_lock(&dev->dev_mutex);
+
+ /*
+ * Call v4l2_m2m_ctx_release before mtk_vcodec_dec_release. First, it
+ * makes sure the worker thread is not running after vdec_if_deinit.
+ * Second, the decoder will be flushed and all the buffers will be
+ * returned in stop_streaming.
+ */
+ v4l2_m2m_ctx_release(ctx->m2m_ctx);
+ mtk_vcodec_dec_release(ctx);
+
+ if (v4l2_fh_is_singular(&ctx->fh))
+ mtk_vcodec_dec_pw_off(&dev->pm);
+ v4l2_fh_del(&ctx->fh);
+ v4l2_fh_exit(&ctx->fh);
+ v4l2_ctrl_handler_free(&ctx->ctrl_hdl);
+
+ list_del_init(&ctx->list);
+ kfree(ctx);
+ mutex_unlock(&dev->dev_mutex);
+ return 0;
+}
+
+static const struct v4l2_file_operations mtk_vcodec_fops = {
+ .owner = THIS_MODULE,
+ .open = fops_vcodec_open,
+ .release = fops_vcodec_release,
+ .poll = v4l2_m2m_fop_poll,
+ .unlocked_ioctl = video_ioctl2,
+ .mmap = v4l2_m2m_fop_mmap,
+};
+
+static int mtk_vcodec_probe(struct platform_device *pdev)
+{
+ struct mtk_vcodec_dev *dev;
+ struct video_device *vfd_dec;
+ struct resource *res;
+ int i, ret;
+
+ dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
+ if (!dev)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&dev->ctx_list);
+ dev->plat_dev = pdev;
+
+ dev->vpu_plat_dev = vpu_get_plat_device(dev->plat_dev);
+ if (dev->vpu_plat_dev == NULL) {
+ mtk_v4l2_err("[VPU] vpu device in not ready");
+ return -EPROBE_DEFER;
+ }
+
+ vpu_wdt_reg_handler(dev->vpu_plat_dev, mtk_vcodec_dec_reset_handler,
+ dev, VPU_RST_DEC);
+
+ ret = mtk_vcodec_init_dec_pm(dev);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Failed to get mt vcodec clock source");
+ return ret;
+ }
+
+ for (i = 0; i < NUM_MAX_VDEC_REG_BASE; i++) {
+ res = platform_get_resource(pdev, IORESOURCE_MEM, i);
+ if (res == NULL) {
+ dev_err(&pdev->dev, "get memory resource failed.");
+ ret = -ENXIO;
+ goto err_res;
+ }
+ dev->reg_base[i] = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR((__force void *)dev->reg_base[i])) {
+ ret = PTR_ERR((__force void *)dev->reg_base[i]);
+ goto err_res;
+ }
+ mtk_v4l2_debug(2, "reg[%d] base=%p", i, dev->reg_base[i]);
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (res == NULL) {
+ dev_err(&pdev->dev, "failed to get irq resource");
+ ret = -ENOENT;
+ goto err_res;
+ }
+
+ dev->dec_irq = platform_get_irq(pdev, 0);
+ ret = devm_request_irq(&pdev->dev, dev->dec_irq,
+ mtk_vcodec_dec_irq_handler, 0, pdev->name, dev);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to install dev->dec_irq %d (%d)",
+ dev->dec_irq,
+ ret);
+ goto err_res;
+ }
+
+ disable_irq(dev->dec_irq);
+ mutex_init(&dev->dec_mutex);
+ mutex_init(&dev->dev_mutex);
+ spin_lock_init(&dev->irqlock);
+
+ snprintf(dev->v4l2_dev.name, sizeof(dev->v4l2_dev.name), "%s",
+ "[/MTK_V4L2_VDEC]");
+
+ ret = v4l2_device_register(&pdev->dev, &dev->v4l2_dev);
+ if (ret) {
+ mtk_v4l2_err("v4l2_device_register err=%d", ret);
+ goto err_res;
+ }
+
+ init_waitqueue_head(&dev->queue);
+
+ vfd_dec = video_device_alloc();
+ if (!vfd_dec) {
+ mtk_v4l2_err("Failed to allocate video device");
+ ret = -ENOMEM;
+ goto err_dec_alloc;
+ }
+ vfd_dec->fops = &mtk_vcodec_fops;
+ vfd_dec->ioctl_ops = &mtk_vdec_ioctl_ops;
+ vfd_dec->release = video_device_release;
+ vfd_dec->lock = &dev->dev_mutex;
+ vfd_dec->v4l2_dev = &dev->v4l2_dev;
+ vfd_dec->vfl_dir = VFL_DIR_M2M;
+ vfd_dec->device_caps = V4L2_CAP_VIDEO_M2M_MPLANE |
+ V4L2_CAP_STREAMING;
+
+ snprintf(vfd_dec->name, sizeof(vfd_dec->name), "%s",
+ MTK_VCODEC_DEC_NAME);
+ video_set_drvdata(vfd_dec, dev);
+ dev->vfd_dec = vfd_dec;
+ platform_set_drvdata(pdev, dev);
+
+ dev->m2m_dev_dec = v4l2_m2m_init(&mtk_vdec_m2m_ops);
+ if (IS_ERR((__force void *)dev->m2m_dev_dec)) {
+ mtk_v4l2_err("Failed to init mem2mem dec device");
+ ret = PTR_ERR((__force void *)dev->m2m_dev_dec);
+ goto err_dec_mem_init;
+ }
+
+ dev->decode_workqueue =
+ alloc_ordered_workqueue(MTK_VCODEC_DEC_NAME,
+ WQ_MEM_RECLAIM | WQ_FREEZABLE);
+ if (!dev->decode_workqueue) {
+ mtk_v4l2_err("Failed to create decode workqueue");
+ ret = -EINVAL;
+ goto err_event_workq;
+ }
+
+ ret = video_register_device(vfd_dec, VFL_TYPE_GRABBER, 0);
+ if (ret) {
+ mtk_v4l2_err("Failed to register video device");
+ goto err_dec_reg;
+ }
+
+ mtk_v4l2_debug(0, "decoder registered as /dev/video%d",
+ vfd_dec->num);
+
+ return 0;
+
+err_dec_reg:
+ destroy_workqueue(dev->decode_workqueue);
+err_event_workq:
+ v4l2_m2m_release(dev->m2m_dev_dec);
+err_dec_mem_init:
+ video_unregister_device(vfd_dec);
+err_dec_alloc:
+ v4l2_device_unregister(&dev->v4l2_dev);
+err_res:
+ mtk_vcodec_release_dec_pm(dev);
+ return ret;
+}
+
+static const struct of_device_id mtk_vcodec_match[] = {
+ {.compatible = "mediatek,mt8173-vcodec-dec",},
+ {},
+};
+
+MODULE_DEVICE_TABLE(of, mtk_vcodec_match);
+
+static int mtk_vcodec_dec_remove(struct platform_device *pdev)
+{
+ struct mtk_vcodec_dev *dev = platform_get_drvdata(pdev);
+
+ flush_workqueue(dev->decode_workqueue);
+ destroy_workqueue(dev->decode_workqueue);
+ if (dev->m2m_dev_dec)
+ v4l2_m2m_release(dev->m2m_dev_dec);
+
+ if (dev->vfd_dec)
+ video_unregister_device(dev->vfd_dec);
+
+ v4l2_device_unregister(&dev->v4l2_dev);
+ mtk_vcodec_release_dec_pm(dev);
+ return 0;
+}
+
+static struct platform_driver mtk_vcodec_dec_driver = {
+ .probe = mtk_vcodec_probe,
+ .remove = mtk_vcodec_dec_remove,
+ .driver = {
+ .name = MTK_VCODEC_DEC_NAME,
+ .of_match_table = mtk_vcodec_match,
+ },
+};
+
+module_platform_driver(mtk_vcodec_dec_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Mediatek video codec V4L2 decoder driver");
diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_pm.c b/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_pm.c
new file mode 100644
index 000000000000..79ca03ac449c
--- /dev/null
+++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_pm.c
@@ -0,0 +1,202 @@
+/*
+ * Copyright (c) 2016 MediaTek Inc.
+ * Author: Tiffany Lin <tiffany.lin@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+#include <linux/pm_runtime.h>
+#include <soc/mediatek/smi.h>
+
+#include "mtk_vcodec_dec_pm.h"
+#include "mtk_vcodec_util.h"
+#include "mtk_vpu.h"
+
+int mtk_vcodec_init_dec_pm(struct mtk_vcodec_dev *mtkdev)
+{
+ struct device_node *node;
+ struct platform_device *pdev;
+ struct mtk_vcodec_pm *pm;
+ int ret = 0;
+
+ pdev = mtkdev->plat_dev;
+ pm = &mtkdev->pm;
+ pm->mtkdev = mtkdev;
+ node = of_parse_phandle(pdev->dev.of_node, "mediatek,larb", 0);
+ if (!node) {
+ mtk_v4l2_err("of_parse_phandle mediatek,larb fail!");
+ return -ENODEV;
+ }
+
+ pdev = of_find_device_by_node(node);
+ if (WARN_ON(!pdev)) {
+ of_node_put(node);
+ return -ENODEV;
+ }
+ pm->larbvdec = &pdev->dev;
+ pdev = mtkdev->plat_dev;
+ pm->dev = &pdev->dev;
+
+ pm->vcodecpll = devm_clk_get(&pdev->dev, "vcodecpll");
+ if (IS_ERR(pm->vcodecpll)) {
+ mtk_v4l2_err("devm_clk_get vcodecpll fail");
+ ret = PTR_ERR(pm->vcodecpll);
+ }
+
+ pm->univpll_d2 = devm_clk_get(&pdev->dev, "univpll_d2");
+ if (IS_ERR(pm->univpll_d2)) {
+ mtk_v4l2_err("devm_clk_get univpll_d2 fail");
+ ret = PTR_ERR(pm->univpll_d2);
+ }
+
+ pm->clk_cci400_sel = devm_clk_get(&pdev->dev, "clk_cci400_sel");
+ if (IS_ERR(pm->clk_cci400_sel)) {
+ mtk_v4l2_err("devm_clk_get clk_cci400_sel fail");
+ ret = PTR_ERR(pm->clk_cci400_sel);
+ }
+
+ pm->vdec_sel = devm_clk_get(&pdev->dev, "vdec_sel");
+ if (IS_ERR(pm->vdec_sel)) {
+ mtk_v4l2_err("devm_clk_get vdec_sel fail");
+ ret = PTR_ERR(pm->vdec_sel);
+ }
+
+ pm->vdecpll = devm_clk_get(&pdev->dev, "vdecpll");
+ if (IS_ERR(pm->vdecpll)) {
+ mtk_v4l2_err("devm_clk_get vdecpll fail");
+ ret = PTR_ERR(pm->vdecpll);
+ }
+
+ pm->vencpll = devm_clk_get(&pdev->dev, "vencpll");
+ if (IS_ERR(pm->vencpll)) {
+ mtk_v4l2_err("devm_clk_get vencpll fail");
+ ret = PTR_ERR(pm->vencpll);
+ }
+
+ pm->venc_lt_sel = devm_clk_get(&pdev->dev, "venc_lt_sel");
+ if (IS_ERR(pm->venc_lt_sel)) {
+ mtk_v4l2_err("devm_clk_get venc_lt_sel fail");
+ ret = PTR_ERR(pm->venc_lt_sel);
+ }
+
+ pm->vdec_bus_clk_src = devm_clk_get(&pdev->dev, "vdec_bus_clk_src");
+ if (IS_ERR(pm->vdec_bus_clk_src)) {
+ mtk_v4l2_err("devm_clk_get vdec_bus_clk_src");
+ ret = PTR_ERR(pm->vdec_bus_clk_src);
+ }
+
+ pm_runtime_enable(&pdev->dev);
+
+ return ret;
+}
+
+void mtk_vcodec_release_dec_pm(struct mtk_vcodec_dev *dev)
+{
+ pm_runtime_disable(dev->pm.dev);
+}
+
+void mtk_vcodec_dec_pw_on(struct mtk_vcodec_pm *pm)
+{
+ int ret;
+
+ ret = pm_runtime_get_sync(pm->dev);
+ if (ret)
+ mtk_v4l2_err("pm_runtime_get_sync fail %d", ret);
+}
+
+void mtk_vcodec_dec_pw_off(struct mtk_vcodec_pm *pm)
+{
+ int ret;
+
+ ret = pm_runtime_put_sync(pm->dev);
+ if (ret)
+ mtk_v4l2_err("pm_runtime_put_sync fail %d", ret);
+}
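
One caveat worth noting: pm_runtime_get_sync() can return 1 when the device is already active, so the `if (ret)` checks above may log a spurious error. A defensive variant would treat only negative values as failure (an editorial sketch, not part of this patch):

/* Sketch: only negative pm_runtime return values indicate failure. */
static void mtk_vcodec_dec_pw_on_checked(struct mtk_vcodec_pm *pm)
{
        int ret = pm_runtime_get_sync(pm->dev);

        if (ret < 0)
                mtk_v4l2_err("pm_runtime_get_sync fail %d", ret);
}
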
+
+void mtk_vcodec_dec_clock_on(struct mtk_vcodec_pm *pm)
+{
+ int ret;
+
+ ret = clk_set_rate(pm->vcodecpll, 1482 * 1000000);
+ if (ret)
+ mtk_v4l2_err("clk_set_rate vcodecpll fail %d", ret);
+
+ ret = clk_set_rate(pm->vencpll, 800 * 1000000);
+ if (ret)
+ mtk_v4l2_err("clk_set_rate vencpll fail %d", ret);
+
+ ret = clk_prepare_enable(pm->vcodecpll);
+ if (ret)
+ mtk_v4l2_err("clk_prepare_enable vcodecpll fail %d", ret);
+
+ ret = clk_prepare_enable(pm->vencpll);
+ if (ret)
+ mtk_v4l2_err("clk_prepare_enable vencpll fail %d", ret);
+
+ ret = clk_prepare_enable(pm->vdec_bus_clk_src);
+ if (ret)
+ mtk_v4l2_err("clk_prepare_enable vdec_bus_clk_src fail %d",
+ ret);
+
+ ret = clk_prepare_enable(pm->venc_lt_sel);
+ if (ret)
+ mtk_v4l2_err("clk_prepare_enable venc_lt_sel fail %d", ret);
+
+ ret = clk_set_parent(pm->venc_lt_sel, pm->vdec_bus_clk_src);
+ if (ret)
+ mtk_v4l2_err("clk_set_parent venc_lt_sel vdec_bus_clk_src fail %d",
+ ret);
+
+ ret = clk_prepare_enable(pm->univpll_d2);
+ if (ret)
+ mtk_v4l2_err("clk_prepare_enable univpll_d2 fail %d", ret);
+
+ ret = clk_prepare_enable(pm->clk_cci400_sel);
+ if (ret)
+ mtk_v4l2_err("clk_prepare_enable clk_cci400_sel fail %d", ret);
+
+ ret = clk_set_parent(pm->clk_cci400_sel, pm->univpll_d2);
+ if (ret)
+ mtk_v4l2_err("clk_set_parent clk_cci400_sel univpll_d2 fail %d",
+ ret);
+
+ ret = clk_prepare_enable(pm->vdecpll);
+ if (ret)
+ mtk_v4l2_err("clk_prepare_enable vdecpll fail %d", ret);
+
+ ret = clk_prepare_enable(pm->vdec_sel);
+ if (ret)
+ mtk_v4l2_err("clk_prepare_enable vdec_sel fail %d", ret);
+
+ ret = clk_set_parent(pm->vdec_sel, pm->vdecpll);
+ if (ret)
+ mtk_v4l2_err("clk_set_parent vdec_sel vdecpll fail %d", ret);
+
+ ret = mtk_smi_larb_get(pm->larbvdec);
+ if (ret)
+ mtk_v4l2_err("mtk_smi_larb_get larbvdec fail %d", ret);
+
+}
+
+void mtk_vcodec_dec_clock_off(struct mtk_vcodec_pm *pm)
+{
+ mtk_smi_larb_put(pm->larbvdec);
+ clk_disable_unprepare(pm->vdec_sel);
+ clk_disable_unprepare(pm->vdecpll);
+ clk_disable_unprepare(pm->univpll_d2);
+ clk_disable_unprepare(pm->clk_cci400_sel);
+ clk_disable_unprepare(pm->venc_lt_sel);
+ clk_disable_unprepare(pm->vdec_bus_clk_src);
+ clk_disable_unprepare(pm->vencpll);
+ clk_disable_unprepare(pm->vcodecpll);
+}
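
The teardown above releases resources in roughly the reverse order of mtk_vcodec_dec_clock_on(), dropping the SMI larb first so no DMA is in flight when the clocks stop. A sketch of the expected bracketing around one hardware decode, using only the helpers defined in this file (the decode step itself is a placeholder):

/* Sketch: power/clock bracketing for a single decode session. */
static void mtk_vcodec_dec_session_sketch(struct mtk_vcodec_dev *dev)
{
        mtk_vcodec_dec_pw_on(&dev->pm);         /* runtime PM: power domain up */
        mtk_vcodec_dec_clock_on(&dev->pm);      /* PLL rates, gates, muxes, larb */
        /* ... program VDEC registers and run one frame here ... */
        mtk_vcodec_dec_clock_off(&dev->pm);     /* reverse-order teardown */
        mtk_vcodec_dec_pw_off(&dev->pm);
}
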
diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_pm.h b/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_pm.h
new file mode 100644
index 000000000000..86a7825353e3
--- /dev/null
+++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_pm.h
@@ -0,0 +1,28 @@
+/*
+ * Copyright (c) 2016 MediaTek Inc.
+ * Author: Tiffany Lin <tiffany.lin@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _MTK_VCODEC_DEC_PM_H_
+#define _MTK_VCODEC_DEC_PM_H_
+
+#include "mtk_vcodec_drv.h"
+
+int mtk_vcodec_init_dec_pm(struct mtk_vcodec_dev *dev);
+void mtk_vcodec_release_dec_pm(struct mtk_vcodec_dev *dev);
+
+void mtk_vcodec_dec_pw_on(struct mtk_vcodec_pm *pm);
+void mtk_vcodec_dec_pw_off(struct mtk_vcodec_pm *pm);
+void mtk_vcodec_dec_clock_on(struct mtk_vcodec_pm *pm);
+void mtk_vcodec_dec_clock_off(struct mtk_vcodec_pm *pm);
+
+#endif /* _MTK_VCODEC_DEC_PM_H_ */
diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_drv.h b/drivers/media/platform/mtk-vcodec/mtk_vcodec_drv.h
index c8eaa41c00e6..d7eb8ef855d2 100644
--- a/drivers/media/platform/mtk-vcodec/mtk_vcodec_drv.h
+++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_drv.h
@@ -22,13 +22,13 @@
#include <media/v4l2-device.h>
#include <media/v4l2-ioctl.h>
#include <media/videobuf2-core.h>
-
+#include "mtk_vcodec_util.h"
#define MTK_VCODEC_DRV_NAME "mtk_vcodec_drv"
+#define MTK_VCODEC_DEC_NAME "mtk-vcodec-dec"
#define MTK_VCODEC_ENC_NAME "mtk-vcodec-enc"
#define MTK_PLATFORM_STR "platform:mt8173"
-
#define MTK_VCODEC_MAX_PLANES 3
#define MTK_V4L2_BENCHMARK 0
#define WAIT_INTR_TIMEOUT_MS 1000
@@ -179,6 +179,9 @@ struct mtk_enc_params {
* struct mtk_vcodec_pm - Power management data structure
*/
struct mtk_vcodec_pm {
+ struct clk *vdec_bus_clk_src;
+ struct clk *vencpll;
+
struct clk *vcodecpll;
struct clk *univpll_d2;
struct clk *clk_cci400_sel;
@@ -196,6 +199,32 @@ struct mtk_vcodec_pm {
};
/**
+ * struct vdec_pic_info - picture size information
+ * @pic_w: picture width
+ * @pic_h: picture height
+ * @buf_w: picture buffer width (64 aligned up from pic_w)
+ * @buf_h: picture buffer height (64 aligned up from pic_h)
+ * @y_bs_sz: Y bitstream size
+ * @c_bs_sz: CbCr bitstream size
+ * @y_len_sz: additional size required to store decompress information for y
+ * plane
+ * @c_len_sz: additional size required to store decompress information for cbcr
+ * plane
+ * E.g. if the picture size is 176x144,
+ * the buffer size will be aligned to 176x160.
+ */
+struct vdec_pic_info {
+ unsigned int pic_w;
+ unsigned int pic_h;
+ unsigned int buf_w;
+ unsigned int buf_h;
+ unsigned int y_bs_sz;
+ unsigned int c_bs_sz;
+ unsigned int y_len_sz;
+ unsigned int c_len_sz;
+};
+
+/**
* struct mtk_vcodec_ctx - Context (instance) private data.
*
* @type: type of the instance - decoder or encoder
@@ -209,9 +238,12 @@ struct mtk_vcodec_pm {
* @state: state of the context
* @param_change: indicate encode parameter type
* @enc_params: encoding parameters
+ * @dec_if: hooked decoder driver interface
* @enc_if: hooked encoder driver interface
* @drv_handle: driver handle for specific decode/encode instance
*
+ * @picinfo: store picture info after header parsing
+ * @dpb_size: store dpb count after header parsing
* @int_cond: variable used by the waitqueue
* @int_type: type of the last interrupt
* @queue: waitqueue that can be used to wait for this context to
@@ -219,12 +251,16 @@ struct mtk_vcodec_pm {
* @irq_status: irq status
*
* @ctrl_hdl: handler for v4l2 framework
+ * @decode_work: worker for the decoding
* @encode_work: worker for the encoding
+ * @last_decoded_picinfo: pic information get from latest decode
*
* @colorspace: enum v4l2_colorspace; supplemental to pixelformat
* @ycbcr_enc: enum v4l2_ycbcr_encoding, Y'CbCr encoding
* @quantization: enum v4l2_quantization, colorspace quantization
* @xfer_func: enum v4l2_xfer_func, colorspace transfer function
+ * @lock: protect variables accessed by V4L2 threads and worker thread such as
+ * mtk_video_dec_buf.
*/
struct mtk_vcodec_ctx {
enum mtk_instance_type type;
@@ -239,28 +275,40 @@ struct mtk_vcodec_ctx {
enum mtk_encode_param param_change;
struct mtk_enc_params enc_params;
+ const struct vdec_common_if *dec_if;
const struct venc_common_if *enc_if;
unsigned long drv_handle;
+ struct vdec_pic_info picinfo;
+ int dpb_size;
+
int int_cond;
int int_type;
wait_queue_head_t queue;
unsigned int irq_status;
struct v4l2_ctrl_handler ctrl_hdl;
+ struct work_struct decode_work;
struct work_struct encode_work;
+ struct vdec_pic_info last_decoded_picinfo;
enum v4l2_colorspace colorspace;
enum v4l2_ycbcr_encoding ycbcr_enc;
enum v4l2_quantization quantization;
enum v4l2_xfer_func xfer_func;
+
+ int decoded_frame_cnt;
+ struct mutex lock;
+
};
/**
* struct mtk_vcodec_dev - driver data
* @v4l2_dev: V4L2 device to register video devices for.
+ * @vfd_dec: Video device for decoder
* @vfd_enc: Video device for encoder.
*
+ * @m2m_dev_dec: m2m device for decoder
* @m2m_dev_enc: m2m device for encoder.
* @plat_dev: platform device
* @vpu_plat_dev: mtk vpu platform device
@@ -271,7 +319,6 @@ struct mtk_vcodec_ctx {
* @reg_base: Mapped address of MTK Vcodec registers.
*
* @id_counter: used to identify current opened instance
- * @num_instances: counter of active MTK Vcodec instances
*
* @encode_workqueue: encode work queue
*
@@ -280,9 +327,11 @@ struct mtk_vcodec_ctx {
* @dev_mutex: video_device lock
* @queue: waitqueue for waiting for completion of device commands
*
+ * @dec_irq: decoder irq resource
* @enc_irq: h264 encoder irq resource
* @enc_lt_irq: vp8 encoder irq resource
*
+ * @dec_mutex: decoder hardware lock
* @enc_mutex: encoder hardware lock.
*
* @pm: power management control
@@ -291,8 +340,10 @@ struct mtk_vcodec_ctx {
*/
struct mtk_vcodec_dev {
struct v4l2_device v4l2_dev;
+ struct video_device *vfd_dec;
struct video_device *vfd_enc;
+ struct v4l2_m2m_dev *m2m_dev_dec;
struct v4l2_m2m_dev *m2m_dev_enc;
struct platform_device *plat_dev;
struct platform_device *vpu_plat_dev;
@@ -302,18 +353,19 @@ struct mtk_vcodec_dev {
void __iomem *reg_base[NUM_MAX_VCODEC_REG_BASE];
unsigned long id_counter;
- int num_instances;
+ struct workqueue_struct *decode_workqueue;
struct workqueue_struct *encode_workqueue;
-
int int_cond;
int int_type;
struct mutex dev_mutex;
wait_queue_head_t queue;
+ int dec_irq;
int enc_irq;
int enc_lt_irq;
+ struct mutex dec_mutex;
struct mutex enc_mutex;
struct mtk_vcodec_pm pm;
diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc_drv.c b/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc_drv.c
index 5cd2151431bf..aa81f3ce9463 100644
--- a/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc_drv.c
+++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc_drv.c
@@ -188,7 +188,6 @@ static int fops_vcodec_open(struct file *file)
mtk_v4l2_debug(2, "Create instance [%d]@%p m2m_ctx=%p ",
ctx->id, ctx, ctx->m2m_ctx);
- dev->num_instances++;
list_add(&ctx->list, &dev->ctx_list);
mutex_unlock(&dev->dev_mutex);
@@ -218,18 +217,13 @@ static int fops_vcodec_release(struct file *file)
mtk_v4l2_debug(1, "[%d] encoder", ctx->id);
mutex_lock(&dev->dev_mutex);
- /*
- * Call v4l2_m2m_ctx_release to make sure the worker thread is not
- * running after venc_if_deinit.
- */
- v4l2_m2m_ctx_release(ctx->m2m_ctx);
mtk_vcodec_enc_release(ctx);
v4l2_fh_del(&ctx->fh);
v4l2_fh_exit(&ctx->fh);
v4l2_ctrl_handler_free(&ctx->ctrl_hdl);
+ v4l2_m2m_ctx_release(ctx->m2m_ctx);
list_del_init(&ctx->list);
- dev->num_instances--;
kfree(ctx);
mutex_unlock(&dev->dev_mutex);
return 0;
diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_intr.c b/drivers/media/platform/mtk-vcodec/mtk_vcodec_intr.c
index 52e7e5c9afa0..113b2097f061 100644
--- a/drivers/media/platform/mtk-vcodec/mtk_vcodec_intr.c
+++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_intr.c
@@ -30,8 +30,7 @@ int mtk_vcodec_wait_for_done_ctx(struct mtk_vcodec_ctx *ctx, int command,
timeout_jiff = msecs_to_jiffies(timeout_ms);
ret = wait_event_interruptible_timeout(*waitqueue,
- (ctx->int_cond &&
- (ctx->int_type == command)),
+ ctx->int_cond,
timeout_jiff);
if (!ret) {
diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_util.c b/drivers/media/platform/mtk-vcodec/mtk_vcodec_util.c
index 5e3651372a3c..46768c056193 100644
--- a/drivers/media/platform/mtk-vcodec/mtk_vcodec_util.c
+++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_util.c
@@ -81,14 +81,37 @@ void mtk_vcodec_mem_free(struct mtk_vcodec_ctx *data,
return;
}
- dma_free_coherent(dev, size, mem->va, mem->dma_addr);
- mem->va = NULL;
- mem->dma_addr = 0;
- mem->size = 0;
-
mtk_v4l2_debug(3, "[%d] - va = %p", ctx->id, mem->va);
mtk_v4l2_debug(3, "[%d] - dma = 0x%lx", ctx->id,
(unsigned long)mem->dma_addr);
mtk_v4l2_debug(3, "[%d] size = 0x%lx", ctx->id, size);
+
+ dma_free_coherent(dev, size, mem->va, mem->dma_addr);
+ mem->va = NULL;
+ mem->dma_addr = 0;
+ mem->size = 0;
}
EXPORT_SYMBOL(mtk_vcodec_mem_free);
+
+void mtk_vcodec_set_curr_ctx(struct mtk_vcodec_dev *dev,
+ struct mtk_vcodec_ctx *ctx)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->irqlock, flags);
+ dev->curr_ctx = ctx;
+ spin_unlock_irqrestore(&dev->irqlock, flags);
+}
+EXPORT_SYMBOL(mtk_vcodec_set_curr_ctx);
+
+struct mtk_vcodec_ctx *mtk_vcodec_get_curr_ctx(struct mtk_vcodec_dev *dev)
+{
+ unsigned long flags;
+ struct mtk_vcodec_ctx *ctx;
+
+ spin_lock_irqsave(&dev->irqlock, flags);
+ ctx = dev->curr_ctx;
+ spin_unlock_irqrestore(&dev->irqlock, flags);
+ return ctx;
+}
+EXPORT_SYMBOL(mtk_vcodec_get_curr_ctx);
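
These two helpers let the interrupt handler, which cannot take the context mutexes, discover which context currently owns the hardware through a short irqlock-protected read. An illustrative consumer follows; the real mtk_vcodec_dec_irq_handler is introduced elsewhere in the series, so the details here are assumptions:

/* Illustrative sketch of an IRQ-side consumer of mtk_vcodec_get_curr_ctx(). */
static irqreturn_t sketch_dec_irq(int irq, void *priv)
{
        struct mtk_vcodec_dev *dev = priv;
        struct mtk_vcodec_ctx *ctx = mtk_vcodec_get_curr_ctx(dev);

        if (!ctx)
                return IRQ_HANDLED;     /* no active owner, nothing to wake */

        ctx->int_cond = 1;              /* satisfy mtk_vcodec_wait_for_done_ctx() */
        wake_up_interruptible(&ctx->queue);
        return IRQ_HANDLED;
}
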
diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_util.h b/drivers/media/platform/mtk-vcodec/mtk_vcodec_util.h
index d6345fc04840..7d55975d3185 100644
--- a/drivers/media/platform/mtk-vcodec/mtk_vcodec_util.h
+++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_util.h
@@ -26,6 +26,7 @@ struct mtk_vcodec_mem {
};
struct mtk_vcodec_ctx;
+struct mtk_vcodec_dev;
extern int mtk_v4l2_dbg_level;
extern bool mtk_vcodec_dbg;
@@ -84,4 +85,8 @@ int mtk_vcodec_mem_alloc(struct mtk_vcodec_ctx *data,
struct mtk_vcodec_mem *mem);
void mtk_vcodec_mem_free(struct mtk_vcodec_ctx *data,
struct mtk_vcodec_mem *mem);
+void mtk_vcodec_set_curr_ctx(struct mtk_vcodec_dev *dev,
+ struct mtk_vcodec_ctx *ctx);
+struct mtk_vcodec_ctx *mtk_vcodec_get_curr_ctx(struct mtk_vcodec_dev *dev);
+
#endif /* _MTK_VCODEC_UTIL_H_ */
diff --git a/drivers/media/platform/mtk-vcodec/vdec/vdec_h264_if.c b/drivers/media/platform/mtk-vcodec/vdec/vdec_h264_if.c
new file mode 100644
index 000000000000..57a842ff3097
--- /dev/null
+++ b/drivers/media/platform/mtk-vcodec/vdec/vdec_h264_if.c
@@ -0,0 +1,507 @@
+/*
+ * Copyright (c) 2016 MediaTek Inc.
+ * Author: PC Chen <pc.chen@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+
+#include "../vdec_drv_if.h"
+#include "../mtk_vcodec_util.h"
+#include "../mtk_vcodec_dec.h"
+#include "../mtk_vcodec_intr.h"
+#include "../vdec_vpu_if.h"
+#include "../vdec_drv_base.h"
+
+#define NAL_NON_IDR_SLICE 0x01
+#define NAL_IDR_SLICE 0x05
+#define NAL_H264_PPS 0x08
+#define NAL_TYPE(value) ((value) & 0x1F)
+
+#define BUF_PREDICTION_SZ (32 * 1024)
+
+#define MB_UNIT_LEN 16
+
+/* motion vector size (bytes) for every macro block */
+#define HW_MB_STORE_SZ 64
+
+#define H264_MAX_FB_NUM 17
+#define HDR_PARSING_BUF_SZ 1024
+
+/**
+ * struct h264_fb - h264 decode frame buffer information
+ * @vdec_fb_va : virtual address of struct vdec_fb
+ * @y_fb_dma : dma address of Y frame buffer (luma)
+ * @c_fb_dma : dma address of C frame buffer (chroma)
+ * @poc : picture order count of frame buffer
+ * @reserved : for 8 bytes alignment
+ */
+struct h264_fb {
+ uint64_t vdec_fb_va;
+ uint64_t y_fb_dma;
+ uint64_t c_fb_dma;
+ int32_t poc;
+ uint32_t reserved;
+};
+
+/**
+ * struct h264_ring_fb_list - ring frame buffer list
+ * @fb_list : frame buffer array
+ * @read_idx : read index
+ * @write_idx : write index
+ * @count : buffer count in list
+ */
+struct h264_ring_fb_list {
+ struct h264_fb fb_list[H264_MAX_FB_NUM];
+ unsigned int read_idx;
+ unsigned int write_idx;
+ unsigned int count;
+ unsigned int reserved;
+};
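
The ring is a fixed-capacity queue shared between the host and the VPU: read_idx and write_idx wrap at H264_MAX_FB_NUM and count tracks occupancy, as the put/get helpers later in this file show. The wrap-around step they both use, written out as a helper (a sketch, not code from this patch):

/* Sketch: wrap-around advance shared by read_idx and write_idx. */
static inline unsigned int h264_ring_advance(unsigned int idx)
{
        return (idx == H264_MAX_FB_NUM - 1) ? 0 : idx + 1;
}
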
+
+/**
+ * struct vdec_h264_dec_info - decode information
+ * @dpb_sz : decoding picture buffer size
+ * @resolution_changed : resolution change happened
+ * @realloc_mv_buf : flag to notify driver to re-allocate mv buffer
+ * @reserved : for 8 bytes alignment
+ * @bs_dma : Input bit-stream buffer dma address
+ * @y_fb_dma : Y frame buffer dma address
+ * @c_fb_dma : C frame buffer dma address
+ * @vdec_fb_va : VDEC frame buffer struct virtual address
+ */
+struct vdec_h264_dec_info {
+ uint32_t dpb_sz;
+ uint32_t resolution_changed;
+ uint32_t realloc_mv_buf;
+ uint32_t reserved;
+ uint64_t bs_dma;
+ uint64_t y_fb_dma;
+ uint64_t c_fb_dma;
+ uint64_t vdec_fb_va;
+};
+
+/**
+ * struct vdec_h264_vsi - shared memory for decode information exchange
+ * between VPU and Host.
+ * The memory is allocated by VPU and then mapped to Host
+ * in vpu_dec_init() and freed in vpu_dec_deinit()
+ * by VPU.
+ * AP-W/R : AP is writer/reader on this item
+ * VPU-W/R: VPU is write/reader on this item
+ * @hdr_buf : Header parsing buffer (AP-W, VPU-R)
+ * @pred_buf_dma : HW working prediction buffer dma address (AP-W, VPU-R)
+ * @mv_buf_dma : HW working motion vector buffer dma address (AP-W, VPU-R)
+ * @list_free : free frame buffer ring list (AP-W/R, VPU-W)
+ * @list_disp : display frame buffer ring list (AP-R, VPU-W)
+ * @dec : decode information (AP-R, VPU-W)
+ * @pic : picture information (AP-R, VPU-W)
+ * @crop : crop information (AP-R, VPU-W)
+ */
+struct vdec_h264_vsi {
+ unsigned char hdr_buf[HDR_PARSING_BUF_SZ];
+ uint64_t pred_buf_dma;
+ uint64_t mv_buf_dma[H264_MAX_FB_NUM];
+ struct h264_ring_fb_list list_free;
+ struct h264_ring_fb_list list_disp;
+ struct vdec_h264_dec_info dec;
+ struct vdec_pic_info pic;
+ struct v4l2_rect crop;
+};
+
+/**
+ * struct vdec_h264_inst - h264 decoder instance
+ * @num_nalu : number of NALUs decoded so far
+ * @ctx : point to mtk_vcodec_ctx
+ * @pred_buf : HW working prediction buffer
+ * @mv_buf : HW working motion vector buffer
+ * @vpu : VPU instance
+ * @vsi : VPU shared information
+ */
+struct vdec_h264_inst {
+ unsigned int num_nalu;
+ struct mtk_vcodec_ctx *ctx;
+ struct mtk_vcodec_mem pred_buf;
+ struct mtk_vcodec_mem mv_buf[H264_MAX_FB_NUM];
+ struct vdec_vpu_inst vpu;
+ struct vdec_h264_vsi *vsi;
+};
+
+static unsigned int get_mv_buf_size(unsigned int width, unsigned int height)
+{
+ return HW_MB_STORE_SZ * (width/MB_UNIT_LEN) * (height/MB_UNIT_LEN);
+}
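
A worked example of this sizing, assuming a 1920x1088 coded buffer:

/* (1920 / 16) * (1088 / 16) = 120 * 68 = 8160 macroblocks, so
 * get_mv_buf_size(1920, 1088) = 64 * 8160 = 522240 bytes (~510 KiB)
 * of motion vector storage per frame buffer. */
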
+
+static int allocate_predication_buf(struct vdec_h264_inst *inst)
+{
+ int err = 0;
+
+ inst->pred_buf.size = BUF_PREDICTION_SZ;
+ err = mtk_vcodec_mem_alloc(inst->ctx, &inst->pred_buf);
+ if (err) {
+ mtk_vcodec_err(inst, "failed to allocate ppl buf");
+ return err;
+ }
+
+ inst->vsi->pred_buf_dma = inst->pred_buf.dma_addr;
+ return 0;
+}
+
+static void free_predication_buf(struct vdec_h264_inst *inst)
+{
+ struct mtk_vcodec_mem *mem = NULL;
+
+ mtk_vcodec_debug_enter(inst);
+
+ inst->vsi->pred_buf_dma = 0;
+ mem = &inst->pred_buf;
+ if (mem->va)
+ mtk_vcodec_mem_free(inst->ctx, mem);
+}
+
+static int alloc_mv_buf(struct vdec_h264_inst *inst, struct vdec_pic_info *pic)
+{
+ int i;
+ int err;
+ struct mtk_vcodec_mem *mem = NULL;
+ unsigned int buf_sz = get_mv_buf_size(pic->buf_w, pic->buf_h);
+
+ for (i = 0; i < H264_MAX_FB_NUM; i++) {
+ mem = &inst->mv_buf[i];
+ if (mem->va)
+ mtk_vcodec_mem_free(inst->ctx, mem);
+ mem->size = buf_sz;
+ err = mtk_vcodec_mem_alloc(inst->ctx, mem);
+ if (err) {
+ mtk_vcodec_err(inst, "failed to allocate mv buf");
+ return err;
+ }
+ inst->vsi->mv_buf_dma[i] = mem->dma_addr;
+ }
+
+ return 0;
+}
+
+static void free_mv_buf(struct vdec_h264_inst *inst)
+{
+ int i;
+ struct mtk_vcodec_mem *mem = NULL;
+
+ for (i = 0; i < H264_MAX_FB_NUM; i++) {
+ inst->vsi->mv_buf_dma[i] = 0;
+ mem = &inst->mv_buf[i];
+ if (mem->va)
+ mtk_vcodec_mem_free(inst->ctx, mem);
+ }
+}
+
+static int check_list_validity(struct vdec_h264_inst *inst, bool disp_list)
+{
+ struct h264_ring_fb_list *list;
+
+ list = disp_list ? &inst->vsi->list_disp : &inst->vsi->list_free;
+
+ if (list->count > H264_MAX_FB_NUM ||
+ list->read_idx >= H264_MAX_FB_NUM ||
+ list->write_idx >= H264_MAX_FB_NUM) {
+ mtk_vcodec_err(inst, "%s list err: cnt=%d r_idx=%d w_idx=%d",
+ disp_list ? "disp" : "free", list->count,
+ list->read_idx, list->write_idx);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void put_fb_to_free(struct vdec_h264_inst *inst, struct vdec_fb *fb)
+{
+ struct h264_ring_fb_list *list;
+
+ if (fb) {
+ if (check_list_validity(inst, false))
+ return;
+
+ list = &inst->vsi->list_free;
+ if (list->count == H264_MAX_FB_NUM) {
+ mtk_vcodec_err(inst, "[FB] put fb free_list full");
+ return;
+ }
+
+ mtk_vcodec_debug(inst, "[FB] put fb into free_list @(%p, %llx)",
+ fb->base_y.va, (u64)fb->base_y.dma_addr);
+
+ list->fb_list[list->write_idx].vdec_fb_va = (u64)(uintptr_t)fb;
+ list->write_idx = (list->write_idx == H264_MAX_FB_NUM - 1) ?
+ 0 : list->write_idx + 1;
+ list->count++;
+ }
+}
+
+static void get_pic_info(struct vdec_h264_inst *inst,
+ struct vdec_pic_info *pic)
+{
+ *pic = inst->vsi->pic;
+ mtk_vcodec_debug(inst, "pic(%d, %d), buf(%d, %d)",
+ pic->pic_w, pic->pic_h, pic->buf_w, pic->buf_h);
+ mtk_vcodec_debug(inst, "Y(%d, %d), C(%d, %d)", pic->y_bs_sz,
+ pic->y_len_sz, pic->c_bs_sz, pic->c_len_sz);
+}
+
+static void get_crop_info(struct vdec_h264_inst *inst, struct v4l2_rect *cr)
+{
+ cr->left = inst->vsi->crop.left;
+ cr->top = inst->vsi->crop.top;
+ cr->width = inst->vsi->crop.width;
+ cr->height = inst->vsi->crop.height;
+
+ mtk_vcodec_debug(inst, "l=%d, t=%d, w=%d, h=%d",
+ cr->left, cr->top, cr->width, cr->height);
+}
+
+static void get_dpb_size(struct vdec_h264_inst *inst, unsigned int *dpb_sz)
+{
+ *dpb_sz = inst->vsi->dec.dpb_sz;
+ mtk_vcodec_debug(inst, "sz=%d", *dpb_sz);
+}
+
+static int vdec_h264_init(struct mtk_vcodec_ctx *ctx, unsigned long *h_vdec)
+{
+ struct vdec_h264_inst *inst = NULL;
+ int err;
+
+ inst = kzalloc(sizeof(*inst), GFP_KERNEL);
+ if (!inst)
+ return -ENOMEM;
+
+ inst->ctx = ctx;
+
+ inst->vpu.id = IPI_VDEC_H264;
+ inst->vpu.dev = ctx->dev->vpu_plat_dev;
+ inst->vpu.ctx = ctx;
+ inst->vpu.handler = vpu_dec_ipi_handler;
+
+ err = vpu_dec_init(&inst->vpu);
+ if (err) {
+ mtk_vcodec_err(inst, "vdec_h264 init err=%d", err);
+ goto error_free_inst;
+ }
+
+ inst->vsi = (struct vdec_h264_vsi *)inst->vpu.vsi;
+ err = allocate_predication_buf(inst);
+ if (err)
+ goto error_deinit;
+
+ mtk_vcodec_debug(inst, "H264 Instance >> %p", inst);
+
+ *h_vdec = (unsigned long)inst;
+ return 0;
+
+error_deinit:
+ vpu_dec_deinit(&inst->vpu);
+
+error_free_inst:
+ kfree(inst);
+ return err;
+}
+
+static void vdec_h264_deinit(unsigned long h_vdec)
+{
+ struct vdec_h264_inst *inst = (struct vdec_h264_inst *)h_vdec;
+
+ mtk_vcodec_debug_enter(inst);
+
+ vpu_dec_deinit(&inst->vpu);
+ free_predication_buf(inst);
+ free_mv_buf(inst);
+
+ kfree(inst);
+}
+
+static int find_start_code(unsigned char *data, unsigned int data_sz)
+{
+ if (data_sz > 3 && data[0] == 0 && data[1] == 0 && data[2] == 1)
+ return 3;
+
+ if (data_sz > 4 && data[0] == 0 && data[1] == 0 && data[2] == 0 &&
+ data[3] == 1)
+ return 4;
+
+ return -1;
+}
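
A small usage sketch tying find_start_code() and NAL_TYPE() together; the 0x65 byte is an illustrative IDR slice header, not a value taken from this patch:

/* Sketch: classify the first NAL unit in an Annex-B buffer. */
static int sketch_first_nal_type(unsigned char *data, unsigned int sz)
{
        int idx = find_start_code(data, sz);

        if (idx < 0)
                return -1;              /* no 00 00 01 / 00 00 00 01 prefix */
        return NAL_TYPE(data[idx]);     /* e.g. 0x65 & 0x1F = 5, an IDR slice */
}
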
+
+static int vdec_h264_decode(unsigned long h_vdec, struct mtk_vcodec_mem *bs,
+ struct vdec_fb *fb, bool *res_chg)
+{
+ struct vdec_h264_inst *inst = (struct vdec_h264_inst *)h_vdec;
+ struct vdec_vpu_inst *vpu = &inst->vpu;
+ int nal_start_idx = 0;
+ int err = 0;
+ unsigned int nal_start;
+ unsigned int nal_type;
+ unsigned char *buf;
+ unsigned int buf_sz;
+ unsigned int data[2];
+ uint64_t vdec_fb_va = (u64)(uintptr_t)fb;
+ uint64_t y_fb_dma = fb ? (u64)fb->base_y.dma_addr : 0;
+ uint64_t c_fb_dma = fb ? (u64)fb->base_c.dma_addr : 0;
+
+ mtk_vcodec_debug(inst, "+ [%d] FB y_dma=%llx c_dma=%llx va=%p",
+ ++inst->num_nalu, y_fb_dma, c_fb_dma, fb);
+
+ /* bs NULL means flush decoder */
+ if (bs == NULL)
+ return vpu_dec_reset(vpu);
+
+ buf = (unsigned char *)bs->va;
+ buf_sz = bs->size;
+ nal_start_idx = find_start_code(buf, buf_sz);
+ if (nal_start_idx < 0)
+ goto err_free_fb_out;
+
+ nal_start = buf[nal_start_idx];
+ nal_type = NAL_TYPE(buf[nal_start_idx]);
+ mtk_vcodec_debug(inst, "\n + NALU[%d] type %d +\n", inst->num_nalu,
+ nal_type);
+
+ if (nal_type == NAL_H264_PPS) {
+ buf_sz -= nal_start_idx;
+ if (buf_sz > HDR_PARSING_BUF_SZ) {
+ err = -EILSEQ;
+ goto err_free_fb_out;
+ }
+ memcpy(inst->vsi->hdr_buf, buf + nal_start_idx, buf_sz);
+ }
+
+ inst->vsi->dec.bs_dma = (uint64_t)bs->dma_addr;
+ inst->vsi->dec.y_fb_dma = y_fb_dma;
+ inst->vsi->dec.c_fb_dma = c_fb_dma;
+ inst->vsi->dec.vdec_fb_va = vdec_fb_va;
+
+ data[0] = buf_sz;
+ data[1] = nal_start;
+ err = vpu_dec_start(vpu, data, 2);
+ if (err)
+ goto err_free_fb_out;
+
+ *res_chg = inst->vsi->dec.resolution_changed;
+ if (*res_chg) {
+ struct vdec_pic_info pic;
+
+ mtk_vcodec_debug(inst, "- resolution changed -");
+ get_pic_info(inst, &pic);
+
+ if (inst->vsi->dec.realloc_mv_buf) {
+ err = alloc_mv_buf(inst, &pic);
+ if (err)
+ goto err_free_fb_out;
+ }
+ }
+
+ if (nal_type == NAL_NON_IDR_SLICE || nal_type == NAL_IDR_SLICE) {
+ /* wait decoder done interrupt */
+ err = mtk_vcodec_wait_for_done_ctx(inst->ctx,
+ MTK_INST_IRQ_RECEIVED,
+ WAIT_INTR_TIMEOUT_MS);
+ if (err)
+ goto err_free_fb_out;
+
+ vpu_dec_end(vpu);
+ }
+
+ mtk_vcodec_debug(inst, "\n - NALU[%d] type=%d -\n", inst->num_nalu,
+ nal_type);
+ return 0;
+
+err_free_fb_out:
+ put_fb_to_free(inst, fb);
+ mtk_vcodec_err(inst, "\n - NALU[%d] err=%d -\n", inst->num_nalu, err);
+ return err;
+}
+
+static void vdec_h264_get_fb(struct vdec_h264_inst *inst,
+ struct h264_ring_fb_list *list,
+ bool disp_list, struct vdec_fb **out_fb)
+{
+ struct vdec_fb *fb;
+
+ if (check_list_validity(inst, disp_list))
+ return;
+
+ if (list->count == 0) {
+ mtk_vcodec_debug(inst, "[FB] there is no %s fb",
+ disp_list ? "disp" : "free");
+ *out_fb = NULL;
+ return;
+ }
+
+ fb = (struct vdec_fb *)
+ (uintptr_t)list->fb_list[list->read_idx].vdec_fb_va;
+ fb->status |= (disp_list ? FB_ST_DISPLAY : FB_ST_FREE);
+
+ *out_fb = fb;
+ mtk_vcodec_debug(inst, "[FB] get %s fb st=%d poc=%d %llx",
+ disp_list ? "disp" : "free",
+ fb->status, list->fb_list[list->read_idx].poc,
+ list->fb_list[list->read_idx].vdec_fb_va);
+
+ list->read_idx = (list->read_idx == H264_MAX_FB_NUM - 1) ?
+ 0 : list->read_idx + 1;
+ list->count--;
+}
+
+static int vdec_h264_get_param(unsigned long h_vdec,
+ enum vdec_get_param_type type, void *out)
+{
+ struct vdec_h264_inst *inst = (struct vdec_h264_inst *)h_vdec;
+
+ switch (type) {
+ case GET_PARAM_DISP_FRAME_BUFFER:
+ vdec_h264_get_fb(inst, &inst->vsi->list_disp, true, out);
+ break;
+
+ case GET_PARAM_FREE_FRAME_BUFFER:
+ vdec_h264_get_fb(inst, &inst->vsi->list_free, false, out);
+ break;
+
+ case GET_PARAM_PIC_INFO:
+ get_pic_info(inst, out);
+ break;
+
+ case GET_PARAM_DPB_SIZE:
+ get_dpb_size(inst, out);
+ break;
+
+ case GET_PARAM_CROP_INFO:
+ get_crop_info(inst, out);
+ break;
+
+ default:
+ mtk_vcodec_err(inst, "invalid get parameter type=%d", type);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static struct vdec_common_if vdec_h264_if = {
+ vdec_h264_init,
+ vdec_h264_decode,
+ vdec_h264_get_param,
+ vdec_h264_deinit,
+};
+
+struct vdec_common_if *get_h264_dec_comm_if(void);
+
+struct vdec_common_if *get_h264_dec_comm_if(void)
+{
+ return &vdec_h264_if;
+}
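
Because struct vdec_common_if is initialized positionally above, the ops are, in order, init, decode, get_param and deinit. A hedged sketch of how the generic vdec_drv_if layer (not part of this hunk) would dispatch through it; the member names are assumed from that positional order:

/* Illustrative dispatch sketch; the real wrappers live in vdec_drv_if.c. */
static int sketch_vdec_init(struct mtk_vcodec_ctx *ctx)
{
        ctx->dec_if = get_h264_dec_comm_if();
        return ctx->dec_if->init(ctx, &ctx->drv_handle);
}
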
diff --git a/drivers/media/platform/mtk-vcodec/vdec/vdec_vp8_if.c b/drivers/media/platform/mtk-vcodec/vdec/vdec_vp8_if.c
new file mode 100644
index 000000000000..6e7a62ae0842
--- /dev/null
+++ b/drivers/media/platform/mtk-vcodec/vdec/vdec_vp8_if.c
@@ -0,0 +1,634 @@
+/*
+ * Copyright (c) 2016 MediaTek Inc.
+ * Author: Jungchang Tsao <jungchang.tsao@mediatek.com>
+ * PC Chen <pc.chen@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/slab.h>
+#include "../vdec_drv_if.h"
+#include "../mtk_vcodec_util.h"
+#include "../mtk_vcodec_dec.h"
+#include "../mtk_vcodec_intr.h"
+#include "../vdec_vpu_if.h"
+#include "../vdec_drv_base.h"
+
+/* Decoding picture buffer size (3 reference frames plus current frame) */
+#define VP8_DPB_SIZE 4
+
+/* HW working buffer size (bytes) */
+#define VP8_WORKING_BUF_SZ (45 * 4096)
+
+/* HW control register address */
+#define VP8_SEGID_DRAM_ADDR 0x3c
+#define VP8_HW_VLD_ADDR 0x93C
+#define VP8_HW_VLD_VALUE 0x940
+#define VP8_BSASET 0x100
+#define VP8_BSDSET 0x104
+#define VP8_RW_CKEN_SET 0x0
+#define VP8_RW_DCM_CON 0x18
+#define VP8_WO_VLD_SRST 0x108
+#define VP8_RW_MISC_SYS_SEL 0x84
+#define VP8_RW_MISC_SPEC_CON 0xC8
+#define VP8_WO_VLD_SRST 0x108
+#define VP8_RW_VP8_CTRL 0xA4
+#define VP8_RW_MISC_DCM_CON 0xEC
+#define VP8_RW_MISC_SRST 0xF4
+#define VP8_RW_MISC_FUNC_CON 0xCC
+
+#define VP8_MAX_FRM_BUF_NUM 5
+#define VP8_MAX_FRM_BUF_NODE_NUM (VP8_MAX_FRM_BUF_NUM * 2)
+
+/* required buffer size (bytes) to store decode information */
+#define VP8_HW_SEGMENT_DATA_SZ 272
+#define VP8_HW_SEGMENT_UINT 4
+
+#define VP8_DEC_TABLE_PROC_LOOP 96
+#define VP8_DEC_TABLE_UNIT 3
+#define VP8_DEC_TABLE_SZ 300
+#define VP8_DEC_TABLE_OFFSET 2
+#define VP8_DEC_TABLE_RW_UNIT 4
+
+/**
+ * struct vdec_vp8_dec_info - decode misc information
+ * @working_buf_dma : working buffer dma address
+ * @prev_y_dma : previous decoded frame buffer Y plane address
+ * @cur_y_fb_dma : current frame buffer Y plane dma address
+ * @cur_c_fb_dma : current frame buffer C plane dma address
+ * @bs_dma : bitstream dma address
+ * @bs_sz : bitstream size
+ * @resolution_changed: resolution change flag 1 - changed, 0 - not changed
+ * @show_frame : display this frame or not
+ * @wait_key_frame : waiting for a key frame to arrive
+ */
+struct vdec_vp8_dec_info {
+ uint64_t working_buf_dma;
+ uint64_t prev_y_dma;
+ uint64_t cur_y_fb_dma;
+ uint64_t cur_c_fb_dma;
+ uint64_t bs_dma;
+ uint32_t bs_sz;
+ uint32_t resolution_changed;
+ uint32_t show_frame;
+ uint32_t wait_key_frame;
+};
+
+/**
+ * struct vdec_vp8_vsi - VPU shared information
+ * @dec : decoding information
+ * @pic : picture information
+ * @dec_table : decoder coefficient table
+ * @segment_buf : segmentation buffer
+ * @load_data : flag to indicate reload decode data
+ */
+struct vdec_vp8_vsi {
+ struct vdec_vp8_dec_info dec;
+ struct vdec_pic_info pic;
+ uint32_t dec_table[VP8_DEC_TABLE_SZ];
+ uint32_t segment_buf[VP8_HW_SEGMENT_DATA_SZ][VP8_HW_SEGMENT_UINT];
+ uint32_t load_data;
+};
+
+/**
+ * struct vdec_vp8_hw_reg_base - HW register base
+ * @sys : base address for sys
+ * @misc : base address for misc
+ * @ld : base address for ld
+ * @top : base address for top
+ * @cm : base address for cm
+ * @hwd : base address for hwd
+ * @hwb : base address for hwb
+ */
+struct vdec_vp8_hw_reg_base {
+ void __iomem *sys;
+ void __iomem *misc;
+ void __iomem *ld;
+ void __iomem *top;
+ void __iomem *cm;
+ void __iomem *hwd;
+ void __iomem *hwb;
+};
+
+/**
+ * struct vdec_vp8_vpu_inst - VPU instance for VP8 decode
+ * @wq_hd : Wait queue to wait VPU message ack
+ * @signaled : 1 - Host has received ack message from VPU, 0 - not received
+ * @failure : VPU execution result status 0 - success, others - fail
+ * @inst_addr : VPU decoder instance address
+ */
+struct vdec_vp8_vpu_inst {
+ wait_queue_head_t wq_hd;
+ int signaled;
+ int failure;
+ uint32_t inst_addr;
+};
+
+/* frame buffer (fb) list
+ * [available_fb_node_list] - fb nodes are initialized to 0 and populated
+ *                            into this list at init time
+ * [fb_use_list]  - an fb is moved here after it is sent to decode
+ * [fb_free_list] - an fb that is no longer needed for reference is moved
+ *                  from [fb_use_list] to [fb_free_list]; once the user
+ *                  removes an fb from [fb_free_list], it is circulated
+ *                  back to [available_fb_node_list]
+ * [fb_disp_list] - an fb ready for display is moved here; once the user
+ *                  removes it from [fb_disp_list], it is circulated back
+ *                  to [available_fb_node_list]
+ */
+
+/**
+ * struct vdec_vp8_inst - VP8 decoder instance
+ * @cur_fb : current frame buffer
+ * @dec_fb : decode frame buffer node
+ * @available_fb_node_list : list to store available frame buffer node
+ * @fb_use_list : list to store frame buffer in use
+ * @fb_free_list : list to store free frame buffer
+ * @fb_disp_list : list to store display ready frame buffer
+ * @working_buf : HW decoder working buffer
+ * @reg_base : HW register base address
+ * @frm_cnt : decode frame count
+ * @ctx : V4L2 context
+ * @dev : platform device
+ * @vpu : VPU instance for decoder
+ * @vsi : VPU share information
+ */
+struct vdec_vp8_inst {
+ struct vdec_fb *cur_fb;
+ struct vdec_fb_node dec_fb[VP8_MAX_FRM_BUF_NODE_NUM];
+ struct list_head available_fb_node_list;
+ struct list_head fb_use_list;
+ struct list_head fb_free_list;
+ struct list_head fb_disp_list;
+ struct mtk_vcodec_mem working_buf;
+ struct vdec_vp8_hw_reg_base reg_base;
+ unsigned int frm_cnt;
+ struct mtk_vcodec_ctx *ctx;
+ struct vdec_vpu_inst vpu;
+ struct vdec_vp8_vsi *vsi;
+};
+
+static void get_hw_reg_base(struct vdec_vp8_inst *inst)
+{
+ inst->reg_base.top = mtk_vcodec_get_reg_addr(inst->ctx, VDEC_TOP);
+ inst->reg_base.cm = mtk_vcodec_get_reg_addr(inst->ctx, VDEC_CM);
+ inst->reg_base.hwd = mtk_vcodec_get_reg_addr(inst->ctx, VDEC_HWD);
+ inst->reg_base.sys = mtk_vcodec_get_reg_addr(inst->ctx, VDEC_SYS);
+ inst->reg_base.misc = mtk_vcodec_get_reg_addr(inst->ctx, VDEC_MISC);
+ inst->reg_base.ld = mtk_vcodec_get_reg_addr(inst->ctx, VDEC_LD);
+ inst->reg_base.hwb = mtk_vcodec_get_reg_addr(inst->ctx, VDEC_HWB);
+}
+
+static void write_hw_segmentation_data(struct vdec_vp8_inst *inst)
+{
+ int i, j;
+ u32 seg_id_addr;
+ u32 val;
+ void __iomem *cm = inst->reg_base.cm;
+ struct vdec_vp8_vsi *vsi = inst->vsi;
+
+ seg_id_addr = readl(inst->reg_base.top + VP8_SEGID_DRAM_ADDR) >> 4;
+
+ for (i = 0; i < ARRAY_SIZE(vsi->segment_buf); i++) {
+ for (j = ARRAY_SIZE(vsi->segment_buf[i]) - 1; j >= 0; j--) {
+ val = (1 << 16) + ((seg_id_addr + i) << 2) + j;
+ writel(val, cm + VP8_HW_VLD_ADDR);
+
+ val = vsi->segment_buf[i][j];
+ writel(val, cm + VP8_HW_VLD_VALUE);
+ }
+ }
+}
+
+static void read_hw_segmentation_data(struct vdec_vp8_inst *inst)
+{
+ int i, j;
+ u32 seg_id_addr;
+ u32 val;
+ void __iomem *cm = inst->reg_base.cm;
+ struct vdec_vp8_vsi *vsi = inst->vsi;
+
+ seg_id_addr = readl(inst->reg_base.top + VP8_SEGID_DRAM_ADDR) >> 4;
+
+ for (i = 0; i < ARRAY_SIZE(vsi->segment_buf); i++) {
+ for (j = ARRAY_SIZE(vsi->segment_buf[i]) - 1; j >= 0; j--) {
+ val = ((seg_id_addr + i) << 2) + j;
+ writel(val, cm + VP8_HW_VLD_ADDR);
+
+ val = readl(cm + VP8_HW_VLD_VALUE);
+ vsi->segment_buf[i][j] = val;
+ }
+ }
+}
+
+/* reset HW and enable HW read/write data function */
+static void enable_hw_rw_function(struct vdec_vp8_inst *inst)
+{
+ u32 val = 0;
+ void __iomem *sys = inst->reg_base.sys;
+ void __iomem *misc = inst->reg_base.misc;
+ void __iomem *ld = inst->reg_base.ld;
+ void __iomem *hwb = inst->reg_base.hwb;
+ void __iomem *hwd = inst->reg_base.hwd;
+
+ writel(0x1, sys + VP8_RW_CKEN_SET);
+ writel(0x101, ld + VP8_WO_VLD_SRST);
+ writel(0x101, hwb + VP8_WO_VLD_SRST);
+
+ writel(1, sys);
+ val = readl(misc + VP8_RW_MISC_SRST);
+ writel((val & 0xFFFFFFFE), misc + VP8_RW_MISC_SRST);
+
+ writel(0x1, misc + VP8_RW_MISC_SYS_SEL);
+ writel(0x17F, misc + VP8_RW_MISC_SPEC_CON);
+ writel(0x71201100, misc + VP8_RW_MISC_FUNC_CON);
+ writel(0x0, ld + VP8_WO_VLD_SRST);
+ writel(0x0, hwb + VP8_WO_VLD_SRST);
+ writel(0x1, sys + VP8_RW_DCM_CON);
+ writel(0x1, misc + VP8_RW_MISC_DCM_CON);
+ writel(0x1, hwd + VP8_RW_VP8_CTRL);
+}
+
+static void store_dec_table(struct vdec_vp8_inst *inst)
+{
+ int i, j;
+ u32 addr = 0, val = 0;
+ void __iomem *hwd = inst->reg_base.hwd;
+ u32 *p = &inst->vsi->dec_table[VP8_DEC_TABLE_OFFSET];
+
+ for (i = 0; i < VP8_DEC_TABLE_PROC_LOOP; i++) {
+ writel(addr, hwd + VP8_BSASET);
+ for (j = 0; j < VP8_DEC_TABLE_UNIT ; j++) {
+ val = *p++;
+ writel(val, hwd + VP8_BSDSET);
+ }
+ addr += VP8_DEC_TABLE_RW_UNIT;
+ }
+}
+
+static void load_dec_table(struct vdec_vp8_inst *inst)
+{
+ int i;
+ u32 addr = 0;
+ u32 *p = &inst->vsi->dec_table[VP8_DEC_TABLE_OFFSET];
+ void __iomem *hwd = inst->reg_base.hwd;
+
+ for (i = 0; i < VP8_DEC_TABLE_PROC_LOOP; i++) {
+ writel(addr, hwd + VP8_BSASET);
+ /* read total 11 bytes */
+ *p++ = readl(hwd + VP8_BSDSET);
+ *p++ = readl(hwd + VP8_BSDSET);
+ *p++ = readl(hwd + VP8_BSDSET) & 0xFFFFFF;
+ addr += VP8_DEC_TABLE_RW_UNIT;
+ }
+}
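
A sanity check on the table geometry: 96 iterations of 3 words each is 288 entries, which together with the 2-entry offset fits in the 300-entry dec_table, and each iteration carries 11 meaningful bytes (4 + 4 + 3). A compile-time guard along these lines could encode that invariant (a sketch, not part of this patch):

/* Sketch: keep the store/load loops inside vsi->dec_table[]. */
static inline void vp8_dec_table_size_check(void)
{
        BUILD_BUG_ON(VP8_DEC_TABLE_OFFSET +
                     VP8_DEC_TABLE_PROC_LOOP * VP8_DEC_TABLE_UNIT >
                     VP8_DEC_TABLE_SZ);         /* 2 + 96 * 3 = 290 <= 300 */
}
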
+
+static void get_pic_info(struct vdec_vp8_inst *inst, struct vdec_pic_info *pic)
+{
+ *pic = inst->vsi->pic;
+
+ mtk_vcodec_debug(inst, "pic(%d, %d), buf(%d, %d)",
+ pic->pic_w, pic->pic_h, pic->buf_w, pic->buf_h);
+ mtk_vcodec_debug(inst, "Y(%d, %d), C(%d, %d)", pic->y_bs_sz,
+ pic->y_len_sz, pic->c_bs_sz, pic->c_len_sz);
+}
+
+static void vp8_dec_finish(struct vdec_vp8_inst *inst)
+{
+ struct vdec_fb_node *node;
+ uint64_t prev_y_dma = inst->vsi->dec.prev_y_dma;
+
+ mtk_vcodec_debug(inst, "prev fb base dma=%llx", prev_y_dma);
+
+ /* put last decode ok frame to fb_free_list */
+ if (prev_y_dma != 0) {
+ list_for_each_entry(node, &inst->fb_use_list, list) {
+ struct vdec_fb *fb = (struct vdec_fb *)node->fb;
+
+ if (prev_y_dma == (uint64_t)fb->base_y.dma_addr) {
+ list_move_tail(&node->list,
+ &inst->fb_free_list);
+ break;
+ }
+ }
+ }
+
+ /* available_fb_node_list -> fb_use_list */
+ node = list_first_entry(&inst->available_fb_node_list,
+ struct vdec_fb_node, list);
+ node->fb = inst->cur_fb;
+ list_move_tail(&node->list, &inst->fb_use_list);
+
+ /* available_fb_node_list -> fb_disp_list */
+ if (inst->vsi->dec.show_frame) {
+ node = list_first_entry(&inst->available_fb_node_list,
+ struct vdec_fb_node, list);
+ node->fb = inst->cur_fb;
+ list_move_tail(&node->list, &inst->fb_disp_list);
+ }
+}
+
+static void move_fb_list_use_to_free(struct vdec_vp8_inst *inst)
+{
+ struct vdec_fb_node *node, *tmp;
+
+ list_for_each_entry_safe(node, tmp, &inst->fb_use_list, list)
+ list_move_tail(&node->list, &inst->fb_free_list);
+}
+
+static void init_list(struct vdec_vp8_inst *inst)
+{
+ int i;
+
+ INIT_LIST_HEAD(&inst->available_fb_node_list);
+ INIT_LIST_HEAD(&inst->fb_use_list);
+ INIT_LIST_HEAD(&inst->fb_free_list);
+ INIT_LIST_HEAD(&inst->fb_disp_list);
+
+ for (i = 0; i < ARRAY_SIZE(inst->dec_fb); i++) {
+ INIT_LIST_HEAD(&inst->dec_fb[i].list);
+ inst->dec_fb[i].fb = NULL;
+ list_add_tail(&inst->dec_fb[i].list,
+ &inst->available_fb_node_list);
+ }
+}
+
+static void add_fb_to_free_list(struct vdec_vp8_inst *inst, void *fb)
+{
+ struct vdec_fb_node *node;
+
+ if (fb) {
+ node = list_first_entry(&inst->available_fb_node_list,
+ struct vdec_fb_node, list);
+ node->fb = fb;
+ list_move_tail(&node->list, &inst->fb_free_list);
+ }
+}
+
+static int alloc_working_buf(struct vdec_vp8_inst *inst)
+{
+ int err;
+ struct mtk_vcodec_mem *mem = &inst->working_buf;
+
+ mem->size = VP8_WORKING_BUF_SZ;
+ err = mtk_vcodec_mem_alloc(inst->ctx, mem);
+ if (err) {
+ mtk_vcodec_err(inst, "Cannot allocate working buffer");
+ return err;
+ }
+
+ inst->vsi->dec.working_buf_dma = (uint64_t)mem->dma_addr;
+ return 0;
+}
+
+static void free_working_buf(struct vdec_vp8_inst *inst)
+{
+ struct mtk_vcodec_mem *mem = &inst->working_buf;
+
+ if (mem->va)
+ mtk_vcodec_mem_free(inst->ctx, mem);
+
+ inst->vsi->dec.working_buf_dma = 0;
+}
+
+static int vdec_vp8_init(struct mtk_vcodec_ctx *ctx, unsigned long *h_vdec)
+{
+ struct vdec_vp8_inst *inst;
+ int err;
+
+ inst = kzalloc(sizeof(*inst), GFP_KERNEL);
+ if (!inst)
+ return -ENOMEM;
+
+ inst->ctx = ctx;
+
+ inst->vpu.id = IPI_VDEC_VP8;
+ inst->vpu.dev = ctx->dev->vpu_plat_dev;
+ inst->vpu.ctx = ctx;
+ inst->vpu.handler = vpu_dec_ipi_handler;
+
+ err = vpu_dec_init(&inst->vpu);
+ if (err) {
+ mtk_vcodec_err(inst, "vdec_vp8 init err=%d", err);
+ goto error_free_inst;
+ }
+
+ inst->vsi = (struct vdec_vp8_vsi *)inst->vpu.vsi;
+ init_list(inst);
+ err = alloc_working_buf(inst);
+ if (err)
+ goto error_deinit;
+
+ get_hw_reg_base(inst);
+ mtk_vcodec_debug(inst, "VP8 Instance >> %p", inst);
+
+ *h_vdec = (unsigned long)inst;
+ return 0;
+
+error_deinit:
+ vpu_dec_deinit(&inst->vpu);
+error_free_inst:
+ kfree(inst);
+ return err;
+}
+
+static int vdec_vp8_decode(unsigned long h_vdec, struct mtk_vcodec_mem *bs,
+ struct vdec_fb *fb, bool *res_chg)
+{
+ struct vdec_vp8_inst *inst = (struct vdec_vp8_inst *)h_vdec;
+ struct vdec_vp8_dec_info *dec = &inst->vsi->dec;
+ struct vdec_vpu_inst *vpu = &inst->vpu;
+ unsigned char *bs_va;
+ unsigned int data;
+ int err = 0;
+ uint64_t y_fb_dma;
+ uint64_t c_fb_dma;
+
+ /* bs NULL means flush decoder */
+ if (bs == NULL) {
+ move_fb_list_use_to_free(inst);
+ return vpu_dec_reset(vpu);
+ }
+
+ y_fb_dma = fb ? (u64)fb->base_y.dma_addr : 0;
+ c_fb_dma = fb ? (u64)fb->base_c.dma_addr : 0;
+
+ mtk_vcodec_debug(inst, "+ [%d] FB y_dma=%llx c_dma=%llx fb=%p",
+ inst->frm_cnt, y_fb_dma, c_fb_dma, fb);
+
+ inst->cur_fb = fb;
+ dec->bs_dma = (unsigned long)bs->dma_addr;
+ dec->bs_sz = bs->size;
+ dec->cur_y_fb_dma = y_fb_dma;
+ dec->cur_c_fb_dma = c_fb_dma;
+
+ mtk_vcodec_debug(inst, "\n + FRAME[%d] +\n", inst->frm_cnt);
+
+ write_hw_segmentation_data(inst);
+ enable_hw_rw_function(inst);
+ store_dec_table(inst);
+
+ bs_va = (unsigned char *)bs->va;
+
+ /* retrieve width/height and scale info from header */
+ data = (*(bs_va + 9) << 24) | (*(bs_va + 8) << 16) |
+ (*(bs_va + 7) << 8) | *(bs_va + 6);
+ err = vpu_dec_start(vpu, &data, 1);
+ if (err) {
+ add_fb_to_free_list(inst, fb);
+ if (dec->wait_key_frame) {
+ mtk_vcodec_debug(inst, "wait key frame !");
+ return 0;
+ }
+
+ goto error;
+ }
+
+ if (dec->resolution_changed) {
+ mtk_vcodec_debug(inst, "- resolution_changed -");
+ *res_chg = true;
+ add_fb_to_free_list(inst, fb);
+ return 0;
+ }
+
+ /* wait decoder done interrupt */
+ mtk_vcodec_wait_for_done_ctx(inst->ctx, MTK_INST_IRQ_RECEIVED,
+ WAIT_INTR_TIMEOUT_MS);
+
+ if (inst->vsi->load_data)
+ load_dec_table(inst);
+
+ vp8_dec_finish(inst);
+ read_hw_segmentation_data(inst);
+
+ err = vpu_dec_end(vpu);
+ if (err)
+ goto error;
+
+ mtk_vcodec_debug(inst, "\n - FRAME[%d] - show=%d\n", inst->frm_cnt,
+ dec->show_frame);
+ inst->frm_cnt++;
+ *res_chg = false;
+ return 0;
+
+error:
+ mtk_vcodec_err(inst, "\n - FRAME[%d] - err=%d\n", inst->frm_cnt, err);
+ return err;
+}
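
The word handed to the VPU above comes straight from the VP8 uncompressed data chunk: per RFC 6386, a key frame starts with a 3-byte frame tag, the start code 9d 01 2a, then little-endian 16-bit width and height words (14 bits of size plus a 2-bit scale each), i.e. bytes 6 through 9 of the bitstream. A sketch decoding the assembled word:

/* Sketch: unpack the size word built from bitstream bytes 6..9 above. */
static void sketch_vp8_size(unsigned int data)
{
        unsigned int width  = data & 0x3fff;            /* bits  0..13 */
        unsigned int hscale = (data >> 14) & 0x3;       /* bits 14..15 */
        unsigned int height = (data >> 16) & 0x3fff;    /* bits 16..29 */
        unsigned int vscale = data >> 30;               /* bits 30..31 */

        pr_debug("vp8 %ux%u (scale %u/%u)\n", width, height, hscale, vscale);
}
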
+
+static void get_disp_fb(struct vdec_vp8_inst *inst, struct vdec_fb **out_fb)
+{
+ struct vdec_fb_node *node;
+ struct vdec_fb *fb;
+
+ node = list_first_entry_or_null(&inst->fb_disp_list,
+ struct vdec_fb_node, list);
+ if (node) {
+ list_move_tail(&node->list, &inst->available_fb_node_list);
+ fb = (struct vdec_fb *)node->fb;
+ fb->status |= FB_ST_DISPLAY;
+ mtk_vcodec_debug(inst, "[FB] get disp fb %p st=%d",
+ node->fb, fb->status);
+ } else {
+ fb = NULL;
+ mtk_vcodec_debug(inst, "[FB] there is no disp fb");
+ }
+
+ *out_fb = fb;
+}
+
+static void get_free_fb(struct vdec_vp8_inst *inst, struct vdec_fb **out_fb)
+{
+ struct vdec_fb_node *node;
+ struct vdec_fb *fb;
+
+ node = list_first_entry_or_null(&inst->fb_free_list,
+ struct vdec_fb_node, list);
+ if (node) {
+ list_move_tail(&node->list, &inst->available_fb_node_list);
+ fb = (struct vdec_fb *)node->fb;
+ fb->status |= FB_ST_FREE;
+ mtk_vcodec_debug(inst, "[FB] get free fb %p st=%d",
+ node->fb, fb->status);
+ } else {
+ fb = NULL;
+ mtk_vcodec_debug(inst, "[FB] there is no free fb");
+ }
+
+ *out_fb = fb;
+}
+
+static void get_crop_info(struct vdec_vp8_inst *inst, struct v4l2_rect *cr)
+{
+ cr->left = 0;
+ cr->top = 0;
+ cr->width = inst->vsi->pic.pic_w;
+ cr->height = inst->vsi->pic.pic_h;
+ mtk_vcodec_debug(inst, "get crop info l=%d, t=%d, w=%d, h=%d",
+ cr->left, cr->top, cr->width, cr->height);
+}
+
+static int vdec_vp8_get_param(unsigned long h_vdec,
+ enum vdec_get_param_type type, void *out)
+{
+ struct vdec_vp8_inst *inst = (struct vdec_vp8_inst *)h_vdec;
+
+ switch (type) {
+ case GET_PARAM_DISP_FRAME_BUFFER:
+ get_disp_fb(inst, out);
+ break;
+
+ case GET_PARAM_FREE_FRAME_BUFFER:
+ get_free_fb(inst, out);
+ break;
+
+ case GET_PARAM_PIC_INFO:
+ get_pic_info(inst, out);
+ break;
+
+ case GET_PARAM_CROP_INFO:
+ get_crop_info(inst, out);
+ break;
+
+ case GET_PARAM_DPB_SIZE:
+ *((unsigned int *)out) = VP8_DPB_SIZE;
+ break;
+
+ default:
+ mtk_vcodec_err(inst, "invalid get parameter type=%d", type);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void vdec_vp8_deinit(unsigned long h_vdec)
+{
+ struct vdec_vp8_inst *inst = (struct vdec_vp8_inst *)h_vdec;
+
+ mtk_vcodec_debug_enter(inst);
+
+ vpu_dec_deinit(&inst->vpu);
+ free_working_buf(inst);
+ kfree(inst);
+}
+
+static struct vdec_common_if vdec_vp8_if = {
+ vdec_vp8_init,
+ vdec_vp8_decode,
+ vdec_vp8_get_param,
+ vdec_vp8_deinit,
+};
+
+struct vdec_common_if *get_vp8_dec_comm_if(void);
+
+struct vdec_common_if *get_vp8_dec_comm_if(void)
+{
+ return &vdec_vp8_if;
+}
diff --git a/drivers/media/platform/mtk-vcodec/vdec/vdec_vp9_if.c b/drivers/media/platform/mtk-vcodec/vdec/vdec_vp9_if.c
new file mode 100644
index 000000000000..e91a3b425b0c
--- /dev/null
+++ b/drivers/media/platform/mtk-vcodec/vdec/vdec_vp9_if.c
@@ -0,0 +1,967 @@
+/*
+ * Copyright (c) 2016 MediaTek Inc.
+ * Author: Daniel Hsiao <daniel.hsiao@mediatek.com>
+ * Kai-Sean Yang <kai-sean.yang@mediatek.com>
+ * Tiffany Lin <tiffany.lin@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/fs.h>
+#include <linux/slab.h>
+#include <linux/syscalls.h>
+#include <linux/delay.h>
+#include <linux/time.h>
+
+#include "../mtk_vcodec_intr.h"
+#include "../vdec_drv_base.h"
+#include "../vdec_vpu_if.h"
+
+#define VP9_SUPER_FRAME_BS_SZ 64
+#define MAX_VP9_DPB_SIZE 9
+
+#define REFS_PER_FRAME 3
+#define MAX_NUM_REF_FRAMES 8
+#define VP9_MAX_FRM_BUF_NUM 9
+#define VP9_MAX_FRM_BUF_NODE_NUM (VP9_MAX_FRM_BUF_NUM * 2)
+
+/**
+ * struct vp9_dram_buf - contains buffer info for vpu
+ * @va : cpu address
+ * @pa : iova address
+ * @sz : buffer size
+ * @padding : for 64 bytes alignment
+ */
+struct vp9_dram_buf {
+ unsigned long va;
+ unsigned long pa;
+ unsigned int sz;
+ unsigned int padding;
+};
+
+/**
+ * struct vp9_fb_info - contains frame buffer info
+ * @fb : frame buffer
+ * @reserved : reserved field used by vpu
+ */
+struct vp9_fb_info {
+ struct vdec_fb *fb;
+ unsigned int reserved[32];
+};
+
+/**
+ * struct vp9_ref_cnt_buf - contains reference buffer information
+ * @buf : referenced frame buffer
+ * @ref_cnt : referenced frame buffer's reference count.
+ * When reference count=0, remove it from reference list
+ */
+struct vp9_ref_cnt_buf {
+ struct vp9_fb_info buf;
+ unsigned int ref_cnt;
+};
+
+/**
+ * struct vp9_ref_buf - contains current frame's reference buffer information
+ * @buf : reference buffer
+ * @idx : reference buffer index to frm_bufs
+ * @reserved : reserved field used by vpu
+ */
+struct vp9_ref_buf {
+ struct vp9_fb_info *buf;
+ unsigned int idx;
+ unsigned int reserved[6];
+};
+
+/**
+ * struct vp9_sf_ref_fb - contains super frame reference frame buffer info
+ * @fb : super frame reference frame buffer
+ * @used : this reference frame info entry is used
+ * @padding : for 64 bytes alignment
+ */
+struct vp9_sf_ref_fb {
+ struct vdec_fb fb;
+ int used;
+ int padding;
+};
+
+/*
+ * struct vdec_vp9_vsi - shared buffer between host and VPU firmware
+ * AP-W/R : AP is writer/reader on this item
+ * VPU-W/R: VPU is write/reader on this item
+ * @sf_bs_buf : super frame backup buffer (AP-W, VPU-R)
+ * @sf_ref_fb : record super frame reference buffer information
+ * (AP-R/W, VPU-R/W)
+ * @sf_next_ref_fb_idx : next available super frame (AP-W, VPU-R)
+ * @sf_frm_cnt : super frame count, filled by vpu (AP-R, VPU-W)
+ * @sf_frm_offset : super frame offset, filled by vpu (AP-R, VPU-W)
+ * @sf_frm_sz : super frame size, filled by vpu (AP-R, VPU-W)
+ * @sf_frm_idx : current super frame (AP-R, VPU-W)
+ * @sf_init : inform super frame info already parsed by vpu (AP-R, VPU-W)
+ * @fb : capture buffer (AP-W, VPU-R)
+ * @bs : bs buffer (AP-W, VPU-R)
+ * @cur_fb : current show capture buffer (AP-R/W, VPU-R/W)
+ * @pic_w : picture width (AP-R, VPU-W)
+ * @pic_h : picture height (AP-R, VPU-W)
+ * @buf_w : coded width (AP-R, VPU-W)
+ * @buf_h : coded height (AP-R, VPU-W)
+ * @buf_sz_y_bs : ufo compressed y plane size (AP-R, VPU-W)
+ * @buf_sz_c_bs : ufo compressed cbcr plane size (AP-R, VPU-W)
+ * @buf_len_sz_y : size used to store y plane ufo info (AP-R, VPU-W)
+ * @buf_len_sz_c : size used to store cbcr plane ufo info (AP-R, VPU-W)
+ *
+ * @profile : profile parsed from vpu (AP-R, VPU-W)
+ * @show_frame : display this frame or not (AP-R, VPU-W)
+ * @show_existing_frame : inform this frame is show existing frame
+ * (AP-R, VPU-W)
+ * @frm_to_show_idx : index to show frame (AP-R, VPU-W)
+ *
+ * @refresh_frm_flags : indicate when frame need to refine reference count
+ * (AP-R, VPU-W)
+ * @resolution_changed : resolution change in this frame (AP-R, VPU-W)
+ *
+ * @frm_bufs : maintain reference buffer info (AP-R/W, VPU-R/W)
+ * @ref_frm_map : maintain reference buffer map info (AP-R/W, VPU-R/W)
+ * @new_fb_idx : index to frm_bufs array (AP-R, VPU-W)
+ * @frm_num : decoded frame number, include sub-frame count (AP-R, VPU-W)
+ * @mv_buf : motion vector working buffer (AP-W, VPU-R)
+ * @frm_refs : maintain three reference buffer info (AP-R/W, VPU-R/W)
+ */
+struct vdec_vp9_vsi {
+ unsigned char sf_bs_buf[VP9_SUPER_FRAME_BS_SZ];
+ struct vp9_sf_ref_fb sf_ref_fb[VP9_MAX_FRM_BUF_NUM-1];
+ int sf_next_ref_fb_idx;
+ unsigned int sf_frm_cnt;
+ unsigned int sf_frm_offset[VP9_MAX_FRM_BUF_NUM-1];
+ unsigned int sf_frm_sz[VP9_MAX_FRM_BUF_NUM-1];
+ unsigned int sf_frm_idx;
+ unsigned int sf_init;
+ struct vdec_fb fb;
+ struct mtk_vcodec_mem bs;
+ struct vdec_fb cur_fb;
+ unsigned int pic_w;
+ unsigned int pic_h;
+ unsigned int buf_w;
+ unsigned int buf_h;
+ unsigned int buf_sz_y_bs;
+ unsigned int buf_sz_c_bs;
+ unsigned int buf_len_sz_y;
+ unsigned int buf_len_sz_c;
+ unsigned int profile;
+ unsigned int show_frame;
+ unsigned int show_existing_frame;
+ unsigned int frm_to_show_idx;
+ unsigned int refresh_frm_flags;
+ unsigned int resolution_changed;
+
+ struct vp9_ref_cnt_buf frm_bufs[VP9_MAX_FRM_BUF_NUM];
+ int ref_frm_map[MAX_NUM_REF_FRAMES];
+ unsigned int new_fb_idx;
+ unsigned int frm_num;
+ struct vp9_dram_buf mv_buf;
+
+ struct vp9_ref_buf frm_refs[REFS_PER_FRAME];
+};
+
+/*
+ * struct vdec_vp9_inst - vp9 decode instance
+ * @mv_buf : working buffer for mv
+ * @dec_fb : vdec_fb node to link fb to different fb_xxx_list
+ * @available_fb_node_list : current available vdec_fb node
+ * @fb_use_list : current used or referenced vdec_fb
+ * @fb_free_list : current available to free vdec_fb
+ * @fb_disp_list : current available to display vdec_fb
+ * @cur_fb : current frame buffer
+ * @ctx : current decode context
+ * @vpu : vpu instance information
+ * @vsi : shared buffer between host and VPU firmware
+ * @total_frm_cnt : total frame count; it does not include sub-frames in a
+ * super frame
+ * @mem : instance memory information
+ */
+struct vdec_vp9_inst {
+ struct mtk_vcodec_mem mv_buf;
+
+ struct vdec_fb_node dec_fb[VP9_MAX_FRM_BUF_NODE_NUM];
+ struct list_head available_fb_node_list;
+ struct list_head fb_use_list;
+ struct list_head fb_free_list;
+ struct list_head fb_disp_list;
+ struct vdec_fb *cur_fb;
+ struct mtk_vcodec_ctx *ctx;
+ struct vdec_vpu_inst vpu;
+ struct vdec_vp9_vsi *vsi;
+ unsigned int total_frm_cnt;
+ struct mtk_vcodec_mem mem;
+};
+
+static bool vp9_is_sf_ref_fb(struct vdec_vp9_inst *inst, struct vdec_fb *fb)
+{
+ int i;
+ struct vdec_vp9_vsi *vsi = inst->vsi;
+
+ for (i = 0; i < ARRAY_SIZE(vsi->sf_ref_fb); i++) {
+ if (fb == &vsi->sf_ref_fb[i].fb)
+ return true;
+ }
+ return false;
+}
+
+static struct vdec_fb *vp9_rm_from_fb_use_list(struct vdec_vp9_inst
+ *inst, void *addr)
+{
+ struct vdec_fb *fb = NULL;
+ struct vdec_fb_node *node;
+
+ list_for_each_entry(node, &inst->fb_use_list, list) {
+ fb = (struct vdec_fb *)node->fb;
+ if (fb->base_y.va == addr) {
+ list_move_tail(&node->list,
+ &inst->available_fb_node_list);
+ break;
+ }
+ }
+ return fb;
+}
+
+static void vp9_add_to_fb_free_list(struct vdec_vp9_inst *inst,
+ struct vdec_fb *fb)
+{
+ struct vdec_fb_node *node;
+
+ if (fb) {
+ node = list_first_entry_or_null(&inst->available_fb_node_list,
+ struct vdec_fb_node, list);
+
+ if (node) {
+ node->fb = fb;
+ list_move_tail(&node->list, &inst->fb_free_list);
+ }
+ } else {
+ mtk_vcodec_debug(inst, "No free fb node");
+ }
+}
+
+static void vp9_free_sf_ref_fb(struct vdec_fb *fb)
+{
+ struct vp9_sf_ref_fb *sf_ref_fb =
+ container_of(fb, struct vp9_sf_ref_fb, fb);
+
+ sf_ref_fb->used = 0;
+}
+
+static void vp9_ref_cnt_fb(struct vdec_vp9_inst *inst, int *idx,
+ int new_idx)
+{
+ struct vdec_vp9_vsi *vsi = inst->vsi;
+ int ref_idx = *idx;
+
+ if (ref_idx >= 0 && vsi->frm_bufs[ref_idx].ref_cnt > 0) {
+ vsi->frm_bufs[ref_idx].ref_cnt--;
+
+ if (vsi->frm_bufs[ref_idx].ref_cnt == 0) {
+ if (!vp9_is_sf_ref_fb(inst,
+ vsi->frm_bufs[ref_idx].buf.fb)) {
+ struct vdec_fb *fb;
+
+ fb = vp9_rm_from_fb_use_list(inst,
+ vsi->frm_bufs[ref_idx].buf.fb->base_y.va);
+ vp9_add_to_fb_free_list(inst, fb);
+ } else
+ vp9_free_sf_ref_fb(
+ vsi->frm_bufs[ref_idx].buf.fb);
+ }
+ }
+
+ *idx = new_idx;
+ vsi->frm_bufs[new_idx].ref_cnt++;
+}
+
+static void vp9_free_all_sf_ref_fb(struct vdec_vp9_inst *inst)
+{
+ int i;
+ struct vdec_vp9_vsi *vsi = inst->vsi;
+
+ for (i = 0; i < ARRAY_SIZE(vsi->sf_ref_fb); i++) {
+ if (vsi->sf_ref_fb[i].fb.base_y.va) {
+ mtk_vcodec_mem_free(inst->ctx,
+ &vsi->sf_ref_fb[i].fb.base_y);
+ mtk_vcodec_mem_free(inst->ctx,
+ &vsi->sf_ref_fb[i].fb.base_c);
+ vsi->sf_ref_fb[i].used = 0;
+ }
+ }
+}
+
+/* For each sub-frame except the last one, the driver dynamically allocates
+ * a reference buffer by calling vp9_get_sf_ref_fb().
+ * The last sub-frame uses the original fb provided by the
+ * vdec_vp9_decode() interface.
+ */
+static int vp9_get_sf_ref_fb(struct vdec_vp9_inst *inst)
+{
+ int idx;
+ struct mtk_vcodec_mem *mem_base_y;
+ struct mtk_vcodec_mem *mem_base_c;
+ struct vdec_vp9_vsi *vsi = inst->vsi;
+
+ for (idx = 0;
+ idx < ARRAY_SIZE(vsi->sf_ref_fb);
+ idx++) {
+ if (vsi->sf_ref_fb[idx].fb.base_y.va &&
+ vsi->sf_ref_fb[idx].used == 0) {
+ return idx;
+ }
+ }
+
+ for (idx = 0;
+ idx < ARRAY_SIZE(vsi->sf_ref_fb);
+ idx++) {
+ if (vsi->sf_ref_fb[idx].fb.base_y.va == NULL)
+ break;
+ }
+
+ if (idx == ARRAY_SIZE(vsi->sf_ref_fb)) {
+ mtk_vcodec_err(inst, "List Full");
+ return -1;
+ }
+
+ mem_base_y = &vsi->sf_ref_fb[idx].fb.base_y;
+ mem_base_y->size = vsi->buf_sz_y_bs + vsi->buf_len_sz_y;
+
+ if (mtk_vcodec_mem_alloc(inst->ctx, mem_base_y)) {
+ mtk_vcodec_err(inst, "Cannot allocate sf_ref_fb y_buf");
+ return -1;
+ }
+
+ mem_base_c = &vsi->sf_ref_fb[idx].fb.base_c;
+ mem_base_c->size = vsi->buf_sz_c_bs + vsi->buf_len_sz_c;
+
+ if (mtk_vcodec_mem_alloc(inst->ctx, mem_base_c)) {
+ mtk_vcodec_err(inst, "Cannot allocate sf_ref_fb c_buf");
+ return -1;
+ }
+ vsi->sf_ref_fb[idx].used = 0;
+
+ return idx;
+}
+
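To make the comment above concrete, a hypothetical super frame with three sub-frames would move through these helpers roughly as follows (an editor's sketch; the counts and indices are illustrative only, not taken from the patch):

	/* Assume the VPU reports sf_frm_cnt == 3 for the current access unit:
	 *
	 *   sub-frame 0: decoded into sf_ref_fb[sf_next_ref_fb_idx].fb;
	 *                vp9_get_sf_ref_fb() then returns the next slot that
	 *                is either unused (used == 0) or not yet allocated
	 *   sub-frame 1: same as sub-frame 0, reusing freed slots first
	 *   sub-frame 2: the last sub-frame, decoded into the fb passed to
	 *                vdec_vp9_decode(), so no sf_ref_fb slot is consumed
	 */
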
+static bool vp9_alloc_work_buf(struct vdec_vp9_inst *inst)
+{
+ struct vdec_vp9_vsi *vsi = inst->vsi;
+ int result;
+ struct mtk_vcodec_mem *mem;
+
+ unsigned int max_pic_w;
+ unsigned int max_pic_h;
+
+ if (!(inst->ctx->dev->dec_capability &
+ VCODEC_CAPABILITY_4K_DISABLED)) {
+ max_pic_w = VCODEC_DEC_4K_CODED_WIDTH;
+ max_pic_h = VCODEC_DEC_4K_CODED_HEIGHT;
+ } else {
+ max_pic_w = MTK_VDEC_MAX_W;
+ max_pic_h = MTK_VDEC_MAX_H;
+ }
+
+ if ((vsi->pic_w > max_pic_w) ||
+ (vsi->pic_h > max_pic_h)) {
+ mtk_vcodec_err(inst, "Invalid w/h %d/%d",
+ vsi->pic_w, vsi->pic_h);
+ return false;
+ }
+
+ mtk_vcodec_debug(inst, "BUF CHG(%d): w/h/sb_w/sb_h=%d/%d/%d/%d",
+ vsi->resolution_changed,
+ vsi->pic_w,
+ vsi->pic_h,
+ vsi->buf_w,
+ vsi->buf_h);
+
+ mem = &inst->mv_buf;
+
+ if (mem->va)
+ mtk_vcodec_mem_free(inst->ctx, mem);
+
+ mem->size = ((vsi->buf_w / 64) *
+ (vsi->buf_h / 64) + 2) * 36 * 16;
+
+ result = mtk_vcodec_mem_alloc(inst->ctx, mem);
+ if (result) {
+ mem->size = 0;
+ mtk_vcodec_err(inst, "Cannot allocate mv_buf");
+ return false;
+ }
+ /* Set the va again */
+ vsi->mv_buf.va = (unsigned long)mem->va;
+ vsi->mv_buf.pa = (unsigned long)mem->dma_addr;
+ vsi->mv_buf.sz = (unsigned int)mem->size;
+
+ vp9_free_all_sf_ref_fb(inst);
+ vsi->sf_next_ref_fb_idx = vp9_get_sf_ref_fb(inst);
+
+ return true;
+}
+
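As a worked example of the mv_buf sizing above, take a hypothetical 4K stream with buf_w = 3840 and buf_h = 2176 (illustrative values; the actual VCODEC_DEC_4K_CODED_* constants are defined elsewhere in the driver, not in this hunk):

	/*   (3840 / 64) * (2176 / 64) = 60 * 34 = 2040 64x64 blocks
	 *   (2040 + 2) * 36 * 16     = 1,176,192 bytes (~1.12 MiB)
	 */
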
+static bool vp9_add_to_fb_disp_list(struct vdec_vp9_inst *inst,
+ struct vdec_fb *fb)
+{
+ struct vdec_fb_node *node;
+
+ if (!fb) {
+ mtk_vcodec_err(inst, "fb == NULL");
+ return false;
+ }
+
+ node = list_first_entry_or_null(&inst->available_fb_node_list,
+ struct vdec_fb_node, list);
+ if (node) {
+ node->fb = fb;
+ list_move_tail(&node->list, &inst->fb_disp_list);
+ } else {
+ mtk_vcodec_err(inst, "No available fb node");
+ return false;
+ }
+
+ return true;
+}
+
+/* If any buffer updating is signaled it should be done here. */
+static void vp9_swap_frm_bufs(struct vdec_vp9_inst *inst)
+{
+ struct vdec_vp9_vsi *vsi = inst->vsi;
+ struct vp9_fb_info *frm_to_show;
+ int ref_index = 0, mask;
+
+ for (mask = vsi->refresh_frm_flags; mask; mask >>= 1) {
+ if (mask & 1)
+ vp9_ref_cnt_fb(inst, &vsi->ref_frm_map[ref_index],
+ vsi->new_fb_idx);
+ ++ref_index;
+ }
+
+ frm_to_show = &vsi->frm_bufs[vsi->new_fb_idx].buf;
+ vsi->frm_bufs[vsi->new_fb_idx].ref_cnt--;
+
+ if (frm_to_show->fb != inst->cur_fb) {
+ /* This is a show-existing frame with no decode output:
+ * copy frame data from frm_to_show to the current CAPTURE
+ * buffer
+ */
+ if ((frm_to_show->fb != NULL) &&
+ (inst->cur_fb->base_y.size >=
+ frm_to_show->fb->base_y.size)) {
+ memcpy((void *)inst->cur_fb->base_y.va,
+ (void *)frm_to_show->fb->base_y.va,
+ vsi->buf_w *
+ vsi->buf_h);
+ memcpy((void *)inst->cur_fb->base_c.va,
+ (void *)frm_to_show->fb->base_c.va,
+ vsi->buf_w *
+ vsi->buf_h / 2);
+ } else {
+ /* After resolution change case, current CAPTURE buffer
+ * may have less buffer size than frm_to_show buffer
+ * size
+ */
+ if (frm_to_show->fb != NULL)
+ mtk_vcodec_err(inst,
+ "inst->cur_fb->base_y.size=%zu, frm_to_show->fb.base_y.size=%zu",
+ inst->cur_fb->base_y.size,
+ frm_to_show->fb->base_y.size);
+ }
+ if (!vp9_is_sf_ref_fb(inst, inst->cur_fb)) {
+ if (vsi->show_frame)
+ vp9_add_to_fb_disp_list(inst, inst->cur_fb);
+ }
+ } else {
+ if (!vp9_is_sf_ref_fb(inst, inst->cur_fb)) {
+ if (vsi->show_frame)
+ vp9_add_to_fb_disp_list(inst, frm_to_show->fb);
+ }
+ }
+
+ /* When ref_cnt == 0, move this fb to fb_free_list. The v4l2 driver
+ * will clean up fb_free_list
+ */
+ if (vsi->frm_bufs[vsi->new_fb_idx].ref_cnt == 0) {
+ if (!vp9_is_sf_ref_fb(
+ inst, vsi->frm_bufs[vsi->new_fb_idx].buf.fb)) {
+ struct vdec_fb *fb;
+
+ fb = vp9_rm_from_fb_use_list(inst,
+ vsi->frm_bufs[vsi->new_fb_idx].buf.fb->base_y.va);
+
+ vp9_add_to_fb_free_list(inst, fb);
+ } else {
+ vp9_free_sf_ref_fb(
+ vsi->frm_bufs[vsi->new_fb_idx].buf.fb);
+ }
+ }
+
+ /* If this is a super frame and not the last sub-frame, get the next
+ * fb for sub-frame decoding
+ */
+ if (vsi->sf_frm_cnt > 0 && vsi->sf_frm_idx != vsi->sf_frm_cnt - 1)
+ vsi->sf_next_ref_fb_idx = vp9_get_sf_ref_fb(inst);
+}
+
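A short worked example of the refresh loop at the top of vp9_swap_frm_bufs(), assuming a hypothetical mask value:

	/* refresh_frm_flags == 0x5 (binary 101):
	 *   iteration 0: bit set   -> vp9_ref_cnt_fb(inst, &ref_frm_map[0], new_fb_idx)
	 *   iteration 1: bit clear -> ref_frm_map[1] untouched
	 *   iteration 2: bit set   -> vp9_ref_cnt_fb(inst, &ref_frm_map[2], new_fb_idx)
	 * Each call drops one reference on the old frame in that slot and
	 * takes one on frm_bufs[new_fb_idx].
	 */
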
+static bool vp9_wait_dec_end(struct vdec_vp9_inst *inst)
+{
+ struct mtk_vcodec_ctx *ctx = inst->ctx;
+
+ mtk_vcodec_wait_for_done_ctx(inst->ctx,
+ MTK_INST_IRQ_RECEIVED,
+ WAIT_INTR_TIMEOUT_MS);
+
+ if (ctx->irq_status & MTK_VDEC_IRQ_STATUS_DEC_SUCCESS)
+ return true;
+ else
+ return false;
+}
+
+static struct vdec_vp9_inst *vp9_alloc_inst(struct mtk_vcodec_ctx *ctx)
+{
+ int result;
+ struct mtk_vcodec_mem mem;
+ struct vdec_vp9_inst *inst;
+
+ memset(&mem, 0, sizeof(mem));
+ mem.size = sizeof(struct vdec_vp9_inst);
+ result = mtk_vcodec_mem_alloc(ctx, &mem);
+ if (result)
+ return NULL;
+
+ inst = mem.va;
+ inst->mem = mem;
+
+ return inst;
+}
+
+static void vp9_free_inst(struct vdec_vp9_inst *inst)
+{
+ struct mtk_vcodec_mem mem;
+
+ mem = inst->mem;
+ if (mem.va)
+ mtk_vcodec_mem_free(inst->ctx, &mem);
+}
+
+static bool vp9_decode_end_proc(struct vdec_vp9_inst *inst)
+{
+ struct vdec_vp9_vsi *vsi = inst->vsi;
+ bool ret = false;
+
+ if (!vsi->show_existing_frame) {
+ ret = vp9_wait_dec_end(inst);
+ if (!ret) {
+ mtk_vcodec_err(inst, "Decode failed, Decode Timeout @[%d]",
+ vsi->frm_num);
+ return false;
+ }
+
+ if (vpu_dec_end(&inst->vpu)) {
+ mtk_vcodec_err(inst, "vp9_dec_vpu_end failed");
+ return false;
+ }
+ mtk_vcodec_debug(inst, "Decode Ok @%d (%d/%d)", vsi->frm_num,
+ vsi->pic_w, vsi->pic_h);
+ } else {
+ mtk_vcodec_debug(inst, "Decode Ok @%d (show_existing_frame)",
+ vsi->frm_num);
+ }
+
+ vp9_swap_frm_bufs(inst);
+ vsi->frm_num++;
+ return true;
+}
+
+static bool vp9_is_last_sub_frm(struct vdec_vp9_inst *inst)
+{
+ struct vdec_vp9_vsi *vsi = inst->vsi;
+
+ if (vsi->sf_frm_cnt <= 0 || vsi->sf_frm_idx == vsi->sf_frm_cnt)
+ return true;
+
+ return false;
+}
+
+static struct vdec_fb *vp9_rm_from_fb_disp_list(struct vdec_vp9_inst *inst)
+{
+ struct vdec_fb_node *node;
+ struct vdec_fb *fb = NULL;
+
+ node = list_first_entry_or_null(&inst->fb_disp_list,
+ struct vdec_fb_node, list);
+ if (node) {
+ fb = (struct vdec_fb *)node->fb;
+ fb->status |= FB_ST_DISPLAY;
+ list_move_tail(&node->list, &inst->available_fb_node_list);
+ mtk_vcodec_debug(inst, "[FB] get disp fb %p st=%d",
+ node->fb, fb->status);
+ } else
+ mtk_vcodec_debug(inst, "[FB] there is no disp fb");
+
+ return fb;
+}
+
+static bool vp9_add_to_fb_use_list(struct vdec_vp9_inst *inst,
+ struct vdec_fb *fb)
+{
+ struct vdec_fb_node *node;
+
+ if (!fb) {
+ mtk_vcodec_debug(inst, "fb == NULL");
+ return false;
+ }
+
+ node = list_first_entry_or_null(&inst->available_fb_node_list,
+ struct vdec_fb_node, list);
+ if (node) {
+ node->fb = fb;
+ list_move_tail(&node->list, &inst->fb_use_list);
+ } else {
+ mtk_vcodec_err(inst, "No free fb node");
+ return false;
+ }
+ return true;
+}
+
+static void vp9_reset(struct vdec_vp9_inst *inst)
+{
+ struct vdec_fb_node *node, *tmp;
+
+ list_for_each_entry_safe(node, tmp, &inst->fb_use_list, list)
+ list_move_tail(&node->list, &inst->fb_free_list);
+
+ vp9_free_all_sf_ref_fb(inst);
+ inst->vsi->sf_next_ref_fb_idx = vp9_get_sf_ref_fb(inst);
+
+ if (vpu_dec_reset(&inst->vpu))
+ mtk_vcodec_err(inst, "vp9_dec_vpu_reset failed");
+
+ /* Set the va again, since vpu_dec_reset will clear mv_buf in vpu */
+ inst->vsi->mv_buf.va = (unsigned long)inst->mv_buf.va;
+ inst->vsi->mv_buf.pa = (unsigned long)inst->mv_buf.dma_addr;
+ inst->vsi->mv_buf.sz = (unsigned int)inst->mv_buf.size;
+}
+
+static void init_all_fb_lists(struct vdec_vp9_inst *inst)
+{
+ int i;
+
+ INIT_LIST_HEAD(&inst->available_fb_node_list);
+ INIT_LIST_HEAD(&inst->fb_use_list);
+ INIT_LIST_HEAD(&inst->fb_free_list);
+ INIT_LIST_HEAD(&inst->fb_disp_list);
+
+ for (i = 0; i < ARRAY_SIZE(inst->dec_fb); i++) {
+ INIT_LIST_HEAD(&inst->dec_fb[i].list);
+ inst->dec_fb[i].fb = NULL;
+ list_add_tail(&inst->dec_fb[i].list,
+ &inst->available_fb_node_list);
+ }
+}
+
+static void get_pic_info(struct vdec_vp9_inst *inst, struct vdec_pic_info *pic)
+{
+ pic->y_bs_sz = inst->vsi->buf_sz_y_bs;
+ pic->c_bs_sz = inst->vsi->buf_sz_c_bs;
+ pic->y_len_sz = inst->vsi->buf_len_sz_y;
+ pic->c_len_sz = inst->vsi->buf_len_sz_c;
+
+ pic->pic_w = inst->vsi->pic_w;
+ pic->pic_h = inst->vsi->pic_h;
+ pic->buf_w = inst->vsi->buf_w;
+ pic->buf_h = inst->vsi->buf_h;
+
+ mtk_vcodec_debug(inst, "pic(%d, %d), buf(%d, %d)",
+ pic->pic_w, pic->pic_h, pic->buf_w, pic->buf_h);
+ mtk_vcodec_debug(inst, "Y(%d, %d), C(%d, %d)", pic->y_bs_sz,
+ pic->y_len_sz, pic->c_bs_sz, pic->c_len_sz);
+}
+
+static void get_disp_fb(struct vdec_vp9_inst *inst, struct vdec_fb **out_fb)
+{
+ *out_fb = vp9_rm_from_fb_disp_list(inst);
+ if (*out_fb)
+ (*out_fb)->status |= FB_ST_DISPLAY;
+}
+
+static void get_free_fb(struct vdec_vp9_inst *inst, struct vdec_fb **out_fb)
+{
+ struct vdec_fb_node *node;
+ struct vdec_fb *fb = NULL;
+
+ node = list_first_entry_or_null(&inst->fb_free_list,
+ struct vdec_fb_node, list);
+ if (node) {
+ list_move_tail(&node->list, &inst->available_fb_node_list);
+ fb = (struct vdec_fb *)node->fb;
+ fb->status |= FB_ST_FREE;
+ mtk_vcodec_debug(inst, "[FB] get free fb %p st=%d",
+ node->fb, fb->status);
+ } else {
+ mtk_vcodec_debug(inst, "[FB] there is no free fb");
+ }
+
+ *out_fb = fb;
+}
+
+static void vdec_vp9_deinit(unsigned long h_vdec)
+{
+ struct vdec_vp9_inst *inst = (struct vdec_vp9_inst *)h_vdec;
+ struct mtk_vcodec_mem *mem;
+ int ret = 0;
+
+ ret = vpu_dec_deinit(&inst->vpu);
+ if (ret)
+ mtk_vcodec_err(inst, "vpu_dec_deinit failed");
+
+ mem = &inst->mv_buf;
+ if (mem->va)
+ mtk_vcodec_mem_free(inst->ctx, mem);
+
+ vp9_free_all_sf_ref_fb(inst);
+ vp9_free_inst(inst);
+}
+
+static int vdec_vp9_init(struct mtk_vcodec_ctx *ctx, unsigned long *h_vdec)
+{
+ struct vdec_vp9_inst *inst;
+
+ inst = vp9_alloc_inst(ctx);
+ if (!inst)
+ return -ENOMEM;
+
+ inst->total_frm_cnt = 0;
+ inst->ctx = ctx;
+
+ inst->vpu.id = IPI_VDEC_VP9;
+ inst->vpu.dev = ctx->dev->vpu_plat_dev;
+ inst->vpu.ctx = ctx;
+ inst->vpu.handler = vpu_dec_ipi_handler;
+
+ if (vpu_dec_init(&inst->vpu)) {
+ mtk_vcodec_err(inst, "vp9_dec_vpu_init failed");
+ goto err_deinit_inst;
+ }
+
+ inst->vsi = (struct vdec_vp9_vsi *)inst->vpu.vsi;
+ init_all_fb_lists(inst);
+
+ (*h_vdec) = (unsigned long)inst;
+ return 0;
+
+err_deinit_inst:
+ vp9_free_inst(inst);
+
+ return -EINVAL;
+}
+
+static int vdec_vp9_decode(unsigned long h_vdec, struct mtk_vcodec_mem *bs,
+ struct vdec_fb *fb, bool *res_chg)
+{
+ int ret = 0;
+ struct vdec_vp9_inst *inst = (struct vdec_vp9_inst *)h_vdec;
+ struct vdec_vp9_vsi *vsi = inst->vsi;
+ u32 data[3];
+ int i;
+
+ *res_chg = false;
+
+ if ((bs == NULL) && (fb == NULL)) {
+ mtk_vcodec_debug(inst, "[EOS]");
+ vp9_reset(inst);
+ return ret;
+ }
+
+ if (bs == NULL) {
+ mtk_vcodec_err(inst, "bs == NULL");
+ return -EINVAL;
+ }
+
+ mtk_vcodec_debug(inst, "Input BS Size = %zu", bs->size);
+
+ while (1) {
+ struct vdec_fb *cur_fb = NULL;
+
+ data[0] = *((unsigned int *)bs->va);
+ data[1] = *((unsigned int *)(bs->va + 4));
+ data[2] = *((unsigned int *)(bs->va + 8));
+
+ vsi->bs = *bs;
+
+ if (fb)
+ vsi->fb = *fb;
+
+ if (!vsi->sf_init) {
+ unsigned int sf_bs_sz;
+ unsigned int sf_bs_off;
+ unsigned char *sf_bs_src;
+ unsigned char *sf_bs_dst;
+
+ sf_bs_sz = bs->size > VP9_SUPER_FRAME_BS_SZ ?
+ VP9_SUPER_FRAME_BS_SZ : bs->size;
+ sf_bs_off = VP9_SUPER_FRAME_BS_SZ - sf_bs_sz;
+ sf_bs_src = bs->va + bs->size - sf_bs_sz;
+ sf_bs_dst = vsi->sf_bs_buf + sf_bs_off;
+ memcpy(sf_bs_dst, sf_bs_src, sf_bs_sz);
+ } else {
+ if ((vsi->sf_frm_cnt > 0) &&
+ (vsi->sf_frm_idx < vsi->sf_frm_cnt)) {
+ unsigned int idx = vsi->sf_frm_idx;
+
+ memcpy((void *)bs->va,
+ (void *)(bs->va +
+ vsi->sf_frm_offset[idx]),
+ vsi->sf_frm_sz[idx]);
+ }
+ }
+ ret = vpu_dec_start(&inst->vpu, data, 3);
+ if (ret) {
+ mtk_vcodec_err(inst, "vpu_dec_start failed");
+ goto DECODE_ERROR;
+ }
+
+ if (vsi->resolution_changed) {
+ if (!vp9_alloc_work_buf(inst)) {
+ ret = -EINVAL;
+ goto DECODE_ERROR;
+ }
+ }
+
+ if (vsi->sf_frm_cnt > 0) {
+ cur_fb = &vsi->sf_ref_fb[vsi->sf_next_ref_fb_idx].fb;
+
+ if (vsi->sf_frm_idx < vsi->sf_frm_cnt)
+ inst->cur_fb = cur_fb;
+ else
+ inst->cur_fb = fb;
+ } else {
+ inst->cur_fb = fb;
+ }
+
+ vsi->frm_bufs[vsi->new_fb_idx].buf.fb = inst->cur_fb;
+ if (!vp9_is_sf_ref_fb(inst, inst->cur_fb))
+ vp9_add_to_fb_use_list(inst, inst->cur_fb);
+
+ mtk_vcodec_debug(inst, "[#pic %d]", vsi->frm_num);
+
+ if (vsi->show_existing_frame)
+ mtk_vcodec_debug(inst,
+ "drv->new_fb_idx=%d, drv->frm_to_show_idx=%d",
+ vsi->new_fb_idx, vsi->frm_to_show_idx);
+
+ if (vsi->show_existing_frame && (vsi->frm_to_show_idx <
+ VP9_MAX_FRM_BUF_NUM)) {
+ mtk_vcodec_err(inst,
+ "Skip Decode drv->new_fb_idx=%d, drv->frm_to_show_idx=%d",
+ vsi->new_fb_idx, vsi->frm_to_show_idx);
+
+ vp9_ref_cnt_fb(inst, &vsi->new_fb_idx,
+ vsi->frm_to_show_idx);
+ ret = -EINVAL;
+ goto DECODE_ERROR;
+ }
+
+ /* VPU assign the buffer pointer in its address space,
+ * reassign here
+ */
+ for (i = 0; i < ARRAY_SIZE(vsi->frm_refs); i++) {
+ unsigned int idx = vsi->frm_refs[i].idx;
+
+ vsi->frm_refs[i].buf = &vsi->frm_bufs[idx].buf;
+ }
+
+ if (vsi->resolution_changed) {
+ *res_chg = true;
+ mtk_vcodec_debug(inst, "VDEC_ST_RESOLUTION_CHANGED");
+
+ ret = 0;
+ goto DECODE_ERROR;
+ }
+
+ if (!vp9_decode_end_proc(inst)) {
+ mtk_vcodec_err(inst, "vp9_decode_end_proc failed");
+ ret = -EINVAL;
+ goto DECODE_ERROR;
+ }
+
+ if (vp9_is_last_sub_frm(inst))
+ break;
+
+ }
+ inst->total_frm_cnt++;
+
+DECODE_ERROR:
+ if (ret < 0)
+ vp9_add_to_fb_free_list(inst, fb);
+
+ return ret;
+}
+
+static void get_crop_info(struct vdec_vp9_inst *inst, struct v4l2_rect *cr)
+{
+ cr->left = 0;
+ cr->top = 0;
+ cr->width = inst->vsi->pic_w;
+ cr->height = inst->vsi->pic_h;
+ mtk_vcodec_debug(inst, "get crop info l=%d, t=%d, w=%d, h=%d\n",
+ cr->left, cr->top, cr->width, cr->height);
+}
+
+static int vdec_vp9_get_param(unsigned long h_vdec,
+ enum vdec_get_param_type type, void *out)
+{
+ struct vdec_vp9_inst *inst = (struct vdec_vp9_inst *)h_vdec;
+ int ret = 0;
+
+ switch (type) {
+ case GET_PARAM_DISP_FRAME_BUFFER:
+ get_disp_fb(inst, out);
+ break;
+ case GET_PARAM_FREE_FRAME_BUFFER:
+ get_free_fb(inst, out);
+ break;
+ case GET_PARAM_PIC_INFO:
+ get_pic_info(inst, out);
+ break;
+ case GET_PARAM_DPB_SIZE:
+ *((unsigned int *)out) = MAX_VP9_DPB_SIZE;
+ break;
+ case GET_PARAM_CROP_INFO:
+ get_crop_info(inst, out);
+ break;
+ default:
+ mtk_vcodec_err(inst, "not supported param type %d", type);
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+static const struct vdec_common_if vdec_vp9_if = {
+ .init = vdec_vp9_init,
+ .decode = vdec_vp9_decode,
+ .get_param = vdec_vp9_get_param,
+ .deinit = vdec_vp9_deinit,
+};
+
+const struct vdec_common_if *get_vp9_dec_comm_if(void);
+
+const struct vdec_common_if *get_vp9_dec_comm_if(void)
+{
+ return &vdec_vp9_if;
+}
diff --git a/drivers/media/platform/mtk-vcodec/vdec_drv_base.h b/drivers/media/platform/mtk-vcodec/vdec_drv_base.h
new file mode 100644
index 000000000000..7e4c1a92bbd8
--- /dev/null
+++ b/drivers/media/platform/mtk-vcodec/vdec_drv_base.h
@@ -0,0 +1,56 @@
+/*
+ * Copyright (c) 2016 MediaTek Inc.
+ * Author: PC Chen <pc.chen@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _VDEC_DRV_BASE_
+#define _VDEC_DRV_BASE_
+
+#include "mtk_vcodec_drv.h"
+
+#include "vdec_drv_if.h"
+
+struct vdec_common_if {
+ /**
+ * (*init)() - initialize decode driver
+ * @ctx : [in] mtk v4l2 context
+ * @h_vdec : [out] driver handle
+ */
+ int (*init)(struct mtk_vcodec_ctx *ctx, unsigned long *h_vdec);
+
+ /**
+ * (*decode)() - trigger decode
+ * @h_vdec : [in] driver handle
+ * @bs : [in] input bitstream
+ * @fb : [in] frame buffer to store decoded frame
+ * @res_chg : [out] resolution change happens
+ */
+ int (*decode)(unsigned long h_vdec, struct mtk_vcodec_mem *bs,
+ struct vdec_fb *fb, bool *res_chg);
+
+ /**
+ * (*get_param)() - get driver's parameter
+ * @h_vdec : [in] driver handle
+ * @type : [in] input parameter type
+ * @out : [out] buffer to store query result
+ */
+ int (*get_param)(unsigned long h_vdec, enum vdec_get_param_type type,
+ void *out);
+
+ /**
+ * (*deinit)() - deinitialize driver.
+ * @h_vdec : [in] driver handle to be deinit
+ */
+ void (*deinit)(unsigned long h_vdec);
+};
+
+#endif
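Each codec fills one of these ops tables and exposes it through a getter, as the VP9 code above does. A minimal sketch for a hypothetical codec (the foo names are placeholders, not part of the patch):

	static const struct vdec_common_if vdec_foo_if = {
		.init = vdec_foo_init,
		.decode = vdec_foo_decode,
		.get_param = vdec_foo_get_param,
		.deinit = vdec_foo_deinit,
	};

	const struct vdec_common_if *get_foo_dec_comm_if(void)
	{
		return &vdec_foo_if;
	}
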
diff --git a/drivers/media/platform/mtk-vcodec/vdec_drv_if.c b/drivers/media/platform/mtk-vcodec/vdec_drv_if.c
new file mode 100644
index 000000000000..5ffc468dd910
--- /dev/null
+++ b/drivers/media/platform/mtk-vcodec/vdec_drv_if.c
@@ -0,0 +1,122 @@
+/*
+ * Copyright (c) 2016 MediaTek Inc.
+ * Author: PC Chen <pc.chen@mediatek.com>
+ * Tiffany Lin <tiffany.lin@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+
+#include "vdec_drv_if.h"
+#include "mtk_vcodec_dec.h"
+#include "vdec_drv_base.h"
+#include "mtk_vcodec_dec_pm.h"
+#include "mtk_vpu.h"
+
+const struct vdec_common_if *get_h264_dec_comm_if(void);
+const struct vdec_common_if *get_vp8_dec_comm_if(void);
+const struct vdec_common_if *get_vp9_dec_comm_if(void);
+
+int vdec_if_init(struct mtk_vcodec_ctx *ctx, unsigned int fourcc)
+{
+ int ret = 0;
+
+ switch (fourcc) {
+ case V4L2_PIX_FMT_H264:
+ ctx->dec_if = get_h264_dec_comm_if();
+ break;
+ case V4L2_PIX_FMT_VP8:
+ ctx->dec_if = get_vp8_dec_comm_if();
+ break;
+ case V4L2_PIX_FMT_VP9:
+ ctx->dec_if = get_vp9_dec_comm_if();
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ mtk_vdec_lock(ctx);
+ mtk_vcodec_dec_clock_on(&ctx->dev->pm);
+ ret = ctx->dec_if->init(ctx, &ctx->drv_handle);
+ mtk_vcodec_dec_clock_off(&ctx->dev->pm);
+ mtk_vdec_unlock(ctx);
+
+ return ret;
+}
+
+int vdec_if_decode(struct mtk_vcodec_ctx *ctx, struct mtk_vcodec_mem *bs,
+ struct vdec_fb *fb, bool *res_chg)
+{
+ int ret = 0;
+
+ if (bs) {
+ if ((bs->dma_addr & 63) != 0) {
+ mtk_v4l2_err("bs dma_addr should 64 byte align");
+ return -EINVAL;
+ }
+ }
+
+ if (fb) {
+ if (((fb->base_y.dma_addr & 511) != 0) ||
+ ((fb->base_c.dma_addr & 511) != 0)) {
+ mtk_v4l2_err("frame buffer dma_addr should 512 byte align");
+ return -EINVAL;
+ }
+ }
+
+ if (ctx->drv_handle == 0)
+ return -EIO;
+
+ mtk_vdec_lock(ctx);
+
+ mtk_vcodec_set_curr_ctx(ctx->dev, ctx);
+ mtk_vcodec_dec_clock_on(&ctx->dev->pm);
+ enable_irq(ctx->dev->dec_irq);
+ ret = ctx->dec_if->decode(ctx->drv_handle, bs, fb, res_chg);
+ disable_irq(ctx->dev->dec_irq);
+ mtk_vcodec_dec_clock_off(&ctx->dev->pm);
+ mtk_vcodec_set_curr_ctx(ctx->dev, NULL);
+
+ mtk_vdec_unlock(ctx);
+
+ return ret;
+}
+
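The open-coded mask tests above could equally be written with the kernel's IS_ALIGNED() helper; a sketch of the bitstream check only (behavior unchanged, assuming IS_ALIGNED() from linux/kernel.h is already visible through the existing includes):

	if (bs && !IS_ALIGNED(bs->dma_addr, 64)) {
		mtk_v4l2_err("bs dma_addr should be 64-byte aligned");
		return -EINVAL;
	}
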
+int vdec_if_get_param(struct mtk_vcodec_ctx *ctx, enum vdec_get_param_type type,
+ void *out)
+{
+ int ret = 0;
+
+ if (ctx->drv_handle == 0)
+ return -EIO;
+
+ mtk_vdec_lock(ctx);
+ ret = ctx->dec_if->get_param(ctx->drv_handle, type, out);
+ mtk_vdec_unlock(ctx);
+
+ return ret;
+}
+
+void vdec_if_deinit(struct mtk_vcodec_ctx *ctx)
+{
+ if (ctx->drv_handle == 0)
+ return;
+
+ mtk_vdec_lock(ctx);
+ mtk_vcodec_dec_clock_on(&ctx->dev->pm);
+ ctx->dec_if->deinit(ctx->drv_handle);
+ mtk_vcodec_dec_clock_off(&ctx->dev->pm);
+ mtk_vdec_unlock(ctx);
+
+ ctx->drv_handle = 0;
+}
diff --git a/drivers/media/platform/mtk-vcodec/vdec_drv_if.h b/drivers/media/platform/mtk-vcodec/vdec_drv_if.h
new file mode 100644
index 000000000000..db6b5205ffb1
--- /dev/null
+++ b/drivers/media/platform/mtk-vcodec/vdec_drv_if.h
@@ -0,0 +1,101 @@
+/*
+ * Copyright (c) 2016 MediaTek Inc.
+ * Author: PC Chen <pc.chen@mediatek.com>
+ * Tiffany Lin <tiffany.lin@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _VDEC_DRV_IF_H_
+#define _VDEC_DRV_IF_H_
+
+#include "mtk_vcodec_drv.h"
+#include "mtk_vcodec_dec.h"
+#include "mtk_vcodec_util.h"
+
+
+/**
+ * struct vdec_fb_status - decoder frame buffer status
+ * @FB_ST_NORMAL : initial state
+ * @FB_ST_DISPLAY : frame buffer is ready to be displayed
+ * @FB_ST_FREE : frame buffer is not used by decoder any more
+ */
+enum vdec_fb_status {
+ FB_ST_NORMAL = 0,
+ FB_ST_DISPLAY = (1 << 0),
+ FB_ST_FREE = (1 << 1)
+};
+
+/* For GET_PARAM_DISP_FRAME_BUFFER and GET_PARAM_FREE_FRAME_BUFFER,
+ * the caller does not own the returned buffer. The buffer will not be
+ * released before vdec_if_deinit.
+ * GET_PARAM_DISP_FRAME_BUFFER : get next displayable frame buffer,
+ * struct vdec_fb**
+ * GET_PARAM_FREE_FRAME_BUFFER : get non-referenced frame buffer,
+ * struct vdec_fb**
+ * GET_PARAM_PIC_INFO : get picture info, struct vdec_pic_info*
+ * GET_PARAM_CROP_INFO : get crop info, struct v4l2_rect*
+ * GET_PARAM_DPB_SIZE : get dpb size, unsigned int*
+ */
+enum vdec_get_param_type {
+ GET_PARAM_DISP_FRAME_BUFFER,
+ GET_PARAM_FREE_FRAME_BUFFER,
+ GET_PARAM_PIC_INFO,
+ GET_PARAM_CROP_INFO,
+ GET_PARAM_DPB_SIZE
+};
+
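For illustration, querying the picture geometry through this interface might look like the sketch below (error handling elided; mtk_v4l2_debug() is assumed to come from mtk_vcodec_util.h):

	struct vdec_pic_info pic;

	if (vdec_if_get_param(ctx, GET_PARAM_PIC_INFO, &pic) == 0)
		mtk_v4l2_debug(1, "pic %ux%u buf %ux%u",
			       pic.pic_w, pic.pic_h, pic.buf_w, pic.buf_h);
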
+/**
+ * struct vdec_fb_node - decoder frame buffer node
+ * @list : list to hold this node
+ * @fb : pointer to a frame buffer (vdec_fb); fb may point to a frame buffer
+ * or a working buffer, and is used to maintain buffers in different states
+ */
+struct vdec_fb_node {
+ struct list_head list;
+ struct vdec_fb *fb;
+};
+
+/**
+ * vdec_if_init() - initialize decode driver
+ * @ctx : [in] v4l2 context
+ * @fourcc : [in] video format fourcc, V4L2_PIX_FMT_H264/VP8/VP9..
+ */
+int vdec_if_init(struct mtk_vcodec_ctx *ctx, unsigned int fourcc);
+
+/**
+ * vdec_if_deinit() - deinitialize decode driver
+ * @ctx : [in] v4l2 context
+ *
+ */
+void vdec_if_deinit(struct mtk_vcodec_ctx *ctx);
+
+/**
+ * vdec_if_decode() - trigger decode
+ * @ctx : [in] v4l2 context
+ * @bs : [in] input bitstream
+ * @fb : [in] frame buffer to store decoded frame; when NULL, it means
+ * parse header only
+ * @res_chg : [out] set when the current bs has a different picture
+ * width/height (resolution change)
+ * Note: To flush the decoder when reaching EOF, set input bitstream as NULL.
+ */
+int vdec_if_decode(struct mtk_vcodec_ctx *ctx, struct mtk_vcodec_mem *bs,
+ struct vdec_fb *fb, bool *res_chg);
+
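Putting the init, decode, and flush notes together, a simplified caller flow could look like the following sketch (not the actual mtk_vcodec_dec.c state machine):

	bool res_chg;

	if (vdec_if_init(ctx, V4L2_PIX_FMT_VP9))
		return -EINVAL;

	/* steady state: one bitstream buffer in, one frame buffer out */
	if (vdec_if_decode(ctx, bs, fb, &res_chg) == 0 && res_chg) {
		/* re-query GET_PARAM_PIC_INFO and reallocate capture buffers */
	}

	/* EOS: flush with a NULL bitstream so queued frames drain */
	vdec_if_decode(ctx, NULL, NULL, &res_chg);
	vdec_if_deinit(ctx);
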
+/**
+ * vdec_if_get_param() - get driver's parameter
+ * @ctx : [in] v4l2 context
+ * @type : [in] input parameter type
+ * @out : [out] buffer to store query result
+ */
+int vdec_if_get_param(struct mtk_vcodec_ctx *ctx, enum vdec_get_param_type type,
+ void *out);
+
+#endif
diff --git a/drivers/media/platform/mtk-vcodec/vdec_ipi_msg.h b/drivers/media/platform/mtk-vcodec/vdec_ipi_msg.h
new file mode 100644
index 000000000000..5a8a629f4ac9
--- /dev/null
+++ b/drivers/media/platform/mtk-vcodec/vdec_ipi_msg.h
@@ -0,0 +1,103 @@
+/*
+ * Copyright (c) 2016 MediaTek Inc.
+ * Author: PC Chen <pc.chen@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _VDEC_IPI_MSG_H_
+#define _VDEC_IPI_MSG_H_
+
+/**
+ * enum vdec_ipi_msgid - message id between AP and VPU
+ * @AP_IPIMSG_XXX : AP to VPU cmd message id
+ * @VPU_IPIMSG_XXX_ACK : VPU ack AP cmd message id
+ */
+enum vdec_ipi_msgid {
+ AP_IPIMSG_DEC_INIT = 0xA000,
+ AP_IPIMSG_DEC_START = 0xA001,
+ AP_IPIMSG_DEC_END = 0xA002,
+ AP_IPIMSG_DEC_DEINIT = 0xA003,
+ AP_IPIMSG_DEC_RESET = 0xA004,
+
+ VPU_IPIMSG_DEC_INIT_ACK = 0xB000,
+ VPU_IPIMSG_DEC_START_ACK = 0xB001,
+ VPU_IPIMSG_DEC_END_ACK = 0xB002,
+ VPU_IPIMSG_DEC_DEINIT_ACK = 0xB003,
+ VPU_IPIMSG_DEC_RESET_ACK = 0xB004,
+};
+
+/**
+ * struct vdec_ap_ipi_cmd - generic AP to VPU ipi command format
+ * @msg_id : vdec_ipi_msgid
+ * @vpu_inst_addr : VPU decoder instance address
+ */
+struct vdec_ap_ipi_cmd {
+ uint32_t msg_id;
+ uint32_t vpu_inst_addr;
+};
+
+/**
+ * struct vdec_vpu_ipi_ack - generic VPU to AP ipi command format
+ * @msg_id : vdec_ipi_msgid
+ * @status : VPU execution result
+ * @ap_inst_addr : AP video decoder instance address
+ */
+struct vdec_vpu_ipi_ack {
+ uint32_t msg_id;
+ int32_t status;
+ uint64_t ap_inst_addr;
+};
+
+/**
+ * struct vdec_ap_ipi_init - for AP_IPIMSG_DEC_INIT
+ * @msg_id : AP_IPIMSG_DEC_INIT
+ * @reserved : Reserved field
+ * @ap_inst_addr : AP video decoder instance address
+ */
+struct vdec_ap_ipi_init {
+ uint32_t msg_id;
+ uint32_t reserved;
+ uint64_t ap_inst_addr;
+};
+
+/**
+ * struct vdec_ap_ipi_dec_start - for AP_IPIMSG_DEC_START
+ * @msg_id : AP_IPIMSG_DEC_START
+ * @vpu_inst_addr : VPU decoder instance address
+ * @data : Header info
+ * H264 decoder [0]:buf_sz [1]:nal_start
+ * VP8 decoder [0]:width/height
+ * VP9 decoder [0]:profile, [1][2] width/height
+ * @reserved : Reserved field
+ */
+struct vdec_ap_ipi_dec_start {
+ uint32_t msg_id;
+ uint32_t vpu_inst_addr;
+ uint32_t data[3];
+ uint32_t reserved;
+};
+
+/**
+ * struct vdec_vpu_ipi_init_ack - for VPU_IPIMSG_DEC_INIT_ACK
+ * @msg_id : VPU_IPIMSG_DEC_INIT_ACK
+ * @status : VPU execution result
+ * @ap_inst_addr : AP vcodec_vpu_inst instance address
+ * @vpu_inst_addr : VPU decoder instance address
+ */
+struct vdec_vpu_ipi_init_ack {
+ uint32_t msg_id;
+ int32_t status;
+ uint64_t ap_inst_addr;
+ uint32_t vpu_inst_addr;
+};
+
+#endif
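Each AP_IPIMSG_* command is paired with the matching VPU_IPIMSG_*_ACK. A hypothetical trace of decoder initialization, using the structures above:

	/* AP  -> VPU  struct vdec_ap_ipi_init:
	 *             { .msg_id = AP_IPIMSG_DEC_INIT, .ap_inst_addr = <AP inst> }
	 * VPU -> AP   struct vdec_vpu_ipi_init_ack:
	 *             { .msg_id = VPU_IPIMSG_DEC_INIT_ACK, .status = 0,
	 *               .ap_inst_addr = <AP inst>, .vpu_inst_addr = <VPU inst> }
	 * The AP side then maps vpu_inst_addr into kernel space with
	 * vpu_mapping_dm_addr() to reach the shared vsi (see vdec_vpu_if.c).
	 */
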
diff --git a/drivers/media/platform/mtk-vcodec/vdec_vpu_if.c b/drivers/media/platform/mtk-vcodec/vdec_vpu_if.c
new file mode 100644
index 000000000000..5a24c51aebb7
--- /dev/null
+++ b/drivers/media/platform/mtk-vcodec/vdec_vpu_if.c
@@ -0,0 +1,170 @@
+/*
+ * Copyright (c) 2016 MediaTek Inc.
+ * Author: PC Chen <pc.chen@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "mtk_vcodec_drv.h"
+#include "mtk_vcodec_util.h"
+#include "vdec_ipi_msg.h"
+#include "vdec_vpu_if.h"
+
+static void handle_init_ack_msg(struct vdec_vpu_ipi_init_ack *msg)
+{
+ struct vdec_vpu_inst *vpu = (struct vdec_vpu_inst *)
+ (unsigned long)msg->ap_inst_addr;
+
+ mtk_vcodec_debug(vpu, "+ ap_inst_addr = 0x%llx", msg->ap_inst_addr);
+
+ /* mapping VPU address to kernel virtual address */
+ /* the content in vsi is initialized to 0 in VPU */
+ vpu->vsi = vpu_mapping_dm_addr(vpu->dev, msg->vpu_inst_addr);
+ vpu->inst_addr = msg->vpu_inst_addr;
+
+ mtk_vcodec_debug(vpu, "- vpu_inst_addr = 0x%x", vpu->inst_addr);
+}
+
+/*
+ * This function runs in interrupt context; it is invoked when an IPI message
+ * arrives from the VPU.
+ */
+void vpu_dec_ipi_handler(void *data, unsigned int len, void *priv)
+{
+ struct vdec_vpu_ipi_ack *msg = data;
+ struct vdec_vpu_inst *vpu = (struct vdec_vpu_inst *)
+ (unsigned long)msg->ap_inst_addr;
+
+ mtk_vcodec_debug(vpu, "+ id=%X", msg->msg_id);
+
+ if (msg->status == 0) {
+ switch (msg->msg_id) {
+ case VPU_IPIMSG_DEC_INIT_ACK:
+ handle_init_ack_msg(data);
+ break;
+
+ case VPU_IPIMSG_DEC_START_ACK:
+ case VPU_IPIMSG_DEC_END_ACK:
+ case VPU_IPIMSG_DEC_DEINIT_ACK:
+ case VPU_IPIMSG_DEC_RESET_ACK:
+ break;
+
+ default:
+ mtk_vcodec_err(vpu, "invalid msg=%X", msg->msg_id);
+ break;
+ }
+ }
+
+ mtk_vcodec_debug(vpu, "- id=%X", msg->msg_id);
+ vpu->failure = msg->status;
+ vpu->signaled = 1;
+}
+
+static int vcodec_vpu_send_msg(struct vdec_vpu_inst *vpu, void *msg, int len)
+{
+ int err;
+ uint32_t msg_id = *(uint32_t *)msg;
+
+ mtk_vcodec_debug(vpu, "id=%X", msg_id);
+
+ vpu->failure = 0;
+ vpu->signaled = 0;
+
+ err = vpu_ipi_send(vpu->dev, vpu->id, msg, len);
+ if (err) {
+ mtk_vcodec_err(vpu, "send fail vpu_id=%d msg_id=%X status=%d",
+ vpu->id, msg_id, err);
+ return err;
+ }
+
+ return vpu->failure;
+}
+
+static int vcodec_send_ap_ipi(struct vdec_vpu_inst *vpu, unsigned int msg_id)
+{
+ struct vdec_ap_ipi_cmd msg;
+ int err = 0;
+
+ mtk_vcodec_debug(vpu, "+ id=%X", msg_id);
+
+ memset(&msg, 0, sizeof(msg));
+ msg.msg_id = msg_id;
+ msg.vpu_inst_addr = vpu->inst_addr;
+
+ err = vcodec_vpu_send_msg(vpu, &msg, sizeof(msg));
+ mtk_vcodec_debug(vpu, "- id=%X ret=%d", msg_id, err);
+ return err;
+}
+
+int vpu_dec_init(struct vdec_vpu_inst *vpu)
+{
+ struct vdec_ap_ipi_init msg;
+ int err;
+
+ mtk_vcodec_debug_enter(vpu);
+
+ init_waitqueue_head(&vpu->wq);
+
+ err = vpu_ipi_register(vpu->dev, vpu->id, vpu->handler, "vdec", NULL);
+ if (err != 0) {
+ mtk_vcodec_err(vpu, "vpu_ipi_register fail status=%d", err);
+ return err;
+ }
+
+ memset(&msg, 0, sizeof(msg));
+ msg.msg_id = AP_IPIMSG_DEC_INIT;
+ msg.ap_inst_addr = (unsigned long)vpu;
+
+ mtk_vcodec_debug(vpu, "vdec_inst=%p", vpu);
+
+ err = vcodec_vpu_send_msg(vpu, (void *)&msg, sizeof(msg));
+ mtk_vcodec_debug(vpu, "- ret=%d", err);
+ return err;
+}
+
+int vpu_dec_start(struct vdec_vpu_inst *vpu, uint32_t *data, unsigned int len)
+{
+ struct vdec_ap_ipi_dec_start msg;
+ int i;
+ int err = 0;
+
+ mtk_vcodec_debug_enter(vpu);
+
+ if (len > ARRAY_SIZE(msg.data)) {
+ mtk_vcodec_err(vpu, "invalid len = %d\n", len);
+ return -EINVAL;
+ }
+
+ memset(&msg, 0, sizeof(msg));
+ msg.msg_id = AP_IPIMSG_DEC_START;
+ msg.vpu_inst_addr = vpu->inst_addr;
+
+ for (i = 0; i < len; i++)
+ msg.data[i] = data[i];
+
+ err = vcodec_vpu_send_msg(vpu, (void *)&msg, sizeof(msg));
+ mtk_vcodec_debug(vpu, "- ret=%d", err);
+ return err;
+}
+
+int vpu_dec_end(struct vdec_vpu_inst *vpu)
+{
+ return vcodec_send_ap_ipi(vpu, AP_IPIMSG_DEC_END);
+}
+
+int vpu_dec_deinit(struct vdec_vpu_inst *vpu)
+{
+ return vcodec_send_ap_ipi(vpu, AP_IPIMSG_DEC_DEINIT);
+}
+
+int vpu_dec_reset(struct vdec_vpu_inst *vpu)
+{
+ return vcodec_send_ap_ipi(vpu, AP_IPIMSG_DEC_RESET);
+}
diff --git a/drivers/media/platform/mtk-vcodec/vdec_vpu_if.h b/drivers/media/platform/mtk-vcodec/vdec_vpu_if.h
new file mode 100644
index 000000000000..0dc9ed01fffe
--- /dev/null
+++ b/drivers/media/platform/mtk-vcodec/vdec_vpu_if.h
@@ -0,0 +1,96 @@
+/*
+ * Copyright (c) 2016 MediaTek Inc.
+ * Author: PC Chen <pc.chen@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _VDEC_VPU_IF_H_
+#define _VDEC_VPU_IF_H_
+
+#include "mtk_vpu.h"
+
+/**
+ * struct vdec_vpu_inst - VPU instance for video codec
+ * @id : ipi id for each decoder
+ * @vsi : driver structure allocated by VPU side and shared to AP side
+ * for control and info share
+ * @failure : VPU execution result status, 0: success, others: fail
+ * @inst_addr : VPU decoder instance address
+ * @signaled : 1 - Host has received ack message from VPU, 0 - not received
+ * @ctx : context for v4l2 layer integration
+ * @dev : platform device of VPU
+ * @wq : wait queue to wait VPU message ack
+ * @handler : ipi handler for each decoder
+ */
+struct vdec_vpu_inst {
+ enum ipi_id id;
+ void *vsi;
+ int32_t failure;
+ uint32_t inst_addr;
+ unsigned int signaled;
+ struct mtk_vcodec_ctx *ctx;
+ struct platform_device *dev;
+ wait_queue_head_t wq;
+ ipi_handler_t handler;
+};
+
+/**
+ * vpu_dec_init - init decoder instance and allocate required resource in VPU.
+ *
+ * @vpu: instance for vdec_vpu_inst
+ */
+int vpu_dec_init(struct vdec_vpu_inst *vpu);
+
+/**
+ * vpu_dec_start - start decoding, basically the function will be invoked once
+ * every frame.
+ *
+ * @vpu : instance for vdec_vpu_inst
+ * @data: meta data to pass bitstream info to VPU decoder
+ * @len : meta data length
+ */
+int vpu_dec_start(struct vdec_vpu_inst *vpu, uint32_t *data, unsigned int len);
+
+/**
+ * vpu_dec_end - end decoding; basically the function is invoked once the
+ * HW decode-done interrupt has been received successfully. The
+ * decoder in VPU will continue to do reference frame management
+ * and check if there is a new decoded frame available to display.
+ *
+ * @vpu : instance for vdec_vpu_inst
+ */
+int vpu_dec_end(struct vdec_vpu_inst *vpu);
+
+/**
+ * vpu_dec_deinit - deinit decoder instance and free resources in VPU.
+ *
+ * @vpu: instance for vdec_vpu_inst
+ */
+int vpu_dec_deinit(struct vdec_vpu_inst *vpu);
+
+/**
+ * vpu_dec_reset - reset decoder; used to flush the decoder at end of
+ * stream or on seek. Remaining undisplayed frames will be
+ * pushed to display.
+ *
+ * @vpu: instance for vdec_vpu_inst
+ */
+int vpu_dec_reset(struct vdec_vpu_inst *vpu);
+
+/**
+ * vpu_dec_ipi_handler - Handler for VPU ipi message.
+ *
+ * @data: ipi message
+ * @len : length of ipi message
+ * @priv: callback private data which is passed by decoder when register.
+ */
+void vpu_dec_ipi_handler(void *data, unsigned int len, void *priv);
+
+#endif
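Taken together, one decoder instance drives these calls in the order below; a sketch mirroring the VP9 init path earlier in this patch (field values are the ones used there):

	struct vdec_vpu_inst vpu = {
		.id = IPI_VDEC_VP9,
		.dev = ctx->dev->vpu_plat_dev,
		.ctx = ctx,
		.handler = vpu_dec_ipi_handler,
	};

	if (vpu_dec_init(&vpu))		/* register IPI + AP_IPIMSG_DEC_INIT */
		return -EINVAL;

	vpu_dec_start(&vpu, data, 3);	/* per frame: kick the hardware */
	vpu_dec_end(&vpu);		/* per frame: after decode-done IRQ */

	vpu_dec_deinit(&vpu);		/* teardown */
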
diff --git a/drivers/media/platform/mtk-vpu/mtk_vpu.c b/drivers/media/platform/mtk-vpu/mtk_vpu.c
index c9bf58c97878..463b69c934be 100644
--- a/drivers/media/platform/mtk-vpu/mtk_vpu.c
+++ b/drivers/media/platform/mtk-vpu/mtk_vpu.c
@@ -134,6 +134,8 @@ struct vpu_wdt {
*
* @signaled: the signal of vpu initialization completed
* @fw_ver: VPU firmware version
+ * @dec_capability: decoder capability which is not used for now and
+ * the value is reserved for future use
* @enc_capability: encoder capability which is not used for now and
* the value is reserved for future use
* @wq: wait queue for VPU initialization status
@@ -141,6 +143,7 @@ struct vpu_wdt {
struct vpu_run {
u32 signaled;
char fw_ver[VPU_FW_VER_LEN];
+ unsigned int dec_capability;
unsigned int enc_capability;
wait_queue_head_t wq;
};
@@ -415,6 +418,14 @@ int vpu_wdt_reg_handler(struct platform_device *pdev,
}
EXPORT_SYMBOL_GPL(vpu_wdt_reg_handler);
+unsigned int vpu_get_vdec_hw_capa(struct platform_device *pdev)
+{
+ struct mtk_vpu *vpu = platform_get_drvdata(pdev);
+
+ return vpu->run.dec_capability;
+}
+EXPORT_SYMBOL_GPL(vpu_get_vdec_hw_capa);
+
unsigned int vpu_get_venc_hw_capa(struct platform_device *pdev)
{
struct mtk_vpu *vpu = platform_get_drvdata(pdev);
@@ -523,9 +534,9 @@ static int load_requested_vpu(struct mtk_vpu *vpu,
int vpu_load_firmware(struct platform_device *pdev)
{
- struct mtk_vpu *vpu = platform_get_drvdata(pdev);
+ struct mtk_vpu *vpu;
struct device *dev = &pdev->dev;
- struct vpu_run *run = &vpu->run;
+ struct vpu_run *run;
const struct firmware *vpu_fw = NULL;
int ret;
@@ -534,6 +545,9 @@ int vpu_load_firmware(struct platform_device *pdev)
return -EINVAL;
}
+ vpu = platform_get_drvdata(pdev);
+ run = &vpu->run;
+
mutex_lock(&vpu->vpu_mutex);
if (vpu->fw_loaded) {
mutex_unlock(&vpu->vpu_mutex);
@@ -600,6 +614,7 @@ static void vpu_init_ipi_handler(void *data, unsigned int len, void *priv)
vpu->run.signaled = run->signaled;
strncpy(vpu->run.fw_ver, run->fw_ver, VPU_FW_VER_LEN);
+ vpu->run.dec_capability = run->dec_capability;
vpu->run.enc_capability = run->enc_capability;
wake_up_interruptible(&vpu->run.wq);
}
@@ -674,7 +689,7 @@ static int vpu_alloc_ext_mem(struct mtk_vpu *vpu, u32 fw_type)
GFP_KERNEL);
if (!vpu->extmem[fw_type].va) {
dev_err(dev, "Failed to allocate the extended program memory\n");
- return PTR_ERR(vpu->extmem[fw_type].va);
+ return -ENOMEM;
}
/* Disable extend0. Enable extend1 */
diff --git a/drivers/media/platform/mtk-vpu/mtk_vpu.h b/drivers/media/platform/mtk-vpu/mtk_vpu.h
index 5ab37f04bdfd..aec0268be3d0 100644
--- a/drivers/media/platform/mtk-vpu/mtk_vpu.h
+++ b/drivers/media/platform/mtk-vpu/mtk_vpu.h
@@ -31,23 +31,41 @@ typedef void (*ipi_handler_t) (void *data,
* enum ipi_id - the id of inter-processor interrupt
*
* @IPI_VPU_INIT: The interrupt from vpu is to notfiy kernel
- VPU initialization completed.
- IPI_VPU_INIT is sent from VPU when firmware is
- loaded. AP doesn't need to send IPI_VPU_INIT
- command to VPU.
- For other IPI below, AP should send the request
- to VPU to trigger the interrupt.
+ * VPU initialization completed.
+ * IPI_VPU_INIT is sent from VPU when firmware is
+ * loaded. AP doesn't need to send IPI_VPU_INIT
+ * command to VPU.
+ * For other IPI below, AP should send the request
+ * to VPU to trigger the interrupt.
+ * @IPI_VDEC_H264: The interrupt from vpu is to notify kernel to
+ * handle H264 video decoder job, and vice versa.
+ * Decode output format is always MT21 no matter what
+ * the input format is.
+ * @IPI_VDEC_VP8: The interrupt from vpu is to notify kernel to
+ * handle VP8 video decoder job, and vice versa.
+ * Decode output format is always MT21 no matter what
+ * the input format is.
+ * @IPI_VDEC_VP9: The interrupt from vpu is to notify kernel to
+ * handle VP9 video decoder job, and vice versa.
+ * Decode output format is always MT21 no matter what
+ * the input format is.
* @IPI_VENC_H264: The interrupt from vpu is to notify kernel to
- handle H264 video encoder job, and vice versa.
+ * handle H264 video encoder job, and vice versa.
* @IPI_VENC_VP8: The interrupt fro vpu is to notify kernel to
- handle VP8 video encoder job,, and vice versa.
+ * handle VP8 video encoder job, and vice versa.
+ * @IPI_MDP: The interrupt from vpu is to notify kernel to
+ * handle MDP (Media Data Path) job, and vice versa.
* @IPI_MAX: The maximum IPI number
*/
enum ipi_id {
IPI_VPU_INIT = 0,
+ IPI_VDEC_H264,
+ IPI_VDEC_VP8,
+ IPI_VDEC_VP9,
IPI_VENC_H264,
IPI_VENC_VP8,
+ IPI_MDP,
IPI_MAX,
};
@@ -55,10 +73,14 @@ enum ipi_id {
* enum rst_id - reset id to register reset function for VPU watchdog timeout
*
* @VPU_RST_ENC: encoder reset id
+ * @VPU_RST_DEC: decoder reset id
+ * @VPU_RST_MDP: MDP (Media Data Path) reset id
* @VPU_RST_MAX: maximum reset id
*/
enum rst_id {
VPU_RST_ENC,
+ VPU_RST_DEC,
+ VPU_RST_MDP,
VPU_RST_MAX,
};
@@ -125,6 +147,16 @@ struct platform_device *vpu_get_plat_device(struct platform_device *pdev);
int vpu_wdt_reg_handler(struct platform_device *pdev,
void vpu_wdt_reset_func(void *),
void *priv, enum rst_id id);
+
+/**
+ * vpu_get_vdec_hw_capa - get video decoder hardware capability
+ *
+ * @pdev: VPU platform device
+ *
+ * Return: video decoder hardware capability
+ **/
+unsigned int vpu_get_vdec_hw_capa(struct platform_device *pdev);
+
/**
* vpu_get_venc_hw_capa - get video encoder hardware capability
*
diff --git a/drivers/media/platform/mx2_emmaprp.c b/drivers/media/platform/mx2_emmaprp.c
index e68d271b10af..03e47e0f778d 100644
--- a/drivers/media/platform/mx2_emmaprp.c
+++ b/drivers/media/platform/mx2_emmaprp.c
@@ -724,10 +724,10 @@ static int emmaprp_buf_prepare(struct vb2_buffer *vb)
q_data = get_q_data(ctx, vb->vb2_queue->type);
if (vb2_plane_size(vb, 0) < q_data->sizeimage) {
- dprintk(ctx->dev, "%s data will not fit into plane"
- "(%lu < %lu)\n", __func__,
- vb2_plane_size(vb, 0),
- (long)q_data->sizeimage);
+ dprintk(ctx->dev,
+ "%s data will not fit into plane(%lu < %lu)\n",
+ __func__, vb2_plane_size(vb, 0),
+ (long)q_data->sizeimage);
return -EINVAL;
}
@@ -937,7 +937,7 @@ static int emmaprp_probe(struct platform_device *pdev)
snprintf(vfd->name, sizeof(vfd->name), "%s", emmaprp_videodev.name);
pcdev->vfd = vfd;
v4l2_info(&pcdev->v4l2_dev, EMMAPRP_MODULE_NAME
- " Device registered as /dev/video%d\n", vfd->num);
+ " Device registered as /dev/video%d\n", vfd->num);
platform_set_drvdata(pdev, pcdev);
diff --git a/drivers/media/platform/omap/omap_vout.c b/drivers/media/platform/omap/omap_vout.c
index a31b95cb3b09..4d29860d27b4 100644
--- a/drivers/media/platform/omap/omap_vout.c
+++ b/drivers/media/platform/omap/omap_vout.c
@@ -408,8 +408,8 @@ static int omapvid_setup_overlay(struct omap_vout_device *vout,
v4l2_dbg(1, debug, &vout->vid_dev->v4l2_dev,
"%s enable=%d addr=%pad width=%d\n height=%d color_mode=%d\n"
"rotation=%d mirror=%d posx=%d posy=%d out_width = %d \n"
- "out_height=%d rotation_type=%d screen_width=%d\n",
- __func__, ovl->is_enabled(ovl), &info.paddr, info.width, info.height,
+ "out_height=%d rotation_type=%d screen_width=%d\n", __func__,
+ ovl->is_enabled(ovl), &info.paddr, info.width, info.height,
info.color_mode, info.rotation, info.mirror, info.pos_x,
info.pos_y, info.out_width, info.out_height, info.rotation_type,
info.screen_width);
@@ -791,7 +791,8 @@ static int omap_vout_buffer_prepare(struct videobuf_queue *q,
dma_addr = dma_map_single(vout->vid_dev->v4l2_dev.dev, (void *) addr,
size, DMA_TO_DEVICE);
if (dma_mapping_error(vout->vid_dev->v4l2_dev.dev, dma_addr))
- v4l2_err(&vout->vid_dev->v4l2_dev, "dma_map_single failed\n");
+ v4l2_err(&vout->vid_dev->v4l2_dev,
+ "dma_map_single failed\n");
vout->queued_buf_addr[vb->i] = (u8 *)vout->buf_phy_addr[vb->i];
}
@@ -1657,8 +1658,8 @@ static int vidioc_streamoff(struct file *file, void *fh, enum v4l2_buf_type i)
/* Turn of the pipeline */
ret = omapvid_apply_changes(vout);
if (ret)
- v4l2_err(&vout->vid_dev->v4l2_dev, "failed to change mode in"
- " streamoff\n");
+ v4l2_err(&vout->vid_dev->v4l2_dev,
+ "failed to change mode in streamoff\n");
INIT_LIST_HEAD(&vout->dma_queue);
ret = videobuf_streamoff(&vout->vbq);
@@ -1858,8 +1859,8 @@ static int __init omap_vout_setup_video_data(struct omap_vout_device *vout)
vfd = vout->vfd = video_device_alloc();
if (!vfd) {
- printk(KERN_ERR VOUT_NAME ": could not allocate"
- " video device struct\n");
+ printk(KERN_ERR VOUT_NAME
+ ": could not allocate video device struct\n");
v4l2_ctrl_handler_free(hdl);
return -ENOMEM;
}
@@ -1984,16 +1985,17 @@ static int __init omap_vout_create_video_devices(struct platform_device *pdev)
*/
vfd = vout->vfd;
if (video_register_device(vfd, VFL_TYPE_GRABBER, -1) < 0) {
- dev_err(&pdev->dev, ": Could not register "
- "Video for Linux device\n");
+ dev_err(&pdev->dev,
+ ": Could not register Video for Linux device\n");
vfd->minor = -1;
ret = -ENODEV;
goto error2;
}
video_set_drvdata(vfd, vout);
- dev_info(&pdev->dev, ": registered and initialized"
- " video device %d\n", vfd->minor);
+ dev_info(&pdev->dev,
+ ": registered and initialized video device %d\n",
+ vfd->minor);
if (k == (pdev->num_resources - 1))
return 0;
diff --git a/drivers/media/platform/omap/omap_vout_vrfb.c b/drivers/media/platform/omap/omap_vout_vrfb.c
index b8638e4e1627..92c4e1826356 100644
--- a/drivers/media/platform/omap/omap_vout_vrfb.c
+++ b/drivers/media/platform/omap/omap_vout_vrfb.c
@@ -139,8 +139,9 @@ int omap_vout_setup_vrfb_bufs(struct platform_device *pdev, int vid_num,
(void *) &vout->vrfb_dma_tx, &vout->vrfb_dma_tx.dma_ch);
if (ret < 0) {
vout->vrfb_dma_tx.req_status = DMA_CHAN_NOT_ALLOTED;
- dev_info(&pdev->dev, ": failed to allocate DMA Channel for"
- " video%d\n", vfd->minor);
+ dev_info(&pdev->dev,
+ ": failed to allocate DMA Channel for video%d\n",
+ vfd->minor);
}
init_waitqueue_head(&vout->vrfb_dma_tx.wait);
diff --git a/drivers/media/platform/omap3isp/isp.c b/drivers/media/platform/omap3isp/isp.c
index 0321d84addc7..084ecf4aa9a4 100644
--- a/drivers/media/platform/omap3isp/isp.c
+++ b/drivers/media/platform/omap3isp/isp.c
@@ -480,8 +480,8 @@ void omap3isp_hist_dma_done(struct isp_device *isp)
omap3isp_stat_pcr_busy(&isp->isp_hist)) {
/* Histogram cannot be enabled in this frame anymore */
atomic_set(&isp->isp_hist.buf_err, 1);
- dev_dbg(isp->dev, "hist: Out of synchronization with "
- "CCDC. Ignoring next buffer.\n");
+ dev_dbg(isp->dev,
+ "hist: Out of synchronization with CCDC. Ignoring next buffer.\n");
}
}
@@ -2117,23 +2117,18 @@ static int isp_of_parse_nodes(struct device *dev,
struct isp_async_subdev *isd;
isd = devm_kzalloc(dev, sizeof(*isd), GFP_KERNEL);
- if (!isd) {
- of_node_put(node);
- return -ENOMEM;
- }
+ if (!isd)
+ goto error;
notifier->subdevs[notifier->num_subdevs] = &isd->asd;
- if (isp_of_parse_node(dev, node, isd)) {
- of_node_put(node);
- return -EINVAL;
- }
+ if (isp_of_parse_node(dev, node, isd))
+ goto error;
isd->asd.match.of.node = of_graph_get_remote_port_parent(node);
- of_node_put(node);
if (!isd->asd.match.of.node) {
dev_warn(dev, "bad remote port parent\n");
- return -EINVAL;
+ goto error;
}
isd->asd.match_type = V4L2_ASYNC_MATCH_OF;
@@ -2141,6 +2136,10 @@ static int isp_of_parse_nodes(struct device *dev,
}
return notifier->num_subdevs;
+
+error:
+ of_node_put(node);
+ return -EINVAL;
}
static int isp_subdev_notifier_bound(struct v4l2_async_notifier *async,
diff --git a/drivers/media/platform/omap3isp/ispccdc.c b/drivers/media/platform/omap3isp/ispccdc.c
index 882310eb45cc..7207558d722c 100644
--- a/drivers/media/platform/omap3isp/ispccdc.c
+++ b/drivers/media/platform/omap3isp/ispccdc.c
@@ -151,8 +151,8 @@ static int ccdc_lsc_validate_config(struct isp_ccdc_device *ccdc,
}
if (lsc_cfg->offset & 3) {
- dev_dbg(isp->dev, "CCDC: LSC: Offset must be a multiple of "
- "4\n");
+ dev_dbg(isp->dev,
+ "CCDC: LSC: Offset must be a multiple of 4\n");
return -EINVAL;
}
@@ -416,8 +416,9 @@ static int ccdc_lsc_config(struct isp_ccdc_device *ccdc,
return 0;
if (update != (OMAP3ISP_CCDC_CONFIG_LSC | OMAP3ISP_CCDC_TBL_LSC)) {
- dev_dbg(to_device(ccdc), "%s: Both LSC configuration and table "
- "need to be supplied\n", __func__);
+ dev_dbg(to_device(ccdc),
+ "%s: Both LSC configuration and table need to be supplied\n",
+ __func__);
return -EINVAL;
}
diff --git a/drivers/media/platform/omap3isp/ispcsi2.c b/drivers/media/platform/omap3isp/ispcsi2.c
index f75a1be29d84..7dae2fe0d42d 100644
--- a/drivers/media/platform/omap3isp/ispcsi2.c
+++ b/drivers/media/platform/omap3isp/ispcsi2.c
@@ -753,8 +753,8 @@ void omap3isp_csi2_isr(struct isp_csi2_device *csi2)
ISPCSI2_PHY_IRQSTATUS);
isp_reg_writel(isp, cpxio1_irqstatus,
csi2->regs1, ISPCSI2_PHY_IRQSTATUS);
- dev_dbg(isp->dev, "CSI2: ComplexIO Error IRQ "
- "%x\n", cpxio1_irqstatus);
+ dev_dbg(isp->dev, "CSI2: ComplexIO Error IRQ %x\n",
+ cpxio1_irqstatus);
pipe->error = true;
}
@@ -763,13 +763,8 @@ void omap3isp_csi2_isr(struct isp_csi2_device *csi2)
ISPCSI2_IRQSTATUS_ECC_NO_CORRECTION_IRQ |
ISPCSI2_IRQSTATUS_COMPLEXIO2_ERR_IRQ |
ISPCSI2_IRQSTATUS_FIFO_OVF_IRQ)) {
- dev_dbg(isp->dev, "CSI2 Err:"
- " OCP:%d,"
- " Short_pack:%d,"
- " ECC:%d,"
- " CPXIO2:%d,"
- " FIFO_OVF:%d,"
- "\n",
+ dev_dbg(isp->dev,
+ "CSI2 Err: OCP:%d, Short_pack:%d, ECC:%d, CPXIO2:%d, FIFO_OVF:%d,\n",
(csi2_irqstatus &
ISPCSI2_IRQSTATUS_OCP_ERR_IRQ) ? 1 : 0,
(csi2_irqstatus &
diff --git a/drivers/media/platform/omap3isp/ispcsiphy.c b/drivers/media/platform/omap3isp/ispcsiphy.c
index 495447d66cfd..871d4fe09c7f 100644
--- a/drivers/media/platform/omap3isp/ispcsiphy.c
+++ b/drivers/media/platform/omap3isp/ispcsiphy.c
@@ -267,8 +267,8 @@ int omap3isp_csiphy_acquire(struct isp_csiphy *phy)
int rval;
if (phy->vdd == NULL) {
- dev_err(phy->isp->dev, "Power regulator for CSI PHY not "
- "available\n");
+ dev_err(phy->isp->dev,
+ "Power regulator for CSI PHY not available\n");
return -ENODEV;
}
diff --git a/drivers/media/platform/omap3isp/isph3a_aewb.c b/drivers/media/platform/omap3isp/isph3a_aewb.c
index ccaf92f39236..d44626f20ac6 100644
--- a/drivers/media/platform/omap3isp/isph3a_aewb.c
+++ b/drivers/media/platform/omap3isp/isph3a_aewb.c
@@ -304,8 +304,8 @@ int omap3isp_h3a_aewb_init(struct isp_device *isp)
aewb_recover_cfg = devm_kzalloc(isp->dev, sizeof(*aewb_recover_cfg),
GFP_KERNEL);
if (!aewb_recover_cfg) {
- dev_err(aewb->isp->dev, "AEWB: cannot allocate memory for "
- "recover configuration.\n");
+ dev_err(aewb->isp->dev,
+ "AEWB: cannot allocate memory for recover configuration.\n");
return -ENOMEM;
}
@@ -321,8 +321,8 @@ int omap3isp_h3a_aewb_init(struct isp_device *isp)
aewb_recover_cfg->subsample_hor_inc = OMAP3ISP_AEWB_MIN_SUB_INC;
if (h3a_aewb_validate_params(aewb, aewb_recover_cfg)) {
- dev_err(aewb->isp->dev, "AEWB: recover configuration is "
- "invalid.\n");
+ dev_err(aewb->isp->dev,
+ "AEWB: recover configuration is invalid.\n");
return -EINVAL;
}
diff --git a/drivers/media/platform/omap3isp/isph3a_af.c b/drivers/media/platform/omap3isp/isph3a_af.c
index 92937f7eecef..99bd6cc21d86 100644
--- a/drivers/media/platform/omap3isp/isph3a_af.c
+++ b/drivers/media/platform/omap3isp/isph3a_af.c
@@ -367,8 +367,8 @@ int omap3isp_h3a_af_init(struct isp_device *isp)
af_recover_cfg = devm_kzalloc(isp->dev, sizeof(*af_recover_cfg),
GFP_KERNEL);
if (!af_recover_cfg) {
- dev_err(af->isp->dev, "AF: cannot allocate memory for recover "
- "configuration.\n");
+ dev_err(af->isp->dev,
+ "AF: cannot allocate memory for recover configuration.\n");
return -ENOMEM;
}
@@ -379,8 +379,8 @@ int omap3isp_h3a_af_init(struct isp_device *isp)
af_recover_cfg->paxel.v_cnt = OMAP3ISP_AF_PAXEL_VERTICAL_COUNT_MIN;
af_recover_cfg->paxel.line_inc = OMAP3ISP_AF_PAXEL_INCREMENT_MIN;
if (h3a_af_validate_params(af, af_recover_cfg)) {
- dev_err(af->isp->dev, "AF: recover configuration is "
- "invalid.\n");
+ dev_err(af->isp->dev,
+ "AF: recover configuration is invalid.\n");
return -EINVAL;
}
diff --git a/drivers/media/platform/omap3isp/isphist.c b/drivers/media/platform/omap3isp/isphist.c
index 7138b043a4aa..a4ed5d140d48 100644
--- a/drivers/media/platform/omap3isp/isphist.c
+++ b/drivers/media/platform/omap3isp/isphist.c
@@ -18,7 +18,6 @@
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dmaengine.h>
-#include <linux/omap-dmaengine.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
@@ -486,27 +485,30 @@ int omap3isp_hist_init(struct isp_device *isp)
hist->isp = isp;
if (HIST_CONFIG_DMA) {
- struct platform_device *pdev = to_platform_device(isp->dev);
- struct resource *res;
- unsigned int sig = 0;
dma_cap_mask_t mask;
+ /*
+ * We need slave capable channel without DMA request line for
+ * reading out the data.
+ * For this we can use dma_request_chan_by_mask() as we are
+ * happy with any channel as long as it is capable of slave
+ * configuration.
+ */
dma_cap_zero(mask);
dma_cap_set(DMA_SLAVE, mask);
+ hist->dma_ch = dma_request_chan_by_mask(&mask);
+ if (IS_ERR(hist->dma_ch)) {
+ ret = PTR_ERR(hist->dma_ch);
+ if (ret == -EPROBE_DEFER)
+ return ret;
- res = platform_get_resource_byname(pdev, IORESOURCE_DMA,
- "hist");
- if (res)
- sig = res->start;
-
- hist->dma_ch = dma_request_slave_channel_compat(mask,
- omap_dma_filter_fn, &sig, isp->dev, "hist");
- if (!hist->dma_ch)
+ hist->dma_ch = NULL;
dev_warn(isp->dev,
"hist: DMA channel request failed, using PIO\n");
- else
+ } else {
dev_dbg(isp->dev, "hist: using DMA channel %s\n",
dma_chan_name(hist->dma_ch));
+ }
}
hist->ops = &hist_ops;
diff --git a/drivers/media/platform/omap3isp/ispstat.c b/drivers/media/platform/omap3isp/ispstat.c
index 1b9217d3b1b6..47cbc7e3d825 100644
--- a/drivers/media/platform/omap3isp/ispstat.c
+++ b/drivers/media/platform/omap3isp/ispstat.c
@@ -113,8 +113,9 @@ static int isp_stat_buf_check_magic(struct ispstat *stat,
ret = 0;
if (ret) {
- dev_dbg(stat->isp->dev, "%s: beginning magic check does not "
- "match.\n", stat->subdev.name);
+ dev_dbg(stat->isp->dev,
+ "%s: beginning magic check does not match.\n",
+ stat->subdev.name);
return ret;
}
@@ -122,8 +123,9 @@ static int isp_stat_buf_check_magic(struct ispstat *stat,
for (w = buf->virt_addr + buf_size, end = w + MAGIC_SIZE;
w < end; w++) {
if (unlikely(*w != MAGIC_NUM)) {
- dev_dbg(stat->isp->dev, "%s: ending magic check does "
- "not match.\n", stat->subdev.name);
+ dev_dbg(stat->isp->dev,
+ "%s: ending magic check does not match.\n",
+ stat->subdev.name);
return -EINVAL;
}
}
@@ -256,9 +258,9 @@ static void isp_stat_buf_next(struct ispstat *stat)
{
if (unlikely(stat->active_buf))
/* Overwriting unused active buffer */
- dev_dbg(stat->isp->dev, "%s: new buffer requested without "
- "queuing active one.\n",
- stat->subdev.name);
+ dev_dbg(stat->isp->dev,
+ "%s: new buffer requested without queuing active one.\n",
+ stat->subdev.name);
else
stat->active_buf = isp_stat_buf_find_oldest_or_empty(stat);
}
@@ -292,8 +294,9 @@ static struct ispstat_buffer *isp_stat_buf_get(struct ispstat *stat,
return ERR_PTR(-EBUSY);
}
if (isp_stat_buf_check_magic(stat, buf)) {
- dev_dbg(stat->isp->dev, "%s: current buffer has "
- "corrupted data\n.", stat->subdev.name);
+ dev_dbg(stat->isp->dev,
+ "%s: current buffer has corrupted data\n.",
+ stat->subdev.name);
/* Mark empty because it doesn't have valid data. */
buf->empty = 1;
} else {
@@ -307,8 +310,9 @@ static struct ispstat_buffer *isp_stat_buf_get(struct ispstat *stat,
spin_unlock_irqrestore(&stat->isp->stat_lock, flags);
if (buf->buf_size > data->buf_size) {
- dev_warn(stat->isp->dev, "%s: userspace's buffer size is "
- "not enough.\n", stat->subdev.name);
+ dev_warn(stat->isp->dev,
+ "%s: userspace's buffer size is not enough.\n",
+ stat->subdev.name);
isp_stat_buf_release(stat);
return ERR_PTR(-EINVAL);
}
@@ -531,20 +535,22 @@ int omap3isp_stat_config(struct ispstat *stat, void *new_conf)
mutex_lock(&stat->ioctl_lock);
- dev_dbg(stat->isp->dev, "%s: configuring module with buffer "
- "size=0x%08lx\n", stat->subdev.name, (unsigned long)buf_size);
+ dev_dbg(stat->isp->dev,
+ "%s: configuring module with buffer size=0x%08lx\n",
+ stat->subdev.name, (unsigned long)buf_size);
ret = stat->ops->validate_params(stat, new_conf);
if (ret) {
mutex_unlock(&stat->ioctl_lock);
- dev_dbg(stat->isp->dev, "%s: configuration values are "
- "invalid.\n", stat->subdev.name);
+ dev_dbg(stat->isp->dev, "%s: configuration values are invalid.\n",
+ stat->subdev.name);
return ret;
}
if (buf_size != user_cfg->buf_size)
- dev_dbg(stat->isp->dev, "%s: driver has corrected buffer size "
- "request to 0x%08lx\n", stat->subdev.name,
+ dev_dbg(stat->isp->dev,
+ "%s: driver has corrected buffer size request to 0x%08lx\n",
+ stat->subdev.name,
(unsigned long)user_cfg->buf_size);
/*
@@ -595,8 +601,9 @@ int omap3isp_stat_config(struct ispstat *stat, void *new_conf)
/* Module has a valid configuration. */
stat->configured = 1;
- dev_dbg(stat->isp->dev, "%s: module has been successfully "
- "configured.\n", stat->subdev.name);
+ dev_dbg(stat->isp->dev,
+ "%s: module has been successfully configured.\n",
+ stat->subdev.name);
mutex_unlock(&stat->ioctl_lock);
@@ -762,8 +769,8 @@ int omap3isp_stat_enable(struct ispstat *stat, u8 enable)
if (!stat->configured && enable) {
spin_unlock_irqrestore(&stat->isp->stat_lock, irqflags);
mutex_unlock(&stat->ioctl_lock);
- dev_dbg(stat->isp->dev, "%s: cannot enable module as it's "
- "never been successfully configured so far.\n",
+ dev_dbg(stat->isp->dev,
+ "%s: cannot enable module as it's never been successfully configured so far.\n",
stat->subdev.name);
return -EINVAL;
}
@@ -859,8 +866,8 @@ static void __stat_isr(struct ispstat *stat, int from_dma)
if (stat->state == ISPSTAT_ENABLED) {
spin_unlock_irqrestore(&stat->isp->stat_lock, irqflags);
dev_err(stat->isp->dev,
- "%s: interrupt occurred when module was still "
- "processing a buffer.\n", stat->subdev.name);
+ "%s: interrupt occurred when module was still processing a buffer.\n",
+ stat->subdev.name);
ret = STAT_NO_BUF;
goto out;
} else {
@@ -964,8 +971,9 @@ static void __stat_isr(struct ispstat *stat, int from_dma)
atomic_set(&stat->buf_err, 1);
ret = STAT_NO_BUF;
- dev_dbg(stat->isp->dev, "%s: cannot process buffer, "
- "device is busy.\n", stat->subdev.name);
+ dev_dbg(stat->isp->dev,
+ "%s: cannot process buffer, device is busy.\n",
+ stat->subdev.name);
}
out:
diff --git a/drivers/media/platform/pxa_camera.c b/drivers/media/platform/pxa_camera.c
index c12209c701d3..929006f65cc7 100644
--- a/drivers/media/platform/pxa_camera.c
+++ b/drivers/media/platform/pxa_camera.c
@@ -2125,17 +2125,22 @@ static int pxa_camera_sensor_bound(struct v4l2_async_notifier *notifier,
pix->bytesperline, pix->height);
pix->pixelformat = pcdev->current_fmt->host_fmt->fourcc;
v4l2_fill_mbus_format(mf, pix, pcdev->current_fmt->code);
- err = sensor_call(pcdev, pad, set_fmt, NULL, &format);
+
+ err = sensor_call(pcdev, core, s_power, 1);
if (err)
goto out;
+ err = sensor_call(pcdev, pad, set_fmt, NULL, &format);
+ if (err)
+ goto out_sensor_poweroff;
+
v4l2_fill_pix_format(pix, mf);
pr_info("%s(): colorspace=0x%x pixfmt=0x%x\n",
__func__, pix->colorspace, pix->pixelformat);
err = pxa_camera_init_videobuf2(pcdev);
if (err)
- goto out;
+ goto out_sensor_poweroff;
err = video_register_device(&pcdev->vdev, VFL_TYPE_GRABBER, -1);
if (err) {
@@ -2146,6 +2151,9 @@ static int pxa_camera_sensor_bound(struct v4l2_async_notifier *notifier,
"PXA Camera driver attached to camera %s\n",
subdev->name);
}
+
+out_sensor_poweroff:
+ err = sensor_call(pcdev, core, s_power, 0);
out:
mutex_unlock(&pcdev->mlock);
return err;
@@ -2347,8 +2355,7 @@ static int pxa_camera_probe(struct platform_device *pdev)
* Platform hasn't set available data widths. This is bad.
* Warn and use a default.
*/
- dev_warn(&pdev->dev, "WARNING! Platform hasn't set available "
- "data widths, using default 10 bit\n");
+ dev_warn(&pdev->dev, "WARNING! Platform hasn't set available data widths, using default 10 bit\n");
pcdev->platform_flags |= PXA_CAMERA_DATAWIDTH_10;
}
if (pcdev->platform_flags & PXA_CAMERA_DATAWIDTH_8)
@@ -2359,8 +2366,7 @@ static int pxa_camera_probe(struct platform_device *pdev)
pcdev->width_flags |= 1 << 9;
if (!pcdev->mclk) {
dev_warn(&pdev->dev,
- "mclk == 0! Please, fix your platform data. "
- "Using default 20MHz\n");
+ "mclk == 0! Please, fix your platform data. Using default 20MHz\n");
pcdev->mclk = 20000000;
}
diff --git a/drivers/media/platform/rcar-fcp.c b/drivers/media/platform/rcar-fcp.c
index f3a3f31cdfa9..7146fc5ef168 100644
--- a/drivers/media/platform/rcar-fcp.c
+++ b/drivers/media/platform/rcar-fcp.c
@@ -169,6 +169,7 @@ static const struct of_device_id rcar_fcp_of_match[] = {
{ .compatible = "renesas,fcpv" },
{ },
};
+MODULE_DEVICE_TABLE(of, rcar_fcp_of_match);
static struct platform_driver rcar_fcp_platform_driver = {
.probe = rcar_fcp_probe,
diff --git a/drivers/media/platform/rcar_fdp1.c b/drivers/media/platform/rcar_fdp1.c
new file mode 100644
index 000000000000..674cc1309b43
--- /dev/null
+++ b/drivers/media/platform/rcar_fdp1.c
@@ -0,0 +1,2445 @@
+/*
+ * Renesas RCar Fine Display Processor
+ *
+ * Video format converter and frame deinterlacer device.
+ *
+ * Author: Kieran Bingham <kieran@bingham.xyz>
+ * Copyright (c) 2016 Renesas Electronics Corporation.
+ *
+ * This code was developed from and inspired by the vim2m, rcar_jpu,
+ * m2m-deinterlace, and vsp1 drivers.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/fs.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/timer.h>
+#include <media/rcar-fcp.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-mem2mem.h>
+#include <media/videobuf2-dma-contig.h>
+
+static unsigned int debug;
+module_param(debug, uint, 0644);
+MODULE_PARM_DESC(debug, "activate debug info");
+
+/* Minimum and maximum frame width/height */
+#define FDP1_MIN_W 80U
+#define FDP1_MIN_H 80U
+
+#define FDP1_MAX_W 3840U
+#define FDP1_MAX_H 2160U
+
+#define FDP1_MAX_PLANES 3U
+#define FDP1_MAX_STRIDE 8190U
+
+/* Flags that indicate a format can be used for capture/output */
+#define FDP1_CAPTURE BIT(0)
+#define FDP1_OUTPUT BIT(1)
+
+#define DRIVER_NAME "rcar_fdp1"
+
+/* Number of jobs to have available on the processing queue */
+#define FDP1_NUMBER_JOBS 8
+
+#define dprintk(fdp1, fmt, arg...) \
+ v4l2_dbg(1, debug, &fdp1->v4l2_dev, "%s: " fmt, __func__, ## arg)
+
+/*
+ * FDP1 registers and bits
+ */
+
+/* FDP1 start register - Imm */
+#define FD1_CTL_CMD 0x0000
+#define FD1_CTL_CMD_STRCMD BIT(0)
+
+/* Sync generator register - Imm */
+#define FD1_CTL_SGCMD 0x0004
+#define FD1_CTL_SGCMD_SGEN BIT(0)
+
+/* Register set end register - Imm */
+#define FD1_CTL_REGEND 0x0008
+#define FD1_CTL_REGEND_REGEND BIT(0)
+
+/* Channel activation register - Vupdt */
+#define FD1_CTL_CHACT 0x000c
+#define FD1_CTL_CHACT_SMW BIT(9)
+#define FD1_CTL_CHACT_WR BIT(8)
+#define FD1_CTL_CHACT_SMR BIT(3)
+#define FD1_CTL_CHACT_RD2 BIT(2)
+#define FD1_CTL_CHACT_RD1 BIT(1)
+#define FD1_CTL_CHACT_RD0 BIT(0)
+
+/* Operation Mode Register - Vupdt */
+#define FD1_CTL_OPMODE 0x0010
+#define FD1_CTL_OPMODE_PRG BIT(4)
+#define FD1_CTL_OPMODE_VIMD_INTERRUPT (0 << 0)
+#define FD1_CTL_OPMODE_VIMD_BESTEFFORT (1 << 0)
+#define FD1_CTL_OPMODE_VIMD_NOINTERRUPT (2 << 0)
+
+#define FD1_CTL_VPERIOD 0x0014
+#define FD1_CTL_CLKCTRL 0x0018
+#define FD1_CTL_CLKCTRL_CSTP_N BIT(0)
+
+/* Software reset register */
+#define FD1_CTL_SRESET 0x001c
+#define FD1_CTL_SRESET_SRST BIT(0)
+
+/* Control status register (V-update-status) */
+#define FD1_CTL_STATUS 0x0024
+#define FD1_CTL_STATUS_VINT_CNT_MASK GENMASK(31, 16)
+#define FD1_CTL_STATUS_VINT_CNT_SHIFT 16
+#define FD1_CTL_STATUS_SGREGSET BIT(10)
+#define FD1_CTL_STATUS_SGVERR BIT(9)
+#define FD1_CTL_STATUS_SGFREND BIT(8)
+#define FD1_CTL_STATUS_BSY BIT(0)
+
+#define FD1_CTL_VCYCLE_STAT 0x0028
+
+/* Interrupt enable register */
+#define FD1_CTL_IRQENB 0x0038
+/* Interrupt status register */
+#define FD1_CTL_IRQSTA 0x003c
+/* Interrupt control register */
+#define FD1_CTL_IRQFSET 0x0040
+
+/* Common IRQ Bit settings */
+#define FD1_CTL_IRQ_VERE BIT(16)
+#define FD1_CTL_IRQ_VINTE BIT(4)
+#define FD1_CTL_IRQ_FREE BIT(0)
+#define FD1_CTL_IRQ_MASK (FD1_CTL_IRQ_VERE | \
+ FD1_CTL_IRQ_VINTE | \
+ FD1_CTL_IRQ_FREE)
+
+/* RPF */
+#define FD1_RPF_SIZE 0x0060
+#define FD1_RPF_SIZE_MASK GENMASK(12, 0)
+#define FD1_RPF_SIZE_H_SHIFT 16
+#define FD1_RPF_SIZE_V_SHIFT 0
+
+#define FD1_RPF_FORMAT 0x0064
+#define FD1_RPF_FORMAT_CIPM BIT(16)
+#define FD1_RPF_FORMAT_RSPYCS BIT(13)
+#define FD1_RPF_FORMAT_RSPUVS BIT(12)
+#define FD1_RPF_FORMAT_CF BIT(8)
+
+#define FD1_RPF_PSTRIDE 0x0068
+#define FD1_RPF_PSTRIDE_Y_SHIFT 16
+#define FD1_RPF_PSTRIDE_C_SHIFT 0
+
+/* RPF0 Source Component Y Address register */
+#define FD1_RPF0_ADDR_Y 0x006c
+
+/* RPF1 Current Picture Registers */
+#define FD1_RPF1_ADDR_Y 0x0078
+#define FD1_RPF1_ADDR_C0 0x007c
+#define FD1_RPF1_ADDR_C1 0x0080
+
+/* RPF2 next picture register */
+#define FD1_RPF2_ADDR_Y 0x0084
+
+#define FD1_RPF_SMSK_ADDR 0x0090
+#define FD1_RPF_SWAP 0x0094
+
+/* WPF */
+#define FD1_WPF_FORMAT 0x00c0
+#define FD1_WPF_FORMAT_PDV_SHIFT 24
+#define FD1_WPF_FORMAT_FCNL BIT(20)
+#define FD1_WPF_FORMAT_WSPYCS BIT(15)
+#define FD1_WPF_FORMAT_WSPUVS BIT(14)
+#define FD1_WPF_FORMAT_WRTM_601_16 (0 << 9)
+#define FD1_WPF_FORMAT_WRTM_601_0 (1 << 9)
+#define FD1_WPF_FORMAT_WRTM_709_16 (2 << 9)
+#define FD1_WPF_FORMAT_CSC BIT(8)
+
+#define FD1_WPF_RNDCTL 0x00c4
+#define FD1_WPF_RNDCTL_CBRM BIT(28)
+#define FD1_WPF_RNDCTL_CLMD_NOCLIP (0 << 12)
+#define FD1_WPF_RNDCTL_CLMD_CLIP_16_235 (1 << 12)
+#define FD1_WPF_RNDCTL_CLMD_CLIP_1_254 (2 << 12)
+
+#define FD1_WPF_PSTRIDE 0x00c8
+#define FD1_WPF_PSTRIDE_Y_SHIFT 16
+#define FD1_WPF_PSTRIDE_C_SHIFT 0
+
+/* WPF Destination picture */
+#define FD1_WPF_ADDR_Y 0x00cc
+#define FD1_WPF_ADDR_C0 0x00d0
+#define FD1_WPF_ADDR_C1 0x00d4
+#define FD1_WPF_SWAP 0x00d8
+#define FD1_WPF_SWAP_OSWAP_SHIFT 0
+#define FD1_WPF_SWAP_SSWAP_SHIFT 4
+
+/* WPF/RPF Common */
+#define FD1_RWPF_SWAP_BYTE BIT(0)
+#define FD1_RWPF_SWAP_WORD BIT(1)
+#define FD1_RWPF_SWAP_LWRD BIT(2)
+#define FD1_RWPF_SWAP_LLWD BIT(3)
+
+/* IPC */
+#define FD1_IPC_MODE 0x0100
+#define FD1_IPC_MODE_DLI BIT(8)
+#define FD1_IPC_MODE_DIM_ADAPT2D3D (0 << 0)
+#define FD1_IPC_MODE_DIM_FIXED2D (1 << 0)
+#define FD1_IPC_MODE_DIM_FIXED3D (2 << 0)
+#define FD1_IPC_MODE_DIM_PREVFIELD (3 << 0)
+#define FD1_IPC_MODE_DIM_NEXTFIELD (4 << 0)
+
+#define FD1_IPC_SMSK_THRESH 0x0104
+#define FD1_IPC_SMSK_THRESH_CONST 0x00010002
+
+#define FD1_IPC_COMB_DET 0x0108
+#define FD1_IPC_COMB_DET_CONST 0x00200040
+
+#define FD1_IPC_MOTDEC 0x010c
+#define FD1_IPC_MOTDEC_CONST 0x00008020
+
+/* DLI registers */
+#define FD1_IPC_DLI_BLEND 0x0120
+#define FD1_IPC_DLI_BLEND_CONST 0x0080ff02
+
+#define FD1_IPC_DLI_HGAIN 0x0124
+#define FD1_IPC_DLI_HGAIN_CONST 0x001000ff
+
+#define FD1_IPC_DLI_SPRS 0x0128
+#define FD1_IPC_DLI_SPRS_CONST 0x009004ff
+
+#define FD1_IPC_DLI_ANGLE 0x012c
+#define FD1_IPC_DLI_ANGLE_CONST 0x0004080c
+
+#define FD1_IPC_DLI_ISOPIX0 0x0130
+#define FD1_IPC_DLI_ISOPIX0_CONST 0xff10ff10
+
+#define FD1_IPC_DLI_ISOPIX1 0x0134
+#define FD1_IPC_DLI_ISOPIX1_CONST 0x0000ff10
+
+/* Sensor registers */
+#define FD1_IPC_SENSOR_TH0 0x0140
+#define FD1_IPC_SENSOR_TH0_CONST 0x20208080
+
+#define FD1_IPC_SENSOR_TH1 0x0144
+#define FD1_IPC_SENSOR_TH1_CONST 0
+
+#define FD1_IPC_SENSOR_CTL0 0x0170
+#define FD1_IPC_SENSOR_CTL0_CONST 0x00002201
+
+#define FD1_IPC_SENSOR_CTL1 0x0174
+#define FD1_IPC_SENSOR_CTL1_CONST 0
+
+#define FD1_IPC_SENSOR_CTL2 0x0178
+#define FD1_IPC_SENSOR_CTL2_X_SHIFT 16
+#define FD1_IPC_SENSOR_CTL2_Y_SHIFT 0
+
+#define FD1_IPC_SENSOR_CTL3 0x017c
+#define FD1_IPC_SENSOR_CTL3_0_SHIFT 16
+#define FD1_IPC_SENSOR_CTL3_1_SHIFT 0
+
+/* Line memory pixel number register */
+#define FD1_IPC_LMEM 0x01e0
+#define FD1_IPC_LMEM_LINEAR 1024
+#define FD1_IPC_LMEM_TILE 960
+
+/* Internal Data (HW Version) */
+#define FD1_IP_INTDATA 0x0800
+#define FD1_IP_H3 0x02010101
+#define FD1_IP_M3W 0x02010202
+
+/* LUTs */
+#define FD1_LUT_DIF_ADJ 0x1000
+#define FD1_LUT_SAD_ADJ 0x1400
+#define FD1_LUT_BLD_GAIN 0x1800
+#define FD1_LUT_DIF_GAIN 0x1c00
+#define FD1_LUT_MDET 0x2000
+
+/**
+ * struct fdp1_fmt - The FDP1 internal format data
+ * @fourcc: the fourcc code, to match the V4L2 API
+ * @bpp: bits per pixel per plane
+ * @num_planes: number of planes
+ * @hsub: horizontal subsampling factor
+ * @vsub: vertical subsampling factor
+ * @fmt: 7-bit format code for the fdp1 hardware
+ * @swap_yc: the Y and C components are swapped (Y comes before C)
+ * @swap_uv: the U and V components are swapped (V comes before U)
+ * @swap: swap register control
+ * @types: types of queue this format is applicable to
+ */
+struct fdp1_fmt {
+ u32 fourcc;
+ u8 bpp[3];
+ u8 num_planes;
+ u8 hsub;
+ u8 vsub;
+ u8 fmt;
+ bool swap_yc;
+ bool swap_uv;
+ u8 swap;
+ u8 types;
+};
+
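+/*
+ * Reading an entry in the table below, for illustration: the
+ * V4L2_PIX_FMT_NV12M entry declares two planes of 8 and 16 bits per pixel
+ * with 2x2 chroma subsampling (hsub = vsub = 2), hardware format code 0x42,
+ * and is usable on both the capture and output queues.
+ */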
+static const struct fdp1_fmt fdp1_formats[] = {
+ /* RGB formats are only supported by the Write Pixel Formatter */
+
+ { V4L2_PIX_FMT_RGB332, { 8, 0, 0 }, 1, 1, 1, 0x00, false, false,
+ FD1_RWPF_SWAP_LLWD | FD1_RWPF_SWAP_LWRD |
+ FD1_RWPF_SWAP_WORD | FD1_RWPF_SWAP_BYTE,
+ FDP1_CAPTURE },
+ { V4L2_PIX_FMT_XRGB444, { 16, 0, 0 }, 1, 1, 1, 0x01, false, false,
+ FD1_RWPF_SWAP_LLWD | FD1_RWPF_SWAP_LWRD |
+ FD1_RWPF_SWAP_WORD,
+ FDP1_CAPTURE },
+ { V4L2_PIX_FMT_XRGB555, { 16, 0, 0 }, 1, 1, 1, 0x04, false, false,
+ FD1_RWPF_SWAP_LLWD | FD1_RWPF_SWAP_LWRD |
+ FD1_RWPF_SWAP_WORD,
+ FDP1_CAPTURE },
+ { V4L2_PIX_FMT_RGB565, { 16, 0, 0 }, 1, 1, 1, 0x06, false, false,
+ FD1_RWPF_SWAP_LLWD | FD1_RWPF_SWAP_LWRD |
+ FD1_RWPF_SWAP_WORD,
+ FDP1_CAPTURE },
+ { V4L2_PIX_FMT_ABGR32, { 32, 0, 0 }, 1, 1, 1, 0x13, false, false,
+ FD1_RWPF_SWAP_LLWD | FD1_RWPF_SWAP_LWRD,
+ FDP1_CAPTURE },
+ { V4L2_PIX_FMT_XBGR32, { 32, 0, 0 }, 1, 1, 1, 0x13, false, false,
+ FD1_RWPF_SWAP_LLWD | FD1_RWPF_SWAP_LWRD,
+ FDP1_CAPTURE },
+ { V4L2_PIX_FMT_ARGB32, { 32, 0, 0 }, 1, 1, 1, 0x13, false, false,
+ FD1_RWPF_SWAP_LLWD | FD1_RWPF_SWAP_LWRD |
+ FD1_RWPF_SWAP_WORD | FD1_RWPF_SWAP_BYTE,
+ FDP1_CAPTURE },
+ { V4L2_PIX_FMT_XRGB32, { 32, 0, 0 }, 1, 1, 1, 0x13, false, false,
+ FD1_RWPF_SWAP_LLWD | FD1_RWPF_SWAP_LWRD |
+ FD1_RWPF_SWAP_WORD | FD1_RWPF_SWAP_BYTE,
+ FDP1_CAPTURE },
+ { V4L2_PIX_FMT_RGB24, { 24, 0, 0 }, 1, 1, 1, 0x15, false, false,
+ FD1_RWPF_SWAP_LLWD | FD1_RWPF_SWAP_LWRD |
+ FD1_RWPF_SWAP_WORD | FD1_RWPF_SWAP_BYTE,
+ FDP1_CAPTURE },
+ { V4L2_PIX_FMT_BGR24, { 24, 0, 0 }, 1, 1, 1, 0x18, false, false,
+ FD1_RWPF_SWAP_LLWD | FD1_RWPF_SWAP_LWRD |
+ FD1_RWPF_SWAP_WORD | FD1_RWPF_SWAP_BYTE,
+ FDP1_CAPTURE },
+ { V4L2_PIX_FMT_ARGB444, { 16, 0, 0 }, 1, 1, 1, 0x19, false, false,
+ FD1_RWPF_SWAP_LLWD | FD1_RWPF_SWAP_LWRD |
+ FD1_RWPF_SWAP_WORD,
+ FDP1_CAPTURE },
+ { V4L2_PIX_FMT_ARGB555, { 16, 0, 0 }, 1, 1, 1, 0x1b, false, false,
+ FD1_RWPF_SWAP_LLWD | FD1_RWPF_SWAP_LWRD |
+ FD1_RWPF_SWAP_WORD,
+ FDP1_CAPTURE },
+
+ /* YUV Formats are supported by Read and Write Pixel Formatters */
+
+ { V4L2_PIX_FMT_NV16M, { 8, 16, 0 }, 2, 2, 1, 0x41, false, false,
+ FD1_RWPF_SWAP_LLWD | FD1_RWPF_SWAP_LWRD |
+ FD1_RWPF_SWAP_WORD | FD1_RWPF_SWAP_BYTE,
+ FDP1_CAPTURE | FDP1_OUTPUT },
+ { V4L2_PIX_FMT_NV61M, { 8, 16, 0 }, 2, 2, 1, 0x41, false, true,
+ FD1_RWPF_SWAP_LLWD | FD1_RWPF_SWAP_LWRD |
+ FD1_RWPF_SWAP_WORD | FD1_RWPF_SWAP_BYTE,
+ FDP1_CAPTURE | FDP1_OUTPUT },
+ { V4L2_PIX_FMT_NV12M, { 8, 16, 0 }, 2, 2, 2, 0x42, false, false,
+ FD1_RWPF_SWAP_LLWD | FD1_RWPF_SWAP_LWRD |
+ FD1_RWPF_SWAP_WORD | FD1_RWPF_SWAP_BYTE,
+ FDP1_CAPTURE | FDP1_OUTPUT },
+ { V4L2_PIX_FMT_NV21M, { 8, 16, 0 }, 2, 2, 2, 0x42, false, true,
+ FD1_RWPF_SWAP_LLWD | FD1_RWPF_SWAP_LWRD |
+ FD1_RWPF_SWAP_WORD | FD1_RWPF_SWAP_BYTE,
+ FDP1_CAPTURE | FDP1_OUTPUT },
+ { V4L2_PIX_FMT_UYVY, { 16, 0, 0 }, 1, 2, 1, 0x47, false, false,
+ FD1_RWPF_SWAP_LLWD | FD1_RWPF_SWAP_LWRD |
+ FD1_RWPF_SWAP_WORD | FD1_RWPF_SWAP_BYTE,
+ FDP1_CAPTURE | FDP1_OUTPUT },
+ { V4L2_PIX_FMT_VYUY, { 16, 0, 0 }, 1, 2, 1, 0x47, false, true,
+ FD1_RWPF_SWAP_LLWD | FD1_RWPF_SWAP_LWRD |
+ FD1_RWPF_SWAP_WORD | FD1_RWPF_SWAP_BYTE,
+ FDP1_CAPTURE | FDP1_OUTPUT },
+ { V4L2_PIX_FMT_YUYV, { 16, 0, 0 }, 1, 2, 1, 0x47, true, false,
+ FD1_RWPF_SWAP_LLWD | FD1_RWPF_SWAP_LWRD |
+ FD1_RWPF_SWAP_WORD | FD1_RWPF_SWAP_BYTE,
+ FDP1_CAPTURE | FDP1_OUTPUT },
+ { V4L2_PIX_FMT_YVYU, { 16, 0, 0 }, 1, 2, 1, 0x47, true, true,
+ FD1_RWPF_SWAP_LLWD | FD1_RWPF_SWAP_LWRD |
+ FD1_RWPF_SWAP_WORD | FD1_RWPF_SWAP_BYTE,
+ FDP1_CAPTURE | FDP1_OUTPUT },
+ { V4L2_PIX_FMT_YUV444M, { 8, 8, 8 }, 3, 1, 1, 0x4a, false, false,
+ FD1_RWPF_SWAP_LLWD | FD1_RWPF_SWAP_LWRD |
+ FD1_RWPF_SWAP_WORD | FD1_RWPF_SWAP_BYTE,
+ FDP1_CAPTURE | FDP1_OUTPUT },
+ { V4L2_PIX_FMT_YVU444M, { 8, 8, 8 }, 3, 1, 1, 0x4a, false, true,
+ FD1_RWPF_SWAP_LLWD | FD1_RWPF_SWAP_LWRD |
+ FD1_RWPF_SWAP_WORD | FD1_RWPF_SWAP_BYTE,
+ FDP1_CAPTURE | FDP1_OUTPUT },
+ { V4L2_PIX_FMT_YUV422M, { 8, 8, 8 }, 3, 2, 1, 0x4b, false, false,
+ FD1_RWPF_SWAP_LLWD | FD1_RWPF_SWAP_LWRD |
+ FD1_RWPF_SWAP_WORD | FD1_RWPF_SWAP_BYTE,
+ FDP1_CAPTURE | FDP1_OUTPUT },
+ { V4L2_PIX_FMT_YVU422M, { 8, 8, 8 }, 3, 2, 1, 0x4b, false, true,
+ FD1_RWPF_SWAP_LLWD | FD1_RWPF_SWAP_LWRD |
+ FD1_RWPF_SWAP_WORD | FD1_RWPF_SWAP_BYTE,
+ FDP1_CAPTURE | FDP1_OUTPUT },
+ { V4L2_PIX_FMT_YUV420M, { 8, 8, 8 }, 3, 2, 2, 0x4c, false, false,
+ FD1_RWPF_SWAP_LLWD | FD1_RWPF_SWAP_LWRD |
+ FD1_RWPF_SWAP_WORD | FD1_RWPF_SWAP_BYTE,
+ FDP1_CAPTURE | FDP1_OUTPUT },
+ { V4L2_PIX_FMT_YVU420M, { 8, 8, 8 }, 3, 2, 2, 0x4c, false, true,
+ FD1_RWPF_SWAP_LLWD | FD1_RWPF_SWAP_LWRD |
+ FD1_RWPF_SWAP_WORD | FD1_RWPF_SWAP_BYTE,
+ FDP1_CAPTURE | FDP1_OUTPUT },
+};
+
+static int fdp1_fmt_is_rgb(const struct fdp1_fmt *fmt)
+{
+ return fmt->fmt <= 0x1b; /* Last RGB code */
+}
+
+/*
+ * FDP1 Lookup tables range from 0...255 only
+ *
+ * Each table holds at most 256 entries; shorter tables
+ * are padded out to 256 entries by duplicating the last value.
+ */
+static const u8 fdp1_diff_adj[] = {
+ 0x00, 0x24, 0x43, 0x5e, 0x76, 0x8c, 0x9e, 0xaf,
+ 0xbd, 0xc9, 0xd4, 0xdd, 0xe4, 0xea, 0xef, 0xf3,
+ 0xf6, 0xf9, 0xfb, 0xfc, 0xfd, 0xfe, 0xfe, 0xff,
+};
+
+static const u8 fdp1_sad_adj[] = {
+ 0x00, 0x24, 0x43, 0x5e, 0x76, 0x8c, 0x9e, 0xaf,
+ 0xbd, 0xc9, 0xd4, 0xdd, 0xe4, 0xea, 0xef, 0xf3,
+ 0xf6, 0xf9, 0xfb, 0xfc, 0xfd, 0xfe, 0xfe, 0xff,
+};
+
+static const u8 fdp1_bld_gain[] = {
+ 0x80,
+};
+
+static const u8 fdp1_dif_gain[] = {
+ 0x80,
+};
+
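+/* fdp1_mdet below is simply the identity mapping over 0x00-0xff. */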
+static const u8 fdp1_mdet[] = {
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
+ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
+ 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f,
+ 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27,
+ 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f,
+ 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37,
+ 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f,
+ 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47,
+ 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f,
+ 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57,
+ 0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f,
+ 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67,
+ 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f,
+ 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77,
+ 0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f,
+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
+ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
+ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f,
+ 0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7,
+ 0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf,
+ 0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7,
+ 0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf,
+ 0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7,
+ 0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf,
+ 0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7,
+ 0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf,
+ 0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7,
+ 0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef,
+ 0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7,
+ 0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff
+};
+
+/* Per-queue, driver-specific private data */
+struct fdp1_q_data {
+ const struct fdp1_fmt *fmt;
+ struct v4l2_pix_format_mplane format;
+
+ unsigned int vsize;
+ unsigned int stride_y;
+ unsigned int stride_c;
+};
+
+static const struct fdp1_fmt *fdp1_find_format(u32 pixelformat)
+{
+ const struct fdp1_fmt *fmt;
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(fdp1_formats); i++) {
+ fmt = &fdp1_formats[i];
+ if (fmt->fourcc == pixelformat)
+ return fmt;
+ }
+
+ return NULL;
+}
+
+enum fdp1_deint_mode {
+ FDP1_PROGRESSIVE = 0, /* Must be zero when !deinterlacing */
+ FDP1_ADAPT2D3D,
+ FDP1_FIXED2D,
+ FDP1_FIXED3D,
+ FDP1_PREVFIELD,
+ FDP1_NEXTFIELD,
+};
+
+#define FDP1_DEINT_MODE_USES_NEXT(mode) \
+ (mode == FDP1_ADAPT2D3D || \
+ mode == FDP1_FIXED3D || \
+ mode == FDP1_NEXTFIELD)
+
+#define FDP1_DEINT_MODE_USES_PREV(mode) \
+ (mode == FDP1_ADAPT2D3D || \
+ mode == FDP1_FIXED3D || \
+ mode == FDP1_PREVFIELD)
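+
+/*
+ * Summarising the two helpers above: ADAPT2D3D and FIXED3D use both the
+ * previous and next fields, PREVFIELD uses only the previous field,
+ * NEXTFIELD only the next, and FIXED2D and PROGRESSIVE use neither.
+ */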
+
+/*
+ * FDP1 operates on potentially 3 fields, which are tracked
+ * from the VB buffers using this context structure.
+ * Each entry is always a single field or a full frame, never two fields.
+ */
+struct fdp1_field_buffer {
+ struct vb2_v4l2_buffer *vb;
+ dma_addr_t addrs[3];
+
+ /* Should be NONE:TOP:BOTTOM only */
+ enum v4l2_field field;
+
+ /* Flag to indicate this is the last field in the vb */
+ bool last_field;
+
+ /* Buffer queue lists */
+ struct list_head list;
+};
+
+struct fdp1_buffer {
+ struct v4l2_m2m_buffer m2m_buf;
+ struct fdp1_field_buffer fields[2];
+ unsigned int num_fields;
+};
+
+static inline struct fdp1_buffer *to_fdp1_buffer(struct vb2_v4l2_buffer *vb)
+{
+ return container_of(vb, struct fdp1_buffer, m2m_buf.vb);
+}
+
+struct fdp1_job {
+ struct fdp1_field_buffer *previous;
+ struct fdp1_field_buffer *active;
+ struct fdp1_field_buffer *next;
+ struct fdp1_field_buffer *dst;
+
+ /* A job can only be on one list at a time */
+ struct list_head list;
+};
+
+struct fdp1_dev {
+ struct v4l2_device v4l2_dev;
+ struct video_device vfd;
+
+ struct mutex dev_mutex;
+ spinlock_t irqlock;
+ spinlock_t device_process_lock;
+
+ void __iomem *regs;
+ unsigned int irq;
+ struct device *dev;
+
+ /* Job Queues */
+ struct fdp1_job jobs[FDP1_NUMBER_JOBS];
+ struct list_head free_job_list;
+ struct list_head queued_job_list;
+ struct list_head hw_job_list;
+
+ unsigned int clk_rate;
+
+ struct rcar_fcp_device *fcp;
+ struct v4l2_m2m_dev *m2m_dev;
+};
+
+struct fdp1_ctx {
+ struct v4l2_fh fh;
+ struct fdp1_dev *fdp1;
+
+ struct v4l2_ctrl_handler hdl;
+ unsigned int sequence;
+
+ /* Processed buffers in this transaction */
+ u8 num_processed;
+
+ /* Transaction length (i.e. how many buffers per transaction) */
+ u32 translen;
+
+ /* Abort requested by m2m */
+ int aborting;
+
+ /* Deinterlace processing mode */
+ enum fdp1_deint_mode deint_mode;
+
+ /*
+ * Adaptive 2D/3D mode uses a shared mask
+ * This is allocated at streamon, if the ADAPT2D3D mode
+ * is requested
+ */
+ unsigned int smsk_size;
+ dma_addr_t smsk_addr[2];
+ void *smsk_cpu;
+
+ /* The capture pipeline can specify an alpha value
+ * (0-255) for supported formats.
+ */
+ unsigned char alpha;
+
+ /* Source and destination queue data */
+ struct fdp1_q_data out_q; /* HW Source */
+ struct fdp1_q_data cap_q; /* HW Destination */
+
+ /*
+ * Field Queues
+ * Each interlaced field may be referenced by up to three jobs (as the
+ * previous, active and next field), and is tracked in this list.
+ *
+ * V4L2 Buffers are tracked inside the fdp1_buffer
+ * and released when the last 'field' completes
+ */
+ struct list_head fields_queue;
+ unsigned int buffers_queued;
+
+ /*
+ * For de-interlacing we need to track our previous buffer
+ * while preparing our job lists.
+ */
+ struct fdp1_field_buffer *previous;
+};
+
+static inline struct fdp1_ctx *fh_to_ctx(struct v4l2_fh *fh)
+{
+ return container_of(fh, struct fdp1_ctx, fh);
+}
+
+static struct fdp1_q_data *get_q_data(struct fdp1_ctx *ctx,
+ enum v4l2_buf_type type)
+{
+ if (V4L2_TYPE_IS_OUTPUT(type))
+ return &ctx->out_q;
+ else
+ return &ctx->cap_q;
+}
+
+/*
+ * list_remove_job: Take the first item off the specified job list
+ *
+ * Returns: pointer to a job, or NULL if the list is empty.
+ */
+static struct fdp1_job *list_remove_job(struct fdp1_dev *fdp1,
+ struct list_head *list)
+{
+ struct fdp1_job *job;
+ unsigned long flags;
+
+ spin_lock_irqsave(&fdp1->irqlock, flags);
+ job = list_first_entry_or_null(list, struct fdp1_job, list);
+ if (job)
+ list_del(&job->list);
+ spin_unlock_irqrestore(&fdp1->irqlock, flags);
+
+ return job;
+}
+
+/*
+ * list_add_job: Add a job to the specified job list
+ *
+ * Returns: void - always succeeds
+ */
+static void list_add_job(struct fdp1_dev *fdp1,
+ struct list_head *list,
+ struct fdp1_job *job)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&fdp1->irqlock, flags);
+ list_add_tail(&job->list, list);
+ spin_unlock_irqrestore(&fdp1->irqlock, flags);
+}
+
+static struct fdp1_job *fdp1_job_alloc(struct fdp1_dev *fdp1)
+{
+ return list_remove_job(fdp1, &fdp1->free_job_list);
+}
+
+static void fdp1_job_free(struct fdp1_dev *fdp1, struct fdp1_job *job)
+{
+ /* Ensure that all residue from previous jobs is gone */
+ memset(job, 0, sizeof(struct fdp1_job));
+
+ list_add_job(fdp1, &fdp1->free_job_list, job);
+}
+
+static void queue_job(struct fdp1_dev *fdp1, struct fdp1_job *job)
+{
+ list_add_job(fdp1, &fdp1->queued_job_list, job);
+}
+
+static struct fdp1_job *get_queued_job(struct fdp1_dev *fdp1)
+{
+ return list_remove_job(fdp1, &fdp1->queued_job_list);
+}
+
+static void queue_hw_job(struct fdp1_dev *fdp1, struct fdp1_job *job)
+{
+ list_add_job(fdp1, &fdp1->hw_job_list, job);
+}
+
+static struct fdp1_job *get_hw_queued_job(struct fdp1_dev *fdp1)
+{
+ return list_remove_job(fdp1, &fdp1->hw_job_list);
+}
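+
+/*
+ * A job thus cycles through three lists: it is taken from free_job_list,
+ * moved to queued_job_list once prepared, to hw_job_list when handed to the
+ * hardware, and returned to free_job_list on completion.
+ */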
+
+/*
+ * Buffer lists handling
+ */
+static void fdp1_field_complete(struct fdp1_ctx *ctx,
+ struct fdp1_field_buffer *fbuf)
+{
+ /* job->previous may be NULL on the first field */
+ if (!fbuf)
+ return;
+
+ if (fbuf->last_field)
+ v4l2_m2m_buf_done(fbuf->vb, VB2_BUF_STATE_DONE);
+}
+
+static void fdp1_queue_field(struct fdp1_ctx *ctx,
+ struct fdp1_field_buffer *fbuf)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&ctx->fdp1->irqlock, flags);
+ list_add_tail(&fbuf->list, &ctx->fields_queue);
+ spin_unlock_irqrestore(&ctx->fdp1->irqlock, flags);
+
+ ctx->buffers_queued++;
+}
+
+static struct fdp1_field_buffer *fdp1_dequeue_field(struct fdp1_ctx *ctx)
+{
+ struct fdp1_field_buffer *fbuf;
+ unsigned long flags;
+
+ ctx->buffers_queued--;
+
+ spin_lock_irqsave(&ctx->fdp1->irqlock, flags);
+ fbuf = list_first_entry_or_null(&ctx->fields_queue,
+ struct fdp1_field_buffer, list);
+ if (fbuf)
+ list_del(&fbuf->list);
+ spin_unlock_irqrestore(&ctx->fdp1->irqlock, flags);
+
+ return fbuf;
+}
+
+/*
+ * Return the next field in the queue - or NULL,
+ * without removing the item from the list
+ */
+static struct fdp1_field_buffer *fdp1_peek_queued_field(struct fdp1_ctx *ctx)
+{
+ struct fdp1_field_buffer *fbuf;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ctx->fdp1->irqlock, flags);
+ fbuf = list_first_entry_or_null(&ctx->fields_queue,
+ struct fdp1_field_buffer, list);
+ spin_unlock_irqrestore(&ctx->fdp1->irqlock, flags);
+
+ return fbuf;
+}
+
+static u32 fdp1_read(struct fdp1_dev *fdp1, unsigned int reg)
+{
+ u32 value = ioread32(fdp1->regs + reg);
+
+ if (debug >= 2)
+ dprintk(fdp1, "Read 0x%08x from 0x%04x\n", value, reg);
+
+ return value;
+}
+
+static void fdp1_write(struct fdp1_dev *fdp1, u32 val, unsigned int reg)
+{
+ if (debug >= 2)
+ dprintk(fdp1, "Write 0x%08x to 0x%04x\n", val, reg);
+
+ iowrite32(val, fdp1->regs + reg);
+}
+
+/* IPC registers are to be programmed with constant values */
+static void fdp1_set_ipc_dli(struct fdp1_ctx *ctx)
+{
+ struct fdp1_dev *fdp1 = ctx->fdp1;
+
+ fdp1_write(fdp1, FD1_IPC_SMSK_THRESH_CONST, FD1_IPC_SMSK_THRESH);
+ fdp1_write(fdp1, FD1_IPC_COMB_DET_CONST, FD1_IPC_COMB_DET);
+ fdp1_write(fdp1, FD1_IPC_MOTDEC_CONST, FD1_IPC_MOTDEC);
+
+ fdp1_write(fdp1, FD1_IPC_DLI_BLEND_CONST, FD1_IPC_DLI_BLEND);
+ fdp1_write(fdp1, FD1_IPC_DLI_HGAIN_CONST, FD1_IPC_DLI_HGAIN);
+ fdp1_write(fdp1, FD1_IPC_DLI_SPRS_CONST, FD1_IPC_DLI_SPRS);
+ fdp1_write(fdp1, FD1_IPC_DLI_ANGLE_CONST, FD1_IPC_DLI_ANGLE);
+ fdp1_write(fdp1, FD1_IPC_DLI_ISOPIX0_CONST, FD1_IPC_DLI_ISOPIX0);
+ fdp1_write(fdp1, FD1_IPC_DLI_ISOPIX1_CONST, FD1_IPC_DLI_ISOPIX1);
+}
+
+static void fdp1_set_ipc_sensor(struct fdp1_ctx *ctx)
+{
+ struct fdp1_dev *fdp1 = ctx->fdp1;
+ struct fdp1_q_data *src_q_data = &ctx->out_q;
+ unsigned int x0, x1;
+ unsigned int hsize = src_q_data->format.width;
+ unsigned int vsize = src_q_data->format.height;
+
+ x0 = hsize / 3;
+ x1 = 2 * hsize / 3;
+
+ fdp1_write(fdp1, FD1_IPC_SENSOR_TH0_CONST, FD1_IPC_SENSOR_TH0);
+ fdp1_write(fdp1, FD1_IPC_SENSOR_TH1_CONST, FD1_IPC_SENSOR_TH1);
+ fdp1_write(fdp1, FD1_IPC_SENSOR_CTL0_CONST, FD1_IPC_SENSOR_CTL0);
+ fdp1_write(fdp1, FD1_IPC_SENSOR_CTL1_CONST, FD1_IPC_SENSOR_CTL1);
+
+ fdp1_write(fdp1, ((hsize - 1) << FD1_IPC_SENSOR_CTL2_X_SHIFT) |
+ ((vsize - 1) << FD1_IPC_SENSOR_CTL2_Y_SHIFT),
+ FD1_IPC_SENSOR_CTL2);
+
+ fdp1_write(fdp1, (x0 << FD1_IPC_SENSOR_CTL3_0_SHIFT) |
+ (x1 << FD1_IPC_SENSOR_CTL3_1_SHIFT),
+ FD1_IPC_SENSOR_CTL3);
+}
+
+/*
+ * fdp1_write_lut: Write a padded LUT to the hw
+ *
+ * FDP1 uses constant data for de-interlacing processing,
+ * with large tables. These hardware tables are all 256 entries
+ * long; however, they often contain repeated data at the end.
+ *
+ * The last byte of the table is written to all remaining entries.
+ */
+static void fdp1_write_lut(struct fdp1_dev *fdp1, const u8 *lut,
+ unsigned int len, unsigned int base)
+{
+ unsigned int i;
+ u8 pad;
+
+ /* Tables larger than the hw are clipped */
+ len = min(len, 256u);
+
+ for (i = 0; i < len; i++)
+ fdp1_write(fdp1, lut[i], base + (i*4));
+
+ /* Tables are padded with the last entry */
+ pad = lut[i-1];
+
+ for (; i < 256; i++)
+ fdp1_write(fdp1, pad, base + (i*4));
+}
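+
+/*
+ * For example, fdp1_diff_adj above has 24 entries: fdp1_write_lut() writes
+ * entries 0-23 to the first 24 LUT registers (4 bytes apart), then repeats
+ * the final value (0xff) in the remaining 232 registers.
+ */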
+
+static void fdp1_set_lut(struct fdp1_dev *fdp1)
+{
+ fdp1_write_lut(fdp1, fdp1_diff_adj, ARRAY_SIZE(fdp1_diff_adj),
+ FD1_LUT_DIF_ADJ);
+ fdp1_write_lut(fdp1, fdp1_sad_adj, ARRAY_SIZE(fdp1_sad_adj),
+ FD1_LUT_SAD_ADJ);
+ fdp1_write_lut(fdp1, fdp1_bld_gain, ARRAY_SIZE(fdp1_bld_gain),
+ FD1_LUT_BLD_GAIN);
+ fdp1_write_lut(fdp1, fdp1_dif_gain, ARRAY_SIZE(fdp1_dif_gain),
+ FD1_LUT_DIF_GAIN);
+ fdp1_write_lut(fdp1, fdp1_mdet, ARRAY_SIZE(fdp1_mdet),
+ FD1_LUT_MDET);
+}
+
+static void fdp1_configure_rpf(struct fdp1_ctx *ctx,
+ struct fdp1_job *job)
+{
+ struct fdp1_dev *fdp1 = ctx->fdp1;
+ u32 picture_size;
+ u32 pstride;
+ u32 format;
+ u32 smsk_addr;
+
+ struct fdp1_q_data *q_data = &ctx->out_q;
+
+ /* Picture size is common to Source and Destination frames */
+ picture_size = (q_data->format.width << FD1_RPF_SIZE_H_SHIFT)
+ | (q_data->vsize << FD1_RPF_SIZE_V_SHIFT);
+
+ /* Strides */
+ pstride = q_data->stride_y << FD1_RPF_PSTRIDE_Y_SHIFT;
+ if (q_data->format.num_planes > 1)
+ pstride |= q_data->stride_c << FD1_RPF_PSTRIDE_C_SHIFT;
+
+ /* Format control */
+ format = q_data->fmt->fmt;
+ if (q_data->fmt->swap_yc)
+ format |= FD1_RPF_FORMAT_RSPYCS;
+
+ if (q_data->fmt->swap_uv)
+ format |= FD1_RPF_FORMAT_RSPUVS;
+
+ if (job->active->field == V4L2_FIELD_BOTTOM) {
+ format |= FD1_RPF_FORMAT_CF; /* Set for Bottom field */
+ smsk_addr = ctx->smsk_addr[0];
+ } else {
+ smsk_addr = ctx->smsk_addr[1];
+ }
+
+ /* Deint mode is non-zero when deinterlacing */
+ if (ctx->deint_mode)
+ format |= FD1_RPF_FORMAT_CIPM;
+
+ fdp1_write(fdp1, format, FD1_RPF_FORMAT);
+ fdp1_write(fdp1, q_data->fmt->swap, FD1_RPF_SWAP);
+ fdp1_write(fdp1, picture_size, FD1_RPF_SIZE);
+ fdp1_write(fdp1, pstride, FD1_RPF_PSTRIDE);
+ fdp1_write(fdp1, smsk_addr, FD1_RPF_SMSK_ADDR);
+
+ /* Previous Field Channel (CH0) */
+ if (job->previous)
+ fdp1_write(fdp1, job->previous->addrs[0], FD1_RPF0_ADDR_Y);
+
+ /* Current Field Channel (CH1) */
+ fdp1_write(fdp1, job->active->addrs[0], FD1_RPF1_ADDR_Y);
+ fdp1_write(fdp1, job->active->addrs[1], FD1_RPF1_ADDR_C0);
+ fdp1_write(fdp1, job->active->addrs[2], FD1_RPF1_ADDR_C1);
+
+ /* Next Field Channel (CH2) */
+ if (job->next)
+ fdp1_write(fdp1, job->next->addrs[0], FD1_RPF2_ADDR_Y);
+}
+
+static void fdp1_configure_wpf(struct fdp1_ctx *ctx,
+ struct fdp1_job *job)
+{
+ struct fdp1_dev *fdp1 = ctx->fdp1;
+ struct fdp1_q_data *src_q_data = &ctx->out_q;
+ struct fdp1_q_data *q_data = &ctx->cap_q;
+ u32 pstride;
+ u32 format;
+ u32 swap;
+ u32 rndctl;
+
+ pstride = q_data->format.plane_fmt[0].bytesperline
+ << FD1_WPF_PSTRIDE_Y_SHIFT;
+
+ if (q_data->format.num_planes > 1)
+ pstride |= q_data->format.plane_fmt[1].bytesperline
+ << FD1_WPF_PSTRIDE_C_SHIFT;
+
+ format = q_data->fmt->fmt; /* Output Format Code */
+
+ if (q_data->fmt->swap_yc)
+ format |= FD1_WPF_FORMAT_WSPYCS;
+
+ if (q_data->fmt->swap_uv)
+ format |= FD1_WPF_FORMAT_WSPUVS;
+
+ if (fdp1_fmt_is_rgb(q_data->fmt)) {
+ /* Enable Colour Space conversion */
+ format |= FD1_WPF_FORMAT_CSC;
+
+ /* Set WRTM */
+ if (src_q_data->format.ycbcr_enc == V4L2_YCBCR_ENC_709)
+ format |= FD1_WPF_FORMAT_WRTM_709_16;
+ else if (src_q_data->format.quantization ==
+ V4L2_QUANTIZATION_FULL_RANGE)
+ format |= FD1_WPF_FORMAT_WRTM_601_0;
+ else
+ format |= FD1_WPF_FORMAT_WRTM_601_16;
+ }
+
+ /* Set an alpha value into the Pad Value */
+ format |= ctx->alpha << FD1_WPF_FORMAT_PDV_SHIFT;
+
+ /* Determine picture rounding and clipping */
+ rndctl = FD1_WPF_RNDCTL_CBRM; /* Rounding Off */
+ rndctl |= FD1_WPF_RNDCTL_CLMD_NOCLIP;
+
+ /* WPF Swap needs both ISWAP and OSWAP setting */
+ swap = q_data->fmt->swap << FD1_WPF_SWAP_OSWAP_SHIFT;
+ swap |= src_q_data->fmt->swap << FD1_WPF_SWAP_SSWAP_SHIFT;
+
+ fdp1_write(fdp1, format, FD1_WPF_FORMAT);
+ fdp1_write(fdp1, rndctl, FD1_WPF_RNDCTL);
+ fdp1_write(fdp1, swap, FD1_WPF_SWAP);
+ fdp1_write(fdp1, pstride, FD1_WPF_PSTRIDE);
+
+ fdp1_write(fdp1, job->dst->addrs[0], FD1_WPF_ADDR_Y);
+ fdp1_write(fdp1, job->dst->addrs[1], FD1_WPF_ADDR_C0);
+ fdp1_write(fdp1, job->dst->addrs[2], FD1_WPF_ADDR_C1);
+}
+
+static void fdp1_configure_deint_mode(struct fdp1_ctx *ctx,
+ struct fdp1_job *job)
+{
+ struct fdp1_dev *fdp1 = ctx->fdp1;
+ u32 opmode = FD1_CTL_OPMODE_VIMD_NOINTERRUPT;
+ u32 ipcmode = FD1_IPC_MODE_DLI; /* Always set */
+ u32 channels = FD1_CTL_CHACT_WR | FD1_CTL_CHACT_RD1; /* Always on */
+
+ /* De-interlacing Mode */
+ switch (ctx->deint_mode) {
+ default:
+ case FDP1_PROGRESSIVE:
+ dprintk(fdp1, "Progressive Mode\n");
+ opmode |= FD1_CTL_OPMODE_PRG;
+ ipcmode |= FD1_IPC_MODE_DIM_FIXED2D;
+ break;
+ case FDP1_ADAPT2D3D:
+ dprintk(fdp1, "Adapt2D3D Mode\n");
+ if (ctx->sequence == 0 || ctx->aborting)
+ ipcmode |= FD1_IPC_MODE_DIM_FIXED2D;
+ else
+ ipcmode |= FD1_IPC_MODE_DIM_ADAPT2D3D;
+
+ if (ctx->sequence > 1) {
+ channels |= FD1_CTL_CHACT_SMW;
+ channels |= FD1_CTL_CHACT_RD0 | FD1_CTL_CHACT_RD2;
+ }
+
+ if (ctx->sequence > 2)
+ channels |= FD1_CTL_CHACT_SMR;
+
+ break;
+ case FDP1_FIXED3D:
+ dprintk(fdp1, "Fixed 3D Mode\n");
+ ipcmode |= FD1_IPC_MODE_DIM_FIXED3D;
+ /* Except for first and last frame, enable all channels */
+ if (!(ctx->sequence == 0 || ctx->aborting))
+ channels |= FD1_CTL_CHACT_RD0 | FD1_CTL_CHACT_RD2;
+ break;
+ case FDP1_FIXED2D:
+ dprintk(fdp1, "Fixed 2D Mode\n");
+ ipcmode |= FD1_IPC_MODE_DIM_FIXED2D;
+ /* No extra channels enabled */
+ break;
+ case FDP1_PREVFIELD:
+ dprintk(fdp1, "Previous Field Mode\n");
+ ipcmode |= FD1_IPC_MODE_DIM_PREVFIELD;
+ channels |= FD1_CTL_CHACT_RD0; /* Previous */
+ break;
+ case FDP1_NEXTFIELD:
+ dprintk(fdp1, "Next Field Mode\n");
+ ipcmode |= FD1_IPC_MODE_DIM_NEXTFIELD;
+ channels |= FD1_CTL_CHACT_RD2; /* Next */
+ break;
+ }
+
+ fdp1_write(fdp1, channels, FD1_CTL_CHACT);
+ fdp1_write(fdp1, opmode, FD1_CTL_OPMODE);
+ fdp1_write(fdp1, ipcmode, FD1_IPC_MODE);
+}
+
+/*
+ * fdp1_device_process() - Run the hardware
+ *
+ * Configure and start the hardware to generate a single frame
+ * of output given our input parameters.
+ */
+static int fdp1_device_process(struct fdp1_ctx *ctx)
+{
+ struct fdp1_dev *fdp1 = ctx->fdp1;
+ struct fdp1_job *job;
+ unsigned long flags;
+
+ spin_lock_irqsave(&fdp1->device_process_lock, flags);
+
+ /* Get a job to process */
+ job = get_queued_job(fdp1);
+ if (!job) {
+ /*
+ * VINT can call us to see if we can queue another job.
+ * If we have no work to do, we simply return.
+ */
+ spin_unlock_irqrestore(&fdp1->device_process_lock, flags);
+ return 0;
+ }
+
+ /* First Frame only? ... */
+ fdp1_write(fdp1, FD1_CTL_CLKCTRL_CSTP_N, FD1_CTL_CLKCTRL);
+
+ /* Set the mode, and configuration */
+ fdp1_configure_deint_mode(ctx, job);
+
+ /* DLI Static Configuration */
+ fdp1_set_ipc_dli(ctx);
+
+ /* Sensor Configuration */
+ fdp1_set_ipc_sensor(ctx);
+
+ /* Setup the source picture */
+ fdp1_configure_rpf(ctx, job);
+
+ /* Setup the destination picture */
+ fdp1_configure_wpf(ctx, job);
+
+ /* Line Memory Pixel Number Register for linear access */
+ fdp1_write(fdp1, FD1_IPC_LMEM_LINEAR, FD1_IPC_LMEM);
+
+ /* Enable Interrupts */
+ fdp1_write(fdp1, FD1_CTL_IRQ_MASK, FD1_CTL_IRQENB);
+
+ /* Finally, the Immediate Registers */
+
+ /* This job is now in the HW queue */
+ queue_hw_job(fdp1, job);
+
+ /* Start the command */
+ fdp1_write(fdp1, FD1_CTL_CMD_STRCMD, FD1_CTL_CMD);
+
+ /* Registers will update to HW at next VINT */
+ fdp1_write(fdp1, FD1_CTL_REGEND_REGEND, FD1_CTL_REGEND);
+
+ /* Enable VINT Generator */
+ fdp1_write(fdp1, FD1_CTL_SGCMD_SGEN, FD1_CTL_SGCMD);
+
+ spin_unlock_irqrestore(&fdp1->device_process_lock, flags);
+
+ return 0;
+}
+
+/*
+ * mem2mem callbacks
+ */
+
+/**
+ * job_ready() - check whether an instance is ready to be scheduled to run
+ */
+static int fdp1_m2m_job_ready(void *priv)
+{
+ struct fdp1_ctx *ctx = priv;
+ struct fdp1_q_data *src_q_data = &ctx->out_q;
+ int srcbufs = 1;
+ int dstbufs = 1;
+
+ dprintk(ctx->fdp1, "+ Src: %d : Dst: %d\n",
+ v4l2_m2m_num_src_bufs_ready(ctx->fh.m2m_ctx),
+ v4l2_m2m_num_dst_bufs_ready(ctx->fh.m2m_ctx));
+
+ /* One output buffer is required for each field */
+ if (V4L2_FIELD_HAS_BOTH(src_q_data->format.field))
+ dstbufs = 2;
+
+ if (v4l2_m2m_num_src_bufs_ready(ctx->fh.m2m_ctx) < srcbufs
+ || v4l2_m2m_num_dst_bufs_ready(ctx->fh.m2m_ctx) < dstbufs) {
+ dprintk(ctx->fdp1, "Not enough buffers available\n");
+ return 0;
+ }
+
+ return 1;
+}
+
+static void fdp1_m2m_job_abort(void *priv)
+{
+ struct fdp1_ctx *ctx = priv;
+
+ dprintk(ctx->fdp1, "+\n");
+
+ /* Will cancel the transaction in the next interrupt handler */
+ ctx->aborting = 1;
+
+ /* Immediate abort sequence */
+ fdp1_write(ctx->fdp1, 0, FD1_CTL_SGCMD);
+ fdp1_write(ctx->fdp1, FD1_CTL_SRESET_SRST, FD1_CTL_SRESET);
+}
+
+/*
+ * fdp1_prepare_job: Prepare and queue a new job for a single action of work
+ *
+ * Prepare the next field (or frame, in progressive mode) and an output
+ * buffer for the hardware to perform a single operation.
+ */
+static struct fdp1_job *fdp1_prepare_job(struct fdp1_ctx *ctx)
+{
+ struct vb2_v4l2_buffer *vbuf;
+ struct fdp1_buffer *fbuf;
+ struct fdp1_dev *fdp1 = ctx->fdp1;
+ struct fdp1_job *job;
+ unsigned int buffers_required = 1;
+
+ dprintk(fdp1, "+\n");
+
+ if (FDP1_DEINT_MODE_USES_NEXT(ctx->deint_mode))
+ buffers_required = 2;
+
+ if (ctx->buffers_queued < buffers_required)
+ return NULL;
+
+ job = fdp1_job_alloc(fdp1);
+ if (!job) {
+ dprintk(fdp1, "No free jobs currently available\n");
+ return NULL;
+ }
+
+ job->active = fdp1_dequeue_field(ctx);
+ if (!job->active) {
+ /* Buffer check should prevent this ever happening */
+ dprintk(fdp1, "No input buffers currently available\n");
+
+ fdp1_job_free(fdp1, job);
+ return NULL;
+ }
+
+ dprintk(fdp1, "+ Buffer en-route...\n");
+
+ /* Source buffers have been prepared on our buffer_queue.
+ * Prepare our output buffer.
+ */
+ vbuf = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
+ fbuf = to_fdp1_buffer(vbuf);
+ job->dst = &fbuf->fields[0];
+
+ job->active->vb->sequence = ctx->sequence;
+ job->dst->vb->sequence = ctx->sequence;
+ ctx->sequence++;
+
+ if (FDP1_DEINT_MODE_USES_PREV(ctx->deint_mode)) {
+ job->previous = ctx->previous;
+
+ /* Active buffer becomes the next job's previous buffer */
+ ctx->previous = job->active;
+ }
+
+ if (FDP1_DEINT_MODE_USES_NEXT(ctx->deint_mode)) {
+ /* Must be called after 'active' is dequeued */
+ job->next = fdp1_peek_queued_field(ctx);
+ }
+
+ /* Transfer timestamps and flags from src->dst */
+
+ job->dst->vb->vb2_buf.timestamp = job->active->vb->vb2_buf.timestamp;
+
+ job->dst->vb->flags = job->active->vb->flags &
+ V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
+
+ /* Ideally, the frame-end function will just 'check' to see
+ * if there are more jobs instead
+ */
+ ctx->translen++;
+
+ /* Finally, Put this job on the processing queue */
+ queue_job(fdp1, job);
+
+ dprintk(fdp1, "Job Queued translen = %d\n", ctx->translen);
+
+ return job;
+}
+
+/* fdp1_m2m_device_run() - prepares and starts the device for an M2M task
+ *
+ * A single input buffer is taken and serialised into our fdp1_buffer
+ * queue. The queue is then processed to create as many jobs as possible
+ * from our available input.
+ */
+static void fdp1_m2m_device_run(void *priv)
+{
+ struct fdp1_ctx *ctx = priv;
+ struct fdp1_dev *fdp1 = ctx->fdp1;
+ struct vb2_v4l2_buffer *src_vb;
+ struct fdp1_buffer *buf;
+ unsigned int i;
+
+ dprintk(fdp1, "+\n");
+
+ ctx->translen = 0;
+
+ /* Get our incoming buffer of either one or two fields, or one frame */
+ src_vb = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
+ buf = to_fdp1_buffer(src_vb);
+
+ for (i = 0; i < buf->num_fields; i++) {
+ struct fdp1_field_buffer *fbuf = &buf->fields[i];
+
+ fdp1_queue_field(ctx, fbuf);
+ dprintk(fdp1, "Queued Buffer [%d] last_field:%d\n",
+ i, fbuf->last_field);
+ }
+
+ /* Queue as many jobs as our data provides for */
+ while (fdp1_prepare_job(ctx))
+ ;
+
+ if (ctx->translen == 0) {
+ dprintk(fdp1, "No jobs were processed. M2M action complete\n");
+ v4l2_m2m_job_finish(fdp1->m2m_dev, ctx->fh.m2m_ctx);
+ return;
+ }
+
+ /* Kick the job processing action */
+ fdp1_device_process(ctx);
+}
+
+/*
+ * device_frame_end:
+ *
+ * Handles the M2M level after a buffer completion event.
+ */
+static void device_frame_end(struct fdp1_dev *fdp1,
+ enum vb2_buffer_state state)
+{
+ struct fdp1_ctx *ctx;
+ unsigned long flags;
+ struct fdp1_job *job = get_hw_queued_job(fdp1);
+
+ dprintk(fdp1, "+\n");
+
+ ctx = v4l2_m2m_get_curr_priv(fdp1->m2m_dev);
+
+ if (ctx == NULL) {
+ v4l2_err(&fdp1->v4l2_dev,
+ "Instance released before the end of transaction\n");
+ return;
+ }
+
+ ctx->num_processed++;
+
+ /*
+ * fdp1_field_complete will call buf_done only when the last vb2_buffer
+ * reference is complete
+ */
+ if (FDP1_DEINT_MODE_USES_PREV(ctx->deint_mode))
+ fdp1_field_complete(ctx, job->previous);
+ else
+ fdp1_field_complete(ctx, job->active);
+
+ spin_lock_irqsave(&fdp1->irqlock, flags);
+ v4l2_m2m_buf_done(job->dst->vb, state);
+ job->dst = NULL;
+ spin_unlock_irqrestore(&fdp1->irqlock, flags);
+
+ /* Move this job back to the free job list */
+ fdp1_job_free(fdp1, job);
+
+ dprintk(fdp1, "curr_ctx->num_processed %d curr_ctx->translen %d\n",
+ ctx->num_processed, ctx->translen);
+
+ if (ctx->num_processed == ctx->translen ||
+ ctx->aborting) {
+ dprintk(ctx->fdp1, "Finishing transaction\n");
+ ctx->num_processed = 0;
+ v4l2_m2m_job_finish(fdp1->m2m_dev, ctx->fh.m2m_ctx);
+ } else {
+ /*
+ * For pipelined performance support, this would
+ * be called from a VINT handler
+ */
+ fdp1_device_process(ctx);
+ }
+}
+
+/*
+ * video ioctls
+ */
+static int fdp1_vidioc_querycap(struct file *file, void *priv,
+ struct v4l2_capability *cap)
+{
+ strlcpy(cap->driver, DRIVER_NAME, sizeof(cap->driver));
+ strlcpy(cap->card, DRIVER_NAME, sizeof(cap->card));
+ snprintf(cap->bus_info, sizeof(cap->bus_info),
+ "platform:%s", DRIVER_NAME);
+ return 0;
+}
+
+static int fdp1_enum_fmt(struct v4l2_fmtdesc *f, u32 type)
+{
+ unsigned int i, num;
+
+ num = 0;
+
+ for (i = 0; i < ARRAY_SIZE(fdp1_formats); ++i) {
+ if (fdp1_formats[i].types & type) {
+ if (num == f->index)
+ break;
+ ++num;
+ }
+ }
+
+ /* Format not found */
+ if (i >= ARRAY_SIZE(fdp1_formats))
+ return -EINVAL;
+
+ /* Format found */
+ f->pixelformat = fdp1_formats[i].fourcc;
+
+ return 0;
+}
+
+static int fdp1_enum_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
+{
+ return fdp1_enum_fmt(f, FDP1_CAPTURE);
+}
+
+static int fdp1_enum_fmt_vid_out(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
+{
+ return fdp1_enum_fmt(f, FDP1_OUTPUT);
+}
+
+static int fdp1_g_fmt(struct file *file, void *priv, struct v4l2_format *f)
+{
+ struct fdp1_q_data *q_data;
+ struct fdp1_ctx *ctx = fh_to_ctx(priv);
+
+ if (!v4l2_m2m_get_vq(ctx->fh.m2m_ctx, f->type))
+ return -EINVAL;
+
+ q_data = get_q_data(ctx, f->type);
+ f->fmt.pix_mp = q_data->format;
+
+ return 0;
+}
+
+static void fdp1_compute_stride(struct v4l2_pix_format_mplane *pix,
+ const struct fdp1_fmt *fmt)
+{
+ unsigned int i;
+
+ /* Compute and clamp the stride and image size. */
+ for (i = 0; i < min_t(unsigned int, fmt->num_planes, 2U); ++i) {
+ unsigned int hsub = i > 0 ? fmt->hsub : 1;
+ unsigned int vsub = i > 0 ? fmt->vsub : 1;
+ /* From VSP : TODO: Confirm alignment limits for FDP1 */
+ unsigned int align = 128;
+ unsigned int bpl;
+
+ bpl = clamp_t(unsigned int, pix->plane_fmt[i].bytesperline,
+ pix->width / hsub * fmt->bpp[i] / 8,
+ round_down(FDP1_MAX_STRIDE, align));
+
+ pix->plane_fmt[i].bytesperline = round_up(bpl, align);
+ pix->plane_fmt[i].sizeimage = pix->plane_fmt[i].bytesperline
+ * pix->height / vsub;
+
+ memset(pix->plane_fmt[i].reserved, 0,
+ sizeof(pix->plane_fmt[i].reserved));
+ }
+
+ if (fmt->num_planes == 3) {
+ /* The two chroma planes must have the same stride. */
+ pix->plane_fmt[2].bytesperline = pix->plane_fmt[1].bytesperline;
+ pix->plane_fmt[2].sizeimage = pix->plane_fmt[1].sizeimage;
+
+ memset(pix->plane_fmt[2].reserved, 0,
+ sizeof(pix->plane_fmt[2].reserved));
+ }
+}
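+
+/*
+ * Worked example (illustrative): for NV12M at 1920x1080, plane 0 gets
+ * bytesperline = 1920 (already a multiple of the 128-byte alignment) and
+ * sizeimage = 1920 * 1080, while the 2x2-subsampled 16bpp plane 1 gets
+ * bytesperline = 1920 / 2 * 16 / 8 = 1920 and sizeimage = 1920 * 540.
+ */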
+
+static void fdp1_try_fmt_output(struct fdp1_ctx *ctx,
+ const struct fdp1_fmt **fmtinfo,
+ struct v4l2_pix_format_mplane *pix)
+{
+ const struct fdp1_fmt *fmt;
+ unsigned int width;
+ unsigned int height;
+
+ /* Validate the pixel format to ensure the output queue supports it. */
+ fmt = fdp1_find_format(pix->pixelformat);
+ if (!fmt || !(fmt->types & FDP1_OUTPUT))
+ fmt = fdp1_find_format(V4L2_PIX_FMT_YUYV);
+
+ if (fmtinfo)
+ *fmtinfo = fmt;
+
+ pix->pixelformat = fmt->fourcc;
+ pix->num_planes = fmt->num_planes;
+
+ /*
+ * Progressive video and all interlaced field orders are acceptable.
+ * Default to V4L2_FIELD_INTERLACED.
+ */
+ if (pix->field != V4L2_FIELD_NONE &&
+ pix->field != V4L2_FIELD_ALTERNATE &&
+ !V4L2_FIELD_HAS_BOTH(pix->field))
+ pix->field = V4L2_FIELD_INTERLACED;
+
+ /*
+ * The deinterlacer doesn't care about the colorspace; accept all values
+ * and default to V4L2_COLORSPACE_SMPTE170M. The YUV to RGB conversion
+ * at the output of the deinterlacer supports a subset of encodings and
+ * quantization methods and will only be available when the colorspace
+ * allows it.
+ */
+ if (pix->colorspace == V4L2_COLORSPACE_DEFAULT)
+ pix->colorspace = V4L2_COLORSPACE_SMPTE170M;
+
+ /*
+ * Align the width and height for YUV 4:2:2 and 4:2:0 formats and clamp
+ * them to the supported frame size range. The height boundaries
+ * relate to the full frame; divide them by two when the format passes
+ * fields in separate buffers.
+ */
+ width = round_down(pix->width, fmt->hsub);
+ pix->width = clamp(width, FDP1_MIN_W, FDP1_MAX_W);
+
+ height = round_down(pix->height, fmt->vsub);
+ if (pix->field == V4L2_FIELD_ALTERNATE)
+ pix->height = clamp(height, FDP1_MIN_H / 2, FDP1_MAX_H / 2);
+ else
+ pix->height = clamp(height, FDP1_MIN_H, FDP1_MAX_H);
+
+ fdp1_compute_stride(pix, fmt);
+}
+
+static void fdp1_try_fmt_capture(struct fdp1_ctx *ctx,
+ const struct fdp1_fmt **fmtinfo,
+ struct v4l2_pix_format_mplane *pix)
+{
+ struct fdp1_q_data *src_data = &ctx->out_q;
+ enum v4l2_colorspace colorspace;
+ enum v4l2_ycbcr_encoding ycbcr_enc;
+ enum v4l2_quantization quantization;
+ const struct fdp1_fmt *fmt;
+ bool allow_rgb;
+
+ /*
+ * Validate the pixel format. We can only accept RGB output formats if
+ * the input encoding and quantization are compatible with the format
+ * conversions supported by the hardware. The supported combinations are
+ *
+ * V4L2_YCBCR_ENC_601 + V4L2_QUANTIZATION_LIM_RANGE
+ * V4L2_YCBCR_ENC_601 + V4L2_QUANTIZATION_FULL_RANGE
+ * V4L2_YCBCR_ENC_709 + V4L2_QUANTIZATION_LIM_RANGE
+ */
+ colorspace = src_data->format.colorspace;
+
+ ycbcr_enc = src_data->format.ycbcr_enc;
+ if (ycbcr_enc == V4L2_YCBCR_ENC_DEFAULT)
+ ycbcr_enc = V4L2_MAP_YCBCR_ENC_DEFAULT(colorspace);
+
+ quantization = src_data->format.quantization;
+ if (quantization == V4L2_QUANTIZATION_DEFAULT)
+ quantization = V4L2_MAP_QUANTIZATION_DEFAULT(false, colorspace,
+ ycbcr_enc);
+
+ allow_rgb = ycbcr_enc == V4L2_YCBCR_ENC_601 ||
+ (ycbcr_enc == V4L2_YCBCR_ENC_709 &&
+ quantization == V4L2_QUANTIZATION_LIM_RANGE);
+
+ fmt = fdp1_find_format(pix->pixelformat);
+ if (!fmt || (!allow_rgb && fdp1_fmt_is_rgb(fmt)))
+ fmt = fdp1_find_format(V4L2_PIX_FMT_YUYV);
+
+ if (fmtinfo)
+ *fmtinfo = fmt;
+
+ pix->pixelformat = fmt->fourcc;
+ pix->num_planes = fmt->num_planes;
+ pix->field = V4L2_FIELD_NONE;
+
+ /*
+ * The colorspace on the capture queue is copied from the output queue
+ * as the hardware can't change the colorspace. It can convert YCbCr to
+ * RGB though, in which case the encoding and quantization are set to
+ * default values as anything else wouldn't make sense.
+ */
+ pix->colorspace = src_data->format.colorspace;
+ pix->xfer_func = src_data->format.xfer_func;
+
+ if (fdp1_fmt_is_rgb(fmt)) {
+ pix->ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT;
+ pix->quantization = V4L2_QUANTIZATION_DEFAULT;
+ } else {
+ pix->ycbcr_enc = src_data->format.ycbcr_enc;
+ pix->quantization = src_data->format.quantization;
+ }
+
+ /*
+ * The frame width is identical to the output queue, and the height is
+ * either doubled or identical depending on whether the output queue
+ * field order contains one or two fields per frame.
+ */
+ pix->width = src_data->format.width;
+ if (src_data->format.field == V4L2_FIELD_ALTERNATE)
+ pix->height = 2 * src_data->format.height;
+ else
+ pix->height = src_data->format.height;
+
+ fdp1_compute_stride(pix, fmt);
+}
+
+static int fdp1_try_fmt(struct file *file, void *priv, struct v4l2_format *f)
+{
+ struct fdp1_ctx *ctx = fh_to_ctx(priv);
+
+ if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
+ fdp1_try_fmt_output(ctx, NULL, &f->fmt.pix_mp);
+ else
+ fdp1_try_fmt_capture(ctx, NULL, &f->fmt.pix_mp);
+
+ dprintk(ctx->fdp1, "Try %s format: %4s (0x%08x) %ux%u field %u\n",
+ V4L2_TYPE_IS_OUTPUT(f->type) ? "output" : "capture",
+ (char *)&f->fmt.pix_mp.pixelformat, f->fmt.pix_mp.pixelformat,
+ f->fmt.pix_mp.width, f->fmt.pix_mp.height, f->fmt.pix_mp.field);
+
+ return 0;
+}
+
+static void fdp1_set_format(struct fdp1_ctx *ctx,
+ struct v4l2_pix_format_mplane *pix,
+ enum v4l2_buf_type type)
+{
+ struct fdp1_q_data *q_data = get_q_data(ctx, type);
+ const struct fdp1_fmt *fmtinfo;
+
+ if (type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
+ fdp1_try_fmt_output(ctx, &fmtinfo, pix);
+ else
+ fdp1_try_fmt_capture(ctx, &fmtinfo, pix);
+
+ q_data->fmt = fmtinfo;
+ q_data->format = *pix;
+
+ q_data->vsize = pix->height;
+ if (pix->field != V4L2_FIELD_NONE)
+ q_data->vsize /= 2;
+
+ q_data->stride_y = pix->plane_fmt[0].bytesperline;
+ q_data->stride_c = pix->plane_fmt[1].bytesperline;
+
+ /* Adjust strides for interleaved buffers */
+ if (pix->field == V4L2_FIELD_INTERLACED ||
+ pix->field == V4L2_FIELD_INTERLACED_TB ||
+ pix->field == V4L2_FIELD_INTERLACED_BT) {
+ q_data->stride_y *= 2;
+ q_data->stride_c *= 2;
+ }
+
+ /* Propagate the format from the output node to the capture node. */
+ if (type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+ struct fdp1_q_data *dst_data = &ctx->cap_q;
+
+ /*
+ * Copy the format, clear the per-plane bytes per line and image
+ * size, override the field and double the height if needed.
+ */
+ dst_data->format = q_data->format;
+ memset(dst_data->format.plane_fmt, 0,
+ sizeof(dst_data->format.plane_fmt));
+
+ dst_data->format.field = V4L2_FIELD_NONE;
+ if (pix->field == V4L2_FIELD_ALTERNATE)
+ dst_data->format.height *= 2;
+
+ fdp1_try_fmt_capture(ctx, &dst_data->fmt, &dst_data->format);
+
+ dst_data->vsize = dst_data->format.height;
+ dst_data->stride_y = dst_data->format.plane_fmt[0].bytesperline;
+ dst_data->stride_c = dst_data->format.plane_fmt[1].bytesperline;
+ }
+}
+
+static int fdp1_s_fmt(struct file *file, void *priv, struct v4l2_format *f)
+{
+ struct fdp1_ctx *ctx = fh_to_ctx(priv);
+ struct v4l2_m2m_ctx *m2m_ctx = ctx->fh.m2m_ctx;
+ struct vb2_queue *vq = v4l2_m2m_get_vq(m2m_ctx, f->type);
+
+ if (vb2_is_busy(vq)) {
+ v4l2_err(&ctx->fdp1->v4l2_dev, "%s queue busy\n", __func__);
+ return -EBUSY;
+ }
+
+ fdp1_set_format(ctx, &f->fmt.pix_mp, f->type);
+
+ dprintk(ctx->fdp1, "Set %s format: %4s (0x%08x) %ux%u field %u\n",
+ V4L2_TYPE_IS_OUTPUT(f->type) ? "output" : "capture",
+ (char *)&f->fmt.pix_mp.pixelformat, f->fmt.pix_mp.pixelformat,
+ f->fmt.pix_mp.width, f->fmt.pix_mp.height, f->fmt.pix_mp.field);
+
+ return 0;
+}
+
+static int fdp1_g_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct fdp1_ctx *ctx =
+ container_of(ctrl->handler, struct fdp1_ctx, hdl);
+ struct fdp1_q_data *src_q_data = &ctx->out_q;
+
+ switch (ctrl->id) {
+ case V4L2_CID_MIN_BUFFERS_FOR_CAPTURE:
+ if (V4L2_FIELD_HAS_BOTH(src_q_data->format.field))
+ ctrl->val = 2;
+ else
+ ctrl->val = 1;
+ return 0;
+ }
+
+ return 1;
+}
+
+static int fdp1_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct fdp1_ctx *ctx =
+ container_of(ctrl->handler, struct fdp1_ctx, hdl);
+
+ switch (ctrl->id) {
+ case V4L2_CID_ALPHA_COMPONENT:
+ ctx->alpha = ctrl->val;
+ break;
+
+ case V4L2_CID_DEINTERLACING_MODE:
+ ctx->deint_mode = ctrl->val;
+ break;
+ }
+
+ return 0;
+}
+
+static const struct v4l2_ctrl_ops fdp1_ctrl_ops = {
+ .s_ctrl = fdp1_s_ctrl,
+ .g_volatile_ctrl = fdp1_g_ctrl,
+};
+
+static const char * const fdp1_ctrl_deint_menu[] = {
+ "Progressive",
+ "Adaptive 2D/3D",
+ "Fixed 2D",
+ "Fixed 3D",
+ "Previous field",
+ "Next field",
+ NULL
+};
+
+static const struct v4l2_ioctl_ops fdp1_ioctl_ops = {
+ .vidioc_querycap = fdp1_vidioc_querycap,
+
+ .vidioc_enum_fmt_vid_cap_mplane = fdp1_enum_fmt_vid_cap,
+ .vidioc_enum_fmt_vid_out_mplane = fdp1_enum_fmt_vid_out,
+ .vidioc_g_fmt_vid_cap_mplane = fdp1_g_fmt,
+ .vidioc_g_fmt_vid_out_mplane = fdp1_g_fmt,
+ .vidioc_try_fmt_vid_cap_mplane = fdp1_try_fmt,
+ .vidioc_try_fmt_vid_out_mplane = fdp1_try_fmt,
+ .vidioc_s_fmt_vid_cap_mplane = fdp1_s_fmt,
+ .vidioc_s_fmt_vid_out_mplane = fdp1_s_fmt,
+
+ .vidioc_reqbufs = v4l2_m2m_ioctl_reqbufs,
+ .vidioc_querybuf = v4l2_m2m_ioctl_querybuf,
+ .vidioc_qbuf = v4l2_m2m_ioctl_qbuf,
+ .vidioc_dqbuf = v4l2_m2m_ioctl_dqbuf,
+ .vidioc_prepare_buf = v4l2_m2m_ioctl_prepare_buf,
+ .vidioc_create_bufs = v4l2_m2m_ioctl_create_bufs,
+ .vidioc_expbuf = v4l2_m2m_ioctl_expbuf,
+
+ .vidioc_streamon = v4l2_m2m_ioctl_streamon,
+ .vidioc_streamoff = v4l2_m2m_ioctl_streamoff,
+
+ .vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
+ .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
+};
+
+/*
+ * Queue operations
+ */
+
+static int fdp1_queue_setup(struct vb2_queue *vq,
+ unsigned int *nbuffers, unsigned int *nplanes,
+ unsigned int sizes[],
+ struct device *alloc_ctxs[])
+{
+ struct fdp1_ctx *ctx = vb2_get_drv_priv(vq);
+ struct fdp1_q_data *q_data;
+ unsigned int i;
+
+ q_data = get_q_data(ctx, vq->type);
+
+ if (*nplanes) {
+ if (*nplanes > FDP1_MAX_PLANES)
+ return -EINVAL;
+
+ return 0;
+ }
+
+ *nplanes = q_data->format.num_planes;
+
+ for (i = 0; i < *nplanes; i++)
+ sizes[i] = q_data->format.plane_fmt[i].sizeimage;
+
+ return 0;
+}
+
+static void fdp1_buf_prepare_field(struct fdp1_q_data *q_data,
+ struct vb2_v4l2_buffer *vbuf,
+ unsigned int field_num)
+{
+ struct fdp1_buffer *buf = to_fdp1_buffer(vbuf);
+ struct fdp1_field_buffer *fbuf = &buf->fields[field_num];
+ unsigned int num_fields;
+ unsigned int i;
+
+ num_fields = V4L2_FIELD_HAS_BOTH(vbuf->field) ? 2 : 1;
+
+ fbuf->vb = vbuf;
+ fbuf->last_field = (field_num + 1) == num_fields;
+
+ for (i = 0; i < vbuf->vb2_buf.num_planes; ++i)
+ fbuf->addrs[i] = vb2_dma_contig_plane_dma_addr(&vbuf->vb2_buf, i);
+
+ switch (vbuf->field) {
+ case V4L2_FIELD_INTERLACED:
+ /*
+ * Interlaced means bottom-top for 60Hz TV standards (NTSC) and
+ * top-bottom for 50Hz. As TV standards are not applicable to
+ * the mem-to-mem API, use the height as a heuristic.
+ */
+ fbuf->field = (q_data->format.height < 576) == field_num
+ ? V4L2_FIELD_TOP : V4L2_FIELD_BOTTOM;
+ break;
+ case V4L2_FIELD_INTERLACED_TB:
+ case V4L2_FIELD_SEQ_TB:
+ fbuf->field = field_num ? V4L2_FIELD_BOTTOM : V4L2_FIELD_TOP;
+ break;
+ case V4L2_FIELD_INTERLACED_BT:
+ case V4L2_FIELD_SEQ_BT:
+ fbuf->field = field_num ? V4L2_FIELD_TOP : V4L2_FIELD_BOTTOM;
+ break;
+ default:
+ fbuf->field = vbuf->field;
+ break;
+ }
+
+ /* The first field needs no address adjustment */
+ if (!field_num)
+ return;
+
+ /* Adjust buffer addresses for second field */
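+ /*
+ * Interleaved formats carry the second field on alternate lines,
+ * one line stride into the plane; sequential formats store it a
+ * full field (vsize lines) into the plane.
+ */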
+ switch (vbuf->field) {
+ case V4L2_FIELD_INTERLACED:
+ case V4L2_FIELD_INTERLACED_TB:
+ case V4L2_FIELD_INTERLACED_BT:
+ for (i = 0; i < vbuf->vb2_buf.num_planes; i++)
+ fbuf->addrs[i] +=
+ (i == 0 ? q_data->stride_y : q_data->stride_c);
+ break;
+ case V4L2_FIELD_SEQ_TB:
+ case V4L2_FIELD_SEQ_BT:
+ for (i = 0; i < vbuf->vb2_buf.num_planes; i++)
+ fbuf->addrs[i] += q_data->vsize *
+ (i == 0 ? q_data->stride_y : q_data->stride_c);
+ break;
+ }
+}
+
+static int fdp1_buf_prepare(struct vb2_buffer *vb)
+{
+ struct fdp1_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
+ struct fdp1_q_data *q_data = get_q_data(ctx, vb->vb2_queue->type);
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct fdp1_buffer *buf = to_fdp1_buffer(vbuf);
+ unsigned int i;
+
+ if (V4L2_TYPE_IS_OUTPUT(vb->vb2_queue->type)) {
+ bool field_valid = true;
+
+ /* Validate the buffer field. */
+ switch (q_data->format.field) {
+ case V4L2_FIELD_NONE:
+ if (vbuf->field != V4L2_FIELD_NONE)
+ field_valid = false;
+ break;
+
+ case V4L2_FIELD_ALTERNATE:
+ if (vbuf->field != V4L2_FIELD_TOP &&
+ vbuf->field != V4L2_FIELD_BOTTOM)
+ field_valid = false;
+ break;
+
+ case V4L2_FIELD_INTERLACED:
+ case V4L2_FIELD_SEQ_TB:
+ case V4L2_FIELD_SEQ_BT:
+ case V4L2_FIELD_INTERLACED_TB:
+ case V4L2_FIELD_INTERLACED_BT:
+ if (vbuf->field != q_data->format.field)
+ field_valid = false;
+ break;
+ }
+
+ if (!field_valid) {
+ dprintk(ctx->fdp1,
+ "buffer field %u invalid for format field %u\n",
+ vbuf->field, q_data->format.field);
+ return -EINVAL;
+ }
+ } else {
+ vbuf->field = V4L2_FIELD_NONE;
+ }
+
+ /* Validate the planes sizes. */
+ for (i = 0; i < q_data->format.num_planes; i++) {
+ unsigned long size = q_data->format.plane_fmt[i].sizeimage;
+
+ if (vb2_plane_size(vb, i) < size) {
+ dprintk(ctx->fdp1,
+ "data will not fit into plane [%u/%u] (%lu < %lu)\n",
+ i, q_data->format.num_planes,
+ vb2_plane_size(vb, i), size);
+ return -EINVAL;
+ }
+
+ /* All supported formats have fixed plane sizes, use them as the payload */
+ vb2_set_plane_payload(vb, i, size);
+ }
+
+ buf->num_fields = V4L2_FIELD_HAS_BOTH(vbuf->field) ? 2 : 1;
+ for (i = 0; i < buf->num_fields; ++i)
+ fdp1_buf_prepare_field(q_data, vbuf, i);
+
+ return 0;
+}
+
+static void fdp1_buf_queue(struct vb2_buffer *vb)
+{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct fdp1_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
+
+ v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vbuf);
+}
+
+static int fdp1_start_streaming(struct vb2_queue *q, unsigned int count)
+{
+ struct fdp1_ctx *ctx = vb2_get_drv_priv(q);
+ struct fdp1_q_data *q_data = get_q_data(ctx, q->type);
+
+ if (V4L2_TYPE_IS_OUTPUT(q->type)) {
+ /*
+ * Force the deinterlacing mode to progressive when the format
+ * is progressive, ignoring any setting from the user; otherwise
+ * lock in the requested de-interlace mode.
+ */
+ if (q_data->format.field == V4L2_FIELD_NONE)
+ ctx->deint_mode = FDP1_PROGRESSIVE;
+
+ if (ctx->deint_mode == FDP1_ADAPT2D3D) {
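+ /*
+ * Adaptive 2D/3D deinterlacing needs a still-mask (SMSK)
+ * scratch buffer, two bytes per pixel for two fields, which
+ * the hardware appears to use to track per-pixel motion; the
+ * two halves serve the alternating fields.
+ */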
+ u32 stride;
+ dma_addr_t smsk_base;
+ const u32 bpp = 2; /* bytes per pixel */
+
+ stride = round_up(q_data->format.width, 8);
+
+ ctx->smsk_size = bpp * stride * q_data->vsize;
+
+ ctx->smsk_cpu = dma_alloc_coherent(ctx->fdp1->dev,
+ ctx->smsk_size, &smsk_base, GFP_KERNEL);
+
+ if (ctx->smsk_cpu == NULL) {
+ dprintk(ctx->fdp1, "Failed to alloc smsk\n");
+ return -ENOMEM;
+ }
+
+ ctx->smsk_addr[0] = smsk_base;
+ ctx->smsk_addr[1] = smsk_base + (ctx->smsk_size/2);
+ }
+ }
+
+ return 0;
+}
+
+static void fdp1_stop_streaming(struct vb2_queue *q)
+{
+ struct fdp1_ctx *ctx = vb2_get_drv_priv(q);
+ struct vb2_v4l2_buffer *vbuf;
+ unsigned long flags;
+
+ while (1) {
+ if (V4L2_TYPE_IS_OUTPUT(q->type))
+ vbuf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
+ else
+ vbuf = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
+ if (vbuf == NULL)
+ break;
+ spin_lock_irqsave(&ctx->fdp1->irqlock, flags);
+ v4l2_m2m_buf_done(vbuf, VB2_BUF_STATE_ERROR);
+ spin_unlock_irqrestore(&ctx->fdp1->irqlock, flags);
+ }
+
+ /* Empty Output queues */
+ if (V4L2_TYPE_IS_OUTPUT(q->type)) {
+ /* Empty our internal queues */
+ struct fdp1_field_buffer *fbuf;
+
+ /* Free any queued buffers */
+ fbuf = fdp1_dequeue_field(ctx);
+ while (fbuf != NULL) {
+ fdp1_field_complete(ctx, fbuf);
+ fbuf = fdp1_dequeue_field(ctx);
+ }
+
+ /* Free smsk_data */
+ if (ctx->smsk_cpu) {
+ dma_free_coherent(ctx->fdp1->dev, ctx->smsk_size,
+ ctx->smsk_cpu, ctx->smsk_addr[0]);
+ ctx->smsk_addr[0] = ctx->smsk_addr[1] = 0;
+ ctx->smsk_cpu = NULL;
+ }
+
+ WARN(!list_empty(&ctx->fields_queue),
+ "Buffer queue not empty");
+ } else {
+ /* Empty Capture queues (Jobs) */
+ struct fdp1_job *job;
+
+ job = get_queued_job(ctx->fdp1);
+ while (job) {
+ if (FDP1_DEINT_MODE_USES_PREV(ctx->deint_mode))
+ fdp1_field_complete(ctx, job->previous);
+ else
+ fdp1_field_complete(ctx, job->active);
+
+ v4l2_m2m_buf_done(job->dst->vb, VB2_BUF_STATE_ERROR);
+ job->dst = NULL;
+
+ job = get_queued_job(ctx->fdp1);
+ }
+
+ /* Free any held buffer in the ctx */
+ fdp1_field_complete(ctx, ctx->previous);
+
+ WARN(!list_empty(&ctx->fdp1->queued_job_list),
+ "Queued Job List not empty");
+
+ WARN(!list_empty(&ctx->fdp1->hw_job_list),
+ "HW Job list not empty");
+ }
+}
+
+static const struct vb2_ops fdp1_qops = {
+ .queue_setup = fdp1_queue_setup,
+ .buf_prepare = fdp1_buf_prepare,
+ .buf_queue = fdp1_buf_queue,
+ .start_streaming = fdp1_start_streaming,
+ .stop_streaming = fdp1_stop_streaming,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
+};
+
+static int queue_init(void *priv, struct vb2_queue *src_vq,
+ struct vb2_queue *dst_vq)
+{
+ struct fdp1_ctx *ctx = priv;
+ int ret;
+
+ src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
+ src_vq->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
+ src_vq->drv_priv = ctx;
+ src_vq->buf_struct_size = sizeof(struct fdp1_buffer);
+ src_vq->ops = &fdp1_qops;
+ src_vq->mem_ops = &vb2_dma_contig_memops;
+ src_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+ src_vq->lock = &ctx->fdp1->dev_mutex;
+ src_vq->dev = ctx->fdp1->dev;
+
+ ret = vb2_queue_init(src_vq);
+ if (ret)
+ return ret;
+
+ dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
+ dst_vq->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
+ dst_vq->drv_priv = ctx;
+ dst_vq->buf_struct_size = sizeof(struct fdp1_buffer);
+ dst_vq->ops = &fdp1_qops;
+ dst_vq->mem_ops = &vb2_dma_contig_memops;
+ dst_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+ dst_vq->lock = &ctx->fdp1->dev_mutex;
+ dst_vq->dev = ctx->fdp1->dev;
+
+ return vb2_queue_init(dst_vq);
+}
+
+/*
+ * File operations
+ */
+static int fdp1_open(struct file *file)
+{
+ struct fdp1_dev *fdp1 = video_drvdata(file);
+ struct v4l2_pix_format_mplane format;
+ struct fdp1_ctx *ctx = NULL;
+ struct v4l2_ctrl *ctrl;
+ int ret = 0;
+
+ if (mutex_lock_interruptible(&fdp1->dev_mutex))
+ return -ERESTARTSYS;
+
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx) {
+ ret = -ENOMEM;
+ goto done;
+ }
+
+ v4l2_fh_init(&ctx->fh, video_devdata(file));
+ file->private_data = &ctx->fh;
+ ctx->fdp1 = fdp1;
+
+ /* Initialise Queues */
+ INIT_LIST_HEAD(&ctx->fields_queue);
+
+ ctx->translen = 1;
+ ctx->sequence = 0;
+
+ /* Initialise controls */
+
+ v4l2_ctrl_handler_init(&ctx->hdl, 3);
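+ /*
+ * Bit 0 masks the "Progressive" entry out of the menu: it is
+ * selected internally for progressive sources rather than by
+ * the user.
+ */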
+ v4l2_ctrl_new_std_menu_items(&ctx->hdl, &fdp1_ctrl_ops,
+ V4L2_CID_DEINTERLACING_MODE,
+ FDP1_NEXTFIELD, BIT(0), FDP1_FIXED3D,
+ fdp1_ctrl_deint_menu);
+
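+ /*
+ * The minimum number of capture buffers depends on the current
+ * output field format, so the control is volatile and is
+ * recomputed in g_volatile_ctrl.
+ */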
+ ctrl = v4l2_ctrl_new_std(&ctx->hdl, &fdp1_ctrl_ops,
+ V4L2_CID_MIN_BUFFERS_FOR_CAPTURE, 1, 2, 1, 1);
+ if (ctrl)
+ ctrl->flags |= V4L2_CTRL_FLAG_VOLATILE;
+
+ v4l2_ctrl_new_std(&ctx->hdl, &fdp1_ctrl_ops,
+ V4L2_CID_ALPHA_COMPONENT, 0, 255, 1, 255);
+
+ if (ctx->hdl.error) {
+ ret = ctx->hdl.error;
+ v4l2_ctrl_handler_free(&ctx->hdl);
+ goto done;
+ }
+
+ ctx->fh.ctrl_handler = &ctx->hdl;
+ v4l2_ctrl_handler_setup(&ctx->hdl);
+
+ /* Configure default parameters. */
+ memset(&format, 0, sizeof(format));
+ fdp1_set_format(ctx, &format, V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
+
+ ctx->fh.m2m_ctx = v4l2_m2m_ctx_init(fdp1->m2m_dev, ctx, &queue_init);
+
+ if (IS_ERR(ctx->fh.m2m_ctx)) {
+ ret = PTR_ERR(ctx->fh.m2m_ctx);
+
+ v4l2_ctrl_handler_free(&ctx->hdl);
+ kfree(ctx);
+ goto done;
+ }
+
+ /* Perform any power management required */
+ pm_runtime_get_sync(fdp1->dev);
+
+ v4l2_fh_add(&ctx->fh);
+
+ dprintk(fdp1, "Created instance: %p, m2m_ctx: %p\n",
+ ctx, ctx->fh.m2m_ctx);
+
+done:
+ mutex_unlock(&fdp1->dev_mutex);
+ return ret;
+}
+
+static int fdp1_release(struct file *file)
+{
+ struct fdp1_dev *fdp1 = video_drvdata(file);
+ struct fdp1_ctx *ctx = fh_to_ctx(file->private_data);
+
+ dprintk(fdp1, "Releasing instance %p\n", ctx);
+
+ v4l2_fh_del(&ctx->fh);
+ v4l2_fh_exit(&ctx->fh);
+ v4l2_ctrl_handler_free(&ctx->hdl);
+ mutex_lock(&fdp1->dev_mutex);
+ v4l2_m2m_ctx_release(ctx->fh.m2m_ctx);
+ mutex_unlock(&fdp1->dev_mutex);
+ kfree(ctx);
+
+ pm_runtime_put(fdp1->dev);
+
+ return 0;
+}
+
+static const struct v4l2_file_operations fdp1_fops = {
+ .owner = THIS_MODULE,
+ .open = fdp1_open,
+ .release = fdp1_release,
+ .poll = v4l2_m2m_fop_poll,
+ .unlocked_ioctl = video_ioctl2,
+ .mmap = v4l2_m2m_fop_mmap,
+};
+
+static const struct video_device fdp1_videodev = {
+ .name = DRIVER_NAME,
+ .vfl_dir = VFL_DIR_M2M,
+ .fops = &fdp1_fops,
+ .device_caps = V4L2_CAP_VIDEO_M2M_MPLANE | V4L2_CAP_STREAMING,
+ .ioctl_ops = &fdp1_ioctl_ops,
+ .minor = -1,
+ .release = video_device_release_empty,
+};
+
+static const struct v4l2_m2m_ops m2m_ops = {
+ .device_run = fdp1_m2m_device_run,
+ .job_ready = fdp1_m2m_job_ready,
+ .job_abort = fdp1_m2m_job_abort,
+};
+
+static irqreturn_t fdp1_irq_handler(int irq, void *dev_id)
+{
+ struct fdp1_dev *fdp1 = dev_id;
+ u32 int_status;
+ u32 ctl_status;
+ u32 vint_cnt;
+ u32 cycles;
+
+ int_status = fdp1_read(fdp1, FD1_CTL_IRQSTA);
+ cycles = fdp1_read(fdp1, FD1_CTL_VCYCLE_STAT);
+ ctl_status = fdp1_read(fdp1, FD1_CTL_STATUS);
+ vint_cnt = (ctl_status & FD1_CTL_STATUS_VINT_CNT_MASK) >>
+ FD1_CTL_STATUS_VINT_CNT_SHIFT;
+
+ /* Clear interrupts: the IRQSTA bits are cleared by writing them as zero */
+ fdp1_write(fdp1, ~(int_status) & FD1_CTL_IRQ_MASK, FD1_CTL_IRQSTA);
+
+ if (debug >= 2) {
+ dprintk(fdp1, "IRQ: 0x%x %s%s%s\n", int_status,
+ int_status & FD1_CTL_IRQ_VERE ? "[Error]" : "[!E]",
+ int_status & FD1_CTL_IRQ_VINTE ? "[VSync]" : "[!V]",
+ int_status & FD1_CTL_IRQ_FREE ? "[FrameEnd]" : "[!F]");
+
+ dprintk(fdp1, "CycleStatus = %d (%dms)\n",
+ cycles, cycles/(fdp1->clk_rate/1000));
+
+ dprintk(fdp1,
+ "Control Status = 0x%08x : VINT_CNT = %d %s:%s:%s:%s\n",
+ ctl_status, vint_cnt,
+ ctl_status & FD1_CTL_STATUS_SGREGSET ? "RegSet" : "",
+ ctl_status & FD1_CTL_STATUS_SGVERR ? "Vsync Error" : "",
+ ctl_status & FD1_CTL_STATUS_SGFREND ? "FrameEnd" : "",
+ ctl_status & FD1_CTL_STATUS_BSY ? "Busy" : "");
+ dprintk(fdp1, "***********************************\n");
+ }
+
+ /* Spurious interrupt */
+ if (!(FD1_CTL_IRQ_MASK & int_status))
+ return IRQ_NONE;
+
+ /* Work completed, release the frame */
+ if (FD1_CTL_IRQ_VERE & int_status)
+ device_frame_end(fdp1, VB2_BUF_STATE_ERROR);
+ else if (FD1_CTL_IRQ_FREE & int_status)
+ device_frame_end(fdp1, VB2_BUF_STATE_DONE);
+
+ return IRQ_HANDLED;
+}
+
+static int fdp1_probe(struct platform_device *pdev)
+{
+ struct fdp1_dev *fdp1;
+ struct video_device *vfd;
+ struct device_node *fcp_node;
+ struct resource *res;
+ struct clk *clk;
+ unsigned int i;
+
+ int ret;
+ int hw_version;
+
+ fdp1 = devm_kzalloc(&pdev->dev, sizeof(*fdp1), GFP_KERNEL);
+ if (!fdp1)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&fdp1->free_job_list);
+ INIT_LIST_HEAD(&fdp1->queued_job_list);
+ INIT_LIST_HEAD(&fdp1->hw_job_list);
+
+ /* Initialise the jobs on the free list */
+ for (i = 0; i < ARRAY_SIZE(fdp1->jobs); i++)
+ list_add(&fdp1->jobs[i].list, &fdp1->free_job_list);
+
+ mutex_init(&fdp1->dev_mutex);
+
+ spin_lock_init(&fdp1->irqlock);
+ spin_lock_init(&fdp1->device_process_lock);
+ fdp1->dev = &pdev->dev;
+ platform_set_drvdata(pdev, fdp1);
+
+ /* Memory-mapped registers */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ fdp1->regs = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(fdp1->regs))
+ return PTR_ERR(fdp1->regs);
+
+ /* Interrupt service routine registration */
+ fdp1->irq = ret = platform_get_irq(pdev, 0);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "cannot find IRQ\n");
+ return ret;
+ }
+
+ ret = devm_request_irq(&pdev->dev, fdp1->irq, fdp1_irq_handler, 0,
+ dev_name(&pdev->dev), fdp1);
+ if (ret) {
+ dev_err(&pdev->dev, "cannot claim IRQ %d\n", fdp1->irq);
+ return ret;
+ }
+
+ /* FCP */
+ fcp_node = of_parse_phandle(pdev->dev.of_node, "renesas,fcp", 0);
+ if (fcp_node) {
+ fdp1->fcp = rcar_fcp_get(fcp_node);
+ of_node_put(fcp_node);
+ if (IS_ERR(fdp1->fcp)) {
+ dev_err(&pdev->dev, "FCP not found (%ld)\n",
+ PTR_ERR(fdp1->fcp));
+ return PTR_ERR(fdp1->fcp);
+ }
+ }
+
+ /* Determine our clock rate */
+ clk = clk_get(&pdev->dev, NULL);
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
+
+ fdp1->clk_rate = clk_get_rate(clk);
+ clk_put(clk);
+
+ /* V4L2 device registration */
+ ret = v4l2_device_register(&pdev->dev, &fdp1->v4l2_dev);
+ if (ret) {
+ v4l2_err(&fdp1->v4l2_dev, "Failed to register video device\n");
+ return ret;
+ }
+
+ /* M2M registration */
+ fdp1->m2m_dev = v4l2_m2m_init(&m2m_ops);
+ if (IS_ERR(fdp1->m2m_dev)) {
+ v4l2_err(&fdp1->v4l2_dev, "Failed to init mem2mem device\n");
+ ret = PTR_ERR(fdp1->m2m_dev);
+ goto unreg_dev;
+ }
+
+ /* Video registration */
+ fdp1->vfd = fdp1_videodev;
+ vfd = &fdp1->vfd;
+ vfd->lock = &fdp1->dev_mutex;
+ vfd->v4l2_dev = &fdp1->v4l2_dev;
+ video_set_drvdata(vfd, fdp1);
+ strlcpy(vfd->name, fdp1_videodev.name, sizeof(vfd->name));
+
+ ret = video_register_device(vfd, VFL_TYPE_GRABBER, 0);
+ if (ret) {
+ v4l2_err(&fdp1->v4l2_dev, "Failed to register video device\n");
+ goto release_m2m;
+ }
+
+ v4l2_info(&fdp1->v4l2_dev,
+ "Device registered as /dev/video%d\n", vfd->num);
+
+ /* Power up the cells to read HW */
+ pm_runtime_enable(&pdev->dev);
+ pm_runtime_get_sync(fdp1->dev);
+
+ hw_version = fdp1_read(fdp1, FD1_IP_INTDATA);
+ switch (hw_version) {
+ case FD1_IP_H3:
+ dprintk(fdp1, "FDP1 Version R-Car H3\n");
+ break;
+ case FD1_IP_M3W:
+ dprintk(fdp1, "FDP1 Version R-Car M3-W\n");
+ break;
+ default:
+ dev_err(fdp1->dev, "FDP1 Unidentifiable (0x%08x)\n",
+ hw_version);
+ }
+
+ /* Allow the hw to sleep until an open call puts it to use */
+ pm_runtime_put(fdp1->dev);
+
+ return 0;
+
+release_m2m:
+ v4l2_m2m_release(fdp1->m2m_dev);
+
+unreg_dev:
+ v4l2_device_unregister(&fdp1->v4l2_dev);
+
+ return ret;
+}
+
+static int fdp1_remove(struct platform_device *pdev)
+{
+ struct fdp1_dev *fdp1 = platform_get_drvdata(pdev);
+
+ v4l2_m2m_release(fdp1->m2m_dev);
+ video_unregister_device(&fdp1->vfd);
+ v4l2_device_unregister(&fdp1->v4l2_dev);
+ pm_runtime_disable(&pdev->dev);
+
+ return 0;
+}
+
+static int __maybe_unused fdp1_pm_runtime_suspend(struct device *dev)
+{
+ struct fdp1_dev *fdp1 = dev_get_drvdata(dev);
+
+ rcar_fcp_disable(fdp1->fcp);
+
+ return 0;
+}
+
+static int __maybe_unused fdp1_pm_runtime_resume(struct device *dev)
+{
+ struct fdp1_dev *fdp1 = dev_get_drvdata(dev);
+
+ /* Program in the static LUTs */
+ fdp1_set_lut(fdp1);
+
+ return rcar_fcp_enable(fdp1->fcp);
+}
+
+static const struct dev_pm_ops fdp1_pm_ops = {
+ SET_RUNTIME_PM_OPS(fdp1_pm_runtime_suspend,
+ fdp1_pm_runtime_resume,
+ NULL)
+};
+
+static const struct of_device_id fdp1_dt_ids[] = {
+ { .compatible = "renesas,fdp1" },
+ { },
+};
+MODULE_DEVICE_TABLE(of, fdp1_dt_ids);
+
+static struct platform_driver fdp1_pdrv = {
+ .probe = fdp1_probe,
+ .remove = fdp1_remove,
+ .driver = {
+ .name = DRIVER_NAME,
+ .of_match_table = fdp1_dt_ids,
+ .pm = &fdp1_pm_ops,
+ },
+};
+
+module_platform_driver(fdp1_pdrv);
+
+MODULE_DESCRIPTION("Renesas R-Car Fine Display Processor Driver");
+MODULE_AUTHOR("Kieran Bingham <kieran@bingham.xyz>");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:" DRIVER_NAME);
diff --git a/drivers/media/platform/s5p-jpeg/jpeg-hw-exynos4.c b/drivers/media/platform/s5p-jpeg/jpeg-hw-exynos4.c
index 0912d0a892e2..a1d823ab0c63 100644
--- a/drivers/media/platform/s5p-jpeg/jpeg-hw-exynos4.c
+++ b/drivers/media/platform/s5p-jpeg/jpeg-hw-exynos4.c
@@ -178,20 +178,12 @@ void exynos4_jpeg_set_interrupt(void __iomem *base, unsigned int version)
unsigned int exynos4_jpeg_get_int_status(void __iomem *base)
{
- unsigned int int_status;
-
- int_status = readl(base + EXYNOS4_INT_STATUS_REG);
-
- return int_status;
+ return readl(base + EXYNOS4_INT_STATUS_REG);
}
unsigned int exynos4_jpeg_get_fifo_status(void __iomem *base)
{
- unsigned int fifo_status;
-
- fifo_status = readl(base + EXYNOS4_FIFO_STATUS_REG);
-
- return fifo_status;
+ return readl(base + EXYNOS4_FIFO_STATUS_REG);
}
void exynos4_jpeg_set_huf_table_enable(void __iomem *base, int value)
@@ -296,10 +288,7 @@ void exynos4_jpeg_set_encode_hoff_cnt(void __iomem *base, unsigned int fmt)
unsigned int exynos4_jpeg_get_stream_size(void __iomem *base)
{
- unsigned int size;
-
- size = readl(base + EXYNOS4_BITSTREAM_SIZE_REG);
- return size;
+ return readl(base + EXYNOS4_BITSTREAM_SIZE_REG);
}
void exynos4_jpeg_set_dec_bitstream_size(void __iomem *base, unsigned int size)
diff --git a/drivers/media/platform/s5p-mfc/regs-mfc-v6.h b/drivers/media/platform/s5p-mfc/regs-mfc-v6.h
index 83e01f3466e9..d2cd35916dc5 100644
--- a/drivers/media/platform/s5p-mfc/regs-mfc-v6.h
+++ b/drivers/media/platform/s5p-mfc/regs-mfc-v6.h
@@ -386,7 +386,8 @@
((w) * 144 + 8192 * (h) + 49216 + 1048576)
#define S5P_FIMV_SCRATCH_BUF_SIZE_VC1_DEC_V6(w, h) \
(2096 * ((w) + (h) + 1))
-#define S5P_FIMV_SCRATCH_BUF_SIZE_H263_DEC_V6(w, h) ((w) * 400)
+#define S5P_FIMV_SCRATCH_BUF_SIZE_H263_DEC_V6(w, h) \
+ S5P_FIMV_SCRATCH_BUF_SIZE_MPEG4_DEC_V6(w, h)
#define S5P_FIMV_SCRATCH_BUF_SIZE_VP8_DEC_V6(w, h) \
((w) * 32 + (h) * 128 + (((w) + 1) / 2) * 64 + 2112)
#define S5P_FIMV_SCRATCH_BUF_SIZE_H264_ENC_V6(w, h) \
diff --git a/drivers/media/platform/s5p-mfc/regs-mfc-v8.h b/drivers/media/platform/s5p-mfc/regs-mfc-v8.h
index cc7cbec51b5e..4d1c3750eb5e 100644
--- a/drivers/media/platform/s5p-mfc/regs-mfc-v8.h
+++ b/drivers/media/platform/s5p-mfc/regs-mfc-v8.h
@@ -90,7 +90,7 @@
#define S5P_FIMV_E_H264_OPTIONS_V8 0xfb54
/* MFCv8 Context buffer sizes */
-#define MFC_CTX_BUF_SIZE_V8 (30 * SZ_1K) /* 30KB */
+#define MFC_CTX_BUF_SIZE_V8 (36 * SZ_1K) /* 36KB */
#define MFC_H264_DEC_CTX_BUF_SIZE_V8 (2 * SZ_1M) /* 2MB */
#define MFC_OTHER_DEC_CTX_BUF_SIZE_V8 (20 * SZ_1K) /* 20KB */
#define MFC_H264_ENC_CTX_BUF_SIZE_V8 (100 * SZ_1K) /* 100KB */
diff --git a/drivers/media/platform/s5p-mfc/regs-mfc.h b/drivers/media/platform/s5p-mfc/regs-mfc.h
index 6ccc3f8c122a..57b7e0be0596 100644
--- a/drivers/media/platform/s5p-mfc/regs-mfc.h
+++ b/drivers/media/platform/s5p-mfc/regs-mfc.h
@@ -393,6 +393,9 @@
#define S5P_FIMV_REG_CLEAR_COUNT 0
/* Error handling defines */
+#define S5P_FIMV_ERR_NO_VALID_SEQ_HDR 67
+#define S5P_FIMV_ERR_INCOMPLETE_FRAME 124
+#define S5P_FIMV_ERR_TIMEOUT 140
#define S5P_FIMV_ERR_WARNINGS_START 145
#define S5P_FIMV_ERR_DEC_MASK 0xFFFF
#define S5P_FIMV_ERR_DEC_SHIFT 0
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc.c b/drivers/media/platform/s5p-mfc/s5p_mfc.c
index 0a5b8f5e011e..bb0a5887c9a9 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc.c
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc.c
@@ -641,8 +641,11 @@ static irqreturn_t s5p_mfc_irq(int irq, void *priv)
case S5P_MFC_R2H_CMD_ERR_RET:
/* An error has occurred */
if (ctx->state == MFCINST_RUNNING &&
- s5p_mfc_hw_call(dev->mfc_ops, err_dec, err) >=
- dev->warn_start)
+ (s5p_mfc_hw_call(dev->mfc_ops, err_dec, err) >=
+ dev->warn_start ||
+ err == S5P_FIMV_ERR_NO_VALID_SEQ_HDR ||
+ err == S5P_FIMV_ERR_INCOMPLETE_FRAME ||
+ err == S5P_FIMV_ERR_TIMEOUT))
s5p_mfc_handle_frame(ctx, reason, err);
else
s5p_mfc_handle_error(dev, ctx, reason, err);
@@ -848,6 +851,11 @@ static int s5p_mfc_open(struct file *file)
ret = -ENOENT;
goto err_queue_init;
}
+ /*
+ * We'll do mostly sequential access, so sacrifice TLB efficiency for
+ * faster allocation.
+ */
+ q->dma_attrs = DMA_ATTR_ALLOC_SINGLE_PAGES;
q->mem_ops = &vb2_dma_contig_memops;
q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
ret = vb2_queue_init(q);
@@ -878,6 +886,12 @@ static int s5p_mfc_open(struct file *file)
* will keep the value of bytesused intact.
*/
q->allow_zero_bytesused = 1;
+
+ /*
+ * We'll do mostly sequential access, so sacrifice TLB efficiency for
+ * faster allocation.
+ */
+ q->dma_attrs = DMA_ATTR_ALLOC_SINGLE_PAGES;
q->mem_ops = &vb2_dma_contig_memops;
q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
ret = vb2_queue_init(q);
@@ -926,10 +940,11 @@ static int s5p_mfc_release(struct file *file)
mfc_debug_enter();
if (dev)
mutex_lock(&dev->mfc_mutex);
- s5p_mfc_clock_on();
vb2_queue_release(&ctx->vq_src);
vb2_queue_release(&ctx->vq_dst);
if (dev) {
+ s5p_mfc_clock_on();
+
/* Mark context as idle */
clear_work_bit_irqsave(ctx);
/*
@@ -948,12 +963,14 @@ static int s5p_mfc_release(struct file *file)
mfc_debug(2, "Last instance\n");
s5p_mfc_deinit_hw(dev);
del_timer_sync(&dev->watchdog_timer);
+ s5p_mfc_clock_off();
if (s5p_mfc_power_off() < 0)
mfc_err("Power off failed\n");
+ } else {
+ mfc_debug(2, "Shutting down clock\n");
+ s5p_mfc_clock_off();
}
}
- mfc_debug(2, "Shutting down clock\n");
- s5p_mfc_clock_off();
if (dev)
dev->ctx[ctx->num] = NULL;
s5p_mfc_dec_ctrls_delete(ctx);
@@ -1082,6 +1099,7 @@ static struct device *s5p_mfc_alloc_memdev(struct device *dev,
idx);
if (ret == 0)
return child;
+ device_del(child);
}
put_device(child);
@@ -1387,31 +1405,9 @@ static int s5p_mfc_resume(struct device *dev)
}
#endif
-#ifdef CONFIG_PM
-static int s5p_mfc_runtime_suspend(struct device *dev)
-{
- struct platform_device *pdev = to_platform_device(dev);
- struct s5p_mfc_dev *m_dev = platform_get_drvdata(pdev);
-
- atomic_set(&m_dev->pm.power, 0);
- return 0;
-}
-
-static int s5p_mfc_runtime_resume(struct device *dev)
-{
- struct platform_device *pdev = to_platform_device(dev);
- struct s5p_mfc_dev *m_dev = platform_get_drvdata(pdev);
-
- atomic_set(&m_dev->pm.power, 1);
- return 0;
-}
-#endif
-
/* Power management */
static const struct dev_pm_ops s5p_mfc_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(s5p_mfc_suspend, s5p_mfc_resume)
- SET_RUNTIME_PM_OPS(s5p_mfc_runtime_suspend, s5p_mfc_runtime_resume,
- NULL)
};
static struct s5p_mfc_buf_size_v5 mfc_buf_size_v5 = {
@@ -1438,6 +1434,9 @@ static struct s5p_mfc_variant mfc_drvdata_v5 = {
.buf_size = &buf_size_v5,
.buf_align = &mfc_buf_align_v5,
.fw_name[0] = "s5p-mfc.fw",
+ .clk_names = {"mfc", "sclk_mfc"},
+ .num_clocks = 2,
+ .use_clock_gating = true,
};
static struct s5p_mfc_buf_size_v6 mfc_buf_size_v6 = {
@@ -1470,6 +1469,8 @@ static struct s5p_mfc_variant mfc_drvdata_v6 = {
* for init buffer command
*/
.fw_name[1] = "s5p-mfc-v6-v2.fw",
+ .clk_names = {"mfc"},
+ .num_clocks = 1,
};
static struct s5p_mfc_buf_size_v6 mfc_buf_size_v7 = {
@@ -1497,6 +1498,8 @@ static struct s5p_mfc_variant mfc_drvdata_v7 = {
.buf_size = &buf_size_v7,
.buf_align = &mfc_buf_align_v7,
.fw_name[0] = "s5p-mfc-v7.fw",
+ .clk_names = {"mfc", "sclk_mfc"},
+ .num_clocks = 2,
};
static struct s5p_mfc_buf_size_v6 mfc_buf_size_v8 = {
@@ -1524,6 +1527,19 @@ static struct s5p_mfc_variant mfc_drvdata_v8 = {
.buf_size = &buf_size_v8,
.buf_align = &mfc_buf_align_v8,
.fw_name[0] = "s5p-mfc-v8.fw",
+ .clk_names = {"mfc"},
+ .num_clocks = 1,
+};
+
+static struct s5p_mfc_variant mfc_drvdata_v8_5433 = {
+ .version = MFC_VERSION_V8,
+ .version_bit = MFC_V8_BIT,
+ .port_num = MFC_NUM_PORTS_V8,
+ .buf_size = &buf_size_v8,
+ .buf_align = &mfc_buf_align_v8,
+ .fw_name[0] = "s5p-mfc-v8.fw",
+ .clk_names = {"pclk", "aclk", "aclk_xiu"},
+ .num_clocks = 3,
};
static const struct of_device_id exynos_mfc_match[] = {
@@ -1539,6 +1555,9 @@ static const struct of_device_id exynos_mfc_match[] = {
}, {
.compatible = "samsung,mfc-v8",
.data = &mfc_drvdata_v8,
+ }, {
+ .compatible = "samsung,exynos5433-mfc",
+ .data = &mfc_drvdata_v8_5433,
},
{},
};
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_common.h b/drivers/media/platform/s5p-mfc/s5p_mfc_common.h
index 46b99f28cbd7..ab23236aa942 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_common.h
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_common.h
@@ -104,6 +104,8 @@ static inline dma_addr_t s5p_mfc_mem_cookie(void *a, void *b)
#define S5P_MFC_R2H_CMD_ENC_BUFFER_FUL_RET 16
#define S5P_MFC_R2H_CMD_ERR_RET 32
+#define MFC_MAX_CLOCKS 4
+
#define mfc_read(dev, offset) readl(dev->regs_base + (offset))
#define mfc_write(dev, data, offset) writel((data), dev->regs_base + \
(offset))
@@ -197,9 +199,12 @@ struct s5p_mfc_buf {
* struct s5p_mfc_pm - power management data structure
*/
struct s5p_mfc_pm {
- struct clk *clock;
struct clk *clock_gate;
- atomic_t power;
+ const char **clk_names;
+ struct clk *clocks[MFC_MAX_CLOCKS];
+ int num_clocks;
+ bool use_clock_gating;
+
struct device *device;
};
@@ -235,6 +240,9 @@ struct s5p_mfc_variant {
struct s5p_mfc_buf_size *buf_size;
struct s5p_mfc_buf_align *buf_align;
char *fw_name[MFC_FW_MAX_VERSIONS];
+ const char *clk_names[MFC_MAX_CLOCKS];
+ int num_clocks;
+ bool use_clock_gating;
};
/**
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_debug.h b/drivers/media/platform/s5p-mfc/s5p_mfc_debug.h
index 5936923c631c..1936a5b868f5 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_debug.h
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_debug.h
@@ -39,6 +39,12 @@ extern int mfc_debug_level;
__func__, __LINE__, ##args); \
} while (0)
+#define mfc_err_limited(fmt, args...) \
+ do { \
+ printk_ratelimited(KERN_ERR "%s:%d: " fmt, \
+ __func__, __LINE__, ##args); \
+ } while (0)
+
#define mfc_info(fmt, args...) \
do { \
printk(KERN_INFO "%s:%d: " fmt, \
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_dec.c b/drivers/media/platform/s5p-mfc/s5p_mfc_dec.c
index 52081ddc9bf2..367ef8e8dbf0 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_dec.c
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_dec.c
@@ -642,7 +642,7 @@ static int vidioc_dqbuf(struct file *file, void *priv, struct v4l2_buffer *buf)
int ret;
if (ctx->state == MFCINST_ERROR) {
- mfc_err("Call on DQBUF after unrecoverable error\n");
+ mfc_err_limited("Call on DQBUF after unrecoverable error\n");
return -EIO;
}
@@ -793,18 +793,17 @@ static int vidioc_g_crop(struct file *file, void *priv,
cr->c.top = top;
cr->c.width = ctx->img_width - left - right;
cr->c.height = ctx->img_height - top - bottom;
- mfc_debug(2, "Cropping info [h264]: l=%d t=%d "
- "w=%d h=%d (r=%d b=%d fw=%d fh=%d\n", left, top,
- cr->c.width, cr->c.height, right, bottom,
- ctx->buf_width, ctx->buf_height);
+ mfc_debug(2, "Cropping info [h264]: l=%d t=%d w=%d h=%d (r=%d b=%d fw=%d fh=%d\n",
+ left, top, cr->c.width, cr->c.height, right, bottom,
+ ctx->buf_width, ctx->buf_height);
} else {
cr->c.left = 0;
cr->c.top = 0;
cr->c.width = ctx->img_width;
cr->c.height = ctx->img_height;
- mfc_debug(2, "Cropping info: w=%d h=%d fw=%d "
- "fh=%d\n", cr->c.width, cr->c.height, ctx->buf_width,
- ctx->buf_height);
+ mfc_debug(2, "Cropping info: w=%d h=%d fw=%d fh=%d\n",
+ cr->c.width, cr->c.height, ctx->buf_width,
+ ctx->buf_height);
}
return 0;
}
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c b/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c
index fcc2e054c61f..e39d9e06e299 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c
@@ -1268,7 +1268,7 @@ static int vidioc_dqbuf(struct file *file, void *priv, struct v4l2_buffer *buf)
int ret;
if (ctx->state == MFCINST_ERROR) {
- mfc_err("Call on DQBUF after unrecoverable error\n");
+ mfc_err_limited("Call on DQBUF after unrecoverable error\n");
return -EIO;
}
if (buf->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_opr.c b/drivers/media/platform/s5p-mfc/s5p_mfc_opr.c
index 1e7250260a9a..99f65a92a6be 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_opr.c
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_opr.c
@@ -45,13 +45,13 @@ int s5p_mfc_alloc_priv_buf(struct device *dev, dma_addr_t base,
b->virt = dma_alloc_coherent(dev, b->size, &b->dma, GFP_KERNEL);
if (!b->virt) {
- mfc_err("Allocating private buffer failed\n");
+ mfc_err("Allocating private buffer of size %zu failed\n",
+ b->size);
return -ENOMEM;
}
if (b->dma < base) {
- mfc_err("Invaling memory configuration!\n");
- mfc_err("Allocated buffer (%pad) is lower than memory base address (%pad)\n",
+ mfc_err("Invalid memory configuration - buffer (%pad) is below base memory address(%pad)\n",
&b->dma, &base);
dma_free_coherent(dev, b->size, b->virt, b->dma);
return -ENOMEM;
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v5.c b/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v5.c
index 81e1e4ce6c24..f4301d5bbd32 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v5.c
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v5.c
@@ -1293,14 +1293,11 @@ static int s5p_mfc_run_init_dec_buffers(struct s5p_mfc_ctx *ctx)
* First set the output frame buffers
*/
if (ctx->capture_state != QUEUE_BUFS_MMAPED) {
- mfc_err("It seems that not all destionation buffers were "
- "mmaped\nMFC requires that all destination are mmaped "
- "before starting processing\n");
+ mfc_err("It seems that not all destionation buffers were mmaped\nMFC requires that all destination are mmaped before starting processing\n");
return -EAGAIN;
}
if (list_empty(&ctx->src_queue)) {
- mfc_err("Header has been deallocated in the middle of"
- " initialization\n");
+ mfc_err("Header has been deallocated in the middle of initialization\n");
return -EIO;
}
temp_vb = list_entry(ctx->src_queue.next, struct s5p_mfc_buf, list);
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_pm.c b/drivers/media/platform/s5p-mfc/s5p_mfc_pm.c
index 930dc2dddae6..eb85cedc5ef3 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_pm.c
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_pm.c
@@ -18,129 +18,101 @@
#include "s5p_mfc_debug.h"
#include "s5p_mfc_pm.h"
-#define MFC_GATE_CLK_NAME "mfc"
-#define MFC_SCLK_NAME "sclk_mfc"
-#define MFC_SCLK_RATE (200 * 1000000)
-
-#define CLK_DEBUG
-
static struct s5p_mfc_pm *pm;
static struct s5p_mfc_dev *p_dev;
-
-#ifdef CLK_DEBUG
static atomic_t clk_ref;
-#endif
int s5p_mfc_init_pm(struct s5p_mfc_dev *dev)
{
- int ret = 0;
+ int i;
pm = &dev->pm;
p_dev = dev;
- pm->clock_gate = clk_get(&dev->plat_dev->dev, MFC_GATE_CLK_NAME);
- if (IS_ERR(pm->clock_gate)) {
- mfc_err("Failed to get clock-gating control\n");
- ret = PTR_ERR(pm->clock_gate);
- goto err_g_ip_clk;
- }
- ret = clk_prepare(pm->clock_gate);
- if (ret) {
- mfc_err("Failed to prepare clock-gating control\n");
- goto err_p_ip_clk;
- }
+ pm->num_clocks = dev->variant->num_clocks;
+ pm->clk_names = dev->variant->clk_names;
+ pm->device = &dev->plat_dev->dev;
+ pm->clock_gate = NULL;
- if (dev->variant->version != MFC_VERSION_V6) {
- pm->clock = clk_get(&dev->plat_dev->dev, MFC_SCLK_NAME);
- if (IS_ERR(pm->clock)) {
- mfc_info("Failed to get MFC special clock control\n");
- pm->clock = NULL;
- } else {
- clk_set_rate(pm->clock, MFC_SCLK_RATE);
- ret = clk_prepare_enable(pm->clock);
- if (ret) {
- mfc_err("Failed to enable MFC special clock\n");
- goto err_s_clk;
- }
+ /* clock control */
+ for (i = 0; i < pm->num_clocks; i++) {
+ pm->clocks[i] = devm_clk_get(pm->device, pm->clk_names[i]);
+ if (IS_ERR(pm->clocks[i])) {
+ mfc_err("Failed to get clock: %s\n",
+ pm->clk_names[i]);
+ return PTR_ERR(pm->clocks[i]);
}
}
- atomic_set(&pm->power, 0);
-#ifdef CONFIG_PM
- pm->device = &dev->plat_dev->dev;
+ if (dev->variant->use_clock_gating)
+ pm->clock_gate = pm->clocks[0];
+
pm_runtime_enable(pm->device);
-#endif
-#ifdef CLK_DEBUG
atomic_set(&clk_ref, 0);
-#endif
return 0;
-
-err_s_clk:
- clk_put(pm->clock);
- pm->clock = NULL;
-err_p_ip_clk:
- clk_put(pm->clock_gate);
- pm->clock_gate = NULL;
-err_g_ip_clk:
- return ret;
}
void s5p_mfc_final_pm(struct s5p_mfc_dev *dev)
{
- if (dev->variant->version != MFC_VERSION_V6 &&
- pm->clock) {
- clk_disable_unprepare(pm->clock);
- clk_put(pm->clock);
- pm->clock = NULL;
- }
- clk_unprepare(pm->clock_gate);
- clk_put(pm->clock_gate);
- pm->clock_gate = NULL;
-#ifdef CONFIG_PM
pm_runtime_disable(pm->device);
-#endif
}
int s5p_mfc_clock_on(void)
{
- int ret = 0;
-#ifdef CLK_DEBUG
atomic_inc(&clk_ref);
mfc_debug(3, "+ %d\n", atomic_read(&clk_ref));
-#endif
- if (!IS_ERR_OR_NULL(pm->clock_gate))
- ret = clk_enable(pm->clock_gate);
- return ret;
+
+ return clk_enable(pm->clock_gate);
}
void s5p_mfc_clock_off(void)
{
-#ifdef CLK_DEBUG
atomic_dec(&clk_ref);
mfc_debug(3, "- %d\n", atomic_read(&clk_ref));
-#endif
- if (!IS_ERR_OR_NULL(pm->clock_gate))
- clk_disable(pm->clock_gate);
+
+ clk_disable(pm->clock_gate);
}
int s5p_mfc_power_on(void)
{
-#ifdef CONFIG_PM
- return pm_runtime_get_sync(pm->device);
-#else
- atomic_set(&pm->power, 1);
+ int i, ret = 0;
+
+ ret = pm_runtime_get_sync(pm->device);
+ if (ret < 0)
+ return ret;
+
+ /* clock control */
+ for (i = 0; i < pm->num_clocks; i++) {
+ ret = clk_prepare_enable(pm->clocks[i]);
+ if (ret < 0) {
+ mfc_err("clock prepare failed for clock: %s\n",
+ pm->clk_names[i]);
+ goto err;
+ }
+ }
+
+ /* Prepare for software clock gating: the gate clock stays prepared but disabled */
+ clk_disable(pm->clock_gate);
+
return 0;
-#endif
+err:
+ while (--i > 0)
+ clk_disable_unprepare(pm->clocks[i]);
+ pm_runtime_put(pm->device);
+ return ret;
}
int s5p_mfc_power_off(void)
{
-#ifdef CONFIG_PM
+ int i;
+
+ /* finish software clock gating */
+ clk_enable(pm->clock_gate);
+
+ for (i = 0; i < pm->num_clocks; i++)
+ clk_disable_unprepare(pm->clocks[i]);
+
return pm_runtime_put_sync(pm->device);
-#else
- atomic_set(&pm->power, 0);
- return 0;
-#endif
}
-
diff --git a/drivers/media/platform/sti/bdisp/bdisp-v4l2.c b/drivers/media/platform/sti/bdisp/bdisp-v4l2.c
index 45f82b5ddd77..823608112d89 100644
--- a/drivers/media/platform/sti/bdisp/bdisp-v4l2.c
+++ b/drivers/media/platform/sti/bdisp/bdisp-v4l2.c
@@ -1337,6 +1337,7 @@ static int bdisp_probe(struct platform_device *pdev)
res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
if (!res) {
dev_err(dev, "failed to get IRQ resource\n");
+ ret = -EINVAL;
goto err_clk;
}
diff --git a/drivers/media/platform/sti/c8sectpfe/c8sectpfe-core.c b/drivers/media/platform/sti/c8sectpfe/c8sectpfe-core.c
index 30c148b9d65e..7652ce2ec1dc 100644
--- a/drivers/media/platform/sti/c8sectpfe/c8sectpfe-core.c
+++ b/drivers/media/platform/sti/c8sectpfe/c8sectpfe-core.c
@@ -112,8 +112,7 @@ static void channel_swdemux_tsklet(unsigned long data)
buf = (u8 *) channel->back_buffer_aligned;
dev_dbg(fei->dev,
- "chan=%d channel=%p num_packets = %d, buf = %p, pos = 0x%x\n\t"
- "rp=0x%lx, wp=0x%lx\n",
+ "chan=%d channel=%p num_packets = %d, buf = %p, pos = 0x%x\n\trp=0x%lx, wp=0x%lx\n",
channel->tsin_id, channel, num_packets, buf, pos, rp, wp);
for (n = 0; n < num_packets; n++) {
@@ -789,8 +788,7 @@ static int c8sectpfe_probe(struct platform_device *pdev)
/* sanity check value */
if (tsin->tsin_id > fei->hw_stats.num_ib) {
dev_err(&pdev->dev,
- "tsin-num %d specified greater than number\n\t"
- "of input block hw in SoC! (%d)",
+ "tsin-num %d specified greater than number\n\tof input block hw in SoC! (%d)",
tsin->tsin_id, fei->hw_stats.num_ib);
ret = -EINVAL;
goto err_clk_disable;
@@ -815,6 +813,7 @@ static int c8sectpfe_probe(struct platform_device *pdev)
i2c_bus = of_parse_phandle(child, "i2c-bus", 0);
if (!i2c_bus) {
dev_err(&pdev->dev, "No i2c-bus found\n");
+ ret = -ENODEV;
goto err_clk_disable;
}
tsin->i2c_adapter =
@@ -822,6 +821,7 @@ static int c8sectpfe_probe(struct platform_device *pdev)
if (!tsin->i2c_adapter) {
dev_err(&pdev->dev, "No i2c adapter found\n");
of_node_put(i2c_bus);
+ ret = -ENODEV;
goto err_clk_disable;
}
of_node_put(i2c_bus);
@@ -855,8 +855,7 @@ static int c8sectpfe_probe(struct platform_device *pdev)
tsin->demux_mapping = index;
dev_dbg(fei->dev,
- "channel=%p n=%d tsin_num=%d, invert-ts-clk=%d\n\t"
- "serial-not-parallel=%d pkt-clk-valid=%d dvb-card=%d\n",
+ "channel=%p n=%d tsin_num=%d, invert-ts-clk=%d\n\tserial-not-parallel=%d pkt-clk-valid=%d dvb-card=%d\n",
fei->channel_data[index], index,
tsin->tsin_id, tsin->invert_ts_clk,
tsin->serial_not_parallel, tsin->async_not_sync,
@@ -888,8 +887,7 @@ static int c8sectpfe_probe(struct platform_device *pdev)
return 0;
err_clk_disable:
- /* TODO uncomment when upstream has taken a reference on this clk */
- /*clk_disable_unprepare(fei->c8sectpfeclk);*/
+ clk_disable_unprepare(fei->c8sectpfeclk);
return ret;
}
@@ -924,11 +922,8 @@ static int c8sectpfe_remove(struct platform_device *pdev)
if (readl(fei->io + SYS_OTHER_CLKEN))
writel(0, fei->io + SYS_OTHER_CLKEN);
- /* TODO uncomment when upstream has taken a reference on this clk */
- /*
if (fei->c8sectpfeclk)
clk_disable_unprepare(fei->c8sectpfeclk);
- */
return 0;
}
@@ -1045,8 +1040,8 @@ static void load_imem_segment(struct c8sectpfei *fei, Elf32_Phdr *phdr,
*/
dev_dbg(fei->dev,
- "Loading IMEM segment %d 0x%08x\n\t"
- " (0x%x bytes) -> 0x%p (0x%x bytes)\n", seg_num,
+ "Loading IMEM segment %d 0x%08x\n\t (0x%x bytes) -> 0x%p (0x%x bytes)\n",
+ seg_num,
phdr->p_paddr, phdr->p_filesz,
dest, phdr->p_memsz + phdr->p_memsz / 3);
@@ -1075,8 +1070,7 @@ static void load_dmem_segment(struct c8sectpfei *fei, Elf32_Phdr *phdr,
*/
dev_dbg(fei->dev,
- "Loading DMEM segment %d 0x%08x\n\t"
- "(0x%x bytes) -> 0x%p (0x%x bytes)\n",
+ "Loading DMEM segment %d 0x%08x\n\t(0x%x bytes) -> 0x%p (0x%x bytes)\n",
seg_num, phdr->p_paddr, phdr->p_filesz,
dst, phdr->p_memsz);
diff --git a/drivers/media/platform/sti/hva/hva-hw.c b/drivers/media/platform/sti/hva/hva-hw.c
index d341d4994528..68d625b412b6 100644
--- a/drivers/media/platform/sti/hva/hva-hw.c
+++ b/drivers/media/platform/sti/hva/hva-hw.c
@@ -245,7 +245,7 @@ static irqreturn_t hva_hw_err_irq_thread(int irq, void *arg)
ctx->hw_err = true;
}
- if (hva->lmi_err_reg) {
+ if (hva->emi_err_reg) {
dev_err(dev, "%s external memory interface error: 0x%08x\n",
ctx->name, hva->emi_err_reg);
ctx->hw_err = true;
@@ -305,16 +305,16 @@ int hva_hw_probe(struct platform_device *pdev, struct hva_dev *hva)
/* get memory for registers */
regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
hva->regs = devm_ioremap_resource(dev, regs);
- if (IS_ERR_OR_NULL(hva->regs)) {
+ if (IS_ERR(hva->regs)) {
dev_err(dev, "%s failed to get regs\n", HVA_PREFIX);
return PTR_ERR(hva->regs);
}
/* get memory for esram */
esram = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- if (IS_ERR_OR_NULL(esram)) {
+ if (!esram) {
dev_err(dev, "%s failed to get esram\n", HVA_PREFIX);
- return PTR_ERR(esram);
+ return -ENODEV;
}
hva->esram_addr = esram->start;
hva->esram_size = resource_size(esram);
diff --git a/drivers/media/platform/ti-vpe/Makefile b/drivers/media/platform/ti-vpe/Makefile
index e236059a60ad..32504b724b5d 100644
--- a/drivers/media/platform/ti-vpe/Makefile
+++ b/drivers/media/platform/ti-vpe/Makefile
@@ -1,6 +1,12 @@
obj-$(CONFIG_VIDEO_TI_VPE) += ti-vpe.o
-
-ti-vpe-y := vpe.o sc.o csc.o vpdma.o
+obj-$(CONFIG_VIDEO_TI_VPDMA) += ti-vpdma.o
+obj-$(CONFIG_VIDEO_TI_SC) += ti-sc.o
+obj-$(CONFIG_VIDEO_TI_CSC) += ti-csc.o
+
+ti-vpe-y := vpe.o
+ti-vpdma-y := vpdma.o
+ti-sc-y := sc.o
+ti-csc-y := csc.o
ccflags-$(CONFIG_VIDEO_TI_VPE_DEBUG) += -DDEBUG
diff --git a/drivers/media/platform/ti-vpe/cal.c b/drivers/media/platform/ti-vpe/cal.c
index 44323cb5d287..7a058b6e03d0 100644
--- a/drivers/media/platform/ti-vpe/cal.c
+++ b/drivers/media/platform/ti-vpe/cal.c
@@ -483,11 +483,7 @@ static void cal_get_hwinfo(struct cal_dev *dev)
static inline int cal_runtime_get(struct cal_dev *dev)
{
- int r;
-
- r = pm_runtime_get_sync(&dev->pdev->dev);
-
- return r;
+ return pm_runtime_get_sync(&dev->pdev->dev);
}
static inline void cal_runtime_put(struct cal_dev *dev)
@@ -1749,13 +1745,13 @@ static int of_cal_create_instance(struct cal_ctx *ctx, int inst)
}
cleanup_exit:
- if (!remote_ep)
+ if (remote_ep)
of_node_put(remote_ep);
- if (!sensor_node)
+ if (sensor_node)
of_node_put(sensor_node);
- if (!ep_node)
+ if (ep_node)
of_node_put(ep_node);
- if (!port)
+ if (port)
of_node_put(port);
return ret;
diff --git a/drivers/media/platform/ti-vpe/csc.c b/drivers/media/platform/ti-vpe/csc.c
index bec674994752..44b8465cf101 100644
--- a/drivers/media/platform/ti-vpe/csc.c
+++ b/drivers/media/platform/ti-vpe/csc.c
@@ -14,6 +14,7 @@
#include <linux/err.h>
#include <linux/io.h>
+#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/videodev2.h>
@@ -96,6 +97,8 @@ void csc_dump_regs(struct csc_data *csc)
#define DUMPREG(r) dev_dbg(dev, "%-35s %08x\n", #r, \
ioread32(csc->base + CSC_##r))
+ dev_dbg(dev, "CSC Registers @ %pa:\n", &csc->res->start);
+
DUMPREG(CSC00);
DUMPREG(CSC01);
DUMPREG(CSC02);
@@ -105,11 +108,13 @@ void csc_dump_regs(struct csc_data *csc)
#undef DUMPREG
}
+EXPORT_SYMBOL(csc_dump_regs);
void csc_set_coeff_bypass(struct csc_data *csc, u32 *csc_reg5)
{
*csc_reg5 |= CSC_BYPASS;
}
+EXPORT_SYMBOL(csc_set_coeff_bypass);
/*
* set the color space converter coefficient shadow register values
@@ -160,8 +165,9 @@ void csc_set_coeff(struct csc_data *csc, u32 *csc_reg0,
for (; coeff < end_coeff; coeff += 2)
*shadow_csc++ = (*(coeff + 1) << 16) | *coeff;
}
+EXPORT_SYMBOL(csc_set_coeff);
-struct csc_data *csc_create(struct platform_device *pdev)
+struct csc_data *csc_create(struct platform_device *pdev, const char *res_name)
{
struct csc_data *csc;
@@ -176,9 +182,10 @@ struct csc_data *csc_create(struct platform_device *pdev)
csc->pdev = pdev;
csc->res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
- "csc");
+ res_name);
if (csc->res == NULL) {
- dev_err(&pdev->dev, "missing platform resources data\n");
+ dev_err(&pdev->dev, "missing '%s' platform resources data\n",
+ res_name);
return ERR_PTR(-ENODEV);
}
@@ -190,3 +197,8 @@ struct csc_data *csc_create(struct platform_device *pdev)
return csc;
}
+EXPORT_SYMBOL(csc_create);
+
+MODULE_DESCRIPTION("TI VIP/VPE Color Space Converter");
+MODULE_AUTHOR("Texas Instruments Inc.");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/ti-vpe/csc.h b/drivers/media/platform/ti-vpe/csc.h
index 1ad2b6dad561..024700b15152 100644
--- a/drivers/media/platform/ti-vpe/csc.h
+++ b/drivers/media/platform/ti-vpe/csc.h
@@ -63,6 +63,6 @@ void csc_set_coeff_bypass(struct csc_data *csc, u32 *csc_reg5);
void csc_set_coeff(struct csc_data *csc, u32 *csc_reg0,
enum v4l2_colorspace src_colorspace,
enum v4l2_colorspace dst_colorspace);
-struct csc_data *csc_create(struct platform_device *pdev);
+struct csc_data *csc_create(struct platform_device *pdev, const char *res_name);
#endif
diff --git a/drivers/media/platform/ti-vpe/sc.c b/drivers/media/platform/ti-vpe/sc.c
index f82d1c7f667f..e9273b713782 100644
--- a/drivers/media/platform/ti-vpe/sc.c
+++ b/drivers/media/platform/ti-vpe/sc.c
@@ -14,6 +14,7 @@
#include <linux/err.h>
#include <linux/io.h>
+#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
@@ -27,6 +28,8 @@ void sc_dump_regs(struct sc_data *sc)
#define DUMPREG(r) dev_dbg(dev, "%-35s %08x\n", #r, \
ioread32(sc->base + CFG_##r))
+ dev_dbg(dev, "SC Registers @ %pa:\n", &sc->res->start);
+
DUMPREG(SC0);
DUMPREG(SC1);
DUMPREG(SC2);
@@ -52,6 +55,7 @@ void sc_dump_regs(struct sc_data *sc)
#undef DUMPREG
}
+EXPORT_SYMBOL(sc_dump_regs);
/*
* set the horizontal scaler coefficients according to the ratio of output to
@@ -84,9 +88,6 @@ void sc_set_hs_coeffs(struct sc_data *sc, void *addr, unsigned int src_w,
}
}
- if (idx == sc->hs_index)
- return;
-
cp = scaler_hs_coeffs[idx];
for (i = 0; i < SC_NUM_PHASES * 2; i++) {
@@ -101,10 +102,9 @@ void sc_set_hs_coeffs(struct sc_data *sc, void *addr, unsigned int src_w,
coeff_h += SC_NUM_TAPS_MEM_ALIGN - SC_H_NUM_TAPS;
}
- sc->hs_index = idx;
-
sc->load_coeff_h = true;
}
+EXPORT_SYMBOL(sc_set_hs_coeffs);
/*
* set the vertical scaler coefficients according to the ratio of output to
@@ -130,9 +130,6 @@ void sc_set_vs_coeffs(struct sc_data *sc, void *addr, unsigned int src_h,
idx = VS_LT_9_16_SCALE + sixteenths - 8;
}
- if (idx == sc->vs_index)
- return;
-
cp = scaler_vs_coeffs[idx];
for (i = 0; i < SC_NUM_PHASES * 2; i++) {
@@ -146,9 +143,9 @@ void sc_set_vs_coeffs(struct sc_data *sc, void *addr, unsigned int src_h,
coeff_v += SC_NUM_TAPS_MEM_ALIGN - SC_V_NUM_TAPS;
}
- sc->vs_index = idx;
sc->load_coeff_v = true;
}
+EXPORT_SYMBOL(sc_set_vs_coeffs);
void sc_config_scaler(struct sc_data *sc, u32 *sc_reg0, u32 *sc_reg8,
u32 *sc_reg17, unsigned int src_w, unsigned int src_h,
@@ -276,8 +273,9 @@ void sc_config_scaler(struct sc_data *sc, u32 *sc_reg0, u32 *sc_reg8,
*sc_reg24 = (src_w << CFG_ORG_W_SHIFT) | (src_h << CFG_ORG_H_SHIFT);
}
+EXPORT_SYMBOL(sc_config_scaler);
-struct sc_data *sc_create(struct platform_device *pdev)
+struct sc_data *sc_create(struct platform_device *pdev, const char *res_name)
{
struct sc_data *sc;
@@ -291,9 +289,10 @@ struct sc_data *sc_create(struct platform_device *pdev)
sc->pdev = pdev;
- sc->res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "sc");
+ sc->res = platform_get_resource_byname(pdev, IORESOURCE_MEM, res_name);
if (!sc->res) {
- dev_err(&pdev->dev, "missing platform resources data\n");
+ dev_err(&pdev->dev, "missing '%s' platform resources data\n",
+ res_name);
return ERR_PTR(-ENODEV);
}
@@ -305,3 +304,8 @@ struct sc_data *sc_create(struct platform_device *pdev)
return sc;
}
+EXPORT_SYMBOL(sc_create);
+
+MODULE_DESCRIPTION("TI VIP/VPE Scaler");
+MODULE_AUTHOR("Texas Instruments Inc.");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/ti-vpe/sc.h b/drivers/media/platform/ti-vpe/sc.h
index 60e411e05c30..f1fe80b38c9f 100644
--- a/drivers/media/platform/ti-vpe/sc.h
+++ b/drivers/media/platform/ti-vpe/sc.h
@@ -173,6 +173,12 @@
/* number of taps expected by the scaler in it's coefficient memory */
#define SC_NUM_TAPS_MEM_ALIGN 8
+/* Maximum frame width the scaler can handle (in pixels) */
+#define SC_MAX_PIXEL_WIDTH 2047
+
+/* Maximum frame height the scaler can handle (in lines) */
+#define SC_MAX_PIXEL_HEIGHT 2047
+
/*
* coefficient memory size in bytes:
* num phases x num sets(luma and chroma) x num taps(aligned) x coeff size
@@ -189,9 +195,6 @@ struct sc_data {
bool load_coeff_h; /* have new h SC coeffs */
bool load_coeff_v; /* have new v SC coeffs */
- unsigned int hs_index; /* h SC coeffs selector */
- unsigned int vs_index; /* v SC coeffs selector */
-
struct platform_device *pdev;
};
@@ -203,6 +206,6 @@ void sc_set_vs_coeffs(struct sc_data *sc, void *addr, unsigned int src_h,
void sc_config_scaler(struct sc_data *sc, u32 *sc_reg0, u32 *sc_reg8,
u32 *sc_reg17, unsigned int src_w, unsigned int src_h,
unsigned int dst_w, unsigned int dst_h);
-struct sc_data *sc_create(struct platform_device *pdev);
+struct sc_data *sc_create(struct platform_device *pdev, const char *res_name);
#endif
diff --git a/drivers/media/platform/ti-vpe/vpdma.c b/drivers/media/platform/ti-vpe/vpdma.c
index 3e2e3a33e6ed..13bfd7184160 100644
--- a/drivers/media/platform/ti-vpe/vpdma.c
+++ b/drivers/media/platform/ti-vpe/vpdma.c
@@ -59,9 +59,9 @@ const struct vpdma_data_format vpdma_yuv_fmts[] = {
.data_type = DATA_TYPE_C420,
.depth = 4,
},
- [VPDMA_DATA_FMT_YC422] = {
+ [VPDMA_DATA_FMT_YCR422] = {
.type = VPDMA_DATA_FMT_TYPE_YUV,
- .data_type = DATA_TYPE_YC422,
+ .data_type = DATA_TYPE_YCR422,
.depth = 16,
},
[VPDMA_DATA_FMT_YC444] = {
@@ -69,12 +69,23 @@ const struct vpdma_data_format vpdma_yuv_fmts[] = {
.data_type = DATA_TYPE_YC444,
.depth = 24,
},
- [VPDMA_DATA_FMT_CY422] = {
+ [VPDMA_DATA_FMT_CRY422] = {
.type = VPDMA_DATA_FMT_TYPE_YUV,
- .data_type = DATA_TYPE_CY422,
+ .data_type = DATA_TYPE_CRY422,
+ .depth = 16,
+ },
+ [VPDMA_DATA_FMT_CBY422] = {
+ .type = VPDMA_DATA_FMT_TYPE_YUV,
+ .data_type = DATA_TYPE_CBY422,
+ .depth = 16,
+ },
+ [VPDMA_DATA_FMT_YCB422] = {
+ .type = VPDMA_DATA_FMT_TYPE_YUV,
+ .data_type = DATA_TYPE_YCB422,
.depth = 16,
},
};
+EXPORT_SYMBOL(vpdma_yuv_fmts);
const struct vpdma_data_format vpdma_rgb_fmts[] = {
[VPDMA_DATA_FMT_RGB565] = {
@@ -178,6 +189,30 @@ const struct vpdma_data_format vpdma_rgb_fmts[] = {
.depth = 32,
},
};
+EXPORT_SYMBOL(vpdma_rgb_fmts);
+
+/*
+ * To handle RAW formats we re-use the CBY422 vpdma data type, so that
+ * the VPDMA re-orders the incoming bytes, as the parser assumes that
+ * the first byte presented on the bus is the MSB of a 2-byte value.
+ * RAW8 handles bit depths from 1 to 8 bits
+ * RAW16 handles bit depths from 9 to 16 bits
+ */
+const struct vpdma_data_format vpdma_raw_fmts[] = {
+ [VPDMA_DATA_FMT_RAW8] = {
+ .type = VPDMA_DATA_FMT_TYPE_YUV,
+ .data_type = DATA_TYPE_CBY422,
+ .depth = 8,
+ },
+ [VPDMA_DATA_FMT_RAW16] = {
+ .type = VPDMA_DATA_FMT_TYPE_YUV,
+ .data_type = DATA_TYPE_CBY422,
+ .depth = 16,
+ },
+};
+EXPORT_SYMBOL(vpdma_raw_fmts);
const struct vpdma_data_format vpdma_misc_fmts[] = {
[VPDMA_DATA_FMT_MV] = {
@@ -186,6 +221,7 @@ const struct vpdma_data_format vpdma_misc_fmts[] = {
.depth = 4,
},
};
+EXPORT_SYMBOL(vpdma_misc_fmts);
struct vpdma_channel_info {
int num; /* VPDMA channel number */
@@ -317,6 +353,7 @@ void vpdma_dump_regs(struct vpdma_data *vpdma)
DUMPREG(VIP_UP_UV_CSTAT);
DUMPREG(VPI_CTL_CSTAT);
}
+EXPORT_SYMBOL(vpdma_dump_regs);
/*
* Allocate a DMA buffer
@@ -333,6 +370,7 @@ int vpdma_alloc_desc_buf(struct vpdma_buf *buf, size_t size)
return 0;
}
+EXPORT_SYMBOL(vpdma_alloc_desc_buf);
void vpdma_free_desc_buf(struct vpdma_buf *buf)
{
@@ -341,6 +379,7 @@ void vpdma_free_desc_buf(struct vpdma_buf *buf)
buf->addr = NULL;
buf->size = 0;
}
+EXPORT_SYMBOL(vpdma_free_desc_buf);
/*
* map descriptor/payload DMA buffer, enabling DMA access
@@ -351,7 +390,7 @@ int vpdma_map_desc_buf(struct vpdma_data *vpdma, struct vpdma_buf *buf)
WARN_ON(buf->mapped);
buf->dma_addr = dma_map_single(dev, buf->addr, buf->size,
- DMA_TO_DEVICE);
+ DMA_BIDIRECTIONAL);
if (dma_mapping_error(dev, buf->dma_addr)) {
dev_err(dev, "failed to map buffer\n");
return -EINVAL;
@@ -361,6 +400,7 @@ int vpdma_map_desc_buf(struct vpdma_data *vpdma, struct vpdma_buf *buf)
return 0;
}
+EXPORT_SYMBOL(vpdma_map_desc_buf);
/*
* unmap descriptor/payload DMA buffer, disabling DMA access and
@@ -371,10 +411,62 @@ void vpdma_unmap_desc_buf(struct vpdma_data *vpdma, struct vpdma_buf *buf)
struct device *dev = &vpdma->pdev->dev;
if (buf->mapped)
- dma_unmap_single(dev, buf->dma_addr, buf->size, DMA_TO_DEVICE);
+ dma_unmap_single(dev, buf->dma_addr, buf->size,
+ DMA_BIDIRECTIONAL);
buf->mapped = false;
}
+EXPORT_SYMBOL(vpdma_unmap_desc_buf);
+
+/*
+ * Clean up all pending descriptors of a list.
+ * First, stop the current list being processed; if the VPDMA was busy,
+ * this makes it accept newly posted lists. Then, to reset the internal
+ * FSM, post an abort-channel descriptor for each channel in the
+ * @channels array of size @size.
+ */
+int vpdma_list_cleanup(struct vpdma_data *vpdma, int list_num,
+ int *channels, int size)
+{
+ struct vpdma_desc_list abort_list;
+ int i, ret, timeout = 500;
+
+ write_reg(vpdma, VPDMA_LIST_ATTR,
+ (list_num << VPDMA_LIST_NUM_SHFT) |
+ (1 << VPDMA_LIST_STOP_SHFT));
+
+ if (size <= 0 || !channels)
+ return 0;
+
+ ret = vpdma_create_desc_list(&abort_list,
+ size * sizeof(struct vpdma_dtd), VPDMA_LIST_TYPE_NORMAL);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < size; i++)
+ vpdma_add_abort_channel_ctd(&abort_list, channels[i]);
+
+ ret = vpdma_map_desc_buf(vpdma, &abort_list.buf);
+ if (ret)
+ return ret;
+ ret = vpdma_submit_descs(vpdma, &abort_list, list_num);
+ if (ret)
+ return ret;
+
+ while (vpdma_list_busy(vpdma, list_num) && timeout--)
+ ;
+
+ if (timeout == 0) {
+ dev_err(&vpdma->pdev->dev, "Timed out cleaning up VPDMA list\n");
+ vpdma_unmap_desc_buf(vpdma, &abort_list.buf);
+ vpdma_free_desc_buf(&abort_list.buf);
+ return -EBUSY;
+ }
+
+ vpdma_unmap_desc_buf(vpdma, &abort_list.buf);
+ vpdma_free_desc_buf(&abort_list.buf);
+
+ return 0;
+}
+EXPORT_SYMBOL(vpdma_list_cleanup);
/*
* create a descriptor list, the user of this list will append configuration,
@@ -396,6 +488,7 @@ int vpdma_create_desc_list(struct vpdma_desc_list *list, size_t size, int type)
return 0;
}
+EXPORT_SYMBOL(vpdma_create_desc_list);
/*
* once a descriptor list is parsed by VPDMA, we reset the list by emptying it,
@@ -405,6 +498,7 @@ void vpdma_reset_desc_list(struct vpdma_desc_list *list)
{
list->next = list->buf.addr;
}
+EXPORT_SYMBOL(vpdma_reset_desc_list);
/*
* free the buffer allocated fot the VPDMA descriptor list, this should be
@@ -416,20 +510,22 @@ void vpdma_free_desc_list(struct vpdma_desc_list *list)
list->next = NULL;
}
+EXPORT_SYMBOL(vpdma_free_desc_list);
-static bool vpdma_list_busy(struct vpdma_data *vpdma, int list_num)
+bool vpdma_list_busy(struct vpdma_data *vpdma, int list_num)
{
return read_reg(vpdma, VPDMA_LIST_STAT_SYNC) & BIT(list_num + 16);
}
+EXPORT_SYMBOL(vpdma_list_busy);
/*
* submit a list of DMA descriptors to the VPE VPDMA, do not wait for completion
*/
-int vpdma_submit_descs(struct vpdma_data *vpdma, struct vpdma_desc_list *list)
+int vpdma_submit_descs(struct vpdma_data *vpdma,
+ struct vpdma_desc_list *list, int list_num)
{
- /* we always use the first list */
- int list_num = 0;
int list_size;
+ unsigned long flags;
if (vpdma_list_busy(vpdma, list_num))
return -EBUSY;
@@ -437,15 +533,68 @@ int vpdma_submit_descs(struct vpdma_data *vpdma, struct vpdma_desc_list *list)
/* 16-byte granularity */
list_size = (list->next - list->buf.addr) >> 4;
+ spin_lock_irqsave(&vpdma->lock, flags);
write_reg(vpdma, VPDMA_LIST_ADDR, (u32) list->buf.dma_addr);
write_reg(vpdma, VPDMA_LIST_ATTR,
(list_num << VPDMA_LIST_NUM_SHFT) |
(list->type << VPDMA_LIST_TYPE_SHFT) |
list_size);
+ spin_unlock_irqrestore(&vpdma->lock, flags);
return 0;
}
+EXPORT_SYMBOL(vpdma_submit_descs);
+
+static void dump_dtd(struct vpdma_dtd *dtd);
+
+void vpdma_update_dma_addr(struct vpdma_data *vpdma,
+ struct vpdma_desc_list *list, dma_addr_t dma_addr,
+ void *write_dtd, int drop, int idx)
+{
+ struct vpdma_dtd *dtd = list->buf.addr;
+ dma_addr_t write_desc_addr;
+ int offset;
+
+ dtd += idx;
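+ /* Unmap the buffer so the CPU writes below reach memory coherently */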
+ vpdma_unmap_desc_buf(vpdma, &list->buf);
+
+ dtd->start_addr = dma_addr;
+
+ /* Calculate write address from the offset of write_dtd from start
+ * of the list->buf
+ */
+ offset = (void *)write_dtd - list->buf.addr;
+ write_desc_addr = list->buf.dma_addr + offset;
+
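+ /* Request descriptor write-back, dropping the payload when asked to */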
+ if (drop)
+ dtd->desc_write_addr = dtd_desc_write_addr(write_desc_addr,
+ 1, 1, 0);
+ else
+ dtd->desc_write_addr = dtd_desc_write_addr(write_desc_addr,
+ 1, 0, 0);
+
+ vpdma_map_desc_buf(vpdma, &list->buf);
+
+ dump_dtd(dtd);
+}
+EXPORT_SYMBOL(vpdma_update_dma_addr);
+
+void vpdma_set_max_size(struct vpdma_data *vpdma, int reg_addr,
+ u32 width, u32 height)
+{
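+ /* Fall back to the first max-size register if an unknown one is given */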
+ if (reg_addr != VPDMA_MAX_SIZE1 && reg_addr != VPDMA_MAX_SIZE2 &&
+ reg_addr != VPDMA_MAX_SIZE3)
+ reg_addr = VPDMA_MAX_SIZE1;
+
+ write_field_reg(vpdma, reg_addr, width - 1,
+ VPDMA_MAX_SIZE_WIDTH_MASK, VPDMA_MAX_SIZE_WIDTH_SHFT);
+
+ write_field_reg(vpdma, reg_addr, height - 1,
+ VPDMA_MAX_SIZE_HEIGHT_MASK, VPDMA_MAX_SIZE_HEIGHT_SHFT);
+
+}
+EXPORT_SYMBOL(vpdma_set_max_size);
static void dump_cfd(struct vpdma_cfd *cfd)
{
@@ -466,10 +615,10 @@ static void dump_cfd(struct vpdma_cfd *cfd)
pr_debug("word2: payload_addr = 0x%08x\n", cfd->payload_addr);
- pr_debug("word3: pkt_type = %d, direct = %d, class = %d, dest = %d, "
- "payload_len = %d\n", cfd_get_pkt_type(cfd),
- cfd_get_direct(cfd), class, cfd_get_dest(cfd),
- cfd_get_payload_len(cfd));
+ pr_debug("word3: pkt_type = %d, direct = %d, class = %d, dest = %d, payload_len = %d\n",
+ cfd_get_pkt_type(cfd),
+ cfd_get_direct(cfd), class, cfd_get_dest(cfd),
+ cfd_get_payload_len(cfd));
}
/*
@@ -498,6 +647,7 @@ void vpdma_add_cfd_block(struct vpdma_desc_list *list, int client,
dump_cfd(cfd);
}
+EXPORT_SYMBOL(vpdma_add_cfd_block);
/*
* append a configuration descriptor to the given descriptor list, where the
@@ -526,6 +676,7 @@ void vpdma_add_cfd_adb(struct vpdma_desc_list *list, int client,
dump_cfd(cfd);
};
+EXPORT_SYMBOL(vpdma_add_cfd_adb);
/*
* control descriptor format change based on what type of control descriptor it
@@ -563,6 +714,32 @@ void vpdma_add_sync_on_channel_ctd(struct vpdma_desc_list *list,
dump_ctd(ctd);
}
+EXPORT_SYMBOL(vpdma_add_sync_on_channel_ctd);
+
+/*
+ * append an 'abort_channel' type control descriptor to the given descriptor
+ * list, this descriptor aborts any DMA transaction happening using the
+ * specified channel
+ */
+void vpdma_add_abort_channel_ctd(struct vpdma_desc_list *list,
+ int chan_num)
+{
+ struct vpdma_ctd *ctd;
+
+ ctd = list->next;
+ WARN_ON((void *)(ctd + 1) > (list->buf.addr + list->buf.size));
+
+ ctd->w0 = 0;
+ ctd->w1 = 0;
+ ctd->w2 = 0;
+ ctd->type_source_ctl = ctd_type_source_ctl(chan_num,
+ CTD_TYPE_ABORT_CHANNEL);
+
+ list->next = ctd + 1;
+
+ dump_ctd(ctd);
+}
+EXPORT_SYMBOL(vpdma_add_abort_channel_ctd);
static void dump_dtd(struct vpdma_dtd *dtd)
{
@@ -574,8 +751,7 @@ static void dump_dtd(struct vpdma_dtd *dtd)
pr_debug("%s data transfer descriptor for channel %d\n",
dir == DTD_DIR_OUT ? "outbound" : "inbound", chan);
- pr_debug("word0: data_type = %d, notify = %d, field = %d, 1D = %d, "
- "even_ln_skp = %d, odd_ln_skp = %d, line_stride = %d\n",
+ pr_debug("word0: data_type = %d, notify = %d, field = %d, 1D = %d, even_ln_skp = %d, odd_ln_skp = %d, line_stride = %d\n",
dtd_get_data_type(dtd), dtd_get_notify(dtd), dtd_get_field(dtd),
dtd_get_1d(dtd), dtd_get_even_line_skip(dtd),
dtd_get_odd_line_skip(dtd), dtd_get_line_stride(dtd));
@@ -586,17 +762,16 @@ static void dump_dtd(struct vpdma_dtd *dtd)
pr_debug("word2: start_addr = %pad\n", &dtd->start_addr);
- pr_debug("word3: pkt_type = %d, mode = %d, dir = %d, chan = %d, "
- "pri = %d, next_chan = %d\n", dtd_get_pkt_type(dtd),
- dtd_get_mode(dtd), dir, chan, dtd_get_priority(dtd),
- dtd_get_next_chan(dtd));
+ pr_debug("word3: pkt_type = %d, mode = %d, dir = %d, chan = %d, pri = %d, next_chan = %d\n",
+ dtd_get_pkt_type(dtd),
+ dtd_get_mode(dtd), dir, chan, dtd_get_priority(dtd),
+ dtd_get_next_chan(dtd));
if (dir == DTD_DIR_IN)
pr_debug("word4: frame_width = %d, frame_height = %d\n",
dtd_get_frame_width(dtd), dtd_get_frame_height(dtd));
else
- pr_debug("word4: desc_write_addr = 0x%08x, write_desc = %d, "
- "drp_data = %d, use_desc_reg = %d\n",
+ pr_debug("word4: desc_write_addr = 0x%08x, write_desc = %d, drp_data = %d, use_desc_reg = %d\n",
dtd_get_desc_write_addr(dtd), dtd_get_write_desc(dtd),
dtd_get_drop_data(dtd), dtd_get_use_desc(dtd));
@@ -620,13 +795,25 @@ static void dump_dtd(struct vpdma_dtd *dtd)
* @c_rect: compose params of output image
* @fmt: vpdma data format of the buffer
* @dma_addr: dma address as seen by VPDMA
+ * @max_w: enum for maximum width of data transfer
+ * @max_h: enum for maximum height of data transfer
* @chan: VPDMA channel
* @flags: VPDMA flags to configure some descriptor fields
*/
void vpdma_add_out_dtd(struct vpdma_desc_list *list, int width,
const struct v4l2_rect *c_rect,
const struct vpdma_data_format *fmt, dma_addr_t dma_addr,
- enum vpdma_channel chan, u32 flags)
+ int max_w, int max_h, enum vpdma_channel chan, u32 flags)
+{
+ vpdma_rawchan_add_out_dtd(list, width, c_rect, fmt, dma_addr,
+ max_w, max_h, chan_info[chan].num, flags);
+}
+EXPORT_SYMBOL(vpdma_add_out_dtd);
+
+void vpdma_rawchan_add_out_dtd(struct vpdma_desc_list *list, int width,
+ const struct v4l2_rect *c_rect,
+ const struct vpdma_data_format *fmt, dma_addr_t dma_addr,
+ int max_w, int max_h, int raw_vpdma_chan, u32 flags)
{
int priority = 0;
int field = 0;
@@ -637,7 +824,7 @@ void vpdma_add_out_dtd(struct vpdma_desc_list *list, int width,
int stride;
struct vpdma_dtd *dtd;
- channel = next_chan = chan_info[chan].num;
+ channel = next_chan = raw_vpdma_chan;
if (fmt->type == VPDMA_DATA_FMT_TYPE_YUV &&
fmt->data_type == DATA_TYPE_C420) {
@@ -665,8 +852,7 @@ void vpdma_add_out_dtd(struct vpdma_desc_list *list, int width,
dtd->pkt_ctl = dtd_pkt_ctl(!!(flags & VPDMA_DATA_MODE_TILED),
DTD_DIR_OUT, channel, priority, next_chan);
dtd->desc_write_addr = dtd_desc_write_addr(0, 0, 0, 0);
- dtd->max_width_height = dtd_max_width_height(MAX_OUT_WIDTH_1920,
- MAX_OUT_HEIGHT_1080);
+ dtd->max_width_height = dtd_max_width_height(max_w, max_h);
dtd->client_attr0 = 0;
dtd->client_attr1 = 0;
@@ -674,6 +860,7 @@ void vpdma_add_out_dtd(struct vpdma_desc_list *list, int width,
dump_dtd(dtd);
}
+EXPORT_SYMBOL(vpdma_rawchan_add_out_dtd);
/*
* append an inbound data transfer descriptor to the given descriptor list,
@@ -747,27 +934,105 @@ void vpdma_add_in_dtd(struct vpdma_desc_list *list, int width,
dump_dtd(dtd);
}
+EXPORT_SYMBOL(vpdma_add_in_dtd);
+
+int vpdma_hwlist_alloc(struct vpdma_data *vpdma, void *priv)
+{
+ int i, list_num = -1;
+ unsigned long flags;
+
+ spin_lock_irqsave(&vpdma->lock, flags);
+ for (i = 0; i < VPDMA_MAX_NUM_LIST &&
+ vpdma->hwlist_used[i] == true; i++)
+ ;
+
+ if (i < VPDMA_MAX_NUM_LIST) {
+ list_num = i;
+ vpdma->hwlist_used[i] = true;
+ vpdma->hwlist_priv[i] = priv;
+ }
+ spin_unlock_irqrestore(&vpdma->lock, flags);
+
+ return list_num;
+}
+EXPORT_SYMBOL(vpdma_hwlist_alloc);
+
+void *vpdma_hwlist_get_priv(struct vpdma_data *vpdma, int list_num)
+{
+ if (!vpdma || list_num >= VPDMA_MAX_NUM_LIST)
+ return NULL;
+
+ return vpdma->hwlist_priv[list_num];
+}
+EXPORT_SYMBOL(vpdma_hwlist_get_priv);
+
+void *vpdma_hwlist_release(struct vpdma_data *vpdma, int list_num)
+{
+ void *priv;
+ unsigned long flags;
+
+ spin_lock_irqsave(&vpdma->lock, flags);
+ vpdma->hwlist_used[list_num] = false;
+ priv = vpdma->hwlist_priv[list_num];
+ spin_unlock_irqrestore(&vpdma->lock, flags);
+
+ return priv;
+}
+EXPORT_SYMBOL(vpdma_hwlist_release);
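A sketch of the intended hardware-list lifecycle with the new helpers, assuming a
caller-side context struct (struct my_ctx, its desc_list member and process() are
hypothetical, everything else is the API added above):

	/* hypothetical callers; struct my_ctx and its members are illustrative */
	static int my_stream_start(struct vpdma_data *vpdma, struct my_ctx *ctx)
	{
		/* claim a free hardware list and stash the owning context */
		int list_num = vpdma_hwlist_alloc(vpdma, ctx);

		if (list_num < 0)
			return -EBUSY;	/* all VPDMA_MAX_NUM_LIST lists in use */

		return vpdma_submit_descs(vpdma, &ctx->desc_list, list_num);
	}

	static void my_list_complete(struct vpdma_data *vpdma, int list_num)
	{
		/* map the completed list back to its owner */
		struct my_ctx *ctx = vpdma_hwlist_get_priv(vpdma, list_num);

		process(ctx);				/* illustrative */
		vpdma_hwlist_release(vpdma, list_num);	/* hand the list back */
	}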
/* set or clear the mask for list complete interrupt */
-void vpdma_enable_list_complete_irq(struct vpdma_data *vpdma, int list_num,
- bool enable)
+void vpdma_enable_list_complete_irq(struct vpdma_data *vpdma, int irq_num,
+ int list_num, bool enable)
{
+ u32 reg_addr = VPDMA_INT_LIST0_MASK + VPDMA_INTX_OFFSET * irq_num;
u32 val;
- val = read_reg(vpdma, VPDMA_INT_LIST0_MASK);
+ val = read_reg(vpdma, reg_addr);
if (enable)
val |= (1 << (list_num * 2));
else
val &= ~(1 << (list_num * 2));
- write_reg(vpdma, VPDMA_INT_LIST0_MASK, val);
+ write_reg(vpdma, reg_addr, val);
}
+EXPORT_SYMBOL(vpdma_enable_list_complete_irq);
+
+/* get the LIST_STAT register */
+unsigned int vpdma_get_list_stat(struct vpdma_data *vpdma, int irq_num)
+{
+ u32 reg_addr = VPDMA_INT_LIST0_STAT + VPDMA_INTX_OFFSET * irq_num;
+
+ return read_reg(vpdma, reg_addr);
+}
+EXPORT_SYMBOL(vpdma_get_list_stat);
+
+/* get the LIST_MASK register */
+unsigned int vpdma_get_list_mask(struct vpdma_data *vpdma, int irq_num)
+{
+ u32 reg_addr = VPDMA_INT_LIST0_MASK + VPDMA_INTX_OFFSET * irq_num;
+
+ return read_reg(vpdma, reg_addr);
+}
+EXPORT_SYMBOL(vpdma_get_list_mask);
/* clear previously occurred list interrupts in the LIST_STAT register */
-void vpdma_clear_list_stat(struct vpdma_data *vpdma)
+void vpdma_clear_list_stat(struct vpdma_data *vpdma, int irq_num,
+ int list_num)
+{
+ u32 reg_addr = VPDMA_INT_LIST0_STAT + VPDMA_INTX_OFFSET * irq_num;
+
+ write_reg(vpdma, reg_addr, 3 << (list_num * 2));
+}
+EXPORT_SYMBOL(vpdma_clear_list_stat);
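The per-IRQ-line register addressing used above is plain stride arithmetic over the
LIST0 registers; a small standalone check of the addresses and bit positions involved
(constants copied from the vpdma_priv.h hunk further down; two bits per list, and the
clear value 3 << (list_num * 2) matches vpdma_clear_list_stat()):

	#include <stdio.h>

	#define VPDMA_INT_LIST0_STAT	0x88
	#define VPDMA_INT_LIST0_MASK	0x8c
	#define VPDMA_INTX_OFFSET	0x50

	int main(void)
	{
		int irq_num, list_num;

		for (irq_num = 0; irq_num < 2; irq_num++)
			for (list_num = 0; list_num < 2; list_num++)
				printf("irq %d list %d: stat 0x%03x mask 0x%03x enable bit %d clear 0x%x\n",
				       irq_num, list_num,
				       VPDMA_INT_LIST0_STAT + VPDMA_INTX_OFFSET * irq_num,
				       VPDMA_INT_LIST0_MASK + VPDMA_INTX_OFFSET * irq_num,
				       list_num * 2,
				       3 << (list_num * 2));
		return 0;
	}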
+
+void vpdma_set_bg_color(struct vpdma_data *vpdma,
+ struct vpdma_data_format *fmt, u32 color)
{
- write_reg(vpdma, VPDMA_INT_LIST0_STAT,
- read_reg(vpdma, VPDMA_INT_LIST0_STAT));
+ if (fmt->type == VPDMA_DATA_FMT_TYPE_RGB)
+ write_reg(vpdma, VPDMA_BG_RGB, color);
+ else if (fmt->type == VPDMA_DATA_FMT_TYPE_YUV)
+ write_reg(vpdma, VPDMA_BG_YUV, color);
}
+EXPORT_SYMBOL(vpdma_set_bg_color);
/*
* configures the output mode of the line buffer for the given client, the
@@ -782,6 +1047,7 @@ void vpdma_set_line_mode(struct vpdma_data *vpdma, int line_mode,
write_field_reg(vpdma, client_cstat, line_mode,
VPDMA_CSTAT_LINE_MODE_MASK, VPDMA_CSTAT_LINE_MODE_SHIFT);
}
+EXPORT_SYMBOL(vpdma_set_line_mode);
/*
* configures the event which should trigger VPDMA transfer for the given
@@ -796,6 +1062,7 @@ void vpdma_set_frame_start_event(struct vpdma_data *vpdma,
write_field_reg(vpdma, client_cstat, fs_event,
VPDMA_CSTAT_FRAME_START_MASK, VPDMA_CSTAT_FRAME_START_SHIFT);
}
+EXPORT_SYMBOL(vpdma_set_frame_start_event);
static void vpdma_firmware_cb(const struct firmware *f, void *context)
{
@@ -871,42 +1138,40 @@ static int vpdma_load_firmware(struct vpdma_data *vpdma)
return 0;
}
-struct vpdma_data *vpdma_create(struct platform_device *pdev,
+int vpdma_create(struct platform_device *pdev, struct vpdma_data *vpdma,
void (*cb)(struct platform_device *pdev))
{
struct resource *res;
- struct vpdma_data *vpdma;
int r;
dev_dbg(&pdev->dev, "vpdma_create\n");
- vpdma = devm_kzalloc(&pdev->dev, sizeof(*vpdma), GFP_KERNEL);
- if (!vpdma) {
- dev_err(&pdev->dev, "couldn't alloc vpdma_dev\n");
- return ERR_PTR(-ENOMEM);
- }
-
vpdma->pdev = pdev;
vpdma->cb = cb;
+ spin_lock_init(&vpdma->lock);
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "vpdma");
if (res == NULL) {
dev_err(&pdev->dev, "missing platform resources data\n");
- return ERR_PTR(-ENODEV);
+ return -ENODEV;
}
vpdma->base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
if (!vpdma->base) {
dev_err(&pdev->dev, "failed to ioremap\n");
- return ERR_PTR(-ENOMEM);
+ return -ENOMEM;
}
r = vpdma_load_firmware(vpdma);
if (r) {
pr_err("failed to load firmware %s\n", VPDMA_FIRMWARE);
- return ERR_PTR(r);
+ return r;
}
- return vpdma;
+ return 0;
}
+EXPORT_SYMBOL(vpdma_create);
+
+MODULE_AUTHOR("Texas Instruments Inc.");
MODULE_FIRMWARE(VPDMA_FIRMWARE);
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/ti-vpe/vpdma.h b/drivers/media/platform/ti-vpe/vpdma.h
index 2bd8fb050381..131700c112b2 100644
--- a/drivers/media/platform/ti-vpe/vpdma.h
+++ b/drivers/media/platform/ti-vpe/vpdma.h
@@ -13,6 +13,7 @@
#ifndef __TI_VPDMA_H_
#define __TI_VPDMA_H_
+#define VPDMA_MAX_NUM_LIST 8
/*
* A vpdma_buf tracks the size, DMA address and mapping status of each
* driver DMA area.
@@ -35,6 +36,9 @@ struct vpdma_data {
struct platform_device *pdev;
+ spinlock_t lock;
+ bool hwlist_used[VPDMA_MAX_NUM_LIST];
+ void *hwlist_priv[VPDMA_MAX_NUM_LIST];
/* callback to VPE driver when the firmware is loaded */
void (*cb)(struct platform_device *pdev);
};
@@ -70,9 +74,11 @@ enum vpdma_yuv_formats {
VPDMA_DATA_FMT_C444,
VPDMA_DATA_FMT_C422,
VPDMA_DATA_FMT_C420,
- VPDMA_DATA_FMT_YC422,
+ VPDMA_DATA_FMT_YCR422,
VPDMA_DATA_FMT_YC444,
- VPDMA_DATA_FMT_CY422,
+ VPDMA_DATA_FMT_CRY422,
+ VPDMA_DATA_FMT_CBY422,
+ VPDMA_DATA_FMT_YCB422,
};
enum vpdma_rgb_formats {
@@ -98,12 +104,18 @@ enum vpdma_rgb_formats {
VPDMA_DATA_FMT_BGRA32,
};
+enum vpdma_raw_formats {
+ VPDMA_DATA_FMT_RAW8 = 0,
+ VPDMA_DATA_FMT_RAW16,
+};
+
enum vpdma_misc_formats {
VPDMA_DATA_FMT_MV = 0,
};
extern const struct vpdma_data_format vpdma_yuv_fmts[];
extern const struct vpdma_data_format vpdma_rgb_fmts[];
+extern const struct vpdma_data_format vpdma_raw_fmts[];
extern const struct vpdma_data_format vpdma_misc_fmts[];
enum vpdma_frame_start_event {
@@ -117,6 +129,30 @@ enum vpdma_frame_start_event {
VPDMA_FSEVENT_CHANNEL_ACTIVE,
};
+/* max width configurations */
+enum vpdma_max_width {
+ MAX_OUT_WIDTH_UNLIMITED = 0,
+ MAX_OUT_WIDTH_REG1,
+ MAX_OUT_WIDTH_REG2,
+ MAX_OUT_WIDTH_REG3,
+ MAX_OUT_WIDTH_352,
+ MAX_OUT_WIDTH_768,
+ MAX_OUT_WIDTH_1280,
+ MAX_OUT_WIDTH_1920,
+};
+
+/* max height configurations */
+enum vpdma_max_height {
+ MAX_OUT_HEIGHT_UNLIMITED = 0,
+ MAX_OUT_HEIGHT_REG1,
+ MAX_OUT_HEIGHT_REG2,
+ MAX_OUT_HEIGHT_REG3,
+ MAX_OUT_HEIGHT_288,
+ MAX_OUT_HEIGHT_576,
+ MAX_OUT_HEIGHT_720,
+ MAX_OUT_HEIGHT_1080,
+};
+
/*
* VPDMA channel numbers
*/
@@ -134,6 +170,13 @@ enum vpdma_channel {
VPE_CHAN_RGB_OUT,
};
+#define VIP_CHAN_VIP2_OFFSET 70
+#define VIP_CHAN_MULT_PORTB_OFFSET 16
+#define VIP_CHAN_YUV_PORTB_OFFSET 2
+#define VIP_CHAN_RGB_PORTB_OFFSET 1
+
+#define VPDMA_MAX_CHANNELS 256
+
/* flags for VPDMA data descriptors */
#define VPDMA_DATA_ODD_LINE_SKIP (1 << 0)
#define VPDMA_DATA_EVEN_LINE_SKIP (1 << 1)
@@ -177,7 +220,17 @@ void vpdma_unmap_desc_buf(struct vpdma_data *vpdma, struct vpdma_buf *buf);
int vpdma_create_desc_list(struct vpdma_desc_list *list, size_t size, int type);
void vpdma_reset_desc_list(struct vpdma_desc_list *list);
void vpdma_free_desc_list(struct vpdma_desc_list *list);
-int vpdma_submit_descs(struct vpdma_data *vpdma, struct vpdma_desc_list *list);
+int vpdma_submit_descs(struct vpdma_data *vpdma, struct vpdma_desc_list *list,
+ int list_num);
+bool vpdma_list_busy(struct vpdma_data *vpdma, int list_num);
+void vpdma_update_dma_addr(struct vpdma_data *vpdma,
+ struct vpdma_desc_list *list, dma_addr_t dma_addr,
+ void *write_dtd, int drop, int idx);
+
+/* VPDMA hardware list funcs */
+int vpdma_hwlist_alloc(struct vpdma_data *vpdma, void *priv);
+void *vpdma_hwlist_get_priv(struct vpdma_data *vpdma, int list_num);
+void *vpdma_hwlist_release(struct vpdma_data *vpdma, int list_num);
/* helpers for creating vpdma descriptors */
void vpdma_add_cfd_block(struct vpdma_desc_list *list, int client,
@@ -186,31 +239,47 @@ void vpdma_add_cfd_adb(struct vpdma_desc_list *list, int client,
struct vpdma_buf *adb);
void vpdma_add_sync_on_channel_ctd(struct vpdma_desc_list *list,
enum vpdma_channel chan);
+void vpdma_add_abort_channel_ctd(struct vpdma_desc_list *list,
+ int chan_num);
void vpdma_add_out_dtd(struct vpdma_desc_list *list, int width,
const struct v4l2_rect *c_rect,
const struct vpdma_data_format *fmt, dma_addr_t dma_addr,
- enum vpdma_channel chan, u32 flags);
+ int max_w, int max_h, enum vpdma_channel chan, u32 flags);
+void vpdma_rawchan_add_out_dtd(struct vpdma_desc_list *list, int width,
+ const struct v4l2_rect *c_rect,
+ const struct vpdma_data_format *fmt, dma_addr_t dma_addr,
+ int max_w, int max_h, int raw_vpdma_chan, u32 flags);
+
void vpdma_add_in_dtd(struct vpdma_desc_list *list, int width,
const struct v4l2_rect *c_rect,
const struct vpdma_data_format *fmt, dma_addr_t dma_addr,
enum vpdma_channel chan, int field, u32 flags, int frame_width,
int frame_height, int start_h, int start_v);
+int vpdma_list_cleanup(struct vpdma_data *vpdma, int list_num,
+ int *channels, int size);
/* vpdma list interrupt management */
-void vpdma_enable_list_complete_irq(struct vpdma_data *vpdma, int list_num,
- bool enable);
-void vpdma_clear_list_stat(struct vpdma_data *vpdma);
+void vpdma_enable_list_complete_irq(struct vpdma_data *vpdma, int irq_num,
+ int list_num, bool enable);
+void vpdma_clear_list_stat(struct vpdma_data *vpdma, int irq_num,
+ int list_num);
+unsigned int vpdma_get_list_stat(struct vpdma_data *vpdma, int irq_num);
+unsigned int vpdma_get_list_mask(struct vpdma_data *vpdma, int irq_num);
/* vpdma client configuration */
void vpdma_set_line_mode(struct vpdma_data *vpdma, int line_mode,
enum vpdma_channel chan);
void vpdma_set_frame_start_event(struct vpdma_data *vpdma,
enum vpdma_frame_start_event fs_event, enum vpdma_channel chan);
+void vpdma_set_max_size(struct vpdma_data *vpdma, int reg_addr,
+ u32 width, u32 height);
+void vpdma_set_bg_color(struct vpdma_data *vpdma,
+ struct vpdma_data_format *fmt, u32 color);
void vpdma_dump_regs(struct vpdma_data *vpdma);
/* initialize vpdma, passed with VPE's platform device pointer */
-struct vpdma_data *vpdma_create(struct platform_device *pdev,
+int vpdma_create(struct platform_device *pdev, struct vpdma_data *vpdma,
void (*cb)(struct platform_device *pdev));
#endif
diff --git a/drivers/media/platform/ti-vpe/vpdma_priv.h b/drivers/media/platform/ti-vpe/vpdma_priv.h
index c1a6ce1884f3..72c7f13b4a9d 100644
--- a/drivers/media/platform/ti-vpe/vpdma_priv.h
+++ b/drivers/media/platform/ti-vpe/vpdma_priv.h
@@ -28,6 +28,10 @@
#define VPDMA_MAX_SIZE1 0x34
#define VPDMA_MAX_SIZE2 0x38
#define VPDMA_MAX_SIZE3 0x3c
+#define VPDMA_MAX_SIZE_WIDTH_MASK 0xffff
+#define VPDMA_MAX_SIZE_WIDTH_SHFT 16
+#define VPDMA_MAX_SIZE_HEIGHT_MASK 0xffff
+#define VPDMA_MAX_SIZE_HEIGHT_SHFT 0
/* Interrupts */
#define VPDMA_INT_CHAN_STAT(grp) (0x40 + grp * 8)
@@ -39,9 +43,11 @@
#define VPDMA_INT_LIST0_STAT 0x88
#define VPDMA_INT_LIST0_MASK 0x8c
+#define VPDMA_INTX_OFFSET 0x50
+
#define VPDMA_PERFMON(i) (0x200 + i * 4)
-/* VPE specific client registers */
+/* VIP/VPE client registers */
#define VPDMA_DEI_CHROMA1_CSTAT 0x0300
#define VPDMA_DEI_LUMA1_CSTAT 0x0304
#define VPDMA_DEI_LUMA2_CSTAT 0x0308
@@ -50,6 +56,8 @@
#define VPDMA_DEI_CHROMA3_CSTAT 0x0314
#define VPDMA_DEI_MV_IN_CSTAT 0x0330
#define VPDMA_DEI_MV_OUT_CSTAT 0x033c
+#define VPDMA_VIP_LO_Y_CSTAT 0x0388
+#define VPDMA_VIP_LO_UV_CSTAT 0x038c
#define VPDMA_VIP_UP_Y_CSTAT 0x0390
#define VPDMA_VIP_UP_UV_CSTAT 0x0394
#define VPDMA_VPI_CTL_CSTAT 0x03d0
@@ -69,41 +77,63 @@
#define VPDMA_LIST_TYPE_SHFT 16
#define VPDMA_LIST_SIZE_MASK 0xffff
-/* VPDMA data type values for data formats */
+/*
+ * The YUV data type definitions below are taken from
+ * both the TRM and the i839 errata information.
+ * Use the correct data type considering byte
+ * reordering of components.
+ *
+ * Also, since the lone "C" in the 422 case was used
+ * to mean "Cr" (i.e. the V component), it was decided
+ * to label them CR explicitly to remove any confusion.
+ * Bear in mind that the type labels refer to the memory
+ * packed order (LSB - MSB).
+ */
#define DATA_TYPE_Y444 0x0
#define DATA_TYPE_Y422 0x1
#define DATA_TYPE_Y420 0x2
#define DATA_TYPE_C444 0x4
#define DATA_TYPE_C422 0x5
#define DATA_TYPE_C420 0x6
-#define DATA_TYPE_YC422 0x7
#define DATA_TYPE_YC444 0x8
-#define DATA_TYPE_CY422 0x27
-
-#define DATA_TYPE_RGB16_565 0x0
-#define DATA_TYPE_ARGB_1555 0x1
-#define DATA_TYPE_ARGB_4444 0x2
-#define DATA_TYPE_RGBA_5551 0x3
-#define DATA_TYPE_RGBA_4444 0x4
-#define DATA_TYPE_ARGB24_6666 0x5
-#define DATA_TYPE_RGB24_888 0x6
-#define DATA_TYPE_ARGB32_8888 0x7
-#define DATA_TYPE_RGBA24_6666 0x8
-#define DATA_TYPE_RGBA32_8888 0x9
-#define DATA_TYPE_BGR16_565 0x10
-#define DATA_TYPE_ABGR_1555 0x11
-#define DATA_TYPE_ABGR_4444 0x12
-#define DATA_TYPE_BGRA_5551 0x13
-#define DATA_TYPE_BGRA_4444 0x14
-#define DATA_TYPE_ABGR24_6666 0x15
-#define DATA_TYPE_BGR24_888 0x16
-#define DATA_TYPE_ABGR32_8888 0x17
-#define DATA_TYPE_BGRA24_6666 0x18
-#define DATA_TYPE_BGRA32_8888 0x19
+#define DATA_TYPE_YCB422 0x7
+#define DATA_TYPE_YCR422 0x17
+#define DATA_TYPE_CBY422 0x27
+#define DATA_TYPE_CRY422 0x37
+
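Given the LSB-to-MSB labelling rule from the comment above, the 4:2:2 types line up
with the familiar V4L2 packed fourccs. The vpe.c hunk below relies on the first and
third rows; the other two are inferred from the same naming scheme and marked as such:

	static const struct {
		const char *mem_order;		/* bytes, LSB..MSB */
		int data_type;
	} yuv422_map[] = {
		{ "Y0 Cb Y1 Cr (YUYV)", DATA_TYPE_YCB422 },	/* 0x07 */
		{ "Y0 Cr Y1 Cb (YVYU)", DATA_TYPE_YCR422 },	/* 0x17, inferred */
		{ "Cb Y0 Cr Y1 (UYVY)", DATA_TYPE_CBY422 },	/* 0x27 */
		{ "Cr Y0 Cb Y1 (VYUY)", DATA_TYPE_CRY422 },	/* 0x37, inferred */
	};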
+/*
+ * The RGB data type definition below are defined
+ * to follow Errata i819.
+ * The initial values were taken from:
+ * VPDMA_data_type_mapping_v0.2vayu_c.pdf
+ * But some of the ARGB definition appeared to be wrong
+ * in the document also. As they would yield RGBA instead.
+ * They have been corrected based on experimentation.
+ */
+#define DATA_TYPE_RGB16_565 0x10
+#define DATA_TYPE_ARGB_1555 0x13
+#define DATA_TYPE_ARGB_4444 0x14
+#define DATA_TYPE_RGBA_5551 0x11
+#define DATA_TYPE_RGBA_4444 0x12
+#define DATA_TYPE_ARGB24_6666 0x18
+#define DATA_TYPE_RGB24_888 0x16
+#define DATA_TYPE_ARGB32_8888 0x17
+#define DATA_TYPE_RGBA24_6666 0x15
+#define DATA_TYPE_RGBA32_8888 0x19
+#define DATA_TYPE_BGR16_565 0x0
+#define DATA_TYPE_ABGR_1555 0x3
+#define DATA_TYPE_ABGR_4444 0x4
+#define DATA_TYPE_BGRA_5551 0x1
+#define DATA_TYPE_BGRA_4444 0x2
+#define DATA_TYPE_ABGR24_6666 0x8
+#define DATA_TYPE_BGR24_888 0x6
+#define DATA_TYPE_ABGR32_8888 0x7
+#define DATA_TYPE_BGRA24_6666 0x5
+#define DATA_TYPE_BGRA32_8888 0x9
#define DATA_TYPE_MV 0x3
-/* VPDMA channel numbers(only VPE channels for now) */
+/* VPDMA channel numbers, some are common between VIP/VPE and appear twice */
#define VPE_CHAN_NUM_LUMA1_IN 0
#define VPE_CHAN_NUM_CHROMA1_IN 1
#define VPE_CHAN_NUM_LUMA2_IN 2
@@ -112,10 +142,15 @@
#define VPE_CHAN_NUM_CHROMA3_IN 5
#define VPE_CHAN_NUM_MV_IN 12
#define VPE_CHAN_NUM_MV_OUT 15
+#define VIP1_CHAN_NUM_MULT_PORT_A_SRC0 38
+#define VIP1_CHAN_NUM_MULT_ANC_A_SRC0 70
#define VPE_CHAN_NUM_LUMA_OUT 102
#define VPE_CHAN_NUM_CHROMA_OUT 103
+#define VIP1_CHAN_NUM_PORT_A_LUMA 102
+#define VIP1_CHAN_NUM_PORT_A_CHROMA 103
#define VPE_CHAN_NUM_RGB_OUT 106
-
+#define VIP1_CHAN_NUM_PORT_A_RGB 106
+#define VIP1_CHAN_NUM_PORT_B_RGB 107
/*
* a VPDMA address data block payload for a configuration descriptor needs to
* have each sub block length as a multiple of 16 bytes. Therefore, the overall
@@ -203,6 +238,7 @@ struct vpdma_dtd {
#define DTD_V_START_MASK 0xffff
#define DTD_V_START_SHFT 0
+#define DTD_DESC_START_MASK 0xffffffe0
#define DTD_DESC_START_SHIFT 5
#define DTD_WRITE_DESC_MASK 0x01
#define DTD_WRITE_DESC_SHIFT 2
@@ -217,42 +253,6 @@ struct vpdma_dtd {
#define DTD_MAX_HEIGHT_MASK 0x07
#define DTD_MAX_HEIGHT_SHFT 0
-/* max width configurations */
- /* unlimited width */
-#define MAX_OUT_WIDTH_UNLIMITED 0
-/* as specified in max_size1 reg */
-#define MAX_OUT_WIDTH_REG1 1
-/* as specified in max_size2 reg */
-#define MAX_OUT_WIDTH_REG2 2
-/* as specified in max_size3 reg */
-#define MAX_OUT_WIDTH_REG3 3
-/* maximum of 352 pixels as width */
-#define MAX_OUT_WIDTH_352 4
-/* maximum of 768 pixels as width */
-#define MAX_OUT_WIDTH_768 5
-/* maximum of 1280 pixels width */
-#define MAX_OUT_WIDTH_1280 6
-/* maximum of 1920 pixels as width */
-#define MAX_OUT_WIDTH_1920 7
-
-/* max height configurations */
- /* unlimited height */
-#define MAX_OUT_HEIGHT_UNLIMITED 0
-/* as specified in max_size1 reg */
-#define MAX_OUT_HEIGHT_REG1 1
-/* as specified in max_size2 reg */
-#define MAX_OUT_HEIGHT_REG2 2
-/* as specified in max_size3 reg */
-#define MAX_OUT_HEIGHT_REG3 3
-/* maximum of 288 lines as height */
-#define MAX_OUT_HEIGHT_288 4
-/* maximum of 576 lines as height */
-#define MAX_OUT_HEIGHT_576 5
-/* maximum of 720 lines as height */
-#define MAX_OUT_HEIGHT_720 6
-/* maximum of 1080 lines as height */
-#define MAX_OUT_HEIGHT_1080 7
-
static inline u32 dtd_type_ctl_stride(int type, bool notify, int field,
bool one_d, bool even_line_skip, bool odd_line_skip,
int line_stride)
@@ -285,7 +285,7 @@ static inline u32 dtd_frame_width_height(int width, int height)
static inline u32 dtd_desc_write_addr(unsigned int addr, bool write_desc,
bool drop_data, bool use_desc)
{
- return (addr << DTD_DESC_START_SHIFT) |
+ return (addr & DTD_DESC_START_MASK) |
(write_desc << DTD_WRITE_DESC_SHIFT) |
(drop_data << DTD_DROP_DATA_SHIFT) |
use_desc;
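The switch from shifting to masking matters because the write-back address shares a
word with the flag bits: the address must be 32-byte aligned (DTD_DESC_START_MASK
keeps bits 31:5) while write_desc/drop_data/use_desc occupy the low bits. A standalone
sanity check of that packing; DTD_DROP_DATA_SHIFT = 1 is assumed from the field layout,
as it is not shown in this hunk:

	#include <assert.h>
	#include <stdbool.h>

	#define DTD_DESC_START_MASK	0xffffffe0
	#define DTD_WRITE_DESC_SHIFT	2
	#define DTD_DROP_DATA_SHIFT	1	/* assumed */

	static unsigned int dtd_desc_write_addr(unsigned int addr, bool write_desc,
						bool drop_data, bool use_desc)
	{
		return (addr & DTD_DESC_START_MASK) |
		       ((unsigned int)write_desc << DTD_WRITE_DESC_SHIFT) |
		       ((unsigned int)drop_data << DTD_DROP_DATA_SHIFT) |
		       (unsigned int)use_desc;
	}

	int main(void)
	{
		/* a 32-byte aligned address survives unshifted; flags land in bits 2:0 */
		unsigned int w = dtd_desc_write_addr(0x80001000, true, false, false);

		assert((w & DTD_DESC_START_MASK) == 0x80001000);
		assert(w & (1u << DTD_WRITE_DESC_SHIFT));
		return 0;
	}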
@@ -390,7 +390,7 @@ static inline int dtd_get_frame_height(struct vpdma_dtd *dtd)
static inline int dtd_get_desc_write_addr(struct vpdma_dtd *dtd)
{
- return dtd->desc_write_addr >> DTD_DESC_START_SHIFT;
+ return dtd->desc_write_addr & DTD_DESC_START_MASK;
}
static inline bool dtd_get_write_desc(struct vpdma_dtd *dtd)
diff --git a/drivers/media/platform/ti-vpe/vpe.c b/drivers/media/platform/ti-vpe/vpe.c
index 0189f7f7cb03..f0156b7759e9 100644
--- a/drivers/media/platform/ti-vpe/vpe.c
+++ b/drivers/media/platform/ti-vpe/vpe.c
@@ -44,6 +44,7 @@
#include <media/videobuf2-dma-contig.h>
#include "vpdma.h"
+#include "vpdma_priv.h"
#include "vpe_regs.h"
#include "sc.h"
#include "csc.h"
@@ -53,8 +54,8 @@
/* minimum and maximum frame sizes */
#define MIN_W 32
#define MIN_H 32
-#define MAX_W 1920
-#define MAX_H 1080
+#define MAX_W 2048
+#define MAX_H 1184
/* required alignments */
#define S_ALIGN 0 /* multiple of 1 */
@@ -141,7 +142,7 @@ struct vpe_dei_regs {
*/
static const struct vpe_dei_regs dei_regs = {
.mdt_spacial_freq_thr_reg = 0x020C0804u,
- .edi_config_reg = 0x0118100Fu,
+ .edi_config_reg = 0x0118100Cu,
.edi_lut_reg0 = 0x08040200u,
.edi_lut_reg1 = 0x1010100Cu,
.edi_lut_reg2 = 0x10101010u,
@@ -236,7 +237,7 @@ struct vpe_fmt {
static struct vpe_fmt vpe_formats[] = {
{
- .name = "YUV 422 co-planar",
+ .name = "NV16 YUV 422 co-planar",
.fourcc = V4L2_PIX_FMT_NV16,
.types = VPE_FMT_TYPE_CAPTURE | VPE_FMT_TYPE_OUTPUT,
.coplanar = 1,
@@ -245,7 +246,7 @@ static struct vpe_fmt vpe_formats[] = {
},
},
{
- .name = "YUV 420 co-planar",
+ .name = "NV12 YUV 420 co-planar",
.fourcc = V4L2_PIX_FMT_NV12,
.types = VPE_FMT_TYPE_CAPTURE | VPE_FMT_TYPE_OUTPUT,
.coplanar = 1,
@@ -258,7 +259,7 @@ static struct vpe_fmt vpe_formats[] = {
.fourcc = V4L2_PIX_FMT_YUYV,
.types = VPE_FMT_TYPE_CAPTURE | VPE_FMT_TYPE_OUTPUT,
.coplanar = 0,
- .vpdma_fmt = { &vpdma_yuv_fmts[VPDMA_DATA_FMT_YC422],
+ .vpdma_fmt = { &vpdma_yuv_fmts[VPDMA_DATA_FMT_YCB422],
},
},
{
@@ -266,7 +267,7 @@ static struct vpe_fmt vpe_formats[] = {
.fourcc = V4L2_PIX_FMT_UYVY,
.types = VPE_FMT_TYPE_CAPTURE | VPE_FMT_TYPE_OUTPUT,
.coplanar = 0,
- .vpdma_fmt = { &vpdma_yuv_fmts[VPDMA_DATA_FMT_CY422],
+ .vpdma_fmt = { &vpdma_yuv_fmts[VPDMA_DATA_FMT_CBY422],
},
},
{
@@ -301,6 +302,22 @@ static struct vpe_fmt vpe_formats[] = {
.vpdma_fmt = { &vpdma_rgb_fmts[VPDMA_DATA_FMT_ABGR32],
},
},
+ {
+ .name = "RGB565",
+ .fourcc = V4L2_PIX_FMT_RGB565,
+ .types = VPE_FMT_TYPE_CAPTURE,
+ .coplanar = 0,
+ .vpdma_fmt = { &vpdma_rgb_fmts[VPDMA_DATA_FMT_RGB565],
+ },
+ },
+ {
+ .name = "RGB5551",
+ .fourcc = V4L2_PIX_FMT_RGB555,
+ .types = VPE_FMT_TYPE_CAPTURE,
+ .coplanar = 0,
+ .vpdma_fmt = { &vpdma_rgb_fmts[VPDMA_DATA_FMT_RGBA16_5551],
+ },
+ },
};
/*
@@ -310,6 +327,7 @@ static struct vpe_fmt vpe_formats[] = {
struct vpe_q_data {
unsigned int width; /* frame width */
unsigned int height; /* frame height */
+ unsigned int nplanes; /* Current number of planes */
unsigned int bytesperline[VPE_MAX_PLANES]; /* bytes per line in memory */
enum v4l2_colorspace colorspace;
enum v4l2_field field; /* supported field value */
@@ -320,9 +338,13 @@ struct vpe_q_data {
};
/* vpe_q_data flag bits */
-#define Q_DATA_FRAME_1D (1 << 0)
-#define Q_DATA_MODE_TILED (1 << 1)
-#define Q_DATA_INTERLACED (1 << 2)
+#define Q_DATA_FRAME_1D BIT(0)
+#define Q_DATA_MODE_TILED BIT(1)
+#define Q_DATA_INTERLACED_ALTERNATE BIT(2)
+#define Q_DATA_INTERLACED_SEQ_TB BIT(3)
+
+#define Q_IS_INTERLACED (Q_DATA_INTERLACED_ALTERNATE | \
+ Q_DATA_INTERLACED_SEQ_TB)
enum {
Q_DATA_SRC = 0,
@@ -362,6 +384,7 @@ struct vpe_dev {
void __iomem *base;
struct resource *res;
+ struct vpdma_data vpdma_data;
struct vpdma_data *vpdma; /* vpdma data handle */
struct sc_data *sc; /* scaler data handle */
struct csc_data *csc; /* csc data handle */
@@ -416,7 +439,7 @@ static struct vpe_q_data *get_q_data(struct vpe_ctx *ctx,
case V4L2_BUF_TYPE_VIDEO_CAPTURE:
return &ctx->q_data[Q_DATA_DST];
default:
- BUG();
+ return NULL;
}
return NULL;
}
@@ -584,7 +607,10 @@ static void free_vbs(struct vpe_ctx *ctx)
spin_lock_irqsave(&dev->lock, flags);
if (ctx->src_vbs[2]) {
v4l2_m2m_buf_done(ctx->src_vbs[2], VB2_BUF_STATE_DONE);
- v4l2_m2m_buf_done(ctx->src_vbs[1], VB2_BUF_STATE_DONE);
+ if (ctx->src_vbs[1] && (ctx->src_vbs[1] != ctx->src_vbs[2]))
+ v4l2_m2m_buf_done(ctx->src_vbs[1], VB2_BUF_STATE_DONE);
+ ctx->src_vbs[2] = NULL;
+ ctx->src_vbs[1] = NULL;
}
spin_unlock_irqrestore(&dev->lock, flags);
}
@@ -638,7 +664,7 @@ static void set_us_coefficients(struct vpe_ctx *ctx)
cp = &us_coeffs[0].anchor_fid0_c0;
- if (s_q_data->flags & Q_DATA_INTERLACED) /* interlaced */
+ if (s_q_data->flags & Q_IS_INTERLACED) /* interlaced */
cp += sizeof(us_coeffs[0]) / sizeof(*cp);
end_cp = cp + sizeof(us_coeffs[0]) / sizeof(*cp);
@@ -655,14 +681,13 @@ static void set_us_coefficients(struct vpe_ctx *ctx)
/*
* Set the upsampler config mode and the VPDMA line mode in the shadow MMRs.
*/
-static void set_cfg_and_line_modes(struct vpe_ctx *ctx)
+static void set_cfg_modes(struct vpe_ctx *ctx)
{
struct vpe_fmt *fmt = ctx->q_data[Q_DATA_SRC].fmt;
struct vpe_mmr_adb *mmr_adb = ctx->mmr_adb.addr;
u32 *us1_reg0 = &mmr_adb->us1_regs[0];
u32 *us2_reg0 = &mmr_adb->us2_regs[0];
u32 *us3_reg0 = &mmr_adb->us3_regs[0];
- int line_mode = 1;
int cfg_mode = 1;
/*
@@ -670,15 +695,24 @@ static void set_cfg_and_line_modes(struct vpe_ctx *ctx)
* Cfg Mode 1: YUV422 source, disable upsampler, DEI is de-interlacing.
*/
- if (fmt->fourcc == V4L2_PIX_FMT_NV12) {
+ if (fmt->fourcc == V4L2_PIX_FMT_NV12)
cfg_mode = 0;
- line_mode = 0; /* double lines to line buffer */
- }
write_field(us1_reg0, cfg_mode, VPE_US_MODE_MASK, VPE_US_MODE_SHIFT);
write_field(us2_reg0, cfg_mode, VPE_US_MODE_MASK, VPE_US_MODE_SHIFT);
write_field(us3_reg0, cfg_mode, VPE_US_MODE_MASK, VPE_US_MODE_SHIFT);
+ ctx->load_mmrs = true;
+}
+
+static void set_line_modes(struct vpe_ctx *ctx)
+{
+ struct vpe_fmt *fmt = ctx->q_data[Q_DATA_SRC].fmt;
+ int line_mode = 1;
+
+ if (fmt->fourcc == V4L2_PIX_FMT_NV12)
+ line_mode = 0; /* double lines to line buffer */
+
/* regs for now */
vpdma_set_line_mode(ctx->dev->vpdma, line_mode, VPE_CHAN_CHROMA1_IN);
vpdma_set_line_mode(ctx->dev->vpdma, line_mode, VPE_CHAN_CHROMA2_IN);
@@ -703,8 +737,6 @@ static void set_cfg_and_line_modes(struct vpe_ctx *ctx)
/* frame start for MV in client */
vpdma_set_frame_start_event(ctx->dev->vpdma, VPDMA_FSEVENT_CHANNEL_ACTIVE,
VPE_CHAN_MV_IN);
-
- ctx->load_mmrs = true;
}
/*
@@ -727,9 +759,11 @@ static void set_dst_registers(struct vpe_ctx *ctx)
struct vpe_fmt *fmt = ctx->q_data[Q_DATA_DST].fmt;
u32 val = 0;
- if (clrspc == V4L2_COLORSPACE_SRGB)
+ if (clrspc == V4L2_COLORSPACE_SRGB) {
val |= VPE_RGB_OUT_SELECT;
- else if (fmt->fourcc == V4L2_PIX_FMT_NV16)
+ vpdma_set_bg_color(ctx->dev->vpdma,
+ (struct vpdma_data_format *)fmt->vpdma_fmt[0], 0xff);
+ } else if (fmt->fourcc == V4L2_PIX_FMT_NV16)
val |= VPE_COLOR_SEPARATE_422;
/*
@@ -765,8 +799,7 @@ static void set_dei_regs(struct vpe_ctx *ctx)
* for both progressive and interlace content in interlace bypass mode.
* It has been recommended not to use progressive bypass mode.
*/
- if ((!ctx->deinterlacing && (s_q_data->flags & Q_DATA_INTERLACED)) ||
- !(s_q_data->flags & Q_DATA_INTERLACED)) {
+ if (!(s_q_data->flags & Q_IS_INTERLACED) || !ctx->deinterlacing) {
deinterlace = false;
val = VPE_DEI_INTERLACE_BYPASS;
}
@@ -798,6 +831,23 @@ static void set_dei_shadow_registers(struct vpe_ctx *ctx)
ctx->load_mmrs = true;
}
+static void config_edi_input_mode(struct vpe_ctx *ctx, int mode)
+{
+ struct vpe_mmr_adb *mmr_adb = ctx->mmr_adb.addr;
+ u32 *edi_config_reg = &mmr_adb->dei_regs[3];
+
+ if (mode & 0x2)
+ write_field(edi_config_reg, 1, 1, 2); /* EDI_ENABLE_3D */
+
+ if (mode & 0x3)
+ write_field(edi_config_reg, 1, 1, 3); /* EDI_CHROMA_3D */
+
+ write_field(edi_config_reg, mode, VPE_EDI_INP_MODE_MASK,
+ VPE_EDI_INP_MODE_SHIFT);
+
+ ctx->load_mmrs = true;
+}
+
/*
* Set the shadow registers whose values are modified when either the
* source or destination format is changed.
@@ -817,8 +867,8 @@ static int set_srcdst_params(struct vpe_ctx *ctx)
ctx->sequence = 0;
ctx->field = V4L2_FIELD_TOP;
- if ((s_q_data->flags & Q_DATA_INTERLACED) &&
- !(d_q_data->flags & Q_DATA_INTERLACED)) {
+ if ((s_q_data->flags & Q_IS_INTERLACED) &&
+ !(d_q_data->flags & Q_IS_INTERLACED)) {
int bytes_per_line;
const struct vpdma_data_format *mv =
&vpdma_misc_fmts[VPDMA_DATA_FMT_MV];
@@ -842,12 +892,13 @@ static int set_srcdst_params(struct vpe_ctx *ctx)
}
free_vbs(ctx);
+ ctx->src_vbs[2] = ctx->src_vbs[1] = ctx->src_vbs[0] = NULL;
ret = realloc_mv_buffers(ctx, mv_buf_size);
if (ret)
return ret;
- set_cfg_and_line_modes(ctx);
+ set_cfg_modes(ctx);
set_dei_regs(ctx);
csc_set_coeff(ctx->dev->csc, &mmr_adb->csc_regs[0],
@@ -881,15 +932,14 @@ static struct vpe_ctx *file2ctx(struct file *file)
static int job_ready(void *priv)
{
struct vpe_ctx *ctx = priv;
- int needed = ctx->bufs_per_job;
-
- if (ctx->deinterlacing && ctx->src_vbs[2] == NULL)
- needed += 2; /* need additional two most recent fields */
-
- if (v4l2_m2m_num_src_bufs_ready(ctx->fh.m2m_ctx) < needed)
- return 0;
- if (v4l2_m2m_num_dst_bufs_ready(ctx->fh.m2m_ctx) < needed)
+ /*
+ * This check is needed as this might be called directly from driver.
+ * When called by the m2m framework, it will always be satisfied, but
+ * when called from vpe_irq, it might fail (e.g. a src stream with
+ * zero buffers).
+ */
+ if (v4l2_m2m_num_src_bufs_ready(ctx->fh.m2m_ctx) <= 0 ||
+ v4l2_m2m_num_dst_bufs_ready(ctx->fh.m2m_ctx) <= 0)
return 0;
return 1;
@@ -993,22 +1043,38 @@ static void add_out_dtd(struct vpe_ctx *ctx, int port)
int mv_buf_selector = !ctx->src_mv_buf_selector;
dma_addr_t dma_addr;
u32 flags = 0;
+ u32 offset = 0;
if (port == VPE_PORT_MV_OUT) {
vpdma_fmt = &vpdma_misc_fmts[VPDMA_DATA_FMT_MV];
dma_addr = ctx->mv_buf_dma[mv_buf_selector];
+ q_data = &ctx->q_data[Q_DATA_SRC];
} else {
/* to incorporate interleaved formats */
int plane = fmt->coplanar ? p_data->vb_part : 0;
vpdma_fmt = fmt->vpdma_fmt[plane];
- dma_addr = vb2_dma_contig_plane_dma_addr(vb, plane);
+ /*
+ * If we are using a single-plane buffer, the chroma data
+ * follows the luma in the same plane, so the separate vpdma
+ * chroma channel needs an address offset.
+ */
+ if (q_data->nplanes == 1 && plane) {
+ dma_addr = vb2_dma_contig_plane_dma_addr(vb, 0);
+ /* Compute required offset */
+ offset = q_data->bytesperline[0] * q_data->height;
+ } else {
+ dma_addr = vb2_dma_contig_plane_dma_addr(vb, plane);
+ /* Use address as is, no offset */
+ offset = 0;
+ }
if (!dma_addr) {
vpe_err(ctx->dev,
"acquiring output buffer(%d) dma_addr failed\n",
port);
return;
}
+ /* Apply the offset */
+ dma_addr += offset;
}
if (q_data->flags & Q_DATA_FRAME_1D)
@@ -1016,8 +1082,12 @@ static void add_out_dtd(struct vpe_ctx *ctx, int port)
if (q_data->flags & Q_DATA_MODE_TILED)
flags |= VPDMA_DATA_MODE_TILED;
+ vpdma_set_max_size(ctx->dev->vpdma, VPDMA_MAX_SIZE1,
+ MAX_W, MAX_H);
+
vpdma_add_out_dtd(&ctx->desc_list, q_data->width, &q_data->c_rect,
- vpdma_fmt, dma_addr, p_data->channel, flags);
+ vpdma_fmt, dma_addr, MAX_OUT_WIDTH_REG1,
+ MAX_OUT_HEIGHT_REG1, p_data->channel, flags);
}
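For the single-plane co-planar case handled above, the chroma data simply follows the
luma data in the same buffer, so the chroma channel is pointed at plane 0 plus a fixed
offset. A worked example of that arithmetic for 1920x1080 NV12 (8-bit luma):

	#include <stdio.h>

	int main(void)
	{
		unsigned int bytesperline = 1920;	/* NV12 luma, 8 bpp */
		unsigned int height = 1080;

		/* chroma plane starts immediately after the luma plane */
		unsigned int chroma_offset = bytesperline * height;

		printf("chroma offset = %u bytes\n", chroma_offset);	/* 2073600 */
		return 0;
	}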
static void add_in_dtd(struct vpe_ctx *ctx, int port)
@@ -1033,6 +1103,7 @@ static void add_in_dtd(struct vpe_ctx *ctx, int port)
int frame_width, frame_height;
dma_addr_t dma_addr;
u32 flags = 0;
+ u32 offset = 0;
if (port == VPE_PORT_MV_IN) {
vpdma_fmt = &vpdma_misc_fmts[VPDMA_DATA_FMT_MV];
@@ -1042,14 +1113,49 @@ static void add_in_dtd(struct vpe_ctx *ctx, int port)
int plane = fmt->coplanar ? p_data->vb_part : 0;
vpdma_fmt = fmt->vpdma_fmt[plane];
-
- dma_addr = vb2_dma_contig_plane_dma_addr(vb, plane);
+ /*
+ * If we are using a single-plane buffer, the chroma data
+ * follows the luma in the same plane, so the separate vpdma
+ * chroma channel needs an address offset.
+ */
+ if (q_data->nplanes == 1 && plane) {
+ dma_addr = vb2_dma_contig_plane_dma_addr(vb, 0);
+ /* Compute required offset */
+ offset = q_data->bytesperline[0] * q_data->height;
+ } else {
+ dma_addr = vb2_dma_contig_plane_dma_addr(vb, plane);
+ /* Use address as is, no offset */
+ offset = 0;
+ }
if (!dma_addr) {
vpe_err(ctx->dev,
- "acquiring input buffer(%d) dma_addr failed\n",
+ "acquiring output buffer(%d) dma_addr failed\n",
port);
return;
}
+ /* Apply the offset */
+ dma_addr += offset;
+
+ if (q_data->flags & Q_DATA_INTERLACED_SEQ_TB) {
+ /*
+ * Use top or bottom field from same vb alternately
+ * f,f-1,f-2 = TBT when seq is even
+ * f,f-1,f-2 = BTB when seq is odd
+ */
+ field = (p_data->vb_index + (ctx->sequence % 2)) % 2;
+
+ if (field) {
+ /*
+ * bottom field of a SEQ_TB buffer:
+ * skip the top field data by advancing
+ * dma_addr past one field worth of lines
+ */
+ int height = q_data->height / 2;
+ int bpp = fmt->fourcc == V4L2_PIX_FMT_NV12 ?
+ 1 : (vpdma_fmt->depth >> 3);
+ if (plane)
+ height /= 2;
+ dma_addr += q_data->width * height * bpp;
+ }
+ }
}
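The field-selection expression above produces exactly the T/B/T and B/T/B patterns
described in the comment; a quick standalone enumeration, with vb_index standing in
for the three ports of the de-interlacer's field window:

	#include <stdio.h>

	int main(void)
	{
		int sequence, vb_index;

		for (sequence = 0; sequence < 4; sequence++) {
			printf("seq %d:", sequence);
			for (vb_index = 0; vb_index < 3; vb_index++) {
				int field = (vb_index + (sequence % 2)) % 2;

				printf(" %c", field ? 'B' : 'T');
			}
			printf("\n");	/* T B T for even seq, B T B for odd */
		}
		return 0;
	}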
if (q_data->flags & Q_DATA_FRAME_1D)
@@ -1077,7 +1183,7 @@ static void enable_irqs(struct vpe_ctx *ctx)
write_reg(ctx->dev, VPE_INT0_ENABLE1_SET, VPE_DEI_ERROR_INT |
VPE_DS1_UV_ERROR_INT);
- vpdma_enable_list_complete_irq(ctx->dev->vpdma, 0, true);
+ vpdma_enable_list_complete_irq(ctx->dev->vpdma, 0, 0, true);
}
static void disable_irqs(struct vpe_ctx *ctx)
@@ -1085,7 +1191,7 @@ static void disable_irqs(struct vpe_ctx *ctx)
write_reg(ctx->dev, VPE_INT0_ENABLE0_CLR, 0xffffffff);
write_reg(ctx->dev, VPE_INT0_ENABLE1_CLR, 0xffffffff);
- vpdma_enable_list_complete_irq(ctx->dev->vpdma, 0, false);
+ vpdma_enable_list_complete_irq(ctx->dev->vpdma, 0, 0, false);
}
/* device_run() - prepares and starts the device
@@ -1098,23 +1204,49 @@ static void device_run(void *priv)
struct vpe_ctx *ctx = priv;
struct sc_data *sc = ctx->dev->sc;
struct vpe_q_data *d_q_data = &ctx->q_data[Q_DATA_DST];
+ struct vpe_q_data *s_q_data = &ctx->q_data[Q_DATA_SRC];
- if (ctx->deinterlacing && ctx->src_vbs[2] == NULL) {
- ctx->src_vbs[2] = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
- WARN_ON(ctx->src_vbs[2] == NULL);
- ctx->src_vbs[1] = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
- WARN_ON(ctx->src_vbs[1] == NULL);
+ if (ctx->deinterlacing && s_q_data->flags & Q_DATA_INTERLACED_SEQ_TB &&
+ ctx->sequence % 2 == 0) {
+ /* When a SEQ_TB buffer is used for the first time (top
+ * field), there is no need to remove it from the queue, as
+ * the next (bottom) field is present in the same buffer
+ * (this also keeps job_ready from failing). The buffer is
+ * removed when its bottom field is used.
+ */
+ ctx->src_vbs[0] = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
+ WARN_ON(ctx->src_vbs[0] == NULL);
+ } else {
+ ctx->src_vbs[0] = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
+ WARN_ON(ctx->src_vbs[0] == NULL);
}
- ctx->src_vbs[0] = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
- WARN_ON(ctx->src_vbs[0] == NULL);
ctx->dst_vb = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
WARN_ON(ctx->dst_vb == NULL);
+ if (ctx->deinterlacing) {
+
+ if (ctx->src_vbs[2] == NULL) {
+ ctx->src_vbs[2] = ctx->src_vbs[0];
+ WARN_ON(ctx->src_vbs[2] == NULL);
+ ctx->src_vbs[1] = ctx->src_vbs[0];
+ WARN_ON(ctx->src_vbs[1] == NULL);
+ }
+
+ /*
+ * we have output the first 2 frames through line averaging;
+ * now switch to the EDI de-interlacer
+ */
+ if (ctx->sequence == 2)
+ config_edi_input_mode(ctx, 0x3); /* EDI (Y + UV) */
+ }
+
/* config descriptors */
if (ctx->dev->loaded_mmrs != ctx->mmr_adb.dma_addr || ctx->load_mmrs) {
vpdma_map_desc_buf(ctx->dev->vpdma, &ctx->mmr_adb);
vpdma_add_cfd_adb(&ctx->desc_list, CFD_MMR_CLIENT, &ctx->mmr_adb);
+
+ set_line_modes(ctx);
+
ctx->dev->loaded_mmrs = ctx->mmr_adb.dma_addr;
ctx->load_mmrs = false;
}
@@ -1202,7 +1334,7 @@ static void device_run(void *priv)
enable_irqs(ctx);
vpdma_map_desc_buf(ctx->dev->vpdma, &ctx->desc_list.buf);
- vpdma_submit_descs(ctx->dev->vpdma, &ctx->desc_list);
+ vpdma_submit_descs(ctx->dev->vpdma, &ctx->desc_list, 0);
}
static void dei_error(struct vpe_ctx *ctx)
@@ -1225,6 +1357,7 @@ static irqreturn_t vpe_irq(int irq_vpe, void *data)
struct vb2_v4l2_buffer *s_vb, *d_vb;
unsigned long flags;
u32 irqst0, irqst1;
+ bool list_complete = false;
irqst0 = read_reg(dev, VPE_INT0_STATUS0);
if (irqst0) {
@@ -1257,17 +1390,24 @@ static irqreturn_t vpe_irq(int irq_vpe, void *data)
if (irqst0) {
if (irqst0 & VPE_INT0_LIST0_COMPLETE)
- vpdma_clear_list_stat(ctx->dev->vpdma);
+ vpdma_clear_list_stat(ctx->dev->vpdma, 0, 0);
irqst0 &= ~(VPE_INT0_LIST0_COMPLETE);
+ list_complete = true;
}
if (irqst0 | irqst1) {
- dev_warn(dev->v4l2_dev.dev, "Unexpected interrupt: "
- "INT0_STATUS0 = 0x%08x, INT0_STATUS1 = 0x%08x\n",
+ dev_warn(dev->v4l2_dev.dev, "Unexpected interrupt: INT0_STATUS0 = 0x%08x, INT0_STATUS1 = 0x%08x\n",
irqst0, irqst1);
}
+ /*
+ * Set up the next operation only when the list complete IRQ
+ * occurs; otherwise, skip the following code
+ */
+ if (!list_complete)
+ goto handled;
+
disable_irqs(ctx);
vpdma_unmap_desc_buf(dev->vpdma, &ctx->desc_list.buf);
@@ -1295,7 +1435,7 @@ static irqreturn_t vpe_irq(int irq_vpe, void *data)
d_vb->sequence = ctx->sequence;
d_q_data = &ctx->q_data[Q_DATA_DST];
- if (d_q_data->flags & Q_DATA_INTERLACED) {
+ if (d_q_data->flags & Q_IS_INTERLACED) {
d_vb->field = ctx->field;
if (ctx->field == V4L2_FIELD_BOTTOM) {
ctx->sequence++;
@@ -1309,12 +1449,28 @@ static irqreturn_t vpe_irq(int irq_vpe, void *data)
ctx->sequence++;
}
- if (ctx->deinterlacing)
- s_vb = ctx->src_vbs[2];
+ if (ctx->deinterlacing) {
+ /*
+ * Allow the source buffer to be dequeued only if it won't be
+ * used in the next iteration. All vbs are initialized to the
+ * first buffer and we shift buffers every iteration, so for
+ * the first two iterations no buffer will be dequeued.
+ * This ensures that the driver keeps the (n-2)th, (n-1)th and
+ * (n)th fields when deinterlacing is enabled
+ */
+ if (ctx->src_vbs[2] != ctx->src_vbs[1])
+ s_vb = ctx->src_vbs[2];
+ else
+ s_vb = NULL;
+ }
spin_lock_irqsave(&dev->lock, flags);
- v4l2_m2m_buf_done(s_vb, VB2_BUF_STATE_DONE);
+
+ if (s_vb)
+ v4l2_m2m_buf_done(s_vb, VB2_BUF_STATE_DONE);
+
v4l2_m2m_buf_done(d_vb, VB2_BUF_STATE_DONE);
+
spin_unlock_irqrestore(&dev->lock, flags);
if (ctx->deinterlacing) {
@@ -1322,8 +1478,16 @@ static irqreturn_t vpe_irq(int irq_vpe, void *data)
ctx->src_vbs[1] = ctx->src_vbs[0];
}
+ /*
+ * Since vb2_buf_done has already been called for these
+ * buffers, we can now NULL them out so that we won't try
+ * to clean out stray pointers later on.
+ */
+ ctx->src_vbs[0] = NULL;
+ ctx->dst_vb = NULL;
+
ctx->bufs_completed++;
- if (ctx->bufs_completed < ctx->bufs_per_job) {
+ if (ctx->bufs_completed < ctx->bufs_per_job && job_ready(ctx)) {
device_run(ctx);
goto handled;
}
@@ -1414,7 +1578,7 @@ static int vpe_g_fmt(struct file *file, void *priv, struct v4l2_format *f)
pix->colorspace = s_q_data->colorspace;
}
- pix->num_planes = q_data->fmt->coplanar ? 2 : 1;
+ pix->num_planes = q_data->nplanes;
for (i = 0; i < pix->num_planes; i++) {
pix->plane_fmt[i].bytesperline = q_data->bytesperline[i];
@@ -1430,7 +1594,7 @@ static int __vpe_try_fmt(struct vpe_ctx *ctx, struct v4l2_format *f,
struct v4l2_pix_format_mplane *pix = &f->fmt.pix_mp;
struct v4l2_plane_pix_format *plane_fmt;
unsigned int w_align;
- int i, depth, depth_bytes;
+ int i, depth, depth_bytes, height;
if (!fmt || !(fmt->types & type)) {
vpe_err(ctx->dev, "Fourcc format (0x%08x) invalid.\n",
@@ -1438,7 +1602,8 @@ static int __vpe_try_fmt(struct vpe_ctx *ctx, struct v4l2_format *f,
return -EINVAL;
}
- if (pix->field != V4L2_FIELD_NONE && pix->field != V4L2_FIELD_ALTERNATE)
+ if (pix->field != V4L2_FIELD_NONE && pix->field != V4L2_FIELD_ALTERNATE
+ && pix->field != V4L2_FIELD_SEQ_TB)
pix->field = V4L2_FIELD_NONE;
depth = fmt->vpdma_fmt[VPE_LUMA]->depth;
@@ -1450,28 +1615,53 @@ static int __vpe_try_fmt(struct vpe_ctx *ctx, struct v4l2_format *f,
*/
depth_bytes = depth >> 3;
- if (depth_bytes == 3)
+ if (depth_bytes == 3) {
/*
* if bpp is 3(as in some RGB formats), the pixel width doesn't
* really help in ensuring line stride is 16 byte aligned
*/
w_align = 4;
- else
+ } else {
/*
* for the remainder bpp(4, 2 and 1), the pixel width alignment
* can ensure a line stride alignment of 16 bytes. For example,
* if bpp is 2, then the line stride can be 16 byte aligned if
* the width is 8 byte aligned
*/
- w_align = order_base_2(VPDMA_DESC_ALIGN / depth_bytes);
+
+ /*
+ * HACK: using order_base_2() here causes lots of asm output
+ * errors with smatch, on i386:
+ * ./arch/x86/include/asm/bitops.h:457:22:
+ * warning: asm output is not an lvalue
+ * Perhaps some gcc optimization is doing the wrong thing
+ * there.
+ * Let's get rid of them by doing the calculus on two steps
+ */
+ w_align = roundup_pow_of_two(VPDMA_DESC_ALIGN / depth_bytes);
+ w_align = ilog2(w_align);
+ }
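The two-step replacement is arithmetically identical to order_base_2() for the
power-of-two byte depths involved; a userspace check with stand-ins for the kernel
helpers (VPDMA_DESC_ALIGN = 16 is assumed here, matching the 16-byte line-stride
requirement discussed above):

	#include <stdio.h>

	static unsigned int my_roundup_pow_of_two(unsigned int n)
	{
		unsigned int p = 1;

		while (p < n)
			p <<= 1;
		return p;
	}

	static unsigned int my_ilog2(unsigned int n)
	{
		unsigned int l = 0;

		while (n >>= 1)
			l++;
		return l;
	}

	int main(void)
	{
		unsigned int desc_align = 16;	/* VPDMA_DESC_ALIGN, assumed */
		unsigned int depth_bytes;

		for (depth_bytes = 1; depth_bytes <= 4; depth_bytes <<= 1)
			printf("depth_bytes %u -> w_align %u\n", depth_bytes,
			       my_ilog2(my_roundup_pow_of_two(desc_align / depth_bytes)));
		return 0;	/* prints 4, 3 and 2 */
	}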
v4l_bound_align_image(&pix->width, MIN_W, MAX_W, w_align,
&pix->height, MIN_H, MAX_H, H_ALIGN,
S_ALIGN);
- pix->num_planes = fmt->coplanar ? 2 : 1;
+ if (!pix->num_planes)
+ pix->num_planes = fmt->coplanar ? 2 : 1;
+ else if (pix->num_planes > 1 && !fmt->coplanar)
+ pix->num_planes = 1;
+
pix->pixelformat = fmt->fourcc;
+ /*
+ * For the actual image parameters, we need to consider the field
+ * height of the image for SEQ_TB buffers.
+ */
+ if (pix->field == V4L2_FIELD_SEQ_TB)
+ height = pix->height / 2;
+ else
+ height = pix->height;
+
if (!pix->colorspace) {
if (fmt->fourcc == V4L2_PIX_FMT_RGB24 ||
fmt->fourcc == V4L2_PIX_FMT_BGR24 ||
@@ -1479,7 +1669,7 @@ static int __vpe_try_fmt(struct vpe_ctx *ctx, struct v4l2_format *f,
fmt->fourcc == V4L2_PIX_FMT_BGR32) {
pix->colorspace = V4L2_COLORSPACE_SRGB;
} else {
- if (pix->height > 1280) /* HD */
+ if (height > 1280) /* HD */
pix->colorspace = V4L2_COLORSPACE_REC709;
else /* SD */
pix->colorspace = V4L2_COLORSPACE_SMPTE170M;
@@ -1496,6 +1686,8 @@ static int __vpe_try_fmt(struct vpe_ctx *ctx, struct v4l2_format *f,
else
plane_fmt->bytesperline = pix->width;
+ if (pix->num_planes == 1 && fmt->coplanar)
+ depth += fmt->vpdma_fmt[VPE_CHROMA]->depth;
plane_fmt->sizeimage =
(pix->height * pix->width * depth) >> 3;
@@ -1542,6 +1734,7 @@ static int __vpe_s_fmt(struct vpe_ctx *ctx, struct v4l2_format *f)
q_data->height = pix->height;
q_data->colorspace = pix->colorspace;
q_data->field = pix->field;
+ q_data->nplanes = pix->num_planes;
for (i = 0; i < pix->num_planes; i++) {
plane_fmt = &pix->plane_fmt[i];
@@ -1556,14 +1749,20 @@ static int __vpe_s_fmt(struct vpe_ctx *ctx, struct v4l2_format *f)
q_data->c_rect.height = q_data->height;
if (q_data->field == V4L2_FIELD_ALTERNATE)
- q_data->flags |= Q_DATA_INTERLACED;
+ q_data->flags |= Q_DATA_INTERLACED_ALTERNATE;
+ else if (q_data->field == V4L2_FIELD_SEQ_TB)
+ q_data->flags |= Q_DATA_INTERLACED_SEQ_TB;
else
- q_data->flags &= ~Q_DATA_INTERLACED;
+ q_data->flags &= ~Q_IS_INTERLACED;
+
+ /* the crop height is halved for the case of SEQ_TB buffers */
+ if (q_data->flags & Q_DATA_INTERLACED_SEQ_TB)
+ q_data->c_rect.height /= 2;
vpe_dbg(ctx->dev, "Setting format for type %d, wxh: %dx%d, fmt: %d bpl_y %d",
f->type, q_data->width, q_data->height, q_data->fmt->fourcc,
q_data->bytesperline[VPE_LUMA]);
- if (q_data->fmt->coplanar)
+ if (q_data->nplanes == 2)
vpe_dbg(ctx->dev, " bpl_uv %d\n",
q_data->bytesperline[VPE_CHROMA]);
@@ -1594,6 +1793,7 @@ static int vpe_s_fmt(struct file *file, void *priv, struct v4l2_format *f)
static int __vpe_try_selection(struct vpe_ctx *ctx, struct v4l2_selection *s)
{
struct vpe_q_data *q_data;
+ int height;
if ((s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) &&
(s->type != V4L2_BUF_TYPE_VIDEO_OUTPUT))
@@ -1628,13 +1828,22 @@ static int __vpe_try_selection(struct vpe_ctx *ctx, struct v4l2_selection *s)
return -EINVAL;
}
+ /*
+ * For SEQ_TB buffers, the crop height must not exceed the
+ * field height, as opposed to the buffer height
+ */
+ if (q_data->flags & Q_DATA_INTERLACED_SEQ_TB)
+ height = q_data->height / 2;
+ else
+ height = q_data->height;
+
if (s->r.top < 0 || s->r.left < 0) {
vpe_err(ctx->dev, "negative values for top and left\n");
s->r.top = s->r.left = 0;
}
v4l_bound_align_image(&s->r.width, MIN_W, q_data->width, 1,
- &s->r.height, MIN_H, q_data->height, H_ALIGN, S_ALIGN);
+ &s->r.height, MIN_H, height, H_ALIGN, S_ALIGN);
/* adjust left/top if cropping rectangle is out of bounds */
if (s->r.left + s->r.width > q_data->width)
@@ -1784,6 +1993,7 @@ static const struct v4l2_ioctl_ops vpe_ioctl_ops = {
.vidioc_querybuf = v4l2_m2m_ioctl_querybuf,
.vidioc_qbuf = v4l2_m2m_ioctl_qbuf,
.vidioc_dqbuf = v4l2_m2m_ioctl_dqbuf,
+ .vidioc_expbuf = v4l2_m2m_ioctl_expbuf,
.vidioc_streamon = v4l2_m2m_ioctl_streamon,
.vidioc_streamoff = v4l2_m2m_ioctl_streamoff,
@@ -1804,14 +2014,14 @@ static int vpe_queue_setup(struct vb2_queue *vq,
q_data = get_q_data(ctx, vq->type);
- *nplanes = q_data->fmt->coplanar ? 2 : 1;
+ *nplanes = q_data->nplanes;
for (i = 0; i < *nplanes; i++)
sizes[i] = q_data->sizeimage[i];
vpe_dbg(ctx->dev, "get %d buffer(s) of size %d", *nbuffers,
sizes[VPE_LUMA]);
- if (q_data->fmt->coplanar)
+ if (q_data->nplanes == 2)
vpe_dbg(ctx->dev, " and %d\n", sizes[VPE_CHROMA]);
return 0;
@@ -1827,14 +2037,15 @@ static int vpe_buf_prepare(struct vb2_buffer *vb)
vpe_dbg(ctx->dev, "type: %d\n", vb->vb2_queue->type);
q_data = get_q_data(ctx, vb->vb2_queue->type);
- num_planes = q_data->fmt->coplanar ? 2 : 1;
+ num_planes = q_data->nplanes;
if (vb->vb2_queue->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
- if (!(q_data->flags & Q_DATA_INTERLACED)) {
+ if (!(q_data->flags & Q_IS_INTERLACED)) {
vbuf->field = V4L2_FIELD_NONE;
} else {
if (vbuf->field != V4L2_FIELD_TOP &&
- vbuf->field != V4L2_FIELD_BOTTOM)
+ vbuf->field != V4L2_FIELD_BOTTOM &&
+ vbuf->field != V4L2_FIELD_SEQ_TB)
return -EINVAL;
}
}
@@ -1863,9 +2074,98 @@ static void vpe_buf_queue(struct vb2_buffer *vb)
v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vbuf);
}
+static int check_srcdst_sizes(struct vpe_ctx *ctx)
+{
+ struct vpe_q_data *s_q_data = &ctx->q_data[Q_DATA_SRC];
+ struct vpe_q_data *d_q_data = &ctx->q_data[Q_DATA_DST];
+ unsigned int src_w = s_q_data->c_rect.width;
+ unsigned int src_h = s_q_data->c_rect.height;
+ unsigned int dst_w = d_q_data->c_rect.width;
+ unsigned int dst_h = d_q_data->c_rect.height;
+
+ if (src_w == dst_w && src_h == dst_h)
+ return 0;
+
+ if (src_h <= SC_MAX_PIXEL_HEIGHT &&
+ src_w <= SC_MAX_PIXEL_WIDTH &&
+ dst_h <= SC_MAX_PIXEL_HEIGHT &&
+ dst_w <= SC_MAX_PIXEL_WIDTH)
+ return 0;
+
+ return -1;
+}
+
+static void vpe_return_all_buffers(struct vpe_ctx *ctx, struct vb2_queue *q,
+ enum vb2_buffer_state state)
+{
+ struct vb2_v4l2_buffer *vb;
+ unsigned long flags;
+
+ for (;;) {
+ if (V4L2_TYPE_IS_OUTPUT(q->type))
+ vb = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
+ else
+ vb = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
+ if (!vb)
+ break;
+ spin_lock_irqsave(&ctx->dev->lock, flags);
+ v4l2_m2m_buf_done(vb, state);
+ spin_unlock_irqrestore(&ctx->dev->lock, flags);
+ }
+
+ /*
+ * Cleanup the in-transit vb2 buffers that have been
+ * removed from their respective queue already but for
+ * which processing has not been completed yet.
+ */
+ if (V4L2_TYPE_IS_OUTPUT(q->type)) {
+ spin_lock_irqsave(&ctx->dev->lock, flags);
+
+ if (ctx->src_vbs[2])
+ v4l2_m2m_buf_done(ctx->src_vbs[2], state);
+
+ if (ctx->src_vbs[1] && (ctx->src_vbs[1] != ctx->src_vbs[2]))
+ v4l2_m2m_buf_done(ctx->src_vbs[1], state);
+
+ if (ctx->src_vbs[0] &&
+ (ctx->src_vbs[0] != ctx->src_vbs[1]) &&
+ (ctx->src_vbs[0] != ctx->src_vbs[2]))
+ v4l2_m2m_buf_done(ctx->src_vbs[0], state);
+
+ ctx->src_vbs[2] = NULL;
+ ctx->src_vbs[1] = NULL;
+ ctx->src_vbs[0] = NULL;
+
+ spin_unlock_irqrestore(&ctx->dev->lock, flags);
+ } else {
+ if (ctx->dst_vb) {
+ spin_lock_irqsave(&ctx->dev->lock, flags);
+
+ v4l2_m2m_buf_done(ctx->dst_vb, state);
+ ctx->dst_vb = NULL;
+ spin_unlock_irqrestore(&ctx->dev->lock, flags);
+ }
+ }
+}
+
static int vpe_start_streaming(struct vb2_queue *q, unsigned int count)
{
- /* currently we do nothing here */
+ struct vpe_ctx *ctx = vb2_get_drv_priv(q);
+
+ /* Check if any of the sizes exceed the maximum scaling sizes */
+ if (check_srcdst_sizes(ctx)) {
+ vpe_err(ctx->dev,
+ "Conversion setup failed, check source and destination parameters\n"
+ );
+ vpe_return_all_buffers(ctx, q, VB2_BUF_STATE_QUEUED);
+ return -EINVAL;
+ }
+
+ if (ctx->deinterlacing)
+ config_edi_input_mode(ctx, 0x0);
+
+ if (ctx->sequence != 0)
+ set_srcdst_params(ctx);
return 0;
}
@@ -1876,6 +2176,8 @@ static void vpe_stop_streaming(struct vb2_queue *q)
vpe_dump_regs(ctx->dev);
vpdma_dump_regs(ctx->dev->vpdma);
+
+ vpe_return_all_buffers(ctx, q, VB2_BUF_STATE_ERROR);
}
static const struct vb2_ops vpe_qops = {
@@ -1995,6 +2297,7 @@ static int vpe_open(struct file *file)
s_q_data->fmt = &vpe_formats[2];
s_q_data->width = 1920;
s_q_data->height = 1080;
+ s_q_data->nplanes = 1;
s_q_data->bytesperline[VPE_LUMA] = (s_q_data->width *
s_q_data->fmt->vpdma_fmt[VPE_LUMA]->depth) >> 3;
s_q_data->sizeimage[VPE_LUMA] = (s_q_data->bytesperline[VPE_LUMA] *
@@ -2068,11 +2371,13 @@ static int vpe_release(struct file *file)
vpe_dbg(dev, "releasing instance %p\n", ctx);
mutex_lock(&dev->dev_mutex);
- free_vbs(ctx);
free_mv_buffers(ctx);
vpdma_free_desc_list(&ctx->desc_list);
vpdma_free_desc_buf(&ctx->mmr_adb);
+ vpdma_free_desc_buf(&ctx->sc_coeff_v);
+ vpdma_free_desc_buf(&ctx->sc_coeff_h);
+
v4l2_fh_del(&ctx->fh);
v4l2_fh_exit(&ctx->fh);
v4l2_ctrl_handler_free(&ctx->hdl);
@@ -2235,23 +2540,22 @@ static int vpe_probe(struct platform_device *pdev)
vpe_top_vpdma_reset(dev);
- dev->sc = sc_create(pdev);
+ dev->sc = sc_create(pdev, "sc");
if (IS_ERR(dev->sc)) {
ret = PTR_ERR(dev->sc);
goto runtime_put;
}
- dev->csc = csc_create(pdev);
+ dev->csc = csc_create(pdev, "csc");
if (IS_ERR(dev->csc)) {
ret = PTR_ERR(dev->csc);
goto runtime_put;
}
- dev->vpdma = vpdma_create(pdev, vpe_fw_cb);
- if (IS_ERR(dev->vpdma)) {
- ret = PTR_ERR(dev->vpdma);
+ dev->vpdma = &dev->vpdma_data;
+ ret = vpdma_create(pdev, dev->vpdma, vpe_fw_cb);
+ if (ret)
goto runtime_put;
- }
return 0;
@@ -2290,6 +2594,7 @@ static const struct of_device_id vpe_of_match[] = {
},
{},
};
+MODULE_DEVICE_TABLE(of, vpe_of_match);
#endif
static struct platform_driver vpe_pdrv = {
diff --git a/drivers/media/platform/via-camera.c b/drivers/media/platform/via-camera.c
index 7ca12deba89c..e16f70a5df1d 100644
--- a/drivers/media/platform/via-camera.c
+++ b/drivers/media/platform/via-camera.c
@@ -39,15 +39,12 @@ MODULE_LICENSE("GPL");
static bool flip_image;
module_param(flip_image, bool, 0444);
MODULE_PARM_DESC(flip_image,
- "If set, the sensor will be instructed to flip the image "
- "vertically.");
+ "If set, the sensor will be instructed to flip the image vertically.");
static bool override_serial;
module_param(override_serial, bool, 0444);
MODULE_PARM_DESC(override_serial,
- "The camera driver will normally refuse to load if "
- "the XO 1.5 serial port is enabled. Set this option "
- "to force-enable the camera.");
+ "The camera driver will normally refuse to load if the XO 1.5 serial port is enabled. Set this option to force-enable the camera.");
/*
* The structure describing our camera.
diff --git a/drivers/media/platform/vivid/Kconfig b/drivers/media/platform/vivid/Kconfig
index 8e6918c5c87c..db0dd19d227a 100644
--- a/drivers/media/platform/vivid/Kconfig
+++ b/drivers/media/platform/vivid/Kconfig
@@ -25,7 +25,7 @@ config VIDEO_VIVID
config VIDEO_VIVID_CEC
bool "Enable CEC emulation support"
- depends on VIDEO_VIVID && MEDIA_CEC
+ depends on VIDEO_VIVID && MEDIA_CEC_SUPPORT
---help---
When selected the vivid module will emulate the optional
HDMI CEC feature.
diff --git a/drivers/media/platform/vivid/vivid-cec.c b/drivers/media/platform/vivid/vivid-cec.c
index f9f878b8e0a7..cb4933592a3c 100644
--- a/drivers/media/platform/vivid/vivid-cec.c
+++ b/drivers/media/platform/vivid/vivid-cec.c
@@ -216,7 +216,6 @@ static const struct cec_adap_ops vivid_cec_adap_ops = {
struct cec_adapter *vivid_cec_alloc_adap(struct vivid_dev *dev,
unsigned int idx,
- struct device *parent,
bool is_source)
{
char name[sizeof(dev->vid_out_dev.name) + 2];
@@ -227,5 +226,5 @@ struct cec_adapter *vivid_cec_alloc_adap(struct vivid_dev *dev,
is_source ? dev->vid_out_dev.name : dev->vid_cap_dev.name,
idx);
return cec_allocate_adapter(&vivid_cec_adap_ops, dev,
- name, caps, 1, parent);
+ name, caps, 1);
}
diff --git a/drivers/media/platform/vivid/vivid-cec.h b/drivers/media/platform/vivid/vivid-cec.h
index 97892afa6b3b..3926b1422777 100644
--- a/drivers/media/platform/vivid/vivid-cec.h
+++ b/drivers/media/platform/vivid/vivid-cec.h
@@ -20,7 +20,6 @@
#ifdef CONFIG_VIDEO_VIVID_CEC
struct cec_adapter *vivid_cec_alloc_adap(struct vivid_dev *dev,
unsigned int idx,
- struct device *parent,
bool is_source);
void vivid_cec_bus_free_work(struct vivid_dev *dev);
diff --git a/drivers/media/platform/vivid/vivid-core.c b/drivers/media/platform/vivid/vivid-core.c
index 5464fefbaab9..51e37812ec98 100644
--- a/drivers/media/platform/vivid/vivid-core.c
+++ b/drivers/media/platform/vivid/vivid-core.c
@@ -183,7 +183,7 @@ static const u8 vivid_hdmi_edid[256] = {
0x5e, 0x5d, 0x10, 0x1f, 0x04, 0x13, 0x22, 0x21,
0x20, 0x05, 0x14, 0x02, 0x11, 0x01, 0x23, 0x09,
0x07, 0x07, 0x83, 0x01, 0x00, 0x00, 0x6d, 0x03,
- 0x0c, 0x00, 0x10, 0x00, 0x00, 0x78, 0x21, 0x00,
+ 0x0c, 0x00, 0x10, 0x00, 0x00, 0x3c, 0x21, 0x00,
0x60, 0x01, 0x02, 0x03, 0x67, 0xd8, 0x5d, 0xc4,
0x01, 0x78, 0x00, 0x00, 0xe2, 0x00, 0xea, 0xe3,
0x05, 0x00, 0x00, 0xe3, 0x06, 0x01, 0x00, 0x4d,
@@ -194,7 +194,7 @@ static const u8 vivid_hdmi_edid[256] = {
0x00, 0x00, 0x1a, 0x1a, 0x1d, 0x00, 0x80, 0x51,
0xd0, 0x1c, 0x20, 0x40, 0x80, 0x35, 0x00, 0xc0,
0x1c, 0x32, 0x00, 0x00, 0x1c, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x27,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x63,
};
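Both EDID byte changes in this hunk are consistent with each other: the first appears
to be the HDMI VSDB Max_TMDS_Clock field (units of 5 MHz, so 0x78 = 600 MHz becomes
0x3c = 300 MHz), and since an EDID block must sum to zero modulo 256, the trailing
checksum byte moves by the same amount in the opposite direction:

	#include <stdio.h>

	int main(void)
	{
		unsigned char old_byte = 0x78, new_byte = 0x3c;
		unsigned char old_csum = 0x27;

		/* lowering a byte by delta raises the checksum by delta (mod 256) */
		unsigned char new_csum = (unsigned char)(old_csum + old_byte - new_byte);

		printf("new checksum = 0x%02x\n", new_csum);	/* 0x63, as in the hunk */
		return 0;
	}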
static int vidioc_querycap(struct file *file, void *priv,
@@ -1167,12 +1167,12 @@ static int vivid_create_instance(struct platform_device *pdev, int inst)
if (in_type_counter[HDMI]) {
struct cec_adapter *adap;
- adap = vivid_cec_alloc_adap(dev, 0, &pdev->dev, false);
+ adap = vivid_cec_alloc_adap(dev, 0, false);
ret = PTR_ERR_OR_ZERO(adap);
if (ret < 0)
goto unreg_dev;
dev->cec_rx_adap = adap;
- ret = cec_register_adapter(adap);
+ ret = cec_register_adapter(adap, &pdev->dev);
if (ret < 0) {
cec_delete_adapter(adap);
dev->cec_rx_adap = NULL;
@@ -1222,13 +1222,12 @@ static int vivid_create_instance(struct platform_device *pdev, int inst)
if (dev->output_type[i] != HDMI)
continue;
dev->cec_output2bus_map[i] = bus_cnt;
- adap = vivid_cec_alloc_adap(dev, bus_cnt,
- &pdev->dev, true);
+ adap = vivid_cec_alloc_adap(dev, bus_cnt, true);
ret = PTR_ERR_OR_ZERO(adap);
if (ret < 0)
goto unreg_dev;
dev->cec_tx_adap[bus_cnt] = adap;
- ret = cec_register_adapter(adap);
+ ret = cec_register_adapter(adap, &pdev->dev);
if (ret < 0) {
cec_delete_adapter(adap);
dev->cec_tx_adap[bus_cnt] = NULL;
diff --git a/drivers/media/platform/vivid/vivid-core.h b/drivers/media/platform/vivid/vivid-core.h
index a7daa40d0a49..5cdf95bdc4d1 100644
--- a/drivers/media/platform/vivid/vivid-core.h
+++ b/drivers/media/platform/vivid/vivid-core.h
@@ -80,7 +80,7 @@ extern unsigned vivid_debug;
struct vivid_fmt {
u32 fourcc; /* v4l2 format id */
- bool is_yuv;
+ enum tgp_color_enc color_enc;
bool can_do_overlay;
u8 vdownsampling[TPG_MAX_PLANES];
u32 alpha_mask;
@@ -346,6 +346,7 @@ struct vivid_dev {
struct v4l2_dv_timings dv_timings_out;
u32 colorspace_out;
u32 ycbcr_enc_out;
+ u32 hsv_enc_out;
u32 quantization_out;
u32 xfer_func_out;
u32 service_set_out;
diff --git a/drivers/media/platform/vivid/vivid-ctrls.c b/drivers/media/platform/vivid/vivid-ctrls.c
index aceb38d9f7e7..34731f71cc00 100644
--- a/drivers/media/platform/vivid/vivid-ctrls.c
+++ b/drivers/media/platform/vivid/vivid-ctrls.c
@@ -79,6 +79,7 @@
#define VIVID_CID_MAX_EDID_BLOCKS (VIVID_CID_VIVID_BASE + 40)
#define VIVID_CID_PERCENTAGE_FILL (VIVID_CID_VIVID_BASE + 41)
#define VIVID_CID_REDUCED_FPS (VIVID_CID_VIVID_BASE + 42)
+#define VIVID_CID_HSV_ENC (VIVID_CID_VIVID_BASE + 43)
#define VIVID_CID_STD_SIGNAL_MODE (VIVID_CID_VIVID_BASE + 60)
#define VIVID_CID_STANDARD (VIVID_CID_VIVID_BASE + 61)
@@ -378,6 +379,14 @@ static int vivid_vid_cap_s_ctrl(struct v4l2_ctrl *ctrl)
vivid_send_source_change(dev, HDMI);
vivid_send_source_change(dev, WEBCAM);
break;
+ case VIVID_CID_HSV_ENC:
+ tpg_s_hsv_enc(&dev->tpg, ctrl->val ? V4L2_HSV_ENC_256 :
+ V4L2_HSV_ENC_180);
+ vivid_send_source_change(dev, TV);
+ vivid_send_source_change(dev, SVID);
+ vivid_send_source_change(dev, HDMI);
+ vivid_send_source_change(dev, WEBCAM);
+ break;
case VIVID_CID_QUANTIZATION:
tpg_s_quantization(&dev->tpg, ctrl->val);
vivid_send_source_change(dev, TV);
@@ -778,6 +787,21 @@ static const struct v4l2_ctrl_config vivid_ctrl_ycbcr_enc = {
.qmenu = vivid_ctrl_ycbcr_enc_strings,
};
+static const char * const vivid_ctrl_hsv_enc_strings[] = {
+ "Hue 0-179",
+ "Hue 0-256",
+ NULL,
+};
+
+static const struct v4l2_ctrl_config vivid_ctrl_hsv_enc = {
+ .ops = &vivid_vid_cap_ctrl_ops,
+ .id = VIVID_CID_HSV_ENC,
+ .name = "HSV Encoding",
+ .type = V4L2_CTRL_TYPE_MENU,
+ .max = ARRAY_SIZE(vivid_ctrl_hsv_enc_strings) - 2,
+ .qmenu = vivid_ctrl_hsv_enc_strings,
+};
+
static const char * const vivid_ctrl_quantization_strings[] = {
"Default",
"Full Range",
@@ -1454,6 +1478,7 @@ int vivid_create_controls(struct vivid_dev *dev, bool show_ccs_cap,
&vivid_ctrl_colorspace, NULL);
v4l2_ctrl_new_custom(hdl_vid_cap, &vivid_ctrl_xfer_func, NULL);
v4l2_ctrl_new_custom(hdl_vid_cap, &vivid_ctrl_ycbcr_enc, NULL);
+ v4l2_ctrl_new_custom(hdl_vid_cap, &vivid_ctrl_hsv_enc, NULL);
v4l2_ctrl_new_custom(hdl_vid_cap, &vivid_ctrl_quantization, NULL);
v4l2_ctrl_new_custom(hdl_vid_cap, &vivid_ctrl_alpha_mode, NULL);
}
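
One detail worth calling out in the new control: a V4L2 menu's qmenu array is NULL-terminated, so it carries ARRAY_SIZE() - 1 real entries and the highest valid menu index is ARRAY_SIZE() - 2, which is how .max ends up at 1 here. A sketch under those assumptions; the demo_* names and the custom CID value are hypothetical:

#include <linux/kernel.h>
#include <media/v4l2-ctrls.h>

/* A NULL-terminated qmenu holds ARRAY_SIZE() - 1 real entries, so the
 * highest valid menu index (.max) is ARRAY_SIZE() - 2, i.e. 1 here. */
static const char * const demo_menu[] = {
	"Hue 0-179",
	"Hue 0-255",
	NULL,
};

static const struct v4l2_ctrl_config demo_menu_cfg = {
	.id	= V4L2_CID_USER_BASE | 0x1043,	/* placeholder custom CID */
	.name	= "Demo HSV Encoding",
	.type	= V4L2_CTRL_TYPE_MENU,
	.max	= ARRAY_SIZE(demo_menu) - 2,
	.qmenu	= demo_menu,
};
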
diff --git a/drivers/media/platform/vivid/vivid-vid-cap.c b/drivers/media/platform/vivid/vivid-vid-cap.c
index d5c84ecf2027..c52dd8787794 100644
--- a/drivers/media/platform/vivid/vivid-vid-cap.c
+++ b/drivers/media/platform/vivid/vivid-vid-cap.c
@@ -510,6 +510,13 @@ static unsigned vivid_ycbcr_enc_cap(struct vivid_dev *dev)
return dev->ycbcr_enc_out;
}
+static unsigned int vivid_hsv_enc_cap(struct vivid_dev *dev)
+{
+ if (!dev->loop_video || vivid_is_webcam(dev) || vivid_is_tv_cap(dev))
+ return tpg_g_hsv_enc(&dev->tpg);
+ return dev->hsv_enc_out;
+}
+
static unsigned vivid_quantization_cap(struct vivid_dev *dev)
{
if (!dev->loop_video || vivid_is_webcam(dev) || vivid_is_tv_cap(dev))
@@ -530,7 +537,10 @@ int vivid_g_fmt_vid_cap(struct file *file, void *priv,
mp->pixelformat = dev->fmt_cap->fourcc;
mp->colorspace = vivid_colorspace_cap(dev);
mp->xfer_func = vivid_xfer_func_cap(dev);
- mp->ycbcr_enc = vivid_ycbcr_enc_cap(dev);
+ if (dev->fmt_cap->color_enc == TGP_COLOR_ENC_HSV)
+ mp->hsv_enc = vivid_hsv_enc_cap(dev);
+ else
+ mp->ycbcr_enc = vivid_ycbcr_enc_cap(dev);
mp->quantization = vivid_quantization_cap(dev);
mp->num_planes = dev->fmt_cap->buffers;
for (p = 0; p < mp->num_planes; p++) {
@@ -618,7 +628,10 @@ int vivid_try_fmt_vid_cap(struct file *file, void *priv,
memset(pfmt[p].reserved, 0, sizeof(pfmt[p].reserved));
}
mp->colorspace = vivid_colorspace_cap(dev);
- mp->ycbcr_enc = vivid_ycbcr_enc_cap(dev);
+ if (fmt->color_enc == TGP_COLOR_ENC_HSV)
+ mp->hsv_enc = vivid_hsv_enc_cap(dev);
+ else
+ mp->ycbcr_enc = vivid_ycbcr_enc_cap(dev);
mp->xfer_func = vivid_xfer_func_cap(dev);
mp->quantization = vivid_quantization_cap(dev);
memset(mp->reserved, 0, sizeof(mp->reserved));
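
The reason g_fmt/try_fmt set exactly one of ycbcr_enc and hsv_enc is that the two fields live in an anonymous union in the v4l2_pix_format structures, so a single store covers both; the "Also copies hsv_enc" comments added to fmt_sp2mp()/fmt_sp2mp_func() further below rely on the same aliasing. A minimal sketch of that assumption; demo_fill_enc() is illustrative:

#include <linux/types.h>
#include <linux/videodev2.h>

/* Sketch only: ycbcr_enc and hsv_enc share storage in
 * struct v4l2_pix_format_mplane, so one store covers both fields;
 * drivers simply pick the name that matches the pixel format. */
static void demo_fill_enc(struct v4l2_pix_format_mplane *mp, bool is_hsv)
{
	if (is_hsv)
		mp->hsv_enc = V4L2_HSV_ENC_180;
	else
		mp->ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT;
	/* Either branch: mp->ycbcr_enc and mp->hsv_enc now alias. */
}
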
diff --git a/drivers/media/platform/vivid/vivid-vid-common.c b/drivers/media/platform/vivid/vivid-vid-common.c
index fcda3ae4e6b0..5fc010f6ce67 100644
--- a/drivers/media/platform/vivid/vivid-vid-common.c
+++ b/drivers/media/platform/vivid/vivid-vid-common.c
@@ -48,7 +48,7 @@ struct vivid_fmt vivid_formats[] = {
.fourcc = V4L2_PIX_FMT_YUYV,
.vdownsampling = { 1 },
.bit_depth = { 16 },
- .is_yuv = true,
+ .color_enc = TGP_COLOR_ENC_YCBCR,
.planes = 1,
.buffers = 1,
.data_offset = { PLANE0_DATA_OFFSET },
@@ -57,7 +57,7 @@ struct vivid_fmt vivid_formats[] = {
.fourcc = V4L2_PIX_FMT_UYVY,
.vdownsampling = { 1 },
.bit_depth = { 16 },
- .is_yuv = true,
+ .color_enc = TGP_COLOR_ENC_YCBCR,
.planes = 1,
.buffers = 1,
},
@@ -65,7 +65,7 @@ struct vivid_fmt vivid_formats[] = {
.fourcc = V4L2_PIX_FMT_YVYU,
.vdownsampling = { 1 },
.bit_depth = { 16 },
- .is_yuv = true,
+ .color_enc = TGP_COLOR_ENC_YCBCR,
.planes = 1,
.buffers = 1,
},
@@ -73,7 +73,7 @@ struct vivid_fmt vivid_formats[] = {
.fourcc = V4L2_PIX_FMT_VYUY,
.vdownsampling = { 1 },
.bit_depth = { 16 },
- .is_yuv = true,
+ .color_enc = TGP_COLOR_ENC_YCBCR,
.planes = 1,
.buffers = 1,
},
@@ -81,7 +81,7 @@ struct vivid_fmt vivid_formats[] = {
.fourcc = V4L2_PIX_FMT_YUV422P,
.vdownsampling = { 1, 1, 1 },
.bit_depth = { 8, 4, 4 },
- .is_yuv = true,
+ .color_enc = TGP_COLOR_ENC_YCBCR,
.planes = 3,
.buffers = 1,
},
@@ -89,7 +89,7 @@ struct vivid_fmt vivid_formats[] = {
.fourcc = V4L2_PIX_FMT_YUV420,
.vdownsampling = { 1, 2, 2 },
.bit_depth = { 8, 4, 4 },
- .is_yuv = true,
+ .color_enc = TGP_COLOR_ENC_YCBCR,
.planes = 3,
.buffers = 1,
},
@@ -97,7 +97,7 @@ struct vivid_fmt vivid_formats[] = {
.fourcc = V4L2_PIX_FMT_YVU420,
.vdownsampling = { 1, 2, 2 },
.bit_depth = { 8, 4, 4 },
- .is_yuv = true,
+ .color_enc = TGP_COLOR_ENC_YCBCR,
.planes = 3,
.buffers = 1,
},
@@ -105,7 +105,7 @@ struct vivid_fmt vivid_formats[] = {
.fourcc = V4L2_PIX_FMT_NV12,
.vdownsampling = { 1, 2 },
.bit_depth = { 8, 8 },
- .is_yuv = true,
+ .color_enc = TGP_COLOR_ENC_YCBCR,
.planes = 2,
.buffers = 1,
},
@@ -113,7 +113,7 @@ struct vivid_fmt vivid_formats[] = {
.fourcc = V4L2_PIX_FMT_NV21,
.vdownsampling = { 1, 2 },
.bit_depth = { 8, 8 },
- .is_yuv = true,
+ .color_enc = TGP_COLOR_ENC_YCBCR,
.planes = 2,
.buffers = 1,
},
@@ -121,7 +121,7 @@ struct vivid_fmt vivid_formats[] = {
.fourcc = V4L2_PIX_FMT_NV16,
.vdownsampling = { 1, 1 },
.bit_depth = { 8, 8 },
- .is_yuv = true,
+ .color_enc = TGP_COLOR_ENC_YCBCR,
.planes = 2,
.buffers = 1,
},
@@ -129,7 +129,7 @@ struct vivid_fmt vivid_formats[] = {
.fourcc = V4L2_PIX_FMT_NV61,
.vdownsampling = { 1, 1 },
.bit_depth = { 8, 8 },
- .is_yuv = true,
+ .color_enc = TGP_COLOR_ENC_YCBCR,
.planes = 2,
.buffers = 1,
},
@@ -137,7 +137,7 @@ struct vivid_fmt vivid_formats[] = {
.fourcc = V4L2_PIX_FMT_NV24,
.vdownsampling = { 1, 1 },
.bit_depth = { 8, 16 },
- .is_yuv = true,
+ .color_enc = TGP_COLOR_ENC_YCBCR,
.planes = 2,
.buffers = 1,
},
@@ -145,7 +145,7 @@ struct vivid_fmt vivid_formats[] = {
.fourcc = V4L2_PIX_FMT_NV42,
.vdownsampling = { 1, 1 },
.bit_depth = { 8, 16 },
- .is_yuv = true,
+ .color_enc = TGP_COLOR_ENC_YCBCR,
.planes = 2,
.buffers = 1,
},
@@ -184,7 +184,7 @@ struct vivid_fmt vivid_formats[] = {
.fourcc = V4L2_PIX_FMT_GREY,
.vdownsampling = { 1 },
.bit_depth = { 8 },
- .is_yuv = true,
+ .color_enc = TGP_COLOR_ENC_LUMA,
.planes = 1,
.buffers = 1,
},
@@ -192,7 +192,7 @@ struct vivid_fmt vivid_formats[] = {
.fourcc = V4L2_PIX_FMT_Y16,
.vdownsampling = { 1 },
.bit_depth = { 16 },
- .is_yuv = true,
+ .color_enc = TGP_COLOR_ENC_LUMA,
.planes = 1,
.buffers = 1,
},
@@ -200,7 +200,7 @@ struct vivid_fmt vivid_formats[] = {
.fourcc = V4L2_PIX_FMT_Y16_BE,
.vdownsampling = { 1 },
.bit_depth = { 16 },
- .is_yuv = true,
+ .color_enc = TGP_COLOR_ENC_LUMA,
.planes = 1,
.buffers = 1,
},
@@ -445,6 +445,22 @@ struct vivid_fmt vivid_formats[] = {
.planes = 1,
.buffers = 1,
},
+ {
+ .fourcc = V4L2_PIX_FMT_HSV24, /* HSV 24bits */
+ .color_enc = TGP_COLOR_ENC_HSV,
+ .vdownsampling = { 1 },
+ .bit_depth = { 24 },
+ .planes = 1,
+ .buffers = 1,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_HSV32, /* HSV 32bits */
+ .color_enc = TGP_COLOR_ENC_HSV,
+ .vdownsampling = { 1 },
+ .bit_depth = { 32 },
+ .planes = 1,
+ .buffers = 1,
+ },
/* Multiplanar formats */
@@ -452,7 +468,7 @@ struct vivid_fmt vivid_formats[] = {
.fourcc = V4L2_PIX_FMT_NV16M,
.vdownsampling = { 1, 1 },
.bit_depth = { 8, 8 },
- .is_yuv = true,
+ .color_enc = TGP_COLOR_ENC_YCBCR,
.planes = 2,
.buffers = 2,
.data_offset = { PLANE0_DATA_OFFSET, 0 },
@@ -461,7 +477,7 @@ struct vivid_fmt vivid_formats[] = {
.fourcc = V4L2_PIX_FMT_NV61M,
.vdownsampling = { 1, 1 },
.bit_depth = { 8, 8 },
- .is_yuv = true,
+ .color_enc = TGP_COLOR_ENC_YCBCR,
.planes = 2,
.buffers = 2,
.data_offset = { 0, PLANE0_DATA_OFFSET },
@@ -470,7 +486,7 @@ struct vivid_fmt vivid_formats[] = {
.fourcc = V4L2_PIX_FMT_YUV420M,
.vdownsampling = { 1, 2, 2 },
.bit_depth = { 8, 4, 4 },
- .is_yuv = true,
+ .color_enc = TGP_COLOR_ENC_YCBCR,
.planes = 3,
.buffers = 3,
},
@@ -478,7 +494,7 @@ struct vivid_fmt vivid_formats[] = {
.fourcc = V4L2_PIX_FMT_YVU420M,
.vdownsampling = { 1, 2, 2 },
.bit_depth = { 8, 4, 4 },
- .is_yuv = true,
+ .color_enc = TGP_COLOR_ENC_YCBCR,
.planes = 3,
.buffers = 3,
},
@@ -486,7 +502,7 @@ struct vivid_fmt vivid_formats[] = {
.fourcc = V4L2_PIX_FMT_NV12M,
.vdownsampling = { 1, 2 },
.bit_depth = { 8, 8 },
- .is_yuv = true,
+ .color_enc = TGP_COLOR_ENC_YCBCR,
.planes = 2,
.buffers = 2,
},
@@ -494,7 +510,7 @@ struct vivid_fmt vivid_formats[] = {
.fourcc = V4L2_PIX_FMT_NV21M,
.vdownsampling = { 1, 2 },
.bit_depth = { 8, 8 },
- .is_yuv = true,
+ .color_enc = TGP_COLOR_ENC_YCBCR,
.planes = 2,
.buffers = 2,
},
@@ -502,7 +518,7 @@ struct vivid_fmt vivid_formats[] = {
.fourcc = V4L2_PIX_FMT_YUV422M,
.vdownsampling = { 1, 1, 1 },
.bit_depth = { 8, 4, 4 },
- .is_yuv = true,
+ .color_enc = TGP_COLOR_ENC_YCBCR,
.planes = 3,
.buffers = 3,
},
@@ -510,7 +526,7 @@ struct vivid_fmt vivid_formats[] = {
.fourcc = V4L2_PIX_FMT_YVU422M,
.vdownsampling = { 1, 1, 1 },
.bit_depth = { 8, 4, 4 },
- .is_yuv = true,
+ .color_enc = TGP_COLOR_ENC_YCBCR,
.planes = 3,
.buffers = 3,
},
@@ -518,7 +534,7 @@ struct vivid_fmt vivid_formats[] = {
.fourcc = V4L2_PIX_FMT_YUV444M,
.vdownsampling = { 1, 1, 1 },
.bit_depth = { 8, 8, 8 },
- .is_yuv = true,
+ .color_enc = TGP_COLOR_ENC_YCBCR,
.planes = 3,
.buffers = 3,
},
@@ -526,7 +542,7 @@ struct vivid_fmt vivid_formats[] = {
.fourcc = V4L2_PIX_FMT_YVU444M,
.vdownsampling = { 1, 1, 1 },
.bit_depth = { 8, 8, 8 },
- .is_yuv = true,
+ .color_enc = TGP_COLOR_ENC_YCBCR,
.planes = 3,
.buffers = 3,
},
@@ -616,6 +632,7 @@ void fmt_sp2mp(const struct v4l2_format *sp_fmt, struct v4l2_format *mp_fmt)
mp->field = pix->field;
mp->colorspace = pix->colorspace;
mp->xfer_func = pix->xfer_func;
+ /* Also copies hsv_enc */
mp->ycbcr_enc = pix->ycbcr_enc;
mp->quantization = pix->quantization;
mp->num_planes = 1;
@@ -645,6 +662,7 @@ int fmt_sp2mp_func(struct file *file, void *priv,
pix->field = mp->field;
pix->colorspace = mp->colorspace;
pix->xfer_func = mp->xfer_func;
+ /* Also copies hsv_enc */
pix->ycbcr_enc = mp->ycbcr_enc;
pix->quantization = mp->quantization;
pix->sizeimage = ppix->sizeimage;
diff --git a/drivers/media/platform/vivid/vivid-vid-out.c b/drivers/media/platform/vivid/vivid-vid-out.c
index dd609eea4753..7ba52ee98371 100644
--- a/drivers/media/platform/vivid/vivid-vid-out.c
+++ b/drivers/media/platform/vivid/vivid-vid-out.c
@@ -256,6 +256,7 @@ void vivid_update_format_out(struct vivid_dev *dev)
}
dev->xfer_func_out = V4L2_XFER_FUNC_DEFAULT;
dev->ycbcr_enc_out = V4L2_YCBCR_ENC_DEFAULT;
+ dev->hsv_enc_out = V4L2_HSV_ENC_180;
dev->quantization_out = V4L2_QUANTIZATION_DEFAULT;
dev->compose_out = dev->sink_rect;
dev->compose_bounds_out = dev->sink_rect;
diff --git a/drivers/media/platform/vsp1/vsp1_drv.c b/drivers/media/platform/vsp1/vsp1_drv.c
index 57c713a4e1df..aa237b48ad55 100644
--- a/drivers/media/platform/vsp1/vsp1_drv.c
+++ b/drivers/media/platform/vsp1/vsp1_drv.c
@@ -770,6 +770,7 @@ static const struct of_device_id vsp1_of_match[] = {
{ .compatible = "renesas,vsp2" },
{ },
};
+MODULE_DEVICE_TABLE(of, vsp1_of_match);
static struct platform_driver vsp1_platform_driver = {
.probe = vsp1_probe,
diff --git a/drivers/media/platform/vsp1/vsp1_pipe.c b/drivers/media/platform/vsp1/vsp1_pipe.c
index 756ca4ea7668..280ba0804699 100644
--- a/drivers/media/platform/vsp1/vsp1_pipe.c
+++ b/drivers/media/platform/vsp1/vsp1_pipe.c
@@ -78,6 +78,14 @@ static const struct vsp1_format_info vsp1_video_formats[] = {
VI6_FMT_ARGB_8888, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS |
VI6_RPF_DSWAP_P_WDS | VI6_RPF_DSWAP_P_BTS,
1, { 32, 0, 0 }, false, false, 1, 1, false },
+ { V4L2_PIX_FMT_HSV24, MEDIA_BUS_FMT_AHSV8888_1X32,
+ VI6_FMT_RGB_888, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS |
+ VI6_RPF_DSWAP_P_WDS | VI6_RPF_DSWAP_P_BTS,
+ 1, { 24, 0, 0 }, false, false, 1, 1, false },
+ { V4L2_PIX_FMT_HSV32, MEDIA_BUS_FMT_AHSV8888_1X32,
+ VI6_FMT_ARGB_8888, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS |
+ VI6_RPF_DSWAP_P_WDS | VI6_RPF_DSWAP_P_BTS,
+ 1, { 32, 0, 0 }, false, false, 1, 1, false },
{ V4L2_PIX_FMT_UYVY, MEDIA_BUS_FMT_AYUV8_1X32,
VI6_FMT_YUYV_422, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS |
VI6_RPF_DSWAP_P_WDS | VI6_RPF_DSWAP_P_BTS,
diff --git a/drivers/media/platform/vsp1/vsp1_rwpf.c b/drivers/media/platform/vsp1/vsp1_rwpf.c
index 66e4d7ea31d6..04104ef28fb5 100644
--- a/drivers/media/platform/vsp1/vsp1_rwpf.c
+++ b/drivers/media/platform/vsp1/vsp1_rwpf.c
@@ -37,6 +37,7 @@ static int vsp1_rwpf_enum_mbus_code(struct v4l2_subdev *subdev,
{
static const unsigned int codes[] = {
MEDIA_BUS_FMT_ARGB8888_1X32,
+ MEDIA_BUS_FMT_AHSV8888_1X32,
MEDIA_BUS_FMT_AYUV8_1X32,
};
@@ -78,6 +79,7 @@ static int vsp1_rwpf_set_format(struct v4l2_subdev *subdev,
/* Default to YUV if the requested format is not supported. */
if (fmt->format.code != MEDIA_BUS_FMT_ARGB8888_1X32 &&
+ fmt->format.code != MEDIA_BUS_FMT_AHSV8888_1X32 &&
fmt->format.code != MEDIA_BUS_FMT_AYUV8_1X32)
fmt->format.code = MEDIA_BUS_FMT_AYUV8_1X32;
diff --git a/drivers/media/platform/vsp1/vsp1_video.c b/drivers/media/platform/vsp1/vsp1_video.c
index d351b9c768d2..41e8b096dab8 100644
--- a/drivers/media/platform/vsp1/vsp1_video.c
+++ b/drivers/media/platform/vsp1/vsp1_video.c
@@ -124,6 +124,11 @@ static int __vsp1_video_try_format(struct vsp1_video *video,
pix->pixelformat = info->fourcc;
pix->colorspace = V4L2_COLORSPACE_SRGB;
pix->field = V4L2_FIELD_NONE;
+
+ if (info->fourcc == V4L2_PIX_FMT_HSV24 ||
+ info->fourcc == V4L2_PIX_FMT_HSV32)
+ pix->hsv_enc = V4L2_HSV_ENC_256;
+
memset(pix->reserved, 0, sizeof(pix->reserved));
/* Align the width and height for YUV 4:2:2 and 4:2:0 formats. */
diff --git a/drivers/media/radio/radio-gemtek.c b/drivers/media/radio/radio-gemtek.c
index cff1eb144a5c..ca051ccbc3e4 100644
--- a/drivers/media/radio/radio-gemtek.c
+++ b/drivers/media/radio/radio-gemtek.c
@@ -67,14 +67,10 @@ module_param(probe, bool, 0444);
MODULE_PARM_DESC(probe, "Enable automatic device probing.");
module_param(hardmute, bool, 0644);
-MODULE_PARM_DESC(hardmute, "Enable 'hard muting' by shutting down PLL, may "
- "reduce static noise.");
+MODULE_PARM_DESC(hardmute, "Enable 'hard muting' by shutting down PLL, may reduce static noise.");
module_param_array(io, int, NULL, 0444);
-MODULE_PARM_DESC(io, "Force I/O ports for the GemTek Radio card if automatic "
- "probing is disabled or fails. The most common I/O ports are: 0x20c "
- "0x30c, 0x24c or 0x34c (0x20c, 0x248 and 0x28c have been reported to "
- "work for the combined sound/radiocard).");
+MODULE_PARM_DESC(io, "Force I/O ports for the GemTek Radio card if automatic probing is disabled or fails. The most common I/O ports are: 0x20c, 0x30c, 0x24c or 0x34c (0x20c, 0x248 and 0x28c have been reported to work for the combined sound/radiocard).");
module_param_array(radio_nr, int, NULL, 0444);
MODULE_PARM_DESC(radio_nr, "Radio device numbers");
diff --git a/drivers/media/radio/radio-wl1273.c b/drivers/media/radio/radio-wl1273.c
index a93f681aa9d6..9ce4b12299b4 100644
--- a/drivers/media/radio/radio-wl1273.c
+++ b/drivers/media/radio/radio-wl1273.c
@@ -2068,8 +2068,7 @@ static int wl1273_fm_radio_probe(struct platform_device *pdev)
goto err_request_irq;
}
} else {
- dev_err(radio->dev, WL1273_FM_DRIVER_NAME ": Core WL1273 IRQ"
- " not configured");
+ dev_err(radio->dev, WL1273_FM_DRIVER_NAME ": Core WL1273 IRQ not configured");
r = -EINVAL;
goto pdata_err;
}
diff --git a/drivers/media/radio/si470x/radio-si470x-i2c.c b/drivers/media/radio/si470x/radio-si470x-i2c.c
index ee0470a3196b..9b81969d76b5 100644
--- a/drivers/media/radio/si470x/radio-si470x-i2c.c
+++ b/drivers/media/radio/si470x/radio-si470x-i2c.c
@@ -387,8 +387,8 @@ static int si470x_i2c_probe(struct i2c_client *client,
radio->registers[DEVICEID], radio->registers[SI_CHIPID]);
if ((radio->registers[SI_CHIPID] & SI_CHIPID_FIRMWARE) < RADIO_FW_VERSION) {
dev_warn(&client->dev,
- "This driver is known to work with "
- "firmware version %hu,\n", RADIO_FW_VERSION);
+ "This driver is known to work with firmware version %hu,\n",
+ RADIO_FW_VERSION);
dev_warn(&client->dev,
"but the device has firmware version %hu.\n",
radio->registers[SI_CHIPID] & SI_CHIPID_FIRMWARE);
@@ -400,8 +400,7 @@ static int si470x_i2c_probe(struct i2c_client *client,
dev_warn(&client->dev,
"If you have some trouble using this driver,\n");
dev_warn(&client->dev,
- "please report to V4L ML at "
- "linux-media@vger.kernel.org\n");
+ "please report to V4L ML at linux-media@vger.kernel.org\n");
}
/* set initial frequency */
diff --git a/drivers/media/radio/si470x/radio-si470x-usb.c b/drivers/media/radio/si470x/radio-si470x-usb.c
index 4b132c29f290..1add136d37a3 100644
--- a/drivers/media/radio/si470x/radio-si470x-usb.c
+++ b/drivers/media/radio/si470x/radio-si470x-usb.c
@@ -351,8 +351,8 @@ static int si470x_get_scratch_page_versions(struct si470x_device *radio)
retval = si470x_get_report(radio, radio->usb_buf, SCRATCH_REPORT_SIZE);
if (retval < 0)
- dev_warn(&radio->intf->dev, "si470x_get_scratch: "
- "si470x_get_report returned %d\n", retval);
+ dev_warn(&radio->intf->dev, "si470x_get_scratch: si470x_get_report returned %d\n",
+ retval);
else {
radio->software_version = radio->usb_buf[1];
radio->hardware_version = radio->usb_buf[2];
@@ -688,8 +688,8 @@ static int si470x_usb_driver_probe(struct usb_interface *intf,
radio->registers[DEVICEID], radio->registers[SI_CHIPID]);
if ((radio->registers[SI_CHIPID] & SI_CHIPID_FIRMWARE) < RADIO_FW_VERSION) {
dev_warn(&intf->dev,
- "This driver is known to work with "
- "firmware version %hu,\n", RADIO_FW_VERSION);
+ "This driver is known to work with firmware version %hu,\n",
+ RADIO_FW_VERSION);
dev_warn(&intf->dev,
"but the device has firmware version %hu.\n",
radio->registers[SI_CHIPID] & SI_CHIPID_FIRMWARE);
@@ -705,8 +705,8 @@ static int si470x_usb_driver_probe(struct usb_interface *intf,
radio->software_version, radio->hardware_version);
if (radio->hardware_version < RADIO_HW_VERSION) {
dev_warn(&intf->dev,
- "This driver is known to work with "
- "hardware version %hu,\n", RADIO_HW_VERSION);
+ "This driver is known to work with hardware version %hu,\n",
+ RADIO_HW_VERSION);
dev_warn(&intf->dev,
"but the device has hardware version %hu.\n",
radio->hardware_version);
@@ -718,8 +718,7 @@ static int si470x_usb_driver_probe(struct usb_interface *intf,
dev_warn(&intf->dev,
"If you have some trouble using this driver,\n");
dev_warn(&intf->dev,
- "please report to V4L ML at "
- "linux-media@vger.kernel.org\n");
+ "please report to V4L ML at linux-media@vger.kernel.org\n");
}
/* set led to connect state */
diff --git a/drivers/media/radio/si4713/si4713.c b/drivers/media/radio/si4713/si4713.c
index 0b04b56571da..bc2a8b5442ae 100644
--- a/drivers/media/radio/si4713/si4713.c
+++ b/drivers/media/radio/si4713/si4713.c
@@ -716,9 +716,9 @@ static int si4713_tx_tune_status(struct si4713_device *sdev, u8 intack,
*power = val[5];
*antcap = val[6];
*noise = val[7];
- v4l2_dbg(1, debug, &sdev->sd, "%s: response: %d x 10 kHz "
- "(power %d, antcap %d, rnl %d)\n", __func__,
- *frequency, *power, *antcap, *noise);
+ v4l2_dbg(1, debug, &sdev->sd,
+ "%s: response: %d x 10 kHz (power %d, antcap %d, rnl %d)\n",
+ __func__, *frequency, *power, *antcap, *noise);
}
return err;
@@ -758,10 +758,9 @@ static int si4713_tx_rds_buff(struct si4713_device *sdev, u8 mode, u16 rdsb,
v4l2_dbg(1, debug, &sdev->sd,
"%s: status=0x%02x\n", __func__, val[0]);
*cbleft = (s8)val[2] - val[3];
- v4l2_dbg(1, debug, &sdev->sd, "%s: response: interrupts"
- " 0x%02x cb avail: %d cb used %d fifo avail"
- " %d fifo used %d\n", __func__, val[1],
- val[2], val[3], val[4], val[5]);
+ v4l2_dbg(1, debug, &sdev->sd,
+ "%s: response: interrupts 0x%02x cb avail: %d cb used %d fifo avail %d fifo used %d\n",
+ __func__, val[1], val[2], val[3], val[4], val[5]);
}
return err;
diff --git a/drivers/media/radio/wl128x/fmdrv_common.c b/drivers/media/radio/wl128x/fmdrv_common.c
index 642b89c66bcb..4be07656fbc0 100644
--- a/drivers/media/radio/wl128x/fmdrv_common.c
+++ b/drivers/media/radio/wl128x/fmdrv_common.c
@@ -212,14 +212,14 @@ inline void dump_tx_skb_data(struct sk_buff *skb)
len_org = skb->len - FM_CMD_MSG_HDR_SIZE;
if (len_org > 0) {
- printk("\n data(%d): ", cmd_hdr->dlen);
+ printk(KERN_CONT "\n data(%d): ", cmd_hdr->dlen);
len = min(len_org, 14);
for (index = 0; index < len; index++)
- printk("%x ",
+ printk(KERN_CONT "%x ",
skb->data[FM_CMD_MSG_HDR_SIZE + index]);
- printk("%s", (len_org > 14) ? ".." : "");
+ printk(KERN_CONT "%s", (len_org > 14) ? ".." : "");
}
- printk("\n");
+ printk(KERN_CONT "\n");
}
/* To dump incoming FM Channel-8 packets */
@@ -230,21 +230,21 @@ inline void dump_rx_skb_data(struct sk_buff *skb)
struct fm_event_msg_hdr *evt_hdr;
evt_hdr = (struct fm_event_msg_hdr *)skb->data;
- printk(KERN_INFO ">> hdr:%02x len:%02x sts:%02x numhci:%02x "
- "opcode:%02x type:%s dlen:%02x", evt_hdr->hdr, evt_hdr->len,
- evt_hdr->status, evt_hdr->num_fm_hci_cmds, evt_hdr->op,
- (evt_hdr->rd_wr) ? "RD" : "WR", evt_hdr->dlen);
+ printk(KERN_INFO ">> hdr:%02x len:%02x sts:%02x numhci:%02x opcode:%02x type:%s dlen:%02x",
+ evt_hdr->hdr, evt_hdr->len,
+ evt_hdr->status, evt_hdr->num_fm_hci_cmds, evt_hdr->op,
+ (evt_hdr->rd_wr) ? "RD" : "WR", evt_hdr->dlen);
len_org = skb->len - FM_EVT_MSG_HDR_SIZE;
if (len_org > 0) {
- printk("\n data(%d): ", evt_hdr->dlen);
+ printk(KERN_CONT "\n data(%d): ", evt_hdr->dlen);
len = min(len_org, 14);
for (index = 0; index < len; index++)
- printk("%x ",
+ printk(KERN_CONT "%x ",
skb->data[FM_EVT_MSG_HDR_SIZE + index]);
- printk("%s", (len_org > 14) ? ".." : "");
+ printk(KERN_CONT "%s", (len_org > 14) ? ".." : "");
}
- printk("\n");
+ printk(KERN_CONT "\n");
}
#endif
@@ -271,9 +271,9 @@ static void recv_tasklet(unsigned long arg)
/* Process all packets in the RX queue */
while ((skb = skb_dequeue(&fmdev->rx_q))) {
if (skb->len < sizeof(struct fm_event_msg_hdr)) {
- fmerr("skb(%p) has only %d bytes, "
- "at least need %zu bytes to decode\n", skb,
- skb->len, sizeof(struct fm_event_msg_hdr));
+ fmerr("skb(%p) has only %d bytes, at least need %zu bytes to decode\n",
+ skb,
+ skb->len, sizeof(struct fm_event_msg_hdr));
kfree_skb(skb);
continue;
}
@@ -472,8 +472,7 @@ int fmc_send_cmd(struct fmdev *fmdev, u8 fm_op, u16 type, void *payload,
if (!wait_for_completion_timeout(&fmdev->maintask_comp,
FM_DRV_TX_TIMEOUT)) {
- fmerr("Timeout(%d sec),didn't get reg"
- "completion signal from RX tasklet\n",
+ fmerr("Timeout(%d sec),didn't get regcompletion signal from RX tasklet\n",
jiffies_to_msecs(FM_DRV_TX_TIMEOUT) / 1000);
return -ETIMEDOUT;
}
@@ -523,8 +522,7 @@ static inline int check_cmdresp_status(struct fmdev *fmdev,
fm_evt_hdr = (void *)(*skb)->data;
if (fm_evt_hdr->status != 0) {
- fmerr("irq: opcode %x response status is not zero "
- "Initiating irq recovery process\n",
+ fmerr("irq: opcode %x response status is not zero Initiating irq recovery process\n",
fm_evt_hdr->op);
mod_timer(&fmdev->irq_info.timer, jiffies + FM_DRV_TX_TIMEOUT);
@@ -564,8 +562,7 @@ static void int_timeout_handler(unsigned long data)
* reset stage index & retry count values */
fmirq->stage = 0;
fmirq->retry = 0;
- fmerr("Recovery action failed during"
- "irq processing, max retry reached\n");
+ fmerr("Recovery action failed duringirq processing, max retry reached\n");
return;
}
fm_irq_call_stage(fmdev, FM_SEND_INTMSK_CMD_IDX);
@@ -1516,14 +1513,13 @@ int fmc_prepare(struct fmdev *fmdev)
if (!wait_for_completion_timeout(&wait_for_fmdrv_reg_comp,
FM_ST_REG_TIMEOUT)) {
- fmerr("Timeout(%d sec), didn't get reg "
- "completion signal from ST\n",
+ fmerr("Timeout(%d sec), didn't get reg completion signal from ST\n",
jiffies_to_msecs(FM_ST_REG_TIMEOUT) / 1000);
return -ETIMEDOUT;
}
if (fmdev->streg_cbdata != 0) {
- fmerr("ST reg comp CB called with error "
- "status %d\n", fmdev->streg_cbdata);
+ fmerr("ST reg comp CB called with error status %d\n",
+ fmdev->streg_cbdata);
return -EAGAIN;
}
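
The printk() changes above matter because a bare printk() with no level starts a new log record rather than continuing the previous line; multi-part output such as these hex dumps must mark every continuation with KERN_CONT. A standalone sketch of the pattern, with a hypothetical demo_dump():

#include <linux/printk.h>
#include <linux/types.h>

/* Every continuation must say KERN_CONT, otherwise each printk()
 * opens a fresh log record on its own line. */
static void demo_dump(const u8 *buf, int len)
{
	int i;

	printk(KERN_INFO "payload(%d):", len);
	for (i = 0; i < len; i++)
		printk(KERN_CONT " %02x", buf[i]);
	printk(KERN_CONT "\n");
}

For fixed-size dumps the imon hunk further below shows the tighter alternative: a single printk() using the %*ph format extension.
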
diff --git a/drivers/media/radio/wl128x/fmdrv_rx.c b/drivers/media/radio/wl128x/fmdrv_rx.c
index cfaeb2417fbb..e7455f82fadc 100644
--- a/drivers/media/radio/wl128x/fmdrv_rx.c
+++ b/drivers/media/radio/wl128x/fmdrv_rx.c
@@ -120,8 +120,8 @@ int fm_rx_set_freq(struct fmdev *fmdev, u32 freq)
curr_frq_in_khz = (fmdev->rx.region.bot_freq + ((u32)curr_frq * FM_FREQ_MUL));
if (curr_frq_in_khz != freq) {
- pr_info("Frequency is set to (%d) but "
- "requested freq is (%d)\n", curr_frq_in_khz, freq);
+ pr_info("Frequency is set to (%d) but requested freq is (%d)\n",
+ curr_frq_in_khz, freq);
}
/* Update local cache */
@@ -390,8 +390,8 @@ int fm_rx_set_region(struct fmdev *fmdev, u8 region_to_set)
new_frq = fmdev->rx.region.top_freq;
if (new_frq) {
- fmdbg("Current freq is not within band limit boundary,"
- "switching to %d KHz\n", new_frq);
+ fmdbg("Current freq is not within band limit boundary,switching to %d KHz\n",
+ new_frq);
/* Current RX frequency is not in range. So, update it */
ret = fm_rx_set_freq(fmdev, new_frq);
}
diff --git a/drivers/media/rc/Kconfig b/drivers/media/rc/Kconfig
index 370e16e07867..629e8ca15ab3 100644
--- a/drivers/media/rc/Kconfig
+++ b/drivers/media/rc/Kconfig
@@ -389,4 +389,21 @@ config IR_SUNXI
To compile this driver as a module, choose M here: the module will
be called sunxi-ir.
+config IR_SERIAL
+ tristate "Homebrew Serial Port Receiver"
+ depends on RC_CORE
+ ---help---
+ Say Y if you want to use Homebrew Serial Port Receivers and
+ Transceivers.
+
+ To compile this driver as a module, choose M here: the module will
+ be called serial-ir.
+
+config IR_SERIAL_TRANSMITTER
+ bool "Serial Port Transmitter"
+ default y
+ depends on IR_SERIAL
+ ---help---
+ Serial Port Transmitter support
+
endif #RC_DEVICES
diff --git a/drivers/media/rc/Makefile b/drivers/media/rc/Makefile
index 379a5c0f1379..3a984ee301e2 100644
--- a/drivers/media/rc/Makefile
+++ b/drivers/media/rc/Makefile
@@ -37,3 +37,4 @@ obj-$(CONFIG_IR_TTUSBIR) += ttusbir.o
obj-$(CONFIG_RC_ST) += st_rc.o
obj-$(CONFIG_IR_SUNXI) += sunxi-cir.o
obj-$(CONFIG_IR_IMG) += img-ir/
+obj-$(CONFIG_IR_SERIAL) += serial_ir.o
diff --git a/drivers/media/rc/ati_remote.c b/drivers/media/rc/ati_remote.c
index 9f5b59706741..0884b7dc0e71 100644
--- a/drivers/media/rc/ati_remote.c
+++ b/drivers/media/rc/ati_remote.c
@@ -527,8 +527,7 @@ static void ati_remote_input_report(struct urb *urb)
remote_num = (data[3] >> 4) & 0x0f;
if (channel_mask & (1 << (remote_num + 1))) {
dbginfo(&ati_remote->interface->dev,
- "Masked input from channel 0x%02x: data %02x, "
- "mask= 0x%02lx\n",
+ "Masked input from channel 0x%02x: data %02x, mask= 0x%02lx\n",
remote_num, data[2], channel_mask);
return;
}
diff --git a/drivers/media/rc/ene_ir.c b/drivers/media/rc/ene_ir.c
index d1c61cd035f6..bd5512e64aea 100644
--- a/drivers/media/rc/ene_ir.c
+++ b/drivers/media/rc/ene_ir.c
@@ -1210,8 +1210,7 @@ MODULE_PARM_DESC(txsim,
MODULE_DEVICE_TABLE(pnp, ene_ids);
MODULE_DESCRIPTION
- ("Infrared input driver for KB3926B/C/D/E/F "
- "(aka ENE0100/ENE0200/ENE0201/ENE0202) CIR port");
+ ("Infrared input driver for KB3926B/C/D/E/F (aka ENE0100/ENE0200/ENE0201/ENE0202) CIR port");
MODULE_AUTHOR("Maxim Levitsky");
MODULE_LICENSE("GPL");
diff --git a/drivers/media/rc/fintek-cir.c b/drivers/media/rc/fintek-cir.c
index bd7b3bdb1a88..ecab69ea3d51 100644
--- a/drivers/media/rc/fintek-cir.c
+++ b/drivers/media/rc/fintek-cir.c
@@ -104,11 +104,7 @@ static inline void fintek_cir_reg_write(struct fintek_dev *fintek, u8 val, u8 of
/* read val from cir config register */
static u8 fintek_cir_reg_read(struct fintek_dev *fintek, u8 offset)
{
- u8 val;
-
- val = inb(fintek->cir_addr + offset);
-
- return val;
+ return inb(fintek->cir_addr + offset);
}
/* dump current cir register contents */
diff --git a/drivers/media/rc/imon.c b/drivers/media/rc/imon.c
index 86cc70fe2534..0785a24af8fc 100644
--- a/drivers/media/rc/imon.c
+++ b/drivers/media/rc/imon.c
@@ -441,13 +441,11 @@ MODULE_PARM_DESC(debug, "Debug messages: 0=no, 1=yes (default: no)");
/* lcd, vfd, vga or none? should be auto-detected, but can be overridden... */
static int display_type;
module_param(display_type, int, S_IRUGO);
-MODULE_PARM_DESC(display_type, "Type of attached display. 0=autodetect, "
- "1=vfd, 2=lcd, 3=vga, 4=none (default: autodetect)");
+MODULE_PARM_DESC(display_type, "Type of attached display. 0=autodetect, 1=vfd, 2=lcd, 3=vga, 4=none (default: autodetect)");
static int pad_stabilize = 1;
module_param(pad_stabilize, int, S_IRUGO | S_IWUSR);
-MODULE_PARM_DESC(pad_stabilize, "Apply stabilization algorithm to iMON PAD "
- "presses in arrow key mode. 0=disable, 1=enable (default).");
+MODULE_PARM_DESC(pad_stabilize, "Apply stabilization algorithm to iMON PAD presses in arrow key mode. 0=disable, 1=enable (default).");
/*
* In certain use cases, mouse mode isn't really helpful, and could actually
@@ -455,14 +453,12 @@ MODULE_PARM_DESC(pad_stabilize, "Apply stabilization algorithm to iMON PAD "
*/
static bool nomouse;
module_param(nomouse, bool, S_IRUGO | S_IWUSR);
-MODULE_PARM_DESC(nomouse, "Disable mouse input device mode when IR device is "
- "open. 0=don't disable, 1=disable. (default: don't disable)");
+MODULE_PARM_DESC(nomouse, "Disable mouse input device mode when IR device is open. 0=don't disable, 1=disable. (default: don't disable)");
/* threshold at which a pad push registers as an arrow key in kbd mode */
static int pad_thresh;
module_param(pad_thresh, int, S_IRUGO | S_IWUSR);
-MODULE_PARM_DESC(pad_thresh, "Threshold at which a pad push registers as an "
- "arrow key in kbd mode (default: 28)");
+MODULE_PARM_DESC(pad_thresh, "Threshold at which a pad push registers as an arrow key in kbd mode (default: 28)");
static void free_imon_context(struct imon_context *ictx)
@@ -611,7 +607,7 @@ static int send_packet(struct imon_context *ictx)
ictx->tx_urb->actual_length = 0;
}
- init_completion(&ictx->tx.finished);
+ reinit_completion(&ictx->tx.finished);
ictx->tx.busy = true;
smp_rmb(); /* ensure later readers know we're busy */
@@ -785,9 +781,7 @@ static ssize_t show_associate_remote(struct device *d,
else
strcpy(buf, "closed\n");
- dev_info(d, "Visit http://www.lirc.org/html/imon-24g.html for "
- "instructions on how to associate your iMON 2.4G DT/LT "
- "remote\n");
+ dev_info(d, "Visit http://www.lirc.org/html/imon-24g.html for instructions on how to associate your iMON 2.4G DT/LT remote\n");
mutex_unlock(&ictx->lock);
return strlen(buf);
}
@@ -1115,8 +1109,7 @@ static int imon_ir_change_protocol(struct rc_dev *rc, u64 *rc_type)
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x86 };
if (*rc_type && !(*rc_type & rc->allowed_protocols))
- dev_warn(dev, "Looks like you're trying to use an IR protocol "
- "this device does not support\n");
+ dev_warn(dev, "Looks like you're trying to use an IR protocol this device does not support\n");
if (*rc_type & RC_BIT_RC6_MCE) {
dev_dbg(dev, "Configuring IR receiver for MCE protocol\n");
@@ -1129,8 +1122,7 @@ static int imon_ir_change_protocol(struct rc_dev *rc, u64 *rc_type)
/* ir_proto_packet[0] = 0x00; // already the default */
*rc_type = RC_BIT_OTHER;
} else {
- dev_warn(dev, "Unsupported IR protocol specified, overriding "
- "to iMON IR protocol\n");
+ dev_warn(dev, "Unsupported IR protocol specified, overriding to iMON IR protocol\n");
if (!pad_stabilize)
dev_dbg(dev, "PAD stabilize functionality disabled\n");
/* ir_proto_packet[0] = 0x00; // already the default */
@@ -1593,7 +1585,6 @@ static void imon_incoming_packet(struct imon_context *ictx,
struct device *dev = ictx->dev;
unsigned long flags;
u32 kc;
- int i;
u64 scancode;
int press_type = 0;
int msec;
@@ -1664,10 +1655,8 @@ static void imon_incoming_packet(struct imon_context *ictx,
}
if (debug) {
- printk(KERN_INFO "intf%d decoded packet: ", intf);
- for (i = 0; i < len; ++i)
- printk("%02x ", buf[i]);
- printk("\n");
+ printk(KERN_INFO "intf%d decoded packet: %*ph\n",
+ intf, len, buf);
}
press_type = imon_parse_press_type(ictx, buf, ktype);
@@ -1722,8 +1711,8 @@ static void imon_incoming_packet(struct imon_context *ictx,
not_input_data:
if (len != 8) {
- dev_warn(dev, "imon %s: invalid incoming packet "
- "size (len = %d, intf%d)\n", __func__, len, intf);
+ dev_warn(dev, "imon %s: invalid incoming packet size (len = %d, intf%d)\n",
+ __func__, len, intf);
return;
}
@@ -1879,8 +1868,7 @@ static void imon_get_ffdc_type(struct imon_context *ictx)
allowed_protos = RC_BIT_RC6_MCE;
break;
default:
- dev_info(ictx->dev, "Unknown 0xffdc device, "
- "defaulting to VFD and iMON IR");
+ dev_info(ictx->dev, "Unknown 0xffdc device, defaulting to VFD and iMON IR");
detected_display_type = IMON_DISPLAY_TYPE_VFD;
/* We don't know which one it is, allow user to set the
* RC6 one from userspace if OTHER wasn't correct. */
@@ -1937,8 +1925,8 @@ static void imon_set_display_type(struct imon_context *ictx)
ictx->display_supported = false;
else
ictx->display_supported = true;
- dev_info(ictx->dev, "%s: overriding display type to %d via "
- "modparam\n", __func__, display_type);
+ dev_info(ictx->dev, "%s: overriding display type to %d via modparam\n",
+ __func__, display_type);
}
ictx->display_type = configured_display_type;
@@ -2159,8 +2147,8 @@ static bool imon_find_endpoints(struct imon_context *ictx,
if (!display_ep_found) {
tx_control = true;
display_ep_found = true;
- dev_dbg(ictx->dev, "%s: device uses control endpoint, not "
- "interface OUT endpoint\n", __func__);
+ dev_dbg(ictx->dev, "%s: device uses control endpoint, not interface OUT endpoint\n",
+ __func__);
}
/*
@@ -2228,6 +2216,8 @@ static struct imon_context *imon_init_intf0(struct usb_interface *intf,
ictx->tx_urb = tx_urb;
ictx->rf_device = false;
+ init_completion(&ictx->tx.finished);
+
ictx->vendor = le16_to_cpu(ictx->usbdev_intf0->descriptor.idVendor);
ictx->product = le16_to_cpu(ictx->usbdev_intf0->descriptor.idProduct);
@@ -2369,8 +2359,8 @@ static void imon_init_display(struct imon_context *ictx,
/* set up sysfs entry for built-in clock */
ret = sysfs_create_group(&intf->dev.kobj, &imon_display_attr_group);
if (ret)
- dev_err(ictx->dev, "Could not create display sysfs "
- "entries(%d)", ret);
+ dev_err(ictx->dev, "Could not create display sysfs entries(%d)",
+ ret);
if (ictx->display_type == IMON_DISPLAY_TYPE_LCD)
ret = usb_register_dev(intf, &imon_lcd_class);
@@ -2378,8 +2368,7 @@ static void imon_init_display(struct imon_context *ictx,
ret = usb_register_dev(intf, &imon_vfd_class);
if (ret)
/* Not a fatal error, so ignore */
- dev_info(ictx->dev, "could not get a minor number for "
- "display\n");
+ dev_info(ictx->dev, "could not get a minor number for display\n");
}
@@ -2459,8 +2448,8 @@ static int imon_probe(struct usb_interface *interface,
mutex_unlock(&ictx->lock);
}
- dev_info(dev, "iMON device (%04x:%04x, intf%d) on "
- "usb<%d:%d> initialized\n", vendor, product, ifnum,
+ dev_info(dev, "iMON device (%04x:%04x, intf%d) on usb<%d:%d> initialized\n",
+ vendor, product, ifnum,
usbdev->bus->busnum, usbdev->devnum);
mutex_unlock(&driver_lock);
@@ -2504,7 +2493,7 @@ static void imon_disconnect(struct usb_interface *interface)
/* Abort ongoing write */
if (ictx->tx.busy) {
usb_kill_urb(ictx->tx_urb);
- complete_all(&ictx->tx.finished);
+ complete(&ictx->tx.finished);
}
if (ifnum == 0) {
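
The imon completion changes follow the standard lifecycle: init_completion() runs exactly once when the context is created, reinit_completion() resets the counter before each new transmit (re-running init_completion() would also re-initialize the internal wait-queue and lock), and the single waiter is woken with complete() rather than complete_all(), whose UINT_MAX "done" value would otherwise linger across reinits. A minimal sketch with hypothetical demo_* names:

#include <linux/completion.h>

struct demo_ctx {
	struct completion tx_done;
};

static void demo_init_once(struct demo_ctx *ctx)
{
	init_completion(&ctx->tx_done);		/* probe time, exactly once */
}

static void demo_start_tx(struct demo_ctx *ctx)
{
	reinit_completion(&ctx->tx_done);	/* before every transfer */
	/* ... submit URB, then wait_for_completion(&ctx->tx_done); ... */
}

static void demo_tx_complete(struct demo_ctx *ctx)
{
	complete(&ctx->tx_done);		/* one waiter, not complete_all() */
}
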
diff --git a/drivers/media/rc/ir-hix5hd2.c b/drivers/media/rc/ir-hix5hd2.c
index d0549fba711c..d26907e684dc 100644
--- a/drivers/media/rc/ir-hix5hd2.c
+++ b/drivers/media/rc/ir-hix5hd2.c
@@ -75,15 +75,22 @@ static void hix5hd2_ir_enable(struct hix5hd2_ir_priv *dev, bool on)
{
u32 val;
- regmap_read(dev->regmap, IR_CLK, &val);
- if (on) {
- val &= ~IR_CLK_RESET;
- val |= IR_CLK_ENABLE;
+ if (dev->regmap) {
+ regmap_read(dev->regmap, IR_CLK, &val);
+ if (on) {
+ val &= ~IR_CLK_RESET;
+ val |= IR_CLK_ENABLE;
+ } else {
+ val &= ~IR_CLK_ENABLE;
+ val |= IR_CLK_RESET;
+ }
+ regmap_write(dev->regmap, IR_CLK, val);
} else {
- val &= ~IR_CLK_ENABLE;
- val |= IR_CLK_RESET;
+ if (on)
+ clk_prepare_enable(dev->clock);
+ else
+ clk_disable_unprepare(dev->clock);
}
- regmap_write(dev->regmap, IR_CLK, val);
}
static int hix5hd2_ir_config(struct hix5hd2_ir_priv *priv)
@@ -207,8 +214,8 @@ static int hix5hd2_ir_probe(struct platform_device *pdev)
priv->regmap = syscon_regmap_lookup_by_phandle(node,
"hisilicon,power-syscon");
if (IS_ERR(priv->regmap)) {
- dev_err(dev, "no power-reg\n");
- return -EINVAL;
+ dev_info(dev, "no power-reg\n");
+ priv->regmap = NULL;
}
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
diff --git a/drivers/media/rc/ir-sanyo-decoder.c b/drivers/media/rc/ir-sanyo-decoder.c
index 7331e5e7c497..b07d9caebeb1 100644
--- a/drivers/media/rc/ir-sanyo-decoder.c
+++ b/drivers/media/rc/ir-sanyo-decoder.c
@@ -56,7 +56,8 @@ static int ir_sanyo_decode(struct rc_dev *dev, struct ir_raw_event ev)
{
struct sanyo_dec *data = &dev->raw->sanyo;
u32 scancode;
- u8 address, command, not_command;
+ u16 address;
+ u8 command, not_command;
if (!is_timing_event(ev)) {
if (ev.reset) {
diff --git a/drivers/media/rc/ite-cir.c b/drivers/media/rc/ite-cir.c
index 0f301903aa6f..367b28bed627 100644
--- a/drivers/media/rc/ite-cir.c
+++ b/drivers/media/rc/ite-cir.c
@@ -55,14 +55,12 @@ MODULE_PARM_DESC(debug, "Enable debugging output");
/* low limit for RX carrier freq, Hz, 0 for no RX demodulation */
static int rx_low_carrier_freq;
module_param(rx_low_carrier_freq, int, S_IRUGO | S_IWUSR);
-MODULE_PARM_DESC(rx_low_carrier_freq, "Override low RX carrier frequency, Hz, "
- "0 for no RX demodulation");
+MODULE_PARM_DESC(rx_low_carrier_freq, "Override low RX carrier frequency, Hz, 0 for no RX demodulation");
/* high limit for RX carrier freq, Hz, 0 for no RX demodulation */
static int rx_high_carrier_freq;
module_param(rx_high_carrier_freq, int, S_IRUGO | S_IWUSR);
-MODULE_PARM_DESC(rx_high_carrier_freq, "Override high RX carrier frequency, "
- "Hz, 0 for no RX demodulation");
+MODULE_PARM_DESC(rx_high_carrier_freq, "Override high RX carrier frequency, Hz, 0 for no RX demodulation");
/* override tx carrier frequency */
static int tx_carrier_freq;
@@ -263,6 +261,8 @@ static void ite_set_carrier_params(struct ite_dev *dev)
if (allowance > ITE_RXDCR_MAX)
allowance = ITE_RXDCR_MAX;
+
+ use_demodulator = true;
}
}
@@ -1484,8 +1484,7 @@ static int ite_probe(struct pnp_dev *pdev, const struct pnp_device_id
if (model_number >= 0 && model_number < ARRAY_SIZE(ite_dev_descs)) {
model_no = model_number;
- ite_pr(KERN_NOTICE, "The model has been fixed by a module "
- "parameter.");
+ ite_pr(KERN_NOTICE, "The model has been fixed by a module parameter.");
}
ite_pr(KERN_NOTICE, "Using model: %s\n", ite_dev_descs[model_no].model);
diff --git a/drivers/media/rc/lirc_dev.c b/drivers/media/rc/lirc_dev.c
index 91f9bb87ce68..3854809e8531 100644
--- a/drivers/media/rc/lirc_dev.c
+++ b/drivers/media/rc/lirc_dev.c
@@ -150,9 +150,6 @@ static const struct file_operations lirc_dev_fops = {
.write = lirc_dev_fop_write,
.poll = lirc_dev_fop_poll,
.unlocked_ioctl = lirc_dev_fop_ioctl,
-#ifdef CONFIG_COMPAT
- .compat_ioctl = lirc_dev_fop_ioctl,
-#endif
.open = lirc_dev_fop_open,
.release = lirc_dev_fop_close,
.llseek = noop_llseek,
@@ -160,19 +157,19 @@ static const struct file_operations lirc_dev_fops = {
static int lirc_cdev_add(struct irctl *ir)
{
- int retval = -ENOMEM;
struct lirc_driver *d = &ir->d;
struct cdev *cdev;
+ int retval;
- cdev = kzalloc(sizeof(*cdev), GFP_KERNEL);
+ cdev = cdev_alloc();
if (!cdev)
- goto err_out;
+ return -ENOMEM;
if (d->fops) {
- cdev_init(cdev, d->fops);
+ cdev->ops = d->fops;
cdev->owner = d->owner;
} else {
- cdev_init(cdev, &lirc_dev_fops);
+ cdev->ops = &lirc_dev_fops;
cdev->owner = THIS_MODULE;
}
retval = kobject_set_name(&cdev->kobj, "lirc%d", d->minor);
@@ -180,17 +177,15 @@ static int lirc_cdev_add(struct irctl *ir)
goto err_out;
retval = cdev_add(cdev, MKDEV(MAJOR(lirc_base_dev), d->minor), 1);
- if (retval) {
- kobject_put(&cdev->kobj);
+ if (retval)
goto err_out;
- }
ir->cdev = cdev;
return 0;
err_out:
- kfree(cdev);
+ cdev_del(cdev);
return retval;
}
@@ -420,7 +415,6 @@ int lirc_unregister_driver(int minor)
} else {
lirc_irctl_cleanup(ir);
cdev_del(cdev);
- kfree(cdev);
kfree(ir);
irctls[minor] = NULL;
}
@@ -521,7 +515,6 @@ int lirc_dev_fop_close(struct inode *inode, struct file *file)
lirc_irctl_cleanup(ir);
cdev_del(cdev);
irctls[ir->d.minor] = NULL;
- kfree(cdev);
kfree(ir);
}
@@ -684,7 +677,6 @@ ssize_t lirc_dev_fop_read(struct file *file,
* between while condition checking and scheduling)
*/
add_wait_queue(&ir->buf->wait_poll, &wait);
- set_current_state(TASK_INTERRUPTIBLE);
/*
* while we didn't provide 'length' bytes, device is opened in blocking
@@ -709,19 +701,19 @@ ssize_t lirc_dev_fop_read(struct file *file,
}
mutex_unlock(&ir->irctl_lock);
- schedule();
set_current_state(TASK_INTERRUPTIBLE);
+ schedule();
+ set_current_state(TASK_RUNNING);
if (mutex_lock_interruptible(&ir->irctl_lock)) {
ret = -ERESTARTSYS;
remove_wait_queue(&ir->buf->wait_poll, &wait);
- set_current_state(TASK_RUNNING);
goto out_unlocked;
}
if (!ir->attached) {
ret = -ENODEV;
- break;
+ goto out_locked;
}
} else {
lirc_buffer_read(ir->buf, buf);
@@ -735,7 +727,6 @@ ssize_t lirc_dev_fop_read(struct file *file,
}
remove_wait_queue(&ir->buf->wait_poll, &wait);
- set_current_state(TASK_RUNNING);
out_locked:
mutex_unlock(&ir->irctl_lock);
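
The lirc_dev conversion from kzalloc()+cdev_init() to cdev_alloc() changes who owns the memory: a cdev from cdev_alloc() is backed by a refcounted kobject whose release frees it, so cdev_del() is the only teardown needed and the old kfree(cdev) calls had to go. A sketch of the resulting lifecycle under those assumptions; demo_cdev_add() is illustrative and mirrors the patch's error path:

#include <linux/cdev.h>
#include <linux/fs.h>
#include <linux/module.h>

/* A cdev_alloc()'d cdev is freed by its kobject release, so teardown
 * is cdev_del() alone; an explicit kfree() would be a double free. */
static struct cdev *demo_cdev_add(const struct file_operations *fops,
				  dev_t devt)
{
	struct cdev *cdev = cdev_alloc();

	if (!cdev)
		return NULL;

	cdev->ops = fops;
	cdev->owner = THIS_MODULE;
	if (cdev_add(cdev, devt, 1)) {
		cdev_del(cdev);	/* drops the last reference and frees it */
		return NULL;
	}
	return cdev;
}
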
diff --git a/drivers/media/rc/mceusb.c b/drivers/media/rc/mceusb.c
index 4f8c7effdcee..9bf69179eee0 100644
--- a/drivers/media/rc/mceusb.c
+++ b/drivers/media/rc/mceusb.c
@@ -153,15 +153,6 @@
#define MCE_COMMAND_IRDATA 0x80
#define MCE_PACKET_LENGTH_MASK 0x1f /* Packet length mask */
-/* general constants */
-#define SEND_FLAG_IN_PROGRESS 1
-#define SEND_FLAG_COMPLETE 2
-#define RECV_FLAG_IN_PROGRESS 3
-#define RECV_FLAG_COMPLETE 4
-
-#define MCEUSB_RX 1
-#define MCEUSB_TX 2
-
#define VENDOR_PHILIPS 0x0471
#define VENDOR_SMK 0x0609
#define VENDOR_TATUNG 0x1460
@@ -422,7 +413,6 @@ struct mceusb_dev {
struct rc_dev *rc;
/* optional features we can enable */
- bool carrier_report_enabled;
bool learning_enabled;
/* core device bits */
@@ -455,7 +445,6 @@ struct mceusb_dev {
} flags;
/* transmit support */
- int send_flags;
u32 carrier;
unsigned char tx_mask;
@@ -604,9 +593,7 @@ static void mceusb_dev_printdata(struct mceusb_dev *ir, char *buf,
break;
case MCE_RSP_EQWAKEVERSION:
if (!out)
- dev_dbg(dev, "Wake version, proto: 0x%02x, "
- "payload: 0x%02x, address: 0x%02x, "
- "version: 0x%02x",
+ dev_dbg(dev, "Wake version, proto: 0x%02x, payload: 0x%02x, address: 0x%02x, version: 0x%02x",
data1, data2, data3, data4);
break;
case MCE_RSP_GETPORTSTATUS:
@@ -740,52 +727,40 @@ static void mce_async_callback(struct urb *urb)
/* request incoming or send outgoing usb packet - used to initialize remote */
static void mce_request_packet(struct mceusb_dev *ir, unsigned char *data,
- int size, int urb_type)
+ int size)
{
int res, pipe;
struct urb *async_urb;
struct device *dev = ir->dev;
unsigned char *async_buf;
- if (urb_type == MCEUSB_TX) {
- async_urb = usb_alloc_urb(0, GFP_KERNEL);
- if (unlikely(!async_urb)) {
- dev_err(dev, "Error, couldn't allocate urb!\n");
- return;
- }
-
- async_buf = kzalloc(size, GFP_KERNEL);
- if (!async_buf) {
- dev_err(dev, "Error, couldn't allocate buf!\n");
- usb_free_urb(async_urb);
- return;
- }
-
- /* outbound data */
- if (usb_endpoint_xfer_int(ir->usb_ep_out)) {
- pipe = usb_sndintpipe(ir->usbdev,
- ir->usb_ep_out->bEndpointAddress);
- usb_fill_int_urb(async_urb, ir->usbdev, pipe, async_buf,
- size, mce_async_callback, ir,
- ir->usb_ep_out->bInterval);
- } else {
- pipe = usb_sndbulkpipe(ir->usbdev,
- ir->usb_ep_out->bEndpointAddress);
- usb_fill_bulk_urb(async_urb, ir->usbdev, pipe,
- async_buf, size, mce_async_callback,
- ir);
- }
- memcpy(async_buf, data, size);
+ async_urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (unlikely(!async_urb)) {
+ dev_err(dev, "Error, couldn't allocate urb!\n");
+ return;
+ }
- } else if (urb_type == MCEUSB_RX) {
- /* standard request */
- async_urb = ir->urb_in;
- ir->send_flags = RECV_FLAG_IN_PROGRESS;
+ async_buf = kmalloc(size, GFP_KERNEL);
+ if (!async_buf) {
+ usb_free_urb(async_urb);
+ return;
+ }
+ /* outbound data */
+ if (usb_endpoint_xfer_int(ir->usb_ep_out)) {
+ pipe = usb_sndintpipe(ir->usbdev,
+ ir->usb_ep_out->bEndpointAddress);
+ usb_fill_int_urb(async_urb, ir->usbdev, pipe, async_buf,
+ size, mce_async_callback, ir,
+ ir->usb_ep_out->bInterval);
} else {
- dev_err(dev, "Error! Unknown urb type %d\n", urb_type);
- return;
+ pipe = usb_sndbulkpipe(ir->usbdev,
+ ir->usb_ep_out->bEndpointAddress);
+ usb_fill_bulk_urb(async_urb, ir->usbdev, pipe,
+ async_buf, size, mce_async_callback,
+ ir);
}
+ memcpy(async_buf, data, size);
dev_dbg(dev, "receive request called (size=%#x)", size);
@@ -806,19 +781,14 @@ static void mce_async_out(struct mceusb_dev *ir, unsigned char *data, int size)
if (ir->need_reset) {
ir->need_reset = false;
- mce_request_packet(ir, DEVICE_RESUME, rsize, MCEUSB_TX);
+ mce_request_packet(ir, DEVICE_RESUME, rsize);
msleep(10);
}
- mce_request_packet(ir, data, size, MCEUSB_TX);
+ mce_request_packet(ir, data, size);
msleep(10);
}
-static void mce_flush_rx_buffer(struct mceusb_dev *ir, int size)
-{
- mce_request_packet(ir, NULL, size, MCEUSB_RX);
-}
-
/* Send data out the IR blaster port(s) */
static int mceusb_tx_ir(struct rc_dev *dev, unsigned *txbuf, unsigned count)
{
@@ -1062,7 +1032,6 @@ static void mceusb_process_ir_data(struct mceusb_dev *ir, int buf_len)
static void mceusb_dev_recv(struct urb *urb)
{
struct mceusb_dev *ir;
- int buf_len;
if (!urb)
return;
@@ -1073,18 +1042,10 @@ static void mceusb_dev_recv(struct urb *urb)
return;
}
- buf_len = urb->actual_length;
-
- if (ir->send_flags == RECV_FLAG_IN_PROGRESS) {
- ir->send_flags = SEND_FLAG_COMPLETE;
- dev_dbg(ir->dev, "setup answer received %d bytes\n",
- buf_len);
- }
-
switch (urb->status) {
/* success */
case 0:
- mceusb_process_ir_data(ir, buf_len);
+ mceusb_process_ir_data(ir, urb->actual_length);
break;
case -ECONNRESET:
@@ -1285,7 +1246,7 @@ static int mceusb_dev_probe(struct usb_interface *intf,
struct usb_endpoint_descriptor *ep_in = NULL;
struct usb_endpoint_descriptor *ep_out = NULL;
struct mceusb_dev *ir = NULL;
- int pipe, maxp, i;
+ int pipe, maxp, i, res;
char buf[63], name[128] = "";
enum mceusb_model_type model = id->driver_info;
bool is_gen3;
@@ -1388,7 +1349,9 @@ static int mceusb_dev_probe(struct usb_interface *intf,
/* flush buffers on the device */
dev_dbg(&intf->dev, "Flushing receive buffers\n");
- mce_flush_rx_buffer(ir, maxp);
+ res = usb_submit_urb(ir->urb_in, GFP_KERNEL);
+ if (res)
+ dev_err(&intf->dev, "failed to flush buffers: %d\n", res);
/* figure out which firmware/emulator version this hardware has */
mceusb_get_emulator_version(ir);
@@ -1423,6 +1386,7 @@ static int mceusb_dev_probe(struct usb_interface *intf,
/* Error-handling path */
rc_dev_fail:
usb_put_dev(ir->usbdev);
+ usb_kill_urb(ir->urb_in);
usb_free_urb(ir->urb_in);
urb_in_alloc_fail:
usb_free_coherent(dev, maxp, ir->buf_in, ir->dma_in);
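
With the RX/TX flag bookkeeping gone, every outbound mceusb packet takes one path that simply picks the URB fill helper matching the OUT endpoint's transfer type. A self-contained sketch of that dispatch; demo_send() and its parameters are illustrative, and the real driver additionally copies the payload into its own buffer and frees urb and buffer in the completion callback:

#include <linux/usb.h>

static int demo_send(struct usb_device *udev,
		     struct usb_endpoint_descriptor *ep,
		     void *buf, int len, usb_complete_t done, void *ctx)
{
	struct urb *urb = usb_alloc_urb(0, GFP_KERNEL);
	int ret;

	if (!urb)
		return -ENOMEM;

	/* Interrupt endpoints need the polling interval; bulk does not. */
	if (usb_endpoint_xfer_int(ep))
		usb_fill_int_urb(urb, udev,
				 usb_sndintpipe(udev, ep->bEndpointAddress),
				 buf, len, done, ctx, ep->bInterval);
	else
		usb_fill_bulk_urb(urb, udev,
				  usb_sndbulkpipe(udev, ep->bEndpointAddress),
				  buf, len, done, ctx);

	ret = usb_submit_urb(urb, GFP_KERNEL);
	if (ret)
		usb_free_urb(urb);	/* callback never runs on failure */
	return ret;
}
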
diff --git a/drivers/media/rc/meson-ir.c b/drivers/media/rc/meson-ir.c
index 003fff07ade2..7eb3f4f1ddcd 100644
--- a/drivers/media/rc/meson-ir.c
+++ b/drivers/media/rc/meson-ir.c
@@ -218,6 +218,7 @@ static const struct of_device_id meson_ir_match[] = {
{ .compatible = "amlogic,meson-gxbb-ir" },
{ },
};
+MODULE_DEVICE_TABLE(of, meson_ir_match);
static struct platform_driver meson_ir_driver = {
.probe = meson_ir_probe,
diff --git a/drivers/media/rc/nuvoton-cir.c b/drivers/media/rc/nuvoton-cir.c
index 04fedaa75612..4b78c891eb77 100644
--- a/drivers/media/rc/nuvoton-cir.c
+++ b/drivers/media/rc/nuvoton-cir.c
@@ -48,6 +48,11 @@ static const struct nvt_chip nvt_chips[] = {
{ "NCT6779D", NVT_6779D },
};
+static inline struct device *nvt_get_dev(const struct nvt_dev *nvt)
+{
+ return nvt->rdev->dev.parent;
+}
+
static inline bool is_w83667hg(struct nvt_dev *nvt)
{
return nvt->chip_ver == NVT_W83667HG;
@@ -182,7 +187,7 @@ static ssize_t wakeup_data_show(struct device *dev,
ssize_t buf_len = 0;
int i;
- spin_lock_irqsave(&nvt->nvt_lock, flags);
+ spin_lock_irqsave(&nvt->lock, flags);
fifo_len = nvt_cir_wake_reg_read(nvt, CIR_WAKE_FIFO_COUNT);
fifo_len = min(fifo_len, WAKEUP_MAX_SIZE);
@@ -199,7 +204,7 @@ static ssize_t wakeup_data_show(struct device *dev,
}
buf_len += snprintf(buf + buf_len, PAGE_SIZE - buf_len, "\n");
- spin_unlock_irqrestore(&nvt->nvt_lock, flags);
+ spin_unlock_irqrestore(&nvt->lock, flags);
return buf_len;
}
@@ -243,7 +248,7 @@ static ssize_t wakeup_data_store(struct device *dev,
/* hardcode the tolerance to 10% */
tolerance = DIV_ROUND_UP(count, 10);
- spin_lock_irqsave(&nvt->nvt_lock, flags);
+ spin_lock_irqsave(&nvt->lock, flags);
nvt_clear_cir_wake_fifo(nvt);
nvt_cir_wake_reg_write(nvt, count, CIR_WAKE_FIFO_CMP_DEEP);
@@ -260,7 +265,7 @@ static ssize_t wakeup_data_store(struct device *dev,
nvt_cir_wake_reg_write(nvt, config, CIR_WAKE_IRCON);
- spin_unlock_irqrestore(&nvt->nvt_lock, flags);
+ spin_unlock_irqrestore(&nvt->lock, flags);
ret = len;
out:
@@ -385,6 +390,7 @@ static inline const char *nvt_find_chip(struct nvt_dev *nvt, int id)
/* detect hardware features */
static int nvt_hw_detect(struct nvt_dev *nvt)
{
+ struct device *dev = nvt_get_dev(nvt);
const char *chip_name;
int chip_id;
@@ -405,8 +411,7 @@ static int nvt_hw_detect(struct nvt_dev *nvt)
chip_id = nvt->chip_major << 8 | nvt->chip_minor;
if (chip_id == NVT_INVALID) {
- dev_err(&nvt->pdev->dev,
- "No device found on either EFM port\n");
+ dev_err(dev, "No device found on either EFM port\n");
return -ENODEV;
}
@@ -414,12 +419,11 @@ static int nvt_hw_detect(struct nvt_dev *nvt)
/* warn, but still let the driver load, if we don't know this chip */
if (!chip_name)
- dev_warn(&nvt->pdev->dev,
+ dev_warn(dev,
"unknown chip, id: 0x%02x 0x%02x, it may not work...",
nvt->chip_major, nvt->chip_minor);
else
- dev_info(&nvt->pdev->dev,
- "found %s or compatible: chip id: 0x%02x 0x%02x",
+ dev_info(dev, "found %s or compatible: chip id: 0x%02x 0x%02x",
chip_name, nvt->chip_major, nvt->chip_minor);
return 0;
@@ -586,7 +590,7 @@ static void nvt_enable_wake(struct nvt_dev *nvt)
nvt_efm_disable(nvt);
- spin_lock_irqsave(&nvt->nvt_lock, flags);
+ spin_lock_irqsave(&nvt->lock, flags);
nvt_cir_wake_reg_write(nvt, CIR_WAKE_IRCON_MODE0 | CIR_WAKE_IRCON_RXEN |
CIR_WAKE_IRCON_R | CIR_WAKE_IRCON_RXINV |
@@ -595,11 +599,11 @@ static void nvt_enable_wake(struct nvt_dev *nvt)
nvt_cir_wake_reg_write(nvt, 0xff, CIR_WAKE_IRSTS);
nvt_cir_wake_reg_write(nvt, 0, CIR_WAKE_IREN);
- spin_unlock_irqrestore(&nvt->nvt_lock, flags);
+ spin_unlock_irqrestore(&nvt->lock, flags);
}
#if 0 /* Currently unused */
-/* rx carrier detect only works in learning mode, must be called w/nvt_lock */
+/* rx carrier detect only works in learning mode, must be called w/lock */
static u32 nvt_rx_carrier_detect(struct nvt_dev *nvt)
{
u32 count, carrier, duration = 0;
@@ -616,7 +620,7 @@ static u32 nvt_rx_carrier_detect(struct nvt_dev *nvt)
duration *= SAMPLE_PERIOD;
if (!count || !duration) {
- dev_notice(&nvt->pdev->dev,
+ dev_notice(nvt_get_dev(nvt),
"Unable to determine carrier! (c:%u, d:%u)",
count, duration);
return 0;
@@ -684,7 +688,7 @@ static int nvt_tx_ir(struct rc_dev *dev, unsigned *txbuf, unsigned n)
u8 iren;
int ret;
- spin_lock_irqsave(&nvt->tx.lock, flags);
+ spin_lock_irqsave(&nvt->lock, flags);
ret = min((unsigned)(TX_BUF_LEN / sizeof(unsigned)), n);
nvt->tx.buf_count = (ret * sizeof(unsigned));
@@ -708,13 +712,13 @@ static int nvt_tx_ir(struct rc_dev *dev, unsigned *txbuf, unsigned n)
for (i = 0; i < 9; i++)
nvt_cir_reg_write(nvt, 0x01, CIR_STXFIFO);
- spin_unlock_irqrestore(&nvt->tx.lock, flags);
+ spin_unlock_irqrestore(&nvt->lock, flags);
wait_event(nvt->tx.queue, nvt->tx.tx_state == ST_TX_REQUEST);
- spin_lock_irqsave(&nvt->tx.lock, flags);
+ spin_lock_irqsave(&nvt->lock, flags);
nvt->tx.tx_state = ST_TX_NONE;
- spin_unlock_irqrestore(&nvt->tx.lock, flags);
+ spin_unlock_irqrestore(&nvt->lock, flags);
/* restore enabled interrupts to prior state */
nvt_cir_reg_write(nvt, iren, CIR_IREN);
@@ -781,7 +785,7 @@ static void nvt_process_rx_ir_data(struct nvt_dev *nvt)
static void nvt_handle_rx_fifo_overrun(struct nvt_dev *nvt)
{
- dev_warn(&nvt->pdev->dev, "RX FIFO overrun detected, flushing data!");
+ dev_warn(nvt_get_dev(nvt), "RX FIFO overrun detected, flushing data!");
nvt->pkts = 0;
nvt_clear_cir_fifo(nvt);
@@ -828,14 +832,7 @@ static void nvt_cir_log_irqs(u8 status, u8 iren)
static bool nvt_cir_tx_inactive(struct nvt_dev *nvt)
{
- unsigned long flags;
- u8 tx_state;
-
- spin_lock_irqsave(&nvt->tx.lock, flags);
- tx_state = nvt->tx.tx_state;
- spin_unlock_irqrestore(&nvt->tx.lock, flags);
-
- return tx_state == ST_TX_NONE;
+ return nvt->tx.tx_state == ST_TX_NONE;
}
/* interrupt service routine for incoming and outgoing CIR data */
@@ -843,11 +840,10 @@ static irqreturn_t nvt_cir_isr(int irq, void *data)
{
struct nvt_dev *nvt = data;
u8 status, iren;
- unsigned long flags;
nvt_dbg_verbose("%s firing", __func__);
- spin_lock_irqsave(&nvt->nvt_lock, flags);
+ spin_lock(&nvt->lock);
/*
* Get IR Status register contents. Write 1 to ack/clear
@@ -869,7 +865,7 @@ static irqreturn_t nvt_cir_isr(int irq, void *data)
* logical device is being disabled.
*/
if (status == 0xff && iren == 0xff) {
- spin_unlock_irqrestore(&nvt->nvt_lock, flags);
+ spin_unlock(&nvt->lock);
nvt_dbg_verbose("Spurious interrupt detected");
return IRQ_HANDLED;
}
@@ -878,7 +874,7 @@ static irqreturn_t nvt_cir_isr(int irq, void *data)
* status bit whether the related interrupt source is enabled
*/
if (!(status & iren)) {
- spin_unlock_irqrestore(&nvt->nvt_lock, flags);
+ spin_unlock(&nvt->lock);
nvt_dbg_verbose("%s exiting, IRSTS 0x0", __func__);
return IRQ_NONE;
}
@@ -898,8 +894,6 @@ static irqreturn_t nvt_cir_isr(int irq, void *data)
nvt_get_rx_ir_data(nvt);
}
- spin_unlock_irqrestore(&nvt->nvt_lock, flags);
-
if (status & CIR_IRSTS_TE)
nvt_clear_tx_fifo(nvt);
@@ -907,8 +901,6 @@ static irqreturn_t nvt_cir_isr(int irq, void *data)
unsigned int pos, count;
u8 tmp;
- spin_lock_irqsave(&nvt->tx.lock, flags);
-
pos = nvt->tx.cur_buf_num;
count = nvt->tx.buf_count;
@@ -921,20 +913,17 @@ static irqreturn_t nvt_cir_isr(int irq, void *data)
tmp = nvt_cir_reg_read(nvt, CIR_IREN);
nvt_cir_reg_write(nvt, tmp & ~CIR_IREN_TTR, CIR_IREN);
}
-
- spin_unlock_irqrestore(&nvt->tx.lock, flags);
-
}
if (status & CIR_IRSTS_TFU) {
- spin_lock_irqsave(&nvt->tx.lock, flags);
if (nvt->tx.tx_state == ST_TX_REPLY) {
nvt->tx.tx_state = ST_TX_REQUEST;
wake_up(&nvt->tx.queue);
}
- spin_unlock_irqrestore(&nvt->tx.lock, flags);
}
+ spin_unlock(&nvt->lock);
+
nvt_dbg_verbose("%s done", __func__);
return IRQ_HANDLED;
}
@@ -943,7 +932,7 @@ static void nvt_disable_cir(struct nvt_dev *nvt)
{
unsigned long flags;
- spin_lock_irqsave(&nvt->nvt_lock, flags);
+ spin_lock_irqsave(&nvt->lock, flags);
/* disable CIR interrupts */
nvt_cir_reg_write(nvt, 0, CIR_IREN);
@@ -958,7 +947,7 @@ static void nvt_disable_cir(struct nvt_dev *nvt)
nvt_clear_cir_fifo(nvt);
nvt_clear_tx_fifo(nvt);
- spin_unlock_irqrestore(&nvt->nvt_lock, flags);
+ spin_unlock_irqrestore(&nvt->lock, flags);
/* disable the CIR logical device */
nvt_disable_logical_dev(nvt, LOGICAL_DEV_CIR);
@@ -969,7 +958,7 @@ static int nvt_open(struct rc_dev *dev)
struct nvt_dev *nvt = dev->priv;
unsigned long flags;
- spin_lock_irqsave(&nvt->nvt_lock, flags);
+ spin_lock_irqsave(&nvt->lock, flags);
/* set function enable flags */
nvt_cir_reg_write(nvt, CIR_IRCON_TXEN | CIR_IRCON_RXEN |
@@ -982,7 +971,7 @@ static int nvt_open(struct rc_dev *dev)
/* enable interrupts */
nvt_set_cir_iren(nvt);
- spin_unlock_irqrestore(&nvt->nvt_lock, flags);
+ spin_unlock_irqrestore(&nvt->lock, flags);
/* enable the CIR logical device */
nvt_enable_logical_dev(nvt, LOGICAL_DEV_CIR);
@@ -1002,40 +991,41 @@ static int nvt_probe(struct pnp_dev *pdev, const struct pnp_device_id *dev_id)
{
struct nvt_dev *nvt;
struct rc_dev *rdev;
- int ret = -ENOMEM;
+ int ret;
nvt = devm_kzalloc(&pdev->dev, sizeof(struct nvt_dev), GFP_KERNEL);
if (!nvt)
- return ret;
+ return -ENOMEM;
/* input device for IR remote (and tx) */
- rdev = rc_allocate_device();
- if (!rdev)
- goto exit_free_dev_rdev;
+ nvt->rdev = devm_rc_allocate_device(&pdev->dev);
+ if (!nvt->rdev)
+ return -ENOMEM;
+ rdev = nvt->rdev;
- ret = -ENODEV;
/* activate pnp device */
- if (pnp_activate_dev(pdev) < 0) {
+ ret = pnp_activate_dev(pdev);
+ if (ret) {
dev_err(&pdev->dev, "Could not activate PNP device!\n");
- goto exit_free_dev_rdev;
+ return ret;
}
/* validate pnp resources */
if (!pnp_port_valid(pdev, 0) ||
pnp_port_len(pdev, 0) < CIR_IOREG_LENGTH) {
dev_err(&pdev->dev, "IR PNP Port not valid!\n");
- goto exit_free_dev_rdev;
+ return -EINVAL;
}
if (!pnp_irq_valid(pdev, 0)) {
dev_err(&pdev->dev, "PNP IRQ not valid!\n");
- goto exit_free_dev_rdev;
+ return -EINVAL;
}
if (!pnp_port_valid(pdev, 1) ||
pnp_port_len(pdev, 1) < CIR_IOREG_LENGTH) {
dev_err(&pdev->dev, "Wake PNP Port not valid!\n");
- goto exit_free_dev_rdev;
+ return -EINVAL;
}
nvt->cir_addr = pnp_port_start(pdev, 0);
@@ -1046,17 +1036,15 @@ static int nvt_probe(struct pnp_dev *pdev, const struct pnp_device_id *dev_id)
nvt->cr_efir = CR_EFIR;
nvt->cr_efdr = CR_EFDR;
- spin_lock_init(&nvt->nvt_lock);
- spin_lock_init(&nvt->tx.lock);
+ spin_lock_init(&nvt->lock);
pnp_set_drvdata(pdev, nvt);
- nvt->pdev = pdev;
init_waitqueue_head(&nvt->tx.queue);
ret = nvt_hw_detect(nvt);
if (ret)
- goto exit_free_dev_rdev;
+ return ret;
/* Initialize CIR & CIR Wake Logical Devices */
nvt_efm_enable(nvt);
@@ -1085,7 +1073,6 @@ static int nvt_probe(struct pnp_dev *pdev, const struct pnp_device_id *dev_id)
rdev->input_id.vendor = PCI_VENDOR_ID_WINBOND2;
rdev->input_id.product = nvt->chip_major;
rdev->input_id.version = nvt->chip_minor;
- rdev->dev.parent = &pdev->dev;
rdev->driver_name = NVT_DRIVER_NAME;
rdev->map_name = RC_MAP_RC6_MCE;
rdev->timeout = MS_TO_NS(100);
@@ -1097,29 +1084,27 @@ static int nvt_probe(struct pnp_dev *pdev, const struct pnp_device_id *dev_id)
/* tx bits */
rdev->tx_resolution = XYZ;
#endif
- nvt->rdev = rdev;
-
- ret = rc_register_device(rdev);
+ ret = devm_rc_register_device(&pdev->dev, rdev);
if (ret)
- goto exit_free_dev_rdev;
+ return ret;
- ret = -EBUSY;
/* now claim resources */
if (!devm_request_region(&pdev->dev, nvt->cir_addr,
CIR_IOREG_LENGTH, NVT_DRIVER_NAME))
- goto exit_unregister_device;
+ return -EBUSY;
- if (devm_request_irq(&pdev->dev, nvt->cir_irq, nvt_cir_isr,
- IRQF_SHARED, NVT_DRIVER_NAME, (void *)nvt))
- goto exit_unregister_device;
+ ret = devm_request_irq(&pdev->dev, nvt->cir_irq, nvt_cir_isr,
+ IRQF_SHARED, NVT_DRIVER_NAME, nvt);
+ if (ret)
+ return ret;
if (!devm_request_region(&pdev->dev, nvt->cir_wake_addr,
CIR_IOREG_LENGTH, NVT_DRIVER_NAME "-wake"))
- goto exit_unregister_device;
+ return -EBUSY;
ret = device_create_file(&rdev->dev, &dev_attr_wakeup_data);
if (ret)
- goto exit_unregister_device;
+ return ret;
device_init_wakeup(&pdev->dev, true);
@@ -1130,14 +1115,6 @@ static int nvt_probe(struct pnp_dev *pdev, const struct pnp_device_id *dev_id)
}
return 0;
-
-exit_unregister_device:
- rc_unregister_device(rdev);
- rdev = NULL;
-exit_free_dev_rdev:
- rc_free_device(rdev);
-
- return ret;
}
static void nvt_remove(struct pnp_dev *pdev)
@@ -1150,8 +1127,6 @@ static void nvt_remove(struct pnp_dev *pdev)
/* enable CIR Wake (for IR power-on) */
nvt_enable_wake(nvt);
-
- rc_unregister_device(nvt->rdev);
}
static int nvt_suspend(struct pnp_dev *pdev, pm_message_t state)
@@ -1161,16 +1136,14 @@ static int nvt_suspend(struct pnp_dev *pdev, pm_message_t state)
nvt_dbg("%s called", __func__);
- spin_lock_irqsave(&nvt->tx.lock, flags);
- nvt->tx.tx_state = ST_TX_NONE;
- spin_unlock_irqrestore(&nvt->tx.lock, flags);
+ spin_lock_irqsave(&nvt->lock, flags);
- spin_lock_irqsave(&nvt->nvt_lock, flags);
+ nvt->tx.tx_state = ST_TX_NONE;
/* disable all CIR interrupts */
nvt_cir_reg_write(nvt, 0, CIR_IREN);
- spin_unlock_irqrestore(&nvt->nvt_lock, flags);
+ spin_unlock_irqrestore(&nvt->lock, flags);
/* disable cir logical dev */
nvt_disable_logical_dev(nvt, LOGICAL_DEV_CIR);
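
The ISR hunks above can take plain spin_lock() because nvt_cir_isr() runs in
hardirq context with local interrupts off; only the process-context users of
the same lock still need spin_lock_irqsave(). A minimal sketch of that split,
assuming a hypothetical demo_dev rather than the Nuvoton structures:

#include <linux/interrupt.h>
#include <linux/spinlock.h>

struct demo_dev {
	spinlock_t lock;
	unsigned int shared_state;
};

/* hardirq context: interrupts are already off, plain spin_lock() is enough */
static irqreturn_t demo_isr(int irq, void *data)
{
	struct demo_dev *d = data;

	spin_lock(&d->lock);
	d->shared_state++;
	spin_unlock(&d->lock);

	return IRQ_HANDLED;
}

/* process context: must mask interrupts while holding the same lock */
static void demo_update(struct demo_dev *d)
{
	unsigned long flags;

	spin_lock_irqsave(&d->lock, flags);
	d->shared_state = 0;
	spin_unlock_irqrestore(&d->lock, flags);
}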
diff --git a/drivers/media/rc/nuvoton-cir.h b/drivers/media/rc/nuvoton-cir.h
index acf735fc7170..c41c5765e1d2 100644
--- a/drivers/media/rc/nuvoton-cir.h
+++ b/drivers/media/rc/nuvoton-cir.h
@@ -78,17 +78,15 @@ struct nvt_chip {
};
struct nvt_dev {
- struct pnp_dev *pdev;
struct rc_dev *rdev;
- spinlock_t nvt_lock;
+ spinlock_t lock;
/* for rx */
u8 buf[RX_BUF_LEN];
unsigned int pkts;
struct {
- spinlock_t lock;
u8 buf[TX_BUF_LEN];
unsigned int buf_count;
unsigned int cur_buf_num;
diff --git a/drivers/media/rc/rc-ir-raw.c b/drivers/media/rc/rc-ir-raw.c
index 205ecc602e34..1c42a9f2f290 100644
--- a/drivers/media/rc/rc-ir-raw.c
+++ b/drivers/media/rc/rc-ir-raw.c
@@ -26,8 +26,7 @@ static LIST_HEAD(ir_raw_client_list);
/* Used to handle IR raw handler extensions */
static DEFINE_MUTEX(ir_raw_handler_lock);
static LIST_HEAD(ir_raw_handler_list);
-static DEFINE_MUTEX(available_protocols_lock);
-static u64 available_protocols;
+static atomic64_t available_protocols = ATOMIC64_INIT(0);
static int ir_raw_event_thread(void *data)
{
@@ -234,11 +233,7 @@ EXPORT_SYMBOL_GPL(ir_raw_event_handle);
u64
ir_raw_get_allowed_protocols(void)
{
- u64 protocols;
- mutex_lock(&available_protocols_lock);
- protocols = available_protocols;
- mutex_unlock(&available_protocols_lock);
- return protocols;
+ return atomic64_read(&available_protocols);
}
static int change_protocol(struct rc_dev *dev, u64 *rc_type)
@@ -331,9 +326,7 @@ int ir_raw_handler_register(struct ir_raw_handler *ir_raw_handler)
if (ir_raw_handler->raw_register)
list_for_each_entry(raw, &ir_raw_client_list, list)
ir_raw_handler->raw_register(raw->dev);
- mutex_lock(&available_protocols_lock);
- available_protocols |= ir_raw_handler->protocols;
- mutex_unlock(&available_protocols_lock);
+ atomic64_or(ir_raw_handler->protocols, &available_protocols);
mutex_unlock(&ir_raw_handler_lock);
return 0;
@@ -352,9 +345,7 @@ void ir_raw_handler_unregister(struct ir_raw_handler *ir_raw_handler)
if (ir_raw_handler->raw_unregister)
ir_raw_handler->raw_unregister(raw->dev);
}
- mutex_lock(&available_protocols_lock);
- available_protocols &= ~protocols;
- mutex_unlock(&available_protocols_lock);
+ atomic64_andnot(protocols, &available_protocols);
mutex_unlock(&ir_raw_handler_lock);
}
EXPORT_SYMBOL(ir_raw_handler_unregister);
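
Dropping the mutex around the protocol mask in favour of atomic64_or(),
atomic64_andnot() and atomic64_read() makes the readers lock-free. The same
bitmask pattern in portable C11, as a standalone, runnable sketch (all names
here are illustrative, not rc-core API):

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

static atomic_uint_fast64_t available = ATOMIC_VAR_INIT(0);

static void proto_register(uint64_t bits)
{
	atomic_fetch_or(&available, bits);	/* cf. atomic64_or() */
}

static void proto_unregister(uint64_t bits)
{
	atomic_fetch_and(&available, ~bits);	/* cf. atomic64_andnot() */
}

int main(void)
{
	proto_register(1u << 3);
	proto_register(1u << 5);
	proto_unregister(1u << 3);
	printf("mask=%#llx\n", (unsigned long long)atomic_load(&available));
	return 0;
}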
diff --git a/drivers/media/rc/rc-main.c b/drivers/media/rc/rc-main.c
index d9c1f2ff7119..dedaf38c5ff6 100644
--- a/drivers/media/rc/rc-main.c
+++ b/drivers/media/rc/rc-main.c
@@ -12,6 +12,8 @@
* GNU General Public License for more details.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <media/rc-core.h>
#include <linux/atomic.h>
#include <linux/spinlock.h>
@@ -66,7 +68,7 @@ struct rc_map *rc_map_get(const char *name)
if (!map) {
int rc = request_module("%s", name);
if (rc < 0) {
- printk(KERN_ERR "Couldn't load IR keymap %s\n", name);
+ pr_err("Couldn't load IR keymap %s\n", name);
return NULL;
}
msleep(20); /* Give some time for IR to register */
@@ -75,7 +77,7 @@ struct rc_map *rc_map_get(const char *name)
}
#endif
if (!map) {
- printk(KERN_ERR "IR keymap %s not found\n", name);
+ pr_err("IR keymap %s not found\n", name);
return NULL;
}
@@ -159,6 +161,7 @@ static void ir_free_table(struct rc_map *rc_map)
{
rc_map->size = 0;
kfree(rc_map->name);
+ rc_map->name = NULL;
kfree(rc_map->scan);
rc_map->scan = NULL;
}
@@ -660,8 +663,7 @@ static void ir_do_keydown(struct rc_dev *dev, enum rc_type protocol,
dev->last_toggle = toggle;
dev->last_keycode = keycode;
- IR_dprintk(1, "%s: key down event, "
- "key 0x%04x, protocol 0x%04x, scancode 0x%08x\n",
+ IR_dprintk(1, "%s: key down event, key 0x%04x, protocol 0x%04x, scancode 0x%08x\n",
dev->input_name, keycode, protocol, scancode);
input_report_key(dev->input_dev, keycode, 1);
@@ -1403,6 +1405,34 @@ void rc_free_device(struct rc_dev *dev)
}
EXPORT_SYMBOL_GPL(rc_free_device);
+static void devm_rc_alloc_release(struct device *dev, void *res)
+{
+ rc_free_device(*(struct rc_dev **)res);
+}
+
+struct rc_dev *devm_rc_allocate_device(struct device *dev)
+{
+ struct rc_dev **dr, *rc;
+
+ dr = devres_alloc(devm_rc_alloc_release, sizeof(*dr), GFP_KERNEL);
+ if (!dr)
+ return NULL;
+
+ rc = rc_allocate_device();
+ if (!rc) {
+ devres_free(dr);
+ return NULL;
+ }
+
+ rc->dev.parent = dev;
+ rc->managed_alloc = true;
+ *dr = rc;
+ devres_add(dev, dr);
+
+ return rc;
+}
+EXPORT_SYMBOL_GPL(devm_rc_allocate_device);
+
int rc_register_device(struct rc_dev *dev)
{
static bool raw_init = false; /* raw decoders loaded? */
@@ -1531,6 +1561,33 @@ out_unlock:
}
EXPORT_SYMBOL_GPL(rc_register_device);
+static void devm_rc_release(struct device *dev, void *res)
+{
+ rc_unregister_device(*(struct rc_dev **)res);
+}
+
+int devm_rc_register_device(struct device *parent, struct rc_dev *dev)
+{
+ struct rc_dev **dr;
+ int ret;
+
+ dr = devres_alloc(devm_rc_release, sizeof(*dr), GFP_KERNEL);
+ if (!dr)
+ return -ENOMEM;
+
+ ret = rc_register_device(dev);
+ if (ret) {
+ devres_free(dr);
+ return ret;
+ }
+
+ *dr = dev;
+ devres_add(parent, dr);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(devm_rc_register_device);
+
void rc_unregister_device(struct rc_dev *dev)
{
if (!dev)
@@ -1552,7 +1609,8 @@ void rc_unregister_device(struct rc_dev *dev)
ida_simple_remove(&rc_ida, dev->minor);
- rc_free_device(dev);
+ if (!dev->managed_alloc)
+ rc_free_device(dev);
}
EXPORT_SYMBOL_GPL(rc_unregister_device);
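
Together, devm_rc_allocate_device() and devm_rc_register_device() let a probe
routine lean entirely on devres for teardown, while the managed_alloc flag
keeps rc_unregister_device() from freeing a managed device a second time. A
minimal probe sketch, assuming a hypothetical "demo" platform driver:

#include <linux/module.h>
#include <linux/platform_device.h>
#include <media/rc-core.h>

static int demo_probe(struct platform_device *pdev)
{
	struct rc_dev *rdev;

	rdev = devm_rc_allocate_device(&pdev->dev);
	if (!rdev)
		return -ENOMEM;

	rdev->driver_name = "demo";
	rdev->map_name = RC_MAP_EMPTY;
	/* ... remaining rc_dev setup ... */

	/* no unwind labels or remove() cleanup needed after this point */
	return devm_rc_register_device(&pdev->dev, rdev);
}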
@@ -1565,7 +1623,7 @@ static int __init rc_core_init(void)
{
int rc = class_register(&rc_class);
if (rc) {
- printk(KERN_ERR "rc_core: unable to register rc class\n");
+ pr_err("rc_core: unable to register rc class\n");
return rc;
}
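
The pr_fmt define added at the top of rc-main.c makes every pr_err() in the
file carry the module name automatically. The pattern in isolation (a sketch;
KBUILD_MODNAME is supplied by kbuild at compile time):

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/printk.h>

static void demo_log(void)
{
	/* printed as "<modname>: keymap rc-empty not found" */
	pr_err("keymap %s not found\n", "rc-empty");
}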
diff --git a/drivers/media/rc/redrat3.c b/drivers/media/rc/redrat3.c
index 05ba47bc0b61..2784f5dae398 100644
--- a/drivers/media/rc/redrat3.c
+++ b/drivers/media/rc/redrat3.c
@@ -81,6 +81,8 @@
#define RR3_RC_DET_ENABLE 0xbb
/* Stop capture with the RC receiver */
#define RR3_RC_DET_DISABLE 0xbc
+/* Start capture with the wideband receiver */
+#define RR3_MODSIG_CAPTURE 0xb2
/* Return the status of RC detector capture */
#define RR3_RC_DET_STATUS 0xbd
/* Reset redrat */
@@ -105,11 +107,13 @@
#define RR3_CLK_PER_COUNT 12
/* (RR3_CLK / RR3_CLK_PER_COUNT) */
#define RR3_CLK_CONV_FACTOR 2000000
-/* USB bulk-in IR data endpoint address */
-#define RR3_BULK_IN_EP_ADDR 0x82
+/* USB bulk-in wideband IR data endpoint address */
+#define RR3_WIDE_IN_EP_ADDR 0x81
+/* USB bulk-in narrowband IR data endpoint address */
+#define RR3_NARROW_IN_EP_ADDR 0x82
/* Size of the fixed-length portion of the signal */
-#define RR3_DRIVER_MAXLENS 128
+#define RR3_DRIVER_MAXLENS 255
#define RR3_MAX_SIG_SIZE 512
#define RR3_TIME_UNIT 50
#define RR3_END_OF_SIGNAL 0x7f
@@ -207,15 +211,22 @@ struct redrat3_dev {
struct urb *flash_urb;
u8 flash_in_buf;
+ /* learning */
+ bool wideband;
+ struct usb_ctrlrequest learn_control;
+ struct urb *learn_urb;
+ u8 learn_buf;
+
/* save off the usb device pointer */
struct usb_device *udev;
/* the receive endpoint */
- struct usb_endpoint_descriptor *ep_in;
+ struct usb_endpoint_descriptor *ep_narrow;
/* the buffer to receive data */
void *bulk_in_buf;
/* urb used to read ir data */
- struct urb *read_urb;
+ struct urb *narrow_urb;
+ struct urb *wide_urb;
/* the send endpoint */
struct usb_endpoint_descriptor *ep_out;
@@ -236,23 +247,6 @@ struct redrat3_dev {
char phys[64];
};
-/*
- * redrat3_issue_async
- *
- * Issues an async read to the ir data in port..
- * sets the callback to be redrat3_handle_async
- */
-static void redrat3_issue_async(struct redrat3_dev *rr3)
-{
- int res;
-
- res = usb_submit_urb(rr3->read_urb, GFP_ATOMIC);
- if (res)
- dev_dbg(rr3->dev,
- "%s: receive request FAILED! (res %d, len %d)\n",
- __func__, res, rr3->read_urb->transfer_buffer_length);
-}
-
static void redrat3_dump_fw_error(struct redrat3_dev *rr3, int code)
{
if (!rr3->transmitting && (code != 0x40))
@@ -265,8 +259,7 @@ static void redrat3_dump_fw_error(struct redrat3_dev *rr3, int code)
/* Codes 0x20 through 0x2f are IR Firmware Errors */
case 0x20:
- pr_cont("Initial signal pulse not long enough "
- "to measure carrier frequency\n");
+ pr_cont("Initial signal pulse not long enough to measure carrier frequency\n");
break;
case 0x21:
pr_cont("Not enough length values allocated for signal\n");
@@ -278,18 +271,15 @@ static void redrat3_dump_fw_error(struct redrat3_dev *rr3, int code)
pr_cont("Too many signal repeats\n");
break;
case 0x28:
- pr_cont("Insufficient memory available for IR signal "
- "data memory allocation\n");
+ pr_cont("Insufficient memory available for IR signal data memory allocation\n");
break;
case 0x29:
- pr_cont("Insufficient memory available "
- "for IrDa signal data memory allocation\n");
+ pr_cont("Insufficient memory available for IrDa signal data memory allocation\n");
break;
/* Codes 0x30 through 0x3f are USB Firmware Errors */
case 0x30:
- pr_cont("Insufficient memory available for bulk "
- "transfer structure\n");
+ pr_cont("Insufficient memory available for bulk transfer structure\n");
break;
/*
@@ -301,8 +291,7 @@ static void redrat3_dump_fw_error(struct redrat3_dev *rr3, int code)
pr_cont("Signal capture has been terminated\n");
break;
case 0x41:
- pr_cont("Attempt to set/get and unknown signal I/O "
- "algorithm parameter\n");
+ pr_cont("Attempt to set/get and unknown signal I/O algorithm parameter\n");
break;
case 0x42:
pr_cont("Signal capture already started\n");
@@ -368,15 +357,18 @@ static void redrat3_process_ir_data(struct redrat3_dev *rr3)
unsigned int i, sig_size, single_len, offset, val;
u32 mod_freq;
- if (!rr3) {
- pr_err("%s called with no context!\n", __func__);
- return;
- }
-
dev = rr3->dev;
mod_freq = redrat3_val_to_mod_freq(&rr3->irdata);
dev_dbg(dev, "Got mod_freq of %u\n", mod_freq);
+ if (mod_freq && rr3->wideband) {
+ DEFINE_IR_RAW_EVENT(ev);
+
+ ev.carrier_report = 1;
+ ev.carrier = mod_freq;
+
+ ir_raw_event_store(rr3->rc, &ev);
+ }
/* process each rr3 encoded byte into an int */
sig_size = be16_to_cpu(rr3->irdata.sig_size);
@@ -459,19 +451,31 @@ static int redrat3_enable_detector(struct redrat3_dev *rr3)
return -EIO;
}
- redrat3_issue_async(rr3);
+ ret = usb_submit_urb(rr3->narrow_urb, GFP_KERNEL);
+ if (ret) {
+ dev_err(rr3->dev, "narrow band urb failed: %d", ret);
+ return ret;
+ }
+
+ ret = usb_submit_urb(rr3->wide_urb, GFP_KERNEL);
+ if (ret)
+ dev_err(rr3->dev, "wide band urb failed: %d", ret);
- return 0;
+ return ret;
}
static inline void redrat3_delete(struct redrat3_dev *rr3,
struct usb_device *udev)
{
- usb_kill_urb(rr3->read_urb);
+ usb_kill_urb(rr3->narrow_urb);
+ usb_kill_urb(rr3->wide_urb);
usb_kill_urb(rr3->flash_urb);
- usb_free_urb(rr3->read_urb);
+ usb_kill_urb(rr3->learn_urb);
+ usb_free_urb(rr3->narrow_urb);
+ usb_free_urb(rr3->wide_urb);
usb_free_urb(rr3->flash_urb);
- usb_free_coherent(udev, le16_to_cpu(rr3->ep_in->wMaxPacketSize),
+ usb_free_urb(rr3->learn_urb);
+ usb_free_coherent(udev, le16_to_cpu(rr3->ep_narrow->wMaxPacketSize),
rr3->bulk_in_buf, rr3->dma_in);
kfree(rr3);
@@ -485,10 +489,8 @@ static u32 redrat3_get_timeout(struct redrat3_dev *rr3)
len = sizeof(*tmp);
tmp = kzalloc(len, GFP_KERNEL);
- if (!tmp) {
- dev_warn(rr3->dev, "Memory allocation faillure\n");
+ if (!tmp)
return timeout;
- }
pipe = usb_rcvctrlpipe(rr3->udev, 0);
ret = usb_control_msg(rr3->udev, pipe, RR3_GET_IR_PARAM,
@@ -543,16 +545,14 @@ static void redrat3_reset(struct redrat3_dev *rr3)
struct device *dev = rr3->dev;
int rc, rxpipe, txpipe;
u8 *val;
- int len = sizeof(u8);
+ size_t const len = sizeof(*val);
rxpipe = usb_rcvctrlpipe(udev, 0);
txpipe = usb_sndctrlpipe(udev, 0);
val = kmalloc(len, GFP_KERNEL);
- if (!val) {
- dev_err(dev, "Memory allocation failure\n");
+ if (!val)
return;
- }
*val = 0x01;
rc = usb_control_msg(udev, rxpipe, RR3_RESET,
@@ -590,14 +590,12 @@ static void redrat3_reset(struct redrat3_dev *rr3)
static void redrat3_get_firmware_rev(struct redrat3_dev *rr3)
{
- int rc = 0;
+ int rc;
char *buffer;
- buffer = kzalloc(sizeof(char) * (RR3_FW_VERSION_LEN + 1), GFP_KERNEL);
- if (!buffer) {
- dev_err(rr3->dev, "Memory allocation failure\n");
+ buffer = kcalloc(RR3_FW_VERSION_LEN + 1, sizeof(*buffer), GFP_KERNEL);
+ if (!buffer)
return;
- }
rc = usb_control_msg(rr3->udev, usb_rcvctrlpipe(rr3->udev, 0),
RR3_FW_VERSION,
@@ -704,25 +702,25 @@ out:
/* callback function from USB when async USB request has completed */
static void redrat3_handle_async(struct urb *urb)
{
- struct redrat3_dev *rr3;
+ struct redrat3_dev *rr3 = urb->context;
int ret;
- if (!urb)
- return;
-
- rr3 = urb->context;
- if (!rr3) {
- pr_err("%s called with invalid context!\n", __func__);
- usb_unlink_urb(urb);
- return;
- }
-
switch (urb->status) {
case 0:
ret = redrat3_get_ir_data(rr3, urb->actual_length);
+ if (!ret && rr3->wideband && !rr3->learn_urb->hcpriv) {
+ ret = usb_submit_urb(rr3->learn_urb, GFP_ATOMIC);
+ if (ret)
+ dev_err(rr3->dev, "Failed to submit learning urb: %d",
+ ret);
+ }
+
if (!ret) {
/* no error, prepare to read more */
- redrat3_issue_async(rr3);
+ ret = usb_submit_urb(urb, GFP_ATOMIC);
+ if (ret)
+ dev_err(rr3->dev, "Failed to resubmit urb: %d",
+ ret);
}
break;
@@ -785,11 +783,11 @@ static int redrat3_transmit_ir(struct rc_dev *rcdev, unsigned *txbuf,
/* rr3 will disable rc detector on transmit */
rr3->transmitting = true;
- sample_lens = kzalloc(sizeof(int) * RR3_DRIVER_MAXLENS, GFP_KERNEL);
- if (!sample_lens) {
- ret = -ENOMEM;
- goto out;
- }
+ sample_lens = kcalloc(RR3_DRIVER_MAXLENS,
+ sizeof(*sample_lens),
+ GFP_KERNEL);
+ if (!sample_lens)
+ return -ENOMEM;
irdata = kzalloc(sizeof(*irdata), GFP_KERNEL);
if (!irdata) {
@@ -857,8 +855,8 @@ static int redrat3_transmit_ir(struct rc_dev *rcdev, unsigned *txbuf,
ret = count;
out:
- kfree(sample_lens);
kfree(irdata);
+ kfree(sample_lens);
rr3->transmitting = false;
/* rr3 re-enables rc detector because it was enabled before */
@@ -882,6 +880,42 @@ static void redrat3_brightness_set(struct led_classdev *led_dev, enum
}
}
+static int redrat3_wideband_receiver(struct rc_dev *rcdev, int enable)
+{
+ struct redrat3_dev *rr3 = rcdev->priv;
+ int ret = 0;
+
+ rr3->wideband = enable != 0;
+
+ if (enable) {
+ ret = usb_submit_urb(rr3->learn_urb, GFP_KERNEL);
+ if (ret)
+ dev_err(rr3->dev, "Failed to submit learning urb: %d",
+ ret);
+ }
+
+ return ret;
+}
+
+static void redrat3_learn_complete(struct urb *urb)
+{
+ struct redrat3_dev *rr3 = urb->context;
+
+ switch (urb->status) {
+ case 0:
+ break;
+ case -ECONNRESET:
+ case -ENOENT:
+ case -ESHUTDOWN:
+ usb_unlink_urb(urb);
+ return;
+ case -EPIPE:
+ default:
+ dev_err(rr3->dev, "Error: learn urb status = %d", urb->status);
+ break;
+ }
+}
+
static void redrat3_led_complete(struct urb *urb)
{
struct redrat3_dev *rr3 = urb->context;
@@ -908,19 +942,16 @@ static struct rc_dev *redrat3_init_rc_dev(struct redrat3_dev *rr3)
{
struct device *dev = rr3->dev;
struct rc_dev *rc;
- int ret = -ENODEV;
+ int ret;
u16 prod = le16_to_cpu(rr3->udev->descriptor.idProduct);
rc = rc_allocate_device();
- if (!rc) {
- dev_err(dev, "remote input dev allocation failed\n");
- goto out;
- }
+ if (!rc)
+ return NULL;
- snprintf(rr3->name, sizeof(rr3->name), "RedRat3%s "
- "Infrared Remote Transceiver (%04x:%04x)",
- prod == USB_RR3IIUSB_PRODUCT_ID ? "-II" : "",
- le16_to_cpu(rr3->udev->descriptor.idVendor), prod);
+ snprintf(rr3->name, sizeof(rr3->name),
+ "RedRat3%s Infrared Remote Transceiver",
+ prod == USB_RR3IIUSB_PRODUCT_ID ? "-II" : "");
usb_make_path(rr3->udev, rr3->phys, sizeof(rr3->phys));
@@ -937,6 +968,7 @@ static struct rc_dev *redrat3_init_rc_dev(struct redrat3_dev *rr3)
rc->s_timeout = redrat3_set_timeout;
rc->tx_ir = redrat3_transmit_ir;
rc->s_tx_carrier = redrat3_set_tx_carrier;
+ rc->s_carrier_report = redrat3_wideband_receiver;
rc->driver_name = DRIVER_NAME;
rc->rx_resolution = US_TO_NS(2);
rc->map_name = RC_MAP_HAUPPAUGE;
@@ -962,7 +994,8 @@ static int redrat3_dev_probe(struct usb_interface *intf,
struct usb_host_interface *uhi;
struct redrat3_dev *rr3;
struct usb_endpoint_descriptor *ep;
- struct usb_endpoint_descriptor *ep_in = NULL;
+ struct usb_endpoint_descriptor *ep_narrow = NULL;
+ struct usb_endpoint_descriptor *ep_wide = NULL;
struct usb_endpoint_descriptor *ep_out = NULL;
u8 addr, attrs;
int pipe, i;
@@ -976,15 +1009,16 @@ static int redrat3_dev_probe(struct usb_interface *intf,
addr = ep->bEndpointAddress;
attrs = ep->bmAttributes;
- if ((ep_in == NULL) &&
- ((addr & USB_ENDPOINT_DIR_MASK) == USB_DIR_IN) &&
+ if (((addr & USB_ENDPOINT_DIR_MASK) == USB_DIR_IN) &&
((attrs & USB_ENDPOINT_XFERTYPE_MASK) ==
USB_ENDPOINT_XFER_BULK)) {
dev_dbg(dev, "found bulk-in endpoint at 0x%02x\n",
ep->bEndpointAddress);
- /* data comes in on 0x82, 0x81 is for other data... */
- if (ep->bEndpointAddress == RR3_BULK_IN_EP_ADDR)
- ep_in = ep;
+ /* data comes in on 0x82, 0x81 is for learning */
+ if (ep->bEndpointAddress == RR3_NARROW_IN_EP_ADDR)
+ ep_narrow = ep;
+ if (ep->bEndpointAddress == RR3_WIDE_IN_EP_ADDR)
+ ep_wide = ep;
}
if ((ep_out == NULL) &&
@@ -997,68 +1031,76 @@ static int redrat3_dev_probe(struct usb_interface *intf,
}
}
- if (!ep_in || !ep_out) {
- dev_err(dev, "Couldn't find both in and out endpoints\n");
+ if (!ep_narrow || !ep_out || !ep_wide) {
+ dev_err(dev, "Couldn't find all endpoints\n");
retval = -ENODEV;
goto no_endpoints;
}
/* allocate memory for our device state and initialize it */
rr3 = kzalloc(sizeof(*rr3), GFP_KERNEL);
- if (rr3 == NULL) {
- dev_err(dev, "Memory allocation failure\n");
+ if (!rr3)
goto no_endpoints;
- }
rr3->dev = &intf->dev;
+ rr3->ep_narrow = ep_narrow;
+ rr3->ep_out = ep_out;
+ rr3->udev = udev;
/* set up bulk-in endpoint */
- rr3->read_urb = usb_alloc_urb(0, GFP_KERNEL);
- if (!rr3->read_urb)
- goto error;
+ rr3->narrow_urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (!rr3->narrow_urb)
+ goto redrat_free;
- rr3->ep_in = ep_in;
- rr3->bulk_in_buf = usb_alloc_coherent(udev,
- le16_to_cpu(ep_in->wMaxPacketSize), GFP_KERNEL, &rr3->dma_in);
- if (!rr3->bulk_in_buf) {
- dev_err(dev, "Read buffer allocation failure\n");
- goto error;
- }
-
- pipe = usb_rcvbulkpipe(udev, ep_in->bEndpointAddress);
- usb_fill_bulk_urb(rr3->read_urb, udev, pipe, rr3->bulk_in_buf,
- le16_to_cpu(ep_in->wMaxPacketSize), redrat3_handle_async, rr3);
- rr3->read_urb->transfer_dma = rr3->dma_in;
- rr3->read_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
+ rr3->wide_urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (!rr3->wide_urb)
+ goto redrat_free;
- rr3->ep_out = ep_out;
- rr3->udev = udev;
+ rr3->bulk_in_buf = usb_alloc_coherent(udev,
+ le16_to_cpu(ep_narrow->wMaxPacketSize),
+ GFP_KERNEL, &rr3->dma_in);
+ if (!rr3->bulk_in_buf)
+ goto redrat_free;
+
+ pipe = usb_rcvbulkpipe(udev, ep_narrow->bEndpointAddress);
+ usb_fill_bulk_urb(rr3->narrow_urb, udev, pipe, rr3->bulk_in_buf,
+ le16_to_cpu(ep_narrow->wMaxPacketSize),
+ redrat3_handle_async, rr3);
+ rr3->narrow_urb->transfer_dma = rr3->dma_in;
+ rr3->narrow_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
+
+ pipe = usb_rcvbulkpipe(udev, ep_wide->bEndpointAddress);
+ usb_fill_bulk_urb(rr3->wide_urb, udev, pipe, rr3->bulk_in_buf,
+ le16_to_cpu(ep_narrow->wMaxPacketSize),
+ redrat3_handle_async, rr3);
+ rr3->wide_urb->transfer_dma = rr3->dma_in;
+ rr3->wide_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
redrat3_reset(rr3);
redrat3_get_firmware_rev(rr3);
- /* might be all we need to do? */
- retval = redrat3_enable_detector(rr3);
- if (retval < 0)
- goto error;
-
/* default.. will get overridden by any sends with a freq defined */
rr3->carrier = 38000;
- /* led control */
- rr3->led.name = "redrat3:red:feedback";
- rr3->led.default_trigger = "rc-feedback";
- rr3->led.brightness_set = redrat3_brightness_set;
- retval = led_classdev_register(&intf->dev, &rr3->led);
- if (retval)
- goto error;
-
atomic_set(&rr3->flash, 0);
rr3->flash_urb = usb_alloc_urb(0, GFP_KERNEL);
- if (!rr3->flash_urb) {
- retval = -ENOMEM;
- goto led_free_error;
- }
+ if (!rr3->flash_urb)
+ goto redrat_free;
+
+ /* learn urb */
+ rr3->learn_urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (!rr3->learn_urb)
+ goto redrat_free;
+
+ /* setup packet is 'c0 b2 0000 0000 0001' */
+ rr3->learn_control.bRequestType = 0xc0;
+ rr3->learn_control.bRequest = RR3_MODSIG_CAPTURE;
+ rr3->learn_control.wLength = cpu_to_le16(1);
+
+ usb_fill_control_urb(rr3->learn_urb, udev, usb_rcvctrlpipe(udev, 0),
+ (unsigned char *)&rr3->learn_control,
+ &rr3->learn_buf, sizeof(rr3->learn_buf),
+ redrat3_learn_complete, rr3);
/* setup packet is 'c0 b9 0000 0000 0001' */
rr3->flash_control.bRequestType = 0xc0;
@@ -1070,25 +1112,36 @@ static int redrat3_dev_probe(struct usb_interface *intf,
&rr3->flash_in_buf, sizeof(rr3->flash_in_buf),
redrat3_led_complete, rr3);
+ /* led control */
+ rr3->led.name = "redrat3:red:feedback";
+ rr3->led.default_trigger = "rc-feedback";
+ rr3->led.brightness_set = redrat3_brightness_set;
+ retval = led_classdev_register(&intf->dev, &rr3->led);
+ if (retval)
+ goto redrat_free;
+
rr3->rc = redrat3_init_rc_dev(rr3);
if (!rr3->rc) {
retval = -ENOMEM;
- goto led_free_error;
+ goto led_free;
}
+ /* might be all we need to do? */
+ retval = redrat3_enable_detector(rr3);
+ if (retval < 0)
+ goto led_free;
+
/* we can register the device now, as it is ready */
usb_set_intfdata(intf, rr3);
return 0;
-led_free_error:
+led_free:
led_classdev_unregister(&rr3->led);
-error:
+redrat_free:
redrat3_delete(rr3, rr3->udev);
no_endpoints:
- dev_err(dev, "%s: retval = %x", __func__, retval);
-
return retval;
}
@@ -1097,9 +1150,6 @@ static void redrat3_dev_disconnect(struct usb_interface *intf)
struct usb_device *udev = interface_to_usbdev(intf);
struct redrat3_dev *rr3 = usb_get_intfdata(intf);
- if (!rr3)
- return;
-
usb_set_intfdata(intf, NULL);
rc_unregister_device(rr3->rc);
led_classdev_unregister(&rr3->led);
@@ -1111,7 +1161,8 @@ static int redrat3_dev_suspend(struct usb_interface *intf, pm_message_t message)
struct redrat3_dev *rr3 = usb_get_intfdata(intf);
led_classdev_suspend(&rr3->led);
- usb_kill_urb(rr3->read_urb);
+ usb_kill_urb(rr3->narrow_urb);
+ usb_kill_urb(rr3->wide_urb);
usb_kill_urb(rr3->flash_urb);
return 0;
}
@@ -1120,7 +1171,9 @@ static int redrat3_dev_resume(struct usb_interface *intf)
{
struct redrat3_dev *rr3 = usb_get_intfdata(intf);
- if (usb_submit_urb(rr3->read_urb, GFP_ATOMIC))
+ if (usb_submit_urb(rr3->narrow_urb, GFP_ATOMIC))
+ return -EIO;
+ if (usb_submit_urb(rr3->wide_urb, GFP_ATOMIC))
return -EIO;
led_classdev_resume(&rr3->led);
return 0;
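
As the redrat3_process_ir_data() hunk above shows, a carrier measured on the
wideband receiver is handed to rc-core as a carrier-report event rather than
as ordinary pulse data. Reduced to its essentials (a sketch with a
hypothetical helper name, not code from this patch):

#include <media/rc-core.h>

static void demo_report_carrier(struct rc_dev *rc, u32 carrier_hz)
{
	DEFINE_IR_RAW_EVENT(ev);

	ev.carrier_report = 1;
	ev.carrier = carrier_hz;

	ir_raw_event_store(rc, &ev);
}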
diff --git a/drivers/media/rc/serial_ir.c b/drivers/media/rc/serial_ir.c
new file mode 100644
index 000000000000..436bd58b5f05
--- /dev/null
+++ b/drivers/media/rc/serial_ir.c
@@ -0,0 +1,844 @@
+/*
+ * serial_ir.c
+ *
+ * serial_ir - Device driver that records pulse- and pause-lengths
+ * (space-lengths) between DDCD events on a serial port.
+ *
+ * Copyright (C) 1996,97 Ralph Metzler <rjkm@thp.uni-koeln.de>
+ * Copyright (C) 1998 Trent Piepho <xyzzy@u.washington.edu>
+ * Copyright (C) 1998 Ben Pfaff <blp@gnu.org>
+ * Copyright (C) 1999 Christoph Bartelmus <lirc@bartelmus.de>
+ * Copyright (C) 2007 Andrei Tanas <andrei@tanas.ca> (suspend/resume support)
+ * Copyright (C) 2016 Sean Young <sean@mess.org> (port to rc-core)
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/serial_reg.h>
+#include <linux/types.h>
+#include <linux/delay.h>
+#include <linux/platform_device.h>
+#include <linux/spinlock.h>
+#include <media/rc-core.h>
+
+struct serial_ir_hw {
+ int signal_pin;
+ int signal_pin_change;
+ u8 on;
+ u8 off;
+ unsigned set_send_carrier:1;
+ unsigned set_duty_cycle:1;
+ void (*send_pulse)(unsigned int length, ktime_t edge);
+ void (*send_space)(void);
+ spinlock_t lock;
+};
+
+#define IR_HOMEBREW 0
+#define IR_IRDEO 1
+#define IR_IRDEO_REMOTE 2
+#define IR_ANIMAX 3
+#define IR_IGOR 4
+
+/* module parameters */
+static int type;
+static int io;
+static int irq;
+static bool iommap;
+static int ioshift;
+static bool softcarrier = true;
+static bool share_irq;
+static int sense = -1; /* -1 = auto, 0 = active high, 1 = active low */
+static bool txsense; /* 0 = active high, 1 = active low */
+
+/* forward declarations */
+static void send_pulse_irdeo(unsigned int length, ktime_t edge);
+static void send_space_irdeo(void);
+#ifdef CONFIG_IR_SERIAL_TRANSMITTER
+static void send_pulse_homebrew(unsigned int length, ktime_t edge);
+static void send_space_homebrew(void);
+#endif
+
+static struct serial_ir_hw hardware[] = {
+ [IR_HOMEBREW] = {
+ .lock = __SPIN_LOCK_UNLOCKED(hardware[IR_HOMEBREW].lock),
+ .signal_pin = UART_MSR_DCD,
+ .signal_pin_change = UART_MSR_DDCD,
+ .on = (UART_MCR_RTS | UART_MCR_OUT2 | UART_MCR_DTR),
+ .off = (UART_MCR_RTS | UART_MCR_OUT2),
+#ifdef CONFIG_IR_SERIAL_TRANSMITTER
+ .send_pulse = send_pulse_homebrew,
+ .send_space = send_space_homebrew,
+ .set_send_carrier = true,
+ .set_duty_cycle = true,
+#endif
+ },
+
+ [IR_IRDEO] = {
+ .lock = __SPIN_LOCK_UNLOCKED(hardware[IR_IRDEO].lock),
+ .signal_pin = UART_MSR_DSR,
+ .signal_pin_change = UART_MSR_DDSR,
+ .on = UART_MCR_OUT2,
+ .off = (UART_MCR_RTS | UART_MCR_DTR | UART_MCR_OUT2),
+ .send_pulse = send_pulse_irdeo,
+ .send_space = send_space_irdeo,
+ .set_duty_cycle = true,
+ },
+
+ [IR_IRDEO_REMOTE] = {
+ .lock = __SPIN_LOCK_UNLOCKED(hardware[IR_IRDEO_REMOTE].lock),
+ .signal_pin = UART_MSR_DSR,
+ .signal_pin_change = UART_MSR_DDSR,
+ .on = (UART_MCR_RTS | UART_MCR_DTR | UART_MCR_OUT2),
+ .off = (UART_MCR_RTS | UART_MCR_DTR | UART_MCR_OUT2),
+ .send_pulse = send_pulse_irdeo,
+ .send_space = send_space_irdeo,
+ .set_duty_cycle = true,
+ },
+
+ [IR_ANIMAX] = {
+ .lock = __SPIN_LOCK_UNLOCKED(hardware[IR_ANIMAX].lock),
+ .signal_pin = UART_MSR_DCD,
+ .signal_pin_change = UART_MSR_DDCD,
+ .on = 0,
+ .off = (UART_MCR_RTS | UART_MCR_DTR | UART_MCR_OUT2),
+ },
+
+ [IR_IGOR] = {
+ .lock = __SPIN_LOCK_UNLOCKED(hardware[IR_IGOR].lock),
+ .signal_pin = UART_MSR_DSR,
+ .signal_pin_change = UART_MSR_DDSR,
+ .on = (UART_MCR_RTS | UART_MCR_OUT2 | UART_MCR_DTR),
+ .off = (UART_MCR_RTS | UART_MCR_OUT2),
+#ifdef CONFIG_IR_SERIAL_TRANSMITTER
+ .send_pulse = send_pulse_homebrew,
+ .send_space = send_space_homebrew,
+ .set_send_carrier = true,
+ .set_duty_cycle = true,
+#endif
+ },
+};
+
+#define RS_ISR_PASS_LIMIT 256
+
+struct serial_ir {
+ ktime_t lastkt;
+ struct rc_dev *rcdev;
+ struct platform_device *pdev;
+
+ unsigned int freq;
+ unsigned int duty_cycle;
+
+ unsigned int pulse_width, space_width;
+};
+
+static struct serial_ir serial_ir;
+
+/* fetch serial input packet (1 byte) from register offset */
+static u8 sinp(int offset)
+{
+ if (iommap)
+ /* the register is memory-mapped */
+ offset <<= ioshift;
+
+ return inb(io + offset);
+}
+
+/* write serial output packet (1 byte) of value to register offset */
+static void soutp(int offset, u8 value)
+{
+ if (iommap)
+ /* the register is memory-mapped */
+ offset <<= ioshift;
+
+ outb(value, io + offset);
+}
+
+static void on(void)
+{
+ if (txsense)
+ soutp(UART_MCR, hardware[type].off);
+ else
+ soutp(UART_MCR, hardware[type].on);
+}
+
+static void off(void)
+{
+ if (txsense)
+ soutp(UART_MCR, hardware[type].on);
+ else
+ soutp(UART_MCR, hardware[type].off);
+}
+
+static void init_timing_params(unsigned int new_duty_cycle,
+ unsigned int new_freq)
+{
+ serial_ir.duty_cycle = new_duty_cycle;
+ serial_ir.freq = new_freq;
+
+ serial_ir.pulse_width = DIV_ROUND_CLOSEST(
+ new_duty_cycle * NSEC_PER_SEC, new_freq * 100l);
+ serial_ir.space_width = DIV_ROUND_CLOSEST(
+ (100l - new_duty_cycle) * NSEC_PER_SEC, new_freq * 100l);
+}
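+
+/*
+ * Worked example for init_timing_params() above (a sketch, not from the
+ * original source): at 38 kHz and 50% duty cycle,
+ * pulse_width = 50 * 1e9 / (38000 * 100) ~= 13158 ns, space_width comes
+ * out the same, and together they make one ~26316 ns carrier period.
+ */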
+
+static void send_pulse_irdeo(unsigned int length, ktime_t target)
+{
+ long rawbits;
+ int i;
+ unsigned char output;
+ unsigned char chunk, shifted;
+
+ /* how many bits have to be sent ? */
+ rawbits = length * 1152 / 10000;
+ if (serial_ir.duty_cycle > 50)
+ chunk = 3;
+ else
+ chunk = 1;
+ for (i = 0, output = 0x7f; rawbits > 0; rawbits -= 3) {
+ shifted = chunk << (i * 3);
+ shifted >>= 1;
+ output &= (~shifted);
+ i++;
+ if (i == 3) {
+ soutp(UART_TX, output);
+ while (!(sinp(UART_LSR) & UART_LSR_THRE))
+ ;
+ output = 0x7f;
+ i = 0;
+ }
+ }
+ if (i != 0) {
+ soutp(UART_TX, output);
+ while (!(sinp(UART_LSR) & UART_LSR_TEMT))
+ ;
+ }
+}
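+
+/*
+ * Worked example for send_pulse_irdeo() above (a sketch, not from the
+ * original source): at 115200 baud one bit time is ~8.68 us, so a 1000 us
+ * pulse maps to 1000 * 1152 / 10000 = 115 raw bits on the wire.
+ */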
+
+static void send_space_irdeo(void)
+{
+}
+
+#ifdef CONFIG_IR_SERIAL_TRANSMITTER
+static void send_pulse_homebrew_softcarrier(unsigned int length, ktime_t edge)
+{
+ ktime_t now, target = ktime_add_us(edge, length);
+ /*
+ * delta should never exceed 4 seconds and on m68k
+ * ndelay(s64) does not compile; so use s32 rather than s64.
+ */
+ s32 delta;
+
+ for (;;) {
+ now = ktime_get();
+ if (ktime_compare(now, target) >= 0)
+ break;
+ on();
+ edge = ktime_add_ns(edge, serial_ir.pulse_width);
+ delta = ktime_to_ns(ktime_sub(edge, now));
+ if (delta > 0)
+ ndelay(delta);
+ now = ktime_get();
+ off();
+ if (ktime_compare(now, target) >= 0)
+ break;
+ edge = ktime_add_ns(edge, serial_ir.space_width);
+ delta = ktime_to_ns(ktime_sub(edge, now));
+ if (delta > 0)
+ ndelay(delta);
+ }
+}
+
+static void send_pulse_homebrew(unsigned int length, ktime_t edge)
+{
+ if (softcarrier)
+ send_pulse_homebrew_softcarrier(length, edge);
+ else
+ on();
+}
+
+static void send_space_homebrew(void)
+{
+ off();
+}
+#endif
+
+static void frbwrite(unsigned int l, bool is_pulse)
+{
+ /* simple noise filter */
+ static unsigned int ptr, pulse, space;
+ DEFINE_IR_RAW_EVENT(ev);
+
+ if (ptr > 0 && is_pulse) {
+ pulse += l;
+ if (pulse > 250000) {
+ ev.duration = space;
+ ev.pulse = false;
+ ir_raw_event_store_with_filter(serial_ir.rcdev, &ev);
+ ev.duration = pulse;
+ ev.pulse = true;
+ ir_raw_event_store_with_filter(serial_ir.rcdev, &ev);
+ ptr = 0;
+ pulse = 0;
+ }
+ return;
+ }
+ if (!is_pulse) {
+ if (ptr == 0) {
+ if (l > 20000000) {
+ space = l;
+ ptr++;
+ return;
+ }
+ } else {
+ if (l > 20000000) {
+ space += pulse;
+ if (space > IR_MAX_DURATION)
+ space = IR_MAX_DURATION;
+ space += l;
+ if (space > IR_MAX_DURATION)
+ space = IR_MAX_DURATION;
+ pulse = 0;
+ return;
+ }
+
+ ev.duration = space;
+ ev.pulse = false;
+ ir_raw_event_store_with_filter(serial_ir.rcdev, &ev);
+ ev.duration = pulse;
+ ev.pulse = true;
+ ir_raw_event_store_with_filter(serial_ir.rcdev, &ev);
+ ptr = 0;
+ pulse = 0;
+ }
+ }
+
+ ev.duration = l;
+ ev.pulse = is_pulse;
+ ir_raw_event_store_with_filter(serial_ir.rcdev, &ev);
+}
+
+static irqreturn_t serial_ir_irq_handler(int i, void *blah)
+{
+ ktime_t kt;
+ int counter, dcd;
+ u8 status;
+ ktime_t delkt;
+ unsigned int data;
+ static int last_dcd = -1;
+
+ if ((sinp(UART_IIR) & UART_IIR_NO_INT)) {
+ /* not our interrupt */
+ return IRQ_NONE;
+ }
+
+ counter = 0;
+ do {
+ counter++;
+ status = sinp(UART_MSR);
+ if (counter > RS_ISR_PASS_LIMIT) {
+ dev_err(&serial_ir.pdev->dev, "Trapped in interrupt");
+ break;
+ }
+ if ((status & hardware[type].signal_pin_change) &&
+ sense != -1) {
+ /* get current time */
+ kt = ktime_get();
+
+ /*
+ * The driver needs to know if your receiver is
+ * active high or active low, or the space/pulse
+ * sense could be inverted.
+ */
+
+ /* calc time since last interrupt in nanoseconds */
+ dcd = (status & hardware[type].signal_pin) ? 1 : 0;
+
+ if (dcd == last_dcd) {
+ dev_err(&serial_ir.pdev->dev,
+ "ignoring spike: %d %d %lldns %lldns\n",
+ dcd, sense, ktime_to_ns(kt),
+ ktime_to_ns(serial_ir.lastkt));
+ continue;
+ }
+
+ delkt = ktime_sub(kt, serial_ir.lastkt);
+ if (ktime_compare(delkt, ktime_set(15, 0)) > 0) {
+ data = IR_MAX_DURATION; /* really long time */
+ if (!(dcd ^ sense)) {
+ /* sanity check */
+ dev_err(&serial_ir.pdev->dev,
+ "dcd unexpected: %d %d %lldns %lldns\n",
+ dcd, sense, ktime_to_ns(kt),
+ ktime_to_ns(serial_ir.lastkt));
+ /*
+ * detecting pulse while this
+ * MUST be a space!
+ */
+ sense = sense ? 0 : 1;
+ }
+ } else {
+ data = ktime_to_ns(delkt);
+ }
+ frbwrite(data, !(dcd ^ sense));
+ serial_ir.lastkt = kt;
+ last_dcd = dcd;
+ ir_raw_event_handle(serial_ir.rcdev);
+ }
+ } while (!(sinp(UART_IIR) & UART_IIR_NO_INT)); /* still pending ? */
+ return IRQ_HANDLED;
+}
+
+static int hardware_init_port(void)
+{
+ u8 scratch, scratch2, scratch3;
+
+ /*
+ * This is a simple port existence test, borrowed from the autoconfig
+ * function in drivers/tty/serial/8250/8250_port.c
+ */
+ scratch = sinp(UART_IER);
+ soutp(UART_IER, 0);
+#ifdef __i386__
+ outb(0xff, 0x080);
+#endif
+ scratch2 = sinp(UART_IER) & 0x0f;
+ soutp(UART_IER, 0x0f);
+#ifdef __i386__
+ outb(0x00, 0x080);
+#endif
+ scratch3 = sinp(UART_IER) & 0x0f;
+ soutp(UART_IER, scratch);
+ if (scratch2 != 0 || scratch3 != 0x0f) {
+ /* we fail, there's nothing here */
+ pr_err("port existence test failed, cannot continue\n");
+ return -ENODEV;
+ }
+
+ /* Set DLAB 0. */
+ soutp(UART_LCR, sinp(UART_LCR) & (~UART_LCR_DLAB));
+
+ /* First of all, disable all interrupts */
+ soutp(UART_IER, sinp(UART_IER) &
+ (~(UART_IER_MSI | UART_IER_RLSI | UART_IER_THRI | UART_IER_RDI)));
+
+ /* Clear registers. */
+ sinp(UART_LSR);
+ sinp(UART_RX);
+ sinp(UART_IIR);
+ sinp(UART_MSR);
+
+ /* Set line for power source */
+ off();
+
+ /* Clear registers again to be sure. */
+ sinp(UART_LSR);
+ sinp(UART_RX);
+ sinp(UART_IIR);
+ sinp(UART_MSR);
+
+ switch (type) {
+ case IR_IRDEO:
+ case IR_IRDEO_REMOTE:
+ /* setup port to 7N1 @ 115200 Baud */
+ /* 7N1+start = 9 bits at 115200 ~ 3 bits at 38kHz */
+
+ /* Set DLAB 1. */
+ soutp(UART_LCR, sinp(UART_LCR) | UART_LCR_DLAB);
+ /* Set divisor to 1 => 115200 Baud */
+ soutp(UART_DLM, 0);
+ soutp(UART_DLL, 1);
+ /* Set DLAB 0 + 7N1 */
+ soutp(UART_LCR, UART_LCR_WLEN7);
+ /* THR interrupt already disabled at this point */
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static int serial_ir_probe(struct platform_device *dev)
+{
+ int i, nlow, nhigh, result;
+
+ result = devm_request_irq(&dev->dev, irq, serial_ir_irq_handler,
+ share_irq ? IRQF_SHARED : 0,
+ KBUILD_MODNAME, &hardware);
+ if (result < 0) {
+ if (result == -EBUSY)
+ dev_err(&dev->dev, "IRQ %d busy\n", irq);
+ else if (result == -EINVAL)
+ dev_err(&dev->dev, "Bad irq number or handler\n");
+ return result;
+ }
+
+ /* Reserve io region. */
+ if ((iommap &&
+ (devm_request_mem_region(&dev->dev, iommap, 8 << ioshift,
+ KBUILD_MODNAME) == NULL)) ||
+ (!iommap && (devm_request_region(&dev->dev, io, 8,
+ KBUILD_MODNAME) == NULL))) {
+ dev_err(&dev->dev, "port %04x already in use\n", io);
+ dev_warn(&dev->dev, "use 'setserial /dev/ttySX uart none'\n");
+ dev_warn(&dev->dev,
+ "or compile the serial port driver as module and\n");
+ dev_warn(&dev->dev, "make sure this module is loaded first\n");
+ return -EBUSY;
+ }
+
+ result = hardware_init_port();
+ if (result < 0)
+ return result;
+
+ /* Initialize pulse/space widths */
+ init_timing_params(50, 38000);
+
+ /* If pin is high, then this must be an active low receiver. */
+ if (sense == -1) {
+ /* wait 1/2 sec for the power supply */
+ msleep(500);
+
+ /*
+ * probe 9 times every 0.04s, collect "votes" for
+ * active high/low
+ */
+ nlow = 0;
+ nhigh = 0;
+ for (i = 0; i < 9; i++) {
+ if (sinp(UART_MSR) & hardware[type].signal_pin)
+ nlow++;
+ else
+ nhigh++;
+ msleep(40);
+ }
+ sense = nlow >= nhigh ? 1 : 0;
+ dev_info(&dev->dev, "auto-detected active %s receiver\n",
+ sense ? "low" : "high");
+ } else
+ dev_info(&dev->dev, "Manually using active %s receiver\n",
+ sense ? "low" : "high");
+
+ dev_dbg(&dev->dev, "Interrupt %d, port %04x obtained\n", irq, io);
+ return 0;
+}
+
+static int serial_ir_open(struct rc_dev *rcdev)
+{
+ unsigned long flags;
+
+ /* initialize timestamp */
+ serial_ir.lastkt = ktime_get();
+
+ spin_lock_irqsave(&hardware[type].lock, flags);
+
+ /* Set DLAB 0. */
+ soutp(UART_LCR, sinp(UART_LCR) & (~UART_LCR_DLAB));
+
+ soutp(UART_IER, sinp(UART_IER) | UART_IER_MSI);
+
+ spin_unlock_irqrestore(&hardware[type].lock, flags);
+
+ return 0;
+}
+
+static void serial_ir_close(struct rc_dev *rcdev)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&hardware[type].lock, flags);
+
+ /* Set DLAB 0. */
+ soutp(UART_LCR, sinp(UART_LCR) & (~UART_LCR_DLAB));
+
+ /* First of all, disable all interrupts */
+ soutp(UART_IER, sinp(UART_IER) &
+ (~(UART_IER_MSI | UART_IER_RLSI | UART_IER_THRI | UART_IER_RDI)));
+ spin_unlock_irqrestore(&hardware[type].lock, flags);
+}
+
+static int serial_ir_tx(struct rc_dev *dev, unsigned int *txbuf,
+ unsigned int count)
+{
+ unsigned long flags;
+ ktime_t edge;
+ s64 delta;
+ int i;
+
+ spin_lock_irqsave(&hardware[type].lock, flags);
+ if (type == IR_IRDEO) {
+ /* DTR, RTS down */
+ on();
+ }
+
+ edge = ktime_get();
+ for (i = 0; i < count; i++) {
+ if (i % 2)
+ hardware[type].send_space();
+ else
+ hardware[type].send_pulse(txbuf[i], edge);
+
+ edge = ktime_add_us(edge, txbuf[i]);
+ delta = ktime_us_delta(edge, ktime_get());
+ if (delta > 25) {
+ spin_unlock_irqrestore(&hardware[type].lock, flags);
+ usleep_range(delta - 25, delta + 25);
+ spin_lock_irqsave(&hardware[type].lock, flags);
+ } else if (delta > 0) {
+ udelay(delta);
+ }
+ }
+ off();
+ spin_unlock_irqrestore(&hardware[type].lock, flags);
+ return count;
+}
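+
+/*
+ * Note on the 25 us threshold in serial_ir_tx() above (a hedged sketch of
+ * the rationale, not from the original source): usleep_range() is only
+ * dependable for longer delays, so gaps of 25 us or less are busy-waited
+ * with udelay() while longer ones drop the lock and sleep.
+ */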
+
+static int serial_ir_tx_duty_cycle(struct rc_dev *dev, u32 cycle)
+{
+ init_timing_params(cycle, serial_ir.freq);
+ return 0;
+}
+
+static int serial_ir_tx_carrier(struct rc_dev *dev, u32 carrier)
+{
+ if (carrier > 500000 || carrier < 20000)
+ return -EINVAL;
+
+ init_timing_params(serial_ir.duty_cycle, carrier);
+ return 0;
+}
+
+static int serial_ir_suspend(struct platform_device *dev,
+ pm_message_t state)
+{
+ /* Set DLAB 0. */
+ soutp(UART_LCR, sinp(UART_LCR) & (~UART_LCR_DLAB));
+
+ /* Disable all interrupts */
+ soutp(UART_IER, sinp(UART_IER) &
+ (~(UART_IER_MSI | UART_IER_RLSI | UART_IER_THRI | UART_IER_RDI)));
+
+ /* Clear registers. */
+ sinp(UART_LSR);
+ sinp(UART_RX);
+ sinp(UART_IIR);
+ sinp(UART_MSR);
+
+ return 0;
+}
+
+static int serial_ir_resume(struct platform_device *dev)
+{
+ unsigned long flags;
+ int result;
+
+ result = hardware_init_port();
+ if (result < 0)
+ return result;
+
+ spin_lock_irqsave(&hardware[type].lock, flags);
+ /* Enable Interrupt */
+ serial_ir.lastkt = ktime_get();
+ soutp(UART_IER, sinp(UART_IER) | UART_IER_MSI);
+ off();
+
+ spin_unlock_irqrestore(&hardware[type].lock, flags);
+
+ return 0;
+}
+
+static struct platform_driver serial_ir_driver = {
+ .probe = serial_ir_probe,
+ .suspend = serial_ir_suspend,
+ .resume = serial_ir_resume,
+ .driver = {
+ .name = "serial_ir",
+ },
+};
+
+static int __init serial_ir_init(void)
+{
+ int result;
+
+ result = platform_driver_register(&serial_ir_driver);
+ if (result)
+ return result;
+
+ serial_ir.pdev = platform_device_alloc("serial_ir", 0);
+ if (!serial_ir.pdev) {
+ result = -ENOMEM;
+ goto exit_driver_unregister;
+ }
+
+ result = platform_device_add(serial_ir.pdev);
+ if (result)
+ goto exit_device_put;
+
+ return 0;
+
+exit_device_put:
+ platform_device_put(serial_ir.pdev);
+exit_driver_unregister:
+ platform_driver_unregister(&serial_ir_driver);
+ return result;
+}
+
+static void serial_ir_exit(void)
+{
+ platform_device_unregister(serial_ir.pdev);
+ platform_driver_unregister(&serial_ir_driver);
+}
+
+static int __init serial_ir_init_module(void)
+{
+ struct rc_dev *rcdev;
+ int result;
+
+ switch (type) {
+ case IR_HOMEBREW:
+ case IR_IRDEO:
+ case IR_IRDEO_REMOTE:
+ case IR_ANIMAX:
+ case IR_IGOR:
+ /* if nothing specified, use ttyS0/com1 and irq 4 */
+ io = io ? io : 0x3f8;
+ irq = irq ? irq : 4;
+ break;
+ default:
+ return -EINVAL;
+ }
+ if (!softcarrier) {
+ switch (type) {
+ case IR_HOMEBREW:
+ case IR_IGOR:
+ hardware[type].set_send_carrier = false;
+ hardware[type].set_duty_cycle = false;
+ break;
+ }
+ }
+
+ /* make sure sense is either -1, 0, or 1 */
+ if (sense != -1)
+ sense = !!sense;
+
+ result = serial_ir_init();
+ if (result)
+ return result;
+
+ rcdev = devm_rc_allocate_device(&serial_ir.pdev->dev);
+ if (!rcdev) {
+ result = -ENOMEM;
+ goto serial_cleanup;
+ }
+
+ if (hardware[type].send_pulse && hardware[type].send_space)
+ rcdev->tx_ir = serial_ir_tx;
+ if (hardware[type].set_send_carrier)
+ rcdev->s_tx_carrier = serial_ir_tx_carrier;
+ if (hardware[type].set_duty_cycle)
+ rcdev->s_tx_duty_cycle = serial_ir_tx_duty_cycle;
+
+ switch (type) {
+ case IR_HOMEBREW:
+ rcdev->input_name = "Serial IR type home-brew";
+ break;
+ case IR_IRDEO:
+ rcdev->input_name = "Serial IR type IRdeo";
+ break;
+ case IR_IRDEO_REMOTE:
+ rcdev->input_name = "Serial IR type IRdeo remote";
+ break;
+ case IR_ANIMAX:
+ rcdev->input_name = "Serial IR type AnimaX";
+ break;
+ case IR_IGOR:
+ rcdev->input_name = "Serial IR type IgorPlug";
+ break;
+ }
+
+ rcdev->input_phys = KBUILD_MODNAME "/input0";
+ rcdev->input_id.bustype = BUS_HOST;
+ rcdev->input_id.vendor = 0x0001;
+ rcdev->input_id.product = 0x0001;
+ rcdev->input_id.version = 0x0100;
+ rcdev->open = serial_ir_open;
+ rcdev->close = serial_ir_close;
+ rcdev->dev.parent = &serial_ir.pdev->dev;
+ rcdev->driver_type = RC_DRIVER_IR_RAW;
+ rcdev->allowed_protocols = RC_BIT_ALL;
+ rcdev->driver_name = KBUILD_MODNAME;
+ rcdev->map_name = RC_MAP_RC6_MCE;
+ rcdev->timeout = IR_DEFAULT_TIMEOUT;
+ rcdev->rx_resolution = 250000;
+
+ serial_ir.rcdev = rcdev;
+
+ result = rc_register_device(rcdev);
+
+ if (!result)
+ return 0;
+serial_cleanup:
+ serial_ir_exit();
+ return result;
+}
+
+static void __exit serial_ir_exit_module(void)
+{
+ rc_unregister_device(serial_ir.rcdev);
+ serial_ir_exit();
+}
+
+module_init(serial_ir_init_module);
+module_exit(serial_ir_exit_module);
+
+MODULE_DESCRIPTION("Infra-red receiver driver for serial ports.");
+MODULE_AUTHOR("Ralph Metzler, Trent Piepho, Ben Pfaff, Christoph Bartelmus, Andrei Tanas");
+MODULE_LICENSE("GPL");
+
+module_param(type, int, 0444);
+MODULE_PARM_DESC(type, "Hardware type (0 = home-brew, 1 = IRdeo, 2 = IRdeo Remote, 3 = AnimaX, 4 = IgorPlug)");
+
+module_param(io, int, 0444);
+MODULE_PARM_DESC(io, "I/O address base (0x3f8 or 0x2f8)");
+
+/* some architectures (e.g. intel xscale) have memory mapped registers */
+module_param(iommap, bool, 0444);
+MODULE_PARM_DESC(iommap, "physical base for memory mapped I/O (0 = no memory mapped io)");
+
+/*
+ * some architectures (e.g. intel xscale) align the 8bit serial registers
+ * on 32bit word boundaries.
+ * See linux-kernel/drivers/tty/serial/8250/8250.c serial_in()/out()
+ */
+module_param(ioshift, int, 0444);
+MODULE_PARM_DESC(ioshift, "shift I/O register offset (0 = no shift)");
+
+module_param(irq, int, 0444);
+MODULE_PARM_DESC(irq, "Interrupt (4 or 3)");
+
+module_param(share_irq, bool, 0444);
+MODULE_PARM_DESC(share_irq, "Share interrupts (0 = off, 1 = on)");
+
+module_param(sense, int, 0444);
+MODULE_PARM_DESC(sense, "Override autodetection of IR receiver circuit (0 = active high, 1 = active low)");
+
+#ifdef CONFIG_IR_SERIAL_TRANSMITTER
+module_param(txsense, bool, 0444);
+MODULE_PARM_DESC(txsense, "Sense of transmitter circuit (0 = active high, 1 = active low)");
+#endif
+
+module_param(softcarrier, bool, 0444);
+MODULE_PARM_DESC(softcarrier, "Software carrier (0 = off, 1 = on, default on)");
diff --git a/drivers/media/rc/streamzap.c b/drivers/media/rc/streamzap.c
index 4004260a7c69..53f9b0af358a 100644
--- a/drivers/media/rc/streamzap.c
+++ b/drivers/media/rc/streamzap.c
@@ -297,8 +297,7 @@ static struct rc_dev *streamzap_init_rc_dev(struct streamzap_ir *sz)
goto out;
}
- snprintf(sz->name, sizeof(sz->name), "Streamzap PC Remote Infrared "
- "Receiver (%04x:%04x)",
+ snprintf(sz->name, sizeof(sz->name), "Streamzap PC Remote Infrared Receiver (%04x:%04x)",
le16_to_cpu(sz->usbdev->descriptor.idVendor),
le16_to_cpu(sz->usbdev->descriptor.idProduct));
usb_make_path(sz->usbdev, sz->phys, sizeof(sz->phys));
@@ -364,15 +363,15 @@ static int streamzap_probe(struct usb_interface *intf,
sz->endpoint = &(iface_host->endpoint[0].desc);
if (!usb_endpoint_dir_in(sz->endpoint)) {
- dev_err(&intf->dev, "%s: endpoint doesn't match input device "
- "02%02x\n", __func__, sz->endpoint->bEndpointAddress);
+ dev_err(&intf->dev, "%s: endpoint doesn't match input device 02%02x\n",
+ __func__, sz->endpoint->bEndpointAddress);
retval = -ENODEV;
goto free_sz;
}
if (!usb_endpoint_xfer_int(sz->endpoint)) {
- dev_err(&intf->dev, "%s: endpoint attributes don't match xfer "
- "02%02x\n", __func__, sz->endpoint->bmAttributes);
+ dev_err(&intf->dev, "%s: endpoint attributes don't match xfer 02%02x\n",
+ __func__, sz->endpoint->bmAttributes);
retval = -ENODEV;
goto free_sz;
}
diff --git a/drivers/media/rc/winbond-cir.c b/drivers/media/rc/winbond-cir.c
index 95ae60e659a1..78491ed48d92 100644
--- a/drivers/media/rc/winbond-cir.c
+++ b/drivers/media/rc/winbond-cir.c
@@ -227,8 +227,7 @@ struct wbcir_data {
static enum wbcir_protocol protocol = IR_PROTOCOL_RC6;
module_param(protocol, uint, 0444);
-MODULE_PARM_DESC(protocol, "IR protocol to use for the power-on command "
- "(0 = RC5, 1 = NEC, 2 = RC6A, default)");
+MODULE_PARM_DESC(protocol, "IR protocol to use for the power-on command (0 = RC5, 1 = NEC, 2 = RC6A, default)");
static bool invert; /* default = 0 */
module_param(invert, bool, 0444);
@@ -244,8 +243,7 @@ MODULE_PARM_DESC(wake_sc, "Scancode of the power-on IR command");
static unsigned int wake_rc6mode = 6;
module_param(wake_rc6mode, uint, 0644);
-MODULE_PARM_DESC(wake_rc6mode, "RC6 mode for the power-on command "
- "(0 = 0, 6 = 6A, default)");
+MODULE_PARM_DESC(wake_rc6mode, "RC6 mode for the power-on command (0 = 0, 6 = 6A, default)");
@@ -660,7 +658,7 @@ wbcir_tx(struct rc_dev *dev, unsigned *b, unsigned count)
unsigned i;
unsigned long flags;
- buf = kmalloc(count * sizeof(*b), GFP_KERNEL);
+ buf = kmalloc_array(count, sizeof(*b), GFP_KERNEL);
if (!buf)
return -ENOMEM;
@@ -1050,8 +1048,7 @@ wbcir_probe(struct pnp_dev *device, const struct pnp_device_id *dev_id)
goto exit_free_data;
}
- dev_dbg(&device->dev, "Found device "
- "(w: 0x%lX, e: 0x%lX, s: 0x%lX, i: %u)\n",
+ dev_dbg(&device->dev, "Found device (w: 0x%lX, e: 0x%lX, s: 0x%lX, i: %u)\n",
data->wbase, data->ebase, data->sbase, data->irq);
data->led.name = "cir::activity";
@@ -1188,7 +1185,7 @@ static const struct pnp_device_id wbcir_ids[] = {
MODULE_DEVICE_TABLE(pnp, wbcir_ids);
static struct pnp_driver wbcir_driver = {
- .name = WBCIR_NAME,
+ .name = DRVNAME,
.id_table = wbcir_ids,
.probe = wbcir_probe,
.remove = wbcir_remove,
diff --git a/drivers/media/spi/gs1662.c b/drivers/media/spi/gs1662.c
index d76f36233f43..330dcb2b2e44 100644
--- a/drivers/media/spi/gs1662.c
+++ b/drivers/media/spi/gs1662.c
@@ -453,17 +453,15 @@ static int gs_probe(struct spi_device *spi)
static int gs_remove(struct spi_device *spi)
{
struct v4l2_subdev *sd = spi_get_drvdata(spi);
- struct gs *gs = to_gs(sd);
v4l2_device_unregister_subdev(sd);
- kfree(gs);
+
return 0;
}
static struct spi_driver gs_driver = {
.driver = {
.name = "gs1662",
- .owner = THIS_MODULE,
},
.probe = gs_probe,
diff --git a/drivers/media/tuners/fc0011.c b/drivers/media/tuners/fc0011.c
index 3932aa81e18c..00489a9df4e4 100644
--- a/drivers/media/tuners/fc0011.c
+++ b/drivers/media/tuners/fc0011.c
@@ -112,12 +112,10 @@ static int fc0011_readreg(struct fc0011_priv *priv, u8 reg, u8 *val)
return 0;
}
-static int fc0011_release(struct dvb_frontend *fe)
+static void fc0011_release(struct dvb_frontend *fe)
{
kfree(fe->tuner_priv);
fe->tuner_priv = NULL;
-
- return 0;
}
static int fc0011_init(struct dvb_frontend *fe)
@@ -262,8 +260,7 @@ static int fc0011_set_params(struct dvb_frontend *fe)
regs[FC11_REG_VCOSEL] |= FC11_VCOSEL_BW7M;
break;
default:
- dev_warn(&priv->i2c->dev, "Unsupported bandwidth %u kHz. "
- "Using 6000 kHz.\n",
+ dev_warn(&priv->i2c->dev, "Unsupported bandwidth %u kHz. Using 6000 kHz.\n",
bandwidth);
bandwidth = 6000;
/* fallthrough */
@@ -435,9 +432,7 @@ static int fc0011_set_params(struct dvb_frontend *fe)
if (err)
return err;
- dev_dbg(&priv->i2c->dev, "Tuned to "
- "fa=%02X fp=%02X xin=%02X%02X vco=%02X vcosel=%02X "
- "vcocal=%02X(%u) bw=%u\n",
+ dev_dbg(&priv->i2c->dev, "Tuned to fa=%02X fp=%02X xin=%02X%02X vco=%02X vcosel=%02X vcocal=%02X(%u) bw=%u\n",
(unsigned int)regs[FC11_REG_FA],
(unsigned int)regs[FC11_REG_FP],
(unsigned int)regs[FC11_REG_XINHI],
diff --git a/drivers/media/tuners/fc0012.c b/drivers/media/tuners/fc0012.c
index d74e92056810..30508f44e5f9 100644
--- a/drivers/media/tuners/fc0012.c
+++ b/drivers/media/tuners/fc0012.c
@@ -55,11 +55,10 @@ static int fc0012_readreg(struct fc0012_priv *priv, u8 reg, u8 *val)
return 0;
}
-static int fc0012_release(struct dvb_frontend *fe)
+static void fc0012_release(struct dvb_frontend *fe)
{
kfree(fe->tuner_priv);
fe->tuner_priv = NULL;
- return 0;
}
static int fc0012_init(struct dvb_frontend *fe)
diff --git a/drivers/media/tuners/fc0013.c b/drivers/media/tuners/fc0013.c
index 522690d97b42..f7cf0e9e7c99 100644
--- a/drivers/media/tuners/fc0013.c
+++ b/drivers/media/tuners/fc0013.c
@@ -52,11 +52,10 @@ static int fc0013_readreg(struct fc0013_priv *priv, u8 reg, u8 *val)
return 0;
}
-static int fc0013_release(struct dvb_frontend *fe)
+static void fc0013_release(struct dvb_frontend *fe)
{
kfree(fe->tuner_priv);
fe->tuner_priv = NULL;
- return 0;
}
static int fc0013_init(struct dvb_frontend *fe)
diff --git a/drivers/media/tuners/max2165.c b/drivers/media/tuners/max2165.c
index 353b178becf6..c3f10925b0d4 100644
--- a/drivers/media/tuners/max2165.c
+++ b/drivers/media/tuners/max2165.c
@@ -370,15 +370,13 @@ static int max2165_init(struct dvb_frontend *fe)
return 0;
}
-static int max2165_release(struct dvb_frontend *fe)
+static void max2165_release(struct dvb_frontend *fe)
{
struct max2165_priv *priv = fe->tuner_priv;
dprintk("%s()\n", __func__);
kfree(priv);
fe->tuner_priv = NULL;
-
- return 0;
}
static const struct dvb_tuner_ops max2165_tuner_ops = {
diff --git a/drivers/media/tuners/mc44s803.c b/drivers/media/tuners/mc44s803.c
index f1b764074661..aba580b4ac2c 100644
--- a/drivers/media/tuners/mc44s803.c
+++ b/drivers/media/tuners/mc44s803.c
@@ -80,14 +80,12 @@ static int mc44s803_readreg(struct mc44s803_priv *priv, u8 reg, u32 *val)
return 0;
}
-static int mc44s803_release(struct dvb_frontend *fe)
+static void mc44s803_release(struct dvb_frontend *fe)
{
struct mc44s803_priv *priv = fe->tuner_priv;
fe->tuner_priv = NULL;
kfree(priv);
-
- return 0;
}
static int mc44s803_init(struct dvb_frontend *fe)
@@ -349,8 +347,8 @@ struct dvb_frontend *mc44s803_attach(struct dvb_frontend *fe,
id = MC44S803_REG_MS(reg, MC44S803_ID);
if (id != 0x14) {
- mc_printk(KERN_ERR, "unsupported ID "
- "(%x should be 0x14)\n", id);
+ mc_printk(KERN_ERR, "unsupported ID (%x should be 0x14)\n",
+ id);
goto error;
}
diff --git a/drivers/media/tuners/mt2060.c b/drivers/media/tuners/mt2060.c
index b87b2549d58d..94077ea78dde 100644
--- a/drivers/media/tuners/mt2060.c
+++ b/drivers/media/tuners/mt2060.c
@@ -332,11 +332,10 @@ static int mt2060_sleep(struct dvb_frontend *fe)
return ret;
}
-static int mt2060_release(struct dvb_frontend *fe)
+static void mt2060_release(struct dvb_frontend *fe)
{
kfree(fe->tuner_priv);
fe->tuner_priv = NULL;
- return 0;
}
static const struct dvb_tuner_ops mt2060_tuner_ops = {
diff --git a/drivers/media/tuners/mt2063.c b/drivers/media/tuners/mt2063.c
index dfec23743afe..8b39d8dc97a0 100644
--- a/drivers/media/tuners/mt2063.c
+++ b/drivers/media/tuners/mt2063.c
@@ -2019,7 +2019,7 @@ static int mt2063_get_status(struct dvb_frontend *fe, u32 *tuner_status)
return 0;
}
-static int mt2063_release(struct dvb_frontend *fe)
+static void mt2063_release(struct dvb_frontend *fe)
{
struct mt2063_state *state = fe->tuner_priv;
@@ -2027,8 +2027,6 @@ static int mt2063_release(struct dvb_frontend *fe)
fe->tuner_priv = NULL;
kfree(state);
-
- return 0;
}
static int mt2063_set_analog_params(struct dvb_frontend *fe,
diff --git a/drivers/media/tuners/mt20xx.c b/drivers/media/tuners/mt20xx.c
index 52da4671b0e0..129bf8e1aff8 100644
--- a/drivers/media/tuners/mt20xx.c
+++ b/drivers/media/tuners/mt20xx.c
@@ -49,12 +49,10 @@ struct microtune_priv {
u32 frequency;
};
-static int microtune_release(struct dvb_frontend *fe)
+static void microtune_release(struct dvb_frontend *fe)
{
kfree(fe->tuner_priv);
fe->tuner_priv = NULL;
-
- return 0;
}
static int microtune_get_frequency(struct dvb_frontend *fe, u32 *frequency)
@@ -487,13 +485,8 @@ static void mt2050_set_if_freq(struct dvb_frontend *fe,unsigned int freq, unsign
buf[5]=div2a;
if(num2!=0) buf[5]=buf[5]|0x40;
- if (debug > 1) {
- int i;
- tuner_dbg("bufs is: ");
- for(i=0;i<6;i++)
- printk("%x ",buf[i]);
- printk("\n");
- }
+ if (debug > 1)
+ tuner_dbg("bufs is: %*ph\n", 6, buf);
ret=tuner_i2c_xfer_send(&priv->i2c_props,buf,6);
if (ret!=6)
@@ -619,15 +612,9 @@ struct dvb_frontend *microtune_attach(struct dvb_frontend *fe,
tuner_i2c_xfer_send(&priv->i2c_props,buf,1);
tuner_i2c_xfer_recv(&priv->i2c_props,buf,21);
- if (debug) {
- int i;
- tuner_dbg("MT20xx hexdump:");
- for(i=0;i<21;i++) {
- printk(" %02x",buf[i]);
- if(((i+1)%8)==0) printk(" ");
- }
- printk("\n");
- }
+ if (debug)
+ tuner_dbg("MT20xx hexdump: %*ph\n", 21, buf);
+
company_code = buf[0x11] << 8 | buf[0x12];
tuner_info("microtune: companycode=%04x part=%02x rev=%02x\n",
company_code,buf[0x13],buf[0x14]);
diff --git a/drivers/media/tuners/mt2131.c b/drivers/media/tuners/mt2131.c
index 6e2cdd2b6175..e7790e4afcfe 100644
--- a/drivers/media/tuners/mt2131.c
+++ b/drivers/media/tuners/mt2131.c
@@ -230,12 +230,11 @@ static int mt2131_init(struct dvb_frontend *fe)
return ret;
}
-static int mt2131_release(struct dvb_frontend *fe)
+static void mt2131_release(struct dvb_frontend *fe)
{
dprintk(1, "%s()\n", __func__);
kfree(fe->tuner_priv);
fe->tuner_priv = NULL;
- return 0;
}
static const struct dvb_tuner_ops mt2131_tuner_ops = {
diff --git a/drivers/media/tuners/mt2266.c b/drivers/media/tuners/mt2266.c
index bca4d75e42d4..88edcc031e3c 100644
--- a/drivers/media/tuners/mt2266.c
+++ b/drivers/media/tuners/mt2266.c
@@ -296,11 +296,10 @@ static int mt2266_sleep(struct dvb_frontend *fe)
return 0;
}
-static int mt2266_release(struct dvb_frontend *fe)
+static void mt2266_release(struct dvb_frontend *fe)
{
kfree(fe->tuner_priv);
fe->tuner_priv = NULL;
- return 0;
}
static const struct dvb_tuner_ops mt2266_tuner_ops = {
diff --git a/drivers/media/tuners/mxl5005s.c b/drivers/media/tuners/mxl5005s.c
index 92a3be4fde87..353744fee053 100644
--- a/drivers/media/tuners/mxl5005s.c
+++ b/drivers/media/tuners/mxl5005s.c
@@ -4063,12 +4063,11 @@ static int mxl5005s_get_if_frequency(struct dvb_frontend *fe, u32 *frequency)
return 0;
}
-static int mxl5005s_release(struct dvb_frontend *fe)
+static void mxl5005s_release(struct dvb_frontend *fe)
{
dprintk(1, "%s()\n", __func__);
kfree(fe->tuner_priv);
fe->tuner_priv = NULL;
- return 0;
}
static const struct dvb_tuner_ops mxl5005s_tuner_ops = {
diff --git a/drivers/media/tuners/mxl5007t.c b/drivers/media/tuners/mxl5007t.c
index 42569c6811e6..b16dfa5e85fb 100644
--- a/drivers/media/tuners/mxl5007t.c
+++ b/drivers/media/tuners/mxl5007t.c
@@ -776,7 +776,7 @@ static int mxl5007t_get_if_frequency(struct dvb_frontend *fe, u32 *frequency)
return 0;
}
-static int mxl5007t_release(struct dvb_frontend *fe)
+static void mxl5007t_release(struct dvb_frontend *fe)
{
struct mxl5007t_state *state = fe->tuner_priv;
@@ -788,8 +788,6 @@ static int mxl5007t_release(struct dvb_frontend *fe)
mutex_unlock(&mxl5007t_list_mutex);
fe->tuner_priv = NULL;
-
- return 0;
}
/* ------------------------------------------------------------------------- */
diff --git a/drivers/media/tuners/qt1010.c b/drivers/media/tuners/qt1010.c
index ae8cbece6d2b..a2c6cd1c3923 100644
--- a/drivers/media/tuners/qt1010.c
+++ b/drivers/media/tuners/qt1010.c
@@ -377,11 +377,10 @@ static int qt1010_init(struct dvb_frontend *fe)
return qt1010_set_params(fe);
}
-static int qt1010_release(struct dvb_frontend *fe)
+static void qt1010_release(struct dvb_frontend *fe)
{
kfree(fe->tuner_priv);
fe->tuner_priv = NULL;
- return 0;
}
static int qt1010_get_frequency(struct dvb_frontend *fe, u32 *frequency)
diff --git a/drivers/media/tuners/r820t.c b/drivers/media/tuners/r820t.c
index 08dca40356d2..ba80376a3b86 100644
--- a/drivers/media/tuners/r820t.c
+++ b/drivers/media/tuners/r820t.c
@@ -2286,7 +2286,7 @@ static int r820t_get_if_frequency(struct dvb_frontend *fe, u32 *frequency)
return 0;
}
-static int r820t_release(struct dvb_frontend *fe)
+static void r820t_release(struct dvb_frontend *fe)
{
struct r820t_priv *priv = fe->tuner_priv;
@@ -2300,8 +2300,6 @@ static int r820t_release(struct dvb_frontend *fe)
mutex_unlock(&r820t_list_mutex);
fe->tuner_priv = NULL;
-
- return 0;
}
static const struct dvb_tuner_ops r820t_tuner_ops = {
diff --git a/drivers/media/tuners/tda18218.c b/drivers/media/tuners/tda18218.c
index 9300e9361e3b..8357a3c08a70 100644
--- a/drivers/media/tuners/tda18218.c
+++ b/drivers/media/tuners/tda18218.c
@@ -265,11 +265,10 @@ static int tda18218_init(struct dvb_frontend *fe)
return ret;
}
-static int tda18218_release(struct dvb_frontend *fe)
+static void tda18218_release(struct dvb_frontend *fe)
{
kfree(fe->tuner_priv);
fe->tuner_priv = NULL;
- return 0;
}
static const struct dvb_tuner_ops tda18218_tuner_ops = {
diff --git a/drivers/media/tuners/tda18271-common.c b/drivers/media/tuners/tda18271-common.c
index a26bb33102b8..7e81cd887c13 100644
--- a/drivers/media/tuners/tda18271-common.c
+++ b/drivers/media/tuners/tda18271-common.c
@@ -251,8 +251,8 @@ static int __tda18271_write_regs(struct dvb_frontend *fe, int idx, int len,
}
if (ret != 1)
- tda_err("ERROR: idx = 0x%x, len = %d, "
- "i2c_transfer returned: %d\n", idx, max, ret);
+ tda_err("ERROR: idx = 0x%x, len = %d, i2c_transfer returned: %d\n",
+ idx, max, ret);
return (ret == 1 ? 0 : ret);
}
diff --git a/drivers/media/tuners/tda18271-fe.c b/drivers/media/tuners/tda18271-fe.c
index 2d50e8b1dce1..b4e5fa2ff5e5 100644
--- a/drivers/media/tuners/tda18271-fe.c
+++ b/drivers/media/tuners/tda18271-fe.c
@@ -26,8 +26,7 @@
int tda18271_debug;
module_param_named(debug, tda18271_debug, int, 0644);
-MODULE_PARM_DESC(debug, "set debug level "
- "(info=1, map=2, reg=4, adv=8, cal=16 (or-able))");
+MODULE_PARM_DESC(debug, "set debug level (info=1, map=2, reg=4, adv=8, cal=16 (or-able))");
static int tda18271_cal_on_startup = -1;
module_param_named(cal, tda18271_cal_on_startup, int, 0644);
@@ -1049,7 +1048,7 @@ fail:
return ret;
}
-static int tda18271_release(struct dvb_frontend *fe)
+static void tda18271_release(struct dvb_frontend *fe)
{
struct tda18271_priv *priv = fe->tuner_priv;
@@ -1061,8 +1060,6 @@ static int tda18271_release(struct dvb_frontend *fe)
mutex_unlock(&tda18271_list_mutex);
fe->tuner_priv = NULL;
-
- return 0;
}
static int tda18271_get_frequency(struct dvb_frontend *fe, u32 *frequency)
diff --git a/drivers/media/tuners/tda18271-maps.c b/drivers/media/tuners/tda18271-maps.c
index 1e89dd93c4bb..7d114677b4ca 100644
--- a/drivers/media/tuners/tda18271-maps.c
+++ b/drivers/media/tuners/tda18271-maps.c
@@ -1024,11 +1024,7 @@ int tda18271_lookup_rf_band(struct dvb_frontend *fe, u32 *freq, u8 *rf_band)
while ((map[i].rfmax * 1000) < *freq) {
if (tda18271_debug & DBG_ADV)
- tda_map("(%d) rfmax = %d < freq = %d, "
- "rf1_def = %d, rf2_def = %d, rf3_def = %d, "
- "rf1 = %d, rf2 = %d, rf3 = %d, "
- "rf_a1 = %d, rf_a2 = %d, "
- "rf_b1 = %d, rf_b2 = %d\n",
+ tda_map("(%d) rfmax = %d < freq = %d, rf1_def = %d, rf2_def = %d, rf3_def = %d, rf1 = %d, rf2 = %d, rf3 = %d, rf_a1 = %d, rf_a2 = %d, rf_b1 = %d, rf_b2 = %d\n",
i, map[i].rfmax * 1000, *freq,
map[i].rf1_def, map[i].rf2_def, map[i].rf3_def,
map[i].rf1, map[i].rf2, map[i].rf3,
diff --git a/drivers/media/tuners/tda827x.c b/drivers/media/tuners/tda827x.c
index 5050ce9be423..2137eadf30f1 100644
--- a/drivers/media/tuners/tda827x.c
+++ b/drivers/media/tuners/tda827x.c
@@ -767,11 +767,10 @@ static void tda827xa_agcf(struct dvb_frontend *fe)
/* ------------------------------------------------------------------ */
-static int tda827x_release(struct dvb_frontend *fe)
+static void tda827x_release(struct dvb_frontend *fe)
{
kfree(fe->tuner_priv);
fe->tuner_priv = NULL;
- return 0;
}
static int tda827x_get_frequency(struct dvb_frontend *fe, u32 *frequency)
diff --git a/drivers/media/tuners/tda8290.c b/drivers/media/tuners/tda8290.c
index 998e82bba9c0..a59c567c55d6 100644
--- a/drivers/media/tuners/tda8290.c
+++ b/drivers/media/tuners/tda8290.c
@@ -617,8 +617,8 @@ static int tda829x_find_tuner(struct dvb_frontend *fe)
if (tuner_addrs == 0) {
tuner_addrs = 0x60;
- tuner_info("could not clearly identify tuner address, "
- "defaulting to %x\n", tuner_addrs);
+ tuner_info("could not clearly identify tuner address, defaulting to %x\n",
+ tuner_addrs);
} else {
tuner_addrs = tuner_addrs & 0xff;
tuner_info("setting tuner address to %x\n", tuner_addrs);
@@ -721,7 +721,7 @@ static int tda8295_probe(struct tuner_i2c_props *i2c_props)
return -ENODEV;
}
-static struct analog_demod_ops tda8290_ops = {
+static const struct analog_demod_ops tda8290_ops = {
.set_params = tda8290_set_params,
.has_signal = tda8290_has_signal,
.standby = tda8290_standby,
@@ -729,7 +729,7 @@ static struct analog_demod_ops tda8290_ops = {
.i2c_gate_ctrl = tda8290_i2c_bridge,
};
-static struct analog_demod_ops tda8295_ops = {
+static const struct analog_demod_ops tda8295_ops = {
.set_params = tda8295_set_params,
.has_signal = tda8295_has_signal,
.standby = tda8295_standby,
diff --git a/drivers/media/tuners/tda9887.c b/drivers/media/tuners/tda9887.c
index 56be6c29399b..c0e815f8b951 100644
--- a/drivers/media/tuners/tda9887.c
+++ b/drivers/media/tuners/tda9887.c
@@ -659,7 +659,7 @@ static void tda9887_release(struct dvb_frontend *fe)
fe->analog_demod_priv = NULL;
}
-static struct analog_demod_ops tda9887_ops = {
+static const struct analog_demod_ops tda9887_ops = {
.info = {
.name = "tda9887",
},
diff --git a/drivers/media/tuners/tea5761.c b/drivers/media/tuners/tea5761.c
index 36b0b1e1d05b..a9b1bb134409 100644
--- a/drivers/media/tuners/tea5761.c
+++ b/drivers/media/tuners/tea5761.c
@@ -274,24 +274,20 @@ int tea5761_autodetection(struct i2c_adapter* i2c_adap, u8 i2c_addr)
}
if ((buffer[13] != 0x2b) || (buffer[14] != 0x57) || (buffer[15] != 0x061)) {
- printk(KERN_WARNING "Manufacturer ID= 0x%02x, Chip ID = %02x%02x."
- " It is not a TEA5761\n",
+ printk(KERN_WARNING "Manufacturer ID= 0x%02x, Chip ID = %02x%02x. It is not a TEA5761\n",
buffer[13], buffer[14], buffer[15]);
return -EINVAL;
}
- printk(KERN_WARNING "tea5761: TEA%02x%02x detected. "
- "Manufacturer ID= 0x%02x\n",
+ printk(KERN_WARNING "tea5761: TEA%02x%02x detected. Manufacturer ID= 0x%02x\n",
buffer[14], buffer[15], buffer[13]);
return 0;
}
-static int tea5761_release(struct dvb_frontend *fe)
+static void tea5761_release(struct dvb_frontend *fe)
{
kfree(fe->tuner_priv);
fe->tuner_priv = NULL;
-
- return 0;
}
static int tea5761_get_frequency(struct dvb_frontend *fe, u32 *frequency)
diff --git a/drivers/media/tuners/tea5767.c b/drivers/media/tuners/tea5767.c
index d62a6d6b1f42..525b7ab90c80 100644
--- a/drivers/media/tuners/tea5767.c
+++ b/drivers/media/tuners/tea5767.c
@@ -401,12 +401,10 @@ int tea5767_autodetection(struct i2c_adapter* i2c_adap, u8 i2c_addr)
return 0;
}
-static int tea5767_release(struct dvb_frontend *fe)
+static void tea5767_release(struct dvb_frontend *fe)
{
kfree(fe->tuner_priv);
fe->tuner_priv = NULL;
-
- return 0;
}
static int tea5767_get_frequency(struct dvb_frontend *fe, u32 *frequency)
diff --git a/drivers/media/tuners/tuner-simple.c b/drivers/media/tuners/tuner-simple.c
index 9ba9582e7765..3339b13dd3f5 100644
--- a/drivers/media/tuners/tuner-simple.c
+++ b/drivers/media/tuners/tuner-simple.c
@@ -275,8 +275,7 @@ static int simple_config_lookup(struct dvb_frontend *fe,
*config = t_params->ranges[i].config;
*cb = t_params->ranges[i].cb;
- tuner_dbg("freq = %d.%02d (%d), range = %d, "
- "config = 0x%02x, cb = 0x%02x\n",
+ tuner_dbg("freq = %d.%02d (%d), range = %d, config = 0x%02x, cb = 0x%02x\n",
*frequency / 16, *frequency % 16 * 100 / 16, *frequency,
i, *config, *cb);
@@ -404,12 +403,12 @@ static int simple_std_setup(struct dvb_frontend *fe,
i2c.addr = 0x0a;
rc = tuner_i2c_xfer_send(&i2c, &buffer[0], 2);
if (2 != rc)
- tuner_warn("i2c i/o error: rc == %d "
- "(should be 2)\n", rc);
+ tuner_warn("i2c i/o error: rc == %d (should be 2)\n",
+ rc);
rc = tuner_i2c_xfer_send(&i2c, &buffer[2], 2);
if (2 != rc)
- tuner_warn("i2c i/o error: rc == %d "
- "(should be 2)\n", rc);
+ tuner_warn("i2c i/o error: rc == %d (should be 2)\n",
+ rc);
break;
}
}
@@ -463,8 +462,8 @@ static int simple_post_tune(struct dvb_frontend *fe, u8 *buffer,
rc = tuner_i2c_xfer_recv(&priv->i2c_props,
&status_byte, 1);
if (1 != rc) {
- tuner_warn("i2c i/o read error: rc == %d "
- "(should be 1)\n", rc);
+ tuner_warn("i2c i/o read error: rc == %d (should be 1)\n",
+ rc);
break;
}
if (status_byte & TUNER_PLL_LOCKED)
@@ -483,8 +482,8 @@ static int simple_post_tune(struct dvb_frontend *fe, u8 *buffer,
rc = tuner_i2c_xfer_send(&priv->i2c_props, buffer, 4);
if (4 != rc)
- tuner_warn("i2c i/o error: rc == %d "
- "(should be 4)\n", rc);
+ tuner_warn("i2c i/o error: rc == %d (should be 4)\n",
+ rc);
break;
}
}
@@ -499,8 +498,7 @@ static int simple_radio_bandswitch(struct dvb_frontend *fe, u8 *buffer)
switch (priv->type) {
case TUNER_TENA_9533_DI:
case TUNER_YMEC_TVF_5533MF:
- tuner_dbg("This tuner doesn't have FM. "
- "Most cards have a TEA5767 for FM\n");
+ tuner_dbg("This tuner doesn't have FM. Most cards have a TEA5767 for FM\n");
return 0;
case TUNER_PHILIPS_FM1216ME_MK3:
case TUNER_PHILIPS_FM1236_MK3:
@@ -586,8 +584,7 @@ static int simple_set_tv_freq(struct dvb_frontend *fe,
div = params->frequency + IFPCoff + offset;
- tuner_dbg("Freq= %d.%02d MHz, V_IF=%d.%02d MHz, "
- "Offset=%d.%02d MHz, div=%0d\n",
+ tuner_dbg("Freq= %d.%02d MHz, V_IF=%d.%02d MHz, Offset=%d.%02d MHz, div=%0d\n",
params->frequency / 16, params->frequency % 16 * 100 / 16,
IFPCoff / 16, IFPCoff % 16 * 100 / 16,
offset / 16, offset % 16 * 100 / 16, div);
@@ -858,8 +855,7 @@ static u32 simple_dvb_configure(struct dvb_frontend *fe, u8 *buf,
if (!tun->stepsize) {
/* tuner-core was loaded before the digital tuner was
* configured and somehow picked the wrong tuner type */
- tuner_err("attempt to treat tuner %d (%s) as digital tuner "
- "without stepsize defined.\n",
+ tuner_err("attempt to treat tuner %d (%s) as digital tuner without stepsize defined.\n",
priv->type, priv->tun->name);
return 0; /* failure */
}
@@ -1005,7 +1001,7 @@ static int simple_sleep(struct dvb_frontend *fe)
return 0;
}
-static int simple_release(struct dvb_frontend *fe)
+static void simple_release(struct dvb_frontend *fe)
{
struct tuner_simple_priv *priv = fe->tuner_priv;
@@ -1017,8 +1013,6 @@ static int simple_release(struct dvb_frontend *fe)
mutex_unlock(&tuner_simple_list_mutex);
fe->tuner_priv = NULL;
-
- return 0;
}
static int simple_get_frequency(struct dvb_frontend *fe, u32 *frequency)
@@ -1077,8 +1071,7 @@ struct dvb_frontend *simple_tuner_attach(struct dvb_frontend *fe,
fe->ops.i2c_gate_ctrl(fe, 1);
if (1 != i2c_transfer(i2c_adap, &msg, 1))
- printk(KERN_WARNING "tuner-simple %d-%04x: "
- "unable to probe %s, proceeding anyway.",
+ printk(KERN_WARNING "tuner-simple %d-%04x: unable to probe %s, proceeding anyway.",
i2c_adapter_id(i2c_adap), i2c_addr,
tuners[type].name);
@@ -1123,18 +1116,16 @@ struct dvb_frontend *simple_tuner_attach(struct dvb_frontend *fe,
if ((debug) || ((atv_input[priv->nr] > 0) ||
(dtv_input[priv->nr] > 0))) {
if (0 == atv_input[priv->nr])
- tuner_info("tuner %d atv rf input will be "
- "autoselected\n", priv->nr);
+ tuner_info("tuner %d atv rf input will be autoselected\n",
+ priv->nr);
else
- tuner_info("tuner %d atv rf input will be "
- "set to input %d (insmod option)\n",
+ tuner_info("tuner %d atv rf input will be set to input %d (insmod option)\n",
priv->nr, atv_input[priv->nr]);
if (0 == dtv_input[priv->nr])
- tuner_info("tuner %d dtv rf input will be "
- "autoselected\n", priv->nr);
+ tuner_info("tuner %d dtv rf input will be autoselected\n",
+ priv->nr);
else
- tuner_info("tuner %d dtv rf input will be "
- "set to input %d (insmod option)\n",
+ tuner_info("tuner %d dtv rf input will be set to input %d (insmod option)\n",
priv->nr, dtv_input[priv->nr]);
}
diff --git a/drivers/media/tuners/tuner-xc2028.c b/drivers/media/tuners/tuner-xc2028.c
index 8d96a22647b3..b5b62b08159e 100644
--- a/drivers/media/tuners/tuner-xc2028.c
+++ b/drivers/media/tuners/tuner-xc2028.c
@@ -56,8 +56,7 @@ MODULE_PARM_DESC(no_poweroff, "0 (default) powers device off when not used.\n"
static char audio_std[8];
module_param_string(audio_std, audio_std, sizeof(audio_std), 0);
MODULE_PARM_DESC(audio_std,
- "Audio standard. XC3028 audio decoder explicitly "
- "needs to know what audio\n"
+ "Audio standard. XC3028 audio decoder explicitly needs to know what audio\n"
"standard is needed for some video standards with audio A2 or NICAM.\n"
"The valid values are:\n"
"A2\n"
@@ -69,8 +68,8 @@ MODULE_PARM_DESC(audio_std,
static char firmware_name[30];
module_param_string(firmware_name, firmware_name, sizeof(firmware_name), 0);
-MODULE_PARM_DESC(firmware_name, "Firmware file name. Allows overriding the "
- "default firmware name\n");
+MODULE_PARM_DESC(firmware_name,
+ "Firmware file name. Allows overriding the default firmware name\n");
static LIST_HEAD(hybrid_tuner_instance_list);
static DEFINE_MUTEX(xc2028_list_mutex);
@@ -179,67 +178,67 @@ static int xc2028_get_reg(struct xc2028_data *priv, u16 reg, u16 *val)
static void dump_firm_type_and_int_freq(unsigned int type, u16 int_freq)
{
if (type & BASE)
- printk("BASE ");
+ printk(KERN_CONT "BASE ");
if (type & INIT1)
- printk("INIT1 ");
+ printk(KERN_CONT "INIT1 ");
if (type & F8MHZ)
- printk("F8MHZ ");
+ printk(KERN_CONT "F8MHZ ");
if (type & MTS)
- printk("MTS ");
+ printk(KERN_CONT "MTS ");
if (type & D2620)
- printk("D2620 ");
+ printk(KERN_CONT "D2620 ");
if (type & D2633)
- printk("D2633 ");
+ printk(KERN_CONT "D2633 ");
if (type & DTV6)
- printk("DTV6 ");
+ printk(KERN_CONT "DTV6 ");
if (type & QAM)
- printk("QAM ");
+ printk(KERN_CONT "QAM ");
if (type & DTV7)
- printk("DTV7 ");
+ printk(KERN_CONT "DTV7 ");
if (type & DTV78)
- printk("DTV78 ");
+ printk(KERN_CONT "DTV78 ");
if (type & DTV8)
- printk("DTV8 ");
+ printk(KERN_CONT "DTV8 ");
if (type & FM)
- printk("FM ");
+ printk(KERN_CONT "FM ");
if (type & INPUT1)
- printk("INPUT1 ");
+ printk(KERN_CONT "INPUT1 ");
if (type & LCD)
- printk("LCD ");
+ printk(KERN_CONT "LCD ");
if (type & NOGD)
- printk("NOGD ");
+ printk(KERN_CONT "NOGD ");
if (type & MONO)
- printk("MONO ");
+ printk(KERN_CONT "MONO ");
if (type & ATSC)
- printk("ATSC ");
+ printk(KERN_CONT "ATSC ");
if (type & IF)
- printk("IF ");
+ printk(KERN_CONT "IF ");
if (type & LG60)
- printk("LG60 ");
+ printk(KERN_CONT "LG60 ");
if (type & ATI638)
- printk("ATI638 ");
+ printk(KERN_CONT "ATI638 ");
if (type & OREN538)
- printk("OREN538 ");
+ printk(KERN_CONT "OREN538 ");
if (type & OREN36)
- printk("OREN36 ");
+ printk(KERN_CONT "OREN36 ");
if (type & TOYOTA388)
- printk("TOYOTA388 ");
+ printk(KERN_CONT "TOYOTA388 ");
if (type & TOYOTA794)
- printk("TOYOTA794 ");
+ printk(KERN_CONT "TOYOTA794 ");
if (type & DIBCOM52)
- printk("DIBCOM52 ");
+ printk(KERN_CONT "DIBCOM52 ");
if (type & ZARLINK456)
- printk("ZARLINK456 ");
+ printk(KERN_CONT "ZARLINK456 ");
if (type & CHINA)
- printk("CHINA ");
+ printk(KERN_CONT "CHINA ");
if (type & F6MHZ)
- printk("F6MHZ ");
+ printk(KERN_CONT "F6MHZ ");
if (type & INPUT2)
- printk("INPUT2 ");
+ printk(KERN_CONT "INPUT2 ");
if (type & SCODE)
- printk("SCODE ");
+ printk(KERN_CONT "SCODE ");
if (type & HAS_IF)
- printk("HAS_IF_%d ", int_freq);
+ printk(KERN_CONT "HAS_IF_%d ", int_freq);
}
static v4l2_std_id parse_audio_std_option(void)
@@ -351,8 +350,7 @@ static int load_all_firmwares(struct dvb_frontend *fe,
n++;
if (n >= n_array) {
- tuner_err("More firmware images in file than "
- "were expected!\n");
+ tuner_err("More firmware images in file than were expected!\n");
goto corrupt;
}
@@ -379,8 +377,8 @@ static int load_all_firmwares(struct dvb_frontend *fe,
if (!size || size > endp - p) {
tuner_err("Firmware type ");
dump_firm_type(type);
- printk("(%x), id %llx is corrupted "
- "(size=%d, expected %d)\n",
+ printk(KERN_CONT
+ "(%x), id %llx is corrupted (size=%d, expected %d)\n",
type, (unsigned long long)id,
(unsigned)(endp - p), size);
goto corrupt;
@@ -395,7 +393,7 @@ static int load_all_firmwares(struct dvb_frontend *fe,
tuner_dbg("Reading firmware type ");
if (debug) {
dump_firm_type_and_int_freq(type, int_freq);
- printk("(%x), id %llx, size=%d.\n",
+ printk(KERN_CONT "(%x), id %llx, size=%d.\n",
type, (unsigned long long)id, size);
}
@@ -444,7 +442,8 @@ static int seek_firmware(struct dvb_frontend *fe, unsigned int type,
tuner_dbg("%s called, want type=", __func__);
if (debug) {
dump_firm_type(type);
- printk("(%x), id %016llx.\n", type, (unsigned long long)*id);
+ printk(KERN_CONT "(%x), id %016llx.\n",
+ type, (unsigned long long)*id);
}
if (!priv->firm) {
@@ -500,10 +499,11 @@ static int seek_firmware(struct dvb_frontend *fe, unsigned int type,
}
if (best_nr_matches > 0) {
- tuner_dbg("Selecting best matching firmware (%d bits) for "
- "type=", best_nr_matches);
+ tuner_dbg("Selecting best matching firmware (%d bits) for type=",
+ best_nr_matches);
dump_firm_type(type);
- printk("(%x), id %016llx:\n", type, (unsigned long long)*id);
+ printk(KERN_CONT
+ "(%x), id %016llx:\n", type, (unsigned long long)*id);
i = best_i;
goto found;
}
@@ -520,7 +520,8 @@ ret:
tuner_dbg("%s firmware for type=", (i < 0) ? "Can't find" : "Found");
if (debug) {
dump_firm_type(type);
- printk("(%x), id %016llx.\n", type, (unsigned long long)*id);
+ printk(KERN_CONT "(%x), id %016llx.\n",
+ type, (unsigned long long)*id);
}
return i;
}
@@ -560,8 +561,8 @@ static int load_firmware(struct dvb_frontend *fe, unsigned int type,
tuner_info("Loading firmware for type=");
dump_firm_type(priv->firm[pos].type);
- printk("(%x), id %016llx.\n", priv->firm[pos].type,
- (unsigned long long)*id);
+ printk(KERN_CONT "(%x), id %016llx.\n",
+ priv->firm[pos].type, (unsigned long long)*id);
p = priv->firm[pos].ptr;
endp = p + priv->firm[pos].size;
@@ -694,7 +695,7 @@ static int load_scode(struct dvb_frontend *fe, unsigned int type,
tuner_info("Loading SCODE for type=");
dump_firm_type_and_int_freq(priv->firm[pos].type,
priv->firm[pos].int_freq);
- printk("(%x), id %016llx.\n", priv->firm[pos].type,
+ printk(KERN_CONT "(%x), id %016llx.\n", priv->firm[pos].type,
(unsigned long long)*id);
if (priv->firm_version < 0x0202)
@@ -746,15 +747,15 @@ retry:
tuner_dbg("checking firmware, user requested type=");
if (debug) {
dump_firm_type(new_fw.type);
- printk("(%x), id %016llx, ", new_fw.type,
+ printk(KERN_CONT "(%x), id %016llx, ", new_fw.type,
(unsigned long long)new_fw.std_req);
if (!int_freq) {
- printk("scode_tbl ");
+ printk(KERN_CONT "scode_tbl ");
dump_firm_type(priv->ctrl.scode_table);
- printk("(%x), ", priv->ctrl.scode_table);
+ printk(KERN_CONT "(%x), ", priv->ctrl.scode_table);
} else
- printk("int_freq %d, ", new_fw.int_freq);
- printk("scode_nr %d\n", new_fw.scode_nr);
+ printk(KERN_CONT "int_freq %d, ", new_fw.int_freq);
+ printk(KERN_CONT "scode_nr %d\n", new_fw.scode_nr);
}
/*
@@ -842,8 +843,7 @@ check_device:
goto fail;
}
- tuner_dbg("Device is Xceive %d version %d.%d, "
- "firmware version %d.%d\n",
+ tuner_dbg("Device is Xceive %d version %d.%d, firmware version %d.%d\n",
hwmodel, (version & 0xf000) >> 12, (version & 0xf00) >> 8,
(version & 0xf0) >> 4, version & 0xf);
@@ -857,8 +857,7 @@ check_device:
tuner_err("Incorrect readback of firmware version.\n");
goto fail;
} else {
- tuner_err("Returned an incorrect version. However, "
- "read is not reliable enough. Ignoring it.\n");
+ tuner_err("Returned an incorrect version. However, read is not reliable enough. Ignoring it.\n");
hwmodel = 3028;
}
}
@@ -869,8 +868,7 @@ check_device:
priv->hwvers = version & 0xff00;
} else if (priv->hwmodel == 0 || priv->hwmodel != hwmodel ||
priv->hwvers != (version & 0xff00)) {
- tuner_err("Read invalid device hardware information - tuner "
- "hung?\n");
+ tuner_err("Read invalid device hardware information - tuner hung?\n");
goto fail;
}
@@ -1327,7 +1325,7 @@ static int xc2028_sleep(struct dvb_frontend *fe)
return rc;
}
-static int xc2028_dvb_release(struct dvb_frontend *fe)
+static void xc2028_dvb_release(struct dvb_frontend *fe)
{
struct xc2028_data *priv = fe->tuner_priv;
@@ -1345,8 +1343,6 @@ static int xc2028_dvb_release(struct dvb_frontend *fe)
mutex_unlock(&xc2028_list_mutex);
fe->tuner_priv = NULL;
-
- return 0;
}
static int xc2028_get_frequency(struct dvb_frontend *fe, u32 *frequency)
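
The KERN_CONT additions throughout tuner-xc2028.c address the printk behavior where a printk() without a level prefix starts a new line at the default log level, so fragments meant to continue the previous message must be tagged explicitly. The shape of the pattern, reduced to a sketch (flag values invented):

#include <linux/printk.h>

static void demo_dump_type(unsigned int type)
{
	printk(KERN_INFO "firmware type:");	/* opens the line */
	if (type & 0x01)
		printk(KERN_CONT " BASE");	/* appends to the same line */
	if (type & 0x02)
		printk(KERN_CONT " MTS");
	printk(KERN_CONT "\n");			/* terminates the line */
}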
diff --git a/drivers/media/tuners/xc4000.c b/drivers/media/tuners/xc4000.c
index d95c7e082ccf..03eef9b87a24 100644
--- a/drivers/media/tuners/xc4000.c
+++ b/drivers/media/tuners/xc4000.c
@@ -43,14 +43,11 @@ MODULE_PARM_DESC(debug, "Debugging level (0 to 2, default: 0 (off)).");
static int no_poweroff;
module_param(no_poweroff, int, 0644);
-MODULE_PARM_DESC(no_poweroff, "Power management (1: disabled, 2: enabled, "
- "0 (default): use device-specific default mode).");
+MODULE_PARM_DESC(no_poweroff, "Power management (1: disabled, 2: enabled, 0 (default): use device-specific default mode).");
static int audio_std;
module_param(audio_std, int, 0644);
-MODULE_PARM_DESC(audio_std, "Audio standard. XC4000 audio decoder explicitly "
- "needs to know what audio standard is needed for some video standards "
- "with audio A2 or NICAM. The valid settings are a sum of:\n"
+MODULE_PARM_DESC(audio_std, "Audio standard. XC4000 audio decoder explicitly needs to know what audio standard is needed for some video standards with audio A2 or NICAM. The valid settings are a sum of:\n"
" 1: use NICAM/B or A2/B instead of NICAM/A or A2/A\n"
" 2: use A2 instead of NICAM or BTSC\n"
" 4: use SECAM/K3 instead of K1\n"
@@ -60,8 +57,7 @@ MODULE_PARM_DESC(audio_std, "Audio standard. XC4000 audio decoder explicitly "
static char firmware_name[30];
module_param_string(firmware_name, firmware_name, sizeof(firmware_name), 0);
-MODULE_PARM_DESC(firmware_name, "Firmware file name. Allows overriding the "
- "default firmware name.");
+MODULE_PARM_DESC(firmware_name, "Firmware file name. Allows overriding the default firmware name.");
static DEFINE_MUTEX(xc4000_list_mutex);
static LIST_HEAD(hybrid_tuner_instance_list);
@@ -290,8 +286,7 @@ static int xc4000_tuner_reset(struct dvb_frontend *fe)
return -EREMOTEIO;
}
} else {
- printk(KERN_ERR "xc4000: no tuner reset callback function, "
- "fatal\n");
+ printk(KERN_ERR "xc4000: no tuner reset callback function, fatal\n");
return -EINVAL;
}
return 0;
@@ -679,8 +674,7 @@ static int seek_firmware(struct dvb_frontend *fe, unsigned int type,
if (best_nr_diffs > 0U) {
printk(KERN_WARNING
- "Selecting best matching firmware (%u bits differ) for "
- "type=(%x), id %016llx:\n",
+ "Selecting best matching firmware (%u bits differ) for type=(%x), id %016llx:\n",
best_nr_diffs, type, (unsigned long long)*id);
i = best_i;
}
@@ -800,8 +794,7 @@ static int xc4000_fwupload(struct dvb_frontend *fe)
n++;
if (n >= n_array) {
- printk(KERN_ERR "More firmware images in file than "
- "were expected!\n");
+ printk(KERN_ERR "More firmware images in file than were expected!\n");
goto corrupt;
}
@@ -1055,8 +1048,7 @@ check_device:
goto fail;
}
- dprintk(1, "Device is Xceive %d version %d.%d, "
- "firmware version %d.%d\n",
+ dprintk(1, "Device is Xceive %d version %d.%d, firmware version %d.%d\n",
hwmodel, hw_major, hw_minor, fw_major, fw_minor);
/* Check firmware version against what we downloaded. */
@@ -1076,8 +1068,7 @@ check_device:
} else if (priv->hwmodel == 0 || priv->hwmodel != hwmodel ||
priv->hwvers != ((hw_major << 8) | hw_minor)) {
printk(KERN_WARNING
- "Read invalid device hardware information - tuner "
- "hung?\n");
+ "Read invalid device hardware information - tuner hung?\n");
goto fail;
}
@@ -1627,7 +1618,7 @@ static int xc4000_init(struct dvb_frontend *fe)
return 0;
}
-static int xc4000_release(struct dvb_frontend *fe)
+static void xc4000_release(struct dvb_frontend *fe)
{
struct xc4000_priv *priv = fe->tuner_priv;
@@ -1641,8 +1632,6 @@ static int xc4000_release(struct dvb_frontend *fe)
mutex_unlock(&xc4000_list_mutex);
fe->tuner_priv = NULL;
-
- return 0;
}
static const struct dvb_tuner_ops xc4000_tuner_ops = {
diff --git a/drivers/media/tuners/xc5000.c b/drivers/media/tuners/xc5000.c
index e6e5e90d8d95..796e7638b3b2 100644
--- a/drivers/media/tuners/xc5000.c
+++ b/drivers/media/tuners/xc5000.c
@@ -1326,7 +1326,7 @@ static int xc5000_init(struct dvb_frontend *fe)
return 0;
}
-static int xc5000_release(struct dvb_frontend *fe)
+static void xc5000_release(struct dvb_frontend *fe)
{
struct xc5000_priv *priv = fe->tuner_priv;
@@ -1346,8 +1346,6 @@ static int xc5000_release(struct dvb_frontend *fe)
mutex_unlock(&xc5000_list_mutex);
fe->tuner_priv = NULL;
-
- return 0;
}
static int xc5000_set_config(struct dvb_frontend *fe, void *priv_cfg)
diff --git a/drivers/media/usb/Kconfig b/drivers/media/usb/Kconfig
index 7496f332f3f5..c9644b62f91a 100644
--- a/drivers/media/usb/Kconfig
+++ b/drivers/media/usb/Kconfig
@@ -60,5 +60,10 @@ source "drivers/media/usb/hackrf/Kconfig"
source "drivers/media/usb/msi2500/Kconfig"
endif
+if MEDIA_CEC_SUPPORT
+ comment "USB HDMI CEC adapters"
+source "drivers/media/usb/pulse8-cec/Kconfig"
+endif
+
endif #MEDIA_USB_SUPPORT
endif #USB
diff --git a/drivers/media/usb/Makefile b/drivers/media/usb/Makefile
index 8874ba774a34..0f15e3351ddc 100644
--- a/drivers/media/usb/Makefile
+++ b/drivers/media/usb/Makefile
@@ -24,3 +24,4 @@ obj-$(CONFIG_VIDEO_EM28XX) += em28xx/
obj-$(CONFIG_VIDEO_USBTV) += usbtv/
obj-$(CONFIG_VIDEO_GO7007) += go7007/
obj-$(CONFIG_DVB_AS102) += as102/
+obj-$(CONFIG_USB_PULSE8_CEC) += pulse8-cec/
diff --git a/drivers/media/usb/au0828/au0828-video.c b/drivers/media/usb/au0828/au0828-video.c
index 85dd9a8e83ff..7a10eaa38f67 100644
--- a/drivers/media/usb/au0828/au0828-video.c
+++ b/drivers/media/usb/au0828/au0828-video.c
@@ -253,8 +253,7 @@ static int au0828_init_isoc(struct au0828_dev *dev, int max_packets,
dev->isoc_ctl.transfer_buffer[i] = usb_alloc_coherent(dev->usbdev,
sb_size, GFP_KERNEL, &urb->transfer_dma);
if (!dev->isoc_ctl.transfer_buffer[i]) {
- printk("unable to allocate %i bytes for transfer"
- " buffer %i%s\n",
+ printk("unable to allocate %i bytes for transfer buffer %i%s\n",
sb_size, i,
in_interrupt() ? " while in int" : "");
au0828_uninit_isoc(dev);
diff --git a/drivers/media/usb/b2c2/flexcop-usb.c b/drivers/media/usb/b2c2/flexcop-usb.c
index 52bc42da8a4c..788c73803138 100644
--- a/drivers/media/usb/b2c2/flexcop-usb.c
+++ b/drivers/media/usb/b2c2/flexcop-usb.c
@@ -33,8 +33,7 @@
static int debug;
module_param(debug, int, 0644);
-MODULE_PARM_DESC(debug, "set debugging level (1=info,ts=2,"
- "ctrl=4,i2c=8,v8mem=16 (or-able))." DEBSTATUS);
+MODULE_PARM_DESC(debug, "set debugging level (1=info,ts=2,ctrl=4,i2c=8,v8mem=16 (or-able))." DEBSTATUS);
#undef DEBSTATUS
#define deb_info(args...) dprintk(0x01, args)
@@ -433,8 +432,8 @@ static int flexcop_usb_transfer_init(struct flexcop_usb *fc_usb)
frame_size, i, j, ret;
int buffer_offset = 0;
- deb_ts("creating %d iso-urbs with %d frames "
- "each of %d bytes size = %d.\n", B2C2_USB_NUM_ISO_URB,
+ deb_ts("creating %d iso-urbs with %d frames each of %d bytes size = %d.\n",
+ B2C2_USB_NUM_ISO_URB,
B2C2_USB_FRAMES_PER_ISO, frame_size, bufsize);
fc_usb->iso_buffer = usb_alloc_coherent(fc_usb->udev,
@@ -459,8 +458,8 @@ static int flexcop_usb_transfer_init(struct flexcop_usb *fc_usb)
for (i = 0; i < B2C2_USB_NUM_ISO_URB; i++) {
int frame_offset = 0;
struct urb *urb = fc_usb->iso_urb[i];
- deb_ts("initializing and submitting urb no. %d "
- "(buf_offset: %d).\n", i, buffer_offset);
+ deb_ts("initializing and submitting urb no. %d (buf_offset: %d).\n",
+ i, buffer_offset);
urb->dev = fc_usb->udev;
urb->context = fc_usb;
diff --git a/drivers/media/usb/cpia2/cpia2_usb.c b/drivers/media/usb/cpia2/cpia2_usb.c
index e9100a235831..37f9b30b0abc 100644
--- a/drivers/media/usb/cpia2/cpia2_usb.c
+++ b/drivers/media/usb/cpia2/cpia2_usb.c
@@ -759,9 +759,7 @@ int cpia2_usb_stream_start(struct camera_data *cam, unsigned int alternate)
cam->params.camera_state.stream_mode = old_alt;
ret2 = set_alternate(cam, USBIF_CMDONLY);
if (ret2 < 0) {
- ERR("cpia2_usb_change_streaming_alternate(%d) =%d has already "
- "failed. Then tried to call "
- "set_alternate(USBIF_CMDONLY) = %d.\n",
+ ERR("cpia2_usb_change_streaming_alternate(%d) =%d has already failed. Then tried to call set_alternate(USBIF_CMDONLY) = %d.\n",
alternate, ret, ret2);
}
} else {
diff --git a/drivers/media/usb/cx231xx/cx231xx-core.c b/drivers/media/usb/cx231xx/cx231xx-core.c
index 8b099fe1d592..550ec932f931 100644
--- a/drivers/media/usb/cx231xx/cx231xx-core.c
+++ b/drivers/media/usb/cx231xx/cx231xx-core.c
@@ -241,8 +241,7 @@ static int __usb_control_msg(struct cx231xx *dev, unsigned int pipe,
int rc, i;
if (reg_debug) {
- printk(KERN_DEBUG "%s: (pipe 0x%08x): "
- "%s: %02x %02x %02x %02x %02x %02x %02x %02x ",
+ printk(KERN_DEBUG "%s: (pipe 0x%08x): %s: %02x %02x %02x %02x %02x %02x %02x %02x ",
dev->name,
pipe,
(requesttype & USB_DIR_IN) ? "IN" : "OUT",
@@ -441,8 +440,7 @@ int cx231xx_write_ctrl_reg(struct cx231xx *dev, u8 req, u16 reg, char *buf,
if (reg_debug) {
int byte;
- cx231xx_isocdbg("(pipe 0x%08x): "
- "OUT: %02x %02x %02x %02x %02x %02x %02x %02x >>>",
+ cx231xx_isocdbg("(pipe 0x%08x): OUT: %02x %02x %02x %02x %02x %02x %02x %02x >>>",
pipe,
USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
req, 0, val, reg & 0xff,
@@ -600,8 +598,8 @@ int cx231xx_set_alt_setting(struct cx231xx *dev, u8 index, u8 alt)
return -1;
}
- cx231xx_coredbg("setting alternate %d with wMaxPacketSize=%u,"
- "Interface = %d\n", alt, max_pkt_size,
+ cx231xx_coredbg("setting alternate %d with wMaxPacketSize=%u, Interface = %d\n",
+ alt, max_pkt_size,
usb_interface_index);
if (usb_interface_index > 0) {
diff --git a/drivers/media/usb/cx231xx/cx231xx-dvb.c b/drivers/media/usb/cx231xx/cx231xx-dvb.c
index 1417515d30eb..2868546999ca 100644
--- a/drivers/media/usb/cx231xx/cx231xx-dvb.c
+++ b/drivers/media/usb/cx231xx/cx231xx-dvb.c
@@ -377,8 +377,8 @@ static int attach_xc5000(u8 addr, struct cx231xx *dev)
cfg.i2c_addr = addr;
if (!dev->dvb->frontend) {
- dev_err(dev->dev, "%s/2: dvb frontend not attached. "
- "Can't attach xc5000\n", dev->name);
+ dev_err(dev->dev, "%s/2: dvb frontend not attached. Can't attach xc5000\n",
+ dev->name);
return -EINVAL;
}
diff --git a/drivers/media/usb/dvb-usb-v2/af9015.c b/drivers/media/usb/dvb-usb-v2/af9015.c
index 941ceff9b268..29011dfabb11 100644
--- a/drivers/media/usb/dvb-usb-v2/af9015.c
+++ b/drivers/media/usb/dvb-usb-v2/af9015.c
@@ -1455,7 +1455,7 @@ static const struct usb_device_id af9015_id_table[] = {
{ DVB_USB_DEVICE(USB_VID_KWORLD_2, USB_PID_CONCEPTRONIC_CTVDIGRCU,
&af9015_props, "Conceptronic USB2.0 DVB-T CTVDIGRCU V3.0", NULL) },
{ DVB_USB_DEVICE(USB_VID_KWORLD_2, USB_PID_KWORLD_MC810,
- &af9015_props, "KWorld Digial MC-810", NULL) },
+ &af9015_props, "KWorld Digital MC-810", NULL) },
{ DVB_USB_DEVICE(USB_VID_KYE, USB_PID_GENIUS_TVGO_DVB_T03,
&af9015_props, "Genius TVGo DVB-T03", NULL) },
{ DVB_USB_DEVICE(USB_VID_KWORLD_2, USB_PID_KWORLD_399U_2,
diff --git a/drivers/media/usb/dvb-usb-v2/af9035.c b/drivers/media/usb/dvb-usb-v2/af9035.c
index 8961dd732522..c673726d9b70 100644
--- a/drivers/media/usb/dvb-usb-v2/af9035.c
+++ b/drivers/media/usb/dvb-usb-v2/af9035.c
@@ -2095,6 +2095,8 @@ static const struct usb_device_id af9035_id_table[] = {
&af9035_props, "TerraTec Cinergy T Stick (rev. 2)", NULL) },
{ DVB_USB_DEVICE(USB_VID_AVERMEDIA, 0x0337,
&af9035_props, "AVerMedia HD Volar (A867)", NULL) },
+ { DVB_USB_DEVICE(USB_VID_GTEK, USB_PID_EVOLVEO_XTRATV_STICK,
+ &af9035_props, "EVOLVEO XtraTV stick", NULL) },
/* IT9135 devices */
{ DVB_USB_DEVICE(USB_VID_ITETECH, USB_PID_ITETECH_IT9135,
diff --git a/drivers/media/usb/dvb-usb-v2/dvbsky.c b/drivers/media/usb/dvb-usb-v2/dvbsky.c
index 02dbc6c45423..0636eac37bbb 100644
--- a/drivers/media/usb/dvb-usb-v2/dvbsky.c
+++ b/drivers/media/usb/dvb-usb-v2/dvbsky.c
@@ -851,6 +851,10 @@ static const struct usb_device_id dvbsky_id_table[] = {
USB_PID_TECHNOTREND_CONNECT_CT2_4650_CI_2,
&dvbsky_t680c_props, "TechnoTrend TT-connect CT2-4650 CI v1.1",
RC_MAP_TT_1500) },
+ { DVB_USB_DEVICE(USB_VID_TECHNOTREND,
+ USB_PID_TECHNOTREND_CONNECT_S2_4650_CI,
+ &dvbsky_s960c_props, "TechnoTrend TT-connect S2-4650 CI",
+ RC_MAP_TT_1500) },
{ DVB_USB_DEVICE(USB_VID_TERRATEC,
USB_PID_TERRATEC_H7_3,
&dvbsky_t680c_props, "Terratec H7 Rev.4",
diff --git a/drivers/media/usb/dvb-usb-v2/lmedm04.c b/drivers/media/usb/dvb-usb-v2/lmedm04.c
index 0e8fb89896c4..5fea02672685 100644
--- a/drivers/media/usb/dvb-usb-v2/lmedm04.c
+++ b/drivers/media/usb/dvb-usb-v2/lmedm04.c
@@ -156,21 +156,19 @@ struct lme2510_state {
static int lme2510_bulk_write(struct usb_device *dev,
u8 *snd, int len, u8 pipe)
{
- int ret, actual_l;
+ int actual_l;
- ret = usb_bulk_msg(dev, usb_sndbulkpipe(dev, pipe),
- snd, len , &actual_l, 100);
- return ret;
+ return usb_bulk_msg(dev, usb_sndbulkpipe(dev, pipe),
+ snd, len, &actual_l, 100);
}
static int lme2510_bulk_read(struct usb_device *dev,
u8 *rev, int len, u8 pipe)
{
- int ret, actual_l;
+ int actual_l;
- ret = usb_bulk_msg(dev, usb_rcvbulkpipe(dev, pipe),
- rev, len , &actual_l, 200);
- return ret;
+ return usb_bulk_msg(dev, usb_rcvbulkpipe(dev, pipe),
+ rev, len, &actual_l, 200);
}
static int lme2510_usb_talk(struct dvb_usb_device *d,
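
The two lme2510 helpers above now return usb_bulk_msg() directly instead of staging it in a `ret` temporary. Note that actual_l survives the cleanup: the helpers keep a throwaway local for the transferred byte count rather than relying on usb_bulk_msg() accepting a NULL pointer there. A sketch under the same assumptions (names hypothetical):

#include <linux/usb.h>

static int demo_bulk_write(struct usb_device *dev, u8 *buf, int len, u8 ep)
{
	int actual;	/* required by the API, unused afterwards */

	return usb_bulk_msg(dev, usb_sndbulkpipe(dev, ep),
			    buf, len, &actual, 100 /* ms timeout */);
}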
diff --git a/drivers/media/usb/dvb-usb-v2/mxl111sf-demod.c b/drivers/media/usb/dvb-usb-v2/mxl111sf-demod.c
index 047a32fe43ea..639e156e0c1b 100644
--- a/drivers/media/usb/dvb-usb-v2/mxl111sf-demod.c
+++ b/drivers/media/usb/dvb-usb-v2/mxl111sf-demod.c
@@ -549,7 +549,7 @@ static void mxl111sf_demod_release(struct dvb_frontend *fe)
fe->demodulator_priv = NULL;
}
-static struct dvb_frontend_ops mxl111sf_demod_ops = {
+static const struct dvb_frontend_ops mxl111sf_demod_ops = {
.delsys = { SYS_DVBT },
.info = {
.name = "MaxLinear MxL111SF DVB-T demodulator",
diff --git a/drivers/media/usb/dvb-usb-v2/mxl111sf-i2c.c b/drivers/media/usb/dvb-usb-v2/mxl111sf-i2c.c
index 283495c84ba3..6427137a09ef 100644
--- a/drivers/media/usb/dvb-usb-v2/mxl111sf-i2c.c
+++ b/drivers/media/usb/dvb-usb-v2/mxl111sf-i2c.c
@@ -666,8 +666,8 @@ static int mxl111sf_i2c_hw_xfer_msg(struct mxl111sf_state *state,
if (rd_status[i] == 0x04) {
if (i < 7) {
- mxl_i2c("i2c fifo empty!"
- " @ %d", i);
+ mxl_i2c("i2c fifo empty! @ %d",
+ i);
msg->buf[(index*8)+i] =
i2c_r_data[(i*3)+1];
/* read again */
@@ -692,8 +692,7 @@ static int mxl111sf_i2c_hw_xfer_msg(struct mxl111sf_state *state,
}
goto stop_copy;
} else {
- mxl_i2c("readagain "
- "ERROR!");
+ mxl_i2c("readagain ERROR!");
}
} else {
msg->buf[(index*8)+i] =
@@ -827,9 +826,8 @@ int mxl111sf_i2c_xfer(struct i2c_adapter *adap,
mxl111sf_i2c_hw_xfer_msg(state, &msg[i]) :
mxl111sf_i2c_sw_xfer_msg(state, &msg[i]);
if (mxl_fail(ret)) {
- mxl_debug_adv("failed with error %d on i2c "
- "transaction %d of %d, %sing %d bytes "
- "to/from 0x%02x", ret, i+1, num,
+ mxl_debug_adv("failed with error %d on i2c transaction %d of %d, %sing %d bytes to/from 0x%02x",
+ ret, i+1, num,
(msg[i].flags & I2C_M_RD) ?
"read" : "writ",
msg[i].len, msg[i].addr);
diff --git a/drivers/media/usb/dvb-usb-v2/mxl111sf-tuner.c b/drivers/media/usb/dvb-usb-v2/mxl111sf-tuner.c
index f141dcc55cc9..f84bef6034dc 100644
--- a/drivers/media/usb/dvb-usb-v2/mxl111sf-tuner.c
+++ b/drivers/media/usb/dvb-usb-v2/mxl111sf-tuner.c
@@ -455,13 +455,12 @@ static int mxl111sf_tuner_get_if_frequency(struct dvb_frontend *fe,
return 0;
}
-static int mxl111sf_tuner_release(struct dvb_frontend *fe)
+static void mxl111sf_tuner_release(struct dvb_frontend *fe)
{
struct mxl111sf_tuner_state *state = fe->tuner_priv;
mxl_dbg("()");
kfree(state);
fe->tuner_priv = NULL;
- return 0;
}
/* ------------------------------------------------------------------------- */
diff --git a/drivers/media/usb/dvb-usb-v2/mxl111sf.c b/drivers/media/usb/dvb-usb-v2/mxl111sf.c
index 5d676b533a3a..80c635980526 100644
--- a/drivers/media/usb/dvb-usb-v2/mxl111sf.c
+++ b/drivers/media/usb/dvb-usb-v2/mxl111sf.c
@@ -29,8 +29,7 @@
int dvb_usb_mxl111sf_debug;
module_param_named(debug, dvb_usb_mxl111sf_debug, int, 0644);
-MODULE_PARM_DESC(debug, "set debugging level "
- "(1=info, 2=xfer, 4=i2c, 8=reg, 16=adv (or-able)).");
+MODULE_PARM_DESC(debug, "set debugging level (1=info, 2=xfer, 4=i2c, 8=reg, 16=adv (or-able)).");
static int dvb_usb_mxl111sf_isoc;
module_param_named(isoc, dvb_usb_mxl111sf_isoc, int, 0644);
@@ -137,8 +136,8 @@ int mxl111sf_write_reg_mask(struct mxl111sf_state *state,
#if 1
/* dont know why this usually errors out on the first try */
if (mxl_fail(ret))
- pr_err("error writing addr: 0x%02x, mask: 0x%02x, "
- "data: 0x%02x, retrying...", addr, mask, data);
+ pr_err("error writing addr: 0x%02x, mask: 0x%02x, data: 0x%02x, retrying...",
+ addr, mask, data);
ret = mxl111sf_read_reg(state, addr, &val);
#endif
@@ -946,8 +945,7 @@ static int mxl111sf_init(struct dvb_usb_device *d)
case 138001:
break;
default:
- printk(KERN_WARNING "%s: warning: "
- "unknown hauppauge model #%d\n",
+ printk(KERN_WARNING "%s: warning: unknown hauppauge model #%d\n",
__func__, state->tv.model);
}
#endif
diff --git a/drivers/media/usb/dvb-usb/af9005-fe.c b/drivers/media/usb/dvb-usb/af9005-fe.c
index 09db3d02bd82..9862d3e6b8e8 100644
--- a/drivers/media/usb/dvb-usb/af9005-fe.c
+++ b/drivers/media/usb/dvb-usb/af9005-fe.c
@@ -1430,7 +1430,7 @@ static void af9005_fe_release(struct dvb_frontend *fe)
kfree(state);
}
-static struct dvb_frontend_ops af9005_fe_ops;
+static const struct dvb_frontend_ops af9005_fe_ops;
struct dvb_frontend *af9005_fe_attach(struct dvb_usb_device *d)
{
@@ -1455,7 +1455,7 @@ struct dvb_frontend *af9005_fe_attach(struct dvb_usb_device *d)
return NULL;
}
-static struct dvb_frontend_ops af9005_fe_ops = {
+static const struct dvb_frontend_ops af9005_fe_ops = {
.delsys = { SYS_DVBT },
.info = {
.name = "AF9005 USB DVB-T",
diff --git a/drivers/media/usb/dvb-usb/af9005.c b/drivers/media/usb/dvb-usb/af9005.c
index 7853261906b1..f5f476841aea 100644
--- a/drivers/media/usb/dvb-usb/af9005.c
+++ b/drivers/media/usb/dvb-usb/af9005.c
@@ -826,7 +826,6 @@ static int af9005_frontend_attach(struct dvb_usb_adapter *adap)
printk("EEPROM DUMP\n");
for (i = 0; i < 255; i += 8) {
af9005_read_eeprom(adap->dev, i, buf, 8);
- printk("ADDR %x ", i);
debug_dump(buf, 8, printk);
}
}
diff --git a/drivers/media/usb/dvb-usb/cinergyT2-core.c b/drivers/media/usb/dvb-usb/cinergyT2-core.c
index 290275bc7fde..6404205560eb 100644
--- a/drivers/media/usb/dvb-usb/cinergyT2-core.c
+++ b/drivers/media/usb/dvb-usb/cinergyT2-core.c
@@ -34,8 +34,7 @@
int dvb_usb_cinergyt2_debug;
module_param_named(debug, dvb_usb_cinergyt2_debug, int, 0644);
-MODULE_PARM_DESC(debug, "set debugging level (1=info, xfer=2, rc=4 "
- "(or-able)).");
+MODULE_PARM_DESC(debug, "set debugging level (1=info, xfer=2, rc=4 (or-able)).");
DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);
@@ -93,8 +92,7 @@ static int cinergyt2_frontend_attach(struct dvb_usb_adapter *adap)
ret = dvb_usb_generic_rw(d, st->data, 1, st->data, 3, 0);
if (ret < 0) {
- deb_rc("cinergyt2_power_ctrl() Failed to retrieve sleep "
- "state info\n");
+ deb_rc("cinergyt2_power_ctrl() Failed to retrieve sleep state info\n");
}
mutex_unlock(&d->data_mutex);
diff --git a/drivers/media/usb/dvb-usb/cinergyT2-fe.c b/drivers/media/usb/dvb-usb/cinergyT2-fe.c
index 2d29b4174dba..bbb10fab65bc 100644
--- a/drivers/media/usb/dvb-usb/cinergyT2-fe.c
+++ b/drivers/media/usb/dvb-usb/cinergyT2-fe.c
@@ -278,7 +278,7 @@ static void cinergyt2_fe_release(struct dvb_frontend *fe)
kfree(state);
}
-static struct dvb_frontend_ops cinergyt2_fe_ops;
+static const struct dvb_frontend_ops cinergyt2_fe_ops;
struct dvb_frontend *cinergyt2_fe_attach(struct dvb_usb_device *d)
{
@@ -295,7 +295,7 @@ struct dvb_frontend *cinergyt2_fe_attach(struct dvb_usb_device *d)
}
-static struct dvb_frontend_ops cinergyt2_fe_ops = {
+static const struct dvb_frontend_ops cinergyt2_fe_ops = {
.delsys = { SYS_DVBT },
.info = {
.name = DRIVER_NAME,
diff --git a/drivers/media/usb/dvb-usb/cxusb.c b/drivers/media/usb/dvb-usb/cxusb.c
index 243403081fa5..9b8771eb31d4 100644
--- a/drivers/media/usb/dvb-usb/cxusb.c
+++ b/drivers/media/usb/dvb-usb/cxusb.c
@@ -369,6 +369,26 @@ static int cxusb_aver_streaming_ctrl(struct dvb_usb_adapter *adap, int onoff)
return 0;
}
+static int cxusb_read_status(struct dvb_frontend *fe,
+ enum fe_status *status)
+{
+ struct dvb_usb_adapter *adap = (struct dvb_usb_adapter *)fe->dvb->priv;
+ struct cxusb_state *state = (struct cxusb_state *)adap->dev->priv;
+ int ret;
+
+ ret = state->fe_read_status(fe, status);
+
+ /* resync the slave fifo when the signal changes from unlock to lock */
+ if ((*status & FE_HAS_LOCK) && (!state->last_lock)) {
+ mutex_lock(&state->stream_mutex);
+ cxusb_streaming_ctrl(adap, 1);
+ mutex_unlock(&state->stream_mutex);
+ }
+
+ state->last_lock = (*status & FE_HAS_LOCK) ? 1 : 0;
+ return ret;
+}
+
static void cxusb_d680_dmb_drain_message(struct dvb_usb_device *d)
{
int ep = d->props.generic_bulk_ctrl_endpoint;
@@ -1372,6 +1392,12 @@ static int cxusb_mygica_t230_frontend_attach(struct dvb_usb_adapter *adap)
st->i2c_client_tuner = client_tuner;
+ /* hook fe: need to resync the slave fifo when signal locks. */
+ mutex_init(&st->stream_mutex);
+ st->last_lock = 0;
+ st->fe_read_status = adap->fe_adap[0].fe->ops.read_status;
+ adap->fe_adap[0].fe->ops.read_status = cxusb_read_status;
+
return 0;
}
diff --git a/drivers/media/usb/dvb-usb/cxusb.h b/drivers/media/usb/dvb-usb/cxusb.h
index 18acda19527a..66429d7f69b5 100644
--- a/drivers/media/usb/dvb-usb/cxusb.h
+++ b/drivers/media/usb/dvb-usb/cxusb.h
@@ -37,6 +37,11 @@ struct cxusb_state {
struct i2c_client *i2c_client_tuner;
unsigned char data[MAX_XFER_SIZE];
+
+ struct mutex stream_mutex;
+ u8 last_lock;
+ int (*fe_read_status)(struct dvb_frontend *fe,
+ enum fe_status *status);
};
#endif
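
Together with the cxusb.c hunk above, these new state fields implement a common ops-interposition pattern: the original read_status callback is stashed in driver state and replaced by a wrapper that performs extra work on an unlock-to-lock transition. Reduced to its essentials (a sketch; struct and names illustrative, locking elided):

#include "dvb_frontend.h"

struct demo_state {
	u8 last_lock;
	int (*orig_read_status)(struct dvb_frontend *fe,
				enum fe_status *status);
};

static int demo_read_status(struct dvb_frontend *fe, enum fe_status *status)
{
	struct demo_state *st = fe->demodulator_priv;	/* placement varies */
	int ret = st->orig_read_status(fe, status);	/* call the saved op */

	if ((*status & FE_HAS_LOCK) && !st->last_lock) {
		/* signal just locked: resync the stream here */
	}
	st->last_lock = !!(*status & FE_HAS_LOCK);
	return ret;
}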
diff --git a/drivers/media/usb/dvb-usb/dib0700_core.c b/drivers/media/usb/dvb-usb/dib0700_core.c
index 47ce9d5de4c6..dd5edd3a17ee 100644
--- a/drivers/media/usb/dvb-usb/dib0700_core.c
+++ b/drivers/media/usb/dvb-usb/dib0700_core.c
@@ -16,10 +16,7 @@ MODULE_PARM_DESC(debug, "set debugging level (1=info,2=fw,4=fwdata,8=data (or-ab
static int nb_packet_buffer_size = 21;
module_param(nb_packet_buffer_size, int, 0644);
MODULE_PARM_DESC(nb_packet_buffer_size,
- "Set the dib0700 driver data buffer size. This parameter "
- "corresponds to the number of TS packets. The actual size of "
- "the data buffer corresponds to this parameter "
- "multiplied by 188 (default: 21)");
+ "Set the dib0700 driver data buffer size. This parameter corresponds to the number of TS packets. The actual size of the data buffer corresponds to this parameter multiplied by 188 (default: 21)");
DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);
diff --git a/drivers/media/usb/dvb-usb/dib0700_devices.c b/drivers/media/usb/dvb-usb/dib0700_devices.c
index ef1b8ee75c57..b29d4894c2f1 100644
--- a/drivers/media/usb/dvb-usb/dib0700_devices.c
+++ b/drivers/media/usb/dvb-usb/dib0700_devices.c
@@ -26,8 +26,7 @@
static int force_lna_activation;
module_param(force_lna_activation, int, 0644);
-MODULE_PARM_DESC(force_lna_activation, "force the activation of Low-Noise-Amplifyer(s) (LNA), "
- "if applicable for the device (default: 0=automatic/off).");
+MODULE_PARM_DESC(force_lna_activation, "force the activation of Low-Noise-Amplifier(s) (LNA), if applicable for the device (default: 0=automatic/off).");
struct dib0700_adapter_state {
int (*set_param_save) (struct dvb_frontend *);
diff --git a/drivers/media/usb/dvb-usb/dibusb-common.c b/drivers/media/usb/dvb-usb/dibusb-common.c
index de3ee2547479..8207e6900656 100644
--- a/drivers/media/usb/dvb-usb/dibusb-common.c
+++ b/drivers/media/usb/dvb-usb/dibusb-common.c
@@ -382,9 +382,9 @@ int dibusb_rc_query(struct dvb_usb_device *d, u32 *event, int *state)
if (buf[0] != 0)
deb_info("key: %*ph\n", 5, buf);
+ret:
kfree(buf);
-ret:
return ret;
}
EXPORT_SYMBOL(dibusb_rc_query);
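
Moving the ret: label above kfree() is a leak fix, not a cosmetic move: the error path that jumps to ret previously bypassed freeing buf. The canonical single-exit cleanup shape, as a sketch (hypothetical helper):

#include <linux/slab.h>

static int demo_io(u8 *buf) { return 0; }	/* stand-in for the real transfer */

static int demo_query(void)
{
	u8 *buf = kmalloc(8, GFP_KERNEL);
	int ret;

	if (!buf)
		return -ENOMEM;

	ret = demo_io(buf);
	if (ret < 0)
		goto out;	/* before the fix, this jump leaked buf */
	ret = 0;
out:
	kfree(buf);		/* label sits above kfree(), so every path frees */
	return ret;
}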
diff --git a/drivers/media/usb/dvb-usb/dibusb-mc-common.c b/drivers/media/usb/dvb-usb/dibusb-mc-common.c
index d66f56cc46a5..c989cac9343d 100644
--- a/drivers/media/usb/dvb-usb/dibusb-mc-common.c
+++ b/drivers/media/usb/dvb-usb/dibusb-mc-common.c
@@ -9,7 +9,6 @@
* see Documentation/dvb/README.dvb-usb for more information
*/
-#include <linux/kconfig.h>
#include "dibusb.h"
/* 3000MC/P stuff */
diff --git a/drivers/media/usb/dvb-usb/dtt200u-fe.c b/drivers/media/usb/dvb-usb/dtt200u-fe.c
index f5c042baa254..00f565fe7cc2 100644
--- a/drivers/media/usb/dvb-usb/dtt200u-fe.c
+++ b/drivers/media/usb/dvb-usb/dtt200u-fe.c
@@ -202,7 +202,7 @@ static void dtt200u_fe_release(struct dvb_frontend* fe)
kfree(state);
}
-static struct dvb_frontend_ops dtt200u_fe_ops;
+static const struct dvb_frontend_ops dtt200u_fe_ops;
struct dvb_frontend* dtt200u_fe_attach(struct dvb_usb_device *d)
{
@@ -226,7 +226,7 @@ error:
return NULL;
}
-static struct dvb_frontend_ops dtt200u_fe_ops = {
+static const struct dvb_frontend_ops dtt200u_fe_ops = {
.delsys = { SYS_DVBT },
.info = {
.name = "WideView USB DVB-T",
diff --git a/drivers/media/usb/dvb-usb/dvb-usb-dvb.c b/drivers/media/usb/dvb-usb/dvb-usb-dvb.c
index a04c0a250625..e5675da286cb 100644
--- a/drivers/media/usb/dvb-usb/dvb-usb-dvb.c
+++ b/drivers/media/usb/dvb-usb/dvb-usb-dvb.c
@@ -277,8 +277,7 @@ int dvb_usb_adapter_frontend_init(struct dvb_usb_adapter *adap)
for (i = 0; i < adap->props.num_frontends; i++) {
if (adap->props.fe[i].frontend_attach == NULL) {
- err("strange: '%s' #%d,%d "
- "doesn't want to attach a frontend.",
+ err("strange: '%s' #%d,%d doesn't want to attach a frontend.",
adap->dev->desc->name, adap->id, i);
return 0;
diff --git a/drivers/media/usb/dvb-usb/dvb-usb-firmware.c b/drivers/media/usb/dvb-usb/dvb-usb-firmware.c
index dd048a7c461c..f0023dbb7276 100644
--- a/drivers/media/usb/dvb-usb/dvb-usb-firmware.c
+++ b/drivers/media/usb/dvb-usb/dvb-usb-firmware.c
@@ -49,8 +49,7 @@ int usb_cypress_load_firmware(struct usb_device *udev, const struct firmware *fw
ret = usb_cypress_writemem(udev,hx.addr,hx.data,hx.len);
if (ret != hx.len) {
- err("error while transferring firmware "
- "(transferred size: %d, block size: %d)",
+ err("error while transferring firmware (transferred size: %d, block size: %d)",
ret,hx.len);
ret = -EINVAL;
break;
@@ -81,8 +80,7 @@ int dvb_usb_download_firmware(struct usb_device *udev, struct dvb_usb_device_pro
const struct firmware *fw = NULL;
if ((ret = request_firmware(&fw, props->firmware, &udev->dev)) != 0) {
- err("did not find the firmware file. (%s) "
- "Please see linux/Documentation/dvb/ for more details on firmware-problems. (%d)",
+ err("did not find the firmware file. (%s) Please see linux/Documentation/dvb/ for more details on firmware-problems. (%d)",
props->firmware,ret);
return ret;
}
diff --git a/drivers/media/usb/dvb-usb/dvb-usb.h b/drivers/media/usb/dvb-usb/dvb-usb.h
index 107255b08b2b..67f898b6f6d0 100644
--- a/drivers/media/usb/dvb-usb/dvb-usb.h
+++ b/drivers/media/usb/dvb-usb/dvb-usb.h
@@ -467,8 +467,10 @@ extern int dvb_usb_device_init(struct usb_interface *,
extern void dvb_usb_device_exit(struct usb_interface *);
/* the generic read/write method for device control */
-extern int dvb_usb_generic_rw(struct dvb_usb_device *, u8 *, u16, u8 *, u16,int);
-extern int dvb_usb_generic_write(struct dvb_usb_device *, u8 *, u16);
+extern int __must_check
+dvb_usb_generic_rw(struct dvb_usb_device *, u8 *, u16, u8 *, u16, int);
+extern int __must_check
+dvb_usb_generic_write(struct dvb_usb_device *, u8 *, u16);
/* commonly used remote control parsing */
extern int dvb_usb_nec_rc_key_to_event(struct dvb_usb_device *, u8[], u32 *, int *);
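
Tagging dvb_usb_generic_rw()/dvb_usb_generic_write() with __must_check makes the compiler warn (via warn_unused_result) whenever a caller discards the return value, which is easy to do for USB control transfers that fail only occasionally. A sketch of the effect on callers (hypothetical function):

#include <linux/compiler.h>
#include <linux/types.h>

int __must_check demo_xfer(u8 *buf, u16 len);

static void demo_caller(u8 *buf)
{
	demo_xfer(buf, 8);		/* now warns: ignoring return value */

	if (demo_xfer(buf, 8) < 0)	/* fine: the result is checked */
		return;
}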
diff --git a/drivers/media/usb/dvb-usb/dw2102.c b/drivers/media/usb/dvb-usb/dw2102.c
index 2c720cb2fb00..6ca502d834b4 100644
--- a/drivers/media/usb/dvb-usb/dw2102.c
+++ b/drivers/media/usb/dvb-usb/dw2102.c
@@ -86,8 +86,7 @@ MODULE_PARM_DESC(debug, "set debugging level (1=info 2=xfer 4=rc(or-able))."
/* demod probe */
static int demod_probe = 1;
module_param_named(demod, demod_probe, int, 0644);
-MODULE_PARM_DESC(demod, "demod to probe (1=cx24116 2=stv0903+stv6110 "
- "4=stv0903+stb6100(or-able)).");
+MODULE_PARM_DESC(demod, "demod to probe (1=cx24116 2=stv0903+stv6110 4=stv0903+stb6100(or-able)).");
DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);
@@ -1642,6 +1641,7 @@ enum dw2102_table_entry {
TEVII_S632,
TERRATEC_CINERGY_S2_R2,
TERRATEC_CINERGY_S2_R3,
+ TERRATEC_CINERGY_S2_R4,
GOTVIEW_SAT_HD,
GENIATECH_T220,
TECHNOTREND_S2_4600,
@@ -1671,6 +1671,7 @@ static struct usb_device_id dw2102_table[] = {
[TEVII_S632] = {USB_DEVICE(0x9022, USB_PID_TEVII_S632)},
[TERRATEC_CINERGY_S2_R2] = {USB_DEVICE(USB_VID_TERRATEC, USB_PID_TERRATEC_CINERGY_S2_R2)},
[TERRATEC_CINERGY_S2_R3] = {USB_DEVICE(USB_VID_TERRATEC, USB_PID_TERRATEC_CINERGY_S2_R3)},
+ [TERRATEC_CINERGY_S2_R4] = {USB_DEVICE(USB_VID_TERRATEC, USB_PID_TERRATEC_CINERGY_S2_R4)},
[GOTVIEW_SAT_HD] = {USB_DEVICE(0x1FE1, USB_PID_GOTVIEW_SAT_HD)},
[GENIATECH_T220] = {USB_DEVICE(0x1f4d, 0xD220)},
[TECHNOTREND_S2_4600] = {USB_DEVICE(USB_VID_TECHNOTREND,
@@ -2343,12 +2344,7 @@ static struct usb_driver dw2102_driver = {
module_usb_driver(dw2102_driver);
MODULE_AUTHOR("Igor M. Liplianin (c) liplianin@me.by");
-MODULE_DESCRIPTION("Driver for DVBWorld DVB-S 2101, 2102, DVB-S2 2104,"
- " DVB-C 3101 USB2.0,"
- " TeVii S421, S480, S482, S600, S630, S632, S650,"
- " TeVii S660, S662, Prof 1100, 7500 USB2.0,"
- " Geniatech SU3000, T220,"
- " TechnoTrend S2-4600, Terratec Cinergy S2 devices");
+MODULE_DESCRIPTION("Driver for DVBWorld DVB-S 2101, 2102, DVB-S2 2104, DVB-C 3101 USB2.0, TeVii S421, S480, S482, S600, S630, S632, S650, TeVii S660, S662, Prof 1100, 7500 USB2.0, Geniatech SU3000, T220, TechnoTrend S2-4600, Terratec Cinergy S2 devices");
MODULE_VERSION("0.1");
MODULE_LICENSE("GPL");
MODULE_FIRMWARE(DW2101_FIRMWARE);
diff --git a/drivers/media/usb/dvb-usb/friio-fe.c b/drivers/media/usb/dvb-usb/friio-fe.c
index 979f05b4b87c..0251a4e91d47 100644
--- a/drivers/media/usb/dvb-usb/friio-fe.c
+++ b/drivers/media/usb/dvb-usb/friio-fe.c
@@ -401,7 +401,7 @@ static void jdvbt90502_release(struct dvb_frontend *fe)
}
-static struct dvb_frontend_ops jdvbt90502_ops;
+static const struct dvb_frontend_ops jdvbt90502_ops;
struct dvb_frontend *jdvbt90502_attach(struct dvb_usb_device *d)
{
@@ -432,7 +432,7 @@ error:
return NULL;
}
-static struct dvb_frontend_ops jdvbt90502_ops = {
+static const struct dvb_frontend_ops jdvbt90502_ops = {
.delsys = { SYS_ISDBT },
.info = {
.name = "Comtech JDVBT90502 ISDB-T",
diff --git a/drivers/media/usb/dvb-usb/friio.c b/drivers/media/usb/dvb-usb/friio.c
index 474a17e4db0c..62abe6c43a32 100644
--- a/drivers/media/usb/dvb-usb/friio.c
+++ b/drivers/media/usb/dvb-usb/friio.c
@@ -320,8 +320,8 @@ restart:
*/
if (rbuf[0] & 0x80) { /* still in PowerOnReset state? */
if (++retry > 3) {
- deb_info("failed to get the correct"
- " FE demod status:0x%02x\n", rbuf[0]);
+ deb_info("failed to get the correct FE demod status:0x%02x\n",
+ rbuf[0]);
goto error;
}
msleep(100);
diff --git a/drivers/media/usb/dvb-usb/gp8psk.c b/drivers/media/usb/dvb-usb/gp8psk.c
index 993bb7a72985..2360e7e32b06 100644
--- a/drivers/media/usb/dvb-usb/gp8psk.c
+++ b/drivers/media/usb/dvb-usb/gp8psk.c
@@ -135,8 +135,7 @@ static int gp8psk_load_bcm4500fw(struct dvb_usb_device *d)
u8 *buf;
if ((ret = request_firmware(&fw, bcm4500_firmware,
&d->udev->dev)) != 0) {
- err("did not find the bcm4500 firmware file. (%s) "
- "Please see linux/Documentation/dvb/ for more details on firmware-problems. (%d)",
+ err("did not find the bcm4500 firmware file. (%s) Please see linux/Documentation/dvb/ for more details on firmware-problems. (%d)",
bcm4500_firmware,ret);
return ret;
}
diff --git a/drivers/media/usb/dvb-usb/m920x.c b/drivers/media/usb/dvb-usb/m920x.c
index eafc5c82467f..70672e1e5ec7 100644
--- a/drivers/media/usb/dvb-usb/m920x.c
+++ b/drivers/media/usb/dvb-usb/m920x.c
@@ -55,13 +55,9 @@ static inline int m920x_read(struct usb_device *udev, u8 request, u16 value,
static inline int m920x_write(struct usb_device *udev, u8 request,
u16 value, u16 index)
{
- int ret;
-
- ret = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
- request, USB_TYPE_VENDOR | USB_DIR_OUT,
- value, index, NULL, 0, 2000);
-
- return ret;
+ return usb_control_msg(udev, usb_sndctrlpipe(udev, 0), request,
+ USB_TYPE_VENDOR | USB_DIR_OUT, value, index,
+ NULL, 0, 2000);
}
static inline int m920x_write_seq(struct usb_device *udev, u8 request,
diff --git a/drivers/media/usb/dvb-usb/opera1.c b/drivers/media/usb/dvb-usb/opera1.c
index 2566d2f1c2ad..946a5ccc8f1a 100644
--- a/drivers/media/usb/dvb-usb/opera1.c
+++ b/drivers/media/usb/dvb-usb/opera1.c
@@ -453,8 +453,7 @@ static int opera1_xilinx_load_firmware(struct usb_device *dev,
info("start downloading fpga firmware %s",filename);
if ((ret = request_firmware(&fw, filename, &dev->dev)) != 0) {
- err("did not find the firmware file. (%s) "
- "Please see linux/Documentation/dvb/ for more details on firmware-problems.",
+ err("did not find the firmware file. (%s) Please see linux/Documentation/dvb/ for more details on firmware-problems.",
filename);
return ret;
} else {
diff --git a/drivers/media/usb/dvb-usb/technisat-usb2.c b/drivers/media/usb/dvb-usb/technisat-usb2.c
index 4706628a3ed5..02c3bee6f83b 100644
--- a/drivers/media/usb/dvb-usb/technisat-usb2.c
+++ b/drivers/media/usb/dvb-usb/technisat-usb2.c
@@ -50,8 +50,7 @@ MODULE_PARM_DESC(debug,
static int disable_led_control;
module_param(disable_led_control, int, 0444);
MODULE_PARM_DESC(disable_led_control,
- "disable LED control of the device "
- "(default: 0 - LED control is active).");
+ "disable LED control of the device (default: 0 - LED control is active).");
/* device private data */
struct technisat_usb2_state {
diff --git a/drivers/media/usb/dvb-usb/vp702x-fe.c b/drivers/media/usb/dvb-usb/vp702x-fe.c
index 27398c08c69d..7ff31baa3682 100644
--- a/drivers/media/usb/dvb-usb/vp702x-fe.c
+++ b/drivers/media/usb/dvb-usb/vp702x-fe.c
@@ -323,7 +323,7 @@ static void vp702x_fe_release(struct dvb_frontend* fe)
kfree(st);
}
-static struct dvb_frontend_ops vp702x_fe_ops;
+static const struct dvb_frontend_ops vp702x_fe_ops;
struct dvb_frontend * vp702x_fe_attach(struct dvb_usb_device *d)
{
@@ -345,7 +345,7 @@ error:
}
-static struct dvb_frontend_ops vp702x_fe_ops = {
+static const struct dvb_frontend_ops vp702x_fe_ops = {
.delsys = { SYS_DVBS },
.info = {
.name = "Twinhan DST-like frontend (VP7021/VP7020) DVB-S",
diff --git a/drivers/media/usb/dvb-usb/vp7045-fe.c b/drivers/media/usb/dvb-usb/vp7045-fe.c
index 7765602ea658..4520ad9c2014 100644
--- a/drivers/media/usb/dvb-usb/vp7045-fe.c
+++ b/drivers/media/usb/dvb-usb/vp7045-fe.c
@@ -140,7 +140,7 @@ static void vp7045_fe_release(struct dvb_frontend* fe)
kfree(state);
}
-static struct dvb_frontend_ops vp7045_fe_ops;
+static const struct dvb_frontend_ops vp7045_fe_ops;
struct dvb_frontend * vp7045_fe_attach(struct dvb_usb_device *d)
{
@@ -158,7 +158,7 @@ error:
}
-static struct dvb_frontend_ops vp7045_fe_ops = {
+static const struct dvb_frontend_ops vp7045_fe_ops = {
.delsys = { SYS_DVBT },
.info = {
.name = "Twinhan VP7045/46 USB DVB-T",
diff --git a/drivers/media/usb/em28xx/Kconfig b/drivers/media/usb/em28xx/Kconfig
index d917b0a2beb1..aa131cf9989b 100644
--- a/drivers/media/usb/em28xx/Kconfig
+++ b/drivers/media/usb/em28xx/Kconfig
@@ -11,7 +11,7 @@ config VIDEO_EM28XX_V4L2
select VIDEO_SAA711X if MEDIA_SUBDRV_AUTOSELECT
select VIDEO_TVP5150 if MEDIA_SUBDRV_AUTOSELECT
select VIDEO_MSP3400 if MEDIA_SUBDRV_AUTOSELECT
- select VIDEO_MT9V011 if MEDIA_SUBDRV_AUTOSELECT
+ select VIDEO_MT9V011 if MEDIA_SUBDRV_AUTOSELECT && MEDIA_CAMERA_SUPPORT
---help---
This is a video4linux driver for Empia 28xx based TV cards.
diff --git a/drivers/media/usb/em28xx/em28xx-audio.c b/drivers/media/usb/em28xx/em28xx-audio.c
index e11fe46a547c..7969ddb9e2dd 100644
--- a/drivers/media/usb/em28xx/em28xx-audio.c
+++ b/drivers/media/usb/em28xx/em28xx-audio.c
@@ -3,7 +3,7 @@
*
* Copyright (C) 2006 Markus Rechberger <mrechberger@gmail.com>
*
- * Copyright (C) 2007-2014 Mauro Carvalho Chehab
+ * Copyright (C) 2007-2016 Mauro Carvalho Chehab
* - Port to work with the in-kernel driver
* - Cleanups, fixes, alsa-controls, etc.
*
@@ -25,6 +25,8 @@
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
+#include "em28xx.h"
+
#include <linux/kernel.h>
#include <linux/usb.h>
#include <linux/init.h>
@@ -44,7 +46,6 @@
#include <sound/tlv.h>
#include <sound/ac97_codec.h>
#include <media/v4l2-common.h>
-#include "em28xx.h"
static int debug;
module_param(debug, int, 0644);
@@ -54,10 +55,10 @@ MODULE_PARM_DESC(debug, "activates debug info");
#define EM28XX_MIN_AUDIO_PACKETS 64
#define dprintk(fmt, arg...) do { \
- if (debug) \
- printk(KERN_INFO "em28xx-audio %s: " fmt, \
- __func__, ##arg); \
- } while (0)
+ if (debug) \
+ dev_printk(KERN_DEBUG, &dev->intf->dev, \
+ "video: %s: " fmt, __func__, ## arg); \
+} while (0)
static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX;
@@ -91,7 +92,8 @@ static void em28xx_audio_isocirq(struct urb *urb)
struct snd_pcm_runtime *runtime;
if (dev->disconnected) {
- dprintk("device disconnected while streaming. URB status=%d.\n", urb->status);
+ dprintk("device disconnected while streaming. URB status=%d.\n",
+ urb->status);
atomic_set(&dev->adev.stream_started, 0);
return;
}
@@ -164,8 +166,9 @@ static void em28xx_audio_isocirq(struct urb *urb)
status = usb_submit_urb(urb, GFP_ATOMIC);
if (status < 0)
- em28xx_errdev("resubmit of audio urb failed (error=%i)\n",
- status);
+ dev_err(&dev->intf->dev,
+ "resubmit of audio urb failed (error=%i)\n",
+ status);
return;
}
@@ -182,8 +185,9 @@ static int em28xx_init_audio_isoc(struct em28xx *dev)
errCode = usb_submit_urb(dev->adev.urb[i], GFP_ATOMIC);
if (errCode) {
- em28xx_errdev("submit of audio urb failed (error=%i)\n",
- errCode);
+ dev_err(&dev->intf->dev,
+ "submit of audio urb failed (error=%i)\n",
+ errCode);
em28xx_deinit_isoc_audio(dev);
atomic_set(&dev->adev.stream_started, 0);
return errCode;
@@ -197,6 +201,7 @@ static int em28xx_init_audio_isoc(struct em28xx *dev)
static int snd_pcm_alloc_vmalloc_buffer(struct snd_pcm_substream *subs,
size_t size)
{
+ struct em28xx *dev = snd_pcm_substream_chip(subs);
struct snd_pcm_runtime *runtime = subs->runtime;
dprintk("Allocating vbuffer\n");
@@ -254,8 +259,7 @@ static int snd_em28xx_capture_open(struct snd_pcm_substream *substream)
int nonblock, ret = 0;
if (!dev) {
- em28xx_err("BUG: em28xx can't find device struct."
- " Can't proceed with open\n");
+ pr_err("em28xx-audio: BUG: em28xx can't find device struct. Can't proceed with open\n");
return -ENODEV;
}
@@ -275,6 +279,8 @@ static int snd_em28xx_capture_open(struct snd_pcm_substream *substream)
if (dev->adev.users == 0) {
if (dev->alt == 0 || dev->is_audio_only) {
+ struct usb_device *udev = interface_to_usbdev(dev->intf);
+
if (dev->is_audio_only)
/* audio is on a separate interface */
dev->alt = 1;
@@ -292,7 +298,7 @@ static int snd_em28xx_capture_open(struct snd_pcm_substream *substream)
*/
dprintk("changing alternate number on interface %d to %d\n",
dev->ifnum, dev->alt);
- usb_set_interface(dev->udev, dev->ifnum, dev->alt);
+ usb_set_interface(udev, dev->ifnum, dev->alt);
}
/* Sets volume, mute, etc */
@@ -318,7 +324,8 @@ static int snd_em28xx_capture_open(struct snd_pcm_substream *substream)
err:
mutex_unlock(&dev->lock);
- em28xx_err("Error while configuring em28xx mixer\n");
+ dev_err(&dev->intf->dev,
+ "Error while configuring em28xx mixer\n");
return ret;
}
@@ -709,6 +716,7 @@ static const struct snd_pcm_ops snd_em28xx_pcm_capture = {
static void em28xx_audio_free_urb(struct em28xx *dev)
{
+ struct usb_device *udev = interface_to_usbdev(dev->intf);
int i;
for (i = 0; i < dev->adev.num_urb; i++) {
@@ -717,7 +725,7 @@ static void em28xx_audio_free_urb(struct em28xx *dev)
if (!urb)
continue;
- usb_free_coherent(dev->udev, urb->transfer_buffer_length,
+ usb_free_coherent(udev, urb->transfer_buffer_length,
dev->adev.transfer_buffer[i],
urb->transfer_dma);
@@ -744,6 +752,7 @@ static int em28xx_audio_urb_init(struct em28xx *dev)
{
struct usb_interface *intf;
struct usb_endpoint_descriptor *e, *ep = NULL;
+ struct usb_device *udev = interface_to_usbdev(dev->intf);
int i, ep_size, interval, num_urb, npackets;
int urb_size, bytes_per_transfer;
u8 alt;
@@ -753,10 +762,10 @@ static int em28xx_audio_urb_init(struct em28xx *dev)
else
alt = 7;
- intf = usb_ifnum_to_if(dev->udev, dev->ifnum);
+ intf = usb_ifnum_to_if(udev, dev->ifnum);
if (intf->num_altsetting <= alt) {
- em28xx_errdev("alt %d doesn't exist on interface %d\n",
+ dev_err(&dev->intf->dev, "alt %d doesn't exist on interface %d\n",
dev->ifnum, alt);
return -ENODEV;
}
@@ -772,18 +781,17 @@ static int em28xx_audio_urb_init(struct em28xx *dev)
}
if (!ep) {
- em28xx_errdev("Couldn't find an audio endpoint");
+ dev_err(&dev->intf->dev, "Couldn't find an audio endpoint");
return -ENODEV;
}
- ep_size = em28xx_audio_ep_packet_size(dev->udev, ep);
+ ep_size = em28xx_audio_ep_packet_size(udev, ep);
interval = 1 << (ep->bInterval - 1);
- em28xx_info("Endpoint 0x%02x %s on intf %d alt %d interval = %d, size %d\n",
- EM28XX_EP_AUDIO, usb_speed_string(dev->udev->speed),
- dev->ifnum, alt,
- interval,
- ep_size);
+ dev_info(&dev->intf->dev,
+ "Endpoint 0x%02x %s on intf %d alt %d interval = %d, size %d\n",
+ EM28XX_EP_AUDIO, usb_speed_string(udev->speed),
+ dev->ifnum, alt, interval, ep_size);
/* Calculate the number and size of URBs to better fit the audio samples */
@@ -820,8 +828,9 @@ static int em28xx_audio_urb_init(struct em28xx *dev)
if (urb_size > ep_size * npackets)
npackets = DIV_ROUND_UP(urb_size, ep_size);
- em28xx_info("Number of URBs: %d, with %d packets and %d size\n",
- num_urb, npackets, urb_size);
+ dev_info(&dev->intf->dev,
+ "Number of URBs: %d, with %d packets and %d size\n",
+ num_urb, npackets, urb_size);
/* Estimate the bytes per period */
dev->adev.period = urb_size * npackets;
@@ -855,18 +864,19 @@ static int em28xx_audio_urb_init(struct em28xx *dev)
}
dev->adev.urb[i] = urb;
- buf = usb_alloc_coherent(dev->udev, npackets * ep_size, GFP_ATOMIC,
+ buf = usb_alloc_coherent(udev, npackets * ep_size, GFP_ATOMIC,
&urb->transfer_dma);
if (!buf) {
- em28xx_errdev("usb_alloc_coherent failed!\n");
+ dev_err(&dev->intf->dev,
+ "usb_alloc_coherent failed!\n");
em28xx_audio_free_urb(dev);
return -ENOMEM;
}
dev->adev.transfer_buffer[i] = buf;
- urb->dev = dev->udev;
+ urb->dev = udev;
urb->context = dev;
- urb->pipe = usb_rcvisocpipe(dev->udev, EM28XX_EP_AUDIO);
+ urb->pipe = usb_rcvisocpipe(udev, EM28XX_EP_AUDIO);
urb->transfer_flags = URB_ISO_ASAP | URB_NO_TRANSFER_DMA_MAP;
urb->transfer_buffer = buf;
urb->interval = interval;
@@ -886,6 +896,7 @@ static int em28xx_audio_urb_init(struct em28xx *dev)
static int em28xx_audio_init(struct em28xx *dev)
{
struct em28xx_audio *adev = &dev->adev;
+ struct usb_device *udev = interface_to_usbdev(dev->intf);
struct snd_pcm *pcm;
struct snd_card *card;
static int devnr;
@@ -898,23 +909,23 @@ static int em28xx_audio_init(struct em28xx *dev)
return 0;
}
- em28xx_info("Binding audio extension\n");
+ dev_info(&dev->intf->dev, "Binding audio extension\n");
kref_get(&dev->ref);
- printk(KERN_INFO "em28xx-audio.c: Copyright (C) 2006 Markus "
- "Rechberger\n");
- printk(KERN_INFO
- "em28xx-audio.c: Copyright (C) 2007-2014 Mauro Carvalho Chehab\n");
+ dev_info(&dev->intf->dev,
+ "em28xx-audio.c: Copyright (C) 2006 Markus Rechberger\n");
+ dev_info(&dev->intf->dev,
+ "em28xx-audio.c: Copyright (C) 2007-2016 Mauro Carvalho Chehab\n");
- err = snd_card_new(&dev->udev->dev, index[devnr], "Em28xx Audio",
+ err = snd_card_new(&dev->intf->dev, index[devnr], "Em28xx Audio",
THIS_MODULE, 0, &card);
if (err < 0)
return err;
spin_lock_init(&adev->slock);
adev->sndcard = card;
- adev->udev = dev->udev;
+ adev->udev = udev;
err = snd_pcm_new(card, "Em28xx Audio", 0, 0, 1, &pcm);
if (err < 0)
@@ -955,7 +966,7 @@ static int em28xx_audio_init(struct em28xx *dev)
if (err < 0)
goto urb_free;
- em28xx_info("Audio extension successfully initialized\n");
+ dev_info(&dev->intf->dev, "Audio extension successfully initialized\n");
return 0;
urb_free:
@@ -980,7 +991,7 @@ static int em28xx_audio_fini(struct em28xx *dev)
return 0;
}
- em28xx_info("Closing audio extension\n");
+ dev_info(&dev->intf->dev, "Closing audio extension\n");
if (dev->adev.sndcard) {
snd_card_disconnect(dev->adev.sndcard);
@@ -1004,7 +1015,7 @@ static int em28xx_audio_suspend(struct em28xx *dev)
if (dev->usb_audio_type != EM28XX_USB_AUDIO_VENDOR)
return 0;
- em28xx_info("Suspending audio extension\n");
+ dev_info(&dev->intf->dev, "Suspending audio extension\n");
em28xx_deinit_isoc_audio(dev);
atomic_set(&dev->adev.stream_started, 0);
return 0;
@@ -1018,7 +1029,7 @@ static int em28xx_audio_resume(struct em28xx *dev)
if (dev->usb_audio_type != EM28XX_USB_AUDIO_VENDOR)
return 0;
- em28xx_info("Resuming audio extension\n");
+ dev_info(&dev->intf->dev, "Resuming audio extension\n");
/* Nothing to do other than schedule_work() ?? */
schedule_work(&dev->adev.wq_trigger);
return 0;
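The audio hunks above rebuild the driver's dprintk() on top of dev_printk(), so debug output carries the owning device's name, and keep the do { } while (0) wrapper so the macro behaves as a single statement. A generic sketch of the idiom, assuming a debug module parameter as in the driver (dbg is an illustrative name):

    #include <linux/device.h>
    #include <linux/moduleparam.h>

    static int debug;
    module_param(debug, int, 0644);

    /* do { } while (0) keeps the macro safe inside un-braced if/else */
    #define dbg(dev, fmt, arg...) do {                              \
            if (debug)                                              \
                    dev_printk(KERN_DEBUG, dev, "%s: " fmt,         \
                               __func__, ##arg);                    \
    } while (0)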
diff --git a/drivers/media/usb/em28xx/em28xx-camera.c b/drivers/media/usb/em28xx/em28xx-camera.c
index 72f3f4d50253..89c890ba7dd6 100644
--- a/drivers/media/usb/em28xx/em28xx-camera.c
+++ b/drivers/media/usb/em28xx/em28xx-camera.c
@@ -19,14 +19,15 @@
Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
+#include "em28xx.h"
+
#include <linux/i2c.h>
+#include <linux/usb.h>
#include <media/soc_camera.h>
#include <media/i2c/mt9v011.h>
#include <media/v4l2-clk.h>
#include <media/v4l2-common.h>
-#include "em28xx.h"
-
/* Possible i2c addresses of Micron sensors */
static unsigned short micron_sensor_addrs[] = {
0xb8 >> 1, /* MT9V111, MT9V403 */
@@ -120,14 +121,16 @@ static int em28xx_probe_sensor_micron(struct em28xx *dev)
ret = i2c_master_send(&client, &reg, 1);
if (ret < 0) {
if (ret != -ENXIO)
- em28xx_errdev("couldn't read from i2c device 0x%02x: error %i\n",
- client.addr << 1, ret);
+ dev_err(&dev->intf->dev,
+ "couldn't read from i2c device 0x%02x: error %i\n",
+ client.addr << 1, ret);
continue;
}
ret = i2c_master_recv(&client, (u8 *)&id_be, 2);
if (ret < 0) {
- em28xx_errdev("couldn't read from i2c device 0x%02x: error %i\n",
- client.addr << 1, ret);
+ dev_err(&dev->intf->dev,
+ "couldn't read from i2c device 0x%02x: error %i\n",
+ client.addr << 1, ret);
continue;
}
id = be16_to_cpu(id_be);
@@ -135,14 +138,16 @@ static int em28xx_probe_sensor_micron(struct em28xx *dev)
reg = 0xff;
ret = i2c_master_send(&client, &reg, 1);
if (ret < 0) {
- em28xx_errdev("couldn't read from i2c device 0x%02x: error %i\n",
- client.addr << 1, ret);
+ dev_err(&dev->intf->dev,
+ "couldn't read from i2c device 0x%02x: error %i\n",
+ client.addr << 1, ret);
continue;
}
ret = i2c_master_recv(&client, (u8 *)&id_be, 2);
if (ret < 0) {
- em28xx_errdev("couldn't read from i2c device 0x%02x: error %i\n",
- client.addr << 1, ret);
+ dev_err(&dev->intf->dev,
+ "couldn't read from i2c device 0x%02x: error %i\n",
+ client.addr << 1, ret);
continue;
}
/* Validate chip ID to be sure we have a Micron device */
@@ -180,15 +185,17 @@ static int em28xx_probe_sensor_micron(struct em28xx *dev)
dev->em28xx_sensor = EM28XX_MT9M001;
break;
default:
- em28xx_info("unknown Micron sensor detected: 0x%04x\n",
- id);
+ dev_info(&dev->intf->dev,
+ "unknown Micron sensor detected: 0x%04x\n", id);
return 0;
}
if (dev->em28xx_sensor == EM28XX_NOSENSOR)
- em28xx_info("unsupported sensor detected: %s\n", name);
+ dev_info(&dev->intf->dev,
+ "unsupported sensor detected: %s\n", name);
else
- em28xx_info("sensor %s detected\n", name);
+ dev_info(&dev->intf->dev,
+ "sensor %s detected\n", name);
dev->i2c_client[dev->def_i2c_bus].addr = client.addr;
return 0;
@@ -218,16 +225,18 @@ static int em28xx_probe_sensor_omnivision(struct em28xx *dev)
ret = i2c_smbus_read_byte_data(&client, reg);
if (ret < 0) {
if (ret != -ENXIO)
- em28xx_errdev("couldn't read from i2c device 0x%02x: error %i\n",
- client.addr << 1, ret);
+ dev_err(&dev->intf->dev,
+ "couldn't read from i2c device 0x%02x: error %i\n",
+ client.addr << 1, ret);
continue;
}
id = ret << 8;
reg = 0x1d;
ret = i2c_smbus_read_byte_data(&client, reg);
if (ret < 0) {
- em28xx_errdev("couldn't read from i2c device 0x%02x: error %i\n",
- client.addr << 1, ret);
+ dev_err(&dev->intf->dev,
+ "couldn't read from i2c device 0x%02x: error %i\n",
+ client.addr << 1, ret);
continue;
}
id += ret;
@@ -238,16 +247,18 @@ static int em28xx_probe_sensor_omnivision(struct em28xx *dev)
reg = 0x0a;
ret = i2c_smbus_read_byte_data(&client, reg);
if (ret < 0) {
- em28xx_errdev("couldn't read from i2c device 0x%02x: error %i\n",
- client.addr << 1, ret);
+ dev_err(&dev->intf->dev,
+ "couldn't read from i2c device 0x%02x: error %i\n",
+ client.addr << 1, ret);
continue;
}
id = ret << 8;
reg = 0x0b;
ret = i2c_smbus_read_byte_data(&client, reg);
if (ret < 0) {
- em28xx_errdev("couldn't read from i2c device 0x%02x: error %i\n",
- client.addr << 1, ret);
+ dev_err(&dev->intf->dev,
+ "couldn't read from i2c device 0x%02x: error %i\n",
+ client.addr << 1, ret);
continue;
}
id += ret;
@@ -285,15 +296,18 @@ static int em28xx_probe_sensor_omnivision(struct em28xx *dev)
name = "OV9655";
break;
default:
- em28xx_info("unknown OmniVision sensor detected: 0x%04x\n",
- id);
+ dev_info(&dev->intf->dev,
+ "unknown OmniVision sensor detected: 0x%04x\n",
+ id);
return 0;
}
if (dev->em28xx_sensor == EM28XX_NOSENSOR)
- em28xx_info("unsupported sensor detected: %s\n", name);
+ dev_info(&dev->intf->dev,
+ "unsupported sensor detected: %s\n", name);
else
- em28xx_info("sensor %s detected\n", name);
+ dev_info(&dev->intf->dev,
+ "sensor %s detected\n", name);
dev->i2c_client[dev->def_i2c_bus].addr = client.addr;
return 0;
@@ -317,7 +331,8 @@ int em28xx_detect_sensor(struct em28xx *dev)
*/
if (dev->em28xx_sensor == EM28XX_NOSENSOR && ret < 0) {
- em28xx_info("No sensor detected\n");
+ dev_info(&dev->intf->dev,
+ "No sensor detected\n");
return -ENODEV;
}
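The sensor probes above keep their detection logic intact while routing error paths through dev_err(): each probe writes a register index with i2c_master_send(), reads two bytes back with i2c_master_recv(), and byte-swaps the result. A standalone sketch of that read, with hypothetical names and the error handling trimmed to the essentials:

    #include <linux/i2c.h>
    #include <asm/byteorder.h>

    /* Read a big-endian 16-bit ID register from a raw i2c client. */
    static int demo_read_id(struct i2c_client *client, u8 reg, u16 *id)
    {
            __be16 id_be;
            int ret;

            ret = i2c_master_send(client, &reg, 1);
            if (ret < 0)
                    return ret;
            ret = i2c_master_recv(client, (u8 *)&id_be, 2);
            if (ret < 0)
                    return ret;
            *id = be16_to_cpu(id_be);
            return 0;
    }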
diff --git a/drivers/media/usb/em28xx/em28xx-cards.c b/drivers/media/usb/em28xx/em28xx-cards.c
index e397f544f108..23c67494762d 100644
--- a/drivers/media/usb/em28xx/em28xx-cards.c
+++ b/drivers/media/usb/em28xx/em28xx-cards.c
@@ -23,6 +23,8 @@
Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
+#include "em28xx.h"
+
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
@@ -39,7 +41,6 @@
#include <media/v4l2-common.h>
#include <sound/ac97_codec.h>
-#include "em28xx.h"
#define DRIVER_NAME "em28xx"
@@ -1560,8 +1561,7 @@ struct em28xx_board em28xx_boards[] = {
} },
},
[EM2820_BOARD_PINNACLE_DVC_90] = {
- .name = "Pinnacle Dazzle DVC 90/100/101/107 / Kaiser Baas Video to DVD maker "
- "/ Kworld DVD Maker 2 / Plextor ConvertX PX-AV100U",
+ .name = "Pinnacle Dazzle DVC 90/100/101/107 / Kaiser Baas Video to DVD maker / Kworld DVD Maker 2 / Plextor ConvertX PX-AV100U",
.tuner_type = TUNER_ABSENT, /* capture only board */
.decoder = EM28XX_SAA711X,
.input = { {
@@ -2677,7 +2677,7 @@ static int em28xx_wait_until_ac97_features_equals(struct em28xx *dev,
msleep(50);
}
- em28xx_warn("AC97 registers access is not reliable !\n");
+ dev_warn(&dev->intf->dev, "AC97 registers access is not reliable !\n");
return -ETIMEDOUT;
}
@@ -2831,16 +2831,14 @@ static int em28xx_hint_board(struct em28xx *dev)
dev->model = em28xx_eeprom_hash[i].model;
dev->tuner_type = em28xx_eeprom_hash[i].tuner;
- em28xx_errdev("Your board has no unique USB ID.\n");
- em28xx_errdev("A hint were successfully done, "
- "based on eeprom hash.\n");
- em28xx_errdev("This method is not 100%% failproof.\n");
- em28xx_errdev("If the board were missdetected, "
- "please email this log to:\n");
- em28xx_errdev("\tV4L Mailing List "
- " <linux-media@vger.kernel.org>\n");
- em28xx_errdev("Board detected as %s\n",
- em28xx_boards[dev->model].name);
+ dev_err(&dev->intf->dev,
+ "Your board has no unique USB ID.\n"
+ "A hint were successfully done, based on eeprom hash.\n"
+ "This method is not 100%% failproof.\n"
+ "If the board were missdetected, please email this log to:\n"
+ "\tV4L Mailing List <linux-media@vger.kernel.org>\n"
+ "Board detected as %s\n",
+ em28xx_boards[dev->model].name);
return 0;
}
@@ -2863,35 +2861,33 @@ static int em28xx_hint_board(struct em28xx *dev)
if (dev->i2c_hash == em28xx_i2c_hash[i].hash) {
dev->model = em28xx_i2c_hash[i].model;
dev->tuner_type = em28xx_i2c_hash[i].tuner;
- em28xx_errdev("Your board has no unique USB ID.\n");
- em28xx_errdev("A hint were successfully done, "
- "based on i2c devicelist hash.\n");
- em28xx_errdev("This method is not 100%% failproof.\n");
- em28xx_errdev("If the board were missdetected, "
- "please email this log to:\n");
- em28xx_errdev("\tV4L Mailing List "
- " <linux-media@vger.kernel.org>\n");
- em28xx_errdev("Board detected as %s\n",
- em28xx_boards[dev->model].name);
+ dev_err(&dev->intf->dev,
+ "Your board has no unique USB ID.\n"
+ "A hint were successfully done, based on i2c devicelist hash.\n"
+ "This method is not 100%% failproof.\n"
+ "If the board were missdetected, please email this log to:\n"
+ "\tV4L Mailing List <linux-media@vger.kernel.org>\n"
+ "Board detected as %s\n",
+ em28xx_boards[dev->model].name);
return 0;
}
}
- em28xx_errdev("Your board has no unique USB ID and thus need a "
- "hint to be detected.\n");
- em28xx_errdev("You may try to use card=<n> insmod option to "
- "workaround that.\n");
- em28xx_errdev("Please send an email with this log to:\n");
- em28xx_errdev("\tV4L Mailing List <linux-media@vger.kernel.org>\n");
- em28xx_errdev("Board eeprom hash is 0x%08lx\n", dev->hash);
- em28xx_errdev("Board i2c devicelist hash is 0x%08lx\n", dev->i2c_hash);
-
- em28xx_errdev("Here is a list of valid choices for the card=<n>"
- " insmod option:\n");
+ dev_err(&dev->intf->dev,
+ "Your board has no unique USB ID and thus need a hint to be detected.\n"
+ "You may try to use card=<n> insmod option to workaround that.\n"
+ "Please send an email with this log to:\n"
+ "\tV4L Mailing List <linux-media@vger.kernel.org>\n"
+ "Board eeprom hash is 0x%08lx\n"
+ "Board i2c devicelist hash is 0x%08lx\n",
+ dev->hash, dev->i2c_hash);
+
+ dev_err(&dev->intf->dev,
+ "Here is a list of valid choices for the card=<n> insmod option:\n");
for (i = 0; i < em28xx_bcount; i++) {
- em28xx_errdev(" card=%d -> %s\n",
- i, em28xx_boards[i].name);
+ dev_err(&dev->intf->dev,
+ " card=%d -> %s\n", i, em28xx_boards[i].name);
}
return -1;
}
@@ -2925,7 +2921,7 @@ static void em28xx_card_setup(struct em28xx *dev)
* hash identities which has not been determined as yet.
*/
if (em28xx_hint_board(dev) < 0)
- em28xx_errdev("Board not discovered\n");
+ dev_err(&dev->intf->dev, "Board not discovered\n");
else {
em28xx_set_model(dev);
em28xx_pre_card_setup(dev);
@@ -2935,8 +2931,8 @@ static void em28xx_card_setup(struct em28xx *dev)
em28xx_set_model(dev);
}
- em28xx_info("Identified as %s (card=%d)\n",
- dev->board.name, dev->model);
+ dev_info(&dev->intf->dev, "Identified as %s (card=%d)\n",
+ dev->board.name, dev->model);
dev->tuner_type = em28xx_boards[dev->model].tuner_type;
@@ -3034,12 +3030,11 @@ static void em28xx_card_setup(struct em28xx *dev)
}
if (dev->board.valid == EM28XX_BOARD_NOT_VALIDATED) {
- em28xx_errdev("\n\n");
- em28xx_errdev("The support for this board weren't "
- "valid yet.\n");
- em28xx_errdev("Please send a report of having this working\n");
- em28xx_errdev("not to V4L mailing list (and/or to other "
- "addresses)\n\n");
+ dev_err(&dev->intf->dev,
+ "\n\n"
+ "The support for this board weren't valid yet.\n"
+ "Please send a report of having this working\n"
+ "not to V4L mailing list (and/or to other addresses)\n\n");
}
/* Free eeprom data memory */
@@ -3166,7 +3161,7 @@ static int em28xx_media_device_init(struct em28xx *dev,
else if (udev->manufacturer)
media_device_usb_init(mdev, udev, udev->manufacturer);
else
- media_device_usb_init(mdev, udev, dev->name);
+ media_device_usb_init(mdev, udev, dev_name(&dev->intf->dev));
dev->media_dev = mdev;
#endif
@@ -3193,6 +3188,8 @@ static void em28xx_unregister_media_device(struct em28xx *dev)
*/
static void em28xx_release_resources(struct em28xx *dev)
{
+ struct usb_device *udev = interface_to_usbdev(dev->intf);
+
/*FIXME: I2C IR should be disconnected */
mutex_lock(&dev->lock);
@@ -3203,7 +3200,7 @@ static void em28xx_release_resources(struct em28xx *dev)
em28xx_i2c_unregister(dev, 1);
em28xx_i2c_unregister(dev, 0);
- usb_put_dev(dev->udev);
+ usb_put_dev(udev);
/* Mark device as unused */
clear_bit(dev->devno, em28xx_devused);
@@ -3222,7 +3219,7 @@ void em28xx_free_device(struct kref *ref)
{
struct em28xx *dev = kref_to_dev(ref);
- em28xx_info("Freeing device\n");
+ dev_info(&dev->intf->dev, "Freeing device\n");
if (!dev->disconnected)
em28xx_release_resources(dev);
@@ -3241,10 +3238,9 @@ static int em28xx_init_dev(struct em28xx *dev, struct usb_device *udev,
int minor)
{
int retval;
- static const char *default_chip_name = "em28xx";
- const char *chip_name = default_chip_name;
+ const char *chip_name = NULL;
- dev->udev = udev;
+ dev->intf = interface;
mutex_init(&dev->ctrl_urb_lock);
spin_lock_init(&dev->slock);
@@ -3282,9 +3278,8 @@ static int em28xx_init_dev(struct em28xx *dev, struct usb_device *udev,
break;
case CHIP_ID_EM2820:
chip_name = "em2710/2820";
- if (le16_to_cpu(dev->udev->descriptor.idVendor)
- == 0xeb1a) {
- __le16 idProd = dev->udev->descriptor.idProduct;
+ if (le16_to_cpu(udev->descriptor.idVendor) == 0xeb1a) {
+ __le16 idProd = udev->descriptor.idProduct;
if (le16_to_cpu(idProd) == 0x2710)
chip_name = "em2710";
@@ -3327,21 +3322,13 @@ static int em28xx_init_dev(struct em28xx *dev, struct usb_device *udev,
dev->wait_after_write = 0;
dev->eeprom_addrwidth_16bit = 1;
break;
- default:
- printk(KERN_INFO DRIVER_NAME
- ": unknown em28xx chip ID (%d)\n", dev->chip_id);
}
}
-
- if (chip_name != default_chip_name)
- printk(KERN_INFO DRIVER_NAME
- ": chip ID is %s\n", chip_name);
-
- /*
- * For em2820/em2710, the name may change latter, after checking
- * if the device has a sensor (so, it is em2710) or not.
- */
- snprintf(dev->name, sizeof(dev->name), "%s #%d", chip_name, dev->devno);
+ if (!chip_name)
+ dev_info(&dev->intf->dev,
+ "unknown em28xx chip ID (%d)\n", dev->chip_id);
+ else
+ dev_info(&dev->intf->dev, "chip ID is %s\n", chip_name);
em28xx_media_device_init(dev, udev);
@@ -3360,9 +3347,9 @@ static int em28xx_init_dev(struct em28xx *dev, struct usb_device *udev,
/* Resets I2C speed */
retval = em28xx_write_reg(dev, EM28XX_R06_I2C_CLK, dev->board.i2c_speed);
if (retval < 0) {
- em28xx_errdev("%s: em28xx_write_reg failed!"
- " retval [%d]\n",
- __func__, retval);
+ dev_err(&dev->intf->dev,
+ "%s: em28xx_write_reg failed! retval [%d]\n",
+ __func__, retval);
return retval;
}
}
@@ -3375,8 +3362,9 @@ static int em28xx_init_dev(struct em28xx *dev, struct usb_device *udev,
else
retval = em28xx_i2c_register(dev, 0, EM28XX_I2C_ALGO_EM28XX);
if (retval < 0) {
- em28xx_errdev("%s: em28xx_i2c_register bus 0 - error [%d]!\n",
- __func__, retval);
+ dev_err(&dev->intf->dev,
+ "%s: em28xx_i2c_register bus 0 - error [%d]!\n",
+ __func__, retval);
return retval;
}
@@ -3389,8 +3377,9 @@ static int em28xx_init_dev(struct em28xx *dev, struct usb_device *udev,
retval = em28xx_i2c_register(dev, 1,
EM28XX_I2C_ALGO_EM28XX);
if (retval < 0) {
- em28xx_errdev("%s: em28xx_i2c_register bus 1 - error [%d]!\n",
- __func__, retval);
+ dev_err(&dev->intf->dev,
+ "%s: em28xx_i2c_register bus 1 - error [%d]!\n",
+ __func__, retval);
em28xx_i2c_unregister(dev, 0);
@@ -3429,7 +3418,8 @@ static int em28xx_usb_probe(struct usb_interface *interface,
nr = find_first_zero_bit(em28xx_devused, EM28XX_MAXBOARDS);
if (nr >= EM28XX_MAXBOARDS) {
/* No free device slots */
- printk(DRIVER_NAME ": Supports only %i em28xx boards.\n",
+ dev_err(&interface->dev,
+ "Driver supports up to %i em28xx boards.\n",
EM28XX_MAXBOARDS);
retval = -ENOMEM;
goto err_no_slot;
@@ -3438,8 +3428,8 @@ static int em28xx_usb_probe(struct usb_interface *interface,
/* Don't register audio interfaces */
if (interface->altsetting[0].desc.bInterfaceClass == USB_CLASS_AUDIO) {
- em28xx_err(DRIVER_NAME " audio device (%04x:%04x): "
- "interface %i, class %i\n",
+ dev_err(&interface->dev,
+ "audio device (%04x:%04x): interface %i, class %i\n",
le16_to_cpu(udev->descriptor.idVendor),
le16_to_cpu(udev->descriptor.idProduct),
ifnum,
@@ -3452,7 +3442,6 @@ static int em28xx_usb_probe(struct usb_interface *interface,
/* allocate memory for our device state and initialize it */
dev = kzalloc(sizeof(*dev), GFP_KERNEL);
if (dev == NULL) {
- em28xx_err(DRIVER_NAME ": out of memory!\n");
retval = -ENOMEM;
goto err;
}
@@ -3462,7 +3451,6 @@ static int em28xx_usb_probe(struct usb_interface *interface,
kmalloc(sizeof(dev->alt_max_pkt_size_isoc[0]) *
interface->num_altsetting, GFP_KERNEL);
if (dev->alt_max_pkt_size_isoc == NULL) {
- em28xx_errdev("out of memory!\n");
kfree(dev);
retval = -ENOMEM;
goto err;
@@ -3501,8 +3489,8 @@ static int em28xx_usb_probe(struct usb_interface *interface,
if (usb_endpoint_xfer_isoc(e)) {
has_vendor_audio = true;
} else {
- printk(KERN_INFO DRIVER_NAME
- ": error: skipping audio endpoint 0x83, because it uses bulk transfers !\n");
+ dev_err(&interface->dev,
+ "error: skipping audio endpoint 0x83, because it uses bulk transfers !\n");
}
break;
case 0x84:
@@ -3575,9 +3563,8 @@ static int em28xx_usb_probe(struct usb_interface *interface,
speed = "unknown";
}
- printk(KERN_INFO DRIVER_NAME
- ": New device %s %s @ %s Mbps "
- "(%04x:%04x, interface %d, class %d)\n",
+ dev_err(&interface->dev,
+ "New device %s %s @ %s Mbps (%04x:%04x, interface %d, class %d)\n",
udev->manufacturer ? udev->manufacturer : "",
udev->product ? udev->product : "",
speed,
@@ -3592,9 +3579,9 @@ static int em28xx_usb_probe(struct usb_interface *interface,
* not enough even for most Digital TV streams.
*/
if (udev->speed != USB_SPEED_HIGH && disable_usb_speed_check == 0) {
- printk(DRIVER_NAME ": Device initialization failed.\n");
- printk(DRIVER_NAME ": Device must be connected to a high-speed"
- " USB 2.0 port.\n");
+ dev_err(&interface->dev, "Device initialization failed.\n");
+ dev_err(&interface->dev,
+ "Device must be connected to a high-speed USB 2.0 port.\n");
retval = -ENODEV;
goto err_free;
}
@@ -3607,8 +3594,8 @@ static int em28xx_usb_probe(struct usb_interface *interface,
dev->ifnum = ifnum;
if (has_vendor_audio) {
- printk(KERN_INFO DRIVER_NAME ": Audio interface %i found %s\n",
- ifnum, "(Vendor Class)");
+ dev_err(&interface->dev,
+ "Audio interface %i found (Vendor Class)\n", ifnum);
dev->usb_audio_type = EM28XX_USB_AUDIO_VENDOR;
}
/* Checks if audio is provided by a USB Audio Class interface */
@@ -3617,25 +3604,24 @@ static int em28xx_usb_probe(struct usb_interface *interface,
if (uif->altsetting[0].desc.bInterfaceClass == USB_CLASS_AUDIO) {
if (has_vendor_audio)
- em28xx_err("em28xx: device seems to have vendor AND usb audio class interfaces !\n"
- "\t\tThe vendor interface will be ignored. Please contact the developers <linux-media@vger.kernel.org>\n");
+ dev_err(&interface->dev,
+ "em28xx: device seems to have vendor AND usb audio class interfaces !\n"
+ "\t\tThe vendor interface will be ignored. Please contact the developers <linux-media@vger.kernel.org>\n");
dev->usb_audio_type = EM28XX_USB_AUDIO_CLASS;
break;
}
}
if (has_video)
- printk(KERN_INFO DRIVER_NAME
- ": Video interface %i found:%s%s\n",
- ifnum,
- dev->analog_ep_bulk ? " bulk" : "",
- dev->analog_ep_isoc ? " isoc" : "");
+ dev_err(&interface->dev, "Video interface %i found:%s%s\n",
+ ifnum,
+ dev->analog_ep_bulk ? " bulk" : "",
+ dev->analog_ep_isoc ? " isoc" : "");
if (has_dvb)
- printk(KERN_INFO DRIVER_NAME
- ": DVB interface %i found:%s%s\n",
- ifnum,
- dev->dvb_ep_bulk ? " bulk" : "",
- dev->dvb_ep_isoc ? " isoc" : "");
+ dev_err(&interface->dev, "DVB interface %i found:%s%s\n",
+ ifnum,
+ dev->dvb_ep_bulk ? " bulk" : "",
+ dev->dvb_ep_isoc ? " isoc" : "");
dev->num_alt = interface->num_altsetting;
@@ -3664,8 +3650,8 @@ static int em28xx_usb_probe(struct usb_interface *interface,
/* Disable V4L2 if the device doesn't have a decoder */
if (has_video &&
dev->board.decoder == EM28XX_NODECODER && !dev->board.is_webcam) {
- printk(DRIVER_NAME
- ": Currently, V4L2 is not supported on this model\n");
+ dev_err(&interface->dev,
+ "Currently, V4L2 is not supported on this model\n");
has_video = false;
dev->has_video = false;
}
@@ -3674,14 +3660,14 @@ static int em28xx_usb_probe(struct usb_interface *interface,
if (has_video) {
if (!dev->analog_ep_isoc || (try_bulk && dev->analog_ep_bulk))
dev->analog_xfer_bulk = 1;
- em28xx_info("analog set to %s mode.\n",
- dev->analog_xfer_bulk ? "bulk" : "isoc");
+ dev_err(&interface->dev, "analog set to %s mode.\n",
+ dev->analog_xfer_bulk ? "bulk" : "isoc");
}
if (has_dvb) {
if (!dev->dvb_ep_isoc || (try_bulk && dev->dvb_ep_bulk))
dev->dvb_xfer_bulk = 1;
- em28xx_info("dvb set to %s mode.\n",
- dev->dvb_xfer_bulk ? "bulk" : "isoc");
+ dev_err(&interface->dev, "dvb set to %s mode.\n",
+ dev->dvb_xfer_bulk ? "bulk" : "isoc");
}
kref_init(&dev->ref);
@@ -3728,7 +3714,7 @@ static void em28xx_usb_disconnect(struct usb_interface *interface)
dev->disconnected = 1;
- em28xx_info("Disconnecting %s\n", dev->name);
+ dev_err(&dev->intf->dev, "Disconnecting\n");
flush_request_modules(dev);
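A pattern that recurs through this series: the driver now stores only the usb_interface pointer (dev->intf) and derives the usb_device on demand with interface_to_usbdev(), instead of caching dev->udev. The conversion is mechanical; a minimal sketch with an illustrative demo_use_udev():

    #include <linux/usb.h>

    static void demo_use_udev(struct usb_interface *intf)
    {
            /* recover the owning USB device only where a pipe or
             * descriptor is actually needed */
            struct usb_device *udev = interface_to_usbdev(intf);

            dev_info(&intf->dev, "vendor id 0x%04x\n",
                     le16_to_cpu(udev->descriptor.idVendor));
    }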
diff --git a/drivers/media/usb/em28xx/em28xx-core.c b/drivers/media/usb/em28xx/em28xx-core.c
index eebd5d7088d0..19ccff41c7eb 100644
--- a/drivers/media/usb/em28xx/em28xx-core.c
+++ b/drivers/media/usb/em28xx/em28xx-core.c
@@ -22,6 +22,8 @@
Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
+#include "em28xx.h"
+
#include <linux/init.h>
#include <linux/jiffies.h>
#include <linux/list.h>
@@ -32,8 +34,6 @@
#include <sound/ac97_codec.h>
#include <media/v4l2-common.h>
-#include "em28xx.h"
-
#define DRIVER_AUTHOR "Ludovico Cavedon <cavedon@sssup.it>, " \
"Markus Rechberger <mrechberger@gmail.com>, " \
"Mauro Carvalho Chehab <mchehab@infradead.org>, " \
@@ -48,27 +48,31 @@ MODULE_VERSION(EM28XX_VERSION);
static unsigned int core_debug;
module_param(core_debug, int, 0644);
-MODULE_PARM_DESC(core_debug, "enable debug messages [core]");
+MODULE_PARM_DESC(core_debug, "enable debug messages [core and isoc]");
-#define em28xx_coredbg(fmt, arg...) do {\
- if (core_debug) \
- printk(KERN_INFO "%s %s :"fmt, \
- dev->name, __func__ , ##arg); } while (0)
+#define em28xx_coredbg(fmt, arg...) do { \
+ if (core_debug) \
+ dev_printk(KERN_DEBUG, &dev->intf->dev, \
+ "core: %s: " fmt, __func__, ## arg); \
+} while (0)
static unsigned int reg_debug;
module_param(reg_debug, int, 0644);
MODULE_PARM_DESC(reg_debug, "enable debug messages [URB reg]");
-#define em28xx_regdbg(fmt, arg...) do {\
- if (reg_debug) \
- printk(KERN_INFO "%s %s :"fmt, \
- dev->name, __func__ , ##arg); } while (0)
-/* FIXME */
-#define em28xx_isocdbg(fmt, arg...) do {\
- if (core_debug) \
- printk(KERN_INFO "%s %s :"fmt, \
- dev->name, __func__ , ##arg); } while (0)
+#define em28xx_regdbg(fmt, arg...) do { \
+ if (reg_debug) \
+ dev_printk(KERN_DEBUG, &dev->intf->dev, \
+ "reg: %s: " fmt, __func__, ## arg); \
+} while (0)
+
+/* FIXME: don't abuse core_debug */
+#define em28xx_isocdbg(fmt, arg...) do { \
+ if (core_debug) \
+ dev_printk(KERN_DEBUG, &dev->intf->dev, \
+ "core: %s: " fmt, __func__, ## arg); \
+} while (0)
/*
* em28xx_read_reg_req()
@@ -78,7 +82,8 @@ int em28xx_read_reg_req_len(struct em28xx *dev, u8 req, u16 reg,
char *buf, int len)
{
int ret;
- int pipe = usb_rcvctrlpipe(dev->udev, 0);
+ struct usb_device *udev = interface_to_usbdev(dev->intf);
+ int pipe = usb_rcvctrlpipe(udev, 0);
if (dev->disconnected)
return -ENODEV;
@@ -86,23 +91,22 @@ int em28xx_read_reg_req_len(struct em28xx *dev, u8 req, u16 reg,
if (len > URB_MAX_CTRL_SIZE)
return -EINVAL;
- if (reg_debug) {
- printk(KERN_DEBUG "(pipe 0x%08x): "
- "IN: %02x %02x %02x %02x %02x %02x %02x %02x ",
- pipe,
- USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
- req, 0, 0,
- reg & 0xff, reg >> 8,
- len & 0xff, len >> 8);
- }
+ em28xx_regdbg("(pipe 0x%08x): IN: %02x %02x %02x %02x %02x %02x %02x %02x ",
+ pipe, USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+ req, 0, 0,
+ reg & 0xff, reg >> 8,
+ len & 0xff, len >> 8);
mutex_lock(&dev->ctrl_urb_lock);
- ret = usb_control_msg(dev->udev, pipe, req,
+ ret = usb_control_msg(udev, pipe, req,
USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
0x0000, reg, dev->urb_buf, len, HZ);
if (ret < 0) {
- if (reg_debug)
- printk(" failed!\n");
+ em28xx_regdbg("(pipe 0x%08x): IN: %02x %02x %02x %02x %02x %02x %02x %02x failed\n",
+ pipe, USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+ req, 0, 0,
+ reg & 0xff, reg >> 8,
+ len & 0xff, len >> 8);
mutex_unlock(&dev->ctrl_urb_lock);
return usb_translate_errors(ret);
}
@@ -112,14 +116,11 @@ int em28xx_read_reg_req_len(struct em28xx *dev, u8 req, u16 reg,
mutex_unlock(&dev->ctrl_urb_lock);
- if (reg_debug) {
- int byte;
-
- printk("<<<");
- for (byte = 0; byte < len; byte++)
- printk(" %02x", (unsigned char)buf[byte]);
- printk("\n");
- }
+ em28xx_regdbg("(pipe 0x%08x): IN: %02x %02x %02x %02x %02x %02x %02x %02x failed <<< %*ph\n",
+ pipe, USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+ req, 0, 0,
+ reg & 0xff, reg >> 8,
+ len & 0xff, len >> 8, len, buf);
return ret;
}
@@ -154,7 +155,8 @@ int em28xx_write_regs_req(struct em28xx *dev, u8 req, u16 reg, char *buf,
int len)
{
int ret;
- int pipe = usb_sndctrlpipe(dev->udev, 0);
+ struct usb_device *udev = interface_to_usbdev(dev->intf);
+ int pipe = usb_sndctrlpipe(udev, 0);
if (dev->disconnected)
return -ENODEV;
@@ -162,25 +164,16 @@ int em28xx_write_regs_req(struct em28xx *dev, u8 req, u16 reg, char *buf,
if ((len < 1) || (len > URB_MAX_CTRL_SIZE))
return -EINVAL;
- if (reg_debug) {
- int byte;
-
- printk(KERN_DEBUG "(pipe 0x%08x): "
- "OUT: %02x %02x %02x %02x %02x %02x %02x %02x >>>",
- pipe,
- USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
- req, 0, 0,
- reg & 0xff, reg >> 8,
- len & 0xff, len >> 8);
-
- for (byte = 0; byte < len; byte++)
- printk(" %02x", (unsigned char)buf[byte]);
- printk("\n");
- }
+ em28xx_regdbg("(pipe 0x%08x): OUT: %02x %02x %02x %02x %02x %02x %02x %02x >>> %*ph\n",
+ pipe,
+ USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+ req, 0, 0,
+ reg & 0xff, reg >> 8,
+ len & 0xff, len >> 8, len, buf);
mutex_lock(&dev->ctrl_urb_lock);
memcpy(dev->urb_buf, buf, len);
- ret = usb_control_msg(dev->udev, pipe, req,
+ ret = usb_control_msg(udev, pipe, req,
USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
0x0000, reg, dev->urb_buf, len, HZ);
mutex_unlock(&dev->ctrl_urb_lock);
@@ -267,7 +260,8 @@ static int em28xx_is_ac97_ready(struct em28xx *dev)
msleep(5);
}
- em28xx_warn("AC97 command still being executed: not handled properly!\n");
+ dev_warn(&dev->intf->dev,
+ "AC97 command still being executed: not handled properly!\n");
return -EBUSY;
}
@@ -360,8 +354,9 @@ static int set_ac97_input(struct em28xx *dev)
ret = em28xx_write_ac97(dev, inputs[i].reg, 0x8000);
if (ret < 0)
- em28xx_warn("couldn't setup AC97 register %d\n",
- inputs[i].reg);
+ dev_warn(&dev->intf->dev,
+ "couldn't setup AC97 register %d\n",
+ inputs[i].reg);
}
return 0;
}
@@ -444,8 +439,9 @@ int em28xx_audio_analog_set(struct em28xx *dev)
for (i = 0; i < ARRAY_SIZE(outputs); i++) {
ret = em28xx_write_ac97(dev, outputs[i].reg, 0x8000);
if (ret < 0)
- em28xx_warn("couldn't setup AC97 register %d\n",
- outputs[i].reg);
+ dev_warn(&dev->intf->dev,
+ "couldn't setup AC97 register %d\n",
+ outputs[i].reg);
}
}
@@ -482,8 +478,9 @@ int em28xx_audio_analog_set(struct em28xx *dev)
ret = em28xx_write_ac97(dev, outputs[i].reg,
vol);
if (ret < 0)
- em28xx_warn("couldn't setup AC97 register %d\n",
- outputs[i].reg);
+ dev_warn(&dev->intf->dev,
+ "couldn't setup AC97 register %d\n",
+ outputs[i].reg);
}
if (dev->ctl_aoutput & EM28XX_AOUT_PCM_IN) {
@@ -519,7 +516,7 @@ int em28xx_audio_setup(struct em28xx *dev)
/* See how this device is configured */
cfg = em28xx_read_reg(dev, EM28XX_R00_CHIPCFG);
- em28xx_info("Config register raw data: 0x%02x\n", cfg);
+ dev_info(&dev->intf->dev, "Config register raw data: 0x%02x\n", cfg);
if (cfg < 0) { /* Register read error */
/* Be conservative */
dev->int_audio_type = EM28XX_INT_AUDIO_AC97;
@@ -540,8 +537,8 @@ int em28xx_audio_setup(struct em28xx *dev)
i2s_samplerates = 5;
else
i2s_samplerates = 3;
- em28xx_info("I2S Audio (%d sample rate(s))\n",
- i2s_samplerates);
+ dev_info(&dev->intf->dev, "I2S Audio (%d sample rate(s))\n",
+ i2s_samplerates);
/* Skip the code that does AC97 vendor detection */
dev->audio_mode.ac97 = EM28XX_NO_AC97;
goto init_audio;
@@ -558,7 +555,8 @@ int em28xx_audio_setup(struct em28xx *dev)
* Note: (some) em2800 devices without eeprom reports 0x91 on
* CHIPCFG register, even not having an AC97 chip
*/
- em28xx_warn("AC97 chip type couldn't be determined\n");
+ dev_warn(&dev->intf->dev,
+ "AC97 chip type couldn't be determined\n");
dev->audio_mode.ac97 = EM28XX_NO_AC97;
if (dev->usb_audio_type == EM28XX_USB_AUDIO_VENDOR)
dev->usb_audio_type = EM28XX_USB_AUDIO_NONE;
@@ -571,13 +569,13 @@ int em28xx_audio_setup(struct em28xx *dev)
goto init_audio;
vid = vid1 << 16 | vid2;
- em28xx_warn("AC97 vendor ID = 0x%08x\n", vid);
+ dev_warn(&dev->intf->dev, "AC97 vendor ID = 0x%08x\n", vid);
feat = em28xx_read_ac97(dev, AC97_RESET);
if (feat < 0)
goto init_audio;
- em28xx_warn("AC97 features = 0x%04x\n", feat);
+ dev_warn(&dev->intf->dev, "AC97 features = 0x%04x\n", feat);
/* Try to identify what audio processor we have */
if (((vid == 0xffffffff) || (vid == 0x83847650)) && (feat == 0x6a90))
@@ -589,17 +587,20 @@ init_audio:
/* Reports detected AC97 processor */
switch (dev->audio_mode.ac97) {
case EM28XX_NO_AC97:
- em28xx_info("No AC97 audio processor\n");
+ dev_info(&dev->intf->dev, "No AC97 audio processor\n");
break;
case EM28XX_AC97_EM202:
- em28xx_info("Empia 202 AC97 audio processor detected\n");
+ dev_info(&dev->intf->dev,
+ "Empia 202 AC97 audio processor detected\n");
break;
case EM28XX_AC97_SIGMATEL:
- em28xx_info("Sigmatel audio processor detected (stac 97%02x)\n",
- vid & 0xff);
+ dev_info(&dev->intf->dev,
+ "Sigmatel audio processor detected (stac 97%02x)\n",
+ vid & 0xff);
break;
case EM28XX_AC97_OTHER:
- em28xx_warn("Unknown AC97 audio processor detected!\n");
+ dev_warn(&dev->intf->dev,
+ "Unknown AC97 audio processor detected!\n");
break;
default:
break;
@@ -798,6 +799,7 @@ void em28xx_uninit_usb_xfer(struct em28xx *dev, enum em28xx_mode mode)
{
struct urb *urb;
struct em28xx_usb_bufs *usb_bufs;
+ struct usb_device *udev = interface_to_usbdev(dev->intf);
int i;
em28xx_isocdbg("em28xx: called em28xx_uninit_usb_xfer in mode %d\n",
@@ -817,7 +819,7 @@ void em28xx_uninit_usb_xfer(struct em28xx *dev, enum em28xx_mode mode)
usb_unlink_urb(urb);
if (usb_bufs->transfer_buffer[i]) {
- usb_free_coherent(dev->udev,
+ usb_free_coherent(udev,
urb->transfer_buffer_length,
usb_bufs->transfer_buffer[i],
urb->transfer_dma);
@@ -871,9 +873,10 @@ int em28xx_alloc_urbs(struct em28xx *dev, enum em28xx_mode mode, int xfer_bulk,
int num_bufs, int max_pkt_size, int packet_multiplier)
{
struct em28xx_usb_bufs *usb_bufs;
+ struct urb *urb;
+ struct usb_device *udev = interface_to_usbdev(dev->intf);
int i;
int sb_size, pipe;
- struct urb *urb;
int j, k;
em28xx_isocdbg("em28xx: called em28xx_alloc_isoc in mode %d\n", mode);
@@ -883,21 +886,23 @@ int em28xx_alloc_urbs(struct em28xx *dev, enum em28xx_mode mode, int xfer_bulk,
if (mode == EM28XX_DIGITAL_MODE) {
if ((xfer_bulk && !dev->dvb_ep_bulk) ||
(!xfer_bulk && !dev->dvb_ep_isoc)) {
- em28xx_errdev("no endpoint for DVB mode and transfer type %d\n",
- xfer_bulk > 0);
+ dev_err(&dev->intf->dev,
+ "no endpoint for DVB mode and transfer type %d\n",
+ xfer_bulk > 0);
return -EINVAL;
}
usb_bufs = &dev->usb_ctl.digital_bufs;
} else if (mode == EM28XX_ANALOG_MODE) {
if ((xfer_bulk && !dev->analog_ep_bulk) ||
(!xfer_bulk && !dev->analog_ep_isoc)) {
- em28xx_errdev("no endpoint for analog mode and transfer type %d\n",
- xfer_bulk > 0);
+ dev_err(&dev->intf->dev,
+ "no endpoint for analog mode and transfer type %d\n",
+ xfer_bulk > 0);
return -EINVAL;
}
usb_bufs = &dev->usb_ctl.analog_bufs;
} else {
- em28xx_errdev("invalid mode selected\n");
+ dev_err(&dev->intf->dev, "invalid mode selected\n");
return -EINVAL;
}
@@ -907,15 +912,12 @@ int em28xx_alloc_urbs(struct em28xx *dev, enum em28xx_mode mode, int xfer_bulk,
usb_bufs->num_bufs = num_bufs;
usb_bufs->urb = kzalloc(sizeof(void *)*num_bufs, GFP_KERNEL);
- if (!usb_bufs->urb) {
- em28xx_errdev("cannot alloc memory for usb buffers\n");
+ if (!usb_bufs->urb)
return -ENOMEM;
- }
usb_bufs->transfer_buffer = kzalloc(sizeof(void *)*num_bufs,
GFP_KERNEL);
if (!usb_bufs->transfer_buffer) {
- em28xx_errdev("cannot allocate memory for usb transfer\n");
kfree(usb_bufs->urb);
return -ENOMEM;
}
@@ -939,33 +941,33 @@ int em28xx_alloc_urbs(struct em28xx *dev, enum em28xx_mode mode, int xfer_bulk,
}
usb_bufs->urb[i] = urb;
- usb_bufs->transfer_buffer[i] = usb_alloc_coherent(dev->udev,
+ usb_bufs->transfer_buffer[i] = usb_alloc_coherent(udev,
sb_size, GFP_KERNEL, &urb->transfer_dma);
if (!usb_bufs->transfer_buffer[i]) {
- em28xx_err("unable to allocate %i bytes for transfer"
- " buffer %i%s\n",
- sb_size, i,
- in_interrupt() ? " while in int" : "");
+ dev_err(&dev->intf->dev,
+ "unable to allocate %i bytes for transfer buffer %i%s\n",
+ sb_size, i,
+ in_interrupt() ? " while in int" : "");
em28xx_uninit_usb_xfer(dev, mode);
return -ENOMEM;
}
memset(usb_bufs->transfer_buffer[i], 0, sb_size);
if (xfer_bulk) { /* bulk */
- pipe = usb_rcvbulkpipe(dev->udev,
+ pipe = usb_rcvbulkpipe(udev,
mode == EM28XX_ANALOG_MODE ?
dev->analog_ep_bulk :
dev->dvb_ep_bulk);
- usb_fill_bulk_urb(urb, dev->udev, pipe,
+ usb_fill_bulk_urb(urb, udev, pipe,
usb_bufs->transfer_buffer[i], sb_size,
em28xx_irq_callback, dev);
urb->transfer_flags = URB_NO_TRANSFER_DMA_MAP;
} else { /* isoc */
- pipe = usb_rcvisocpipe(dev->udev,
+ pipe = usb_rcvisocpipe(udev,
mode == EM28XX_ANALOG_MODE ?
dev->analog_ep_isoc :
dev->dvb_ep_isoc);
- usb_fill_int_urb(urb, dev->udev, pipe,
+ usb_fill_int_urb(urb, udev, pipe,
usb_bufs->transfer_buffer[i], sb_size,
em28xx_irq_callback, dev, 1);
urb->transfer_flags = URB_ISO_ASAP |
@@ -997,6 +999,7 @@ int em28xx_init_usb_xfer(struct em28xx *dev, enum em28xx_mode mode,
struct em28xx_dmaqueue *dma_q = &dev->vidq;
struct em28xx_dmaqueue *vbi_dma_q = &dev->vbiq;
struct em28xx_usb_bufs *usb_bufs;
+ struct usb_device *udev = interface_to_usbdev(dev->intf);
int i;
int rc;
int alloc;
@@ -1023,10 +1026,11 @@ int em28xx_init_usb_xfer(struct em28xx *dev, enum em28xx_mode mode,
}
if (xfer_bulk) {
- rc = usb_clear_halt(dev->udev, usb_bufs->urb[0]->pipe);
+ rc = usb_clear_halt(udev, usb_bufs->urb[0]->pipe);
if (rc < 0) {
- em28xx_err("failed to clear USB bulk endpoint stall/halt condition (error=%i)\n",
- rc);
+ dev_err(&dev->intf->dev,
+ "failed to clear USB bulk endpoint stall/halt condition (error=%i)\n",
+ rc);
em28xx_uninit_usb_xfer(dev, mode);
return rc;
}
@@ -1041,8 +1045,8 @@ int em28xx_init_usb_xfer(struct em28xx *dev, enum em28xx_mode mode,
for (i = 0; i < usb_bufs->num_bufs; i++) {
rc = usb_submit_urb(usb_bufs->urb[i], GFP_ATOMIC);
if (rc) {
- em28xx_err("submit of urb %i failed (error=%i)\n", i,
- rc);
+ dev_err(&dev->intf->dev,
+ "submit of urb %i failed (error=%i)\n", i, rc);
em28xx_uninit_usb_xfer(dev, mode);
return rc;
}
@@ -1075,7 +1079,7 @@ int em28xx_register_extension(struct em28xx_ops *ops)
ops->init(dev);
}
mutex_unlock(&em28xx_devlist_mutex);
- printk(KERN_INFO "em28xx: Registered (%s) extension\n", ops->name);
+ pr_info("em28xx: Registered (%s) extension\n", ops->name);
return 0;
}
EXPORT_SYMBOL(em28xx_register_extension);
@@ -1090,7 +1094,7 @@ void em28xx_unregister_extension(struct em28xx_ops *ops)
}
list_del(&ops->next);
mutex_unlock(&em28xx_devlist_mutex);
- printk(KERN_INFO "Em28xx: Removed (%s) extension\n", ops->name);
+ pr_info("em28xx: Removed (%s) extension\n", ops->name);
}
EXPORT_SYMBOL(em28xx_unregister_extension);
@@ -1124,7 +1128,7 @@ int em28xx_suspend_extension(struct em28xx *dev)
{
const struct em28xx_ops *ops = NULL;
- em28xx_info("Suspending extensions\n");
+ dev_info(&dev->intf->dev, "Suspending extensions\n");
mutex_lock(&em28xx_devlist_mutex);
list_for_each_entry(ops, &em28xx_extension_devlist, next) {
if (ops->suspend)
@@ -1138,7 +1142,7 @@ int em28xx_resume_extension(struct em28xx *dev)
{
const struct em28xx_ops *ops = NULL;
- em28xx_info("Resuming extensions\n");
+ dev_info(&dev->intf->dev, "Resuming extensions\n");
mutex_lock(&em28xx_devlist_mutex);
list_for_each_entry(ops, &em28xx_extension_devlist, next) {
if (ops->resume)
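The em28xx-core.c hunks above drop the byte-by-byte printk() loops in favour of the %*ph format extension, which hex-dumps a small buffer in a single call. A short sketch (demo_dump() and its arguments are illustrative):

    #include <linux/printk.h>
    #include <linux/types.h>

    static void demo_dump(const u8 *buf, int len)
    {
            /* %*ph consumes an (int, pointer) pair and prints up to
             * 64 bytes as space-separated hex, so no loop is needed */
            pr_debug("payload: %*ph\n", len, buf);
    }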
diff --git a/drivers/media/usb/em28xx/em28xx-dvb.c b/drivers/media/usb/em28xx/em28xx-dvb.c
index 8cedef0daae4..75a75dab2e8e 100644
--- a/drivers/media/usb/em28xx/em28xx-dvb.c
+++ b/drivers/media/usb/em28xx/em28xx-dvb.c
@@ -21,11 +21,12 @@
the Free Software Foundation; either version 2 of the License.
*/
+#include "em28xx.h"
+
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/usb.h>
-#include "em28xx.h"
#include <media/v4l2-common.h>
#include <dvb_demux.h>
#include <dvb_net.h>
@@ -72,9 +73,10 @@ MODULE_PARM_DESC(debug, "enable debug messages [dvb]");
DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);
-#define dprintk(level, fmt, arg...) do { \
-if (debug >= level) \
- printk(KERN_DEBUG "%s/2-dvb: " fmt, dev->name, ## arg); \
+#define dprintk(level, fmt, arg...) do { \
+ if (debug >= level) \
+ dev_printk(KERN_DEBUG, &dev->intf->dev, \
+ "dvb: " fmt, ## arg); \
} while (0)
struct em28xx_dvb {
@@ -196,6 +198,7 @@ static int em28xx_start_streaming(struct em28xx_dvb *dvb)
int rc;
struct em28xx_i2c_bus *i2c_bus = dvb->adapter.priv;
struct em28xx *dev = i2c_bus->dev;
+ struct usb_device *udev = interface_to_usbdev(dev->intf);
int dvb_max_packet_size, packet_multiplier, dvb_alt;
if (dev->dvb_xfer_bulk) {
@@ -214,7 +217,7 @@ static int em28xx_start_streaming(struct em28xx_dvb *dvb)
dvb_alt = dev->dvb_alt_isoc;
}
- usb_set_interface(dev->udev, dev->ifnum, dvb_alt);
+ usb_set_interface(udev, dev->ifnum, dvb_alt);
rc = em28xx_set_mode(dev, EM28XX_DIGITAL_MODE);
if (rc < 0)
return rc;
@@ -734,13 +737,13 @@ static int em28xx_pctv_290e_set_lna(struct dvb_frontend *fe)
ret = gpio_request_one(dvb->lna_gpio, flags, NULL);
if (ret)
- em28xx_errdev("gpio request failed %d\n", ret);
+ dev_err(&dev->intf->dev, "gpio request failed %d\n", ret);
else
gpio_free(dvb->lna_gpio);
return ret;
#else
- dev_warn(&dev->udev->dev, "%s: LNA control is disabled (lna=%u)\n",
+ dev_warn(&dev->intf->dev, "%s: LNA control is disabled (lna=%u)\n",
KBUILD_MODNAME, c->lna);
return 0;
#endif
@@ -934,20 +937,20 @@ static int em28xx_attach_xc3028(u8 addr, struct em28xx *dev)
cfg.ctrl = &ctl;
if (!dev->dvb->fe[0]) {
- em28xx_errdev("/2: dvb frontend not attached. "
- "Can't attach xc3028\n");
+ dev_err(&dev->intf->dev,
+ "dvb frontend not attached. Can't attach xc3028\n");
return -EINVAL;
}
fe = dvb_attach(xc2028_attach, dev->dvb->fe[0], &cfg);
if (!fe) {
- em28xx_errdev("/2: xc3028 attach failed\n");
+ dev_err(&dev->intf->dev, "xc3028 attach failed\n");
dvb_frontend_detach(dev->dvb->fe[0]);
dev->dvb->fe[0] = NULL;
return -EINVAL;
}
- em28xx_info("%s/2: xc3028 attached\n", dev->name);
+ dev_info(&dev->intf->dev, "xc3028 attached\n");
return 0;
}
@@ -963,11 +966,13 @@ static int em28xx_register_dvb(struct em28xx_dvb *dvb, struct module *module,
mutex_init(&dvb->lock);
/* register adapter */
- result = dvb_register_adapter(&dvb->adapter, dev->name, module, device,
- adapter_nr);
+ result = dvb_register_adapter(&dvb->adapter,
+ dev_name(&dev->intf->dev), module,
+ device, adapter_nr);
if (result < 0) {
- printk(KERN_WARNING "%s: dvb_register_adapter failed (errno = %d)\n",
- dev->name, result);
+ dev_warn(&dev->intf->dev,
+ "dvb_register_adapter failed (errno = %d)\n",
+ result);
goto fail_adapter;
}
#ifdef CONFIG_MEDIA_CONTROLLER_DVB
@@ -984,8 +989,9 @@ static int em28xx_register_dvb(struct em28xx_dvb *dvb, struct module *module,
/* register frontend */
result = dvb_register_frontend(&dvb->adapter, dvb->fe[0]);
if (result < 0) {
- printk(KERN_WARNING "%s: dvb_register_frontend failed (errno = %d)\n",
- dev->name, result);
+ dev_warn(&dev->intf->dev,
+ "dvb_register_frontend failed (errno = %d)\n",
+ result);
goto fail_frontend0;
}
@@ -993,8 +999,9 @@ static int em28xx_register_dvb(struct em28xx_dvb *dvb, struct module *module,
if (dvb->fe[1]) {
result = dvb_register_frontend(&dvb->adapter, dvb->fe[1]);
if (result < 0) {
- printk(KERN_WARNING "%s: 2nd dvb_register_frontend failed (errno = %d)\n",
- dev->name, result);
+ dev_warn(&dev->intf->dev,
+ "2nd dvb_register_frontend failed (errno = %d)\n",
+ result);
goto fail_frontend1;
}
}
@@ -1011,8 +1018,9 @@ static int em28xx_register_dvb(struct em28xx_dvb *dvb, struct module *module,
result = dvb_dmx_init(&dvb->demux);
if (result < 0) {
- printk(KERN_WARNING "%s: dvb_dmx_init failed (errno = %d)\n",
- dev->name, result);
+ dev_warn(&dev->intf->dev,
+ "dvb_dmx_init failed (errno = %d)\n",
+ result);
goto fail_dmx;
}
@@ -1021,31 +1029,35 @@ static int em28xx_register_dvb(struct em28xx_dvb *dvb, struct module *module,
dvb->dmxdev.capabilities = 0;
result = dvb_dmxdev_init(&dvb->dmxdev, &dvb->adapter);
if (result < 0) {
- printk(KERN_WARNING "%s: dvb_dmxdev_init failed (errno = %d)\n",
- dev->name, result);
+ dev_warn(&dev->intf->dev,
+ "dvb_dmxdev_init failed (errno = %d)\n",
+ result);
goto fail_dmxdev;
}
dvb->fe_hw.source = DMX_FRONTEND_0;
result = dvb->demux.dmx.add_frontend(&dvb->demux.dmx, &dvb->fe_hw);
if (result < 0) {
- printk(KERN_WARNING "%s: add_frontend failed (DMX_FRONTEND_0, errno = %d)\n",
- dev->name, result);
+ dev_warn(&dev->intf->dev,
+ "add_frontend failed (DMX_FRONTEND_0, errno = %d)\n",
+ result);
goto fail_fe_hw;
}
dvb->fe_mem.source = DMX_MEMORY_FE;
result = dvb->demux.dmx.add_frontend(&dvb->demux.dmx, &dvb->fe_mem);
if (result < 0) {
- printk(KERN_WARNING "%s: add_frontend failed (DMX_MEMORY_FE, errno = %d)\n",
- dev->name, result);
+ dev_warn(&dev->intf->dev,
+ "add_frontend failed (DMX_MEMORY_FE, errno = %d)\n",
+ result);
goto fail_fe_mem;
}
result = dvb->demux.dmx.connect_frontend(&dvb->demux.dmx, &dvb->fe_hw);
if (result < 0) {
- printk(KERN_WARNING "%s: connect_frontend failed (errno = %d)\n",
- dev->name, result);
+ dev_warn(&dev->intf->dev,
+ "connect_frontend failed (errno = %d)\n",
+ result);
goto fail_fe_conn;
}
@@ -1117,13 +1129,12 @@ static int em28xx_dvb_init(struct em28xx *dev)
return 0;
}
- em28xx_info("Binding DVB extension\n");
+ dev_info(&dev->intf->dev, "Binding DVB extension\n");
dvb = kzalloc(sizeof(struct em28xx_dvb), GFP_KERNEL);
- if (dvb == NULL) {
- em28xx_info("em28xx_dvb: memory allocation failed\n");
+ if (!dvb)
return -ENOMEM;
- }
+
dev->dvb = dvb;
dvb->fe[0] = dvb->fe[1] = NULL;
@@ -1142,7 +1153,8 @@ static int em28xx_dvb_init(struct em28xx *dev)
EM28XX_DVB_NUM_ISOC_PACKETS);
}
if (result) {
- em28xx_errdev("em28xx_dvb: failed to pre-allocate USB transfer buffers for DVB.\n");
+ dev_err(&dev->intf->dev,
+ "failed to pre-allocate USB transfer buffers for DVB.\n");
kfree(dvb);
dev->dvb = NULL;
return result;
@@ -1259,7 +1271,8 @@ static int em28xx_dvb_init(struct em28xx *dev)
case EM2880_BOARD_HAUPPAUGE_WINTV_HVR_900_R2:
case EM2882_BOARD_PINNACLE_HYBRID_PRO_330E:
dvb->fe[0] = dvb_attach(drxd_attach, &em28xx_drxd, NULL,
- &dev->i2c_adap[dev->def_i2c_bus], &dev->udev->dev);
+ &dev->i2c_adap[dev->def_i2c_bus],
+ &dev->intf->dev);
if (em28xx_attach_xc3028(0x61, dev) < 0) {
result = -EINVAL;
goto out_free;
@@ -1321,8 +1334,9 @@ static int em28xx_dvb_init(struct em28xx *dev)
result = gpio_request_one(dvb->lna_gpio,
GPIOF_OUT_INIT_LOW, NULL);
if (result)
- em28xx_errdev("gpio request failed %d\n",
- result);
+ dev_err(&dev->intf->dev,
+ "gpio request failed %d\n",
+ result);
else
gpio_free(dvb->lna_gpio);
@@ -1937,12 +1951,12 @@ static int em28xx_dvb_init(struct em28xx *dev)
}
break;
default:
- em28xx_errdev("/2: The frontend of your DVB/ATSC card"
- " isn't supported yet\n");
+ dev_err(&dev->intf->dev,
+ "The frontend of your DVB/ATSC card isn't supported yet\n");
break;
}
if (NULL == dvb->fe[0]) {
- em28xx_errdev("/2: frontend initialization failed\n");
+ dev_err(&dev->intf->dev, "frontend initialization failed\n");
result = -EINVAL;
goto out_free;
}
@@ -1952,12 +1966,12 @@ static int em28xx_dvb_init(struct em28xx *dev)
dvb->fe[1]->callback = em28xx_tuner_callback;
/* register everything */
- result = em28xx_register_dvb(dvb, THIS_MODULE, dev, &dev->udev->dev);
+ result = em28xx_register_dvb(dvb, THIS_MODULE, dev, &dev->intf->dev);
if (result < 0)
goto out_free;
- em28xx_info("DVB extension successfully initialized\n");
+ dev_info(&dev->intf->dev, "DVB extension successfully initialized\n");
kref_get(&dev->ref);
@@ -1997,7 +2011,7 @@ static int em28xx_dvb_fini(struct em28xx *dev)
if (!dev->dvb)
return 0;
- em28xx_info("Closing DVB extension\n");
+ dev_info(&dev->intf->dev, "Closing DVB extension\n");
dvb = dev->dvb;
@@ -2055,17 +2069,17 @@ static int em28xx_dvb_suspend(struct em28xx *dev)
if (!dev->board.has_dvb)
return 0;
- em28xx_info("Suspending DVB extension\n");
+ dev_info(&dev->intf->dev, "Suspending DVB extension\n");
if (dev->dvb) {
struct em28xx_dvb *dvb = dev->dvb;
if (dvb->fe[0]) {
ret = dvb_frontend_suspend(dvb->fe[0]);
- em28xx_info("fe0 suspend %d\n", ret);
+ dev_info(&dev->intf->dev, "fe0 suspend %d\n", ret);
}
if (dvb->fe[1]) {
dvb_frontend_suspend(dvb->fe[1]);
- em28xx_info("fe1 suspend %d\n", ret);
+ dev_info(&dev->intf->dev, "fe1 suspend %d\n", ret);
}
}
@@ -2082,18 +2096,18 @@ static int em28xx_dvb_resume(struct em28xx *dev)
if (!dev->board.has_dvb)
return 0;
- em28xx_info("Resuming DVB extension\n");
+ dev_info(&dev->intf->dev, "Resuming DVB extension\n");
if (dev->dvb) {
struct em28xx_dvb *dvb = dev->dvb;
if (dvb->fe[0]) {
ret = dvb_frontend_resume(dvb->fe[0]);
- em28xx_info("fe0 resume %d\n", ret);
+ dev_info(&dev->intf->dev, "fe0 resume %d\n", ret);
}
if (dvb->fe[1]) {
ret = dvb_frontend_resume(dvb->fe[1]);
- em28xx_info("fe1 resume %d\n", ret);
+ dev_info(&dev->intf->dev, "fe1 resume %d\n", ret);
}
}
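em28xx_register_dvb() above is a textbook goto-unwind ladder: each successful dvb_* registration gains a matching fail label, and an error jumps to the label that tears down only what was already set up, in reverse order. Reduced to its shape, with demo stubs standing in for the driver's real calls:

    static int demo_step_a(void) { return 0; }
    static int demo_step_b(void) { return 0; }
    static void demo_undo_a(void) { }

    static int demo_register(void)
    {
            int ret;

            ret = demo_step_a();
            if (ret < 0)
                    goto fail_a;
            ret = demo_step_b();
            if (ret < 0)
                    goto fail_b;
            return 0;

    fail_b:
            demo_undo_a();          /* undo in reverse order of setup */
    fail_a:
            return ret;
    }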
diff --git a/drivers/media/usb/em28xx/em28xx-i2c.c b/drivers/media/usb/em28xx/em28xx-i2c.c
index 8b690ac908a4..8c472d5adb50 100644
--- a/drivers/media/usb/em28xx/em28xx-i2c.c
+++ b/drivers/media/usb/em28xx/em28xx-i2c.c
@@ -22,13 +22,14 @@
Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
+#include "em28xx.h"
+
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/usb.h>
#include <linux/i2c.h>
#include <linux/jiffies.h>
-#include "em28xx.h"
#include "tuner-xc2028.h"
#include <media/v4l2-common.h>
#include <media/tuner.h>
@@ -43,6 +44,13 @@ static unsigned int i2c_debug;
module_param(i2c_debug, int, 0644);
MODULE_PARM_DESC(i2c_debug, "i2c debug message level (1: normal debug, 2: show I2C transfers)");
+#define dprintk(level, fmt, arg...) do { \
+ if (i2c_debug > level) \
+ dev_printk(KERN_DEBUG, &dev->intf->dev, \
+ "i2c: %s: " fmt, __func__, ## arg); \
+} while (0)
+
+
/*
* em2800_i2c_send_bytes()
* send up to 4 bytes to the em2800 i2c device
@@ -70,7 +78,8 @@ static int em2800_i2c_send_bytes(struct em28xx *dev, u8 addr, u8 *buf, u16 len)
/* trigger write */
ret = dev->em28xx_write_regs(dev, 4 - len, &b2[4 - len], 2 + len);
if (ret != 2 + len) {
- em28xx_warn("failed to trigger write to i2c address 0x%x (error=%i)\n",
+ dev_warn(&dev->intf->dev,
+ "failed to trigger write to i2c address 0x%x (error=%i)\n",
addr, ret);
return (ret < 0) ? ret : -EIO;
}
@@ -80,20 +89,18 @@ static int em2800_i2c_send_bytes(struct em28xx *dev, u8 addr, u8 *buf, u16 len)
if (ret == 0x80 + len - 1)
return len;
if (ret == 0x94 + len - 1) {
- if (i2c_debug == 1)
- em28xx_warn("R05 returned 0x%02x: I2C ACK error\n",
- ret);
+ dprintk(1, "R05 returned 0x%02x: I2C ACK error\n", ret);
return -ENXIO;
}
if (ret < 0) {
- em28xx_warn("failed to get i2c transfer status from bridge register (error=%i)\n",
- ret);
+ dev_warn(&dev->intf->dev,
+ "failed to get i2c transfer status from bridge register (error=%i)\n",
+ ret);
return ret;
}
msleep(5);
}
- if (i2c_debug)
- em28xx_warn("write to i2c device at 0x%x timed out\n", addr);
+ dprintk(0, "write to i2c device at 0x%x timed out\n", addr);
return -ETIMEDOUT;
}
@@ -116,8 +123,9 @@ static int em2800_i2c_recv_bytes(struct em28xx *dev, u8 addr, u8 *buf, u16 len)
buf2[0] = addr;
ret = dev->em28xx_write_regs(dev, 0x04, buf2, 2);
if (ret != 2) {
- em28xx_warn("failed to trigger read from i2c address 0x%x (error=%i)\n",
- addr, ret);
+ dev_warn(&dev->intf->dev,
+ "failed to trigger read from i2c address 0x%x (error=%i)\n",
+ addr, ret);
return (ret < 0) ? ret : -EIO;
}
@@ -127,29 +135,28 @@ static int em2800_i2c_recv_bytes(struct em28xx *dev, u8 addr, u8 *buf, u16 len)
if (ret == 0x84 + len - 1)
break;
if (ret == 0x94 + len - 1) {
- if (i2c_debug == 1)
- em28xx_warn("R05 returned 0x%02x: I2C ACK error\n",
- ret);
+ dprintk(1, "R05 returned 0x%02x: I2C ACK error\n",
+ ret);
return -ENXIO;
}
if (ret < 0) {
- em28xx_warn("failed to get i2c transfer status from bridge register (error=%i)\n",
- ret);
+ dev_warn(&dev->intf->dev,
+ "failed to get i2c transfer status from bridge register (error=%i)\n",
+ ret);
return ret;
}
msleep(5);
}
if (ret != 0x84 + len - 1) {
- if (i2c_debug)
- em28xx_warn("read from i2c device at 0x%x timed out\n",
- addr);
+ dprintk(0, "read from i2c device at 0x%x timed out\n", addr);
}
/* get the received message */
ret = dev->em28xx_read_reg_req_len(dev, 0x00, 4-len, buf2, len);
if (ret != len) {
- em28xx_warn("reading from i2c device at 0x%x failed: couldn't get the received message from the bridge (error=%i)\n",
- addr, ret);
+ dev_warn(&dev->intf->dev,
+ "reading from i2c device at 0x%x failed: couldn't get the received message from the bridge (error=%i)\n",
+ addr, ret);
return (ret < 0) ? ret : -EIO;
}
for (i = 0; i < len; i++)
@@ -193,12 +200,14 @@ static int em28xx_i2c_send_bytes(struct em28xx *dev, u16 addr, u8 *buf,
ret = dev->em28xx_write_regs_req(dev, stop ? 2 : 3, addr, buf, len);
if (ret != len) {
if (ret < 0) {
- em28xx_warn("writing to i2c device at 0x%x failed (error=%i)\n",
- addr, ret);
+ dev_warn(&dev->intf->dev,
+ "writing to i2c device at 0x%x failed (error=%i)\n",
+ addr, ret);
return ret;
} else {
- em28xx_warn("%i bytes write to i2c device at 0x%x requested, but %i bytes written\n",
- len, addr, ret);
+ dev_warn(&dev->intf->dev,
+ "%i bytes write to i2c device at 0x%x requested, but %i bytes written\n",
+ len, addr, ret);
return -EIO;
}
}
@@ -209,14 +218,14 @@ static int em28xx_i2c_send_bytes(struct em28xx *dev, u16 addr, u8 *buf,
if (ret == 0) /* success */
return len;
if (ret == 0x10) {
- if (i2c_debug == 1)
- em28xx_warn("I2C ACK error on writing to addr 0x%02x\n",
- addr);
+ dprintk(1, "I2C ACK error on writing to addr 0x%02x\n",
+ addr);
return -ENXIO;
}
if (ret < 0) {
- em28xx_warn("failed to get i2c transfer status from bridge register (error=%i)\n",
- ret);
+ dev_warn(&dev->intf->dev,
+ "failed to get i2c transfer status from bridge register (error=%i)\n",
+ ret);
return ret;
}
msleep(5);
@@ -229,14 +238,15 @@ static int em28xx_i2c_send_bytes(struct em28xx *dev, u16 addr, u8 *buf,
if (ret == 0x02 || ret == 0x04) {
/* NOTE: these errors seem to be related to clock stretching */
- if (i2c_debug)
- em28xx_warn("write to i2c device at 0x%x timed out (status=%i)\n",
- addr, ret);
+ dprintk(0,
+ "write to i2c device at 0x%x timed out (status=%i)\n",
+ addr, ret);
return -ETIMEDOUT;
}
- em28xx_warn("write to i2c device at 0x%x failed with unknown error (status=%i)\n",
- addr, ret);
+ dev_warn(&dev->intf->dev,
+ "write to i2c device at 0x%x failed with unknown error (status=%i)\n",
+ addr, ret);
return -EIO;
}
@@ -258,8 +268,9 @@ static int em28xx_i2c_recv_bytes(struct em28xx *dev, u16 addr, u8 *buf, u16 len)
/* Read data from i2c device */
ret = dev->em28xx_read_reg_req_len(dev, 2, addr, buf, len);
if (ret < 0) {
- em28xx_warn("reading from i2c device at 0x%x failed (error=%i)\n",
- addr, ret);
+ dev_warn(&dev->intf->dev,
+ "reading from i2c device at 0x%x failed (error=%i)\n",
+ addr, ret);
return ret;
}
/*
@@ -276,27 +287,28 @@ static int em28xx_i2c_recv_bytes(struct em28xx *dev, u16 addr, u8 *buf, u16 len)
if (ret == 0) /* success */
return len;
if (ret < 0) {
- em28xx_warn("failed to get i2c transfer status from bridge register (error=%i)\n",
- ret);
+ dev_warn(&dev->intf->dev,
+ "failed to get i2c transfer status from bridge register (error=%i)\n",
+ ret);
return ret;
}
if (ret == 0x10) {
- if (i2c_debug == 1)
- em28xx_warn("I2C ACK error on writing to addr 0x%02x\n",
- addr);
+ dprintk(1, "I2C ACK error on writing to addr 0x%02x\n",
+ addr);
return -ENXIO;
}
if (ret == 0x02 || ret == 0x04) {
/* NOTE: these errors seem to be related to clock stretching */
- if (i2c_debug)
- em28xx_warn("write to i2c device at 0x%x timed out (status=%i)\n",
- addr, ret);
+ dprintk(0,
+ "write to i2c device at 0x%x timed out (status=%i)\n",
+ addr, ret);
return -ETIMEDOUT;
}
- em28xx_warn("write to i2c device at 0x%x failed with unknown error (status=%i)\n",
- addr, ret);
+ dev_warn(&dev->intf->dev,
+ "write to i2c device at 0x%x failed with unknown error (status=%i)\n",
+ addr, ret);
return -EIO;
}
@@ -335,12 +347,14 @@ static int em25xx_bus_B_send_bytes(struct em28xx *dev, u16 addr, u8 *buf,
ret = dev->em28xx_write_regs_req(dev, 0x06, addr, buf, len);
if (ret != len) {
if (ret < 0) {
- em28xx_warn("writing to i2c device at 0x%x failed (error=%i)\n",
- addr, ret);
+ dev_warn(&dev->intf->dev,
+ "writing to i2c device at 0x%x failed (error=%i)\n",
+ addr, ret);
return ret;
} else {
- em28xx_warn("%i bytes write to i2c device at 0x%x requested, but %i bytes written\n",
- len, addr, ret);
+ dev_warn(&dev->intf->dev,
+ "%i bytes write to i2c device at 0x%x requested, but %i bytes written\n",
+ len, addr, ret);
return -EIO;
}
}
@@ -353,9 +367,7 @@ static int em25xx_bus_B_send_bytes(struct em28xx *dev, u16 addr, u8 *buf,
if (!ret)
return len;
else if (ret > 0) {
- if (i2c_debug == 1)
- em28xx_warn("Bus B R08 returned 0x%02x: I2C ACK error\n",
- ret);
+ dprintk(1, "Bus B R08 returned 0x%02x: I2C ACK error\n", ret);
return -ENXIO;
}
@@ -386,8 +398,9 @@ static int em25xx_bus_B_recv_bytes(struct em28xx *dev, u16 addr, u8 *buf,
/* Read value */
ret = dev->em28xx_read_reg_req_len(dev, 0x06, addr, buf, len);
if (ret < 0) {
- em28xx_warn("reading from i2c device at 0x%x failed (error=%i)\n",
- addr, ret);
+ dev_warn(&dev->intf->dev,
+ "reading from i2c device at 0x%x failed (error=%i)\n",
+ addr, ret);
return ret;
}
/*
@@ -408,9 +421,7 @@ static int em25xx_bus_B_recv_bytes(struct em28xx *dev, u16 addr, u8 *buf,
if (!ret)
return len;
else if (ret > 0) {
- if (i2c_debug == 1)
- em28xx_warn("Bus B R08 returned 0x%02x: I2C ACK error\n",
- ret);
+ dprintk(1, "Bus B R08 returned 0x%02x: I2C ACK error\n", ret);
return -ENXIO;
}
@@ -528,57 +539,46 @@ static int em28xx_i2c_xfer(struct i2c_adapter *i2c_adap,
}
for (i = 0; i < num; i++) {
addr = msgs[i].addr << 1;
- if (i2c_debug > 1)
- printk(KERN_DEBUG "%s at %s: %s %s addr=%02x len=%d:",
- dev->name, __func__ ,
- (msgs[i].flags & I2C_M_RD) ? "read" : "write",
- i == num - 1 ? "stop" : "nonstop",
- addr, msgs[i].len);
if (!msgs[i].len) {
/*
* no len: check only for device presence
* This code is only called during device probe.
*/
rc = i2c_check_for_device(i2c_bus, addr);
- if (rc < 0) {
- if (rc == -ENXIO) {
- if (i2c_debug > 1)
- printk(KERN_CONT " no device\n");
- rc = -ENODEV;
- } else {
- if (i2c_debug > 1)
- printk(KERN_CONT " ERROR: %i\n", rc);
- }
- rt_mutex_unlock(&dev->i2c_bus_lock);
- return rc;
- }
+
+ if (rc == -ENXIO)
+ rc = -ENODEV;
} else if (msgs[i].flags & I2C_M_RD) {
/* read bytes */
rc = i2c_recv_bytes(i2c_bus, msgs[i]);
-
- if (i2c_debug > 1 && rc >= 0)
- printk(KERN_CONT " %*ph",
- msgs[i].len, msgs[i].buf);
} else {
- if (i2c_debug > 1)
- printk(KERN_CONT " %*ph",
- msgs[i].len, msgs[i].buf);
-
/* write bytes */
rc = i2c_send_bytes(i2c_bus, msgs[i], i == num - 1);
}
- if (rc < 0) {
- if (i2c_debug > 1)
- printk(KERN_CONT " ERROR: %i\n", rc);
- rt_mutex_unlock(&dev->i2c_bus_lock);
- return rc;
- }
- if (i2c_debug > 1)
- printk(KERN_CONT "\n");
+
+ if (rc < 0)
+ goto error;
+
+ dprintk(2, "%s %s addr=%02x len=%d: %*ph\n",
+ (msgs[i].flags & I2C_M_RD) ? "read" : "write",
+ i == num - 1 ? "stop" : "nonstop",
+ addr, msgs[i].len,
+ msgs[i].len, msgs[i].buf);
}
rt_mutex_unlock(&dev->i2c_bus_lock);
return num;
+
+error:
+ dprintk(2, "%s %s addr=%02x len=%d: %sERROR: %i\n",
+ (msgs[i].flags & I2C_M_RD) ? "read" : "write",
+ i == num - 1 ? "stop" : "nonstop",
+ addr, msgs[i].len,
+ (rc == -ENODEV) ? "no device " : "",
+ rc);
+
+ rt_mutex_unlock(&dev->i2c_bus_lock);
+ return rc;
}
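The em28xx_i2c_xfer() rewrite above moves all failure logging to a single exit label so the success path stays linear. The generic shape of that transformation, with placeholder names (do_one_msg, bus_lock) standing in for the driver's calls:

	for (i = 0; i < num; i++) {
		rc = do_one_msg(&msgs[i]);	/* probe, read or write */
		if (rc < 0)
			goto error;		/* one consolidated log site */
	}
	rt_mutex_unlock(&bus_lock);
	return num;

error:
	/* log once with full context, then drop the lock and fail */
	rt_mutex_unlock(&bus_lock);
	return rc;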
/*
@@ -672,7 +672,7 @@ static int em28xx_i2c_eeprom(struct em28xx *dev, unsigned bus,
/* Check if board has eeprom */
err = i2c_master_recv(&dev->i2c_client[bus], &buf, 0);
if (err < 0) {
- em28xx_info("board has no eeprom\n");
+ dev_info(&dev->intf->dev, "board has no eeprom\n");
return -ENODEV;
}
@@ -685,17 +685,19 @@ static int em28xx_i2c_eeprom(struct em28xx *dev, unsigned bus,
dev->eeprom_addrwidth_16bit,
len, data);
if (err != len) {
- em28xx_errdev("failed to read eeprom (err=%d)\n", err);
+ dev_err(&dev->intf->dev,
+ "failed to read eeprom (err=%d)\n", err);
goto error;
}
if (i2c_debug) {
/* Display eeprom content */
- print_hex_dump(KERN_INFO, "eeprom ", DUMP_PREFIX_OFFSET,
+ print_hex_dump(KERN_DEBUG, "em28xx eeprom ", DUMP_PREFIX_OFFSET,
16, 1, data, len, true);
if (dev->eeprom_addrwidth_16bit)
- em28xx_info("eeprom %06x: ... (skipped)\n", 256);
+ dev_info(&dev->intf->dev,
+ "eeprom %06x: ... (skipped)\n", 256);
}
if (dev->eeprom_addrwidth_16bit &&
@@ -707,11 +709,14 @@ static int em28xx_i2c_eeprom(struct em28xx *dev, unsigned bus,
dev->hash = em28xx_hash_mem(data, len, 32);
mc_start = (data[1] << 8) + 4; /* usually 0x0004 */
- em28xx_info("EEPROM ID = %02x %02x %02x %02x, EEPROM hash = 0x%08lx\n",
- data[0], data[1], data[2], data[3], dev->hash);
- em28xx_info("EEPROM info:\n");
- em28xx_info("\tmicrocode start address = 0x%04x, boot configuration = 0x%02x\n",
- mc_start, data[2]);
+ dev_info(&dev->intf->dev,
+ "EEPROM ID = %02x %02x %02x %02x, EEPROM hash = 0x%08lx\n",
+ data[0], data[1], data[2], data[3], dev->hash);
+ dev_info(&dev->intf->dev,
+ "EEPROM info:\n");
+ dev_info(&dev->intf->dev,
+ "\tmicrocode start address = 0x%04x, boot configuration = 0x%02x\n",
+ mc_start, data[2]);
/*
* boot configuration (address 0x0002):
* [0] microcode download speed: 1 = 400 kHz; 0 = 100 kHz
@@ -729,8 +734,9 @@ static int em28xx_i2c_eeprom(struct em28xx *dev, unsigned bus,
err = em28xx_i2c_read_block(dev, bus, mc_start + 46, 1, 2,
data);
if (err != 2) {
- em28xx_errdev("failed to read hardware configuration data from eeprom (err=%d)\n",
- err);
+ dev_err(&dev->intf->dev,
+ "failed to read hardware configuration data from eeprom (err=%d)\n",
+ err);
goto error;
}
@@ -747,8 +753,9 @@ static int em28xx_i2c_eeprom(struct em28xx *dev, unsigned bus,
err = em28xx_i2c_read_block(dev, bus, hwconf_offset, 1, len,
data);
if (err != len) {
- em28xx_errdev("failed to read hardware configuration data from eeprom (err=%d)\n",
- err);
+ dev_err(&dev->intf->dev,
+ "failed to read hardware configuration data from eeprom (err=%d)\n",
+ err);
goto error;
}
@@ -756,7 +763,8 @@ static int em28xx_i2c_eeprom(struct em28xx *dev, unsigned bus,
/* NOTE: not all devices provide this type of dataset */
if (data[0] != 0x1a || data[1] != 0xeb ||
data[2] != 0x67 || data[3] != 0x95) {
- em28xx_info("\tno hardware configuration dataset found in eeprom\n");
+ dev_info(&dev->intf->dev,
+ "\tno hardware configuration dataset found in eeprom\n");
kfree(data);
return 0;
}
@@ -767,11 +775,14 @@ static int em28xx_i2c_eeprom(struct em28xx *dev, unsigned bus,
data[0] == 0x1a && data[1] == 0xeb &&
data[2] == 0x67 && data[3] == 0x95) {
dev->hash = em28xx_hash_mem(data, len, 32);
- em28xx_info("EEPROM ID = %02x %02x %02x %02x, EEPROM hash = 0x%08lx\n",
- data[0], data[1], data[2], data[3], dev->hash);
- em28xx_info("EEPROM info:\n");
+ dev_info(&dev->intf->dev,
+ "EEPROM ID = %02x %02x %02x %02x, EEPROM hash = 0x%08lx\n",
+ data[0], data[1], data[2], data[3], dev->hash);
+ dev_info(&dev->intf->dev,
+ "EEPROM info:\n");
} else {
- em28xx_info("unknown eeprom format or eeprom corrupted !\n");
+ dev_info(&dev->intf->dev,
+ "unknown eeprom format or eeprom corrupted !\n");
err = -ENODEV;
goto error;
}
@@ -782,50 +793,55 @@ static int em28xx_i2c_eeprom(struct em28xx *dev, unsigned bus,
switch (le16_to_cpu(dev_config->chip_conf) >> 4 & 0x3) {
case 0:
- em28xx_info("\tNo audio on board.\n");
+ dev_info(&dev->intf->dev, "\tNo audio on board.\n");
break;
case 1:
- em28xx_info("\tAC97 audio (5 sample rates)\n");
+ dev_info(&dev->intf->dev, "\tAC97 audio (5 sample rates)\n");
break;
case 2:
if (dev->chip_id < CHIP_ID_EM2860)
- em28xx_info("\tI2S audio, sample rate=32k\n");
+ dev_info(&dev->intf->dev,
+ "\tI2S audio, sample rate=32k\n");
else
- em28xx_info("\tI2S audio, 3 sample rates\n");
+ dev_info(&dev->intf->dev,
+ "\tI2S audio, 3 sample rates\n");
break;
case 3:
if (dev->chip_id < CHIP_ID_EM2860)
- em28xx_info("\tI2S audio, 3 sample rates\n");
+ dev_info(&dev->intf->dev,
+ "\tI2S audio, 3 sample rates\n");
else
- em28xx_info("\tI2S audio, 5 sample rates\n");
+ dev_info(&dev->intf->dev,
+ "\tI2S audio, 5 sample rates\n");
break;
}
if (le16_to_cpu(dev_config->chip_conf) & 1 << 3)
- em28xx_info("\tUSB Remote wakeup capable\n");
+ dev_info(&dev->intf->dev, "\tUSB Remote wakeup capable\n");
if (le16_to_cpu(dev_config->chip_conf) & 1 << 2)
- em28xx_info("\tUSB Self power capable\n");
+ dev_info(&dev->intf->dev, "\tUSB Self power capable\n");
switch (le16_to_cpu(dev_config->chip_conf) & 0x3) {
case 0:
- em28xx_info("\t500mA max power\n");
+ dev_info(&dev->intf->dev, "\t500mA max power\n");
break;
case 1:
- em28xx_info("\t400mA max power\n");
+ dev_info(&dev->intf->dev, "\t400mA max power\n");
break;
case 2:
- em28xx_info("\t300mA max power\n");
+ dev_info(&dev->intf->dev, "\t300mA max power\n");
break;
case 3:
- em28xx_info("\t200mA max power\n");
+ dev_info(&dev->intf->dev, "\t200mA max power\n");
break;
}
- em28xx_info("\tTable at offset 0x%02x, strings=0x%04x, 0x%04x, 0x%04x\n",
- dev_config->string_idx_table,
- le16_to_cpu(dev_config->string1),
- le16_to_cpu(dev_config->string2),
- le16_to_cpu(dev_config->string3));
+ dev_info(&dev->intf->dev,
+ "\tTable at offset 0x%02x, strings=0x%04x, 0x%04x, 0x%04x\n",
+ dev_config->string_idx_table,
+ le16_to_cpu(dev_config->string1),
+ le16_to_cpu(dev_config->string2),
+ le16_to_cpu(dev_config->string3));
return 0;
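For reference, the chip_conf word decoded by the hunks above packs several fields; a sketch of the layout as implied by the switch statements here (variable names illustrative):

	u16 conf = le16_to_cpu(dev_config->chip_conf);
	unsigned int audio = (conf >> 4) & 0x3;	/* 0 none, 1 AC97, 2/3 I2S */
	bool remote_wakeup = conf & (1 << 3);	/* USB remote wakeup capable */
	bool self_powered  = conf & (1 << 2);	/* USB self power capable */
	unsigned int power = conf & 0x3;	/* 0=500mA ... 3=200mA max */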
@@ -914,8 +930,9 @@ void em28xx_do_i2c_scan(struct em28xx *dev, unsigned bus)
if (rc < 0)
continue;
i2c_devicelist[i] = i;
- em28xx_info("found i2c device @ 0x%x on bus %d [%s]\n",
- i << 1, bus, i2c_devs[i] ? i2c_devs[i] : "???");
+ dev_info(&dev->intf->dev,
+ "found i2c device @ 0x%x on bus %d [%s]\n",
+ i << 1, bus, i2c_devs[i] ? i2c_devs[i] : "???");
}
if (bus == dev->def_i2c_bus)
@@ -939,8 +956,8 @@ int em28xx_i2c_register(struct em28xx *dev, unsigned bus,
return -ENODEV;
dev->i2c_adap[bus] = em28xx_adap_template;
- dev->i2c_adap[bus].dev.parent = &dev->udev->dev;
- strcpy(dev->i2c_adap[bus].name, dev->name);
+ dev->i2c_adap[bus].dev.parent = &dev->intf->dev;
+ strcpy(dev->i2c_adap[bus].name, dev_name(&dev->intf->dev));
dev->i2c_bus[bus].bus = bus;
dev->i2c_bus[bus].algo_type = algo_type;
@@ -949,8 +966,9 @@ int em28xx_i2c_register(struct em28xx *dev, unsigned bus,
retval = i2c_add_adapter(&dev->i2c_adap[bus]);
if (retval < 0) {
- em28xx_errdev("%s: i2c_add_adapter failed! retval [%d]\n",
- __func__, retval);
+ dev_err(&dev->intf->dev,
+ "%s: i2c_add_adapter failed! retval [%d]\n",
+ __func__, retval);
return retval;
}
@@ -961,8 +979,9 @@ int em28xx_i2c_register(struct em28xx *dev, unsigned bus,
if (!bus) {
retval = em28xx_i2c_eeprom(dev, bus, &dev->eedata, &dev->eedata_len);
if ((retval < 0) && (retval != -ENODEV)) {
- em28xx_errdev("%s: em28xx_i2_eeprom failed! retval [%d]\n",
- __func__, retval);
+ dev_err(&dev->intf->dev,
+ "%s: em28xx_i2_eeprom failed! retval [%d]\n",
+ __func__, retval);
return retval;
}
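The new i2c dprintk() introduced at the top of this file keys debug output to a module parameter and routes it through dev_printk(), so messages are prefixed with the USB interface name by the driver core. A self-contained sketch of the same pattern (the demo_* names are illustrative):

#include <linux/device.h>
#include <linux/module.h>

static unsigned int demo_debug;
module_param(demo_debug, uint, 0644);

#define demo_dprintk(dev, level, fmt, arg...) do {			\
	if (demo_debug > (level))					\
		dev_printk(KERN_DEBUG, dev,				\
			   "i2c: %s: " fmt, __func__, ## arg);		\
} while (0)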
diff --git a/drivers/media/usb/em28xx/em28xx-input.c b/drivers/media/usb/em28xx/em28xx-input.c
index 4007356d991d..782ce095c8c5 100644
--- a/drivers/media/usb/em28xx/em28xx-input.c
+++ b/drivers/media/usb/em28xx/em28xx-input.c
@@ -21,6 +21,8 @@
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
+#include "em28xx.h"
+
#include <linux/module.h>
#include <linux/init.h>
#include <linux/delay.h>
@@ -29,8 +31,6 @@
#include <linux/slab.h>
#include <linux/bitrev.h>
-#include "em28xx.h"
-
#define EM28XX_SNAPSHOT_KEY KEY_CAMERA
#define EM28XX_BUTTONS_DEBOUNCED_QUERY_INTERVAL 500 /* [ms] */
#define EM28XX_BUTTONS_VOLATILE_QUERY_INTERVAL 100 /* [ms] */
@@ -41,10 +41,11 @@ MODULE_PARM_DESC(ir_debug, "enable debug messages [IR]");
#define MODULE_NAME "em28xx"
-#define dprintk(fmt, arg...) \
- if (ir_debug) { \
- printk(KERN_DEBUG "%s/ir: " fmt, ir->name , ## arg); \
- }
+#define dprintk(fmt, arg...) do { \
+ if (ir_debug) \
+ dev_printk(KERN_DEBUG, &ir->dev->intf->dev, \
+ "input: %s: " fmt, __func__, ## arg); \
+} while (0)
/**********************************************************
Polling structure used by em28xx IR's
@@ -458,8 +459,9 @@ static int em28xx_ir_change_protocol(struct rc_dev *rc_dev, u64 *rc_type)
case CHIP_ID_EM28178:
return em2874_ir_change_protocol(rc_dev, rc_type);
default:
- printk("Unrecognized em28xx chip id 0x%02x: IR not supported\n",
- dev->chip_id);
+ dev_err(&ir->dev->intf->dev,
+ "Unrecognized em28xx chip id 0x%02x: IR not supported\n",
+ dev->chip_id);
return -EINVAL;
}
}
@@ -564,15 +566,16 @@ static void em28xx_query_buttons(struct work_struct *work)
static int em28xx_register_snapshot_button(struct em28xx *dev)
{
+ struct usb_device *udev = interface_to_usbdev(dev->intf);
struct input_dev *input_dev;
int err;
- em28xx_info("Registering snapshot button...\n");
+ dev_info(&dev->intf->dev, "Registering snapshot button...\n");
input_dev = input_allocate_device();
if (!input_dev)
return -ENOMEM;
- usb_make_path(dev->udev, dev->snapshot_button_path,
+ usb_make_path(udev, dev->snapshot_button_path,
sizeof(dev->snapshot_button_path));
strlcat(dev->snapshot_button_path, "/sbutton",
sizeof(dev->snapshot_button_path));
@@ -584,14 +587,14 @@ static int em28xx_register_snapshot_button(struct em28xx *dev)
input_dev->keycodesize = 0;
input_dev->keycodemax = 0;
input_dev->id.bustype = BUS_USB;
- input_dev->id.vendor = le16_to_cpu(dev->udev->descriptor.idVendor);
- input_dev->id.product = le16_to_cpu(dev->udev->descriptor.idProduct);
+ input_dev->id.vendor = le16_to_cpu(udev->descriptor.idVendor);
+ input_dev->id.product = le16_to_cpu(udev->descriptor.idProduct);
input_dev->id.version = 1;
- input_dev->dev.parent = &dev->udev->dev;
+ input_dev->dev.parent = &dev->intf->dev;
err = input_register_device(input_dev);
if (err) {
- em28xx_errdev("input_register_device failed\n");
+ dev_err(&dev->intf->dev, "input_register_device failed\n");
input_free_device(input_dev);
return err;
}
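With struct em28xx now holding the usb_interface instead of the usb_device, code that needs USB descriptor fields derives the device on demand, as the hunk above does. A sketch of that idiom (demo_fill_input_ids is illustrative):

#include <linux/input.h>
#include <linux/usb.h>

static void demo_fill_input_ids(struct usb_interface *intf,
				struct input_dev *input_dev)
{
	struct usb_device *udev = interface_to_usbdev(intf);

	input_dev->id.bustype = BUS_USB;
	input_dev->id.vendor  = le16_to_cpu(udev->descriptor.idVendor);
	input_dev->id.product = le16_to_cpu(udev->descriptor.idProduct);
	input_dev->dev.parent = &intf->dev;	/* parent is the interface */
}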
@@ -631,7 +634,8 @@ static void em28xx_init_buttons(struct em28xx *dev)
} else if (button->role == EM28XX_BUTTON_ILLUMINATION) {
/* Check sanity */
if (!em28xx_find_led(dev, EM28XX_LED_ILLUMINATION)) {
- em28xx_errdev("BUG: illumination button defined, but no illumination LED.\n");
+ dev_err(&dev->intf->dev,
+ "BUG: illumination button defined, but no illumination LED.\n");
goto next_button;
}
}
@@ -667,7 +671,7 @@ static void em28xx_shutdown_buttons(struct em28xx *dev)
dev->num_button_polling_addresses = 0;
/* Deregister input devices */
if (dev->sbutton_input_dev != NULL) {
- em28xx_info("Deregistering snapshot button\n");
+ dev_info(&dev->intf->dev, "Deregistering snapshot button\n");
input_unregister_device(dev->sbutton_input_dev);
dev->sbutton_input_dev = NULL;
}
@@ -675,6 +679,7 @@ static void em28xx_shutdown_buttons(struct em28xx *dev)
static int em28xx_ir_init(struct em28xx *dev)
{
+ struct usb_device *udev = interface_to_usbdev(dev->intf);
struct em28xx_IR *ir;
struct rc_dev *rc;
int err = -ENOMEM;
@@ -696,19 +701,20 @@ static int em28xx_ir_init(struct em28xx *dev)
i2c_rc_dev_addr = em28xx_probe_i2c_ir(dev);
if (!i2c_rc_dev_addr) {
dev->board.has_ir_i2c = 0;
- em28xx_warn("No i2c IR remote control device found.\n");
+ dev_warn(&dev->intf->dev,
+ "No i2c IR remote control device found.\n");
return -ENODEV;
}
}
if (dev->board.ir_codes == NULL && !dev->board.has_ir_i2c) {
/* No remote control support */
- em28xx_warn("Remote control support is not available for "
- "this card.\n");
+ dev_warn(&dev->intf->dev,
+ "Remote control support is not available for this card.\n");
return 0;
}
- em28xx_info("Registering input extension\n");
+ dev_info(&dev->intf->dev, "Registering input extension\n");
ir = kzalloc(sizeof(*ir), GFP_KERNEL);
if (!ir)
@@ -792,18 +798,19 @@ static int em28xx_ir_init(struct em28xx *dev)
ir->polling = 100; /* ms */
/* init input device */
- snprintf(ir->name, sizeof(ir->name), "em28xx IR (%s)", dev->name);
+ snprintf(ir->name, sizeof(ir->name), "%s IR",
+ dev_name(&dev->intf->dev));
- usb_make_path(dev->udev, ir->phys, sizeof(ir->phys));
+ usb_make_path(udev, ir->phys, sizeof(ir->phys));
strlcat(ir->phys, "/input0", sizeof(ir->phys));
rc->input_name = ir->name;
rc->input_phys = ir->phys;
rc->input_id.bustype = BUS_USB;
rc->input_id.version = 1;
- rc->input_id.vendor = le16_to_cpu(dev->udev->descriptor.idVendor);
- rc->input_id.product = le16_to_cpu(dev->udev->descriptor.idProduct);
- rc->dev.parent = &dev->udev->dev;
+ rc->input_id.vendor = le16_to_cpu(udev->descriptor.idVendor);
+ rc->input_id.product = le16_to_cpu(udev->descriptor.idProduct);
+ rc->dev.parent = &dev->intf->dev;
rc->driver_name = MODULE_NAME;
/* all done */
@@ -811,7 +818,7 @@ static int em28xx_ir_init(struct em28xx *dev)
if (err)
goto error;
- em28xx_info("Input extension successfully initalized\n");
+ dev_info(&dev->intf->dev, "Input extension successfully initalized\n");
return 0;
@@ -832,7 +839,7 @@ static int em28xx_ir_fini(struct em28xx *dev)
return 0;
}
- em28xx_info("Closing input extension\n");
+ dev_info(&dev->intf->dev, "Closing input extension\n");
em28xx_shutdown_buttons(dev);
@@ -861,7 +868,7 @@ static int em28xx_ir_suspend(struct em28xx *dev)
if (dev->is_audio_only)
return 0;
- em28xx_info("Suspending input extension\n");
+ dev_info(&dev->intf->dev, "Suspending input extension\n");
if (ir)
cancel_delayed_work_sync(&ir->work);
cancel_delayed_work_sync(&dev->buttons_query_work);
@@ -878,7 +885,7 @@ static int em28xx_ir_resume(struct em28xx *dev)
if (dev->is_audio_only)
return 0;
- em28xx_info("Resuming input extension\n");
+ dev_info(&dev->intf->dev, "Resuming input extension\n");
/* if suspend calls ir_raw_event_unregister(), then resume should call
ir_raw_event_register() */
if (ir)
diff --git a/drivers/media/usb/em28xx/em28xx-vbi.c b/drivers/media/usb/em28xx/em28xx-vbi.c
index 836c6b53b16c..0bac552bbe87 100644
--- a/drivers/media/usb/em28xx/em28xx-vbi.c
+++ b/drivers/media/usb/em28xx/em28xx-vbi.c
@@ -21,12 +21,14 @@
02110-1301, USA.
*/
+#include "em28xx.h"
+
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/hardirq.h>
#include <linux/init.h>
+#include <linux/usb.h>
-#include "em28xx.h"
#include "em28xx-v4l.h"
/* ------------------------------------------------------------------ */
@@ -63,8 +65,9 @@ static int vbi_buffer_prepare(struct vb2_buffer *vb)
size = v4l2->vbi_width * v4l2->vbi_height * 2;
if (vb2_plane_size(vb, 0) < size) {
- printk(KERN_INFO "%s data will not fit into plane (%lu < %lu)\n",
- __func__, vb2_plane_size(vb, 0), size);
+ dev_info(&dev->intf->dev,
+ "%s data will not fit into plane (%lu < %lu)\n",
+ __func__, vb2_plane_size(vb, 0), size);
return -EINVAL;
}
vb2_set_plane_payload(vb, 0, size);
diff --git a/drivers/media/usb/em28xx/em28xx-video.c b/drivers/media/usb/em28xx/em28xx-video.c
index 1f7fa059eb34..8d93100334ea 100644
--- a/drivers/media/usb/em28xx/em28xx-video.c
+++ b/drivers/media/usb/em28xx/em28xx-video.c
@@ -26,6 +26,8 @@
Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
+#include "em28xx.h"
+
#include <linux/init.h>
#include <linux/list.h>
#include <linux/module.h>
@@ -37,7 +39,6 @@
#include <linux/mutex.h>
#include <linux/slab.h>
-#include "em28xx.h"
#include "em28xx-v4l.h"
#include <media/v4l2-common.h>
#include <media/v4l2-ioctl.h>
@@ -63,18 +64,17 @@ static int alt;
module_param(alt, int, 0644);
MODULE_PARM_DESC(alt, "alternate setting to use for video endpoint");
-#define em28xx_videodbg(fmt, arg...) do {\
- if (video_debug) \
- printk(KERN_INFO "%s %s :"fmt, \
- dev->name, __func__ , ##arg); } while (0)
+#define em28xx_videodbg(fmt, arg...) do { \
+ if (video_debug) \
+ dev_printk(KERN_DEBUG, &dev->intf->dev, \
+ "video: %s: " fmt, __func__, ## arg); \
+} while (0)
-#define em28xx_isocdbg(fmt, arg...) \
-do {\
- if (isoc_debug) { \
- printk(KERN_INFO "%s %s :"fmt, \
- dev->name, __func__ , ##arg); \
- } \
- } while (0)
+#define em28xx_isocdbg(fmt, arg...) do { \
+ if (isoc_debug) \
+ dev_printk(KERN_DEBUG, &dev->intf->dev, \
+ "isoc: %s: " fmt, __func__, ## arg); \
+} while (0)
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC " - v4l2 interface");
@@ -360,6 +360,7 @@ static int em28xx_resolution_set(struct em28xx *dev)
static int em28xx_set_alternate(struct em28xx *dev)
{
struct em28xx_v4l2 *v4l2 = dev->v4l2;
+ struct usb_device *udev = interface_to_usbdev(dev->intf);
int errCode;
int i;
unsigned int min_pkt_size = v4l2->width * 2 + 4;
@@ -411,10 +412,11 @@ set_alt:
}
em28xx_videodbg("setting alternate %d with wMaxPacketSize=%u\n",
dev->alt, dev->max_pkt_size);
- errCode = usb_set_interface(dev->udev, dev->ifnum, dev->alt);
+ errCode = usb_set_interface(udev, dev->ifnum, dev->alt);
if (errCode < 0) {
- em28xx_errdev("cannot change alternate number to %d (error=%i)\n",
- dev->alt, errCode);
+ dev_err(&dev->intf->dev,
+ "cannot change alternate number to %d (error=%i)\n",
+ dev->alt, errCode);
return errCode;
}
return 0;
@@ -505,8 +507,7 @@ static void em28xx_copy_video(struct em28xx *dev,
if ((char *)startwrite + lencopy > (char *)buf->vb_buf +
buf->length) {
- em28xx_isocdbg("Overflow of %zu bytes past buffer end"
- "(2)\n",
+ em28xx_isocdbg("Overflow of %zu bytes past buffer end(2)\n",
((char *)startwrite + lencopy) -
((char *)buf->vb_buf + buf->length));
lencopy = remain = (char *)buf->vb_buf + buf->length -
@@ -926,10 +927,11 @@ static int em28xx_enable_analog_tuner(struct em28xx *dev)
ret = media_entity_setup_link(link, flags);
if (ret) {
- pr_err("Couldn't change link %s->%s to %s. Error %d\n",
- source->name, sink->name,
- flags ? "enabled" : "disabled",
- ret);
+ dev_err(&dev->intf->dev,
+ "Couldn't change link %s->%s to %s. Error %d\n",
+ source->name, sink->name,
+ flags ? "enabled" : "disabled",
+ ret);
return ret;
} else
em28xx_videodbg("link %s->%s was %s\n",
@@ -957,14 +959,16 @@ static void em28xx_v4l2_create_entities(struct em28xx *dev)
v4l2->video_pad.flags = MEDIA_PAD_FL_SINK;
ret = media_entity_pads_init(&v4l2->vdev.entity, 1, &v4l2->video_pad);
if (ret < 0)
- pr_err("failed to initialize video media entity!\n");
+ dev_err(&dev->intf->dev,
+ "failed to initialize video media entity!\n");
if (em28xx_vbi_supported(dev)) {
v4l2->vbi_pad.flags = MEDIA_PAD_FL_SINK;
ret = media_entity_pads_init(&v4l2->vbi_dev.entity, 1,
&v4l2->vbi_pad);
if (ret < 0)
- pr_err("failed to initialize vbi media entity!\n");
+ dev_err(&dev->intf->dev,
+ "failed to initialize vbi media entity!\n");
}
/* Webcams don't have input connectors */
@@ -997,11 +1001,13 @@ static void em28xx_v4l2_create_entities(struct em28xx *dev)
ret = media_entity_pads_init(ent, 1, &dev->input_pad[i]);
if (ret < 0)
- pr_err("failed to initialize input pad[%d]!\n", i);
+ dev_err(&dev->intf->dev,
+ "failed to initialize input pad[%d]!\n", i);
ret = media_device_register_entity(dev->media_dev, ent);
if (ret < 0)
- pr_err("failed to register input entity %d!\n", i);
+ dev_err(&dev->intf->dev,
+ "failed to register input entity %d!\n", i);
}
#endif
}
@@ -1854,10 +1860,11 @@ static int vidioc_querycap(struct file *file, void *priv,
struct video_device *vdev = video_devdata(file);
struct em28xx *dev = video_drvdata(file);
struct em28xx_v4l2 *v4l2 = dev->v4l2;
+ struct usb_device *udev = interface_to_usbdev(dev->intf);
strlcpy(cap->driver, "em28xx", sizeof(cap->driver));
strlcpy(cap->card, em28xx_boards[dev->model].name, sizeof(cap->card));
- usb_make_path(dev->udev, cap->bus_info, sizeof(cap->bus_info));
+ usb_make_path(udev, cap->bus_info, sizeof(cap->bus_info));
if (vdev->vfl_type == VFL_TYPE_GRABBER)
cap->device_caps = V4L2_CAP_READWRITE |
@@ -2048,8 +2055,9 @@ static int em28xx_v4l2_open(struct file *filp)
ret = v4l2_fh_open(filp);
if (ret) {
- em28xx_errdev("%s: v4l2_fh_open() returned error %d\n",
- __func__, ret);
+ dev_err(&dev->intf->dev,
+ "%s: v4l2_fh_open() returned error %d\n",
+ __func__, ret);
mutex_unlock(&dev->lock);
return ret;
}
@@ -2103,7 +2111,7 @@ static int em28xx_v4l2_fini(struct em28xx *dev)
if (v4l2 == NULL)
return 0;
- em28xx_info("Closing video extension\n");
+ dev_info(&dev->intf->dev, "Closing video extension\n");
mutex_lock(&dev->lock);
@@ -2114,18 +2122,18 @@ static int em28xx_v4l2_fini(struct em28xx *dev)
em28xx_v4l2_media_release(dev);
if (video_is_registered(&v4l2->radio_dev)) {
- em28xx_info("V4L2 device %s deregistered\n",
- video_device_node_name(&v4l2->radio_dev));
+ dev_info(&dev->intf->dev, "V4L2 device %s deregistered\n",
+ video_device_node_name(&v4l2->radio_dev));
video_unregister_device(&v4l2->radio_dev);
}
if (video_is_registered(&v4l2->vbi_dev)) {
- em28xx_info("V4L2 device %s deregistered\n",
- video_device_node_name(&v4l2->vbi_dev));
+ dev_info(&dev->intf->dev, "V4L2 device %s deregistered\n",
+ video_device_node_name(&v4l2->vbi_dev));
video_unregister_device(&v4l2->vbi_dev);
}
if (video_is_registered(&v4l2->vdev)) {
- em28xx_info("V4L2 device %s deregistered\n",
- video_device_node_name(&v4l2->vdev));
+ dev_info(&dev->intf->dev, "V4L2 device %s deregistered\n",
+ video_device_node_name(&v4l2->vdev));
video_unregister_device(&v4l2->vdev);
}
@@ -2154,7 +2162,7 @@ static int em28xx_v4l2_suspend(struct em28xx *dev)
if (!dev->has_video)
return 0;
- em28xx_info("Suspending video extension\n");
+ dev_info(&dev->intf->dev, "Suspending video extension\n");
em28xx_stop_urbs(dev);
return 0;
}
@@ -2167,7 +2175,7 @@ static int em28xx_v4l2_resume(struct em28xx *dev)
if (!dev->has_video)
return 0;
- em28xx_info("Resuming video extension\n");
+ dev_info(&dev->intf->dev, "Resuming video extension\n");
/* what do we do here */
return 0;
}
@@ -2181,6 +2189,7 @@ static int em28xx_v4l2_close(struct file *filp)
{
struct em28xx *dev = video_drvdata(filp);
struct em28xx_v4l2 *v4l2 = dev->v4l2;
+ struct usb_device *udev = interface_to_usbdev(dev->intf);
int errCode;
em28xx_videodbg("users=%d\n", v4l2->users);
@@ -2202,10 +2211,11 @@ static int em28xx_v4l2_close(struct file *filp)
/* set alternate 0 */
dev->alt = 0;
em28xx_videodbg("setting alternate 0\n");
- errCode = usb_set_interface(dev->udev, 0, 0);
+ errCode = usb_set_interface(udev, 0, 0);
if (errCode < 0) {
- em28xx_errdev("cannot change alternate number to "
- "0 (error=%i)\n", errCode);
+ dev_err(&dev->intf->dev,
+ "cannot change alternate number to 0 (error=%i)\n",
+ errCode);
}
}
@@ -2338,7 +2348,7 @@ static void em28xx_vdev_init(struct em28xx *dev,
vfd->tvnorms = 0;
snprintf(vfd->name, sizeof(vfd->name), "%s %s",
- dev->name, type_name);
+ dev_name(&dev->intf->dev), type_name);
video_set_drvdata(vfd, dev);
}
@@ -2422,13 +2432,12 @@ static int em28xx_v4l2_init(struct em28xx *dev)
return 0;
}
- em28xx_info("Registering V4L2 extension\n");
+ dev_info(&dev->intf->dev, "Registering V4L2 extension\n");
mutex_lock(&dev->lock);
v4l2 = kzalloc(sizeof(struct em28xx_v4l2), GFP_KERNEL);
- if (v4l2 == NULL) {
- em28xx_info("em28xx_v4l: memory allocation failed\n");
+ if (!v4l2) {
mutex_unlock(&dev->lock);
return -ENOMEM;
}
@@ -2439,9 +2448,10 @@ static int em28xx_v4l2_init(struct em28xx *dev)
#ifdef CONFIG_MEDIA_CONTROLLER
v4l2->v4l2_dev.mdev = dev->media_dev;
#endif
- ret = v4l2_device_register(&dev->udev->dev, &v4l2->v4l2_dev);
+ ret = v4l2_device_register(&dev->intf->dev, &v4l2->v4l2_dev);
if (ret < 0) {
- em28xx_errdev("Call to v4l2_device_register() failed!\n");
+ dev_err(&dev->intf->dev,
+ "Call to v4l2_device_register() failed!\n");
goto err;
}
@@ -2525,8 +2535,9 @@ static int em28xx_v4l2_init(struct em28xx *dev)
/* Configure audio */
ret = em28xx_audio_setup(dev);
if (ret < 0) {
- em28xx_errdev("%s: Error while setting audio - error [%d]!\n",
- __func__, ret);
+ dev_err(&dev->intf->dev,
+ "%s: Error while setting audio - error [%d]!\n",
+ __func__, ret);
goto unregister_dev;
}
if (dev->audio_mode.ac97 != EM28XX_NO_AC97) {
@@ -2553,16 +2564,18 @@ static int em28xx_v4l2_init(struct em28xx *dev)
/* Send a reset to other chips via gpio */
ret = em28xx_write_reg(dev, EM2820_R08_GPIO_CTRL, 0xf7);
if (ret < 0) {
- em28xx_errdev("%s: em28xx_write_reg - msp34xx(1) failed! error [%d]\n",
- __func__, ret);
+ dev_err(&dev->intf->dev,
+ "%s: em28xx_write_reg - msp34xx(1) failed! error [%d]\n",
+ __func__, ret);
goto unregister_dev;
}
msleep(3);
ret = em28xx_write_reg(dev, EM2820_R08_GPIO_CTRL, 0xff);
if (ret < 0) {
- em28xx_errdev("%s: em28xx_write_reg - msp34xx(2) failed! error [%d]\n",
- __func__, ret);
+ dev_err(&dev->intf->dev,
+ "%s: em28xx_write_reg - msp34xx(2) failed! error [%d]\n",
+ __func__, ret);
goto unregister_dev;
}
msleep(3);
@@ -2663,8 +2676,8 @@ static int em28xx_v4l2_init(struct em28xx *dev)
ret = video_register_device(&v4l2->vdev, VFL_TYPE_GRABBER,
video_nr[dev->devno]);
if (ret) {
- em28xx_errdev("unable to register video device (error=%i).\n",
- ret);
+ dev_err(&dev->intf->dev,
+ "unable to register video device (error=%i).\n", ret);
goto unregister_dev;
}
@@ -2693,7 +2706,8 @@ static int em28xx_v4l2_init(struct em28xx *dev)
ret = video_register_device(&v4l2->vbi_dev, VFL_TYPE_VBI,
vbi_nr[dev->devno]);
if (ret < 0) {
- em28xx_errdev("unable to register vbi device\n");
+ dev_err(&dev->intf->dev,
+ "unable to register vbi device\n");
goto unregister_dev;
}
}
@@ -2704,11 +2718,13 @@ static int em28xx_v4l2_init(struct em28xx *dev)
ret = video_register_device(&v4l2->radio_dev, VFL_TYPE_RADIO,
radio_nr[dev->devno]);
if (ret < 0) {
- em28xx_errdev("can't register radio device\n");
+ dev_err(&dev->intf->dev,
+ "can't register radio device\n");
goto unregister_dev;
}
- em28xx_info("Registered radio device as %s\n",
- video_device_node_name(&v4l2->radio_dev));
+ dev_info(&dev->intf->dev,
+ "Registered radio device as %s\n",
+ video_device_node_name(&v4l2->radio_dev));
}
/* Init entities at the Media Controller */
@@ -2717,18 +2733,21 @@ static int em28xx_v4l2_init(struct em28xx *dev)
#ifdef CONFIG_MEDIA_CONTROLLER
ret = v4l2_mc_create_media_graph(dev->media_dev);
if (ret) {
- em28xx_errdev("failed to create media graph\n");
+ dev_err(&dev->intf->dev,
+ "failed to create media graph\n");
em28xx_v4l2_media_release(dev);
goto unregister_dev;
}
#endif
- em28xx_info("V4L2 video device registered as %s\n",
- video_device_node_name(&v4l2->vdev));
+ dev_info(&dev->intf->dev,
+ "V4L2 video device registered as %s\n",
+ video_device_node_name(&v4l2->vdev));
if (video_is_registered(&v4l2->vbi_dev))
- em28xx_info("V4L2 VBI device registered as %s\n",
- video_device_node_name(&v4l2->vbi_dev));
+ dev_info(&dev->intf->dev,
+ "V4L2 VBI device registered as %s\n",
+ video_device_node_name(&v4l2->vbi_dev));
/* Save some power by putting tuner to sleep */
v4l2_device_call_all(&v4l2->v4l2_dev, 0, core, s_power, 0);
@@ -2736,7 +2755,8 @@ static int em28xx_v4l2_init(struct em28xx *dev)
/* initialize videobuf2 stuff */
em28xx_vb2_setup(dev);
- em28xx_info("V4L2 extension successfully initialized\n");
+ dev_info(&dev->intf->dev,
+ "V4L2 extension successfully initialized\n");
kref_get(&dev->ref);
@@ -2745,18 +2765,21 @@ static int em28xx_v4l2_init(struct em28xx *dev)
unregister_dev:
if (video_is_registered(&v4l2->radio_dev)) {
- em28xx_info("V4L2 device %s deregistered\n",
- video_device_node_name(&v4l2->radio_dev));
+ dev_info(&dev->intf->dev,
+ "V4L2 device %s deregistered\n",
+ video_device_node_name(&v4l2->radio_dev));
video_unregister_device(&v4l2->radio_dev);
}
if (video_is_registered(&v4l2->vbi_dev)) {
- em28xx_info("V4L2 device %s deregistered\n",
- video_device_node_name(&v4l2->vbi_dev));
+ dev_info(&dev->intf->dev,
+ "V4L2 device %s deregistered\n",
+ video_device_node_name(&v4l2->vbi_dev));
video_unregister_device(&v4l2->vbi_dev);
}
if (video_is_registered(&v4l2->vdev)) {
- em28xx_info("V4L2 device %s deregistered\n",
- video_device_node_name(&v4l2->vdev));
+ dev_info(&dev->intf->dev,
+ "V4L2 device %s deregistered\n",
+ video_device_node_name(&v4l2->vdev));
video_unregister_device(&v4l2->vdev);
}
diff --git a/drivers/media/usb/em28xx/em28xx.h b/drivers/media/usb/em28xx/em28xx.h
index d148463b22c1..ca59e2d4fccf 100644
--- a/drivers/media/usb/em28xx/em28xx.h
+++ b/drivers/media/usb/em28xx/em28xx.h
@@ -610,7 +610,6 @@ struct em28xx {
struct em28xx_IR *ir;
/* generic device properties */
- char name[30]; /* name (including minor) of the device */
int model; /* index in the device_data struct */
int devno; /* marks the number of this device */
enum em28xx_chip_id chip_id;
@@ -678,7 +677,7 @@ struct em28xx {
spinlock_t slock;
/* usb transfer */
- struct usb_device *udev; /* the usb device */
+ struct usb_interface *intf; /* the usb interface */
u8 ifnum; /* number of the assigned usb interface */
u8 analog_ep_isoc; /* address of isoc endpoint for analog */
u8 analog_ep_bulk; /* address of bulk endpoint for analog */
@@ -797,20 +796,4 @@ void em28xx_free_device(struct kref *ref);
int em28xx_detect_sensor(struct em28xx *dev);
int em28xx_init_camera(struct em28xx *dev);
-/* printk macros */
-
-#define em28xx_err(fmt, arg...) do {\
- printk(KERN_ERR fmt , ##arg); } while (0)
-
-#define em28xx_errdev(fmt, arg...) do {\
- printk(KERN_ERR "%s: "fmt,\
- dev->name , ##arg); } while (0)
-
-#define em28xx_info(fmt, arg...) do {\
- printk(KERN_INFO "%s: "fmt,\
- dev->name , ##arg); } while (0)
-#define em28xx_warn(fmt, arg...) do {\
- printk(KERN_WARNING "%s: "fmt,\
- dev->name , ##arg); } while (0)
-
#endif
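With the em28xx_err/em28xx_info/em28xx_warn wrappers removed from the header, call sites use the generic device-model helpers directly and the message prefix comes from the driver core rather than a private name buffer. Illustrative usage, matching the conversions throughout this patch:

	dev_info(&dev->intf->dev, "board has no eeprom\n");
	dev_warn(&dev->intf->dev, "write to i2c device at 0x%x timed out\n",
		 addr);
	dev_err(&dev->intf->dev, "frontend initialization failed\n");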
diff --git a/drivers/media/usb/go7007/Kconfig b/drivers/media/usb/go7007/Kconfig
index 95a3af644a92..af1d02430931 100644
--- a/drivers/media/usb/go7007/Kconfig
+++ b/drivers/media/usb/go7007/Kconfig
@@ -11,7 +11,7 @@ config VIDEO_GO7007
select VIDEO_TW2804 if MEDIA_SUBDRV_AUTOSELECT
select VIDEO_TW9903 if MEDIA_SUBDRV_AUTOSELECT
select VIDEO_TW9906 if MEDIA_SUBDRV_AUTOSELECT
- select VIDEO_OV7640 if MEDIA_SUBDRV_AUTOSELECT
+ select VIDEO_OV7640 if MEDIA_SUBDRV_AUTOSELECT && MEDIA_CAMERA_SUPPORT
select VIDEO_UDA1342 if MEDIA_SUBDRV_AUTOSELECT
---help---
This is a video4linux driver for the WIS GO7007 MPEG
diff --git a/drivers/media/usb/gspca/gspca.c b/drivers/media/usb/gspca/gspca.c
index af2395a76d8b..fa2cbb981905 100644
--- a/drivers/media/usb/gspca/gspca.c
+++ b/drivers/media/usb/gspca/gspca.c
@@ -201,8 +201,7 @@ static int alloc_and_submit_int_urb(struct gspca_dev *gspca_dev,
buffer_len = le16_to_cpu(ep->wMaxPacketSize);
interval = ep->bInterval;
- PDEBUG(D_CONF, "found int in endpoint: 0x%x, "
- "buffer_len=%u, interval=%u",
+ PDEBUG(D_CONF, "found int in endpoint: 0x%x, buffer_len=%u, interval=%u",
ep->bEndpointAddress, buffer_len, interval);
dev = gspca_dev->dev;
diff --git a/drivers/media/usb/gspca/jl2005bcd.c b/drivers/media/usb/gspca/jl2005bcd.c
index ac295f04bd18..b12ecb72df4c 100644
--- a/drivers/media/usb/gspca/jl2005bcd.c
+++ b/drivers/media/usb/gspca/jl2005bcd.c
@@ -299,10 +299,7 @@ static int jl2005c_stream_start_cif_small(struct gspca_dev *gspca_dev)
static int jl2005c_stop(struct gspca_dev *gspca_dev)
{
- int retval;
-
- retval = jl2005c_write_reg(gspca_dev, 0x07, 0x00);
- return retval;
+ return jl2005c_write_reg(gspca_dev, 0x07, 0x00);
}
/*
diff --git a/drivers/media/usb/gspca/m5602/m5602_core.c b/drivers/media/usb/gspca/m5602/m5602_core.c
index e4a0658e3f83..f1dcd9021983 100644
--- a/drivers/media/usb/gspca/m5602/m5602_core.c
+++ b/drivers/media/usb/gspca/m5602/m5602_core.c
@@ -154,8 +154,8 @@ int m5602_read_sensor(struct sd *sd, const u8 address,
err = m5602_read_bridge(sd, M5602_XB_I2C_DATA, &(i2c_data[i]));
- PDEBUG(D_CONF, "Reading sensor register "
- "0x%x containing 0x%x ", address, *i2c_data);
+ PDEBUG(D_CONF, "Reading sensor register 0x%x containing 0x%x ",
+ address, *i2c_data);
}
return err;
}
@@ -441,13 +441,10 @@ MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
module_param(force_sensor, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(force_sensor,
- "forces detection of a sensor, "
- "1 = OV9650, 2 = S5K83A, 3 = S5K4AA, "
- "4 = MT9M111, 5 = PO1030, 6 = OV7660");
+ "forces detection of a sensor, 1 = OV9650, 2 = S5K83A, 3 = S5K4AA, 4 = MT9M111, 5 = PO1030, 6 = OV7660");
module_param(dump_bridge, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(dump_bridge, "Dumps all usb bridge registers at startup");
module_param(dump_sensor, bool, S_IRUGO | S_IWUSR);
-MODULE_PARM_DESC(dump_sensor, "Dumps all usb sensor registers "
- "at startup providing a sensor is found");
+MODULE_PARM_DESC(dump_sensor, "Dumps all usb sensor registers at startup providing a sensor is found");
diff --git a/drivers/media/usb/gspca/mr97310a.c b/drivers/media/usb/gspca/mr97310a.c
index f006e29ca019..6dfb364094ec 100644
--- a/drivers/media/usb/gspca/mr97310a.c
+++ b/drivers/media/usb/gspca/mr97310a.c
@@ -72,8 +72,7 @@
#define MR97310A_MIN_CLOCKDIV_MAX 8
#define MR97310A_MIN_CLOCKDIV_DEFAULT 3
-MODULE_AUTHOR("Kyle Guinn <elyk03@gmail.com>,"
- "Theodore Kilgore <kilgota@auburn.edu>");
+MODULE_AUTHOR("Kyle Guinn <elyk03@gmail.com>,Theodore Kilgore <kilgota@auburn.edu>");
MODULE_DESCRIPTION("GSPCA/Mars-Semi MR97310A USB Camera Driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/media/usb/gspca/ov519.c b/drivers/media/usb/gspca/ov519.c
index 965372a5ff2f..4dbca54cf2a8 100644
--- a/drivers/media/usb/gspca/ov519.c
+++ b/drivers/media/usb/gspca/ov519.c
@@ -4326,8 +4326,7 @@ static void ov511_pkt_scan(struct gspca_dev *gspca_dev,
/* Frame end */
if ((in[9] + 1) * 8 != gspca_dev->pixfmt.width ||
(in[10] + 1) * 8 != gspca_dev->pixfmt.height) {
- PERR("Invalid frame size, got: %dx%d,"
- " requested: %dx%d\n",
+ PERR("Invalid frame size, got: %dx%d, requested: %dx%d\n",
(in[9] + 1) * 8, (in[10] + 1) * 8,
gspca_dev->pixfmt.width,
gspca_dev->pixfmt.height);
diff --git a/drivers/media/usb/gspca/pac207.c b/drivers/media/usb/gspca/pac207.c
index 07529e5a0c56..51e11248bbb8 100644
--- a/drivers/media/usb/gspca/pac207.c
+++ b/drivers/media/usb/gspca/pac207.c
@@ -179,8 +179,8 @@ static int sd_config(struct gspca_dev *gspca_dev,
}
PDEBUG(D_PROBE,
- "Pixart PAC207BCA Image Processor and Control Chip detected"
- " (vid/pid 0x%04X:0x%04X)", id->idVendor, id->idProduct);
+ "Pixart PAC207BCA Image Processor and Control Chip detected (vid/pid 0x%04X:0x%04X)",
+ id->idVendor, id->idProduct);
cam = &gspca_dev->cam;
cam->cam_mode = sif_mode;
diff --git a/drivers/media/usb/gspca/pac7302.c b/drivers/media/usb/gspca/pac7302.c
index 8b08bd0172f4..be07a24c4518 100644
--- a/drivers/media/usb/gspca/pac7302.c
+++ b/drivers/media/usb/gspca/pac7302.c
@@ -105,8 +105,7 @@
#define PAC7302_EXPOSURE_DEFAULT 66 /* 33 ms / 30 fps */
#define PAC7302_EXPOSURE_KNEE 133 /* 66 ms / 15 fps */
-MODULE_AUTHOR("Jean-Francois Moine <http://moinejf.free.fr>, "
- "Thomas Kaiser thomas@kaiser-linux.li");
+MODULE_AUTHOR("Jean-Francois Moine <http://moinejf.free.fr>, Thomas Kaiser thomas@kaiser-linux.li");
MODULE_DESCRIPTION("Pixart PAC7302");
MODULE_LICENSE("GPL");
diff --git a/drivers/media/usb/gspca/sn9c20x.c b/drivers/media/usb/gspca/sn9c20x.c
index 10269dad9d20..e7430b06526a 100644
--- a/drivers/media/usb/gspca/sn9c20x.c
+++ b/drivers/media/usb/gspca/sn9c20x.c
@@ -29,8 +29,7 @@
#include <linux/dmi.h>
-MODULE_AUTHOR("Brian Johnson <brijohn@gmail.com>, "
- "microdia project <microdia@googlegroups.com>");
+MODULE_AUTHOR("Brian Johnson <brijohn@gmail.com>, microdia project <microdia@googlegroups.com>");
MODULE_DESCRIPTION("GSPCA/SN9C20X USB Camera Driver");
MODULE_LICENSE("GPL");
@@ -1948,8 +1947,7 @@ static int sd_isoc_init(struct gspca_dev *gspca_dev)
intf = usb_ifnum_to_if(gspca_dev->dev, gspca_dev->iface);
if (intf->num_altsetting != 9) {
- pr_warn("sn9c20x camera with unknown number of alt "
- "settings (%d), please report!\n",
+ pr_warn("sn9c20x camera with unknown number of alt settings (%d), please report!\n",
intf->num_altsetting);
gspca_dev->alt = intf->num_altsetting;
return 0;
diff --git a/drivers/media/usb/gspca/spca506.c b/drivers/media/usb/gspca/spca506.c
index bcd2c04c770e..ee84863d27d4 100644
--- a/drivers/media/usb/gspca/spca506.c
+++ b/drivers/media/usb/gspca/spca506.c
@@ -581,8 +581,7 @@ static const struct sd_desc sd_desc = {
/* -- module initialisation -- */
static const struct usb_device_id device_table[] = {
{USB_DEVICE(0x06e1, 0xa190)},
-/*fixme: may be IntelPCCameraPro BRIDGE_SPCA505
- {USB_DEVICE(0x0733, 0x0430)}, */
+/* {USB_DEVICE(0x0733, 0x0430)}, FIXME: may be IntelPCCameraPro BRIDGE_SPCA505 */
{USB_DEVICE(0x0734, 0x043b)},
{USB_DEVICE(0x99fa, 0x8988)},
{}
diff --git a/drivers/media/usb/gspca/sq905.c b/drivers/media/usb/gspca/sq905.c
index a7ae0ec9fa91..9424c33f0ddb 100644
--- a/drivers/media/usb/gspca/sq905.c
+++ b/drivers/media/usb/gspca/sq905.c
@@ -41,8 +41,7 @@
#include <linux/slab.h>
#include "gspca.h"
-MODULE_AUTHOR("Adam Baker <linux@baker-net.org.uk>, "
- "Theodore Kilgore <kilgota@auburn.edu>");
+MODULE_AUTHOR("Adam Baker <linux@baker-net.org.uk>, Theodore Kilgore <kilgota@auburn.edu>");
MODULE_DESCRIPTION("GSPCA/SQ905 USB Camera Driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/media/usb/gspca/sq905c.c b/drivers/media/usb/gspca/sq905c.c
index aa21edc9502d..6c45dcc44eb0 100644
--- a/drivers/media/usb/gspca/sq905c.c
+++ b/drivers/media/usb/gspca/sq905c.c
@@ -210,8 +210,8 @@ static int sd_config(struct gspca_dev *gspca_dev,
int ret;
PDEBUG(D_PROBE,
- "SQ9050 camera detected"
- " (vid/pid 0x%04X:0x%04X)", id->idVendor, id->idProduct);
+ "SQ9050 camera detected (vid/pid 0x%04X:0x%04X)",
+ id->idVendor, id->idProduct);
ret = sq905c_command(gspca_dev, SQ905C_GET_ID, 0);
if (ret < 0) {
@@ -257,11 +257,8 @@ static void sd_stop0(struct gspca_dev *gspca_dev)
/* this function is called at probe and resume time */
static int sd_init(struct gspca_dev *gspca_dev)
{
- int ret;
-
/* connect to the camera and reset it. */
- ret = sq905c_command(gspca_dev, SQ905C_CLEAR, 0);
- return ret;
+ return sq905c_command(gspca_dev, SQ905C_CLEAR, 0);
}
/* Set up for getting frames. */
diff --git a/drivers/media/usb/gspca/stv06xx/stv06xx.c b/drivers/media/usb/gspca/stv06xx/stv06xx.c
index 6ac93d8db427..fef7a784b879 100644
--- a/drivers/media/usb/gspca/stv06xx/stv06xx.c
+++ b/drivers/media/usb/gspca/stv06xx/stv06xx.c
@@ -412,8 +412,7 @@ static void stv06xx_pkt_scan(struct gspca_dev *gspca_dev,
len -= 4;
if (len < chunk_len) {
- PERR("URB packet length is smaller"
- " than the specified chunk length");
+ PERR("URB packet length is smaller than the specified chunk length");
gspca_dev->last_packet_type = DISCARD_PACKET;
return;
}
@@ -455,8 +454,7 @@ frame_data:
sd->to_skip = gspca_dev->pixfmt.width * 4;
if (chunk_len)
- PERR("Chunk length is "
- "non-zero on a SOF");
+ PERR("Chunk length is non-zero on a SOF");
break;
case 0x8002:
@@ -469,8 +467,7 @@ frame_data:
NULL, 0);
if (chunk_len)
- PERR("Chunk length is "
- "non-zero on a EOF");
+ PERR("Chunk length is non-zero on a EOF");
break;
case 0x0005:
@@ -582,18 +579,12 @@ static int stv06xx_config(struct gspca_dev *gspca_dev,
/* -- module initialisation -- */
static const struct usb_device_id device_table[] = {
- /* QuickCam Express */
- {USB_DEVICE(0x046d, 0x0840), .driver_info = BRIDGE_STV600 },
- /* LEGO cam / QuickCam Web */
- {USB_DEVICE(0x046d, 0x0850), .driver_info = BRIDGE_STV610 },
- /* Dexxa WebCam USB */
- {USB_DEVICE(0x046d, 0x0870), .driver_info = BRIDGE_STV602 },
- /* QuickCam Messenger */
- {USB_DEVICE(0x046D, 0x08F0), .driver_info = BRIDGE_ST6422 },
- /* QuickCam Communicate */
- {USB_DEVICE(0x046D, 0x08F5), .driver_info = BRIDGE_ST6422 },
- /* QuickCam Messenger (new) */
- {USB_DEVICE(0x046D, 0x08F6), .driver_info = BRIDGE_ST6422 },
+ {USB_DEVICE(0x046d, 0x0840), .driver_info = BRIDGE_STV600 }, /* QuickCam Express */
+ {USB_DEVICE(0x046d, 0x0850), .driver_info = BRIDGE_STV610 }, /* LEGO cam / QuickCam Web */
+ {USB_DEVICE(0x046d, 0x0870), .driver_info = BRIDGE_STV602 }, /* Dexxa WebCam USB */
+ {USB_DEVICE(0x046D, 0x08F0), .driver_info = BRIDGE_ST6422 }, /* QuickCam Messenger */
+ {USB_DEVICE(0x046D, 0x08F5), .driver_info = BRIDGE_ST6422 }, /* QuickCam Communicate */
+ {USB_DEVICE(0x046D, 0x08F6), .driver_info = BRIDGE_ST6422 }, /* QuickCam Messenger (new) */
{}
};
MODULE_DEVICE_TABLE(usb, device_table);
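Each entry in the table above carries its bridge type in .driver_info; at probe time the matched entry is passed back so the driver can branch on it. A sketch of that mechanism (demo_probe is illustrative, not stv06xx code):

static int demo_probe(struct usb_interface *intf,
		      const struct usb_device_id *id)
{
	unsigned long bridge = id->driver_info;	/* e.g. BRIDGE_STV600 */

	dev_info(&intf->dev, "bridge type %lu\n", bridge);
	return 0;
}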
diff --git a/drivers/media/usb/gspca/sunplus.c b/drivers/media/usb/gspca/sunplus.c
index 46c9f2229a18..38dc9e7aa313 100644
--- a/drivers/media/usb/gspca/sunplus.c
+++ b/drivers/media/usb/gspca/sunplus.c
@@ -368,8 +368,7 @@ static void spca504_read_info(struct gspca_dev *gspca_dev)
info[i] = gspca_dev->usb_buf[0];
}
PDEBUG(D_STREAM,
- "Read info: %d %d %d %d %d %d."
- " Should be 1,0,2,2,0,0",
+ "Read info: %d %d %d %d %d %d. Should be 1,0,2,2,0,0",
info[0], info[1], info[2],
info[3], info[4], info[5]);
}
diff --git a/drivers/media/usb/gspca/topro.c b/drivers/media/usb/gspca/topro.c
index 15eb069ab60b..983fc6b500af 100644
--- a/drivers/media/usb/gspca/topro.c
+++ b/drivers/media/usb/gspca/topro.c
@@ -24,8 +24,7 @@
#include "gspca.h"
MODULE_DESCRIPTION("Topro TP6800/6810 gspca webcam driver");
-MODULE_AUTHOR("Jean-Francois Moine <http://moinejf.free.fr>, "
- "Anders Blomdell <anders.blomdell@control.lth.se>");
+MODULE_AUTHOR("Jean-Francois Moine <http://moinejf.free.fr>, Anders Blomdell <anders.blomdell@control.lth.se>");
MODULE_LICENSE("GPL");
static int force_sensor = -1;
diff --git a/drivers/media/usb/gspca/zc3xx.c b/drivers/media/usb/gspca/zc3xx.c
index 5f7254d2bc9a..d5d8c7e81762 100644
--- a/drivers/media/usb/gspca/zc3xx.c
+++ b/drivers/media/usb/gspca/zc3xx.c
@@ -25,8 +25,7 @@
#include "gspca.h"
#include "jpeg.h"
-MODULE_AUTHOR("Jean-Francois Moine <http://moinejf.free.fr>, "
- "Serge A. Suchkov <Serge.A.S@tochka.ru>");
+MODULE_AUTHOR("Jean-Francois Moine <http://moinejf.free.fr>, Serge A. Suchkov <Serge.A.S@tochka.ru>");
MODULE_DESCRIPTION("GSPCA ZC03xx/VC3xx USB Camera Driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/media/usb/hdpvr/hdpvr-core.c b/drivers/media/usb/hdpvr/hdpvr-core.c
index a61d8fd63c12..15f016ad5b89 100644
--- a/drivers/media/usb/hdpvr/hdpvr-core.c
+++ b/drivers/media/usb/hdpvr/hdpvr-core.c
@@ -41,13 +41,11 @@ MODULE_PARM_DESC(hdpvr_debug, "enable debugging output");
static uint default_video_input = HDPVR_VIDEO_INPUTS;
module_param(default_video_input, uint, S_IRUGO|S_IWUSR);
-MODULE_PARM_DESC(default_video_input, "default video input: 0=Component / "
- "1=S-Video / 2=Composite");
+MODULE_PARM_DESC(default_video_input, "default video input: 0=Component / 1=S-Video / 2=Composite");
static uint default_audio_input = HDPVR_AUDIO_INPUTS;
module_param(default_audio_input, uint, S_IRUGO|S_IWUSR);
-MODULE_PARM_DESC(default_audio_input, "default audio input: 0=RCA back / "
- "1=RCA front / 2=S/PDIF");
+MODULE_PARM_DESC(default_audio_input, "default audio input: 0=RCA back / 1=RCA front / 2=S/PDIF");
static bool boost_audio;
module_param(boost_audio, bool, S_IRUGO|S_IWUSR);
@@ -165,8 +163,7 @@ static int device_authorization(struct hdpvr_device *dev)
dev->flags |= HDPVR_FLAG_AC3_CAP;
break;
default:
- v4l2_info(&dev->v4l2_dev, "untested firmware, the driver might"
- " not work.\n");
+ v4l2_info(&dev->v4l2_dev, "untested firmware, the driver might not work.\n");
if (dev->fw_ver >= HDPVR_FIRMWARE_VERSION_AC3)
dev->flags |= HDPVR_FLAG_AC3_CAP;
else
diff --git a/drivers/media/usb/hdpvr/hdpvr-i2c.c b/drivers/media/usb/hdpvr/hdpvr-i2c.c
index 9b641c4d4431..fcab55038d99 100644
--- a/drivers/media/usb/hdpvr/hdpvr-i2c.c
+++ b/drivers/media/usb/hdpvr/hdpvr-i2c.c
@@ -145,15 +145,14 @@ static int hdpvr_transfer(struct i2c_adapter *i2c_adapter, struct i2c_msg *msgs,
msgs[0].len);
} else if (num == 2) {
if (msgs[0].addr != msgs[1].addr) {
- v4l2_warn(&dev->v4l2_dev, "refusing 2-phase i2c xfer "
- "with conflicting target addresses\n");
+ v4l2_warn(&dev->v4l2_dev, "refusing 2-phase i2c xfer with conflicting target addresses\n");
retval = -EINVAL;
goto out;
}
if ((msgs[0].flags & I2C_M_RD) || !(msgs[1].flags & I2C_M_RD)) {
- v4l2_warn(&dev->v4l2_dev, "refusing complex xfer with "
- "r0=%d, r1=%d\n", msgs[0].flags & I2C_M_RD,
+ v4l2_warn(&dev->v4l2_dev, "refusing complex xfer with r0=%d, r1=%d\n",
+ msgs[0].flags & I2C_M_RD,
msgs[1].flags & I2C_M_RD);
retval = -EINVAL;
goto out;
diff --git a/drivers/media/usb/hdpvr/hdpvr-video.c b/drivers/media/usb/hdpvr/hdpvr-video.c
index 474c11e1d495..7fb036d6a86e 100644
--- a/drivers/media/usb/hdpvr/hdpvr-video.c
+++ b/drivers/media/usb/hdpvr/hdpvr-video.c
@@ -336,9 +336,7 @@ static int hdpvr_stop_streaming(struct hdpvr_device *dev)
buf = kmalloc(dev->bulk_in_size, GFP_KERNEL);
if (!buf)
- v4l2_err(&dev->v4l2_dev, "failed to allocate temporary buffer "
- "for emptying the internal device buffer. "
- "Next capture start will be slow\n");
+ v4l2_err(&dev->v4l2_dev, "failed to allocate temporary buffer for emptying the internal device buffer. Next capture start will be slow\n");
dev->status = STATUS_SHUTTING_DOWN;
hdpvr_config_call(dev, CTRL_STOP_STREAMING_VALUE, 0x00);
@@ -451,6 +449,7 @@ static ssize_t hdpvr_read(struct file *file, char __user *buffer, size_t count,
if (buf->status != BUFSTAT_READY &&
dev->status != STATUS_DISCONNECTED) {
+ int err;
/* return nonblocking */
if (file->f_flags & O_NONBLOCK) {
if (!ret)
@@ -458,9 +457,24 @@ static ssize_t hdpvr_read(struct file *file, char __user *buffer, size_t count,
goto err;
}
- if (wait_event_interruptible(dev->wait_data,
- buf->status == BUFSTAT_READY))
- return -ERESTARTSYS;
+ err = wait_event_interruptible_timeout(dev->wait_data,
+ buf->status == BUFSTAT_READY,
+ msecs_to_jiffies(1000));
+ if (err < 0) {
+ ret = err;
+ goto err;
+ }
+ if (!err) {
+ v4l2_dbg(MSG_INFO, hdpvr_debug, &dev->v4l2_dev,
+ "timeout: restart streaming\n");
+ hdpvr_stop_streaming(dev);
+ msleep(4000); /* give the device time to settle before restarting */
+ err = hdpvr_start_streaming(dev);
+ if (err) {
+ ret = err;
+ goto err;
+ }
+ }
}
if (buf->status != BUFSTAT_READY)
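The hunk above replaces an unbounded wait_event_interruptible() with wait_event_interruptible_timeout(), whose return value is three-way. A minimal sketch of that contract (wq, cond and restart_stream are placeholders):

	long t = wait_event_interruptible_timeout(wq, cond,
						  msecs_to_jiffies(1000));
	if (t < 0)
		return t;		/* interrupted by a signal */
	if (t == 0)
		restart_stream();	/* timed out: recover and retry */
	/* t > 0: cond became true before the timeout expired */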
diff --git a/drivers/staging/media/pulse8-cec/Kconfig b/drivers/media/usb/pulse8-cec/Kconfig
index c6aa2d1c9df0..6ffc407de62f 100644
--- a/drivers/staging/media/pulse8-cec/Kconfig
+++ b/drivers/media/usb/pulse8-cec/Kconfig
@@ -1,6 +1,6 @@
config USB_PULSE8_CEC
tristate "Pulse Eight HDMI CEC"
- depends on USB_ACM && MEDIA_CEC
+ depends on USB_ACM && MEDIA_CEC_SUPPORT
select SERIO
select SERIO_SERPORT
---help---
diff --git a/drivers/staging/media/pulse8-cec/Makefile b/drivers/media/usb/pulse8-cec/Makefile
index 9800690bc25a..9800690bc25a 100644
--- a/drivers/staging/media/pulse8-cec/Makefile
+++ b/drivers/media/usb/pulse8-cec/Makefile
diff --git a/drivers/staging/media/pulse8-cec/pulse8-cec.c b/drivers/media/usb/pulse8-cec/pulse8-cec.c
index 1732c3857b8e..7c18daeb0ade 100644
--- a/drivers/staging/media/pulse8-cec/pulse8-cec.c
+++ b/drivers/media/usb/pulse8-cec/pulse8-cec.c
@@ -375,27 +375,35 @@ static int pulse8_setup(struct pulse8 *pulse8, struct serio *serio,
switch (log_addrs->primary_device_type[0]) {
case CEC_OP_PRIM_DEVTYPE_TV:
log_addrs->log_addr_type[0] = CEC_LOG_ADDR_TYPE_TV;
+ log_addrs->all_device_types[0] = CEC_OP_ALL_DEVTYPE_TV;
break;
case CEC_OP_PRIM_DEVTYPE_RECORD:
log_addrs->log_addr_type[0] = CEC_LOG_ADDR_TYPE_RECORD;
+ log_addrs->all_device_types[0] = CEC_OP_ALL_DEVTYPE_RECORD;
break;
case CEC_OP_PRIM_DEVTYPE_TUNER:
log_addrs->log_addr_type[0] = CEC_LOG_ADDR_TYPE_TUNER;
+ log_addrs->all_device_types[0] = CEC_OP_ALL_DEVTYPE_TUNER;
break;
case CEC_OP_PRIM_DEVTYPE_PLAYBACK:
log_addrs->log_addr_type[0] = CEC_LOG_ADDR_TYPE_PLAYBACK;
+ log_addrs->all_device_types[0] = CEC_OP_ALL_DEVTYPE_PLAYBACK;
break;
case CEC_OP_PRIM_DEVTYPE_AUDIOSYSTEM:
log_addrs->log_addr_type[0] = CEC_LOG_ADDR_TYPE_PLAYBACK;
+ log_addrs->all_device_types[0] = CEC_OP_ALL_DEVTYPE_AUDIOSYSTEM;
break;
case CEC_OP_PRIM_DEVTYPE_SWITCH:
log_addrs->log_addr_type[0] = CEC_LOG_ADDR_TYPE_UNREGISTERED;
+ log_addrs->all_device_types[0] = CEC_OP_ALL_DEVTYPE_SWITCH;
break;
case CEC_OP_PRIM_DEVTYPE_PROCESSOR:
log_addrs->log_addr_type[0] = CEC_LOG_ADDR_TYPE_SPECIFIC;
+ log_addrs->all_device_types[0] = CEC_OP_ALL_DEVTYPE_SWITCH;
break;
default:
log_addrs->log_addr_type[0] = CEC_LOG_ADDR_TYPE_UNREGISTERED;
+ log_addrs->all_device_types[0] = CEC_OP_ALL_DEVTYPE_SWITCH;
dev_info(pulse8->dev, "Unknown Primary Device Type: %d\n",
log_addrs->primary_device_type[0]);
break;
@@ -651,7 +659,7 @@ static int pulse8_connect(struct serio *serio, struct serio_driver *drv)
pulse8->serio = serio;
pulse8->adap = cec_allocate_adapter(&pulse8_cec_adap_ops, pulse8,
- "HDMI CEC", caps, 1, &serio->dev);
+ "HDMI CEC", caps, 1);
err = PTR_ERR_OR_ZERO(pulse8->adap);
if (err < 0)
goto free_device;
@@ -671,7 +679,7 @@ static int pulse8_connect(struct serio *serio, struct serio_driver *drv)
if (err)
goto close_serio;
- err = cec_register_adapter(pulse8->adap);
+ err = cec_register_adapter(pulse8->adap, &serio->dev);
if (err < 0)
goto close_serio;
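These hunks track a CEC core API change: the parent struct device is no longer passed to cec_allocate_adapter() but to cec_register_adapter(), separating allocation from binding the adapter to a device. A hedged probe-side sketch of the new calling convention (my_cec_adap_ops, priv and caps stand in for driver specifics):

	struct cec_adapter *adap;
	int err;

	adap = cec_allocate_adapter(&my_cec_adap_ops, priv, "HDMI CEC",
				    caps, 1 /* number of logical addresses */);
	err = PTR_ERR_OR_ZERO(adap);
	if (err)
		return err;

	err = cec_register_adapter(adap, &serio->dev);	/* device bound here */
	if (err) {
		cec_delete_adapter(adap);
		return err;
	}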
diff --git a/drivers/media/usb/pvrusb2/pvrusb2-audio.c b/drivers/media/usb/pvrusb2/pvrusb2-audio.c
index 5f953d837bf1..3bac50a248d4 100644
--- a/drivers/media/usb/pvrusb2/pvrusb2-audio.c
+++ b/drivers/media/usb/pvrusb2/pvrusb2-audio.c
@@ -74,9 +74,7 @@ void pvr2_msp3400_subdev_update(struct pvr2_hdw *hdw, struct v4l2_subdev *sd)
input = sp->def[hdw->input_val];
} else {
pvr2_trace(PVR2_TRACE_ERROR_LEGS,
- "*** WARNING *** subdev msp3400 set_input:"
- " Invalid routing scheme (%u)"
- " and/or input (%d)",
+ "*** WARNING *** subdev msp3400 set_input: Invalid routing scheme (%u) and/or input (%d)",
sid, hdw->input_val);
return;
}
diff --git a/drivers/media/usb/pvrusb2/pvrusb2-cs53l32a.c b/drivers/media/usb/pvrusb2/pvrusb2-cs53l32a.c
index f82f0f0f2c04..7f29a0464f36 100644
--- a/drivers/media/usb/pvrusb2/pvrusb2-cs53l32a.c
+++ b/drivers/media/usb/pvrusb2/pvrusb2-cs53l32a.c
@@ -72,9 +72,7 @@ void pvr2_cs53l32a_subdev_update(struct pvr2_hdw *hdw, struct v4l2_subdev *sd)
(hdw->input_val < 0) ||
(hdw->input_val >= sp->cnt)) {
pvr2_trace(PVR2_TRACE_ERROR_LEGS,
- "*** WARNING *** subdev v4l2 set_input:"
- " Invalid routing scheme (%u)"
- " and/or input (%d)",
+ "*** WARNING *** subdev v4l2 set_input: Invalid routing scheme (%u) and/or input (%d)",
sid, hdw->input_val);
return;
}
diff --git a/drivers/media/usb/pvrusb2/pvrusb2-cx2584x-v4l.c b/drivers/media/usb/pvrusb2/pvrusb2-cx2584x-v4l.c
index 7d675fae1846..30eef97ef2ef 100644
--- a/drivers/media/usb/pvrusb2/pvrusb2-cx2584x-v4l.c
+++ b/drivers/media/usb/pvrusb2/pvrusb2-cx2584x-v4l.c
@@ -137,9 +137,7 @@ void pvr2_cx25840_subdev_update(struct pvr2_hdw *hdw, struct v4l2_subdev *sd)
(hdw->input_val < 0) ||
(hdw->input_val >= sp->cnt)) {
pvr2_trace(PVR2_TRACE_ERROR_LEGS,
- "*** WARNING *** subdev cx2584x set_input:"
- " Invalid routing scheme (%u)"
- " and/or input (%d)",
+ "*** WARNING *** subdev cx2584x set_input: Invalid routing scheme (%u) and/or input (%d)",
sid, hdw->input_val);
return;
}
diff --git a/drivers/media/usb/pvrusb2/pvrusb2-debugifc.c b/drivers/media/usb/pvrusb2/pvrusb2-debugifc.c
index e4022bcb155b..58ec706ebdb3 100644
--- a/drivers/media/usb/pvrusb2/pvrusb2-debugifc.c
+++ b/drivers/media/usb/pvrusb2/pvrusb2-debugifc.c
@@ -176,9 +176,7 @@ int pvr2_debugifc_print_status(struct pvr2_hdw *hdw,
pvr2_stream_get_stats(sp, &stats, 0);
ccnt = scnprintf(
buf,acnt,
- "Bytes streamed=%u"
- " URBs: queued=%u idle=%u ready=%u"
- " processed=%u failed=%u\n",
+ "Bytes streamed=%u URBs: queued=%u idle=%u ready=%u processed=%u failed=%u\n",
stats.bytes_processed,
stats.buffers_in_queue,
stats.buffers_in_idle,
diff --git a/drivers/media/usb/pvrusb2/pvrusb2-eeprom.c b/drivers/media/usb/pvrusb2/pvrusb2-eeprom.c
index e1907cd0c3b7..276b17fb9aad 100644
--- a/drivers/media/usb/pvrusb2/pvrusb2-eeprom.c
+++ b/drivers/media/usb/pvrusb2/pvrusb2-eeprom.c
@@ -56,8 +56,7 @@ static u8 *pvr2_eeprom_fetch(struct pvr2_hdw *hdw)
eeprom = kmalloc(EEPROM_SIZE,GFP_KERNEL);
if (!eeprom) {
pvr2_trace(PVR2_TRACE_ERROR_LEGS,
- "Failed to allocate memory"
- " required to read eeprom");
+ "Failed to allocate memory required to read eeprom");
return NULL;
}
@@ -74,8 +73,8 @@ static u8 *pvr2_eeprom_fetch(struct pvr2_hdw *hdw)
strange but it's what they do) */
mode16 = (addr & 1);
eepromSize = (mode16 ? 4096 : 256);
- trace_eeprom("Examining %d byte eeprom at location 0x%x"
- " using %d bit addressing",eepromSize,addr,
+ trace_eeprom("Examining %d byte eeprom at location 0x%x using %d bit addressing",
+ eepromSize, addr,
mode16 ? 16 : 8);
msg[0].addr = addr;
diff --git a/drivers/media/usb/pvrusb2/pvrusb2-encoder.c b/drivers/media/usb/pvrusb2/pvrusb2-encoder.c
index 593b3e9b6bfd..f0483621d2a3 100644
--- a/drivers/media/usb/pvrusb2/pvrusb2-encoder.c
+++ b/drivers/media/usb/pvrusb2/pvrusb2-encoder.c
@@ -188,9 +188,7 @@ static int pvr2_encoder_cmd(void *ctxt,
if (arg_cnt_send > (ARRAY_SIZE(wrData) - 4)) {
pvr2_trace(
PVR2_TRACE_ERROR_LEGS,
- "Failed to write cx23416 command"
- " - too many input arguments"
- " (was given %u limit %lu)",
+ "Failed to write cx23416 command - too many input arguments (was given %u limit %lu)",
arg_cnt_send, (long unsigned) ARRAY_SIZE(wrData) - 4);
return -EINVAL;
}
@@ -198,9 +196,7 @@ static int pvr2_encoder_cmd(void *ctxt,
if (arg_cnt_recv > (ARRAY_SIZE(rdData) - 4)) {
pvr2_trace(
PVR2_TRACE_ERROR_LEGS,
- "Failed to write cx23416 command"
- " - too many return arguments"
- " (was given %u limit %lu)",
+ "Failed to write cx23416 command - too many return arguments (was given %u limit %lu)",
arg_cnt_recv, (long unsigned) ARRAY_SIZE(rdData) - 4);
return -EINVAL;
}
@@ -248,14 +244,12 @@ static int pvr2_encoder_cmd(void *ctxt,
retry_flag = !0;
pvr2_trace(
PVR2_TRACE_ERROR_LEGS,
- "Encoder timed out waiting for us"
- "; arranging to retry");
+ "Encoder timed out waiting for us; arranging to retry");
} else {
pvr2_trace(
PVR2_TRACE_ERROR_LEGS,
- "***WARNING*** device's encoder"
- " appears to be stuck"
- " (status=0x%08x)",rdData[0]);
+ "***WARNING*** device's encoder appears to be stuck (status=0x%08x)",
+ rdData[0]);
}
pvr2_trace(
PVR2_TRACE_ERROR_LEGS,
@@ -293,11 +287,7 @@ static int pvr2_encoder_cmd(void *ctxt,
}
pvr2_trace(
PVR2_TRACE_ERROR_LEGS,
- "Giving up on command."
- " This is normally recovered via a firmware"
- " reload and re-initialization; concern"
- " is only warranted if this happens repeatedly"
- " and rapidly.");
+ "Giving up on command. This is normally recovered via a firmware reload and re-initialization; concern is only warranted if this happens repeatedly and rapidly.");
break;
}
wrData[0] = 0x7;
@@ -325,9 +315,7 @@ static int pvr2_encoder_vcmd(struct pvr2_hdw *hdw, int cmd,
if (args > ARRAY_SIZE(data)) {
pvr2_trace(
PVR2_TRACE_ERROR_LEGS,
- "Failed to write cx23416 command"
- " - too many arguments"
- " (was given %u limit %lu)",
+ "Failed to write cx23416 command - too many arguments (was given %u limit %lu)",
args, (long unsigned) ARRAY_SIZE(data));
return -EINVAL;
}
@@ -433,8 +421,7 @@ int pvr2_encoder_configure(struct pvr2_hdw *hdw)
{
int ret;
int val;
- pvr2_trace(PVR2_TRACE_ENCODER,"pvr2_encoder_configure"
- " (cx2341x module)");
+ pvr2_trace(PVR2_TRACE_ENCODER, "pvr2_encoder_configure (cx2341x module)");
hdw->enc_ctl_state.port = CX2341X_PORT_STREAMING;
hdw->enc_ctl_state.width = hdw->res_hor_val;
hdw->enc_ctl_state.height = hdw->res_ver_val;
diff --git a/drivers/media/usb/pvrusb2/pvrusb2-hdw.c b/drivers/media/usb/pvrusb2/pvrusb2-hdw.c
index 1eb4f7ba2967..e3ed8ffee9f7 100644
--- a/drivers/media/usb/pvrusb2/pvrusb2-hdw.c
+++ b/drivers/media/usb/pvrusb2/pvrusb2-hdw.c
@@ -1371,8 +1371,7 @@ static int pvr2_locate_firmware(struct pvr2_hdw *hdw,
fwnames[idx],
&hdw->usb_dev->dev);
if (!ret) {
- trace_firmware("Located %s firmware: %s;"
- " uploading...",
+ trace_firmware("Located %s firmware: %s; uploading...",
fwtypename,
fwnames[idx]);
return idx;
@@ -1383,21 +1382,17 @@ static int pvr2_locate_firmware(struct pvr2_hdw *hdw,
return ret;
}
pvr2_trace(PVR2_TRACE_ERROR_LEGS,
- "***WARNING***"
- " Device %s firmware"
- " seems to be missing.",
+ "***WARNING*** Device %s firmware seems to be missing.",
fwtypename);
pvr2_trace(PVR2_TRACE_ERROR_LEGS,
- "Did you install the pvrusb2 firmware files"
- " in their proper location?");
+ "Did you install the pvrusb2 firmware files in their proper location?");
if (fwcount == 1) {
pvr2_trace(PVR2_TRACE_ERROR_LEGS,
"request_firmware unable to locate %s file %s",
fwtypename,fwnames[0]);
} else {
pvr2_trace(PVR2_TRACE_ERROR_LEGS,
- "request_firmware unable to locate"
- " one of the following %s files:",
+ "request_firmware unable to locate one of the following %s files:",
fwtypename);
for (idx = 0; idx < fwcount; idx++) {
pvr2_trace(PVR2_TRACE_ERROR_LEGS,
@@ -1431,8 +1426,7 @@ static int pvr2_upload_firmware1(struct pvr2_hdw *hdw)
if (!hdw->hdw_desc->fx2_firmware.cnt) {
hdw->fw1_state = FW1_STATE_OK;
pvr2_trace(PVR2_TRACE_ERROR_LEGS,
- "Connected device type defines"
- " no firmware to upload; ignoring firmware");
+ "Connected device type defines no firmware to upload; ignoring firmware");
return -ENOTTY;
}
@@ -1457,13 +1451,11 @@ static int pvr2_upload_firmware1(struct pvr2_hdw *hdw)
(!(hdw->hdw_desc->flag_fx2_16kb && (fwsize == 0x4000)))) {
if (hdw->hdw_desc->flag_fx2_16kb) {
pvr2_trace(PVR2_TRACE_ERROR_LEGS,
- "Wrong fx2 firmware size"
- " (expected 8192 or 16384, got %u)",
+ "Wrong fx2 firmware size (expected 8192 or 16384, got %u)",
fwsize);
} else {
pvr2_trace(PVR2_TRACE_ERROR_LEGS,
- "Wrong fx2 firmware size"
- " (expected 8192, got %u)",
+ "Wrong fx2 firmware size (expected 8192, got %u)",
fwsize);
}
release_firmware(fw_entry);
@@ -1585,8 +1577,7 @@ int pvr2_upload_firmware2(struct pvr2_hdw *hdw)
if (fw_len % sizeof(u32)) {
pvr2_trace(PVR2_TRACE_ERROR_LEGS,
- "size of %s firmware"
- " must be a multiple of %zu bytes",
+ "size of %s firmware must be a multiple of %zu bytes",
fw_files[fwidx],sizeof(u32));
release_firmware(fw_entry);
ret = -EINVAL;
@@ -1887,8 +1878,7 @@ static void pvr2_hdw_setup_std(struct pvr2_hdw *hdw)
bcnt = pvr2_std_id_to_str(buf,sizeof(buf),hdw->std_mask_eeprom);
pvr2_trace(PVR2_TRACE_STD,
- "Supported video standard(s) reported available"
- " in hardware: %.*s",
+ "Supported video standard(s) reported available in hardware: %.*s",
bcnt,buf);
hdw->std_mask_avail = hdw->std_mask_eeprom;
@@ -1897,8 +1887,7 @@ static void pvr2_hdw_setup_std(struct pvr2_hdw *hdw)
if (std2) {
bcnt = pvr2_std_id_to_str(buf,sizeof(buf),std2);
pvr2_trace(PVR2_TRACE_STD,
- "Expanding supported video standards"
- " to include: %.*s",
+ "Expanding supported video standards to include: %.*s",
bcnt,buf);
hdw->std_mask_avail |= std2;
}
@@ -1917,8 +1906,8 @@ static void pvr2_hdw_setup_std(struct pvr2_hdw *hdw)
if (std3) {
bcnt = pvr2_std_id_to_str(buf,sizeof(buf),std3);
pvr2_trace(PVR2_TRACE_STD,
- "Initial video standard"
- " (determined by device type): %.*s",bcnt,buf);
+ "Initial video standard (determined by device type): %.*s",
+ bcnt, buf);
hdw->std_mask_cur = std3;
hdw->std_dirty = !0;
return;
@@ -1980,8 +1969,7 @@ static void pvr2_hdw_cx25840_vbi_hack(struct pvr2_hdw *hdw)
}
pvr2_trace(PVR2_TRACE_INIT,
- "Module ID %u:"
- " Executing cx25840 VBI hack",
+ "Module ID %u: Executing cx25840 VBI hack",
hdw->decoder_client_id);
memset(&fmt, 0, sizeof(fmt));
fmt.type = V4L2_BUF_TYPE_SLICED_VBI_CAPTURE;
@@ -2007,8 +1995,7 @@ static int pvr2_hdw_load_subdev(struct pvr2_hdw *hdw,
fname = (mid < ARRAY_SIZE(module_names)) ? module_names[mid] : NULL;
if (!fname) {
pvr2_trace(PVR2_TRACE_ERROR_LEGS,
- "Module ID %u for device %s has no name?"
- " The driver might have a configuration problem.",
+ "Module ID %u for device %s has no name? The driver might have a configuration problem.",
mid,
hdw->hdw_desc->description);
return -EINVAL;
@@ -2027,32 +2014,27 @@ static int pvr2_hdw_load_subdev(struct pvr2_hdw *hdw,
ARRAY_SIZE(i2caddr));
if (i2ccnt) {
pvr2_trace(PVR2_TRACE_INIT,
- "Module ID %u:"
- " Using default i2c address list",
+ "Module ID %u: Using default i2c address list",
mid);
}
}
if (!i2ccnt) {
pvr2_trace(PVR2_TRACE_ERROR_LEGS,
- "Module ID %u (%s) for device %s:"
- " No i2c addresses."
- " The driver might have a configuration problem.",
+ "Module ID %u (%s) for device %s: No i2c addresses. The driver might have a configuration problem.",
mid, fname, hdw->hdw_desc->description);
return -EINVAL;
}
if (i2ccnt == 1) {
pvr2_trace(PVR2_TRACE_INIT,
- "Module ID %u:"
- " Setting up with specified i2c address 0x%x",
+ "Module ID %u: Setting up with specified i2c address 0x%x",
mid, i2caddr[0]);
sd = v4l2_i2c_new_subdev(&hdw->v4l2_dev, &hdw->i2c_adap,
fname, i2caddr[0], NULL);
} else {
pvr2_trace(PVR2_TRACE_INIT,
- "Module ID %u:"
- " Setting up with address probe list",
+ "Module ID %u: Setting up with address probe list",
mid);
sd = v4l2_i2c_new_subdev(&hdw->v4l2_dev, &hdw->i2c_adap,
fname, 0, i2caddr);
@@ -2060,9 +2042,7 @@ static int pvr2_hdw_load_subdev(struct pvr2_hdw *hdw,
if (!sd) {
pvr2_trace(PVR2_TRACE_ERROR_LEGS,
- "Module ID %u (%s) for device %s failed to load."
- " Possible missing sub-device kernel module or"
- " initialization failure within module.",
+ "Module ID %u (%s) for device %s failed to load. Possible missing sub-device kernel module or initialization failure within module.",
mid, fname, hdw->hdw_desc->description);
return -EIO;
}
@@ -2124,18 +2104,14 @@ static void pvr2_hdw_setup_low(struct pvr2_hdw *hdw)
== 0);
if (reloadFl) {
pvr2_trace(PVR2_TRACE_INIT,
- "USB endpoint config looks strange"
- "; possibly firmware needs to be"
- " loaded");
+ "USB endpoint config looks strange; possibly firmware needs to be loaded");
}
}
if (!reloadFl) {
reloadFl = !pvr2_hdw_check_firmware(hdw);
if (reloadFl) {
pvr2_trace(PVR2_TRACE_INIT,
- "Check for FX2 firmware failed"
- "; possibly firmware needs to be"
- " loaded");
+ "Check for FX2 firmware failed; possibly firmware needs to be loaded");
}
}
if (reloadFl) {
@@ -2200,8 +2176,7 @@ static void pvr2_hdw_setup_low(struct pvr2_hdw *hdw)
if (!pvr2_hdw_dev_ok(hdw)) return;
if (ret < 0) {
pvr2_trace(PVR2_TRACE_ERROR_LEGS,
- "Unable to determine location of eeprom,"
- " skipping");
+ "Unable to determine location of eeprom, skipping");
} else {
hdw->eeprom_addr = ret;
pvr2_eeprom_analyze(hdw);
@@ -2254,8 +2229,7 @@ static void pvr2_hdw_setup_low(struct pvr2_hdw *hdw)
idx = get_default_error_tolerance(hdw);
if (idx) {
pvr2_trace(PVR2_TRACE_INIT,
- "pvr2_hdw_setup: video stream %p"
- " setting tolerance %u",
+ "pvr2_hdw_setup: video stream %p setting tolerance %u",
hdw->vid_stream,idx);
}
pvr2_stream_setup(hdw->vid_stream,hdw->usb_dev,
@@ -2285,16 +2259,13 @@ static void pvr2_hdw_setup(struct pvr2_hdw *hdw)
if (hdw->flag_init_ok) {
pvr2_trace(
PVR2_TRACE_INFO,
- "Device initialization"
- " completed successfully.");
+ "Device initialization completed successfully.");
break;
}
if (hdw->fw1_state == FW1_STATE_RELOAD) {
pvr2_trace(
PVR2_TRACE_INFO,
- "Device microcontroller firmware"
- " (re)loaded; it should now reset"
- " and reconnect.");
+ "Device microcontroller firmware (re)loaded; it should now reset and reconnect.");
break;
}
pvr2_trace(
@@ -2303,48 +2274,35 @@ static void pvr2_hdw_setup(struct pvr2_hdw *hdw)
if (hdw->fw1_state == FW1_STATE_MISSING) {
pvr2_trace(
PVR2_TRACE_ERROR_LEGS,
- "Giving up since device"
- " microcontroller firmware"
- " appears to be missing.");
+ "Giving up since device microcontroller firmware appears to be missing.");
break;
}
}
if (hdw->flag_modulefail) {
pvr2_trace(
PVR2_TRACE_ERROR_LEGS,
- "***WARNING*** pvrusb2 driver initialization"
- " failed due to the failure of one or more"
- " sub-device kernel modules.");
+ "***WARNING*** pvrusb2 driver initialization failed due to the failure of one or more sub-device kernel modules.");
pvr2_trace(
PVR2_TRACE_ERROR_LEGS,
- "You need to resolve the failing condition"
- " before this driver can function. There"
- " should be some earlier messages giving more"
- " information about the problem.");
+ "You need to resolve the failing condition before this driver can function. There should be some earlier messages giving more information about the problem.");
break;
}
if (procreload) {
pvr2_trace(
PVR2_TRACE_ERROR_LEGS,
- "Attempting pvrusb2 recovery by reloading"
- " primary firmware.");
+ "Attempting pvrusb2 recovery by reloading primary firmware.");
pvr2_trace(
PVR2_TRACE_ERROR_LEGS,
- "If this works, device should disconnect"
- " and reconnect in a sane state.");
+ "If this works, device should disconnect and reconnect in a sane state.");
hdw->fw1_state = FW1_STATE_UNKNOWN;
pvr2_upload_firmware1(hdw);
} else {
pvr2_trace(
PVR2_TRACE_ERROR_LEGS,
- "***WARNING*** pvrusb2 device hardware"
- " appears to be jammed"
- " and I can't clear it.");
+ "***WARNING*** pvrusb2 device hardware appears to be jammed and I can't clear it.");
pvr2_trace(
PVR2_TRACE_ERROR_LEGS,
- "You might need to power cycle"
- " the pvrusb2 device"
- " in order to recover.");
+ "You might need to power cycle the pvrusb2 device in order to recover.");
}
} while (0);
pvr2_trace(PVR2_TRACE_INIT,"pvr2_hdw_setup(hdw=%p) end",hdw);
@@ -2396,12 +2354,8 @@ struct pvr2_hdw *pvr2_hdw_create(struct usb_interface *intf,
hdw_desc = (const struct pvr2_device_desc *)(devid->driver_info);
if (hdw_desc == NULL) {
- pvr2_trace(PVR2_TRACE_INIT, "pvr2_hdw_create:"
- " No device description pointer,"
- " unable to continue.");
- pvr2_trace(PVR2_TRACE_INIT, "If you have a new device type,"
- " please contact Mike Isely <isely@pobox.com>"
- " to get it included in the driver\n");
+ pvr2_trace(PVR2_TRACE_INIT, "pvr2_hdw_create: No device description pointer, unable to continue.");
+ pvr2_trace(PVR2_TRACE_INIT, "If you have a new device type, please contact Mike Isely <isely@pobox.com> to get it included in the driver\n");
goto fail;
}
@@ -2413,14 +2367,12 @@ struct pvr2_hdw *pvr2_hdw_create(struct usb_interface *intf,
if (hdw_desc->flag_is_experimental) {
pvr2_trace(PVR2_TRACE_INFO, "**********");
pvr2_trace(PVR2_TRACE_INFO,
- "WARNING: Support for this device (%s) is"
- " experimental.", hdw_desc->description);
+ "WARNING: Support for this device (%s) is experimental.",
+ hdw_desc->description);
pvr2_trace(PVR2_TRACE_INFO,
- "Important functionality might not be"
- " entirely working.");
+ "Important functionality might not be entirely working.");
pvr2_trace(PVR2_TRACE_INFO,
- "Please consider contacting the driver author to"
- " help with further stabilization of the driver.");
+ "Please consider contacting the driver author to help with further stabilization of the driver.");
pvr2_trace(PVR2_TRACE_INFO, "**********");
}
if (!hdw) goto fail;
@@ -3375,8 +3327,7 @@ static u8 *pvr2_full_eeprom_fetch(struct pvr2_hdw *hdw)
eeprom = kmalloc(EEPROM_SIZE,GFP_KERNEL);
if (!eeprom) {
pvr2_trace(PVR2_TRACE_ERROR_LEGS,
- "Failed to allocate memory"
- " required to read eeprom");
+ "Failed to allocate memory required to read eeprom");
return NULL;
}
@@ -3393,8 +3344,8 @@ static u8 *pvr2_full_eeprom_fetch(struct pvr2_hdw *hdw)
strange but it's what they do) */
mode16 = (addr & 1);
eepromSize = (mode16 ? EEPROM_SIZE : 256);
- trace_eeprom("Examining %d byte eeprom at location 0x%x"
- " using %d bit addressing",eepromSize,addr,
+ trace_eeprom("Examining %d byte eeprom at location 0x%x using %d bit addressing",
+ eepromSize, addr,
mode16 ? 16 : 8);
msg[0].addr = addr;
@@ -3461,8 +3412,8 @@ void pvr2_hdw_cpufw_set_enabled(struct pvr2_hdw *hdw,
if (hdw->fw_cpu_flag) {
hdw->fw_size = (mode == 1) ? 0x4000 : 0x2000;
pvr2_trace(PVR2_TRACE_FIRMWARE,
- "Preparing to suck out CPU firmware"
- " (size=%u)", hdw->fw_size);
+ "Preparing to suck out CPU firmware (size=%u)",
+ hdw->fw_size);
hdw->fw_buffer = kzalloc(hdw->fw_size,GFP_KERNEL);
if (!hdw->fw_buffer) {
hdw->fw_size = 0;
@@ -3620,21 +3571,18 @@ static int pvr2_send_request_ex(struct pvr2_hdw *hdw,
struct timer_list timer;
if (!hdw->ctl_lock_held) {
pvr2_trace(PVR2_TRACE_ERROR_LEGS,
- "Attempted to execute control transfer"
- " without lock!!");
+ "Attempted to execute control transfer without lock!!");
return -EDEADLK;
}
if (!hdw->flag_ok && !probe_fl) {
pvr2_trace(PVR2_TRACE_ERROR_LEGS,
- "Attempted to execute control transfer"
- " when device not ok");
+ "Attempted to execute control transfer when device not ok");
return -EIO;
}
if (!(hdw->ctl_read_urb && hdw->ctl_write_urb)) {
if (!probe_fl) {
pvr2_trace(PVR2_TRACE_ERROR_LEGS,
- "Attempted to execute control transfer"
- " when USB is disconnected");
+ "Attempted to execute control transfer when USB is disconnected");
}
return -ENOTTY;
}
@@ -3645,16 +3593,14 @@ static int pvr2_send_request_ex(struct pvr2_hdw *hdw,
if (write_len > PVR2_CTL_BUFFSIZE) {
pvr2_trace(
PVR2_TRACE_ERROR_LEGS,
- "Attempted to execute %d byte"
- " control-write transfer (limit=%d)",
+ "Attempted to execute %d byte control-write transfer (limit=%d)",
write_len,PVR2_CTL_BUFFSIZE);
return -EINVAL;
}
if (read_len > PVR2_CTL_BUFFSIZE) {
pvr2_trace(
PVR2_TRACE_ERROR_LEGS,
- "Attempted to execute %d byte"
- " control-read transfer (limit=%d)",
+ "Attempted to execute %d byte control-read transfer (limit=%d)",
write_len,PVR2_CTL_BUFFSIZE);
return -EINVAL;
}
@@ -3703,8 +3649,8 @@ static int pvr2_send_request_ex(struct pvr2_hdw *hdw,
status = usb_submit_urb(hdw->ctl_write_urb,GFP_KERNEL);
if (status < 0) {
pvr2_trace(PVR2_TRACE_ERROR_LEGS,
- "Failed to submit write-control"
- " URB status=%d",status);
+ "Failed to submit write-control URB status=%d",
+ status);
hdw->ctl_write_pend_flag = 0;
goto done;
}
@@ -3727,8 +3673,8 @@ static int pvr2_send_request_ex(struct pvr2_hdw *hdw,
status = usb_submit_urb(hdw->ctl_read_urb,GFP_KERNEL);
if (status < 0) {
pvr2_trace(PVR2_TRACE_ERROR_LEGS,
- "Failed to submit read-control"
- " URB status=%d",status);
+ "Failed to submit read-control URB status=%d",
+ status);
hdw->ctl_read_pend_flag = 0;
goto done;
}
@@ -3770,8 +3716,7 @@ static int pvr2_send_request_ex(struct pvr2_hdw *hdw,
status = hdw->ctl_write_urb->status;
if (!probe_fl) {
pvr2_trace(PVR2_TRACE_ERROR_LEGS,
- "control-write URB failure,"
- " status=%d",
+ "control-write URB failure, status=%d",
status);
}
goto done;
@@ -3781,8 +3726,7 @@ static int pvr2_send_request_ex(struct pvr2_hdw *hdw,
status = -EIO;
if (!probe_fl) {
pvr2_trace(PVR2_TRACE_ERROR_LEGS,
- "control-write URB short,"
- " expected=%d got=%d",
+ "control-write URB short, expected=%d got=%d",
write_len,
hdw->ctl_write_urb->actual_length);
}
@@ -3800,8 +3744,7 @@ static int pvr2_send_request_ex(struct pvr2_hdw *hdw,
status = hdw->ctl_read_urb->status;
if (!probe_fl) {
pvr2_trace(PVR2_TRACE_ERROR_LEGS,
- "control-read URB failure,"
- " status=%d",
+ "control-read URB failure, status=%d",
status);
}
goto done;
@@ -3811,8 +3754,7 @@ static int pvr2_send_request_ex(struct pvr2_hdw *hdw,
status = -EIO;
if (!probe_fl) {
pvr2_trace(PVR2_TRACE_ERROR_LEGS,
- "control-read URB short,"
- " expected=%d got=%d",
+ "control-read URB short, expected=%d got=%d",
read_len,
hdw->ctl_read_urb->actual_length);
}
@@ -4799,9 +4741,7 @@ static unsigned int pvr2_hdw_report_unlocked(struct pvr2_hdw *hdw,int which,
0);
return scnprintf(
buf,acnt,
- "Bytes streamed=%u"
- " URBs: queued=%u idle=%u ready=%u"
- " processed=%u failed=%u",
+ "Bytes streamed=%u URBs: queued=%u idle=%u ready=%u processed=%u failed=%u",
stats.bytes_processed,
stats.buffers_in_queue,
stats.buffers_in_idle,
@@ -5013,8 +4953,7 @@ int pvr2_hdw_gpio_chg_dir(struct pvr2_hdw *hdw,u32 msk,u32 val)
if (ret) return ret;
nval = (cval & ~msk) | (val & msk);
pvr2_trace(PVR2_TRACE_GPIO,
- "GPIO direction changing 0x%x:0x%x"
- " from 0x%x to 0x%x",
+ "GPIO direction changing 0x%x:0x%x from 0x%x to 0x%x",
msk,val,cval,nval);
} else {
nval = val;
@@ -5057,9 +4996,7 @@ void pvr2_hdw_status_poll(struct pvr2_hdw *hdw)
now. (Of course, no sub-drivers seem to implement it either.
But now it's a chicken and egg problem...) */
v4l2_device_call_all(&hdw->v4l2_dev, 0, tuner, g_tuner, vtp);
- pvr2_trace(PVR2_TRACE_CHIPS, "subdev status poll"
- " type=%u strength=%u audio=0x%x cap=0x%x"
- " low=%u hi=%u",
+ pvr2_trace(PVR2_TRACE_CHIPS, "subdev status poll type=%u strength=%u audio=0x%x cap=0x%x low=%u hi=%u",
vtp->type,
vtp->signal, vtp->rxsubchans, vtp->capability,
vtp->rangelow, vtp->rangehigh);
diff --git a/drivers/media/usb/pvrusb2/pvrusb2-i2c-core.c b/drivers/media/usb/pvrusb2/pvrusb2-i2c-core.c
index 6da5fb544817..cc63e5f4c26c 100644
--- a/drivers/media/usb/pvrusb2/pvrusb2-i2c-core.c
+++ b/drivers/media/usb/pvrusb2/pvrusb2-i2c-core.c
@@ -62,8 +62,7 @@ static int pvr2_i2c_write(struct pvr2_hdw *hdw, /* Context */
if (!data) length = 0;
if (length > (sizeof(hdw->cmd_buffer) - 3)) {
pvr2_trace(PVR2_TRACE_ERROR_LEGS,
- "Killing an I2C write to %u that is too large"
- " (desired=%u limit=%u)",
+ "Killing an I2C write to %u that is too large (desired=%u limit=%u)",
i2c_addr,
length,(unsigned int)(sizeof(hdw->cmd_buffer) - 3));
return -ENOTSUPP;
@@ -90,8 +89,7 @@ static int pvr2_i2c_write(struct pvr2_hdw *hdw, /* Context */
if (hdw->cmd_buffer[0] != 8) {
ret = -EIO;
if (hdw->cmd_buffer[0] != 7) {
- trace_i2c("unexpected status"
- " from i2_write[%d]: %d",
+ trace_i2c("unexpected status from i2_write[%d]: %d",
i2c_addr,hdw->cmd_buffer[0]);
}
}
@@ -116,16 +114,14 @@ static int pvr2_i2c_read(struct pvr2_hdw *hdw, /* Context */
if (!data) dlen = 0;
if (dlen > (sizeof(hdw->cmd_buffer) - 4)) {
pvr2_trace(PVR2_TRACE_ERROR_LEGS,
- "Killing an I2C read to %u that has wlen too large"
- " (desired=%u limit=%u)",
+ "Killing an I2C read to %u that has wlen too large (desired=%u limit=%u)",
i2c_addr,
dlen,(unsigned int)(sizeof(hdw->cmd_buffer) - 4));
return -ENOTSUPP;
}
if (res && (rlen > (sizeof(hdw->cmd_buffer) - 1))) {
pvr2_trace(PVR2_TRACE_ERROR_LEGS,
- "Killing an I2C read to %u that has rlen too large"
- " (desired=%u limit=%u)",
+ "Killing an I2C read to %u that has rlen too large (desired=%u limit=%u)",
i2c_addr,
rlen,(unsigned int)(sizeof(hdw->cmd_buffer) - 1));
return -ENOTSUPP;
@@ -154,8 +150,7 @@ static int pvr2_i2c_read(struct pvr2_hdw *hdw, /* Context */
if (hdw->cmd_buffer[0] != 8) {
ret = -EIO;
if (hdw->cmd_buffer[0] != 7) {
- trace_i2c("unexpected status"
- " from i2_read[%d]: %d",
+ trace_i2c("unexpected status from i2_read[%d]: %d",
i2c_addr,hdw->cmd_buffer[0]);
}
}
@@ -352,13 +347,11 @@ static int i2c_hack_cx25840(struct pvr2_hdw *hdw,
if ((ret != 0) || (*rdata == 0x04) || (*rdata == 0x0a)) {
pvr2_trace(PVR2_TRACE_ERROR_LEGS,
- "WARNING: Detected a wedged cx25840 chip;"
- " the device will not work.");
+ "WARNING: Detected a wedged cx25840 chip; the device will not work.");
pvr2_trace(PVR2_TRACE_ERROR_LEGS,
"WARNING: Try power cycling the pvrusb2 device.");
pvr2_trace(PVR2_TRACE_ERROR_LEGS,
- "WARNING: Disabling further access to the device"
- " to prevent other foul-ups.");
+ "WARNING: Disabling further access to the device to prevent other foul-ups.");
// This blocks all further communication with the part.
hdw->i2c_func[0x44] = NULL;
pvr2_hdw_render_useless(hdw);
@@ -444,8 +437,7 @@ static int pvr2_i2c_xfer(struct i2c_adapter *i2c_adap,
}
} else if (num == 2) {
if (msgs[0].addr != msgs[1].addr) {
- trace_i2c("i2c refusing 2 phase transfer with"
- " conflicting target addresses");
+ trace_i2c("i2c refusing 2 phase transfer with conflicting target addresses");
ret = -ENOTSUPP;
goto done;
}
@@ -477,8 +469,7 @@ static int pvr2_i2c_xfer(struct i2c_adapter *i2c_adap,
ret = 2;
goto done;
} else {
- trace_i2c("i2c refusing complex transfer"
- " read0=%d read1=%d",
+ trace_i2c("i2c refusing complex transfer read0=%d read1=%d",
(msgs[0].flags & I2C_M_RD),
(msgs[1].flags & I2C_M_RD));
}
@@ -492,8 +483,7 @@ static int pvr2_i2c_xfer(struct i2c_adapter *i2c_adap,
for (idx = 0; idx < num; idx++) {
cnt = msgs[idx].len;
printk(KERN_INFO
- "pvrusb2 i2c xfer %u/%u:"
- " addr=0x%x len=%d %s",
+ "pvrusb2 i2c xfer %u/%u: addr=0x%x len=%d %s",
idx+1,num,
msgs[idx].addr,
cnt,
@@ -501,18 +491,18 @@ static int pvr2_i2c_xfer(struct i2c_adapter *i2c_adap,
"read" : "write"));
if ((ret > 0) || !(msgs[idx].flags & I2C_M_RD)) {
if (cnt > 8) cnt = 8;
- printk(" [");
+ printk(KERN_CONT " [");
for (offs = 0; offs < (cnt>8?8:cnt); offs++) {
- if (offs) printk(" ");
- printk("%02x",msgs[idx].buf[offs]);
+ if (offs) printk(KERN_CONT " ");
+ printk(KERN_CONT "%02x",msgs[idx].buf[offs]);
}
- if (offs < cnt) printk(" ...");
- printk("]");
+ if (offs < cnt) printk(KERN_CONT " ...");
+ printk(KERN_CONT "]");
}
if (idx+1 == num) {
- printk(" result=%d",ret);
+ printk(KERN_CONT " result=%d",ret);
}
- printk("\n");
+ printk(KERN_CONT "\n");
}
if (!num) {
printk(KERN_INFO
@@ -668,8 +658,7 @@ void pvr2_i2c_core_init(struct pvr2_hdw *hdw)
the emulated IR receiver. */
if (do_i2c_probe(hdw, 0x71)) {
pvr2_trace(PVR2_TRACE_INFO,
- "Device has newer IR hardware;"
- " disabling unneeded virtual IR device");
+ "Device has newer IR hardware; disabling unneeded virtual IR device");
hdw->i2c_func[0x18] = NULL;
/* Remember that this is a different device... */
hdw->ir_scheme_active = PVR2_IR_SCHEME_24XXX_MCE;
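The transfer-trace hunk adds KERN_CONT to every continuation printk(); modern kernels treat a printk() without KERN_CONT as the start of a new record, so a hex dump assembled from several calls would otherwise be split across log lines. A small illustration of the convention:

	printk(KERN_INFO "pvrusb2 i2c xfer 1/1: addr=0x42 len=2 write");
	printk(KERN_CONT " [");
	printk(KERN_CONT "%02x %02x", 0x12, 0x34);
	printk(KERN_CONT "]\n");	/* all four calls form one log line */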
diff --git a/drivers/media/usb/pvrusb2/pvrusb2-io.c b/drivers/media/usb/pvrusb2/pvrusb2-io.c
index e68ce24f27e3..e3103ecd4828 100644
--- a/drivers/media/usb/pvrusb2/pvrusb2-io.c
+++ b/drivers/media/usb/pvrusb2/pvrusb2-io.c
@@ -113,8 +113,7 @@ static const char *pvr2_buffer_state_decode(enum pvr2_buffer_state st)
static void pvr2_buffer_describe(struct pvr2_buffer *bp,const char *msg)
{
pvr2_trace(PVR2_TRACE_INFO,
- "buffer%s%s %p state=%s id=%d status=%d"
- " stream=%p purb=%p sig=0x%x",
+ "buffer%s%s %p state=%s id=%d status=%d stream=%p purb=%p sig=0x%x",
(msg ? " " : ""),
(msg ? msg : ""),
bp,
@@ -156,8 +155,7 @@ static void pvr2_buffer_remove(struct pvr2_buffer *bp)
(*cnt)--;
(*bcnt) -= ccnt;
pvr2_trace(PVR2_TRACE_BUF_FLOW,
- "/*---TRACE_FLOW---*/"
- " bufferPool %8s dec cap=%07d cnt=%02d",
+ "/*---TRACE_FLOW---*/ bufferPool %8s dec cap=%07d cnt=%02d",
pvr2_buffer_state_decode(bp->state),*bcnt,*cnt);
bp->state = pvr2_buffer_state_none;
}
@@ -198,8 +196,7 @@ static int pvr2_buffer_set_ready(struct pvr2_buffer *bp)
(sp->r_count)++;
sp->r_bcount += bp->used_count;
pvr2_trace(PVR2_TRACE_BUF_FLOW,
- "/*---TRACE_FLOW---*/"
- " bufferPool %8s inc cap=%07d cnt=%02d",
+ "/*---TRACE_FLOW---*/ bufferPool %8s inc cap=%07d cnt=%02d",
pvr2_buffer_state_decode(bp->state),
sp->r_bcount,sp->r_count);
spin_unlock_irqrestore(&sp->list_lock,irq_flags);
@@ -224,8 +221,7 @@ static void pvr2_buffer_set_idle(struct pvr2_buffer *bp)
(sp->i_count)++;
sp->i_bcount += bp->max_count;
pvr2_trace(PVR2_TRACE_BUF_FLOW,
- "/*---TRACE_FLOW---*/"
- " bufferPool %8s inc cap=%07d cnt=%02d",
+ "/*---TRACE_FLOW---*/ bufferPool %8s inc cap=%07d cnt=%02d",
pvr2_buffer_state_decode(bp->state),
sp->i_bcount,sp->i_count);
spin_unlock_irqrestore(&sp->list_lock,irq_flags);
@@ -249,8 +245,7 @@ static void pvr2_buffer_set_queued(struct pvr2_buffer *bp)
(sp->q_count)++;
sp->q_bcount += bp->max_count;
pvr2_trace(PVR2_TRACE_BUF_FLOW,
- "/*---TRACE_FLOW---*/"
- " bufferPool %8s inc cap=%07d cnt=%02d",
+ "/*---TRACE_FLOW---*/ bufferPool %8s inc cap=%07d cnt=%02d",
pvr2_buffer_state_decode(bp->state),
sp->q_bcount,sp->q_count);
spin_unlock_irqrestore(&sp->list_lock,irq_flags);
@@ -293,8 +288,8 @@ static void pvr2_buffer_done(struct pvr2_buffer *bp)
bp->signature = 0;
bp->stream = NULL;
usb_free_urb(bp->purb);
- pvr2_trace(PVR2_TRACE_BUF_POOL,"/*---TRACE_FLOW---*/"
- " bufferDone %p",bp);
+ pvr2_trace(PVR2_TRACE_BUF_POOL, "/*---TRACE_FLOW---*/ bufferDone %p",
+ bp);
}
static int pvr2_stream_buffer_count(struct pvr2_stream *sp,unsigned int cnt)
@@ -306,8 +301,7 @@ static int pvr2_stream_buffer_count(struct pvr2_stream *sp,unsigned int cnt)
if (cnt == sp->buffer_total_count) return 0;
pvr2_trace(PVR2_TRACE_BUF_POOL,
- "/*---TRACE_FLOW---*/ poolResize "
- " stream=%p cur=%d adj=%+d",
+ "/*---TRACE_FLOW---*/ poolResize stream=%p cur=%d adj=%+d",
sp,
sp->buffer_total_count,
cnt-sp->buffer_total_count);
@@ -374,8 +368,7 @@ static int pvr2_stream_achieve_buffer_count(struct pvr2_stream *sp)
if (sp->buffer_total_count == sp->buffer_target_count) return 0;
pvr2_trace(PVR2_TRACE_BUF_POOL,
- "/*---TRACE_FLOW---*/"
- " poolCheck stream=%p cur=%d tgt=%d",
+ "/*---TRACE_FLOW---*/ poolCheck stream=%p cur=%d tgt=%d",
sp,sp->buffer_total_count,sp->buffer_target_count);
if (sp->buffer_total_count < sp->buffer_target_count) {
@@ -454,8 +447,8 @@ static void buffer_complete(struct urb *urb)
bp->used_count = urb->actual_length;
if (sp->fail_count) {
pvr2_trace(PVR2_TRACE_TOLERANCE,
- "stream %p transfer ok"
- " - fail count reset",sp);
+ "stream %p transfer ok - fail count reset",
+ sp);
sp->fail_count = 0;
}
} else if (sp->fail_count < sp->fail_tolerance) {
@@ -464,8 +457,7 @@ static void buffer_complete(struct urb *urb)
(sp->fail_count)++;
(sp->buffers_failed)++;
pvr2_trace(PVR2_TRACE_TOLERANCE,
- "stream %p ignoring error %d"
- " - fail count increased to %u",
+ "stream %p ignoring error %d - fail count increased to %u",
sp,urb->status,sp->fail_count);
} else {
(sp->buffers_failed)++;
@@ -666,8 +658,7 @@ int pvr2_buffer_set_buffer(struct pvr2_buffer *bp,void *ptr,unsigned int cnt)
bp->max_count = cnt;
bp->stream->i_bcount += bp->max_count;
pvr2_trace(PVR2_TRACE_BUF_FLOW,
- "/*---TRACE_FLOW---*/ bufferPool "
- " %8s cap cap=%07d cnt=%02d",
+ "/*---TRACE_FLOW---*/ bufferPool %8s cap cap=%07d cnt=%02d",
pvr2_buffer_state_decode(
pvr2_buffer_state_idle),
bp->stream->i_bcount,bp->stream->i_count);
diff --git a/drivers/media/usb/pvrusb2/pvrusb2-ioread.c b/drivers/media/usb/pvrusb2/pvrusb2-ioread.c
index 614d55767a4e..70b8a052eb5b 100644
--- a/drivers/media/usb/pvrusb2/pvrusb2-ioread.c
+++ b/drivers/media/usb/pvrusb2/pvrusb2-ioread.c
@@ -169,9 +169,7 @@ static int pvr2_ioread_start(struct pvr2_ioread *cp)
stat = pvr2_buffer_queue(bp);
if (stat < 0) {
pvr2_trace(PVR2_TRACE_DATA_FLOW,
- "/*---TRACE_READ---*/"
- " pvr2_ioread_start id=%p"
- " error=%d",
+ "/*---TRACE_READ---*/ pvr2_ioread_start id=%p error=%d",
cp,stat);
pvr2_ioread_stop(cp);
return stat;
@@ -209,8 +207,8 @@ int pvr2_ioread_setup(struct pvr2_ioread *cp,struct pvr2_stream *sp)
do {
if (cp->stream) {
pvr2_trace(PVR2_TRACE_START_STOP,
- "/*---TRACE_READ---*/"
- " pvr2_ioread_setup (tear-down) id=%p",cp);
+ "/*---TRACE_READ---*/ pvr2_ioread_setup (tear-down) id=%p",
+ cp);
pvr2_ioread_stop(cp);
pvr2_stream_kill(cp->stream);
if (pvr2_stream_get_buffer_count(cp->stream)) {
@@ -220,8 +218,8 @@ int pvr2_ioread_setup(struct pvr2_ioread *cp,struct pvr2_stream *sp)
}
if (sp) {
pvr2_trace(PVR2_TRACE_START_STOP,
- "/*---TRACE_READ---*/"
- " pvr2_ioread_setup (setup) id=%p",cp);
+ "/*---TRACE_READ---*/ pvr2_ioread_setup (setup) id=%p",
+ cp);
pvr2_stream_kill(sp);
ret = pvr2_stream_set_buffer_count(sp,BUFFER_COUNT);
if (ret < 0) {
@@ -270,9 +268,7 @@ static int pvr2_ioread_get_buffer(struct pvr2_ioread *cp)
if (stat < 0) {
// Streaming error...
pvr2_trace(PVR2_TRACE_DATA_FLOW,
- "/*---TRACE_READ---*/"
- " pvr2_ioread_read id=%p"
- " queue_error=%d",
+ "/*---TRACE_READ---*/ pvr2_ioread_read id=%p queue_error=%d",
cp,stat);
pvr2_ioread_stop(cp);
return 0;
@@ -292,9 +288,7 @@ static int pvr2_ioread_get_buffer(struct pvr2_ioread *cp)
if (stat < 0) {
// Streaming error...
pvr2_trace(PVR2_TRACE_DATA_FLOW,
- "/*---TRACE_READ---*/"
- " pvr2_ioread_read id=%p"
- " buffer_error=%d",
+ "/*---TRACE_READ---*/ pvr2_ioread_read id=%p buffer_error=%d",
cp,stat);
pvr2_ioread_stop(cp);
// Give up.
@@ -347,8 +341,7 @@ static void pvr2_ioread_filter(struct pvr2_ioread *cp)
if (cp->sync_buf_offs >= cp->sync_key_len) {
cp->sync_trashed_count -= cp->sync_key_len;
pvr2_trace(PVR2_TRACE_DATA_FLOW,
- "/*---TRACE_READ---*/"
- " sync_state <== 2 (skipped %u bytes)",
+ "/*---TRACE_READ---*/ sync_state <== 2 (skipped %u bytes)",
cp->sync_trashed_count);
cp->sync_state = 2;
cp->sync_buf_offs = 0;
@@ -358,8 +351,7 @@ static void pvr2_ioread_filter(struct pvr2_ioread *cp)
if (cp->c_data_offs < cp->c_data_len) {
// Sanity check - should NEVER get here
pvr2_trace(PVR2_TRACE_ERROR_LEGS,
- "ERROR: pvr2_ioread filter sync problem"
- " len=%u offs=%u",
+ "ERROR: pvr2_ioread filter sync problem len=%u offs=%u",
cp->c_data_len,cp->c_data_offs);
// Get out so we don't get stuck in an infinite
// loop.
@@ -418,8 +410,8 @@ int pvr2_ioread_read(struct pvr2_ioread *cp,void __user *buf,unsigned int cnt)
if (!cnt) {
pvr2_trace(PVR2_TRACE_TRAP,
- "/*---TRACE_READ---*/ pvr2_ioread_read id=%p"
- " ZERO Request? Returning zero.",cp);
+ "/*---TRACE_READ---*/ pvr2_ioread_read id=%p ZERO Request? Returning zero.",
+ cp);
return 0;
}
@@ -477,8 +469,7 @@ int pvr2_ioread_read(struct pvr2_ioread *cp,void __user *buf,unsigned int cnt)
// Consumed entire key; switch mode
// to normal.
pvr2_trace(PVR2_TRACE_DATA_FLOW,
- "/*---TRACE_READ---*/"
- " sync_state <== 0");
+ "/*---TRACE_READ---*/ sync_state <== 0");
cp->sync_state = 0;
}
} else {
@@ -502,8 +493,7 @@ int pvr2_ioread_read(struct pvr2_ioread *cp,void __user *buf,unsigned int cnt)
}
pvr2_trace(PVR2_TRACE_DATA_FLOW,
- "/*---TRACE_READ---*/ pvr2_ioread_read"
- " id=%p request=%d result=%d",
+ "/*---TRACE_READ---*/ pvr2_ioread_read id=%p request=%d result=%d",
cp,req_cnt,ret);
return ret;
}
diff --git a/drivers/media/usb/pvrusb2/pvrusb2-std.c b/drivers/media/usb/pvrusb2/pvrusb2-std.c
index 9a596a3a4c27..cd7bc18a1ba2 100644
--- a/drivers/media/usb/pvrusb2/pvrusb2-std.c
+++ b/drivers/media/usb/pvrusb2/pvrusb2-std.c
@@ -357,8 +357,7 @@ struct v4l2_standard *pvr2_std_create_enum(unsigned int *countptr,
bcnt = pvr2_std_id_to_str(buf,sizeof(buf),fmsk);
pvr2_trace(
PVR2_TRACE_ERROR_LEGS,
- "WARNING:"
- " Failed to classify the following standard(s): %.*s",
+ "WARNING: Failed to classify the following standard(s): %.*s",
bcnt,buf);
}
diff --git a/drivers/media/usb/pvrusb2/pvrusb2-sysfs.c b/drivers/media/usb/pvrusb2/pvrusb2-sysfs.c
index 06fe63ced58c..d977976b8d91 100644
--- a/drivers/media/usb/pvrusb2/pvrusb2-sysfs.c
+++ b/drivers/media/usb/pvrusb2/pvrusb2-sysfs.c
@@ -116,7 +116,6 @@ static ssize_t show_type(struct device *class_dev,
}
pvr2_sysfs_trace("pvr2_sysfs(%p) show_type(cid=%d) is %s",
cip->chptr, cip->ctl_id, name);
- if (!name) return -EINVAL;
return scnprintf(buf, PAGE_SIZE, "%s\n", name);
}
diff --git a/drivers/media/usb/pvrusb2/pvrusb2-v4l2.c b/drivers/media/usb/pvrusb2/pvrusb2-v4l2.c
index 2cc4d2b6f810..bbbe18d5275a 100644
--- a/drivers/media/usb/pvrusb2/pvrusb2-v4l2.c
+++ b/drivers/media/usb/pvrusb2/pvrusb2-v4l2.c
@@ -949,8 +949,8 @@ static long pvr2_v4l2_ioctl(struct file *file,
if (ret < 0) {
if (pvrusb2_debug & PVR2_TRACE_V4LIOCTL) {
pvr2_trace(PVR2_TRACE_V4LIOCTL,
- "pvr2_v4l2_do_ioctl failure, ret=%ld"
- " command was:", ret);
+ "pvr2_v4l2_do_ioctl failure, ret=%ld command was:",
+ ret);
v4l_printk_ioctl(pvr2_hdw_get_driver_name(hdw), cmd);
}
} else {
@@ -1254,8 +1254,7 @@ static void pvr2_v4l2_dev_init(struct pvr2_v4l2_dev *dip,
nr_ptr = video_nr;
if (!dip->stream) {
pr_err(KBUILD_MODNAME
- ": Failed to set up pvrusb2 v4l video dev"
- " due to missing stream instance\n");
+ ": Failed to set up pvrusb2 v4l video dev due to missing stream instance\n");
return;
}
break;
@@ -1272,8 +1271,7 @@ static void pvr2_v4l2_dev_init(struct pvr2_v4l2_dev *dip,
break;
default:
/* Bail out (this should be impossible) */
- pr_err(KBUILD_MODNAME ": Failed to set up pvrusb2 v4l dev"
- " due to unrecognized config\n");
+ pr_err(KBUILD_MODNAME ": Failed to set up pvrusb2 v4l dev due to unrecognized config\n");
return;
}
diff --git a/drivers/media/usb/pvrusb2/pvrusb2-video-v4l.c b/drivers/media/usb/pvrusb2/pvrusb2-video-v4l.c
index 105123ab36aa..6fee367139aa 100644
--- a/drivers/media/usb/pvrusb2/pvrusb2-video-v4l.c
+++ b/drivers/media/usb/pvrusb2/pvrusb2-video-v4l.c
@@ -91,9 +91,7 @@ void pvr2_saa7115_subdev_update(struct pvr2_hdw *hdw, struct v4l2_subdev *sd)
(hdw->input_val < 0) ||
(hdw->input_val >= sp->cnt)) {
pvr2_trace(PVR2_TRACE_ERROR_LEGS,
- "*** WARNING *** subdev v4l2 set_input:"
- " Invalid routing scheme (%u)"
- " and/or input (%d)",
+ "*** WARNING *** subdev v4l2 set_input: Invalid routing scheme (%u) and/or input (%d)",
sid, hdw->input_val);
return;
}
diff --git a/drivers/media/usb/pvrusb2/pvrusb2-wm8775.c b/drivers/media/usb/pvrusb2/pvrusb2-wm8775.c
index f1df94a2436f..7993983de5a6 100644
--- a/drivers/media/usb/pvrusb2/pvrusb2-wm8775.c
+++ b/drivers/media/usb/pvrusb2/pvrusb2-wm8775.c
@@ -49,8 +49,7 @@ void pvr2_wm8775_subdev_update(struct pvr2_hdw *hdw, struct v4l2_subdev *sd)
input = 2;
break;
}
- pvr2_trace(PVR2_TRACE_CHIPS, "subdev wm8775"
- " set_input(val=%d route=0x%x)",
+ pvr2_trace(PVR2_TRACE_CHIPS, "subdev wm8775 set_input(val=%d route=0x%x)",
hdw->input_val, input);
sd->ops->audio->s_routing(sd, input, 0, 0);
diff --git a/drivers/media/usb/pwc/pwc-if.c b/drivers/media/usb/pwc/pwc-if.c
index ff657644b6b3..22420c14ac98 100644
--- a/drivers/media/usb/pwc/pwc-if.c
+++ b/drivers/media/usb/pwc/pwc-if.c
@@ -238,8 +238,8 @@ static void pwc_frame_complete(struct pwc_device *pdev)
} else {
/* Check for underflow first */
if (fbuf->filled < pdev->frame_total_size) {
- PWC_DEBUG_FLOW("Frame buffer underflow (%d bytes);"
- " discarded.\n", fbuf->filled);
+ PWC_DEBUG_FLOW("Frame buffer underflow (%d bytes); discarded.\n",
+ fbuf->filled);
} else {
fbuf->vb.field = V4L2_FIELD_NONE;
fbuf->vb.sequence = pdev->vframe_count;
diff --git a/drivers/media/usb/pwc/pwc-v4l.c b/drivers/media/usb/pwc/pwc-v4l.c
index 3d987984602f..92f04db6bbae 100644
--- a/drivers/media/usb/pwc/pwc-v4l.c
+++ b/drivers/media/usb/pwc/pwc-v4l.c
@@ -406,8 +406,7 @@ static void pwc_vidioc_fill_fmt(struct v4l2_format *f,
f->fmt.pix.bytesperline = f->fmt.pix.width;
f->fmt.pix.sizeimage = f->fmt.pix.height * f->fmt.pix.width * 3 / 2;
f->fmt.pix.colorspace = V4L2_COLORSPACE_SRGB;
- PWC_DEBUG_IOCTL("pwc_vidioc_fill_fmt() "
- "width=%d, height=%d, bytesperline=%d, sizeimage=%d, pixelformat=%c%c%c%c\n",
+ PWC_DEBUG_IOCTL("pwc_vidioc_fill_fmt() width=%d, height=%d, bytesperline=%d, sizeimage=%d, pixelformat=%c%c%c%c\n",
f->fmt.pix.width,
f->fmt.pix.height,
f->fmt.pix.bytesperline,
@@ -473,8 +472,7 @@ static int pwc_s_fmt_vid_cap(struct file *file, void *fh, struct v4l2_format *f)
pixelformat = f->fmt.pix.pixelformat;
- PWC_DEBUG_IOCTL("Trying to set format to: width=%d height=%d fps=%d "
- "format=%c%c%c%c\n",
+ PWC_DEBUG_IOCTL("Trying to set format to: width=%d height=%d fps=%d format=%c%c%c%c\n",
f->fmt.pix.width, f->fmt.pix.height, pdev->vframes,
(pixelformat)&255,
(pixelformat>>8)&255,
diff --git a/drivers/media/usb/siano/smsusb.c b/drivers/media/usb/siano/smsusb.c
index c2e25876e93b..a4dcaec31d02 100644
--- a/drivers/media/usb/siano/smsusb.c
+++ b/drivers/media/usb/siano/smsusb.c
@@ -604,8 +604,8 @@ static int smsusb_resume(struct usb_interface *intf)
intf->cur_altsetting->desc.
bInterfaceNumber, 0);
if (rc < 0) {
- printk(KERN_INFO "%s usb_set_interface failed, "
- "rc %d\n", __func__, rc);
+ printk(KERN_INFO "%s usb_set_interface failed, rc %d\n",
+ __func__, rc);
return rc;
}
}
diff --git a/drivers/media/usb/stkwebcam/stk-sensor.c b/drivers/media/usb/stkwebcam/stk-sensor.c
index e546b014d7ad..fbccbb2eed9f 100644
--- a/drivers/media/usb/stkwebcam/stk-sensor.c
+++ b/drivers/media/usb/stkwebcam/stk-sensor.c
@@ -228,7 +228,7 @@
static int stk_sensor_outb(struct stk_camera *dev, u8 reg, u8 val)
{
int i = 0;
- int tmpval = 0;
+ u8 tmpval = 0;
if (stk_camera_write_reg(dev, STK_IIC_TX_INDEX, reg))
return 1;
@@ -253,7 +253,7 @@ static int stk_sensor_outb(struct stk_camera *dev, u8 reg, u8 val)
static int stk_sensor_inb(struct stk_camera *dev, u8 reg, u8 *val)
{
int i = 0;
- int tmpval = 0;
+ u8 tmpval = 0;
if (stk_camera_write_reg(dev, STK_IIC_RX_INDEX, reg))
return 1;
@@ -274,7 +274,7 @@ static int stk_sensor_inb(struct stk_camera *dev, u8 reg, u8 *val)
if (stk_camera_read_reg(dev, STK_IIC_RX_VALUE, &tmpval))
return 1;
- *val = (u8) tmpval;
+ *val = tmpval;
return 0;
}
@@ -391,8 +391,8 @@ int stk_sensor_init(struct stk_camera *dev)
}
stk_sensor_write_regvals(dev, ov_initvals);
msleep(10);
- STK_INFO("OmniVision sensor detected, id %02X%02X"
- " at address %x\n", idh, idl, SENSOR_ADDRESS);
+ STK_INFO("OmniVision sensor detected, id %02X%02X at address %x\n",
+ idh, idl, SENSOR_ADDRESS);
return 0;
}
diff --git a/drivers/media/usb/stkwebcam/stk-webcam.c b/drivers/media/usb/stkwebcam/stk-webcam.c
index 22a9aae16291..a212248bc2a3 100644
--- a/drivers/media/usb/stkwebcam/stk-webcam.c
+++ b/drivers/media/usb/stkwebcam/stk-webcam.c
@@ -144,7 +144,7 @@ int stk_camera_write_reg(struct stk_camera *dev, u16 index, u8 value)
return 0;
}
-int stk_camera_read_reg(struct stk_camera *dev, u16 index, int *value)
+int stk_camera_read_reg(struct stk_camera *dev, u16 index, u8 *value)
{
struct usb_device *udev = dev->udev;
unsigned char *buf;
@@ -163,7 +163,7 @@ int stk_camera_read_reg(struct stk_camera *dev, u16 index, int *value)
sizeof(u8),
500);
if (ret >= 0)
- memcpy(value, buf, sizeof(u8));
+ *value = *buf;
kfree(buf);
return ret;
@@ -171,9 +171,10 @@ int stk_camera_read_reg(struct stk_camera *dev, u16 index, int *value)
static int stk_start_stream(struct stk_camera *dev)
{
- int value;
+ u8 value;
int i, ret;
- int value_116, value_117;
+ u8 value_116, value_117;
+
if (!is_present(dev))
return -ENODEV;
@@ -213,7 +214,7 @@ static int stk_start_stream(struct stk_camera *dev)
static int stk_stop_stream(struct stk_camera *dev)
{
- int value;
+ u8 value;
int i;
if (is_present(dev)) {
stk_camera_read_reg(dev, 0x0100, &value);
@@ -372,8 +373,7 @@ static void stk_isoc_handler(struct urb *urb)
if (fb->v4lbuf.bytesused != 0
&& fb->v4lbuf.bytesused != dev->frame_size) {
(void) (printk_ratelimit() &&
- STK_ERROR("frame %d, "
- "bytesused=%d, skipping\n",
+ STK_ERROR("frame %d, bytesused=%d, skipping\n",
i, fb->v4lbuf.bytesused));
fb->v4lbuf.bytesused = 0;
fill = fb->buffer;
diff --git a/drivers/media/usb/stkwebcam/stk-webcam.h b/drivers/media/usb/stkwebcam/stk-webcam.h
index 9bbfa3d9bfdd..92bb48e3c74e 100644
--- a/drivers/media/usb/stkwebcam/stk-webcam.h
+++ b/drivers/media/usb/stkwebcam/stk-webcam.h
@@ -129,7 +129,7 @@ struct stk_camera {
#define vdev_to_camera(d) container_of(d, struct stk_camera, vdev)
int stk_camera_write_reg(struct stk_camera *, u16, u8);
-int stk_camera_read_reg(struct stk_camera *, u16, int *);
+int stk_camera_read_reg(struct stk_camera *, u16, u8 *);
int stk_sensor_init(struct stk_camera *);
int stk_sensor_configure(struct stk_camera *);
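The stkwebcam hunks narrow stk_camera_read_reg() from int * to u8 *: the device returns exactly one byte, and the old memcpy of sizeof(u8) into a caller's int wrote only one byte and left the other three untouched, so any caller that forgot to zero-initialize read garbage. With the new signature the byte lands in a u8 directly, e.g.:

	u8 value;
	int ret = stk_camera_read_reg(dev, 0x0100, &value);
	if (ret < 0)
		return ret;	/* value holds exactly the byte read */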
diff --git a/drivers/media/usb/tm6000/tm6000-alsa.c b/drivers/media/usb/tm6000/tm6000-alsa.c
index f16fbd1f9f51..422322541af6 100644
--- a/drivers/media/usb/tm6000/tm6000-alsa.c
+++ b/drivers/media/usb/tm6000/tm6000-alsa.c
@@ -58,9 +58,7 @@ MODULE_PARM_DESC(index, "Index value for tm6000x capture interface(s).");
MODULE_DESCRIPTION("ALSA driver module for tm5600/tm6000/tm6010 based TV cards");
MODULE_AUTHOR("Mauro Carvalho Chehab");
MODULE_LICENSE("GPL");
-MODULE_SUPPORTED_DEVICE("{{Trident,tm5600},"
- "{{Trident,tm6000},"
- "{{Trident,tm6010}");
+MODULE_SUPPORTED_DEVICE("{{Trident,tm5600},{Trident,tm6000},{Trident,tm6010}}");
static unsigned int debug;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "enable debug messages");
diff --git a/drivers/media/usb/tm6000/tm6000-core.c b/drivers/media/usb/tm6000/tm6000-core.c
index 7c32353c59db..8d104e5c4be3 100644
--- a/drivers/media/usb/tm6000/tm6000-core.c
+++ b/drivers/media/usb/tm6000/tm6000-core.c
@@ -602,8 +602,8 @@ int tm6000_init(struct tm6000_core *dev)
for (i = 0; i < size; i++) {
rc = tm6000_set_reg(dev, tab[i].req, tab[i].reg, tab[i].val);
if (rc < 0) {
- printk(KERN_ERR "Error %i while setting req %d, "
- "reg %d to value %d\n", rc,
+ printk(KERN_ERR "Error %i while setting req %d, reg %d to value %d\n",
+ rc,
tab[i].req, tab[i].reg, tab[i].val);
return rc;
}
@@ -761,9 +761,8 @@ int tm6000_tvaudio_set_mute(struct tm6000_core *dev, u8 mute)
if (dev->dev_type == TM6010)
tm6010_set_mute_sif(dev, mute);
else {
- printk(KERN_INFO "ERROR: TM5600 and TM6000 don't has"
- " SIF audio inputs. Please check the %s"
- " configuration.\n", dev->name);
+ printk(KERN_INFO "ERROR: TM5600 and TM6000 don't have SIF audio inputs. Please check the %s configuration.\n",
+ dev->name);
return -EINVAL;
}
break;
@@ -822,9 +821,8 @@ void tm6000_set_volume(struct tm6000_core *dev, int vol)
if (dev->dev_type == TM6010)
tm6010_set_volume_sif(dev, vol);
else
- printk(KERN_INFO "ERROR: TM5600 and TM6000 don't has"
- " SIF audio inputs. Please check the %s"
- " configuration.\n", dev->name);
+ printk(KERN_INFO "ERROR: TM5600 and TM6000 don't have SIF audio inputs. Please check the %s configuration.\n",
+ dev->name);
break;
case TM6000_AMUX_ADC1:
case TM6000_AMUX_ADC2:
diff --git a/drivers/media/usb/tm6000/tm6000-dvb.c b/drivers/media/usb/tm6000/tm6000-dvb.c
index 0426b210383b..70dbaec1219e 100644
--- a/drivers/media/usb/tm6000/tm6000-dvb.c
+++ b/drivers/media/usb/tm6000/tm6000-dvb.c
@@ -35,9 +35,7 @@ MODULE_DESCRIPTION("DVB driver extension module for tm5600/6000/6010 based TV ca
MODULE_AUTHOR("Mauro Carvalho Chehab");
MODULE_LICENSE("GPL");
-MODULE_SUPPORTED_DEVICE("{{Trident, tm5600},"
- "{{Trident, tm6000},"
- "{{Trident, tm6010}");
+MODULE_SUPPORTED_DEVICE("{{Trident, tm5600},{Trident, tm6000},{Trident, tm6010}}");
static int debug;
@@ -292,13 +290,11 @@ static int register_dvb(struct tm6000_core *dev)
}
if (!dvb_attach(xc2028_attach, dvb->frontend, &cfg)) {
- printk(KERN_ERR "tm6000: couldn't register "
- "frontend (xc3028)\n");
+ printk(KERN_ERR "tm6000: couldn't register frontend (xc3028)\n");
ret = -EINVAL;
goto frontend_err;
}
- printk(KERN_INFO "tm6000: XC2028/3028 asked to be "
- "attached to frontend!\n");
+ printk(KERN_INFO "tm6000: XC2028/3028 asked to be attached to frontend!\n");
break;
}
case TUNER_XC5000: {
@@ -315,13 +311,11 @@ static int register_dvb(struct tm6000_core *dev)
}
if (!dvb_attach(xc5000_attach, dvb->frontend, &dev->i2c_adap, &cfg)) {
- printk(KERN_ERR "tm6000: couldn't register "
- "frontend (xc5000)\n");
+ printk(KERN_ERR "tm6000: couldn't register frontend (xc5000)\n");
ret = -EINVAL;
goto frontend_err;
}
- printk(KERN_INFO "tm6000: XC5000 asked to be "
- "attached to frontend!\n");
+ printk(KERN_INFO "tm6000: XC5000 asked to be attached to frontend!\n");
break;
}
}
diff --git a/drivers/media/usb/tm6000/tm6000-i2c.c b/drivers/media/usb/tm6000/tm6000-i2c.c
index c7e23e3dd75e..b01d3ee56e77 100644
--- a/drivers/media/usb/tm6000/tm6000-i2c.c
+++ b/drivers/media/usb/tm6000/tm6000-i2c.c
@@ -173,8 +173,7 @@ static int tm6000_i2c_xfer(struct i2c_adapter *i2c_adap,
* immediately after a 1 or 2 byte write to select
* a register. We cannot fulfil this request.
*/
- i2c_dprintk(2, " read without preceding write not"
- " supported");
+ i2c_dprintk(2, " read without preceding write not supported");
rc = -EOPNOTSUPP;
goto err;
} else if (i + 1 < num && msgs[i].len <= 2 &&
diff --git a/drivers/media/usb/tm6000/tm6000-stds.c b/drivers/media/usb/tm6000/tm6000-stds.c
index 93a4b2434b6e..4064a5e8fae1 100644
--- a/drivers/media/usb/tm6000/tm6000-stds.c
+++ b/drivers/media/usb/tm6000/tm6000-stds.c
@@ -464,8 +464,7 @@ static int tm6000_load_std(struct tm6000_core *dev, struct tm6000_reg_settings *
for (i = 0; set[i].req; i++) {
rc = tm6000_set_reg(dev, set[i].req, set[i].reg, set[i].value);
if (rc < 0) {
- printk(KERN_ERR "Error %i while setting "
- "req %d, reg %d to value %d\n",
+ printk(KERN_ERR "Error %i while setting req %d, reg %d to value %d\n",
rc, set[i].req, set[i].reg, set[i].value);
return rc;
}
diff --git a/drivers/media/usb/tm6000/tm6000-video.c b/drivers/media/usb/tm6000/tm6000-video.c
index dee7e7d3d47d..d9f3fa5db8dd 100644
--- a/drivers/media/usb/tm6000/tm6000-video.c
+++ b/drivers/media/usb/tm6000/tm6000-video.c
@@ -615,8 +615,7 @@ static int tm6000_prepare_isoc(struct tm6000_core *dev)
return -ENOMEM;
}
- dprintk(dev, V4L2_DEBUG_QUEUE, "Allocating %d x %d packets"
- " (%d bytes) of %d bytes each to handle %u size\n",
+ dprintk(dev, V4L2_DEBUG_QUEUE, "Allocating %d x %d packets (%d bytes) of %d bytes each to handle %u size\n",
max_packets, num_bufs, sb_size,
dev->isoc_in.maxsize, size);
@@ -939,8 +938,8 @@ static int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
fmt = format_by_fourcc(f->fmt.pix.pixelformat);
if (NULL == fmt) {
- dprintk(dev, 2, "Fourcc format (0x%08x)"
- " invalid.\n", f->fmt.pix.pixelformat);
+ dprintk(dev, 2, "Fourcc format (0x%08x) invalid.\n",
+ f->fmt.pix.pixelformat);
return -EINVAL;
}
@@ -1366,14 +1365,13 @@ static int __tm6000_open(struct file *file)
fh->width = dev->width;
fh->height = dev->height;
- dprintk(dev, V4L2_DEBUG_OPEN, "Open: fh=0x%08lx, dev=0x%08lx, "
- "dev->vidq=0x%08lx\n",
+ dprintk(dev, V4L2_DEBUG_OPEN, "Open: fh=0x%08lx, dev=0x%08lx, dev->vidq=0x%08lx\n",
(unsigned long)fh, (unsigned long)dev,
(unsigned long)&dev->vidq);
- dprintk(dev, V4L2_DEBUG_OPEN, "Open: list_empty "
- "queued=%d\n", list_empty(&dev->vidq.queued));
- dprintk(dev, V4L2_DEBUG_OPEN, "Open: list_empty "
- "active=%d\n", list_empty(&dev->vidq.active));
+ dprintk(dev, V4L2_DEBUG_OPEN, "Open: list_empty queued=%d\n",
+ list_empty(&dev->vidq.queued));
+ dprintk(dev, V4L2_DEBUG_OPEN, "Open: list_empty active=%d\n",
+ list_empty(&dev->vidq.active));
/* initialize hardware on analog mode */
rc = tm6000_init_analog_mode(dev);
diff --git a/drivers/media/usb/ttusb-budget/dvb-ttusb-budget.c b/drivers/media/usb/ttusb-budget/dvb-ttusb-budget.c
index d52d4a8d39ad..361e40b56045 100644
--- a/drivers/media/usb/ttusb-budget/dvb-ttusb-budget.c
+++ b/drivers/media/usb/ttusb-budget/dvb-ttusb-budget.c
@@ -767,8 +767,7 @@ static void ttusb_iso_irq(struct urb *urb)
for (i = 0; i < urb->number_of_packets; ++i) {
numpkt++;
if (time_after_eq(jiffies, lastj + HZ)) {
- dprintk("frames/s: %lu (ts: %d, stuff %d, "
- "sec: %d, invalid: %d, all: %d)\n",
+ dprintk("frames/s: %lu (ts: %d, stuff %d, sec: %d, invalid: %d, all: %d)\n",
numpkt * HZ / (jiffies - lastj),
numts, numstuff, numsec, numinvalid,
numts + numstuff + numsec + numinvalid);
diff --git a/drivers/media/usb/ttusb-dec/ttusb_dec.c b/drivers/media/usb/ttusb-dec/ttusb_dec.c
index 4e7671a3a1e4..fc0219f1b7df 100644
--- a/drivers/media/usb/ttusb-dec/ttusb_dec.c
+++ b/drivers/media/usb/ttusb-dec/ttusb_dec.c
@@ -36,7 +36,6 @@
#include "dmxdev.h"
#include "dvb_demux.h"
-#include "dvb_filter.h"
#include "dvb_frontend.h"
#include "dvb_net.h"
#include "ttusbdecfe.h"
@@ -92,6 +91,15 @@ enum ttusb_dec_interface {
TTUSB_DEC_INTERFACE_OUT
};
+typedef int (dvb_filter_pes2ts_cb_t) (void *, unsigned char *);
+
+struct dvb_filter_pes2ts {
+ unsigned char buf[188];
+ unsigned char cc;
+ dvb_filter_pes2ts_cb_t *cb;
+ void *priv;
+};
+
struct ttusb_dec {
enum ttusb_dec_model model;
char *model_name;
@@ -201,6 +209,54 @@ static u16 rc_keys[] = {
KEY_RADIO
};
+static void dvb_filter_pes2ts_init(struct dvb_filter_pes2ts *p2ts,
+ unsigned short pid,
+ dvb_filter_pes2ts_cb_t *cb, void *priv)
+{
+ unsigned char *buf=p2ts->buf;
+
+ buf[0]=0x47;
+ buf[1]=(pid>>8);
+ buf[2]=pid&0xff;
+ p2ts->cc=0;
+ p2ts->cb=cb;
+ p2ts->priv=priv;
+}
+
+static int dvb_filter_pes2ts(struct dvb_filter_pes2ts *p2ts,
+ unsigned char *pes, int len, int payload_start)
+{
+ unsigned char *buf=p2ts->buf;
+ int ret=0, rest;
+
+ //len=6+((pes[4]<<8)|pes[5]);
+
+ if (payload_start)
+ buf[1]|=0x40;
+ else
+ buf[1]&=~0x40;
+ while (len>=184) {
+ buf[3]=0x10|((p2ts->cc++)&0x0f);
+ memcpy(buf+4, pes, 184);
+ if ((ret=p2ts->cb(p2ts->priv, buf)))
+ return ret;
+ len-=184; pes+=184;
+ buf[1]&=~0x40;
+ }
+ if (!len)
+ return 0;
+ buf[3]=0x30|((p2ts->cc++)&0x0f);
+ rest=183-len;
+ if (rest) {
+ buf[5]=0x00;
+ if (rest-1)
+ memset(buf+6, 0xff, rest-1);
+ }
+ buf[4]=rest;
+ memcpy(buf+5+rest, pes, len);
+ return p2ts->cb(p2ts->priv, buf);
+}
+
static void ttusb_dec_set_model(struct ttusb_dec *dec,
enum ttusb_dec_model model);
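With dvb_filter.h removed, ttusb_dec now carries a private copy of the PES-to-TS packetizer shown above. Each 188-byte TS packet opens with sync byte 0x47, carries the payload_unit_start flag and 13-bit PID in bytes 1-2 and a 4-bit continuity counter in byte 3, then up to 184 payload bytes; an adaptation field pads out the final short packet. A hedged usage sketch (the PID and callback are illustrative, not the driver's):

	static int send_ts_packet(void *priv, unsigned char *ts)
	{
		/* forward one complete 188-byte TS packet */
		return 0;	/* non-zero would abort packetization */
	}

	struct dvb_filter_pes2ts p2ts;

	dvb_filter_pes2ts_init(&p2ts, 0x100 /* PID */, send_ts_packet, NULL);
	dvb_filter_pes2ts(&p2ts, pes_buf, pes_len, 1 /* starts a PES packet */);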
@@ -273,7 +329,7 @@ static int ttusb_dec_send_command(struct ttusb_dec *dec, const u8 command,
int param_length, const u8 params[],
int *result_length, u8 cmd_result[])
{
- int result, actual_len, i;
+ int result, actual_len;
u8 *b;
dprintk("%s\n", __func__);
@@ -297,10 +353,8 @@ static int ttusb_dec_send_command(struct ttusb_dec *dec, const u8 command,
memcpy(&b[4], params, param_length);
if (debug) {
- printk("%s: command: ", __func__);
- for (i = 0; i < param_length + 4; i++)
- printk("0x%02X ", b[i]);
- printk("\n");
+ printk(KERN_DEBUG "%s: command: %*ph\n",
+ __func__, param_length + 4, b);
}
result = usb_bulk_msg(dec->udev, dec->command_pipe, b,
@@ -325,10 +379,8 @@ static int ttusb_dec_send_command(struct ttusb_dec *dec, const u8 command,
return result;
} else {
if (debug) {
- printk("%s: result: ", __func__);
- for (i = 0; i < actual_len; i++)
- printk("0x%02X ", b[i]);
- printk("\n");
+ printk(KERN_DEBUG "%s: result: %*ph\n",
+ __func__, actual_len, b);
}
if (result_length)
@@ -652,8 +704,8 @@ static void ttusb_dec_process_urb_frame(struct ttusb_dec *dec, u8 *b,
dec->packet_payload_length = 2;
dec->packet_state = 7;
} else {
- printk("%s: unknown packet type: "
- "%02x%02x\n", __func__,
+ printk("%s: unknown packet type: %02x%02x\n",
+ __func__,
dec->packet[0], dec->packet[1]);
dec->packet_state = 0;
}
@@ -905,8 +957,8 @@ static int ttusb_dec_start_iso_xfer(struct ttusb_dec *dec)
for (i = 0; i < ISO_BUF_COUNT; i++) {
if ((result = usb_submit_urb(dec->iso_urb[i],
GFP_ATOMIC))) {
- printk("%s: failed urb submission %d: "
- "error %d\n", __func__, i, result);
+ printk("%s: failed urb submission %d: error %d\n",
+ __func__, i, result);
while (i) {
usb_kill_urb(dec->iso_urb[i - 1]);
@@ -1319,8 +1371,7 @@ static int ttusb_dec_boot_dsp(struct ttusb_dec *dec)
memcpy(&tmp, &firmware[56], 4);
crc32_check = ntohl(tmp);
if (crc32_csum != crc32_check) {
- printk("%s: crc32 check of DSP code failed (calculated "
- "0x%08x != 0x%08x in file), file invalid.\n",
+ printk("%s: crc32 check of DSP code failed (calculated 0x%08x != 0x%08x in file), file invalid.\n",
__func__, crc32_csum, crc32_check);
release_firmware(fw_entry);
return -ENOENT;
@@ -1397,11 +1448,9 @@ static int ttusb_dec_init_stb(struct ttusb_dec *dec)
if (!mode) {
if (version == 0xABCDEFAB)
- printk(KERN_INFO "ttusb_dec: no version "
- "info in Firmware\n");
+ printk(KERN_INFO "ttusb_dec: no version info in Firmware\n");
else
- printk(KERN_INFO "ttusb_dec: Firmware "
- "%x.%02x%c%c\n",
+ printk(KERN_INFO "ttusb_dec: Firmware %x.%02x%c%c\n",
version >> 24, (version >> 16) & 0xff,
(version >> 8) & 0xff, version & 0xff);
@@ -1425,8 +1474,7 @@ static int ttusb_dec_init_stb(struct ttusb_dec *dec)
ttusb_dec_set_model(dec, TTUSB_DEC2540T);
break;
default:
- printk(KERN_ERR "%s: unknown model returned "
- "by firmware (%08x) - please report\n",
+ printk(KERN_ERR "%s: unknown model returned by firmware (%08x) - please report\n",
__func__, model);
return -ENOENT;
}
diff --git a/drivers/media/usb/ttusb-dec/ttusbdecfe.c b/drivers/media/usb/ttusb-dec/ttusbdecfe.c
index 8781335ab92f..2d9444905fdb 100644
--- a/drivers/media/usb/ttusb-dec/ttusbdecfe.c
+++ b/drivers/media/usb/ttusb-dec/ttusbdecfe.c
@@ -205,7 +205,7 @@ static void ttusbdecfe_release(struct dvb_frontend* fe)
kfree(state);
}
-static struct dvb_frontend_ops ttusbdecfe_dvbt_ops;
+static const struct dvb_frontend_ops ttusbdecfe_dvbt_ops;
struct dvb_frontend* ttusbdecfe_dvbt_attach(const struct ttusbdecfe_config* config)
{
@@ -225,7 +225,7 @@ struct dvb_frontend* ttusbdecfe_dvbt_attach(const struct ttusbdecfe_config* conf
return &state->frontend;
}
-static struct dvb_frontend_ops ttusbdecfe_dvbs_ops;
+static const struct dvb_frontend_ops ttusbdecfe_dvbs_ops;
struct dvb_frontend* ttusbdecfe_dvbs_attach(const struct ttusbdecfe_config* config)
{
@@ -247,7 +247,7 @@ struct dvb_frontend* ttusbdecfe_dvbs_attach(const struct ttusbdecfe_config* conf
return &state->frontend;
}
-static struct dvb_frontend_ops ttusbdecfe_dvbt_ops = {
+static const struct dvb_frontend_ops ttusbdecfe_dvbt_ops = {
.delsys = { SYS_DVBT },
.info = {
.name = "TechnoTrend/Hauppauge DEC2000-t Frontend",
@@ -270,7 +270,7 @@ static struct dvb_frontend_ops ttusbdecfe_dvbt_ops = {
.read_status = ttusbdecfe_dvbt_read_status,
};
-static struct dvb_frontend_ops ttusbdecfe_dvbs_ops = {
+static const struct dvb_frontend_ops ttusbdecfe_dvbs_ops = {
.delsys = { SYS_DVBS },
.info = {
.name = "TechnoTrend/Hauppauge DEC3000-s Frontend",
diff --git a/drivers/media/usb/usbtv/usbtv-core.c b/drivers/media/usb/usbtv/usbtv-core.c
index dc76fd41e00f..ceb953be0770 100644
--- a/drivers/media/usb/usbtv/usbtv-core.c
+++ b/drivers/media/usb/usbtv/usbtv-core.c
@@ -71,6 +71,7 @@ static int usbtv_probe(struct usb_interface *intf,
int size;
struct device *dev = &intf->dev;
struct usbtv *usbtv;
+ struct usb_host_endpoint *ep;
/* Checks that the device is what we think it is. */
if (intf->num_altsetting != 2)
@@ -78,10 +79,12 @@ static int usbtv_probe(struct usb_interface *intf,
if (intf->altsetting[1].desc.bNumEndpoints != 4)
return -ENODEV;
+ ep = &intf->altsetting[1].endpoint[0];
+
/* Packet size is split into 11 bits of base size and count of
* extra multiplies of it.*/
- size = usb_endpoint_maxp(&intf->altsetting[1].endpoint[0].desc);
- size = (size & 0x07ff) * (((size & 0x1800) >> 11) + 1);
+ size = usb_endpoint_maxp(&ep->desc);
+ size = (size & 0x07ff) * usb_endpoint_maxp_mult(&ep->desc);
/* Device structure */
usbtv = kzalloc(sizeof(struct usbtv), GFP_KERNEL);
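usb_endpoint_maxp_mult() encapsulates the wMaxPacketSize decoding that was done by hand before: for high-speed, high-bandwidth isochronous endpoints, bits 10:0 carry the base packet size and bits 12:11 the number of additional transactions per microframe. A sketch of what the helper computes (mirroring, not quoting, include/linux/usb.h):

        static inline int maxp_mult_sketch(u16 wMaxPacketSize)
        {
                return 1 + ((wMaxPacketSize >> 11) & 0x3);      /* 1, 2 or 3 */
        }

        /* e.g. wMaxPacketSize 0x1400: base 1024 bytes, mult 3
         * -> 3072 bytes per microframe */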
diff --git a/drivers/media/usb/usbtv/usbtv-video.c b/drivers/media/usb/usbtv/usbtv-video.c
index 6cbe4a245c9f..d3b6d3dfaa09 100644
--- a/drivers/media/usb/usbtv/usbtv-video.c
+++ b/drivers/media/usb/usbtv/usbtv-video.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013 Lubomir Rintel
+ * Copyright (c) 2013,2016 Lubomir Rintel
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -259,6 +259,10 @@ static int usbtv_setup_capture(struct usbtv *usbtv)
if (ret)
return ret;
+ ret = v4l2_ctrl_handler_setup(&usbtv->ctrl);
+ if (ret)
+ return ret;
+
return 0;
}
@@ -696,11 +700,91 @@ static const struct vb2_ops usbtv_vb2_ops = {
.stop_streaming = usbtv_stop_streaming,
};
+static int usbtv_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct usbtv *usbtv = container_of(ctrl->handler, struct usbtv,
+ ctrl);
+ u8 *data;
+ u16 index, size;
+ int ret;
+
+ data = kmalloc(3, GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ /*
+ * Read in the current brightness/contrast registers. We need them
+ * both, because the values are for some reason interleaved.
+ */
+ if (ctrl->id == V4L2_CID_BRIGHTNESS || ctrl->id == V4L2_CID_CONTRAST) {
+ ret = usb_control_msg(usbtv->udev,
+ usb_sndctrlpipe(usbtv->udev, 0), USBTV_CONTROL_REG,
+ USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+ 0, USBTV_BASE + 0x0244, (void *)data, 3, 0);
+ if (ret < 0)
+ goto error;
+ }
+
+ switch (ctrl->id) {
+ case V4L2_CID_BRIGHTNESS:
+ index = USBTV_BASE + 0x0244;
+ size = 3;
+ data[0] &= 0xf0;
+ data[0] |= (ctrl->val >> 8) & 0xf;
+ data[2] = ctrl->val & 0xff;
+ break;
+ case V4L2_CID_CONTRAST:
+ index = USBTV_BASE + 0x0244;
+ size = 3;
+ data[0] &= 0x0f;
+ data[0] |= (ctrl->val >> 4) & 0xf0;
+ data[1] = ctrl->val & 0xff;
+ break;
+ case V4L2_CID_SATURATION:
+ index = USBTV_BASE + 0x0242;
+ data[0] = ctrl->val >> 8;
+ data[1] = ctrl->val & 0xff;
+ size = 2;
+ break;
+ case V4L2_CID_HUE:
+ index = USBTV_BASE + 0x0240;
+ size = 2;
+ if (ctrl->val > 0) {
+ data[0] = 0x92 + (ctrl->val >> 8);
+ data[1] = ctrl->val & 0xff;
+ } else {
+ data[0] = 0x82 + (-ctrl->val >> 8);
+ data[1] = -ctrl->val & 0xff;
+ }
+ break;
+ default:
+ kfree(data);
+ return -EINVAL;
+ }
+
+ ret = usb_control_msg(usbtv->udev, usb_sndctrlpipe(usbtv->udev, 0),
+ USBTV_CONTROL_REG,
+ USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+ 0, index, (void *)data, size, 0);
+
+error:
+ if (ret < 0)
+ dev_warn(usbtv->dev, "Failed to submit a control request.\n");
+
+ kfree(data);
+ return ret;
+}
+
+static const struct v4l2_ctrl_ops usbtv_ctrl_ops = {
+ .s_ctrl = usbtv_s_ctrl,
+};
+
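The read-modify-write in usbtv_s_ctrl() is needed because brightness and contrast share the 3-byte register block at USBTV_BASE + 0x0244. The packing, as inferred from the handler above (the register layout is an assumption drawn from this code, not from a datasheet):

        static void pack_bri_con(u8 reg[3], u16 brightness, u16 contrast)
        {
                reg[0] = ((contrast >> 4) & 0xf0) | ((brightness >> 8) & 0x0f);
                reg[1] = contrast & 0xff;       /* contrast low byte */
                reg[2] = brightness & 0xff;     /* brightness low byte */
        }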
static void usbtv_release(struct v4l2_device *v4l2_dev)
{
struct usbtv *usbtv = container_of(v4l2_dev, struct usbtv, v4l2_dev);
v4l2_device_unregister(&usbtv->v4l2_dev);
+ v4l2_ctrl_handler_free(&usbtv->ctrl);
vb2_queue_release(&usbtv->vb2q);
kfree(usbtv);
}
@@ -731,7 +815,24 @@ int usbtv_video_init(struct usbtv *usbtv)
return ret;
}
+ /* controls */
+ v4l2_ctrl_handler_init(&usbtv->ctrl, 4);
+ v4l2_ctrl_new_std(&usbtv->ctrl, &usbtv_ctrl_ops,
+ V4L2_CID_CONTRAST, 0, 0x3ff, 1, 0x1d0);
+ v4l2_ctrl_new_std(&usbtv->ctrl, &usbtv_ctrl_ops,
+ V4L2_CID_BRIGHTNESS, 0, 0x3ff, 1, 0x1c0);
+ v4l2_ctrl_new_std(&usbtv->ctrl, &usbtv_ctrl_ops,
+ V4L2_CID_SATURATION, 0, 0x3ff, 1, 0x200);
+ v4l2_ctrl_new_std(&usbtv->ctrl, &usbtv_ctrl_ops,
+ V4L2_CID_HUE, -0xdff, 0xdff, 1, 0x000);
+ ret = usbtv->ctrl.error;
+ if (ret < 0) {
+ dev_warn(usbtv->dev, "Could not initialize controls\n");
+ goto ctrl_fail;
+ }
+
/* v4l2 structure */
+ usbtv->v4l2_dev.ctrl_handler = &usbtv->ctrl;
usbtv->v4l2_dev.release = usbtv_release;
ret = v4l2_device_register(usbtv->dev, &usbtv->v4l2_dev);
if (ret < 0) {
@@ -760,6 +861,8 @@ int usbtv_video_init(struct usbtv *usbtv)
vdev_fail:
v4l2_device_unregister(&usbtv->v4l2_dev);
v4l2_fail:
+ctrl_fail:
+ v4l2_ctrl_handler_free(&usbtv->ctrl);
vb2_queue_release(&usbtv->vb2q);
return ret;
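The ctrl_fail label added above follows the usual kernel unwind ladder: each label releases exactly what was set up before the failing step, in reverse order of initialization. Generic sketch of the idiom:

        ret = setup_a();
        if (ret)
                return ret;
        ret = setup_b();
        if (ret)
                goto undo_a;
        return 0;

        undo_a:
                teardown_a();
                return ret;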
diff --git a/drivers/media/usb/usbtv/usbtv.h b/drivers/media/usb/usbtv/usbtv.h
index 011f9fdc77a9..0231e449877e 100644
--- a/drivers/media/usb/usbtv/usbtv.h
+++ b/drivers/media/usb/usbtv/usbtv.h
@@ -38,6 +38,7 @@
#include <linux/usb.h>
#include <media/v4l2-device.h>
+#include <media/v4l2-ctrls.h>
#include <media/videobuf2-v4l2.h>
#include <media/videobuf2-vmalloc.h>
@@ -45,6 +46,7 @@
#define USBTV_VIDEO_ENDP 0x81
#define USBTV_AUDIO_ENDP 0x83
#define USBTV_BASE 0xc000
+#define USBTV_CONTROL_REG 11
#define USBTV_REQUEST_REG 12
/* Number of concurrent isochronous urbs submitted.
@@ -87,6 +89,7 @@ struct usbtv {
/* video */
struct v4l2_device v4l2_dev;
+ struct v4l2_ctrl_handler ctrl;
struct video_device vdev;
struct vb2_queue vb2q;
struct mutex v4l2_lock;
diff --git a/drivers/media/usb/usbvision/usbvision-core.c b/drivers/media/usb/usbvision/usbvision-core.c
index c23bf73a68ea..bf041a9e69db 100644
--- a/drivers/media/usb/usbvision/usbvision-core.c
+++ b/drivers/media/usb/usbvision/usbvision-core.c
@@ -1656,8 +1656,8 @@ static int usbvision_set_video_format(struct usb_usbvision *usbvision, int forma
(__u16) USBVISION_FILT_CONT, value, 2, HZ);
if (rc < 0) {
- printk(KERN_ERR "%s: ERROR=%d. USBVISION stopped - "
- "reconnect or reload driver.\n", proc, rc);
+ printk(KERN_ERR "%s: ERROR=%d. USBVISION stopped - reconnect or reload driver.\n",
+ proc, rc);
}
usbvision->isoc_mode = format;
return rc;
@@ -1890,8 +1890,8 @@ static int usbvision_set_compress_params(struct usb_usbvision *usbvision)
(__u16) USBVISION_INTRA_CYC, value, 5, HZ);
if (rc < 0) {
- printk(KERN_ERR "%sERROR=%d. USBVISION stopped - "
- "reconnect or reload driver.\n", proc, rc);
+ printk(KERN_ERR "%sERROR=%d. USBVISION stopped - reconnect or reload driver.\n",
+ proc, rc);
return rc;
}
@@ -1921,8 +1921,8 @@ static int usbvision_set_compress_params(struct usb_usbvision *usbvision)
(__u16) USBVISION_PCM_THR1, value, 6, HZ);
if (rc < 0) {
- printk(KERN_ERR "%sERROR=%d. USBVISION stopped - "
- "reconnect or reload driver.\n", proc, rc);
+ printk(KERN_ERR "%sERROR=%d. USBVISION stopped - reconnect or reload driver.\n",
+ proc, rc);
}
return rc;
}
@@ -1960,8 +1960,8 @@ int usbvision_set_input(struct usb_usbvision *usbvision)
rc = usbvision_write_reg(usbvision, USBVISION_VIN_REG1, value[0]);
if (rc < 0) {
- printk(KERN_ERR "%sERROR=%d. USBVISION stopped - "
- "reconnect or reload driver.\n", proc, rc);
+ printk(KERN_ERR "%sERROR=%d. USBVISION stopped - reconnect or reload driver.\n",
+ proc, rc);
return rc;
}
@@ -2026,8 +2026,8 @@ int usbvision_set_input(struct usb_usbvision *usbvision)
USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_ENDPOINT, 0,
(__u16) USBVISION_LXSIZE_I, value, 8, HZ);
if (rc < 0) {
- printk(KERN_ERR "%sERROR=%d. USBVISION stopped - "
- "reconnect or reload driver.\n", proc, rc);
+ printk(KERN_ERR "%sERROR=%d. USBVISION stopped - reconnect or reload driver.\n",
+ proc, rc);
return rc;
}
diff --git a/drivers/media/usb/usbvision/usbvision-video.c b/drivers/media/usb/usbvision/usbvision-video.c
index c8b4eb2ee7a2..a7529196c327 100644
--- a/drivers/media/usb/usbvision/usbvision-video.c
+++ b/drivers/media/usb/usbvision/usbvision-video.c
@@ -1456,8 +1456,8 @@ static int usbvision_probe(struct usb_interface *intf,
}
if (interface->desc.bNumEndpoints < 2) {
- dev_err(&intf->dev, "interface %d has %d endpoints, but must"
- " have minimum 2\n", ifnum, interface->desc.bNumEndpoints);
+ dev_err(&intf->dev, "interface %d has %d endpoints, but must have minimum 2\n",
+ ifnum, interface->desc.bNumEndpoints);
ret = -ENODEV;
goto err_usb;
}
diff --git a/drivers/media/usb/uvc/uvc_driver.c b/drivers/media/usb/uvc/uvc_driver.c
index 302e284a95eb..04bf35063c4c 100644
--- a/drivers/media/usb/uvc/uvc_driver.c
+++ b/drivers/media/usb/uvc/uvc_driver.c
@@ -168,6 +168,26 @@ static struct uvc_format_desc uvc_fmts[] = {
.guid = UVC_GUID_FORMAT_RW10,
.fcc = V4L2_PIX_FMT_SRGGB10P,
},
+ {
+ .name = "Bayer 16-bit (SBGGR16)",
+ .guid = UVC_GUID_FORMAT_BG16,
+ .fcc = V4L2_PIX_FMT_SBGGR16,
+ },
+ {
+ .name = "Bayer 16-bit (SGBRG16)",
+ .guid = UVC_GUID_FORMAT_GB16,
+ .fcc = V4L2_PIX_FMT_SGBRG16,
+ },
+ {
+ .name = "Bayer 16-bit (SRGGB16)",
+ .guid = UVC_GUID_FORMAT_RG16,
+ .fcc = V4L2_PIX_FMT_SRGGB16,
+ },
+ {
+ .name = "Bayer 16-bit (SGRBG16)",
+ .guid = UVC_GUID_FORMAT_GR16,
+ .fcc = V4L2_PIX_FMT_SGRBG16,
+ },
};
/* ------------------------------------------------------------------------
@@ -1309,7 +1329,7 @@ static int uvc_scan_chain_entity(struct uvc_video_chain *chain,
switch (UVC_ENTITY_TYPE(entity)) {
case UVC_VC_EXTENSION_UNIT:
if (uvc_trace_param & UVC_TRACE_PROBE)
- printk(" <- XU %d", entity->id);
+ printk(KERN_CONT " <- XU %d", entity->id);
if (entity->bNrInPins != 1) {
uvc_trace(UVC_TRACE_DESCR, "Extension unit %d has more "
@@ -1321,7 +1341,7 @@ static int uvc_scan_chain_entity(struct uvc_video_chain *chain,
case UVC_VC_PROCESSING_UNIT:
if (uvc_trace_param & UVC_TRACE_PROBE)
- printk(" <- PU %d", entity->id);
+ printk(KERN_CONT " <- PU %d", entity->id);
if (chain->processing != NULL) {
uvc_trace(UVC_TRACE_DESCR, "Found multiple "
@@ -1334,7 +1354,7 @@ static int uvc_scan_chain_entity(struct uvc_video_chain *chain,
case UVC_VC_SELECTOR_UNIT:
if (uvc_trace_param & UVC_TRACE_PROBE)
- printk(" <- SU %d", entity->id);
+ printk(KERN_CONT " <- SU %d", entity->id);
/* Single-input selector units are ignored. */
if (entity->bNrInPins == 1)
@@ -1353,7 +1373,7 @@ static int uvc_scan_chain_entity(struct uvc_video_chain *chain,
case UVC_ITT_CAMERA:
case UVC_ITT_MEDIA_TRANSPORT_INPUT:
if (uvc_trace_param & UVC_TRACE_PROBE)
- printk(" <- IT %d\n", entity->id);
+ printk(KERN_CONT " <- IT %d\n", entity->id);
break;
@@ -1361,17 +1381,17 @@ static int uvc_scan_chain_entity(struct uvc_video_chain *chain,
case UVC_OTT_DISPLAY:
case UVC_OTT_MEDIA_TRANSPORT_OUTPUT:
if (uvc_trace_param & UVC_TRACE_PROBE)
- printk(" OT %d", entity->id);
+ printk(KERN_CONT " OT %d", entity->id);
break;
case UVC_TT_STREAMING:
if (UVC_ENTITY_IS_ITERM(entity)) {
if (uvc_trace_param & UVC_TRACE_PROBE)
- printk(" <- IT %d\n", entity->id);
+ printk(KERN_CONT " <- IT %d\n", entity->id);
} else {
if (uvc_trace_param & UVC_TRACE_PROBE)
- printk(" OT %d", entity->id);
+ printk(KERN_CONT " OT %d", entity->id);
}
break;
@@ -1416,9 +1436,9 @@ static int uvc_scan_chain_forward(struct uvc_video_chain *chain,
list_add_tail(&forward->chain, &chain->entities);
if (uvc_trace_param & UVC_TRACE_PROBE) {
if (!found)
- printk(" (->");
+ printk(KERN_CONT " (->");
- printk(" XU %d", forward->id);
+ printk(KERN_CONT " XU %d", forward->id);
found = 1;
}
break;
@@ -1436,16 +1456,16 @@ static int uvc_scan_chain_forward(struct uvc_video_chain *chain,
list_add_tail(&forward->chain, &chain->entities);
if (uvc_trace_param & UVC_TRACE_PROBE) {
if (!found)
- printk(" (->");
+ printk(KERN_CONT " (->");
- printk(" OT %d", forward->id);
+ printk(KERN_CONT " OT %d", forward->id);
found = 1;
}
break;
}
}
if (found)
- printk(")");
+ printk(KERN_CONT ")");
return 0;
}
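The KERN_CONT annotations are needed because a bare printk() now starts a new log record; fragments meant to extend the previous line must say so explicitly. Sketch:

        printk(KERN_DEBUG "chain:");
        printk(KERN_CONT " <- PU %d", 2);
        printk(KERN_CONT " <- IT %d\n", 1);
        /* one line in the log: "chain: <- PU 2 <- IT 1" */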
@@ -1471,7 +1491,7 @@ static int uvc_scan_chain_backward(struct uvc_video_chain *chain,
}
if (uvc_trace_param & UVC_TRACE_PROBE)
- printk(" <- IT");
+ printk(KERN_CONT " <- IT");
chain->selector = entity;
for (i = 0; i < entity->bNrInPins; ++i) {
@@ -1485,14 +1505,14 @@ static int uvc_scan_chain_backward(struct uvc_video_chain *chain,
}
if (uvc_trace_param & UVC_TRACE_PROBE)
- printk(" %d", term->id);
+ printk(KERN_CONT " %d", term->id);
list_add_tail(&term->chain, &chain->entities);
uvc_scan_chain_forward(chain, term, entity);
}
if (uvc_trace_param & UVC_TRACE_PROBE)
- printk("\n");
+ printk(KERN_CONT "\n");
id = 0;
break;
@@ -1595,6 +1615,114 @@ static const char *uvc_print_chain(struct uvc_video_chain *chain)
return buffer;
}
+static struct uvc_video_chain *uvc_alloc_chain(struct uvc_device *dev)
+{
+ struct uvc_video_chain *chain;
+
+ chain = kzalloc(sizeof(*chain), GFP_KERNEL);
+ if (chain == NULL)
+ return NULL;
+
+ INIT_LIST_HEAD(&chain->entities);
+ mutex_init(&chain->ctrl_mutex);
+ chain->dev = dev;
+ v4l2_prio_init(&chain->prio);
+
+ return chain;
+}
+
+/*
+ * Fallback heuristic for devices that don't connect units and terminals in a
+ * valid chain.
+ *
+ * Some devices have invalid baSourceID references, causing uvc_scan_chain()
+ * to fail. If we simply take the entities we can find and connect them in
+ * the most sensible chain we can construct, such devices turn out to work
+ * anyway.
+ * Note: This heuristic assumes there is a single chain.
+ *
+ * At the time of writing, devices known to have such a broken chain are
+ * - Acer Integrated Camera (5986:055a)
+ * - Realtek rtl157a7 (0bda:57a7)
+ */
+static int uvc_scan_fallback(struct uvc_device *dev)
+{
+ struct uvc_video_chain *chain;
+ struct uvc_entity *iterm = NULL;
+ struct uvc_entity *oterm = NULL;
+ struct uvc_entity *entity;
+ struct uvc_entity *prev;
+
+ /*
+ * Start by locating the input and output terminals. We only support
+ * devices with exactly one of each for now.
+ */
+ list_for_each_entry(entity, &dev->entities, list) {
+ if (UVC_ENTITY_IS_ITERM(entity)) {
+ if (iterm)
+ return -EINVAL;
+ iterm = entity;
+ }
+
+ if (UVC_ENTITY_IS_OTERM(entity)) {
+ if (oterm)
+ return -EINVAL;
+ oterm = entity;
+ }
+ }
+
+ if (iterm == NULL || oterm == NULL)
+ return -EINVAL;
+
+ /* Allocate the chain and fill it. */
+ chain = uvc_alloc_chain(dev);
+ if (chain == NULL)
+ return -ENOMEM;
+
+ if (uvc_scan_chain_entity(chain, oterm) < 0)
+ goto error;
+
+ prev = oterm;
+
+ /*
+ * Add all Processing and Extension Units with two pads. The order
+ * doesn't matter much; use reverse list traversal to connect units in
+ * UVC descriptor order, since the chain is built from output to input.
+ * This makes units appear in the order intended by the manufacturer on
+ * the cameras known to require this heuristic.
+ */
+ list_for_each_entry_reverse(entity, &dev->entities, list) {
+ if (entity->type != UVC_VC_PROCESSING_UNIT &&
+ entity->type != UVC_VC_EXTENSION_UNIT)
+ continue;
+
+ if (entity->num_pads != 2)
+ continue;
+
+ if (uvc_scan_chain_entity(chain, entity) < 0)
+ goto error;
+
+ prev->baSourceID[0] = entity->id;
+ prev = entity;
+ }
+
+ if (uvc_scan_chain_entity(chain, iterm) < 0)
+ goto error;
+
+ prev->baSourceID[0] = iterm->id;
+
+ list_add_tail(&chain->list, &dev->chains);
+
+ uvc_trace(UVC_TRACE_PROBE,
+ "Found a video chain by fallback heuristic (%s).\n",
+ uvc_print_chain(chain));
+
+ return 0;
+
+error:
+ kfree(chain);
+ return -EINVAL;
+}
+
/*
* Scan the device for video chains and register video devices.
*
@@ -1617,15 +1745,10 @@ static int uvc_scan_device(struct uvc_device *dev)
if (term->chain.next || term->chain.prev)
continue;
- chain = kzalloc(sizeof(*chain), GFP_KERNEL);
+ chain = uvc_alloc_chain(dev);
if (chain == NULL)
return -ENOMEM;
- INIT_LIST_HEAD(&chain->entities);
- mutex_init(&chain->ctrl_mutex);
- chain->dev = dev;
- v4l2_prio_init(&chain->prio);
-
term->flags |= UVC_ENTITY_FLAG_DEFAULT;
if (uvc_scan_chain(chain, term) < 0) {
@@ -1639,6 +1762,9 @@ static int uvc_scan_device(struct uvc_device *dev)
list_add_tail(&chain->list, &dev->chains);
}
+ if (list_empty(&dev->chains))
+ uvc_scan_fallback(dev);
+
if (list_empty(&dev->chains)) {
uvc_printk(KERN_INFO, "No valid video chain found.\n");
return -1;
@@ -2564,6 +2690,15 @@ static struct usb_device_id uvc_ids[] = {
.bInterfaceSubClass = 1,
.bInterfaceProtocol = 0,
.driver_info = UVC_QUIRK_FORCE_Y8 },
+ /* Oculus VR Rift Sensor */
+ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE
+ | USB_DEVICE_ID_MATCH_INT_INFO,
+ .idVendor = 0x2833,
+ .idProduct = 0x0211,
+ .bInterfaceClass = USB_CLASS_VENDOR_SPEC,
+ .bInterfaceSubClass = 1,
+ .bInterfaceProtocol = 0,
+ .driver_info = UVC_QUIRK_FORCE_Y8 },
/* Generic USB Video Class */
{ USB_INTERFACE_INFO(USB_CLASS_VIDEO, 1, UVC_PC_PROTOCOL_UNDEFINED) },
{ USB_INTERFACE_INFO(USB_CLASS_VIDEO, 1, UVC_PC_PROTOCOL_15) },
diff --git a/drivers/media/usb/uvc/uvc_v4l2.c b/drivers/media/usb/uvc/uvc_v4l2.c
index 05eed4be25df..3e7e283a44a8 100644
--- a/drivers/media/usb/uvc/uvc_v4l2.c
+++ b/drivers/media/usb/uvc/uvc_v4l2.c
@@ -66,19 +66,14 @@ static int uvc_ioctl_ctrl_map(struct uvc_video_chain *chain,
if (xmap->menu_count == 0 ||
xmap->menu_count > UVC_MAX_CONTROL_MENU_ENTRIES) {
ret = -EINVAL;
- goto done;
+ goto free_map;
}
size = xmap->menu_count * sizeof(*map->menu_info);
- map->menu_info = kmalloc(size, GFP_KERNEL);
- if (map->menu_info == NULL) {
- ret = -ENOMEM;
- goto done;
- }
-
- if (copy_from_user(map->menu_info, xmap->menu_info, size)) {
- ret = -EFAULT;
- goto done;
+ map->menu_info = memdup_user(xmap->menu_info, size);
+ if (IS_ERR(map->menu_info)) {
+ ret = PTR_ERR(map->menu_info);
+ goto free_map;
}
map->menu_count = xmap->menu_count;
@@ -88,13 +83,13 @@ static int uvc_ioctl_ctrl_map(struct uvc_video_chain *chain,
uvc_trace(UVC_TRACE_CONTROL, "Unsupported V4L2 control type "
"%u.\n", xmap->v4l2_type);
ret = -ENOTTY;
- goto done;
+ goto free_map;
}
ret = uvc_ctrl_add_mapping(chain, map);
-done:
kfree(map->menu_info);
+free_map:
kfree(map);
return ret;
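memdup_user() bundles the allocation, copy_from_user() and error handling that the removed lines open-coded, returning an ERR_PTR on failure. The pattern:

        void *buf = memdup_user(user_ptr, size);

        if (IS_ERR(buf))
                return PTR_ERR(buf);    /* -ENOMEM or -EFAULT */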
diff --git a/drivers/media/usb/uvc/uvc_video.c b/drivers/media/usb/uvc/uvc_video.c
index b5589d5f5da4..f3c1c852e401 100644
--- a/drivers/media/usb/uvc/uvc_video.c
+++ b/drivers/media/usb/uvc/uvc_video.c
@@ -1467,6 +1467,7 @@ static unsigned int uvc_endpoint_max_bpi(struct usb_device *dev,
struct usb_host_endpoint *ep)
{
u16 psize;
+ u16 mult;
switch (dev->speed) {
case USB_SPEED_SUPER:
@@ -1474,7 +1475,8 @@ static unsigned int uvc_endpoint_max_bpi(struct usb_device *dev,
return le16_to_cpu(ep->ss_ep_comp.wBytesPerInterval);
case USB_SPEED_HIGH:
psize = usb_endpoint_maxp(&ep->desc);
- return (psize & 0x07ff) * (1 + ((psize >> 11) & 3));
+ mult = usb_endpoint_maxp_mult(&ep->desc);
+ return (psize & 0x07ff) * mult;
case USB_SPEED_WIRELESS:
psize = usb_endpoint_maxp(&ep->desc);
return psize;
@@ -1551,7 +1553,7 @@ static int uvc_init_video_bulk(struct uvc_streaming *stream,
u16 psize;
u32 size;
- psize = usb_endpoint_maxp(&ep->desc) & 0x7ff;
+ psize = usb_endpoint_maxp(&ep->desc);
size = stream->ctrl.dwMaxPayloadTransferSize;
stream->bulk.max_payload_size = size;
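Dropping the & 0x7ff here relies on the updated usb_endpoint_maxp() semantics that landed together with usb_endpoint_maxp_mult(): the helper now returns only the base packet size, and the transaction multiplier is queried separately. Sketch of the split:

        psize = usb_endpoint_maxp(&ep->desc);           /* bits 10:0 only */
        mult  = usb_endpoint_maxp_mult(&ep->desc);      /* 1, 2 or 3 */
        bytes_per_interval = psize * mult;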
diff --git a/drivers/media/usb/uvc/uvcvideo.h b/drivers/media/usb/uvc/uvcvideo.h
index 7e4d3eea371b..3d6cc62f3cd2 100644
--- a/drivers/media/usb/uvc/uvcvideo.h
+++ b/drivers/media/usb/uvc/uvcvideo.h
@@ -106,6 +106,18 @@
#define UVC_GUID_FORMAT_RGGB \
{ 'R', 'G', 'G', 'B', 0x00, 0x00, 0x10, 0x00, \
0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
+#define UVC_GUID_FORMAT_BG16 \
+ { 'B', 'G', '1', '6', 0x00, 0x00, 0x10, 0x00, \
+ 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
+#define UVC_GUID_FORMAT_GB16 \
+ { 'G', 'B', '1', '6', 0x00, 0x00, 0x10, 0x00, \
+ 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
+#define UVC_GUID_FORMAT_RG16 \
+ { 'R', 'G', '1', '6', 0x00, 0x00, 0x10, 0x00, \
+ 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
+#define UVC_GUID_FORMAT_GR16 \
+ { 'G', 'R', '1', '6', 0x00, 0x00, 0x10, 0x00, \
+ 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
#define UVC_GUID_FORMAT_RGBP \
{ 'R', 'G', 'B', 'P', 0x00, 0x00, 0x10, 0x00, \
0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
diff --git a/drivers/media/usb/zr364xx/zr364xx.c b/drivers/media/usb/zr364xx/zr364xx.c
index cc128db85723..3950708cbb32 100644
--- a/drivers/media/usb/zr364xx/zr364xx.c
+++ b/drivers/media/usb/zr364xx/zr364xx.c
@@ -633,8 +633,7 @@ static int zr364xx_read_video_callback(struct zr364xx_camera *cam,
} else {
if (frm->cur_size + purb->actual_length > MAX_FRAME_SIZE) {
dev_info(&cam->udev->dev,
- "%s: buffer (%d bytes) too small to hold "
- "frame data. Discarding frame data.\n",
+ "%s: buffer (%d bytes) too small to hold frame data. Discarding frame data.\n",
__func__, MAX_FRAME_SIZE);
} else {
pdest += frm->cur_size;
@@ -1373,8 +1372,7 @@ static int zr364xx_board_init(struct zr364xx_camera *cam)
&cam->buffer.frame[i], i,
cam->buffer.frame[i].lpvbits);
if (cam->buffer.frame[i].lpvbits == NULL) {
- printk(KERN_INFO KBUILD_MODNAME ": out of memory. "
- "Using less frames\n");
+ printk(KERN_INFO KBUILD_MODNAME ": out of memory. Using fewer frames\n");
break;
}
}
diff --git a/drivers/media/v4l2-core/Kconfig b/drivers/media/v4l2-core/Kconfig
index 367523a3c774..6b1b78ff1417 100644
--- a/drivers/media/v4l2-core/Kconfig
+++ b/drivers/media/v4l2-core/Kconfig
@@ -6,6 +6,7 @@
config VIDEO_V4L2
tristate
depends on (I2C || I2C=n) && VIDEO_DEV
+ select RATIONAL
default (I2C || I2C=n) && VIDEO_DEV
config VIDEO_ADV_DEBUG
diff --git a/drivers/media/v4l2-core/tuner-core.c b/drivers/media/v4l2-core/tuner-core.c
index 731487be5baa..05b5c6652cfa 100644
--- a/drivers/media/v4l2-core/tuner-core.c
+++ b/drivers/media/v4l2-core/tuner-core.c
@@ -84,30 +84,16 @@ static const struct v4l2_subdev_ops tuner_ops;
* Debug macros
*/
-#define tuner_warn(fmt, arg...) do { \
- printk(KERN_WARNING "%s %d-%04x: " fmt, PREFIX, \
- i2c_adapter_id(t->i2c->adapter), \
- t->i2c->addr, ##arg); \
- } while (0)
-
-#define tuner_info(fmt, arg...) do { \
- printk(KERN_INFO "%s %d-%04x: " fmt, PREFIX, \
- i2c_adapter_id(t->i2c->adapter), \
- t->i2c->addr, ##arg); \
- } while (0)
-
-#define tuner_err(fmt, arg...) do { \
- printk(KERN_ERR "%s %d-%04x: " fmt, PREFIX, \
- i2c_adapter_id(t->i2c->adapter), \
- t->i2c->addr, ##arg); \
- } while (0)
-
-#define tuner_dbg(fmt, arg...) do { \
- if (tuner_debug) \
- printk(KERN_DEBUG "%s %d-%04x: " fmt, PREFIX, \
- i2c_adapter_id(t->i2c->adapter), \
- t->i2c->addr, ##arg); \
- } while (0)
+#undef pr_fmt
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": %d-%04x: " fmt, \
+ i2c_adapter_id(t->i2c->adapter), t->i2c->addr
+
+
+#define dprintk(fmt, arg...) do { \
+ if (tuner_debug) \
+ printk(KERN_DEBUG pr_fmt("%s: " fmt), __func__, ##arg); \
+} while (0)
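Redefining pr_fmt() is what lets plain pr_warn()/pr_info()/pr_err() calls replace the bespoke tuner_* macros below: every pr_*() wrapper in printk.h passes its format string through pr_fmt(). Note that this particular pr_fmt references the local variable t, so it only works inside functions that have a struct tuner *t in scope. Sketch of the expansion:

        pr_warn("tuner type not set\n");
        /* expands to roughly:
         * printk(KERN_WARNING KBUILD_MODNAME ": %d-%04x: tuner type not set\n",
         *        i2c_adapter_id(t->i2c->adapter), t->i2c->addr);
         */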
/*
* Internal struct used inside the driver
@@ -208,7 +194,7 @@ static void fe_set_params(struct dvb_frontend *fe,
struct tuner *t = fe->analog_demod_priv;
if (NULL == fe_tuner_ops->set_analog_params) {
- tuner_warn("Tuner frontend module has no way to set freq\n");
+ pr_warn("Tuner frontend module has no way to set freq\n");
return;
}
fe_tuner_ops->set_analog_params(fe, params);
@@ -230,7 +216,7 @@ static int fe_set_config(struct dvb_frontend *fe, void *priv_cfg)
if (fe_tuner_ops->set_config)
return fe_tuner_ops->set_config(fe, priv_cfg);
- tuner_warn("Tuner frontend module has no way to set config\n");
+ pr_warn("Tuner frontend module has no way to set config\n");
return 0;
}
@@ -273,14 +259,14 @@ static void set_type(struct i2c_client *c, unsigned int type,
int tune_now = 1;
if (type == UNSET || type == TUNER_ABSENT) {
- tuner_dbg("tuner 0x%02x: Tuner type absent\n", c->addr);
+ dprintk("tuner 0x%02x: Tuner type absent\n", c->addr);
return;
}
t->type = type;
t->config = new_config;
if (tuner_callback != NULL) {
- tuner_dbg("defining GPIO callback\n");
+ dprintk("defining GPIO callback\n");
t->fe.callback = tuner_callback;
}
@@ -442,7 +428,7 @@ static void set_type(struct i2c_client *c, unsigned int type,
t->sd.entity.name = t->name;
#endif
- tuner_dbg("type set to %s\n", t->name);
+ dprintk("type set to %s\n", t->name);
t->mode_mask = new_mode_mask;
@@ -459,13 +445,13 @@ static void set_type(struct i2c_client *c, unsigned int type,
set_tv_freq(c, t->tv_freq);
}
- tuner_dbg("%s %s I2C addr 0x%02x with type %d used for 0x%02x\n",
+ dprintk("%s %s I2C addr 0x%02x with type %d used for 0x%02x\n",
c->adapter->name, c->dev.driver->name, c->addr << 1, type,
t->mode_mask);
return;
attach_failed:
- tuner_dbg("Tuner attach for type = %d failed.\n", t->type);
+ dprintk("Tuner attach for type = %d failed.\n", t->type);
t->type = TUNER_ABSENT;
return;
@@ -491,7 +477,7 @@ static int tuner_s_type_addr(struct v4l2_subdev *sd,
struct tuner *t = to_tuner(sd);
struct i2c_client *c = v4l2_get_subdevdata(sd);
- tuner_dbg("Calling set_type_addr for type=%d, addr=0x%02x, mode=0x%02x, config=%p\n",
+ dprintk("Calling set_type_addr for type=%d, addr=0x%02x, mode=0x%02x, config=%p\n",
tun_setup->type,
tun_setup->addr,
tun_setup->mode_mask,
@@ -503,8 +489,7 @@ static int tuner_s_type_addr(struct v4l2_subdev *sd,
set_type(c, tun_setup->type, tun_setup->mode_mask,
tun_setup->config, tun_setup->tuner_callback);
} else
- tuner_dbg("set addr discarded for type %i, mask %x. "
- "Asked to change tuner at addr 0x%02x, with mask %x\n",
+ dprintk("set addr discarded for type %i, mask %x. Asked to change tuner at addr 0x%02x, with mask %x\n",
t->type, t->mode_mask,
tun_setup->addr, tun_setup->mode_mask);
@@ -534,7 +519,7 @@ static int tuner_s_config(struct v4l2_subdev *sd,
return 0;
}
- tuner_dbg("Tuner frontend module has no way to set config\n");
+ dprintk("Tuner frontend module has no way to set config\n");
return 0;
}
@@ -618,14 +603,12 @@ static int tuner_probe(struct i2c_client *client,
if (show_i2c) {
unsigned char buffer[16];
- int i, rc;
+ int rc;
memset(buffer, 0, sizeof(buffer));
rc = i2c_master_recv(client, buffer, sizeof(buffer));
- tuner_info("I2C RECV = ");
- for (i = 0; i < rc; i++)
- printk(KERN_CONT "%02x ", buffer[i]);
- printk("\n");
+ if (rc >= 0)
+ pr_info("I2C RECV = %*ph\n", rc, buffer);
}
/* autodetection code based on the i2c addr */
@@ -653,7 +636,7 @@ static int tuner_probe(struct i2c_client *client,
since it can be tda9887*/
if (tuner_symbol_probe(tda829x_probe, t->i2c->adapter,
t->i2c->addr) >= 0) {
- tuner_dbg("tda829x detected\n");
+ dprintk("tda829x detected\n");
} else {
/* Default is being tda9887 */
t->type = TUNER_TDA9887;
@@ -690,7 +673,7 @@ static int tuner_probe(struct i2c_client *client,
t->mode_mask = T_ANALOG_TV;
if (radio == NULL)
t->mode_mask |= T_RADIO;
- tuner_dbg("Setting mode_mask to 0x%02x\n", t->mode_mask);
+ dprintk("Setting mode_mask to 0x%02x\n", t->mode_mask);
}
/* Should be just before return */
@@ -719,7 +702,7 @@ register_client:
}
if (ret < 0) {
- tuner_err("failed to initialize media entity!\n");
+ pr_err("failed to initialize media entity!\n");
kfree(t);
return ret;
}
@@ -732,7 +715,7 @@ register_client:
set_type(client, t->type, t->mode_mask, t->config, t->fe.callback);
list_add_tail(&t->list, &tuner_list);
- tuner_info("Tuner %d found with type(s)%s%s.\n",
+ pr_info("Tuner %d found with type(s)%s%s.\n",
t->type,
t->mode_mask & T_RADIO ? " Radio" : "",
t->mode_mask & T_ANALOG_TV ? " TV" : "");
@@ -809,15 +792,15 @@ static int set_mode(struct tuner *t, enum v4l2_tuner_type mode)
if (mode != t->mode) {
if (check_mode(t, mode) == -EINVAL) {
- tuner_dbg("Tuner doesn't support mode %d. "
- "Putting tuner to sleep\n", mode);
+ dprintk("Tuner doesn't support mode %d. Putting tuner to sleep\n",
+ mode);
t->standby = true;
if (analog_ops->standby)
analog_ops->standby(&t->fe);
return -EINVAL;
}
t->mode = mode;
- tuner_dbg("Changing to mode %d\n", mode);
+ dprintk("Changing to mode %d\n", mode);
}
return 0;
}
@@ -864,15 +847,15 @@ static void set_tv_freq(struct i2c_client *c, unsigned int freq)
};
if (t->type == UNSET) {
- tuner_warn("tuner type not set\n");
+ pr_warn("tuner type not set\n");
return;
}
if (NULL == analog_ops->set_params) {
- tuner_warn("Tuner has no way to set tv freq\n");
+ pr_warn("Tuner has no way to set tv freq\n");
return;
}
if (freq < tv_range[0] * 16 || freq > tv_range[1] * 16) {
- tuner_dbg("TV freq (%d.%02d) out of range (%d-%d)\n",
+ dprintk("TV freq (%d.%02d) out of range (%d-%d)\n",
freq / 16, freq % 16 * 100 / 16, tv_range[0],
tv_range[1]);
/* V4L2 spec: if the freq is not possible then the closest
@@ -883,7 +866,7 @@ static void set_tv_freq(struct i2c_client *c, unsigned int freq)
freq = tv_range[1] * 16;
}
params.frequency = freq;
- tuner_dbg("tv freq set to %d.%02d\n",
+ dprintk("tv freq set to %d.%02d\n",
freq / 16, freq % 16 * 100 / 16);
t->tv_freq = freq;
t->standby = false;
@@ -933,7 +916,7 @@ static v4l2_std_id tuner_fixup_std(struct tuner *t, v4l2_std_id std)
return V4L2_STD_PAL_Nc;
return V4L2_STD_PAL_N;
default:
- tuner_warn("pal= argument not recognised\n");
+ pr_warn("pal= argument not recognised\n");
break;
}
}
@@ -959,7 +942,7 @@ static v4l2_std_id tuner_fixup_std(struct tuner *t, v4l2_std_id std)
return V4L2_STD_SECAM_LC;
return V4L2_STD_SECAM_L;
default:
- tuner_warn("secam= argument not recognised\n");
+ pr_warn("secam= argument not recognised\n");
break;
}
}
@@ -976,7 +959,7 @@ static v4l2_std_id tuner_fixup_std(struct tuner *t, v4l2_std_id std)
case 'K':
return V4L2_STD_NTSC_M_KR;
default:
- tuner_info("ntsc= argument not recognised\n");
+ pr_info("ntsc= argument not recognised\n");
break;
}
}
@@ -1005,15 +988,15 @@ static void set_radio_freq(struct i2c_client *c, unsigned int freq)
};
if (t->type == UNSET) {
- tuner_warn("tuner type not set\n");
+ pr_warn("tuner type not set\n");
return;
}
if (NULL == analog_ops->set_params) {
- tuner_warn("tuner has no way to set radio frequency\n");
+ pr_warn("tuner has no way to set radio frequency\n");
return;
}
if (freq < radio_range[0] * 16000 || freq > radio_range[1] * 16000) {
- tuner_dbg("radio freq (%d.%02d) out of range (%d-%d)\n",
+ dprintk("radio freq (%d.%02d) out of range (%d-%d)\n",
freq / 16000, freq % 16000 * 100 / 16000,
radio_range[0], radio_range[1]);
/* V4L2 spec: if the freq is not possible then the closest
@@ -1024,7 +1007,7 @@ static void set_radio_freq(struct i2c_client *c, unsigned int freq)
freq = radio_range[1] * 16000;
}
params.frequency = freq;
- tuner_dbg("radio freq set to %d.%02d\n",
+ dprintk("radio freq set to %d.%02d\n",
freq / 16000, freq % 16000 * 100 / 16000);
t->radio_freq = freq;
t->standby = false;
@@ -1075,10 +1058,10 @@ static void tuner_status(struct dvb_frontend *fe)
freq = t->tv_freq / 16;
freq_fraction = (t->tv_freq % 16) * 100 / 16;
}
- tuner_info("Tuner mode: %s%s\n", p,
+ pr_info("Tuner mode: %s%s\n", p,
t->standby ? " on standby mode" : "");
- tuner_info("Frequency: %lu.%02lu MHz\n", freq, freq_fraction);
- tuner_info("Standard: 0x%08lx\n", (unsigned long)t->std);
+ pr_info("Frequency: %lu.%02lu MHz\n", freq, freq_fraction);
+ pr_info("Standard: 0x%08lx\n", (unsigned long)t->std);
if (t->mode != V4L2_TUNER_RADIO)
return;
if (fe_tuner_ops->get_status) {
@@ -1086,15 +1069,15 @@ static void tuner_status(struct dvb_frontend *fe)
fe_tuner_ops->get_status(&t->fe, &tuner_status);
if (tuner_status & TUNER_STATUS_LOCKED)
- tuner_info("Tuner is locked.\n");
+ pr_info("Tuner is locked.\n");
if (tuner_status & TUNER_STATUS_STEREO)
- tuner_info("Stereo: yes\n");
+ pr_info("Stereo: yes\n");
}
if (analog_ops->has_signal) {
u16 signal;
if (!analog_ops->has_signal(fe, &signal))
- tuner_info("Signal strength: %hu\n", signal);
+ pr_info("Signal strength: %hu\n", signal);
}
}
@@ -1127,13 +1110,13 @@ static int tuner_s_power(struct v4l2_subdev *sd, int on)
if (on) {
if (t->standby && set_mode(t, t->mode) == 0) {
- tuner_dbg("Waking up tuner\n");
+ dprintk("Waking up tuner\n");
set_freq(t, 0);
}
return 0;
}
- tuner_dbg("Putting tuner to sleep\n");
+ dprintk("Putting tuner to sleep\n");
t->standby = true;
if (analog_ops->standby)
analog_ops->standby(&t->fe);
@@ -1149,7 +1132,7 @@ static int tuner_s_std(struct v4l2_subdev *sd, v4l2_std_id std)
t->std = tuner_fixup_std(t, std);
if (t->std != std)
- tuner_dbg("Fixup standard %llx to %llx\n", std, t->std);
+ dprintk("Fixup standard %llx to %llx\n", std, t->std);
set_freq(t, 0);
return 0;
}
@@ -1298,7 +1281,7 @@ static int tuner_suspend(struct device *dev)
struct tuner *t = to_tuner(i2c_get_clientdata(c));
struct analog_demod_ops *analog_ops = &t->fe.ops.analog_ops;
- tuner_dbg("suspend\n");
+ dprintk("suspend\n");
if (t->fe.ops.tuner_ops.suspend)
t->fe.ops.tuner_ops.suspend(&t->fe);
@@ -1313,7 +1296,7 @@ static int tuner_resume(struct device *dev)
struct i2c_client *c = to_i2c_client(dev);
struct tuner *t = to_tuner(i2c_get_clientdata(c));
- tuner_dbg("resume\n");
+ dprintk("resume\n");
if (t->fe.ops.tuner_ops.resume)
t->fe.ops.tuner_ops.resume(&t->fe);
diff --git a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
index bacecbd68a6d..eac9565dc3d8 100644
--- a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
+++ b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
@@ -409,7 +409,6 @@ static int get_v4l2_buffer32(struct v4l2_buffer *kp, struct v4l2_buffer32 __user
struct v4l2_plane32 __user *uplane32;
struct v4l2_plane __user *uplane;
compat_caddr_t p;
- int num_planes;
int ret;
if (!access_ok(VERIFY_READ, up, sizeof(struct v4l2_buffer32)) ||
@@ -429,12 +428,15 @@ static int get_v4l2_buffer32(struct v4l2_buffer *kp, struct v4l2_buffer32 __user
return -EFAULT;
if (V4L2_TYPE_IS_MULTIPLANAR(kp->type)) {
- num_planes = kp->length;
- if (num_planes == 0) {
+ unsigned int num_planes;
+
+ if (kp->length == 0) {
kp->m.planes = NULL;
/* num_planes == 0 is legal, e.g. when userspace doesn't
* need planes array on DQBUF*/
return 0;
+ } else if (kp->length > VIDEO_MAX_PLANES) {
+ return -EINVAL;
}
if (get_user(p, &up->m.planes))
@@ -442,16 +444,16 @@ static int get_v4l2_buffer32(struct v4l2_buffer *kp, struct v4l2_buffer32 __user
uplane32 = compat_ptr(p);
if (!access_ok(VERIFY_READ, uplane32,
- num_planes * sizeof(struct v4l2_plane32)))
+ kp->length * sizeof(struct v4l2_plane32)))
return -EFAULT;
/* We don't really care if userspace decides to kill itself
* by passing a very big num_planes value */
- uplane = compat_alloc_user_space(num_planes *
- sizeof(struct v4l2_plane));
+ uplane = compat_alloc_user_space(kp->length *
+ sizeof(struct v4l2_plane));
kp->m.planes = (__force struct v4l2_plane *)uplane;
- while (--num_planes >= 0) {
+ for (num_planes = 0; num_planes < kp->length; num_planes++) {
ret = get_v4l2_plane32(uplane, uplane32, kp->memory);
if (ret)
return ret;
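The new check bounds a user-controlled element count before it sizes a compat_alloc_user_space() allocation; VIDEO_MAX_PLANES (8) is the hard limit for the planes array, and the ext_controls hunk below applies the same pattern with V4L2_CID_MAX_CTRLS. Sketch:

        if (count == 0)
                return 0;                       /* legal: no array */
        if (count > VIDEO_MAX_PLANES)           /* cap user input first */
                return -EINVAL;
        uplane = compat_alloc_user_space(count * sizeof(struct v4l2_plane));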
@@ -665,7 +667,7 @@ static int get_v4l2_ext_controls32(struct v4l2_ext_controls *kp, struct v4l2_ext
{
struct v4l2_ext_control32 __user *ucontrols;
struct v4l2_ext_control __user *kcontrols;
- int n;
+ unsigned int n;
compat_caddr_t p;
if (!access_ok(VERIFY_READ, up, sizeof(struct v4l2_ext_controls32)) ||
@@ -675,20 +677,22 @@ static int get_v4l2_ext_controls32(struct v4l2_ext_controls *kp, struct v4l2_ext
copy_from_user(kp->reserved, up->reserved,
sizeof(kp->reserved)))
return -EFAULT;
- n = kp->count;
- if (n == 0) {
+ if (kp->count == 0) {
kp->controls = NULL;
return 0;
+ } else if (kp->count > V4L2_CID_MAX_CTRLS) {
+ return -EINVAL;
}
if (get_user(p, &up->controls))
return -EFAULT;
ucontrols = compat_ptr(p);
if (!access_ok(VERIFY_READ, ucontrols,
- n * sizeof(struct v4l2_ext_control32)))
+ kp->count * sizeof(struct v4l2_ext_control32)))
return -EFAULT;
- kcontrols = compat_alloc_user_space(n * sizeof(struct v4l2_ext_control));
+ kcontrols = compat_alloc_user_space(kp->count *
+ sizeof(struct v4l2_ext_control));
kp->controls = (__force struct v4l2_ext_control *)kcontrols;
- while (--n >= 0) {
+ for (n = 0; n < kp->count; n++) {
u32 id;
if (copy_in_user(kcontrols, ucontrols, sizeof(*ucontrols)))
diff --git a/drivers/media/v4l2-core/v4l2-ctrls.c b/drivers/media/v4l2-core/v4l2-ctrls.c
index adc2147fcff7..47001e25fd9e 100644
--- a/drivers/media/v4l2-core/v4l2-ctrls.c
+++ b/drivers/media/v4l2-core/v4l2-ctrls.c
@@ -885,6 +885,7 @@ const char *v4l2_ctrl_get_name(u32 id)
case V4L2_CID_LINK_FREQ: return "Link Frequency";
case V4L2_CID_PIXEL_RATE: return "Pixel Rate";
case V4L2_CID_TEST_PATTERN: return "Test Pattern";
+ case V4L2_CID_DEINTERLACING_MODE: return "Deinterlacing Mode";
/* DV controls */
/* Keep the order of the 'case's the same as in v4l2-controls.h! */
@@ -1058,6 +1059,7 @@ void v4l2_ctrl_fill(u32 id, const char **name, enum v4l2_ctrl_type *type,
case V4L2_CID_DV_RX_RGB_RANGE:
case V4L2_CID_DV_RX_IT_CONTENT_TYPE:
case V4L2_CID_TEST_PATTERN:
+ case V4L2_CID_DEINTERLACING_MODE:
case V4L2_CID_TUNE_DEEMPHASIS:
case V4L2_CID_MPEG_VIDEO_VPX_GOLDEN_FRAME_SEL:
case V4L2_CID_DETECT_MD_MODE:
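Introducing a menu control needs both hunks: the name string in v4l2_ctrl_get_name() and the classification in v4l2_ctrl_fill(), so the core assigns V4L2_CTRL_TYPE_MENU. A driver would then instantiate it with its own item list, e.g. (item names purely illustrative):

        static const char * const deint_modes[] = {
                "Weave", "Bob", NULL
        };

        v4l2_ctrl_new_std_menu_items(hdl, &ops, V4L2_CID_DEINTERLACING_MODE,
                                     ARRAY_SIZE(deint_modes) - 2, 0, 0,
                                     deint_modes);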
diff --git a/drivers/media/v4l2-core/v4l2-dv-timings.c b/drivers/media/v4l2-core/v4l2-dv-timings.c
index 730a7c392c1d..5c8c49d240d1 100644
--- a/drivers/media/v4l2-core/v4l2-dv-timings.c
+++ b/drivers/media/v4l2-core/v4l2-dv-timings.c
@@ -22,6 +22,7 @@
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/errno.h>
+#include <linux/rational.h>
#include <linux/videodev2.h>
#include <linux/v4l2-dv-timings.h>
#include <media/v4l2-dv-timings.h>
@@ -224,6 +225,24 @@ bool v4l2_find_dv_timings_cap(struct v4l2_dv_timings *t,
}
EXPORT_SYMBOL_GPL(v4l2_find_dv_timings_cap);
+bool v4l2_find_dv_timings_cea861_vic(struct v4l2_dv_timings *t, u8 vic)
+{
+ unsigned int i;
+
+ for (i = 0; i < v4l2_dv_timings_presets[i].bt.width; i++) {
+ const struct v4l2_bt_timings *bt =
+ &v4l2_dv_timings_presets[i].bt;
+
+ if ((bt->flags & V4L2_DV_FL_HAS_CEA861_VIC) &&
+ bt->cea861_vic == vic) {
+ *t = v4l2_dv_timings_presets[i];
+ return true;
+ }
+ }
+ return false;
+}
+EXPORT_SYMBOL_GPL(v4l2_find_dv_timings_cea861_vic);
+
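The loop condition in v4l2_find_dv_timings_cea861_vic() relies on v4l2_dv_timings_presets[] being terminated by an all-zero sentinel: while entries are valid their width (at least 640) always exceeds the small index i, and the sentinel's zero width stops the walk. The underlying idiom, written out plainly:

        for (i = 0; v4l2_dv_timings_presets[i].bt.width; i++)
                /* inspect v4l2_dv_timings_presets[i] */;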
/**
* v4l2_match_dv_timings - check if two timings match
* @t1 - compare this v4l2_dv_timings struct...
@@ -306,7 +325,8 @@ void v4l2_print_dv_timings(const char *dev_prefix, const char *prefix,
(bt->polarities & V4L2_DV_VSYNC_POS_POL) ? "+" : "-",
bt->il_vsync, bt->il_vbackporch);
pr_info("%s: pixelclock: %llu\n", dev_prefix, bt->pixelclock);
- pr_info("%s: flags (0x%x):%s%s%s%s%s%s%s\n", dev_prefix, bt->flags,
+ pr_info("%s: flags (0x%x):%s%s%s%s%s%s%s%s%s%s\n",
+ dev_prefix, bt->flags,
(bt->flags & V4L2_DV_FL_REDUCED_BLANKING) ?
" REDUCED_BLANKING" : "",
((bt->flags & V4L2_DV_FL_REDUCED_BLANKING) &&
@@ -320,16 +340,51 @@ void v4l2_print_dv_timings(const char *dev_prefix, const char *prefix,
(bt->flags & V4L2_DV_FL_IS_CE_VIDEO) ?
" CE_VIDEO" : "",
(bt->flags & V4L2_DV_FL_FIRST_FIELD_EXTRA_LINE) ?
- " FIRST_FIELD_EXTRA_LINE" : "");
+ " FIRST_FIELD_EXTRA_LINE" : "",
+ (bt->flags & V4L2_DV_FL_HAS_PICTURE_ASPECT) ?
+ " HAS_PICTURE_ASPECT" : "",
+ (bt->flags & V4L2_DV_FL_HAS_CEA861_VIC) ?
+ " HAS_CEA861_VIC" : "",
+ (bt->flags & V4L2_DV_FL_HAS_HDMI_VIC) ?
+ " HAS_HDMI_VIC" : "");
pr_info("%s: standards (0x%x):%s%s%s%s%s\n", dev_prefix, bt->standards,
(bt->standards & V4L2_DV_BT_STD_CEA861) ? " CEA" : "",
(bt->standards & V4L2_DV_BT_STD_DMT) ? " DMT" : "",
(bt->standards & V4L2_DV_BT_STD_CVT) ? " CVT" : "",
(bt->standards & V4L2_DV_BT_STD_GTF) ? " GTF" : "",
(bt->standards & V4L2_DV_BT_STD_SDI) ? " SDI" : "");
+ if (bt->flags & V4L2_DV_FL_HAS_PICTURE_ASPECT)
+ pr_info("%s: picture aspect (hor:vert): %u:%u\n", dev_prefix,
+ bt->picture_aspect.numerator,
+ bt->picture_aspect.denominator);
+ if (bt->flags & V4L2_DV_FL_HAS_CEA861_VIC)
+ pr_info("%s: CEA-861 VIC: %u\n", dev_prefix, bt->cea861_vic);
+ if (bt->flags & V4L2_DV_FL_HAS_HDMI_VIC)
+ pr_info("%s: HDMI VIC: %u\n", dev_prefix, bt->hdmi_vic);
}
EXPORT_SYMBOL_GPL(v4l2_print_dv_timings);
+struct v4l2_fract v4l2_dv_timings_aspect_ratio(const struct v4l2_dv_timings *t)
+{
+ struct v4l2_fract ratio = { 1, 1 };
+ unsigned long n, d;
+
+ if (t->type != V4L2_DV_BT_656_1120)
+ return ratio;
+ if (!(t->bt.flags & V4L2_DV_FL_HAS_PICTURE_ASPECT))
+ return ratio;
+
+ ratio.numerator = t->bt.width * t->bt.picture_aspect.denominator;
+ ratio.denominator = t->bt.height * t->bt.picture_aspect.numerator;
+
+ rational_best_approximation(ratio.numerator, ratio.denominator,
+ ratio.numerator, ratio.denominator, &n, &d);
+ ratio.numerator = n;
+ ratio.denominator = d;
+ return ratio;
+}
+EXPORT_SYMBOL_GPL(v4l2_dv_timings_aspect_ratio);
+
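v4l2_dv_timings_aspect_ratio() derives the pixel aspect from the frame size and picture aspect, then reduces the fraction with rational_best_approximation(), which is why VIDEO_V4L2 now selects RATIONAL in Kconfig. Worked example:

        /* 1920x1080 with a 16:9 picture aspect has square pixels:
         *   numerator   = 1920 * 9  = 17280
         *   denominator = 1080 * 16 = 17280
         * rational_best_approximation(17280, 17280, 17280, 17280, &n, &d)
         * yields n/d = 1/1.
         */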
/*
* CVT defines
* Based on Coordinated Video Timings Standard
diff --git a/drivers/media/v4l2-core/v4l2-flash-led-class.c b/drivers/media/v4l2-core/v4l2-flash-led-class.c
index ae7544d5469a..794e563f24f8 100644
--- a/drivers/media/v4l2-core/v4l2-flash-led-class.c
+++ b/drivers/media/v4l2-core/v4l2-flash-led-class.c
@@ -638,7 +638,7 @@ struct v4l2_flash *v4l2_flash_init(
v4l2_flash->iled_cdev = iled_cdev;
v4l2_flash->ops = ops;
sd->dev = dev;
- sd->of_node = of_node;
+ sd->of_node = of_node ? of_node : led_cdev->dev->of_node;
v4l2_subdev_init(sd, &v4l2_flash_subdev_ops);
sd->internal_ops = &v4l2_flash_subdev_internal_ops;
sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
@@ -654,10 +654,7 @@ struct v4l2_flash *v4l2_flash_init(
if (ret < 0)
goto err_init_controls;
- if (sd->of_node)
- of_node_get(sd->of_node);
- else
- of_node_get(led_cdev->dev->of_node);
+ of_node_get(sd->of_node);
ret = v4l2_async_register_subdev(sd);
if (ret < 0)
@@ -666,7 +663,7 @@ struct v4l2_flash *v4l2_flash_init(
return v4l2_flash;
err_async_register_sd:
- of_node_put(led_cdev->dev->of_node);
+ of_node_put(sd->of_node);
v4l2_ctrl_handler_free(sd->ctrl_handler);
err_init_controls:
media_entity_cleanup(&sd->entity);
@@ -678,20 +675,15 @@ EXPORT_SYMBOL_GPL(v4l2_flash_init);
void v4l2_flash_release(struct v4l2_flash *v4l2_flash)
{
struct v4l2_subdev *sd;
- struct led_classdev *led_cdev;
if (IS_ERR_OR_NULL(v4l2_flash))
return;
sd = &v4l2_flash->sd;
- led_cdev = &v4l2_flash->fled_cdev->led_cdev;
v4l2_async_unregister_subdev(sd);
- if (sd->of_node)
- of_node_put(sd->of_node);
- else
- of_node_put(led_cdev->dev->of_node);
+ of_node_put(sd->of_node);
v4l2_ctrl_handler_free(sd->ctrl_handler);
media_entity_cleanup(&sd->entity);
diff --git a/drivers/media/v4l2-core/v4l2-ioctl.c b/drivers/media/v4l2-core/v4l2-ioctl.c
index c52d94c018bb..0c3f238a2e76 100644
--- a/drivers/media/v4l2-core/v4l2-ioctl.c
+++ b/drivers/media/v4l2-core/v4l2-ioctl.c
@@ -174,8 +174,7 @@ static void v4l_print_querycap(const void *arg, bool write_only)
{
const struct v4l2_capability *p = arg;
- pr_cont("driver=%.*s, card=%.*s, bus=%.*s, version=0x%08x, "
- "capabilities=0x%08x, device_caps=0x%08x\n",
+ pr_cont("driver=%.*s, card=%.*s, bus=%.*s, version=0x%08x, capabilities=0x%08x, device_caps=0x%08x\n",
(int)sizeof(p->driver), p->driver,
(int)sizeof(p->card), p->card,
(int)sizeof(p->bus_info), p->bus_info,
@@ -186,8 +185,7 @@ static void v4l_print_enuminput(const void *arg, bool write_only)
{
const struct v4l2_input *p = arg;
- pr_cont("index=%u, name=%.*s, type=%u, audioset=0x%x, tuner=%u, "
- "std=0x%08Lx, status=0x%x, capabilities=0x%x\n",
+ pr_cont("index=%u, name=%.*s, type=%u, audioset=0x%x, tuner=%u, std=0x%08Lx, status=0x%x, capabilities=0x%x\n",
p->index, (int)sizeof(p->name), p->name, p->type, p->audioset,
p->tuner, (unsigned long long)p->std, p->status,
p->capabilities);
@@ -197,8 +195,7 @@ static void v4l_print_enumoutput(const void *arg, bool write_only)
{
const struct v4l2_output *p = arg;
- pr_cont("index=%u, name=%.*s, type=%u, audioset=0x%x, "
- "modulator=%u, std=0x%08Lx, capabilities=0x%x\n",
+ pr_cont("index=%u, name=%.*s, type=%u, audioset=0x%x, modulator=%u, std=0x%08Lx, capabilities=0x%x\n",
p->index, (int)sizeof(p->name), p->name, p->type, p->audioset,
p->modulator, (unsigned long long)p->std, p->capabilities);
}
@@ -256,11 +253,7 @@ static void v4l_print_format(const void *arg, bool write_only)
case V4L2_BUF_TYPE_VIDEO_CAPTURE:
case V4L2_BUF_TYPE_VIDEO_OUTPUT:
pix = &p->fmt.pix;
- pr_cont(", width=%u, height=%u, "
- "pixelformat=%c%c%c%c, field=%s, "
- "bytesperline=%u, sizeimage=%u, colorspace=%d, "
- "flags=0x%x, ycbcr_enc=%u, quantization=%u, "
- "xfer_func=%u\n",
+ pr_cont(", width=%u, height=%u, pixelformat=%c%c%c%c, field=%s, bytesperline=%u, sizeimage=%u, colorspace=%d, flags=0x%x, ycbcr_enc=%u, quantization=%u, xfer_func=%u\n",
pix->width, pix->height,
(pix->pixelformat & 0xff),
(pix->pixelformat >> 8) & 0xff,
@@ -274,10 +267,7 @@ static void v4l_print_format(const void *arg, bool write_only)
case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
mp = &p->fmt.pix_mp;
- pr_cont(", width=%u, height=%u, "
- "format=%c%c%c%c, field=%s, "
- "colorspace=%d, num_planes=%u, flags=0x%x, "
- "ycbcr_enc=%u, quantization=%u, xfer_func=%u\n",
+ pr_cont(", width=%u, height=%u, format=%c%c%c%c, field=%s, colorspace=%d, num_planes=%u, flags=0x%x, ycbcr_enc=%u, quantization=%u, xfer_func=%u\n",
mp->width, mp->height,
(mp->pixelformat & 0xff),
(mp->pixelformat >> 8) & 0xff,
@@ -306,8 +296,7 @@ static void v4l_print_format(const void *arg, bool write_only)
case V4L2_BUF_TYPE_VBI_CAPTURE:
case V4L2_BUF_TYPE_VBI_OUTPUT:
vbi = &p->fmt.vbi;
- pr_cont(", sampling_rate=%u, offset=%u, samples_per_line=%u, "
- "sample_format=%c%c%c%c, start=%u,%u, count=%u,%u\n",
+ pr_cont(", sampling_rate=%u, offset=%u, samples_per_line=%u, sample_format=%c%c%c%c, start=%u,%u, count=%u,%u\n",
vbi->sampling_rate, vbi->offset,
vbi->samples_per_line,
(vbi->sample_format & 0xff),
@@ -343,9 +332,7 @@ static void v4l_print_framebuffer(const void *arg, bool write_only)
{
const struct v4l2_framebuffer *p = arg;
- pr_cont("capability=0x%x, flags=0x%x, base=0x%p, width=%u, "
- "height=%u, pixelformat=%c%c%c%c, "
- "bytesperline=%u, sizeimage=%u, colorspace=%d\n",
+ pr_cont("capability=0x%x, flags=0x%x, base=0x%p, width=%u, height=%u, pixelformat=%c%c%c%c, bytesperline=%u, sizeimage=%u, colorspace=%d\n",
p->capability, p->flags, p->base,
p->fmt.width, p->fmt.height,
(p->fmt.pixelformat & 0xff),
@@ -368,8 +355,7 @@ static void v4l_print_modulator(const void *arg, bool write_only)
if (write_only)
pr_cont("index=%u, txsubchans=0x%x\n", p->index, p->txsubchans);
else
- pr_cont("index=%u, name=%.*s, capability=0x%x, "
- "rangelow=%u, rangehigh=%u, txsubchans=0x%x\n",
+ pr_cont("index=%u, name=%.*s, capability=0x%x, rangelow=%u, rangehigh=%u, txsubchans=0x%x\n",
p->index, (int)sizeof(p->name), p->name, p->capability,
p->rangelow, p->rangehigh, p->txsubchans);
}
@@ -381,9 +367,7 @@ static void v4l_print_tuner(const void *arg, bool write_only)
if (write_only)
pr_cont("index=%u, audmode=%u\n", p->index, p->audmode);
else
- pr_cont("index=%u, name=%.*s, type=%u, capability=0x%x, "
- "rangelow=%u, rangehigh=%u, signal=%u, afc=%d, "
- "rxsubchans=0x%x, audmode=%u\n",
+ pr_cont("index=%u, name=%.*s, type=%u, capability=0x%x, rangelow=%u, rangehigh=%u, signal=%u, afc=%d, rxsubchans=0x%x, audmode=%u\n",
p->index, (int)sizeof(p->name), p->name, p->type,
p->capability, p->rangelow,
p->rangehigh, p->signal, p->afc,
@@ -402,8 +386,8 @@ static void v4l_print_standard(const void *arg, bool write_only)
{
const struct v4l2_standard *p = arg;
- pr_cont("index=%u, id=0x%Lx, name=%.*s, fps=%u/%u, "
- "framelines=%u\n", p->index,
+ pr_cont("index=%u, id=0x%Lx, name=%.*s, fps=%u/%u, framelines=%u\n",
+ p->index,
(unsigned long long)p->id, (int)sizeof(p->name), p->name,
p->frameperiod.numerator,
p->frameperiod.denominator,
@@ -419,8 +403,7 @@ static void v4l_print_hw_freq_seek(const void *arg, bool write_only)
{
const struct v4l2_hw_freq_seek *p = arg;
- pr_cont("tuner=%u, type=%u, seek_upward=%u, wrap_around=%u, spacing=%u, "
- "rangelow=%u, rangehigh=%u\n",
+ pr_cont("tuner=%u, type=%u, seek_upward=%u, wrap_around=%u, spacing=%u, rangelow=%u, rangehigh=%u\n",
p->tuner, p->type, p->seek_upward, p->wrap_around, p->spacing,
p->rangelow, p->rangehigh);
}
@@ -442,8 +425,7 @@ static void v4l_print_buffer(const void *arg, bool write_only)
const struct v4l2_plane *plane;
int i;
- pr_cont("%02ld:%02d:%02d.%08ld index=%d, type=%s, "
- "flags=0x%08x, field=%s, sequence=%d, memory=%s",
+ pr_cont("%02ld:%02d:%02d.%08ld index=%d, type=%s, flags=0x%08x, field=%s, sequence=%d, memory=%s",
p->timestamp.tv_sec / 3600,
(int)(p->timestamp.tv_sec / 60) % 60,
(int)(p->timestamp.tv_sec % 60),
@@ -458,8 +440,7 @@ static void v4l_print_buffer(const void *arg, bool write_only)
for (i = 0; i < p->length; ++i) {
plane = &p->m.planes[i];
printk(KERN_DEBUG
- "plane %d: bytesused=%d, data_offset=0x%08x, "
- "offset/userptr=0x%lx, length=%d\n",
+ "plane %d: bytesused=%d, data_offset=0x%08x, offset/userptr=0x%lx, length=%d\n",
i, plane->bytesused, plane->data_offset,
plane->m.userptr, plane->length);
}
@@ -468,8 +449,7 @@ static void v4l_print_buffer(const void *arg, bool write_only)
p->bytesused, p->m.userptr, p->length);
}
- printk(KERN_DEBUG "timecode=%02d:%02d:%02d type=%d, "
- "flags=0x%08x, frames=%d, userbits=0x%08x\n",
+ printk(KERN_DEBUG "timecode=%02d:%02d:%02d type=%d, flags=0x%08x, frames=%d, userbits=0x%08x\n",
tc->hours, tc->minutes, tc->seconds,
tc->type, tc->flags, tc->frames, *(__u32 *)tc->userbits);
}
@@ -503,8 +483,7 @@ static void v4l_print_streamparm(const void *arg, bool write_only)
p->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
const struct v4l2_captureparm *c = &p->parm.capture;
- pr_cont(", capability=0x%x, capturemode=0x%x, timeperframe=%d/%d, "
- "extendedmode=%d, readbuffers=%d\n",
+ pr_cont(", capability=0x%x, capturemode=0x%x, timeperframe=%d/%d, extendedmode=%d, readbuffers=%d\n",
c->capability, c->capturemode,
c->timeperframe.numerator, c->timeperframe.denominator,
c->extendedmode, c->readbuffers);
@@ -512,8 +491,7 @@ static void v4l_print_streamparm(const void *arg, bool write_only)
p->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
const struct v4l2_outputparm *c = &p->parm.output;
- pr_cont(", capability=0x%x, outputmode=0x%x, timeperframe=%d/%d, "
- "extendedmode=%d, writebuffers=%d\n",
+ pr_cont(", capability=0x%x, outputmode=0x%x, timeperframe=%d/%d, extendedmode=%d, writebuffers=%d\n",
c->capability, c->outputmode,
c->timeperframe.numerator, c->timeperframe.denominator,
c->extendedmode, c->writebuffers);
@@ -526,8 +504,7 @@ static void v4l_print_queryctrl(const void *arg, bool write_only)
{
const struct v4l2_queryctrl *p = arg;
- pr_cont("id=0x%x, type=%d, name=%.*s, min/max=%d/%d, "
- "step=%d, default=%d, flags=0x%08x\n",
+ pr_cont("id=0x%x, type=%d, name=%.*s, min/max=%d/%d, step=%d, default=%d, flags=0x%08x\n",
p->id, p->type, (int)sizeof(p->name), p->name,
p->minimum, p->maximum,
p->step, p->default_value, p->flags);
@@ -537,9 +514,7 @@ static void v4l_print_query_ext_ctrl(const void *arg, bool write_only)
{
const struct v4l2_query_ext_ctrl *p = arg;
- pr_cont("id=0x%x, type=%d, name=%.*s, min/max=%lld/%lld, "
- "step=%lld, default=%lld, flags=0x%08x, elem_size=%u, elems=%u, "
- "nr_of_dims=%u, dims=%u,%u,%u,%u\n",
+ pr_cont("id=0x%x, type=%d, name=%.*s, min/max=%lld/%lld, step=%lld, default=%lld, flags=0x%08x, elem_size=%u, elems=%u, nr_of_dims=%u, dims=%u,%u,%u,%u\n",
p->id, p->type, (int)sizeof(p->name), p->name,
p->minimum, p->maximum,
p->step, p->default_value, p->flags,
@@ -583,9 +558,7 @@ static void v4l_print_cropcap(const void *arg, bool write_only)
{
const struct v4l2_cropcap *p = arg;
- pr_cont("type=%s, bounds wxh=%dx%d, x,y=%d,%d, "
- "defrect wxh=%dx%d, x,y=%d,%d, "
- "pixelaspect %d/%d\n",
+ pr_cont("type=%s, bounds wxh=%dx%d, x,y=%d,%d, defrect wxh=%dx%d, x,y=%d,%d, pixelaspect %d/%d\n",
prt_names(p->type, v4l2_type_names),
p->bounds.width, p->bounds.height,
p->bounds.left, p->bounds.top,
@@ -618,8 +591,7 @@ static void v4l_print_jpegcompression(const void *arg, bool write_only)
{
const struct v4l2_jpegcompression *p = arg;
- pr_cont("quality=%d, APPn=%d, APP_len=%d, "
- "COM_len=%d, jpeg_markers=0x%x\n",
+ pr_cont("quality=%d, APPn=%d, APP_len=%d, COM_len=%d, jpeg_markers=0x%x\n",
p->quality, p->APPn, p->APP_len,
p->COM_len, p->jpeg_markers);
}
@@ -686,14 +658,7 @@ static void v4l_print_dv_timings(const void *arg, bool write_only)
switch (p->type) {
case V4L2_DV_BT_656_1120:
- pr_cont("type=bt-656/1120, interlaced=%u, "
- "pixelclock=%llu, "
- "width=%u, height=%u, polarities=0x%x, "
- "hfrontporch=%u, hsync=%u, "
- "hbackporch=%u, vfrontporch=%u, "
- "vsync=%u, vbackporch=%u, "
- "il_vfrontporch=%u, il_vsync=%u, "
- "il_vbackporch=%u, standards=0x%x, flags=0x%x\n",
+ pr_cont("type=bt-656/1120, interlaced=%u, pixelclock=%llu, width=%u, height=%u, polarities=0x%x, hfrontporch=%u, hsync=%u, hbackporch=%u, vfrontporch=%u, vsync=%u, vbackporch=%u, il_vfrontporch=%u, il_vsync=%u, il_vbackporch=%u, standards=0x%x, flags=0x%x\n",
p->bt.interlaced, p->bt.pixelclock,
p->bt.width, p->bt.height,
p->bt.polarities, p->bt.hfrontporch,
@@ -723,8 +688,7 @@ static void v4l_print_dv_timings_cap(const void *arg, bool write_only)
switch (p->type) {
case V4L2_DV_BT_656_1120:
- pr_cont("type=bt-656/1120, width=%u-%u, height=%u-%u, "
- "pixelclock=%llu-%llu, standards=0x%x, capabilities=0x%x\n",
+ pr_cont("type=bt-656/1120, width=%u-%u, height=%u-%u, pixelclock=%llu-%llu, standards=0x%x, capabilities=0x%x\n",
p->bt.min_width, p->bt.max_width,
p->bt.min_height, p->bt.max_height,
p->bt.min_pixelclock, p->bt.max_pixelclock,
@@ -805,8 +769,7 @@ static void v4l_print_event(const void *arg, bool write_only)
const struct v4l2_event *p = arg;
const struct v4l2_event_ctrl *c;
- pr_cont("type=0x%x, pending=%u, sequence=%u, id=%u, "
- "timestamp=%lu.%9.9lu\n",
+ pr_cont("type=0x%x, pending=%u, sequence=%u, id=%u, timestamp=%lu.%9.9lu\n",
p->type, p->pending, p->sequence, p->id,
p->timestamp.tv_sec, p->timestamp.tv_nsec);
switch (p->type) {
@@ -822,8 +785,7 @@ static void v4l_print_event(const void *arg, bool write_only)
pr_cont("value64=%lld, ", c->value64);
else
pr_cont("value=%d, ", c->value);
- pr_cont("flags=0x%x, minimum=%d, maximum=%d, step=%d, "
- "default_value=%d\n",
+ pr_cont("flags=0x%x, minimum=%d, maximum=%d, step=%d, default_value=%d\n",
c->flags, c->minimum, c->maximum,
c->step, c->default_value);
break;
@@ -859,8 +821,7 @@ static void v4l_print_freq_band(const void *arg, bool write_only)
{
const struct v4l2_frequency_band *p = arg;
- pr_cont("tuner=%u, type=%u, index=%u, capability=0x%x, "
- "rangelow=%u, rangehigh=%u, modulation=0x%x\n",
+ pr_cont("tuner=%u, type=%u, index=%u, capability=0x%x, rangelow=%u, rangehigh=%u, modulation=0x%x\n",
p->tuner, p->type, p->index,
p->capability, p->rangelow,
p->rangehigh, p->modulation);
@@ -1167,6 +1128,9 @@ static void v4l_fill_fmtdesc(struct v4l2_fmtdesc *fmt)
case V4L2_PIX_FMT_Y16: descr = "16-bit Greyscale"; break;
case V4L2_PIX_FMT_Y16_BE: descr = "16-bit Greyscale BE"; break;
case V4L2_PIX_FMT_Y10BPACK: descr = "10-bit Greyscale (Packed)"; break;
+ case V4L2_PIX_FMT_Y8I: descr = "Interleaved 8-bit Greyscale"; break;
+ case V4L2_PIX_FMT_Y12I: descr = "Interleaved 12-bit Greyscale"; break;
+ case V4L2_PIX_FMT_Z16: descr = "16-bit Depth"; break;
case V4L2_PIX_FMT_PAL8: descr = "8-bit Palette"; break;
case V4L2_PIX_FMT_UV8: descr = "8-bit Chrominance UV 4-4"; break;
case V4L2_PIX_FMT_YVU410: descr = "Planar YVU 4:1:0"; break;
@@ -1230,7 +1194,10 @@ static void v4l_fill_fmtdesc(struct v4l2_fmtdesc *fmt)
case V4L2_PIX_FMT_SGBRG10DPCM8: descr = "8-bit Bayer GBGB/RGRG (DPCM)"; break;
case V4L2_PIX_FMT_SGRBG10DPCM8: descr = "8-bit Bayer GRGR/BGBG (DPCM)"; break;
case V4L2_PIX_FMT_SRGGB10DPCM8: descr = "8-bit Bayer RGRG/GBGB (DPCM)"; break;
- case V4L2_PIX_FMT_SBGGR16: descr = "16-bit Bayer BGBG/GRGR (Exp.)"; break;
+ case V4L2_PIX_FMT_SBGGR16: descr = "16-bit Bayer BGBG/GRGR"; break;
+ case V4L2_PIX_FMT_SGBRG16: descr = "16-bit Bayer GBGB/RGRG"; break;
+ case V4L2_PIX_FMT_SGRBG16: descr = "16-bit Bayer GRGR/BGBG"; break;
+ case V4L2_PIX_FMT_SRGGB16: descr = "16-bit Bayer RGRG/GBGB"; break;
case V4L2_PIX_FMT_SN9C20X_I420: descr = "GSPCA SN9C20X I420"; break;
case V4L2_PIX_FMT_SPCA501: descr = "GSPCA SPCA501"; break;
case V4L2_PIX_FMT_SPCA505: descr = "GSPCA SPCA505"; break;
@@ -1239,6 +1206,8 @@ static void v4l_fill_fmtdesc(struct v4l2_fmtdesc *fmt)
case V4L2_PIX_FMT_TM6000: descr = "A/V + VBI Mux Packet"; break;
case V4L2_PIX_FMT_CIT_YYVYUY: descr = "GSPCA CIT YYVYUY"; break;
case V4L2_PIX_FMT_KONICA420: descr = "GSPCA KONICA420"; break;
+ case V4L2_PIX_FMT_HSV24: descr = "24-bit HSV 8-8-8"; break;
+ case V4L2_PIX_FMT_HSV32: descr = "32-bit XHSV 8-8-8-8"; break;
case V4L2_SDR_FMT_CU8: descr = "Complex U8"; break;
case V4L2_SDR_FMT_CU16LE: descr = "Complex U16LE"; break;
case V4L2_SDR_FMT_CS8: descr = "Complex S8"; break;
@@ -1269,6 +1238,7 @@ static void v4l_fill_fmtdesc(struct v4l2_fmtdesc *fmt)
case V4L2_PIX_FMT_VC1_ANNEX_G: descr = "VC-1 (SMPTE 412M Annex G)"; break;
case V4L2_PIX_FMT_VC1_ANNEX_L: descr = "VC-1 (SMPTE 412M Annex L)"; break;
case V4L2_PIX_FMT_VP8: descr = "VP8"; break;
+ case V4L2_PIX_FMT_VP9: descr = "VP9"; break;
case V4L2_PIX_FMT_CPIA1: descr = "GSPCA CPiA YUV"; break;
case V4L2_PIX_FMT_WNVA: descr = "WNVA"; break;
case V4L2_PIX_FMT_SN9C10X: descr = "GSPCA SN9C10X"; break;
@@ -1287,6 +1257,7 @@ static void v4l_fill_fmtdesc(struct v4l2_fmtdesc *fmt)
case V4L2_PIX_FMT_JPGL: descr = "JPEG Lite"; break;
case V4L2_PIX_FMT_SE401: descr = "GSPCA SE401"; break;
case V4L2_PIX_FMT_S5C_UYVY_JPG: descr = "S5C73MX interleaved UYVY/JPEG"; break;
+ case V4L2_PIX_FMT_MT21C: descr = "Mediatek Compressed Format"; break;
default:
WARN(1, "Unknown pixelformat 0x%08x\n", fmt->pixelformat);
if (fmt->description[0])
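All of the v4l-ioctl.c hunks above apply the same cleanup: user-visible format strings are no longer split across source lines, so a log message can be found with a single grep of the tree. A minimal userspace sketch (illustrative, not from the patch) showing that the two spellings produce identical output, since adjacent string literals are concatenated by the compiler:

#include <stdio.h>

int main(void)
{
	/* split literal: harder to grep for the full message */
	printf("quality=%d, APPn=%d, "
	       "APP_len=%d\n", 75, 0, 0);
	/* single literal: same output, greppable in one pass */
	printf("quality=%d, APPn=%d, APP_len=%d\n", 75, 0, 0);
	return 0;
}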
diff --git a/drivers/media/v4l2-core/videobuf-core.c b/drivers/media/v4l2-core/videobuf-core.c
index def84753c4c3..1dbf6f7785bb 100644
--- a/drivers/media/v4l2-core/videobuf-core.c
+++ b/drivers/media/v4l2-core/videobuf-core.c
@@ -572,8 +572,7 @@ int videobuf_qbuf(struct videobuf_queue *q, struct v4l2_buffer *b)
switch (b->memory) {
case V4L2_MEMORY_MMAP:
if (0 == buf->baddr) {
- dprintk(1, "qbuf: mmap requested "
- "but buffer addr is zero!\n");
+ dprintk(1, "qbuf: mmap requested but buffer addr is zero!\n");
goto done;
}
if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT
diff --git a/drivers/media/v4l2-core/videobuf-dma-sg.c b/drivers/media/v4l2-core/videobuf-dma-sg.c
index 1db0af6c7f94..ba63ca57ed7e 100644
--- a/drivers/media/v4l2-core/videobuf-dma-sg.c
+++ b/drivers/media/v4l2-core/videobuf-dma-sg.c
@@ -439,13 +439,12 @@ static int videobuf_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
struct page *page;
dprintk(3, "fault: fault @ %08lx [vma %08lx-%08lx]\n",
- (unsigned long)vmf->virtual_address,
- vma->vm_start, vma->vm_end);
+ vmf->address, vma->vm_start, vma->vm_end);
page = alloc_page(GFP_USER | __GFP_DMA32);
if (!page)
return VM_FAULT_OOM;
- clear_user_highpage(page, (unsigned long)vmf->virtual_address);
+ clear_user_highpage(page, vmf->address);
vmf->page = page;
return 0;
diff --git a/drivers/media/v4l2-core/videobuf2-core.c b/drivers/media/v4l2-core/videobuf2-core.c
index 21900202ff83..7c1d390ea438 100644
--- a/drivers/media/v4l2-core/videobuf2-core.c
+++ b/drivers/media/v4l2-core/videobuf2-core.c
@@ -358,8 +358,8 @@ static int __vb2_queue_alloc(struct vb2_queue *q, enum vb2_memory memory,
if (memory == VB2_MEMORY_MMAP) {
ret = __vb2_buf_mem_alloc(vb);
if (ret) {
- dprintk(1, "failed allocating memory for "
- "buffer %d\n", buffer);
+ dprintk(1, "failed allocating memory for buffer %d\n",
+ buffer);
q->bufs[vb->index] = NULL;
kfree(vb);
break;
@@ -372,8 +372,8 @@ static int __vb2_queue_alloc(struct vb2_queue *q, enum vb2_memory memory,
*/
ret = call_vb_qop(vb, buf_init, vb);
if (ret) {
- dprintk(1, "buffer %d %p initialization"
- " failed\n", buffer, vb);
+ dprintk(1, "buffer %d %p initialization failed\n",
+ buffer, vb);
__vb2_buf_mem_free(vb);
q->bufs[vb->index] = NULL;
kfree(vb);
@@ -997,13 +997,12 @@ static int __qbuf_userptr(struct vb2_buffer *vb, const void *pb)
&& vb->planes[plane].length == planes[plane].length)
continue;
- dprintk(3, "userspace address for plane %d changed, "
- "reacquiring memory\n", plane);
+ dprintk(3, "userspace address for plane %d changed, reacquiring memory\n",
+ plane);
/* Check if the provided plane buffer is large enough */
if (planes[plane].length < vb->planes[plane].min_length) {
- dprintk(1, "provided buffer size %u is less than "
- "setup size %u for plane %d\n",
+ dprintk(1, "provided buffer size %u is less than setup size %u for plane %d\n",
planes[plane].length,
vb->planes[plane].min_length,
plane);
@@ -1032,8 +1031,8 @@ static int __qbuf_userptr(struct vb2_buffer *vb, const void *pb)
planes[plane].m.userptr,
planes[plane].length, dma_dir);
if (IS_ERR(mem_priv)) {
- dprintk(1, "failed acquiring userspace "
- "memory for plane %d\n", plane);
+ dprintk(1, "failed acquiring userspace memory for plane %d\n",
+ plane);
ret = PTR_ERR(mem_priv);
goto err;
}
@@ -1123,8 +1122,7 @@ static int __qbuf_dmabuf(struct vb2_buffer *vb, const void *pb)
planes[plane].length = dbuf->size;
if (planes[plane].length < vb->planes[plane].min_length) {
- dprintk(1, "invalid dmabuf length %u for plane %d, "
- "minimum length %u\n",
+ dprintk(1, "invalid dmabuf length %u for plane %d, minimum length %u\n",
planes[plane].length, plane,
vb->planes[plane].min_length);
dma_buf_put(dbuf);
@@ -1472,8 +1470,7 @@ static int __vb2_wait_for_done_vb(struct vb2_queue *q, int nonblocking)
}
if (nonblocking) {
- dprintk(1, "nonblocking and no buffers to dequeue, "
- "will not wait\n");
+ dprintk(1, "nonblocking and no buffers to dequeue, will not wait\n");
return -EAGAIN;
}
diff --git a/drivers/media/v4l2-core/videobuf2-v4l2.c b/drivers/media/v4l2-core/videobuf2-v4l2.c
index 52ef8833f6b6..3529849d2218 100644
--- a/drivers/media/v4l2-core/videobuf2-v4l2.c
+++ b/drivers/media/v4l2-core/videobuf2-v4l2.c
@@ -60,14 +60,13 @@ static int __verify_planes_array(struct vb2_buffer *vb, const struct v4l2_buffer
/* Is memory for copying plane information present? */
if (b->m.planes == NULL) {
- dprintk(1, "multi-planar buffer passed but "
- "planes array not provided\n");
+ dprintk(1, "multi-planar buffer passed but planes array not provided\n");
return -EINVAL;
}
if (b->length < vb->num_planes || b->length > VB2_MAX_PLANES) {
- dprintk(1, "incorrect planes array length, "
- "expected %d, got %d\n", vb->num_planes, b->length);
+ dprintk(1, "incorrect planes array length, expected %d, got %d\n",
+ vb->num_planes, b->length);
return -EINVAL;
}
@@ -316,8 +315,7 @@ static int __fill_vb2_buffer(struct vb2_buffer *vb,
* that just says that it is either a top or a bottom field,
* but not which of the two it is.
*/
- dprintk(1, "the field is incorrectly set to ALTERNATE "
- "for an output buffer\n");
+ dprintk(1, "the field is incorrectly set to ALTERNATE for an output buffer\n");
return -EINVAL;
}
vb->timestamp = 0;
diff --git a/drivers/media/v4l2-core/videobuf2-vmalloc.c b/drivers/media/v4l2-core/videobuf2-vmalloc.c
index ab3227b75c84..3f778147cdef 100644
--- a/drivers/media/v4l2-core/videobuf2-vmalloc.c
+++ b/drivers/media/v4l2-core/videobuf2-vmalloc.c
@@ -151,8 +151,7 @@ static void *vb2_vmalloc_vaddr(void *buf_priv)
struct vb2_vmalloc_buf *buf = buf_priv;
if (!buf->vaddr) {
- pr_err("Address of an unallocated plane requested "
- "or cannot map user pointer\n");
+ pr_err("Address of an unallocated plane requested or cannot map user pointer\n");
return NULL;
}
diff --git a/drivers/memory/Kconfig b/drivers/memory/Kconfig
index 4b4c0c3c3d2f..ec80e35c8dfe 100644
--- a/drivers/memory/Kconfig
+++ b/drivers/memory/Kconfig
@@ -134,6 +134,14 @@ config MTK_SMI
mainly help enable/disable iommu and control the power domain and
clocks for each local arbiter.
+config DA8XX_DDRCTL
+ bool "Texas Instruments da8xx DDR2/mDDR driver"
+ depends on ARCH_DAVINCI_DA8XX
+ help
+ This driver is for the DDR2/mDDR Memory Controller present on
+ Texas Instruments da8xx SoCs. It's used to tweak various memory
+ controller configuration options.
+
source "drivers/memory/samsung/Kconfig"
source "drivers/memory/tegra/Kconfig"
diff --git a/drivers/memory/Makefile b/drivers/memory/Makefile
index b20ae38b5bfb..e88097fbc085 100644
--- a/drivers/memory/Makefile
+++ b/drivers/memory/Makefile
@@ -17,6 +17,7 @@ obj-$(CONFIG_MVEBU_DEVBUS) += mvebu-devbus.o
obj-$(CONFIG_TEGRA20_MC) += tegra20-mc.o
obj-$(CONFIG_JZ4780_NEMC) += jz4780-nemc.o
obj-$(CONFIG_MTK_SMI) += mtk-smi.o
+obj-$(CONFIG_DA8XX_DDRCTL) += da8xx-ddrctl.o
obj-$(CONFIG_SAMSUNG_MC) += samsung/
obj-$(CONFIG_TEGRA_MC) += tegra/
diff --git a/drivers/memory/atmel-ebi.c b/drivers/memory/atmel-ebi.c
index b5ed3bd082b5..047d6fcdcec2 100644
--- a/drivers/memory/atmel-ebi.c
+++ b/drivers/memory/atmel-ebi.c
@@ -657,7 +657,7 @@ static int at91_ebi_dev_disable(struct at91_ebi *ebi, struct device_node *np)
return -ENOMEM;
newprop->value = devm_kstrdup(dev, "disabled", GFP_KERNEL);
- if (!newprop->name)
+ if (!newprop->value)
return -ENOMEM;
newprop->length = sizeof("disabled");
diff --git a/drivers/memory/atmel-sdramc.c b/drivers/memory/atmel-sdramc.c
index 12080b05e3e6..b418b39af180 100644
--- a/drivers/memory/atmel-sdramc.c
+++ b/drivers/memory/atmel-sdramc.c
@@ -85,8 +85,4 @@ static struct platform_driver atmel_ramc_driver = {
},
};
-static int __init atmel_ramc_init(void)
-{
- return platform_driver_register(&atmel_ramc_driver);
-}
-device_initcall(atmel_ramc_init);
+builtin_platform_driver(atmel_ramc_driver);
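The atmel-sdramc conversion above works because builtin_platform_driver() generates exactly the init boilerplate being deleted. A rough sketch of the expansion, shown under a hypothetical name (see include/linux/platform_device.h for the real macro):

/* approximate, illustrative expansion -- not the verbatim kernel macro */
#define demo_builtin_platform_driver(__drv) \
static int __init __drv##_init(void) \
{ \
	return platform_driver_register(&(__drv)); \
} \
device_initcall(__drv##_init)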
diff --git a/drivers/memory/da8xx-ddrctl.c b/drivers/memory/da8xx-ddrctl.c
new file mode 100644
index 000000000000..030afbe29d0c
--- /dev/null
+++ b/drivers/memory/da8xx-ddrctl.c
@@ -0,0 +1,173 @@
+/*
+ * TI da8xx DDR2/mDDR controller driver
+ *
+ * Copyright (C) 2016 BayLibre SAS
+ *
+ * Author:
+ * Bartosz Golaszewski <bgolaszewski@baylibre.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+
+/*
+ * REVISIT: Linux doesn't have a good framework for the kind of performance
+ * knobs this driver controls. We can't use device tree properties, as these
+ * knobs are hardware configuration rather than description. We also don't
+ * want to commit to maintaining some random sysfs attributes.
+ *
+ * For now we just hardcode the register values for the boards that need
+ * some changes (as is the case for the LCD controller on da850-lcdk - the
+ * first board we support here). When Linux gets an appropriate framework,
+ * converting the driver to it will be easy.
+ */
+
+struct da8xx_ddrctl_config_knob {
+ const char *name;
+ u32 reg;
+ u32 mask;
+ u32 shift;
+};
+
+static const struct da8xx_ddrctl_config_knob da8xx_ddrctl_knobs[] = {
+ {
+ .name = "da850-pbbpr",
+ .reg = 0x20,
+ .mask = 0xffffff00,
+ .shift = 0,
+ },
+};
+
+struct da8xx_ddrctl_setting {
+ const char *name;
+ u32 val;
+};
+
+struct da8xx_ddrctl_board_settings {
+ const char *board;
+ const struct da8xx_ddrctl_setting *settings;
+};
+
+static const struct da8xx_ddrctl_setting da850_lcdk_ddrctl_settings[] = {
+ {
+ .name = "da850-pbbpr",
+ .val = 0x20,
+ },
+ { }
+};
+
+static const struct da8xx_ddrctl_board_settings da8xx_ddrctl_board_confs[] = {
+ {
+ .board = "ti,da850-lcdk",
+ .settings = da850_lcdk_ddrctl_settings,
+ },
+};
+
+static const struct da8xx_ddrctl_config_knob *
+da8xx_ddrctl_match_knob(const struct da8xx_ddrctl_setting *setting)
+{
+ const struct da8xx_ddrctl_config_knob *knob;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(da8xx_ddrctl_knobs); i++) {
+ knob = &da8xx_ddrctl_knobs[i];
+
+ if (strcmp(knob->name, setting->name) == 0)
+ return knob;
+ }
+
+ return NULL;
+}
+
+static const struct da8xx_ddrctl_setting *da8xx_ddrctl_get_board_settings(void)
+{
+ const struct da8xx_ddrctl_board_settings *board_settings;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(da8xx_ddrctl_board_confs); i++) {
+ board_settings = &da8xx_ddrctl_board_confs[i];
+
+ if (of_machine_is_compatible(board_settings->board))
+ return board_settings->settings;
+ }
+
+ return NULL;
+}
+
+static int da8xx_ddrctl_probe(struct platform_device *pdev)
+{
+ const struct da8xx_ddrctl_config_knob *knob;
+ const struct da8xx_ddrctl_setting *setting;
+ struct device_node *node;
+ struct resource *res;
+ void __iomem *ddrctl;
+ struct device *dev;
+ u32 reg;
+
+ dev = &pdev->dev;
+ node = dev->of_node;
+
+ setting = da8xx_ddrctl_get_board_settings();
+ if (!setting) {
+ dev_err(dev, "no settings defined for this board\n");
+ return -EINVAL;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ ddrctl = devm_ioremap_resource(dev, res);
+ if (IS_ERR(ddrctl)) {
+ dev_err(dev, "unable to map memory controller registers\n");
+ return PTR_ERR(ddrctl);
+ }
+
+ for (; setting->name; setting++) {
+ knob = da8xx_ddrctl_match_knob(setting);
+ if (!knob) {
+ dev_warn(dev,
+ "no such config option: %s\n", setting->name);
+ continue;
+ }
+
+ if (knob->reg + sizeof(u32) > resource_size(res)) {
+ dev_warn(dev,
+ "register offset of '%s' exceeds mapped memory size\n",
+ knob->name);
+ continue;
+ }
+
+ reg = readl(ddrctl + knob->reg);
+ reg &= knob->mask;
+ reg |= setting->val << knob->shift;
+
+ dev_dbg(dev, "writing 0x%08x to %s\n", reg, setting->name);
+
+ writel(reg, ddrctl + knob->reg);
+ }
+
+ return 0;
+}
+
+static const struct of_device_id da8xx_ddrctl_of_match[] = {
+ { .compatible = "ti,da850-ddr-controller", },
+ { },
+};
+
+static struct platform_driver da8xx_ddrctl_driver = {
+ .probe = da8xx_ddrctl_probe,
+ .driver = {
+ .name = "da850-ddr-controller",
+ .of_match_table = da8xx_ddrctl_of_match,
+ },
+};
+module_platform_driver(da8xx_ddrctl_driver);
+
+MODULE_AUTHOR("Bartosz Golaszewski <bgolaszewski@baylibre.com>");
+MODULE_DESCRIPTION("TI da8xx DDR2/mDDR controller driver");
+MODULE_LICENSE("GPL v2");
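Supporting another board in da8xx-ddrctl.c means adding a settings table and an entry in da8xx_ddrctl_board_confs. A hypothetical sketch, reusing the struct definitions from the new file above (the "ti,da850-evm" entry is made up for illustration):

static const struct da8xx_ddrctl_setting da850_evm_ddrctl_settings[] = {
	{
		.name = "da850-pbbpr",	/* must match a knob in da8xx_ddrctl_knobs */
		.val = 0x20,
	},
	{ }	/* sentinel: the probe loop stops at a NULL name */
};

/* the extended board table would then look like this */
static const struct da8xx_ddrctl_board_settings da8xx_ddrctl_board_confs[] = {
	{
		.board = "ti,da850-lcdk",
		.settings = da850_lcdk_ddrctl_settings,
	},
	{
		.board = "ti,da850-evm",	/* hypothetical entry */
		.settings = da850_evm_ddrctl_settings,
	},
};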
diff --git a/drivers/memstick/core/ms_block.c b/drivers/memstick/core/ms_block.c
index aacf584f2a42..f3512404bc52 100644
--- a/drivers/memstick/core/ms_block.c
+++ b/drivers/memstick/core/ms_block.c
@@ -2006,7 +2006,7 @@ static int msb_prepare_req(struct request_queue *q, struct request *req)
blk_dump_rq_flags(req, "MS unsupported request");
return BLKPREP_KILL;
}
- req->cmd_flags |= REQ_DONTPREP;
+ req->rq_flags |= RQF_DONTPREP;
return BLKPREP_OK;
}
diff --git a/drivers/memstick/core/mspro_block.c b/drivers/memstick/core/mspro_block.c
index c1472275fe57..fa0746d182ff 100644
--- a/drivers/memstick/core/mspro_block.c
+++ b/drivers/memstick/core/mspro_block.c
@@ -834,7 +834,7 @@ static int mspro_block_prepare_req(struct request_queue *q, struct request *req)
return BLKPREP_KILL;
}
- req->cmd_flags |= REQ_DONTPREP;
+ req->rq_flags |= RQF_DONTPREP;
return BLKPREP_OK;
}
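Both memstick hunks track the block-layer split of request flags: block-internal RQF_* bits now live in req->rq_flags, separate from the REQ_* bits in req->cmd_flags. A minimal sketch of a prepare hook under the new layout (hypothetical name, same pattern as the two drivers above):

static int demo_prepare_req(struct request_queue *q, struct request *req)
{
	/* RQF_DONTPREP marks the request as already prepared; it moved
	 * from cmd_flags (as REQ_DONTPREP) to the new rq_flags field.
	 */
	req->rq_flags |= RQF_DONTPREP;
	return BLKPREP_OK;
}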
diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
index 89c7ed16b4df..1e73064b0fb2 100644
--- a/drivers/message/fusion/mptbase.c
+++ b/drivers/message/fusion/mptbase.c
@@ -2585,10 +2585,7 @@ mpt_do_ioc_recovery(MPT_ADAPTER *ioc, u32 reason, int sleepFlag)
(void) GetLanConfigPages(ioc);
a = (u8*)&ioc->lan_cnfg_page1.HardwareAddressLow;
dprintk(ioc, printk(MYIOC_s_DEBUG_FMT
- "LanAddr = %02X:%02X:%02X"
- ":%02X:%02X:%02X\n",
- ioc->name, a[5], a[4],
- a[3], a[2], a[1], a[0]));
+ "LanAddr = %pMR\n", ioc->name, a));
}
break;
@@ -2868,21 +2865,21 @@ MptDisplayIocCapabilities(MPT_ADAPTER *ioc)
printk(KERN_INFO "%s: ", ioc->name);
if (ioc->prod_name)
- printk("%s: ", ioc->prod_name);
- printk("Capabilities={");
+ pr_cont("%s: ", ioc->prod_name);
+ pr_cont("Capabilities={");
if (ioc->pfacts[0].ProtocolFlags & MPI_PORTFACTS_PROTOCOL_INITIATOR) {
- printk("Initiator");
+ pr_cont("Initiator");
i++;
}
if (ioc->pfacts[0].ProtocolFlags & MPI_PORTFACTS_PROTOCOL_TARGET) {
- printk("%sTarget", i ? "," : "");
+ pr_cont("%sTarget", i ? "," : "");
i++;
}
if (ioc->pfacts[0].ProtocolFlags & MPI_PORTFACTS_PROTOCOL_LAN) {
- printk("%sLAN", i ? "," : "");
+ pr_cont("%sLAN", i ? "," : "");
i++;
}
@@ -2891,12 +2888,12 @@ MptDisplayIocCapabilities(MPT_ADAPTER *ioc)
* This would probably evoke more questions than it's worth
*/
if (ioc->pfacts[0].ProtocolFlags & MPI_PORTFACTS_PROTOCOL_TARGET) {
- printk("%sLogBusAddr", i ? "," : "");
+ pr_cont("%sLogBusAddr", i ? "," : "");
i++;
}
#endif
- printk("}\n");
+ pr_cont("}\n");
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
@@ -6783,8 +6780,7 @@ static int mpt_iocinfo_proc_show(struct seq_file *m, void *v)
if (ioc->bus_type == FC) {
if (ioc->pfacts[p].ProtocolFlags & MPI_PORTFACTS_PROTOCOL_LAN) {
u8 *a = (u8*)&ioc->lan_cnfg_page1.HardwareAddressLow;
- seq_printf(m, " LanAddr = %02X:%02X:%02X:%02X:%02X:%02X\n",
- a[5], a[4], a[3], a[2], a[1], a[0]);
+ seq_printf(m, " LanAddr = %pMR\n", a);
}
seq_printf(m, " WWN = %08X%08X:%08X%08X\n",
ioc->fc_port_page0[p].WWNN.High,
@@ -6861,8 +6857,7 @@ mpt_print_ioc_summary(MPT_ADAPTER *ioc, char *buffer, int *size, int len, int sh
if (showlan && (ioc->pfacts[0].ProtocolFlags & MPI_PORTFACTS_PROTOCOL_LAN)) {
u8 *a = (u8*)&ioc->lan_cnfg_page1.HardwareAddressLow;
- y += sprintf(buffer+len+y, ", LanAddr=%02X:%02X:%02X:%02X:%02X:%02X",
- a[5], a[4], a[3], a[2], a[1], a[0]);
+ y += sprintf(buffer+len+y, ", LanAddr=%pMR", a);
}
y += sprintf(buffer+len+y, ", IRQ=%d", ioc->pci_irq);
@@ -6896,8 +6891,7 @@ static void seq_mpt_print_ioc_summary(MPT_ADAPTER *ioc, struct seq_file *m, int
if (showlan && (ioc->pfacts[0].ProtocolFlags & MPI_PORTFACTS_PROTOCOL_LAN)) {
u8 *a = (u8*)&ioc->lan_cnfg_page1.HardwareAddressLow;
- seq_printf(m, ", LanAddr=%02X:%02X:%02X:%02X:%02X:%02X",
- a[5], a[4], a[3], a[2], a[1], a[0]);
+ seq_printf(m, ", LanAddr=%pMR", a);
}
seq_printf(m, ", IRQ=%d", ioc->pci_irq);
diff --git a/drivers/message/fusion/mptlan.c b/drivers/message/fusion/mptlan.c
index 6955c9e22d57..55dd71bbdc2a 100644
--- a/drivers/message/fusion/mptlan.c
+++ b/drivers/message/fusion/mptlan.c
@@ -549,16 +549,6 @@ mpt_lan_close(struct net_device *dev)
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
-static int
-mpt_lan_change_mtu(struct net_device *dev, int new_mtu)
-{
- if ((new_mtu < MPT_LAN_MIN_MTU) || (new_mtu > MPT_LAN_MAX_MTU))
- return -EINVAL;
- dev->mtu = new_mtu;
- return 0;
-}
-
-/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/* Tx timeout handler. */
static void
mpt_lan_tx_timeout(struct net_device *dev)
@@ -1304,7 +1294,6 @@ static const struct net_device_ops mpt_netdev_ops = {
.ndo_open = mpt_lan_open,
.ndo_stop = mpt_lan_close,
.ndo_start_xmit = mpt_lan_sdu_send,
- .ndo_change_mtu = mpt_lan_change_mtu,
.ndo_tx_timeout = mpt_lan_tx_timeout,
};
@@ -1375,6 +1364,10 @@ mpt_register_lan_device (MPT_ADAPTER *mpt_dev, int pnum)
dev->netdev_ops = &mpt_netdev_ops;
dev->watchdog_timeo = MPT_LAN_TX_TIMEOUT;
+ /* MTU range: 96 - 65280 */
+ dev->min_mtu = MPT_LAN_MIN_MTU;
+ dev->max_mtu = MPT_LAN_MAX_MTU;
+
dlprintk((KERN_INFO MYNAM ": Finished registering dev "
"and setting initial values\n"));
diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c
index 6c9fc11efb87..08a807d6a44f 100644
--- a/drivers/message/fusion/mptscsih.c
+++ b/drivers/message/fusion/mptscsih.c
@@ -1366,15 +1366,10 @@ mptscsih_qcmd(struct scsi_cmnd *SCpnt)
/* Default to untagged. Once a target structure has been allocated,
* use the Inquiry data to determine if device supports tagged.
*/
- if ((vdevice->vtarget->tflags & MPT_TARGET_FLAGS_Q_YES)
- && (SCpnt->device->tagged_supported)) {
+ if ((vdevice->vtarget->tflags & MPT_TARGET_FLAGS_Q_YES) &&
+ SCpnt->device->tagged_supported)
scsictl = scsidir | MPI_SCSIIO_CONTROL_SIMPLEQ;
- if (SCpnt->request && SCpnt->request->ioprio) {
- if (((SCpnt->request->ioprio & 0x7) == 1) ||
- !(SCpnt->request->ioprio & 0x7))
- scsictl |= MPI_SCSIIO_CONTROL_HEADOFQ;
- }
- } else
+ else
scsictl = scsidir | MPI_SCSIIO_CONTROL_UNTAGGED;
diff --git a/drivers/mfd/88pm860x-core.c b/drivers/mfd/88pm860x-core.c
index 25e1aafae60c..227b99018657 100644
--- a/drivers/mfd/88pm860x-core.c
+++ b/drivers/mfd/88pm860x-core.c
@@ -1132,8 +1132,7 @@ static int pm860x_dt_init(struct device_node *np,
return 0;
}
-static int pm860x_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int pm860x_probe(struct i2c_client *client)
{
struct pm860x_platform_data *pdata = dev_get_platdata(&client->dev);
struct device_node *node = client->dev.of_node;
@@ -1259,7 +1258,7 @@ static struct i2c_driver pm860x_driver = {
.pm = &pm860x_pm_ops,
.of_match_table = pm860x_dt_ids,
},
- .probe = pm860x_probe,
+ .probe_new = pm860x_probe,
.remove = pm860x_remove,
.id_table = pm860x_id_table,
};
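The pm860x conversion uses the then-new .probe_new i2c callback, which drops the struct i2c_device_id argument; drivers that still need per-variant data fetch it from the match tables instead. A minimal sketch with hypothetical names (assumes <linux/i2c.h> and <linux/of_device.h>):

static int demo_probe(struct i2c_client *client)
{
	/* per-variant data comes from the OF match table, not the id */
	const void *data = of_device_get_match_data(&client->dev);

	return data ? 0 : -ENODEV;
}

static struct i2c_driver demo_driver = {
	.driver = {
		.name = "demo",
	},
	.probe_new = demo_probe,
};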
diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
index c6df6442ba2b..4ce3b6f11830 100644
--- a/drivers/mfd/Kconfig
+++ b/drivers/mfd/Kconfig
@@ -40,6 +40,22 @@ config MFD_ACT8945A
linear regulators, along with a complete ActivePath battery
charger.
+config MFD_SUN4I_GPADC
+ tristate "Allwinner sunxi platforms' GPADC MFD driver"
+ select MFD_CORE
+ select REGMAP_MMIO
+ select REGMAP_IRQ
+ depends on ARCH_SUNXI || COMPILE_TEST
+ help
+ Select this to get support for the ADC in Allwinner SoCs (A10, A13
+ and A31). This driver only maps the hardware interrupt and registers;
+ you have to select individual drivers based on this MFD to be able to
+ use the ADC or the thermal sensor. It will try to probe the ADC
+ driver sun4i-gpadc-iio and the hwmon driver iio_hwmon.
+
+ To compile this driver as a module, choose M here: the module will be
+ called sun4i-gpadc.
+
config MFD_AS3711
bool "AMS AS3711"
select MFD_CORE
@@ -293,6 +309,7 @@ config MFD_DLN2
config MFD_EXYNOS_LPASS
tristate "Samsung Exynos SoC Low Power Audio Subsystem"
+ depends on ARCH_EXYNOS || COMPILE_TEST
select MFD_CORE
select REGMAP_MMIO
help
@@ -563,7 +580,7 @@ config MFD_MAX14577
config MFD_MAX77620
bool "Maxim Semiconductor MAX77620 and MAX20024 PMIC Support"
depends on I2C=y
- depends on OF
+ depends on OF || COMPILE_TEST
select MFD_CORE
select REGMAP_I2C
select REGMAP_IRQ
@@ -578,7 +595,7 @@ config MFD_MAX77620
config MFD_MAX77686
tristate "Maxim Semiconductor MAX77686/802 PMIC Support"
depends on I2C
- depends on OF
+ depends on OF || COMPILE_TEST
select MFD_CORE
select REGMAP_I2C
select REGMAP_IRQ
@@ -756,24 +773,20 @@ config UCB1400_CORE
module will be called ucb1400_core.
config MFD_PM8XXX
- tristate
-
-config MFD_PM8921_CORE
- tristate "Qualcomm PM8921 PMIC chip"
+ tristate "Qualcomm PM8xxx PMIC chips driver"
depends on (ARM || HEXAGON)
select IRQ_DOMAIN
select MFD_CORE
- select MFD_PM8XXX
select REGMAP
help
If you say yes to this option, support will be included for the
- built-in PM8921 PMIC chip.
+ built-in PM8xxx PMIC chips.
- This is required if your board has a PM8921 and uses its features,
+ This is required if your board has a PM8xxx and uses its features,
such as: MPPs, GPIOs, regulators, interrupts, and PWM.
- Say M here if you want to include support for PM8921 chip as a module.
- This will build a module called "pm8921-core".
+ Say M here if you want to include support for PM8xxx chips as a
+ module. This will build a module called "pm8xxx-core".
config MFD_QCOM_RPM
tristate "Qualcomm Resource Power Manager (RPM)"
@@ -881,7 +894,8 @@ config MFD_RN5T618
select MFD_CORE
select REGMAP_I2C
help
- Say yes here to add support for the Ricoh RN5T567 or R5T618 PMIC.
+ Say yes here to add support for the Ricoh RN5T567,
+ RN5T618 or RC5T619 PMIC.
This driver provides common support for accessing the device,
additional drivers must be enabled in order to use the
functionality of the device.
@@ -955,7 +969,7 @@ config MFD_SMSC
config ABX500_CORE
bool "ST-Ericsson ABX500 Mixed Signal Circuit register functions"
- default y if ARCH_U300 || ARCH_U8500
+ default y if ARCH_U300 || ARCH_U8500 || COMPILE_TEST
help
Say yes here if you have the ABX500 Mixed Signal IC family
chips. This core driver exposes register access functions.
diff --git a/drivers/mfd/Makefile b/drivers/mfd/Makefile
index 9834e669d985..dda4d4f73ad7 100644
--- a/drivers/mfd/Makefile
+++ b/drivers/mfd/Makefile
@@ -172,7 +172,7 @@ obj-$(CONFIG_MFD_SI476X_CORE) += si476x-core.o
obj-$(CONFIG_MFD_CS5535) += cs5535-mfd.o
obj-$(CONFIG_MFD_OMAP_USB_HOST) += omap-usb-host.o omap-usb-tll.o
-obj-$(CONFIG_MFD_PM8921_CORE) += pm8921-core.o ssbi.o
+obj-$(CONFIG_MFD_PM8XXX) += qcom-pm8xxx.o ssbi.o
obj-$(CONFIG_MFD_QCOM_RPM) += qcom_rpm.o
obj-$(CONFIG_MFD_SPMI_PMIC) += qcom-spmi-pmic.o
obj-$(CONFIG_TPS65911_COMPARATOR) += tps65911-comparator.o
@@ -211,3 +211,4 @@ obj-$(CONFIG_INTEL_SOC_PMIC) += intel-soc-pmic.o
obj-$(CONFIG_MFD_MT6397) += mt6397-core.o
obj-$(CONFIG_MFD_ALTERA_A10SR) += altera-a10sr.o
+obj-$(CONFIG_MFD_SUN4I_GPADC) += sun4i-gpadc.o
diff --git a/drivers/mfd/ab3100-core.c b/drivers/mfd/ab3100-core.c
index 6a5a98806cb8..099635bed188 100644
--- a/drivers/mfd/ab3100-core.c
+++ b/drivers/mfd/ab3100-core.c
@@ -12,7 +12,7 @@
#include <linux/notifier.h>
#include <linux/slab.h>
#include <linux/err.h>
-#include <linux/module.h>
+#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/device.h>
#include <linux/interrupt.h>
@@ -628,20 +628,10 @@ static void ab3100_setup_debugfs(struct ab3100 *ab3100)
exit_no_debugfs:
return;
}
-static inline void ab3100_remove_debugfs(void)
-{
- debugfs_remove(ab3100_set_reg_file);
- debugfs_remove(ab3100_get_reg_file);
- debugfs_remove(ab3100_reg_file);
- debugfs_remove(ab3100_dir);
-}
#else
static inline void ab3100_setup_debugfs(struct ab3100 *ab3100)
{
}
-static inline void ab3100_remove_debugfs(void)
-{
-}
#endif
/*
@@ -949,45 +939,22 @@ static int ab3100_probe(struct i2c_client *client,
return err;
}
-static int ab3100_remove(struct i2c_client *client)
-{
- struct ab3100 *ab3100 = i2c_get_clientdata(client);
-
- /* Unregister subdevices */
- mfd_remove_devices(&client->dev);
- ab3100_remove_debugfs();
- i2c_unregister_device(ab3100->testreg_client);
- return 0;
-}
-
static const struct i2c_device_id ab3100_id[] = {
{ "ab3100", 0 },
{ }
};
-MODULE_DEVICE_TABLE(i2c, ab3100_id);
static struct i2c_driver ab3100_driver = {
.driver = {
- .name = "ab3100",
+ .name = "ab3100",
+ .suppress_bind_attrs = true,
},
.id_table = ab3100_id,
.probe = ab3100_probe,
- .remove = ab3100_remove,
};
static int __init ab3100_i2c_init(void)
{
return i2c_add_driver(&ab3100_driver);
}
-
-static void __exit ab3100_i2c_exit(void)
-{
- i2c_del_driver(&ab3100_driver);
-}
-
subsys_initcall(ab3100_i2c_init);
-module_exit(ab3100_i2c_exit);
-
-MODULE_AUTHOR("Linus Walleij <linus.walleij@stericsson.com>");
-MODULE_DESCRIPTION("AB3100 core driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/mfd/ab8500-core.c b/drivers/mfd/ab8500-core.c
index 589eebfc13df..6e00124cef01 100644
--- a/drivers/mfd/ab8500-core.c
+++ b/drivers/mfd/ab8500-core.c
@@ -14,7 +14,7 @@
#include <linux/irqdomain.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
-#include <linux/module.h>
+#include <linux/moduleparam.h>
#include <linux/platform_device.h>
#include <linux/mfd/core.h>
#include <linux/mfd/abx500.h>
@@ -123,6 +123,10 @@ static DEFINE_SPINLOCK(on_stat_lock);
static u8 turn_on_stat_mask = 0xFF;
static u8 turn_on_stat_set;
static bool no_bm; /* No battery management */
+/*
+ * not really modular, but the easiest way to keep compat with existing
+ * bootargs behaviour is to continue using module_param here.
+ */
module_param(no_bm, bool, S_IRUGO);
#define AB9540_MODEM_CTRL2_REG 0x23
@@ -1324,25 +1328,6 @@ static int ab8500_probe(struct platform_device *pdev)
return ret;
}
-static int ab8500_remove(struct platform_device *pdev)
-{
- struct ab8500 *ab8500 = platform_get_drvdata(pdev);
-
- if (((is_ab8505(ab8500) || is_ab9540(ab8500)) &&
- ab8500->chip_id >= AB8500_CUT2P0) || is_ab8540(ab8500))
- sysfs_remove_group(&ab8500->dev->kobj, &ab9540_attr_group);
- else
- sysfs_remove_group(&ab8500->dev->kobj, &ab8500_attr_group);
-
- if ((is_ab8505(ab8500) || is_ab9540(ab8500)) &&
- ab8500->chip_id >= AB8500_CUT2P0)
- sysfs_remove_group(&ab8500->dev->kobj, &ab8505_attr_group);
-
- mfd_remove_devices(ab8500->dev);
-
- return 0;
-}
-
static const struct platform_device_id ab8500_id[] = {
{ "ab8500-core", AB8500_VERSION_AB8500 },
{ "ab8505-i2c", AB8500_VERSION_AB8505 },
@@ -1354,9 +1339,9 @@ static const struct platform_device_id ab8500_id[] = {
static struct platform_driver ab8500_core_driver = {
.driver = {
.name = "ab8500-core",
+ .suppress_bind_attrs = true,
},
.probe = ab8500_probe,
- .remove = ab8500_remove,
.id_table = ab8500_id,
};
@@ -1364,14 +1349,4 @@ static int __init ab8500_core_init(void)
{
return platform_driver_register(&ab8500_core_driver);
}
-
-static void __exit ab8500_core_exit(void)
-{
- platform_driver_unregister(&ab8500_core_driver);
-}
core_initcall(ab8500_core_init);
-module_exit(ab8500_core_exit);
-
-MODULE_AUTHOR("Mattias Wallin, Srinidhi Kasagar, Rabin Vincent");
-MODULE_DESCRIPTION("AB8500 MFD core");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/mfd/ab8500-debugfs.c b/drivers/mfd/ab8500-debugfs.c
index acf6c00b14b9..c1c815241e02 100644
--- a/drivers/mfd/ab8500-debugfs.c
+++ b/drivers/mfd/ab8500-debugfs.c
@@ -74,7 +74,7 @@
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <linux/fs.h>
-#include <linux/module.h>
+#include <linux/init.h>
#include <linux/debugfs.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
@@ -3234,33 +3234,16 @@ err:
return -ENOMEM;
}
-static int ab8500_debug_remove(struct platform_device *plf)
-{
- debugfs_remove_recursive(ab8500_dir);
-
- return 0;
-}
-
static struct platform_driver ab8500_debug_driver = {
.driver = {
.name = "ab8500-debug",
+ .suppress_bind_attrs = true,
},
.probe = ab8500_debug_probe,
- .remove = ab8500_debug_remove
};
static int __init ab8500_debug_init(void)
{
return platform_driver_register(&ab8500_debug_driver);
}
-
-static void __exit ab8500_debug_exit(void)
-{
- platform_driver_unregister(&ab8500_debug_driver);
-}
subsys_initcall(ab8500_debug_init);
-module_exit(ab8500_debug_exit);
-
-MODULE_AUTHOR("Mattias WALLIN <mattias.wallin@stericsson.com");
-MODULE_DESCRIPTION("AB8500 DEBUG");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/mfd/ab8500-gpadc.c b/drivers/mfd/ab8500-gpadc.c
index 97dcadc8fa8b..f4e94869d612 100644
--- a/drivers/mfd/ab8500-gpadc.c
+++ b/drivers/mfd/ab8500-gpadc.c
@@ -5,9 +5,9 @@
* Author: Arun R Murthy <arun.murthy@stericsson.com>
* Author: Daniel Willerud <daniel.willerud@stericsson.com>
* Author: Johan Palsson <johan.palsson@stericsson.com>
+ * Author: M'boumba Cedric Madianga
*/
#include <linux/init.h>
-#include <linux/module.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
@@ -1054,11 +1054,7 @@ static int __init ab8500_gpadc_init(void)
{
return platform_driver_register(&ab8500_gpadc_driver);
}
-
-static void __exit ab8500_gpadc_exit(void)
-{
- platform_driver_unregister(&ab8500_gpadc_driver);
-}
+subsys_initcall_sync(ab8500_gpadc_init);
/**
* ab8540_gpadc_get_otp() - returns OTP values
@@ -1077,14 +1073,3 @@ void ab8540_gpadc_get_otp(struct ab8500_gpadc *gpadc,
*ibat_l = gpadc->cal_data[ADC_INPUT_IBAT].otp_calib_lo;
*ibat_h = gpadc->cal_data[ADC_INPUT_IBAT].otp_calib_hi;
}
-
-subsys_initcall_sync(ab8500_gpadc_init);
-module_exit(ab8500_gpadc_exit);
-
-MODULE_LICENSE("GPL v2");
-MODULE_AUTHOR("Arun R Murthy");
-MODULE_AUTHOR("Daniel Willerud");
-MODULE_AUTHOR("Johan Palsson");
-MODULE_AUTHOR("M'boumba Cedric Madianga");
-MODULE_ALIAS("platform:ab8500_gpadc");
-MODULE_DESCRIPTION("AB8500 GPADC driver");
diff --git a/drivers/mfd/ab8500-sysctrl.c b/drivers/mfd/ab8500-sysctrl.c
index 207cc497958a..80c0efa66ac1 100644
--- a/drivers/mfd/ab8500-sysctrl.c
+++ b/drivers/mfd/ab8500-sysctrl.c
@@ -1,11 +1,14 @@
/*
+ * AB8500 system control driver
+ *
* Copyright (C) ST-Ericsson SA 2010
* Author: Mattias Nilsson <mattias.i.nilsson@stericsson.com> for ST Ericsson.
* License terms: GNU General Public License (GPL) version 2
*/
#include <linux/err.h>
-#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/export.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
#include <linux/reboot.h>
@@ -158,7 +161,3 @@ static int __init ab8500_sysctrl_init(void)
return platform_driver_register(&ab8500_sysctrl_driver);
}
arch_initcall(ab8500_sysctrl_init);
-
-MODULE_AUTHOR("Mattias Nilsson <mattias.i.nilsson@stericsson.com");
-MODULE_DESCRIPTION("AB8500 system control driver");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/mfd/abx500-core.c b/drivers/mfd/abx500-core.c
index fe418995108c..0d3846a4767c 100644
--- a/drivers/mfd/abx500-core.c
+++ b/drivers/mfd/abx500-core.c
@@ -8,7 +8,8 @@
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/err.h>
-#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/export.h>
#include <linux/mfd/abx500.h>
static LIST_HEAD(abx500_list);
@@ -150,7 +151,3 @@ int abx500_startup_irq_enabled(struct device *dev, unsigned int irq)
return -ENOTSUPP;
}
EXPORT_SYMBOL(abx500_startup_irq_enabled);
-
-MODULE_AUTHOR("Mattias Wallin <mattias.wallin@stericsson.com>");
-MODULE_DESCRIPTION("ABX500 core driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/mfd/arizona-core.c b/drivers/mfd/arizona-core.c
index 41767f7239bb..b6d4bc63c426 100644
--- a/drivers/mfd/arizona-core.c
+++ b/drivers/mfd/arizona-core.c
@@ -1553,6 +1553,7 @@ EXPORT_SYMBOL_GPL(arizona_dev_init);
int arizona_dev_exit(struct arizona *arizona)
{
+ disable_irq(arizona->irq);
pm_runtime_disable(arizona->dev);
regulator_disable(arizona->dcvdd);
diff --git a/drivers/mfd/arizona-irq.c b/drivers/mfd/arizona-irq.c
index 5e18d3c77582..2e01975f042d 100644
--- a/drivers/mfd/arizona-irq.c
+++ b/drivers/mfd/arizona-irq.c
@@ -398,10 +398,10 @@ err_ctrlif:
err_boot_done:
free_irq(arizona->irq, arizona);
err_main_irq:
- regmap_del_irq_chip(irq_create_mapping(arizona->virq, 1),
+ regmap_del_irq_chip(irq_find_mapping(arizona->virq, 1),
arizona->irq_chip);
err_aod:
- regmap_del_irq_chip(irq_create_mapping(arizona->virq, 0),
+ regmap_del_irq_chip(irq_find_mapping(arizona->virq, 0),
arizona->aod_irq_chip);
err:
return ret;
@@ -413,9 +413,9 @@ int arizona_irq_exit(struct arizona *arizona)
free_irq(arizona_map_irq(arizona, ARIZONA_IRQ_CTRLIF_ERR),
arizona);
free_irq(arizona_map_irq(arizona, ARIZONA_IRQ_BOOT_DONE), arizona);
- regmap_del_irq_chip(irq_create_mapping(arizona->virq, 1),
+ regmap_del_irq_chip(irq_find_mapping(arizona->virq, 1),
arizona->irq_chip);
- regmap_del_irq_chip(irq_create_mapping(arizona->virq, 0),
+ regmap_del_irq_chip(irq_find_mapping(arizona->virq, 0),
arizona->aod_irq_chip);
free_irq(arizona->irq, arizona);
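The arizona teardown paths switch from irq_create_mapping() to irq_find_mapping() because the former allocates a fresh virq as a side effect when no mapping exists, which is undesirable while shutting down; the latter is a pure lookup. A sketch of the lookup-then-delete pattern (field names follow the driver, otherwise illustrative):

static void demo_remove_aod_chip(struct arizona *arizona)
{
	/* pure lookup: returns 0 if hwirq 0 was never mapped */
	unsigned int virq = irq_find_mapping(arizona->virq, 0);

	regmap_del_irq_chip(virq, arizona->aod_irq_chip);
}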
diff --git a/drivers/mfd/axp20x-i2c.c b/drivers/mfd/axp20x-i2c.c
index b1b865822c07..d35a5fe6c950 100644
--- a/drivers/mfd/axp20x-i2c.c
+++ b/drivers/mfd/axp20x-i2c.c
@@ -69,10 +69,11 @@ static const struct of_device_id axp20x_i2c_of_match[] = {
};
MODULE_DEVICE_TABLE(of, axp20x_i2c_of_match);
-/*
- * This is useless for OF-enabled devices, but it is needed by I2C subsystem
- */
static const struct i2c_device_id axp20x_i2c_id[] = {
+ { "axp152", 0 },
+ { "axp202", 0 },
+ { "axp209", 0 },
+ { "axp221", 0 },
{ },
};
MODULE_DEVICE_TABLE(i2c, axp20x_i2c_id);
diff --git a/drivers/mfd/axp20x.c b/drivers/mfd/axp20x.c
index ba130be32e61..ed918de84238 100644
--- a/drivers/mfd/axp20x.c
+++ b/drivers/mfd/axp20x.c
@@ -98,6 +98,7 @@ static const struct regmap_range axp22x_volatile_ranges[] = {
regmap_reg_range(AXP20X_PWR_INPUT_STATUS, AXP20X_PWR_OP_MODE),
regmap_reg_range(AXP20X_IRQ1_EN, AXP20X_IRQ5_STATE),
regmap_reg_range(AXP22X_GPIO_STATE, AXP22X_GPIO_STATE),
+ regmap_reg_range(AXP22X_PMIC_ADC_H, AXP20X_IPSOUT_V_HIGH_L),
regmap_reg_range(AXP20X_FG_RES, AXP20X_FG_RES),
};
@@ -135,6 +136,7 @@ static const struct regmap_range axp806_writeable_ranges[] = {
regmap_reg_range(AXP806_PWR_OUT_CTRL1, AXP806_CLDO3_V_CTRL),
regmap_reg_range(AXP20X_IRQ1_EN, AXP20X_IRQ2_EN),
regmap_reg_range(AXP20X_IRQ1_STATE, AXP20X_IRQ2_STATE),
+ regmap_reg_range(AXP806_REG_ADDR_EXT, AXP806_REG_ADDR_EXT),
};
static const struct regmap_range axp806_volatile_ranges[] = {
@@ -305,7 +307,7 @@ static const struct regmap_config axp806_regmap_config = {
.val_bits = 8,
.wr_table = &axp806_writeable_table,
.volatile_table = &axp806_volatile_table,
- .max_register = AXP806_VREF_TEMP_WARN_L,
+ .max_register = AXP806_REG_ADDR_EXT,
.cache_type = REGCACHE_RBTREE,
};
diff --git a/drivers/mfd/bcm590xx.c b/drivers/mfd/bcm590xx.c
index 0d76d690176b..c572a35a9341 100644
--- a/drivers/mfd/bcm590xx.c
+++ b/drivers/mfd/bcm590xx.c
@@ -67,7 +67,7 @@ static int bcm590xx_i2c_probe(struct i2c_client *i2c_pri,
/* Secondary I2C slave address is the base address with A(2) asserted */
bcm590xx->i2c_sec = i2c_new_dummy(i2c_pri->adapter,
i2c_pri->addr | BIT(2));
- if (IS_ERR_OR_NULL(bcm590xx->i2c_sec)) {
+ if (!bcm590xx->i2c_sec) {
dev_err(&i2c_pri->dev, "failed to add secondary I2C device\n");
return -ENODEV;
}
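The bcm590xx fix matters because i2c_new_dummy() returns NULL on failure, never an ERR_PTR, so the old IS_ERR_OR_NULL() test suggested an error-pointer case that cannot happen. A minimal sketch of the corrected pattern (hypothetical function name):

static int demo_add_secondary(struct i2c_client *pri)
{
	struct i2c_client *sec;

	/* i2c_new_dummy() returns NULL on failure, not an ERR_PTR */
	sec = i2c_new_dummy(pri->adapter, pri->addr | BIT(2));
	if (!sec)
		return -ENODEV;

	i2c_unregister_device(sec);	/* demo only: release it again */
	return 0;
}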
diff --git a/drivers/mfd/cs47l24-tables.c b/drivers/mfd/cs47l24-tables.c
index f6b78aafdb55..c090974340ad 100644
--- a/drivers/mfd/cs47l24-tables.c
+++ b/drivers/mfd/cs47l24-tables.c
@@ -292,6 +292,7 @@ static const struct reg_default cs47l24_reg_default[] = {
{ 0x00000502, 0x0000 }, /* R1282 - AIF1 Rx Pin Ctrl */
{ 0x00000503, 0x0000 }, /* R1283 - AIF1 Rate Ctrl */
{ 0x00000504, 0x0000 }, /* R1284 - AIF1 Format */
+ { 0x00000505, 0x0040 }, /* R1285 - AIF1 Tx BCLK Rate */
{ 0x00000506, 0x0040 }, /* R1286 - AIF1 Rx BCLK Rate */
{ 0x00000507, 0x1818 }, /* R1287 - AIF1 Frame Ctrl 1 */
{ 0x00000508, 0x1818 }, /* R1288 - AIF1 Frame Ctrl 2 */
@@ -318,6 +319,7 @@ static const struct reg_default cs47l24_reg_default[] = {
{ 0x00000542, 0x0000 }, /* R1346 - AIF2 Rx Pin Ctrl */
{ 0x00000543, 0x0000 }, /* R1347 - AIF2 Rate Ctrl */
{ 0x00000544, 0x0000 }, /* R1348 - AIF2 Format */
+ { 0x00000545, 0x0040 }, /* R1349 - AIF2 Tx BCLK Rate */
{ 0x00000546, 0x0040 }, /* R1350 - AIF2 Rx BCLK Rate */
{ 0x00000547, 0x1818 }, /* R1351 - AIF2 Frame Ctrl 1 */
{ 0x00000548, 0x1818 }, /* R1352 - AIF2 Frame Ctrl 2 */
@@ -340,6 +342,7 @@ static const struct reg_default cs47l24_reg_default[] = {
{ 0x00000582, 0x0000 }, /* R1410 - AIF3 Rx Pin Ctrl */
{ 0x00000583, 0x0000 }, /* R1411 - AIF3 Rate Ctrl */
{ 0x00000584, 0x0000 }, /* R1412 - AIF3 Format */
+ { 0x00000585, 0x0040 }, /* R1413 - AIF3 Tx BCLK Rate */
{ 0x00000586, 0x0040 }, /* R1414 - AIF3 Rx BCLK Rate */
{ 0x00000587, 0x1818 }, /* R1415 - AIF3 Frame Ctrl 1 */
{ 0x00000588, 0x1818 }, /* R1416 - AIF3 Frame Ctrl 2 */
@@ -923,6 +926,7 @@ static bool cs47l24_readable_register(struct device *dev, unsigned int reg)
case ARIZONA_AIF1_RX_PIN_CTRL:
case ARIZONA_AIF1_RATE_CTRL:
case ARIZONA_AIF1_FORMAT:
+ case ARIZONA_AIF1_TX_BCLK_RATE:
case ARIZONA_AIF1_RX_BCLK_RATE:
case ARIZONA_AIF1_FRAME_CTRL_1:
case ARIZONA_AIF1_FRAME_CTRL_2:
@@ -949,6 +953,7 @@ static bool cs47l24_readable_register(struct device *dev, unsigned int reg)
case ARIZONA_AIF2_RX_PIN_CTRL:
case ARIZONA_AIF2_RATE_CTRL:
case ARIZONA_AIF2_FORMAT:
+ case ARIZONA_AIF2_TX_BCLK_RATE:
case ARIZONA_AIF2_RX_BCLK_RATE:
case ARIZONA_AIF2_FRAME_CTRL_1:
case ARIZONA_AIF2_FRAME_CTRL_2:
@@ -971,6 +976,7 @@ static bool cs47l24_readable_register(struct device *dev, unsigned int reg)
case ARIZONA_AIF3_RX_PIN_CTRL:
case ARIZONA_AIF3_RATE_CTRL:
case ARIZONA_AIF3_FORMAT:
+ case ARIZONA_AIF3_TX_BCLK_RATE:
case ARIZONA_AIF3_RX_BCLK_RATE:
case ARIZONA_AIF3_FRAME_CTRL_1:
case ARIZONA_AIF3_FRAME_CTRL_2:
diff --git a/drivers/mfd/davinci_voicecodec.c b/drivers/mfd/davinci_voicecodec.c
index dff2f19296b8..4d0a5f38038a 100644
--- a/drivers/mfd/davinci_voicecodec.c
+++ b/drivers/mfd/davinci_voicecodec.c
@@ -32,6 +32,7 @@
#include <sound/pcm.h>
#include <linux/mfd/davinci_voicecodec.h>
+#include <mach/hardware.h>
static const struct regmap_config davinci_vc_regmap = {
.reg_bits = 32,
diff --git a/drivers/mfd/fsl-imx25-tsadc.c b/drivers/mfd/fsl-imx25-tsadc.c
index 77b2675cf8f5..ac430a396a89 100644
--- a/drivers/mfd/fsl-imx25-tsadc.c
+++ b/drivers/mfd/fsl-imx25-tsadc.c
@@ -187,6 +187,7 @@ static const struct of_device_id mx25_tsadc_ids[] = {
{ .compatible = "fsl,imx25-tsadc" },
{ /* Sentinel */ }
};
+MODULE_DEVICE_TABLE(of, mx25_tsadc_ids);
static struct platform_driver mx25_tsadc_driver = {
.driver = {
diff --git a/drivers/mfd/hi655x-pmic.c b/drivers/mfd/hi655x-pmic.c
index 0fc62995695b..ba706adee38b 100644
--- a/drivers/mfd/hi655x-pmic.c
+++ b/drivers/mfd/hi655x-pmic.c
@@ -169,6 +169,7 @@ static const struct of_device_id hi655x_pmic_match[] = {
{ .compatible = "hisilicon,hi655x-pmic", },
{},
};
+MODULE_DEVICE_TABLE(of, hi655x_pmic_match);
static struct platform_driver hi655x_pmic_driver = {
.driver = {
diff --git a/drivers/mfd/intel-lpss-pci.c b/drivers/mfd/intel-lpss-pci.c
index 9ff243970e93..78dbcf8b0bef 100644
--- a/drivers/mfd/intel-lpss-pci.c
+++ b/drivers/mfd/intel-lpss-pci.c
@@ -41,6 +41,7 @@ static int intel_lpss_pci_probe(struct pci_dev *pdev,
/* Probably it is enough to set this for iDMA capable devices only */
pci_set_master(pdev);
+ pci_try_set_mwi(pdev);
ret = intel_lpss_probe(&pdev->dev, info);
if (ret)
diff --git a/drivers/mfd/intel_soc_pmic_bxtwc.c b/drivers/mfd/intel_soc_pmic_bxtwc.c
index f9a8c5203873..699c8c7c9052 100644
--- a/drivers/mfd/intel_soc_pmic_bxtwc.c
+++ b/drivers/mfd/intel_soc_pmic_bxtwc.c
@@ -42,6 +42,7 @@
#define BXTWC_GPIOIRQ0 0x4E0B
#define BXTWC_GPIOIRQ1 0x4E0C
#define BXTWC_CRITIRQ 0x4E0D
+#define BXTWC_TMUIRQ 0x4FB6
/* Interrupt MASK Registers */
#define BXTWC_MIRQLVL1 0x4E0E
@@ -59,6 +60,7 @@
#define BXTWC_MGPIO0IRQ 0x4E19
#define BXTWC_MGPIO1IRQ 0x4E1A
#define BXTWC_MCRITIRQ 0x4E1B
+#define BXTWC_MTMUIRQ 0x4FB7
/* Whiskey Cove PMIC share same ACPI ID between different platforms */
#define BROXTON_PMIC_WC_HRV 4
@@ -92,6 +94,7 @@ enum bxtwc_irqs_level2 {
BXTWC_GPIO0_IRQ,
BXTWC_GPIO1_IRQ,
BXTWC_CRIT_IRQ,
+ BXTWC_TMU_IRQ,
};
static const struct regmap_irq bxtwc_regmap_irqs[] = {
@@ -120,6 +123,10 @@ static const struct regmap_irq bxtwc_regmap_irqs_level2[] = {
REGMAP_IRQ_REG(BXTWC_CRIT_IRQ, 9, 0x03),
};
+static const struct regmap_irq bxtwc_regmap_irqs_tmu[] = {
+ REGMAP_IRQ_REG(BXTWC_TMU_IRQ, 0, 0x06),
+};
+
static struct regmap_irq_chip bxtwc_regmap_irq_chip = {
.name = "bxtwc_irq_chip",
.status_base = BXTWC_IRQLVL1,
@@ -138,6 +145,15 @@ static struct regmap_irq_chip bxtwc_regmap_irq_chip_level2 = {
.num_regs = 10,
};
+static struct regmap_irq_chip bxtwc_regmap_irq_chip_tmu = {
+ .name = "bxtwc_irq_chip_tmu",
+ .status_base = BXTWC_TMUIRQ,
+ .mask_base = BXTWC_MTMUIRQ,
+ .irqs = bxtwc_regmap_irqs_tmu,
+ .num_irqs = ARRAY_SIZE(bxtwc_regmap_irqs_tmu),
+ .num_regs = 1,
+};
+
static struct resource gpio_resources[] = {
DEFINE_RES_IRQ_NAMED(BXTWC_GPIO0_IRQ, "GPIO0"),
DEFINE_RES_IRQ_NAMED(BXTWC_GPIO1_IRQ, "GPIO1"),
@@ -166,6 +182,10 @@ static struct resource bcu_resources[] = {
DEFINE_RES_IRQ_NAMED(BXTWC_BCU_IRQ, "BCU"),
};
+static struct resource tmu_resources[] = {
+ DEFINE_RES_IRQ_NAMED(BXTWC_TMU_IRQ, "TMU"),
+};
+
static struct mfd_cell bxt_wc_dev[] = {
{
.name = "bxt_wcove_gpadc",
@@ -193,6 +213,12 @@ static struct mfd_cell bxt_wc_dev[] = {
.resources = bcu_resources,
},
{
+ .name = "bxt_wcove_tmu",
+ .num_resources = ARRAY_SIZE(tmu_resources),
+ .resources = tmu_resources,
+ },
+
+ {
.name = "bxt_wcove_gpio",
.num_resources = ARRAY_SIZE(gpio_resources),
.resources = gpio_resources,
@@ -402,6 +428,15 @@ static int bxtwc_probe(struct platform_device *pdev)
goto err_irq_chip_level2;
}
+ ret = regmap_add_irq_chip(pmic->regmap, pmic->irq,
+ IRQF_ONESHOT | IRQF_SHARED,
+ 0, &bxtwc_regmap_irq_chip_tmu,
+ &pmic->irq_chip_data_tmu);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to add TMU IRQ chip\n");
+ goto err_irq_chip_tmu;
+ }
+
ret = mfd_add_devices(&pdev->dev, PLATFORM_DEVID_NONE, bxt_wc_dev,
ARRAY_SIZE(bxt_wc_dev), NULL, 0,
NULL);
@@ -431,6 +466,8 @@ static int bxtwc_probe(struct platform_device *pdev)
err_sysfs:
mfd_remove_devices(&pdev->dev);
err_mfd:
+ regmap_del_irq_chip(pmic->irq, pmic->irq_chip_data_tmu);
+err_irq_chip_tmu:
regmap_del_irq_chip(pmic->irq, pmic->irq_chip_data_level2);
err_irq_chip_level2:
regmap_del_irq_chip(pmic->irq, pmic->irq_chip_data);
@@ -446,6 +483,7 @@ static int bxtwc_remove(struct platform_device *pdev)
mfd_remove_devices(&pdev->dev);
regmap_del_irq_chip(pmic->irq, pmic->irq_chip_data);
regmap_del_irq_chip(pmic->irq, pmic->irq_chip_data_level2);
+ regmap_del_irq_chip(pmic->irq, pmic->irq_chip_data_tmu);
return 0;
}
@@ -481,7 +519,7 @@ static const struct acpi_device_id bxtwc_acpi_ids[] = {
{ "INT34D3", },
{ }
};
-MODULE_DEVICE_TABLE(acpi, pmic_acpi_ids);
+MODULE_DEVICE_TABLE(acpi, bxtwc_acpi_ids);
static struct platform_driver bxtwc_driver = {
.probe = bxtwc_probe,
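The TMU addition follows the usual pattern for stacking several regmap IRQ chips on one parent interrupt: each chip is registered with IRQF_ONESHOT | IRQF_SHARED and unwound in reverse order on failure. A sketch of that pattern, assuming the struct intel_soc_pmic fields the driver uses (illustrative, not the actual probe code):

static int demo_add_irq_chips(struct intel_soc_pmic *pmic)
{
	int ret;

	ret = regmap_add_irq_chip(pmic->regmap, pmic->irq,
				  IRQF_ONESHOT | IRQF_SHARED, 0,
				  &bxtwc_regmap_irq_chip_level2,
				  &pmic->irq_chip_data_level2);
	if (ret)
		return ret;

	ret = regmap_add_irq_chip(pmic->regmap, pmic->irq,
				  IRQF_ONESHOT | IRQF_SHARED, 0,
				  &bxtwc_regmap_irq_chip_tmu,
				  &pmic->irq_chip_data_tmu);
	if (ret)
		goto err_level2;

	return 0;

err_level2:
	/* unwind in reverse registration order */
	regmap_del_irq_chip(pmic->irq, pmic->irq_chip_data_level2);
	return ret;
}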
diff --git a/drivers/mfd/lpc_ich.c b/drivers/mfd/lpc_ich.c
index c8dee47b45d9..1ef7575547e6 100644
--- a/drivers/mfd/lpc_ich.c
+++ b/drivers/mfd/lpc_ich.c
@@ -493,6 +493,7 @@ static struct lpc_ich_info lpc_chipset_info[] = {
[LPC_LPT] = {
.name = "Lynx Point",
.iTCO_version = 2,
+ .gpio_version = ICH_V5_GPIO,
},
[LPC_LPT_LP] = {
.name = "Lynx Point_LP",
@@ -530,6 +531,7 @@ static struct lpc_ich_info lpc_chipset_info[] = {
[LPC_9S] = {
.name = "9 Series",
.iTCO_version = 2,
+ .gpio_version = ICH_V5_GPIO,
},
};
diff --git a/drivers/mfd/max77620.c b/drivers/mfd/max77620.c
index 258757e216c4..b1700b5fa640 100644
--- a/drivers/mfd/max77620.c
+++ b/drivers/mfd/max77620.c
@@ -461,7 +461,7 @@ static int max77620_probe(struct i2c_client *client,
chip->rmap = devm_regmap_init_i2c(client, rmap_config);
if (IS_ERR(chip->rmap)) {
ret = PTR_ERR(chip->rmap);
- dev_err(chip->dev, "Failed to intialise regmap: %d\n", ret);
+ dev_err(chip->dev, "Failed to initialise regmap: %d\n", ret);
return ret;
}
diff --git a/drivers/mfd/palmas.c b/drivers/mfd/palmas.c
index 8f8bacb67a15..ee9e9ea10444 100644
--- a/drivers/mfd/palmas.c
+++ b/drivers/mfd/palmas.c
@@ -431,9 +431,6 @@ static void palmas_power_off(void)
unsigned int addr;
int ret, slave;
- if (!palmas_dev)
- return;
-
slave = PALMAS_BASE_TO_SLAVE(PALMAS_PMU_CONTROL_BASE);
addr = PALMAS_BASE_TO_REG(PALMAS_PMU_CONTROL_BASE, PALMAS_DEV_CTRL);
diff --git a/drivers/mfd/pm8921-core.c b/drivers/mfd/qcom-pm8xxx.c
index 0e3a2ea25942..f08758f6b418 100644
--- a/drivers/mfd/pm8921-core.c
+++ b/drivers/mfd/qcom-pm8xxx.c
@@ -39,6 +39,20 @@
#define SSBI_REG_ADDR_IRQ_CONFIG (SSBI_REG_ADDR_IRQ_BASE + 7)
#define SSBI_REG_ADDR_IRQ_RT_STATUS (SSBI_REG_ADDR_IRQ_BASE + 8)
+#define PM8821_SSBI_REG_ADDR_IRQ_BASE 0x100
+#define PM8821_SSBI_REG_ADDR_IRQ_MASTER0 (PM8821_SSBI_REG_ADDR_IRQ_BASE + 0x30)
+#define PM8821_SSBI_REG_ADDR_IRQ_MASTER1 (PM8821_SSBI_REG_ADDR_IRQ_BASE + 0xb0)
+#define PM8821_SSBI_REG(m, b, offset) \
+ ((m == 0) ? \
+ (PM8821_SSBI_REG_ADDR_IRQ_MASTER0 + b + offset) : \
+ (PM8821_SSBI_REG_ADDR_IRQ_MASTER1 + b + offset))
+#define PM8821_SSBI_ADDR_IRQ_ROOT(m, b) PM8821_SSBI_REG(m, b, 0x0)
+#define PM8821_SSBI_ADDR_IRQ_CLEAR(m, b) PM8821_SSBI_REG(m, b, 0x01)
+#define PM8821_SSBI_ADDR_IRQ_MASK(m, b) PM8821_SSBI_REG(m, b, 0x08)
+#define PM8821_SSBI_ADDR_IRQ_RT_STATUS(m, b) PM8821_SSBI_REG(m, b, 0x0f)
+
+#define PM8821_BLOCKS_PER_MASTER 7
+
#define PM_IRQF_LVL_SEL 0x01 /* level select */
#define PM_IRQF_MASK_FE 0x02 /* mask falling edge */
#define PM_IRQF_MASK_RE 0x04 /* mask rising edge */
@@ -53,7 +67,8 @@
#define REG_HWREV 0x002 /* PMIC4 revision */
#define REG_HWREV_2 0x0E8 /* PMIC4 revision 2 */
-#define PM8921_NR_IRQS 256
+#define PM8XXX_NR_IRQS 256
+#define PM8821_NR_IRQS 112
struct pm_irq_chip {
struct regmap *regmap;
@@ -65,6 +80,12 @@ struct pm_irq_chip {
u8 config[0];
};
+struct pm_irq_data {
+ int num_irqs;
+ const struct irq_domain_ops *irq_domain_ops;
+ void (*irq_handler)(struct irq_desc *desc);
+};
+
static int pm8xxx_read_block_irq(struct pm_irq_chip *chip, unsigned int bp,
unsigned int *ip)
{
@@ -182,6 +203,78 @@ static void pm8xxx_irq_handler(struct irq_desc *desc)
chained_irq_exit(irq_chip, desc);
}
+static void pm8821_irq_block_handler(struct pm_irq_chip *chip,
+ int master, int block)
+{
+ int pmirq, irq, i, ret;
+ unsigned int bits;
+
+ ret = regmap_read(chip->regmap,
+ PM8821_SSBI_ADDR_IRQ_ROOT(master, block), &bits);
+ if (ret) {
+ pr_err("Reading block %d failed ret=%d", block, ret);
+ return;
+ }
+
+ /* Convert block offset to global block number */
+ block += (master * PM8821_BLOCKS_PER_MASTER) - 1;
+
+ /* Check IRQ bits */
+ for (i = 0; i < 8; i++) {
+ if (bits & BIT(i)) {
+ pmirq = block * 8 + i;
+ irq = irq_find_mapping(chip->irqdomain, pmirq);
+ generic_handle_irq(irq);
+ }
+ }
+}
+
+static inline void pm8821_irq_master_handler(struct pm_irq_chip *chip,
+ int master, u8 master_val)
+{
+ int block;
+
+ for (block = 1; block < 8; block++)
+ if (master_val & BIT(block))
+ pm8821_irq_block_handler(chip, master, block);
+}
+
+static void pm8821_irq_handler(struct irq_desc *desc)
+{
+ struct pm_irq_chip *chip = irq_desc_get_handler_data(desc);
+ struct irq_chip *irq_chip = irq_desc_get_chip(desc);
+ unsigned int master;
+ int ret;
+
+ chained_irq_enter(irq_chip, desc);
+ ret = regmap_read(chip->regmap,
+ PM8821_SSBI_REG_ADDR_IRQ_MASTER0, &master);
+ if (ret) {
+ pr_err("Failed to read master 0 ret=%d\n", ret);
+ goto done;
+ }
+
+ /* bits 1 through 7 mark the first 7 blocks in master 0 */
+ if (master & GENMASK(7, 1))
+ pm8821_irq_master_handler(chip, 0, master);
+
+ /* bit 0 indicates whether master 1 has any bits set */
+ if (!(master & BIT(0)))
+ goto done;
+
+ ret = regmap_read(chip->regmap,
+ PM8821_SSBI_REG_ADDR_IRQ_MASTER1, &master);
+ if (ret) {
+ pr_err("Failed to read master 1 ret=%d\n", ret);
+ goto done;
+ }
+
+ pm8821_irq_master_handler(chip, 1, master);
+
+done:
+ chained_irq_exit(irq_chip, desc);
+}
+
static void pm8xxx_irq_mask_ack(struct irq_data *d)
{
struct pm_irq_chip *chip = irq_data_get_irq_chip_data(d);
@@ -299,6 +392,104 @@ static const struct irq_domain_ops pm8xxx_irq_domain_ops = {
.map = pm8xxx_irq_domain_map,
};
+static void pm8821_irq_mask_ack(struct irq_data *d)
+{
+ struct pm_irq_chip *chip = irq_data_get_irq_chip_data(d);
+ unsigned int pmirq = irqd_to_hwirq(d);
+ u8 block, master;
+ int irq_bit, rc;
+
+ block = pmirq / 8;
+ master = block / PM8821_BLOCKS_PER_MASTER;
+ irq_bit = pmirq % 8;
+ block %= PM8821_BLOCKS_PER_MASTER;
+
+ rc = regmap_update_bits(chip->regmap,
+ PM8821_SSBI_ADDR_IRQ_MASK(master, block),
+ BIT(irq_bit), BIT(irq_bit));
+ if (rc) {
+ pr_err("Failed to mask IRQ:%d rc=%d\n", pmirq, rc);
+ return;
+ }
+
+ rc = regmap_update_bits(chip->regmap,
+ PM8821_SSBI_ADDR_IRQ_CLEAR(master, block),
+ BIT(irq_bit), BIT(irq_bit));
+ if (rc)
+ pr_err("Failed to CLEAR IRQ:%d rc=%d\n", pmirq, rc);
+}
+
+static void pm8821_irq_unmask(struct irq_data *d)
+{
+ struct pm_irq_chip *chip = irq_data_get_irq_chip_data(d);
+ unsigned int pmirq = irqd_to_hwirq(d);
+ int irq_bit, rc;
+ u8 block, master;
+
+ block = pmirq / 8;
+ master = block / PM8821_BLOCKS_PER_MASTER;
+ irq_bit = pmirq % 8;
+ block %= PM8821_BLOCKS_PER_MASTER;
+
+ rc = regmap_update_bits(chip->regmap,
+ PM8821_SSBI_ADDR_IRQ_MASK(master, block),
+ BIT(irq_bit), ~BIT(irq_bit));
+ if (rc)
+ pr_err("Failed to read/write unmask IRQ:%d rc=%d\n", pmirq, rc);
+
+}
+
+static int pm8821_irq_get_irqchip_state(struct irq_data *d,
+ enum irqchip_irq_state which,
+ bool *state)
+{
+ struct pm_irq_chip *chip = irq_data_get_irq_chip_data(d);
+ int rc, pmirq = irqd_to_hwirq(d);
+ u8 block, irq_bit, master;
+ unsigned int bits;
+
+ block = pmirq / 8;
+ master = block / PM8821_BLOCKS_PER_MASTER;
+ irq_bit = pmirq % 8;
+ block %= PM8821_BLOCKS_PER_MASTER;
+
+ rc = regmap_read(chip->regmap,
+ PM8821_SSBI_ADDR_IRQ_RT_STATUS(master, block), &bits);
+ if (rc) {
+ pr_err("Reading Status of IRQ %d failed rc=%d\n", pmirq, rc);
+ return rc;
+ }
+
+ *state = !!(bits & BIT(irq_bit));
+
+ return rc;
+}
+
+static struct irq_chip pm8821_irq_chip = {
+ .name = "pm8821",
+ .irq_mask_ack = pm8821_irq_mask_ack,
+ .irq_unmask = pm8821_irq_unmask,
+ .irq_get_irqchip_state = pm8821_irq_get_irqchip_state,
+ .flags = IRQCHIP_MASK_ON_SUSPEND | IRQCHIP_SKIP_SET_WAKE,
+};
+
+static int pm8821_irq_domain_map(struct irq_domain *d, unsigned int irq,
+ irq_hw_number_t hwirq)
+{
+ struct pm_irq_chip *chip = d->host_data;
+
+ irq_set_chip_and_handler(irq, &pm8821_irq_chip, handle_level_irq);
+ irq_set_chip_data(irq, chip);
+ irq_set_noprobe(irq);
+
+ return 0;
+}
+
+static const struct irq_domain_ops pm8821_irq_domain_ops = {
+ .xlate = irq_domain_xlate_twocell,
+ .map = pm8821_irq_domain_map,
+};
+
static const struct regmap_config ssbi_regmap_config = {
.reg_bits = 16,
.val_bits = 8,
@@ -308,22 +499,41 @@ static const struct regmap_config ssbi_regmap_config = {
.reg_write = ssbi_reg_write
};
-static const struct of_device_id pm8921_id_table[] = {
- { .compatible = "qcom,pm8018", },
- { .compatible = "qcom,pm8058", },
- { .compatible = "qcom,pm8921", },
+static const struct pm_irq_data pm8xxx_data = {
+ .num_irqs = PM8XXX_NR_IRQS,
+ .irq_domain_ops = &pm8xxx_irq_domain_ops,
+ .irq_handler = pm8xxx_irq_handler,
+};
+
+static const struct pm_irq_data pm8821_data = {
+ .num_irqs = PM8821_NR_IRQS,
+ .irq_domain_ops = &pm8821_irq_domain_ops,
+ .irq_handler = pm8821_irq_handler,
+};
+
+static const struct of_device_id pm8xxx_id_table[] = {
+ { .compatible = "qcom,pm8018", .data = &pm8xxx_data},
+ { .compatible = "qcom,pm8058", .data = &pm8xxx_data},
+ { .compatible = "qcom,pm8821", .data = &pm8821_data},
+ { .compatible = "qcom,pm8921", .data = &pm8xxx_data},
{ }
};
-MODULE_DEVICE_TABLE(of, pm8921_id_table);
+MODULE_DEVICE_TABLE(of, pm8xxx_id_table);
-static int pm8921_probe(struct platform_device *pdev)
+static int pm8xxx_probe(struct platform_device *pdev)
{
+ const struct pm_irq_data *data;
struct regmap *regmap;
int irq, rc;
unsigned int val;
u32 rev;
struct pm_irq_chip *chip;
- unsigned int nirqs = PM8921_NR_IRQS;
+
+ data = of_device_get_match_data(&pdev->dev);
+ if (!data) {
+ dev_err(&pdev->dev, "No matching driver data found\n");
+ return -EINVAL;
+ }
irq = platform_get_irq(pdev, 0);
if (irq < 0)
@@ -354,25 +564,26 @@ static int pm8921_probe(struct platform_device *pdev)
rev |= val << BITS_PER_BYTE;
chip = devm_kzalloc(&pdev->dev, sizeof(*chip) +
- sizeof(chip->config[0]) * nirqs,
- GFP_KERNEL);
+ sizeof(chip->config[0]) * data->num_irqs,
+ GFP_KERNEL);
if (!chip)
return -ENOMEM;
platform_set_drvdata(pdev, chip);
chip->regmap = regmap;
- chip->num_irqs = nirqs;
+ chip->num_irqs = data->num_irqs;
chip->num_blocks = DIV_ROUND_UP(chip->num_irqs, 8);
chip->num_masters = DIV_ROUND_UP(chip->num_blocks, 8);
spin_lock_init(&chip->pm_irq_lock);
- chip->irqdomain = irq_domain_add_linear(pdev->dev.of_node, nirqs,
- &pm8xxx_irq_domain_ops,
+ chip->irqdomain = irq_domain_add_linear(pdev->dev.of_node,
+ data->num_irqs,
+ data->irq_domain_ops,
chip);
if (!chip->irqdomain)
return -ENODEV;
- irq_set_chained_handler_and_data(irq, pm8xxx_irq_handler, chip);
+ irq_set_chained_handler_and_data(irq, data->irq_handler, chip);
irq_set_irq_wake(irq, 1);
rc = of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev);
@@ -384,46 +595,46 @@ static int pm8921_probe(struct platform_device *pdev)
return rc;
}
-static int pm8921_remove_child(struct device *dev, void *unused)
+static int pm8xxx_remove_child(struct device *dev, void *unused)
{
platform_device_unregister(to_platform_device(dev));
return 0;
}
-static int pm8921_remove(struct platform_device *pdev)
+static int pm8xxx_remove(struct platform_device *pdev)
{
int irq = platform_get_irq(pdev, 0);
struct pm_irq_chip *chip = platform_get_drvdata(pdev);
- device_for_each_child(&pdev->dev, NULL, pm8921_remove_child);
+ device_for_each_child(&pdev->dev, NULL, pm8xxx_remove_child);
irq_set_chained_handler_and_data(irq, NULL, NULL);
irq_domain_remove(chip->irqdomain);
return 0;
}
-static struct platform_driver pm8921_driver = {
- .probe = pm8921_probe,
- .remove = pm8921_remove,
+static struct platform_driver pm8xxx_driver = {
+ .probe = pm8xxx_probe,
+ .remove = pm8xxx_remove,
.driver = {
- .name = "pm8921-core",
- .of_match_table = pm8921_id_table,
+ .name = "pm8xxx-core",
+ .of_match_table = pm8xxx_id_table,
},
};
-static int __init pm8921_init(void)
+static int __init pm8xxx_init(void)
{
- return platform_driver_register(&pm8921_driver);
+ return platform_driver_register(&pm8xxx_driver);
}
-subsys_initcall(pm8921_init);
+subsys_initcall(pm8xxx_init);
-static void __exit pm8921_exit(void)
+static void __exit pm8xxx_exit(void)
{
- platform_driver_unregister(&pm8921_driver);
+ platform_driver_unregister(&pm8xxx_driver);
}
-module_exit(pm8921_exit);
+module_exit(pm8xxx_exit);
MODULE_LICENSE("GPL v2");
-MODULE_DESCRIPTION("PMIC 8921 core driver");
+MODULE_DESCRIPTION("PMIC 8xxx core driver");
MODULE_VERSION("1.0");
-MODULE_ALIAS("platform:pm8921-core");
+MODULE_ALIAS("platform:pm8xxx-core");
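
The pm8xxx rework above is an instance of the standard OF match-data idiom: per-variant constants hang off the of_device_id entries and are fetched once in probe. A minimal self-contained sketch, using hypothetical "foo" names that are not part of this patch:

/* Sketch only: per-variant driver data via of_device_get_match_data().
 * All "foo" identifiers here are hypothetical. */
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>

struct foo_data {
	unsigned int num_irqs;	/* per-variant constant */
};

static const struct foo_data foo_v1_data = { .num_irqs = 256 };
static const struct foo_data foo_v2_data = { .num_irqs = 112 };

static const struct of_device_id foo_id_table[] = {
	{ .compatible = "vendor,foo-v1", .data = &foo_v1_data },
	{ .compatible = "vendor,foo-v2", .data = &foo_v2_data },
	{ }
};
MODULE_DEVICE_TABLE(of, foo_id_table);

static int foo_probe(struct platform_device *pdev)
{
	const struct foo_data *data = of_device_get_match_data(&pdev->dev);

	if (!data)		/* no match data -> unknown variant */
		return -EINVAL;
	dev_info(&pdev->dev, "variant with %u IRQs\n", data->num_irqs);
	return 0;
}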
diff --git a/drivers/mfd/rk808.c b/drivers/mfd/rk808.c
index 0f8acc5882a4..2c9acdba7c2d 100644
--- a/drivers/mfd/rk808.c
+++ b/drivers/mfd/rk808.c
@@ -290,6 +290,24 @@ static void rk808_device_shutdown(void)
dev_err(&rk808_i2c_client->dev, "power off error!\n");
}
+static void rk818_device_shutdown(void)
+{
+ int ret;
+ struct rk808 *rk808 = i2c_get_clientdata(rk808_i2c_client);
+
+ if (!rk808) {
+ dev_warn(&rk808_i2c_client->dev,
+ "have no rk818, so do nothing here\n");
+ return;
+ }
+
+ ret = regmap_update_bits(rk808->regmap,
+ RK818_DEVCTRL_REG,
+ DEV_OFF, DEV_OFF);
+ if (ret)
+ dev_err(&rk808_i2c_client->dev, "power off error!\n");
+}
+
static const struct of_device_id rk808_of_match[] = {
{ .compatible = "rockchip,rk808" },
{ .compatible = "rockchip,rk818" },
@@ -304,6 +322,7 @@ static int rk808_probe(struct i2c_client *client,
struct rk808 *rk808;
const struct rk808_reg_data *pre_init_reg;
const struct mfd_cell *cells;
+ void (*pm_pwroff_fn)(void);
int nr_pre_init_regs;
int nr_cells;
int pm_off = 0;
@@ -331,6 +350,7 @@ static int rk808_probe(struct i2c_client *client,
nr_pre_init_regs = ARRAY_SIZE(rk808_pre_init_reg);
cells = rk808s;
nr_cells = ARRAY_SIZE(rk808s);
+ pm_pwroff_fn = rk808_device_shutdown;
break;
case RK818_ID:
rk808->regmap_cfg = &rk818_regmap_config;
@@ -339,6 +359,7 @@ static int rk808_probe(struct i2c_client *client,
nr_pre_init_regs = ARRAY_SIZE(rk818_pre_init_reg);
cells = rk818s;
nr_cells = ARRAY_SIZE(rk818s);
+ pm_pwroff_fn = rk818_device_shutdown;
break;
default:
dev_err(&client->dev, "Unsupported RK8XX ID %lu\n",
@@ -393,7 +414,7 @@ static int rk808_probe(struct i2c_client *client,
"rockchip,system-power-controller");
if (pm_off && !pm_power_off) {
rk808_i2c_client = client;
- pm_power_off = rk808_device_shutdown;
+ pm_power_off = pm_pwroff_fn;
}
return 0;
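
The rk808 hunks above select a poweroff callback per chip ID and install it in pm_power_off only when the DT marks the PMIC as the system power controller and the hook is still free. A condensed sketch of that pattern, with hypothetical "bar" names:

/* Sketch only: variant-specific poweroff hook, claimed conditionally. */
#include <linux/device.h>
#include <linux/of.h>
#include <linux/pm.h>

static void bar_v1_power_off(void) { /* set the v1 DEV_OFF-style bit */ }
static void bar_v2_power_off(void) { /* set the v2 DEV_OFF-style bit */ }

static void bar_install_poweroff(struct device *dev, int variant)
{
	void (*fn)(void) = (variant == 1) ? bar_v1_power_off
					  : bar_v2_power_off;

	if (of_property_read_bool(dev->of_node,
				  "rockchip,system-power-controller") &&
	    !pm_power_off)
		pm_power_off = fn;
}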
diff --git a/drivers/mfd/rn5t618.c b/drivers/mfd/rn5t618.c
index ee94080e1cbb..8131d1975745 100644
--- a/drivers/mfd/rn5t618.c
+++ b/drivers/mfd/rn5t618.c
@@ -87,6 +87,7 @@ static int rn5t618_restart(struct notifier_block *this,
static const struct of_device_id rn5t618_of_match[] = {
{ .compatible = "ricoh,rn5t567", .data = (void *)RN5T567 },
{ .compatible = "ricoh,rn5t618", .data = (void *)RN5T618 },
+ { .compatible = "ricoh,rc5t619", .data = (void *)RC5T619 },
{ }
};
MODULE_DEVICE_TABLE(of, rn5t618_of_match);
diff --git a/drivers/mfd/si476x-i2c.c b/drivers/mfd/si476x-i2c.c
index c180b7533bba..e6a3d999a376 100644
--- a/drivers/mfd/si476x-i2c.c
+++ b/drivers/mfd/si476x-i2c.c
@@ -753,7 +753,7 @@ static int si476x_core_probe(struct i2c_client *client,
ARRAY_SIZE(core->supplies),
core->supplies);
if (rval) {
- dev_err(&client->dev, "Failet to gett all of the regulators\n");
+ dev_err(&client->dev, "Failed to get all of the regulators\n");
goto free_gpio;
}
diff --git a/drivers/mfd/sun4i-gpadc.c b/drivers/mfd/sun4i-gpadc.c
new file mode 100644
index 000000000000..9cfc88134d03
--- /dev/null
+++ b/drivers/mfd/sun4i-gpadc.c
@@ -0,0 +1,181 @@
+/* ADC MFD core driver for sunxi platforms
+ *
+ * Copyright (c) 2016 Quentin Schulz <quentin.schulz@free-electrons.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/mfd/core.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/of_irq.h>
+#include <linux/regmap.h>
+
+#include <linux/mfd/sun4i-gpadc.h>
+
+#define ARCH_SUN4I_A10 0
+#define ARCH_SUN5I_A13 1
+#define ARCH_SUN6I_A31 2
+
+static struct resource adc_resources[] = {
+ DEFINE_RES_IRQ_NAMED(SUN4I_GPADC_IRQ_FIFO_DATA, "FIFO_DATA_PENDING"),
+ DEFINE_RES_IRQ_NAMED(SUN4I_GPADC_IRQ_TEMP_DATA, "TEMP_DATA_PENDING"),
+};
+
+static const struct regmap_irq sun4i_gpadc_regmap_irq[] = {
+ REGMAP_IRQ_REG(SUN4I_GPADC_IRQ_FIFO_DATA, 0,
+ SUN4I_GPADC_INT_FIFOC_TP_DATA_IRQ_EN),
+ REGMAP_IRQ_REG(SUN4I_GPADC_IRQ_TEMP_DATA, 0,
+ SUN4I_GPADC_INT_FIFOC_TEMP_IRQ_EN),
+};
+
+static const struct regmap_irq_chip sun4i_gpadc_regmap_irq_chip = {
+ .name = "sun4i_gpadc_irq_chip",
+ .status_base = SUN4I_GPADC_INT_FIFOS,
+ .ack_base = SUN4I_GPADC_INT_FIFOS,
+ .mask_base = SUN4I_GPADC_INT_FIFOC,
+ .init_ack_masked = true,
+ .mask_invert = true,
+ .irqs = sun4i_gpadc_regmap_irq,
+ .num_irqs = ARRAY_SIZE(sun4i_gpadc_regmap_irq),
+ .num_regs = 1,
+};
+
+static struct mfd_cell sun4i_gpadc_cells[] = {
+ {
+ .name = "sun4i-a10-gpadc-iio",
+ .resources = adc_resources,
+ .num_resources = ARRAY_SIZE(adc_resources),
+ },
+ { .name = "iio_hwmon" }
+};
+
+static struct mfd_cell sun5i_gpadc_cells[] = {
+ {
+ .name = "sun5i-a13-gpadc-iio",
+ .resources = adc_resources,
+ .num_resources = ARRAY_SIZE(adc_resources),
+ },
+ { .name = "iio_hwmon" },
+};
+
+static struct mfd_cell sun6i_gpadc_cells[] = {
+ {
+ .name = "sun6i-a31-gpadc-iio",
+ .resources = adc_resources,
+ .num_resources = ARRAY_SIZE(adc_resources),
+ },
+ { .name = "iio_hwmon" },
+};
+
+static const struct regmap_config sun4i_gpadc_regmap_config = {
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+ .fast_io = true,
+};
+
+static const struct of_device_id sun4i_gpadc_of_match[] = {
+ {
+ .compatible = "allwinner,sun4i-a10-ts",
+ .data = (void *)ARCH_SUN4I_A10,
+ }, {
+ .compatible = "allwinner,sun5i-a13-ts",
+ .data = (void *)ARCH_SUN5I_A13,
+ }, {
+ .compatible = "allwinner,sun6i-a31-ts",
+ .data = (void *)ARCH_SUN6I_A31,
+ }, { /* sentinel */ }
+};
+
+MODULE_DEVICE_TABLE(of, sun4i_gpadc_of_match);
+
+static int sun4i_gpadc_probe(struct platform_device *pdev)
+{
+ struct sun4i_gpadc_dev *dev;
+ struct resource *mem;
+ const struct of_device_id *of_id;
+ const struct mfd_cell *cells;
+ unsigned int irq, size;
+ int ret;
+
+ of_id = of_match_node(sun4i_gpadc_of_match, pdev->dev.of_node);
+ if (!of_id)
+ return -EINVAL;
+
+ switch ((long)of_id->data) {
+ case ARCH_SUN4I_A10:
+ cells = sun4i_gpadc_cells;
+ size = ARRAY_SIZE(sun4i_gpadc_cells);
+ break;
+ case ARCH_SUN5I_A13:
+ cells = sun5i_gpadc_cells;
+ size = ARRAY_SIZE(sun5i_gpadc_cells);
+ break;
+ case ARCH_SUN6I_A31:
+ cells = sun6i_gpadc_cells;
+ size = ARRAY_SIZE(sun6i_gpadc_cells);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
+ if (!dev)
+ return -ENOMEM;
+
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ dev->base = devm_ioremap_resource(&pdev->dev, mem);
+ if (IS_ERR(dev->base))
+ return PTR_ERR(dev->base);
+
+ dev->dev = &pdev->dev;
+ dev_set_drvdata(dev->dev, dev);
+
+ dev->regmap = devm_regmap_init_mmio(dev->dev, dev->base,
+ &sun4i_gpadc_regmap_config);
+ if (IS_ERR(dev->regmap)) {
+ ret = PTR_ERR(dev->regmap);
+ dev_err(&pdev->dev, "failed to init regmap: %d\n", ret);
+ return ret;
+ }
+
+ /* Disable all interrupts */
+ regmap_write(dev->regmap, SUN4I_GPADC_INT_FIFOC, 0);
+
+ irq = platform_get_irq(pdev, 0);
+ ret = devm_regmap_add_irq_chip(&pdev->dev, dev->regmap, irq,
+ IRQF_ONESHOT, 0,
+ &sun4i_gpadc_regmap_irq_chip,
+ &dev->regmap_irqc);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to add irq chip: %d\n", ret);
+ return ret;
+ }
+
+ ret = devm_mfd_add_devices(dev->dev, 0, cells, size, NULL, 0, NULL);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to add MFD devices: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static struct platform_driver sun4i_gpadc_driver = {
+ .driver = {
+ .name = "sun4i-gpadc",
+ .of_match_table = of_match_ptr(sun4i_gpadc_of_match),
+ },
+ .probe = sun4i_gpadc_probe,
+};
+
+module_platform_driver(sun4i_gpadc_driver);
+
+MODULE_DESCRIPTION("Allwinner sunxi platforms' GPADC MFD core driver");
+MODULE_AUTHOR("Quentin Schulz <quentin.schulz@free-electrons.com>");
+MODULE_LICENSE("GPL v2");
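
Because the new core exposes its interrupts as named resources on the MFD cells and stores the regmap IRQ chip data in drvdata, a child driver would be expected to resolve its virtual IRQ in two steps; a minimal sketch (the child probe itself is hypothetical, the field names follow the core above):

/* Sketch only: how a child cell of the GPADC core could resolve its IRQ. */
#include <linux/mfd/sun4i-gpadc.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>

static int child_probe(struct platform_device *pdev)
{
	struct sun4i_gpadc_dev *core = dev_get_drvdata(pdev->dev.parent);
	int irq;

	irq = platform_get_irq_byname(pdev, "TEMP_DATA_PENDING");
	if (irq < 0)
		return irq;

	/* translate the hwirq into the regmap-irq domain's virq */
	irq = regmap_irq_get_virq(core->regmap_irqc, irq);
	if (irq < 0)
		return irq;

	/* request_threaded_irq(irq, ...) would follow here */
	return 0;
}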
diff --git a/drivers/mfd/tc3589x.c b/drivers/mfd/tc3589x.c
index 274bf39968aa..cc9e563f23aa 100644
--- a/drivers/mfd/tc3589x.c
+++ b/drivers/mfd/tc3589x.c
@@ -53,7 +53,7 @@ int tc3589x_reg_read(struct tc3589x *tc3589x, u8 reg)
EXPORT_SYMBOL_GPL(tc3589x_reg_read);
/**
- * tc3589x_reg_read() - write a single TC3589x register
+ * tc3589x_reg_write() - write a single TC3589x register
* @tc3589x: Device to write to
* @reg: Register to read
* @data: Value to write
@@ -118,7 +118,7 @@ EXPORT_SYMBOL_GPL(tc3589x_block_write);
* @tc3589x: Device to write to
* @reg: Register to write
* @mask: Mask of bits to set
- * @values: Value to set
+ * @val: Value to set
*/
int tc3589x_set_bits(struct tc3589x *tc3589x, u8 reg, u8 mask, u8 val)
{
diff --git a/drivers/mfd/ti_am335x_tscadc.c b/drivers/mfd/ti_am335x_tscadc.c
index c8f027b4ea4c..0f3fab47fe48 100644
--- a/drivers/mfd/ti_am335x_tscadc.c
+++ b/drivers/mfd/ti_am335x_tscadc.c
@@ -183,6 +183,7 @@ static int ti_tscadc_probe(struct platform_device *pdev)
tscadc->irq = err;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ tscadc->tscadc_phys_base = res->start;
tscadc->tscadc_base = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(tscadc->tscadc_base))
return PTR_ERR(tscadc->tscadc_base);
diff --git a/drivers/mfd/tps65217.c b/drivers/mfd/tps65217.c
index 9a4d8684dd32..f769c7d4e335 100644
--- a/drivers/mfd/tps65217.c
+++ b/drivers/mfd/tps65217.c
@@ -42,26 +42,6 @@ static struct resource pb_resources[] = {
DEFINE_RES_IRQ_NAMED(TPS65217_IRQ_PB, "PB"),
};
-struct tps65217_irq {
- int mask;
- int interrupt;
-};
-
-static const struct tps65217_irq tps65217_irqs[] = {
- [TPS65217_IRQ_PB] = {
- .mask = TPS65217_INT_PBM,
- .interrupt = TPS65217_INT_PBI,
- },
- [TPS65217_IRQ_AC] = {
- .mask = TPS65217_INT_ACM,
- .interrupt = TPS65217_INT_ACI,
- },
- [TPS65217_IRQ_USB] = {
- .mask = TPS65217_INT_USBM,
- .interrupt = TPS65217_INT_USBI,
- },
-};
-
static void tps65217_irq_lock(struct irq_data *data)
{
struct tps65217 *tps = irq_data_get_irq_chip_data(data);
@@ -74,37 +54,32 @@ static void tps65217_irq_sync_unlock(struct irq_data *data)
struct tps65217 *tps = irq_data_get_irq_chip_data(data);
int ret;
- ret = tps65217_reg_write(tps, TPS65217_REG_INT, tps->irq_mask,
- TPS65217_PROTECT_NONE);
+ ret = tps65217_set_bits(tps, TPS65217_REG_INT, TPS65217_INT_MASK,
+ tps->irq_mask, TPS65217_PROTECT_NONE);
if (ret != 0)
dev_err(tps->dev, "Failed to sync IRQ masks\n");
mutex_unlock(&tps->irq_lock);
}
-static inline const struct tps65217_irq *
-irq_to_tps65217_irq(struct tps65217 *tps, struct irq_data *data)
-{
- return &tps65217_irqs[data->hwirq];
-}
-
static void tps65217_irq_enable(struct irq_data *data)
{
struct tps65217 *tps = irq_data_get_irq_chip_data(data);
- const struct tps65217_irq *irq_data = irq_to_tps65217_irq(tps, data);
+ u8 mask = BIT(data->hwirq) << TPS65217_INT_SHIFT;
- tps->irq_mask &= ~irq_data->mask;
+ tps->irq_mask &= ~mask;
}
static void tps65217_irq_disable(struct irq_data *data)
{
struct tps65217 *tps = irq_data_get_irq_chip_data(data);
- const struct tps65217_irq *irq_data = irq_to_tps65217_irq(tps, data);
+ u8 mask = BIT(data->hwirq) << TPS65217_INT_SHIFT;
- tps->irq_mask |= irq_data->mask;
+ tps->irq_mask |= mask;
}
static struct irq_chip tps65217_irq_chip = {
+ .name = "tps65217",
.irq_bus_lock = tps65217_irq_lock,
.irq_bus_sync_unlock = tps65217_irq_sync_unlock,
.irq_enable = tps65217_irq_enable,
@@ -149,8 +124,8 @@ static irqreturn_t tps65217_irq_thread(int irq, void *data)
return IRQ_NONE;
}
- for (i = 0; i < ARRAY_SIZE(tps65217_irqs); i++) {
- if (status & tps65217_irqs[i].interrupt) {
+ for (i = 0; i < TPS65217_NUM_IRQ; i++) {
+ if (status & BIT(i)) {
handle_nested_irq(irq_find_mapping(tps->irq_domain, i));
handled = true;
}
@@ -188,10 +163,9 @@ static int tps65217_irq_init(struct tps65217 *tps, int irq)
tps->irq = irq;
/* Mask all interrupt sources */
- tps->irq_mask = (TPS65217_INT_RESERVEDM | TPS65217_INT_PBM
- | TPS65217_INT_ACM | TPS65217_INT_USBM);
- tps65217_reg_write(tps, TPS65217_REG_INT, tps->irq_mask,
- TPS65217_PROTECT_NONE);
+ tps->irq_mask = TPS65217_INT_MASK;
+ tps65217_set_bits(tps, TPS65217_REG_INT, TPS65217_INT_MASK,
+ TPS65217_INT_MASK, TPS65217_PROTECT_NONE);
tps->irq_domain = irq_domain_add_linear(tps->dev->of_node,
TPS65217_NUM_IRQ, &tps65217_irq_domain_ops, tps);
@@ -209,6 +183,8 @@ static int tps65217_irq_init(struct tps65217 *tps, int irq)
return ret;
}
+ enable_irq_wake(irq);
+
return 0;
}
@@ -424,6 +400,24 @@ static int tps65217_probe(struct i2c_client *client,
return 0;
}
+static int tps65217_remove(struct i2c_client *client)
+{
+ struct tps65217 *tps = i2c_get_clientdata(client);
+ unsigned int virq;
+ int i;
+
+ for (i = 0; i < TPS65217_NUM_IRQ; i++) {
+ virq = irq_find_mapping(tps->irq_domain, i);
+ if (virq)
+ irq_dispose_mapping(virq);
+ }
+
+ irq_domain_remove(tps->irq_domain);
+ tps->irq_domain = NULL;
+
+ return 0;
+}
+
static const struct i2c_device_id tps65217_id_table[] = {
{"tps65217", TPS65217},
{ /* sentinel */ }
@@ -437,6 +431,7 @@ static struct i2c_driver tps65217_driver = {
},
.id_table = tps65217_id_table,
.probe = tps65217_probe,
+ .remove = tps65217_remove,
};
static int __init tps65217_init(void)
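
The tps65217 refactor drops the per-IRQ lookup table because the chip keeps its mask bits at a fixed offset above the matching status bits in the single INT register, so the mask for hwirq i is just a shifted bit. A short sketch of that arithmetic (the shift value is assumed here, mirroring TPS65217_INT_SHIFT):

/* Sketch only: mask bit derived from the hwirq by a fixed shift. */
#include <linux/bitops.h>
#include <linux/types.h>

#define EX_INT_SHIFT	4	/* assumed offset of mask bits over status bits */

static inline u8 ex_irq_mask(unsigned long hwirq)
{
	return BIT(hwirq) << EX_INT_SHIFT;
}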
diff --git a/drivers/mfd/tps65218.c b/drivers/mfd/tps65218.c
index ba610adbdbff..13834a0d2817 100644
--- a/drivers/mfd/tps65218.c
+++ b/drivers/mfd/tps65218.c
@@ -33,19 +33,17 @@
#define TPS65218_PASSWORD_REGS_UNLOCK 0x7D
-/**
- * tps65218_reg_read: Read a single tps65218 register.
- *
- * @tps: Device to read from.
- * @reg: Register to read.
- * @val: Contians the value
- */
-int tps65218_reg_read(struct tps65218 *tps, unsigned int reg,
- unsigned int *val)
-{
- return regmap_read(tps->regmap, reg, val);
-}
-EXPORT_SYMBOL_GPL(tps65218_reg_read);
+static const struct mfd_cell tps65218_cells[] = {
+ {
+ .name = "tps65218-pwrbutton",
+ .of_compatible = "ti,tps65218-pwrbutton",
+ },
+ {
+ .name = "tps65218-gpio",
+ .of_compatible = "ti,tps65218-gpio",
+ },
+ { .name = "tps65218-regulator", },
+};
/**
* tps65218_reg_write: Write a single tps65218 register.
@@ -93,7 +91,7 @@ static int tps65218_update_bits(struct tps65218 *tps, unsigned int reg,
int ret;
unsigned int data;
- ret = tps65218_reg_read(tps, reg, &data);
+ ret = regmap_read(tps->regmap, reg, &data);
if (ret) {
dev_err(tps->dev, "Read from reg 0x%x failed\n", reg);
return ret;
@@ -251,7 +249,7 @@ static int tps65218_probe(struct i2c_client *client,
if (ret < 0)
return ret;
- ret = tps65218_reg_read(tps, TPS65218_REG_CHIPID, &chipid);
+ ret = regmap_read(tps->regmap, TPS65218_REG_CHIPID, &chipid);
if (ret) {
dev_err(tps->dev, "Failed to read chipid: %d\n", ret);
return ret;
@@ -259,8 +257,10 @@ static int tps65218_probe(struct i2c_client *client,
tps->rev = chipid & TPS65218_CHIPID_REV_MASK;
- ret = of_platform_populate(client->dev.of_node, NULL, NULL,
- &client->dev);
+ ret = mfd_add_devices(tps->dev, PLATFORM_DEVID_AUTO, tps65218_cells,
+ ARRAY_SIZE(tps65218_cells), NULL, 0,
+ regmap_irq_get_domain(tps->irq_data));
+
if (ret < 0)
goto err_irq;
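
The tps65218 hunk above replaces of_platform_populate() with explicit MFD cells so each child is registered against the PMIC's regmap IRQ domain and its IRQ resources are remapped automatically. A minimal sketch of the call pattern, with a hypothetical cell:

/* Sketch only: MFD cells registered against an existing regmap IRQ domain. */
#include <linux/kernel.h>
#include <linux/mfd/core.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>

static const struct mfd_cell ex_cells[] = {
	{ .name = "ex-pwrbutton", .of_compatible = "vendor,ex-pwrbutton" },
};

static int ex_add_children(struct device *dev,
			   struct regmap_irq_chip_data *irq_data)
{
	return mfd_add_devices(dev, PLATFORM_DEVID_AUTO, ex_cells,
			       ARRAY_SIZE(ex_cells), NULL, 0,
			       regmap_irq_get_domain(irq_data));
}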
diff --git a/drivers/mfd/tps65912-core.c b/drivers/mfd/tps65912-core.c
index a88cfa80dbc4..f33567bc428d 100644
--- a/drivers/mfd/tps65912-core.c
+++ b/drivers/mfd/tps65912-core.c
@@ -77,6 +77,23 @@ static struct regmap_irq_chip tps65912_irq_chip = {
.init_ack_masked = true,
};
+static const struct regmap_range tps65912_yes_ranges[] = {
+ regmap_reg_range(TPS65912_INT_STS, TPS65912_GPIO5),
+};
+
+static const struct regmap_access_table tps65912_volatile_table = {
+ .yes_ranges = tps65912_yes_ranges,
+ .n_yes_ranges = ARRAY_SIZE(tps65912_yes_ranges),
+};
+
+const struct regmap_config tps65912_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .cache_type = REGCACHE_RBTREE,
+ .volatile_table = &tps65912_volatile_table,
+};
+EXPORT_SYMBOL_GPL(tps65912_regmap_config);
+
int tps65912_device_init(struct tps65912 *tps)
{
int ret;
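
The regmap config added for tps65912 pairs a register cache with an explicit volatile window, so reads of the interrupt/GPIO status block always hit the hardware while everything else can be served from cache. A sketch of that shape with assumed addresses:

/* Sketch only: cached regmap with a volatile status window (addresses assumed). */
#include <linux/regmap.h>

static const struct regmap_range ex_yes_ranges[] = {
	regmap_reg_range(0x10, 0x1f),	/* hypothetical status/GPIO block */
};

static const struct regmap_access_table ex_volatile_table = {
	.yes_ranges = ex_yes_ranges,
	.n_yes_ranges = ARRAY_SIZE(ex_yes_ranges),
};

static const struct regmap_config ex_regmap_config = {
	.reg_bits = 8,
	.val_bits = 8,
	.cache_type = REGCACHE_RBTREE,
	.volatile_table = &ex_volatile_table,
};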
diff --git a/drivers/mfd/wm5102-tables.c b/drivers/mfd/wm5102-tables.c
index ab8b23b5bd22..853113d97c1e 100644
--- a/drivers/mfd/wm5102-tables.c
+++ b/drivers/mfd/wm5102-tables.c
@@ -244,752 +244,752 @@ const struct regmap_irq_chip wm5102_irq = {
};
static const struct reg_default wm5102_reg_default[] = {
- { 0x00000008, 0x0019 }, /* R8 - Ctrl IF SPI CFG 1 */
- { 0x00000009, 0x0001 }, /* R9 - Ctrl IF I2C1 CFG 1 */
- { 0x00000020, 0x0000 }, /* R32 - Tone Generator 1 */
- { 0x00000021, 0x1000 }, /* R33 - Tone Generator 2 */
- { 0x00000022, 0x0000 }, /* R34 - Tone Generator 3 */
- { 0x00000023, 0x1000 }, /* R35 - Tone Generator 4 */
- { 0x00000024, 0x0000 }, /* R36 - Tone Generator 5 */
- { 0x00000030, 0x0000 }, /* R48 - PWM Drive 1 */
- { 0x00000031, 0x0100 }, /* R49 - PWM Drive 2 */
- { 0x00000032, 0x0100 }, /* R50 - PWM Drive 3 */
- { 0x00000040, 0x0000 }, /* R64 - Wake control */
- { 0x00000041, 0x0000 }, /* R65 - Sequence control */
- { 0x00000061, 0x01FF }, /* R97 - Sample Rate Sequence Select 1 */
- { 0x00000062, 0x01FF }, /* R98 - Sample Rate Sequence Select 2 */
- { 0x00000063, 0x01FF }, /* R99 - Sample Rate Sequence Select 3 */
- { 0x00000064, 0x01FF }, /* R100 - Sample Rate Sequence Select 4 */
+ { 0x00000008, 0x0019 }, /* R8 - Ctrl IF SPI CFG 1 */
+ { 0x00000009, 0x0001 }, /* R9 - Ctrl IF I2C1 CFG 1 */
+ { 0x00000020, 0x0000 }, /* R32 - Tone Generator 1 */
+ { 0x00000021, 0x1000 }, /* R33 - Tone Generator 2 */
+ { 0x00000022, 0x0000 }, /* R34 - Tone Generator 3 */
+ { 0x00000023, 0x1000 }, /* R35 - Tone Generator 4 */
+ { 0x00000024, 0x0000 }, /* R36 - Tone Generator 5 */
+ { 0x00000030, 0x0000 }, /* R48 - PWM Drive 1 */
+ { 0x00000031, 0x0100 }, /* R49 - PWM Drive 2 */
+ { 0x00000032, 0x0100 }, /* R50 - PWM Drive 3 */
+ { 0x00000040, 0x0000 }, /* R64 - Wake control */
+ { 0x00000041, 0x0000 }, /* R65 - Sequence control */
+ { 0x00000061, 0x01FF }, /* R97 - Sample Rate Sequence Select 1 */
+ { 0x00000062, 0x01FF }, /* R98 - Sample Rate Sequence Select 2 */
+ { 0x00000063, 0x01FF }, /* R99 - Sample Rate Sequence Select 3 */
+ { 0x00000064, 0x01FF }, /* R100 - Sample Rate Sequence Select 4 */
{ 0x00000066, 0x01FF }, /* R102 - Always On Triggers Sequence Select 1 */
{ 0x00000067, 0x01FF }, /* R103 - Always On Triggers Sequence Select 2 */
{ 0x00000068, 0x01FF }, /* R104 - Always On Triggers Sequence Select 3 */
{ 0x00000069, 0x01FF }, /* R105 - Always On Triggers Sequence Select 4 */
{ 0x0000006A, 0x01FF }, /* R106 - Always On Triggers Sequence Select 5 */
{ 0x0000006B, 0x01FF }, /* R107 - Always On Triggers Sequence Select 6 */
- { 0x00000070, 0x0000 }, /* R112 - Comfort Noise Generator */
- { 0x00000090, 0x0000 }, /* R144 - Haptics Control 1 */
- { 0x00000091, 0x7FFF }, /* R145 - Haptics Control 2 */
- { 0x00000092, 0x0000 }, /* R146 - Haptics phase 1 intensity */
- { 0x00000093, 0x0000 }, /* R147 - Haptics phase 1 duration */
- { 0x00000094, 0x0000 }, /* R148 - Haptics phase 2 intensity */
- { 0x00000095, 0x0000 }, /* R149 - Haptics phase 2 duration */
- { 0x00000096, 0x0000 }, /* R150 - Haptics phase 3 intensity */
- { 0x00000097, 0x0000 }, /* R151 - Haptics phase 3 duration */
+ { 0x00000070, 0x0000 }, /* R112 - Comfort Noise Generator */
+ { 0x00000090, 0x0000 }, /* R144 - Haptics Control 1 */
+ { 0x00000091, 0x7FFF }, /* R145 - Haptics Control 2 */
+ { 0x00000092, 0x0000 }, /* R146 - Haptics phase 1 intensity */
+ { 0x00000093, 0x0000 }, /* R147 - Haptics phase 1 duration */
+ { 0x00000094, 0x0000 }, /* R148 - Haptics phase 2 intensity */
+ { 0x00000095, 0x0000 }, /* R149 - Haptics phase 2 duration */
+ { 0x00000096, 0x0000 }, /* R150 - Haptics phase 3 intensity */
+ { 0x00000097, 0x0000 }, /* R151 - Haptics phase 3 duration */
{ 0x00000100, 0x0002 }, /* R256 - Clock 32k 1 */
- { 0x00000101, 0x0304 }, /* R257 - System Clock 1 */
- { 0x00000102, 0x0011 }, /* R258 - Sample rate 1 */
- { 0x00000103, 0x0011 }, /* R259 - Sample rate 2 */
- { 0x00000104, 0x0011 }, /* R260 - Sample rate 3 */
- { 0x00000112, 0x0305 }, /* R274 - Async clock 1 */
- { 0x00000113, 0x0011 }, /* R275 - Async sample rate 1 */
+ { 0x00000101, 0x0304 }, /* R257 - System Clock 1 */
+ { 0x00000102, 0x0011 }, /* R258 - Sample rate 1 */
+ { 0x00000103, 0x0011 }, /* R259 - Sample rate 2 */
+ { 0x00000104, 0x0011 }, /* R260 - Sample rate 3 */
+ { 0x00000112, 0x0305 }, /* R274 - Async clock 1 */
+ { 0x00000113, 0x0011 }, /* R275 - Async sample rate 1 */
{ 0x00000114, 0x0011 }, /* R276 - Async sample rate 2 */
- { 0x00000149, 0x0000 }, /* R329 - Output system clock */
- { 0x0000014A, 0x0000 }, /* R330 - Output async clock */
- { 0x00000152, 0x0000 }, /* R338 - Rate Estimator 1 */
- { 0x00000153, 0x0000 }, /* R339 - Rate Estimator 2 */
- { 0x00000154, 0x0000 }, /* R340 - Rate Estimator 3 */
- { 0x00000155, 0x0000 }, /* R341 - Rate Estimator 4 */
- { 0x00000156, 0x0000 }, /* R342 - Rate Estimator 5 */
- { 0x00000161, 0x0000 }, /* R353 - Dynamic Frequency Scaling 1 */
+ { 0x00000149, 0x0000 }, /* R329 - Output system clock */
+ { 0x0000014A, 0x0000 }, /* R330 - Output async clock */
+ { 0x00000152, 0x0000 }, /* R338 - Rate Estimator 1 */
+ { 0x00000153, 0x0000 }, /* R339 - Rate Estimator 2 */
+ { 0x00000154, 0x0000 }, /* R340 - Rate Estimator 3 */
+ { 0x00000155, 0x0000 }, /* R341 - Rate Estimator 4 */
+ { 0x00000156, 0x0000 }, /* R342 - Rate Estimator 5 */
+ { 0x00000161, 0x0000 }, /* R353 - Dynamic Frequency Scaling 1 */
{ 0x00000171, 0x0000 }, /* R369 - FLL1 Control 1 */
- { 0x00000172, 0x0008 }, /* R370 - FLL1 Control 2 */
- { 0x00000173, 0x0018 }, /* R371 - FLL1 Control 3 */
- { 0x00000174, 0x007D }, /* R372 - FLL1 Control 4 */
- { 0x00000175, 0x0004 }, /* R373 - FLL1 Control 5 */
- { 0x00000176, 0x0000 }, /* R374 - FLL1 Control 6 */
+ { 0x00000172, 0x0008 }, /* R370 - FLL1 Control 2 */
+ { 0x00000173, 0x0018 }, /* R371 - FLL1 Control 3 */
+ { 0x00000174, 0x007D }, /* R372 - FLL1 Control 4 */
+ { 0x00000175, 0x0004 }, /* R373 - FLL1 Control 5 */
+ { 0x00000176, 0x0000 }, /* R374 - FLL1 Control 6 */
{ 0x00000179, 0x0000 }, /* R377 - FLL1 Control 7 */
- { 0x00000181, 0x0000 }, /* R385 - FLL1 Synchroniser 1 */
- { 0x00000182, 0x0000 }, /* R386 - FLL1 Synchroniser 2 */
- { 0x00000183, 0x0000 }, /* R387 - FLL1 Synchroniser 3 */
- { 0x00000184, 0x0000 }, /* R388 - FLL1 Synchroniser 4 */
- { 0x00000185, 0x0000 }, /* R389 - FLL1 Synchroniser 5 */
- { 0x00000186, 0x0000 }, /* R390 - FLL1 Synchroniser 6 */
+ { 0x00000181, 0x0000 }, /* R385 - FLL1 Synchroniser 1 */
+ { 0x00000182, 0x0000 }, /* R386 - FLL1 Synchroniser 2 */
+ { 0x00000183, 0x0000 }, /* R387 - FLL1 Synchroniser 3 */
+ { 0x00000184, 0x0000 }, /* R388 - FLL1 Synchroniser 4 */
+ { 0x00000185, 0x0000 }, /* R389 - FLL1 Synchroniser 5 */
+ { 0x00000186, 0x0000 }, /* R390 - FLL1 Synchroniser 6 */
{ 0x00000187, 0x0001 }, /* R391 - FLL1 Synchroniser 7 */
- { 0x00000189, 0x0000 }, /* R393 - FLL1 Spread Spectrum */
- { 0x0000018A, 0x0004 }, /* R394 - FLL1 GPIO Clock */
- { 0x00000191, 0x0000 }, /* R401 - FLL2 Control 1 */
- { 0x00000192, 0x0008 }, /* R402 - FLL2 Control 2 */
- { 0x00000193, 0x0018 }, /* R403 - FLL2 Control 3 */
- { 0x00000194, 0x007D }, /* R404 - FLL2 Control 4 */
- { 0x00000195, 0x0004 }, /* R405 - FLL2 Control 5 */
- { 0x00000196, 0x0000 }, /* R406 - FLL2 Control 6 */
+ { 0x00000189, 0x0000 }, /* R393 - FLL1 Spread Spectrum */
+ { 0x0000018A, 0x0004 }, /* R394 - FLL1 GPIO Clock */
+ { 0x00000191, 0x0000 }, /* R401 - FLL2 Control 1 */
+ { 0x00000192, 0x0008 }, /* R402 - FLL2 Control 2 */
+ { 0x00000193, 0x0018 }, /* R403 - FLL2 Control 3 */
+ { 0x00000194, 0x007D }, /* R404 - FLL2 Control 4 */
+ { 0x00000195, 0x0004 }, /* R405 - FLL2 Control 5 */
+ { 0x00000196, 0x0000 }, /* R406 - FLL2 Control 6 */
{ 0x00000199, 0x0000 }, /* R409 - FLL2 Control 7 */
- { 0x000001A1, 0x0000 }, /* R417 - FLL2 Synchroniser 1 */
- { 0x000001A2, 0x0000 }, /* R418 - FLL2 Synchroniser 2 */
- { 0x000001A3, 0x0000 }, /* R419 - FLL2 Synchroniser 3 */
- { 0x000001A4, 0x0000 }, /* R420 - FLL2 Synchroniser 4 */
- { 0x000001A5, 0x0000 }, /* R421 - FLL2 Synchroniser 5 */
- { 0x000001A6, 0x0000 }, /* R422 - FLL2 Synchroniser 6 */
+ { 0x000001A1, 0x0000 }, /* R417 - FLL2 Synchroniser 1 */
+ { 0x000001A2, 0x0000 }, /* R418 - FLL2 Synchroniser 2 */
+ { 0x000001A3, 0x0000 }, /* R419 - FLL2 Synchroniser 3 */
+ { 0x000001A4, 0x0000 }, /* R420 - FLL2 Synchroniser 4 */
+ { 0x000001A5, 0x0000 }, /* R421 - FLL2 Synchroniser 5 */
+ { 0x000001A6, 0x0000 }, /* R422 - FLL2 Synchroniser 6 */
{ 0x000001A7, 0x0001 }, /* R423 - FLL2 Synchroniser 7 */
- { 0x000001A9, 0x0000 }, /* R425 - FLL2 Spread Spectrum */
- { 0x000001AA, 0x0004 }, /* R426 - FLL2 GPIO Clock */
- { 0x00000200, 0x0006 }, /* R512 - Mic Charge Pump 1 */
- { 0x00000210, 0x00D4 }, /* R528 - LDO1 Control 1 */
+ { 0x000001A9, 0x0000 }, /* R425 - FLL2 Spread Spectrum */
+ { 0x000001AA, 0x0004 }, /* R426 - FLL2 GPIO Clock */
+ { 0x00000200, 0x0006 }, /* R512 - Mic Charge Pump 1 */
+ { 0x00000210, 0x00D4 }, /* R528 - LDO1 Control 1 */
{ 0x00000212, 0x0000 }, /* R530 - LDO1 Control 2 */
- { 0x00000213, 0x0344 }, /* R531 - LDO2 Control 1 */
- { 0x00000218, 0x01A6 }, /* R536 - Mic Bias Ctrl 1 */
- { 0x00000219, 0x01A6 }, /* R537 - Mic Bias Ctrl 2 */
- { 0x0000021A, 0x01A6 }, /* R538 - Mic Bias Ctrl 3 */
- { 0x00000293, 0x0000 }, /* R659 - Accessory Detect Mode 1 */
- { 0x0000029B, 0x0020 }, /* R667 - Headphone Detect 1 */
+ { 0x00000213, 0x0344 }, /* R531 - LDO2 Control 1 */
+ { 0x00000218, 0x01A6 }, /* R536 - Mic Bias Ctrl 1 */
+ { 0x00000219, 0x01A6 }, /* R537 - Mic Bias Ctrl 2 */
+ { 0x0000021A, 0x01A6 }, /* R538 - Mic Bias Ctrl 3 */
+ { 0x00000293, 0x0000 }, /* R659 - Accessory Detect Mode 1 */
+ { 0x0000029B, 0x0020 }, /* R667 - Headphone Detect 1 */
{ 0x000002A2, 0x0000 }, /* R674 - Micd clamp control */
- { 0x000002A3, 0x1102 }, /* R675 - Mic Detect 1 */
- { 0x000002A4, 0x009F }, /* R676 - Mic Detect 2 */
+ { 0x000002A3, 0x1102 }, /* R675 - Mic Detect 1 */
+ { 0x000002A4, 0x009F }, /* R676 - Mic Detect 2 */
{ 0x000002A6, 0x3737 }, /* R678 - Mic Detect Level 1 */
{ 0x000002A7, 0x2C37 }, /* R679 - Mic Detect Level 2 */
{ 0x000002A8, 0x1422 }, /* R680 - Mic Detect Level 3 */
{ 0x000002A9, 0x030A }, /* R681 - Mic Detect Level 4 */
- { 0x000002C3, 0x0000 }, /* R707 - Mic noise mix control 1 */
- { 0x000002CB, 0x0000 }, /* R715 - Isolation control */
- { 0x000002D3, 0x0000 }, /* R723 - Jack detect analogue */
- { 0x00000300, 0x0000 }, /* R768 - Input Enables */
- { 0x00000308, 0x0000 }, /* R776 - Input Rate */
- { 0x00000309, 0x0022 }, /* R777 - Input Volume Ramp */
- { 0x00000310, 0x2080 }, /* R784 - IN1L Control */
- { 0x00000311, 0x0180 }, /* R785 - ADC Digital Volume 1L */
- { 0x00000312, 0x0000 }, /* R786 - DMIC1L Control */
- { 0x00000314, 0x0080 }, /* R788 - IN1R Control */
- { 0x00000315, 0x0180 }, /* R789 - ADC Digital Volume 1R */
- { 0x00000316, 0x0000 }, /* R790 - DMIC1R Control */
- { 0x00000318, 0x2080 }, /* R792 - IN2L Control */
- { 0x00000319, 0x0180 }, /* R793 - ADC Digital Volume 2L */
- { 0x0000031A, 0x0000 }, /* R794 - DMIC2L Control */
- { 0x0000031C, 0x0080 }, /* R796 - IN2R Control */
- { 0x0000031D, 0x0180 }, /* R797 - ADC Digital Volume 2R */
- { 0x0000031E, 0x0000 }, /* R798 - DMIC2R Control */
- { 0x00000320, 0x2080 }, /* R800 - IN3L Control */
- { 0x00000321, 0x0180 }, /* R801 - ADC Digital Volume 3L */
- { 0x00000322, 0x0000 }, /* R802 - DMIC3L Control */
- { 0x00000324, 0x0080 }, /* R804 - IN3R Control */
- { 0x00000325, 0x0180 }, /* R805 - ADC Digital Volume 3R */
- { 0x00000326, 0x0000 }, /* R806 - DMIC3R Control */
- { 0x00000400, 0x0000 }, /* R1024 - Output Enables 1 */
- { 0x00000408, 0x0000 }, /* R1032 - Output Rate 1 */
- { 0x00000409, 0x0022 }, /* R1033 - Output Volume Ramp */
+ { 0x000002C3, 0x0000 }, /* R707 - Mic noise mix control 1 */
+ { 0x000002CB, 0x0000 }, /* R715 - Isolation control */
+ { 0x000002D3, 0x0000 }, /* R723 - Jack detect analogue */
+ { 0x00000300, 0x0000 }, /* R768 - Input Enables */
+ { 0x00000308, 0x0000 }, /* R776 - Input Rate */
+ { 0x00000309, 0x0022 }, /* R777 - Input Volume Ramp */
+ { 0x00000310, 0x2080 }, /* R784 - IN1L Control */
+ { 0x00000311, 0x0180 }, /* R785 - ADC Digital Volume 1L */
+ { 0x00000312, 0x0000 }, /* R786 - DMIC1L Control */
+ { 0x00000314, 0x0080 }, /* R788 - IN1R Control */
+ { 0x00000315, 0x0180 }, /* R789 - ADC Digital Volume 1R */
+ { 0x00000316, 0x0000 }, /* R790 - DMIC1R Control */
+ { 0x00000318, 0x2080 }, /* R792 - IN2L Control */
+ { 0x00000319, 0x0180 }, /* R793 - ADC Digital Volume 2L */
+ { 0x0000031A, 0x0000 }, /* R794 - DMIC2L Control */
+ { 0x0000031C, 0x0080 }, /* R796 - IN2R Control */
+ { 0x0000031D, 0x0180 }, /* R797 - ADC Digital Volume 2R */
+ { 0x0000031E, 0x0000 }, /* R798 - DMIC2R Control */
+ { 0x00000320, 0x2080 }, /* R800 - IN3L Control */
+ { 0x00000321, 0x0180 }, /* R801 - ADC Digital Volume 3L */
+ { 0x00000322, 0x0000 }, /* R802 - DMIC3L Control */
+ { 0x00000324, 0x0080 }, /* R804 - IN3R Control */
+ { 0x00000325, 0x0180 }, /* R805 - ADC Digital Volume 3R */
+ { 0x00000326, 0x0000 }, /* R806 - DMIC3R Control */
+ { 0x00000400, 0x0000 }, /* R1024 - Output Enables 1 */
+ { 0x00000408, 0x0000 }, /* R1032 - Output Rate 1 */
+ { 0x00000409, 0x0022 }, /* R1033 - Output Volume Ramp */
{ 0x00000410, 0x6080 }, /* R1040 - Output Path Config 1L */
- { 0x00000411, 0x0180 }, /* R1041 - DAC Digital Volume 1L */
+ { 0x00000411, 0x0180 }, /* R1041 - DAC Digital Volume 1L */
{ 0x00000412, 0x0081 }, /* R1042 - DAC Volume Limit 1L */
- { 0x00000413, 0x0001 }, /* R1043 - Noise Gate Select 1L */
- { 0x00000414, 0x0080 }, /* R1044 - Output Path Config 1R */
- { 0x00000415, 0x0180 }, /* R1045 - DAC Digital Volume 1R */
+ { 0x00000413, 0x0001 }, /* R1043 - Noise Gate Select 1L */
+ { 0x00000414, 0x0080 }, /* R1044 - Output Path Config 1R */
+ { 0x00000415, 0x0180 }, /* R1045 - DAC Digital Volume 1R */
{ 0x00000416, 0x0081 }, /* R1046 - DAC Volume Limit 1R */
- { 0x00000417, 0x0002 }, /* R1047 - Noise Gate Select 1R */
+ { 0x00000417, 0x0002 }, /* R1047 - Noise Gate Select 1R */
{ 0x00000418, 0xA080 }, /* R1048 - Output Path Config 2L */
- { 0x00000419, 0x0180 }, /* R1049 - DAC Digital Volume 2L */
+ { 0x00000419, 0x0180 }, /* R1049 - DAC Digital Volume 2L */
{ 0x0000041A, 0x0081 }, /* R1050 - DAC Volume Limit 2L */
- { 0x0000041B, 0x0004 }, /* R1051 - Noise Gate Select 2L */
- { 0x0000041C, 0x0080 }, /* R1052 - Output Path Config 2R */
- { 0x0000041D, 0x0180 }, /* R1053 - DAC Digital Volume 2R */
+ { 0x0000041B, 0x0004 }, /* R1051 - Noise Gate Select 2L */
+ { 0x0000041C, 0x0080 }, /* R1052 - Output Path Config 2R */
+ { 0x0000041D, 0x0180 }, /* R1053 - DAC Digital Volume 2R */
{ 0x0000041E, 0x0081 }, /* R1054 - DAC Volume Limit 2R */
- { 0x0000041F, 0x0008 }, /* R1055 - Noise Gate Select 2R */
+ { 0x0000041F, 0x0008 }, /* R1055 - Noise Gate Select 2R */
{ 0x00000420, 0xA080 }, /* R1056 - Output Path Config 3L */
- { 0x00000421, 0x0180 }, /* R1057 - DAC Digital Volume 3L */
+ { 0x00000421, 0x0180 }, /* R1057 - DAC Digital Volume 3L */
{ 0x00000422, 0x0081 }, /* R1058 - DAC Volume Limit 3L */
- { 0x00000423, 0x0010 }, /* R1059 - Noise Gate Select 3L */
+ { 0x00000423, 0x0010 }, /* R1059 - Noise Gate Select 3L */
{ 0x00000428, 0xE000 }, /* R1064 - Output Path Config 4L */
- { 0x00000429, 0x0180 }, /* R1065 - DAC Digital Volume 4L */
+ { 0x00000429, 0x0180 }, /* R1065 - DAC Digital Volume 4L */
{ 0x0000042A, 0x0081 }, /* R1066 - Out Volume 4L */
- { 0x0000042B, 0x0040 }, /* R1067 - Noise Gate Select 4L */
- { 0x0000042D, 0x0180 }, /* R1069 - DAC Digital Volume 4R */
+ { 0x0000042B, 0x0040 }, /* R1067 - Noise Gate Select 4L */
+ { 0x0000042D, 0x0180 }, /* R1069 - DAC Digital Volume 4R */
{ 0x0000042E, 0x0081 }, /* R1070 - Out Volume 4R */
- { 0x0000042F, 0x0080 }, /* R1071 - Noise Gate Select 4R */
- { 0x00000430, 0x0000 }, /* R1072 - Output Path Config 5L */
- { 0x00000431, 0x0180 }, /* R1073 - DAC Digital Volume 5L */
+ { 0x0000042F, 0x0080 }, /* R1071 - Noise Gate Select 4R */
+ { 0x00000430, 0x0000 }, /* R1072 - Output Path Config 5L */
+ { 0x00000431, 0x0180 }, /* R1073 - DAC Digital Volume 5L */
{ 0x00000432, 0x0081 }, /* R1074 - DAC Volume Limit 5L */
- { 0x00000433, 0x0100 }, /* R1075 - Noise Gate Select 5L */
- { 0x00000435, 0x0180 }, /* R1077 - DAC Digital Volume 5R */
+ { 0x00000433, 0x0100 }, /* R1075 - Noise Gate Select 5L */
+ { 0x00000435, 0x0180 }, /* R1077 - DAC Digital Volume 5R */
{ 0x00000436, 0x0081 }, /* R1078 - DAC Volume Limit 5R */
{ 0x00000437, 0x0200 }, /* R1079 - Noise Gate Select 5R */
{ 0x00000440, 0x0FFF }, /* R1088 - DRE Enable */
{ 0x00000442, 0x3F0A }, /* R1090 - DRE Control 2 */
{ 0x00000443, 0xDC1F }, /* R1090 - DRE Control 3 */
- { 0x00000450, 0x0000 }, /* R1104 - DAC AEC Control 1 */
+ { 0x00000450, 0x0000 }, /* R1104 - DAC AEC Control 1 */
{ 0x00000458, 0x000B }, /* R1112 - Noise Gate Control */
- { 0x00000490, 0x0069 }, /* R1168 - PDM SPK1 CTRL 1 */
- { 0x00000491, 0x0000 }, /* R1169 - PDM SPK1 CTRL 2 */
- { 0x00000500, 0x000C }, /* R1280 - AIF1 BCLK Ctrl */
- { 0x00000501, 0x0008 }, /* R1281 - AIF1 Tx Pin Ctrl */
- { 0x00000502, 0x0000 }, /* R1282 - AIF1 Rx Pin Ctrl */
- { 0x00000503, 0x0000 }, /* R1283 - AIF1 Rate Ctrl */
- { 0x00000504, 0x0000 }, /* R1284 - AIF1 Format */
- { 0x00000505, 0x0040 }, /* R1285 - AIF1 Tx BCLK Rate */
- { 0x00000506, 0x0040 }, /* R1286 - AIF1 Rx BCLK Rate */
- { 0x00000507, 0x1818 }, /* R1287 - AIF1 Frame Ctrl 1 */
- { 0x00000508, 0x1818 }, /* R1288 - AIF1 Frame Ctrl 2 */
- { 0x00000509, 0x0000 }, /* R1289 - AIF1 Frame Ctrl 3 */
- { 0x0000050A, 0x0001 }, /* R1290 - AIF1 Frame Ctrl 4 */
- { 0x0000050B, 0x0002 }, /* R1291 - AIF1 Frame Ctrl 5 */
- { 0x0000050C, 0x0003 }, /* R1292 - AIF1 Frame Ctrl 6 */
- { 0x0000050D, 0x0004 }, /* R1293 - AIF1 Frame Ctrl 7 */
- { 0x0000050E, 0x0005 }, /* R1294 - AIF1 Frame Ctrl 8 */
- { 0x0000050F, 0x0006 }, /* R1295 - AIF1 Frame Ctrl 9 */
- { 0x00000510, 0x0007 }, /* R1296 - AIF1 Frame Ctrl 10 */
- { 0x00000511, 0x0000 }, /* R1297 - AIF1 Frame Ctrl 11 */
- { 0x00000512, 0x0001 }, /* R1298 - AIF1 Frame Ctrl 12 */
- { 0x00000513, 0x0002 }, /* R1299 - AIF1 Frame Ctrl 13 */
- { 0x00000514, 0x0003 }, /* R1300 - AIF1 Frame Ctrl 14 */
- { 0x00000515, 0x0004 }, /* R1301 - AIF1 Frame Ctrl 15 */
- { 0x00000516, 0x0005 }, /* R1302 - AIF1 Frame Ctrl 16 */
- { 0x00000517, 0x0006 }, /* R1303 - AIF1 Frame Ctrl 17 */
- { 0x00000518, 0x0007 }, /* R1304 - AIF1 Frame Ctrl 18 */
- { 0x00000519, 0x0000 }, /* R1305 - AIF1 Tx Enables */
- { 0x0000051A, 0x0000 }, /* R1306 - AIF1 Rx Enables */
- { 0x00000540, 0x000C }, /* R1344 - AIF2 BCLK Ctrl */
- { 0x00000541, 0x0008 }, /* R1345 - AIF2 Tx Pin Ctrl */
- { 0x00000542, 0x0000 }, /* R1346 - AIF2 Rx Pin Ctrl */
- { 0x00000543, 0x0000 }, /* R1347 - AIF2 Rate Ctrl */
- { 0x00000544, 0x0000 }, /* R1348 - AIF2 Format */
- { 0x00000545, 0x0040 }, /* R1349 - AIF2 Tx BCLK Rate */
- { 0x00000546, 0x0040 }, /* R1350 - AIF2 Rx BCLK Rate */
- { 0x00000547, 0x1818 }, /* R1351 - AIF2 Frame Ctrl 1 */
- { 0x00000548, 0x1818 }, /* R1352 - AIF2 Frame Ctrl 2 */
- { 0x00000549, 0x0000 }, /* R1353 - AIF2 Frame Ctrl 3 */
- { 0x0000054A, 0x0001 }, /* R1354 - AIF2 Frame Ctrl 4 */
- { 0x00000551, 0x0000 }, /* R1361 - AIF2 Frame Ctrl 11 */
- { 0x00000552, 0x0001 }, /* R1362 - AIF2 Frame Ctrl 12 */
- { 0x00000559, 0x0000 }, /* R1369 - AIF2 Tx Enables */
- { 0x0000055A, 0x0000 }, /* R1370 - AIF2 Rx Enables */
- { 0x00000580, 0x000C }, /* R1408 - AIF3 BCLK Ctrl */
- { 0x00000581, 0x0008 }, /* R1409 - AIF3 Tx Pin Ctrl */
- { 0x00000582, 0x0000 }, /* R1410 - AIF3 Rx Pin Ctrl */
- { 0x00000583, 0x0000 }, /* R1411 - AIF3 Rate Ctrl */
- { 0x00000584, 0x0000 }, /* R1412 - AIF3 Format */
- { 0x00000585, 0x0040 }, /* R1413 - AIF3 Tx BCLK Rate */
- { 0x00000586, 0x0040 }, /* R1414 - AIF3 Rx BCLK Rate */
- { 0x00000587, 0x1818 }, /* R1415 - AIF3 Frame Ctrl 1 */
- { 0x00000588, 0x1818 }, /* R1416 - AIF3 Frame Ctrl 2 */
- { 0x00000589, 0x0000 }, /* R1417 - AIF3 Frame Ctrl 3 */
- { 0x0000058A, 0x0001 }, /* R1418 - AIF3 Frame Ctrl 4 */
- { 0x00000591, 0x0000 }, /* R1425 - AIF3 Frame Ctrl 11 */
- { 0x00000592, 0x0001 }, /* R1426 - AIF3 Frame Ctrl 12 */
- { 0x00000599, 0x0000 }, /* R1433 - AIF3 Tx Enables */
- { 0x0000059A, 0x0000 }, /* R1434 - AIF3 Rx Enables */
- { 0x000005E3, 0x0004 }, /* R1507 - SLIMbus Framer Ref Gear */
- { 0x000005E5, 0x0000 }, /* R1509 - SLIMbus Rates 1 */
- { 0x000005E6, 0x0000 }, /* R1510 - SLIMbus Rates 2 */
- { 0x000005E7, 0x0000 }, /* R1511 - SLIMbus Rates 3 */
- { 0x000005E8, 0x0000 }, /* R1512 - SLIMbus Rates 4 */
- { 0x000005E9, 0x0000 }, /* R1513 - SLIMbus Rates 5 */
- { 0x000005EA, 0x0000 }, /* R1514 - SLIMbus Rates 6 */
- { 0x000005EB, 0x0000 }, /* R1515 - SLIMbus Rates 7 */
- { 0x000005EC, 0x0000 }, /* R1516 - SLIMbus Rates 8 */
- { 0x000005F5, 0x0000 }, /* R1525 - SLIMbus RX Channel Enable */
- { 0x000005F6, 0x0000 }, /* R1526 - SLIMbus TX Channel Enable */
- { 0x00000640, 0x0000 }, /* R1600 - PWM1MIX Input 1 Source */
- { 0x00000641, 0x0080 }, /* R1601 - PWM1MIX Input 1 Volume */
- { 0x00000642, 0x0000 }, /* R1602 - PWM1MIX Input 2 Source */
- { 0x00000643, 0x0080 }, /* R1603 - PWM1MIX Input 2 Volume */
- { 0x00000644, 0x0000 }, /* R1604 - PWM1MIX Input 3 Source */
- { 0x00000645, 0x0080 }, /* R1605 - PWM1MIX Input 3 Volume */
- { 0x00000646, 0x0000 }, /* R1606 - PWM1MIX Input 4 Source */
- { 0x00000647, 0x0080 }, /* R1607 - PWM1MIX Input 4 Volume */
- { 0x00000648, 0x0000 }, /* R1608 - PWM2MIX Input 1 Source */
- { 0x00000649, 0x0080 }, /* R1609 - PWM2MIX Input 1 Volume */
- { 0x0000064A, 0x0000 }, /* R1610 - PWM2MIX Input 2 Source */
- { 0x0000064B, 0x0080 }, /* R1611 - PWM2MIX Input 2 Volume */
- { 0x0000064C, 0x0000 }, /* R1612 - PWM2MIX Input 3 Source */
- { 0x0000064D, 0x0080 }, /* R1613 - PWM2MIX Input 3 Volume */
- { 0x0000064E, 0x0000 }, /* R1614 - PWM2MIX Input 4 Source */
- { 0x0000064F, 0x0080 }, /* R1615 - PWM2MIX Input 4 Volume */
- { 0x00000660, 0x0000 }, /* R1632 - MICMIX Input 1 Source */
- { 0x00000661, 0x0080 }, /* R1633 - MICMIX Input 1 Volume */
- { 0x00000662, 0x0000 }, /* R1634 - MICMIX Input 2 Source */
- { 0x00000663, 0x0080 }, /* R1635 - MICMIX Input 2 Volume */
- { 0x00000664, 0x0000 }, /* R1636 - MICMIX Input 3 Source */
- { 0x00000665, 0x0080 }, /* R1637 - MICMIX Input 3 Volume */
- { 0x00000666, 0x0000 }, /* R1638 - MICMIX Input 4 Source */
- { 0x00000667, 0x0080 }, /* R1639 - MICMIX Input 4 Volume */
- { 0x00000668, 0x0000 }, /* R1640 - NOISEMIX Input 1 Source */
- { 0x00000669, 0x0080 }, /* R1641 - NOISEMIX Input 1 Volume */
- { 0x0000066A, 0x0000 }, /* R1642 - NOISEMIX Input 2 Source */
- { 0x0000066B, 0x0080 }, /* R1643 - NOISEMIX Input 2 Volume */
- { 0x0000066C, 0x0000 }, /* R1644 - NOISEMIX Input 3 Source */
- { 0x0000066D, 0x0080 }, /* R1645 - NOISEMIX Input 3 Volume */
- { 0x0000066E, 0x0000 }, /* R1646 - NOISEMIX Input 4 Source */
- { 0x0000066F, 0x0080 }, /* R1647 - NOISEMIX Input 4 Volume */
- { 0x00000680, 0x0000 }, /* R1664 - OUT1LMIX Input 1 Source */
- { 0x00000681, 0x0080 }, /* R1665 - OUT1LMIX Input 1 Volume */
- { 0x00000682, 0x0000 }, /* R1666 - OUT1LMIX Input 2 Source */
- { 0x00000683, 0x0080 }, /* R1667 - OUT1LMIX Input 2 Volume */
- { 0x00000684, 0x0000 }, /* R1668 - OUT1LMIX Input 3 Source */
- { 0x00000685, 0x0080 }, /* R1669 - OUT1LMIX Input 3 Volume */
- { 0x00000686, 0x0000 }, /* R1670 - OUT1LMIX Input 4 Source */
- { 0x00000687, 0x0080 }, /* R1671 - OUT1LMIX Input 4 Volume */
- { 0x00000688, 0x0000 }, /* R1672 - OUT1RMIX Input 1 Source */
- { 0x00000689, 0x0080 }, /* R1673 - OUT1RMIX Input 1 Volume */
- { 0x0000068A, 0x0000 }, /* R1674 - OUT1RMIX Input 2 Source */
- { 0x0000068B, 0x0080 }, /* R1675 - OUT1RMIX Input 2 Volume */
- { 0x0000068C, 0x0000 }, /* R1676 - OUT1RMIX Input 3 Source */
- { 0x0000068D, 0x0080 }, /* R1677 - OUT1RMIX Input 3 Volume */
- { 0x0000068E, 0x0000 }, /* R1678 - OUT1RMIX Input 4 Source */
- { 0x0000068F, 0x0080 }, /* R1679 - OUT1RMIX Input 4 Volume */
- { 0x00000690, 0x0000 }, /* R1680 - OUT2LMIX Input 1 Source */
- { 0x00000691, 0x0080 }, /* R1681 - OUT2LMIX Input 1 Volume */
- { 0x00000692, 0x0000 }, /* R1682 - OUT2LMIX Input 2 Source */
- { 0x00000693, 0x0080 }, /* R1683 - OUT2LMIX Input 2 Volume */
- { 0x00000694, 0x0000 }, /* R1684 - OUT2LMIX Input 3 Source */
- { 0x00000695, 0x0080 }, /* R1685 - OUT2LMIX Input 3 Volume */
- { 0x00000696, 0x0000 }, /* R1686 - OUT2LMIX Input 4 Source */
- { 0x00000697, 0x0080 }, /* R1687 - OUT2LMIX Input 4 Volume */
- { 0x00000698, 0x0000 }, /* R1688 - OUT2RMIX Input 1 Source */
- { 0x00000699, 0x0080 }, /* R1689 - OUT2RMIX Input 1 Volume */
- { 0x0000069A, 0x0000 }, /* R1690 - OUT2RMIX Input 2 Source */
- { 0x0000069B, 0x0080 }, /* R1691 - OUT2RMIX Input 2 Volume */
- { 0x0000069C, 0x0000 }, /* R1692 - OUT2RMIX Input 3 Source */
- { 0x0000069D, 0x0080 }, /* R1693 - OUT2RMIX Input 3 Volume */
- { 0x0000069E, 0x0000 }, /* R1694 - OUT2RMIX Input 4 Source */
- { 0x0000069F, 0x0080 }, /* R1695 - OUT2RMIX Input 4 Volume */
- { 0x000006A0, 0x0000 }, /* R1696 - OUT3LMIX Input 1 Source */
- { 0x000006A1, 0x0080 }, /* R1697 - OUT3LMIX Input 1 Volume */
- { 0x000006A2, 0x0000 }, /* R1698 - OUT3LMIX Input 2 Source */
- { 0x000006A3, 0x0080 }, /* R1699 - OUT3LMIX Input 2 Volume */
- { 0x000006A4, 0x0000 }, /* R1700 - OUT3LMIX Input 3 Source */
- { 0x000006A5, 0x0080 }, /* R1701 - OUT3LMIX Input 3 Volume */
- { 0x000006A6, 0x0000 }, /* R1702 - OUT3LMIX Input 4 Source */
- { 0x000006A7, 0x0080 }, /* R1703 - OUT3LMIX Input 4 Volume */
- { 0x000006B0, 0x0000 }, /* R1712 - OUT4LMIX Input 1 Source */
- { 0x000006B1, 0x0080 }, /* R1713 - OUT4LMIX Input 1 Volume */
- { 0x000006B2, 0x0000 }, /* R1714 - OUT4LMIX Input 2 Source */
- { 0x000006B3, 0x0080 }, /* R1715 - OUT4LMIX Input 2 Volume */
- { 0x000006B4, 0x0000 }, /* R1716 - OUT4LMIX Input 3 Source */
- { 0x000006B5, 0x0080 }, /* R1717 - OUT4LMIX Input 3 Volume */
- { 0x000006B6, 0x0000 }, /* R1718 - OUT4LMIX Input 4 Source */
- { 0x000006B7, 0x0080 }, /* R1719 - OUT4LMIX Input 4 Volume */
- { 0x000006B8, 0x0000 }, /* R1720 - OUT4RMIX Input 1 Source */
- { 0x000006B9, 0x0080 }, /* R1721 - OUT4RMIX Input 1 Volume */
- { 0x000006BA, 0x0000 }, /* R1722 - OUT4RMIX Input 2 Source */
- { 0x000006BB, 0x0080 }, /* R1723 - OUT4RMIX Input 2 Volume */
- { 0x000006BC, 0x0000 }, /* R1724 - OUT4RMIX Input 3 Source */
- { 0x000006BD, 0x0080 }, /* R1725 - OUT4RMIX Input 3 Volume */
- { 0x000006BE, 0x0000 }, /* R1726 - OUT4RMIX Input 4 Source */
- { 0x000006BF, 0x0080 }, /* R1727 - OUT4RMIX Input 4 Volume */
- { 0x000006C0, 0x0000 }, /* R1728 - OUT5LMIX Input 1 Source */
- { 0x000006C1, 0x0080 }, /* R1729 - OUT5LMIX Input 1 Volume */
- { 0x000006C2, 0x0000 }, /* R1730 - OUT5LMIX Input 2 Source */
- { 0x000006C3, 0x0080 }, /* R1731 - OUT5LMIX Input 2 Volume */
- { 0x000006C4, 0x0000 }, /* R1732 - OUT5LMIX Input 3 Source */
- { 0x000006C5, 0x0080 }, /* R1733 - OUT5LMIX Input 3 Volume */
- { 0x000006C6, 0x0000 }, /* R1734 - OUT5LMIX Input 4 Source */
- { 0x000006C7, 0x0080 }, /* R1735 - OUT5LMIX Input 4 Volume */
- { 0x000006C8, 0x0000 }, /* R1736 - OUT5RMIX Input 1 Source */
- { 0x000006C9, 0x0080 }, /* R1737 - OUT5RMIX Input 1 Volume */
- { 0x000006CA, 0x0000 }, /* R1738 - OUT5RMIX Input 2 Source */
- { 0x000006CB, 0x0080 }, /* R1739 - OUT5RMIX Input 2 Volume */
- { 0x000006CC, 0x0000 }, /* R1740 - OUT5RMIX Input 3 Source */
- { 0x000006CD, 0x0080 }, /* R1741 - OUT5RMIX Input 3 Volume */
- { 0x000006CE, 0x0000 }, /* R1742 - OUT5RMIX Input 4 Source */
- { 0x000006CF, 0x0080 }, /* R1743 - OUT5RMIX Input 4 Volume */
- { 0x00000700, 0x0000 }, /* R1792 - AIF1TX1MIX Input 1 Source */
- { 0x00000701, 0x0080 }, /* R1793 - AIF1TX1MIX Input 1 Volume */
- { 0x00000702, 0x0000 }, /* R1794 - AIF1TX1MIX Input 2 Source */
- { 0x00000703, 0x0080 }, /* R1795 - AIF1TX1MIX Input 2 Volume */
- { 0x00000704, 0x0000 }, /* R1796 - AIF1TX1MIX Input 3 Source */
- { 0x00000705, 0x0080 }, /* R1797 - AIF1TX1MIX Input 3 Volume */
- { 0x00000706, 0x0000 }, /* R1798 - AIF1TX1MIX Input 4 Source */
- { 0x00000707, 0x0080 }, /* R1799 - AIF1TX1MIX Input 4 Volume */
- { 0x00000708, 0x0000 }, /* R1800 - AIF1TX2MIX Input 1 Source */
- { 0x00000709, 0x0080 }, /* R1801 - AIF1TX2MIX Input 1 Volume */
- { 0x0000070A, 0x0000 }, /* R1802 - AIF1TX2MIX Input 2 Source */
- { 0x0000070B, 0x0080 }, /* R1803 - AIF1TX2MIX Input 2 Volume */
- { 0x0000070C, 0x0000 }, /* R1804 - AIF1TX2MIX Input 3 Source */
- { 0x0000070D, 0x0080 }, /* R1805 - AIF1TX2MIX Input 3 Volume */
- { 0x0000070E, 0x0000 }, /* R1806 - AIF1TX2MIX Input 4 Source */
- { 0x0000070F, 0x0080 }, /* R1807 - AIF1TX2MIX Input 4 Volume */
- { 0x00000710, 0x0000 }, /* R1808 - AIF1TX3MIX Input 1 Source */
- { 0x00000711, 0x0080 }, /* R1809 - AIF1TX3MIX Input 1 Volume */
- { 0x00000712, 0x0000 }, /* R1810 - AIF1TX3MIX Input 2 Source */
- { 0x00000713, 0x0080 }, /* R1811 - AIF1TX3MIX Input 2 Volume */
- { 0x00000714, 0x0000 }, /* R1812 - AIF1TX3MIX Input 3 Source */
- { 0x00000715, 0x0080 }, /* R1813 - AIF1TX3MIX Input 3 Volume */
- { 0x00000716, 0x0000 }, /* R1814 - AIF1TX3MIX Input 4 Source */
- { 0x00000717, 0x0080 }, /* R1815 - AIF1TX3MIX Input 4 Volume */
- { 0x00000718, 0x0000 }, /* R1816 - AIF1TX4MIX Input 1 Source */
- { 0x00000719, 0x0080 }, /* R1817 - AIF1TX4MIX Input 1 Volume */
- { 0x0000071A, 0x0000 }, /* R1818 - AIF1TX4MIX Input 2 Source */
- { 0x0000071B, 0x0080 }, /* R1819 - AIF1TX4MIX Input 2 Volume */
- { 0x0000071C, 0x0000 }, /* R1820 - AIF1TX4MIX Input 3 Source */
- { 0x0000071D, 0x0080 }, /* R1821 - AIF1TX4MIX Input 3 Volume */
- { 0x0000071E, 0x0000 }, /* R1822 - AIF1TX4MIX Input 4 Source */
- { 0x0000071F, 0x0080 }, /* R1823 - AIF1TX4MIX Input 4 Volume */
- { 0x00000720, 0x0000 }, /* R1824 - AIF1TX5MIX Input 1 Source */
- { 0x00000721, 0x0080 }, /* R1825 - AIF1TX5MIX Input 1 Volume */
- { 0x00000722, 0x0000 }, /* R1826 - AIF1TX5MIX Input 2 Source */
- { 0x00000723, 0x0080 }, /* R1827 - AIF1TX5MIX Input 2 Volume */
- { 0x00000724, 0x0000 }, /* R1828 - AIF1TX5MIX Input 3 Source */
- { 0x00000725, 0x0080 }, /* R1829 - AIF1TX5MIX Input 3 Volume */
- { 0x00000726, 0x0000 }, /* R1830 - AIF1TX5MIX Input 4 Source */
- { 0x00000727, 0x0080 }, /* R1831 - AIF1TX5MIX Input 4 Volume */
- { 0x00000728, 0x0000 }, /* R1832 - AIF1TX6MIX Input 1 Source */
- { 0x00000729, 0x0080 }, /* R1833 - AIF1TX6MIX Input 1 Volume */
- { 0x0000072A, 0x0000 }, /* R1834 - AIF1TX6MIX Input 2 Source */
- { 0x0000072B, 0x0080 }, /* R1835 - AIF1TX6MIX Input 2 Volume */
- { 0x0000072C, 0x0000 }, /* R1836 - AIF1TX6MIX Input 3 Source */
- { 0x0000072D, 0x0080 }, /* R1837 - AIF1TX6MIX Input 3 Volume */
- { 0x0000072E, 0x0000 }, /* R1838 - AIF1TX6MIX Input 4 Source */
- { 0x0000072F, 0x0080 }, /* R1839 - AIF1TX6MIX Input 4 Volume */
- { 0x00000730, 0x0000 }, /* R1840 - AIF1TX7MIX Input 1 Source */
- { 0x00000731, 0x0080 }, /* R1841 - AIF1TX7MIX Input 1 Volume */
- { 0x00000732, 0x0000 }, /* R1842 - AIF1TX7MIX Input 2 Source */
- { 0x00000733, 0x0080 }, /* R1843 - AIF1TX7MIX Input 2 Volume */
- { 0x00000734, 0x0000 }, /* R1844 - AIF1TX7MIX Input 3 Source */
- { 0x00000735, 0x0080 }, /* R1845 - AIF1TX7MIX Input 3 Volume */
- { 0x00000736, 0x0000 }, /* R1846 - AIF1TX7MIX Input 4 Source */
- { 0x00000737, 0x0080 }, /* R1847 - AIF1TX7MIX Input 4 Volume */
- { 0x00000738, 0x0000 }, /* R1848 - AIF1TX8MIX Input 1 Source */
- { 0x00000739, 0x0080 }, /* R1849 - AIF1TX8MIX Input 1 Volume */
- { 0x0000073A, 0x0000 }, /* R1850 - AIF1TX8MIX Input 2 Source */
- { 0x0000073B, 0x0080 }, /* R1851 - AIF1TX8MIX Input 2 Volume */
- { 0x0000073C, 0x0000 }, /* R1852 - AIF1TX8MIX Input 3 Source */
- { 0x0000073D, 0x0080 }, /* R1853 - AIF1TX8MIX Input 3 Volume */
- { 0x0000073E, 0x0000 }, /* R1854 - AIF1TX8MIX Input 4 Source */
- { 0x0000073F, 0x0080 }, /* R1855 - AIF1TX8MIX Input 4 Volume */
- { 0x00000740, 0x0000 }, /* R1856 - AIF2TX1MIX Input 1 Source */
- { 0x00000741, 0x0080 }, /* R1857 - AIF2TX1MIX Input 1 Volume */
- { 0x00000742, 0x0000 }, /* R1858 - AIF2TX1MIX Input 2 Source */
- { 0x00000743, 0x0080 }, /* R1859 - AIF2TX1MIX Input 2 Volume */
- { 0x00000744, 0x0000 }, /* R1860 - AIF2TX1MIX Input 3 Source */
- { 0x00000745, 0x0080 }, /* R1861 - AIF2TX1MIX Input 3 Volume */
- { 0x00000746, 0x0000 }, /* R1862 - AIF2TX1MIX Input 4 Source */
- { 0x00000747, 0x0080 }, /* R1863 - AIF2TX1MIX Input 4 Volume */
- { 0x00000748, 0x0000 }, /* R1864 - AIF2TX2MIX Input 1 Source */
- { 0x00000749, 0x0080 }, /* R1865 - AIF2TX2MIX Input 1 Volume */
- { 0x0000074A, 0x0000 }, /* R1866 - AIF2TX2MIX Input 2 Source */
- { 0x0000074B, 0x0080 }, /* R1867 - AIF2TX2MIX Input 2 Volume */
- { 0x0000074C, 0x0000 }, /* R1868 - AIF2TX2MIX Input 3 Source */
- { 0x0000074D, 0x0080 }, /* R1869 - AIF2TX2MIX Input 3 Volume */
- { 0x0000074E, 0x0000 }, /* R1870 - AIF2TX2MIX Input 4 Source */
- { 0x0000074F, 0x0080 }, /* R1871 - AIF2TX2MIX Input 4 Volume */
- { 0x00000780, 0x0000 }, /* R1920 - AIF3TX1MIX Input 1 Source */
- { 0x00000781, 0x0080 }, /* R1921 - AIF3TX1MIX Input 1 Volume */
- { 0x00000782, 0x0000 }, /* R1922 - AIF3TX1MIX Input 2 Source */
- { 0x00000783, 0x0080 }, /* R1923 - AIF3TX1MIX Input 2 Volume */
- { 0x00000784, 0x0000 }, /* R1924 - AIF3TX1MIX Input 3 Source */
- { 0x00000785, 0x0080 }, /* R1925 - AIF3TX1MIX Input 3 Volume */
- { 0x00000786, 0x0000 }, /* R1926 - AIF3TX1MIX Input 4 Source */
- { 0x00000787, 0x0080 }, /* R1927 - AIF3TX1MIX Input 4 Volume */
- { 0x00000788, 0x0000 }, /* R1928 - AIF3TX2MIX Input 1 Source */
- { 0x00000789, 0x0080 }, /* R1929 - AIF3TX2MIX Input 1 Volume */
- { 0x0000078A, 0x0000 }, /* R1930 - AIF3TX2MIX Input 2 Source */
- { 0x0000078B, 0x0080 }, /* R1931 - AIF3TX2MIX Input 2 Volume */
- { 0x0000078C, 0x0000 }, /* R1932 - AIF3TX2MIX Input 3 Source */
- { 0x0000078D, 0x0080 }, /* R1933 - AIF3TX2MIX Input 3 Volume */
- { 0x0000078E, 0x0000 }, /* R1934 - AIF3TX2MIX Input 4 Source */
- { 0x0000078F, 0x0080 }, /* R1935 - AIF3TX2MIX Input 4 Volume */
- { 0x000007C0, 0x0000 }, /* R1984 - SLIMTX1MIX Input 1 Source */
- { 0x000007C1, 0x0080 }, /* R1985 - SLIMTX1MIX Input 1 Volume */
- { 0x000007C2, 0x0000 }, /* R1986 - SLIMTX1MIX Input 2 Source */
- { 0x000007C3, 0x0080 }, /* R1987 - SLIMTX1MIX Input 2 Volume */
- { 0x000007C4, 0x0000 }, /* R1988 - SLIMTX1MIX Input 3 Source */
- { 0x000007C5, 0x0080 }, /* R1989 - SLIMTX1MIX Input 3 Volume */
- { 0x000007C6, 0x0000 }, /* R1990 - SLIMTX1MIX Input 4 Source */
- { 0x000007C7, 0x0080 }, /* R1991 - SLIMTX1MIX Input 4 Volume */
- { 0x000007C8, 0x0000 }, /* R1992 - SLIMTX2MIX Input 1 Source */
- { 0x000007C9, 0x0080 }, /* R1993 - SLIMTX2MIX Input 1 Volume */
- { 0x000007CA, 0x0000 }, /* R1994 - SLIMTX2MIX Input 2 Source */
- { 0x000007CB, 0x0080 }, /* R1995 - SLIMTX2MIX Input 2 Volume */
- { 0x000007CC, 0x0000 }, /* R1996 - SLIMTX2MIX Input 3 Source */
- { 0x000007CD, 0x0080 }, /* R1997 - SLIMTX2MIX Input 3 Volume */
- { 0x000007CE, 0x0000 }, /* R1998 - SLIMTX2MIX Input 4 Source */
- { 0x000007CF, 0x0080 }, /* R1999 - SLIMTX2MIX Input 4 Volume */
- { 0x000007D0, 0x0000 }, /* R2000 - SLIMTX3MIX Input 1 Source */
- { 0x000007D1, 0x0080 }, /* R2001 - SLIMTX3MIX Input 1 Volume */
- { 0x000007D2, 0x0000 }, /* R2002 - SLIMTX3MIX Input 2 Source */
- { 0x000007D3, 0x0080 }, /* R2003 - SLIMTX3MIX Input 2 Volume */
- { 0x000007D4, 0x0000 }, /* R2004 - SLIMTX3MIX Input 3 Source */
- { 0x000007D5, 0x0080 }, /* R2005 - SLIMTX3MIX Input 3 Volume */
- { 0x000007D6, 0x0000 }, /* R2006 - SLIMTX3MIX Input 4 Source */
- { 0x000007D7, 0x0080 }, /* R2007 - SLIMTX3MIX Input 4 Volume */
- { 0x000007D8, 0x0000 }, /* R2008 - SLIMTX4MIX Input 1 Source */
- { 0x000007D9, 0x0080 }, /* R2009 - SLIMTX4MIX Input 1 Volume */
- { 0x000007DA, 0x0000 }, /* R2010 - SLIMTX4MIX Input 2 Source */
- { 0x000007DB, 0x0080 }, /* R2011 - SLIMTX4MIX Input 2 Volume */
- { 0x000007DC, 0x0000 }, /* R2012 - SLIMTX4MIX Input 3 Source */
- { 0x000007DD, 0x0080 }, /* R2013 - SLIMTX4MIX Input 3 Volume */
- { 0x000007DE, 0x0000 }, /* R2014 - SLIMTX4MIX Input 4 Source */
- { 0x000007DF, 0x0080 }, /* R2015 - SLIMTX4MIX Input 4 Volume */
- { 0x000007E0, 0x0000 }, /* R2016 - SLIMTX5MIX Input 1 Source */
- { 0x000007E1, 0x0080 }, /* R2017 - SLIMTX5MIX Input 1 Volume */
- { 0x000007E2, 0x0000 }, /* R2018 - SLIMTX5MIX Input 2 Source */
- { 0x000007E3, 0x0080 }, /* R2019 - SLIMTX5MIX Input 2 Volume */
- { 0x000007E4, 0x0000 }, /* R2020 - SLIMTX5MIX Input 3 Source */
- { 0x000007E5, 0x0080 }, /* R2021 - SLIMTX5MIX Input 3 Volume */
- { 0x000007E6, 0x0000 }, /* R2022 - SLIMTX5MIX Input 4 Source */
- { 0x000007E7, 0x0080 }, /* R2023 - SLIMTX5MIX Input 4 Volume */
- { 0x000007E8, 0x0000 }, /* R2024 - SLIMTX6MIX Input 1 Source */
- { 0x000007E9, 0x0080 }, /* R2025 - SLIMTX6MIX Input 1 Volume */
- { 0x000007EA, 0x0000 }, /* R2026 - SLIMTX6MIX Input 2 Source */
- { 0x000007EB, 0x0080 }, /* R2027 - SLIMTX6MIX Input 2 Volume */
- { 0x000007EC, 0x0000 }, /* R2028 - SLIMTX6MIX Input 3 Source */
- { 0x000007ED, 0x0080 }, /* R2029 - SLIMTX6MIX Input 3 Volume */
- { 0x000007EE, 0x0000 }, /* R2030 - SLIMTX6MIX Input 4 Source */
- { 0x000007EF, 0x0080 }, /* R2031 - SLIMTX6MIX Input 4 Volume */
- { 0x000007F0, 0x0000 }, /* R2032 - SLIMTX7MIX Input 1 Source */
- { 0x000007F1, 0x0080 }, /* R2033 - SLIMTX7MIX Input 1 Volume */
- { 0x000007F2, 0x0000 }, /* R2034 - SLIMTX7MIX Input 2 Source */
- { 0x000007F3, 0x0080 }, /* R2035 - SLIMTX7MIX Input 2 Volume */
- { 0x000007F4, 0x0000 }, /* R2036 - SLIMTX7MIX Input 3 Source */
- { 0x000007F5, 0x0080 }, /* R2037 - SLIMTX7MIX Input 3 Volume */
- { 0x000007F6, 0x0000 }, /* R2038 - SLIMTX7MIX Input 4 Source */
- { 0x000007F7, 0x0080 }, /* R2039 - SLIMTX7MIX Input 4 Volume */
- { 0x000007F8, 0x0000 }, /* R2040 - SLIMTX8MIX Input 1 Source */
- { 0x000007F9, 0x0080 }, /* R2041 - SLIMTX8MIX Input 1 Volume */
- { 0x000007FA, 0x0000 }, /* R2042 - SLIMTX8MIX Input 2 Source */
- { 0x000007FB, 0x0080 }, /* R2043 - SLIMTX8MIX Input 2 Volume */
- { 0x000007FC, 0x0000 }, /* R2044 - SLIMTX8MIX Input 3 Source */
- { 0x000007FD, 0x0080 }, /* R2045 - SLIMTX8MIX Input 3 Volume */
- { 0x000007FE, 0x0000 }, /* R2046 - SLIMTX8MIX Input 4 Source */
- { 0x000007FF, 0x0080 }, /* R2047 - SLIMTX8MIX Input 4 Volume */
- { 0x00000880, 0x0000 }, /* R2176 - EQ1MIX Input 1 Source */
- { 0x00000881, 0x0080 }, /* R2177 - EQ1MIX Input 1 Volume */
- { 0x00000882, 0x0000 }, /* R2178 - EQ1MIX Input 2 Source */
- { 0x00000883, 0x0080 }, /* R2179 - EQ1MIX Input 2 Volume */
- { 0x00000884, 0x0000 }, /* R2180 - EQ1MIX Input 3 Source */
- { 0x00000885, 0x0080 }, /* R2181 - EQ1MIX Input 3 Volume */
- { 0x00000886, 0x0000 }, /* R2182 - EQ1MIX Input 4 Source */
- { 0x00000887, 0x0080 }, /* R2183 - EQ1MIX Input 4 Volume */
- { 0x00000888, 0x0000 }, /* R2184 - EQ2MIX Input 1 Source */
- { 0x00000889, 0x0080 }, /* R2185 - EQ2MIX Input 1 Volume */
- { 0x0000088A, 0x0000 }, /* R2186 - EQ2MIX Input 2 Source */
- { 0x0000088B, 0x0080 }, /* R2187 - EQ2MIX Input 2 Volume */
- { 0x0000088C, 0x0000 }, /* R2188 - EQ2MIX Input 3 Source */
- { 0x0000088D, 0x0080 }, /* R2189 - EQ2MIX Input 3 Volume */
- { 0x0000088E, 0x0000 }, /* R2190 - EQ2MIX Input 4 Source */
- { 0x0000088F, 0x0080 }, /* R2191 - EQ2MIX Input 4 Volume */
- { 0x00000890, 0x0000 }, /* R2192 - EQ3MIX Input 1 Source */
- { 0x00000891, 0x0080 }, /* R2193 - EQ3MIX Input 1 Volume */
- { 0x00000892, 0x0000 }, /* R2194 - EQ3MIX Input 2 Source */
- { 0x00000893, 0x0080 }, /* R2195 - EQ3MIX Input 2 Volume */
- { 0x00000894, 0x0000 }, /* R2196 - EQ3MIX Input 3 Source */
- { 0x00000895, 0x0080 }, /* R2197 - EQ3MIX Input 3 Volume */
- { 0x00000896, 0x0000 }, /* R2198 - EQ3MIX Input 4 Source */
- { 0x00000897, 0x0080 }, /* R2199 - EQ3MIX Input 4 Volume */
- { 0x00000898, 0x0000 }, /* R2200 - EQ4MIX Input 1 Source */
- { 0x00000899, 0x0080 }, /* R2201 - EQ4MIX Input 1 Volume */
- { 0x0000089A, 0x0000 }, /* R2202 - EQ4MIX Input 2 Source */
- { 0x0000089B, 0x0080 }, /* R2203 - EQ4MIX Input 2 Volume */
- { 0x0000089C, 0x0000 }, /* R2204 - EQ4MIX Input 3 Source */
- { 0x0000089D, 0x0080 }, /* R2205 - EQ4MIX Input 3 Volume */
- { 0x0000089E, 0x0000 }, /* R2206 - EQ4MIX Input 4 Source */
- { 0x0000089F, 0x0080 }, /* R2207 - EQ4MIX Input 4 Volume */
- { 0x000008C0, 0x0000 }, /* R2240 - DRC1LMIX Input 1 Source */
- { 0x000008C1, 0x0080 }, /* R2241 - DRC1LMIX Input 1 Volume */
- { 0x000008C2, 0x0000 }, /* R2242 - DRC1LMIX Input 2 Source */
- { 0x000008C3, 0x0080 }, /* R2243 - DRC1LMIX Input 2 Volume */
- { 0x000008C4, 0x0000 }, /* R2244 - DRC1LMIX Input 3 Source */
- { 0x000008C5, 0x0080 }, /* R2245 - DRC1LMIX Input 3 Volume */
- { 0x000008C6, 0x0000 }, /* R2246 - DRC1LMIX Input 4 Source */
- { 0x000008C7, 0x0080 }, /* R2247 - DRC1LMIX Input 4 Volume */
- { 0x000008C8, 0x0000 }, /* R2248 - DRC1RMIX Input 1 Source */
- { 0x000008C9, 0x0080 }, /* R2249 - DRC1RMIX Input 1 Volume */
- { 0x000008CA, 0x0000 }, /* R2250 - DRC1RMIX Input 2 Source */
- { 0x000008CB, 0x0080 }, /* R2251 - DRC1RMIX Input 2 Volume */
- { 0x000008CC, 0x0000 }, /* R2252 - DRC1RMIX Input 3 Source */
- { 0x000008CD, 0x0080 }, /* R2253 - DRC1RMIX Input 3 Volume */
- { 0x000008CE, 0x0000 }, /* R2254 - DRC1RMIX Input 4 Source */
- { 0x000008CF, 0x0080 }, /* R2255 - DRC1RMIX Input 4 Volume */
- { 0x00000900, 0x0000 }, /* R2304 - HPLP1MIX Input 1 Source */
- { 0x00000901, 0x0080 }, /* R2305 - HPLP1MIX Input 1 Volume */
- { 0x00000902, 0x0000 }, /* R2306 - HPLP1MIX Input 2 Source */
- { 0x00000903, 0x0080 }, /* R2307 - HPLP1MIX Input 2 Volume */
- { 0x00000904, 0x0000 }, /* R2308 - HPLP1MIX Input 3 Source */
- { 0x00000905, 0x0080 }, /* R2309 - HPLP1MIX Input 3 Volume */
- { 0x00000906, 0x0000 }, /* R2310 - HPLP1MIX Input 4 Source */
- { 0x00000907, 0x0080 }, /* R2311 - HPLP1MIX Input 4 Volume */
- { 0x00000908, 0x0000 }, /* R2312 - HPLP2MIX Input 1 Source */
- { 0x00000909, 0x0080 }, /* R2313 - HPLP2MIX Input 1 Volume */
- { 0x0000090A, 0x0000 }, /* R2314 - HPLP2MIX Input 2 Source */
- { 0x0000090B, 0x0080 }, /* R2315 - HPLP2MIX Input 2 Volume */
- { 0x0000090C, 0x0000 }, /* R2316 - HPLP2MIX Input 3 Source */
- { 0x0000090D, 0x0080 }, /* R2317 - HPLP2MIX Input 3 Volume */
- { 0x0000090E, 0x0000 }, /* R2318 - HPLP2MIX Input 4 Source */
- { 0x0000090F, 0x0080 }, /* R2319 - HPLP2MIX Input 4 Volume */
- { 0x00000910, 0x0000 }, /* R2320 - HPLP3MIX Input 1 Source */
- { 0x00000911, 0x0080 }, /* R2321 - HPLP3MIX Input 1 Volume */
- { 0x00000912, 0x0000 }, /* R2322 - HPLP3MIX Input 2 Source */
- { 0x00000913, 0x0080 }, /* R2323 - HPLP3MIX Input 2 Volume */
- { 0x00000914, 0x0000 }, /* R2324 - HPLP3MIX Input 3 Source */
- { 0x00000915, 0x0080 }, /* R2325 - HPLP3MIX Input 3 Volume */
- { 0x00000916, 0x0000 }, /* R2326 - HPLP3MIX Input 4 Source */
- { 0x00000917, 0x0080 }, /* R2327 - HPLP3MIX Input 4 Volume */
- { 0x00000918, 0x0000 }, /* R2328 - HPLP4MIX Input 1 Source */
- { 0x00000919, 0x0080 }, /* R2329 - HPLP4MIX Input 1 Volume */
- { 0x0000091A, 0x0000 }, /* R2330 - HPLP4MIX Input 2 Source */
- { 0x0000091B, 0x0080 }, /* R2331 - HPLP4MIX Input 2 Volume */
- { 0x0000091C, 0x0000 }, /* R2332 - HPLP4MIX Input 3 Source */
- { 0x0000091D, 0x0080 }, /* R2333 - HPLP4MIX Input 3 Volume */
- { 0x0000091E, 0x0000 }, /* R2334 - HPLP4MIX Input 4 Source */
- { 0x0000091F, 0x0080 }, /* R2335 - HPLP4MIX Input 4 Volume */
- { 0x00000940, 0x0000 }, /* R2368 - DSP1LMIX Input 1 Source */
- { 0x00000941, 0x0080 }, /* R2369 - DSP1LMIX Input 1 Volume */
- { 0x00000942, 0x0000 }, /* R2370 - DSP1LMIX Input 2 Source */
- { 0x00000943, 0x0080 }, /* R2371 - DSP1LMIX Input 2 Volume */
- { 0x00000944, 0x0000 }, /* R2372 - DSP1LMIX Input 3 Source */
- { 0x00000945, 0x0080 }, /* R2373 - DSP1LMIX Input 3 Volume */
- { 0x00000946, 0x0000 }, /* R2374 - DSP1LMIX Input 4 Source */
- { 0x00000947, 0x0080 }, /* R2375 - DSP1LMIX Input 4 Volume */
- { 0x00000948, 0x0000 }, /* R2376 - DSP1RMIX Input 1 Source */
- { 0x00000949, 0x0080 }, /* R2377 - DSP1RMIX Input 1 Volume */
- { 0x0000094A, 0x0000 }, /* R2378 - DSP1RMIX Input 2 Source */
- { 0x0000094B, 0x0080 }, /* R2379 - DSP1RMIX Input 2 Volume */
- { 0x0000094C, 0x0000 }, /* R2380 - DSP1RMIX Input 3 Source */
- { 0x0000094D, 0x0080 }, /* R2381 - DSP1RMIX Input 3 Volume */
- { 0x0000094E, 0x0000 }, /* R2382 - DSP1RMIX Input 4 Source */
- { 0x0000094F, 0x0080 }, /* R2383 - DSP1RMIX Input 4 Volume */
- { 0x00000950, 0x0000 }, /* R2384 - DSP1AUX1MIX Input 1 Source */
- { 0x00000958, 0x0000 }, /* R2392 - DSP1AUX2MIX Input 1 Source */
- { 0x00000960, 0x0000 }, /* R2400 - DSP1AUX3MIX Input 1 Source */
- { 0x00000968, 0x0000 }, /* R2408 - DSP1AUX4MIX Input 1 Source */
- { 0x00000970, 0x0000 }, /* R2416 - DSP1AUX5MIX Input 1 Source */
- { 0x00000978, 0x0000 }, /* R2424 - DSP1AUX6MIX Input 1 Source */
- { 0x00000A80, 0x0000 }, /* R2688 - ASRC1LMIX Input 1 Source */
- { 0x00000A88, 0x0000 }, /* R2696 - ASRC1RMIX Input 1 Source */
- { 0x00000A90, 0x0000 }, /* R2704 - ASRC2LMIX Input 1 Source */
- { 0x00000A98, 0x0000 }, /* R2712 - ASRC2RMIX Input 1 Source */
- { 0x00000B00, 0x0000 }, /* R2816 - ISRC1DEC1MIX Input 1 Source */
- { 0x00000B08, 0x0000 }, /* R2824 - ISRC1DEC2MIX Input 1 Source */
- { 0x00000B20, 0x0000 }, /* R2848 - ISRC1INT1MIX Input 1 Source */
- { 0x00000B28, 0x0000 }, /* R2856 - ISRC1INT2MIX Input 1 Source */
- { 0x00000B40, 0x0000 }, /* R2880 - ISRC2DEC1MIX Input 1 Source */
- { 0x00000B48, 0x0000 }, /* R2888 - ISRC2DEC2MIX Input 1 Source */
- { 0x00000B60, 0x0000 }, /* R2912 - ISRC2INT1MIX Input 1 Source */
- { 0x00000B68, 0x0000 }, /* R2920 - ISRC2INT2MIX Input 1 Source */
- { 0x00000C00, 0xA101 }, /* R3072 - GPIO1 CTRL */
- { 0x00000C01, 0xA101 }, /* R3073 - GPIO2 CTRL */
- { 0x00000C02, 0xA101 }, /* R3074 - GPIO3 CTRL */
- { 0x00000C03, 0xA101 }, /* R3075 - GPIO4 CTRL */
- { 0x00000C04, 0xA101 }, /* R3076 - GPIO5 CTRL */
- { 0x00000C0F, 0x0400 }, /* R3087 - IRQ CTRL 1 */
- { 0x00000C10, 0x1000 }, /* R3088 - GPIO Debounce Config */
- { 0x00000C20, 0x8002 }, /* R3104 - Misc Pad Ctrl 1 */
+ { 0x00000490, 0x0069 }, /* R1168 - PDM SPK1 CTRL 1 */
+ { 0x00000491, 0x0000 }, /* R1169 - PDM SPK1 CTRL 2 */
+ { 0x00000500, 0x000C }, /* R1280 - AIF1 BCLK Ctrl */
+ { 0x00000501, 0x0008 }, /* R1281 - AIF1 Tx Pin Ctrl */
+ { 0x00000502, 0x0000 }, /* R1282 - AIF1 Rx Pin Ctrl */
+ { 0x00000503, 0x0000 }, /* R1283 - AIF1 Rate Ctrl */
+ { 0x00000504, 0x0000 }, /* R1284 - AIF1 Format */
+ { 0x00000505, 0x0040 }, /* R1285 - AIF1 Tx BCLK Rate */
+ { 0x00000506, 0x0040 }, /* R1286 - AIF1 Rx BCLK Rate */
+ { 0x00000507, 0x1818 }, /* R1287 - AIF1 Frame Ctrl 1 */
+ { 0x00000508, 0x1818 }, /* R1288 - AIF1 Frame Ctrl 2 */
+ { 0x00000509, 0x0000 }, /* R1289 - AIF1 Frame Ctrl 3 */
+ { 0x0000050A, 0x0001 }, /* R1290 - AIF1 Frame Ctrl 4 */
+ { 0x0000050B, 0x0002 }, /* R1291 - AIF1 Frame Ctrl 5 */
+ { 0x0000050C, 0x0003 }, /* R1292 - AIF1 Frame Ctrl 6 */
+ { 0x0000050D, 0x0004 }, /* R1293 - AIF1 Frame Ctrl 7 */
+ { 0x0000050E, 0x0005 }, /* R1294 - AIF1 Frame Ctrl 8 */
+ { 0x0000050F, 0x0006 }, /* R1295 - AIF1 Frame Ctrl 9 */
+ { 0x00000510, 0x0007 }, /* R1296 - AIF1 Frame Ctrl 10 */
+ { 0x00000511, 0x0000 }, /* R1297 - AIF1 Frame Ctrl 11 */
+ { 0x00000512, 0x0001 }, /* R1298 - AIF1 Frame Ctrl 12 */
+ { 0x00000513, 0x0002 }, /* R1299 - AIF1 Frame Ctrl 13 */
+ { 0x00000514, 0x0003 }, /* R1300 - AIF1 Frame Ctrl 14 */
+ { 0x00000515, 0x0004 }, /* R1301 - AIF1 Frame Ctrl 15 */
+ { 0x00000516, 0x0005 }, /* R1302 - AIF1 Frame Ctrl 16 */
+ { 0x00000517, 0x0006 }, /* R1303 - AIF1 Frame Ctrl 17 */
+ { 0x00000518, 0x0007 }, /* R1304 - AIF1 Frame Ctrl 18 */
+ { 0x00000519, 0x0000 }, /* R1305 - AIF1 Tx Enables */
+ { 0x0000051A, 0x0000 }, /* R1306 - AIF1 Rx Enables */
+ { 0x00000540, 0x000C }, /* R1344 - AIF2 BCLK Ctrl */
+ { 0x00000541, 0x0008 }, /* R1345 - AIF2 Tx Pin Ctrl */
+ { 0x00000542, 0x0000 }, /* R1346 - AIF2 Rx Pin Ctrl */
+ { 0x00000543, 0x0000 }, /* R1347 - AIF2 Rate Ctrl */
+ { 0x00000544, 0x0000 }, /* R1348 - AIF2 Format */
+ { 0x00000545, 0x0040 }, /* R1349 - AIF2 Tx BCLK Rate */
+ { 0x00000546, 0x0040 }, /* R1350 - AIF2 Rx BCLK Rate */
+ { 0x00000547, 0x1818 }, /* R1351 - AIF2 Frame Ctrl 1 */
+ { 0x00000548, 0x1818 }, /* R1352 - AIF2 Frame Ctrl 2 */
+ { 0x00000549, 0x0000 }, /* R1353 - AIF2 Frame Ctrl 3 */
+ { 0x0000054A, 0x0001 }, /* R1354 - AIF2 Frame Ctrl 4 */
+ { 0x00000551, 0x0000 }, /* R1361 - AIF2 Frame Ctrl 11 */
+ { 0x00000552, 0x0001 }, /* R1362 - AIF2 Frame Ctrl 12 */
+ { 0x00000559, 0x0000 }, /* R1369 - AIF2 Tx Enables */
+ { 0x0000055A, 0x0000 }, /* R1370 - AIF2 Rx Enables */
+ { 0x00000580, 0x000C }, /* R1408 - AIF3 BCLK Ctrl */
+ { 0x00000581, 0x0008 }, /* R1409 - AIF3 Tx Pin Ctrl */
+ { 0x00000582, 0x0000 }, /* R1410 - AIF3 Rx Pin Ctrl */
+ { 0x00000583, 0x0000 }, /* R1411 - AIF3 Rate Ctrl */
+ { 0x00000584, 0x0000 }, /* R1412 - AIF3 Format */
+ { 0x00000585, 0x0040 }, /* R1413 - AIF3 Tx BCLK Rate */
+ { 0x00000586, 0x0040 }, /* R1414 - AIF3 Rx BCLK Rate */
+ { 0x00000587, 0x1818 }, /* R1415 - AIF3 Frame Ctrl 1 */
+ { 0x00000588, 0x1818 }, /* R1416 - AIF3 Frame Ctrl 2 */
+ { 0x00000589, 0x0000 }, /* R1417 - AIF3 Frame Ctrl 3 */
+ { 0x0000058A, 0x0001 }, /* R1418 - AIF3 Frame Ctrl 4 */
+ { 0x00000591, 0x0000 }, /* R1425 - AIF3 Frame Ctrl 11 */
+ { 0x00000592, 0x0001 }, /* R1426 - AIF3 Frame Ctrl 12 */
+ { 0x00000599, 0x0000 }, /* R1433 - AIF3 Tx Enables */
+ { 0x0000059A, 0x0000 }, /* R1434 - AIF3 Rx Enables */
+ { 0x000005E3, 0x0004 }, /* R1507 - SLIMbus Framer Ref Gear */
+ { 0x000005E5, 0x0000 }, /* R1509 - SLIMbus Rates 1 */
+ { 0x000005E6, 0x0000 }, /* R1510 - SLIMbus Rates 2 */
+ { 0x000005E7, 0x0000 }, /* R1511 - SLIMbus Rates 3 */
+ { 0x000005E8, 0x0000 }, /* R1512 - SLIMbus Rates 4 */
+ { 0x000005E9, 0x0000 }, /* R1513 - SLIMbus Rates 5 */
+ { 0x000005EA, 0x0000 }, /* R1514 - SLIMbus Rates 6 */
+ { 0x000005EB, 0x0000 }, /* R1515 - SLIMbus Rates 7 */
+ { 0x000005EC, 0x0000 }, /* R1516 - SLIMbus Rates 8 */
+ { 0x000005F5, 0x0000 }, /* R1525 - SLIMbus RX Channel Enable */
+ { 0x000005F6, 0x0000 }, /* R1526 - SLIMbus TX Channel Enable */
+ { 0x00000640, 0x0000 }, /* R1600 - PWM1MIX Input 1 Source */
+ { 0x00000641, 0x0080 }, /* R1601 - PWM1MIX Input 1 Volume */
+ { 0x00000642, 0x0000 }, /* R1602 - PWM1MIX Input 2 Source */
+ { 0x00000643, 0x0080 }, /* R1603 - PWM1MIX Input 2 Volume */
+ { 0x00000644, 0x0000 }, /* R1604 - PWM1MIX Input 3 Source */
+ { 0x00000645, 0x0080 }, /* R1605 - PWM1MIX Input 3 Volume */
+ { 0x00000646, 0x0000 }, /* R1606 - PWM1MIX Input 4 Source */
+ { 0x00000647, 0x0080 }, /* R1607 - PWM1MIX Input 4 Volume */
+ { 0x00000648, 0x0000 }, /* R1608 - PWM2MIX Input 1 Source */
+ { 0x00000649, 0x0080 }, /* R1609 - PWM2MIX Input 1 Volume */
+ { 0x0000064A, 0x0000 }, /* R1610 - PWM2MIX Input 2 Source */
+ { 0x0000064B, 0x0080 }, /* R1611 - PWM2MIX Input 2 Volume */
+ { 0x0000064C, 0x0000 }, /* R1612 - PWM2MIX Input 3 Source */
+ { 0x0000064D, 0x0080 }, /* R1613 - PWM2MIX Input 3 Volume */
+ { 0x0000064E, 0x0000 }, /* R1614 - PWM2MIX Input 4 Source */
+ { 0x0000064F, 0x0080 }, /* R1615 - PWM2MIX Input 4 Volume */
+ { 0x00000660, 0x0000 }, /* R1632 - MICMIX Input 1 Source */
+ { 0x00000661, 0x0080 }, /* R1633 - MICMIX Input 1 Volume */
+ { 0x00000662, 0x0000 }, /* R1634 - MICMIX Input 2 Source */
+ { 0x00000663, 0x0080 }, /* R1635 - MICMIX Input 2 Volume */
+ { 0x00000664, 0x0000 }, /* R1636 - MICMIX Input 3 Source */
+ { 0x00000665, 0x0080 }, /* R1637 - MICMIX Input 3 Volume */
+ { 0x00000666, 0x0000 }, /* R1638 - MICMIX Input 4 Source */
+ { 0x00000667, 0x0080 }, /* R1639 - MICMIX Input 4 Volume */
+ { 0x00000668, 0x0000 }, /* R1640 - NOISEMIX Input 1 Source */
+ { 0x00000669, 0x0080 }, /* R1641 - NOISEMIX Input 1 Volume */
+ { 0x0000066A, 0x0000 }, /* R1642 - NOISEMIX Input 2 Source */
+ { 0x0000066B, 0x0080 }, /* R1643 - NOISEMIX Input 2 Volume */
+ { 0x0000066C, 0x0000 }, /* R1644 - NOISEMIX Input 3 Source */
+ { 0x0000066D, 0x0080 }, /* R1645 - NOISEMIX Input 3 Volume */
+ { 0x0000066E, 0x0000 }, /* R1646 - NOISEMIX Input 4 Source */
+ { 0x0000066F, 0x0080 }, /* R1647 - NOISEMIX Input 4 Volume */
+ { 0x00000680, 0x0000 }, /* R1664 - OUT1LMIX Input 1 Source */
+ { 0x00000681, 0x0080 }, /* R1665 - OUT1LMIX Input 1 Volume */
+ { 0x00000682, 0x0000 }, /* R1666 - OUT1LMIX Input 2 Source */
+ { 0x00000683, 0x0080 }, /* R1667 - OUT1LMIX Input 2 Volume */
+ { 0x00000684, 0x0000 }, /* R1668 - OUT1LMIX Input 3 Source */
+ { 0x00000685, 0x0080 }, /* R1669 - OUT1LMIX Input 3 Volume */
+ { 0x00000686, 0x0000 }, /* R1670 - OUT1LMIX Input 4 Source */
+ { 0x00000687, 0x0080 }, /* R1671 - OUT1LMIX Input 4 Volume */
+ { 0x00000688, 0x0000 }, /* R1672 - OUT1RMIX Input 1 Source */
+ { 0x00000689, 0x0080 }, /* R1673 - OUT1RMIX Input 1 Volume */
+ { 0x0000068A, 0x0000 }, /* R1674 - OUT1RMIX Input 2 Source */
+ { 0x0000068B, 0x0080 }, /* R1675 - OUT1RMIX Input 2 Volume */
+ { 0x0000068C, 0x0000 }, /* R1676 - OUT1RMIX Input 3 Source */
+ { 0x0000068D, 0x0080 }, /* R1677 - OUT1RMIX Input 3 Volume */
+ { 0x0000068E, 0x0000 }, /* R1678 - OUT1RMIX Input 4 Source */
+ { 0x0000068F, 0x0080 }, /* R1679 - OUT1RMIX Input 4 Volume */
+ { 0x00000690, 0x0000 }, /* R1680 - OUT2LMIX Input 1 Source */
+ { 0x00000691, 0x0080 }, /* R1681 - OUT2LMIX Input 1 Volume */
+ { 0x00000692, 0x0000 }, /* R1682 - OUT2LMIX Input 2 Source */
+ { 0x00000693, 0x0080 }, /* R1683 - OUT2LMIX Input 2 Volume */
+ { 0x00000694, 0x0000 }, /* R1684 - OUT2LMIX Input 3 Source */
+ { 0x00000695, 0x0080 }, /* R1685 - OUT2LMIX Input 3 Volume */
+ { 0x00000696, 0x0000 }, /* R1686 - OUT2LMIX Input 4 Source */
+ { 0x00000697, 0x0080 }, /* R1687 - OUT2LMIX Input 4 Volume */
+ { 0x00000698, 0x0000 }, /* R1688 - OUT2RMIX Input 1 Source */
+ { 0x00000699, 0x0080 }, /* R1689 - OUT2RMIX Input 1 Volume */
+ { 0x0000069A, 0x0000 }, /* R1690 - OUT2RMIX Input 2 Source */
+ { 0x0000069B, 0x0080 }, /* R1691 - OUT2RMIX Input 2 Volume */
+ { 0x0000069C, 0x0000 }, /* R1692 - OUT2RMIX Input 3 Source */
+ { 0x0000069D, 0x0080 }, /* R1693 - OUT2RMIX Input 3 Volume */
+ { 0x0000069E, 0x0000 }, /* R1694 - OUT2RMIX Input 4 Source */
+ { 0x0000069F, 0x0080 }, /* R1695 - OUT2RMIX Input 4 Volume */
+ { 0x000006A0, 0x0000 }, /* R1696 - OUT3LMIX Input 1 Source */
+ { 0x000006A1, 0x0080 }, /* R1697 - OUT3LMIX Input 1 Volume */
+ { 0x000006A2, 0x0000 }, /* R1698 - OUT3LMIX Input 2 Source */
+ { 0x000006A3, 0x0080 }, /* R1699 - OUT3LMIX Input 2 Volume */
+ { 0x000006A4, 0x0000 }, /* R1700 - OUT3LMIX Input 3 Source */
+ { 0x000006A5, 0x0080 }, /* R1701 - OUT3LMIX Input 3 Volume */
+ { 0x000006A6, 0x0000 }, /* R1702 - OUT3LMIX Input 4 Source */
+ { 0x000006A7, 0x0080 }, /* R1703 - OUT3LMIX Input 4 Volume */
+ { 0x000006B0, 0x0000 }, /* R1712 - OUT4LMIX Input 1 Source */
+ { 0x000006B1, 0x0080 }, /* R1713 - OUT4LMIX Input 1 Volume */
+ { 0x000006B2, 0x0000 }, /* R1714 - OUT4LMIX Input 2 Source */
+ { 0x000006B3, 0x0080 }, /* R1715 - OUT4LMIX Input 2 Volume */
+ { 0x000006B4, 0x0000 }, /* R1716 - OUT4LMIX Input 3 Source */
+ { 0x000006B5, 0x0080 }, /* R1717 - OUT4LMIX Input 3 Volume */
+ { 0x000006B6, 0x0000 }, /* R1718 - OUT4LMIX Input 4 Source */
+ { 0x000006B7, 0x0080 }, /* R1719 - OUT4LMIX Input 4 Volume */
+ { 0x000006B8, 0x0000 }, /* R1720 - OUT4RMIX Input 1 Source */
+ { 0x000006B9, 0x0080 }, /* R1721 - OUT4RMIX Input 1 Volume */
+ { 0x000006BA, 0x0000 }, /* R1722 - OUT4RMIX Input 2 Source */
+ { 0x000006BB, 0x0080 }, /* R1723 - OUT4RMIX Input 2 Volume */
+ { 0x000006BC, 0x0000 }, /* R1724 - OUT4RMIX Input 3 Source */
+ { 0x000006BD, 0x0080 }, /* R1725 - OUT4RMIX Input 3 Volume */
+ { 0x000006BE, 0x0000 }, /* R1726 - OUT4RMIX Input 4 Source */
+ { 0x000006BF, 0x0080 }, /* R1727 - OUT4RMIX Input 4 Volume */
+ { 0x000006C0, 0x0000 }, /* R1728 - OUT5LMIX Input 1 Source */
+ { 0x000006C1, 0x0080 }, /* R1729 - OUT5LMIX Input 1 Volume */
+ { 0x000006C2, 0x0000 }, /* R1730 - OUT5LMIX Input 2 Source */
+ { 0x000006C3, 0x0080 }, /* R1731 - OUT5LMIX Input 2 Volume */
+ { 0x000006C4, 0x0000 }, /* R1732 - OUT5LMIX Input 3 Source */
+ { 0x000006C5, 0x0080 }, /* R1733 - OUT5LMIX Input 3 Volume */
+ { 0x000006C6, 0x0000 }, /* R1734 - OUT5LMIX Input 4 Source */
+ { 0x000006C7, 0x0080 }, /* R1735 - OUT5LMIX Input 4 Volume */
+ { 0x000006C8, 0x0000 }, /* R1736 - OUT5RMIX Input 1 Source */
+ { 0x000006C9, 0x0080 }, /* R1737 - OUT5RMIX Input 1 Volume */
+ { 0x000006CA, 0x0000 }, /* R1738 - OUT5RMIX Input 2 Source */
+ { 0x000006CB, 0x0080 }, /* R1739 - OUT5RMIX Input 2 Volume */
+ { 0x000006CC, 0x0000 }, /* R1740 - OUT5RMIX Input 3 Source */
+ { 0x000006CD, 0x0080 }, /* R1741 - OUT5RMIX Input 3 Volume */
+ { 0x000006CE, 0x0000 }, /* R1742 - OUT5RMIX Input 4 Source */
+ { 0x000006CF, 0x0080 }, /* R1743 - OUT5RMIX Input 4 Volume */
+ { 0x00000700, 0x0000 }, /* R1792 - AIF1TX1MIX Input 1 Source */
+ { 0x00000701, 0x0080 }, /* R1793 - AIF1TX1MIX Input 1 Volume */
+ { 0x00000702, 0x0000 }, /* R1794 - AIF1TX1MIX Input 2 Source */
+ { 0x00000703, 0x0080 }, /* R1795 - AIF1TX1MIX Input 2 Volume */
+ { 0x00000704, 0x0000 }, /* R1796 - AIF1TX1MIX Input 3 Source */
+ { 0x00000705, 0x0080 }, /* R1797 - AIF1TX1MIX Input 3 Volume */
+ { 0x00000706, 0x0000 }, /* R1798 - AIF1TX1MIX Input 4 Source */
+ { 0x00000707, 0x0080 }, /* R1799 - AIF1TX1MIX Input 4 Volume */
+ { 0x00000708, 0x0000 }, /* R1800 - AIF1TX2MIX Input 1 Source */
+ { 0x00000709, 0x0080 }, /* R1801 - AIF1TX2MIX Input 1 Volume */
+ { 0x0000070A, 0x0000 }, /* R1802 - AIF1TX2MIX Input 2 Source */
+ { 0x0000070B, 0x0080 }, /* R1803 - AIF1TX2MIX Input 2 Volume */
+ { 0x0000070C, 0x0000 }, /* R1804 - AIF1TX2MIX Input 3 Source */
+ { 0x0000070D, 0x0080 }, /* R1805 - AIF1TX2MIX Input 3 Volume */
+ { 0x0000070E, 0x0000 }, /* R1806 - AIF1TX2MIX Input 4 Source */
+ { 0x0000070F, 0x0080 }, /* R1807 - AIF1TX2MIX Input 4 Volume */
+ { 0x00000710, 0x0000 }, /* R1808 - AIF1TX3MIX Input 1 Source */
+ { 0x00000711, 0x0080 }, /* R1809 - AIF1TX3MIX Input 1 Volume */
+ { 0x00000712, 0x0000 }, /* R1810 - AIF1TX3MIX Input 2 Source */
+ { 0x00000713, 0x0080 }, /* R1811 - AIF1TX3MIX Input 2 Volume */
+ { 0x00000714, 0x0000 }, /* R1812 - AIF1TX3MIX Input 3 Source */
+ { 0x00000715, 0x0080 }, /* R1813 - AIF1TX3MIX Input 3 Volume */
+ { 0x00000716, 0x0000 }, /* R1814 - AIF1TX3MIX Input 4 Source */
+ { 0x00000717, 0x0080 }, /* R1815 - AIF1TX3MIX Input 4 Volume */
+ { 0x00000718, 0x0000 }, /* R1816 - AIF1TX4MIX Input 1 Source */
+ { 0x00000719, 0x0080 }, /* R1817 - AIF1TX4MIX Input 1 Volume */
+ { 0x0000071A, 0x0000 }, /* R1818 - AIF1TX4MIX Input 2 Source */
+ { 0x0000071B, 0x0080 }, /* R1819 - AIF1TX4MIX Input 2 Volume */
+ { 0x0000071C, 0x0000 }, /* R1820 - AIF1TX4MIX Input 3 Source */
+ { 0x0000071D, 0x0080 }, /* R1821 - AIF1TX4MIX Input 3 Volume */
+ { 0x0000071E, 0x0000 }, /* R1822 - AIF1TX4MIX Input 4 Source */
+ { 0x0000071F, 0x0080 }, /* R1823 - AIF1TX4MIX Input 4 Volume */
+ { 0x00000720, 0x0000 }, /* R1824 - AIF1TX5MIX Input 1 Source */
+ { 0x00000721, 0x0080 }, /* R1825 - AIF1TX5MIX Input 1 Volume */
+ { 0x00000722, 0x0000 }, /* R1826 - AIF1TX5MIX Input 2 Source */
+ { 0x00000723, 0x0080 }, /* R1827 - AIF1TX5MIX Input 2 Volume */
+ { 0x00000724, 0x0000 }, /* R1828 - AIF1TX5MIX Input 3 Source */
+ { 0x00000725, 0x0080 }, /* R1829 - AIF1TX5MIX Input 3 Volume */
+ { 0x00000726, 0x0000 }, /* R1830 - AIF1TX5MIX Input 4 Source */
+ { 0x00000727, 0x0080 }, /* R1831 - AIF1TX5MIX Input 4 Volume */
+ { 0x00000728, 0x0000 }, /* R1832 - AIF1TX6MIX Input 1 Source */
+ { 0x00000729, 0x0080 }, /* R1833 - AIF1TX6MIX Input 1 Volume */
+ { 0x0000072A, 0x0000 }, /* R1834 - AIF1TX6MIX Input 2 Source */
+ { 0x0000072B, 0x0080 }, /* R1835 - AIF1TX6MIX Input 2 Volume */
+ { 0x0000072C, 0x0000 }, /* R1836 - AIF1TX6MIX Input 3 Source */
+ { 0x0000072D, 0x0080 }, /* R1837 - AIF1TX6MIX Input 3 Volume */
+ { 0x0000072E, 0x0000 }, /* R1838 - AIF1TX6MIX Input 4 Source */
+ { 0x0000072F, 0x0080 }, /* R1839 - AIF1TX6MIX Input 4 Volume */
+ { 0x00000730, 0x0000 }, /* R1840 - AIF1TX7MIX Input 1 Source */
+ { 0x00000731, 0x0080 }, /* R1841 - AIF1TX7MIX Input 1 Volume */
+ { 0x00000732, 0x0000 }, /* R1842 - AIF1TX7MIX Input 2 Source */
+ { 0x00000733, 0x0080 }, /* R1843 - AIF1TX7MIX Input 2 Volume */
+ { 0x00000734, 0x0000 }, /* R1844 - AIF1TX7MIX Input 3 Source */
+ { 0x00000735, 0x0080 }, /* R1845 - AIF1TX7MIX Input 3 Volume */
+ { 0x00000736, 0x0000 }, /* R1846 - AIF1TX7MIX Input 4 Source */
+ { 0x00000737, 0x0080 }, /* R1847 - AIF1TX7MIX Input 4 Volume */
+ { 0x00000738, 0x0000 }, /* R1848 - AIF1TX8MIX Input 1 Source */
+ { 0x00000739, 0x0080 }, /* R1849 - AIF1TX8MIX Input 1 Volume */
+ { 0x0000073A, 0x0000 }, /* R1850 - AIF1TX8MIX Input 2 Source */
+ { 0x0000073B, 0x0080 }, /* R1851 - AIF1TX8MIX Input 2 Volume */
+ { 0x0000073C, 0x0000 }, /* R1852 - AIF1TX8MIX Input 3 Source */
+ { 0x0000073D, 0x0080 }, /* R1853 - AIF1TX8MIX Input 3 Volume */
+ { 0x0000073E, 0x0000 }, /* R1854 - AIF1TX8MIX Input 4 Source */
+ { 0x0000073F, 0x0080 }, /* R1855 - AIF1TX8MIX Input 4 Volume */
+ { 0x00000740, 0x0000 }, /* R1856 - AIF2TX1MIX Input 1 Source */
+ { 0x00000741, 0x0080 }, /* R1857 - AIF2TX1MIX Input 1 Volume */
+ { 0x00000742, 0x0000 }, /* R1858 - AIF2TX1MIX Input 2 Source */
+ { 0x00000743, 0x0080 }, /* R1859 - AIF2TX1MIX Input 2 Volume */
+ { 0x00000744, 0x0000 }, /* R1860 - AIF2TX1MIX Input 3 Source */
+ { 0x00000745, 0x0080 }, /* R1861 - AIF2TX1MIX Input 3 Volume */
+ { 0x00000746, 0x0000 }, /* R1862 - AIF2TX1MIX Input 4 Source */
+ { 0x00000747, 0x0080 }, /* R1863 - AIF2TX1MIX Input 4 Volume */
+ { 0x00000748, 0x0000 }, /* R1864 - AIF2TX2MIX Input 1 Source */
+ { 0x00000749, 0x0080 }, /* R1865 - AIF2TX2MIX Input 1 Volume */
+ { 0x0000074A, 0x0000 }, /* R1866 - AIF2TX2MIX Input 2 Source */
+ { 0x0000074B, 0x0080 }, /* R1867 - AIF2TX2MIX Input 2 Volume */
+ { 0x0000074C, 0x0000 }, /* R1868 - AIF2TX2MIX Input 3 Source */
+ { 0x0000074D, 0x0080 }, /* R1869 - AIF2TX2MIX Input 3 Volume */
+ { 0x0000074E, 0x0000 }, /* R1870 - AIF2TX2MIX Input 4 Source */
+ { 0x0000074F, 0x0080 }, /* R1871 - AIF2TX2MIX Input 4 Volume */
+ { 0x00000780, 0x0000 }, /* R1920 - AIF3TX1MIX Input 1 Source */
+ { 0x00000781, 0x0080 }, /* R1921 - AIF3TX1MIX Input 1 Volume */
+ { 0x00000782, 0x0000 }, /* R1922 - AIF3TX1MIX Input 2 Source */
+ { 0x00000783, 0x0080 }, /* R1923 - AIF3TX1MIX Input 2 Volume */
+ { 0x00000784, 0x0000 }, /* R1924 - AIF3TX1MIX Input 3 Source */
+ { 0x00000785, 0x0080 }, /* R1925 - AIF3TX1MIX Input 3 Volume */
+ { 0x00000786, 0x0000 }, /* R1926 - AIF3TX1MIX Input 4 Source */
+ { 0x00000787, 0x0080 }, /* R1927 - AIF3TX1MIX Input 4 Volume */
+ { 0x00000788, 0x0000 }, /* R1928 - AIF3TX2MIX Input 1 Source */
+ { 0x00000789, 0x0080 }, /* R1929 - AIF3TX2MIX Input 1 Volume */
+ { 0x0000078A, 0x0000 }, /* R1930 - AIF3TX2MIX Input 2 Source */
+ { 0x0000078B, 0x0080 }, /* R1931 - AIF3TX2MIX Input 2 Volume */
+ { 0x0000078C, 0x0000 }, /* R1932 - AIF3TX2MIX Input 3 Source */
+ { 0x0000078D, 0x0080 }, /* R1933 - AIF3TX2MIX Input 3 Volume */
+ { 0x0000078E, 0x0000 }, /* R1934 - AIF3TX2MIX Input 4 Source */
+ { 0x0000078F, 0x0080 }, /* R1935 - AIF3TX2MIX Input 4 Volume */
+ { 0x000007C0, 0x0000 }, /* R1984 - SLIMTX1MIX Input 1 Source */
+ { 0x000007C1, 0x0080 }, /* R1985 - SLIMTX1MIX Input 1 Volume */
+ { 0x000007C2, 0x0000 }, /* R1986 - SLIMTX1MIX Input 2 Source */
+ { 0x000007C3, 0x0080 }, /* R1987 - SLIMTX1MIX Input 2 Volume */
+ { 0x000007C4, 0x0000 }, /* R1988 - SLIMTX1MIX Input 3 Source */
+ { 0x000007C5, 0x0080 }, /* R1989 - SLIMTX1MIX Input 3 Volume */
+ { 0x000007C6, 0x0000 }, /* R1990 - SLIMTX1MIX Input 4 Source */
+ { 0x000007C7, 0x0080 }, /* R1991 - SLIMTX1MIX Input 4 Volume */
+ { 0x000007C8, 0x0000 }, /* R1992 - SLIMTX2MIX Input 1 Source */
+ { 0x000007C9, 0x0080 }, /* R1993 - SLIMTX2MIX Input 1 Volume */
+ { 0x000007CA, 0x0000 }, /* R1994 - SLIMTX2MIX Input 2 Source */
+ { 0x000007CB, 0x0080 }, /* R1995 - SLIMTX2MIX Input 2 Volume */
+ { 0x000007CC, 0x0000 }, /* R1996 - SLIMTX2MIX Input 3 Source */
+ { 0x000007CD, 0x0080 }, /* R1997 - SLIMTX2MIX Input 3 Volume */
+ { 0x000007CE, 0x0000 }, /* R1998 - SLIMTX2MIX Input 4 Source */
+ { 0x000007CF, 0x0080 }, /* R1999 - SLIMTX2MIX Input 4 Volume */
+ { 0x000007D0, 0x0000 }, /* R2000 - SLIMTX3MIX Input 1 Source */
+ { 0x000007D1, 0x0080 }, /* R2001 - SLIMTX3MIX Input 1 Volume */
+ { 0x000007D2, 0x0000 }, /* R2002 - SLIMTX3MIX Input 2 Source */
+ { 0x000007D3, 0x0080 }, /* R2003 - SLIMTX3MIX Input 2 Volume */
+ { 0x000007D4, 0x0000 }, /* R2004 - SLIMTX3MIX Input 3 Source */
+ { 0x000007D5, 0x0080 }, /* R2005 - SLIMTX3MIX Input 3 Volume */
+ { 0x000007D6, 0x0000 }, /* R2006 - SLIMTX3MIX Input 4 Source */
+ { 0x000007D7, 0x0080 }, /* R2007 - SLIMTX3MIX Input 4 Volume */
+ { 0x000007D8, 0x0000 }, /* R2008 - SLIMTX4MIX Input 1 Source */
+ { 0x000007D9, 0x0080 }, /* R2009 - SLIMTX4MIX Input 1 Volume */
+ { 0x000007DA, 0x0000 }, /* R2010 - SLIMTX4MIX Input 2 Source */
+ { 0x000007DB, 0x0080 }, /* R2011 - SLIMTX4MIX Input 2 Volume */
+ { 0x000007DC, 0x0000 }, /* R2012 - SLIMTX4MIX Input 3 Source */
+ { 0x000007DD, 0x0080 }, /* R2013 - SLIMTX4MIX Input 3 Volume */
+ { 0x000007DE, 0x0000 }, /* R2014 - SLIMTX4MIX Input 4 Source */
+ { 0x000007DF, 0x0080 }, /* R2015 - SLIMTX4MIX Input 4 Volume */
+ { 0x000007E0, 0x0000 }, /* R2016 - SLIMTX5MIX Input 1 Source */
+ { 0x000007E1, 0x0080 }, /* R2017 - SLIMTX5MIX Input 1 Volume */
+ { 0x000007E2, 0x0000 }, /* R2018 - SLIMTX5MIX Input 2 Source */
+ { 0x000007E3, 0x0080 }, /* R2019 - SLIMTX5MIX Input 2 Volume */
+ { 0x000007E4, 0x0000 }, /* R2020 - SLIMTX5MIX Input 3 Source */
+ { 0x000007E5, 0x0080 }, /* R2021 - SLIMTX5MIX Input 3 Volume */
+ { 0x000007E6, 0x0000 }, /* R2022 - SLIMTX5MIX Input 4 Source */
+ { 0x000007E7, 0x0080 }, /* R2023 - SLIMTX5MIX Input 4 Volume */
+ { 0x000007E8, 0x0000 }, /* R2024 - SLIMTX6MIX Input 1 Source */
+ { 0x000007E9, 0x0080 }, /* R2025 - SLIMTX6MIX Input 1 Volume */
+ { 0x000007EA, 0x0000 }, /* R2026 - SLIMTX6MIX Input 2 Source */
+ { 0x000007EB, 0x0080 }, /* R2027 - SLIMTX6MIX Input 2 Volume */
+ { 0x000007EC, 0x0000 }, /* R2028 - SLIMTX6MIX Input 3 Source */
+ { 0x000007ED, 0x0080 }, /* R2029 - SLIMTX6MIX Input 3 Volume */
+ { 0x000007EE, 0x0000 }, /* R2030 - SLIMTX6MIX Input 4 Source */
+ { 0x000007EF, 0x0080 }, /* R2031 - SLIMTX6MIX Input 4 Volume */
+ { 0x000007F0, 0x0000 }, /* R2032 - SLIMTX7MIX Input 1 Source */
+ { 0x000007F1, 0x0080 }, /* R2033 - SLIMTX7MIX Input 1 Volume */
+ { 0x000007F2, 0x0000 }, /* R2034 - SLIMTX7MIX Input 2 Source */
+ { 0x000007F3, 0x0080 }, /* R2035 - SLIMTX7MIX Input 2 Volume */
+ { 0x000007F4, 0x0000 }, /* R2036 - SLIMTX7MIX Input 3 Source */
+ { 0x000007F5, 0x0080 }, /* R2037 - SLIMTX7MIX Input 3 Volume */
+ { 0x000007F6, 0x0000 }, /* R2038 - SLIMTX7MIX Input 4 Source */
+ { 0x000007F7, 0x0080 }, /* R2039 - SLIMTX7MIX Input 4 Volume */
+ { 0x000007F8, 0x0000 }, /* R2040 - SLIMTX8MIX Input 1 Source */
+ { 0x000007F9, 0x0080 }, /* R2041 - SLIMTX8MIX Input 1 Volume */
+ { 0x000007FA, 0x0000 }, /* R2042 - SLIMTX8MIX Input 2 Source */
+ { 0x000007FB, 0x0080 }, /* R2043 - SLIMTX8MIX Input 2 Volume */
+ { 0x000007FC, 0x0000 }, /* R2044 - SLIMTX8MIX Input 3 Source */
+ { 0x000007FD, 0x0080 }, /* R2045 - SLIMTX8MIX Input 3 Volume */
+ { 0x000007FE, 0x0000 }, /* R2046 - SLIMTX8MIX Input 4 Source */
+ { 0x000007FF, 0x0080 }, /* R2047 - SLIMTX8MIX Input 4 Volume */
+ { 0x00000880, 0x0000 }, /* R2176 - EQ1MIX Input 1 Source */
+ { 0x00000881, 0x0080 }, /* R2177 - EQ1MIX Input 1 Volume */
+ { 0x00000882, 0x0000 }, /* R2178 - EQ1MIX Input 2 Source */
+ { 0x00000883, 0x0080 }, /* R2179 - EQ1MIX Input 2 Volume */
+ { 0x00000884, 0x0000 }, /* R2180 - EQ1MIX Input 3 Source */
+ { 0x00000885, 0x0080 }, /* R2181 - EQ1MIX Input 3 Volume */
+ { 0x00000886, 0x0000 }, /* R2182 - EQ1MIX Input 4 Source */
+ { 0x00000887, 0x0080 }, /* R2183 - EQ1MIX Input 4 Volume */
+ { 0x00000888, 0x0000 }, /* R2184 - EQ2MIX Input 1 Source */
+ { 0x00000889, 0x0080 }, /* R2185 - EQ2MIX Input 1 Volume */
+ { 0x0000088A, 0x0000 }, /* R2186 - EQ2MIX Input 2 Source */
+ { 0x0000088B, 0x0080 }, /* R2187 - EQ2MIX Input 2 Volume */
+ { 0x0000088C, 0x0000 }, /* R2188 - EQ2MIX Input 3 Source */
+ { 0x0000088D, 0x0080 }, /* R2189 - EQ2MIX Input 3 Volume */
+ { 0x0000088E, 0x0000 }, /* R2190 - EQ2MIX Input 4 Source */
+ { 0x0000088F, 0x0080 }, /* R2191 - EQ2MIX Input 4 Volume */
+ { 0x00000890, 0x0000 }, /* R2192 - EQ3MIX Input 1 Source */
+ { 0x00000891, 0x0080 }, /* R2193 - EQ3MIX Input 1 Volume */
+ { 0x00000892, 0x0000 }, /* R2194 - EQ3MIX Input 2 Source */
+ { 0x00000893, 0x0080 }, /* R2195 - EQ3MIX Input 2 Volume */
+ { 0x00000894, 0x0000 }, /* R2196 - EQ3MIX Input 3 Source */
+ { 0x00000895, 0x0080 }, /* R2197 - EQ3MIX Input 3 Volume */
+ { 0x00000896, 0x0000 }, /* R2198 - EQ3MIX Input 4 Source */
+ { 0x00000897, 0x0080 }, /* R2199 - EQ3MIX Input 4 Volume */
+ { 0x00000898, 0x0000 }, /* R2200 - EQ4MIX Input 1 Source */
+ { 0x00000899, 0x0080 }, /* R2201 - EQ4MIX Input 1 Volume */
+ { 0x0000089A, 0x0000 }, /* R2202 - EQ4MIX Input 2 Source */
+ { 0x0000089B, 0x0080 }, /* R2203 - EQ4MIX Input 2 Volume */
+ { 0x0000089C, 0x0000 }, /* R2204 - EQ4MIX Input 3 Source */
+ { 0x0000089D, 0x0080 }, /* R2205 - EQ4MIX Input 3 Volume */
+ { 0x0000089E, 0x0000 }, /* R2206 - EQ4MIX Input 4 Source */
+ { 0x0000089F, 0x0080 }, /* R2207 - EQ4MIX Input 4 Volume */
+ { 0x000008C0, 0x0000 }, /* R2240 - DRC1LMIX Input 1 Source */
+ { 0x000008C1, 0x0080 }, /* R2241 - DRC1LMIX Input 1 Volume */
+ { 0x000008C2, 0x0000 }, /* R2242 - DRC1LMIX Input 2 Source */
+ { 0x000008C3, 0x0080 }, /* R2243 - DRC1LMIX Input 2 Volume */
+ { 0x000008C4, 0x0000 }, /* R2244 - DRC1LMIX Input 3 Source */
+ { 0x000008C5, 0x0080 }, /* R2245 - DRC1LMIX Input 3 Volume */
+ { 0x000008C6, 0x0000 }, /* R2246 - DRC1LMIX Input 4 Source */
+ { 0x000008C7, 0x0080 }, /* R2247 - DRC1LMIX Input 4 Volume */
+ { 0x000008C8, 0x0000 }, /* R2248 - DRC1RMIX Input 1 Source */
+ { 0x000008C9, 0x0080 }, /* R2249 - DRC1RMIX Input 1 Volume */
+ { 0x000008CA, 0x0000 }, /* R2250 - DRC1RMIX Input 2 Source */
+ { 0x000008CB, 0x0080 }, /* R2251 - DRC1RMIX Input 2 Volume */
+ { 0x000008CC, 0x0000 }, /* R2252 - DRC1RMIX Input 3 Source */
+ { 0x000008CD, 0x0080 }, /* R2253 - DRC1RMIX Input 3 Volume */
+ { 0x000008CE, 0x0000 }, /* R2254 - DRC1RMIX Input 4 Source */
+ { 0x000008CF, 0x0080 }, /* R2255 - DRC1RMIX Input 4 Volume */
+ { 0x00000900, 0x0000 }, /* R2304 - HPLP1MIX Input 1 Source */
+ { 0x00000901, 0x0080 }, /* R2305 - HPLP1MIX Input 1 Volume */
+ { 0x00000902, 0x0000 }, /* R2306 - HPLP1MIX Input 2 Source */
+ { 0x00000903, 0x0080 }, /* R2307 - HPLP1MIX Input 2 Volume */
+ { 0x00000904, 0x0000 }, /* R2308 - HPLP1MIX Input 3 Source */
+ { 0x00000905, 0x0080 }, /* R2309 - HPLP1MIX Input 3 Volume */
+ { 0x00000906, 0x0000 }, /* R2310 - HPLP1MIX Input 4 Source */
+ { 0x00000907, 0x0080 }, /* R2311 - HPLP1MIX Input 4 Volume */
+ { 0x00000908, 0x0000 }, /* R2312 - HPLP2MIX Input 1 Source */
+ { 0x00000909, 0x0080 }, /* R2313 - HPLP2MIX Input 1 Volume */
+ { 0x0000090A, 0x0000 }, /* R2314 - HPLP2MIX Input 2 Source */
+ { 0x0000090B, 0x0080 }, /* R2315 - HPLP2MIX Input 2 Volume */
+ { 0x0000090C, 0x0000 }, /* R2316 - HPLP2MIX Input 3 Source */
+ { 0x0000090D, 0x0080 }, /* R2317 - HPLP2MIX Input 3 Volume */
+ { 0x0000090E, 0x0000 }, /* R2318 - HPLP2MIX Input 4 Source */
+ { 0x0000090F, 0x0080 }, /* R2319 - HPLP2MIX Input 4 Volume */
+ { 0x00000910, 0x0000 }, /* R2320 - HPLP3MIX Input 1 Source */
+ { 0x00000911, 0x0080 }, /* R2321 - HPLP3MIX Input 1 Volume */
+ { 0x00000912, 0x0000 }, /* R2322 - HPLP3MIX Input 2 Source */
+ { 0x00000913, 0x0080 }, /* R2323 - HPLP3MIX Input 2 Volume */
+ { 0x00000914, 0x0000 }, /* R2324 - HPLP3MIX Input 3 Source */
+ { 0x00000915, 0x0080 }, /* R2325 - HPLP3MIX Input 3 Volume */
+ { 0x00000916, 0x0000 }, /* R2326 - HPLP3MIX Input 4 Source */
+ { 0x00000917, 0x0080 }, /* R2327 - HPLP3MIX Input 4 Volume */
+ { 0x00000918, 0x0000 }, /* R2328 - HPLP4MIX Input 1 Source */
+ { 0x00000919, 0x0080 }, /* R2329 - HPLP4MIX Input 1 Volume */
+ { 0x0000091A, 0x0000 }, /* R2330 - HPLP4MIX Input 2 Source */
+ { 0x0000091B, 0x0080 }, /* R2331 - HPLP4MIX Input 2 Volume */
+ { 0x0000091C, 0x0000 }, /* R2332 - HPLP4MIX Input 3 Source */
+ { 0x0000091D, 0x0080 }, /* R2333 - HPLP4MIX Input 3 Volume */
+ { 0x0000091E, 0x0000 }, /* R2334 - HPLP4MIX Input 4 Source */
+ { 0x0000091F, 0x0080 }, /* R2335 - HPLP4MIX Input 4 Volume */
+ { 0x00000940, 0x0000 }, /* R2368 - DSP1LMIX Input 1 Source */
+ { 0x00000941, 0x0080 }, /* R2369 - DSP1LMIX Input 1 Volume */
+ { 0x00000942, 0x0000 }, /* R2370 - DSP1LMIX Input 2 Source */
+ { 0x00000943, 0x0080 }, /* R2371 - DSP1LMIX Input 2 Volume */
+ { 0x00000944, 0x0000 }, /* R2372 - DSP1LMIX Input 3 Source */
+ { 0x00000945, 0x0080 }, /* R2373 - DSP1LMIX Input 3 Volume */
+ { 0x00000946, 0x0000 }, /* R2374 - DSP1LMIX Input 4 Source */
+ { 0x00000947, 0x0080 }, /* R2375 - DSP1LMIX Input 4 Volume */
+ { 0x00000948, 0x0000 }, /* R2376 - DSP1RMIX Input 1 Source */
+ { 0x00000949, 0x0080 }, /* R2377 - DSP1RMIX Input 1 Volume */
+ { 0x0000094A, 0x0000 }, /* R2378 - DSP1RMIX Input 2 Source */
+ { 0x0000094B, 0x0080 }, /* R2379 - DSP1RMIX Input 2 Volume */
+ { 0x0000094C, 0x0000 }, /* R2380 - DSP1RMIX Input 3 Source */
+ { 0x0000094D, 0x0080 }, /* R2381 - DSP1RMIX Input 3 Volume */
+ { 0x0000094E, 0x0000 }, /* R2382 - DSP1RMIX Input 4 Source */
+ { 0x0000094F, 0x0080 }, /* R2383 - DSP1RMIX Input 4 Volume */
+ { 0x00000950, 0x0000 }, /* R2384 - DSP1AUX1MIX Input 1 Source */
+ { 0x00000958, 0x0000 }, /* R2392 - DSP1AUX2MIX Input 1 Source */
+ { 0x00000960, 0x0000 }, /* R2400 - DSP1AUX3MIX Input 1 Source */
+ { 0x00000968, 0x0000 }, /* R2408 - DSP1AUX4MIX Input 1 Source */
+ { 0x00000970, 0x0000 }, /* R2416 - DSP1AUX5MIX Input 1 Source */
+ { 0x00000978, 0x0000 }, /* R2424 - DSP1AUX6MIX Input 1 Source */
+ { 0x00000A80, 0x0000 }, /* R2688 - ASRC1LMIX Input 1 Source */
+ { 0x00000A88, 0x0000 }, /* R2696 - ASRC1RMIX Input 1 Source */
+ { 0x00000A90, 0x0000 }, /* R2704 - ASRC2LMIX Input 1 Source */
+ { 0x00000A98, 0x0000 }, /* R2712 - ASRC2RMIX Input 1 Source */
+ { 0x00000B00, 0x0000 }, /* R2816 - ISRC1DEC1MIX Input 1 Source */
+ { 0x00000B08, 0x0000 }, /* R2824 - ISRC1DEC2MIX Input 1 Source */
+ { 0x00000B20, 0x0000 }, /* R2848 - ISRC1INT1MIX Input 1 Source */
+ { 0x00000B28, 0x0000 }, /* R2856 - ISRC1INT2MIX Input 1 Source */
+ { 0x00000B40, 0x0000 }, /* R2880 - ISRC2DEC1MIX Input 1 Source */
+ { 0x00000B48, 0x0000 }, /* R2888 - ISRC2DEC2MIX Input 1 Source */
+ { 0x00000B60, 0x0000 }, /* R2912 - ISRC2INT1MIX Input 1 Source */
+ { 0x00000B68, 0x0000 }, /* R2920 - ISRC2INT2MIX Input 1 Source */
+ { 0x00000C00, 0xA101 }, /* R3072 - GPIO1 CTRL */
+ { 0x00000C01, 0xA101 }, /* R3073 - GPIO2 CTRL */
+ { 0x00000C02, 0xA101 }, /* R3074 - GPIO3 CTRL */
+ { 0x00000C03, 0xA101 }, /* R3075 - GPIO4 CTRL */
+ { 0x00000C04, 0xA101 }, /* R3076 - GPIO5 CTRL */
+ { 0x00000C0F, 0x0400 }, /* R3087 - IRQ CTRL 1 */
+ { 0x00000C10, 0x1000 }, /* R3088 - GPIO Debounce Config */
+ { 0x00000C20, 0x8002 }, /* R3104 - Misc Pad Ctrl 1 */
{ 0x00000C21, 0x0001 }, /* R3105 - Misc Pad Ctrl 2 */
- { 0x00000C22, 0x0000 }, /* R3106 - Misc Pad Ctrl 3 */
- { 0x00000C23, 0x0000 }, /* R3107 - Misc Pad Ctrl 4 */
- { 0x00000C24, 0x0000 }, /* R3108 - Misc Pad Ctrl 5 */
- { 0x00000C25, 0x0000 }, /* R3109 - Misc Pad Ctrl 6 */
- { 0x00000D08, 0xFFFF }, /* R3336 - Interrupt Status 1 Mask */
- { 0x00000D09, 0xFFFF }, /* R3337 - Interrupt Status 2 Mask */
- { 0x00000D0A, 0xFFFF }, /* R3338 - Interrupt Status 3 Mask */
- { 0x00000D0B, 0xFFFF }, /* R3339 - Interrupt Status 4 Mask */
- { 0x00000D0C, 0xFEFF }, /* R3340 - Interrupt Status 5 Mask */
- { 0x00000D0F, 0x0000 }, /* R3343 - Interrupt Control */
- { 0x00000D18, 0xFFFF }, /* R3352 - IRQ2 Status 1 Mask */
- { 0x00000D19, 0xFFFF }, /* R3353 - IRQ2 Status 2 Mask */
- { 0x00000D1A, 0xFFFF }, /* R3354 - IRQ2 Status 3 Mask */
- { 0x00000D1B, 0xFFFF }, /* R3355 - IRQ2 Status 4 Mask */
- { 0x00000D1C, 0xFFFF }, /* R3356 - IRQ2 Status 5 Mask */
- { 0x00000D1F, 0x0000 }, /* R3359 - IRQ2 Control */
+ { 0x00000C22, 0x0000 }, /* R3106 - Misc Pad Ctrl 3 */
+ { 0x00000C23, 0x0000 }, /* R3107 - Misc Pad Ctrl 4 */
+ { 0x00000C24, 0x0000 }, /* R3108 - Misc Pad Ctrl 5 */
+ { 0x00000C25, 0x0000 }, /* R3109 - Misc Pad Ctrl 6 */
+ { 0x00000D08, 0xFFFF }, /* R3336 - Interrupt Status 1 Mask */
+ { 0x00000D09, 0xFFFF }, /* R3337 - Interrupt Status 2 Mask */
+ { 0x00000D0A, 0xFFFF }, /* R3338 - Interrupt Status 3 Mask */
+ { 0x00000D0B, 0xFFFF }, /* R3339 - Interrupt Status 4 Mask */
+ { 0x00000D0C, 0xFEFF }, /* R3340 - Interrupt Status 5 Mask */
+ { 0x00000D0F, 0x0000 }, /* R3343 - Interrupt Control */
+ { 0x00000D18, 0xFFFF }, /* R3352 - IRQ2 Status 1 Mask */
+ { 0x00000D19, 0xFFFF }, /* R3353 - IRQ2 Status 2 Mask */
+ { 0x00000D1A, 0xFFFF }, /* R3354 - IRQ2 Status 3 Mask */
+ { 0x00000D1B, 0xFFFF }, /* R3355 - IRQ2 Status 4 Mask */
+ { 0x00000D1C, 0xFFFF }, /* R3356 - IRQ2 Status 5 Mask */
+ { 0x00000D1F, 0x0000 }, /* R3359 - IRQ2 Control */
{ 0x00000D41, 0x0000 }, /* R3393 - ADSP2 IRQ0 */
- { 0x00000D53, 0xFFFF }, /* R3411 - AOD IRQ Mask IRQ1 */
- { 0x00000D54, 0xFFFF }, /* R3412 - AOD IRQ Mask IRQ2 */
- { 0x00000D56, 0x0000 }, /* R3414 - Jack detect debounce */
- { 0x00000E00, 0x0000 }, /* R3584 - FX_Ctrl1 */
- { 0x00000E10, 0x6318 }, /* R3600 - EQ1_1 */
- { 0x00000E11, 0x6300 }, /* R3601 - EQ1_2 */
- { 0x00000E12, 0x0FC8 }, /* R3602 - EQ1_3 */
- { 0x00000E13, 0x03FE }, /* R3603 - EQ1_4 */
- { 0x00000E14, 0x00E0 }, /* R3604 - EQ1_5 */
- { 0x00000E15, 0x1EC4 }, /* R3605 - EQ1_6 */
- { 0x00000E16, 0xF136 }, /* R3606 - EQ1_7 */
- { 0x00000E17, 0x0409 }, /* R3607 - EQ1_8 */
- { 0x00000E18, 0x04CC }, /* R3608 - EQ1_9 */
- { 0x00000E19, 0x1C9B }, /* R3609 - EQ1_10 */
- { 0x00000E1A, 0xF337 }, /* R3610 - EQ1_11 */
- { 0x00000E1B, 0x040B }, /* R3611 - EQ1_12 */
- { 0x00000E1C, 0x0CBB }, /* R3612 - EQ1_13 */
- { 0x00000E1D, 0x16F8 }, /* R3613 - EQ1_14 */
- { 0x00000E1E, 0xF7D9 }, /* R3614 - EQ1_15 */
- { 0x00000E1F, 0x040A }, /* R3615 - EQ1_16 */
- { 0x00000E20, 0x1F14 }, /* R3616 - EQ1_17 */
- { 0x00000E21, 0x058C }, /* R3617 - EQ1_18 */
- { 0x00000E22, 0x0563 }, /* R3618 - EQ1_19 */
- { 0x00000E23, 0x4000 }, /* R3619 - EQ1_20 */
- { 0x00000E24, 0x0B75 }, /* R3620 - EQ1_21 */
- { 0x00000E26, 0x6318 }, /* R3622 - EQ2_1 */
- { 0x00000E27, 0x6300 }, /* R3623 - EQ2_2 */
- { 0x00000E28, 0x0FC8 }, /* R3624 - EQ2_3 */
- { 0x00000E29, 0x03FE }, /* R3625 - EQ2_4 */
- { 0x00000E2A, 0x00E0 }, /* R3626 - EQ2_5 */
- { 0x00000E2B, 0x1EC4 }, /* R3627 - EQ2_6 */
- { 0x00000E2C, 0xF136 }, /* R3628 - EQ2_7 */
- { 0x00000E2D, 0x0409 }, /* R3629 - EQ2_8 */
- { 0x00000E2E, 0x04CC }, /* R3630 - EQ2_9 */
- { 0x00000E2F, 0x1C9B }, /* R3631 - EQ2_10 */
- { 0x00000E30, 0xF337 }, /* R3632 - EQ2_11 */
- { 0x00000E31, 0x040B }, /* R3633 - EQ2_12 */
- { 0x00000E32, 0x0CBB }, /* R3634 - EQ2_13 */
- { 0x00000E33, 0x16F8 }, /* R3635 - EQ2_14 */
- { 0x00000E34, 0xF7D9 }, /* R3636 - EQ2_15 */
- { 0x00000E35, 0x040A }, /* R3637 - EQ2_16 */
- { 0x00000E36, 0x1F14 }, /* R3638 - EQ2_17 */
- { 0x00000E37, 0x058C }, /* R3639 - EQ2_18 */
- { 0x00000E38, 0x0563 }, /* R3640 - EQ2_19 */
- { 0x00000E39, 0x4000 }, /* R3641 - EQ2_20 */
- { 0x00000E3A, 0x0B75 }, /* R3642 - EQ2_21 */
- { 0x00000E3C, 0x6318 }, /* R3644 - EQ3_1 */
- { 0x00000E3D, 0x6300 }, /* R3645 - EQ3_2 */
- { 0x00000E3E, 0x0FC8 }, /* R3646 - EQ3_3 */
- { 0x00000E3F, 0x03FE }, /* R3647 - EQ3_4 */
- { 0x00000E40, 0x00E0 }, /* R3648 - EQ3_5 */
- { 0x00000E41, 0x1EC4 }, /* R3649 - EQ3_6 */
- { 0x00000E42, 0xF136 }, /* R3650 - EQ3_7 */
- { 0x00000E43, 0x0409 }, /* R3651 - EQ3_8 */
- { 0x00000E44, 0x04CC }, /* R3652 - EQ3_9 */
- { 0x00000E45, 0x1C9B }, /* R3653 - EQ3_10 */
- { 0x00000E46, 0xF337 }, /* R3654 - EQ3_11 */
- { 0x00000E47, 0x040B }, /* R3655 - EQ3_12 */
- { 0x00000E48, 0x0CBB }, /* R3656 - EQ3_13 */
- { 0x00000E49, 0x16F8 }, /* R3657 - EQ3_14 */
- { 0x00000E4A, 0xF7D9 }, /* R3658 - EQ3_15 */
- { 0x00000E4B, 0x040A }, /* R3659 - EQ3_16 */
- { 0x00000E4C, 0x1F14 }, /* R3660 - EQ3_17 */
- { 0x00000E4D, 0x058C }, /* R3661 - EQ3_18 */
- { 0x00000E4E, 0x0563 }, /* R3662 - EQ3_19 */
- { 0x00000E4F, 0x4000 }, /* R3663 - EQ3_20 */
- { 0x00000E50, 0x0B75 }, /* R3664 - EQ3_21 */
- { 0x00000E52, 0x6318 }, /* R3666 - EQ4_1 */
- { 0x00000E53, 0x6300 }, /* R3667 - EQ4_2 */
- { 0x00000E54, 0x0FC8 }, /* R3668 - EQ4_3 */
- { 0x00000E55, 0x03FE }, /* R3669 - EQ4_4 */
- { 0x00000E56, 0x00E0 }, /* R3670 - EQ4_5 */
- { 0x00000E57, 0x1EC4 }, /* R3671 - EQ4_6 */
- { 0x00000E58, 0xF136 }, /* R3672 - EQ4_7 */
- { 0x00000E59, 0x0409 }, /* R3673 - EQ4_8 */
- { 0x00000E5A, 0x04CC }, /* R3674 - EQ4_9 */
- { 0x00000E5B, 0x1C9B }, /* R3675 - EQ4_10 */
- { 0x00000E5C, 0xF337 }, /* R3676 - EQ4_11 */
- { 0x00000E5D, 0x040B }, /* R3677 - EQ4_12 */
- { 0x00000E5E, 0x0CBB }, /* R3678 - EQ4_13 */
- { 0x00000E5F, 0x16F8 }, /* R3679 - EQ4_14 */
- { 0x00000E60, 0xF7D9 }, /* R3680 - EQ4_15 */
- { 0x00000E61, 0x040A }, /* R3681 - EQ4_16 */
- { 0x00000E62, 0x1F14 }, /* R3682 - EQ4_17 */
- { 0x00000E63, 0x058C }, /* R3683 - EQ4_18 */
- { 0x00000E64, 0x0563 }, /* R3684 - EQ4_19 */
- { 0x00000E65, 0x4000 }, /* R3685 - EQ4_20 */
- { 0x00000E66, 0x0B75 }, /* R3686 - EQ4_21 */
- { 0x00000E80, 0x0018 }, /* R3712 - DRC1 ctrl1 */
- { 0x00000E81, 0x0933 }, /* R3713 - DRC1 ctrl2 */
- { 0x00000E82, 0x0018 }, /* R3714 - DRC1 ctrl3 */
- { 0x00000E83, 0x0000 }, /* R3715 - DRC1 ctrl4 */
- { 0x00000E84, 0x0000 }, /* R3716 - DRC1 ctrl5 */
- { 0x00000EC0, 0x0000 }, /* R3776 - HPLPF1_1 */
- { 0x00000EC1, 0x0000 }, /* R3777 - HPLPF1_2 */
- { 0x00000EC4, 0x0000 }, /* R3780 - HPLPF2_1 */
- { 0x00000EC5, 0x0000 }, /* R3781 - HPLPF2_2 */
- { 0x00000EC8, 0x0000 }, /* R3784 - HPLPF3_1 */
- { 0x00000EC9, 0x0000 }, /* R3785 - HPLPF3_2 */
- { 0x00000ECC, 0x0000 }, /* R3788 - HPLPF4_1 */
- { 0x00000ECD, 0x0000 }, /* R3789 - HPLPF4_2 */
- { 0x00000EE0, 0x0000 }, /* R3808 - ASRC_ENABLE */
- { 0x00000EE2, 0x0000 }, /* R3810 - ASRC_RATE1 */
+ { 0x00000D53, 0xFFFF }, /* R3411 - AOD IRQ Mask IRQ1 */
+ { 0x00000D54, 0xFFFF }, /* R3412 - AOD IRQ Mask IRQ2 */
+ { 0x00000D56, 0x0000 }, /* R3414 - Jack detect debounce */
+ { 0x00000E00, 0x0000 }, /* R3584 - FX_Ctrl1 */
+ { 0x00000E10, 0x6318 }, /* R3600 - EQ1_1 */
+ { 0x00000E11, 0x6300 }, /* R3601 - EQ1_2 */
+ { 0x00000E12, 0x0FC8 }, /* R3602 - EQ1_3 */
+ { 0x00000E13, 0x03FE }, /* R3603 - EQ1_4 */
+ { 0x00000E14, 0x00E0 }, /* R3604 - EQ1_5 */
+ { 0x00000E15, 0x1EC4 }, /* R3605 - EQ1_6 */
+ { 0x00000E16, 0xF136 }, /* R3606 - EQ1_7 */
+ { 0x00000E17, 0x0409 }, /* R3607 - EQ1_8 */
+ { 0x00000E18, 0x04CC }, /* R3608 - EQ1_9 */
+ { 0x00000E19, 0x1C9B }, /* R3609 - EQ1_10 */
+ { 0x00000E1A, 0xF337 }, /* R3610 - EQ1_11 */
+ { 0x00000E1B, 0x040B }, /* R3611 - EQ1_12 */
+ { 0x00000E1C, 0x0CBB }, /* R3612 - EQ1_13 */
+ { 0x00000E1D, 0x16F8 }, /* R3613 - EQ1_14 */
+ { 0x00000E1E, 0xF7D9 }, /* R3614 - EQ1_15 */
+ { 0x00000E1F, 0x040A }, /* R3615 - EQ1_16 */
+ { 0x00000E20, 0x1F14 }, /* R3616 - EQ1_17 */
+ { 0x00000E21, 0x058C }, /* R3617 - EQ1_18 */
+ { 0x00000E22, 0x0563 }, /* R3618 - EQ1_19 */
+ { 0x00000E23, 0x4000 }, /* R3619 - EQ1_20 */
+ { 0x00000E24, 0x0B75 }, /* R3620 - EQ1_21 */
+ { 0x00000E26, 0x6318 }, /* R3622 - EQ2_1 */
+ { 0x00000E27, 0x6300 }, /* R3623 - EQ2_2 */
+ { 0x00000E28, 0x0FC8 }, /* R3624 - EQ2_3 */
+ { 0x00000E29, 0x03FE }, /* R3625 - EQ2_4 */
+ { 0x00000E2A, 0x00E0 }, /* R3626 - EQ2_5 */
+ { 0x00000E2B, 0x1EC4 }, /* R3627 - EQ2_6 */
+ { 0x00000E2C, 0xF136 }, /* R3628 - EQ2_7 */
+ { 0x00000E2D, 0x0409 }, /* R3629 - EQ2_8 */
+ { 0x00000E2E, 0x04CC }, /* R3630 - EQ2_9 */
+ { 0x00000E2F, 0x1C9B }, /* R3631 - EQ2_10 */
+ { 0x00000E30, 0xF337 }, /* R3632 - EQ2_11 */
+ { 0x00000E31, 0x040B }, /* R3633 - EQ2_12 */
+ { 0x00000E32, 0x0CBB }, /* R3634 - EQ2_13 */
+ { 0x00000E33, 0x16F8 }, /* R3635 - EQ2_14 */
+ { 0x00000E34, 0xF7D9 }, /* R3636 - EQ2_15 */
+ { 0x00000E35, 0x040A }, /* R3637 - EQ2_16 */
+ { 0x00000E36, 0x1F14 }, /* R3638 - EQ2_17 */
+ { 0x00000E37, 0x058C }, /* R3639 - EQ2_18 */
+ { 0x00000E38, 0x0563 }, /* R3640 - EQ2_19 */
+ { 0x00000E39, 0x4000 }, /* R3641 - EQ2_20 */
+ { 0x00000E3A, 0x0B75 }, /* R3642 - EQ2_21 */
+ { 0x00000E3C, 0x6318 }, /* R3644 - EQ3_1 */
+ { 0x00000E3D, 0x6300 }, /* R3645 - EQ3_2 */
+ { 0x00000E3E, 0x0FC8 }, /* R3646 - EQ3_3 */
+ { 0x00000E3F, 0x03FE }, /* R3647 - EQ3_4 */
+ { 0x00000E40, 0x00E0 }, /* R3648 - EQ3_5 */
+ { 0x00000E41, 0x1EC4 }, /* R3649 - EQ3_6 */
+ { 0x00000E42, 0xF136 }, /* R3650 - EQ3_7 */
+ { 0x00000E43, 0x0409 }, /* R3651 - EQ3_8 */
+ { 0x00000E44, 0x04CC }, /* R3652 - EQ3_9 */
+ { 0x00000E45, 0x1C9B }, /* R3653 - EQ3_10 */
+ { 0x00000E46, 0xF337 }, /* R3654 - EQ3_11 */
+ { 0x00000E47, 0x040B }, /* R3655 - EQ3_12 */
+ { 0x00000E48, 0x0CBB }, /* R3656 - EQ3_13 */
+ { 0x00000E49, 0x16F8 }, /* R3657 - EQ3_14 */
+ { 0x00000E4A, 0xF7D9 }, /* R3658 - EQ3_15 */
+ { 0x00000E4B, 0x040A }, /* R3659 - EQ3_16 */
+ { 0x00000E4C, 0x1F14 }, /* R3660 - EQ3_17 */
+ { 0x00000E4D, 0x058C }, /* R3661 - EQ3_18 */
+ { 0x00000E4E, 0x0563 }, /* R3662 - EQ3_19 */
+ { 0x00000E4F, 0x4000 }, /* R3663 - EQ3_20 */
+ { 0x00000E50, 0x0B75 }, /* R3664 - EQ3_21 */
+ { 0x00000E52, 0x6318 }, /* R3666 - EQ4_1 */
+ { 0x00000E53, 0x6300 }, /* R3667 - EQ4_2 */
+ { 0x00000E54, 0x0FC8 }, /* R3668 - EQ4_3 */
+ { 0x00000E55, 0x03FE }, /* R3669 - EQ4_4 */
+ { 0x00000E56, 0x00E0 }, /* R3670 - EQ4_5 */
+ { 0x00000E57, 0x1EC4 }, /* R3671 - EQ4_6 */
+ { 0x00000E58, 0xF136 }, /* R3672 - EQ4_7 */
+ { 0x00000E59, 0x0409 }, /* R3673 - EQ4_8 */
+ { 0x00000E5A, 0x04CC }, /* R3674 - EQ4_9 */
+ { 0x00000E5B, 0x1C9B }, /* R3675 - EQ4_10 */
+ { 0x00000E5C, 0xF337 }, /* R3676 - EQ4_11 */
+ { 0x00000E5D, 0x040B }, /* R3677 - EQ4_12 */
+ { 0x00000E5E, 0x0CBB }, /* R3678 - EQ4_13 */
+ { 0x00000E5F, 0x16F8 }, /* R3679 - EQ4_14 */
+ { 0x00000E60, 0xF7D9 }, /* R3680 - EQ4_15 */
+ { 0x00000E61, 0x040A }, /* R3681 - EQ4_16 */
+ { 0x00000E62, 0x1F14 }, /* R3682 - EQ4_17 */
+ { 0x00000E63, 0x058C }, /* R3683 - EQ4_18 */
+ { 0x00000E64, 0x0563 }, /* R3684 - EQ4_19 */
+ { 0x00000E65, 0x4000 }, /* R3685 - EQ4_20 */
+ { 0x00000E66, 0x0B75 }, /* R3686 - EQ4_21 */
+ { 0x00000E80, 0x0018 }, /* R3712 - DRC1 ctrl1 */
+ { 0x00000E81, 0x0933 }, /* R3713 - DRC1 ctrl2 */
+ { 0x00000E82, 0x0018 }, /* R3714 - DRC1 ctrl3 */
+ { 0x00000E83, 0x0000 }, /* R3715 - DRC1 ctrl4 */
+ { 0x00000E84, 0x0000 }, /* R3716 - DRC1 ctrl5 */
+ { 0x00000EC0, 0x0000 }, /* R3776 - HPLPF1_1 */
+ { 0x00000EC1, 0x0000 }, /* R3777 - HPLPF1_2 */
+ { 0x00000EC4, 0x0000 }, /* R3780 - HPLPF2_1 */
+ { 0x00000EC5, 0x0000 }, /* R3781 - HPLPF2_2 */
+ { 0x00000EC8, 0x0000 }, /* R3784 - HPLPF3_1 */
+ { 0x00000EC9, 0x0000 }, /* R3785 - HPLPF3_2 */
+ { 0x00000ECC, 0x0000 }, /* R3788 - HPLPF4_1 */
+ { 0x00000ECD, 0x0000 }, /* R3789 - HPLPF4_2 */
+ { 0x00000EE0, 0x0000 }, /* R3808 - ASRC_ENABLE */
+ { 0x00000EE2, 0x0000 }, /* R3810 - ASRC_RATE1 */
{ 0x00000EE3, 0x4000 }, /* R3811 - ASRC_RATE2 */
- { 0x00000EF0, 0x0000 }, /* R3824 - ISRC 1 CTRL 1 */
- { 0x00000EF1, 0x0000 }, /* R3825 - ISRC 1 CTRL 2 */
- { 0x00000EF2, 0x0000 }, /* R3826 - ISRC 1 CTRL 3 */
- { 0x00000EF3, 0x0000 }, /* R3827 - ISRC 2 CTRL 1 */
- { 0x00000EF4, 0x0000 }, /* R3828 - ISRC 2 CTRL 2 */
- { 0x00000EF5, 0x0000 }, /* R3829 - ISRC 2 CTRL 3 */
- { 0x00001100, 0x0010 }, /* R4352 - DSP1 Control 1 */
+ { 0x00000EF0, 0x0000 }, /* R3824 - ISRC 1 CTRL 1 */
+ { 0x00000EF1, 0x0000 }, /* R3825 - ISRC 1 CTRL 2 */
+ { 0x00000EF2, 0x0000 }, /* R3826 - ISRC 1 CTRL 3 */
+ { 0x00000EF3, 0x0000 }, /* R3827 - ISRC 2 CTRL 1 */
+ { 0x00000EF4, 0x0000 }, /* R3828 - ISRC 2 CTRL 2 */
+ { 0x00000EF5, 0x0000 }, /* R3829 - ISRC 2 CTRL 3 */
+ { 0x00001100, 0x0010 }, /* R4352 - DSP1 Control 1 */
};
static bool wm5102_readable_register(struct device *dev, unsigned int reg)
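Each entry in the wm5102_reg_default[] table rewritten above is a regmap "struct reg_default" pair — a register address plus its hardware reset value — which regmap uses to seed its register cache. As an aside, a minimal sketch of how such a table typically plugs into a regmap_config; the example_* names are invented for illustration, the table is trimmed to two entries, and the Arizona-style field widths (32-bit addresses, 16-bit values) are an assumption, not something this patch states:

#include <linux/regmap.h>

/*
 * Illustrative sketch only, not from this patch: a trimmed reg_default
 * table and the regmap_config fields that consume it. A real driver
 * lists every cached register, as in the full table above.
 */
static const struct reg_default example_reg_defaults[] = {
	{ 0x00000C00, 0xA101 }, /* GPIO1 CTRL */
	{ 0x00001100, 0x0010 }, /* DSP1 Control 1 */
};

static const struct regmap_config example_regmap_config = {
	.reg_bits = 32,			/* assumed register address width */
	.val_bits = 16,			/* assumed register value width */
	.max_register = 0x00001100,
	.readable_reg = wm5102_readable_register, /* callback defined above */
	.cache_type = REGCACHE_RBTREE,
	.reg_defaults = example_reg_defaults,
	.num_reg_defaults = ARRAY_SIZE(example_reg_defaults),
};

With a config like this, a cache read of an untouched register returns the default from the table instead of hitting the bus, which is why the defaults must track the silicon's reset values exactly — hence churn like the block above when a table is corrected.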
diff --git a/drivers/mfd/wm8994-core.c b/drivers/mfd/wm8994-core.c
index 8588dbad3301..953d0790ffd5 100644
--- a/drivers/mfd/wm8994-core.c
+++ b/drivers/mfd/wm8994-core.c
@@ -406,8 +406,7 @@ static int wm8994_device_init(struct wm8994 *wm8994, int irq)
goto err;
}
- ret = regulator_bulk_enable(wm8994->num_supplies,
- wm8994->supplies);
+ ret = regulator_bulk_enable(wm8994->num_supplies, wm8994->supplies);
if (ret != 0) {
dev_err(wm8994->dev, "Failed to enable supplies: %d\n", ret);
goto err_regulator_free;
@@ -612,8 +611,7 @@ static void wm8994_device_exit(struct wm8994 *wm8994)
{
pm_runtime_disable(wm8994->dev);
wm8994_irq_exit(wm8994);
- regulator_bulk_disable(wm8994->num_supplies,
- wm8994->supplies);
+ regulator_bulk_disable(wm8994->num_supplies, wm8994->supplies);
regulator_bulk_free(wm8994->num_supplies, wm8994->supplies);
mfd_remove_devices(wm8994->dev);
}
diff --git a/drivers/misc/cxl/api.c b/drivers/misc/cxl/api.c
index 2e5233b60971..1b35e33d2434 100644
--- a/drivers/misc/cxl/api.c
+++ b/drivers/misc/cxl/api.c
@@ -9,18 +9,119 @@
#include <linux/pci.h>
#include <linux/slab.h>
-#include <linux/anon_inodes.h>
#include <linux/file.h>
#include <misc/cxl.h>
-#include <linux/fs.h>
#include <asm/pnv-pci.h>
#include <linux/msi.h>
+#include <linux/module.h>
+#include <linux/mount.h>
#include "cxl.h"
+/*
+ * Since we want to track memory mappings to be able to force-unmap
+ * when the AFU is no longer reachable, we need an inode. For devices
+ * opened through the cxl user API, this is not a problem, but a
+ * userland process can also get a cxl fd through the cxl_get_fd()
+ * API, which is used by the cxlflash driver.
+ *
+ * Therefore we implement our own simple pseudo-filesystem and inode
+ * allocator. We don't use the anonymous inode, as we need the
+ * meta-data associated with it (address_space) and it is shared by
+ * other drivers/processes, so it could lead to cxl unmapping VMAs
+ * from random processes.
+ */
+
+#define CXL_PSEUDO_FS_MAGIC 0x1697697f
+
+static int cxl_fs_cnt;
+static struct vfsmount *cxl_vfs_mount;
+
+static const struct dentry_operations cxl_fs_dops = {
+ .d_dname = simple_dname,
+};
+
+static struct dentry *cxl_fs_mount(struct file_system_type *fs_type, int flags,
+ const char *dev_name, void *data)
+{
+ return mount_pseudo(fs_type, "cxl:", NULL, &cxl_fs_dops,
+ CXL_PSEUDO_FS_MAGIC);
+}
+
+static struct file_system_type cxl_fs_type = {
+ .name = "cxl",
+ .owner = THIS_MODULE,
+ .mount = cxl_fs_mount,
+ .kill_sb = kill_anon_super,
+};
+
+
+void cxl_release_mapping(struct cxl_context *ctx)
+{
+ if (ctx->kernelapi && ctx->mapping)
+ simple_release_fs(&cxl_vfs_mount, &cxl_fs_cnt);
+}
+
+static struct file *cxl_getfile(const char *name,
+ const struct file_operations *fops,
+ void *priv, int flags)
+{
+ struct qstr this;
+ struct path path;
+ struct file *file;
+ struct inode *inode = NULL;
+ int rc;
+
+ /* strongly inspired by anon_inode_getfile() */
+
+ if (fops->owner && !try_module_get(fops->owner))
+ return ERR_PTR(-ENOENT);
+
+ rc = simple_pin_fs(&cxl_fs_type, &cxl_vfs_mount, &cxl_fs_cnt);
+ if (rc < 0) {
+ pr_err("Cannot mount cxl pseudo filesystem: %d\n", rc);
+ file = ERR_PTR(rc);
+ goto err_module;
+ }
+
+ inode = alloc_anon_inode(cxl_vfs_mount->mnt_sb);
+ if (IS_ERR(inode)) {
+ file = ERR_CAST(inode);
+ goto err_fs;
+ }
+
+ file = ERR_PTR(-ENOMEM);
+ this.name = name;
+ this.len = strlen(name);
+ this.hash = 0;
+ path.dentry = d_alloc_pseudo(cxl_vfs_mount->mnt_sb, &this);
+ if (!path.dentry)
+ goto err_inode;
+
+ path.mnt = mntget(cxl_vfs_mount);
+ d_instantiate(path.dentry, inode);
+
+ file = alloc_file(&path, OPEN_FMODE(flags), fops);
+ if (IS_ERR(file))
+ goto err_dput;
+ file->f_flags = flags & (O_ACCMODE | O_NONBLOCK);
+ file->private_data = priv;
+
+ return file;
+
+err_dput:
+ path_put(&path);
+err_inode:
+ iput(inode);
+err_fs:
+ simple_release_fs(&cxl_vfs_mount, &cxl_fs_cnt);
+err_module:
+ module_put(fops->owner);
+ return file;
+}
+
struct cxl_context *cxl_dev_context_init(struct pci_dev *dev)
{
- struct address_space *mapping;
struct cxl_afu *afu;
struct cxl_context *ctx;
int rc;
@@ -30,38 +131,20 @@ struct cxl_context *cxl_dev_context_init(struct pci_dev *dev)
return ERR_CAST(afu);
ctx = cxl_context_alloc();
- if (IS_ERR(ctx)) {
- rc = PTR_ERR(ctx);
- goto err_dev;
- }
+ if (!ctx)
+ return ERR_PTR(-ENOMEM);
ctx->kernelapi = true;
- /*
- * Make our own address space since we won't have one from the
- * filesystem like the user api has, and even if we do associate a file
- * with this context we don't want to use the global anonymous inode's
- * address space as that can invalidate unrelated users:
- */
- mapping = kmalloc(sizeof(struct address_space), GFP_KERNEL);
- if (!mapping) {
- rc = -ENOMEM;
- goto err_ctx;
- }
- address_space_init_once(mapping);
-
/* Make it a slave context. We can promote it later? */
- rc = cxl_context_init(ctx, afu, false, mapping);
+ rc = cxl_context_init(ctx, afu, false);
if (rc)
- goto err_mapping;
+ goto err_ctx;
return ctx;
-err_mapping:
- kfree(mapping);
err_ctx:
kfree(ctx);
-err_dev:
return ERR_PTR(rc);
}
EXPORT_SYMBOL_GPL(cxl_dev_context_init);
@@ -340,6 +423,11 @@ struct file *cxl_get_fd(struct cxl_context *ctx, struct file_operations *fops,
{
struct file *file;
int rc, flags, fdtmp;
+ char *name = NULL;
+
+ /* only allow one per context */
+ if (ctx->mapping)
+ return ERR_PTR(-EEXIST);
flags = O_RDWR | O_CLOEXEC;
@@ -363,12 +451,13 @@ struct file *cxl_get_fd(struct cxl_context *ctx, struct file_operations *fops,
} else /* use default ops */
fops = (struct file_operations *)&afu_fops;
- file = anon_inode_getfile("cxl", fops, ctx, flags);
+ name = kasprintf(GFP_KERNEL, "cxl:%d", ctx->pe);
+ file = cxl_getfile(name, fops, ctx, flags);
+ kfree(name);
if (IS_ERR(file))
goto err_fd;
- file->f_mapping = ctx->mapping;
-
+ cxl_context_set_mapping(ctx, file->f_mapping);
*fd = fdtmp;
return file;
@@ -541,7 +630,7 @@ int _cxl_cx4_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
if (remaining > 0) {
new_ctx = cxl_dev_context_init(pdev);
- if (!new_ctx) {
+ if (IS_ERR(new_ctx)) {
pr_warn("%s: Failed to allocate enough contexts for MSIs\n", pci_name(pdev));
return -ENOSPC;
}
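The comment block added to api.c above carries the design reasoning: a kernel-API context needs its own inode — and therefore its own address_space — so that force-unmapping when the AFU disappears cannot disturb unrelated users of the shared anonymous inode. A hedged sketch of the underlying pin-the-pseudo-fs-then-allocate-an-inode pattern, condensed from the cxl_getfile() flow above with the dentry/file steps trimmed and example_* names invented for illustration:

#include <linux/fs.h>
#include <linux/mount.h>

static struct vfsmount *example_mnt;
static int example_cnt;

static struct inode *example_get_inode(struct file_system_type *fs_type)
{
	struct inode *inode;
	int rc;

	/* First caller mounts the pseudo-fs; later callers take a ref. */
	rc = simple_pin_fs(fs_type, &example_mnt, &example_cnt);
	if (rc < 0)
		return ERR_PTR(rc);

	/*
	 * A private anonymous inode: its address_space belongs to this
	 * driver alone, so unmapping against it cannot zap VMAs that
	 * belong to unrelated processes.
	 */
	inode = alloc_anon_inode(example_mnt->mnt_sb);
	if (IS_ERR(inode))
		simple_release_fs(&example_mnt, &example_cnt);
	return inode;
}

Every successful allocation is balanced by a simple_release_fs() on teardown — which is exactly what cxl_release_mapping() above does once the context drops its mapping.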
diff --git a/drivers/misc/cxl/context.c b/drivers/misc/cxl/context.c
index 5e506c19108a..3907387b6d15 100644
--- a/drivers/misc/cxl/context.c
+++ b/drivers/misc/cxl/context.c
@@ -34,8 +34,7 @@ struct cxl_context *cxl_context_alloc(void)
/*
* Initialises a CXL context.
*/
-int cxl_context_init(struct cxl_context *ctx, struct cxl_afu *afu, bool master,
- struct address_space *mapping)
+int cxl_context_init(struct cxl_context *ctx, struct cxl_afu *afu, bool master)
{
int i;
@@ -44,7 +43,7 @@ int cxl_context_init(struct cxl_context *ctx, struct cxl_afu *afu, bool master,
ctx->master = master;
ctx->pid = ctx->glpid = NULL; /* Set in start work ioctl */
mutex_init(&ctx->mapping_lock);
- ctx->mapping = mapping;
+ ctx->mapping = NULL;
/*
* Allocate the segment table before we put it in the IDR so that we
@@ -114,16 +113,23 @@ int cxl_context_init(struct cxl_context *ctx, struct cxl_afu *afu, bool master,
return 0;
}
+void cxl_context_set_mapping(struct cxl_context *ctx,
+ struct address_space *mapping)
+{
+ mutex_lock(&ctx->mapping_lock);
+ ctx->mapping = mapping;
+ mutex_unlock(&ctx->mapping_lock);
+}
+
static int cxl_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
struct cxl_context *ctx = vma->vm_file->private_data;
- unsigned long address = (unsigned long)vmf->virtual_address;
u64 area, offset;
offset = vmf->pgoff << PAGE_SHIFT;
pr_devel("%s: pe: %i address: 0x%lx offset: 0x%llx\n",
- __func__, ctx->pe, address, offset);
+ __func__, ctx->pe, vmf->address, offset);
if (ctx->afu->current_mode == CXL_MODE_DEDICATED) {
area = ctx->afu->psn_phys;
@@ -155,7 +161,7 @@ static int cxl_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
return VM_FAULT_SIGBUS;
}
- vm_insert_pfn(vma, address, (area + offset) >> PAGE_SHIFT);
+ vm_insert_pfn(vma, vmf->address, (area + offset) >> PAGE_SHIFT);
mutex_unlock(&ctx->status_mutex);
@@ -300,8 +306,6 @@ static void reclaim_ctx(struct rcu_head *rcu)
if (ctx->ff_page)
__free_page(ctx->ff_page);
ctx->sstp = NULL;
- if (ctx->kernelapi)
- kfree(ctx->mapping);
kfree(ctx->irq_bitmap);
@@ -313,6 +317,8 @@ static void reclaim_ctx(struct rcu_head *rcu)
void cxl_context_free(struct cxl_context *ctx)
{
+ if (ctx->kernelapi && ctx->mapping)
+ cxl_release_mapping(ctx);
mutex_lock(&ctx->afu->contexts_lock);
idr_remove(&ctx->afu->contexts_idr, ctx->pe);
mutex_unlock(&ctx->afu->contexts_lock);
diff --git a/drivers/misc/cxl/cxl.h b/drivers/misc/cxl/cxl.h
index a144073593fa..b24d76723fb0 100644
--- a/drivers/misc/cxl/cxl.h
+++ b/drivers/misc/cxl/cxl.h
@@ -817,8 +817,9 @@ void cxl_dump_debug_buffer(void *addr, size_t size);
void init_cxl_native(void);
struct cxl_context *cxl_context_alloc(void);
-int cxl_context_init(struct cxl_context *ctx, struct cxl_afu *afu, bool master,
- struct address_space *mapping);
+int cxl_context_init(struct cxl_context *ctx, struct cxl_afu *afu, bool master);
+void cxl_context_set_mapping(struct cxl_context *ctx,
+ struct address_space *mapping);
void cxl_context_free(struct cxl_context *ctx);
int cxl_context_iomap(struct cxl_context *ctx, struct vm_area_struct *vma);
unsigned int cxl_map_irq(struct cxl *adapter, irq_hw_number_t hwirq,
@@ -877,6 +878,7 @@ void cxl_native_err_irq_dump_regs(struct cxl *adapter);
void cxl_stop_trace(struct cxl *cxl);
int cxl_pci_vphb_add(struct cxl_afu *afu);
void cxl_pci_vphb_remove(struct cxl_afu *afu);
+void cxl_release_mapping(struct cxl_context *ctx);
extern struct pci_driver cxl_pci_driver;
extern struct platform_driver cxl_of_driver;
diff --git a/drivers/misc/cxl/debugfs.c b/drivers/misc/cxl/debugfs.c
index ec7b8a017439..9c06ac8fa5ac 100644
--- a/drivers/misc/cxl/debugfs.c
+++ b/drivers/misc/cxl/debugfs.c
@@ -43,12 +43,14 @@ static int debugfs_io_u64_set(void *data, u64 val)
out_be64((u64 __iomem *)data, val);
return 0;
}
-DEFINE_SIMPLE_ATTRIBUTE(fops_io_x64, debugfs_io_u64_get, debugfs_io_u64_set, "0x%016llx\n");
+DEFINE_DEBUGFS_ATTRIBUTE(fops_io_x64, debugfs_io_u64_get, debugfs_io_u64_set,
+ "0x%016llx\n");
static struct dentry *debugfs_create_io_x64(const char *name, umode_t mode,
struct dentry *parent, u64 __iomem *value)
{
- return debugfs_create_file(name, mode, parent, (void __force *)value, &fops_io_x64);
+ return debugfs_create_file_unsafe(name, mode, parent,
+ (void __force *)value, &fops_io_x64);
}
void cxl_debugfs_add_adapter_psl_regs(struct cxl *adapter, struct dentry *dir)
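
The switch pairs DEFINE_DEBUGFS_ATTRIBUTE() with debugfs_create_file_unsafe(): the generated fops protect themselves against concurrent file removal, so debugfs can skip the per-file lifetime proxy that plain debugfs_create_file() wraps around DEFINE_SIMPLE_ATTRIBUTE() fops. A self-contained sketch of the pairing, with hypothetical names:

    #include <linux/debugfs.h>

    static u64 example_reg;

    static int example_get(void *data, u64 *val)
    {
            *val = *(u64 *)data;
            return 0;
    }

    static int example_set(void *data, u64 val)
    {
            *(u64 *)data = val;
            return 0;
    }
    /* Unlike DEFINE_SIMPLE_ATTRIBUTE(), the generated fops are removal-aware. */
    DEFINE_DEBUGFS_ATTRIBUTE(example_fops, example_get, example_set,
                             "0x%016llx\n");

    static struct dentry *example_create(struct dentry *parent)
    {
            /* "_unsafe" is safe here precisely because the fops above carry
             * their own protection against removal races. */
            return debugfs_create_file_unsafe("example", 0600, parent,
                                              &example_reg, &example_fops);
    }
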
diff --git a/drivers/misc/cxl/file.c b/drivers/misc/cxl/file.c
index 77080cc5fa0a..859959f19f10 100644
--- a/drivers/misc/cxl/file.c
+++ b/drivers/misc/cxl/file.c
@@ -86,9 +86,12 @@ static int __afu_open(struct inode *inode, struct file *file, bool master)
goto err_put_afu;
}
- if ((rc = cxl_context_init(ctx, afu, master, inode->i_mapping)))
+ rc = cxl_context_init(ctx, afu, master);
+ if (rc)
goto err_put_afu;
+ cxl_context_set_mapping(ctx, inode->i_mapping);
+
pr_devel("afu_open pe: %i\n", ctx->pe);
file->private_data = ctx;
cxl_ctx_get();
diff --git a/drivers/misc/cxl/guest.c b/drivers/misc/cxl/guest.c
index 3e102cd6ed91..e04bc4ddfd74 100644
--- a/drivers/misc/cxl/guest.c
+++ b/drivers/misc/cxl/guest.c
@@ -887,7 +887,7 @@ static void afu_handle_errstate(struct work_struct *work)
afu_guest->previous_state == H_STATE_PERM_UNAVAILABLE)
return;
- if (afu_guest->handle_err == true)
+ if (afu_guest->handle_err)
schedule_delayed_work(&afu_guest->work_err,
msecs_to_jiffies(3000));
}
diff --git a/drivers/misc/cxl/irq.c b/drivers/misc/cxl/irq.c
index dec60f58a767..1a402bbed687 100644
--- a/drivers/misc/cxl/irq.c
+++ b/drivers/misc/cxl/irq.c
@@ -104,7 +104,7 @@ irqreturn_t cxl_irq(int irq, struct cxl_context *ctx, struct cxl_irq_info *irq_i
} else {
spin_lock(&ctx->lock);
ctx->afu_err = irq_info->afu_err;
- ctx->pending_afu_err = 1;
+ ctx->pending_afu_err = true;
spin_unlock(&ctx->lock);
wake_up_all(&ctx->wq);
diff --git a/drivers/misc/cxl/native.c b/drivers/misc/cxl/native.c
index a217a74ccc98..09505f432eda 100644
--- a/drivers/misc/cxl/native.c
+++ b/drivers/misc/cxl/native.c
@@ -10,7 +10,6 @@
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/slab.h>
-#include <linux/sched.h>
#include <linux/mutex.h>
#include <linux/mm.h>
#include <linux/uaccess.h>
@@ -54,7 +53,7 @@ static int afu_control(struct cxl_afu *afu, u64 command, u64 clear,
AFU_Cntl | command);
cpu_relax();
AFU_Cntl = cxl_p2n_read(afu, CXL_AFU_Cntl_An);
- };
+ }
if (AFU_Cntl & CXL_AFU_Cntl_An_RA) {
/*
@@ -167,7 +166,7 @@ int cxl_psl_purge(struct cxl_afu *afu)
cpu_relax();
}
PSL_CNTL = cxl_p1n_read(afu, CXL_PSL_SCNTL_An);
- };
+ }
end = local_clock();
pr_devel("PSL purged in %lld ns\n", end - start);
@@ -931,9 +930,18 @@ static irqreturn_t native_irq_multiplexed(int irq, void *data)
struct cxl_afu *afu = data;
struct cxl_context *ctx;
struct cxl_irq_info irq_info;
- int ph = cxl_p2n_read(afu, CXL_PSL_PEHandle_An) & 0xffff;
- int ret;
-
+ u64 phreg = cxl_p2n_read(afu, CXL_PSL_PEHandle_An);
+ int ph, ret;
+
+ /* check if eeh kicked in while the interrupt was in flight */
+ if (unlikely(phreg == ~0ULL)) {
+ dev_warn(&afu->dev,
+ "Ignoring slice interrupt(%d) due to fenced card",
+ irq);
+ return IRQ_HANDLED;
+ }
+ /* Mask the pe-handle from register value */
+ ph = phreg & 0xffff;
if ((ret = native_get_irq_info(afu, &irq_info))) {
WARN(1, "Unable to get CXL IRQ Info: %i\n", ret);
return fail_psl_irq(afu, &irq_info);
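
The ~0ULL test works because a PE handle register can never legitimately read as all ones, while an EEH-fenced card returns all ones for every MMIO load. A sketch of the same probe on a hypothetical device (readq() assumes a 64-bit build):

    #include <linux/interrupt.h>
    #include <linux/io.h>

    struct example_dev {
            void __iomem *regs;
    };

    #define EXAMPLE_STATUS  0x10    /* hypothetical register offset */

    static irqreturn_t example_irq(int irq, void *data)
    {
            struct example_dev *dev = data;
            u64 status = readq(dev->regs + EXAMPLE_STATUS);

            /* All ones means the card is fenced: drop the interrupt and
             * let EEH recovery reset the device. */
            if (unlikely(status == ~0ULL))
                    return IRQ_HANDLED;

            /* ... normal servicing ... */
            return IRQ_HANDLED;
    }
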
diff --git a/drivers/misc/cxl/pci.c b/drivers/misc/cxl/pci.c
index e96be9ca4e60..80a87ab25b83 100644
--- a/drivers/misc/cxl/pci.c
+++ b/drivers/misc/cxl/pci.c
@@ -1921,7 +1921,7 @@ static pci_ers_result_t cxl_pci_slot_reset(struct pci_dev *pdev)
goto err;
ctx = cxl_dev_context_init(afu_dev);
- if (!ctx)
+ if (IS_ERR(ctx))
goto err;
afu_dev->dev.archdata.cxl_ctx = ctx;
diff --git a/drivers/misc/cxl/phb.c b/drivers/misc/cxl/phb.c
index 0935d44c1770..6ec69ada19f4 100644
--- a/drivers/misc/cxl/phb.c
+++ b/drivers/misc/cxl/phb.c
@@ -20,7 +20,7 @@ bool _cxl_pci_associate_default_context(struct pci_dev *dev, struct cxl_afu *afu
* in the virtual phb, we'll need a default context to attach them to.
*/
ctx = cxl_dev_context_init(dev);
- if (!ctx)
+ if (IS_ERR(ctx))
return false;
dev->dev.archdata.cxl_ctx = ctx;
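
Both hunks fix the same bug class: cxl_dev_context_init() reports failure as an ERR_PTR() encoding, never as NULL, so the old `if (!ctx)` tests could not fire. A minimal sketch of the convention:

    #include <linux/err.h>
    #include <linux/slab.h>

    struct example { int id; };

    static struct example *example_get(void)
    {
            struct example *e = kzalloc(sizeof(*e), GFP_KERNEL);

            if (!e)
                    return ERR_PTR(-ENOMEM);  /* error encoded in pointer */
            return e;
    }

    static int example_use(void)
    {
            struct example *e = example_get();

            if (IS_ERR(e))          /* "if (!e)" would miss ERR_PTR values */
                    return PTR_ERR(e);
            kfree(e);
            return 0;
    }
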
diff --git a/drivers/misc/genwqe/card_base.h b/drivers/misc/genwqe/card_base.h
index cb851c14ca4b..5813b5f25006 100644
--- a/drivers/misc/genwqe/card_base.h
+++ b/drivers/misc/genwqe/card_base.h
@@ -41,7 +41,6 @@
#include "genwqe_driver.h"
#define GENWQE_MSI_IRQS 4 /* Just one supported, no MSIx */
-#define GENWQE_FLAG_MSI_ENABLED (1 << 0)
#define GENWQE_MAX_VFS 15 /* maximum 15 VFs are possible */
#define GENWQE_MAX_FUNCS 16 /* 1 PF and 15 VFs */
diff --git a/drivers/misc/genwqe/card_utils.c b/drivers/misc/genwqe/card_utils.c
index fc2794b513fa..147b83011b58 100644
--- a/drivers/misc/genwqe/card_utils.c
+++ b/drivers/misc/genwqe/card_utils.c
@@ -740,13 +740,10 @@ int genwqe_read_softreset(struct genwqe_dev *cd)
int genwqe_set_interrupt_capability(struct genwqe_dev *cd, int count)
{
int rc;
- struct pci_dev *pci_dev = cd->pci_dev;
- rc = pci_enable_msi_range(pci_dev, 1, count);
+ rc = pci_alloc_irq_vectors(cd->pci_dev, 1, count, PCI_IRQ_MSI);
if (rc < 0)
return rc;
-
- cd->flags |= GENWQE_FLAG_MSI_ENABLED;
return 0;
}
@@ -756,12 +753,7 @@ int genwqe_set_interrupt_capability(struct genwqe_dev *cd, int count)
*/
void genwqe_reset_interrupt_capability(struct genwqe_dev *cd)
{
- struct pci_dev *pci_dev = cd->pci_dev;
-
- if (cd->flags & GENWQE_FLAG_MSI_ENABLED) {
- pci_disable_msi(pci_dev);
- cd->flags &= ~GENWQE_FLAG_MSI_ENABLED;
- }
+ pci_free_irq_vectors(cd->pci_dev);
}
/**
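
pci_alloc_irq_vectors()/pci_free_irq_vectors() replace the per-type enable/disable pairs, which is why the GENWQE_FLAG_MSI_ENABLED bookkeeping could be deleted along with the define in card_base.h: the free side tears down whatever mode the allocation granted. A hedged sketch of the new API for a driver wanting 1 to 4 MSI vectors:

    #include <linux/pci.h>

    static int example_setup_irqs(struct pci_dev *pdev)
    {
            /* Returns the number of vectors allocated (1..4 here, MSI only)
             * or a negative errno; no separate "enabled" flag is needed. */
            int nvec = pci_alloc_irq_vectors(pdev, 1, 4, PCI_IRQ_MSI);

            return nvec < 0 ? nvec : 0;
    }

    static void example_teardown_irqs(struct pci_dev *pdev)
    {
            /* Releases MSI, MSI-X or legacy setup alike. */
            pci_free_irq_vectors(pdev);
    }
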
diff --git a/drivers/misc/ibmasm/module.c b/drivers/misc/ibmasm/module.c
index 6b3bf9ab051d..c5a456b0a564 100644
--- a/drivers/misc/ibmasm/module.c
+++ b/drivers/misc/ibmasm/module.c
@@ -170,7 +170,7 @@ static void ibmasm_remove_one(struct pci_dev *pdev)
ibmasm_unregister_uart(sp);
dbg("Sending OS down message\n");
if (ibmasm_send_os_state(sp, SYSTEM_STATE_OS_DOWN))
- err("failed to get repsonse to 'Send OS State' command\n");
+ err("failed to get response to 'Send OS State' command\n");
dbg("Disabling heartbeats\n");
ibmasm_heartbeat_exit(sp);
dbg("Disabling interrupts\n");
diff --git a/drivers/misc/lkdtm.h b/drivers/misc/lkdtm.h
index fdf954c2107f..cfa1039c62e7 100644
--- a/drivers/misc/lkdtm.h
+++ b/drivers/misc/lkdtm.h
@@ -21,6 +21,8 @@ void lkdtm_SPINLOCKUP(void);
void lkdtm_HUNG_TASK(void);
void lkdtm_ATOMIC_UNDERFLOW(void);
void lkdtm_ATOMIC_OVERFLOW(void);
+void lkdtm_CORRUPT_LIST_ADD(void);
+void lkdtm_CORRUPT_LIST_DEL(void);
/* lkdtm_heap.c */
void lkdtm_OVERWRITE_ALLOCATION(void);
diff --git a/drivers/misc/lkdtm_bugs.c b/drivers/misc/lkdtm_bugs.c
index 182ae1894b32..91edd0b55e5c 100644
--- a/drivers/misc/lkdtm_bugs.c
+++ b/drivers/misc/lkdtm_bugs.c
@@ -5,8 +5,13 @@
* test source files.
*/
#include "lkdtm.h"
+#include <linux/list.h>
#include <linux/sched.h>
+struct lkdtm_list {
+ struct list_head node;
+};
+
/*
* Make sure our attempts to overrun the kernel stack don't trigger
* a compiler warning when CONFIG_FRAME_WARN is set. Then make sure we
@@ -80,7 +85,8 @@ noinline void lkdtm_CORRUPT_STACK(void)
/* Use default char array length that triggers stack protection. */
char data[8];
- memset((void *)data, 0, 64);
+ memset((void *)data, 'a', 64);
+ pr_info("Corrupted stack with '%16s'...\n", data);
}
void lkdtm_UNALIGNED_LOAD_STORE_WRITE(void)
@@ -146,3 +152,66 @@ void lkdtm_ATOMIC_OVERFLOW(void)
pr_info("attempting bad atomic overflow\n");
atomic_inc(&over);
}
+
+void lkdtm_CORRUPT_LIST_ADD(void)
+{
+ /*
+ * Initially, an empty list via LIST_HEAD:
+ * test_head.next = &test_head
+ * test_head.prev = &test_head
+ */
+ LIST_HEAD(test_head);
+ struct lkdtm_list good, bad;
+ void *target[2] = { };
+ void *redirection = &target;
+
+ pr_info("attempting good list addition\n");
+
+ /*
+ * Adding to the list performs these actions:
+ * test_head.next->prev = &good.node
+ * good.node.next = test_head.next
+ * good.node.prev = &test_head
+ * test_head.next = &good.node
+ */
+ list_add(&good.node, &test_head);
+
+ pr_info("attempting corrupted list addition\n");
+ /*
+ * In simulating this "write what where" primitive, the "what" is
+ * &bad.node itself, and the "where" is the address held
+ * by "redirection".
+ */
+ test_head.next = redirection;
+ list_add(&bad.node, &test_head);
+
+ if (target[0] == NULL && target[1] == NULL)
+ pr_err("Overwrite did not happen, but no BUG?!\n");
+ else
+ pr_err("list_add() corruption not detected!\n");
+}
+
+void lkdtm_CORRUPT_LIST_DEL(void)
+{
+ LIST_HEAD(test_head);
+ struct lkdtm_list item;
+ void *target[2] = { };
+ void *redirection = &target;
+
+ list_add(&item.node, &test_head);
+
+ pr_info("attempting good list removal\n");
+ list_del(&item.node);
+
+ pr_info("attempting corrupted list removal\n");
+ list_add(&item.node, &test_head);
+
+ /* As with the list_add() test above, this corrupts "next". */
+ item.node.next = redirection;
+ list_del(&item.node);
+
+ if (target[0] == NULL && target[1] == NULL)
+ pr_err("Overwrite did not happen, but no BUG?!\n");
+ else
+ pr_err("list_del() corruption not detected!\n");
+}
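
Both tests only report when CONFIG_DEBUG_LIST is enabled; the validation they exercise lives in lib/list_debug.c. A simplified, non-verbatim sketch of the list_add()-side check that the redirected test_head.next is expected to fail:

    #include <linux/bug.h>
    #include <linux/list.h>

    /* Simplified sketch of the CONFIG_DEBUG_LIST sanity check; the real
     * code is in lib/list_debug.c. */
    static bool example_list_add_valid(struct list_head *new,
                                       struct list_head *prev,
                                       struct list_head *next)
    {
            /* The redirected test_head.next above fails next->prev == prev. */
            return !WARN(next->prev != prev || prev->next != next,
                         "list_add corruption: new=%p prev=%p next=%p\n",
                         new, prev, next);
    }

At runtime, each test fires when its name is written to /sys/kernel/debug/provoke-crash/DIRECT, once the two CRASHTYPE() entries below are registered.
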
diff --git a/drivers/misc/lkdtm_core.c b/drivers/misc/lkdtm_core.c
index f9154b8d67f6..7eeb71a75549 100644
--- a/drivers/misc/lkdtm_core.c
+++ b/drivers/misc/lkdtm_core.c
@@ -197,6 +197,8 @@ struct crashtype crashtypes[] = {
CRASHTYPE(EXCEPTION),
CRASHTYPE(LOOP),
CRASHTYPE(OVERFLOW),
+ CRASHTYPE(CORRUPT_LIST_ADD),
+ CRASHTYPE(CORRUPT_LIST_DEL),
CRASHTYPE(CORRUPT_STACK),
CRASHTYPE(UNALIGNED_LOAD_STORE_WRITE),
CRASHTYPE(OVERWRITE_ALLOCATION),
diff --git a/drivers/misc/lkdtm_perms.c b/drivers/misc/lkdtm_perms.c
index 45f1c0f96612..c7635a79341f 100644
--- a/drivers/misc/lkdtm_perms.c
+++ b/drivers/misc/lkdtm_perms.c
@@ -60,15 +60,18 @@ static noinline void execute_location(void *dst, bool write)
static void execute_user_location(void *dst)
{
+ int copied;
+
/* Intentionally crossing kernel/user memory boundary. */
void (*func)(void) = dst;
pr_info("attempting ok execution at %p\n", do_nothing);
do_nothing();
- if (copy_to_user((void __user *)dst, do_nothing, EXEC_SIZE))
+ copied = access_process_vm(current, (unsigned long)dst, do_nothing,
+ EXEC_SIZE, FOLL_WRITE);
+ if (copied < EXEC_SIZE)
return;
- flush_icache_range((unsigned long)dst, (unsigned long)dst + EXEC_SIZE);
pr_info("attempting bad execution at %p\n", func);
func();
}
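
access_process_vm() reaches the destination through the get_user_pages machinery rather than the direct-copy path, and FOLL_WRITE requests a writable mapping; the return value is the byte count actually copied, hence the `< EXEC_SIZE` check. A sketch of the call shape under those assumptions (example_poke_user() is hypothetical):

    #include <linux/mm.h>
    #include <linux/sched.h>

    /* Write "len" bytes from "src" into a user address of the current
     * task. Returns 0 on a full copy, -EFAULT otherwise. */
    static int example_poke_user(unsigned long uaddr, void *src, int len)
    {
            int copied = access_process_vm(current, uaddr, src, len,
                                           FOLL_WRITE);

            return copied == len ? 0 : -EFAULT;
    }
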
diff --git a/drivers/misc/mei/amthif.c b/drivers/misc/mei/amthif.c
index 7ae89b4a21d5..466afb2611c6 100644
--- a/drivers/misc/mei/amthif.c
+++ b/drivers/misc/mei/amthif.c
@@ -144,7 +144,7 @@ int mei_amthif_run_next_cmd(struct mei_device *dev)
dev->iamthif_state = MEI_IAMTHIF_WRITING;
cl->fp = cb->fp;
- ret = mei_cl_write(cl, cb, false);
+ ret = mei_cl_write(cl, cb);
if (ret < 0)
return ret;
diff --git a/drivers/misc/mei/bus-fixup.c b/drivers/misc/mei/bus-fixup.c
index 75b9d4ac8b1e..18e05ca7584f 100644
--- a/drivers/misc/mei/bus-fixup.c
+++ b/drivers/misc/mei/bus-fixup.c
@@ -38,6 +38,9 @@ static const uuid_le mei_nfc_info_guid = MEI_UUID_NFC_INFO;
#define MEI_UUID_WD UUID_LE(0x05B79A6F, 0x4628, 0x4D7F, \
0x89, 0x9D, 0xA9, 0x15, 0x14, 0xCB, 0x32, 0xAB)
+#define MEI_UUID_MKHIF_FIX UUID_LE(0x55213584, 0x9a29, 0x4916, \
+ 0xba, 0xdf, 0xf, 0xb7, 0xed, 0x68, 0x2a, 0xeb)
+
#define MEI_UUID_ANY NULL_UUID_LE
/**
@@ -69,6 +72,97 @@ static void blacklist(struct mei_cl_device *cldev)
cldev->do_match = 0;
}
+#define OSTYPE_LINUX 2
+struct mei_os_ver {
+ __le16 build;
+ __le16 reserved1;
+ u8 os_type;
+ u8 major;
+ u8 minor;
+ u8 reserved2;
+} __packed;
+
+#define MKHI_FEATURE_PTT 0x10
+
+struct mkhi_rule_id {
+ __le16 rule_type;
+ u8 feature_id;
+ u8 reserved;
+} __packed;
+
+struct mkhi_fwcaps {
+ struct mkhi_rule_id id;
+ u8 len;
+ u8 data[0];
+} __packed;
+
+#define MKHI_FWCAPS_GROUP_ID 0x3
+#define MKHI_FWCAPS_SET_OS_VER_APP_RULE_CMD 6
+struct mkhi_msg_hdr {
+ u8 group_id;
+ u8 command;
+ u8 reserved;
+ u8 result;
+} __packed;
+
+struct mkhi_msg {
+ struct mkhi_msg_hdr hdr;
+ u8 data[0];
+} __packed;
+
+static int mei_osver(struct mei_cl_device *cldev)
+{
+ int ret;
+ const size_t size = sizeof(struct mkhi_msg_hdr) +
+ sizeof(struct mkhi_fwcaps) +
+ sizeof(struct mei_os_ver);
+ size_t length = 8;
+ char buf[size];
+ struct mkhi_msg *req;
+ struct mkhi_fwcaps *fwcaps;
+ struct mei_os_ver *os_ver;
+ unsigned int mode = MEI_CL_IO_TX_BLOCKING | MEI_CL_IO_TX_INTERNAL;
+
+ memset(buf, 0, size);
+
+ req = (struct mkhi_msg *)buf;
+ req->hdr.group_id = MKHI_FWCAPS_GROUP_ID;
+ req->hdr.command = MKHI_FWCAPS_SET_OS_VER_APP_RULE_CMD;
+
+ fwcaps = (struct mkhi_fwcaps *)req->data;
+
+ fwcaps->id.rule_type = 0x0;
+ fwcaps->id.feature_id = MKHI_FEATURE_PTT;
+ fwcaps->len = sizeof(*os_ver);
+ os_ver = (struct mei_os_ver *)fwcaps->data;
+ os_ver->os_type = OSTYPE_LINUX;
+
+ ret = __mei_cl_send(cldev->cl, buf, size, mode);
+ if (ret < 0)
+ return ret;
+
+ ret = __mei_cl_recv(cldev->cl, buf, length, 0);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static void mei_mkhi_fix(struct mei_cl_device *cldev)
+{
+ int ret;
+
+ ret = mei_cldev_enable(cldev);
+ if (ret)
+ return;
+
+ ret = mei_osver(cldev);
+ if (ret)
+ dev_err(&cldev->dev, "OS version command failed %d\n", ret);
+
+ mei_cldev_disable(cldev);
+}
+
/**
* mei_wd - wd client on the bus, change protocol version
* as the API has changed.
@@ -162,7 +256,8 @@ static int mei_nfc_if_version(struct mei_cl *cl,
WARN_ON(mutex_is_locked(&bus->device_lock));
- ret = __mei_cl_send(cl, (u8 *)&cmd, sizeof(struct mei_nfc_cmd), 1);
+ ret = __mei_cl_send(cl, (u8 *)&cmd, sizeof(struct mei_nfc_cmd),
+ MEI_CL_IO_TX_BLOCKING);
if (ret < 0) {
dev_err(bus->dev, "Could not send IF version cmd\n");
return ret;
@@ -177,7 +272,7 @@ static int mei_nfc_if_version(struct mei_cl *cl,
return -ENOMEM;
ret = 0;
- bytes_recv = __mei_cl_recv(cl, (u8 *)reply, if_version_length);
+ bytes_recv = __mei_cl_recv(cl, (u8 *)reply, if_version_length, 0);
if (bytes_recv < if_version_length) {
dev_err(bus->dev, "Could not read IF version\n");
ret = -EIO;
@@ -309,6 +404,7 @@ static struct mei_fixup {
MEI_FIXUP(MEI_UUID_NFC_INFO, blacklist),
MEI_FIXUP(MEI_UUID_NFC_HCI, mei_nfc),
MEI_FIXUP(MEI_UUID_WD, mei_wd),
+ MEI_FIXUP(MEI_UUID_MKHIF_FIX, mei_mkhi_fix),
};
/**
diff --git a/drivers/misc/mei/bus.c b/drivers/misc/mei/bus.c
index 8cac7ef9ad0d..0037153c80a6 100644
--- a/drivers/misc/mei/bus.c
+++ b/drivers/misc/mei/bus.c
@@ -36,12 +36,12 @@
* @cl: host client
* @buf: buffer to send
* @length: buffer length
- * @blocking: wait for write completion
+ * @mode: sending mode
*
* Return: written size in bytes or < 0 on error
*/
ssize_t __mei_cl_send(struct mei_cl *cl, u8 *buf, size_t length,
- bool blocking)
+ unsigned int mode)
{
struct mei_device *bus;
struct mei_cl_cb *cb;
@@ -80,9 +80,11 @@ ssize_t __mei_cl_send(struct mei_cl *cl, u8 *buf, size_t length,
goto out;
}
+ cb->internal = !!(mode & MEI_CL_IO_TX_INTERNAL);
+ cb->blocking = !!(mode & MEI_CL_IO_TX_BLOCKING);
memcpy(cb->buf.data, buf, length);
- rets = mei_cl_write(cl, cb, blocking);
+ rets = mei_cl_write(cl, cb);
out:
mutex_unlock(&bus->device_lock);
@@ -96,15 +98,18 @@ out:
* @cl: host client
* @buf: buffer to receive
* @length: buffer length
+ * @mode: io mode
*
* Return: read size in bytes or < 0 on error
*/
-ssize_t __mei_cl_recv(struct mei_cl *cl, u8 *buf, size_t length)
+ssize_t __mei_cl_recv(struct mei_cl *cl, u8 *buf, size_t length,
+ unsigned int mode)
{
struct mei_device *bus;
struct mei_cl_cb *cb;
size_t r_length;
ssize_t rets;
+ bool nonblock = !!(mode & MEI_CL_IO_RX_NONBLOCK);
if (WARN_ON(!cl || !cl->dev))
return -ENODEV;
@@ -125,6 +130,11 @@ ssize_t __mei_cl_recv(struct mei_cl *cl, u8 *buf, size_t length)
if (rets && rets != -EBUSY)
goto out;
+ if (nonblock) {
+ rets = -EAGAIN;
+ goto out;
+ }
+
/* wait on event only if there is no other waiter */
/* synchronized under device mutex */
if (!waitqueue_active(&cl->rx_wait)) {
@@ -185,14 +195,30 @@ ssize_t mei_cldev_send(struct mei_cl_device *cldev, u8 *buf, size_t length)
{
struct mei_cl *cl = cldev->cl;
- if (cl == NULL)
- return -ENODEV;
-
- return __mei_cl_send(cl, buf, length, 1);
+ return __mei_cl_send(cl, buf, length, MEI_CL_IO_TX_BLOCKING);
}
EXPORT_SYMBOL_GPL(mei_cldev_send);
/**
+ * mei_cldev_recv_nonblock - non-blocking client receive (read)
+ *
+ * @cldev: me client device
+ * @buf: buffer to receive
+ * @length: buffer length
+ *
+ * Return: read size in bytes or < 0 on error
+ * -EAGAIN if the function would block.
+ */
+ssize_t mei_cldev_recv_nonblock(struct mei_cl_device *cldev, u8 *buf,
+ size_t length)
+{
+ struct mei_cl *cl = cldev->cl;
+
+ return __mei_cl_recv(cl, buf, length, MEI_CL_IO_RX_NONBLOCK);
+}
+EXPORT_SYMBOL_GPL(mei_cldev_recv_nonblock);
+
+/**
* mei_cldev_recv - client receive (read)
*
* @cldev: me client device
@@ -205,39 +231,45 @@ ssize_t mei_cldev_recv(struct mei_cl_device *cldev, u8 *buf, size_t length)
{
struct mei_cl *cl = cldev->cl;
- if (cl == NULL)
- return -ENODEV;
-
- return __mei_cl_recv(cl, buf, length);
+ return __mei_cl_recv(cl, buf, length, 0);
}
EXPORT_SYMBOL_GPL(mei_cldev_recv);
/**
- * mei_cl_bus_event_work - dispatch rx event for a bus device
- * and schedule new work
+ * mei_cl_bus_rx_work - dispatch rx event for a bus device
*
* @work: work
*/
-static void mei_cl_bus_event_work(struct work_struct *work)
+static void mei_cl_bus_rx_work(struct work_struct *work)
{
struct mei_cl_device *cldev;
struct mei_device *bus;
- cldev = container_of(work, struct mei_cl_device, event_work);
+ cldev = container_of(work, struct mei_cl_device, rx_work);
bus = cldev->bus;
- if (cldev->event_cb)
- cldev->event_cb(cldev, cldev->events, cldev->event_context);
+ if (cldev->rx_cb)
+ cldev->rx_cb(cldev);
+
+ mutex_lock(&bus->device_lock);
+ mei_cl_read_start(cldev->cl, mei_cl_mtu(cldev->cl), NULL);
+ mutex_unlock(&bus->device_lock);
+}
+
+/**
+ * mei_cl_bus_notif_work - dispatch FW notif event for a bus device
+ *
+ * @work: work
+ */
+static void mei_cl_bus_notif_work(struct work_struct *work)
+{
+ struct mei_cl_device *cldev;
- cldev->events = 0;
+ cldev = container_of(work, struct mei_cl_device, notif_work);
- /* Prepare for the next read */
- if (cldev->events_mask & BIT(MEI_CL_EVENT_RX)) {
- mutex_lock(&bus->device_lock);
- mei_cl_read_start(cldev->cl, mei_cl_mtu(cldev->cl), NULL);
- mutex_unlock(&bus->device_lock);
- }
+ if (cldev->notif_cb)
+ cldev->notif_cb(cldev);
}
/**
@@ -252,18 +284,13 @@ bool mei_cl_bus_notify_event(struct mei_cl *cl)
{
struct mei_cl_device *cldev = cl->cldev;
- if (!cldev || !cldev->event_cb)
- return false;
-
- if (!(cldev->events_mask & BIT(MEI_CL_EVENT_NOTIF)))
+ if (!cldev || !cldev->notif_cb)
return false;
if (!cl->notify_ev)
return false;
- set_bit(MEI_CL_EVENT_NOTIF, &cldev->events);
-
- schedule_work(&cldev->event_work);
+ schedule_work(&cldev->notif_work);
cl->notify_ev = false;
@@ -271,7 +298,7 @@ bool mei_cl_bus_notify_event(struct mei_cl *cl)
}
/**
- * mei_cl_bus_rx_event - schedule rx event
+ * mei_cl_bus_rx_event - schedule rx event
*
* @cl: host client
*
@@ -282,66 +309,81 @@ bool mei_cl_bus_rx_event(struct mei_cl *cl)
{
struct mei_cl_device *cldev = cl->cldev;
- if (!cldev || !cldev->event_cb)
- return false;
-
- if (!(cldev->events_mask & BIT(MEI_CL_EVENT_RX)))
+ if (!cldev || !cldev->rx_cb)
return false;
- set_bit(MEI_CL_EVENT_RX, &cldev->events);
-
- schedule_work(&cldev->event_work);
+ schedule_work(&cldev->rx_work);
return true;
}
/**
- * mei_cldev_register_event_cb - register event callback
+ * mei_cldev_register_rx_cb - register Rx event callback
*
* @cldev: me client devices
- * @event_cb: callback function
- * @events_mask: requested events bitmask
- * @context: driver context data
+ * @rx_cb: callback function
*
* Return: 0 on success
* -EALREADY if a callback is already registered
* <0 on other errors
*/
-int mei_cldev_register_event_cb(struct mei_cl_device *cldev,
- unsigned long events_mask,
- mei_cldev_event_cb_t event_cb, void *context)
+int mei_cldev_register_rx_cb(struct mei_cl_device *cldev, mei_cldev_cb_t rx_cb)
{
struct mei_device *bus = cldev->bus;
int ret;
- if (cldev->event_cb)
+ if (!rx_cb)
+ return -EINVAL;
+ if (cldev->rx_cb)
return -EALREADY;
- cldev->events = 0;
- cldev->events_mask = events_mask;
- cldev->event_cb = event_cb;
- cldev->event_context = context;
- INIT_WORK(&cldev->event_work, mei_cl_bus_event_work);
+ cldev->rx_cb = rx_cb;
+ INIT_WORK(&cldev->rx_work, mei_cl_bus_rx_work);
- if (cldev->events_mask & BIT(MEI_CL_EVENT_RX)) {
- mutex_lock(&bus->device_lock);
- ret = mei_cl_read_start(cldev->cl, mei_cl_mtu(cldev->cl), NULL);
- mutex_unlock(&bus->device_lock);
- if (ret && ret != -EBUSY)
- return ret;
- }
+ mutex_lock(&bus->device_lock);
+ ret = mei_cl_read_start(cldev->cl, mei_cl_mtu(cldev->cl), NULL);
+ mutex_unlock(&bus->device_lock);
+ if (ret && ret != -EBUSY)
+ return ret;
- if (cldev->events_mask & BIT(MEI_CL_EVENT_NOTIF)) {
- mutex_lock(&bus->device_lock);
- ret = mei_cl_notify_request(cldev->cl, NULL, event_cb ? 1 : 0);
- mutex_unlock(&bus->device_lock);
- if (ret)
- return ret;
- }
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mei_cldev_register_rx_cb);
+
+/**
+ * mei_cldev_register_notif_cb - register FW notification event callback
+ *
+ * @cldev: me client devices
+ * @notif_cb: callback function
+ *
+ * Return: 0 on success
+ * -EALREADY if a callback is already registered
+ * <0 on other errors
+ */
+int mei_cldev_register_notif_cb(struct mei_cl_device *cldev,
+ mei_cldev_cb_t notif_cb)
+{
+ struct mei_device *bus = cldev->bus;
+ int ret;
+
+ if (!notif_cb)
+ return -EINVAL;
+
+ if (cldev->notif_cb)
+ return -EALREADY;
+
+ cldev->notif_cb = notif_cb;
+ INIT_WORK(&cldev->notif_work, mei_cl_bus_notif_work);
+
+ mutex_lock(&bus->device_lock);
+ ret = mei_cl_notify_request(cldev->cl, NULL, 1);
+ mutex_unlock(&bus->device_lock);
+ if (ret)
+ return ret;
return 0;
}
-EXPORT_SYMBOL_GPL(mei_cldev_register_event_cb);
+EXPORT_SYMBOL_GPL(mei_cldev_register_notif_cb);
/**
* mei_cldev_get_drvdata - driver data getter
@@ -403,7 +445,7 @@ EXPORT_SYMBOL_GPL(mei_cldev_ver);
*/
bool mei_cldev_enabled(struct mei_cl_device *cldev)
{
- return cldev->cl && mei_cl_is_connected(cldev->cl);
+ return mei_cl_is_connected(cldev->cl);
}
EXPORT_SYMBOL_GPL(mei_cldev_enabled);
@@ -423,14 +465,13 @@ int mei_cldev_enable(struct mei_cl_device *cldev)
cl = cldev->cl;
- if (!cl) {
+ if (cl->state == MEI_FILE_UNINITIALIZED) {
mutex_lock(&bus->device_lock);
- cl = mei_cl_alloc_linked(bus);
+ ret = mei_cl_link(cl);
mutex_unlock(&bus->device_lock);
- if (IS_ERR(cl))
- return PTR_ERR(cl);
+ if (ret)
+ return ret;
/* update pointers */
- cldev->cl = cl;
cl->cldev = cldev;
}
@@ -471,19 +512,17 @@ int mei_cldev_disable(struct mei_cl_device *cldev)
struct mei_cl *cl;
int err;
- if (!cldev || !cldev->cl)
+ if (!cldev)
return -ENODEV;
cl = cldev->cl;
bus = cldev->bus;
- cldev->event_cb = NULL;
-
mutex_lock(&bus->device_lock);
if (!mei_cl_is_connected(cl)) {
- dev_err(bus->dev, "Already disconnected");
+ dev_dbg(bus->dev, "Already disconnected\n");
err = 0;
goto out;
}
@@ -497,9 +536,6 @@ out:
mei_cl_flush_queues(cl, NULL);
mei_cl_unlink(cl);
- kfree(cl);
- cldev->cl = NULL;
-
mutex_unlock(&bus->device_lock);
return err;
}
@@ -629,9 +665,13 @@ static int mei_cl_device_remove(struct device *dev)
if (!cldev || !dev->driver)
return 0;
- if (cldev->event_cb) {
- cldev->event_cb = NULL;
- cancel_work_sync(&cldev->event_work);
+ if (cldev->rx_cb) {
+ cancel_work_sync(&cldev->rx_work);
+ cldev->rx_cb = NULL;
+ }
+ if (cldev->notif_cb) {
+ cancel_work_sync(&cldev->notif_work);
+ cldev->notif_cb = NULL;
}
cldrv = to_mei_cl_driver(dev->driver);
@@ -754,6 +794,7 @@ static void mei_cl_bus_dev_release(struct device *dev)
mei_me_cl_put(cldev->me_cl);
mei_dev_bus_put(cldev->bus);
+ kfree(cldev->cl);
kfree(cldev);
}
@@ -786,17 +827,25 @@ static struct mei_cl_device *mei_cl_bus_dev_alloc(struct mei_device *bus,
struct mei_me_client *me_cl)
{
struct mei_cl_device *cldev;
+ struct mei_cl *cl;
cldev = kzalloc(sizeof(struct mei_cl_device), GFP_KERNEL);
if (!cldev)
return NULL;
+ cl = mei_cl_allocate(bus);
+ if (!cl) {
+ kfree(cldev);
+ return NULL;
+ }
+
device_initialize(&cldev->dev);
cldev->dev.parent = bus->dev;
cldev->dev.bus = &mei_cl_bus_type;
cldev->dev.type = &mei_cl_device_type;
cldev->bus = mei_dev_bus_get(bus);
cldev->me_cl = mei_me_cl_get(me_cl);
+ cldev->cl = cl;
mei_cl_bus_set_name(cldev);
cldev->is_added = 0;
INIT_LIST_HEAD(&cldev->bus_list);
diff --git a/drivers/misc/mei/client.c b/drivers/misc/mei/client.c
index 6fe02350578d..391936c1aa04 100644
--- a/drivers/misc/mei/client.c
+++ b/drivers/misc/mei/client.c
@@ -425,7 +425,7 @@ static inline void mei_io_list_free(struct mei_cl_cb *list, struct mei_cl *cl)
*
* @cl: host client
* @length: size of the buffer
- * @type: operation type
+ * @fop_type: operation type
* @fp: associated file pointer (might be NULL)
*
* Return: cb on success and NULL on failure
@@ -459,7 +459,7 @@ struct mei_cl_cb *mei_cl_alloc_cb(struct mei_cl *cl, size_t length,
*
* @cl: host client
* @length: size of the buffer
- * @type: operation type
+ * @fop_type: operation type
* @fp: associated file pointer (might be NULL)
*
* Return: cb on success and NULL on failure
@@ -571,7 +571,7 @@ void mei_cl_init(struct mei_cl *cl, struct mei_device *dev)
INIT_LIST_HEAD(&cl->rd_pending);
INIT_LIST_HEAD(&cl->link);
cl->writing_state = MEI_IDLE;
- cl->state = MEI_FILE_INITIALIZING;
+ cl->state = MEI_FILE_UNINITIALIZED;
cl->dev = dev;
}
@@ -672,7 +672,12 @@ int mei_cl_unlink(struct mei_cl *cl)
list_del_init(&cl->link);
- cl->state = MEI_FILE_INITIALIZING;
+ cl->state = MEI_FILE_UNINITIALIZED;
+ cl->writing_state = MEI_IDLE;
+
+ WARN_ON(!list_empty(&cl->rd_completed) ||
+ !list_empty(&cl->rd_pending) ||
+ !list_empty(&cl->link));
return 0;
}
@@ -686,7 +691,7 @@ void mei_host_client_init(struct mei_device *dev)
pm_runtime_mark_last_busy(dev->dev);
dev_dbg(dev->dev, "rpm: autosuspend\n");
- pm_runtime_autosuspend(dev->dev);
+ pm_request_autosuspend(dev->dev);
}
/**
@@ -756,7 +761,7 @@ void mei_cl_set_disconnected(struct mei_cl *cl)
struct mei_device *dev = cl->dev;
if (cl->state == MEI_FILE_DISCONNECTED ||
- cl->state == MEI_FILE_INITIALIZING)
+ cl->state <= MEI_FILE_INITIALIZING)
return;
cl->state = MEI_FILE_DISCONNECTED;
@@ -1598,18 +1603,17 @@ int mei_cl_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb,
*
* @cl: host client
* @cb: write callback with filled data
- * @blocking: block until completed
*
* Return: number of bytes sent on success, <0 on failure.
*/
-int mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb, bool blocking)
+int mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb)
{
struct mei_device *dev;
struct mei_msg_data *buf;
struct mei_msg_hdr mei_hdr;
int size;
int rets;
-
+ bool blocking;
if (WARN_ON(!cl || !cl->dev))
return -ENODEV;
@@ -1621,6 +1625,7 @@ int mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb, bool blocking)
buf = &cb->buf;
size = buf->size;
+ blocking = cb->blocking;
cl_dbg(dev, cl, "size=%d\n", size);
diff --git a/drivers/misc/mei/client.h b/drivers/misc/mei/client.h
index d2bfabecd882..f2545af9be7b 100644
--- a/drivers/misc/mei/client.h
+++ b/drivers/misc/mei/client.h
@@ -219,7 +219,7 @@ int mei_cl_irq_connect(struct mei_cl *cl, struct mei_cl_cb *cb,
int mei_cl_read_start(struct mei_cl *cl, size_t length, const struct file *fp);
int mei_cl_irq_read_msg(struct mei_cl *cl, struct mei_msg_hdr *hdr,
struct mei_cl_cb *cmpl_list);
-int mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb, bool blocking);
+int mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb);
int mei_cl_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb,
struct mei_cl_cb *cmpl_list);
diff --git a/drivers/misc/mei/hw-me-regs.h b/drivers/misc/mei/hw-me-regs.h
index 7ad15d678878..c8307e8b4c16 100644
--- a/drivers/misc/mei/hw-me-regs.h
+++ b/drivers/misc/mei/hw-me-regs.h
@@ -122,6 +122,8 @@
#define MEI_DEV_ID_SPT_H 0xA13A /* Sunrise Point H */
#define MEI_DEV_ID_SPT_H_2 0xA13B /* Sunrise Point H 2 */
+#define MEI_DEV_ID_LBG 0xA1BA /* Lewisburg (SPT) */
+
#define MEI_DEV_ID_BXT_M 0x1A9A /* Broxton M */
#define MEI_DEV_ID_APL_I 0x5A9A /* Apollo Lake I */
diff --git a/drivers/misc/mei/hw-me.c b/drivers/misc/mei/hw-me.c
index 56c2101e80ad..a05375a3338a 100644
--- a/drivers/misc/mei/hw-me.c
+++ b/drivers/misc/mei/hw-me.c
@@ -246,6 +246,36 @@ static inline enum mei_pg_state mei_me_pg_state(struct mei_device *dev)
return hw->pg_state;
}
+static inline u32 me_intr_src(u32 hcsr)
+{
+ return hcsr & H_CSR_IS_MASK;
+}
+
+/**
+ * me_intr_disable - disables mei device interrupts
+ * using supplied hcsr register value.
+ *
+ * @dev: the device structure
+ * @hcsr: supplied hcsr register value
+ */
+static inline void me_intr_disable(struct mei_device *dev, u32 hcsr)
+{
+ hcsr &= ~H_CSR_IE_MASK;
+ mei_hcsr_set(dev, hcsr);
+}
+
+/**
+ * mei_me_intr_clear - clear and stop interrupts
+ *
+ * @dev: the device structure
+ * @hcsr: supplied hcsr register value
+ */
+static inline void me_intr_clear(struct mei_device *dev, u32 hcsr)
+{
+ if (me_intr_src(hcsr))
+ mei_hcsr_write(dev, hcsr);
+}
+
/**
* mei_me_intr_clear - clear and stop interrupts
*
@@ -255,8 +285,7 @@ static void mei_me_intr_clear(struct mei_device *dev)
{
u32 hcsr = mei_hcsr_read(dev);
- if (hcsr & H_CSR_IS_MASK)
- mei_hcsr_write(dev, hcsr);
+ me_intr_clear(dev, hcsr);
}
/**
* mei_me_intr_enable - enables mei device interrupts
@@ -280,8 +309,19 @@ static void mei_me_intr_disable(struct mei_device *dev)
{
u32 hcsr = mei_hcsr_read(dev);
- hcsr &= ~H_CSR_IE_MASK;
- mei_hcsr_set(dev, hcsr);
+ me_intr_disable(dev, hcsr);
+}
+
+/**
+ * mei_me_synchronize_irq - wait for pending IRQ handlers
+ *
+ * @dev: the device structure
+ */
+static void mei_me_synchronize_irq(struct mei_device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
+
+ synchronize_irq(pdev->irq);
}
/**
@@ -450,7 +490,7 @@ static size_t mei_me_hbuf_max_len(const struct mei_device *dev)
/**
- * mei_me_write_message - writes a message to mei device.
+ * mei_me_hbuf_write - writes a message to host hw buffer.
*
* @dev: the device structure
* @header: mei HECI header of message
@@ -458,9 +498,9 @@ static size_t mei_me_hbuf_max_len(const struct mei_device *dev)
*
* Return: -EIO if write has failed
*/
-static int mei_me_write_message(struct mei_device *dev,
- struct mei_msg_hdr *header,
- unsigned char *buf)
+static int mei_me_hbuf_write(struct mei_device *dev,
+ struct mei_msg_hdr *header,
+ const unsigned char *buf)
{
unsigned long rem;
unsigned long length = header->length;
@@ -956,13 +996,14 @@ static void mei_me_pg_legacy_intr(struct mei_device *dev)
* mei_me_d0i3_intr - perform d0i3 processing in interrupt thread handler
*
* @dev: the device structure
+ * @intr_source: interrupt source
*/
-static void mei_me_d0i3_intr(struct mei_device *dev)
+static void mei_me_d0i3_intr(struct mei_device *dev, u32 intr_source)
{
struct mei_me_hw *hw = to_me_hw(dev);
if (dev->pg_event == MEI_PG_EVENT_INTR_WAIT &&
- (hw->intr_source & H_D0I3C_IS)) {
+ (intr_source & H_D0I3C_IS)) {
dev->pg_event = MEI_PG_EVENT_INTR_RECEIVED;
if (hw->pg_state == MEI_PG_ON) {
hw->pg_state = MEI_PG_OFF;
@@ -981,7 +1022,7 @@ static void mei_me_d0i3_intr(struct mei_device *dev)
wake_up(&dev->wait_pg);
}
- if (hw->pg_state == MEI_PG_ON && (hw->intr_source & H_IS)) {
+ if (hw->pg_state == MEI_PG_ON && (intr_source & H_IS)) {
/*
* HW sent some data and we are in D0i3, so
* we got here because of HW initiated exit from D0i3.
@@ -996,13 +1037,14 @@ static void mei_me_d0i3_intr(struct mei_device *dev)
* mei_me_pg_intr - perform pg processing in interrupt thread handler
*
* @dev: the device structure
+ * @intr_source: interrupt source
*/
-static void mei_me_pg_intr(struct mei_device *dev)
+static void mei_me_pg_intr(struct mei_device *dev, u32 intr_source)
{
struct mei_me_hw *hw = to_me_hw(dev);
if (hw->d0i3_supported)
- mei_me_d0i3_intr(dev);
+ mei_me_d0i3_intr(dev, intr_source);
else
mei_me_pg_legacy_intr(dev);
}
@@ -1121,19 +1163,16 @@ static int mei_me_hw_reset(struct mei_device *dev, bool intr_enable)
irqreturn_t mei_me_irq_quick_handler(int irq, void *dev_id)
{
struct mei_device *dev = (struct mei_device *)dev_id;
- struct mei_me_hw *hw = to_me_hw(dev);
u32 hcsr;
hcsr = mei_hcsr_read(dev);
- if (!(hcsr & H_CSR_IS_MASK))
+ if (!me_intr_src(hcsr))
return IRQ_NONE;
- hw->intr_source = hcsr & H_CSR_IS_MASK;
- dev_dbg(dev->dev, "interrupt source 0x%08X.\n", hw->intr_source);
-
- /* clear H_IS and H_D0I3C_IS bits in H_CSR to clear the interrupts */
- mei_hcsr_write(dev, hcsr);
+ dev_dbg(dev->dev, "interrupt source 0x%08X\n", me_intr_src(hcsr));
+ /* disable interrupts on device */
+ me_intr_disable(dev, hcsr);
return IRQ_WAKE_THREAD;
}
@@ -1152,11 +1191,16 @@ irqreturn_t mei_me_irq_thread_handler(int irq, void *dev_id)
struct mei_device *dev = (struct mei_device *) dev_id;
struct mei_cl_cb complete_list;
s32 slots;
+ u32 hcsr;
int rets = 0;
dev_dbg(dev->dev, "function called after ISR to handle the interrupt processing.\n");
/* initialize our complete list */
mutex_lock(&dev->device_lock);
+
+ hcsr = mei_hcsr_read(dev);
+ me_intr_clear(dev, hcsr);
+
mei_io_list_init(&complete_list);
/* check if ME wants a reset */
@@ -1166,7 +1210,7 @@ irqreturn_t mei_me_irq_thread_handler(int irq, void *dev_id)
goto end;
}
- mei_me_pg_intr(dev);
+ mei_me_pg_intr(dev, me_intr_src(hcsr));
/* check if we need to start the dev */
if (!mei_host_is_ready(dev)) {
@@ -1216,6 +1260,7 @@ irqreturn_t mei_me_irq_thread_handler(int irq, void *dev_id)
end:
dev_dbg(dev->dev, "interrupt thread end ret = %d\n", rets);
+ mei_me_intr_enable(dev);
mutex_unlock(&dev->device_lock);
return IRQ_HANDLED;
}
@@ -1238,12 +1283,13 @@ static const struct mei_hw_ops mei_me_hw_ops = {
.intr_clear = mei_me_intr_clear,
.intr_enable = mei_me_intr_enable,
.intr_disable = mei_me_intr_disable,
+ .synchronize_irq = mei_me_synchronize_irq,
.hbuf_free_slots = mei_me_hbuf_empty_slots,
.hbuf_is_ready = mei_me_hbuf_is_empty,
.hbuf_max_len = mei_me_hbuf_max_len,
- .write = mei_me_write_message,
+ .write = mei_me_hbuf_write,
.rdbuf_full_slots = mei_me_count_full_read_slots,
.read_hdr = mei_me_mecbrw_read,
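
Taken together, the hw-me.c changes move to a stricter threaded-IRQ discipline: the quick handler no longer caches the source bits in struct mei_me_hw, it only disables device interrupts and defers; the thread re-reads HCSR under device_lock, acks the sources, does the work, and re-arms on exit. A sketch of that discipline for a hypothetical device with one combined status/enable register:

    #include <linux/interrupt.h>
    #include <linux/io.h>

    struct example_dev {
            void __iomem *regs;
    };

    #define EXAMPLE_CSR     0x0     /* hypothetical: W1C sources + enable */
    #define EXAMPLE_IS_MASK 0x1
    #define EXAMPLE_IE_MASK 0x2

    static irqreturn_t example_quick(int irq, void *data)
    {
            struct example_dev *dev = data;
            u32 csr = readl(dev->regs + EXAMPLE_CSR);

            if (!(csr & EXAMPLE_IS_MASK))
                    return IRQ_NONE;        /* not ours */

            /* Drop IE without writing 1 to the W1C source bit; nothing is
             * cached for the thread, which re-reads the register itself. */
            writel(csr & ~(EXAMPLE_IE_MASK | EXAMPLE_IS_MASK),
                   dev->regs + EXAMPLE_CSR);
            return IRQ_WAKE_THREAD;
    }

    static irqreturn_t example_thread(int irq, void *data)
    {
            struct example_dev *dev = data;
            u32 csr = readl(dev->regs + EXAMPLE_CSR);

            writel(csr & EXAMPLE_IS_MASK, dev->regs + EXAMPLE_CSR); /* ack */
            /* ... service the device ... */
            writel(EXAMPLE_IE_MASK, dev->regs + EXAMPLE_CSR);       /* re-arm */
            return IRQ_HANDLED;
    }
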
diff --git a/drivers/misc/mei/hw-me.h b/drivers/misc/mei/hw-me.h
index 2ee14dc1b2ea..cf64847a35b9 100644
--- a/drivers/misc/mei/hw-me.h
+++ b/drivers/misc/mei/hw-me.h
@@ -51,14 +51,12 @@ struct mei_cfg {
*
* @cfg: per device generation config and ops
* @mem_addr: io memory address
- * @intr_source: interrupt source
* @pg_state: power gating state
* @d0i3_supported: di03 support
*/
struct mei_me_hw {
const struct mei_cfg *cfg;
void __iomem *mem_addr;
- u32 intr_source;
enum mei_pg_state pg_state;
bool d0i3_supported;
};
diff --git a/drivers/misc/mei/hw-txe.c b/drivers/misc/mei/hw-txe.c
index 60415a2bfcbd..e9f8c0aeec13 100644
--- a/drivers/misc/mei/hw-txe.c
+++ b/drivers/misc/mei/hw-txe.c
@@ -19,7 +19,7 @@
#include <linux/ktime.h>
#include <linux/delay.h>
#include <linux/kthread.h>
-#include <linux/irqreturn.h>
+#include <linux/interrupt.h>
#include <linux/pm_runtime.h>
#include <linux/mei.h>
@@ -441,6 +441,18 @@ static void mei_txe_intr_enable(struct mei_device *dev)
}
/**
+ * mei_txe_synchronize_irq - wait for pending IRQ handlers
+ *
+ * @dev: the device structure
+ */
+static void mei_txe_synchronize_irq(struct mei_device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
+
+ synchronize_irq(pdev->irq);
+}
+
+/**
* mei_txe_pending_interrupts - check if there are pending interrupts
* only Aliveness, Input ready, and output doorbell are of relevance
*
@@ -691,7 +703,8 @@ static void mei_txe_hw_config(struct mei_device *dev)
*/
static int mei_txe_write(struct mei_device *dev,
- struct mei_msg_hdr *header, unsigned char *buf)
+ struct mei_msg_hdr *header,
+ const unsigned char *buf)
{
struct mei_txe_hw *hw = to_txe_hw(dev);
unsigned long rem;
@@ -1167,6 +1180,7 @@ static const struct mei_hw_ops mei_txe_hw_ops = {
.intr_clear = mei_txe_intr_clear,
.intr_enable = mei_txe_intr_enable,
.intr_disable = mei_txe_intr_disable,
+ .synchronize_irq = mei_txe_synchronize_irq,
.hbuf_free_slots = mei_txe_hbuf_empty_slots,
.hbuf_is_ready = mei_txe_is_input_ready,
diff --git a/drivers/misc/mei/init.c b/drivers/misc/mei/init.c
index 9a9c2484d107..41e5760a6886 100644
--- a/drivers/misc/mei/init.c
+++ b/drivers/misc/mei/init.c
@@ -122,6 +122,10 @@ int mei_reset(struct mei_device *dev)
mei_dev_state_str(state), fw_sts_str);
}
+ mei_clear_interrupts(dev);
+
+ mei_synchronize_irq(dev);
+
/* we're already in reset, cancel the init timer
* if the reset was called due to the hbm protocol error
* we need to call it before hw start
@@ -273,8 +277,6 @@ int mei_restart(struct mei_device *dev)
mutex_lock(&dev->device_lock);
- mei_clear_interrupts(dev);
-
dev->dev_state = MEI_DEV_POWER_UP;
dev->reset_count = 0;
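
Clearing interrupts inside mei_reset() and then calling the new mei_synchronize_irq() hook gives the reset path a real guarantee: when both return, no ISR or IRQ thread is still running on another CPU, so the HBM teardown that follows cannot race a handler. The generic quiesce-then-wait shape, sketched with hypothetical helpers:

    #include <linux/interrupt.h>

    struct example_dev;                             /* opaque, hypothetical */
    void example_disable_and_clear_interrupts(struct example_dev *dev);

    static void example_reset_quiesce(struct example_dev *dev, int irq)
    {
            example_disable_and_clear_interrupts(dev);  /* device side */
            synchronize_irq(irq);   /* wait out handlers on other CPUs */
            /* Only now is it safe to tear down state the handlers touch. */
    }
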
diff --git a/drivers/misc/mei/interrupt.c b/drivers/misc/mei/interrupt.c
index 5a4893ce9c24..b584749bcc4a 100644
--- a/drivers/misc/mei/interrupt.c
+++ b/drivers/misc/mei/interrupt.c
@@ -118,7 +118,6 @@ int mei_cl_irq_read_msg(struct mei_cl *cl,
if (!mei_cl_is_connected(cl)) {
cl_dbg(dev, cl, "not connected\n");
- list_move_tail(&cb->list, &complete_list->list);
cb->status = -ENODEV;
goto discard;
}
@@ -128,8 +127,6 @@ int mei_cl_irq_read_msg(struct mei_cl *cl,
if (buf_sz < cb->buf_idx) {
cl_err(dev, cl, "message is too big len %d idx %zu\n",
mei_hdr->length, cb->buf_idx);
-
- list_move_tail(&cb->list, &complete_list->list);
cb->status = -EMSGSIZE;
goto discard;
}
@@ -137,8 +134,6 @@ int mei_cl_irq_read_msg(struct mei_cl *cl,
if (cb->buf.size < buf_sz) {
cl_dbg(dev, cl, "message overflow. size %zu len %d idx %zu\n",
cb->buf.size, mei_hdr->length, cb->buf_idx);
-
- list_move_tail(&cb->list, &complete_list->list);
cb->status = -EMSGSIZE;
goto discard;
}
@@ -158,6 +153,8 @@ int mei_cl_irq_read_msg(struct mei_cl *cl,
return 0;
discard:
+ if (cb)
+ list_move_tail(&cb->list, &complete_list->list);
mei_irq_discard_msg(dev, mei_hdr);
return 0;
}
diff --git a/drivers/misc/mei/main.c b/drivers/misc/mei/main.c
index fa50635512e8..e1bf54481fd6 100644
--- a/drivers/misc/mei/main.c
+++ b/drivers/misc/mei/main.c
@@ -322,7 +322,7 @@ static ssize_t mei_write(struct file *file, const char __user *ubuf,
goto out;
}
- rets = mei_cl_write(cl, cb, false);
+ rets = mei_cl_write(cl, cb);
out:
mutex_unlock(&dev->device_lock);
return rets;
@@ -653,7 +653,7 @@ static int mei_fasync(int fd, struct file *file, int band)
}
/**
- * fw_status_show - mei device attribute show method
+ * fw_status_show - mei device fw_status attribute show method
*
* @device: device pointer
* @attr: attribute pointer
@@ -684,8 +684,49 @@ static ssize_t fw_status_show(struct device *device,
}
static DEVICE_ATTR_RO(fw_status);
+/**
+ * hbm_ver_show - display HBM protocol version negotiated with FW
+ *
+ * @device: device pointer
+ * @attr: attribute pointer
+ * @buf: char out buffer
+ *
+ * Return: number of bytes printed into buf or error
+ */
+static ssize_t hbm_ver_show(struct device *device,
+ struct device_attribute *attr, char *buf)
+{
+ struct mei_device *dev = dev_get_drvdata(device);
+ struct hbm_version ver;
+
+ mutex_lock(&dev->device_lock);
+ ver = dev->version;
+ mutex_unlock(&dev->device_lock);
+
+ return sprintf(buf, "%u.%u\n", ver.major_version, ver.minor_version);
+}
+static DEVICE_ATTR_RO(hbm_ver);
+
+/**
+ * hbm_ver_drv_show - display HBM protocol version advertised by driver
+ *
+ * @device: device pointer
+ * @attr: attribute pointer
+ * @buf: char out buffer
+ *
+ * Return: number of bytes printed into buf or error
+ */
+static ssize_t hbm_ver_drv_show(struct device *device,
+ struct device_attribute *attr, char *buf)
+{
+ return sprintf(buf, "%u.%u\n", HBM_MAJOR_VERSION, HBM_MINOR_VERSION);
+}
+static DEVICE_ATTR_RO(hbm_ver_drv);
+
static struct attribute *mei_attrs[] = {
&dev_attr_fw_status.attr,
+ &dev_attr_hbm_ver.attr,
+ &dev_attr_hbm_ver_drv.attr,
NULL
};
ATTRIBUTE_GROUPS(mei);
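
Both attributes are stock DEVICE_ATTR_RO() users: the macro derives dev_attr_<name> from a matching <name>_show(), and ATTRIBUTE_GROUPS() produces the <name>_groups array handed to device registration. A minimal sketch with a hypothetical attribute:

    #include <linux/device.h>
    #include <linux/sysfs.h>

    static ssize_t example_ver_show(struct device *device,
                                    struct device_attribute *attr, char *buf)
    {
            /* Values are hypothetical; real code snapshots device state
             * under the appropriate lock, as hbm_ver_show() does above. */
            return sprintf(buf, "%u.%u\n", 2u, 0u);
    }
    static DEVICE_ATTR_RO(example_ver);     /* yields dev_attr_example_ver */

    static struct attribute *example_attrs[] = {
            &dev_attr_example_ver.attr,
            NULL
    };
    ATTRIBUTE_GROUPS(example);              /* yields example_groups */
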
diff --git a/drivers/misc/mei/mei_dev.h b/drivers/misc/mei/mei_dev.h
index 1169fd9e7d02..699693cd8c59 100644
--- a/drivers/misc/mei/mei_dev.h
+++ b/drivers/misc/mei/mei_dev.h
@@ -55,7 +55,8 @@ extern const uuid_le mei_amthif_guid;
/* File state */
enum file_state {
- MEI_FILE_INITIALIZING = 0,
+ MEI_FILE_UNINITIALIZED = 0,
+ MEI_FILE_INITIALIZING,
MEI_FILE_CONNECTING,
MEI_FILE_CONNECTED,
MEI_FILE_DISCONNECTING,
@@ -109,6 +110,21 @@ enum mei_cb_file_ops {
MEI_FOP_NOTIFY_STOP,
};
+/**
+ * enum mei_cl_io_mode - io mode between driver and fw
+ *
+ * @MEI_CL_IO_TX_BLOCKING: send is blocking
+ * @MEI_CL_IO_TX_INTERNAL: internal communication between driver and FW
+ *
+ * @MEI_CL_IO_RX_NONBLOCK: recv is non-blocking
+ */
+enum mei_cl_io_mode {
+ MEI_CL_IO_TX_BLOCKING = BIT(0),
+ MEI_CL_IO_TX_INTERNAL = BIT(1),
+
+ MEI_CL_IO_RX_NONBLOCK = BIT(2),
+};
+
/*
* Intel MEI message data struct
*/
@@ -169,6 +185,7 @@ struct mei_cl;
* @fp: pointer to file structure
* @status: io status of the cb
* @internal: communication between driver and FW flag
+ * @blocking: transmission blocking mode
* @completed: the transfer or reception has completed
*/
struct mei_cl_cb {
@@ -180,6 +197,7 @@ struct mei_cl_cb {
const struct file *fp;
int status;
u32 internal:1;
+ u32 blocking:1;
u32 completed:1;
};
@@ -253,6 +271,7 @@ struct mei_cl {
* @intr_clear : clear pending interrupts
* @intr_enable : enable interrupts
* @intr_disable : disable interrupts
+ * @synchronize_irq : synchronize irqs
*
* @hbuf_free_slots : query for write buffer empty slots
* @hbuf_is_ready : query if write buffer is empty
@@ -274,7 +293,6 @@ struct mei_hw_ops {
int (*hw_start)(struct mei_device *dev);
void (*hw_config)(struct mei_device *dev);
-
int (*fw_status)(struct mei_device *dev, struct mei_fw_status *fw_sts);
enum mei_pg_state (*pg_state)(struct mei_device *dev);
bool (*pg_in_transition)(struct mei_device *dev);
@@ -283,14 +301,14 @@ struct mei_hw_ops {
void (*intr_clear)(struct mei_device *dev);
void (*intr_enable)(struct mei_device *dev);
void (*intr_disable)(struct mei_device *dev);
+ void (*synchronize_irq)(struct mei_device *dev);
int (*hbuf_free_slots)(struct mei_device *dev);
bool (*hbuf_is_ready)(struct mei_device *dev);
size_t (*hbuf_max_len)(const struct mei_device *dev);
-
int (*write)(struct mei_device *dev,
struct mei_msg_hdr *hdr,
- unsigned char *buf);
+ const unsigned char *buf);
int (*rdbuf_full_slots)(struct mei_device *dev);
@@ -304,8 +322,9 @@ void mei_cl_bus_rescan(struct mei_device *bus);
void mei_cl_bus_rescan_work(struct work_struct *work);
void mei_cl_bus_dev_fixup(struct mei_cl_device *dev);
ssize_t __mei_cl_send(struct mei_cl *cl, u8 *buf, size_t length,
- bool blocking);
-ssize_t __mei_cl_recv(struct mei_cl *cl, u8 *buf, size_t length);
+ unsigned int mode);
+ssize_t __mei_cl_recv(struct mei_cl *cl, u8 *buf, size_t length,
+ unsigned int mode);
bool mei_cl_bus_rx_event(struct mei_cl *cl);
bool mei_cl_bus_notify_event(struct mei_cl *cl);
void mei_cl_bus_remove_devices(struct mei_device *bus);
@@ -627,6 +646,11 @@ static inline void mei_disable_interrupts(struct mei_device *dev)
dev->ops->intr_disable(dev);
}
+static inline void mei_synchronize_irq(struct mei_device *dev)
+{
+ dev->ops->synchronize_irq(dev);
+}
+
static inline bool mei_host_is_ready(struct mei_device *dev)
{
return dev->ops->host_is_ready(dev);
@@ -652,7 +676,7 @@ static inline size_t mei_hbuf_max_len(const struct mei_device *dev)
}
static inline int mei_write_message(struct mei_device *dev,
- struct mei_msg_hdr *hdr, void *buf)
+ struct mei_msg_hdr *hdr, const void *buf)
{
return dev->ops->write(dev, hdr, buf);
}
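
With the two bool parameters folded into one mode bitmask, callers compose flags per direction, as the MKHI fixup earlier does with MEI_CL_IO_TX_BLOCKING | MEI_CL_IO_TX_INTERNAL. A sketch of a round trip, assuming the declarations added to mei_dev.h above:

    #include <linux/types.h>
    #include "mei_dev.h"    /* flag enum and __mei_cl_send/recv as above */

    static int example_roundtrip(struct mei_cl *cl, u8 *buf, size_t len)
    {
            ssize_t ret;

            ret = __mei_cl_send(cl, buf, len,
                                MEI_CL_IO_TX_BLOCKING | MEI_CL_IO_TX_INTERNAL);
            if (ret < 0)
                    return ret;

            /* 0 selects the default blocking receive; MEI_CL_IO_RX_NONBLOCK
             * would return -EAGAIN instead of sleeping. */
            ret = __mei_cl_recv(cl, buf, len, 0);
            return ret < 0 ? ret : 0;
    }
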
diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c
index f3ffd883b232..f9c6ec4b98ab 100644
--- a/drivers/misc/mei/pci-me.c
+++ b/drivers/misc/mei/pci-me.c
@@ -87,6 +87,7 @@ static const struct pci_device_id mei_me_pci_tbl[] = {
{MEI_PCI_DEVICE(MEI_DEV_ID_SPT_2, mei_me_pch8_cfg)},
{MEI_PCI_DEVICE(MEI_DEV_ID_SPT_H, mei_me_pch8_sps_cfg)},
{MEI_PCI_DEVICE(MEI_DEV_ID_SPT_H_2, mei_me_pch8_sps_cfg)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_LBG, mei_me_pch8_cfg)},
{MEI_PCI_DEVICE(MEI_DEV_ID_BXT_M, mei_me_pch8_cfg)},
{MEI_PCI_DEVICE(MEI_DEV_ID_APL_I, mei_me_pch8_cfg)},
diff --git a/drivers/misc/sgi-gru/grumain.c b/drivers/misc/sgi-gru/grumain.c
index 33741ad4a74a..af2e077da4b8 100644
--- a/drivers/misc/sgi-gru/grumain.c
+++ b/drivers/misc/sgi-gru/grumain.c
@@ -932,7 +932,7 @@ int gru_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
unsigned long paddr, vaddr;
unsigned long expires;
- vaddr = (unsigned long)vmf->virtual_address;
+ vaddr = vmf->address;
gru_dbg(grudev, "vma %p, vaddr 0x%lx (0x%lx)\n",
vma, vaddr, GSEG_BASE(vaddr));
STAT(nopfn);
diff --git a/drivers/misc/sgi-xp/xpnet.c b/drivers/misc/sgi-xp/xpnet.c
index 557f9782c53c..0c26eaf5f62b 100644
--- a/drivers/misc/sgi-xp/xpnet.c
+++ b/drivers/misc/sgi-xp/xpnet.c
@@ -118,6 +118,8 @@ static DEFINE_SPINLOCK(xpnet_broadcast_lock);
* now, the default is 64KB.
*/
#define XPNET_MAX_MTU (0x800000UL - L1_CACHE_BYTES)
+/* 68 comes from min TCP+IP+MAC header */
+#define XPNET_MIN_MTU 68
/* 32KB has been determined to be the ideal */
#define XPNET_DEF_MTU (0x8000UL)
@@ -330,22 +332,6 @@ xpnet_dev_stop(struct net_device *dev)
return 0;
}
-static int
-xpnet_dev_change_mtu(struct net_device *dev, int new_mtu)
-{
- /* 68 comes from min TCP+IP+MAC header */
- if ((new_mtu < 68) || (new_mtu > XPNET_MAX_MTU)) {
- dev_err(xpnet, "ifconfig %s mtu %d failed; value must be "
- "between 68 and %ld\n", dev->name, new_mtu,
- XPNET_MAX_MTU);
- return -EINVAL;
- }
-
- dev->mtu = new_mtu;
- dev_dbg(xpnet, "ifconfig %s mtu set to %d\n", dev->name, new_mtu);
- return 0;
-}
-
/*
* Notification that the other end has received the message and
* DMA'd the skb information. At this point, they are done with
@@ -519,7 +505,6 @@ static const struct net_device_ops xpnet_netdev_ops = {
.ndo_open = xpnet_dev_open,
.ndo_stop = xpnet_dev_stop,
.ndo_start_xmit = xpnet_dev_hard_start_xmit,
- .ndo_change_mtu = xpnet_dev_change_mtu,
.ndo_tx_timeout = xpnet_dev_tx_timeout,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
@@ -555,6 +540,8 @@ xpnet_init(void)
xpnet_device->netdev_ops = &xpnet_netdev_ops;
xpnet_device->mtu = XPNET_DEF_MTU;
+ xpnet_device->min_mtu = XPNET_MIN_MTU;
+ xpnet_device->max_mtu = XPNET_MAX_MTU;
/*
* Multicast assumes the LSB of the first octet is set for multicast
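
Dropping xpnet_dev_change_mtu() is possible because the core now range-checks against net_device::min_mtu/max_mtu in dev_set_mtu(); an ndo_change_mtu implementation remains necessary only when a change has device side effects. A sketch mirroring the xpnet bounds:

    #include <linux/cache.h>
    #include <linux/netdevice.h>

    static void example_setup(struct net_device *dev)
    {
            dev->mtu     = 0x8000;          /* default */
            dev->min_mtu = 68;              /* min TCP+IP+MAC header */
            dev->max_mtu = 0x800000 - L1_CACHE_BYTES;
    }
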
diff --git a/drivers/misc/sram.c b/drivers/misc/sram.c
index f84b53d6ce50..b33ab8ce47ab 100644
--- a/drivers/misc/sram.c
+++ b/drivers/misc/sram.c
@@ -19,12 +19,17 @@
*/
#include <linux/clk.h>
+#include <linux/delay.h>
#include <linux/genalloc.h>
#include <linux/io.h>
#include <linux/list_sort.h>
#include <linux/of_address.h>
+#include <linux/of_device.h>
#include <linux/platform_device.h>
+#include <linux/regmap.h>
#include <linux/slab.h>
+#include <linux/mfd/syscon.h>
+#include <soc/at91/atmel-secumod.h>
#define SRAM_GRANULARITY 32
@@ -334,12 +339,33 @@ static int sram_reserve_regions(struct sram_dev *sram, struct resource *res)
return ret;
}
+static int atmel_securam_wait(void)
+{
+ struct regmap *regmap;
+ u32 val;
+
+ regmap = syscon_regmap_lookup_by_compatible("atmel,sama5d2-secumod");
+ if (IS_ERR(regmap))
+ return -ENODEV;
+
+ return regmap_read_poll_timeout(regmap, AT91_SECUMOD_RAMRDY, val,
+ val & AT91_SECUMOD_RAMRDY_READY,
+ 10000, 500000);
+}
+
+static const struct of_device_id sram_dt_ids[] = {
+ { .compatible = "mmio-sram" },
+ { .compatible = "atmel,sama5d2-securam", .data = atmel_securam_wait },
+ {}
+};
+
static int sram_probe(struct platform_device *pdev)
{
struct sram_dev *sram;
struct resource *res;
size_t size;
int ret;
+ int (*init_func)(void);
sram = devm_kzalloc(&pdev->dev, sizeof(*sram), GFP_KERNEL);
if (!sram)
@@ -384,6 +410,13 @@ static int sram_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, sram);
+ init_func = of_device_get_match_data(&pdev->dev);
+ if (init_func) {
+ ret = init_func();
+ if (ret)
+ return ret;
+ }
+
dev_dbg(sram->dev, "SRAM pool: %zu KiB @ 0x%p\n",
gen_pool_size(sram->pool) / 1024, sram->virt_base);
@@ -405,17 +438,10 @@ static int sram_remove(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_OF
-static const struct of_device_id sram_dt_ids[] = {
- { .compatible = "mmio-sram" },
- {}
-};
-#endif
-
static struct platform_driver sram_driver = {
.driver = {
.name = "sram",
- .of_match_table = of_match_ptr(sram_dt_ids),
+ .of_match_table = sram_dt_ids,
},
.probe = sram_probe,
.remove = sram_remove,
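
The sram.c rework hangs optional per-compatible behaviour off of_device_id::data and fetches it with of_device_get_match_data(); defining the table unconditionally is what lets the CONFIG_OF guard and the of_match_ptr() wrapper go. A sketch of the hook pattern with hypothetical names:

    #include <linux/of_device.h>
    #include <linux/platform_device.h>

    static int example_wait_ready(void)
    {
            return 0;   /* e.g. poll a syscon bit, as atmel_securam_wait() */
    }

    static const struct of_device_id example_dt_ids[] = {
            { .compatible = "vendor,plain-sram" },
            { .compatible = "vendor,guarded-sram", .data = example_wait_ready },
            { /* sentinel */ }
    };

    static int example_probe(struct platform_device *pdev)
    {
            int (*init_func)(void) = of_device_get_match_data(&pdev->dev);

            /* NULL when the matched compatible carries no hook. */
            return init_func ? init_func() : 0;
    }
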
diff --git a/drivers/mmc/Kconfig b/drivers/mmc/Kconfig
index f2eeb38efa65..7e803fc454d1 100644
--- a/drivers/mmc/Kconfig
+++ b/drivers/mmc/Kconfig
@@ -23,8 +23,6 @@ if MMC
source "drivers/mmc/core/Kconfig"
-source "drivers/mmc/card/Kconfig"
-
source "drivers/mmc/host/Kconfig"
endif # MMC
diff --git a/drivers/mmc/Makefile b/drivers/mmc/Makefile
index 400756ec7c49..416b6d1c9ec6 100644
--- a/drivers/mmc/Makefile
+++ b/drivers/mmc/Makefile
@@ -5,5 +5,4 @@
subdir-ccflags-$(CONFIG_MMC_DEBUG) := -DDEBUG
obj-$(CONFIG_MMC) += core/
-obj-$(CONFIG_MMC) += card/
obj-$(subst m,y,$(CONFIG_MMC)) += host/
diff --git a/drivers/mmc/card/Kconfig b/drivers/mmc/card/Kconfig
deleted file mode 100644
index 5562308699bc..000000000000
--- a/drivers/mmc/card/Kconfig
+++ /dev/null
@@ -1,70 +0,0 @@
-#
-# MMC/SD card drivers
-#
-
-comment "MMC/SD/SDIO Card Drivers"
-
-config MMC_BLOCK
- tristate "MMC block device driver"
- depends on BLOCK
- default y
- help
- Say Y here to enable the MMC block device driver support.
- This provides a block device driver, which you can use to
- mount the filesystem. Almost everyone wishing MMC support
- should say Y or M here.
-
-config MMC_BLOCK_MINORS
- int "Number of minors per block device"
- depends on MMC_BLOCK
- range 4 256
- default 8
- help
- Number of minors per block device. One is needed for every
- partition on the disk (plus one for the whole disk).
-
- Number of total MMC minors available is 256, so your number
- of supported block devices will be limited to 256 divided
- by this number.
-
- Default is 8 to be backwards compatible with previous
- hardwired device numbering.
-
- If unsure, say 8 here.
-
-config MMC_BLOCK_BOUNCE
- bool "Use bounce buffer for simple hosts"
- depends on MMC_BLOCK
- default y
- help
- SD/MMC is a high latency protocol where it is crucial to
- send large requests in order to get high performance. Many
- controllers, however, are restricted to continuous memory
- (i.e. they can't do scatter-gather), something the kernel
- rarely can provide.
-
- Say Y here to help these restricted hosts by bouncing
- requests back and forth from a large buffer. You will get
- a big performance gain at the cost of up to 64 KiB of
- physical memory.
-
- If unsure, say Y here.
-
-config SDIO_UART
- tristate "SDIO UART/GPS class support"
- depends on TTY
- help
- SDIO function driver for SDIO cards that implements the UART
- class, as well as the GPS class which appears like a UART.
-
-config MMC_TEST
- tristate "MMC host test driver"
- help
- Development driver that performs a series of reads and writes
- to a memory card in order to expose certain well known bugs
- in host controllers. The tests are executed by writing to the
- "test" file in debugfs under each card. Note that whatever is
- on your card will be overwritten by these tests.
-
- This driver is only of interest to those developing or
- testing a host driver. Most people should say N here.
diff --git a/drivers/mmc/card/Makefile b/drivers/mmc/card/Makefile
deleted file mode 100644
index c73b406a06cd..000000000000
--- a/drivers/mmc/card/Makefile
+++ /dev/null
@@ -1,10 +0,0 @@
-#
-# Makefile for MMC/SD card drivers
-#
-
-obj-$(CONFIG_MMC_BLOCK) += mmc_block.o
-mmc_block-objs := block.o queue.o
-obj-$(CONFIG_MMC_TEST) += mmc_test.o
-
-obj-$(CONFIG_SDIO_UART) += sdio_uart.o
-
diff --git a/drivers/mmc/core/Kconfig b/drivers/mmc/core/Kconfig
index 250f223aaa80..cdfa8520a4b1 100644
--- a/drivers/mmc/core/Kconfig
+++ b/drivers/mmc/core/Kconfig
@@ -22,3 +22,69 @@ config PWRSEQ_SIMPLE
This driver can also be built as a module. If so, the module
will be called pwrseq_simple.
+
+config MMC_BLOCK
+ tristate "MMC block device driver"
+ depends on BLOCK
+ default y
+ help
+ Say Y here to enable the MMC block device driver support.
+ This provides a block device driver, which you can use to
+ mount the filesystem. Almost everyone wishing MMC support
+ should say Y or M here.
+
+config MMC_BLOCK_MINORS
+ int "Number of minors per block device"
+ depends on MMC_BLOCK
+ range 4 256
+ default 8
+ help
+ Number of minors per block device. One is needed for every
+ partition on the disk (plus one for the whole disk).
+
+ Number of total MMC minors available is 256, so your number
+ of supported block devices will be limited to 256 divided
+ by this number.
+
+ Default is 8 to be backwards compatible with previous
+ hardwired device numbering.
+
+ If unsure, say 8 here.
+
+config MMC_BLOCK_BOUNCE
+ bool "Use bounce buffer for simple hosts"
+ depends on MMC_BLOCK
+ default y
+ help
+ SD/MMC is a high latency protocol where it is crucial to
+ send large requests in order to get high performance. Many
+ controllers, however, are restricted to continuous memory
+ (i.e. they can't do scatter-gather), something the kernel
+ rarely can provide.
+
+ Say Y here to help these restricted hosts by bouncing
+ requests back and forth from a large buffer. You will get
+ a big performance gain at the cost of up to 64 KiB of
+ physical memory.
+
+ If unsure, say Y here.
+
+config SDIO_UART
+ tristate "SDIO UART/GPS class support"
+ depends on TTY
+ help
+ SDIO function driver for SDIO cards that implements the UART
+ class, as well as the GPS class which appears like a UART.
+
+config MMC_TEST
+ tristate "MMC host test driver"
+ help
+ Development driver that performs a series of reads and writes
+ to a memory card in order to expose certain well known bugs
+ in host controllers. The tests are executed by writing to the
+ "test" file in debugfs under each card. Note that whatever is
+ on your card will be overwritten by these tests.
+
+ This driver is only of interest to those developing or
+ testing a host driver. Most people should say N here.
+
diff --git a/drivers/mmc/core/Makefile b/drivers/mmc/core/Makefile
index f007151dfdc6..b2a257dc644f 100644
--- a/drivers/mmc/core/Makefile
+++ b/drivers/mmc/core/Makefile
@@ -12,3 +12,7 @@ mmc_core-$(CONFIG_OF) += pwrseq.o
obj-$(CONFIG_PWRSEQ_SIMPLE) += pwrseq_simple.o
obj-$(CONFIG_PWRSEQ_EMMC) += pwrseq_emmc.o
mmc_core-$(CONFIG_DEBUG_FS) += debugfs.o
+obj-$(CONFIG_MMC_BLOCK) += mmc_block.o
+mmc_block-objs := block.o queue.o
+obj-$(CONFIG_MMC_TEST) += mmc_test.o
+obj-$(CONFIG_SDIO_UART) += sdio_uart.o
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/core/block.c
index 709a872ed484..bab3f07b1117 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/core/block.c
@@ -66,9 +66,6 @@ MODULE_ALIAS("mmc:block");
#define mmc_req_rel_wr(req) ((req->cmd_flags & REQ_FUA) && \
(rq_data_dir(req) == WRITE))
-#define PACKED_CMD_VER 0x01
-#define PACKED_CMD_WR 0x02
-
static DEFINE_MUTEX(block_mutex);
/*
@@ -102,7 +99,6 @@ struct mmc_blk_data {
unsigned int flags;
#define MMC_BLK_CMD23 (1 << 0) /* Can do SET_BLOCK_COUNT for multiblock */
#define MMC_BLK_REL_WR (1 << 1) /* MMC Reliable write support */
-#define MMC_BLK_PACKED_CMD (1 << 2) /* MMC packed command support */
unsigned int usage;
unsigned int read_only;
@@ -126,12 +122,6 @@ struct mmc_blk_data {
static DEFINE_MUTEX(open_lock);
-enum {
- MMC_PACKED_NR_IDX = -1,
- MMC_PACKED_NR_ZERO,
- MMC_PACKED_NR_SINGLE,
-};
-
module_param(perdev_minors, int, 0444);
MODULE_PARM_DESC(perdev_minors, "Minors numbers to allocate per device");
@@ -139,17 +129,6 @@ static inline int mmc_blk_part_switch(struct mmc_card *card,
struct mmc_blk_data *md);
static int get_card_status(struct mmc_card *card, u32 *status, int retries);
-static inline void mmc_blk_clear_packed(struct mmc_queue_req *mqrq)
-{
- struct mmc_packed *packed = mqrq->packed;
-
- mqrq->cmd_type = MMC_PACKED_NONE;
- packed->nr_entries = MMC_PACKED_NR_ZERO;
- packed->idx_failure = MMC_PACKED_NR_IDX;
- packed->retries = 0;
- packed->blocks = 0;
-}
-
static struct mmc_blk_data *mmc_blk_get(struct gendisk *disk)
{
struct mmc_blk_data *md;
@@ -854,7 +833,7 @@ static int get_card_status(struct mmc_card *card, u32 *status, int retries)
}
static int card_busy_detect(struct mmc_card *card, unsigned int timeout_ms,
- bool hw_busy_detect, struct request *req, int *gen_err)
+ bool hw_busy_detect, struct request *req, bool *gen_err)
{
unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms);
int err = 0;
@@ -871,7 +850,7 @@ static int card_busy_detect(struct mmc_card *card, unsigned int timeout_ms,
if (status & R1_ERROR) {
pr_err("%s: %s: error sending status cmd, status %#x\n",
req->rq_disk->disk_name, __func__, status);
- *gen_err = 1;
+ *gen_err = true;
}
/* We may rely on the host hw to handle busy detection.*/
@@ -902,7 +881,7 @@ static int card_busy_detect(struct mmc_card *card, unsigned int timeout_ms,
}
static int send_stop(struct mmc_card *card, unsigned int timeout_ms,
- struct request *req, int *gen_err, u32 *stop_status)
+ struct request *req, bool *gen_err, u32 *stop_status)
{
struct mmc_host *host = card->host;
struct mmc_command cmd = {0};
@@ -940,7 +919,7 @@ static int send_stop(struct mmc_card *card, unsigned int timeout_ms,
(*stop_status & R1_ERROR)) {
pr_err("%s: %s: general error sending stop command, resp %#x\n",
req->rq_disk->disk_name, __func__, *stop_status);
- *gen_err = 1;
+ *gen_err = true;
}
return card_busy_detect(card, timeout_ms, use_r1b_resp, req, gen_err);
@@ -1014,7 +993,7 @@ static int mmc_blk_cmd_error(struct request *req, const char *name, int error,
* Otherwise we don't understand what happened, so abort.
*/
static int mmc_blk_cmd_recovery(struct mmc_card *card, struct request *req,
- struct mmc_blk_request *brq, int *ecc_err, int *gen_err)
+ struct mmc_blk_request *brq, bool *ecc_err, bool *gen_err)
{
bool prev_cmd_status_valid = true;
u32 status, stop_status = 0;
@@ -1053,7 +1032,7 @@ static int mmc_blk_cmd_recovery(struct mmc_card *card, struct request *req,
if ((status & R1_CARD_ECC_FAILED) ||
(brq->stop.resp[0] & R1_CARD_ECC_FAILED) ||
(brq->cmd.resp[0] & R1_CARD_ECC_FAILED))
- *ecc_err = 1;
+ *ecc_err = true;
/* Flag General errors */
if (!mmc_host_is_spi(card->host) && rq_data_dir(req) != READ)
@@ -1062,7 +1041,7 @@ static int mmc_blk_cmd_recovery(struct mmc_card *card, struct request *req,
pr_err("%s: %s: general error sending stop or status command, stop cmd response %#x, card status %#x\n",
req->rq_disk->disk_name, __func__,
brq->stop.resp[0], status);
- *gen_err = 1;
+ *gen_err = true;
}
/*
@@ -1085,7 +1064,7 @@ static int mmc_blk_cmd_recovery(struct mmc_card *card, struct request *req,
}
if (stop_status & R1_CARD_ECC_FAILED)
- *ecc_err = 1;
+ *ecc_err = true;
}
/* Check for set block count errors */
@@ -1154,7 +1133,7 @@ static inline void mmc_blk_reset_success(struct mmc_blk_data *md, int type)
int mmc_access_rpmb(struct mmc_queue *mq)
{
- struct mmc_blk_data *md = mq->data;
+ struct mmc_blk_data *md = mq->blkdata;
/*
* If this is an RPMB partition access, return true
*/
@@ -1166,7 +1145,7 @@ int mmc_access_rpmb(struct mmc_queue *mq)
static int mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req)
{
- struct mmc_blk_data *md = mq->data;
+ struct mmc_blk_data *md = mq->blkdata;
struct mmc_card *card = md->queue.card;
unsigned int from, nr, arg;
int err = 0, type = MMC_BLK_DISCARD;
@@ -1210,7 +1189,7 @@ out:
static int mmc_blk_issue_secdiscard_rq(struct mmc_queue *mq,
struct request *req)
{
- struct mmc_blk_data *md = mq->data;
+ struct mmc_blk_data *md = mq->blkdata;
struct mmc_card *card = md->queue.card;
unsigned int from, nr, arg;
int err = 0, type = MMC_BLK_SECDISCARD;
@@ -1276,7 +1255,7 @@ out:
static int mmc_blk_issue_flush(struct mmc_queue *mq, struct request *req)
{
- struct mmc_blk_data *md = mq->data;
+ struct mmc_blk_data *md = mq->blkdata;
struct mmc_card *card = md->queue.card;
int ret = 0;
@@ -1320,15 +1299,16 @@ static inline void mmc_apply_rel_rw(struct mmc_blk_request *brq,
R1_CC_ERROR | /* Card controller error */ \
R1_ERROR) /* General/unknown error */
-static int mmc_blk_err_check(struct mmc_card *card,
- struct mmc_async_req *areq)
+static enum mmc_blk_status mmc_blk_err_check(struct mmc_card *card,
+ struct mmc_async_req *areq)
{
struct mmc_queue_req *mq_mrq = container_of(areq, struct mmc_queue_req,
mmc_active);
struct mmc_blk_request *brq = &mq_mrq->brq;
struct request *req = mq_mrq->req;
int need_retune = card->host->need_retune;
- int ecc_err = 0, gen_err = 0;
+ bool ecc_err = false;
+ bool gen_err = false;
/*
* sbc.error indicates a problem with the set block count
@@ -1378,7 +1358,7 @@ static int mmc_blk_err_check(struct mmc_card *card,
pr_err("%s: %s: general error sending stop command, stop cmd response %#x\n",
req->rq_disk->disk_name, __func__,
brq->stop.resp[0]);
- gen_err = 1;
+ gen_err = true;
}
err = card_busy_detect(card, MMC_BLK_TIMEOUT_MS, false, req,
@@ -1419,67 +1399,12 @@ static int mmc_blk_err_check(struct mmc_card *card,
if (!brq->data.bytes_xfered)
return MMC_BLK_RETRY;
- if (mmc_packed_cmd(mq_mrq->cmd_type)) {
- if (unlikely(brq->data.blocks << 9 != brq->data.bytes_xfered))
- return MMC_BLK_PARTIAL;
- else
- return MMC_BLK_SUCCESS;
- }
-
if (blk_rq_bytes(req) != brq->data.bytes_xfered)
return MMC_BLK_PARTIAL;
return MMC_BLK_SUCCESS;
}
-static int mmc_blk_packed_err_check(struct mmc_card *card,
- struct mmc_async_req *areq)
-{
- struct mmc_queue_req *mq_rq = container_of(areq, struct mmc_queue_req,
- mmc_active);
- struct request *req = mq_rq->req;
- struct mmc_packed *packed = mq_rq->packed;
- int err, check, status;
- u8 *ext_csd;
-
- packed->retries--;
- check = mmc_blk_err_check(card, areq);
- err = get_card_status(card, &status, 0);
- if (err) {
- pr_err("%s: error %d sending status command\n",
- req->rq_disk->disk_name, err);
- return MMC_BLK_ABORT;
- }
-
- if (status & R1_EXCEPTION_EVENT) {
- err = mmc_get_ext_csd(card, &ext_csd);
- if (err) {
- pr_err("%s: error %d sending ext_csd\n",
- req->rq_disk->disk_name, err);
- return MMC_BLK_ABORT;
- }
-
- if ((ext_csd[EXT_CSD_EXP_EVENTS_STATUS] &
- EXT_CSD_PACKED_FAILURE) &&
- (ext_csd[EXT_CSD_PACKED_CMD_STATUS] &
- EXT_CSD_PACKED_GENERIC_ERROR)) {
- if (ext_csd[EXT_CSD_PACKED_CMD_STATUS] &
- EXT_CSD_PACKED_INDEXED_ERROR) {
- packed->idx_failure =
- ext_csd[EXT_CSD_PACKED_FAILURE_INDEX] - 1;
- check = MMC_BLK_PARTIAL;
- }
- pr_err("%s: packed cmd failed, nr %u, sectors %u, "
- "failure index: %d\n",
- req->rq_disk->disk_name, packed->nr_entries,
- packed->blocks, packed->idx_failure);
- }
- kfree(ext_csd);
- }
-
- return check;
-}
-
static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
struct mmc_card *card,
int disable_multi,
@@ -1488,7 +1413,7 @@ static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
u32 readcmd, writecmd;
struct mmc_blk_request *brq = &mqrq->brq;
struct request *req = mqrq->req;
- struct mmc_blk_data *md = mq->data;
+ struct mmc_blk_data *md = mq->blkdata;
bool do_data_tag;
/*
@@ -1640,224 +1565,6 @@ static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
mmc_queue_bounce_pre(mqrq);
}
-static inline u8 mmc_calc_packed_hdr_segs(struct request_queue *q,
- struct mmc_card *card)
-{
- unsigned int hdr_sz = mmc_large_sector(card) ? 4096 : 512;
- unsigned int max_seg_sz = queue_max_segment_size(q);
- unsigned int len, nr_segs = 0;
-
- do {
- len = min(hdr_sz, max_seg_sz);
- hdr_sz -= len;
- nr_segs++;
- } while (hdr_sz);
-
- return nr_segs;
-}
-
-static u8 mmc_blk_prep_packed_list(struct mmc_queue *mq, struct request *req)
-{
- struct request_queue *q = mq->queue;
- struct mmc_card *card = mq->card;
- struct request *cur = req, *next = NULL;
- struct mmc_blk_data *md = mq->data;
- struct mmc_queue_req *mqrq = mq->mqrq_cur;
- bool en_rel_wr = card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN;
- unsigned int req_sectors = 0, phys_segments = 0;
- unsigned int max_blk_count, max_phys_segs;
- bool put_back = true;
- u8 max_packed_rw = 0;
- u8 reqs = 0;
-
- /*
- * We don't need to check packed for any further
- * operation of packed stuff as we set MMC_PACKED_NONE
- * and return zero for reqs if geting null packed. Also
- * we clean the flag of MMC_BLK_PACKED_CMD to avoid doing
- * it again when removing blk req.
- */
- if (!mqrq->packed) {
- md->flags &= (~MMC_BLK_PACKED_CMD);
- goto no_packed;
- }
-
- if (!(md->flags & MMC_BLK_PACKED_CMD))
- goto no_packed;
-
- if ((rq_data_dir(cur) == WRITE) &&
- mmc_host_packed_wr(card->host))
- max_packed_rw = card->ext_csd.max_packed_writes;
-
- if (max_packed_rw == 0)
- goto no_packed;
-
- if (mmc_req_rel_wr(cur) &&
- (md->flags & MMC_BLK_REL_WR) && !en_rel_wr)
- goto no_packed;
-
- if (mmc_large_sector(card) &&
- !IS_ALIGNED(blk_rq_sectors(cur), 8))
- goto no_packed;
-
- mmc_blk_clear_packed(mqrq);
-
- max_blk_count = min(card->host->max_blk_count,
- card->host->max_req_size >> 9);
- if (unlikely(max_blk_count > 0xffff))
- max_blk_count = 0xffff;
-
- max_phys_segs = queue_max_segments(q);
- req_sectors += blk_rq_sectors(cur);
- phys_segments += cur->nr_phys_segments;
-
- if (rq_data_dir(cur) == WRITE) {
- req_sectors += mmc_large_sector(card) ? 8 : 1;
- phys_segments += mmc_calc_packed_hdr_segs(q, card);
- }
-
- do {
- if (reqs >= max_packed_rw - 1) {
- put_back = false;
- break;
- }
-
- spin_lock_irq(q->queue_lock);
- next = blk_fetch_request(q);
- spin_unlock_irq(q->queue_lock);
- if (!next) {
- put_back = false;
- break;
- }
-
- if (mmc_large_sector(card) &&
- !IS_ALIGNED(blk_rq_sectors(next), 8))
- break;
-
- if (req_op(next) == REQ_OP_DISCARD ||
- req_op(next) == REQ_OP_SECURE_ERASE ||
- req_op(next) == REQ_OP_FLUSH)
- break;
-
- if (rq_data_dir(cur) != rq_data_dir(next))
- break;
-
- if (mmc_req_rel_wr(next) &&
- (md->flags & MMC_BLK_REL_WR) && !en_rel_wr)
- break;
-
- req_sectors += blk_rq_sectors(next);
- if (req_sectors > max_blk_count)
- break;
-
- phys_segments += next->nr_phys_segments;
- if (phys_segments > max_phys_segs)
- break;
-
- list_add_tail(&next->queuelist, &mqrq->packed->list);
- cur = next;
- reqs++;
- } while (1);
-
- if (put_back) {
- spin_lock_irq(q->queue_lock);
- blk_requeue_request(q, next);
- spin_unlock_irq(q->queue_lock);
- }
-
- if (reqs > 0) {
- list_add(&req->queuelist, &mqrq->packed->list);
- mqrq->packed->nr_entries = ++reqs;
- mqrq->packed->retries = reqs;
- return reqs;
- }
-
-no_packed:
- mqrq->cmd_type = MMC_PACKED_NONE;
- return 0;
-}
-
-static void mmc_blk_packed_hdr_wrq_prep(struct mmc_queue_req *mqrq,
- struct mmc_card *card,
- struct mmc_queue *mq)
-{
- struct mmc_blk_request *brq = &mqrq->brq;
- struct request *req = mqrq->req;
- struct request *prq;
- struct mmc_blk_data *md = mq->data;
- struct mmc_packed *packed = mqrq->packed;
- bool do_rel_wr, do_data_tag;
- __le32 *packed_cmd_hdr;
- u8 hdr_blocks;
- u8 i = 1;
-
- mqrq->cmd_type = MMC_PACKED_WRITE;
- packed->blocks = 0;
- packed->idx_failure = MMC_PACKED_NR_IDX;
-
- packed_cmd_hdr = packed->cmd_hdr;
- memset(packed_cmd_hdr, 0, sizeof(packed->cmd_hdr));
- packed_cmd_hdr[0] = cpu_to_le32((packed->nr_entries << 16) |
- (PACKED_CMD_WR << 8) | PACKED_CMD_VER);
- hdr_blocks = mmc_large_sector(card) ? 8 : 1;
-
- /*
- * Argument for each entry of packed group
- */
- list_for_each_entry(prq, &packed->list, queuelist) {
- do_rel_wr = mmc_req_rel_wr(prq) && (md->flags & MMC_BLK_REL_WR);
- do_data_tag = (card->ext_csd.data_tag_unit_size) &&
- (prq->cmd_flags & REQ_META) &&
- (rq_data_dir(prq) == WRITE) &&
- blk_rq_bytes(prq) >= card->ext_csd.data_tag_unit_size;
- /* Argument of CMD23 */
- packed_cmd_hdr[(i * 2)] = cpu_to_le32(
- (do_rel_wr ? MMC_CMD23_ARG_REL_WR : 0) |
- (do_data_tag ? MMC_CMD23_ARG_TAG_REQ : 0) |
- blk_rq_sectors(prq));
- /* Argument of CMD18 or CMD25 */
- packed_cmd_hdr[((i * 2)) + 1] = cpu_to_le32(
- mmc_card_blockaddr(card) ?
- blk_rq_pos(prq) : blk_rq_pos(prq) << 9);
- packed->blocks += blk_rq_sectors(prq);
- i++;
- }
-
- memset(brq, 0, sizeof(struct mmc_blk_request));
- brq->mrq.cmd = &brq->cmd;
- brq->mrq.data = &brq->data;
- brq->mrq.sbc = &brq->sbc;
- brq->mrq.stop = &brq->stop;
-
- brq->sbc.opcode = MMC_SET_BLOCK_COUNT;
- brq->sbc.arg = MMC_CMD23_ARG_PACKED | (packed->blocks + hdr_blocks);
- brq->sbc.flags = MMC_RSP_R1 | MMC_CMD_AC;
-
- brq->cmd.opcode = MMC_WRITE_MULTIPLE_BLOCK;
- brq->cmd.arg = blk_rq_pos(req);
- if (!mmc_card_blockaddr(card))
- brq->cmd.arg <<= 9;
- brq->cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
-
- brq->data.blksz = 512;
- brq->data.blocks = packed->blocks + hdr_blocks;
- brq->data.flags = MMC_DATA_WRITE;
-
- brq->stop.opcode = MMC_STOP_TRANSMISSION;
- brq->stop.arg = 0;
- brq->stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
-
- mmc_set_data_timeout(&brq->data, card);
-
- brq->data.sg = mqrq->sg;
- brq->data.sg_len = mmc_queue_map_sg(mq, mqrq);
-
- mqrq->mmc_active.mrq = &brq->mrq;
- mqrq->mmc_active.err_check = mmc_blk_packed_err_check;
-
- mmc_queue_bounce_pre(mqrq);
-}
-
static int mmc_blk_cmd_err(struct mmc_blk_data *md, struct mmc_card *card,
struct mmc_blk_request *brq, struct request *req,
int ret)
@@ -1881,97 +1588,25 @@ static int mmc_blk_cmd_err(struct mmc_blk_data *md, struct mmc_card *card,
ret = blk_end_request(req, 0, blocks << 9);
}
} else {
- if (!mmc_packed_cmd(mq_rq->cmd_type))
- ret = blk_end_request(req, 0, brq->data.bytes_xfered);
+ ret = blk_end_request(req, 0, brq->data.bytes_xfered);
}
return ret;
}
-static int mmc_blk_end_packed_req(struct mmc_queue_req *mq_rq)
-{
- struct request *prq;
- struct mmc_packed *packed = mq_rq->packed;
- int idx = packed->idx_failure, i = 0;
- int ret = 0;
-
- while (!list_empty(&packed->list)) {
- prq = list_entry_rq(packed->list.next);
- if (idx == i) {
- /* retry from error index */
- packed->nr_entries -= idx;
- mq_rq->req = prq;
- ret = 1;
-
- if (packed->nr_entries == MMC_PACKED_NR_SINGLE) {
- list_del_init(&prq->queuelist);
- mmc_blk_clear_packed(mq_rq);
- }
- return ret;
- }
- list_del_init(&prq->queuelist);
- blk_end_request(prq, 0, blk_rq_bytes(prq));
- i++;
- }
-
- mmc_blk_clear_packed(mq_rq);
- return ret;
-}
-
-static void mmc_blk_abort_packed_req(struct mmc_queue_req *mq_rq)
-{
- struct request *prq;
- struct mmc_packed *packed = mq_rq->packed;
-
- while (!list_empty(&packed->list)) {
- prq = list_entry_rq(packed->list.next);
- list_del_init(&prq->queuelist);
- blk_end_request(prq, -EIO, blk_rq_bytes(prq));
- }
-
- mmc_blk_clear_packed(mq_rq);
-}
-
-static void mmc_blk_revert_packed_req(struct mmc_queue *mq,
- struct mmc_queue_req *mq_rq)
-{
- struct request *prq;
- struct request_queue *q = mq->queue;
- struct mmc_packed *packed = mq_rq->packed;
-
- while (!list_empty(&packed->list)) {
- prq = list_entry_rq(packed->list.prev);
- if (prq->queuelist.prev != &packed->list) {
- list_del_init(&prq->queuelist);
- spin_lock_irq(q->queue_lock);
- blk_requeue_request(mq->queue, prq);
- spin_unlock_irq(q->queue_lock);
- } else {
- list_del_init(&prq->queuelist);
- }
- }
-
- mmc_blk_clear_packed(mq_rq);
-}
-
static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc)
{
- struct mmc_blk_data *md = mq->data;
+ struct mmc_blk_data *md = mq->blkdata;
struct mmc_card *card = md->queue.card;
- struct mmc_blk_request *brq = &mq->mqrq_cur->brq;
+ struct mmc_blk_request *brq;
int ret = 1, disable_multi = 0, retry = 0, type, retune_retry_done = 0;
enum mmc_blk_status status;
struct mmc_queue_req *mq_rq;
- struct request *req = rqc;
+ struct request *req;
struct mmc_async_req *areq;
- const u8 packed_nr = 2;
- u8 reqs = 0;
if (!rqc && !mq->mqrq_prev->req)
return 0;
- if (rqc)
- reqs = mmc_blk_prep_packed_list(mq, rqc);
-
do {
if (rqc) {
/*
@@ -1981,20 +1616,18 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc)
if (mmc_large_sector(card) &&
!IS_ALIGNED(blk_rq_sectors(rqc), 8)) {
pr_err("%s: Transfer size is not 4KB sector size aligned\n",
- req->rq_disk->disk_name);
+ rqc->rq_disk->disk_name);
mq_rq = mq->mqrq_cur;
+ req = rqc;
+ rqc = NULL;
goto cmd_abort;
}
- if (reqs >= packed_nr)
- mmc_blk_packed_hdr_wrq_prep(mq->mqrq_cur,
- card, mq);
- else
- mmc_blk_rw_rq_prep(mq->mqrq_cur, card, 0, mq);
+ mmc_blk_rw_rq_prep(mq->mqrq_cur, card, 0, mq);
areq = &mq->mqrq_cur->mmc_active;
} else
areq = NULL;
- areq = mmc_start_req(card->host, areq, (int *) &status);
+ areq = mmc_start_req(card->host, areq, &status);
if (!areq) {
if (status == MMC_BLK_NEW_REQUEST)
mq->flags |= MMC_QUEUE_NEW_REQUEST;
@@ -2015,13 +1648,8 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc)
*/
mmc_blk_reset_success(md, type);
- if (mmc_packed_cmd(mq_rq->cmd_type)) {
- ret = mmc_blk_end_packed_req(mq_rq);
- break;
- } else {
- ret = blk_end_request(req, 0,
- brq->data.bytes_xfered);
- }
+ ret = blk_end_request(req, 0,
+ brq->data.bytes_xfered);
/*
* If the blk_end_request function returns non-zero even
@@ -2058,8 +1686,7 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc)
err = mmc_blk_reset(md, card->host, type);
if (!err)
break;
- if (err == -ENODEV ||
- mmc_packed_cmd(mq_rq->cmd_type))
+ if (err == -ENODEV)
goto cmd_abort;
/* Fall through */
}
@@ -2090,23 +1717,14 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc)
}
if (ret) {
- if (mmc_packed_cmd(mq_rq->cmd_type)) {
- if (!mq_rq->packed->retries)
- goto cmd_abort;
- mmc_blk_packed_hdr_wrq_prep(mq_rq, card, mq);
- mmc_start_req(card->host,
- &mq_rq->mmc_active, NULL);
- } else {
-
- /*
- * In case of a incomplete request
- * prepare it again and resend.
- */
- mmc_blk_rw_rq_prep(mq_rq, card,
- disable_multi, mq);
- mmc_start_req(card->host,
- &mq_rq->mmc_active, NULL);
- }
+ /*
+ * In case of an incomplete request,
+ * prepare it again and resend.
+ */
+ mmc_blk_rw_rq_prep(mq_rq, card,
+ disable_multi, mq);
+ mmc_start_req(card->host,
+ &mq_rq->mmc_active, NULL);
mq_rq->brq.retune_retry_done = retune_retry_done;
}
} while (ret);
@@ -2114,28 +1732,18 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc)
return 1;
cmd_abort:
- if (mmc_packed_cmd(mq_rq->cmd_type)) {
- mmc_blk_abort_packed_req(mq_rq);
- } else {
- if (mmc_card_removed(card))
- req->cmd_flags |= REQ_QUIET;
- while (ret)
- ret = blk_end_request(req, -EIO,
- blk_rq_cur_bytes(req));
- }
+ if (mmc_card_removed(card))
+ req->rq_flags |= RQF_QUIET;
+ while (ret)
+ ret = blk_end_request(req, -EIO,
+ blk_rq_cur_bytes(req));
start_new_req:
if (rqc) {
if (mmc_card_removed(card)) {
- rqc->cmd_flags |= REQ_QUIET;
+ rqc->rq_flags |= RQF_QUIET;
blk_end_request_all(rqc, -EIO);
} else {
- /*
- * If current request is packed, it needs to put back.
- */
- if (mmc_packed_cmd(mq->mqrq_cur->cmd_type))
- mmc_blk_revert_packed_req(mq, mq->mqrq_cur);
-
mmc_blk_rw_rq_prep(mq->mqrq_cur, card, 0, mq);
mmc_start_req(card->host,
&mq->mqrq_cur->mmc_active, NULL);
@@ -2148,10 +1756,8 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc)
int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
{
int ret;
- struct mmc_blk_data *md = mq->data;
+ struct mmc_blk_data *md = mq->blkdata;
struct mmc_card *card = md->queue.card;
- struct mmc_host *host = card->host;
- unsigned long flags;
bool req_is_special = mmc_req_is_special(req);
if (req && !mq->mqrq_prev->req)
@@ -2184,11 +1790,6 @@ int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
mmc_blk_issue_rw_rq(mq, NULL);
ret = mmc_blk_issue_flush(mq, req);
} else {
- if (!req && host->areq) {
- spin_lock_irqsave(&host->context_info.lock, flags);
- host->context_info.is_waiting_last_req = true;
- spin_unlock_irqrestore(&host->context_info.lock, flags);
- }
ret = mmc_blk_issue_rw_rq(mq, req);
}
@@ -2266,7 +1867,7 @@ again:
if (ret)
goto err_putdisk;
- md->queue.data = md;
+ md->queue.blkdata = md;
md->disk->major = MMC_BLOCK_MAJOR;
md->disk->first_minor = devidx * perdev_minors;
@@ -2318,14 +1919,6 @@ again:
blk_queue_write_cache(md->queue.queue, true, true);
}
- if (mmc_card_mmc(card) &&
- (area_type == MMC_BLK_DATA_AREA_MAIN) &&
- (md->flags & MMC_BLK_CMD23) &&
- card->ext_csd.packed_event_en) {
- if (!mmc_packed_init(&md->queue, card))
- md->flags |= MMC_BLK_PACKED_CMD;
- }
-
return md;
err_putdisk:
@@ -2429,8 +2022,6 @@ static void mmc_blk_remove_req(struct mmc_blk_data *md)
*/
card = md->queue.card;
mmc_cleanup_queue(&md->queue);
- if (md->flags & MMC_BLK_PACKED_CMD)
- mmc_packed_clean(&md->queue);
if (md->disk->flags & GENHD_FL_UP) {
device_remove_file(disk_to_dev(md->disk), &md->force_ro);
if ((md->area_type & MMC_BLK_DATA_AREA_BOOT) &&
diff --git a/drivers/mmc/card/block.h b/drivers/mmc/core/block.h
index cdabb2ee74be..cdabb2ee74be 100644
--- a/drivers/mmc/card/block.h
+++ b/drivers/mmc/core/block.h
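The block.c hunks above convert the ecc_err/gen_err out-parameters from int to bool. A minimal standalone sketch of the pattern, with hypothetical names and an illustrative status bit (not the driver's definitions):

#include <stdbool.h>
#include <stdio.h>

#define STATUS_GENERAL_ERROR (1u << 19)	/* illustrative R1_ERROR-style bit */

/* Flag a general error without failing the call itself. */
static int check_status(unsigned int status, bool *gen_err)
{
	if (status & STATUS_GENERAL_ERROR)
		*gen_err = true;
	return 0;
}

int main(void)
{
	bool gen_err = false;

	check_status(STATUS_GENERAL_ERROR, &gen_err);
	printf("general error: %s\n", gen_err ? "yes" : "no");
	return 0;
}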
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index 2553d903a82b..1076b9d89df3 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -306,16 +306,16 @@ static int mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)
mrq->sbc->mrq = mrq;
}
if (mrq->data) {
- BUG_ON(mrq->data->blksz > host->max_blk_size);
- BUG_ON(mrq->data->blocks > host->max_blk_count);
- BUG_ON(mrq->data->blocks * mrq->data->blksz >
- host->max_req_size);
-
+ if (mrq->data->blksz > host->max_blk_size ||
+ mrq->data->blocks > host->max_blk_count ||
+ mrq->data->blocks * mrq->data->blksz > host->max_req_size)
+ return -EINVAL;
#ifdef CONFIG_MMC_DEBUG
sz = 0;
for_each_sg(mrq->data->sg, sg, mrq->data->sg_len, i)
sz += sg->length;
- BUG_ON(sz != mrq->data->blocks * mrq->data->blksz);
+ if (sz != mrq->data->blocks * mrq->data->blksz)
+ return -EINVAL;
#endif
mrq->cmd->data = mrq->data;
@@ -349,8 +349,6 @@ void mmc_start_bkops(struct mmc_card *card, bool from_exception)
int timeout;
bool use_busy_signal;
- BUG_ON(!card);
-
if (!card->ext_csd.man_bkops_en || mmc_card_doing_bkops(card))
return;
@@ -380,7 +378,7 @@ void mmc_start_bkops(struct mmc_card *card, bool from_exception)
mmc_retune_hold(card->host);
err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
- EXT_CSD_BKOPS_START, 1, timeout,
+ EXT_CSD_BKOPS_START, 1, timeout, 0,
use_busy_signal, true, false);
if (err) {
pr_warn("%s: Error %d starting bkops\n",
@@ -497,32 +495,27 @@ static int __mmc_start_req(struct mmc_host *host, struct mmc_request *mrq)
*
* Returns enum mmc_blk_status after checking errors.
*/
-static int mmc_wait_for_data_req_done(struct mmc_host *host,
- struct mmc_request *mrq,
- struct mmc_async_req *next_req)
+static enum mmc_blk_status mmc_wait_for_data_req_done(struct mmc_host *host,
+ struct mmc_request *mrq)
{
struct mmc_command *cmd;
struct mmc_context_info *context_info = &host->context_info;
- int err;
- unsigned long flags;
+ enum mmc_blk_status status;
while (1) {
wait_event_interruptible(context_info->wait,
(context_info->is_done_rcv ||
context_info->is_new_req));
- spin_lock_irqsave(&context_info->lock, flags);
- context_info->is_waiting_last_req = false;
- spin_unlock_irqrestore(&context_info->lock, flags);
+
if (context_info->is_done_rcv) {
context_info->is_done_rcv = false;
- context_info->is_new_req = false;
cmd = mrq->cmd;
if (!cmd->error || !cmd->retries ||
mmc_card_removed(host->card)) {
- err = host->areq->err_check(host->card,
- host->areq);
- break; /* return err */
+ status = host->areq->err_check(host->card,
+ host->areq);
+ break; /* return status */
} else {
mmc_retune_recheck(host);
pr_info("%s: req failed (CMD%u): %d, retrying...\n",
@@ -533,14 +526,12 @@ static int mmc_wait_for_data_req_done(struct mmc_host *host,
__mmc_start_request(host, mrq);
continue; /* wait for done/new event again */
}
- } else if (context_info->is_new_req) {
- context_info->is_new_req = false;
- if (!next_req)
- return MMC_BLK_NEW_REQUEST;
}
+
+ return MMC_BLK_NEW_REQUEST;
}
mmc_retune_release(host);
- return err;
+ return status;
}
void mmc_wait_for_req_done(struct mmc_host *host, struct mmc_request *mrq)
@@ -611,18 +602,15 @@ EXPORT_SYMBOL(mmc_is_req_done);
* mmc_pre_req - Prepare for a new request
* @host: MMC host to prepare command
* @mrq: MMC request to prepare for
- * @is_first_req: true if there is no previous started request
- * that may run in parellel to this call, otherwise false
*
* mmc_pre_req() is called in prior to mmc_start_req() to let
* host prepare for the new request. Preparation of a request may be
* performed while another request is running on the host.
*/
-static void mmc_pre_req(struct mmc_host *host, struct mmc_request *mrq,
- bool is_first_req)
+static void mmc_pre_req(struct mmc_host *host, struct mmc_request *mrq)
{
if (host->ops->pre_req)
- host->ops->pre_req(host, mrq, is_first_req);
+ host->ops->pre_req(host, mrq);
}
/**
@@ -658,21 +646,22 @@ static void mmc_post_req(struct mmc_host *host, struct mmc_request *mrq,
* is returned without waiting. NULL is not an error condition.
*/
struct mmc_async_req *mmc_start_req(struct mmc_host *host,
- struct mmc_async_req *areq, int *error)
+ struct mmc_async_req *areq,
+ enum mmc_blk_status *ret_stat)
{
- int err = 0;
+ enum mmc_blk_status status = MMC_BLK_SUCCESS;
int start_err = 0;
struct mmc_async_req *data = host->areq;
/* Prepare a new request */
if (areq)
- mmc_pre_req(host, areq->mrq, !host->areq);
+ mmc_pre_req(host, areq->mrq);
if (host->areq) {
- err = mmc_wait_for_data_req_done(host, host->areq->mrq, areq);
- if (err == MMC_BLK_NEW_REQUEST) {
- if (error)
- *error = err;
+ status = mmc_wait_for_data_req_done(host, host->areq->mrq);
+ if (status == MMC_BLK_NEW_REQUEST) {
+ if (ret_stat)
+ *ret_stat = status;
/*
* The previous request was not completed,
* nothing to return
@@ -695,27 +684,27 @@ struct mmc_async_req *mmc_start_req(struct mmc_host *host,
/* prepare the request again */
if (areq)
- mmc_pre_req(host, areq->mrq, !host->areq);
+ mmc_pre_req(host, areq->mrq);
}
}
- if (!err && areq)
+ if (status == MMC_BLK_SUCCESS && areq)
start_err = __mmc_start_data_req(host, areq->mrq);
if (host->areq)
mmc_post_req(host, host->areq->mrq, 0);
/* Cancel a prepared request if it was not started. */
- if ((err || start_err) && areq)
+ if ((status != MMC_BLK_SUCCESS || start_err) && areq)
mmc_post_req(host, areq->mrq, -EINVAL);
- if (err)
+ if (status != MMC_BLK_SUCCESS)
host->areq = NULL;
else
host->areq = areq;
- if (error)
- *error = err;
+ if (ret_stat)
+ *ret_stat = status;
return data;
}
EXPORT_SYMBOL(mmc_start_req);
@@ -754,8 +743,6 @@ int mmc_interrupt_hpi(struct mmc_card *card)
u32 status;
unsigned long prg_wait;
- BUG_ON(!card);
-
if (!card->ext_csd.hpi_en) {
pr_info("%s: HPI enable bit unset\n", mmc_hostname(card->host));
return 1;
@@ -850,7 +837,6 @@ int mmc_stop_bkops(struct mmc_card *card)
{
int err = 0;
- BUG_ON(!card);
err = mmc_interrupt_hpi(card);
/*
@@ -1666,8 +1652,6 @@ int mmc_set_signal_voltage(struct mmc_host *host, int signal_voltage, u32 ocr)
int err = 0;
u32 clock;
- BUG_ON(!host);
-
/*
* Send CMD11 only if the request is to switch the card to
* 1.8V signalling.
@@ -1884,9 +1868,7 @@ void mmc_power_cycle(struct mmc_host *host, u32 ocr)
*/
static void __mmc_release_bus(struct mmc_host *host)
{
- BUG_ON(!host);
- BUG_ON(host->bus_refs);
- BUG_ON(!host->bus_dead);
+ WARN_ON(!host->bus_dead);
host->bus_ops = NULL;
}
@@ -1926,15 +1908,12 @@ void mmc_attach_bus(struct mmc_host *host, const struct mmc_bus_ops *ops)
{
unsigned long flags;
- BUG_ON(!host);
- BUG_ON(!ops);
-
WARN_ON(!host->claimed);
spin_lock_irqsave(&host->lock, flags);
- BUG_ON(host->bus_ops);
- BUG_ON(host->bus_refs);
+ WARN_ON(host->bus_ops);
+ WARN_ON(host->bus_refs);
host->bus_ops = ops;
host->bus_refs = 1;
@@ -1950,8 +1929,6 @@ void mmc_detach_bus(struct mmc_host *host)
{
unsigned long flags;
- BUG_ON(!host);
-
WARN_ON(!host->claimed);
WARN_ON(!host->bus_ops);
@@ -2824,12 +2801,11 @@ void mmc_start_host(struct mmc_host *host)
host->rescan_disable = 0;
host->ios.power_mode = MMC_POWER_UNDEFINED;
- mmc_claim_host(host);
- if (host->caps2 & MMC_CAP2_NO_PRESCAN_POWERUP)
- mmc_power_off(host);
- else
+ if (!(host->caps2 & MMC_CAP2_NO_PRESCAN_POWERUP)) {
+ mmc_claim_host(host);
mmc_power_up(host, host->ocr_avail);
- mmc_release_host(host);
+ mmc_release_host(host);
+ }
mmc_gpiod_request_cd_irq(host);
_mmc_detect_change(host, 0, false);
@@ -2865,8 +2841,6 @@ void mmc_stop_host(struct mmc_host *host)
}
mmc_bus_put(host);
- BUG_ON(host->card);
-
mmc_claim_host(host);
mmc_power_off(host);
mmc_release_host(host);
@@ -3019,7 +2993,6 @@ void mmc_unregister_pm_notifier(struct mmc_host *host)
*/
void mmc_init_context_info(struct mmc_host *host)
{
- spin_lock_init(&host->context_info.lock);
host->context_info.is_new_req = false;
host->context_info.is_done_rcv = false;
host->context_info.is_waiting_last_req = false;
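The first core.c hunk above replaces BUG_ON() checks on the request geometry with a plain -EINVAL return. A reduced sketch of that validation, with simplified types and example limits:

#include <errno.h>
#include <stdio.h>

struct host_caps {
	unsigned int max_blk_size;
	unsigned int max_blk_count;
	unsigned int max_req_size;
};

/* Reject oversized requests instead of crashing the kernel. */
static int validate_request(const struct host_caps *h,
			    unsigned int blksz, unsigned int blocks)
{
	if (blksz > h->max_blk_size ||
	    blocks > h->max_blk_count ||
	    blocks * blksz > h->max_req_size)
		return -EINVAL;
	return 0;
}

int main(void)
{
	struct host_caps h = { 512, 65535, 524288 };

	printf("%d\n", validate_request(&h, 512, 8));	/* 0: accepted */
	printf("%d\n", validate_request(&h, 4096, 8));	/* -EINVAL: rejected */
	return 0;
}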
diff --git a/drivers/mmc/core/debugfs.c b/drivers/mmc/core/debugfs.c
index c8451ce557ae..30623b8b86a4 100644
--- a/drivers/mmc/core/debugfs.c
+++ b/drivers/mmc/core/debugfs.c
@@ -321,7 +321,11 @@ static int mmc_ext_csd_open(struct inode *inode, struct file *filp)
for (i = 0; i < 512; i++)
n += sprintf(buf + n, "%02x", ext_csd[i]);
n += sprintf(buf + n, "\n");
- BUG_ON(n != EXT_CSD_STR_LEN);
+
+ if (n != EXT_CSD_STR_LEN) {
+ err = -EINVAL;
+ goto out_free;
+ }
filp->private_data = buf;
kfree(ext_csd);
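The debugfs.c hunk above swaps a BUG_ON() for an error return when the rendered EXT_CSD string has an unexpected length. The expected length follows from the format: 512 registers at two hex digits each plus a trailing newline, i.e. 1025 characters. A standalone sketch under that assumption:

#include <stdio.h>

#define EXT_CSD_STR_LEN (512 * 2 + 1)	/* 1025: hex dump plus newline */

int main(void)
{
	static char buf[EXT_CSD_STR_LEN + 1];	/* +1 for sprintf's NUL */
	unsigned char ext_csd[512] = { 0 };	/* stand-in register dump */
	int i, n = 0;

	for (i = 0; i < 512; i++)
		n += sprintf(buf + n, "%02x", ext_csd[i]);
	n += sprintf(buf + n, "\n");

	printf("n=%d, expected=%d\n", n, EXT_CSD_STR_LEN);
	return n == EXT_CSD_STR_LEN ? 0 : 1;
}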
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
index df19777068a6..b61b52f9da3d 100644
--- a/drivers/mmc/core/mmc.c
+++ b/drivers/mmc/core/mmc.c
@@ -618,6 +618,24 @@ static int mmc_decode_ext_csd(struct mmc_card *card, u8 *ext_csd)
(ext_csd[EXT_CSD_SUPPORTED_MODE] & 0x1) &&
!(ext_csd[EXT_CSD_FW_CONFIG] & 0x1);
}
+
+ /* eMMC v5.1 or later */
+ if (card->ext_csd.rev >= 8) {
+ card->ext_csd.cmdq_support = ext_csd[EXT_CSD_CMDQ_SUPPORT] &
+ EXT_CSD_CMDQ_SUPPORTED;
+ card->ext_csd.cmdq_depth = (ext_csd[EXT_CSD_CMDQ_DEPTH] &
+ EXT_CSD_CMDQ_DEPTH_MASK) + 1;
+ /* Exclude inefficiently small queue depths */
+ if (card->ext_csd.cmdq_depth <= 2) {
+ card->ext_csd.cmdq_support = false;
+ card->ext_csd.cmdq_depth = 0;
+ }
+ if (card->ext_csd.cmdq_support) {
+ pr_debug("%s: Command Queue supported depth %u\n",
+ mmc_hostname(card->host),
+ card->ext_csd.cmdq_depth);
+ }
+ }
out:
return err;
}
@@ -1003,19 +1021,6 @@ static int mmc_select_bus_width(struct mmc_card *card)
return err;
}
-/* Caller must hold re-tuning */
-static int mmc_switch_status(struct mmc_card *card)
-{
- u32 status;
- int err;
-
- err = mmc_send_status(card, &status);
- if (err)
- return err;
-
- return mmc_switch_status_error(card->host, status);
-}
-
/*
* Switch to the high-speed mode
*/
@@ -1025,13 +1030,8 @@ static int mmc_select_hs(struct mmc_card *card)
err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
EXT_CSD_HS_TIMING, EXT_CSD_TIMING_HS,
- card->ext_csd.generic_cmd6_time,
- true, false, true);
- if (!err) {
- mmc_set_timing(card->host, MMC_TIMING_MMC_HS);
- err = mmc_switch_status(card);
- }
-
+ card->ext_csd.generic_cmd6_time, MMC_TIMING_MMC_HS,
+ true, true, true);
if (err)
pr_warn("%s: switch to high-speed failed, err:%d\n",
mmc_hostname(card->host), err);
@@ -1058,10 +1058,12 @@ static int mmc_select_hs_ddr(struct mmc_card *card)
ext_csd_bits = (bus_width == MMC_BUS_WIDTH_8) ?
EXT_CSD_DDR_BUS_WIDTH_8 : EXT_CSD_DDR_BUS_WIDTH_4;
- err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
- EXT_CSD_BUS_WIDTH,
- ext_csd_bits,
- card->ext_csd.generic_cmd6_time);
+ err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
+ EXT_CSD_BUS_WIDTH,
+ ext_csd_bits,
+ card->ext_csd.generic_cmd6_time,
+ MMC_TIMING_MMC_DDR52,
+ true, true, true);
if (err) {
pr_err("%s: switch to bus width %d ddr failed\n",
mmc_hostname(host), 1 << bus_width);
@@ -1104,9 +1106,6 @@ static int mmc_select_hs_ddr(struct mmc_card *card)
if (err)
err = __mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_330);
- if (!err)
- mmc_set_timing(host, MMC_TIMING_MMC_DDR52);
-
return err;
}
@@ -1128,7 +1127,7 @@ static int mmc_select_hs400(struct mmc_card *card)
val = EXT_CSD_TIMING_HS;
err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
EXT_CSD_HS_TIMING, val,
- card->ext_csd.generic_cmd6_time,
+ card->ext_csd.generic_cmd6_time, 0,
true, false, true);
if (err) {
pr_err("%s: switch to high-speed from hs200 failed, err:%d\n",
@@ -1163,7 +1162,7 @@ static int mmc_select_hs400(struct mmc_card *card)
card->drive_strength << EXT_CSD_DRV_STR_SHIFT;
err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
EXT_CSD_HS_TIMING, val,
- card->ext_csd.generic_cmd6_time,
+ card->ext_csd.generic_cmd6_time, 0,
true, false, true);
if (err) {
pr_err("%s: switch to hs400 failed, err:%d\n",
@@ -1206,7 +1205,7 @@ int mmc_hs400_to_hs200(struct mmc_card *card)
/* Switch HS400 to HS DDR */
val = EXT_CSD_TIMING_HS;
err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_HS_TIMING,
- val, card->ext_csd.generic_cmd6_time,
+ val, card->ext_csd.generic_cmd6_time, 0,
true, false, true);
if (err)
goto out_err;
@@ -1220,7 +1219,7 @@ int mmc_hs400_to_hs200(struct mmc_card *card)
/* Switch HS DDR to HS */
err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BUS_WIDTH,
EXT_CSD_BUS_WIDTH_8, card->ext_csd.generic_cmd6_time,
- true, false, true);
+ 0, true, false, true);
if (err)
goto out_err;
@@ -1234,14 +1233,19 @@ int mmc_hs400_to_hs200(struct mmc_card *card)
val = EXT_CSD_TIMING_HS200 |
card->drive_strength << EXT_CSD_DRV_STR_SHIFT;
err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_HS_TIMING,
- val, card->ext_csd.generic_cmd6_time,
+ val, card->ext_csd.generic_cmd6_time, 0,
true, false, true);
if (err)
goto out_err;
mmc_set_timing(host, MMC_TIMING_MMC_HS200);
- err = mmc_switch_status(card);
+ /*
+ * For HS200, CRC errors are not a reliable way to know the switch
+ * failed. If there really is a problem, we would expect tuning to
+ * fail and the result to end up the same.
+ */
+ err = __mmc_switch_status(card, false);
if (err)
goto out_err;
@@ -1281,16 +1285,23 @@ static int mmc_select_hs400es(struct mmc_card *card)
goto out_err;
/* Switch card to HS mode */
- err = mmc_select_hs(card);
- if (err)
+ err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
+ EXT_CSD_HS_TIMING, EXT_CSD_TIMING_HS,
+ card->ext_csd.generic_cmd6_time, 0,
+ true, false, true);
+ if (err) {
+ pr_err("%s: switch to hs for hs400es failed, err:%d\n",
+ mmc_hostname(host), err);
goto out_err;
+ }
- mmc_set_clock(host, card->ext_csd.hs_max_dtr);
-
+ mmc_set_timing(host, MMC_TIMING_MMC_HS);
err = mmc_switch_status(card);
if (err)
goto out_err;
+ mmc_set_clock(host, card->ext_csd.hs_max_dtr);
+
/* Switch card to DDR with strobe bit */
val = EXT_CSD_DDR_BUS_WIDTH_8 | EXT_CSD_BUS_WIDTH_STROBE;
err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
@@ -1308,7 +1319,7 @@ static int mmc_select_hs400es(struct mmc_card *card)
card->drive_strength << EXT_CSD_DRV_STR_SHIFT;
err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
EXT_CSD_HS_TIMING, val,
- card->ext_csd.generic_cmd6_time,
+ card->ext_csd.generic_cmd6_time, 0,
true, false, true);
if (err) {
pr_err("%s: switch to hs400es failed, err:%d\n",
@@ -1390,14 +1401,20 @@ static int mmc_select_hs200(struct mmc_card *card)
card->drive_strength << EXT_CSD_DRV_STR_SHIFT;
err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
EXT_CSD_HS_TIMING, val,
- card->ext_csd.generic_cmd6_time,
+ card->ext_csd.generic_cmd6_time, 0,
true, false, true);
if (err)
goto err;
old_timing = host->ios.timing;
mmc_set_timing(host, MMC_TIMING_MMC_HS200);
- err = mmc_switch_status(card);
+ /*
+ * For HS200, CRC errors are not a reliable way to know the
+ * switch failed. If there really is a problem, we would expect
+ * tuning to fail and the result to end up the same.
+ */
+ err = __mmc_switch_status(card, false);
+
/*
* mmc_select_timing() assumes timing has not changed if
* it is a switch error.
@@ -1480,7 +1497,6 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
u32 cid[4];
u32 rocr;
- BUG_ON(!host);
WARN_ON(!host->claimed);
/* Set correct bus mode for MMC before attempting init */
@@ -1854,7 +1870,7 @@ static int mmc_poweroff_notify(struct mmc_card *card, unsigned int notify_type)
err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
EXT_CSD_POWER_OFF_NOTIFICATION,
- notify_type, timeout, true, false, false);
+ notify_type, timeout, 0, true, false, false);
if (err)
pr_err("%s: Power Off Notification timed out, %u\n",
mmc_hostname(card->host), timeout);
@@ -1870,9 +1886,6 @@ static int mmc_poweroff_notify(struct mmc_card *card, unsigned int notify_type)
*/
static void mmc_remove(struct mmc_host *host)
{
- BUG_ON(!host);
- BUG_ON(!host->card);
-
mmc_remove_card(host->card);
host->card = NULL;
}
@@ -1892,9 +1905,6 @@ static void mmc_detect(struct mmc_host *host)
{
int err;
- BUG_ON(!host);
- BUG_ON(!host->card);
-
mmc_get_card(host->card);
/*
@@ -1920,9 +1930,6 @@ static int _mmc_suspend(struct mmc_host *host, bool is_suspend)
unsigned int notify_type = is_suspend ? EXT_CSD_POWER_OFF_SHORT :
EXT_CSD_POWER_OFF_LONG;
- BUG_ON(!host);
- BUG_ON(!host->card);
-
mmc_claim_host(host);
if (mmc_card_suspended(host->card))
@@ -1979,9 +1986,6 @@ static int _mmc_resume(struct mmc_host *host)
{
int err = 0;
- BUG_ON(!host);
- BUG_ON(!host->card);
-
mmc_claim_host(host);
if (!mmc_card_suspended(host->card))
@@ -2114,7 +2118,6 @@ int mmc_attach_mmc(struct mmc_host *host)
int err;
u32 ocr, rocr;
- BUG_ON(!host);
WARN_ON(!host->claimed);
/* Set correct bus mode for MMC before attempting attach */
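The mmc.c hunk adding eMMC 5.1 command-queue detection stores the queue depth as the EXT_CSD field value plus one, and discards depths of two or less as not worth enabling. A minimal sketch of that decode (the mask value mirrors the spec's 5-bit field width and is an assumption here):

#include <stdbool.h>
#include <stdio.h>

#define CMDQ_DEPTH_MASK 0x1f	/* assumed 5-bit depth field */

static unsigned int decode_cmdq_depth(unsigned char reg, bool *supported)
{
	unsigned int depth = (reg & CMDQ_DEPTH_MASK) + 1;

	if (depth <= 2) {	/* exclude inefficiently small queues */
		*supported = false;
		return 0;
	}
	*supported = true;
	return depth;
}

int main(void)
{
	bool supported;
	unsigned int depth = decode_cmdq_depth(0x0f, &supported);	/* encodes 16 */

	printf("depth=%u supported=%d\n", depth, supported);
	return 0;
}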
diff --git a/drivers/mmc/core/mmc_ops.c b/drivers/mmc/core/mmc_ops.c
index ad6e9798e949..b11c3455b040 100644
--- a/drivers/mmc/core/mmc_ops.c
+++ b/drivers/mmc/core/mmc_ops.c
@@ -54,21 +54,15 @@ static const u8 tuning_blk_pattern_8bit[] = {
0xff, 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee,
};
-static inline int __mmc_send_status(struct mmc_card *card, u32 *status,
- bool ignore_crc)
+int mmc_send_status(struct mmc_card *card, u32 *status)
{
int err;
struct mmc_command cmd = {0};
- BUG_ON(!card);
- BUG_ON(!card->host);
-
cmd.opcode = MMC_SEND_STATUS;
if (!mmc_host_is_spi(card->host))
cmd.arg = card->rca << 16;
cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC;
- if (ignore_crc)
- cmd.flags &= ~MMC_RSP_CRC;
err = mmc_wait_for_cmd(card->host, &cmd, MMC_CMD_RETRIES);
if (err)
@@ -83,17 +77,10 @@ static inline int __mmc_send_status(struct mmc_card *card, u32 *status,
return 0;
}
-int mmc_send_status(struct mmc_card *card, u32 *status)
-{
- return __mmc_send_status(card, status, false);
-}
-
static int _mmc_select_card(struct mmc_host *host, struct mmc_card *card)
{
struct mmc_command cmd = {0};
- BUG_ON(!host);
-
cmd.opcode = MMC_SELECT_CARD;
if (card) {
@@ -109,7 +96,6 @@ static int _mmc_select_card(struct mmc_host *host, struct mmc_card *card)
int mmc_select_card(struct mmc_card *card)
{
- BUG_ON(!card);
return _mmc_select_card(card->host, card);
}
@@ -181,8 +167,6 @@ int mmc_send_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr)
struct mmc_command cmd = {0};
int i, err = 0;
- BUG_ON(!host);
-
cmd.opcode = MMC_SEND_OP_COND;
cmd.arg = mmc_host_is_spi(host) ? 0 : ocr;
cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R3 | MMC_CMD_BCR;
@@ -221,9 +205,6 @@ int mmc_all_send_cid(struct mmc_host *host, u32 *cid)
int err;
struct mmc_command cmd = {0};
- BUG_ON(!host);
- BUG_ON(!cid);
-
cmd.opcode = MMC_ALL_SEND_CID;
cmd.arg = 0;
cmd.flags = MMC_RSP_R2 | MMC_CMD_BCR;
@@ -241,9 +222,6 @@ int mmc_set_relative_addr(struct mmc_card *card)
{
struct mmc_command cmd = {0};
- BUG_ON(!card);
- BUG_ON(!card->host);
-
cmd.opcode = MMC_SET_RELATIVE_ADDR;
cmd.arg = card->rca << 16;
cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
@@ -257,9 +235,6 @@ mmc_send_cxd_native(struct mmc_host *host, u32 arg, u32 *cxd, int opcode)
int err;
struct mmc_command cmd = {0};
- BUG_ON(!host);
- BUG_ON(!cxd);
-
cmd.opcode = opcode;
cmd.arg = arg;
cmd.flags = MMC_RSP_R2 | MMC_CMD_AC;
@@ -440,7 +415,7 @@ int mmc_spi_set_crc(struct mmc_host *host, int use_crc)
return err;
}
-int mmc_switch_status_error(struct mmc_host *host, u32 status)
+static int mmc_switch_status_error(struct mmc_host *host, u32 status)
{
if (mmc_host_is_spi(host)) {
if (status & R1_SPI_ILLEGAL_COMMAND)
@@ -455,6 +430,88 @@ int mmc_switch_status_error(struct mmc_host *host, u32 status)
return 0;
}
+/* Caller must hold re-tuning */
+int __mmc_switch_status(struct mmc_card *card, bool crc_err_fatal)
+{
+ u32 status;
+ int err;
+
+ err = mmc_send_status(card, &status);
+ if (!crc_err_fatal && err == -EILSEQ)
+ return 0;
+ if (err)
+ return err;
+
+ return mmc_switch_status_error(card->host, status);
+}
+
+int mmc_switch_status(struct mmc_card *card)
+{
+ return __mmc_switch_status(card, true);
+}
+
+static int mmc_poll_for_busy(struct mmc_card *card, unsigned int timeout_ms,
+ bool send_status, bool retry_crc_err)
+{
+ struct mmc_host *host = card->host;
+ int err;
+ unsigned long timeout;
+ u32 status = 0;
+ bool expired = false;
+ bool busy = false;
+
+ /* We have an unspecified cmd timeout, use the fallback value. */
+ if (!timeout_ms)
+ timeout_ms = MMC_OPS_TIMEOUT_MS;
+
+ /*
+ * In cases where we aren't allowed to poll with CMD13 and the host
+ * isn't capable of polling via ->card_busy(), rely on waiting the
+ * stated timeout to be sufficient.
+ */
+ if (!send_status && !host->ops->card_busy) {
+ mmc_delay(timeout_ms);
+ return 0;
+ }
+
+ timeout = jiffies + msecs_to_jiffies(timeout_ms) + 1;
+ do {
+ /*
+ * Due to the possibility of being preempted while polling,
+ * check the expiration time first.
+ */
+ expired = time_after(jiffies, timeout);
+
+ if (host->ops->card_busy) {
+ busy = host->ops->card_busy(host);
+ } else {
+ err = mmc_send_status(card, &status);
+ if (retry_crc_err && err == -EILSEQ) {
+ busy = true;
+ } else if (err) {
+ return err;
+ } else {
+ err = mmc_switch_status_error(host, status);
+ if (err)
+ return err;
+ busy = R1_CURRENT_STATE(status) == R1_STATE_PRG;
+ }
+ }
+
+ /* Timeout if the device remains busy. */
+ if (expired && busy) {
+ pr_err("%s: Card stuck being busy! %s\n",
+ mmc_hostname(host), __func__);
+ return -ETIMEDOUT;
+ }
+ } while (busy);
+
+ if (host->ops->card_busy && send_status)
+ return mmc_switch_status(card);
+
+ return 0;
+}
+
/**
* __mmc_switch - modify EXT_CSD register
* @card: the MMC card associated with the data transfer
@@ -463,24 +520,22 @@ int mmc_switch_status_error(struct mmc_host *host, u32 status)
* @value: value to program into EXT_CSD register
* @timeout_ms: timeout (ms) for operation performed by register write,
* timeout of zero implies maximum possible timeout
+ * @timing: new timing to change to
* @use_busy_signal: use the busy signal as response type
* @send_status: send status cmd to poll for busy
- * @ignore_crc: ignore CRC errors when sending status cmd to poll for busy
+ * @retry_crc_err: retry if a CRC error occurs while polling with CMD13 for busy
*
* Modifies the EXT_CSD register for selected card.
*/
int __mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
- unsigned int timeout_ms, bool use_busy_signal, bool send_status,
- bool ignore_crc)
+ unsigned int timeout_ms, unsigned char timing,
+ bool use_busy_signal, bool send_status, bool retry_crc_err)
{
struct mmc_host *host = card->host;
int err;
struct mmc_command cmd = {0};
- unsigned long timeout;
- u32 status = 0;
bool use_r1b_resp = use_busy_signal;
- bool expired = false;
- bool busy = false;
+ unsigned char old_timing = host->ios.timing;
mmc_retune_hold(host);
@@ -522,62 +577,24 @@ int __mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
if (!use_busy_signal)
goto out;
- /*
- * CRC errors shall only be ignored in cases were CMD13 is used to poll
- * to detect busy completion.
- */
- if ((host->caps & MMC_CAP_WAIT_WHILE_BUSY) && use_r1b_resp)
- ignore_crc = false;
-
- /* We have an unspecified cmd timeout, use the fallback value. */
- if (!timeout_ms)
- timeout_ms = MMC_OPS_TIMEOUT_MS;
-
- /* Must check status to be sure of no errors. */
- timeout = jiffies + msecs_to_jiffies(timeout_ms) + 1;
- do {
- /*
- * Due to the possibility of being preempted after
- * sending the status command, check the expiration
- * time first.
- */
- expired = time_after(jiffies, timeout);
- if (send_status) {
- err = __mmc_send_status(card, &status, ignore_crc);
- if (err)
- goto out;
- }
- if ((host->caps & MMC_CAP_WAIT_WHILE_BUSY) && use_r1b_resp)
- break;
- if (host->ops->card_busy) {
- if (!host->ops->card_busy(host))
- break;
- busy = true;
- }
- if (mmc_host_is_spi(host))
- break;
+ /* Switch to the new timing before polling and checking the switch status. */
+ if (timing)
+ mmc_set_timing(host, timing);
- /*
- * We are not allowed to issue a status command and the host
- * does'nt support MMC_CAP_WAIT_WHILE_BUSY, then we can only
- * rely on waiting for the stated timeout to be sufficient.
- */
- if (!send_status && !host->ops->card_busy) {
- mmc_delay(timeout_ms);
- goto out;
- }
+ /* If SPI, or if HW busy detection was used above, we don't need to poll. */
+ if (((host->caps & MMC_CAP_WAIT_WHILE_BUSY) && use_r1b_resp) ||
+ mmc_host_is_spi(host)) {
+ if (send_status)
+ err = mmc_switch_status(card);
+ goto out_tim;
+ }
- /* Timeout if the device never leaves the program state. */
- if (expired &&
- (R1_CURRENT_STATE(status) == R1_STATE_PRG || busy)) {
- pr_err("%s: Card stuck in programming state! %s\n",
- mmc_hostname(host), __func__);
- err = -ETIMEDOUT;
- goto out;
- }
- } while (R1_CURRENT_STATE(status) == R1_STATE_PRG || busy);
+ /* Let's try to poll to find out when the command is completed. */
+ err = mmc_poll_for_busy(card, timeout_ms, send_status, retry_crc_err);
- err = mmc_switch_status_error(host, status);
+out_tim:
+ if (err && timing)
+ mmc_set_timing(host, old_timing);
out:
mmc_retune_release(host);
@@ -587,8 +604,8 @@ out:
int mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
unsigned int timeout_ms)
{
- return __mmc_switch(card, set, index, value, timeout_ms, true, true,
- false);
+ return __mmc_switch(card, set, index, value, timeout_ms, 0,
+ true, true, false);
}
EXPORT_SYMBOL_GPL(mmc_switch);
@@ -661,6 +678,31 @@ out:
}
EXPORT_SYMBOL_GPL(mmc_send_tuning);
+int mmc_abort_tuning(struct mmc_host *host, u32 opcode)
+{
+ struct mmc_command cmd = {0};
+
+ /*
+ * The eMMC specification states that CMD12 can be used to stop a tuning
+ * command, but the SD specification does not, so do nothing unless it is
+ * eMMC.
+ */
+ if (opcode != MMC_SEND_TUNING_BLOCK_HS200)
+ return 0;
+
+ cmd.opcode = MMC_STOP_TRANSMISSION;
+ cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
+
+ /*
+ * For drivers that override R1 to R1b, set an arbitrary timeout based
+ * on the tuning timeout, i.e. 150ms.
+ */
+ cmd.busy_timeout = 150;
+
+ return mmc_wait_for_cmd(host, &cmd, 0);
+}
+EXPORT_SYMBOL_GPL(mmc_abort_tuning);
+
static int
mmc_send_bus_test(struct mmc_card *card, struct mmc_host *host, u8 opcode,
u8 len)
diff --git a/drivers/mmc/core/mmc_ops.h b/drivers/mmc/core/mmc_ops.h
index f1b8e81aaa28..abd525ed74be 100644
--- a/drivers/mmc/core/mmc_ops.h
+++ b/drivers/mmc/core/mmc_ops.h
@@ -27,10 +27,11 @@ int mmc_spi_set_crc(struct mmc_host *host, int use_crc);
int mmc_bus_test(struct mmc_card *card, u8 bus_width);
int mmc_send_hpi_cmd(struct mmc_card *card, u32 *status);
int mmc_can_ext_csd(struct mmc_card *card);
-int mmc_switch_status_error(struct mmc_host *host, u32 status);
+int mmc_switch_status(struct mmc_card *card);
+int __mmc_switch_status(struct mmc_card *card, bool crc_err_fatal);
int __mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
- unsigned int timeout_ms, bool use_busy_signal, bool send_status,
- bool ignore_crc);
+ unsigned int timeout_ms, unsigned char timing,
+ bool use_busy_signal, bool send_status, bool retry_crc_err);
#endif
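The new mmc_poll_for_busy() above samples the expiry time before checking the busy state, so a preemption between the two cannot turn a successful final poll into a spurious timeout. A userspace approximation of the same loop, with a stubbed busy indicator standing in for ->card_busy():

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

static long now_ms(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ts.tv_sec * 1000L + ts.tv_nsec / 1000000L;
}

static bool device_busy(void)	/* stub for ->card_busy() */
{
	static int polls = 3;

	return --polls > 0;
}

int main(void)
{
	long deadline = now_ms() + 100;
	bool expired, busy;

	do {
		expired = now_ms() > deadline;	/* sample expiry first */
		busy = device_busy();
		if (expired && busy) {
			fprintf(stderr, "timeout while busy\n");
			return 1;
		}
	} while (busy);

	puts("device ready");
	return 0;
}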
diff --git a/drivers/mmc/card/mmc_test.c b/drivers/mmc/core/mmc_test.c
index 3678220964fe..3ab6e52d106c 100644
--- a/drivers/mmc/card/mmc_test.c
+++ b/drivers/mmc/core/mmc_test.c
@@ -1,6 +1,4 @@
/*
- * linux/drivers/mmc/card/mmc_test.c
- *
* Copyright 2007-2008 Pierre Ossman
*
* This program is free software; you can redistribute it and/or modify
@@ -214,7 +212,8 @@ static void mmc_test_prepare_mrq(struct mmc_test_card *test,
struct mmc_request *mrq, struct scatterlist *sg, unsigned sg_len,
unsigned dev_addr, unsigned blocks, unsigned blksz, int write)
{
- BUG_ON(!mrq || !mrq->cmd || !mrq->data || !mrq->stop);
+ if (WARN_ON(!mrq || !mrq->cmd || !mrq->data || !mrq->stop))
+ return;
if (blocks > 1) {
mrq->cmd->opcode = write ?
@@ -694,7 +693,8 @@ static int mmc_test_cleanup(struct mmc_test_card *test)
static void mmc_test_prepare_broken_mrq(struct mmc_test_card *test,
struct mmc_request *mrq, int write)
{
- BUG_ON(!mrq || !mrq->cmd || !mrq->data);
+ if (WARN_ON(!mrq || !mrq->cmd || !mrq->data))
+ return;
if (mrq->data->blocks > 1) {
mrq->cmd->opcode = write ?
@@ -714,7 +714,8 @@ static int mmc_test_check_result(struct mmc_test_card *test,
{
int ret;
- BUG_ON(!mrq || !mrq->cmd || !mrq->data);
+ if (WARN_ON(!mrq || !mrq->cmd || !mrq->data))
+ return -EINVAL;
ret = 0;
@@ -736,15 +737,28 @@ static int mmc_test_check_result(struct mmc_test_card *test,
return ret;
}
-static int mmc_test_check_result_async(struct mmc_card *card,
+static enum mmc_blk_status mmc_test_check_result_async(struct mmc_card *card,
struct mmc_async_req *areq)
{
struct mmc_test_async_req *test_async =
container_of(areq, struct mmc_test_async_req, areq);
+ int ret;
mmc_test_wait_busy(test_async->test);
- return mmc_test_check_result(test_async->test, areq->mrq);
+ /*
+ * FIXME: this code earlier just cast a regular error code, either a
+ * kernel -ERRORCODE or a local test framework RESULT_* code, into an
+ * enum mmc_blk_status and returned it as the result check. Instead,
+ * convert it to a reasonable value by returning either
+ * MMC_BLK_SUCCESS or MMC_BLK_CMD_ERR.
+ * If possible, a more specific error code should be returned.
+ */
+ ret = mmc_test_check_result(test_async->test, areq->mrq);
+ if (ret)
+ return MMC_BLK_CMD_ERR;
+
+ return MMC_BLK_SUCCESS;
}
/*
@@ -755,7 +769,8 @@ static int mmc_test_check_broken_result(struct mmc_test_card *test,
{
int ret;
- BUG_ON(!mrq || !mrq->cmd || !mrq->data);
+ if (WARN_ON(!mrq || !mrq->cmd || !mrq->data))
+ return -EINVAL;
ret = 0;
@@ -817,8 +832,9 @@ static int mmc_test_nonblock_transfer(struct mmc_test_card *test,
struct mmc_async_req *done_areq;
struct mmc_async_req *cur_areq = &test_areq[0].areq;
struct mmc_async_req *other_areq = &test_areq[1].areq;
+ enum mmc_blk_status status;
int i;
- int ret;
+ int ret = RESULT_OK;
test_areq[0].test = test;
test_areq[1].test = test;
@@ -834,10 +850,12 @@ static int mmc_test_nonblock_transfer(struct mmc_test_card *test,
for (i = 0; i < count; i++) {
mmc_test_prepare_mrq(test, cur_areq->mrq, sg, sg_len, dev_addr,
blocks, blksz, write);
- done_areq = mmc_start_req(test->card->host, cur_areq, &ret);
+ done_areq = mmc_start_req(test->card->host, cur_areq, &status);
- if (ret || (!done_areq && i > 0))
+ if (status != MMC_BLK_SUCCESS || (!done_areq && i > 0)) {
+ ret = RESULT_FAIL;
goto err;
+ }
if (done_areq) {
if (done_areq->mrq == &mrq2)
@@ -851,7 +869,9 @@ static int mmc_test_nonblock_transfer(struct mmc_test_card *test,
dev_addr += blocks;
}
- done_areq = mmc_start_req(test->card->host, NULL, &ret);
+ done_areq = mmc_start_req(test->card->host, NULL, &status);
+ if (status != MMC_BLK_SUCCESS)
+ ret = RESULT_FAIL;
return ret;
err:
@@ -2351,6 +2371,7 @@ static int mmc_test_ongoing_transfer(struct mmc_test_card *test,
struct mmc_request *mrq;
unsigned long timeout;
bool expired = false;
+ enum mmc_blk_status blkstat = MMC_BLK_SUCCESS;
int ret = 0, cmd_ret;
u32 status = 0;
int count = 0;
@@ -2378,9 +2399,11 @@ static int mmc_test_ongoing_transfer(struct mmc_test_card *test,
/* Start ongoing data request */
if (use_areq) {
- mmc_start_req(host, &test_areq.areq, &ret);
- if (ret)
+ mmc_start_req(host, &test_areq.areq, &blkstat);
+ if (blkstat != MMC_BLK_SUCCESS) {
+ ret = RESULT_FAIL;
goto out_free;
+ }
} else {
mmc_wait_for_req(host, mrq);
}
@@ -2413,10 +2436,13 @@ static int mmc_test_ongoing_transfer(struct mmc_test_card *test,
} while (repeat_cmd && R1_CURRENT_STATE(status) != R1_STATE_TRAN);
/* Wait for data request to complete */
- if (use_areq)
- mmc_start_req(host, NULL, &ret);
- else
+ if (use_areq) {
+ mmc_start_req(host, NULL, &blkstat);
+ if (blkstat != MMC_BLK_SUCCESS)
+ ret = RESULT_FAIL;
+ } else {
mmc_wait_for_req_done(test->card->host, mrq);
+ }
/*
* For cap_cmd_during_tfr request, upper layer must send stop if
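The FIXME in mmc_test.c above settles on collapsing any error from the synchronous result check into a single generic status. A trivial sketch of that mapping (enum values are illustrative, not the core's definitions):

#include <stdio.h>

enum mmc_blk_status { MMC_BLK_SUCCESS = 0, MMC_BLK_CMD_ERR };

/* Any nonzero error code becomes one generic command error. */
static enum mmc_blk_status to_blk_status(int ret)
{
	return ret ? MMC_BLK_CMD_ERR : MMC_BLK_SUCCESS;
}

int main(void)
{
	printf("%d %d\n", to_blk_status(0), to_blk_status(-5));
	return 0;
}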
diff --git a/drivers/mmc/card/queue.c b/drivers/mmc/core/queue.c
index 8037f73a109a..a6496d8027bc 100644
--- a/drivers/mmc/card/queue.c
+++ b/drivers/mmc/core/queue.c
@@ -1,6 +1,4 @@
/*
- * linux/drivers/mmc/card/queue.c
- *
* Copyright (C) 2003 Russell King, All Rights Reserved.
* Copyright 2006-2007 Pierre Ossman
*
@@ -44,7 +42,7 @@ static int mmc_prep_request(struct request_queue *q, struct request *req)
if (mq && (mmc_card_removed(mq->card) || mmc_access_rpmb(mq)))
return BLKPREP_KILL;
- req->cmd_flags |= REQ_DONTPREP;
+ req->rq_flags |= RQF_DONTPREP;
return BLKPREP_OK;
}
@@ -53,6 +51,7 @@ static int mmc_queue_thread(void *d)
{
struct mmc_queue *mq = d;
struct request_queue *q = mq->queue;
+ struct mmc_context_info *cntx = &mq->card->host->context_info;
current->flags |= PF_MEMALLOC;
@@ -63,6 +62,19 @@ static int mmc_queue_thread(void *d)
spin_lock_irq(q->queue_lock);
set_current_state(TASK_INTERRUPTIBLE);
req = blk_fetch_request(q);
+ mq->asleep = false;
+ cntx->is_waiting_last_req = false;
+ cntx->is_new_req = false;
+ if (!req) {
+ /*
+ * Dispatch queue is empty so set flags for
+ * mmc_request_fn() to wake us up.
+ */
+ if (mq->mqrq_prev->req)
+ cntx->is_waiting_last_req = true;
+ else
+ mq->asleep = true;
+ }
mq->mqrq_cur->req = req;
spin_unlock_irq(q->queue_lock);
@@ -115,31 +127,24 @@ static void mmc_request_fn(struct request_queue *q)
{
struct mmc_queue *mq = q->queuedata;
struct request *req;
- unsigned long flags;
struct mmc_context_info *cntx;
if (!mq) {
while ((req = blk_fetch_request(q)) != NULL) {
- req->cmd_flags |= REQ_QUIET;
+ req->rq_flags |= RQF_QUIET;
__blk_end_request_all(req, -EIO);
}
return;
}
cntx = &mq->card->host->context_info;
- if (!mq->mqrq_cur->req && mq->mqrq_prev->req) {
- /*
- * New MMC request arrived when MMC thread may be
- * blocked on the previous request to be complete
- * with no current request fetched
- */
- spin_lock_irqsave(&cntx->lock, flags);
- if (cntx->is_waiting_last_req) {
- cntx->is_new_req = true;
- wake_up_interruptible(&cntx->wait);
- }
- spin_unlock_irqrestore(&cntx->lock, flags);
- } else if (!mq->mqrq_cur->req && !mq->mqrq_prev->req)
+
+ if (cntx->is_waiting_last_req) {
+ cntx->is_new_req = true;
+ wake_up_interruptible(&cntx->wait);
+ }
+
+ if (mq->asleep)
wake_up_process(mq->thread);
}
@@ -179,6 +184,82 @@ static void mmc_queue_setup_discard(struct request_queue *q,
queue_flag_set_unlocked(QUEUE_FLAG_SECERASE, q);
}
+#ifdef CONFIG_MMC_BLOCK_BOUNCE
+static bool mmc_queue_alloc_bounce_bufs(struct mmc_queue *mq,
+ unsigned int bouncesz)
+{
+ int i;
+
+ for (i = 0; i < mq->qdepth; i++) {
+ mq->mqrq[i].bounce_buf = kmalloc(bouncesz, GFP_KERNEL);
+ if (!mq->mqrq[i].bounce_buf)
+ goto out_err;
+ }
+
+ return true;
+
+out_err:
+ while (--i >= 0) {
+ kfree(mq->mqrq[i].bounce_buf);
+ mq->mqrq[i].bounce_buf = NULL;
+ }
+ pr_warn("%s: unable to allocate bounce buffers\n",
+ mmc_card_name(mq->card));
+ return false;
+}
+
+static int mmc_queue_alloc_bounce_sgs(struct mmc_queue *mq,
+ unsigned int bouncesz)
+{
+ int i, ret;
+
+ for (i = 0; i < mq->qdepth; i++) {
+ mq->mqrq[i].sg = mmc_alloc_sg(1, &ret);
+ if (ret)
+ return ret;
+
+ mq->mqrq[i].bounce_sg = mmc_alloc_sg(bouncesz / 512, &ret);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+#endif
+
+static int mmc_queue_alloc_sgs(struct mmc_queue *mq, int max_segs)
+{
+ int i, ret;
+
+ for (i = 0; i < mq->qdepth; i++) {
+ mq->mqrq[i].sg = mmc_alloc_sg(max_segs, &ret);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static void mmc_queue_req_free_bufs(struct mmc_queue_req *mqrq)
+{
+ kfree(mqrq->bounce_sg);
+ mqrq->bounce_sg = NULL;
+
+ kfree(mqrq->sg);
+ mqrq->sg = NULL;
+
+ kfree(mqrq->bounce_buf);
+ mqrq->bounce_buf = NULL;
+}
+
+static void mmc_queue_reqs_free_bufs(struct mmc_queue *mq)
+{
+ int i;
+
+ for (i = 0; i < mq->qdepth; i++)
+ mmc_queue_req_free_bufs(&mq->mqrq[i]);
+}
+
/**
* mmc_init_queue - initialise a queue structure.
* @mq: mmc queue
@@ -193,9 +274,8 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
{
struct mmc_host *host = card->host;
u64 limit = BLK_BOUNCE_HIGH;
- int ret;
- struct mmc_queue_req *mqrq_cur = &mq->mqrq[0];
- struct mmc_queue_req *mqrq_prev = &mq->mqrq[1];
+ bool bounce = false;
+ int ret = -ENOMEM;
if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask)
limit = (u64)dma_max_pfn(mmc_dev(host)) << PAGE_SHIFT;
@@ -205,8 +285,13 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
if (!mq->queue)
return -ENOMEM;
- mq->mqrq_cur = mqrq_cur;
- mq->mqrq_prev = mqrq_prev;
+ mq->qdepth = 2;
+ mq->mqrq = kcalloc(mq->qdepth, sizeof(struct mmc_queue_req),
+ GFP_KERNEL);
+ if (!mq->mqrq)
+ goto blk_cleanup;
+ mq->mqrq_cur = &mq->mqrq[0];
+ mq->mqrq_prev = &mq->mqrq[1];
mq->queue->queuedata = mq;
blk_queue_prep_rq(mq->queue, mmc_prep_request);
@@ -228,63 +313,29 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
if (bouncesz > (host->max_blk_count * 512))
bouncesz = host->max_blk_count * 512;
- if (bouncesz > 512) {
- mqrq_cur->bounce_buf = kmalloc(bouncesz, GFP_KERNEL);
- if (!mqrq_cur->bounce_buf) {
- pr_warn("%s: unable to allocate bounce cur buffer\n",
- mmc_card_name(card));
- } else {
- mqrq_prev->bounce_buf =
- kmalloc(bouncesz, GFP_KERNEL);
- if (!mqrq_prev->bounce_buf) {
- pr_warn("%s: unable to allocate bounce prev buffer\n",
- mmc_card_name(card));
- kfree(mqrq_cur->bounce_buf);
- mqrq_cur->bounce_buf = NULL;
- }
- }
- }
-
- if (mqrq_cur->bounce_buf && mqrq_prev->bounce_buf) {
+ if (bouncesz > 512 &&
+ mmc_queue_alloc_bounce_bufs(mq, bouncesz)) {
blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_ANY);
blk_queue_max_hw_sectors(mq->queue, bouncesz / 512);
blk_queue_max_segments(mq->queue, bouncesz / 512);
blk_queue_max_segment_size(mq->queue, bouncesz);
- mqrq_cur->sg = mmc_alloc_sg(1, &ret);
- if (ret)
- goto cleanup_queue;
-
- mqrq_cur->bounce_sg =
- mmc_alloc_sg(bouncesz / 512, &ret);
- if (ret)
- goto cleanup_queue;
-
- mqrq_prev->sg = mmc_alloc_sg(1, &ret);
- if (ret)
- goto cleanup_queue;
-
- mqrq_prev->bounce_sg =
- mmc_alloc_sg(bouncesz / 512, &ret);
+ ret = mmc_queue_alloc_bounce_sgs(mq, bouncesz);
if (ret)
goto cleanup_queue;
+ bounce = true;
}
}
#endif
- if (!mqrq_cur->bounce_buf && !mqrq_prev->bounce_buf) {
+ if (!bounce) {
blk_queue_bounce_limit(mq->queue, limit);
blk_queue_max_hw_sectors(mq->queue,
min(host->max_blk_count, host->max_req_size / 512));
blk_queue_max_segments(mq->queue, host->max_segs);
blk_queue_max_segment_size(mq->queue, host->max_seg_size);
- mqrq_cur->sg = mmc_alloc_sg(host->max_segs, &ret);
- if (ret)
- goto cleanup_queue;
-
-
- mqrq_prev->sg = mmc_alloc_sg(host->max_segs, &ret);
+ ret = mmc_queue_alloc_sgs(mq, host->max_segs);
if (ret)
goto cleanup_queue;
}
@@ -296,27 +347,16 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
if (IS_ERR(mq->thread)) {
ret = PTR_ERR(mq->thread);
- goto free_bounce_sg;
+ goto cleanup_queue;
}
return 0;
- free_bounce_sg:
- kfree(mqrq_cur->bounce_sg);
- mqrq_cur->bounce_sg = NULL;
- kfree(mqrq_prev->bounce_sg);
- mqrq_prev->bounce_sg = NULL;
cleanup_queue:
- kfree(mqrq_cur->sg);
- mqrq_cur->sg = NULL;
- kfree(mqrq_cur->bounce_buf);
- mqrq_cur->bounce_buf = NULL;
-
- kfree(mqrq_prev->sg);
- mqrq_prev->sg = NULL;
- kfree(mqrq_prev->bounce_buf);
- mqrq_prev->bounce_buf = NULL;
-
+ mmc_queue_reqs_free_bufs(mq);
+ kfree(mq->mqrq);
+ mq->mqrq = NULL;
+blk_cleanup:
blk_cleanup_queue(mq->queue);
return ret;
}
@@ -325,8 +365,6 @@ void mmc_cleanup_queue(struct mmc_queue *mq)
{
struct request_queue *q = mq->queue;
unsigned long flags;
- struct mmc_queue_req *mqrq_cur = mq->mqrq_cur;
- struct mmc_queue_req *mqrq_prev = mq->mqrq_prev;
/* Make sure the queue isn't suspended, as that will deadlock */
mmc_queue_resume(mq);
@@ -340,71 +378,14 @@ void mmc_cleanup_queue(struct mmc_queue *mq)
blk_start_queue(q);
spin_unlock_irqrestore(q->queue_lock, flags);
- kfree(mqrq_cur->bounce_sg);
- mqrq_cur->bounce_sg = NULL;
-
- kfree(mqrq_cur->sg);
- mqrq_cur->sg = NULL;
-
- kfree(mqrq_cur->bounce_buf);
- mqrq_cur->bounce_buf = NULL;
-
- kfree(mqrq_prev->bounce_sg);
- mqrq_prev->bounce_sg = NULL;
-
- kfree(mqrq_prev->sg);
- mqrq_prev->sg = NULL;
-
- kfree(mqrq_prev->bounce_buf);
- mqrq_prev->bounce_buf = NULL;
+ mmc_queue_reqs_free_bufs(mq);
+ kfree(mq->mqrq);
+ mq->mqrq = NULL;
mq->card = NULL;
}
EXPORT_SYMBOL(mmc_cleanup_queue);
-int mmc_packed_init(struct mmc_queue *mq, struct mmc_card *card)
-{
- struct mmc_queue_req *mqrq_cur = &mq->mqrq[0];
- struct mmc_queue_req *mqrq_prev = &mq->mqrq[1];
- int ret = 0;
-
-
- mqrq_cur->packed = kzalloc(sizeof(struct mmc_packed), GFP_KERNEL);
- if (!mqrq_cur->packed) {
- pr_warn("%s: unable to allocate packed cmd for mqrq_cur\n",
- mmc_card_name(card));
- ret = -ENOMEM;
- goto out;
- }
-
- mqrq_prev->packed = kzalloc(sizeof(struct mmc_packed), GFP_KERNEL);
- if (!mqrq_prev->packed) {
- pr_warn("%s: unable to allocate packed cmd for mqrq_prev\n",
- mmc_card_name(card));
- kfree(mqrq_cur->packed);
- mqrq_cur->packed = NULL;
- ret = -ENOMEM;
- goto out;
- }
-
- INIT_LIST_HEAD(&mqrq_cur->packed->list);
- INIT_LIST_HEAD(&mqrq_prev->packed->list);
-
-out:
- return ret;
-}
-
-void mmc_packed_clean(struct mmc_queue *mq)
-{
- struct mmc_queue_req *mqrq_cur = &mq->mqrq[0];
- struct mmc_queue_req *mqrq_prev = &mq->mqrq[1];
-
- kfree(mqrq_cur->packed);
- mqrq_cur->packed = NULL;
- kfree(mqrq_prev->packed);
- mqrq_prev->packed = NULL;
-}
-
/**
* mmc_queue_suspend - suspend a MMC request queue
* @mq: MMC queue to suspend
@@ -449,41 +430,6 @@ void mmc_queue_resume(struct mmc_queue *mq)
}
}
-static unsigned int mmc_queue_packed_map_sg(struct mmc_queue *mq,
- struct mmc_packed *packed,
- struct scatterlist *sg,
- enum mmc_packed_type cmd_type)
-{
- struct scatterlist *__sg = sg;
- unsigned int sg_len = 0;
- struct request *req;
-
- if (mmc_packed_wr(cmd_type)) {
- unsigned int hdr_sz = mmc_large_sector(mq->card) ? 4096 : 512;
- unsigned int max_seg_sz = queue_max_segment_size(mq->queue);
- unsigned int len, remain, offset = 0;
- u8 *buf = (u8 *)packed->cmd_hdr;
-
- remain = hdr_sz;
- do {
- len = min(remain, max_seg_sz);
- sg_set_buf(__sg, buf + offset, len);
- offset += len;
- remain -= len;
- sg_unmark_end(__sg++);
- sg_len++;
- } while (remain);
- }
-
- list_for_each_entry(req, &packed->list, queuelist) {
- sg_len += blk_rq_map_sg(mq->queue, req, __sg);
- __sg = sg + (sg_len - 1);
- sg_unmark_end(__sg++);
- }
- sg_mark_end(sg + (sg_len - 1));
- return sg_len;
-}
-
/*
* Prepare the sg list(s) to be handed of to the host driver
*/
@@ -492,26 +438,12 @@ unsigned int mmc_queue_map_sg(struct mmc_queue *mq, struct mmc_queue_req *mqrq)
unsigned int sg_len;
size_t buflen;
struct scatterlist *sg;
- enum mmc_packed_type cmd_type;
int i;
- cmd_type = mqrq->cmd_type;
-
- if (!mqrq->bounce_buf) {
- if (mmc_packed_cmd(cmd_type))
- return mmc_queue_packed_map_sg(mq, mqrq->packed,
- mqrq->sg, cmd_type);
- else
- return blk_rq_map_sg(mq->queue, mqrq->req, mqrq->sg);
- }
-
- BUG_ON(!mqrq->bounce_sg);
+ if (!mqrq->bounce_buf)
+ return blk_rq_map_sg(mq->queue, mqrq->req, mqrq->sg);
- if (mmc_packed_cmd(cmd_type))
- sg_len = mmc_queue_packed_map_sg(mq, mqrq->packed,
- mqrq->bounce_sg, cmd_type);
- else
- sg_len = blk_rq_map_sg(mq->queue, mqrq->req, mqrq->bounce_sg);
+ sg_len = blk_rq_map_sg(mq->queue, mqrq->req, mqrq->bounce_sg);
mqrq->bounce_sg_len = sg_len;
diff --git a/drivers/mmc/card/queue.h b/drivers/mmc/core/queue.h
index 342f1e3f301e..dac8c3d010dd 100644
--- a/drivers/mmc/card/queue.h
+++ b/drivers/mmc/core/queue.h
@@ -11,6 +11,7 @@ static inline bool mmc_req_is_special(struct request *req)
struct request;
struct task_struct;
+struct mmc_blk_data;
struct mmc_blk_request {
struct mmc_request mrq;
@@ -21,23 +22,6 @@ struct mmc_blk_request {
int retune_retry_done;
};
-enum mmc_packed_type {
- MMC_PACKED_NONE = 0,
- MMC_PACKED_WRITE,
-};
-
-#define mmc_packed_cmd(type) ((type) != MMC_PACKED_NONE)
-#define mmc_packed_wr(type) ((type) == MMC_PACKED_WRITE)
-
-struct mmc_packed {
- struct list_head list;
- __le32 cmd_hdr[1024];
- unsigned int blocks;
- u8 nr_entries;
- u8 retries;
- s16 idx_failure;
-};
-
struct mmc_queue_req {
struct request *req;
struct mmc_blk_request brq;
@@ -46,8 +30,6 @@ struct mmc_queue_req {
struct scatterlist *bounce_sg;
unsigned int bounce_sg_len;
struct mmc_async_req mmc_active;
- enum mmc_packed_type cmd_type;
- struct mmc_packed *packed;
};
struct mmc_queue {
@@ -57,11 +39,13 @@ struct mmc_queue {
unsigned int flags;
#define MMC_QUEUE_SUSPENDED (1 << 0)
#define MMC_QUEUE_NEW_REQUEST (1 << 1)
- void *data;
+ bool asleep;
+ struct mmc_blk_data *blkdata;
struct request_queue *queue;
- struct mmc_queue_req mqrq[2];
+ struct mmc_queue_req *mqrq;
struct mmc_queue_req *mqrq_cur;
struct mmc_queue_req *mqrq_prev;
+ int qdepth;
};
extern int mmc_init_queue(struct mmc_queue *, struct mmc_card *, spinlock_t *,
@@ -75,9 +59,6 @@ extern unsigned int mmc_queue_map_sg(struct mmc_queue *,
extern void mmc_queue_bounce_pre(struct mmc_queue_req *);
extern void mmc_queue_bounce_post(struct mmc_queue_req *);
-extern int mmc_packed_init(struct mmc_queue *, struct mmc_card *);
-extern void mmc_packed_clean(struct mmc_queue *);
-
extern int mmc_access_rpmb(struct mmc_queue *);
#endif
diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c
index 73c762a28dfe..a614f37faf27 100644
--- a/drivers/mmc/core/sd.c
+++ b/drivers/mmc/core/sd.c
@@ -223,6 +223,7 @@ static int mmc_decode_scr(struct mmc_card *card)
static int mmc_read_ssr(struct mmc_card *card)
{
unsigned int au, es, et, eo;
+ u32 *raw_ssr;
int i;
if (!(card->csd.cmdclass & CCC_APP_SPEC)) {
@@ -231,14 +232,21 @@ static int mmc_read_ssr(struct mmc_card *card)
return 0;
}
- if (mmc_app_sd_status(card, card->raw_ssr)) {
+ raw_ssr = kmalloc(sizeof(card->raw_ssr), GFP_KERNEL);
+ if (!raw_ssr)
+ return -ENOMEM;
+
+ if (mmc_app_sd_status(card, raw_ssr)) {
pr_warn("%s: problem reading SD Status register\n",
mmc_hostname(card->host));
+ kfree(raw_ssr);
return 0;
}
for (i = 0; i < 16; i++)
- card->raw_ssr[i] = be32_to_cpu(card->raw_ssr[i]);
+ card->raw_ssr[i] = be32_to_cpu(raw_ssr[i]);
+
+ kfree(raw_ssr);
/*
* UNSTUFF_BITS only works with four u32s so we have to offset the
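
card->raw_ssr is embedded inside struct mmc_card and is therefore not a safe DMA target; the hunk above reads the SD Status register into a dedicated kmalloc'd buffer and byte-swaps the result into place. A hedged sketch of the pattern, where read_hw() is a hypothetical stand-in for mmc_app_sd_status():

    #include <linux/slab.h>
    #include <asm/byteorder.h>

    static int read_hw(u32 *buf) { return 0; }  /* hypothetical transfer */

    static int read_ssr_words(u32 dst[16])
    {
        u32 *raw;
        int i, err;

        raw = kmalloc(16 * sizeof(*raw), GFP_KERNEL);  /* DMA-safe */
        if (!raw)
            return -ENOMEM;

        err = read_hw(raw);
        if (!err)
            for (i = 0; i < 16; i++)
                dst[i] = be32_to_cpu(raw[i]);

        kfree(raw);
        return err;
    }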
@@ -927,7 +935,6 @@ static int mmc_sd_init_card(struct mmc_host *host, u32 ocr,
u32 cid[4];
u32 rocr = 0;
- BUG_ON(!host);
WARN_ON(!host->claimed);
err = mmc_sd_get_cid(host, ocr, cid, &rocr);
@@ -1043,9 +1050,6 @@ free_card:
*/
static void mmc_sd_remove(struct mmc_host *host)
{
- BUG_ON(!host);
- BUG_ON(!host->card);
-
mmc_remove_card(host->card);
host->card = NULL;
}
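
The BUG_ON() removals in sd.c, sd_ops.c and the sdio files below follow one of two routes: drop the assert outright where the condition is unreachable, or downgrade it to a warn-and-recover check as in mmc_app_cmd() further down. The latter pattern, reduced to a sketch:

    #include <linux/bug.h>
    #include <linux/errno.h>

    static int do_op(const void *required_arg)
    {
        /* Was: BUG_ON(!required_arg), a guaranteed panic on bad input.
         * Now: one loud stack trace, then a recoverable error. */
        if (WARN_ON(!required_arg))
            return -EINVAL;

        return 0;
    }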
@@ -1065,9 +1069,6 @@ static void mmc_sd_detect(struct mmc_host *host)
{
int err;
- BUG_ON(!host);
- BUG_ON(!host->card);
-
mmc_get_card(host->card);
/*
@@ -1091,9 +1092,6 @@ static int _mmc_sd_suspend(struct mmc_host *host)
{
int err = 0;
- BUG_ON(!host);
- BUG_ON(!host->card);
-
mmc_claim_host(host);
if (mmc_card_suspended(host->card))
@@ -1136,9 +1134,6 @@ static int _mmc_sd_resume(struct mmc_host *host)
{
int err = 0;
- BUG_ON(!host);
- BUG_ON(!host->card);
-
mmc_claim_host(host);
if (!mmc_card_suspended(host->card))
@@ -1221,7 +1216,6 @@ int mmc_attach_sd(struct mmc_host *host)
int err;
u32 ocr, rocr;
- BUG_ON(!host);
WARN_ON(!host->claimed);
err = mmc_send_app_op_cond(host, 0, &ocr);
diff --git a/drivers/mmc/core/sd_ops.c b/drivers/mmc/core/sd_ops.c
index 16b774c18e75..de125a41aa7a 100644
--- a/drivers/mmc/core/sd_ops.c
+++ b/drivers/mmc/core/sd_ops.c
@@ -27,8 +27,8 @@ int mmc_app_cmd(struct mmc_host *host, struct mmc_card *card)
int err;
struct mmc_command cmd = {0};
- BUG_ON(!host);
- BUG_ON(card && (card->host != host));
+ if (WARN_ON(card && card->host != host))
+ return -EINVAL;
cmd.opcode = MMC_APP_CMD;
@@ -72,8 +72,8 @@ int mmc_wait_for_app_cmd(struct mmc_host *host, struct mmc_card *card,
int i, err;
- BUG_ON(!cmd);
- BUG_ON(retries < 0);
+ if (retries < 0)
+ retries = MMC_CMD_RETRIES;
err = -EIO;
@@ -122,9 +122,6 @@ int mmc_app_set_bus_width(struct mmc_card *card, int width)
{
struct mmc_command cmd = {0};
- BUG_ON(!card);
- BUG_ON(!card->host);
-
cmd.opcode = SD_APP_SET_BUS_WIDTH;
cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
@@ -147,8 +144,6 @@ int mmc_send_app_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr)
struct mmc_command cmd = {0};
int i, err = 0;
- BUG_ON(!host);
-
cmd.opcode = SD_APP_OP_COND;
if (mmc_host_is_spi(host))
cmd.arg = ocr & (1 << 30); /* SPI only defines one bit */
@@ -224,9 +219,6 @@ int mmc_send_relative_addr(struct mmc_host *host, unsigned int *rca)
int err;
struct mmc_command cmd = {0};
- BUG_ON(!host);
- BUG_ON(!rca);
-
cmd.opcode = SD_SEND_RELATIVE_ADDR;
cmd.arg = 0;
cmd.flags = MMC_RSP_R6 | MMC_CMD_BCR;
@@ -249,10 +241,6 @@ int mmc_app_send_scr(struct mmc_card *card, u32 *scr)
struct scatterlist sg;
void *data_buf;
- BUG_ON(!card);
- BUG_ON(!card->host);
- BUG_ON(!scr);
-
/* NOTE: caller guarantees scr is heap-allocated */
err = mmc_app_cmd(card->host, card);
@@ -307,9 +295,6 @@ int mmc_sd_switch(struct mmc_card *card, int mode, int group,
struct mmc_data data = {0};
struct scatterlist sg;
- BUG_ON(!card);
- BUG_ON(!card->host);
-
/* NOTE: caller guarantees resp is heap-allocated */
mode = !!mode;
@@ -352,10 +337,6 @@ int mmc_app_sd_status(struct mmc_card *card, void *ssr)
struct mmc_data data = {0};
struct scatterlist sg;
- BUG_ON(!card);
- BUG_ON(!card->host);
- BUG_ON(!ssr);
-
/* NOTE: caller guarantees ssr is heap-allocated */
err = mmc_app_cmd(card->host, card);
diff --git a/drivers/mmc/core/sdio.c b/drivers/mmc/core/sdio.c
index bd44ba8116d1..ecbc52981ba5 100644
--- a/drivers/mmc/core/sdio.c
+++ b/drivers/mmc/core/sdio.c
@@ -63,7 +63,8 @@ static int sdio_init_func(struct mmc_card *card, unsigned int fn)
int ret;
struct sdio_func *func;
- BUG_ON(fn > SDIO_MAX_FUNCS);
+ if (WARN_ON(fn > SDIO_MAX_FUNCS))
+ return -EINVAL;
func = sdio_alloc_func(card);
if (IS_ERR(func))
@@ -555,7 +556,6 @@ static int mmc_sdio_init_card(struct mmc_host *host, u32 ocr,
u32 rocr = 0;
u32 ocr_card = ocr;
- BUG_ON(!host);
WARN_ON(!host->claimed);
/* to query card if 1.8V signalling is supported */
@@ -791,9 +791,6 @@ static void mmc_sdio_remove(struct mmc_host *host)
{
int i;
- BUG_ON(!host);
- BUG_ON(!host->card);
-
for (i = 0;i < host->card->sdio_funcs;i++) {
if (host->card->sdio_func[i]) {
sdio_remove_func(host->card->sdio_func[i]);
@@ -820,9 +817,6 @@ static void mmc_sdio_detect(struct mmc_host *host)
{
int err;
- BUG_ON(!host);
- BUG_ON(!host->card);
-
/* Make sure card is powered before detecting it */
if (host->caps & MMC_CAP_POWER_OFF_CARD) {
err = pm_runtime_get_sync(&host->card->dev);
@@ -916,9 +910,6 @@ static int mmc_sdio_resume(struct mmc_host *host)
{
int err = 0;
- BUG_ON(!host);
- BUG_ON(!host->card);
-
/* Basic card reinitialization. */
mmc_claim_host(host);
@@ -970,9 +961,6 @@ static int mmc_sdio_power_restore(struct mmc_host *host)
{
int ret;
- BUG_ON(!host);
- BUG_ON(!host->card);
-
mmc_claim_host(host);
/*
@@ -1063,7 +1051,6 @@ int mmc_attach_sdio(struct mmc_host *host)
u32 ocr, rocr;
struct mmc_card *card;
- BUG_ON(!host);
WARN_ON(!host->claimed);
err = mmc_send_io_op_cond(host, 0, &ocr);
diff --git a/drivers/mmc/core/sdio_cis.c b/drivers/mmc/core/sdio_cis.c
index dcb3dee59fa5..f8c372839d24 100644
--- a/drivers/mmc/core/sdio_cis.c
+++ b/drivers/mmc/core/sdio_cis.c
@@ -262,7 +262,8 @@ static int sdio_read_cis(struct mmc_card *card, struct sdio_func *func)
else
prev = &card->tuples;
- BUG_ON(*prev);
+ if (*prev)
+ return -EINVAL;
do {
unsigned char tpl_code, tpl_link;
diff --git a/drivers/mmc/core/sdio_irq.c b/drivers/mmc/core/sdio_irq.c
index 91bbbfb29f3f..f1faf9acc007 100644
--- a/drivers/mmc/core/sdio_irq.c
+++ b/drivers/mmc/core/sdio_irq.c
@@ -214,7 +214,9 @@ static int sdio_card_irq_put(struct mmc_card *card)
struct mmc_host *host = card->host;
WARN_ON(!host->claimed);
- BUG_ON(host->sdio_irqs < 1);
+
+ if (host->sdio_irqs < 1)
+ return -EINVAL;
if (!--host->sdio_irqs) {
if (!(host->caps2 & MMC_CAP2_SDIO_IRQ_NOTHREAD)) {
@@ -261,8 +263,8 @@ int sdio_claim_irq(struct sdio_func *func, sdio_irq_handler_t *handler)
int ret;
unsigned char reg;
- BUG_ON(!func);
- BUG_ON(!func->card);
+ if (!func)
+ return -EINVAL;
pr_debug("SDIO: Enabling IRQ for %s...\n", sdio_func_id(func));
@@ -304,8 +306,8 @@ int sdio_release_irq(struct sdio_func *func)
int ret;
unsigned char reg;
- BUG_ON(!func);
- BUG_ON(!func->card);
+ if (!func)
+ return -EINVAL;
pr_debug("SDIO: Disabling IRQ for %s...\n", sdio_func_id(func));
diff --git a/drivers/mmc/card/sdio_uart.c b/drivers/mmc/core/sdio_uart.c
index 5af6fb9a9ce2..d3c91f412b69 100644
--- a/drivers/mmc/card/sdio_uart.c
+++ b/drivers/mmc/core/sdio_uart.c
@@ -1,5 +1,5 @@
/*
- * linux/drivers/mmc/card/sdio_uart.c - SDIO UART/GPS driver
+ * SDIO UART/GPS driver
*
* Based on drivers/serial/8250.c and drivers/serial/serial_core.c
* by Russell King.
@@ -135,8 +135,6 @@ static void sdio_uart_port_remove(struct sdio_uart_port *port)
{
struct sdio_func *func;
- BUG_ON(sdio_uart_table[port->index] != port);
-
spin_lock(&sdio_uart_table_lock);
sdio_uart_table[port->index] = NULL;
spin_unlock(&sdio_uart_table_lock);
diff --git a/drivers/mmc/core/slot-gpio.c b/drivers/mmc/core/slot-gpio.c
index 27117ba47073..babe591aea96 100644
--- a/drivers/mmc/core/slot-gpio.c
+++ b/drivers/mmc/core/slot-gpio.c
@@ -258,6 +258,14 @@ int mmc_gpiod_request_cd(struct mmc_host *host, const char *con_id,
}
EXPORT_SYMBOL(mmc_gpiod_request_cd);
+bool mmc_can_gpio_cd(struct mmc_host *host)
+{
+ struct mmc_gpio *ctx = host->slot.handler_priv;
+
+ return ctx->cd_gpio ? true : false;
+}
+EXPORT_SYMBOL(mmc_can_gpio_cd);
+
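
mmc_can_gpio_cd() lets host drivers ask whether card detect is GPIO-backed; the dw_mmc runtime-PM code later in this diff uses it to decide when the bus-interface clock may be gated. A hedged usage sketch, with the biu_clk argument borrowed from dw_mmc purely for illustration:

    #include <linux/clk.h>
    #include <linux/mmc/host.h>
    #include <linux/mmc/slot-gpio.h>

    static void maybe_gate_biu_clk(struct mmc_host *mmc, struct clk *biu_clk)
    {
        /* A GPIO card detect (or a non-removable card) keeps working
         * while the controller is unclocked, so the bus interface
         * clock can be switched off across runtime suspend. */
        if (mmc_can_gpio_cd(mmc) || !mmc_card_is_removable(mmc))
            clk_disable_unprepare(biu_clk);
    }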
/**
* mmc_gpiod_request_ro - request a gpio descriptor for write protection
* @host: mmc host
diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig
index 5274f503a39a..2eb97014dc3f 100644
--- a/drivers/mmc/host/Kconfig
+++ b/drivers/mmc/host/Kconfig
@@ -135,7 +135,6 @@ config MMC_SDHCI_OF_AT91
tristate "SDHCI OF support for the Atmel SDMMC controller"
depends on MMC_SDHCI_PLTFM
depends on OF
- select MMC_SDHCI_IO_ACCESSORS
help
This selects the Atmel SDMMC driver
@@ -144,6 +143,7 @@ config MMC_SDHCI_OF_ESDHC
depends on MMC_SDHCI_PLTFM
depends on PPC || ARCH_MXC || ARCH_LAYERSCAPE
select MMC_SDHCI_IO_ACCESSORS
+ select FSL_GUTS
help
This selects the Freescale eSDHC controller support.
@@ -165,6 +165,17 @@ config MMC_SDHCI_OF_HLWD
If unsure, say N.
+config MMC_SDHCI_CADENCE
+ tristate "SDHCI support for the Cadence SD/SDIO/eMMC controller"
+ depends on MMC_SDHCI_PLTFM
+ depends on OF
+ help
+ This selects the Cadence SD/SDIO/eMMC driver.
+
+ If you have a controller with this interface, say Y or M here.
+
+ If unsure, say N.
+
config MMC_SDHCI_CNS3XXX
tristate "SDHCI support on the Cavium Networks CNS3xxx SoC"
depends on ARCH_CNS3XXX
@@ -322,6 +333,16 @@ config MMC_SDHCI_IPROC
If unsure, say N.
+config MMC_MESON_GX
+ tristate "Amlogic S905/GX* SD/MMC Host Controller support"
+ depends on ARCH_MESON && MMC
+ help
+ This selects support for the Amlogic SD/MMC Host Controller
+ found on the S905/GX* family of SoCs. This controller is
+ MMC 5.1 compliant and supports SD, eMMC and SDIO interfaces.
+
+ If you have a controller with this interface, say Y or M here.
+
config MMC_MOXART
tristate "MOXART SD/MMC Host Controller support"
depends on ARCH_MOXART && MMC
diff --git a/drivers/mmc/host/Makefile b/drivers/mmc/host/Makefile
index e2bdaaf43184..ccc9c4cba154 100644
--- a/drivers/mmc/host/Makefile
+++ b/drivers/mmc/host/Makefile
@@ -53,6 +53,7 @@ obj-$(CONFIG_MMC_JZ4740) += jz4740_mmc.o
obj-$(CONFIG_MMC_VUB300) += vub300.o
obj-$(CONFIG_MMC_USHC) += ushc.o
obj-$(CONFIG_MMC_WMT) += wmt-sdmmc.o
+obj-$(CONFIG_MMC_MESON_GX) += meson-gx-mmc.o
obj-$(CONFIG_MMC_MOXART) += moxart-mmc.o
obj-$(CONFIG_MMC_SUNXI) += sunxi-mmc.o
obj-$(CONFIG_MMC_USDHI6ROL0) += usdhi6rol0.o
@@ -62,6 +63,7 @@ obj-$(CONFIG_MMC_REALTEK_PCI) += rtsx_pci_sdmmc.o
obj-$(CONFIG_MMC_REALTEK_USB) += rtsx_usb_sdmmc.o
obj-$(CONFIG_MMC_SDHCI_PLTFM) += sdhci-pltfm.o
+obj-$(CONFIG_MMC_SDHCI_CADENCE) += sdhci-cadence.o
obj-$(CONFIG_MMC_SDHCI_CNS3XXX) += sdhci-cns3xxx.o
obj-$(CONFIG_MMC_SDHCI_ESDHC_IMX) += sdhci-esdhc-imx.o
obj-$(CONFIG_MMC_SDHCI_DOVE) += sdhci-dove.o
diff --git a/drivers/mmc/host/davinci_mmc.c b/drivers/mmc/host/davinci_mmc.c
index 8fa478c3b0db..36b5af8eadb8 100644
--- a/drivers/mmc/host/davinci_mmc.c
+++ b/drivers/mmc/host/davinci_mmc.c
@@ -35,6 +35,7 @@
#include <linux/mmc/mmc.h>
#include <linux/of.h>
#include <linux/of_device.h>
+#include <linux/mmc/slot-gpio.h>
#include <linux/platform_data/mmc-davinci.h>
@@ -1029,9 +1030,10 @@ static int mmc_davinci_get_cd(struct mmc_host *mmc)
struct platform_device *pdev = to_platform_device(mmc->parent);
struct davinci_mmc_config *config = pdev->dev.platform_data;
- if (!config || !config->get_cd)
- return -ENOSYS;
- return config->get_cd(pdev->id);
+ if (config && config->get_cd)
+ return config->get_cd(pdev->id);
+
+ return mmc_gpio_get_cd(mmc);
}
static int mmc_davinci_get_ro(struct mmc_host *mmc)
@@ -1039,9 +1041,10 @@ static int mmc_davinci_get_ro(struct mmc_host *mmc)
struct platform_device *pdev = to_platform_device(mmc->parent);
struct davinci_mmc_config *config = pdev->dev.platform_data;
- if (!config || !config->get_ro)
- return -ENOSYS;
- return config->get_ro(pdev->id);
+ if (config && config->get_ro)
+ return config->get_ro(pdev->id);
+
+ return mmc_gpio_get_ro(mmc);
}
static void mmc_davinci_enable_sdio_irq(struct mmc_host *mmc, int enable)
@@ -1159,49 +1162,53 @@ static const struct of_device_id davinci_mmc_dt_ids[] = {
};
MODULE_DEVICE_TABLE(of, davinci_mmc_dt_ids);
-static struct davinci_mmc_config
- *mmc_parse_pdata(struct platform_device *pdev)
+static int mmc_davinci_parse_pdata(struct mmc_host *mmc)
{
- struct device_node *np;
+ struct platform_device *pdev = to_platform_device(mmc->parent);
struct davinci_mmc_config *pdata = pdev->dev.platform_data;
- const struct of_device_id *match =
- of_match_device(davinci_mmc_dt_ids, &pdev->dev);
- u32 data;
-
- np = pdev->dev.of_node;
- if (!np)
- return pdata;
-
- pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
- if (!pdata) {
- dev_err(&pdev->dev, "Failed to allocate memory for struct davinci_mmc_config\n");
- goto nodata;
- }
+ struct mmc_davinci_host *host;
+ int ret;
- if (match)
- pdev->id_entry = match->data;
+ if (!pdata)
+ return -EINVAL;
- if (of_property_read_u32(np, "max-frequency", &pdata->max_freq))
- dev_info(&pdev->dev, "'max-frequency' property not specified, defaulting to 25MHz\n");
+ host = mmc_priv(mmc);
+ if (!host)
+ return -EINVAL;
- of_property_read_u32(np, "bus-width", &data);
- switch (data) {
- case 1:
- case 4:
- case 8:
- pdata->wires = data;
- break;
- default:
- pdata->wires = 1;
- dev_info(&pdev->dev, "Unsupported buswidth, defaulting to 1 bit\n");
- }
-nodata:
- return pdata;
+ if (pdata && pdata->nr_sg)
+ host->nr_sg = pdata->nr_sg - 1;
+
+ if (pdata && (pdata->wires == 4 || pdata->wires == 0))
+ mmc->caps |= MMC_CAP_4_BIT_DATA;
+
+ if (pdata && (pdata->wires == 8))
+ mmc->caps |= (MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA);
+
+ mmc->f_min = 312500;
+ mmc->f_max = 25000000;
+ if (pdata && pdata->max_freq)
+ mmc->f_max = pdata->max_freq;
+ if (pdata && pdata->caps)
+ mmc->caps |= pdata->caps;
+
+ /* Register a cd gpio, if there is not one, enable polling */
+ ret = mmc_gpiod_request_cd(mmc, "cd", 0, false, 0, NULL);
+ if (ret == -EPROBE_DEFER)
+ return ret;
+ else if (ret)
+ mmc->caps |= MMC_CAP_NEEDS_POLL;
+
+ ret = mmc_gpiod_request_ro(mmc, "wp", 0, false, 0, NULL);
+ if (ret == -EPROBE_DEFER)
+ return ret;
+
+ return 0;
}
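
The card-detect handling in mmc_davinci_parse_pdata() is a common fallback ladder: prefer a GPIO, propagate -EPROBE_DEFER when the GPIO provider is not ready yet, and fall back to polling otherwise. Condensed into a sketch:

    #include <linux/errno.h>
    #include <linux/mmc/host.h>
    #include <linux/mmc/slot-gpio.h>

    static int request_cd_with_fallback(struct mmc_host *mmc)
    {
        int ret = mmc_gpiod_request_cd(mmc, "cd", 0, false, 0, NULL);

        if (ret == -EPROBE_DEFER)
            return ret;                       /* provider not ready: retry probe */
        if (ret)
            mmc->caps |= MMC_CAP_NEEDS_POLL;  /* no usable GPIO: poll */
        return 0;
    }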
static int __init davinci_mmcsd_probe(struct platform_device *pdev)
{
- struct davinci_mmc_config *pdata = NULL;
+ const struct of_device_id *match;
struct mmc_davinci_host *host = NULL;
struct mmc_host *mmc = NULL;
struct resource *r, *mem = NULL;
@@ -1209,12 +1216,6 @@ static int __init davinci_mmcsd_probe(struct platform_device *pdev)
size_t mem_size;
const struct platform_device_id *id_entry;
- pdata = mmc_parse_pdata(pdev);
- if (pdata == NULL) {
- dev_err(&pdev->dev, "Couldn't get platform data\n");
- return -ENOENT;
- }
-
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!r)
return -ENODEV;
@@ -1253,14 +1254,28 @@ static int __init davinci_mmcsd_probe(struct platform_device *pdev)
host->mmc_input_clk = clk_get_rate(host->clk);
- init_mmcsd_host(host);
-
- if (pdata->nr_sg)
- host->nr_sg = pdata->nr_sg - 1;
+ match = of_match_device(davinci_mmc_dt_ids, &pdev->dev);
+ if (match) {
+ pdev->id_entry = match->data;
+ ret = mmc_of_parse(mmc);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "could not parse of data: %d\n", ret);
+ goto parse_fail;
+ }
+ } else {
+ ret = mmc_davinci_parse_pdata(mmc);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "could not parse platform data: %d\n", ret);
+ goto parse_fail;
+ }
+ }
if (host->nr_sg > MAX_NR_SG || !host->nr_sg)
host->nr_sg = MAX_NR_SG;
+ init_mmcsd_host(host);
+
host->use_dma = use_dma;
host->mmc_irq = irq;
host->sdio_irq = platform_get_irq(pdev, 1);
@@ -1273,27 +1288,13 @@ static int __init davinci_mmcsd_probe(struct platform_device *pdev)
host->use_dma = 0;
}
- /* REVISIT: someday, support IRQ-driven card detection. */
- mmc->caps |= MMC_CAP_NEEDS_POLL;
mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY;
- if (pdata && (pdata->wires == 4 || pdata->wires == 0))
- mmc->caps |= MMC_CAP_4_BIT_DATA;
-
- if (pdata && (pdata->wires == 8))
- mmc->caps |= (MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA);
-
id_entry = platform_get_device_id(pdev);
if (id_entry)
host->version = id_entry->driver_data;
mmc->ops = &mmc_davinci_ops;
- mmc->f_min = 312500;
- mmc->f_max = 25000000;
- if (pdata && pdata->max_freq)
- mmc->f_max = pdata->max_freq;
- if (pdata && pdata->caps)
- mmc->caps |= pdata->caps;
mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
/* With no iommu coalescing pages, each phys_seg is a hw_seg.
@@ -1354,6 +1355,7 @@ mmc_add_host_fail:
mmc_davinci_cpufreq_deregister(host);
cpu_freq_fail:
davinci_release_dma_channels(host);
+parse_fail:
dma_probe_defer:
clk_disable_unprepare(host->clk);
clk_prepare_enable_fail:
diff --git a/drivers/mmc/host/dw_mmc-exynos.c b/drivers/mmc/host/dw_mmc-exynos.c
index 7ab3d749b5ae..e1335289316c 100644
--- a/drivers/mmc/host/dw_mmc-exynos.c
+++ b/drivers/mmc/host/dw_mmc-exynos.c
@@ -17,6 +17,7 @@
#include <linux/mmc/mmc.h>
#include <linux/of.h>
#include <linux/of_gpio.h>
+#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include "dw_mmc.h"
@@ -161,20 +162,13 @@ static void dw_mci_exynos_set_clksel_timing(struct dw_mci *host, u32 timing)
set_bit(DW_MMC_CARD_NO_USE_HOLD, &host->cur_slot->flags);
}
-#ifdef CONFIG_PM_SLEEP
-static int dw_mci_exynos_suspend(struct device *dev)
-{
- struct dw_mci *host = dev_get_drvdata(dev);
-
- return dw_mci_suspend(host);
-}
-
-static int dw_mci_exynos_resume(struct device *dev)
+#ifdef CONFIG_PM
+static int dw_mci_exynos_runtime_resume(struct device *dev)
{
struct dw_mci *host = dev_get_drvdata(dev);
dw_mci_exynos_config_smu(host);
- return dw_mci_resume(host);
+ return dw_mci_runtime_resume(dev);
}
/**
@@ -211,10 +205,8 @@ static int dw_mci_exynos_resume_noirq(struct device *dev)
return 0;
}
#else
-#define dw_mci_exynos_suspend NULL
-#define dw_mci_exynos_resume NULL
#define dw_mci_exynos_resume_noirq NULL
-#endif /* CONFIG_PM_SLEEP */
+#endif /* CONFIG_PM */
static void dw_mci_exynos_config_hs400(struct dw_mci *host, u32 timing)
{
@@ -524,14 +516,42 @@ static int dw_mci_exynos_probe(struct platform_device *pdev)
{
const struct dw_mci_drv_data *drv_data;
const struct of_device_id *match;
+ int ret;
match = of_match_node(dw_mci_exynos_match, pdev->dev.of_node);
drv_data = match->data;
- return dw_mci_pltfm_register(pdev, drv_data);
+
+ pm_runtime_get_noresume(&pdev->dev);
+ pm_runtime_set_active(&pdev->dev);
+ pm_runtime_enable(&pdev->dev);
+
+ ret = dw_mci_pltfm_register(pdev, drv_data);
+ if (ret) {
+ pm_runtime_disable(&pdev->dev);
+ pm_runtime_set_suspended(&pdev->dev);
+ pm_runtime_put_noidle(&pdev->dev);
+
+ return ret;
+ }
+
+ return 0;
+}
+
+static int dw_mci_exynos_remove(struct platform_device *pdev)
+{
+ pm_runtime_disable(&pdev->dev);
+ pm_runtime_set_suspended(&pdev->dev);
+ pm_runtime_put_noidle(&pdev->dev);
+
+ return dw_mci_pltfm_remove(pdev);
}
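
dw_mci_exynos_probe() and dw_mci_exynos_remove() bracket the registration with the usual runtime-PM bring-up triple and its exact mirror on the failure and teardown paths. The shape, as a sketch (register_hw() is hypothetical):

    #include <linux/platform_device.h>
    #include <linux/pm_runtime.h>

    static int register_hw(struct platform_device *pdev) { return 0; }

    static int probe(struct platform_device *pdev)
    {
        int ret;

        pm_runtime_get_noresume(&pdev->dev);  /* block suspend during probe */
        pm_runtime_set_active(&pdev->dev);    /* hardware is already powered */
        pm_runtime_enable(&pdev->dev);

        ret = register_hw(pdev);
        if (ret) {
            /* Undo in reverse order on failure. */
            pm_runtime_disable(&pdev->dev);
            pm_runtime_set_suspended(&pdev->dev);
            pm_runtime_put_noidle(&pdev->dev);
        }
        return ret;
    }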
static const struct dev_pm_ops dw_mci_exynos_pmops = {
- SET_SYSTEM_SLEEP_PM_OPS(dw_mci_exynos_suspend, dw_mci_exynos_resume)
+ SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
+ SET_RUNTIME_PM_OPS(dw_mci_runtime_suspend,
+ dw_mci_exynos_runtime_resume,
+ NULL)
.resume_noirq = dw_mci_exynos_resume_noirq,
.thaw_noirq = dw_mci_exynos_resume_noirq,
.restore_noirq = dw_mci_exynos_resume_noirq,
@@ -539,7 +559,7 @@ static const struct dev_pm_ops dw_mci_exynos_pmops = {
static struct platform_driver dw_mci_exynos_pltfm_driver = {
.probe = dw_mci_exynos_probe,
- .remove = dw_mci_pltfm_remove,
+ .remove = dw_mci_exynos_remove,
.driver = {
.name = "dwmmc_exynos",
.of_match_table = dw_mci_exynos_match,
diff --git a/drivers/mmc/host/dw_mmc-k3.c b/drivers/mmc/host/dw_mmc-k3.c
index 624789496dce..9821e6bd5d5e 100644
--- a/drivers/mmc/host/dw_mmc-k3.c
+++ b/drivers/mmc/host/dw_mmc-k3.c
@@ -15,6 +15,7 @@
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
@@ -162,35 +163,13 @@ static int dw_mci_k3_probe(struct platform_device *pdev)
return dw_mci_pltfm_register(pdev, drv_data);
}
-#ifdef CONFIG_PM_SLEEP
-static int dw_mci_k3_suspend(struct device *dev)
-{
- struct dw_mci *host = dev_get_drvdata(dev);
- int ret;
-
- ret = dw_mci_suspend(host);
- if (!ret)
- clk_disable_unprepare(host->ciu_clk);
-
- return ret;
-}
-
-static int dw_mci_k3_resume(struct device *dev)
-{
- struct dw_mci *host = dev_get_drvdata(dev);
- int ret;
-
- ret = clk_prepare_enable(host->ciu_clk);
- if (ret) {
- dev_err(host->dev, "failed to enable ciu clock\n");
- return ret;
- }
-
- return dw_mci_resume(host);
-}
-#endif /* CONFIG_PM_SLEEP */
-
-static SIMPLE_DEV_PM_OPS(dw_mci_k3_pmops, dw_mci_k3_suspend, dw_mci_k3_resume);
+static const struct dev_pm_ops dw_mci_k3_dev_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
+ SET_RUNTIME_PM_OPS(dw_mci_runtime_suspend,
+ dw_mci_runtime_resume,
+ NULL)
+};
static struct platform_driver dw_mci_k3_pltfm_driver = {
.probe = dw_mci_k3_probe,
@@ -198,7 +177,7 @@ static struct platform_driver dw_mci_k3_pltfm_driver = {
.driver = {
.name = "dwmmc_k3",
.of_match_table = dw_mci_k3_match,
- .pm = &dw_mci_k3_pmops,
+ .pm = &dw_mci_k3_dev_pm_ops,
},
};
diff --git a/drivers/mmc/host/dw_mmc-pci.c b/drivers/mmc/host/dw_mmc-pci.c
index 4c69fbd29811..ab82796b01e2 100644
--- a/drivers/mmc/host/dw_mmc-pci.c
+++ b/drivers/mmc/host/dw_mmc-pci.c
@@ -14,6 +14,7 @@
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/pci.h>
+#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
@@ -79,25 +80,13 @@ static void dw_mci_pci_remove(struct pci_dev *pdev)
dw_mci_remove(host);
}
-#ifdef CONFIG_PM_SLEEP
-static int dw_mci_pci_suspend(struct device *dev)
-{
- struct pci_dev *pdev = to_pci_dev(dev);
- struct dw_mci *host = pci_get_drvdata(pdev);
-
- return dw_mci_suspend(host);
-}
-
-static int dw_mci_pci_resume(struct device *dev)
-{
- struct pci_dev *pdev = to_pci_dev(dev);
- struct dw_mci *host = pci_get_drvdata(pdev);
-
- return dw_mci_resume(host);
-}
-#endif /* CONFIG_PM_SLEEP */
-
-static SIMPLE_DEV_PM_OPS(dw_mci_pci_pmops, dw_mci_pci_suspend, dw_mci_pci_resume);
+static const struct dev_pm_ops dw_mci_pci_dev_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
+ SET_RUNTIME_PM_OPS(dw_mci_runtime_suspend,
+ dw_mci_runtime_resume,
+ NULL)
+};
static const struct pci_device_id dw_mci_pci_id[] = {
{ PCI_DEVICE(SYNOPSYS_DW_MCI_VENDOR_ID, SYNOPSYS_DW_MCI_DEVICE_ID) },
@@ -111,7 +100,7 @@ static struct pci_driver dw_mci_pci_driver = {
.probe = dw_mci_pci_probe,
.remove = dw_mci_pci_remove,
.driver = {
- .pm = &dw_mci_pci_pmops
+ .pm = &dw_mci_pci_dev_pm_ops,
},
};
diff --git a/drivers/mmc/host/dw_mmc-pltfm.c b/drivers/mmc/host/dw_mmc-pltfm.c
index dbbc4303bdd0..1236d49ba36e 100644
--- a/drivers/mmc/host/dw_mmc-pltfm.c
+++ b/drivers/mmc/host/dw_mmc-pltfm.c
@@ -16,6 +16,7 @@
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
@@ -58,26 +59,13 @@ int dw_mci_pltfm_register(struct platform_device *pdev,
}
EXPORT_SYMBOL_GPL(dw_mci_pltfm_register);
-#ifdef CONFIG_PM_SLEEP
-/*
- * TODO: we should probably disable the clock to the card in the suspend path.
- */
-static int dw_mci_pltfm_suspend(struct device *dev)
-{
- struct dw_mci *host = dev_get_drvdata(dev);
-
- return dw_mci_suspend(host);
-}
-
-static int dw_mci_pltfm_resume(struct device *dev)
-{
- struct dw_mci *host = dev_get_drvdata(dev);
-
- return dw_mci_resume(host);
-}
-#endif /* CONFIG_PM_SLEEP */
-
-SIMPLE_DEV_PM_OPS(dw_mci_pltfm_pmops, dw_mci_pltfm_suspend, dw_mci_pltfm_resume);
+const struct dev_pm_ops dw_mci_pltfm_pmops = {
+ SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
+ SET_RUNTIME_PM_OPS(dw_mci_runtime_suspend,
+ dw_mci_runtime_resume,
+ NULL)
+};
EXPORT_SYMBOL_GPL(dw_mci_pltfm_pmops);
static const struct of_device_id dw_mci_pltfm_match[] = {
diff --git a/drivers/mmc/host/dw_mmc-rockchip.c b/drivers/mmc/host/dw_mmc-rockchip.c
index 25eae359a5ea..9a46e4694227 100644
--- a/drivers/mmc/host/dw_mmc-rockchip.c
+++ b/drivers/mmc/host/dw_mmc-rockchip.c
@@ -13,6 +13,8 @@
#include <linux/mmc/host.h>
#include <linux/mmc/dw_mmc.h>
#include <linux/of_address.h>
+#include <linux/mmc/slot-gpio.h>
+#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include "dw_mmc.h"
@@ -325,6 +327,7 @@ static int dw_mci_rockchip_probe(struct platform_device *pdev)
{
const struct dw_mci_drv_data *drv_data;
const struct of_device_id *match;
+ int ret;
if (!pdev->dev.of_node)
return -ENODEV;
@@ -332,16 +335,49 @@ static int dw_mci_rockchip_probe(struct platform_device *pdev)
match = of_match_node(dw_mci_rockchip_match, pdev->dev.of_node);
drv_data = match->data;
- return dw_mci_pltfm_register(pdev, drv_data);
+ pm_runtime_get_noresume(&pdev->dev);
+ pm_runtime_set_active(&pdev->dev);
+ pm_runtime_enable(&pdev->dev);
+ pm_runtime_set_autosuspend_delay(&pdev->dev, 50);
+ pm_runtime_use_autosuspend(&pdev->dev);
+
+ ret = dw_mci_pltfm_register(pdev, drv_data);
+ if (ret) {
+ pm_runtime_disable(&pdev->dev);
+ pm_runtime_set_suspended(&pdev->dev);
+ pm_runtime_put_noidle(&pdev->dev);
+ return ret;
+ }
+
+ pm_runtime_put_autosuspend(&pdev->dev);
+
+ return 0;
}
+static int dw_mci_rockchip_remove(struct platform_device *pdev)
+{
+ pm_runtime_get_sync(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+ pm_runtime_put_noidle(&pdev->dev);
+
+ return dw_mci_pltfm_remove(pdev);
+}
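
On top of the same pattern, the Rockchip glue opts into autosuspend, so the controller powers down 50 ms after its last user drops off rather than immediately. The enabling calls, isolated as a sketch:

    #include <linux/device.h>
    #include <linux/pm_runtime.h>

    static void enable_autosuspend(struct device *dev)
    {
        pm_runtime_set_autosuspend_delay(dev, 50);  /* ms */
        pm_runtime_use_autosuspend(dev);
        /* Idle paths then call pm_runtime_put_autosuspend(dev), which
         * arms the 50 ms timer instead of suspending right away. */
    }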
+
+static const struct dev_pm_ops dw_mci_rockchip_dev_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
+ SET_RUNTIME_PM_OPS(dw_mci_runtime_suspend,
+ dw_mci_runtime_resume,
+ NULL)
+};
+
static struct platform_driver dw_mci_rockchip_pltfm_driver = {
.probe = dw_mci_rockchip_probe,
- .remove = dw_mci_pltfm_remove,
+ .remove = dw_mci_rockchip_remove,
.driver = {
.name = "dwmmc_rockchip",
.of_match_table = dw_mci_rockchip_match,
- .pm = &dw_mci_pltfm_pmops,
+ .pm = &dw_mci_rockchip_dev_pm_ops,
},
};
diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c
index df478ae72e23..b44306b886cb 100644
--- a/drivers/mmc/host/dw_mmc.c
+++ b/drivers/mmc/host/dw_mmc.c
@@ -54,7 +54,7 @@
#define DW_MCI_DMA_THRESHOLD 16
#define DW_MCI_FREQ_MAX 200000000 /* unit: HZ */
-#define DW_MCI_FREQ_MIN 400000 /* unit: HZ */
+#define DW_MCI_FREQ_MIN 100000 /* unit: HZ */
#define IDMAC_INT_CLR (SDMMC_IDMAC_INT_AI | SDMMC_IDMAC_INT_NI | \
SDMMC_IDMAC_INT_CES | SDMMC_IDMAC_INT_DU | \
@@ -165,12 +165,14 @@ static const struct file_operations dw_mci_req_fops = {
static int dw_mci_regs_show(struct seq_file *s, void *v)
{
- seq_printf(s, "STATUS:\t0x%08x\n", SDMMC_STATUS);
- seq_printf(s, "RINTSTS:\t0x%08x\n", SDMMC_RINTSTS);
- seq_printf(s, "CMD:\t0x%08x\n", SDMMC_CMD);
- seq_printf(s, "CTRL:\t0x%08x\n", SDMMC_CTRL);
- seq_printf(s, "INTMASK:\t0x%08x\n", SDMMC_INTMASK);
- seq_printf(s, "CLKENA:\t0x%08x\n", SDMMC_CLKENA);
+ struct dw_mci *host = s->private;
+
+ seq_printf(s, "STATUS:\t0x%08x\n", mci_readl(host, STATUS));
+ seq_printf(s, "RINTSTS:\t0x%08x\n", mci_readl(host, RINTSTS));
+ seq_printf(s, "CMD:\t0x%08x\n", mci_readl(host, CMD));
+ seq_printf(s, "CTRL:\t0x%08x\n", mci_readl(host, CTRL));
+ seq_printf(s, "INTMASK:\t0x%08x\n", mci_readl(host, INTMASK));
+ seq_printf(s, "CLKENA:\t0x%08x\n", mci_readl(host, CLKENA));
return 0;
}
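
This dw_mci_regs_show() change fixes a debugfs bug: the format strings were previously handed the register offset macros themselves, so the file dumped compile-time constants instead of hardware state. With s->private carrying the host pointer, mci_readl() performs a real MMIO read; paraphrased from dw_mmc.h:

    /* Accessor shape, paraphrased: an MMIO read at the named offset,
     * not the bare offset constant printed before this fix. */
    #define mci_readl(dev, reg) \
        readl((dev)->regs + SDMMC_##reg)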
@@ -234,7 +236,6 @@ static void mci_send_cmd(struct dw_mci_slot *slot, u32 cmd, u32 arg);
static u32 dw_mci_prepare_command(struct mmc_host *mmc, struct mmc_command *cmd)
{
- struct mmc_data *data;
struct dw_mci_slot *slot = mmc_priv(mmc);
struct dw_mci *host = slot->host;
u32 cmdr;
@@ -289,10 +290,9 @@ static u32 dw_mci_prepare_command(struct mmc_host *mmc, struct mmc_command *cmd)
if (cmd->flags & MMC_RSP_CRC)
cmdr |= SDMMC_CMD_RESP_CRC;
- data = cmd->data;
- if (data) {
+ if (cmd->data) {
cmdr |= SDMMC_CMD_DAT_EXP;
- if (data->flags & MMC_DATA_WRITE)
+ if (cmd->data->flags & MMC_DATA_WRITE)
cmdr |= SDMMC_CMD_DAT_WR;
}
@@ -335,6 +335,9 @@ static u32 dw_mci_prep_stop_abort(struct dw_mci *host, struct mmc_command *cmd)
cmdr = stop->opcode | SDMMC_CMD_STOP |
SDMMC_CMD_RESP_CRC | SDMMC_CMD_RESP_EXP;
+ if (!test_bit(DW_MMC_CARD_NO_USE_HOLD, &host->cur_slot->flags))
+ cmdr |= SDMMC_CMD_USE_HOLD_REG;
+
return cmdr;
}
@@ -380,7 +383,7 @@ static void dw_mci_start_command(struct dw_mci *host,
static inline void send_stop_abort(struct dw_mci *host, struct mmc_data *data)
{
- struct mmc_command *stop = data->stop ? data->stop : &host->stop_abort;
+ struct mmc_command *stop = &host->stop_abort;
dw_mci_start_command(host, stop, host->stop_cmdr);
}
@@ -409,12 +412,13 @@ static void dw_mci_dma_cleanup(struct dw_mci *host)
{
struct mmc_data *data = host->data;
- if (data)
- if (!data->host_cookie)
- dma_unmap_sg(host->dev,
- data->sg,
- data->sg_len,
- dw_mci_get_dma_dir(data));
+ if (data && data->host_cookie == COOKIE_MAPPED) {
+ dma_unmap_sg(host->dev,
+ data->sg,
+ data->sg_len,
+ dw_mci_get_dma_dir(data));
+ data->host_cookie = COOKIE_UNMAPPED;
+ }
}
static void dw_mci_idmac_reset(struct dw_mci *host)
@@ -612,7 +616,7 @@ static inline int dw_mci_prepare_desc64(struct dw_mci *host,
return 0;
err_own_bit:
/* restore the descriptor chain as it's polluted */
- dev_dbg(host->dev, "desciptor is still owned by IDMAC.\n");
+ dev_dbg(host->dev, "descriptor is still owned by IDMAC.\n");
memset(host->sg_cpu, 0, DESC_RING_BUF_SZ);
dw_mci_idmac_init(host);
return -EINVAL;
@@ -688,7 +692,7 @@ static inline int dw_mci_prepare_desc32(struct dw_mci *host,
return 0;
err_own_bit:
/* restore the descriptor chain as it's polluted */
- dev_dbg(host->dev, "desciptor is still owned by IDMAC.\n");
+ dev_dbg(host->dev, "descriptor is still owned by IDMAC.\n");
memset(host->sg_cpu, 0, DESC_RING_BUF_SZ);
dw_mci_idmac_init(host);
return -EINVAL;
@@ -845,13 +849,13 @@ static const struct dw_mci_dma_ops dw_mci_edmac_ops = {
static int dw_mci_pre_dma_transfer(struct dw_mci *host,
struct mmc_data *data,
- bool next)
+ int cookie)
{
struct scatterlist *sg;
unsigned int i, sg_len;
- if (!next && data->host_cookie)
- return data->host_cookie;
+ if (data->host_cookie == COOKIE_PRE_MAPPED)
+ return data->sg_len;
/*
* We don't do DMA on "complex" transfers, i.e. with
@@ -876,15 +880,13 @@ static int dw_mci_pre_dma_transfer(struct dw_mci *host,
if (sg_len == 0)
return -EINVAL;
- if (next)
- data->host_cookie = sg_len;
+ data->host_cookie = cookie;
return sg_len;
}
static void dw_mci_pre_req(struct mmc_host *mmc,
- struct mmc_request *mrq,
- bool is_first_req)
+ struct mmc_request *mrq)
{
struct dw_mci_slot *slot = mmc_priv(mmc);
struct mmc_data *data = mrq->data;
@@ -892,13 +894,12 @@ static void dw_mci_pre_req(struct mmc_host *mmc,
if (!slot->host->use_dma || !data)
return;
- if (data->host_cookie) {
- data->host_cookie = 0;
- return;
- }
+ /* This data might be unmapped at this time */
+ data->host_cookie = COOKIE_UNMAPPED;
- if (dw_mci_pre_dma_transfer(slot->host, mrq->data, 1) < 0)
- data->host_cookie = 0;
+ if (dw_mci_pre_dma_transfer(slot->host, mrq->data,
+ COOKIE_PRE_MAPPED) < 0)
+ data->host_cookie = COOKIE_UNMAPPED;
}
static void dw_mci_post_req(struct mmc_host *mmc,
@@ -911,12 +912,12 @@ static void dw_mci_post_req(struct mmc_host *mmc,
if (!slot->host->use_dma || !data)
return;
- if (data->host_cookie)
+ if (data->host_cookie != COOKIE_UNMAPPED)
dma_unmap_sg(slot->host->dev,
data->sg,
data->sg_len,
dw_mci_get_dma_dir(data));
- data->host_cookie = 0;
+ data->host_cookie = COOKIE_UNMAPPED;
}
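
The host_cookie handling above replaces the old implicit encoding, where 0 meant unmapped and any nonzero value was the sg_len of a premapped list, with an explicit tri-state. The COOKIE_* values live in dw_mmc.h; their shape is approximately (enum named here only for readability):

    enum dw_mci_cookie {
        COOKIE_UNMAPPED,    /* sg list not DMA-mapped */
        COOKIE_PRE_MAPPED,  /* mapped in pre_req(), unmapped in post_req() */
        COOKIE_MAPPED,      /* mapped at submit time, unmapped in dma_cleanup() */
    };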
static void dw_mci_adjust_fifoth(struct dw_mci *host, struct mmc_data *data)
@@ -1022,7 +1023,7 @@ static int dw_mci_submit_data_dma(struct dw_mci *host, struct mmc_data *data)
if (!host->use_dma)
return -ENODEV;
- sg_len = dw_mci_pre_dma_transfer(host, data, 0);
+ sg_len = dw_mci_pre_dma_transfer(host, data, COOKIE_MAPPED);
if (sg_len < 0) {
host->dma_ops->stop(host);
return sg_len;
@@ -1175,13 +1176,24 @@ static void dw_mci_setup_bus(struct dw_mci_slot *slot, bool force_clkinit)
div = (host->bus_hz != clock) ? DIV_ROUND_UP(div, 2) : 0;
- if (clock != slot->__clk_old || force_clkinit)
+ if ((clock != slot->__clk_old &&
+ !test_bit(DW_MMC_CARD_NEEDS_POLL, &slot->flags)) ||
+ force_clkinit) {
dev_info(&slot->mmc->class_dev,
"Bus speed (slot %d) = %dHz (slot req %dHz, actual %dHZ div = %d)\n",
slot->id, host->bus_hz, clock,
div ? ((host->bus_hz / div) >> 1) :
host->bus_hz, div);
+ /*
+ * If card is polling, display the message only
+ * one time at boot time.
+ */
+ if (slot->mmc->caps & MMC_CAP_NEEDS_POLL &&
+ slot->mmc->f_min == clock)
+ set_bit(DW_MMC_CARD_NEEDS_POLL, &slot->flags);
+ }
+
/* disable clock */
mci_writel(host, CLKENA, 0);
mci_writel(host, CLKSRC, 0);
@@ -1273,10 +1285,7 @@ static void __dw_mci_start_request(struct dw_mci *host,
spin_unlock_irqrestore(&host->irq_lock, irqflags);
}
- if (mrq->stop)
- host->stop_cmdr = dw_mci_prepare_command(slot->mmc, mrq->stop);
- else
- host->stop_cmdr = dw_mci_prep_stop_abort(host, cmd);
+ host->stop_cmdr = dw_mci_prep_stop_abort(host, cmd);
}
static void dw_mci_start_request(struct dw_mci *host,
@@ -1527,22 +1536,34 @@ static int dw_mci_get_cd(struct mmc_host *mmc)
int gpio_cd = mmc_gpio_get_cd(mmc);
/* Use platform get_cd function, else try onboard card detect */
- if ((mmc->caps & MMC_CAP_NEEDS_POLL) || !mmc_card_is_removable(mmc))
+ if (((mmc->caps & MMC_CAP_NEEDS_POLL)
+ || !mmc_card_is_removable(mmc))) {
present = 1;
- else if (gpio_cd >= 0)
+
+ if (!test_bit(DW_MMC_CARD_PRESENT, &slot->flags)) {
+ if (mmc->caps & MMC_CAP_NEEDS_POLL) {
+ dev_info(&mmc->class_dev,
+ "card is polling.\n");
+ } else {
+ dev_info(&mmc->class_dev,
+ "card is non-removable.\n");
+ }
+ set_bit(DW_MMC_CARD_PRESENT, &slot->flags);
+ }
+
+ return present;
+ } else if (gpio_cd >= 0)
present = gpio_cd;
else
present = (mci_readl(slot->host, CDETECT) & (1 << slot->id))
== 0 ? 1 : 0;
spin_lock_bh(&host->lock);
- if (present) {
- set_bit(DW_MMC_CARD_PRESENT, &slot->flags);
+ if (present && !test_and_set_bit(DW_MMC_CARD_PRESENT, &slot->flags))
dev_dbg(&mmc->class_dev, "card is present\n");
- } else {
- clear_bit(DW_MMC_CARD_PRESENT, &slot->flags);
+ else if (!present &&
+ !test_and_clear_bit(DW_MMC_CARD_PRESENT, &slot->flags))
dev_dbg(&mmc->class_dev, "card is not present\n");
- }
spin_unlock_bh(&host->lock);
return present;
@@ -1889,8 +1910,7 @@ static void dw_mci_tasklet_func(unsigned long priv)
if (test_and_clear_bit(EVENT_DATA_ERROR,
&host->pending_events)) {
dw_mci_stop_dma(host);
- if (data->stop ||
- !(host->data_status & (SDMMC_INT_DRTO |
+ if (!(host->data_status & (SDMMC_INT_DRTO |
SDMMC_INT_EBE)))
send_stop_abort(host, data);
state = STATE_DATA_ERROR;
@@ -1926,8 +1946,7 @@ static void dw_mci_tasklet_func(unsigned long priv)
if (test_and_clear_bit(EVENT_DATA_ERROR,
&host->pending_events)) {
dw_mci_stop_dma(host);
- if (data->stop ||
- !(host->data_status & (SDMMC_INT_DRTO |
+ if (!(host->data_status & (SDMMC_INT_DRTO |
SDMMC_INT_EBE)))
send_stop_abort(host, data);
state = STATE_DATA_ERROR;
@@ -2003,7 +2022,7 @@ static void dw_mci_tasklet_func(unsigned long priv)
host->cmd = NULL;
host->data = NULL;
- if (mrq->stop)
+ if (!mrq->sbc && mrq->stop)
dw_mci_command_complete(host, mrq->stop);
else
host->cmd_status = 0;
@@ -2615,6 +2634,8 @@ static int dw_mci_init_slot(struct dw_mci *host, unsigned int id)
mmc->f_min = DW_MCI_FREQ_MIN;
mmc->f_max = DW_MCI_FREQ_MAX;
} else {
+ dev_info(host->dev,
+ "'clock-freq-min-max' property was deprecated.\n");
mmc->f_min = freq[0];
mmc->f_max = freq[1];
}
@@ -3267,26 +3288,46 @@ EXPORT_SYMBOL(dw_mci_remove);
-#ifdef CONFIG_PM_SLEEP
-/*
- * TODO: we should probably disable the clock to the card in the suspend path.
- */
-int dw_mci_suspend(struct dw_mci *host)
+#ifdef CONFIG_PM
+int dw_mci_runtime_suspend(struct device *dev)
{
+ struct dw_mci *host = dev_get_drvdata(dev);
+
if (host->use_dma && host->dma_ops->exit)
host->dma_ops->exit(host);
+ clk_disable_unprepare(host->ciu_clk);
+
+ if (host->cur_slot &&
+ (mmc_can_gpio_cd(host->cur_slot->mmc) ||
+ !mmc_card_is_removable(host->cur_slot->mmc)))
+ clk_disable_unprepare(host->biu_clk);
+
return 0;
}
-EXPORT_SYMBOL(dw_mci_suspend);
+EXPORT_SYMBOL(dw_mci_runtime_suspend);
-int dw_mci_resume(struct dw_mci *host)
+int dw_mci_runtime_resume(struct device *dev)
{
- int i, ret;
+ int i, ret = 0;
+ struct dw_mci *host = dev_get_drvdata(dev);
+
+ if (host->cur_slot &&
+ (mmc_can_gpio_cd(host->cur_slot->mmc) ||
+ !mmc_card_is_removable(host->cur_slot->mmc))) {
+ ret = clk_prepare_enable(host->biu_clk);
+ if (ret)
+ return ret;
+ }
+
+ ret = clk_prepare_enable(host->ciu_clk);
+ if (ret)
+ goto err;
if (!dw_mci_ctrl_reset(host, SDMMC_CTRL_ALL_RESET_FLAGS)) {
+ clk_disable_unprepare(host->ciu_clk);
ret = -ENODEV;
- return ret;
+ goto err;
}
if (host->use_dma && host->dma_ops->init)
@@ -3296,8 +3337,8 @@ int dw_mci_resume(struct dw_mci *host)
* Restore the initial value at FIFOTH register
* And Invalidate the prev_blksz with zero
*/
- mci_writel(host, FIFOTH, host->fifoth_val);
- host->prev_blksz = 0;
+ mci_writel(host, FIFOTH, host->fifoth_val);
+ host->prev_blksz = 0;
/* Put in max timeout */
mci_writel(host, TMOUT, 0xFFFFFFFF);
@@ -3323,9 +3364,17 @@ int dw_mci_resume(struct dw_mci *host)
dw_mci_enable_cd(host);
return 0;
+
+err:
+ if (host->cur_slot &&
+ (mmc_can_gpio_cd(host->cur_slot->mmc) ||
+ !mmc_card_is_removable(host->cur_slot->mmc)))
+ clk_disable_unprepare(host->biu_clk);
+
+ return ret;
}
-EXPORT_SYMBOL(dw_mci_resume);
-#endif /* CONFIG_PM_SLEEP */
+EXPORT_SYMBOL(dw_mci_runtime_resume);
+#endif /* CONFIG_PM */
static int __init dw_mci_init(void)
{
diff --git a/drivers/mmc/host/dw_mmc.h b/drivers/mmc/host/dw_mmc.h
index e8cd2dec3263..c59465829387 100644
--- a/drivers/mmc/host/dw_mmc.h
+++ b/drivers/mmc/host/dw_mmc.h
@@ -234,9 +234,9 @@
extern int dw_mci_probe(struct dw_mci *host);
extern void dw_mci_remove(struct dw_mci *host);
-#ifdef CONFIG_PM_SLEEP
-extern int dw_mci_suspend(struct dw_mci *host);
-extern int dw_mci_resume(struct dw_mci *host);
+#ifdef CONFIG_PM
+extern int dw_mci_runtime_suspend(struct device *device);
+extern int dw_mci_runtime_resume(struct device *device);
#endif
/**
@@ -272,6 +272,7 @@ struct dw_mci_slot {
#define DW_MMC_CARD_NEED_INIT 1
#define DW_MMC_CARD_NO_LOW_PWR 2
#define DW_MMC_CARD_NO_USE_HOLD 3
+#define DW_MMC_CARD_NEEDS_POLL 4
int id;
int sdio_id;
};
diff --git a/drivers/mmc/host/jz4740_mmc.c b/drivers/mmc/host/jz4740_mmc.c
index 684087db170b..819ad32964fc 100644
--- a/drivers/mmc/host/jz4740_mmc.c
+++ b/drivers/mmc/host/jz4740_mmc.c
@@ -320,8 +320,7 @@ dma_unmap:
}
static void jz4740_mmc_pre_request(struct mmc_host *mmc,
- struct mmc_request *mrq,
- bool is_first_req)
+ struct mmc_request *mrq)
{
struct jz4740_mmc_host *host = mmc_priv(mmc);
struct mmc_data *data = mrq->data;
diff --git a/drivers/mmc/host/meson-gx-mmc.c b/drivers/mmc/host/meson-gx-mmc.c
new file mode 100644
index 000000000000..b352760c041e
--- /dev/null
+++ b/drivers/mmc/host/meson-gx-mmc.c
@@ -0,0 +1,851 @@
+/*
+ * Amlogic SD/eMMC driver for the GX/S905 family SoCs
+ *
+ * Copyright (c) 2016 BayLibre, SAS.
+ * Author: Kevin Hilman <khilman@baylibre.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/ioport.h>
+#include <linux/spinlock.h>
+#include <linux/dma-mapping.h>
+#include <linux/mmc/host.h>
+#include <linux/mmc/mmc.h>
+#include <linux/mmc/sdio.h>
+#include <linux/mmc/slot-gpio.h>
+#include <linux/io.h>
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/regulator/consumer.h>
+
+#define DRIVER_NAME "meson-gx-mmc"
+
+#define SD_EMMC_CLOCK 0x0
+#define CLK_DIV_SHIFT 0
+#define CLK_DIV_WIDTH 6
+#define CLK_DIV_MASK 0x3f
+#define CLK_DIV_MAX 63
+#define CLK_SRC_SHIFT 6
+#define CLK_SRC_WIDTH 2
+#define CLK_SRC_MASK 0x3
+#define CLK_SRC_XTAL 0 /* external crystal */
+#define CLK_SRC_XTAL_RATE 24000000
+#define CLK_SRC_PLL 1 /* FCLK_DIV2 */
+#define CLK_SRC_PLL_RATE 1000000000
+#define CLK_PHASE_SHIFT 8
+#define CLK_PHASE_MASK 0x3
+#define CLK_PHASE_0 0
+#define CLK_PHASE_90 1
+#define CLK_PHASE_180 2
+#define CLK_PHASE_270 3
+#define CLK_ALWAYS_ON BIT(24)
+
+#define SD_EMMC_DELAY 0x4
+#define SD_EMMC_ADJUST 0x8
+#define SD_EMMC_CALOUT 0x10
+#define SD_EMMC_START 0x40
+#define START_DESC_INIT BIT(0)
+#define START_DESC_BUSY BIT(1)
+#define START_DESC_ADDR_SHIFT 2
+#define START_DESC_ADDR_MASK (~0x3)
+
+#define SD_EMMC_CFG 0x44
+#define CFG_BUS_WIDTH_SHIFT 0
+#define CFG_BUS_WIDTH_MASK 0x3
+#define CFG_BUS_WIDTH_1 0x0
+#define CFG_BUS_WIDTH_4 0x1
+#define CFG_BUS_WIDTH_8 0x2
+#define CFG_DDR BIT(2)
+#define CFG_BLK_LEN_SHIFT 4
+#define CFG_BLK_LEN_MASK 0xf
+#define CFG_RESP_TIMEOUT_SHIFT 8
+#define CFG_RESP_TIMEOUT_MASK 0xf
+#define CFG_RC_CC_SHIFT 12
+#define CFG_RC_CC_MASK 0xf
+#define CFG_STOP_CLOCK BIT(22)
+#define CFG_CLK_ALWAYS_ON BIT(18)
+#define CFG_AUTO_CLK BIT(23)
+
+#define SD_EMMC_STATUS 0x48
+#define STATUS_BUSY BIT(31)
+
+#define SD_EMMC_IRQ_EN 0x4c
+#define IRQ_EN_MASK 0x3fff
+#define IRQ_RXD_ERR_SHIFT 0
+#define IRQ_RXD_ERR_MASK 0xff
+#define IRQ_TXD_ERR BIT(8)
+#define IRQ_DESC_ERR BIT(9)
+#define IRQ_RESP_ERR BIT(10)
+#define IRQ_RESP_TIMEOUT BIT(11)
+#define IRQ_DESC_TIMEOUT BIT(12)
+#define IRQ_END_OF_CHAIN BIT(13)
+#define IRQ_RESP_STATUS BIT(14)
+#define IRQ_SDIO BIT(15)
+
+#define SD_EMMC_CMD_CFG 0x50
+#define SD_EMMC_CMD_ARG 0x54
+#define SD_EMMC_CMD_DAT 0x58
+#define SD_EMMC_CMD_RSP 0x5c
+#define SD_EMMC_CMD_RSP1 0x60
+#define SD_EMMC_CMD_RSP2 0x64
+#define SD_EMMC_CMD_RSP3 0x68
+
+#define SD_EMMC_RXD 0x94
+#define SD_EMMC_TXD 0x94
+#define SD_EMMC_LAST_REG SD_EMMC_TXD
+
+#define SD_EMMC_CFG_BLK_SIZE 512 /* internal buffer max: 512 bytes */
+#define SD_EMMC_CFG_RESP_TIMEOUT 256 /* in clock cycles */
+#define SD_EMMC_CFG_CMD_GAP 16 /* in clock cycles */
+#define MUX_CLK_NUM_PARENTS 2
+
+struct meson_host {
+ struct device *dev;
+ struct mmc_host *mmc;
+ struct mmc_request *mrq;
+ struct mmc_command *cmd;
+
+ spinlock_t lock;
+ void __iomem *regs;
+ int irq;
+ u32 ocr_mask;
+ struct clk *core_clk;
+ struct clk_mux mux;
+ struct clk *mux_clk;
+ struct clk *mux_parent[MUX_CLK_NUM_PARENTS];
+ unsigned long mux_parent_rate[MUX_CLK_NUM_PARENTS];
+
+ struct clk_divider cfg_div;
+ struct clk *cfg_div_clk;
+
+ unsigned int bounce_buf_size;
+ void *bounce_buf;
+ dma_addr_t bounce_dma_addr;
+
+ bool vqmmc_enabled;
+};
+
+struct sd_emmc_desc {
+ u32 cmd_cfg;
+ u32 cmd_arg;
+ u32 cmd_data;
+ u32 cmd_resp;
+};
+#define CMD_CFG_LENGTH_SHIFT 0
+#define CMD_CFG_LENGTH_MASK 0x1ff
+#define CMD_CFG_BLOCK_MODE BIT(9)
+#define CMD_CFG_R1B BIT(10)
+#define CMD_CFG_END_OF_CHAIN BIT(11)
+#define CMD_CFG_TIMEOUT_SHIFT 12
+#define CMD_CFG_TIMEOUT_MASK 0xf
+#define CMD_CFG_NO_RESP BIT(16)
+#define CMD_CFG_NO_CMD BIT(17)
+#define CMD_CFG_DATA_IO BIT(18)
+#define CMD_CFG_DATA_WR BIT(19)
+#define CMD_CFG_RESP_NOCRC BIT(20)
+#define CMD_CFG_RESP_128 BIT(21)
+#define CMD_CFG_RESP_NUM BIT(22)
+#define CMD_CFG_DATA_NUM BIT(23)
+#define CMD_CFG_CMD_INDEX_SHIFT 24
+#define CMD_CFG_CMD_INDEX_MASK 0x3f
+#define CMD_CFG_ERROR BIT(30)
+#define CMD_CFG_OWNER BIT(31)
+
+#define CMD_DATA_MASK (~0x3)
+#define CMD_DATA_BIG_ENDIAN BIT(1)
+#define CMD_DATA_SRAM BIT(0)
+#define CMD_RESP_MASK (~0x1)
+#define CMD_RESP_SRAM BIT(0)
+
+static int meson_mmc_clk_set(struct meson_host *host, unsigned long clk_rate)
+{
+ struct mmc_host *mmc = host->mmc;
+ int ret = 0;
+ u32 cfg;
+
+ if (clk_rate) {
+ if (WARN_ON(clk_rate > mmc->f_max))
+ clk_rate = mmc->f_max;
+ else if (WARN_ON(clk_rate < mmc->f_min))
+ clk_rate = mmc->f_min;
+ }
+
+ if (clk_rate == mmc->actual_clock)
+ return 0;
+
+ /* stop clock */
+ cfg = readl(host->regs + SD_EMMC_CFG);
+ if (!(cfg & CFG_STOP_CLOCK)) {
+ cfg |= CFG_STOP_CLOCK;
+ writel(cfg, host->regs + SD_EMMC_CFG);
+ }
+
+ dev_dbg(host->dev, "change clock rate %u -> %lu\n",
+ mmc->actual_clock, clk_rate);
+
+ if (clk_rate == 0) {
+ mmc->actual_clock = 0;
+ return 0;
+ }
+
+ ret = clk_set_rate(host->cfg_div_clk, clk_rate);
+ if (ret)
+ dev_warn(host->dev, "Unable to set cfg_div_clk to %lu. ret=%d\n",
+ clk_rate, ret);
+ else if (clk_rate && clk_rate != clk_get_rate(host->cfg_div_clk))
+ dev_warn(host->dev, "divider requested rate %lu != actual rate %lu: ret=%d\n",
+ clk_rate, clk_get_rate(host->cfg_div_clk), ret);
+ else
+ mmc->actual_clock = clk_rate;
+
+ /* (re)start clock, if non-zero */
+ if (!ret && clk_rate) {
+ cfg = readl(host->regs + SD_EMMC_CFG);
+ cfg &= ~CFG_STOP_CLOCK;
+ writel(cfg, host->regs + SD_EMMC_CFG);
+ }
+
+ return ret;
+}
+
+/*
+ * The SD/eMMC IP block has an internal mux and divider used for
+ * generating the MMC clock. Use the clock framework to create and
+ * manage these clocks.
+ */
+static int meson_mmc_clk_init(struct meson_host *host)
+{
+ struct clk_init_data init;
+ char clk_name[32];
+ int i, ret = 0;
+ const char *mux_parent_names[MUX_CLK_NUM_PARENTS];
+ unsigned int mux_parent_count = 0;
+ const char *clk_div_parents[1];
+ unsigned int f_min = UINT_MAX;
+ u32 clk_reg, cfg;
+
+ /* get the mux parents */
+ for (i = 0; i < MUX_CLK_NUM_PARENTS; i++) {
+ char name[16];
+
+ snprintf(name, sizeof(name), "clkin%d", i);
+ host->mux_parent[i] = devm_clk_get(host->dev, name);
+ if (IS_ERR(host->mux_parent[i])) {
+ ret = PTR_ERR(host->mux_parent[i]);
+ if (PTR_ERR(host->mux_parent[i]) != -EPROBE_DEFER)
+ dev_err(host->dev, "Missing clock %s\n", name);
+ host->mux_parent[i] = NULL;
+ return ret;
+ }
+
+ host->mux_parent_rate[i] = clk_get_rate(host->mux_parent[i]);
+ mux_parent_names[i] = __clk_get_name(host->mux_parent[i]);
+ mux_parent_count++;
+ if (host->mux_parent_rate[i] < f_min)
+ f_min = host->mux_parent_rate[i];
+ }
+
+ /* calculate f_min based on input clocks, and max divider value */
+ if (f_min != UINT_MAX)
+ f_min = DIV_ROUND_UP(CLK_SRC_XTAL_RATE, CLK_DIV_MAX);
+ else
+ f_min = 4000000; /* default min: 4 MHz */
+ host->mmc->f_min = f_min;
+
+ /* create the mux */
+ snprintf(clk_name, sizeof(clk_name), "%s#mux", dev_name(host->dev));
+ init.name = clk_name;
+ init.ops = &clk_mux_ops;
+ init.flags = 0;
+ init.parent_names = mux_parent_names;
+ init.num_parents = mux_parent_count;
+
+ host->mux.reg = host->regs + SD_EMMC_CLOCK;
+ host->mux.shift = CLK_SRC_SHIFT;
+ host->mux.mask = CLK_SRC_MASK;
+ host->mux.flags = 0;
+ host->mux.table = NULL;
+ host->mux.hw.init = &init;
+
+ host->mux_clk = devm_clk_register(host->dev, &host->mux.hw);
+ if (WARN_ON(IS_ERR(host->mux_clk)))
+ return PTR_ERR(host->mux_clk);
+
+ /* create the divider */
+ snprintf(clk_name, sizeof(clk_name), "%s#div", dev_name(host->dev));
+ init.name = devm_kstrdup(host->dev, clk_name, GFP_KERNEL);
+ init.ops = &clk_divider_ops;
+ init.flags = CLK_SET_RATE_PARENT;
+ clk_div_parents[0] = __clk_get_name(host->mux_clk);
+ init.parent_names = clk_div_parents;
+ init.num_parents = ARRAY_SIZE(clk_div_parents);
+
+ host->cfg_div.reg = host->regs + SD_EMMC_CLOCK;
+ host->cfg_div.shift = CLK_DIV_SHIFT;
+ host->cfg_div.width = CLK_DIV_WIDTH;
+ host->cfg_div.hw.init = &init;
+ host->cfg_div.flags = CLK_DIVIDER_ONE_BASED |
+ CLK_DIVIDER_ROUND_CLOSEST | CLK_DIVIDER_ALLOW_ZERO;
+
+ host->cfg_div_clk = devm_clk_register(host->dev, &host->cfg_div.hw);
+ if (WARN_ON(PTR_ERR_OR_ZERO(host->cfg_div_clk)))
+ return PTR_ERR(host->cfg_div_clk);
+
+ /* init SD_EMMC_CLOCK to sane defaults w/min clock rate */
+ clk_reg = 0;
+ clk_reg |= CLK_PHASE_180 << CLK_PHASE_SHIFT;
+ clk_reg |= CLK_SRC_XTAL << CLK_SRC_SHIFT;
+ clk_reg |= CLK_DIV_MAX << CLK_DIV_SHIFT;
+ clk_reg &= ~CLK_ALWAYS_ON;
+ writel(clk_reg, host->regs + SD_EMMC_CLOCK);
+
+ /* Ensure clock starts in "auto" mode, not "always on" */
+ cfg = readl(host->regs + SD_EMMC_CFG);
+ cfg &= ~CFG_CLK_ALWAYS_ON;
+ cfg |= CFG_AUTO_CLK;
+ writel(cfg, host->regs + SD_EMMC_CFG);
+
+ ret = clk_prepare_enable(host->cfg_div_clk);
+ if (!ret)
+ ret = meson_mmc_clk_set(host, f_min);
+
+ if (!ret)
+ clk_disable_unprepare(host->cfg_div_clk);
+
+ return ret;
+}
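
Given the two mux inputs declared above (the 24 MHz crystal and the 1 GHz FCLK_DIV2) and the 6-bit one-based divider, the reachable rate range works out as follows; a comment-style worked example:

    /* Slowest: the crystal behind the largest divider,
     *     f_min = DIV_ROUND_UP(24000000, 63) = 380953 Hz  (~381 kHz).
     * Fastest: FCLK_DIV2 with div = 1, i.e. 1 GHz at the IP boundary,
     * in practice capped by the mmc->f_max negotiated with the core. */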
+
+static void meson_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
+{
+ struct meson_host *host = mmc_priv(mmc);
+ u32 bus_width;
+ u32 val, orig;
+
+ /*
+ * GPIO regulator, only controls switching between 1v8 and
+ * 3v3, doesn't support MMC_POWER_OFF, MMC_POWER_ON.
+ */
+ switch (ios->power_mode) {
+ case MMC_POWER_OFF:
+ if (!IS_ERR(mmc->supply.vmmc))
+ mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);
+
+ if (!IS_ERR(mmc->supply.vqmmc) && host->vqmmc_enabled) {
+ regulator_disable(mmc->supply.vqmmc);
+ host->vqmmc_enabled = false;
+ }
+
+ break;
+
+ case MMC_POWER_UP:
+ if (!IS_ERR(mmc->supply.vmmc))
+ mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, ios->vdd);
+ break;
+
+ case MMC_POWER_ON:
+ if (!IS_ERR(mmc->supply.vqmmc) && !host->vqmmc_enabled) {
+ int ret = regulator_enable(mmc->supply.vqmmc);
+
+ if (ret < 0)
+ dev_err(mmc_dev(mmc),
+ "failed to enable vqmmc regulator\n");
+ else
+ host->vqmmc_enabled = true;
+ }
+
+ break;
+ }
+
+ meson_mmc_clk_set(host, ios->clock);
+
+ /* Bus width */
+ switch (ios->bus_width) {
+ case MMC_BUS_WIDTH_1:
+ bus_width = CFG_BUS_WIDTH_1;
+ break;
+ case MMC_BUS_WIDTH_4:
+ bus_width = CFG_BUS_WIDTH_4;
+ break;
+ case MMC_BUS_WIDTH_8:
+ bus_width = CFG_BUS_WIDTH_8;
+ break;
+ default:
+ dev_err(host->dev, "Invalid ios->bus_width: %u. Setting to 4.\n",
+ ios->bus_width);
+		bus_width = CFG_BUS_WIDTH_4;
+ }
+
+ val = readl(host->regs + SD_EMMC_CFG);
+ orig = val;
+
+ val &= ~(CFG_BUS_WIDTH_MASK << CFG_BUS_WIDTH_SHIFT);
+ val |= bus_width << CFG_BUS_WIDTH_SHIFT;
+
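+	/*
+	 * The block length, response timeout and command gap fields are all
+	 * log2-encoded, hence the ilog2() of the respective constants below.
+	 */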
+ val &= ~(CFG_BLK_LEN_MASK << CFG_BLK_LEN_SHIFT);
+ val |= ilog2(SD_EMMC_CFG_BLK_SIZE) << CFG_BLK_LEN_SHIFT;
+
+ val &= ~(CFG_RESP_TIMEOUT_MASK << CFG_RESP_TIMEOUT_SHIFT);
+ val |= ilog2(SD_EMMC_CFG_RESP_TIMEOUT) << CFG_RESP_TIMEOUT_SHIFT;
+
+ val &= ~(CFG_RC_CC_MASK << CFG_RC_CC_SHIFT);
+ val |= ilog2(SD_EMMC_CFG_CMD_GAP) << CFG_RC_CC_SHIFT;
+
+ writel(val, host->regs + SD_EMMC_CFG);
+
+ if (val != orig)
+ dev_dbg(host->dev, "%s: SD_EMMC_CFG: 0x%08x -> 0x%08x\n",
+ __func__, orig, val);
+}
+
+static int meson_mmc_request_done(struct mmc_host *mmc, struct mmc_request *mrq)
+{
+ struct meson_host *host = mmc_priv(mmc);
+
+ WARN_ON(host->mrq != mrq);
+
+ host->mrq = NULL;
+ host->cmd = NULL;
+ mmc_request_done(host->mmc, mrq);
+
+ return 0;
+}
+
+static void meson_mmc_start_cmd(struct mmc_host *mmc, struct mmc_command *cmd)
+{
+ struct meson_host *host = mmc_priv(mmc);
+ struct sd_emmc_desc *desc, desc_tmp;
+ u32 cfg;
+ u8 blk_len, cmd_cfg_timeout;
+ unsigned int xfer_bytes = 0;
+
+ /* Setup descriptors */
+ dma_rmb();
+ desc = &desc_tmp;
+ memset(desc, 0, sizeof(struct sd_emmc_desc));
+
+ desc->cmd_cfg |= (cmd->opcode & CMD_CFG_CMD_INDEX_MASK) <<
+ CMD_CFG_CMD_INDEX_SHIFT;
+ desc->cmd_cfg |= CMD_CFG_OWNER; /* owned by CPU */
+ desc->cmd_arg = cmd->arg;
+
+ /* Response */
+ if (cmd->flags & MMC_RSP_PRESENT) {
+ desc->cmd_cfg &= ~CMD_CFG_NO_RESP;
+ if (cmd->flags & MMC_RSP_136)
+ desc->cmd_cfg |= CMD_CFG_RESP_128;
+ desc->cmd_cfg |= CMD_CFG_RESP_NUM;
+ desc->cmd_resp = 0;
+
+ if (!(cmd->flags & MMC_RSP_CRC))
+ desc->cmd_cfg |= CMD_CFG_RESP_NOCRC;
+
+ if (cmd->flags & MMC_RSP_BUSY)
+ desc->cmd_cfg |= CMD_CFG_R1B;
+ } else {
+ desc->cmd_cfg |= CMD_CFG_NO_RESP;
+ }
+
+ /* data? */
+ if (cmd->data) {
+ desc->cmd_cfg |= CMD_CFG_DATA_IO;
+ if (cmd->data->blocks > 1) {
+ desc->cmd_cfg |= CMD_CFG_BLOCK_MODE;
+ desc->cmd_cfg |=
+ (cmd->data->blocks & CMD_CFG_LENGTH_MASK) <<
+ CMD_CFG_LENGTH_SHIFT;
+
+ /* check if block-size matches, if not update */
+ cfg = readl(host->regs + SD_EMMC_CFG);
+ blk_len = cfg & (CFG_BLK_LEN_MASK << CFG_BLK_LEN_SHIFT);
+ blk_len >>= CFG_BLK_LEN_SHIFT;
+ if (blk_len != ilog2(cmd->data->blksz)) {
+ dev_warn(host->dev, "%s: update blk_len %d -> %d\n",
+ __func__, blk_len,
+ ilog2(cmd->data->blksz));
+ blk_len = ilog2(cmd->data->blksz);
+ cfg &= ~(CFG_BLK_LEN_MASK << CFG_BLK_LEN_SHIFT);
+ cfg |= blk_len << CFG_BLK_LEN_SHIFT;
+ writel(cfg, host->regs + SD_EMMC_CFG);
+ }
+ } else {
+ desc->cmd_cfg &= ~CMD_CFG_BLOCK_MODE;
+ desc->cmd_cfg |=
+ (cmd->data->blksz & CMD_CFG_LENGTH_MASK) <<
+ CMD_CFG_LENGTH_SHIFT;
+ }
+
+ cmd->data->bytes_xfered = 0;
+ xfer_bytes = cmd->data->blksz * cmd->data->blocks;
+ if (cmd->data->flags & MMC_DATA_WRITE) {
+ desc->cmd_cfg |= CMD_CFG_DATA_WR;
+ WARN_ON(xfer_bytes > host->bounce_buf_size);
+ sg_copy_to_buffer(cmd->data->sg, cmd->data->sg_len,
+ host->bounce_buf, xfer_bytes);
+ cmd->data->bytes_xfered = xfer_bytes;
+ dma_wmb();
+ } else {
+ desc->cmd_cfg &= ~CMD_CFG_DATA_WR;
+ }
+
+ if (xfer_bytes > 0) {
+ desc->cmd_cfg &= ~CMD_CFG_DATA_NUM;
+ desc->cmd_data = host->bounce_dma_addr & CMD_DATA_MASK;
+ } else {
+ /* write data to data_addr */
+ desc->cmd_cfg |= CMD_CFG_DATA_NUM;
+ desc->cmd_data = 0;
+ }
+
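+		/*
+		 * The descriptor timeout field seems to encode log2 of the
+		 * timeout in milliseconds (an assumption, not confirmed by
+		 * a datasheet here), which would make 12 roughly 4 s for
+		 * data commands and 10 roughly 1 s otherwise.
+		 */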
+ cmd_cfg_timeout = 12;
+ } else {
+ desc->cmd_cfg &= ~CMD_CFG_DATA_IO;
+ cmd_cfg_timeout = 10;
+ }
+ desc->cmd_cfg |= (cmd_cfg_timeout & CMD_CFG_TIMEOUT_MASK) <<
+ CMD_CFG_TIMEOUT_SHIFT;
+
+ host->cmd = cmd;
+
+ /* Last descriptor */
+ desc->cmd_cfg |= CMD_CFG_END_OF_CHAIN;
+ writel(desc->cmd_cfg, host->regs + SD_EMMC_CMD_CFG);
+ writel(desc->cmd_data, host->regs + SD_EMMC_CMD_DAT);
+ writel(desc->cmd_resp, host->regs + SD_EMMC_CMD_RSP);
+ wmb(); /* ensure descriptor is written before kicked */
+ writel(desc->cmd_arg, host->regs + SD_EMMC_CMD_ARG);
+}
+
+static void meson_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
+{
+ struct meson_host *host = mmc_priv(mmc);
+
+ WARN_ON(host->mrq != NULL);
+
+ /* Stop execution */
+ writel(0, host->regs + SD_EMMC_START);
+
+ /* clear, ack, enable all interrupts */
+ writel(0, host->regs + SD_EMMC_IRQ_EN);
+ writel(IRQ_EN_MASK, host->regs + SD_EMMC_STATUS);
+ writel(IRQ_EN_MASK, host->regs + SD_EMMC_IRQ_EN);
+
+ host->mrq = mrq;
+
+ if (mrq->sbc)
+ meson_mmc_start_cmd(mmc, mrq->sbc);
+ else
+ meson_mmc_start_cmd(mmc, mrq->cmd);
+}
+
+static int meson_mmc_read_resp(struct mmc_host *mmc, struct mmc_command *cmd)
+{
+ struct meson_host *host = mmc_priv(mmc);
+
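+	/*
+	 * For 136-bit responses the most significant word apparently lands
+	 * in SD_EMMC_CMD_RSP3, hence the reversed load order into resp[]
+	 * below.
+	 */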
+ if (cmd->flags & MMC_RSP_136) {
+ cmd->resp[0] = readl(host->regs + SD_EMMC_CMD_RSP3);
+ cmd->resp[1] = readl(host->regs + SD_EMMC_CMD_RSP2);
+ cmd->resp[2] = readl(host->regs + SD_EMMC_CMD_RSP1);
+ cmd->resp[3] = readl(host->regs + SD_EMMC_CMD_RSP);
+ } else if (cmd->flags & MMC_RSP_PRESENT) {
+ cmd->resp[0] = readl(host->regs + SD_EMMC_CMD_RSP);
+ }
+
+ return 0;
+}
+
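+/*
+ * IRQ handling is split in two: the hard handler below checks and acks the
+ * status bits and flags any errors, then wakes the threaded handler, which
+ * copies bounce-buffer data and completes or chains the request outside of
+ * atomic context.
+ */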
+static irqreturn_t meson_mmc_irq(int irq, void *dev_id)
+{
+ struct meson_host *host = dev_id;
+ struct mmc_request *mrq;
+	struct mmc_command *cmd;
+ u32 irq_en, status, raw_status;
+ irqreturn_t ret = IRQ_HANDLED;
+
+ if (WARN_ON(!host))
+ return IRQ_NONE;
+
+	mrq = host->mrq;
+	cmd = host->cmd;
+
+ if (WARN_ON(!mrq))
+ return IRQ_NONE;
+
+ if (WARN_ON(!cmd))
+ return IRQ_NONE;
+
+ spin_lock(&host->lock);
+ irq_en = readl(host->regs + SD_EMMC_IRQ_EN);
+ raw_status = readl(host->regs + SD_EMMC_STATUS);
+ status = raw_status & irq_en;
+
+ if (!status) {
+ dev_warn(host->dev, "Spurious IRQ! status=0x%08x, irq_en=0x%08x\n",
+ raw_status, irq_en);
+ ret = IRQ_NONE;
+ goto out;
+ }
+
+ cmd->error = 0;
+ if (status & IRQ_RXD_ERR_MASK) {
+ dev_dbg(host->dev, "Unhandled IRQ: RXD error\n");
+ cmd->error = -EILSEQ;
+ }
+ if (status & IRQ_TXD_ERR) {
+ dev_dbg(host->dev, "Unhandled IRQ: TXD error\n");
+ cmd->error = -EILSEQ;
+ }
+ if (status & IRQ_DESC_ERR)
+ dev_dbg(host->dev, "Unhandled IRQ: Descriptor error\n");
+ if (status & IRQ_RESP_ERR) {
+ dev_dbg(host->dev, "Unhandled IRQ: Response error\n");
+ cmd->error = -EILSEQ;
+ }
+ if (status & IRQ_RESP_TIMEOUT) {
+ dev_dbg(host->dev, "Unhandled IRQ: Response timeout\n");
+ cmd->error = -ETIMEDOUT;
+ }
+ if (status & IRQ_DESC_TIMEOUT) {
+ dev_dbg(host->dev, "Unhandled IRQ: Descriptor timeout\n");
+ cmd->error = -ETIMEDOUT;
+ }
+ if (status & IRQ_SDIO)
+ dev_dbg(host->dev, "Unhandled IRQ: SDIO.\n");
+
+ if (status & (IRQ_END_OF_CHAIN | IRQ_RESP_STATUS))
+ ret = IRQ_WAKE_THREAD;
+ else {
+		dev_warn(host->dev, "Unknown IRQ! status=0x%08x: MMC CMD%u arg=0x%08x flags=0x%08x stop=%d\n",
+ status, cmd->opcode, cmd->arg,
+ cmd->flags, mrq->stop ? 1 : 0);
+ if (cmd->data) {
+ struct mmc_data *data = cmd->data;
+
+			dev_warn(host->dev, "\tblksz %u blocks %u flags 0x%08x (%s%s)\n",
+ data->blksz, data->blocks, data->flags,
+ data->flags & MMC_DATA_WRITE ? "write" : "",
+ data->flags & MMC_DATA_READ ? "read" : "");
+ }
+ }
+
+out:
+ /* ack all (enabled) interrupts */
+ writel(status, host->regs + SD_EMMC_STATUS);
+
+ if (ret == IRQ_HANDLED) {
+ meson_mmc_read_resp(host->mmc, cmd);
+ meson_mmc_request_done(host->mmc, cmd->mrq);
+ }
+
+ spin_unlock(&host->lock);
+ return ret;
+}
+
+static irqreturn_t meson_mmc_irq_thread(int irq, void *dev_id)
+{
+ struct meson_host *host = dev_id;
+ struct mmc_request *mrq = host->mrq;
+ struct mmc_command *cmd = host->cmd;
+ struct mmc_data *data;
+ unsigned int xfer_bytes;
+ int ret = IRQ_HANDLED;
+
+	if (WARN_ON(!mrq))
+		return IRQ_NONE;
+
+	if (WARN_ON(!cmd))
+		return IRQ_NONE;
+
+ data = cmd->data;
+ if (data) {
+ xfer_bytes = data->blksz * data->blocks;
+ if (data->flags & MMC_DATA_READ) {
+ WARN_ON(xfer_bytes > host->bounce_buf_size);
+ sg_copy_from_buffer(data->sg, data->sg_len,
+ host->bounce_buf, xfer_bytes);
+ data->bytes_xfered = xfer_bytes;
+ }
+ }
+
+ meson_mmc_read_resp(host->mmc, cmd);
+ if (!data || !data->stop || mrq->sbc)
+ meson_mmc_request_done(host->mmc, mrq);
+ else
+ meson_mmc_start_cmd(host->mmc, data->stop);
+
+ return ret;
+}
+
+/*
+ * NOTE: we only need this until the GPIO/pinctrl driver can handle
+ * interrupts. For now, the MMC core will use this for polling.
+ */
+static int meson_mmc_get_cd(struct mmc_host *mmc)
+{
+ int status = mmc_gpio_get_cd(mmc);
+
+ if (status == -ENOSYS)
+ return 1; /* assume present */
+
+ return status;
+}
+
+static const struct mmc_host_ops meson_mmc_ops = {
+ .request = meson_mmc_request,
+ .set_ios = meson_mmc_set_ios,
+ .get_cd = meson_mmc_get_cd,
+};
+
+static int meson_mmc_probe(struct platform_device *pdev)
+{
+ struct resource *res;
+ struct meson_host *host;
+ struct mmc_host *mmc;
+ int ret;
+
+ mmc = mmc_alloc_host(sizeof(struct meson_host), &pdev->dev);
+ if (!mmc)
+ return -ENOMEM;
+ host = mmc_priv(mmc);
+ host->mmc = mmc;
+ host->dev = &pdev->dev;
+ dev_set_drvdata(&pdev->dev, host);
+
+ spin_lock_init(&host->lock);
+
+ /* Get regulators and the supported OCR mask */
+ host->vqmmc_enabled = false;
+ ret = mmc_regulator_get_supply(mmc);
+ if (ret == -EPROBE_DEFER)
+ goto free_host;
+
+ ret = mmc_of_parse(mmc);
+ if (ret) {
+ dev_warn(&pdev->dev, "error parsing DT: %d\n", ret);
+ goto free_host;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ host->regs = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(host->regs)) {
+ ret = PTR_ERR(host->regs);
+ goto free_host;
+ }
+
+ host->irq = platform_get_irq(pdev, 0);
+	if (host->irq <= 0) {
+ dev_err(&pdev->dev, "failed to get interrupt resource.\n");
+ ret = -EINVAL;
+ goto free_host;
+ }
+
+ host->core_clk = devm_clk_get(&pdev->dev, "core");
+ if (IS_ERR(host->core_clk)) {
+ ret = PTR_ERR(host->core_clk);
+ goto free_host;
+ }
+
+ ret = clk_prepare_enable(host->core_clk);
+ if (ret)
+ goto free_host;
+
+ ret = meson_mmc_clk_init(host);
+ if (ret)
+ goto free_host;
+
+ /* Stop execution */
+ writel(0, host->regs + SD_EMMC_START);
+
+	/* clear and ack all interrupts; they are enabled per-request */
+ writel(0, host->regs + SD_EMMC_IRQ_EN);
+ writel(IRQ_EN_MASK, host->regs + SD_EMMC_STATUS);
+
+ ret = devm_request_threaded_irq(&pdev->dev, host->irq,
+ meson_mmc_irq, meson_mmc_irq_thread,
+ IRQF_SHARED, DRIVER_NAME, host);
+ if (ret)
+ goto free_host;
+
+ /* data bounce buffer */
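+	/*
+	 * The command descriptor takes a single DMA address, so scatterlists
+	 * are staged through one coherent bounce buffer; transfers larger
+	 * than this would trip the WARN_ONs in the request path.
+	 */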
+ host->bounce_buf_size = SZ_512K;
+ host->bounce_buf =
+ dma_alloc_coherent(host->dev, host->bounce_buf_size,
+ &host->bounce_dma_addr, GFP_KERNEL);
+ if (host->bounce_buf == NULL) {
+		dev_err(host->dev, "Unable to allocate DMA bounce buffer.\n");
+ ret = -ENOMEM;
+ goto free_host;
+ }
+
+ mmc->ops = &meson_mmc_ops;
+ mmc_add_host(mmc);
+
+ return 0;
+
+free_host:
+ clk_disable_unprepare(host->cfg_div_clk);
+ clk_disable_unprepare(host->core_clk);
+ mmc_free_host(mmc);
+ return ret;
+}
+
+static int meson_mmc_remove(struct platform_device *pdev)
+{
+ struct meson_host *host = dev_get_drvdata(&pdev->dev);
+
+ if (WARN_ON(!host))
+ return 0;
+
+ if (host->bounce_buf)
+ dma_free_coherent(host->dev, host->bounce_buf_size,
+ host->bounce_buf, host->bounce_dma_addr);
+
+ clk_disable_unprepare(host->cfg_div_clk);
+ clk_disable_unprepare(host->core_clk);
+
+ mmc_free_host(host->mmc);
+ return 0;
+}
+
+static const struct of_device_id meson_mmc_of_match[] = {
+ { .compatible = "amlogic,meson-gx-mmc", },
+ { .compatible = "amlogic,meson-gxbb-mmc", },
+ { .compatible = "amlogic,meson-gxl-mmc", },
+ { .compatible = "amlogic,meson-gxm-mmc", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, meson_mmc_of_match);
+
+static struct platform_driver meson_mmc_driver = {
+ .probe = meson_mmc_probe,
+ .remove = meson_mmc_remove,
+ .driver = {
+ .name = DRIVER_NAME,
+ .of_match_table = of_match_ptr(meson_mmc_of_match),
+ },
+};
+
+module_platform_driver(meson_mmc_driver);
+
+MODULE_DESCRIPTION("Amlogic S905*/GX* SD/eMMC driver");
+MODULE_AUTHOR("Kevin Hilman <khilman@baylibre.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
index df990bb8c873..01a804792f30 100644
--- a/drivers/mmc/host/mmci.c
+++ b/drivers/mmc/host/mmci.c
@@ -71,7 +71,12 @@ static unsigned int fmax = 515633;
* @f_max: maximum clk frequency supported by the controller.
* @signal_direction: input/out direction of bus signals can be indicated
* @pwrreg_clkgate: MMCIPOWER register must be used to gate the clock
- * @busy_detect: true if busy detection on dat0 is supported
+ * @busy_detect: true if the variant supports busy detection on DAT0.
+ * @busy_dpsm_flag: bitmask enabling busy detection in the DPSM
+ * @busy_detect_flag: bitmask identifying the bit in the MMCISTATUS register
+ * indicating that the card is busy
+ * @busy_detect_mask: bitmask identifying the bit to set in the MMCIMASK0
+ * register in order to receive busy end detection interrupts
* @pwrreg_nopower: bits in MMCIPOWER don't controls ext. power supply
* @explicit_mclk_control: enable explicit mclk control in driver.
* @qcom_fifo: enables qcom specific fifo pio read logic.
@@ -98,6 +103,9 @@ struct variant_data {
bool signal_direction;
bool pwrreg_clkgate;
bool busy_detect;
+ u32 busy_dpsm_flag;
+ u32 busy_detect_flag;
+ u32 busy_detect_mask;
bool pwrreg_nopower;
bool explicit_mclk_control;
bool qcom_fifo;
@@ -137,7 +145,7 @@ static struct variant_data variant_u300 = {
.clkreg_enable = MCI_ST_U300_HWFCEN,
.clkreg_8bit_bus_enable = MCI_ST_8BIT_BUS,
.datalength_bits = 16,
- .datactrl_mask_sdio = MCI_ST_DPSM_SDIOEN,
+ .datactrl_mask_sdio = MCI_DPSM_ST_SDIOEN,
.st_sdio = true,
.pwrreg_powerup = MCI_PWR_ON,
.f_max = 100000000,
@@ -152,7 +160,7 @@ static struct variant_data variant_nomadik = {
.clkreg = MCI_CLK_ENABLE,
.clkreg_8bit_bus_enable = MCI_ST_8BIT_BUS,
.datalength_bits = 24,
- .datactrl_mask_sdio = MCI_ST_DPSM_SDIOEN,
+ .datactrl_mask_sdio = MCI_DPSM_ST_SDIOEN,
.st_sdio = true,
.st_clkdiv = true,
.pwrreg_powerup = MCI_PWR_ON,
@@ -170,7 +178,7 @@ static struct variant_data variant_ux500 = {
.clkreg_8bit_bus_enable = MCI_ST_8BIT_BUS,
.clkreg_neg_edge_enable = MCI_ST_UX500_NEG_EDGE,
.datalength_bits = 24,
- .datactrl_mask_sdio = MCI_ST_DPSM_SDIOEN,
+ .datactrl_mask_sdio = MCI_DPSM_ST_SDIOEN,
.st_sdio = true,
.st_clkdiv = true,
.pwrreg_powerup = MCI_PWR_ON,
@@ -178,6 +186,9 @@ static struct variant_data variant_ux500 = {
.signal_direction = true,
.pwrreg_clkgate = true,
.busy_detect = true,
+ .busy_dpsm_flag = MCI_DPSM_ST_BUSYMODE,
+ .busy_detect_flag = MCI_ST_CARDBUSY,
+ .busy_detect_mask = MCI_ST_BUSYENDMASK,
.pwrreg_nopower = true,
};
@@ -188,9 +199,9 @@ static struct variant_data variant_ux500v2 = {
.clkreg_enable = MCI_ST_UX500_HWFCEN,
.clkreg_8bit_bus_enable = MCI_ST_8BIT_BUS,
.clkreg_neg_edge_enable = MCI_ST_UX500_NEG_EDGE,
- .datactrl_mask_ddrmode = MCI_ST_DPSM_DDRMODE,
+ .datactrl_mask_ddrmode = MCI_DPSM_ST_DDRMODE,
.datalength_bits = 24,
- .datactrl_mask_sdio = MCI_ST_DPSM_SDIOEN,
+ .datactrl_mask_sdio = MCI_DPSM_ST_SDIOEN,
.st_sdio = true,
.st_clkdiv = true,
.blksz_datactrl16 = true,
@@ -199,6 +210,9 @@ static struct variant_data variant_ux500v2 = {
.signal_direction = true,
.pwrreg_clkgate = true,
.busy_detect = true,
+ .busy_dpsm_flag = MCI_DPSM_ST_BUSYMODE,
+ .busy_detect_flag = MCI_ST_CARDBUSY,
+ .busy_detect_mask = MCI_ST_BUSYENDMASK,
.pwrreg_nopower = true,
};
@@ -210,7 +224,7 @@ static struct variant_data variant_qcom = {
MCI_QCOM_CLK_SELECT_IN_FBCLK,
.clkreg_8bit_bus_enable = MCI_QCOM_CLK_WIDEBUS_8,
.datactrl_mask_ddrmode = MCI_QCOM_CLK_SELECT_IN_DDR_MODE,
- .data_cmd_enable = MCI_QCOM_CSPM_DATCMD,
+ .data_cmd_enable = MCI_CPSM_QCOM_DATCMD,
.blksz_datactrl4 = true,
.datalength_bits = 24,
.pwrreg_powerup = MCI_PWR_UP,
@@ -220,6 +234,7 @@ static struct variant_data variant_qcom = {
.qcom_dml = true,
};
+/* Busy detection for the ST Micro variant */
static int mmci_card_busy(struct mmc_host *mmc)
{
struct mmci_host *host = mmc_priv(mmc);
@@ -227,7 +242,7 @@ static int mmci_card_busy(struct mmc_host *mmc)
int busy = 0;
spin_lock_irqsave(&host->lock, flags);
- if (readl(host->base + MMCISTATUS) & MCI_ST_CARDBUSY)
+ if (readl(host->base + MMCISTATUS) & host->variant->busy_detect_flag)
busy = 1;
spin_unlock_irqrestore(&host->lock, flags);
@@ -294,8 +309,8 @@ static void mmci_write_pwrreg(struct mmci_host *host, u32 pwr)
*/
static void mmci_write_datactrlreg(struct mmci_host *host, u32 datactrl)
{
- /* Keep ST Micro busy mode if enabled */
- datactrl |= host->datactrl_reg & MCI_ST_DPSM_BUSYMODE;
+ /* Keep busy mode in DPSM if enabled */
+ datactrl |= host->datactrl_reg & host->variant->busy_dpsm_flag;
if (host->datactrl_reg != datactrl) {
host->datactrl_reg = datactrl;
@@ -684,8 +699,7 @@ static void mmci_get_next_data(struct mmci_host *host, struct mmc_data *data)
next->dma_chan = NULL;
}
-static void mmci_pre_request(struct mmc_host *mmc, struct mmc_request *mrq,
- bool is_first_req)
+static void mmci_pre_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
struct mmci_host *host = mmc_priv(mmc);
struct mmc_data *data = mrq->data;
@@ -973,37 +987,66 @@ mmci_cmd_irq(struct mmci_host *host, struct mmc_command *cmd,
unsigned int status)
{
void __iomem *base = host->base;
- bool sbc, busy_resp;
+ bool sbc;
if (!cmd)
return;
sbc = (cmd == host->mrq->sbc);
- busy_resp = host->variant->busy_detect && (cmd->flags & MMC_RSP_BUSY);
- if (!((status|host->busy_status) & (MCI_CMDCRCFAIL|MCI_CMDTIMEOUT|
- MCI_CMDSENT|MCI_CMDRESPEND)))
+ /*
+	 * One of these interrupt statuses must be set for the command to
+	 * be considered worth handling. Note that we tag on any latent
+	 * IRQs postponed due to waiting for busy status.
+ */
+ if (!((status|host->busy_status) &
+ (MCI_CMDCRCFAIL|MCI_CMDTIMEOUT|MCI_CMDSENT|MCI_CMDRESPEND)))
return;
- /* Check if we need to wait for busy completion. */
- if (host->busy_status && (status & MCI_ST_CARDBUSY))
- return;
+ /*
+ * ST Micro variant: handle busy detection.
+ */
+ if (host->variant->busy_detect) {
+ bool busy_resp = !!(cmd->flags & MMC_RSP_BUSY);
- /* Enable busy completion if needed and supported. */
- if (!host->busy_status && busy_resp &&
- !(status & (MCI_CMDCRCFAIL|MCI_CMDTIMEOUT)) &&
- (readl(base + MMCISTATUS) & MCI_ST_CARDBUSY)) {
- writel(readl(base + MMCIMASK0) | MCI_ST_BUSYEND,
- base + MMCIMASK0);
- host->busy_status = status & (MCI_CMDSENT|MCI_CMDRESPEND);
- return;
- }
+ /* We are busy with a command, return */
+ if (host->busy_status &&
+ (status & host->variant->busy_detect_flag))
+ return;
+
+ /*
+ * We were not busy, but we now got a busy response on
+ * something that was not an error, and we double-check
+ * that the special busy status bit is still set before
+ * proceeding.
+ */
+ if (!host->busy_status && busy_resp &&
+ !(status & (MCI_CMDCRCFAIL|MCI_CMDTIMEOUT)) &&
+ (readl(base + MMCISTATUS) & host->variant->busy_detect_flag)) {
+ /* Unmask the busy IRQ */
+ writel(readl(base + MMCIMASK0) |
+ host->variant->busy_detect_mask,
+ base + MMCIMASK0);
+ /*
+ * Now cache the last response status code (until
+ * the busy bit goes low), and return.
+ */
+ host->busy_status =
+ status & (MCI_CMDSENT|MCI_CMDRESPEND);
+ return;
+ }
- /* At busy completion, mask the IRQ and complete the request. */
- if (host->busy_status) {
- writel(readl(base + MMCIMASK0) & ~MCI_ST_BUSYEND,
- base + MMCIMASK0);
- host->busy_status = 0;
+ /*
+		 * At this point we are not busy with a command and have not
+		 * received a new busy request; mask the busy IRQ and fall
+		 * through to process the IRQ.
+ */
+ if (host->busy_status) {
+ writel(readl(base + MMCIMASK0) &
+ ~host->variant->busy_detect_mask,
+ base + MMCIMASK0);
+ host->busy_status = 0;
+ }
}
host->cmd = NULL;
@@ -1257,9 +1300,11 @@ static irqreturn_t mmci_irq(int irq, void *dev_id)
mmci_data_irq(host, host->data, status);
}
- /* Don't poll for busy completion in irq context. */
- if (host->busy_status)
- status &= ~MCI_ST_CARDBUSY;
+ /*
+ * Don't poll for busy completion in irq context.
+ */
+ if (host->variant->busy_detect && host->busy_status)
+ status &= ~host->variant->busy_detect_flag;
ret = 1;
} while (status);
@@ -1612,9 +1657,18 @@ static int mmci_probe(struct amba_device *dev,
/* We support these capabilities. */
mmc->caps |= MMC_CAP_CMD23;
+ /*
+ * Enable busy detection.
+ */
if (variant->busy_detect) {
mmci_ops.card_busy = mmci_card_busy;
- mmci_write_datactrlreg(host, MCI_ST_DPSM_BUSYMODE);
+ /*
+ * Not all variants have a flag to enable busy detection
+ * in the DPSM, but if they do, set it here.
+ */
+ if (variant->busy_dpsm_flag)
+ mmci_write_datactrlreg(host,
+ host->variant->busy_dpsm_flag);
mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY;
mmc->max_busy_timeout = 0;
}
diff --git a/drivers/mmc/host/mmci.h b/drivers/mmc/host/mmci.h
index a1f5e4f49e2a..56322c6afba4 100644
--- a/drivers/mmc/host/mmci.h
+++ b/drivers/mmc/host/mmci.h
@@ -51,25 +51,27 @@
#define MCI_QCOM_CLK_SELECT_IN_DDR_MODE (BIT(14) | BIT(15))
#define MMCIARGUMENT 0x008
-#define MMCICOMMAND 0x00c
-#define MCI_CPSM_RESPONSE (1 << 6)
-#define MCI_CPSM_LONGRSP (1 << 7)
-#define MCI_CPSM_INTERRUPT (1 << 8)
-#define MCI_CPSM_PENDING (1 << 9)
-#define MCI_CPSM_ENABLE (1 << 10)
-/* Argument flag extenstions in the ST Micro versions */
-#define MCI_ST_SDIO_SUSP (1 << 11)
-#define MCI_ST_ENCMD_COMPL (1 << 12)
-#define MCI_ST_NIEN (1 << 13)
-#define MCI_ST_CE_ATACMD (1 << 14)
-/* Modified on Qualcomm Integrations */
-#define MCI_QCOM_CSPM_DATCMD BIT(12)
-#define MCI_QCOM_CSPM_MCIABORT BIT(13)
-#define MCI_QCOM_CSPM_CCSENABLE BIT(14)
-#define MCI_QCOM_CSPM_CCSDISABLE BIT(15)
-#define MCI_QCOM_CSPM_AUTO_CMD19 BIT(16)
-#define MCI_QCOM_CSPM_AUTO_CMD21 BIT(21)
+/* The command register controls the Command Path State Machine (CPSM) */
+#define MMCICOMMAND 0x00c
+#define MCI_CPSM_RESPONSE BIT(6)
+#define MCI_CPSM_LONGRSP BIT(7)
+#define MCI_CPSM_INTERRUPT BIT(8)
+#define MCI_CPSM_PENDING BIT(9)
+#define MCI_CPSM_ENABLE BIT(10)
+/* Command register flag extensions in the ST Micro versions */
+#define MCI_CPSM_ST_SDIO_SUSP BIT(11)
+#define MCI_CPSM_ST_ENCMD_COMPL BIT(12)
+#define MCI_CPSM_ST_NIEN BIT(13)
+#define MCI_CPSM_ST_CE_ATACMD BIT(14)
+/* Command register flag extensions in the Qualcomm versions */
+#define MCI_CPSM_QCOM_PROGENA BIT(11)
+#define MCI_CPSM_QCOM_DATCMD BIT(12)
+#define MCI_CPSM_QCOM_MCIABORT BIT(13)
+#define MCI_CPSM_QCOM_CCSENABLE BIT(14)
+#define MCI_CPSM_QCOM_CCSDISABLE BIT(15)
+#define MCI_CPSM_QCOM_AUTO_CMD19 BIT(16)
+#define MCI_CPSM_QCOM_AUTO_CMD21 BIT(21)
#define MMCIRESPCMD 0x010
#define MMCIRESPONSE0 0x014
@@ -78,22 +80,27 @@
#define MMCIRESPONSE3 0x020
#define MMCIDATATIMER 0x024
#define MMCIDATALENGTH 0x028
+
+/* The data control register controls the Data Path State Machine (DPSM) */
#define MMCIDATACTRL 0x02c
-#define MCI_DPSM_ENABLE (1 << 0)
-#define MCI_DPSM_DIRECTION (1 << 1)
-#define MCI_DPSM_MODE (1 << 2)
-#define MCI_DPSM_DMAENABLE (1 << 3)
-#define MCI_DPSM_BLOCKSIZE (1 << 4)
+#define MCI_DPSM_ENABLE BIT(0)
+#define MCI_DPSM_DIRECTION BIT(1)
+#define MCI_DPSM_MODE BIT(2)
+#define MCI_DPSM_DMAENABLE BIT(3)
+#define MCI_DPSM_BLOCKSIZE BIT(4)
/* Control register extensions in the ST Micro U300 and Ux500 versions */
-#define MCI_ST_DPSM_RWSTART (1 << 8)
-#define MCI_ST_DPSM_RWSTOP (1 << 9)
-#define MCI_ST_DPSM_RWMOD (1 << 10)
-#define MCI_ST_DPSM_SDIOEN (1 << 11)
+#define MCI_DPSM_ST_RWSTART BIT(8)
+#define MCI_DPSM_ST_RWSTOP BIT(9)
+#define MCI_DPSM_ST_RWMOD BIT(10)
+#define MCI_DPSM_ST_SDIOEN BIT(11)
/* Control register extensions in the ST Micro Ux500 versions */
-#define MCI_ST_DPSM_DMAREQCTL (1 << 12)
-#define MCI_ST_DPSM_DBOOTMODEEN (1 << 13)
-#define MCI_ST_DPSM_BUSYMODE (1 << 14)
-#define MCI_ST_DPSM_DDRMODE (1 << 15)
+#define MCI_DPSM_ST_DMAREQCTL BIT(12)
+#define MCI_DPSM_ST_DBOOTMODEEN BIT(13)
+#define MCI_DPSM_ST_BUSYMODE BIT(14)
+#define MCI_DPSM_ST_DDRMODE BIT(15)
+/* Control register extensions in the Qualcomm versions */
+#define MCI_DPSM_QCOM_DATA_PEND BIT(17)
+#define MCI_DPSM_QCOM_RX_DATA_PEND BIT(20)
#define MMCIDATACNT 0x030
#define MMCISTATUS 0x034
@@ -167,7 +174,7 @@
/* Extended status bits for the ST Micro variants */
#define MCI_ST_SDIOITMASK (1 << 22)
#define MCI_ST_CEATAENDMASK (1 << 23)
-#define MCI_ST_BUSYEND (1 << 24)
+#define MCI_ST_BUSYENDMASK (1 << 24)
#define MMCIMASK1 0x040
#define MMCIFIFOCNT 0x048
diff --git a/drivers/mmc/host/mtk-sd.c b/drivers/mmc/host/mtk-sd.c
index 84e9afcb5c09..10ef2ae1d2f6 100644
--- a/drivers/mmc/host/mtk-sd.c
+++ b/drivers/mmc/host/mtk-sd.c
@@ -927,8 +927,7 @@ static void msdc_ops_request(struct mmc_host *mmc, struct mmc_request *mrq)
msdc_start_command(host, mrq, mrq->cmd);
}
-static void msdc_pre_req(struct mmc_host *mmc, struct mmc_request *mrq,
- bool is_first_req)
+static void msdc_pre_req(struct mmc_host *mmc, struct mmc_request *mrq)
{
struct msdc_host *host = mmc_priv(mmc);
struct mmc_data *data = mrq->data;
@@ -1713,6 +1712,7 @@ static const struct of_device_id msdc_of_ids[] = {
{ .compatible = "mediatek,mt8135-mmc", },
{}
};
+MODULE_DEVICE_TABLE(of, msdc_of_ids);
static struct platform_driver mt_msdc_driver = {
.probe = msdc_drv_probe,
diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c
index 5f2f24a7360d..ad11c4cc12ed 100644
--- a/drivers/mmc/host/omap_hsmmc.c
+++ b/drivers/mmc/host/omap_hsmmc.c
@@ -1565,8 +1565,7 @@ static void omap_hsmmc_post_req(struct mmc_host *mmc, struct mmc_request *mrq,
}
}
-static void omap_hsmmc_pre_req(struct mmc_host *mmc, struct mmc_request *mrq,
- bool is_first_req)
+static void omap_hsmmc_pre_req(struct mmc_host *mmc, struct mmc_request *mrq)
{
struct omap_hsmmc_host *host = mmc_priv(mmc);
diff --git a/drivers/mmc/host/rtsx_pci_sdmmc.c b/drivers/mmc/host/rtsx_pci_sdmmc.c
index 3ccaa1415f33..ecb99a8d2fa2 100644
--- a/drivers/mmc/host/rtsx_pci_sdmmc.c
+++ b/drivers/mmc/host/rtsx_pci_sdmmc.c
@@ -190,8 +190,7 @@ static int sd_pre_dma_transfer(struct realtek_pci_sdmmc *host,
return using_cookie;
}
-static void sdmmc_pre_req(struct mmc_host *mmc, struct mmc_request *mrq,
- bool is_first_req)
+static void sdmmc_pre_req(struct mmc_host *mmc, struct mmc_request *mrq)
{
struct realtek_pci_sdmmc *host = mmc_priv(mmc);
struct mmc_data *data = mrq->data;
diff --git a/drivers/mmc/host/rtsx_usb_sdmmc.c b/drivers/mmc/host/rtsx_usb_sdmmc.c
index 6e9c0f8fddb1..dc1abd14acbc 100644
--- a/drivers/mmc/host/rtsx_usb_sdmmc.c
+++ b/drivers/mmc/host/rtsx_usb_sdmmc.c
@@ -1374,6 +1374,8 @@ static int rtsx_usb_sdmmc_drv_probe(struct platform_device *pdev)
mutex_init(&host->host_mutex);
rtsx_usb_init_host(host);
+ pm_runtime_use_autosuspend(&pdev->dev);
+ pm_runtime_set_autosuspend_delay(&pdev->dev, 50);
pm_runtime_enable(&pdev->dev);
#ifdef RTSX_USB_USE_LEDS_CLASS
@@ -1428,6 +1430,7 @@ static int rtsx_usb_sdmmc_drv_remove(struct platform_device *pdev)
mmc_free_host(mmc);
pm_runtime_disable(&pdev->dev);
+ pm_runtime_dont_use_autosuspend(&pdev->dev);
platform_set_drvdata(pdev, NULL);
dev_dbg(&(pdev->dev),
diff --git a/drivers/mmc/host/s3cmci.c b/drivers/mmc/host/s3cmci.c
index c531deef3258..932a4b1fed33 100644
--- a/drivers/mmc/host/s3cmci.c
+++ b/drivers/mmc/host/s3cmci.c
@@ -28,7 +28,6 @@
#include <mach/dma.h>
#include <mach/gpio-samsung.h>
-#include <linux/platform_data/dma-s3c24xx.h>
#include <linux/platform_data/mmc-s3cmci.h>
#include "s3cmci.h"
@@ -1682,19 +1681,13 @@ static int s3cmci_probe(struct platform_device *pdev)
gpio_direction_input(host->pdata->gpio_wprotect);
}
- /* depending on the dma state, get a dma channel to use. */
+ /* Depending on the dma state, get a DMA channel to use. */
if (s3cmci_host_usedma(host)) {
- dma_cap_mask_t mask;
-
- dma_cap_zero(mask);
- dma_cap_set(DMA_SLAVE, mask);
-
- host->dma = dma_request_slave_channel_compat(mask,
- s3c24xx_dma_filter, (void *)DMACH_SDI, &pdev->dev, "rx-tx");
- if (!host->dma) {
+ host->dma = dma_request_chan(&pdev->dev, "rx-tx");
+ ret = PTR_ERR_OR_ZERO(host->dma);
+ if (ret) {
dev_err(&pdev->dev, "cannot get DMA channel.\n");
- ret = -EBUSY;
goto probe_free_gpio_wp;
}
}
diff --git a/drivers/mmc/host/sdhci-acpi.c b/drivers/mmc/host/sdhci-acpi.c
index 81d4dc034793..160f695cc09c 100644
--- a/drivers/mmc/host/sdhci-acpi.c
+++ b/drivers/mmc/host/sdhci-acpi.c
@@ -328,6 +328,7 @@ static const struct sdhci_acpi_uid_slot sdhci_acpi_uids[] = {
{ "80865ACC", NULL, &sdhci_acpi_slot_int_emmc },
{ "80865AD0", NULL, &sdhci_acpi_slot_int_sdio },
{ "80860F14" , "1" , &sdhci_acpi_slot_int_emmc },
+ { "80860F14" , "2" , &sdhci_acpi_slot_int_sdio },
{ "80860F14" , "3" , &sdhci_acpi_slot_int_sd },
{ "80860F16" , NULL, &sdhci_acpi_slot_int_sd },
{ "INT33BB" , "2" , &sdhci_acpi_slot_int_sdio },
diff --git a/drivers/mmc/host/sdhci-cadence.c b/drivers/mmc/host/sdhci-cadence.c
new file mode 100644
index 000000000000..4b0ecb981842
--- /dev/null
+++ b/drivers/mmc/host/sdhci-cadence.c
@@ -0,0 +1,284 @@
+/*
+ * Copyright (C) 2016 Socionext Inc.
+ * Author: Masahiro Yamada <yamada.masahiro@socionext.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/bitops.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/mmc/host.h>
+
+#include "sdhci-pltfm.h"
+
+/* HRS - Host Register Set (specific to Cadence) */
+#define SDHCI_CDNS_HRS04 0x10 /* PHY access port */
+#define SDHCI_CDNS_HRS04_ACK BIT(26)
+#define SDHCI_CDNS_HRS04_RD BIT(25)
+#define SDHCI_CDNS_HRS04_WR BIT(24)
+#define SDHCI_CDNS_HRS04_RDATA_SHIFT 12
+#define SDHCI_CDNS_HRS04_WDATA_SHIFT 8
+#define SDHCI_CDNS_HRS04_ADDR_SHIFT 0
+
+#define SDHCI_CDNS_HRS06 0x18 /* eMMC control */
+#define SDHCI_CDNS_HRS06_TUNE_UP BIT(15)
+#define SDHCI_CDNS_HRS06_TUNE_SHIFT 8
+#define SDHCI_CDNS_HRS06_TUNE_MASK 0x3f
+#define SDHCI_CDNS_HRS06_MODE_MASK 0x7
+#define SDHCI_CDNS_HRS06_MODE_SD 0x0
+#define SDHCI_CDNS_HRS06_MODE_MMC_SDR 0x2
+#define SDHCI_CDNS_HRS06_MODE_MMC_DDR 0x3
+#define SDHCI_CDNS_HRS06_MODE_MMC_HS200 0x4
+#define SDHCI_CDNS_HRS06_MODE_MMC_HS400 0x5
+
+/* SRS - Slot Register Set (SDHCI-compatible) */
+#define SDHCI_CDNS_SRS_BASE 0x200
+
+/* PHY */
+#define SDHCI_CDNS_PHY_DLY_SD_HS 0x00
+#define SDHCI_CDNS_PHY_DLY_SD_DEFAULT 0x01
+#define SDHCI_CDNS_PHY_DLY_UHS_SDR12 0x02
+#define SDHCI_CDNS_PHY_DLY_UHS_SDR25 0x03
+#define SDHCI_CDNS_PHY_DLY_UHS_SDR50 0x04
+#define SDHCI_CDNS_PHY_DLY_UHS_DDR50 0x05
+#define SDHCI_CDNS_PHY_DLY_EMMC_LEGACY 0x06
+#define SDHCI_CDNS_PHY_DLY_EMMC_SDR 0x07
+#define SDHCI_CDNS_PHY_DLY_EMMC_DDR 0x08
+
+/*
+ * The tuning value register is 6 bits wide, but not the whole range is
+ * usable. The range 0-42 appears to work (43 wraps around to 0), though
+ * this is not confirmed as official. Use only 0 to 39 for safety.
+ */
+#define SDHCI_CDNS_MAX_TUNING_LOOP 40
+
+struct sdhci_cdns_priv {
+ void __iomem *hrs_addr;
+};
+
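+/*
+ * PHY registers are reached indirectly through the HRS04 access port:
+ * load the address and write data first, then pulse the WR bit to latch
+ * them. The ACK bit is defined above but is not polled in this initial
+ * version.
+ */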
+static void sdhci_cdns_write_phy_reg(struct sdhci_cdns_priv *priv,
+ u8 addr, u8 data)
+{
+ void __iomem *reg = priv->hrs_addr + SDHCI_CDNS_HRS04;
+ u32 tmp;
+
+ tmp = (data << SDHCI_CDNS_HRS04_WDATA_SHIFT) |
+ (addr << SDHCI_CDNS_HRS04_ADDR_SHIFT);
+ writel(tmp, reg);
+
+ tmp |= SDHCI_CDNS_HRS04_WR;
+ writel(tmp, reg);
+
+ tmp &= ~SDHCI_CDNS_HRS04_WR;
+ writel(tmp, reg);
+}
+
+static void sdhci_cdns_phy_init(struct sdhci_cdns_priv *priv)
+{
+ sdhci_cdns_write_phy_reg(priv, SDHCI_CDNS_PHY_DLY_SD_HS, 4);
+ sdhci_cdns_write_phy_reg(priv, SDHCI_CDNS_PHY_DLY_SD_DEFAULT, 4);
+ sdhci_cdns_write_phy_reg(priv, SDHCI_CDNS_PHY_DLY_EMMC_LEGACY, 9);
+ sdhci_cdns_write_phy_reg(priv, SDHCI_CDNS_PHY_DLY_EMMC_SDR, 2);
+ sdhci_cdns_write_phy_reg(priv, SDHCI_CDNS_PHY_DLY_EMMC_DDR, 3);
+}
+
+static inline void *sdhci_cdns_priv(struct sdhci_host *host)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+
+ return sdhci_pltfm_priv(pltfm_host);
+}
+
+static unsigned int sdhci_cdns_get_timeout_clock(struct sdhci_host *host)
+{
+ /*
+ * Cadence's spec says the Timeout Clock Frequency is the same as the
+ * Base Clock Frequency. Divide it by 1000 to return a value in kHz.
+ */
+ return host->max_clk / 1000;
+}
+
+static void sdhci_cdns_set_uhs_signaling(struct sdhci_host *host,
+ unsigned int timing)
+{
+ struct sdhci_cdns_priv *priv = sdhci_cdns_priv(host);
+ u32 mode, tmp;
+
+ switch (timing) {
+ case MMC_TIMING_MMC_HS:
+ mode = SDHCI_CDNS_HRS06_MODE_MMC_SDR;
+ break;
+ case MMC_TIMING_MMC_DDR52:
+ mode = SDHCI_CDNS_HRS06_MODE_MMC_DDR;
+ break;
+ case MMC_TIMING_MMC_HS200:
+ mode = SDHCI_CDNS_HRS06_MODE_MMC_HS200;
+ break;
+ case MMC_TIMING_MMC_HS400:
+ mode = SDHCI_CDNS_HRS06_MODE_MMC_HS400;
+ break;
+ default:
+ mode = SDHCI_CDNS_HRS06_MODE_SD;
+ break;
+ }
+
+ /* The speed mode for eMMC is selected by HRS06 register */
+ tmp = readl(priv->hrs_addr + SDHCI_CDNS_HRS06);
+ tmp &= ~SDHCI_CDNS_HRS06_MODE_MASK;
+ tmp |= mode;
+ writel(tmp, priv->hrs_addr + SDHCI_CDNS_HRS06);
+
+ /* For SD, fall back to the default handler */
+ if (mode == SDHCI_CDNS_HRS06_MODE_SD)
+ sdhci_set_uhs_signaling(host, timing);
+}
+
+static const struct sdhci_ops sdhci_cdns_ops = {
+ .set_clock = sdhci_set_clock,
+ .get_timeout_clock = sdhci_cdns_get_timeout_clock,
+ .set_bus_width = sdhci_set_bus_width,
+ .reset = sdhci_reset,
+ .set_uhs_signaling = sdhci_cdns_set_uhs_signaling,
+};
+
+static const struct sdhci_pltfm_data sdhci_cdns_pltfm_data = {
+ .ops = &sdhci_cdns_ops,
+};
+
+static int sdhci_cdns_set_tune_val(struct sdhci_host *host, unsigned int val)
+{
+ struct sdhci_cdns_priv *priv = sdhci_cdns_priv(host);
+ void __iomem *reg = priv->hrs_addr + SDHCI_CDNS_HRS06;
+ u32 tmp;
+
+ if (WARN_ON(val > SDHCI_CDNS_HRS06_TUNE_MASK))
+ return -EINVAL;
+
+ tmp = readl(reg);
+ tmp &= ~(SDHCI_CDNS_HRS06_TUNE_MASK << SDHCI_CDNS_HRS06_TUNE_SHIFT);
+ tmp |= val << SDHCI_CDNS_HRS06_TUNE_SHIFT;
+ tmp |= SDHCI_CDNS_HRS06_TUNE_UP;
+ writel(tmp, reg);
+
+ return readl_poll_timeout(reg, tmp, !(tmp & SDHCI_CDNS_HRS06_TUNE_UP),
+ 0, 1);
+}
+
+static int sdhci_cdns_execute_tuning(struct mmc_host *mmc, u32 opcode)
+{
+ struct sdhci_host *host = mmc_priv(mmc);
+ int cur_streak = 0;
+ int max_streak = 0;
+ int end_of_streak = 0;
+ int i;
+
+ /*
+ * This handler only implements the eMMC tuning that is specific to
+ * this controller. Fall back to the standard method for SD timing.
+ */
+ if (host->timing != MMC_TIMING_MMC_HS200)
+ return sdhci_execute_tuning(mmc, opcode);
+
+ if (WARN_ON(opcode != MMC_SEND_TUNING_BLOCK_HS200))
+ return -EINVAL;
+
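+	/*
+	 * Sweep all candidate tuning values and track the longest streak of
+	 * values for which both programming and the tuning command succeed;
+	 * the midpoint of that streak is then programmed as the most robust
+	 * setting.
+	 */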
+ for (i = 0; i < SDHCI_CDNS_MAX_TUNING_LOOP; i++) {
+ if (sdhci_cdns_set_tune_val(host, i) ||
+ mmc_send_tuning(host->mmc, opcode, NULL)) { /* bad */
+ cur_streak = 0;
+ } else { /* good */
+ cur_streak++;
+ if (cur_streak > max_streak) {
+ max_streak = cur_streak;
+ end_of_streak = i;
+ }
+ }
+ }
+
+ if (!max_streak) {
+ dev_err(mmc_dev(host->mmc), "no tuning point found\n");
+ return -EIO;
+ }
+
+ return sdhci_cdns_set_tune_val(host, end_of_streak - max_streak / 2);
+}
+
+static int sdhci_cdns_probe(struct platform_device *pdev)
+{
+ struct sdhci_host *host;
+ struct sdhci_pltfm_host *pltfm_host;
+ struct sdhci_cdns_priv *priv;
+ struct clk *clk;
+ int ret;
+
+ clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
+
+ ret = clk_prepare_enable(clk);
+ if (ret)
+ return ret;
+
+ host = sdhci_pltfm_init(pdev, &sdhci_cdns_pltfm_data, sizeof(*priv));
+ if (IS_ERR(host)) {
+ ret = PTR_ERR(host);
+ goto disable_clk;
+ }
+
+ pltfm_host = sdhci_priv(host);
+ pltfm_host->clk = clk;
+
+ priv = sdhci_cdns_priv(host);
+ priv->hrs_addr = host->ioaddr;
+ host->ioaddr += SDHCI_CDNS_SRS_BASE;
+ host->mmc_host_ops.execute_tuning = sdhci_cdns_execute_tuning;
+
+ ret = mmc_of_parse(host->mmc);
+ if (ret)
+ goto free;
+
+ sdhci_cdns_phy_init(priv);
+
+ ret = sdhci_add_host(host);
+ if (ret)
+ goto free;
+
+ return 0;
+free:
+ sdhci_pltfm_free(pdev);
+disable_clk:
+ clk_disable_unprepare(clk);
+
+ return ret;
+}
+
+static const struct of_device_id sdhci_cdns_match[] = {
+ { .compatible = "socionext,uniphier-sd4hc" },
+ { .compatible = "cdns,sd4hc" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, sdhci_cdns_match);
+
+static struct platform_driver sdhci_cdns_driver = {
+ .driver = {
+ .name = "sdhci-cdns",
+ .pm = &sdhci_pltfm_pmops,
+ .of_match_table = sdhci_cdns_match,
+ },
+ .probe = sdhci_cdns_probe,
+ .remove = sdhci_pltfm_unregister,
+};
+module_platform_driver(sdhci_cdns_driver);
+
+MODULE_AUTHOR("Masahiro Yamada <yamada.masahiro@socionext.com>");
+MODULE_DESCRIPTION("Cadence SD/SDIO/eMMC Host Controller Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mmc/host/sdhci-iproc.c b/drivers/mmc/host/sdhci-iproc.c
index 726246665850..d7046d67415a 100644
--- a/drivers/mmc/host/sdhci-iproc.c
+++ b/drivers/mmc/host/sdhci-iproc.c
@@ -143,6 +143,14 @@ static void sdhci_iproc_writeb(struct sdhci_host *host, u8 val, int reg)
}
static const struct sdhci_ops sdhci_iproc_ops = {
+ .set_clock = sdhci_set_clock,
+ .get_max_clock = sdhci_pltfm_clk_get_max_clock,
+ .set_bus_width = sdhci_set_bus_width,
+ .reset = sdhci_reset,
+ .set_uhs_signaling = sdhci_set_uhs_signaling,
+};
+
+static const struct sdhci_ops sdhci_iproc_32only_ops = {
.read_l = sdhci_iproc_readl,
.read_w = sdhci_iproc_readw,
.read_b = sdhci_iproc_readb,
@@ -156,6 +164,28 @@ static const struct sdhci_ops sdhci_iproc_ops = {
.set_uhs_signaling = sdhci_set_uhs_signaling,
};
+static const struct sdhci_pltfm_data sdhci_iproc_cygnus_pltfm_data = {
+ .quirks = SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK,
+ .quirks2 = SDHCI_QUIRK2_ACMD23_BROKEN,
+ .ops = &sdhci_iproc_32only_ops,
+};
+
+static const struct sdhci_iproc_data iproc_cygnus_data = {
+ .pdata = &sdhci_iproc_cygnus_pltfm_data,
+ .caps = ((0x1 << SDHCI_MAX_BLOCK_SHIFT)
+ & SDHCI_MAX_BLOCK_MASK) |
+ SDHCI_CAN_VDD_330 |
+ SDHCI_CAN_VDD_180 |
+ SDHCI_CAN_DO_SUSPEND |
+ SDHCI_CAN_DO_HISPD |
+ SDHCI_CAN_DO_ADMA2 |
+ SDHCI_CAN_DO_SDMA,
+ .caps1 = SDHCI_DRIVER_TYPE_C |
+ SDHCI_DRIVER_TYPE_D |
+ SDHCI_SUPPORT_DDR50,
+ .mmc_caps = MMC_CAP_1_8V_DDR,
+};
+
static const struct sdhci_pltfm_data sdhci_iproc_pltfm_data = {
.quirks = SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK,
.quirks2 = SDHCI_QUIRK2_ACMD23_BROKEN,
@@ -182,7 +212,7 @@ static const struct sdhci_pltfm_data sdhci_bcm2835_pltfm_data = {
.quirks = SDHCI_QUIRK_BROKEN_CARD_DETECTION |
SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK |
SDHCI_QUIRK_MISSING_CAPS,
- .ops = &sdhci_iproc_ops,
+ .ops = &sdhci_iproc_32only_ops,
};
static const struct sdhci_iproc_data bcm2835_data = {
@@ -194,7 +224,8 @@ static const struct sdhci_iproc_data bcm2835_data = {
static const struct of_device_id sdhci_iproc_of_match[] = {
{ .compatible = "brcm,bcm2835-sdhci", .data = &bcm2835_data },
- { .compatible = "brcm,sdhci-iproc-cygnus", .data = &iproc_data },
+	{ .compatible = "brcm,sdhci-iproc-cygnus", .data = &iproc_cygnus_data },
+ { .compatible = "brcm,sdhci-iproc", .data = &iproc_data },
{ }
};
MODULE_DEVICE_TABLE(of, sdhci_iproc_of_match);
diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c
index 90ed2e12d345..32879b845b75 100644
--- a/drivers/mmc/host/sdhci-msm.c
+++ b/drivers/mmc/host/sdhci-msm.c
@@ -18,7 +18,9 @@
#include <linux/of_device.h>
#include <linux/delay.h>
#include <linux/mmc/mmc.h>
+#include <linux/pm_runtime.h>
#include <linux/slab.h>
+#include <linux/iopoll.h>
#include "sdhci-pltfm.h"
@@ -31,6 +33,7 @@
#define HC_MODE_EN 0x1
#define CORE_POWER 0x0
#define CORE_SW_RST BIT(7)
+#define FF_CLK_SW_RST_DIS BIT(13)
#define CORE_PWRCTL_STATUS 0xdc
#define CORE_PWRCTL_MASK 0xe0
@@ -49,6 +52,7 @@
#define INT_MASK 0xf
#define MAX_PHASES 16
#define CORE_DLL_LOCK BIT(7)
+#define CORE_DDR_DLL_LOCK BIT(11)
#define CORE_DLL_EN BIT(16)
#define CORE_CDR_EN BIT(17)
#define CORE_CK_OUT_EN BIT(18)
@@ -56,18 +60,67 @@
#define CORE_DLL_PDN BIT(29)
#define CORE_DLL_RST BIT(30)
#define CORE_DLL_CONFIG 0x100
+#define CORE_CMD_DAT_TRACK_SEL BIT(0)
#define CORE_DLL_STATUS 0x108
+#define CORE_DLL_CONFIG_2 0x1b4
+#define CORE_DDR_CAL_EN BIT(0)
+#define CORE_FLL_CYCLE_CNT BIT(18)
+#define CORE_DLL_CLOCK_DISABLE BIT(21)
+
#define CORE_VENDOR_SPEC 0x10c
#define CORE_CLK_PWRSAVE BIT(1)
+#define CORE_HC_MCLK_SEL_DFLT (2 << 8)
+#define CORE_HC_MCLK_SEL_HS400 (3 << 8)
+#define CORE_HC_MCLK_SEL_MASK (3 << 8)
+#define CORE_HC_SELECT_IN_EN BIT(18)
+#define CORE_HC_SELECT_IN_HS400 (6 << 19)
+#define CORE_HC_SELECT_IN_MASK (7 << 19)
+
+#define CORE_CSR_CDC_CTLR_CFG0 0x130
+#define CORE_SW_TRIG_FULL_CALIB BIT(16)
+#define CORE_HW_AUTOCAL_ENA BIT(17)
+
+#define CORE_CSR_CDC_CTLR_CFG1 0x134
+#define CORE_CSR_CDC_CAL_TIMER_CFG0 0x138
+#define CORE_TIMER_ENA BIT(16)
+
+#define CORE_CSR_CDC_CAL_TIMER_CFG1 0x13C
+#define CORE_CSR_CDC_REFCOUNT_CFG 0x140
+#define CORE_CSR_CDC_COARSE_CAL_CFG 0x144
+#define CORE_CDC_OFFSET_CFG 0x14C
+#define CORE_CSR_CDC_DELAY_CFG 0x150
+#define CORE_CDC_SLAVE_DDA_CFG 0x160
+#define CORE_CSR_CDC_STATUS0 0x164
+#define CORE_CALIBRATION_DONE BIT(0)
+
+#define CORE_CDC_ERROR_CODE_MASK 0x7000000
+
+#define CORE_CSR_CDC_GEN_CFG 0x178
+#define CORE_CDC_SWITCH_BYPASS_OFF BIT(0)
+#define CORE_CDC_SWITCH_RC_EN BIT(1)
+
+#define CORE_DDR_200_CFG 0x184
+#define CORE_CDC_T4_DLY_SEL BIT(0)
+#define CORE_START_CDC_TRAFFIC BIT(6)
+#define CORE_VENDOR_SPEC3 0x1b0
+#define CORE_PWRSAVE_DLL BIT(3)
+
+#define CORE_DDR_CONFIG 0x1b8
+#define DDR_CONFIG_POR_VAL 0x80040853
#define CORE_VENDOR_SPEC_CAPABILITIES0 0x11c
+#define INVALID_TUNING_PHASE -1
+#define SDHCI_MSM_MIN_CLOCK 400000
+#define CORE_FREQ_100MHZ (100 * 1000 * 1000)
+
#define CDR_SELEXT_SHIFT 20
#define CDR_SELEXT_MASK (0xf << CDR_SELEXT_SHIFT)
#define CMUX_SHIFT_PHASE_SHIFT 24
#define CMUX_SHIFT_PHASE_MASK (7 << CMUX_SHIFT_PHASE_SHIFT)
+#define MSM_MMC_AUTOSUSPEND_DELAY_MS 50
struct sdhci_msm_host {
struct platform_device *pdev;
void __iomem *core_mem; /* MSM SDCC mapped address */
@@ -75,7 +128,14 @@ struct sdhci_msm_host {
struct clk *clk; /* main SD/MMC bus clock */
struct clk *pclk; /* SDHC peripheral bus clock */
struct clk *bus_clk; /* SDHC bus voter clock */
+ struct clk *xo_clk; /* TCXO clk needed for FLL feature of cm_dll*/
+ unsigned long clk_rate;
struct mmc_host *mmc;
+ bool use_14lpp_dll_reset;
+ bool tuning_done;
+ bool calibration_done;
+ u8 saved_tuning_phase;
+ bool use_cdclp533;
};
/* Platform specific tuning */
@@ -115,6 +175,9 @@ static int msm_config_cm_dll_phase(struct sdhci_host *host, u8 phase)
u32 config;
struct mmc_host *mmc = host->mmc;
+ if (phase > 0xf)
+ return -EINVAL;
+
spin_lock_irqsave(&host->lock, flags);
config = readl_relaxed(host->ioaddr + CORE_DLL_CONFIG);
@@ -136,9 +199,9 @@ static int msm_config_cm_dll_phase(struct sdhci_host *host, u8 phase)
config |= grey_coded_phase_table[phase] << CDR_SELEXT_SHIFT;
writel_relaxed(config, host->ioaddr + CORE_DLL_CONFIG);
- /* Set CK_OUT_EN bit of DLL_CONFIG register to 1. */
- writel_relaxed((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG)
- | CORE_CK_OUT_EN), host->ioaddr + CORE_DLL_CONFIG);
+ config = readl_relaxed(host->ioaddr + CORE_DLL_CONFIG);
+ config |= CORE_CK_OUT_EN;
+ writel_relaxed(config, host->ioaddr + CORE_DLL_CONFIG);
/* Wait until CK_OUT_EN bit of DLL_CONFIG register becomes '1' */
rc = msm_dll_poll_ck_out_en(host, 1);
@@ -163,8 +226,8 @@ out:
* Find out the greatest range of consecuitive selected
* DLL clock output phases that can be used as sampling
* setting for SD3.0 UHS-I card read operation (in SDR104
- * timing mode) or for eMMC4.5 card read operation (in HS200
- * timing mode).
+ * timing mode) or for eMMC4.5 card read operation (in
+ * HS400/HS200 timing mode).
* Select the 3/4 of the range and configure the DLL with the
* selected DLL clock output phase.
*/
@@ -303,8 +366,11 @@ static inline void msm_cm_dll_set_freq(struct sdhci_host *host)
static int msm_init_cm_dll(struct sdhci_host *host)
{
struct mmc_host *mmc = host->mmc;
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
int wait_cnt = 50;
unsigned long flags;
+ u32 config;
spin_lock_irqsave(&host->lock, flags);
@@ -313,33 +379,73 @@ static int msm_init_cm_dll(struct sdhci_host *host)
* tuning is in progress. Keeping PWRSAVE ON may
* turn off the clock.
*/
- writel_relaxed((readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC)
- & ~CORE_CLK_PWRSAVE), host->ioaddr + CORE_VENDOR_SPEC);
+ config = readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC);
+ config &= ~CORE_CLK_PWRSAVE;
+ writel_relaxed(config, host->ioaddr + CORE_VENDOR_SPEC);
+
+ if (msm_host->use_14lpp_dll_reset) {
+ config = readl_relaxed(host->ioaddr + CORE_DLL_CONFIG);
+ config &= ~CORE_CK_OUT_EN;
+ writel_relaxed(config, host->ioaddr + CORE_DLL_CONFIG);
+
+ config = readl_relaxed(host->ioaddr + CORE_DLL_CONFIG_2);
+ config |= CORE_DLL_CLOCK_DISABLE;
+ writel_relaxed(config, host->ioaddr + CORE_DLL_CONFIG_2);
+ }
- /* Write 1 to DLL_RST bit of DLL_CONFIG register */
- writel_relaxed((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG)
- | CORE_DLL_RST), host->ioaddr + CORE_DLL_CONFIG);
+ config = readl_relaxed(host->ioaddr + CORE_DLL_CONFIG);
+ config |= CORE_DLL_RST;
+ writel_relaxed(config, host->ioaddr + CORE_DLL_CONFIG);
- /* Write 1 to DLL_PDN bit of DLL_CONFIG register */
- writel_relaxed((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG)
- | CORE_DLL_PDN), host->ioaddr + CORE_DLL_CONFIG);
+ config = readl_relaxed(host->ioaddr + CORE_DLL_CONFIG);
+ config |= CORE_DLL_PDN;
+ writel_relaxed(config, host->ioaddr + CORE_DLL_CONFIG);
msm_cm_dll_set_freq(host);
- /* Write 0 to DLL_RST bit of DLL_CONFIG register */
- writel_relaxed((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG)
- & ~CORE_DLL_RST), host->ioaddr + CORE_DLL_CONFIG);
+ if (msm_host->use_14lpp_dll_reset &&
+ !IS_ERR_OR_NULL(msm_host->xo_clk)) {
+ u32 mclk_freq = 0;
+
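+		/*
+		 * CORE_FLL_CYCLE_CNT appears to select how many DLL cycles
+		 * run per TCXO period (8 when set, 4 when clear), which is
+		 * why the multiplier below is 8 or 4; this is inferred from
+		 * the division against the TCXO rate, not from a datasheet.
+		 */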
+ config = readl_relaxed(host->ioaddr + CORE_DLL_CONFIG_2);
+ config &= CORE_FLL_CYCLE_CNT;
+ if (config)
+ mclk_freq = DIV_ROUND_CLOSEST_ULL((host->clock * 8),
+ clk_get_rate(msm_host->xo_clk));
+ else
+ mclk_freq = DIV_ROUND_CLOSEST_ULL((host->clock * 4),
+ clk_get_rate(msm_host->xo_clk));
+
+ config = readl_relaxed(host->ioaddr + CORE_DLL_CONFIG_2);
+ config &= ~(0xFF << 10);
+ config |= mclk_freq << 10;
- /* Write 0 to DLL_PDN bit of DLL_CONFIG register */
- writel_relaxed((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG)
- & ~CORE_DLL_PDN), host->ioaddr + CORE_DLL_CONFIG);
+ writel_relaxed(config, host->ioaddr + CORE_DLL_CONFIG_2);
+ /* wait for 5us before enabling DLL clock */
+ udelay(5);
+ }
+
+ config = readl_relaxed(host->ioaddr + CORE_DLL_CONFIG);
+ config &= ~CORE_DLL_RST;
+ writel_relaxed(config, host->ioaddr + CORE_DLL_CONFIG);
+
+ config = readl_relaxed(host->ioaddr + CORE_DLL_CONFIG);
+ config &= ~CORE_DLL_PDN;
+ writel_relaxed(config, host->ioaddr + CORE_DLL_CONFIG);
+
+ if (msm_host->use_14lpp_dll_reset) {
+ msm_cm_dll_set_freq(host);
+ config = readl_relaxed(host->ioaddr + CORE_DLL_CONFIG_2);
+ config &= ~CORE_DLL_CLOCK_DISABLE;
+ writel_relaxed(config, host->ioaddr + CORE_DLL_CONFIG_2);
+ }
- /* Set DLL_EN bit to 1. */
- writel_relaxed((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG)
- | CORE_DLL_EN), host->ioaddr + CORE_DLL_CONFIG);
+ config = readl_relaxed(host->ioaddr + CORE_DLL_CONFIG);
+ config |= CORE_DLL_EN;
+ writel_relaxed(config, host->ioaddr + CORE_DLL_CONFIG);
- /* Set CK_OUT_EN bit to 1. */
- writel_relaxed((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG)
- | CORE_CK_OUT_EN), host->ioaddr + CORE_DLL_CONFIG);
+ config = readl_relaxed(host->ioaddr + CORE_DLL_CONFIG);
+ config |= CORE_CK_OUT_EN;
+ writel_relaxed(config, host->ioaddr + CORE_DLL_CONFIG);
/* Wait until DLL_LOCK bit of DLL_STATUS register becomes '1' */
while (!(readl_relaxed(host->ioaddr + CORE_DLL_STATUS) &
@@ -358,6 +464,200 @@ static int msm_init_cm_dll(struct sdhci_host *host)
return 0;
}
+static int sdhci_msm_cdclp533_calibration(struct sdhci_host *host)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
+ u32 config, calib_done;
+ int ret;
+
+ pr_debug("%s: %s: Enter\n", mmc_hostname(host->mmc), __func__);
+
+ /*
+ * Retuning in HS400 (DDR mode) will fail, just reset the
+ * tuning block and restore the saved tuning phase.
+ */
+ ret = msm_init_cm_dll(host);
+ if (ret)
+ goto out;
+
+ /* Set the selected phase in delay line hw block */
+ ret = msm_config_cm_dll_phase(host, msm_host->saved_tuning_phase);
+ if (ret)
+ goto out;
+
+ config = readl_relaxed(host->ioaddr + CORE_DLL_CONFIG);
+ config |= CORE_CMD_DAT_TRACK_SEL;
+ writel_relaxed(config, host->ioaddr + CORE_DLL_CONFIG);
+
+ config = readl_relaxed(host->ioaddr + CORE_DDR_200_CFG);
+ config &= ~CORE_CDC_T4_DLY_SEL;
+ writel_relaxed(config, host->ioaddr + CORE_DDR_200_CFG);
+
+ config = readl_relaxed(host->ioaddr + CORE_CSR_CDC_GEN_CFG);
+ config &= ~CORE_CDC_SWITCH_BYPASS_OFF;
+ writel_relaxed(config, host->ioaddr + CORE_CSR_CDC_GEN_CFG);
+
+ config = readl_relaxed(host->ioaddr + CORE_CSR_CDC_GEN_CFG);
+ config |= CORE_CDC_SWITCH_RC_EN;
+ writel_relaxed(config, host->ioaddr + CORE_CSR_CDC_GEN_CFG);
+
+ config = readl_relaxed(host->ioaddr + CORE_DDR_200_CFG);
+ config &= ~CORE_START_CDC_TRAFFIC;
+ writel_relaxed(config, host->ioaddr + CORE_DDR_200_CFG);
+
+ /*
+ * Perform CDC Register Initialization Sequence
+ *
+ * CORE_CSR_CDC_CTLR_CFG0 0x11800EC
+ * CORE_CSR_CDC_CTLR_CFG1 0x3011111
+ * CORE_CSR_CDC_CAL_TIMER_CFG0 0x1201000
+ * CORE_CSR_CDC_CAL_TIMER_CFG1 0x4
+ * CORE_CSR_CDC_REFCOUNT_CFG 0xCB732020
+ * CORE_CSR_CDC_COARSE_CAL_CFG 0xB19
+ * CORE_CSR_CDC_DELAY_CFG 0x3AC
+ * CORE_CDC_OFFSET_CFG 0x0
+ * CORE_CDC_SLAVE_DDA_CFG 0x16334
+ */
+
+ writel_relaxed(0x11800EC, host->ioaddr + CORE_CSR_CDC_CTLR_CFG0);
+ writel_relaxed(0x3011111, host->ioaddr + CORE_CSR_CDC_CTLR_CFG1);
+ writel_relaxed(0x1201000, host->ioaddr + CORE_CSR_CDC_CAL_TIMER_CFG0);
+ writel_relaxed(0x4, host->ioaddr + CORE_CSR_CDC_CAL_TIMER_CFG1);
+ writel_relaxed(0xCB732020, host->ioaddr + CORE_CSR_CDC_REFCOUNT_CFG);
+ writel_relaxed(0xB19, host->ioaddr + CORE_CSR_CDC_COARSE_CAL_CFG);
+ writel_relaxed(0x3AC, host->ioaddr + CORE_CSR_CDC_DELAY_CFG);
+ writel_relaxed(0x0, host->ioaddr + CORE_CDC_OFFSET_CFG);
+ writel_relaxed(0x16334, host->ioaddr + CORE_CDC_SLAVE_DDA_CFG);
+
+ /* CDC HW Calibration */
+
+ config = readl_relaxed(host->ioaddr + CORE_CSR_CDC_CTLR_CFG0);
+ config |= CORE_SW_TRIG_FULL_CALIB;
+ writel_relaxed(config, host->ioaddr + CORE_CSR_CDC_CTLR_CFG0);
+
+ config = readl_relaxed(host->ioaddr + CORE_CSR_CDC_CTLR_CFG0);
+ config &= ~CORE_SW_TRIG_FULL_CALIB;
+ writel_relaxed(config, host->ioaddr + CORE_CSR_CDC_CTLR_CFG0);
+
+ config = readl_relaxed(host->ioaddr + CORE_CSR_CDC_CTLR_CFG0);
+ config |= CORE_HW_AUTOCAL_ENA;
+ writel_relaxed(config, host->ioaddr + CORE_CSR_CDC_CTLR_CFG0);
+
+ config = readl_relaxed(host->ioaddr + CORE_CSR_CDC_CAL_TIMER_CFG0);
+ config |= CORE_TIMER_ENA;
+ writel_relaxed(config, host->ioaddr + CORE_CSR_CDC_CAL_TIMER_CFG0);
+
+ ret = readl_relaxed_poll_timeout(host->ioaddr + CORE_CSR_CDC_STATUS0,
+ calib_done,
+ (calib_done & CORE_CALIBRATION_DONE),
+ 1, 50);
+
+ if (ret == -ETIMEDOUT) {
+ pr_err("%s: %s: CDC calibration was not completed\n",
+ mmc_hostname(host->mmc), __func__);
+ goto out;
+ }
+
+ ret = readl_relaxed(host->ioaddr + CORE_CSR_CDC_STATUS0)
+ & CORE_CDC_ERROR_CODE_MASK;
+ if (ret) {
+ pr_err("%s: %s: CDC error code %d\n",
+ mmc_hostname(host->mmc), __func__, ret);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ config = readl_relaxed(host->ioaddr + CORE_DDR_200_CFG);
+ config |= CORE_START_CDC_TRAFFIC;
+ writel_relaxed(config, host->ioaddr + CORE_DDR_200_CFG);
+out:
+ pr_debug("%s: %s: Exit, ret %d\n", mmc_hostname(host->mmc),
+ __func__, ret);
+ return ret;
+}
+
+static int sdhci_msm_cm_dll_sdc4_calibration(struct sdhci_host *host)
+{
+ u32 dll_status, config;
+ int ret;
+
+ pr_debug("%s: %s: Enter\n", mmc_hostname(host->mmc), __func__);
+
+ /*
+	 * The CORE_DDR_CONFIG register currently defaults to the desired
+	 * configuration on reset, but reprogram the power-on reset (POR)
+	 * value here in case a bootloader has modified it. In the future,
+	 * if the reset default changes, the desired values will need to
+	 * be programmed appropriately.
+ */
+ writel_relaxed(DDR_CONFIG_POR_VAL, host->ioaddr + CORE_DDR_CONFIG);
+
+ config = readl_relaxed(host->ioaddr + CORE_DLL_CONFIG_2);
+ config |= CORE_DDR_CAL_EN;
+ writel_relaxed(config, host->ioaddr + CORE_DLL_CONFIG_2);
+
+ ret = readl_relaxed_poll_timeout(host->ioaddr + CORE_DLL_STATUS,
+ dll_status,
+ (dll_status & CORE_DDR_DLL_LOCK),
+ 10, 1000);
+
+ if (ret == -ETIMEDOUT) {
+ pr_err("%s: %s: CM_DLL_SDC4 calibration was not completed\n",
+ mmc_hostname(host->mmc), __func__);
+ goto out;
+ }
+
+ config = readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC3);
+ config |= CORE_PWRSAVE_DLL;
+ writel_relaxed(config, host->ioaddr + CORE_VENDOR_SPEC3);
+
+ /*
+ * Drain writebuffer to ensure above DLL calibration
+ * and PWRSAVE DLL is enabled.
+ */
+ wmb();
+out:
+ pr_debug("%s: %s: Exit, ret %d\n", mmc_hostname(host->mmc),
+ __func__, ret);
+ return ret;
+}
+
+static int sdhci_msm_hs400_dll_calibration(struct sdhci_host *host)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
+ int ret;
+ u32 config;
+
+ pr_debug("%s: %s: Enter\n", mmc_hostname(host->mmc), __func__);
+
+ /*
+ * Retuning in HS400 (DDR mode) will fail, just reset the
+ * tuning block and restore the saved tuning phase.
+ */
+ ret = msm_init_cm_dll(host);
+ if (ret)
+ goto out;
+
+ /* Set the selected phase in delay line hw block */
+ ret = msm_config_cm_dll_phase(host, msm_host->saved_tuning_phase);
+ if (ret)
+ goto out;
+
+ config = readl_relaxed(host->ioaddr + CORE_DLL_CONFIG);
+ config |= CORE_CMD_DAT_TRACK_SEL;
+ writel_relaxed(config, host->ioaddr + CORE_DLL_CONFIG);
+ if (msm_host->use_cdclp533)
+ ret = sdhci_msm_cdclp533_calibration(host);
+ else
+ ret = sdhci_msm_cm_dll_sdc4_calibration(host);
+out:
+ pr_debug("%s: %s: Exit, ret %d\n", mmc_hostname(host->mmc),
+ __func__, ret);
+ return ret;
+}
+
static int sdhci_msm_execute_tuning(struct sdhci_host *host, u32 opcode)
{
int tuning_seq_cnt = 3;
@@ -365,14 +665,17 @@ static int sdhci_msm_execute_tuning(struct sdhci_host *host, u32 opcode)
int rc;
struct mmc_host *mmc = host->mmc;
struct mmc_ios ios = host->mmc->ios;
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
/*
* Tuning is required for SDR104, HS200 and HS400 cards and
* if clock frequency is greater than 100MHz in these modes.
*/
- if (host->clock <= 100 * 1000 * 1000 ||
- !((ios.timing == MMC_TIMING_MMC_HS200) ||
- (ios.timing == MMC_TIMING_UHS_SDR104)))
+ if (host->clock <= CORE_FREQ_100MHZ ||
+ !(ios.timing == MMC_TIMING_MMC_HS400 ||
+ ios.timing == MMC_TIMING_MMC_HS200 ||
+ ios.timing == MMC_TIMING_UHS_SDR104))
return 0;
retry:
@@ -388,6 +691,7 @@ retry:
if (rc)
return rc;
+ msm_host->saved_tuning_phase = phase;
rc = mmc_send_tuning(mmc, opcode, NULL);
if (!rc) {
/* Tuning is successful at this tuning point */
@@ -423,6 +727,8 @@ retry:
rc = -EIO;
}
+ if (!rc)
+ msm_host->tuning_done = true;
return rc;
}
@@ -430,7 +736,10 @@ static void sdhci_msm_set_uhs_signaling(struct sdhci_host *host,
unsigned int uhs)
{
struct mmc_host *mmc = host->mmc;
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
u16 ctrl_2;
+ u32 config;
ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
/* Select Bus Speed Mode for host */
@@ -445,6 +754,7 @@ static void sdhci_msm_set_uhs_signaling(struct sdhci_host *host,
case MMC_TIMING_UHS_SDR50:
ctrl_2 |= SDHCI_CTRL_UHS_SDR50;
break;
+ case MMC_TIMING_MMC_HS400:
case MMC_TIMING_MMC_HS200:
case MMC_TIMING_UHS_SDR104:
ctrl_2 |= SDHCI_CTRL_UHS_SDR104;
@@ -461,15 +771,42 @@ static void sdhci_msm_set_uhs_signaling(struct sdhci_host *host,
* provide feedback clock, the mode selection can be any value less
* than 3'b011 in bits [2:0] of HOST CONTROL2 register.
*/
- if (host->clock <= 100000000 &&
- (uhs == MMC_TIMING_MMC_HS400 ||
- uhs == MMC_TIMING_MMC_HS200 ||
- uhs == MMC_TIMING_UHS_SDR104))
- ctrl_2 &= ~SDHCI_CTRL_UHS_MASK;
+ if (host->clock <= CORE_FREQ_100MHZ) {
+ if (uhs == MMC_TIMING_MMC_HS400 ||
+ uhs == MMC_TIMING_MMC_HS200 ||
+ uhs == MMC_TIMING_UHS_SDR104)
+ ctrl_2 &= ~SDHCI_CTRL_UHS_MASK;
+ /*
+		 * The DLL is not required for clock <= 100MHz, so make
+		 * sure it is disabled when not needed.
+ */
+ config = readl_relaxed(host->ioaddr + CORE_DLL_CONFIG);
+ config |= CORE_DLL_RST;
+ writel_relaxed(config, host->ioaddr + CORE_DLL_CONFIG);
+
+ config = readl_relaxed(host->ioaddr + CORE_DLL_CONFIG);
+ config |= CORE_DLL_PDN;
+ writel_relaxed(config, host->ioaddr + CORE_DLL_CONFIG);
+
+ /*
+ * The DLL needs to be restored and CDCLP533 recalibrated
+ * when the clock frequency is set back to 400MHz.
+ */
+ msm_host->calibration_done = false;
+ }
dev_dbg(mmc_dev(mmc), "%s: clock=%u uhs=%u ctrl_2=0x%x\n",
mmc_hostname(host->mmc), host->clock, uhs, ctrl_2);
sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);
+
+ spin_unlock_irq(&host->lock);
+ /* CDCLP533 HW calibration is only required for HS400 mode */
+ if (host->clock > CORE_FREQ_100MHZ &&
+ msm_host->tuning_done && !msm_host->calibration_done &&
+ mmc->ios.timing == MMC_TIMING_MMC_HS400)
+ if (!sdhci_msm_hs400_dll_calibration(host))
+ msm_host->calibration_done = true;
+ spin_lock_irq(&host->lock);
}
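
The calibration step just above can sleep (it polls hardware with timeouts), which is why the driver drops host->lock, a spinlock taken with interrupts disabled, around it. A minimal sketch of that lock-drop pattern, assuming a hypothetical do_sleepy_calibration() helper that returns 0 on success:

static void foo_set_uhs_signaling(struct sdhci_host *host, unsigned int uhs)
{
	/* ... register updates done under host->lock ... */

	spin_unlock_irq(&host->lock);		/* calibration may sleep */
	if (!do_sleepy_calibration(host))	/* hypothetical helper */
		pr_debug("%s: calibration done\n", mmc_hostname(host->mmc));
	spin_lock_irq(&host->lock);		/* caller expects the lock held */
}
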
static void sdhci_msm_voltage_switch(struct sdhci_host *host)
@@ -505,6 +842,183 @@ static irqreturn_t sdhci_msm_pwr_irq(int irq, void *data)
return IRQ_HANDLED;
}
+static unsigned int sdhci_msm_get_max_clock(struct sdhci_host *host)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
+
+ return clk_round_rate(msm_host->clk, ULONG_MAX);
+}
+
+static unsigned int sdhci_msm_get_min_clock(struct sdhci_host *host)
+{
+ return SDHCI_MSM_MIN_CLOCK;
+}
+
+/**
+ * __sdhci_msm_set_clock - sdhci_msm clock control.
+ *
+ * Description:
+ * The MSM controller does not use the internal divider and
+ * instead directly controls the GCC clock as per the
+ * HW recommendation.
+ */
+void __sdhci_msm_set_clock(struct sdhci_host *host, unsigned int clock)
+{
+ u16 clk;
+ /*
+ * Keep actual_clock as zero:
+ * - since no divider is used, there is no need for actual_clock;
+ * - the MSM controller uses SDCLK for data timeout calculation, and if
+ * actual_clock is zero, host->clock is taken for the calculation.
+ */
+ host->mmc->actual_clock = 0;
+
+ sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL);
+
+ if (clock == 0)
+ return;
+
+ /*
+ * The MSM controller does not use the clock divider.
+ * Thus read SDHCI_CLOCK_CONTROL and only enable the
+ * clock with no divider value programmed.
+ */
+ clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
+ sdhci_enable_clk(host, clk);
+}
+
+/* sdhci_msm_set_clock - Called with (host->lock) spinlock held. */
+static void sdhci_msm_set_clock(struct sdhci_host *host, unsigned int clock)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
+ struct mmc_ios curr_ios = host->mmc->ios;
+ u32 config, dll_lock;
+ int rc;
+
+ if (!clock) {
+ msm_host->clk_rate = clock;
+ goto out;
+ }
+
+ spin_unlock_irq(&host->lock);
+ /*
+ * The SDHC requires internal clock frequency to be double the
+ * actual clock that will be set for DDR mode. The controller
+ * uses the faster clock (100/400MHz) for some of its parts and
+ * sends the actual required clock (50/200MHz) to the card.
+ */
+ if (curr_ios.timing == MMC_TIMING_UHS_DDR50 ||
+ curr_ios.timing == MMC_TIMING_MMC_DDR52 ||
+ curr_ios.timing == MMC_TIMING_MMC_HS400)
+ clock *= 2;
+ /*
+ * In general all timing modes are controlled via UHS mode select in
+ * Host Control2 register. The eMMC-specific HS200/HS400 modes don't have
+ * their respective modes defined here, hence we use these values.
+ *
+ * HS200 - SDR104 (Since they both are equivalent in functionality)
+ * HS400 - This involves multiple configurations
+ * Initially SDR104 - when tuning is required as HS200
+ * Then when switching to DDR @ 400MHz (HS400) we use
+ * the vendor specific HC_SELECT_IN to control the mode.
+ *
+ * In addition to controlling the modes we also need to select the
+ * correct input clock for DLL depending on the mode.
+ *
+ * HS400 - divided clock (free running MCLK/2)
+ * All other modes - default (free running MCLK)
+ */
+ if (curr_ios.timing == MMC_TIMING_MMC_HS400) {
+ /* Select the divided clock (free running MCLK/2) */
+ config = readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC);
+ config &= ~CORE_HC_MCLK_SEL_MASK;
+ config |= CORE_HC_MCLK_SEL_HS400;
+
+ writel_relaxed(config, host->ioaddr + CORE_VENDOR_SPEC);
+ /*
+ * Select HS400 mode using the HC_SELECT_IN from VENDOR SPEC
+ * register
+ */
+ if (msm_host->tuning_done && !msm_host->calibration_done) {
+ /*
+ * Write 0x6 to HC_SELECT_IN and 1 to HC_SELECT_IN_EN
+ * field in VENDOR_SPEC_FUNC
+ */
+ config = readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC);
+ config |= CORE_HC_SELECT_IN_HS400;
+ config |= CORE_HC_SELECT_IN_EN;
+ writel_relaxed(config, host->ioaddr + CORE_VENDOR_SPEC);
+ }
+ if (!msm_host->clk_rate && !msm_host->use_cdclp533) {
+ /*
+ * Poll on DLL_LOCK or DDR_DLL_LOCK bits in
+ * CORE_DLL_STATUS to be set. This should get set
+ * within 15 us at 200 MHz.
+ */
+ rc = readl_relaxed_poll_timeout(host->ioaddr +
+ CORE_DLL_STATUS,
+ dll_lock,
+ (dll_lock &
+ (CORE_DLL_LOCK |
+ CORE_DDR_DLL_LOCK)), 10,
+ 1000);
+ if (rc == -ETIMEDOUT)
+ pr_err("%s: Unable to get DLL_LOCK/DDR_DLL_LOCK, dll_status: 0x%08x\n",
+ mmc_hostname(host->mmc), dll_lock);
+ }
+ } else {
+ if (!msm_host->use_cdclp533) {
+ config = readl_relaxed(host->ioaddr +
+ CORE_VENDOR_SPEC3);
+ config &= ~CORE_PWRSAVE_DLL;
+ writel_relaxed(config, host->ioaddr +
+ CORE_VENDOR_SPEC3);
+ }
+
+ config = readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC);
+ config &= ~CORE_HC_MCLK_SEL_MASK;
+ config |= CORE_HC_MCLK_SEL_DFLT;
+ writel_relaxed(config, host->ioaddr + CORE_VENDOR_SPEC);
+
+ /*
+ * Disable HC_SELECT_IN to be able to use the UHS mode select
+ * configuration from Host Control2 register for all other
+ * modes.
+ * Write 0 to HC_SELECT_IN and HC_SELECT_IN_EN field
+ * in VENDOR_SPEC_FUNC
+ */
+ config = readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC);
+ config &= ~CORE_HC_SELECT_IN_EN;
+ config &= ~CORE_HC_SELECT_IN_MASK;
+ writel_relaxed(config, host->ioaddr + CORE_VENDOR_SPEC);
+ }
+
+ /*
+ * Make sure above writes impacting free running MCLK are completed
+ * before changing the clk_rate at GCC.
+ */
+ wmb();
+
+ rc = clk_set_rate(msm_host->clk, clock);
+ if (rc) {
+ pr_err("%s: Failed to set clock at rate %u at timing %d\n",
+ mmc_hostname(host->mmc), clock,
+ curr_ios.timing);
+ goto out_lock;
+ }
+ msm_host->clk_rate = clock;
+ pr_debug("%s: Setting clock at rate %lu at timing %d\n",
+ mmc_hostname(host->mmc), clk_get_rate(msm_host->clk),
+ curr_ios.timing);
+
+out_lock:
+ spin_lock_irq(&host->lock);
+out:
+ __sdhci_msm_set_clock(host, clock);
+}
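
The DDR doubling at the top of this function is the crux of the clock path: the controller's MCLK must run at twice the card clock in DDR modes. A self-contained sketch of that mapping (MMC_TIMING_* constants come from <linux/mmc/host.h>; the helper name is hypothetical):

#include <linux/mmc/host.h>

/* Rate the GCC clock must run at for a requested card clock. */
static unsigned int msm_mclk_rate(unsigned int card_clock, unsigned char timing)
{
	switch (timing) {
	case MMC_TIMING_UHS_DDR50:
	case MMC_TIMING_MMC_DDR52:
	case MMC_TIMING_MMC_HS400:	/* 200 MHz card clock -> 400 MHz MCLK */
		return card_clock * 2;	/* data is clocked on both edges */
	default:
		return card_clock;
	}
}
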
+
static const struct of_device_id sdhci_msm_dt_match[] = {
{ .compatible = "qcom,sdhci-msm-v4" },
{},
@@ -515,7 +1029,9 @@ MODULE_DEVICE_TABLE(of, sdhci_msm_dt_match);
static const struct sdhci_ops sdhci_msm_ops = {
.platform_execute_tuning = sdhci_msm_execute_tuning,
.reset = sdhci_reset,
- .set_clock = sdhci_set_clock,
+ .set_clock = sdhci_msm_set_clock,
+ .get_min_clock = sdhci_msm_get_min_clock,
+ .get_max_clock = sdhci_msm_get_max_clock,
.set_bus_width = sdhci_set_bus_width,
.set_uhs_signaling = sdhci_msm_set_uhs_signaling,
.voltage_switch = sdhci_msm_voltage_switch,
@@ -524,7 +1040,9 @@ static const struct sdhci_ops sdhci_msm_ops = {
static const struct sdhci_pltfm_data sdhci_msm_pdata = {
.quirks = SDHCI_QUIRK_BROKEN_CARD_DETECTION |
SDHCI_QUIRK_NO_CARD_NO_RESET |
- SDHCI_QUIRK_SINGLE_POWER_WRITE,
+ SDHCI_QUIRK_SINGLE_POWER_WRITE |
+ SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN,
+ .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
.ops = &sdhci_msm_ops,
};
@@ -536,7 +1054,7 @@ static int sdhci_msm_probe(struct platform_device *pdev)
struct resource *core_memres;
int ret;
u16 host_version, core_minor;
- u32 core_version, caps;
+ u32 core_version, config;
u8 core_major;
host = sdhci_pltfm_init(pdev, &sdhci_msm_pdata, sizeof(*msm_host));
@@ -554,6 +1072,8 @@ static int sdhci_msm_probe(struct platform_device *pdev)
sdhci_get_of_property(pdev);
+ msm_host->saved_tuning_phase = INVALID_TUNING_PHASE;
+
/* Setup SDCC bus voter clock. */
msm_host->bus_clk = devm_clk_get(&pdev->dev, "bus");
if (!IS_ERR(msm_host->bus_clk)) {
@@ -586,6 +1106,16 @@ static int sdhci_msm_probe(struct platform_device *pdev)
goto pclk_disable;
}
+ /*
+ * The xo clock is needed for the FLL feature of cm_dll.
+ * If the xo clock is not specified in the DT, warn and proceed.
+ */
+ msm_host->xo_clk = devm_clk_get(&pdev->dev, "xo");
+ if (IS_ERR(msm_host->xo_clk)) {
+ ret = PTR_ERR(msm_host->xo_clk);
+ dev_warn(&pdev->dev, "TCXO clk not present (%d)\n", ret);
+ }
+
/* Vote for maximum clock rate for maximum performance */
ret = clk_set_rate(msm_host->clk, INT_MAX);
if (ret)
@@ -604,9 +1134,9 @@ static int sdhci_msm_probe(struct platform_device *pdev)
goto clk_disable;
}
- /* Reset the core and Enable SDHC mode */
- writel_relaxed(readl_relaxed(msm_host->core_mem + CORE_POWER) |
- CORE_SW_RST, msm_host->core_mem + CORE_POWER);
+ config = readl_relaxed(msm_host->core_mem + CORE_POWER);
+ config |= CORE_SW_RST;
+ writel_relaxed(config, msm_host->core_mem + CORE_POWER);
/* SW reset can take up to 10 HCLK + 15 MCLK cycles. (min 40us) */
usleep_range(1000, 5000);
@@ -619,6 +1149,10 @@ static int sdhci_msm_probe(struct platform_device *pdev)
/* Set HC_MODE_EN bit in HC_MODE register */
writel_relaxed(HC_MODE_EN, (msm_host->core_mem + CORE_HC_MODE));
+ config = readl_relaxed(msm_host->core_mem + CORE_HC_MODE);
+ config |= FF_CLK_SW_RST_DIS;
+ writel_relaxed(config, msm_host->core_mem + CORE_HC_MODE);
+
host_version = readw_relaxed((host->ioaddr + SDHCI_HOST_VERSION));
dev_dbg(&pdev->dev, "Host Version: 0x%x Vendor Version 0x%x\n",
host_version, ((host_version & SDHCI_VENDOR_VER_MASK) >>
@@ -631,14 +1165,24 @@ static int sdhci_msm_probe(struct platform_device *pdev)
dev_dbg(&pdev->dev, "MCI Version: 0x%08x, major: 0x%04x, minor: 0x%02x\n",
core_version, core_major, core_minor);
+ if (core_major == 1 && core_minor >= 0x42)
+ msm_host->use_14lpp_dll_reset = true;
+
+ /*
+ * SDCC 5 controllers with major version 1, minor version 0x34 and later,
+ * with HS400 mode support, use the CM DLL instead of the CDC LP 533 DLL.
+ */
+ if (core_major == 1 && core_minor < 0x34)
+ msm_host->use_cdclp533 = true;
+
/*
* Support for some capabilities is not advertised by newer
* controller versions and must be explicitly enabled.
*/
if (core_major >= 1 && core_minor != 0x11 && core_minor != 0x12) {
- caps = readl_relaxed(host->ioaddr + SDHCI_CAPABILITIES);
- caps |= SDHCI_CAN_VDD_300 | SDHCI_CAN_DO_8BIT;
- writel_relaxed(caps, host->ioaddr +
+ config = readl_relaxed(host->ioaddr + SDHCI_CAPABILITIES);
+ config |= SDHCI_CAN_VDD_300 | SDHCI_CAN_DO_8BIT;
+ writel_relaxed(config, host->ioaddr +
CORE_VENDOR_SPEC_CAPABILITIES0);
}
@@ -659,12 +1203,26 @@ static int sdhci_msm_probe(struct platform_device *pdev)
goto clk_disable;
}
+ pm_runtime_get_noresume(&pdev->dev);
+ pm_runtime_set_active(&pdev->dev);
+ pm_runtime_enable(&pdev->dev);
+ pm_runtime_set_autosuspend_delay(&pdev->dev,
+ MSM_MMC_AUTOSUSPEND_DELAY_MS);
+ pm_runtime_use_autosuspend(&pdev->dev);
+
ret = sdhci_add_host(host);
if (ret)
- goto clk_disable;
+ goto pm_runtime_disable;
+
+ pm_runtime_mark_last_busy(&pdev->dev);
+ pm_runtime_put_autosuspend(&pdev->dev);
return 0;
+pm_runtime_disable:
+ pm_runtime_disable(&pdev->dev);
+ pm_runtime_set_suspended(&pdev->dev);
+ pm_runtime_put_noidle(&pdev->dev);
clk_disable:
clk_disable_unprepare(msm_host->clk);
pclk_disable:
@@ -686,6 +1244,11 @@ static int sdhci_msm_remove(struct platform_device *pdev)
0xffffffff);
sdhci_remove_host(host, dead);
+
+ pm_runtime_get_sync(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+ pm_runtime_put_noidle(&pdev->dev);
+
clk_disable_unprepare(msm_host->clk);
clk_disable_unprepare(msm_host->pclk);
if (!IS_ERR(msm_host->bus_clk))
@@ -694,12 +1257,57 @@ static int sdhci_msm_remove(struct platform_device *pdev)
return 0;
}
+#ifdef CONFIG_PM
+static int sdhci_msm_runtime_suspend(struct device *dev)
+{
+ struct sdhci_host *host = dev_get_drvdata(dev);
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
+
+ clk_disable_unprepare(msm_host->clk);
+ clk_disable_unprepare(msm_host->pclk);
+
+ return 0;
+}
+
+static int sdhci_msm_runtime_resume(struct device *dev)
+{
+ struct sdhci_host *host = dev_get_drvdata(dev);
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
+ int ret;
+
+ ret = clk_prepare_enable(msm_host->clk);
+ if (ret) {
+ dev_err(dev, "clk_enable failed for core_clk: %d\n", ret);
+ return ret;
+ }
+ ret = clk_prepare_enable(msm_host->pclk);
+ if (ret) {
+ dev_err(dev, "clk_enable failed for iface_clk: %d\n", ret);
+ clk_disable_unprepare(msm_host->clk);
+ return ret;
+ }
+
+ return 0;
+}
+#endif
+
+static const struct dev_pm_ops sdhci_msm_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
+ SET_RUNTIME_PM_OPS(sdhci_msm_runtime_suspend,
+ sdhci_msm_runtime_resume,
+ NULL)
+};
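
With autosuspend configured in probe above, any path that touches the hardware outside a runtime-resumed section must bracket its accesses. A hedged sketch of that bracket; foo_do_io() is hypothetical (in this driver the sdhci/mmc core performs the equivalent calls):

#include <linux/pm_runtime.h>

static int foo_do_io(struct device *dev)
{
	int ret;

	ret = pm_runtime_get_sync(dev);	/* runs sdhci_msm_runtime_resume() */
	if (ret < 0) {
		pm_runtime_put_noidle(dev);
		return ret;
	}

	/* ... controller register accesses go here ... */

	pm_runtime_mark_last_busy(dev);		/* re-arm the autosuspend timer */
	pm_runtime_put_autosuspend(dev);	/* clocks gate after the delay */
	return 0;
}
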
+
static struct platform_driver sdhci_msm_driver = {
.probe = sdhci_msm_probe,
.remove = sdhci_msm_remove,
.driver = {
.name = "sdhci_msm",
.of_match_table = sdhci_msm_dt_match,
+ .pm = &sdhci_msm_pm_ops,
},
};
diff --git a/drivers/mmc/host/sdhci-of-at91.c b/drivers/mmc/host/sdhci-of-at91.c
index a9b7fc06c434..2f9ad213377a 100644
--- a/drivers/mmc/host/sdhci-of-at91.c
+++ b/drivers/mmc/host/sdhci-of-at91.c
@@ -100,6 +100,7 @@ static const struct of_device_id sdhci_at91_dt_match[] = {
{ .compatible = "atmel,sama5d2-sdhci", .data = &soc_data_sama5d2 },
{}
};
+MODULE_DEVICE_TABLE(of, sdhci_at91_dt_match);
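This one-liner matters for module autoloading: MODULE_DEVICE_TABLE(of, ...) emits the compatible strings as module aliases so udev can load the driver when a matching device-tree node appears. The general shape of the pattern, with a hypothetical compatible string:

#include <linux/module.h>
#include <linux/of.h>

static const struct of_device_id foo_dt_match[] = {
	{ .compatible = "vendor,foo-sdhci" },	/* hypothetical */
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, foo_dt_match);
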
#ifdef CONFIG_PM
static int sdhci_at91_runtime_suspend(struct device *dev)
diff --git a/drivers/mmc/host/sdhci-of-esdhc.c b/drivers/mmc/host/sdhci-of-esdhc.c
index 1bb11e4a9fe5..9a6eb4492172 100644
--- a/drivers/mmc/host/sdhci-of-esdhc.c
+++ b/drivers/mmc/host/sdhci-of-esdhc.c
@@ -18,6 +18,7 @@
#include <linux/of.h>
#include <linux/delay.h>
#include <linux/module.h>
+#include <linux/sys_soc.h>
#include <linux/mmc/host.h>
#include "sdhci-pltfm.h"
#include "sdhci-esdhc.h"
@@ -28,6 +29,7 @@
struct sdhci_esdhc {
u8 vendor_ver;
u8 spec_ver;
+ bool quirk_incorrect_hostver;
};
/**
@@ -87,6 +89,8 @@ static u32 esdhc_readl_fixup(struct sdhci_host *host,
static u16 esdhc_readw_fixup(struct sdhci_host *host,
int spec_reg, u32 value)
{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
u16 ret;
int shift = (spec_reg & 0x2) * 8;
@@ -94,6 +98,12 @@ static u16 esdhc_readw_fixup(struct sdhci_host *host,
ret = value & 0xffff;
else
ret = (value >> shift) & 0xffff;
+ /*
+ * Workaround for the T4240-R1.0-R2.0 eSDHC, which reports incorrect
+ * vendor version and spec version information.
+ */
+ if ((spec_reg == SDHCI_HOST_VERSION) &&
+ (esdhc->quirk_incorrect_hostver))
+ ret = (VENDOR_V_23 << SDHCI_VENDOR_VER_SHIFT) | SDHCI_SPEC_200;
return ret;
}
@@ -572,6 +582,12 @@ static const struct sdhci_pltfm_data sdhci_esdhc_le_pdata = {
.ops = &sdhci_esdhc_le_ops,
};
+static struct soc_device_attribute soc_incorrect_hostver[] = {
+ { .family = "QorIQ T4240", .revision = "1.0", },
+ { .family = "QorIQ T4240", .revision = "2.0", },
+ { },
+};
+
static void esdhc_init(struct platform_device *pdev, struct sdhci_host *host)
{
struct sdhci_pltfm_host *pltfm_host;
@@ -585,6 +601,10 @@ static void esdhc_init(struct platform_device *pdev, struct sdhci_host *host)
esdhc->vendor_ver = (host_ver & SDHCI_VENDOR_VER_MASK) >>
SDHCI_VENDOR_VER_SHIFT;
esdhc->spec_ver = host_ver & SDHCI_SPEC_VER_MASK;
+ if (soc_device_match(soc_incorrect_hostver))
+ esdhc->quirk_incorrect_hostver = true;
+ else
+ esdhc->quirk_incorrect_hostver = false;
}
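
soc_device_match() walks a NULL-terminated table and returns the first entry whose populated fields all match the running SoC, which keeps revision quirks declarative. A minimal sketch of the same lookup (values taken from the table above; the foo_* names are hypothetical):

#include <linux/sys_soc.h>

static const struct soc_device_attribute foo_bad_hostver[] = {
	{ .family = "QorIQ T4240", .revision = "1.0" },
	{ .family = "QorIQ T4240", .revision = "2.0" },
	{ /* sentinel */ }
};

static bool foo_needs_hostver_fixup(void)
{
	return soc_device_match(foo_bad_hostver) != NULL;
}
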
static int sdhci_esdhc_probe(struct platform_device *pdev)
diff --git a/drivers/mmc/host/sdhci-pci-core.c b/drivers/mmc/host/sdhci-pci-core.c
index 1d9e00a00e9f..1a72d32af07f 100644
--- a/drivers/mmc/host/sdhci-pci-core.c
+++ b/drivers/mmc/host/sdhci-pci-core.c
@@ -27,6 +27,7 @@
#include <linux/pm_runtime.h>
#include <linux/mmc/slot-gpio.h>
#include <linux/mmc/sdhci-pci-data.h>
+#include <linux/acpi.h>
#include "sdhci.h"
#include "sdhci-pci.h"
@@ -375,6 +376,44 @@ static int byt_emmc_probe_slot(struct sdhci_pci_slot *slot)
return 0;
}
+#ifdef CONFIG_ACPI
+static int ni_set_max_freq(struct sdhci_pci_slot *slot)
+{
+ acpi_status status;
+ unsigned long long max_freq;
+
+ status = acpi_evaluate_integer(ACPI_HANDLE(&slot->chip->pdev->dev),
+ "MXFQ", NULL, &max_freq);
+ if (ACPI_FAILURE(status)) {
+ dev_err(&slot->chip->pdev->dev,
+ "MXFQ not found in acpi table\n");
+ return -EINVAL;
+ }
+
+ slot->host->mmc->f_max = max_freq * 1000000;
+
+ return 0;
+}
+#else
+static inline int ni_set_max_freq(struct sdhci_pci_slot *slot)
+{
+ return 0;
+}
+#endif
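
acpi_evaluate_integer() is the standard way to pull a scalar out of platform firmware, as the MXFQ lookup above shows. A hedged sketch of the same call with a graceful default, assuming a hypothetical "MODE" object in the device's ACPI namespace:

#include <linux/acpi.h>

static u32 foo_read_acpi_mode(struct device *dev, u32 dflt)
{
	unsigned long long val;
	acpi_status status;

	status = acpi_evaluate_integer(ACPI_HANDLE(dev), "MODE", NULL, &val);
	if (ACPI_FAILURE(status))
		return dflt;	/* object missing or not an integer */

	return val;
}
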
+
+static int ni_byt_sdio_probe_slot(struct sdhci_pci_slot *slot)
+{
+ int err;
+
+ err = ni_set_max_freq(slot);
+ if (err)
+ return err;
+
+ slot->host->mmc->caps |= MMC_CAP_POWER_OFF_CARD | MMC_CAP_NONREMOVABLE |
+ MMC_CAP_WAIT_WHILE_BUSY;
+ return 0;
+}
+
static int byt_sdio_probe_slot(struct sdhci_pci_slot *slot)
{
slot->host->mmc->caps |= MMC_CAP_POWER_OFF_CARD | MMC_CAP_NONREMOVABLE |
@@ -390,7 +429,8 @@ static int byt_sd_probe_slot(struct sdhci_pci_slot *slot)
slot->cd_override_level = true;
if (slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_BXT_SD ||
slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_BXTM_SD ||
- slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_APL_SD) {
+ slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_APL_SD ||
+ slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_GLK_SD) {
slot->host->mmc_host_ops.get_cd = bxt_get_cd;
slot->host->mmc->caps |= MMC_CAP_AGGRESSIVE_PM;
}
@@ -447,6 +487,15 @@ static const struct sdhci_pci_fixes sdhci_intel_byt_emmc = {
.ops = &sdhci_intel_byt_ops,
};
+static const struct sdhci_pci_fixes sdhci_ni_byt_sdio = {
+ .quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
+ .quirks2 = SDHCI_QUIRK2_HOST_OFF_CARD_ON |
+ SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
+ .allow_runtime_pm = true,
+ .probe_slot = ni_byt_sdio_probe_slot,
+ .ops = &sdhci_intel_byt_ops,
+};
+
static const struct sdhci_pci_fixes sdhci_intel_byt_sdio = {
.quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
.quirks2 = SDHCI_QUIRK2_HOST_OFF_CARD_ON |
@@ -1079,6 +1128,14 @@ static const struct pci_device_id pci_ids[] = {
{
.vendor = PCI_VENDOR_ID_INTEL,
.device = PCI_DEVICE_ID_INTEL_BYT_SDIO,
+ .subvendor = PCI_VENDOR_ID_NI,
+ .subdevice = 0x7884,
+ .driver_data = (kernel_ulong_t)&sdhci_ni_byt_sdio,
+ },
+
+ {
+ .vendor = PCI_VENDOR_ID_INTEL,
+ .device = PCI_DEVICE_ID_INTEL_BYT_SDIO,
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,
.driver_data = (kernel_ulong_t)&sdhci_intel_byt_sdio,
@@ -1277,6 +1334,30 @@ static const struct pci_device_id pci_ids[] = {
},
{
+ .vendor = PCI_VENDOR_ID_INTEL,
+ .device = PCI_DEVICE_ID_INTEL_GLK_EMMC,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .driver_data = (kernel_ulong_t)&sdhci_intel_byt_emmc,
+ },
+
+ {
+ .vendor = PCI_VENDOR_ID_INTEL,
+ .device = PCI_DEVICE_ID_INTEL_GLK_SDIO,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .driver_data = (kernel_ulong_t)&sdhci_intel_byt_sdio,
+ },
+
+ {
+ .vendor = PCI_VENDOR_ID_INTEL,
+ .device = PCI_DEVICE_ID_INTEL_GLK_SD,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .driver_data = (kernel_ulong_t)&sdhci_intel_byt_sd,
+ },
+
+ {
.vendor = PCI_VENDOR_ID_O2,
.device = PCI_DEVICE_ID_O2_8120,
.subvendor = PCI_ANY_ID,
@@ -1735,11 +1816,16 @@ static struct sdhci_pci_slot *sdhci_pci_probe_slot(
host->mmc->slotno = slotno;
host->mmc->caps2 |= MMC_CAP2_NO_PRESCAN_POWERUP;
- if (slot->cd_idx >= 0 &&
- mmc_gpiod_request_cd(host->mmc, slot->cd_con_id, slot->cd_idx,
- slot->cd_override_level, 0, NULL)) {
- dev_warn(&pdev->dev, "failed to setup card detect gpio\n");
- slot->cd_idx = -1;
+ if (slot->cd_idx >= 0) {
+ ret = mmc_gpiod_request_cd(host->mmc, slot->cd_con_id, slot->cd_idx,
+ slot->cd_override_level, 0, NULL);
+ if (ret == -EPROBE_DEFER)
+ goto remove;
+
+ if (ret) {
+ dev_warn(&pdev->dev, "failed to setup card detect gpio\n");
+ slot->cd_idx = -1;
+ }
}
ret = sdhci_add_host(host);
diff --git a/drivers/mmc/host/sdhci-pci.h b/drivers/mmc/host/sdhci-pci.h
index 6bccf56bc5ff..4abdaed72bd4 100644
--- a/drivers/mmc/host/sdhci-pci.h
+++ b/drivers/mmc/host/sdhci-pci.h
@@ -34,6 +34,9 @@
#define PCI_DEVICE_ID_INTEL_APL_SD 0x5aca
#define PCI_DEVICE_ID_INTEL_APL_EMMC 0x5acc
#define PCI_DEVICE_ID_INTEL_APL_SDIO 0x5ad0
+#define PCI_DEVICE_ID_INTEL_GLK_SD 0x31ca
+#define PCI_DEVICE_ID_INTEL_GLK_EMMC 0x31cc
+#define PCI_DEVICE_ID_INTEL_GLK_SDIO 0x31d0
/*
* PCI registers
diff --git a/drivers/mmc/host/sdhci-pltfm.h b/drivers/mmc/host/sdhci-pltfm.h
index 3280f2077959..957839d0fe37 100644
--- a/drivers/mmc/host/sdhci-pltfm.h
+++ b/drivers/mmc/host/sdhci-pltfm.h
@@ -106,7 +106,7 @@ extern unsigned int sdhci_pltfm_clk_get_max_clock(struct sdhci_host *host);
static inline void *sdhci_pltfm_priv(struct sdhci_pltfm_host *host)
{
- return (void *)host->private;
+ return host->private;
}
extern const struct dev_pm_ops sdhci_pltfm_pmops;
diff --git a/drivers/mmc/host/sdhci-s3c.c b/drivers/mmc/host/sdhci-s3c.c
index 784c5a848fb4..de219ca7ea7c 100644
--- a/drivers/mmc/host/sdhci-s3c.c
+++ b/drivers/mmc/host/sdhci-s3c.c
@@ -121,7 +121,9 @@ static unsigned int sdhci_s3c_consider_clock(struct sdhci_s3c *ourhost,
* speed possible with selected clock source and skip the division.
*/
if (ourhost->no_divider) {
+ spin_unlock_irq(&ourhost->host->lock);
rate = clk_round_rate(clksrc, wanted);
+ spin_lock_irq(&ourhost->host->lock);
return wanted - rate;
}
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index 42ef3ebb1d8c..23909804ffb8 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -22,6 +22,7 @@
#include <linux/scatterlist.h>
#include <linux/regulator/consumer.h>
#include <linux/pm_runtime.h>
+#include <linux/of.h>
#include <linux/leds.h>
@@ -1343,20 +1344,10 @@ clock_set:
}
EXPORT_SYMBOL_GPL(sdhci_calc_clk);
-void sdhci_set_clock(struct sdhci_host *host, unsigned int clock)
+void sdhci_enable_clk(struct sdhci_host *host, u16 clk)
{
- u16 clk;
unsigned long timeout;
- host->mmc->actual_clock = 0;
-
- sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL);
-
- if (clock == 0)
- return;
-
- clk = sdhci_calc_clk(host, clock, &host->mmc->actual_clock);
-
clk |= SDHCI_CLOCK_INT_EN;
sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
@@ -1377,6 +1368,22 @@ void sdhci_set_clock(struct sdhci_host *host, unsigned int clock)
clk |= SDHCI_CLOCK_CARD_EN;
sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
}
+EXPORT_SYMBOL_GPL(sdhci_enable_clk);
+
+void sdhci_set_clock(struct sdhci_host *host, unsigned int clock)
+{
+ u16 clk;
+
+ host->mmc->actual_clock = 0;
+
+ sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL);
+
+ if (clock == 0)
+ return;
+
+ clk = sdhci_calc_clk(host, clock, &host->mmc->actual_clock);
+ sdhci_enable_clk(host, clk);
+}
EXPORT_SYMBOL_GPL(sdhci_set_clock);
static void sdhci_set_power_reg(struct sdhci_host *host, unsigned char mode,
@@ -1569,6 +1576,9 @@ static void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
unsigned long flags;
u8 ctrl;
+ if (ios->power_mode == MMC_POWER_UNDEFINED)
+ return;
+
spin_lock_irqsave(&host->lock, flags);
if (host->flags & SDHCI_DEVICE_DEAD) {
@@ -1623,7 +1633,14 @@ static void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
if ((ios->timing == MMC_TIMING_SD_HS ||
- ios->timing == MMC_TIMING_MMC_HS)
+ ios->timing == MMC_TIMING_MMC_HS ||
+ ios->timing == MMC_TIMING_MMC_HS400 ||
+ ios->timing == MMC_TIMING_MMC_HS200 ||
+ ios->timing == MMC_TIMING_MMC_DDR52 ||
+ ios->timing == MMC_TIMING_UHS_SDR50 ||
+ ios->timing == MMC_TIMING_UHS_SDR104 ||
+ ios->timing == MMC_TIMING_UHS_DDR50 ||
+ ios->timing == MMC_TIMING_UHS_SDR25)
&& !(host->quirks & SDHCI_QUIRK_NO_HISPD_BIT))
ctrl |= SDHCI_CTRL_HISPD;
else
@@ -1632,16 +1649,6 @@ static void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
if (host->version >= SDHCI_SPEC_300) {
u16 clk, ctrl_2;
- /* In case of UHS-I modes, set High Speed Enable */
- if ((ios->timing == MMC_TIMING_MMC_HS400) ||
- (ios->timing == MMC_TIMING_MMC_HS200) ||
- (ios->timing == MMC_TIMING_MMC_DDR52) ||
- (ios->timing == MMC_TIMING_UHS_SDR50) ||
- (ios->timing == MMC_TIMING_UHS_SDR104) ||
- (ios->timing == MMC_TIMING_UHS_DDR50) ||
- (ios->timing == MMC_TIMING_UHS_SDR25))
- ctrl |= SDHCI_CTRL_HISPD;
-
if (!host->preset_enabled) {
sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
/*
@@ -1948,11 +1955,157 @@ static int sdhci_prepare_hs400_tuning(struct mmc_host *mmc, struct mmc_ios *ios)
return 0;
}
-static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode)
+static void sdhci_start_tuning(struct sdhci_host *host)
{
- struct sdhci_host *host = mmc_priv(mmc);
u16 ctrl;
- int tuning_loop_counter = MAX_TUNING_LOOP;
+
+ ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
+ ctrl |= SDHCI_CTRL_EXEC_TUNING;
+ if (host->quirks2 & SDHCI_QUIRK2_TUNING_WORK_AROUND)
+ ctrl |= SDHCI_CTRL_TUNED_CLK;
+ sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
+
+ /*
+ * As per the Host Controller spec v3.00, tuning command
+ * generates Buffer Read Ready interrupt, so enable that.
+ *
+ * Note: The spec clearly says that when tuning sequence
+ * is being performed, the controller does not generate
+ * interrupts other than Buffer Read Ready interrupt. But
+ * to make sure we don't hit a controller bug, we _only_
+ * enable Buffer Read Ready interrupt here.
+ */
+ sdhci_writel(host, SDHCI_INT_DATA_AVAIL, SDHCI_INT_ENABLE);
+ sdhci_writel(host, SDHCI_INT_DATA_AVAIL, SDHCI_SIGNAL_ENABLE);
+}
+
+static void sdhci_end_tuning(struct sdhci_host *host)
+{
+ sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
+ sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
+}
+
+static void sdhci_reset_tuning(struct sdhci_host *host)
+{
+ u16 ctrl;
+
+ ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
+ ctrl &= ~SDHCI_CTRL_TUNED_CLK;
+ ctrl &= ~SDHCI_CTRL_EXEC_TUNING;
+ sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
+}
+
+static void sdhci_abort_tuning(struct sdhci_host *host, u32 opcode,
+ unsigned long flags)
+{
+ sdhci_reset_tuning(host);
+
+ sdhci_do_reset(host, SDHCI_RESET_CMD);
+ sdhci_do_reset(host, SDHCI_RESET_DATA);
+
+ sdhci_end_tuning(host);
+
+ spin_unlock_irqrestore(&host->lock, flags);
+ mmc_abort_tuning(host->mmc, opcode);
+ spin_lock_irqsave(&host->lock, flags);
+}
+
+/*
+ * We use sdhci_send_tuning() because mmc_send_tuning() is not a good fit: the
+ * SDHCI tuning command does not have a data payload (or rather the hardware
+ * handles it automatically), so mmc_send_tuning() would return -EIO. Also, the
+ * tuning command's interrupt setup differs from other commands and there is no
+ * timeout interrupt, so special handling is needed.
+ */
+static void sdhci_send_tuning(struct sdhci_host *host, u32 opcode,
+ unsigned long flags)
+{
+ struct mmc_host *mmc = host->mmc;
+ struct mmc_command cmd = {0};
+ struct mmc_request mrq = {NULL};
+
+ cmd.opcode = opcode;
+ cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;
+ cmd.mrq = &mrq;
+
+ mrq.cmd = &cmd;
+ /*
+ * In response to CMD19, the card sends a 64-byte tuning
+ * block to the Host Controller, so we set the block size
+ * to 64 here (128 bytes for HS200 on an 8-bit bus).
+ */
+ if (cmd.opcode == MMC_SEND_TUNING_BLOCK_HS200 &&
+ mmc->ios.bus_width == MMC_BUS_WIDTH_8)
+ sdhci_writew(host, SDHCI_MAKE_BLKSZ(7, 128), SDHCI_BLOCK_SIZE);
+ else
+ sdhci_writew(host, SDHCI_MAKE_BLKSZ(7, 64), SDHCI_BLOCK_SIZE);
+
+ /*
+ * The tuning block is sent by the card to the host controller.
+ * So we set the TRNS_READ bit in the Transfer Mode register.
+ * This also takes care of setting DMA Enable and Multi Block
+ * Select in the same register to 0.
+ */
+ sdhci_writew(host, SDHCI_TRNS_READ, SDHCI_TRANSFER_MODE);
+
+ sdhci_send_command(host, &cmd);
+
+ host->cmd = NULL;
+
+ sdhci_del_timer(host, &mrq);
+
+ host->tuning_done = 0;
+
+ spin_unlock_irqrestore(&host->lock, flags);
+
+ /* Wait for Buffer Read Ready interrupt */
+ wait_event_timeout(host->buf_ready_int, (host->tuning_done == 1),
+ msecs_to_jiffies(50));
+
+ spin_lock_irqsave(&host->lock, flags);
+}
+
+static void __sdhci_execute_tuning(struct sdhci_host *host, u32 opcode,
+ unsigned long flags)
+{
+ int i;
+
+ /*
+ * Issue the opcode repeatedly until Execute Tuning is cleared or the
+ * number of loops reaches MAX_TUNING_LOOP (40).
+ */
+ for (i = 0; i < MAX_TUNING_LOOP; i++) {
+ u16 ctrl;
+
+ sdhci_send_tuning(host, opcode, flags);
+
+ if (!host->tuning_done) {
+ pr_info("%s: Tuning timeout, falling back to fixed sampling clock\n",
+ mmc_hostname(host->mmc));
+ sdhci_abort_tuning(host, opcode, flags);
+ return;
+ }
+
+ ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
+ if (!(ctrl & SDHCI_CTRL_EXEC_TUNING)) {
+ if (ctrl & SDHCI_CTRL_TUNED_CLK)
+ return; /* Success! */
+ break;
+ }
+
+ /* eMMC spec does not require a delay between tuning cycles */
+ if (opcode == MMC_SEND_TUNING_BLOCK)
+ mdelay(1);
+ }
+
+ pr_info("%s: Tuning failed, falling back to fixed sampling clock\n",
+ mmc_hostname(host->mmc));
+ sdhci_reset_tuning(host);
+}
+
+int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode)
+{
+ struct sdhci_host *host = mmc_priv(mmc);
int err = 0;
unsigned long flags;
unsigned int tuning_count = 0;
@@ -2003,144 +2156,22 @@ static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode)
if (host->ops->platform_execute_tuning) {
spin_unlock_irqrestore(&host->lock, flags);
- err = host->ops->platform_execute_tuning(host, opcode);
- return err;
+ return host->ops->platform_execute_tuning(host, opcode);
}
- ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
- ctrl |= SDHCI_CTRL_EXEC_TUNING;
- if (host->quirks2 & SDHCI_QUIRK2_TUNING_WORK_AROUND)
- ctrl |= SDHCI_CTRL_TUNED_CLK;
- sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
-
- /*
- * As per the Host Controller spec v3.00, tuning command
- * generates Buffer Read Ready interrupt, so enable that.
- *
- * Note: The spec clearly says that when tuning sequence
- * is being performed, the controller does not generate
- * interrupts other than Buffer Read Ready interrupt. But
- * to make sure we don't hit a controller bug, we _only_
- * enable Buffer Read Ready interrupt here.
- */
- sdhci_writel(host, SDHCI_INT_DATA_AVAIL, SDHCI_INT_ENABLE);
- sdhci_writel(host, SDHCI_INT_DATA_AVAIL, SDHCI_SIGNAL_ENABLE);
-
- /*
- * Issue CMD19 repeatedly till Execute Tuning is set to 0 or the number
- * of loops reaches 40 times.
- */
- do {
- struct mmc_command cmd = {0};
- struct mmc_request mrq = {NULL};
-
- cmd.opcode = opcode;
- cmd.arg = 0;
- cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;
- cmd.retries = 0;
- cmd.data = NULL;
- cmd.mrq = &mrq;
- cmd.error = 0;
-
- if (tuning_loop_counter-- == 0)
- break;
-
- mrq.cmd = &cmd;
-
- /*
- * In response to CMD19, the card sends 64 bytes of tuning
- * block to the Host Controller. So we set the block size
- * to 64 here.
- */
- if (cmd.opcode == MMC_SEND_TUNING_BLOCK_HS200) {
- if (mmc->ios.bus_width == MMC_BUS_WIDTH_8)
- sdhci_writew(host, SDHCI_MAKE_BLKSZ(7, 128),
- SDHCI_BLOCK_SIZE);
- else if (mmc->ios.bus_width == MMC_BUS_WIDTH_4)
- sdhci_writew(host, SDHCI_MAKE_BLKSZ(7, 64),
- SDHCI_BLOCK_SIZE);
- } else {
- sdhci_writew(host, SDHCI_MAKE_BLKSZ(7, 64),
- SDHCI_BLOCK_SIZE);
- }
-
- /*
- * The tuning block is sent by the card to the host controller.
- * So we set the TRNS_READ bit in the Transfer Mode register.
- * This also takes care of setting DMA Enable and Multi Block
- * Select in the same register to 0.
- */
- sdhci_writew(host, SDHCI_TRNS_READ, SDHCI_TRANSFER_MODE);
-
- sdhci_send_command(host, &cmd);
-
- host->cmd = NULL;
- sdhci_del_timer(host, &mrq);
-
- spin_unlock_irqrestore(&host->lock, flags);
- /* Wait for Buffer Read Ready interrupt */
- wait_event_timeout(host->buf_ready_int,
- (host->tuning_done == 1),
- msecs_to_jiffies(50));
- spin_lock_irqsave(&host->lock, flags);
-
- if (!host->tuning_done) {
- pr_info(DRIVER_NAME ": Timeout waiting for Buffer Read Ready interrupt during tuning procedure, falling back to fixed sampling clock\n");
-
- sdhci_do_reset(host, SDHCI_RESET_CMD);
- sdhci_do_reset(host, SDHCI_RESET_DATA);
-
- ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
- ctrl &= ~SDHCI_CTRL_TUNED_CLK;
- ctrl &= ~SDHCI_CTRL_EXEC_TUNING;
- sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
-
- err = -EIO;
- goto out;
- }
-
- host->tuning_done = 0;
-
- ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
-
- /* eMMC spec does not require a delay between tuning cycles */
- if (opcode == MMC_SEND_TUNING_BLOCK)
- mdelay(1);
- } while (ctrl & SDHCI_CTRL_EXEC_TUNING);
-
- /*
- * The Host Driver has exhausted the maximum number of loops allowed,
- * so use fixed sampling frequency.
- */
- if (tuning_loop_counter < 0) {
- ctrl &= ~SDHCI_CTRL_TUNED_CLK;
- sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
- }
- if (!(ctrl & SDHCI_CTRL_TUNED_CLK)) {
- pr_info(DRIVER_NAME ": Tuning procedure failed, falling back to fixed sampling clock\n");
- err = -EIO;
- }
+ host->mmc->retune_period = tuning_count;
-out:
- if (tuning_count) {
- /*
- * In case tuning fails, host controllers which support
- * re-tuning can try tuning again at a later time, when the
- * re-tuning timer expires. So for these controllers, we
- * return 0. Since there might be other controllers who do not
- * have this capability, we return error for them.
- */
- err = 0;
- }
+ sdhci_start_tuning(host);
- host->mmc->retune_period = err ? 0 : tuning_count;
+ __sdhci_execute_tuning(host, opcode, flags);
- sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
- sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
+ sdhci_end_tuning(host);
out_unlock:
spin_unlock_irqrestore(&host->lock, flags);
+
return err;
}
+EXPORT_SYMBOL_GPL(sdhci_execute_tuning);
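
Exporting sdhci_execute_tuning() lets vendor drivers add their own preparation around the generic sequence instead of reimplementing it, which is the point of the refactor above. A hedged sketch of such a wrapper; the foo_* names are hypothetical:

static int foo_execute_tuning(struct mmc_host *mmc, u32 opcode)
{
	struct sdhci_host *host = mmc_priv(mmc);

	foo_prepare_sampling(host);	/* hypothetical vendor setup */
	return sdhci_execute_tuning(mmc, opcode);
}

/* wired up in probe, e.g. host->mmc_host_ops.execute_tuning = foo_execute_tuning; */
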
static int sdhci_select_drive_strength(struct mmc_card *card,
unsigned int max_dtr, int host_drv,
@@ -2198,8 +2229,7 @@ static void sdhci_post_req(struct mmc_host *mmc, struct mmc_request *mrq,
data->host_cookie = COOKIE_UNMAPPED;
}
-static void sdhci_pre_req(struct mmc_host *mmc, struct mmc_request *mrq,
- bool is_first_req)
+static void sdhci_pre_req(struct mmc_host *mmc, struct mmc_request *mrq)
{
struct sdhci_host *host = mmc_priv(mmc);
@@ -2911,22 +2941,24 @@ int sdhci_runtime_resume_host(struct sdhci_host *host)
sdhci_init(host, 0);
- /* Force clock and power re-program */
- host->pwr = 0;
- host->clock = 0;
- mmc->ops->start_signal_voltage_switch(mmc, &mmc->ios);
- mmc->ops->set_ios(mmc, &mmc->ios);
+ if (mmc->ios.power_mode != MMC_POWER_UNDEFINED) {
+ /* Force clock and power re-program */
+ host->pwr = 0;
+ host->clock = 0;
+ mmc->ops->start_signal_voltage_switch(mmc, &mmc->ios);
+ mmc->ops->set_ios(mmc, &mmc->ios);
- if ((host_flags & SDHCI_PV_ENABLED) &&
- !(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN)) {
- spin_lock_irqsave(&host->lock, flags);
- sdhci_enable_preset_value(host, true);
- spin_unlock_irqrestore(&host->lock, flags);
- }
+ if ((host_flags & SDHCI_PV_ENABLED) &&
+ !(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN)) {
+ spin_lock_irqsave(&host->lock, flags);
+ sdhci_enable_preset_value(host, true);
+ spin_unlock_irqrestore(&host->lock, flags);
+ }
- if ((mmc->caps2 & MMC_CAP2_HS400_ES) &&
- mmc->ops->hs400_enhanced_strobe)
- mmc->ops->hs400_enhanced_strobe(mmc, &mmc->ios);
+ if ((mmc->caps2 & MMC_CAP2_HS400_ES) &&
+ mmc->ops->hs400_enhanced_strobe)
+ mmc->ops->hs400_enhanced_strobe(mmc, &mmc->ios);
+ }
spin_lock_irqsave(&host->lock, flags);
@@ -3010,6 +3042,8 @@ static int sdhci_set_dma_mask(struct sdhci_host *host)
void __sdhci_read_caps(struct sdhci_host *host, u16 *ver, u32 *caps, u32 *caps1)
{
u16 v;
+ u64 dt_caps_mask = 0;
+ u64 dt_caps = 0;
if (host->read_caps)
return;
@@ -3024,18 +3058,35 @@ void __sdhci_read_caps(struct sdhci_host *host, u16 *ver, u32 *caps, u32 *caps1)
sdhci_do_reset(host, SDHCI_RESET_ALL);
+ of_property_read_u64(mmc_dev(host->mmc)->of_node,
+ "sdhci-caps-mask", &dt_caps_mask);
+ of_property_read_u64(mmc_dev(host->mmc)->of_node,
+ "sdhci-caps", &dt_caps);
+
v = ver ? *ver : sdhci_readw(host, SDHCI_HOST_VERSION);
host->version = (v & SDHCI_SPEC_VER_MASK) >> SDHCI_SPEC_VER_SHIFT;
if (host->quirks & SDHCI_QUIRK_MISSING_CAPS)
return;
- host->caps = caps ? *caps : sdhci_readl(host, SDHCI_CAPABILITIES);
+ if (caps) {
+ host->caps = *caps;
+ } else {
+ host->caps = sdhci_readl(host, SDHCI_CAPABILITIES);
+ host->caps &= ~lower_32_bits(dt_caps_mask);
+ host->caps |= lower_32_bits(dt_caps);
+ }
if (host->version < SDHCI_SPEC_300)
return;
- host->caps1 = caps1 ? *caps1 : sdhci_readl(host, SDHCI_CAPABILITIES_1);
+ if (caps1) {
+ host->caps1 = *caps1;
+ } else {
+ host->caps1 = sdhci_readl(host, SDHCI_CAPABILITIES_1);
+ host->caps1 &= ~upper_32_bits(dt_caps_mask);
+ host->caps1 |= upper_32_bits(dt_caps);
+ }
}
EXPORT_SYMBOL_GPL(__sdhci_read_caps);
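
The new sdhci-caps-mask/sdhci-caps device-tree properties carry one 64-bit value spanning both capability registers; lower_32_bits()/upper_32_bits() route the halves, exactly as the hunk above does. A compact sketch of that split, with a hypothetical helper name:

/* Apply a 64-bit DT mask/override across the two 32-bit CAPS registers. */
static void foo_apply_dt_caps(u32 *caps, u32 *caps1, u64 mask, u64 val)
{
	*caps  &= ~lower_32_bits(mask);	/* bits 31:0  -> SDHCI_CAPABILITIES   */
	*caps  |=  lower_32_bits(val);
	*caps1 &= ~upper_32_bits(mask);	/* bits 63:32 -> SDHCI_CAPABILITIES_1 */
	*caps1 |=  upper_32_bits(val);
}
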
diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h
index 2570455b219a..0b66f210ae82 100644
--- a/drivers/mmc/host/sdhci.h
+++ b/drivers/mmc/host/sdhci.h
@@ -656,7 +656,7 @@ extern void sdhci_free_host(struct sdhci_host *host);
static inline void *sdhci_priv(struct sdhci_host *host)
{
- return (void *)host->private;
+ return host->private;
}
extern void sdhci_card_detect(struct sdhci_host *host);
@@ -682,6 +682,7 @@ static inline bool sdhci_sdio_irq_enabled(struct sdhci_host *host)
u16 sdhci_calc_clk(struct sdhci_host *host, unsigned int clock,
unsigned int *actual_clock);
void sdhci_set_clock(struct sdhci_host *host, unsigned int clock);
+void sdhci_enable_clk(struct sdhci_host *host, u16 clk);
void sdhci_set_power(struct sdhci_host *host, unsigned char mode,
unsigned short vdd);
void sdhci_set_power_noreg(struct sdhci_host *host, unsigned char mode,
@@ -689,6 +690,7 @@ void sdhci_set_power_noreg(struct sdhci_host *host, unsigned char mode,
void sdhci_set_bus_width(struct sdhci_host *host, int width);
void sdhci_reset(struct sdhci_host *host, u8 mask);
void sdhci_set_uhs_signaling(struct sdhci_host *host, unsigned timing);
+int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode);
#ifdef CONFIG_PM
extern int sdhci_suspend_host(struct sdhci_host *host);
diff --git a/drivers/mmc/host/sh_mobile_sdhi.c b/drivers/mmc/host/sh_mobile_sdhi.c
index 49edff7fee49..d46c2d00c182 100644
--- a/drivers/mmc/host/sh_mobile_sdhi.c
+++ b/drivers/mmc/host/sh_mobile_sdhi.c
@@ -47,31 +47,69 @@
#define host_to_priv(host) container_of((host)->pdata, struct sh_mobile_sdhi, mmc_data)
+struct sh_mobile_sdhi_scc {
+ unsigned long clk_rate; /* clock rate for SDR104 */
+ u32 tap; /* sampling clock position for SDR104 */
+};
+
struct sh_mobile_sdhi_of_data {
unsigned long tmio_flags;
+ u32 tmio_ocr_mask;
unsigned long capabilities;
unsigned long capabilities2;
enum dma_slave_buswidth dma_buswidth;
dma_addr_t dma_rx_offset;
unsigned bus_shift;
+ int scc_offset;
+ struct sh_mobile_sdhi_scc *taps;
+ int taps_num;
};
static const struct sh_mobile_sdhi_of_data of_default_cfg = {
.tmio_flags = TMIO_MMC_HAS_IDLE_WAIT,
};
+static const struct sh_mobile_sdhi_of_data of_rz_compatible = {
+ .tmio_flags = TMIO_MMC_HAS_IDLE_WAIT | TMIO_MMC_32BIT_DATA_PORT,
+ .tmio_ocr_mask = MMC_VDD_32_33,
+ .capabilities = MMC_CAP_SD_HIGHSPEED | MMC_CAP_SDIO_IRQ,
+};
+
static const struct sh_mobile_sdhi_of_data of_rcar_gen1_compatible = {
.tmio_flags = TMIO_MMC_HAS_IDLE_WAIT | TMIO_MMC_WRPROTECT_DISABLE |
TMIO_MMC_CLK_ACTUAL,
.capabilities = MMC_CAP_SD_HIGHSPEED | MMC_CAP_SDIO_IRQ,
};
+/* Definitions for sampling clocks */
+static struct sh_mobile_sdhi_scc rcar_gen2_scc_taps[] = {
+ {
+ .clk_rate = 156000000,
+ .tap = 0x00000703,
+ },
+ {
+ .clk_rate = 0,
+ .tap = 0x00000300,
+ },
+};
+
static const struct sh_mobile_sdhi_of_data of_rcar_gen2_compatible = {
.tmio_flags = TMIO_MMC_HAS_IDLE_WAIT | TMIO_MMC_WRPROTECT_DISABLE |
TMIO_MMC_CLK_ACTUAL | TMIO_MMC_MIN_RCAR2,
.capabilities = MMC_CAP_SD_HIGHSPEED | MMC_CAP_SDIO_IRQ,
.dma_buswidth = DMA_SLAVE_BUSWIDTH_4_BYTES,
.dma_rx_offset = 0x2000,
+ .scc_offset = 0x0300,
+ .taps = rcar_gen2_scc_taps,
+ .taps_num = ARRAY_SIZE(rcar_gen2_scc_taps),
+};
+
+/* Definitions for sampling clocks */
+static struct sh_mobile_sdhi_scc rcar_gen3_scc_taps[] = {
+ {
+ .clk_rate = 0,
+ .tap = 0x00000300,
+ },
};
static const struct sh_mobile_sdhi_of_data of_rcar_gen3_compatible = {
@@ -79,6 +117,9 @@ static const struct sh_mobile_sdhi_of_data of_rcar_gen3_compatible = {
TMIO_MMC_CLK_ACTUAL | TMIO_MMC_MIN_RCAR2,
.capabilities = MMC_CAP_SD_HIGHSPEED | MMC_CAP_SDIO_IRQ,
.bus_shift = 2,
+ .scc_offset = 0x1000,
+ .taps = rcar_gen3_scc_taps,
+ .taps_num = ARRAY_SIZE(rcar_gen3_scc_taps),
};
static const struct of_device_id sh_mobile_sdhi_of_match[] = {
@@ -86,6 +127,7 @@ static const struct of_device_id sh_mobile_sdhi_of_match[] = {
{ .compatible = "renesas,sdhi-sh73a0", .data = &of_default_cfg, },
{ .compatible = "renesas,sdhi-r8a73a4", .data = &of_default_cfg, },
{ .compatible = "renesas,sdhi-r8a7740", .data = &of_default_cfg, },
+ { .compatible = "renesas,sdhi-r7s72100", .data = &of_rz_compatible, },
{ .compatible = "renesas,sdhi-r8a7778", .data = &of_rcar_gen1_compatible, },
{ .compatible = "renesas,sdhi-r8a7779", .data = &of_rcar_gen1_compatible, },
{ .compatible = "renesas,sdhi-r8a7790", .data = &of_rcar_gen2_compatible, },
@@ -105,6 +147,7 @@ struct sh_mobile_sdhi {
struct tmio_mmc_dma dma_priv;
struct pinctrl *pinctrl;
struct pinctrl_state *pins_default, *pins_uhs;
+ void __iomem *scc_ctl;
};
static void sh_mobile_sdhi_sdbuf_width(struct tmio_mmc_host *host, int width)
@@ -255,6 +298,201 @@ static int sh_mobile_sdhi_start_signal_voltage_switch(struct mmc_host *mmc,
return pinctrl_select_state(priv->pinctrl, pin_state);
}
+/* SCC registers */
+#define SH_MOBILE_SDHI_SCC_DTCNTL 0x000
+#define SH_MOBILE_SDHI_SCC_TAPSET 0x002
+#define SH_MOBILE_SDHI_SCC_DT2FF 0x004
+#define SH_MOBILE_SDHI_SCC_CKSEL 0x006
+#define SH_MOBILE_SDHI_SCC_RVSCNTL 0x008
+#define SH_MOBILE_SDHI_SCC_RVSREQ 0x00A
+
+/* Definitions for values of the SH_MOBILE_SDHI_SCC_DTCNTL register */
+#define SH_MOBILE_SDHI_SCC_DTCNTL_TAPEN BIT(0)
+#define SH_MOBILE_SDHI_SCC_DTCNTL_TAPNUM_SHIFT 16
+#define SH_MOBILE_SDHI_SCC_DTCNTL_TAPNUM_MASK 0xff
+
+/* Definitions for values of the SH_MOBILE_SDHI_SCC_CKSEL register */
+#define SH_MOBILE_SDHI_SCC_CKSEL_DTSEL BIT(0)
+/* Definitions for values of the SH_MOBILE_SDHI_SCC_RVSCNTL register */
+#define SH_MOBILE_SDHI_SCC_RVSCNTL_RVSEN BIT(0)
+/* Definitions for values of the SH_MOBILE_SDHI_SCC_RVSREQ register */
+#define SH_MOBILE_SDHI_SCC_RVSREQ_RVSERR BIT(2)
+
+static inline u32 sd_scc_read32(struct tmio_mmc_host *host,
+ struct sh_mobile_sdhi *priv, int addr)
+{
+ return readl(priv->scc_ctl + (addr << host->bus_shift));
+}
+
+static inline void sd_scc_write32(struct tmio_mmc_host *host,
+ struct sh_mobile_sdhi *priv,
+ int addr, u32 val)
+{
+ writel(val, priv->scc_ctl + (addr << host->bus_shift));
+}
+
+static unsigned int sh_mobile_sdhi_init_tuning(struct tmio_mmc_host *host)
+{
+ struct sh_mobile_sdhi *priv;
+
+ if (!(host->mmc->caps & MMC_CAP_UHS_SDR104))
+ return 0;
+
+ priv = host_to_priv(host);
+
+ /* set sampling clock selection range */
+ sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_DTCNTL,
+ 0x8 << SH_MOBILE_SDHI_SCC_DTCNTL_TAPNUM_SHIFT);
+
+ /* Initialize SCC */
+ sd_ctrl_write32_as_16_and_16(host, CTL_STATUS, 0x0);
+
+ sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_DTCNTL,
+ SH_MOBILE_SDHI_SCC_DTCNTL_TAPEN |
+ sd_scc_read32(host, priv, SH_MOBILE_SDHI_SCC_DTCNTL));
+
+ sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, ~CLK_CTL_SCLKEN &
+ sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL));
+
+ sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_CKSEL,
+ SH_MOBILE_SDHI_SCC_CKSEL_DTSEL |
+ sd_scc_read32(host, priv, SH_MOBILE_SDHI_SCC_CKSEL));
+
+ sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, CLK_CTL_SCLKEN |
+ sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL));
+
+ sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_RVSCNTL,
+ ~SH_MOBILE_SDHI_SCC_RVSCNTL_RVSEN &
+ sd_scc_read32(host, priv, SH_MOBILE_SDHI_SCC_RVSCNTL));
+
+ sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_DT2FF, host->scc_tappos);
+
+ /* Read TAPNUM */
+ return (sd_scc_read32(host, priv, SH_MOBILE_SDHI_SCC_DTCNTL) >>
+ SH_MOBILE_SDHI_SCC_DTCNTL_TAPNUM_SHIFT) &
+ SH_MOBILE_SDHI_SCC_DTCNTL_TAPNUM_MASK;
+}
+
+static void sh_mobile_sdhi_prepare_tuning(struct tmio_mmc_host *host,
+ unsigned long tap)
+{
+ struct sh_mobile_sdhi *priv = host_to_priv(host);
+
+ /* Set sampling clock position */
+ sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_TAPSET, tap);
+}
+
+#define SH_MOBILE_SDHI_MAX_TAP 3
+
+static int sh_mobile_sdhi_select_tuning(struct tmio_mmc_host *host)
+{
+ struct sh_mobile_sdhi *priv = host_to_priv(host);
+ unsigned long tap_cnt; /* counter of tuning success */
+ unsigned long tap_set; /* tap position */
+ unsigned long tap_start;/* start position of tuning success */
+ unsigned long tap_end; /* end position of tuning success */
+ unsigned long ntap; /* temporary counter of tuning success */
+ unsigned long i;
+
+ /* Clear SCC_RVSREQ */
+ sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_RVSREQ, 0);
+
+ /*
+ * Find the longest consecutive run of successful probes. If that
+ * is more than SH_MOBILE_SDHI_MAX_TAP probes long then use the
+ * center index as the tap.
+ */
+ tap_cnt = 0;
+ ntap = 0;
+ tap_start = 0;
+ tap_end = 0;
+ for (i = 0; i < host->tap_num * 2; i++) {
+ if (test_bit(i, host->taps))
+ ntap++;
+ else {
+ if (ntap > tap_cnt) {
+ tap_start = i - ntap;
+ tap_end = i - 1;
+ tap_cnt = ntap;
+ }
+ ntap = 0;
+ }
+ }
+
+ if (ntap > tap_cnt) {
+ tap_start = i - ntap;
+ tap_end = i - 1;
+ tap_cnt = ntap;
+ }
+
+ if (tap_cnt >= SH_MOBILE_SDHI_MAX_TAP)
+ tap_set = (tap_start + tap_end) / 2 % host->tap_num;
+ else
+ return -EIO;
+
+ /* Set SCC */
+ sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_TAPSET, tap_set);
+
+ /* Enable auto re-tuning */
+ sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_RVSCNTL,
+ SH_MOBILE_SDHI_SCC_RVSCNTL_RVSEN |
+ sd_scc_read32(host, priv, SH_MOBILE_SDHI_SCC_RVSCNTL));
+
+ return 0;
+}
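
The selection above is a classic longest-run search over the doubled tap bitmap, centring on the best window and folding the index back with a modulo. The same algorithm as a standalone sketch (hypothetical helper; the original requires the run to span at least SH_MOBILE_SDHI_MAX_TAP taps):

#include <linux/bitops.h>

/* Return the centre tap of the longest run of set bits, or -1. */
static int foo_pick_tap(const unsigned long *taps, unsigned int tap_num,
			unsigned int min_run)
{
	unsigned int i, run = 0, best = 0, start = 0, end = 0;

	for (i = 0; i <= tap_num * 2; i++) {
		if (i < tap_num * 2 && test_bit(i, taps)) {
			run++;
			continue;
		}
		if (run > best) {		/* flush the run that just ended */
			best = run;
			start = i - run;
			end = i - 1;
		}
		run = 0;
	}

	if (best < min_run)
		return -1;

	return (start + end) / 2 % tap_num;	/* fold into 0..tap_num-1 */
}
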
+
+static bool sh_mobile_sdhi_check_scc_error(struct tmio_mmc_host *host)
+{
+ struct sh_mobile_sdhi *priv;
+
+ if (!(host->mmc->caps & MMC_CAP_UHS_SDR104))
+ return 0;
+
+ priv = host_to_priv(host);
+
+ /* Check SCC error */
+ if (sd_scc_read32(host, priv, SH_MOBILE_SDHI_SCC_RVSCNTL) &
+ SH_MOBILE_SDHI_SCC_RVSCNTL_RVSEN &&
+ sd_scc_read32(host, priv, SH_MOBILE_SDHI_SCC_RVSREQ) &
+ SH_MOBILE_SDHI_SCC_RVSREQ_RVSERR) {
+ /* Clear SCC error */
+ sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_RVSREQ, 0);
+ return true;
+ }
+
+ return false;
+}
+
+static void sh_mobile_sdhi_hw_reset(struct tmio_mmc_host *host)
+{
+ struct sh_mobile_sdhi *priv;
+
+ if (!(host->mmc->caps & MMC_CAP_UHS_SDR104))
+ return;
+
+ priv = host_to_priv(host);
+
+ /* Reset SCC */
+ sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, ~CLK_CTL_SCLKEN &
+ sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL));
+
+ sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_CKSEL,
+ ~SH_MOBILE_SDHI_SCC_CKSEL_DTSEL &
+ sd_scc_read32(host, priv, SH_MOBILE_SDHI_SCC_CKSEL));
+
+ sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, CLK_CTL_SCLKEN |
+ sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL));
+
+ sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_RVSCNTL,
+ ~SH_MOBILE_SDHI_SCC_RVSCNTL_RVSEN &
+ sd_scc_read32(host, priv, SH_MOBILE_SDHI_SCC_RVSCNTL));
+
+ sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_RVSCNTL,
+ ~SH_MOBILE_SDHI_SCC_RVSCNTL_RVSEN &
+ sd_scc_read32(host, priv, SH_MOBILE_SDHI_SCC_RVSCNTL));
+}
+
static int sh_mobile_sdhi_wait_idle(struct tmio_mmc_host *host)
{
int timeout = 1000;
@@ -325,7 +563,7 @@ static int sh_mobile_sdhi_probe(struct platform_device *pdev)
struct tmio_mmc_data *mmd = pdev->dev.platform_data;
struct tmio_mmc_host *host;
struct resource *res;
- int irq, ret, i = 0;
+ int irq, ret, i;
struct tmio_mmc_dma *dma_priv;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -364,6 +602,7 @@ static int sh_mobile_sdhi_probe(struct platform_device *pdev)
const struct sh_mobile_sdhi_of_data *of_data = of_id->data;
mmc_data->flags |= of_data->tmio_flags;
+ mmc_data->ocr_mask = of_data->tmio_ocr_mask;
mmc_data->capabilities |= of_data->capabilities;
mmc_data->capabilities2 |= of_data->capabilities2;
mmc_data->dma_rx_offset = of_data->dma_rx_offset;
@@ -384,6 +623,11 @@ static int sh_mobile_sdhi_probe(struct platform_device *pdev)
host->card_busy = sh_mobile_sdhi_card_busy;
host->start_signal_voltage_switch =
sh_mobile_sdhi_start_signal_voltage_switch;
+ host->init_tuning = sh_mobile_sdhi_init_tuning;
+ host->prepare_tuning = sh_mobile_sdhi_prepare_tuning;
+ host->select_tuning = sh_mobile_sdhi_select_tuning;
+ host->check_scc_error = sh_mobile_sdhi_check_scc_error;
+ host->hw_reset = sh_mobile_sdhi_hw_reset;
}
/* Originally registers were 16 bit apart, could be 32 or 64 nowadays */
@@ -424,6 +668,34 @@ static int sh_mobile_sdhi_probe(struct platform_device *pdev)
if (ret < 0)
goto efree;
+ if (host->mmc->caps & MMC_CAP_UHS_SDR104) {
+ host->mmc->caps |= MMC_CAP_HW_RESET;
+
+ if (of_id && of_id->data) {
+ const struct sh_mobile_sdhi_of_data *of_data;
+ const struct sh_mobile_sdhi_scc *taps;
+ bool hit = false;
+
+ of_data = of_id->data;
+ taps = of_data->taps;
+
+ for (i = 0; i < of_data->taps_num; i++) {
+ if (taps[i].clk_rate == 0 ||
+ taps[i].clk_rate == host->mmc->f_max) {
+ host->scc_tappos = taps[i].tap;
+ hit = true;
+ break;
+ }
+ }
+
+ if (!hit)
+ dev_warn(&host->pdev->dev, "Unknown clock rate for SDR104\n");
+
+ priv->scc_ctl = host->ctl + of_data->scc_offset;
+ }
+ }
+
+ i = 0;
while (1) {
irq = platform_get_irq(pdev, i);
if (irq < 0)
diff --git a/drivers/mmc/host/sunxi-mmc.c b/drivers/mmc/host/sunxi-mmc.c
index c0a5c676d0e8..b1d1303389a7 100644
--- a/drivers/mmc/host/sunxi-mmc.c
+++ b/drivers/mmc/host/sunxi-mmc.c
@@ -822,10 +822,13 @@ static void sunxi_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
break;
case MMC_POWER_UP:
- host->ferror = mmc_regulator_set_ocr(mmc, mmc->supply.vmmc,
- ios->vdd);
- if (host->ferror)
- return;
+ if (!IS_ERR(mmc->supply.vmmc)) {
+ host->ferror = mmc_regulator_set_ocr(mmc,
+ mmc->supply.vmmc,
+ ios->vdd);
+ if (host->ferror)
+ return;
+ }
if (!IS_ERR(mmc->supply.vqmmc)) {
host->ferror = regulator_enable(mmc->supply.vqmmc);
@@ -847,7 +850,9 @@ static void sunxi_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
case MMC_POWER_OFF:
dev_dbg(mmc_dev(mmc), "power off!\n");
sunxi_mmc_reset_host(host);
- mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);
+ if (!IS_ERR(mmc->supply.vmmc))
+ mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);
+
if (!IS_ERR(mmc->supply.vqmmc) && host->vqmmc_enabled)
regulator_disable(mmc->supply.vqmmc);
host->vqmmc_enabled = false;
diff --git a/drivers/mmc/host/tmio_mmc.h b/drivers/mmc/host/tmio_mmc.h
index 8e126afd988c..9e20bcf3aa8d 100644
--- a/drivers/mmc/host/tmio_mmc.h
+++ b/drivers/mmc/host/tmio_mmc.h
@@ -153,9 +153,12 @@ struct tmio_mmc_host {
struct mutex ios_lock; /* protect set_ios() context */
bool native_hotplug;
bool sdio_irq_enabled;
+ u32 scc_tappos;
- int (*write16_hook)(struct tmio_mmc_host *host, int addr);
+ /* Mandatory callback */
int (*clk_enable)(struct tmio_mmc_host *host);
+
+ /* Optional callbacks */
unsigned int (*clk_update)(struct tmio_mmc_host *host,
unsigned int new_clock);
void (*clk_disable)(struct tmio_mmc_host *host);
@@ -164,6 +167,21 @@ struct tmio_mmc_host {
int (*card_busy)(struct mmc_host *mmc);
int (*start_signal_voltage_switch)(struct mmc_host *mmc,
struct mmc_ios *ios);
+ int (*write16_hook)(struct tmio_mmc_host *host, int addr);
+ void (*hw_reset)(struct tmio_mmc_host *host);
+ void (*prepare_tuning)(struct tmio_mmc_host *host, unsigned long tap);
+ bool (*check_scc_error)(struct tmio_mmc_host *host);
+
+ /*
+ * Callbacks required for tuning to occur; tuning itself is optional
+ * for SDR50 and mandatory for SDR104.
+ */
+ unsigned int (*init_tuning)(struct tmio_mmc_host *host);
+ int (*select_tuning)(struct tmio_mmc_host *host);
+
+ /* Tuning values: 1 for success, 0 for failure */
+ DECLARE_BITMAP(taps, BITS_PER_BYTE * sizeof(long));
+ unsigned int tap_num;
};
struct tmio_mmc_host *tmio_mmc_host_alloc(struct platform_device *pdev);
@@ -245,6 +263,12 @@ static inline u32 sd_ctrl_read16_and_16_as_32(struct tmio_mmc_host *host, int ad
readw(host->ctl + ((addr + 2) << host->bus_shift)) << 16;
}
+static inline void sd_ctrl_read32_rep(struct tmio_mmc_host *host, int addr,
+ u32 *buf, int count)
+{
+ readsl(host->ctl + (addr << host->bus_shift), buf, count);
+}
+
static inline void sd_ctrl_write16(struct tmio_mmc_host *host, int addr, u16 val)
{
/* If there is a hook and it returns non-zero then there
@@ -267,4 +291,10 @@ static inline void sd_ctrl_write32_as_16_and_16(struct tmio_mmc_host *host, int
writew(val >> 16, host->ctl + ((addr + 2) << host->bus_shift));
}
+static inline void sd_ctrl_write32_rep(struct tmio_mmc_host *host, int addr,
+ const u32 *buf, int count)
+{
+ writesl(host->ctl + (addr << host->bus_shift), buf, count);
+}
+
#endif
diff --git a/drivers/mmc/host/tmio_mmc_pio.c b/drivers/mmc/host/tmio_mmc_pio.c
index 700567603107..2064fa1a5bf1 100644
--- a/drivers/mmc/host/tmio_mmc_pio.c
+++ b/drivers/mmc/host/tmio_mmc_pio.c
@@ -22,7 +22,6 @@
* TODO:
* Investigate using a workqueue for PIO transfers
* Eliminate FIXMEs
- * SDIO support
* Better Power management
* Handle MMC errors better
* double buffer support
@@ -36,6 +35,7 @@
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/mfd/tmio.h>
+#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/slot-gpio.h>
@@ -298,6 +298,9 @@ static void tmio_mmc_finish_request(struct tmio_mmc_host *host)
if (mrq->cmd->error || (mrq->data && mrq->data->error))
tmio_mmc_abort_dma(host);
+ if (host->check_scc_error)
+ host->check_scc_error(host);
+
mmc_request_done(host->mmc, mrq);
}
@@ -393,6 +396,36 @@ static void tmio_mmc_transfer_data(struct tmio_mmc_host *host,
/*
* Transfer the data
*/
+ if (host->pdata->flags & TMIO_MMC_32BIT_DATA_PORT) {
+ u8 data[4] = { };
+
+ if (is_read)
+ sd_ctrl_read32_rep(host, CTL_SD_DATA_PORT, (u32 *)buf,
+ count >> 2);
+ else
+ sd_ctrl_write32_rep(host, CTL_SD_DATA_PORT, (u32 *)buf,
+ count >> 2);
+
+ /* done if count was a multiple of 4 */
+ if (!(count & 0x3))
+ return;
+
+ buf8 = (u8 *)(buf + (count >> 2));
+ count %= 4;
+
+ if (is_read) {
+ sd_ctrl_read32_rep(host, CTL_SD_DATA_PORT,
+ (u32 *)data, 1);
+ memcpy(buf8, data, count);
+ } else {
+ memcpy(data, buf8, count);
+ sd_ctrl_write32_rep(host, CTL_SD_DATA_PORT,
+ (u32 *)data, 1);
+ }
+
+ return;
+ }
+
if (is_read)
sd_ctrl_read16_rep(host, CTL_SD_DATA_PORT, buf, count >> 1);
else
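
The 32-bit branch added above has to special-case lengths that are not a multiple of four, since readsl()/writesl() only move whole words; the last word goes through a small bounce buffer. A condensed sketch of the read side (hypothetical function; e.g. count = 10 moves two words plus two bytes):

#include <linux/io.h>
#include <linux/string.h>

static void foo_fifo_read(void __iomem *port, u8 *buf, unsigned int count)
{
	u32 bounce;

	readsl(port, buf, count >> 2);		/* whole 32-bit words */
	if (count & 0x3) {
		readsl(port, &bounce, 1);	/* one final padded word */
		memcpy(buf + (count & ~0x3), &bounce, count & 0x3);
	}
}
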
@@ -522,7 +555,7 @@ void tmio_mmc_do_data_irq(struct tmio_mmc_host *host)
schedule_work(&host->done);
}
-static void tmio_mmc_data_irq(struct tmio_mmc_host *host)
+static void tmio_mmc_data_irq(struct tmio_mmc_host *host, unsigned int stat)
{
struct mmc_data *data;
spin_lock(&host->lock);
@@ -531,6 +564,9 @@ static void tmio_mmc_data_irq(struct tmio_mmc_host *host)
if (!data)
goto out;
+ if (stat & TMIO_STAT_CRCFAIL || stat & TMIO_STAT_STOPBIT_ERR ||
+ stat & TMIO_STAT_TXUNDERRUN)
+ data->error = -EILSEQ;
if (host->chan_tx && (data->flags & MMC_DATA_WRITE) && !host->force_pio) {
u32 status = sd_ctrl_read16_and_16_as_32(host, CTL_STATUS);
bool done = false;
@@ -579,8 +615,6 @@ static void tmio_mmc_cmd_irq(struct tmio_mmc_host *host,
goto out;
}
- host->cmd = NULL;
-
/* This controller is sicker than the PXA one. Not only do we need to
* drop the top 8 bits of the first response word, we also need to
* modify the order of the response for short response command types.
@@ -600,14 +634,16 @@ static void tmio_mmc_cmd_irq(struct tmio_mmc_host *host,
if (stat & TMIO_STAT_CMDTIMEOUT)
cmd->error = -ETIMEDOUT;
- else if (stat & TMIO_STAT_CRCFAIL && cmd->flags & MMC_RSP_CRC)
+ else if ((stat & TMIO_STAT_CRCFAIL && cmd->flags & MMC_RSP_CRC) ||
+ stat & TMIO_STAT_STOPBIT_ERR ||
+ stat & TMIO_STAT_CMD_IDX_ERR)
cmd->error = -EILSEQ;
/* If there is data to handle we enable data IRQs here, and
* we will ultimately finish the request in the data_end handler.
* If there's no data or we encountered an error, finish now.
*/
- if (host->data && !cmd->error) {
+ if (host->data && (!cmd->error || cmd->error == -EILSEQ)) {
if (host->data->flags & MMC_DATA_READ) {
if (host->force_pio || !host->chan_rx)
tmio_mmc_enable_mmc_irqs(host, TMIO_MASK_READOP);
@@ -668,7 +704,7 @@ static bool __tmio_mmc_sdcard_irq(struct tmio_mmc_host *host,
/* Data transfer completion */
if (ireg & TMIO_STAT_DATAEND) {
tmio_mmc_ack_mmc_irqs(host, TMIO_STAT_DATAEND);
- tmio_mmc_data_irq(host);
+ tmio_mmc_data_irq(host, status);
return true;
}
@@ -687,7 +723,7 @@ static void tmio_mmc_sdio_irq(int irq, void *devid)
return;
status = sd_ctrl_read16(host, CTL_SDIO_STATUS);
- ireg = status & TMIO_SDIO_MASK_ALL & ~host->sdcard_irq_mask;
+ ireg = status & TMIO_SDIO_MASK_ALL & ~host->sdio_irq_mask;
sdio_status = status & ~TMIO_SDIO_MASK_ALL;
if (pdata->flags & TMIO_MMC_SDIO_STATUS_QUIRK)
@@ -756,6 +792,63 @@ static int tmio_mmc_start_data(struct tmio_mmc_host *host,
return 0;
}
+static void tmio_mmc_hw_reset(struct mmc_host *mmc)
+{
+ struct tmio_mmc_host *host = mmc_priv(mmc);
+
+ if (host->hw_reset)
+ host->hw_reset(host);
+}
+
+static int tmio_mmc_execute_tuning(struct mmc_host *mmc, u32 opcode)
+{
+ struct tmio_mmc_host *host = mmc_priv(mmc);
+ int i, ret = 0;
+
+ if (!host->tap_num) {
+ if (!host->init_tuning || !host->select_tuning)
+ /* Tuning is not supported */
+ goto out;
+
+ host->tap_num = host->init_tuning(host);
+ if (!host->tap_num)
+ /* Tuning is not supported */
+ goto out;
+ }
+
+ if (host->tap_num * 2 >= sizeof(host->taps) * BITS_PER_BYTE) {
+ dev_warn_once(&host->pdev->dev,
+ "Too many taps, skipping tuning. Please consider updating size of taps field of tmio_mmc_host\n");
+ goto out;
+ }
+
+ bitmap_zero(host->taps, host->tap_num * 2);
+
+ /* Issue CMD19 twice for each tap */
+ for (i = 0; i < 2 * host->tap_num; i++) {
+ if (host->prepare_tuning)
+ host->prepare_tuning(host, i % host->tap_num);
+
+ ret = mmc_send_tuning(mmc, opcode, NULL);
+ if (ret && ret != -EILSEQ)
+ goto out;
+ if (ret == 0)
+ set_bit(i, host->taps);
+
+ mdelay(1);
+ }
+
+ ret = host->select_tuning(host);
+
+out:
+ if (ret < 0) {
+ dev_warn(&host->pdev->dev, "Tuning procedure failed\n");
+ tmio_mmc_hw_reset(mmc);
+ }
+
+ return ret;
+}
+
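host->select_tuning() is SoC-specific and not part of this hunk; a common policy is to pick the centre of the longest run of passing taps recorded in the bitmap built above. A minimal sketch of such a policy, assuming a hypothetical set_tap() helper for the SoC-specific tap register write:

	static int example_select_tuning(struct tmio_mmc_host *host)
	{
		int i, len = 0, start = 0, best_len = 0, best_start = 0;

		/* find the longest run of passing taps (ignores wrap-around) */
		for (i = 0; i < 2 * host->tap_num; i++) {
			if (test_bit(i, host->taps)) {
				if (!len)
					start = i;
				if (++len > best_len) {
					best_len = len;
					best_start = start;
				}
			} else {
				len = 0;
			}
		}

		if (!best_len)
			return -EIO;	/* nothing passed; caller falls back to hw_reset */

		/* set_tap() is a stand-in, not part of this patch */
		return set_tap(host, (best_start + best_len / 2) % host->tap_num);
	}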
/* Process requests from the MMC layer */
static void tmio_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
@@ -972,6 +1065,8 @@ static struct mmc_host_ops tmio_mmc_ops = {
.get_cd = mmc_gpio_get_cd,
.enable_sdio_irq = tmio_mmc_enable_sdio_irq,
.multi_io_quirk = tmio_multi_io_quirk,
+ .hw_reset = tmio_mmc_hw_reset,
+ .execute_tuning = tmio_mmc_execute_tuning,
};
static int tmio_mmc_init_ocr(struct tmio_mmc_host *host)
@@ -1218,6 +1313,11 @@ int tmio_mmc_host_runtime_suspend(struct device *dev)
}
EXPORT_SYMBOL(tmio_mmc_host_runtime_suspend);
+static bool tmio_mmc_can_retune(struct tmio_mmc_host *host)
+{
+ return host->tap_num && mmc_can_retune(host->mmc);
+}
+
int tmio_mmc_host_runtime_resume(struct device *dev)
{
struct mmc_host *mmc = dev_get_drvdata(dev);
@@ -1231,6 +1331,9 @@ int tmio_mmc_host_runtime_resume(struct device *dev)
tmio_mmc_enable_dma(host, true);
+ if (tmio_mmc_can_retune(host) && host->select_tuning(host))
+ dev_warn(&host->pdev->dev, "Tuning selection failed\n");
+
return 0;
}
EXPORT_SYMBOL(tmio_mmc_host_runtime_resume);
diff --git a/drivers/mmc/host/wbsd.c b/drivers/mmc/host/wbsd.c
index c3fd16d997ca..80a3b11f3217 100644
--- a/drivers/mmc/host/wbsd.c
+++ b/drivers/mmc/host/wbsd.c
@@ -1395,23 +1395,25 @@ static void wbsd_request_dma(struct wbsd_host *host, int dma)
*/
host->dma_addr = dma_map_single(mmc_dev(host->mmc), host->dma_buffer,
WBSD_DMA_SIZE, DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(mmc_dev(host->mmc), host->dma_addr))
+ goto kfree;
/*
* ISA DMA must be aligned on a 64k basis.
*/
if ((host->dma_addr & 0xffff) != 0)
- goto kfree;
+ goto unmap;
/*
* ISA cannot access memory above 16 MB.
*/
else if (host->dma_addr >= 0x1000000)
- goto kfree;
+ goto unmap;
host->dma = dma;
return;
-kfree:
+unmap:
/*
* If we've gotten here then there is some kind of alignment bug
*/
@@ -1421,6 +1423,7 @@ kfree:
WBSD_DMA_SIZE, DMA_BIDIRECTIONAL);
host->dma_addr = 0;
+kfree:
kfree(host->dma_buffer);
host->dma_buffer = NULL;
@@ -1434,7 +1437,7 @@ err:
static void wbsd_release_dma(struct wbsd_host *host)
{
- if (host->dma_addr) {
+ if (!dma_mapping_error(mmc_dev(host->mmc), host->dma_addr)) {
dma_unmap_single(mmc_dev(host->mmc), host->dma_addr,
WBSD_DMA_SIZE, DMA_BIDIRECTIONAL);
}
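The relabelled error paths above encode a general rule: dma_map_single() can fail, the handle must be checked with dma_mapping_error() before any other use, and each unwind step may only undo what actually succeeded. The same pattern in condensed, driver-neutral form (a sketch, not wbsd code; DMA_SIZE and dma_addr_usable() are placeholders for the driver's own size and ISA alignment/reach checks):

	void *buf;
	dma_addr_t addr;

	buf = kmalloc(DMA_SIZE, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	addr = dma_map_single(dev, buf, DMA_SIZE, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, addr))
		goto free_buf;			/* nothing mapped yet, only free */

	if (!dma_addr_usable(addr))
		goto unmap;			/* mapped but unusable: undo both */

	return 0;

	unmap:
		dma_unmap_single(dev, addr, DMA_SIZE, DMA_BIDIRECTIONAL);
	free_buf:
		kfree(buf);
		return -ENOMEM;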
diff --git a/drivers/mtd/bcm47xxpart.c b/drivers/mtd/bcm47xxpart.c
index 377947580203..283ff7e17a0f 100644
--- a/drivers/mtd/bcm47xxpart.c
+++ b/drivers/mtd/bcm47xxpart.c
@@ -229,12 +229,10 @@ static int bcm47xxpart_parse(struct mtd_info *master,
last_trx_part = curr_part - 1;
- /*
- * We have whole TRX scanned, skip to the next part. Use
- * roundown (not roundup), as the loop will increase
- * offset in next step.
- */
- offset = rounddown(offset + trx->length, blocksize);
+ /* Jump to the end of TRX */
+ offset = roundup(offset + trx->length, blocksize);
+ /* Next loop iteration will increase the offset */
+ offset -= blocksize;
continue;
}
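The difference only shows when offset + trx->length is already block-aligned: rounddown() then lands on the boundary itself, and the loop's unconditional increment skips the first block after the image. Illustrative numbers:

	/* blocksize = 0x10000, TRX ends exactly at 0x30000 */
	offset = rounddown(0x30000, 0x10000);		/* old: 0x30000 */
	/* loop increment -> 0x40000: block at 0x30000 never scanned */

	offset = roundup(0x30000, 0x10000) - 0x10000;	/* new: 0x20000 */
	/* loop increment -> 0x30000: scanning resumes right after TRX */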
diff --git a/drivers/mtd/devices/bcm47xxsflash.c b/drivers/mtd/devices/bcm47xxsflash.c
index 1c65c15b31a1..514be04c0b6c 100644
--- a/drivers/mtd/devices/bcm47xxsflash.c
+++ b/drivers/mtd/devices/bcm47xxsflash.c
@@ -296,16 +296,30 @@ static int bcm47xxsflash_bcma_probe(struct platform_device *pdev)
dev_err(dev, "can't request region for resource %pR\n", res);
return -EBUSY;
}
- b47s->window = ioremap_cache(res->start, resource_size(res));
- if (!b47s->window) {
- dev_err(dev, "ioremap failed for resource %pR\n", res);
- return -ENOMEM;
- }
b47s->bcma_cc = container_of(sflash, struct bcma_drv_cc, sflash);
b47s->cc_read = bcm47xxsflash_bcma_cc_read;
b47s->cc_write = bcm47xxsflash_bcma_cc_write;
+ /*
+ * On old MIPS devices cache was magically invalidated when needed,
+ * allowing us to use cached access and gain some performance. Trying
+ * the same on ARM based BCM53573 results in flash corruptions, we need
+ * to use uncached access for it.
+ *
+ * It may be arch specific, but right now there is only 1 ARM SoC using
+ * this driver, so let's follow Broadcom's reference code and check
+ * ChipCommon revision.
+ */
+ if (b47s->bcma_cc->core->id.rev == 54)
+ b47s->window = ioremap_nocache(res->start, resource_size(res));
+ else
+ b47s->window = ioremap_cache(res->start, resource_size(res));
+ if (!b47s->window) {
+ dev_err(dev, "ioremap failed for resource %pR\n", res);
+ return -ENOMEM;
+ }
+
switch (b47s->bcma_cc->capabilities & BCMA_CC_CAP_FLASHT) {
case BCMA_CC_FLASHT_STSER:
b47s->type = BCM47XXSFLASH_TYPE_ST;
diff --git a/drivers/mtd/maps/sc520cdp.c b/drivers/mtd/maps/sc520cdp.c
index 093edd51bdc7..9b1c13aa9f20 100644
--- a/drivers/mtd/maps/sc520cdp.c
+++ b/drivers/mtd/maps/sc520cdp.c
@@ -227,7 +227,7 @@ static void sc520cdp_setup_par(void)
static int __init init_sc520cdp(void)
{
- int i, devices_found = 0;
+ int i, j, devices_found = 0;
#ifdef REPROGRAM_PAR
/* reprogram PAR registers so flash appears at the desired addresses */
@@ -243,6 +243,12 @@ static int __init init_sc520cdp(void)
if (!sc520cdp_map[i].virt) {
printk("Failed to ioremap_nocache\n");
+ for (j = 0; j < i; j++) {
+ if (mymtd[j]) {
+ map_destroy(mymtd[j]);
+ iounmap(sc520cdp_map[j].virt);
+ }
+ }
return -EIO;
}
diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c
index d46e4adf6d2b..052772f7caef 100644
--- a/drivers/mtd/mtdcore.c
+++ b/drivers/mtd/mtdcore.c
@@ -46,8 +46,7 @@
#include "mtdcore.h"
-static struct backing_dev_info mtd_bdi = {
-};
+static struct backing_dev_info *mtd_bdi;
#ifdef CONFIG_PM_SLEEP
@@ -500,7 +499,7 @@ int add_mtd_device(struct mtd_info *mtd)
if (WARN_ONCE(mtd->backing_dev_info, "MTD already registered\n"))
return -EEXIST;
- mtd->backing_dev_info = &mtd_bdi;
+ mtd->backing_dev_info = mtd_bdi;
BUG_ON(mtd->writesize == 0);
mutex_lock(&mtd_table_mutex);
@@ -1274,8 +1273,8 @@ static int mtd_ooblayout_get_bytes(struct mtd_info *mtd, u8 *buf,
int section,
struct mtd_oob_region *oobregion))
{
- struct mtd_oob_region oobregion = { };
- int section = 0, ret;
+ struct mtd_oob_region oobregion;
+ int section, ret;
ret = mtd_ooblayout_find_region(mtd, start, &section,
&oobregion, iter);
@@ -1283,7 +1282,7 @@ static int mtd_ooblayout_get_bytes(struct mtd_info *mtd, u8 *buf,
while (!ret) {
int cnt;
- cnt = oobregion.length > nbytes ? nbytes : oobregion.length;
+ cnt = min_t(int, nbytes, oobregion.length);
memcpy(buf, oobbuf + oobregion.offset, cnt);
buf += cnt;
nbytes -= cnt;
@@ -1317,8 +1316,8 @@ static int mtd_ooblayout_set_bytes(struct mtd_info *mtd, const u8 *buf,
int section,
struct mtd_oob_region *oobregion))
{
- struct mtd_oob_region oobregion = { };
- int section = 0, ret;
+ struct mtd_oob_region oobregion;
+ int section, ret;
ret = mtd_ooblayout_find_region(mtd, start, &section,
&oobregion, iter);
@@ -1326,7 +1325,7 @@ static int mtd_ooblayout_set_bytes(struct mtd_info *mtd, const u8 *buf,
while (!ret) {
int cnt;
- cnt = oobregion.length > nbytes ? nbytes : oobregion.length;
+ cnt = min_t(int, nbytes, oobregion.length);
memcpy(oobbuf + oobregion.offset, buf, cnt);
buf += cnt;
nbytes -= cnt;
@@ -1354,7 +1353,7 @@ static int mtd_ooblayout_count_bytes(struct mtd_info *mtd,
int section,
struct mtd_oob_region *oobregion))
{
- struct mtd_oob_region oobregion = { };
+ struct mtd_oob_region oobregion;
int section = 0, ret, nbytes = 0;
while (1) {
@@ -1771,18 +1770,20 @@ static const struct file_operations mtd_proc_ops = {
/*====================================================================*/
/* Init code */
-static int __init mtd_bdi_init(struct backing_dev_info *bdi, const char *name)
+static struct backing_dev_info * __init mtd_bdi_init(char *name)
{
+ struct backing_dev_info *bdi;
int ret;
- ret = bdi_init(bdi);
- if (!ret)
- ret = bdi_register(bdi, NULL, "%s", name);
+ bdi = kzalloc(sizeof(*bdi), GFP_KERNEL);
+ if (!bdi)
+ return ERR_PTR(-ENOMEM);
+ ret = bdi_setup_and_register(bdi, name);
if (ret)
- bdi_destroy(bdi);
+ kfree(bdi);
- return ret;
+ return ret ? ERR_PTR(ret) : bdi;
}
static struct proc_dir_entry *proc_mtd;
@@ -1795,9 +1796,11 @@ static int __init init_mtd(void)
if (ret)
goto err_reg;
- ret = mtd_bdi_init(&mtd_bdi, "mtd");
- if (ret)
+ mtd_bdi = mtd_bdi_init("mtd");
+ if (IS_ERR(mtd_bdi)) {
+ ret = PTR_ERR(mtd_bdi);
goto err_bdi;
+ }
proc_mtd = proc_create("mtd", 0, NULL, &mtd_proc_ops);
@@ -1810,6 +1813,8 @@ static int __init init_mtd(void)
out_procfs:
if (proc_mtd)
remove_proc_entry("mtd", NULL);
+ bdi_destroy(mtd_bdi);
+ kfree(mtd_bdi);
err_bdi:
class_unregister(&mtd_class);
err_reg:
@@ -1823,7 +1828,8 @@ static void __exit cleanup_mtd(void)
if (proc_mtd)
remove_proc_entry("mtd", NULL);
class_unregister(&mtd_class);
- bdi_destroy(&mtd_bdi);
+ bdi_destroy(mtd_bdi);
+ kfree(mtd_bdi);
idr_destroy(&mtd_idr);
}
diff --git a/drivers/mtd/mtdswap.c b/drivers/mtd/mtdswap.c
index cb06bdd21a1b..c40e2c951758 100644
--- a/drivers/mtd/mtdswap.c
+++ b/drivers/mtd/mtdswap.c
@@ -587,7 +587,7 @@ retry:
ret = wait_event_interruptible(wq, erase.state == MTD_ERASE_DONE ||
erase.state == MTD_ERASE_FAILED);
if (ret) {
- dev_err(d->dev, "Interrupted erase block %#llx erassure on %s",
+ dev_err(d->dev, "Interrupted erase block %#llx erasure on %s\n",
erase.addr, mtd->name);
return -EINTR;
}
diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig
index 7b7a887b4709..353a9ddf6b97 100644
--- a/drivers/mtd/nand/Kconfig
+++ b/drivers/mtd/nand/Kconfig
@@ -179,15 +179,6 @@ config MTD_NAND_S3C2410_DEBUG
help
Enable debugging of the S3C NAND driver
-config MTD_NAND_S3C2410_HWECC
- bool "Samsung S3C NAND Hardware ECC"
- depends on MTD_NAND_S3C2410
- help
- Enable the use of the controller's internal ECC generator when
- using NAND. Early versions of the chips have had problems with
- incorrect ECC generation, and if using these, the default of
- software ECC is preferable.
-
config MTD_NAND_NDFC
tristate "NDFC NanD Flash Controller"
depends on 4xx
@@ -205,6 +196,13 @@ config MTD_NAND_S3C2410_CLKSTOP
when the NAND chip is selected or released, but will save
approximately 5mA of power when there is nothing happening.
+config MTD_NAND_TANGO
+ tristate "NAND Flash support for Tango chips"
+ depends on ARCH_TANGO || COMPILE_TEST
+ depends on HAS_DMA
+ help
+ Enables the NAND Flash controller on Tango chips.
+
config MTD_NAND_DISKONCHIP
tristate "DiskOnChip 2000, Millennium and Millennium Plus (NAND reimplementation)"
depends on HAS_IOMEM
@@ -426,6 +424,11 @@ config MTD_NAND_ORION
No board specific support is done by this driver, each board
must advertise a platform_device for the driver to attach.
+config MTD_NAND_OXNAS
+ tristate "NAND Flash support for Oxford Semiconductor SoC"
+ help
+ This enables the NAND flash controller on Oxford Semiconductor SoCs.
+
config MTD_NAND_FSL_ELBC
tristate "NAND support for Freescale eLBC controllers"
depends on FSL_SOC
diff --git a/drivers/mtd/nand/Makefile b/drivers/mtd/nand/Makefile
index cafde6f3d957..19a66e404d5b 100644
--- a/drivers/mtd/nand/Makefile
+++ b/drivers/mtd/nand/Makefile
@@ -16,6 +16,7 @@ obj-$(CONFIG_MTD_NAND_DENALI_DT) += denali_dt.o
obj-$(CONFIG_MTD_NAND_AU1550) += au1550nd.o
obj-$(CONFIG_MTD_NAND_BF5XX) += bf5xx_nand.o
obj-$(CONFIG_MTD_NAND_S3C2410) += s3c2410.o
+obj-$(CONFIG_MTD_NAND_TANGO) += tango_nand.o
obj-$(CONFIG_MTD_NAND_DAVINCI) += davinci_nand.o
obj-$(CONFIG_MTD_NAND_DISKONCHIP) += diskonchip.o
obj-$(CONFIG_MTD_NAND_DOCG4) += docg4.o
@@ -35,6 +36,7 @@ obj-$(CONFIG_MTD_NAND_TMIO) += tmio_nand.o
obj-$(CONFIG_MTD_NAND_PLATFORM) += plat_nand.o
obj-$(CONFIG_MTD_NAND_PASEMI) += pasemi_nand.o
obj-$(CONFIG_MTD_NAND_ORION) += orion_nand.o
+obj-$(CONFIG_MTD_NAND_OXNAS) += oxnas_nand.o
obj-$(CONFIG_MTD_NAND_FSL_ELBC) += fsl_elbc_nand.o
obj-$(CONFIG_MTD_NAND_FSL_IFC) += fsl_ifc_nand.o
obj-$(CONFIG_MTD_NAND_FSL_UPM) += fsl_upm.o
diff --git a/drivers/mtd/nand/ams-delta.c b/drivers/mtd/nand/ams-delta.c
index 78e12cc8bac2..5d6c26f3cf7f 100644
--- a/drivers/mtd/nand/ams-delta.c
+++ b/drivers/mtd/nand/ams-delta.c
@@ -234,10 +234,9 @@ static int ams_delta_init(struct platform_device *pdev)
goto out_gpio;
/* Scan to find existence of the device */
- if (nand_scan(ams_delta_mtd, 1)) {
- err = -ENXIO;
+ err = nand_scan(ams_delta_mtd, 1);
+ if (err)
goto out_mtd;
- }
/* Register the partitions */
mtd_device_register(ams_delta_mtd, partition_info,
diff --git a/drivers/mtd/nand/atmel_nand.c b/drivers/mtd/nand/atmel_nand.c
index 68b9160108c9..9ebd5ecefea6 100644
--- a/drivers/mtd/nand/atmel_nand.c
+++ b/drivers/mtd/nand/atmel_nand.c
@@ -2267,10 +2267,9 @@ static int atmel_nand_probe(struct platform_device *pdev)
dev_info(host->dev, "No DMA support for NAND access.\n");
/* first scan to find the device and get the page size */
- if (nand_scan_ident(mtd, 1, NULL)) {
- res = -ENXIO;
+ res = nand_scan_ident(mtd, 1, NULL);
+ if (res)
goto err_scan_ident;
- }
if (host->board.on_flash_bbt || on_flash_bbt)
nand_chip->bbt_options |= NAND_BBT_USE_FLASH;
@@ -2304,10 +2303,9 @@ static int atmel_nand_probe(struct platform_device *pdev)
}
/* second phase scan */
- if (nand_scan_tail(mtd)) {
- res = -ENXIO;
+ res = nand_scan_tail(mtd);
+ if (res)
goto err_scan_tail;
- }
mtd->name = "atmel_nand";
res = mtd_device_register(mtd, host->board.parts,
diff --git a/drivers/mtd/nand/brcmnand/brcmnand.c b/drivers/mtd/nand/brcmnand/brcmnand.c
index 9d2424bfdbf5..42ebd73f821d 100644
--- a/drivers/mtd/nand/brcmnand/brcmnand.c
+++ b/drivers/mtd/nand/brcmnand/brcmnand.c
@@ -2209,8 +2209,9 @@ static int brcmnand_init_cs(struct brcmnand_host *host, struct device_node *dn)
nand_writereg(ctrl, cfg_offs,
nand_readreg(ctrl, cfg_offs) & ~CFG_BUS_WIDTH);
- if (nand_scan_ident(mtd, 1, NULL))
- return -ENXIO;
+ ret = nand_scan_ident(mtd, 1, NULL);
+ if (ret)
+ return ret;
chip->options |= NAND_NO_SUBPAGE_WRITE;
/*
@@ -2234,8 +2235,9 @@ static int brcmnand_init_cs(struct brcmnand_host *host, struct device_node *dn)
if (ret)
return ret;
- if (nand_scan_tail(mtd))
- return -ENXIO;
+ ret = nand_scan_tail(mtd);
+ if (ret)
+ return ret;
return mtd_device_register(mtd, NULL, 0);
}
diff --git a/drivers/mtd/nand/cafe_nand.c b/drivers/mtd/nand/cafe_nand.c
index 0b0c93702abb..d40c32d311d8 100644
--- a/drivers/mtd/nand/cafe_nand.c
+++ b/drivers/mtd/nand/cafe_nand.c
@@ -725,10 +725,9 @@ static int cafe_nand_probe(struct pci_dev *pdev,
usedma = 0;
/* Scan to find existence of the device */
- if (nand_scan_ident(mtd, 2, NULL)) {
- err = -ENXIO;
+ err = nand_scan_ident(mtd, 2, NULL);
+ if (err)
goto out_irq;
- }
cafe->dmabuf = dma_alloc_coherent(&cafe->pdev->dev,
2112 + sizeof(struct nand_buffers) +
diff --git a/drivers/mtd/nand/cmx270_nand.c b/drivers/mtd/nand/cmx270_nand.c
index 49133783ca53..226ac0bcafc6 100644
--- a/drivers/mtd/nand/cmx270_nand.c
+++ b/drivers/mtd/nand/cmx270_nand.c
@@ -195,9 +195,9 @@ static int __init cmx270_init(void)
this->write_buf = cmx270_write_buf;
/* Scan to find existence of the device */
- if (nand_scan (cmx270_nand_mtd, 1)) {
+ ret = nand_scan(cmx270_nand_mtd, 1);
+ if (ret) {
pr_notice("No NAND device\n");
- ret = -ENXIO;
goto err_scan;
}
diff --git a/drivers/mtd/nand/cs553x_nand.c b/drivers/mtd/nand/cs553x_nand.c
index a65e4e0f57a1..594b28684138 100644
--- a/drivers/mtd/nand/cs553x_nand.c
+++ b/drivers/mtd/nand/cs553x_nand.c
@@ -242,10 +242,9 @@ static int __init cs553x_init_one(int cs, int mmio, unsigned long adr)
}
/* Scan to find existence of the device */
- if (nand_scan(new_mtd, 1)) {
- err = -ENXIO;
+ err = nand_scan(new_mtd, 1);
+ if (err)
goto out_free;
- }
cs553x_mtd[cs] = new_mtd;
goto out;
diff --git a/drivers/mtd/nand/denali.c b/drivers/mtd/nand/denali.c
index 0476ae8776d9..73b9d4e2dca0 100644
--- a/drivers/mtd/nand/denali.c
+++ b/drivers/mtd/nand/denali.c
@@ -21,7 +21,6 @@
#include <linux/dma-mapping.h>
#include <linux/wait.h>
#include <linux/mutex.h>
-#include <linux/slab.h>
#include <linux/mtd/mtd.h>
#include <linux/module.h>
@@ -182,9 +181,6 @@ static uint16_t denali_nand_reset(struct denali_nand_info *denali)
{
int i;
- dev_dbg(denali->dev, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
for (i = 0; i < denali->max_banks; i++)
iowrite32(INTR_STATUS__RST_COMP | INTR_STATUS__TIME_OUT,
denali->flash_reg + INTR_STATUS(i));
@@ -234,9 +230,6 @@ static void nand_onfi_timing_set(struct denali_nand_info *denali,
uint16_t acc_clks;
uint16_t addr_2_data, re_2_we, re_2_re, we_2_re, cs_cnt;
- dev_dbg(denali->dev, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
en_lo = CEIL_DIV(Trp[mode], CLK_X);
en_hi = CEIL_DIV(Treh[mode], CLK_X);
#if ONFI_BLOOM_TIME
@@ -403,7 +396,7 @@ static void get_hynix_nand_para(struct denali_nand_info *denali,
break;
default:
dev_warn(denali->dev,
- "Spectra: Unknown Hynix NAND (Device ID: 0x%x).\n"
+ "Unknown Hynix NAND (Device ID: 0x%x).\n"
"Will use default parameter values instead.\n",
device_id);
}
@@ -474,33 +467,6 @@ static void detect_max_banks(struct denali_nand_info *denali)
denali->max_banks = 1 << (features & FEATURES__N_BANKS);
}
-static void detect_partition_feature(struct denali_nand_info *denali)
-{
- /*
- * For MRST platform, denali->fwblks represent the
- * number of blocks firmware is taken,
- * FW is in protect partition and MTD driver has no
- * permission to access it. So let driver know how many
- * blocks it can't touch.
- */
- if (ioread32(denali->flash_reg + FEATURES) & FEATURES__PARTITION) {
- if ((ioread32(denali->flash_reg + PERM_SRC_ID(1)) &
- PERM_SRC_ID__SRCID) == SPECTRA_PARTITION_ID) {
- denali->fwblks =
- ((ioread32(denali->flash_reg + MIN_MAX_BANK(1)) &
- MIN_MAX_BANK__MIN_VALUE) *
- denali->blksperchip)
- +
- (ioread32(denali->flash_reg + MIN_BLK_ADDR(1)) &
- MIN_BLK_ADDR__VALUE);
- } else {
- denali->fwblks = SPECTRA_START_BLOCK;
- }
- } else {
- denali->fwblks = SPECTRA_START_BLOCK;
- }
-}
-
static uint16_t denali_nand_timing_set(struct denali_nand_info *denali)
{
uint16_t status = PASS;
@@ -508,9 +474,6 @@ static uint16_t denali_nand_timing_set(struct denali_nand_info *denali)
uint8_t maf_id, device_id;
int i;
- dev_dbg(denali->dev, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
/*
* Use read id method to get device ID and other params.
* For some NAND chips, controller can't report the correct
@@ -552,8 +515,6 @@ static uint16_t denali_nand_timing_set(struct denali_nand_info *denali)
find_valid_banks(denali);
- detect_partition_feature(denali);
-
/*
* If the user specified to override the default timings
* with a specific ONFI mode, we apply those changes here.
@@ -567,9 +528,6 @@ static uint16_t denali_nand_timing_set(struct denali_nand_info *denali)
static void denali_set_intr_modes(struct denali_nand_info *denali,
uint16_t INT_ENABLE)
{
- dev_dbg(denali->dev, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
if (INT_ENABLE)
iowrite32(1, denali->flash_reg + GLOBAL_INT_ENABLE);
else
@@ -605,7 +563,6 @@ static void denali_irq_init(struct denali_nand_info *denali)
static void denali_irq_cleanup(int irqnum, struct denali_nand_info *denali)
{
denali_set_intr_modes(denali, false);
- free_irq(irqnum, denali);
}
static void denali_irq_enable(struct denali_nand_info *denali,
@@ -1437,9 +1394,6 @@ static struct nand_bbt_descr bbt_mirror_descr = {
/* initialize driver data structures */
static void denali_drv_init(struct denali_nand_info *denali)
{
- denali->idx = 0;
-
- /* setup interrupt handler */
/*
* the completion object will be used to notify
* the callee that the interrupt is done
@@ -1485,14 +1439,12 @@ int denali_init(struct denali_nand_info *denali)
denali_hw_init(denali);
denali_drv_init(denali);
- /*
- * denali_isr register is done after all the hardware
- * initilization is finished
- */
- if (request_irq(denali->irq, denali_isr, IRQF_SHARED,
- DENALI_NAND_NAME, denali)) {
- pr_err("Spectra: Unable to allocate IRQ\n");
- return -ENODEV;
+ /* Request IRQ after all the hardware initialization is finished */
+ ret = devm_request_irq(denali->dev, denali->irq, denali_isr,
+ IRQF_SHARED, DENALI_NAND_NAME, denali);
+ if (ret) {
+ dev_err(denali->dev, "Unable to request IRQ\n");
+ return ret;
}
/* now that our ISR is registered, we can enable interrupts */
@@ -1510,10 +1462,9 @@ int denali_init(struct denali_nand_info *denali)
* this is the first stage in a two step process to register
* with the nand subsystem
*/
- if (nand_scan_ident(mtd, denali->max_banks, NULL)) {
- ret = -ENXIO;
+ ret = nand_scan_ident(mtd, denali->max_banks, NULL);
+ if (ret)
goto failed_req_irq;
- }
/* allocate the right size buffer now */
devm_kfree(denali->dev, denali->buf.buf);
@@ -1528,7 +1479,7 @@ int denali_init(struct denali_nand_info *denali)
/* Is 32-bit DMA supported? */
ret = dma_set_mask(denali->dev, DMA_BIT_MASK(32));
if (ret) {
- pr_err("Spectra: no usable DMA configuration\n");
+ dev_err(denali->dev, "No usable DMA configuration\n");
goto failed_req_irq;
}
@@ -1536,7 +1487,7 @@ int denali_init(struct denali_nand_info *denali)
mtd->writesize + mtd->oobsize,
DMA_BIDIRECTIONAL);
if (dma_mapping_error(denali->dev, denali->buf.dma_buf)) {
- dev_err(denali->dev, "Spectra: failed to map DMA buffer\n");
+ dev_err(denali->dev, "Failed to map DMA buffer\n");
ret = -EIO;
goto failed_req_irq;
}
@@ -1547,16 +1498,16 @@ int denali_init(struct denali_nand_info *denali)
* the real pagesize and anything necessary
*/
denali->devnum = ioread32(denali->flash_reg + DEVICES_CONNECTED);
- denali->nand.chipsize <<= (denali->devnum - 1);
- denali->nand.page_shift += (denali->devnum - 1);
+ denali->nand.chipsize <<= denali->devnum - 1;
+ denali->nand.page_shift += denali->devnum - 1;
denali->nand.pagemask = (denali->nand.chipsize >>
denali->nand.page_shift) - 1;
- denali->nand.bbt_erase_shift += (denali->devnum - 1);
+ denali->nand.bbt_erase_shift += denali->devnum - 1;
denali->nand.phys_erase_shift = denali->nand.bbt_erase_shift;
- denali->nand.chip_shift += (denali->devnum - 1);
- mtd->writesize <<= (denali->devnum - 1);
- mtd->oobsize <<= (denali->devnum - 1);
- mtd->erasesize <<= (denali->devnum - 1);
+ denali->nand.chip_shift += denali->devnum - 1;
+ mtd->writesize <<= denali->devnum - 1;
+ mtd->oobsize <<= denali->devnum - 1;
+ mtd->erasesize <<= denali->devnum - 1;
mtd->size = denali->nand.numchips * denali->nand.chipsize;
denali->bbtskipbytes *= denali->devnum;
@@ -1606,14 +1557,6 @@ int denali_init(struct denali_nand_info *denali)
denali->nand.ecc.bytes *= denali->devnum;
denali->nand.ecc.strength *= denali->devnum;
- /*
- * Let driver know the total blocks number and how many blocks
- * contained by each nand chip. blksperchip will help driver to
- * know how many blocks is taken by FW.
- */
- denali->totalblks = mtd->size >> denali->nand.phys_erase_shift;
- denali->blksperchip = denali->totalblks / denali->nand.numchips;
-
/* override the default read operations */
denali->nand.ecc.size = ECC_SECTOR_SIZE * denali->devnum;
denali->nand.ecc.read_page = denali_read_page;
@@ -1624,15 +1567,13 @@ int denali_init(struct denali_nand_info *denali)
denali->nand.ecc.write_oob = denali_write_oob;
denali->nand.erase = denali_erase;
- if (nand_scan_tail(mtd)) {
- ret = -ENXIO;
+ ret = nand_scan_tail(mtd);
+ if (ret)
goto failed_req_irq;
- }
ret = mtd_device_register(mtd, NULL, 0);
if (ret) {
- dev_err(denali->dev, "Spectra: Failed to register MTD: %d\n",
- ret);
+ dev_err(denali->dev, "Failed to register MTD: %d\n", ret);
goto failed_req_irq;
}
return 0;
diff --git a/drivers/mtd/nand/denali.h b/drivers/mtd/nand/denali.h
index e7ab4866a5da..ea22191e8515 100644
--- a/drivers/mtd/nand/denali.h
+++ b/drivers/mtd/nand/denali.h
@@ -383,14 +383,6 @@
#define CLK_X 5
#define CLK_MULTI 4
-/* spectraswconfig.h */
-#define CMD_DMA 0
-
-#define SPECTRA_PARTITION_ID 0
-/**** Block Table and Reserved Block Parameters *****/
-#define SPECTRA_START_BLOCK 3
-#define NUM_FREE_BLOCKS_GATE 30
-
/* KBV - Updated to LNW scratch register address */
#define SCRATCH_REG_ADDR CONFIG_MTD_NAND_DENALI_SCRATCH_REG_ADDR
#define SCRATCH_REG_SIZE 64
@@ -467,13 +459,9 @@ struct denali_nand_info {
spinlock_t irq_lock;
uint32_t irq_status;
int irq_debug_array[32];
- int idx;
int irq;
uint32_t devnum; /* represent how many nands connected */
- uint32_t fwblks; /* represent how many blocks FW used */
- uint32_t totalblks;
- uint32_t blksperchip;
uint32_t bbtskipbytes;
uint32_t max_banks;
};
diff --git a/drivers/mtd/nand/denali_dt.c b/drivers/mtd/nand/denali_dt.c
index 0cb1e8d9fbfc..5607fcd3b8ed 100644
--- a/drivers/mtd/nand/denali_dt.c
+++ b/drivers/mtd/nand/denali_dt.c
@@ -21,7 +21,6 @@
#include <linux/platform_device.h>
#include <linux/of.h>
#include <linux/of_device.h>
-#include <linux/slab.h>
#include "denali.h"
@@ -110,7 +109,7 @@ static int denali_dt_remove(struct platform_device *ofdev)
struct denali_dt *dt = platform_get_drvdata(ofdev);
denali_remove(&dt->denali);
- clk_disable(dt->clk);
+ clk_disable_unprepare(dt->clk);
return 0;
}
diff --git a/drivers/mtd/nand/denali_pci.c b/drivers/mtd/nand/denali_pci.c
index de31514df282..ac843238b77e 100644
--- a/drivers/mtd/nand/denali_pci.c
+++ b/drivers/mtd/nand/denali_pci.c
@@ -14,7 +14,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
-#include <linux/slab.h>
#include "denali.h"
diff --git a/drivers/mtd/nand/fsmc_nand.c b/drivers/mtd/nand/fsmc_nand.c
index d4f454a4b35e..4924b43977ef 100644
--- a/drivers/mtd/nand/fsmc_nand.c
+++ b/drivers/mtd/nand/fsmc_nand.c
@@ -926,8 +926,8 @@ static int __init fsmc_nand_probe(struct platform_device *pdev)
/*
* Scan to find existence of the device
*/
- if (nand_scan_ident(mtd, 1, NULL)) {
- ret = -ENXIO;
+ ret = nand_scan_ident(mtd, 1, NULL);
+ if (ret) {
dev_err(&pdev->dev, "No NAND Device found!\n");
goto err_scan_ident;
}
@@ -992,10 +992,9 @@ static int __init fsmc_nand_probe(struct platform_device *pdev)
}
/* Second stage of scan to fill MTD data-structures */
- if (nand_scan_tail(mtd)) {
- ret = -ENXIO;
+ ret = nand_scan_tail(mtd);
+ if (ret)
goto err_probe;
- }
/*
* The partition information can be accessed by (in the same precedence)
diff --git a/drivers/mtd/nand/gpio.c b/drivers/mtd/nand/gpio.c
index 6317f6836022..0d24857469ab 100644
--- a/drivers/mtd/nand/gpio.c
+++ b/drivers/mtd/nand/gpio.c
@@ -286,10 +286,9 @@ static int gpio_nand_probe(struct platform_device *pdev)
if (gpio_is_valid(gpiomtd->plat.gpio_nwp))
gpio_direction_output(gpiomtd->plat.gpio_nwp, 1);
- if (nand_scan(mtd, 1)) {
- ret = -ENXIO;
+ ret = nand_scan(mtd, 1);
+ if (ret)
goto err_wp;
- }
if (gpiomtd->plat.adjust_parts)
gpiomtd->plat.adjust_parts(&gpiomtd->plat, mtd->size);
diff --git a/drivers/mtd/nand/hisi504_nand.c b/drivers/mtd/nand/hisi504_nand.c
index 9432546f4cd4..e40364eeb556 100644
--- a/drivers/mtd/nand/hisi504_nand.c
+++ b/drivers/mtd/nand/hisi504_nand.c
@@ -774,10 +774,8 @@ static int hisi_nfc_probe(struct platform_device *pdev)
}
ret = nand_scan_ident(mtd, max_chips, NULL);
- if (ret) {
- ret = -ENODEV;
+ if (ret)
goto err_res;
- }
host->buffer = dmam_alloc_coherent(dev, mtd->writesize + mtd->oobsize,
&host->dma_buffer, GFP_KERNEL);
diff --git a/drivers/mtd/nand/lpc32xx_mlc.c b/drivers/mtd/nand/lpc32xx_mlc.c
index 852388171f20..5553a5d9efd1 100644
--- a/drivers/mtd/nand/lpc32xx_mlc.c
+++ b/drivers/mtd/nand/lpc32xx_mlc.c
@@ -747,10 +747,9 @@ static int lpc32xx_nand_probe(struct platform_device *pdev)
* Scan to find existence of the device and
* Get the type of NAND device SMALL block or LARGE block
*/
- if (nand_scan_ident(mtd, 1, NULL)) {
- res = -ENXIO;
+ res = nand_scan_ident(mtd, 1, NULL);
+ if (res)
goto err_exit3;
- }
host->dma_buf = devm_kzalloc(&pdev->dev, mtd->writesize, GFP_KERNEL);
if (!host->dma_buf) {
@@ -793,10 +792,9 @@ static int lpc32xx_nand_probe(struct platform_device *pdev)
* Fills out all the uninitialized function pointers with the defaults
* And scans for a bad block table if appropriate.
*/
- if (nand_scan_tail(mtd)) {
- res = -ENXIO;
+ res = nand_scan_tail(mtd);
+ if (res)
goto err_exit4;
- }
mtd->name = DRV_NAME;
diff --git a/drivers/mtd/nand/lpc32xx_slc.c b/drivers/mtd/nand/lpc32xx_slc.c
index 8d3edc34958e..53bafe23ab39 100644
--- a/drivers/mtd/nand/lpc32xx_slc.c
+++ b/drivers/mtd/nand/lpc32xx_slc.c
@@ -894,10 +894,9 @@ static int lpc32xx_nand_probe(struct platform_device *pdev)
}
/* Find NAND device */
- if (nand_scan_ident(mtd, 1, NULL)) {
- res = -ENXIO;
+ res = nand_scan_ident(mtd, 1, NULL);
+ if (res)
goto err_exit3;
- }
/* OOB and ECC CPU and DMA work areas */
host->ecc_buf = (uint32_t *)(host->data_buf + LPC32XX_DMA_DATA_SIZE);
@@ -929,10 +928,9 @@ static int lpc32xx_nand_probe(struct platform_device *pdev)
/*
* Fills out all the uninitialized function pointers with the defaults
*/
- if (nand_scan_tail(mtd)) {
- res = -ENXIO;
+ res = nand_scan_tail(mtd);
+ if (res)
goto err_exit3;
- }
mtd->name = "nxp_lpc3220_slc";
res = mtd_device_register(mtd, host->ncfg->parts,
diff --git a/drivers/mtd/nand/mpc5121_nfc.c b/drivers/mtd/nand/mpc5121_nfc.c
index 7eacb2f545f5..6d6eaed2d20c 100644
--- a/drivers/mtd/nand/mpc5121_nfc.c
+++ b/drivers/mtd/nand/mpc5121_nfc.c
@@ -777,9 +777,9 @@ static int mpc5121_nfc_probe(struct platform_device *op)
}
/* Detect NAND chips */
- if (nand_scan(mtd, be32_to_cpup(chips_no))) {
+ retval = nand_scan(mtd, be32_to_cpup(chips_no));
+ if (retval) {
dev_err(dev, "NAND Flash not found !\n");
- retval = -ENXIO;
goto error;
}
diff --git a/drivers/mtd/nand/mtk_nand.c b/drivers/mtd/nand/mtk_nand.c
index 5223a2182ee4..6c3eed3c2094 100644
--- a/drivers/mtd/nand/mtk_nand.c
+++ b/drivers/mtd/nand/mtk_nand.c
@@ -1297,7 +1297,7 @@ static int mtk_nfc_nand_chip_init(struct device *dev, struct mtk_nfc *nfc,
ret = nand_scan_ident(mtd, nsels, NULL);
if (ret)
- return -ENODEV;
+ return ret;
/* store bbt magic in page, cause OOB is not protected */
if (nand->bbt_options & NAND_BBT_USE_FLASH)
@@ -1323,7 +1323,7 @@ static int mtk_nfc_nand_chip_init(struct device *dev, struct mtk_nfc *nfc,
ret = nand_scan_tail(mtd);
if (ret)
- return -ENODEV;
+ return ret;
ret = mtd_device_parse_register(mtd, NULL, NULL, NULL, 0);
if (ret) {
diff --git a/drivers/mtd/nand/mxc_nand.c b/drivers/mtd/nand/mxc_nand.c
index d7f724b24fd7..61ca020c5272 100644
--- a/drivers/mtd/nand/mxc_nand.c
+++ b/drivers/mtd/nand/mxc_nand.c
@@ -1747,10 +1747,9 @@ static int mxcnd_probe(struct platform_device *pdev)
}
/* first scan to find the device and get the page size */
- if (nand_scan_ident(mtd, is_imx25_nfc(host) ? 4 : 1, NULL)) {
- err = -ENXIO;
+ err = nand_scan_ident(mtd, is_imx25_nfc(host) ? 4 : 1, NULL);
+ if (err)
goto escan;
- }
switch (this->ecc.mode) {
case NAND_ECC_HW:
@@ -1808,10 +1807,9 @@ static int mxcnd_probe(struct platform_device *pdev)
}
/* second phase scan */
- if (nand_scan_tail(mtd)) {
- err = -ENXIO;
+ err = nand_scan_tail(mtd);
+ if (err)
goto escan;
- }
/* Register the partitions */
mtd_device_parse_register(mtd, part_probes,
diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
index 3bde96a3f7bf..ec1c28aaaf23 100644
--- a/drivers/mtd/nand/nand_base.c
+++ b/drivers/mtd/nand/nand_base.c
@@ -709,6 +709,25 @@ static void nand_command(struct mtd_info *mtd, unsigned int command,
nand_wait_ready(mtd);
}
+static void nand_ccs_delay(struct nand_chip *chip)
+{
+ /*
+ * The controller already takes care of waiting for tCCS when the RNDIN
+ * or RNDOUT command is sent, return directly.
+ */
+ if (!(chip->options & NAND_WAIT_TCCS))
+ return;
+
+ /*
+ * Wait tCCS_min if it is correctly defined, otherwise wait 500ns
+ * (which should be safe for all NANDs).
+ */
+ if (chip->data_interface && chip->data_interface->timings.sdr.tCCS_min)
+ ndelay(chip->data_interface->timings.sdr.tCCS_min / 1000);
+ else
+ ndelay(500);
+}
+
/**
* nand_command_lp - [DEFAULT] Send command to NAND large page device
* @mtd: MTD device structure
@@ -773,10 +792,13 @@ static void nand_command_lp(struct mtd_info *mtd, unsigned int command,
case NAND_CMD_ERASE1:
case NAND_CMD_ERASE2:
case NAND_CMD_SEQIN:
- case NAND_CMD_RNDIN:
case NAND_CMD_STATUS:
return;
+ case NAND_CMD_RNDIN:
+ nand_ccs_delay(chip);
+ return;
+
case NAND_CMD_RESET:
if (chip->dev_ready)
break;
@@ -795,6 +817,8 @@ static void nand_command_lp(struct mtd_info *mtd, unsigned int command,
NAND_NCE | NAND_CLE | NAND_CTRL_CHANGE);
chip->cmd_ctrl(mtd, NAND_CMD_NONE,
NAND_NCE | NAND_CTRL_CHANGE);
+
+ nand_ccs_delay(chip);
return;
case NAND_CMD_READ0:
@@ -1946,7 +1970,8 @@ static int nand_do_read_ops(struct mtd_info *mtd, loff_t from,
__func__, buf);
read_retry:
- chip->cmdfunc(mtd, NAND_CMD_READ0, 0x00, page);
+ if (nand_standard_page_accessors(&chip->ecc))
+ chip->cmdfunc(mtd, NAND_CMD_READ0, 0x00, page);
/*
* Now read the page into the buffer. Absent an error,
@@ -2634,7 +2659,8 @@ static int nand_write_page(struct mtd_info *mtd, struct nand_chip *chip,
else
subpage = 0;
- chip->cmdfunc(mtd, NAND_CMD_SEQIN, 0x00, page);
+ if (nand_standard_page_accessors(&chip->ecc))
+ chip->cmdfunc(mtd, NAND_CMD_SEQIN, 0x00, page);
if (unlikely(raw))
status = chip->ecc.write_page_raw(mtd, chip, buf,
@@ -2657,7 +2683,8 @@ static int nand_write_page(struct mtd_info *mtd, struct nand_chip *chip,
if (!cached || !NAND_HAS_CACHEPROG(chip)) {
- chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
+ if (nand_standard_page_accessors(&chip->ecc))
+ chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
status = chip->waitfunc(mtd, chip);
/*
* See if operation failed and additional status checks are
@@ -3985,10 +4012,9 @@ static bool find_full_id_nand(struct mtd_info *mtd, struct nand_chip *chip,
/*
* Get the flash and manufacturer id and lookup if the type is supported.
*/
-static struct nand_flash_dev *nand_get_flash_type(struct mtd_info *mtd,
- struct nand_chip *chip,
- int *maf_id, int *dev_id,
- struct nand_flash_dev *type)
+static int nand_get_flash_type(struct mtd_info *mtd, struct nand_chip *chip,
+ int *maf_id, int *dev_id,
+ struct nand_flash_dev *type)
{
int busw;
int i, maf_idx;
@@ -4026,7 +4052,7 @@ static struct nand_flash_dev *nand_get_flash_type(struct mtd_info *mtd,
if (id_data[0] != *maf_id || id_data[1] != *dev_id) {
pr_info("second ID read did not match %02x,%02x against %02x,%02x\n",
*maf_id, *dev_id, id_data[0], id_data[1]);
- return ERR_PTR(-ENODEV);
+ return -ENODEV;
}
if (!type)
@@ -4053,7 +4079,7 @@ static struct nand_flash_dev *nand_get_flash_type(struct mtd_info *mtd,
}
if (!type->name)
- return ERR_PTR(-ENODEV);
+ return -ENODEV;
if (!mtd->name)
mtd->name = type->name;
@@ -4098,7 +4124,7 @@ ident_done:
pr_warn("bus width %d instead %d bit\n",
(chip->options & NAND_BUSWIDTH_16) ? 16 : 8,
busw ? 16 : 8);
- return ERR_PTR(-EINVAL);
+ return -EINVAL;
}
nand_decode_bbm_options(mtd, chip, id_data);
@@ -4140,7 +4166,7 @@ ident_done:
pr_info("%d MiB, %s, erase size: %d KiB, page size: %d, OOB size: %d\n",
(int)(chip->chipsize >> 20), nand_is_slc(chip) ? "SLC" : "MLC",
mtd->erasesize >> 10, mtd->writesize, mtd->oobsize);
- return type;
+ return 0;
}
static const char * const nand_ecc_modes[] = {
@@ -4306,7 +4332,6 @@ int nand_scan_ident(struct mtd_info *mtd, int maxchips,
{
int i, nand_maf_id, nand_dev_id;
struct nand_chip *chip = mtd_to_nand(mtd);
- struct nand_flash_dev *type;
int ret;
ret = nand_dt_init(chip);
@@ -4329,14 +4354,12 @@ int nand_scan_ident(struct mtd_info *mtd, int maxchips,
nand_set_defaults(chip, chip->options & NAND_BUSWIDTH_16);
/* Read the flash type */
- type = nand_get_flash_type(mtd, chip, &nand_maf_id,
- &nand_dev_id, table);
-
- if (IS_ERR(type)) {
+ ret = nand_get_flash_type(mtd, chip, &nand_maf_id, &nand_dev_id, table);
+ if (ret) {
if (!(chip->options & NAND_SCAN_SILENT_NODEV))
pr_warn("No NAND device found\n");
chip->select_chip(mtd, -1);
- return PTR_ERR(type);
+ return ret;
}
/* Initialize the ->data_interface field. */
@@ -4515,6 +4538,26 @@ static bool nand_ecc_strength_good(struct mtd_info *mtd)
return corr >= ds_corr && ecc->strength >= chip->ecc_strength_ds;
}
+static bool invalid_ecc_page_accessors(struct nand_chip *chip)
+{
+ struct nand_ecc_ctrl *ecc = &chip->ecc;
+
+ if (nand_standard_page_accessors(ecc))
+ return false;
+
+ /*
+ * NAND_ECC_CUSTOM_PAGE_ACCESS flag is set, make sure the NAND
+ * controller driver implements all the page accessors because
+ * default helpers are not suitable when the core does not
+ * send the READ0/PAGEPROG commands.
+ */
+ return (!ecc->read_page || !ecc->write_page ||
+ !ecc->read_page_raw || !ecc->write_page_raw ||
+ (NAND_HAS_SUBPAGE_READ(chip) && !ecc->read_subpage) ||
+ (NAND_HAS_SUBPAGE_WRITE(chip) && !ecc->write_subpage &&
+ ecc->hwctl && ecc->calculate));
+}
+
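nand_standard_page_accessors() returns false when a driver sets NAND_ECC_CUSTOM_PAGE_ACCESS in ecc.options, i.e. when the controller issues READ0/SEQIN/PAGEPROG itself. The check above then insists the driver supplies the whole accessor set; a sketch of what such a driver's init is expected to provide (callback names hypothetical):

	chip->ecc.mode = NAND_ECC_HW;
	chip->ecc.options |= NAND_ECC_CUSTOM_PAGE_ACCESS;
	chip->ecc.read_page      = my_read_page;	/* sends READ0 itself */
	chip->ecc.write_page     = my_write_page;	/* sends SEQIN/PAGEPROG itself */
	chip->ecc.read_page_raw  = my_read_page_raw;
	chip->ecc.write_page_raw = my_write_page_raw;
	/* plus read_subpage/write_subpage where the chip supports them */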
/**
* nand_scan_tail - [NAND Interface] Scan for the NAND device
* @mtd: MTD device structure
@@ -4535,6 +4578,11 @@ int nand_scan_tail(struct mtd_info *mtd)
!(chip->bbt_options & NAND_BBT_USE_FLASH)))
return -EINVAL;
+ if (invalid_ecc_page_accessors(chip)) {
+ pr_err("Invalid ECC page accessors setup\n");
+ return -EINVAL;
+ }
+
if (!(chip->options & NAND_OWN_BUFFERS)) {
nbuf = kzalloc(sizeof(*nbuf) + mtd->writesize
+ mtd->oobsize * 3, GFP_KERNEL);
diff --git a/drivers/mtd/nand/nand_ids.c b/drivers/mtd/nand/nand_ids.c
index 2af9869a115e..b3a332f37e14 100644
--- a/drivers/mtd/nand/nand_ids.c
+++ b/drivers/mtd/nand/nand_ids.c
@@ -36,6 +36,9 @@ struct nand_flash_dev nand_flash_ids[] = {
{"TC58NVG2S0F 4G 3.3V 8-bit",
{ .id = {0x98, 0xdc, 0x90, 0x26, 0x76, 0x15, 0x01, 0x08} },
SZ_4K, SZ_512, SZ_256K, 0, 8, 224, NAND_ECC_INFO(4, SZ_512) },
+ {"TC58NVG2S0H 4G 3.3V 8-bit",
+ { .id = {0x98, 0xdc, 0x90, 0x26, 0x76, 0x16, 0x08, 0x00} },
+ SZ_4K, SZ_512, SZ_256K, 0, 8, 256, NAND_ECC_INFO(8, SZ_512) },
{"TC58NVG3S0F 8G 3.3V 8-bit",
{ .id = {0x98, 0xd3, 0x90, 0x26, 0x76, 0x15, 0x02, 0x08} },
SZ_4K, SZ_1K, SZ_256K, 0, 8, 232, NAND_ECC_INFO(4, SZ_512) },
diff --git a/drivers/mtd/nand/nand_timings.c b/drivers/mtd/nand/nand_timings.c
index 13a587407be3..f06312df3669 100644
--- a/drivers/mtd/nand/nand_timings.c
+++ b/drivers/mtd/nand/nand_timings.c
@@ -18,6 +18,8 @@ static const struct nand_data_interface onfi_sdr_timings[] = {
{
.type = NAND_SDR_IFACE,
.timings.sdr = {
+ .tCCS_min = 500000,
+ .tR_max = 200000000,
.tADL_min = 400000,
.tALH_min = 20000,
.tALS_min = 50000,
@@ -58,6 +60,8 @@ static const struct nand_data_interface onfi_sdr_timings[] = {
{
.type = NAND_SDR_IFACE,
.timings.sdr = {
+ .tCCS_min = 500000,
+ .tR_max = 200000000,
.tADL_min = 400000,
.tALH_min = 10000,
.tALS_min = 25000,
@@ -98,6 +102,8 @@ static const struct nand_data_interface onfi_sdr_timings[] = {
{
.type = NAND_SDR_IFACE,
.timings.sdr = {
+ .tCCS_min = 500000,
+ .tR_max = 200000000,
.tADL_min = 400000,
.tALH_min = 10000,
.tALS_min = 15000,
@@ -138,6 +144,8 @@ static const struct nand_data_interface onfi_sdr_timings[] = {
{
.type = NAND_SDR_IFACE,
.timings.sdr = {
+ .tCCS_min = 500000,
+ .tR_max = 200000000,
.tADL_min = 400000,
.tALH_min = 5000,
.tALS_min = 10000,
@@ -178,6 +186,8 @@ static const struct nand_data_interface onfi_sdr_timings[] = {
{
.type = NAND_SDR_IFACE,
.timings.sdr = {
+ .tCCS_min = 500000,
+ .tR_max = 200000000,
.tADL_min = 400000,
.tALH_min = 5000,
.tALS_min = 10000,
@@ -218,6 +228,8 @@ static const struct nand_data_interface onfi_sdr_timings[] = {
{
.type = NAND_SDR_IFACE,
.timings.sdr = {
+ .tCCS_min = 500000,
+ .tR_max = 200000000,
.tADL_min = 400000,
.tALH_min = 5000,
.tALS_min = 10000,
@@ -290,10 +302,22 @@ int onfi_init_data_interface(struct nand_chip *chip,
*iface = onfi_sdr_timings[timing_mode];
/*
- * TODO: initialize timings that cannot be deduced from timing mode:
+ * Initialize timings that cannot be deduced from timing mode:
* tR, tPROG, tCCS, ...
* This information is part of the ONFI parameter page.
*/
+ if (chip->onfi_version) {
+ struct nand_onfi_params *params = &chip->onfi_params;
+ struct nand_sdr_timings *timings = &iface->timings.sdr;
+
+ /* microseconds -> picoseconds */
+ timings->tPROG_max = 1000000UL * le16_to_cpu(params->t_prog);
+ timings->tBERS_max = 1000000UL * le16_to_cpu(params->t_bers);
+ timings->tR_max = 1000000UL * le16_to_cpu(params->t_r);
+
+ /* nanoseconds -> picoseconds */
+ timings->tCCS_min = 1000UL * le16_to_cpu(params->t_ccs);
+ }
return 0;
}
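All nand_sdr_timings fields are kept in picoseconds, which is why the ONFI parameter-page values are scaled up on the way in and nand_ccs_delay() above divides by 1000 on the way out. A unit walk-through with an illustrative tCCS of 500 ns:

	u16 t_ccs = 500;			/* ns, as read from the ONFI page */
	u32 tCCS_min = 1000UL * t_ccs;		/* stored as 500000 ps */
	ndelay(tCCS_min / 1000);		/* delayed as 500 ns */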
diff --git a/drivers/mtd/nand/nandsim.c b/drivers/mtd/nand/nandsim.c
index 1eb934414eb5..c84742671a5f 100644
--- a/drivers/mtd/nand/nandsim.c
+++ b/drivers/mtd/nand/nandsim.c
@@ -525,24 +525,20 @@ static int nandsim_debugfs_create(struct nandsim *dev)
{
struct nandsim_debug_info *dbg = &dev->dbg;
struct dentry *dent;
- int err;
if (!IS_ENABLED(CONFIG_DEBUG_FS))
return 0;
dent = debugfs_create_dir("nandsim", NULL);
- if (IS_ERR_OR_NULL(dent)) {
- int err = dent ? -ENODEV : PTR_ERR(dent);
-
- NS_ERR("cannot create \"nandsim\" debugfs directory, err %d\n",
- err);
- return err;
+ if (!dent) {
+ NS_ERR("cannot create \"nandsim\" debugfs directory\n");
+ return -ENODEV;
}
dbg->dfs_root = dent;
dent = debugfs_create_file("wear_report", S_IRUSR,
dbg->dfs_root, dev, &dfs_fops);
- if (IS_ERR_OR_NULL(dent))
+ if (!dent)
goto out_remove;
dbg->dfs_wear_report = dent;
@@ -550,8 +546,7 @@ static int nandsim_debugfs_create(struct nandsim *dev)
out_remove:
debugfs_remove_recursive(dbg->dfs_root);
- err = dent ? PTR_ERR(dent) : -ENODEV;
- return err;
+ return -ENODEV;
}
/**
@@ -2313,8 +2308,6 @@ static int __init ns_init_module(void)
retval = nand_scan_ident(nsmtd, 1, NULL);
if (retval) {
NS_ERR("cannot scan NAND Simulator device\n");
- if (retval > 0)
- retval = -ENXIO;
goto error;
}
@@ -2350,8 +2343,6 @@ static int __init ns_init_module(void)
retval = nand_scan_tail(nsmtd);
if (retval) {
NS_ERR("can't register NAND Simulator\n");
- if (retval > 0)
- retval = -ENXIO;
goto error;
}
diff --git a/drivers/mtd/nand/omap2.c b/drivers/mtd/nand/omap2.c
index 5513bfd9cdc9..2a52101120d4 100644
--- a/drivers/mtd/nand/omap2.c
+++ b/drivers/mtd/nand/omap2.c
@@ -1895,10 +1895,10 @@ static int omap_nand_probe(struct platform_device *pdev)
/* scan NAND device connected to chip controller */
nand_chip->options |= info->devsize & NAND_BUSWIDTH_16;
- if (nand_scan_ident(mtd, 1, NULL)) {
+ err = nand_scan_ident(mtd, 1, NULL);
+ if (err) {
dev_err(&info->pdev->dev,
"scan failed, may be bus-width mismatch\n");
- err = -ENXIO;
goto return_error;
}
@@ -2154,10 +2154,9 @@ static int omap_nand_probe(struct platform_device *pdev)
scan_tail:
/* second phase scan */
- if (nand_scan_tail(mtd)) {
- err = -ENXIO;
+ err = nand_scan_tail(mtd);
+ if (err)
goto return_error;
- }
if (dev->of_node)
mtd_device_register(mtd, NULL, 0);
@@ -2197,6 +2196,7 @@ static const struct of_device_id omap_nand_ids[] = {
{ .compatible = "ti,omap2-nand", },
{},
};
+MODULE_DEVICE_TABLE(of, omap_nand_ids);
static struct platform_driver omap_nand_driver = {
.probe = omap_nand_probe,
diff --git a/drivers/mtd/nand/orion_nand.c b/drivers/mtd/nand/orion_nand.c
index 40a7c4a2cf0d..4a91c5d000be 100644
--- a/drivers/mtd/nand/orion_nand.c
+++ b/drivers/mtd/nand/orion_nand.c
@@ -155,10 +155,9 @@ static int __init orion_nand_probe(struct platform_device *pdev)
clk_put(clk);
}
- if (nand_scan(mtd, 1)) {
- ret = -ENXIO;
+ ret = nand_scan(mtd, 1);
+ if (ret)
goto no_dev;
- }
mtd->name = "orion_nand";
ret = mtd_device_register(mtd, board->parts, board->nr_parts);
diff --git a/drivers/mtd/nand/oxnas_nand.c b/drivers/mtd/nand/oxnas_nand.c
new file mode 100644
index 000000000000..3e3bf3b364d2
--- /dev/null
+++ b/drivers/mtd/nand/oxnas_nand.c
@@ -0,0 +1,195 @@
+/*
+ * Oxford Semiconductor OXNAS NAND driver
+ *
+ * Copyright (C) 2016 Neil Armstrong <narmstrong@baylibre.com>
+ * Heavily based on plat_nand.c :
+ * Author: Vitaly Wool <vitalywool@gmail.com>
+ * Copyright (C) 2013 Ma Haijun <mahaijuns@gmail.com>
+ * Copyright (C) 2012 John Crispin <blogic@openwrt.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/clk.h>
+#include <linux/reset.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/nand.h>
+#include <linux/mtd/partitions.h>
+#include <linux/of.h>
+
+/* Nand commands */
+#define OXNAS_NAND_CMD_ALE BIT(18)
+#define OXNAS_NAND_CMD_CLE BIT(19)
+
+#define OXNAS_NAND_MAX_CHIPS 1
+
+struct oxnas_nand_ctrl {
+ struct nand_hw_control base;
+ void __iomem *io_base;
+ struct clk *clk;
+ struct nand_chip *chips[OXNAS_NAND_MAX_CHIPS];
+};
+
+static uint8_t oxnas_nand_read_byte(struct mtd_info *mtd)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct oxnas_nand_ctrl *oxnas = nand_get_controller_data(chip);
+
+ return readb(oxnas->io_base);
+}
+
+static void oxnas_nand_read_buf(struct mtd_info *mtd, u8 *buf, int len)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct oxnas_nand_ctrl *oxnas = nand_get_controller_data(chip);
+
+ ioread8_rep(oxnas->io_base, buf, len);
+}
+
+static void oxnas_nand_write_buf(struct mtd_info *mtd, const u8 *buf, int len)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct oxnas_nand_ctrl *oxnas = nand_get_controller_data(chip);
+
+ iowrite8_rep(oxnas->io_base, buf, len);
+}
+
+/* Single CS command control */
+static void oxnas_nand_cmd_ctrl(struct mtd_info *mtd, int cmd,
+ unsigned int ctrl)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct oxnas_nand_ctrl *oxnas = nand_get_controller_data(chip);
+
+ if (ctrl & NAND_CLE)
+ writeb(cmd, oxnas->io_base + OXNAS_NAND_CMD_CLE);
+ else if (ctrl & NAND_ALE)
+ writeb(cmd, oxnas->io_base + OXNAS_NAND_CMD_ALE);
+}
+
+/*
+ * Probe for the NAND device.
+ */
+static int oxnas_nand_probe(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct device_node *nand_np;
+ struct oxnas_nand_ctrl *oxnas;
+ struct nand_chip *chip;
+ struct mtd_info *mtd;
+ struct resource *res;
+ int nchips = 0;
+ int count = 0;
+ int err = 0;
+
+ /* Allocate memory for the device structure (and zero it) */
+ oxnas = devm_kzalloc(&pdev->dev, sizeof(*oxnas),
+ GFP_KERNEL);
+ if (!oxnas)
+ return -ENOMEM;
+
+ nand_hw_control_init(&oxnas->base);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ oxnas->io_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(oxnas->io_base))
+ return PTR_ERR(oxnas->io_base);
+
+ oxnas->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(oxnas->clk))
+ oxnas->clk = NULL;
+
+ /* Only a single chip node is supported */
+ count = of_get_child_count(np);
+ if (count > 1)
+ return -EINVAL;
+
+ clk_prepare_enable(oxnas->clk);
+ device_reset_optional(&pdev->dev);
+
+ for_each_child_of_node(np, nand_np) {
+ chip = devm_kzalloc(&pdev->dev, sizeof(struct nand_chip),
+ GFP_KERNEL);
+ if (!chip)
+ return -ENOMEM;
+
+ chip->controller = &oxnas->base;
+
+ nand_set_flash_node(chip, nand_np);
+ nand_set_controller_data(chip, oxnas);
+
+ mtd = nand_to_mtd(chip);
+ mtd->dev.parent = &pdev->dev;
+ mtd->priv = chip;
+
+ chip->cmd_ctrl = oxnas_nand_cmd_ctrl;
+ chip->read_buf = oxnas_nand_read_buf;
+ chip->read_byte = oxnas_nand_read_byte;
+ chip->write_buf = oxnas_nand_write_buf;
+ chip->chip_delay = 30;
+
+ /* Scan to find existence of the device */
+ err = nand_scan(mtd, 1);
+ if (err)
+ return err;
+
+ err = mtd_device_register(mtd, NULL, 0);
+ if (err) {
+ nand_release(mtd);
+ return err;
+ }
+
+ oxnas->chips[nchips] = chip;
+ ++nchips;
+ }
+
+ /* Exit if no chips found */
+ if (!nchips)
+ return -ENODEV;
+
+ platform_set_drvdata(pdev, oxnas);
+
+ return 0;
+}
+
+static int oxnas_nand_remove(struct platform_device *pdev)
+{
+ struct oxnas_nand_ctrl *oxnas = platform_get_drvdata(pdev);
+
+ if (oxnas->chips[0])
+ nand_release(nand_to_mtd(oxnas->chips[0]));
+
+ clk_disable_unprepare(oxnas->clk);
+
+ return 0;
+}
+
+static const struct of_device_id oxnas_nand_match[] = {
+ { .compatible = "oxsemi,ox820-nand" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, oxnas_nand_match);
+
+static struct platform_driver oxnas_nand_driver = {
+ .probe = oxnas_nand_probe,
+ .remove = oxnas_nand_remove,
+ .driver = {
+ .name = "oxnas_nand",
+ .of_match_table = oxnas_nand_match,
+ },
+};
+
+module_platform_driver(oxnas_nand_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Neil Armstrong <narmstrong@baylibre.com>");
+MODULE_DESCRIPTION("Oxnas NAND driver");
+MODULE_ALIAS("platform:oxnas_nand");
diff --git a/drivers/mtd/nand/pasemi_nand.c b/drivers/mtd/nand/pasemi_nand.c
index 5de7591b0510..074b8b01289e 100644
--- a/drivers/mtd/nand/pasemi_nand.c
+++ b/drivers/mtd/nand/pasemi_nand.c
@@ -156,10 +156,9 @@ static int pasemi_nand_probe(struct platform_device *ofdev)
chip->bbt_options = NAND_BBT_USE_FLASH;
/* Scan to find existence of the device */
- if (nand_scan(pasemi_nand_mtd, 1)) {
- err = -ENXIO;
+ err = nand_scan(pasemi_nand_mtd, 1);
+ if (err)
goto out_lpc;
- }
if (mtd_device_register(pasemi_nand_mtd, NULL, 0)) {
dev_err(dev, "Unable to register MTD device\n");
diff --git a/drivers/mtd/nand/plat_nand.c b/drivers/mtd/nand/plat_nand.c
index 415a53a0deeb..791de3e4bbb6 100644
--- a/drivers/mtd/nand/plat_nand.c
+++ b/drivers/mtd/nand/plat_nand.c
@@ -86,10 +86,9 @@ static int plat_nand_probe(struct platform_device *pdev)
}
/* Scan to find existence of the device */
- if (nand_scan(mtd, pdata->chip.nr_chips)) {
- err = -ENXIO;
+ err = nand_scan(mtd, pdata->chip.nr_chips);
+ if (err)
goto out;
- }
part_types = pdata->chip.part_probe_types;
diff --git a/drivers/mtd/nand/pxa3xx_nand.c b/drivers/mtd/nand/pxa3xx_nand.c
index b121bf4ed73a..649ba8200832 100644
--- a/drivers/mtd/nand/pxa3xx_nand.c
+++ b/drivers/mtd/nand/pxa3xx_nand.c
@@ -1680,8 +1680,9 @@ static int pxa3xx_nand_scan(struct mtd_info *mtd)
chip->ecc.strength = pdata->ecc_strength;
chip->ecc.size = pdata->ecc_step_size;
- if (nand_scan_ident(mtd, 1, NULL))
- return -ENODEV;
+ ret = nand_scan_ident(mtd, 1, NULL);
+ if (ret)
+ return ret;
if (!pdata->keep_config) {
ret = pxa3xx_nand_init(host);
@@ -1774,8 +1775,11 @@ static int alloc_nand_resource(struct platform_device *pdev)
int ret, irq, cs;
pdata = dev_get_platdata(&pdev->dev);
- if (pdata->num_cs <= 0)
+ if (pdata->num_cs <= 0) {
+ dev_err(&pdev->dev, "invalid number of chip selects\n");
return -ENODEV;
+ }
+
info = devm_kzalloc(&pdev->dev,
sizeof(*info) + sizeof(*host) * pdata->num_cs,
GFP_KERNEL);
@@ -1813,8 +1817,9 @@ static int alloc_nand_resource(struct platform_device *pdev)
nand_hw_control_init(chip->controller);
info->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(info->clk)) {
- dev_err(&pdev->dev, "failed to get nand clock\n");
- return PTR_ERR(info->clk);
+ ret = PTR_ERR(info->clk);
+ dev_err(&pdev->dev, "failed to get nand clock: %d\n", ret);
+ return ret;
}
ret = clk_prepare_enable(info->clk);
if (ret < 0)
@@ -1842,6 +1847,7 @@ static int alloc_nand_resource(struct platform_device *pdev)
info->mmio_base = devm_ioremap_resource(&pdev->dev, r);
if (IS_ERR(info->mmio_base)) {
ret = PTR_ERR(info->mmio_base);
+ dev_err(&pdev->dev, "failed to map register space: %d\n", ret);
goto fail_disable_clk;
}
info->mmio_phys = r->start;
@@ -1861,7 +1867,7 @@ static int alloc_nand_resource(struct platform_device *pdev)
pxa3xx_nand_irq_thread, IRQF_ONESHOT,
pdev->name, info);
if (ret < 0) {
- dev_err(&pdev->dev, "failed to request IRQ\n");
+ dev_err(&pdev->dev, "failed to request IRQ: %d\n", ret);
goto fail_free_buf;
}
@@ -1960,10 +1966,8 @@ static int pxa3xx_nand_probe(struct platform_device *pdev)
}
ret = alloc_nand_resource(pdev);
- if (ret) {
- dev_err(&pdev->dev, "alloc nand resource failed\n");
+ if (ret)
return ret;
- }
info = platform_get_drvdata(pdev);
probe_success = 0;
diff --git a/drivers/mtd/nand/s3c2410.c b/drivers/mtd/nand/s3c2410.c
index d459c19d78de..f0b030d44f71 100644
--- a/drivers/mtd/nand/s3c2410.c
+++ b/drivers/mtd/nand/s3c2410.c
@@ -39,6 +39,8 @@
#include <linux/slab.h>
#include <linux/clk.h>
#include <linux/cpufreq.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/nand.h>
@@ -185,6 +187,22 @@ struct s3c2410_nand_info {
#endif
};
+struct s3c24XX_nand_devtype_data {
+ enum s3c_cpu_type type;
+};
+
+static const struct s3c24XX_nand_devtype_data s3c2410_nand_devtype_data = {
+ .type = TYPE_S3C2410,
+};
+
+static const struct s3c24XX_nand_devtype_data s3c2412_nand_devtype_data = {
+ .type = TYPE_S3C2412,
+};
+
+static const struct s3c24XX_nand_devtype_data s3c2440_nand_devtype_data = {
+ .type = TYPE_S3C2440,
+};
+
/* conversion functions */
static struct s3c2410_nand_mtd *s3c2410_nand_mtd_toours(struct mtd_info *mtd)
@@ -497,7 +515,6 @@ static int s3c2412_nand_devready(struct mtd_info *mtd)
/* ECC handling functions */
-#ifdef CONFIG_MTD_NAND_S3C2410_HWECC
static int s3c2410_nand_correct_data(struct mtd_info *mtd, u_char *dat,
u_char *read_ecc, u_char *calc_ecc)
{
@@ -649,7 +666,6 @@ static int s3c2440_nand_calculate_ecc(struct mtd_info *mtd, const u_char *dat,
return 0;
}
-#endif
/* over-ride the standard functions for a little more speed. We can
* use read/write block to move the data buffers to/from the controller
@@ -796,6 +812,30 @@ static int s3c2410_nand_add_partition(struct s3c2410_nand_info *info,
return -ENODEV;
}
+static int s3c2410_nand_setup_data_interface(struct mtd_info *mtd,
+ const struct nand_data_interface *conf,
+ bool check_only)
+{
+ struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd);
+ struct s3c2410_platform_nand *pdata = info->platform;
+ const struct nand_sdr_timings *timings;
+ int tacls;
+
+ timings = nand_get_sdr_timings(conf);
+ if (IS_ERR(timings))
+ return -ENOTSUPP;
+
+ tacls = timings->tCLS_min - timings->tWP_min;
+ if (tacls < 0)
+ tacls = 0;
+
+ pdata->tacls = DIV_ROUND_UP(tacls, 1000);
+ pdata->twrph0 = DIV_ROUND_UP(timings->tWP_min, 1000);
+ pdata->twrph1 = DIV_ROUND_UP(timings->tCLH_min, 1000);
+
+ return s3c2410_nand_setrate(info);
+}
+
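The nand_sdr_timings the core hands in are expressed in picoseconds, while the s3c2410 platform-data fields are nanosecond counts that s3c2410_nand_setrate() later turns into HCLK ticks, hence the DIV_ROUND_UP(x, 1000) steps. With roughly ONFI mode 0 numbers (illustrative values, not taken from this patch):

	/* tCLS_min = 50000 ps, tWP_min = 50000 ps, tCLH_min = 20000 ps */
	tacls = 50000 - 50000;				/* 0: CLE/ALE setup fits inside tWP */
	pdata->tacls  = DIV_ROUND_UP(0, 1000);		/* 0 ns  */
	pdata->twrph0 = DIV_ROUND_UP(50000, 1000);	/* 50 ns */
	pdata->twrph1 = DIV_ROUND_UP(20000, 1000);	/* 20 ns */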
/**
* s3c2410_nand_init_chip - initialise a single instance of an chip
* @info: The base NAND controller the chip is on.
@@ -810,9 +850,12 @@ static void s3c2410_nand_init_chip(struct s3c2410_nand_info *info,
struct s3c2410_nand_mtd *nmtd,
struct s3c2410_nand_set *set)
{
+ struct device_node *np = info->device->of_node;
struct nand_chip *chip = &nmtd->chip;
void __iomem *regs = info->regs;
+ nand_set_flash_node(chip, set->of_node);
+
chip->write_buf = s3c2410_nand_write_buf;
chip->read_buf = s3c2410_nand_read_buf;
chip->select_chip = s3c2410_nand_select_chip;
@@ -821,6 +864,13 @@ static void s3c2410_nand_init_chip(struct s3c2410_nand_info *info,
chip->options = set->options;
chip->controller = &info->controller;
+ /*
+ * let's keep behavior unchanged for legacy boards booting via pdata and
+ * auto-detect timings only when booting with a device tree.
+ */
+ if (np)
+ chip->setup_data_interface = s3c2410_nand_setup_data_interface;
+
switch (info->cpu_type) {
case TYPE_S3C2410:
chip->IO_ADDR_W = regs + S3C2410_NFDATA;
@@ -858,58 +908,14 @@ static void s3c2410_nand_init_chip(struct s3c2410_nand_info *info,
nmtd->info = info;
nmtd->set = set;
-#ifdef CONFIG_MTD_NAND_S3C2410_HWECC
- chip->ecc.calculate = s3c2410_nand_calculate_ecc;
- chip->ecc.correct = s3c2410_nand_correct_data;
- chip->ecc.mode = NAND_ECC_HW;
- chip->ecc.strength = 1;
+ chip->ecc.mode = info->platform->ecc_mode;
- switch (info->cpu_type) {
- case TYPE_S3C2410:
- chip->ecc.hwctl = s3c2410_nand_enable_hwecc;
- chip->ecc.calculate = s3c2410_nand_calculate_ecc;
- break;
-
- case TYPE_S3C2412:
- chip->ecc.hwctl = s3c2412_nand_enable_hwecc;
- chip->ecc.calculate = s3c2412_nand_calculate_ecc;
- break;
-
- case TYPE_S3C2440:
- chip->ecc.hwctl = s3c2440_nand_enable_hwecc;
- chip->ecc.calculate = s3c2440_nand_calculate_ecc;
- break;
- }
-#else
- chip->ecc.mode = NAND_ECC_SOFT;
- chip->ecc.algo = NAND_ECC_HAMMING;
-#endif
-
- if (set->disable_ecc)
- chip->ecc.mode = NAND_ECC_NONE;
-
- switch (chip->ecc.mode) {
- case NAND_ECC_NONE:
- dev_info(info->device, "NAND ECC disabled\n");
- break;
- case NAND_ECC_SOFT:
- dev_info(info->device, "NAND soft ECC\n");
- break;
- case NAND_ECC_HW:
- dev_info(info->device, "NAND hardware ECC\n");
- break;
- default:
- dev_info(info->device, "NAND ECC UNKNOWN\n");
- break;
- }
-
- /* If you use u-boot BBT creation code, specifying this flag will
- * let the kernel fish out the BBT from the NAND, and also skip the
- * full NAND scan that can take 1/2s or so. Little things... */
- if (set->flash_bbt) {
+ /*
+ * If you use u-boot BBT creation code, specifying this flag will
+ * let the kernel fish out the BBT from the NAND.
+ */
+ if (set->flash_bbt)
chip->bbt_options |= NAND_BBT_USE_FLASH;
- chip->options |= NAND_SKIP_BBTSCAN;
- }
}
/**
@@ -923,28 +929,146 @@ static void s3c2410_nand_init_chip(struct s3c2410_nand_info *info,
*
* The internal state is currently limited to the ECC state information.
*/
-static void s3c2410_nand_update_chip(struct s3c2410_nand_info *info,
- struct s3c2410_nand_mtd *nmtd)
+static int s3c2410_nand_update_chip(struct s3c2410_nand_info *info,
+ struct s3c2410_nand_mtd *nmtd)
{
struct nand_chip *chip = &nmtd->chip;
- dev_dbg(info->device, "chip %p => page shift %d\n",
- chip, chip->page_shift);
+ switch (chip->ecc.mode) {
- if (chip->ecc.mode != NAND_ECC_HW)
- return;
+ case NAND_ECC_NONE:
+ dev_info(info->device, "ECC disabled\n");
+ break;
+
+ case NAND_ECC_SOFT:
+ /*
+ * This driver expects Hamming-based ECC when ecc_mode is set
+ * to NAND_ECC_SOFT. Force ecc.algo to NAND_ECC_HAMMING to
+ * avoid adding an extra ecc_algo field to
+ * s3c2410_platform_nand.
+ */
+ chip->ecc.algo = NAND_ECC_HAMMING;
+ dev_info(info->device, "soft ECC\n");
+ break;
+
+ case NAND_ECC_HW:
+ chip->ecc.calculate = s3c2410_nand_calculate_ecc;
+ chip->ecc.correct = s3c2410_nand_correct_data;
+ chip->ecc.strength = 1;
+
+ switch (info->cpu_type) {
+ case TYPE_S3C2410:
+ chip->ecc.hwctl = s3c2410_nand_enable_hwecc;
+ chip->ecc.calculate = s3c2410_nand_calculate_ecc;
+ break;
+
+ case TYPE_S3C2412:
+ chip->ecc.hwctl = s3c2412_nand_enable_hwecc;
+ chip->ecc.calculate = s3c2412_nand_calculate_ecc;
+ break;
+
+ case TYPE_S3C2440:
+ chip->ecc.hwctl = s3c2440_nand_enable_hwecc;
+ chip->ecc.calculate = s3c2440_nand_calculate_ecc;
+ break;
+ }
+
+ dev_dbg(info->device, "chip %p => page shift %d\n",
+ chip, chip->page_shift);
/* change the behaviour depending on whether we are using
* the large or small page nand device */
+ if (chip->page_shift > 10) {
+ chip->ecc.size = 256;
+ chip->ecc.bytes = 3;
+ } else {
+ chip->ecc.size = 512;
+ chip->ecc.bytes = 3;
+ mtd_set_ooblayout(nand_to_mtd(chip),
+ &s3c2410_ooblayout_ops);
+ }
- if (chip->page_shift > 10) {
- chip->ecc.size = 256;
- chip->ecc.bytes = 3;
- } else {
- chip->ecc.size = 512;
- chip->ecc.bytes = 3;
- mtd_set_ooblayout(nand_to_mtd(chip), &s3c2410_ooblayout_ops);
+ dev_info(info->device, "hardware ECC\n");
+ break;
+
+ default:
+ dev_err(info->device, "invalid ECC mode!\n");
+ return -EINVAL;
}
+
+ if (chip->bbt_options & NAND_BBT_USE_FLASH)
+ chip->options |= NAND_SKIP_BBTSCAN;
+
+ return 0;
+}
+
+static const struct of_device_id s3c24xx_nand_dt_ids[] = {
+ {
+ .compatible = "samsung,s3c2410-nand",
+ .data = &s3c2410_nand_devtype_data,
+ }, {
+ /* also compatible with s3c6400 */
+ .compatible = "samsung,s3c2412-nand",
+ .data = &s3c2412_nand_devtype_data,
+ }, {
+ .compatible = "samsung,s3c2440-nand",
+ .data = &s3c2440_nand_devtype_data,
+ },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, s3c24xx_nand_dt_ids);
+
+static int s3c24xx_nand_probe_dt(struct platform_device *pdev)
+{
+ const struct s3c24XX_nand_devtype_data *devtype_data;
+ struct s3c2410_platform_nand *pdata;
+ struct s3c2410_nand_info *info = platform_get_drvdata(pdev);
+ struct device_node *np = pdev->dev.of_node, *child;
+ struct s3c2410_nand_set *sets;
+
+ devtype_data = of_device_get_match_data(&pdev->dev);
+ if (!devtype_data)
+ return -ENODEV;
+
+ info->cpu_type = devtype_data->type;
+
+ pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
+ if (!pdata)
+ return -ENOMEM;
+
+ pdev->dev.platform_data = pdata;
+
+ pdata->nr_sets = of_get_child_count(np);
+ if (!pdata->nr_sets)
+ return 0;
+
+ sets = devm_kzalloc(&pdev->dev, sizeof(*sets) * pdata->nr_sets,
+ GFP_KERNEL);
+ if (!sets)
+ return -ENOMEM;
+
+ pdata->sets = sets;
+
+ for_each_available_child_of_node(np, child) {
+ sets->name = (char *)child->name;
+ sets->of_node = child;
+ sets->nr_chips = 1;
+
+ of_node_get(child);
+
+ sets++;
+ }
+
+ return 0;
+}
+
+static int s3c24xx_nand_probe_pdata(struct platform_device *pdev)
+{
+ struct s3c2410_nand_info *info = platform_get_drvdata(pdev);
+
+ info->cpu_type = platform_get_device_id(pdev)->driver_data;
+
+ return 0;
}
/* s3c24xx_nand_probe
@@ -956,8 +1080,7 @@ static void s3c2410_nand_update_chip(struct s3c2410_nand_info *info,
*/
static int s3c24xx_nand_probe(struct platform_device *pdev)
{
- struct s3c2410_platform_nand *plat = to_nand_plat(pdev);
- enum s3c_cpu_type cpu_type;
+ struct s3c2410_platform_nand *plat;
struct s3c2410_nand_info *info;
struct s3c2410_nand_mtd *nmtd;
struct s3c2410_nand_set *sets;
@@ -967,8 +1090,6 @@ static int s3c24xx_nand_probe(struct platform_device *pdev)
int nr_sets;
int setno;
- cpu_type = platform_get_device_id(pdev)->driver_data;
-
info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
if (info == NULL) {
err = -ENOMEM;
@@ -990,6 +1111,16 @@ static int s3c24xx_nand_probe(struct platform_device *pdev)
s3c2410_nand_clk_set_state(info, CLOCK_ENABLE);
+ if (pdev->dev.of_node)
+ err = s3c24xx_nand_probe_dt(pdev);
+ else
+ err = s3c24xx_nand_probe_pdata(pdev);
+
+ if (err)
+ goto exit_error;
+
+ plat = to_nand_plat(pdev);
+
/* allocate and map the resource */
/* currently we assume we have the one resource */
@@ -998,7 +1129,6 @@ static int s3c24xx_nand_probe(struct platform_device *pdev)
info->device = &pdev->dev;
info->platform = plat;
- info->cpu_type = cpu_type;
info->regs = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(info->regs)) {
@@ -1008,12 +1138,6 @@ static int s3c24xx_nand_probe(struct platform_device *pdev)
dev_dbg(&pdev->dev, "mapped registers at %p\n", info->regs);
- /* initialise the hardware */
-
- err = s3c2410_nand_inithw(info);
- if (err != 0)
- goto exit_error;
-
sets = (plat != NULL) ? plat->sets : NULL;
nr_sets = (plat != NULL) ? plat->nr_sets : 1;
@@ -1046,7 +1170,9 @@ static int s3c24xx_nand_probe(struct platform_device *pdev)
NULL);
if (nmtd->scan_res == 0) {
- s3c2410_nand_update_chip(info, nmtd);
+ err = s3c2410_nand_update_chip(info, nmtd);
+ if (err < 0)
+ goto exit_error;
nand_scan_tail(mtd);
s3c2410_nand_add_partition(info, nmtd, sets);
}
@@ -1055,6 +1181,11 @@ static int s3c24xx_nand_probe(struct platform_device *pdev)
sets++;
}
+ /* initialise the hardware */
+ err = s3c2410_nand_inithw(info);
+ if (err != 0)
+ goto exit_error;
+
err = s3c2410_nand_cpufreq_register(info);
if (err < 0) {
dev_err(&pdev->dev, "failed to init cpufreq support\n");
@@ -1155,6 +1286,7 @@ static struct platform_driver s3c24xx_nand_driver = {
.id_table = s3c24xx_driver_ids,
.driver = {
.name = "s3c24xx-nand",
+ .of_match_table = s3c24xx_nand_dt_ids,
},
};
diff --git a/drivers/mtd/nand/socrates_nand.c b/drivers/mtd/nand/socrates_nand.c
index 888fd314c62a..72369bd079af 100644
--- a/drivers/mtd/nand/socrates_nand.c
+++ b/drivers/mtd/nand/socrates_nand.c
@@ -187,17 +187,9 @@ static int socrates_nand_probe(struct platform_device *ofdev)
dev_set_drvdata(&ofdev->dev, host);
- /* first scan to find the device and get the page size */
- if (nand_scan_ident(mtd, 1, NULL)) {
- res = -ENXIO;
+ res = nand_scan(mtd, 1);
+ if (res)
goto out;
- }
-
- /* second phase scan */
- if (nand_scan_tail(mtd)) {
- res = -ENXIO;
- goto out;
- }
res = mtd_device_register(mtd, NULL, 0);
if (!res)
diff --git a/drivers/mtd/nand/sunxi_nand.c b/drivers/mtd/nand/sunxi_nand.c
index 8b8470c4e6d0..e40482a65de6 100644
--- a/drivers/mtd/nand/sunxi_nand.c
+++ b/drivers/mtd/nand/sunxi_nand.c
@@ -145,6 +145,7 @@
#define NFC_ECC_PIPELINE BIT(3)
#define NFC_ECC_EXCEPTION BIT(4)
#define NFC_ECC_BLOCK_SIZE_MSK BIT(5)
+#define NFC_ECC_BLOCK_512 BIT(5)
#define NFC_RANDOM_EN BIT(9)
#define NFC_RANDOM_DIRECTION BIT(10)
#define NFC_ECC_MODE_MSK GENMASK(15, 12)
@@ -817,6 +818,9 @@ static void sunxi_nfc_hw_ecc_enable(struct mtd_info *mtd)
ecc_ctl |= NFC_ECC_EN | NFC_ECC_MODE(data->mode) | NFC_ECC_EXCEPTION |
NFC_ECC_PIPELINE;
+ if (nand->ecc.size == 512)
+ ecc_ctl |= NFC_ECC_BLOCK_512;
+
writel(ecc_ctl, nfc->regs + NFC_REG_ECC_CTL);
}
diff --git a/drivers/mtd/nand/tango_nand.c b/drivers/mtd/nand/tango_nand.c
new file mode 100644
index 000000000000..28c7f474be77
--- /dev/null
+++ b/drivers/mtd/nand/tango_nand.c
@@ -0,0 +1,676 @@
+/*
+ * Copyright (C) 2016 Sigma Designs
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ */
+
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/clk.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/mtd/nand.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/platform_device.h>
+
+/* Offsets relative to chip->base */
+#define PBUS_CMD 0
+#define PBUS_ADDR 4
+#define PBUS_DATA 8
+
+/* Offsets relative to reg_base */
+#define NFC_STATUS 0x00
+#define NFC_FLASH_CMD 0x04
+#define NFC_DEVICE_CFG 0x08
+#define NFC_TIMING1 0x0c
+#define NFC_TIMING2 0x10
+#define NFC_XFER_CFG 0x14
+#define NFC_PKT_0_CFG 0x18
+#define NFC_PKT_N_CFG 0x1c
+#define NFC_BB_CFG 0x20
+#define NFC_ADDR_PAGE 0x24
+#define NFC_ADDR_OFFSET 0x28
+#define NFC_XFER_STATUS 0x2c
+
+/* NFC_STATUS values */
+#define CMD_READY BIT(31)
+
+/* NFC_FLASH_CMD values */
+#define NFC_READ 1
+#define NFC_WRITE 2
+
+/* NFC_XFER_STATUS values */
+#define PAGE_IS_EMPTY BIT(16)
+
+/* Offsets relative to mem_base */
+#define METADATA 0x000
+#define ERROR_REPORT 0x1c0
+
+/*
+ * Error reports are split across two bytes:
+ * byte 0 for the first packet in the page (PKT_0)
+ * byte 1 for other packets in the page (PKT_N, for N > 0)
+ * ERR_COUNT_PKT_N is the max error count over all but the first packet.
+ */
+#define DECODE_OK_PKT_0(v) ((v) & BIT(7))
+#define DECODE_OK_PKT_N(v) ((v) & BIT(15))
+#define ERR_COUNT_PKT_0(v) (((v) >> 0) & 0x3f)
+#define ERR_COUNT_PKT_N(v) (((v) >> 8) & 0x3f)
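
A minimal sketch of how these macros carve up one ERROR_REPORT word (the sample value is made up):

#include <stdio.h>
#include <stdint.h>

#define BIT(n)			(1u << (n))
#define DECODE_OK_PKT_0(v)	((v) & BIT(7))
#define DECODE_OK_PKT_N(v)	((v) & BIT(15))
#define ERR_COUNT_PKT_0(v)	(((v) >> 0) & 0x3f)
#define ERR_COUNT_PKT_N(v)	(((v) >> 8) & 0x3f)

int main(void)
{
	uint32_t res = 0x8283;	/* hypothetical report: both decodes OK */

	if (DECODE_OK_PKT_0(res) && DECODE_OK_PKT_N(res))
		printf("max bitflips = %u\n",
		       ERR_COUNT_PKT_0(res) > ERR_COUNT_PKT_N(res) ?
		       ERR_COUNT_PKT_0(res) : ERR_COUNT_PKT_N(res));	/* -> 3 */
	else
		printf("uncorrectable\n");
	return 0;
}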
+
+/* Offsets relative to pbus_base */
+#define PBUS_CS_CTRL 0x83c
+#define PBUS_PAD_MODE 0x8f0
+
+/* PBUS_CS_CTRL values */
+#define PBUS_IORDY BIT(31)
+
+/*
+ * PBUS_PAD_MODE values
+ * In raw mode, the driver communicates directly with the NAND chips.
+ * In NFC mode, the NAND Flash controller manages the communication.
+ * We use NFC mode for read and write; raw mode for everything else.
+ */
+#define MODE_RAW 0
+#define MODE_NFC BIT(31)
+
+#define METADATA_SIZE 4
+#define BBM_SIZE 6
+#define FIELD_ORDER 15
+
+#define MAX_CS 4
+
+struct tango_nfc {
+ struct nand_hw_control hw;
+ void __iomem *reg_base;
+ void __iomem *mem_base;
+ void __iomem *pbus_base;
+ struct tango_chip *chips[MAX_CS];
+ struct dma_chan *chan;
+ int freq_kHz;
+};
+
+#define to_tango_nfc(ptr) container_of(ptr, struct tango_nfc, hw)
+
+struct tango_chip {
+ struct nand_chip nand_chip;
+ void __iomem *base;
+ u32 timing1;
+ u32 timing2;
+ u32 xfer_cfg;
+ u32 pkt_0_cfg;
+ u32 pkt_n_cfg;
+ u32 bb_cfg;
+};
+
+#define to_tango_chip(ptr) container_of(ptr, struct tango_chip, nand_chip)
+
+#define XFER_CFG(cs, page_count, steps, metadata_size) \
+ ((cs) << 24 | (page_count) << 16 | (steps) << 8 | (metadata_size))
+
+#define PKT_CFG(size, strength) ((size) << 16 | (strength))
+
+#define BB_CFG(bb_offset, bb_size) ((bb_offset) << 16 | (bb_size))
+
+#define TIMING(t0, t1, t2, t3) ((t0) << 24 | (t1) << 16 | (t2) << 8 | (t3))
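
These helpers just pack four byte-sized fields into one register word; a quick sketch of XFER_CFG with assumed values:

#include <stdio.h>
#include <stdint.h>

#define XFER_CFG(cs, page_count, steps, metadata_size) \
	((cs) << 24 | (page_count) << 16 | (steps) << 8 | (metadata_size))

int main(void)
{
	/* illustrative: CS 1, one page, 8 ECC steps, 4 metadata bytes */
	uint32_t v = XFER_CFG(1, 1, 8, 4);

	printf("0x%08x\n", v);	/* -> 0x01010804 */
	return 0;
}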
+
+static void tango_cmd_ctrl(struct mtd_info *mtd, int dat, unsigned int ctrl)
+{
+ struct tango_chip *tchip = to_tango_chip(mtd_to_nand(mtd));
+
+ if (ctrl & NAND_CLE)
+ writeb_relaxed(dat, tchip->base + PBUS_CMD);
+
+ if (ctrl & NAND_ALE)
+ writeb_relaxed(dat, tchip->base + PBUS_ADDR);
+}
+
+static int tango_dev_ready(struct mtd_info *mtd)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct tango_nfc *nfc = to_tango_nfc(chip->controller);
+
+ return readl_relaxed(nfc->pbus_base + PBUS_CS_CTRL) & PBUS_IORDY;
+}
+
+static u8 tango_read_byte(struct mtd_info *mtd)
+{
+ struct tango_chip *tchip = to_tango_chip(mtd_to_nand(mtd));
+
+ return readb_relaxed(tchip->base + PBUS_DATA);
+}
+
+static void tango_read_buf(struct mtd_info *mtd, u8 *buf, int len)
+{
+ struct tango_chip *tchip = to_tango_chip(mtd_to_nand(mtd));
+
+ ioread8_rep(tchip->base + PBUS_DATA, buf, len);
+}
+
+static void tango_write_buf(struct mtd_info *mtd, const u8 *buf, int len)
+{
+ struct tango_chip *tchip = to_tango_chip(mtd_to_nand(mtd));
+
+ iowrite8_rep(tchip->base + PBUS_DATA, buf, len);
+}
+
+static void tango_select_chip(struct mtd_info *mtd, int idx)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct tango_nfc *nfc = to_tango_nfc(chip->controller);
+ struct tango_chip *tchip = to_tango_chip(chip);
+
+ if (idx < 0)
+ return; /* No "chip unselect" function */
+
+ writel_relaxed(tchip->timing1, nfc->reg_base + NFC_TIMING1);
+ writel_relaxed(tchip->timing2, nfc->reg_base + NFC_TIMING2);
+ writel_relaxed(tchip->xfer_cfg, nfc->reg_base + NFC_XFER_CFG);
+ writel_relaxed(tchip->pkt_0_cfg, nfc->reg_base + NFC_PKT_0_CFG);
+ writel_relaxed(tchip->pkt_n_cfg, nfc->reg_base + NFC_PKT_N_CFG);
+ writel_relaxed(tchip->bb_cfg, nfc->reg_base + NFC_BB_CFG);
+}
+
+/*
+ * The controller does not check for bitflips in erased pages,
+ * therefore software must check instead.
+ */
+static int check_erased_page(struct nand_chip *chip, u8 *buf)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ u8 *meta = chip->oob_poi + BBM_SIZE;
+ u8 *ecc = chip->oob_poi + BBM_SIZE + METADATA_SIZE;
+ const int ecc_size = chip->ecc.bytes;
+ const int pkt_size = chip->ecc.size;
+ int i, res, meta_len, bitflips = 0;
+
+ for (i = 0; i < chip->ecc.steps; ++i) {
+ meta_len = i ? 0 : METADATA_SIZE;
+ res = nand_check_erased_ecc_chunk(buf, pkt_size, ecc, ecc_size,
+ meta, meta_len,
+ chip->ecc.strength);
+ if (res < 0)
+ mtd->ecc_stats.failed++;
+
+ bitflips = max(res, bitflips);
+ buf += pkt_size;
+ ecc += ecc_size;
+ }
+
+ return bitflips;
+}
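
nand_check_erased_ecc_chunk() is a stock NAND-core helper that counts the 0 bits in a supposedly-erased chunk and accepts it if they stay within the ECC strength. A toy user-space model of that idea — not the kernel helper itself, and with made-up data:

#include <stdio.h>
#include <stdint.h>

static int count_zero_bits(const uint8_t *buf, int len)
{
	int n = 0;

	while (len--)
		n += 8 - __builtin_popcount(*buf++);
	return n;
}

int main(void)
{
	uint8_t chunk[4] = { 0xff, 0xfb, 0xff, 0x7f };	/* two bitflips */
	int strength = 8;	/* illustrative ECC strength */
	int flips = count_zero_bits(chunk, sizeof(chunk));

	if (flips <= strength)
		printf("erased page, %d bitflips\n", flips);
	else
		printf("uncorrectable\n");
	return 0;
}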
+
+static int decode_error_report(struct tango_nfc *nfc)
+{
+ u32 status, res;
+
+ status = readl_relaxed(nfc->reg_base + NFC_XFER_STATUS);
+ if (status & PAGE_IS_EMPTY)
+ return 0;
+
+ res = readl_relaxed(nfc->mem_base + ERROR_REPORT);
+
+ if (DECODE_OK_PKT_0(res) && DECODE_OK_PKT_N(res))
+ return max(ERR_COUNT_PKT_0(res), ERR_COUNT_PKT_N(res));
+
+ return -EBADMSG;
+}
+
+static void tango_dma_callback(void *arg)
+{
+ complete(arg);
+}
+
+static int do_dma(struct tango_nfc *nfc, int dir, int cmd, const void *buf,
+ int len, int page)
+{
+ void __iomem *addr = nfc->reg_base + NFC_STATUS;
+ struct dma_chan *chan = nfc->chan;
+ struct dma_async_tx_descriptor *desc;
+ struct scatterlist sg;
+ struct completion tx_done;
+ int err = -EIO;
+ u32 res, val;
+
+ sg_init_one(&sg, buf, len);
+ if (dma_map_sg(chan->device->dev, &sg, 1, dir) != 1)
+ return -EIO;
+
+ desc = dmaengine_prep_slave_sg(chan, &sg, 1, dir, DMA_PREP_INTERRUPT);
+ if (!desc)
+ goto dma_unmap;
+
+ desc->callback = tango_dma_callback;
+ desc->callback_param = &tx_done;
+ init_completion(&tx_done);
+
+ writel_relaxed(MODE_NFC, nfc->pbus_base + PBUS_PAD_MODE);
+
+ writel_relaxed(page, nfc->reg_base + NFC_ADDR_PAGE);
+ writel_relaxed(0, nfc->reg_base + NFC_ADDR_OFFSET);
+ writel_relaxed(cmd, nfc->reg_base + NFC_FLASH_CMD);
+
+ dmaengine_submit(desc);
+ dma_async_issue_pending(chan);
+
+ res = wait_for_completion_timeout(&tx_done, HZ);
+ if (res > 0)
+ err = readl_poll_timeout(addr, val, val & CMD_READY, 0, 1000);
+
+ writel_relaxed(MODE_RAW, nfc->pbus_base + PBUS_PAD_MODE);
+
+dma_unmap:
+ dma_unmap_sg(chan->device->dev, &sg, 1, dir);
+
+ return err;
+}
+
+static int tango_read_page(struct mtd_info *mtd, struct nand_chip *chip,
+ u8 *buf, int oob_required, int page)
+{
+ struct tango_nfc *nfc = to_tango_nfc(chip->controller);
+ int err, res, len = mtd->writesize;
+
+ if (oob_required)
+ chip->ecc.read_oob(mtd, chip, page);
+
+ err = do_dma(nfc, DMA_FROM_DEVICE, NFC_READ, buf, len, page);
+ if (err)
+ return err;
+
+ res = decode_error_report(nfc);
+ if (res < 0) {
+ chip->ecc.read_oob_raw(mtd, chip, page);
+ res = check_erased_page(chip, buf);
+ }
+
+ return res;
+}
+
+static int tango_write_page(struct mtd_info *mtd, struct nand_chip *chip,
+ const u8 *buf, int oob_required, int page)
+{
+ struct tango_nfc *nfc = to_tango_nfc(chip->controller);
+ int err, len = mtd->writesize;
+
+ /* Calling tango_write_oob() would send PAGEPROG twice */
+ if (oob_required)
+ return -ENOTSUPP;
+
+ writel_relaxed(0xffffffff, nfc->mem_base + METADATA);
+ err = do_dma(nfc, DMA_TO_DEVICE, NFC_WRITE, buf, len, page);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static void aux_read(struct nand_chip *chip, u8 **buf, int len, int *pos)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+
+ *pos += len;
+
+ if (!*buf) {
+ /* skip over "len" bytes */
+ chip->cmdfunc(mtd, NAND_CMD_RNDOUT, *pos, -1);
+ } else {
+ tango_read_buf(mtd, *buf, len);
+ *buf += len;
+ }
+}
+
+static void aux_write(struct nand_chip *chip, const u8 **buf, int len, int *pos)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+
+ *pos += len;
+
+ if (!*buf) {
+ /* skip over "len" bytes */
+ chip->cmdfunc(mtd, NAND_CMD_SEQIN, *pos, -1);
+ } else {
+ tango_write_buf(mtd, *buf, len);
+ *buf += len;
+ }
+}
+
+/*
+ * Physical page layout (not drawn to scale)
+ *
+ * NB: Bad Block Marker area splits PKT_N in two (N1, N2).
+ *
+ * +---+-----------------+-------+-----+-----------+-----+----+-------+
+ * | M | PKT_0 | ECC_0 | ... | N1 | BBM | N2 | ECC_N |
+ * +---+-----------------+-------+-----+-----------+-----+----+-------+
+ *
+ * Logical page layout:
+ *
+ * +-----+---+-------+-----+-------+
+ * oob = | BBM | M | ECC_0 | ... | ECC_N |
+ * +-----+---+-------+-----+-------+
+ *
+ * +-----------------+-----+-----------------+
+ * buf = | PKT_0 | ... | PKT_N |
+ * +-----------------+-----+-----------------+
+ */
+static void raw_read(struct nand_chip *chip, u8 *buf, u8 *oob)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ u8 *oob_orig = oob;
+ const int page_size = mtd->writesize;
+ const int ecc_size = chip->ecc.bytes;
+ const int pkt_size = chip->ecc.size;
+ int pos = 0; /* position within physical page */
+ int rem = page_size; /* bytes remaining until BBM area */
+
+ if (oob)
+ oob += BBM_SIZE;
+
+ aux_read(chip, &oob, METADATA_SIZE, &pos);
+
+ while (rem > pkt_size) {
+ aux_read(chip, &buf, pkt_size, &pos);
+ aux_read(chip, &oob, ecc_size, &pos);
+ rem = page_size - pos;
+ }
+
+ aux_read(chip, &buf, rem, &pos);
+ aux_read(chip, &oob_orig, BBM_SIZE, &pos);
+ aux_read(chip, &buf, pkt_size - rem, &pos);
+ aux_read(chip, &oob, ecc_size, &pos);
+}
+
+static void raw_write(struct nand_chip *chip, const u8 *buf, const u8 *oob)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ const u8 *oob_orig = oob;
+ const int page_size = mtd->writesize;
+ const int ecc_size = chip->ecc.bytes;
+ const int pkt_size = chip->ecc.size;
+ int pos = 0; /* position within physical page */
+ int rem = page_size; /* bytes remaining until BBM area */
+
+ if (oob)
+ oob += BBM_SIZE;
+
+ aux_write(chip, &oob, METADATA_SIZE, &pos);
+
+ while (rem > pkt_size) {
+ aux_write(chip, &buf, pkt_size, &pos);
+ aux_write(chip, &oob, ecc_size, &pos);
+ rem = page_size - pos;
+ }
+
+ aux_write(chip, &buf, rem, &pos);
+ aux_write(chip, &oob_orig, BBM_SIZE, &pos);
+ aux_write(chip, &buf, pkt_size - rem, &pos);
+ aux_write(chip, &oob, ecc_size, &pos);
+}
+
+static int tango_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
+ u8 *buf, int oob_required, int page)
+{
+ chip->cmdfunc(mtd, NAND_CMD_READ0, 0, page);
+ raw_read(chip, buf, chip->oob_poi);
+ return 0;
+}
+
+static int tango_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
+ const u8 *buf, int oob_required, int page)
+{
+ chip->cmdfunc(mtd, NAND_CMD_SEQIN, 0, page);
+ raw_write(chip, buf, chip->oob_poi);
+ chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
+ return 0;
+}
+
+static int tango_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
+ int page)
+{
+ chip->cmdfunc(mtd, NAND_CMD_READ0, 0, page);
+ raw_read(chip, NULL, chip->oob_poi);
+ return 0;
+}
+
+static int tango_write_oob(struct mtd_info *mtd, struct nand_chip *chip,
+ int page)
+{
+ chip->cmdfunc(mtd, NAND_CMD_SEQIN, 0, page);
+ raw_write(chip, NULL, chip->oob_poi);
+ chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
+ chip->waitfunc(mtd, chip);
+ return 0;
+}
+
+static int oob_ecc(struct mtd_info *mtd, int idx, struct mtd_oob_region *res)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct nand_ecc_ctrl *ecc = &chip->ecc;
+
+ if (idx >= ecc->steps)
+ return -ERANGE;
+
+ res->offset = BBM_SIZE + METADATA_SIZE + ecc->bytes * idx;
+ res->length = ecc->bytes;
+
+ return 0;
+}
+
+static int oob_free(struct mtd_info *mtd, int idx, struct mtd_oob_region *res)
+{
+ return -ERANGE; /* no free space in spare area */
+}
+
+static const struct mtd_ooblayout_ops tango_nand_ooblayout_ops = {
+ .ecc = oob_ecc,
+ .free = oob_free,
+};
+
+static u32 to_ticks(int kHz, int ps)
+{
+ return DIV_ROUND_UP_ULL((u64)kHz * ps, NSEC_PER_SEC);
+}
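
to_ticks() works because freq[kHz] x time[ps] = 10^3 x 10^-12 = a value 10^9 times the tick count, so dividing the raw product by NSEC_PER_SEC yields whole clock ticks, rounded up. A standalone check with made-up numbers:

#include <stdio.h>
#include <stdint.h>

#define NSEC_PER_SEC		1000000000ULL
#define DIV_ROUND_UP_ULL(n, d)	(((n) + (d) - 1) / (d))

static uint32_t to_ticks(int kHz, int ps)
{
	return DIV_ROUND_UP_ULL((uint64_t)kHz * ps, NSEC_PER_SEC);
}

int main(void)
{
	/* hypothetical 125 MHz controller clock, tREA_max = 20000 ps */
	printf("%u ticks\n", to_ticks(125000, 20000));	/* 2.5 -> 3 ticks */
	return 0;
}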
+
+static int tango_set_timings(struct mtd_info *mtd,
+ const struct nand_data_interface *conf,
+ bool check_only)
+{
+ const struct nand_sdr_timings *sdr = nand_get_sdr_timings(conf);
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct tango_nfc *nfc = to_tango_nfc(chip->controller);
+ struct tango_chip *tchip = to_tango_chip(chip);
+ u32 Trdy, Textw, Twc, Twpw, Tacc, Thold, Trpw, Textr;
+ int kHz = nfc->freq_kHz;
+
+ if (IS_ERR(sdr))
+ return PTR_ERR(sdr);
+
+ if (check_only)
+ return 0;
+
+ Trdy = to_ticks(kHz, sdr->tCEA_max - sdr->tREA_max);
+ Textw = to_ticks(kHz, sdr->tWB_max);
+ Twc = to_ticks(kHz, sdr->tWC_min);
+ Twpw = to_ticks(kHz, sdr->tWC_min - sdr->tWP_min);
+
+ Tacc = to_ticks(kHz, sdr->tREA_max);
+ Thold = to_ticks(kHz, sdr->tREH_min);
+ Trpw = to_ticks(kHz, sdr->tRC_min - sdr->tREH_min);
+ Textr = to_ticks(kHz, sdr->tRHZ_max);
+
+ tchip->timing1 = TIMING(Trdy, Textw, Twc, Twpw);
+ tchip->timing2 = TIMING(Tacc, Thold, Trpw, Textr);
+
+ return 0;
+}
+
+static int chip_init(struct device *dev, struct device_node *np)
+{
+ u32 cs;
+ int err, res;
+ struct mtd_info *mtd;
+ struct nand_chip *chip;
+ struct tango_chip *tchip;
+ struct nand_ecc_ctrl *ecc;
+ struct tango_nfc *nfc = dev_get_drvdata(dev);
+
+ tchip = devm_kzalloc(dev, sizeof(*tchip), GFP_KERNEL);
+ if (!tchip)
+ return -ENOMEM;
+
+ res = of_property_count_u32_elems(np, "reg");
+ if (res < 0)
+ return res;
+
+ if (res != 1)
+ return -ENOTSUPP; /* Multi-CS chips are not supported */
+
+ err = of_property_read_u32_index(np, "reg", 0, &cs);
+ if (err)
+ return err;
+
+ if (cs >= MAX_CS)
+ return -EINVAL;
+
+ chip = &tchip->nand_chip;
+ ecc = &chip->ecc;
+ mtd = nand_to_mtd(chip);
+
+ chip->read_byte = tango_read_byte;
+ chip->write_buf = tango_write_buf;
+ chip->read_buf = tango_read_buf;
+ chip->select_chip = tango_select_chip;
+ chip->cmd_ctrl = tango_cmd_ctrl;
+ chip->dev_ready = tango_dev_ready;
+ chip->setup_data_interface = tango_set_timings;
+ chip->options = NAND_USE_BOUNCE_BUFFER |
+ NAND_NO_SUBPAGE_WRITE |
+ NAND_WAIT_TCCS;
+ chip->controller = &nfc->hw;
+ tchip->base = nfc->pbus_base + (cs * 256);
+
+ nand_set_flash_node(chip, np);
+ mtd_set_ooblayout(mtd, &tango_nand_ooblayout_ops);
+ mtd->dev.parent = dev;
+
+ err = nand_scan_ident(mtd, 1, NULL);
+ if (err)
+ return err;
+
+ ecc->mode = NAND_ECC_HW;
+ ecc->algo = NAND_ECC_BCH;
+ ecc->bytes = DIV_ROUND_UP(ecc->strength * FIELD_ORDER, BITS_PER_BYTE);
+
+ ecc->read_page_raw = tango_read_page_raw;
+ ecc->write_page_raw = tango_write_page_raw;
+ ecc->read_page = tango_read_page;
+ ecc->write_page = tango_write_page;
+ ecc->read_oob = tango_read_oob;
+ ecc->write_oob = tango_write_oob;
+ ecc->options = NAND_ECC_CUSTOM_PAGE_ACCESS;
+
+ err = nand_scan_tail(mtd);
+ if (err)
+ return err;
+
+ tchip->xfer_cfg = XFER_CFG(cs, 1, ecc->steps, METADATA_SIZE);
+ tchip->pkt_0_cfg = PKT_CFG(ecc->size + METADATA_SIZE, ecc->strength);
+ tchip->pkt_n_cfg = PKT_CFG(ecc->size, ecc->strength);
+ tchip->bb_cfg = BB_CFG(mtd->writesize, BBM_SIZE);
+
+ err = mtd_device_register(mtd, NULL, 0);
+ if (err)
+ return err;
+
+ nfc->chips[cs] = tchip;
+
+ return 0;
+}
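
chip_init() above sizes the BCH code at FIELD_ORDER (15) bits per correctable bit, rounded up to whole bytes per ECC step; a worked sketch with an assumed strength:

#include <stdio.h>

#define FIELD_ORDER		15
#define BITS_PER_BYTE		8
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	int strength = 8;	/* hypothetical: 8 correctable bitflips per step */

	/* 8 * 15 = 120 bits -> 15 bytes of ECC per step */
	printf("%d ECC bytes per step\n",
	       DIV_ROUND_UP(strength * FIELD_ORDER, BITS_PER_BYTE));
	return 0;
}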
+
+static int tango_nand_remove(struct platform_device *pdev)
+{
+ int cs;
+ struct tango_nfc *nfc = platform_get_drvdata(pdev);
+
+ dma_release_channel(nfc->chan);
+
+ for (cs = 0; cs < MAX_CS; ++cs) {
+ if (nfc->chips[cs])
+ nand_release(nand_to_mtd(&nfc->chips[cs]->nand_chip));
+ }
+
+ return 0;
+}
+
+static int tango_nand_probe(struct platform_device *pdev)
+{
+ int err;
+ struct clk *clk;
+ struct resource *res;
+ struct tango_nfc *nfc;
+ struct device_node *np;
+
+ nfc = devm_kzalloc(&pdev->dev, sizeof(*nfc), GFP_KERNEL);
+ if (!nfc)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ nfc->reg_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(nfc->reg_base))
+ return PTR_ERR(nfc->reg_base);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ nfc->mem_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(nfc->mem_base))
+ return PTR_ERR(nfc->mem_base);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
+ nfc->pbus_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(nfc->pbus_base))
+ return PTR_ERR(nfc->pbus_base);
+
+ clk = clk_get(&pdev->dev, NULL);
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
+
+ nfc->chan = dma_request_chan(&pdev->dev, "nfc_sbox");
+ if (IS_ERR(nfc->chan))
+ return PTR_ERR(nfc->chan);
+
+ platform_set_drvdata(pdev, nfc);
+ nand_hw_control_init(&nfc->hw);
+ nfc->freq_kHz = clk_get_rate(clk) / 1000;
+
+ for_each_child_of_node(pdev->dev.of_node, np) {
+ err = chip_init(&pdev->dev, np);
+ if (err) {
+ tango_nand_remove(pdev);
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+static const struct of_device_id tango_nand_ids[] = {
+ { .compatible = "sigma,smp8758-nand" },
+ { /* sentinel */ }
+};
+
+static struct platform_driver tango_nand_driver = {
+ .probe = tango_nand_probe,
+ .remove = tango_nand_remove,
+ .driver = {
+ .name = "tango-nand",
+ .of_match_table = tango_nand_ids,
+ },
+};
+
+module_platform_driver(tango_nand_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Sigma Designs");
+MODULE_DESCRIPTION("Tango4 NAND Flash controller driver");
diff --git a/drivers/mtd/nand/tmio_nand.c b/drivers/mtd/nand/tmio_nand.c
index 08b30549ec0a..fc5e773f8b60 100644
--- a/drivers/mtd/nand/tmio_nand.c
+++ b/drivers/mtd/nand/tmio_nand.c
@@ -435,10 +435,10 @@ static int tmio_probe(struct platform_device *dev)
nand_chip->waitfunc = tmio_nand_wait;
/* Scan to find existence of the device */
- if (nand_scan(mtd, 1)) {
- retval = -ENODEV;
+ retval = nand_scan(mtd, 1);
+ if (retval)
goto err_irq;
- }
+
/* Register the partitions */
retval = mtd_device_parse_register(mtd, NULL, NULL,
data ? data->partition : NULL,
diff --git a/drivers/mtd/nand/vf610_nfc.c b/drivers/mtd/nand/vf610_nfc.c
index 3ad514c44dcb..3ea4bb19e12d 100644
--- a/drivers/mtd/nand/vf610_nfc.c
+++ b/drivers/mtd/nand/vf610_nfc.c
@@ -717,10 +717,9 @@ static int vf610_nfc_probe(struct platform_device *pdev)
vf610_nfc_preinit_controller(nfc);
/* first scan to find the device and get the page size */
- if (nand_scan_ident(mtd, 1, NULL)) {
- err = -ENXIO;
+ err = nand_scan_ident(mtd, 1, NULL);
+ if (err)
goto error;
- }
vf610_nfc_init_controller(nfc);
@@ -775,10 +774,9 @@ static int vf610_nfc_probe(struct platform_device *pdev)
}
/* second phase scan */
- if (nand_scan_tail(mtd)) {
- err = -ENXIO;
+ err = nand_scan_tail(mtd);
+ if (err)
goto error;
- }
platform_set_drvdata(pdev, mtd);
diff --git a/drivers/mtd/spi-nor/cadence-quadspi.c b/drivers/mtd/spi-nor/cadence-quadspi.c
index d403ba7b8f43..d489fbd07c12 100644
--- a/drivers/mtd/spi-nor/cadence-quadspi.c
+++ b/drivers/mtd/spi-nor/cadence-quadspi.c
@@ -1077,12 +1077,14 @@ static int cqspi_setup_flash(struct cqspi_st *cqspi, struct device_node *np)
/* Get flash device data */
for_each_available_child_of_node(dev->of_node, np) {
- if (of_property_read_u32(np, "reg", &cs)) {
+ ret = of_property_read_u32(np, "reg", &cs);
+ if (ret) {
dev_err(dev, "Couldn't determine chip select.\n");
goto err;
}
- if (cs > CQSPI_MAX_CHIPSELECT) {
+ if (cs >= CQSPI_MAX_CHIPSELECT) {
+ ret = -EINVAL;
dev_err(dev, "Chip select %d out of range.\n", cs);
goto err;
}
diff --git a/drivers/mtd/spi-nor/fsl-quadspi.c b/drivers/mtd/spi-nor/fsl-quadspi.c
index 5c82e4ef1904..b4d8953fb30a 100644
--- a/drivers/mtd/spi-nor/fsl-quadspi.c
+++ b/drivers/mtd/spi-nor/fsl-quadspi.c
@@ -224,7 +224,7 @@ struct fsl_qspi_devtype_data {
int driver_data;
};
-static struct fsl_qspi_devtype_data vybrid_data = {
+static const struct fsl_qspi_devtype_data vybrid_data = {
.devtype = FSL_QUADSPI_VYBRID,
.rxfifo = 128,
.txfifo = 64,
@@ -232,7 +232,7 @@ static struct fsl_qspi_devtype_data vybrid_data = {
.driver_data = QUADSPI_QUIRK_SWAP_ENDIAN,
};
-static struct fsl_qspi_devtype_data imx6sx_data = {
+static const struct fsl_qspi_devtype_data imx6sx_data = {
.devtype = FSL_QUADSPI_IMX6SX,
.rxfifo = 128,
.txfifo = 512,
@@ -241,7 +241,7 @@ static struct fsl_qspi_devtype_data imx6sx_data = {
| QUADSPI_QUIRK_TKT245618,
};
-static struct fsl_qspi_devtype_data imx7d_data = {
+static const struct fsl_qspi_devtype_data imx7d_data = {
.devtype = FSL_QUADSPI_IMX7D,
.rxfifo = 512,
.txfifo = 512,
@@ -250,7 +250,7 @@ static struct fsl_qspi_devtype_data imx7d_data = {
| QUADSPI_QUIRK_4X_INT_CLK,
};
-static struct fsl_qspi_devtype_data imx6ul_data = {
+static const struct fsl_qspi_devtype_data imx6ul_data = {
.devtype = FSL_QUADSPI_IMX6UL,
.rxfifo = 128,
.txfifo = 512,
diff --git a/drivers/mtd/spi-nor/spi-nor.c b/drivers/mtd/spi-nor/spi-nor.c
index d0fc165d7d66..da7cd69d4857 100644
--- a/drivers/mtd/spi-nor/spi-nor.c
+++ b/drivers/mtd/spi-nor/spi-nor.c
@@ -799,6 +799,7 @@ static const struct flash_info spi_nor_ids[] = {
{ "at25fs040", INFO(0x1f6604, 0, 64 * 1024, 8, SECT_4K) },
{ "at25df041a", INFO(0x1f4401, 0, 64 * 1024, 8, SECT_4K) },
+ { "at25df321", INFO(0x1f4700, 0, 64 * 1024, 64, SECT_4K) },
{ "at25df321a", INFO(0x1f4701, 0, 64 * 1024, 64, SECT_4K) },
{ "at25df641", INFO(0x1f4800, 0, 64 * 1024, 128, SECT_4K) },
@@ -825,6 +826,7 @@ static const struct flash_info spi_nor_ids[] = {
/* Everspin */
{ "mr25h256", CAT25_INFO( 32 * 1024, 1, 256, 2, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
{ "mr25h10", CAT25_INFO(128 * 1024, 1, 256, 3, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
+ { "mr25h40", CAT25_INFO(512 * 1024, 1, 256, 3, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
/* Fujitsu */
{ "mb85rs1mt", INFO(0x047f27, 0, 128 * 1024, 1, SPI_NOR_NO_ERASE) },
@@ -872,11 +874,13 @@ static const struct flash_info spi_nor_ids[] = {
{ "mx25l12805d", INFO(0xc22018, 0, 64 * 1024, 256, 0) },
{ "mx25l12855e", INFO(0xc22618, 0, 64 * 1024, 256, 0) },
{ "mx25l25635e", INFO(0xc22019, 0, 64 * 1024, 512, 0) },
+ { "mx25u25635f", INFO(0xc22539, 0, 64 * 1024, 512, SECT_4K) },
{ "mx25l25655e", INFO(0xc22619, 0, 64 * 1024, 512, 0) },
{ "mx66l51235l", INFO(0xc2201a, 0, 64 * 1024, 1024, SPI_NOR_QUAD_READ) },
{ "mx66l1g55g", INFO(0xc2261b, 0, 64 * 1024, 2048, SPI_NOR_QUAD_READ) },
/* Micron */
+ { "n25q016a", INFO(0x20bb15, 0, 64 * 1024, 32, SECT_4K | SPI_NOR_QUAD_READ) },
{ "n25q032", INFO(0x20ba16, 0, 64 * 1024, 64, SPI_NOR_QUAD_READ) },
{ "n25q032a", INFO(0x20bb16, 0, 64 * 1024, 64, SPI_NOR_QUAD_READ) },
{ "n25q064", INFO(0x20ba17, 0, 64 * 1024, 128, SECT_4K | SPI_NOR_QUAD_READ) },
@@ -905,7 +909,7 @@ static const struct flash_info spi_nor_ids[] = {
{ "s70fl01gs", INFO(0x010221, 0x4d00, 256 * 1024, 256, 0) },
{ "s25sl12800", INFO(0x012018, 0x0300, 256 * 1024, 64, 0) },
{ "s25sl12801", INFO(0x012018, 0x0301, 64 * 1024, 256, 0) },
- { "s25fl128s", INFO6(0x012018, 0x4d0180, 64 * 1024, 256, SECT_4K | SPI_NOR_QUAD_READ) },
+ { "s25fl128s", INFO6(0x012018, 0x4d0180, 64 * 1024, 256, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
{ "s25fl129p0", INFO(0x012018, 0x4d00, 256 * 1024, 64, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
{ "s25fl129p1", INFO(0x012018, 0x4d01, 64 * 1024, 256, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
{ "s25sl004a", INFO(0x010212, 0, 64 * 1024, 8, 0) },
@@ -921,6 +925,7 @@ static const struct flash_info spi_nor_ids[] = {
{ "s25fl132k", INFO(0x014016, 0, 64 * 1024, 64, SECT_4K) },
{ "s25fl164k", INFO(0x014017, 0, 64 * 1024, 128, SECT_4K) },
{ "s25fl204k", INFO(0x014013, 0, 64 * 1024, 8, SECT_4K | SPI_NOR_DUAL_READ) },
+ { "s25fl208k", INFO(0x014014, 0, 64 * 1024, 16, SECT_4K | SPI_NOR_DUAL_READ) },
/* SST -- large erase sizes are "overlays", "sectors" are 4K */
{ "sst25vf040b", INFO(0xbf258d, 0, 64 * 1024, 8, SECT_4K | SST_WRITE) },
@@ -1255,6 +1260,13 @@ static int spansion_quad_enable(struct spi_nor *nor)
return -EINVAL;
}
+ ret = spi_nor_wait_till_ready(nor);
+ if (ret) {
+ dev_err(nor->dev,
+ "timeout while writing configuration register\n");
+ return ret;
+ }
+
/* read back and check it */
ret = read_cr(nor);
if (!(ret > 0 && (ret & CR_QUAD_EN_SPAN))) {
diff --git a/drivers/net/appletalk/ipddp.c b/drivers/net/appletalk/ipddp.c
index e90c6a7333d7..31f89f1c6123 100644
--- a/drivers/net/appletalk/ipddp.c
+++ b/drivers/net/appletalk/ipddp.c
@@ -59,7 +59,6 @@ static int ipddp_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
static const struct net_device_ops ipddp_netdev_ops = {
.ndo_start_xmit = ipddp_xmit,
.ndo_do_ioctl = ipddp_ioctl,
- .ndo_change_mtu = eth_change_mtu,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
};
diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c
index 551f0f8dead3..c80b023092dd 100644
--- a/drivers/net/bonding/bond_alb.c
+++ b/drivers/net/bonding/bond_alb.c
@@ -950,13 +950,61 @@ static void alb_send_lp_vid(struct slave *slave, u8 mac_addr[],
dev_queue_xmit(skb);
}
+struct alb_walk_data {
+ struct bonding *bond;
+ struct slave *slave;
+ u8 *mac_addr;
+ bool strict_match;
+};
+
+static int alb_upper_dev_walk(struct net_device *upper, void *_data)
+{
+ struct alb_walk_data *data = _data;
+ bool strict_match = data->strict_match;
+ struct bonding *bond = data->bond;
+ struct slave *slave = data->slave;
+ u8 *mac_addr = data->mac_addr;
+ struct bond_vlan_tag *tags;
+
+ if (is_vlan_dev(upper) && vlan_get_encap_level(upper) == 0) {
+ if (strict_match &&
+ ether_addr_equal_64bits(mac_addr,
+ upper->dev_addr)) {
+ alb_send_lp_vid(slave, mac_addr,
+ vlan_dev_vlan_proto(upper),
+ vlan_dev_vlan_id(upper));
+ } else if (!strict_match) {
+ alb_send_lp_vid(slave, upper->dev_addr,
+ vlan_dev_vlan_proto(upper),
+ vlan_dev_vlan_id(upper));
+ }
+ }
+
+ /* If this is a macvlan device, then only send updates
+ * when strict_match is turned off.
+ */
+ if (netif_is_macvlan(upper) && !strict_match) {
+ tags = bond_verify_device_path(bond->dev, upper, 0);
+ if (IS_ERR_OR_NULL(tags))
+ BUG();
+ alb_send_lp_vid(slave, upper->dev_addr,
+ tags[0].vlan_proto, tags[0].vlan_id);
+ kfree(tags);
+ }
+
+ return 0;
+}
+
static void alb_send_learning_packets(struct slave *slave, u8 mac_addr[],
bool strict_match)
{
struct bonding *bond = bond_get_bond_by_slave(slave);
- struct net_device *upper;
- struct list_head *iter;
- struct bond_vlan_tag *tags;
+ struct alb_walk_data data = {
+ .strict_match = strict_match,
+ .mac_addr = mac_addr,
+ .slave = slave,
+ .bond = bond,
+ };
/* send untagged */
alb_send_lp_vid(slave, mac_addr, 0, 0);
@@ -965,33 +1013,7 @@ static void alb_send_learning_packets(struct slave *slave, u8 mac_addr[],
* for that device.
*/
rcu_read_lock();
- netdev_for_each_all_upper_dev_rcu(bond->dev, upper, iter) {
- if (is_vlan_dev(upper) && vlan_get_encap_level(upper) == 0) {
- if (strict_match &&
- ether_addr_equal_64bits(mac_addr,
- upper->dev_addr)) {
- alb_send_lp_vid(slave, mac_addr,
- vlan_dev_vlan_proto(upper),
- vlan_dev_vlan_id(upper));
- } else if (!strict_match) {
- alb_send_lp_vid(slave, upper->dev_addr,
- vlan_dev_vlan_proto(upper),
- vlan_dev_vlan_id(upper));
- }
- }
-
- /* If this is a macvlan device, then only send updates
- * when strict_match is turned off.
- */
- if (netif_is_macvlan(upper) && !strict_match) {
- tags = bond_verify_device_path(bond->dev, upper, 0);
- if (IS_ERR_OR_NULL(tags))
- BUG();
- alb_send_lp_vid(slave, upper->dev_addr,
- tags[0].vlan_proto, tags[0].vlan_id);
- kfree(tags);
- }
- }
+ netdev_walk_all_upper_dev_rcu(bond->dev, alb_upper_dev_walk, &data);
rcu_read_unlock();
}
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 5fa36ebc0640..8029dd4912b6 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -199,7 +199,7 @@ MODULE_PARM_DESC(lp_interval, "The number of seconds between instances where "
atomic_t netpoll_block_tx = ATOMIC_INIT(0);
#endif
-int bond_net_id __read_mostly;
+unsigned int bond_net_id __read_mostly;
static __be32 arp_target[BOND_MAX_ARP_TARGETS];
static int arp_ip_count;
@@ -2270,22 +2270,23 @@ re_arm:
}
}
+static int bond_upper_dev_walk(struct net_device *upper, void *data)
+{
+ __be32 ip = *((__be32 *)data);
+
+ return ip == bond_confirm_addr(upper, 0, ip);
+}
+
static bool bond_has_this_ip(struct bonding *bond, __be32 ip)
{
- struct net_device *upper;
- struct list_head *iter;
bool ret = false;
if (ip == bond_confirm_addr(bond->dev, 0, ip))
return true;
rcu_read_lock();
- netdev_for_each_all_upper_dev_rcu(bond->dev, upper, iter) {
- if (ip == bond_confirm_addr(upper, 0, ip)) {
- ret = true;
- break;
- }
- }
+ if (netdev_walk_all_upper_dev_rcu(bond->dev, bond_upper_dev_walk, &ip))
+ ret = true;
rcu_read_unlock();
return ret;
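
Both bonding conversions in this patch follow the same shape: the old open-coded loop body becomes a callback that netdev_walk_all_upper_dev_rcu() invokes once per upper device, and a nonzero return value stops the walk and is propagated. A detached toy model of that contract (toy types, not the in-kernel API):

#include <stdio.h>
#include <string.h>

struct dev { const char *name; };

/* visit each device; a nonzero callback return stops the walk */
static int walk_all(struct dev *devs, int n,
		    int (*fn)(struct dev *, void *), void *data)
{
	int i, ret;

	for (i = 0; i < n; i++) {
		ret = fn(&devs[i], data);
		if (ret)
			return ret;
	}
	return 0;
}

static int match_name(struct dev *d, void *data)
{
	return !strcmp(d->name, (const char *)data);
}

int main(void)
{
	struct dev uppers[] = { { "vlan10" }, { "macvlan0" } };

	printf("found: %d\n", walk_all(uppers, 2, match_name, "macvlan0"));
	return 0;
}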
@@ -4079,16 +4080,16 @@ static netdev_tx_t bond_start_xmit(struct sk_buff *skb, struct net_device *dev)
return ret;
}
-static int bond_ethtool_get_settings(struct net_device *bond_dev,
- struct ethtool_cmd *ecmd)
+static int bond_ethtool_get_link_ksettings(struct net_device *bond_dev,
+ struct ethtool_link_ksettings *cmd)
{
struct bonding *bond = netdev_priv(bond_dev);
unsigned long speed = 0;
struct list_head *iter;
struct slave *slave;
- ecmd->duplex = DUPLEX_UNKNOWN;
- ecmd->port = PORT_OTHER;
+ cmd->base.duplex = DUPLEX_UNKNOWN;
+ cmd->base.port = PORT_OTHER;
/* Since bond_slave_can_tx returns false for all inactive or down slaves, we
* do not need to check mode. Though link speed might not represent
@@ -4099,12 +4100,12 @@ static int bond_ethtool_get_settings(struct net_device *bond_dev,
if (bond_slave_can_tx(slave)) {
if (slave->speed != SPEED_UNKNOWN)
speed += slave->speed;
- if (ecmd->duplex == DUPLEX_UNKNOWN &&
+ if (cmd->base.duplex == DUPLEX_UNKNOWN &&
slave->duplex != DUPLEX_UNKNOWN)
- ecmd->duplex = slave->duplex;
+ cmd->base.duplex = slave->duplex;
}
}
- ethtool_cmd_speed_set(ecmd, speed ? : SPEED_UNKNOWN);
+ cmd->base.speed = speed ? : SPEED_UNKNOWN;
return 0;
}
@@ -4120,8 +4121,8 @@ static void bond_ethtool_get_drvinfo(struct net_device *bond_dev,
static const struct ethtool_ops bond_ethtool_ops = {
.get_drvinfo = bond_ethtool_get_drvinfo,
- .get_settings = bond_ethtool_get_settings,
.get_link = ethtool_op_get_link,
+ .get_link_ksettings = bond_ethtool_get_link_ksettings,
};
static const struct net_device_ops bond_netdev_ops = {
diff --git a/drivers/net/can/Makefile b/drivers/net/can/Makefile
index 26ba4b794a0b..7a85495dbb0c 100644
--- a/drivers/net/can/Makefile
+++ b/drivers/net/can/Makefile
@@ -31,5 +31,4 @@ obj-$(CONFIG_CAN_TI_HECC) += ti_hecc.o
obj-$(CONFIG_CAN_XILINXCAN) += xilinx_can.o
obj-$(CONFIG_PCH_CAN) += pch_can.o
-subdir-ccflags-y += -D__CHECK_ENDIAN__
subdir-ccflags-$(CONFIG_CAN_DEBUG_DEVICES) += -DDEBUG
diff --git a/drivers/net/can/grcan.c b/drivers/net/can/grcan.c
index db9538d4b358..a7be12d9a139 100644
--- a/drivers/net/can/grcan.c
+++ b/drivers/net/can/grcan.c
@@ -15,7 +15,7 @@
* See "Documentation/ABI/testing/sysfs-class-net-grcan" for information on the
* sysfs interface.
*
- * See "Documentation/kernel-parameters.txt" for information on the module
+ * See "Documentation/admin-guide/kernel-parameters.rst" for information on the module
* parameters.
*
* This program is free software; you can redistribute it and/or modify it
diff --git a/drivers/net/can/usb/kvaser_usb.c b/drivers/net/can/usb/kvaser_usb.c
index d51e0c401b48..18cc529fb807 100644
--- a/drivers/net/can/usb/kvaser_usb.c
+++ b/drivers/net/can/usb/kvaser_usb.c
@@ -459,7 +459,7 @@ struct kvaser_usb {
struct usb_endpoint_descriptor *bulk_in, *bulk_out;
struct usb_anchor rx_submitted;
- /* @max_tx_urbs: Firmware-reported maximum number of oustanding,
+ /* @max_tx_urbs: Firmware-reported maximum number of outstanding,
* not yet ACKed, transmissions on this device. This value is
* also used as a sentinel for marking free tx contexts.
*/
@@ -2027,7 +2027,7 @@ static int kvaser_usb_probe(struct usb_interface *intf,
((dev->fw_version >> 16) & 0xff),
(dev->fw_version & 0xffff));
- dev_dbg(&intf->dev, "Max oustanding tx = %d URBs\n", dev->max_tx_urbs);
+ dev_dbg(&intf->dev, "Max outstanding tx = %d URBs\n", dev->max_tx_urbs);
err = kvaser_usb_get_card_info(dev);
if (err) {
diff --git a/drivers/net/cris/eth_v10.c b/drivers/net/cris/eth_v10.c
index 221f5f011ff9..91c876a0a647 100644
--- a/drivers/net/cris/eth_v10.c
+++ b/drivers/net/cris/eth_v10.c
@@ -7,9 +7,6 @@
*
*/
-
-#include <linux/module.h>
-
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/types.h>
@@ -264,7 +261,6 @@ static const struct net_device_ops e100_netdev_ops = {
.ndo_do_ioctl = e100_ioctl,
.ndo_set_mac_address = e100_set_mac_address,
.ndo_validate_addr = eth_validate_addr,
- .ndo_change_mtu = eth_change_mtu,
.ndo_set_config = e100_set_config,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = e100_netpoll,
@@ -412,6 +408,7 @@ etrax_ethernet_init(void)
led_next_time = jiffies;
return 0;
}
+device_initcall(etrax_ethernet_init);
/* set MAC address of the interface. called from the core after a
* SIOCSIFADDR ioctl, and from the bootup above.
@@ -1715,11 +1712,6 @@ e100_netpoll(struct net_device* netdev)
}
#endif
-static int
-etrax_init_module(void)
-{
- return etrax_ethernet_init();
-}
static int __init
e100_boot_setup(char* str)
@@ -1742,5 +1734,3 @@ e100_boot_setup(char* str)
}
__setup("etrax100_eth=", e100_boot_setup);
-
-module_init(etrax_init_module);
diff --git a/drivers/net/dsa/mv88e6xxx/Kconfig b/drivers/net/dsa/mv88e6xxx/Kconfig
index 486668813e15..1aaa7a95ebc4 100644
--- a/drivers/net/dsa/mv88e6xxx/Kconfig
+++ b/drivers/net/dsa/mv88e6xxx/Kconfig
@@ -1,6 +1,7 @@
config NET_DSA_MV88E6XXX
tristate "Marvell 88E6xxx Ethernet switch fabric support"
depends on NET_DSA
+ select IRQ_DOMAIN
select NET_DSA_TAG_EDSA
select NET_DSA_TAG_DSA
help
diff --git a/drivers/net/dsa/mv88e6xxx/Makefile b/drivers/net/dsa/mv88e6xxx/Makefile
index 10ce820daa48..c36be318de1a 100644
--- a/drivers/net/dsa/mv88e6xxx/Makefile
+++ b/drivers/net/dsa/mv88e6xxx/Makefile
@@ -2,3 +2,4 @@ obj-$(CONFIG_NET_DSA_MV88E6XXX) += mv88e6xxx.o
mv88e6xxx-objs := chip.o
mv88e6xxx-objs += global1.o
mv88e6xxx-$(CONFIG_NET_DSA_MV88E6XXX_GLOBAL2) += global2.o
+mv88e6xxx-objs += port.o
diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
index 883fd9809dd2..f7222dc6581d 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.c
+++ b/drivers/net/dsa/mv88e6xxx/chip.c
@@ -18,11 +18,15 @@
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/if_bridge.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
#include <linux/jiffies.h>
#include <linux/list.h>
#include <linux/mdio.h>
#include <linux/module.h>
#include <linux/of_device.h>
+#include <linux/of_irq.h>
#include <linux/of_mdio.h>
#include <linux/netdevice.h>
#include <linux/gpio/consumer.h>
@@ -33,6 +37,7 @@
#include "mv88e6xxx.h"
#include "global1.h"
#include "global2.h"
+#include "port.h"
static void assert_reg_lock(struct mv88e6xxx_chip *chip)
{
@@ -217,22 +222,6 @@ int mv88e6xxx_write(struct mv88e6xxx_chip *chip, int addr, int reg, u16 val)
return 0;
}
-static int mv88e6xxx_port_read(struct mv88e6xxx_chip *chip, int port, int reg,
- u16 *val)
-{
- int addr = chip->info->port_base_addr + port;
-
- return mv88e6xxx_read(chip, addr, reg, val);
-}
-
-static int mv88e6xxx_port_write(struct mv88e6xxx_chip *chip, int port, int reg,
- u16 val)
-{
- int addr = chip->info->port_base_addr + port;
-
- return mv88e6xxx_write(chip, addr, reg, val);
-}
-
static int mv88e6xxx_phy_read(struct mv88e6xxx_chip *chip, int phy,
int reg, u16 *val)
{
@@ -323,6 +312,180 @@ static int mv88e6xxx_serdes_write(struct mv88e6xxx_chip *chip, int reg, u16 val)
reg, val);
}
+static void mv88e6xxx_g1_irq_mask(struct irq_data *d)
+{
+ struct mv88e6xxx_chip *chip = irq_data_get_irq_chip_data(d);
+ unsigned int n = d->hwirq;
+
+ chip->g1_irq.masked |= (1 << n);
+}
+
+static void mv88e6xxx_g1_irq_unmask(struct irq_data *d)
+{
+ struct mv88e6xxx_chip *chip = irq_data_get_irq_chip_data(d);
+ unsigned int n = d->hwirq;
+
+ chip->g1_irq.masked &= ~(1 << n);
+}
+
+static irqreturn_t mv88e6xxx_g1_irq_thread_fn(int irq, void *dev_id)
+{
+ struct mv88e6xxx_chip *chip = dev_id;
+ unsigned int nhandled = 0;
+ unsigned int sub_irq;
+ unsigned int n;
+ u16 reg;
+ int err;
+
+ mutex_lock(&chip->reg_lock);
+ err = mv88e6xxx_g1_read(chip, GLOBAL_STATUS, &reg);
+ mutex_unlock(&chip->reg_lock);
+
+ if (err)
+ goto out;
+
+ for (n = 0; n < chip->g1_irq.nirqs; ++n) {
+ if (reg & (1 << n)) {
+ sub_irq = irq_find_mapping(chip->g1_irq.domain, n);
+ handle_nested_irq(sub_irq);
+ ++nhandled;
+ }
+ }
+out:
+ return (nhandled > 0 ? IRQ_HANDLED : IRQ_NONE);
+}
+
+static void mv88e6xxx_g1_irq_bus_lock(struct irq_data *d)
+{
+ struct mv88e6xxx_chip *chip = irq_data_get_irq_chip_data(d);
+
+ mutex_lock(&chip->reg_lock);
+}
+
+static void mv88e6xxx_g1_irq_bus_sync_unlock(struct irq_data *d)
+{
+ struct mv88e6xxx_chip *chip = irq_data_get_irq_chip_data(d);
+ u16 mask = GENMASK(chip->g1_irq.nirqs, 0);
+ u16 reg;
+ int err;
+
+ err = mv88e6xxx_g1_read(chip, GLOBAL_CONTROL, &reg);
+ if (err)
+ goto out;
+
+ reg &= ~mask;
+ reg |= (~chip->g1_irq.masked & mask);
+
+ err = mv88e6xxx_g1_write(chip, GLOBAL_CONTROL, reg);
+ if (err)
+ goto out;
+
+out:
+ mutex_unlock(&chip->reg_lock);
+}
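
The mask handling above leans on GENMASK(h, l), which sets bits l through h inclusive, so GENMASK(nirqs, 0) spans nirqs + 1 bits. A simplified 32-bit model of the kernel macro:

#include <stdio.h>
#include <stdint.h>

/* simplified 32-bit rendition of the kernel's GENMASK(h, l) */
static uint32_t genmask(unsigned int h, unsigned int l)
{
	return (~0u >> (31 - h)) & (~0u << l);
}

int main(void)
{
	printf("0x%x\n", genmask(8, 0));	/* with nirqs == 8 -> 0x1ff */
	return 0;
}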
+
+static struct irq_chip mv88e6xxx_g1_irq_chip = {
+ .name = "mv88e6xxx-g1",
+ .irq_mask = mv88e6xxx_g1_irq_mask,
+ .irq_unmask = mv88e6xxx_g1_irq_unmask,
+ .irq_bus_lock = mv88e6xxx_g1_irq_bus_lock,
+ .irq_bus_sync_unlock = mv88e6xxx_g1_irq_bus_sync_unlock,
+};
+
+static int mv88e6xxx_g1_irq_domain_map(struct irq_domain *d,
+ unsigned int irq,
+ irq_hw_number_t hwirq)
+{
+ struct mv88e6xxx_chip *chip = d->host_data;
+
+ irq_set_chip_data(irq, d->host_data);
+ irq_set_chip_and_handler(irq, &chip->g1_irq.chip, handle_level_irq);
+ irq_set_noprobe(irq);
+
+ return 0;
+}
+
+static const struct irq_domain_ops mv88e6xxx_g1_irq_domain_ops = {
+ .map = mv88e6xxx_g1_irq_domain_map,
+ .xlate = irq_domain_xlate_twocell,
+};
+
+static void mv88e6xxx_g1_irq_free(struct mv88e6xxx_chip *chip)
+{
+ int irq, virq;
+ u16 mask;
+
+ mv88e6xxx_g1_read(chip, GLOBAL_CONTROL, &mask);
+ mask |= GENMASK(chip->g1_irq.nirqs, 0);
+ mv88e6xxx_g1_write(chip, GLOBAL_CONTROL, mask);
+
+ free_irq(chip->irq, chip);
+
+ for (irq = 0; irq < chip->g1_irq.nirqs; irq++) {
+ virq = irq_find_mapping(chip->g1_irq.domain, irq);
+ irq_dispose_mapping(virq);
+ }
+
+ irq_domain_remove(chip->g1_irq.domain);
+}
+
+static int mv88e6xxx_g1_irq_setup(struct mv88e6xxx_chip *chip)
+{
+ int err, irq, virq;
+ u16 reg, mask;
+
+ chip->g1_irq.nirqs = chip->info->g1_irqs;
+ chip->g1_irq.domain = irq_domain_add_simple(
+ NULL, chip->g1_irq.nirqs, 0,
+ &mv88e6xxx_g1_irq_domain_ops, chip);
+ if (!chip->g1_irq.domain)
+ return -ENOMEM;
+
+ for (irq = 0; irq < chip->g1_irq.nirqs; irq++)
+ irq_create_mapping(chip->g1_irq.domain, irq);
+
+ chip->g1_irq.chip = mv88e6xxx_g1_irq_chip;
+ chip->g1_irq.masked = ~0;
+
+ err = mv88e6xxx_g1_read(chip, GLOBAL_CONTROL, &mask);
+ if (err)
+ goto out_mapping;
+
+ mask &= ~GENMASK(chip->g1_irq.nirqs, 0);
+
+ err = mv88e6xxx_g1_write(chip, GLOBAL_CONTROL, mask);
+ if (err)
+ goto out_disable;
+
+ /* Reading the interrupt status clears (most of) them */
+ err = mv88e6xxx_g1_read(chip, GLOBAL_STATUS, &reg);
+ if (err)
+ goto out_disable;
+
+ err = request_threaded_irq(chip->irq, NULL,
+ mv88e6xxx_g1_irq_thread_fn,
+ IRQF_ONESHOT | IRQF_TRIGGER_FALLING,
+ dev_name(chip->dev), chip);
+ if (err)
+ goto out_disable;
+
+ return 0;
+
+out_disable:
+ mask |= GENMASK(chip->g1_irq.nirqs, 0);
+ mv88e6xxx_g1_write(chip, GLOBAL_CONTROL, mask);
+
+out_mapping:
+ for (irq = 0; irq < 16; irq++) {
+ virq = irq_find_mapping(chip->g1_irq.domain, irq);
+ irq_dispose_mapping(virq);
+ }
+
+ irq_domain_remove(chip->g1_irq.domain);
+
+ return err;
+}
+
int mv88e6xxx_wait(struct mv88e6xxx_chip *chip, int addr, int reg, u16 mask)
{
int i;
@@ -364,56 +527,18 @@ int mv88e6xxx_update(struct mv88e6xxx_chip *chip, int addr, int reg, u16 update)
static int mv88e6xxx_ppu_disable(struct mv88e6xxx_chip *chip)
{
- u16 val;
- int i, err;
+ if (!chip->info->ops->ppu_disable)
+ return 0;
- err = mv88e6xxx_g1_read(chip, GLOBAL_CONTROL, &val);
- if (err)
- return err;
-
- err = mv88e6xxx_g1_write(chip, GLOBAL_CONTROL,
- val & ~GLOBAL_CONTROL_PPU_ENABLE);
- if (err)
- return err;
-
- for (i = 0; i < 16; i++) {
- err = mv88e6xxx_g1_read(chip, GLOBAL_STATUS, &val);
- if (err)
- return err;
-
- usleep_range(1000, 2000);
- if ((val & GLOBAL_STATUS_PPU_MASK) != GLOBAL_STATUS_PPU_POLLING)
- return 0;
- }
-
- return -ETIMEDOUT;
+ return chip->info->ops->ppu_disable(chip);
}
static int mv88e6xxx_ppu_enable(struct mv88e6xxx_chip *chip)
{
- u16 val;
- int i, err;
+ if (!chip->info->ops->ppu_enable)
+ return 0;
- err = mv88e6xxx_g1_read(chip, GLOBAL_CONTROL, &val);
- if (err)
- return err;
-
- err = mv88e6xxx_g1_write(chip, GLOBAL_CONTROL,
- val | GLOBAL_CONTROL_PPU_ENABLE);
- if (err)
- return err;
-
- for (i = 0; i < 16; i++) {
- err = mv88e6xxx_g1_read(chip, GLOBAL_STATUS, &val);
- if (err)
- return err;
-
- usleep_range(1000, 2000);
- if ((val & GLOBAL_STATUS_PPU_MASK) == GLOBAL_STATUS_PPU_POLLING)
- return 0;
- }
-
- return -ETIMEDOUT;
+ return chip->info->ops->ppu_enable(chip);
}
static void mv88e6xxx_ppu_reenable_work(struct work_struct *ugly)
@@ -477,9 +602,8 @@ static void mv88e6xxx_ppu_state_init(struct mv88e6xxx_chip *chip)
{
mutex_init(&chip->ppu_mutex);
INIT_WORK(&chip->ppu_work, mv88e6xxx_ppu_reenable_work);
- init_timer(&chip->ppu_timer);
- chip->ppu_timer.data = (unsigned long)chip;
- chip->ppu_timer.function = mv88e6xxx_ppu_reenable_timer;
+ setup_timer(&chip->ppu_timer, mv88e6xxx_ppu_reenable_timer,
+ (unsigned long)chip);
}
static void mv88e6xxx_ppu_state_destroy(struct mv88e6xxx_chip *chip)
@@ -515,11 +639,6 @@ static int mv88e6xxx_phy_ppu_write(struct mv88e6xxx_chip *chip, int addr,
return err;
}
-static bool mv88e6xxx_6065_family(struct mv88e6xxx_chip *chip)
-{
- return chip->info->family == MV88E6XXX_FAMILY_6065;
-}
-
static bool mv88e6xxx_6095_family(struct mv88e6xxx_chip *chip)
{
return chip->info->family == MV88E6XXX_FAMILY_6095;
@@ -555,231 +674,152 @@ static bool mv88e6xxx_6352_family(struct mv88e6xxx_chip *chip)
return chip->info->family == MV88E6XXX_FAMILY_6352;
}
-/* We expect the switch to perform auto negotiation if there is a real
- * phy. However, in the case of a fixed link phy, we force the port
- * settings from the fixed link settings.
- */
-static void mv88e6xxx_adjust_link(struct dsa_switch *ds, int port,
- struct phy_device *phydev)
+static int mv88e6xxx_port_setup_mac(struct mv88e6xxx_chip *chip, int port,
+ int link, int speed, int duplex,
+ phy_interface_t mode)
{
- struct mv88e6xxx_chip *chip = ds->priv;
- u16 reg;
int err;
- if (!phy_is_pseudo_fixed_link(phydev))
- return;
-
- mutex_lock(&chip->reg_lock);
+ if (!chip->info->ops->port_set_link)
+ return 0;
- err = mv88e6xxx_port_read(chip, port, PORT_PCS_CTRL, &reg);
+ /* Port's MAC control must not be changed unless the link is down */
+ err = chip->info->ops->port_set_link(chip, port, 0);
if (err)
- goto out;
-
- reg &= ~(PORT_PCS_CTRL_LINK_UP |
- PORT_PCS_CTRL_FORCE_LINK |
- PORT_PCS_CTRL_DUPLEX_FULL |
- PORT_PCS_CTRL_FORCE_DUPLEX |
- PORT_PCS_CTRL_UNFORCED);
-
- reg |= PORT_PCS_CTRL_FORCE_LINK;
- if (phydev->link)
- reg |= PORT_PCS_CTRL_LINK_UP;
-
- if (mv88e6xxx_6065_family(chip) && phydev->speed > SPEED_100)
- goto out;
+ return err;
- switch (phydev->speed) {
- case SPEED_1000:
- reg |= PORT_PCS_CTRL_1000;
- break;
- case SPEED_100:
- reg |= PORT_PCS_CTRL_100;
- break;
- case SPEED_10:
- reg |= PORT_PCS_CTRL_10;
- break;
- default:
- pr_info("Unknown speed");
- goto out;
+ if (chip->info->ops->port_set_speed) {
+ err = chip->info->ops->port_set_speed(chip, port, speed);
+ if (err && err != -EOPNOTSUPP)
+ goto restore_link;
}
- reg |= PORT_PCS_CTRL_FORCE_DUPLEX;
- if (phydev->duplex == DUPLEX_FULL)
- reg |= PORT_PCS_CTRL_DUPLEX_FULL;
-
- if ((mv88e6xxx_6352_family(chip) || mv88e6xxx_6351_family(chip)) &&
- (port >= mv88e6xxx_num_ports(chip) - 2)) {
- if (phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID)
- reg |= PORT_PCS_CTRL_RGMII_DELAY_RXCLK;
- if (phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID)
- reg |= PORT_PCS_CTRL_RGMII_DELAY_TXCLK;
- if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID)
- reg |= (PORT_PCS_CTRL_RGMII_DELAY_RXCLK |
- PORT_PCS_CTRL_RGMII_DELAY_TXCLK);
+ if (chip->info->ops->port_set_duplex) {
+ err = chip->info->ops->port_set_duplex(chip, port, duplex);
+ if (err && err != -EOPNOTSUPP)
+ goto restore_link;
}
- mv88e6xxx_port_write(chip, port, PORT_PCS_CTRL, reg);
-out:
- mutex_unlock(&chip->reg_lock);
-}
-
-static int _mv88e6xxx_stats_wait(struct mv88e6xxx_chip *chip)
-{
- u16 val;
- int i, err;
-
- for (i = 0; i < 10; i++) {
- err = mv88e6xxx_g1_read(chip, GLOBAL_STATS_OP, &val);
- if ((val & GLOBAL_STATS_OP_BUSY) == 0)
- return 0;
+ if (chip->info->ops->port_set_rgmii_delay) {
+ err = chip->info->ops->port_set_rgmii_delay(chip, port, mode);
+ if (err && err != -EOPNOTSUPP)
+ goto restore_link;
}
- return -ETIMEDOUT;
-}
-
-static int _mv88e6xxx_stats_snapshot(struct mv88e6xxx_chip *chip, int port)
-{
- int err;
-
- if (mv88e6xxx_6320_family(chip) || mv88e6xxx_6352_family(chip))
- port = (port + 1) << 5;
-
- /* Snapshot the hardware statistics counters for this port. */
- err = mv88e6xxx_g1_write(chip, GLOBAL_STATS_OP,
- GLOBAL_STATS_OP_CAPTURE_PORT |
- GLOBAL_STATS_OP_HIST_RX_TX | port);
- if (err)
- return err;
+ err = 0;
+restore_link:
+ if (chip->info->ops->port_set_link(chip, port, link))
+ netdev_err(chip->ds->ports[port].netdev,
+ "failed to restore MAC's link\n");
- /* Wait for the snapshotting to complete. */
- return _mv88e6xxx_stats_wait(chip);
+ return err;
}
-static void _mv88e6xxx_stats_read(struct mv88e6xxx_chip *chip,
- int stat, u32 *val)
+/* We expect the switch to perform auto negotiation if there is a real
+ * phy. However, in the case of a fixed link phy, we force the port
+ * settings from the fixed link settings.
+ */
+static void mv88e6xxx_adjust_link(struct dsa_switch *ds, int port,
+ struct phy_device *phydev)
{
- u32 value;
- u16 reg;
+ struct mv88e6xxx_chip *chip = ds->priv;
int err;
- *val = 0;
-
- err = mv88e6xxx_g1_write(chip, GLOBAL_STATS_OP,
- GLOBAL_STATS_OP_READ_CAPTURED |
- GLOBAL_STATS_OP_HIST_RX_TX | stat);
- if (err)
- return;
-
- err = _mv88e6xxx_stats_wait(chip);
- if (err)
+ if (!phy_is_pseudo_fixed_link(phydev))
return;
- err = mv88e6xxx_g1_read(chip, GLOBAL_STATS_COUNTER_32, &reg);
- if (err)
- return;
+ mutex_lock(&chip->reg_lock);
+ err = mv88e6xxx_port_setup_mac(chip, port, phydev->link, phydev->speed,
+ phydev->duplex, phydev->interface);
+ mutex_unlock(&chip->reg_lock);
- value = reg << 16;
+ if (err && err != -EOPNOTSUPP)
+ netdev_err(ds->ports[port].netdev, "failed to configure MAC\n");
+}
- err = mv88e6xxx_g1_read(chip, GLOBAL_STATS_COUNTER_01, &reg);
- if (err)
- return;
+static int mv88e6xxx_stats_snapshot(struct mv88e6xxx_chip *chip, int port)
+{
+ if (!chip->info->ops->stats_snapshot)
+ return -EOPNOTSUPP;
- *val = value | reg;
+ return chip->info->ops->stats_snapshot(chip, port);
}
static struct mv88e6xxx_hw_stat mv88e6xxx_hw_stats[] = {
- { "in_good_octets", 8, 0x00, BANK0, },
- { "in_bad_octets", 4, 0x02, BANK0, },
- { "in_unicast", 4, 0x04, BANK0, },
- { "in_broadcasts", 4, 0x06, BANK0, },
- { "in_multicasts", 4, 0x07, BANK0, },
- { "in_pause", 4, 0x16, BANK0, },
- { "in_undersize", 4, 0x18, BANK0, },
- { "in_fragments", 4, 0x19, BANK0, },
- { "in_oversize", 4, 0x1a, BANK0, },
- { "in_jabber", 4, 0x1b, BANK0, },
- { "in_rx_error", 4, 0x1c, BANK0, },
- { "in_fcs_error", 4, 0x1d, BANK0, },
- { "out_octets", 8, 0x0e, BANK0, },
- { "out_unicast", 4, 0x10, BANK0, },
- { "out_broadcasts", 4, 0x13, BANK0, },
- { "out_multicasts", 4, 0x12, BANK0, },
- { "out_pause", 4, 0x15, BANK0, },
- { "excessive", 4, 0x11, BANK0, },
- { "collisions", 4, 0x1e, BANK0, },
- { "deferred", 4, 0x05, BANK0, },
- { "single", 4, 0x14, BANK0, },
- { "multiple", 4, 0x17, BANK0, },
- { "out_fcs_error", 4, 0x03, BANK0, },
- { "late", 4, 0x1f, BANK0, },
- { "hist_64bytes", 4, 0x08, BANK0, },
- { "hist_65_127bytes", 4, 0x09, BANK0, },
- { "hist_128_255bytes", 4, 0x0a, BANK0, },
- { "hist_256_511bytes", 4, 0x0b, BANK0, },
- { "hist_512_1023bytes", 4, 0x0c, BANK0, },
- { "hist_1024_max_bytes", 4, 0x0d, BANK0, },
- { "sw_in_discards", 4, 0x10, PORT, },
- { "sw_in_filtered", 2, 0x12, PORT, },
- { "sw_out_filtered", 2, 0x13, PORT, },
- { "in_discards", 4, 0x00 | GLOBAL_STATS_OP_BANK_1, BANK1, },
- { "in_filtered", 4, 0x01 | GLOBAL_STATS_OP_BANK_1, BANK1, },
- { "in_accepted", 4, 0x02 | GLOBAL_STATS_OP_BANK_1, BANK1, },
- { "in_bad_accepted", 4, 0x03 | GLOBAL_STATS_OP_BANK_1, BANK1, },
- { "in_good_avb_class_a", 4, 0x04 | GLOBAL_STATS_OP_BANK_1, BANK1, },
- { "in_good_avb_class_b", 4, 0x05 | GLOBAL_STATS_OP_BANK_1, BANK1, },
- { "in_bad_avb_class_a", 4, 0x06 | GLOBAL_STATS_OP_BANK_1, BANK1, },
- { "in_bad_avb_class_b", 4, 0x07 | GLOBAL_STATS_OP_BANK_1, BANK1, },
- { "tcam_counter_0", 4, 0x08 | GLOBAL_STATS_OP_BANK_1, BANK1, },
- { "tcam_counter_1", 4, 0x09 | GLOBAL_STATS_OP_BANK_1, BANK1, },
- { "tcam_counter_2", 4, 0x0a | GLOBAL_STATS_OP_BANK_1, BANK1, },
- { "tcam_counter_3", 4, 0x0b | GLOBAL_STATS_OP_BANK_1, BANK1, },
- { "in_da_unknown", 4, 0x0e | GLOBAL_STATS_OP_BANK_1, BANK1, },
- { "in_management", 4, 0x0f | GLOBAL_STATS_OP_BANK_1, BANK1, },
- { "out_queue_0", 4, 0x10 | GLOBAL_STATS_OP_BANK_1, BANK1, },
- { "out_queue_1", 4, 0x11 | GLOBAL_STATS_OP_BANK_1, BANK1, },
- { "out_queue_2", 4, 0x12 | GLOBAL_STATS_OP_BANK_1, BANK1, },
- { "out_queue_3", 4, 0x13 | GLOBAL_STATS_OP_BANK_1, BANK1, },
- { "out_queue_4", 4, 0x14 | GLOBAL_STATS_OP_BANK_1, BANK1, },
- { "out_queue_5", 4, 0x15 | GLOBAL_STATS_OP_BANK_1, BANK1, },
- { "out_queue_6", 4, 0x16 | GLOBAL_STATS_OP_BANK_1, BANK1, },
- { "out_queue_7", 4, 0x17 | GLOBAL_STATS_OP_BANK_1, BANK1, },
- { "out_cut_through", 4, 0x18 | GLOBAL_STATS_OP_BANK_1, BANK1, },
- { "out_octets_a", 4, 0x1a | GLOBAL_STATS_OP_BANK_1, BANK1, },
- { "out_octets_b", 4, 0x1b | GLOBAL_STATS_OP_BANK_1, BANK1, },
- { "out_management", 4, 0x1f | GLOBAL_STATS_OP_BANK_1, BANK1, },
+ { "in_good_octets", 8, 0x00, STATS_TYPE_BANK0, },
+ { "in_bad_octets", 4, 0x02, STATS_TYPE_BANK0, },
+ { "in_unicast", 4, 0x04, STATS_TYPE_BANK0, },
+ { "in_broadcasts", 4, 0x06, STATS_TYPE_BANK0, },
+ { "in_multicasts", 4, 0x07, STATS_TYPE_BANK0, },
+ { "in_pause", 4, 0x16, STATS_TYPE_BANK0, },
+ { "in_undersize", 4, 0x18, STATS_TYPE_BANK0, },
+ { "in_fragments", 4, 0x19, STATS_TYPE_BANK0, },
+ { "in_oversize", 4, 0x1a, STATS_TYPE_BANK0, },
+ { "in_jabber", 4, 0x1b, STATS_TYPE_BANK0, },
+ { "in_rx_error", 4, 0x1c, STATS_TYPE_BANK0, },
+ { "in_fcs_error", 4, 0x1d, STATS_TYPE_BANK0, },
+ { "out_octets", 8, 0x0e, STATS_TYPE_BANK0, },
+ { "out_unicast", 4, 0x10, STATS_TYPE_BANK0, },
+ { "out_broadcasts", 4, 0x13, STATS_TYPE_BANK0, },
+ { "out_multicasts", 4, 0x12, STATS_TYPE_BANK0, },
+ { "out_pause", 4, 0x15, STATS_TYPE_BANK0, },
+ { "excessive", 4, 0x11, STATS_TYPE_BANK0, },
+ { "collisions", 4, 0x1e, STATS_TYPE_BANK0, },
+ { "deferred", 4, 0x05, STATS_TYPE_BANK0, },
+ { "single", 4, 0x14, STATS_TYPE_BANK0, },
+ { "multiple", 4, 0x17, STATS_TYPE_BANK0, },
+ { "out_fcs_error", 4, 0x03, STATS_TYPE_BANK0, },
+ { "late", 4, 0x1f, STATS_TYPE_BANK0, },
+ { "hist_64bytes", 4, 0x08, STATS_TYPE_BANK0, },
+ { "hist_65_127bytes", 4, 0x09, STATS_TYPE_BANK0, },
+ { "hist_128_255bytes", 4, 0x0a, STATS_TYPE_BANK0, },
+ { "hist_256_511bytes", 4, 0x0b, STATS_TYPE_BANK0, },
+ { "hist_512_1023bytes", 4, 0x0c, STATS_TYPE_BANK0, },
+ { "hist_1024_max_bytes", 4, 0x0d, STATS_TYPE_BANK0, },
+ { "sw_in_discards", 4, 0x10, STATS_TYPE_PORT, },
+ { "sw_in_filtered", 2, 0x12, STATS_TYPE_PORT, },
+ { "sw_out_filtered", 2, 0x13, STATS_TYPE_PORT, },
+ { "in_discards", 4, 0x00, STATS_TYPE_BANK1, },
+ { "in_filtered", 4, 0x01, STATS_TYPE_BANK1, },
+ { "in_accepted", 4, 0x02, STATS_TYPE_BANK1, },
+ { "in_bad_accepted", 4, 0x03, STATS_TYPE_BANK1, },
+ { "in_good_avb_class_a", 4, 0x04, STATS_TYPE_BANK1, },
+ { "in_good_avb_class_b", 4, 0x05, STATS_TYPE_BANK1, },
+ { "in_bad_avb_class_a", 4, 0x06, STATS_TYPE_BANK1, },
+ { "in_bad_avb_class_b", 4, 0x07, STATS_TYPE_BANK1, },
+ { "tcam_counter_0", 4, 0x08, STATS_TYPE_BANK1, },
+ { "tcam_counter_1", 4, 0x09, STATS_TYPE_BANK1, },
+ { "tcam_counter_2", 4, 0x0a, STATS_TYPE_BANK1, },
+ { "tcam_counter_3", 4, 0x0b, STATS_TYPE_BANK1, },
+ { "in_da_unknown", 4, 0x0e, STATS_TYPE_BANK1, },
+ { "in_management", 4, 0x0f, STATS_TYPE_BANK1, },
+ { "out_queue_0", 4, 0x10, STATS_TYPE_BANK1, },
+ { "out_queue_1", 4, 0x11, STATS_TYPE_BANK1, },
+ { "out_queue_2", 4, 0x12, STATS_TYPE_BANK1, },
+ { "out_queue_3", 4, 0x13, STATS_TYPE_BANK1, },
+ { "out_queue_4", 4, 0x14, STATS_TYPE_BANK1, },
+ { "out_queue_5", 4, 0x15, STATS_TYPE_BANK1, },
+ { "out_queue_6", 4, 0x16, STATS_TYPE_BANK1, },
+ { "out_queue_7", 4, 0x17, STATS_TYPE_BANK1, },
+ { "out_cut_through", 4, 0x18, STATS_TYPE_BANK1, },
+ { "out_octets_a", 4, 0x1a, STATS_TYPE_BANK1, },
+ { "out_octets_b", 4, 0x1b, STATS_TYPE_BANK1, },
+ { "out_management", 4, 0x1f, STATS_TYPE_BANK1, },
};
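Each entry above is (name, size in bytes, counter number, type), where the type bitmask says which counter bank the statistic lives in. As a sketch of how a STATS_TYPE_PORT entry is consumed, assuming a hypothetical 16-bit register read helper: a 2-byte counter is a single port register, while a 4-byte counter spans two consecutive registers, low word first.

#include <stdint.h>

struct hw_stat {
	const char *string;	/* ethtool name */
	int sizeof_stat;	/* bytes exposed to ethtool */
	int reg;		/* counter number within the bank */
	int type;		/* STATS_TYPE_* bitmask */
};

static uint64_t read_port_stat(uint16_t (*port_read)(int reg),
			       const struct hw_stat *s)
{
	uint32_t low = port_read(s->reg);
	uint32_t high = 0;

	/* A 4-byte port counter spans two consecutive 16-bit registers,
	 * low word first; a 2-byte counter is a single register. */
	if (s->sizeof_stat == 4)
		high = port_read(s->reg + 1);

	return ((uint64_t)high << 16) | low;
}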
-static bool mv88e6xxx_has_stat(struct mv88e6xxx_chip *chip,
- struct mv88e6xxx_hw_stat *stat)
-{
- switch (stat->type) {
- case BANK0:
- return true;
- case BANK1:
- return mv88e6xxx_6320_family(chip);
- case PORT:
- return mv88e6xxx_6095_family(chip) ||
- mv88e6xxx_6185_family(chip) ||
- mv88e6xxx_6097_family(chip) ||
- mv88e6xxx_6165_family(chip) ||
- mv88e6xxx_6351_family(chip) ||
- mv88e6xxx_6352_family(chip);
- }
- return false;
-}
-
static uint64_t _mv88e6xxx_get_ethtool_stat(struct mv88e6xxx_chip *chip,
struct mv88e6xxx_hw_stat *s,
- int port)
+ int port, u16 bank1_select,
+ u16 histogram)
{
u32 low;
u32 high = 0;
+ u16 reg = 0;
int err;
- u16 reg;
u64 value;
switch (s->type) {
- case PORT:
+ case STATS_TYPE_PORT:
err = mv88e6xxx_port_read(chip, port, s->reg, &reg);
if (err)
return UINT64_MAX;
@@ -792,26 +832,28 @@ static uint64_t _mv88e6xxx_get_ethtool_stat(struct mv88e6xxx_chip *chip,
high = reg;
}
break;
- case BANK0:
- case BANK1:
- _mv88e6xxx_stats_read(chip, s->reg, &low);
+ case STATS_TYPE_BANK1:
+ reg = bank1_select;
+ /* fall through */
+ case STATS_TYPE_BANK0:
+ reg |= s->reg | histogram;
+ mv88e6xxx_g1_stats_read(chip, reg, &low);
if (s->sizeof_stat == 8)
- _mv88e6xxx_stats_read(chip, s->reg + 1, &high);
+ mv88e6xxx_g1_stats_read(chip, reg + 1, &high);
}
value = (((u64)high) << 16) | low;
return value;
}
-static void mv88e6xxx_get_strings(struct dsa_switch *ds, int port,
- uint8_t *data)
+static void mv88e6xxx_stats_get_strings(struct mv88e6xxx_chip *chip,
+ uint8_t *data, int types)
{
- struct mv88e6xxx_chip *chip = ds->priv;
struct mv88e6xxx_hw_stat *stat;
int i, j;
for (i = 0, j = 0; i < ARRAY_SIZE(mv88e6xxx_hw_stats); i++) {
stat = &mv88e6xxx_hw_stats[i];
- if (mv88e6xxx_has_stat(chip, stat)) {
+ if (stat->type & types) {
memcpy(data + j * ETH_GSTRING_LEN, stat->string,
ETH_GSTRING_LEN);
j++;
@@ -819,46 +861,142 @@ static void mv88e6xxx_get_strings(struct dsa_switch *ds, int port,
}
}
-static int mv88e6xxx_get_sset_count(struct dsa_switch *ds)
+static void mv88e6095_stats_get_strings(struct mv88e6xxx_chip *chip,
+ uint8_t *data)
+{
+ mv88e6xxx_stats_get_strings(chip, data,
+ STATS_TYPE_BANK0 | STATS_TYPE_PORT);
+}
+
+static void mv88e6320_stats_get_strings(struct mv88e6xxx_chip *chip,
+ uint8_t *data)
+{
+ mv88e6xxx_stats_get_strings(chip, data,
+ STATS_TYPE_BANK0 | STATS_TYPE_BANK1);
+}
+
+static void mv88e6xxx_get_strings(struct dsa_switch *ds, int port,
+ uint8_t *data)
{
struct mv88e6xxx_chip *chip = ds->priv;
+
+ if (chip->info->ops->stats_get_strings)
+ chip->info->ops->stats_get_strings(chip, data);
+}
+
+static int mv88e6xxx_stats_get_sset_count(struct mv88e6xxx_chip *chip,
+ int types)
+{
struct mv88e6xxx_hw_stat *stat;
int i, j;
for (i = 0, j = 0; i < ARRAY_SIZE(mv88e6xxx_hw_stats); i++) {
stat = &mv88e6xxx_hw_stats[i];
- if (mv88e6xxx_has_stat(chip, stat))
+ if (stat->type & types)
j++;
}
return j;
}
+static int mv88e6095_stats_get_sset_count(struct mv88e6xxx_chip *chip)
+{
+ return mv88e6xxx_stats_get_sset_count(chip, STATS_TYPE_BANK0 |
+ STATS_TYPE_PORT);
+}
+
+static int mv88e6320_stats_get_sset_count(struct mv88e6xxx_chip *chip)
+{
+ return mv88e6xxx_stats_get_sset_count(chip, STATS_TYPE_BANK0 |
+ STATS_TYPE_BANK1);
+}
+
+static int mv88e6xxx_get_sset_count(struct dsa_switch *ds)
+{
+ struct mv88e6xxx_chip *chip = ds->priv;
+
+ if (chip->info->ops->stats_get_sset_count)
+ return chip->info->ops->stats_get_sset_count(chip);
+
+ return 0;
+}
+
+static void mv88e6xxx_stats_get_stats(struct mv88e6xxx_chip *chip, int port,
+ uint64_t *data, int types,
+ u16 bank1_select, u16 histogram)
+{
+ struct mv88e6xxx_hw_stat *stat;
+ int i, j;
+
+ for (i = 0, j = 0; i < ARRAY_SIZE(mv88e6xxx_hw_stats); i++) {
+ stat = &mv88e6xxx_hw_stats[i];
+ if (stat->type & types) {
+ data[j] = _mv88e6xxx_get_ethtool_stat(chip, stat, port,
+ bank1_select,
+ histogram);
+ j++;
+ }
+ }
+}
+
+static void mv88e6095_stats_get_stats(struct mv88e6xxx_chip *chip, int port,
+ uint64_t *data)
+{
+ return mv88e6xxx_stats_get_stats(chip, port, data,
+ STATS_TYPE_BANK0 | STATS_TYPE_PORT,
+ 0, GLOBAL_STATS_OP_HIST_RX_TX);
+}
+
+static void mv88e6320_stats_get_stats(struct mv88e6xxx_chip *chip, int port,
+ uint64_t *data)
+{
+ return mv88e6xxx_stats_get_stats(chip, port, data,
+ STATS_TYPE_BANK0 | STATS_TYPE_BANK1,
+ GLOBAL_STATS_OP_BANK_1_BIT_9,
+ GLOBAL_STATS_OP_HIST_RX_TX);
+}
+
+static void mv88e6390_stats_get_stats(struct mv88e6xxx_chip *chip, int port,
+ uint64_t *data)
+{
+ return mv88e6xxx_stats_get_stats(chip, port, data,
+ STATS_TYPE_BANK0 | STATS_TYPE_BANK1,
+ GLOBAL_STATS_OP_BANK_1_BIT_10, 0);
+}
+
+static void mv88e6xxx_get_stats(struct mv88e6xxx_chip *chip, int port,
+ uint64_t *data)
+{
+ if (chip->info->ops->stats_get_stats)
+ chip->info->ops->stats_get_stats(chip, port, data);
+}
+
static void mv88e6xxx_get_ethtool_stats(struct dsa_switch *ds, int port,
uint64_t *data)
{
struct mv88e6xxx_chip *chip = ds->priv;
- struct mv88e6xxx_hw_stat *stat;
int ret;
- int i, j;
mutex_lock(&chip->reg_lock);
- ret = _mv88e6xxx_stats_snapshot(chip, port);
+ ret = mv88e6xxx_stats_snapshot(chip, port);
if (ret < 0) {
mutex_unlock(&chip->reg_lock);
return;
}
- for (i = 0, j = 0; i < ARRAY_SIZE(mv88e6xxx_hw_stats); i++) {
- stat = &mv88e6xxx_hw_stats[i];
- if (mv88e6xxx_has_stat(chip, stat)) {
- data[j] = _mv88e6xxx_get_ethtool_stat(chip, stat, port);
- j++;
- }
- }
+
+ mv88e6xxx_get_stats(chip, port, data);
mutex_unlock(&chip->reg_lock);
}
+static int mv88e6xxx_stats_set_histogram(struct mv88e6xxx_chip *chip)
+{
+ if (chip->info->ops->stats_set_histogram)
+ return chip->info->ops->stats_set_histogram(chip);
+
+ return 0;
+}
+
static int mv88e6xxx_get_regs_len(struct dsa_switch *ds, int port)
{
return 32 * sizeof(u16);
@@ -1069,54 +1207,16 @@ static int _mv88e6xxx_atu_remove(struct mv88e6xxx_chip *chip, u16 fid,
return _mv88e6xxx_atu_move(chip, fid, port, 0x0f, static_too);
}
-static const char * const mv88e6xxx_port_state_names[] = {
- [PORT_CONTROL_STATE_DISABLED] = "Disabled",
- [PORT_CONTROL_STATE_BLOCKING] = "Blocking/Listening",
- [PORT_CONTROL_STATE_LEARNING] = "Learning",
- [PORT_CONTROL_STATE_FORWARDING] = "Forwarding",
-};
-
-static int _mv88e6xxx_port_state(struct mv88e6xxx_chip *chip, int port,
- u8 state)
-{
- struct dsa_switch *ds = chip->ds;
- u16 reg;
- int err;
- u8 oldstate;
-
- err = mv88e6xxx_port_read(chip, port, PORT_CONTROL, &reg);
- if (err)
- return err;
-
- oldstate = reg & PORT_CONTROL_STATE_MASK;
-
- reg &= ~PORT_CONTROL_STATE_MASK;
- reg |= state;
-
- err = mv88e6xxx_port_write(chip, port, PORT_CONTROL, reg);
- if (err)
- return err;
-
- netdev_dbg(ds->ports[port].netdev, "PortState %s (was %s)\n",
- mv88e6xxx_port_state_names[state],
- mv88e6xxx_port_state_names[oldstate]);
-
- return 0;
-}
-
static int _mv88e6xxx_port_based_vlan_map(struct mv88e6xxx_chip *chip, int port)
{
struct net_device *bridge = chip->ports[port].bridge_dev;
- const u16 mask = (1 << mv88e6xxx_num_ports(chip)) - 1;
struct dsa_switch *ds = chip->ds;
u16 output_ports = 0;
- u16 reg;
- int err;
int i;
/* allow CPU port or DSA link(s) to send frames to every port */
if (dsa_is_cpu_port(ds, port) || dsa_is_dsa_port(ds, port)) {
- output_ports = mask;
+ output_ports = ~0;
} else {
for (i = 0; i < mv88e6xxx_num_ports(chip); ++i) {
/* allow sending frames to every group member */
@@ -1132,14 +1232,7 @@ static int _mv88e6xxx_port_based_vlan_map(struct mv88e6xxx_chip *chip, int port)
/* prevent frames from going back out of the port they came in on */
output_ports &= ~BIT(port);
- err = mv88e6xxx_port_read(chip, port, PORT_BASE_VLAN, &reg);
- if (err)
- return err;
-
- reg &= ~mask;
- reg |= output_ports & mask;
-
- return mv88e6xxx_port_write(chip, port, PORT_BASE_VLAN, reg);
+ return mv88e6xxx_port_set_vlan_map(chip, port, output_ports);
}
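The map computed above follows three rules: CPU and DSA ports may egress to every port, a user port may egress only to the CPU/DSA ports and to members of its own bridge, and no port may egress back out of itself. A standalone sketch under those assumptions (the port count and bridge encoding are invented for illustration):

#include <stdint.h>
#include <stdbool.h>

#define N_PORTS 7	/* hypothetical port count */

static uint16_t vlan_map(int port, const bool cpu_or_dsa[N_PORTS],
			 const int bridge_of[N_PORTS])
{
	uint16_t output_ports = 0;
	int i;

	if (cpu_or_dsa[port]) {
		/* CPU and DSA ports may send frames to every port. */
		output_ports = ~0;
	} else {
		for (i = 0; i < N_PORTS; i++)
			/* User ports reach the CPU/DSA ports plus the
			 * members of their own bridge, if any. */
			if (cpu_or_dsa[i] ||
			    (bridge_of[port] &&
			     bridge_of[i] == bridge_of[port]))
				output_ports |= 1 << i;
	}

	/* Prevent frames from going back out of the ingress port. */
	return output_ports & ~(1 << port);
}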
static void mv88e6xxx_port_stp_state_set(struct dsa_switch *ds, int port,
@@ -1167,13 +1260,11 @@ static void mv88e6xxx_port_stp_state_set(struct dsa_switch *ds, int port,
}
mutex_lock(&chip->reg_lock);
- err = _mv88e6xxx_port_state(chip, port, stp_state);
+ err = mv88e6xxx_port_set_state(chip, port, stp_state);
mutex_unlock(&chip->reg_lock);
if (err)
- netdev_err(ds->ports[port].netdev,
- "failed to update state to %s\n",
- mv88e6xxx_port_state_names[stp_state]);
+ netdev_err(ds->ports[port].netdev, "failed to update state\n");
}
static void mv88e6xxx_port_fast_age(struct dsa_switch *ds, int port)
@@ -1189,49 +1280,6 @@ static void mv88e6xxx_port_fast_age(struct dsa_switch *ds, int port)
netdev_err(ds->ports[port].netdev, "failed to flush ATU\n");
}
-static int _mv88e6xxx_port_pvid(struct mv88e6xxx_chip *chip, int port,
- u16 *new, u16 *old)
-{
- struct dsa_switch *ds = chip->ds;
- u16 pvid, reg;
- int err;
-
- err = mv88e6xxx_port_read(chip, port, PORT_DEFAULT_VLAN, &reg);
- if (err)
- return err;
-
- pvid = reg & PORT_DEFAULT_VLAN_MASK;
-
- if (new) {
- reg &= ~PORT_DEFAULT_VLAN_MASK;
- reg |= *new & PORT_DEFAULT_VLAN_MASK;
-
- err = mv88e6xxx_port_write(chip, port, PORT_DEFAULT_VLAN, reg);
- if (err)
- return err;
-
- netdev_dbg(ds->ports[port].netdev,
- "DefaultVID %d (was %d)\n", *new, pvid);
- }
-
- if (old)
- *old = pvid;
-
- return 0;
-}
-
-static int _mv88e6xxx_port_pvid_get(struct mv88e6xxx_chip *chip,
- int port, u16 *pvid)
-{
- return _mv88e6xxx_port_pvid(chip, port, NULL, pvid);
-}
-
-static int _mv88e6xxx_port_pvid_set(struct mv88e6xxx_chip *chip,
- int port, u16 pvid)
-{
- return _mv88e6xxx_port_pvid(chip, port, &pvid, NULL);
-}
-
static int _mv88e6xxx_vtu_wait(struct mv88e6xxx_chip *chip)
{
return mv88e6xxx_g1_wait(chip, GLOBAL_VTU_OP, GLOBAL_VTU_OP_BUSY);
@@ -1411,7 +1459,7 @@ static int mv88e6xxx_port_vlan_dump(struct dsa_switch *ds, int port,
mutex_lock(&chip->reg_lock);
- err = _mv88e6xxx_port_pvid_get(chip, port, &pvid);
+ err = mv88e6xxx_port_get_pvid(chip, port, &pvid);
if (err)
goto unlock;
@@ -1575,75 +1623,6 @@ loadpurge:
return _mv88e6xxx_vtu_cmd(chip, GLOBAL_VTU_OP_STU_LOAD_PURGE);
}
-static int _mv88e6xxx_port_fid(struct mv88e6xxx_chip *chip, int port,
- u16 *new, u16 *old)
-{
- struct dsa_switch *ds = chip->ds;
- u16 upper_mask;
- u16 fid;
- u16 reg;
- int err;
-
- if (mv88e6xxx_num_databases(chip) == 4096)
- upper_mask = 0xff;
- else if (mv88e6xxx_num_databases(chip) == 256)
- upper_mask = 0xf;
- else
- return -EOPNOTSUPP;
-
- /* Port's default FID bits 3:0 are located in reg 0x06, offset 12 */
- err = mv88e6xxx_port_read(chip, port, PORT_BASE_VLAN, &reg);
- if (err)
- return err;
-
- fid = (reg & PORT_BASE_VLAN_FID_3_0_MASK) >> 12;
-
- if (new) {
- reg &= ~PORT_BASE_VLAN_FID_3_0_MASK;
- reg |= (*new << 12) & PORT_BASE_VLAN_FID_3_0_MASK;
-
- err = mv88e6xxx_port_write(chip, port, PORT_BASE_VLAN, reg);
- if (err)
- return err;
- }
-
- /* Port's default FID bits 11:4 are located in reg 0x05, offset 0 */
- err = mv88e6xxx_port_read(chip, port, PORT_CONTROL_1, &reg);
- if (err)
- return err;
-
- fid |= (reg & upper_mask) << 4;
-
- if (new) {
- reg &= ~upper_mask;
- reg |= (*new >> 4) & upper_mask;
-
- err = mv88e6xxx_port_write(chip, port, PORT_CONTROL_1, reg);
- if (err)
- return err;
-
- netdev_dbg(ds->ports[port].netdev,
- "FID %d (was %d)\n", *new, fid);
- }
-
- if (old)
- *old = fid;
-
- return 0;
-}
-
-static int _mv88e6xxx_port_fid_get(struct mv88e6xxx_chip *chip,
- int port, u16 *fid)
-{
- return _mv88e6xxx_port_fid(chip, port, NULL, fid);
-}
-
-static int _mv88e6xxx_port_fid_set(struct mv88e6xxx_chip *chip,
- int port, u16 fid)
-{
- return _mv88e6xxx_port_fid(chip, port, &fid, NULL);
-}
-
static int _mv88e6xxx_fid_new(struct mv88e6xxx_chip *chip, u16 *fid)
{
DECLARE_BITMAP(fid_bitmap, MV88E6XXX_N_FID);
@@ -1654,7 +1633,7 @@ static int _mv88e6xxx_fid_new(struct mv88e6xxx_chip *chip, u16 *fid)
/* Set every FID bit used by the (un)bridged ports */
for (i = 0; i < mv88e6xxx_num_ports(chip); ++i) {
- err = _mv88e6xxx_port_fid_get(chip, i, fid);
+ err = mv88e6xxx_port_get_fid(chip, i, fid);
if (err)
return err;
@@ -1796,6 +1775,9 @@ static int mv88e6xxx_port_check_hw_vlan(struct dsa_switch *ds, int port,
if (dsa_is_dsa_port(ds, i) || dsa_is_cpu_port(ds, i))
continue;
+ if (!ds->ports[port].netdev)
+ continue;
+
if (vlan.data[i] ==
GLOBAL_VTU_DATA_MEMBER_TAG_NON_MEMBER)
continue;
@@ -1804,6 +1786,9 @@ static int mv88e6xxx_port_check_hw_vlan(struct dsa_switch *ds, int port,
chip->ports[port].bridge_dev)
break; /* same bridge, check next VLAN */
+ if (!chip->ports[i].bridge_dev)
+ continue;
+
netdev_warn(ds->ports[port].netdev,
"hardware VLAN %d already used by %s\n",
vlan.vid,
@@ -1819,48 +1804,19 @@ unlock:
return err;
}
-static const char * const mv88e6xxx_port_8021q_mode_names[] = {
- [PORT_CONTROL_2_8021Q_DISABLED] = "Disabled",
- [PORT_CONTROL_2_8021Q_FALLBACK] = "Fallback",
- [PORT_CONTROL_2_8021Q_CHECK] = "Check",
- [PORT_CONTROL_2_8021Q_SECURE] = "Secure",
-};
-
static int mv88e6xxx_port_vlan_filtering(struct dsa_switch *ds, int port,
bool vlan_filtering)
{
struct mv88e6xxx_chip *chip = ds->priv;
- u16 old, new = vlan_filtering ? PORT_CONTROL_2_8021Q_SECURE :
+ u16 mode = vlan_filtering ? PORT_CONTROL_2_8021Q_SECURE :
PORT_CONTROL_2_8021Q_DISABLED;
- u16 reg;
int err;
if (!mv88e6xxx_has(chip, MV88E6XXX_FLAG_VTU))
return -EOPNOTSUPP;
mutex_lock(&chip->reg_lock);
-
- err = mv88e6xxx_port_read(chip, port, PORT_CONTROL_2, &reg);
- if (err)
- goto unlock;
-
- old = reg & PORT_CONTROL_2_8021Q_MASK;
-
- if (new != old) {
- reg &= ~PORT_CONTROL_2_8021Q_MASK;
- reg |= new & PORT_CONTROL_2_8021Q_MASK;
-
- err = mv88e6xxx_port_write(chip, port, PORT_CONTROL_2, reg);
- if (err)
- goto unlock;
-
- netdev_dbg(ds->ports[port].netdev, "802.1Q Mode %s (was %s)\n",
- mv88e6xxx_port_8021q_mode_names[new],
- mv88e6xxx_port_8021q_mode_names[old]);
- }
-
- err = 0;
-unlock:
+ err = mv88e6xxx_port_set_8021q_mode(chip, port, mode);
mutex_unlock(&chip->reg_lock);
return err;
@@ -1928,7 +1884,7 @@ static void mv88e6xxx_port_vlan_add(struct dsa_switch *ds, int port,
"failed to add VLAN %d%c\n",
vid, untagged ? 'u' : 't');
- if (pvid && _mv88e6xxx_port_pvid_set(chip, port, vlan->vid_end))
+ if (pvid && mv88e6xxx_port_set_pvid(chip, port, vlan->vid_end))
netdev_err(ds->ports[port].netdev, "failed to set PVID %d\n",
vlan->vid_end);
@@ -1983,7 +1939,7 @@ static int mv88e6xxx_port_vlan_del(struct dsa_switch *ds, int port,
mutex_lock(&chip->reg_lock);
- err = _mv88e6xxx_port_pvid_get(chip, port, &pvid);
+ err = mv88e6xxx_port_get_pvid(chip, port, &pvid);
if (err)
goto unlock;
@@ -1993,7 +1949,7 @@ static int mv88e6xxx_port_vlan_del(struct dsa_switch *ds, int port,
goto unlock;
if (vid == pvid) {
- err = _mv88e6xxx_port_pvid_set(chip, port, 0);
+ err = mv88e6xxx_port_set_pvid(chip, port, 0);
if (err)
goto unlock;
}
@@ -2104,7 +2060,7 @@ static int mv88e6xxx_port_db_load_purge(struct mv88e6xxx_chip *chip, int port,
/* Null VLAN ID corresponds to the port private database */
if (vid == 0)
- err = _mv88e6xxx_port_fid_get(chip, port, &vlan.fid);
+ err = mv88e6xxx_port_get_fid(chip, port, &vlan.fid);
else
err = _mv88e6xxx_vtu_get(chip, vid, &vlan, false);
if (err)
@@ -2280,7 +2236,7 @@ static int mv88e6xxx_port_db_dump(struct mv88e6xxx_chip *chip, int port,
int err;
/* Dump port's default Filtering Information Database (VLAN ID 0) */
- err = _mv88e6xxx_port_fid_get(chip, port, &fid);
+ err = mv88e6xxx_port_get_fid(chip, port, &fid);
if (err)
return err;
@@ -2368,67 +2324,58 @@ static void mv88e6xxx_port_bridge_leave(struct dsa_switch *ds, int port)
mutex_unlock(&chip->reg_lock);
}
-static int mv88e6xxx_switch_reset(struct mv88e6xxx_chip *chip)
+static int mv88e6xxx_software_reset(struct mv88e6xxx_chip *chip)
{
- bool ppu_active = mv88e6xxx_has(chip, MV88E6XXX_FLAG_PPU_ACTIVE);
- u16 is_reset = (ppu_active ? 0x8800 : 0xc800);
- struct gpio_desc *gpiod = chip->reset;
- unsigned long timeout;
- u16 reg;
- int err;
- int i;
-
- /* Set all ports to the disabled state. */
- for (i = 0; i < mv88e6xxx_num_ports(chip); i++) {
- err = mv88e6xxx_port_read(chip, i, PORT_CONTROL, &reg);
- if (err)
- return err;
+ if (chip->info->ops->reset)
+ return chip->info->ops->reset(chip);
- err = mv88e6xxx_port_write(chip, i, PORT_CONTROL,
- reg & 0xfffc);
- if (err)
- return err;
- }
+ return 0;
+}
- /* Wait for transmit queues to drain. */
- usleep_range(2000, 4000);
+static void mv88e6xxx_hardware_reset(struct mv88e6xxx_chip *chip)
+{
+ struct gpio_desc *gpiod = chip->reset;
- /* If there is a gpio connected to the reset pin, toggle it */
+ /* If there is a GPIO connected to the reset pin, toggle it */
if (gpiod) {
gpiod_set_value_cansleep(gpiod, 1);
usleep_range(10000, 20000);
gpiod_set_value_cansleep(gpiod, 0);
usleep_range(10000, 20000);
}
+}
- /* Reset the switch. Keep the PPU active if requested. The PPU
- * needs to be active to support indirect phy register access
- * through global registers 0x18 and 0x19.
- */
- if (ppu_active)
- err = mv88e6xxx_g1_write(chip, 0x04, 0xc000);
- else
- err = mv88e6xxx_g1_write(chip, 0x04, 0xc400);
- if (err)
- return err;
+static int mv88e6xxx_disable_ports(struct mv88e6xxx_chip *chip)
+{
+ int i, err;
- /* Wait up to one second for reset to complete. */
- timeout = jiffies + 1 * HZ;
- while (time_before(jiffies, timeout)) {
- err = mv88e6xxx_g1_read(chip, 0x00, &reg);
+ /* Set all ports to the Disabled state */
+ for (i = 0; i < mv88e6xxx_num_ports(chip); i++) {
+ err = mv88e6xxx_port_set_state(chip, i,
+ PORT_CONTROL_STATE_DISABLED);
if (err)
return err;
-
- if ((reg & is_reset) == is_reset)
- break;
- usleep_range(1000, 2000);
}
- if (time_after(jiffies, timeout))
- err = -ETIMEDOUT;
- else
- err = 0;
- return err;
+ /* Wait for the transmit queues to drain,
+ * i.e. 2ms for a maximum-sized frame to be transmitted at 10 Mbps.
+ */
+ usleep_range(2000, 4000);
+
+ return 0;
+}
+
+static int mv88e6xxx_switch_reset(struct mv88e6xxx_chip *chip)
+{
+ int err;
+
+ err = mv88e6xxx_disable_ports(chip);
+ if (err)
+ return err;
+
+ mv88e6xxx_hardware_reset(chip);
+
+ return mv88e6xxx_software_reset(chip);
}
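The monolithic reset is now three ordered steps: quiesce the ports, optionally toggle the reset GPIO, then issue the model-specific software reset. A compact sketch of the sequence, with stub helpers standing in for the real ones:

#include <errno.h>

struct chip;

static int disable_ports(struct chip *chip)   { (void)chip; return 0; }
static void hardware_reset(struct chip *chip) { (void)chip; }
static int software_reset(struct chip *chip)  { (void)chip; return 0; }

static int switch_reset(struct chip *chip)
{
	int err;

	/* 1. Quiesce: put every port in the Disabled state and let the
	 *    transmit queues drain. */
	err = disable_ports(chip);
	if (err)
		return err;

	/* 2. Board-level reset via the optional reset GPIO. */
	hardware_reset(chip);

	/* 3. Model-specific software reset through the ops table. */
	return software_reset(chip);
}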
static int mv88e6xxx_serdes_power_on(struct mv88e6xxx_chip *chip)
@@ -2449,42 +2396,93 @@ static int mv88e6xxx_serdes_power_on(struct mv88e6xxx_chip *chip)
return err;
}
-static int mv88e6xxx_setup_port(struct mv88e6xxx_chip *chip, int port)
+static int mv88e6xxx_setup_port_dsa(struct mv88e6xxx_chip *chip, int port,
+ int upstream_port)
{
- struct dsa_switch *ds = chip->ds;
int err;
- u16 reg;
- if (mv88e6xxx_6352_family(chip) || mv88e6xxx_6351_family(chip) ||
- mv88e6xxx_6165_family(chip) || mv88e6xxx_6097_family(chip) ||
- mv88e6xxx_6185_family(chip) || mv88e6xxx_6095_family(chip) ||
- mv88e6xxx_6065_family(chip) || mv88e6xxx_6320_family(chip)) {
- /* MAC Forcing register: don't force link, speed,
- * duplex or flow control state to any particular
- * values on physical ports, but force the CPU port
- * and all DSA ports to their maximum bandwidth and
- * full duplex.
- */
- err = mv88e6xxx_port_read(chip, port, PORT_PCS_CTRL, &reg);
- if (dsa_is_cpu_port(ds, port) || dsa_is_dsa_port(ds, port)) {
- reg &= ~PORT_PCS_CTRL_UNFORCED;
- reg |= PORT_PCS_CTRL_FORCE_LINK |
- PORT_PCS_CTRL_LINK_UP |
- PORT_PCS_CTRL_DUPLEX_FULL |
- PORT_PCS_CTRL_FORCE_DUPLEX;
- if (mv88e6xxx_6065_family(chip))
- reg |= PORT_PCS_CTRL_100;
- else
- reg |= PORT_PCS_CTRL_1000;
- } else {
- reg |= PORT_PCS_CTRL_UNFORCED;
- }
+ err = chip->info->ops->port_set_frame_mode(
+ chip, port, MV88E6XXX_FRAME_MODE_DSA);
+ if (err)
+ return err;
+
+ return chip->info->ops->port_set_egress_unknowns(
+ chip, port, port == upstream_port);
+}
+
+static int mv88e6xxx_setup_port_cpu(struct mv88e6xxx_chip *chip, int port)
+{
+ int err;
+
+ switch (chip->info->tag_protocol) {
+ case DSA_TAG_PROTO_EDSA:
+ err = chip->info->ops->port_set_frame_mode(
+ chip, port, MV88E6XXX_FRAME_MODE_ETHERTYPE);
+ if (err)
+ return err;
- err = mv88e6xxx_port_write(chip, port, PORT_PCS_CTRL, reg);
+ err = mv88e6xxx_port_set_egress_mode(
+ chip, port, PORT_CONTROL_EGRESS_ADD_TAG);
if (err)
return err;
+
+ if (chip->info->ops->port_set_ether_type)
+ err = chip->info->ops->port_set_ether_type(
+ chip, port, ETH_P_EDSA);
+ break;
+
+ case DSA_TAG_PROTO_DSA:
+ err = chip->info->ops->port_set_frame_mode(
+ chip, port, MV88E6XXX_FRAME_MODE_DSA);
+ if (err)
+ return err;
+
+ err = mv88e6xxx_port_set_egress_mode(
+ chip, port, PORT_CONTROL_EGRESS_UNMODIFIED);
+ break;
+ default:
+ err = -EINVAL;
}
+ if (err)
+ return err;
+
+ return chip->info->ops->port_set_egress_unknowns(chip, port, true);
+}
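The two CPU-port tagging modes differ in what leaves the switch: EDSA wraps the DSA tag in a regular Ethertype (ETH_P_EDSA, 0xDADA) and adds it on egress, while plain DSA sends frames unmodified in DSA frame mode. A hedged sketch of that decision, using made-up enum names rather than the driver's constants:

#include <errno.h>

enum tag_proto  { TAG_PROTO_DSA, TAG_PROTO_EDSA };
enum frame_mode { FRAME_MODE_DSA, FRAME_MODE_ETHERTYPE };
enum egress_mode { EGRESS_UNMODIFIED, EGRESS_ADD_TAG };

struct cpu_port_cfg {
	enum frame_mode frame;
	enum egress_mode egress;
	int ether_type;		/* 0 when unused */
};

static int cpu_port_decide(enum tag_proto proto, struct cpu_port_cfg *cfg)
{
	switch (proto) {
	case TAG_PROTO_EDSA:
		/* The DSA tag is wrapped in a regular Ethertype so the
		 * frame still parses as Ethernet on the wire. */
		cfg->frame = FRAME_MODE_ETHERTYPE;
		cfg->egress = EGRESS_ADD_TAG;
		cfg->ether_type = 0xDADA;	/* ETH_P_EDSA */
		return 0;
	case TAG_PROTO_DSA:
		cfg->frame = FRAME_MODE_DSA;
		cfg->egress = EGRESS_UNMODIFIED;
		cfg->ether_type = 0;
		return 0;
	}
	return -EINVAL;
}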
+
+static int mv88e6xxx_setup_port_normal(struct mv88e6xxx_chip *chip, int port)
+{
+ int err;
+
+ err = chip->info->ops->port_set_frame_mode(
+ chip, port, MV88E6XXX_FRAME_MODE_NORMAL);
+ if (err)
+ return err;
+
+ return chip->info->ops->port_set_egress_unknowns(chip, port, false);
+}
+
+static int mv88e6xxx_setup_port(struct mv88e6xxx_chip *chip, int port)
+{
+ struct dsa_switch *ds = chip->ds;
+ int err;
+ u16 reg;
+
+ /* MAC Forcing register: don't force link, speed, duplex or flow control
+ * state to any particular values on physical ports, but force the CPU
+ * port and all DSA ports to their maximum bandwidth and full duplex.
+ */
+ if (dsa_is_cpu_port(ds, port) || dsa_is_dsa_port(ds, port))
+ err = mv88e6xxx_port_setup_mac(chip, port, LINK_FORCED_UP,
+ SPEED_MAX, DUPLEX_FULL,
+ PHY_INTERFACE_MODE_NA);
+ else
+ err = mv88e6xxx_port_setup_mac(chip, port, LINK_UNFORCED,
+ SPEED_UNFORCED, DUPLEX_UNFORCED,
+ PHY_INTERFACE_MODE_NA);
+ if (err)
+ return err;
+
/* Port Control: disable Drop-on-Unlock, disable Drop-on-Lock,
* disable Header mode, enable IGMP/MLD snooping, disable VLAN
* tunneling, determine priority by looking at 802.1p and IP
@@ -2499,44 +2497,23 @@ static int mv88e6xxx_setup_port(struct mv88e6xxx_chip *chip, int port)
* If this is the upstream port for this switch, enable
* forwarding of unknown unicasts and multicasts.
*/
- reg = 0;
- if (mv88e6xxx_6352_family(chip) || mv88e6xxx_6351_family(chip) ||
- mv88e6xxx_6165_family(chip) || mv88e6xxx_6097_family(chip) ||
- mv88e6xxx_6095_family(chip) || mv88e6xxx_6065_family(chip) ||
- mv88e6xxx_6185_family(chip) || mv88e6xxx_6320_family(chip))
- reg = PORT_CONTROL_IGMP_MLD_SNOOP |
+ reg = PORT_CONTROL_IGMP_MLD_SNOOP |
PORT_CONTROL_USE_TAG | PORT_CONTROL_USE_IP |
PORT_CONTROL_STATE_FORWARDING;
- if (dsa_is_cpu_port(ds, port)) {
- if (mv88e6xxx_has(chip, MV88E6XXX_FLAG_EDSA))
- reg |= PORT_CONTROL_FRAME_ETHER_TYPE_DSA |
- PORT_CONTROL_FORWARD_UNKNOWN_MC;
- else
- reg |= PORT_CONTROL_DSA_TAG;
- reg |= PORT_CONTROL_EGRESS_ADD_TAG |
- PORT_CONTROL_FORWARD_UNKNOWN;
- }
- if (dsa_is_dsa_port(ds, port)) {
- if (mv88e6xxx_6095_family(chip) ||
- mv88e6xxx_6185_family(chip))
- reg |= PORT_CONTROL_DSA_TAG;
- if (mv88e6xxx_6352_family(chip) ||
- mv88e6xxx_6351_family(chip) ||
- mv88e6xxx_6165_family(chip) ||
- mv88e6xxx_6097_family(chip) ||
- mv88e6xxx_6320_family(chip)) {
- reg |= PORT_CONTROL_FRAME_MODE_DSA;
- }
+ err = mv88e6xxx_port_write(chip, port, PORT_CONTROL, reg);
+ if (err)
+ return err;
- if (port == dsa_upstream_port(ds))
- reg |= PORT_CONTROL_FORWARD_UNKNOWN |
- PORT_CONTROL_FORWARD_UNKNOWN_MC;
- }
- if (reg) {
- err = mv88e6xxx_port_write(chip, port, PORT_CONTROL, reg);
- if (err)
- return err;
+ if (dsa_is_cpu_port(ds, port)) {
+ err = mv88e6xxx_setup_port_cpu(chip, port);
+ } else if (dsa_is_dsa_port(ds, port)) {
+ err = mv88e6xxx_setup_port_dsa(chip, port,
+ dsa_upstream_port(ds));
+ } else {
+ err = mv88e6xxx_setup_port_normal(chip, port);
}
+ if (err)
+ return err;
/* If this port is connected to a SerDes, make sure the SerDes is not
* powered down.
@@ -2568,10 +2545,6 @@ static int mv88e6xxx_setup_port(struct mv88e6xxx_chip *chip, int port)
mv88e6xxx_6185_family(chip))
reg = PORT_CONTROL_2_MAP_DA;
- if (mv88e6xxx_6352_family(chip) || mv88e6xxx_6351_family(chip) ||
- mv88e6xxx_6165_family(chip) || mv88e6xxx_6320_family(chip))
- reg |= PORT_CONTROL_2_JUMBO_10240;
-
if (mv88e6xxx_6095_family(chip) || mv88e6xxx_6185_family(chip)) {
/* Set the upstream port this port should use */
reg |= dsa_upstream_port(ds);
@@ -2590,6 +2563,12 @@ static int mv88e6xxx_setup_port(struct mv88e6xxx_chip *chip, int port)
return err;
}
+ if (chip->info->ops->port_jumbo_config) {
+ err = chip->info->ops->port_jumbo_config(chip, port);
+ if (err)
+ return err;
+ }
+
/* Port Association Vector: when learning source addresses
* of packets, add the address to the address database using
* a port bitmap that has only the bit for this port set and
@@ -2609,17 +2588,15 @@ static int mv88e6xxx_setup_port(struct mv88e6xxx_chip *chip, int port)
if (err)
return err;
- if (mv88e6xxx_6352_family(chip) || mv88e6xxx_6351_family(chip) ||
- mv88e6xxx_6165_family(chip) || mv88e6xxx_6097_family(chip) ||
- mv88e6xxx_6320_family(chip)) {
- /* Do not limit the period of time that this port can
- * be paused for by the remote end or the period of
- * time that this port can pause the remote end.
- */
- err = mv88e6xxx_port_write(chip, port, PORT_PAUSE_CTRL, 0x0000);
+ if (chip->info->ops->port_pause_config) {
+ err = chip->info->ops->port_pause_config(chip, port);
if (err)
return err;
+ }
+ if (mv88e6xxx_6352_family(chip) || mv88e6xxx_6351_family(chip) ||
+ mv88e6xxx_6165_family(chip) || mv88e6xxx_6097_family(chip) ||
+ mv88e6xxx_6320_family(chip)) {
/* Port ATU control: disable limiting the number of
* address database entries that this port is allowed
* to use.
@@ -2633,45 +2610,16 @@ static int mv88e6xxx_setup_port(struct mv88e6xxx_chip *chip, int port)
0x0000);
if (err)
return err;
+ }
- /* Port Ethertype: use the Ethertype DSA Ethertype
- * value.
- */
- if (mv88e6xxx_has(chip, MV88E6XXX_FLAG_EDSA)) {
- err = mv88e6xxx_port_write(chip, port, PORT_ETH_TYPE,
- ETH_P_EDSA);
- if (err)
- return err;
- }
-
- /* Tag Remap: use an identity 802.1p prio -> switch
- * prio mapping.
- */
- err = mv88e6xxx_port_write(chip, port, PORT_TAG_REGMAP_0123,
- 0x3210);
- if (err)
- return err;
-
- /* Tag Remap 2: use an identity 802.1p prio -> switch
- * prio mapping.
- */
- err = mv88e6xxx_port_write(chip, port, PORT_TAG_REGMAP_4567,
- 0x7654);
+ if (chip->info->ops->port_tag_remap) {
+ err = chip->info->ops->port_tag_remap(chip, port);
if (err)
return err;
}
- /* Rate Control: disable ingress rate limiting. */
- if (mv88e6xxx_6352_family(chip) || mv88e6xxx_6351_family(chip) ||
- mv88e6xxx_6165_family(chip) || mv88e6xxx_6097_family(chip) ||
- mv88e6xxx_6320_family(chip)) {
- err = mv88e6xxx_port_write(chip, port, PORT_RATE_CONTROL,
- 0x0001);
- if (err)
- return err;
- } else if (mv88e6xxx_6185_family(chip) || mv88e6xxx_6095_family(chip)) {
- err = mv88e6xxx_port_write(chip, port, PORT_RATE_CONTROL,
- 0x0000);
+ if (chip->info->ops->port_egress_rate_limiting) {
+ err = chip->info->ops->port_egress_rate_limiting(chip, port);
if (err)
return err;
}
@@ -2687,7 +2635,7 @@ static int mv88e6xxx_setup_port(struct mv88e6xxx_chip *chip, int port)
* database, and allow bidirectional communication between the
* CPU and DSA port(s), and the other ports.
*/
- err = _mv88e6xxx_port_fid_set(chip, port, 0);
+ err = mv88e6xxx_port_set_fid(chip, port, 0);
if (err)
return err;
@@ -2701,7 +2649,7 @@ static int mv88e6xxx_setup_port(struct mv88e6xxx_chip *chip, int port)
return mv88e6xxx_port_write(chip, port, PORT_DEFAULT_VLAN, 0x0000);
}
-int mv88e6xxx_g1_set_switch_mac(struct mv88e6xxx_chip *chip, u8 *addr)
+static int mv88e6xxx_g1_set_switch_mac(struct mv88e6xxx_chip *chip, u8 *addr)
{
int err;
@@ -2764,30 +2712,26 @@ static int mv88e6xxx_g1_setup(struct mv88e6xxx_chip *chip)
{
struct dsa_switch *ds = chip->ds;
u32 upstream_port = dsa_upstream_port(ds);
- u16 reg;
int err;
/* Enable the PHY Polling Unit if present, don't discard any packets,
* and mask all interrupt sources.
*/
- reg = 0;
- if (mv88e6xxx_has(chip, MV88E6XXX_FLAG_PPU) ||
- mv88e6xxx_has(chip, MV88E6XXX_FLAG_PPU_ACTIVE))
- reg |= GLOBAL_CONTROL_PPU_ENABLE;
-
- err = mv88e6xxx_g1_write(chip, GLOBAL_CONTROL, reg);
+ err = mv88e6xxx_ppu_enable(chip);
if (err)
return err;
- /* Configure the upstream port, and configure it as the port to which
- * ingress and egress and ARP monitor frames are to be sent.
- */
- reg = upstream_port << GLOBAL_MONITOR_CONTROL_INGRESS_SHIFT |
- upstream_port << GLOBAL_MONITOR_CONTROL_EGRESS_SHIFT |
- upstream_port << GLOBAL_MONITOR_CONTROL_ARP_SHIFT;
- err = mv88e6xxx_g1_write(chip, GLOBAL_MONITOR_CONTROL, reg);
- if (err)
- return err;
+ if (chip->info->ops->g1_set_cpu_port) {
+ err = chip->info->ops->g1_set_cpu_port(chip, upstream_port);
+ if (err)
+ return err;
+ }
+
+ if (chip->info->ops->g1_set_egress_port) {
+ err = chip->info->ops->g1_set_egress_port(chip, upstream_port);
+ if (err)
+ return err;
+ }
/* Disable remote management, and set the switch's DSA device number. */
err = mv88e6xxx_g1_write(chip, GLOBAL_CONTROL_2,
@@ -2850,6 +2794,11 @@ static int mv88e6xxx_g1_setup(struct mv88e6xxx_chip *chip)
if (err)
return err;
+ /* Initialize the statistics unit */
+ err = mv88e6xxx_stats_set_histogram(chip);
+ if (err)
+ return err;
+
/* Clear the statistics counters for all ports */
err = mv88e6xxx_g1_write(chip, GLOBAL_STATS_OP,
GLOBAL_STATS_OP_FLUSH_ALL);
@@ -2857,7 +2806,7 @@ static int mv88e6xxx_g1_setup(struct mv88e6xxx_chip *chip)
return err;
/* Wait for the flush to complete. */
- err = _mv88e6xxx_stats_wait(chip);
+ err = mv88e6xxx_g1_stats_wait(chip);
if (err)
return err;
@@ -2875,10 +2824,6 @@ static int mv88e6xxx_setup(struct dsa_switch *ds)
mutex_lock(&chip->reg_lock);
- err = mv88e6xxx_switch_reset(chip);
- if (err)
- goto unlock;
-
/* Setup Switch Port Registers */
for (i = 0; i < mv88e6xxx_num_ports(chip); i++) {
err = mv88e6xxx_setup_port(chip, i);
@@ -2898,6 +2843,17 @@ static int mv88e6xxx_setup(struct dsa_switch *ds)
goto unlock;
}
+ /* Some generations configure the sending of reserved management
+ * frames to the CPU in global2, others in global1. Hence it fits
+ * neither of the two setup functions above.
+ */
+ if (chip->info->ops->mgmt_rsvd2cpu) {
+ err = chip->info->ops->mgmt_rsvd2cpu(chip);
+ if (err)
+ goto unlock;
+ }
+
unlock:
mutex_unlock(&chip->reg_lock);
@@ -3203,119 +3159,653 @@ static int mv88e6xxx_set_eeprom(struct dsa_switch *ds,
}
static const struct mv88e6xxx_ops mv88e6085_ops = {
+ /* MV88E6XXX_FAMILY_6097 */
.set_switch_mac = mv88e6xxx_g1_set_switch_mac,
.phy_read = mv88e6xxx_phy_ppu_read,
.phy_write = mv88e6xxx_phy_ppu_write,
+ .port_set_link = mv88e6xxx_port_set_link,
+ .port_set_duplex = mv88e6xxx_port_set_duplex,
+ .port_set_speed = mv88e6185_port_set_speed,
+ .port_tag_remap = mv88e6095_port_tag_remap,
+ .port_set_frame_mode = mv88e6351_port_set_frame_mode,
+ .port_set_egress_unknowns = mv88e6351_port_set_egress_unknowns,
+ .port_set_ether_type = mv88e6351_port_set_ether_type,
+ .port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting,
+ .port_pause_config = mv88e6097_port_pause_config,
+ .stats_snapshot = mv88e6xxx_g1_stats_snapshot,
+ .stats_get_sset_count = mv88e6095_stats_get_sset_count,
+ .stats_get_strings = mv88e6095_stats_get_strings,
+ .stats_get_stats = mv88e6095_stats_get_stats,
+ .g1_set_cpu_port = mv88e6095_g1_set_cpu_port,
+ .g1_set_egress_port = mv88e6095_g1_set_egress_port,
+ .mgmt_rsvd2cpu = mv88e6095_g2_mgmt_rsvd2cpu,
+ .ppu_enable = mv88e6185_g1_ppu_enable,
+ .ppu_disable = mv88e6185_g1_ppu_disable,
+ .reset = mv88e6185_g1_reset,
};
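These per-model ops tables are what let the setup code probe a function pointer instead of testing chip families. A minimal illustration of the idea, independent of the driver (all names invented):

struct chip;
typedef int (*port_op)(struct chip *chip, int port);

struct ops {
	port_op port_tag_remap;		/* NULL where a model lacks it */
	port_op port_jumbo_config;
};

static int model_a_tag_remap(struct chip *c, int p)
{
	(void)c; (void)p;
	return 0;
}

static const struct ops model_a_ops = {
	.port_tag_remap = model_a_tag_remap,
	/* .port_jumbo_config deliberately left NULL */
};

static int setup_port(struct chip *c, const struct ops *ops, int port)
{
	int err = 0;

	/* Probe the hook instead of testing chip families. */
	if (ops->port_tag_remap)
		err = ops->port_tag_remap(c, port);
	if (!err && ops->port_jumbo_config)
		err = ops->port_jumbo_config(c, port);

	return err;
}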
static const struct mv88e6xxx_ops mv88e6095_ops = {
+ /* MV88E6XXX_FAMILY_6095 */
.set_switch_mac = mv88e6xxx_g1_set_switch_mac,
.phy_read = mv88e6xxx_phy_ppu_read,
.phy_write = mv88e6xxx_phy_ppu_write,
+ .port_set_link = mv88e6xxx_port_set_link,
+ .port_set_duplex = mv88e6xxx_port_set_duplex,
+ .port_set_speed = mv88e6185_port_set_speed,
+ .port_set_frame_mode = mv88e6085_port_set_frame_mode,
+ .port_set_egress_unknowns = mv88e6085_port_set_egress_unknowns,
+ .stats_snapshot = mv88e6xxx_g1_stats_snapshot,
+ .stats_get_sset_count = mv88e6095_stats_get_sset_count,
+ .stats_get_strings = mv88e6095_stats_get_strings,
+ .stats_get_stats = mv88e6095_stats_get_stats,
+ .mgmt_rsvd2cpu = mv88e6095_g2_mgmt_rsvd2cpu,
+ .ppu_enable = mv88e6185_g1_ppu_enable,
+ .ppu_disable = mv88e6185_g1_ppu_disable,
+ .reset = mv88e6185_g1_reset,
+};
+
+static const struct mv88e6xxx_ops mv88e6097_ops = {
+ /* MV88E6XXX_FAMILY_6097 */
+ .set_switch_mac = mv88e6xxx_g2_set_switch_mac,
+ .phy_read = mv88e6xxx_g2_smi_phy_read,
+ .phy_write = mv88e6xxx_g2_smi_phy_write,
+ .port_set_link = mv88e6xxx_port_set_link,
+ .port_set_duplex = mv88e6xxx_port_set_duplex,
+ .port_set_speed = mv88e6185_port_set_speed,
+ .port_tag_remap = mv88e6095_port_tag_remap,
+ .port_set_frame_mode = mv88e6351_port_set_frame_mode,
+ .port_set_egress_unknowns = mv88e6351_port_set_egress_unknowns,
+ .port_set_ether_type = mv88e6351_port_set_ether_type,
+ .port_jumbo_config = mv88e6165_port_jumbo_config,
+ .port_egress_rate_limiting = mv88e6095_port_egress_rate_limiting,
+ .port_pause_config = mv88e6097_port_pause_config,
+ .stats_snapshot = mv88e6xxx_g1_stats_snapshot,
+ .stats_get_sset_count = mv88e6095_stats_get_sset_count,
+ .stats_get_strings = mv88e6095_stats_get_strings,
+ .stats_get_stats = mv88e6095_stats_get_stats,
+ .g1_set_cpu_port = mv88e6095_g1_set_cpu_port,
+ .g1_set_egress_port = mv88e6095_g1_set_egress_port,
+ .mgmt_rsvd2cpu = mv88e6095_g2_mgmt_rsvd2cpu,
+ .reset = mv88e6352_g1_reset,
};
static const struct mv88e6xxx_ops mv88e6123_ops = {
+ /* MV88E6XXX_FAMILY_6165 */
.set_switch_mac = mv88e6xxx_g2_set_switch_mac,
.phy_read = mv88e6xxx_read,
.phy_write = mv88e6xxx_write,
+ .port_set_link = mv88e6xxx_port_set_link,
+ .port_set_duplex = mv88e6xxx_port_set_duplex,
+ .port_set_speed = mv88e6185_port_set_speed,
+ .port_set_frame_mode = mv88e6085_port_set_frame_mode,
+ .port_set_egress_unknowns = mv88e6085_port_set_egress_unknowns,
+ .stats_snapshot = mv88e6xxx_g1_stats_snapshot,
+ .stats_get_sset_count = mv88e6095_stats_get_sset_count,
+ .stats_get_strings = mv88e6095_stats_get_strings,
+ .stats_get_stats = mv88e6095_stats_get_stats,
+ .g1_set_cpu_port = mv88e6095_g1_set_cpu_port,
+ .g1_set_egress_port = mv88e6095_g1_set_egress_port,
+ .mgmt_rsvd2cpu = mv88e6095_g2_mgmt_rsvd2cpu,
+ .reset = mv88e6352_g1_reset,
};
static const struct mv88e6xxx_ops mv88e6131_ops = {
+ /* MV88E6XXX_FAMILY_6185 */
.set_switch_mac = mv88e6xxx_g1_set_switch_mac,
.phy_read = mv88e6xxx_phy_ppu_read,
.phy_write = mv88e6xxx_phy_ppu_write,
+ .port_set_link = mv88e6xxx_port_set_link,
+ .port_set_duplex = mv88e6xxx_port_set_duplex,
+ .port_set_speed = mv88e6185_port_set_speed,
+ .port_tag_remap = mv88e6095_port_tag_remap,
+ .port_set_frame_mode = mv88e6351_port_set_frame_mode,
+ .port_set_egress_unknowns = mv88e6351_port_set_egress_unknowns,
+ .port_set_ether_type = mv88e6351_port_set_ether_type,
+ .port_jumbo_config = mv88e6165_port_jumbo_config,
+ .port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting,
+ .port_pause_config = mv88e6097_port_pause_config,
+ .stats_snapshot = mv88e6xxx_g1_stats_snapshot,
+ .stats_get_sset_count = mv88e6095_stats_get_sset_count,
+ .stats_get_strings = mv88e6095_stats_get_strings,
+ .stats_get_stats = mv88e6095_stats_get_stats,
+ .g1_set_cpu_port = mv88e6095_g1_set_cpu_port,
+ .g1_set_egress_port = mv88e6095_g1_set_egress_port,
+ .mgmt_rsvd2cpu = mv88e6095_g2_mgmt_rsvd2cpu,
+ .ppu_enable = mv88e6185_g1_ppu_enable,
+ .ppu_disable = mv88e6185_g1_ppu_disable,
+ .reset = mv88e6185_g1_reset,
};
static const struct mv88e6xxx_ops mv88e6161_ops = {
+ /* MV88E6XXX_FAMILY_6165 */
.set_switch_mac = mv88e6xxx_g2_set_switch_mac,
.phy_read = mv88e6xxx_read,
.phy_write = mv88e6xxx_write,
+ .port_set_link = mv88e6xxx_port_set_link,
+ .port_set_duplex = mv88e6xxx_port_set_duplex,
+ .port_set_speed = mv88e6185_port_set_speed,
+ .port_tag_remap = mv88e6095_port_tag_remap,
+ .port_set_frame_mode = mv88e6351_port_set_frame_mode,
+ .port_set_egress_unknowns = mv88e6351_port_set_egress_unknowns,
+ .port_set_ether_type = mv88e6351_port_set_ether_type,
+ .port_jumbo_config = mv88e6165_port_jumbo_config,
+ .port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting,
+ .port_pause_config = mv88e6097_port_pause_config,
+ .stats_snapshot = mv88e6xxx_g1_stats_snapshot,
+ .stats_get_sset_count = mv88e6095_stats_get_sset_count,
+ .stats_get_strings = mv88e6095_stats_get_strings,
+ .stats_get_stats = mv88e6095_stats_get_stats,
+ .g1_set_cpu_port = mv88e6095_g1_set_cpu_port,
+ .g1_set_egress_port = mv88e6095_g1_set_egress_port,
+ .mgmt_rsvd2cpu = mv88e6095_g2_mgmt_rsvd2cpu,
+ .reset = mv88e6352_g1_reset,
};
static const struct mv88e6xxx_ops mv88e6165_ops = {
+ /* MV88E6XXX_FAMILY_6165 */
.set_switch_mac = mv88e6xxx_g2_set_switch_mac,
.phy_read = mv88e6xxx_read,
.phy_write = mv88e6xxx_write,
+ .port_set_link = mv88e6xxx_port_set_link,
+ .port_set_duplex = mv88e6xxx_port_set_duplex,
+ .port_set_speed = mv88e6185_port_set_speed,
+ .stats_snapshot = mv88e6xxx_g1_stats_snapshot,
+ .stats_get_sset_count = mv88e6095_stats_get_sset_count,
+ .stats_get_strings = mv88e6095_stats_get_strings,
+ .stats_get_stats = mv88e6095_stats_get_stats,
+ .g1_set_cpu_port = mv88e6095_g1_set_cpu_port,
+ .g1_set_egress_port = mv88e6095_g1_set_egress_port,
+ .mgmt_rsvd2cpu = mv88e6095_g2_mgmt_rsvd2cpu,
+ .reset = mv88e6352_g1_reset,
};
static const struct mv88e6xxx_ops mv88e6171_ops = {
+ /* MV88E6XXX_FAMILY_6351 */
.set_switch_mac = mv88e6xxx_g2_set_switch_mac,
.phy_read = mv88e6xxx_g2_smi_phy_read,
.phy_write = mv88e6xxx_g2_smi_phy_write,
+ .port_set_link = mv88e6xxx_port_set_link,
+ .port_set_duplex = mv88e6xxx_port_set_duplex,
+ .port_set_rgmii_delay = mv88e6352_port_set_rgmii_delay,
+ .port_set_speed = mv88e6185_port_set_speed,
+ .port_tag_remap = mv88e6095_port_tag_remap,
+ .port_set_frame_mode = mv88e6351_port_set_frame_mode,
+ .port_set_egress_unknowns = mv88e6351_port_set_egress_unknowns,
+ .port_set_ether_type = mv88e6351_port_set_ether_type,
+ .port_jumbo_config = mv88e6165_port_jumbo_config,
+ .port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting,
+ .port_pause_config = mv88e6097_port_pause_config,
+ .stats_snapshot = mv88e6320_g1_stats_snapshot,
+ .stats_get_sset_count = mv88e6095_stats_get_sset_count,
+ .stats_get_strings = mv88e6095_stats_get_strings,
+ .stats_get_stats = mv88e6095_stats_get_stats,
+ .g1_set_cpu_port = mv88e6095_g1_set_cpu_port,
+ .g1_set_egress_port = mv88e6095_g1_set_egress_port,
+ .mgmt_rsvd2cpu = mv88e6095_g2_mgmt_rsvd2cpu,
+ .reset = mv88e6352_g1_reset,
};
static const struct mv88e6xxx_ops mv88e6172_ops = {
+ /* MV88E6XXX_FAMILY_6352 */
.get_eeprom = mv88e6xxx_g2_get_eeprom16,
.set_eeprom = mv88e6xxx_g2_set_eeprom16,
.set_switch_mac = mv88e6xxx_g2_set_switch_mac,
.phy_read = mv88e6xxx_g2_smi_phy_read,
.phy_write = mv88e6xxx_g2_smi_phy_write,
+ .port_set_link = mv88e6xxx_port_set_link,
+ .port_set_duplex = mv88e6xxx_port_set_duplex,
+ .port_set_rgmii_delay = mv88e6352_port_set_rgmii_delay,
+ .port_set_speed = mv88e6352_port_set_speed,
+ .port_tag_remap = mv88e6095_port_tag_remap,
+ .port_set_frame_mode = mv88e6351_port_set_frame_mode,
+ .port_set_egress_unknowns = mv88e6351_port_set_egress_unknowns,
+ .port_set_ether_type = mv88e6351_port_set_ether_type,
+ .port_jumbo_config = mv88e6165_port_jumbo_config,
+ .port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting,
+ .port_pause_config = mv88e6097_port_pause_config,
+ .stats_snapshot = mv88e6320_g1_stats_snapshot,
+ .stats_get_sset_count = mv88e6095_stats_get_sset_count,
+ .stats_get_strings = mv88e6095_stats_get_strings,
+ .stats_get_stats = mv88e6095_stats_get_stats,
+ .g1_set_cpu_port = mv88e6095_g1_set_cpu_port,
+ .g1_set_egress_port = mv88e6095_g1_set_egress_port,
+ .mgmt_rsvd2cpu = mv88e6095_g2_mgmt_rsvd2cpu,
+ .reset = mv88e6352_g1_reset,
};
static const struct mv88e6xxx_ops mv88e6175_ops = {
+ /* MV88E6XXX_FAMILY_6351 */
.set_switch_mac = mv88e6xxx_g2_set_switch_mac,
.phy_read = mv88e6xxx_g2_smi_phy_read,
.phy_write = mv88e6xxx_g2_smi_phy_write,
+ .port_set_link = mv88e6xxx_port_set_link,
+ .port_set_duplex = mv88e6xxx_port_set_duplex,
+ .port_set_rgmii_delay = mv88e6352_port_set_rgmii_delay,
+ .port_set_speed = mv88e6185_port_set_speed,
+ .port_tag_remap = mv88e6095_port_tag_remap,
+ .port_set_frame_mode = mv88e6351_port_set_frame_mode,
+ .port_set_egress_unknowns = mv88e6351_port_set_egress_unknowns,
+ .port_set_ether_type = mv88e6351_port_set_ether_type,
+ .port_jumbo_config = mv88e6165_port_jumbo_config,
+ .port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting,
+ .port_pause_config = mv88e6097_port_pause_config,
+ .stats_snapshot = mv88e6320_g1_stats_snapshot,
+ .stats_get_sset_count = mv88e6095_stats_get_sset_count,
+ .stats_get_strings = mv88e6095_stats_get_strings,
+ .stats_get_stats = mv88e6095_stats_get_stats,
+ .g1_set_cpu_port = mv88e6095_g1_set_cpu_port,
+ .g1_set_egress_port = mv88e6095_g1_set_egress_port,
+ .mgmt_rsvd2cpu = mv88e6095_g2_mgmt_rsvd2cpu,
+ .reset = mv88e6352_g1_reset,
};
static const struct mv88e6xxx_ops mv88e6176_ops = {
+ /* MV88E6XXX_FAMILY_6352 */
.get_eeprom = mv88e6xxx_g2_get_eeprom16,
.set_eeprom = mv88e6xxx_g2_set_eeprom16,
.set_switch_mac = mv88e6xxx_g2_set_switch_mac,
.phy_read = mv88e6xxx_g2_smi_phy_read,
.phy_write = mv88e6xxx_g2_smi_phy_write,
+ .port_set_link = mv88e6xxx_port_set_link,
+ .port_set_duplex = mv88e6xxx_port_set_duplex,
+ .port_set_rgmii_delay = mv88e6352_port_set_rgmii_delay,
+ .port_set_speed = mv88e6352_port_set_speed,
+ .port_tag_remap = mv88e6095_port_tag_remap,
+ .port_set_frame_mode = mv88e6351_port_set_frame_mode,
+ .port_set_egress_unknowns = mv88e6351_port_set_egress_unknowns,
+ .port_set_ether_type = mv88e6351_port_set_ether_type,
+ .port_jumbo_config = mv88e6165_port_jumbo_config,
+ .port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting,
+ .port_pause_config = mv88e6097_port_pause_config,
+ .stats_snapshot = mv88e6320_g1_stats_snapshot,
+ .stats_get_sset_count = mv88e6095_stats_get_sset_count,
+ .stats_get_strings = mv88e6095_stats_get_strings,
+ .stats_get_stats = mv88e6095_stats_get_stats,
+ .g1_set_cpu_port = mv88e6095_g1_set_cpu_port,
+ .g1_set_egress_port = mv88e6095_g1_set_egress_port,
+ .mgmt_rsvd2cpu = mv88e6095_g2_mgmt_rsvd2cpu,
+ .reset = mv88e6352_g1_reset,
};
static const struct mv88e6xxx_ops mv88e6185_ops = {
+ /* MV88E6XXX_FAMILY_6185 */
.set_switch_mac = mv88e6xxx_g1_set_switch_mac,
.phy_read = mv88e6xxx_phy_ppu_read,
.phy_write = mv88e6xxx_phy_ppu_write,
+ .port_set_link = mv88e6xxx_port_set_link,
+ .port_set_duplex = mv88e6xxx_port_set_duplex,
+ .port_set_speed = mv88e6185_port_set_speed,
+ .port_set_frame_mode = mv88e6085_port_set_frame_mode,
+ .port_set_egress_unknowns = mv88e6085_port_set_egress_unknowns,
+ .port_egress_rate_limiting = mv88e6095_port_egress_rate_limiting,
+ .stats_snapshot = mv88e6xxx_g1_stats_snapshot,
+ .stats_get_sset_count = mv88e6095_stats_get_sset_count,
+ .stats_get_strings = mv88e6095_stats_get_strings,
+ .stats_get_stats = mv88e6095_stats_get_stats,
+ .g1_set_cpu_port = mv88e6095_g1_set_cpu_port,
+ .g1_set_egress_port = mv88e6095_g1_set_egress_port,
+ .mgmt_rsvd2cpu = mv88e6095_g2_mgmt_rsvd2cpu,
+ .ppu_enable = mv88e6185_g1_ppu_enable,
+ .ppu_disable = mv88e6185_g1_ppu_disable,
+ .reset = mv88e6185_g1_reset,
+};
+
+static const struct mv88e6xxx_ops mv88e6190_ops = {
+ /* MV88E6XXX_FAMILY_6390 */
+ .set_switch_mac = mv88e6xxx_g2_set_switch_mac,
+ .phy_read = mv88e6xxx_g2_smi_phy_read,
+ .phy_write = mv88e6xxx_g2_smi_phy_write,
+ .port_set_link = mv88e6xxx_port_set_link,
+ .port_set_duplex = mv88e6xxx_port_set_duplex,
+ .port_set_rgmii_delay = mv88e6390_port_set_rgmii_delay,
+ .port_set_speed = mv88e6390_port_set_speed,
+ .port_tag_remap = mv88e6390_port_tag_remap,
+ .port_set_frame_mode = mv88e6351_port_set_frame_mode,
+ .port_set_egress_unknowns = mv88e6351_port_set_egress_unknowns,
+ .port_set_ether_type = mv88e6351_port_set_ether_type,
+ .port_pause_config = mv88e6390_port_pause_config,
+ .stats_snapshot = mv88e6390_g1_stats_snapshot,
+ .stats_set_histogram = mv88e6390_g1_stats_set_histogram,
+ .stats_get_sset_count = mv88e6320_stats_get_sset_count,
+ .stats_get_strings = mv88e6320_stats_get_strings,
+ .stats_get_stats = mv88e6390_stats_get_stats,
+ .g1_set_cpu_port = mv88e6390_g1_set_cpu_port,
+ .g1_set_egress_port = mv88e6390_g1_set_egress_port,
+ .mgmt_rsvd2cpu = mv88e6390_g1_mgmt_rsvd2cpu,
+ .reset = mv88e6352_g1_reset,
+};
+
+static const struct mv88e6xxx_ops mv88e6190x_ops = {
+ /* MV88E6XXX_FAMILY_6390 */
+ .set_switch_mac = mv88e6xxx_g2_set_switch_mac,
+ .phy_read = mv88e6xxx_g2_smi_phy_read,
+ .phy_write = mv88e6xxx_g2_smi_phy_write,
+ .port_set_link = mv88e6xxx_port_set_link,
+ .port_set_duplex = mv88e6xxx_port_set_duplex,
+ .port_set_rgmii_delay = mv88e6390_port_set_rgmii_delay,
+ .port_set_speed = mv88e6390x_port_set_speed,
+ .port_tag_remap = mv88e6390_port_tag_remap,
+ .port_set_frame_mode = mv88e6351_port_set_frame_mode,
+ .port_set_egress_unknowns = mv88e6351_port_set_egress_unknowns,
+ .port_set_ether_type = mv88e6351_port_set_ether_type,
+ .port_pause_config = mv88e6390_port_pause_config,
+ .stats_snapshot = mv88e6390_g1_stats_snapshot,
+ .stats_set_histogram = mv88e6390_g1_stats_set_histogram,
+ .stats_get_sset_count = mv88e6320_stats_get_sset_count,
+ .stats_get_strings = mv88e6320_stats_get_strings,
+ .stats_get_stats = mv88e6390_stats_get_stats,
+ .g1_set_cpu_port = mv88e6390_g1_set_cpu_port,
+ .g1_set_egress_port = mv88e6390_g1_set_egress_port,
+ .mgmt_rsvd2cpu = mv88e6390_g1_mgmt_rsvd2cpu,
+ .reset = mv88e6352_g1_reset,
+};
+
+static const struct mv88e6xxx_ops mv88e6191_ops = {
+ /* MV88E6XXX_FAMILY_6390 */
+ .set_switch_mac = mv88e6xxx_g2_set_switch_mac,
+ .phy_read = mv88e6xxx_g2_smi_phy_read,
+ .phy_write = mv88e6xxx_g2_smi_phy_write,
+ .port_set_link = mv88e6xxx_port_set_link,
+ .port_set_duplex = mv88e6xxx_port_set_duplex,
+ .port_set_rgmii_delay = mv88e6390_port_set_rgmii_delay,
+ .port_set_speed = mv88e6390_port_set_speed,
+ .port_tag_remap = mv88e6390_port_tag_remap,
+ .port_set_frame_mode = mv88e6351_port_set_frame_mode,
+ .port_set_egress_unknowns = mv88e6351_port_set_egress_unknowns,
+ .port_set_ether_type = mv88e6351_port_set_ether_type,
+ .port_pause_config = mv88e6390_port_pause_config,
+ .stats_snapshot = mv88e6390_g1_stats_snapshot,
+ .stats_set_histogram = mv88e6390_g1_stats_set_histogram,
+ .stats_get_sset_count = mv88e6320_stats_get_sset_count,
+ .stats_get_strings = mv88e6320_stats_get_strings,
+ .stats_get_stats = mv88e6390_stats_get_stats,
+ .g1_set_cpu_port = mv88e6390_g1_set_cpu_port,
+ .g1_set_egress_port = mv88e6390_g1_set_egress_port,
+ .mgmt_rsvd2cpu = mv88e6390_g1_mgmt_rsvd2cpu,
+ .reset = mv88e6352_g1_reset,
};
static const struct mv88e6xxx_ops mv88e6240_ops = {
+ /* MV88E6XXX_FAMILY_6352 */
.get_eeprom = mv88e6xxx_g2_get_eeprom16,
.set_eeprom = mv88e6xxx_g2_set_eeprom16,
.set_switch_mac = mv88e6xxx_g2_set_switch_mac,
.phy_read = mv88e6xxx_g2_smi_phy_read,
.phy_write = mv88e6xxx_g2_smi_phy_write,
+ .port_set_link = mv88e6xxx_port_set_link,
+ .port_set_duplex = mv88e6xxx_port_set_duplex,
+ .port_set_rgmii_delay = mv88e6352_port_set_rgmii_delay,
+ .port_set_speed = mv88e6352_port_set_speed,
+ .port_tag_remap = mv88e6095_port_tag_remap,
+ .port_set_frame_mode = mv88e6351_port_set_frame_mode,
+ .port_set_egress_unknowns = mv88e6351_port_set_egress_unknowns,
+ .port_set_ether_type = mv88e6351_port_set_ether_type,
+ .port_jumbo_config = mv88e6165_port_jumbo_config,
+ .port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting,
+ .port_pause_config = mv88e6097_port_pause_config,
+ .stats_snapshot = mv88e6320_g1_stats_snapshot,
+ .stats_get_sset_count = mv88e6095_stats_get_sset_count,
+ .stats_get_strings = mv88e6095_stats_get_strings,
+ .stats_get_stats = mv88e6095_stats_get_stats,
+ .g1_set_cpu_port = mv88e6095_g1_set_cpu_port,
+ .g1_set_egress_port = mv88e6095_g1_set_egress_port,
+ .mgmt_rsvd2cpu = mv88e6095_g2_mgmt_rsvd2cpu,
+ .reset = mv88e6352_g1_reset,
+};
+
+static const struct mv88e6xxx_ops mv88e6290_ops = {
+ /* MV88E6XXX_FAMILY_6390 */
+ .set_switch_mac = mv88e6xxx_g2_set_switch_mac,
+ .phy_read = mv88e6xxx_g2_smi_phy_read,
+ .phy_write = mv88e6xxx_g2_smi_phy_write,
+ .port_set_link = mv88e6xxx_port_set_link,
+ .port_set_duplex = mv88e6xxx_port_set_duplex,
+ .port_set_rgmii_delay = mv88e6390_port_set_rgmii_delay,
+ .port_set_speed = mv88e6390_port_set_speed,
+ .port_tag_remap = mv88e6390_port_tag_remap,
+ .port_set_frame_mode = mv88e6351_port_set_frame_mode,
+ .port_set_egress_unknowns = mv88e6351_port_set_egress_unknowns,
+ .port_set_ether_type = mv88e6351_port_set_ether_type,
+ .port_pause_config = mv88e6390_port_pause_config,
+ .stats_snapshot = mv88e6390_g1_stats_snapshot,
+ .stats_set_histogram = mv88e6390_g1_stats_set_histogram,
+ .stats_get_sset_count = mv88e6320_stats_get_sset_count,
+ .stats_get_strings = mv88e6320_stats_get_strings,
+ .stats_get_stats = mv88e6390_stats_get_stats,
+ .g1_set_cpu_port = mv88e6390_g1_set_cpu_port,
+ .g1_set_egress_port = mv88e6390_g1_set_egress_port,
+ .mgmt_rsvd2cpu = mv88e6390_g1_mgmt_rsvd2cpu,
+ .reset = mv88e6352_g1_reset,
};
static const struct mv88e6xxx_ops mv88e6320_ops = {
+ /* MV88E6XXX_FAMILY_6320 */
.get_eeprom = mv88e6xxx_g2_get_eeprom16,
.set_eeprom = mv88e6xxx_g2_set_eeprom16,
.set_switch_mac = mv88e6xxx_g2_set_switch_mac,
.phy_read = mv88e6xxx_g2_smi_phy_read,
.phy_write = mv88e6xxx_g2_smi_phy_write,
+ .port_set_link = mv88e6xxx_port_set_link,
+ .port_set_duplex = mv88e6xxx_port_set_duplex,
+ .port_set_speed = mv88e6185_port_set_speed,
+ .port_tag_remap = mv88e6095_port_tag_remap,
+ .port_set_frame_mode = mv88e6351_port_set_frame_mode,
+ .port_set_egress_unknowns = mv88e6351_port_set_egress_unknowns,
+ .port_set_ether_type = mv88e6351_port_set_ether_type,
+ .port_jumbo_config = mv88e6165_port_jumbo_config,
+ .port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting,
+ .port_pause_config = mv88e6097_port_pause_config,
+ .stats_snapshot = mv88e6320_g1_stats_snapshot,
+ .stats_get_sset_count = mv88e6320_stats_get_sset_count,
+ .stats_get_strings = mv88e6320_stats_get_strings,
+ .stats_get_stats = mv88e6320_stats_get_stats,
+ .g1_set_cpu_port = mv88e6095_g1_set_cpu_port,
+ .g1_set_egress_port = mv88e6095_g1_set_egress_port,
+ .mgmt_rsvd2cpu = mv88e6095_g2_mgmt_rsvd2cpu,
+ .reset = mv88e6352_g1_reset,
};
static const struct mv88e6xxx_ops mv88e6321_ops = {
+ /* MV88E6XXX_FAMILY_6321 */
.get_eeprom = mv88e6xxx_g2_get_eeprom16,
.set_eeprom = mv88e6xxx_g2_set_eeprom16,
.set_switch_mac = mv88e6xxx_g2_set_switch_mac,
.phy_read = mv88e6xxx_g2_smi_phy_read,
.phy_write = mv88e6xxx_g2_smi_phy_write,
+ .port_set_link = mv88e6xxx_port_set_link,
+ .port_set_duplex = mv88e6xxx_port_set_duplex,
+ .port_set_speed = mv88e6185_port_set_speed,
+ .port_tag_remap = mv88e6095_port_tag_remap,
+ .port_set_frame_mode = mv88e6351_port_set_frame_mode,
+ .port_set_egress_unknowns = mv88e6351_port_set_egress_unknowns,
+ .port_set_ether_type = mv88e6351_port_set_ether_type,
+ .port_jumbo_config = mv88e6165_port_jumbo_config,
+ .port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting,
+ .port_pause_config = mv88e6097_port_pause_config,
+ .stats_snapshot = mv88e6320_g1_stats_snapshot,
+ .stats_get_sset_count = mv88e6320_stats_get_sset_count,
+ .stats_get_strings = mv88e6320_stats_get_strings,
+ .stats_get_stats = mv88e6320_stats_get_stats,
+ .g1_set_cpu_port = mv88e6095_g1_set_cpu_port,
+ .g1_set_egress_port = mv88e6095_g1_set_egress_port,
+ .reset = mv88e6352_g1_reset,
};
static const struct mv88e6xxx_ops mv88e6350_ops = {
+ /* MV88E6XXX_FAMILY_6351 */
.set_switch_mac = mv88e6xxx_g2_set_switch_mac,
.phy_read = mv88e6xxx_g2_smi_phy_read,
.phy_write = mv88e6xxx_g2_smi_phy_write,
+ .port_set_link = mv88e6xxx_port_set_link,
+ .port_set_duplex = mv88e6xxx_port_set_duplex,
+ .port_set_rgmii_delay = mv88e6352_port_set_rgmii_delay,
+ .port_set_speed = mv88e6185_port_set_speed,
+ .port_tag_remap = mv88e6095_port_tag_remap,
+ .port_set_frame_mode = mv88e6351_port_set_frame_mode,
+ .port_set_egress_unknowns = mv88e6351_port_set_egress_unknowns,
+ .port_set_ether_type = mv88e6351_port_set_ether_type,
+ .port_jumbo_config = mv88e6165_port_jumbo_config,
+ .port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting,
+ .port_pause_config = mv88e6097_port_pause_config,
+ .stats_snapshot = mv88e6320_g1_stats_snapshot,
+ .stats_get_sset_count = mv88e6095_stats_get_sset_count,
+ .stats_get_strings = mv88e6095_stats_get_strings,
+ .stats_get_stats = mv88e6095_stats_get_stats,
+ .g1_set_cpu_port = mv88e6095_g1_set_cpu_port,
+ .g1_set_egress_port = mv88e6095_g1_set_egress_port,
+ .mgmt_rsvd2cpu = mv88e6095_g2_mgmt_rsvd2cpu,
+ .reset = mv88e6352_g1_reset,
};
static const struct mv88e6xxx_ops mv88e6351_ops = {
+ /* MV88E6XXX_FAMILY_6351 */
.set_switch_mac = mv88e6xxx_g2_set_switch_mac,
.phy_read = mv88e6xxx_g2_smi_phy_read,
.phy_write = mv88e6xxx_g2_smi_phy_write,
+ .port_set_link = mv88e6xxx_port_set_link,
+ .port_set_duplex = mv88e6xxx_port_set_duplex,
+ .port_set_rgmii_delay = mv88e6352_port_set_rgmii_delay,
+ .port_set_speed = mv88e6185_port_set_speed,
+ .port_tag_remap = mv88e6095_port_tag_remap,
+ .port_set_frame_mode = mv88e6351_port_set_frame_mode,
+ .port_set_egress_unknowns = mv88e6351_port_set_egress_unknowns,
+ .port_set_ether_type = mv88e6351_port_set_ether_type,
+ .port_jumbo_config = mv88e6165_port_jumbo_config,
+ .port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting,
+ .port_pause_config = mv88e6097_port_pause_config,
+ .stats_snapshot = mv88e6320_g1_stats_snapshot,
+ .stats_get_sset_count = mv88e6095_stats_get_sset_count,
+ .stats_get_strings = mv88e6095_stats_get_strings,
+ .stats_get_stats = mv88e6095_stats_get_stats,
+ .g1_set_cpu_port = mv88e6095_g1_set_cpu_port,
+ .g1_set_egress_port = mv88e6095_g1_set_egress_port,
+ .mgmt_rsvd2cpu = mv88e6095_g2_mgmt_rsvd2cpu,
+ .reset = mv88e6352_g1_reset,
};
static const struct mv88e6xxx_ops mv88e6352_ops = {
+ /* MV88E6XXX_FAMILY_6352 */
.get_eeprom = mv88e6xxx_g2_get_eeprom16,
.set_eeprom = mv88e6xxx_g2_set_eeprom16,
.set_switch_mac = mv88e6xxx_g2_set_switch_mac,
.phy_read = mv88e6xxx_g2_smi_phy_read,
.phy_write = mv88e6xxx_g2_smi_phy_write,
+ .port_set_link = mv88e6xxx_port_set_link,
+ .port_set_duplex = mv88e6xxx_port_set_duplex,
+ .port_set_rgmii_delay = mv88e6352_port_set_rgmii_delay,
+ .port_set_speed = mv88e6352_port_set_speed,
+ .port_tag_remap = mv88e6095_port_tag_remap,
+ .port_set_frame_mode = mv88e6351_port_set_frame_mode,
+ .port_set_egress_unknowns = mv88e6351_port_set_egress_unknowns,
+ .port_set_ether_type = mv88e6351_port_set_ether_type,
+ .port_jumbo_config = mv88e6165_port_jumbo_config,
+ .port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting,
+ .port_pause_config = mv88e6097_port_pause_config,
+ .stats_snapshot = mv88e6320_g1_stats_snapshot,
+ .stats_get_sset_count = mv88e6095_stats_get_sset_count,
+ .stats_get_strings = mv88e6095_stats_get_strings,
+ .stats_get_stats = mv88e6095_stats_get_stats,
+ .g1_set_cpu_port = mv88e6095_g1_set_cpu_port,
+ .g1_set_egress_port = mv88e6095_g1_set_egress_port,
+ .mgmt_rsvd2cpu = mv88e6095_g2_mgmt_rsvd2cpu,
+ .reset = mv88e6352_g1_reset,
};
+static const struct mv88e6xxx_ops mv88e6390_ops = {
+ /* MV88E6XXX_FAMILY_6390 */
+ .set_switch_mac = mv88e6xxx_g2_set_switch_mac,
+ .phy_read = mv88e6xxx_g2_smi_phy_read,
+ .phy_write = mv88e6xxx_g2_smi_phy_write,
+ .port_set_link = mv88e6xxx_port_set_link,
+ .port_set_duplex = mv88e6xxx_port_set_duplex,
+ .port_set_rgmii_delay = mv88e6390_port_set_rgmii_delay,
+ .port_set_speed = mv88e6390_port_set_speed,
+ .port_tag_remap = mv88e6390_port_tag_remap,
+ .port_set_frame_mode = mv88e6351_port_set_frame_mode,
+ .port_set_egress_unknowns = mv88e6351_port_set_egress_unknowns,
+ .port_set_ether_type = mv88e6351_port_set_ether_type,
+ .port_jumbo_config = mv88e6165_port_jumbo_config,
+ .port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting,
+ .port_pause_config = mv88e6390_port_pause_config,
+ .stats_snapshot = mv88e6390_g1_stats_snapshot,
+ .stats_set_histogram = mv88e6390_g1_stats_set_histogram,
+ .stats_get_sset_count = mv88e6320_stats_get_sset_count,
+ .stats_get_strings = mv88e6320_stats_get_strings,
+ .stats_get_stats = mv88e6390_stats_get_stats,
+ .g1_set_cpu_port = mv88e6390_g1_set_cpu_port,
+ .g1_set_egress_port = mv88e6390_g1_set_egress_port,
+ .mgmt_rsvd2cpu = mv88e6390_g1_mgmt_rsvd2cpu,
+ .reset = mv88e6352_g1_reset,
+};
+
+static const struct mv88e6xxx_ops mv88e6390x_ops = {
+ /* MV88E6XXX_FAMILY_6390 */
+ .set_switch_mac = mv88e6xxx_g2_set_switch_mac,
+ .phy_read = mv88e6xxx_g2_smi_phy_read,
+ .phy_write = mv88e6xxx_g2_smi_phy_write,
+ .port_set_link = mv88e6xxx_port_set_link,
+ .port_set_duplex = mv88e6xxx_port_set_duplex,
+ .port_set_rgmii_delay = mv88e6390_port_set_rgmii_delay,
+ .port_set_speed = mv88e6390x_port_set_speed,
+ .port_tag_remap = mv88e6390_port_tag_remap,
+ .port_set_frame_mode = mv88e6351_port_set_frame_mode,
+ .port_set_egress_unknowns = mv88e6351_port_set_egress_unknowns,
+ .port_set_ether_type = mv88e6351_port_set_ether_type,
+ .port_jumbo_config = mv88e6165_port_jumbo_config,
+ .port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting,
+ .port_pause_config = mv88e6390_port_pause_config,
+ .stats_snapshot = mv88e6390_g1_stats_snapshot,
+ .stats_set_histogram = mv88e6390_g1_stats_set_histogram,
+ .stats_get_sset_count = mv88e6320_stats_get_sset_count,
+ .stats_get_strings = mv88e6320_stats_get_strings,
+ .stats_get_stats = mv88e6390_stats_get_stats,
+ .g1_set_cpu_port = mv88e6390_g1_set_cpu_port,
+ .g1_set_egress_port = mv88e6390_g1_set_egress_port,
+ .mgmt_rsvd2cpu = mv88e6390_g1_mgmt_rsvd2cpu,
+ .reset = mv88e6352_g1_reset,
+};
+
+static const struct mv88e6xxx_ops mv88e6391_ops = {
+ /* MV88E6XXX_FAMILY_6390 */
+ .set_switch_mac = mv88e6xxx_g2_set_switch_mac,
+ .phy_read = mv88e6xxx_g2_smi_phy_read,
+ .phy_write = mv88e6xxx_g2_smi_phy_write,
+ .port_set_link = mv88e6xxx_port_set_link,
+ .port_set_duplex = mv88e6xxx_port_set_duplex,
+ .port_set_rgmii_delay = mv88e6390_port_set_rgmii_delay,
+ .port_set_speed = mv88e6390_port_set_speed,
+ .port_tag_remap = mv88e6390_port_tag_remap,
+ .port_set_frame_mode = mv88e6351_port_set_frame_mode,
+ .port_set_egress_unknowns = mv88e6351_port_set_egress_unknowns,
+ .port_set_ether_type = mv88e6351_port_set_ether_type,
+ .port_pause_config = mv88e6390_port_pause_config,
+ .stats_snapshot = mv88e6390_g1_stats_snapshot,
+ .stats_set_histogram = mv88e6390_g1_stats_set_histogram,
+ .stats_get_sset_count = mv88e6320_stats_get_sset_count,
+ .stats_get_strings = mv88e6320_stats_get_strings,
+ .stats_get_stats = mv88e6390_stats_get_stats,
+ .g1_set_cpu_port = mv88e6390_g1_set_cpu_port,
+ .g1_set_egress_port = mv88e6390_g1_set_egress_port,
+ .mgmt_rsvd2cpu = mv88e6390_g1_mgmt_rsvd2cpu,
+ .reset = mv88e6352_g1_reset,
+};
+
+static int mv88e6xxx_verify_mandatory_ops(struct mv88e6xxx_chip *chip,
+ const struct mv88e6xxx_ops *ops)
+{
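+ /* These two hooks have no generic fallback; every chip's ops
+ * table must provide them, so refuse to probe without them.
+ */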
+ if (!ops->port_set_frame_mode) {
+ dev_err(chip->dev, "Missing port_set_frame_mode");
+ return -EINVAL;
+ }
+
+ if (!ops->port_set_egress_unknowns) {
+ dev_err(chip->dev, "Missing port_set_egress_mode");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static const struct mv88e6xxx_info mv88e6xxx_table[] = {
[MV88E6085] = {
.prod_num = PORT_SWITCH_ID_PROD_NUM_6085,
@@ -3326,6 +3816,8 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.port_base_addr = 0x10,
.global1_addr = 0x1b,
.age_time_coeff = 15000,
+ .g1_irqs = 8,
+ .tag_protocol = DSA_TAG_PROTO_DSA,
.flags = MV88E6XXX_FLAGS_FAMILY_6097,
.ops = &mv88e6085_ops,
},
@@ -3339,10 +3831,27 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.port_base_addr = 0x10,
.global1_addr = 0x1b,
.age_time_coeff = 15000,
+ .g1_irqs = 8,
+ .tag_protocol = DSA_TAG_PROTO_DSA,
.flags = MV88E6XXX_FLAGS_FAMILY_6095,
.ops = &mv88e6095_ops,
},
+ [MV88E6097] = {
+ .prod_num = PORT_SWITCH_ID_PROD_NUM_6097,
+ .family = MV88E6XXX_FAMILY_6097,
+ .name = "Marvell 88E6097/88E6097F",
+ .num_databases = 4096,
+ .num_ports = 11,
+ .port_base_addr = 0x10,
+ .global1_addr = 0x1b,
+ .age_time_coeff = 15000,
+ .g1_irqs = 8,
+ .tag_protocol = DSA_TAG_PROTO_EDSA,
+ .flags = MV88E6XXX_FLAGS_FAMILY_6097,
+ .ops = &mv88e6097_ops,
+ },
+
[MV88E6123] = {
.prod_num = PORT_SWITCH_ID_PROD_NUM_6123,
.family = MV88E6XXX_FAMILY_6165,
@@ -3352,6 +3861,8 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.port_base_addr = 0x10,
.global1_addr = 0x1b,
.age_time_coeff = 15000,
+ .g1_irqs = 9,
+ .tag_protocol = DSA_TAG_PROTO_DSA,
.flags = MV88E6XXX_FLAGS_FAMILY_6165,
.ops = &mv88e6123_ops,
},
@@ -3365,6 +3876,8 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.port_base_addr = 0x10,
.global1_addr = 0x1b,
.age_time_coeff = 15000,
+ .g1_irqs = 9,
+ .tag_protocol = DSA_TAG_PROTO_DSA,
.flags = MV88E6XXX_FLAGS_FAMILY_6185,
.ops = &mv88e6131_ops,
},
@@ -3378,6 +3891,8 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.port_base_addr = 0x10,
.global1_addr = 0x1b,
.age_time_coeff = 15000,
+ .g1_irqs = 9,
+ .tag_protocol = DSA_TAG_PROTO_DSA,
.flags = MV88E6XXX_FLAGS_FAMILY_6165,
.ops = &mv88e6161_ops,
},
@@ -3391,6 +3906,8 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.port_base_addr = 0x10,
.global1_addr = 0x1b,
.age_time_coeff = 15000,
+ .g1_irqs = 9,
+ .tag_protocol = DSA_TAG_PROTO_DSA,
.flags = MV88E6XXX_FLAGS_FAMILY_6165,
.ops = &mv88e6165_ops,
},
@@ -3404,6 +3921,8 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.port_base_addr = 0x10,
.global1_addr = 0x1b,
.age_time_coeff = 15000,
+ .g1_irqs = 9,
+ .tag_protocol = DSA_TAG_PROTO_EDSA,
.flags = MV88E6XXX_FLAGS_FAMILY_6351,
.ops = &mv88e6171_ops,
},
@@ -3417,6 +3936,8 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.port_base_addr = 0x10,
.global1_addr = 0x1b,
.age_time_coeff = 15000,
+ .g1_irqs = 9,
+ .tag_protocol = DSA_TAG_PROTO_EDSA,
.flags = MV88E6XXX_FLAGS_FAMILY_6352,
.ops = &mv88e6172_ops,
},
@@ -3430,6 +3951,8 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.port_base_addr = 0x10,
.global1_addr = 0x1b,
.age_time_coeff = 15000,
+ .g1_irqs = 9,
+ .tag_protocol = DSA_TAG_PROTO_EDSA,
.flags = MV88E6XXX_FLAGS_FAMILY_6351,
.ops = &mv88e6175_ops,
},
@@ -3443,6 +3966,8 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.port_base_addr = 0x10,
.global1_addr = 0x1b,
.age_time_coeff = 15000,
+ .g1_irqs = 9,
+ .tag_protocol = DSA_TAG_PROTO_EDSA,
.flags = MV88E6XXX_FLAGS_FAMILY_6352,
.ops = &mv88e6176_ops,
},
@@ -3456,10 +3981,57 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.port_base_addr = 0x10,
.global1_addr = 0x1b,
.age_time_coeff = 15000,
+ .g1_irqs = 8,
+ .tag_protocol = DSA_TAG_PROTO_EDSA,
.flags = MV88E6XXX_FLAGS_FAMILY_6185,
.ops = &mv88e6185_ops,
},
+ [MV88E6190] = {
+ .prod_num = PORT_SWITCH_ID_PROD_NUM_6190,
+ .family = MV88E6XXX_FAMILY_6390,
+ .name = "Marvell 88E6190",
+ .num_databases = 4096,
+ .num_ports = 11, /* 10 + Z80 */
+ .port_base_addr = 0x0,
+ .global1_addr = 0x1b,
+ .tag_protocol = DSA_TAG_PROTO_DSA,
+ .age_time_coeff = 15000,
+ .g1_irqs = 9,
+ .flags = MV88E6XXX_FLAGS_FAMILY_6390,
+ .ops = &mv88e6190_ops,
+ },
+
+ [MV88E6190X] = {
+ .prod_num = PORT_SWITCH_ID_PROD_NUM_6190X,
+ .family = MV88E6XXX_FAMILY_6390,
+ .name = "Marvell 88E6190X",
+ .num_databases = 4096,
+ .num_ports = 11, /* 10 + Z80 */
+ .port_base_addr = 0x0,
+ .global1_addr = 0x1b,
+ .age_time_coeff = 15000,
+ .g1_irqs = 9,
+ .tag_protocol = DSA_TAG_PROTO_DSA,
+ .flags = MV88E6XXX_FLAGS_FAMILY_6390,
+ .ops = &mv88e6190x_ops,
+ },
+
+ [MV88E6191] = {
+ .prod_num = PORT_SWITCH_ID_PROD_NUM_6191,
+ .family = MV88E6XXX_FAMILY_6390,
+ .name = "Marvell 88E6191",
+ .num_databases = 4096,
+ .num_ports = 11, /* 10 + Z80 */
+ .port_base_addr = 0x0,
+ .global1_addr = 0x1b,
+ .age_time_coeff = 15000,
+ .g1_irqs = 9,
+ .tag_protocol = DSA_TAG_PROTO_DSA,
+ .flags = MV88E6XXX_FLAGS_FAMILY_6390,
+ .ops = &mv88e6391_ops,
+ },
+
[MV88E6240] = {
.prod_num = PORT_SWITCH_ID_PROD_NUM_6240,
.family = MV88E6XXX_FAMILY_6352,
@@ -3469,10 +4041,27 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.port_base_addr = 0x10,
.global1_addr = 0x1b,
.age_time_coeff = 15000,
+ .g1_irqs = 9,
+ .tag_protocol = DSA_TAG_PROTO_EDSA,
.flags = MV88E6XXX_FLAGS_FAMILY_6352,
.ops = &mv88e6240_ops,
},
+ [MV88E6290] = {
+ .prod_num = PORT_SWITCH_ID_PROD_NUM_6290,
+ .family = MV88E6XXX_FAMILY_6390,
+ .name = "Marvell 88E6290",
+ .num_databases = 4096,
+ .num_ports = 11, /* 10 + Z80 */
+ .port_base_addr = 0x0,
+ .global1_addr = 0x1b,
+ .age_time_coeff = 15000,
+ .g1_irqs = 9,
+ .tag_protocol = DSA_TAG_PROTO_DSA,
+ .flags = MV88E6XXX_FLAGS_FAMILY_6390,
+ .ops = &mv88e6290_ops,
+ },
+
[MV88E6320] = {
.prod_num = PORT_SWITCH_ID_PROD_NUM_6320,
.family = MV88E6XXX_FAMILY_6320,
@@ -3482,6 +4071,8 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.port_base_addr = 0x10,
.global1_addr = 0x1b,
.age_time_coeff = 15000,
+ .g1_irqs = 8,
+ .tag_protocol = DSA_TAG_PROTO_EDSA,
.flags = MV88E6XXX_FLAGS_FAMILY_6320,
.ops = &mv88e6320_ops,
},
@@ -3495,6 +4086,8 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.port_base_addr = 0x10,
.global1_addr = 0x1b,
.age_time_coeff = 15000,
+ .g1_irqs = 8,
+ .tag_protocol = DSA_TAG_PROTO_EDSA,
.flags = MV88E6XXX_FLAGS_FAMILY_6320,
.ops = &mv88e6321_ops,
},
@@ -3508,6 +4101,8 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.port_base_addr = 0x10,
.global1_addr = 0x1b,
.age_time_coeff = 15000,
+ .g1_irqs = 9,
+ .tag_protocol = DSA_TAG_PROTO_EDSA,
.flags = MV88E6XXX_FLAGS_FAMILY_6351,
.ops = &mv88e6350_ops,
},
@@ -3521,6 +4116,8 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.port_base_addr = 0x10,
.global1_addr = 0x1b,
.age_time_coeff = 15000,
+ .g1_irqs = 9,
+ .tag_protocol = DSA_TAG_PROTO_EDSA,
.flags = MV88E6XXX_FLAGS_FAMILY_6351,
.ops = &mv88e6351_ops,
},
@@ -3534,9 +4131,39 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.port_base_addr = 0x10,
.global1_addr = 0x1b,
.age_time_coeff = 15000,
+ .g1_irqs = 9,
+ .tag_protocol = DSA_TAG_PROTO_EDSA,
.flags = MV88E6XXX_FLAGS_FAMILY_6352,
.ops = &mv88e6352_ops,
},
+ [MV88E6390] = {
+ .prod_num = PORT_SWITCH_ID_PROD_NUM_6390,
+ .family = MV88E6XXX_FAMILY_6390,
+ .name = "Marvell 88E6390",
+ .num_databases = 4096,
+ .num_ports = 11, /* 10 + Z80 */
+ .port_base_addr = 0x0,
+ .global1_addr = 0x1b,
+ .age_time_coeff = 15000,
+ .g1_irqs = 9,
+ .tag_protocol = DSA_TAG_PROTO_DSA,
+ .flags = MV88E6XXX_FLAGS_FAMILY_6390,
+ .ops = &mv88e6390_ops,
+ },
+ [MV88E6390X] = {
+ .prod_num = PORT_SWITCH_ID_PROD_NUM_6390X,
+ .family = MV88E6XXX_FAMILY_6390,
+ .name = "Marvell 88E6390X",
+ .num_databases = 4096,
+ .num_ports = 11, /* 10 + Z80 */
+ .port_base_addr = 0x0,
+ .global1_addr = 0x1b,
+ .age_time_coeff = 15000,
+ .g1_irqs = 9,
+ .tag_protocol = DSA_TAG_PROTO_DSA,
+ .flags = MV88E6XXX_FLAGS_FAMILY_6390,
+ .ops = &mv88e6390x_ops,
+ },
};
static const struct mv88e6xxx_info *mv88e6xxx_lookup_info(unsigned int prod_num)
@@ -3600,13 +4227,13 @@ static struct mv88e6xxx_chip *mv88e6xxx_alloc_chip(struct device *dev)
static void mv88e6xxx_phy_init(struct mv88e6xxx_chip *chip)
{
- if (mv88e6xxx_has(chip, MV88E6XXX_FLAG_PPU))
+ if (chip->info->ops->ppu_enable && chip->info->ops->ppu_disable)
mv88e6xxx_ppu_state_init(chip);
}
static void mv88e6xxx_phy_destroy(struct mv88e6xxx_chip *chip)
{
- if (mv88e6xxx_has(chip, MV88E6XXX_FLAG_PPU))
+ if (chip->info->ops->ppu_enable && chip->info->ops->ppu_disable)
mv88e6xxx_ppu_state_destroy(chip);
}
@@ -3634,10 +4261,7 @@ static enum dsa_tag_protocol mv88e6xxx_get_tag_protocol(struct dsa_switch *ds)
{
struct mv88e6xxx_chip *chip = ds->priv;
- if (mv88e6xxx_has(chip, MV88E6XXX_FLAG_EDSA))
- return DSA_TAG_PROTO_EDSA;
-
- return DSA_TAG_PROTO_DSA;
+ return chip->info->tag_protocol;
}
static const char *mv88e6xxx_drv_probe(struct device *dsa_dev,
@@ -3667,6 +4291,12 @@ static const char *mv88e6xxx_drv_probe(struct device *dsa_dev,
if (err)
goto free;
+ mutex_lock(&chip->reg_lock);
+ err = mv88e6xxx_switch_reset(chip);
+ mutex_unlock(&chip->reg_lock);
+ if (err)
+ goto free;
+
mv88e6xxx_phy_init(chip);
err = mv88e6xxx_mdio_register(chip, NULL);
@@ -3819,35 +4449,82 @@ static int mv88e6xxx_probe(struct mdio_device *mdiodev)
chip->info = compat_info;
+ err = mv88e6xxx_verify_mandatory_ops(chip, chip->info->ops);
+ if (err)
+ return err;
+
err = mv88e6xxx_smi_init(chip, mdiodev->bus, mdiodev->addr);
if (err)
return err;
+ chip->reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
+ if (IS_ERR(chip->reset))
+ return PTR_ERR(chip->reset);
+
err = mv88e6xxx_detect(chip);
if (err)
return err;
mv88e6xxx_phy_init(chip);
- chip->reset = devm_gpiod_get_optional(dev, "reset", GPIOD_ASIS);
- if (IS_ERR(chip->reset))
- return PTR_ERR(chip->reset);
-
if (chip->info->ops->get_eeprom &&
!of_property_read_u32(np, "eeprom-length", &eeprom_len))
chip->eeprom_len = eeprom_len;
+ mutex_lock(&chip->reg_lock);
+ err = mv88e6xxx_switch_reset(chip);
+ mutex_unlock(&chip->reg_lock);
+ if (err)
+ goto out;
+
+ chip->irq = of_irq_get(np, 0);
+ if (chip->irq == -EPROBE_DEFER) {
+ err = chip->irq;
+ goto out;
+ }
+
+ if (chip->irq > 0) {
+ /* Has to be performed before the MDIO bus is created,
+ * because the PHYs will link their interrupts to these
+ * interrupt controllers.
+ */
+ mutex_lock(&chip->reg_lock);
+ err = mv88e6xxx_g1_irq_setup(chip);
+ mutex_unlock(&chip->reg_lock);
+
+ if (err)
+ goto out;
+
+ if (mv88e6xxx_has(chip, MV88E6XXX_FLAG_G2_INT)) {
+ err = mv88e6xxx_g2_irq_setup(chip);
+ if (err)
+ goto out_g1_irq;
+ }
+ }
+
err = mv88e6xxx_mdio_register(chip, np);
if (err)
- return err;
+ goto out_g2_irq;
err = mv88e6xxx_register_switch(chip, np);
- if (err) {
- mv88e6xxx_mdio_unregister(chip);
- return err;
- }
+ if (err)
+ goto out_mdio;
return 0;
+
+out_mdio:
+ mv88e6xxx_mdio_unregister(chip);
+out_g2_irq:
+ if (mv88e6xxx_has(chip, MV88E6XXX_FLAG_G2_INT) && chip->irq > 0)
+ mv88e6xxx_g2_irq_free(chip);
+out_g1_irq:
+ if (chip->irq > 0) {
+ mutex_lock(&chip->reg_lock);
+ mv88e6xxx_g1_irq_free(chip);
+ mutex_unlock(&chip->reg_lock);
+ }
+out:
+ return err;
}
static void mv88e6xxx_remove(struct mdio_device *mdiodev)
@@ -3858,6 +4535,12 @@ static void mv88e6xxx_remove(struct mdio_device *mdiodev)
mv88e6xxx_phy_destroy(chip);
mv88e6xxx_unregister_switch(chip);
mv88e6xxx_mdio_unregister(chip);
+
+ if (chip->irq > 0) {
+ if (mv88e6xxx_has(chip, MV88E6XXX_FLAG_G2_INT))
+ mv88e6xxx_g2_irq_free(chip);
+ mv88e6xxx_g1_irq_free(chip);
+ }
}
static const struct of_device_id mv88e6xxx_of_match[] = {
@@ -3865,6 +4548,10 @@ static const struct of_device_id mv88e6xxx_of_match[] = {
.compatible = "marvell,mv88e6085",
.data = &mv88e6xxx_table[MV88E6085],
},
+ {
+ .compatible = "marvell,mv88e6190",
+ .data = &mv88e6xxx_table[MV88E6190],
+ },
{ /* sentinel */ },
};
diff --git a/drivers/net/dsa/mv88e6xxx/global1.c b/drivers/net/dsa/mv88e6xxx/global1.c
index d358720b6c2d..75af86a7fad8 100644
--- a/drivers/net/dsa/mv88e6xxx/global1.c
+++ b/drivers/net/dsa/mv88e6xxx/global1.c
@@ -32,3 +32,370 @@ int mv88e6xxx_g1_wait(struct mv88e6xxx_chip *chip, int reg, u16 mask)
{
return mv88e6xxx_wait(chip, chip->info->global1_addr, reg, mask);
}
+
+/* Offset 0x00: Switch Global Status Register */
+
+static int mv88e6185_g1_wait_ppu_disabled(struct mv88e6xxx_chip *chip)
+{
+ u16 state;
+ int i, err;
+
+ for (i = 0; i < 16; i++) {
+ err = mv88e6xxx_g1_read(chip, GLOBAL_STATUS, &state);
+ if (err)
+ return err;
+
+ /* Check the value of the PPUState bits 15:14 */
+ state &= GLOBAL_STATUS_PPU_STATE_MASK;
+ if (state != GLOBAL_STATUS_PPU_STATE_POLLING)
+ return 0;
+
+ usleep_range(1000, 2000);
+ }
+
+ return -ETIMEDOUT;
+}
+
+static int mv88e6185_g1_wait_ppu_polling(struct mv88e6xxx_chip *chip)
+{
+ u16 state;
+ int i, err;
+
+ for (i = 0; i < 16; ++i) {
+ err = mv88e6xxx_g1_read(chip, GLOBAL_STATUS, &state);
+ if (err)
+ return err;
+
+ /* Check the value of the PPUState bits 15:14 */
+ state &= GLOBAL_STATUS_PPU_STATE_MASK;
+ if (state == GLOBAL_STATUS_PPU_STATE_POLLING)
+ return 0;
+
+ usleep_range(1000, 2000);
+ }
+
+ return -ETIMEDOUT;
+}
+
+static int mv88e6352_g1_wait_ppu_polling(struct mv88e6xxx_chip *chip)
+{
+ u16 state;
+ int i, err;
+
+ for (i = 0; i < 16; ++i) {
+ err = mv88e6xxx_g1_read(chip, GLOBAL_STATUS, &state);
+ if (err)
+ return err;
+
+ /* Check the value of the PPUState (or InitState) bit 15 */
+ if (state & GLOBAL_STATUS_PPU_STATE)
+ return 0;
+
+ usleep_range(1000, 2000);
+ }
+
+ return -ETIMEDOUT;
+}
+
+static int mv88e6xxx_g1_wait_init_ready(struct mv88e6xxx_chip *chip)
+{
+ const unsigned long timeout = jiffies + 1 * HZ;
+ u16 val = 0;
+ int err;
+
+ /* Wait up to 1 second for the switch to be ready. The InitReady bit 11
+ * is set to a one when all units inside the device (ATU, VTU, etc.)
+ * have finished their initialization and are ready to accept frames.
+ */
+ while (time_before(jiffies, timeout)) {
+ err = mv88e6xxx_g1_read(chip, GLOBAL_STATUS, &val);
+ if (err)
+ return err;
+
+ if (val & GLOBAL_STATUS_INIT_READY)
+ break;
+
+ usleep_range(1000, 2000);
+ }
+
+ /* Re-check the flag rather than the clock, in case the final
+ * successful read raced with the timeout.
+ */
+ if (!(val & GLOBAL_STATUS_INIT_READY))
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+/* Offset 0x04: Switch Global Control Register */
+
+int mv88e6185_g1_reset(struct mv88e6xxx_chip *chip)
+{
+ u16 val;
+ int err;
+
+ /* Set the SWReset bit 15 along with the PPUEn bit 14, to also restart
+ * the PPU, including re-doing PHY detection and initialization
+ */
+ err = mv88e6xxx_g1_read(chip, GLOBAL_CONTROL, &val);
+ if (err)
+ return err;
+
+ val |= GLOBAL_CONTROL_SW_RESET;
+ val |= GLOBAL_CONTROL_PPU_ENABLE;
+
+ err = mv88e6xxx_g1_write(chip, GLOBAL_CONTROL, val);
+ if (err)
+ return err;
+
+ err = mv88e6xxx_g1_wait_init_ready(chip);
+ if (err)
+ return err;
+
+ return mv88e6185_g1_wait_ppu_polling(chip);
+}
+
+int mv88e6352_g1_reset(struct mv88e6xxx_chip *chip)
+{
+ u16 val;
+ int err;
+
+ /* Set the SWReset bit 15 */
+ err = mv88e6xxx_g1_read(chip, GLOBAL_CONTROL, &val);
+ if (err)
+ return err;
+
+ val |= GLOBAL_CONTROL_SW_RESET;
+
+ err = mv88e6xxx_g1_write(chip, GLOBAL_CONTROL, val);
+ if (err)
+ return err;
+
+ err = mv88e6xxx_g1_wait_init_ready(chip);
+ if (err)
+ return err;
+
+ return mv88e6352_g1_wait_ppu_polling(chip);
+}
+
+int mv88e6185_g1_ppu_enable(struct mv88e6xxx_chip *chip)
+{
+ u16 val;
+ int err;
+
+ err = mv88e6xxx_g1_read(chip, GLOBAL_CONTROL, &val);
+ if (err)
+ return err;
+
+ val |= GLOBAL_CONTROL_PPU_ENABLE;
+
+ err = mv88e6xxx_g1_write(chip, GLOBAL_CONTROL, val);
+ if (err)
+ return err;
+
+ return mv88e6185_g1_wait_ppu_polling(chip);
+}
+
+int mv88e6185_g1_ppu_disable(struct mv88e6xxx_chip *chip)
+{
+ u16 val;
+ int err;
+
+ err = mv88e6xxx_g1_read(chip, GLOBAL_CONTROL, &val);
+ if (err)
+ return err;
+
+ val &= ~GLOBAL_CONTROL_PPU_ENABLE;
+
+ err = mv88e6xxx_g1_write(chip, GLOBAL_CONTROL, val);
+ if (err)
+ return err;
+
+ return mv88e6185_g1_wait_ppu_disabled(chip);
+}
+
+/* Offset 0x1a: Monitor Control */
+/* Offset 0x1a: Monitor & MGMT Control on some devices */
+
+int mv88e6095_g1_set_egress_port(struct mv88e6xxx_chip *chip, int port)
+{
+ u16 reg;
+ int err;
+
+ err = mv88e6xxx_g1_read(chip, GLOBAL_MONITOR_CONTROL, &reg);
+ if (err)
+ return err;
+
+ reg &= ~(GLOBAL_MONITOR_CONTROL_INGRESS_MASK |
+ GLOBAL_MONITOR_CONTROL_EGRESS_MASK);
+
+ reg |= port << GLOBAL_MONITOR_CONTROL_INGRESS_SHIFT |
+ port << GLOBAL_MONITOR_CONTROL_EGRESS_SHIFT;
+
+ return mv88e6xxx_g1_write(chip, GLOBAL_MONITOR_CONTROL, reg);
+}
+
+/* Older generations also call this the ARP destination. It has been
+ * generalized in more modern devices such that more than ARP can
+ * egress it.
+ */
+int mv88e6095_g1_set_cpu_port(struct mv88e6xxx_chip *chip, int port)
+{
+ u16 reg;
+ int err;
+
+ err = mv88e6xxx_g1_read(chip, GLOBAL_MONITOR_CONTROL, &reg);
+ if (err)
+ return err;
+
+ reg &= ~GLOBAL_MONITOR_CONTROL_ARP_MASK;
+ reg |= port << GLOBAL_MONITOR_CONTROL_ARP_SHIFT;
+
+ return mv88e6xxx_g1_write(chip, GLOBAL_MONITOR_CONTROL, reg);
+}
+
+static int mv88e6390_g1_monitor_write(struct mv88e6xxx_chip *chip,
+ u16 pointer, u8 data)
+{
+ u16 reg;
+
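+ /* Indirect access: setting the Update bit 15 latches the data in
+ * bits 7:0 into the register selected by the pointer in bits 13:8.
+ */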
+ reg = GLOBAL_MONITOR_CONTROL_UPDATE | pointer | data;
+
+ return mv88e6xxx_g1_write(chip, GLOBAL_MONITOR_CONTROL, reg);
+}
+
+int mv88e6390_g1_set_egress_port(struct mv88e6xxx_chip *chip, int port)
+{
+ int err;
+
+ err = mv88e6390_g1_monitor_write(chip, GLOBAL_MONITOR_CONTROL_INGRESS,
+ port);
+ if (err)
+ return err;
+
+ return mv88e6390_g1_monitor_write(chip, GLOBAL_MONITOR_CONTROL_EGRESS,
+ port);
+}
+
+int mv88e6390_g1_set_cpu_port(struct mv88e6xxx_chip *chip, int port)
+{
+ return mv88e6390_g1_monitor_write(chip, GLOBAL_MONITOR_CONTROL_CPU_DEST,
+ port);
+}
+
+int mv88e6390_g1_mgmt_rsvd2cpu(struct mv88e6xxx_chip *chip)
+{
+ int err;
+
+ /* 01:80:c2:00:00:00-01:80:c2:00:00:07 are Management */
+ err = mv88e6390_g1_monitor_write(
+ chip, GLOBAL_MONITOR_CONTROL_0180C280000000XLO, 0xff);
+ if (err)
+ return err;
+
+ /* 01:80:c2:00:00:08-01:80:c2:00:00:0f are Management */
+ err = mv88e6390_g1_monitor_write(
+ chip, GLOBAL_MONITOR_CONTROL_0180C280000000XHI, 0xff);
+ if (err)
+ return err;
+
+ /* 01:80:c2:00:00:20-01:80:c2:00:00:27 are Management */
+ err = mv88e6390_g1_monitor_write(
+ chip, GLOBAL_MONITOR_CONTROL_0180C280000002XLO, 0xff);
+ if (err)
+ return err;
+
+ /* 01:80:c2:00:00:28-01:80:c2:00:00:2f are Management */
+ return mv88e6390_g1_monitor_write(
+ chip, GLOBAL_MONITOR_CONTROL_0180C280000002XHI, 0xff);
+}
+
+/* Offset 0x1c: Global Control 2 */
+
+int mv88e6390_g1_stats_set_histogram(struct mv88e6xxx_chip *chip)
+{
+ u16 val;
+ int err;
+
+ err = mv88e6xxx_g1_read(chip, GLOBAL_CONTROL_2, &val);
+ if (err)
+ return err;
+
+ val |= GLOBAL_CONTROL_2_HIST_RX_TX;
+
+ return mv88e6xxx_g1_write(chip, GLOBAL_CONTROL_2, val);
+}
+
+/* Offset 0x1d: Statistics Operation 2 */
+
+int mv88e6xxx_g1_stats_wait(struct mv88e6xxx_chip *chip)
+{
+ return mv88e6xxx_g1_wait(chip, GLOBAL_STATS_OP, GLOBAL_STATS_OP_BUSY);
+}
+
+int mv88e6xxx_g1_stats_snapshot(struct mv88e6xxx_chip *chip, int port)
+{
+ int err;
+
+ /* Snapshot the hardware statistics counters for this port. */
+ err = mv88e6xxx_g1_write(chip, GLOBAL_STATS_OP,
+ GLOBAL_STATS_OP_CAPTURE_PORT |
+ GLOBAL_STATS_OP_HIST_RX_TX | port);
+ if (err)
+ return err;
+
+ /* Wait for the snapshotting to complete. */
+ return mv88e6xxx_g1_stats_wait(chip);
+}
+
+int mv88e6320_g1_stats_snapshot(struct mv88e6xxx_chip *chip, int port)
+{
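+ /* This device expects the port number plus one in bits 9:5 of
+ * the stats capture operation.
+ */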
+ port = (port + 1) << 5;
+
+ return mv88e6xxx_g1_stats_snapshot(chip, port);
+}
+
+int mv88e6390_g1_stats_snapshot(struct mv88e6xxx_chip *chip, int port)
+{
+ int err;
+
+ port = (port + 1) << 5;
+
+ /* Snapshot the hardware statistics counters for this port. */
+ err = mv88e6xxx_g1_write(chip, GLOBAL_STATS_OP,
+ GLOBAL_STATS_OP_CAPTURE_PORT | port);
+ if (err)
+ return err;
+
+ /* Wait for the snapshotting to complete. */
+ return mv88e6xxx_g1_stats_wait(chip);
+}
+
+void mv88e6xxx_g1_stats_read(struct mv88e6xxx_chip *chip, int stat, u32 *val)
+{
+ u32 value;
+ u16 reg;
+ int err;
+
+ *val = 0;
+
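+ /* Read the captured 32-bit counter back as two 16-bit halves:
+ * bits 31:16 first, then bits 15:0.
+ */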
+ err = mv88e6xxx_g1_write(chip, GLOBAL_STATS_OP,
+ GLOBAL_STATS_OP_READ_CAPTURED | stat);
+ if (err)
+ return;
+
+ err = mv88e6xxx_g1_stats_wait(chip);
+ if (err)
+ return;
+
+ err = mv88e6xxx_g1_read(chip, GLOBAL_STATS_COUNTER_32, &reg);
+ if (err)
+ return;
+
+ value = reg << 16;
+
+ err = mv88e6xxx_g1_read(chip, GLOBAL_STATS_COUNTER_01, &reg);
+ if (err)
+ return;
+
+ *val = value | reg;
+}
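Taken together, stats_snapshot() and mv88e6xxx_g1_stats_read() imply a
two-step capture-then-read pattern. A minimal sketch of a caller, assuming
chip->reg_lock is already held; example_read_port_stat() is a hypothetical
helper for illustration, not part of this patch:

static u32 example_read_port_stat(struct mv88e6xxx_chip *chip, int port,
				  int stat)
{
	u32 val = 0;

	/* Latch a consistent view of this port's counters... */
	if (chip->info->ops->stats_snapshot(chip, port))
		return 0;

	/* ...then read any counter out of the captured set at leisure */
	mv88e6xxx_g1_stats_read(chip, stat, &val);

	return val;
}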
diff --git a/drivers/net/dsa/mv88e6xxx/global1.h b/drivers/net/dsa/mv88e6xxx/global1.h
index 62291e6fe3a3..1aec7382c02d 100644
--- a/drivers/net/dsa/mv88e6xxx/global1.h
+++ b/drivers/net/dsa/mv88e6xxx/global1.h
@@ -20,4 +20,22 @@ int mv88e6xxx_g1_read(struct mv88e6xxx_chip *chip, int reg, u16 *val);
int mv88e6xxx_g1_write(struct mv88e6xxx_chip *chip, int reg, u16 val);
int mv88e6xxx_g1_wait(struct mv88e6xxx_chip *chip, int reg, u16 mask);
+int mv88e6185_g1_reset(struct mv88e6xxx_chip *chip);
+int mv88e6352_g1_reset(struct mv88e6xxx_chip *chip);
+
+int mv88e6185_g1_ppu_enable(struct mv88e6xxx_chip *chip);
+int mv88e6185_g1_ppu_disable(struct mv88e6xxx_chip *chip);
+
+int mv88e6xxx_g1_stats_wait(struct mv88e6xxx_chip *chip);
+int mv88e6xxx_g1_stats_snapshot(struct mv88e6xxx_chip *chip, int port);
+int mv88e6320_g1_stats_snapshot(struct mv88e6xxx_chip *chip, int port);
+int mv88e6390_g1_stats_snapshot(struct mv88e6xxx_chip *chip, int port);
+int mv88e6390_g1_stats_set_histogram(struct mv88e6xxx_chip *chip);
+void mv88e6xxx_g1_stats_read(struct mv88e6xxx_chip *chip, int stat, u32 *val);
+int mv88e6095_g1_set_egress_port(struct mv88e6xxx_chip *chip, int port);
+int mv88e6390_g1_set_egress_port(struct mv88e6xxx_chip *chip, int port);
+int mv88e6095_g1_set_cpu_port(struct mv88e6xxx_chip *chip, int port);
+int mv88e6390_g1_set_cpu_port(struct mv88e6xxx_chip *chip, int port);
+int mv88e6390_g1_mgmt_rsvd2cpu(struct mv88e6xxx_chip *chip);
+
#endif /* _MV88E6XXX_GLOBAL1_H */
diff --git a/drivers/net/dsa/mv88e6xxx/global2.c b/drivers/net/dsa/mv88e6xxx/global2.c
index cf686e7506a9..3e77071949ab 100644
--- a/drivers/net/dsa/mv88e6xxx/global2.c
+++ b/drivers/net/dsa/mv88e6xxx/global2.c
@@ -1,5 +1,6 @@
/*
- * Marvell 88E6xxx Switch Global 2 Registers support (device address 0x1C)
+ * Marvell 88E6xxx Switch Global 2 Registers support (device address
+ * 0x1C)
*
* Copyright (c) 2008 Marvell Semiconductor
*
@@ -11,6 +12,7 @@
* (at your option) any later version.
*/
+#include <linux/irqdomain.h>
#include "mv88e6xxx.h"
#include "global2.h"
@@ -36,6 +38,31 @@ static int mv88e6xxx_g2_wait(struct mv88e6xxx_chip *chip, int reg, u16 mask)
return mv88e6xxx_wait(chip, ADDR_GLOBAL2, reg, mask);
}
+/* Offset 0x02: Management Enable 2x */
+/* Offset 0x03: Management Enable 0x */
+
+int mv88e6095_g2_mgmt_rsvd2cpu(struct mv88e6xxx_chip *chip)
+{
+ int err;
+
+ /* Consider the frames with reserved multicast destination
+ * addresses matching 01:80:c2:00:00:2x as MGMT.
+ */
+ if (mv88e6xxx_has(chip, MV88E6XXX_FLAG_G2_MGMT_EN_2X)) {
+ err = mv88e6xxx_g2_write(chip, GLOBAL2_MGMT_EN_2X, 0xffff);
+ if (err)
+ return err;
+ }
+
+ /* Consider the frames with reserved multicast destination
+ * addresses matching 01:80:c2:00:00:0x as MGMT.
+ */
+ if (mv88e6xxx_has(chip, MV88E6XXX_FLAG_G2_MGMT_EN_0X))
+ return mv88e6xxx_g2_write(chip, GLOBAL2_MGMT_EN_0X, 0xffff);
+
+ return 0;
+}
+
/* Offset 0x06: Device Mapping Table register */
static int mv88e6xxx_g2_device_mapping_write(struct mv88e6xxx_chip *chip,
@@ -417,29 +444,154 @@ int mv88e6xxx_g2_smi_phy_write(struct mv88e6xxx_chip *chip, int addr, int reg,
return mv88e6xxx_g2_smi_phy_cmd(chip, cmd);
}
-int mv88e6xxx_g2_setup(struct mv88e6xxx_chip *chip)
+static void mv88e6xxx_g2_irq_mask(struct irq_data *d)
{
- u16 reg;
+ struct mv88e6xxx_chip *chip = irq_data_get_irq_chip_data(d);
+ unsigned int n = d->hwirq;
+
+ chip->g2_irq.masked |= (1 << n);
+}
+
+static void mv88e6xxx_g2_irq_unmask(struct irq_data *d)
+{
+ struct mv88e6xxx_chip *chip = irq_data_get_irq_chip_data(d);
+ unsigned int n = d->hwirq;
+
+ chip->g2_irq.masked &= ~(1 << n);
+}
+
+static irqreturn_t mv88e6xxx_g2_irq_thread_fn(int irq, void *dev_id)
+{
+ struct mv88e6xxx_chip *chip = dev_id;
+ unsigned int nhandled = 0;
+ unsigned int sub_irq;
+ unsigned int n;
int err;
+ u16 reg;
- if (mv88e6xxx_has(chip, MV88E6XXX_FLAG_G2_MGMT_EN_2X)) {
- /* Consider the frames with reserved multicast destination
- * addresses matching 01:80:c2:00:00:2x as MGMT.
- */
- err = mv88e6xxx_g2_write(chip, GLOBAL2_MGMT_EN_2X, 0xffff);
- if (err)
- return err;
+ mutex_lock(&chip->reg_lock);
+ err = mv88e6xxx_g2_read(chip, GLOBAL2_INT_SOURCE, &reg);
+ mutex_unlock(&chip->reg_lock);
+ if (err)
+ goto out;
+
+ for (n = 0; n < 16; ++n) {
+ if (reg & (1 << n)) {
+ sub_irq = irq_find_mapping(chip->g2_irq.domain, n);
+ handle_nested_irq(sub_irq);
+ ++nhandled;
+ }
}
+out:
+ return (nhandled > 0 ? IRQ_HANDLED : IRQ_NONE);
+}
- if (mv88e6xxx_has(chip, MV88E6XXX_FLAG_G2_MGMT_EN_0X)) {
- /* Consider the frames with reserved multicast destination
- * addresses matching 01:80:c2:00:00:0x as MGMT.
- */
- err = mv88e6xxx_g2_write(chip, GLOBAL2_MGMT_EN_0X, 0xffff);
- if (err)
- return err;
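+/* irq_mask/irq_unmask above only update the cached mask; the new value
+ * is flushed to GLOBAL2_INT_MASK in irq_bus_sync_unlock, because SMI
+ * register access may sleep.
+ */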
+static void mv88e6xxx_g2_irq_bus_lock(struct irq_data *d)
+{
+ struct mv88e6xxx_chip *chip = irq_data_get_irq_chip_data(d);
+
+ mutex_lock(&chip->reg_lock);
+}
+
+static void mv88e6xxx_g2_irq_bus_sync_unlock(struct irq_data *d)
+{
+ struct mv88e6xxx_chip *chip = irq_data_get_irq_chip_data(d);
+
+ mv88e6xxx_g2_write(chip, GLOBAL2_INT_MASK, ~chip->g2_irq.masked);
+
+ mutex_unlock(&chip->reg_lock);
+}
+
+static struct irq_chip mv88e6xxx_g2_irq_chip = {
+ .name = "mv88e6xxx-g2",
+ .irq_mask = mv88e6xxx_g2_irq_mask,
+ .irq_unmask = mv88e6xxx_g2_irq_unmask,
+ .irq_bus_lock = mv88e6xxx_g2_irq_bus_lock,
+ .irq_bus_sync_unlock = mv88e6xxx_g2_irq_bus_sync_unlock,
+};
+
+static int mv88e6xxx_g2_irq_domain_map(struct irq_domain *d,
+ unsigned int irq,
+ irq_hw_number_t hwirq)
+{
+ struct mv88e6xxx_chip *chip = d->host_data;
+
+ irq_set_chip_data(irq, d->host_data);
+ irq_set_chip_and_handler(irq, &chip->g2_irq.chip, handle_level_irq);
+ irq_set_noprobe(irq);
+
+ return 0;
+}
+
+static const struct irq_domain_ops mv88e6xxx_g2_irq_domain_ops = {
+ .map = mv88e6xxx_g2_irq_domain_map,
+ .xlate = irq_domain_xlate_twocell,
+};
+
+void mv88e6xxx_g2_irq_free(struct mv88e6xxx_chip *chip)
+{
+ int irq, virq;
+
+ free_irq(chip->device_irq, chip);
+ irq_dispose_mapping(chip->device_irq);
+
+ for (irq = 0; irq < 16; irq++) {
+ virq = irq_find_mapping(chip->g2_irq.domain, irq);
+ irq_dispose_mapping(virq);
}
+ irq_domain_remove(chip->g2_irq.domain);
+}
+
+int mv88e6xxx_g2_irq_setup(struct mv88e6xxx_chip *chip)
+{
+ int err, irq, virq;
+
+ if (!chip->dev->of_node)
+ return -EINVAL;
+
+ chip->g2_irq.domain = irq_domain_add_simple(
+ chip->dev->of_node, 16, 0, &mv88e6xxx_g2_irq_domain_ops, chip);
+ if (!chip->g2_irq.domain)
+ return -ENOMEM;
+
+ for (irq = 0; irq < 16; irq++)
+ irq_create_mapping(chip->g2_irq.domain, irq);
+
+ chip->g2_irq.chip = mv88e6xxx_g2_irq_chip;
+ chip->g2_irq.masked = ~0;
+
+ chip->device_irq = irq_find_mapping(chip->g1_irq.domain,
+ GLOBAL_STATUS_IRQ_DEVICE);
+ if (chip->device_irq < 0) {
+ err = chip->device_irq;
+ goto out;
+ }
+
+ err = request_threaded_irq(chip->device_irq, NULL,
+ mv88e6xxx_g2_irq_thread_fn,
+ IRQF_ONESHOT, "mv88e6xxx-g1", chip);
+ if (err)
+ goto out;
+
+ return 0;
+
+out:
+ for (irq = 0; irq < 16; irq++) {
+ virq = irq_find_mapping(chip->g2_irq.domain, irq);
+ irq_dispose_mapping(virq);
+ }
+
+ irq_domain_remove(chip->g2_irq.domain);
+
+ return err;
+}
+
+int mv88e6xxx_g2_setup(struct mv88e6xxx_chip *chip)
+{
+ u16 reg;
+ int err;
+
/* Ignore removed tag data on doubly tagged packets, disable
* flow control messages, force flow control priority to the
* highest, and send all special multicast frames to the CPU
diff --git a/drivers/net/dsa/mv88e6xxx/global2.h b/drivers/net/dsa/mv88e6xxx/global2.h
index c4bb9035ee3a..9aefb7d8b0ad 100644
--- a/drivers/net/dsa/mv88e6xxx/global2.h
+++ b/drivers/net/dsa/mv88e6xxx/global2.h
@@ -33,6 +33,9 @@ int mv88e6xxx_g2_get_eeprom16(struct mv88e6xxx_chip *chip,
int mv88e6xxx_g2_set_eeprom16(struct mv88e6xxx_chip *chip,
struct ethtool_eeprom *eeprom, u8 *data);
int mv88e6xxx_g2_setup(struct mv88e6xxx_chip *chip);
+int mv88e6xxx_g2_irq_setup(struct mv88e6xxx_chip *chip);
+void mv88e6xxx_g2_irq_free(struct mv88e6xxx_chip *chip);
+int mv88e6095_g2_mgmt_rsvd2cpu(struct mv88e6xxx_chip *chip);
#else /* !CONFIG_NET_DSA_MV88E6XXX_GLOBAL2 */
@@ -83,6 +86,20 @@ static inline int mv88e6xxx_g2_setup(struct mv88e6xxx_chip *chip)
return -EOPNOTSUPP;
}
+static inline int mv88e6xxx_g2_irq_setup(struct mv88e6xxx_chip *chip)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline void mv88e6xxx_g2_irq_free(struct mv88e6xxx_chip *chip)
+{
+}
+
+static inline int mv88e6095_g2_mgmt_rsvd2cpu(struct mv88e6xxx_chip *chip)
+{
+ return -EOPNOTSUPP;
+}
+
#endif /* CONFIG_NET_DSA_MV88E6XXX_GLOBAL2 */
#endif /* _MV88E6XXX_GLOBAL2_H */
diff --git a/drivers/net/dsa/mv88e6xxx/mv88e6xxx.h b/drivers/net/dsa/mv88e6xxx/mv88e6xxx.h
index e572121c196e..af54baea47cf 100644
--- a/drivers/net/dsa/mv88e6xxx/mv88e6xxx.h
+++ b/drivers/net/dsa/mv88e6xxx/mv88e6xxx.h
@@ -13,6 +13,7 @@
#define __MV88E6XXX_H
#include <linux/if_vlan.h>
+#include <linux/irq.h>
#include <linux/gpio/consumer.h>
#ifndef UINT64_MAX
@@ -60,20 +61,29 @@
#define PORT_PCS_CTRL 0x01
#define PORT_PCS_CTRL_RGMII_DELAY_RXCLK BIT(15)
#define PORT_PCS_CTRL_RGMII_DELAY_TXCLK BIT(14)
+#define PORT_PCS_CTRL_FORCE_SPEED BIT(13) /* 6390 */
+#define PORT_PCS_CTRL_ALTSPEED BIT(12) /* 6390 */
+#define PORT_PCS_CTRL_200BASE BIT(12) /* 6352 */
#define PORT_PCS_CTRL_FC BIT(7)
#define PORT_PCS_CTRL_FORCE_FC BIT(6)
#define PORT_PCS_CTRL_LINK_UP BIT(5)
#define PORT_PCS_CTRL_FORCE_LINK BIT(4)
#define PORT_PCS_CTRL_DUPLEX_FULL BIT(3)
#define PORT_PCS_CTRL_FORCE_DUPLEX BIT(2)
-#define PORT_PCS_CTRL_10 0x00
-#define PORT_PCS_CTRL_100 0x01
-#define PORT_PCS_CTRL_1000 0x02
-#define PORT_PCS_CTRL_UNFORCED 0x03
+#define PORT_PCS_CTRL_SPEED_MASK (0x03)
+#define PORT_PCS_CTRL_SPEED_10 (0x00)
+#define PORT_PCS_CTRL_SPEED_100 (0x01)
+#define PORT_PCS_CTRL_SPEED_200 (0x02) /* 6065 and non Gb chips */
+#define PORT_PCS_CTRL_SPEED_1000 (0x02)
+#define PORT_PCS_CTRL_SPEED_10000 (0x03) /* 6390X */
+#define PORT_PCS_CTRL_SPEED_UNFORCED (0x03)
#define PORT_PAUSE_CTRL 0x02
+#define PORT_FLOW_CTRL_LIMIT_IN ((0x00 << 8) | BIT(15))
+#define PORT_FLOW_CTRL_LIMIT_OUT ((0x01 << 8) | BIT(15))
#define PORT_SWITCH_ID 0x03
#define PORT_SWITCH_ID_PROD_NUM_6085 0x04a
#define PORT_SWITCH_ID_PROD_NUM_6095 0x095
+#define PORT_SWITCH_ID_PROD_NUM_6097 0x099
#define PORT_SWITCH_ID_PROD_NUM_6131 0x106
#define PORT_SWITCH_ID_PROD_NUM_6320 0x115
#define PORT_SWITCH_ID_PROD_NUM_6123 0x121
@@ -84,11 +94,17 @@
#define PORT_SWITCH_ID_PROD_NUM_6175 0x175
#define PORT_SWITCH_ID_PROD_NUM_6176 0x176
#define PORT_SWITCH_ID_PROD_NUM_6185 0x1a7
+#define PORT_SWITCH_ID_PROD_NUM_6190 0x190
+#define PORT_SWITCH_ID_PROD_NUM_6190X 0x0a0
+#define PORT_SWITCH_ID_PROD_NUM_6191 0x191
#define PORT_SWITCH_ID_PROD_NUM_6240 0x240
+#define PORT_SWITCH_ID_PROD_NUM_6290 0x290
#define PORT_SWITCH_ID_PROD_NUM_6321 0x310
#define PORT_SWITCH_ID_PROD_NUM_6352 0x352
#define PORT_SWITCH_ID_PROD_NUM_6350 0x371
#define PORT_SWITCH_ID_PROD_NUM_6351 0x375
+#define PORT_SWITCH_ID_PROD_NUM_6390 0x390
+#define PORT_SWITCH_ID_PROD_NUM_6390X 0x0a1
#define PORT_CONTROL 0x04
#define PORT_CONTROL_USE_CORE_TAG BIT(15)
#define PORT_CONTROL_DROP_ON_LOCK BIT(14)
@@ -96,6 +112,7 @@
#define PORT_CONTROL_EGRESS_UNTAGGED (0x1 << 12)
#define PORT_CONTROL_EGRESS_TAGGED (0x2 << 12)
#define PORT_CONTROL_EGRESS_ADD_TAG (0x3 << 12)
+#define PORT_CONTROL_EGRESS_MASK (0x3 << 12)
#define PORT_CONTROL_HEADER BIT(11)
#define PORT_CONTROL_IGMP_MLD_SNOOP BIT(10)
#define PORT_CONTROL_DOUBLE_TAG BIT(9)
@@ -103,6 +120,7 @@
#define PORT_CONTROL_FRAME_MODE_DSA (0x1 << 8)
#define PORT_CONTROL_FRAME_MODE_PROVIDER (0x2 << 8)
#define PORT_CONTROL_FRAME_ETHER_TYPE_DSA (0x3 << 8)
+#define PORT_CONTROL_FRAME_MASK (0x3 << 8)
#define PORT_CONTROL_DSA_TAG BIT(8)
#define PORT_CONTROL_VLAN_TUNNEL BIT(7)
#define PORT_CONTROL_TAG_IF_BOTH BIT(6)
@@ -110,6 +128,10 @@
#define PORT_CONTROL_USE_TAG BIT(4)
#define PORT_CONTROL_FORWARD_UNKNOWN_MC BIT(3)
#define PORT_CONTROL_FORWARD_UNKNOWN BIT(2)
+#define PORT_CONTROL_NOT_EGRESS_UNKNOWN_DA (0x0 << 2)
+#define PORT_CONTROL_NOT_EGRESS_UNKNOWN_MULTICAST_DA (0x1 << 2)
+#define PORT_CONTROL_NOT_EGRESS_UNKNOWN_UNICAST_DA (0x2 << 2)
+#define PORT_CONTROL_EGRESS_ALL_UNKNOWN_DA (0x3 << 2)
#define PORT_CONTROL_STATE_MASK 0x03
#define PORT_CONTROL_STATE_DISABLED 0x00
#define PORT_CONTROL_STATE_BLOCKING 0x01
@@ -158,15 +180,34 @@
#define PORT_OUT_FILTERED 0x13
#define PORT_TAG_REGMAP_0123 0x18
#define PORT_TAG_REGMAP_4567 0x19
+#define PORT_IEEE_PRIO_MAP_TABLE 0x18 /* 6390 */
+#define PORT_IEEE_PRIO_MAP_TABLE_UPDATE BIT(15)
+#define PORT_IEEE_PRIO_MAP_TABLE_INGRESS_PCP (0x0 << 12)
+#define PORT_IEEE_PRIO_MAP_TABLE_EGRESS_GREEN_PCP (0x1 << 12)
+#define PORT_IEEE_PRIO_MAP_TABLE_EGRESS_YELLOW_PCP (0x2 << 12)
+#define PORT_IEEE_PRIO_MAP_TABLE_EGRESS_AVB_PCP (0x3 << 12)
+#define PORT_IEEE_PRIO_MAP_TABLE_EGRESS_GREEN_DSCP (0x5 << 12)
+#define PORT_IEEE_PRIO_MAP_TABLE_EGRESS_YELLOW_DSCP (0x6 << 12)
+#define PORT_IEEE_PRIO_MAP_TABLE_EGRESS_AVB_DSCP (0x7 << 12)
+#define PORT_IEEE_PRIO_MAP_TABLE_POINTER_SHIFT 9
#define GLOBAL_STATUS 0x00
#define GLOBAL_STATUS_PPU_STATE BIT(15) /* 6351 and 6171 */
-/* Two bits for 6165, 6185 etc */
-#define GLOBAL_STATUS_PPU_MASK (0x3 << 14)
-#define GLOBAL_STATUS_PPU_DISABLED_RST (0x0 << 14)
-#define GLOBAL_STATUS_PPU_INITIALIZING (0x1 << 14)
-#define GLOBAL_STATUS_PPU_DISABLED (0x2 << 14)
-#define GLOBAL_STATUS_PPU_POLLING (0x3 << 14)
+#define GLOBAL_STATUS_PPU_STATE_MASK (0x3 << 14) /* 6165 6185 */
+#define GLOBAL_STATUS_PPU_STATE_DISABLED_RST (0x0 << 14)
+#define GLOBAL_STATUS_PPU_STATE_INITIALIZING (0x1 << 14)
+#define GLOBAL_STATUS_PPU_STATE_DISABLED (0x2 << 14)
+#define GLOBAL_STATUS_PPU_STATE_POLLING (0x3 << 14)
+#define GLOBAL_STATUS_INIT_READY BIT(11)
+#define GLOBAL_STATUS_IRQ_AVB 8
+#define GLOBAL_STATUS_IRQ_DEVICE 7
+#define GLOBAL_STATUS_IRQ_STATS 6
+#define GLOBAL_STATUS_IRQ_VTU_PROBLEM 5
+#define GLOBAL_STATUS_IRQ_VTU_DONE 4
+#define GLOBAL_STATUS_IRQ_ATU_PROBLEM 3
+#define GLOBAL_STATUS_IRQ_ATU_DONE 2
+#define GLOBAL_STATUS_IRQ_TCAM_DONE 1
+#define GLOBAL_STATUS_IRQ_EEPROM_DONE 0
#define GLOBAL_MAC_01 0x01
#define GLOBAL_MAC_23 0x02
#define GLOBAL_MAC_45 0x03
@@ -254,14 +295,27 @@
#define GLOBAL_CORE_TAG_TYPE 0x19
#define GLOBAL_MONITOR_CONTROL 0x1a
#define GLOBAL_MONITOR_CONTROL_INGRESS_SHIFT 12
+#define GLOBAL_MONITOR_CONTROL_INGRESS_MASK (0xf << 12)
#define GLOBAL_MONITOR_CONTROL_EGRESS_SHIFT 8
+#define GLOBAL_MONITOR_CONTROL_EGRESS_MASK (0xf << 8)
#define GLOBAL_MONITOR_CONTROL_ARP_SHIFT 4
+#define GLOBAL_MONITOR_CONTROL_ARP_MASK (0xf << 4)
#define GLOBAL_MONITOR_CONTROL_MIRROR_SHIFT 0
#define GLOBAL_MONITOR_CONTROL_ARP_DISABLED (0xf0)
+#define GLOBAL_MONITOR_CONTROL_UPDATE BIT(15)
+#define GLOBAL_MONITOR_CONTROL_0180C280000000XLO (0x00 << 8)
+#define GLOBAL_MONITOR_CONTROL_0180C280000000XHI (0x01 << 8)
+#define GLOBAL_MONITOR_CONTROL_0180C280000002XLO (0x02 << 8)
+#define GLOBAL_MONITOR_CONTROL_0180C280000002XHI (0x03 << 8)
+#define GLOBAL_MONITOR_CONTROL_INGRESS (0x20 << 8)
+#define GLOBAL_MONITOR_CONTROL_EGRESS (0x21 << 8)
+#define GLOBAL_MONITOR_CONTROL_CPU_DEST (0x30 << 8)
#define GLOBAL_CONTROL_2 0x1c
#define GLOBAL_CONTROL_2_NO_CASCADE 0xe000
#define GLOBAL_CONTROL_2_MULTIPLE_CASCADE 0xf000
-
+#define GLOBAL_CONTROL_2_HIST_RX (0x1 << 6)
+#define GLOBAL_CONTROL_2_HIST_TX (0x2 << 6)
+#define GLOBAL_CONTROL_2_HIST_RX_TX (0x3 << 6)
#define GLOBAL_STATS_OP 0x1d
#define GLOBAL_STATS_OP_BUSY BIT(15)
#define GLOBAL_STATS_OP_NOP (0 << 12)
@@ -272,7 +326,8 @@
#define GLOBAL_STATS_OP_HIST_RX ((1 << 10) | GLOBAL_STATS_OP_BUSY)
#define GLOBAL_STATS_OP_HIST_TX ((2 << 10) | GLOBAL_STATS_OP_BUSY)
#define GLOBAL_STATS_OP_HIST_RX_TX ((3 << 10) | GLOBAL_STATS_OP_BUSY)
-#define GLOBAL_STATS_OP_BANK_1 BIT(9)
+#define GLOBAL_STATS_OP_BANK_1_BIT_9 BIT(9)
+#define GLOBAL_STATS_OP_BANK_1_BIT_10 BIT(10)
#define GLOBAL_STATS_COUNTER_32 0x1e
#define GLOBAL_STATS_COUNTER_01 0x1f
@@ -349,10 +404,18 @@
#define MV88E6XXX_N_FID 4096
+enum mv88e6xxx_frame_mode {
+ MV88E6XXX_FRAME_MODE_NORMAL,
+ MV88E6XXX_FRAME_MODE_DSA,
+ MV88E6XXX_FRAME_MODE_PROVIDER,
+ MV88E6XXX_FRAME_MODE_ETHERTYPE,
+};
+
/* List of supported models */
enum mv88e6xxx_model {
MV88E6085,
MV88E6095,
+ MV88E6097,
MV88E6123,
MV88E6131,
MV88E6161,
@@ -362,12 +425,18 @@ enum mv88e6xxx_model {
MV88E6175,
MV88E6176,
MV88E6185,
+ MV88E6190,
+ MV88E6190X,
+ MV88E6191,
MV88E6240,
+ MV88E6290,
MV88E6320,
MV88E6321,
MV88E6350,
MV88E6351,
MV88E6352,
+ MV88E6390,
+ MV88E6390X,
};
enum mv88e6xxx_family {
@@ -380,15 +449,10 @@ enum mv88e6xxx_family {
MV88E6XXX_FAMILY_6320, /* 6320 6321 */
MV88E6XXX_FAMILY_6351, /* 6171 6175 6350 6351 */
MV88E6XXX_FAMILY_6352, /* 6172 6176 6240 6352 */
+ MV88E6XXX_FAMILY_6390, /* 6190 6190X 6191 6290 6390 6390X */
};
enum mv88e6xxx_cap {
- /* Two different tag protocols can be used by the driver. All
- * switches support DSA, but only later generations support
- * EDSA.
- */
- MV88E6XXX_CAP_EDSA,
-
/* Energy Efficient Ethernet.
*/
MV88E6XXX_CAP_EEE,
@@ -417,6 +481,7 @@ enum mv88e6xxx_cap {
* The device contains a second set of global 16-bit registers.
*/
MV88E6XXX_CAP_GLOBAL2,
+ MV88E6XXX_CAP_G2_INT, /* (0x00) Interrupt Status */
MV88E6XXX_CAP_G2_MGMT_EN_2X, /* (0x02) MGMT Enable Register 2x */
MV88E6XXX_CAP_G2_MGMT_EN_0X, /* (0x03) MGMT Enable Register 0x */
MV88E6XXX_CAP_G2_IRL_CMD, /* (0x09) Ingress Rate Command */
@@ -425,12 +490,6 @@ enum mv88e6xxx_cap {
MV88E6XXX_CAP_G2_PVT_DATA, /* (0x0c) Cross Chip Port VLAN Data */
MV88E6XXX_CAP_G2_POT, /* (0x0f) Priority Override Table */
- /* PHY Polling Unit.
- * See GLOBAL_CONTROL_PPU_ENABLE and GLOBAL_STATUS_PPU_POLLING.
- */
- MV88E6XXX_CAP_PPU,
- MV88E6XXX_CAP_PPU_ACTIVE,
-
/* Per VLAN Spanning Tree Unit (STU).
* The Port State database, if present, is accessed through VTU
* operations and dedicated SID registers. See GLOBAL_VTU_SID.
@@ -450,7 +509,6 @@ enum mv88e6xxx_cap {
};
/* Bitmask of capabilities */
-#define MV88E6XXX_FLAG_EDSA BIT_ULL(MV88E6XXX_CAP_EDSA)
#define MV88E6XXX_FLAG_EEE BIT_ULL(MV88E6XXX_CAP_EEE)
#define MV88E6XXX_FLAG_SMI_CMD BIT_ULL(MV88E6XXX_CAP_SMI_CMD)
@@ -464,6 +522,7 @@ enum mv88e6xxx_cap {
#define MV88E6XXX_FLAG_G1_VTU_FID BIT_ULL(MV88E6XXX_CAP_G1_VTU_FID)
#define MV88E6XXX_FLAG_GLOBAL2 BIT_ULL(MV88E6XXX_CAP_GLOBAL2)
+#define MV88E6XXX_FLAG_G2_INT BIT_ULL(MV88E6XXX_CAP_G2_INT)
#define MV88E6XXX_FLAG_G2_MGMT_EN_2X BIT_ULL(MV88E6XXX_CAP_G2_MGMT_EN_2X)
#define MV88E6XXX_FLAG_G2_MGMT_EN_0X BIT_ULL(MV88E6XXX_CAP_G2_MGMT_EN_0X)
#define MV88E6XXX_FLAG_G2_IRL_CMD BIT_ULL(MV88E6XXX_CAP_G2_IRL_CMD)
@@ -472,8 +531,6 @@ enum mv88e6xxx_cap {
#define MV88E6XXX_FLAG_G2_PVT_DATA BIT_ULL(MV88E6XXX_CAP_G2_PVT_DATA)
#define MV88E6XXX_FLAG_G2_POT BIT_ULL(MV88E6XXX_CAP_G2_POT)
-#define MV88E6XXX_FLAG_PPU BIT_ULL(MV88E6XXX_CAP_PPU)
-#define MV88E6XXX_FLAG_PPU_ACTIVE BIT_ULL(MV88E6XXX_CAP_PPU_ACTIVE)
#define MV88E6XXX_FLAG_STU BIT_ULL(MV88E6XXX_CAP_STU)
#define MV88E6XXX_FLAG_TEMP BIT_ULL(MV88E6XXX_CAP_TEMP)
#define MV88E6XXX_FLAG_TEMP_LIMIT BIT_ULL(MV88E6XXX_CAP_TEMP_LIMIT)
@@ -502,7 +559,6 @@ enum mv88e6xxx_cap {
#define MV88E6XXX_FLAGS_FAMILY_6095 \
(MV88E6XXX_FLAG_GLOBAL2 | \
MV88E6XXX_FLAG_G2_MGMT_EN_0X | \
- MV88E6XXX_FLAG_PPU | \
MV88E6XXX_FLAG_VTU | \
MV88E6XXX_FLAGS_MULTI_CHIP)
@@ -513,7 +569,6 @@ enum mv88e6xxx_cap {
MV88E6XXX_FLAG_G2_MGMT_EN_2X | \
MV88E6XXX_FLAG_G2_MGMT_EN_0X | \
MV88E6XXX_FLAG_G2_POT | \
- MV88E6XXX_FLAG_PPU | \
MV88E6XXX_FLAG_STU | \
MV88E6XXX_FLAG_VTU | \
MV88E6XXX_FLAGS_IRL | \
@@ -524,6 +579,7 @@ enum mv88e6xxx_cap {
(MV88E6XXX_FLAG_G1_ATU_FID | \
MV88E6XXX_FLAG_G1_VTU_FID | \
MV88E6XXX_FLAG_GLOBAL2 | \
+ MV88E6XXX_FLAG_G2_INT | \
MV88E6XXX_FLAG_G2_MGMT_EN_2X | \
MV88E6XXX_FLAG_G2_MGMT_EN_0X | \
MV88E6XXX_FLAG_G2_POT | \
@@ -536,19 +592,17 @@ enum mv88e6xxx_cap {
#define MV88E6XXX_FLAGS_FAMILY_6185 \
(MV88E6XXX_FLAG_GLOBAL2 | \
+ MV88E6XXX_FLAG_G2_INT | \
MV88E6XXX_FLAG_G2_MGMT_EN_0X | \
MV88E6XXX_FLAGS_MULTI_CHIP | \
- MV88E6XXX_FLAG_PPU | \
MV88E6XXX_FLAG_VTU)
#define MV88E6XXX_FLAGS_FAMILY_6320 \
- (MV88E6XXX_FLAG_EDSA | \
- MV88E6XXX_FLAG_EEE | \
+ (MV88E6XXX_FLAG_EEE | \
MV88E6XXX_FLAG_GLOBAL2 | \
MV88E6XXX_FLAG_G2_MGMT_EN_2X | \
MV88E6XXX_FLAG_G2_MGMT_EN_0X | \
MV88E6XXX_FLAG_G2_POT | \
- MV88E6XXX_FLAG_PPU_ACTIVE | \
MV88E6XXX_FLAG_TEMP | \
MV88E6XXX_FLAG_TEMP_LIMIT | \
MV88E6XXX_FLAG_VTU | \
@@ -557,14 +611,13 @@ enum mv88e6xxx_cap {
MV88E6XXX_FLAGS_PVT)
#define MV88E6XXX_FLAGS_FAMILY_6351 \
- (MV88E6XXX_FLAG_EDSA | \
- MV88E6XXX_FLAG_G1_ATU_FID | \
+ (MV88E6XXX_FLAG_G1_ATU_FID | \
MV88E6XXX_FLAG_G1_VTU_FID | \
MV88E6XXX_FLAG_GLOBAL2 | \
+ MV88E6XXX_FLAG_G2_INT | \
MV88E6XXX_FLAG_G2_MGMT_EN_2X | \
MV88E6XXX_FLAG_G2_MGMT_EN_0X | \
MV88E6XXX_FLAG_G2_POT | \
- MV88E6XXX_FLAG_PPU_ACTIVE | \
MV88E6XXX_FLAG_STU | \
MV88E6XXX_FLAG_TEMP | \
MV88E6XXX_FLAG_VTU | \
@@ -573,15 +626,14 @@ enum mv88e6xxx_cap {
MV88E6XXX_FLAGS_PVT)
#define MV88E6XXX_FLAGS_FAMILY_6352 \
- (MV88E6XXX_FLAG_EDSA | \
- MV88E6XXX_FLAG_EEE | \
+ (MV88E6XXX_FLAG_EEE | \
MV88E6XXX_FLAG_G1_ATU_FID | \
MV88E6XXX_FLAG_G1_VTU_FID | \
MV88E6XXX_FLAG_GLOBAL2 | \
+ MV88E6XXX_FLAG_G2_INT | \
MV88E6XXX_FLAG_G2_MGMT_EN_2X | \
MV88E6XXX_FLAG_G2_MGMT_EN_0X | \
MV88E6XXX_FLAG_G2_POT | \
- MV88E6XXX_FLAG_PPU_ACTIVE | \
MV88E6XXX_FLAG_STU | \
MV88E6XXX_FLAG_TEMP | \
MV88E6XXX_FLAG_TEMP_LIMIT | \
@@ -593,6 +645,17 @@ enum mv88e6xxx_cap {
struct mv88e6xxx_ops;
+#define MV88E6XXX_FLAGS_FAMILY_6390 \
+ (MV88E6XXX_FLAG_EEE | \
+ MV88E6XXX_FLAG_GLOBAL2 | \
+ MV88E6XXX_FLAG_STU | \
+ MV88E6XXX_FLAG_TEMP | \
+ MV88E6XXX_FLAG_TEMP_LIMIT | \
+ MV88E6XXX_FLAG_VTU | \
+ MV88E6XXX_FLAGS_IRL | \
+ MV88E6XXX_FLAGS_MULTI_CHIP | \
+ MV88E6XXX_FLAGS_PVT)
+
struct mv88e6xxx_info {
enum mv88e6xxx_family family;
u16 prod_num;
@@ -602,6 +665,8 @@ struct mv88e6xxx_info {
unsigned int port_base_addr;
unsigned int global1_addr;
unsigned int age_time_coeff;
+ unsigned int g1_irqs;
+ enum dsa_tag_protocol tag_protocol;
unsigned long long flags;
const struct mv88e6xxx_ops *ops;
};
@@ -628,6 +693,13 @@ struct mv88e6xxx_priv_port {
struct net_device *bridge_dev;
};
+struct mv88e6xxx_irq {
+ u16 masked;
+ struct irq_chip chip;
+ struct irq_domain *domain;
+ unsigned int nirqs;
+};
+
struct mv88e6xxx_chip {
const struct mv88e6xxx_info *info;
@@ -677,6 +749,14 @@ struct mv88e6xxx_chip {
/* And the MDIO bus itself */
struct mii_bus *mdio_bus;
+
+ /* There can be two interrupt controllers, which are chained
+ * off a GPIO as interrupt source
+ */
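+ /* Resulting topology: the chip-level IRQ (typically a GPIO) feeds
+ * the g1_irq domain built on GLOBAL_STATUS, whose IRQ_DEVICE source
+ * is in turn chained into the g2_irq domain built on
+ * GLOBAL2_INT_SOURCE.
+ */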
+ struct mv88e6xxx_irq g1_irq;
+ struct mv88e6xxx_irq g2_irq;
+ int irq;
+ int device_irq;
};
struct mv88e6xxx_bus_ops {
@@ -696,19 +776,93 @@ struct mv88e6xxx_ops {
u16 *val);
int (*phy_write)(struct mv88e6xxx_chip *chip, int addr, int reg,
u16 val);
-};
-enum stat_type {
- BANK0,
- BANK1,
- PORT,
+ /* PHY Polling Unit (PPU) operations */
+ int (*ppu_enable)(struct mv88e6xxx_chip *chip);
+ int (*ppu_disable)(struct mv88e6xxx_chip *chip);
+
+ /* Switch Software Reset */
+ int (*reset)(struct mv88e6xxx_chip *chip);
+
+ /* RGMII Receive/Transmit Timing Control
+ * Add delay on PHY_INTERFACE_MODE_RGMII_*ID, no delay otherwise.
+ */
+ int (*port_set_rgmii_delay)(struct mv88e6xxx_chip *chip, int port,
+ phy_interface_t mode);
+
+#define LINK_FORCED_DOWN 0
+#define LINK_FORCED_UP 1
+#define LINK_UNFORCED -2
+
+ /* Port's MAC link state
+ * Use LINK_FORCED_UP or LINK_FORCED_DOWN to force link up or down,
+ * or LINK_UNFORCED for normal link detection.
+ */
+ int (*port_set_link)(struct mv88e6xxx_chip *chip, int port, int link);
+
+#define DUPLEX_UNFORCED -2
+
+ /* Port's MAC duplex mode
+ *
+ * Use DUPLEX_HALF or DUPLEX_FULL to force half or full duplex,
+ * or DUPLEX_UNFORCED for normal duplex detection.
+ */
+ int (*port_set_duplex)(struct mv88e6xxx_chip *chip, int port, int dup);
+
+#define SPEED_MAX INT_MAX
+#define SPEED_UNFORCED -2
+
+ /* Port's MAC speed (in Mbps)
+ *
+ * Depending on the chip, 10, 100, 200, 1000, 2500, 10000 are valid.
+ * Use SPEED_UNFORCED for normal detection, SPEED_MAX for max value.
+ */
+ int (*port_set_speed)(struct mv88e6xxx_chip *chip, int port, int speed);
+
+ int (*port_tag_remap)(struct mv88e6xxx_chip *chip, int port);
+
+ int (*port_set_frame_mode)(struct mv88e6xxx_chip *chip, int port,
+ enum mv88e6xxx_frame_mode mode);
+ int (*port_set_egress_unknowns)(struct mv88e6xxx_chip *chip, int port,
+ bool on);
+ int (*port_set_ether_type)(struct mv88e6xxx_chip *chip, int port,
+ u16 etype);
+ int (*port_jumbo_config)(struct mv88e6xxx_chip *chip, int port);
+
+ int (*port_egress_rate_limiting)(struct mv88e6xxx_chip *chip, int port);
+ int (*port_pause_config)(struct mv88e6xxx_chip *chip, int port);
+
+ /* Snapshot the statistics for a port. The statistics can then
+ * be read back at leisure but still with a consistent view.
+ */
+ int (*stats_snapshot)(struct mv88e6xxx_chip *chip, int port);
+
+ /* Set the histogram mode for statistics, on chips where the
+ * histogram control bits live outside the STATS_OP register.
+ */
+ int (*stats_set_histogram)(struct mv88e6xxx_chip *chip);
+
+ /* Return the number of strings describing statistics */
+ int (*stats_get_sset_count)(struct mv88e6xxx_chip *chip);
+ void (*stats_get_strings)(struct mv88e6xxx_chip *chip, uint8_t *data);
+ void (*stats_get_stats)(struct mv88e6xxx_chip *chip, int port,
+ uint64_t *data);
+ int (*g1_set_cpu_port)(struct mv88e6xxx_chip *chip, int port);
+ int (*g1_set_egress_port)(struct mv88e6xxx_chip *chip, int port);
+
+ /* Can be either in g1 or g2, so don't use a prefix */
+ int (*mgmt_rsvd2cpu)(struct mv88e6xxx_chip *chip);
};
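To illustrate how these per-chip hooks compose, here is a minimal sketch of
forcing a port's MAC configuration through the ops table; example_force_link()
is a hypothetical helper for illustration only, and assumes the caller holds
chip->reg_lock:

static int example_force_link(struct mv88e6xxx_chip *chip, int port)
{
	const struct mv88e6xxx_ops *ops = chip->info->ops;
	int err;

	/* Not every family implements every hook, so check before calling */
	if (ops->port_set_speed) {
		err = ops->port_set_speed(chip, port, SPEED_MAX);
		if (err)
			return err;
	}

	err = ops->port_set_duplex(chip, port, DUPLEX_FULL);
	if (err)
		return err;

	return ops->port_set_link(chip, port, LINK_FORCED_UP);
}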
+#define STATS_TYPE_PORT BIT(0)
+#define STATS_TYPE_BANK0 BIT(1)
+#define STATS_TYPE_BANK1 BIT(2)
+
struct mv88e6xxx_hw_stat {
char string[ETH_GSTRING_LEN];
int sizeof_stat;
int reg;
- enum stat_type type;
+ int type;
};
static inline bool mv88e6xxx_has(struct mv88e6xxx_chip *chip,
diff --git a/drivers/net/dsa/mv88e6xxx/port.c b/drivers/net/dsa/mv88e6xxx/port.c
new file mode 100644
index 000000000000..0db7fa0373ae
--- /dev/null
+++ b/drivers/net/dsa/mv88e6xxx/port.c
@@ -0,0 +1,729 @@
+/*
+ * Marvell 88E6xxx Switch Port Registers support
+ *
+ * Copyright (c) 2008 Marvell Semiconductor
+ *
+ * Copyright (c) 2016 Vivien Didelot <vivien.didelot@savoirfairelinux.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include "mv88e6xxx.h"
+#include "port.h"
+
+int mv88e6xxx_port_read(struct mv88e6xxx_chip *chip, int port, int reg,
+ u16 *val)
+{
+ int addr = chip->info->port_base_addr + port;
+
+ return mv88e6xxx_read(chip, addr, reg, val);
+}
+
+int mv88e6xxx_port_write(struct mv88e6xxx_chip *chip, int port, int reg,
+ u16 val)
+{
+ int addr = chip->info->port_base_addr + port;
+
+ return mv88e6xxx_write(chip, addr, reg, val);
+}
+
+/* Offset 0x01: MAC (or PCS or Physical) Control Register
+ *
+ * Link, Duplex and Flow Control have one force bit, one value bit.
+ *
+ * For port's MAC speed, ForceSpd (or SpdValue) bits 1:0 program the value.
+ * Alternative values require the 200BASE (or AltSpeed) bit 12 set.
+ * Newer chips need a ForcedSpd bit 13 set to consider the value.
+ */
+
+static int mv88e6xxx_port_set_rgmii_delay(struct mv88e6xxx_chip *chip, int port,
+ phy_interface_t mode)
+{
+ u16 reg;
+ int err;
+
+ err = mv88e6xxx_port_read(chip, port, PORT_PCS_CTRL, &reg);
+ if (err)
+ return err;
+
+ reg &= ~(PORT_PCS_CTRL_RGMII_DELAY_RXCLK |
+ PORT_PCS_CTRL_RGMII_DELAY_TXCLK);
+
+ switch (mode) {
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ reg |= PORT_PCS_CTRL_RGMII_DELAY_RXCLK;
+ break;
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ reg |= PORT_PCS_CTRL_RGMII_DELAY_TXCLK;
+ break;
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ reg |= PORT_PCS_CTRL_RGMII_DELAY_RXCLK |
+ PORT_PCS_CTRL_RGMII_DELAY_TXCLK;
+ break;
+ case PHY_INTERFACE_MODE_RGMII:
+ break;
+ default:
+ return 0;
+ }
+
+ err = mv88e6xxx_port_write(chip, port, PORT_PCS_CTRL, reg);
+ if (err)
+ return err;
+
+ netdev_dbg(chip->ds->ports[port].netdev, "delay RXCLK %s, TXCLK %s\n",
+ reg & PORT_PCS_CTRL_RGMII_DELAY_RXCLK ? "yes" : "no",
+ reg & PORT_PCS_CTRL_RGMII_DELAY_TXCLK ? "yes" : "no");
+
+ return 0;
+}
+
+int mv88e6352_port_set_rgmii_delay(struct mv88e6xxx_chip *chip, int port,
+ phy_interface_t mode)
+{
+ if (port < 5)
+ return -EOPNOTSUPP;
+
+ return mv88e6xxx_port_set_rgmii_delay(chip, port, mode);
+}
+
+int mv88e6390_port_set_rgmii_delay(struct mv88e6xxx_chip *chip, int port,
+ phy_interface_t mode)
+{
+ if (port != 0)
+ return -EOPNOTSUPP;
+
+ return mv88e6xxx_port_set_rgmii_delay(chip, port, mode);
+}
+
+int mv88e6xxx_port_set_link(struct mv88e6xxx_chip *chip, int port, int link)
+{
+ u16 reg;
+ int err;
+
+ err = mv88e6xxx_port_read(chip, port, PORT_PCS_CTRL, &reg);
+ if (err)
+ return err;
+
+ reg &= ~(PORT_PCS_CTRL_FORCE_LINK | PORT_PCS_CTRL_LINK_UP);
+
+ switch (link) {
+ case LINK_FORCED_DOWN:
+ reg |= PORT_PCS_CTRL_FORCE_LINK;
+ break;
+ case LINK_FORCED_UP:
+ reg |= PORT_PCS_CTRL_FORCE_LINK | PORT_PCS_CTRL_LINK_UP;
+ break;
+ case LINK_UNFORCED:
+ /* normal link detection */
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ err = mv88e6xxx_port_write(chip, port, PORT_PCS_CTRL, reg);
+ if (err)
+ return err;
+
+ netdev_dbg(chip->ds->ports[port].netdev, "%s link %s\n",
+ reg & PORT_PCS_CTRL_FORCE_LINK ? "Force" : "Unforce",
+ reg & PORT_PCS_CTRL_LINK_UP ? "up" : "down");
+
+ return 0;
+}
+
+int mv88e6xxx_port_set_duplex(struct mv88e6xxx_chip *chip, int port, int dup)
+{
+ u16 reg;
+ int err;
+
+ err = mv88e6xxx_port_read(chip, port, PORT_PCS_CTRL, &reg);
+ if (err)
+ return err;
+
+ reg &= ~(PORT_PCS_CTRL_FORCE_DUPLEX | PORT_PCS_CTRL_DUPLEX_FULL);
+
+ switch (dup) {
+ case DUPLEX_HALF:
+ reg |= PORT_PCS_CTRL_FORCE_DUPLEX;
+ break;
+ case DUPLEX_FULL:
+ reg |= PORT_PCS_CTRL_FORCE_DUPLEX | PORT_PCS_CTRL_DUPLEX_FULL;
+ break;
+ case DUPLEX_UNFORCED:
+ /* normal duplex detection */
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ err = mv88e6xxx_port_write(chip, port, PORT_PCS_CTRL, reg);
+ if (err)
+ return err;
+
+ netdev_dbg(chip->ds->ports[port].netdev, "%s %s duplex\n",
+ reg & PORT_PCS_CTRL_FORCE_DUPLEX ? "Force" : "Unforce",
+ reg & PORT_PCS_CTRL_DUPLEX_FULL ? "full" : "half");
+
+ return 0;
+}
+
+static int mv88e6xxx_port_set_speed(struct mv88e6xxx_chip *chip, int port,
+ int speed, bool alt_bit, bool force_bit)
+{
+ u16 reg, ctrl;
+ int err;
+
+ switch (speed) {
+ case 10:
+ ctrl = PORT_PCS_CTRL_SPEED_10;
+ break;
+ case 100:
+ ctrl = PORT_PCS_CTRL_SPEED_100;
+ break;
+ case 200:
+ if (alt_bit)
+ ctrl = PORT_PCS_CTRL_SPEED_100 | PORT_PCS_CTRL_ALTSPEED;
+ else
+ ctrl = PORT_PCS_CTRL_SPEED_200;
+ break;
+ case 1000:
+ ctrl = PORT_PCS_CTRL_SPEED_1000;
+ break;
+ case 2500:
+ ctrl = PORT_PCS_CTRL_SPEED_1000 | PORT_PCS_CTRL_ALTSPEED;
+ break;
+ case 10000:
+ /* all bits set, fall through... */
+ case SPEED_UNFORCED:
+ ctrl = PORT_PCS_CTRL_SPEED_UNFORCED;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ err = mv88e6xxx_port_read(chip, port, PORT_PCS_CTRL, &reg);
+ if (err)
+ return err;
+
+ reg &= ~PORT_PCS_CTRL_SPEED_MASK;
+ if (alt_bit)
+ reg &= ~PORT_PCS_CTRL_ALTSPEED;
+ if (force_bit) {
+ reg &= ~PORT_PCS_CTRL_FORCE_SPEED;
+ if (speed != SPEED_UNFORCED)
+ ctrl |= PORT_PCS_CTRL_FORCE_SPEED;
+ }
+ reg |= ctrl;
+
+ err = mv88e6xxx_port_write(chip, port, PORT_PCS_CTRL, reg);
+ if (err)
+ return err;
+
+ if (speed != SPEED_UNFORCED)
+ netdev_dbg(chip->ds->ports[port].netdev,
+ "Speed set to %d Mbps\n", speed);
+ else
+ netdev_dbg(chip->ds->ports[port].netdev, "Speed unforced\n");
+
+ return 0;
+}
+
+/* Support 10, 100, 200 Mbps (e.g. 88E6065 family) */
+int mv88e6065_port_set_speed(struct mv88e6xxx_chip *chip, int port, int speed)
+{
+ if (speed == SPEED_MAX)
+ speed = 200;
+
+ if (speed > 200)
+ return -EOPNOTSUPP;
+
+ /* Setting 200 Mbps on port 0 to 3 selects 100 Mbps */
+ return mv88e6xxx_port_set_speed(chip, port, speed, false, false);
+}
+
+/* Support 10, 100, 1000 Mbps (e.g. 88E6185 family) */
+int mv88e6185_port_set_speed(struct mv88e6xxx_chip *chip, int port, int speed)
+{
+ if (speed == SPEED_MAX)
+ speed = 1000;
+
+ if (speed == 200 || speed > 1000)
+ return -EOPNOTSUPP;
+
+ return mv88e6xxx_port_set_speed(chip, port, speed, false, false);
+}
+
+/* Support 10, 100, 200, 1000 Mbps (e.g. 88E6352 family) */
+int mv88e6352_port_set_speed(struct mv88e6xxx_chip *chip, int port, int speed)
+{
+ if (speed == SPEED_MAX)
+ speed = 1000;
+
+ if (speed > 1000)
+ return -EOPNOTSUPP;
+
+ if (speed == 200 && port < 5)
+ return -EOPNOTSUPP;
+
+ return mv88e6xxx_port_set_speed(chip, port, speed, true, false);
+}
+
+/* Support 10, 100, 200, 1000, 2500 Mbps (e.g. 88E6390) */
+int mv88e6390_port_set_speed(struct mv88e6xxx_chip *chip, int port, int speed)
+{
+ if (speed == SPEED_MAX)
+ speed = port < 9 ? 1000 : 2500;
+
+ if (speed > 2500)
+ return -EOPNOTSUPP;
+
+ if (speed == 200 && port != 0)
+ return -EOPNOTSUPP;
+
+ if (speed == 2500 && port < 9)
+ return -EOPNOTSUPP;
+
+ return mv88e6xxx_port_set_speed(chip, port, speed, true, true);
+}
+
+/* Support 10, 100, 200, 1000, 2500, 10000 Mbps (e.g. 88E6190X) */
+int mv88e6390x_port_set_speed(struct mv88e6xxx_chip *chip, int port, int speed)
+{
+ if (speed == SPEED_MAX)
+ speed = port < 9 ? 1000 : 10000;
+
+ if (speed == 200 && port != 0)
+ return -EOPNOTSUPP;
+
+ if (speed >= 2500 && port < 9)
+ return -EOPNOTSUPP;
+
+ return mv88e6xxx_port_set_speed(chip, port, speed, true, true);
+}
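
These wrappers are meant to be selected per chip family; a sketch of a generic
dispatch (the ops-table indirection here is assumed for illustration, not taken
from this patch):

        /* hypothetical dispatch; the real driver wires this up per family */
        if (chip->info->ops->port_set_speed)
                err = chip->info->ops->port_set_speed(chip, port, SPEED_MAX);
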
+
+/* Offset 0x02: Pause Control
+ *
+ * Do not limit the period of time that this port can be paused for by
+ * the remote end or the period of time that this port can pause the
+ * remote end.
+ */
+int mv88e6097_port_pause_config(struct mv88e6xxx_chip *chip, int port)
+{
+ return mv88e6xxx_port_write(chip, port, PORT_PAUSE_CTRL, 0x0000);
+}
+
+int mv88e6390_port_pause_config(struct mv88e6xxx_chip *chip, int port)
+{
+ int err;
+
+ err = mv88e6xxx_port_write(chip, port, PORT_PAUSE_CTRL,
+ PORT_FLOW_CTRL_LIMIT_IN | 0);
+ if (err)
+ return err;
+
+ return mv88e6xxx_port_write(chip, port, PORT_PAUSE_CTRL,
+ PORT_FLOW_CTRL_LIMIT_OUT | 0);
+}
+
+/* Offset 0x04: Port Control Register */
+
+static const char * const mv88e6xxx_port_state_names[] = {
+ [PORT_CONTROL_STATE_DISABLED] = "Disabled",
+ [PORT_CONTROL_STATE_BLOCKING] = "Blocking/Listening",
+ [PORT_CONTROL_STATE_LEARNING] = "Learning",
+ [PORT_CONTROL_STATE_FORWARDING] = "Forwarding",
+};
+
+int mv88e6xxx_port_set_state(struct mv88e6xxx_chip *chip, int port, u8 state)
+{
+ u16 reg;
+ int err;
+
+ err = mv88e6xxx_port_read(chip, port, PORT_CONTROL, &reg);
+ if (err)
+ return err;
+
+ reg &= ~PORT_CONTROL_STATE_MASK;
+ reg |= state;
+
+ err = mv88e6xxx_port_write(chip, port, PORT_CONTROL, reg);
+ if (err)
+ return err;
+
+ netdev_dbg(chip->ds->ports[port].netdev, "PortState set to %s\n",
+ mv88e6xxx_port_state_names[state]);
+
+ return 0;
+}
+
+int mv88e6xxx_port_set_egress_mode(struct mv88e6xxx_chip *chip, int port,
+ u16 mode)
+{
+ int err;
+ u16 reg;
+
+ err = mv88e6xxx_port_read(chip, port, PORT_CONTROL, &reg);
+ if (err)
+ return err;
+
+ reg &= ~PORT_CONTROL_EGRESS_MASK;
+ reg |= mode;
+
+ return mv88e6xxx_port_write(chip, port, PORT_CONTROL, reg);
+}
+
+int mv88e6085_port_set_frame_mode(struct mv88e6xxx_chip *chip, int port,
+ enum mv88e6xxx_frame_mode mode)
+{
+ int err;
+ u16 reg;
+
+ err = mv88e6xxx_port_read(chip, port, PORT_CONTROL, &reg);
+ if (err)
+ return err;
+
+ reg &= ~PORT_CONTROL_FRAME_MODE_DSA;
+
+ switch (mode) {
+ case MV88E6XXX_FRAME_MODE_NORMAL:
+ reg |= PORT_CONTROL_FRAME_MODE_NORMAL;
+ break;
+ case MV88E6XXX_FRAME_MODE_DSA:
+ reg |= PORT_CONTROL_FRAME_MODE_DSA;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return mv88e6xxx_port_write(chip, port, PORT_CONTROL, reg);
+}
+
+int mv88e6351_port_set_frame_mode(struct mv88e6xxx_chip *chip, int port,
+ enum mv88e6xxx_frame_mode mode)
+{
+ int err;
+ u16 reg;
+
+ err = mv88e6xxx_port_read(chip, port, PORT_CONTROL, &reg);
+ if (err)
+ return err;
+
+ reg &= ~PORT_CONTROL_FRAME_MASK;
+
+ switch (mode) {
+ case MV88E6XXX_FRAME_MODE_NORMAL:
+ reg |= PORT_CONTROL_FRAME_MODE_NORMAL;
+ break;
+ case MV88E6XXX_FRAME_MODE_DSA:
+ reg |= PORT_CONTROL_FRAME_MODE_DSA;
+ break;
+ case MV88E6XXX_FRAME_MODE_PROVIDER:
+ reg |= PORT_CONTROL_FRAME_MODE_PROVIDER;
+ break;
+ case MV88E6XXX_FRAME_MODE_ETHERTYPE:
+ reg |= PORT_CONTROL_FRAME_ETHER_TYPE_DSA;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return mv88e6xxx_port_write(chip, port, PORT_CONTROL, reg);
+}
+
+int mv88e6085_port_set_egress_unknowns(struct mv88e6xxx_chip *chip, int port,
+ bool on)
+{
+ int err;
+ u16 reg;
+
+ err = mv88e6xxx_port_read(chip, port, PORT_CONTROL, &reg);
+ if (err)
+ return err;
+
+ if (on)
+ reg |= PORT_CONTROL_FORWARD_UNKNOWN;
+ else
+ reg &= ~PORT_CONTROL_FORWARD_UNKNOWN;
+
+ return mv88e6xxx_port_write(chip, port, PORT_CONTROL, reg);
+}
+
+int mv88e6351_port_set_egress_unknowns(struct mv88e6xxx_chip *chip, int port,
+ bool on)
+{
+ int err;
+ u16 reg;
+
+ err = mv88e6xxx_port_read(chip, port, PORT_CONTROL, &reg);
+ if (err)
+ return err;
+
+ if (on)
+ reg |= PORT_CONTROL_EGRESS_ALL_UNKNOWN_DA;
+ else
+ reg &= ~PORT_CONTROL_EGRESS_ALL_UNKNOWN_DA;
+
+ return mv88e6xxx_port_write(chip, port, PORT_CONTROL, reg);
+}
+
+/* Offset 0x05: Port Control 1 */
+
+/* Offset 0x06: Port Based VLAN Map */
+
+int mv88e6xxx_port_set_vlan_map(struct mv88e6xxx_chip *chip, int port, u16 map)
+{
+ const u16 mask = GENMASK(mv88e6xxx_num_ports(chip) - 1, 0);
+ u16 reg;
+ int err;
+
+ err = mv88e6xxx_port_read(chip, port, PORT_BASE_VLAN, &reg);
+ if (err)
+ return err;
+
+ reg &= ~mask;
+ reg |= map & mask;
+
+ err = mv88e6xxx_port_write(chip, port, PORT_BASE_VLAN, reg);
+ if (err)
+ return err;
+
+ netdev_dbg(chip->ds->ports[port].netdev, "VLANTable set to %.3x\n",
+ map);
+
+ return 0;
+}
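
For reference, the mask arithmetic on, say, an 11-port chip: GENMASK(10, 0)
yields 0x7ff, which is why the debug message above prints three hex digits.

        /* mv88e6xxx_num_ports(chip) == 11, assumed for illustration */
        u16 mask = GENMASK(11 - 1, 0);  /* 0x07ff */
        reg = (reg & ~mask) | (map & mask);
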
+
+int mv88e6xxx_port_get_fid(struct mv88e6xxx_chip *chip, int port, u16 *fid)
+{
+ const u16 upper_mask = (mv88e6xxx_num_databases(chip) - 1) >> 4;
+ u16 reg;
+ int err;
+
+ /* Port's default FID lower 4 bits are located in reg 0x06, offset 12 */
+ err = mv88e6xxx_port_read(chip, port, PORT_BASE_VLAN, &reg);
+ if (err)
+ return err;
+
+ *fid = (reg & 0xf000) >> 12;
+
+ /* Port's default FID upper bits are located in reg 0x05, offset 0 */
+ if (upper_mask) {
+ err = mv88e6xxx_port_read(chip, port, PORT_CONTROL_1, &reg);
+ if (err)
+ return err;
+
+ *fid |= (reg & upper_mask) << 4;
+ }
+
+ return 0;
+}
+
+int mv88e6xxx_port_set_fid(struct mv88e6xxx_chip *chip, int port, u16 fid)
+{
+ const u16 upper_mask = (mv88e6xxx_num_databases(chip) - 1) >> 4;
+ u16 reg;
+ int err;
+
+ if (fid >= mv88e6xxx_num_databases(chip))
+ return -EINVAL;
+
+ /* Port's default FID lower 4 bits are located in reg 0x06, offset 12 */
+ err = mv88e6xxx_port_read(chip, port, PORT_BASE_VLAN, &reg);
+ if (err)
+ return err;
+
+ reg &= 0x0fff;
+ reg |= (fid & 0x000f) << 12;
+
+ err = mv88e6xxx_port_write(chip, port, PORT_BASE_VLAN, reg);
+ if (err)
+ return err;
+
+ /* Port's default FID upper bits are located in reg 0x05, offset 0 */
+ if (upper_mask) {
+ err = mv88e6xxx_port_read(chip, port, PORT_CONTROL_1, &reg);
+ if (err)
+ return err;
+
+ reg &= ~upper_mask;
+ reg |= (fid >> 4) & upper_mask;
+
+ err = mv88e6xxx_port_write(chip, port, PORT_CONTROL_1, reg);
+ if (err)
+ return err;
+ }
+
+ netdev_dbg(chip->ds->ports[port].netdev, "FID set to %u\n", fid);
+
+ return 0;
+}
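
To make the register split concrete, a sketch of the arithmetic for an assumed
4096-database chip and an example FID value:

        u16 fid = 0x123;                /* example 12-bit FID */
        u16 lower = fid & 0x000f;       /* -> reg 0x06, bits 15:12 */
        u16 upper = fid >> 4;           /* -> reg 0x05, low bits */
        /* upper_mask = (4096 - 1) >> 4 = 0xff, so all eight upper bits apply */
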
+
+/* Offset 0x07: Default Port VLAN ID & Priority */
+
+int mv88e6xxx_port_get_pvid(struct mv88e6xxx_chip *chip, int port, u16 *pvid)
+{
+ u16 reg;
+ int err;
+
+ err = mv88e6xxx_port_read(chip, port, PORT_DEFAULT_VLAN, &reg);
+ if (err)
+ return err;
+
+ *pvid = reg & PORT_DEFAULT_VLAN_MASK;
+
+ return 0;
+}
+
+int mv88e6xxx_port_set_pvid(struct mv88e6xxx_chip *chip, int port, u16 pvid)
+{
+ u16 reg;
+ int err;
+
+ err = mv88e6xxx_port_read(chip, port, PORT_DEFAULT_VLAN, &reg);
+ if (err)
+ return err;
+
+ reg &= ~PORT_DEFAULT_VLAN_MASK;
+ reg |= pvid & PORT_DEFAULT_VLAN_MASK;
+
+ err = mv88e6xxx_port_write(chip, port, PORT_DEFAULT_VLAN, reg);
+ if (err)
+ return err;
+
+ netdev_dbg(chip->ds->ports[port].netdev, "DefaultVID set to %u\n",
+ pvid);
+
+ return 0;
+}
+
+/* Offset 0x08: Port Control 2 Register */
+
+static const char * const mv88e6xxx_port_8021q_mode_names[] = {
+ [PORT_CONTROL_2_8021Q_DISABLED] = "Disabled",
+ [PORT_CONTROL_2_8021Q_FALLBACK] = "Fallback",
+ [PORT_CONTROL_2_8021Q_CHECK] = "Check",
+ [PORT_CONTROL_2_8021Q_SECURE] = "Secure",
+};
+
+int mv88e6xxx_port_set_8021q_mode(struct mv88e6xxx_chip *chip, int port,
+ u16 mode)
+{
+ u16 reg;
+ int err;
+
+ err = mv88e6xxx_port_read(chip, port, PORT_CONTROL_2, &reg);
+ if (err)
+ return err;
+
+ reg &= ~PORT_CONTROL_2_8021Q_MASK;
+ reg |= mode & PORT_CONTROL_2_8021Q_MASK;
+
+ err = mv88e6xxx_port_write(chip, port, PORT_CONTROL_2, reg);
+ if (err)
+ return err;
+
+ netdev_dbg(chip->ds->ports[port].netdev, "802.1QMode set to %s\n",
+ mv88e6xxx_port_8021q_mode_names[mode]);
+
+ return 0;
+}
+
+int mv88e6165_port_jumbo_config(struct mv88e6xxx_chip *chip, int port)
+{
+ u16 reg;
+ int err;
+
+ err = mv88e6xxx_port_read(chip, port, PORT_CONTROL_2, &reg);
+ if (err)
+ return err;
+
+ reg |= PORT_CONTROL_2_JUMBO_10240;
+
+ return mv88e6xxx_port_write(chip, port, PORT_CONTROL_2, reg);
+}
+
+/* Offset 0x09: Port Rate Control */
+
+int mv88e6095_port_egress_rate_limiting(struct mv88e6xxx_chip *chip, int port)
+{
+ return mv88e6xxx_port_write(chip, port, PORT_RATE_CONTROL, 0x0000);
+}
+
+int mv88e6097_port_egress_rate_limiting(struct mv88e6xxx_chip *chip, int port)
+{
+ return mv88e6xxx_port_write(chip, port, PORT_RATE_CONTROL, 0x0001);
+}
+
+/* Offset 0x0f: Port Ether type */
+
+int mv88e6351_port_set_ether_type(struct mv88e6xxx_chip *chip, int port,
+ u16 etype)
+{
+ return mv88e6xxx_port_write(chip, port, PORT_ETH_TYPE, etype);
+}
+
+/* Offset 0x18: Port IEEE Priority Remapping Registers [0-3]
+ * Offset 0x19: Port IEEE Priority Remapping Registers [4-7]
+ */
+
+int mv88e6095_port_tag_remap(struct mv88e6xxx_chip *chip, int port)
+{
+ int err;
+
+ /* Use a direct priority mapping for all IEEE tagged frames */
+ err = mv88e6xxx_port_write(chip, port, PORT_TAG_REGMAP_0123, 0x3210);
+ if (err)
+ return err;
+
+ return mv88e6xxx_port_write(chip, port, PORT_TAG_REGMAP_4567, 0x7654);
+}
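
The magic constants are nibble-packed identity mappings: nibble i of the
register pair holds the remapped priority for IEEE priority i. Spelled out:

        u16 regmap_0123 = (3 << 12) | (2 << 8) | (1 << 4) | 0; /* 0x3210 */
        u16 regmap_4567 = (7 << 12) | (6 << 8) | (5 << 4) | 4; /* 0x7654 */
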
+
+static int mv88e6xxx_port_ieeepmt_write(struct mv88e6xxx_chip *chip,
+ int port, u16 table,
+ u8 pointer, u16 data)
+{
+ u16 reg;
+
+ reg = PORT_IEEE_PRIO_MAP_TABLE_UPDATE |
+ table |
+ (pointer << PORT_IEEE_PRIO_MAP_TABLE_POINTER_SHIFT) |
+ data;
+
+ return mv88e6xxx_port_write(chip, port, PORT_IEEE_PRIO_MAP_TABLE, reg);
+}
+
+int mv88e6390_port_tag_remap(struct mv88e6xxx_chip *chip, int port)
+{
+ int err, i;
+
+ for (i = 0; i <= 7; i++) {
+ err = mv88e6xxx_port_ieeepmt_write(
+ chip, port, PORT_IEEE_PRIO_MAP_TABLE_INGRESS_PCP,
+ i, (i | i << 4));
+ if (err)
+ return err;
+
+ err = mv88e6xxx_port_ieeepmt_write(
+ chip, port, PORT_IEEE_PRIO_MAP_TABLE_EGRESS_GREEN_PCP,
+ i, i);
+ if (err)
+ return err;
+
+ err = mv88e6xxx_port_ieeepmt_write(
+ chip, port, PORT_IEEE_PRIO_MAP_TABLE_EGRESS_YELLOW_PCP,
+ i, i);
+ if (err)
+ return err;
+
+ err = mv88e6xxx_port_ieeepmt_write(
+ chip, port, PORT_IEEE_PRIO_MAP_TABLE_EGRESS_AVB_PCP,
+ i, i);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
diff --git a/drivers/net/dsa/mv88e6xxx/port.h b/drivers/net/dsa/mv88e6xxx/port.h
new file mode 100644
index 000000000000..7b3bacaacbfe
--- /dev/null
+++ b/drivers/net/dsa/mv88e6xxx/port.h
@@ -0,0 +1,71 @@
+/*
+ * Marvell 88E6xxx Switch Port Registers support
+ *
+ * Copyright (c) 2008 Marvell Semiconductor
+ *
+ * Copyright (c) 2016 Vivien Didelot <vivien.didelot@savoirfairelinux.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef _MV88E6XXX_PORT_H
+#define _MV88E6XXX_PORT_H
+
+#include "mv88e6xxx.h"
+
+int mv88e6xxx_port_read(struct mv88e6xxx_chip *chip, int port, int reg,
+ u16 *val);
+int mv88e6xxx_port_write(struct mv88e6xxx_chip *chip, int port, int reg,
+ u16 val);
+
+int mv88e6352_port_set_rgmii_delay(struct mv88e6xxx_chip *chip, int port,
+ phy_interface_t mode);
+int mv88e6390_port_set_rgmii_delay(struct mv88e6xxx_chip *chip, int port,
+ phy_interface_t mode);
+
+int mv88e6xxx_port_set_link(struct mv88e6xxx_chip *chip, int port, int link);
+
+int mv88e6xxx_port_set_duplex(struct mv88e6xxx_chip *chip, int port, int dup);
+
+int mv88e6065_port_set_speed(struct mv88e6xxx_chip *chip, int port, int speed);
+int mv88e6185_port_set_speed(struct mv88e6xxx_chip *chip, int port, int speed);
+int mv88e6352_port_set_speed(struct mv88e6xxx_chip *chip, int port, int speed);
+int mv88e6390_port_set_speed(struct mv88e6xxx_chip *chip, int port, int speed);
+int mv88e6390x_port_set_speed(struct mv88e6xxx_chip *chip, int port, int speed);
+
+int mv88e6xxx_port_set_state(struct mv88e6xxx_chip *chip, int port, u8 state);
+
+int mv88e6xxx_port_set_vlan_map(struct mv88e6xxx_chip *chip, int port, u16 map);
+
+int mv88e6xxx_port_get_fid(struct mv88e6xxx_chip *chip, int port, u16 *fid);
+int mv88e6xxx_port_set_fid(struct mv88e6xxx_chip *chip, int port, u16 fid);
+
+int mv88e6xxx_port_get_pvid(struct mv88e6xxx_chip *chip, int port, u16 *pvid);
+int mv88e6xxx_port_set_pvid(struct mv88e6xxx_chip *chip, int port, u16 pvid);
+
+int mv88e6xxx_port_set_8021q_mode(struct mv88e6xxx_chip *chip, int port,
+ u16 mode);
+int mv88e6095_port_tag_remap(struct mv88e6xxx_chip *chip, int port);
+int mv88e6390_port_tag_remap(struct mv88e6xxx_chip *chip, int port);
+int mv88e6xxx_port_set_egress_mode(struct mv88e6xxx_chip *chip, int port,
+ u16 mode);
+int mv88e6085_port_set_frame_mode(struct mv88e6xxx_chip *chip, int port,
+ enum mv88e6xxx_frame_mode mode);
+int mv88e6351_port_set_frame_mode(struct mv88e6xxx_chip *chip, int port,
+ enum mv88e6xxx_frame_mode mode);
+int mv88e6085_port_set_egress_unknowns(struct mv88e6xxx_chip *chip, int port,
+ bool on);
+int mv88e6351_port_set_egress_unknowns(struct mv88e6xxx_chip *chip, int port,
+ bool on);
+int mv88e6351_port_set_ether_type(struct mv88e6xxx_chip *chip, int port,
+ u16 etype);
+int mv88e6165_port_jumbo_config(struct mv88e6xxx_chip *chip, int port);
+int mv88e6095_port_egress_rate_limiting(struct mv88e6xxx_chip *chip, int port);
+int mv88e6097_port_egress_rate_limiting(struct mv88e6xxx_chip *chip, int port);
+int mv88e6097_port_pause_config(struct mv88e6xxx_chip *chip, int port);
+int mv88e6390_port_pause_config(struct mv88e6xxx_chip *chip, int port);
+
+#endif /* _MV88E6XXX_PORT_H */
diff --git a/drivers/net/dummy.c b/drivers/net/dummy.c
index 69fc8409a973..6421835f11b7 100644
--- a/drivers/net/dummy.c
+++ b/drivers/net/dummy.c
@@ -154,6 +154,9 @@ static void dummy_setup(struct net_device *dev)
dev->hw_features |= dev->features;
dev->hw_enc_features |= dev->features;
eth_hw_addr_random(dev);
+
+ dev->min_mtu = 0;
+ dev->max_mtu = ETH_MAX_MTU;
}
static int dummy_validate(struct nlattr *tb[], struct nlattr *data[])
diff --git a/drivers/net/ethernet/3com/3c509.c b/drivers/net/ethernet/3com/3c509.c
index 91ada52f776b..a7533780dddc 100644
--- a/drivers/net/ethernet/3com/3c509.c
+++ b/drivers/net/ethernet/3com/3c509.c
@@ -508,7 +508,6 @@ static const struct net_device_ops netdev_ops = {
.ndo_get_stats = el3_get_stats,
.ndo_set_rx_mode = set_multicast_list,
.ndo_tx_timeout = el3_tx_timeout,
- .ndo_change_mtu = eth_change_mtu,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
@@ -1041,67 +1040,68 @@ el3_link_ok(struct net_device *dev)
}
static int
-el3_netdev_get_ecmd(struct net_device *dev, struct ethtool_cmd *ecmd)
+el3_netdev_get_ecmd(struct net_device *dev, struct ethtool_link_ksettings *cmd)
{
u16 tmp;
int ioaddr = dev->base_addr;
+ u32 supported;
EL3WINDOW(0);
/* obtain current transceiver via WN4_MEDIA? */
tmp = inw(ioaddr + WN0_ADDR_CONF);
- ecmd->transceiver = XCVR_INTERNAL;
switch (tmp >> 14) {
case 0:
- ecmd->port = PORT_TP;
+ cmd->base.port = PORT_TP;
break;
case 1:
- ecmd->port = PORT_AUI;
- ecmd->transceiver = XCVR_EXTERNAL;
+ cmd->base.port = PORT_AUI;
break;
case 3:
- ecmd->port = PORT_BNC;
+ cmd->base.port = PORT_BNC;
default:
break;
}
- ecmd->duplex = DUPLEX_HALF;
- ecmd->supported = 0;
+ cmd->base.duplex = DUPLEX_HALF;
+ supported = 0;
tmp = inw(ioaddr + WN0_CONF_CTRL);
if (tmp & (1<<13))
- ecmd->supported |= SUPPORTED_AUI;
+ supported |= SUPPORTED_AUI;
if (tmp & (1<<12))
- ecmd->supported |= SUPPORTED_BNC;
+ supported |= SUPPORTED_BNC;
if (tmp & (1<<9)) {
- ecmd->supported |= SUPPORTED_TP | SUPPORTED_10baseT_Half |
+ supported |= SUPPORTED_TP | SUPPORTED_10baseT_Half |
SUPPORTED_10baseT_Full; /* hmm... */
EL3WINDOW(4);
tmp = inw(ioaddr + WN4_NETDIAG);
if (tmp & FD_ENABLE)
- ecmd->duplex = DUPLEX_FULL;
+ cmd->base.duplex = DUPLEX_FULL;
}
- ethtool_cmd_speed_set(ecmd, SPEED_10);
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
+ supported);
+ cmd->base.speed = SPEED_10;
EL3WINDOW(1);
return 0;
}
static int
-el3_netdev_set_ecmd(struct net_device *dev, struct ethtool_cmd *ecmd)
+el3_netdev_set_ecmd(struct net_device *dev,
+ const struct ethtool_link_ksettings *cmd)
{
u16 tmp;
int ioaddr = dev->base_addr;
- if (ecmd->speed != SPEED_10)
+ if (cmd->base.speed != SPEED_10)
return -EINVAL;
- if ((ecmd->duplex != DUPLEX_HALF) && (ecmd->duplex != DUPLEX_FULL))
- return -EINVAL;
- if ((ecmd->transceiver != XCVR_INTERNAL) && (ecmd->transceiver != XCVR_EXTERNAL))
+ if ((cmd->base.duplex != DUPLEX_HALF) &&
+ (cmd->base.duplex != DUPLEX_FULL))
return -EINVAL;
/* change XCVR type */
EL3WINDOW(0);
tmp = inw(ioaddr + WN0_ADDR_CONF);
- switch (ecmd->port) {
+ switch (cmd->base.port) {
case PORT_TP:
tmp &= ~(3<<14);
dev->if_port = 0;
@@ -1131,7 +1131,7 @@ el3_netdev_set_ecmd(struct net_device *dev, struct ethtool_cmd *ecmd)
EL3WINDOW(4);
tmp = inw(ioaddr + WN4_NETDIAG);
- if (ecmd->duplex == DUPLEX_FULL)
+ if (cmd->base.duplex == DUPLEX_FULL)
tmp |= FD_ENABLE;
else
tmp &= ~FD_ENABLE;
@@ -1147,24 +1147,26 @@ static void el3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info
strlcpy(info->version, DRV_VERSION, sizeof(info->version));
}
-static int el3_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
+static int el3_get_link_ksettings(struct net_device *dev,
+ struct ethtool_link_ksettings *cmd)
{
struct el3_private *lp = netdev_priv(dev);
int ret;
spin_lock_irq(&lp->lock);
- ret = el3_netdev_get_ecmd(dev, ecmd);
+ ret = el3_netdev_get_ecmd(dev, cmd);
spin_unlock_irq(&lp->lock);
return ret;
}
-static int el3_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
+static int el3_set_link_ksettings(struct net_device *dev,
+ const struct ethtool_link_ksettings *cmd)
{
struct el3_private *lp = netdev_priv(dev);
int ret;
spin_lock_irq(&lp->lock);
- ret = el3_netdev_set_ecmd(dev, ecmd);
+ ret = el3_netdev_set_ecmd(dev, cmd);
spin_unlock_irq(&lp->lock);
return ret;
}
@@ -1192,11 +1194,11 @@ static void el3_set_msglevel(struct net_device *dev, u32 v)
static const struct ethtool_ops ethtool_ops = {
.get_drvinfo = el3_get_drvinfo,
- .get_settings = el3_get_settings,
- .set_settings = el3_set_settings,
.get_link = el3_get_link,
.get_msglevel = el3_get_msglevel,
.set_msglevel = el3_set_msglevel,
+ .get_link_ksettings = el3_get_link_ksettings,
+ .set_link_ksettings = el3_set_link_ksettings,
};
static void
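
The ethtool conversion pattern repeated throughout this series: the legacy u32
supported/advertising bitmaps are still built the old way, then translated into
the link-mode bitmaps of struct ethtool_link_ksettings, while the scalar fields
move under cmd->base. A condensed sketch of the get path:

        u32 supported = SUPPORTED_TP | SUPPORTED_10baseT_Half |
                        SUPPORTED_10baseT_Full;

        ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
                                                supported);
        cmd->base.speed = SPEED_10;     /* was ethtool_cmd_speed_set(ecmd, ...) */
        cmd->base.duplex = DUPLEX_HALF;
        cmd->base.port = PORT_TP;
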
diff --git a/drivers/net/ethernet/3com/3c515.c b/drivers/net/ethernet/3com/3c515.c
index b26e038b4a0e..be5b80103bec 100644
--- a/drivers/net/ethernet/3com/3c515.c
+++ b/drivers/net/ethernet/3com/3c515.c
@@ -570,7 +570,6 @@ static const struct net_device_ops netdev_ops = {
.ndo_tx_timeout = corkscrew_timeout,
.ndo_get_stats = corkscrew_get_stats,
.ndo_set_rx_mode = set_rx_mode,
- .ndo_change_mtu = eth_change_mtu,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
};
@@ -628,6 +627,8 @@ static int corkscrew_setup(struct net_device *dev, int ioaddr,
spin_lock_init(&vp->lock);
+ setup_timer(&vp->timer, corkscrew_timer, (unsigned long) dev);
+
/* Read the station address from the EEPROM. */
EL3WINDOW(0);
for (i = 0; i < 0x18; i++) {
@@ -708,6 +709,7 @@ static int corkscrew_open(struct net_device *dev)
{
int ioaddr = dev->base_addr;
struct corkscrew_private *vp = netdev_priv(dev);
+ bool armtimer = false;
__u32 config;
int i;
@@ -732,12 +734,7 @@ static int corkscrew_open(struct net_device *dev)
if (corkscrew_debug > 1)
pr_debug("%s: Initial media type %s.\n",
dev->name, media_tbl[dev->if_port].name);
-
- init_timer(&vp->timer);
- vp->timer.expires = jiffies + media_tbl[dev->if_port].wait;
- vp->timer.data = (unsigned long) dev;
- vp->timer.function = corkscrew_timer; /* timer handler */
- add_timer(&vp->timer);
+ armtimer = true;
} else
dev->if_port = vp->default_media;
@@ -777,6 +774,9 @@ static int corkscrew_open(struct net_device *dev)
return -EAGAIN;
}
+ if (armtimer)
+ mod_timer(&vp->timer, jiffies + media_tbl[dev->if_port].wait);
+
if (corkscrew_debug > 1) {
EL3WINDOW(4);
pr_debug("%s: corkscrew_open() irq %d media status %4.4x.\n",
@@ -1427,7 +1427,7 @@ static int corkscrew_close(struct net_device *dev)
dev->name, rx_nocopy, rx_copy, queued_packet);
}
- del_timer(&vp->timer);
+ del_timer_sync(&vp->timer);
/* Turn off statistics ASAP. We update lp->stats below. */
outw(StatsDisable, ioaddr + EL3_CMD);
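
The 3c515 hunks above follow the standard timer conversion: setup_timer() binds
the handler and its cookie once at setup time, mod_timer() (re)arms the timer
where init_timer() plus add_timer() used to, and del_timer_sync() at close waits
for a running handler to finish. The shape, condensed (delay stands in for the
media table wait used above):

        setup_timer(&vp->timer, corkscrew_timer, (unsigned long)dev); /* once */
        ...
        mod_timer(&vp->timer, jiffies + delay); /* arm or re-arm */
        ...
        del_timer_sync(&vp->timer);             /* teardown; waits for handler */
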
diff --git a/drivers/net/ethernet/3com/3c574_cs.c b/drivers/net/ethernet/3com/3c574_cs.c
index b88afd759307..9359a37fedc0 100644
--- a/drivers/net/ethernet/3com/3c574_cs.c
+++ b/drivers/net/ethernet/3com/3c574_cs.c
@@ -254,7 +254,6 @@ static const struct net_device_ops el3_netdev_ops = {
.ndo_get_stats = el3_get_stats,
.ndo_do_ioctl = el3_ioctl,
.ndo_set_rx_mode = set_multicast_list,
- .ndo_change_mtu = eth_change_mtu,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
};
diff --git a/drivers/net/ethernet/3com/3c589_cs.c b/drivers/net/ethernet/3com/3c589_cs.c
index 71396e4b87e3..e28254a00599 100644
--- a/drivers/net/ethernet/3com/3c589_cs.c
+++ b/drivers/net/ethernet/3com/3c589_cs.c
@@ -188,7 +188,6 @@ static const struct net_device_ops el3_netdev_ops = {
.ndo_set_config = el3_config,
.ndo_get_stats = el3_get_stats,
.ndo_set_rx_mode = set_multicast_list,
- .ndo_change_mtu = eth_change_mtu,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
};
diff --git a/drivers/net/ethernet/3com/3c59x.c b/drivers/net/ethernet/3com/3c59x.c
index 9133e7926da5..b3560a364e53 100644
--- a/drivers/net/ethernet/3com/3c59x.c
+++ b/drivers/net/ethernet/3com/3c59x.c
@@ -1062,7 +1062,6 @@ static const struct net_device_ops boomrang_netdev_ops = {
.ndo_do_ioctl = vortex_ioctl,
#endif
.ndo_set_rx_mode = set_rx_mode,
- .ndo_change_mtu = eth_change_mtu,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
@@ -1080,7 +1079,6 @@ static const struct net_device_ops vortex_netdev_ops = {
.ndo_do_ioctl = vortex_ioctl,
#endif
.ndo_set_rx_mode = set_rx_mode,
- .ndo_change_mtu = eth_change_mtu,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
@@ -2909,18 +2907,20 @@ static int vortex_nway_reset(struct net_device *dev)
return mii_nway_restart(&vp->mii);
}
-static int vortex_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+static int vortex_get_link_ksettings(struct net_device *dev,
+ struct ethtool_link_ksettings *cmd)
{
struct vortex_private *vp = netdev_priv(dev);
- return mii_ethtool_gset(&vp->mii, cmd);
+ return mii_ethtool_get_link_ksettings(&vp->mii, cmd);
}
-static int vortex_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+static int vortex_set_link_ksettings(struct net_device *dev,
+ const struct ethtool_link_ksettings *cmd)
{
struct vortex_private *vp = netdev_priv(dev);
- return mii_ethtool_sset(&vp->mii, cmd);
+ return mii_ethtool_set_link_ksettings(&vp->mii, cmd);
}
static u32 vortex_get_msglevel(struct net_device *dev)
@@ -3033,13 +3033,13 @@ static const struct ethtool_ops vortex_ethtool_ops = {
.set_msglevel = vortex_set_msglevel,
.get_ethtool_stats = vortex_get_ethtool_stats,
.get_sset_count = vortex_get_sset_count,
- .get_settings = vortex_get_settings,
- .set_settings = vortex_set_settings,
.get_link = ethtool_op_get_link,
.nway_reset = vortex_nway_reset,
.get_wol = vortex_get_wol,
.set_wol = vortex_set_wol,
.get_ts_info = ethtool_op_get_ts_info,
+ .get_link_ksettings = vortex_get_link_ksettings,
+ .set_link_ksettings = vortex_set_link_ksettings,
};
#ifdef CONFIG_PCI
diff --git a/drivers/net/ethernet/3com/typhoon.c b/drivers/net/ethernet/3com/typhoon.c
index 8f8418d2ac4a..a0cacbe846ba 100644
--- a/drivers/net/ethernet/3com/typhoon.c
+++ b/drivers/net/ethernet/3com/typhoon.c
@@ -996,28 +996,30 @@ typhoon_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
}
static int
-typhoon_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+typhoon_get_link_ksettings(struct net_device *dev,
+ struct ethtool_link_ksettings *cmd)
{
struct typhoon *tp = netdev_priv(dev);
+ u32 supported, advertising = 0;
- cmd->supported = SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full |
+ supported = SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full |
SUPPORTED_Autoneg;
switch (tp->xcvr_select) {
case TYPHOON_XCVR_10HALF:
- cmd->advertising = ADVERTISED_10baseT_Half;
+ advertising = ADVERTISED_10baseT_Half;
break;
case TYPHOON_XCVR_10FULL:
- cmd->advertising = ADVERTISED_10baseT_Full;
+ advertising = ADVERTISED_10baseT_Full;
break;
case TYPHOON_XCVR_100HALF:
- cmd->advertising = ADVERTISED_100baseT_Half;
+ advertising = ADVERTISED_100baseT_Half;
break;
case TYPHOON_XCVR_100FULL:
- cmd->advertising = ADVERTISED_100baseT_Full;
+ advertising = ADVERTISED_100baseT_Full;
break;
case TYPHOON_XCVR_AUTONEG:
- cmd->advertising = ADVERTISED_10baseT_Half |
+ advertising = ADVERTISED_10baseT_Half |
ADVERTISED_10baseT_Full |
ADVERTISED_100baseT_Half |
ADVERTISED_100baseT_Full |
@@ -1026,54 +1028,57 @@ typhoon_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
}
if(tp->capabilities & TYPHOON_FIBER) {
- cmd->supported |= SUPPORTED_FIBRE;
- cmd->advertising |= ADVERTISED_FIBRE;
- cmd->port = PORT_FIBRE;
+ supported |= SUPPORTED_FIBRE;
+ advertising |= ADVERTISED_FIBRE;
+ cmd->base.port = PORT_FIBRE;
} else {
- cmd->supported |= SUPPORTED_10baseT_Half |
+ supported |= SUPPORTED_10baseT_Half |
SUPPORTED_10baseT_Full |
SUPPORTED_TP;
- cmd->advertising |= ADVERTISED_TP;
- cmd->port = PORT_TP;
+ advertising |= ADVERTISED_TP;
+ cmd->base.port = PORT_TP;
}
/* need to get stats to make these link speed/duplex valid */
typhoon_do_get_stats(tp);
- ethtool_cmd_speed_set(cmd, tp->speed);
- cmd->duplex = tp->duplex;
- cmd->phy_address = 0;
- cmd->transceiver = XCVR_INTERNAL;
+ cmd->base.speed = tp->speed;
+ cmd->base.duplex = tp->duplex;
+ cmd->base.phy_address = 0;
if(tp->xcvr_select == TYPHOON_XCVR_AUTONEG)
- cmd->autoneg = AUTONEG_ENABLE;
+ cmd->base.autoneg = AUTONEG_ENABLE;
else
- cmd->autoneg = AUTONEG_DISABLE;
- cmd->maxtxpkt = 1;
- cmd->maxrxpkt = 1;
+ cmd->base.autoneg = AUTONEG_DISABLE;
+
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
+ supported);
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
+ advertising);
return 0;
}
static int
-typhoon_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+typhoon_set_link_ksettings(struct net_device *dev,
+ const struct ethtool_link_ksettings *cmd)
{
struct typhoon *tp = netdev_priv(dev);
- u32 speed = ethtool_cmd_speed(cmd);
+ u32 speed = cmd->base.speed;
struct cmd_desc xp_cmd;
__le16 xcvr;
int err;
err = -EINVAL;
- if (cmd->autoneg == AUTONEG_ENABLE) {
+ if (cmd->base.autoneg == AUTONEG_ENABLE) {
xcvr = TYPHOON_XCVR_AUTONEG;
} else {
- if (cmd->duplex == DUPLEX_HALF) {
+ if (cmd->base.duplex == DUPLEX_HALF) {
if (speed == SPEED_10)
xcvr = TYPHOON_XCVR_10HALF;
else if (speed == SPEED_100)
xcvr = TYPHOON_XCVR_100HALF;
else
goto out;
- } else if (cmd->duplex == DUPLEX_FULL) {
+ } else if (cmd->base.duplex == DUPLEX_FULL) {
if (speed == SPEED_10)
xcvr = TYPHOON_XCVR_10FULL;
else if (speed == SPEED_100)
@@ -1091,12 +1096,12 @@ typhoon_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
goto out;
tp->xcvr_select = xcvr;
- if(cmd->autoneg == AUTONEG_ENABLE) {
+ if (cmd->base.autoneg == AUTONEG_ENABLE) {
tp->speed = 0xff; /* invalid */
tp->duplex = 0xff; /* invalid */
} else {
tp->speed = speed;
- tp->duplex = cmd->duplex;
+ tp->duplex = cmd->base.duplex;
}
out:
@@ -1145,13 +1150,13 @@ typhoon_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
}
static const struct ethtool_ops typhoon_ethtool_ops = {
- .get_settings = typhoon_get_settings,
- .set_settings = typhoon_set_settings,
.get_drvinfo = typhoon_get_drvinfo,
.get_wol = typhoon_get_wol,
.set_wol = typhoon_set_wol,
.get_link = ethtool_op_get_link,
.get_ringparam = typhoon_get_ringparam,
+ .get_link_ksettings = typhoon_get_link_ksettings,
+ .set_link_ksettings = typhoon_set_link_ksettings,
};
static int
@@ -2255,7 +2260,6 @@ static const struct net_device_ops typhoon_netdev_ops = {
.ndo_get_stats = typhoon_get_stats,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
- .ndo_change_mtu = eth_change_mtu,
};
static int
diff --git a/drivers/net/ethernet/8390/8390.c b/drivers/net/ethernet/8390/8390.c
index 5db1f55abef4..a43544af257b 100644
--- a/drivers/net/ethernet/8390/8390.c
+++ b/drivers/net/ethernet/8390/8390.c
@@ -64,7 +64,6 @@ const struct net_device_ops ei_netdev_ops = {
.ndo_set_rx_mode = ei_set_multicast_list,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
- .ndo_change_mtu = eth_change_mtu,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = ei_poll,
#endif
diff --git a/drivers/net/ethernet/8390/8390p.c b/drivers/net/ethernet/8390/8390p.c
index e8fc2e87e840..46d2257c4430 100644
--- a/drivers/net/ethernet/8390/8390p.c
+++ b/drivers/net/ethernet/8390/8390p.c
@@ -69,7 +69,6 @@ const struct net_device_ops eip_netdev_ops = {
.ndo_set_rx_mode = eip_set_multicast_list,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
- .ndo_change_mtu = eth_change_mtu,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = eip_poll,
#endif
diff --git a/drivers/net/ethernet/8390/ax88796.c b/drivers/net/ethernet/8390/ax88796.c
index 39ca9350d1b2..b0a3b85fc6f8 100644
--- a/drivers/net/ethernet/8390/ax88796.c
+++ b/drivers/net/ethernet/8390/ax88796.c
@@ -536,7 +536,6 @@ static const struct net_device_ops ax_netdev_ops = {
.ndo_set_rx_mode = ax_ei_set_multicast_list,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
- .ndo_change_mtu = eth_change_mtu,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = ax_ei_poll,
#endif
diff --git a/drivers/net/ethernet/8390/axnet_cs.c b/drivers/net/ethernet/8390/axnet_cs.c
index 4ea717d68c95..1d84a0544ace 100644
--- a/drivers/net/ethernet/8390/axnet_cs.c
+++ b/drivers/net/ethernet/8390/axnet_cs.c
@@ -134,7 +134,6 @@ static const struct net_device_ops axnet_netdev_ops = {
.ndo_tx_timeout = axnet_tx_timeout,
.ndo_get_stats = get_stats,
.ndo_set_rx_mode = set_multicast_list,
- .ndo_change_mtu = eth_change_mtu,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
};
diff --git a/drivers/net/ethernet/8390/etherh.c b/drivers/net/ethernet/8390/etherh.c
index d686b9cac29f..11cbf22ad201 100644
--- a/drivers/net/ethernet/8390/etherh.c
+++ b/drivers/net/ethernet/8390/etherh.c
@@ -654,7 +654,6 @@ static const struct net_device_ops etherh_netdev_ops = {
.ndo_set_rx_mode = __ei_set_multicast_list,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
- .ndo_change_mtu = eth_change_mtu,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = __ei_poll,
#endif
diff --git a/drivers/net/ethernet/8390/hydra.c b/drivers/net/ethernet/8390/hydra.c
index 0fe19d609c2e..8ae249195301 100644
--- a/drivers/net/ethernet/8390/hydra.c
+++ b/drivers/net/ethernet/8390/hydra.c
@@ -105,7 +105,6 @@ static const struct net_device_ops hydra_netdev_ops = {
.ndo_set_rx_mode = __ei_set_multicast_list,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
- .ndo_change_mtu = eth_change_mtu,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = __ei_poll,
#endif
diff --git a/drivers/net/ethernet/8390/mac8390.c b/drivers/net/ethernet/8390/mac8390.c
index b9283901136e..9497f18eaba0 100644
--- a/drivers/net/ethernet/8390/mac8390.c
+++ b/drivers/net/ethernet/8390/mac8390.c
@@ -483,7 +483,6 @@ static const struct net_device_ops mac8390_netdev_ops = {
.ndo_set_rx_mode = __ei_set_multicast_list,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
- .ndo_change_mtu = eth_change_mtu,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = __ei_poll,
#endif
diff --git a/drivers/net/ethernet/8390/mcf8390.c b/drivers/net/ethernet/8390/mcf8390.c
index e1c055574a11..4bb967bc879e 100644
--- a/drivers/net/ethernet/8390/mcf8390.c
+++ b/drivers/net/ethernet/8390/mcf8390.c
@@ -308,7 +308,6 @@ static const struct net_device_ops mcf8390_netdev_ops = {
.ndo_set_rx_mode = __ei_set_multicast_list,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
- .ndo_change_mtu = eth_change_mtu,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = __ei_poll,
#endif
diff --git a/drivers/net/ethernet/8390/ne2k-pci.c b/drivers/net/ethernet/8390/ne2k-pci.c
index 57e97910c728..07355302443d 100644
--- a/drivers/net/ethernet/8390/ne2k-pci.c
+++ b/drivers/net/ethernet/8390/ne2k-pci.c
@@ -209,7 +209,6 @@ static const struct net_device_ops ne2k_netdev_ops = {
.ndo_set_rx_mode = ei_set_multicast_list,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
- .ndo_change_mtu = eth_change_mtu,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = ei_poll,
#endif
diff --git a/drivers/net/ethernet/8390/pcnet_cs.c b/drivers/net/ethernet/8390/pcnet_cs.c
index 2f79d29f17f2..63079a6e20d9 100644
--- a/drivers/net/ethernet/8390/pcnet_cs.c
+++ b/drivers/net/ethernet/8390/pcnet_cs.c
@@ -227,7 +227,6 @@ static const struct net_device_ops pcnet_netdev_ops = {
.ndo_do_ioctl = ei_ioctl,
.ndo_set_rx_mode = ei_set_multicast_list,
.ndo_tx_timeout = ei_tx_timeout,
- .ndo_change_mtu = eth_change_mtu,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
diff --git a/drivers/net/ethernet/8390/smc-ultra.c b/drivers/net/ethernet/8390/smc-ultra.c
index 139385dcdaa7..364b6514f65f 100644
--- a/drivers/net/ethernet/8390/smc-ultra.c
+++ b/drivers/net/ethernet/8390/smc-ultra.c
@@ -195,7 +195,6 @@ static const struct net_device_ops ultra_netdev_ops = {
.ndo_set_rx_mode = ei_set_multicast_list,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
- .ndo_change_mtu = eth_change_mtu,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = ultra_poll,
#endif
diff --git a/drivers/net/ethernet/8390/wd.c b/drivers/net/ethernet/8390/wd.c
index dd7d816bde52..ad019cbc698f 100644
--- a/drivers/net/ethernet/8390/wd.c
+++ b/drivers/net/ethernet/8390/wd.c
@@ -156,7 +156,6 @@ static const struct net_device_ops wd_netdev_ops = {
.ndo_set_rx_mode = ei_set_multicast_list,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
- .ndo_change_mtu = eth_change_mtu,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = ei_poll,
#endif
diff --git a/drivers/net/ethernet/8390/zorro8390.c b/drivers/net/ethernet/8390/zorro8390.c
index 8308728fad05..6d93956b293b 100644
--- a/drivers/net/ethernet/8390/zorro8390.c
+++ b/drivers/net/ethernet/8390/zorro8390.c
@@ -284,7 +284,6 @@ static const struct net_device_ops zorro8390_netdev_ops = {
.ndo_set_rx_mode = __ei_set_multicast_list,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
- .ndo_change_mtu = eth_change_mtu,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = __ei_poll,
#endif
diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig
index 8cc7467b6c1f..e4c28fed61d5 100644
--- a/drivers/net/ethernet/Kconfig
+++ b/drivers/net/ethernet/Kconfig
@@ -21,6 +21,7 @@ source "drivers/net/ethernet/3com/Kconfig"
source "drivers/net/ethernet/adaptec/Kconfig"
source "drivers/net/ethernet/aeroflex/Kconfig"
source "drivers/net/ethernet/agere/Kconfig"
+source "drivers/net/ethernet/alacritech/Kconfig"
source "drivers/net/ethernet/allwinner/Kconfig"
source "drivers/net/ethernet/alteon/Kconfig"
source "drivers/net/ethernet/altera/Kconfig"
diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile
index a09423df83f2..24330f4885a9 100644
--- a/drivers/net/ethernet/Makefile
+++ b/drivers/net/ethernet/Makefile
@@ -7,6 +7,7 @@ obj-$(CONFIG_NET_VENDOR_8390) += 8390/
obj-$(CONFIG_NET_VENDOR_ADAPTEC) += adaptec/
obj-$(CONFIG_GRETH) += aeroflex/
obj-$(CONFIG_NET_VENDOR_AGERE) += agere/
+obj-$(CONFIG_NET_VENDOR_ALACRITECH) += alacritech/
obj-$(CONFIG_NET_VENDOR_ALLWINNER) += allwinner/
obj-$(CONFIG_NET_VENDOR_ALTEON) += alteon/
obj-$(CONFIG_ALTERA_TSE) += altera/
@@ -75,6 +76,7 @@ obj-$(CONFIG_NET_VENDOR_SEEQ) += seeq/
obj-$(CONFIG_NET_VENDOR_SILAN) += silan/
obj-$(CONFIG_NET_VENDOR_SIS) += sis/
obj-$(CONFIG_SFC) += sfc/
+obj-$(CONFIG_SFC_FALCON) += sfc/falcon/
obj-$(CONFIG_NET_VENDOR_SGI) += sgi/
obj-$(CONFIG_NET_VENDOR_SMSC) += smsc/
obj-$(CONFIG_NET_VENDOR_STMICRO) += stmicro/
diff --git a/drivers/net/ethernet/adaptec/starfire.c b/drivers/net/ethernet/adaptec/starfire.c
index 8af2c88d5b33..3aaad33cdbc6 100644
--- a/drivers/net/ethernet/adaptec/starfire.c
+++ b/drivers/net/ethernet/adaptec/starfire.c
@@ -634,7 +634,6 @@ static const struct net_device_ops netdev_ops = {
.ndo_get_stats = get_stats,
.ndo_set_rx_mode = set_rx_mode,
.ndo_do_ioctl = netdev_ioctl,
- .ndo_change_mtu = eth_change_mtu,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
#ifdef VLAN_SUPPORT
@@ -1817,21 +1816,23 @@ static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
strlcpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
}
-static int get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
+static int get_link_ksettings(struct net_device *dev,
+ struct ethtool_link_ksettings *cmd)
{
struct netdev_private *np = netdev_priv(dev);
spin_lock_irq(&np->lock);
- mii_ethtool_gset(&np->mii_if, ecmd);
+ mii_ethtool_get_link_ksettings(&np->mii_if, cmd);
spin_unlock_irq(&np->lock);
return 0;
}
-static int set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
+static int set_link_ksettings(struct net_device *dev,
+ const struct ethtool_link_ksettings *cmd)
{
struct netdev_private *np = netdev_priv(dev);
int res;
spin_lock_irq(&np->lock);
- res = mii_ethtool_sset(&np->mii_if, ecmd);
+ res = mii_ethtool_set_link_ksettings(&np->mii_if, cmd);
spin_unlock_irq(&np->lock);
check_duplex(dev);
return res;
@@ -1862,12 +1863,12 @@ static void set_msglevel(struct net_device *dev, u32 val)
static const struct ethtool_ops ethtool_ops = {
.begin = check_if_running,
.get_drvinfo = get_drvinfo,
- .get_settings = get_settings,
- .set_settings = set_settings,
.nway_reset = nway_reset,
.get_link = get_link,
.get_msglevel = get_msglevel,
.set_msglevel = set_msglevel,
+ .get_link_ksettings = get_link_ksettings,
+ .set_link_ksettings = set_link_ksettings,
};
static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
diff --git a/drivers/net/ethernet/adi/Kconfig b/drivers/net/ethernet/adi/Kconfig
index 6b94ba610399..98cc8f535021 100644
--- a/drivers/net/ethernet/adi/Kconfig
+++ b/drivers/net/ethernet/adi/Kconfig
@@ -58,7 +58,7 @@ config BFIN_RX_DESC_NUM
config BFIN_MAC_USE_HWSTAMP
bool "Use IEEE 1588 hwstamp"
depends on BFIN_MAC && BF518
- select PTP_1588_CLOCK
+ imply PTP_1588_CLOCK
default y
---help---
To support the IEEE 1588 Precision Time Protocol (PTP), select y here
diff --git a/drivers/net/ethernet/adi/bfin_mac.c b/drivers/net/ethernet/adi/bfin_mac.c
index 00f9ee3fc3e5..88164529b52a 100644
--- a/drivers/net/ethernet/adi/bfin_mac.c
+++ b/drivers/net/ethernet/adi/bfin_mac.c
@@ -1571,7 +1571,6 @@ static const struct net_device_ops bfin_mac_netdev_ops = {
.ndo_set_rx_mode = bfin_mac_set_multicast_list,
.ndo_do_ioctl = bfin_mac_ioctl,
.ndo_validate_addr = eth_validate_addr,
- .ndo_change_mtu = eth_change_mtu,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = bfin_mac_poll_controller,
#endif
diff --git a/drivers/net/ethernet/aeroflex/greth.c b/drivers/net/ethernet/aeroflex/greth.c
index f8df8248035e..93def92f9997 100644
--- a/drivers/net/ethernet/aeroflex/greth.c
+++ b/drivers/net/ethernet/aeroflex/greth.c
@@ -1290,15 +1290,6 @@ static int greth_mdio_probe(struct net_device *dev)
return 0;
}
-static inline int phy_aneg_done(struct phy_device *phydev)
-{
- int retval;
-
- retval = phy_read(phydev, MII_BMSR);
-
- return (retval < 0) ? retval : (retval & BMSR_ANEGCOMPLETE);
-}
-
static int greth_mdio_init(struct greth_private *greth)
{
int ret;
diff --git a/drivers/net/ethernet/agere/et131x.c b/drivers/net/ethernet/agere/et131x.c
index 906683851c7d..831bab352f8e 100644
--- a/drivers/net/ethernet/agere/et131x.c
+++ b/drivers/net/ethernet/agere/et131x.c
@@ -176,6 +176,8 @@ MODULE_DESCRIPTION("10/100/1000 Base-T Ethernet Driver for the ET1310 by Agere S
#define NUM_FBRS 2
#define MAX_PACKETS_HANDLED 256
+#define ET131X_MIN_MTU 64
+#define ET131X_MAX_MTU 9216
#define ALCATEL_MULTICAST_PKT 0x01000000
#define ALCATEL_BROADCAST_PKT 0x02000000
@@ -3869,9 +3871,6 @@ static int et131x_change_mtu(struct net_device *netdev, int new_mtu)
int result = 0;
struct et131x_adapter *adapter = netdev_priv(netdev);
- if (new_mtu < 64 || new_mtu > 9216)
- return -EINVAL;
-
et131x_disable_txrx(netdev);
netdev->mtu = new_mtu;
@@ -3958,6 +3957,8 @@ static int et131x_pci_setup(struct pci_dev *pdev,
netdev->watchdog_timeo = ET131X_TX_TIMEOUT;
netdev->netdev_ops = &et131x_netdev_ops;
+ netdev->min_mtu = ET131X_MIN_MTU;
+ netdev->max_mtu = ET131X_MAX_MTU;
SET_NETDEV_DEV(netdev, &pdev->dev);
netdev->ethtool_ops = &et131x_ethtool_ops;
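
The MTU hunks in this series all share one shape: the range check leaves
ndo_change_mtu because the net core now validates against dev->min_mtu and
dev->max_mtu before calling into the driver. Condensed from the et131x change
above:

        /* probe: advertise the legal range once */
        netdev->min_mtu = ET131X_MIN_MTU;       /* 64 */
        netdev->max_mtu = ET131X_MAX_MTU;       /* 9216 */

        /* et131x_change_mtu(): the open-coded 64..9216 check is gone;
         * out-of-range values never reach the driver any more.
         */
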
diff --git a/drivers/net/ethernet/alacritech/Kconfig b/drivers/net/ethernet/alacritech/Kconfig
new file mode 100644
index 000000000000..09496e18cdc5
--- /dev/null
+++ b/drivers/net/ethernet/alacritech/Kconfig
@@ -0,0 +1,28 @@
+config NET_VENDOR_ALACRITECH
+ bool "Alacritech devices"
+ default y
+ ---help---
+ If you have a network (Ethernet) card belonging to this class, say Y.
+
+ Note that the answer to this question doesn't directly affect the
+ kernel: saying N will just cause the configurator to skip all the
+ questions about Alacritech devices. If you say Y, you will be asked
+ for your specific device in the following questions.
+
+if NET_VENDOR_ALACRITECH
+
+config SLICOSS
+ tristate "Alacritech Slicoss support"
+ depends on PCI
+ select CRC32
+ ---help---
+ This driver supports Gigabit Ethernet adapters based on the
+ Session Layer Interface (SLIC) technology by Alacritech.
+
+ Supported are Mojave (1 port) and Oasis (1, 2 and 4 port) cards,
+ both copper and fiber.
+
+ To compile this driver as a module, choose M here: the module
+ will be called slicoss. This is recommended.
+
+endif # NET_VENDOR_ALACRITECH
diff --git a/drivers/net/ethernet/alacritech/Makefile b/drivers/net/ethernet/alacritech/Makefile
new file mode 100644
index 000000000000..8790e9ed8496
--- /dev/null
+++ b/drivers/net/ethernet/alacritech/Makefile
@@ -0,0 +1,4 @@
+#
+# Makefile for the Alacritech Slicoss driver
+#
+obj-$(CONFIG_SLICOSS) += slicoss.o
diff --git a/drivers/net/ethernet/alacritech/slic.h b/drivers/net/ethernet/alacritech/slic.h
new file mode 100644
index 000000000000..08931b4afc96
--- /dev/null
+++ b/drivers/net/ethernet/alacritech/slic.h
@@ -0,0 +1,575 @@
+
+#ifndef _SLIC_H
+#define _SLIC_H
+
+#include <linux/types.h>
+#include <linux/netdevice.h>
+#include <linux/spinlock_types.h>
+#include <linux/dma-mapping.h>
+#include <linux/pci.h>
+#include <linux/list.h>
+#include <linux/u64_stats_sync.h>
+
+#define SLIC_VGBSTAT_XPERR 0x40000000
+#define SLIC_VGBSTAT_XERRSHFT 25
+#define SLIC_VGBSTAT_XCSERR 0x23
+#define SLIC_VGBSTAT_XUFLOW 0x22
+#define SLIC_VGBSTAT_XHLEN 0x20
+#define SLIC_VGBSTAT_NETERR 0x01000000
+#define SLIC_VGBSTAT_NERRSHFT 16
+#define SLIC_VGBSTAT_NERRMSK 0x1ff
+#define SLIC_VGBSTAT_NCSERR 0x103
+#define SLIC_VGBSTAT_NUFLOW 0x102
+#define SLIC_VGBSTAT_NHLEN 0x100
+#define SLIC_VGBSTAT_LNKERR 0x00000080
+#define SLIC_VGBSTAT_LERRMSK 0xff
+#define SLIC_VGBSTAT_LDEARLY 0x86
+#define SLIC_VGBSTAT_LBOFLO 0x85
+#define SLIC_VGBSTAT_LCODERR 0x84
+#define SLIC_VGBSTAT_LDBLNBL 0x83
+#define SLIC_VGBSTAT_LCRCERR 0x82
+#define SLIC_VGBSTAT_LOFLO 0x81
+#define SLIC_VGBSTAT_LUFLO 0x80
+
+#define SLIC_IRHDDR_FLEN_MSK 0x0000ffff
+#define SLIC_IRHDDR_SVALID 0x80000000
+#define SLIC_IRHDDR_ERR 0x10000000
+
+#define SLIC_VRHSTAT_802OE 0x80000000
+#define SLIC_VRHSTAT_TPOFLO 0x10000000
+#define SLIC_VRHSTATB_802UE 0x80000000
+#define SLIC_VRHSTATB_RCVE 0x40000000
+#define SLIC_VRHSTATB_BUFF 0x20000000
+#define SLIC_VRHSTATB_CARRE 0x08000000
+#define SLIC_VRHSTATB_LONGE 0x02000000
+#define SLIC_VRHSTATB_PREA 0x01000000
+#define SLIC_VRHSTATB_CRC 0x00800000
+#define SLIC_VRHSTATB_DRBL 0x00400000
+#define SLIC_VRHSTATB_CODE 0x00200000
+#define SLIC_VRHSTATB_TPCSUM 0x00100000
+#define SLIC_VRHSTATB_TPHLEN 0x00080000
+#define SLIC_VRHSTATB_IPCSUM 0x00040000
+#define SLIC_VRHSTATB_IPLERR 0x00020000
+#define SLIC_VRHSTATB_IPHERR 0x00010000
+
+#define SLIC_CMD_XMT_REQ 0x01
+#define SLIC_CMD_TYPE_DUMB 3
+
+#define SLIC_RESET_MAGIC 0xDEAD
+#define SLIC_ICR_INT_OFF 0
+#define SLIC_ICR_INT_ON 1
+#define SLIC_ICR_INT_MASK 2
+
+#define SLIC_ISR_ERR 0x80000000
+#define SLIC_ISR_RCV 0x40000000
+#define SLIC_ISR_CMD 0x20000000
+#define SLIC_ISR_IO 0x60000000
+#define SLIC_ISR_UPC 0x10000000
+#define SLIC_ISR_LEVENT 0x08000000
+#define SLIC_ISR_RMISS 0x02000000
+#define SLIC_ISR_UPCERR 0x01000000
+#define SLIC_ISR_XDROP 0x00800000
+#define SLIC_ISR_UPCBSY 0x00020000
+
+#define SLIC_ISR_PING_MASK 0x00700000
+#define SLIC_ISR_UPCERR_MASK (SLIC_ISR_UPCERR | SLIC_ISR_UPCBSY)
+#define SLIC_ISR_UPC_MASK (SLIC_ISR_UPC | SLIC_ISR_UPCERR_MASK)
+#define SLIC_WCS_START 0x80000000
+#define SLIC_WCS_COMPARE 0x40000000
+#define SLIC_RCVWCS_BEGIN 0x40000000
+#define SLIC_RCVWCS_FINISH 0x80000000
+
+#define SLIC_MIICR_REG_16 0x00100000
+#define SLIC_MRV_REG16_XOVERON 0x0068
+
+#define SLIC_GIG_LINKUP 0x0001
+#define SLIC_GIG_FULLDUPLEX 0x0002
+#define SLIC_GIG_SPEED_MASK 0x000C
+#define SLIC_GIG_SPEED_1000 0x0008
+#define SLIC_GIG_SPEED_100 0x0004
+#define SLIC_GIG_SPEED_10 0x0000
+
+#define SLIC_GMCR_RESET 0x80000000
+#define SLIC_GMCR_GBIT 0x20000000
+#define SLIC_GMCR_FULLD 0x10000000
+#define SLIC_GMCR_GAPBB_SHIFT 14
+#define SLIC_GMCR_GAPR1_SHIFT 7
+#define SLIC_GMCR_GAPR2_SHIFT 0
+#define SLIC_GMCR_GAPBB_1000 0x60
+#define SLIC_GMCR_GAPR1_1000 0x2C
+#define SLIC_GMCR_GAPR2_1000 0x40
+#define SLIC_GMCR_GAPBB_100 0x70
+#define SLIC_GMCR_GAPR1_100 0x2C
+#define SLIC_GMCR_GAPR2_100 0x40
+
+#define SLIC_XCR_RESET 0x80000000
+#define SLIC_XCR_XMTEN 0x40000000
+#define SLIC_XCR_PAUSEEN 0x20000000
+#define SLIC_XCR_LOADRNG 0x10000000
+
+#define SLIC_GXCR_RESET 0x80000000
+#define SLIC_GXCR_XMTEN 0x40000000
+#define SLIC_GXCR_PAUSEEN 0x20000000
+
+#define SLIC_GRCR_RESET 0x80000000
+#define SLIC_GRCR_RCVEN 0x40000000
+#define SLIC_GRCR_RCVALL 0x20000000
+#define SLIC_GRCR_RCVBAD 0x10000000
+#define SLIC_GRCR_CTLEN 0x08000000
+#define SLIC_GRCR_ADDRAEN 0x02000000
+#define SLIC_GRCR_HASHSIZE_SHIFT 17
+#define SLIC_GRCR_HASHSIZE 14
+
+/* Reset Register */
+#define SLIC_REG_RESET 0x0000
+/* Interrupt Control Register */
+#define SLIC_REG_ICR 0x0008
+/* Interrupt status pointer */
+#define SLIC_REG_ISP 0x0010
+/* Interrupt status */
+#define SLIC_REG_ISR 0x0018
+/* Header buffer address reg
+ * 31-8 - phy addr of set of contiguous hdr buffers
+ * 7-0 - number of buffers passed
+ * Buffers are 256 bytes long on 256-byte boundaries.
+ */
+#define SLIC_REG_HBAR 0x0020
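
Given that layout, a hedged sketch of composing an HBAR write, where paddr is a
256-byte-aligned DMA address and count the number of buffers (both assumed to
come from the caller):

        /* the low 8 bits of paddr are zero by alignment, so OR in the count */
        u32 hbar = lower_32_bits(paddr) | (count & 0xff);
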
+/* Data buffer handle & address reg
+ * 4 sets of registers; Buffers are 2K bytes long, 2 per 4K page.
+ */
+#define SLIC_REG_DBAR 0x0028
+/* Xmt Cmd buf addr regs.
+ * 1 per XMT interface
+ * 31-5 - phy addr of host command buffer
+ * 4-0 - length of cmd in multiples of 32 bytes
+ * Buffers are from 32 up to 512 bytes long
+ */
+#define SLIC_REG_CBAR 0x0030
+/* Write control store */
+#define SLIC_REG_WCS 0x0034
+/* Response buffer address reg.
+ * 31-8 - phy addr of set of contiguous response buffers
+ * 7-0 - number of buffers passed
+ * Buffers are 32 bytes long on 32-byte boundaries.
+ */
+#define SLIC_REG_RBAR 0x0038
+/* Read statistics (UPR) */
+#define SLIC_REG_RSTAT 0x0040
+/* Read link status */
+#define SLIC_REG_LSTAT 0x0048
+/* Write Mac Config */
+#define SLIC_REG_WMCFG 0x0050
+/* Write phy register */
+#define SLIC_REG_WPHY 0x0058
+/* Rcv Cmd buf addr reg */
+#define SLIC_REG_RCBAR 0x0060
+/* Read SLIC Config */
+#define SLIC_REG_RCONFIG 0x0068
+/* Interrupt aggregation time */
+#define SLIC_REG_INTAGG 0x0070
+/* Write XMIT config reg */
+#define SLIC_REG_WXCFG 0x0078
+/* Write RCV config reg */
+#define SLIC_REG_WRCFG 0x0080
+/* Write rcv addr a low */
+#define SLIC_REG_WRADDRAL 0x0088
+/* Write rcv addr a high */
+#define SLIC_REG_WRADDRAH 0x0090
+/* Write rcv addr b low */
+#define SLIC_REG_WRADDRBL 0x0098
+/* Write rcv addr b high */
+#define SLIC_REG_WRADDRBH 0x00a0
+/* Low bits of mcast mask */
+#define SLIC_REG_MCASTLOW 0x00a8
+/* High bits of mcast mask */
+#define SLIC_REG_MCASTHIGH 0x00b0
+/* Ping the card */
+#define SLIC_REG_PING 0x00b8
+/* Dump command */
+#define SLIC_REG_DUMP_CMD 0x00c0
+/* Dump data pointer */
+#define SLIC_REG_DUMP_DATA 0x00c8
+/* Read card's pci_status register */
+#define SLIC_REG_PCISTATUS 0x00d0
+/* Write hostid field */
+#define SLIC_REG_WRHOSTID 0x00d8
+/* Put card in a low power state */
+#define SLIC_REG_LOW_POWER 0x00e0
+/* Force slic into quiescent state before soft reset */
+#define SLIC_REG_QUIESCE 0x00e8
+/* Reset interface queues */
+#define SLIC_REG_RESET_IFACE 0x00f0
+/* Register is only written when it has changed.
+ * Bits 63-32 for host i/f addrs.
+ */
+#define SLIC_REG_ADDR_UPPER 0x00f8
+/* 64 bit Header buffer address reg */
+#define SLIC_REG_HBAR64 0x0100
+/* 64 bit Data buffer handle & address reg */
+#define SLIC_REG_DBAR64 0x0108
+/* 64 bit Xmt Cmd buf addr regs. */
+#define SLIC_REG_CBAR64 0x0110
+/* 64 bit Response buffer address reg. */
+#define SLIC_REG_RBAR64 0x0118
+/* 64 bit Rcv Cmd buf addr reg */
+#define SLIC_REG_RCBAR64 0x0120
+/* Read statistics (64 bit UPR) */
+#define SLIC_REG_RSTAT64 0x0128
+/* Download Gigabit RCV sequencer ucode */
+#define SLIC_REG_RCV_WCS 0x0130
+/* Write VlanId field */
+#define SLIC_REG_WRVLANID 0x0138
+/* Read Transformer info */
+#define SLIC_REG_READ_XF_INFO 0x0140
+/* Write Transformer info */
+#define SLIC_REG_WRITE_XF_INFO 0x0148
+/* Write card ticks per second */
+#define SLIC_REG_TICKS_PER_SEC 0x0170
+#define SLIC_REG_HOSTID 0x1554
+
+#define PCI_VENDOR_ID_ALACRITECH 0x139A
+#define PCI_DEVICE_ID_ALACRITECH_MOJAVE 0x0005
+#define PCI_SUBDEVICE_ID_ALACRITECH_1000X1 0x0005
+#define PCI_SUBDEVICE_ID_ALACRITECH_1000X1_2 0x0006
+#define PCI_SUBDEVICE_ID_ALACRITECH_1000X1F 0x0007
+#define PCI_SUBDEVICE_ID_ALACRITECH_CICADA 0x0008
+#define PCI_SUBDEVICE_ID_ALACRITECH_SES1001T 0x2006
+#define PCI_SUBDEVICE_ID_ALACRITECH_SES1001F 0x2007
+#define PCI_DEVICE_ID_ALACRITECH_OASIS 0x0007
+#define PCI_SUBDEVICE_ID_ALACRITECH_SEN2002XT 0x000B
+#define PCI_SUBDEVICE_ID_ALACRITECH_SEN2002XF 0x000C
+#define PCI_SUBDEVICE_ID_ALACRITECH_SEN2001XT 0x000D
+#define PCI_SUBDEVICE_ID_ALACRITECH_SEN2001XF 0x000E
+#define PCI_SUBDEVICE_ID_ALACRITECH_SEN2104EF 0x000F
+#define PCI_SUBDEVICE_ID_ALACRITECH_SEN2104ET 0x0010
+#define PCI_SUBDEVICE_ID_ALACRITECH_SEN2102EF 0x0011
+#define PCI_SUBDEVICE_ID_ALACRITECH_SEN2102ET 0x0012
+
+/* Note: power of two required for the number of descriptors */
+#define SLIC_NUM_RX_LES 256
+#define SLIC_RX_BUFF_SIZE 2048
+#define SLIC_RX_BUFF_ALIGN 256
+#define SLIC_RX_BUFF_HDR_SIZE 34
+#define SLIC_MAX_REQ_RX_DESCS 1
+
+#define SLIC_NUM_TX_DESCS 256
+#define SLIC_TX_DESC_ALIGN 32
+#define SLIC_MIN_TX_WAKEUP_DESCS 10
+#define SLIC_MAX_REQ_TX_DESCS 1
+#define SLIC_MAX_TX_COMPLETIONS 100
+
+#define SLIC_NUM_STAT_DESCS 128
+#define SLIC_STATS_DESC_ALIGN 256
+
+#define SLIC_NUM_STAT_DESC_ARRAYS 4
+#define SLIC_INVALID_STAT_DESC_IDX 0xffffffff
+
+#define SLIC_NAPI_WEIGHT 64
+
+#define SLIC_UPR_LSTAT 0
+#define SLIC_UPR_CONFIG 1
+
+#define SLIC_EEPROM_SIZE 128
+#define SLIC_EEPROM_MAGIC 0xa5a5
+
+#define SLIC_FIRMWARE_MOJAVE "slicoss/gbdownload.sys"
+#define SLIC_FIRMWARE_OASIS "slicoss/oasisdownload.sys"
+#define SLIC_RCV_FIRMWARE_MOJAVE "slicoss/gbrcvucode.sys"
+#define SLIC_RCV_FIRMWARE_OASIS "slicoss/oasisrcvucode.sys"
+#define SLIC_FIRMWARE_MIN_SIZE 64
+#define SLIC_FIRMWARE_MAX_SECTIONS 3
+
+#define SLIC_MODEL_MOJAVE 0
+#define SLIC_MODEL_OASIS 1
+
+#define SLIC_INC_STATS_COUNTER(st, counter) \
+do { \
+ u64_stats_update_begin(&(st)->syncp); \
+ (st)->counter++; \
+ u64_stats_update_end(&(st)->syncp); \
+} while (0)
+
+#define SLIC_GET_STATS_COUNTER(newst, st, counter) \
+{ \
+ unsigned int start; \
+ do { \
+ start = u64_stats_fetch_begin_irq(&(st)->syncp); \
+ newst = (st)->counter; \
+ } while (u64_stats_fetch_retry_irq(&(st)->syncp, start)); \
+}
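
A minimal usage sketch of the two helpers (the function itself is hypothetical;
the field name comes from struct slic_stats below):

        static void slic_stats_example(struct slic_stats *st, u64 *rx_packets)
        {
                SLIC_INC_STATS_COUNTER(st, rx_packets);      /* writer side */
                SLIC_GET_STATS_COUNTER(*rx_packets, st, rx_packets); /* reader */
        }
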
+
+struct slic_upr {
+ dma_addr_t paddr;
+ unsigned int type;
+ struct list_head list;
+};
+
+struct slic_upr_list {
+ bool pending;
+ struct list_head list;
+ /* upr list lock */
+ spinlock_t lock;
+};
+
+/* SLIC EEPROM structure for Mojave */
+struct slic_mojave_eeprom {
+ __le16 id; /* 00 EEPROM/FLASH Magic code 'A5A5'*/
+ __le16 eeprom_code_size;/* 01 Size of EEPROM Codes (bytes * 4)*/
+ __le16 flash_size; /* 02 Flash size */
+ __le16 eeprom_size; /* 03 EEPROM Size */
+ __le16 vendor_id; /* 04 Vendor ID */
+ __le16 dev_id; /* 05 Device ID */
+ u8 rev_id; /* 06 Revision ID */
+ u8 class_code[3]; /* 07 Class Code */
+ u8 irqpin_dbg; /* 08 Debug Interrupt pin */
+ u8 irqpin; /* Network Interrupt Pin */
+ u8 min_grant; /* 09 Minimum grant */
+ u8 max_lat; /* Maximum Latency */
+ __le16 pci_stat; /* 10 PCI Status */
+ __le16 sub_vendor_id; /* 11 Subsystem Vendor Id */
+ __le16 sub_id; /* 12 Subsystem ID */
+ __le16 dev_id_dbg; /* 13 Debug Device Id */
+ __le16 ramrom; /* 14 Dram/Rom function */
+ __le16 dram_size2pci; /* 15 DRAM size to PCI (bytes * 64K) */
+ __le16 rom_size2pci; /* 16 ROM extension size to PCI (bytes * 4k) */
+ u8 pad[2]; /* 17 Padding */
+ u8 freetime; /* 18 FreeTime setting */
+ u8 ifctrl; /* 10-bit interface control (Mojave only) */
+ __le16 dram_size; /* 19 DRAM size (bytes * 64k) */
+ u8 mac[ETH_ALEN]; /* 20 MAC addresses */
+ u8 mac2[ETH_ALEN];
+ u8 pad2[6];
+ u16 dev_id2; /* Device ID for 2nd PCI function */
+ u8 irqpin2; /* Interrupt pin for 2nd PCI function */
+ u8 class_code2[3]; /* Class Code for 2nd PCI function */
+ u16 cfg_byte6; /* Config Byte 6 */
+ u16 pme_cap; /* Power Mgment capabilities */
+ u16 nwclk_ctrl; /* NetworkClockControls */
+ u8 fru_format; /* Alacritech FRU format type */
+ u8 fru_assembly[6]; /* Alacritech FRU information */
+ u8 fru_rev[2];
+ u8 fru_serial[14];
+ u8 fru_pad[3];
+ u8 oem_fru[28]; /* optional OEM FRU format type */
+ u8 pad3[4]; /* Pad to 128 bytes - includes 2 cksum bytes
+ * (if OEM FRU info exists) and two unusable
+ * bytes at the end
+ */
+};
+
+/* SLIC EEPROM structure for Oasis */
+struct slic_oasis_eeprom {
+ __le16 id; /* 00 EEPROM/FLASH Magic code 'A5A5' */
+	__le16 eeprom_code_size;/* 01 Size of EEPROM Codes (bytes * 4) */
+ __le16 spidev0_cfg; /* 02 Flash Config for SPI device 0 */
+ __le16 spidev1_cfg; /* 03 Flash Config for SPI device 1 */
+ __le16 vendor_id; /* 04 Vendor ID */
+ __le16 dev_id; /* 05 Device ID (function 0) */
+ u8 rev_id; /* 06 Revision ID */
+ u8 class_code0[3]; /* 07 Class Code for PCI function 0 */
+ u8 irqpin1; /* 08 Interrupt pin for PCI function 1*/
+ u8 class_code1[3]; /* 09 Class Code for PCI function 1 */
+ u8 irqpin2; /* 10 Interrupt pin for PCI function 2*/
+ u8 irqpin0; /* Interrupt pin for PCI function 0*/
+ u8 min_grant; /* 11 Minimum grant */
+ u8 max_lat; /* Maximum Latency */
+ __le16 sub_vendor_id; /* 12 Subsystem Vendor Id */
+ __le16 sub_id; /* 13 Subsystem ID */
+ __le16 flash_size; /* 14 Flash size (bytes / 4K) */
+ __le16 dram_size2pci; /* 15 DRAM size to PCI (bytes / 64K) */
+ __le16 rom_size2pci; /* 16 Flash (ROM extension) size to PCI
+ * (bytes / 4K)
+ */
+ __le16 dev_id1; /* 17 Device Id (function 1) */
+ __le16 dev_id2; /* 18 Device Id (function 2) */
+ __le16 dev_stat_cfg; /* 19 Device Status Config Bytes 6-7 */
+	__le16 pme_cap;		/* 20 Power Management capabilities */
+ u8 msi_cap; /* 21 MSI capabilities */
+ u8 clock_div; /* Clock divider */
+ __le16 pci_stat_lo; /* 22 PCI Status bits 15:0 */
+ __le16 pci_stat_hi; /* 23 PCI Status bits 31:16 */
+ __le16 dram_cfg_lo; /* 24 DRAM Configuration bits 15:0 */
+ __le16 dram_cfg_hi; /* 25 DRAM Configuration bits 31:16 */
+ __le16 dram_size; /* 26 DRAM size (bytes / 64K) */
+ __le16 gpio_tbi_ctrl; /* 27 GPIO/TBI controls for functions 1/0 */
+ __le16 eeprom_size; /* 28 EEPROM Size */
+ u8 mac[ETH_ALEN]; /* 29 MAC addresses (2 ports) */
+ u8 mac2[ETH_ALEN];
+ u8 fru_format; /* 35 Alacritech FRU format type */
+ u8 fru_assembly[6]; /* Alacritech FRU information */
+ u8 fru_rev[2];
+ u8 fru_serial[14];
+ u8 fru_pad[3];
+ u8 oem_fru[28]; /* optional OEM FRU information */
+ u8 pad[4]; /* Pad to 128 bytes - includes 2 checksum bytes
+ * (if OEM FRU info exists) and two unusable
+ * bytes at the end
+ */
+};
+
+struct slic_stats {
+ u64 rx_packets;
+ u64 rx_bytes;
+ u64 rx_mcasts;
+ u64 rx_errors;
+ u64 tx_packets;
+ u64 tx_bytes;
+ /* HW STATS */
+ u64 rx_buff_miss;
+ u64 tx_dropped;
+ u64 irq_errs;
+ /* transport layer */
+ u64 rx_tpcsum;
+ u64 rx_tpoflow;
+ u64 rx_tphlen;
+ /* ip layer */
+ u64 rx_ipcsum;
+ u64 rx_iplen;
+ u64 rx_iphlen;
+ /* link layer */
+ u64 rx_early;
+ u64 rx_buffoflow;
+ u64 rx_lcode;
+ u64 rx_drbl;
+ u64 rx_crc;
+ u64 rx_oflow802;
+ u64 rx_uflow802;
+ /* oasis only */
+ u64 tx_carrier;
+ struct u64_stats_sync syncp;
+};
+
+struct slic_shmem_data {
+ __le32 isr;
+ __le32 link;
+};
+
+struct slic_shmem {
+ dma_addr_t isr_paddr;
+ dma_addr_t link_paddr;
+ struct slic_shmem_data *shmem_data;
+};
+
+struct slic_rx_info_oasis {
+ __le32 frame_status;
+ __le32 frame_status_b;
+ __le32 time_stamp;
+ __le32 checksum;
+};
+
+struct slic_rx_info_mojave {
+ __le32 frame_status;
+ __le16 byte_cnt;
+ __le16 tp_chksum;
+ __le16 ctx_hash;
+ __le16 mac_hash;
+ __le16 buff_lnk;
+};
+
+struct slic_stat_desc {
+ __le32 hnd;
+ __u8 pad[8];
+ __le32 status;
+ __u8 pad2[16];
+};
+
+struct slic_stat_queue {
+ struct slic_stat_desc *descs[SLIC_NUM_STAT_DESC_ARRAYS];
+ dma_addr_t paddr[SLIC_NUM_STAT_DESC_ARRAYS];
+ unsigned int addr_offset[SLIC_NUM_STAT_DESC_ARRAYS];
+ unsigned int active_array;
+ unsigned int len;
+ unsigned int done_idx;
+ size_t mem_size;
+};
+
+struct slic_tx_desc {
+ __le32 hnd;
+ __le32 rsvd;
+ u8 cmd;
+ u8 flags;
+ __le16 rsvd2;
+ __le32 totlen;
+ __le32 paddrl;
+ __le32 paddrh;
+ __le32 len;
+ __le32 type;
+};
+
+struct slic_tx_buffer {
+ struct sk_buff *skb;
+ DEFINE_DMA_UNMAP_ADDR(map_addr);
+ DEFINE_DMA_UNMAP_LEN(map_len);
+ struct slic_tx_desc *desc;
+ dma_addr_t desc_paddr;
+};
+
+struct slic_tx_queue {
+ struct dma_pool *dma_pool;
+ struct slic_tx_buffer *txbuffs;
+ unsigned int len;
+ unsigned int put_idx;
+ unsigned int done_idx;
+};
+
+struct slic_rx_desc {
+ u8 pad[16];
+ __le32 buffer;
+ __le32 length;
+ __le32 status;
+};
+
+struct slic_rx_buffer {
+ struct sk_buff *skb;
+ DEFINE_DMA_UNMAP_ADDR(map_addr);
+ DEFINE_DMA_UNMAP_LEN(map_len);
+ unsigned int addr_offset;
+};
+
+struct slic_rx_queue {
+ struct slic_rx_buffer *rxbuffs;
+ unsigned int len;
+ unsigned int done_idx;
+ unsigned int put_idx;
+};
+
+struct slic_device {
+ struct pci_dev *pdev;
+ struct net_device *netdev;
+ void __iomem *regs;
+ /* upper address setting lock */
+ spinlock_t upper_lock;
+ struct slic_shmem shmem;
+ struct napi_struct napi;
+ struct slic_rx_queue rxq;
+ struct slic_tx_queue txq;
+ struct slic_stat_queue stq;
+ struct slic_stats stats;
+ struct slic_upr_list upr_list;
+ /* link configuration lock */
+ spinlock_t link_lock;
+ bool promisc;
+ int speed;
+ unsigned int duplex;
+ bool is_fiber;
+ unsigned char model;
+};
+
+static inline u32 slic_read(struct slic_device *sdev, unsigned int reg)
+{
+ return ioread32(sdev->regs + reg);
+}
+
+static inline void slic_write(struct slic_device *sdev, unsigned int reg,
+ u32 val)
+{
+ iowrite32(val, sdev->regs + reg);
+}
+
+static inline void slic_flush_write(struct slic_device *sdev)
+{
+ (void)ioread32(sdev->regs + SLIC_REG_HOSTID);
+}
+
+#endif /* _SLIC_H */
diff --git a/drivers/net/ethernet/alacritech/slicoss.c b/drivers/net/ethernet/alacritech/slicoss.c
new file mode 100644
index 000000000000..b21d8aa8d653
--- /dev/null
+++ b/drivers/net/ethernet/alacritech/slicoss.c
@@ -0,0 +1,1870 @@
+/*
+ * Driver for Gigabit Ethernet adapters based on the Session Layer
+ * Interface (SLIC) technology by Alacritech. The driver does not
+ * support the hardware acceleration features provided by these cards.
+ *
+ * Copyright (C) 2016 Lino Sanfilippo <LinoSanfilippo@gmx.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/if_ether.h>
+#include <linux/crc32.h>
+#include <linux/dma-mapping.h>
+#include <linux/ethtool.h>
+#include <linux/mii.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/firmware.h>
+#include <linux/list.h>
+#include <linux/u64_stats_sync.h>
+
+#include "slic.h"
+
+#define DRV_NAME "slicoss"
+#define DRV_VERSION "1.0"
+
+static const struct pci_device_id slic_id_tbl[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_ALACRITECH,
+ PCI_DEVICE_ID_ALACRITECH_MOJAVE) },
+ { PCI_DEVICE(PCI_VENDOR_ID_ALACRITECH,
+ PCI_DEVICE_ID_ALACRITECH_OASIS) },
+ { 0 }
+};
+
+static const char slic_stats_strings[][ETH_GSTRING_LEN] = {
+ "rx_packets",
+ "rx_bytes",
+ "rx_multicasts",
+ "rx_errors",
+ "rx_buff_miss",
+ "rx_tp_csum",
+ "rx_tp_oflow",
+ "rx_tp_hlen",
+ "rx_ip_csum",
+ "rx_ip_len",
+ "rx_ip_hdr_len",
+ "rx_early",
+ "rx_buff_oflow",
+ "rx_lcode",
+ "rx_drbl",
+ "rx_crc",
+ "rx_oflow_802",
+ "rx_uflow_802",
+ "tx_packets",
+ "tx_bytes",
+ "tx_carrier",
+ "tx_dropped",
+ "irq_errs",
+};
+
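+/* advance a queue index with wraparound; relies on the queue length being
+ * a power of two
+ */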
+static inline int slic_next_queue_idx(unsigned int idx, unsigned int qlen)
+{
+ return (idx + 1) & (qlen - 1);
+}
+
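+/* Number of free descriptors in a queue. One slot is always left unused,
+ * so a full queue (put_idx right behind done_idx) can be told apart from
+ * an empty one (put_idx == done_idx).
+ */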
+static inline int slic_get_free_queue_descs(unsigned int put_idx,
+ unsigned int done_idx,
+ unsigned int qlen)
+{
+ if (put_idx >= done_idx)
+ return (qlen - (put_idx - done_idx) - 1);
+ return (done_idx - put_idx - 1);
+}
+
+static unsigned int slic_next_compl_idx(struct slic_device *sdev)
+{
+ struct slic_stat_queue *stq = &sdev->stq;
+ unsigned int active = stq->active_array;
+ struct slic_stat_desc *descs;
+ struct slic_stat_desc *stat;
+ unsigned int idx;
+
+ descs = stq->descs[active];
+ stat = &descs[stq->done_idx];
+
+ if (!stat->status)
+ return SLIC_INVALID_STAT_DESC_IDX;
+
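+	/* the handle reported by the card is the tx descriptor index + 1,
+	 * as set up in slic_init_tx_queue()
+	 */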
+ idx = (le32_to_cpu(stat->hnd) & 0xffff) - 1;
+ /* reset desc */
+ stat->hnd = 0;
+ stat->status = 0;
+
+ stq->done_idx = slic_next_queue_idx(stq->done_idx, stq->len);
+ /* check for wraparound */
+ if (!stq->done_idx) {
+ dma_addr_t paddr = stq->paddr[active];
+
+ slic_write(sdev, SLIC_REG_RBAR, lower_32_bits(paddr) |
+ stq->len);
+ /* make sure new status descriptors are immediately available */
+ slic_flush_write(sdev);
+ active++;
+ active &= (SLIC_NUM_STAT_DESC_ARRAYS - 1);
+ stq->active_array = active;
+ }
+ return idx;
+}
+
+static unsigned int slic_get_free_tx_descs(struct slic_tx_queue *txq)
+{
+ /* ensure tail idx is updated */
+ smp_mb();
+ return slic_get_free_queue_descs(txq->put_idx, txq->done_idx, txq->len);
+}
+
+static unsigned int slic_get_free_rx_descs(struct slic_rx_queue *rxq)
+{
+ return slic_get_free_queue_descs(rxq->put_idx, rxq->done_idx, rxq->len);
+}
+
+static void slic_clear_upr_list(struct slic_upr_list *upr_list)
+{
+ struct slic_upr *upr;
+ struct slic_upr *tmp;
+
+ spin_lock_bh(&upr_list->lock);
+ list_for_each_entry_safe(upr, tmp, &upr_list->list, list) {
+ list_del(&upr->list);
+ kfree(upr);
+ }
+ upr_list->pending = false;
+ spin_unlock_bh(&upr_list->lock);
+}
+
+static void slic_start_upr(struct slic_device *sdev, struct slic_upr *upr)
+{
+ u32 reg;
+
+ reg = (upr->type == SLIC_UPR_CONFIG) ? SLIC_REG_RCONFIG :
+ SLIC_REG_LSTAT;
+ slic_write(sdev, reg, lower_32_bits(upr->paddr));
+ slic_flush_write(sdev);
+}
+
+static void slic_queue_upr(struct slic_device *sdev, struct slic_upr *upr)
+{
+ struct slic_upr_list *upr_list = &sdev->upr_list;
+ bool pending;
+
+ spin_lock_bh(&upr_list->lock);
+ pending = upr_list->pending;
+ INIT_LIST_HEAD(&upr->list);
+ list_add_tail(&upr->list, &upr_list->list);
+ upr_list->pending = true;
+ spin_unlock_bh(&upr_list->lock);
+
+ if (!pending)
+ slic_start_upr(sdev, upr);
+}
+
+static struct slic_upr *slic_dequeue_upr(struct slic_device *sdev)
+{
+ struct slic_upr_list *upr_list = &sdev->upr_list;
+ struct slic_upr *next_upr = NULL;
+ struct slic_upr *upr = NULL;
+
+ spin_lock_bh(&upr_list->lock);
+ if (!list_empty(&upr_list->list)) {
+ upr = list_first_entry(&upr_list->list, struct slic_upr, list);
+ list_del(&upr->list);
+
+ if (list_empty(&upr_list->list))
+ upr_list->pending = false;
+ else
+ next_upr = list_first_entry(&upr_list->list,
+ struct slic_upr, list);
+ }
+ spin_unlock_bh(&upr_list->lock);
+ /* trigger processing of the next upr in list */
+ if (next_upr)
+ slic_start_upr(sdev, next_upr);
+
+ return upr;
+}
+
+static int slic_new_upr(struct slic_device *sdev, unsigned int type,
+ dma_addr_t paddr)
+{
+ struct slic_upr *upr;
+
+ upr = kmalloc(sizeof(*upr), GFP_ATOMIC);
+ if (!upr)
+ return -ENOMEM;
+ upr->type = type;
+ upr->paddr = paddr;
+
+ slic_queue_upr(sdev, upr);
+
+ return 0;
+}
+
+static void slic_set_mcast_bit(u64 *mcmask, unsigned char const *addr)
+{
+ u64 mask = *mcmask;
+ u8 crc;
+	/* Compute the CRC over the mac address: we use bits 1-8 (lsb),
+	 * bitwise reversed; the msb (= lsb bit 0 before bitrev) is discarded
+	 * automatically by the truncation to u8.
+	 */
+ crc = ether_crc(ETH_ALEN, addr) >> 23;
+ /* we only have space on the SLIC for 64 entries */
+ crc &= 0x3F;
+ mask |= (u64)1 << crc;
+ *mcmask = mask;
+}
+
+/* must be called with link_lock held */
+static void slic_configure_rcv(struct slic_device *sdev)
+{
+ u32 val;
+
+ val = SLIC_GRCR_RESET | SLIC_GRCR_ADDRAEN | SLIC_GRCR_RCVEN |
+ SLIC_GRCR_HASHSIZE << SLIC_GRCR_HASHSIZE_SHIFT | SLIC_GRCR_RCVBAD;
+
+ if (sdev->duplex == DUPLEX_FULL)
+ val |= SLIC_GRCR_CTLEN;
+
+ if (sdev->promisc)
+ val |= SLIC_GRCR_RCVALL;
+
+ slic_write(sdev, SLIC_REG_WRCFG, val);
+}
+
+/* must be called with link_lock held */
+static void slic_configure_xmt(struct slic_device *sdev)
+{
+ u32 val;
+
+ val = SLIC_GXCR_RESET | SLIC_GXCR_XMTEN;
+
+ if (sdev->duplex == DUPLEX_FULL)
+ val |= SLIC_GXCR_PAUSEEN;
+
+ slic_write(sdev, SLIC_REG_WXCFG, val);
+}
+
+/* must be called with link_lock held */
+static void slic_configure_mac(struct slic_device *sdev)
+{
+ u32 val;
+
+ if (sdev->speed == SPEED_1000) {
+ val = SLIC_GMCR_GAPBB_1000 << SLIC_GMCR_GAPBB_SHIFT |
+ SLIC_GMCR_GAPR1_1000 << SLIC_GMCR_GAPR1_SHIFT |
+ SLIC_GMCR_GAPR2_1000 << SLIC_GMCR_GAPR2_SHIFT |
+ SLIC_GMCR_GBIT; /* enable GMII */
+ } else {
+ val = SLIC_GMCR_GAPBB_100 << SLIC_GMCR_GAPBB_SHIFT |
+ SLIC_GMCR_GAPR1_100 << SLIC_GMCR_GAPR1_SHIFT |
+ SLIC_GMCR_GAPR2_100 << SLIC_GMCR_GAPR2_SHIFT;
+ }
+
+ if (sdev->duplex == DUPLEX_FULL)
+ val |= SLIC_GMCR_FULLD;
+
+ slic_write(sdev, SLIC_REG_WMCFG, val);
+}
+
+static void slic_configure_link_locked(struct slic_device *sdev, int speed,
+ unsigned int duplex)
+{
+ struct net_device *dev = sdev->netdev;
+
+ if (sdev->speed == speed && sdev->duplex == duplex)
+ return;
+
+ sdev->speed = speed;
+ sdev->duplex = duplex;
+
+ if (sdev->speed == SPEED_UNKNOWN) {
+ if (netif_carrier_ok(dev))
+ netif_carrier_off(dev);
+ } else {
+ /* (re)configure link settings */
+ slic_configure_mac(sdev);
+ slic_configure_xmt(sdev);
+ slic_configure_rcv(sdev);
+ slic_flush_write(sdev);
+
+ if (!netif_carrier_ok(dev))
+ netif_carrier_on(dev);
+ }
+}
+
+static void slic_configure_link(struct slic_device *sdev, int speed,
+ unsigned int duplex)
+{
+ spin_lock_bh(&sdev->link_lock);
+ slic_configure_link_locked(sdev, speed, duplex);
+ spin_unlock_bh(&sdev->link_lock);
+}
+
+static void slic_set_rx_mode(struct net_device *dev)
+{
+ struct slic_device *sdev = netdev_priv(dev);
+ struct netdev_hw_addr *hwaddr;
+ bool set_promisc;
+ u64 mcmask;
+
+ if (dev->flags & (IFF_PROMISC | IFF_ALLMULTI)) {
+ /* Turn on all multicast addresses. We have to do this for
+ * promiscuous mode as well as ALLMCAST mode (it saves the
+ * microcode from having to keep state about the MAC
+ * configuration).
+ */
+ mcmask = ~(u64)0;
+ } else {
+ mcmask = 0;
+
+ netdev_for_each_mc_addr(hwaddr, dev) {
+ slic_set_mcast_bit(&mcmask, hwaddr->addr);
+ }
+ }
+
+ slic_write(sdev, SLIC_REG_MCASTLOW, lower_32_bits(mcmask));
+ slic_write(sdev, SLIC_REG_MCASTHIGH, upper_32_bits(mcmask));
+
+ set_promisc = !!(dev->flags & IFF_PROMISC);
+
+ spin_lock_bh(&sdev->link_lock);
+ if (sdev->promisc != set_promisc) {
+ sdev->promisc = set_promisc;
+ slic_configure_rcv(sdev);
+		/* make sure writes to the receiver can't leak out of the lock */
+ mmiowb();
+ }
+ spin_unlock_bh(&sdev->link_lock);
+}
+
+static void slic_xmit_complete(struct slic_device *sdev)
+{
+ struct slic_tx_queue *txq = &sdev->txq;
+ struct net_device *dev = sdev->netdev;
+ unsigned int idx = txq->done_idx;
+ struct slic_tx_buffer *buff;
+ unsigned int frames = 0;
+ unsigned int bytes = 0;
+
+	/* Limit processing to SLIC_MAX_TX_COMPLETIONS frames so that new
+	 * completions arriving during processing cannot keep the loop
+	 * running endlessly.
+	 */
+ do {
+ idx = slic_next_compl_idx(sdev);
+ if (idx == SLIC_INVALID_STAT_DESC_IDX)
+ break;
+
+ txq->done_idx = idx;
+ buff = &txq->txbuffs[idx];
+
+ if (unlikely(!buff->skb)) {
+ netdev_warn(dev,
+ "no skb found for desc idx %i\n", idx);
+ continue;
+ }
+ dma_unmap_single(&sdev->pdev->dev,
+ dma_unmap_addr(buff, map_addr),
+ dma_unmap_len(buff, map_len), DMA_TO_DEVICE);
+
+ bytes += buff->skb->len;
+ frames++;
+
+ dev_kfree_skb_any(buff->skb);
+ buff->skb = NULL;
+ } while (frames < SLIC_MAX_TX_COMPLETIONS);
+ /* make sure xmit sees the new value for done_idx */
+ smp_wmb();
+
+ u64_stats_update_begin(&sdev->stats.syncp);
+ sdev->stats.tx_bytes += bytes;
+ sdev->stats.tx_packets += frames;
+ u64_stats_update_end(&sdev->stats.syncp);
+
+ netif_tx_lock(dev);
+ if (netif_queue_stopped(dev) &&
+ (slic_get_free_tx_descs(txq) >= SLIC_MIN_TX_WAKEUP_DESCS))
+ netif_wake_queue(dev);
+ netif_tx_unlock(dev);
+}
+
+static void slic_refill_rx_queue(struct slic_device *sdev, gfp_t gfp)
+{
+ const unsigned int ALIGN_MASK = SLIC_RX_BUFF_ALIGN - 1;
+ unsigned int maplen = SLIC_RX_BUFF_SIZE;
+ struct slic_rx_queue *rxq = &sdev->rxq;
+ struct net_device *dev = sdev->netdev;
+ struct slic_rx_buffer *buff;
+ struct slic_rx_desc *desc;
+ unsigned int misalign;
+ unsigned int offset;
+ struct sk_buff *skb;
+ dma_addr_t paddr;
+
+ while (slic_get_free_rx_descs(rxq) > SLIC_MAX_REQ_RX_DESCS) {
+ skb = alloc_skb(maplen + ALIGN_MASK, gfp);
+ if (!skb)
+ break;
+
+ paddr = dma_map_single(&sdev->pdev->dev, skb->data, maplen,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(&sdev->pdev->dev, paddr)) {
+ netdev_err(dev, "mapping rx packet failed\n");
+ /* drop skb */
+ dev_kfree_skb_any(skb);
+ break;
+ }
+		/* ensure head buffer descriptors are 256-byte aligned */
+ offset = 0;
+ misalign = paddr & ALIGN_MASK;
+ if (misalign) {
+ offset = SLIC_RX_BUFF_ALIGN - misalign;
+ skb_reserve(skb, offset);
+ }
+ /* the HW expects dma chunks for descriptor + frame data */
+ desc = (struct slic_rx_desc *)skb->data;
+ /* temporarily sync descriptor for CPU to clear status */
+ dma_sync_single_for_cpu(&sdev->pdev->dev, paddr,
+ offset + sizeof(*desc),
+ DMA_FROM_DEVICE);
+ desc->status = 0;
+ /* return it to HW again */
+ dma_sync_single_for_device(&sdev->pdev->dev, paddr,
+ offset + sizeof(*desc),
+ DMA_FROM_DEVICE);
+
+ buff = &rxq->rxbuffs[rxq->put_idx];
+ buff->skb = skb;
+ dma_unmap_addr_set(buff, map_addr, paddr);
+ dma_unmap_len_set(buff, map_len, maplen);
+ buff->addr_offset = offset;
+ /* complete write to descriptor before it is handed to HW */
+ wmb();
+ /* head buffer descriptors are placed immediately before skb */
+ slic_write(sdev, SLIC_REG_HBAR, lower_32_bits(paddr) + offset);
+ rxq->put_idx = slic_next_queue_idx(rxq->put_idx, rxq->len);
+ }
+}
+
+static void slic_handle_frame_error(struct slic_device *sdev,
+ struct sk_buff *skb)
+{
+ struct slic_stats *stats = &sdev->stats;
+
+ if (sdev->model == SLIC_MODEL_OASIS) {
+ struct slic_rx_info_oasis *info;
+ u32 status_b;
+ u32 status;
+
+ info = (struct slic_rx_info_oasis *)skb->data;
+ status = le32_to_cpu(info->frame_status);
+ status_b = le32_to_cpu(info->frame_status_b);
+ /* transport layer */
+ if (status_b & SLIC_VRHSTATB_TPCSUM)
+ SLIC_INC_STATS_COUNTER(stats, rx_tpcsum);
+ if (status & SLIC_VRHSTAT_TPOFLO)
+ SLIC_INC_STATS_COUNTER(stats, rx_tpoflow);
+ if (status_b & SLIC_VRHSTATB_TPHLEN)
+ SLIC_INC_STATS_COUNTER(stats, rx_tphlen);
+ /* ip layer */
+ if (status_b & SLIC_VRHSTATB_IPCSUM)
+ SLIC_INC_STATS_COUNTER(stats, rx_ipcsum);
+ if (status_b & SLIC_VRHSTATB_IPLERR)
+ SLIC_INC_STATS_COUNTER(stats, rx_iplen);
+ if (status_b & SLIC_VRHSTATB_IPHERR)
+ SLIC_INC_STATS_COUNTER(stats, rx_iphlen);
+ /* link layer */
+ if (status_b & SLIC_VRHSTATB_RCVE)
+ SLIC_INC_STATS_COUNTER(stats, rx_early);
+ if (status_b & SLIC_VRHSTATB_BUFF)
+ SLIC_INC_STATS_COUNTER(stats, rx_buffoflow);
+ if (status_b & SLIC_VRHSTATB_CODE)
+ SLIC_INC_STATS_COUNTER(stats, rx_lcode);
+ if (status_b & SLIC_VRHSTATB_DRBL)
+ SLIC_INC_STATS_COUNTER(stats, rx_drbl);
+ if (status_b & SLIC_VRHSTATB_CRC)
+ SLIC_INC_STATS_COUNTER(stats, rx_crc);
+ if (status & SLIC_VRHSTAT_802OE)
+ SLIC_INC_STATS_COUNTER(stats, rx_oflow802);
+ if (status_b & SLIC_VRHSTATB_802UE)
+ SLIC_INC_STATS_COUNTER(stats, rx_uflow802);
+ if (status_b & SLIC_VRHSTATB_CARRE)
+ SLIC_INC_STATS_COUNTER(stats, tx_carrier);
+ } else { /* mojave */
+ struct slic_rx_info_mojave *info;
+ u32 status;
+
+ info = (struct slic_rx_info_mojave *)skb->data;
+ status = le32_to_cpu(info->frame_status);
+ /* transport layer */
+ if (status & SLIC_VGBSTAT_XPERR) {
+ u32 xerr = status >> SLIC_VGBSTAT_XERRSHFT;
+
+ if (xerr == SLIC_VGBSTAT_XCSERR)
+ SLIC_INC_STATS_COUNTER(stats, rx_tpcsum);
+ if (xerr == SLIC_VGBSTAT_XUFLOW)
+ SLIC_INC_STATS_COUNTER(stats, rx_tpoflow);
+ if (xerr == SLIC_VGBSTAT_XHLEN)
+ SLIC_INC_STATS_COUNTER(stats, rx_tphlen);
+ }
+ /* ip layer */
+ if (status & SLIC_VGBSTAT_NETERR) {
+ u32 nerr = status >> SLIC_VGBSTAT_NERRSHFT &
+ SLIC_VGBSTAT_NERRMSK;
+
+ if (nerr == SLIC_VGBSTAT_NCSERR)
+ SLIC_INC_STATS_COUNTER(stats, rx_ipcsum);
+ if (nerr == SLIC_VGBSTAT_NUFLOW)
+ SLIC_INC_STATS_COUNTER(stats, rx_iplen);
+ if (nerr == SLIC_VGBSTAT_NHLEN)
+ SLIC_INC_STATS_COUNTER(stats, rx_iphlen);
+ }
+ /* link layer */
+ if (status & SLIC_VGBSTAT_LNKERR) {
+ u32 lerr = status & SLIC_VGBSTAT_LERRMSK;
+
+ if (lerr == SLIC_VGBSTAT_LDEARLY)
+ SLIC_INC_STATS_COUNTER(stats, rx_early);
+ if (lerr == SLIC_VGBSTAT_LBOFLO)
+ SLIC_INC_STATS_COUNTER(stats, rx_buffoflow);
+ if (lerr == SLIC_VGBSTAT_LCODERR)
+ SLIC_INC_STATS_COUNTER(stats, rx_lcode);
+ if (lerr == SLIC_VGBSTAT_LDBLNBL)
+ SLIC_INC_STATS_COUNTER(stats, rx_drbl);
+ if (lerr == SLIC_VGBSTAT_LCRCERR)
+ SLIC_INC_STATS_COUNTER(stats, rx_crc);
+ if (lerr == SLIC_VGBSTAT_LOFLO)
+ SLIC_INC_STATS_COUNTER(stats, rx_oflow802);
+ if (lerr == SLIC_VGBSTAT_LUFLO)
+ SLIC_INC_STATS_COUNTER(stats, rx_uflow802);
+ }
+ }
+ SLIC_INC_STATS_COUNTER(stats, rx_errors);
+}
+
+static void slic_handle_receive(struct slic_device *sdev, unsigned int todo,
+ unsigned int *done)
+{
+ struct slic_rx_queue *rxq = &sdev->rxq;
+ struct net_device *dev = sdev->netdev;
+ struct slic_rx_buffer *buff;
+ struct slic_rx_desc *desc;
+ unsigned int frames = 0;
+ unsigned int bytes = 0;
+ struct sk_buff *skb;
+ u32 status;
+ u32 len;
+
+ while (todo && (rxq->done_idx != rxq->put_idx)) {
+ buff = &rxq->rxbuffs[rxq->done_idx];
+
+ skb = buff->skb;
+ if (!skb)
+ break;
+
+ desc = (struct slic_rx_desc *)skb->data;
+
+ dma_sync_single_for_cpu(&sdev->pdev->dev,
+ dma_unmap_addr(buff, map_addr),
+ buff->addr_offset + sizeof(*desc),
+ DMA_FROM_DEVICE);
+
+ status = le32_to_cpu(desc->status);
+ if (!(status & SLIC_IRHDDR_SVALID)) {
+ dma_sync_single_for_device(&sdev->pdev->dev,
+ dma_unmap_addr(buff,
+ map_addr),
+ buff->addr_offset +
+ sizeof(*desc),
+ DMA_FROM_DEVICE);
+ break;
+ }
+
+ buff->skb = NULL;
+
+ dma_unmap_single(&sdev->pdev->dev,
+ dma_unmap_addr(buff, map_addr),
+ dma_unmap_len(buff, map_len),
+ DMA_FROM_DEVICE);
+
+		/* skip the rx descriptor that is placed before the frame data */
+ skb_reserve(skb, SLIC_RX_BUFF_HDR_SIZE);
+
+ if (unlikely(status & SLIC_IRHDDR_ERR)) {
+ slic_handle_frame_error(sdev, skb);
+ dev_kfree_skb_any(skb);
+ } else {
+ struct ethhdr *eh = (struct ethhdr *)skb->data;
+
+ if (is_multicast_ether_addr(eh->h_dest))
+ SLIC_INC_STATS_COUNTER(&sdev->stats, rx_mcasts);
+
+ len = le32_to_cpu(desc->length) & SLIC_IRHDDR_FLEN_MSK;
+ skb_put(skb, len);
+ skb->protocol = eth_type_trans(skb, dev);
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+ napi_gro_receive(&sdev->napi, skb);
+
+ bytes += len;
+ frames++;
+ }
+ rxq->done_idx = slic_next_queue_idx(rxq->done_idx, rxq->len);
+ todo--;
+ }
+
+ u64_stats_update_begin(&sdev->stats.syncp);
+ sdev->stats.rx_bytes += bytes;
+ sdev->stats.rx_packets += frames;
+ u64_stats_update_end(&sdev->stats.syncp);
+
+ slic_refill_rx_queue(sdev, GFP_ATOMIC);
+}
+
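+/* evaluate the link status word the card has written into shared memory in
+ * response to a SLIC_UPR_LSTAT request
+ */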
+static void slic_handle_link_irq(struct slic_device *sdev)
+{
+ struct slic_shmem *sm = &sdev->shmem;
+ struct slic_shmem_data *sm_data = sm->shmem_data;
+ unsigned int duplex;
+ int speed;
+ u32 link;
+
+ link = le32_to_cpu(sm_data->link);
+
+ if (link & SLIC_GIG_LINKUP) {
+ if (link & SLIC_GIG_SPEED_1000)
+ speed = SPEED_1000;
+ else if (link & SLIC_GIG_SPEED_100)
+ speed = SPEED_100;
+ else
+ speed = SPEED_10;
+
+ duplex = (link & SLIC_GIG_FULLDUPLEX) ? DUPLEX_FULL :
+ DUPLEX_HALF;
+ } else {
+ duplex = DUPLEX_UNKNOWN;
+ speed = SPEED_UNKNOWN;
+ }
+ slic_configure_link(sdev, speed, duplex);
+}
+
+static void slic_handle_upr_irq(struct slic_device *sdev, u32 irqs)
+{
+ struct slic_upr *upr;
+
+	/* remove the upr that caused this irq (always the first entry in the list) */
+ upr = slic_dequeue_upr(sdev);
+ if (!upr) {
+ netdev_warn(sdev->netdev, "no upr found on list\n");
+ return;
+ }
+
+ if (upr->type == SLIC_UPR_LSTAT) {
+ if (unlikely(irqs & SLIC_ISR_UPCERR_MASK)) {
+ /* try again */
+ slic_queue_upr(sdev, upr);
+ return;
+ }
+ slic_handle_link_irq(sdev);
+ }
+ kfree(upr);
+}
+
+static int slic_handle_link_change(struct slic_device *sdev)
+{
+ return slic_new_upr(sdev, SLIC_UPR_LSTAT, sdev->shmem.link_paddr);
+}
+
+static void slic_handle_err_irq(struct slic_device *sdev, u32 isr)
+{
+ struct slic_stats *stats = &sdev->stats;
+
+ if (isr & SLIC_ISR_RMISS)
+ SLIC_INC_STATS_COUNTER(stats, rx_buff_miss);
+ if (isr & SLIC_ISR_XDROP)
+ SLIC_INC_STATS_COUNTER(stats, tx_dropped);
+ if (!(isr & (SLIC_ISR_RMISS | SLIC_ISR_XDROP)))
+ SLIC_INC_STATS_COUNTER(stats, irq_errs);
+}
+
+static void slic_handle_irq(struct slic_device *sdev, u32 isr,
+ unsigned int todo, unsigned int *done)
+{
+ if (isr & SLIC_ISR_ERR)
+ slic_handle_err_irq(sdev, isr);
+
+ if (isr & SLIC_ISR_LEVENT)
+ slic_handle_link_change(sdev);
+
+ if (isr & SLIC_ISR_UPC_MASK)
+ slic_handle_upr_irq(sdev, isr);
+
+ if (isr & SLIC_ISR_RCV)
+ slic_handle_receive(sdev, todo, done);
+
+ if (isr & SLIC_ISR_CMD)
+ slic_xmit_complete(sdev);
+}
+
+static int slic_poll(struct napi_struct *napi, int todo)
+{
+ struct slic_device *sdev = container_of(napi, struct slic_device, napi);
+ struct slic_shmem *sm = &sdev->shmem;
+ struct slic_shmem_data *sm_data = sm->shmem_data;
+ u32 isr = le32_to_cpu(sm_data->isr);
+ int done = 0;
+
+ slic_handle_irq(sdev, isr, todo, &done);
+
+ if (done < todo) {
+ napi_complete_done(napi, done);
+ /* reenable irqs */
+ sm_data->isr = 0;
+		/* make sure sm_data->isr is cleared before irqs are re-enabled */
+ wmb();
+ slic_write(sdev, SLIC_REG_ISR, 0);
+ slic_flush_write(sdev);
+ }
+
+ return done;
+}
+
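+/* The card reports its interrupt status through the isr word in shared
+ * memory. Further status updates are masked via ICR while the interrupt is
+ * being handled; a zero status word indicates a spurious interrupt.
+ */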
+static irqreturn_t slic_irq(int irq, void *dev_id)
+{
+ struct slic_device *sdev = dev_id;
+ struct slic_shmem *sm = &sdev->shmem;
+ struct slic_shmem_data *sm_data = sm->shmem_data;
+
+ slic_write(sdev, SLIC_REG_ICR, SLIC_ICR_INT_MASK);
+ slic_flush_write(sdev);
+ /* make sure sm_data->isr is read after ICR_INT_MASK is set */
+ wmb();
+
+ if (!sm_data->isr) {
+ dma_rmb();
+ /* spurious interrupt */
+ slic_write(sdev, SLIC_REG_ISR, 0);
+ slic_flush_write(sdev);
+ return IRQ_NONE;
+ }
+
+ napi_schedule_irqoff(&sdev->napi);
+
+ return IRQ_HANDLED;
+}
+
+static void slic_card_reset(struct slic_device *sdev)
+{
+ u16 cmd;
+
+ slic_write(sdev, SLIC_REG_RESET, SLIC_RESET_MAGIC);
+ /* flush write by means of config space */
+ pci_read_config_word(sdev->pdev, PCI_COMMAND, &cmd);
+ mdelay(1);
+}
+
+static int slic_init_stat_queue(struct slic_device *sdev)
+{
+ const unsigned int DESC_ALIGN_MASK = SLIC_STATS_DESC_ALIGN - 1;
+ struct slic_stat_queue *stq = &sdev->stq;
+ struct slic_stat_desc *descs;
+ unsigned int misalign;
+ unsigned int offset;
+ dma_addr_t paddr;
+ size_t size;
+ int err;
+ int i;
+
+ stq->len = SLIC_NUM_STAT_DESCS;
+ stq->active_array = 0;
+ stq->done_idx = 0;
+
+ size = stq->len * sizeof(*descs) + DESC_ALIGN_MASK;
+
+ for (i = 0; i < SLIC_NUM_STAT_DESC_ARRAYS; i++) {
+ descs = dma_zalloc_coherent(&sdev->pdev->dev, size, &paddr,
+ GFP_KERNEL);
+ if (!descs) {
+ netdev_err(sdev->netdev,
+ "failed to allocate status descriptors\n");
+ err = -ENOMEM;
+ goto free_descs;
+ }
+ /* ensure correct alignment */
+ offset = 0;
+ misalign = paddr & DESC_ALIGN_MASK;
+ if (misalign) {
+ offset = SLIC_STATS_DESC_ALIGN - misalign;
+ descs += offset;
+ paddr += offset;
+ }
+
+ slic_write(sdev, SLIC_REG_RBAR, lower_32_bits(paddr) |
+ stq->len);
+ stq->descs[i] = descs;
+ stq->paddr[i] = paddr;
+ stq->addr_offset[i] = offset;
+ }
+
+ stq->mem_size = size;
+
+ return 0;
+
+free_descs:
+ while (i--) {
+ dma_free_coherent(&sdev->pdev->dev, stq->mem_size,
+ stq->descs[i] - stq->addr_offset[i],
+ stq->paddr[i] - stq->addr_offset[i]);
+ }
+
+ return err;
+}
+
+static void slic_free_stat_queue(struct slic_device *sdev)
+{
+ struct slic_stat_queue *stq = &sdev->stq;
+ int i;
+
+ for (i = 0; i < SLIC_NUM_STAT_DESC_ARRAYS; i++) {
+ dma_free_coherent(&sdev->pdev->dev, stq->mem_size,
+ stq->descs[i] - stq->addr_offset[i],
+ stq->paddr[i] - stq->addr_offset[i]);
+ }
+}
+
+static int slic_init_tx_queue(struct slic_device *sdev)
+{
+ struct slic_tx_queue *txq = &sdev->txq;
+ struct slic_tx_buffer *buff;
+ struct slic_tx_desc *desc;
+ unsigned int i;
+ int err;
+
+ txq->len = SLIC_NUM_TX_DESCS;
+ txq->put_idx = 0;
+ txq->done_idx = 0;
+
+ txq->txbuffs = kcalloc(txq->len, sizeof(*buff), GFP_KERNEL);
+ if (!txq->txbuffs)
+ return -ENOMEM;
+
+ txq->dma_pool = dma_pool_create("slic_pool", &sdev->pdev->dev,
+ sizeof(*desc), SLIC_TX_DESC_ALIGN,
+ 4096);
+ if (!txq->dma_pool) {
+ err = -ENOMEM;
+ netdev_err(sdev->netdev, "failed to create dma pool\n");
+ goto free_buffs;
+ }
+
+ for (i = 0; i < txq->len; i++) {
+ buff = &txq->txbuffs[i];
+ desc = dma_pool_zalloc(txq->dma_pool, GFP_KERNEL,
+ &buff->desc_paddr);
+ if (!desc) {
+ netdev_err(sdev->netdev,
+ "failed to alloc pool chunk (%i)\n", i);
+ err = -ENOMEM;
+ goto free_descs;
+ }
+
+ desc->hnd = cpu_to_le32((u32)(i + 1));
+ desc->cmd = SLIC_CMD_XMT_REQ;
+ desc->flags = 0;
+ desc->type = cpu_to_le32(SLIC_CMD_TYPE_DUMB);
+ buff->desc = desc;
+ }
+
+ return 0;
+
+free_descs:
+ while (i--) {
+ buff = &txq->txbuffs[i];
+ dma_pool_free(txq->dma_pool, buff->desc, buff->desc_paddr);
+ }
+ dma_pool_destroy(txq->dma_pool);
+
+free_buffs:
+ kfree(txq->txbuffs);
+
+ return err;
+}
+
+static void slic_free_tx_queue(struct slic_device *sdev)
+{
+ struct slic_tx_queue *txq = &sdev->txq;
+ struct slic_tx_buffer *buff;
+ unsigned int i;
+
+ for (i = 0; i < txq->len; i++) {
+ buff = &txq->txbuffs[i];
+ dma_pool_free(txq->dma_pool, buff->desc, buff->desc_paddr);
+ if (!buff->skb)
+ continue;
+
+ dma_unmap_single(&sdev->pdev->dev,
+ dma_unmap_addr(buff, map_addr),
+ dma_unmap_len(buff, map_len), DMA_TO_DEVICE);
+ consume_skb(buff->skb);
+ }
+ dma_pool_destroy(txq->dma_pool);
+
+ kfree(txq->txbuffs);
+}
+
+static int slic_init_rx_queue(struct slic_device *sdev)
+{
+ struct slic_rx_queue *rxq = &sdev->rxq;
+ struct slic_rx_buffer *buff;
+
+ rxq->len = SLIC_NUM_RX_LES;
+ rxq->done_idx = 0;
+ rxq->put_idx = 0;
+
+ buff = kcalloc(rxq->len, sizeof(*buff), GFP_KERNEL);
+ if (!buff)
+ return -ENOMEM;
+
+ rxq->rxbuffs = buff;
+ slic_refill_rx_queue(sdev, GFP_KERNEL);
+
+ return 0;
+}
+
+static void slic_free_rx_queue(struct slic_device *sdev)
+{
+ struct slic_rx_queue *rxq = &sdev->rxq;
+ struct slic_rx_buffer *buff;
+ unsigned int i;
+
+ /* free rx buffers */
+ for (i = 0; i < rxq->len; i++) {
+ buff = &rxq->rxbuffs[i];
+
+ if (!buff->skb)
+ continue;
+
+ dma_unmap_single(&sdev->pdev->dev,
+ dma_unmap_addr(buff, map_addr),
+ dma_unmap_len(buff, map_len),
+ DMA_FROM_DEVICE);
+ consume_skb(buff->skb);
+ }
+ kfree(rxq->rxbuffs);
+}
+
+static void slic_set_link_autoneg(struct slic_device *sdev)
+{
+ unsigned int subid = sdev->pdev->subsystem_device;
+ u32 val;
+
+ if (sdev->is_fiber) {
+ /* We've got a fiber gigabit interface, and register 4 is
+ * different in fiber mode than in copper mode.
+ */
+ /* advertise FD only @1000 Mb */
+ val = MII_ADVERTISE << 16 | ADVERTISE_1000XFULL |
+ ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
+ /* enable PAUSE frames */
+ slic_write(sdev, SLIC_REG_WPHY, val);
+ /* reset phy, enable auto-neg */
+ val = MII_BMCR << 16 | BMCR_RESET | BMCR_ANENABLE |
+ BMCR_ANRESTART;
+ slic_write(sdev, SLIC_REG_WPHY, val);
+ } else { /* copper gigabit */
+ /* We've got a copper gigabit interface, and register 4 is
+ * different in copper mode than in fiber mode.
+ */
+ /* advertise 10/100 Mb modes */
+ val = MII_ADVERTISE << 16 | ADVERTISE_100FULL |
+ ADVERTISE_100HALF | ADVERTISE_10FULL | ADVERTISE_10HALF;
+ /* enable PAUSE frames */
+ val |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
+ /* required by the Cicada PHY */
+ val |= ADVERTISE_CSMA;
+ slic_write(sdev, SLIC_REG_WPHY, val);
+
+ /* advertise FD only @1000 Mb */
+ val = MII_CTRL1000 << 16 | ADVERTISE_1000FULL;
+ slic_write(sdev, SLIC_REG_WPHY, val);
+
+ if (subid != PCI_SUBDEVICE_ID_ALACRITECH_CICADA) {
+ /* if a Marvell PHY enable auto crossover */
+ val = SLIC_MIICR_REG_16 | SLIC_MRV_REG16_XOVERON;
+ slic_write(sdev, SLIC_REG_WPHY, val);
+
+ /* reset phy, enable auto-neg */
+ val = MII_BMCR << 16 | BMCR_RESET | BMCR_ANENABLE |
+ BMCR_ANRESTART;
+ slic_write(sdev, SLIC_REG_WPHY, val);
+ } else {
+ /* enable and restart auto-neg (don't reset) */
+ val = MII_BMCR << 16 | BMCR_ANENABLE | BMCR_ANRESTART;
+ slic_write(sdev, SLIC_REG_WPHY, val);
+ }
+ }
+}
+
+static void slic_set_mac_address(struct slic_device *sdev)
+{
+ u8 *addr = sdev->netdev->dev_addr;
+ u32 val;
+
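+	/* write the station address into both MAC address registers (A and B) */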
+ val = addr[5] | addr[4] << 8 | addr[3] << 16 | addr[2] << 24;
+
+ slic_write(sdev, SLIC_REG_WRADDRAL, val);
+ slic_write(sdev, SLIC_REG_WRADDRBL, val);
+
+ val = addr[0] << 8 | addr[1];
+
+ slic_write(sdev, SLIC_REG_WRADDRAH, val);
+ slic_write(sdev, SLIC_REG_WRADDRBH, val);
+ slic_flush_write(sdev);
+}
+
+static u32 slic_read_dword_from_firmware(const struct firmware *fw, int *offset)
+{
+ int idx = *offset;
+ __le32 val;
+
+ memcpy(&val, fw->data + *offset, sizeof(val));
+ idx += 4;
+ *offset = idx;
+
+ return le32_to_cpu(val);
+}
+
+MODULE_FIRMWARE(SLIC_RCV_FIRMWARE_MOJAVE);
+MODULE_FIRMWARE(SLIC_RCV_FIRMWARE_OASIS);
+
+static int slic_load_rcvseq_firmware(struct slic_device *sdev)
+{
+ const struct firmware *fw;
+ const char *file;
+ u32 codelen;
+ int idx = 0;
+ u32 instr;
+ u32 addr;
+ int err;
+
+ file = (sdev->model == SLIC_MODEL_OASIS) ? SLIC_RCV_FIRMWARE_OASIS :
+ SLIC_RCV_FIRMWARE_MOJAVE;
+ err = request_firmware(&fw, file, &sdev->pdev->dev);
+ if (err) {
+ dev_err(&sdev->pdev->dev,
+ "failed to load receive sequencer firmware %s\n", file);
+ return err;
+ }
+ /* Do an initial sanity check concerning firmware size now. A further
+ * check follows below.
+ */
+ if (fw->size < SLIC_FIRMWARE_MIN_SIZE) {
+ dev_err(&sdev->pdev->dev,
+ "invalid firmware size %zu (min %u expected)\n",
+ fw->size, SLIC_FIRMWARE_MIN_SIZE);
+ err = -EINVAL;
+ goto release;
+ }
+
+ codelen = slic_read_dword_from_firmware(fw, &idx);
+
+ /* do another sanity check against firmware size */
+ if ((codelen + 4) > fw->size) {
+ dev_err(&sdev->pdev->dev,
+ "invalid rcv-sequencer firmware size %zu\n", fw->size);
+ err = -EINVAL;
+ goto release;
+ }
+
+ /* download sequencer code to card */
+ slic_write(sdev, SLIC_REG_RCV_WCS, SLIC_RCVWCS_BEGIN);
+ for (addr = 0; addr < codelen; addr++) {
+ __le32 val;
+ /* write out instruction address */
+ slic_write(sdev, SLIC_REG_RCV_WCS, addr);
+
+ instr = slic_read_dword_from_firmware(fw, &idx);
+ /* write out the instruction data low addr */
+ slic_write(sdev, SLIC_REG_RCV_WCS, instr);
+
+ val = (__le32)fw->data[idx];
+ instr = le32_to_cpu(val);
+ idx++;
+ /* write out the instruction data high addr */
+ slic_write(sdev, SLIC_REG_RCV_WCS, instr);
+ }
+ /* finish download */
+ slic_write(sdev, SLIC_REG_RCV_WCS, SLIC_RCVWCS_FINISH);
+ slic_flush_write(sdev);
+release:
+ release_firmware(fw);
+
+ return err;
+}
+
+MODULE_FIRMWARE(SLIC_FIRMWARE_MOJAVE);
+MODULE_FIRMWARE(SLIC_FIRMWARE_OASIS);
+
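+/* The firmware image begins with the number of sections, followed by the
+ * size and the start address of each section, and finally the instruction
+ * data itself; it is parsed below in exactly this order.
+ */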
+static int slic_load_firmware(struct slic_device *sdev)
+{
+ u32 sectstart[SLIC_FIRMWARE_MAX_SECTIONS];
+ u32 sectsize[SLIC_FIRMWARE_MAX_SECTIONS];
+ const struct firmware *fw;
+ unsigned int datalen;
+ const char *file;
+ int code_start;
+ unsigned int i;
+ u32 numsects;
+ int idx = 0;
+ u32 sect;
+ u32 instr;
+ u32 addr;
+ u32 base;
+ int err;
+
+ file = (sdev->model == SLIC_MODEL_OASIS) ? SLIC_FIRMWARE_OASIS :
+ SLIC_FIRMWARE_MOJAVE;
+ err = request_firmware(&fw, file, &sdev->pdev->dev);
+ if (err) {
+ dev_err(&sdev->pdev->dev, "failed to load firmware %s\n", file);
+ return err;
+ }
+ /* Do an initial sanity check concerning firmware size now. A further
+ * check follows below.
+ */
+ if (fw->size < SLIC_FIRMWARE_MIN_SIZE) {
+ dev_err(&sdev->pdev->dev,
+ "invalid firmware size %zu (min is %u)\n", fw->size,
+ SLIC_FIRMWARE_MIN_SIZE);
+ err = -EINVAL;
+ goto release;
+ }
+
+ numsects = slic_read_dword_from_firmware(fw, &idx);
+ if (numsects == 0 || numsects > SLIC_FIRMWARE_MAX_SECTIONS) {
+ dev_err(&sdev->pdev->dev,
+ "invalid number of sections in firmware: %u", numsects);
+ err = -EINVAL;
+ goto release;
+ }
+
+ datalen = numsects * 8 + 4;
+ for (i = 0; i < numsects; i++) {
+ sectsize[i] = slic_read_dword_from_firmware(fw, &idx);
+ datalen += sectsize[i];
+ }
+
+ /* do another sanity check against firmware size */
+ if (datalen > fw->size) {
+ dev_err(&sdev->pdev->dev,
+ "invalid firmware size %zu (expected >= %u)\n",
+ fw->size, datalen);
+ err = -EINVAL;
+ goto release;
+ }
+ /* get sections */
+ for (i = 0; i < numsects; i++)
+ sectstart[i] = slic_read_dword_from_firmware(fw, &idx);
+
+ code_start = idx;
+ instr = slic_read_dword_from_firmware(fw, &idx);
+
+ for (sect = 0; sect < numsects; sect++) {
+ unsigned int ssize = sectsize[sect] >> 3;
+
+ base = sectstart[sect];
+
+ for (addr = 0; addr < ssize; addr++) {
+ /* write out instruction address */
+ slic_write(sdev, SLIC_REG_WCS, base + addr);
+ /* write out instruction to low addr */
+ slic_write(sdev, SLIC_REG_WCS, instr);
+ instr = slic_read_dword_from_firmware(fw, &idx);
+ /* write out instruction to high addr */
+ slic_write(sdev, SLIC_REG_WCS, instr);
+ instr = slic_read_dword_from_firmware(fw, &idx);
+ }
+ }
+
+ idx = code_start;
+
+ for (sect = 0; sect < numsects; sect++) {
+ unsigned int ssize = sectsize[sect] >> 3;
+
+ instr = slic_read_dword_from_firmware(fw, &idx);
+ base = sectstart[sect];
+ if (base < 0x8000)
+ continue;
+
+ for (addr = 0; addr < ssize; addr++) {
+ /* write out instruction address */
+ slic_write(sdev, SLIC_REG_WCS,
+ SLIC_WCS_COMPARE | (base + addr));
+ /* write out instruction to low addr */
+ slic_write(sdev, SLIC_REG_WCS, instr);
+ instr = slic_read_dword_from_firmware(fw, &idx);
+ /* write out instruction to high addr */
+ slic_write(sdev, SLIC_REG_WCS, instr);
+ instr = slic_read_dword_from_firmware(fw, &idx);
+ }
+ }
+ slic_flush_write(sdev);
+ mdelay(10);
+ /* everything OK, kick off the card */
+ slic_write(sdev, SLIC_REG_WCS, SLIC_WCS_START);
+ slic_flush_write(sdev);
+ /* wait long enough for ucode to init card and reach the mainloop */
+ mdelay(20);
+release:
+ release_firmware(fw);
+
+ return err;
+}
+
+static int slic_init_shmem(struct slic_device *sdev)
+{
+ struct slic_shmem *sm = &sdev->shmem;
+ struct slic_shmem_data *sm_data;
+ dma_addr_t paddr;
+
+ sm_data = dma_zalloc_coherent(&sdev->pdev->dev, sizeof(*sm_data),
+ &paddr, GFP_KERNEL);
+ if (!sm_data) {
+ dev_err(&sdev->pdev->dev, "failed to allocate shared memory\n");
+ return -ENOMEM;
+ }
+
+ sm->shmem_data = sm_data;
+ sm->isr_paddr = paddr;
+ sm->link_paddr = paddr + offsetof(struct slic_shmem_data, link);
+
+ return 0;
+}
+
+static void slic_free_shmem(struct slic_device *sdev)
+{
+ struct slic_shmem *sm = &sdev->shmem;
+ struct slic_shmem_data *sm_data = sm->shmem_data;
+
+ dma_free_coherent(&sdev->pdev->dev, sizeof(*sm_data), sm_data,
+ sm->isr_paddr);
+}
+
+static int slic_init_iface(struct slic_device *sdev)
+{
+ struct slic_shmem *sm = &sdev->shmem;
+ int err;
+
+ sdev->upr_list.pending = false;
+
+ err = slic_init_shmem(sdev);
+ if (err) {
+ netdev_err(sdev->netdev, "failed to init shared memory\n");
+ return err;
+ }
+
+ err = slic_load_firmware(sdev);
+ if (err) {
+ netdev_err(sdev->netdev, "failed to load firmware\n");
+ goto free_sm;
+ }
+
+ err = slic_load_rcvseq_firmware(sdev);
+ if (err) {
+ netdev_err(sdev->netdev,
+ "failed to load firmware for receive sequencer\n");
+ goto free_sm;
+ }
+
+ slic_write(sdev, SLIC_REG_ICR, SLIC_ICR_INT_OFF);
+ slic_flush_write(sdev);
+ mdelay(1);
+
+ err = slic_init_rx_queue(sdev);
+ if (err) {
+ netdev_err(sdev->netdev, "failed to init rx queue: %u\n", err);
+ goto free_sm;
+ }
+
+ err = slic_init_tx_queue(sdev);
+ if (err) {
+ netdev_err(sdev->netdev, "failed to init tx queue: %u\n", err);
+ goto free_rxq;
+ }
+
+ err = slic_init_stat_queue(sdev);
+ if (err) {
+ netdev_err(sdev->netdev, "failed to init status queue: %u\n",
+ err);
+ goto free_txq;
+ }
+
+ slic_write(sdev, SLIC_REG_ISP, lower_32_bits(sm->isr_paddr));
+ napi_enable(&sdev->napi);
+ /* disable irq mitigation */
+ slic_write(sdev, SLIC_REG_INTAGG, 0);
+ slic_write(sdev, SLIC_REG_ISR, 0);
+ slic_flush_write(sdev);
+
+ slic_set_mac_address(sdev);
+
+ spin_lock_bh(&sdev->link_lock);
+ sdev->duplex = DUPLEX_UNKNOWN;
+ sdev->speed = SPEED_UNKNOWN;
+ spin_unlock_bh(&sdev->link_lock);
+
+ slic_set_link_autoneg(sdev);
+
+ err = request_irq(sdev->pdev->irq, slic_irq, IRQF_SHARED, DRV_NAME,
+ sdev);
+ if (err) {
+ netdev_err(sdev->netdev, "failed to request irq: %u\n", err);
+ goto disable_napi;
+ }
+
+ slic_write(sdev, SLIC_REG_ICR, SLIC_ICR_INT_ON);
+ slic_flush_write(sdev);
+ /* request initial link status */
+ err = slic_handle_link_change(sdev);
+ if (err)
+ netdev_warn(sdev->netdev,
+ "failed to set initial link state: %u\n", err);
+ return 0;
+
+disable_napi:
+ napi_disable(&sdev->napi);
+ slic_free_stat_queue(sdev);
+free_txq:
+ slic_free_tx_queue(sdev);
+free_rxq:
+ slic_free_rx_queue(sdev);
+free_sm:
+ slic_free_shmem(sdev);
+ slic_card_reset(sdev);
+
+ return err;
+}
+
+static int slic_open(struct net_device *dev)
+{
+ struct slic_device *sdev = netdev_priv(dev);
+ int err;
+
+ netif_carrier_off(dev);
+
+ err = slic_init_iface(sdev);
+ if (err) {
+ netdev_err(dev, "failed to initialize interface: %i\n", err);
+ return err;
+ }
+
+ netif_start_queue(dev);
+
+ return 0;
+}
+
+static int slic_close(struct net_device *dev)
+{
+ struct slic_device *sdev = netdev_priv(dev);
+ u32 val;
+
+ netif_stop_queue(dev);
+
+ /* stop irq handling */
+ napi_disable(&sdev->napi);
+ slic_write(sdev, SLIC_REG_ICR, SLIC_ICR_INT_OFF);
+ slic_write(sdev, SLIC_REG_ISR, 0);
+ slic_flush_write(sdev);
+
+ free_irq(sdev->pdev->irq, sdev);
+ /* turn off RCV and XMT and power down PHY */
+ val = SLIC_GXCR_RESET | SLIC_GXCR_PAUSEEN;
+ slic_write(sdev, SLIC_REG_WXCFG, val);
+
+ val = SLIC_GRCR_RESET | SLIC_GRCR_CTLEN | SLIC_GRCR_ADDRAEN |
+ SLIC_GRCR_HASHSIZE << SLIC_GRCR_HASHSIZE_SHIFT;
+ slic_write(sdev, SLIC_REG_WRCFG, val);
+
+ val = MII_BMCR << 16 | BMCR_PDOWN;
+ slic_write(sdev, SLIC_REG_WPHY, val);
+ slic_flush_write(sdev);
+
+ slic_clear_upr_list(&sdev->upr_list);
+ slic_write(sdev, SLIC_REG_QUIESCE, 0);
+
+ slic_free_stat_queue(sdev);
+ slic_free_tx_queue(sdev);
+ slic_free_rx_queue(sdev);
+ slic_free_shmem(sdev);
+
+ slic_card_reset(sdev);
+ netif_carrier_off(dev);
+
+ return 0;
+}
+
+static netdev_tx_t slic_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct slic_device *sdev = netdev_priv(dev);
+ struct slic_tx_queue *txq = &sdev->txq;
+ struct slic_tx_buffer *buff;
+ struct slic_tx_desc *desc;
+ dma_addr_t paddr;
+ u32 cbar_val;
+ u32 maplen;
+
+ if (unlikely(slic_get_free_tx_descs(txq) < SLIC_MAX_REQ_TX_DESCS)) {
+ netdev_err(dev, "BUG! not enough tx LEs left: %u\n",
+ slic_get_free_tx_descs(txq));
+ return NETDEV_TX_BUSY;
+ }
+
+ maplen = skb_headlen(skb);
+ paddr = dma_map_single(&sdev->pdev->dev, skb->data, maplen,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(&sdev->pdev->dev, paddr)) {
+ netdev_err(dev, "failed to map tx buffer\n");
+ goto drop_skb;
+ }
+
+ buff = &txq->txbuffs[txq->put_idx];
+ buff->skb = skb;
+ dma_unmap_addr_set(buff, map_addr, paddr);
+ dma_unmap_len_set(buff, map_len, maplen);
+
+ desc = buff->desc;
+ desc->totlen = cpu_to_le32(maplen);
+ desc->paddrl = cpu_to_le32(lower_32_bits(paddr));
+ desc->paddrh = cpu_to_le32(upper_32_bits(paddr));
+ desc->len = cpu_to_le32(maplen);
+
+ txq->put_idx = slic_next_queue_idx(txq->put_idx, txq->len);
+
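+	/* The low bit of the CBAR value appears to carry the descriptor
+	 * count (a single descriptor here), by analogy with the RBAR writes
+	 * that or the queue length into the address (assumption, not
+	 * confirmed by the hardware documentation at hand).
+	 */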
+ cbar_val = lower_32_bits(buff->desc_paddr) | 1;
+ /* complete writes to RAM and DMA before hardware is informed */
+ wmb();
+
+ slic_write(sdev, SLIC_REG_CBAR, cbar_val);
+
+ if (slic_get_free_tx_descs(txq) < SLIC_MAX_REQ_TX_DESCS)
+ netif_stop_queue(dev);
+	/* make sure writes to io-memory can't leak out of the tx queue lock */
+ mmiowb();
+
+ return NETDEV_TX_OK;
+drop_skb:
+ dev_kfree_skb_any(skb);
+
+ return NETDEV_TX_OK;
+}
+
+static struct rtnl_link_stats64 *slic_get_stats(struct net_device *dev,
+ struct rtnl_link_stats64 *lst)
+{
+ struct slic_device *sdev = netdev_priv(dev);
+ struct slic_stats *stats = &sdev->stats;
+
+ SLIC_GET_STATS_COUNTER(lst->rx_packets, stats, rx_packets);
+ SLIC_GET_STATS_COUNTER(lst->tx_packets, stats, tx_packets);
+ SLIC_GET_STATS_COUNTER(lst->rx_bytes, stats, rx_bytes);
+ SLIC_GET_STATS_COUNTER(lst->tx_bytes, stats, tx_bytes);
+ SLIC_GET_STATS_COUNTER(lst->rx_errors, stats, rx_errors);
+ SLIC_GET_STATS_COUNTER(lst->rx_dropped, stats, rx_buff_miss);
+ SLIC_GET_STATS_COUNTER(lst->tx_dropped, stats, tx_dropped);
+ SLIC_GET_STATS_COUNTER(lst->multicast, stats, rx_mcasts);
+ SLIC_GET_STATS_COUNTER(lst->rx_over_errors, stats, rx_buffoflow);
+ SLIC_GET_STATS_COUNTER(lst->rx_crc_errors, stats, rx_crc);
+ SLIC_GET_STATS_COUNTER(lst->rx_fifo_errors, stats, rx_oflow802);
+ SLIC_GET_STATS_COUNTER(lst->tx_carrier_errors, stats, tx_carrier);
+
+ return lst;
+}
+
+static int slic_get_sset_count(struct net_device *dev, int sset)
+{
+ switch (sset) {
+ case ETH_SS_STATS:
+ return ARRAY_SIZE(slic_stats_strings);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static void slic_get_ethtool_stats(struct net_device *dev,
+ struct ethtool_stats *eth_stats, u64 *data)
+{
+ struct slic_device *sdev = netdev_priv(dev);
+ struct slic_stats *stats = &sdev->stats;
+
+ SLIC_GET_STATS_COUNTER(data[0], stats, rx_packets);
+ SLIC_GET_STATS_COUNTER(data[1], stats, rx_bytes);
+ SLIC_GET_STATS_COUNTER(data[2], stats, rx_mcasts);
+ SLIC_GET_STATS_COUNTER(data[3], stats, rx_errors);
+ SLIC_GET_STATS_COUNTER(data[4], stats, rx_buff_miss);
+ SLIC_GET_STATS_COUNTER(data[5], stats, rx_tpcsum);
+ SLIC_GET_STATS_COUNTER(data[6], stats, rx_tpoflow);
+ SLIC_GET_STATS_COUNTER(data[7], stats, rx_tphlen);
+ SLIC_GET_STATS_COUNTER(data[8], stats, rx_ipcsum);
+ SLIC_GET_STATS_COUNTER(data[9], stats, rx_iplen);
+ SLIC_GET_STATS_COUNTER(data[10], stats, rx_iphlen);
+ SLIC_GET_STATS_COUNTER(data[11], stats, rx_early);
+ SLIC_GET_STATS_COUNTER(data[12], stats, rx_buffoflow);
+ SLIC_GET_STATS_COUNTER(data[13], stats, rx_lcode);
+ SLIC_GET_STATS_COUNTER(data[14], stats, rx_drbl);
+ SLIC_GET_STATS_COUNTER(data[15], stats, rx_crc);
+ SLIC_GET_STATS_COUNTER(data[16], stats, rx_oflow802);
+ SLIC_GET_STATS_COUNTER(data[17], stats, rx_uflow802);
+ SLIC_GET_STATS_COUNTER(data[18], stats, tx_packets);
+ SLIC_GET_STATS_COUNTER(data[19], stats, tx_bytes);
+ SLIC_GET_STATS_COUNTER(data[20], stats, tx_carrier);
+ SLIC_GET_STATS_COUNTER(data[21], stats, tx_dropped);
+ SLIC_GET_STATS_COUNTER(data[22], stats, irq_errs);
+}
+
+static void slic_get_strings(struct net_device *dev, u32 stringset, u8 *data)
+{
+ if (stringset == ETH_SS_STATS) {
+ memcpy(data, slic_stats_strings, sizeof(slic_stats_strings));
+ data += sizeof(slic_stats_strings);
+ }
+}
+
+static void slic_get_drvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *info)
+{
+ struct slic_device *sdev = netdev_priv(dev);
+
+ strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+ strlcpy(info->bus_info, pci_name(sdev->pdev), sizeof(info->bus_info));
+}
+
+static const struct ethtool_ops slic_ethtool_ops = {
+ .get_drvinfo = slic_get_drvinfo,
+ .get_link = ethtool_op_get_link,
+ .get_strings = slic_get_strings,
+ .get_ethtool_stats = slic_get_ethtool_stats,
+ .get_sset_count = slic_get_sset_count,
+};
+
+static const struct net_device_ops slic_netdev_ops = {
+ .ndo_open = slic_open,
+ .ndo_stop = slic_close,
+ .ndo_start_xmit = slic_xmit,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_get_stats64 = slic_get_stats,
+ .ndo_set_rx_mode = slic_set_rx_mode,
+ .ndo_validate_addr = eth_validate_addr,
+};
+
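+/* 16-bit one's complement sum over the EEPROM contents, folding carries
+ * like the Internet checksum
+ */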
+static u16 slic_eeprom_csum(unsigned char *eeprom, unsigned int len)
+{
+ unsigned char *ptr = eeprom;
+ u32 csum = 0;
+ __le16 data;
+
+ while (len > 1) {
+ memcpy(&data, ptr, sizeof(data));
+ csum += le16_to_cpu(data);
+ ptr += 2;
+ len -= 2;
+ }
+ if (len > 0)
+ csum += *(u8 *)ptr;
+ while (csum >> 16)
+ csum = (csum & 0xFFFF) + ((csum >> 16) & 0xFFFF);
+ return ~csum;
+}
+
+/* check eeprom size, magic and checksum */
+static bool slic_eeprom_valid(unsigned char *eeprom, unsigned int size)
+{
+ const unsigned int MAX_SIZE = 128;
+ const unsigned int MIN_SIZE = 98;
+ __le16 magic;
+ __le16 csum;
+
+ if (size < MIN_SIZE || size > MAX_SIZE)
+ return false;
+ memcpy(&magic, eeprom, sizeof(magic));
+ if (le16_to_cpu(magic) != SLIC_EEPROM_MAGIC)
+ return false;
+ /* cut checksum bytes */
+ size -= 2;
+ memcpy(&csum, eeprom + size, sizeof(csum));
+
+ return (le16_to_cpu(csum) == slic_eeprom_csum(eeprom, size));
+}
+
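+/* Read the EEPROM by issuing a SLIC_UPR_CONFIG request: the card DMAs the
+ * EEPROM contents into the given buffer and signals completion through the
+ * isr word in shared memory.
+ */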
+static int slic_read_eeprom(struct slic_device *sdev)
+{
+ unsigned int devfn = PCI_FUNC(sdev->pdev->devfn);
+ struct slic_shmem *sm = &sdev->shmem;
+ struct slic_shmem_data *sm_data = sm->shmem_data;
+ const unsigned int MAX_LOOPS = 5000;
+ unsigned int codesize;
+ unsigned char *eeprom;
+ struct slic_upr *upr;
+ unsigned int i = 0;
+ dma_addr_t paddr;
+ int err = 0;
+ u8 *mac[2];
+
+ eeprom = dma_zalloc_coherent(&sdev->pdev->dev, SLIC_EEPROM_SIZE,
+ &paddr, GFP_KERNEL);
+ if (!eeprom)
+ return -ENOMEM;
+
+ slic_write(sdev, SLIC_REG_ICR, SLIC_ICR_INT_OFF);
+ /* setup ISP temporarily */
+ slic_write(sdev, SLIC_REG_ISP, lower_32_bits(sm->isr_paddr));
+
+ err = slic_new_upr(sdev, SLIC_UPR_CONFIG, paddr);
+ if (!err) {
+ for (i = 0; i < MAX_LOOPS; i++) {
+ if (le32_to_cpu(sm_data->isr) & SLIC_ISR_UPC)
+ break;
+ mdelay(1);
+ }
+ if (i == MAX_LOOPS) {
+ dev_err(&sdev->pdev->dev,
+ "timed out while waiting for eeprom data\n");
+ err = -ETIMEDOUT;
+ }
+ upr = slic_dequeue_upr(sdev);
+ kfree(upr);
+ }
+
+ slic_write(sdev, SLIC_REG_ISP, 0);
+ slic_write(sdev, SLIC_REG_ISR, 0);
+ slic_flush_write(sdev);
+
+ if (err)
+ goto free_eeprom;
+
+ if (sdev->model == SLIC_MODEL_OASIS) {
+ struct slic_oasis_eeprom *oee;
+
+ oee = (struct slic_oasis_eeprom *)eeprom;
+ mac[0] = oee->mac;
+ mac[1] = oee->mac2;
+ codesize = le16_to_cpu(oee->eeprom_code_size);
+ } else {
+ struct slic_mojave_eeprom *mee;
+
+ mee = (struct slic_mojave_eeprom *)eeprom;
+ mac[0] = mee->mac;
+ mac[1] = mee->mac2;
+ codesize = le16_to_cpu(mee->eeprom_code_size);
+ }
+
+ if (!slic_eeprom_valid(eeprom, codesize)) {
+ dev_err(&sdev->pdev->dev, "invalid checksum in eeprom\n");
+ err = -EINVAL;
+ goto free_eeprom;
+ }
+ /* set mac address */
+ ether_addr_copy(sdev->netdev->dev_addr, mac[devfn]);
+free_eeprom:
+ dma_free_coherent(&sdev->pdev->dev, SLIC_EEPROM_SIZE, eeprom, paddr);
+
+ return err;
+}
+
+static int slic_init(struct slic_device *sdev)
+{
+ int err;
+
+ spin_lock_init(&sdev->upper_lock);
+ spin_lock_init(&sdev->link_lock);
+ INIT_LIST_HEAD(&sdev->upr_list.list);
+ spin_lock_init(&sdev->upr_list.lock);
+ u64_stats_init(&sdev->stats.syncp);
+
+ slic_card_reset(sdev);
+
+ err = slic_load_firmware(sdev);
+ if (err) {
+ dev_err(&sdev->pdev->dev, "failed to load firmware\n");
+ return err;
+ }
+
+	/* we need the shared memory to read the EEPROM, so set it up temporarily */
+ err = slic_init_shmem(sdev);
+ if (err) {
+ dev_err(&sdev->pdev->dev, "failed to init shared memory\n");
+ return err;
+ }
+
+ err = slic_read_eeprom(sdev);
+ if (err) {
+ dev_err(&sdev->pdev->dev, "failed to read eeprom\n");
+ goto free_sm;
+ }
+
+ slic_card_reset(sdev);
+ slic_free_shmem(sdev);
+
+ return 0;
+free_sm:
+ slic_free_shmem(sdev);
+
+ return err;
+}
+
+static bool slic_is_fiber(unsigned short subdev)
+{
+ switch (subdev) {
+ /* Mojave */
+ case PCI_SUBDEVICE_ID_ALACRITECH_1000X1F: /* fallthrough */
+ case PCI_SUBDEVICE_ID_ALACRITECH_SES1001F: /* fallthrough */
+ /* Oasis */
+ case PCI_SUBDEVICE_ID_ALACRITECH_SEN2002XF: /* fallthrough */
+ case PCI_SUBDEVICE_ID_ALACRITECH_SEN2001XF: /* fallthrough */
+ case PCI_SUBDEVICE_ID_ALACRITECH_SEN2104EF: /* fallthrough */
+ case PCI_SUBDEVICE_ID_ALACRITECH_SEN2102EF: /* fallthrough */
+ return true;
+ }
+ return false;
+}
+
+static void slic_configure_pci(struct pci_dev *pdev)
+{
+ u16 old;
+ u16 cmd;
+
+ pci_read_config_word(pdev, PCI_COMMAND, &old);
+
+ cmd = old | PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
+ if (old != cmd)
+ pci_write_config_word(pdev, PCI_COMMAND, cmd);
+}
+
+static int slic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ struct slic_device *sdev;
+ struct net_device *dev;
+ int err;
+
+ err = pci_enable_device(pdev);
+ if (err) {
+ dev_err(&pdev->dev, "failed to enable PCI device\n");
+ return err;
+ }
+
+ pci_set_master(pdev);
+ pci_try_set_mwi(pdev);
+
+ slic_configure_pci(pdev);
+
+ err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
+ if (err) {
+ dev_err(&pdev->dev, "failed to setup DMA\n");
+ goto disable;
+ }
+
+ dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
+
+ err = pci_request_regions(pdev, DRV_NAME);
+ if (err) {
+ dev_err(&pdev->dev, "failed to obtain PCI regions\n");
+ goto disable;
+ }
+
+ dev = alloc_etherdev(sizeof(*sdev));
+ if (!dev) {
+ dev_err(&pdev->dev, "failed to alloc ethernet device\n");
+ err = -ENOMEM;
+ goto free_regions;
+ }
+
+ SET_NETDEV_DEV(dev, &pdev->dev);
+ pci_set_drvdata(pdev, dev);
+ dev->irq = pdev->irq;
+ dev->netdev_ops = &slic_netdev_ops;
+ dev->hw_features = NETIF_F_RXCSUM;
+ dev->features |= dev->hw_features;
+
+ dev->ethtool_ops = &slic_ethtool_ops;
+
+ sdev = netdev_priv(dev);
+ sdev->model = (pdev->device == PCI_DEVICE_ID_ALACRITECH_OASIS) ?
+ SLIC_MODEL_OASIS : SLIC_MODEL_MOJAVE;
+ sdev->is_fiber = slic_is_fiber(pdev->subsystem_device);
+ sdev->pdev = pdev;
+ sdev->netdev = dev;
+ sdev->regs = ioremap_nocache(pci_resource_start(pdev, 0),
+ pci_resource_len(pdev, 0));
+ if (!sdev->regs) {
+ dev_err(&pdev->dev, "failed to map registers\n");
+ err = -ENOMEM;
+ goto free_netdev;
+ }
+
+ err = slic_init(sdev);
+ if (err) {
+ dev_err(&pdev->dev, "failed to initialize driver\n");
+ goto unmap;
+ }
+
+ netif_napi_add(dev, &sdev->napi, slic_poll, SLIC_NAPI_WEIGHT);
+ netif_carrier_off(dev);
+
+ err = register_netdev(dev);
+ if (err) {
+ dev_err(&pdev->dev, "failed to register net device: %i\n", err);
+ goto unmap;
+ }
+
+ return 0;
+
+unmap:
+ iounmap(sdev->regs);
+free_netdev:
+ free_netdev(dev);
+free_regions:
+ pci_release_regions(pdev);
+disable:
+ pci_disable_device(pdev);
+
+ return err;
+}
+
+static void slic_remove(struct pci_dev *pdev)
+{
+ struct net_device *dev = pci_get_drvdata(pdev);
+ struct slic_device *sdev = netdev_priv(dev);
+
+ unregister_netdev(dev);
+ iounmap(sdev->regs);
+ free_netdev(dev);
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+}
+
+static struct pci_driver slic_driver = {
+ .name = DRV_NAME,
+ .id_table = slic_id_tbl,
+ .probe = slic_probe,
+ .remove = slic_remove,
+};
+
+module_pci_driver(slic_driver);
+
+MODULE_DESCRIPTION("Alacritech non-accelerated SLIC driver");
+MODULE_AUTHOR("Lino Sanfilippo <LinoSanfilippo@gmx.de>");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_VERSION);
diff --git a/drivers/net/ethernet/allwinner/sun4i-emac.c b/drivers/net/ethernet/allwinner/sun4i-emac.c
index 6ffdff68bfc4..c8f4d26fc9d4 100644
--- a/drivers/net/ethernet/allwinner/sun4i-emac.c
+++ b/drivers/net/ethernet/allwinner/sun4i-emac.c
@@ -37,6 +37,11 @@
#define EMAC_MAX_FRAME_LEN 0x0600
+#define EMAC_DEFAULT_MSG_ENABLE 0x0000
+static int debug = -1;		/* defaults above */
+module_param(debug, int, 0);
+MODULE_PARM_DESC(debug, "debug message flags");
+
/* Transmit timeout, default 5 seconds. */
static int watchdog = 5000;
module_param(watchdog, int, 0400);
@@ -225,11 +230,27 @@ static void emac_get_drvinfo(struct net_device *dev,
strlcpy(info->bus_info, dev_name(&dev->dev), sizeof(info->bus_info));
}
+static u32 emac_get_msglevel(struct net_device *dev)
+{
+ struct emac_board_info *db = netdev_priv(dev);
+
+ return db->msg_enable;
+}
+
+static void emac_set_msglevel(struct net_device *dev, u32 value)
+{
+ struct emac_board_info *db = netdev_priv(dev);
+
+ db->msg_enable = value;
+}
+
static const struct ethtool_ops emac_ethtool_ops = {
.get_drvinfo = emac_get_drvinfo,
.get_link = ethtool_op_get_link,
.get_link_ksettings = phy_ethtool_get_link_ksettings,
.set_link_ksettings = phy_ethtool_set_link_ksettings,
+ .get_msglevel = emac_get_msglevel,
+ .set_msglevel = emac_set_msglevel,
};
static unsigned int emac_setup(struct net_device *ndev)
@@ -571,8 +592,7 @@ static void emac_rx(struct net_device *dev)
/* A packet ready now & Get status/length */
good_packet = true;
- emac_inblk_32bit(db->membase + EMAC_RX_IO_DATA_REG,
- &rxhdr, sizeof(rxhdr));
+ rxhdr = readl(db->membase + EMAC_RX_IO_DATA_REG);
if (netif_msg_rx_status(db))
dev_dbg(db->dev, "rxhdr: %x\n", *((int *)(&rxhdr)));
@@ -773,7 +793,6 @@ static const struct net_device_ops emac_netdev_ops = {
.ndo_tx_timeout = emac_timeout,
.ndo_set_rx_mode = emac_set_rx_mode,
.ndo_do_ioctl = emac_ioctl,
- .ndo_change_mtu = eth_change_mtu,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = emac_set_mac_address,
#ifdef CONFIG_NET_POLL_CONTROLLER
@@ -805,6 +824,7 @@ static int emac_probe(struct platform_device *pdev)
db->dev = &pdev->dev;
db->ndev = ndev;
db->pdev = pdev;
+ db->msg_enable = netif_msg_init(debug, EMAC_DEFAULT_MSG_ENABLE);
spin_lock_init(&db->lock);
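
For reference, the netif_msg machinery wired up above has three parts: netif_msg_init() folds the module parameter into a default bitmask at probe time, the ethtool msglevel hooks expose that mask at run time, and per-class predicates such as netif_msg_rx_status() gate individual log sites. A hedged sketch, with demo_* names that are illustrative rather than taken from this driver:

    struct demo_priv {
        u32 msg_enable;    /* the netif_msg_*() predicates test this */
    };

    static void demo_log_rx(struct net_device *ndev, u32 status)
    {
        struct demo_priv *priv = netdev_priv(ndev);

        if (netif_msg_rx_status(priv))
            netdev_dbg(ndev, "rx status word: %08x\n", status);
    }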
diff --git a/drivers/net/ethernet/alteon/acenic.c b/drivers/net/ethernet/alteon/acenic.c
index b90a26b13fdf..16f0c70266bc 100644
--- a/drivers/net/ethernet/alteon/acenic.c
+++ b/drivers/net/ethernet/alteon/acenic.c
@@ -429,14 +429,16 @@ static const char version[] =
"acenic.c: v0.92 08/05/2002 Jes Sorensen, linux-acenic@SunSITE.dk\n"
" http://home.cern.ch/~jes/gige/acenic.html\n";
-static int ace_get_settings(struct net_device *, struct ethtool_cmd *);
-static int ace_set_settings(struct net_device *, struct ethtool_cmd *);
+static int ace_get_link_ksettings(struct net_device *,
+ struct ethtool_link_ksettings *);
+static int ace_set_link_ksettings(struct net_device *,
+ const struct ethtool_link_ksettings *);
static void ace_get_drvinfo(struct net_device *, struct ethtool_drvinfo *);
static const struct ethtool_ops ace_ethtool_ops = {
- .get_settings = ace_get_settings,
- .set_settings = ace_set_settings,
.get_drvinfo = ace_get_drvinfo,
+ .get_link_ksettings = ace_get_link_ksettings,
+ .set_link_ksettings = ace_set_link_ksettings,
};
static void ace_watchdog(struct net_device *dev);
@@ -474,6 +476,8 @@ static int acenic_probe_one(struct pci_dev *pdev,
dev->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
dev->watchdog_timeo = 5*HZ;
+ dev->min_mtu = 0;
+ dev->max_mtu = ACE_JUMBO_MTU;
dev->netdev_ops = &ace_netdev_ops;
dev->ethtool_ops = &ace_ethtool_ops;
@@ -2548,9 +2552,6 @@ static int ace_change_mtu(struct net_device *dev, int new_mtu)
struct ace_private *ap = netdev_priv(dev);
struct ace_regs __iomem *regs = ap->regs;
- if (new_mtu > ACE_JUMBO_MTU)
- return -EINVAL;
-
writel(new_mtu + ETH_HLEN + 4, &regs->IfMtu);
dev->mtu = new_mtu;
@@ -2580,43 +2581,44 @@ static int ace_change_mtu(struct net_device *dev, int new_mtu)
return 0;
}
-static int ace_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
+static int ace_get_link_ksettings(struct net_device *dev,
+ struct ethtool_link_ksettings *cmd)
{
struct ace_private *ap = netdev_priv(dev);
struct ace_regs __iomem *regs = ap->regs;
u32 link;
+ u32 supported;
+
+ memset(cmd, 0, sizeof(struct ethtool_link_ksettings));
- memset(ecmd, 0, sizeof(struct ethtool_cmd));
- ecmd->supported =
- (SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full |
- SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full |
- SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full |
- SUPPORTED_Autoneg | SUPPORTED_FIBRE);
+ supported = (SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full |
+ SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full |
+ SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full |
+ SUPPORTED_Autoneg | SUPPORTED_FIBRE);
- ecmd->port = PORT_FIBRE;
- ecmd->transceiver = XCVR_INTERNAL;
+ cmd->base.port = PORT_FIBRE;
link = readl(&regs->GigLnkState);
- if (link & LNK_1000MB)
- ethtool_cmd_speed_set(ecmd, SPEED_1000);
- else {
+ if (link & LNK_1000MB) {
+ cmd->base.speed = SPEED_1000;
+ } else {
link = readl(&regs->FastLnkState);
if (link & LNK_100MB)
- ethtool_cmd_speed_set(ecmd, SPEED_100);
+ cmd->base.speed = SPEED_100;
else if (link & LNK_10MB)
- ethtool_cmd_speed_set(ecmd, SPEED_10);
+ cmd->base.speed = SPEED_10;
else
- ethtool_cmd_speed_set(ecmd, 0);
+ cmd->base.speed = 0;
}
if (link & LNK_FULL_DUPLEX)
- ecmd->duplex = DUPLEX_FULL;
+ cmd->base.duplex = DUPLEX_FULL;
else
- ecmd->duplex = DUPLEX_HALF;
+ cmd->base.duplex = DUPLEX_HALF;
if (link & LNK_NEGOTIATE)
- ecmd->autoneg = AUTONEG_ENABLE;
+ cmd->base.autoneg = AUTONEG_ENABLE;
else
- ecmd->autoneg = AUTONEG_DISABLE;
+ cmd->base.autoneg = AUTONEG_DISABLE;
#if 0
/*
@@ -2627,13 +2629,15 @@ static int ace_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
ecmd->txcoal = readl(&regs->TuneTxCoalTicks);
ecmd->rxcoal = readl(&regs->TuneRxCoalTicks);
#endif
- ecmd->maxtxpkt = readl(&regs->TuneMaxTxDesc);
- ecmd->maxrxpkt = readl(&regs->TuneMaxRxDesc);
+
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
+ supported);
return 0;
}
-static int ace_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
+static int ace_set_link_ksettings(struct net_device *dev,
+ const struct ethtool_link_ksettings *cmd)
{
struct ace_private *ap = netdev_priv(dev);
struct ace_regs __iomem *regs = ap->regs;
@@ -2656,11 +2660,11 @@ static int ace_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
LNK_RX_FLOW_CTL_Y | LNK_NEG_FCTL;
if (!ACE_IS_TIGON_I(ap))
link |= LNK_TX_FLOW_CTL_Y;
- if (ecmd->autoneg == AUTONEG_ENABLE)
+ if (cmd->base.autoneg == AUTONEG_ENABLE)
link |= LNK_NEGOTIATE;
- if (ethtool_cmd_speed(ecmd) != speed) {
+ if (cmd->base.speed != speed) {
link &= ~(LNK_1000MB | LNK_100MB | LNK_10MB);
- switch (ethtool_cmd_speed(ecmd)) {
+ switch (cmd->base.speed) {
case SPEED_1000:
link |= LNK_1000MB;
break;
@@ -2673,7 +2677,7 @@ static int ace_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
}
}
- if (ecmd->duplex == DUPLEX_FULL)
+ if (cmd->base.duplex == DUPLEX_FULL)
link |= LNK_FULL_DUPLEX;
if (link != ap->link) {
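
The conversion being applied here (and in the drivers that follow) is mechanical: the legacy u32 SUPPORTED_* mask is assembled exactly as before but now lands in the link_modes.supported bitmap via ethtool_convert_legacy_u32_to_link_mode(), while speed, duplex, autoneg and port move into cmd->base; the old transceiver and maxtxpkt/maxrxpkt fields have no ksettings equivalent and are simply dropped. A reduced sketch of a fibre-only get hook (the constants are picked for illustration):

    static int demo_get_link_ksettings(struct net_device *dev,
                                       struct ethtool_link_ksettings *cmd)
    {
        u32 supported = SUPPORTED_1000baseT_Full |
                        SUPPORTED_Autoneg | SUPPORTED_FIBRE;

        memset(cmd, 0, sizeof(*cmd));
        cmd->base.port = PORT_FIBRE;
        cmd->base.speed = SPEED_1000;
        cmd->base.duplex = DUPLEX_FULL;
        cmd->base.autoneg = AUTONEG_ENABLE;

        ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
                                                supported);
        return 0;
    }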
diff --git a/drivers/net/ethernet/altera/Makefile b/drivers/net/ethernet/altera/Makefile
index 3eff2fd3997e..d4a187e45369 100644
--- a/drivers/net/ethernet/altera/Makefile
+++ b/drivers/net/ethernet/altera/Makefile
@@ -5,4 +5,3 @@
obj-$(CONFIG_ALTERA_TSE) += altera_tse.o
altera_tse-objs := altera_tse_main.o altera_tse_ethtool.o \
altera_msgdma.o altera_sgdma.o altera_utils.o
-ccflags-y += -D__CHECK_ENDIAN__
diff --git a/drivers/net/ethernet/altera/altera_tse.h b/drivers/net/ethernet/altera/altera_tse.h
index e0052003d16f..e2feee87180a 100644
--- a/drivers/net/ethernet/altera/altera_tse.h
+++ b/drivers/net/ethernet/altera/altera_tse.h
@@ -120,6 +120,17 @@
#define MAC_CMDCFG_DISABLE_READ_TIMEOUT_GET(v) GET_BIT_VALUE(v, 27)
#define MAC_CMDCFG_CNT_RESET_GET(v) GET_BIT_VALUE(v, 31)
+/* SGMII PCS register addresses
+ */
+#define SGMII_PCS_SCRATCH 0x10
+#define SGMII_PCS_REV 0x11
+#define SGMII_PCS_LINK_TIMER_0 0x12
+#define SGMII_PCS_LINK_TIMER_1 0x13
+#define SGMII_PCS_IF_MODE 0x14
+#define SGMII_PCS_DIS_READ_TO 0x15
+#define SGMII_PCS_READ_TO 0x16
+#define SGMII_PCS_SW_RESET_TIMEOUT 100 /* usecs */
+
/* MDIO registers within MAC register Space
*/
struct altera_tse_mdio {
@@ -443,7 +454,6 @@ struct altera_tse_private {
/* RX/TX MAC FIFO configs */
u32 tx_fifo_depth;
u32 rx_fifo_depth;
- u32 max_mtu;
/* Hash filter settings */
u32 hash_filter;
diff --git a/drivers/net/ethernet/altera/altera_tse_main.c b/drivers/net/ethernet/altera/altera_tse_main.c
index a0eee7218695..25864bff25ee 100644
--- a/drivers/net/ethernet/altera/altera_tse_main.c
+++ b/drivers/net/ethernet/altera/altera_tse_main.c
@@ -37,6 +37,7 @@
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/mii.h>
#include <linux/netdevice.h>
#include <linux/of_device.h>
#include <linux/of_mdio.h>
@@ -96,6 +97,27 @@ static inline u32 tse_tx_avail(struct altera_tse_private *priv)
return priv->tx_cons + priv->tx_ring_size - priv->tx_prod - 1;
}
+/* PCS Register read/write functions
+ */
+static u16 sgmii_pcs_read(struct altera_tse_private *priv, int regnum)
+{
+ return csrrd32(priv->mac_dev,
+ tse_csroffs(mdio_phy0) + regnum * 4) & 0xffff;
+}
+
+static void sgmii_pcs_write(struct altera_tse_private *priv, int regnum,
+ u16 value)
+{
+ csrwr32(value, priv->mac_dev, tse_csroffs(mdio_phy0) + regnum * 4);
+}
+
+/* Check PCS scratch memory */
+static int sgmii_pcs_scratch_test(struct altera_tse_private *priv, u16 value)
+{
+ sgmii_pcs_write(priv, SGMII_PCS_SCRATCH, value);
+ return (sgmii_pcs_read(priv, SGMII_PCS_SCRATCH) == value);
+}
+
/* MDIO specific functions
*/
static int altera_tse_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
@@ -984,20 +1006,11 @@ static void tse_set_mac(struct altera_tse_private *priv, bool enable)
*/
static int tse_change_mtu(struct net_device *dev, int new_mtu)
{
- struct altera_tse_private *priv = netdev_priv(dev);
- unsigned int max_mtu = priv->max_mtu;
- unsigned int min_mtu = ETH_ZLEN + ETH_FCS_LEN;
-
if (netif_running(dev)) {
netdev_err(dev, "must be stopped to change its MTU\n");
return -EBUSY;
}
- if ((new_mtu < min_mtu) || (new_mtu > max_mtu)) {
- netdev_err(dev, "invalid MTU, max MTU is: %u\n", max_mtu);
- return -EINVAL;
- }
-
dev->mtu = new_mtu;
netdev_update_features(dev);
@@ -1082,6 +1095,66 @@ static void tse_set_rx_mode(struct net_device *dev)
spin_unlock(&priv->mac_cfg_lock);
}
+/* Initialise (if necessary) the SGMII PCS component
+ */
+static int init_sgmii_pcs(struct net_device *dev)
+{
+ struct altera_tse_private *priv = netdev_priv(dev);
+ int n;
+ unsigned int tmp_reg = 0;
+
+ if (priv->phy_iface != PHY_INTERFACE_MODE_SGMII)
+ return 0; /* Nothing to do, not in SGMII mode */
+
+ /* The TSE SGMII PCS block looks a little like a PHY, it is
+ * mapped into the zeroth MDIO space of the MAC and it has
+ * ID registers like a PHY would. Sadly this is often
+ * configured to zeroes, so don't be surprised if it does
+ * show 0x00000000.
+ */
+
+ if (sgmii_pcs_scratch_test(priv, 0x0000) &&
+ sgmii_pcs_scratch_test(priv, 0xffff) &&
+ sgmii_pcs_scratch_test(priv, 0xa5a5) &&
+ sgmii_pcs_scratch_test(priv, 0x5a5a)) {
+ netdev_info(dev, "PCS PHY ID: 0x%04x%04x\n",
+ sgmii_pcs_read(priv, MII_PHYSID1),
+ sgmii_pcs_read(priv, MII_PHYSID2));
+ } else {
+ netdev_err(dev, "SGMII PCS Scratch memory test failed.\n");
+ return -ENOMEM;
+ }
+
+ /* Starting on page 5-29 of the MegaCore Function User Guide
+ * Set SGMII Link timer to 1.6ms
+ */
+ sgmii_pcs_write(priv, SGMII_PCS_LINK_TIMER_0, 0x0D40);
+ sgmii_pcs_write(priv, SGMII_PCS_LINK_TIMER_1, 0x03);
+
+ /* Enable SGMII Interface and Enable SGMII Auto Negotiation */
+ sgmii_pcs_write(priv, SGMII_PCS_IF_MODE, 0x3);
+
+ /* Enable Autonegotiation */
+ tmp_reg = sgmii_pcs_read(priv, MII_BMCR);
+ tmp_reg |= (BMCR_SPEED1000 | BMCR_FULLDPLX | BMCR_ANENABLE);
+ sgmii_pcs_write(priv, MII_BMCR, tmp_reg);
+
+ /* Reset PCS block */
+ tmp_reg |= BMCR_RESET;
+ sgmii_pcs_write(priv, MII_BMCR, tmp_reg);
+ for (n = 0; n < SGMII_PCS_SW_RESET_TIMEOUT; n++) {
+ if (!(sgmii_pcs_read(priv, MII_BMCR) & BMCR_RESET)) {
+ netdev_info(dev, "SGMII PCS block initialised OK\n");
+ return 0;
+ }
+ udelay(1);
+ }
+
+ /* We failed to reset the block, return a timeout */
+ netdev_err(dev, "SGMII PCS block reset failed.\n");
+ return -ETIMEDOUT;
+}
+
/* Open and initialize the interface
*/
static int tse_open(struct net_device *dev)
@@ -1106,6 +1179,15 @@ static int tse_open(struct net_device *dev)
netdev_warn(dev, "TSE revision %x\n", priv->revision);
spin_lock(&priv->mac_cfg_lock);
+ /* no-op if MAC not operating in SGMII mode */
+ ret = init_sgmii_pcs(dev);
+ if (ret) {
+ netdev_err(dev,
+ "Cannot init the SGMII PCS (error: %d)\n", ret);
+ spin_unlock(&priv->mac_cfg_lock);
+ goto phy_error;
+ }
+
ret = reset_mac(priv);
/* Note that reset_mac will fail if the clocks are gated by the PHY
* due to the PHY being put into isolation or power down mode.
@@ -1328,11 +1410,13 @@ static int altera_tse_probe(struct platform_device *pdev)
if (upper_32_bits(priv->rxdescmem_busaddr)) {
dev_dbg(priv->device,
"SGDMA bus addresses greater than 32-bits\n");
+ ret = -EINVAL;
goto err_free_netdev;
}
if (upper_32_bits(priv->txdescmem_busaddr)) {
dev_dbg(priv->device,
"SGDMA bus addresses greater than 32-bits\n");
+ ret = -EINVAL;
goto err_free_netdev;
}
} else if (priv->dmaops &&
@@ -1436,15 +1520,16 @@ static int altera_tse_probe(struct platform_device *pdev)
of_property_read_bool(pdev->dev.of_node,
"altr,has-supplementary-unicast");
+ priv->dev->min_mtu = ETH_ZLEN + ETH_FCS_LEN;
/* Max MTU is 1500, ETH_DATA_LEN */
- priv->max_mtu = ETH_DATA_LEN;
+ priv->dev->max_mtu = ETH_DATA_LEN;
/* Get the max mtu from the device tree. Note that the
* "max-frame-size" parameter is actually max mtu. Definition
* in the ePAPR v1.1 spec and usage differ, so go with usage.
*/
of_property_read_u32(pdev->dev.of_node, "max-frame-size",
- &priv->max_mtu);
+ &priv->dev->max_mtu);
/* The DMA buffer size already accounts for an alignment bias
* to avoid unaligned access exceptions for the NIOS processor,
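
init_sgmii_pcs() above ends with a bounded busy-wait on the self-clearing BMCR_RESET bit, the usual shape for short hardware waits in paths where sleeping is undesirable: poll, udelay(), and give up with -ETIMEDOUT after a fixed budget. Stripped-down sketch, where demo_read()/demo_write() and the DEMO_* constants are hypothetical stand-ins for the driver's accessors:

    #define DEMO_RESET_TIMEOUT_US  100

    static int demo_reset_poll(struct demo_priv *priv)
    {
        int n;

        /* request the self-clearing reset */
        demo_write(priv, DEMO_CTRL, demo_read(priv, DEMO_CTRL) | DEMO_RESET);

        for (n = 0; n < DEMO_RESET_TIMEOUT_US; n++) {
            if (!(demo_read(priv, DEMO_CTRL) & DEMO_RESET))
                return 0;    /* bit dropped, reset complete */
            udelay(1);
        }

        return -ETIMEDOUT;
    }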
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
index bfeaec5bd7b9..cc8b13ebfa75 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
@@ -103,13 +103,6 @@ static int ena_change_mtu(struct net_device *dev, int new_mtu)
struct ena_adapter *adapter = netdev_priv(dev);
int ret;
- if ((new_mtu > adapter->max_mtu) || (new_mtu < ENA_MIN_MTU)) {
- netif_err(adapter, drv, dev,
- "Invalid MTU setting. new_mtu: %d\n", new_mtu);
-
- return -EINVAL;
- }
-
ret = ena_com_set_dev_mtu(adapter->ena_dev, new_mtu);
if (!ret) {
netif_dbg(adapter, drv, dev, "set MTU to %d\n", new_mtu);
@@ -2755,6 +2748,8 @@ static void ena_set_conf_feat_params(struct ena_adapter *adapter,
ena_set_dev_offloads(feat, netdev);
adapter->max_mtu = feat->dev_attr.max_mtu;
+ netdev->max_mtu = adapter->max_mtu;
+ netdev->min_mtu = ENA_MIN_MTU;
}
static int ena_rss_init_default(struct ena_adapter *adapter)
@@ -3018,12 +3013,9 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
adapter->last_keep_alive_jiffies = jiffies;
- init_timer(&adapter->timer_service);
- adapter->timer_service.expires = round_jiffies(jiffies + HZ);
- adapter->timer_service.function = ena_timer_service;
- adapter->timer_service.data = (unsigned long)adapter;
-
- add_timer(&adapter->timer_service);
+ setup_timer(&adapter->timer_service, ena_timer_service,
+ (unsigned long)adapter);
+ mod_timer(&adapter->timer_service, round_jiffies(jiffies + HZ));
dev_info(&pdev->dev, "%s found at mem %lx, mac addr %pM Queues %d\n",
DEVICE_NAME, (long)pci_resource_start(pdev, 0),
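
The ena hunk above is a pure API cleanup: setup_timer() replaces init_timer() plus the open-coded function/data assignments, and mod_timer() both arms the timer initially and, when called again from the callback, re-arms it. A sketch of the resulting periodic-service shape under the pre-4.15 timer API used in this tree (demo_* names are illustrative):

    struct demo_adapter {
        struct timer_list timer;
    };

    static void demo_timer_service(unsigned long data)
    {
        struct demo_adapter *adapter = (struct demo_adapter *)data;

        /* ... periodic housekeeping ... */

        mod_timer(&adapter->timer, round_jiffies(jiffies + HZ));
    }

    static void demo_timer_start(struct demo_adapter *adapter)
    {
        setup_timer(&adapter->timer, demo_timer_service,
                    (unsigned long)adapter);
        mod_timer(&adapter->timer, round_jiffies(jiffies + HZ));
    }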
diff --git a/drivers/net/ethernet/amd/Kconfig b/drivers/net/ethernet/amd/Kconfig
index 0038709fd317..d5c15e8bb3de 100644
--- a/drivers/net/ethernet/amd/Kconfig
+++ b/drivers/net/ethernet/amd/Kconfig
@@ -173,11 +173,13 @@ config SUNLANCE
config AMD_XGBE
tristate "AMD 10GbE Ethernet driver"
- depends on ((OF_NET && OF_ADDRESS) || ACPI) && HAS_IOMEM && HAS_DMA
- depends on ARM64 || COMPILE_TEST
+ depends on ((OF_NET && OF_ADDRESS) || ACPI || PCI) && HAS_IOMEM && HAS_DMA
+ depends on X86 || ARM64 || COMPILE_TEST
select BITREVERSE
select CRC32
- select PTP_1588_CLOCK
+ select PHYLIB
+ select AMD_XGBE_HAVE_ECC if X86
+ imply PTP_1588_CLOCK
---help---
This driver supports the AMD 10GbE Ethernet device found on an
AMD SoC.
@@ -195,4 +197,8 @@ config AMD_XGBE_DCB
If unsure, say N.
+config AMD_XGBE_HAVE_ECC
+ bool
+ default n
+
endif # NET_VENDOR_AMD
diff --git a/drivers/net/ethernet/amd/a2065.c b/drivers/net/ethernet/amd/a2065.c
index a83cd1c4ce1d..ee4b94e3cda9 100644
--- a/drivers/net/ethernet/amd/a2065.c
+++ b/drivers/net/ethernet/amd/a2065.c
@@ -665,7 +665,6 @@ static const struct net_device_ops lance_netdev_ops = {
.ndo_tx_timeout = lance_tx_timeout,
.ndo_set_rx_mode = lance_set_multicast,
.ndo_validate_addr = eth_validate_addr,
- .ndo_change_mtu = eth_change_mtu,
.ndo_set_mac_address = eth_mac_addr,
};
diff --git a/drivers/net/ethernet/amd/am79c961a.c b/drivers/net/ethernet/amd/am79c961a.c
index fcdf5dda448f..b11e910850f7 100644
--- a/drivers/net/ethernet/amd/am79c961a.c
+++ b/drivers/net/ethernet/amd/am79c961a.c
@@ -663,7 +663,6 @@ static const struct net_device_ops am79c961_netdev_ops = {
.ndo_set_rx_mode = am79c961_setmulticastlist,
.ndo_tx_timeout = am79c961_timeout,
.ndo_validate_addr = eth_validate_addr,
- .ndo_change_mtu = eth_change_mtu,
.ndo_set_mac_address = eth_mac_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = am79c961_poll_controller,
diff --git a/drivers/net/ethernet/amd/amd8111e.c b/drivers/net/ethernet/amd/amd8111e.c
index f92cc97151ec..11cf1e3e0295 100644
--- a/drivers/net/ethernet/amd/amd8111e.c
+++ b/drivers/net/ethernet/amd/amd8111e.c
@@ -1421,21 +1421,23 @@ static void amd8111e_get_regs(struct net_device *dev, struct ethtool_regs *regs,
amd8111e_read_regs(lp, buf);
}
-static int amd8111e_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
+static int amd8111e_get_link_ksettings(struct net_device *dev,
+ struct ethtool_link_ksettings *cmd)
{
struct amd8111e_priv *lp = netdev_priv(dev);
spin_lock_irq(&lp->lock);
- mii_ethtool_gset(&lp->mii_if, ecmd);
+ mii_ethtool_get_link_ksettings(&lp->mii_if, cmd);
spin_unlock_irq(&lp->lock);
return 0;
}
-static int amd8111e_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
+static int amd8111e_set_link_ksettings(struct net_device *dev,
+ const struct ethtool_link_ksettings *cmd)
{
struct amd8111e_priv *lp = netdev_priv(dev);
int res;
spin_lock_irq(&lp->lock);
- res = mii_ethtool_sset(&lp->mii_if, ecmd);
+ res = mii_ethtool_set_link_ksettings(&lp->mii_if, cmd);
spin_unlock_irq(&lp->lock);
return res;
}
@@ -1482,12 +1484,12 @@ static const struct ethtool_ops ops = {
.get_drvinfo = amd8111e_get_drvinfo,
.get_regs_len = amd8111e_get_regs_len,
.get_regs = amd8111e_get_regs,
- .get_settings = amd8111e_get_settings,
- .set_settings = amd8111e_set_settings,
.nway_reset = amd8111e_nway_reset,
.get_link = amd8111e_get_link,
.get_wol = amd8111e_get_wol,
.set_wol = amd8111e_set_wol,
+ .get_link_ksettings = amd8111e_get_link_ksettings,
+ .set_link_ksettings = amd8111e_set_link_ksettings,
};
/* This function handles all the ethtool ioctls. It gives driver info,
@@ -1556,9 +1558,6 @@ static int amd8111e_change_mtu(struct net_device *dev, int new_mtu)
struct amd8111e_priv *lp = netdev_priv(dev);
int err;
- if ((new_mtu < AMD8111E_MIN_MTU) || (new_mtu > AMD8111E_MAX_MTU))
- return -EINVAL;
-
if (!netif_running(dev)) {
/* new_mtu will be used
* when device starts next time
@@ -1874,6 +1873,8 @@ static int amd8111e_probe_one(struct pci_dev *pdev,
dev->ethtool_ops = &ops;
dev->irq =pdev->irq;
dev->watchdog_timeo = AMD8111E_TX_TIMEOUT;
+ dev->min_mtu = AMD8111E_MIN_MTU;
+ dev->max_mtu = AMD8111E_MAX_MTU;
netif_napi_add(dev, &lp->napi, amd8111e_rx_poll, 32);
#if AMD8111E_VLAN_TAG_USED
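
Drivers that already route link management through struct mii_if_info, as amd8111e does, get the ksettings conversion almost for free: mii_ethtool_get_link_ksettings() and mii_ethtool_set_link_ksettings() do the whole translation, and the driver supplies only its own locking. Sketch of the get side (struct layout and lock choice are illustrative):

    struct demo_priv {
        spinlock_t lock;
        struct mii_if_info mii_if;
    };

    static int demo_get_link_ksettings(struct net_device *dev,
                                       struct ethtool_link_ksettings *cmd)
    {
        struct demo_priv *lp = netdev_priv(dev);

        spin_lock_irq(&lp->lock);
        mii_ethtool_get_link_ksettings(&lp->mii_if, cmd);
        spin_unlock_irq(&lp->lock);

        return 0;
    }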
diff --git a/drivers/net/ethernet/amd/ariadne.c b/drivers/net/ethernet/amd/ariadne.c
index 968b7bfac8fc..5fd7b15b0574 100644
--- a/drivers/net/ethernet/amd/ariadne.c
+++ b/drivers/net/ethernet/amd/ariadne.c
@@ -706,7 +706,6 @@ static const struct net_device_ops ariadne_netdev_ops = {
.ndo_get_stats = ariadne_get_stats,
.ndo_set_rx_mode = set_multicast_list,
.ndo_validate_addr = eth_validate_addr,
- .ndo_change_mtu = eth_change_mtu,
.ndo_set_mac_address = eth_mac_addr,
};
diff --git a/drivers/net/ethernet/amd/atarilance.c b/drivers/net/ethernet/amd/atarilance.c
index d2bc8e5dcd23..796c37a5bbde 100644
--- a/drivers/net/ethernet/amd/atarilance.c
+++ b/drivers/net/ethernet/amd/atarilance.c
@@ -460,7 +460,6 @@ static const struct net_device_ops lance_netdev_ops = {
.ndo_set_mac_address = lance_set_mac_address,
.ndo_tx_timeout = lance_tx_timeout,
.ndo_validate_addr = eth_validate_addr,
- .ndo_change_mtu = eth_change_mtu,
};
static unsigned long __init lance_probe1( struct net_device *dev,
@@ -1013,13 +1012,9 @@ static int lance_rx( struct net_device *dev )
u_char *data = PKTBUF_ADDR(head);
printk(KERN_DEBUG "%s: RX pkt type 0x%04x from %pM to %pM "
- "data %02x %02x %02x %02x %02x %02x %02x %02x "
- "len %d\n",
+ "data %8ph len %d\n",
dev->name, ((u_short *)data)[6],
- &data[6], data,
- data[15], data[16], data[17], data[18],
- data[19], data[20], data[21], data[22],
- pkt_len);
+ &data[6], data, &data[15], pkt_len);
}
skb_reserve( skb, 2 ); /* 16 byte align */
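
The atarilance printk change leans on the kernel's %*ph format extension: "%8ph" hex-dumps eight bytes from the pointed-to buffer, space separated (the extension handles up to 64 bytes), replacing the hand-rolled run of %02x conversions and their long argument list. Tiny sketch:

    static void demo_dump_hdr(const u8 *data)
    {
        /* prints e.g. "hdr bytes: de ad be ef 00 11 22 33" */
        printk(KERN_DEBUG "hdr bytes: %8ph\n", data);
    }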
diff --git a/drivers/net/ethernet/amd/au1000_eth.c b/drivers/net/ethernet/amd/au1000_eth.c
index df664187cd82..a3c90fe5de00 100644
--- a/drivers/net/ethernet/amd/au1000_eth.c
+++ b/drivers/net/ethernet/amd/au1000_eth.c
@@ -1103,7 +1103,6 @@ static const struct net_device_ops au1000_netdev_ops = {
.ndo_tx_timeout = au1000_tx_timeout,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
- .ndo_change_mtu = eth_change_mtu,
};
static int au1000_probe(struct platform_device *pdev)
diff --git a/drivers/net/ethernet/amd/declance.c b/drivers/net/ethernet/amd/declance.c
index b799c7ac899b..76e5fc7adff5 100644
--- a/drivers/net/ethernet/amd/declance.c
+++ b/drivers/net/ethernet/amd/declance.c
@@ -1013,7 +1013,6 @@ static const struct net_device_ops lance_netdev_ops = {
.ndo_start_xmit = lance_start_xmit,
.ndo_tx_timeout = lance_tx_timeout,
.ndo_set_rx_mode = lance_set_multicast,
- .ndo_change_mtu = eth_change_mtu,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
};
diff --git a/drivers/net/ethernet/amd/hplance.c b/drivers/net/ethernet/amd/hplance.c
index 6c9de117ffc6..c3dbf1c8a269 100644
--- a/drivers/net/ethernet/amd/hplance.c
+++ b/drivers/net/ethernet/amd/hplance.c
@@ -72,7 +72,6 @@ static const struct net_device_ops hplance_netdev_ops = {
.ndo_stop = hplance_close,
.ndo_start_xmit = lance_start_xmit,
.ndo_set_rx_mode = lance_set_multicast,
- .ndo_change_mtu = eth_change_mtu,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
diff --git a/drivers/net/ethernet/amd/lance.c b/drivers/net/ethernet/amd/lance.c
index abb1ba228b26..61a641f23149 100644
--- a/drivers/net/ethernet/amd/lance.c
+++ b/drivers/net/ethernet/amd/lance.c
@@ -461,7 +461,6 @@ static const struct net_device_ops lance_netdev_ops = {
.ndo_get_stats = lance_get_stats,
.ndo_set_rx_mode = set_multicast_list,
.ndo_tx_timeout = lance_tx_timeout,
- .ndo_change_mtu = eth_change_mtu,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
};
diff --git a/drivers/net/ethernet/amd/mvme147.c b/drivers/net/ethernet/amd/mvme147.c
index 0660ac5846bb..0a920448522f 100644
--- a/drivers/net/ethernet/amd/mvme147.c
+++ b/drivers/net/ethernet/amd/mvme147.c
@@ -62,7 +62,6 @@ static const struct net_device_ops lance_netdev_ops = {
.ndo_start_xmit = lance_start_xmit,
.ndo_set_rx_mode = lance_set_multicast,
.ndo_tx_timeout = lance_tx_timeout,
- .ndo_change_mtu = eth_change_mtu,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
};
diff --git a/drivers/net/ethernet/amd/ni65.c b/drivers/net/ethernet/amd/ni65.c
index cda53db75f17..5985bf220a8d 100644
--- a/drivers/net/ethernet/amd/ni65.c
+++ b/drivers/net/ethernet/amd/ni65.c
@@ -407,7 +407,6 @@ static const struct net_device_ops ni65_netdev_ops = {
.ndo_start_xmit = ni65_send_packet,
.ndo_tx_timeout = ni65_timeout,
.ndo_set_rx_mode = set_multicast_list,
- .ndo_change_mtu = eth_change_mtu,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
};
diff --git a/drivers/net/ethernet/amd/nmclan_cs.c b/drivers/net/ethernet/amd/nmclan_cs.c
index 2807e181647b..113a3b3cc50c 100644
--- a/drivers/net/ethernet/amd/nmclan_cs.c
+++ b/drivers/net/ethernet/amd/nmclan_cs.c
@@ -427,7 +427,6 @@ static const struct net_device_ops mace_netdev_ops = {
.ndo_set_config = mace_config,
.ndo_get_stats = mace_get_stats,
.ndo_set_rx_mode = set_multicast_list,
- .ndo_change_mtu = eth_change_mtu,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
};
diff --git a/drivers/net/ethernet/amd/pcnet32.c b/drivers/net/ethernet/amd/pcnet32.c
index c22bf52d3320..41e58cca8fee 100644
--- a/drivers/net/ethernet/amd/pcnet32.c
+++ b/drivers/net/ethernet/amd/pcnet32.c
@@ -677,7 +677,8 @@ static void pcnet32_poll_controller(struct net_device *dev)
}
#endif
-static int pcnet32_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+static int pcnet32_get_link_ksettings(struct net_device *dev,
+ struct ethtool_link_ksettings *cmd)
{
struct pcnet32_private *lp = netdev_priv(dev);
unsigned long flags;
@@ -685,14 +686,15 @@ static int pcnet32_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
if (lp->mii) {
spin_lock_irqsave(&lp->lock, flags);
- mii_ethtool_gset(&lp->mii_if, cmd);
+ mii_ethtool_get_link_ksettings(&lp->mii_if, cmd);
spin_unlock_irqrestore(&lp->lock, flags);
r = 0;
}
return r;
}
-static int pcnet32_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+static int pcnet32_set_link_ksettings(struct net_device *dev,
+ const struct ethtool_link_ksettings *cmd)
{
struct pcnet32_private *lp = netdev_priv(dev);
unsigned long flags;
@@ -700,7 +702,7 @@ static int pcnet32_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
if (lp->mii) {
spin_lock_irqsave(&lp->lock, flags);
- r = mii_ethtool_sset(&lp->mii_if, cmd);
+ r = mii_ethtool_set_link_ksettings(&lp->mii_if, cmd);
spin_unlock_irqrestore(&lp->lock, flags);
}
return r;
@@ -1440,8 +1442,6 @@ static void pcnet32_get_regs(struct net_device *dev, struct ethtool_regs *regs,
}
static const struct ethtool_ops pcnet32_ethtool_ops = {
- .get_settings = pcnet32_get_settings,
- .set_settings = pcnet32_set_settings,
.get_drvinfo = pcnet32_get_drvinfo,
.get_msglevel = pcnet32_get_msglevel,
.set_msglevel = pcnet32_set_msglevel,
@@ -1455,6 +1455,8 @@ static const struct ethtool_ops pcnet32_ethtool_ops = {
.get_regs_len = pcnet32_get_regs_len,
.get_regs = pcnet32_get_regs,
.get_sset_count = pcnet32_get_sset_count,
+ .get_link_ksettings = pcnet32_get_link_ksettings,
+ .set_link_ksettings = pcnet32_set_link_ksettings,
};
/* only probes for non-PCI devices, the rest are handled by
@@ -1527,7 +1529,6 @@ static const struct net_device_ops pcnet32_netdev_ops = {
.ndo_get_stats = pcnet32_get_stats,
.ndo_set_rx_mode = pcnet32_set_multicast_list,
.ndo_do_ioctl = pcnet32_ioctl,
- .ndo_change_mtu = eth_change_mtu,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
diff --git a/drivers/net/ethernet/amd/sun3lance.c b/drivers/net/ethernet/amd/sun3lance.c
index 3d8c6b2cdea4..12bb4f1489fc 100644
--- a/drivers/net/ethernet/amd/sun3lance.c
+++ b/drivers/net/ethernet/amd/sun3lance.c
@@ -299,7 +299,6 @@ static const struct net_device_ops lance_netdev_ops = {
.ndo_start_xmit = lance_start_xmit,
.ndo_set_rx_mode = set_multicast_list,
.ndo_set_mac_address = NULL,
- .ndo_change_mtu = eth_change_mtu,
.ndo_validate_addr = eth_validate_addr,
};
diff --git a/drivers/net/ethernet/amd/sunlance.c b/drivers/net/ethernet/amd/sunlance.c
index 9b56b40259dc..291ca5187f12 100644
--- a/drivers/net/ethernet/amd/sunlance.c
+++ b/drivers/net/ethernet/amd/sunlance.c
@@ -1294,7 +1294,6 @@ static const struct net_device_ops sparc_lance_ops = {
.ndo_start_xmit = lance_start_xmit,
.ndo_set_rx_mode = lance_set_multicast,
.ndo_tx_timeout = lance_tx_timeout,
- .ndo_change_mtu = eth_change_mtu,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
};
diff --git a/drivers/net/ethernet/amd/xgbe/Makefile b/drivers/net/ethernet/amd/xgbe/Makefile
index 171a7e68048d..0dea8f5da899 100644
--- a/drivers/net/ethernet/amd/xgbe/Makefile
+++ b/drivers/net/ethernet/amd/xgbe/Makefile
@@ -2,7 +2,10 @@ obj-$(CONFIG_AMD_XGBE) += amd-xgbe.o
amd-xgbe-objs := xgbe-main.o xgbe-drv.o xgbe-dev.o \
xgbe-desc.o xgbe-ethtool.o xgbe-mdio.o \
- xgbe-ptp.o
+ xgbe-ptp.o \
+ xgbe-i2c.o xgbe-phy-v1.o xgbe-phy-v2.o \
+ xgbe-platform.o
+amd-xgbe-$(CONFIG_PCI) += xgbe-pci.o
amd-xgbe-$(CONFIG_AMD_XGBE_DCB) += xgbe-dcb.o
amd-xgbe-$(CONFIG_DEBUG_FS) += xgbe-debugfs.o
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-common.h b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
index bbef95973c27..5b7ba25e0065 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-common.h
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
@@ -159,6 +159,8 @@
#define DMA_ISR_MACIS_WIDTH 1
#define DMA_ISR_MTLIS_INDEX 16
#define DMA_ISR_MTLIS_WIDTH 1
+#define DMA_MR_INTM_INDEX 12
+#define DMA_MR_INTM_WIDTH 2
#define DMA_MR_SWR_INDEX 0
#define DMA_MR_SWR_WIDTH 1
#define DMA_SBMR_EAME_INDEX 11
@@ -309,6 +311,11 @@
#define MAC_HWF0R 0x011c
#define MAC_HWF1R 0x0120
#define MAC_HWF2R 0x0124
+#define MAC_MDIOSCAR 0x0200
+#define MAC_MDIOSCCDR 0x0204
+#define MAC_MDIOISR 0x0214
+#define MAC_MDIOIER 0x0218
+#define MAC_MDIOCL22R 0x0220
#define MAC_GPIOCR 0x0278
#define MAC_GPIOSR 0x027c
#define MAC_MACA0HR 0x0300
@@ -409,10 +416,34 @@
#define MAC_ISR_MMCTXIS_WIDTH 1
#define MAC_ISR_PMTIS_INDEX 4
#define MAC_ISR_PMTIS_WIDTH 1
+#define MAC_ISR_SMI_INDEX 1
+#define MAC_ISR_SMI_WIDTH 1
#define MAC_ISR_TSIS_INDEX 12
#define MAC_ISR_TSIS_WIDTH 1
#define MAC_MACA1HR_AE_INDEX 31
#define MAC_MACA1HR_AE_WIDTH 1
+#define MAC_MDIOIER_SNGLCOMPIE_INDEX 12
+#define MAC_MDIOIER_SNGLCOMPIE_WIDTH 1
+#define MAC_MDIOISR_SNGLCOMPINT_INDEX 12
+#define MAC_MDIOISR_SNGLCOMPINT_WIDTH 1
+#define MAC_MDIOSCAR_DA_INDEX 21
+#define MAC_MDIOSCAR_DA_WIDTH 5
+#define MAC_MDIOSCAR_PA_INDEX 16
+#define MAC_MDIOSCAR_PA_WIDTH 5
+#define MAC_MDIOSCAR_RA_INDEX 0
+#define MAC_MDIOSCAR_RA_WIDTH 16
+#define MAC_MDIOSCAR_REG_INDEX 0
+#define MAC_MDIOSCAR_REG_WIDTH 21
+#define MAC_MDIOSCCDR_BUSY_INDEX 22
+#define MAC_MDIOSCCDR_BUSY_WIDTH 1
+#define MAC_MDIOSCCDR_CMD_INDEX 16
+#define MAC_MDIOSCCDR_CMD_WIDTH 2
+#define MAC_MDIOSCCDR_CR_INDEX 19
+#define MAC_MDIOSCCDR_CR_WIDTH 3
+#define MAC_MDIOSCCDR_DATA_INDEX 0
+#define MAC_MDIOSCCDR_DATA_WIDTH 16
+#define MAC_MDIOSCCDR_SADDR_INDEX 18
+#define MAC_MDIOSCCDR_SADDR_WIDTH 1
#define MAC_PFR_HMC_INDEX 2
#define MAC_PFR_HMC_WIDTH 1
#define MAC_PFR_HPF_INDEX 10
@@ -790,6 +821,10 @@
#define MTL_Q_RQOMR_RSF_WIDTH 1
#define MTL_Q_RQOMR_RTC_INDEX 0
#define MTL_Q_RQOMR_RTC_WIDTH 2
+#define MTL_Q_TQDR_TRCSTS_INDEX 1
+#define MTL_Q_TQDR_TRCSTS_WIDTH 2
+#define MTL_Q_TQDR_TXQSTS_INDEX 4
+#define MTL_Q_TQDR_TXQSTS_WIDTH 1
#define MTL_Q_TQOMR_FTQ_INDEX 0
#define MTL_Q_TQOMR_FTQ_WIDTH 1
#define MTL_Q_TQOMR_Q2TCMAP_INDEX 8
@@ -852,14 +887,16 @@
#define MTL_TSA_SP 0x00
#define MTL_TSA_ETS 0x02
-/* PCS MMD select register offset
- * The MMD select register is used for accessing PCS registers
- * when the underlying APB3 interface is using indirect addressing.
- * Indirect addressing requires accessing registers in two phases,
- * an address phase and a data phase. The address phases requires
- * writing an address selection value to the MMD select regiesters.
- */
-#define PCS_MMD_SELECT 0xff
+/* PCS register offsets */
+#define PCS_V1_WINDOW_SELECT 0x03fc
+#define PCS_V2_WINDOW_DEF 0x9060
+#define PCS_V2_WINDOW_SELECT 0x9064
+
+/* PCS register entry bit positions and sizes */
+#define PCS_V2_WINDOW_DEF_OFFSET_INDEX 6
+#define PCS_V2_WINDOW_DEF_OFFSET_WIDTH 14
+#define PCS_V2_WINDOW_DEF_SIZE_INDEX 2
+#define PCS_V2_WINDOW_DEF_SIZE_WIDTH 4
/* SerDes integration register offsets */
#define SIR0_KR_RT_1 0x002c
@@ -903,6 +940,198 @@
#define RXTX_REG129_RXDFE_CONFIG_INDEX 14
#define RXTX_REG129_RXDFE_CONFIG_WIDTH 2
+/* MAC Control register offsets */
+#define XP_PROP_0 0x0000
+#define XP_PROP_1 0x0004
+#define XP_PROP_2 0x0008
+#define XP_PROP_3 0x000c
+#define XP_PROP_4 0x0010
+#define XP_PROP_5 0x0014
+#define XP_MAC_ADDR_LO 0x0020
+#define XP_MAC_ADDR_HI 0x0024
+#define XP_ECC_ISR 0x0030
+#define XP_ECC_IER 0x0034
+#define XP_ECC_CNT0 0x003c
+#define XP_ECC_CNT1 0x0040
+#define XP_DRIVER_INT_REQ 0x0060
+#define XP_DRIVER_INT_RO 0x0064
+#define XP_DRIVER_SCRATCH_0 0x0068
+#define XP_DRIVER_SCRATCH_1 0x006c
+#define XP_INT_EN 0x0078
+#define XP_I2C_MUTEX 0x0080
+#define XP_MDIO_MUTEX 0x0084
+
+/* MAC Control register entry bit positions and sizes */
+#define XP_DRIVER_INT_REQ_REQUEST_INDEX 0
+#define XP_DRIVER_INT_REQ_REQUEST_WIDTH 1
+#define XP_DRIVER_INT_RO_STATUS_INDEX 0
+#define XP_DRIVER_INT_RO_STATUS_WIDTH 1
+#define XP_DRIVER_SCRATCH_0_COMMAND_INDEX 0
+#define XP_DRIVER_SCRATCH_0_COMMAND_WIDTH 8
+#define XP_DRIVER_SCRATCH_0_SUB_COMMAND_INDEX 8
+#define XP_DRIVER_SCRATCH_0_SUB_COMMAND_WIDTH 8
+#define XP_ECC_CNT0_RX_DED_INDEX 24
+#define XP_ECC_CNT0_RX_DED_WIDTH 8
+#define XP_ECC_CNT0_RX_SEC_INDEX 16
+#define XP_ECC_CNT0_RX_SEC_WIDTH 8
+#define XP_ECC_CNT0_TX_DED_INDEX 8
+#define XP_ECC_CNT0_TX_DED_WIDTH 8
+#define XP_ECC_CNT0_TX_SEC_INDEX 0
+#define XP_ECC_CNT0_TX_SEC_WIDTH 8
+#define XP_ECC_CNT1_DESC_DED_INDEX 8
+#define XP_ECC_CNT1_DESC_DED_WIDTH 8
+#define XP_ECC_CNT1_DESC_SEC_INDEX 0
+#define XP_ECC_CNT1_DESC_SEC_WIDTH 8
+#define XP_ECC_IER_DESC_DED_INDEX 0
+#define XP_ECC_IER_DESC_DED_WIDTH 1
+#define XP_ECC_IER_DESC_SEC_INDEX 1
+#define XP_ECC_IER_DESC_SEC_WIDTH 1
+#define XP_ECC_IER_RX_DED_INDEX 2
+#define XP_ECC_IER_RX_DED_WIDTH 1
+#define XP_ECC_IER_RX_SEC_INDEX 3
+#define XP_ECC_IER_RX_SEC_WIDTH 1
+#define XP_ECC_IER_TX_DED_INDEX 4
+#define XP_ECC_IER_TX_DED_WIDTH 1
+#define XP_ECC_IER_TX_SEC_INDEX 5
+#define XP_ECC_IER_TX_SEC_WIDTH 1
+#define XP_ECC_ISR_DESC_DED_INDEX 0
+#define XP_ECC_ISR_DESC_DED_WIDTH 1
+#define XP_ECC_ISR_DESC_SEC_INDEX 1
+#define XP_ECC_ISR_DESC_SEC_WIDTH 1
+#define XP_ECC_ISR_RX_DED_INDEX 2
+#define XP_ECC_ISR_RX_DED_WIDTH 1
+#define XP_ECC_ISR_RX_SEC_INDEX 3
+#define XP_ECC_ISR_RX_SEC_WIDTH 1
+#define XP_ECC_ISR_TX_DED_INDEX 4
+#define XP_ECC_ISR_TX_DED_WIDTH 1
+#define XP_ECC_ISR_TX_SEC_INDEX 5
+#define XP_ECC_ISR_TX_SEC_WIDTH 1
+#define XP_I2C_MUTEX_BUSY_INDEX 31
+#define XP_I2C_MUTEX_BUSY_WIDTH 1
+#define XP_I2C_MUTEX_ID_INDEX 29
+#define XP_I2C_MUTEX_ID_WIDTH 2
+#define XP_I2C_MUTEX_ACTIVE_INDEX 0
+#define XP_I2C_MUTEX_ACTIVE_WIDTH 1
+#define XP_MAC_ADDR_HI_VALID_INDEX 31
+#define XP_MAC_ADDR_HI_VALID_WIDTH 1
+#define XP_PROP_0_CONN_TYPE_INDEX 28
+#define XP_PROP_0_CONN_TYPE_WIDTH 3
+#define XP_PROP_0_MDIO_ADDR_INDEX 16
+#define XP_PROP_0_MDIO_ADDR_WIDTH 5
+#define XP_PROP_0_PORT_ID_INDEX 0
+#define XP_PROP_0_PORT_ID_WIDTH 8
+#define XP_PROP_0_PORT_MODE_INDEX 8
+#define XP_PROP_0_PORT_MODE_WIDTH 4
+#define XP_PROP_0_PORT_SPEEDS_INDEX 23
+#define XP_PROP_0_PORT_SPEEDS_WIDTH 4
+#define XP_PROP_1_MAX_RX_DMA_INDEX 24
+#define XP_PROP_1_MAX_RX_DMA_WIDTH 5
+#define XP_PROP_1_MAX_RX_QUEUES_INDEX 8
+#define XP_PROP_1_MAX_RX_QUEUES_WIDTH 5
+#define XP_PROP_1_MAX_TX_DMA_INDEX 16
+#define XP_PROP_1_MAX_TX_DMA_WIDTH 5
+#define XP_PROP_1_MAX_TX_QUEUES_INDEX 0
+#define XP_PROP_1_MAX_TX_QUEUES_WIDTH 5
+#define XP_PROP_2_RX_FIFO_SIZE_INDEX 16
+#define XP_PROP_2_RX_FIFO_SIZE_WIDTH 16
+#define XP_PROP_2_TX_FIFO_SIZE_INDEX 0
+#define XP_PROP_2_TX_FIFO_SIZE_WIDTH 16
+#define XP_PROP_3_GPIO_MASK_INDEX 28
+#define XP_PROP_3_GPIO_MASK_WIDTH 4
+#define XP_PROP_3_GPIO_MOD_ABS_INDEX 20
+#define XP_PROP_3_GPIO_MOD_ABS_WIDTH 4
+#define XP_PROP_3_GPIO_RATE_SELECT_INDEX 16
+#define XP_PROP_3_GPIO_RATE_SELECT_WIDTH 4
+#define XP_PROP_3_GPIO_RX_LOS_INDEX 24
+#define XP_PROP_3_GPIO_RX_LOS_WIDTH 4
+#define XP_PROP_3_GPIO_TX_FAULT_INDEX 12
+#define XP_PROP_3_GPIO_TX_FAULT_WIDTH 4
+#define XP_PROP_3_GPIO_ADDR_INDEX 8
+#define XP_PROP_3_GPIO_ADDR_WIDTH 3
+#define XP_PROP_3_MDIO_RESET_INDEX 0
+#define XP_PROP_3_MDIO_RESET_WIDTH 2
+#define XP_PROP_3_MDIO_RESET_I2C_ADDR_INDEX 8
+#define XP_PROP_3_MDIO_RESET_I2C_ADDR_WIDTH 3
+#define XP_PROP_3_MDIO_RESET_I2C_GPIO_INDEX 12
+#define XP_PROP_3_MDIO_RESET_I2C_GPIO_WIDTH 4
+#define XP_PROP_3_MDIO_RESET_INT_GPIO_INDEX 4
+#define XP_PROP_3_MDIO_RESET_INT_GPIO_WIDTH 2
+#define XP_PROP_4_MUX_ADDR_HI_INDEX 8
+#define XP_PROP_4_MUX_ADDR_HI_WIDTH 5
+#define XP_PROP_4_MUX_ADDR_LO_INDEX 0
+#define XP_PROP_4_MUX_ADDR_LO_WIDTH 3
+#define XP_PROP_4_MUX_CHAN_INDEX 4
+#define XP_PROP_4_MUX_CHAN_WIDTH 3
+#define XP_PROP_4_REDRV_ADDR_INDEX 16
+#define XP_PROP_4_REDRV_ADDR_WIDTH 7
+#define XP_PROP_4_REDRV_IF_INDEX 23
+#define XP_PROP_4_REDRV_IF_WIDTH 1
+#define XP_PROP_4_REDRV_LANE_INDEX 24
+#define XP_PROP_4_REDRV_LANE_WIDTH 3
+#define XP_PROP_4_REDRV_MODEL_INDEX 28
+#define XP_PROP_4_REDRV_MODEL_WIDTH 3
+#define XP_PROP_4_REDRV_PRESENT_INDEX 31
+#define XP_PROP_4_REDRV_PRESENT_WIDTH 1
+
+/* I2C Control register offsets */
+#define IC_CON 0x0000
+#define IC_TAR 0x0004
+#define IC_DATA_CMD 0x0010
+#define IC_INTR_STAT 0x002c
+#define IC_INTR_MASK 0x0030
+#define IC_RAW_INTR_STAT 0x0034
+#define IC_CLR_INTR 0x0040
+#define IC_CLR_TX_ABRT 0x0054
+#define IC_CLR_STOP_DET 0x0060
+#define IC_ENABLE 0x006c
+#define IC_TXFLR 0x0074
+#define IC_RXFLR 0x0078
+#define IC_TX_ABRT_SOURCE 0x0080
+#define IC_ENABLE_STATUS 0x009c
+#define IC_COMP_PARAM_1 0x00f4
+
+/* I2C Control register entry bit positions and sizes */
+#define IC_COMP_PARAM_1_MAX_SPEED_MODE_INDEX 2
+#define IC_COMP_PARAM_1_MAX_SPEED_MODE_WIDTH 2
+#define IC_COMP_PARAM_1_RX_BUFFER_DEPTH_INDEX 8
+#define IC_COMP_PARAM_1_RX_BUFFER_DEPTH_WIDTH 8
+#define IC_COMP_PARAM_1_TX_BUFFER_DEPTH_INDEX 16
+#define IC_COMP_PARAM_1_TX_BUFFER_DEPTH_WIDTH 8
+#define IC_CON_MASTER_MODE_INDEX 0
+#define IC_CON_MASTER_MODE_WIDTH 1
+#define IC_CON_RESTART_EN_INDEX 5
+#define IC_CON_RESTART_EN_WIDTH 1
+#define IC_CON_RX_FIFO_FULL_HOLD_INDEX 9
+#define IC_CON_RX_FIFO_FULL_HOLD_WIDTH 1
+#define IC_CON_SLAVE_DISABLE_INDEX 6
+#define IC_CON_SLAVE_DISABLE_WIDTH 1
+#define IC_CON_SPEED_INDEX 1
+#define IC_CON_SPEED_WIDTH 2
+#define IC_DATA_CMD_CMD_INDEX 8
+#define IC_DATA_CMD_CMD_WIDTH 1
+#define IC_DATA_CMD_STOP_INDEX 9
+#define IC_DATA_CMD_STOP_WIDTH 1
+#define IC_ENABLE_ABORT_INDEX 1
+#define IC_ENABLE_ABORT_WIDTH 1
+#define IC_ENABLE_EN_INDEX 0
+#define IC_ENABLE_EN_WIDTH 1
+#define IC_ENABLE_STATUS_EN_INDEX 0
+#define IC_ENABLE_STATUS_EN_WIDTH 1
+#define IC_INTR_MASK_TX_EMPTY_INDEX 4
+#define IC_INTR_MASK_TX_EMPTY_WIDTH 1
+#define IC_RAW_INTR_STAT_RX_FULL_INDEX 2
+#define IC_RAW_INTR_STAT_RX_FULL_WIDTH 1
+#define IC_RAW_INTR_STAT_STOP_DET_INDEX 9
+#define IC_RAW_INTR_STAT_STOP_DET_WIDTH 1
+#define IC_RAW_INTR_STAT_TX_ABRT_INDEX 6
+#define IC_RAW_INTR_STAT_TX_ABRT_WIDTH 1
+#define IC_RAW_INTR_STAT_TX_EMPTY_INDEX 4
+#define IC_RAW_INTR_STAT_TX_EMPTY_WIDTH 1
+
+/* I2C Control register value */
+#define IC_TX_ABRT_7B_ADDR_NOACK 0x0001
+#define IC_TX_ABRT_ARB_LOST 0x1000
+
/* Descriptor/Packet entry bit positions and sizes */
#define RX_PACKET_ERRORS_CRC_INDEX 2
#define RX_PACKET_ERRORS_CRC_WIDTH 1
@@ -1027,6 +1256,10 @@
#define MDIO_PMA_10GBR_FECCTRL 0x00ab
#endif
+#ifndef MDIO_PCS_DIG_CTRL
+#define MDIO_PCS_DIG_CTRL 0x8000
+#endif
+
#ifndef MDIO_AN_XNP
#define MDIO_AN_XNP 0x0016
#endif
@@ -1047,11 +1280,48 @@
#define MDIO_AN_INT 0x8002
#endif
+#ifndef MDIO_VEND2_AN_ADVERTISE
+#define MDIO_VEND2_AN_ADVERTISE 0x0004
+#endif
+
+#ifndef MDIO_VEND2_AN_LP_ABILITY
+#define MDIO_VEND2_AN_LP_ABILITY 0x0005
+#endif
+
+#ifndef MDIO_VEND2_AN_CTRL
+#define MDIO_VEND2_AN_CTRL 0x8001
+#endif
+
+#ifndef MDIO_VEND2_AN_STAT
+#define MDIO_VEND2_AN_STAT 0x8002
+#endif
+
#ifndef MDIO_CTRL1_SPEED1G
#define MDIO_CTRL1_SPEED1G (MDIO_CTRL1_SPEED10G & ~BMCR_SPEED100)
#endif
+#ifndef MDIO_VEND2_CTRL1_AN_ENABLE
+#define MDIO_VEND2_CTRL1_AN_ENABLE BIT(12)
+#endif
+
+#ifndef MDIO_VEND2_CTRL1_AN_RESTART
+#define MDIO_VEND2_CTRL1_AN_RESTART BIT(9)
+#endif
+
+#ifndef MDIO_VEND2_CTRL1_SS6
+#define MDIO_VEND2_CTRL1_SS6 BIT(6)
+#endif
+
+#ifndef MDIO_VEND2_CTRL1_SS13
+#define MDIO_VEND2_CTRL1_SS13 BIT(13)
+#endif
+
/* MDIO mask values */
+#define XGBE_AN_CL73_INT_CMPLT BIT(0)
+#define XGBE_AN_CL73_INC_LINK BIT(1)
+#define XGBE_AN_CL73_PG_RCV BIT(2)
+#define XGBE_AN_CL73_INT_MASK 0x07
+
#define XGBE_XNP_MCF_NULL_MESSAGE 0x001
#define XGBE_XNP_ACK_PROCESSED BIT(12)
#define XGBE_XNP_MP_FORMATTED BIT(13)
@@ -1060,6 +1330,19 @@
#define XGBE_KR_TRAINING_START BIT(0)
#define XGBE_KR_TRAINING_ENABLE BIT(1)
+#define XGBE_PCS_CL37_BP BIT(12)
+
+#define XGBE_AN_CL37_INT_CMPLT BIT(0)
+#define XGBE_AN_CL37_INT_MASK 0x01
+
+#define XGBE_AN_CL37_HD_MASK 0x40
+#define XGBE_AN_CL37_FD_MASK 0x20
+
+#define XGBE_AN_CL37_PCS_MODE_MASK 0x06
+#define XGBE_AN_CL37_PCS_MODE_BASEX 0x00
+#define XGBE_AN_CL37_PCS_MODE_SGMII 0x04
+#define XGBE_AN_CL37_TX_CONFIG_MASK 0x08
+
/* Bit setting and getting macros
* The get macro will extract the current bit field value from within
* the variable
@@ -1195,12 +1478,28 @@ do { \
/* Macros for building, reading or writing register values or bits
* within the register values of XPCS registers.
*/
-#define XPCS_IOWRITE(_pdata, _off, _val) \
+#define XPCS_GET_BITS(_var, _prefix, _field) \
+ GET_BITS((_var), \
+ _prefix##_##_field##_INDEX, \
+ _prefix##_##_field##_WIDTH)
+
+#define XPCS_SET_BITS(_var, _prefix, _field, _val) \
+ SET_BITS((_var), \
+ _prefix##_##_field##_INDEX, \
+ _prefix##_##_field##_WIDTH, (_val))
+
+#define XPCS32_IOWRITE(_pdata, _off, _val) \
iowrite32(_val, (_pdata)->xpcs_regs + (_off))
-#define XPCS_IOREAD(_pdata, _off) \
+#define XPCS32_IOREAD(_pdata, _off) \
ioread32((_pdata)->xpcs_regs + (_off))
+#define XPCS16_IOWRITE(_pdata, _off, _val) \
+ iowrite16(_val, (_pdata)->xpcs_regs + (_off))
+
+#define XPCS16_IOREAD(_pdata, _off) \
+ ioread16((_pdata)->xpcs_regs + (_off))
+
/* Macros for building, reading or writing register values or bits
* within the register values of SerDes integration registers.
*/
@@ -1278,6 +1577,72 @@ do { \
} while (0)
/* Macros for building, reading or writing register values or bits
+ * within the register values of MAC Control registers.
+ */
+#define XP_GET_BITS(_var, _prefix, _field) \
+ GET_BITS((_var), \
+ _prefix##_##_field##_INDEX, \
+ _prefix##_##_field##_WIDTH)
+
+#define XP_SET_BITS(_var, _prefix, _field, _val) \
+ SET_BITS((_var), \
+ _prefix##_##_field##_INDEX, \
+ _prefix##_##_field##_WIDTH, (_val))
+
+#define XP_IOREAD(_pdata, _reg) \
+ ioread32((_pdata)->xprop_regs + (_reg))
+
+#define XP_IOREAD_BITS(_pdata, _reg, _field) \
+ GET_BITS(XP_IOREAD((_pdata), (_reg)), \
+ _reg##_##_field##_INDEX, \
+ _reg##_##_field##_WIDTH)
+
+#define XP_IOWRITE(_pdata, _reg, _val) \
+ iowrite32((_val), (_pdata)->xprop_regs + (_reg))
+
+#define XP_IOWRITE_BITS(_pdata, _reg, _field, _val) \
+do { \
+ u32 reg_val = XP_IOREAD((_pdata), (_reg)); \
+ SET_BITS(reg_val, \
+ _reg##_##_field##_INDEX, \
+ _reg##_##_field##_WIDTH, (_val)); \
+ XP_IOWRITE((_pdata), (_reg), reg_val); \
+} while (0)
+
+/* Macros for building, reading or writing register values or bits
+ * within the register values of I2C Control registers.
+ */
+#define XI2C_GET_BITS(_var, _prefix, _field) \
+ GET_BITS((_var), \
+ _prefix##_##_field##_INDEX, \
+ _prefix##_##_field##_WIDTH)
+
+#define XI2C_SET_BITS(_var, _prefix, _field, _val) \
+ SET_BITS((_var), \
+ _prefix##_##_field##_INDEX, \
+ _prefix##_##_field##_WIDTH, (_val))
+
+#define XI2C_IOREAD(_pdata, _reg) \
+ ioread32((_pdata)->xi2c_regs + (_reg))
+
+#define XI2C_IOREAD_BITS(_pdata, _reg, _field) \
+ GET_BITS(XI2C_IOREAD((_pdata), (_reg)), \
+ _reg##_##_field##_INDEX, \
+ _reg##_##_field##_WIDTH)
+
+#define XI2C_IOWRITE(_pdata, _reg, _val) \
+ iowrite32((_val), (_pdata)->xi2c_regs + (_reg))
+
+#define XI2C_IOWRITE_BITS(_pdata, _reg, _field, _val) \
+do { \
+ u32 reg_val = XI2C_IOREAD((_pdata), (_reg)); \
+ SET_BITS(reg_val, \
+ _reg##_##_field##_INDEX, \
+ _reg##_##_field##_WIDTH, (_val)); \
+ XI2C_IOWRITE((_pdata), (_reg), reg_val); \
+} while (0)
+
+/* Macros for building, reading or writing register values or bits
* using MDIO. Different from above because of the use of standardized
* Linux include values. No shifting is performed with the bit
* operations, everything works on mask values.
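
All of the register families introduced above (XP_*, XI2C_*, and the XPCS variants) lean on the same two primitives: a field is fully described by its _INDEX (bit offset) and _WIDTH, and the per-family wrappers merely paste <prefix>_<field>_INDEX/_WIDTH together before delegating to GET_BITS()/SET_BITS(). A sketch of the mask-and-shift core those wrappers delegate to, mirroring (not quoting) the file's own definitions:

    #define DEMO_GET_BITS(_var, _index, _width) \
        (((_var) >> (_index)) & ((0x1U << (_width)) - 1))

    #define DEMO_SET_BITS(_var, _index, _width, _val)                  \
    do {                                                               \
        (_var) &= ~(((0x1U << (_width)) - 1) << (_index));             \
        (_var) |= (((_val) & ((0x1U << (_width)) - 1)) << (_index));   \
    } while (0)

    /* e.g. the 2-bit CMD field of MAC_MDIOSCCDR lives at bit 16:
     *   cmd = DEMO_GET_BITS(reg_val, 16, 2);
     */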
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c b/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c
index 96f485ab612e..7546b660d6b5 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c
@@ -153,7 +153,7 @@ static ssize_t xgbe_common_write(const char __user *buffer, size_t count,
int ret;
if (*ppos != 0)
- return 0;
+ return -EINVAL;
if (count >= sizeof(workarea))
return -ENOSPC;
@@ -316,6 +316,126 @@ static const struct file_operations xpcs_reg_value_fops = {
.write = xpcs_reg_value_write,
};
+static ssize_t xprop_reg_addr_read(struct file *filp, char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ struct xgbe_prv_data *pdata = filp->private_data;
+
+ return xgbe_common_read(buffer, count, ppos, pdata->debugfs_xprop_reg);
+}
+
+static ssize_t xprop_reg_addr_write(struct file *filp,
+ const char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ struct xgbe_prv_data *pdata = filp->private_data;
+
+ return xgbe_common_write(buffer, count, ppos,
+ &pdata->debugfs_xprop_reg);
+}
+
+static ssize_t xprop_reg_value_read(struct file *filp, char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ struct xgbe_prv_data *pdata = filp->private_data;
+ unsigned int value;
+
+ value = XP_IOREAD(pdata, pdata->debugfs_xprop_reg);
+
+ return xgbe_common_read(buffer, count, ppos, value);
+}
+
+static ssize_t xprop_reg_value_write(struct file *filp,
+ const char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ struct xgbe_prv_data *pdata = filp->private_data;
+ unsigned int value;
+ ssize_t len;
+
+ len = xgbe_common_write(buffer, count, ppos, &value);
+ if (len < 0)
+ return len;
+
+ XP_IOWRITE(pdata, pdata->debugfs_xprop_reg, value);
+
+ return len;
+}
+
+static const struct file_operations xprop_reg_addr_fops = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .read = xprop_reg_addr_read,
+ .write = xprop_reg_addr_write,
+};
+
+static const struct file_operations xprop_reg_value_fops = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .read = xprop_reg_value_read,
+ .write = xprop_reg_value_write,
+};
+
+static ssize_t xi2c_reg_addr_read(struct file *filp, char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ struct xgbe_prv_data *pdata = filp->private_data;
+
+ return xgbe_common_read(buffer, count, ppos, pdata->debugfs_xi2c_reg);
+}
+
+static ssize_t xi2c_reg_addr_write(struct file *filp,
+ const char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ struct xgbe_prv_data *pdata = filp->private_data;
+
+ return xgbe_common_write(buffer, count, ppos,
+ &pdata->debugfs_xi2c_reg);
+}
+
+static ssize_t xi2c_reg_value_read(struct file *filp, char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ struct xgbe_prv_data *pdata = filp->private_data;
+ unsigned int value;
+
+ value = XI2C_IOREAD(pdata, pdata->debugfs_xi2c_reg);
+
+ return xgbe_common_read(buffer, count, ppos, value);
+}
+
+static ssize_t xi2c_reg_value_write(struct file *filp,
+ const char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ struct xgbe_prv_data *pdata = filp->private_data;
+ unsigned int value;
+ ssize_t len;
+
+ len = xgbe_common_write(buffer, count, ppos, &value);
+ if (len < 0)
+ return len;
+
+ XI2C_IOWRITE(pdata, pdata->debugfs_xi2c_reg, value);
+
+ return len;
+}
+
+static const struct file_operations xi2c_reg_addr_fops = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .read = xi2c_reg_addr_read,
+ .write = xi2c_reg_addr_write,
+};
+
+static const struct file_operations xi2c_reg_value_fops = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .read = xi2c_reg_value_read,
+ .write = xi2c_reg_value_write,
+};
+
void xgbe_debugfs_init(struct xgbe_prv_data *pdata)
{
struct dentry *pfile;
@@ -367,6 +487,38 @@ void xgbe_debugfs_init(struct xgbe_prv_data *pdata)
if (!pfile)
netdev_err(pdata->netdev, "debugfs_create_file failed\n");
+ if (pdata->xprop_regs) {
+ pfile = debugfs_create_file("xprop_register", 0600,
+ pdata->xgbe_debugfs, pdata,
+ &xprop_reg_addr_fops);
+ if (!pfile)
+ netdev_err(pdata->netdev,
+ "debugfs_create_file failed\n");
+
+ pfile = debugfs_create_file("xprop_register_value", 0600,
+ pdata->xgbe_debugfs, pdata,
+ &xprop_reg_value_fops);
+ if (!pfile)
+ netdev_err(pdata->netdev,
+ "debugfs_create_file failed\n");
+ }
+
+ if (pdata->xi2c_regs) {
+ pfile = debugfs_create_file("xi2c_register", 0600,
+ pdata->xgbe_debugfs, pdata,
+ &xi2c_reg_addr_fops);
+ if (!pfile)
+ netdev_err(pdata->netdev,
+ "debugfs_create_file failed\n");
+
+ pfile = debugfs_create_file("xi2c_register_value", 0600,
+ pdata->xgbe_debugfs, pdata,
+ &xi2c_reg_value_fops);
+ if (!pfile)
+ netdev_err(pdata->netdev,
+ "debugfs_create_file failed\n");
+ }
+
kfree(buf);
}
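
The debugfs additions above repeat an address/value file-pair pattern: one file latches a register offset into pdata, and the companion file reads or writes the hardware through that latched offset. A minimal sketch of the value-read half (struct demo_pdata and the fixed-size formatting are illustrative; the driver's own xgbe_common_read() helper is not shown):

    struct demo_pdata {
        void __iomem *regs;
        unsigned int debugfs_reg;    /* latched by the address file */
    };

    static ssize_t demo_value_read(struct file *filp, char __user *buffer,
                                   size_t count, loff_t *ppos)
    {
        struct demo_pdata *pdata = filp->private_data;
        char buf[32];
        int len;

        len = scnprintf(buf, sizeof(buf), "%#010x\n",
                        ioread32(pdata->regs + pdata->debugfs_reg));

        return simple_read_from_buffer(buffer, count, ppos, buf, len);
    }

    static const struct file_operations demo_value_fops = {
        .owner = THIS_MODULE,
        .open = simple_open,    /* file->private_data = inode->i_private */
        .read = demo_value_read,
    };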
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
index 1babcc11a248..aaf0350076a9 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
@@ -123,6 +123,11 @@
#include "xgbe.h"
#include "xgbe-common.h"
+static inline unsigned int xgbe_get_max_frame(struct xgbe_prv_data *pdata)
+{
+ return pdata->netdev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
+}
+
static unsigned int xgbe_usec_to_riwt(struct xgbe_prv_data *pdata,
unsigned int usec)
{
@@ -491,6 +496,27 @@ static void xgbe_config_rss(struct xgbe_prv_data *pdata)
"error configuring RSS, RSS disabled\n");
}
+static bool xgbe_is_pfc_queue(struct xgbe_prv_data *pdata,
+ unsigned int queue)
+{
+ unsigned int prio, tc;
+
+ for (prio = 0; prio < IEEE_8021QAZ_MAX_TCS; prio++) {
+ /* Does this queue handle the priority? */
+ if (pdata->prio2q_map[prio] != queue)
+ continue;
+
+ /* Get the Traffic Class for this priority */
+ tc = pdata->ets->prio_tc[prio];
+
+ /* Check if PFC is enabled for this traffic class */
+ if (pdata->pfc->pfc_en & (1 << tc))
+ return true;
+ }
+
+ return false;
+}
+
static int xgbe_disable_tx_flow_control(struct xgbe_prv_data *pdata)
{
unsigned int max_q_count, q_count;
@@ -528,27 +554,14 @@ static int xgbe_enable_tx_flow_control(struct xgbe_prv_data *pdata)
for (i = 0; i < pdata->rx_q_count; i++) {
unsigned int ehfc = 0;
- if (pfc && ets) {
- unsigned int prio;
-
- for (prio = 0; prio < IEEE_8021QAZ_MAX_TCS; prio++) {
- unsigned int tc;
-
- /* Does this queue handle the priority? */
- if (pdata->prio2q_map[prio] != i)
- continue;
-
- /* Get the Traffic Class for this priority */
- tc = ets->prio_tc[prio];
-
- /* Check if flow control should be enabled */
- if (pfc->pfc_en & (1 << tc)) {
+ if (pdata->rx_rfd[i]) {
+ /* Flow control thresholds are established */
+ if (pfc && ets) {
+ if (xgbe_is_pfc_queue(pdata, i))
ehfc = 1;
- break;
- }
+ } else {
+ ehfc = 1;
}
- } else {
- ehfc = 1;
}
XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, EHFC, ehfc);
@@ -633,6 +646,11 @@ static void xgbe_enable_dma_interrupts(struct xgbe_prv_data *pdata)
unsigned int dma_ch_isr, dma_ch_ier;
unsigned int i;
+ /* Set the interrupt mode if supported */
+ if (pdata->channel_irq_mode)
+ XGMAC_IOWRITE_BITS(pdata, DMA_MR, INTM,
+ pdata->channel_irq_mode);
+
channel = pdata->channel;
for (i = 0; i < pdata->channel_count; i++, channel++) {
/* Clear all the interrupts which are set */
@@ -654,19 +672,21 @@ static void xgbe_enable_dma_interrupts(struct xgbe_prv_data *pdata)
if (channel->tx_ring) {
/* Enable the following Tx interrupts
* TIE - Transmit Interrupt Enable (unless using
- * per channel interrupts)
+ * per channel interrupts in edge triggered
+ * mode)
*/
- if (!pdata->per_channel_irq)
+ if (!pdata->per_channel_irq || pdata->channel_irq_mode)
XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, TIE, 1);
}
if (channel->rx_ring) {
/* Enable following Rx interrupts
* RBUE - Receive Buffer Unavailable Enable
* RIE - Receive Interrupt Enable (unless using
- * per channel interrupts)
+ * per channel interrupts in edge triggered
+ * mode)
*/
XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RBUE, 1);
- if (!pdata->per_channel_irq)
+ if (!pdata->per_channel_irq || pdata->channel_irq_mode)
XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RIE, 1);
}
@@ -702,34 +722,90 @@ static void xgbe_enable_mac_interrupts(struct xgbe_prv_data *pdata)
/* Enable all counter interrupts */
XGMAC_IOWRITE_BITS(pdata, MMC_RIER, ALL_INTERRUPTS, 0xffffffff);
XGMAC_IOWRITE_BITS(pdata, MMC_TIER, ALL_INTERRUPTS, 0xffffffff);
+
+ /* Enable MDIO single command completion interrupt */
+ XGMAC_IOWRITE_BITS(pdata, MAC_MDIOIER, SNGLCOMPIE, 1);
}
-static int xgbe_set_gmii_speed(struct xgbe_prv_data *pdata)
+static void xgbe_enable_ecc_interrupts(struct xgbe_prv_data *pdata)
{
- if (XGMAC_IOREAD_BITS(pdata, MAC_TCR, SS) == 0x3)
- return 0;
+ unsigned int ecc_isr, ecc_ier = 0;
- XGMAC_IOWRITE_BITS(pdata, MAC_TCR, SS, 0x3);
+ if (!pdata->vdata->ecc_support)
+ return;
- return 0;
+ /* Clear all the interrupts which are set */
+ ecc_isr = XP_IOREAD(pdata, XP_ECC_ISR);
+ XP_IOWRITE(pdata, XP_ECC_ISR, ecc_isr);
+
+ /* Enable ECC interrupts */
+ XP_SET_BITS(ecc_ier, XP_ECC_IER, TX_DED, 1);
+ XP_SET_BITS(ecc_ier, XP_ECC_IER, TX_SEC, 1);
+ XP_SET_BITS(ecc_ier, XP_ECC_IER, RX_DED, 1);
+ XP_SET_BITS(ecc_ier, XP_ECC_IER, RX_SEC, 1);
+ XP_SET_BITS(ecc_ier, XP_ECC_IER, DESC_DED, 1);
+ XP_SET_BITS(ecc_ier, XP_ECC_IER, DESC_SEC, 1);
+
+ XP_IOWRITE(pdata, XP_ECC_IER, ecc_ier);
}
-static int xgbe_set_gmii_2500_speed(struct xgbe_prv_data *pdata)
+static void xgbe_disable_ecc_ded(struct xgbe_prv_data *pdata)
{
- if (XGMAC_IOREAD_BITS(pdata, MAC_TCR, SS) == 0x2)
- return 0;
+ unsigned int ecc_ier;
- XGMAC_IOWRITE_BITS(pdata, MAC_TCR, SS, 0x2);
+ ecc_ier = XP_IOREAD(pdata, XP_ECC_IER);
- return 0;
+ /* Disable ECC DED interrupts */
+ XP_SET_BITS(ecc_ier, XP_ECC_IER, TX_DED, 0);
+ XP_SET_BITS(ecc_ier, XP_ECC_IER, RX_DED, 0);
+ XP_SET_BITS(ecc_ier, XP_ECC_IER, DESC_DED, 0);
+
+ XP_IOWRITE(pdata, XP_ECC_IER, ecc_ier);
}
-static int xgbe_set_xgmii_speed(struct xgbe_prv_data *pdata)
+static void xgbe_disable_ecc_sec(struct xgbe_prv_data *pdata,
+ enum xgbe_ecc_sec sec)
{
- if (XGMAC_IOREAD_BITS(pdata, MAC_TCR, SS) == 0)
- return 0;
+ unsigned int ecc_ier;
+
+ ecc_ier = XP_IOREAD(pdata, XP_ECC_IER);
+
+ /* Disable ECC SEC interrupt */
+ switch (sec) {
+ case XGBE_ECC_SEC_TX:
+ XP_SET_BITS(ecc_ier, XP_ECC_IER, TX_SEC, 0);
+ break;
+ case XGBE_ECC_SEC_RX:
+ XP_SET_BITS(ecc_ier, XP_ECC_IER, RX_SEC, 0);
+ break;
+ case XGBE_ECC_SEC_DESC:
+ XP_SET_BITS(ecc_ier, XP_ECC_IER, DESC_SEC, 0);
+ break;
+ }
- XGMAC_IOWRITE_BITS(pdata, MAC_TCR, SS, 0);
+ XP_IOWRITE(pdata, XP_ECC_IER, ecc_ier);
+}
+
+static int xgbe_set_speed(struct xgbe_prv_data *pdata, int speed)
+{
+ unsigned int ss;
+
+ switch (speed) {
+ case SPEED_1000:
+ ss = 0x03;
+ break;
+ case SPEED_2500:
+ ss = 0x02;
+ break;
+ case SPEED_10000:
+ ss = 0x00;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (XGMAC_IOREAD_BITS(pdata, MAC_TCR, SS) != ss)
+ XGMAC_IOWRITE_BITS(pdata, MAC_TCR, SS, ss);
return 0;
}
@@ -1019,8 +1095,101 @@ static int xgbe_config_rx_mode(struct xgbe_prv_data *pdata)
return 0;
}
-static int xgbe_read_mmd_regs(struct xgbe_prv_data *pdata, int prtad,
- int mmd_reg)
+static int xgbe_clr_gpio(struct xgbe_prv_data *pdata, unsigned int gpio)
+{
+ unsigned int reg;
+
+ if (gpio > 15)
+ return -EINVAL;
+
+ reg = XGMAC_IOREAD(pdata, MAC_GPIOSR);
+
+ reg &= ~(1 << (gpio + 16));
+ XGMAC_IOWRITE(pdata, MAC_GPIOSR, reg);
+
+ return 0;
+}
+
+static int xgbe_set_gpio(struct xgbe_prv_data *pdata, unsigned int gpio)
+{
+ unsigned int reg;
+
+ if (gpio > 15)
+ return -EINVAL;
+
+ reg = XGMAC_IOREAD(pdata, MAC_GPIOSR);
+
+ reg |= (1 << (gpio + 16));
+ XGMAC_IOWRITE(pdata, MAC_GPIOSR, reg);
+
+ return 0;
+}
+
+static int xgbe_read_mmd_regs_v2(struct xgbe_prv_data *pdata, int prtad,
+ int mmd_reg)
+{
+ unsigned long flags;
+ unsigned int mmd_address, index, offset;
+ int mmd_data;
+
+ if (mmd_reg & MII_ADDR_C45)
+ mmd_address = mmd_reg & ~MII_ADDR_C45;
+ else
+ mmd_address = (pdata->mdio_mmd << 16) | (mmd_reg & 0xffff);
+
+ /* The PCS registers are accessed using mmio. The underlying
+ * management interface uses indirect addressing to access the MMD
+ * register sets. This requires accessing the PCS registers in two
+ * phases, an address phase and a data phase.
+ *
+ * The mmio interface is based on 16-bit offsets and values. All
+ * register offsets must therefore be adjusted by left shifting the
+ * offset 1 bit and reading 16 bits of data.
+ */
+ mmd_address <<= 1;
+ index = mmd_address & ~pdata->xpcs_window_mask;
+ offset = pdata->xpcs_window + (mmd_address & pdata->xpcs_window_mask);
+
+ spin_lock_irqsave(&pdata->xpcs_lock, flags);
+ XPCS32_IOWRITE(pdata, PCS_V2_WINDOW_SELECT, index);
+ mmd_data = XPCS16_IOREAD(pdata, offset);
+ spin_unlock_irqrestore(&pdata->xpcs_lock, flags);
+
+ return mmd_data;
+}
+
+static void xgbe_write_mmd_regs_v2(struct xgbe_prv_data *pdata, int prtad,
+ int mmd_reg, int mmd_data)
+{
+ unsigned long flags;
+ unsigned int mmd_address, index, offset;
+
+ if (mmd_reg & MII_ADDR_C45)
+ mmd_address = mmd_reg & ~MII_ADDR_C45;
+ else
+ mmd_address = (pdata->mdio_mmd << 16) | (mmd_reg & 0xffff);
+
+ /* The PCS registers are accessed using mmio. The underlying
+ * management interface uses indirect addressing to access the MMD
+ * register sets. This requires accessing the PCS registers in two
+ * phases, an address phase and a data phase.
+ *
+ * The mmio interface is based on 16-bit offsets and values. All
+ * register offsets must therefore be adjusted by left shifting the
+ * offset 1 bit and writing 16 bits of data.
+ */
+ mmd_address <<= 1;
+ index = mmd_address & ~pdata->xpcs_window_mask;
+ offset = pdata->xpcs_window + (mmd_address & pdata->xpcs_window_mask);
+
+ spin_lock_irqsave(&pdata->xpcs_lock, flags);
+ XPCS32_IOWRITE(pdata, PCS_V2_WINDOW_SELECT, index);
+ XPCS16_IOWRITE(pdata, offset, mmd_data);
+ spin_unlock_irqrestore(&pdata->xpcs_lock, flags);
+}
+
+static int xgbe_read_mmd_regs_v1(struct xgbe_prv_data *pdata, int prtad,
+ int mmd_reg)
{
unsigned long flags;
unsigned int mmd_address;
@@ -1041,15 +1210,15 @@ static int xgbe_read_mmd_regs(struct xgbe_prv_data *pdata, int prtad,
* offset 2 bits and reading 32 bits of data.
*/
spin_lock_irqsave(&pdata->xpcs_lock, flags);
- XPCS_IOWRITE(pdata, PCS_MMD_SELECT << 2, mmd_address >> 8);
- mmd_data = XPCS_IOREAD(pdata, (mmd_address & 0xff) << 2);
+ XPCS32_IOWRITE(pdata, PCS_V1_WINDOW_SELECT, mmd_address >> 8);
+ mmd_data = XPCS32_IOREAD(pdata, (mmd_address & 0xff) << 2);
spin_unlock_irqrestore(&pdata->xpcs_lock, flags);
return mmd_data;
}
-static void xgbe_write_mmd_regs(struct xgbe_prv_data *pdata, int prtad,
- int mmd_reg, int mmd_data)
+static void xgbe_write_mmd_regs_v1(struct xgbe_prv_data *pdata, int prtad,
+ int mmd_reg, int mmd_data)
{
unsigned int mmd_address;
unsigned long flags;
@@ -1066,14 +1235,113 @@ static void xgbe_write_mmd_regs(struct xgbe_prv_data *pdata, int prtad,
*
* The mmio interface is based on 32-bit offsets and values. All
* register offsets must therefore be adjusted by left shifting the
- * offset 2 bits and reading 32 bits of data.
+ * offset 2 bits and writing 32 bits of data.
*/
spin_lock_irqsave(&pdata->xpcs_lock, flags);
- XPCS_IOWRITE(pdata, PCS_MMD_SELECT << 2, mmd_address >> 8);
- XPCS_IOWRITE(pdata, (mmd_address & 0xff) << 2, mmd_data);
+ XPCS32_IOWRITE(pdata, PCS_V1_WINDOW_SELECT, mmd_address >> 8);
+ XPCS32_IOWRITE(pdata, (mmd_address & 0xff) << 2, mmd_data);
spin_unlock_irqrestore(&pdata->xpcs_lock, flags);
}
+static int xgbe_read_mmd_regs(struct xgbe_prv_data *pdata, int prtad,
+ int mmd_reg)
+{
+ switch (pdata->vdata->xpcs_access) {
+ case XGBE_XPCS_ACCESS_V1:
+ return xgbe_read_mmd_regs_v1(pdata, prtad, mmd_reg);
+
+ case XGBE_XPCS_ACCESS_V2:
+ default:
+ return xgbe_read_mmd_regs_v2(pdata, prtad, mmd_reg);
+ }
+}
+
+static void xgbe_write_mmd_regs(struct xgbe_prv_data *pdata, int prtad,
+ int mmd_reg, int mmd_data)
+{
+ switch (pdata->vdata->xpcs_access) {
+ case XGBE_XPCS_ACCESS_V1:
+ return xgbe_write_mmd_regs_v1(pdata, prtad, mmd_reg, mmd_data);
+
+ case XGBE_XPCS_ACCESS_V2:
+ default:
+ return xgbe_write_mmd_regs_v2(pdata, prtad, mmd_reg, mmd_data);
+ }
+}
+
+static int xgbe_write_ext_mii_regs(struct xgbe_prv_data *pdata, int addr,
+ int reg, u16 val)
+{
+ unsigned int mdio_sca, mdio_sccd;
+
+ reinit_completion(&pdata->mdio_complete);
+
+ mdio_sca = 0;
+ XGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, REG, reg);
+ XGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, DA, addr);
+ XGMAC_IOWRITE(pdata, MAC_MDIOSCAR, mdio_sca);
+
+ mdio_sccd = 0;
+ XGMAC_SET_BITS(mdio_sccd, MAC_MDIOSCCDR, DATA, val);
+ XGMAC_SET_BITS(mdio_sccd, MAC_MDIOSCCDR, CMD, 1);
+ XGMAC_SET_BITS(mdio_sccd, MAC_MDIOSCCDR, BUSY, 1);
+ XGMAC_IOWRITE(pdata, MAC_MDIOSCCDR, mdio_sccd);
+
+ if (!wait_for_completion_timeout(&pdata->mdio_complete, HZ)) {
+ netdev_err(pdata->netdev, "mdio write operation timed out\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+static int xgbe_read_ext_mii_regs(struct xgbe_prv_data *pdata, int addr,
+ int reg)
+{
+ unsigned int mdio_sca, mdio_sccd;
+
+ reinit_completion(&pdata->mdio_complete);
+
+ mdio_sca = 0;
+ XGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, REG, reg);
+ XGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, DA, addr);
+ XGMAC_IOWRITE(pdata, MAC_MDIOSCAR, mdio_sca);
+
+ mdio_sccd = 0;
+ XGMAC_SET_BITS(mdio_sccd, MAC_MDIOSCCDR, CMD, 3);
+ XGMAC_SET_BITS(mdio_sccd, MAC_MDIOSCCDR, BUSY, 1);
+ XGMAC_IOWRITE(pdata, MAC_MDIOSCCDR, mdio_sccd);
+
+ if (!wait_for_completion_timeout(&pdata->mdio_complete, HZ)) {
+ netdev_err(pdata->netdev, "mdio read operation timed out\n");
+ return -ETIMEDOUT;
+ }
+
+ return XGMAC_IOREAD_BITS(pdata, MAC_MDIOSCCDR, DATA);
+}
+
+static int xgbe_set_ext_mii_mode(struct xgbe_prv_data *pdata, unsigned int port,
+ enum xgbe_mdio_mode mode)
+{
+ unsigned int reg_val = 0;
+
+ switch (mode) {
+ case XGBE_MDIO_MODE_CL22:
+ if (port > XGMAC_MAX_C22_PORT)
+ return -EINVAL;
+ reg_val |= (1 << port);
+ break;
+ case XGBE_MDIO_MODE_CL45:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ XGMAC_IOWRITE(pdata, MAC_MDIOCL22R, reg_val);
+
+ return 0;
+}
+
static int xgbe_tx_complete(struct xgbe_ring_desc *rdesc)
{
return !XGMAC_GET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, OWN);
@@ -1264,14 +1532,21 @@ static u64 xgbe_get_tstamp_time(struct xgbe_prv_data *pdata)
static u64 xgbe_get_tx_tstamp(struct xgbe_prv_data *pdata)
{
- unsigned int tx_snr;
+ unsigned int tx_snr, tx_ssr;
u64 nsec;
- tx_snr = XGMAC_IOREAD(pdata, MAC_TXSNR);
+ if (pdata->vdata->tx_tstamp_workaround) {
+ tx_snr = XGMAC_IOREAD(pdata, MAC_TXSNR);
+ tx_ssr = XGMAC_IOREAD(pdata, MAC_TXSSR);
+ } else {
+ tx_ssr = XGMAC_IOREAD(pdata, MAC_TXSSR);
+ tx_snr = XGMAC_IOREAD(pdata, MAC_TXSNR);
+ }
+
if (XGMAC_GET_BITS(tx_snr, MAC_TXSNR, TXTSSTSMIS))
return 0;
- nsec = XGMAC_IOREAD(pdata, MAC_TXSSR);
+ nsec = tx_ssr;
nsec *= NSEC_PER_SEC;
nsec += tx_snr;
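
The timestamp is assembled from a seconds register (MAC_TXSSR) and a sub-second nanoseconds register (MAC_TXSNR); the workaround flag above only changes the order in which the two are read. A standalone sketch of the composition with sample register values:

#include <stdio.h>

#define NSEC_PER_SEC 1000000000ULL

int main(void)
{
	unsigned int tx_ssr = 5;         /* sample MAC_TXSSR (seconds) */
	unsigned int tx_snr = 123456789; /* sample MAC_TXSNR (nanoseconds) */
	unsigned long long nsec;

	nsec = tx_ssr;
	nsec *= NSEC_PER_SEC; /* seconds -> nanoseconds */
	nsec += tx_snr;

	printf("timestamp = %llu ns\n", nsec); /* 5123456789 */
	return 0;
}
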
@@ -1327,106 +1602,6 @@ static int xgbe_config_tstamp(struct xgbe_prv_data *pdata,
return 0;
}
-static void xgbe_config_tc(struct xgbe_prv_data *pdata)
-{
- unsigned int offset, queue, prio;
- u8 i;
-
- netdev_reset_tc(pdata->netdev);
- if (!pdata->num_tcs)
- return;
-
- netdev_set_num_tc(pdata->netdev, pdata->num_tcs);
-
- for (i = 0, queue = 0, offset = 0; i < pdata->num_tcs; i++) {
- while ((queue < pdata->tx_q_count) &&
- (pdata->q2tc_map[queue] == i))
- queue++;
-
- netif_dbg(pdata, drv, pdata->netdev, "TC%u using TXq%u-%u\n",
- i, offset, queue - 1);
- netdev_set_tc_queue(pdata->netdev, i, queue - offset, offset);
- offset = queue;
- }
-
- if (!pdata->ets)
- return;
-
- for (prio = 0; prio < IEEE_8021QAZ_MAX_TCS; prio++)
- netdev_set_prio_tc_map(pdata->netdev, prio,
- pdata->ets->prio_tc[prio]);
-}
-
-static void xgbe_config_dcb_tc(struct xgbe_prv_data *pdata)
-{
- struct ieee_ets *ets = pdata->ets;
- unsigned int total_weight, min_weight, weight;
- unsigned int mask, reg, reg_val;
- unsigned int i, prio;
-
- if (!ets)
- return;
-
- /* Set Tx to deficit weighted round robin scheduling algorithm (when
- * traffic class is using ETS algorithm)
- */
- XGMAC_IOWRITE_BITS(pdata, MTL_OMR, ETSALG, MTL_ETSALG_DWRR);
-
- /* Set Traffic Class algorithms */
- total_weight = pdata->netdev->mtu * pdata->hw_feat.tc_cnt;
- min_weight = total_weight / 100;
- if (!min_weight)
- min_weight = 1;
-
- for (i = 0; i < pdata->hw_feat.tc_cnt; i++) {
- /* Map the priorities to the traffic class */
- mask = 0;
- for (prio = 0; prio < IEEE_8021QAZ_MAX_TCS; prio++) {
- if (ets->prio_tc[prio] == i)
- mask |= (1 << prio);
- }
- mask &= 0xff;
-
- netif_dbg(pdata, drv, pdata->netdev, "TC%u PRIO mask=%#x\n",
- i, mask);
- reg = MTL_TCPM0R + (MTL_TCPM_INC * (i / MTL_TCPM_TC_PER_REG));
- reg_val = XGMAC_IOREAD(pdata, reg);
-
- reg_val &= ~(0xff << ((i % MTL_TCPM_TC_PER_REG) << 3));
- reg_val |= (mask << ((i % MTL_TCPM_TC_PER_REG) << 3));
-
- XGMAC_IOWRITE(pdata, reg, reg_val);
-
- /* Set the traffic class algorithm */
- switch (ets->tc_tsa[i]) {
- case IEEE_8021QAZ_TSA_STRICT:
- netif_dbg(pdata, drv, pdata->netdev,
- "TC%u using SP\n", i);
- XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_ETSCR, TSA,
- MTL_TSA_SP);
- break;
- case IEEE_8021QAZ_TSA_ETS:
- weight = total_weight * ets->tc_tx_bw[i] / 100;
- weight = clamp(weight, min_weight, total_weight);
-
- netif_dbg(pdata, drv, pdata->netdev,
- "TC%u using DWRR (weight %u)\n", i, weight);
- XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_ETSCR, TSA,
- MTL_TSA_ETS);
- XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_QWR, QW,
- weight);
- break;
- }
- }
-
- xgbe_config_tc(pdata);
-}
-
-static void xgbe_config_dcb_pfc(struct xgbe_prv_data *pdata)
-{
- xgbe_config_flow_control(pdata);
-}
-
static void xgbe_tx_start_xmit(struct xgbe_channel *channel,
struct xgbe_ring *ring)
{
@@ -1901,7 +2076,7 @@ static int xgbe_disable_int(struct xgbe_channel *channel,
return 0;
}
-static int xgbe_exit(struct xgbe_prv_data *pdata)
+static int __xgbe_exit(struct xgbe_prv_data *pdata)
{
unsigned int count = 2000;
@@ -1923,6 +2098,20 @@ static int xgbe_exit(struct xgbe_prv_data *pdata)
return 0;
}
+static int xgbe_exit(struct xgbe_prv_data *pdata)
+{
+ int ret;
+
+ /* To guard against possible incorrectly generated interrupts,
+ * issue the software reset twice.
+ */
+ ret = __xgbe_exit(pdata);
+ if (ret)
+ return ret;
+
+ return __xgbe_exit(pdata);
+}
+
static int xgbe_flush_tx_queues(struct xgbe_prv_data *pdata)
{
unsigned int i, count;
@@ -2000,61 +2189,331 @@ static void xgbe_config_mtl_mode(struct xgbe_prv_data *pdata)
XGMAC_IOWRITE_BITS(pdata, MTL_OMR, RAA, MTL_RAA_SP);
}
-static unsigned int xgbe_calculate_per_queue_fifo(unsigned int fifo_size,
- unsigned int queue_count)
+static void xgbe_queue_flow_control_threshold(struct xgbe_prv_data *pdata,
+ unsigned int queue,
+ unsigned int q_fifo_size)
+{
+ unsigned int frame_fifo_size;
+ unsigned int rfa, rfd;
+
+ frame_fifo_size = XGMAC_FLOW_CONTROL_ALIGN(xgbe_get_max_frame(pdata));
+
+ if (pdata->pfcq[queue] && (q_fifo_size > pdata->pfc_rfa)) {
+ /* PFC is active for this queue */
+ rfa = pdata->pfc_rfa;
+ rfd = rfa + frame_fifo_size;
+ if (rfd > XGMAC_FLOW_CONTROL_MAX)
+ rfd = XGMAC_FLOW_CONTROL_MAX;
+ if (rfa >= XGMAC_FLOW_CONTROL_MAX)
+ rfa = XGMAC_FLOW_CONTROL_MAX - XGMAC_FLOW_CONTROL_UNIT;
+ } else {
+ /* This path deals only with maximum frame sizes, which are
+ * limited to a jumbo frame of 9,000 bytes (plus headers, etc.),
+ * so we can never exceed the maximum allowable RFA/RFD
+ * values.
+ */
+ if (q_fifo_size <= 2048) {
+ /* Set rx_rfa/rx_rfd to zero to signal no flow control */
+ pdata->rx_rfa[queue] = 0;
+ pdata->rx_rfd[queue] = 0;
+ return;
+ }
+
+ if (q_fifo_size <= 4096) {
+ /* Between 2048 and 4096 */
+ pdata->rx_rfa[queue] = 0; /* Full - 1024 bytes */
+ pdata->rx_rfd[queue] = 1; /* Full - 1536 bytes */
+ return;
+ }
+
+ if (q_fifo_size <= frame_fifo_size) {
+ /* Between 4096 and max-frame */
+ pdata->rx_rfa[queue] = 2; /* Full - 2048 bytes */
+ pdata->rx_rfd[queue] = 5; /* Full - 3584 bytes */
+ return;
+ }
+
+ if (q_fifo_size <= (frame_fifo_size * 3)) {
+ /* Between max-frame and 3 max-frames,
+ * trigger if we get just over a frame of data and
+ * resume when we have just under half a frame left.
+ */
+ rfa = q_fifo_size - frame_fifo_size;
+ rfd = rfa + (frame_fifo_size / 2);
+ } else {
+ /* Above 3 max-frames - trigger when just over
+ * 2 frames of space available
+ */
+ rfa = frame_fifo_size * 2;
+ rfa += XGMAC_FLOW_CONTROL_UNIT;
+ rfd = rfa + frame_fifo_size;
+ }
+ }
+
+ pdata->rx_rfa[queue] = XGMAC_FLOW_CONTROL_VALUE(rfa);
+ pdata->rx_rfd[queue] = XGMAC_FLOW_CONTROL_VALUE(rfd);
+}
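
Worked example for the two computed tiers above (queue fifo larger than one max frame). The alignment unit and max frame size below are assumptions standing in for XGMAC_FLOW_CONTROL_ALIGN()/xgbe_get_max_frame(); the real driver also converts rfa/rfd into register encodings via XGMAC_FLOW_CONTROL_VALUE(), which is omitted here:

#include <stdio.h>

#define FC_ALIGN_UNIT 512u /* assumed flow-control alignment unit */
#define ALIGN_UP(x) (((x) + FC_ALIGN_UNIT - 1) & ~(FC_ALIGN_UNIT - 1))

int main(void)
{
	unsigned int max_frame = 9018; /* assumed jumbo frame size */
	unsigned int frame_fifo_size = ALIGN_UP(max_frame);
	unsigned int q_fifo_size = 16384; /* sample per-queue fifo */
	unsigned int rfa, rfd;

	if (q_fifo_size <= (frame_fifo_size * 3)) {
		/* trigger just over one frame, resume half a frame later */
		rfa = q_fifo_size - frame_fifo_size;
		rfd = rfa + (frame_fifo_size / 2);
	} else {
		/* trigger when just over two frames of space remain */
		rfa = (frame_fifo_size * 2) + FC_ALIGN_UNIT;
		rfd = rfa + frame_fifo_size;
	}

	printf("rfa=%u rfd=%u (bytes)\n", rfa, rfd);
	return 0;
}
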
+
+static void xgbe_calculate_flow_control_threshold(struct xgbe_prv_data *pdata,
+ unsigned int *fifo)
{
unsigned int q_fifo_size;
- unsigned int p_fifo;
+ unsigned int i;
- /* Calculate the configured fifo size */
- q_fifo_size = 1 << (fifo_size + 7);
+ for (i = 0; i < pdata->rx_q_count; i++) {
+ q_fifo_size = (fifo[i] + 1) * XGMAC_FIFO_UNIT;
+ xgbe_queue_flow_control_threshold(pdata, i, q_fifo_size);
+ }
+}
+
+static void xgbe_config_flow_control_threshold(struct xgbe_prv_data *pdata)
+{
+ unsigned int i;
+
+ for (i = 0; i < pdata->rx_q_count; i++) {
+ XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQFCR, RFA,
+ pdata->rx_rfa[i]);
+ XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQFCR, RFD,
+ pdata->rx_rfd[i]);
+ }
+}
+
+static unsigned int xgbe_get_tx_fifo_size(struct xgbe_prv_data *pdata)
+{
/* The configured value may not be the actual amount of fifo RAM */
- q_fifo_size = min_t(unsigned int, XGBE_FIFO_MAX, q_fifo_size);
+ return min_t(unsigned int, pdata->tx_max_fifo_size,
+ pdata->hw_feat.tx_fifo_size);
+}
- q_fifo_size = q_fifo_size / queue_count;
+static unsigned int xgbe_get_rx_fifo_size(struct xgbe_prv_data *pdata)
+{
+ /* The configured value may not be the actual amount of fifo RAM */
+ return min_t(unsigned int, pdata->rx_max_fifo_size,
+ pdata->hw_feat.rx_fifo_size);
+}
- /* Each increment in the queue fifo size represents 256 bytes of
- * fifo, with 0 representing 256 bytes. Distribute the fifo equally
- * between the queues.
+static void xgbe_calculate_equal_fifo(unsigned int fifo_size,
+ unsigned int queue_count,
+ unsigned int *fifo)
+{
+ unsigned int q_fifo_size;
+ unsigned int p_fifo;
+ unsigned int i;
+
+ q_fifo_size = fifo_size / queue_count;
+
+ /* Calculate the fifo setting by dividing the queue's fifo size
+ * by the fifo allocation increment (with 0 representing the
+ * base allocation increment so decrement the result by 1).
*/
- p_fifo = q_fifo_size / 256;
+ p_fifo = q_fifo_size / XGMAC_FIFO_UNIT;
if (p_fifo)
p_fifo--;
- return p_fifo;
+ /* Distribute the fifo equally amongst the queues */
+ for (i = 0; i < queue_count; i++)
+ fifo[i] = p_fifo;
+}
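
Worked example of the encoding above. The diff's own log messages show XGMAC_FIFO_UNIT is 256 bytes (the old code printed (fifo_size + 1) * 256), and the register value counts allocation increments minus one:

#include <stdio.h>

#define XGMAC_FIFO_UNIT 256u /* bytes per fifo allocation increment */

int main(void)
{
	unsigned int fifo_size = 65536; /* sample usable fifo RAM */
	unsigned int queue_count = 8;   /* sample queue count */
	unsigned int q_fifo_size = fifo_size / queue_count;
	unsigned int p_fifo = q_fifo_size / XGMAC_FIFO_UNIT;

	if (p_fifo)
		p_fifo--; /* 0 encodes one base increment */

	/* 65536 / 8 = 8192 bytes/queue -> 32 increments -> encoded 31 */
	printf("per-queue setting = %u (%u bytes)\n",
	       p_fifo, (p_fifo + 1) * XGMAC_FIFO_UNIT);
	return 0;
}
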
+
+static unsigned int xgbe_set_nonprio_fifos(unsigned int fifo_size,
+ unsigned int queue_count,
+ unsigned int *fifo)
+{
+ unsigned int i;
+
+ BUILD_BUG_ON_NOT_POWER_OF_2(XGMAC_FIFO_MIN_ALLOC);
+
+ if (queue_count <= IEEE_8021QAZ_MAX_TCS)
+ return fifo_size;
+
+ /* Rx queues 9 and up are for specialized packets,
+ * such as PTP or DCB control packets, etc., and
+ * don't require a large fifo
+ */
+ for (i = IEEE_8021QAZ_MAX_TCS; i < queue_count; i++) {
+ fifo[i] = (XGMAC_FIFO_MIN_ALLOC / XGMAC_FIFO_UNIT) - 1;
+ fifo_size -= XGMAC_FIFO_MIN_ALLOC;
+ }
+
+ return fifo_size;
+}
+
+static unsigned int xgbe_get_pfc_delay(struct xgbe_prv_data *pdata)
+{
+ unsigned int delay;
+
+ /* If a delay has been provided, use that */
+ if (pdata->pfc->delay)
+ return pdata->pfc->delay / 8;
+
+ /* Allow for two maximum size frames */
+ delay = xgbe_get_max_frame(pdata);
+ delay += XGMAC_ETH_PREAMBLE;
+ delay *= 2;
+
+ /* Allow for PFC frame */
+ delay += XGMAC_PFC_DATA_LEN;
+ delay += ETH_HLEN + ETH_FCS_LEN;
+ delay += XGMAC_ETH_PREAMBLE;
+
+ /* Allow for miscellaneous delays (LPI exit, cable, etc.) */
+ delay += XGMAC_PFC_DELAYS;
+
+ return delay;
+}
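
A standalone sketch of the delay budget computed above when no explicit delay is configured (when one is provided, the divide by 8 suggests it is given in bit times). Every constant here is an assumption for illustration; the driver takes them from xgbe.h and the current max frame size, and only the structure of the sum mirrors the code:

#include <stdio.h>

int main(void)
{
	unsigned int max_frame = 9018;    /* assumed max frame */
	unsigned int preamble = 8;        /* assumed XGMAC_ETH_PREAMBLE */
	unsigned int pfc_data_len = 46;   /* assumed XGMAC_PFC_DATA_LEN */
	unsigned int eth_hlen = 14, eth_fcs_len = 4;
	unsigned int misc_delays = 14000; /* assumed XGMAC_PFC_DELAYS */
	unsigned int delay;

	delay = (max_frame + preamble) * 2; /* two max-size frames */
	delay += pfc_data_len + eth_hlen + eth_fcs_len + preamble;
	delay += misc_delays;               /* LPI exit, cable, etc. */

	printf("pfc delay = %u bytes\n", delay);
	return 0;
}
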
+
+static unsigned int xgbe_get_pfc_queues(struct xgbe_prv_data *pdata)
+{
+ unsigned int count, prio_queues;
+ unsigned int i;
+
+ if (!pdata->pfc->pfc_en)
+ return 0;
+
+ count = 0;
+ prio_queues = XGMAC_PRIO_QUEUES(pdata->rx_q_count);
+ for (i = 0; i < prio_queues; i++) {
+ if (!xgbe_is_pfc_queue(pdata, i))
+ continue;
+
+ pdata->pfcq[i] = 1;
+ count++;
+ }
+
+ return count;
+}
+
+static void xgbe_calculate_dcb_fifo(struct xgbe_prv_data *pdata,
+ unsigned int fifo_size,
+ unsigned int *fifo)
+{
+ unsigned int q_fifo_size, rem_fifo, addn_fifo;
+ unsigned int prio_queues;
+ unsigned int pfc_count;
+ unsigned int i;
+
+ q_fifo_size = XGMAC_FIFO_ALIGN(xgbe_get_max_frame(pdata));
+ prio_queues = XGMAC_PRIO_QUEUES(pdata->rx_q_count);
+ pfc_count = xgbe_get_pfc_queues(pdata);
+
+ if (!pfc_count || ((q_fifo_size * prio_queues) > fifo_size)) {
+ /* No traffic classes with PFC enabled or can't do lossless */
+ xgbe_calculate_equal_fifo(fifo_size, prio_queues, fifo);
+ return;
+ }
+
+ /* Calculate how much fifo we have to play with */
+ rem_fifo = fifo_size - (q_fifo_size * prio_queues);
+
+ /* Calculate how much more than base fifo PFC needs, which also
+ * becomes the threshold activation point (RFA)
+ */
+ pdata->pfc_rfa = xgbe_get_pfc_delay(pdata);
+ pdata->pfc_rfa = XGMAC_FLOW_CONTROL_ALIGN(pdata->pfc_rfa);
+
+ if (pdata->pfc_rfa > q_fifo_size) {
+ addn_fifo = pdata->pfc_rfa - q_fifo_size;
+ addn_fifo = XGMAC_FIFO_ALIGN(addn_fifo);
+ } else {
+ addn_fifo = 0;
+ }
+
+ /* Calculate DCB fifo settings:
+ * - distribute remaining fifo between the VLAN priority
+ * queues based on traffic class PFC enablement and overall
+ * priority (0 is lowest priority, so start at highest)
+ */
+ i = prio_queues;
+ while (i > 0) {
+ i--;
+
+ fifo[i] = (q_fifo_size / XGMAC_FIFO_UNIT) - 1;
+
+ if (!pdata->pfcq[i] || !addn_fifo)
+ continue;
+
+ if (addn_fifo > rem_fifo) {
+ netdev_warn(pdata->netdev,
+ "RXq%u cannot set needed fifo size\n", i);
+ if (!rem_fifo)
+ continue;
+
+ addn_fifo = rem_fifo;
+ }
+
+ fifo[i] += (addn_fifo / XGMAC_FIFO_UNIT);
+ rem_fifo -= addn_fifo;
+ }
+
+ if (rem_fifo) {
+ unsigned int inc_fifo = rem_fifo / prio_queues;
+
+ /* Distribute remaining fifo across queues */
+ for (i = 0; i < prio_queues; i++)
+ fifo[i] += (inc_fifo / XGMAC_FIFO_UNIT);
+ }
}
static void xgbe_config_tx_fifo_size(struct xgbe_prv_data *pdata)
{
unsigned int fifo_size;
+ unsigned int fifo[XGBE_MAX_QUEUES];
unsigned int i;
- fifo_size = xgbe_calculate_per_queue_fifo(pdata->hw_feat.tx_fifo_size,
- pdata->tx_q_count);
+ fifo_size = xgbe_get_tx_fifo_size(pdata);
+
+ xgbe_calculate_equal_fifo(fifo_size, pdata->tx_q_count, fifo);
for (i = 0; i < pdata->tx_q_count; i++)
- XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TQS, fifo_size);
+ XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TQS, fifo[i]);
netif_info(pdata, drv, pdata->netdev,
"%d Tx hardware queues, %d byte fifo per queue\n",
- pdata->tx_q_count, ((fifo_size + 1) * 256));
+ pdata->tx_q_count, ((fifo[0] + 1) * XGMAC_FIFO_UNIT));
}
static void xgbe_config_rx_fifo_size(struct xgbe_prv_data *pdata)
{
unsigned int fifo_size;
+ unsigned int fifo[XGBE_MAX_QUEUES];
+ unsigned int prio_queues;
unsigned int i;
- fifo_size = xgbe_calculate_per_queue_fifo(pdata->hw_feat.rx_fifo_size,
- pdata->rx_q_count);
+ /* Clear any DCB related fifo/queue information */
+ memset(pdata->pfcq, 0, sizeof(pdata->pfcq));
+ pdata->pfc_rfa = 0;
+
+ fifo_size = xgbe_get_rx_fifo_size(pdata);
+ prio_queues = XGMAC_PRIO_QUEUES(pdata->rx_q_count);
+
+ /* Assign a minimum fifo to the non-VLAN priority queues */
+ fifo_size = xgbe_set_nonprio_fifos(fifo_size, pdata->rx_q_count, fifo);
+
+ if (pdata->pfc && pdata->ets)
+ xgbe_calculate_dcb_fifo(pdata, fifo_size, fifo);
+ else
+ xgbe_calculate_equal_fifo(fifo_size, prio_queues, fifo);
for (i = 0; i < pdata->rx_q_count; i++)
- XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RQS, fifo_size);
+ XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RQS, fifo[i]);
- netif_info(pdata, drv, pdata->netdev,
- "%d Rx hardware queues, %d byte fifo per queue\n",
- pdata->rx_q_count, ((fifo_size + 1) * 256));
+ xgbe_calculate_flow_control_threshold(pdata, fifo);
+ xgbe_config_flow_control_threshold(pdata);
+
+ if (pdata->pfc && pdata->ets && pdata->pfc->pfc_en) {
+ netif_info(pdata, drv, pdata->netdev,
+ "%u Rx hardware queues\n", pdata->rx_q_count);
+ for (i = 0; i < pdata->rx_q_count; i++)
+ netif_info(pdata, drv, pdata->netdev,
+ "RxQ%u, %u byte fifo queue\n", i,
+ ((fifo[i] + 1) * XGMAC_FIFO_UNIT));
+ } else {
+ netif_info(pdata, drv, pdata->netdev,
+ "%u Rx hardware queues, %u byte fifo per queue\n",
+ pdata->rx_q_count,
+ ((fifo[0] + 1) * XGMAC_FIFO_UNIT));
+ }
}
static void xgbe_config_queue_mapping(struct xgbe_prv_data *pdata)
@@ -2090,8 +2549,7 @@ static void xgbe_config_queue_mapping(struct xgbe_prv_data *pdata)
}
/* Map the 8 VLAN priority values to available MTL Rx queues */
- prio_queues = min_t(unsigned int, IEEE_8021QAZ_MAX_TCS,
- pdata->rx_q_count);
+ prio_queues = XGMAC_PRIO_QUEUES(pdata->rx_q_count);
ppq = IEEE_8021QAZ_MAX_TCS / prio_queues;
ppq_extra = IEEE_8021QAZ_MAX_TCS % prio_queues;
@@ -2139,16 +2597,120 @@ static void xgbe_config_queue_mapping(struct xgbe_prv_data *pdata)
}
}
-static void xgbe_config_flow_control_threshold(struct xgbe_prv_data *pdata)
+static void xgbe_config_tc(struct xgbe_prv_data *pdata)
{
- unsigned int i;
+ unsigned int offset, queue, prio;
+ u8 i;
- for (i = 0; i < pdata->rx_q_count; i++) {
- /* Activate flow control when less than 4k left in fifo */
- XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQFCR, RFA, 2);
+ netdev_reset_tc(pdata->netdev);
+ if (!pdata->num_tcs)
+ return;
+
+ netdev_set_num_tc(pdata->netdev, pdata->num_tcs);
+
+ for (i = 0, queue = 0, offset = 0; i < pdata->num_tcs; i++) {
+ while ((queue < pdata->tx_q_count) &&
+ (pdata->q2tc_map[queue] == i))
+ queue++;
- /* De-activate flow control when more than 6k left in fifo */
- XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQFCR, RFD, 4);
+ netif_dbg(pdata, drv, pdata->netdev, "TC%u using TXq%u-%u\n",
+ i, offset, queue - 1);
+ netdev_set_tc_queue(pdata->netdev, i, queue - offset, offset);
+ offset = queue;
+ }
+
+ if (!pdata->ets)
+ return;
+
+ for (prio = 0; prio < IEEE_8021QAZ_MAX_TCS; prio++)
+ netdev_set_prio_tc_map(pdata->netdev, prio,
+ pdata->ets->prio_tc[prio]);
+}
+
+static void xgbe_config_dcb_tc(struct xgbe_prv_data *pdata)
+{
+ struct ieee_ets *ets = pdata->ets;
+ unsigned int total_weight, min_weight, weight;
+ unsigned int mask, reg, reg_val;
+ unsigned int i, prio;
+
+ if (!ets)
+ return;
+
+ /* Set Tx to deficit weighted round robin scheduling algorithm (when
+ * traffic class is using ETS algorithm)
+ */
+ XGMAC_IOWRITE_BITS(pdata, MTL_OMR, ETSALG, MTL_ETSALG_DWRR);
+
+ /* Set Traffic Class algorithms */
+ total_weight = pdata->netdev->mtu * pdata->hw_feat.tc_cnt;
+ min_weight = total_weight / 100;
+ if (!min_weight)
+ min_weight = 1;
+
+ for (i = 0; i < pdata->hw_feat.tc_cnt; i++) {
+ /* Map the priorities to the traffic class */
+ mask = 0;
+ for (prio = 0; prio < IEEE_8021QAZ_MAX_TCS; prio++) {
+ if (ets->prio_tc[prio] == i)
+ mask |= (1 << prio);
+ }
+ mask &= 0xff;
+
+ netif_dbg(pdata, drv, pdata->netdev, "TC%u PRIO mask=%#x\n",
+ i, mask);
+ reg = MTL_TCPM0R + (MTL_TCPM_INC * (i / MTL_TCPM_TC_PER_REG));
+ reg_val = XGMAC_IOREAD(pdata, reg);
+
+ reg_val &= ~(0xff << ((i % MTL_TCPM_TC_PER_REG) << 3));
+ reg_val |= (mask << ((i % MTL_TCPM_TC_PER_REG) << 3));
+
+ XGMAC_IOWRITE(pdata, reg, reg_val);
+
+ /* Set the traffic class algorithm */
+ switch (ets->tc_tsa[i]) {
+ case IEEE_8021QAZ_TSA_STRICT:
+ netif_dbg(pdata, drv, pdata->netdev,
+ "TC%u using SP\n", i);
+ XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_ETSCR, TSA,
+ MTL_TSA_SP);
+ break;
+ case IEEE_8021QAZ_TSA_ETS:
+ weight = total_weight * ets->tc_tx_bw[i] / 100;
+ weight = clamp(weight, min_weight, total_weight);
+
+ netif_dbg(pdata, drv, pdata->netdev,
+ "TC%u using DWRR (weight %u)\n", i, weight);
+ XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_ETSCR, TSA,
+ MTL_TSA_ETS);
+ XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_QWR, QW,
+ weight);
+ break;
+ }
+ }
+
+ xgbe_config_tc(pdata);
+}
+
+static void xgbe_config_dcb_pfc(struct xgbe_prv_data *pdata)
+{
+ if (!test_bit(XGBE_DOWN, &pdata->dev_state)) {
+ /* Just stop the Tx queues while Rx fifo is changed */
+ netif_tx_stop_all_queues(pdata->netdev);
+
+ /* Suspend Rx so that fifos can be adjusted */
+ pdata->hw_if.disable_rx(pdata);
+ }
+
+ xgbe_config_rx_fifo_size(pdata);
+ xgbe_config_flow_control(pdata);
+
+ if (!test_bit(XGBE_DOWN, &pdata->dev_state)) {
+ /* Resume Rx */
+ pdata->hw_if.enable_rx(pdata);
+
+ /* Resume Tx queues */
+ netif_tx_start_all_queues(pdata->netdev);
}
}
@@ -2175,19 +2737,7 @@ static void xgbe_config_jumbo_enable(struct xgbe_prv_data *pdata)
static void xgbe_config_mac_speed(struct xgbe_prv_data *pdata)
{
- switch (pdata->phy_speed) {
- case SPEED_10000:
- xgbe_set_xgmii_speed(pdata);
- break;
-
- case SPEED_2500:
- xgbe_set_gmii_2500_speed(pdata);
- break;
-
- case SPEED_1000:
- xgbe_set_gmii_speed(pdata);
- break;
- }
+ xgbe_set_speed(pdata, pdata->phy_speed);
}
static void xgbe_config_checksum_offload(struct xgbe_prv_data *pdata)
@@ -2223,17 +2773,33 @@ static u64 xgbe_mmc_read(struct xgbe_prv_data *pdata, unsigned int reg_lo)
bool read_hi;
u64 val;
- switch (reg_lo) {
- /* These registers are always 64 bit */
- case MMC_TXOCTETCOUNT_GB_LO:
- case MMC_TXOCTETCOUNT_G_LO:
- case MMC_RXOCTETCOUNT_GB_LO:
- case MMC_RXOCTETCOUNT_G_LO:
- read_hi = true;
- break;
+ if (pdata->vdata->mmc_64bit) {
+ switch (reg_lo) {
+ /* These registers are always 32 bit */
+ case MMC_RXRUNTERROR:
+ case MMC_RXJABBERERROR:
+ case MMC_RXUNDERSIZE_G:
+ case MMC_RXOVERSIZE_G:
+ case MMC_RXWATCHDOGERROR:
+ read_hi = false;
+ break;
- default:
- read_hi = false;
+ default:
+ read_hi = true;
+ }
+ } else {
+ switch (reg_lo) {
+ /* These registers are always 64 bit */
+ case MMC_TXOCTETCOUNT_GB_LO:
+ case MMC_TXOCTETCOUNT_G_LO:
+ case MMC_RXOCTETCOUNT_GB_LO:
+ case MMC_RXOCTETCOUNT_G_LO:
+ read_hi = true;
+ break;
+
+ default:
+ read_hi = false;
+ }
}
val = XGMAC_IOREAD(pdata, reg_lo);
@@ -2563,20 +3129,48 @@ static void xgbe_config_mmc(struct xgbe_prv_data *pdata)
XGMAC_IOWRITE_BITS(pdata, MMC_CR, CR, 1);
}
+static void xgbe_txq_prepare_tx_stop(struct xgbe_prv_data *pdata,
+ unsigned int queue)
+{
+ unsigned int tx_status;
+ unsigned long tx_timeout;
+
+ /* The Tx engine cannot be stopped if it is actively processing
+ * packets. Wait for the Tx queue to empty the Tx fifo. Don't
+ * wait forever though...
+ */
+ tx_timeout = jiffies + (XGBE_DMA_STOP_TIMEOUT * HZ);
+ while (time_before(jiffies, tx_timeout)) {
+ tx_status = XGMAC_MTL_IOREAD(pdata, queue, MTL_Q_TQDR);
+ if ((XGMAC_GET_BITS(tx_status, MTL_Q_TQDR, TRCSTS) != 1) &&
+ (XGMAC_GET_BITS(tx_status, MTL_Q_TQDR, TXQSTS) == 0))
+ break;
+
+ usleep_range(500, 1000);
+ }
+
+ if (!time_before(jiffies, tx_timeout))
+ netdev_info(pdata->netdev,
+ "timed out waiting for Tx queue %u to empty\n",
+ queue);
+}
+
static void xgbe_prepare_tx_stop(struct xgbe_prv_data *pdata,
- struct xgbe_channel *channel)
+ unsigned int queue)
{
unsigned int tx_dsr, tx_pos, tx_qidx;
unsigned int tx_status;
unsigned long tx_timeout;
+ if (XGMAC_GET_BITS(pdata->hw_feat.version, MAC_VR, SNPSVER) > 0x20)
+ return xgbe_txq_prepare_tx_stop(pdata, queue);
+
/* Calculate the status register to read and the position within */
- if (channel->queue_index < DMA_DSRX_FIRST_QUEUE) {
+ if (queue < DMA_DSRX_FIRST_QUEUE) {
tx_dsr = DMA_DSR0;
- tx_pos = (channel->queue_index * DMA_DSR_Q_WIDTH) +
- DMA_DSR0_TPS_START;
+ tx_pos = (queue * DMA_DSR_Q_WIDTH) + DMA_DSR0_TPS_START;
} else {
- tx_qidx = channel->queue_index - DMA_DSRX_FIRST_QUEUE;
+ tx_qidx = queue - DMA_DSRX_FIRST_QUEUE;
tx_dsr = DMA_DSR1 + ((tx_qidx / DMA_DSRX_QPR) * DMA_DSRX_INC);
tx_pos = ((tx_qidx % DMA_DSRX_QPR) * DMA_DSR_Q_WIDTH) +
@@ -2601,7 +3195,7 @@ static void xgbe_prepare_tx_stop(struct xgbe_prv_data *pdata,
if (!time_before(jiffies, tx_timeout))
netdev_info(pdata->netdev,
"timed out waiting for Tx DMA channel %u to stop\n",
- channel->queue_index);
+ queue);
}
static void xgbe_enable_tx(struct xgbe_prv_data *pdata)
@@ -2633,13 +3227,8 @@ static void xgbe_disable_tx(struct xgbe_prv_data *pdata)
unsigned int i;
/* Prepare for Tx DMA channel stop */
- channel = pdata->channel;
- for (i = 0; i < pdata->channel_count; i++, channel++) {
- if (!channel->tx_ring)
- break;
-
- xgbe_prepare_tx_stop(pdata, channel);
- }
+ for (i = 0; i < pdata->tx_q_count; i++)
+ xgbe_prepare_tx_stop(pdata, i);
/* Disable MAC Tx */
XGMAC_IOWRITE_BITS(pdata, MAC_TCR, TE, 0);
@@ -2763,13 +3352,8 @@ static void xgbe_powerdown_tx(struct xgbe_prv_data *pdata)
unsigned int i;
/* Prepare for Tx DMA channel stop */
- channel = pdata->channel;
- for (i = 0; i < pdata->channel_count; i++, channel++) {
- if (!channel->tx_ring)
- break;
-
- xgbe_prepare_tx_stop(pdata, channel);
- }
+ for (i = 0; i < pdata->tx_q_count; i++)
+ xgbe_prepare_tx_stop(pdata, i);
/* Disable MAC Tx */
XGMAC_IOWRITE_BITS(pdata, MAC_TCR, TE, 0);
@@ -2856,12 +3440,10 @@ static int xgbe_init(struct xgbe_prv_data *pdata)
xgbe_config_rx_threshold(pdata, pdata->rx_threshold);
xgbe_config_tx_fifo_size(pdata);
xgbe_config_rx_fifo_size(pdata);
- xgbe_config_flow_control_threshold(pdata);
/*TODO: Error Packet and undersized good Packet forwarding enable
(FEP and FUP)
*/
xgbe_config_dcb_tc(pdata);
- xgbe_config_dcb_pfc(pdata);
xgbe_enable_mtl_interrupts(pdata);
/*
@@ -2877,6 +3459,11 @@ static int xgbe_init(struct xgbe_prv_data *pdata)
xgbe_config_mmc(pdata);
xgbe_enable_mac_interrupts(pdata);
+ /*
+ * Initialize ECC related features
+ */
+ xgbe_enable_ecc_interrupts(pdata);
+
DBGPR("<--xgbe_init\n");
return 0;
@@ -2903,9 +3490,14 @@ void xgbe_init_function_ptrs_dev(struct xgbe_hw_if *hw_if)
hw_if->read_mmd_regs = xgbe_read_mmd_regs;
hw_if->write_mmd_regs = xgbe_write_mmd_regs;
- hw_if->set_gmii_speed = xgbe_set_gmii_speed;
- hw_if->set_gmii_2500_speed = xgbe_set_gmii_2500_speed;
- hw_if->set_xgmii_speed = xgbe_set_xgmii_speed;
+ hw_if->set_speed = xgbe_set_speed;
+
+ hw_if->set_ext_mii_mode = xgbe_set_ext_mii_mode;
+ hw_if->read_ext_mii_regs = xgbe_read_ext_mii_regs;
+ hw_if->write_ext_mii_regs = xgbe_write_ext_mii_regs;
+
+ hw_if->set_gpio = xgbe_set_gpio;
+ hw_if->clr_gpio = xgbe_clr_gpio;
hw_if->enable_tx = xgbe_enable_tx;
hw_if->disable_tx = xgbe_disable_tx;
@@ -2984,5 +3576,9 @@ void xgbe_init_function_ptrs_dev(struct xgbe_hw_if *hw_if)
hw_if->set_rss_hash_key = xgbe_set_rss_hash_key;
hw_if->set_rss_lookup_table = xgbe_set_rss_lookup_table;
+ /* For ECC */
+ hw_if->disable_ecc_ded = xgbe_disable_ecc_ded;
+ hw_if->disable_ecc_sec = xgbe_disable_ecc_sec;
+
DBGPR("<--xgbe_init_function_ptrs\n");
}
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
index 7f9216db026f..155190db682d 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
@@ -114,7 +114,7 @@
* THE POSSIBILITY OF SUCH DAMAGE.
*/
-#include <linux/platform_device.h>
+#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/tcp.h>
#include <linux/if_vlan.h>
@@ -127,8 +127,35 @@
#include "xgbe.h"
#include "xgbe-common.h"
+static unsigned int ecc_sec_info_threshold = 10;
+static unsigned int ecc_sec_warn_threshold = 10000;
+static unsigned int ecc_sec_period = 600;
+static unsigned int ecc_ded_threshold = 2;
+static unsigned int ecc_ded_period = 600;
+
+#ifdef CONFIG_AMD_XGBE_HAVE_ECC
+/* Only expose the ECC parameters if supported */
+module_param(ecc_sec_info_threshold, uint, S_IWUSR | S_IRUGO);
+MODULE_PARM_DESC(ecc_sec_info_threshold,
+ " ECC corrected error informational threshold setting");
+
+module_param(ecc_sec_warn_threshold, uint, S_IWUSR | S_IRUGO);
+MODULE_PARM_DESC(ecc_sec_warn_threshold,
+ " ECC corrected error warning threshold setting");
+
+module_param(ecc_sec_period, uint, S_IWUSR | S_IRUGO);
+MODULE_PARM_DESC(ecc_sec_period, " ECC corrected error period (in seconds)");
+
+module_param(ecc_ded_threshold, uint, S_IWUSR | S_IRUGO);
+MODULE_PARM_DESC(ecc_ded_threshold, " ECC detected error threshold setting");
+
+module_param(ecc_ded_period, uint, S_IWUSR | S_IRUGO);
+MODULE_PARM_DESC(ecc_ded_period, " ECC detected error period (in seconds)");
+#endif
+
static int xgbe_one_poll(struct napi_struct *, int);
static int xgbe_all_poll(struct napi_struct *, int);
+static void xgbe_stop(struct xgbe_prv_data *);
static int xgbe_alloc_channels(struct xgbe_prv_data *pdata)
{
@@ -160,18 +187,8 @@ static int xgbe_alloc_channels(struct xgbe_prv_data *pdata)
channel->dma_regs = pdata->xgmac_regs + DMA_CH_BASE +
(DMA_CH_INC * i);
- if (pdata->per_channel_irq) {
- /* Get the DMA interrupt (offset 1) */
- ret = platform_get_irq(pdata->pdev, i + 1);
- if (ret < 0) {
- netdev_err(pdata->netdev,
- "platform_get_irq %u failed\n",
- i + 1);
- goto err_irq;
- }
-
- channel->dma_irq = ret;
- }
+ if (pdata->per_channel_irq)
+ channel->dma_irq = pdata->channel_irq[i];
if (i < pdata->tx_ring_count) {
spin_lock_init(&tx_ring->lock);
@@ -194,9 +211,6 @@ static int xgbe_alloc_channels(struct xgbe_prv_data *pdata)
return 0;
-err_irq:
- kfree(rx_ring);
-
err_rx_ring:
kfree(tx_ring);
@@ -257,11 +271,6 @@ static int xgbe_calc_rx_buf_size(struct net_device *netdev, unsigned int mtu)
{
unsigned int rx_buf_size;
- if (mtu > XGMAC_JUMBO_PACKET_MTU) {
- netdev_alert(netdev, "MTU exceeds maximum supported value\n");
- return -EINVAL;
- }
-
rx_buf_size = mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
rx_buf_size = clamp_val(rx_buf_size, XGBE_RX_MIN_BUF_SIZE, PAGE_SIZE);
@@ -271,48 +280,161 @@ static int xgbe_calc_rx_buf_size(struct net_device *netdev, unsigned int mtu)
return rx_buf_size;
}
-static void xgbe_enable_rx_tx_ints(struct xgbe_prv_data *pdata)
+static void xgbe_enable_rx_tx_int(struct xgbe_prv_data *pdata,
+ struct xgbe_channel *channel)
{
struct xgbe_hw_if *hw_if = &pdata->hw_if;
- struct xgbe_channel *channel;
enum xgbe_int int_id;
+
+ if (channel->tx_ring && channel->rx_ring)
+ int_id = XGMAC_INT_DMA_CH_SR_TI_RI;
+ else if (channel->tx_ring)
+ int_id = XGMAC_INT_DMA_CH_SR_TI;
+ else if (channel->rx_ring)
+ int_id = XGMAC_INT_DMA_CH_SR_RI;
+ else
+ return;
+
+ hw_if->enable_int(channel, int_id);
+}
+
+static void xgbe_enable_rx_tx_ints(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_channel *channel;
unsigned int i;
channel = pdata->channel;
- for (i = 0; i < pdata->channel_count; i++, channel++) {
- if (channel->tx_ring && channel->rx_ring)
- int_id = XGMAC_INT_DMA_CH_SR_TI_RI;
- else if (channel->tx_ring)
- int_id = XGMAC_INT_DMA_CH_SR_TI;
- else if (channel->rx_ring)
- int_id = XGMAC_INT_DMA_CH_SR_RI;
- else
- continue;
+ for (i = 0; i < pdata->channel_count; i++, channel++)
+ xgbe_enable_rx_tx_int(pdata, channel);
+}
- hw_if->enable_int(channel, int_id);
- }
+static void xgbe_disable_rx_tx_int(struct xgbe_prv_data *pdata,
+ struct xgbe_channel *channel)
+{
+ struct xgbe_hw_if *hw_if = &pdata->hw_if;
+ enum xgbe_int int_id;
+
+ if (channel->tx_ring && channel->rx_ring)
+ int_id = XGMAC_INT_DMA_CH_SR_TI_RI;
+ else if (channel->tx_ring)
+ int_id = XGMAC_INT_DMA_CH_SR_TI;
+ else if (channel->rx_ring)
+ int_id = XGMAC_INT_DMA_CH_SR_RI;
+ else
+ return;
+
+ hw_if->disable_int(channel, int_id);
}
static void xgbe_disable_rx_tx_ints(struct xgbe_prv_data *pdata)
{
- struct xgbe_hw_if *hw_if = &pdata->hw_if;
struct xgbe_channel *channel;
- enum xgbe_int int_id;
unsigned int i;
channel = pdata->channel;
- for (i = 0; i < pdata->channel_count; i++, channel++) {
- if (channel->tx_ring && channel->rx_ring)
- int_id = XGMAC_INT_DMA_CH_SR_TI_RI;
- else if (channel->tx_ring)
- int_id = XGMAC_INT_DMA_CH_SR_TI;
- else if (channel->rx_ring)
- int_id = XGMAC_INT_DMA_CH_SR_RI;
- else
- continue;
+ for (i = 0; i < pdata->channel_count; i++, channel++)
+ xgbe_disable_rx_tx_int(pdata, channel);
+}
+
+static bool xgbe_ecc_sec(struct xgbe_prv_data *pdata, unsigned long *period,
+ unsigned int *count, const char *area)
+{
+ if (time_before(jiffies, *period)) {
+ (*count)++;
+ } else {
+ *period = jiffies + (ecc_sec_period * HZ);
+ *count = 1;
+ }
- hw_if->disable_int(channel, int_id);
+ if (*count > ecc_sec_info_threshold)
+ dev_warn_once(pdata->dev,
+ "%s ECC corrected errors exceed informational threshold\n",
+ area);
+
+ if (*count > ecc_sec_warn_threshold) {
+ dev_warn_once(pdata->dev,
+ "%s ECC corrected errors exceed warning threshold\n",
+ area);
+ return true;
}
+
+ return false;
+}
+
+static bool xgbe_ecc_ded(struct xgbe_prv_data *pdata, unsigned long *period,
+ unsigned int *count, const char *area)
+{
+ if (time_before(jiffies, *period)) {
+ (*count)++;
+ } else {
+ *period = jiffies + (ecc_ded_period * HZ);
+ *count = 1;
+ }
+
+ if (*count > ecc_ded_threshold) {
+ netdev_alert(pdata->netdev,
+ "%s ECC detected errors exceed threshold\n",
+ area);
+ return true;
+ }
+
+ return false;
+}
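
xgbe_ecc_sec() and xgbe_ecc_ded() above count errors within a rolling period and restart the count at one once the period lapses. A standalone sketch of the same windowing, with wall-clock seconds standing in for jiffies and the defaults of ecc_sec_info_threshold/ecc_sec_period from the module parameters above:

#include <stdio.h>
#include <time.h>

static unsigned int threshold = 10; /* ecc_sec_info_threshold default */
static unsigned int period_s = 600; /* ecc_sec_period default */

static int count_error(time_t *period_end, unsigned int *count)
{
	time_t now = time(NULL);

	if (now < *period_end)
		(*count)++;     /* still inside the period */
	else {
		*period_end = now + period_s; /* period lapsed: restart */
		*count = 1;
	}
	return *count > threshold;
}

int main(void)
{
	time_t period_end = 0;
	unsigned int count = 0, i;

	for (i = 0; i < 12; i++)
		if (count_error(&period_end, &count))
			printf("error %u exceeds threshold\n", i + 1);
	return 0;
}
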
+
+static irqreturn_t xgbe_ecc_isr(int irq, void *data)
+{
+ struct xgbe_prv_data *pdata = data;
+ unsigned int ecc_isr;
+ bool stop = false;
+
+ /* Mask status with only the interrupts we care about */
+ ecc_isr = XP_IOREAD(pdata, XP_ECC_ISR);
+ ecc_isr &= XP_IOREAD(pdata, XP_ECC_IER);
+ netif_dbg(pdata, intr, pdata->netdev, "ECC_ISR=%#010x\n", ecc_isr);
+
+ if (XP_GET_BITS(ecc_isr, XP_ECC_ISR, TX_DED)) {
+ stop |= xgbe_ecc_ded(pdata, &pdata->tx_ded_period,
+ &pdata->tx_ded_count, "TX fifo");
+ }
+
+ if (XP_GET_BITS(ecc_isr, XP_ECC_ISR, RX_DED)) {
+ stop |= xgbe_ecc_ded(pdata, &pdata->rx_ded_period,
+ &pdata->rx_ded_count, "RX fifo");
+ }
+
+ if (XP_GET_BITS(ecc_isr, XP_ECC_ISR, DESC_DED)) {
+ stop |= xgbe_ecc_ded(pdata, &pdata->desc_ded_period,
+ &pdata->desc_ded_count,
+ "descriptor cache");
+ }
+
+ if (stop) {
+ pdata->hw_if.disable_ecc_ded(pdata);
+ schedule_work(&pdata->stopdev_work);
+ goto out;
+ }
+
+ if (XP_GET_BITS(ecc_isr, XP_ECC_ISR, TX_SEC)) {
+ if (xgbe_ecc_sec(pdata, &pdata->tx_sec_period,
+ &pdata->tx_sec_count, "TX fifo"))
+ pdata->hw_if.disable_ecc_sec(pdata, XGBE_ECC_SEC_TX);
+ }
+
+ if (XP_GET_BITS(ecc_isr, XP_ECC_ISR, RX_SEC))
+ if (xgbe_ecc_sec(pdata, &pdata->rx_sec_period,
+ &pdata->rx_sec_count, "RX fifo"))
+ pdata->hw_if.disable_ecc_sec(pdata, XGBE_ECC_SEC_RX);
+
+ if (XP_GET_BITS(ecc_isr, XP_ECC_ISR, DESC_SEC))
+ if (xgbe_ecc_sec(pdata, &pdata->desc_sec_period,
+ &pdata->desc_sec_count, "descriptor cache"))
+ pdata->hw_if.disable_ecc_sec(pdata, XGBE_ECC_SEC_DESC);
+
+out:
+ /* Clear all ECC interrupts */
+ XP_IOWRITE(pdata, XP_ECC_ISR, ecc_isr);
+
+ return IRQ_HANDLED;
}
static irqreturn_t xgbe_isr(int irq, void *data)
@@ -321,7 +443,7 @@ static irqreturn_t xgbe_isr(int irq, void *data)
struct xgbe_hw_if *hw_if = &pdata->hw_if;
struct xgbe_channel *channel;
unsigned int dma_isr, dma_ch_isr;
- unsigned int mac_isr, mac_tssr;
+ unsigned int mac_isr, mac_tssr, mac_mdioisr;
unsigned int i;
/* The DMA interrupt status register also reports MAC and MTL
@@ -358,6 +480,13 @@ static irqreturn_t xgbe_isr(int irq, void *data)
/* Turn on polling */
__napi_schedule_irqoff(&pdata->napi);
}
+ } else {
+ /* Don't clear Rx/Tx status if doing per channel DMA
+ * interrupts; these will be cleared by the per channel
+ * DMA interrupt ISR.
+ */
+ XGMAC_SET_BITS(dma_ch_isr, DMA_CH_SR, TI, 0);
+ XGMAC_SET_BITS(dma_ch_isr, DMA_CH_SR, RI, 0);
}
if (XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, RBU))
@@ -367,13 +496,16 @@ static irqreturn_t xgbe_isr(int irq, void *data)
if (XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, FBE))
schedule_work(&pdata->restart_work);
- /* Clear all interrupt signals */
+ /* Clear interrupt signals */
XGMAC_DMA_IOWRITE(channel, DMA_CH_SR, dma_ch_isr);
}
if (XGMAC_GET_BITS(dma_isr, DMA_ISR, MACIS)) {
mac_isr = XGMAC_IOREAD(pdata, MAC_ISR);
+ netif_dbg(pdata, intr, pdata->netdev, "MAC_ISR=%#010x\n",
+ mac_isr);
+
if (XGMAC_GET_BITS(mac_isr, MAC_ISR, MMCTXIS))
hw_if->tx_mmc_int(pdata);
@@ -383,6 +515,9 @@ static irqreturn_t xgbe_isr(int irq, void *data)
if (XGMAC_GET_BITS(mac_isr, MAC_ISR, TSIS)) {
mac_tssr = XGMAC_IOREAD(pdata, MAC_TSSR);
+ netif_dbg(pdata, intr, pdata->netdev,
+ "MAC_TSSR=%#010x\n", mac_tssr);
+
if (XGMAC_GET_BITS(mac_tssr, MAC_TSSR, TXTSC)) {
/* Read Tx Timestamp to clear interrupt */
pdata->tx_tstamp =
@@ -391,8 +526,31 @@ static irqreturn_t xgbe_isr(int irq, void *data)
&pdata->tx_tstamp_work);
}
}
+
+ if (XGMAC_GET_BITS(mac_isr, MAC_ISR, SMI)) {
+ mac_mdioisr = XGMAC_IOREAD(pdata, MAC_MDIOISR);
+
+ netif_dbg(pdata, intr, pdata->netdev,
+ "MAC_MDIOISR=%#010x\n", mac_mdioisr);
+
+ if (XGMAC_GET_BITS(mac_mdioisr, MAC_MDIOISR,
+ SNGLCOMPINT))
+ complete(&pdata->mdio_complete);
+ }
}
+ /* If there is not a separate AN irq, handle it here */
+ if (pdata->dev_irq == pdata->an_irq)
+ pdata->phy_if.an_isr(irq, pdata);
+
+ /* If there is not a separate ECC irq, handle it here */
+ if (pdata->vdata->ecc_support && (pdata->dev_irq == pdata->ecc_irq))
+ xgbe_ecc_isr(irq, pdata);
+
+ /* If there is not a separate I2C irq, handle it here */
+ if (pdata->vdata->i2c_support && (pdata->dev_irq == pdata->i2c_irq))
+ pdata->i2c_if.i2c_isr(irq, pdata);
+
isr_done:
return IRQ_HANDLED;
}
@@ -400,18 +558,29 @@ isr_done:
static irqreturn_t xgbe_dma_isr(int irq, void *data)
{
struct xgbe_channel *channel = data;
+ struct xgbe_prv_data *pdata = channel->pdata;
+ unsigned int dma_status;
/* Per channel DMA interrupts are enabled, so we use the per
* channel napi structure and not the private data napi structure
*/
if (napi_schedule_prep(&channel->napi)) {
/* Disable Tx and Rx interrupts */
- disable_irq_nosync(channel->dma_irq);
+ if (pdata->channel_irq_mode)
+ xgbe_disable_rx_tx_int(pdata, channel);
+ else
+ disable_irq_nosync(channel->dma_irq);
/* Turn on polling */
__napi_schedule_irqoff(&channel->napi);
}
+ /* Clear Tx/Rx signals */
+ dma_status = 0;
+ XGMAC_SET_BITS(dma_status, DMA_CH_SR, TI, 1);
+ XGMAC_SET_BITS(dma_status, DMA_CH_SR, RI, 1);
+ XGMAC_DMA_IOWRITE(channel, DMA_CH_SR, dma_status);
+
return IRQ_HANDLED;
}
@@ -428,7 +597,10 @@ static void xgbe_tx_timer(unsigned long data)
if (napi_schedule_prep(napi)) {
/* Disable Tx and Rx interrupts */
if (pdata->per_channel_irq)
- disable_irq_nosync(channel->dma_irq);
+ if (pdata->channel_irq_mode)
+ xgbe_disable_rx_tx_int(pdata, channel);
+ else
+ disable_irq_nosync(channel->dma_irq);
else
xgbe_disable_rx_tx_ints(pdata);
@@ -595,6 +767,10 @@ void xgbe_get_all_hw_features(struct xgbe_prv_data *pdata)
hw_feat->tx_ch_cnt++;
hw_feat->tc_cnt++;
+ /* Translate the fifo sizes into actual numbers */
+ hw_feat->rx_fifo_size = 1 << (hw_feat->rx_fifo_size + 7);
+ hw_feat->tx_fifo_size = 1 << (hw_feat->tx_fifo_size + 7);
+
DBGPR("<--xgbe_get_all_hw_features\n");
}
@@ -657,6 +833,16 @@ static int xgbe_request_irqs(struct xgbe_prv_data *pdata)
return ret;
}
+ if (pdata->vdata->ecc_support && (pdata->dev_irq != pdata->ecc_irq)) {
+ ret = devm_request_irq(pdata->dev, pdata->ecc_irq, xgbe_ecc_isr,
+ 0, pdata->ecc_name, pdata);
+ if (ret) {
+ netdev_alert(netdev, "error requesting ecc irq %d\n",
+ pdata->ecc_irq);
+ goto err_dev_irq;
+ }
+ }
+
if (!pdata->per_channel_irq)
return 0;
@@ -673,17 +859,21 @@ static int xgbe_request_irqs(struct xgbe_prv_data *pdata)
if (ret) {
netdev_alert(netdev, "error requesting irq %d\n",
channel->dma_irq);
- goto err_irq;
+ goto err_dma_irq;
}
}
return 0;
-err_irq:
+err_dma_irq:
/* Using an unsigned int, 'i' will go to UINT_MAX and exit */
for (i--, channel--; i < pdata->channel_count; i--, channel--)
devm_free_irq(pdata->dev, channel->dma_irq, channel);
+ if (pdata->vdata->ecc_support && (pdata->dev_irq != pdata->ecc_irq))
+ devm_free_irq(pdata->dev, pdata->ecc_irq, pdata);
+
+err_dev_irq:
devm_free_irq(pdata->dev, pdata->dev_irq, pdata);
return ret;
@@ -696,6 +886,9 @@ static void xgbe_free_irqs(struct xgbe_prv_data *pdata)
devm_free_irq(pdata->dev, pdata->dev_irq, pdata);
+ if (pdata->vdata->ecc_support && (pdata->dev_irq != pdata->ecc_irq))
+ devm_free_irq(pdata->dev, pdata->ecc_irq, pdata);
+
if (!pdata->per_channel_irq)
return;
@@ -783,7 +976,7 @@ static void xgbe_free_rx_data(struct xgbe_prv_data *pdata)
DBGPR("<--xgbe_free_rx_data\n");
}
-static int xgbe_phy_init(struct xgbe_prv_data *pdata)
+static int xgbe_phy_reset(struct xgbe_prv_data *pdata)
{
pdata->phy_link = -1;
pdata->phy_speed = SPEED_UNKNOWN;
@@ -879,16 +1072,16 @@ static int xgbe_start(struct xgbe_prv_data *pdata)
hw_if->init(pdata);
- ret = phy_if->phy_start(pdata);
- if (ret)
- goto err_phy;
-
xgbe_napi_enable(pdata, 1);
ret = xgbe_request_irqs(pdata);
if (ret)
goto err_napi;
+ ret = phy_if->phy_start(pdata);
+ if (ret)
+ goto err_irqs;
+
hw_if->enable_tx(pdata);
hw_if->enable_rx(pdata);
@@ -897,16 +1090,18 @@ static int xgbe_start(struct xgbe_prv_data *pdata)
xgbe_start_timers(pdata);
queue_work(pdata->dev_workqueue, &pdata->service_work);
+ clear_bit(XGBE_STOPPED, &pdata->dev_state);
+
DBGPR("<--xgbe_start\n");
return 0;
+err_irqs:
+ xgbe_free_irqs(pdata);
+
err_napi:
xgbe_napi_disable(pdata, 1);
- phy_if->phy_stop(pdata);
-
-err_phy:
hw_if->exit(pdata);
return ret;
@@ -923,6 +1118,9 @@ static void xgbe_stop(struct xgbe_prv_data *pdata)
DBGPR("-->xgbe_stop\n");
+ if (test_bit(XGBE_STOPPED, &pdata->dev_state))
+ return;
+
netif_tx_stop_all_queues(netdev);
xgbe_stop_timers(pdata);
@@ -948,9 +1146,29 @@ static void xgbe_stop(struct xgbe_prv_data *pdata)
netdev_tx_reset_queue(txq);
}
+ set_bit(XGBE_STOPPED, &pdata->dev_state);
+
DBGPR("<--xgbe_stop\n");
}
+static void xgbe_stopdev(struct work_struct *work)
+{
+ struct xgbe_prv_data *pdata = container_of(work,
+ struct xgbe_prv_data,
+ stopdev_work);
+
+ rtnl_lock();
+
+ xgbe_stop(pdata);
+
+ xgbe_free_tx_data(pdata);
+ xgbe_free_rx_data(pdata);
+
+ rtnl_unlock();
+
+ netdev_alert(pdata->netdev, "device stopped\n");
+}
+
static void xgbe_restart_dev(struct xgbe_prv_data *pdata)
{
DBGPR("-->xgbe_restart_dev\n");
@@ -1297,8 +1515,8 @@ static int xgbe_open(struct net_device *netdev)
DBGPR("-->xgbe_open\n");
- /* Initialize the phy */
- ret = xgbe_phy_init(pdata);
+ /* Reset the phy settings */
+ ret = xgbe_phy_reset(pdata);
if (ret)
return ret;
@@ -1333,6 +1551,7 @@ static int xgbe_open(struct net_device *netdev)
INIT_WORK(&pdata->service_work, xgbe_service);
INIT_WORK(&pdata->restart_work, xgbe_restart);
+ INIT_WORK(&pdata->stopdev_work, xgbe_stopdev);
INIT_WORK(&pdata->tx_tstamp_work, xgbe_tx_tstamp);
xgbe_init_timers(pdata);
@@ -2041,6 +2260,7 @@ static int xgbe_one_poll(struct napi_struct *napi, int budget)
{
struct xgbe_channel *channel = container_of(napi, struct xgbe_channel,
napi);
+ struct xgbe_prv_data *pdata = channel->pdata;
int processed = 0;
DBGPR("-->xgbe_one_poll: budget=%d\n", budget);
@@ -2057,7 +2277,10 @@ static int xgbe_one_poll(struct napi_struct *napi, int budget)
napi_complete_done(napi, processed);
/* Enable Tx and Rx interrupts */
- enable_irq(channel->dma_irq);
+ if (pdata->channel_irq_mode)
+ xgbe_enable_rx_tx_int(pdata, channel);
+ else
+ enable_irq(channel->dma_irq);
}
DBGPR("<--xgbe_one_poll: received = %d\n", processed);
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
index 4007b429c80c..920566a3a599 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
@@ -272,97 +272,86 @@ static int xgbe_set_pauseparam(struct net_device *netdev,
return ret;
}
-static int xgbe_get_settings(struct net_device *netdev,
- struct ethtool_cmd *cmd)
+static int xgbe_get_link_ksettings(struct net_device *netdev,
+ struct ethtool_link_ksettings *cmd)
{
struct xgbe_prv_data *pdata = netdev_priv(netdev);
- cmd->phy_address = pdata->phy.address;
+ cmd->base.phy_address = pdata->phy.address;
- cmd->supported = pdata->phy.supported;
- cmd->advertising = pdata->phy.advertising;
- cmd->lp_advertising = pdata->phy.lp_advertising;
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
+ pdata->phy.supported);
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
+ pdata->phy.advertising);
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.lp_advertising,
+ pdata->phy.lp_advertising);
- cmd->autoneg = pdata->phy.autoneg;
- ethtool_cmd_speed_set(cmd, pdata->phy.speed);
- cmd->duplex = pdata->phy.duplex;
+ cmd->base.autoneg = pdata->phy.autoneg;
+ cmd->base.speed = pdata->phy.speed;
+ cmd->base.duplex = pdata->phy.duplex;
- cmd->port = PORT_NONE;
- cmd->transceiver = XCVR_INTERNAL;
+ cmd->base.port = PORT_NONE;
return 0;
}
-static int xgbe_set_settings(struct net_device *netdev,
- struct ethtool_cmd *cmd)
+static int xgbe_set_link_ksettings(struct net_device *netdev,
+ const struct ethtool_link_ksettings *cmd)
{
struct xgbe_prv_data *pdata = netdev_priv(netdev);
+ u32 advertising;
u32 speed;
int ret;
- speed = ethtool_cmd_speed(cmd);
+ speed = cmd->base.speed;
- if (cmd->phy_address != pdata->phy.address) {
+ if (cmd->base.phy_address != pdata->phy.address) {
netdev_err(netdev, "invalid phy address %hhu\n",
- cmd->phy_address);
+ cmd->base.phy_address);
return -EINVAL;
}
- if ((cmd->autoneg != AUTONEG_ENABLE) &&
- (cmd->autoneg != AUTONEG_DISABLE)) {
+ if ((cmd->base.autoneg != AUTONEG_ENABLE) &&
+ (cmd->base.autoneg != AUTONEG_DISABLE)) {
netdev_err(netdev, "unsupported autoneg %hhu\n",
- cmd->autoneg);
+ cmd->base.autoneg);
return -EINVAL;
}
- if (cmd->autoneg == AUTONEG_DISABLE) {
- switch (speed) {
- case SPEED_10000:
- break;
- case SPEED_2500:
- if (pdata->speed_set != XGBE_SPEEDSET_2500_10000) {
- netdev_err(netdev, "unsupported speed %u\n",
- speed);
- return -EINVAL;
- }
- break;
- case SPEED_1000:
- if (pdata->speed_set != XGBE_SPEEDSET_1000_10000) {
- netdev_err(netdev, "unsupported speed %u\n",
- speed);
- return -EINVAL;
- }
- break;
- default:
+ if (cmd->base.autoneg == AUTONEG_DISABLE) {
+ if (!pdata->phy_if.phy_valid_speed(pdata, speed)) {
netdev_err(netdev, "unsupported speed %u\n", speed);
return -EINVAL;
}
- if (cmd->duplex != DUPLEX_FULL) {
+ if (cmd->base.duplex != DUPLEX_FULL) {
netdev_err(netdev, "unsupported duplex %hhu\n",
- cmd->duplex);
+ cmd->base.duplex);
return -EINVAL;
}
}
+ ethtool_convert_link_mode_to_legacy_u32(&advertising,
+ cmd->link_modes.advertising);
+
netif_dbg(pdata, link, netdev,
"requested advertisement %#x, phy supported %#x\n",
- cmd->advertising, pdata->phy.supported);
+ advertising, pdata->phy.supported);
- cmd->advertising &= pdata->phy.supported;
- if ((cmd->autoneg == AUTONEG_ENABLE) && !cmd->advertising) {
+ advertising &= pdata->phy.supported;
+ if ((cmd->base.autoneg == AUTONEG_ENABLE) && !advertising) {
netdev_err(netdev,
"unsupported requested advertisement\n");
return -EINVAL;
}
ret = 0;
- pdata->phy.autoneg = cmd->autoneg;
+ pdata->phy.autoneg = cmd->base.autoneg;
pdata->phy.speed = speed;
- pdata->phy.duplex = cmd->duplex;
- pdata->phy.advertising = cmd->advertising;
+ pdata->phy.duplex = cmd->base.duplex;
+ pdata->phy.advertising = advertising;
- if (cmd->autoneg == AUTONEG_ENABLE)
+ if (cmd->base.autoneg == AUTONEG_ENABLE)
pdata->phy.advertising |= ADVERTISED_Autoneg;
else
pdata->phy.advertising &= ~ADVERTISED_Autoneg;
@@ -602,8 +591,6 @@ static int xgbe_get_ts_info(struct net_device *netdev,
}
static const struct ethtool_ops xgbe_ethtool_ops = {
- .get_settings = xgbe_get_settings,
- .set_settings = xgbe_set_settings,
.get_drvinfo = xgbe_get_drvinfo,
.get_msglevel = xgbe_get_msglevel,
.set_msglevel = xgbe_set_msglevel,
@@ -621,6 +608,8 @@ static const struct ethtool_ops xgbe_ethtool_ops = {
.get_rxfh = xgbe_get_rxfh,
.set_rxfh = xgbe_set_rxfh,
.get_ts_info = xgbe_get_ts_info,
+ .get_link_ksettings = xgbe_get_link_ksettings,
+ .set_link_ksettings = xgbe_set_link_ksettings,
};
const struct ethtool_ops *xgbe_get_ethtool_ops(void)
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-i2c.c b/drivers/net/ethernet/amd/xgbe/xgbe-i2c.c
new file mode 100644
index 000000000000..0c7088a426e9
--- /dev/null
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-i2c.c
@@ -0,0 +1,492 @@
+/*
+ * AMD 10Gb Ethernet driver
+ *
+ * This file is available to you under your choice of the following two
+ * licenses:
+ *
+ * License 1: GPLv2
+ *
+ * Copyright (c) 2016 Advanced Micro Devices, Inc.
+ *
+ * This file is free software; you may copy, redistribute and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * This file incorporates work covered by the following copyright and
+ * permission notice:
+ * The Synopsys DWC ETHER XGMAC Software Driver and documentation
+ * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
+ * Inc. unless otherwise expressly agreed to in writing between Synopsys
+ * and you.
+ *
+ * The Software IS NOT an item of Licensed Software or Licensed Product
+ * under any End User Software License Agreement or Agreement for Licensed
+ * Product with Synopsys or any supplement thereto. Permission is hereby
+ * granted, free of charge, to any person obtaining a copy of this software
+ * annotated with this license and the Software, to deal in the Software
+ * without restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is furnished
+ * to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
+ * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+ * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *
+ * License 2: Modified BSD
+ *
+ * Copyright (c) 2016 Advanced Micro Devices, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Advanced Micro Devices, Inc. nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * This file incorporates work covered by the following copyright and
+ * permission notice:
+ * The Synopsys DWC ETHER XGMAC Software Driver and documentation
+ * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
+ * Inc. unless otherwise expressly agreed to in writing between Synopsys
+ * and you.
+ *
+ * The Software IS NOT an item of Licensed Software or Licensed Product
+ * under any End User Software License Agreement or Agreement for Licensed
+ * Product with Synopsys or any supplement thereto. Permission is hereby
+ * granted, free of charge, to any person obtaining a copy of this software
+ * annotated with this license and the Software, to deal in the Software
+ * without restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is furnished
+ * to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
+ * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+ * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/module.h>
+#include <linux/kmod.h>
+#include <linux/delay.h>
+#include <linux/completion.h>
+#include <linux/mutex.h>
+
+#include "xgbe.h"
+#include "xgbe-common.h"
+
+#define XGBE_ABORT_COUNT 500
+#define XGBE_DISABLE_COUNT 1000
+
+#define XGBE_STD_SPEED 1
+
+#define XGBE_INTR_RX_FULL BIT(IC_RAW_INTR_STAT_RX_FULL_INDEX)
+#define XGBE_INTR_TX_EMPTY BIT(IC_RAW_INTR_STAT_TX_EMPTY_INDEX)
+#define XGBE_INTR_TX_ABRT BIT(IC_RAW_INTR_STAT_TX_ABRT_INDEX)
+#define XGBE_INTR_STOP_DET BIT(IC_RAW_INTR_STAT_STOP_DET_INDEX)
+#define XGBE_DEFAULT_INT_MASK (XGBE_INTR_RX_FULL | \
+ XGBE_INTR_TX_EMPTY | \
+ XGBE_INTR_TX_ABRT | \
+ XGBE_INTR_STOP_DET)
+
+#define XGBE_I2C_READ BIT(8)
+#define XGBE_I2C_STOP BIT(9)
+
+static int xgbe_i2c_abort(struct xgbe_prv_data *pdata)
+{
+ unsigned int wait = XGBE_ABORT_COUNT;
+
+ /* Must be enabled to recognize the abort request */
+ XI2C_IOWRITE_BITS(pdata, IC_ENABLE, EN, 1);
+
+ /* Issue the abort */
+ XI2C_IOWRITE_BITS(pdata, IC_ENABLE, ABORT, 1);
+
+ while (wait--) {
+ if (!XI2C_IOREAD_BITS(pdata, IC_ENABLE, ABORT))
+ return 0;
+
+ usleep_range(500, 600);
+ }
+
+ return -EBUSY;
+}
+
+static int xgbe_i2c_set_enable(struct xgbe_prv_data *pdata, bool enable)
+{
+ unsigned int wait = XGBE_DISABLE_COUNT;
+ unsigned int mode = enable ? 1 : 0;
+
+ while (wait--) {
+ XI2C_IOWRITE_BITS(pdata, IC_ENABLE, EN, mode);
+ if (XI2C_IOREAD_BITS(pdata, IC_ENABLE_STATUS, EN) == mode)
+ return 0;
+
+ usleep_range(100, 110);
+ }
+
+ return -EBUSY;
+}
+
+static int xgbe_i2c_disable(struct xgbe_prv_data *pdata)
+{
+ int ret;
+
+ ret = xgbe_i2c_set_enable(pdata, false);
+ if (ret) {
+ /* Disable failed, try an abort */
+ ret = xgbe_i2c_abort(pdata);
+ if (ret)
+ return ret;
+
+ /* Abort succeeded, try to disable again */
+ ret = xgbe_i2c_set_enable(pdata, false);
+ }
+
+ return ret;
+}
+
+static int xgbe_i2c_enable(struct xgbe_prv_data *pdata)
+{
+ return xgbe_i2c_set_enable(pdata, true);
+}
+
+static void xgbe_i2c_clear_all_interrupts(struct xgbe_prv_data *pdata)
+{
+ XI2C_IOREAD(pdata, IC_CLR_INTR);
+}
+
+static void xgbe_i2c_disable_interrupts(struct xgbe_prv_data *pdata)
+{
+ XI2C_IOWRITE(pdata, IC_INTR_MASK, 0);
+}
+
+static void xgbe_i2c_enable_interrupts(struct xgbe_prv_data *pdata)
+{
+ XI2C_IOWRITE(pdata, IC_INTR_MASK, XGBE_DEFAULT_INT_MASK);
+}
+
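+/* Fill the Tx fifo from the current operation: each IC_DATA_CMD entry is
+ * either a read request or a data byte, with STOP set on the final
+ * entry. TX_EMPTY is masked once the whole operation has been queued.
+ */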
+static void xgbe_i2c_write(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_i2c_op_state *state = &pdata->i2c.op_state;
+ unsigned int tx_slots;
+ unsigned int cmd;
+
+	/* RX_FIFO_FULL_HOLD prevents Rx overflows, so fill up the Tx fifo */
+ tx_slots = pdata->i2c.tx_fifo_size - XI2C_IOREAD(pdata, IC_TXFLR);
+ while (tx_slots && state->tx_len) {
+ if (state->op->cmd == XGBE_I2C_CMD_READ)
+ cmd = XGBE_I2C_READ;
+ else
+ cmd = *state->tx_buf++;
+
+ if (state->tx_len == 1)
+ XI2C_SET_BITS(cmd, IC_DATA_CMD, STOP, 1);
+
+ XI2C_IOWRITE(pdata, IC_DATA_CMD, cmd);
+
+ tx_slots--;
+ state->tx_len--;
+ }
+
+	/* No more Tx data to send, so mask the TX_EMPTY interrupt */
+ if (!state->tx_len)
+ XI2C_IOWRITE_BITS(pdata, IC_INTR_MASK, TX_EMPTY, 0);
+}
+
+static void xgbe_i2c_read(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_i2c_op_state *state = &pdata->i2c.op_state;
+ unsigned int rx_slots;
+
+ /* Anything to be read? */
+ if (state->op->cmd != XGBE_I2C_CMD_READ)
+ return;
+
+ rx_slots = XI2C_IOREAD(pdata, IC_RXFLR);
+ while (rx_slots && state->rx_len) {
+ *state->rx_buf++ = XI2C_IOREAD(pdata, IC_DATA_CMD);
+ state->rx_len--;
+ rx_slots--;
+ }
+}
+
+static void xgbe_i2c_clear_isr_interrupts(struct xgbe_prv_data *pdata,
+ unsigned int isr)
+{
+ struct xgbe_i2c_op_state *state = &pdata->i2c.op_state;
+
+ if (isr & XGBE_INTR_TX_ABRT) {
+ state->tx_abort_source = XI2C_IOREAD(pdata, IC_TX_ABRT_SOURCE);
+ XI2C_IOREAD(pdata, IC_CLR_TX_ABRT);
+ }
+
+ if (isr & XGBE_INTR_STOP_DET)
+ XI2C_IOREAD(pdata, IC_CLR_STOP_DET);
+}
+
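+/* Latch and clear the raw interrupt status, fail the operation on a Tx
+ * abort, otherwise drain the Rx fifo and refill the Tx fifo; the waiter
+ * is completed on an error or a STOP condition.
+ */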
+static irqreturn_t xgbe_i2c_isr(int irq, void *data)
+{
+ struct xgbe_prv_data *pdata = (struct xgbe_prv_data *)data;
+ struct xgbe_i2c_op_state *state = &pdata->i2c.op_state;
+ unsigned int isr;
+
+ isr = XI2C_IOREAD(pdata, IC_RAW_INTR_STAT);
+ netif_dbg(pdata, intr, pdata->netdev,
+ "I2C interrupt received: status=%#010x\n", isr);
+
+ xgbe_i2c_clear_isr_interrupts(pdata, isr);
+
+ if (isr & XGBE_INTR_TX_ABRT) {
+ netif_dbg(pdata, link, pdata->netdev,
+ "I2C TX_ABRT received (%#010x) for target %#04x\n",
+ state->tx_abort_source, state->op->target);
+
+ xgbe_i2c_disable_interrupts(pdata);
+
+ state->ret = -EIO;
+ goto out;
+ }
+
+ /* Check for data in the Rx fifo */
+ xgbe_i2c_read(pdata);
+
+ /* Fill up the Tx fifo next */
+ xgbe_i2c_write(pdata);
+
+out:
+ /* Complete on an error or STOP condition */
+ if (state->ret || XI2C_GET_BITS(isr, IC_RAW_INTR_STAT, STOP_DET))
+ complete(&pdata->i2c_complete);
+
+ return IRQ_HANDLED;
+}
+
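+/* Configure the controller as a standard-speed I2C master with restart
+ * support, holding the bus when the Rx fifo fills instead of overflowing.
+ */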
+static void xgbe_i2c_set_mode(struct xgbe_prv_data *pdata)
+{
+ unsigned int reg;
+
+ reg = XI2C_IOREAD(pdata, IC_CON);
+ XI2C_SET_BITS(reg, IC_CON, MASTER_MODE, 1);
+ XI2C_SET_BITS(reg, IC_CON, SLAVE_DISABLE, 1);
+ XI2C_SET_BITS(reg, IC_CON, RESTART_EN, 1);
+ XI2C_SET_BITS(reg, IC_CON, SPEED, XGBE_STD_SPEED);
+ XI2C_SET_BITS(reg, IC_CON, RX_FIFO_FULL_HOLD, 1);
+ XI2C_IOWRITE(pdata, IC_CON, reg);
+}
+
+static void xgbe_i2c_get_features(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_i2c *i2c = &pdata->i2c;
+ unsigned int reg;
+
+ reg = XI2C_IOREAD(pdata, IC_COMP_PARAM_1);
+ i2c->max_speed_mode = XI2C_GET_BITS(reg, IC_COMP_PARAM_1,
+ MAX_SPEED_MODE);
+ i2c->rx_fifo_size = XI2C_GET_BITS(reg, IC_COMP_PARAM_1,
+ RX_BUFFER_DEPTH);
+ i2c->tx_fifo_size = XI2C_GET_BITS(reg, IC_COMP_PARAM_1,
+ TX_BUFFER_DEPTH);
+
+ if (netif_msg_probe(pdata))
+ dev_dbg(pdata->dev, "I2C features: %s=%u, %s=%u, %s=%u\n",
+ "MAX_SPEED_MODE", i2c->max_speed_mode,
+ "RX_BUFFER_DEPTH", i2c->rx_fifo_size,
+ "TX_BUFFER_DEPTH", i2c->tx_fifo_size);
+}
+
+static void xgbe_i2c_set_target(struct xgbe_prv_data *pdata, unsigned int addr)
+{
+ XI2C_IOWRITE(pdata, IC_TAR, addr);
+}
+
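+/* Shared-interrupt entry point: only run the ISR if the controller has
+ * raw status bits pending.
+ */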
+static irqreturn_t xgbe_i2c_combined_isr(int irq, struct xgbe_prv_data *pdata)
+{
+ if (!XI2C_IOREAD(pdata, IC_RAW_INTR_STAT))
+ return IRQ_HANDLED;
+
+ return xgbe_i2c_isr(irq, pdata);
+}
+
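+/* Perform a single synchronous I2C operation: quiesce the controller,
+ * program the target address, let the ISR drive the transfer and wait up
+ * to one second for completion.
+ */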
+static int xgbe_i2c_xfer(struct xgbe_prv_data *pdata, struct xgbe_i2c_op *op)
+{
+ struct xgbe_i2c_op_state *state = &pdata->i2c.op_state;
+ int ret;
+
+ mutex_lock(&pdata->i2c_mutex);
+
+ reinit_completion(&pdata->i2c_complete);
+
+ ret = xgbe_i2c_disable(pdata);
+ if (ret) {
+ netdev_err(pdata->netdev, "failed to disable i2c master\n");
+ goto unlock;
+ }
+
+ xgbe_i2c_set_target(pdata, op->target);
+
+ memset(state, 0, sizeof(*state));
+ state->op = op;
+ state->tx_len = op->len;
+ state->tx_buf = op->buf;
+ state->rx_len = op->len;
+ state->rx_buf = op->buf;
+
+ xgbe_i2c_clear_all_interrupts(pdata);
+ ret = xgbe_i2c_enable(pdata);
+ if (ret) {
+ netdev_err(pdata->netdev, "failed to enable i2c master\n");
+ goto unlock;
+ }
+
+	/* Enabling the interrupts will cause the TX FIFO empty interrupt to
+	 * fire; the ISR then drives the rest of the command.
+	 */
+ xgbe_i2c_enable_interrupts(pdata);
+
+ if (!wait_for_completion_timeout(&pdata->i2c_complete, HZ)) {
+ netdev_err(pdata->netdev, "i2c operation timed out\n");
+ ret = -ETIMEDOUT;
+ goto disable;
+ }
+
+ ret = state->ret;
+ if (ret) {
+ if (state->tx_abort_source & IC_TX_ABRT_7B_ADDR_NOACK)
+ ret = -ENOTCONN;
+ else if (state->tx_abort_source & IC_TX_ABRT_ARB_LOST)
+ ret = -EAGAIN;
+ }
+
+disable:
+ xgbe_i2c_disable_interrupts(pdata);
+ xgbe_i2c_disable(pdata);
+
+unlock:
+ mutex_unlock(&pdata->i2c_mutex);
+
+ return ret;
+}
+
+static void xgbe_i2c_stop(struct xgbe_prv_data *pdata)
+{
+ if (!pdata->i2c.started)
+ return;
+
+ netif_dbg(pdata, link, pdata->netdev, "stopping I2C\n");
+
+ pdata->i2c.started = 0;
+
+ xgbe_i2c_disable_interrupts(pdata);
+ xgbe_i2c_disable(pdata);
+ xgbe_i2c_clear_all_interrupts(pdata);
+
+ if (pdata->dev_irq != pdata->i2c_irq)
+ devm_free_irq(pdata->dev, pdata->i2c_irq, pdata);
+}
+
+static int xgbe_i2c_start(struct xgbe_prv_data *pdata)
+{
+ int ret;
+
+ if (pdata->i2c.started)
+ return 0;
+
+ netif_dbg(pdata, link, pdata->netdev, "starting I2C\n");
+
+	/* If we have a separate I2C irq, request it now */
+ if (pdata->dev_irq != pdata->i2c_irq) {
+ ret = devm_request_irq(pdata->dev, pdata->i2c_irq,
+ xgbe_i2c_isr, 0, pdata->i2c_name,
+ pdata);
+ if (ret) {
+ netdev_err(pdata->netdev, "i2c irq request failed\n");
+ return ret;
+ }
+ }
+
+ pdata->i2c.started = 1;
+
+ return 0;
+}
+
+static int xgbe_i2c_init(struct xgbe_prv_data *pdata)
+{
+ int ret;
+
+ xgbe_i2c_disable_interrupts(pdata);
+
+ ret = xgbe_i2c_disable(pdata);
+ if (ret) {
+ dev_err(pdata->dev, "failed to disable i2c master\n");
+ return ret;
+ }
+
+ xgbe_i2c_get_features(pdata);
+
+ xgbe_i2c_set_mode(pdata);
+
+ xgbe_i2c_clear_all_interrupts(pdata);
+
+ return 0;
+}
+
+void xgbe_init_function_ptrs_i2c(struct xgbe_i2c_if *i2c_if)
+{
+ i2c_if->i2c_init = xgbe_i2c_init;
+
+ i2c_if->i2c_start = xgbe_i2c_start;
+ i2c_if->i2c_stop = xgbe_i2c_stop;
+
+ i2c_if->i2c_xfer = xgbe_i2c_xfer;
+
+ i2c_if->i2c_isr = xgbe_i2c_combined_isr;
+}
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-main.c b/drivers/net/ethernet/amd/xgbe/xgbe-main.c
index 4f7635178200..17ac8f9a51a0 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-main.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-main.c
@@ -116,19 +116,10 @@
#include <linux/module.h>
#include <linux/device.h>
-#include <linux/platform_device.h>
#include <linux/spinlock.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/io.h>
-#include <linux/of.h>
-#include <linux/of_net.h>
-#include <linux/of_address.h>
-#include <linux/of_platform.h>
-#include <linux/clk.h>
-#include <linux/property.h>
-#include <linux/acpi.h>
-#include <linux/mdio.h>
#include "xgbe.h"
#include "xgbe-common.h"
@@ -145,42 +136,6 @@ MODULE_PARM_DESC(debug, " Network interface message level setting");
static const u32 default_msg_level = (NETIF_MSG_LINK | NETIF_MSG_IFDOWN |
NETIF_MSG_IFUP);
-static const u32 xgbe_serdes_blwc[] = {
- XGBE_SPEED_1000_BLWC,
- XGBE_SPEED_2500_BLWC,
- XGBE_SPEED_10000_BLWC,
-};
-
-static const u32 xgbe_serdes_cdr_rate[] = {
- XGBE_SPEED_1000_CDR,
- XGBE_SPEED_2500_CDR,
- XGBE_SPEED_10000_CDR,
-};
-
-static const u32 xgbe_serdes_pq_skew[] = {
- XGBE_SPEED_1000_PQ,
- XGBE_SPEED_2500_PQ,
- XGBE_SPEED_10000_PQ,
-};
-
-static const u32 xgbe_serdes_tx_amp[] = {
- XGBE_SPEED_1000_TXAMP,
- XGBE_SPEED_2500_TXAMP,
- XGBE_SPEED_10000_TXAMP,
-};
-
-static const u32 xgbe_serdes_dfe_tap_cfg[] = {
- XGBE_SPEED_1000_DFE_TAP_CONFIG,
- XGBE_SPEED_2500_DFE_TAP_CONFIG,
- XGBE_SPEED_10000_DFE_TAP_CONFIG,
-};
-
-static const u32 xgbe_serdes_dfe_tap_ena[] = {
- XGBE_SPEED_1000_DFE_TAP_ENABLE,
- XGBE_SPEED_2500_DFE_TAP_ENABLE,
- XGBE_SPEED_10000_DFE_TAP_ENABLE,
-};
-
static void xgbe_default_config(struct xgbe_prv_data *pdata)
{
DBGPR("-->xgbe_default_config\n");
@@ -206,455 +161,124 @@ static void xgbe_init_all_fptrs(struct xgbe_prv_data *pdata)
{
xgbe_init_function_ptrs_dev(&pdata->hw_if);
xgbe_init_function_ptrs_phy(&pdata->phy_if);
+ xgbe_init_function_ptrs_i2c(&pdata->i2c_if);
xgbe_init_function_ptrs_desc(&pdata->desc_if);
-}
-
-#ifdef CONFIG_ACPI
-static int xgbe_acpi_support(struct xgbe_prv_data *pdata)
-{
- struct device *dev = pdata->dev;
- u32 property;
- int ret;
-
- /* Obtain the system clock setting */
- ret = device_property_read_u32(dev, XGBE_ACPI_DMA_FREQ, &property);
- if (ret) {
- dev_err(dev, "unable to obtain %s property\n",
- XGBE_ACPI_DMA_FREQ);
- return ret;
- }
- pdata->sysclk_rate = property;
-
- /* Obtain the PTP clock setting */
- ret = device_property_read_u32(dev, XGBE_ACPI_PTP_FREQ, &property);
- if (ret) {
- dev_err(dev, "unable to obtain %s property\n",
- XGBE_ACPI_PTP_FREQ);
- return ret;
- }
- pdata->ptpclk_rate = property;
- return 0;
+ pdata->vdata->init_function_ptrs_phy_impl(&pdata->phy_if);
}
-#else /* CONFIG_ACPI */
-static int xgbe_acpi_support(struct xgbe_prv_data *pdata)
-{
- return -EINVAL;
-}
-#endif /* CONFIG_ACPI */
-#ifdef CONFIG_OF
-static int xgbe_of_support(struct xgbe_prv_data *pdata)
-{
- struct device *dev = pdata->dev;
-
- /* Obtain the system clock setting */
- pdata->sysclk = devm_clk_get(dev, XGBE_DMA_CLOCK);
- if (IS_ERR(pdata->sysclk)) {
- dev_err(dev, "dma devm_clk_get failed\n");
- return PTR_ERR(pdata->sysclk);
- }
- pdata->sysclk_rate = clk_get_rate(pdata->sysclk);
-
- /* Obtain the PTP clock setting */
- pdata->ptpclk = devm_clk_get(dev, XGBE_PTP_CLOCK);
- if (IS_ERR(pdata->ptpclk)) {
- dev_err(dev, "ptp devm_clk_get failed\n");
- return PTR_ERR(pdata->ptpclk);
- }
- pdata->ptpclk_rate = clk_get_rate(pdata->ptpclk);
-
- return 0;
-}
-
-static struct platform_device *xgbe_of_get_phy_pdev(struct xgbe_prv_data *pdata)
-{
- struct device *dev = pdata->dev;
- struct device_node *phy_node;
- struct platform_device *phy_pdev;
-
- phy_node = of_parse_phandle(dev->of_node, "phy-handle", 0);
- if (phy_node) {
- /* Old style device tree:
- * The XGBE and PHY resources are separate
- */
- phy_pdev = of_find_device_by_node(phy_node);
- of_node_put(phy_node);
- } else {
- /* New style device tree:
- * The XGBE and PHY resources are grouped together with
- * the PHY resources listed last
- */
- get_device(dev);
- phy_pdev = pdata->pdev;
- }
-
- return phy_pdev;
-}
-#else /* CONFIG_OF */
-static int xgbe_of_support(struct xgbe_prv_data *pdata)
-{
- return -EINVAL;
-}
-
-static struct platform_device *xgbe_of_get_phy_pdev(struct xgbe_prv_data *pdata)
-{
- return NULL;
-}
-#endif /* CONFIG_OF */
-
-static unsigned int xgbe_resource_count(struct platform_device *pdev,
- unsigned int type)
-{
- unsigned int count;
- int i;
-
- for (i = 0, count = 0; i < pdev->num_resources; i++) {
- struct resource *res = &pdev->resource[i];
-
- if (type == resource_type(res))
- count++;
- }
-
- return count;
-}
-
-static struct platform_device *xgbe_get_phy_pdev(struct xgbe_prv_data *pdata)
-{
- struct platform_device *phy_pdev;
-
- if (pdata->use_acpi) {
- get_device(pdata->dev);
- phy_pdev = pdata->pdev;
- } else {
- phy_pdev = xgbe_of_get_phy_pdev(pdata);
- }
-
- return phy_pdev;
-}
-
-static int xgbe_probe(struct platform_device *pdev)
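+/* Allocate the net_device and private data and initialize the locks,
+ * mutexes and completions shared by the bus-specific probe paths.
+ */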
+struct xgbe_prv_data *xgbe_alloc_pdata(struct device *dev)
{
struct xgbe_prv_data *pdata;
struct net_device *netdev;
- struct device *dev = &pdev->dev, *phy_dev;
- struct platform_device *phy_pdev;
- struct resource *res;
- const char *phy_mode;
- unsigned int i, phy_memnum, phy_irqnum;
- enum dev_dma_attr attr;
- int ret;
-
- DBGPR("--> xgbe_probe\n");
netdev = alloc_etherdev_mq(sizeof(struct xgbe_prv_data),
XGBE_MAX_DMA_CHANNELS);
if (!netdev) {
- dev_err(dev, "alloc_etherdev failed\n");
- ret = -ENOMEM;
- goto err_alloc;
+ dev_err(dev, "alloc_etherdev_mq failed\n");
+ return ERR_PTR(-ENOMEM);
}
SET_NETDEV_DEV(netdev, dev);
pdata = netdev_priv(netdev);
pdata->netdev = netdev;
- pdata->pdev = pdev;
- pdata->adev = ACPI_COMPANION(dev);
pdata->dev = dev;
- platform_set_drvdata(pdev, netdev);
spin_lock_init(&pdata->lock);
spin_lock_init(&pdata->xpcs_lock);
mutex_init(&pdata->rss_mutex);
spin_lock_init(&pdata->tstamp_lock);
+ mutex_init(&pdata->i2c_mutex);
+ init_completion(&pdata->i2c_complete);
+ init_completion(&pdata->mdio_complete);
pdata->msg_enable = netif_msg_init(debug, default_msg_level);
set_bit(XGBE_DOWN, &pdata->dev_state);
+ set_bit(XGBE_STOPPED, &pdata->dev_state);
- /* Check if we should use ACPI or DT */
- pdata->use_acpi = dev->of_node ? 0 : 1;
-
- phy_pdev = xgbe_get_phy_pdev(pdata);
- if (!phy_pdev) {
- dev_err(dev, "unable to obtain phy device\n");
- ret = -EINVAL;
- goto err_phydev;
- }
- phy_dev = &phy_pdev->dev;
-
- if (pdev == phy_pdev) {
- /* New style device tree or ACPI:
- * The XGBE and PHY resources are grouped together with
- * the PHY resources listed last
- */
- phy_memnum = xgbe_resource_count(pdev, IORESOURCE_MEM) - 3;
- phy_irqnum = xgbe_resource_count(pdev, IORESOURCE_IRQ) - 1;
- } else {
- /* Old style device tree:
- * The XGBE and PHY resources are separate
- */
- phy_memnum = 0;
- phy_irqnum = 0;
- }
-
- /* Set and validate the number of descriptors for a ring */
- BUILD_BUG_ON_NOT_POWER_OF_2(XGBE_TX_DESC_CNT);
- pdata->tx_desc_count = XGBE_TX_DESC_CNT;
- if (pdata->tx_desc_count & (pdata->tx_desc_count - 1)) {
- dev_err(dev, "tx descriptor count (%d) is not valid\n",
- pdata->tx_desc_count);
- ret = -EINVAL;
- goto err_io;
- }
- BUILD_BUG_ON_NOT_POWER_OF_2(XGBE_RX_DESC_CNT);
- pdata->rx_desc_count = XGBE_RX_DESC_CNT;
- if (pdata->rx_desc_count & (pdata->rx_desc_count - 1)) {
- dev_err(dev, "rx descriptor count (%d) is not valid\n",
- pdata->rx_desc_count);
- ret = -EINVAL;
- goto err_io;
- }
-
- /* Obtain the mmio areas for the device */
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- pdata->xgmac_regs = devm_ioremap_resource(dev, res);
- if (IS_ERR(pdata->xgmac_regs)) {
- dev_err(dev, "xgmac ioremap failed\n");
- ret = PTR_ERR(pdata->xgmac_regs);
- goto err_io;
- }
- if (netif_msg_probe(pdata))
- dev_dbg(dev, "xgmac_regs = %p\n", pdata->xgmac_regs);
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- pdata->xpcs_regs = devm_ioremap_resource(dev, res);
- if (IS_ERR(pdata->xpcs_regs)) {
- dev_err(dev, "xpcs ioremap failed\n");
- ret = PTR_ERR(pdata->xpcs_regs);
- goto err_io;
- }
- if (netif_msg_probe(pdata))
- dev_dbg(dev, "xpcs_regs = %p\n", pdata->xpcs_regs);
-
- res = platform_get_resource(phy_pdev, IORESOURCE_MEM, phy_memnum++);
- pdata->rxtx_regs = devm_ioremap_resource(dev, res);
- if (IS_ERR(pdata->rxtx_regs)) {
- dev_err(dev, "rxtx ioremap failed\n");
- ret = PTR_ERR(pdata->rxtx_regs);
- goto err_io;
- }
- if (netif_msg_probe(pdata))
- dev_dbg(dev, "rxtx_regs = %p\n", pdata->rxtx_regs);
-
- res = platform_get_resource(phy_pdev, IORESOURCE_MEM, phy_memnum++);
- pdata->sir0_regs = devm_ioremap_resource(dev, res);
- if (IS_ERR(pdata->sir0_regs)) {
- dev_err(dev, "sir0 ioremap failed\n");
- ret = PTR_ERR(pdata->sir0_regs);
- goto err_io;
- }
- if (netif_msg_probe(pdata))
- dev_dbg(dev, "sir0_regs = %p\n", pdata->sir0_regs);
-
- res = platform_get_resource(phy_pdev, IORESOURCE_MEM, phy_memnum++);
- pdata->sir1_regs = devm_ioremap_resource(dev, res);
- if (IS_ERR(pdata->sir1_regs)) {
- dev_err(dev, "sir1 ioremap failed\n");
- ret = PTR_ERR(pdata->sir1_regs);
- goto err_io;
- }
- if (netif_msg_probe(pdata))
- dev_dbg(dev, "sir1_regs = %p\n", pdata->sir1_regs);
-
- /* Retrieve the MAC address */
- ret = device_property_read_u8_array(dev, XGBE_MAC_ADDR_PROPERTY,
- pdata->mac_addr,
- sizeof(pdata->mac_addr));
- if (ret || !is_valid_ether_addr(pdata->mac_addr)) {
- dev_err(dev, "invalid %s property\n", XGBE_MAC_ADDR_PROPERTY);
- if (!ret)
- ret = -EINVAL;
- goto err_io;
- }
-
- /* Retrieve the PHY mode - it must be "xgmii" */
- ret = device_property_read_string(dev, XGBE_PHY_MODE_PROPERTY,
- &phy_mode);
- if (ret || strcmp(phy_mode, phy_modes(PHY_INTERFACE_MODE_XGMII))) {
- dev_err(dev, "invalid %s property\n", XGBE_PHY_MODE_PROPERTY);
- if (!ret)
- ret = -EINVAL;
- goto err_io;
- }
- pdata->phy_mode = PHY_INTERFACE_MODE_XGMII;
-
- /* Check for per channel interrupt support */
- if (device_property_present(dev, XGBE_DMA_IRQS_PROPERTY))
- pdata->per_channel_irq = 1;
+ return pdata;
+}
- /* Retrieve the PHY speedset */
- ret = device_property_read_u32(phy_dev, XGBE_SPEEDSET_PROPERTY,
- &pdata->speed_set);
- if (ret) {
- dev_err(dev, "invalid %s property\n", XGBE_SPEEDSET_PROPERTY);
- goto err_io;
- }
+void xgbe_free_pdata(struct xgbe_prv_data *pdata)
+{
+ struct net_device *netdev = pdata->netdev;
- switch (pdata->speed_set) {
- case XGBE_SPEEDSET_1000_10000:
- case XGBE_SPEEDSET_2500_10000:
- break;
- default:
- dev_err(dev, "invalid %s property\n", XGBE_SPEEDSET_PROPERTY);
- ret = -EINVAL;
- goto err_io;
- }
+ free_netdev(netdev);
+}
- /* Retrieve the PHY configuration properties */
- if (device_property_present(phy_dev, XGBE_BLWC_PROPERTY)) {
- ret = device_property_read_u32_array(phy_dev,
- XGBE_BLWC_PROPERTY,
- pdata->serdes_blwc,
- XGBE_SPEEDS);
- if (ret) {
- dev_err(dev, "invalid %s property\n",
- XGBE_BLWC_PROPERTY);
- goto err_io;
- }
- } else {
- memcpy(pdata->serdes_blwc, xgbe_serdes_blwc,
- sizeof(pdata->serdes_blwc));
- }
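+/* Derive the Tx/Rx DMA channel and hardware queue counts from the
+ * hardware features, capped by the bus-provided maximums and the number
+ * of online CPUs.
+ */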
+void xgbe_set_counts(struct xgbe_prv_data *pdata)
+{
+ /* Set all the function pointers */
+ xgbe_init_all_fptrs(pdata);
- if (device_property_present(phy_dev, XGBE_CDR_RATE_PROPERTY)) {
- ret = device_property_read_u32_array(phy_dev,
- XGBE_CDR_RATE_PROPERTY,
- pdata->serdes_cdr_rate,
- XGBE_SPEEDS);
- if (ret) {
- dev_err(dev, "invalid %s property\n",
- XGBE_CDR_RATE_PROPERTY);
- goto err_io;
- }
- } else {
- memcpy(pdata->serdes_cdr_rate, xgbe_serdes_cdr_rate,
- sizeof(pdata->serdes_cdr_rate));
- }
+ /* Populate the hardware features */
+ xgbe_get_all_hw_features(pdata);
- if (device_property_present(phy_dev, XGBE_PQ_SKEW_PROPERTY)) {
- ret = device_property_read_u32_array(phy_dev,
- XGBE_PQ_SKEW_PROPERTY,
- pdata->serdes_pq_skew,
- XGBE_SPEEDS);
- if (ret) {
- dev_err(dev, "invalid %s property\n",
- XGBE_PQ_SKEW_PROPERTY);
- goto err_io;
- }
- } else {
- memcpy(pdata->serdes_pq_skew, xgbe_serdes_pq_skew,
- sizeof(pdata->serdes_pq_skew));
- }
+ /* Set default max values if not provided */
+ if (!pdata->tx_max_channel_count)
+ pdata->tx_max_channel_count = pdata->hw_feat.tx_ch_cnt;
+ if (!pdata->rx_max_channel_count)
+ pdata->rx_max_channel_count = pdata->hw_feat.rx_ch_cnt;
- if (device_property_present(phy_dev, XGBE_TX_AMP_PROPERTY)) {
- ret = device_property_read_u32_array(phy_dev,
- XGBE_TX_AMP_PROPERTY,
- pdata->serdes_tx_amp,
- XGBE_SPEEDS);
- if (ret) {
- dev_err(dev, "invalid %s property\n",
- XGBE_TX_AMP_PROPERTY);
- goto err_io;
- }
- } else {
- memcpy(pdata->serdes_tx_amp, xgbe_serdes_tx_amp,
- sizeof(pdata->serdes_tx_amp));
- }
+ if (!pdata->tx_max_q_count)
+ pdata->tx_max_q_count = pdata->hw_feat.tx_q_cnt;
+ if (!pdata->rx_max_q_count)
+ pdata->rx_max_q_count = pdata->hw_feat.rx_q_cnt;
- if (device_property_present(phy_dev, XGBE_DFE_CFG_PROPERTY)) {
- ret = device_property_read_u32_array(phy_dev,
- XGBE_DFE_CFG_PROPERTY,
- pdata->serdes_dfe_tap_cfg,
- XGBE_SPEEDS);
- if (ret) {
- dev_err(dev, "invalid %s property\n",
- XGBE_DFE_CFG_PROPERTY);
- goto err_io;
- }
- } else {
- memcpy(pdata->serdes_dfe_tap_cfg, xgbe_serdes_dfe_tap_cfg,
- sizeof(pdata->serdes_dfe_tap_cfg));
- }
+	/* Calculate the number of Tx and Rx rings to be created:
+	 *  -Tx (DMA) channels map 1-to-1 to Tx queues, so set the
+	 *   number of Tx queues to the number of Tx channels enabled
+	 *  -Rx (DMA) channels do not map 1-to-1, so use the actual
+	 *   number of Rx queues or the maximum allowed
+	 */
+ pdata->tx_ring_count = min_t(unsigned int, num_online_cpus(),
+ pdata->hw_feat.tx_ch_cnt);
+ pdata->tx_ring_count = min_t(unsigned int, pdata->tx_ring_count,
+ pdata->tx_max_channel_count);
+ pdata->tx_ring_count = min_t(unsigned int, pdata->tx_ring_count,
+ pdata->tx_max_q_count);
- if (device_property_present(phy_dev, XGBE_DFE_ENA_PROPERTY)) {
- ret = device_property_read_u32_array(phy_dev,
- XGBE_DFE_ENA_PROPERTY,
- pdata->serdes_dfe_tap_ena,
- XGBE_SPEEDS);
- if (ret) {
- dev_err(dev, "invalid %s property\n",
- XGBE_DFE_ENA_PROPERTY);
- goto err_io;
- }
- } else {
- memcpy(pdata->serdes_dfe_tap_ena, xgbe_serdes_dfe_tap_ena,
- sizeof(pdata->serdes_dfe_tap_ena));
- }
+ pdata->tx_q_count = pdata->tx_ring_count;
- /* Obtain device settings unique to ACPI/OF */
- if (pdata->use_acpi)
- ret = xgbe_acpi_support(pdata);
- else
- ret = xgbe_of_support(pdata);
- if (ret)
- goto err_io;
+ pdata->rx_ring_count = min_t(unsigned int, num_online_cpus(),
+ pdata->hw_feat.rx_ch_cnt);
+ pdata->rx_ring_count = min_t(unsigned int, pdata->rx_ring_count,
+ pdata->rx_max_channel_count);
- /* Set the DMA coherency values */
- attr = device_get_dma_attr(dev);
- if (attr == DEV_DMA_NOT_SUPPORTED) {
- dev_err(dev, "DMA is not supported");
- goto err_io;
- }
- pdata->coherent = (attr == DEV_DMA_COHERENT);
- if (pdata->coherent) {
- pdata->axdomain = XGBE_DMA_OS_AXDOMAIN;
- pdata->arcache = XGBE_DMA_OS_ARCACHE;
- pdata->awcache = XGBE_DMA_OS_AWCACHE;
- } else {
- pdata->axdomain = XGBE_DMA_SYS_AXDOMAIN;
- pdata->arcache = XGBE_DMA_SYS_ARCACHE;
- pdata->awcache = XGBE_DMA_SYS_AWCACHE;
- }
+ pdata->rx_q_count = min_t(unsigned int, pdata->hw_feat.rx_q_cnt,
+ pdata->rx_max_q_count);
- /* Get the device interrupt */
- ret = platform_get_irq(pdev, 0);
- if (ret < 0) {
- dev_err(dev, "platform_get_irq 0 failed\n");
- goto err_io;
+ if (netif_msg_probe(pdata)) {
+ dev_dbg(pdata->dev, "TX/RX DMA channel count = %u/%u\n",
+ pdata->tx_ring_count, pdata->rx_ring_count);
+ dev_dbg(pdata->dev, "TX/RX hardware queue count = %u/%u\n",
+ pdata->tx_q_count, pdata->rx_q_count);
}
- pdata->dev_irq = ret;
+}
- /* Get the auto-negotiation interrupt */
- ret = platform_get_irq(phy_pdev, phy_irqnum++);
- if (ret < 0) {
- dev_err(dev, "platform_get_irq phy 0 failed\n");
- goto err_io;
- }
- pdata->an_irq = ret;
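+/* Finish netdev setup once the bus code has mapped resources and
+ * interrupts: reset the hardware, size the rings and queues, initialize
+ * the PHY layer and register the net_device.
+ */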
+int xgbe_config_netdev(struct xgbe_prv_data *pdata)
+{
+ struct net_device *netdev = pdata->netdev;
+ struct device *dev = pdata->dev;
+ unsigned int i;
+ int ret;
netdev->irq = pdata->dev_irq;
netdev->base_addr = (unsigned long)pdata->xgmac_regs;
memcpy(netdev->dev_addr, pdata->mac_addr, netdev->addr_len);
- /* Set all the function pointers */
- xgbe_init_all_fptrs(pdata);
+ /* Initialize ECC timestamps */
+ pdata->tx_sec_period = jiffies;
+ pdata->tx_ded_period = jiffies;
+ pdata->rx_sec_period = jiffies;
+ pdata->rx_ded_period = jiffies;
+ pdata->desc_sec_period = jiffies;
+ pdata->desc_ded_period = jiffies;
/* Issue software reset to device */
pdata->hw_if.exit(pdata);
- /* Populate the hardware features */
- xgbe_get_all_hw_features(pdata);
-
/* Set default configuration data */
xgbe_default_config(pdata);
@@ -663,33 +287,46 @@ static int xgbe_probe(struct platform_device *pdev)
DMA_BIT_MASK(pdata->hw_feat.dma_width));
if (ret) {
dev_err(dev, "dma_set_mask_and_coherent failed\n");
- goto err_io;
+ return ret;
}
- /* Calculate the number of Tx and Rx rings to be created
- * -Tx (DMA) Channels map 1-to-1 to Tx Queues so set
- * the number of Tx queues to the number of Tx channels
- * enabled
- * -Rx (DMA) Channels do not map 1-to-1 so use the actual
- * number of Rx queues
- */
- pdata->tx_ring_count = min_t(unsigned int, num_online_cpus(),
- pdata->hw_feat.tx_ch_cnt);
- pdata->tx_q_count = pdata->tx_ring_count;
+ /* Set default max values if not provided */
+ if (!pdata->tx_max_fifo_size)
+ pdata->tx_max_fifo_size = pdata->hw_feat.tx_fifo_size;
+ if (!pdata->rx_max_fifo_size)
+ pdata->rx_max_fifo_size = pdata->hw_feat.rx_fifo_size;
+
+ /* Set and validate the number of descriptors for a ring */
+ BUILD_BUG_ON_NOT_POWER_OF_2(XGBE_TX_DESC_CNT);
+ pdata->tx_desc_count = XGBE_TX_DESC_CNT;
+
+ BUILD_BUG_ON_NOT_POWER_OF_2(XGBE_RX_DESC_CNT);
+ pdata->rx_desc_count = XGBE_RX_DESC_CNT;
+
+ /* Adjust the number of queues based on interrupts assigned */
+ if (pdata->channel_irq_count) {
+ pdata->tx_ring_count = min_t(unsigned int, pdata->tx_ring_count,
+ pdata->channel_irq_count);
+ pdata->rx_ring_count = min_t(unsigned int, pdata->rx_ring_count,
+ pdata->channel_irq_count);
+
+ if (netif_msg_probe(pdata))
+ dev_dbg(pdata->dev,
+ "adjusted TX/RX DMA channel count = %u/%u\n",
+ pdata->tx_ring_count, pdata->rx_ring_count);
+ }
+
+ /* Set the number of queues */
ret = netif_set_real_num_tx_queues(netdev, pdata->tx_ring_count);
if (ret) {
dev_err(dev, "error setting real tx queue count\n");
- goto err_io;
+ return ret;
}
- pdata->rx_ring_count = min_t(unsigned int,
- netif_get_num_default_rss_queues(),
- pdata->hw_feat.rx_ch_cnt);
- pdata->rx_q_count = pdata->hw_feat.rx_q_cnt;
ret = netif_set_real_num_rx_queues(netdev, pdata->rx_ring_count);
if (ret) {
dev_err(dev, "error setting real rx queue count\n");
- goto err_io;
+ return ret;
}
/* Initialize RSS hash key and lookup table */
@@ -704,7 +341,9 @@ static int xgbe_probe(struct platform_device *pdev)
XGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, UDP4TE, 1);
/* Call MDIO/PHY initialization routine */
- pdata->phy_if.phy_init(pdata);
+ ret = pdata->phy_if.phy_init(pdata);
+ if (ret)
+ return ret;
/* Set device operations */
netdev->netdev_ops = xgbe_get_netdev_ops();
@@ -738,6 +377,8 @@ static int xgbe_probe(struct platform_device *pdev)
pdata->netdev_features = netdev->features;
netdev->priv_flags |= IFF_UNICAST_FLT;
+ netdev->min_mtu = 0;
+ netdev->max_mtu = XGMAC_JUMBO_PACKET_MTU;
/* Use default watchdog timeout */
netdev->watchdog_timeo = 0;
@@ -749,13 +390,21 @@ static int xgbe_probe(struct platform_device *pdev)
ret = register_netdev(netdev);
if (ret) {
dev_err(dev, "net device registration failed\n");
- goto err_io;
+ return ret;
}
/* Create the PHY/ANEG name based on netdev name */
snprintf(pdata->an_name, sizeof(pdata->an_name) - 1, "%s-pcs",
netdev_name(netdev));
+ /* Create the ECC name based on netdev name */
+ snprintf(pdata->ecc_name, sizeof(pdata->ecc_name) - 1, "%s-ecc",
+ netdev_name(netdev));
+
+ /* Create the I2C name based on netdev name */
+ snprintf(pdata->i2c_name, sizeof(pdata->i2c_name) - 1, "%s-i2c",
+ netdev_name(netdev));
+
/* Create workqueues */
pdata->dev_workqueue =
create_singlethread_workqueue(netdev_name(netdev));
@@ -773,15 +422,15 @@ static int xgbe_probe(struct platform_device *pdev)
goto err_wq;
}
- xgbe_ptp_register(pdata);
+ if (IS_REACHABLE(CONFIG_PTP_1588_CLOCK))
+ xgbe_ptp_register(pdata);
xgbe_debugfs_init(pdata);
- platform_device_put(phy_pdev);
-
- netdev_notice(netdev, "net device enabled\n");
-
- DBGPR("<-- xgbe_probe\n");
+ netif_dbg(pdata, drv, pdata->netdev, "%u Tx software queues\n",
+ pdata->tx_ring_count);
+ netif_dbg(pdata, drv, pdata->netdev, "%u Rx software queues\n",
+ pdata->rx_ring_count);
return 0;
@@ -791,28 +440,19 @@ err_wq:
err_netdev:
unregister_netdev(netdev);
-err_io:
- platform_device_put(phy_pdev);
-
-err_phydev:
- free_netdev(netdev);
-
-err_alloc:
- dev_notice(dev, "net device not enabled\n");
-
return ret;
}
-static int xgbe_remove(struct platform_device *pdev)
+void xgbe_deconfig_netdev(struct xgbe_prv_data *pdata)
{
- struct net_device *netdev = platform_get_drvdata(pdev);
- struct xgbe_prv_data *pdata = netdev_priv(netdev);
-
- DBGPR("-->xgbe_remove\n");
+ struct net_device *netdev = pdata->netdev;
xgbe_debugfs_exit(pdata);
- xgbe_ptp_unregister(pdata);
+ if (IS_REACHABLE(CONFIG_PTP_1588_CLOCK))
+ xgbe_ptp_unregister(pdata);
+
+ pdata->phy_if.phy_exit(pdata);
flush_workqueue(pdata->an_workqueue);
destroy_workqueue(pdata->an_workqueue);
@@ -821,94 +461,29 @@ static int xgbe_remove(struct platform_device *pdev)
destroy_workqueue(pdata->dev_workqueue);
unregister_netdev(netdev);
-
- free_netdev(netdev);
-
- DBGPR("<--xgbe_remove\n");
-
- return 0;
}
-#ifdef CONFIG_PM_SLEEP
-static int xgbe_suspend(struct device *dev)
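+/* With the bus-specific code split out, module init/exit simply register
+ * and unregister the platform and PCI front ends.
+ */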
+static int __init xgbe_mod_init(void)
{
- struct net_device *netdev = dev_get_drvdata(dev);
- struct xgbe_prv_data *pdata = netdev_priv(netdev);
- int ret = 0;
-
- DBGPR("-->xgbe_suspend\n");
-
- if (netif_running(netdev))
- ret = xgbe_powerdown(netdev, XGMAC_DRIVER_CONTEXT);
+ int ret;
- pdata->lpm_ctrl = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL1);
- pdata->lpm_ctrl |= MDIO_CTRL1_LPOWER;
- XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_CTRL1, pdata->lpm_ctrl);
+ ret = xgbe_platform_init();
+ if (ret)
+ return ret;
- DBGPR("<--xgbe_suspend\n");
+ ret = xgbe_pci_init();
+ if (ret)
+ return ret;
- return ret;
+ return 0;
}
-static int xgbe_resume(struct device *dev)
+static void __exit xgbe_mod_exit(void)
{
- struct net_device *netdev = dev_get_drvdata(dev);
- struct xgbe_prv_data *pdata = netdev_priv(netdev);
- int ret = 0;
-
- DBGPR("-->xgbe_resume\n");
-
- pdata->lpm_ctrl &= ~MDIO_CTRL1_LPOWER;
- XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_CTRL1, pdata->lpm_ctrl);
-
- if (netif_running(netdev)) {
- ret = xgbe_powerup(netdev, XGMAC_DRIVER_CONTEXT);
-
- /* Schedule a restart in case the link or phy state changed
- * while we were powered down.
- */
- schedule_work(&pdata->restart_work);
- }
-
- DBGPR("<--xgbe_resume\n");
+ xgbe_pci_exit();
- return ret;
+ xgbe_platform_exit();
}
-#endif /* CONFIG_PM_SLEEP */
-
-#ifdef CONFIG_ACPI
-static const struct acpi_device_id xgbe_acpi_match[] = {
- { "AMDI8001", 0 },
- {},
-};
-
-MODULE_DEVICE_TABLE(acpi, xgbe_acpi_match);
-#endif
-
-#ifdef CONFIG_OF
-static const struct of_device_id xgbe_of_match[] = {
- { .compatible = "amd,xgbe-seattle-v1a", },
- {},
-};
-
-MODULE_DEVICE_TABLE(of, xgbe_of_match);
-#endif
-
-static SIMPLE_DEV_PM_OPS(xgbe_pm_ops, xgbe_suspend, xgbe_resume);
-
-static struct platform_driver xgbe_driver = {
- .driver = {
- .name = "amd-xgbe",
-#ifdef CONFIG_ACPI
- .acpi_match_table = xgbe_acpi_match,
-#endif
-#ifdef CONFIG_OF
- .of_match_table = xgbe_of_match,
-#endif
- .pm = &xgbe_pm_ops,
- },
- .probe = xgbe_probe,
- .remove = xgbe_remove,
-};
-module_platform_driver(xgbe_driver);
+module_init(xgbe_mod_init);
+module_exit(xgbe_mod_exit);
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
index 84c5d296d13e..4c5b90eea4af 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
@@ -125,303 +125,284 @@
#include "xgbe.h"
#include "xgbe-common.h"
-static void xgbe_an_enable_kr_training(struct xgbe_prv_data *pdata)
+static void xgbe_an37_clear_interrupts(struct xgbe_prv_data *pdata)
{
- unsigned int reg;
-
- reg = XMDIO_READ(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL);
+ int reg;
- reg |= XGBE_KR_TRAINING_ENABLE;
- XMDIO_WRITE(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL, reg);
+ reg = XMDIO_READ(pdata, MDIO_MMD_VEND2, MDIO_VEND2_AN_STAT);
+ reg &= ~XGBE_AN_CL37_INT_MASK;
+ XMDIO_WRITE(pdata, MDIO_MMD_VEND2, MDIO_VEND2_AN_STAT, reg);
}
-static void xgbe_an_disable_kr_training(struct xgbe_prv_data *pdata)
+static void xgbe_an37_disable_interrupts(struct xgbe_prv_data *pdata)
{
- unsigned int reg;
+ int reg;
- reg = XMDIO_READ(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL);
+ reg = XMDIO_READ(pdata, MDIO_MMD_VEND2, MDIO_VEND2_AN_CTRL);
+ reg &= ~XGBE_AN_CL37_INT_MASK;
+ XMDIO_WRITE(pdata, MDIO_MMD_VEND2, MDIO_VEND2_AN_CTRL, reg);
- reg &= ~XGBE_KR_TRAINING_ENABLE;
- XMDIO_WRITE(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL, reg);
+ reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_PCS_DIG_CTRL);
+ reg &= ~XGBE_PCS_CL37_BP;
+ XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_PCS_DIG_CTRL, reg);
}
-static void xgbe_pcs_power_cycle(struct xgbe_prv_data *pdata)
+static void xgbe_an37_enable_interrupts(struct xgbe_prv_data *pdata)
{
- unsigned int reg;
-
- reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL1);
+ int reg;
- reg |= MDIO_CTRL1_LPOWER;
- XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_CTRL1, reg);
+ reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_PCS_DIG_CTRL);
+ reg |= XGBE_PCS_CL37_BP;
+ XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_PCS_DIG_CTRL, reg);
- usleep_range(75, 100);
-
- reg &= ~MDIO_CTRL1_LPOWER;
- XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_CTRL1, reg);
+ reg = XMDIO_READ(pdata, MDIO_MMD_VEND2, MDIO_VEND2_AN_CTRL);
+ reg |= XGBE_AN_CL37_INT_MASK;
+ XMDIO_WRITE(pdata, MDIO_MMD_VEND2, MDIO_VEND2_AN_CTRL, reg);
}
-static void xgbe_serdes_start_ratechange(struct xgbe_prv_data *pdata)
+static void xgbe_an73_clear_interrupts(struct xgbe_prv_data *pdata)
{
- /* Assert Rx and Tx ratechange */
- XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, RATECHANGE, 1);
+ XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_INT, 0);
}
-static void xgbe_serdes_complete_ratechange(struct xgbe_prv_data *pdata)
+static void xgbe_an73_disable_interrupts(struct xgbe_prv_data *pdata)
{
- unsigned int wait;
- u16 status;
-
- /* Release Rx and Tx ratechange */
- XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, RATECHANGE, 0);
+ XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_INTMASK, 0);
+}
- /* Wait for Rx and Tx ready */
- wait = XGBE_RATECHANGE_COUNT;
- while (wait--) {
- usleep_range(50, 75);
+static void xgbe_an73_enable_interrupts(struct xgbe_prv_data *pdata)
+{
+ XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_INTMASK, XGBE_AN_CL73_INT_MASK);
+}
- status = XSIR0_IOREAD(pdata, SIR0_STATUS);
- if (XSIR_GET_BITS(status, SIR0_STATUS, RX_READY) &&
- XSIR_GET_BITS(status, SIR0_STATUS, TX_READY))
- goto rx_reset;
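+/* Dispatch to the CL73 or CL37 helpers based on the active AN mode */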
+static void xgbe_an_enable_interrupts(struct xgbe_prv_data *pdata)
+{
+ switch (pdata->an_mode) {
+ case XGBE_AN_MODE_CL73:
+ case XGBE_AN_MODE_CL73_REDRV:
+ xgbe_an73_enable_interrupts(pdata);
+ break;
+ case XGBE_AN_MODE_CL37:
+ case XGBE_AN_MODE_CL37_SGMII:
+ xgbe_an37_enable_interrupts(pdata);
+ break;
+ default:
+ break;
}
+}
- netif_dbg(pdata, link, pdata->netdev, "SerDes rx/tx not ready (%#hx)\n",
- status);
-
-rx_reset:
- /* Perform Rx reset for the DFE changes */
- XRXTX_IOWRITE_BITS(pdata, RXTX_REG6, RESETB_RXD, 0);
- XRXTX_IOWRITE_BITS(pdata, RXTX_REG6, RESETB_RXD, 1);
+static void xgbe_an_clear_interrupts_all(struct xgbe_prv_data *pdata)
+{
+ xgbe_an73_clear_interrupts(pdata);
+ xgbe_an37_clear_interrupts(pdata);
}
-static void xgbe_xgmii_mode(struct xgbe_prv_data *pdata)
+static void xgbe_an73_enable_kr_training(struct xgbe_prv_data *pdata)
{
unsigned int reg;
- /* Enable KR training */
- xgbe_an_enable_kr_training(pdata);
-
- /* Set MAC to 10G speed */
- pdata->hw_if.set_xgmii_speed(pdata);
-
- /* Set PCS to KR/10G speed */
- reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL2);
- reg &= ~MDIO_PCS_CTRL2_TYPE;
- reg |= MDIO_PCS_CTRL2_10GBR;
- XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_CTRL2, reg);
+ reg = XMDIO_READ(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL);
- reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL1);
- reg &= ~MDIO_CTRL1_SPEEDSEL;
- reg |= MDIO_CTRL1_SPEED10G;
- XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_CTRL1, reg);
+ reg |= XGBE_KR_TRAINING_ENABLE;
+ XMDIO_WRITE(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL, reg);
+}
- xgbe_pcs_power_cycle(pdata);
+static void xgbe_an73_disable_kr_training(struct xgbe_prv_data *pdata)
+{
+ unsigned int reg;
- /* Set SerDes to 10G speed */
- xgbe_serdes_start_ratechange(pdata);
+ reg = XMDIO_READ(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL);
- XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, DATARATE, XGBE_SPEED_10000_RATE);
- XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, WORDMODE, XGBE_SPEED_10000_WORD);
- XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, PLLSEL, XGBE_SPEED_10000_PLL);
+ reg &= ~XGBE_KR_TRAINING_ENABLE;
+ XMDIO_WRITE(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL, reg);
+}
- XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, CDR_RATE,
- pdata->serdes_cdr_rate[XGBE_SPEED_10000]);
- XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, TXAMP,
- pdata->serdes_tx_amp[XGBE_SPEED_10000]);
- XRXTX_IOWRITE_BITS(pdata, RXTX_REG20, BLWC_ENA,
- pdata->serdes_blwc[XGBE_SPEED_10000]);
- XRXTX_IOWRITE_BITS(pdata, RXTX_REG114, PQ_REG,
- pdata->serdes_pq_skew[XGBE_SPEED_10000]);
- XRXTX_IOWRITE_BITS(pdata, RXTX_REG129, RXDFE_CONFIG,
- pdata->serdes_dfe_tap_cfg[XGBE_SPEED_10000]);
- XRXTX_IOWRITE(pdata, RXTX_REG22,
- pdata->serdes_dfe_tap_ena[XGBE_SPEED_10000]);
+static void xgbe_kr_mode(struct xgbe_prv_data *pdata)
+{
+ /* Enable KR training */
+ xgbe_an73_enable_kr_training(pdata);
- xgbe_serdes_complete_ratechange(pdata);
+ /* Set MAC to 10G speed */
+ pdata->hw_if.set_speed(pdata, SPEED_10000);
- netif_dbg(pdata, link, pdata->netdev, "10GbE KR mode set\n");
+ /* Call PHY implementation support to complete rate change */
+ pdata->phy_if.phy_impl.set_mode(pdata, XGBE_MODE_KR);
}
-static void xgbe_gmii_2500_mode(struct xgbe_prv_data *pdata)
+static void xgbe_kx_2500_mode(struct xgbe_prv_data *pdata)
{
- unsigned int reg;
-
/* Disable KR training */
- xgbe_an_disable_kr_training(pdata);
+ xgbe_an73_disable_kr_training(pdata);
/* Set MAC to 2.5G speed */
- pdata->hw_if.set_gmii_2500_speed(pdata);
+ pdata->hw_if.set_speed(pdata, SPEED_2500);
- /* Set PCS to KX/1G speed */
- reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL2);
- reg &= ~MDIO_PCS_CTRL2_TYPE;
- reg |= MDIO_PCS_CTRL2_10GBX;
- XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_CTRL2, reg);
+ /* Call PHY implementation support to complete rate change */
+ pdata->phy_if.phy_impl.set_mode(pdata, XGBE_MODE_KX_2500);
+}
- reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL1);
- reg &= ~MDIO_CTRL1_SPEEDSEL;
- reg |= MDIO_CTRL1_SPEED1G;
- XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_CTRL1, reg);
+static void xgbe_kx_1000_mode(struct xgbe_prv_data *pdata)
+{
+ /* Disable KR training */
+ xgbe_an73_disable_kr_training(pdata);
- xgbe_pcs_power_cycle(pdata);
+ /* Set MAC to 1G speed */
+ pdata->hw_if.set_speed(pdata, SPEED_1000);
- /* Set SerDes to 2.5G speed */
- xgbe_serdes_start_ratechange(pdata);
+ /* Call PHY implementation support to complete rate change */
+ pdata->phy_if.phy_impl.set_mode(pdata, XGBE_MODE_KX_1000);
+}
- XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, DATARATE, XGBE_SPEED_2500_RATE);
- XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, WORDMODE, XGBE_SPEED_2500_WORD);
- XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, PLLSEL, XGBE_SPEED_2500_PLL);
+static void xgbe_sfi_mode(struct xgbe_prv_data *pdata)
+{
+ /* If a KR re-driver is present, change to KR mode instead */
+ if (pdata->kr_redrv)
+ return xgbe_kr_mode(pdata);
- XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, CDR_RATE,
- pdata->serdes_cdr_rate[XGBE_SPEED_2500]);
- XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, TXAMP,
- pdata->serdes_tx_amp[XGBE_SPEED_2500]);
- XRXTX_IOWRITE_BITS(pdata, RXTX_REG20, BLWC_ENA,
- pdata->serdes_blwc[XGBE_SPEED_2500]);
- XRXTX_IOWRITE_BITS(pdata, RXTX_REG114, PQ_REG,
- pdata->serdes_pq_skew[XGBE_SPEED_2500]);
- XRXTX_IOWRITE_BITS(pdata, RXTX_REG129, RXDFE_CONFIG,
- pdata->serdes_dfe_tap_cfg[XGBE_SPEED_2500]);
- XRXTX_IOWRITE(pdata, RXTX_REG22,
- pdata->serdes_dfe_tap_ena[XGBE_SPEED_2500]);
+ /* Disable KR training */
+ xgbe_an73_disable_kr_training(pdata);
- xgbe_serdes_complete_ratechange(pdata);
+ /* Set MAC to 10G speed */
+ pdata->hw_if.set_speed(pdata, SPEED_10000);
- netif_dbg(pdata, link, pdata->netdev, "2.5GbE KX mode set\n");
+ /* Call PHY implementation support to complete rate change */
+ pdata->phy_if.phy_impl.set_mode(pdata, XGBE_MODE_SFI);
}
-static void xgbe_gmii_mode(struct xgbe_prv_data *pdata)
+static void xgbe_x_mode(struct xgbe_prv_data *pdata)
{
- unsigned int reg;
-
/* Disable KR training */
- xgbe_an_disable_kr_training(pdata);
+ xgbe_an73_disable_kr_training(pdata);
/* Set MAC to 1G speed */
- pdata->hw_if.set_gmii_speed(pdata);
+ pdata->hw_if.set_speed(pdata, SPEED_1000);
- /* Set PCS to KX/1G speed */
- reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL2);
- reg &= ~MDIO_PCS_CTRL2_TYPE;
- reg |= MDIO_PCS_CTRL2_10GBX;
- XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_CTRL2, reg);
-
- reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL1);
- reg &= ~MDIO_CTRL1_SPEEDSEL;
- reg |= MDIO_CTRL1_SPEED1G;
- XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_CTRL1, reg);
+ /* Call PHY implementation support to complete rate change */
+ pdata->phy_if.phy_impl.set_mode(pdata, XGBE_MODE_X);
+}
- xgbe_pcs_power_cycle(pdata);
+static void xgbe_sgmii_1000_mode(struct xgbe_prv_data *pdata)
+{
+ /* Disable KR training */
+ xgbe_an73_disable_kr_training(pdata);
- /* Set SerDes to 1G speed */
- xgbe_serdes_start_ratechange(pdata);
+ /* Set MAC to 1G speed */
+ pdata->hw_if.set_speed(pdata, SPEED_1000);
- XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, DATARATE, XGBE_SPEED_1000_RATE);
- XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, WORDMODE, XGBE_SPEED_1000_WORD);
- XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, PLLSEL, XGBE_SPEED_1000_PLL);
+ /* Call PHY implementation support to complete rate change */
+ pdata->phy_if.phy_impl.set_mode(pdata, XGBE_MODE_SGMII_1000);
+}
- XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, CDR_RATE,
- pdata->serdes_cdr_rate[XGBE_SPEED_1000]);
- XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, TXAMP,
- pdata->serdes_tx_amp[XGBE_SPEED_1000]);
- XRXTX_IOWRITE_BITS(pdata, RXTX_REG20, BLWC_ENA,
- pdata->serdes_blwc[XGBE_SPEED_1000]);
- XRXTX_IOWRITE_BITS(pdata, RXTX_REG114, PQ_REG,
- pdata->serdes_pq_skew[XGBE_SPEED_1000]);
- XRXTX_IOWRITE_BITS(pdata, RXTX_REG129, RXDFE_CONFIG,
- pdata->serdes_dfe_tap_cfg[XGBE_SPEED_1000]);
- XRXTX_IOWRITE(pdata, RXTX_REG22,
- pdata->serdes_dfe_tap_ena[XGBE_SPEED_1000]);
+static void xgbe_sgmii_100_mode(struct xgbe_prv_data *pdata)
+{
+ /* Disable KR training */
+ xgbe_an73_disable_kr_training(pdata);
- xgbe_serdes_complete_ratechange(pdata);
+ /* Set MAC to 1G speed */
+ pdata->hw_if.set_speed(pdata, SPEED_1000);
- netif_dbg(pdata, link, pdata->netdev, "1GbE KX mode set\n");
+ /* Call PHY implementation support to complete rate change */
+ pdata->phy_if.phy_impl.set_mode(pdata, XGBE_MODE_SGMII_100);
}
-static void xgbe_cur_mode(struct xgbe_prv_data *pdata,
- enum xgbe_mode *mode)
+static enum xgbe_mode xgbe_cur_mode(struct xgbe_prv_data *pdata)
{
- unsigned int reg;
-
- reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL2);
- if ((reg & MDIO_PCS_CTRL2_TYPE) == MDIO_PCS_CTRL2_10GBR)
- *mode = XGBE_MODE_KR;
- else
- *mode = XGBE_MODE_KX;
+ return pdata->phy_if.phy_impl.cur_mode(pdata);
}
static bool xgbe_in_kr_mode(struct xgbe_prv_data *pdata)
{
- enum xgbe_mode mode;
-
- xgbe_cur_mode(pdata, &mode);
+ return (xgbe_cur_mode(pdata) == XGBE_MODE_KR);
+}
- return (mode == XGBE_MODE_KR);
+static void xgbe_change_mode(struct xgbe_prv_data *pdata,
+ enum xgbe_mode mode)
+{
+ switch (mode) {
+ case XGBE_MODE_KX_1000:
+ xgbe_kx_1000_mode(pdata);
+ break;
+ case XGBE_MODE_KX_2500:
+ xgbe_kx_2500_mode(pdata);
+ break;
+ case XGBE_MODE_KR:
+ xgbe_kr_mode(pdata);
+ break;
+ case XGBE_MODE_SGMII_100:
+ xgbe_sgmii_100_mode(pdata);
+ break;
+ case XGBE_MODE_SGMII_1000:
+ xgbe_sgmii_1000_mode(pdata);
+ break;
+ case XGBE_MODE_X:
+ xgbe_x_mode(pdata);
+ break;
+ case XGBE_MODE_SFI:
+ xgbe_sfi_mode(pdata);
+ break;
+ case XGBE_MODE_UNKNOWN:
+ break;
+ default:
+ netif_dbg(pdata, link, pdata->netdev,
+ "invalid operation mode requested (%u)\n", mode);
+ }
}
static void xgbe_switch_mode(struct xgbe_prv_data *pdata)
{
- /* If we are in KR switch to KX, and vice-versa */
- if (xgbe_in_kr_mode(pdata)) {
- if (pdata->speed_set == XGBE_SPEEDSET_1000_10000)
- xgbe_gmii_mode(pdata);
- else
- xgbe_gmii_2500_mode(pdata);
- } else {
- xgbe_xgmii_mode(pdata);
- }
+ xgbe_change_mode(pdata, pdata->phy_if.phy_impl.switch_mode(pdata));
}
static void xgbe_set_mode(struct xgbe_prv_data *pdata,
enum xgbe_mode mode)
{
- enum xgbe_mode cur_mode;
+ if (mode == xgbe_cur_mode(pdata))
+ return;
- xgbe_cur_mode(pdata, &cur_mode);
- if (mode != cur_mode)
- xgbe_switch_mode(pdata);
+ xgbe_change_mode(pdata, mode);
}
-static bool xgbe_use_xgmii_mode(struct xgbe_prv_data *pdata)
+static bool xgbe_use_mode(struct xgbe_prv_data *pdata,
+ enum xgbe_mode mode)
{
- if (pdata->phy.autoneg == AUTONEG_ENABLE) {
- if (pdata->phy.advertising & ADVERTISED_10000baseKR_Full)
- return true;
- } else {
- if (pdata->phy.speed == SPEED_10000)
- return true;
- }
+ return pdata->phy_if.phy_impl.use_mode(pdata, mode);
+}
+
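+/* Program the CL37 AN enable/restart bits in the vendor control register */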
+static void xgbe_an37_set(struct xgbe_prv_data *pdata, bool enable,
+ bool restart)
+{
+ unsigned int reg;
+
+ reg = XMDIO_READ(pdata, MDIO_MMD_VEND2, MDIO_CTRL1);
+ reg &= ~MDIO_VEND2_CTRL1_AN_ENABLE;
- return false;
+ if (enable)
+ reg |= MDIO_VEND2_CTRL1_AN_ENABLE;
+
+ if (restart)
+ reg |= MDIO_VEND2_CTRL1_AN_RESTART;
+
+ XMDIO_WRITE(pdata, MDIO_MMD_VEND2, MDIO_CTRL1, reg);
}
-static bool xgbe_use_gmii_2500_mode(struct xgbe_prv_data *pdata)
+static void xgbe_an37_restart(struct xgbe_prv_data *pdata)
{
- if (pdata->phy.autoneg == AUTONEG_ENABLE) {
- if (pdata->phy.advertising & ADVERTISED_2500baseX_Full)
- return true;
- } else {
- if (pdata->phy.speed == SPEED_2500)
- return true;
- }
+ xgbe_an37_enable_interrupts(pdata);
+ xgbe_an37_set(pdata, true, true);
- return false;
+ netif_dbg(pdata, link, pdata->netdev, "CL37 AN enabled/restarted\n");
}
-static bool xgbe_use_gmii_mode(struct xgbe_prv_data *pdata)
+static void xgbe_an37_disable(struct xgbe_prv_data *pdata)
{
- if (pdata->phy.autoneg == AUTONEG_ENABLE) {
- if (pdata->phy.advertising & ADVERTISED_1000baseKX_Full)
- return true;
- } else {
- if (pdata->phy.speed == SPEED_1000)
- return true;
- }
+ xgbe_an37_set(pdata, false, false);
+ xgbe_an37_disable_interrupts(pdata);
- return false;
+ netif_dbg(pdata, link, pdata->netdev, "CL37 AN disabled\n");
}
-static void xgbe_set_an(struct xgbe_prv_data *pdata, bool enable, bool restart)
+static void xgbe_an73_set(struct xgbe_prv_data *pdata, bool enable,
+ bool restart)
{
unsigned int reg;
@@ -437,22 +418,62 @@ static void xgbe_set_an(struct xgbe_prv_data *pdata, bool enable, bool restart)
XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_CTRL1, reg);
}
-static void xgbe_restart_an(struct xgbe_prv_data *pdata)
+static void xgbe_an73_restart(struct xgbe_prv_data *pdata)
+{
+ xgbe_an73_enable_interrupts(pdata);
+ xgbe_an73_set(pdata, true, true);
+
+ netif_dbg(pdata, link, pdata->netdev, "CL73 AN enabled/restarted\n");
+}
+
+static void xgbe_an73_disable(struct xgbe_prv_data *pdata)
{
- xgbe_set_an(pdata, true, true);
+ xgbe_an73_set(pdata, false, false);
+ xgbe_an73_disable_interrupts(pdata);
- netif_dbg(pdata, link, pdata->netdev, "AN enabled/restarted\n");
+ netif_dbg(pdata, link, pdata->netdev, "CL73 AN disabled\n");
+}
+
+static void xgbe_an_restart(struct xgbe_prv_data *pdata)
+{
+ switch (pdata->an_mode) {
+ case XGBE_AN_MODE_CL73:
+ case XGBE_AN_MODE_CL73_REDRV:
+ xgbe_an73_restart(pdata);
+ break;
+ case XGBE_AN_MODE_CL37:
+ case XGBE_AN_MODE_CL37_SGMII:
+ xgbe_an37_restart(pdata);
+ break;
+ default:
+ break;
+ }
}
-static void xgbe_disable_an(struct xgbe_prv_data *pdata)
+static void xgbe_an_disable(struct xgbe_prv_data *pdata)
{
- xgbe_set_an(pdata, false, false);
+ switch (pdata->an_mode) {
+ case XGBE_AN_MODE_CL73:
+ case XGBE_AN_MODE_CL73_REDRV:
+ xgbe_an73_disable(pdata);
+ break;
+ case XGBE_AN_MODE_CL37:
+ case XGBE_AN_MODE_CL37_SGMII:
+ xgbe_an37_disable(pdata);
+ break;
+ default:
+ break;
+ }
+}
- netif_dbg(pdata, link, pdata->netdev, "AN disabled\n");
+static void xgbe_an_disable_all(struct xgbe_prv_data *pdata)
+{
+ xgbe_an73_disable(pdata);
+ xgbe_an37_disable(pdata);
}
-static enum xgbe_an xgbe_an_tx_training(struct xgbe_prv_data *pdata,
- enum xgbe_rx *state)
+static enum xgbe_an xgbe_an73_tx_training(struct xgbe_prv_data *pdata,
+ enum xgbe_rx *state)
{
unsigned int ad_reg, lp_reg, reg;
@@ -476,13 +497,15 @@ static enum xgbe_an xgbe_an_tx_training(struct xgbe_prv_data *pdata,
/* Start KR training */
reg = XMDIO_READ(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL);
if (reg & XGBE_KR_TRAINING_ENABLE) {
- XSIR0_IOWRITE_BITS(pdata, SIR0_KR_RT_1, RESET, 1);
+ if (pdata->phy_if.phy_impl.kr_training_pre)
+ pdata->phy_if.phy_impl.kr_training_pre(pdata);
reg |= XGBE_KR_TRAINING_START;
XMDIO_WRITE(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL,
reg);
- XSIR0_IOWRITE_BITS(pdata, SIR0_KR_RT_1, RESET, 0);
+ if (pdata->phy_if.phy_impl.kr_training_post)
+ pdata->phy_if.phy_impl.kr_training_post(pdata);
netif_dbg(pdata, link, pdata->netdev,
"KR training initiated\n");
@@ -491,8 +514,8 @@ static enum xgbe_an xgbe_an_tx_training(struct xgbe_prv_data *pdata,
return XGBE_AN_PAGE_RECEIVED;
}
-static enum xgbe_an xgbe_an_tx_xnp(struct xgbe_prv_data *pdata,
- enum xgbe_rx *state)
+static enum xgbe_an xgbe_an73_tx_xnp(struct xgbe_prv_data *pdata,
+ enum xgbe_rx *state)
{
u16 msg;
@@ -508,8 +531,8 @@ static enum xgbe_an xgbe_an_tx_xnp(struct xgbe_prv_data *pdata,
return XGBE_AN_PAGE_RECEIVED;
}
-static enum xgbe_an xgbe_an_rx_bpa(struct xgbe_prv_data *pdata,
- enum xgbe_rx *state)
+static enum xgbe_an xgbe_an73_rx_bpa(struct xgbe_prv_data *pdata,
+ enum xgbe_rx *state)
{
unsigned int link_support;
unsigned int reg, ad_reg, lp_reg;
@@ -528,12 +551,12 @@ static enum xgbe_an xgbe_an_rx_bpa(struct xgbe_prv_data *pdata,
return ((ad_reg & XGBE_XNP_NP_EXCHANGE) ||
(lp_reg & XGBE_XNP_NP_EXCHANGE))
- ? xgbe_an_tx_xnp(pdata, state)
- : xgbe_an_tx_training(pdata, state);
+ ? xgbe_an73_tx_xnp(pdata, state)
+ : xgbe_an73_tx_training(pdata, state);
}
-static enum xgbe_an xgbe_an_rx_xnp(struct xgbe_prv_data *pdata,
- enum xgbe_rx *state)
+static enum xgbe_an xgbe_an73_rx_xnp(struct xgbe_prv_data *pdata,
+ enum xgbe_rx *state)
{
unsigned int ad_reg, lp_reg;
@@ -543,11 +566,11 @@ static enum xgbe_an xgbe_an_rx_xnp(struct xgbe_prv_data *pdata,
return ((ad_reg & XGBE_XNP_NP_EXCHANGE) ||
(lp_reg & XGBE_XNP_NP_EXCHANGE))
- ? xgbe_an_tx_xnp(pdata, state)
- : xgbe_an_tx_training(pdata, state);
+ ? xgbe_an73_tx_xnp(pdata, state)
+ : xgbe_an73_tx_training(pdata, state);
}
-static enum xgbe_an xgbe_an_page_received(struct xgbe_prv_data *pdata)
+static enum xgbe_an xgbe_an73_page_received(struct xgbe_prv_data *pdata)
{
enum xgbe_rx *state;
unsigned long an_timeout;
@@ -566,20 +589,20 @@ static enum xgbe_an xgbe_an_page_received(struct xgbe_prv_data *pdata)
pdata->an_start = jiffies;
netif_dbg(pdata, link, pdata->netdev,
- "AN timed out, resetting state\n");
+ "CL73 AN timed out, resetting state\n");
}
}
state = xgbe_in_kr_mode(pdata) ? &pdata->kr_state
- : &pdata->kx_state;
+ : &pdata->kx_state;
switch (*state) {
case XGBE_RX_BPA:
- ret = xgbe_an_rx_bpa(pdata, state);
+ ret = xgbe_an73_rx_bpa(pdata, state);
break;
case XGBE_RX_XNP:
- ret = xgbe_an_rx_xnp(pdata, state);
+ ret = xgbe_an73_rx_xnp(pdata, state);
break;
default:
@@ -589,7 +612,7 @@ static enum xgbe_an xgbe_an_page_received(struct xgbe_prv_data *pdata)
return ret;
}
-static enum xgbe_an xgbe_an_incompat_link(struct xgbe_prv_data *pdata)
+static enum xgbe_an xgbe_an73_incompat_link(struct xgbe_prv_data *pdata)
{
/* Be sure we aren't looping trying to negotiate */
if (xgbe_in_kr_mode(pdata)) {
@@ -611,23 +634,43 @@ static enum xgbe_an xgbe_an_incompat_link(struct xgbe_prv_data *pdata)
return XGBE_AN_NO_LINK;
}
- xgbe_disable_an(pdata);
+ xgbe_an73_disable(pdata);
xgbe_switch_mode(pdata);
- xgbe_restart_an(pdata);
+ xgbe_an73_restart(pdata);
return XGBE_AN_INCOMPAT_LINK;
}
-static irqreturn_t xgbe_an_isr(int irq, void *data)
+static void xgbe_an37_isr(struct xgbe_prv_data *pdata)
{
- struct xgbe_prv_data *pdata = (struct xgbe_prv_data *)data;
+ unsigned int reg;
- netif_dbg(pdata, intr, pdata->netdev, "AN interrupt received\n");
+ /* Disable AN interrupts */
+ xgbe_an37_disable_interrupts(pdata);
+
+ /* Save the interrupt(s) that fired */
+ reg = XMDIO_READ(pdata, MDIO_MMD_VEND2, MDIO_VEND2_AN_STAT);
+ pdata->an_int = reg & XGBE_AN_CL37_INT_MASK;
+ pdata->an_status = reg & ~XGBE_AN_CL37_INT_MASK;
+ if (pdata->an_int) {
+ /* Clear the interrupt(s) that fired and process them */
+ reg &= ~XGBE_AN_CL37_INT_MASK;
+ XMDIO_WRITE(pdata, MDIO_MMD_VEND2, MDIO_VEND2_AN_STAT, reg);
+
+ queue_work(pdata->an_workqueue, &pdata->an_irq_work);
+ } else {
+ /* Enable AN interrupts */
+ xgbe_an37_enable_interrupts(pdata);
+ }
+}
+
+static void xgbe_an73_isr(struct xgbe_prv_data *pdata)
+{
/* Disable AN interrupts */
- XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_INTMASK, 0);
+ xgbe_an73_disable_interrupts(pdata);
/* Save the interrupt(s) that fired */
pdata->an_int = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_INT);
@@ -639,13 +682,37 @@ static irqreturn_t xgbe_an_isr(int irq, void *data)
queue_work(pdata->an_workqueue, &pdata->an_irq_work);
} else {
/* Enable AN interrupts */
- XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_INTMASK,
- XGBE_AN_INT_MASK);
+ xgbe_an73_enable_interrupts(pdata);
+ }
+}
+
+static irqreturn_t xgbe_an_isr(int irq, void *data)
+{
+ struct xgbe_prv_data *pdata = (struct xgbe_prv_data *)data;
+
+ netif_dbg(pdata, intr, pdata->netdev, "AN interrupt received\n");
+
+ switch (pdata->an_mode) {
+ case XGBE_AN_MODE_CL73:
+ case XGBE_AN_MODE_CL73_REDRV:
+ xgbe_an73_isr(pdata);
+ break;
+ case XGBE_AN_MODE_CL37:
+ case XGBE_AN_MODE_CL37_SGMII:
+ xgbe_an37_isr(pdata);
+ break;
+ default:
+ break;
}
return IRQ_HANDLED;
}
+static irqreturn_t xgbe_an_combined_isr(int irq, struct xgbe_prv_data *pdata)
+{
+ return xgbe_an_isr(irq, pdata);
+}
+
static void xgbe_an_irq_work(struct work_struct *work)
{
struct xgbe_prv_data *pdata = container_of(work,
@@ -679,36 +746,87 @@ static const char *xgbe_state_as_string(enum xgbe_an state)
}
}
-static void xgbe_an_state_machine(struct work_struct *work)
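+/* CL37 AN has no page exchange: completion (qualified by the SGMII link
+ * status when applicable) is the only event that advances this state
+ * machine.
+ */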
+static void xgbe_an37_state_machine(struct xgbe_prv_data *pdata)
{
- struct xgbe_prv_data *pdata = container_of(work,
- struct xgbe_prv_data,
- an_work);
enum xgbe_an cur_state = pdata->an_state;
- mutex_lock(&pdata->an_mutex);
+ if (!pdata->an_int)
+ return;
+
+ if (pdata->an_int & XGBE_AN_CL37_INT_CMPLT) {
+ pdata->an_state = XGBE_AN_COMPLETE;
+ pdata->an_int &= ~XGBE_AN_CL37_INT_CMPLT;
+
+ /* If SGMII is enabled, check the link status */
+ if ((pdata->an_mode == XGBE_AN_MODE_CL37_SGMII) &&
+ !(pdata->an_status & XGBE_SGMII_AN_LINK_STATUS))
+ pdata->an_state = XGBE_AN_NO_LINK;
+ }
+
+ netif_dbg(pdata, link, pdata->netdev, "CL37 AN %s\n",
+ xgbe_state_as_string(pdata->an_state));
+
+ cur_state = pdata->an_state;
+
+ switch (pdata->an_state) {
+ case XGBE_AN_READY:
+ break;
+
+ case XGBE_AN_COMPLETE:
+ netif_dbg(pdata, link, pdata->netdev,
+ "Auto negotiation successful\n");
+ break;
+
+ case XGBE_AN_NO_LINK:
+ break;
+
+ default:
+ pdata->an_state = XGBE_AN_ERROR;
+ }
+
+ if (pdata->an_state == XGBE_AN_ERROR) {
+ netdev_err(pdata->netdev,
+ "error during auto-negotiation, state=%u\n",
+ cur_state);
+
+ pdata->an_int = 0;
+ xgbe_an37_clear_interrupts(pdata);
+ }
+
+ if (pdata->an_state >= XGBE_AN_COMPLETE) {
+ pdata->an_result = pdata->an_state;
+ pdata->an_state = XGBE_AN_READY;
+
+ netif_dbg(pdata, link, pdata->netdev, "CL37 AN result: %s\n",
+ xgbe_state_as_string(pdata->an_result));
+ }
+
+ xgbe_an37_enable_interrupts(pdata);
+}
+
+static void xgbe_an73_state_machine(struct xgbe_prv_data *pdata)
+{
+ enum xgbe_an cur_state = pdata->an_state;
if (!pdata->an_int)
- goto out;
+ return;
next_int:
- if (pdata->an_int & XGBE_AN_PG_RCV) {
+ if (pdata->an_int & XGBE_AN_CL73_PG_RCV) {
pdata->an_state = XGBE_AN_PAGE_RECEIVED;
- pdata->an_int &= ~XGBE_AN_PG_RCV;
- } else if (pdata->an_int & XGBE_AN_INC_LINK) {
+ pdata->an_int &= ~XGBE_AN_CL73_PG_RCV;
+ } else if (pdata->an_int & XGBE_AN_CL73_INC_LINK) {
pdata->an_state = XGBE_AN_INCOMPAT_LINK;
- pdata->an_int &= ~XGBE_AN_INC_LINK;
- } else if (pdata->an_int & XGBE_AN_INT_CMPLT) {
+ pdata->an_int &= ~XGBE_AN_CL73_INC_LINK;
+ } else if (pdata->an_int & XGBE_AN_CL73_INT_CMPLT) {
pdata->an_state = XGBE_AN_COMPLETE;
- pdata->an_int &= ~XGBE_AN_INT_CMPLT;
+ pdata->an_int &= ~XGBE_AN_CL73_INT_CMPLT;
} else {
pdata->an_state = XGBE_AN_ERROR;
}
- pdata->an_result = pdata->an_state;
-
again:
- netif_dbg(pdata, link, pdata->netdev, "AN %s\n",
+ netif_dbg(pdata, link, pdata->netdev, "CL73 AN %s\n",
xgbe_state_as_string(pdata->an_state));
cur_state = pdata->an_state;
@@ -719,14 +837,14 @@ again:
break;
case XGBE_AN_PAGE_RECEIVED:
- pdata->an_state = xgbe_an_page_received(pdata);
+ pdata->an_state = xgbe_an73_page_received(pdata);
pdata->an_supported++;
break;
case XGBE_AN_INCOMPAT_LINK:
pdata->an_supported = 0;
pdata->parallel_detect = 0;
- pdata->an_state = xgbe_an_incompat_link(pdata);
+ pdata->an_state = xgbe_an73_incompat_link(pdata);
break;
case XGBE_AN_COMPLETE:
@@ -745,14 +863,14 @@ again:
if (pdata->an_state == XGBE_AN_NO_LINK) {
pdata->an_int = 0;
- XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_INT, 0);
+ xgbe_an73_clear_interrupts(pdata);
} else if (pdata->an_state == XGBE_AN_ERROR) {
netdev_err(pdata->netdev,
"error during auto-negotiation, state=%u\n",
cur_state);
pdata->an_int = 0;
- XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_INT, 0);
+ xgbe_an73_clear_interrupts(pdata);
}
if (pdata->an_state >= XGBE_AN_COMPLETE) {
@@ -762,7 +880,7 @@ again:
pdata->kx_state = XGBE_RX_BPA;
pdata->an_start = 0;
- netif_dbg(pdata, link, pdata->netdev, "AN result: %s\n",
+ netif_dbg(pdata, link, pdata->netdev, "CL73 AN result: %s\n",
xgbe_state_as_string(pdata->an_result));
}
@@ -772,20 +890,88 @@ again:
if (pdata->an_int)
goto next_int;
-out:
- /* Enable AN interrupts on the way out */
- XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_INTMASK, XGBE_AN_INT_MASK);
+ xgbe_an73_enable_interrupts(pdata);
+}
+
+static void xgbe_an_state_machine(struct work_struct *work)
+{
+ struct xgbe_prv_data *pdata = container_of(work,
+ struct xgbe_prv_data,
+ an_work);
+
+ mutex_lock(&pdata->an_mutex);
+
+ switch (pdata->an_mode) {
+ case XGBE_AN_MODE_CL73:
+ case XGBE_AN_MODE_CL73_REDRV:
+ xgbe_an73_state_machine(pdata);
+ break;
+ case XGBE_AN_MODE_CL37:
+ case XGBE_AN_MODE_CL37_SGMII:
+ xgbe_an37_state_machine(pdata);
+ break;
+ default:
+ break;
+ }
mutex_unlock(&pdata->an_mutex);
}
-static void xgbe_an_init(struct xgbe_prv_data *pdata)
+static void xgbe_an37_init(struct xgbe_prv_data *pdata)
{
- unsigned int reg;
+ unsigned int advertising, reg;
+
+ advertising = pdata->phy_if.phy_impl.an_advertising(pdata);
+
+ /* Set up Advertisement register */
+ reg = XMDIO_READ(pdata, MDIO_MMD_VEND2, MDIO_VEND2_AN_ADVERTISE);
+ if (advertising & ADVERTISED_Pause)
+ reg |= 0x100;
+ else
+ reg &= ~0x100;
+
+ if (advertising & ADVERTISED_Asym_Pause)
+ reg |= 0x80;
+ else
+ reg &= ~0x80;
+
+ /* Full duplex, but not half */
+ reg |= XGBE_AN_CL37_FD_MASK;
+ reg &= ~XGBE_AN_CL37_HD_MASK;
+
+ XMDIO_WRITE(pdata, MDIO_MMD_VEND2, MDIO_VEND2_AN_ADVERTISE, reg);
+
+ /* Set up the Control register */
+ reg = XMDIO_READ(pdata, MDIO_MMD_VEND2, MDIO_VEND2_AN_CTRL);
+ reg &= ~XGBE_AN_CL37_TX_CONFIG_MASK;
+ reg &= ~XGBE_AN_CL37_PCS_MODE_MASK;
+
+ switch (pdata->an_mode) {
+ case XGBE_AN_MODE_CL37:
+ reg |= XGBE_AN_CL37_PCS_MODE_BASEX;
+ break;
+ case XGBE_AN_MODE_CL37_SGMII:
+ reg |= XGBE_AN_CL37_PCS_MODE_SGMII;
+ break;
+ default:
+ break;
+ }
+
+ XMDIO_WRITE(pdata, MDIO_MMD_VEND2, MDIO_VEND2_AN_CTRL, reg);
+
+ netif_dbg(pdata, link, pdata->netdev, "CL37 AN (%s) initialized\n",
+ (pdata->an_mode == XGBE_AN_MODE_CL37) ? "BaseX" : "SGMII");
+}
+
+static void xgbe_an73_init(struct xgbe_prv_data *pdata)
+{
+ unsigned int advertising, reg;
+
+ advertising = pdata->phy_if.phy_impl.an_advertising(pdata);
/* Set up Advertisement register 3 first */
reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 2);
- if (pdata->phy.advertising & ADVERTISED_10000baseR_FEC)
+ if (advertising & ADVERTISED_10000baseR_FEC)
reg |= 0xc000;
else
reg &= ~0xc000;
@@ -794,13 +980,13 @@ static void xgbe_an_init(struct xgbe_prv_data *pdata)
/* Set up Advertisement register 2 next */
reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 1);
- if (pdata->phy.advertising & ADVERTISED_10000baseKR_Full)
+ if (advertising & ADVERTISED_10000baseKR_Full)
reg |= 0x80;
else
reg &= ~0x80;
- if ((pdata->phy.advertising & ADVERTISED_1000baseKX_Full) ||
- (pdata->phy.advertising & ADVERTISED_2500baseX_Full))
+ if ((advertising & ADVERTISED_1000baseKX_Full) ||
+ (advertising & ADVERTISED_2500baseX_Full))
reg |= 0x20;
else
reg &= ~0x20;
@@ -809,12 +995,12 @@ static void xgbe_an_init(struct xgbe_prv_data *pdata)
/* Set up Advertisement register 1 last */
reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE);
- if (pdata->phy.advertising & ADVERTISED_Pause)
+ if (advertising & ADVERTISED_Pause)
reg |= 0x400;
else
reg &= ~0x400;
- if (pdata->phy.advertising & ADVERTISED_Asym_Pause)
+ if (advertising & ADVERTISED_Asym_Pause)
reg |= 0x800;
else
reg &= ~0x800;
@@ -824,7 +1010,25 @@ static void xgbe_an_init(struct xgbe_prv_data *pdata)
XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE, reg);
- netif_dbg(pdata, link, pdata->netdev, "AN initialized\n");
+ netif_dbg(pdata, link, pdata->netdev, "CL73 AN initialized\n");
+}
+
+static void xgbe_an_init(struct xgbe_prv_data *pdata)
+{
+ /* Set up advertisement registers based on current settings */
+ pdata->an_mode = pdata->phy_if.phy_impl.an_mode(pdata);
+ switch (pdata->an_mode) {
+ case XGBE_AN_MODE_CL73:
+ case XGBE_AN_MODE_CL73_REDRV:
+ xgbe_an73_init(pdata);
+ break;
+ case XGBE_AN_MODE_CL37:
+ case XGBE_AN_MODE_CL37_SGMII:
+ xgbe_an37_init(pdata);
+ break;
+ default:
+ break;
+ }
}
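The same four-way switch on pdata->an_mode now appears three times: in xgbe_an_isr(), xgbe_an_state_machine() and xgbe_an_init(). A minimal sketch of a predicate that would centralize the CL73-vs-CL37 split (the helper is hypothetical, not part of this patch):

	static bool xgbe_an_mode_is_cl73(enum xgbe_an_mode an_mode)
	{
		return (an_mode == XGBE_AN_MODE_CL73) ||
		       (an_mode == XGBE_AN_MODE_CL73_REDRV);
	}
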
static const char *xgbe_phy_fc_string(struct xgbe_prv_data *pdata)
@@ -842,6 +1046,8 @@ static const char *xgbe_phy_fc_string(struct xgbe_prv_data *pdata)
static const char *xgbe_phy_speed_string(int speed)
{
switch (speed) {
+ case SPEED_100:
+ return "100Mbps";
case SPEED_1000:
return "1Gbps";
case SPEED_2500:
@@ -907,24 +1113,32 @@ static void xgbe_phy_adjust_link(struct xgbe_prv_data *pdata)
xgbe_phy_print_status(pdata);
}
+static bool xgbe_phy_valid_speed(struct xgbe_prv_data *pdata, int speed)
+{
+ return pdata->phy_if.phy_impl.valid_speed(pdata, speed);
+}
+
static int xgbe_phy_config_fixed(struct xgbe_prv_data *pdata)
{
+ enum xgbe_mode mode;
+
netif_dbg(pdata, link, pdata->netdev, "fixed PHY configuration\n");
/* Disable auto-negotiation */
- xgbe_disable_an(pdata);
-
- /* Validate/Set specified speed */
- switch (pdata->phy.speed) {
- case SPEED_10000:
- xgbe_set_mode(pdata, XGBE_MODE_KR);
+ xgbe_an_disable(pdata);
+
+ /* Set specified mode for specified speed */
+ mode = pdata->phy_if.phy_impl.get_mode(pdata, pdata->phy.speed);
+ switch (mode) {
+ case XGBE_MODE_KX_1000:
+ case XGBE_MODE_KX_2500:
+ case XGBE_MODE_KR:
+ case XGBE_MODE_SGMII_100:
+ case XGBE_MODE_SGMII_1000:
+ case XGBE_MODE_X:
+ case XGBE_MODE_SFI:
break;
-
- case SPEED_2500:
- case SPEED_1000:
- xgbe_set_mode(pdata, XGBE_MODE_KX);
- break;
-
+ case XGBE_MODE_UNKNOWN:
default:
return -EINVAL;
}
@@ -933,38 +1147,60 @@ static int xgbe_phy_config_fixed(struct xgbe_prv_data *pdata)
if (pdata->phy.duplex != DUPLEX_FULL)
return -EINVAL;
+ xgbe_set_mode(pdata, mode);
+
return 0;
}
static int __xgbe_phy_config_aneg(struct xgbe_prv_data *pdata)
{
+ int ret;
+
set_bit(XGBE_LINK_INIT, &pdata->dev_state);
pdata->link_check = jiffies;
- if (pdata->phy.autoneg != AUTONEG_ENABLE)
- return xgbe_phy_config_fixed(pdata);
+ ret = pdata->phy_if.phy_impl.an_config(pdata);
+ if (ret)
+ return ret;
+
+ if (pdata->phy.autoneg != AUTONEG_ENABLE) {
+ ret = xgbe_phy_config_fixed(pdata);
+ if (ret || !pdata->kr_redrv)
+ return ret;
- netif_dbg(pdata, link, pdata->netdev, "AN PHY configuration\n");
+ netif_dbg(pdata, link, pdata->netdev, "AN redriver support\n");
+ } else {
+ netif_dbg(pdata, link, pdata->netdev, "AN PHY configuration\n");
+ }
/* Disable auto-negotiation interrupt */
disable_irq(pdata->an_irq);
/* Start auto-negotiation in a supported mode */
- if (pdata->phy.advertising & ADVERTISED_10000baseKR_Full) {
+ if (xgbe_use_mode(pdata, XGBE_MODE_KR)) {
xgbe_set_mode(pdata, XGBE_MODE_KR);
- } else if ((pdata->phy.advertising & ADVERTISED_1000baseKX_Full) ||
- (pdata->phy.advertising & ADVERTISED_2500baseX_Full)) {
- xgbe_set_mode(pdata, XGBE_MODE_KX);
+ } else if (xgbe_use_mode(pdata, XGBE_MODE_KX_2500)) {
+ xgbe_set_mode(pdata, XGBE_MODE_KX_2500);
+ } else if (xgbe_use_mode(pdata, XGBE_MODE_KX_1000)) {
+ xgbe_set_mode(pdata, XGBE_MODE_KX_1000);
+ } else if (xgbe_use_mode(pdata, XGBE_MODE_SFI)) {
+ xgbe_set_mode(pdata, XGBE_MODE_SFI);
+ } else if (xgbe_use_mode(pdata, XGBE_MODE_X)) {
+ xgbe_set_mode(pdata, XGBE_MODE_X);
+ } else if (xgbe_use_mode(pdata, XGBE_MODE_SGMII_1000)) {
+ xgbe_set_mode(pdata, XGBE_MODE_SGMII_1000);
+ } else if (xgbe_use_mode(pdata, XGBE_MODE_SGMII_100)) {
+ xgbe_set_mode(pdata, XGBE_MODE_SGMII_100);
} else {
enable_irq(pdata->an_irq);
return -EINVAL;
}
/* Disable and stop any in progress auto-negotiation */
- xgbe_disable_an(pdata);
+ xgbe_an_disable_all(pdata);
 /* Clear any auto-negotiation interrupts */
- XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_INT, 0);
+ xgbe_an_clear_interrupts_all(pdata);
pdata->an_result = XGBE_AN_READY;
pdata->an_state = XGBE_AN_READY;
@@ -974,11 +1210,8 @@ static int __xgbe_phy_config_aneg(struct xgbe_prv_data *pdata)
/* Re-enable auto-negotiation interrupt */
enable_irq(pdata->an_irq);
- /* Set up advertisement registers based on current settings */
xgbe_an_init(pdata);
-
- /* Enable and start auto-negotiation */
- xgbe_restart_an(pdata);
+ xgbe_an_restart(pdata);
return 0;
}
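The restart sequence in __xgbe_phy_config_aneg() is order-sensitive; a condensed summary of the steps above:

	/*
	 * 1. disable_irq(an_irq)            - quiesce AN interrupts
	 * 2. xgbe_set_mode(...)             - select the best usable mode
	 * 3. xgbe_an_disable_all()          - stop any in-progress AN
	 * 4. xgbe_an_clear_interrupts_all() - drop stale interrupt state
	 * 5. an_result = an_state = XGBE_AN_READY
	 * 6. enable_irq(an_irq)
	 * 7. xgbe_an_init() + xgbe_an_restart()
	 */
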
@@ -1016,108 +1249,52 @@ static void xgbe_check_link_timeout(struct xgbe_prv_data *pdata)
}
}
-static void xgbe_phy_status_force(struct xgbe_prv_data *pdata)
+static enum xgbe_mode xgbe_phy_status_aneg(struct xgbe_prv_data *pdata)
{
- if (xgbe_in_kr_mode(pdata)) {
- pdata->phy.speed = SPEED_10000;
- } else {
- switch (pdata->speed_set) {
- case XGBE_SPEEDSET_1000_10000:
- pdata->phy.speed = SPEED_1000;
- break;
-
- case XGBE_SPEEDSET_2500_10000:
- pdata->phy.speed = SPEED_2500;
- break;
- }
- }
- pdata->phy.duplex = DUPLEX_FULL;
+ return pdata->phy_if.phy_impl.an_outcome(pdata);
}
-static void xgbe_phy_status_aneg(struct xgbe_prv_data *pdata)
+static void xgbe_phy_status_result(struct xgbe_prv_data *pdata)
{
- unsigned int ad_reg, lp_reg;
+ enum xgbe_mode mode;
pdata->phy.lp_advertising = 0;
if ((pdata->phy.autoneg != AUTONEG_ENABLE) || pdata->parallel_detect)
- return xgbe_phy_status_force(pdata);
-
- pdata->phy.lp_advertising |= ADVERTISED_Autoneg;
- pdata->phy.lp_advertising |= ADVERTISED_Backplane;
-
- /* Compare Advertisement and Link Partner register 1 */
- ad_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE);
- lp_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_LPA);
- if (lp_reg & 0x400)
- pdata->phy.lp_advertising |= ADVERTISED_Pause;
- if (lp_reg & 0x800)
- pdata->phy.lp_advertising |= ADVERTISED_Asym_Pause;
-
- if (pdata->phy.pause_autoneg) {
- /* Set flow control based on auto-negotiation result */
- pdata->phy.tx_pause = 0;
- pdata->phy.rx_pause = 0;
-
- if (ad_reg & lp_reg & 0x400) {
- pdata->phy.tx_pause = 1;
- pdata->phy.rx_pause = 1;
- } else if (ad_reg & lp_reg & 0x800) {
- if (ad_reg & 0x400)
- pdata->phy.rx_pause = 1;
- else if (lp_reg & 0x400)
- pdata->phy.tx_pause = 1;
- }
- }
-
- /* Compare Advertisement and Link Partner register 2 */
- ad_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 1);
- lp_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_LPA + 1);
- if (lp_reg & 0x80)
- pdata->phy.lp_advertising |= ADVERTISED_10000baseKR_Full;
- if (lp_reg & 0x20) {
- switch (pdata->speed_set) {
- case XGBE_SPEEDSET_1000_10000:
- pdata->phy.lp_advertising |= ADVERTISED_1000baseKX_Full;
- break;
- case XGBE_SPEEDSET_2500_10000:
- pdata->phy.lp_advertising |= ADVERTISED_2500baseX_Full;
- break;
- }
- }
+ mode = xgbe_cur_mode(pdata);
+ else
+ mode = xgbe_phy_status_aneg(pdata);
- ad_reg &= lp_reg;
- if (ad_reg & 0x80) {
+ switch (mode) {
+ case XGBE_MODE_SGMII_100:
+ pdata->phy.speed = SPEED_100;
+ break;
+ case XGBE_MODE_X:
+ case XGBE_MODE_KX_1000:
+ case XGBE_MODE_SGMII_1000:
+ pdata->phy.speed = SPEED_1000;
+ break;
+ case XGBE_MODE_KX_2500:
+ pdata->phy.speed = SPEED_2500;
+ break;
+ case XGBE_MODE_KR:
+ case XGBE_MODE_SFI:
pdata->phy.speed = SPEED_10000;
- xgbe_set_mode(pdata, XGBE_MODE_KR);
- } else if (ad_reg & 0x20) {
- switch (pdata->speed_set) {
- case XGBE_SPEEDSET_1000_10000:
- pdata->phy.speed = SPEED_1000;
- break;
-
- case XGBE_SPEEDSET_2500_10000:
- pdata->phy.speed = SPEED_2500;
- break;
- }
-
- xgbe_set_mode(pdata, XGBE_MODE_KX);
- } else {
+ break;
+ case XGBE_MODE_UNKNOWN:
+ default:
pdata->phy.speed = SPEED_UNKNOWN;
}
- /* Compare Advertisement and Link Partner register 3 */
- ad_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 2);
- lp_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_LPA + 2);
- if (lp_reg & 0xc000)
- pdata->phy.lp_advertising |= ADVERTISED_10000baseR_FEC;
-
pdata->phy.duplex = DUPLEX_FULL;
+
+ xgbe_set_mode(pdata, mode);
}
static void xgbe_phy_status(struct xgbe_prv_data *pdata)
{
- unsigned int reg, link_aneg;
+ unsigned int link_aneg;
+ int an_restart;
if (test_bit(XGBE_LINK_ERR, &pdata->dev_state)) {
netif_carrier_off(pdata->netdev);
@@ -1128,12 +1305,12 @@ static void xgbe_phy_status(struct xgbe_prv_data *pdata)
link_aneg = (pdata->phy.autoneg == AUTONEG_ENABLE);
- /* Get the link status. Link status is latched low, so read
- * once to clear and then read again to get current state
- */
- reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_STAT1);
- reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_STAT1);
- pdata->phy.link = (reg & MDIO_STAT1_LSTATUS) ? 1 : 0;
+ pdata->phy.link = pdata->phy_if.phy_impl.link_status(pdata,
+ &an_restart);
+ if (an_restart) {
+ xgbe_phy_config_aneg(pdata);
+ return;
+ }
if (pdata->phy.link) {
if (link_aneg && !xgbe_phy_aneg_done(pdata)) {
@@ -1141,7 +1318,7 @@ static void xgbe_phy_status(struct xgbe_prv_data *pdata)
return;
}
- xgbe_phy_status_aneg(pdata);
+ xgbe_phy_status_result(pdata);
if (test_bit(XGBE_LINK_INIT, &pdata->dev_state))
clear_bit(XGBE_LINK_INIT, &pdata->dev_state);
@@ -1155,7 +1332,7 @@ static void xgbe_phy_status(struct xgbe_prv_data *pdata)
return;
}
- xgbe_phy_status_aneg(pdata);
+ xgbe_phy_status_result(pdata);
netif_carrier_off(pdata->netdev);
}
@@ -1168,13 +1345,19 @@ static void xgbe_phy_stop(struct xgbe_prv_data *pdata)
{
netif_dbg(pdata, link, pdata->netdev, "stopping PHY\n");
+ if (!pdata->phy_started)
+ return;
+
+ /* Indicate the PHY is down */
+ pdata->phy_started = 0;
+
/* Disable auto-negotiation */
- xgbe_disable_an(pdata);
+ xgbe_an_disable_all(pdata);
- /* Disable auto-negotiation interrupts */
- XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_INTMASK, 0);
+ if (pdata->dev_irq != pdata->an_irq)
+ devm_free_irq(pdata->dev, pdata->an_irq, pdata);
- devm_free_irq(pdata->dev, pdata->an_irq, pdata);
+ pdata->phy_if.phy_impl.stop(pdata);
pdata->phy.link = 0;
netif_carrier_off(pdata->netdev);
@@ -1189,64 +1372,74 @@ static int xgbe_phy_start(struct xgbe_prv_data *pdata)
netif_dbg(pdata, link, pdata->netdev, "starting PHY\n");
- ret = devm_request_irq(pdata->dev, pdata->an_irq,
- xgbe_an_isr, 0, pdata->an_name,
- pdata);
- if (ret) {
- netdev_err(netdev, "phy irq request failed\n");
+ ret = pdata->phy_if.phy_impl.start(pdata);
+ if (ret)
return ret;
+
+ /* If we have a separate AN irq, enable it */
+ if (pdata->dev_irq != pdata->an_irq) {
+ ret = devm_request_irq(pdata->dev, pdata->an_irq,
+ xgbe_an_isr, 0, pdata->an_name,
+ pdata);
+ if (ret) {
+ netdev_err(netdev, "phy irq request failed\n");
+ goto err_stop;
+ }
}
/* Set initial mode - call the mode setting routines
 * directly to ensure we are properly configured
*/
- if (xgbe_use_xgmii_mode(pdata)) {
- xgbe_xgmii_mode(pdata);
- } else if (xgbe_use_gmii_mode(pdata)) {
- xgbe_gmii_mode(pdata);
- } else if (xgbe_use_gmii_2500_mode(pdata)) {
- xgbe_gmii_2500_mode(pdata);
+ if (xgbe_use_mode(pdata, XGBE_MODE_KR)) {
+ xgbe_kr_mode(pdata);
+ } else if (xgbe_use_mode(pdata, XGBE_MODE_KX_2500)) {
+ xgbe_kx_2500_mode(pdata);
+ } else if (xgbe_use_mode(pdata, XGBE_MODE_KX_1000)) {
+ xgbe_kx_1000_mode(pdata);
+ } else if (xgbe_use_mode(pdata, XGBE_MODE_SFI)) {
+ xgbe_sfi_mode(pdata);
+ } else if (xgbe_use_mode(pdata, XGBE_MODE_X)) {
+ xgbe_x_mode(pdata);
+ } else if (xgbe_use_mode(pdata, XGBE_MODE_SGMII_1000)) {
+ xgbe_sgmii_1000_mode(pdata);
+ } else if (xgbe_use_mode(pdata, XGBE_MODE_SGMII_100)) {
+ xgbe_sgmii_100_mode(pdata);
} else {
ret = -EINVAL;
goto err_irq;
}
- /* Set up advertisement registers based on current settings */
- xgbe_an_init(pdata);
+ /* Indicate the PHY is up and running */
+ pdata->phy_started = 1;
- /* Enable auto-negotiation interrupts */
- XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_INTMASK, 0x07);
+ xgbe_an_init(pdata);
+ xgbe_an_enable_interrupts(pdata);
return xgbe_phy_config_aneg(pdata);
err_irq:
- devm_free_irq(pdata->dev, pdata->an_irq, pdata);
+ if (pdata->dev_irq != pdata->an_irq)
+ devm_free_irq(pdata->dev, pdata->an_irq, pdata);
+
+err_stop:
+ pdata->phy_if.phy_impl.stop(pdata);
return ret;
}
static int xgbe_phy_reset(struct xgbe_prv_data *pdata)
{
- unsigned int count, reg;
-
- reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL1);
- reg |= MDIO_CTRL1_RESET;
- XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_CTRL1, reg);
-
- count = 50;
- do {
- msleep(20);
- reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL1);
- } while ((reg & MDIO_CTRL1_RESET) && --count);
+ int ret;
- if (reg & MDIO_CTRL1_RESET)
- return -ETIMEDOUT;
+ ret = pdata->phy_if.phy_impl.reset(pdata);
+ if (ret)
+ return ret;
/* Disable auto-negotiation for now */
- xgbe_disable_an(pdata);
+ xgbe_an_disable_all(pdata);
/* Clear auto-negotiation interrupts */
- XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_INT, 0);
+ xgbe_an_clear_interrupts_all(pdata);
return 0;
}
@@ -1257,74 +1450,96 @@ static void xgbe_dump_phy_registers(struct xgbe_prv_data *pdata)
dev_dbg(dev, "\n************* PHY Reg dump **********************\n");
- dev_dbg(dev, "PCS Control Reg (%#04x) = %#04x\n", MDIO_CTRL1,
+ dev_dbg(dev, "PCS Control Reg (%#06x) = %#06x\n", MDIO_CTRL1,
XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL1));
- dev_dbg(dev, "PCS Status Reg (%#04x) = %#04x\n", MDIO_STAT1,
+ dev_dbg(dev, "PCS Status Reg (%#06x) = %#06x\n", MDIO_STAT1,
XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_STAT1));
- dev_dbg(dev, "Phy Id (PHYS ID 1 %#04x)= %#04x\n", MDIO_DEVID1,
+ dev_dbg(dev, "Phy Id (PHYS ID 1 %#06x)= %#06x\n", MDIO_DEVID1,
XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_DEVID1));
- dev_dbg(dev, "Phy Id (PHYS ID 2 %#04x)= %#04x\n", MDIO_DEVID2,
+ dev_dbg(dev, "Phy Id (PHYS ID 2 %#06x)= %#06x\n", MDIO_DEVID2,
XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_DEVID2));
- dev_dbg(dev, "Devices in Package (%#04x)= %#04x\n", MDIO_DEVS1,
+ dev_dbg(dev, "Devices in Package (%#06x)= %#06x\n", MDIO_DEVS1,
XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_DEVS1));
- dev_dbg(dev, "Devices in Package (%#04x)= %#04x\n", MDIO_DEVS2,
+ dev_dbg(dev, "Devices in Package (%#06x)= %#06x\n", MDIO_DEVS2,
XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_DEVS2));
- dev_dbg(dev, "Auto-Neg Control Reg (%#04x) = %#04x\n", MDIO_CTRL1,
+ dev_dbg(dev, "Auto-Neg Control Reg (%#06x) = %#06x\n", MDIO_CTRL1,
XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_CTRL1));
- dev_dbg(dev, "Auto-Neg Status Reg (%#04x) = %#04x\n", MDIO_STAT1,
+ dev_dbg(dev, "Auto-Neg Status Reg (%#06x) = %#06x\n", MDIO_STAT1,
XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_STAT1));
- dev_dbg(dev, "Auto-Neg Ad Reg 1 (%#04x) = %#04x\n",
+ dev_dbg(dev, "Auto-Neg Ad Reg 1 (%#06x) = %#06x\n",
MDIO_AN_ADVERTISE,
XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE));
- dev_dbg(dev, "Auto-Neg Ad Reg 2 (%#04x) = %#04x\n",
+ dev_dbg(dev, "Auto-Neg Ad Reg 2 (%#06x) = %#06x\n",
MDIO_AN_ADVERTISE + 1,
XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 1));
- dev_dbg(dev, "Auto-Neg Ad Reg 3 (%#04x) = %#04x\n",
+ dev_dbg(dev, "Auto-Neg Ad Reg 3 (%#06x) = %#06x\n",
MDIO_AN_ADVERTISE + 2,
XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 2));
- dev_dbg(dev, "Auto-Neg Completion Reg (%#04x) = %#04x\n",
+ dev_dbg(dev, "Auto-Neg Completion Reg (%#06x) = %#06x\n",
MDIO_AN_COMP_STAT,
XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_COMP_STAT));
dev_dbg(dev, "\n*************************************************\n");
}
-static void xgbe_phy_init(struct xgbe_prv_data *pdata)
+static int xgbe_phy_best_advertised_speed(struct xgbe_prv_data *pdata)
{
+ if (pdata->phy.advertising & ADVERTISED_10000baseKR_Full)
+ return SPEED_10000;
+ else if (pdata->phy.advertising & ADVERTISED_10000baseT_Full)
+ return SPEED_10000;
+ else if (pdata->phy.advertising & ADVERTISED_2500baseX_Full)
+ return SPEED_2500;
+ else if (pdata->phy.advertising & ADVERTISED_1000baseKX_Full)
+ return SPEED_1000;
+ else if (pdata->phy.advertising & ADVERTISED_1000baseT_Full)
+ return SPEED_1000;
+ else if (pdata->phy.advertising & ADVERTISED_100baseT_Full)
+ return SPEED_100;
+
+ return SPEED_UNKNOWN;
+}
+
+static void xgbe_phy_exit(struct xgbe_prv_data *pdata)
+{
+ xgbe_phy_stop(pdata);
+
+ pdata->phy_if.phy_impl.exit(pdata);
+}
+
+static int xgbe_phy_init(struct xgbe_prv_data *pdata)
+{
+ int ret;
+
mutex_init(&pdata->an_mutex);
INIT_WORK(&pdata->an_irq_work, xgbe_an_irq_work);
INIT_WORK(&pdata->an_work, xgbe_an_state_machine);
pdata->mdio_mmd = MDIO_MMD_PCS;
- /* Initialize supported features */
- pdata->phy.supported = SUPPORTED_Autoneg;
- pdata->phy.supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;
- pdata->phy.supported |= SUPPORTED_Backplane;
- pdata->phy.supported |= SUPPORTED_10000baseKR_Full;
- switch (pdata->speed_set) {
- case XGBE_SPEEDSET_1000_10000:
- pdata->phy.supported |= SUPPORTED_1000baseKX_Full;
- break;
- case XGBE_SPEEDSET_2500_10000:
- pdata->phy.supported |= SUPPORTED_2500baseX_Full;
- break;
- }
-
+ /* Check for FEC support */
pdata->fec_ability = XMDIO_READ(pdata, MDIO_MMD_PMAPMD,
MDIO_PMA_10GBR_FECABLE);
pdata->fec_ability &= (MDIO_PMA_10GBR_FECABLE_ABLE |
MDIO_PMA_10GBR_FECABLE_ERRABLE);
- if (pdata->fec_ability & MDIO_PMA_10GBR_FECABLE_ABLE)
- pdata->phy.supported |= SUPPORTED_10000baseR_FEC;
+ /* Setup the phy (including supported features) */
+ ret = pdata->phy_if.phy_impl.init(pdata);
+ if (ret)
+ return ret;
pdata->phy.advertising = pdata->phy.supported;
pdata->phy.address = 0;
- pdata->phy.autoneg = AUTONEG_ENABLE;
- pdata->phy.speed = SPEED_UNKNOWN;
- pdata->phy.duplex = DUPLEX_UNKNOWN;
+ if (pdata->phy.advertising & ADVERTISED_Autoneg) {
+ pdata->phy.autoneg = AUTONEG_ENABLE;
+ pdata->phy.speed = SPEED_UNKNOWN;
+ pdata->phy.duplex = DUPLEX_UNKNOWN;
+ } else {
+ pdata->phy.autoneg = AUTONEG_DISABLE;
+ pdata->phy.speed = xgbe_phy_best_advertised_speed(pdata);
+ pdata->phy.duplex = DUPLEX_FULL;
+ }
pdata->phy.link = 0;
@@ -1346,11 +1561,14 @@ static void xgbe_phy_init(struct xgbe_prv_data *pdata)
if (netif_msg_drv(pdata))
xgbe_dump_phy_registers(pdata);
+
+ return 0;
}
void xgbe_init_function_ptrs_phy(struct xgbe_phy_if *phy_if)
{
phy_if->phy_init = xgbe_phy_init;
+ phy_if->phy_exit = xgbe_phy_exit;
phy_if->phy_reset = xgbe_phy_reset;
phy_if->phy_start = xgbe_phy_start;
@@ -1358,4 +1576,8 @@ void xgbe_init_function_ptrs_phy(struct xgbe_phy_if *phy_if)
phy_if->phy_status = xgbe_phy_status;
phy_if->phy_config_aneg = xgbe_phy_config_aneg;
+
+ phy_if->phy_valid_speed = xgbe_phy_valid_speed;
+
+ phy_if->an_isr = xgbe_an_combined_isr;
}
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-pci.c b/drivers/net/ethernet/amd/xgbe/xgbe-pci.c
new file mode 100644
index 000000000000..e76b7f65b805
--- /dev/null
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-pci.c
@@ -0,0 +1,529 @@
+/*
+ * AMD 10Gb Ethernet driver
+ *
+ * This file is available to you under your choice of the following two
+ * licenses:
+ *
+ * License 1: GPLv2
+ *
+ * Copyright (c) 2016 Advanced Micro Devices, Inc.
+ *
+ * This file is free software; you may copy, redistribute and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * This file incorporates work covered by the following copyright and
+ * permission notice:
+ * The Synopsys DWC ETHER XGMAC Software Driver and documentation
+ * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
+ * Inc. unless otherwise expressly agreed to in writing between Synopsys
+ * and you.
+ *
+ * The Software IS NOT an item of Licensed Software or Licensed Product
+ * under any End User Software License Agreement or Agreement for Licensed
+ * Product with Synopsys or any supplement thereto. Permission is hereby
+ * granted, free of charge, to any person obtaining a copy of this software
+ * annotated with this license and the Software, to deal in the Software
+ * without restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is furnished
+ * to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
+ * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+ * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *
+ * License 2: Modified BSD
+ *
+ * Copyright (c) 2016 Advanced Micro Devices, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Advanced Micro Devices, Inc. nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * This file incorporates work covered by the following copyright and
+ * permission notice:
+ * The Synopsys DWC ETHER XGMAC Software Driver and documentation
+ * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
+ * Inc. unless otherwise expressly agreed to in writing between Synopsys
+ * and you.
+ *
+ * The Software IS NOT an item of Licensed Software or Licensed Product
+ * under any End User Software License Agreement or Agreement for Licensed
+ * Product with Synopsys or any supplement thereto. Permission is hereby
+ * granted, free of charge, to any person obtaining a copy of this software
+ * annotated with this license and the Software, to deal in the Software
+ * without restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is furnished
+ * to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
+ * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+ * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/pci.h>
+#include <linux/log2.h>
+
+#include "xgbe.h"
+#include "xgbe-common.h"
+
+static int xgbe_config_msi(struct xgbe_prv_data *pdata)
+{
+ unsigned int msi_count;
+ unsigned int i, j;
+ int ret;
+
+ msi_count = XGBE_MSIX_BASE_COUNT;
+ msi_count += max(pdata->rx_ring_count,
+ pdata->tx_ring_count);
+ msi_count = roundup_pow_of_two(msi_count);
+
+ ret = pci_enable_msi_exact(pdata->pcidev, msi_count);
+ if (ret < 0) {
+ dev_info(pdata->dev, "MSI request for %u interrupts failed\n",
+ msi_count);
+
+ ret = pci_enable_msi(pdata->pcidev);
+ if (ret < 0) {
+ dev_info(pdata->dev, "MSI enablement failed\n");
+ return ret;
+ }
+
+ msi_count = 1;
+ }
+
+ pdata->irq_count = msi_count;
+
+ pdata->dev_irq = pdata->pcidev->irq;
+
+ if (msi_count > 1) {
+ pdata->ecc_irq = pdata->pcidev->irq + 1;
+ pdata->i2c_irq = pdata->pcidev->irq + 2;
+ pdata->an_irq = pdata->pcidev->irq + 3;
+
+ for (i = XGBE_MSIX_BASE_COUNT, j = 0;
+ (i < msi_count) && (j < XGBE_MAX_DMA_CHANNELS);
+ i++, j++)
+ pdata->channel_irq[j] = pdata->pcidev->irq + i;
+ pdata->channel_irq_count = j;
+
+ pdata->per_channel_irq = 1;
+ pdata->channel_irq_mode = XGBE_IRQ_MODE_LEVEL;
+ } else {
+ pdata->ecc_irq = pdata->pcidev->irq;
+ pdata->i2c_irq = pdata->pcidev->irq;
+ pdata->an_irq = pdata->pcidev->irq;
+ }
+
+ if (netif_msg_probe(pdata))
+ dev_dbg(pdata->dev, "MSI interrupts enabled\n");
+
+ return 0;
+}
+
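With more than one MSI vector granted, the layout above is fixed: vector 0 is the device interrupt and vectors 1-3 carry ECC, I2C and AN, with DMA channels starting at XGBE_MSIX_BASE_COUNT. A worked example, assuming XGBE_MSIX_BASE_COUNT is 4 (consistent with the four base sources assigned above) and four Tx/Rx rings: 4 + 4 = 8 vectors are requested (already a power of two), so channel j lands on pdata->pcidev->irq + 4 + j for j = 0..3.
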
+static int xgbe_config_msix(struct xgbe_prv_data *pdata)
+{
+ unsigned int msix_count;
+ unsigned int i, j;
+ int ret;
+
+ msix_count = XGBE_MSIX_BASE_COUNT;
+ msix_count += max(pdata->rx_ring_count,
+ pdata->tx_ring_count);
+
+ pdata->msix_entries = devm_kcalloc(pdata->dev, msix_count,
+ sizeof(struct msix_entry),
+ GFP_KERNEL);
+ if (!pdata->msix_entries)
+ return -ENOMEM;
+
+ for (i = 0; i < msix_count; i++)
+ pdata->msix_entries[i].entry = i;
+
+ ret = pci_enable_msix_range(pdata->pcidev, pdata->msix_entries,
+ XGBE_MSIX_MIN_COUNT, msix_count);
+ if (ret < 0) {
+ dev_info(pdata->dev, "MSI-X enablement failed\n");
+ devm_kfree(pdata->dev, pdata->msix_entries);
+ pdata->msix_entries = NULL;
+ return ret;
+ }
+
+ pdata->irq_count = ret;
+
+ pdata->dev_irq = pdata->msix_entries[0].vector;
+ pdata->ecc_irq = pdata->msix_entries[1].vector;
+ pdata->i2c_irq = pdata->msix_entries[2].vector;
+ pdata->an_irq = pdata->msix_entries[3].vector;
+
+ for (i = XGBE_MSIX_BASE_COUNT, j = 0; i < ret; i++, j++)
+ pdata->channel_irq[j] = pdata->msix_entries[i].vector;
+ pdata->channel_irq_count = j;
+
+ pdata->per_channel_irq = 1;
+ pdata->channel_irq_mode = XGBE_IRQ_MODE_LEVEL;
+
+ if (netif_msg_probe(pdata))
+ dev_dbg(pdata->dev, "MSI-X interrupts enabled\n");
+
+ return 0;
+}
+
+static int xgbe_config_irqs(struct xgbe_prv_data *pdata)
+{
+ int ret;
+
+ ret = xgbe_config_msix(pdata);
+ if (!ret)
+ goto out;
+
+ ret = xgbe_config_msi(pdata);
+ if (!ret)
+ goto out;
+
+ pdata->irq_count = 1;
+ pdata->irq_shared = 1;
+
+ pdata->dev_irq = pdata->pcidev->irq;
+ pdata->ecc_irq = pdata->pcidev->irq;
+ pdata->i2c_irq = pdata->pcidev->irq;
+ pdata->an_irq = pdata->pcidev->irq;
+
+out:
+ if (netif_msg_probe(pdata)) {
+ unsigned int i;
+
+ dev_dbg(pdata->dev, " dev irq=%d\n", pdata->dev_irq);
+ dev_dbg(pdata->dev, " ecc irq=%d\n", pdata->ecc_irq);
+ dev_dbg(pdata->dev, " i2c irq=%d\n", pdata->i2c_irq);
+ dev_dbg(pdata->dev, " an irq=%d\n", pdata->an_irq);
+ for (i = 0; i < pdata->channel_irq_count; i++)
+ dev_dbg(pdata->dev, " dma%u irq=%d\n",
+ i, pdata->channel_irq[i]);
+ }
+
+ return 0;
+}
+
+static int xgbe_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ struct xgbe_prv_data *pdata;
+ struct device *dev = &pdev->dev;
+ void __iomem * const *iomap_table;
+ unsigned int ma_lo, ma_hi;
+ unsigned int reg;
+ int bar_mask;
+ int ret;
+
+ pdata = xgbe_alloc_pdata(dev);
+ if (IS_ERR(pdata)) {
+ ret = PTR_ERR(pdata);
+ goto err_alloc;
+ }
+
+ pdata->pcidev = pdev;
+ pci_set_drvdata(pdev, pdata);
+
+ /* Get the version data */
+ pdata->vdata = (struct xgbe_version_data *)id->driver_data;
+
+ ret = pcim_enable_device(pdev);
+ if (ret) {
+ dev_err(dev, "pcim_enable_device failed\n");
+ goto err_pci_enable;
+ }
+
+ /* Obtain the mmio areas for the device */
+ bar_mask = pci_select_bars(pdev, IORESOURCE_MEM);
+ ret = pcim_iomap_regions(pdev, bar_mask, XGBE_DRV_NAME);
+ if (ret) {
+ dev_err(dev, "pcim_iomap_regions failed\n");
+ goto err_pci_enable;
+ }
+
+ iomap_table = pcim_iomap_table(pdev);
+ if (!iomap_table) {
+ dev_err(dev, "pcim_iomap_table failed\n");
+ ret = -ENOMEM;
+ goto err_pci_enable;
+ }
+
+ pdata->xgmac_regs = iomap_table[XGBE_XGMAC_BAR];
+ if (!pdata->xgmac_regs) {
+ dev_err(dev, "xgmac ioremap failed\n");
+ ret = -ENOMEM;
+ goto err_pci_enable;
+ }
+ pdata->xprop_regs = pdata->xgmac_regs + XGBE_MAC_PROP_OFFSET;
+ pdata->xi2c_regs = pdata->xgmac_regs + XGBE_I2C_CTRL_OFFSET;
+ if (netif_msg_probe(pdata)) {
+ dev_dbg(dev, "xgmac_regs = %p\n", pdata->xgmac_regs);
+ dev_dbg(dev, "xprop_regs = %p\n", pdata->xprop_regs);
+ dev_dbg(dev, "xi2c_regs = %p\n", pdata->xi2c_regs);
+ }
+
+ pdata->xpcs_regs = iomap_table[XGBE_XPCS_BAR];
+ if (!pdata->xpcs_regs) {
+ dev_err(dev, "xpcs ioremap failed\n");
+ ret = -ENOMEM;
+ goto err_pci_enable;
+ }
+ if (netif_msg_probe(pdata))
+ dev_dbg(dev, "xpcs_regs = %p\n", pdata->xpcs_regs);
+
+ /* Configure the PCS indirect addressing support */
+ reg = XPCS32_IOREAD(pdata, PCS_V2_WINDOW_DEF);
+ pdata->xpcs_window = XPCS_GET_BITS(reg, PCS_V2_WINDOW_DEF, OFFSET);
+ pdata->xpcs_window <<= 6;
+ pdata->xpcs_window_size = XPCS_GET_BITS(reg, PCS_V2_WINDOW_DEF, SIZE);
+ pdata->xpcs_window_size = 1 << (pdata->xpcs_window_size + 7);
+ pdata->xpcs_window_mask = pdata->xpcs_window_size - 1;
+ if (netif_msg_probe(pdata)) {
+ dev_dbg(dev, "xpcs window = %#010x\n",
+ pdata->xpcs_window);
+ dev_dbg(dev, "xpcs window size = %#010x\n",
+ pdata->xpcs_window_size);
+ dev_dbg(dev, "xpcs window mask = %#010x\n",
+ pdata->xpcs_window_mask);
+ }
+
+ pci_set_master(pdev);
+
+ /* Enable all interrupts in the hardware */
+ XP_IOWRITE(pdata, XP_INT_EN, 0x1fffff);
+
+ /* Retrieve the MAC address */
+ ma_lo = XP_IOREAD(pdata, XP_MAC_ADDR_LO);
+ ma_hi = XP_IOREAD(pdata, XP_MAC_ADDR_HI);
+ pdata->mac_addr[0] = ma_lo & 0xff;
+ pdata->mac_addr[1] = (ma_lo >> 8) & 0xff;
+ pdata->mac_addr[2] = (ma_lo >> 16) & 0xff;
+ pdata->mac_addr[3] = (ma_lo >> 24) & 0xff;
+ pdata->mac_addr[4] = ma_hi & 0xff;
+ pdata->mac_addr[5] = (ma_hi >> 8) & 0xff;
+ if (!XP_GET_BITS(ma_hi, XP_MAC_ADDR_HI, VALID) ||
+ !is_valid_ether_addr(pdata->mac_addr)) {
+ dev_err(dev, "invalid mac address\n");
+ ret = -EINVAL;
+ goto err_pci_enable;
+ }
+
+ /* Clock settings */
+ pdata->sysclk_rate = XGBE_V2_DMA_CLOCK_FREQ;
+ pdata->ptpclk_rate = XGBE_V2_PTP_CLOCK_FREQ;
+
+ /* Set the DMA coherency values */
+ pdata->coherent = 1;
+ pdata->axdomain = XGBE_DMA_OS_AXDOMAIN;
+ pdata->arcache = XGBE_DMA_OS_ARCACHE;
+ pdata->awcache = XGBE_DMA_OS_AWCACHE;
+
+ /* Set the maximum channels and queues */
+ reg = XP_IOREAD(pdata, XP_PROP_1);
+ pdata->tx_max_channel_count = XP_GET_BITS(reg, XP_PROP_1, MAX_TX_DMA);
+ pdata->rx_max_channel_count = XP_GET_BITS(reg, XP_PROP_1, MAX_RX_DMA);
+ pdata->tx_max_q_count = XP_GET_BITS(reg, XP_PROP_1, MAX_TX_QUEUES);
+ pdata->rx_max_q_count = XP_GET_BITS(reg, XP_PROP_1, MAX_RX_QUEUES);
+ if (netif_msg_probe(pdata)) {
+ dev_dbg(dev, "max tx/rx channel count = %u/%u\n",
+ pdata->tx_max_channel_count,
+ pdata->rx_max_channel_count);
+ dev_dbg(dev, "max tx/rx hw queue count = %u/%u\n",
+ pdata->tx_max_q_count, pdata->rx_max_q_count);
+ }
+
+ /* Set the hardware channel and queue counts */
+ xgbe_set_counts(pdata);
+
+ /* Set the maximum fifo amounts */
+ reg = XP_IOREAD(pdata, XP_PROP_2);
+ pdata->tx_max_fifo_size = XP_GET_BITS(reg, XP_PROP_2, TX_FIFO_SIZE);
+ pdata->tx_max_fifo_size *= 16384;
+ pdata->tx_max_fifo_size = min(pdata->tx_max_fifo_size,
+ pdata->vdata->tx_max_fifo_size);
+ pdata->rx_max_fifo_size = XP_GET_BITS(reg, XP_PROP_2, RX_FIFO_SIZE);
+ pdata->rx_max_fifo_size *= 16384;
+ pdata->rx_max_fifo_size = min(pdata->rx_max_fifo_size,
+ pdata->vdata->rx_max_fifo_size);
+ if (netif_msg_probe(pdata))
+ dev_dbg(dev, "max tx/rx max fifo size = %u/%u\n",
+ pdata->tx_max_fifo_size, pdata->rx_max_fifo_size);
+
+ /* Configure interrupt support */
+ ret = xgbe_config_irqs(pdata);
+ if (ret)
+ goto err_pci_enable;
+
+ /* Configure the netdev resource */
+ ret = xgbe_config_netdev(pdata);
+ if (ret)
+ goto err_pci_enable;
+
+ netdev_notice(pdata->netdev, "net device enabled\n");
+
+ return 0;
+
+err_pci_enable:
+ xgbe_free_pdata(pdata);
+
+err_alloc:
+ dev_notice(dev, "net device not enabled\n");
+
+ return ret;
+}
+
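The PCS indirect-addressing setup in the probe above decodes PCS_V2_WINDOW_DEF as: window offset = OFFSET << 6, window size = 2^(SIZE + 7) bytes, and mask = size - 1. A worked example with illustrative field values (OFFSET = 0x80 and SIZE = 2 are assumptions, not hardware defaults):

	pdata->xpcs_window      = 0x80 << 6;     /* = 0x2000 */
	pdata->xpcs_window_size = 1 << (2 + 7);  /* = 512 bytes */
	pdata->xpcs_window_mask = 512 - 1;       /* = 0x1ff */
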
+static void xgbe_pci_remove(struct pci_dev *pdev)
+{
+ struct xgbe_prv_data *pdata = pci_get_drvdata(pdev);
+
+ xgbe_deconfig_netdev(pdata);
+
+ xgbe_free_pdata(pdata);
+}
+
+#ifdef CONFIG_PM
+static int xgbe_pci_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+ struct xgbe_prv_data *pdata = pci_get_drvdata(pdev);
+ struct net_device *netdev = pdata->netdev;
+ int ret = 0;
+
+ if (netif_running(netdev))
+ ret = xgbe_powerdown(netdev, XGMAC_DRIVER_CONTEXT);
+
+ pdata->lpm_ctrl = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL1);
+ pdata->lpm_ctrl |= MDIO_CTRL1_LPOWER;
+ XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_CTRL1, pdata->lpm_ctrl);
+
+ return ret;
+}
+
+static int xgbe_pci_resume(struct pci_dev *pdev)
+{
+ struct xgbe_prv_data *pdata = pci_get_drvdata(pdev);
+ struct net_device *netdev = pdata->netdev;
+ int ret = 0;
+
+ pdata->lpm_ctrl &= ~MDIO_CTRL1_LPOWER;
+ XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_CTRL1, pdata->lpm_ctrl);
+
+ if (netif_running(netdev)) {
+ ret = xgbe_powerup(netdev, XGMAC_DRIVER_CONTEXT);
+
+ /* Schedule a restart in case the link or phy state changed
+ * while we were powered down.
+ */
+ schedule_work(&pdata->restart_work);
+ }
+
+ return ret;
+}
+#endif /* CONFIG_PM */
+
+static const struct xgbe_version_data xgbe_v2a = {
+ .init_function_ptrs_phy_impl = xgbe_init_function_ptrs_phy_v2,
+ .xpcs_access = XGBE_XPCS_ACCESS_V2,
+ .mmc_64bit = 1,
+ .tx_max_fifo_size = 229376,
+ .rx_max_fifo_size = 229376,
+ .tx_tstamp_workaround = 1,
+ .ecc_support = 1,
+ .i2c_support = 1,
+};
+
+static const struct xgbe_version_data xgbe_v2b = {
+ .init_function_ptrs_phy_impl = xgbe_init_function_ptrs_phy_v2,
+ .xpcs_access = XGBE_XPCS_ACCESS_V2,
+ .mmc_64bit = 1,
+ .tx_max_fifo_size = 65536,
+ .rx_max_fifo_size = 65536,
+ .tx_tstamp_workaround = 1,
+ .ecc_support = 1,
+ .i2c_support = 1,
+};
+
+static const struct pci_device_id xgbe_pci_table[] = {
+ { PCI_VDEVICE(AMD, 0x1458),
+ .driver_data = (kernel_ulong_t)&xgbe_v2a },
+ { PCI_VDEVICE(AMD, 0x1459),
+ .driver_data = (kernel_ulong_t)&xgbe_v2b },
+ /* Last entry must be zero */
+ { 0, }
+};
+MODULE_DEVICE_TABLE(pci, xgbe_pci_table);
+
+static struct pci_driver xgbe_driver = {
+ .name = XGBE_DRV_NAME,
+ .id_table = xgbe_pci_table,
+ .probe = xgbe_pci_probe,
+ .remove = xgbe_pci_remove,
+#ifdef CONFIG_PM
+ .suspend = xgbe_pci_suspend,
+ .resume = xgbe_pci_resume,
+#endif
+};
+
+int xgbe_pci_init(void)
+{
+ return pci_register_driver(&xgbe_driver);
+}
+
+void xgbe_pci_exit(void)
+{
+ pci_unregister_driver(&xgbe_driver);
+}
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v1.c b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v1.c
new file mode 100644
index 000000000000..c75edcac5e0a
--- /dev/null
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v1.c
@@ -0,0 +1,845 @@
+/*
+ * AMD 10Gb Ethernet driver
+ *
+ * This file is available to you under your choice of the following two
+ * licenses:
+ *
+ * License 1: GPLv2
+ *
+ * Copyright (c) 2016 Advanced Micro Devices, Inc.
+ *
+ * This file is free software; you may copy, redistribute and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * This file incorporates work covered by the following copyright and
+ * permission notice:
+ * The Synopsys DWC ETHER XGMAC Software Driver and documentation
+ * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
+ * Inc. unless otherwise expressly agreed to in writing between Synopsys
+ * and you.
+ *
+ * The Software IS NOT an item of Licensed Software or Licensed Product
+ * under any End User Software License Agreement or Agreement for Licensed
+ * Product with Synopsys or any supplement thereto. Permission is hereby
+ * granted, free of charge, to any person obtaining a copy of this software
+ * annotated with this license and the Software, to deal in the Software
+ * without restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is furnished
+ * to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
+ * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+ * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *
+ * License 2: Modified BSD
+ *
+ * Copyright (c) 2016 Advanced Micro Devices, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Advanced Micro Devices, Inc. nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * This file incorporates work covered by the following copyright and
+ * permission notice:
+ * The Synopsys DWC ETHER XGMAC Software Driver and documentation
+ * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
+ * Inc. unless otherwise expressly agreed to in writing between Synopsys
+ * and you.
+ *
+ * The Software IS NOT an item of Licensed Software or Licensed Product
+ * under any End User Software License Agreement or Agreement for Licensed
+ * Product with Synopsys or any supplement thereto. Permission is hereby
+ * granted, free of charge, to any person obtaining a copy of this software
+ * annotated with this license and the Software, to deal in the Software
+ * without restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is furnished
+ * to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
+ * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+ * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/module.h>
+#include <linux/kmod.h>
+#include <linux/device.h>
+#include <linux/property.h>
+#include <linux/mdio.h>
+#include <linux/phy.h>
+
+#include "xgbe.h"
+#include "xgbe-common.h"
+
+#define XGBE_BLWC_PROPERTY "amd,serdes-blwc"
+#define XGBE_CDR_RATE_PROPERTY "amd,serdes-cdr-rate"
+#define XGBE_PQ_SKEW_PROPERTY "amd,serdes-pq-skew"
+#define XGBE_TX_AMP_PROPERTY "amd,serdes-tx-amp"
+#define XGBE_DFE_CFG_PROPERTY "amd,serdes-dfe-tap-config"
+#define XGBE_DFE_ENA_PROPERTY "amd,serdes-dfe-tap-enable"
+
+/* Default SerDes settings */
+#define XGBE_SPEED_1000_BLWC 1
+#define XGBE_SPEED_1000_CDR 0x2
+#define XGBE_SPEED_1000_PLL 0x0
+#define XGBE_SPEED_1000_PQ 0xa
+#define XGBE_SPEED_1000_RATE 0x3
+#define XGBE_SPEED_1000_TXAMP 0xf
+#define XGBE_SPEED_1000_WORD 0x1
+#define XGBE_SPEED_1000_DFE_TAP_CONFIG 0x3
+#define XGBE_SPEED_1000_DFE_TAP_ENABLE 0x0
+
+#define XGBE_SPEED_2500_BLWC 1
+#define XGBE_SPEED_2500_CDR 0x2
+#define XGBE_SPEED_2500_PLL 0x0
+#define XGBE_SPEED_2500_PQ 0xa
+#define XGBE_SPEED_2500_RATE 0x1
+#define XGBE_SPEED_2500_TXAMP 0xf
+#define XGBE_SPEED_2500_WORD 0x1
+#define XGBE_SPEED_2500_DFE_TAP_CONFIG 0x3
+#define XGBE_SPEED_2500_DFE_TAP_ENABLE 0x0
+
+#define XGBE_SPEED_10000_BLWC 0
+#define XGBE_SPEED_10000_CDR 0x7
+#define XGBE_SPEED_10000_PLL 0x1
+#define XGBE_SPEED_10000_PQ 0x12
+#define XGBE_SPEED_10000_RATE 0x0
+#define XGBE_SPEED_10000_TXAMP 0xa
+#define XGBE_SPEED_10000_WORD 0x7
+#define XGBE_SPEED_10000_DFE_TAP_CONFIG 0x1
+#define XGBE_SPEED_10000_DFE_TAP_ENABLE 0x7f
+
+/* Rate-change complete wait/retry count */
+#define XGBE_RATECHANGE_COUNT 500
+
+static const u32 xgbe_phy_blwc[] = {
+ XGBE_SPEED_1000_BLWC,
+ XGBE_SPEED_2500_BLWC,
+ XGBE_SPEED_10000_BLWC,
+};
+
+static const u32 xgbe_phy_cdr_rate[] = {
+ XGBE_SPEED_1000_CDR,
+ XGBE_SPEED_2500_CDR,
+ XGBE_SPEED_10000_CDR,
+};
+
+static const u32 xgbe_phy_pq_skew[] = {
+ XGBE_SPEED_1000_PQ,
+ XGBE_SPEED_2500_PQ,
+ XGBE_SPEED_10000_PQ,
+};
+
+static const u32 xgbe_phy_tx_amp[] = {
+ XGBE_SPEED_1000_TXAMP,
+ XGBE_SPEED_2500_TXAMP,
+ XGBE_SPEED_10000_TXAMP,
+};
+
+static const u32 xgbe_phy_dfe_tap_cfg[] = {
+ XGBE_SPEED_1000_DFE_TAP_CONFIG,
+ XGBE_SPEED_2500_DFE_TAP_CONFIG,
+ XGBE_SPEED_10000_DFE_TAP_CONFIG,
+};
+
+static const u32 xgbe_phy_dfe_tap_ena[] = {
+ XGBE_SPEED_1000_DFE_TAP_ENABLE,
+ XGBE_SPEED_2500_DFE_TAP_ENABLE,
+ XGBE_SPEED_10000_DFE_TAP_ENABLE,
+};
+
+struct xgbe_phy_data {
+ /* 1000/10000 vs 2500/10000 indicator */
+ unsigned int speed_set;
+
+ /* SerDes UEFI configurable settings.
+ * Switching between modes/speeds requires new values for some
+ * SerDes settings. The values can be supplied as device
+ * properties in array format. The first array entry is for
+ * 1GbE, second for 2.5GbE and third for 10GbE
+ */
+ u32 blwc[XGBE_SPEEDS];
+ u32 cdr_rate[XGBE_SPEEDS];
+ u32 pq_skew[XGBE_SPEEDS];
+ u32 tx_amp[XGBE_SPEEDS];
+ u32 dfe_tap_cfg[XGBE_SPEEDS];
+ u32 dfe_tap_ena[XGBE_SPEEDS];
+};
+
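These per-speed arrays are expected to be filled from the optional device properties named above, falling back to the XGBE_SPEED_* defaults in the xgbe_phy_* tables when a property is absent. A minimal sketch for one array, using the standard linux/property.h helpers (already included above); the surrounding init function is not part of this hunk:

	if (device_property_present(pdata->dev, XGBE_BLWC_PROPERTY)) {
		ret = device_property_read_u32_array(pdata->dev,
						     XGBE_BLWC_PROPERTY,
						     phy_data->blwc,
						     XGBE_SPEEDS);
		if (ret)
			return ret;
	} else {
		memcpy(phy_data->blwc, xgbe_phy_blwc,
		       sizeof(phy_data->blwc));
	}
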
+static void xgbe_phy_kr_training_pre(struct xgbe_prv_data *pdata)
+{
+ XSIR0_IOWRITE_BITS(pdata, SIR0_KR_RT_1, RESET, 1);
+}
+
+static void xgbe_phy_kr_training_post(struct xgbe_prv_data *pdata)
+{
+ XSIR0_IOWRITE_BITS(pdata, SIR0_KR_RT_1, RESET, 0);
+}
+
+static enum xgbe_mode xgbe_phy_an_outcome(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_phy_data *phy_data = pdata->phy_data;
+ enum xgbe_mode mode;
+ unsigned int ad_reg, lp_reg;
+
+ pdata->phy.lp_advertising |= ADVERTISED_Autoneg;
+ pdata->phy.lp_advertising |= ADVERTISED_Backplane;
+
+ /* Compare Advertisement and Link Partner register 1 */
+ ad_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE);
+ lp_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_LPA);
+ if (lp_reg & 0x400)
+ pdata->phy.lp_advertising |= ADVERTISED_Pause;
+ if (lp_reg & 0x800)
+ pdata->phy.lp_advertising |= ADVERTISED_Asym_Pause;
+
+ if (pdata->phy.pause_autoneg) {
+ /* Set flow control based on auto-negotiation result */
+ pdata->phy.tx_pause = 0;
+ pdata->phy.rx_pause = 0;
+
+ if (ad_reg & lp_reg & 0x400) {
+ pdata->phy.tx_pause = 1;
+ pdata->phy.rx_pause = 1;
+ } else if (ad_reg & lp_reg & 0x800) {
+ if (ad_reg & 0x400)
+ pdata->phy.rx_pause = 1;
+ else if (lp_reg & 0x400)
+ pdata->phy.tx_pause = 1;
+ }
+ }
+
+ /* Compare Advertisement and Link Partner register 2 */
+ ad_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 1);
+ lp_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_LPA + 1);
+ if (lp_reg & 0x80)
+ pdata->phy.lp_advertising |= ADVERTISED_10000baseKR_Full;
+ if (lp_reg & 0x20) {
+ if (phy_data->speed_set == XGBE_SPEEDSET_2500_10000)
+ pdata->phy.lp_advertising |= ADVERTISED_2500baseX_Full;
+ else
+ pdata->phy.lp_advertising |= ADVERTISED_1000baseKX_Full;
+ }
+
+ ad_reg &= lp_reg;
+ if (ad_reg & 0x80) {
+ mode = XGBE_MODE_KR;
+ } else if (ad_reg & 0x20) {
+ if (phy_data->speed_set == XGBE_SPEEDSET_2500_10000)
+ mode = XGBE_MODE_KX_2500;
+ else
+ mode = XGBE_MODE_KX_1000;
+ } else {
+ mode = XGBE_MODE_UNKNOWN;
+ }
+
+ /* Compare Advertisement and Link Partner register 3 */
+ ad_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 2);
+ lp_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_LPA + 2);
+ if (lp_reg & 0xc000)
+ pdata->phy.lp_advertising |= ADVERTISED_10000baseR_FEC;
+
+ return mode;
+}
+
+static unsigned int xgbe_phy_an_advertising(struct xgbe_prv_data *pdata)
+{
+ return pdata->phy.advertising;
+}
+
+static int xgbe_phy_an_config(struct xgbe_prv_data *pdata)
+{
+ /* Nothing uniquely required for AN configuration */
+ return 0;
+}
+
+static enum xgbe_an_mode xgbe_phy_an_mode(struct xgbe_prv_data *pdata)
+{
+ return XGBE_AN_MODE_CL73;
+}
+
+static void xgbe_phy_pcs_power_cycle(struct xgbe_prv_data *pdata)
+{
+ unsigned int reg;
+
+ reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL1);
+
+ reg |= MDIO_CTRL1_LPOWER;
+ XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_CTRL1, reg);
+
+ usleep_range(75, 100);
+
+ reg &= ~MDIO_CTRL1_LPOWER;
+ XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_CTRL1, reg);
+}
+
+static void xgbe_phy_start_ratechange(struct xgbe_prv_data *pdata)
+{
+ /* Assert Rx and Tx ratechange */
+ XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, RATECHANGE, 1);
+}
+
+static void xgbe_phy_complete_ratechange(struct xgbe_prv_data *pdata)
+{
+ unsigned int wait;
+ u16 status;
+
+ /* Release Rx and Tx ratechange */
+ XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, RATECHANGE, 0);
+
+ /* Wait for Rx and Tx ready */
+ wait = XGBE_RATECHANGE_COUNT;
+ while (wait--) {
+ usleep_range(50, 75);
+
+ status = XSIR0_IOREAD(pdata, SIR0_STATUS);
+ if (XSIR_GET_BITS(status, SIR0_STATUS, RX_READY) &&
+ XSIR_GET_BITS(status, SIR0_STATUS, TX_READY))
+ goto rx_reset;
+ }
+
+ netif_dbg(pdata, link, pdata->netdev, "SerDes rx/tx not ready (%#hx)\n",
+ status);
+
+rx_reset:
+ /* Perform Rx reset for the DFE changes */
+ XRXTX_IOWRITE_BITS(pdata, RXTX_REG6, RESETB_RXD, 0);
+ XRXTX_IOWRITE_BITS(pdata, RXTX_REG6, RESETB_RXD, 1);
+}
+
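The completion poll above bounds the wait: XGBE_RATECHANGE_COUNT (500) iterations of usleep_range(50, 75) give a worst case of roughly 500 x 50-75 us, i.e. approximately 25-37.5 ms, before the not-ready debug message, after which the Rx reset for the DFE changes is performed unconditionally.
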
+static void xgbe_phy_kr_mode(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_phy_data *phy_data = pdata->phy_data;
+ unsigned int reg;
+
+ /* Set PCS to KR/10G speed */
+ reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL2);
+ reg &= ~MDIO_PCS_CTRL2_TYPE;
+ reg |= MDIO_PCS_CTRL2_10GBR;
+ XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_CTRL2, reg);
+
+ reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL1);
+ reg &= ~MDIO_CTRL1_SPEEDSEL;
+ reg |= MDIO_CTRL1_SPEED10G;
+ XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_CTRL1, reg);
+
+ xgbe_phy_pcs_power_cycle(pdata);
+
+ /* Set SerDes to 10G speed */
+ xgbe_phy_start_ratechange(pdata);
+
+ XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, DATARATE, XGBE_SPEED_10000_RATE);
+ XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, WORDMODE, XGBE_SPEED_10000_WORD);
+ XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, PLLSEL, XGBE_SPEED_10000_PLL);
+
+ XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, CDR_RATE,
+ phy_data->cdr_rate[XGBE_SPEED_10000]);
+ XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, TXAMP,
+ phy_data->tx_amp[XGBE_SPEED_10000]);
+ XRXTX_IOWRITE_BITS(pdata, RXTX_REG20, BLWC_ENA,
+ phy_data->blwc[XGBE_SPEED_10000]);
+ XRXTX_IOWRITE_BITS(pdata, RXTX_REG114, PQ_REG,
+ phy_data->pq_skew[XGBE_SPEED_10000]);
+ XRXTX_IOWRITE_BITS(pdata, RXTX_REG129, RXDFE_CONFIG,
+ phy_data->dfe_tap_cfg[XGBE_SPEED_10000]);
+ XRXTX_IOWRITE(pdata, RXTX_REG22,
+ phy_data->dfe_tap_ena[XGBE_SPEED_10000]);
+
+ xgbe_phy_complete_ratechange(pdata);
+
+ netif_dbg(pdata, link, pdata->netdev, "10GbE KR mode set\n");
+}
+
+static void xgbe_phy_kx_2500_mode(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_phy_data *phy_data = pdata->phy_data;
+ unsigned int reg;
+
+ /* Set PCS to KX/1G speed */
+ reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL2);
+ reg &= ~MDIO_PCS_CTRL2_TYPE;
+ reg |= MDIO_PCS_CTRL2_10GBX;
+ XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_CTRL2, reg);
+
+ reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL1);
+ reg &= ~MDIO_CTRL1_SPEEDSEL;
+ reg |= MDIO_CTRL1_SPEED1G;
+ XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_CTRL1, reg);
+
+ xgbe_phy_pcs_power_cycle(pdata);
+
+ /* Set SerDes to 2.5G speed */
+ xgbe_phy_start_ratechange(pdata);
+
+ XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, DATARATE, XGBE_SPEED_2500_RATE);
+ XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, WORDMODE, XGBE_SPEED_2500_WORD);
+ XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, PLLSEL, XGBE_SPEED_2500_PLL);
+
+ XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, CDR_RATE,
+ phy_data->cdr_rate[XGBE_SPEED_2500]);
+ XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, TXAMP,
+ phy_data->tx_amp[XGBE_SPEED_2500]);
+ XRXTX_IOWRITE_BITS(pdata, RXTX_REG20, BLWC_ENA,
+ phy_data->blwc[XGBE_SPEED_2500]);
+ XRXTX_IOWRITE_BITS(pdata, RXTX_REG114, PQ_REG,
+ phy_data->pq_skew[XGBE_SPEED_2500]);
+ XRXTX_IOWRITE_BITS(pdata, RXTX_REG129, RXDFE_CONFIG,
+ phy_data->dfe_tap_cfg[XGBE_SPEED_2500]);
+ XRXTX_IOWRITE(pdata, RXTX_REG22,
+ phy_data->dfe_tap_ena[XGBE_SPEED_2500]);
+
+ xgbe_phy_complete_ratechange(pdata);
+
+ netif_dbg(pdata, link, pdata->netdev, "2.5GbE KX mode set\n");
+}
+
+static void xgbe_phy_kx_1000_mode(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_phy_data *phy_data = pdata->phy_data;
+ unsigned int reg;
+
+ /* Set PCS to KX/1G speed */
+ reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL2);
+ reg &= ~MDIO_PCS_CTRL2_TYPE;
+ reg |= MDIO_PCS_CTRL2_10GBX;
+ XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_CTRL2, reg);
+
+ reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL1);
+ reg &= ~MDIO_CTRL1_SPEEDSEL;
+ reg |= MDIO_CTRL1_SPEED1G;
+ XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_CTRL1, reg);
+
+ xgbe_phy_pcs_power_cycle(pdata);
+
+ /* Set SerDes to 1G speed */
+ xgbe_phy_start_ratechange(pdata);
+
+ XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, DATARATE, XGBE_SPEED_1000_RATE);
+ XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, WORDMODE, XGBE_SPEED_1000_WORD);
+ XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, PLLSEL, XGBE_SPEED_1000_PLL);
+
+ XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, CDR_RATE,
+ phy_data->cdr_rate[XGBE_SPEED_1000]);
+ XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, TXAMP,
+ phy_data->tx_amp[XGBE_SPEED_1000]);
+ XRXTX_IOWRITE_BITS(pdata, RXTX_REG20, BLWC_ENA,
+ phy_data->blwc[XGBE_SPEED_1000]);
+ XRXTX_IOWRITE_BITS(pdata, RXTX_REG114, PQ_REG,
+ phy_data->pq_skew[XGBE_SPEED_1000]);
+ XRXTX_IOWRITE_BITS(pdata, RXTX_REG129, RXDFE_CONFIG,
+ phy_data->dfe_tap_cfg[XGBE_SPEED_1000]);
+ XRXTX_IOWRITE(pdata, RXTX_REG22,
+ phy_data->dfe_tap_ena[XGBE_SPEED_1000]);
+
+ xgbe_phy_complete_ratechange(pdata);
+
+ netif_dbg(pdata, link, pdata->netdev, "1GbE KX mode set\n");
+}
+
+static enum xgbe_mode xgbe_phy_cur_mode(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_phy_data *phy_data = pdata->phy_data;
+ enum xgbe_mode mode;
+ unsigned int reg;
+
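+ /* Infer the current operating mode from the PCS type selection */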
+ reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL2);
+ reg &= MDIO_PCS_CTRL2_TYPE;
+
+ if (reg == MDIO_PCS_CTRL2_10GBR) {
+ mode = XGBE_MODE_KR;
+ } else {
+ if (phy_data->speed_set == XGBE_SPEEDSET_2500_10000)
+ mode = XGBE_MODE_KX_2500;
+ else
+ mode = XGBE_MODE_KX_1000;
+ }
+
+ return mode;
+}
+
+static enum xgbe_mode xgbe_phy_switch_mode(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_phy_data *phy_data = pdata->phy_data;
+ enum xgbe_mode mode;
+
+ /* If we are in KR, switch to KX, and vice versa */
+ if (xgbe_phy_cur_mode(pdata) == XGBE_MODE_KR) {
+ if (phy_data->speed_set == XGBE_SPEEDSET_2500_10000)
+ mode = XGBE_MODE_KX_2500;
+ else
+ mode = XGBE_MODE_KX_1000;
+ } else {
+ mode = XGBE_MODE_KR;
+ }
+
+ return mode;
+}
+
+static enum xgbe_mode xgbe_phy_get_mode(struct xgbe_prv_data *pdata,
+ int speed)
+{
+ struct xgbe_phy_data *phy_data = pdata->phy_data;
+
+ switch (speed) {
+ case SPEED_1000:
+ return (phy_data->speed_set == XGBE_SPEEDSET_1000_10000)
+ ? XGBE_MODE_KX_1000 : XGBE_MODE_UNKNOWN;
+ case SPEED_2500:
+ return (phy_data->speed_set == XGBE_SPEEDSET_2500_10000)
+ ? XGBE_MODE_KX_2500 : XGBE_MODE_UNKNOWN;
+ case SPEED_10000:
+ return XGBE_MODE_KR;
+ default:
+ return XGBE_MODE_UNKNOWN;
+ }
+}
+
+static void xgbe_phy_set_mode(struct xgbe_prv_data *pdata, enum xgbe_mode mode)
+{
+ switch (mode) {
+ case XGBE_MODE_KX_1000:
+ xgbe_phy_kx_1000_mode(pdata);
+ break;
+ case XGBE_MODE_KX_2500:
+ xgbe_phy_kx_2500_mode(pdata);
+ break;
+ case XGBE_MODE_KR:
+ xgbe_phy_kr_mode(pdata);
+ break;
+ default:
+ break;
+ }
+}
+
+static bool xgbe_phy_check_mode(struct xgbe_prv_data *pdata,
+ enum xgbe_mode mode, u32 advert)
+{
+ if (pdata->phy.autoneg == AUTONEG_ENABLE) {
+ if (pdata->phy.advertising & advert)
+ return true;
+ } else {
+ enum xgbe_mode cur_mode;
+
+ cur_mode = xgbe_phy_get_mode(pdata, pdata->phy.speed);
+ if (cur_mode == mode)
+ return true;
+ }
+
+ return false;
+}
+
+static bool xgbe_phy_use_mode(struct xgbe_prv_data *pdata, enum xgbe_mode mode)
+{
+ switch (mode) {
+ case XGBE_MODE_KX_1000:
+ return xgbe_phy_check_mode(pdata, mode,
+ ADVERTISED_1000baseKX_Full);
+ case XGBE_MODE_KX_2500:
+ return xgbe_phy_check_mode(pdata, mode,
+ ADVERTISED_2500baseX_Full);
+ case XGBE_MODE_KR:
+ return xgbe_phy_check_mode(pdata, mode,
+ ADVERTISED_10000baseKR_Full);
+ default:
+ return false;
+ }
+}
+
+static bool xgbe_phy_valid_speed(struct xgbe_prv_data *pdata, int speed)
+{
+ struct xgbe_phy_data *phy_data = pdata->phy_data;
+
+ switch (speed) {
+ case SPEED_1000:
+ if (phy_data->speed_set != XGBE_SPEEDSET_1000_10000)
+ return false;
+ return true;
+ case SPEED_2500:
+ if (phy_data->speed_set != XGBE_SPEEDSET_2500_10000)
+ return false;
+ return true;
+ case SPEED_10000:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static int xgbe_phy_link_status(struct xgbe_prv_data *pdata, int *an_restart)
+{
+ unsigned int reg;
+
+ *an_restart = 0;
+
+ /* Link status is latched low, so read once to clear
+ * and then read again to get current state
+ */
+ reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_STAT1);
+ reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_STAT1);
+
+ return (reg & MDIO_STAT1_LSTATUS) ? 1 : 0;
+}
+
+static void xgbe_phy_stop(struct xgbe_prv_data *pdata)
+{
+ /* Nothing uniquely required for stop */
+}
+
+static int xgbe_phy_start(struct xgbe_prv_data *pdata)
+{
+ /* Nothing uniquely required for start */
+ return 0;
+}
+
+static int xgbe_phy_reset(struct xgbe_prv_data *pdata)
+{
+ unsigned int reg, count;
+
+ /* Perform a software reset of the PCS */
+ reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL1);
+ reg |= MDIO_CTRL1_RESET;
+ XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_CTRL1, reg);
+
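+ /* Poll for the reset bit to self-clear (50 attempts, 20ms apart: ~1s) */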
+ count = 50;
+ do {
+ msleep(20);
+ reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL1);
+ } while ((reg & MDIO_CTRL1_RESET) && --count);
+
+ if (reg & MDIO_CTRL1_RESET)
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+static void xgbe_phy_exit(struct xgbe_prv_data *pdata)
+{
+ /* Nothing uniquely required for exit */
+}
+
+static int xgbe_phy_init(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_phy_data *phy_data;
+ int ret;
+
+ phy_data = devm_kzalloc(pdata->dev, sizeof(*phy_data), GFP_KERNEL);
+ if (!phy_data)
+ return -ENOMEM;
+
+ /* Retrieve the PHY speedset */
+ ret = device_property_read_u32(pdata->phy_dev, XGBE_SPEEDSET_PROPERTY,
+ &phy_data->speed_set);
+ if (ret) {
+ dev_err(pdata->dev, "invalid %s property\n",
+ XGBE_SPEEDSET_PROPERTY);
+ return ret;
+ }
+
+ switch (phy_data->speed_set) {
+ case XGBE_SPEEDSET_1000_10000:
+ case XGBE_SPEEDSET_2500_10000:
+ break;
+ default:
+ dev_err(pdata->dev, "invalid %s property\n",
+ XGBE_SPEEDSET_PROPERTY);
+ return -EINVAL;
+ }
+
+ /* Retrieve the PHY configuration properties */
+ if (device_property_present(pdata->phy_dev, XGBE_BLWC_PROPERTY)) {
+ ret = device_property_read_u32_array(pdata->phy_dev,
+ XGBE_BLWC_PROPERTY,
+ phy_data->blwc,
+ XGBE_SPEEDS);
+ if (ret) {
+ dev_err(pdata->dev, "invalid %s property\n",
+ XGBE_BLWC_PROPERTY);
+ return ret;
+ }
+ } else {
+ memcpy(phy_data->blwc, xgbe_phy_blwc,
+ sizeof(phy_data->blwc));
+ }
+
+ if (device_property_present(pdata->phy_dev, XGBE_CDR_RATE_PROPERTY)) {
+ ret = device_property_read_u32_array(pdata->phy_dev,
+ XGBE_CDR_RATE_PROPERTY,
+ phy_data->cdr_rate,
+ XGBE_SPEEDS);
+ if (ret) {
+ dev_err(pdata->dev, "invalid %s property\n",
+ XGBE_CDR_RATE_PROPERTY);
+ return ret;
+ }
+ } else {
+ memcpy(phy_data->cdr_rate, xgbe_phy_cdr_rate,
+ sizeof(phy_data->cdr_rate));
+ }
+
+ if (device_property_present(pdata->phy_dev, XGBE_PQ_SKEW_PROPERTY)) {
+ ret = device_property_read_u32_array(pdata->phy_dev,
+ XGBE_PQ_SKEW_PROPERTY,
+ phy_data->pq_skew,
+ XGBE_SPEEDS);
+ if (ret) {
+ dev_err(pdata->dev, "invalid %s property\n",
+ XGBE_PQ_SKEW_PROPERTY);
+ return ret;
+ }
+ } else {
+ memcpy(phy_data->pq_skew, xgbe_phy_pq_skew,
+ sizeof(phy_data->pq_skew));
+ }
+
+ if (device_property_present(pdata->phy_dev, XGBE_TX_AMP_PROPERTY)) {
+ ret = device_property_read_u32_array(pdata->phy_dev,
+ XGBE_TX_AMP_PROPERTY,
+ phy_data->tx_amp,
+ XGBE_SPEEDS);
+ if (ret) {
+ dev_err(pdata->dev, "invalid %s property\n",
+ XGBE_TX_AMP_PROPERTY);
+ return ret;
+ }
+ } else {
+ memcpy(phy_data->tx_amp, xgbe_phy_tx_amp,
+ sizeof(phy_data->tx_amp));
+ }
+
+ if (device_property_present(pdata->phy_dev, XGBE_DFE_CFG_PROPERTY)) {
+ ret = device_property_read_u32_array(pdata->phy_dev,
+ XGBE_DFE_CFG_PROPERTY,
+ phy_data->dfe_tap_cfg,
+ XGBE_SPEEDS);
+ if (ret) {
+ dev_err(pdata->dev, "invalid %s property\n",
+ XGBE_DFE_CFG_PROPERTY);
+ return ret;
+ }
+ } else {
+ memcpy(phy_data->dfe_tap_cfg, xgbe_phy_dfe_tap_cfg,
+ sizeof(phy_data->dfe_tap_cfg));
+ }
+
+ if (device_property_present(pdata->phy_dev, XGBE_DFE_ENA_PROPERTY)) {
+ ret = device_property_read_u32_array(pdata->phy_dev,
+ XGBE_DFE_ENA_PROPERTY,
+ phy_data->dfe_tap_ena,
+ XGBE_SPEEDS);
+ if (ret) {
+ dev_err(pdata->dev, "invalid %s property\n",
+ XGBE_DFE_ENA_PROPERTY);
+ return ret;
+ }
+ } else {
+ memcpy(phy_data->dfe_tap_ena, xgbe_phy_dfe_tap_ena,
+ sizeof(phy_data->dfe_tap_ena));
+ }
+
+ /* Initialize supported features */
+ pdata->phy.supported = SUPPORTED_Autoneg;
+ pdata->phy.supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;
+ pdata->phy.supported |= SUPPORTED_Backplane;
+ pdata->phy.supported |= SUPPORTED_10000baseKR_Full;
+ switch (phy_data->speed_set) {
+ case XGBE_SPEEDSET_1000_10000:
+ pdata->phy.supported |= SUPPORTED_1000baseKX_Full;
+ break;
+ case XGBE_SPEEDSET_2500_10000:
+ pdata->phy.supported |= SUPPORTED_2500baseX_Full;
+ break;
+ }
+
+ if (pdata->fec_ability & MDIO_PMA_10GBR_FECABLE_ABLE)
+ pdata->phy.supported |= SUPPORTED_10000baseR_FEC;
+
+ pdata->phy_data = phy_data;
+
+ return 0;
+}
+
+void xgbe_init_function_ptrs_phy_v1(struct xgbe_phy_if *phy_if)
+{
+ struct xgbe_phy_impl_if *phy_impl = &phy_if->phy_impl;
+
+ phy_impl->init = xgbe_phy_init;
+ phy_impl->exit = xgbe_phy_exit;
+
+ phy_impl->reset = xgbe_phy_reset;
+ phy_impl->start = xgbe_phy_start;
+ phy_impl->stop = xgbe_phy_stop;
+
+ phy_impl->link_status = xgbe_phy_link_status;
+
+ phy_impl->valid_speed = xgbe_phy_valid_speed;
+
+ phy_impl->use_mode = xgbe_phy_use_mode;
+ phy_impl->set_mode = xgbe_phy_set_mode;
+ phy_impl->get_mode = xgbe_phy_get_mode;
+ phy_impl->switch_mode = xgbe_phy_switch_mode;
+ phy_impl->cur_mode = xgbe_phy_cur_mode;
+
+ phy_impl->an_mode = xgbe_phy_an_mode;
+
+ phy_impl->an_config = xgbe_phy_an_config;
+
+ phy_impl->an_advertising = xgbe_phy_an_advertising;
+
+ phy_impl->an_outcome = xgbe_phy_an_outcome;
+
+ phy_impl->kr_training_pre = xgbe_phy_kr_training_pre;
+ phy_impl->kr_training_post = xgbe_phy_kr_training_post;
+}
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
new file mode 100644
index 000000000000..9d8c953083b4
--- /dev/null
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
@@ -0,0 +1,3084 @@
+/*
+ * AMD 10Gb Ethernet driver
+ *
+ * This file is available to you under your choice of the following two
+ * licenses:
+ *
+ * License 1: GPLv2
+ *
+ * Copyright (c) 2016 Advanced Micro Devices, Inc.
+ *
+ * This file is free software; you may copy, redistribute and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * This file incorporates work covered by the following copyright and
+ * permission notice:
+ * The Synopsys DWC ETHER XGMAC Software Driver and documentation
+ * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
+ * Inc. unless otherwise expressly agreed to in writing between Synopsys
+ * and you.
+ *
+ * The Software IS NOT an item of Licensed Software or Licensed Product
+ * under any End User Software License Agreement or Agreement for Licensed
+ * Product with Synopsys or any supplement thereto. Permission is hereby
+ * granted, free of charge, to any person obtaining a copy of this software
+ * annotated with this license and the Software, to deal in the Software
+ * without restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is furnished
+ * to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
+ * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+ * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *
+ * License 2: Modified BSD
+ *
+ * Copyright (c) 2016 Advanced Micro Devices, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Advanced Micro Devices, Inc. nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * This file incorporates work covered by the following copyright and
+ * permission notice:
+ * The Synopsys DWC ETHER XGMAC Software Driver and documentation
+ * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
+ * Inc. unless otherwise expressly agreed to in writing between Synopsys
+ * and you.
+ *
+ * The Software IS NOT an item of Licensed Software or Licensed Product
+ * under any End User Software License Agreement or Agreement for Licensed
+ * Product with Synopsys or any supplement thereto. Permission is hereby
+ * granted, free of charge, to any person obtaining a copy of this software
+ * annotated with this license and the Software, to deal in the Software
+ * without restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is furnished
+ * to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
+ * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+ * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/kmod.h>
+#include <linux/mdio.h>
+#include <linux/phy.h>
+
+#include "xgbe.h"
+#include "xgbe-common.h"
+
+#define XGBE_PHY_PORT_SPEED_100 BIT(0)
+#define XGBE_PHY_PORT_SPEED_1000 BIT(1)
+#define XGBE_PHY_PORT_SPEED_2500 BIT(2)
+#define XGBE_PHY_PORT_SPEED_10000 BIT(3)
+
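+ /* Value written to clear/release the I2C and MDIO hardware mutexes */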
+#define XGBE_MUTEX_RELEASE 0x80000000
+
+#define XGBE_SFP_DIRECT 7
+
+/* I2C target addresses */
+#define XGBE_SFP_SERIAL_ID_ADDRESS 0x50
+#define XGBE_SFP_DIAG_INFO_ADDRESS 0x51
+#define XGBE_SFP_PHY_ADDRESS 0x56
+#define XGBE_GPIO_ADDRESS_PCA9555 0x20
+
+/* SFP sideband signal indicators */
+#define XGBE_GPIO_NO_TX_FAULT BIT(0)
+#define XGBE_GPIO_NO_RATE_SELECT BIT(1)
+#define XGBE_GPIO_NO_MOD_ABSENT BIT(2)
+#define XGBE_GPIO_NO_RX_LOS BIT(3)
+
+/* Rate-change complete wait/retry count */
+#define XGBE_RATECHANGE_COUNT 500
+
+enum xgbe_port_mode {
+ XGBE_PORT_MODE_RSVD = 0,
+ XGBE_PORT_MODE_BACKPLANE,
+ XGBE_PORT_MODE_BACKPLANE_2500,
+ XGBE_PORT_MODE_1000BASE_T,
+ XGBE_PORT_MODE_1000BASE_X,
+ XGBE_PORT_MODE_NBASE_T,
+ XGBE_PORT_MODE_10GBASE_T,
+ XGBE_PORT_MODE_10GBASE_R,
+ XGBE_PORT_MODE_SFP,
+ XGBE_PORT_MODE_MAX,
+};
+
+enum xgbe_conn_type {
+ XGBE_CONN_TYPE_NONE = 0,
+ XGBE_CONN_TYPE_SFP,
+ XGBE_CONN_TYPE_MDIO,
+ XGBE_CONN_TYPE_RSVD1,
+ XGBE_CONN_TYPE_BACKPLANE,
+ XGBE_CONN_TYPE_MAX,
+};
+
+/* SFP/SFP+ related definitions */
+enum xgbe_sfp_comm {
+ XGBE_SFP_COMM_DIRECT = 0,
+ XGBE_SFP_COMM_PCA9545,
+};
+
+enum xgbe_sfp_cable {
+ XGBE_SFP_CABLE_UNKNOWN = 0,
+ XGBE_SFP_CABLE_ACTIVE,
+ XGBE_SFP_CABLE_PASSIVE,
+};
+
+enum xgbe_sfp_base {
+ XGBE_SFP_BASE_UNKNOWN = 0,
+ XGBE_SFP_BASE_1000_T,
+ XGBE_SFP_BASE_1000_SX,
+ XGBE_SFP_BASE_1000_LX,
+ XGBE_SFP_BASE_1000_CX,
+ XGBE_SFP_BASE_10000_SR,
+ XGBE_SFP_BASE_10000_LR,
+ XGBE_SFP_BASE_10000_LRM,
+ XGBE_SFP_BASE_10000_ER,
+ XGBE_SFP_BASE_10000_CR,
+};
+
+enum xgbe_sfp_speed {
+ XGBE_SFP_SPEED_UNKNOWN = 0,
+ XGBE_SFP_SPEED_100_1000,
+ XGBE_SFP_SPEED_1000,
+ XGBE_SFP_SPEED_10000,
+};
+
+/* SFP Serial ID Base ID values relative to an offset of 0 */
+#define XGBE_SFP_BASE_ID 0
+#define XGBE_SFP_ID_SFP 0x03
+
+#define XGBE_SFP_BASE_EXT_ID 1
+#define XGBE_SFP_EXT_ID_SFP 0x04
+
+#define XGBE_SFP_BASE_10GBE_CC 3
+#define XGBE_SFP_BASE_10GBE_CC_SR BIT(4)
+#define XGBE_SFP_BASE_10GBE_CC_LR BIT(5)
+#define XGBE_SFP_BASE_10GBE_CC_LRM BIT(6)
+#define XGBE_SFP_BASE_10GBE_CC_ER BIT(7)
+
+#define XGBE_SFP_BASE_1GBE_CC 6
+#define XGBE_SFP_BASE_1GBE_CC_SX BIT(0)
+#define XGBE_SFP_BASE_1GBE_CC_LX BIT(1)
+#define XGBE_SFP_BASE_1GBE_CC_CX BIT(2)
+#define XGBE_SFP_BASE_1GBE_CC_T BIT(3)
+
+#define XGBE_SFP_BASE_CABLE 8
+#define XGBE_SFP_BASE_CABLE_PASSIVE BIT(2)
+#define XGBE_SFP_BASE_CABLE_ACTIVE BIT(3)
+
+#define XGBE_SFP_BASE_BR 12
+#define XGBE_SFP_BASE_BR_1GBE_MIN 0x0a
+#define XGBE_SFP_BASE_BR_1GBE_MAX 0x0d
+#define XGBE_SFP_BASE_BR_10GBE_MIN 0x64
+#define XGBE_SFP_BASE_BR_10GBE_MAX 0x68
+
+#define XGBE_SFP_BASE_CU_CABLE_LEN 18
+
+#define XGBE_SFP_BASE_VENDOR_NAME 20
+#define XGBE_SFP_BASE_VENDOR_NAME_LEN 16
+#define XGBE_SFP_BASE_VENDOR_PN 40
+#define XGBE_SFP_BASE_VENDOR_PN_LEN 16
+#define XGBE_SFP_BASE_VENDOR_REV 56
+#define XGBE_SFP_BASE_VENDOR_REV_LEN 4
+
+#define XGBE_SFP_BASE_CC 63
+
+/* SFP Serial ID Extended ID values relative to an offset of 64 */
+#define XGBE_SFP_BASE_VENDOR_SN 4
+#define XGBE_SFP_BASE_VENDOR_SN_LEN 16
+
+#define XGBE_SFP_EXTD_DIAG 28
+#define XGBE_SFP_EXTD_DIAG_ADDR_CHANGE BIT(2)
+
+#define XGBE_SFP_EXTD_SFF_8472 30
+
+#define XGBE_SFP_EXTD_CC 31
+
+struct xgbe_sfp_eeprom {
+ u8 base[64];
+ u8 extd[32];
+ u8 vendor[32];
+};
+
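+ /* Vendor strings in the EEPROM are space-padded to the full field width */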
+#define XGBE_BEL_FUSE_VENDOR "BEL-FUSE "
+#define XGBE_BEL_FUSE_PARTNO "1GBT-SFP06 "
+
+struct xgbe_sfp_ascii {
+ union {
+ char vendor[XGBE_SFP_BASE_VENDOR_NAME_LEN + 1];
+ char partno[XGBE_SFP_BASE_VENDOR_PN_LEN + 1];
+ char rev[XGBE_SFP_BASE_VENDOR_REV_LEN + 1];
+ char serno[XGBE_SFP_BASE_VENDOR_SN_LEN + 1];
+ } u;
+};
+
+/* MDIO PHY reset types */
+enum xgbe_mdio_reset {
+ XGBE_MDIO_RESET_NONE = 0,
+ XGBE_MDIO_RESET_I2C_GPIO,
+ XGBE_MDIO_RESET_INT_GPIO,
+ XGBE_MDIO_RESET_MAX,
+};
+
+/* Re-driver related definitions */
+enum xgbe_phy_redrv_if {
+ XGBE_PHY_REDRV_IF_MDIO = 0,
+ XGBE_PHY_REDRV_IF_I2C,
+ XGBE_PHY_REDRV_IF_MAX,
+};
+
+enum xgbe_phy_redrv_model {
+ XGBE_PHY_REDRV_MODEL_4223 = 0,
+ XGBE_PHY_REDRV_MODEL_4227,
+ XGBE_PHY_REDRV_MODEL_MAX,
+};
+
+enum xgbe_phy_redrv_mode {
+ XGBE_PHY_REDRV_MODE_CX = 5,
+ XGBE_PHY_REDRV_MODE_SR = 9,
+};
+
+#define XGBE_PHY_REDRV_MODE_REG 0x12b0
+
+/* PHY related configuration information */
+struct xgbe_phy_data {
+ enum xgbe_port_mode port_mode;
+
+ unsigned int port_id;
+
+ unsigned int port_speeds;
+
+ enum xgbe_conn_type conn_type;
+
+ enum xgbe_mode cur_mode;
+ enum xgbe_mode start_mode;
+
+ unsigned int rrc_count;
+
+ unsigned int mdio_addr;
+
+ unsigned int comm_owned;
+
+ /* SFP Support */
+ enum xgbe_sfp_comm sfp_comm;
+ unsigned int sfp_mux_address;
+ unsigned int sfp_mux_channel;
+
+ unsigned int sfp_gpio_address;
+ unsigned int sfp_gpio_mask;
+ unsigned int sfp_gpio_rx_los;
+ unsigned int sfp_gpio_tx_fault;
+ unsigned int sfp_gpio_mod_absent;
+ unsigned int sfp_gpio_rate_select;
+
+ unsigned int sfp_rx_los;
+ unsigned int sfp_tx_fault;
+ unsigned int sfp_mod_absent;
+ unsigned int sfp_diags;
+ unsigned int sfp_changed;
+ unsigned int sfp_phy_avail;
+ unsigned int sfp_cable_len;
+ enum xgbe_sfp_base sfp_base;
+ enum xgbe_sfp_cable sfp_cable;
+ enum xgbe_sfp_speed sfp_speed;
+ struct xgbe_sfp_eeprom sfp_eeprom;
+
+ /* External PHY support */
+ enum xgbe_mdio_mode phydev_mode;
+ struct mii_bus *mii;
+ struct phy_device *phydev;
+ enum xgbe_mdio_reset mdio_reset;
+ unsigned int mdio_reset_addr;
+ unsigned int mdio_reset_gpio;
+
+ /* Re-driver support */
+ unsigned int redrv;
+ unsigned int redrv_if;
+ unsigned int redrv_addr;
+ unsigned int redrv_lane;
+ unsigned int redrv_model;
+};
+
+/* I2C, MDIO and GPIO lines are muxed, so only one device at a time */
+static DEFINE_MUTEX(xgbe_phy_comm_lock);
+
+static enum xgbe_an_mode xgbe_phy_an_mode(struct xgbe_prv_data *pdata);
+
+static int xgbe_phy_i2c_xfer(struct xgbe_prv_data *pdata,
+ struct xgbe_i2c_op *i2c_op)
+{
+ struct xgbe_phy_data *phy_data = pdata->phy_data;
+
+ /* Be sure we own the bus */
+ if (WARN_ON(!phy_data->comm_owned))
+ return -EIO;
+
+ return pdata->i2c_if.i2c_xfer(pdata, i2c_op);
+}
+
+static int xgbe_phy_redrv_write(struct xgbe_prv_data *pdata, unsigned int reg,
+ unsigned int val)
+{
+ struct xgbe_phy_data *phy_data = pdata->phy_data;
+ struct xgbe_i2c_op i2c_op;
+ __be16 *redrv_val;
+ u8 redrv_data[5], csum;
+ unsigned int i, retry;
+ int ret;
+
+ /* High byte of register is shifted left one bit; the low bit is the
+ * read/write indicator (cleared here for a write)
+ */
+ redrv_data[0] = ((reg >> 8) & 0xff) << 1;
+ redrv_data[1] = reg & 0xff;
+ redrv_val = (__be16 *)&redrv_data[2];
+ *redrv_val = cpu_to_be16(val);
+
+ /* Calculate 1 byte checksum */
+ csum = 0;
+ for (i = 0; i < 4; i++) {
+ csum += redrv_data[i];
+ if (redrv_data[i] > csum)
+ csum++;
+ }
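+ /* Checksum byte is the inverted sum (with end-around carry) */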
+ redrv_data[4] = ~csum;
+
+ retry = 1;
+again1:
+ i2c_op.cmd = XGBE_I2C_CMD_WRITE;
+ i2c_op.target = phy_data->redrv_addr;
+ i2c_op.len = sizeof(redrv_data);
+ i2c_op.buf = redrv_data;
+ ret = xgbe_phy_i2c_xfer(pdata, &i2c_op);
+ if (ret) {
+ if ((ret == -EAGAIN) && retry--)
+ goto again1;
+
+ return ret;
+ }
+
+ retry = 1;
+again2:
+ i2c_op.cmd = XGBE_I2C_CMD_READ;
+ i2c_op.target = phy_data->redrv_addr;
+ i2c_op.len = 1;
+ i2c_op.buf = redrv_data;
+ ret = xgbe_phy_i2c_xfer(pdata, &i2c_op);
+ if (ret) {
+ if ((ret == -EAGAIN) && retry--)
+ goto again2;
+
+ return ret;
+ }
+
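+ /* The device acknowledges a good write by returning 0xff */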
+ if (redrv_data[0] != 0xff) {
+ netif_dbg(pdata, drv, pdata->netdev,
+ "Redriver write checksum error\n");
+ ret = -EIO;
+ }
+
+ return ret;
+}
+
+static int xgbe_phy_i2c_write(struct xgbe_prv_data *pdata, unsigned int target,
+ void *val, unsigned int val_len)
+{
+ struct xgbe_i2c_op i2c_op;
+ int retry, ret;
+
+ retry = 1;
+again:
+ /* Write the specified register */
+ i2c_op.cmd = XGBE_I2C_CMD_WRITE;
+ i2c_op.target = target;
+ i2c_op.len = val_len;
+ i2c_op.buf = val;
+ ret = xgbe_phy_i2c_xfer(pdata, &i2c_op);
+ if ((ret == -EAGAIN) && retry--)
+ goto again;
+
+ return ret;
+}
+
+static int xgbe_phy_i2c_read(struct xgbe_prv_data *pdata, unsigned int target,
+ void *reg, unsigned int reg_len,
+ void *val, unsigned int val_len)
+{
+ struct xgbe_i2c_op i2c_op;
+ int retry, ret;
+
+ retry = 1;
+again1:
+ /* Set the specified register to read */
+ i2c_op.cmd = XGBE_I2C_CMD_WRITE;
+ i2c_op.target = target;
+ i2c_op.len = reg_len;
+ i2c_op.buf = reg;
+ ret = xgbe_phy_i2c_xfer(pdata, &i2c_op);
+ if (ret) {
+ if ((ret == -EAGAIN) && retry--)
+ goto again1;
+
+ return ret;
+ }
+
+ retry = 1;
+again2:
+ /* Read the specified register */
+ i2c_op.cmd = XGBE_I2C_CMD_READ;
+ i2c_op.target = target;
+ i2c_op.len = val_len;
+ i2c_op.buf = val;
+ ret = xgbe_phy_i2c_xfer(pdata, &i2c_op);
+ if ((ret == -EAGAIN) && retry--)
+ goto again2;
+
+ return ret;
+}
+
+static int xgbe_phy_sfp_put_mux(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_phy_data *phy_data = pdata->phy_data;
+ struct xgbe_i2c_op i2c_op;
+ u8 mux_channel;
+
+ if (phy_data->sfp_comm == XGBE_SFP_COMM_DIRECT)
+ return 0;
+
+ /* Select no mux channels */
+ mux_channel = 0;
+ i2c_op.cmd = XGBE_I2C_CMD_WRITE;
+ i2c_op.target = phy_data->sfp_mux_address;
+ i2c_op.len = sizeof(mux_channel);
+ i2c_op.buf = &mux_channel;
+
+ return xgbe_phy_i2c_xfer(pdata, &i2c_op);
+}
+
+static int xgbe_phy_sfp_get_mux(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_phy_data *phy_data = pdata->phy_data;
+ struct xgbe_i2c_op i2c_op;
+ u8 mux_channel;
+
+ if (phy_data->sfp_comm == XGBE_SFP_COMM_DIRECT)
+ return 0;
+
+ /* Select desired mux channel */
+ mux_channel = 1 << phy_data->sfp_mux_channel;
+ i2c_op.cmd = XGBE_I2C_CMD_WRITE;
+ i2c_op.target = phy_data->sfp_mux_address;
+ i2c_op.len = sizeof(mux_channel);
+ i2c_op.buf = &mux_channel;
+
+ return xgbe_phy_i2c_xfer(pdata, &i2c_op);
+}
+
+static void xgbe_phy_put_comm_ownership(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_phy_data *phy_data = pdata->phy_data;
+
+ phy_data->comm_owned = 0;
+
+ mutex_unlock(&xgbe_phy_comm_lock);
+}
+
+static int xgbe_phy_get_comm_ownership(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_phy_data *phy_data = pdata->phy_data;
+ unsigned long timeout;
+ unsigned int mutex_id;
+
+ if (phy_data->comm_owned)
+ return 0;
+
+ /* The I2C and MDIO/GPIO buses are multiplexed between multiple devices,
+ * so the driver needs to take the software mutex and then the hardware
+ * mutexes before it can use the buses.
+ */
+ mutex_lock(&xgbe_phy_comm_lock);
+
+ /* Clear the mutexes */
+ XP_IOWRITE(pdata, XP_I2C_MUTEX, XGBE_MUTEX_RELEASE);
+ XP_IOWRITE(pdata, XP_MDIO_MUTEX, XGBE_MUTEX_RELEASE);
+
+ /* Mutex formats are the same for I2C and MDIO/GPIO */
+ mutex_id = 0;
+ XP_SET_BITS(mutex_id, XP_I2C_MUTEX, ID, phy_data->port_id);
+ XP_SET_BITS(mutex_id, XP_I2C_MUTEX, ACTIVE, 1);
+
+ timeout = jiffies + (5 * HZ);
+ while (time_before(jiffies, timeout)) {
+ /* Must be all zeroes in order to obtain the mutex */
+ if (XP_IOREAD(pdata, XP_I2C_MUTEX) ||
+ XP_IOREAD(pdata, XP_MDIO_MUTEX)) {
+ usleep_range(100, 200);
+ continue;
+ }
+
+ /* Obtain the mutex */
+ XP_IOWRITE(pdata, XP_I2C_MUTEX, mutex_id);
+ XP_IOWRITE(pdata, XP_MDIO_MUTEX, mutex_id);
+
+ phy_data->comm_owned = 1;
+ return 0;
+ }
+
+ mutex_unlock(&xgbe_phy_comm_lock);
+
+ netdev_err(pdata->netdev, "unable to obtain hardware mutexes\n");
+
+ return -ETIMEDOUT;
+}
+
+static int xgbe_phy_mdio_mii_write(struct xgbe_prv_data *pdata, int addr,
+ int reg, u16 val)
+{
+ struct xgbe_phy_data *phy_data = pdata->phy_data;
+
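+ /* The MDIO access mode must match the register access type (C45 vs C22) */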
+ if (reg & MII_ADDR_C45) {
+ if (phy_data->phydev_mode != XGBE_MDIO_MODE_CL45)
+ return -ENOTSUPP;
+ } else {
+ if (phy_data->phydev_mode != XGBE_MDIO_MODE_CL22)
+ return -ENOTSUPP;
+ }
+
+ return pdata->hw_if.write_ext_mii_regs(pdata, addr, reg, val);
+}
+
+static int xgbe_phy_i2c_mii_write(struct xgbe_prv_data *pdata, int reg, u16 val)
+{
+ __be16 *mii_val;
+ u8 mii_data[3];
+ int ret;
+
+ ret = xgbe_phy_sfp_get_mux(pdata);
+ if (ret)
+ return ret;
+
+ mii_data[0] = reg & 0xff;
+ mii_val = (__be16 *)&mii_data[1];
+ *mii_val = cpu_to_be16(val);
+
+ ret = xgbe_phy_i2c_write(pdata, XGBE_SFP_PHY_ADDRESS,
+ mii_data, sizeof(mii_data));
+
+ xgbe_phy_sfp_put_mux(pdata);
+
+ return ret;
+}
+
+static int xgbe_phy_mii_write(struct mii_bus *mii, int addr, int reg, u16 val)
+{
+ struct xgbe_prv_data *pdata = mii->priv;
+ struct xgbe_phy_data *phy_data = pdata->phy_data;
+ int ret;
+
+ ret = xgbe_phy_get_comm_ownership(pdata);
+ if (ret)
+ return ret;
+
+ if (phy_data->conn_type == XGBE_CONN_TYPE_SFP)
+ ret = xgbe_phy_i2c_mii_write(pdata, reg, val);
+ else if (phy_data->conn_type & XGBE_CONN_TYPE_MDIO)
+ ret = xgbe_phy_mdio_mii_write(pdata, addr, reg, val);
+ else
+ ret = -ENOTSUPP;
+
+ xgbe_phy_put_comm_ownership(pdata);
+
+ return ret;
+}
+
+static int xgbe_phy_mdio_mii_read(struct xgbe_prv_data *pdata, int addr,
+ int reg)
+{
+ struct xgbe_phy_data *phy_data = pdata->phy_data;
+
+ if (reg & MII_ADDR_C45) {
+ if (phy_data->phydev_mode != XGBE_MDIO_MODE_CL45)
+ return -ENOTSUPP;
+ } else {
+ if (phy_data->phydev_mode != XGBE_MDIO_MODE_CL22)
+ return -ENOTSUPP;
+ }
+
+ return pdata->hw_if.read_ext_mii_regs(pdata, addr, reg);
+}
+
+static int xgbe_phy_i2c_mii_read(struct xgbe_prv_data *pdata, int reg)
+{
+ __be16 mii_val;
+ u8 mii_reg;
+ int ret;
+
+ ret = xgbe_phy_sfp_get_mux(pdata);
+ if (ret)
+ return ret;
+
+ mii_reg = reg;
+ ret = xgbe_phy_i2c_read(pdata, XGBE_SFP_PHY_ADDRESS,
+ &mii_reg, sizeof(mii_reg),
+ &mii_val, sizeof(mii_val));
+ if (!ret)
+ ret = be16_to_cpu(mii_val);
+
+ xgbe_phy_sfp_put_mux(pdata);
+
+ return ret;
+}
+
+static int xgbe_phy_mii_read(struct mii_bus *mii, int addr, int reg)
+{
+ struct xgbe_prv_data *pdata = mii->priv;
+ struct xgbe_phy_data *phy_data = pdata->phy_data;
+ int ret;
+
+ ret = xgbe_phy_get_comm_ownership(pdata);
+ if (ret)
+ return ret;
+
+ if (phy_data->conn_type == XGBE_CONN_TYPE_SFP)
+ ret = xgbe_phy_i2c_mii_read(pdata, reg);
+ else if (phy_data->conn_type & XGBE_CONN_TYPE_MDIO)
+ ret = xgbe_phy_mdio_mii_read(pdata, addr, reg);
+ else
+ ret = -ENOTSUPP;
+
+ xgbe_phy_put_comm_ownership(pdata);
+
+ return ret;
+}
+
+static void xgbe_phy_sfp_phy_settings(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_phy_data *phy_data = pdata->phy_data;
+
+ if (phy_data->sfp_mod_absent) {
+ pdata->phy.speed = SPEED_UNKNOWN;
+ pdata->phy.duplex = DUPLEX_UNKNOWN;
+ pdata->phy.autoneg = AUTONEG_ENABLE;
+ pdata->phy.advertising = pdata->phy.supported;
+ }
+
+ pdata->phy.advertising &= ~ADVERTISED_Autoneg;
+ pdata->phy.advertising &= ~ADVERTISED_TP;
+ pdata->phy.advertising &= ~ADVERTISED_FIBRE;
+ pdata->phy.advertising &= ~ADVERTISED_100baseT_Full;
+ pdata->phy.advertising &= ~ADVERTISED_1000baseT_Full;
+ pdata->phy.advertising &= ~ADVERTISED_10000baseT_Full;
+ pdata->phy.advertising &= ~ADVERTISED_10000baseR_FEC;
+
+ switch (phy_data->sfp_base) {
+ case XGBE_SFP_BASE_1000_T:
+ case XGBE_SFP_BASE_1000_SX:
+ case XGBE_SFP_BASE_1000_LX:
+ case XGBE_SFP_BASE_1000_CX:
+ pdata->phy.speed = SPEED_UNKNOWN;
+ pdata->phy.duplex = DUPLEX_UNKNOWN;
+ pdata->phy.autoneg = AUTONEG_ENABLE;
+ pdata->phy.advertising |= ADVERTISED_Autoneg;
+ break;
+ case XGBE_SFP_BASE_10000_SR:
+ case XGBE_SFP_BASE_10000_LR:
+ case XGBE_SFP_BASE_10000_LRM:
+ case XGBE_SFP_BASE_10000_ER:
+ case XGBE_SFP_BASE_10000_CR:
+ default:
+ pdata->phy.speed = SPEED_10000;
+ pdata->phy.duplex = DUPLEX_FULL;
+ pdata->phy.autoneg = AUTONEG_DISABLE;
+ break;
+ }
+
+ switch (phy_data->sfp_base) {
+ case XGBE_SFP_BASE_1000_T:
+ case XGBE_SFP_BASE_1000_CX:
+ case XGBE_SFP_BASE_10000_CR:
+ pdata->phy.advertising |= ADVERTISED_TP;
+ break;
+ default:
+ pdata->phy.advertising |= ADVERTISED_FIBRE;
+ }
+
+ switch (phy_data->sfp_speed) {
+ case XGBE_SFP_SPEED_100_1000:
+ if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_100)
+ pdata->phy.advertising |= ADVERTISED_100baseT_Full;
+ if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_1000)
+ pdata->phy.advertising |= ADVERTISED_1000baseT_Full;
+ break;
+ case XGBE_SFP_SPEED_1000:
+ if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_1000)
+ pdata->phy.advertising |= ADVERTISED_1000baseT_Full;
+ break;
+ case XGBE_SFP_SPEED_10000:
+ if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_10000)
+ pdata->phy.advertising |= ADVERTISED_10000baseT_Full;
+ break;
+ default:
+ /* Choose the fastest supported speed */
+ if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_10000)
+ pdata->phy.advertising |= ADVERTISED_10000baseT_Full;
+ else if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_1000)
+ pdata->phy.advertising |= ADVERTISED_1000baseT_Full;
+ else if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_100)
+ pdata->phy.advertising |= ADVERTISED_100baseT_Full;
+ }
+}
+
+static bool xgbe_phy_sfp_bit_rate(struct xgbe_sfp_eeprom *sfp_eeprom,
+ enum xgbe_sfp_speed sfp_speed)
+{
+ u8 *sfp_base, min, max;
+
+ sfp_base = sfp_eeprom->base;
+
+ switch (sfp_speed) {
+ case XGBE_SFP_SPEED_1000:
+ min = XGBE_SFP_BASE_BR_1GBE_MIN;
+ max = XGBE_SFP_BASE_BR_1GBE_MAX;
+ break;
+ case XGBE_SFP_SPEED_10000:
+ min = XGBE_SFP_BASE_BR_10GBE_MIN;
+ max = XGBE_SFP_BASE_BR_10GBE_MAX;
+ break;
+ default:
+ return false;
+ }
+
+ return ((sfp_base[XGBE_SFP_BASE_BR] >= min) &&
+ (sfp_base[XGBE_SFP_BASE_BR] <= max));
+}
+
+static void xgbe_phy_free_phy_device(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_phy_data *phy_data = pdata->phy_data;
+
+ if (phy_data->phydev) {
+ phy_detach(phy_data->phydev);
+ phy_device_remove(phy_data->phydev);
+ phy_device_free(phy_data->phydev);
+ phy_data->phydev = NULL;
+ }
+}
+
+static bool xgbe_phy_finisar_phy_quirks(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_phy_data *phy_data = pdata->phy_data;
+ unsigned int phy_id = phy_data->phydev->phy_id;
+
+ if ((phy_id & 0xfffffff0) != 0x01ff0cc0)
+ return false;
+
+ /* Enable Base-T AN */
+ phy_write(phy_data->phydev, 0x16, 0x0001);
+ phy_write(phy_data->phydev, 0x00, 0x9140);
+ phy_write(phy_data->phydev, 0x16, 0x0000);
+
+ /* Enable SGMII at 100Base-T/1000Base-T Full Duplex */
+ phy_write(phy_data->phydev, 0x1b, 0x9084);
+ phy_write(phy_data->phydev, 0x09, 0x0e00);
+ phy_write(phy_data->phydev, 0x00, 0x8140);
+ phy_write(phy_data->phydev, 0x04, 0x0d01);
+ phy_write(phy_data->phydev, 0x00, 0x9140);
+
+ phy_data->phydev->supported = PHY_GBIT_FEATURES;
+ phy_data->phydev->supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;
+ phy_data->phydev->advertising = phy_data->phydev->supported;
+
+ netif_dbg(pdata, drv, pdata->netdev,
+ "Finisar PHY quirk in place\n");
+
+ return true;
+}
+
+static void xgbe_phy_external_phy_quirks(struct xgbe_prv_data *pdata)
+{
+ if (xgbe_phy_finisar_phy_quirks(pdata))
+ return;
+}
+
+static int xgbe_phy_find_phy_device(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_phy_data *phy_data = pdata->phy_data;
+ struct phy_device *phydev;
+ int ret;
+
+ /* If we already have a PHY, just return */
+ if (phy_data->phydev)
+ return 0;
+
+ /* Check for the use of an external PHY */
+ if (phy_data->phydev_mode == XGBE_MDIO_MODE_NONE)
+ return 0;
+
+ /* For SFP, only use an external PHY if available */
+ if ((phy_data->port_mode == XGBE_PORT_MODE_SFP) &&
+ !phy_data->sfp_phy_avail)
+ return 0;
+
+ /* Create and connect to the PHY device */
+ phydev = get_phy_device(phy_data->mii, phy_data->mdio_addr,
+ (phy_data->phydev_mode == XGBE_MDIO_MODE_CL45));
+ if (IS_ERR(phydev)) {
+ netdev_err(pdata->netdev, "get_phy_device failed\n");
+ return -ENODEV;
+ }
+ netif_dbg(pdata, drv, pdata->netdev, "external PHY id is %#010x\n",
+ phydev->phy_id);
+
+ /* TODO: If c45, add request_module based on one of the MMD ids? */
+
+ ret = phy_device_register(phydev);
+ if (ret) {
+ netdev_err(pdata->netdev, "phy_device_register failed\n");
+ phy_device_free(phydev);
+ return ret;
+ }
+
+ ret = phy_attach_direct(pdata->netdev, phydev, phydev->dev_flags,
+ PHY_INTERFACE_MODE_SGMII);
+ if (ret) {
+ netdev_err(pdata->netdev, "phy_attach_direct failed\n");
+ phy_device_remove(phydev);
+ phy_device_free(phydev);
+ return ret;
+ }
+ phy_data->phydev = phydev;
+
+ xgbe_phy_external_phy_quirks(pdata);
+ phydev->advertising &= pdata->phy.advertising;
+
+ phy_start_aneg(phy_data->phydev);
+
+ return 0;
+}
+
+static void xgbe_phy_sfp_external_phy(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_phy_data *phy_data = pdata->phy_data;
+ int ret;
+
+ if (!phy_data->sfp_changed)
+ return;
+
+ phy_data->sfp_phy_avail = 0;
+
+ if (phy_data->sfp_base != XGBE_SFP_BASE_1000_T)
+ return;
+
+ /* Check access to the PHY by reading CTRL1 */
+ ret = xgbe_phy_i2c_mii_read(pdata, MII_BMCR);
+ if (ret < 0)
+ return;
+
+ /* Successfully accessed the PHY */
+ phy_data->sfp_phy_avail = 1;
+}
+
+static bool xgbe_phy_belfuse_parse_quirks(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_phy_data *phy_data = pdata->phy_data;
+ struct xgbe_sfp_eeprom *sfp_eeprom = &phy_data->sfp_eeprom;
+
+ if (memcmp(&sfp_eeprom->base[XGBE_SFP_BASE_VENDOR_NAME],
+ XGBE_BEL_FUSE_VENDOR, XGBE_SFP_BASE_VENDOR_NAME_LEN))
+ return false;
+
+ if (!memcmp(&sfp_eeprom->base[XGBE_SFP_BASE_VENDOR_PN],
+ XGBE_BEL_FUSE_PARTNO, XGBE_SFP_BASE_VENDOR_PN_LEN)) {
+ phy_data->sfp_base = XGBE_SFP_BASE_1000_SX;
+ phy_data->sfp_cable = XGBE_SFP_CABLE_ACTIVE;
+ phy_data->sfp_speed = XGBE_SFP_SPEED_1000;
+ if (phy_data->sfp_changed)
+ netif_dbg(pdata, drv, pdata->netdev,
+ "Bel-Fuse SFP quirk in place\n");
+ return true;
+ }
+
+ return false;
+}
+
+static bool xgbe_phy_sfp_parse_quirks(struct xgbe_prv_data *pdata)
+{
+ if (xgbe_phy_belfuse_parse_quirks(pdata))
+ return true;
+
+ return false;
+}
+
+static void xgbe_phy_sfp_parse_eeprom(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_phy_data *phy_data = pdata->phy_data;
+ struct xgbe_sfp_eeprom *sfp_eeprom = &phy_data->sfp_eeprom;
+ u8 *sfp_base;
+
+ sfp_base = sfp_eeprom->base;
+
+ if (sfp_base[XGBE_SFP_BASE_ID] != XGBE_SFP_ID_SFP)
+ return;
+
+ if (sfp_base[XGBE_SFP_BASE_EXT_ID] != XGBE_SFP_EXT_ID_SFP)
+ return;
+
+ if (xgbe_phy_sfp_parse_quirks(pdata))
+ return;
+
+ /* Assume ACTIVE cable unless told it is PASSIVE */
+ if (sfp_base[XGBE_SFP_BASE_CABLE] & XGBE_SFP_BASE_CABLE_PASSIVE) {
+ phy_data->sfp_cable = XGBE_SFP_CABLE_PASSIVE;
+ phy_data->sfp_cable_len = sfp_base[XGBE_SFP_BASE_CU_CABLE_LEN];
+ } else {
+ phy_data->sfp_cable = XGBE_SFP_CABLE_ACTIVE;
+ }
+
+ /* Determine the type of SFP */
+ if (sfp_base[XGBE_SFP_BASE_10GBE_CC] & XGBE_SFP_BASE_10GBE_CC_SR)
+ phy_data->sfp_base = XGBE_SFP_BASE_10000_SR;
+ else if (sfp_base[XGBE_SFP_BASE_10GBE_CC] & XGBE_SFP_BASE_10GBE_CC_LR)
+ phy_data->sfp_base = XGBE_SFP_BASE_10000_LR;
+ else if (sfp_base[XGBE_SFP_BASE_10GBE_CC] & XGBE_SFP_BASE_10GBE_CC_LRM)
+ phy_data->sfp_base = XGBE_SFP_BASE_10000_LRM;
+ else if (sfp_base[XGBE_SFP_BASE_10GBE_CC] & XGBE_SFP_BASE_10GBE_CC_ER)
+ phy_data->sfp_base = XGBE_SFP_BASE_10000_ER;
+ else if (sfp_base[XGBE_SFP_BASE_1GBE_CC] & XGBE_SFP_BASE_1GBE_CC_SX)
+ phy_data->sfp_base = XGBE_SFP_BASE_1000_SX;
+ else if (sfp_base[XGBE_SFP_BASE_1GBE_CC] & XGBE_SFP_BASE_1GBE_CC_LX)
+ phy_data->sfp_base = XGBE_SFP_BASE_1000_LX;
+ else if (sfp_base[XGBE_SFP_BASE_1GBE_CC] & XGBE_SFP_BASE_1GBE_CC_CX)
+ phy_data->sfp_base = XGBE_SFP_BASE_1000_CX;
+ else if (sfp_base[XGBE_SFP_BASE_1GBE_CC] & XGBE_SFP_BASE_1GBE_CC_T)
+ phy_data->sfp_base = XGBE_SFP_BASE_1000_T;
+ else if ((phy_data->sfp_cable == XGBE_SFP_CABLE_PASSIVE) &&
+ xgbe_phy_sfp_bit_rate(sfp_eeprom, XGBE_SFP_SPEED_10000))
+ phy_data->sfp_base = XGBE_SFP_BASE_10000_CR;
+
+ switch (phy_data->sfp_base) {
+ case XGBE_SFP_BASE_1000_T:
+ phy_data->sfp_speed = XGBE_SFP_SPEED_100_1000;
+ break;
+ case XGBE_SFP_BASE_1000_SX:
+ case XGBE_SFP_BASE_1000_LX:
+ case XGBE_SFP_BASE_1000_CX:
+ phy_data->sfp_speed = XGBE_SFP_SPEED_1000;
+ break;
+ case XGBE_SFP_BASE_10000_SR:
+ case XGBE_SFP_BASE_10000_LR:
+ case XGBE_SFP_BASE_10000_LRM:
+ case XGBE_SFP_BASE_10000_ER:
+ case XGBE_SFP_BASE_10000_CR:
+ phy_data->sfp_speed = XGBE_SFP_SPEED_10000;
+ break;
+ default:
+ break;
+ }
+}
+
+static void xgbe_phy_sfp_eeprom_info(struct xgbe_prv_data *pdata,
+ struct xgbe_sfp_eeprom *sfp_eeprom)
+{
+ struct xgbe_sfp_ascii sfp_ascii;
+ char *sfp_data = (char *)&sfp_ascii;
+
+ netif_dbg(pdata, drv, pdata->netdev, "SFP detected:\n");
+ memcpy(sfp_data, &sfp_eeprom->base[XGBE_SFP_BASE_VENDOR_NAME],
+ XGBE_SFP_BASE_VENDOR_NAME_LEN);
+ sfp_data[XGBE_SFP_BASE_VENDOR_NAME_LEN] = '\0';
+ netif_dbg(pdata, drv, pdata->netdev, " vendor: %s\n",
+ sfp_data);
+
+ memcpy(sfp_data, &sfp_eeprom->base[XGBE_SFP_BASE_VENDOR_PN],
+ XGBE_SFP_BASE_VENDOR_PN_LEN);
+ sfp_data[XGBE_SFP_BASE_VENDOR_PN_LEN] = '\0';
+ netif_dbg(pdata, drv, pdata->netdev, " part number: %s\n",
+ sfp_data);
+
+ memcpy(sfp_data, &sfp_eeprom->base[XGBE_SFP_BASE_VENDOR_REV],
+ XGBE_SFP_BASE_VENDOR_REV_LEN);
+ sfp_data[XGBE_SFP_BASE_VENDOR_REV_LEN] = '\0';
+ netif_dbg(pdata, drv, pdata->netdev, " revision level: %s\n",
+ sfp_data);
+
+ memcpy(sfp_data, &sfp_eeprom->extd[XGBE_SFP_BASE_VENDOR_SN],
+ XGBE_SFP_BASE_VENDOR_SN_LEN);
+ sfp_data[XGBE_SFP_BASE_VENDOR_SN_LEN] = '\0';
+ netif_dbg(pdata, drv, pdata->netdev, " serial number: %s\n",
+ sfp_data);
+}
+
+static bool xgbe_phy_sfp_verify_eeprom(u8 cc_in, u8 *buf, unsigned int len)
+{
+ u8 cc;
+
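+ /* Checksum is the low byte of the sum of all bytes in the buffer */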
+ for (cc = 0; len; buf++, len--)
+ cc += *buf;
+
+ return cc == cc_in;
+}
+
+static int xgbe_phy_sfp_read_eeprom(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_phy_data *phy_data = pdata->phy_data;
+ struct xgbe_sfp_eeprom sfp_eeprom;
+ u8 eeprom_addr;
+ int ret;
+
+ ret = xgbe_phy_sfp_get_mux(pdata);
+ if (ret) {
+ netdev_err(pdata->netdev, "I2C error setting SFP MUX\n");
+ return ret;
+ }
+
+ /* Read the SFP serial ID EEPROM */
+ eeprom_addr = 0;
+ ret = xgbe_phy_i2c_read(pdata, XGBE_SFP_SERIAL_ID_ADDRESS,
+ &eeprom_addr, sizeof(eeprom_addr),
+ &sfp_eeprom, sizeof(sfp_eeprom));
+ if (ret) {
+ netdev_err(pdata->netdev, "I2C error reading SFP EEPROM\n");
+ goto put;
+ }
+
+ /* Validate the contents read */
+ if (!xgbe_phy_sfp_verify_eeprom(sfp_eeprom.base[XGBE_SFP_BASE_CC],
+ sfp_eeprom.base,
+ sizeof(sfp_eeprom.base) - 1)) {
+ ret = -EINVAL;
+ goto put;
+ }
+
+ if (!xgbe_phy_sfp_verify_eeprom(sfp_eeprom.extd[XGBE_SFP_EXTD_CC],
+ sfp_eeprom.extd,
+ sizeof(sfp_eeprom.extd) - 1)) {
+ ret = -EINVAL;
+ goto put;
+ }
+
+ /* Check for an added or changed SFP */
+ if (memcmp(&phy_data->sfp_eeprom, &sfp_eeprom, sizeof(sfp_eeprom))) {
+ phy_data->sfp_changed = 1;
+
+ if (netif_msg_drv(pdata))
+ xgbe_phy_sfp_eeprom_info(pdata, &sfp_eeprom);
+
+ memcpy(&phy_data->sfp_eeprom, &sfp_eeprom, sizeof(sfp_eeprom));
+
+ if (sfp_eeprom.extd[XGBE_SFP_EXTD_SFF_8472]) {
+ u8 diag_type = sfp_eeprom.extd[XGBE_SFP_EXTD_DIAG];
+
+ if (!(diag_type & XGBE_SFP_EXTD_DIAG_ADDR_CHANGE))
+ phy_data->sfp_diags = 1;
+ }
+
+ xgbe_phy_free_phy_device(pdata);
+ } else {
+ phy_data->sfp_changed = 0;
+ }
+
+put:
+ xgbe_phy_sfp_put_mux(pdata);
+
+ return ret;
+}
+
+static void xgbe_phy_sfp_signals(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_phy_data *phy_data = pdata->phy_data;
+ unsigned int gpio_input;
+ u8 gpio_reg, gpio_ports[2];
+ int ret;
+
+ /* Read the input port registers */
+ gpio_reg = 0;
+ ret = xgbe_phy_i2c_read(pdata, phy_data->sfp_gpio_address,
+ &gpio_reg, sizeof(gpio_reg),
+ gpio_ports, sizeof(gpio_ports));
+ if (ret) {
+ netdev_err(pdata->netdev, "I2C error reading SFP GPIOs\n");
+ return;
+ }
+
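+ /* Combine the two 8-bit port values into a single 16-bit bitmap */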
+ gpio_input = (gpio_ports[1] << 8) | gpio_ports[0];
+
+ if (phy_data->sfp_gpio_mask & XGBE_GPIO_NO_MOD_ABSENT) {
+ /* No GPIO, just assume the module is present for now */
+ phy_data->sfp_mod_absent = 0;
+ } else {
+ if (!(gpio_input & (1 << phy_data->sfp_gpio_mod_absent)))
+ phy_data->sfp_mod_absent = 0;
+ }
+
+ if (!(phy_data->sfp_gpio_mask & XGBE_GPIO_NO_RX_LOS) &&
+ (gpio_input & (1 << phy_data->sfp_gpio_rx_los)))
+ phy_data->sfp_rx_los = 1;
+
+ if (!(phy_data->sfp_gpio_mask & XGBE_GPIO_NO_TX_FAULT) &&
+ (gpio_input & (1 << phy_data->sfp_gpio_tx_fault)))
+ phy_data->sfp_tx_fault = 1;
+}
+
+static void xgbe_phy_sfp_mod_absent(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_phy_data *phy_data = pdata->phy_data;
+
+ xgbe_phy_free_phy_device(pdata);
+
+ phy_data->sfp_mod_absent = 1;
+ phy_data->sfp_phy_avail = 0;
+ memset(&phy_data->sfp_eeprom, 0, sizeof(phy_data->sfp_eeprom));
+}
+
+static void xgbe_phy_sfp_reset(struct xgbe_phy_data *phy_data)
+{
+ phy_data->sfp_rx_los = 0;
+ phy_data->sfp_tx_fault = 0;
+ phy_data->sfp_mod_absent = 1;
+ phy_data->sfp_diags = 0;
+ phy_data->sfp_base = XGBE_SFP_BASE_UNKNOWN;
+ phy_data->sfp_cable = XGBE_SFP_CABLE_UNKNOWN;
+ phy_data->sfp_speed = XGBE_SFP_SPEED_UNKNOWN;
+}
+
+static void xgbe_phy_sfp_detect(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_phy_data *phy_data = pdata->phy_data;
+ int ret;
+
+ /* Reset the SFP signals and info */
+ xgbe_phy_sfp_reset(phy_data);
+
+ ret = xgbe_phy_get_comm_ownership(pdata);
+ if (ret)
+ return;
+
+ /* Read the SFP signals and check for module presence */
+ xgbe_phy_sfp_signals(pdata);
+ if (phy_data->sfp_mod_absent) {
+ xgbe_phy_sfp_mod_absent(pdata);
+ goto put;
+ }
+
+ ret = xgbe_phy_sfp_read_eeprom(pdata);
+ if (ret) {
+ /* Treat any error as if there isn't an SFP plugged in */
+ xgbe_phy_sfp_reset(phy_data);
+ xgbe_phy_sfp_mod_absent(pdata);
+ goto put;
+ }
+
+ xgbe_phy_sfp_parse_eeprom(pdata);
+
+ xgbe_phy_sfp_external_phy(pdata);
+
+put:
+ xgbe_phy_sfp_phy_settings(pdata);
+
+ xgbe_phy_put_comm_ownership(pdata);
+}
+
+static void xgbe_phy_phydev_flowctrl(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_phy_data *phy_data = pdata->phy_data;
+ u16 lcl_adv = 0, rmt_adv = 0;
+ u8 fc;
+
+ pdata->phy.tx_pause = 0;
+ pdata->phy.rx_pause = 0;
+
+ if (!phy_data->phydev)
+ return;
+
+ if (phy_data->phydev->advertising & ADVERTISED_Pause)
+ lcl_adv |= ADVERTISE_PAUSE_CAP;
+ if (phy_data->phydev->advertising & ADVERTISED_Asym_Pause)
+ lcl_adv |= ADVERTISE_PAUSE_ASYM;
+
+ if (phy_data->phydev->pause) {
+ pdata->phy.lp_advertising |= ADVERTISED_Pause;
+ rmt_adv |= LPA_PAUSE_CAP;
+ }
+ if (phy_data->phydev->asym_pause) {
+ pdata->phy.lp_advertising |= ADVERTISED_Asym_Pause;
+ rmt_adv |= LPA_PAUSE_ASYM;
+ }
+
+ fc = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv);
+ if (fc & FLOW_CTRL_TX)
+ pdata->phy.tx_pause = 1;
+ if (fc & FLOW_CTRL_RX)
+ pdata->phy.rx_pause = 1;
+}
+
+static enum xgbe_mode xgbe_phy_an37_sgmii_outcome(struct xgbe_prv_data *pdata)
+{
+ enum xgbe_mode mode;
+
+ pdata->phy.lp_advertising |= ADVERTISED_Autoneg;
+ pdata->phy.lp_advertising |= ADVERTISED_TP;
+
+ /* Use external PHY to determine flow control */
+ if (pdata->phy.pause_autoneg)
+ xgbe_phy_phydev_flowctrl(pdata);
+
+ switch (pdata->an_status & XGBE_SGMII_AN_LINK_SPEED) {
+ case XGBE_SGMII_AN_LINK_SPEED_100:
+ if (pdata->an_status & XGBE_SGMII_AN_LINK_DUPLEX) {
+ pdata->phy.lp_advertising |= ADVERTISED_100baseT_Full;
+ mode = XGBE_MODE_SGMII_100;
+ } else {
+ /* Half-duplex not supported */
+ pdata->phy.lp_advertising |= ADVERTISED_100baseT_Half;
+ mode = XGBE_MODE_UNKNOWN;
+ }
+ break;
+ case XGBE_SGMII_AN_LINK_SPEED_1000:
+ if (pdata->an_status & XGBE_SGMII_AN_LINK_DUPLEX) {
+ pdata->phy.lp_advertising |= ADVERTISED_1000baseT_Full;
+ mode = XGBE_MODE_SGMII_1000;
+ } else {
+ /* Half-duplex not supported */
+ pdata->phy.lp_advertising |= ADVERTISED_1000baseT_Half;
+ mode = XGBE_MODE_UNKNOWN;
+ }
+ break;
+ default:
+ mode = XGBE_MODE_UNKNOWN;
+ }
+
+ return mode;
+}
+
+static enum xgbe_mode xgbe_phy_an37_outcome(struct xgbe_prv_data *pdata)
+{
+ enum xgbe_mode mode;
+ unsigned int ad_reg, lp_reg;
+
+ pdata->phy.lp_advertising |= ADVERTISED_Autoneg;
+ pdata->phy.lp_advertising |= ADVERTISED_FIBRE;
+
+ /* Compare Advertisement and Link Partner register */
+ ad_reg = XMDIO_READ(pdata, MDIO_MMD_VEND2, MDIO_VEND2_AN_ADVERTISE);
+ lp_reg = XMDIO_READ(pdata, MDIO_MMD_VEND2, MDIO_VEND2_AN_LP_ABILITY);
+ if (lp_reg & 0x100)
+ pdata->phy.lp_advertising |= ADVERTISED_Pause;
+ if (lp_reg & 0x80)
+ pdata->phy.lp_advertising |= ADVERTISED_Asym_Pause;
+
+ if (pdata->phy.pause_autoneg) {
+ /* Set flow control based on auto-negotiation result */
+ pdata->phy.tx_pause = 0;
+ pdata->phy.rx_pause = 0;
+
+ if (ad_reg & lp_reg & 0x100) {
+ pdata->phy.tx_pause = 1;
+ pdata->phy.rx_pause = 1;
+ } else if (ad_reg & lp_reg & 0x80) {
+ if (ad_reg & 0x100)
+ pdata->phy.rx_pause = 1;
+ else if (lp_reg & 0x100)
+ pdata->phy.tx_pause = 1;
+ }
+ }
+
+ if (lp_reg & 0x40)
+ pdata->phy.lp_advertising |= ADVERTISED_1000baseT_Half;
+ if (lp_reg & 0x20)
+ pdata->phy.lp_advertising |= ADVERTISED_1000baseT_Full;
+
+ /* Half duplex is not supported */
+ ad_reg &= lp_reg;
+ mode = (ad_reg & 0x20) ? XGBE_MODE_X : XGBE_MODE_UNKNOWN;
+
+ return mode;
+}
+
+static enum xgbe_mode xgbe_phy_an73_redrv_outcome(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_phy_data *phy_data = pdata->phy_data;
+ enum xgbe_mode mode;
+ unsigned int ad_reg, lp_reg;
+
+ pdata->phy.lp_advertising |= ADVERTISED_Autoneg;
+ pdata->phy.lp_advertising |= ADVERTISED_Backplane;
+
+ /* Use external PHY to determine flow control */
+ if (pdata->phy.pause_autoneg)
+ xgbe_phy_phydev_flowctrl(pdata);
+
+ /* Compare Advertisement and Link Partner register 2 */
+ ad_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 1);
+ lp_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_LPA + 1);
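+ /* Bit 7 indicates 10GBase-KR ability; bit 5 indicates 1000Base-KX */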
+ if (lp_reg & 0x80)
+ pdata->phy.lp_advertising |= ADVERTISED_10000baseKR_Full;
+ if (lp_reg & 0x20)
+ pdata->phy.lp_advertising |= ADVERTISED_1000baseKX_Full;
+
+ ad_reg &= lp_reg;
+ if (ad_reg & 0x80) {
+ switch (phy_data->port_mode) {
+ case XGBE_PORT_MODE_BACKPLANE:
+ mode = XGBE_MODE_KR;
+ break;
+ default:
+ mode = XGBE_MODE_SFI;
+ break;
+ }
+ } else if (ad_reg & 0x20) {
+ switch (phy_data->port_mode) {
+ case XGBE_PORT_MODE_BACKPLANE:
+ mode = XGBE_MODE_KX_1000;
+ break;
+ case XGBE_PORT_MODE_1000BASE_X:
+ mode = XGBE_MODE_X;
+ break;
+ case XGBE_PORT_MODE_SFP:
+ switch (phy_data->sfp_base) {
+ case XGBE_SFP_BASE_1000_T:
+ if (phy_data->phydev &&
+ (phy_data->phydev->speed == SPEED_100))
+ mode = XGBE_MODE_SGMII_100;
+ else
+ mode = XGBE_MODE_SGMII_1000;
+ break;
+ case XGBE_SFP_BASE_1000_SX:
+ case XGBE_SFP_BASE_1000_LX:
+ case XGBE_SFP_BASE_1000_CX:
+ default:
+ mode = XGBE_MODE_X;
+ break;
+ }
+ break;
+ default:
+ if (phy_data->phydev &&
+ (phy_data->phydev->speed == SPEED_100))
+ mode = XGBE_MODE_SGMII_100;
+ else
+ mode = XGBE_MODE_SGMII_1000;
+ break;
+ }
+ } else {
+ mode = XGBE_MODE_UNKNOWN;
+ }
+
+ /* Compare Advertisement and Link Partner register 3 */
+ ad_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 2);
+ lp_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_LPA + 2);
+ if (lp_reg & 0xc000)
+ pdata->phy.lp_advertising |= ADVERTISED_10000baseR_FEC;
+
+ return mode;
+}
+
+static enum xgbe_mode xgbe_phy_an73_outcome(struct xgbe_prv_data *pdata)
+{
+ enum xgbe_mode mode;
+ unsigned int ad_reg, lp_reg;
+
+ pdata->phy.lp_advertising |= ADVERTISED_Autoneg;
+ pdata->phy.lp_advertising |= ADVERTISED_Backplane;
+
+ /* Compare Advertisement and Link Partner register 1 */
+ ad_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE);
+ lp_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_LPA);
+ if (lp_reg & 0x400)
+ pdata->phy.lp_advertising |= ADVERTISED_Pause;
+ if (lp_reg & 0x800)
+ pdata->phy.lp_advertising |= ADVERTISED_Asym_Pause;
+
+ if (pdata->phy.pause_autoneg) {
+ /* Set flow control based on auto-negotiation result */
+ pdata->phy.tx_pause = 0;
+ pdata->phy.rx_pause = 0;
+
+ if (ad_reg & lp_reg & 0x400) {
+ pdata->phy.tx_pause = 1;
+ pdata->phy.rx_pause = 1;
+ } else if (ad_reg & lp_reg & 0x800) {
+ if (ad_reg & 0x400)
+ pdata->phy.rx_pause = 1;
+ else if (lp_reg & 0x400)
+ pdata->phy.tx_pause = 1;
+ }
+ }
+
+ /* Compare Advertisement and Link Partner register 2 */
+ ad_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 1);
+ lp_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_LPA + 1);
+ if (lp_reg & 0x80)
+ pdata->phy.lp_advertising |= ADVERTISED_10000baseKR_Full;
+ if (lp_reg & 0x20)
+ pdata->phy.lp_advertising |= ADVERTISED_1000baseKX_Full;
+
+ ad_reg &= lp_reg;
+ if (ad_reg & 0x80)
+ mode = XGBE_MODE_KR;
+ else if (ad_reg & 0x20)
+ mode = XGBE_MODE_KX_1000;
+ else
+ mode = XGBE_MODE_UNKNOWN;
+
+ /* Compare Advertisement and Link Partner register 3 */
+ ad_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 2);
+ lp_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_LPA + 2);
+ if (lp_reg & 0xc000)
+ pdata->phy.lp_advertising |= ADVERTISED_10000baseR_FEC;
+
+ return mode;
+}
+
+static enum xgbe_mode xgbe_phy_an_outcome(struct xgbe_prv_data *pdata)
+{
+ switch (pdata->an_mode) {
+ case XGBE_AN_MODE_CL73:
+ return xgbe_phy_an73_outcome(pdata);
+ case XGBE_AN_MODE_CL73_REDRV:
+ return xgbe_phy_an73_redrv_outcome(pdata);
+ case XGBE_AN_MODE_CL37:
+ return xgbe_phy_an37_outcome(pdata);
+ case XGBE_AN_MODE_CL37_SGMII:
+ return xgbe_phy_an37_sgmii_outcome(pdata);
+ default:
+ return XGBE_MODE_UNKNOWN;
+ }
+}
+
+static unsigned int xgbe_phy_an_advertising(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_phy_data *phy_data = pdata->phy_data;
+ unsigned int advertising;
+
+ /* Without a re-driver, just return current advertising */
+ if (!phy_data->redrv)
+ return pdata->phy.advertising;
+
+ /* With the KR re-driver we need to advertise a single speed */
+ advertising = pdata->phy.advertising;
+ advertising &= ~ADVERTISED_1000baseKX_Full;
+ advertising &= ~ADVERTISED_10000baseKR_Full;
+
+ switch (phy_data->port_mode) {
+ case XGBE_PORT_MODE_BACKPLANE:
+ advertising |= ADVERTISED_10000baseKR_Full;
+ break;
+ case XGBE_PORT_MODE_BACKPLANE_2500:
+ advertising |= ADVERTISED_1000baseKX_Full;
+ break;
+ case XGBE_PORT_MODE_1000BASE_T:
+ case XGBE_PORT_MODE_1000BASE_X:
+ case XGBE_PORT_MODE_NBASE_T:
+ advertising |= ADVERTISED_1000baseKX_Full;
+ break;
+ case XGBE_PORT_MODE_10GBASE_T:
+ if (phy_data->phydev &&
+ (phy_data->phydev->speed == SPEED_10000))
+ advertising |= ADVERTISED_10000baseKR_Full;
+ else
+ advertising |= ADVERTISED_1000baseKX_Full;
+ break;
+ case XGBE_PORT_MODE_10GBASE_R:
+ advertising |= ADVERTISED_10000baseKR_Full;
+ break;
+ case XGBE_PORT_MODE_SFP:
+ switch (phy_data->sfp_base) {
+ case XGBE_SFP_BASE_1000_T:
+ case XGBE_SFP_BASE_1000_SX:
+ case XGBE_SFP_BASE_1000_LX:
+ case XGBE_SFP_BASE_1000_CX:
+ advertising |= ADVERTISED_1000baseKX_Full;
+ break;
+ default:
+ advertising |= ADVERTISED_10000baseKR_Full;
+ break;
+ }
+ break;
+ default:
+ advertising |= ADVERTISED_10000baseKR_Full;
+ break;
+ }
+
+ return advertising;
+}
+
+static int xgbe_phy_an_config(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_phy_data *phy_data = pdata->phy_data;
+ int ret;
+
+ ret = xgbe_phy_find_phy_device(pdata);
+ if (ret)
+ return ret;
+
+ if (!phy_data->phydev)
+ return 0;
+
+ phy_data->phydev->autoneg = pdata->phy.autoneg;
+ phy_data->phydev->advertising = phy_data->phydev->supported &
+ pdata->phy.advertising;
+
+ if (pdata->phy.autoneg != AUTONEG_ENABLE) {
+ phy_data->phydev->speed = pdata->phy.speed;
+ phy_data->phydev->duplex = pdata->phy.duplex;
+ }
+
+ ret = phy_start_aneg(phy_data->phydev);
+
+ return ret;
+}
+
+static enum xgbe_an_mode xgbe_phy_an_sfp_mode(struct xgbe_phy_data *phy_data)
+{
+ switch (phy_data->sfp_base) {
+ case XGBE_SFP_BASE_1000_T:
+ return XGBE_AN_MODE_CL37_SGMII;
+ case XGBE_SFP_BASE_1000_SX:
+ case XGBE_SFP_BASE_1000_LX:
+ case XGBE_SFP_BASE_1000_CX:
+ return XGBE_AN_MODE_CL37;
+ default:
+ return XGBE_AN_MODE_NONE;
+ }
+}
+
+static enum xgbe_an_mode xgbe_phy_an_mode(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_phy_data *phy_data = pdata->phy_data;
+
+ /* A KR re-driver will always require CL73 AN */
+ if (phy_data->redrv)
+ return XGBE_AN_MODE_CL73_REDRV;
+
+ switch (phy_data->port_mode) {
+ case XGBE_PORT_MODE_BACKPLANE:
+ return XGBE_AN_MODE_CL73;
+ case XGBE_PORT_MODE_BACKPLANE_2500:
+ return XGBE_AN_MODE_NONE;
+ case XGBE_PORT_MODE_1000BASE_T:
+ return XGBE_AN_MODE_CL37_SGMII;
+ case XGBE_PORT_MODE_1000BASE_X:
+ return XGBE_AN_MODE_CL37;
+ case XGBE_PORT_MODE_NBASE_T:
+ return XGBE_AN_MODE_CL37_SGMII;
+ case XGBE_PORT_MODE_10GBASE_T:
+ return XGBE_AN_MODE_CL73;
+ case XGBE_PORT_MODE_10GBASE_R:
+ return XGBE_AN_MODE_NONE;
+ case XGBE_PORT_MODE_SFP:
+ return xgbe_phy_an_sfp_mode(phy_data);
+ default:
+ return XGBE_AN_MODE_NONE;
+ }
+}
+
+static int xgbe_phy_set_redrv_mode_mdio(struct xgbe_prv_data *pdata,
+ enum xgbe_phy_redrv_mode mode)
+{
+ struct xgbe_phy_data *phy_data = pdata->phy_data;
+ u16 redrv_reg, redrv_val;
+
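+ /* Each re-driver lane has its own register block, offset by 0x1000 */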
+ redrv_reg = XGBE_PHY_REDRV_MODE_REG + (phy_data->redrv_lane * 0x1000);
+ redrv_val = (u16)mode;
+
+ return pdata->hw_if.write_ext_mii_regs(pdata, phy_data->redrv_addr,
+ redrv_reg, redrv_val);
+}
+
+static int xgbe_phy_set_redrv_mode_i2c(struct xgbe_prv_data *pdata,
+ enum xgbe_phy_redrv_mode mode)
+{
+ struct xgbe_phy_data *phy_data = pdata->phy_data;
+ unsigned int redrv_reg;
+ int ret;
+
+ /* Calculate the register to write */
+ redrv_reg = XGBE_PHY_REDRV_MODE_REG + (phy_data->redrv_lane * 0x1000);
+
+ ret = xgbe_phy_redrv_write(pdata, redrv_reg, mode);
+
+ return ret;
+}
+
+static void xgbe_phy_set_redrv_mode(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_phy_data *phy_data = pdata->phy_data;
+ enum xgbe_phy_redrv_mode mode;
+ int ret;
+
+ if (!phy_data->redrv)
+ return;
+
+ mode = XGBE_PHY_REDRV_MODE_CX;
+ if ((phy_data->port_mode == XGBE_PORT_MODE_SFP) &&
+ (phy_data->sfp_base != XGBE_SFP_BASE_1000_CX) &&
+ (phy_data->sfp_base != XGBE_SFP_BASE_10000_CR))
+ mode = XGBE_PHY_REDRV_MODE_SR;
+
+ ret = xgbe_phy_get_comm_ownership(pdata);
+ if (ret)
+ return;
+
+ if (phy_data->redrv_if)
+ xgbe_phy_set_redrv_mode_i2c(pdata, mode);
+ else
+ xgbe_phy_set_redrv_mode_mdio(pdata, mode);
+
+ xgbe_phy_put_comm_ownership(pdata);
+}
+
+static void xgbe_phy_start_ratechange(struct xgbe_prv_data *pdata)
+{
+ if (!XP_IOREAD_BITS(pdata, XP_DRIVER_INT_RO, STATUS))
+ return;
+
+ /* Log if a previous command did not complete */
+ netif_dbg(pdata, link, pdata->netdev,
+ "firmware mailbox not ready for command\n");
+}
+
+static void xgbe_phy_complete_ratechange(struct xgbe_prv_data *pdata)
+{
+ unsigned int wait;
+
+ /* Wait for command to complete */
+ wait = XGBE_RATECHANGE_COUNT;
+ while (wait--) {
+ if (!XP_IOREAD_BITS(pdata, XP_DRIVER_INT_RO, STATUS))
+ return;
+
+ usleep_range(1000, 2000);
+ }
+
+ netif_dbg(pdata, link, pdata->netdev,
+ "firmware mailbox command did not complete\n");
+}
+
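+/* Each of the mode functions below issues the same firmware mailbox
+ * sequence: wait for any in-flight command, write the command and
+ * sub-command into the scratch registers, raise the request bit and
+ * poll for completion.  A sketch of a common helper they could share
+ * (the name xgbe_phy_perform_ratechange is hypothetical, shown only
+ * to illustrate the pattern):
+ *
+ *	static void xgbe_phy_perform_ratechange(struct xgbe_prv_data *pdata,
+ *						unsigned int cmd,
+ *						unsigned int sub_cmd)
+ *	{
+ *		unsigned int s0 = 0;
+ *
+ *		xgbe_phy_start_ratechange(pdata);
+ *
+ *		XP_SET_BITS(s0, XP_DRIVER_SCRATCH_0, COMMAND, cmd);
+ *		XP_SET_BITS(s0, XP_DRIVER_SCRATCH_0, SUB_COMMAND, sub_cmd);
+ *
+ *		XP_IOWRITE(pdata, XP_DRIVER_SCRATCH_0, s0);
+ *		XP_IOWRITE(pdata, XP_DRIVER_SCRATCH_1, 0);
+ *		XP_IOWRITE_BITS(pdata, XP_DRIVER_INT_REQ, REQUEST, 1);
+ *
+ *		xgbe_phy_complete_ratechange(pdata);
+ *	}
+ */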
+static void xgbe_phy_rrc(struct xgbe_prv_data *pdata)
+{
+ unsigned int s0;
+
+ xgbe_phy_start_ratechange(pdata);
+
+ /* Receiver Reset Cycle */
+ s0 = 0;
+ XP_SET_BITS(s0, XP_DRIVER_SCRATCH_0, COMMAND, 5);
+ XP_SET_BITS(s0, XP_DRIVER_SCRATCH_0, SUB_COMMAND, 0);
+
+ /* Call FW to make the change */
+ XP_IOWRITE(pdata, XP_DRIVER_SCRATCH_0, s0);
+ XP_IOWRITE(pdata, XP_DRIVER_SCRATCH_1, 0);
+ XP_IOWRITE_BITS(pdata, XP_DRIVER_INT_REQ, REQUEST, 1);
+
+ xgbe_phy_complete_ratechange(pdata);
+
+ netif_dbg(pdata, link, pdata->netdev, "receiver reset complete\n");
+}
+
+static void xgbe_phy_power_off(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_phy_data *phy_data = pdata->phy_data;
+
+ xgbe_phy_start_ratechange(pdata);
+
+ /* Call FW to make the change */
+ XP_IOWRITE(pdata, XP_DRIVER_SCRATCH_0, 0);
+ XP_IOWRITE(pdata, XP_DRIVER_SCRATCH_1, 0);
+ XP_IOWRITE_BITS(pdata, XP_DRIVER_INT_REQ, REQUEST, 1);
+
+ xgbe_phy_complete_ratechange(pdata);
+
+ phy_data->cur_mode = XGBE_MODE_UNKNOWN;
+
+ netif_dbg(pdata, link, pdata->netdev, "phy powered off\n");
+}
+
+static void xgbe_phy_sfi_mode(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_phy_data *phy_data = pdata->phy_data;
+ unsigned int s0;
+
+ xgbe_phy_set_redrv_mode(pdata);
+
+ xgbe_phy_start_ratechange(pdata);
+
+ /* 10G/SFI */
+ s0 = 0;
+ XP_SET_BITS(s0, XP_DRIVER_SCRATCH_0, COMMAND, 3);
+ if (phy_data->sfp_cable != XGBE_SFP_CABLE_PASSIVE) {
+ XP_SET_BITS(s0, XP_DRIVER_SCRATCH_0, SUB_COMMAND, 0);
+ } else {
+ if (phy_data->sfp_cable_len <= 1)
+ XP_SET_BITS(s0, XP_DRIVER_SCRATCH_0, SUB_COMMAND, 1);
+ else if (phy_data->sfp_cable_len <= 3)
+ XP_SET_BITS(s0, XP_DRIVER_SCRATCH_0, SUB_COMMAND, 2);
+ else
+ XP_SET_BITS(s0, XP_DRIVER_SCRATCH_0, SUB_COMMAND, 3);
+ }
+
+ /* Call FW to make the change */
+ XP_IOWRITE(pdata, XP_DRIVER_SCRATCH_0, s0);
+ XP_IOWRITE(pdata, XP_DRIVER_SCRATCH_1, 0);
+ XP_IOWRITE_BITS(pdata, XP_DRIVER_INT_REQ, REQUEST, 1);
+
+ xgbe_phy_complete_ratechange(pdata);
+
+ phy_data->cur_mode = XGBE_MODE_SFI;
+
+ netif_dbg(pdata, link, pdata->netdev, "10GbE SFI mode set\n");
+}
+
+static void xgbe_phy_x_mode(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_phy_data *phy_data = pdata->phy_data;
+ unsigned int s0;
+
+ xgbe_phy_set_redrv_mode(pdata);
+
+ xgbe_phy_start_ratechange(pdata);
+
+ /* 1G/X */
+ s0 = 0;
+ XP_SET_BITS(s0, XP_DRIVER_SCRATCH_0, COMMAND, 1);
+ XP_SET_BITS(s0, XP_DRIVER_SCRATCH_0, SUB_COMMAND, 3);
+
+ /* Call FW to make the change */
+ XP_IOWRITE(pdata, XP_DRIVER_SCRATCH_0, s0);
+ XP_IOWRITE(pdata, XP_DRIVER_SCRATCH_1, 0);
+ XP_IOWRITE_BITS(pdata, XP_DRIVER_INT_REQ, REQUEST, 1);
+
+ xgbe_phy_complete_ratechange(pdata);
+
+ phy_data->cur_mode = XGBE_MODE_X;
+
+ netif_dbg(pdata, link, pdata->netdev, "1GbE X mode set\n");
+}
+
+static void xgbe_phy_sgmii_1000_mode(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_phy_data *phy_data = pdata->phy_data;
+ unsigned int s0;
+
+ xgbe_phy_set_redrv_mode(pdata);
+
+ xgbe_phy_start_ratechange(pdata);
+
+ /* 1G/SGMII */
+ s0 = 0;
+ XP_SET_BITS(s0, XP_DRIVER_SCRATCH_0, COMMAND, 1);
+ XP_SET_BITS(s0, XP_DRIVER_SCRATCH_0, SUB_COMMAND, 2);
+
+ /* Call FW to make the change */
+ XP_IOWRITE(pdata, XP_DRIVER_SCRATCH_0, s0);
+ XP_IOWRITE(pdata, XP_DRIVER_SCRATCH_1, 0);
+ XP_IOWRITE_BITS(pdata, XP_DRIVER_INT_REQ, REQUEST, 1);
+
+ xgbe_phy_complete_ratechange(pdata);
+
+ phy_data->cur_mode = XGBE_MODE_SGMII_1000;
+
+ netif_dbg(pdata, link, pdata->netdev, "1GbE SGMII mode set\n");
+}
+
+static void xgbe_phy_sgmii_100_mode(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_phy_data *phy_data = pdata->phy_data;
+ unsigned int s0;
+
+ xgbe_phy_set_redrv_mode(pdata);
+
+ xgbe_phy_start_ratechange(pdata);
+
+ /* 100M/SGMII */
+ s0 = 0;
+ XP_SET_BITS(s0, XP_DRIVER_SCRATCH_0, COMMAND, 1);
+ XP_SET_BITS(s0, XP_DRIVER_SCRATCH_0, SUB_COMMAND, 1);
+
+ /* Call FW to make the change */
+ XP_IOWRITE(pdata, XP_DRIVER_SCRATCH_0, s0);
+ XP_IOWRITE(pdata, XP_DRIVER_SCRATCH_1, 0);
+ XP_IOWRITE_BITS(pdata, XP_DRIVER_INT_REQ, REQUEST, 1);
+
+ xgbe_phy_complete_ratechange(pdata);
+
+ phy_data->cur_mode = XGBE_MODE_SGMII_100;
+
+ netif_dbg(pdata, link, pdata->netdev, "100MbE SGMII mode set\n");
+}
+
+static void xgbe_phy_kr_mode(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_phy_data *phy_data = pdata->phy_data;
+ unsigned int s0;
+
+ xgbe_phy_set_redrv_mode(pdata);
+
+ xgbe_phy_start_ratechange(pdata);
+
+ /* 10G/KR */
+ s0 = 0;
+ XP_SET_BITS(s0, XP_DRIVER_SCRATCH_0, COMMAND, 4);
+ XP_SET_BITS(s0, XP_DRIVER_SCRATCH_0, SUB_COMMAND, 0);
+
+ /* Call FW to make the change */
+ XP_IOWRITE(pdata, XP_DRIVER_SCRATCH_0, s0);
+ XP_IOWRITE(pdata, XP_DRIVER_SCRATCH_1, 0);
+ XP_IOWRITE_BITS(pdata, XP_DRIVER_INT_REQ, REQUEST, 1);
+
+ xgbe_phy_complete_ratechange(pdata);
+
+ phy_data->cur_mode = XGBE_MODE_KR;
+
+ netif_dbg(pdata, link, pdata->netdev, "10GbE KR mode set\n");
+}
+
+static void xgbe_phy_kx_2500_mode(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_phy_data *phy_data = pdata->phy_data;
+ unsigned int s0;
+
+ xgbe_phy_set_redrv_mode(pdata);
+
+ xgbe_phy_start_ratechange(pdata);
+
+ /* 2.5G/KX */
+ s0 = 0;
+ XP_SET_BITS(s0, XP_DRIVER_SCRATCH_0, COMMAND, 2);
+ XP_SET_BITS(s0, XP_DRIVER_SCRATCH_0, SUB_COMMAND, 0);
+
+ /* Call FW to make the change */
+ XP_IOWRITE(pdata, XP_DRIVER_SCRATCH_0, s0);
+ XP_IOWRITE(pdata, XP_DRIVER_SCRATCH_1, 0);
+ XP_IOWRITE_BITS(pdata, XP_DRIVER_INT_REQ, REQUEST, 1);
+
+ xgbe_phy_complete_ratechange(pdata);
+
+ phy_data->cur_mode = XGBE_MODE_KX_2500;
+
+ netif_dbg(pdata, link, pdata->netdev, "2.5GbE KX mode set\n");
+}
+
+static void xgbe_phy_kx_1000_mode(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_phy_data *phy_data = pdata->phy_data;
+ unsigned int s0;
+
+ xgbe_phy_set_redrv_mode(pdata);
+
+ xgbe_phy_start_ratechange(pdata);
+
+ /* 1G/KX */
+ s0 = 0;
+ XP_SET_BITS(s0, XP_DRIVER_SCRATCH_0, COMMAND, 1);
+ XP_SET_BITS(s0, XP_DRIVER_SCRATCH_0, SUB_COMMAND, 3);
+
+ /* Call FW to make the change */
+ XP_IOWRITE(pdata, XP_DRIVER_SCRATCH_0, s0);
+ XP_IOWRITE(pdata, XP_DRIVER_SCRATCH_1, 0);
+ XP_IOWRITE_BITS(pdata, XP_DRIVER_INT_REQ, REQUEST, 1);
+
+ xgbe_phy_complete_ratechange(pdata);
+
+ phy_data->cur_mode = XGBE_MODE_KX_1000;
+
+ netif_dbg(pdata, link, pdata->netdev, "1GbE KX mode set\n");
+}
+
+static enum xgbe_mode xgbe_phy_cur_mode(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_phy_data *phy_data = pdata->phy_data;
+
+ return phy_data->cur_mode;
+}
+
+static enum xgbe_mode xgbe_phy_switch_baset_mode(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_phy_data *phy_data = pdata->phy_data;
+
+ /* No switching if not 10GBase-T */
+ if (phy_data->port_mode != XGBE_PORT_MODE_10GBASE_T)
+ return xgbe_phy_cur_mode(pdata);
+
+ switch (xgbe_phy_cur_mode(pdata)) {
+ case XGBE_MODE_SGMII_100:
+ case XGBE_MODE_SGMII_1000:
+ return XGBE_MODE_KR;
+ case XGBE_MODE_KR:
+ default:
+ return XGBE_MODE_SGMII_1000;
+ }
+}
+
+static enum xgbe_mode xgbe_phy_switch_bp_2500_mode(struct xgbe_prv_data *pdata)
+{
+ return XGBE_MODE_KX_2500;
+}
+
+static enum xgbe_mode xgbe_phy_switch_bp_mode(struct xgbe_prv_data *pdata)
+{
+ /* If we are in KR switch to KX, and vice-versa */
+ switch (xgbe_phy_cur_mode(pdata)) {
+ case XGBE_MODE_KX_1000:
+ return XGBE_MODE_KR;
+ case XGBE_MODE_KR:
+ default:
+ return XGBE_MODE_KX_1000;
+ }
+}
+
+static enum xgbe_mode xgbe_phy_switch_mode(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_phy_data *phy_data = pdata->phy_data;
+
+ switch (phy_data->port_mode) {
+ case XGBE_PORT_MODE_BACKPLANE:
+ return xgbe_phy_switch_bp_mode(pdata);
+ case XGBE_PORT_MODE_BACKPLANE_2500:
+ return xgbe_phy_switch_bp_2500_mode(pdata);
+ case XGBE_PORT_MODE_1000BASE_T:
+ case XGBE_PORT_MODE_NBASE_T:
+ case XGBE_PORT_MODE_10GBASE_T:
+ return xgbe_phy_switch_baset_mode(pdata);
+ case XGBE_PORT_MODE_1000BASE_X:
+ case XGBE_PORT_MODE_10GBASE_R:
+ case XGBE_PORT_MODE_SFP:
+ /* No switching, so just return current mode */
+ return xgbe_phy_cur_mode(pdata);
+ default:
+ return XGBE_MODE_UNKNOWN;
+ }
+}
+
+static enum xgbe_mode xgbe_phy_get_basex_mode(struct xgbe_phy_data *phy_data,
+ int speed)
+{
+ switch (speed) {
+ case SPEED_1000:
+ return XGBE_MODE_X;
+ case SPEED_10000:
+ return XGBE_MODE_KR;
+ default:
+ return XGBE_MODE_UNKNOWN;
+ }
+}
+
+static enum xgbe_mode xgbe_phy_get_baset_mode(struct xgbe_phy_data *phy_data,
+ int speed)
+{
+ switch (speed) {
+ case SPEED_100:
+ return XGBE_MODE_SGMII_100;
+ case SPEED_1000:
+ return XGBE_MODE_SGMII_1000;
+ case SPEED_10000:
+ return XGBE_MODE_KR;
+ default:
+ return XGBE_MODE_UNKNOWN;
+ }
+}
+
+static enum xgbe_mode xgbe_phy_get_sfp_mode(struct xgbe_phy_data *phy_data,
+ int speed)
+{
+ switch (speed) {
+ case SPEED_100:
+ return XGBE_MODE_SGMII_100;
+ case SPEED_1000:
+ if (phy_data->sfp_base == XGBE_SFP_BASE_1000_T)
+ return XGBE_MODE_SGMII_1000;
+ else
+ return XGBE_MODE_X;
+ case SPEED_10000:
+ case SPEED_UNKNOWN:
+ return XGBE_MODE_SFI;
+ default:
+ return XGBE_MODE_UNKNOWN;
+ }
+}
+
+static enum xgbe_mode xgbe_phy_get_bp_2500_mode(int speed)
+{
+ switch (speed) {
+ case SPEED_2500:
+ return XGBE_MODE_KX_2500;
+ default:
+ return XGBE_MODE_UNKNOWN;
+ }
+}
+
+static enum xgbe_mode xgbe_phy_get_bp_mode(int speed)
+{
+ switch (speed) {
+ case SPEED_1000:
+ return XGBE_MODE_KX_1000;
+ case SPEED_10000:
+ return XGBE_MODE_KR;
+ default:
+ return XGBE_MODE_UNKNOWN;
+ }
+}
+
+static enum xgbe_mode xgbe_phy_get_mode(struct xgbe_prv_data *pdata,
+ int speed)
+{
+ struct xgbe_phy_data *phy_data = pdata->phy_data;
+
+ switch (phy_data->port_mode) {
+ case XGBE_PORT_MODE_BACKPLANE:
+ return xgbe_phy_get_bp_mode(speed);
+ case XGBE_PORT_MODE_BACKPLANE_2500:
+ return xgbe_phy_get_bp_2500_mode(speed);
+ case XGBE_PORT_MODE_1000BASE_T:
+ case XGBE_PORT_MODE_NBASE_T:
+ case XGBE_PORT_MODE_10GBASE_T:
+ return xgbe_phy_get_baset_mode(phy_data, speed);
+ case XGBE_PORT_MODE_1000BASE_X:
+ case XGBE_PORT_MODE_10GBASE_R:
+ return xgbe_phy_get_basex_mode(phy_data, speed);
+ case XGBE_PORT_MODE_SFP:
+ return xgbe_phy_get_sfp_mode(phy_data, speed);
+ default:
+ return XGBE_MODE_UNKNOWN;
+ }
+}
+
+static void xgbe_phy_set_mode(struct xgbe_prv_data *pdata, enum xgbe_mode mode)
+{
+ switch (mode) {
+ case XGBE_MODE_KX_1000:
+ xgbe_phy_kx_1000_mode(pdata);
+ break;
+ case XGBE_MODE_KX_2500:
+ xgbe_phy_kx_2500_mode(pdata);
+ break;
+ case XGBE_MODE_KR:
+ xgbe_phy_kr_mode(pdata);
+ break;
+ case XGBE_MODE_SGMII_100:
+ xgbe_phy_sgmii_100_mode(pdata);
+ break;
+ case XGBE_MODE_SGMII_1000:
+ xgbe_phy_sgmii_1000_mode(pdata);
+ break;
+ case XGBE_MODE_X:
+ xgbe_phy_x_mode(pdata);
+ break;
+ case XGBE_MODE_SFI:
+ xgbe_phy_sfi_mode(pdata);
+ break;
+ default:
+ break;
+ }
+}
+
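+/* With auto-negotiation enabled a mode is usable only if it is being
+ * advertised; with a fixed speed it is usable only if it is the mode
+ * that the configured speed maps to.
+ */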
+static bool xgbe_phy_check_mode(struct xgbe_prv_data *pdata,
+ enum xgbe_mode mode, u32 advert)
+{
+ if (pdata->phy.autoneg == AUTONEG_ENABLE) {
+ if (pdata->phy.advertising & advert)
+ return true;
+ } else {
+ enum xgbe_mode cur_mode;
+
+ cur_mode = xgbe_phy_get_mode(pdata, pdata->phy.speed);
+ if (cur_mode == mode)
+ return true;
+ }
+
+ return false;
+}
+
+static bool xgbe_phy_use_basex_mode(struct xgbe_prv_data *pdata,
+ enum xgbe_mode mode)
+{
+ switch (mode) {
+ case XGBE_MODE_X:
+ return xgbe_phy_check_mode(pdata, mode,
+ ADVERTISED_1000baseT_Full);
+ case XGBE_MODE_KR:
+ return xgbe_phy_check_mode(pdata, mode,
+ ADVERTISED_10000baseT_Full);
+ default:
+ return false;
+ }
+}
+
+static bool xgbe_phy_use_baset_mode(struct xgbe_prv_data *pdata,
+ enum xgbe_mode mode)
+{
+ switch (mode) {
+ case XGBE_MODE_SGMII_100:
+ return xgbe_phy_check_mode(pdata, mode,
+ ADVERTISED_100baseT_Full);
+ case XGBE_MODE_SGMII_1000:
+ return xgbe_phy_check_mode(pdata, mode,
+ ADVERTISED_1000baseT_Full);
+ case XGBE_MODE_KR:
+ return xgbe_phy_check_mode(pdata, mode,
+ ADVERTISED_10000baseT_Full);
+ default:
+ return false;
+ }
+}
+
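+/* The legacy 32-bit ethtool masks have no dedicated SFI/SFP+ link mode
+ * bit, so ADVERTISED_10000baseT_Full is (presumably) reused below as
+ * the 10G advertisement for SFP ports.
+ */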
+static bool xgbe_phy_use_sfp_mode(struct xgbe_prv_data *pdata,
+ enum xgbe_mode mode)
+{
+ struct xgbe_phy_data *phy_data = pdata->phy_data;
+
+ switch (mode) {
+ case XGBE_MODE_X:
+ if (phy_data->sfp_base == XGBE_SFP_BASE_1000_T)
+ return false;
+ return xgbe_phy_check_mode(pdata, mode,
+ ADVERTISED_1000baseT_Full);
+ case XGBE_MODE_SGMII_100:
+ if (phy_data->sfp_base != XGBE_SFP_BASE_1000_T)
+ return false;
+ return xgbe_phy_check_mode(pdata, mode,
+ ADVERTISED_100baseT_Full);
+ case XGBE_MODE_SGMII_1000:
+ if (phy_data->sfp_base != XGBE_SFP_BASE_1000_T)
+ return false;
+ return xgbe_phy_check_mode(pdata, mode,
+ ADVERTISED_1000baseT_Full);
+ case XGBE_MODE_SFI:
+ return xgbe_phy_check_mode(pdata, mode,
+ ADVERTISED_10000baseT_Full);
+ default:
+ return false;
+ }
+}
+
+static bool xgbe_phy_use_bp_2500_mode(struct xgbe_prv_data *pdata,
+ enum xgbe_mode mode)
+{
+ switch (mode) {
+ case XGBE_MODE_KX_2500:
+ return xgbe_phy_check_mode(pdata, mode,
+ ADVERTISED_2500baseX_Full);
+ default:
+ return false;
+ }
+}
+
+static bool xgbe_phy_use_bp_mode(struct xgbe_prv_data *pdata,
+ enum xgbe_mode mode)
+{
+ switch (mode) {
+ case XGBE_MODE_KX_1000:
+ return xgbe_phy_check_mode(pdata, mode,
+ ADVERTISED_1000baseKX_Full);
+ case XGBE_MODE_KR:
+ return xgbe_phy_check_mode(pdata, mode,
+ ADVERTISED_10000baseKR_Full);
+ default:
+ return false;
+ }
+}
+
+static bool xgbe_phy_use_mode(struct xgbe_prv_data *pdata, enum xgbe_mode mode)
+{
+ struct xgbe_phy_data *phy_data = pdata->phy_data;
+
+ switch (phy_data->port_mode) {
+ case XGBE_PORT_MODE_BACKPLANE:
+ return xgbe_phy_use_bp_mode(pdata, mode);
+ case XGBE_PORT_MODE_BACKPLANE_2500:
+ return xgbe_phy_use_bp_2500_mode(pdata, mode);
+ case XGBE_PORT_MODE_1000BASE_T:
+ case XGBE_PORT_MODE_NBASE_T:
+ case XGBE_PORT_MODE_10GBASE_T:
+ return xgbe_phy_use_baset_mode(pdata, mode);
+ case XGBE_PORT_MODE_1000BASE_X:
+ case XGBE_PORT_MODE_10GBASE_R:
+ return xgbe_phy_use_basex_mode(pdata, mode);
+ case XGBE_PORT_MODE_SFP:
+ return xgbe_phy_use_sfp_mode(pdata, mode);
+ default:
+ return false;
+ }
+}
+
+static bool xgbe_phy_valid_speed_basex_mode(struct xgbe_phy_data *phy_data,
+ int speed)
+{
+ switch (speed) {
+ case SPEED_1000:
+ return (phy_data->port_mode == XGBE_PORT_MODE_1000BASE_X);
+ case SPEED_10000:
+ return (phy_data->port_mode == XGBE_PORT_MODE_10GBASE_R);
+ default:
+ return false;
+ }
+}
+
+static bool xgbe_phy_valid_speed_baset_mode(struct xgbe_phy_data *phy_data,
+ int speed)
+{
+ switch (speed) {
+ case SPEED_100:
+ case SPEED_1000:
+ return true;
+ case SPEED_10000:
+ return (phy_data->port_mode == XGBE_PORT_MODE_10GBASE_T);
+ default:
+ return false;
+ }
+}
+
+static bool xgbe_phy_valid_speed_sfp_mode(struct xgbe_phy_data *phy_data,
+ int speed)
+{
+ switch (speed) {
+ case SPEED_100:
+ return (phy_data->sfp_speed == XGBE_SFP_SPEED_100_1000);
+ case SPEED_1000:
+ return ((phy_data->sfp_speed == XGBE_SFP_SPEED_100_1000) ||
+ (phy_data->sfp_speed == XGBE_SFP_SPEED_1000));
+ case SPEED_10000:
+ return (phy_data->sfp_speed == XGBE_SFP_SPEED_10000);
+ default:
+ return false;
+ }
+}
+
+static bool xgbe_phy_valid_speed_bp_2500_mode(int speed)
+{
+ switch (speed) {
+ case SPEED_2500:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool xgbe_phy_valid_speed_bp_mode(int speed)
+{
+ switch (speed) {
+ case SPEED_1000:
+ case SPEED_10000:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool xgbe_phy_valid_speed(struct xgbe_prv_data *pdata, int speed)
+{
+ struct xgbe_phy_data *phy_data = pdata->phy_data;
+
+ switch (phy_data->port_mode) {
+ case XGBE_PORT_MODE_BACKPLANE:
+ return xgbe_phy_valid_speed_bp_mode(speed);
+ case XGBE_PORT_MODE_BACKPLANE_2500:
+ return xgbe_phy_valid_speed_bp_2500_mode(speed);
+ case XGBE_PORT_MODE_1000BASE_T:
+ case XGBE_PORT_MODE_NBASE_T:
+ case XGBE_PORT_MODE_10GBASE_T:
+ return xgbe_phy_valid_speed_baset_mode(phy_data, speed);
+ case XGBE_PORT_MODE_1000BASE_X:
+ case XGBE_PORT_MODE_10GBASE_R:
+ return xgbe_phy_valid_speed_basex_mode(phy_data, speed);
+ case XGBE_PORT_MODE_SFP:
+ return xgbe_phy_valid_speed_sfp_mode(phy_data, speed);
+ default:
+ return false;
+ }
+}
+
+static int xgbe_phy_link_status(struct xgbe_prv_data *pdata, int *an_restart)
+{
+ struct xgbe_phy_data *phy_data = pdata->phy_data;
+ unsigned int reg;
+ int ret;
+
+ *an_restart = 0;
+
+ if (phy_data->port_mode == XGBE_PORT_MODE_SFP) {
+ /* Check SFP signals */
+ xgbe_phy_sfp_detect(pdata);
+
+ if (phy_data->sfp_changed) {
+ *an_restart = 1;
+ return 0;
+ }
+
+ if (phy_data->sfp_mod_absent || phy_data->sfp_rx_los)
+ return 0;
+ }
+
+ if (phy_data->phydev) {
+ /* Check external PHY */
+ ret = phy_read_status(phy_data->phydev);
+ if (ret < 0)
+ return 0;
+
+ if ((pdata->phy.autoneg == AUTONEG_ENABLE) &&
+ !phy_aneg_done(phy_data->phydev))
+ return 0;
+
+ if (!phy_data->phydev->link)
+ return 0;
+ }
+
+ /* Link status is latched low, so read once to clear
+ * and then read again to get current state
+ */
+ reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_STAT1);
+ reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_STAT1);
+ if (reg & MDIO_STAT1_LSTATUS)
+ return 1;
+
+ /* No link, attempt a receiver reset cycle on every other check */
+ if (phy_data->rrc_count++) {
+ phy_data->rrc_count = 0;
+ xgbe_phy_rrc(pdata);
+ }
+
+ return 0;
+}
+
+static void xgbe_phy_sfp_gpio_setup(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_phy_data *phy_data = pdata->phy_data;
+ unsigned int reg;
+
+ reg = XP_IOREAD(pdata, XP_PROP_3);
+
+ phy_data->sfp_gpio_address = XGBE_GPIO_ADDRESS_PCA9555 +
+ XP_GET_BITS(reg, XP_PROP_3, GPIO_ADDR);
+
+ phy_data->sfp_gpio_mask = XP_GET_BITS(reg, XP_PROP_3, GPIO_MASK);
+
+ phy_data->sfp_gpio_rx_los = XP_GET_BITS(reg, XP_PROP_3,
+ GPIO_RX_LOS);
+ phy_data->sfp_gpio_tx_fault = XP_GET_BITS(reg, XP_PROP_3,
+ GPIO_TX_FAULT);
+ phy_data->sfp_gpio_mod_absent = XP_GET_BITS(reg, XP_PROP_3,
+ GPIO_MOD_ABS);
+ phy_data->sfp_gpio_rate_select = XP_GET_BITS(reg, XP_PROP_3,
+ GPIO_RATE_SELECT);
+
+ if (netif_msg_probe(pdata)) {
+ dev_dbg(pdata->dev, "SFP: gpio_address=%#x\n",
+ phy_data->sfp_gpio_address);
+ dev_dbg(pdata->dev, "SFP: gpio_mask=%#x\n",
+ phy_data->sfp_gpio_mask);
+ dev_dbg(pdata->dev, "SFP: gpio_rx_los=%u\n",
+ phy_data->sfp_gpio_rx_los);
+ dev_dbg(pdata->dev, "SFP: gpio_tx_fault=%u\n",
+ phy_data->sfp_gpio_tx_fault);
+ dev_dbg(pdata->dev, "SFP: gpio_mod_absent=%u\n",
+ phy_data->sfp_gpio_mod_absent);
+ dev_dbg(pdata->dev, "SFP: gpio_rate_select=%u\n",
+ phy_data->sfp_gpio_rate_select);
+ }
+}
+
+static void xgbe_phy_sfp_comm_setup(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_phy_data *phy_data = pdata->phy_data;
+ unsigned int reg, mux_addr_hi, mux_addr_lo;
+
+ reg = XP_IOREAD(pdata, XP_PROP_4);
+
+ mux_addr_hi = XP_GET_BITS(reg, XP_PROP_4, MUX_ADDR_HI);
+ mux_addr_lo = XP_GET_BITS(reg, XP_PROP_4, MUX_ADDR_LO);
+ if (mux_addr_lo == XGBE_SFP_DIRECT)
+ return;
+
+ phy_data->sfp_comm = XGBE_SFP_COMM_PCA9545;
+ phy_data->sfp_mux_address = (mux_addr_hi << 2) + mux_addr_lo;
+ phy_data->sfp_mux_channel = XP_GET_BITS(reg, XP_PROP_4, MUX_CHAN);
+
+ if (netif_msg_probe(pdata)) {
+ dev_dbg(pdata->dev, "SFP: mux_address=%#x\n",
+ phy_data->sfp_mux_address);
+ dev_dbg(pdata->dev, "SFP: mux_channel=%u\n",
+ phy_data->sfp_mux_channel);
+ }
+}
+
+static void xgbe_phy_sfp_setup(struct xgbe_prv_data *pdata)
+{
+ xgbe_phy_sfp_comm_setup(pdata);
+ xgbe_phy_sfp_gpio_setup(pdata);
+}
+
+static int xgbe_phy_int_mdio_reset(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_phy_data *phy_data = pdata->phy_data;
+ int ret;
+
+ ret = pdata->hw_if.set_gpio(pdata, phy_data->mdio_reset_gpio);
+ if (ret)
+ return ret;
+
+ ret = pdata->hw_if.clr_gpio(pdata, phy_data->mdio_reset_gpio);
+
+ return ret;
+}
+
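+/* Toggle the MDIO reset GPIO through a PCA9555-compatible I2C GPIO
+ * expander; per the PCA9555 register map (an assumption based on the
+ * XGBE_GPIO_ADDRESS_PCA9555 addressing used elsewhere in this file),
+ * command 2 selects the first output port register and pins 0-7/8-15
+ * live in output ports 0/1 respectively.
+ */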
+static int xgbe_phy_i2c_mdio_reset(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_phy_data *phy_data = pdata->phy_data;
+ u8 gpio_reg, gpio_ports[2], gpio_data[3];
+ int ret;
+
+ /* Read the output port registers */
+ gpio_reg = 2;
+ ret = xgbe_phy_i2c_read(pdata, phy_data->mdio_reset_addr,
+ &gpio_reg, sizeof(gpio_reg),
+ gpio_ports, sizeof(gpio_ports));
+ if (ret)
+ return ret;
+
+ /* Prepare to write the GPIO data */
+ gpio_data[0] = 2;
+ gpio_data[1] = gpio_ports[0];
+ gpio_data[2] = gpio_ports[1];
+
+ /* Set the GPIO pin */
+ if (phy_data->mdio_reset_gpio < 8)
+ gpio_data[1] |= (1 << (phy_data->mdio_reset_gpio % 8));
+ else
+ gpio_data[2] |= (1 << (phy_data->mdio_reset_gpio % 8));
+
+ /* Write the output port registers */
+ ret = xgbe_phy_i2c_write(pdata, phy_data->mdio_reset_addr,
+ gpio_data, sizeof(gpio_data));
+ if (ret)
+ return ret;
+
+ /* Clear the GPIO pin */
+ if (phy_data->mdio_reset_gpio < 8)
+ gpio_data[1] &= ~(1 << (phy_data->mdio_reset_gpio % 8));
+ else
+ gpio_data[2] &= ~(1 << (phy_data->mdio_reset_gpio % 8));
+
+ /* Write the output port registers */
+ ret = xgbe_phy_i2c_write(pdata, phy_data->mdio_reset_addr,
+ gpio_data, sizeof(gpio_data));
+
+ return ret;
+}
+
+static int xgbe_phy_mdio_reset(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_phy_data *phy_data = pdata->phy_data;
+ int ret;
+
+ if (phy_data->conn_type != XGBE_CONN_TYPE_MDIO)
+ return 0;
+
+ ret = xgbe_phy_get_comm_ownership(pdata);
+ if (ret)
+ return ret;
+
+ if (phy_data->mdio_reset == XGBE_MDIO_RESET_I2C_GPIO)
+ ret = xgbe_phy_i2c_mdio_reset(pdata);
+ else if (phy_data->mdio_reset == XGBE_MDIO_RESET_INT_GPIO)
+ ret = xgbe_phy_int_mdio_reset(pdata);
+
+ xgbe_phy_put_comm_ownership(pdata);
+
+ return ret;
+}
+
+static bool xgbe_phy_redrv_error(struct xgbe_phy_data *phy_data)
+{
+ if (!phy_data->redrv)
+ return false;
+
+ if (phy_data->redrv_if >= XGBE_PHY_REDRV_IF_MAX)
+ return true;
+
+ switch (phy_data->redrv_model) {
+ case XGBE_PHY_REDRV_MODEL_4223:
+ if (phy_data->redrv_lane > 3)
+ return true;
+ break;
+ case XGBE_PHY_REDRV_MODEL_4227:
+ if (phy_data->redrv_lane > 1)
+ return true;
+ break;
+ default:
+ return true;
+ }
+
+ return false;
+}
+
+static int xgbe_phy_mdio_reset_setup(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_phy_data *phy_data = pdata->phy_data;
+ unsigned int reg;
+
+ if (phy_data->conn_type != XGBE_CONN_TYPE_MDIO)
+ return 0;
+
+ reg = XP_IOREAD(pdata, XP_PROP_3);
+ phy_data->mdio_reset = XP_GET_BITS(reg, XP_PROP_3, MDIO_RESET);
+ switch (phy_data->mdio_reset) {
+ case XGBE_MDIO_RESET_NONE:
+ case XGBE_MDIO_RESET_I2C_GPIO:
+ case XGBE_MDIO_RESET_INT_GPIO:
+ break;
+ default:
+ dev_err(pdata->dev, "unsupported MDIO reset (%#x)\n",
+ phy_data->mdio_reset);
+ return -EINVAL;
+ }
+
+ if (phy_data->mdio_reset == XGBE_MDIO_RESET_I2C_GPIO) {
+ phy_data->mdio_reset_addr = XGBE_GPIO_ADDRESS_PCA9555 +
+ XP_GET_BITS(reg, XP_PROP_3,
+ MDIO_RESET_I2C_ADDR);
+ phy_data->mdio_reset_gpio = XP_GET_BITS(reg, XP_PROP_3,
+ MDIO_RESET_I2C_GPIO);
+ } else if (phy_data->mdio_reset == XGBE_MDIO_RESET_INT_GPIO) {
+ phy_data->mdio_reset_gpio = XP_GET_BITS(reg, XP_PROP_3,
+ MDIO_RESET_INT_GPIO);
+ }
+
+ return 0;
+}
+
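+/* Return true when none of the configured port speeds is valid for the
+ * configured port mode, i.e. the hardware property settings are
+ * inconsistent with each other.
+ */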
+static bool xgbe_phy_port_mode_mismatch(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_phy_data *phy_data = pdata->phy_data;
+
+ switch (phy_data->port_mode) {
+ case XGBE_PORT_MODE_BACKPLANE:
+ if ((phy_data->port_speeds & XGBE_PHY_PORT_SPEED_1000) ||
+ (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_10000))
+ return false;
+ break;
+ case XGBE_PORT_MODE_BACKPLANE_2500:
+ if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_2500)
+ return false;
+ break;
+ case XGBE_PORT_MODE_1000BASE_T:
+ if ((phy_data->port_speeds & XGBE_PHY_PORT_SPEED_100) ||
+ (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_1000))
+ return false;
+ break;
+ case XGBE_PORT_MODE_1000BASE_X:
+ if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_1000)
+ return false;
+ break;
+ case XGBE_PORT_MODE_NBASE_T:
+ if ((phy_data->port_speeds & XGBE_PHY_PORT_SPEED_100) ||
+ (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_1000) ||
+ (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_2500))
+ return false;
+ break;
+ case XGBE_PORT_MODE_10GBASE_T:
+ if ((phy_data->port_speeds & XGBE_PHY_PORT_SPEED_100) ||
+ (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_1000) ||
+ (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_10000))
+ return false;
+ break;
+ case XGBE_PORT_MODE_10GBASE_R:
+ if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_10000)
+ return false;
+ break;
+ case XGBE_PORT_MODE_SFP:
+ if ((phy_data->port_speeds & XGBE_PHY_PORT_SPEED_100) ||
+ (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_1000) ||
+ (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_10000))
+ return false;
+ break;
+ default:
+ break;
+ }
+
+ return true;
+}
+
+static bool xgbe_phy_conn_type_mismatch(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_phy_data *phy_data = pdata->phy_data;
+
+ switch (phy_data->port_mode) {
+ case XGBE_PORT_MODE_BACKPLANE:
+ case XGBE_PORT_MODE_BACKPLANE_2500:
+ if (phy_data->conn_type == XGBE_CONN_TYPE_BACKPLANE)
+ return false;
+ break;
+ case XGBE_PORT_MODE_1000BASE_T:
+ case XGBE_PORT_MODE_1000BASE_X:
+ case XGBE_PORT_MODE_NBASE_T:
+ case XGBE_PORT_MODE_10GBASE_T:
+ case XGBE_PORT_MODE_10GBASE_R:
+ if (phy_data->conn_type == XGBE_CONN_TYPE_MDIO)
+ return false;
+ break;
+ case XGBE_PORT_MODE_SFP:
+ if (phy_data->conn_type == XGBE_CONN_TYPE_SFP)
+ return false;
+ break;
+ default:
+ break;
+ }
+
+ return true;
+}
+
+static bool xgbe_phy_port_enabled(struct xgbe_prv_data *pdata)
+{
+ unsigned int reg;
+
+ reg = XP_IOREAD(pdata, XP_PROP_0);
+ if (!XP_GET_BITS(reg, XP_PROP_0, PORT_SPEEDS))
+ return false;
+ if (!XP_GET_BITS(reg, XP_PROP_0, CONN_TYPE))
+ return false;
+
+ return true;
+}
+
+static void xgbe_phy_stop(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_phy_data *phy_data = pdata->phy_data;
+
+ /* If we have an external PHY, free it */
+ xgbe_phy_free_phy_device(pdata);
+
+ /* Reset SFP data */
+ xgbe_phy_sfp_reset(phy_data);
+ xgbe_phy_sfp_mod_absent(pdata);
+
+ /* Power off the PHY */
+ xgbe_phy_power_off(pdata);
+
+ /* Stop the I2C controller */
+ pdata->i2c_if.i2c_stop(pdata);
+}
+
+static int xgbe_phy_start(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_phy_data *phy_data = pdata->phy_data;
+ int ret;
+
+ /* Start the I2C controller */
+ ret = pdata->i2c_if.i2c_start(pdata);
+ if (ret)
+ return ret;
+
+ /* Start in highest supported mode */
+ xgbe_phy_set_mode(pdata, phy_data->start_mode);
+
+ /* After starting the I2C controller, we can check for an SFP */
+ switch (phy_data->port_mode) {
+ case XGBE_PORT_MODE_SFP:
+ xgbe_phy_sfp_detect(pdata);
+ break;
+ default:
+ break;
+ }
+
+ /* If we have an external PHY, start it */
+ ret = xgbe_phy_find_phy_device(pdata);
+ if (ret)
+ goto err_i2c;
+
+ return 0;
+
+err_i2c:
+ pdata->i2c_if.i2c_stop(pdata);
+
+ return ret;
+}
+
+static int xgbe_phy_reset(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_phy_data *phy_data = pdata->phy_data;
+ enum xgbe_mode cur_mode;
+ int ret;
+
+ /* Reset by power cycling the PHY */
+ cur_mode = phy_data->cur_mode;
+ xgbe_phy_power_off(pdata);
+ xgbe_phy_set_mode(pdata, cur_mode);
+
+ if (!phy_data->phydev)
+ return 0;
+
+ /* Reset the external PHY */
+ ret = xgbe_phy_mdio_reset(pdata);
+ if (ret)
+ return ret;
+
+ return phy_init_hw(phy_data->phydev);
+}
+
+static void xgbe_phy_exit(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_phy_data *phy_data = pdata->phy_data;
+
+ /* Unregister for driving external PHYs */
+ mdiobus_unregister(phy_data->mii);
+}
+
+static int xgbe_phy_init(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_phy_data *phy_data;
+ struct mii_bus *mii;
+ unsigned int reg;
+ int ret;
+
+ /* Check if enabled */
+ if (!xgbe_phy_port_enabled(pdata)) {
+ dev_info(pdata->dev, "device is not enabled\n");
+ return -ENODEV;
+ }
+
+ /* Initialize the I2C controller */
+ ret = pdata->i2c_if.i2c_init(pdata);
+ if (ret)
+ return ret;
+
+ phy_data = devm_kzalloc(pdata->dev, sizeof(*phy_data), GFP_KERNEL);
+ if (!phy_data)
+ return -ENOMEM;
+ pdata->phy_data = phy_data;
+
+ reg = XP_IOREAD(pdata, XP_PROP_0);
+ phy_data->port_mode = XP_GET_BITS(reg, XP_PROP_0, PORT_MODE);
+ phy_data->port_id = XP_GET_BITS(reg, XP_PROP_0, PORT_ID);
+ phy_data->port_speeds = XP_GET_BITS(reg, XP_PROP_0, PORT_SPEEDS);
+ phy_data->conn_type = XP_GET_BITS(reg, XP_PROP_0, CONN_TYPE);
+ phy_data->mdio_addr = XP_GET_BITS(reg, XP_PROP_0, MDIO_ADDR);
+ if (netif_msg_probe(pdata)) {
+ dev_dbg(pdata->dev, "port mode=%u\n", phy_data->port_mode);
+ dev_dbg(pdata->dev, "port id=%u\n", phy_data->port_id);
+ dev_dbg(pdata->dev, "port speeds=%#x\n", phy_data->port_speeds);
+ dev_dbg(pdata->dev, "conn type=%u\n", phy_data->conn_type);
+ dev_dbg(pdata->dev, "mdio addr=%u\n", phy_data->mdio_addr);
+ }
+
+ reg = XP_IOREAD(pdata, XP_PROP_4);
+ phy_data->redrv = XP_GET_BITS(reg, XP_PROP_4, REDRV_PRESENT);
+ phy_data->redrv_if = XP_GET_BITS(reg, XP_PROP_4, REDRV_IF);
+ phy_data->redrv_addr = XP_GET_BITS(reg, XP_PROP_4, REDRV_ADDR);
+ phy_data->redrv_lane = XP_GET_BITS(reg, XP_PROP_4, REDRV_LANE);
+ phy_data->redrv_model = XP_GET_BITS(reg, XP_PROP_4, REDRV_MODEL);
+ if (phy_data->redrv && netif_msg_probe(pdata)) {
+ dev_dbg(pdata->dev, "redrv present\n");
+ dev_dbg(pdata->dev, "redrv i/f=%u\n", phy_data->redrv_if);
+ dev_dbg(pdata->dev, "redrv addr=%#x\n", phy_data->redrv_addr);
+ dev_dbg(pdata->dev, "redrv lane=%u\n", phy_data->redrv_lane);
+ dev_dbg(pdata->dev, "redrv model=%u\n", phy_data->redrv_model);
+ }
+
+ /* Validate the connection requested */
+ if (xgbe_phy_conn_type_mismatch(pdata)) {
+ dev_err(pdata->dev, "phy mode/connection mismatch (%#x/%#x)\n",
+ phy_data->port_mode, phy_data->conn_type);
+ return -EINVAL;
+ }
+
+ /* Validate the mode requested */
+ if (xgbe_phy_port_mode_mismatch(pdata)) {
+ dev_err(pdata->dev, "phy mode/speed mismatch (%#x/%#x)\n",
+ phy_data->port_mode, phy_data->port_speeds);
+ return -EINVAL;
+ }
+
+ /* Check for and validate MDIO reset support */
+ ret = xgbe_phy_mdio_reset_setup(pdata);
+ if (ret)
+ return ret;
+
+ /* Validate the re-driver information */
+ if (xgbe_phy_redrv_error(phy_data)) {
+ dev_err(pdata->dev, "phy re-driver settings error\n");
+ return -EINVAL;
+ }
+ pdata->kr_redrv = phy_data->redrv;
+
+ /* Indicate current mode is unknown */
+ phy_data->cur_mode = XGBE_MODE_UNKNOWN;
+
+ /* Initialize supported features */
+ pdata->phy.supported = 0;
+
+ switch (phy_data->port_mode) {
+ /* Backplane support */
+ case XGBE_PORT_MODE_BACKPLANE:
+ pdata->phy.supported |= SUPPORTED_Autoneg;
+ pdata->phy.supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;
+ pdata->phy.supported |= SUPPORTED_Backplane;
+ if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_1000) {
+ pdata->phy.supported |= SUPPORTED_1000baseKX_Full;
+ phy_data->start_mode = XGBE_MODE_KX_1000;
+ }
+ if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_10000) {
+ pdata->phy.supported |= SUPPORTED_10000baseKR_Full;
+ if (pdata->fec_ability & MDIO_PMA_10GBR_FECABLE_ABLE)
+ pdata->phy.supported |=
+ SUPPORTED_10000baseR_FEC;
+ phy_data->start_mode = XGBE_MODE_KR;
+ }
+
+ phy_data->phydev_mode = XGBE_MDIO_MODE_NONE;
+ break;
+ case XGBE_PORT_MODE_BACKPLANE_2500:
+ pdata->phy.supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;
+ pdata->phy.supported |= SUPPORTED_Backplane;
+ pdata->phy.supported |= SUPPORTED_2500baseX_Full;
+ phy_data->start_mode = XGBE_MODE_KX_2500;
+
+ phy_data->phydev_mode = XGBE_MDIO_MODE_NONE;
+ break;
+
+ /* MDIO 1GBase-T support */
+ case XGBE_PORT_MODE_1000BASE_T:
+ pdata->phy.supported |= SUPPORTED_Autoneg;
+ pdata->phy.supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;
+ pdata->phy.supported |= SUPPORTED_TP;
+ if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_100) {
+ pdata->phy.supported |= SUPPORTED_100baseT_Full;
+ phy_data->start_mode = XGBE_MODE_SGMII_100;
+ }
+ if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_1000) {
+ pdata->phy.supported |= SUPPORTED_1000baseT_Full;
+ phy_data->start_mode = XGBE_MODE_SGMII_1000;
+ }
+
+ phy_data->phydev_mode = XGBE_MDIO_MODE_CL22;
+ break;
+
+ /* MDIO Base-X support */
+ case XGBE_PORT_MODE_1000BASE_X:
+ pdata->phy.supported |= SUPPORTED_Autoneg;
+ pdata->phy.supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;
+ pdata->phy.supported |= SUPPORTED_FIBRE;
+ pdata->phy.supported |= SUPPORTED_1000baseT_Full;
+ phy_data->start_mode = XGBE_MODE_X;
+
+ phy_data->phydev_mode = XGBE_MDIO_MODE_CL22;
+ break;
+
+ /* MDIO NBase-T support */
+ case XGBE_PORT_MODE_NBASE_T:
+ pdata->phy.supported |= SUPPORTED_Autoneg;
+ pdata->phy.supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;
+ pdata->phy.supported |= SUPPORTED_TP;
+ if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_100) {
+ pdata->phy.supported |= SUPPORTED_100baseT_Full;
+ phy_data->start_mode = XGBE_MODE_SGMII_100;
+ }
+ if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_1000) {
+ pdata->phy.supported |= SUPPORTED_1000baseT_Full;
+ phy_data->start_mode = XGBE_MODE_SGMII_1000;
+ }
+ if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_2500) {
+ pdata->phy.supported |= SUPPORTED_2500baseX_Full;
+ phy_data->start_mode = XGBE_MODE_KX_2500;
+ }
+
+ phy_data->phydev_mode = XGBE_MDIO_MODE_CL45;
+ break;
+
+ /* 10GBase-T support */
+ case XGBE_PORT_MODE_10GBASE_T:
+ pdata->phy.supported |= SUPPORTED_Autoneg;
+ pdata->phy.supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;
+ pdata->phy.supported |= SUPPORTED_TP;
+ if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_100) {
+ pdata->phy.supported |= SUPPORTED_100baseT_Full;
+ phy_data->start_mode = XGBE_MODE_SGMII_100;
+ }
+ if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_1000) {
+ pdata->phy.supported |= SUPPORTED_1000baseT_Full;
+ phy_data->start_mode = XGBE_MODE_SGMII_1000;
+ }
+ if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_10000) {
+ pdata->phy.supported |= SUPPORTED_10000baseT_Full;
+ phy_data->start_mode = XGBE_MODE_KR;
+ }
+
+ phy_data->phydev_mode = XGBE_MDIO_MODE_NONE;
+ break;
+
+ /* 10GBase-R support */
+ case XGBE_PORT_MODE_10GBASE_R:
+ pdata->phy.supported |= SUPPORTED_Autoneg;
+ pdata->phy.supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;
+ pdata->phy.supported |= SUPPORTED_TP;
+ pdata->phy.supported |= SUPPORTED_10000baseT_Full;
+ if (pdata->fec_ability & MDIO_PMA_10GBR_FECABLE_ABLE)
+ pdata->phy.supported |= SUPPORTED_10000baseR_FEC;
+ phy_data->start_mode = XGBE_MODE_SFI;
+
+ phy_data->phydev_mode = XGBE_MDIO_MODE_NONE;
+ break;
+
+ /* SFP support */
+ case XGBE_PORT_MODE_SFP:
+ pdata->phy.supported |= SUPPORTED_Autoneg;
+ pdata->phy.supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;
+ pdata->phy.supported |= SUPPORTED_TP;
+ pdata->phy.supported |= SUPPORTED_FIBRE;
+ if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_100) {
+ pdata->phy.supported |= SUPPORTED_100baseT_Full;
+ phy_data->start_mode = XGBE_MODE_SGMII_100;
+ }
+ if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_1000) {
+ pdata->phy.supported |= SUPPORTED_1000baseT_Full;
+ phy_data->start_mode = XGBE_MODE_SGMII_1000;
+ }
+ if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_10000) {
+ pdata->phy.supported |= SUPPORTED_10000baseT_Full;
+ phy_data->start_mode = XGBE_MODE_SFI;
+ if (pdata->fec_ability & MDIO_PMA_10GBR_FECABLE_ABLE)
+ pdata->phy.supported |=
+ SUPPORTED_10000baseR_FEC;
+ }
+
+ phy_data->phydev_mode = XGBE_MDIO_MODE_CL22;
+
+ xgbe_phy_sfp_setup(pdata);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (netif_msg_probe(pdata))
+ dev_dbg(pdata->dev, "phy supported=%#x\n",
+ pdata->phy.supported);
+
+ if ((phy_data->conn_type & XGBE_CONN_TYPE_MDIO) &&
+ (phy_data->phydev_mode != XGBE_MDIO_MODE_NONE)) {
+ ret = pdata->hw_if.set_ext_mii_mode(pdata, phy_data->mdio_addr,
+ phy_data->phydev_mode);
+ if (ret) {
+ dev_err(pdata->dev,
+ "mdio port/clause not compatible (%d/%u)\n",
+ phy_data->mdio_addr, phy_data->phydev_mode);
+ return -EINVAL;
+ }
+ }
+
+ if (phy_data->redrv && !phy_data->redrv_if) {
+ ret = pdata->hw_if.set_ext_mii_mode(pdata, phy_data->redrv_addr,
+ XGBE_MDIO_MODE_CL22);
+ if (ret) {
+ dev_err(pdata->dev,
+ "redriver mdio port not compatible (%u)\n",
+ phy_data->redrv_addr);
+ return -EINVAL;
+ }
+ }
+
+ /* Register for driving external PHYs */
+ mii = devm_mdiobus_alloc(pdata->dev);
+ if (!mii) {
+ dev_err(pdata->dev, "mdiobus_alloc failed\n");
+ return -ENOMEM;
+ }
+
+ mii->priv = pdata;
+ mii->name = "amd-xgbe-mii";
+ mii->read = xgbe_phy_mii_read;
+ mii->write = xgbe_phy_mii_write;
+ mii->parent = pdata->dev;
+ mii->phy_mask = ~0;
+ snprintf(mii->id, sizeof(mii->id), "%s", dev_name(pdata->dev));
+ ret = mdiobus_register(mii);
+ if (ret) {
+ dev_err(pdata->dev, "mdiobus_register failed\n");
+ return ret;
+ }
+ phy_data->mii = mii;
+
+ return 0;
+}
+
+void xgbe_init_function_ptrs_phy_v2(struct xgbe_phy_if *phy_if)
+{
+ struct xgbe_phy_impl_if *phy_impl = &phy_if->phy_impl;
+
+ phy_impl->init = xgbe_phy_init;
+ phy_impl->exit = xgbe_phy_exit;
+
+ phy_impl->reset = xgbe_phy_reset;
+ phy_impl->start = xgbe_phy_start;
+ phy_impl->stop = xgbe_phy_stop;
+
+ phy_impl->link_status = xgbe_phy_link_status;
+
+ phy_impl->valid_speed = xgbe_phy_valid_speed;
+
+ phy_impl->use_mode = xgbe_phy_use_mode;
+ phy_impl->set_mode = xgbe_phy_set_mode;
+ phy_impl->get_mode = xgbe_phy_get_mode;
+ phy_impl->switch_mode = xgbe_phy_switch_mode;
+ phy_impl->cur_mode = xgbe_phy_cur_mode;
+
+ phy_impl->an_mode = xgbe_phy_an_mode;
+
+ phy_impl->an_config = xgbe_phy_an_config;
+
+ phy_impl->an_advertising = xgbe_phy_an_advertising;
+
+ phy_impl->an_outcome = xgbe_phy_an_outcome;
+}
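+
+/* The core driver reaches these callbacks through pdata->phy_if.phy_impl;
+ * a hypothetical call site, for illustration only:
+ *
+ *	if (pdata->phy_if.phy_impl.valid_speed(pdata, speed))
+ *		pdata->phy_if.phy_impl.set_mode(pdata, mode);
+ */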
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-platform.c b/drivers/net/ethernet/amd/xgbe/xgbe-platform.c
new file mode 100644
index 000000000000..84d4c51cab8c
--- /dev/null
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-platform.c
@@ -0,0 +1,642 @@
+/*
+ * AMD 10Gb Ethernet driver
+ *
+ * This file is available to you under your choice of the following two
+ * licenses:
+ *
+ * License 1: GPLv2
+ *
+ * Copyright (c) 2014-2016 Advanced Micro Devices, Inc.
+ *
+ * This file is free software; you may copy, redistribute and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * This file incorporates work covered by the following copyright and
+ * permission notice:
+ * The Synopsys DWC ETHER XGMAC Software Driver and documentation
+ * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
+ * Inc. unless otherwise expressly agreed to in writing between Synopsys
+ * and you.
+ *
+ * The Software IS NOT an item of Licensed Software or Licensed Product
+ * under any End User Software License Agreement or Agreement for Licensed
+ * Product with Synopsys or any supplement thereto. Permission is hereby
+ * granted, free of charge, to any person obtaining a copy of this software
+ * annotated with this license and the Software, to deal in the Software
+ * without restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is furnished
+ * to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
+ * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+ * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *
+ * License 2: Modified BSD
+ *
+ * Copyright (c) 2014-2016 Advanced Micro Devices, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Advanced Micro Devices, Inc. nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * This file incorporates work covered by the following copyright and
+ * permission notice:
+ * The Synopsys DWC ETHER XGMAC Software Driver and documentation
+ * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
+ * Inc. unless otherwise expressly agreed to in writing between Synopsys
+ * and you.
+ *
+ * The Software IS NOT an item of Licensed Software or Licensed Product
+ * under any End User Software License Agreement or Agreement for Licensed
+ * Product with Synopsys or any supplement thereto. Permission is hereby
+ * granted, free of charge, to any person obtaining a copy of this software
+ * annotated with this license and the Software, to deal in the Software
+ * without restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is furnished
+ * to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
+ * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+ * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/spinlock.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_net.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+#include <linux/of_device.h>
+#include <linux/clk.h>
+#include <linux/property.h>
+#include <linux/acpi.h>
+#include <linux/mdio.h>
+
+#include "xgbe.h"
+#include "xgbe-common.h"
+
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id xgbe_acpi_match[];
+
+static struct xgbe_version_data *xgbe_acpi_vdata(struct xgbe_prv_data *pdata)
+{
+ const struct acpi_device_id *id;
+
+ id = acpi_match_device(xgbe_acpi_match, pdata->dev);
+
+ return id ? (struct xgbe_version_data *)id->driver_data : NULL;
+}
+
+static int xgbe_acpi_support(struct xgbe_prv_data *pdata)
+{
+ struct device *dev = pdata->dev;
+ u32 property;
+ int ret;
+
+ /* Obtain the system clock setting */
+ ret = device_property_read_u32(dev, XGBE_ACPI_DMA_FREQ, &property);
+ if (ret) {
+ dev_err(dev, "unable to obtain %s property\n",
+ XGBE_ACPI_DMA_FREQ);
+ return ret;
+ }
+ pdata->sysclk_rate = property;
+
+ /* Obtain the PTP clock setting */
+ ret = device_property_read_u32(dev, XGBE_ACPI_PTP_FREQ, &property);
+ if (ret) {
+ dev_err(dev, "unable to obtain %s property\n",
+ XGBE_ACPI_PTP_FREQ);
+ return ret;
+ }
+ pdata->ptpclk_rate = property;
+
+ return 0;
+}
+#else /* CONFIG_ACPI */
+static struct xgbe_version_data *xgbe_acpi_vdata(struct xgbe_prv_data *pdata)
+{
+ return NULL;
+}
+
+static int xgbe_acpi_support(struct xgbe_prv_data *pdata)
+{
+ return -EINVAL;
+}
+#endif /* CONFIG_ACPI */
+
+#ifdef CONFIG_OF
+static const struct of_device_id xgbe_of_match[];
+
+static struct xgbe_version_data *xgbe_of_vdata(struct xgbe_prv_data *pdata)
+{
+ const struct of_device_id *id;
+
+ id = of_match_device(xgbe_of_match, pdata->dev);
+
+ return id ? (struct xgbe_version_data *)id->data : NULL;
+}
+
+static int xgbe_of_support(struct xgbe_prv_data *pdata)
+{
+ struct device *dev = pdata->dev;
+
+ /* Obtain the system clock setting */
+ pdata->sysclk = devm_clk_get(dev, XGBE_DMA_CLOCK);
+ if (IS_ERR(pdata->sysclk)) {
+ dev_err(dev, "dma devm_clk_get failed\n");
+ return PTR_ERR(pdata->sysclk);
+ }
+ pdata->sysclk_rate = clk_get_rate(pdata->sysclk);
+
+ /* Obtain the PTP clock setting */
+ pdata->ptpclk = devm_clk_get(dev, XGBE_PTP_CLOCK);
+ if (IS_ERR(pdata->ptpclk)) {
+ dev_err(dev, "ptp devm_clk_get failed\n");
+ return PTR_ERR(pdata->ptpclk);
+ }
+ pdata->ptpclk_rate = clk_get_rate(pdata->ptpclk);
+
+ return 0;
+}
+
+static struct platform_device *xgbe_of_get_phy_pdev(struct xgbe_prv_data *pdata)
+{
+ struct device *dev = pdata->dev;
+ struct device_node *phy_node;
+ struct platform_device *phy_pdev;
+
+ phy_node = of_parse_phandle(dev->of_node, "phy-handle", 0);
+ if (phy_node) {
+ /* Old style device tree:
+ * The XGBE and PHY resources are separate
+ */
+ phy_pdev = of_find_device_by_node(phy_node);
+ of_node_put(phy_node);
+ } else {
+ /* New style device tree:
+ * The XGBE and PHY resources are grouped together with
+ * the PHY resources listed last
+ */
+ get_device(dev);
+ phy_pdev = pdata->platdev;
+ }
+
+ return phy_pdev;
+}
+#else /* CONFIG_OF */
+static struct xgbe_version_data *xgbe_of_vdata(struct xgbe_prv_data *pdata)
+{
+ return NULL;
+}
+
+static int xgbe_of_support(struct xgbe_prv_data *pdata)
+{
+ return -EINVAL;
+}
+
+static struct platform_device *xgbe_of_get_phy_pdev(struct xgbe_prv_data *pdata)
+{
+ return NULL;
+}
+#endif /* CONFIG_OF */
+
+static unsigned int xgbe_resource_count(struct platform_device *pdev,
+ unsigned int type)
+{
+ unsigned int count;
+ int i;
+
+ for (i = 0, count = 0; i < pdev->num_resources; i++) {
+ struct resource *res = &pdev->resource[i];
+
+ if (type == resource_type(res))
+ count++;
+ }
+
+ return count;
+}
+
+static struct platform_device *xgbe_get_phy_pdev(struct xgbe_prv_data *pdata)
+{
+ struct platform_device *phy_pdev;
+
+ if (pdata->use_acpi) {
+ get_device(pdata->dev);
+ phy_pdev = pdata->platdev;
+ } else {
+ phy_pdev = xgbe_of_get_phy_pdev(pdata);
+ }
+
+ return phy_pdev;
+}
+
+static struct xgbe_version_data *xgbe_get_vdata(struct xgbe_prv_data *pdata)
+{
+ return pdata->use_acpi ? xgbe_acpi_vdata(pdata)
+ : xgbe_of_vdata(pdata);
+}
+
+static int xgbe_platform_probe(struct platform_device *pdev)
+{
+ struct xgbe_prv_data *pdata;
+ struct device *dev = &pdev->dev;
+ struct platform_device *phy_pdev;
+ struct resource *res;
+ const char *phy_mode;
+ unsigned int phy_memnum, phy_irqnum;
+ unsigned int dma_irqnum, dma_irqend;
+ enum dev_dma_attr attr;
+ int ret;
+
+ pdata = xgbe_alloc_pdata(dev);
+ if (IS_ERR(pdata)) {
+ ret = PTR_ERR(pdata);
+ goto err_alloc;
+ }
+
+ pdata->platdev = pdev;
+ pdata->adev = ACPI_COMPANION(dev);
+ platform_set_drvdata(pdev, pdata);
+
+ /* Check if we should use ACPI or DT */
+ pdata->use_acpi = dev->of_node ? 0 : 1;
+
+ /* Get the version data */
+ pdata->vdata = xgbe_get_vdata(pdata);
+
+ phy_pdev = xgbe_get_phy_pdev(pdata);
+ if (!phy_pdev) {
+ dev_err(dev, "unable to obtain phy device\n");
+ ret = -EINVAL;
+ goto err_phydev;
+ }
+ pdata->phy_platdev = phy_pdev;
+ pdata->phy_dev = &phy_pdev->dev;
+
+ if (pdev == phy_pdev) {
+ /* New style device tree or ACPI:
+ * The XGBE and PHY resources are grouped together with
+ * the PHY resources listed last
+ */
+ phy_memnum = xgbe_resource_count(pdev, IORESOURCE_MEM) - 3;
+ phy_irqnum = xgbe_resource_count(pdev, IORESOURCE_IRQ) - 1;
+ dma_irqnum = 1;
+ dma_irqend = phy_irqnum;
+ } else {
+ /* Old style device tree:
+ * The XGBE and PHY resources are separate
+ */
+ phy_memnum = 0;
+ phy_irqnum = 0;
+ dma_irqnum = 1;
+ dma_irqend = xgbe_resource_count(pdev, IORESOURCE_IRQ);
+ }
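+
+	/* The grouped layout assumes the last three memory resources are
+	 * the PHY rxtx/sir0/sir1 regions and the last interrupt is the
+	 * auto-negotiation interrupt, which is how the offsets above are
+	 * derived.
+	 */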
+
+ /* Obtain the mmio areas for the device */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ pdata->xgmac_regs = devm_ioremap_resource(dev, res);
+ if (IS_ERR(pdata->xgmac_regs)) {
+ dev_err(dev, "xgmac ioremap failed\n");
+ ret = PTR_ERR(pdata->xgmac_regs);
+ goto err_io;
+ }
+ if (netif_msg_probe(pdata))
+ dev_dbg(dev, "xgmac_regs = %p\n", pdata->xgmac_regs);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ pdata->xpcs_regs = devm_ioremap_resource(dev, res);
+ if (IS_ERR(pdata->xpcs_regs)) {
+ dev_err(dev, "xpcs ioremap failed\n");
+ ret = PTR_ERR(pdata->xpcs_regs);
+ goto err_io;
+ }
+ if (netif_msg_probe(pdata))
+ dev_dbg(dev, "xpcs_regs = %p\n", pdata->xpcs_regs);
+
+ res = platform_get_resource(phy_pdev, IORESOURCE_MEM, phy_memnum++);
+ pdata->rxtx_regs = devm_ioremap_resource(dev, res);
+ if (IS_ERR(pdata->rxtx_regs)) {
+ dev_err(dev, "rxtx ioremap failed\n");
+ ret = PTR_ERR(pdata->rxtx_regs);
+ goto err_io;
+ }
+ if (netif_msg_probe(pdata))
+ dev_dbg(dev, "rxtx_regs = %p\n", pdata->rxtx_regs);
+
+ res = platform_get_resource(phy_pdev, IORESOURCE_MEM, phy_memnum++);
+ pdata->sir0_regs = devm_ioremap_resource(dev, res);
+ if (IS_ERR(pdata->sir0_regs)) {
+ dev_err(dev, "sir0 ioremap failed\n");
+ ret = PTR_ERR(pdata->sir0_regs);
+ goto err_io;
+ }
+ if (netif_msg_probe(pdata))
+ dev_dbg(dev, "sir0_regs = %p\n", pdata->sir0_regs);
+
+ res = platform_get_resource(phy_pdev, IORESOURCE_MEM, phy_memnum++);
+ pdata->sir1_regs = devm_ioremap_resource(dev, res);
+ if (IS_ERR(pdata->sir1_regs)) {
+ dev_err(dev, "sir1 ioremap failed\n");
+ ret = PTR_ERR(pdata->sir1_regs);
+ goto err_io;
+ }
+ if (netif_msg_probe(pdata))
+ dev_dbg(dev, "sir1_regs = %p\n", pdata->sir1_regs);
+
+ /* Retrieve the MAC address */
+ ret = device_property_read_u8_array(dev, XGBE_MAC_ADDR_PROPERTY,
+ pdata->mac_addr,
+ sizeof(pdata->mac_addr));
+ if (ret || !is_valid_ether_addr(pdata->mac_addr)) {
+ dev_err(dev, "invalid %s property\n", XGBE_MAC_ADDR_PROPERTY);
+ if (!ret)
+ ret = -EINVAL;
+ goto err_io;
+ }
+
+ /* Retrieve the PHY mode - it must be "xgmii" */
+ ret = device_property_read_string(dev, XGBE_PHY_MODE_PROPERTY,
+ &phy_mode);
+ if (ret || strcmp(phy_mode, phy_modes(PHY_INTERFACE_MODE_XGMII))) {
+ dev_err(dev, "invalid %s property\n", XGBE_PHY_MODE_PROPERTY);
+ if (!ret)
+ ret = -EINVAL;
+ goto err_io;
+ }
+ pdata->phy_mode = PHY_INTERFACE_MODE_XGMII;
+
+ /* Check for per channel interrupt support */
+ if (device_property_present(dev, XGBE_DMA_IRQS_PROPERTY)) {
+ pdata->per_channel_irq = 1;
+ pdata->channel_irq_mode = XGBE_IRQ_MODE_EDGE;
+ }
+
+ /* Obtain device settings unique to ACPI/OF */
+ if (pdata->use_acpi)
+ ret = xgbe_acpi_support(pdata);
+ else
+ ret = xgbe_of_support(pdata);
+ if (ret)
+ goto err_io;
+
+ /* Set the DMA coherency values */
+ attr = device_get_dma_attr(dev);
+ if (attr == DEV_DMA_NOT_SUPPORTED) {
+ dev_err(dev, "DMA is not supported");
+ ret = -ENODEV;
+ goto err_io;
+ }
+ pdata->coherent = (attr == DEV_DMA_COHERENT);
+ if (pdata->coherent) {
+ pdata->axdomain = XGBE_DMA_OS_AXDOMAIN;
+ pdata->arcache = XGBE_DMA_OS_ARCACHE;
+ pdata->awcache = XGBE_DMA_OS_AWCACHE;
+ } else {
+ pdata->axdomain = XGBE_DMA_SYS_AXDOMAIN;
+ pdata->arcache = XGBE_DMA_SYS_ARCACHE;
+ pdata->awcache = XGBE_DMA_SYS_AWCACHE;
+ }
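+
+	/* These values program the AXI master attributes: outer-shareable,
+	 * write-back, write-allocate when the device is DMA coherent, and
+	 * the non-cacheable system domain otherwise (see the
+	 * XGBE_DMA_OS_* and XGBE_DMA_SYS_* definitions in xgbe.h).
+	 */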
+
+ /* Set the maximum fifo amounts */
+ pdata->tx_max_fifo_size = pdata->vdata->tx_max_fifo_size;
+ pdata->rx_max_fifo_size = pdata->vdata->rx_max_fifo_size;
+
+ /* Set the hardware channel and queue counts */
+ xgbe_set_counts(pdata);
+
+ /* Always have XGMAC and XPCS (auto-negotiation) interrupts */
+ pdata->irq_count = 2;
+
+ /* Get the device interrupt */
+ ret = platform_get_irq(pdev, 0);
+ if (ret < 0) {
+ dev_err(dev, "platform_get_irq 0 failed\n");
+ goto err_io;
+ }
+ pdata->dev_irq = ret;
+
+ /* Get the per channel DMA interrupts */
+ if (pdata->per_channel_irq) {
+ unsigned int i, max = ARRAY_SIZE(pdata->channel_irq);
+
+ for (i = 0; (i < max) && (dma_irqnum < dma_irqend); i++) {
+ ret = platform_get_irq(pdata->platdev, dma_irqnum++);
+ if (ret < 0) {
+ netdev_err(pdata->netdev,
+ "platform_get_irq %u failed\n",
+ dma_irqnum - 1);
+ goto err_io;
+ }
+
+ pdata->channel_irq[i] = ret;
+ }
+
+ pdata->channel_irq_count = max;
+
+ pdata->irq_count += max;
+ }
+
+ /* Get the auto-negotiation interrupt */
+ ret = platform_get_irq(phy_pdev, phy_irqnum++);
+ if (ret < 0) {
+ dev_err(dev, "platform_get_irq phy 0 failed\n");
+ goto err_io;
+ }
+ pdata->an_irq = ret;
+
+ /* Configure the netdev resource */
+ ret = xgbe_config_netdev(pdata);
+ if (ret)
+ goto err_io;
+
+ netdev_notice(pdata->netdev, "net device enabled\n");
+
+ return 0;
+
+err_io:
+ platform_device_put(phy_pdev);
+
+err_phydev:
+ xgbe_free_pdata(pdata);
+
+err_alloc:
+ dev_notice(dev, "net device not enabled\n");
+
+ return ret;
+}
+
+static int xgbe_platform_remove(struct platform_device *pdev)
+{
+ struct xgbe_prv_data *pdata = platform_get_drvdata(pdev);
+
+ xgbe_deconfig_netdev(pdata);
+
+ platform_device_put(pdata->phy_platdev);
+
+ xgbe_free_pdata(pdata);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int xgbe_platform_suspend(struct device *dev)
+{
+ struct xgbe_prv_data *pdata = dev_get_drvdata(dev);
+ struct net_device *netdev = pdata->netdev;
+ int ret = 0;
+
+ DBGPR("-->xgbe_suspend\n");
+
+ if (netif_running(netdev))
+ ret = xgbe_powerdown(netdev, XGMAC_DRIVER_CONTEXT);
+
+ pdata->lpm_ctrl = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL1);
+ pdata->lpm_ctrl |= MDIO_CTRL1_LPOWER;
+ XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_CTRL1, pdata->lpm_ctrl);
+
+ DBGPR("<--xgbe_suspend\n");
+
+ return ret;
+}
+
+static int xgbe_platform_resume(struct device *dev)
+{
+ struct xgbe_prv_data *pdata = dev_get_drvdata(dev);
+ struct net_device *netdev = pdata->netdev;
+ int ret = 0;
+
+ DBGPR("-->xgbe_resume\n");
+
+ pdata->lpm_ctrl &= ~MDIO_CTRL1_LPOWER;
+ XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_CTRL1, pdata->lpm_ctrl);
+
+ if (netif_running(netdev)) {
+ ret = xgbe_powerup(netdev, XGMAC_DRIVER_CONTEXT);
+
+ /* Schedule a restart in case the link or phy state changed
+ * while we were powered down.
+ */
+ schedule_work(&pdata->restart_work);
+ }
+
+ DBGPR("<--xgbe_resume\n");
+
+ return ret;
+}
+#endif /* CONFIG_PM_SLEEP */
+
+static const struct xgbe_version_data xgbe_v1 = {
+ .init_function_ptrs_phy_impl = xgbe_init_function_ptrs_phy_v1,
+ .xpcs_access = XGBE_XPCS_ACCESS_V1,
+ .tx_max_fifo_size = 81920,
+ .rx_max_fifo_size = 81920,
+ .tx_tstamp_workaround = 1,
+};
+
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id xgbe_acpi_match[] = {
+ { .id = "AMDI8001",
+ .driver_data = (kernel_ulong_t)&xgbe_v1 },
+ {},
+};
+
+MODULE_DEVICE_TABLE(acpi, xgbe_acpi_match);
+#endif
+
+#ifdef CONFIG_OF
+static const struct of_device_id xgbe_of_match[] = {
+ { .compatible = "amd,xgbe-seattle-v1a",
+ .data = &xgbe_v1 },
+ {},
+};
+
+MODULE_DEVICE_TABLE(of, xgbe_of_match);
+#endif
+
+static SIMPLE_DEV_PM_OPS(xgbe_platform_pm_ops,
+ xgbe_platform_suspend, xgbe_platform_resume);
+
+static struct platform_driver xgbe_driver = {
+ .driver = {
+ .name = XGBE_DRV_NAME,
+#ifdef CONFIG_ACPI
+ .acpi_match_table = xgbe_acpi_match,
+#endif
+#ifdef CONFIG_OF
+ .of_match_table = xgbe_of_match,
+#endif
+ .pm = &xgbe_platform_pm_ops,
+ },
+ .probe = xgbe_platform_probe,
+ .remove = xgbe_platform_remove,
+};
+
+int xgbe_platform_init(void)
+{
+ return platform_driver_register(&xgbe_driver);
+}
+
+void xgbe_platform_exit(void)
+{
+ platform_driver_unregister(&xgbe_driver);
+}
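
A minimal sketch of how these per-bus entry points are expected to be tied together at module load (an assumption: the actual wiring lives in xgbe-main.c, which is outside this hunk, and the function names below are illustrative; xgbe_pci_init()/xgbe_pci_exit() compile to no-op stubs when CONFIG_PCI is disabled, per the xgbe.h hunk further down):

static int __init xgbe_mod_init(void)
{
	int ret;

	/* register the ACPI/OF platform driver first */
	ret = xgbe_platform_init();
	if (ret)
		return ret;

	/* the PCI side returns 0 from its stub when !CONFIG_PCI */
	ret = xgbe_pci_init();
	if (ret)
		xgbe_platform_exit();

	return ret;
}

static void __exit xgbe_mod_exit(void)
{
	xgbe_pci_exit();
	xgbe_platform_exit();
}
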
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe.h b/drivers/net/ethernet/amd/xgbe/xgbe.h
index 5dd17dcea2f8..f52a9bd05bac 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe.h
+++ b/drivers/net/ethernet/amd/xgbe/xgbe.h
@@ -127,9 +127,10 @@
#include <linux/timecounter.h>
#include <linux/net_tstamp.h>
#include <net/dcbnl.h>
+#include <linux/completion.h>
#define XGBE_DRV_NAME "amd-xgbe"
-#define XGBE_DRV_VERSION "1.0.2"
+#define XGBE_DRV_VERSION "1.0.3"
#define XGBE_DRV_DESC "AMD 10 Gigabit Ethernet Driver"
/* Descriptor related defines */
@@ -158,7 +159,8 @@
#define XGBE_MAX_DMA_CHANNELS 16
#define XGBE_MAX_QUEUES 16
-#define XGBE_DMA_STOP_TIMEOUT 5
+#define XGBE_PRIORITY_QUEUES 8
+#define XGBE_DMA_STOP_TIMEOUT 1
/* DMA cache settings - Outer sharable, write-back, write-allocate */
#define XGBE_DMA_OS_AXDOMAIN 0x2
@@ -170,6 +172,10 @@
#define XGBE_DMA_SYS_ARCACHE 0x0
#define XGBE_DMA_SYS_AWCACHE 0x0
+/* DMA channel interrupt modes */
+#define XGBE_IRQ_MODE_EDGE 0
+#define XGBE_IRQ_MODE_LEVEL 1
+
#define XGBE_DMA_INTERRUPT_MASK 0x31c7
#define XGMAC_MIN_PACKET 60
@@ -177,18 +183,19 @@
#define XGMAC_MAX_STD_PACKET 1518
#define XGMAC_JUMBO_PACKET_MTU 9000
#define XGMAC_MAX_JUMBO_PACKET 9018
+#define XGMAC_ETH_PREAMBLE (12 + 8) /* Inter-frame gap + preamble */
+
+#define XGMAC_PFC_DATA_LEN 46
+#define XGMAC_PFC_DELAYS 14000
+
+#define XGMAC_PRIO_QUEUES(_cnt) \
+ min_t(unsigned int, IEEE_8021QAZ_MAX_TCS, (_cnt))
/* Common property names */
#define XGBE_MAC_ADDR_PROPERTY "mac-address"
#define XGBE_PHY_MODE_PROPERTY "phy-mode"
#define XGBE_DMA_IRQS_PROPERTY "amd,per-channel-interrupt"
#define XGBE_SPEEDSET_PROPERTY "amd,speed-set"
-#define XGBE_BLWC_PROPERTY "amd,serdes-blwc"
-#define XGBE_CDR_RATE_PROPERTY "amd,serdes-cdr-rate"
-#define XGBE_PQ_SKEW_PROPERTY "amd,serdes-pq-skew"
-#define XGBE_TX_AMP_PROPERTY "amd,serdes-tx-amp"
-#define XGBE_DFE_CFG_PROPERTY "amd,serdes-dfe-tap-config"
-#define XGBE_DFE_ENA_PROPERTY "amd,serdes-dfe-tap-enable"
/* Device-tree clock names */
#define XGBE_DMA_CLOCK "dma_clk"
@@ -198,6 +205,20 @@
#define XGBE_ACPI_DMA_FREQ "amd,dma-freq"
#define XGBE_ACPI_PTP_FREQ "amd,ptp-freq"
+/* PCI BAR mapping */
+#define XGBE_XGMAC_BAR 0
+#define XGBE_XPCS_BAR 1
+#define XGBE_MAC_PROP_OFFSET 0x1d000
+#define XGBE_I2C_CTRL_OFFSET 0x1e000
+
+/* PCI MSIx support */
+#define XGBE_MSIX_BASE_COUNT 4
+#define XGBE_MSIX_MIN_COUNT (XGBE_MSIX_BASE_COUNT + 1)
+
+/* PCI clock frequencies */
+#define XGBE_V2_DMA_CLOCK_FREQ 500000000 /* 500 MHz */
+#define XGBE_V2_PTP_CLOCK_FREQ 125000000 /* 125 MHz */
+
/* Timestamp support - values based on 50MHz PTP clock
* 50MHz => 20 nsec
*/
@@ -208,7 +229,12 @@
#define XGMAC_DRIVER_CONTEXT 1
#define XGMAC_IOCTL_CONTEXT 2
-#define XGBE_FIFO_MAX 81920
+#define XGMAC_FIFO_MIN_ALLOC 2048
+#define XGMAC_FIFO_UNIT 256
+#define XGMAC_FIFO_ALIGN(_x) \
+ (((_x) + XGMAC_FIFO_UNIT - 1) & ~(XGMAC_FIFO_UNIT - 1))
+#define XGMAC_FIFO_FC_OFF 2048
+#define XGMAC_FIFO_FC_MIN 4096
#define XGBE_TC_MIN_QUANTUM 10
@@ -233,6 +259,14 @@
/* Flow control queue count */
#define XGMAC_MAX_FLOW_CONTROL_QUEUES 8
+/* Flow control threshold units */
+#define XGMAC_FLOW_CONTROL_UNIT 512
+#define XGMAC_FLOW_CONTROL_ALIGN(_x) \
+ (((_x) + XGMAC_FLOW_CONTROL_UNIT - 1) & ~(XGMAC_FLOW_CONTROL_UNIT - 1))
+#define XGMAC_FLOW_CONTROL_VALUE(_x) \
+ (((_x) < 1024) ? 0 : ((_x) / XGMAC_FLOW_CONTROL_UNIT) - 2)
+#define XGMAC_FLOW_CONTROL_MAX 33280
+
/* Maximum MAC address hash table size (256 bits = 8 bytes) */
#define XGBE_MAC_HASH_TABLE_SIZE 8
@@ -244,46 +278,19 @@
/* Auto-negotiation */
#define XGBE_AN_MS_TIMEOUT 500
-#define XGBE_LINK_TIMEOUT 10
-
-#define XGBE_AN_INT_CMPLT 0x01
-#define XGBE_AN_INC_LINK 0x02
-#define XGBE_AN_PG_RCV 0x04
-#define XGBE_AN_INT_MASK 0x07
-
-/* Rate-change complete wait/retry count */
-#define XGBE_RATECHANGE_COUNT 500
-
-/* Default SerDes settings */
-#define XGBE_SPEED_10000_BLWC 0
-#define XGBE_SPEED_10000_CDR 0x7
-#define XGBE_SPEED_10000_PLL 0x1
-#define XGBE_SPEED_10000_PQ 0x12
-#define XGBE_SPEED_10000_RATE 0x0
-#define XGBE_SPEED_10000_TXAMP 0xa
-#define XGBE_SPEED_10000_WORD 0x7
-#define XGBE_SPEED_10000_DFE_TAP_CONFIG 0x1
-#define XGBE_SPEED_10000_DFE_TAP_ENABLE 0x7f
-
-#define XGBE_SPEED_2500_BLWC 1
-#define XGBE_SPEED_2500_CDR 0x2
-#define XGBE_SPEED_2500_PLL 0x0
-#define XGBE_SPEED_2500_PQ 0xa
-#define XGBE_SPEED_2500_RATE 0x1
-#define XGBE_SPEED_2500_TXAMP 0xf
-#define XGBE_SPEED_2500_WORD 0x1
-#define XGBE_SPEED_2500_DFE_TAP_CONFIG 0x3
-#define XGBE_SPEED_2500_DFE_TAP_ENABLE 0x0
-
-#define XGBE_SPEED_1000_BLWC 1
-#define XGBE_SPEED_1000_CDR 0x2
-#define XGBE_SPEED_1000_PLL 0x0
-#define XGBE_SPEED_1000_PQ 0xa
-#define XGBE_SPEED_1000_RATE 0x3
-#define XGBE_SPEED_1000_TXAMP 0xf
-#define XGBE_SPEED_1000_WORD 0x1
-#define XGBE_SPEED_1000_DFE_TAP_CONFIG 0x3
-#define XGBE_SPEED_1000_DFE_TAP_ENABLE 0x0
+#define XGBE_LINK_TIMEOUT 5
+
+#define XGBE_SGMII_AN_LINK_STATUS BIT(1)
+#define XGBE_SGMII_AN_LINK_SPEED (BIT(2) | BIT(3))
+#define XGBE_SGMII_AN_LINK_SPEED_100 0x04
+#define XGBE_SGMII_AN_LINK_SPEED_1000 0x08
+#define XGBE_SGMII_AN_LINK_DUPLEX BIT(4)
+
+/* ECC correctable error notification window (seconds) */
+#define XGBE_ECC_LIMIT 60
+
+/* MDIO port types */
+#define XGMAC_MAX_C22_PORT 3
struct xgbe_prv_data;
@@ -461,6 +468,7 @@ enum xgbe_state {
XGBE_DOWN,
XGBE_LINK_INIT,
XGBE_LINK_ERR,
+ XGBE_STOPPED,
};
enum xgbe_int {
@@ -480,6 +488,12 @@ enum xgbe_int_state {
XGMAC_INT_STATE_RESTORE,
};
+enum xgbe_ecc_sec {
+ XGBE_ECC_SEC_TX,
+ XGBE_ECC_SEC_RX,
+ XGBE_ECC_SEC_DESC,
+};
+
enum xgbe_speed {
XGBE_SPEED_1000 = 0,
XGBE_SPEED_2500,
@@ -487,6 +501,19 @@ enum xgbe_speed {
XGBE_SPEEDS,
};
+enum xgbe_xpcs_access {
+ XGBE_XPCS_ACCESS_V1 = 0,
+ XGBE_XPCS_ACCESS_V2,
+};
+
+enum xgbe_an_mode {
+ XGBE_AN_MODE_CL73 = 0,
+ XGBE_AN_MODE_CL73_REDRV,
+ XGBE_AN_MODE_CL37,
+ XGBE_AN_MODE_CL37_SGMII,
+ XGBE_AN_MODE_NONE,
+};
+
enum xgbe_an {
XGBE_AN_READY = 0,
XGBE_AN_PAGE_RECEIVED,
@@ -504,8 +531,14 @@ enum xgbe_rx {
};
enum xgbe_mode {
- XGBE_MODE_KR = 0,
- XGBE_MODE_KX,
+ XGBE_MODE_KX_1000 = 0,
+ XGBE_MODE_KX_2500,
+ XGBE_MODE_KR,
+ XGBE_MODE_X,
+ XGBE_MODE_SGMII_100,
+ XGBE_MODE_SGMII_1000,
+ XGBE_MODE_SFI,
+ XGBE_MODE_UNKNOWN,
};
enum xgbe_speedset {
@@ -513,6 +546,12 @@ enum xgbe_speedset {
XGBE_SPEEDSET_2500_10000,
};
+enum xgbe_mdio_mode {
+ XGBE_MDIO_MODE_NONE = 0,
+ XGBE_MDIO_MODE_CL22,
+ XGBE_MDIO_MODE_CL45,
+};
+
struct xgbe_phy {
u32 supported;
u32 advertising;
@@ -531,6 +570,43 @@ struct xgbe_phy {
int rx_pause;
};
+enum xgbe_i2c_cmd {
+ XGBE_I2C_CMD_READ = 0,
+ XGBE_I2C_CMD_WRITE,
+};
+
+struct xgbe_i2c_op {
+ enum xgbe_i2c_cmd cmd;
+
+ unsigned int target;
+
+ void *buf;
+ unsigned int len;
+};
+
+struct xgbe_i2c_op_state {
+ struct xgbe_i2c_op *op;
+
+ unsigned int tx_len;
+ unsigned char *tx_buf;
+
+ unsigned int rx_len;
+ unsigned char *rx_buf;
+
+ unsigned int tx_abort_source;
+
+ int ret;
+};
+
+struct xgbe_i2c {
+ unsigned int started;
+ unsigned int max_speed_mode;
+ unsigned int rx_fifo_size;
+ unsigned int tx_fifo_size;
+
+ struct xgbe_i2c_op_state op_state;
+};
+
struct xgbe_mmc_stats {
/* Tx Stats */
u64 txoctetcount_gb;
@@ -601,9 +677,15 @@ struct xgbe_hw_if {
int (*read_mmd_regs)(struct xgbe_prv_data *, int, int);
void (*write_mmd_regs)(struct xgbe_prv_data *, int, int, int);
- int (*set_gmii_speed)(struct xgbe_prv_data *);
- int (*set_gmii_2500_speed)(struct xgbe_prv_data *);
- int (*set_xgmii_speed)(struct xgbe_prv_data *);
+ int (*set_speed)(struct xgbe_prv_data *, int);
+
+ int (*set_ext_mii_mode)(struct xgbe_prv_data *, unsigned int,
+ enum xgbe_mdio_mode);
+ int (*read_ext_mii_regs)(struct xgbe_prv_data *, int, int);
+ int (*write_ext_mii_regs)(struct xgbe_prv_data *, int, int, u16);
+
+ int (*set_gpio)(struct xgbe_prv_data *, unsigned int);
+ int (*clr_gpio)(struct xgbe_prv_data *, unsigned int);
void (*enable_tx)(struct xgbe_prv_data *);
void (*disable_tx)(struct xgbe_prv_data *);
@@ -682,11 +764,65 @@ struct xgbe_hw_if {
int (*disable_rss)(struct xgbe_prv_data *);
int (*set_rss_hash_key)(struct xgbe_prv_data *, const u8 *);
int (*set_rss_lookup_table)(struct xgbe_prv_data *, const u32 *);
+
+ /* For ECC */
+ void (*disable_ecc_ded)(struct xgbe_prv_data *);
+ void (*disable_ecc_sec)(struct xgbe_prv_data *, enum xgbe_ecc_sec);
+};
+
+/* This structure represents the implementation-specific routines for a
+ * given PHY hardware implementation. All routines are required unless
+ * noted below.
+ * Optional routines:
+ * kr_training_pre, kr_training_post
+ */
+struct xgbe_phy_impl_if {
+ /* Perform setup/teardown actions */
+ int (*init)(struct xgbe_prv_data *);
+ void (*exit)(struct xgbe_prv_data *);
+
+ /* Perform start/stop specific actions */
+ int (*reset)(struct xgbe_prv_data *);
+ int (*start)(struct xgbe_prv_data *);
+ void (*stop)(struct xgbe_prv_data *);
+
+ /* Return the link status */
+ int (*link_status)(struct xgbe_prv_data *, int *);
+
+ /* Indicate if a particular speed is valid */
+ bool (*valid_speed)(struct xgbe_prv_data *, int);
+
+ /* Check if the specified mode can/should be used */
+ bool (*use_mode)(struct xgbe_prv_data *, enum xgbe_mode);
+ /* Switch the PHY into various modes */
+ void (*set_mode)(struct xgbe_prv_data *, enum xgbe_mode);
+ /* Retrieve mode needed for a specific speed */
+ enum xgbe_mode (*get_mode)(struct xgbe_prv_data *, int);
+ /* Retrieve new/next mode when trying to auto-negotiate */
+ enum xgbe_mode (*switch_mode)(struct xgbe_prv_data *);
+ /* Retrieve current mode */
+ enum xgbe_mode (*cur_mode)(struct xgbe_prv_data *);
+
+ /* Retrieve current auto-negotiation mode */
+ enum xgbe_an_mode (*an_mode)(struct xgbe_prv_data *);
+
+ /* Configure auto-negotiation settings */
+ int (*an_config)(struct xgbe_prv_data *);
+
+ /* Set/override auto-negotiation advertisement settings */
+ unsigned int (*an_advertising)(struct xgbe_prv_data *);
+
+ /* Process results of auto-negotiation */
+ enum xgbe_mode (*an_outcome)(struct xgbe_prv_data *);
+
+ /* Pre/Post KR training enablement support */
+ void (*kr_training_pre)(struct xgbe_prv_data *);
+ void (*kr_training_post)(struct xgbe_prv_data *);
};
struct xgbe_phy_if {
- /* For initial PHY setup */
- void (*phy_init)(struct xgbe_prv_data *);
+ /* For PHY setup/teardown */
+ int (*phy_init)(struct xgbe_prv_data *);
+ void (*phy_exit)(struct xgbe_prv_data *);
/* For PHY support when setting device up/down */
int (*phy_reset)(struct xgbe_prv_data *);
@@ -696,6 +832,30 @@ struct xgbe_phy_if {
/* For PHY support while device is up */
void (*phy_status)(struct xgbe_prv_data *);
int (*phy_config_aneg)(struct xgbe_prv_data *);
+
+ /* For PHY settings validation */
+ bool (*phy_valid_speed)(struct xgbe_prv_data *, int);
+
+ /* For single interrupt support */
+ irqreturn_t (*an_isr)(int, struct xgbe_prv_data *);
+
+ /* PHY implementation specific services */
+ struct xgbe_phy_impl_if phy_impl;
+};
+
+struct xgbe_i2c_if {
+ /* For initial I2C setup */
+ int (*i2c_init)(struct xgbe_prv_data *);
+
+ /* For I2C support when setting device up/down */
+ int (*i2c_start)(struct xgbe_prv_data *);
+ void (*i2c_stop)(struct xgbe_prv_data *);
+
+ /* For performing I2C operations */
+ int (*i2c_xfer)(struct xgbe_prv_data *, struct xgbe_i2c_op *);
+
+ /* For single interrupt support */
+ irqreturn_t (*i2c_isr)(int, struct xgbe_prv_data *);
};
struct xgbe_desc_if {
@@ -755,11 +915,28 @@ struct xgbe_hw_features {
unsigned int aux_snap_num; /* Number of Aux snapshot inputs */
};
+struct xgbe_version_data {
+ void (*init_function_ptrs_phy_impl)(struct xgbe_phy_if *);
+ enum xgbe_xpcs_access xpcs_access;
+ unsigned int mmc_64bit;
+ unsigned int tx_max_fifo_size;
+ unsigned int rx_max_fifo_size;
+ unsigned int tx_tstamp_workaround;
+ unsigned int ecc_support;
+ unsigned int i2c_support;
+};
+
struct xgbe_prv_data {
struct net_device *netdev;
- struct platform_device *pdev;
+ struct pci_dev *pcidev;
+ struct platform_device *platdev;
struct acpi_device *adev;
struct device *dev;
+ struct platform_device *phy_platdev;
+ struct device *phy_dev;
+
+ /* Version related data */
+ struct xgbe_version_data *vdata;
/* ACPI or DT flag */
unsigned int use_acpi;
@@ -770,12 +947,17 @@ struct xgbe_prv_data {
void __iomem *rxtx_regs; /* SerDes Rx/Tx CSRs */
void __iomem *sir0_regs; /* SerDes integration registers (1/2) */
void __iomem *sir1_regs; /* SerDes integration registers (2/2) */
+ void __iomem *xprop_regs; /* XGBE property registers */
+ void __iomem *xi2c_regs; /* XGBE I2C CSRs */
/* Overall device lock */
spinlock_t lock;
/* XPCS indirect addressing lock */
spinlock_t xpcs_lock;
+ unsigned int xpcs_window;
+ unsigned int xpcs_window_size;
+ unsigned int xpcs_window_mask;
/* RSS addressing mutex */
struct mutex rss_mutex;
@@ -783,12 +965,39 @@ struct xgbe_prv_data {
/* Flags representing xgbe_state */
unsigned long dev_state;
+ /* ECC support */
+ unsigned long tx_sec_period;
+ unsigned long tx_ded_period;
+ unsigned long rx_sec_period;
+ unsigned long rx_ded_period;
+ unsigned long desc_sec_period;
+ unsigned long desc_ded_period;
+
+ unsigned int tx_sec_count;
+ unsigned int tx_ded_count;
+ unsigned int rx_sec_count;
+ unsigned int rx_ded_count;
+ unsigned int desc_ded_count;
+ unsigned int desc_sec_count;
+
+ struct msix_entry *msix_entries;
int dev_irq;
+ int ecc_irq;
+ int i2c_irq;
+ int channel_irq[XGBE_MAX_DMA_CHANNELS];
+
unsigned int per_channel_irq;
+ unsigned int irq_shared;
+ unsigned int irq_count;
+ unsigned int channel_irq_count;
+ unsigned int channel_irq_mode;
+
+ char ecc_name[IFNAMSIZ + 32];
struct xgbe_hw_if hw_if;
struct xgbe_phy_if phy_if;
struct xgbe_desc_if desc_if;
+ struct xgbe_i2c_if i2c_if;
/* AXI DMA settings */
unsigned int coherent;
@@ -803,12 +1012,16 @@ struct xgbe_prv_data {
/* Rings for Tx/Rx on a DMA channel */
struct xgbe_channel *channel;
+ unsigned int tx_max_channel_count;
+ unsigned int rx_max_channel_count;
unsigned int channel_count;
unsigned int tx_ring_count;
unsigned int tx_desc_count;
unsigned int rx_ring_count;
unsigned int rx_desc_count;
+ unsigned int tx_max_q_count;
+ unsigned int rx_max_q_count;
unsigned int tx_q_count;
unsigned int rx_q_count;
@@ -820,11 +1033,13 @@ struct xgbe_prv_data {
unsigned int tx_threshold;
unsigned int tx_pbl;
unsigned int tx_osp_mode;
+ unsigned int tx_max_fifo_size;
/* Rx settings */
unsigned int rx_sf_mode;
unsigned int rx_threshold;
unsigned int rx_pbl;
+ unsigned int rx_max_fifo_size;
/* Tx coalescing settings */
unsigned int tx_usecs;
@@ -842,6 +1057,8 @@ struct xgbe_prv_data {
unsigned int pause_autoneg;
unsigned int tx_pause;
unsigned int rx_pause;
+ unsigned int rx_rfa[XGBE_MAX_QUEUES];
+ unsigned int rx_rfd[XGBE_MAX_QUEUES];
/* Receive Side Scaling settings */
u8 rss_key[XGBE_RSS_HASH_KEY_SIZE];
@@ -881,13 +1098,16 @@ struct xgbe_prv_data {
struct ieee_pfc *pfc;
unsigned int q2tc_map[XGBE_MAX_QUEUES];
unsigned int prio2q_map[IEEE_8021QAZ_MAX_TCS];
+ unsigned int pfcq[XGBE_MAX_QUEUES];
+ unsigned int pfc_rfa;
u8 num_tcs;
/* Hardware features of the device */
struct xgbe_hw_features hw_feat;
- /* Device restart work structure */
+ /* Device work structures */
struct work_struct restart_work;
+ struct work_struct stopdev_work;
/* Keeps track of power mode */
unsigned int power_down;
@@ -901,9 +1121,14 @@ struct xgbe_prv_data {
int phy_speed;
/* MDIO/PHY related settings */
+ unsigned int phy_started;
+ void *phy_data;
struct xgbe_phy phy;
int mdio_mmd;
unsigned long link_check;
+ struct completion mdio_complete;
+
+ unsigned int kr_redrv;
char an_name[IFNAMSIZ + 32];
struct workqueue_struct *an_workqueue;
@@ -911,23 +1136,9 @@ struct xgbe_prv_data {
int an_irq;
struct work_struct an_irq_work;
- unsigned int speed_set;
-
- /* SerDes UEFI configurable settings.
- * Switching between modes/speeds requires new values for some
- * SerDes settings. The values can be supplied as device
- * properties in array format. The first array entry is for
- * 1GbE, second for 2.5GbE and third for 10GbE
- */
- u32 serdes_blwc[XGBE_SPEEDS];
- u32 serdes_cdr_rate[XGBE_SPEEDS];
- u32 serdes_pq_skew[XGBE_SPEEDS];
- u32 serdes_tx_amp[XGBE_SPEEDS];
- u32 serdes_dfe_tap_cfg[XGBE_SPEEDS];
- u32 serdes_dfe_tap_ena[XGBE_SPEEDS];
-
/* Auto-negotiation state machine support */
unsigned int an_int;
+ unsigned int an_status;
struct mutex an_mutex;
enum xgbe_an an_result;
enum xgbe_an an_state;
@@ -938,6 +1149,13 @@ struct xgbe_prv_data {
unsigned int parallel_detect;
unsigned int fec_ability;
unsigned long an_start;
+ enum xgbe_an_mode an_mode;
+
+ /* I2C support */
+ struct xgbe_i2c i2c;
+ struct mutex i2c_mutex;
+ struct completion i2c_complete;
+ char i2c_name[IFNAMSIZ + 32];
unsigned int lpm_ctrl; /* CTRL1 for resume */
@@ -948,14 +1166,36 @@ struct xgbe_prv_data {
unsigned int debugfs_xpcs_mmd;
unsigned int debugfs_xpcs_reg;
+
+ unsigned int debugfs_xprop_reg;
+
+ unsigned int debugfs_xi2c_reg;
#endif
};
/* Function prototypes */
+struct xgbe_prv_data *xgbe_alloc_pdata(struct device *);
+void xgbe_free_pdata(struct xgbe_prv_data *);
+void xgbe_set_counts(struct xgbe_prv_data *);
+int xgbe_config_netdev(struct xgbe_prv_data *);
+void xgbe_deconfig_netdev(struct xgbe_prv_data *);
+
+int xgbe_platform_init(void);
+void xgbe_platform_exit(void);
+#ifdef CONFIG_PCI
+int xgbe_pci_init(void);
+void xgbe_pci_exit(void);
+#else
+static inline int xgbe_pci_init(void) { return 0; }
+static inline void xgbe_pci_exit(void) { }
+#endif
void xgbe_init_function_ptrs_dev(struct xgbe_hw_if *);
void xgbe_init_function_ptrs_phy(struct xgbe_phy_if *);
+void xgbe_init_function_ptrs_phy_v1(struct xgbe_phy_if *);
+void xgbe_init_function_ptrs_phy_v2(struct xgbe_phy_if *);
void xgbe_init_function_ptrs_desc(struct xgbe_desc_if *);
+void xgbe_init_function_ptrs_i2c(struct xgbe_i2c_if *);
const struct net_device_ops *xgbe_get_netdev_ops(void);
const struct ethtool_ops *xgbe_get_ethtool_ops(void);
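
The new FIFO and flow-control threshold macros are easier to read with concrete numbers; a small compile-time check, with values chosen purely for illustration:

#include <linux/bug.h>

static inline void xgbe_fifo_macro_examples(void)
{
	/* XGMAC_FIFO_ALIGN rounds up to a 256-byte unit */
	BUILD_BUG_ON(XGMAC_FIFO_ALIGN(4000) != 4096);

	/* XGMAC_FLOW_CONTROL_ALIGN rounds up to a 512-byte unit */
	BUILD_BUG_ON(XGMAC_FLOW_CONTROL_ALIGN(5000) != 5120);

	/* thresholds below 1024 bytes encode as 0 ... */
	BUILD_BUG_ON(XGMAC_FLOW_CONTROL_VALUE(1000) != 0);

	/* ... larger ones as (bytes / 512) - 2 */
	BUILD_BUG_ON(XGMAC_FLOW_CONTROL_VALUE(4096) != 6);
}
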
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_cle.c b/drivers/net/ethernet/apm/xgene/xgene_enet_cle.c
index 23d72af83d82..e1a51d8892fc 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_cle.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_cle.c
@@ -52,6 +52,7 @@ static void xgene_cle_dbptr_to_hw(struct xgene_enet_pdata *pdata,
{
buf[0] = SET_VAL(CLE_DROP, dbptr->drop);
buf[4] = SET_VAL(CLE_FPSEL, dbptr->fpsel) |
+ SET_VAL(CLE_NFPSEL, dbptr->nxtfpsel) |
SET_VAL(CLE_DSTQIDL, dbptr->dstqid);
buf[5] = SET_VAL(CLE_DSTQIDH, (u32)dbptr->dstqid >> CLE_DSTQIDL_LEN) |
@@ -78,10 +79,10 @@ static void xgene_cle_kn_to_hw(struct xgene_cle_ptree_kn *kn, u32 *buf)
}
}
-static void xgene_cle_dn_to_hw(struct xgene_cle_ptree_ewdn *dn,
+static void xgene_cle_dn_to_hw(const struct xgene_cle_ptree_ewdn *dn,
u32 *buf, u32 jb)
{
- struct xgene_cle_ptree_branch *br;
+ const struct xgene_cle_ptree_branch *br;
u32 i, j = 0;
u32 npp;
@@ -204,17 +205,385 @@ static int xgene_cle_setup_dbptr(struct xgene_enet_pdata *pdata,
return 0;
}
+static const struct xgene_cle_ptree_ewdn xgene_init_ptree_dn[] = {
+ {
+ /* PKT_TYPE_NODE */
+ .node_type = EWDN,
+ .last_node = 0,
+ .hdr_len_store = 1,
+ .hdr_extn = NO_BYTE,
+ .byte_store = NO_BYTE,
+ .search_byte_store = NO_BYTE,
+ .result_pointer = DB_RES_DROP,
+ .num_branches = 2,
+ .branch = {
+ {
+ /* IPV4 */
+ .valid = 1,
+ .next_packet_pointer = 22,
+ .jump_bw = JMP_FW,
+ .jump_rel = JMP_ABS,
+ .operation = EQT,
+ .next_node = PKT_PROT_NODE,
+ .next_branch = 0,
+ .data = 0x8,
+ .mask = 0x0
+ },
+ {
+ .valid = 0,
+ .next_packet_pointer = 262,
+ .jump_bw = JMP_FW,
+ .jump_rel = JMP_ABS,
+ .operation = EQT,
+ .next_node = LAST_NODE,
+ .next_branch = 0,
+ .data = 0x0,
+ .mask = 0xffff
+ }
+ },
+ },
+ {
+ /* PKT_PROT_NODE */
+ .node_type = EWDN,
+ .last_node = 0,
+ .hdr_len_store = 1,
+ .hdr_extn = NO_BYTE,
+ .byte_store = NO_BYTE,
+ .search_byte_store = NO_BYTE,
+ .result_pointer = DB_RES_DROP,
+ .num_branches = 3,
+ .branch = {
+ {
+ /* TCP */
+ .valid = 1,
+ .next_packet_pointer = 26,
+ .jump_bw = JMP_FW,
+ .jump_rel = JMP_ABS,
+ .operation = EQT,
+ .next_node = RSS_IPV4_TCP_NODE,
+ .next_branch = 0,
+ .data = 0x0600,
+ .mask = 0x00ff
+ },
+ {
+ /* UDP */
+ .valid = 1,
+ .next_packet_pointer = 26,
+ .jump_bw = JMP_FW,
+ .jump_rel = JMP_ABS,
+ .operation = EQT,
+ .next_node = RSS_IPV4_UDP_NODE,
+ .next_branch = 0,
+ .data = 0x1100,
+ .mask = 0x00ff
+ },
+ {
+ .valid = 0,
+ .next_packet_pointer = 26,
+ .jump_bw = JMP_FW,
+ .jump_rel = JMP_ABS,
+ .operation = EQT,
+ .next_node = RSS_IPV4_OTHERS_NODE,
+ .next_branch = 0,
+ .data = 0x0,
+ .mask = 0xffff
+ }
+ }
+ },
+ {
+ /* RSS_IPV4_TCP_NODE */
+ .node_type = EWDN,
+ .last_node = 0,
+ .hdr_len_store = 1,
+ .hdr_extn = NO_BYTE,
+ .byte_store = NO_BYTE,
+ .search_byte_store = BOTH_BYTES,
+ .result_pointer = DB_RES_DROP,
+ .num_branches = 6,
+ .branch = {
+ {
+ /* SRC IPV4 B01 */
+ .valid = 0,
+ .next_packet_pointer = 28,
+ .jump_bw = JMP_FW,
+ .jump_rel = JMP_ABS,
+ .operation = EQT,
+ .next_node = RSS_IPV4_TCP_NODE,
+ .next_branch = 1,
+ .data = 0x0,
+ .mask = 0xffff
+ },
+ {
+ /* SRC IPV4 B23 */
+ .valid = 0,
+ .next_packet_pointer = 30,
+ .jump_bw = JMP_FW,
+ .jump_rel = JMP_ABS,
+ .operation = EQT,
+ .next_node = RSS_IPV4_TCP_NODE,
+ .next_branch = 2,
+ .data = 0x0,
+ .mask = 0xffff
+ },
+ {
+ /* DST IPV4 B01 */
+ .valid = 0,
+ .next_packet_pointer = 32,
+ .jump_bw = JMP_FW,
+ .jump_rel = JMP_ABS,
+ .operation = EQT,
+ .next_node = RSS_IPV4_TCP_NODE,
+ .next_branch = 3,
+ .data = 0x0,
+ .mask = 0xffff
+ },
+ {
+ /* DST IPV4 B23 */
+ .valid = 0,
+ .next_packet_pointer = 34,
+ .jump_bw = JMP_FW,
+ .jump_rel = JMP_ABS,
+ .operation = EQT,
+ .next_node = RSS_IPV4_TCP_NODE,
+ .next_branch = 4,
+ .data = 0x0,
+ .mask = 0xffff
+ },
+ {
+ /* TCP SRC Port */
+ .valid = 0,
+ .next_packet_pointer = 36,
+ .jump_bw = JMP_FW,
+ .jump_rel = JMP_ABS,
+ .operation = EQT,
+ .next_node = RSS_IPV4_TCP_NODE,
+ .next_branch = 5,
+ .data = 0x0,
+ .mask = 0xffff
+ },
+ {
+ /* TCP DST Port */
+ .valid = 0,
+ .next_packet_pointer = 256,
+ .jump_bw = JMP_FW,
+ .jump_rel = JMP_ABS,
+ .operation = EQT,
+ .next_node = LAST_NODE,
+ .next_branch = 0,
+ .data = 0x0,
+ .mask = 0xffff
+ }
+ }
+ },
+ {
+ /* RSS_IPV4_UDP_NODE */
+ .node_type = EWDN,
+ .last_node = 0,
+ .hdr_len_store = 1,
+ .hdr_extn = NO_BYTE,
+ .byte_store = NO_BYTE,
+ .search_byte_store = BOTH_BYTES,
+ .result_pointer = DB_RES_DROP,
+ .num_branches = 6,
+ .branch = {
+ {
+ /* SRC IPV4 B01 */
+ .valid = 0,
+ .next_packet_pointer = 28,
+ .jump_bw = JMP_FW,
+ .jump_rel = JMP_ABS,
+ .operation = EQT,
+ .next_node = RSS_IPV4_UDP_NODE,
+ .next_branch = 1,
+ .data = 0x0,
+ .mask = 0xffff
+ },
+ {
+ /* SRC IPV4 B23 */
+ .valid = 0,
+ .next_packet_pointer = 30,
+ .jump_bw = JMP_FW,
+ .jump_rel = JMP_ABS,
+ .operation = EQT,
+ .next_node = RSS_IPV4_UDP_NODE,
+ .next_branch = 2,
+ .data = 0x0,
+ .mask = 0xffff
+ },
+ {
+ /* DST IPV4 B01 */
+ .valid = 0,
+ .next_packet_pointer = 32,
+ .jump_bw = JMP_FW,
+ .jump_rel = JMP_ABS,
+ .operation = EQT,
+ .next_node = RSS_IPV4_UDP_NODE,
+ .next_branch = 3,
+ .data = 0x0,
+ .mask = 0xffff
+ },
+ {
+ /* DST IPV4 B23 */
+ .valid = 0,
+ .next_packet_pointer = 34,
+ .jump_bw = JMP_FW,
+ .jump_rel = JMP_ABS,
+ .operation = EQT,
+ .next_node = RSS_IPV4_UDP_NODE,
+ .next_branch = 4,
+ .data = 0x0,
+ .mask = 0xffff
+ },
+ {
+ /* UDP SRC Port */
+ .valid = 0,
+ .next_packet_pointer = 36,
+ .jump_bw = JMP_FW,
+ .jump_rel = JMP_ABS,
+ .operation = EQT,
+ .next_node = RSS_IPV4_UDP_NODE,
+ .next_branch = 5,
+ .data = 0x0,
+ .mask = 0xffff
+ },
+ {
+ /* UDP DST Port */
+ .valid = 0,
+ .next_packet_pointer = 258,
+ .jump_bw = JMP_FW,
+ .jump_rel = JMP_ABS,
+ .operation = EQT,
+ .next_node = LAST_NODE,
+ .next_branch = 0,
+ .data = 0x0,
+ .mask = 0xffff
+ }
+ }
+ },
+ {
+ /* RSS_IPV4_OTHERS_NODE */
+ .node_type = EWDN,
+ .last_node = 0,
+ .hdr_len_store = 1,
+ .hdr_extn = NO_BYTE,
+ .byte_store = NO_BYTE,
+ .search_byte_store = BOTH_BYTES,
+ .result_pointer = DB_RES_DROP,
+ .num_branches = 6,
+ .branch = {
+ {
+ /* SRC IPV4 B01 */
+ .valid = 0,
+ .next_packet_pointer = 28,
+ .jump_bw = JMP_FW,
+ .jump_rel = JMP_ABS,
+ .operation = EQT,
+ .next_node = RSS_IPV4_OTHERS_NODE,
+ .next_branch = 1,
+ .data = 0x0,
+ .mask = 0xffff
+ },
+ {
+ /* SRC IPV4 B23 */
+ .valid = 0,
+ .next_packet_pointer = 30,
+ .jump_bw = JMP_FW,
+ .jump_rel = JMP_ABS,
+ .operation = EQT,
+ .next_node = RSS_IPV4_OTHERS_NODE,
+ .next_branch = 2,
+ .data = 0x0,
+ .mask = 0xffff
+ },
+ {
+ /* DST IPV4 B01 */
+ .valid = 0,
+ .next_packet_pointer = 32,
+ .jump_bw = JMP_FW,
+ .jump_rel = JMP_ABS,
+ .operation = EQT,
+ .next_node = RSS_IPV4_OTHERS_NODE,
+ .next_branch = 3,
+ .data = 0x0,
+ .mask = 0xffff
+ },
+ {
+ /* DST IPV4 B23 */
+ .valid = 0,
+ .next_packet_pointer = 34,
+ .jump_bw = JMP_FW,
+ .jump_rel = JMP_ABS,
+ .operation = EQT,
+ .next_node = RSS_IPV4_OTHERS_NODE,
+ .next_branch = 4,
+ .data = 0x0,
+ .mask = 0xffff
+ },
+ {
+ /* L4 SRC Port */
+ .valid = 0,
+ .next_packet_pointer = 36,
+ .jump_bw = JMP_FW,
+ .jump_rel = JMP_ABS,
+ .operation = EQT,
+ .next_node = RSS_IPV4_OTHERS_NODE,
+ .next_branch = 5,
+ .data = 0x0,
+ .mask = 0xffff
+ },
+ {
+ /* L4 DST Port */
+ .valid = 0,
+ .next_packet_pointer = 260,
+ .jump_bw = JMP_FW,
+ .jump_rel = JMP_ABS,
+ .operation = EQT,
+ .next_node = LAST_NODE,
+ .next_branch = 0,
+ .data = 0x0,
+ .mask = 0xffff
+ }
+ }
+ },
+
+ {
+ /* LAST NODE */
+ .node_type = EWDN,
+ .last_node = 1,
+ .hdr_len_store = 1,
+ .hdr_extn = NO_BYTE,
+ .byte_store = NO_BYTE,
+ .search_byte_store = NO_BYTE,
+ .result_pointer = DB_RES_DROP,
+ .num_branches = 1,
+ .branch = {
+ {
+ .valid = 0,
+ .next_packet_pointer = 0,
+ .jump_bw = JMP_FW,
+ .jump_rel = JMP_ABS,
+ .operation = EQT,
+ .next_node = MAX_NODES,
+ .next_branch = 0,
+ .data = 0,
+ .mask = 0xffff
+ }
+ }
+ }
+};
+
static int xgene_cle_setup_node(struct xgene_enet_pdata *pdata,
struct xgene_enet_cle *cle)
{
struct xgene_cle_ptree *ptree = &cle->ptree;
- struct xgene_cle_ptree_ewdn *dn = ptree->dn;
+ const struct xgene_cle_ptree_ewdn *dn = xgene_init_ptree_dn;
+ int num_dn = ARRAY_SIZE(xgene_init_ptree_dn);
struct xgene_cle_ptree_kn *kn = ptree->kn;
u32 buf[CLE_DRAM_REGS];
int i, j, ret;
memset(buf, 0, sizeof(buf));
- for (i = 0; i < ptree->num_dn; i++) {
+ for (i = 0; i < num_dn; i++) {
xgene_cle_dn_to_hw(&dn[i], buf, cle->jump_bytes);
ret = xgene_cle_dram_wr(cle, buf, 17, i + ptree->start_node,
PTREE_RAM, CLE_CMD_WR);
@@ -224,8 +593,8 @@ static int xgene_cle_setup_node(struct xgene_enet_pdata *pdata,
/* continue node index for key node */
memset(buf, 0, sizeof(buf));
- for (j = i; j < (ptree->num_kn + ptree->num_dn); j++) {
- xgene_cle_kn_to_hw(&kn[j - ptree->num_dn], buf);
+ for (j = i; j < (ptree->num_kn + num_dn); j++) {
+ xgene_cle_kn_to_hw(&kn[j - num_dn], buf);
ret = xgene_cle_dram_wr(cle, buf, 17, j + ptree->start_node,
PTREE_RAM, CLE_CMD_WR);
if (ret)
@@ -346,11 +715,15 @@ static int xgene_cle_set_rss_idt(struct xgene_enet_pdata *pdata)
for (i = 0; i < XGENE_CLE_IDT_ENTRIES; i++) {
idx = i % pdata->rxq_cnt;
pool_id = pdata->rx_ring[idx]->buf_pool->id;
- fpsel = xgene_enet_ring_bufnum(pool_id) - 0x20;
+ fpsel = xgene_enet_get_fpsel(pool_id);
dstqid = xgene_enet_dst_ring_num(pdata->rx_ring[idx]);
nfpsel = 0;
- idt_reg = 0;
+ if (pdata->rx_ring[idx]->page_pool) {
+ pool_id = pdata->rx_ring[idx]->page_pool->id;
+ nfpsel = xgene_enet_get_fpsel(pool_id);
+ }
+ idt_reg = 0;
xgene_cle_idt_to_hw(pdata, dstqid, fpsel, nfpsel, &idt_reg);
ret = xgene_cle_dram_wr(&pdata->cle, &idt_reg, 1, i,
RSS_IDT, CLE_CMD_WR);
@@ -400,320 +773,41 @@ static int xgene_cle_setup_rss(struct xgene_enet_pdata *pdata)
static int xgene_enet_cle_init(struct xgene_enet_pdata *pdata)
{
struct xgene_enet_cle *enet_cle = &pdata->cle;
+ u32 def_qid, def_fpsel, def_nxtfpsel, pool_id;
struct xgene_cle_dbptr dbptr[DB_MAX_PTRS];
- struct xgene_cle_ptree_branch *br;
- u32 def_qid, def_fpsel, pool_id;
struct xgene_cle_ptree *ptree;
struct xgene_cle_ptree_kn kn;
int ret;
- struct xgene_cle_ptree_ewdn ptree_dn[] = {
- {
- /* PKT_TYPE_NODE */
- .node_type = EWDN,
- .last_node = 0,
- .hdr_len_store = 1,
- .hdr_extn = NO_BYTE,
- .byte_store = NO_BYTE,
- .search_byte_store = NO_BYTE,
- .result_pointer = DB_RES_DROP,
- .num_branches = 2,
- .branch = {
- {
- /* IPV4 */
- .valid = 1,
- .next_packet_pointer = 22,
- .jump_bw = JMP_FW,
- .jump_rel = JMP_ABS,
- .operation = EQT,
- .next_node = PKT_PROT_NODE,
- .next_branch = 0,
- .data = 0x8,
- .mask = 0x0
- },
- {
- .valid = 0,
- .next_packet_pointer = 262,
- .jump_bw = JMP_FW,
- .jump_rel = JMP_ABS,
- .operation = EQT,
- .next_node = LAST_NODE,
- .next_branch = 0,
- .data = 0x0,
- .mask = 0xffff
- }
- },
- },
- {
- /* PKT_PROT_NODE */
- .node_type = EWDN,
- .last_node = 0,
- .hdr_len_store = 1,
- .hdr_extn = NO_BYTE,
- .byte_store = NO_BYTE,
- .search_byte_store = NO_BYTE,
- .result_pointer = DB_RES_DROP,
- .num_branches = 3,
- .branch = {
- {
- /* TCP */
- .valid = 1,
- .next_packet_pointer = 26,
- .jump_bw = JMP_FW,
- .jump_rel = JMP_ABS,
- .operation = EQT,
- .next_node = RSS_IPV4_TCP_NODE,
- .next_branch = 0,
- .data = 0x0600,
- .mask = 0x00ff
- },
- {
- /* UDP */
- .valid = 1,
- .next_packet_pointer = 26,
- .jump_bw = JMP_FW,
- .jump_rel = JMP_ABS,
- .operation = EQT,
- .next_node = RSS_IPV4_UDP_NODE,
- .next_branch = 0,
- .data = 0x1100,
- .mask = 0x00ff
- },
- {
- .valid = 0,
- .next_packet_pointer = 260,
- .jump_bw = JMP_FW,
- .jump_rel = JMP_ABS,
- .operation = EQT,
- .next_node = LAST_NODE,
- .next_branch = 0,
- .data = 0x0,
- .mask = 0xffff
- }
- }
- },
- {
- /* RSS_IPV4_TCP_NODE */
- .node_type = EWDN,
- .last_node = 0,
- .hdr_len_store = 1,
- .hdr_extn = NO_BYTE,
- .byte_store = NO_BYTE,
- .search_byte_store = BOTH_BYTES,
- .result_pointer = DB_RES_DROP,
- .num_branches = 6,
- .branch = {
- {
- /* SRC IPV4 B01 */
- .valid = 0,
- .next_packet_pointer = 28,
- .jump_bw = JMP_FW,
- .jump_rel = JMP_ABS,
- .operation = EQT,
- .next_node = RSS_IPV4_TCP_NODE,
- .next_branch = 1,
- .data = 0x0,
- .mask = 0xffff
- },
- {
- /* SRC IPV4 B23 */
- .valid = 0,
- .next_packet_pointer = 30,
- .jump_bw = JMP_FW,
- .jump_rel = JMP_ABS,
- .operation = EQT,
- .next_node = RSS_IPV4_TCP_NODE,
- .next_branch = 2,
- .data = 0x0,
- .mask = 0xffff
- },
- {
- /* DST IPV4 B01 */
- .valid = 0,
- .next_packet_pointer = 32,
- .jump_bw = JMP_FW,
- .jump_rel = JMP_ABS,
- .operation = EQT,
- .next_node = RSS_IPV4_TCP_NODE,
- .next_branch = 3,
- .data = 0x0,
- .mask = 0xffff
- },
- {
- /* DST IPV4 B23 */
- .valid = 0,
- .next_packet_pointer = 34,
- .jump_bw = JMP_FW,
- .jump_rel = JMP_ABS,
- .operation = EQT,
- .next_node = RSS_IPV4_TCP_NODE,
- .next_branch = 4,
- .data = 0x0,
- .mask = 0xffff
- },
- {
- /* TCP SRC Port */
- .valid = 0,
- .next_packet_pointer = 36,
- .jump_bw = JMP_FW,
- .jump_rel = JMP_ABS,
- .operation = EQT,
- .next_node = RSS_IPV4_TCP_NODE,
- .next_branch = 5,
- .data = 0x0,
- .mask = 0xffff
- },
- {
- /* TCP DST Port */
- .valid = 0,
- .next_packet_pointer = 256,
- .jump_bw = JMP_FW,
- .jump_rel = JMP_ABS,
- .operation = EQT,
- .next_node = LAST_NODE,
- .next_branch = 0,
- .data = 0x0,
- .mask = 0xffff
- }
- }
- },
- {
- /* RSS_IPV4_UDP_NODE */
- .node_type = EWDN,
- .last_node = 0,
- .hdr_len_store = 1,
- .hdr_extn = NO_BYTE,
- .byte_store = NO_BYTE,
- .search_byte_store = BOTH_BYTES,
- .result_pointer = DB_RES_DROP,
- .num_branches = 6,
- .branch = {
- {
- /* SRC IPV4 B01 */
- .valid = 0,
- .next_packet_pointer = 28,
- .jump_bw = JMP_FW,
- .jump_rel = JMP_ABS,
- .operation = EQT,
- .next_node = RSS_IPV4_UDP_NODE,
- .next_branch = 1,
- .data = 0x0,
- .mask = 0xffff
- },
- {
- /* SRC IPV4 B23 */
- .valid = 0,
- .next_packet_pointer = 30,
- .jump_bw = JMP_FW,
- .jump_rel = JMP_ABS,
- .operation = EQT,
- .next_node = RSS_IPV4_UDP_NODE,
- .next_branch = 2,
- .data = 0x0,
- .mask = 0xffff
- },
- {
- /* DST IPV4 B01 */
- .valid = 0,
- .next_packet_pointer = 32,
- .jump_bw = JMP_FW,
- .jump_rel = JMP_ABS,
- .operation = EQT,
- .next_node = RSS_IPV4_UDP_NODE,
- .next_branch = 3,
- .data = 0x0,
- .mask = 0xffff
- },
- {
- /* DST IPV4 B23 */
- .valid = 0,
- .next_packet_pointer = 34,
- .jump_bw = JMP_FW,
- .jump_rel = JMP_ABS,
- .operation = EQT,
- .next_node = RSS_IPV4_UDP_NODE,
- .next_branch = 4,
- .data = 0x0,
- .mask = 0xffff
- },
- {
- /* TCP SRC Port */
- .valid = 0,
- .next_packet_pointer = 36,
- .jump_bw = JMP_FW,
- .jump_rel = JMP_ABS,
- .operation = EQT,
- .next_node = RSS_IPV4_UDP_NODE,
- .next_branch = 5,
- .data = 0x0,
- .mask = 0xffff
- },
- {
- /* TCP DST Port */
- .valid = 0,
- .next_packet_pointer = 258,
- .jump_bw = JMP_FW,
- .jump_rel = JMP_ABS,
- .operation = EQT,
- .next_node = LAST_NODE,
- .next_branch = 0,
- .data = 0x0,
- .mask = 0xffff
- }
- }
- },
- {
- /* LAST NODE */
- .node_type = EWDN,
- .last_node = 1,
- .hdr_len_store = 1,
- .hdr_extn = NO_BYTE,
- .byte_store = NO_BYTE,
- .search_byte_store = NO_BYTE,
- .result_pointer = DB_RES_DROP,
- .num_branches = 1,
- .branch = {
- {
- .valid = 0,
- .next_packet_pointer = 0,
- .jump_bw = JMP_FW,
- .jump_rel = JMP_ABS,
- .operation = EQT,
- .next_node = MAX_NODES,
- .next_branch = 0,
- .data = 0,
- .mask = 0xffff
- }
- }
- }
- };
+
+ if (pdata->phy_mode != PHY_INTERFACE_MODE_XGMII)
+ return -EINVAL;
ptree = &enet_cle->ptree;
ptree->start_pkt = 12; /* Ethertype */
- if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) {
- ret = xgene_cle_setup_rss(pdata);
- if (ret) {
- netdev_err(pdata->ndev, "RSS initialization failed\n");
- return ret;
- }
- } else {
- br = &ptree_dn[PKT_PROT_NODE].branch[0];
- br->valid = 0;
- br->next_packet_pointer = 260;
- br->next_node = LAST_NODE;
- br->data = 0x0000;
- br->mask = 0xffff;
+
+ ret = xgene_cle_setup_rss(pdata);
+ if (ret) {
+ netdev_err(pdata->ndev, "RSS initialization failed\n");
+ return ret;
}
def_qid = xgene_enet_dst_ring_num(pdata->rx_ring[0]);
pool_id = pdata->rx_ring[0]->buf_pool->id;
- def_fpsel = xgene_enet_ring_bufnum(pool_id) - 0x20;
+ def_fpsel = xgene_enet_get_fpsel(pool_id);
+ def_nxtfpsel = 0;
+ if (pdata->rx_ring[0]->page_pool) {
+ pool_id = pdata->rx_ring[0]->page_pool->id;
+ def_nxtfpsel = xgene_enet_get_fpsel(pool_id);
+ }
memset(dbptr, 0, sizeof(struct xgene_cle_dbptr) * DB_MAX_PTRS);
dbptr[DB_RES_ACCEPT].fpsel = def_fpsel;
+ dbptr[DB_RES_ACCEPT].nxtfpsel = def_nxtfpsel;
dbptr[DB_RES_ACCEPT].dstqid = def_qid;
dbptr[DB_RES_ACCEPT].cle_priority = 1;
dbptr[DB_RES_DEF].fpsel = def_fpsel;
+ dbptr[DB_RES_DEF].nxtfpsel = def_nxtfpsel;
dbptr[DB_RES_DEF].dstqid = def_qid;
dbptr[DB_RES_DEF].cle_priority = 7;
xgene_cle_setup_def_dbptr(pdata, enet_cle, &dbptr[DB_RES_DEF],
@@ -727,10 +821,8 @@ static int xgene_enet_cle_init(struct xgene_enet_pdata *pdata)
kn.key[0].priority = 0;
kn.key[0].result_pointer = DB_RES_ACCEPT;
- ptree->dn = ptree_dn;
ptree->kn = &kn;
ptree->dbptr = dbptr;
- ptree->num_dn = MAX_NODES;
ptree->num_kn = 1;
ptree->num_dbptr = DB_MAX_PTRS;
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_cle.h b/drivers/net/ethernet/apm/xgene/xgene_enet_cle.h
index 9ac9f8e145ec..18fe8d56082c 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_cle.h
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_cle.h
@@ -91,6 +91,8 @@
#define CLE_DSTQIDH_LEN 5
#define CLE_FPSEL_POS 21
#define CLE_FPSEL_LEN 4
+#define CLE_NFPSEL_POS 17
+#define CLE_NFPSEL_LEN 4
#define CLE_PRIORITY_POS 5
#define CLE_PRIORITY_LEN 3
@@ -104,6 +106,7 @@ enum xgene_cle_ptree_nodes {
PKT_PROT_NODE,
RSS_IPV4_TCP_NODE,
RSS_IPV4_UDP_NODE,
+ RSS_IPV4_OTHERS_NODE,
LAST_NODE,
MAX_NODES
};
@@ -275,10 +278,8 @@ struct xgene_cle_dbptr {
};
struct xgene_cle_ptree {
- struct xgene_cle_ptree_ewdn *dn;
struct xgene_cle_ptree_kn *kn;
struct xgene_cle_dbptr *dbptr;
- u32 num_dn;
u32 num_kn;
u32 num_dbptr;
u32 start_node;
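
The CLE_NFPSEL_POS/CLE_NFPSEL_LEN pair added above feeds the SET_VAL() packing used by xgene_cle_dbptr_to_hw(); the macro itself is defined elsewhere in the driver, so treat the shape below as an assumption. The point is only that each (POS, LEN) pair places a masked value into the register word:

/* assumed shape of the SET_VAL() idiom used by the CLE code */
#define SET_VAL(field, val) \
	(((u64)(val) & ((1ULL << field ## _LEN) - 1)) << field ## _POS)

/* e.g. with CLE_NFPSEL_POS == 17 and CLE_NFPSEL_LEN == 4:
 * SET_VAL(CLE_NFPSEL, 5) == 5 << 17
 */
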
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_ethtool.c b/drivers/net/ethernet/apm/xgene/xgene_enet_ethtool.c
index d372d4235c81..28fdedc30b74 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_ethtool.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_ethtool.c
@@ -163,6 +163,74 @@ static void xgene_get_ethtool_stats(struct net_device *ndev,
*data++ = *(u64 *)(pdata + gstrings_stats[i].offset);
}
+static void xgene_get_pauseparam(struct net_device *ndev,
+ struct ethtool_pauseparam *pp)
+{
+ struct xgene_enet_pdata *pdata = netdev_priv(ndev);
+
+ pp->autoneg = pdata->pause_autoneg;
+ pp->tx_pause = pdata->tx_pause;
+ pp->rx_pause = pdata->rx_pause;
+}
+
+static int xgene_set_pauseparam(struct net_device *ndev,
+ struct ethtool_pauseparam *pp)
+{
+ struct xgene_enet_pdata *pdata = netdev_priv(ndev);
+ struct phy_device *phydev = ndev->phydev;
+ u32 oldadv, newadv;
+
+ if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII ||
+ pdata->phy_mode == PHY_INTERFACE_MODE_SGMII) {
+ if (!phydev)
+ return -EINVAL;
+
+ if (!(phydev->supported & SUPPORTED_Pause) ||
+ (!(phydev->supported & SUPPORTED_Asym_Pause) &&
+ pp->rx_pause != pp->tx_pause))
+ return -EINVAL;
+
+ pdata->pause_autoneg = pp->autoneg;
+ pdata->tx_pause = pp->tx_pause;
+ pdata->rx_pause = pp->rx_pause;
+
+ oldadv = phydev->advertising;
+ newadv = oldadv & ~(ADVERTISED_Pause | ADVERTISED_Asym_Pause);
+
+ if (pp->rx_pause)
+ newadv |= ADVERTISED_Pause | ADVERTISED_Asym_Pause;
+
+ if (pp->tx_pause)
+ newadv ^= ADVERTISED_Asym_Pause;
+
+ if (oldadv ^ newadv) {
+ phydev->advertising = newadv;
+
+ if (phydev->autoneg)
+ return phy_start_aneg(phydev);
+
+ if (!pp->autoneg) {
+ pdata->mac_ops->flowctl_tx(pdata,
+ pdata->tx_pause);
+ pdata->mac_ops->flowctl_rx(pdata,
+ pdata->rx_pause);
+ }
+ }
+
+ } else {
+ if (pp->autoneg)
+ return -EINVAL;
+
+ pdata->tx_pause = pp->tx_pause;
+ pdata->rx_pause = pp->rx_pause;
+
+ pdata->mac_ops->flowctl_tx(pdata, pdata->tx_pause);
+ pdata->mac_ops->flowctl_rx(pdata, pdata->rx_pause);
+ }
+
+ return 0;
+}
+
static const struct ethtool_ops xgene_ethtool_ops = {
.get_drvinfo = xgene_get_drvinfo,
.get_link = ethtool_op_get_link,
@@ -171,6 +239,8 @@ static const struct ethtool_ops xgene_ethtool_ops = {
.get_ethtool_stats = xgene_get_ethtool_stats,
.get_link_ksettings = xgene_get_link_ksettings,
.set_link_ksettings = xgene_set_link_ksettings,
+ .get_pauseparam = xgene_get_pauseparam,
+ .set_pauseparam = xgene_set_pauseparam
};
void xgene_enet_set_ethtool_ops(struct net_device *ndev)
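
The advertisement arithmetic in xgene_set_pauseparam() uses the standard OR-then-XOR pause idiom; spelled out as a standalone sketch:

	u32 adv = 0;

	if (rx_pause)	/* want to honor received pause frames */
		adv |= ADVERTISED_Pause | ADVERTISED_Asym_Pause;
	if (tx_pause)	/* want to send pause frames */
		adv ^= ADVERTISED_Asym_Pause;

	/* resulting advertisement:
	 * rx=1 tx=1 -> Pause (symmetric)
	 * rx=1 tx=0 -> Pause | Asym_Pause
	 * rx=0 tx=1 -> Asym_Pause
	 * rx=0 tx=0 -> none
	 */
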
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
index 5390ae89136c..06e681697c17 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
@@ -504,6 +504,56 @@ static void xgene_gmac_set_speed(struct xgene_enet_pdata *pdata)
xgene_enet_wr_mcx_csr(pdata, ICM_CONFIG2_REG_0_ADDR, icm2);
}
+static void xgene_enet_set_frame_size(struct xgene_enet_pdata *pdata, int size)
+{
+ xgene_enet_wr_mcx_mac(pdata, MAX_FRAME_LEN_ADDR, size);
+}
+
+static void xgene_gmac_enable_tx_pause(struct xgene_enet_pdata *pdata,
+ bool enable)
+{
+ u32 data;
+
+ xgene_enet_rd_mcx_csr(pdata, CSR_ECM_CFG_0_ADDR, &data);
+
+ if (enable)
+ data |= MULTI_DPF_AUTOCTRL | PAUSE_XON_EN;
+ else
+ data &= ~(MULTI_DPF_AUTOCTRL | PAUSE_XON_EN);
+
+ xgene_enet_wr_mcx_csr(pdata, CSR_ECM_CFG_0_ADDR, data);
+}
+
+static void xgene_gmac_flowctl_tx(struct xgene_enet_pdata *pdata, bool enable)
+{
+ u32 data;
+
+ xgene_enet_rd_mcx_mac(pdata, MAC_CONFIG_1_ADDR, &data);
+
+ if (enable)
+ data |= TX_FLOW_EN;
+ else
+ data &= ~TX_FLOW_EN;
+
+ xgene_enet_wr_mcx_mac(pdata, MAC_CONFIG_1_ADDR, data);
+
+ pdata->mac_ops->enable_tx_pause(pdata, enable);
+}
+
+static void xgene_gmac_flowctl_rx(struct xgene_enet_pdata *pdata, bool enable)
+{
+ u32 data;
+
+ xgene_enet_rd_mcx_mac(pdata, MAC_CONFIG_1_ADDR, &data);
+
+ if (enable)
+ data |= RX_FLOW_EN;
+ else
+ data &= ~RX_FLOW_EN;
+
+ xgene_enet_wr_mcx_mac(pdata, MAC_CONFIG_1_ADDR, data);
+}
+
static void xgene_gmac_init(struct xgene_enet_pdata *pdata)
{
u32 value;
@@ -527,6 +577,17 @@ static void xgene_gmac_init(struct xgene_enet_pdata *pdata)
/* Rtype should be copied from FP */
xgene_enet_wr_csr(pdata, RSIF_RAM_DBG_REG0_ADDR, 0);
+ /* Configure HW pause frame generation */
+ xgene_enet_rd_mcx_csr(pdata, CSR_MULTI_DPF0_ADDR, &value);
+ value = (DEF_QUANTA << 16) | (value & 0xFFFF);
+ xgene_enet_wr_mcx_csr(pdata, CSR_MULTI_DPF0_ADDR, value);
+
+ xgene_enet_wr_csr(pdata, RXBUF_PAUSE_THRESH, DEF_PAUSE_THRES);
+ xgene_enet_wr_csr(pdata, RXBUF_PAUSE_OFF_THRESH, DEF_PAUSE_OFF_THRES);
+
+ xgene_gmac_flowctl_tx(pdata, pdata->tx_pause);
+ xgene_gmac_flowctl_rx(pdata, pdata->rx_pause);
+
/* Rx-Tx traffic resume */
xgene_enet_wr_csr(pdata, CFG_LINK_AGGR_RESUME_0_ADDR, TX_PORT0);
@@ -550,12 +611,14 @@ static void xgene_enet_config_ring_if_assoc(struct xgene_enet_pdata *pdata)
}
static void xgene_enet_cle_bypass(struct xgene_enet_pdata *pdata,
- u32 dst_ring_num, u16 bufpool_id)
+ u32 dst_ring_num, u16 bufpool_id,
+ u16 nxtbufpool_id)
{
u32 cb;
- u32 fpsel;
+ u32 fpsel, nxtfpsel;
- fpsel = xgene_enet_ring_bufnum(bufpool_id) - 0x20;
+ fpsel = xgene_enet_get_fpsel(bufpool_id);
+ nxtfpsel = xgene_enet_get_fpsel(nxtbufpool_id);
xgene_enet_rd_csr(pdata, CLE_BYPASS_REG0_0_ADDR, &cb);
cb |= CFG_CLE_BYPASS_EN0;
@@ -565,6 +628,7 @@ static void xgene_enet_cle_bypass(struct xgene_enet_pdata *pdata,
xgene_enet_rd_csr(pdata, CLE_BYPASS_REG1_0_ADDR, &cb);
CFG_CLE_DSTQID0_SET(&cb, dst_ring_num);
CFG_CLE_FPSEL0_SET(&cb, fpsel);
+ CFG_CLE_NXTFPSEL0_SET(&cb, nxtfpsel);
xgene_enet_wr_csr(pdata, CLE_BYPASS_REG1_0_ADDR, cb);
}
@@ -652,16 +716,14 @@ static int xgene_enet_reset(struct xgene_enet_pdata *pdata)
static void xgene_enet_clear(struct xgene_enet_pdata *pdata,
struct xgene_enet_desc_ring *ring)
{
- u32 addr, val, data;
-
- val = xgene_enet_ring_bufnum(ring->id);
+ u32 addr, data;
if (xgene_enet_is_bufpool(ring->id)) {
addr = ENET_CFGSSQMIFPRESET_ADDR;
- data = BIT(val - 0x20);
+ data = BIT(xgene_enet_get_fpsel(ring->id));
} else {
addr = ENET_CFGSSQMIWQRESET_ADDR;
- data = BIT(val);
+ data = BIT(xgene_enet_ring_bufnum(ring->id));
}
xgene_enet_wr_ring_if(pdata, addr, data);
@@ -671,24 +733,24 @@ static void xgene_gport_shutdown(struct xgene_enet_pdata *pdata)
{
struct device *dev = &pdata->pdev->dev;
struct xgene_enet_desc_ring *ring;
- u32 pb, val;
+ u32 pb;
int i;
pb = 0;
for (i = 0; i < pdata->rxq_cnt; i++) {
ring = pdata->rx_ring[i]->buf_pool;
+ pb |= BIT(xgene_enet_get_fpsel(ring->id));
+ ring = pdata->rx_ring[i]->page_pool;
+ if (ring)
+ pb |= BIT(xgene_enet_get_fpsel(ring->id));
- val = xgene_enet_ring_bufnum(ring->id);
- pb |= BIT(val - 0x20);
}
xgene_enet_wr_ring_if(pdata, ENET_CFGSSQMIFPRESET_ADDR, pb);
pb = 0;
for (i = 0; i < pdata->txq_cnt; i++) {
ring = pdata->tx_ring[i];
-
- val = xgene_enet_ring_bufnum(ring->id);
- pb |= BIT(val);
+ pb |= BIT(xgene_enet_ring_bufnum(ring->id));
}
xgene_enet_wr_ring_if(pdata, ENET_CFGSSQMIWQRESET_ADDR, pb);
@@ -698,6 +760,48 @@ static void xgene_gport_shutdown(struct xgene_enet_pdata *pdata)
}
}
+static u32 xgene_enet_flowctrl_cfg(struct net_device *ndev)
+{
+ struct xgene_enet_pdata *pdata = netdev_priv(ndev);
+ struct phy_device *phydev = ndev->phydev;
+ u16 lcladv, rmtadv = 0;
+ u32 rx_pause, tx_pause;
+ u8 flowctl = 0;
+
+ if (!phydev->duplex || !pdata->pause_autoneg)
+ return 0;
+
+ if (pdata->tx_pause)
+ flowctl |= FLOW_CTRL_TX;
+
+ if (pdata->rx_pause)
+ flowctl |= FLOW_CTRL_RX;
+
+ lcladv = mii_advertise_flowctrl(flowctl);
+
+ if (phydev->pause)
+ rmtadv = LPA_PAUSE_CAP;
+
+ if (phydev->asym_pause)
+ rmtadv |= LPA_PAUSE_ASYM;
+
+ flowctl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
+ tx_pause = !!(flowctl & FLOW_CTRL_TX);
+ rx_pause = !!(flowctl & FLOW_CTRL_RX);
+
+ if (tx_pause != pdata->tx_pause) {
+ pdata->tx_pause = tx_pause;
+ pdata->mac_ops->flowctl_tx(pdata, pdata->tx_pause);
+ }
+
+ if (rx_pause != pdata->rx_pause) {
+ pdata->rx_pause = rx_pause;
+ pdata->mac_ops->flowctl_rx(pdata, pdata->rx_pause);
+ }
+
+ return 0;
+}
+
static void xgene_enet_adjust_link(struct net_device *ndev)
{
struct xgene_enet_pdata *pdata = netdev_priv(ndev);
@@ -712,6 +816,8 @@ static void xgene_enet_adjust_link(struct net_device *ndev)
mac_ops->tx_enable(pdata);
phy_print_status(phydev);
}
+
+ xgene_enet_flowctrl_cfg(ndev);
} else {
mac_ops->rx_disable(pdata);
mac_ops->tx_disable(pdata);
@@ -785,6 +891,8 @@ int xgene_enet_phy_connect(struct net_device *ndev)
phy_dev->supported &= ~SUPPORTED_10baseT_Half &
~SUPPORTED_100baseT_Half &
~SUPPORTED_1000baseT_Half;
+ phy_dev->supported |= SUPPORTED_Pause |
+ SUPPORTED_Asym_Pause;
phy_dev->advertising = phy_dev->supported;
return 0;
@@ -902,6 +1010,10 @@ const struct xgene_mac_ops xgene_gmac_ops = {
.tx_disable = xgene_gmac_tx_disable,
.set_speed = xgene_gmac_set_speed,
.set_mac_addr = xgene_gmac_set_mac_addr,
+ .set_framesize = xgene_enet_set_frame_size,
+ .enable_tx_pause = xgene_gmac_enable_tx_pause,
+ .flowctl_tx = xgene_gmac_flowctl_tx,
+ .flowctl_rx = xgene_gmac_flowctl_rx,
};
const struct xgene_port_ops xgene_gport_ops = {
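
xgene_enet_flowctrl_cfg() above leans on the generic helpers from <linux/mii.h>; a standalone sketch of the resolution step, with illustrative values:

#include <linux/mii.h>

/* local side wants both directions; the link partner advertises
 * symmetric pause only
 */
u16 lcladv = mii_advertise_flowctrl(FLOW_CTRL_TX | FLOW_CTRL_RX);
u16 rmtadv = LPA_PAUSE_CAP;
u8 cap = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
/* cap resolves to FLOW_CTRL_TX | FLOW_CTRL_RX */
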
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h
index 06e598c8bc16..5f83037bb96b 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h
@@ -165,10 +165,23 @@ enum xgene_enet_rm {
#define CFG_CLE_IP_PROTOCOL0_SET(dst, val) xgene_set_bits(dst, val, 16, 2)
#define CFG_CLE_DSTQID0_SET(dst, val) xgene_set_bits(dst, val, 0, 12)
#define CFG_CLE_FPSEL0_SET(dst, val) xgene_set_bits(dst, val, 16, 4)
+#define CFG_CLE_NXTFPSEL0_SET(dst, val) xgene_set_bits(dst, val, 20, 4)
#define CFG_MACMODE_SET(dst, val) xgene_set_bits(dst, val, 18, 2)
#define CFG_WAITASYNCRD_SET(dst, val) xgene_set_bits(dst, val, 0, 16)
-#define CFG_CLE_DSTQID0(val) (val & GENMASK(11, 0))
-#define CFG_CLE_FPSEL0(val) ((val << 16) & GENMASK(19, 16))
+#define CFG_CLE_DSTQID0(val) ((val) & GENMASK(11, 0))
+#define CFG_CLE_FPSEL0(val) (((val) << 16) & GENMASK(19, 16))
+#define CSR_ECM_CFG_0_ADDR 0x0220
+#define CSR_ECM_CFG_1_ADDR 0x0224
+#define CSR_MULTI_DPF0_ADDR 0x0230
+#define RXBUF_PAUSE_THRESH 0x0534
+#define RXBUF_PAUSE_OFF_THRESH 0x0540
+#define DEF_PAUSE_THRES 0x7d
+#define DEF_PAUSE_OFF_THRES 0x6d
+#define DEF_QUANTA 0x8000
+#define NORM_PAUSE_OPCODE 0x0001
+#define PAUSE_XON_EN BIT(30)
+#define MULTI_DPF_AUTOCTRL BIT(28)
+#define CFG_CLE_NXTFPSEL0(val) (((val) << 20) & GENMASK(23, 20))
#define ICM_CONFIG0_REG_0_ADDR 0x0400
#define ICM_CONFIG2_REG_0_ADDR 0x0410
#define RX_DV_GATE_REG_0_ADDR 0x05fc
@@ -196,6 +209,8 @@ enum xgene_enet_rm {
#define SOFT_RESET1 BIT(31)
#define TX_EN BIT(0)
#define RX_EN BIT(2)
+#define TX_FLOW_EN BIT(4)
+#define RX_FLOW_EN BIT(5)
#define ENET_LHD_MODE BIT(25)
#define ENET_GHD_MODE BIT(26)
#define FULL_DUPLEX2 BIT(0)
@@ -346,6 +361,14 @@ static inline bool xgene_enet_is_bufpool(u16 id)
return ((id & RING_BUFNUM_MASK) >= 0x20) ? true : false;
}
+static inline u8 xgene_enet_get_fpsel(u16 id)
+{
+ if (xgene_enet_is_bufpool(id))
+ return xgene_enet_ring_bufnum(id) - RING_BUFNUM_BUFPOOL;
+
+ return 0;
+}
+
static inline u16 xgene_enet_get_numslots(u16 id, u32 size)
{
bool is_bufpool = xgene_enet_is_bufpool(id);
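
A worked example for the new xgene_enet_get_fpsel() helper, assuming RING_BUFNUM_BUFPOOL == 0x20 as implied by xgene_enet_is_bufpool() above:

/* a buffer-pool ring whose buffer number is 0x23:
 *   xgene_enet_is_bufpool(id) -> true  (0x23 >= 0x20)
 *   xgene_enet_get_fpsel(id)  -> 0x23 - 0x20 == 3
 * a work-queue ring (buffer number < 0x20) yields fpsel 0
 */
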
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
index 8158d4698734..523b8eff6d7b 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
@@ -37,6 +37,9 @@ static void xgene_enet_init_bufpool(struct xgene_enet_desc_ring *buf_pool)
struct xgene_enet_raw_desc16 *raw_desc;
int i;
+ if (!buf_pool)
+ return;
+
for (i = 0; i < buf_pool->slots; i++) {
raw_desc = &buf_pool->raw_desc16[i];
@@ -47,6 +50,86 @@ static void xgene_enet_init_bufpool(struct xgene_enet_desc_ring *buf_pool)
}
}
+static u16 xgene_enet_get_data_len(u64 bufdatalen)
+{
+ u16 hw_len, mask;
+
+ hw_len = GET_VAL(BUFDATALEN, bufdatalen);
+
+ if (unlikely(hw_len == 0x7800)) {
+ return 0;
+ } else if (!(hw_len & BIT(14))) {
+ mask = GENMASK(13, 0);
+ return (hw_len & mask) ? (hw_len & mask) : SIZE_16K;
+ } else if (!(hw_len & GENMASK(13, 12))) {
+ mask = GENMASK(11, 0);
+ return (hw_len & mask) ? (hw_len & mask) : SIZE_4K;
+ } else {
+ mask = GENMASK(11, 0);
+ return (hw_len & mask) ? (hw_len & mask) : SIZE_2K;
+ }
+}
+
+static u16 xgene_enet_set_data_len(u32 size)
+{
+ u16 hw_len;
+
+ hw_len = (size == SIZE_4K) ? BIT(14) : 0;
+
+ return hw_len;
+}
+
+static int xgene_enet_refill_pagepool(struct xgene_enet_desc_ring *buf_pool,
+ u32 nbuf)
+{
+ struct xgene_enet_raw_desc16 *raw_desc;
+ struct xgene_enet_pdata *pdata;
+ struct net_device *ndev;
+ dma_addr_t dma_addr;
+ struct device *dev;
+ struct page *page;
+ u32 slots, tail;
+ u16 hw_len;
+ int i;
+
+ if (unlikely(!buf_pool))
+ return 0;
+
+ ndev = buf_pool->ndev;
+ pdata = netdev_priv(ndev);
+ dev = ndev_to_dev(ndev);
+ slots = buf_pool->slots - 1;
+ tail = buf_pool->tail;
+
+ for (i = 0; i < nbuf; i++) {
+ raw_desc = &buf_pool->raw_desc16[tail];
+
+ page = dev_alloc_page();
+ if (unlikely(!page))
+ return -ENOMEM;
+
+ dma_addr = dma_map_page(dev, page, 0,
+ PAGE_SIZE, DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(dev, dma_addr))) {
+ put_page(page);
+ return -ENOMEM;
+ }
+
+ hw_len = xgene_enet_set_data_len(PAGE_SIZE);
+ raw_desc->m1 = cpu_to_le64(SET_VAL(DATAADDR, dma_addr) |
+ SET_VAL(BUFDATALEN, hw_len) |
+ SET_BIT(COHERENT));
+
+ buf_pool->frag_page[tail] = page;
+ tail = (tail + 1) & slots;
+ }
+
+ pdata->ring_ops->wr_cmd(buf_pool, nbuf);
+ buf_pool->tail = tail;
+
+ return 0;
+}
+
static int xgene_enet_refill_bufpool(struct xgene_enet_desc_ring *buf_pool,
u32 nbuf)
{
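
The hardware length field decoded by xgene_enet_get_data_len() packs a buffer-size class into the top bits, with an all-zero length field standing for the full buffer size; a few worked values for illustration:

/*   hw_len == 0x7800 -> 0 (empty-descriptor marker)
 *   bit 14 clear: 14-bit length, 0 means SIZE_16K
 *     0x0600 -> 0x600 bytes, 0x0000 -> SIZE_16K
 *   bit 14 set, bits 13:12 clear: 12-bit length, 0 means SIZE_4K
 *     0x4000 -> SIZE_4K (exactly what xgene_enet_set_data_len(SIZE_4K) emits)
 *   otherwise: 12-bit length, 0 means SIZE_2K
 */
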
@@ -64,8 +147,9 @@ static int xgene_enet_refill_bufpool(struct xgene_enet_desc_ring *buf_pool,
ndev = buf_pool->ndev;
dev = ndev_to_dev(buf_pool->ndev);
pdata = netdev_priv(ndev);
+
bufdatalen = BUF_LEN_CODE_2K | (SKB_BUFFER_SIZE & GENMASK(11, 0));
- len = XGENE_ENET_MAX_MTU;
+ len = XGENE_ENET_STD_MTU;
for (i = 0; i < nbuf; i++) {
raw_desc = &buf_pool->raw_desc16[tail];
@@ -122,6 +206,25 @@ static void xgene_enet_delete_bufpool(struct xgene_enet_desc_ring *buf_pool)
}
}
+static void xgene_enet_delete_pagepool(struct xgene_enet_desc_ring *buf_pool)
+{
+ struct device *dev = ndev_to_dev(buf_pool->ndev);
+ dma_addr_t dma_addr;
+ struct page *page;
+ int i;
+
+ /* Free up the buffers held by hardware */
+ for (i = 0; i < buf_pool->slots; i++) {
+ page = buf_pool->frag_page[i];
+ if (page) {
+ dma_addr = buf_pool->frag_dma_addr[i];
+ dma_unmap_page(dev, dma_addr, PAGE_SIZE,
+ DMA_FROM_DEVICE);
+ put_page(page);
+ }
+ }
+}
+
static irqreturn_t xgene_enet_rx_irq(const int irq, void *data)
{
struct xgene_enet_desc_ring *rx_ring = data;
@@ -216,11 +319,11 @@ static int xgene_enet_setup_mss(struct net_device *ndev, u32 mss)
}
}
- spin_unlock(&pdata->mss_lock);
-
/* No slots with ref_count = 0 available, return busy */
if (!mss_index_found)
- return -EBUSY;
+ mss_index = -EBUSY;
+
+ spin_unlock(&pdata->mss_lock);
return mss_index;
}
@@ -515,23 +618,67 @@ static void xgene_enet_skip_csum(struct sk_buff *skb)
}
}
+static void xgene_enet_free_pagepool(struct xgene_enet_desc_ring *buf_pool,
+ struct xgene_enet_raw_desc *raw_desc,
+ struct xgene_enet_raw_desc *exp_desc)
+{
+ __le64 *desc = (void *)exp_desc;
+ dma_addr_t dma_addr;
+ struct device *dev;
+ struct page *page;
+ u16 slots, head;
+ u32 frag_size;
+ int i;
+
+ if (!buf_pool || !raw_desc || !exp_desc ||
+ (!GET_VAL(NV, le64_to_cpu(raw_desc->m0))))
+ return;
+
+ dev = ndev_to_dev(buf_pool->ndev);
+ slots = buf_pool->slots - 1;
+ head = buf_pool->head;
+
+ for (i = 0; i < 4; i++) {
+ frag_size = xgene_enet_get_data_len(le64_to_cpu(desc[i ^ 1]));
+ if (!frag_size)
+ break;
+
+ dma_addr = GET_VAL(DATAADDR, le64_to_cpu(desc[i ^ 1]));
+ dma_unmap_page(dev, dma_addr, PAGE_SIZE, DMA_FROM_DEVICE);
+
+ page = buf_pool->frag_page[head];
+ put_page(page);
+
+ buf_pool->frag_page[head] = NULL;
+ head = (head + 1) & slots;
+ }
+ buf_pool->head = head;
+}
+
static int xgene_enet_rx_frame(struct xgene_enet_desc_ring *rx_ring,
- struct xgene_enet_raw_desc *raw_desc)
+ struct xgene_enet_raw_desc *raw_desc,
+ struct xgene_enet_raw_desc *exp_desc)
{
+ struct xgene_enet_desc_ring *buf_pool, *page_pool;
+ u32 datalen, frag_size, skb_index;
struct net_device *ndev;
- struct device *dev;
- struct xgene_enet_desc_ring *buf_pool;
- u32 datalen, skb_index;
+ dma_addr_t dma_addr;
struct sk_buff *skb;
+ struct device *dev;
+ struct page *page;
+ u16 slots, head;
+ int i, ret = 0;
+ __le64 *desc;
u8 status;
- int ret = 0;
+ bool nv;
ndev = rx_ring->ndev;
dev = ndev_to_dev(rx_ring->ndev);
buf_pool = rx_ring->buf_pool;
+ page_pool = rx_ring->page_pool;
dma_unmap_single(dev, GET_VAL(DATAADDR, le64_to_cpu(raw_desc->m1)),
- XGENE_ENET_MAX_MTU, DMA_FROM_DEVICE);
+ XGENE_ENET_STD_MTU, DMA_FROM_DEVICE);
skb_index = GET_VAL(USERINFO, le64_to_cpu(raw_desc->m0));
skb = buf_pool->rx_skb[skb_index];
buf_pool->rx_skb[skb_index] = NULL;
@@ -541,6 +688,7 @@ static int xgene_enet_rx_frame(struct xgene_enet_desc_ring *rx_ring,
GET_VAL(LERR, le64_to_cpu(raw_desc->m0));
if (unlikely(status > 2)) {
dev_kfree_skb_any(skb);
+ xgene_enet_free_pagepool(page_pool, raw_desc, exp_desc);
xgene_enet_parse_error(rx_ring, netdev_priv(rx_ring->ndev),
status);
ret = -EIO;
@@ -548,11 +696,44 @@ static int xgene_enet_rx_frame(struct xgene_enet_desc_ring *rx_ring,
}
/* strip off CRC as HW isn't doing this */
- datalen = GET_VAL(BUFDATALEN, le64_to_cpu(raw_desc->m1));
- datalen = (datalen & DATALEN_MASK) - 4;
- prefetch(skb->data - NET_IP_ALIGN);
+ datalen = xgene_enet_get_data_len(le64_to_cpu(raw_desc->m1));
+
+ nv = GET_VAL(NV, le64_to_cpu(raw_desc->m0));
+ if (!nv)
+ datalen -= 4;
+
skb_put(skb, datalen);
+ prefetch(skb->data - NET_IP_ALIGN);
+
+ if (!nv)
+ goto skip_jumbo;
+ slots = page_pool->slots - 1;
+ head = page_pool->head;
+ desc = (void *)exp_desc;
+
+ for (i = 0; i < 4; i++) {
+ frag_size = xgene_enet_get_data_len(le64_to_cpu(desc[i ^ 1]));
+ if (!frag_size)
+ break;
+
+ dma_addr = GET_VAL(DATAADDR, le64_to_cpu(desc[i ^ 1]));
+ dma_unmap_page(dev, dma_addr, PAGE_SIZE, DMA_FROM_DEVICE);
+
+ page = page_pool->frag_page[head];
+ skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, 0,
+ frag_size, PAGE_SIZE);
+
+ datalen += frag_size;
+
+ page_pool->frag_page[head] = NULL;
+ head = (head + 1) & slots;
+ }
+
+ page_pool->head = head;
+ rx_ring->npagepool -= skb_shinfo(skb)->nr_frags;
+
+skip_jumbo:
skb_checksum_none_assert(skb);
skb->protocol = eth_type_trans(skb, ndev);
if (likely((ndev->features & NETIF_F_IP_CSUM) &&
@@ -563,7 +744,15 @@ static int xgene_enet_rx_frame(struct xgene_enet_desc_ring *rx_ring,
rx_ring->rx_packets++;
rx_ring->rx_bytes += datalen;
napi_gro_receive(&rx_ring->napi, skb);
+
out:
+ if (rx_ring->npagepool <= 0) {
+ ret = xgene_enet_refill_pagepool(page_pool, NUM_NXTBUFPOOL);
+ rx_ring->npagepool = NUM_NXTBUFPOOL;
+ if (ret)
+ return ret;
+ }
+
if (--rx_ring->nbufpool == 0) {
ret = xgene_enet_refill_bufpool(buf_pool, NUM_BUFPOOL);
rx_ring->nbufpool = NUM_BUFPOOL;
@@ -611,7 +800,7 @@ static int xgene_enet_process_ring(struct xgene_enet_desc_ring *ring,
desc_count++;
}
if (is_rx_desc(raw_desc)) {
- ret = xgene_enet_rx_frame(ring, raw_desc);
+ ret = xgene_enet_rx_frame(ring, raw_desc, exp_desc);
} else {
ret = xgene_enet_tx_completion(ring, raw_desc);
is_completion = true;
@@ -854,7 +1043,7 @@ static void xgene_enet_delete_ring(struct xgene_enet_desc_ring *ring)
static void xgene_enet_delete_desc_rings(struct xgene_enet_pdata *pdata)
{
- struct xgene_enet_desc_ring *buf_pool;
+ struct xgene_enet_desc_ring *buf_pool, *page_pool;
struct xgene_enet_desc_ring *ring;
int i;
@@ -867,18 +1056,28 @@ static void xgene_enet_delete_desc_rings(struct xgene_enet_pdata *pdata)
xgene_enet_delete_ring(ring->cp_ring);
pdata->tx_ring[i] = NULL;
}
+
}
for (i = 0; i < pdata->rxq_cnt; i++) {
ring = pdata->rx_ring[i];
if (ring) {
+ page_pool = ring->page_pool;
+ if (page_pool) {
+ xgene_enet_delete_pagepool(page_pool);
+ xgene_enet_delete_ring(page_pool);
+ pdata->port_ops->clear(pdata, page_pool);
+ }
+
buf_pool = ring->buf_pool;
xgene_enet_delete_bufpool(buf_pool);
xgene_enet_delete_ring(buf_pool);
pdata->port_ops->clear(pdata, buf_pool);
+
xgene_enet_delete_ring(ring);
pdata->rx_ring[i] = NULL;
}
+
}
}
@@ -931,8 +1130,10 @@ static void xgene_enet_free_desc_ring(struct xgene_enet_desc_ring *ring)
static void xgene_enet_free_desc_rings(struct xgene_enet_pdata *pdata)
{
+ struct xgene_enet_desc_ring *page_pool;
struct device *dev = &pdata->pdev->dev;
struct xgene_enet_desc_ring *ring;
+ void *p;
int i;
for (i = 0; i < pdata->txq_cnt; i++) {
@@ -940,10 +1141,13 @@ static void xgene_enet_free_desc_rings(struct xgene_enet_pdata *pdata)
if (ring) {
if (ring->cp_ring && ring->cp_ring->cp_skb)
devm_kfree(dev, ring->cp_ring->cp_skb);
+
if (ring->cp_ring && pdata->cq_cnt)
xgene_enet_free_desc_ring(ring->cp_ring);
+
xgene_enet_free_desc_ring(ring);
}
+
}
for (i = 0; i < pdata->rxq_cnt; i++) {
@@ -952,8 +1156,21 @@ static void xgene_enet_free_desc_rings(struct xgene_enet_pdata *pdata)
if (ring->buf_pool) {
if (ring->buf_pool->rx_skb)
devm_kfree(dev, ring->buf_pool->rx_skb);
+
xgene_enet_free_desc_ring(ring->buf_pool);
}
+
+ page_pool = ring->page_pool;
+ if (page_pool) {
+ p = page_pool->frag_page;
+ if (p)
+ devm_kfree(dev, p);
+
+ p = page_pool->frag_dma_addr;
+ if (p)
+ devm_kfree(dev, p);
+ }
+
xgene_enet_free_desc_ring(ring);
}
}
@@ -1071,19 +1288,20 @@ static u8 xgene_start_cpu_bufnum(struct xgene_enet_pdata *pdata)
static int xgene_enet_create_desc_rings(struct net_device *ndev)
{
- struct xgene_enet_pdata *pdata = netdev_priv(ndev);
- struct device *dev = ndev_to_dev(ndev);
struct xgene_enet_desc_ring *rx_ring, *tx_ring, *cp_ring;
+ struct xgene_enet_pdata *pdata = netdev_priv(ndev);
+ struct xgene_enet_desc_ring *page_pool = NULL;
struct xgene_enet_desc_ring *buf_pool = NULL;
- enum xgene_ring_owner owner;
- dma_addr_t dma_exp_bufs;
- u8 cpu_bufnum;
+ struct device *dev = ndev_to_dev(ndev);
u8 eth_bufnum = pdata->eth_bufnum;
u8 bp_bufnum = pdata->bp_bufnum;
u16 ring_num = pdata->ring_num;
+ enum xgene_ring_owner owner;
+ dma_addr_t dma_exp_bufs;
+ u16 ring_id, slots;
__le64 *exp_bufs;
- u16 ring_id;
int i, ret, size;
+ u8 cpu_bufnum;
cpu_bufnum = xgene_start_cpu_bufnum(pdata);
@@ -1103,7 +1321,7 @@ static int xgene_enet_create_desc_rings(struct net_device *ndev)
owner = xgene_derive_ring_owner(pdata);
ring_id = xgene_enet_get_ring_id(owner, bp_bufnum++);
buf_pool = xgene_enet_create_desc_ring(ndev, ring_num++,
- RING_CFGSIZE_2KB,
+ RING_CFGSIZE_16KB,
ring_id);
if (!buf_pool) {
ret = -ENOMEM;
@@ -1111,7 +1329,7 @@ static int xgene_enet_create_desc_rings(struct net_device *ndev)
}
rx_ring->nbufpool = NUM_BUFPOOL;
- rx_ring->buf_pool = buf_pool;
+ rx_ring->npagepool = NUM_NXTBUFPOOL;
rx_ring->irq = pdata->irqs[i];
buf_pool->rx_skb = devm_kcalloc(dev, buf_pool->slots,
sizeof(struct sk_buff *),
@@ -1124,6 +1342,42 @@ static int xgene_enet_create_desc_rings(struct net_device *ndev)
buf_pool->dst_ring_num = xgene_enet_dst_ring_num(buf_pool);
rx_ring->buf_pool = buf_pool;
pdata->rx_ring[i] = rx_ring;
+
+ if ((pdata->enet_id == XGENE_ENET1 && pdata->rxq_cnt > 4) ||
+ (pdata->enet_id == XGENE_ENET2 && pdata->rxq_cnt > 16)) {
+ break;
+ }
+
+ /* allocate next buffer pool for jumbo packets */
+ owner = xgene_derive_ring_owner(pdata);
+ ring_id = xgene_enet_get_ring_id(owner, bp_bufnum++);
+ page_pool = xgene_enet_create_desc_ring(ndev, ring_num++,
+ RING_CFGSIZE_16KB,
+ ring_id);
+ if (!page_pool) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ slots = page_pool->slots;
+ page_pool->frag_page = devm_kcalloc(dev, slots,
+ sizeof(struct page *),
+ GFP_KERNEL);
+ if (!page_pool->frag_page) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ page_pool->frag_dma_addr = devm_kcalloc(dev, slots,
+ sizeof(dma_addr_t),
+ GFP_KERNEL);
+ if (!page_pool->frag_dma_addr) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ page_pool->dst_ring_num = xgene_enet_dst_ring_num(page_pool);
+ rx_ring->page_pool = page_pool;
}
for (i = 0; i < pdata->txq_cnt; i++) {
@@ -1247,13 +1501,31 @@ static int xgene_enet_set_mac_address(struct net_device *ndev, void *addr)
return ret;
}
+static int xgene_change_mtu(struct net_device *ndev, int new_mtu)
+{
+ struct xgene_enet_pdata *pdata = netdev_priv(ndev);
+ int frame_size;
+
+ if (!netif_running(ndev))
+ return 0;
+
+ frame_size = (new_mtu > ETH_DATA_LEN) ? (new_mtu + 18) : 0x600;
+
+ xgene_enet_close(ndev);
+ ndev->mtu = new_mtu;
+ pdata->mac_ops->set_framesize(pdata, frame_size);
+ xgene_enet_open(ndev);
+
+ return 0;
+}
+
static const struct net_device_ops xgene_ndev_ops = {
.ndo_open = xgene_enet_open,
.ndo_stop = xgene_enet_close,
.ndo_start_xmit = xgene_enet_start_xmit,
.ndo_tx_timeout = xgene_enet_timeout,
.ndo_get_stats64 = xgene_enet_get_stats64,
- .ndo_change_mtu = eth_change_mtu,
+ .ndo_change_mtu = xgene_change_mtu,
.ndo_set_mac_address = xgene_enet_set_mac_address,
};
@@ -1382,9 +1654,13 @@ static void xgene_enet_gpiod_get(struct xgene_enet_pdata *pdata)
{
struct device *dev = &pdata->pdev->dev;
- if (pdata->phy_mode != PHY_INTERFACE_MODE_XGMII)
+ pdata->sfp_gpio_en = false;
+ if (pdata->phy_mode != PHY_INTERFACE_MODE_XGMII ||
+ (!device_property_present(dev, "sfp-gpios") &&
+ !device_property_present(dev, "rxlos-gpios")))
return;
+ pdata->sfp_gpio_en = true;
pdata->sfp_rdy = gpiod_get(dev, "rxlos", GPIOD_IN);
if (IS_ERR(pdata->sfp_rdy))
pdata->sfp_rdy = gpiod_get(dev, "sfp", GPIOD_IN);
@@ -1515,10 +1791,12 @@ static int xgene_enet_get_resources(struct xgene_enet_pdata *pdata)
static int xgene_enet_init_hw(struct xgene_enet_pdata *pdata)
{
struct xgene_enet_cle *enet_cle = &pdata->cle;
+ struct xgene_enet_desc_ring *page_pool;
struct net_device *ndev = pdata->ndev;
struct xgene_enet_desc_ring *buf_pool;
- u16 dst_ring_num;
+ u16 dst_ring_num, ring_id;
int i, ret;
+ u32 count;
ret = pdata->port_ops->reset(pdata);
if (ret)
@@ -1534,9 +1812,18 @@ static int xgene_enet_init_hw(struct xgene_enet_pdata *pdata)
for (i = 0; i < pdata->rxq_cnt; i++) {
buf_pool = pdata->rx_ring[i]->buf_pool;
xgene_enet_init_bufpool(buf_pool);
- ret = xgene_enet_refill_bufpool(buf_pool, pdata->rx_buff_cnt);
+ page_pool = pdata->rx_ring[i]->page_pool;
+ xgene_enet_init_bufpool(page_pool);
+
+ count = pdata->rx_buff_cnt;
+ ret = xgene_enet_refill_bufpool(buf_pool, count);
if (ret)
goto err;
+
+ ret = xgene_enet_refill_pagepool(page_pool, count);
+ if (ret)
+ goto err;
+
}
dst_ring_num = xgene_enet_dst_ring_num(pdata->rx_ring[0]);
@@ -1555,10 +1842,17 @@ static int xgene_enet_init_hw(struct xgene_enet_pdata *pdata)
netdev_err(ndev, "Preclass Tree init error\n");
goto err;
}
+
} else {
- pdata->port_ops->cle_bypass(pdata, dst_ring_num, buf_pool->id);
+ dst_ring_num = xgene_enet_dst_ring_num(pdata->rx_ring[0]);
+ buf_pool = pdata->rx_ring[0]->buf_pool;
+ page_pool = pdata->rx_ring[0]->page_pool;
+ ring_id = (page_pool) ? page_pool->id : 0;
+ pdata->port_ops->cle_bypass(pdata, dst_ring_num,
+ buf_pool->id, ring_id);
}
+ ndev->max_mtu = XGENE_ENET_MAX_MTU;
pdata->phy_speed = SPEED_UNKNOWN;
pdata->mac_ops->init(pdata);
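The new xgene_change_mtu() above closes the interface, reprograms the MAC frame size, and reopens it. The frame size is new_mtu + 18 for jumbo MTUs, i.e. the MTU plus the 14-byte Ethernet header and 4-byte FCS, and the fixed standard frame size 0x600 (1536 bytes) otherwise. A minimal stand-alone sketch of that computation; the constants are spelled out here and match the kernel's ETH_DATA_LEN, ETH_HLEN and ETH_FCS_LEN:

#include <stdio.h>

#define ETH_DATA_LEN 1500  /* standard Ethernet MTU */
#define ETH_HLEN     14    /* Ethernet header */
#define ETH_FCS_LEN  4     /* frame check sequence */

/* Mirrors the frame_size expression in xgene_change_mtu(). */
static int xgene_frame_size(int new_mtu)
{
        return (new_mtu > ETH_DATA_LEN) ? (new_mtu + ETH_HLEN + ETH_FCS_LEN)
                                        : 0x600;
}

int main(void)
{
        printf("%d\n", xgene_frame_size(1500));  /* 1536 */
        printf("%d\n", xgene_frame_size(9000));  /* 9018 */
        return 0;
}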
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.h b/drivers/net/ethernet/apm/xgene/xgene_enet_main.h
index 0cda58f5a840..52571741da9f 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.h
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.h
@@ -41,11 +41,14 @@
#include "../../../phy/mdio-xgene.h"
#define XGENE_DRV_VERSION "v1.0"
-#define XGENE_ENET_MAX_MTU 1536
-#define SKB_BUFFER_SIZE (XGENE_ENET_MAX_MTU - NET_IP_ALIGN)
+#define XGENE_ENET_STD_MTU 1536
+#define XGENE_ENET_MAX_MTU 9600
+#define SKB_BUFFER_SIZE (XGENE_ENET_STD_MTU - NET_IP_ALIGN)
+
#define BUFLEN_16K (16 * 1024)
-#define NUM_PKT_BUF 64
+#define NUM_PKT_BUF 1024
#define NUM_BUFPOOL 32
+#define NUM_NXTBUFPOOL 8
#define MAX_EXP_BUFFS 256
#define NUM_MSS_REG 4
#define XGENE_MIN_ENET_FRAME_SIZE 60
@@ -88,6 +91,12 @@ enum xgene_enet_id {
XGENE_ENET2
};
+enum xgene_enet_buf_len {
+ SIZE_2K = 2048,
+ SIZE_4K = 4096,
+ SIZE_16K = 16384
+};
+
/* software context of a descriptor ring */
struct xgene_enet_desc_ring {
struct net_device *ndev;
@@ -107,14 +116,18 @@ struct xgene_enet_desc_ring {
dma_addr_t irq_mbox_dma;
void *irq_mbox_addr;
u16 dst_ring_num;
- u8 nbufpool;
+ u16 nbufpool;
+ int npagepool;
u8 index;
+ u32 flags;
struct sk_buff *(*rx_skb);
struct sk_buff *(*cp_skb);
dma_addr_t *frag_dma_addr;
+ struct page *(*frag_page);
enum xgene_enet_ring_cfgsize cfgsize;
struct xgene_enet_desc_ring *cp_ring;
struct xgene_enet_desc_ring *buf_pool;
+ struct xgene_enet_desc_ring *page_pool;
struct napi_struct napi;
union {
void *desc_addr;
@@ -143,8 +156,12 @@ struct xgene_mac_ops {
void (*rx_disable)(struct xgene_enet_pdata *pdata);
void (*set_speed)(struct xgene_enet_pdata *pdata);
void (*set_mac_addr)(struct xgene_enet_pdata *pdata);
+ void (*set_framesize)(struct xgene_enet_pdata *pdata, int framesize);
void (*set_mss)(struct xgene_enet_pdata *pdata, u16 mss, u8 index);
void (*link_state)(struct work_struct *work);
+ void (*enable_tx_pause)(struct xgene_enet_pdata *pdata, bool enable);
+ void (*flowctl_rx)(struct xgene_enet_pdata *pdata, bool enable);
+ void (*flowctl_tx)(struct xgene_enet_pdata *pdata, bool enable);
};
struct xgene_port_ops {
@@ -152,7 +169,7 @@ struct xgene_port_ops {
void (*clear)(struct xgene_enet_pdata *pdata,
struct xgene_enet_desc_ring *ring);
void (*cle_bypass)(struct xgene_enet_pdata *pdata,
- u32 dst_ring_num, u16 bufpool_id);
+ u32 dst_ring_num, u16 bufpool_id, u16 nxtbufpool_id);
void (*shutdown)(struct xgene_enet_pdata *pdata);
};
@@ -219,6 +236,10 @@ struct xgene_enet_pdata {
u8 rx_delay;
bool mdio_driver;
struct gpio_desc *sfp_rdy;
+ bool sfp_gpio_en;
+ u32 pause_autoneg;
+ bool tx_pause;
+ bool rx_pause;
};
struct xgene_indirect_ctl {
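Two index patterns in the jumbo RX path of xgene_enet_rx_frame() earlier in this diff are worth unpacking. The fragment descriptors are read as desc[i ^ 1], which suggests the two 64-bit words of each descriptor pair are stored swapped, and the page-pool head advances with head = (head + 1) & slots, where slots was set to page_pool->slots - 1 — a power-of-two ring mask, so the wrap is only correct if the pool size is a power of two. A sketch of both patterns with a made-up pool size:

#include <stdio.h>

int main(void)
{
        unsigned int size = 8;         /* pool slots; must be a power of two */
        unsigned int mask = size - 1;  /* plays the role of page_pool->slots - 1 */
        unsigned int head = 6;
        int i;

        /* word-swapped walk over the four fragment descriptors */
        for (i = 0; i < 4; i++)
                printf("desc index %d\n", i ^ 1);  /* 1, 0, 3, 2 */

        /* branch-free wrap of the ring head */
        for (i = 0; i < 4; i++) {
                printf("slot %u\n", head);         /* 6, 7, 0, 1 */
                head = (head + 1) & mask;
        }
        return 0;
}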
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_ring2.c b/drivers/net/ethernet/apm/xgene/xgene_enet_ring2.c
index af51dd5844ce..4ff40559f970 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_ring2.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_ring2.c
@@ -119,6 +119,7 @@ static void xgene_enet_set_ring_id(struct xgene_enet_desc_ring *ring)
ring_id_buf = (ring->num << 9) & GENMASK(18, 9);
ring_id_buf |= PREFETCH_BUF_EN;
+
if (is_bufpool)
ring_id_buf |= IS_BUFFER_POOL;
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c b/drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c
index d12e9cbae820..a8e063bdee3b 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c
@@ -343,6 +343,11 @@ static void xgene_sgmac_set_speed(struct xgene_enet_pdata *p)
xgene_enet_wr_mcx_csr(p, icm2_addr, icm2);
}
+static void xgene_sgmac_set_frame_size(struct xgene_enet_pdata *pdata, int size)
+{
+ xgene_enet_wr_mac(pdata, MAX_FRAME_LEN_ADDR, size);
+}
+
static void xgene_sgmii_enable_autoneg(struct xgene_enet_pdata *p)
{
u32 data, loop = 10;
@@ -360,11 +365,39 @@ static void xgene_sgmii_enable_autoneg(struct xgene_enet_pdata *p)
netdev_err(p->ndev, "Auto-negotiation failed\n");
}
+static void xgene_sgmac_rxtx(struct xgene_enet_pdata *p, u32 bits, bool set)
+{
+ u32 data;
+
+ data = xgene_enet_rd_mac(p, MAC_CONFIG_1_ADDR);
+
+ if (set)
+ data |= bits;
+ else
+ data &= ~bits;
+
+ xgene_enet_wr_mac(p, MAC_CONFIG_1_ADDR, data);
+}
+
+static void xgene_sgmac_flowctl_tx(struct xgene_enet_pdata *p, bool enable)
+{
+ xgene_sgmac_rxtx(p, TX_FLOW_EN, enable);
+
+ p->mac_ops->enable_tx_pause(p, enable);
+}
+
+static void xgene_sgmac_flowctl_rx(struct xgene_enet_pdata *pdata, bool enable)
+{
+ xgene_sgmac_rxtx(pdata, RX_FLOW_EN, enable);
+}
+
static void xgene_sgmac_init(struct xgene_enet_pdata *p)
{
+ u32 pause_thres_reg, pause_off_thres_reg;
u32 enet_spare_cfg_reg, rsif_config_reg;
u32 cfg_bypass_reg, rx_dv_gate_reg;
- u32 data, offset;
+ u32 data, data1, data2, offset;
+ u32 multi_dpf_reg;
if (!(p->enet_id == XGENE_ENET2 && p->mdio_driver))
xgene_sgmac_reset(p);
@@ -400,24 +433,50 @@ static void xgene_sgmac_init(struct xgene_enet_pdata *p)
data |= CFG_RSIF_FPBUFF_TIMEOUT_EN;
xgene_enet_wr_csr(p, rsif_config_reg, data);
- /* Bypass traffic gating */
- xgene_enet_wr_csr(p, XG_ENET_SPARE_CFG_REG_1_ADDR, 0x84);
- xgene_enet_wr_csr(p, cfg_bypass_reg, RESUME_TX);
- xgene_enet_wr_mcx_csr(p, rx_dv_gate_reg, RESUME_RX0);
-}
+ /* Configure HW pause frame generation */
+ multi_dpf_reg = (p->enet_id == XGENE_ENET1) ? CSR_MULTI_DPF0_ADDR :
+ XG_MCX_MULTI_DPF0_ADDR;
+ data = xgene_enet_rd_mcx_csr(p, multi_dpf_reg);
+ data = (DEF_QUANTA << 16) | (data & 0xffff);
+ xgene_enet_wr_mcx_csr(p, multi_dpf_reg, data);
+
+ if (p->enet_id != XGENE_ENET1) {
+ data = xgene_enet_rd_mcx_csr(p, XG_MCX_MULTI_DPF1_ADDR);
+ data = (NORM_PAUSE_OPCODE << 16) | (data & 0xFFFF);
+ xgene_enet_wr_mcx_csr(p, XG_MCX_MULTI_DPF1_ADDR, data);
+ }
-static void xgene_sgmac_rxtx(struct xgene_enet_pdata *p, u32 bits, bool set)
-{
- u32 data;
+ pause_thres_reg = (p->enet_id == XGENE_ENET1) ? RXBUF_PAUSE_THRESH :
+ XG_RXBUF_PAUSE_THRESH;
+ pause_off_thres_reg = (p->enet_id == XGENE_ENET1) ?
+ RXBUF_PAUSE_OFF_THRESH : 0;
- data = xgene_enet_rd_mac(p, MAC_CONFIG_1_ADDR);
+ if (p->enet_id == XGENE_ENET1) {
+ data1 = xgene_enet_rd_csr(p, pause_thres_reg);
+ data2 = xgene_enet_rd_csr(p, pause_off_thres_reg);
+
+ if (!(p->port_id % 2)) {
+ data1 = (data1 & 0xffff0000) | DEF_PAUSE_THRES;
+ data2 = (data2 & 0xffff0000) | DEF_PAUSE_OFF_THRES;
+ } else {
+ data1 = (data1 & 0xffff) | (DEF_PAUSE_THRES << 16);
+ data2 = (data2 & 0xffff) | (DEF_PAUSE_OFF_THRES << 16);
+ }
- if (set)
- data |= bits;
- else
- data &= ~bits;
+ xgene_enet_wr_csr(p, pause_thres_reg, data1);
+ xgene_enet_wr_csr(p, pause_off_thres_reg, data2);
+ } else {
+ data = (DEF_PAUSE_OFF_THRES << 16) | DEF_PAUSE_THRES;
+ xgene_enet_wr_csr(p, pause_thres_reg, data);
+ }
- xgene_enet_wr_mac(p, MAC_CONFIG_1_ADDR, data);
+ xgene_sgmac_flowctl_tx(p, p->tx_pause);
+ xgene_sgmac_flowctl_rx(p, p->rx_pause);
+
+ /* Bypass traffic gating */
+ xgene_enet_wr_csr(p, XG_ENET_SPARE_CFG_REG_1_ADDR, 0x84);
+ xgene_enet_wr_csr(p, cfg_bypass_reg, RESUME_TX);
+ xgene_enet_wr_mcx_csr(p, rx_dv_gate_reg, RESUME_RX0);
}
static void xgene_sgmac_rx_enable(struct xgene_enet_pdata *p)
@@ -484,11 +543,12 @@ static int xgene_enet_reset(struct xgene_enet_pdata *p)
}
static void xgene_enet_cle_bypass(struct xgene_enet_pdata *p,
- u32 dst_ring_num, u16 bufpool_id)
+ u32 dst_ring_num, u16 bufpool_id,
+ u16 nxtbufpool_id)
{
- u32 data, fpsel;
u32 cle_bypass_reg0, cle_bypass_reg1;
u32 offset = p->port_id * MAC_OFFSET;
+ u32 data, fpsel, nxtfpsel;
if (p->enet_id == XGENE_ENET1) {
cle_bypass_reg0 = CLE_BYPASS_REG0_0_ADDR;
@@ -501,24 +561,24 @@ static void xgene_enet_cle_bypass(struct xgene_enet_pdata *p,
data = CFG_CLE_BYPASS_EN0;
xgene_enet_wr_csr(p, cle_bypass_reg0 + offset, data);
- fpsel = xgene_enet_ring_bufnum(bufpool_id) - 0x20;
- data = CFG_CLE_DSTQID0(dst_ring_num) | CFG_CLE_FPSEL0(fpsel);
+ fpsel = xgene_enet_get_fpsel(bufpool_id);
+ nxtfpsel = xgene_enet_get_fpsel(nxtbufpool_id);
+ data = CFG_CLE_DSTQID0(dst_ring_num) | CFG_CLE_FPSEL0(fpsel) |
+ CFG_CLE_NXTFPSEL0(nxtfpsel);
xgene_enet_wr_csr(p, cle_bypass_reg1 + offset, data);
}
static void xgene_enet_clear(struct xgene_enet_pdata *pdata,
struct xgene_enet_desc_ring *ring)
{
- u32 addr, val, data;
-
- val = xgene_enet_ring_bufnum(ring->id);
+ u32 addr, data;
if (xgene_enet_is_bufpool(ring->id)) {
addr = ENET_CFGSSQMIFPRESET_ADDR;
- data = BIT(val - 0x20);
+ data = BIT(xgene_enet_get_fpsel(ring->id));
} else {
addr = ENET_CFGSSQMIWQRESET_ADDR;
- data = BIT(val);
+ data = BIT(xgene_enet_ring_bufnum(ring->id));
}
xgene_enet_wr_ring_if(pdata, addr, data);
@@ -528,24 +588,23 @@ static void xgene_enet_shutdown(struct xgene_enet_pdata *p)
{
struct device *dev = &p->pdev->dev;
struct xgene_enet_desc_ring *ring;
- u32 pb, val;
+ u32 pb;
int i;
pb = 0;
for (i = 0; i < p->rxq_cnt; i++) {
ring = p->rx_ring[i]->buf_pool;
-
- val = xgene_enet_ring_bufnum(ring->id);
- pb |= BIT(val - 0x20);
+ pb |= BIT(xgene_enet_get_fpsel(ring->id));
+ ring = p->rx_ring[i]->page_pool;
+ if (ring)
+ pb |= BIT(xgene_enet_get_fpsel(ring->id));
}
xgene_enet_wr_ring_if(p, ENET_CFGSSQMIFPRESET_ADDR, pb);
pb = 0;
for (i = 0; i < p->txq_cnt; i++) {
ring = p->tx_ring[i];
-
- val = xgene_enet_ring_bufnum(ring->id);
- pb |= BIT(val);
+ pb |= BIT(xgene_enet_ring_bufnum(ring->id));
}
xgene_enet_wr_ring_if(p, ENET_CFGSSQMIWQRESET_ADDR, pb);
@@ -586,6 +645,25 @@ static void xgene_enet_link_state(struct work_struct *work)
schedule_delayed_work(&p->link_work, poll_interval);
}
+static void xgene_sgmac_enable_tx_pause(struct xgene_enet_pdata *p, bool enable)
+{
+ u32 data, ecm_cfg_addr;
+
+ if (p->enet_id == XGENE_ENET1) {
+ ecm_cfg_addr = (!(p->port_id % 2)) ? CSR_ECM_CFG_0_ADDR :
+ CSR_ECM_CFG_1_ADDR;
+ } else {
+ ecm_cfg_addr = XG_MCX_ECM_CFG_0_ADDR;
+ }
+
+ data = xgene_enet_rd_mcx_csr(p, ecm_cfg_addr);
+ if (enable)
+ data |= MULTI_DPF_AUTOCTRL | PAUSE_XON_EN;
+ else
+ data &= ~(MULTI_DPF_AUTOCTRL | PAUSE_XON_EN);
+ xgene_enet_wr_mcx_csr(p, ecm_cfg_addr, data);
+}
+
const struct xgene_mac_ops xgene_sgmac_ops = {
.init = xgene_sgmac_init,
.reset = xgene_sgmac_reset,
@@ -595,7 +673,11 @@ const struct xgene_mac_ops xgene_sgmac_ops = {
.tx_disable = xgene_sgmac_tx_disable,
.set_speed = xgene_sgmac_set_speed,
.set_mac_addr = xgene_sgmac_set_mac_addr,
- .link_state = xgene_enet_link_state
+ .set_framesize = xgene_sgmac_set_frame_size,
+ .link_state = xgene_enet_link_state,
+ .enable_tx_pause = xgene_sgmac_enable_tx_pause,
+ .flowctl_tx = xgene_sgmac_flowctl_tx,
+ .flowctl_rx = xgene_sgmac_flowctl_rx
};
const struct xgene_port_ops xgene_sgport_ops = {
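In xgene_sgmac_init() above, ENET1 ports share pause-threshold registers in pairs: an even-numbered port owns the low 16 bits and an odd-numbered port the high 16 bits, while ENET2 packs the on/off thresholds into a single register. A sketch of the even/odd halfword packing; the 0x390 threshold is borrowed from the XG_DEF_PAUSE_THRES default later in this diff purely for illustration:

#include <stdint.h>
#include <stdio.h>

/* Packs a 16-bit pause threshold into the halfword owned by this port,
 * mirroring the port_id % 2 split in xgene_sgmac_init(). */
static uint32_t pack_thresh(uint32_t reg, uint32_t port_id, uint16_t thres)
{
        if (!(port_id % 2))
                return (reg & 0xffff0000) | thres;            /* even: low 16 */
        return (reg & 0x0000ffff) | ((uint32_t)thres << 16);  /* odd: high 16 */
}

int main(void)
{
        printf("0x%08x\n", pack_thresh(0xaaaabbbb, 0, 0x390));  /* 0xaaaa0390 */
        printf("0x%08x\n", pack_thresh(0xaaaabbbb, 1, 0x390));  /* 0x0390bbbb */
        return 0;
}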
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c b/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c
index 6475f383ba83..ece19e6d68e3 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c
@@ -101,6 +101,14 @@ static void xgene_enet_wr_pcs(struct xgene_enet_pdata *pdata,
wr_addr);
}
+static void xgene_enet_wr_axg_csr(struct xgene_enet_pdata *pdata,
+ u32 offset, u32 val)
+{
+ void __iomem *addr = pdata->mcx_mac_csr_addr + offset;
+
+ iowrite32(val, addr);
+}
+
static void xgene_enet_rd_csr(struct xgene_enet_pdata *pdata,
u32 offset, u32 *val)
{
@@ -174,6 +182,14 @@ static bool xgene_enet_rd_pcs(struct xgene_enet_pdata *pdata,
return success;
}
+static void xgene_enet_rd_axg_csr(struct xgene_enet_pdata *pdata,
+ u32 offset, u32 *val)
+{
+ void __iomem *addr = pdata->mcx_mac_csr_addr + offset;
+
+ *val = ioread32(addr);
+}
+
static int xgene_enet_ecc_init(struct xgene_enet_pdata *pdata)
{
struct net_device *ndev = pdata->ndev;
@@ -250,6 +266,12 @@ static void xgene_xgmac_set_mss(struct xgene_enet_pdata *pdata,
xgene_enet_wr_csr(pdata, XG_TSIF_MSS_REG0_ADDR + offset, data);
}
+static void xgene_xgmac_set_frame_size(struct xgene_enet_pdata *pdata, int size)
+{
+ xgene_enet_wr_mac(pdata, HSTMAXFRAME_LENGTH_ADDR,
+ ((((size + 2) >> 2) << 16) | size));
+}
+
static u32 xgene_enet_link_status(struct xgene_enet_pdata *pdata)
{
u32 data;
@@ -259,6 +281,51 @@ static u32 xgene_enet_link_status(struct xgene_enet_pdata *pdata)
return data;
}
+static void xgene_xgmac_enable_tx_pause(struct xgene_enet_pdata *pdata,
+ bool enable)
+{
+ u32 data;
+
+ xgene_enet_rd_axg_csr(pdata, XGENET_CSR_ECM_CFG_0_ADDR, &data);
+
+ if (enable)
+ data |= MULTI_DPF_AUTOCTRL | PAUSE_XON_EN;
+ else
+ data &= ~(MULTI_DPF_AUTOCTRL | PAUSE_XON_EN);
+
+ xgene_enet_wr_axg_csr(pdata, XGENET_CSR_ECM_CFG_0_ADDR, data);
+}
+
+static void xgene_xgmac_flowctl_tx(struct xgene_enet_pdata *pdata, bool enable)
+{
+ u32 data;
+
+ xgene_enet_rd_mac(pdata, AXGMAC_CONFIG_1, &data);
+
+ if (enable)
+ data |= HSTTCTLEN;
+ else
+ data &= ~HSTTCTLEN;
+
+ xgene_enet_wr_mac(pdata, AXGMAC_CONFIG_1, data);
+
+ pdata->mac_ops->enable_tx_pause(pdata, enable);
+}
+
+static void xgene_xgmac_flowctl_rx(struct xgene_enet_pdata *pdata, bool enable)
+{
+ u32 data;
+
+ xgene_enet_rd_mac(pdata, AXGMAC_CONFIG_1, &data);
+
+ if (enable)
+ data |= HSTRCTLEN;
+ else
+ data &= ~HSTRCTLEN;
+
+ xgene_enet_wr_mac(pdata, AXGMAC_CONFIG_1, data);
+}
+
static void xgene_xgmac_init(struct xgene_enet_pdata *pdata)
{
u32 data;
@@ -282,6 +349,23 @@ static void xgene_xgmac_init(struct xgene_enet_pdata *pdata)
xgene_enet_wr_csr(pdata, XG_ENET_SPARE_CFG_REG_1_ADDR, 0x82);
xgene_enet_wr_csr(pdata, XGENET_RX_DV_GATE_REG_0_ADDR, 0);
xgene_enet_wr_csr(pdata, XG_CFG_BYPASS_ADDR, RESUME_TX);
+
+ /* Configure HW pause frame generation */
+ xgene_enet_rd_axg_csr(pdata, XGENET_CSR_MULTI_DPF0_ADDR, &data);
+ data = (DEF_QUANTA << 16) | (data & 0xFFFF);
+ xgene_enet_wr_axg_csr(pdata, XGENET_CSR_MULTI_DPF0_ADDR, data);
+
+ if (pdata->enet_id != XGENE_ENET1) {
+ xgene_enet_rd_axg_csr(pdata, XGENET_CSR_MULTI_DPF1_ADDR, &data);
+ data = (NORM_PAUSE_OPCODE << 16) | (data & 0xFFFF);
+ xgene_enet_wr_axg_csr(pdata, XGENET_CSR_MULTI_DPF1_ADDR, data);
+ }
+
+ data = (XG_DEF_PAUSE_OFF_THRES << 16) | XG_DEF_PAUSE_THRES;
+ xgene_enet_wr_csr(pdata, XG_RXBUF_PAUSE_THRESH, data);
+
+ xgene_xgmac_flowctl_tx(pdata, pdata->tx_pause);
+ xgene_xgmac_flowctl_rx(pdata, pdata->rx_pause);
}
static void xgene_xgmac_rx_enable(struct xgene_enet_pdata *pdata)
@@ -350,44 +434,47 @@ static int xgene_enet_reset(struct xgene_enet_pdata *pdata)
}
static void xgene_enet_xgcle_bypass(struct xgene_enet_pdata *pdata,
- u32 dst_ring_num, u16 bufpool_id)
+ u32 dst_ring_num, u16 bufpool_id,
+ u16 nxtbufpool_id)
{
- u32 cb, fpsel;
+ u32 cb, fpsel, nxtfpsel;
xgene_enet_rd_csr(pdata, XCLE_BYPASS_REG0_ADDR, &cb);
cb |= CFG_CLE_BYPASS_EN0;
CFG_CLE_IP_PROTOCOL0_SET(&cb, 3);
xgene_enet_wr_csr(pdata, XCLE_BYPASS_REG0_ADDR, cb);
- fpsel = xgene_enet_ring_bufnum(bufpool_id) - 0x20;
+ fpsel = xgene_enet_get_fpsel(bufpool_id);
+ nxtfpsel = xgene_enet_get_fpsel(nxtbufpool_id);
xgene_enet_rd_csr(pdata, XCLE_BYPASS_REG1_ADDR, &cb);
CFG_CLE_DSTQID0_SET(&cb, dst_ring_num);
CFG_CLE_FPSEL0_SET(&cb, fpsel);
+ CFG_CLE_NXTFPSEL0_SET(&cb, nxtfpsel);
xgene_enet_wr_csr(pdata, XCLE_BYPASS_REG1_ADDR, cb);
+ pr_info("+ cle_bypass: fpsel: %d nxtfpsel: %d\n", fpsel, nxtfpsel);
}
static void xgene_enet_shutdown(struct xgene_enet_pdata *pdata)
{
struct device *dev = &pdata->pdev->dev;
struct xgene_enet_desc_ring *ring;
- u32 pb, val;
+ u32 pb;
int i;
pb = 0;
for (i = 0; i < pdata->rxq_cnt; i++) {
ring = pdata->rx_ring[i]->buf_pool;
-
- val = xgene_enet_ring_bufnum(ring->id);
- pb |= BIT(val - 0x20);
+ pb |= BIT(xgene_enet_get_fpsel(ring->id));
+ ring = pdata->rx_ring[i]->page_pool;
+ if (ring)
+ pb |= BIT(xgene_enet_get_fpsel(ring->id));
}
xgene_enet_wr_ring_if(pdata, ENET_CFGSSQMIFPRESET_ADDR, pb);
pb = 0;
for (i = 0; i < pdata->txq_cnt; i++) {
ring = pdata->tx_ring[i];
-
- val = xgene_enet_ring_bufnum(ring->id);
- pb |= BIT(val);
+ pb |= BIT(xgene_enet_ring_bufnum(ring->id));
}
xgene_enet_wr_ring_if(pdata, ENET_CFGSSQMIWQRESET_ADDR, pb);
@@ -400,31 +487,44 @@ static void xgene_enet_shutdown(struct xgene_enet_pdata *pdata)
static void xgene_enet_clear(struct xgene_enet_pdata *pdata,
struct xgene_enet_desc_ring *ring)
{
- u32 addr, val, data;
-
- val = xgene_enet_ring_bufnum(ring->id);
+ u32 addr, data;
if (xgene_enet_is_bufpool(ring->id)) {
addr = ENET_CFGSSQMIFPRESET_ADDR;
- data = BIT(val - 0x20);
+ data = BIT(xgene_enet_get_fpsel(ring->id));
} else {
addr = ENET_CFGSSQMIWQRESET_ADDR;
- data = BIT(val);
+ data = BIT(xgene_enet_ring_bufnum(ring->id));
}
xgene_enet_wr_ring_if(pdata, addr, data);
}
+static int xgene_enet_gpio_lookup(struct xgene_enet_pdata *pdata)
+{
+ struct device *dev = &pdata->pdev->dev;
+
+ pdata->sfp_rdy = gpiod_get(dev, "rxlos", GPIOD_IN);
+ if (IS_ERR(pdata->sfp_rdy))
+ pdata->sfp_rdy = gpiod_get(dev, "sfp", GPIOD_IN);
+
+ if (IS_ERR(pdata->sfp_rdy))
+ return -ENODEV;
+
+ return 0;
+}
+
static void xgene_enet_link_state(struct work_struct *work)
{
struct xgene_enet_pdata *pdata = container_of(to_delayed_work(work),
struct xgene_enet_pdata, link_work);
- struct gpio_desc *sfp_rdy = pdata->sfp_rdy;
struct net_device *ndev = pdata->ndev;
u32 link_status, poll_interval;
link_status = xgene_enet_link_status(pdata);
- if (link_status && !IS_ERR(sfp_rdy) && !gpiod_get_value(sfp_rdy))
+ if (pdata->sfp_gpio_en && link_status &&
+ (!IS_ERR(pdata->sfp_rdy) || !xgene_enet_gpio_lookup(pdata)) &&
+ !gpiod_get_value(pdata->sfp_rdy))
link_status = 0;
if (link_status) {
@@ -458,8 +558,12 @@ const struct xgene_mac_ops xgene_xgmac_ops = {
.rx_disable = xgene_xgmac_rx_disable,
.tx_disable = xgene_xgmac_tx_disable,
.set_mac_addr = xgene_xgmac_set_mac_addr,
+ .set_framesize = xgene_xgmac_set_frame_size,
.set_mss = xgene_xgmac_set_mss,
- .link_state = xgene_enet_link_state
+ .link_state = xgene_enet_link_state,
+ .enable_tx_pause = xgene_xgmac_enable_tx_pause,
+ .flowctl_rx = xgene_xgmac_flowctl_rx,
+ .flowctl_tx = xgene_xgmac_flowctl_tx
};
const struct xgene_port_ops xgene_xgport_ops = {
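xgene_xgmac_set_frame_size() above encodes the maximum frame length as ((((size + 2) >> 2) << 16) | size): the byte length sits in the low halfword, and the high halfword appears to carry the same length in rounded 4-byte units. A sketch of the encoding:

#include <stdint.h>
#include <stdio.h>

/* Mirrors the HSTMAXFRAME_LENGTH value computed by
 * xgene_xgmac_set_frame_size(). */
static uint32_t xgmac_maxframe(int size)
{
        return (uint32_t)((((size + 2) >> 2) << 16) | size);
}

int main(void)
{
        printf("0x%08x\n", xgmac_maxframe(0x600));  /* 0x01800600 */
        printf("0x%08x\n", xgmac_maxframe(9018));   /* 0x08cf233a */
        return 0;
}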
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.h b/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.h
index 360ccbd95566..03b847ad8937 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.h
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.h
@@ -59,6 +59,11 @@
#define HSTMAXFRAME_LENGTH_ADDR 0x0020
#define XG_MCX_RX_DV_GATE_REG_0_ADDR 0x0004
+#define XG_MCX_ECM_CFG_0_ADDR 0x0074
+#define XG_MCX_MULTI_DPF0_ADDR 0x007c
+#define XG_MCX_MULTI_DPF1_ADDR 0x0080
+#define XG_DEF_PAUSE_THRES 0x390
+#define XG_DEF_PAUSE_OFF_THRES 0x2c0
#define XG_RSIF_CONFIG_REG_ADDR 0x00a0
#define XCLE_BYPASS_REG0_ADDR 0x0160
#define XCLE_BYPASS_REG1_ADDR 0x0164
@@ -70,6 +75,10 @@
#define XG_ENET_SPARE_CFG_REG_ADDR 0x040c
#define XG_ENET_SPARE_CFG_REG_1_ADDR 0x0410
#define XGENET_RX_DV_GATE_REG_0_ADDR 0x0804
+#define XGENET_CSR_ECM_CFG_0_ADDR 0x0880
+#define XGENET_CSR_MULTI_DPF0_ADDR 0x0888
+#define XGENET_CSR_MULTI_DPF1_ADDR 0x088c
+#define XG_RXBUF_PAUSE_THRESH 0x0020
#define XG_MCX_ICM_CONFIG0_REG_0_ADDR 0x00e0
#define XG_MCX_ICM_CONFIG2_REG_0_ADDR 0x00e8
diff --git a/drivers/net/ethernet/apple/bmac.c b/drivers/net/ethernet/apple/bmac.c
index a65d7a60f116..2b2d87089987 100644
--- a/drivers/net/ethernet/apple/bmac.c
+++ b/drivers/net/ethernet/apple/bmac.c
@@ -1237,7 +1237,6 @@ static const struct net_device_ops bmac_netdev_ops = {
.ndo_start_xmit = bmac_output,
.ndo_set_rx_mode = bmac_set_multicast,
.ndo_set_mac_address = bmac_set_address,
- .ndo_change_mtu = eth_change_mtu,
.ndo_validate_addr = eth_validate_addr,
};
diff --git a/drivers/net/ethernet/apple/mace.c b/drivers/net/ethernet/apple/mace.c
index e58a7c73766e..96dd5300e0e5 100644
--- a/drivers/net/ethernet/apple/mace.c
+++ b/drivers/net/ethernet/apple/mace.c
@@ -102,7 +102,6 @@ static const struct net_device_ops mace_netdev_ops = {
.ndo_start_xmit = mace_xmit_start,
.ndo_set_rx_mode = mace_set_multicast,
.ndo_set_mac_address = mace_set_address,
- .ndo_change_mtu = eth_change_mtu,
.ndo_validate_addr = eth_validate_addr,
};
diff --git a/drivers/net/ethernet/apple/macmace.c b/drivers/net/ethernet/apple/macmace.c
index 89914ca17a49..857df9c45f04 100644
--- a/drivers/net/ethernet/apple/macmace.c
+++ b/drivers/net/ethernet/apple/macmace.c
@@ -186,7 +186,6 @@ static const struct net_device_ops mace_netdev_ops = {
.ndo_tx_timeout = mace_tx_timeout,
.ndo_set_rx_mode = mace_set_multicast,
.ndo_set_mac_address = mace_set_address,
- .ndo_change_mtu = eth_change_mtu,
.ndo_validate_addr = eth_validate_addr,
};
diff --git a/drivers/net/ethernet/arc/Kconfig b/drivers/net/ethernet/arc/Kconfig
index 689045186064..e743ddf46343 100644
--- a/drivers/net/ethernet/arc/Kconfig
+++ b/drivers/net/ethernet/arc/Kconfig
@@ -17,13 +17,14 @@ if NET_VENDOR_ARC
config ARC_EMAC_CORE
tristate
+ depends on ARC || ARCH_ROCKCHIP || COMPILE_TEST
select MII
select PHYLIB
config ARC_EMAC
tristate "ARC EMAC support"
select ARC_EMAC_CORE
- depends on OF_IRQ && OF_NET && HAS_DMA
+ depends on OF_IRQ && OF_NET && HAS_DMA && (ARC || COMPILE_TEST)
---help---
On some legacy ARC (Synopsys) FPGA boards such as ARCAngel4/ML50x,
the non-standard on-chip Ethernet device ARC EMAC 10/100 is used.
@@ -32,7 +33,7 @@ config ARC_EMAC
config EMAC_ROCKCHIP
tristate "Rockchip EMAC support"
select ARC_EMAC_CORE
- depends on OF_IRQ && OF_NET && REGULATOR && HAS_DMA
+ depends on OF_IRQ && OF_NET && REGULATOR && HAS_DMA && (ARCH_ROCKCHIP || COMPILE_TEST)
---help---
Support for Rockchip RK3036/RK3066/RK3188 EMAC ethernet controllers.
This selects Rockchip SoC glue layer support for the
diff --git a/drivers/net/ethernet/arc/emac_main.c b/drivers/net/ethernet/arc/emac_main.c
index be865b4dada2..abc9f2a59054 100644
--- a/drivers/net/ethernet/arc/emac_main.c
+++ b/drivers/net/ethernet/arc/emac_main.c
@@ -636,7 +636,7 @@ static int arc_emac_tx(struct sk_buff *skb, struct net_device *ndev)
if (unlikely(dma_mapping_error(&ndev->dev, addr))) {
stats->tx_dropped++;
stats->tx_errors++;
- dev_kfree_skb(skb);
+ dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
dma_unmap_addr_set(&priv->tx_buff[*txbd_curr], addr, addr);
diff --git a/drivers/net/ethernet/atheros/alx/Makefile b/drivers/net/ethernet/atheros/alx/Makefile
index 5901fa407d52..ed4a605874a3 100644
--- a/drivers/net/ethernet/atheros/alx/Makefile
+++ b/drivers/net/ethernet/atheros/alx/Makefile
@@ -1,3 +1,2 @@
obj-$(CONFIG_ALX) += alx.o
alx-objs := main.o ethtool.o hw.o
-ccflags-y += -D__CHECK_ENDIAN__
diff --git a/drivers/net/ethernet/atheros/alx/alx.h b/drivers/net/ethernet/atheros/alx/alx.h
index 6cac919272ea..d4a409139ea2 100644
--- a/drivers/net/ethernet/atheros/alx/alx.h
+++ b/drivers/net/ethernet/atheros/alx/alx.h
@@ -50,6 +50,10 @@ struct alx_buffer {
};
struct alx_rx_queue {
+ struct net_device *netdev;
+ struct device *dev;
+ struct alx_napi *np;
+
struct alx_rrd *rrd;
dma_addr_t rrd_dma;
@@ -58,16 +62,26 @@ struct alx_rx_queue {
struct alx_buffer *bufs;
+ u16 count;
u16 write_idx, read_idx;
u16 rrd_read_idx;
+ u16 queue_idx;
};
#define ALX_RX_ALLOC_THRESH 32
struct alx_tx_queue {
+ struct net_device *netdev;
+ struct device *dev;
+
struct alx_txd *tpd;
dma_addr_t tpd_dma;
+
struct alx_buffer *bufs;
+
+ u16 count;
u16 write_idx, read_idx;
+ u16 queue_idx;
+ u16 p_reg, c_reg;
};
#define ALX_DEFAULT_TX_WORK 128
@@ -76,6 +90,18 @@ enum alx_device_quirks {
ALX_DEV_QUIRK_MSI_INTX_DISABLE_BUG = BIT(0),
};
+struct alx_napi {
+ struct napi_struct napi;
+ struct alx_priv *alx;
+ struct alx_rx_queue *rxq;
+ struct alx_tx_queue *txq;
+ int vec_idx;
+ u32 vec_mask;
+ char irq_lbl[IFNAMSIZ + 8];
+};
+
+#define ALX_MAX_NAPIS 8
+
#define ALX_FLAG_USING_MSIX BIT(0)
#define ALX_FLAG_USING_MSI BIT(1)
@@ -87,7 +113,6 @@ struct alx_priv {
/* msi-x vectors */
int num_vec;
struct msix_entry *msix_entries;
- char irq_lbl[IFNAMSIZ + 8];
/* all descriptor memory */
struct {
@@ -96,6 +121,11 @@ struct alx_priv {
unsigned int size;
} descmem;
+ struct alx_napi *qnapi[ALX_MAX_NAPIS];
+ int num_txq;
+ int num_rxq;
+ int num_napi;
+
/* protect int_mask updates */
spinlock_t irq_lock;
u32 int_mask;
@@ -104,10 +134,6 @@ struct alx_priv {
unsigned int rx_ringsz;
unsigned int rxbuf_size;
- struct napi_struct napi;
- struct alx_tx_queue txq;
- struct alx_rx_queue rxq;
-
struct work_struct link_check_wk;
struct work_struct reset_wk;
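The ethtool changes that follow migrate alx from the deprecated get_settings/set_settings pair to get_link_ksettings/set_link_ksettings, converting between the legacy u32 masks and the new link-mode bitmaps with the ethtool_convert_* helpers. The validation logic itself is unchanged: an autoneg request is rejected if it advertises any mode outside the supported set. A stand-alone sketch of that mask check; the SUPPORTED_* bit values match the ethtool uapi definitions:

#include <stdint.h>
#include <stdio.h>

#define SUPPORTED_10baseT_Full   (1 << 1)
#define SUPPORTED_100baseT_Full  (1 << 3)
#define SUPPORTED_1000baseT_Full (1 << 5)

int main(void)
{
        uint32_t supported = SUPPORTED_10baseT_Full | SUPPORTED_100baseT_Full;
        uint32_t advertising = SUPPORTED_1000baseT_Full;  /* not supported */

        /* same check as alx_set_link_ksettings() */
        if (advertising & ~supported)
                printf("rejected: -EINVAL\n");
        return 0;
}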
diff --git a/drivers/net/ethernet/atheros/alx/ethtool.c b/drivers/net/ethernet/atheros/alx/ethtool.c
index 08e22df2a300..2f4eabf652e8 100644
--- a/drivers/net/ethernet/atheros/alx/ethtool.c
+++ b/drivers/net/ethernet/atheros/alx/ethtool.c
@@ -125,64 +125,75 @@ static u32 alx_get_supported_speeds(struct alx_hw *hw)
return supported;
}
-static int alx_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
+static int alx_get_link_ksettings(struct net_device *netdev,
+ struct ethtool_link_ksettings *cmd)
{
struct alx_priv *alx = netdev_priv(netdev);
struct alx_hw *hw = &alx->hw;
+ u32 supported, advertising;
- ecmd->supported = SUPPORTED_Autoneg |
+ supported = SUPPORTED_Autoneg |
SUPPORTED_TP |
SUPPORTED_Pause |
SUPPORTED_Asym_Pause;
if (alx_hw_giga(hw))
- ecmd->supported |= SUPPORTED_1000baseT_Full;
- ecmd->supported |= alx_get_supported_speeds(hw);
+ supported |= SUPPORTED_1000baseT_Full;
+ supported |= alx_get_supported_speeds(hw);
- ecmd->advertising = ADVERTISED_TP;
+ advertising = ADVERTISED_TP;
if (hw->adv_cfg & ADVERTISED_Autoneg)
- ecmd->advertising |= hw->adv_cfg;
+ advertising |= hw->adv_cfg;
- ecmd->port = PORT_TP;
- ecmd->phy_address = 0;
+ cmd->base.port = PORT_TP;
+ cmd->base.phy_address = 0;
if (hw->adv_cfg & ADVERTISED_Autoneg)
- ecmd->autoneg = AUTONEG_ENABLE;
+ cmd->base.autoneg = AUTONEG_ENABLE;
else
- ecmd->autoneg = AUTONEG_DISABLE;
- ecmd->transceiver = XCVR_INTERNAL;
+ cmd->base.autoneg = AUTONEG_DISABLE;
if (hw->flowctrl & ALX_FC_ANEG && hw->adv_cfg & ADVERTISED_Autoneg) {
if (hw->flowctrl & ALX_FC_RX) {
- ecmd->advertising |= ADVERTISED_Pause;
+ advertising |= ADVERTISED_Pause;
if (!(hw->flowctrl & ALX_FC_TX))
- ecmd->advertising |= ADVERTISED_Asym_Pause;
+ advertising |= ADVERTISED_Asym_Pause;
} else if (hw->flowctrl & ALX_FC_TX) {
- ecmd->advertising |= ADVERTISED_Asym_Pause;
+ advertising |= ADVERTISED_Asym_Pause;
}
}
- ethtool_cmd_speed_set(ecmd, hw->link_speed);
- ecmd->duplex = hw->duplex;
+ cmd->base.speed = hw->link_speed;
+ cmd->base.duplex = hw->duplex;
+
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
+ supported);
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
+ advertising);
return 0;
}
-static int alx_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
+static int alx_set_link_ksettings(struct net_device *netdev,
+ const struct ethtool_link_ksettings *cmd)
{
struct alx_priv *alx = netdev_priv(netdev);
struct alx_hw *hw = &alx->hw;
u32 adv_cfg;
+ u32 advertising;
ASSERT_RTNL();
- if (ecmd->autoneg == AUTONEG_ENABLE) {
- if (ecmd->advertising & ~alx_get_supported_speeds(hw))
+ ethtool_convert_link_mode_to_legacy_u32(&advertising,
+ cmd->link_modes.advertising);
+
+ if (cmd->base.autoneg == AUTONEG_ENABLE) {
+ if (advertising & ~alx_get_supported_speeds(hw))
return -EINVAL;
- adv_cfg = ecmd->advertising | ADVERTISED_Autoneg;
+ adv_cfg = advertising | ADVERTISED_Autoneg;
} else {
- adv_cfg = alx_speed_to_ethadv(ethtool_cmd_speed(ecmd),
- ecmd->duplex);
+ adv_cfg = alx_speed_to_ethadv(cmd->base.speed,
+ cmd->base.duplex);
if (!adv_cfg || adv_cfg == ADVERTISED_1000baseT_Full)
return -EINVAL;
@@ -300,8 +311,6 @@ static int alx_get_sset_count(struct net_device *netdev, int sset)
}
const struct ethtool_ops alx_ethtool_ops = {
- .get_settings = alx_get_settings,
- .set_settings = alx_set_settings,
.get_pauseparam = alx_get_pauseparam,
.set_pauseparam = alx_set_pauseparam,
.get_msglevel = alx_get_msglevel,
@@ -310,4 +319,6 @@ const struct ethtool_ops alx_ethtool_ops = {
.get_strings = alx_get_strings,
.get_sset_count = alx_get_sset_count,
.get_ethtool_stats = alx_get_ethtool_stats,
+ .get_link_ksettings = alx_get_link_ksettings,
+ .set_link_ksettings = alx_set_link_ksettings,
};
diff --git a/drivers/net/ethernet/atheros/alx/hw.h b/drivers/net/ethernet/atheros/alx/hw.h
index 0191477ace51..e42d7e0947eb 100644
--- a/drivers/net/ethernet/atheros/alx/hw.h
+++ b/drivers/net/ethernet/atheros/alx/hw.h
@@ -351,7 +351,6 @@ struct alx_rrd {
#define ALX_MAX_JUMBO_PKT_SIZE (9*1024)
#define ALX_MAX_TSO_PKT_SIZE (7*1024)
#define ALX_MAX_FRAME_SIZE ALX_MAX_JUMBO_PKT_SIZE
-#define ALX_MIN_FRAME_SIZE (ETH_ZLEN + ETH_FCS_LEN + VLAN_HLEN)
#define ALX_MAX_RX_QUEUES 8
#define ALX_MAX_TX_QUEUES 4
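The alx multiqueue rework below replaces the single embedded txq/rxq with per-queue structures that carry their own element count, producer/consumer register offsets, and back-pointers to the netdev. The free-slot computation in alx_tpd_avail() keeps one slot permanently unused, so read_idx == write_idx can only mean an empty ring. A sketch of that arithmetic:

#include <stdio.h>

/* Mirrors alx_tpd_avail(): free slots in a circular descriptor ring
 * that reserves one slot to disambiguate full from empty. */
static int tpd_avail(int count, int read_idx, int write_idx)
{
        if (write_idx >= read_idx)
                return count + read_idx - write_idx - 1;
        return read_idx - write_idx - 1;
}

int main(void)
{
        printf("%d\n", tpd_avail(256, 0, 0));      /* 255: empty */
        printf("%d\n", tpd_avail(256, 10, 9));     /* 0:   full  */
        printf("%d\n", tpd_avail(256, 100, 200));  /* 155        */
        return 0;
}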
diff --git a/drivers/net/ethernet/atheros/alx/main.c b/drivers/net/ethernet/atheros/alx/main.c
index c0f84b73574d..c8f525574d68 100644
--- a/drivers/net/ethernet/atheros/alx/main.c
+++ b/drivers/net/ethernet/atheros/alx/main.c
@@ -51,16 +51,12 @@
const char alx_drv_name[] = "alx";
-static bool msix = false;
-module_param(msix, bool, 0);
-MODULE_PARM_DESC(msix, "Enable msi-x interrupt support");
-
-static void alx_free_txbuf(struct alx_priv *alx, int entry)
+static void alx_free_txbuf(struct alx_tx_queue *txq, int entry)
{
- struct alx_buffer *txb = &alx->txq.bufs[entry];
+ struct alx_buffer *txb = &txq->bufs[entry];
if (dma_unmap_len(txb, size)) {
- dma_unmap_single(&alx->hw.pdev->dev,
+ dma_unmap_single(txq->dev,
dma_unmap_addr(txb, dma),
dma_unmap_len(txb, size),
DMA_TO_DEVICE);
@@ -75,7 +71,7 @@ static void alx_free_txbuf(struct alx_priv *alx, int entry)
static int alx_refill_rx_ring(struct alx_priv *alx, gfp_t gfp)
{
- struct alx_rx_queue *rxq = &alx->rxq;
+ struct alx_rx_queue *rxq = alx->qnapi[0]->rxq;
struct sk_buff *skb;
struct alx_buffer *cur_buf;
dma_addr_t dma;
@@ -143,24 +139,42 @@ static int alx_refill_rx_ring(struct alx_priv *alx, gfp_t gfp)
return count;
}
-static inline int alx_tpd_avail(struct alx_priv *alx)
+static struct alx_tx_queue *alx_tx_queue_mapping(struct alx_priv *alx,
+ struct sk_buff *skb)
{
- struct alx_tx_queue *txq = &alx->txq;
+ unsigned int r_idx = skb->queue_mapping;
+
+ if (r_idx >= alx->num_txq)
+ r_idx = r_idx % alx->num_txq;
+ return alx->qnapi[r_idx]->txq;
+}
+
+static struct netdev_queue *alx_get_tx_queue(const struct alx_tx_queue *txq)
+{
+ return netdev_get_tx_queue(txq->netdev, txq->queue_idx);
+}
+
+static inline int alx_tpd_avail(struct alx_tx_queue *txq)
+{
if (txq->write_idx >= txq->read_idx)
- return alx->tx_ringsz + txq->read_idx - txq->write_idx - 1;
+ return txq->count + txq->read_idx - txq->write_idx - 1;
return txq->read_idx - txq->write_idx - 1;
}
-static bool alx_clean_tx_irq(struct alx_priv *alx)
+static bool alx_clean_tx_irq(struct alx_tx_queue *txq)
{
- struct alx_tx_queue *txq = &alx->txq;
+ struct alx_priv *alx;
+ struct netdev_queue *tx_queue;
u16 hw_read_idx, sw_read_idx;
unsigned int total_bytes = 0, total_packets = 0;
int budget = ALX_DEFAULT_TX_WORK;
+ alx = netdev_priv(txq->netdev);
+ tx_queue = alx_get_tx_queue(txq);
+
sw_read_idx = txq->read_idx;
- hw_read_idx = alx_read_mem16(&alx->hw, ALX_TPD_PRI0_CIDX);
+ hw_read_idx = alx_read_mem16(&alx->hw, txq->c_reg);
if (sw_read_idx != hw_read_idx) {
while (sw_read_idx != hw_read_idx && budget > 0) {
@@ -173,19 +187,19 @@ static bool alx_clean_tx_irq(struct alx_priv *alx)
budget--;
}
- alx_free_txbuf(alx, sw_read_idx);
+ alx_free_txbuf(txq, sw_read_idx);
- if (++sw_read_idx == alx->tx_ringsz)
+ if (++sw_read_idx == txq->count)
sw_read_idx = 0;
}
txq->read_idx = sw_read_idx;
- netdev_completed_queue(alx->dev, total_packets, total_bytes);
+ netdev_tx_completed_queue(tx_queue, total_packets, total_bytes);
}
- if (netif_queue_stopped(alx->dev) && netif_carrier_ok(alx->dev) &&
- alx_tpd_avail(alx) > alx->tx_ringsz/4)
- netif_wake_queue(alx->dev);
+ if (netif_tx_queue_stopped(tx_queue) && netif_carrier_ok(alx->dev) &&
+ alx_tpd_avail(txq) > txq->count / 4)
+ netif_tx_wake_queue(tx_queue);
return sw_read_idx == hw_read_idx;
}
@@ -200,15 +214,17 @@ static void alx_schedule_reset(struct alx_priv *alx)
schedule_work(&alx->reset_wk);
}
-static int alx_clean_rx_irq(struct alx_priv *alx, int budget)
+static int alx_clean_rx_irq(struct alx_rx_queue *rxq, int budget)
{
- struct alx_rx_queue *rxq = &alx->rxq;
+ struct alx_priv *alx;
struct alx_rrd *rrd;
struct alx_buffer *rxb;
struct sk_buff *skb;
u16 length, rfd_cleaned = 0;
int work = 0;
+ alx = netdev_priv(rxq->netdev);
+
while (work < budget) {
rrd = &rxq->rrd[rxq->rrd_read_idx];
if (!(rrd->word3 & cpu_to_le32(1 << RRD_UPDATED_SHIFT)))
@@ -224,7 +240,7 @@ static int alx_clean_rx_irq(struct alx_priv *alx, int budget)
}
rxb = &rxq->bufs[rxq->read_idx];
- dma_unmap_single(&alx->hw.pdev->dev,
+ dma_unmap_single(rxq->dev,
dma_unmap_addr(rxb, dma),
dma_unmap_len(rxb, size),
DMA_FROM_DEVICE);
@@ -242,7 +258,7 @@ static int alx_clean_rx_irq(struct alx_priv *alx, int budget)
length = ALX_GET_FIELD(le32_to_cpu(rrd->word3),
RRD_PKTLEN) - ETH_FCS_LEN;
skb_put(skb, length);
- skb->protocol = eth_type_trans(skb, alx->dev);
+ skb->protocol = eth_type_trans(skb, rxq->netdev);
skb_checksum_none_assert(skb);
if (alx->dev->features & NETIF_F_RXCSUM &&
@@ -259,13 +275,13 @@ static int alx_clean_rx_irq(struct alx_priv *alx, int budget)
}
}
- napi_gro_receive(&alx->napi, skb);
+ napi_gro_receive(&rxq->np->napi, skb);
work++;
next_pkt:
- if (++rxq->read_idx == alx->rx_ringsz)
+ if (++rxq->read_idx == rxq->count)
rxq->read_idx = 0;
- if (++rxq->rrd_read_idx == alx->rx_ringsz)
+ if (++rxq->rrd_read_idx == rxq->count)
rxq->rrd_read_idx = 0;
if (++rfd_cleaned > ALX_RX_ALLOC_THRESH)
@@ -280,23 +296,26 @@ next_pkt:
static int alx_poll(struct napi_struct *napi, int budget)
{
- struct alx_priv *alx = container_of(napi, struct alx_priv, napi);
+ struct alx_napi *np = container_of(napi, struct alx_napi, napi);
+ struct alx_priv *alx = np->alx;
struct alx_hw *hw = &alx->hw;
unsigned long flags;
- bool tx_complete;
- int work;
+ bool tx_complete = true;
+ int work = 0;
- tx_complete = alx_clean_tx_irq(alx);
- work = alx_clean_rx_irq(alx, budget);
+ if (np->txq)
+ tx_complete = alx_clean_tx_irq(np->txq);
+ if (np->rxq)
+ work = alx_clean_rx_irq(np->rxq, budget);
if (!tx_complete || work == budget)
return budget;
- napi_complete(&alx->napi);
+ napi_complete(&np->napi);
/* enable interrupt */
if (alx->flags & ALX_FLAG_USING_MSIX) {
- alx_mask_msix(hw, 1, false);
+ alx_mask_msix(hw, np->vec_idx, false);
} else {
spin_lock_irqsave(&alx->irq_lock, flags);
alx->int_mask |= ALX_ISR_TX_Q0 | ALX_ISR_RX_Q0;
@@ -350,7 +369,7 @@ static irqreturn_t alx_intr_handle(struct alx_priv *alx, u32 intr)
goto out;
if (intr & (ALX_ISR_TX_Q0 | ALX_ISR_RX_Q0)) {
- napi_schedule(&alx->napi);
+ napi_schedule(&alx->qnapi[0]->napi);
/* mask rx/tx interrupt, enable them when napi complete */
alx->int_mask &= ~ALX_ISR_ALL_QUEUES;
alx_write_mem32(hw, ALX_IMR, alx->int_mask);
@@ -365,15 +384,15 @@ static irqreturn_t alx_intr_handle(struct alx_priv *alx, u32 intr)
static irqreturn_t alx_intr_msix_ring(int irq, void *data)
{
- struct alx_priv *alx = data;
- struct alx_hw *hw = &alx->hw;
+ struct alx_napi *np = data;
+ struct alx_hw *hw = &np->alx->hw;
/* mask interrupt to ACK chip */
- alx_mask_msix(hw, 1, true);
+ alx_mask_msix(hw, np->vec_idx, true);
/* clear interrupt status */
- alx_write_mem32(hw, ALX_ISR, (ALX_ISR_TX_Q0 | ALX_ISR_RX_Q0));
+ alx_write_mem32(hw, ALX_ISR, np->vec_mask);
- napi_schedule(&alx->napi);
+ napi_schedule(&np->napi);
return IRQ_HANDLED;
}
@@ -424,63 +443,79 @@ static irqreturn_t alx_intr_legacy(int irq, void *data)
return alx_intr_handle(alx, intr);
}
+static const u16 txring_header_reg[] = {ALX_TPD_PRI0_ADDR_LO,
+ ALX_TPD_PRI1_ADDR_LO,
+ ALX_TPD_PRI2_ADDR_LO,
+ ALX_TPD_PRI3_ADDR_LO};
+
static void alx_init_ring_ptrs(struct alx_priv *alx)
{
struct alx_hw *hw = &alx->hw;
u32 addr_hi = ((u64)alx->descmem.dma) >> 32;
+ struct alx_napi *np;
+ int i;
+
+ for (i = 0; i < alx->num_napi; i++) {
+ np = alx->qnapi[i];
+ if (np->txq) {
+ np->txq->read_idx = 0;
+ np->txq->write_idx = 0;
+ alx_write_mem32(hw,
+ txring_header_reg[np->txq->queue_idx],
+ np->txq->tpd_dma);
+ }
+
+ if (np->rxq) {
+ np->rxq->read_idx = 0;
+ np->rxq->write_idx = 0;
+ np->rxq->rrd_read_idx = 0;
+ alx_write_mem32(hw, ALX_RRD_ADDR_LO, np->rxq->rrd_dma);
+ alx_write_mem32(hw, ALX_RFD_ADDR_LO, np->rxq->rfd_dma);
+ }
+ }
+
+ alx_write_mem32(hw, ALX_TX_BASE_ADDR_HI, addr_hi);
+ alx_write_mem32(hw, ALX_TPD_RING_SZ, alx->tx_ringsz);
- alx->rxq.read_idx = 0;
- alx->rxq.write_idx = 0;
- alx->rxq.rrd_read_idx = 0;
alx_write_mem32(hw, ALX_RX_BASE_ADDR_HI, addr_hi);
- alx_write_mem32(hw, ALX_RRD_ADDR_LO, alx->rxq.rrd_dma);
alx_write_mem32(hw, ALX_RRD_RING_SZ, alx->rx_ringsz);
- alx_write_mem32(hw, ALX_RFD_ADDR_LO, alx->rxq.rfd_dma);
alx_write_mem32(hw, ALX_RFD_RING_SZ, alx->rx_ringsz);
alx_write_mem32(hw, ALX_RFD_BUF_SZ, alx->rxbuf_size);
- alx->txq.read_idx = 0;
- alx->txq.write_idx = 0;
- alx_write_mem32(hw, ALX_TX_BASE_ADDR_HI, addr_hi);
- alx_write_mem32(hw, ALX_TPD_PRI0_ADDR_LO, alx->txq.tpd_dma);
- alx_write_mem32(hw, ALX_TPD_RING_SZ, alx->tx_ringsz);
-
/* load these pointers into the chip */
alx_write_mem32(hw, ALX_SRAM9, ALX_SRAM_LOAD_PTR);
}
-static void alx_free_txring_buf(struct alx_priv *alx)
+static void alx_free_txring_buf(struct alx_tx_queue *txq)
{
- struct alx_tx_queue *txq = &alx->txq;
int i;
if (!txq->bufs)
return;
- for (i = 0; i < alx->tx_ringsz; i++)
- alx_free_txbuf(alx, i);
+ for (i = 0; i < txq->count; i++)
+ alx_free_txbuf(txq, i);
- memset(txq->bufs, 0, alx->tx_ringsz * sizeof(struct alx_buffer));
- memset(txq->tpd, 0, alx->tx_ringsz * sizeof(struct alx_txd));
+ memset(txq->bufs, 0, txq->count * sizeof(struct alx_buffer));
+ memset(txq->tpd, 0, txq->count * sizeof(struct alx_txd));
txq->write_idx = 0;
txq->read_idx = 0;
- netdev_reset_queue(alx->dev);
+ netdev_tx_reset_queue(alx_get_tx_queue(txq));
}
-static void alx_free_rxring_buf(struct alx_priv *alx)
+static void alx_free_rxring_buf(struct alx_rx_queue *rxq)
{
- struct alx_rx_queue *rxq = &alx->rxq;
struct alx_buffer *cur_buf;
u16 i;
- if (rxq == NULL)
+ if (!rxq->bufs)
return;
- for (i = 0; i < alx->rx_ringsz; i++) {
+ for (i = 0; i < rxq->count; i++) {
cur_buf = rxq->bufs + i;
if (cur_buf->skb) {
- dma_unmap_single(&alx->hw.pdev->dev,
+ dma_unmap_single(rxq->dev,
dma_unmap_addr(cur_buf, dma),
dma_unmap_len(cur_buf, size),
DMA_FROM_DEVICE);
@@ -498,8 +533,14 @@ static void alx_free_rxring_buf(struct alx_priv *alx)
static void alx_free_buffers(struct alx_priv *alx)
{
- alx_free_txring_buf(alx);
- alx_free_rxring_buf(alx);
+ int i;
+
+ for (i = 0; i < alx->num_txq; i++)
+ if (alx->qnapi[i] && alx->qnapi[i]->txq)
+ alx_free_txring_buf(alx->qnapi[i]->txq);
+
+ if (alx->qnapi[0] && alx->qnapi[0]->rxq)
+ alx_free_rxring_buf(alx->qnapi[0]->rxq);
}
static int alx_reinit_rings(struct alx_priv *alx)
@@ -573,19 +614,41 @@ static int alx_set_mac_address(struct net_device *netdev, void *data)
return 0;
}
-static int alx_alloc_descriptors(struct alx_priv *alx)
+static int alx_alloc_tx_ring(struct alx_priv *alx, struct alx_tx_queue *txq,
+ int offset)
{
- alx->txq.bufs = kcalloc(alx->tx_ringsz,
- sizeof(struct alx_buffer),
- GFP_KERNEL);
- if (!alx->txq.bufs)
+ txq->bufs = kcalloc(txq->count, sizeof(struct alx_buffer), GFP_KERNEL);
+ if (!txq->bufs)
return -ENOMEM;
- alx->rxq.bufs = kcalloc(alx->rx_ringsz,
- sizeof(struct alx_buffer),
- GFP_KERNEL);
- if (!alx->rxq.bufs)
- goto out_free;
+ txq->tpd = alx->descmem.virt + offset;
+ txq->tpd_dma = alx->descmem.dma + offset;
+ offset += sizeof(struct alx_txd) * txq->count;
+
+ return offset;
+}
+
+static int alx_alloc_rx_ring(struct alx_priv *alx, struct alx_rx_queue *rxq,
+ int offset)
+{
+ rxq->bufs = kcalloc(rxq->count, sizeof(struct alx_buffer), GFP_KERNEL);
+ if (!rxq->bufs)
+ return -ENOMEM;
+
+ rxq->rrd = alx->descmem.virt + offset;
+ rxq->rrd_dma = alx->descmem.dma + offset;
+ offset += sizeof(struct alx_rrd) * rxq->count;
+
+ rxq->rfd = alx->descmem.virt + offset;
+ rxq->rfd_dma = alx->descmem.dma + offset;
+ offset += sizeof(struct alx_rfd) * rxq->count;
+
+ return offset;
+}
+
+static int alx_alloc_rings(struct alx_priv *alx)
+{
+ int i, offset = 0;
/* physical tx/rx ring descriptors
*
@@ -593,7 +656,8 @@ static int alx_alloc_descriptors(struct alx_priv *alx)
* 4G boundary (hardware has a single register for high 32 bits
* of addresses only)
*/
- alx->descmem.size = sizeof(struct alx_txd) * alx->tx_ringsz +
+ alx->descmem.size = sizeof(struct alx_txd) * alx->tx_ringsz *
+ alx->num_txq +
sizeof(struct alx_rrd) * alx->rx_ringsz +
sizeof(struct alx_rfd) * alx->rx_ringsz;
alx->descmem.virt = dma_zalloc_coherent(&alx->hw.pdev->dev,
@@ -601,87 +665,178 @@ static int alx_alloc_descriptors(struct alx_priv *alx)
&alx->descmem.dma,
GFP_KERNEL);
if (!alx->descmem.virt)
- goto out_free;
-
- alx->txq.tpd = alx->descmem.virt;
- alx->txq.tpd_dma = alx->descmem.dma;
+ return -ENOMEM;
- /* alignment requirement for next block */
+ /* alignment requirements */
BUILD_BUG_ON(sizeof(struct alx_txd) % 8);
+ BUILD_BUG_ON(sizeof(struct alx_rrd) % 8);
- alx->rxq.rrd =
- (void *)((u8 *)alx->descmem.virt +
- sizeof(struct alx_txd) * alx->tx_ringsz);
- alx->rxq.rrd_dma = alx->descmem.dma +
- sizeof(struct alx_txd) * alx->tx_ringsz;
+ for (i = 0; i < alx->num_txq; i++) {
+ offset = alx_alloc_tx_ring(alx, alx->qnapi[i]->txq, offset);
+ if (offset < 0) {
+ netdev_err(alx->dev, "Allocation of tx buffer failed!\n");
+ return -ENOMEM;
+ }
+ }
- /* alignment requirement for next block */
- BUILD_BUG_ON(sizeof(struct alx_rrd) % 8);
+ offset = alx_alloc_rx_ring(alx, alx->qnapi[0]->rxq, offset);
+ if (offset < 0) {
+ netdev_err(alx->dev, "Allocation of rx buffer failed!\n");
+ return -ENOMEM;
+ }
- alx->rxq.rfd =
- (void *)((u8 *)alx->descmem.virt +
- sizeof(struct alx_txd) * alx->tx_ringsz +
- sizeof(struct alx_rrd) * alx->rx_ringsz);
- alx->rxq.rfd_dma = alx->descmem.dma +
- sizeof(struct alx_txd) * alx->tx_ringsz +
- sizeof(struct alx_rrd) * alx->rx_ringsz;
+ alx_reinit_rings(alx);
return 0;
-out_free:
- kfree(alx->txq.bufs);
- kfree(alx->rxq.bufs);
- return -ENOMEM;
}
-static int alx_alloc_rings(struct alx_priv *alx)
+static void alx_free_rings(struct alx_priv *alx)
{
- int err;
+ int i;
- err = alx_alloc_descriptors(alx);
- if (err)
- return err;
+ alx_free_buffers(alx);
- alx->int_mask &= ~ALX_ISR_ALL_QUEUES;
- alx->int_mask |= ALX_ISR_TX_Q0 | ALX_ISR_RX_Q0;
+ for (i = 0; i < alx->num_txq; i++)
+ if (alx->qnapi[i] && alx->qnapi[i]->txq)
+ kfree(alx->qnapi[i]->txq->bufs);
- netif_napi_add(alx->dev, &alx->napi, alx_poll, 64);
+ if (alx->qnapi[0] && alx->qnapi[0]->rxq)
+ kfree(alx->qnapi[0]->rxq->bufs);
- alx_reinit_rings(alx);
- return 0;
+ if (alx->descmem.virt)
+ dma_free_coherent(&alx->hw.pdev->dev,
+ alx->descmem.size,
+ alx->descmem.virt,
+ alx->descmem.dma);
}
-static void alx_free_rings(struct alx_priv *alx)
+static void alx_free_napis(struct alx_priv *alx)
{
- netif_napi_del(&alx->napi);
- alx_free_buffers(alx);
+ struct alx_napi *np;
+ int i;
- kfree(alx->txq.bufs);
- kfree(alx->rxq.bufs);
+ for (i = 0; i < alx->num_napi; i++) {
+ np = alx->qnapi[i];
+ if (!np)
+ continue;
- dma_free_coherent(&alx->hw.pdev->dev,
- alx->descmem.size,
- alx->descmem.virt,
- alx->descmem.dma);
+ netif_napi_del(&np->napi);
+ kfree(np->txq);
+ kfree(np->rxq);
+ kfree(np);
+ alx->qnapi[i] = NULL;
+ }
}
+static const u16 tx_pidx_reg[] = {ALX_TPD_PRI0_PIDX, ALX_TPD_PRI1_PIDX,
+ ALX_TPD_PRI2_PIDX, ALX_TPD_PRI3_PIDX};
+static const u16 tx_cidx_reg[] = {ALX_TPD_PRI0_CIDX, ALX_TPD_PRI1_CIDX,
+ ALX_TPD_PRI2_CIDX, ALX_TPD_PRI3_CIDX};
+static const u32 tx_vect_mask[] = {ALX_ISR_TX_Q0, ALX_ISR_TX_Q1,
+ ALX_ISR_TX_Q2, ALX_ISR_TX_Q3};
+static const u32 rx_vect_mask[] = {ALX_ISR_RX_Q0, ALX_ISR_RX_Q1,
+ ALX_ISR_RX_Q2, ALX_ISR_RX_Q3,
+ ALX_ISR_RX_Q4, ALX_ISR_RX_Q5,
+ ALX_ISR_RX_Q6, ALX_ISR_RX_Q7};
+
+static int alx_alloc_napis(struct alx_priv *alx)
+{
+ struct alx_napi *np;
+ struct alx_rx_queue *rxq;
+ struct alx_tx_queue *txq;
+ int i;
+
+ alx->int_mask &= ~ALX_ISR_ALL_QUEUES;
+
+ /* allocate alx_napi structures */
+ for (i = 0; i < alx->num_napi; i++) {
+ np = kzalloc(sizeof(struct alx_napi), GFP_KERNEL);
+ if (!np)
+ goto err_out;
+
+ np->alx = alx;
+ netif_napi_add(alx->dev, &np->napi, alx_poll, 64);
+ alx->qnapi[i] = np;
+ }
+
+ /* allocate tx queues */
+ for (i = 0; i < alx->num_txq; i++) {
+ np = alx->qnapi[i];
+ txq = kzalloc(sizeof(*txq), GFP_KERNEL);
+ if (!txq)
+ goto err_out;
+
+ np->txq = txq;
+ txq->p_reg = tx_pidx_reg[i];
+ txq->c_reg = tx_cidx_reg[i];
+ txq->queue_idx = i;
+ txq->count = alx->tx_ringsz;
+ txq->netdev = alx->dev;
+ txq->dev = &alx->hw.pdev->dev;
+ np->vec_mask |= tx_vect_mask[i];
+ alx->int_mask |= tx_vect_mask[i];
+ }
+
+ /* allocate rx queues */
+ np = alx->qnapi[0];
+ rxq = kzalloc(sizeof(*rxq), GFP_KERNEL);
+ if (!rxq)
+ goto err_out;
+
+ np->rxq = rxq;
+ rxq->np = alx->qnapi[0];
+ rxq->queue_idx = 0;
+ rxq->count = alx->rx_ringsz;
+ rxq->netdev = alx->dev;
+ rxq->dev = &alx->hw.pdev->dev;
+ np->vec_mask |= rx_vect_mask[0];
+ alx->int_mask |= rx_vect_mask[0];
+
+ return 0;
+
+err_out:
+ netdev_err(alx->dev, "error allocating internal structures\n");
+ alx_free_napis(alx);
+ return -ENOMEM;
+}
+
+static const int txq_vec_mapping_shift[] = {
+ 0, ALX_MSI_MAP_TBL1_TXQ0_SHIFT,
+ 0, ALX_MSI_MAP_TBL1_TXQ1_SHIFT,
+ 1, ALX_MSI_MAP_TBL2_TXQ2_SHIFT,
+ 1, ALX_MSI_MAP_TBL2_TXQ3_SHIFT,
+};
+
static void alx_config_vector_mapping(struct alx_priv *alx)
{
struct alx_hw *hw = &alx->hw;
- u32 tbl = 0;
+ u32 tbl[2] = {0, 0};
+ int i, vector, idx, shift;
if (alx->flags & ALX_FLAG_USING_MSIX) {
- tbl |= 1 << ALX_MSI_MAP_TBL1_TXQ0_SHIFT;
- tbl |= 1 << ALX_MSI_MAP_TBL1_RXQ0_SHIFT;
+ /* tx mappings */
+ for (i = 0, vector = 1; i < alx->num_txq; i++, vector++) {
+ idx = txq_vec_mapping_shift[i * 2];
+ shift = txq_vec_mapping_shift[i * 2 + 1];
+ tbl[idx] |= vector << shift;
+ }
+
+ /* rx mapping */
+ tbl[0] |= 1 << ALX_MSI_MAP_TBL1_RXQ0_SHIFT;
}
- alx_write_mem32(hw, ALX_MSI_MAP_TBL1, tbl);
- alx_write_mem32(hw, ALX_MSI_MAP_TBL2, 0);
+ alx_write_mem32(hw, ALX_MSI_MAP_TBL1, tbl[0]);
+ alx_write_mem32(hw, ALX_MSI_MAP_TBL2, tbl[1]);
alx_write_mem32(hw, ALX_MSI_ID_MAP, 0);
}
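alx_config_vector_mapping() above packs each TX queue's MSI-X vector number into a small field of one of two 32-bit mapping registers, driven by the (table index, shift) pairs in txq_vec_mapping_shift. A sketch of the packing loop; the shift values here are illustrative stand-ins, since the real ALX_MSI_MAP_TBL*_SHIFT constants live in the alx register headers and are not shown in this diff:

#include <stdint.h>
#include <stdio.h>

static const int txq_vec_mapping_shift[] = {
        0, 8,   /* TXQ0: register 0, illustrative shift */
        0, 12,  /* TXQ1 */
        1, 0,   /* TXQ2: register 1 */
        1, 4,   /* TXQ3 */
};

int main(void)
{
        uint32_t tbl[2] = {0, 0};
        int num_txq = 4, i, vector;

        for (i = 0, vector = 1; i < num_txq; i++, vector++) {
                int idx = txq_vec_mapping_shift[i * 2];
                int shift = txq_vec_mapping_shift[i * 2 + 1];

                tbl[idx] |= (uint32_t)vector << shift;
        }
        printf("tbl1=0x%08x tbl2=0x%08x\n", tbl[0], tbl[1]);
        return 0;
}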
static bool alx_enable_msix(struct alx_priv *alx)
{
- int i, err, num_vec = 2;
+ int i, err, num_vec, num_txq, num_rxq;
+
+ num_txq = min_t(int, num_online_cpus(), ALX_MAX_TX_QUEUES);
+ num_rxq = 1;
+ num_vec = max_t(int, num_txq, num_rxq) + 1;
alx->msix_entries = kcalloc(num_vec, sizeof(struct msix_entry),
GFP_KERNEL);
@@ -701,6 +856,10 @@ static bool alx_enable_msix(struct alx_priv *alx)
}
alx->num_vec = num_vec;
+ alx->num_napi = num_vec - 1;
+ alx->num_txq = num_txq;
+ alx->num_rxq = num_rxq;
+
return true;
}
@@ -714,14 +873,29 @@ static int alx_request_msix(struct alx_priv *alx)
if (err)
goto out_err;
- vector++;
- sprintf(alx->irq_lbl, "%s-TxRx-0", netdev->name);
-
- err = request_irq(alx->msix_entries[vector].vector,
- alx_intr_msix_ring, 0, alx->irq_lbl, alx);
+ for (i = 0; i < alx->num_napi; i++) {
+ struct alx_napi *np = alx->qnapi[i];
+
+ vector++;
+
+ if (np->txq && np->rxq)
+ sprintf(np->irq_lbl, "%s-TxRx-%u", netdev->name,
+ np->txq->queue_idx);
+ else if (np->txq)
+ sprintf(np->irq_lbl, "%s-tx-%u", netdev->name,
+ np->txq->queue_idx);
+ else if (np->rxq)
+ sprintf(np->irq_lbl, "%s-rx-%u", netdev->name,
+ np->rxq->queue_idx);
+ else
+ sprintf(np->irq_lbl, "%s-unused", netdev->name);
+
+ np->vec_idx = vector;
+ err = request_irq(alx->msix_entries[vector].vector,
+ alx_intr_msix_ring, 0, np->irq_lbl, np);
if (err)
goto out_free;
-
+ }
return 0;
out_free:
@@ -729,7 +903,8 @@ out_free:
vector--;
for (i = 0; i < vector; i++)
- free_irq(alx->msix_entries[free_vector++].vector, alx);
+ free_irq(alx->msix_entries[free_vector++].vector,
+ alx->qnapi[i]);
out_err:
return err;
@@ -744,6 +919,9 @@ static void alx_init_intr(struct alx_priv *alx, bool msix)
if (!(alx->flags & ALX_FLAG_USING_MSIX)) {
alx->num_vec = 1;
+ alx->num_napi = 1;
+ alx->num_txq = 1;
+ alx->num_rxq = 1;
if (!pci_enable_msi(alx->hw.pdev))
alx->flags |= ALX_FLAG_USING_MSI;
@@ -799,6 +977,25 @@ static void alx_irq_disable(struct alx_priv *alx)
}
}
+static int alx_realloc_resources(struct alx_priv *alx)
+{
+ int err;
+
+ alx_free_rings(alx);
+ alx_free_napis(alx);
+ alx_disable_advanced_intr(alx);
+
+ err = alx_alloc_napis(alx);
+ if (err)
+ return err;
+
+ err = alx_alloc_rings(alx);
+ if (err)
+ return err;
+
+ return 0;
+}
+
static int alx_request_irq(struct alx_priv *alx)
{
struct pci_dev *pdev = alx->hw.pdev;
@@ -815,8 +1012,9 @@ static int alx_request_irq(struct alx_priv *alx)
goto out;
/* msix request failed, realloc resources */
- alx_disable_advanced_intr(alx);
- alx_init_intr(alx, false);
+ err = alx_realloc_resources(alx);
+ if (err)
+ goto out;
}
if (alx->flags & ALX_FLAG_USING_MSI) {
@@ -845,12 +1043,13 @@ out:
static void alx_free_irq(struct alx_priv *alx)
{
struct pci_dev *pdev = alx->hw.pdev;
- int i;
+ int i, vector = 0;
if (alx->flags & ALX_FLAG_USING_MSIX) {
- /* we have only 2 vectors without multi queue support */
- for (i = 0; i < 2; i++)
- free_irq(alx->msix_entries[i].vector, alx);
+ free_irq(alx->msix_entries[vector++].vector, alx);
+ for (i = 0; i < alx->num_napi; i++)
+ free_irq(alx->msix_entries[vector++].vector,
+ alx->qnapi[i]);
} else {
free_irq(pdev->irq, alx);
}
@@ -892,6 +1091,9 @@ static int alx_init_sw(struct alx_priv *alx)
hw->smb_timer = 400;
hw->mtu = alx->dev->mtu;
alx->rxbuf_size = ALX_MAX_FRAME_LEN(hw->mtu);
+ /* MTU range: 34 - 9256 */
+ alx->dev->min_mtu = 34;
+ alx->dev->max_mtu = ALX_MAX_FRAME_LEN(ALX_MAX_FRAME_SIZE);
alx->tx_ringsz = 256;
alx->rx_ringsz = 512;
hw->imt = 200;
@@ -932,11 +1134,14 @@ static netdev_features_t alx_fix_features(struct net_device *netdev,
static void alx_netif_stop(struct alx_priv *alx)
{
+ int i;
+
netif_trans_update(alx->dev);
if (netif_carrier_ok(alx->dev)) {
netif_carrier_off(alx->dev);
netif_tx_disable(alx->dev);
- napi_disable(&alx->napi);
+ for (i = 0; i < alx->num_napi; i++)
+ napi_disable(&alx->qnapi[i]->napi);
}
}
@@ -994,13 +1199,6 @@ static int alx_change_mtu(struct net_device *netdev, int mtu)
struct alx_priv *alx = netdev_priv(netdev);
int max_frame = ALX_MAX_FRAME_LEN(mtu);
- if ((max_frame < ALX_MIN_FRAME_SIZE) ||
- (max_frame > ALX_MAX_FRAME_SIZE))
- return -EINVAL;
-
- if (netdev->mtu == mtu)
- return 0;
-
netdev->mtu = mtu;
alx->hw.mtu = mtu;
alx->rxbuf_size = max(max_frame, ALX_DEF_RXBUF_SIZE);
@@ -1012,8 +1210,11 @@ static int alx_change_mtu(struct net_device *netdev, int mtu)
static void alx_netif_start(struct alx_priv *alx)
{
+ int i;
+
netif_tx_wake_all_queues(alx->dev);
- napi_enable(&alx->napi);
+ for (i = 0; i < alx->num_napi; i++)
+ napi_enable(&alx->qnapi[i]->napi);
netif_carrier_on(alx->dev);
}
@@ -1021,21 +1222,28 @@ static int __alx_open(struct alx_priv *alx, bool resume)
{
int err;
- alx_init_intr(alx, msix);
+ alx_init_intr(alx, true);
if (!resume)
netif_carrier_off(alx->dev);
- err = alx_alloc_rings(alx);
+ err = alx_alloc_napis(alx);
if (err)
goto out_disable_adv_intr;
+ err = alx_alloc_rings(alx);
+ if (err)
+ goto out_free_rings;
+
alx_configure(alx);
err = alx_request_irq(alx);
if (err)
goto out_free_rings;
+ netif_set_real_num_tx_queues(alx->dev, alx->num_txq);
+ netif_set_real_num_rx_queues(alx->dev, alx->num_rxq);
+
/* clear old interrupts */
alx_write_mem32(&alx->hw, ALX_ISR, ~(u32)ALX_ISR_DIS);
@@ -1049,6 +1257,7 @@ static int __alx_open(struct alx_priv *alx, bool resume)
out_free_rings:
alx_free_rings(alx);
+ alx_free_napis(alx);
out_disable_adv_intr:
alx_disable_advanced_intr(alx);
return err;
@@ -1059,6 +1268,7 @@ static void __alx_stop(struct alx_priv *alx)
alx_halt(alx);
alx_free_irq(alx);
alx_free_rings(alx);
+ alx_free_napis(alx);
}
static const char *alx_speed_desc(struct alx_hw *hw)
@@ -1241,9 +1451,8 @@ static int alx_tso(struct sk_buff *skb, struct alx_txd *first)
return 1;
}
-static int alx_map_tx_skb(struct alx_priv *alx, struct sk_buff *skb)
+static int alx_map_tx_skb(struct alx_tx_queue *txq, struct sk_buff *skb)
{
- struct alx_tx_queue *txq = &alx->txq;
struct alx_txd *tpd, *first_tpd;
dma_addr_t dma;
int maplen, f, first_idx = txq->write_idx;
@@ -1252,7 +1461,7 @@ static int alx_map_tx_skb(struct alx_priv *alx, struct sk_buff *skb)
tpd = first_tpd;
if (tpd->word1 & (1 << TPD_LSO_V2_SHIFT)) {
- if (++txq->write_idx == alx->tx_ringsz)
+ if (++txq->write_idx == txq->count)
txq->write_idx = 0;
tpd = &txq->tpd[txq->write_idx];
@@ -1262,9 +1471,9 @@ static int alx_map_tx_skb(struct alx_priv *alx, struct sk_buff *skb)
}
maplen = skb_headlen(skb);
- dma = dma_map_single(&alx->hw.pdev->dev, skb->data, maplen,
+ dma = dma_map_single(txq->dev, skb->data, maplen,
DMA_TO_DEVICE);
- if (dma_mapping_error(&alx->hw.pdev->dev, dma))
+ if (dma_mapping_error(txq->dev, dma))
goto err_dma;
dma_unmap_len_set(&txq->bufs[txq->write_idx], size, maplen);
@@ -1278,16 +1487,16 @@ static int alx_map_tx_skb(struct alx_priv *alx, struct sk_buff *skb)
frag = &skb_shinfo(skb)->frags[f];
- if (++txq->write_idx == alx->tx_ringsz)
+ if (++txq->write_idx == txq->count)
txq->write_idx = 0;
tpd = &txq->tpd[txq->write_idx];
tpd->word1 = first_tpd->word1;
maplen = skb_frag_size(frag);
- dma = skb_frag_dma_map(&alx->hw.pdev->dev, frag, 0,
+ dma = skb_frag_dma_map(txq->dev, frag, 0,
maplen, DMA_TO_DEVICE);
- if (dma_mapping_error(&alx->hw.pdev->dev, dma))
+ if (dma_mapping_error(txq->dev, dma))
goto err_dma;
dma_unmap_len_set(&txq->bufs[txq->write_idx], size, maplen);
dma_unmap_addr_set(&txq->bufs[txq->write_idx], dma, dma);
@@ -1300,7 +1509,7 @@ static int alx_map_tx_skb(struct alx_priv *alx, struct sk_buff *skb)
tpd->word1 |= cpu_to_le32(1 << TPD_EOP_SHIFT);
txq->bufs[txq->write_idx].skb = skb;
- if (++txq->write_idx == alx->tx_ringsz)
+ if (++txq->write_idx == txq->count)
txq->write_idx = 0;
return 0;
@@ -1308,23 +1517,24 @@ static int alx_map_tx_skb(struct alx_priv *alx, struct sk_buff *skb)
err_dma:
f = first_idx;
while (f != txq->write_idx) {
- alx_free_txbuf(alx, f);
- if (++f == alx->tx_ringsz)
+ alx_free_txbuf(txq, f);
+ if (++f == txq->count)
f = 0;
}
return -ENOMEM;
}
-static netdev_tx_t alx_start_xmit(struct sk_buff *skb,
- struct net_device *netdev)
+static netdev_tx_t alx_start_xmit_ring(struct sk_buff *skb,
+ struct alx_tx_queue *txq)
{
- struct alx_priv *alx = netdev_priv(netdev);
- struct alx_tx_queue *txq = &alx->txq;
+ struct alx_priv *alx;
struct alx_txd *first;
int tso;
- if (alx_tpd_avail(alx) < alx_tpd_req(skb)) {
- netif_stop_queue(alx->dev);
+ alx = netdev_priv(txq->netdev);
+
+ if (alx_tpd_avail(txq) < alx_tpd_req(skb)) {
+ netif_tx_stop_queue(alx_get_tx_queue(txq));
goto drop;
}
@@ -1337,17 +1547,17 @@ static netdev_tx_t alx_start_xmit(struct sk_buff *skb,
else if (!tso && alx_tx_csum(skb, first))
goto drop;
- if (alx_map_tx_skb(alx, skb) < 0)
+ if (alx_map_tx_skb(txq, skb) < 0)
goto drop;
- netdev_sent_queue(alx->dev, skb->len);
+ netdev_tx_sent_queue(alx_get_tx_queue(txq), skb->len);
/* flush updates before updating hardware */
wmb();
- alx_write_mem16(&alx->hw, ALX_TPD_PRI0_PIDX, txq->write_idx);
+ alx_write_mem16(&alx->hw, txq->p_reg, txq->write_idx);
- if (alx_tpd_avail(alx) < alx->tx_ringsz/8)
- netif_stop_queue(alx->dev);
+ if (alx_tpd_avail(txq) < txq->count / 8)
+ netif_tx_stop_queue(alx_get_tx_queue(txq));
return NETDEV_TX_OK;
@@ -1356,6 +1566,13 @@ drop:
return NETDEV_TX_OK;
}
+static netdev_tx_t alx_start_xmit(struct sk_buff *skb,
+ struct net_device *netdev)
+{
+ struct alx_priv *alx = netdev_priv(netdev);
+ return alx_start_xmit_ring(skb, alx_tx_queue_mapping(alx, skb));
+}
+
static void alx_tx_timeout(struct net_device *dev)
{
struct alx_priv *alx = netdev_priv(dev);
@@ -1413,10 +1630,12 @@ static int alx_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
static void alx_poll_controller(struct net_device *netdev)
{
struct alx_priv *alx = netdev_priv(netdev);
+ int i;
if (alx->flags & ALX_FLAG_USING_MSIX) {
alx_intr_msix_misc(0, alx);
- alx_intr_msix_ring(0, alx);
+ for (i = 0; i < alx->num_txq; i++)
+ alx_intr_msix_ring(0, alx->qnapi[i]);
} else if (alx->flags & ALX_FLAG_USING_MSI)
alx_intr_msi(0, alx);
else
@@ -1533,7 +1752,8 @@ static int alx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto out_pci_release;
}
- netdev = alloc_etherdev(sizeof(*alx));
+ netdev = alloc_etherdev_mqs(sizeof(*alx),
+ ALX_MAX_TX_QUEUES, 1);
if (!netdev) {
err = -ENOMEM;
goto out_pci_release;
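The new alx_start_xmit() above picks a per-queue TX ring before transmitting. A minimal sketch of what such a mapping helper can look like; only the name alx_tx_queue_mapping() appears in the patch, the body below is illustrative:

static struct alx_tx_queue *alx_tx_queue_mapping(struct alx_priv *alx,
						 struct sk_buff *skb)
{
	unsigned int idx = skb_get_queue_mapping(skb);

	/* clamp in case the stack hands us an out-of-range queue */
	if (idx >= alx->num_txq)
		idx = idx % alx->num_txq;

	return alx->qnapi[idx]->txq;
}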
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_ethtool.c b/drivers/net/ethernet/atheros/atl1c/atl1c_ethtool.c
index 872b7abb0196..cfe86a20c899 100644
--- a/drivers/net/ethernet/atheros/atl1c/atl1c_ethtool.c
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c_ethtool.c
@@ -26,46 +26,52 @@
#include "atl1c.h"
-static int atl1c_get_settings(struct net_device *netdev,
- struct ethtool_cmd *ecmd)
+static int atl1c_get_link_ksettings(struct net_device *netdev,
+ struct ethtool_link_ksettings *cmd)
{
struct atl1c_adapter *adapter = netdev_priv(netdev);
struct atl1c_hw *hw = &adapter->hw;
+ u32 supported, advertising;
- ecmd->supported = (SUPPORTED_10baseT_Half |
+ supported = (SUPPORTED_10baseT_Half |
SUPPORTED_10baseT_Full |
SUPPORTED_100baseT_Half |
SUPPORTED_100baseT_Full |
SUPPORTED_Autoneg |
SUPPORTED_TP);
if (hw->link_cap_flags & ATL1C_LINK_CAP_1000M)
- ecmd->supported |= SUPPORTED_1000baseT_Full;
+ supported |= SUPPORTED_1000baseT_Full;
- ecmd->advertising = ADVERTISED_TP;
+ advertising = ADVERTISED_TP;
- ecmd->advertising |= hw->autoneg_advertised;
+ advertising |= hw->autoneg_advertised;
- ecmd->port = PORT_TP;
- ecmd->phy_address = 0;
- ecmd->transceiver = XCVR_INTERNAL;
+ cmd->base.port = PORT_TP;
+ cmd->base.phy_address = 0;
if (adapter->link_speed != SPEED_0) {
- ethtool_cmd_speed_set(ecmd, adapter->link_speed);
+ cmd->base.speed = adapter->link_speed;
if (adapter->link_duplex == FULL_DUPLEX)
- ecmd->duplex = DUPLEX_FULL;
+ cmd->base.duplex = DUPLEX_FULL;
else
- ecmd->duplex = DUPLEX_HALF;
+ cmd->base.duplex = DUPLEX_HALF;
} else {
- ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN);
- ecmd->duplex = DUPLEX_UNKNOWN;
+ cmd->base.speed = SPEED_UNKNOWN;
+ cmd->base.duplex = DUPLEX_UNKNOWN;
}
- ecmd->autoneg = AUTONEG_ENABLE;
+ cmd->base.autoneg = AUTONEG_ENABLE;
+
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
+ supported);
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
+ advertising);
+
return 0;
}
-static int atl1c_set_settings(struct net_device *netdev,
- struct ethtool_cmd *ecmd)
+static int atl1c_set_link_ksettings(struct net_device *netdev,
+ const struct ethtool_link_ksettings *cmd)
{
struct atl1c_adapter *adapter = netdev_priv(netdev);
struct atl1c_hw *hw = &adapter->hw;
@@ -74,12 +80,12 @@ static int atl1c_set_settings(struct net_device *netdev,
while (test_and_set_bit(__AT_RESETTING, &adapter->flags))
msleep(1);
- if (ecmd->autoneg == AUTONEG_ENABLE) {
+ if (cmd->base.autoneg == AUTONEG_ENABLE) {
autoneg_advertised = ADVERTISED_Autoneg;
} else {
- u32 speed = ethtool_cmd_speed(ecmd);
+ u32 speed = cmd->base.speed;
if (speed == SPEED_1000) {
- if (ecmd->duplex != DUPLEX_FULL) {
+ if (cmd->base.duplex != DUPLEX_FULL) {
if (netif_msg_link(adapter))
dev_warn(&adapter->pdev->dev,
"1000M half is invalid\n");
@@ -88,12 +94,12 @@ static int atl1c_set_settings(struct net_device *netdev,
}
autoneg_advertised = ADVERTISED_1000baseT_Full;
} else if (speed == SPEED_100) {
- if (ecmd->duplex == DUPLEX_FULL)
+ if (cmd->base.duplex == DUPLEX_FULL)
autoneg_advertised = ADVERTISED_100baseT_Full;
else
autoneg_advertised = ADVERTISED_100baseT_Half;
} else {
- if (ecmd->duplex == DUPLEX_FULL)
+ if (cmd->base.duplex == DUPLEX_FULL)
autoneg_advertised = ADVERTISED_10baseT_Full;
else
autoneg_advertised = ADVERTISED_10baseT_Half;
@@ -284,8 +290,6 @@ static int atl1c_nway_reset(struct net_device *netdev)
}
static const struct ethtool_ops atl1c_ethtool_ops = {
- .get_settings = atl1c_get_settings,
- .set_settings = atl1c_set_settings,
.get_drvinfo = atl1c_get_drvinfo,
.get_regs_len = atl1c_get_regs_len,
.get_regs = atl1c_get_regs,
@@ -297,6 +301,8 @@ static const struct ethtool_ops atl1c_ethtool_ops = {
.get_link = ethtool_op_get_link,
.get_eeprom_len = atl1c_get_eeprom_len,
.get_eeprom = atl1c_get_eeprom,
+ .get_link_ksettings = atl1c_get_link_ksettings,
+ .set_link_ksettings = atl1c_set_link_ksettings,
};
void atl1c_set_ethtool_ops(struct net_device *netdev)
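The same conversion pattern recurs in each driver converted below: the legacy u32 masks are assembled as before, then copied into the new link-mode bitmaps, and user requests are flattened back. Both helpers are existing ethtool core functions; the values here are only an example:

u32 supported = SUPPORTED_100baseT_Full | SUPPORTED_Autoneg | SUPPORTED_TP;
u32 advertising;

/* driver -> user: legacy bitmask into the link-mode bitmap */
ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
					supported);
/* user -> driver: link-mode bitmap back to a legacy bitmask */
ethtool_convert_link_mode_to_legacy_u32(&advertising,
					cmd->link_modes.advertising);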
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
index a3200ea6d765..773d3b7d8dd5 100644
--- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
@@ -519,6 +519,26 @@ static int atl1c_set_features(struct net_device *netdev,
return 0;
}
+static void atl1c_set_max_mtu(struct net_device *netdev)
+{
+ struct atl1c_adapter *adapter = netdev_priv(netdev);
+ struct atl1c_hw *hw = &adapter->hw;
+
+ switch (hw->nic_type) {
+ /* These (GbE) devices support jumbo packets, max_mtu 6122 */
+ case athr_l1c:
+ case athr_l1d:
+ case athr_l1d_2:
+ netdev->max_mtu = MAX_JUMBO_FRAME_SIZE -
+ (ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
+ break;
+ /* The 10/100 devices don't support jumbo packets, max_mtu 1500 */
+ default:
+ netdev->max_mtu = ETH_DATA_LEN;
+ break;
+ }
+}
+
/**
* atl1c_change_mtu - Change the Maximum Transfer Unit
* @netdev: network interface device structure
@@ -529,22 +549,9 @@ static int atl1c_set_features(struct net_device *netdev,
static int atl1c_change_mtu(struct net_device *netdev, int new_mtu)
{
struct atl1c_adapter *adapter = netdev_priv(netdev);
- struct atl1c_hw *hw = &adapter->hw;
- int old_mtu = netdev->mtu;
- int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
-
- /* Fast Ethernet controller doesn't support jumbo packet */
- if (((hw->nic_type == athr_l2c ||
- hw->nic_type == athr_l2c_b ||
- hw->nic_type == athr_l2c_b2) && new_mtu > ETH_DATA_LEN) ||
- max_frame < ETH_ZLEN + ETH_FCS_LEN ||
- max_frame > MAX_JUMBO_FRAME_SIZE) {
- if (netif_msg_link(adapter))
- dev_warn(&adapter->pdev->dev, "invalid MTU setting\n");
- return -EINVAL;
- }
+
/* set MTU */
- if (old_mtu != new_mtu && netif_running(netdev)) {
+ if (netif_running(netdev)) {
while (test_and_set_bit(__AT_RESETTING, &adapter->flags))
msleep(1);
netdev->mtu = new_mtu;
@@ -2511,6 +2518,7 @@ static int atl1c_init_netdev(struct net_device *netdev, struct pci_dev *pdev)
netdev->netdev_ops = &atl1c_netdev_ops;
netdev->watchdog_timeo = AT_TX_WATCHDOG;
+ netdev->min_mtu = ETH_ZLEN - (ETH_HLEN + VLAN_HLEN);
atl1c_set_ethtool_ops(netdev);
/* TODO: add when ready */
@@ -2613,6 +2621,9 @@ static int atl1c_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
dev_err(&pdev->dev, "net device private data init failed\n");
goto err_sw_init;
}
+ /* set max MTU */
+ atl1c_set_max_mtu(netdev);
+
atl1c_reset_pcie(&adapter->hw, ATL1C_PCIE_L0S_L1_DISABLE);
/* Init GPHY as early as possible due to power saving issue */
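Dropping the open-coded range checks is safe because dev_set_mtu() now rejects out-of-range values before ->ndo_change_mtu() is ever called; roughly (paraphrasing the net core check, not this driver's code):

if (new_mtu < dev->min_mtu ||
    (dev->max_mtu > 0 && new_mtu > dev->max_mtu))
	return -EINVAL;	/* rejected before the driver callback runs */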
diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e_ethtool.c b/drivers/net/ethernet/atheros/atl1e/atl1e_ethtool.c
index 8e3dbd4d9f79..cb489e7e8374 100644
--- a/drivers/net/ethernet/atheros/atl1e/atl1e_ethtool.c
+++ b/drivers/net/ethernet/atheros/atl1e/atl1e_ethtool.c
@@ -26,73 +26,83 @@
#include "atl1e.h"
-static int atl1e_get_settings(struct net_device *netdev,
- struct ethtool_cmd *ecmd)
+static int atl1e_get_link_ksettings(struct net_device *netdev,
+ struct ethtool_link_ksettings *cmd)
{
struct atl1e_adapter *adapter = netdev_priv(netdev);
struct atl1e_hw *hw = &adapter->hw;
+ u32 supported, advertising;
- ecmd->supported = (SUPPORTED_10baseT_Half |
+ supported = (SUPPORTED_10baseT_Half |
SUPPORTED_10baseT_Full |
SUPPORTED_100baseT_Half |
SUPPORTED_100baseT_Full |
SUPPORTED_Autoneg |
SUPPORTED_TP);
if (hw->nic_type == athr_l1e)
- ecmd->supported |= SUPPORTED_1000baseT_Full;
+ supported |= SUPPORTED_1000baseT_Full;
- ecmd->advertising = ADVERTISED_TP;
+ advertising = ADVERTISED_TP;
- ecmd->advertising |= ADVERTISED_Autoneg;
- ecmd->advertising |= hw->autoneg_advertised;
+ advertising |= ADVERTISED_Autoneg;
+ advertising |= hw->autoneg_advertised;
- ecmd->port = PORT_TP;
- ecmd->phy_address = 0;
- ecmd->transceiver = XCVR_INTERNAL;
+ cmd->base.port = PORT_TP;
+ cmd->base.phy_address = 0;
if (adapter->link_speed != SPEED_0) {
- ethtool_cmd_speed_set(ecmd, adapter->link_speed);
+ cmd->base.speed = adapter->link_speed;
if (adapter->link_duplex == FULL_DUPLEX)
- ecmd->duplex = DUPLEX_FULL;
+ cmd->base.duplex = DUPLEX_FULL;
else
- ecmd->duplex = DUPLEX_HALF;
+ cmd->base.duplex = DUPLEX_HALF;
} else {
- ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN);
- ecmd->duplex = DUPLEX_UNKNOWN;
+ cmd->base.speed = SPEED_UNKNOWN;
+ cmd->base.duplex = DUPLEX_UNKNOWN;
}
- ecmd->autoneg = AUTONEG_ENABLE;
+ cmd->base.autoneg = AUTONEG_ENABLE;
+
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
+ supported);
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
+ advertising);
+
return 0;
}
-static int atl1e_set_settings(struct net_device *netdev,
- struct ethtool_cmd *ecmd)
+static int atl1e_set_link_ksettings(struct net_device *netdev,
+ const struct ethtool_link_ksettings *cmd)
{
struct atl1e_adapter *adapter = netdev_priv(netdev);
struct atl1e_hw *hw = &adapter->hw;
+ u32 advertising;
+
+ ethtool_convert_link_mode_to_legacy_u32(&advertising,
+ cmd->link_modes.advertising);
while (test_and_set_bit(__AT_RESETTING, &adapter->flags))
msleep(1);
- if (ecmd->autoneg == AUTONEG_ENABLE) {
+ if (cmd->base.autoneg == AUTONEG_ENABLE) {
u16 adv4, adv9;
- if ((ecmd->advertising&ADVERTISE_1000_FULL)) {
+ if (advertising & ADVERTISE_1000_FULL) {
if (hw->nic_type == athr_l1e) {
hw->autoneg_advertised =
- ecmd->advertising & AT_ADV_MASK;
+ advertising & AT_ADV_MASK;
} else {
clear_bit(__AT_RESETTING, &adapter->flags);
return -EINVAL;
}
- } else if (ecmd->advertising&ADVERTISE_1000_HALF) {
+ } else if (advertising & ADVERTISE_1000_HALF) {
clear_bit(__AT_RESETTING, &adapter->flags);
return -EINVAL;
} else {
hw->autoneg_advertised =
- ecmd->advertising & AT_ADV_MASK;
+ advertising & AT_ADV_MASK;
}
- ecmd->advertising = hw->autoneg_advertised |
+ advertising = hw->autoneg_advertised |
ADVERTISED_TP | ADVERTISED_Autoneg;
adv4 = hw->mii_autoneg_adv_reg & ~ADVERTISE_ALL;
@@ -367,8 +377,6 @@ static int atl1e_nway_reset(struct net_device *netdev)
}
static const struct ethtool_ops atl1e_ethtool_ops = {
- .get_settings = atl1e_get_settings,
- .set_settings = atl1e_set_settings,
.get_drvinfo = atl1e_get_drvinfo,
.get_regs_len = atl1e_get_regs_len,
.get_regs = atl1e_get_regs,
@@ -380,6 +388,8 @@ static const struct ethtool_ops atl1e_ethtool_ops = {
.get_eeprom_len = atl1e_get_eeprom_len,
.get_eeprom = atl1e_get_eeprom,
.set_eeprom = atl1e_set_eeprom,
+ .get_link_ksettings = atl1e_get_link_ksettings,
+ .set_link_ksettings = atl1e_set_link_ksettings,
};
void atl1e_set_ethtool_ops(struct net_device *netdev)
diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
index 974713b19ab6..e96091b652a7 100644
--- a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
+++ b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
@@ -439,16 +439,10 @@ static int atl1e_set_features(struct net_device *netdev,
static int atl1e_change_mtu(struct net_device *netdev, int new_mtu)
{
struct atl1e_adapter *adapter = netdev_priv(netdev);
- int old_mtu = netdev->mtu;
int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
- if ((max_frame < ETH_ZLEN + ETH_FCS_LEN) ||
- (max_frame > MAX_JUMBO_FRAME_SIZE)) {
- netdev_warn(adapter->netdev, "invalid MTU setting\n");
- return -EINVAL;
- }
/* set MTU */
- if (old_mtu != new_mtu && netif_running(netdev)) {
+ if (netif_running(netdev)) {
while (test_and_set_bit(__AT_RESETTING, &adapter->flags))
msleep(1);
netdev->mtu = new_mtu;
@@ -2272,6 +2266,10 @@ static int atl1e_init_netdev(struct net_device *netdev, struct pci_dev *pdev)
netdev->netdev_ops = &atl1e_netdev_ops;
netdev->watchdog_timeo = AT_TX_WATCHDOG;
+ /* MTU range: 42 - 8170 */
+ netdev->min_mtu = ETH_ZLEN - (ETH_HLEN + VLAN_HLEN);
+ netdev->max_mtu = MAX_JUMBO_FRAME_SIZE -
+ (ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
atl1e_set_ethtool_ops(netdev);
netdev->hw_features = NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_TSO |
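The commented MTU ranges fall straight out of the constants: min_mtu = ETH_ZLEN - (ETH_HLEN + VLAN_HLEN) = 60 - 18 = 42 in each atl driver, and max_mtu = MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN) = MAX_JUMBO_FRAME_SIZE - 22, giving 8170 here (0x2000 = 8192), 6122 for atl1c (6144-byte frames) and 10218 for atl1 (10240-byte frames).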
diff --git a/drivers/net/ethernet/atheros/atlx/atl1.c b/drivers/net/ethernet/atheros/atlx/atl1.c
index 529bca718334..7dad8e4b9d2a 100644
--- a/drivers/net/ethernet/atheros/atlx/atl1.c
+++ b/drivers/net/ethernet/atheros/atlx/atl1.c
@@ -2701,23 +2701,15 @@ static void atl1_reset_dev_task(struct work_struct *work)
static int atl1_change_mtu(struct net_device *netdev, int new_mtu)
{
struct atl1_adapter *adapter = netdev_priv(netdev);
- int old_mtu = netdev->mtu;
int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
- if ((max_frame < ETH_ZLEN + ETH_FCS_LEN) ||
- (max_frame > MAX_JUMBO_FRAME_SIZE)) {
- if (netif_msg_link(adapter))
- dev_warn(&adapter->pdev->dev, "invalid MTU setting\n");
- return -EINVAL;
- }
-
adapter->hw.max_frame_size = max_frame;
adapter->hw.tx_jumbo_task_th = (max_frame + 7) >> 3;
adapter->rx_buffer_len = (max_frame + 7) & ~7;
adapter->hw.rx_jumbo_th = adapter->rx_buffer_len / 8;
netdev->mtu = new_mtu;
- if ((old_mtu != new_mtu) && netif_running(netdev)) {
+ if (netif_running(netdev)) {
atl1_down(adapter);
atl1_up(adapter);
}
@@ -3031,6 +3023,11 @@ static int atl1_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/* is this valid? see atl1_setup_mac_ctrl() */
netdev->features |= NETIF_F_RXCSUM;
+ /* MTU range: 42 - 10218 */
+ netdev->min_mtu = ETH_ZLEN - (ETH_HLEN + VLAN_HLEN);
+ netdev->max_mtu = MAX_JUMBO_FRAME_SIZE -
+ (ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
+
/*
* patch for some old-revision L1 chips;
* the final version of L1 may not need these
@@ -3217,66 +3214,72 @@ static int atl1_get_sset_count(struct net_device *netdev, int sset)
}
}
-static int atl1_get_settings(struct net_device *netdev,
- struct ethtool_cmd *ecmd)
+static int atl1_get_link_ksettings(struct net_device *netdev,
+ struct ethtool_link_ksettings *cmd)
{
struct atl1_adapter *adapter = netdev_priv(netdev);
struct atl1_hw *hw = &adapter->hw;
+ u32 supported, advertising;
- ecmd->supported = (SUPPORTED_10baseT_Half |
+ supported = (SUPPORTED_10baseT_Half |
SUPPORTED_10baseT_Full |
SUPPORTED_100baseT_Half |
SUPPORTED_100baseT_Full |
SUPPORTED_1000baseT_Full |
SUPPORTED_Autoneg | SUPPORTED_TP);
- ecmd->advertising = ADVERTISED_TP;
+ advertising = ADVERTISED_TP;
if (hw->media_type == MEDIA_TYPE_AUTO_SENSOR ||
hw->media_type == MEDIA_TYPE_1000M_FULL) {
- ecmd->advertising |= ADVERTISED_Autoneg;
+ advertising |= ADVERTISED_Autoneg;
if (hw->media_type == MEDIA_TYPE_AUTO_SENSOR) {
- ecmd->advertising |= ADVERTISED_Autoneg;
- ecmd->advertising |=
+ advertising |= ADVERTISED_Autoneg;
+ advertising |=
(ADVERTISED_10baseT_Half |
ADVERTISED_10baseT_Full |
ADVERTISED_100baseT_Half |
ADVERTISED_100baseT_Full |
ADVERTISED_1000baseT_Full);
} else
- ecmd->advertising |= (ADVERTISED_1000baseT_Full);
+ advertising |= (ADVERTISED_1000baseT_Full);
}
- ecmd->port = PORT_TP;
- ecmd->phy_address = 0;
- ecmd->transceiver = XCVR_INTERNAL;
+ cmd->base.port = PORT_TP;
+ cmd->base.phy_address = 0;
if (netif_carrier_ok(adapter->netdev)) {
u16 link_speed, link_duplex;
atl1_get_speed_and_duplex(hw, &link_speed, &link_duplex);
- ethtool_cmd_speed_set(ecmd, link_speed);
+ cmd->base.speed = link_speed;
if (link_duplex == FULL_DUPLEX)
- ecmd->duplex = DUPLEX_FULL;
+ cmd->base.duplex = DUPLEX_FULL;
else
- ecmd->duplex = DUPLEX_HALF;
+ cmd->base.duplex = DUPLEX_HALF;
} else {
- ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN);
- ecmd->duplex = DUPLEX_UNKNOWN;
+ cmd->base.speed = SPEED_UNKNOWN;
+ cmd->base.duplex = DUPLEX_UNKNOWN;
}
if (hw->media_type == MEDIA_TYPE_AUTO_SENSOR ||
hw->media_type == MEDIA_TYPE_1000M_FULL)
- ecmd->autoneg = AUTONEG_ENABLE;
+ cmd->base.autoneg = AUTONEG_ENABLE;
else
- ecmd->autoneg = AUTONEG_DISABLE;
+ cmd->base.autoneg = AUTONEG_DISABLE;
+
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
+ supported);
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
+ advertising);
return 0;
}
-static int atl1_set_settings(struct net_device *netdev,
- struct ethtool_cmd *ecmd)
+static int atl1_set_link_ksettings(struct net_device *netdev,
+ const struct ethtool_link_ksettings *cmd)
{
struct atl1_adapter *adapter = netdev_priv(netdev);
struct atl1_hw *hw = &adapter->hw;
u16 phy_data;
int ret_val = 0;
u16 old_media_type = hw->media_type;
+ u32 advertising;
if (netif_running(adapter->netdev)) {
if (netif_msg_link(adapter))
@@ -3285,12 +3288,12 @@ static int atl1_set_settings(struct net_device *netdev,
atl1_down(adapter);
}
- if (ecmd->autoneg == AUTONEG_ENABLE)
+ if (cmd->base.autoneg == AUTONEG_ENABLE)
hw->media_type = MEDIA_TYPE_AUTO_SENSOR;
else {
- u32 speed = ethtool_cmd_speed(ecmd);
+ u32 speed = cmd->base.speed;
if (speed == SPEED_1000) {
- if (ecmd->duplex != DUPLEX_FULL) {
+ if (cmd->base.duplex != DUPLEX_FULL) {
if (netif_msg_link(adapter))
dev_warn(&adapter->pdev->dev,
"1000M half is invalid\n");
@@ -3299,12 +3302,12 @@ static int atl1_set_settings(struct net_device *netdev,
}
hw->media_type = MEDIA_TYPE_1000M_FULL;
} else if (speed == SPEED_100) {
- if (ecmd->duplex == DUPLEX_FULL)
+ if (cmd->base.duplex == DUPLEX_FULL)
hw->media_type = MEDIA_TYPE_100M_FULL;
else
hw->media_type = MEDIA_TYPE_100M_HALF;
} else {
- if (ecmd->duplex == DUPLEX_FULL)
+ if (cmd->base.duplex == DUPLEX_FULL)
hw->media_type = MEDIA_TYPE_10M_FULL;
else
hw->media_type = MEDIA_TYPE_10M_HALF;
@@ -3312,7 +3315,7 @@ static int atl1_set_settings(struct net_device *netdev,
}
switch (hw->media_type) {
case MEDIA_TYPE_AUTO_SENSOR:
- ecmd->advertising =
+ advertising =
ADVERTISED_10baseT_Half |
ADVERTISED_10baseT_Full |
ADVERTISED_100baseT_Half |
@@ -3321,12 +3324,12 @@ static int atl1_set_settings(struct net_device *netdev,
ADVERTISED_Autoneg | ADVERTISED_TP;
break;
case MEDIA_TYPE_1000M_FULL:
- ecmd->advertising =
+ advertising =
ADVERTISED_1000baseT_Full |
ADVERTISED_Autoneg | ADVERTISED_TP;
break;
default:
- ecmd->advertising = 0;
+ advertising = 0;
break;
}
if (atl1_phy_setup_autoneg_adv(hw)) {
@@ -3666,8 +3669,6 @@ static int atl1_nway_reset(struct net_device *netdev)
}
static const struct ethtool_ops atl1_ethtool_ops = {
- .get_settings = atl1_get_settings,
- .set_settings = atl1_set_settings,
.get_drvinfo = atl1_get_drvinfo,
.get_wol = atl1_get_wol,
.set_wol = atl1_set_wol,
@@ -3684,6 +3685,8 @@ static const struct ethtool_ops atl1_ethtool_ops = {
.nway_reset = atl1_nway_reset,
.get_ethtool_stats = atl1_get_ethtool_stats,
.get_sset_count = atl1_get_sset_count,
+ .get_link_ksettings = atl1_get_link_ksettings,
+ .set_link_ksettings = atl1_set_link_ksettings,
};
module_pci_driver(atl1_driver);
diff --git a/drivers/net/ethernet/atheros/atlx/atl2.c b/drivers/net/ethernet/atheros/atlx/atl2.c
index 2ff465848b65..63f2deec2a52 100644
--- a/drivers/net/ethernet/atheros/atlx/atl2.c
+++ b/drivers/net/ethernet/atheros/atlx/atl2.c
@@ -253,7 +253,7 @@ static int atl2_configure(struct atl2_adapter *adapter)
/* set MTU */
ATL2_WRITE_REG(hw, REG_MTU, adapter->netdev->mtu +
- ENET_HEADER_SIZE + VLAN_SIZE + ETHERNET_FCS_SIZE);
+ ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN);
/* 1590 */
ATL2_WRITE_REG(hw, REG_TX_CUT_THRESH, 0x177);
@@ -925,15 +925,11 @@ static int atl2_change_mtu(struct net_device *netdev, int new_mtu)
struct atl2_adapter *adapter = netdev_priv(netdev);
struct atl2_hw *hw = &adapter->hw;
- if ((new_mtu < 40) || (new_mtu > (ETH_DATA_LEN + VLAN_SIZE)))
- return -EINVAL;
-
/* set MTU */
- if (hw->max_frame_size != new_mtu) {
- netdev->mtu = new_mtu;
- ATL2_WRITE_REG(hw, REG_MTU, new_mtu + ENET_HEADER_SIZE +
- VLAN_SIZE + ETHERNET_FCS_SIZE);
- }
+ netdev->mtu = new_mtu;
+ hw->max_frame_size = new_mtu;
+ ATL2_WRITE_REG(hw, REG_MTU, new_mtu + ETH_HLEN +
+ VLAN_HLEN + ETH_FCS_LEN);
return 0;
}
@@ -1398,6 +1394,8 @@ static int atl2_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
netdev->netdev_ops = &atl2_netdev_ops;
netdev->ethtool_ops = &atl2_ethtool_ops;
netdev->watchdog_timeo = 5 * HZ;
+ netdev->min_mtu = 40;
+ netdev->max_mtu = ETH_DATA_LEN + VLAN_HLEN;
strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);
netdev->mem_start = mmio_start;
@@ -1739,81 +1737,87 @@ static void atl2_write_pci_cfg(struct atl2_hw *hw, u32 reg, u16 *value)
pci_write_config_word(adapter->pdev, reg, *value);
}
-static int atl2_get_settings(struct net_device *netdev,
- struct ethtool_cmd *ecmd)
+static int atl2_get_link_ksettings(struct net_device *netdev,
+ struct ethtool_link_ksettings *cmd)
{
struct atl2_adapter *adapter = netdev_priv(netdev);
struct atl2_hw *hw = &adapter->hw;
+ u32 supported, advertising;
- ecmd->supported = (SUPPORTED_10baseT_Half |
+ supported = (SUPPORTED_10baseT_Half |
SUPPORTED_10baseT_Full |
SUPPORTED_100baseT_Half |
SUPPORTED_100baseT_Full |
SUPPORTED_Autoneg |
SUPPORTED_TP);
- ecmd->advertising = ADVERTISED_TP;
+ advertising = ADVERTISED_TP;
- ecmd->advertising |= ADVERTISED_Autoneg;
- ecmd->advertising |= hw->autoneg_advertised;
+ advertising |= ADVERTISED_Autoneg;
+ advertising |= hw->autoneg_advertised;
- ecmd->port = PORT_TP;
- ecmd->phy_address = 0;
- ecmd->transceiver = XCVR_INTERNAL;
+ cmd->base.port = PORT_TP;
+ cmd->base.phy_address = 0;
if (adapter->link_speed != SPEED_0) {
- ethtool_cmd_speed_set(ecmd, adapter->link_speed);
+ cmd->base.speed = adapter->link_speed;
if (adapter->link_duplex == FULL_DUPLEX)
- ecmd->duplex = DUPLEX_FULL;
+ cmd->base.duplex = DUPLEX_FULL;
else
- ecmd->duplex = DUPLEX_HALF;
+ cmd->base.duplex = DUPLEX_HALF;
} else {
- ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN);
- ecmd->duplex = DUPLEX_UNKNOWN;
+ cmd->base.speed = SPEED_UNKNOWN;
+ cmd->base.duplex = DUPLEX_UNKNOWN;
}
- ecmd->autoneg = AUTONEG_ENABLE;
+ cmd->base.autoneg = AUTONEG_ENABLE;
+
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
+ supported);
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
+ advertising);
+
return 0;
}
-static int atl2_set_settings(struct net_device *netdev,
- struct ethtool_cmd *ecmd)
+static int atl2_set_link_ksettings(struct net_device *netdev,
+ const struct ethtool_link_ksettings *cmd)
{
struct atl2_adapter *adapter = netdev_priv(netdev);
struct atl2_hw *hw = &adapter->hw;
+ u32 advertising;
+
+ ethtool_convert_link_mode_to_legacy_u32(&advertising,
+ cmd->link_modes.advertising);
while (test_and_set_bit(__ATL2_RESETTING, &adapter->flags))
msleep(1);
- if (ecmd->autoneg == AUTONEG_ENABLE) {
+ if (cmd->base.autoneg == AUTONEG_ENABLE) {
#define MY_ADV_MASK (ADVERTISE_10_HALF | \
ADVERTISE_10_FULL | \
ADVERTISE_100_HALF| \
ADVERTISE_100_FULL)
- if ((ecmd->advertising & MY_ADV_MASK) == MY_ADV_MASK) {
+ if ((advertising & MY_ADV_MASK) == MY_ADV_MASK) {
hw->MediaType = MEDIA_TYPE_AUTO_SENSOR;
hw->autoneg_advertised = MY_ADV_MASK;
- } else if ((ecmd->advertising & MY_ADV_MASK) ==
- ADVERTISE_100_FULL) {
+ } else if ((advertising & MY_ADV_MASK) == ADVERTISE_100_FULL) {
hw->MediaType = MEDIA_TYPE_100M_FULL;
hw->autoneg_advertised = ADVERTISE_100_FULL;
- } else if ((ecmd->advertising & MY_ADV_MASK) ==
- ADVERTISE_100_HALF) {
+ } else if ((advertising & MY_ADV_MASK) == ADVERTISE_100_HALF) {
hw->MediaType = MEDIA_TYPE_100M_HALF;
hw->autoneg_advertised = ADVERTISE_100_HALF;
- } else if ((ecmd->advertising & MY_ADV_MASK) ==
- ADVERTISE_10_FULL) {
+ } else if ((advertising & MY_ADV_MASK) == ADVERTISE_10_FULL) {
hw->MediaType = MEDIA_TYPE_10M_FULL;
hw->autoneg_advertised = ADVERTISE_10_FULL;
- } else if ((ecmd->advertising & MY_ADV_MASK) ==
- ADVERTISE_10_HALF) {
+ } else if ((advertising & MY_ADV_MASK) == ADVERTISE_10_HALF) {
hw->MediaType = MEDIA_TYPE_10M_HALF;
hw->autoneg_advertised = ADVERTISE_10_HALF;
} else {
clear_bit(__ATL2_RESETTING, &adapter->flags);
return -EINVAL;
}
- ecmd->advertising = hw->autoneg_advertised |
+ advertising = hw->autoneg_advertised |
ADVERTISED_TP | ADVERTISED_Autoneg;
} else {
clear_bit(__ATL2_RESETTING, &adapter->flags);
@@ -2082,8 +2086,6 @@ static int atl2_nway_reset(struct net_device *netdev)
}
static const struct ethtool_ops atl2_ethtool_ops = {
- .get_settings = atl2_get_settings,
- .set_settings = atl2_set_settings,
.get_drvinfo = atl2_get_drvinfo,
.get_regs_len = atl2_get_regs_len,
.get_regs = atl2_get_regs,
@@ -2096,6 +2098,8 @@ static const struct ethtool_ops atl2_ethtool_ops = {
.get_eeprom_len = atl2_get_eeprom_len,
.get_eeprom = atl2_get_eeprom,
.set_eeprom = atl2_set_eeprom,
+ .get_link_ksettings = atl2_get_link_ksettings,
+ .set_link_ksettings = atl2_set_link_ksettings,
};
#define LBYTESWAP(a) ((((a) & 0x00ff00ff) << 8) | \
diff --git a/drivers/net/ethernet/atheros/atlx/atl2.h b/drivers/net/ethernet/atheros/atlx/atl2.h
index 2f27d4c4c3ad..c64a6bdfa7ae 100644
--- a/drivers/net/ethernet/atheros/atlx/atl2.h
+++ b/drivers/net/ethernet/atheros/atlx/atl2.h
@@ -228,12 +228,9 @@ static void atl2_force_ps(struct atl2_hw *hw);
#define AUTONEG_ADVERTISE_SPEED_DEFAULT 0x000F /* Everything */
/* The size (in bytes) of an Ethernet packet */
-#define ENET_HEADER_SIZE 14
#define MAXIMUM_ETHERNET_FRAME_SIZE 1518 /* with FCS */
#define MINIMUM_ETHERNET_FRAME_SIZE 64 /* with FCS */
-#define ETHERNET_FCS_SIZE 4
#define MAX_JUMBO_FRAME_SIZE 0x2000
-#define VLAN_SIZE 4
struct tx_pkt_header {
unsigned pkt_size:11;
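Numerically nothing changes in the REG_MTU programming above: the value written remains mtu + 22 (14-byte header + 4-byte VLAN tag + 4-byte FCS); the patch only replaces the private ENET_HEADER_SIZE/VLAN_SIZE/ETHERNET_FCS_SIZE macros with the standard ETH_HLEN/VLAN_HLEN/ETH_FCS_LEN.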
diff --git a/drivers/net/ethernet/aurora/nb8800.c b/drivers/net/ethernet/aurora/nb8800.c
index e078d8da978c..5711fbbd6ae3 100644
--- a/drivers/net/ethernet/aurora/nb8800.c
+++ b/drivers/net/ethernet/aurora/nb8800.c
@@ -975,8 +975,10 @@ static int nb8800_open(struct net_device *dev)
phydev = of_phy_connect(dev, priv->phy_node,
nb8800_link_reconfigure, 0,
priv->phy_mode);
- if (!phydev)
+ if (!phydev) {
+ err = -ENODEV;
goto err_free_irq;
+ }
nb8800_pause_adv(dev);
@@ -1032,20 +1034,9 @@ static const struct net_device_ops nb8800_netdev_ops = {
.ndo_set_mac_address = nb8800_set_mac_address,
.ndo_set_rx_mode = nb8800_set_rx_mode,
.ndo_do_ioctl = nb8800_ioctl,
- .ndo_change_mtu = eth_change_mtu,
.ndo_validate_addr = eth_validate_addr,
};
-static int nb8800_nway_reset(struct net_device *dev)
-{
- struct phy_device *phydev = dev->phydev;
-
- if (!phydev)
- return -ENODEV;
-
- return genphy_restart_aneg(phydev);
-}
-
static void nb8800_get_pauseparam(struct net_device *dev,
struct ethtool_pauseparam *pp)
{
@@ -1164,7 +1155,7 @@ static void nb8800_get_ethtool_stats(struct net_device *dev,
}
static const struct ethtool_ops nb8800_ethtool_ops = {
- .nway_reset = nb8800_nway_reset,
+ .nway_reset = phy_ethtool_nway_reset,
.get_link = ethtool_op_get_link,
.get_pauseparam = nb8800_get_pauseparam,
.set_pauseparam = nb8800_set_pauseparam,
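phy_ethtool_nway_reset() is the generic phylib helper the deleted open-coded versions collapse into; paraphrased, it does no more than:

int phy_ethtool_nway_reset(struct net_device *ndev)
{
	if (!ndev->phydev)
		return -ENODEV;

	return genphy_restart_aneg(ndev->phydev);
}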
diff --git a/drivers/net/ethernet/broadcom/Kconfig b/drivers/net/ethernet/broadcom/Kconfig
index bd8c80c0b71c..940fb24bba21 100644
--- a/drivers/net/ethernet/broadcom/Kconfig
+++ b/drivers/net/ethernet/broadcom/Kconfig
@@ -110,7 +110,7 @@ config TIGON3
depends on PCI
select PHYLIB
select HWMON
- select PTP_1588_CLOCK
+ imply PTP_1588_CLOCK
---help---
This driver supports Broadcom Tigon3 based gigabit Ethernet cards.
@@ -120,7 +120,7 @@ config TIGON3
config BNX2X
tristate "Broadcom NetXtremeII 10Gb support"
depends on PCI
- select PTP_1588_CLOCK
+ imply PTP_1588_CLOCK
select FW_LOADER
select ZLIB_INFLATE
select LIBCRC32C
@@ -203,4 +203,14 @@ config BNXT_SRIOV
Virtualization support in the NetXtreme-C/E products. This
allows for virtual function acceleration in virtual environments.
+config BNXT_DCB
+ bool "Data Center Bridging (DCB) Support"
+ default n
+ depends on BNXT && DCB
+ ---help---
+ Say Y here if you want to use Data Center Bridging (DCB) in the
+ driver.
+
+ If unsure, say N.
+
endif # NET_VENDOR_BROADCOM
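Unlike select, which force-enables PTP_1588_CLOCK (and drags the PTP core built-in whenever the driver is built-in), imply only sets its default: PTP_1588_CLOCK can still be disabled or built as a module, and the drivers must cope with its absence.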
diff --git a/drivers/net/ethernet/broadcom/b44.c b/drivers/net/ethernet/broadcom/b44.c
index 17aa33c5567d..1df3048a3cdb 100644
--- a/drivers/net/ethernet/broadcom/b44.c
+++ b/drivers/net/ethernet/broadcom/b44.c
@@ -59,8 +59,8 @@
#define B44_TX_TIMEOUT (5 * HZ)
/* hardware minimum and maximum for a single frame's data payload */
-#define B44_MIN_MTU 60
-#define B44_MAX_MTU 1500
+#define B44_MIN_MTU ETH_ZLEN
+#define B44_MAX_MTU ETH_DATA_LEN
#define B44_RX_RING_SIZE 512
#define B44_DEF_RX_RING_PENDING 200
@@ -1064,9 +1064,6 @@ static int b44_change_mtu(struct net_device *dev, int new_mtu)
{
struct b44 *bp = netdev_priv(dev);
- if (new_mtu < B44_MIN_MTU || new_mtu > B44_MAX_MTU)
- return -EINVAL;
-
if (!netif_running(dev)) {
/* We'll just catch it later when the
* device is up'd.
@@ -2377,6 +2374,8 @@ static int b44_init_one(struct ssb_device *sdev,
dev->netdev_ops = &b44_netdev_ops;
netif_napi_add(dev, &bp->napi, b44_poll, 64);
dev->watchdog_timeo = B44_TX_TIMEOUT;
+ dev->min_mtu = B44_MIN_MTU;
+ dev->max_mtu = B44_MAX_MTU;
dev->irq = sdev->irq;
dev->ethtool_ops = &b44_ethtool_ops;
diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.c b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
index 537090952c45..3b14d5144228 100644
--- a/drivers/net/ethernet/broadcom/bcm63xx_enet.c
+++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
@@ -1434,11 +1434,8 @@ static int bcm_enet_nway_reset(struct net_device *dev)
struct bcm_enet_priv *priv;
priv = netdev_priv(dev);
- if (priv->has_phy) {
- if (!dev->phydev)
- return -ENODEV;
- return genphy_restart_aneg(dev->phydev);
- }
+ if (priv->has_phy)
+ return phy_ethtool_nway_reset(dev);
return -EOPNOTSUPP;
}
@@ -1623,20 +1620,19 @@ static int bcm_enet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
}
/*
- * calculate actual hardware mtu
+ * adjust mtu; can't be called while the device is running
*/
-static int compute_hw_mtu(struct bcm_enet_priv *priv, int mtu)
+static int bcm_enet_change_mtu(struct net_device *dev, int new_mtu)
{
- int actual_mtu;
+ struct bcm_enet_priv *priv = netdev_priv(dev);
+ int actual_mtu = new_mtu;
- actual_mtu = mtu;
+ if (netif_running(dev))
+ return -EBUSY;
/* add ethernet header + vlan tag size */
actual_mtu += VLAN_ETH_HLEN;
- if (actual_mtu < 64 || actual_mtu > BCMENET_MAX_MTU)
- return -EINVAL;
-
/*
* set up maximum size before we get overflow mark in
* descriptor, note that this will not prevent reception of
* big frames, they will be split into multiple buffers
* anyway
*/
*/
priv->rx_skb_size = ALIGN(actual_mtu + ETH_FCS_LEN,
priv->dma_maxburst * 4);
- return 0;
-}
-
-/*
- * adjust mtu, can't be called while device is running
- */
-static int bcm_enet_change_mtu(struct net_device *dev, int new_mtu)
-{
- int ret;
-
- if (netif_running(dev))
- return -EBUSY;
- ret = compute_hw_mtu(netdev_priv(dev), new_mtu);
- if (ret)
- return ret;
dev->mtu = new_mtu;
return 0;
}
@@ -1756,7 +1737,7 @@ static int bcm_enet_probe(struct platform_device *pdev)
priv->enet_is_sw = false;
priv->dma_maxburst = BCMENET_DMA_MAXBURST;
- ret = compute_hw_mtu(priv, dev->mtu);
+ ret = bcm_enet_change_mtu(dev, dev->mtu);
if (ret)
goto out;
@@ -1889,6 +1870,9 @@ static int bcm_enet_probe(struct platform_device *pdev)
netif_napi_add(dev, &priv->napi, bcm_enet_poll, 16);
dev->ethtool_ops = &bcm_enet_ethtool_ops;
+ /* MTU range: 46 - 2028 */
+ dev->min_mtu = ETH_ZLEN - ETH_HLEN;
+ dev->max_mtu = BCMENET_MAX_MTU - VLAN_ETH_HLEN;
SET_NETDEV_DEV(dev, &pdev->dev);
ret = register_netdev(dev);
@@ -2743,7 +2727,7 @@ static int bcm_enetsw_probe(struct platform_device *pdev)
priv->dma_chan_width = pd->dma_chan_width;
}
- ret = compute_hw_mtu(priv, dev->mtu);
+ ret = bcm_enet_change_mtu(dev, dev->mtu);
if (ret)
goto out;
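The advertised 46 - 2028 range follows the same arithmetic as the other drivers: min_mtu = ETH_ZLEN - ETH_HLEN = 60 - 14 = 46, and max_mtu = BCMENET_MAX_MTU - VLAN_ETH_HLEN = 2046 - 18 = 2028, assuming the usual BCMENET_MAX_MTU of 2046.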
diff --git a/drivers/net/ethernet/broadcom/bgmac-bcma.c b/drivers/net/ethernet/broadcom/bgmac-bcma.c
index c16ec3a51876..4a4ffc0c4c65 100644
--- a/drivers/net/ethernet/broadcom/bgmac-bcma.c
+++ b/drivers/net/ethernet/broadcom/bgmac-bcma.c
@@ -80,6 +80,24 @@ static void bcma_bgmac_cmn_maskset32(struct bgmac *bgmac, u16 offset, u32 mask,
bcma_maskset32(bgmac->bcma.cmn, offset, mask, set);
}
+static int bcma_phy_connect(struct bgmac *bgmac)
+{
+ struct phy_device *phy_dev;
+ char bus_id[MII_BUS_ID_SIZE + 3];
+
+ /* Connect to the PHY */
+ snprintf(bus_id, sizeof(bus_id), PHY_ID_FMT, bgmac->mii_bus->id,
+ bgmac->phyaddr);
+ phy_dev = phy_connect(bgmac->net_dev, bus_id, bgmac_adjust_link,
+ PHY_INTERFACE_MODE_MII);
+ if (IS_ERR(phy_dev)) {
+ dev_err(bgmac->dev, "PHY connection failed\n");
+ return PTR_ERR(phy_dev);
+ }
+
+ return 0;
+}
+
static const struct bcma_device_id bgmac_bcma_tbl[] = {
BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_4706_MAC_GBIT,
BCMA_ANY_REV, BCMA_ANY_CLASS),
@@ -275,6 +293,10 @@ static int bgmac_probe(struct bcma_device *core)
bgmac->cco_ctl_maskset = bcma_bgmac_cco_ctl_maskset;
bgmac->get_bus_clock = bcma_bgmac_get_bus_clock;
bgmac->cmn_maskset32 = bcma_bgmac_cmn_maskset32;
+ if (bgmac->mii_bus)
+ bgmac->phy_connect = bcma_phy_connect;
+ else
+ bgmac->phy_connect = bgmac_phy_connect_direct;
err = bgmac_enet_probe(bgmac);
if (err)
diff --git a/drivers/net/ethernet/broadcom/bgmac-platform.c b/drivers/net/ethernet/broadcom/bgmac-platform.c
index be52f270c2c1..6f736c19872f 100644
--- a/drivers/net/ethernet/broadcom/bgmac-platform.c
+++ b/drivers/net/ethernet/broadcom/bgmac-platform.c
@@ -14,11 +14,21 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/bcma/bcma.h>
+#include <linux/brcmphy.h>
#include <linux/etherdevice.h>
#include <linux/of_address.h>
+#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include "bgmac.h"
+#define NICPM_IOMUX_CTRL 0x00000008
+
+#define NICPM_IOMUX_CTRL_INIT_VAL 0x3196e000
+#define NICPM_IOMUX_CTRL_SPD_SHIFT 10
+#define NICPM_IOMUX_CTRL_SPD_10M 0
+#define NICPM_IOMUX_CTRL_SPD_100M 1
+#define NICPM_IOMUX_CTRL_SPD_1000M 2
+
static u32 platform_bgmac_read(struct bgmac *bgmac, u16 offset)
{
return readl(bgmac->plat.base + offset);
@@ -86,6 +96,54 @@ static void platform_bgmac_cmn_maskset32(struct bgmac *bgmac, u16 offset,
WARN_ON(1);
}
+static void bgmac_nicpm_speed_set(struct net_device *net_dev)
+{
+ struct bgmac *bgmac = netdev_priv(net_dev);
+ u32 val;
+
+ if (!bgmac->plat.nicpm_base)
+ return;
+
+ val = NICPM_IOMUX_CTRL_INIT_VAL;
+ switch (bgmac->net_dev->phydev->speed) {
+ default:
+ netdev_err(net_dev, "Unsupported speed. Defaulting to 1000Mb\n");
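+ /* fall through - default to 1000M */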
+ case SPEED_1000:
+ val |= NICPM_IOMUX_CTRL_SPD_1000M << NICPM_IOMUX_CTRL_SPD_SHIFT;
+ break;
+ case SPEED_100:
+ val |= NICPM_IOMUX_CTRL_SPD_100M << NICPM_IOMUX_CTRL_SPD_SHIFT;
+ break;
+ case SPEED_10:
+ val |= NICPM_IOMUX_CTRL_SPD_10M << NICPM_IOMUX_CTRL_SPD_SHIFT;
+ break;
+ }
+
+ writel(val, bgmac->plat.nicpm_base + NICPM_IOMUX_CTRL);
+
+ bgmac_adjust_link(bgmac->net_dev);
+}
+
+static int platform_phy_connect(struct bgmac *bgmac)
+{
+ struct phy_device *phy_dev;
+
+ if (bgmac->plat.nicpm_base)
+ phy_dev = of_phy_get_and_connect(bgmac->net_dev,
+ bgmac->dev->of_node,
+ bgmac_nicpm_speed_set);
+ else
+ phy_dev = of_phy_get_and_connect(bgmac->net_dev,
+ bgmac->dev->of_node,
+ bgmac_adjust_link);
+ if (!phy_dev) {
+ dev_err(bgmac->dev, "PHY connection failed\n");
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
static int bgmac_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
@@ -102,7 +160,6 @@ static int bgmac_probe(struct platform_device *pdev)
/* Set the features of the 4707 family */
bgmac->feature_flags |= BGMAC_FEAT_CLKCTLST;
bgmac->feature_flags |= BGMAC_FEAT_NO_RESET;
- bgmac->feature_flags |= BGMAC_FEAT_FORCE_SPEED_2500;
bgmac->feature_flags |= BGMAC_FEAT_CMDCFG_SR_REV4;
bgmac->feature_flags |= BGMAC_FEAT_TX_MASK_SETUP;
bgmac->feature_flags |= BGMAC_FEAT_RX_MASK_SETUP;
@@ -142,6 +199,14 @@ static int bgmac_probe(struct platform_device *pdev)
if (IS_ERR(bgmac->plat.idm_base))
return PTR_ERR(bgmac->plat.idm_base);
+ regs = platform_get_resource_byname(pdev, IORESOURCE_MEM, "nicpm_base");
+ if (regs) {
+ bgmac->plat.nicpm_base = devm_ioremap_resource(&pdev->dev,
+ regs);
+ if (IS_ERR(bgmac->plat.nicpm_base))
+ return PTR_ERR(bgmac->plat.nicpm_base);
+ }
+
bgmac->read = platform_bgmac_read;
bgmac->write = platform_bgmac_write;
bgmac->idm_read = platform_bgmac_idm_read;
@@ -151,6 +216,12 @@ static int bgmac_probe(struct platform_device *pdev)
bgmac->cco_ctl_maskset = platform_bgmac_cco_ctl_maskset;
bgmac->get_bus_clock = platform_bgmac_get_bus_clock;
bgmac->cmn_maskset32 = platform_bgmac_cmn_maskset32;
+ if (of_parse_phandle(np, "phy-handle", 0)) {
+ bgmac->phy_connect = platform_phy_connect;
+ } else {
+ bgmac->phy_connect = bgmac_phy_connect_direct;
+ bgmac->feature_flags |= BGMAC_FEAT_FORCE_SPEED_2500;
+ }
return bgmac_enet_probe(bgmac);
}
@@ -167,6 +238,7 @@ static int bgmac_remove(struct platform_device *pdev)
static const struct of_device_id bgmac_of_enet_match[] = {
{.compatible = "brcm,amac",},
{.compatible = "brcm,nsp-amac",},
+ {.compatible = "brcm,ns2-amac",},
{},
};
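Worked example for the IOMUX write above: at 1 Gb/s the register value is NICPM_IOMUX_CTRL_INIT_VAL | (NICPM_IOMUX_CTRL_SPD_1000M << NICPM_IOMUX_CTRL_SPD_SHIFT) = 0x3196e000 | (2 << 10) = 0x3196e800.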
diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c
index 49f4cafe5438..0e066dc6b8cc 100644
--- a/drivers/net/ethernet/broadcom/bgmac.c
+++ b/drivers/net/ethernet/broadcom/bgmac.c
@@ -1085,6 +1085,9 @@ static void bgmac_enable(struct bgmac *bgmac)
/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipinit */
static void bgmac_chip_init(struct bgmac *bgmac)
{
+ /* Clear any erroneously pending interrupts */
+ bgmac_write(bgmac, BGMAC_INT_STATUS, ~0);
+
/* 1 interrupt per received frame */
bgmac_write(bgmac, BGMAC_INT_RECV_LAZY, 1 << BGMAC_IRL_FC_SHIFT);
@@ -1391,7 +1394,7 @@ static const struct ethtool_ops bgmac_ethtool_ops = {
* MII
**************************************************/
-static void bgmac_adjust_link(struct net_device *net_dev)
+void bgmac_adjust_link(struct net_device *net_dev)
{
struct bgmac *bgmac = netdev_priv(net_dev);
struct phy_device *phy_dev = net_dev->phydev;
@@ -1414,8 +1417,9 @@ static void bgmac_adjust_link(struct net_device *net_dev)
phy_print_status(phy_dev);
}
}
+EXPORT_SYMBOL_GPL(bgmac_adjust_link);
-static int bgmac_phy_connect_direct(struct bgmac *bgmac)
+int bgmac_phy_connect_direct(struct bgmac *bgmac)
{
struct fixed_phy_status fphy_status = {
.link = 1,
@@ -1440,24 +1444,7 @@ static int bgmac_phy_connect_direct(struct bgmac *bgmac)
return err;
}
-
-static int bgmac_phy_connect(struct bgmac *bgmac)
-{
- struct phy_device *phy_dev;
- char bus_id[MII_BUS_ID_SIZE + 3];
-
- /* Connect to the PHY */
- snprintf(bus_id, sizeof(bus_id), PHY_ID_FMT, bgmac->mii_bus->id,
- bgmac->phyaddr);
- phy_dev = phy_connect(bgmac->net_dev, bus_id, &bgmac_adjust_link,
- PHY_INTERFACE_MODE_MII);
- if (IS_ERR(phy_dev)) {
- dev_err(bgmac->dev, "PHY connection failed\n");
- return PTR_ERR(phy_dev);
- }
-
- return 0;
-}
+EXPORT_SYMBOL_GPL(bgmac_phy_connect_direct);
int bgmac_enet_probe(struct bgmac *info)
{
@@ -1510,10 +1497,7 @@ int bgmac_enet_probe(struct bgmac *info)
netif_napi_add(net_dev, &bgmac->napi, bgmac_poll, BGMAC_WEIGHT);
- if (!bgmac->mii_bus)
- err = bgmac_phy_connect_direct(bgmac);
- else
- err = bgmac_phy_connect(bgmac);
+ err = bgmac_phy_connect(bgmac);
if (err) {
dev_err(bgmac->dev, "Cannot connect to phy\n");
goto err_dma_free;
diff --git a/drivers/net/ethernet/broadcom/bgmac.h b/drivers/net/ethernet/broadcom/bgmac.h
index 80836b4c9f38..71f493f2451f 100644
--- a/drivers/net/ethernet/broadcom/bgmac.h
+++ b/drivers/net/ethernet/broadcom/bgmac.h
@@ -463,6 +463,7 @@ struct bgmac {
struct {
void *base;
void *idm_base;
+ void *nicpm_base;
} plat;
struct {
struct bcma_device *core;
@@ -513,10 +514,13 @@ struct bgmac {
u32 (*get_bus_clock)(struct bgmac *bgmac);
void (*cmn_maskset32)(struct bgmac *bgmac, u16 offset, u32 mask,
u32 set);
+ int (*phy_connect)(struct bgmac *bgmac);
};
int bgmac_enet_probe(struct bgmac *info);
void bgmac_enet_remove(struct bgmac *bgmac);
+void bgmac_adjust_link(struct net_device *net_dev);
+int bgmac_phy_connect_direct(struct bgmac *bgmac);
struct mii_bus *bcma_mdio_mii_register(struct bcma_device *core, u8 phyaddr);
void bcma_mdio_mii_unregister(struct mii_bus *mii_bus);
@@ -583,4 +587,9 @@ static inline void bgmac_set(struct bgmac *bgmac, u16 offset, u32 set)
{
bgmac_maskset(bgmac, offset, ~0, set);
}
+
+static inline int bgmac_phy_connect(struct bgmac *bgmac)
+{
+ return bgmac->phy_connect(bgmac);
+}
#endif /* _BGMAC_H */
diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c
index 1f7034d739b0..d5d1026be4b7 100644
--- a/drivers/net/ethernet/broadcom/bnx2.c
+++ b/drivers/net/ethernet/broadcom/bnx2.c
@@ -254,13 +254,10 @@ static inline u32 bnx2_tx_avail(struct bnx2 *bp, struct bnx2_tx_ring_info *txr)
{
u32 diff;
- /* Tell compiler to fetch tx_prod and tx_cons from memory. */
- barrier();
-
/* The ring uses 256 indices for 255 entries; one of them
* needs to be skipped.
*/
- diff = txr->tx_prod - txr->tx_cons;
+ diff = READ_ONCE(txr->tx_prod) - READ_ONCE(txr->tx_cons);
if (unlikely(diff >= BNX2_TX_DESC_CNT)) {
diff &= 0xffff;
if (diff == BNX2_TX_DESC_CNT)
@@ -2304,7 +2301,7 @@ bnx2_init_5706s_phy(struct bnx2 *bp, int reset_phy)
if (BNX2_CHIP(bp) == BNX2_CHIP_5706)
BNX2_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300);
- if (bp->dev->mtu > 1500) {
+ if (bp->dev->mtu > ETH_DATA_LEN) {
u32 val;
/* Set extended packet length bit */
@@ -2358,7 +2355,7 @@ bnx2_init_copper_phy(struct bnx2 *bp, int reset_phy)
bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val);
}
- if (bp->dev->mtu > 1500) {
+ if (bp->dev->mtu > ETH_DATA_LEN) {
/* Set extended packet length bit */
bnx2_write_phy(bp, 0x18, 0x7);
bnx2_read_phy(bp, 0x18, &val);
@@ -2839,10 +2836,8 @@ bnx2_get_hw_tx_cons(struct bnx2_napi *bnapi)
{
u16 cons;
- /* Tell compiler that status block fields can change. */
- barrier();
- cons = *bnapi->hw_tx_cons_ptr;
- barrier();
+ cons = READ_ONCE(*bnapi->hw_tx_cons_ptr);
+
if (unlikely((cons & BNX2_MAX_TX_DESC_CNT) == BNX2_MAX_TX_DESC_CNT))
cons++;
return cons;
@@ -3141,10 +3136,8 @@ bnx2_get_hw_rx_cons(struct bnx2_napi *bnapi)
{
u16 cons;
- /* Tell compiler that status block fields can change. */
- barrier();
- cons = *bnapi->hw_rx_cons_ptr;
- barrier();
+ cons = READ_ONCE(*bnapi->hw_rx_cons_ptr);
+
if (unlikely((cons & BNX2_MAX_RX_DESC_CNT) == BNX2_MAX_RX_DESC_CNT))
cons++;
return cons;
@@ -5007,12 +5000,12 @@ bnx2_init_chip(struct bnx2 *bp)
/* Program the MTU. Also include 4 bytes for CRC32. */
mtu = bp->dev->mtu;
val = mtu + ETH_HLEN + ETH_FCS_LEN;
- if (val > (MAX_ETHERNET_PACKET_SIZE + 4))
+ if (val > (MAX_ETHERNET_PACKET_SIZE + ETH_HLEN + 4))
val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
BNX2_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);
- if (mtu < 1500)
- mtu = 1500;
+ if (mtu < ETH_DATA_LEN)
+ mtu = ETH_DATA_LEN;
bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG, BNX2_RBUF_CONFIG_VAL(mtu));
bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG2, BNX2_RBUF_CONFIG2_VAL(mtu));
@@ -6904,12 +6897,14 @@ bnx2_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *net_stats)
/* All ethtool functions called with rtnl_lock */
static int
-bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+bnx2_get_link_ksettings(struct net_device *dev,
+ struct ethtool_link_ksettings *cmd)
{
struct bnx2 *bp = netdev_priv(dev);
int support_serdes = 0, support_copper = 0;
+ u32 supported, advertising;
- cmd->supported = SUPPORTED_Autoneg;
+ supported = SUPPORTED_Autoneg;
if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
support_serdes = 1;
support_copper = 1;
@@ -6919,56 +6914,59 @@ bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
support_copper = 1;
if (support_serdes) {
- cmd->supported |= SUPPORTED_1000baseT_Full |
+ supported |= SUPPORTED_1000baseT_Full |
SUPPORTED_FIBRE;
if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
- cmd->supported |= SUPPORTED_2500baseX_Full;
-
+ supported |= SUPPORTED_2500baseX_Full;
}
if (support_copper) {
- cmd->supported |= SUPPORTED_10baseT_Half |
+ supported |= SUPPORTED_10baseT_Half |
SUPPORTED_10baseT_Full |
SUPPORTED_100baseT_Half |
SUPPORTED_100baseT_Full |
SUPPORTED_1000baseT_Full |
SUPPORTED_TP;
-
}
spin_lock_bh(&bp->phy_lock);
- cmd->port = bp->phy_port;
- cmd->advertising = bp->advertising;
+ cmd->base.port = bp->phy_port;
+ advertising = bp->advertising;
if (bp->autoneg & AUTONEG_SPEED) {
- cmd->autoneg = AUTONEG_ENABLE;
+ cmd->base.autoneg = AUTONEG_ENABLE;
} else {
- cmd->autoneg = AUTONEG_DISABLE;
+ cmd->base.autoneg = AUTONEG_DISABLE;
}
if (netif_carrier_ok(dev)) {
- ethtool_cmd_speed_set(cmd, bp->line_speed);
- cmd->duplex = bp->duplex;
+ cmd->base.speed = bp->line_speed;
+ cmd->base.duplex = bp->duplex;
if (!(bp->phy_flags & BNX2_PHY_FLAG_SERDES)) {
if (bp->phy_flags & BNX2_PHY_FLAG_MDIX)
- cmd->eth_tp_mdix = ETH_TP_MDI_X;
+ cmd->base.eth_tp_mdix = ETH_TP_MDI_X;
else
- cmd->eth_tp_mdix = ETH_TP_MDI;
+ cmd->base.eth_tp_mdix = ETH_TP_MDI;
}
}
else {
- ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
- cmd->duplex = DUPLEX_UNKNOWN;
+ cmd->base.speed = SPEED_UNKNOWN;
+ cmd->base.duplex = DUPLEX_UNKNOWN;
}
spin_unlock_bh(&bp->phy_lock);
- cmd->transceiver = XCVR_INTERNAL;
- cmd->phy_address = bp->phy_addr;
+ cmd->base.phy_address = bp->phy_addr;
+
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
+ supported);
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
+ advertising);
return 0;
}
static int
-bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+bnx2_set_link_ksettings(struct net_device *dev,
+ const struct ethtool_link_ksettings *cmd)
{
struct bnx2 *bp = netdev_priv(dev);
u8 autoneg = bp->autoneg;
@@ -6979,24 +6977,26 @@ bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
spin_lock_bh(&bp->phy_lock);
- if (cmd->port != PORT_TP && cmd->port != PORT_FIBRE)
+ if (cmd->base.port != PORT_TP && cmd->base.port != PORT_FIBRE)
goto err_out_unlock;
- if (cmd->port != bp->phy_port &&
+ if (cmd->base.port != bp->phy_port &&
!(bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP))
goto err_out_unlock;
/* If device is down, we can store the settings only if the user
* is setting the currently active port.
*/
- if (!netif_running(dev) && cmd->port != bp->phy_port)
+ if (!netif_running(dev) && cmd->base.port != bp->phy_port)
goto err_out_unlock;
- if (cmd->autoneg == AUTONEG_ENABLE) {
+ if (cmd->base.autoneg == AUTONEG_ENABLE) {
autoneg |= AUTONEG_SPEED;
- advertising = cmd->advertising;
- if (cmd->port == PORT_TP) {
+ ethtool_convert_link_mode_to_legacy_u32(
+ &advertising, cmd->link_modes.advertising);
+
+ if (cmd->base.port == PORT_TP) {
advertising &= ETHTOOL_ALL_COPPER_SPEED;
if (!advertising)
advertising = ETHTOOL_ALL_COPPER_SPEED;
@@ -7008,11 +7008,12 @@ bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
advertising |= ADVERTISED_Autoneg;
}
else {
- u32 speed = ethtool_cmd_speed(cmd);
- if (cmd->port == PORT_FIBRE) {
+ u32 speed = cmd->base.speed;
+
+ if (cmd->base.port == PORT_FIBRE) {
if ((speed != SPEED_1000 &&
speed != SPEED_2500) ||
- (cmd->duplex != DUPLEX_FULL))
+ (cmd->base.duplex != DUPLEX_FULL))
goto err_out_unlock;
if (speed == SPEED_2500 &&
@@ -7023,7 +7024,7 @@ bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
autoneg &= ~AUTONEG_SPEED;
req_line_speed = speed;
- req_duplex = cmd->duplex;
+ req_duplex = cmd->base.duplex;
advertising = 0;
}
@@ -7037,7 +7038,7 @@ bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
* brought up.
*/
if (netif_running(dev))
- err = bnx2_setup_phy(bp, cmd->port);
+ err = bnx2_setup_phy(bp, cmd->base.port);
err_out_unlock:
spin_unlock_bh(&bp->phy_lock);
@@ -7822,8 +7823,6 @@ static int bnx2_set_channels(struct net_device *dev,
}
static const struct ethtool_ops bnx2_ethtool_ops = {
- .get_settings = bnx2_get_settings,
- .set_settings = bnx2_set_settings,
.get_drvinfo = bnx2_get_drvinfo,
.get_regs_len = bnx2_get_regs_len,
.get_regs = bnx2_get_regs,
@@ -7847,6 +7846,8 @@ static const struct ethtool_ops bnx2_ethtool_ops = {
.get_sset_count = bnx2_get_sset_count,
.get_channels = bnx2_get_channels,
.set_channels = bnx2_set_channels,
+ .get_link_ksettings = bnx2_get_link_ksettings,
+ .set_link_ksettings = bnx2_set_link_ksettings,
};
/* Called with rtnl_lock */
@@ -7923,10 +7924,6 @@ bnx2_change_mtu(struct net_device *dev, int new_mtu)
{
struct bnx2 *bp = netdev_priv(dev);
- if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) ||
- ((new_mtu + ETH_HLEN) < MIN_ETHERNET_PACKET_SIZE))
- return -EINVAL;
-
dev->mtu = new_mtu;
return bnx2_change_ring_size(bp, bp->rx_ring_size, bp->tx_ring_size,
false);
@@ -8619,6 +8616,8 @@ bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
dev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
dev->features |= dev->hw_features;
dev->priv_flags |= IFF_UNICAST_FLT;
+ dev->min_mtu = MIN_ETHERNET_PACKET_SIZE;
+ dev->max_mtu = MAX_ETHERNET_JUMBO_PACKET_SIZE;
if (!(bp->flags & BNX2_FLAG_CAN_KEEP_VLAN))
dev->hw_features &= ~NETIF_F_HW_VLAN_CTAG_RX;
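The barrier()-to-READ_ONCE() conversions above trade a full compiler barrier for a single annotated load. A sketch of the two idioms side by side:

u16 cons;

/* old: force the compiler to refetch everything around the read */
barrier();
cons = *bnapi->hw_tx_cons_ptr;
barrier();

/* new: one volatile, tear-free load of just this location */
cons = READ_ONCE(*bnapi->hw_tx_cons_ptr);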
diff --git a/drivers/net/ethernet/broadcom/bnx2.h b/drivers/net/ethernet/broadcom/bnx2.h
index 380234d72b95..a09ec47461c9 100644
--- a/drivers/net/ethernet/broadcom/bnx2.h
+++ b/drivers/net/ethernet/broadcom/bnx2.h
@@ -6530,9 +6530,9 @@ struct l2_fhdr {
#define MII_BNX2_AER_AER_AN_MMD 0x3800
#define MII_BNX2_BLK_ADDR_COMBO_IEEEB0 0xffe0
-#define MIN_ETHERNET_PACKET_SIZE 60
-#define MAX_ETHERNET_PACKET_SIZE 1514
-#define MAX_ETHERNET_JUMBO_PACKET_SIZE 9014
+#define MIN_ETHERNET_PACKET_SIZE (ETH_ZLEN - ETH_HLEN)
+#define MAX_ETHERNET_PACKET_SIZE ETH_DATA_LEN
+#define MAX_ETHERNET_JUMBO_PACKET_SIZE 9000
#define BNX2_RX_COPY_THRESH 128
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
index 7dd7490fdac1..0a23034bbe3f 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
@@ -1396,9 +1396,9 @@ struct bnx2x {
int tx_ring_size;
/* L2 header size + 2*VLANs (8 bytes) + LLC SNAP (8 bytes) */
-#define ETH_OVREHEAD (ETH_HLEN + 8 + 8)
-#define ETH_MIN_PACKET_SIZE 60
-#define ETH_MAX_PACKET_SIZE 1500
+#define ETH_OVERHEAD (ETH_HLEN + 8 + 8)
+#define ETH_MIN_PACKET_SIZE (ETH_ZLEN - ETH_HLEN)
+#define ETH_MAX_PACKET_SIZE ETH_DATA_LEN
#define ETH_MAX_JUMBO_PACKET_SIZE 9600
/* TCP with Timestamp Option (32) + IPv6 (40) */
#define ETH_MAX_TPA_HEADER_SIZE 72
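Apart from fixing the OVREHEAD spelling, the constants keep their old values: ETH_OVERHEAD = 14 + 8 + 8 = 30 bytes, and ETH_MIN_PACKET_SIZE = ETH_ZLEN - ETH_HLEN = 60 - 14 = 46 bytes of payload.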
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index 0a9108cd4c45..3e199d3e461e 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -724,7 +724,7 @@ static void bnx2x_gro_ipv6_csum(struct bnx2x *bp, struct sk_buff *skb)
static void bnx2x_gro_csum(struct bnx2x *bp, struct sk_buff *skb,
void (*gro_func)(struct bnx2x*, struct sk_buff*))
{
- skb_set_network_header(skb, 0);
+ skb_reset_network_header(skb);
gro_func(bp, skb);
tcp_gro_complete(skb);
}
@@ -2023,7 +2023,7 @@ static void bnx2x_set_rx_buf_size(struct bnx2x *bp)
mtu = bp->dev->mtu;
fp->rx_buf_size = BNX2X_FW_RX_ALIGN_START +
IP_HEADER_ALIGNMENT_PADDING +
- ETH_OVREHEAD +
+ ETH_OVERHEAD +
mtu +
BNX2X_FW_RX_ALIGN_END;
/* Note : rx_buf_size doesn't take into account NET_SKB_PAD */
@@ -3248,13 +3248,14 @@ static int bnx2x_poll(struct napi_struct *napi, int budget)
rmb();
if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
- napi_complete(napi);
- /* Re-enable interrupts */
- DP(NETIF_MSG_RX_STATUS,
- "Update index to %d\n", fp->fp_hc_idx);
- bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID,
- le16_to_cpu(fp->fp_hc_idx),
- IGU_INT_ENABLE, 1);
+ if (napi_complete_done(napi, rx_work_done)) {
+ /* Re-enable interrupts */
+ DP(NETIF_MSG_RX_STATUS,
+ "Update index to %d\n", fp->fp_hc_idx);
+ bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID,
+ le16_to_cpu(fp->fp_hc_idx),
+ IGU_INT_ENABLE, 1);
+ }
} else {
rx_work_done = budget;
}
@@ -4855,12 +4856,6 @@ int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
return -EAGAIN;
}
- if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
- ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE)) {
- BNX2X_ERR("Can't support requested MTU size\n");
- return -EINVAL;
- }
-
/* This does not race with packet allocation
* because the actual alloc size is
* only updated as part of load
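
Annotation (illustrative, not part of the patch): napi_complete_done()
reports how much work was done and may decline the completion (for
example under busy polling), so the interrupt re-arm above is now gated
on its return value. A hedged sketch of the resulting poll-handler
shape; do_rx_work() and reenable_irq() are hypothetical stand-ins for
the driver's own logic:

static int example_poll(struct napi_struct *napi, int budget)
{
	int work_done = do_rx_work(napi, budget);

	if (work_done < budget) {
		/* Only re-arm the device interrupt if the core
		 * actually completed NAPI for us. */
		if (napi_complete_done(napi, work_done))
			reenable_irq(napi);
	}
	return work_done;
}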
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
index 1fb80100e5e7..05356efdbf93 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
@@ -34,12 +34,6 @@ typedef int (*read_sfp_module_eeprom_func_p)(struct bnx2x_phy *phy,
u8 dev_addr, u16 addr, u8 byte_cnt,
u8 *o_buf, u8);
/********************************************************/
-#define ETH_HLEN 14
-/* L2 header size + 2*VLANs (8 bytes) + LLC SNAP (8 bytes) */
-#define ETH_OVREHEAD (ETH_HLEN + 8 + 8)
-#define ETH_MIN_PACKET_SIZE 60
-#define ETH_MAX_PACKET_SIZE 1500
-#define ETH_MAX_JUMBO_PACKET_SIZE 9600
#define MDIO_ACCESS_TIMEOUT 1000
#define WC_LANE_MAX 4
#define I2C_SWITCH_WIDTH 2
@@ -1917,7 +1911,7 @@ static int bnx2x_emac_enable(struct link_params *params,
/* Enable emac for jumbo packets */
EMAC_WR(bp, EMAC_REG_EMAC_RX_MTU_SIZE,
(EMAC_RX_MTU_SIZE_JUMBO_ENA |
- (ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD)));
+ (ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVERHEAD)));
/* Strip CRC */
REG_WR(bp, NIG_REG_NIG_INGRESS_EMAC0_NO_CRC + port*4, 0x1);
@@ -2314,19 +2308,19 @@ static int bnx2x_bmac1_enable(struct link_params *params,
REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_BMAC_CONTROL, wb_data, 2);
/* Set rx mtu */
- wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD;
+ wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVERHEAD;
wb_data[1] = 0;
REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_RX_MAX_SIZE, wb_data, 2);
bnx2x_update_pfc_bmac1(params, vars);
/* Set tx mtu */
- wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD;
+ wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVERHEAD;
wb_data[1] = 0;
REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_TX_MAX_SIZE, wb_data, 2);
/* Set cnt max size */
- wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD;
+ wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVERHEAD;
wb_data[1] = 0;
REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_CNT_MAX_SIZE, wb_data, 2);
@@ -2384,18 +2378,18 @@ static int bnx2x_bmac2_enable(struct link_params *params,
udelay(30);
/* Set RX MTU */
- wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD;
+ wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVERHEAD;
wb_data[1] = 0;
REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_RX_MAX_SIZE, wb_data, 2);
udelay(30);
/* Set TX MTU */
- wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD;
+ wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVERHEAD;
wb_data[1] = 0;
REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_TX_MAX_SIZE, wb_data, 2);
udelay(30);
/* Set cnt max size */
- wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD - 2;
+ wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVERHEAD - 2;
wb_data[1] = 0;
REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_CNT_MAX_SIZE, wb_data, 2);
udelay(30);
@@ -2516,7 +2510,7 @@ static int bnx2x_pbf_update(struct link_params *params, u32 flow_ctrl,
} else {
u32 thresh = (ETH_MAX_JUMBO_PACKET_SIZE +
- ETH_OVREHEAD)/16;
+ ETH_OVERHEAD)/16;
REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
/* Update threshold */
REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, thresh);
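
Annotation (illustrative, not part of the patch): the PBF threshold
above is the maximum frame (jumbo MTU plus L2 overhead) expressed in
16-byte units, rounded down by integer division. A quick standalone
check of the arithmetic:

#include <stdio.h>

int main(void)
{
	int overhead = 14 + 8 + 8;		/* ETH_OVERHEAD: header + 2 VLANs + LLC SNAP */
	int thresh = (9600 + overhead) / 16;	/* = 601 */

	printf("pause threshold: %d x 16-byte units\n", thresh);
	return 0;
}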
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index 4febe60eadc2..688617ac8c29 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -12080,8 +12080,7 @@ static int bnx2x_get_hwinfo(struct bnx2x *bp)
mtu_size, mtu);
/* if valid: update device mtu */
- if (((mtu_size + ETH_HLEN) >=
- ETH_MIN_PACKET_SIZE) &&
+ if ((mtu_size >= ETH_MIN_PACKET_SIZE) &&
(mtu_size <=
ETH_MAX_JUMBO_PACKET_SIZE))
bp->dev->mtu = mtu_size;
@@ -13315,6 +13314,10 @@ static int bnx2x_init_dev(struct bnx2x *bp, struct pci_dev *pdev,
dev->dcbnl_ops = &bnx2x_dcbnl_ops;
#endif
+ /* MTU range, 46 - 9600 */
+ dev->min_mtu = ETH_MIN_PACKET_SIZE;
+ dev->max_mtu = ETH_MAX_JUMBO_PACKET_SIZE;
+
/* get_port_hwinfo() will set prtad and mmds properly */
bp->mdio.prtad = MDIO_PRTAD_NONE;
bp->mdio.mmds = 0;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
index 3f77d0863543..6fad22adbbb9 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
@@ -585,7 +585,7 @@ int bnx2x_vf_mcast(struct bnx2x *bp, struct bnx2x_virtf *vf,
mcast.mcast_list_len = mc_num;
rc = bnx2x_config_mcast(bp, &mcast, BNX2X_MCAST_CMD_SET);
if (rc)
- BNX2X_ERR("Faled to set multicasts\n");
+ BNX2X_ERR("Failed to set multicasts\n");
} else {
/* clear existing mcasts */
rc = bnx2x_config_mcast(bp, &mcast, BNX2X_MCAST_CMD_DEL);
diff --git a/drivers/net/ethernet/broadcom/bnxt/Makefile b/drivers/net/ethernet/broadcom/bnxt/Makefile
index 97e78e217928..6082ed1b5ea0 100644
--- a/drivers/net/ethernet/broadcom/bnxt/Makefile
+++ b/drivers/net/ethernet/broadcom/bnxt/Makefile
@@ -1,3 +1,3 @@
obj-$(CONFIG_BNXT) += bnxt_en.o
-bnxt_en-y := bnxt.o bnxt_sriov.o bnxt_ethtool.o
+bnxt_en-y := bnxt.o bnxt_sriov.o bnxt_ethtool.o bnxt_dcb.o bnxt_ulp.o
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index f08a20b921e7..9608cb49a11c 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -52,8 +52,10 @@
#include "bnxt_hsi.h"
#include "bnxt.h"
+#include "bnxt_ulp.h"
#include "bnxt_sriov.h"
#include "bnxt_ethtool.h"
+#include "bnxt_dcb.h"
#define BNXT_TX_TIMEOUT (5 * HZ)
@@ -186,11 +188,11 @@ static const u16 bnxt_vf_req_snif[] = {
};
static const u16 bnxt_async_events_arr[] = {
- HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE,
- HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD,
- HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED,
- HWRM_ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE,
- HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE,
+ ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE,
+ ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD,
+ ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED,
+ ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE,
+ ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE,
};
static bool bnxt_vf_pciid(enum board_idx idx)
@@ -1476,8 +1478,8 @@ next_rx_no_prod:
}
#define BNXT_GET_EVENT_PORT(data) \
- ((data) & \
- HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_MASK)
+ ((data) & \
+ ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_MASK)
static int bnxt_async_event_process(struct bnxt *bp,
struct hwrm_async_event_cmpl *cmpl)
@@ -1486,7 +1488,7 @@ static int bnxt_async_event_process(struct bnxt *bp,
/* TODO CHIMP_FW: Define event id's for link change, error etc */
switch (event_id) {
- case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE: {
+ case ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE: {
u32 data1 = le32_to_cpu(cmpl->event_data1);
struct bnxt_link_info *link_info = &bp->link_info;
@@ -1499,15 +1501,16 @@ static int bnxt_async_event_process(struct bnxt *bp,
netdev_warn(bp->dev, "Link speed %d no longer supported\n",
speed);
}
+ set_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT, &bp->sp_event);
/* fall thru */
}
- case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE:
+ case ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE:
set_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event);
break;
- case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD:
+ case ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD:
set_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event);
break;
- case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED: {
+ case ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED: {
u32 data1 = le32_to_cpu(cmpl->event_data1);
u16 port_id = BNXT_GET_EVENT_PORT(data1);
@@ -1520,18 +1523,17 @@ static int bnxt_async_event_process(struct bnxt *bp,
set_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event);
break;
}
- case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE:
+ case ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE:
if (BNXT_PF(bp))
goto async_event_process_exit;
set_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event);
break;
default:
- netdev_err(bp->dev, "unhandled ASYNC event (id 0x%x)\n",
- event_id);
goto async_event_process_exit;
}
schedule_work(&bp->sp_task);
async_event_process_exit:
+ bnxt_ulp_async_events(bp, cmpl);
return 0;
}
@@ -3115,27 +3117,46 @@ int hwrm_send_message_silent(struct bnxt *bp, void *msg, u32 msg_len,
return rc;
}
-static int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp)
+int bnxt_hwrm_func_rgtr_async_events(struct bnxt *bp, unsigned long *bmap,
+ int bmap_size)
{
struct hwrm_func_drv_rgtr_input req = {0};
- int i;
DECLARE_BITMAP(async_events_bmap, 256);
u32 *events = (u32 *)async_events_bmap;
+ int i;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_RGTR, -1, -1);
req.enables =
- cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_OS_TYPE |
- FUNC_DRV_RGTR_REQ_ENABLES_VER |
- FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD);
+ cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD);
memset(async_events_bmap, 0, sizeof(async_events_bmap));
for (i = 0; i < ARRAY_SIZE(bnxt_async_events_arr); i++)
__set_bit(bnxt_async_events_arr[i], async_events_bmap);
+ if (bmap && bmap_size) {
+ for (i = 0; i < bmap_size; i++) {
+ if (test_bit(i, bmap))
+ __set_bit(i, async_events_bmap);
+ }
+ }
+
for (i = 0; i < 8; i++)
req.async_event_fwd[i] |= cpu_to_le32(events[i]);
+ return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+}
+
+static int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp)
+{
+ struct hwrm_func_drv_rgtr_input req = {0};
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_RGTR, -1, -1);
+
+ req.enables =
+ cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_OS_TYPE |
+ FUNC_DRV_RGTR_REQ_ENABLES_VER);
+
req.os_type = cpu_to_le16(FUNC_DRV_RGTR_REQ_OS_TYPE_LINUX);
req.ver_maj = DRV_VER_MAJ;
req.ver_min = DRV_VER_MIN;
@@ -3144,6 +3165,7 @@ static int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp)
if (BNXT_PF(bp)) {
DECLARE_BITMAP(vf_req_snif_bmap, 256);
u32 *data = (u32 *)vf_req_snif_bmap;
+ int i;
memset(vf_req_snif_bmap, 0, sizeof(vf_req_snif_bmap));
for (i = 0; i < ARRAY_SIZE(bnxt_vf_req_snif); i++)
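
Annotation (illustrative, not part of the patch): the registration
packs a 256-bit event bitmap into the eight 32-bit words of
req.async_event_fwd[]. A standalone rehearsal of that packing, with a
plain uint32_t array standing in for DECLARE_BITMAP()/__set_bit(); the
two sample event IDs match the hsi header later in this diff:

#include <stdint.h>
#include <stdio.h>

static uint32_t bmap[256 / 32];		/* 256 event IDs, one bit each */

static void set_event(unsigned int id)
{
	bmap[id / 32] |= 1u << (id % 32);
}

int main(void)
{
	set_event(0x0);		/* ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE */
	set_event(0x20);	/* ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD */

	for (int i = 0; i < 8; i++)	/* word i -> async_event_fwd[i] */
		printf("word %d = 0x%08x\n", i, (unsigned int)bmap[i]);
	return 0;
}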
@@ -3433,13 +3455,7 @@ static int bnxt_hwrm_vnic_set_rss(struct bnxt *bp, u16 vnic_id, bool set_rss)
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_CFG, -1, -1);
if (set_rss) {
- vnic->hash_type = VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4 |
- VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4 |
- VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6 |
- VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6;
-
- req.hash_type = cpu_to_le32(vnic->hash_type);
-
+ req.hash_type = cpu_to_le32(bp->rss_hash_cfg);
if (vnic->flags & BNXT_VNIC_RSS_FLAG) {
if (BNXT_CHIP_TYPE_NITRO_A0(bp))
max_rings = bp->rx_nr_rings - 1;
@@ -3531,7 +3547,7 @@ static int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, u16 vnic_id, u16 ctx_idx)
return rc;
}
-static int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id)
+int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id)
{
unsigned int ring = 0, grp_idx;
struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
@@ -3579,6 +3595,9 @@ static int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id)
#endif
if ((bp->flags & BNXT_FLAG_STRIP_VLAN) || def_vlan)
req.flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_VLAN_STRIP_MODE);
+ if (!vnic_id && bnxt_ulp_registered(bp->edev, BNXT_ROCE_ULP))
+ req.flags |=
+ cpu_to_le32(VNIC_CFG_REQ_FLAGS_ROCE_DUAL_VNIC_MODE);
return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}
@@ -4156,7 +4175,7 @@ func_qcfg_exit:
return rc;
}
-int bnxt_hwrm_func_qcaps(struct bnxt *bp)
+static int bnxt_hwrm_func_qcaps(struct bnxt *bp)
{
int rc = 0;
struct hwrm_func_qcaps_input req = {0};
@@ -4170,6 +4189,11 @@ int bnxt_hwrm_func_qcaps(struct bnxt *bp)
if (rc)
goto hwrm_func_qcaps_exit;
+ if (resp->flags & cpu_to_le32(FUNC_QCAPS_RESP_FLAGS_ROCE_V1_SUPPORTED))
+ bp->flags |= BNXT_FLAG_ROCEV1_CAP;
+ if (resp->flags & cpu_to_le32(FUNC_QCAPS_RESP_FLAGS_ROCE_V2_SUPPORTED))
+ bp->flags |= BNXT_FLAG_ROCEV2_CAP;
+
bp->tx_push_thresh = 0;
if (resp->flags &
cpu_to_le32(FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED))
@@ -4266,12 +4290,16 @@ static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
goto qportcfg_exit;
}
bp->max_tc = resp->max_configurable_queues;
+ bp->max_lltc = resp->max_configurable_lossless_queues;
if (bp->max_tc > BNXT_MAX_QUEUE)
bp->max_tc = BNXT_MAX_QUEUE;
if (resp->queue_cfg_info & QUEUE_QPORTCFG_RESP_QUEUE_CFG_INFO_ASYM_CFG)
bp->max_tc = 1;
+ if (bp->max_lltc > bp->max_tc)
+ bp->max_lltc = bp->max_tc;
+
qptr = &resp->queue_id0;
for (i = 0; i < bp->max_tc; i++) {
bp->q_info[i].queue_id = *qptr++;
@@ -4743,16 +4771,134 @@ static int bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max,
return 0;
}
-static int bnxt_setup_msix(struct bnxt *bp)
+static void bnxt_setup_msix(struct bnxt *bp)
{
- struct msix_entry *msix_ent;
+ const int len = sizeof(bp->irq_tbl[0].name);
struct net_device *dev = bp->dev;
- int i, total_vecs, rc = 0, min = 1;
+ int tcs, i;
+
+ tcs = netdev_get_num_tc(dev);
+ if (tcs > 1) {
+ bp->tx_nr_rings_per_tc = bp->tx_nr_rings / tcs;
+ if (bp->tx_nr_rings_per_tc == 0) {
+ netdev_reset_tc(dev);
+ bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
+ } else {
+ int i, off, count;
+
+ bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tcs;
+ for (i = 0; i < tcs; i++) {
+ count = bp->tx_nr_rings_per_tc;
+ off = i * count;
+ netdev_set_tc_queue(dev, i, count, off);
+ }
+ }
+ }
+
+ for (i = 0; i < bp->cp_nr_rings; i++) {
+ char *attr;
+
+ if (bp->flags & BNXT_FLAG_SHARED_RINGS)
+ attr = "TxRx";
+ else if (i < bp->rx_nr_rings)
+ attr = "rx";
+ else
+ attr = "tx";
+
+ snprintf(bp->irq_tbl[i].name, len, "%s-%s-%d", dev->name, attr,
+ i);
+ bp->irq_tbl[i].handler = bnxt_msix;
+ }
+}
+
+static void bnxt_setup_inta(struct bnxt *bp)
+{
const int len = sizeof(bp->irq_tbl[0].name);
- bp->flags &= ~BNXT_FLAG_USING_MSIX;
- total_vecs = bp->cp_nr_rings;
+ if (netdev_get_num_tc(bp->dev))
+ netdev_reset_tc(bp->dev);
+
+ snprintf(bp->irq_tbl[0].name, len, "%s-%s-%d", bp->dev->name, "TxRx",
+ 0);
+ bp->irq_tbl[0].handler = bnxt_inta;
+}
+
+static int bnxt_setup_int_mode(struct bnxt *bp)
+{
+ int rc;
+
+ if (bp->flags & BNXT_FLAG_USING_MSIX)
+ bnxt_setup_msix(bp);
+ else
+ bnxt_setup_inta(bp);
+ rc = bnxt_set_real_num_queues(bp);
+ return rc;
+}
+
+unsigned int bnxt_get_max_func_stat_ctxs(struct bnxt *bp)
+{
+#if defined(CONFIG_BNXT_SRIOV)
+ if (BNXT_VF(bp))
+ return bp->vf.max_stat_ctxs;
+#endif
+ return bp->pf.max_stat_ctxs;
+}
+
+void bnxt_set_max_func_stat_ctxs(struct bnxt *bp, unsigned int max)
+{
+#if defined(CONFIG_BNXT_SRIOV)
+ if (BNXT_VF(bp))
+ bp->vf.max_stat_ctxs = max;
+ else
+#endif
+ bp->pf.max_stat_ctxs = max;
+}
+
+unsigned int bnxt_get_max_func_cp_rings(struct bnxt *bp)
+{
+#if defined(CONFIG_BNXT_SRIOV)
+ if (BNXT_VF(bp))
+ return bp->vf.max_cp_rings;
+#endif
+ return bp->pf.max_cp_rings;
+}
+
+void bnxt_set_max_func_cp_rings(struct bnxt *bp, unsigned int max)
+{
+#if defined(CONFIG_BNXT_SRIOV)
+ if (BNXT_VF(bp))
+ bp->vf.max_cp_rings = max;
+ else
+#endif
+ bp->pf.max_cp_rings = max;
+}
+
+static unsigned int bnxt_get_max_func_irqs(struct bnxt *bp)
+{
+#if defined(CONFIG_BNXT_SRIOV)
+ if (BNXT_VF(bp))
+ return bp->vf.max_irqs;
+#endif
+ return bp->pf.max_irqs;
+}
+
+void bnxt_set_max_func_irqs(struct bnxt *bp, unsigned int max_irqs)
+{
+#if defined(CONFIG_BNXT_SRIOV)
+ if (BNXT_VF(bp))
+ bp->vf.max_irqs = max_irqs;
+ else
+#endif
+ bp->pf.max_irqs = max_irqs;
+}
+
+static int bnxt_init_msix(struct bnxt *bp)
+{
+ int i, total_vecs, rc = 0, min = 1;
+ struct msix_entry *msix_ent;
+
+ total_vecs = bnxt_get_max_func_irqs(bp);
msix_ent = kcalloc(total_vecs, sizeof(struct msix_entry), GFP_KERNEL);
if (!msix_ent)
return -ENOMEM;
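
Annotation (illustrative, not part of the patch): the TC logic moved
into bnxt_setup_msix() splits the tx rings evenly across traffic
classes, handing each TC a contiguous (offset, count) slice via
netdev_set_tc_queue(). A standalone rehearsal of that partitioning:

#include <stdio.h>

int main(void)
{
	int tx_nr_rings = 8, tcs = 4;
	int per_tc = tx_nr_rings / tcs;	/* 0 would force netdev_reset_tc() */

	for (int i = 0; i < tcs; i++)	/* -> netdev_set_tc_queue(dev, i, count, off) */
		printf("tc %d: count %d off %d\n", i, per_tc, i * per_tc);
	return 0;
}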
@@ -4773,8 +4919,10 @@ static int bnxt_setup_msix(struct bnxt *bp)
bp->irq_tbl = kcalloc(total_vecs, sizeof(struct bnxt_irq), GFP_KERNEL);
if (bp->irq_tbl) {
- int tcs;
+ for (i = 0; i < total_vecs; i++)
+ bp->irq_tbl[i].vector = msix_ent[i].vector;
+ bp->total_irqs = total_vecs;
/* Trim rings based upon num of vectors allocated */
rc = bnxt_trim_rings(bp, &bp->rx_nr_rings, &bp->tx_nr_rings,
total_vecs, min == 1);
@@ -4782,43 +4930,10 @@ static int bnxt_setup_msix(struct bnxt *bp)
goto msix_setup_exit;
bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
- tcs = netdev_get_num_tc(dev);
- if (tcs > 1) {
- bp->tx_nr_rings_per_tc = bp->tx_nr_rings / tcs;
- if (bp->tx_nr_rings_per_tc == 0) {
- netdev_reset_tc(dev);
- bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
- } else {
- int i, off, count;
-
- bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tcs;
- for (i = 0; i < tcs; i++) {
- count = bp->tx_nr_rings_per_tc;
- off = i * count;
- netdev_set_tc_queue(dev, i, count, off);
- }
- }
- }
- bp->cp_nr_rings = total_vecs;
+ bp->cp_nr_rings = (min == 1) ?
+ max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) :
+ bp->tx_nr_rings + bp->rx_nr_rings;
- for (i = 0; i < bp->cp_nr_rings; i++) {
- char *attr;
-
- bp->irq_tbl[i].vector = msix_ent[i].vector;
- if (bp->flags & BNXT_FLAG_SHARED_RINGS)
- attr = "TxRx";
- else if (i < bp->rx_nr_rings)
- attr = "rx";
- else
- attr = "tx";
-
- snprintf(bp->irq_tbl[i].name, len,
- "%s-%s-%d", dev->name, attr, i);
- bp->irq_tbl[i].handler = bnxt_msix;
- }
- rc = bnxt_set_real_num_queues(bp);
- if (rc)
- goto msix_setup_exit;
} else {
rc = -ENOMEM;
goto msix_setup_exit;
@@ -4828,52 +4943,54 @@ static int bnxt_setup_msix(struct bnxt *bp)
return 0;
msix_setup_exit:
- netdev_err(bp->dev, "bnxt_setup_msix err: %x\n", rc);
+ netdev_err(bp->dev, "bnxt_init_msix err: %x\n", rc);
+ kfree(bp->irq_tbl);
+ bp->irq_tbl = NULL;
pci_disable_msix(bp->pdev);
kfree(msix_ent);
return rc;
}
-static int bnxt_setup_inta(struct bnxt *bp)
+static int bnxt_init_inta(struct bnxt *bp)
{
- int rc;
- const int len = sizeof(bp->irq_tbl[0].name);
-
- if (netdev_get_num_tc(bp->dev))
- netdev_reset_tc(bp->dev);
-
bp->irq_tbl = kcalloc(1, sizeof(struct bnxt_irq), GFP_KERNEL);
- if (!bp->irq_tbl) {
- rc = -ENOMEM;
- return rc;
- }
+ if (!bp->irq_tbl)
+ return -ENOMEM;
+
+ bp->total_irqs = 1;
bp->rx_nr_rings = 1;
bp->tx_nr_rings = 1;
bp->cp_nr_rings = 1;
bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
bp->flags |= BNXT_FLAG_SHARED_RINGS;
bp->irq_tbl[0].vector = bp->pdev->irq;
- snprintf(bp->irq_tbl[0].name, len,
- "%s-%s-%d", bp->dev->name, "TxRx", 0);
- bp->irq_tbl[0].handler = bnxt_inta;
- rc = bnxt_set_real_num_queues(bp);
- return rc;
+ return 0;
}
-static int bnxt_setup_int_mode(struct bnxt *bp)
+static int bnxt_init_int_mode(struct bnxt *bp)
{
int rc = 0;
if (bp->flags & BNXT_FLAG_MSIX_CAP)
- rc = bnxt_setup_msix(bp);
+ rc = bnxt_init_msix(bp);
if (!(bp->flags & BNXT_FLAG_USING_MSIX) && BNXT_PF(bp)) {
/* fallback to INTA */
- rc = bnxt_setup_inta(bp);
+ rc = bnxt_init_inta(bp);
}
return rc;
}
+static void bnxt_clear_int_mode(struct bnxt *bp)
+{
+ if (bp->flags & BNXT_FLAG_USING_MSIX)
+ pci_disable_msix(bp->pdev);
+
+ kfree(bp->irq_tbl);
+ bp->irq_tbl = NULL;
+ bp->flags &= ~BNXT_FLAG_USING_MSIX;
+}
+
static void bnxt_free_irq(struct bnxt *bp)
{
struct bnxt_irq *irq;
@@ -4892,10 +5009,6 @@ static void bnxt_free_irq(struct bnxt *bp)
free_irq(irq->vector, bp->bnapi[i]);
irq->requested = 0;
}
- if (bp->flags & BNXT_FLAG_USING_MSIX)
- pci_disable_msix(bp->pdev);
- kfree(bp->irq_tbl);
- bp->irq_tbl = NULL;
}
static int bnxt_request_irq(struct bnxt *bp)
@@ -4967,7 +5080,6 @@ static void bnxt_init_napi(struct bnxt *bp)
bnapi = bp->bnapi[cp_nr_rings];
netif_napi_add(bp->dev, &bnapi->napi,
bnxt_poll_nitroa0, 64);
- napi_hash_add(&bnapi->napi);
}
} else {
bnapi = bp->bnapi[0];
@@ -4999,7 +5111,7 @@ static void bnxt_enable_napi(struct bnxt *bp)
}
}
-static void bnxt_tx_disable(struct bnxt *bp)
+void bnxt_tx_disable(struct bnxt *bp)
{
int i;
struct bnxt_tx_ring_info *txr;
@@ -5017,7 +5129,7 @@ static void bnxt_tx_disable(struct bnxt *bp)
netif_carrier_off(bp->dev);
}
-static void bnxt_tx_enable(struct bnxt *bp)
+void bnxt_tx_enable(struct bnxt *bp)
{
int i;
struct bnxt_tx_ring_info *txr;
@@ -5109,6 +5221,7 @@ static int bnxt_update_link(struct bnxt *bp, bool chng_link_state)
struct hwrm_port_phy_qcfg_input req = {0};
struct hwrm_port_phy_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
u8 link_up = link_info->link_up;
+ u16 diff;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_QCFG, -1, -1);
@@ -5196,6 +5309,23 @@ static int bnxt_update_link(struct bnxt *bp, bool chng_link_state)
link_info->link_up = 0;
}
mutex_unlock(&bp->hwrm_cmd_lock);
+
+ diff = link_info->support_auto_speeds ^ link_info->advertising;
+ if ((link_info->support_auto_speeds | diff) !=
+ link_info->support_auto_speeds) {
+ /* An advertised speed is no longer supported, so we need to
+ * update the advertisement settings. See bnxt_reset() for
+ * comments about the rtnl_lock() sequence below.
+ */
+ clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
+ rtnl_lock();
+ link_info->advertising = link_info->support_auto_speeds;
+ if (test_bit(BNXT_STATE_OPEN, &bp->state) &&
+ (link_info->autoneg & BNXT_AUTONEG_SPEED))
+ bnxt_hwrm_set_link_setting(bp, true, false);
+ set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
+ rtnl_unlock();
+ }
return 0;
}
@@ -5360,7 +5490,7 @@ static int bnxt_hwrm_shutdown_link(struct bnxt *bp)
return 0;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1);
- req.flags = cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE_LINK_DOWN);
+ req.flags = cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE_LINK_DWN);
return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}
@@ -5423,6 +5553,12 @@ static int bnxt_update_phy_setting(struct bnxt *bp)
update_link = true;
}
+ /* The last close may have shutdown the link, so need to call
+ * PHY_CFG to bring it back up.
+ */
+ if (!netif_carrier_ok(bp->dev))
+ update_link = true;
+
if (!bnxt_eee_config_ok(bp))
update_eee = true;
@@ -5543,22 +5679,7 @@ int bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
static int bnxt_open(struct net_device *dev)
{
struct bnxt *bp = netdev_priv(dev);
- int rc = 0;
- if (!test_bit(BNXT_STATE_FN_RST_DONE, &bp->state)) {
- rc = bnxt_hwrm_func_reset(bp);
- if (rc) {
- netdev_err(bp->dev, "hwrm chip reset failure rc: %x\n",
- rc);
- rc = -EBUSY;
- return rc;
- }
- /* Do func_reset during the 1st PF open only to prevent killing
- * the VFs when the PF is brought down and up.
- */
- if (BNXT_PF(bp))
- set_bit(BNXT_STATE_FN_RST_DONE, &bp->state);
- }
return __bnxt_open_nic(bp, true, true);
}
@@ -6116,6 +6237,10 @@ static void bnxt_sp_task(struct work_struct *work)
if (test_and_clear_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event))
bnxt_cfg_ntp_filters(bp);
if (test_and_clear_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event)) {
+ if (test_and_clear_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT,
+ &bp->sp_event))
+ bnxt_hwrm_phy_qcaps(bp);
+
rc = bnxt_update_link(bp, true);
if (rc)
netdev_err(bp->dev, "SP task can't update link (rc: %x)\n",
@@ -6303,9 +6428,6 @@ static int bnxt_change_mtu(struct net_device *dev, int new_mtu)
{
struct bnxt *bp = netdev_priv(dev);
- if (new_mtu < 60 || new_mtu > 9500)
- return -EINVAL;
-
if (netif_running(dev))
bnxt_close_nic(bp, false, false);
@@ -6318,17 +6440,10 @@ static int bnxt_change_mtu(struct net_device *dev, int new_mtu)
return 0;
}
-static int bnxt_setup_tc(struct net_device *dev, u32 handle, __be16 proto,
- struct tc_to_netdev *ntc)
+int bnxt_setup_mq_tc(struct net_device *dev, u8 tc)
{
struct bnxt *bp = netdev_priv(dev);
bool sh = false;
- u8 tc;
-
- if (ntc->type != TC_SETUP_MQPRIO)
- return -EINVAL;
-
- tc = ntc->tc;
if (tc > bp->max_tc) {
netdev_err(dev, "too many traffic classes requested: %d Max supported is %d\n",
@@ -6371,6 +6486,15 @@ static int bnxt_setup_tc(struct net_device *dev, u32 handle, __be16 proto,
return 0;
}
+static int bnxt_setup_tc(struct net_device *dev, u32 handle, __be16 proto,
+ struct tc_to_netdev *ntc)
+{
+ if (ntc->type != TC_SETUP_MQPRIO)
+ return -EINVAL;
+
+ return bnxt_setup_mq_tc(dev, ntc->tc);
+}
+
#ifdef CONFIG_RFS_ACCEL
static bool bnxt_fltr_match(struct bnxt_ntuple_filter *f1,
struct bnxt_ntuple_filter *f2)
@@ -6659,11 +6783,15 @@ static void bnxt_remove_one(struct pci_dev *pdev)
cancel_work_sync(&bp->sp_task);
bp->sp_event = 0;
+ bnxt_clear_int_mode(bp);
bnxt_hwrm_func_drv_unrgtr(bp);
bnxt_free_hwrm_resources(bp);
+ bnxt_dcb_free(bp);
pci_iounmap(pdev, bp->bar2);
pci_iounmap(pdev, bp->bar1);
pci_iounmap(pdev, bp->bar0);
+ kfree(bp->edev);
+ bp->edev = NULL;
free_netdev(dev);
pci_release_regions(pdev);
@@ -6772,6 +6900,39 @@ int bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx, bool shared)
return bnxt_trim_rings(bp, max_rx, max_tx, cp, shared);
}
+static int bnxt_get_dflt_rings(struct bnxt *bp, int *max_rx, int *max_tx,
+ bool shared)
+{
+ int rc;
+
+ rc = bnxt_get_max_rings(bp, max_rx, max_tx, shared);
+ if (rc)
+ return rc;
+
+ if (bp->flags & BNXT_FLAG_ROCE_CAP) {
+ int max_cp, max_stat, max_irq;
+
+ /* Reserve minimum resources for RoCE */
+ max_cp = bnxt_get_max_func_cp_rings(bp);
+ max_stat = bnxt_get_max_func_stat_ctxs(bp);
+ max_irq = bnxt_get_max_func_irqs(bp);
+ if (max_cp <= BNXT_MIN_ROCE_CP_RINGS ||
+ max_irq <= BNXT_MIN_ROCE_CP_RINGS ||
+ max_stat <= BNXT_MIN_ROCE_STAT_CTXS)
+ return 0;
+
+ max_cp -= BNXT_MIN_ROCE_CP_RINGS;
+ max_irq -= BNXT_MIN_ROCE_CP_RINGS;
+ max_stat -= BNXT_MIN_ROCE_STAT_CTXS;
+ max_cp = min_t(int, max_cp, max_irq);
+ max_cp = min_t(int, max_cp, max_stat);
+ rc = bnxt_trim_rings(bp, max_rx, max_tx, max_cp, shared);
+ if (rc)
+ rc = 0;
+ }
+ return rc;
+}
+
static int bnxt_set_dflt_rings(struct bnxt *bp)
{
int dflt_rings, max_rx_rings, max_tx_rings, rc;
@@ -6780,7 +6941,7 @@ static int bnxt_set_dflt_rings(struct bnxt *bp)
if (sh)
bp->flags |= BNXT_FLAG_SHARED_RINGS;
dflt_rings = netif_get_num_default_rss_queues();
- rc = bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings, sh);
+ rc = bnxt_get_dflt_rings(bp, &max_rx_rings, &max_tx_rings, sh);
if (rc)
return rc;
bp->rx_nr_rings = min_t(int, dflt_rings, max_rx_rings);
@@ -6796,6 +6957,13 @@ static int bnxt_set_dflt_rings(struct bnxt *bp)
return rc;
}
+void bnxt_restore_pf_fw_resources(struct bnxt *bp)
+{
+ ASSERT_RTNL();
+ bnxt_hwrm_func_qcaps(bp);
+ bnxt_subtract_ulp_resources(bp, BNXT_ROCE_ULP);
+}
+
static void bnxt_parse_log_pcie_link(struct bnxt *bp)
{
enum pcie_link_width width = PCIE_LNK_WIDTH_UNKNOWN;
@@ -6884,6 +7052,12 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
dev->features |= dev->hw_features | NETIF_F_HIGHDMA;
dev->priv_flags |= IFF_UNICAST_FLT;
+ /* MTU range: 60 - 9500 */
+ dev->min_mtu = ETH_ZLEN;
+ dev->max_mtu = 9500;
+
+ bnxt_dcb_init(bp);
+
#ifdef CONFIG_BNXT_SRIOV
init_waitqueue_head(&bp->sriov_cfg_wait);
#endif
@@ -6895,6 +7069,12 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (rc)
goto init_err;
+ rc = bnxt_hwrm_func_rgtr_async_events(bp, NULL, 0);
+ if (rc)
+ goto init_err;
+
+ bp->ulp_probe = bnxt_ulp_probe;
+
/* Get the MAX capabilities for this function */
rc = bnxt_hwrm_func_qcaps(bp);
if (rc) {
@@ -6916,14 +7096,22 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
bnxt_set_tpa_flags(bp);
bnxt_set_ring_params(bp);
- if (BNXT_PF(bp))
- bp->pf.max_irqs = max_irqs;
-#if defined(CONFIG_BNXT_SRIOV)
- else
- bp->vf.max_irqs = max_irqs;
-#endif
+ bnxt_set_max_func_irqs(bp, max_irqs);
bnxt_set_dflt_rings(bp);
+ /* Default RSS hash cfg. */
+ bp->rss_hash_cfg = VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4 |
+ VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4 |
+ VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6 |
+ VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6;
+ if (!BNXT_CHIP_NUM_57X0X(bp->chip_num) &&
+ !BNXT_CHIP_TYPE_NITRO_A0(bp) &&
+ bp->hwrm_spec_code >= 0x10501) {
+ bp->flags |= BNXT_FLAG_UDP_RSS_CAP;
+ bp->rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4 |
+ VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6;
+ }
+
if (BNXT_PF(bp) && !BNXT_CHIP_TYPE_NITRO_A0(bp)) {
dev->hw_features |= NETIF_F_NTUPLE;
if (bnxt_rfs_capable(bp)) {
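
Annotation (illustrative, not part of the patch): the 0x10501 gate on
UDP RSS reads as HWRM spec 1.5.1, assuming hwrm_spec_code packs major,
minor and update as 0xMMmmuu; that encoding is an inference from the
literal, not stated in this diff. A quick check of the assumed scheme:

#include <stdio.h>

int main(void)
{
	unsigned int spec = (1 << 16) | (5 << 8) | 1;	/* assumed 1.5.1 */

	printf("spec 0x%05x: UDP RSS %s\n", spec,
	       spec >= 0x10501 ? "capable" : "not capable");
	return 0;
}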
@@ -6939,10 +7127,18 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (rc)
goto init_err;
- rc = register_netdev(dev);
+ rc = bnxt_hwrm_func_reset(bp);
+ if (rc)
+ goto init_err;
+
+ rc = bnxt_init_int_mode(bp);
if (rc)
goto init_err;
+ rc = register_netdev(dev);
+ if (rc)
+ goto init_err_clr_int;
+
netdev_info(dev, "%s found at mem %lx, node addr %pM\n",
board_info[ent->driver_data].name,
(long)pci_resource_start(pdev, 0), dev->dev_addr);
@@ -6951,6 +7147,9 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
return 0;
+init_err_clr_int:
+ bnxt_clear_int_mode(bp);
+
init_err:
pci_iounmap(pdev, bp->bar0);
pci_release_regions(pdev);
@@ -6980,6 +7179,8 @@ static pci_ers_result_t bnxt_io_error_detected(struct pci_dev *pdev,
rtnl_lock();
netif_device_detach(netdev);
+ bnxt_ulp_stop(bp);
+
if (state == pci_channel_io_perm_failure) {
rtnl_unlock();
return PCI_ERS_RESULT_DISCONNECT;
@@ -6988,8 +7189,6 @@ static pci_ers_result_t bnxt_io_error_detected(struct pci_dev *pdev,
if (netif_running(netdev))
bnxt_close(netdev);
- /* So that func_reset will be done during slot_reset */
- clear_bit(BNXT_STATE_FN_RST_DONE, &bp->state);
pci_disable_device(pdev);
rtnl_unlock();
@@ -7023,11 +7222,14 @@ static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev)
} else {
pci_set_master(pdev);
- if (netif_running(netdev))
+ err = bnxt_hwrm_func_reset(bp);
+ if (!err && netif_running(netdev))
err = bnxt_open(netdev);
- if (!err)
+ if (!err) {
result = PCI_ERS_RESULT_RECOVERED;
+ bnxt_ulp_start(bp);
+ }
}
if (result != PCI_ERS_RESULT_RECOVERED && netif_running(netdev))
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index 51b164a0e844..16defe9ececc 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -11,10 +11,10 @@
#define BNXT_H
#define DRV_MODULE_NAME "bnxt_en"
-#define DRV_MODULE_VERSION "1.5.0"
+#define DRV_MODULE_VERSION "1.6.0"
#define DRV_VER_MAJ 1
-#define DRV_VER_MIN 5
+#define DRV_VER_MIN 6
#define DRV_VER_UPD 0
struct tx_bd {
@@ -387,6 +387,9 @@ struct rx_tpa_end_cmp_ext {
#define DB_KEY_TX_PUSH (0x4 << 28)
#define DB_LONG_TX_PUSH (0x2 << 24)
+#define BNXT_MIN_ROCE_CP_RINGS 2
+#define BNXT_MIN_ROCE_STAT_CTXS 1
+
#define INVALID_HW_RING_ID ((u16)-1)
/* The hardware supports certain page sizes. Use the supported page sizes
@@ -700,7 +703,6 @@ struct bnxt_vnic_info {
u8 *uc_list;
u16 *fw_grp_ids;
- u16 hash_type;
dma_addr_t rss_table_dma_addr;
__le16 *rss_table;
dma_addr_t rss_hash_key_dma_addr;
@@ -952,7 +954,12 @@ struct bnxt {
#define BNXT_FLAG_RFS 0x100
#define BNXT_FLAG_SHARED_RINGS 0x200
#define BNXT_FLAG_PORT_STATS 0x400
+ #define BNXT_FLAG_UDP_RSS_CAP 0x800
#define BNXT_FLAG_EEE_CAP 0x1000
+ #define BNXT_FLAG_ROCEV1_CAP 0x8000
+ #define BNXT_FLAG_ROCEV2_CAP 0x10000
+ #define BNXT_FLAG_ROCE_CAP (BNXT_FLAG_ROCEV1_CAP | \
+ BNXT_FLAG_ROCEV2_CAP)
#define BNXT_FLAG_CHIP_NITRO_A0 0x1000000
#define BNXT_FLAG_ALL_CONFIG_FEATS (BNXT_FLAG_TPA | \
@@ -965,6 +972,9 @@ struct bnxt {
#define BNXT_SINGLE_PF(bp) (BNXT_PF(bp) && !BNXT_NPAR(bp))
#define BNXT_CHIP_TYPE_NITRO_A0(bp) ((bp)->flags & BNXT_FLAG_CHIP_NITRO_A0)
+ struct bnxt_en_dev *edev;
+ struct bnxt_en_dev * (*ulp_probe)(struct net_device *);
+
struct bnxt_napi **bnapi;
struct bnxt_rx_ring_info *rx_ring;
@@ -1007,8 +1017,10 @@ struct bnxt {
struct bnxt_ring_grp_info *grp_info;
struct bnxt_vnic_info *vnic_info;
int nr_vnics;
+ u32 rss_hash_cfg;
u8 max_tc;
+ u8 max_lltc; /* lossless TCs */
struct bnxt_queue_info q_info[BNXT_MAX_QUEUE];
unsigned int current_interval;
@@ -1019,11 +1031,18 @@ struct bnxt {
unsigned long state;
#define BNXT_STATE_OPEN 0
#define BNXT_STATE_IN_SP_TASK 1
-#define BNXT_STATE_FN_RST_DONE 2
struct bnxt_irq *irq_tbl;
+ int total_irqs;
u8 mac_addr[ETH_ALEN];
+#ifdef CONFIG_BNXT_DCB
+ struct ieee_pfc *ieee_pfc;
+ struct ieee_ets *ieee_ets;
+ u8 dcbx_cap;
+ u8 default_pri;
+#endif /* CONFIG_BNXT_DCB */
+
u32 msg_enable;
u32 hwrm_spec_code;
@@ -1089,6 +1108,7 @@ struct bnxt {
#define BNXT_RESET_TASK_SILENT_SP_EVENT 11
#define BNXT_GENEVE_ADD_PORT_SP_EVENT 12
#define BNXT_GENEVE_DEL_PORT_SP_EVENT 13
+#define BNXT_LINK_SPEED_CHNG_SP_EVENT 14
struct bnxt_pf_info pf;
#ifdef CONFIG_BNXT_SRIOV
@@ -1114,6 +1134,13 @@ struct bnxt {
u32 lpi_tmr_hi;
};
+#define BNXT_RX_STATS_OFFSET(counter) \
+ (offsetof(struct rx_port_stats, counter) / 8)
+
+#define BNXT_TX_STATS_OFFSET(counter) \
+ ((offsetof(struct tx_port_stats, counter) + \
+ sizeof(struct rx_port_stats) + 512) / 8)
+
#ifdef CONFIG_NET_RX_BUSY_POLL
static inline void bnxt_enable_poll(struct bnxt_napi *bnapi)
{
@@ -1216,12 +1243,23 @@ void bnxt_hwrm_cmd_hdr_init(struct bnxt *, void *, u16, u16, u16);
int _hwrm_send_message(struct bnxt *, void *, u32, int);
int hwrm_send_message(struct bnxt *, void *, u32, int);
int hwrm_send_message_silent(struct bnxt *, void *, u32, int);
+int bnxt_hwrm_func_rgtr_async_events(struct bnxt *bp, unsigned long *bmap,
+ int bmap_size);
+int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id);
int bnxt_hwrm_set_coal(struct bnxt *);
-int bnxt_hwrm_func_qcaps(struct bnxt *);
+unsigned int bnxt_get_max_func_stat_ctxs(struct bnxt *bp);
+void bnxt_set_max_func_stat_ctxs(struct bnxt *bp, unsigned int max);
+unsigned int bnxt_get_max_func_cp_rings(struct bnxt *bp);
+void bnxt_set_max_func_cp_rings(struct bnxt *bp, unsigned int max);
+void bnxt_set_max_func_irqs(struct bnxt *bp, unsigned int max);
+void bnxt_tx_disable(struct bnxt *bp);
+void bnxt_tx_enable(struct bnxt *bp);
int bnxt_hwrm_set_pause(struct bnxt *);
int bnxt_hwrm_set_link_setting(struct bnxt *, bool, bool);
int bnxt_hwrm_fw_set_time(struct bnxt *);
int bnxt_open_nic(struct bnxt *, bool, bool);
int bnxt_close_nic(struct bnxt *, bool, bool);
+int bnxt_setup_mq_tc(struct net_device *dev, u8 tc);
int bnxt_get_max_rings(struct bnxt *, int *, int *, bool);
+void bnxt_restore_pf_fw_resources(struct bnxt *bp);
#endif
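
Annotation (illustrative, not part of the patch): the offset macros
moved into bnxt.h index the port-statistics DMA block in 8-byte
(__le64) units, with the tx counters sitting after the entire rx struct
plus a 512-byte gap. A userspace rehearsal of the arithmetic; the two
structs below are tiny stand-ins, not the real layouts, so the printed
indices differ from the driver's:

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

struct rx_port_stats { uint64_t rx_a, rx_b, rx_pfc_ena_frames_pri0; };
struct tx_port_stats { uint64_t tx_a, tx_pfc_ena_frames_pri0; };

#define RX_OFF(c) (offsetof(struct rx_port_stats, c) / 8)
#define TX_OFF(c) ((offsetof(struct tx_port_stats, c) + \
		    sizeof(struct rx_port_stats) + 512) / 8)

int main(void)
{
	printf("rx pri0 word index: %zu\n", RX_OFF(rx_pfc_ena_frames_pri0));
	printf("tx pri0 word index: %zu\n", TX_OFF(tx_pfc_ena_frames_pri0));
	return 0;
}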
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
new file mode 100644
index 000000000000..fdf2d8caf7bf
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
@@ -0,0 +1,502 @@
+/* Broadcom NetXtreme-C/E network driver.
+ *
+ * Copyright (c) 2014-2016 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ */
+
+#include <linux/netdevice.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/rtnetlink.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/etherdevice.h>
+#include "bnxt_hsi.h"
+#include "bnxt.h"
+#include "bnxt_dcb.h"
+
+#ifdef CONFIG_BNXT_DCB
+static int bnxt_hwrm_queue_pri2cos_cfg(struct bnxt *bp, struct ieee_ets *ets)
+{
+ struct hwrm_queue_pri2cos_cfg_input req = {0};
+ int rc = 0, i;
+ u8 *pri2cos;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_PRI2COS_CFG, -1, -1);
+ req.flags = cpu_to_le32(QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_BIDIR |
+ QUEUE_PRI2COS_CFG_REQ_FLAGS_IVLAN);
+
+ pri2cos = &req.pri0_cos_queue_id;
+ for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
+ req.enables |= cpu_to_le32(
+ QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI0_COS_QUEUE_ID << i);
+
+ pri2cos[i] = bp->q_info[ets->prio_tc[i]].queue_id;
+ }
+ rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ return rc;
+}
+
+static int bnxt_hwrm_queue_pri2cos_qcfg(struct bnxt *bp, struct ieee_ets *ets)
+{
+ struct hwrm_queue_pri2cos_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
+ struct hwrm_queue_pri2cos_qcfg_input req = {0};
+ int rc = 0;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_PRI2COS_QCFG, -1, -1);
+ req.flags = cpu_to_le32(QUEUE_PRI2COS_QCFG_REQ_FLAGS_IVLAN);
+ rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ if (!rc) {
+ u8 *pri2cos = &resp->pri0_cos_queue_id;
+ int i, j;
+
+ for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
+ u8 queue_id = pri2cos[i];
+
+ for (j = 0; j < bp->max_tc; j++) {
+ if (bp->q_info[j].queue_id == queue_id) {
+ ets->prio_tc[i] = j;
+ break;
+ }
+ }
+ }
+ }
+ return rc;
+}
+
+static int bnxt_hwrm_queue_cos2bw_cfg(struct bnxt *bp, struct ieee_ets *ets,
+ u8 max_tc)
+{
+ struct hwrm_queue_cos2bw_cfg_input req = {0};
+ struct bnxt_cos2bw_cfg cos2bw;
+ int rc = 0, i;
+ void *data;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_COS2BW_CFG, -1, -1);
+ data = &req.unused_0;
+ for (i = 0; i < max_tc; i++, data += sizeof(cos2bw) - 4) {
+ req.enables |= cpu_to_le32(
+ QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID0_VALID << i);
+
+ memset(&cos2bw, 0, sizeof(cos2bw));
+ cos2bw.queue_id = bp->q_info[i].queue_id;
+ if (ets->tc_tsa[i] == IEEE_8021QAZ_TSA_STRICT) {
+ cos2bw.tsa =
+ QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_TSA_ASSIGN_SP;
+ cos2bw.pri_lvl = i;
+ } else {
+ cos2bw.tsa =
+ QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_TSA_ASSIGN_ETS;
+ cos2bw.bw_weight = ets->tc_tx_bw[i];
+ }
+ memcpy(data, &cos2bw.queue_id, sizeof(cos2bw) - 4);
+ if (i == 0) {
+ req.queue_id0 = cos2bw.queue_id;
+ req.unused_0 = 0;
+ }
+ }
+ rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ return rc;
+}
+
+static int bnxt_hwrm_queue_cos2bw_qcfg(struct bnxt *bp, struct ieee_ets *ets)
+{
+ struct hwrm_queue_cos2bw_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
+ struct hwrm_queue_cos2bw_qcfg_input req = {0};
+ struct bnxt_cos2bw_cfg cos2bw;
+ void *data;
+ int rc, i;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_COS2BW_QCFG, -1, -1);
+ rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ if (rc)
+ return rc;
+
+ data = &resp->queue_id0 + offsetof(struct bnxt_cos2bw_cfg, queue_id);
+ for (i = 0; i < bp->max_tc; i++, data += sizeof(cos2bw) - 4) {
+ int j;
+
+ memcpy(&cos2bw.queue_id, data, sizeof(cos2bw) - 4);
+ if (i == 0)
+ cos2bw.queue_id = resp->queue_id0;
+
+ for (j = 0; j < bp->max_tc; j++) {
+ if (bp->q_info[j].queue_id != cos2bw.queue_id)
+ continue;
+ if (cos2bw.tsa ==
+ QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_TSA_ASSIGN_SP) {
+ ets->tc_tsa[j] = IEEE_8021QAZ_TSA_STRICT;
+ } else {
+ ets->tc_tsa[j] = IEEE_8021QAZ_TSA_ETS;
+ ets->tc_tx_bw[j] = cos2bw.bw_weight;
+ }
+ }
+ }
+ return 0;
+}
+
+static int bnxt_hwrm_queue_cfg(struct bnxt *bp, unsigned int lltc_mask)
+{
+ struct hwrm_queue_cfg_input req = {0};
+ int i;
+
+ if (netif_running(bp->dev))
+ bnxt_tx_disable(bp);
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_CFG, -1, -1);
+ req.flags = cpu_to_le32(QUEUE_CFG_REQ_FLAGS_PATH_BIDIR);
+ req.enables = cpu_to_le32(QUEUE_CFG_REQ_ENABLES_SERVICE_PROFILE);
+
+ /* Configure lossless queues to lossy first */
+ req.service_profile = QUEUE_CFG_REQ_SERVICE_PROFILE_LOSSY;
+ for (i = 0; i < bp->max_tc; i++) {
+ if (BNXT_LLQ(bp->q_info[i].queue_profile)) {
+ req.queue_id = cpu_to_le32(bp->q_info[i].queue_id);
+ hwrm_send_message(bp, &req, sizeof(req),
+ HWRM_CMD_TIMEOUT);
+ bp->q_info[i].queue_profile =
+ QUEUE_CFG_REQ_SERVICE_PROFILE_LOSSY;
+ }
+ }
+
+ /* Now configure desired queues to lossless */
+ req.service_profile = QUEUE_CFG_REQ_SERVICE_PROFILE_LOSSLESS;
+ for (i = 0; i < bp->max_tc; i++) {
+ if (lltc_mask & (1 << i)) {
+ req.queue_id = cpu_to_le32(bp->q_info[i].queue_id);
+ hwrm_send_message(bp, &req, sizeof(req),
+ HWRM_CMD_TIMEOUT);
+ bp->q_info[i].queue_profile =
+ QUEUE_CFG_REQ_SERVICE_PROFILE_LOSSLESS;
+ }
+ }
+ if (netif_running(bp->dev))
+ bnxt_tx_enable(bp);
+
+ return 0;
+}
+
+static int bnxt_hwrm_queue_pfc_cfg(struct bnxt *bp, struct ieee_pfc *pfc)
+{
+ struct hwrm_queue_pfcenable_cfg_input req = {0};
+ struct ieee_ets *my_ets = bp->ieee_ets;
+ unsigned int tc_mask = 0, pri_mask = 0;
+ u8 i, pri, lltc_count = 0;
+ bool need_q_recfg = false;
+ int rc;
+
+ if (!my_ets)
+ return -EINVAL;
+
+ for (i = 0; i < bp->max_tc; i++) {
+ for (pri = 0; pri < IEEE_8021QAZ_MAX_TCS; pri++) {
+ if ((pfc->pfc_en & (1 << pri)) &&
+ (my_ets->prio_tc[pri] == i)) {
+ pri_mask |= 1 << pri;
+ tc_mask |= 1 << i;
+ }
+ }
+ if (tc_mask & (1 << i))
+ lltc_count++;
+ }
+ if (lltc_count > bp->max_lltc)
+ return -EINVAL;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_PFCENABLE_CFG, -1, -1);
+ req.flags = cpu_to_le32(pri_mask);
+ rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ if (rc)
+ return rc;
+
+ for (i = 0; i < bp->max_tc; i++) {
+ if (tc_mask & (1 << i)) {
+ if (!BNXT_LLQ(bp->q_info[i].queue_profile))
+ need_q_recfg = true;
+ }
+ }
+
+ if (need_q_recfg)
+ rc = bnxt_hwrm_queue_cfg(bp, tc_mask);
+
+ return rc;
+}
+
+static int bnxt_hwrm_queue_pfc_qcfg(struct bnxt *bp, struct ieee_pfc *pfc)
+{
+ struct hwrm_queue_pfcenable_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
+ struct hwrm_queue_pfcenable_qcfg_input req = {0};
+ u8 pri_mask;
+ int rc;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_PFCENABLE_QCFG, -1, -1);
+ rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ if (rc)
+ return rc;
+
+ pri_mask = le32_to_cpu(resp->flags);
+ pfc->pfc_en = pri_mask;
+ return 0;
+}
+
+static int bnxt_ets_validate(struct bnxt *bp, struct ieee_ets *ets, u8 *tc)
+{
+ int total_ets_bw = 0;
+ u8 max_tc = 0;
+ int i;
+
+ for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
+ if (ets->prio_tc[i] > bp->max_tc) {
+ netdev_err(bp->dev, "priority to TC mapping exceeds TC count %d\n",
+ ets->prio_tc[i]);
+ return -EINVAL;
+ }
+ if (ets->prio_tc[i] > max_tc)
+ max_tc = ets->prio_tc[i];
+
+ if ((ets->tc_tx_bw[i] || ets->tc_tsa[i]) && i > bp->max_tc)
+ return -EINVAL;
+
+ switch (ets->tc_tsa[i]) {
+ case IEEE_8021QAZ_TSA_STRICT:
+ break;
+ case IEEE_8021QAZ_TSA_ETS:
+ total_ets_bw += ets->tc_tx_bw[i];
+ break;
+ default:
+ return -ENOTSUPP;
+ }
+ }
+ if (total_ets_bw > 100)
+ return -EINVAL;
+
+ *tc = max_tc + 1;
+ return 0;
+}
+
+static int bnxt_dcbnl_ieee_getets(struct net_device *dev, struct ieee_ets *ets)
+{
+ struct bnxt *bp = netdev_priv(dev);
+ struct ieee_ets *my_ets = bp->ieee_ets;
+
+ ets->ets_cap = bp->max_tc;
+
+ if (!my_ets) {
+ int rc;
+
+ if (bp->dcbx_cap & DCB_CAP_DCBX_HOST)
+ return 0;
+
+ my_ets = kzalloc(sizeof(*my_ets), GFP_KERNEL);
+ if (!my_ets)
+ return 0;
+ rc = bnxt_hwrm_queue_cos2bw_qcfg(bp, my_ets);
+ if (rc)
+ return 0;
+ rc = bnxt_hwrm_queue_pri2cos_qcfg(bp, my_ets);
+ if (rc)
+ return 0;
+ }
+
+ ets->cbs = my_ets->cbs;
+ memcpy(ets->tc_tx_bw, my_ets->tc_tx_bw, sizeof(ets->tc_tx_bw));
+ memcpy(ets->tc_rx_bw, my_ets->tc_rx_bw, sizeof(ets->tc_rx_bw));
+ memcpy(ets->tc_tsa, my_ets->tc_tsa, sizeof(ets->tc_tsa));
+ memcpy(ets->prio_tc, my_ets->prio_tc, sizeof(ets->prio_tc));
+ return 0;
+}
+
+static int bnxt_dcbnl_ieee_setets(struct net_device *dev, struct ieee_ets *ets)
+{
+ struct bnxt *bp = netdev_priv(dev);
+ struct ieee_ets *my_ets = bp->ieee_ets;
+ u8 max_tc = 0;
+ int rc, i;
+
+ if (!(bp->dcbx_cap & DCB_CAP_DCBX_VER_IEEE) ||
+ !(bp->dcbx_cap & DCB_CAP_DCBX_HOST))
+ return -EINVAL;
+
+ rc = bnxt_ets_validate(bp, ets, &max_tc);
+ if (!rc) {
+ if (!my_ets) {
+ my_ets = kzalloc(sizeof(*my_ets), GFP_KERNEL);
+ if (!my_ets)
+ return -ENOMEM;
+ /* initialize PRI2TC mappings to invalid value */
+ for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
+ my_ets->prio_tc[i] = IEEE_8021QAZ_MAX_TCS;
+ bp->ieee_ets = my_ets;
+ }
+ rc = bnxt_setup_mq_tc(dev, max_tc);
+ if (rc)
+ return rc;
+ rc = bnxt_hwrm_queue_cos2bw_cfg(bp, ets, max_tc);
+ if (rc)
+ return rc;
+ rc = bnxt_hwrm_queue_pri2cos_cfg(bp, ets);
+ if (rc)
+ return rc;
+ memcpy(my_ets, ets, sizeof(*my_ets));
+ }
+ return rc;
+}
+
+static int bnxt_dcbnl_ieee_getpfc(struct net_device *dev, struct ieee_pfc *pfc)
+{
+ struct bnxt *bp = netdev_priv(dev);
+ __le64 *stats = (__le64 *)bp->hw_rx_port_stats;
+ struct ieee_pfc *my_pfc = bp->ieee_pfc;
+ long rx_off, tx_off;
+ int i, rc;
+
+ pfc->pfc_cap = bp->max_lltc;
+
+ if (!my_pfc) {
+ if (bp->dcbx_cap & DCB_CAP_DCBX_HOST)
+ return 0;
+
+ my_pfc = kzalloc(sizeof(*my_pfc), GFP_KERNEL);
+ if (!my_pfc)
+ return 0;
+ bp->ieee_pfc = my_pfc;
+ rc = bnxt_hwrm_queue_pfc_qcfg(bp, my_pfc);
+ if (rc)
+ return 0;
+ }
+
+ pfc->pfc_en = my_pfc->pfc_en;
+ pfc->mbc = my_pfc->mbc;
+ pfc->delay = my_pfc->delay;
+
+ if (!stats)
+ return 0;
+
+ rx_off = BNXT_RX_STATS_OFFSET(rx_pfc_ena_frames_pri0);
+ tx_off = BNXT_TX_STATS_OFFSET(tx_pfc_ena_frames_pri0);
+ for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++, rx_off++, tx_off++) {
+ pfc->requests[i] = le64_to_cpu(*(stats + tx_off));
+ pfc->indications[i] = le64_to_cpu(*(stats + rx_off));
+ }
+
+ return 0;
+}
+
+static int bnxt_dcbnl_ieee_setpfc(struct net_device *dev, struct ieee_pfc *pfc)
+{
+ struct bnxt *bp = netdev_priv(dev);
+ struct ieee_pfc *my_pfc = bp->ieee_pfc;
+ int rc;
+
+ if (!(bp->dcbx_cap & DCB_CAP_DCBX_VER_IEEE) ||
+ !(bp->dcbx_cap & DCB_CAP_DCBX_HOST))
+ return -EINVAL;
+
+ if (!my_pfc) {
+ my_pfc = kzalloc(sizeof(*my_pfc), GFP_KERNEL);
+ if (!my_pfc)
+ return -ENOMEM;
+ bp->ieee_pfc = my_pfc;
+ }
+ rc = bnxt_hwrm_queue_pfc_cfg(bp, pfc);
+ if (!rc)
+ memcpy(my_pfc, pfc, sizeof(*my_pfc));
+
+ return rc;
+}
+
+static int bnxt_dcbnl_ieee_setapp(struct net_device *dev, struct dcb_app *app)
+{
+ struct bnxt *bp = netdev_priv(dev);
+ int rc = -EINVAL;
+
+ if (!(bp->dcbx_cap & DCB_CAP_DCBX_VER_IEEE) ||
+ !(bp->dcbx_cap & DCB_CAP_DCBX_HOST))
+ return -EINVAL;
+
+ rc = dcb_ieee_setapp(dev, app);
+ return rc;
+}
+
+static int bnxt_dcbnl_ieee_delapp(struct net_device *dev, struct dcb_app *app)
+{
+ struct bnxt *bp = netdev_priv(dev);
+ int rc;
+
+ if (!(bp->dcbx_cap & DCB_CAP_DCBX_VER_IEEE))
+ return -EINVAL;
+
+ rc = dcb_ieee_delapp(dev, app);
+ return rc;
+}
+
+static u8 bnxt_dcbnl_getdcbx(struct net_device *dev)
+{
+ struct bnxt *bp = netdev_priv(dev);
+
+ return bp->dcbx_cap;
+}
+
+static u8 bnxt_dcbnl_setdcbx(struct net_device *dev, u8 mode)
+{
+ struct bnxt *bp = netdev_priv(dev);
+
+ /* only support IEEE */
+ if ((mode & DCB_CAP_DCBX_VER_CEE) || !(mode & DCB_CAP_DCBX_VER_IEEE))
+ return 1;
+
+ if ((mode & DCB_CAP_DCBX_HOST) && BNXT_VF(bp))
+ return 1;
+
+ if (mode == bp->dcbx_cap)
+ return 0;
+
+ bp->dcbx_cap = mode;
+ return 0;
+}
+
+static const struct dcbnl_rtnl_ops dcbnl_ops = {
+ .ieee_getets = bnxt_dcbnl_ieee_getets,
+ .ieee_setets = bnxt_dcbnl_ieee_setets,
+ .ieee_getpfc = bnxt_dcbnl_ieee_getpfc,
+ .ieee_setpfc = bnxt_dcbnl_ieee_setpfc,
+ .ieee_setapp = bnxt_dcbnl_ieee_setapp,
+ .ieee_delapp = bnxt_dcbnl_ieee_delapp,
+ .getdcbx = bnxt_dcbnl_getdcbx,
+ .setdcbx = bnxt_dcbnl_setdcbx,
+};
+
+void bnxt_dcb_init(struct bnxt *bp)
+{
+ if (bp->hwrm_spec_code < 0x10501)
+ return;
+
+ bp->dcbx_cap = DCB_CAP_DCBX_VER_IEEE;
+ if (BNXT_PF(bp))
+ bp->dcbx_cap |= DCB_CAP_DCBX_HOST;
+ else
+ bp->dcbx_cap |= DCB_CAP_DCBX_LLD_MANAGED;
+ bp->dev->dcbnl_ops = &dcbnl_ops;
+}
+
+void bnxt_dcb_free(struct bnxt *bp)
+{
+ kfree(bp->ieee_pfc);
+ kfree(bp->ieee_ets);
+ bp->ieee_pfc = NULL;
+ bp->ieee_ets = NULL;
+}
+
+#else
+
+void bnxt_dcb_init(struct bnxt *bp)
+{
+}
+
+void bnxt_dcb_free(struct bnxt *bp)
+{
+}
+
+#endif
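
Annotation (illustrative, not part of the patch):
bnxt_hwrm_queue_pri2cos_cfg() above takes the address of
pri0_cos_queue_id and indexes the eight adjacent one-byte request
fields as an array, a layout the firmware ABI fixes. A standalone
rehearsal using a hypothetical fake_pri2cos_req (field names modeled on
the request; the rest of the layout is invented):

#include <stdio.h>
#include <stdint.h>

struct fake_pri2cos_req {
	uint32_t flags;
	uint32_t enables;
	uint8_t  pri0_cos_queue_id;	/* eight contiguous u8 fields */
	uint8_t  pri1_cos_queue_id;
	uint8_t  pri2_cos_queue_id;
	uint8_t  pri3_cos_queue_id;
	uint8_t  pri4_cos_queue_id;
	uint8_t  pri5_cos_queue_id;
	uint8_t  pri6_cos_queue_id;
	uint8_t  pri7_cos_queue_id;
};

int main(void)
{
	struct fake_pri2cos_req req = {0};
	uint8_t *pri2cos = &req.pri0_cos_queue_id;

	for (int i = 0; i < 8; i++)	/* one CoS queue per 802.1p priority */
		pri2cos[i] = (uint8_t)i;

	printf("pri7 -> queue %u\n", req.pri7_cos_queue_id);	/* 7 */
	return 0;
}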
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.h
new file mode 100644
index 000000000000..35a0d28cf2fd
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.h
@@ -0,0 +1,41 @@
+/* Broadcom NetXtreme-C/E network driver.
+ *
+ * Copyright (c) 2014-2016 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ */
+
+#ifndef BNXT_DCB_H
+#define BNXT_DCB_H
+
+#include <net/dcbnl.h>
+
+struct bnxt_dcb {
+ u8 max_tc;
+ struct ieee_pfc *ieee_pfc;
+ struct ieee_ets *ieee_ets;
+ u8 dcbx_cap;
+ u8 default_pri;
+};
+
+struct bnxt_cos2bw_cfg {
+ u8 pad[3];
+ u8 queue_id;
+ __le32 min_bw;
+ __le32 max_bw;
+ u8 tsa;
+ u8 pri_lvl;
+ u8 bw_weight;
+ u8 unused;
+};
+
+#define BNXT_LLQ(q_profile) \
+ ((q_profile) == QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LOSSLESS)
+
+#define HWRM_STRUCT_DATA_SUBTYPE_HOST_OPERATIONAL 0x0300
+
+void bnxt_dcb_init(struct bnxt *bp);
+void bnxt_dcb_free(struct bnxt *bp);
+#endif
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
index a7e04ff4eaed..784aa77610bc 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
@@ -107,16 +107,9 @@ static int bnxt_set_coalesce(struct net_device *dev,
#define BNXT_NUM_STATS 21
-#define BNXT_RX_STATS_OFFSET(counter) \
- (offsetof(struct rx_port_stats, counter) / 8)
-
#define BNXT_RX_STATS_ENTRY(counter) \
{ BNXT_RX_STATS_OFFSET(counter), __stringify(counter) }
-#define BNXT_TX_STATS_OFFSET(counter) \
- ((offsetof(struct tx_port_stats, counter) + \
- sizeof(struct rx_port_stats) + 512) / 8)
-
#define BNXT_TX_STATS_ENTRY(counter) \
{ BNXT_TX_STATS_OFFSET(counter), __stringify(counter) }
@@ -150,6 +143,14 @@ static const struct {
BNXT_RX_STATS_ENTRY(rx_tagged_frames),
BNXT_RX_STATS_ENTRY(rx_double_tagged_frames),
BNXT_RX_STATS_ENTRY(rx_good_frames),
+ BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri0),
+ BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri1),
+ BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri2),
+ BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri3),
+ BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri4),
+ BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri5),
+ BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri6),
+ BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri7),
BNXT_RX_STATS_ENTRY(rx_undrsz_frames),
BNXT_RX_STATS_ENTRY(rx_eee_lpi_events),
BNXT_RX_STATS_ENTRY(rx_eee_lpi_duration),
@@ -179,6 +180,14 @@ static const struct {
BNXT_TX_STATS_ENTRY(tx_fcs_err_frames),
BNXT_TX_STATS_ENTRY(tx_err),
BNXT_TX_STATS_ENTRY(tx_fifo_underruns),
+ BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri0),
+ BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri1),
+ BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri2),
+ BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri3),
+ BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri4),
+ BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri5),
+ BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri6),
+ BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri7),
BNXT_TX_STATS_ENTRY(tx_eee_lpi_events),
BNXT_TX_STATS_ENTRY(tx_eee_lpi_duration),
BNXT_TX_STATS_ENTRY(tx_total_collisions),
@@ -542,6 +551,146 @@ fltr_err:
return rc;
}
+#endif
+
+static u64 get_ethtool_ipv4_rss(struct bnxt *bp)
+{
+ if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4)
+ return RXH_IP_SRC | RXH_IP_DST;
+ return 0;
+}
+
+static u64 get_ethtool_ipv6_rss(struct bnxt *bp)
+{
+ if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6)
+ return RXH_IP_SRC | RXH_IP_DST;
+ return 0;
+}
+
+static int bnxt_grxfh(struct bnxt *bp, struct ethtool_rxnfc *cmd)
+{
+ cmd->data = 0;
+ switch (cmd->flow_type) {
+ case TCP_V4_FLOW:
+ if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4)
+ cmd->data |= RXH_IP_SRC | RXH_IP_DST |
+ RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ cmd->data |= get_ethtool_ipv4_rss(bp);
+ break;
+ case UDP_V4_FLOW:
+ if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4)
+ cmd->data |= RXH_IP_SRC | RXH_IP_DST |
+ RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ /* fall through */
+ case SCTP_V4_FLOW:
+ case AH_ESP_V4_FLOW:
+ case AH_V4_FLOW:
+ case ESP_V4_FLOW:
+ case IPV4_FLOW:
+ cmd->data |= get_ethtool_ipv4_rss(bp);
+ break;
+
+ case TCP_V6_FLOW:
+ if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6)
+ cmd->data |= RXH_IP_SRC | RXH_IP_DST |
+ RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ cmd->data |= get_ethtool_ipv6_rss(bp);
+ break;
+ case UDP_V6_FLOW:
+ if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6)
+ cmd->data |= RXH_IP_SRC | RXH_IP_DST |
+ RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ /* fall through */
+ case SCTP_V6_FLOW:
+ case AH_ESP_V6_FLOW:
+ case AH_V6_FLOW:
+ case ESP_V6_FLOW:
+ case IPV6_FLOW:
+ cmd->data |= get_ethtool_ipv6_rss(bp);
+ break;
+ }
+ return 0;
+}
+
+#define RXH_4TUPLE (RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3)
+#define RXH_2TUPLE (RXH_IP_SRC | RXH_IP_DST)
+
+static int bnxt_srxfh(struct bnxt *bp, struct ethtool_rxnfc *cmd)
+{
+ u32 rss_hash_cfg = bp->rss_hash_cfg;
+ int tuple, rc = 0;
+
+ if (cmd->data == RXH_4TUPLE)
+ tuple = 4;
+ else if (cmd->data == RXH_2TUPLE)
+ tuple = 2;
+ else if (!cmd->data)
+ tuple = 0;
+ else
+ return -EINVAL;
+
+ if (cmd->flow_type == TCP_V4_FLOW) {
+ rss_hash_cfg &= ~VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4;
+ if (tuple == 4)
+ rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4;
+ } else if (cmd->flow_type == UDP_V4_FLOW) {
+ if (tuple == 4 && !(bp->flags & BNXT_FLAG_UDP_RSS_CAP))
+ return -EINVAL;
+ rss_hash_cfg &= ~VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4;
+ if (tuple == 4)
+ rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4;
+ } else if (cmd->flow_type == TCP_V6_FLOW) {
+ rss_hash_cfg &= ~VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6;
+ if (tuple == 4)
+ rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6;
+ } else if (cmd->flow_type == UDP_V6_FLOW) {
+ if (tuple == 4 && !(bp->flags & BNXT_FLAG_UDP_RSS_CAP))
+ return -EINVAL;
+ rss_hash_cfg &= ~VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6;
+ if (tuple == 4)
+ rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6;
+ } else if (tuple == 4) {
+ return -EINVAL;
+ }
+
+ switch (cmd->flow_type) {
+ case TCP_V4_FLOW:
+ case UDP_V4_FLOW:
+ case SCTP_V4_FLOW:
+ case AH_ESP_V4_FLOW:
+ case AH_V4_FLOW:
+ case ESP_V4_FLOW:
+ case IPV4_FLOW:
+ if (tuple == 2)
+ rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4;
+ else if (!tuple)
+ rss_hash_cfg &= ~VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4;
+ break;
+
+ case TCP_V6_FLOW:
+ case UDP_V6_FLOW:
+ case SCTP_V6_FLOW:
+ case AH_ESP_V6_FLOW:
+ case AH_V6_FLOW:
+ case ESP_V6_FLOW:
+ case IPV6_FLOW:
+ if (tuple == 2)
+ rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6;
+ else if (!tuple)
+ rss_hash_cfg &= ~VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6;
+ break;
+ }
+
+ if (bp->rss_hash_cfg == rss_hash_cfg)
+ return 0;
+
+ bp->rss_hash_cfg = rss_hash_cfg;
+ if (netif_running(bp->dev)) {
+ bnxt_close_nic(bp, false, false);
+ rc = bnxt_open_nic(bp, false, false);
+ }
+ return rc;
+}
static int bnxt_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
u32 *rule_locs)
@@ -550,6 +699,7 @@ static int bnxt_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
int rc = 0;
switch (cmd->cmd) {
+#ifdef CONFIG_RFS_ACCEL
case ETHTOOL_GRXRINGS:
cmd->data = bp->rx_nr_rings;
break;
@@ -566,6 +716,11 @@ static int bnxt_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
case ETHTOOL_GRXCLSRULE:
rc = bnxt_grxclsrule(bp, cmd);
break;
+#endif
+
+ case ETHTOOL_GRXFH:
+ rc = bnxt_grxfh(bp, cmd);
+ break;
default:
rc = -EOPNOTSUPP;
@@ -574,7 +729,23 @@ static int bnxt_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
return rc;
}
-#endif
+
+static int bnxt_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
+{
+ struct bnxt *bp = netdev_priv(dev);
+ int rc;
+
+ switch (cmd->cmd) {
+ case ETHTOOL_SRXFH:
+ rc = bnxt_srxfh(bp, cmd);
+ break;
+
+ default:
+ rc = -EOPNOTSUPP;
+ break;
+ }
+ return rc;
+}
static u32 bnxt_get_rxfh_indir_size(struct net_device *dev)
{
@@ -1885,9 +2056,8 @@ const struct ethtool_ops bnxt_ethtool_ops = {
.get_ringparam = bnxt_get_ringparam,
.get_channels = bnxt_get_channels,
.set_channels = bnxt_set_channels,
-#ifdef CONFIG_RFS_ACCEL
.get_rxnfc = bnxt_get_rxnfc,
-#endif
+ .set_rxnfc = bnxt_set_rxnfc,
.get_rxfh_indir_size = bnxt_get_rxfh_indir_size,
.get_rxfh_key_size = bnxt_get_rxfh_key_size,
.get_rxfh = bnxt_get_rxfh,
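
Annotation (illustrative, not part of the patch): bnxt_srxfh() only
accepts all-or-nothing tuples: the full 4-tuple, the IP-only 2-tuple,
or zero; any partial field set is rejected. A standalone rehearsal of
that classification; the RXH_* values restate <linux/ethtool.h> for
reference:

#include <stdio.h>
#include <stdint.h>

#define RXH_IP_SRC	(1 << 4)
#define RXH_IP_DST	(1 << 5)
#define RXH_L4_B_0_1	(1 << 6)	/* L4 source port */
#define RXH_L4_B_2_3	(1 << 7)	/* L4 destination port */

#define RXH_4TUPLE (RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3)
#define RXH_2TUPLE (RXH_IP_SRC | RXH_IP_DST)

static int classify(uint64_t data)
{
	if (data == RXH_4TUPLE)
		return 4;
	if (data == RXH_2TUPLE)
		return 2;
	if (!data)
		return 0;
	return -1;	/* the driver returns -EINVAL here */
}

int main(void)
{
	printf("%d\n", classify(RXH_4TUPLE));	/* 4 */
	printf("%d\n", classify(RXH_IP_SRC));	/* -1: partial tuples rejected */
	return 0;
}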
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
index 04a96cc3498a..2ddfa51519a1 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
@@ -1,6 +1,7 @@
/* Broadcom NetXtreme-C/E network driver.
*
* Copyright (c) 2014-2016 Broadcom Corporation
+ * Copyright (c) 2016 Broadcom Limited
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -10,29 +11,22 @@
#ifndef BNXT_HSI_H
#define BNXT_HSI_H
-/* per-context HW statistics -- chip view */
-struct ctx_hw_stats {
- __le64 rx_ucast_pkts;
- __le64 rx_mcast_pkts;
- __le64 rx_bcast_pkts;
- __le64 rx_discard_pkts;
- __le64 rx_drop_pkts;
- __le64 rx_ucast_bytes;
- __le64 rx_mcast_bytes;
- __le64 rx_bcast_bytes;
- __le64 tx_ucast_pkts;
- __le64 tx_mcast_pkts;
- __le64 tx_bcast_pkts;
- __le64 tx_discard_pkts;
- __le64 tx_drop_pkts;
- __le64 tx_ucast_bytes;
- __le64 tx_mcast_bytes;
- __le64 tx_bcast_bytes;
- __le64 tpa_pkts;
- __le64 tpa_bytes;
- __le64 tpa_events;
- __le64 tpa_aborts;
-};
+/* HSI and HWRM Specification 1.6.0 */
+#define HWRM_VERSION_MAJOR 1
+#define HWRM_VERSION_MINOR 6
+#define HWRM_VERSION_UPDATE 0
+
+#define HWRM_VERSION_STR "1.6.0"
+/*
+ * Following is the signature for HWRM message field that indicates not
+ * applicable (All F's). Need to cast it the size of the field if needed.
+ */
+#define HWRM_NA_SIGNATURE ((__le32)(-1))
+#define HWRM_MAX_REQ_LEN (128) /* hwrm_func_buf_rgtr */
+#define HWRM_MAX_RESP_LEN (176) /* hwrm_func_qstats */
+#define HW_HASH_INDEX_SIZE 0x80 /* 7 bit indirection table index. */
+#define HW_HASH_KEY_SIZE 40
+#define HWRM_RESP_VALID_KEY 1 /* valid key for HWRM response */
/* Statistics Ejection Buffer Completion Record (16 bytes) */
struct eject_cmpl {
@@ -50,77 +44,77 @@ struct eject_cmpl {
/* HWRM Completion Record (16 bytes) */
struct hwrm_cmpl {
__le16 type;
- #define HWRM_CMPL_TYPE_MASK 0x3fUL
- #define HWRM_CMPL_TYPE_SFT 0
- #define HWRM_CMPL_TYPE_HWRM_DONE 0x20UL
+ #define CMPL_TYPE_MASK 0x3fUL
+ #define CMPL_TYPE_SFT 0
+ #define CMPL_TYPE_HWRM_DONE 0x20UL
__le16 sequence_id;
__le32 unused_1;
__le32 v;
- #define HWRM_CMPL_V 0x1UL
+ #define CMPL_V 0x1UL
__le32 unused_3;
};
/* HWRM Forwarded Request (16 bytes) */
struct hwrm_fwd_req_cmpl {
__le16 req_len_type;
- #define HWRM_FWD_REQ_CMPL_TYPE_MASK 0x3fUL
- #define HWRM_FWD_REQ_CMPL_TYPE_SFT 0
- #define HWRM_FWD_REQ_CMPL_TYPE_HWRM_FWD_REQ 0x22UL
- #define HWRM_FWD_REQ_CMPL_REQ_LEN_MASK 0xffc0UL
- #define HWRM_FWD_REQ_CMPL_REQ_LEN_SFT 6
+ #define FWD_REQ_CMPL_TYPE_MASK 0x3fUL
+ #define FWD_REQ_CMPL_TYPE_SFT 0
+ #define FWD_REQ_CMPL_TYPE_HWRM_FWD_REQ 0x22UL
+ #define FWD_REQ_CMPL_REQ_LEN_MASK 0xffc0UL
+ #define FWD_REQ_CMPL_REQ_LEN_SFT 6
__le16 source_id;
__le32 unused_0;
__le32 req_buf_addr_v[2];
- #define HWRM_FWD_REQ_CMPL_V 0x1UL
- #define HWRM_FWD_REQ_CMPL_REQ_BUF_ADDR_MASK 0xfffffffeUL
- #define HWRM_FWD_REQ_CMPL_REQ_BUF_ADDR_SFT 1
+ #define FWD_REQ_CMPL_V 0x1UL
+ #define FWD_REQ_CMPL_REQ_BUF_ADDR_MASK 0xfffffffeUL
+ #define FWD_REQ_CMPL_REQ_BUF_ADDR_SFT 1
};
/* HWRM Forwarded Response (16 bytes) */
struct hwrm_fwd_resp_cmpl {
__le16 type;
- #define HWRM_FWD_RESP_CMPL_TYPE_MASK 0x3fUL
- #define HWRM_FWD_RESP_CMPL_TYPE_SFT 0
- #define HWRM_FWD_RESP_CMPL_TYPE_HWRM_FWD_RESP 0x24UL
+ #define FWD_RESP_CMPL_TYPE_MASK 0x3fUL
+ #define FWD_RESP_CMPL_TYPE_SFT 0
+ #define FWD_RESP_CMPL_TYPE_HWRM_FWD_RESP 0x24UL
__le16 source_id;
__le16 resp_len;
__le16 unused_1;
__le32 resp_buf_addr_v[2];
- #define HWRM_FWD_RESP_CMPL_V 0x1UL
- #define HWRM_FWD_RESP_CMPL_RESP_BUF_ADDR_MASK 0xfffffffeUL
- #define HWRM_FWD_RESP_CMPL_RESP_BUF_ADDR_SFT 1
+ #define FWD_RESP_CMPL_V 0x1UL
+ #define FWD_RESP_CMPL_RESP_BUF_ADDR_MASK 0xfffffffeUL
+ #define FWD_RESP_CMPL_RESP_BUF_ADDR_SFT 1
};
/* HWRM Asynchronous Event Completion Record (16 bytes) */
struct hwrm_async_event_cmpl {
__le16 type;
- #define HWRM_ASYNC_EVENT_CMPL_TYPE_MASK 0x3fUL
- #define HWRM_ASYNC_EVENT_CMPL_TYPE_SFT 0
- #define HWRM_ASYNC_EVENT_CMPL_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define ASYNC_EVENT_CMPL_TYPE_MASK 0x3fUL
+ #define ASYNC_EVENT_CMPL_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_TYPE_HWRM_ASYNC_EVENT 0x2eUL
__le16 event_id;
- #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE 0x0UL
- #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_MTU_CHANGE 0x1UL
- #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE 0x2UL
- #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_DCB_CONFIG_CHANGE 0x3UL
- #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED 0x4UL
- #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_NOT_ALLOWED 0x5UL
- #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE 0x6UL
- #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PORT_PHY_CFG_CHANGE 0x7UL
- #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_FUNC_DRVR_UNLOAD 0x10UL
- #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_FUNC_DRVR_LOAD 0x11UL
- #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_FUNC_FLR_PROC_CMPLT 0x12UL
- #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD 0x20UL
- #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_LOAD 0x21UL
- #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_VF_FLR 0x30UL
- #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_VF_MAC_ADDR_CHANGE 0x31UL
- #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_VF_COMM_STATUS_CHANGE 0x32UL
- #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE 0x33UL
- #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR 0xffUL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE 0x0UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_LINK_MTU_CHANGE 0x1UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE 0x2UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_DCB_CONFIG_CHANGE 0x3UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED 0x4UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_NOT_ALLOWED 0x5UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE 0x6UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_PORT_PHY_CFG_CHANGE 0x7UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_FUNC_DRVR_UNLOAD 0x10UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_FUNC_DRVR_LOAD 0x11UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_FUNC_FLR_PROC_CMPLT 0x12UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD 0x20UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_LOAD 0x21UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_VF_FLR 0x30UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_VF_MAC_ADDR_CHANGE 0x31UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_PF_VF_COMM_STATUS_CHANGE 0x32UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE 0x33UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR 0xffUL
__le32 event_data2;
u8 opaque_v;
- #define HWRM_ASYNC_EVENT_CMPL_V 0x1UL
- #define HWRM_ASYNC_EVENT_CMPL_OPAQUE_MASK 0xfeUL
- #define HWRM_ASYNC_EVENT_CMPL_OPAQUE_SFT 1
+ #define ASYNC_EVENT_CMPL_V 0x1UL
+ #define ASYNC_EVENT_CMPL_OPAQUE_MASK 0xfeUL
+ #define ASYNC_EVENT_CMPL_OPAQUE_SFT 1
u8 timestamp_lo;
__le16 timestamp_hi;
__le32 event_data1;
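/*
 * A minimal illustrative sketch (not part of this patch): dropping the
 * HWRM_ prefix shortens the macro names but leaves the usual mask/shift
 * decode of a completion's type field unchanged.
 */
static inline u16 example_cmpl_type(struct hwrm_cmpl *cmpl)
{
	return (le16_to_cpu(cmpl->type) & CMPL_TYPE_MASK) >> CMPL_TYPE_SFT;
}
/* example_cmpl_type(cmpl) == CMPL_TYPE_HWRM_DONE marks a finished command. */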
@@ -129,670 +123,391 @@ struct hwrm_async_event_cmpl {
/* HWRM Asynchronous Event Completion Record for link status change (16 bytes) */
struct hwrm_async_event_cmpl_link_status_change {
__le16 type;
- #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_TYPE_MASK 0x3fUL
- #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_TYPE_SFT 0
- #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_TYPE_MASK 0x3fUL
+ #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_TYPE_HWRM_ASYNC_EVENT 0x2eUL
__le16 event_id;
- #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_ID_LINK_STATUS_CHANGE 0x0UL
+ #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_ID_LINK_STATUS_CHANGE 0x0UL
__le32 event_data2;
u8 opaque_v;
- #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_V 0x1UL
- #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_OPAQUE_MASK 0xfeUL
- #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_OPAQUE_SFT 1
+ #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_V 0x1UL
+ #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_OPAQUE_MASK 0xfeUL
+ #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_OPAQUE_SFT 1
u8 timestamp_lo;
__le16 timestamp_hi;
__le32 event_data1;
- #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE 0x1UL
- #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE_DOWN (0x0UL << 0)
- #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE_UP (0x1UL << 0)
- #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE_LAST HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE_UP
- #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PORT_MASK 0xeUL
- #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PORT_SFT 1
- #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PORT_ID_MASK 0xffff0UL
- #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PORT_ID_SFT 4
+ #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE 0x1UL
+ #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE_DOWN (0x0UL << 0)
+ #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE_UP (0x1UL << 0)
+ #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE_LAST ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE_UP
+ #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PORT_MASK 0xeUL
+ #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PORT_SFT 1
+ #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PORT_ID_MASK 0xffff0UL
+ #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PORT_ID_SFT 4
};
/* HWRM Asynchronous Event Completion Record for link MTU change (16 bytes) */
struct hwrm_async_event_cmpl_link_mtu_change {
__le16 type;
- #define HWRM_ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_TYPE_MASK 0x3fUL
- #define HWRM_ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_TYPE_SFT 0
- #define HWRM_ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_TYPE_MASK 0x3fUL
+ #define ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_TYPE_HWRM_ASYNC_EVENT 0x2eUL
__le16 event_id;
- #define HWRM_ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_EVENT_ID_LINK_MTU_CHANGE 0x1UL
+ #define ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_EVENT_ID_LINK_MTU_CHANGE 0x1UL
__le32 event_data2;
u8 opaque_v;
- #define HWRM_ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_V 0x1UL
- #define HWRM_ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_OPAQUE_MASK 0xfeUL
- #define HWRM_ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_OPAQUE_SFT 1
+ #define ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_V 0x1UL
+ #define ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_OPAQUE_MASK 0xfeUL
+ #define ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_OPAQUE_SFT 1
u8 timestamp_lo;
__le16 timestamp_hi;
__le32 event_data1;
- #define HWRM_ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_EVENT_DATA1_NEW_MTU_MASK 0xffffUL
- #define HWRM_ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_EVENT_DATA1_NEW_MTU_SFT 0
+ #define ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_EVENT_DATA1_NEW_MTU_MASK 0xffffUL
+ #define ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_EVENT_DATA1_NEW_MTU_SFT 0
};
/* HWRM Asynchronous Event Completion Record for link speed change (16 bytes) */
struct hwrm_async_event_cmpl_link_speed_change {
__le16 type;
- #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_TYPE_MASK 0x3fUL
- #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_TYPE_SFT 0
- #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_TYPE_MASK 0x3fUL
+ #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_TYPE_HWRM_ASYNC_EVENT 0x2eUL
__le16 event_id;
- #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_ID_LINK_SPEED_CHANGE 0x2UL
+ #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_ID_LINK_SPEED_CHANGE 0x2UL
__le32 event_data2;
u8 opaque_v;
- #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_V 0x1UL
- #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_OPAQUE_MASK 0xfeUL
- #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_OPAQUE_SFT 1
+ #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_V 0x1UL
+ #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_OPAQUE_MASK 0xfeUL
+ #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_OPAQUE_SFT 1
u8 timestamp_lo;
__le16 timestamp_hi;
__le32 event_data1;
- #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_FORCE 0x1UL
- #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_MASK 0xfffeUL
- #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_SFT 1
- #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_100MB (0x1UL << 1)
- #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_1GB (0xaUL << 1)
- #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_2GB (0x14UL << 1)
- #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_2_5GB (0x19UL << 1)
- #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_10GB (0x64UL << 1)
- #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_20GB (0xc8UL << 1)
- #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_25GB (0xfaUL << 1)
- #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_40GB (0x190UL << 1)
- #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_50GB (0x1f4UL << 1)
- #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_100GB (0x3e8UL << 1)
- #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_LAST HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_100GB
- #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_PORT_ID_MASK 0xffff0000UL
- #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_PORT_ID_SFT 16
+ #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_FORCE 0x1UL
+ #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_MASK 0xfffeUL
+ #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_SFT 1
+ #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_100MB (0x1UL << 1)
+ #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_1GB (0xaUL << 1)
+ #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_2GB (0x14UL << 1)
+ #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_2_5GB (0x19UL << 1)
+ #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_10GB (0x64UL << 1)
+ #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_20GB (0xc8UL << 1)
+ #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_25GB (0xfaUL << 1)
+ #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_40GB (0x190UL << 1)
+ #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_50GB (0x1f4UL << 1)
+ #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_100GB (0x3e8UL << 1)
+ #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_LAST ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_100GB
+ #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_PORT_ID_MASK 0xffff0000UL
+ #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_PORT_ID_SFT 16
};
/* HWRM Asynchronous Event Completion Record for DCB Config change (16 bytes) */
struct hwrm_async_event_cmpl_dcb_config_change {
__le16 type;
- #define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_TYPE_MASK 0x3fUL
- #define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_TYPE_SFT 0
- #define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_TYPE_MASK 0x3fUL
+ #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_TYPE_HWRM_ASYNC_EVENT 0x2eUL
__le16 event_id;
- #define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_ID_DCB_CONFIG_CHANGE 0x3UL
+ #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_ID_DCB_CONFIG_CHANGE 0x3UL
__le32 event_data2;
+ #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA2_ETS 0x1UL
+ #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA2_PFC 0x2UL
+ #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA2_APP 0x4UL
u8 opaque_v;
- #define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_V 0x1UL
- #define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_OPAQUE_MASK 0xfeUL
- #define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_OPAQUE_SFT 1
+ #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_V 0x1UL
+ #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_OPAQUE_MASK 0xfeUL
+ #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_OPAQUE_SFT 1
u8 timestamp_lo;
__le16 timestamp_hi;
__le32 event_data1;
- #define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_PORT_ID_MASK 0xffffUL
- #define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_PORT_ID_SFT 0
+ #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_PORT_ID_MASK 0xffffUL
+ #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_PORT_ID_SFT 0
+ #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_RECOMMEND_ROCE_PRIORITY_MASK 0xff0000UL
+ #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_RECOMMEND_ROCE_PRIORITY_SFT 16
+ #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_RECOMMEND_ROCE_PRIORITY_NONE (0xffUL << 16)
+ #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_RECOMMEND_ROCE_PRIORITY_LAST ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_RECOMMEND_ROCE_PRIORITY_NONE
+ #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_RECOMMEND_L2_PRIORITY_MASK 0xff000000UL
+ #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_RECOMMEND_L2_PRIORITY_SFT 24
+ #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_RECOMMEND_L2_PRIORITY_NONE (0xffUL << 24)
+ #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_RECOMMEND_L2_PRIORITY_LAST ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_RECOMMEND_L2_PRIORITY_NONE
};
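/*
 * A minimal illustrative sketch (not part of this patch): the DCB
 * config change event now carries recommended RoCE and L2 priorities
 * in event_data1; per the NONE value above, 0xff in the byte means
 * no recommendation.
 */
static u8
example_roce_prio(struct hwrm_async_event_cmpl_dcb_config_change *ev)
{
	u32 data1 = le32_to_cpu(ev->event_data1);

	return (data1 &
		ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_RECOMMEND_ROCE_PRIORITY_MASK) >>
	       ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_RECOMMEND_ROCE_PRIORITY_SFT;
}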
/* HWRM Asynchronous Event Completion Record for port connection not allowed (16 bytes) */
struct hwrm_async_event_cmpl_port_conn_not_allowed {
__le16 type;
- #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_TYPE_MASK 0x3fUL
- #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_TYPE_SFT 0
- #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_TYPE_MASK 0x3fUL
+ #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_TYPE_HWRM_ASYNC_EVENT 0x2eUL
__le16 event_id;
- #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_ID_PORT_CONN_NOT_ALLOWED 0x4UL
+ #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_ID_PORT_CONN_NOT_ALLOWED 0x4UL
__le32 event_data2;
u8 opaque_v;
- #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_V 0x1UL
- #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_OPAQUE_MASK 0xfeUL
- #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_OPAQUE_SFT 1
+ #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_V 0x1UL
+ #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_OPAQUE_MASK 0xfeUL
+ #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_OPAQUE_SFT 1
u8 timestamp_lo;
__le16 timestamp_hi;
__le32 event_data1;
- #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_MASK 0xffffUL
- #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_SFT 0
- #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_MASK 0xff0000UL
- #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_SFT 16
- #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_NONE (0x0UL << 16)
- #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_DISABLETX (0x1UL << 16)
- #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_WARNINGMSG (0x2UL << 16)
- #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_PWRDOWN (0x3UL << 16)
- #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_LAST HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_PWRDOWN
+ #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_MASK 0xffffUL
+ #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_SFT 0
+ #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_MASK 0xff0000UL
+ #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_SFT 16
+ #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_NONE (0x0UL << 16)
+ #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_DISABLETX (0x1UL << 16)
+ #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_WARNINGMSG (0x2UL << 16)
+ #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_PWRDOWN (0x3UL << 16)
+ #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_LAST ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_PWRDOWN
};
/* HWRM Asynchronous Event Completion Record for link speed config not allowed (16 bytes) */
struct hwrm_async_event_cmpl_link_speed_cfg_not_allowed {
__le16 type;
- #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_TYPE_MASK 0x3fUL
- #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_TYPE_SFT 0
- #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_TYPE_MASK 0x3fUL
+ #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_TYPE_HWRM_ASYNC_EVENT 0x2eUL
__le16 event_id;
- #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_EVENT_ID_LINK_SPEED_CFG_NOT_ALLOWED 0x5UL
+ #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_EVENT_ID_LINK_SPEED_CFG_NOT_ALLOWED 0x5UL
__le32 event_data2;
u8 opaque_v;
- #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_V 0x1UL
- #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_OPAQUE_MASK 0xfeUL
- #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_OPAQUE_SFT 1
+ #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_V 0x1UL
+ #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_OPAQUE_MASK 0xfeUL
+ #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_OPAQUE_SFT 1
u8 timestamp_lo;
__le16 timestamp_hi;
__le32 event_data1;
- #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_EVENT_DATA1_PORT_ID_MASK 0xffffUL
- #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_EVENT_DATA1_PORT_ID_SFT 0
+ #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_EVENT_DATA1_PORT_ID_MASK 0xffffUL
+ #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_EVENT_DATA1_PORT_ID_SFT 0
};
/* HWRM Asynchronous Event Completion Record for link speed configuration change (16 bytes) */
struct hwrm_async_event_cmpl_link_speed_cfg_change {
__le16 type;
- #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_TYPE_MASK 0x3fUL
- #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_TYPE_SFT 0
- #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_TYPE_MASK 0x3fUL
+ #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_TYPE_HWRM_ASYNC_EVENT 0x2eUL
__le16 event_id;
- #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_ID_LINK_SPEED_CFG_CHANGE 0x6UL
+ #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_ID_LINK_SPEED_CFG_CHANGE 0x6UL
__le32 event_data2;
u8 opaque_v;
- #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_V 0x1UL
- #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_OPAQUE_MASK 0xfeUL
- #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_OPAQUE_SFT 1
+ #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_V 0x1UL
+ #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_OPAQUE_MASK 0xfeUL
+ #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_OPAQUE_SFT 1
u8 timestamp_lo;
__le16 timestamp_hi;
__le32 event_data1;
- #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_DATA1_PORT_ID_MASK 0xffffUL
- #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_DATA1_PORT_ID_SFT 0
- #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_DATA1_SUPPORTED_LINK_SPEEDS_CHANGE 0x10000UL
- #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_DATA1_ILLEGAL_LINK_SPEED_CFG 0x20000UL
+ #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_DATA1_PORT_ID_MASK 0xffffUL
+ #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_DATA1_PORT_ID_SFT 0
+ #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_DATA1_SUPPORTED_LINK_SPEEDS_CHANGE 0x10000UL
+ #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_DATA1_ILLEGAL_LINK_SPEED_CFG 0x20000UL
};
/* HWRM Asynchronous Event Completion Record for Function Driver Unload (16 bytes) */
struct hwrm_async_event_cmpl_func_drvr_unload {
__le16 type;
- #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_TYPE_MASK 0x3fUL
- #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_TYPE_SFT 0
- #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_TYPE_MASK 0x3fUL
+ #define ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_TYPE_HWRM_ASYNC_EVENT 0x2eUL
__le16 event_id;
- #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_EVENT_ID_FUNC_DRVR_UNLOAD 0x10UL
+ #define ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_EVENT_ID_FUNC_DRVR_UNLOAD 0x10UL
__le32 event_data2;
u8 opaque_v;
- #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_V 0x1UL
- #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_OPAQUE_MASK 0xfeUL
- #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_OPAQUE_SFT 1
+ #define ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_V 0x1UL
+ #define ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_OPAQUE_MASK 0xfeUL
+ #define ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_OPAQUE_SFT 1
u8 timestamp_lo;
__le16 timestamp_hi;
__le32 event_data1;
- #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_EVENT_DATA1_FUNC_ID_MASK 0xffffUL
- #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_EVENT_DATA1_FUNC_ID_SFT 0
+ #define ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_EVENT_DATA1_FUNC_ID_MASK 0xffffUL
+ #define ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_EVENT_DATA1_FUNC_ID_SFT 0
};
/* HWRM Asynchronous Event Completion Record for Function Driver load (16 bytes) */
struct hwrm_async_event_cmpl_func_drvr_load {
__le16 type;
- #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_TYPE_MASK 0x3fUL
- #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_TYPE_SFT 0
- #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_TYPE_MASK 0x3fUL
+ #define ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ __le16 event_id;
+ #define ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_EVENT_ID_FUNC_DRVR_LOAD 0x11UL
+ __le32 event_data2;
+ u8 opaque_v;
+ #define ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_V 0x1UL
+ #define ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_OPAQUE_MASK 0xfeUL
+ #define ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_OPAQUE_SFT 1
+ u8 timestamp_lo;
+ __le16 timestamp_hi;
+ __le32 event_data1;
+ #define ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_EVENT_DATA1_FUNC_ID_MASK 0xffffUL
+ #define ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_EVENT_DATA1_FUNC_ID_SFT 0
+};
+
+/* HWRM Asynchronous Event Completion Record to indicate completion of FLR related processing (16 bytes) */
+struct hwrm_async_event_cmpl_func_flr_proc_cmplt {
+ __le16 type;
+ #define ASYNC_EVENT_CMPL_FUNC_FLR_PROC_CMPLT_TYPE_MASK 0x3fUL
+ #define ASYNC_EVENT_CMPL_FUNC_FLR_PROC_CMPLT_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_FUNC_FLR_PROC_CMPLT_TYPE_HWRM_ASYNC_EVENT 0x2eUL
__le16 event_id;
- #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_EVENT_ID_FUNC_DRVR_LOAD 0x11UL
+ #define ASYNC_EVENT_CMPL_FUNC_FLR_PROC_CMPLT_EVENT_ID_FUNC_FLR_PROC_CMPLT 0x12UL
__le32 event_data2;
u8 opaque_v;
- #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_V 0x1UL
- #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_OPAQUE_MASK 0xfeUL
- #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_OPAQUE_SFT 1
+ #define ASYNC_EVENT_CMPL_FUNC_FLR_PROC_CMPLT_V 0x1UL
+ #define ASYNC_EVENT_CMPL_FUNC_FLR_PROC_CMPLT_OPAQUE_MASK 0xfeUL
+ #define ASYNC_EVENT_CMPL_FUNC_FLR_PROC_CMPLT_OPAQUE_SFT 1
u8 timestamp_lo;
__le16 timestamp_hi;
__le32 event_data1;
- #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_EVENT_DATA1_FUNC_ID_MASK 0xffffUL
- #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_EVENT_DATA1_FUNC_ID_SFT 0
+ #define ASYNC_EVENT_CMPL_FUNC_FLR_PROC_CMPLT_EVENT_DATA1_FUNC_ID_MASK 0xffffUL
+ #define ASYNC_EVENT_CMPL_FUNC_FLR_PROC_CMPLT_EVENT_DATA1_FUNC_ID_SFT 0
};
/* HWRM Asynchronous Event Completion Record for PF Driver Unload (16 bytes) */
struct hwrm_async_event_cmpl_pf_drvr_unload {
__le16 type;
- #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_TYPE_MASK 0x3fUL
- #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_TYPE_SFT 0
- #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_TYPE_MASK 0x3fUL
+ #define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_TYPE_HWRM_ASYNC_EVENT 0x2eUL
__le16 event_id;
- #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_EVENT_ID_PF_DRVR_UNLOAD 0x20UL
+ #define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_EVENT_ID_PF_DRVR_UNLOAD 0x20UL
__le32 event_data2;
u8 opaque_v;
- #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_V 0x1UL
- #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_OPAQUE_MASK 0xfeUL
- #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_OPAQUE_SFT 1
+ #define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_V 0x1UL
+ #define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_OPAQUE_MASK 0xfeUL
+ #define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_OPAQUE_SFT 1
u8 timestamp_lo;
__le16 timestamp_hi;
__le32 event_data1;
- #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_EVENT_DATA1_FUNC_ID_MASK 0xffffUL
- #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_EVENT_DATA1_FUNC_ID_SFT 0
- #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_EVENT_DATA1_PORT_MASK 0x70000UL
- #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_EVENT_DATA1_PORT_SFT 16
+ #define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_EVENT_DATA1_FUNC_ID_MASK 0xffffUL
+ #define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_EVENT_DATA1_FUNC_ID_SFT 0
+ #define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_EVENT_DATA1_PORT_MASK 0x70000UL
+ #define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_EVENT_DATA1_PORT_SFT 16
};
/* HWRM Asynchronous Event Completion Record for PF Driver load (16 bytes) */
struct hwrm_async_event_cmpl_pf_drvr_load {
__le16 type;
- #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_TYPE_MASK 0x3fUL
- #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_TYPE_SFT 0
- #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define ASYNC_EVENT_CMPL_PF_DRVR_LOAD_TYPE_MASK 0x3fUL
+ #define ASYNC_EVENT_CMPL_PF_DRVR_LOAD_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_PF_DRVR_LOAD_TYPE_HWRM_ASYNC_EVENT 0x2eUL
__le16 event_id;
- #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_EVENT_ID_PF_DRVR_LOAD 0x21UL
+ #define ASYNC_EVENT_CMPL_PF_DRVR_LOAD_EVENT_ID_PF_DRVR_LOAD 0x21UL
__le32 event_data2;
u8 opaque_v;
- #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_V 0x1UL
- #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_OPAQUE_MASK 0xfeUL
- #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_OPAQUE_SFT 1
+ #define ASYNC_EVENT_CMPL_PF_DRVR_LOAD_V 0x1UL
+ #define ASYNC_EVENT_CMPL_PF_DRVR_LOAD_OPAQUE_MASK 0xfeUL
+ #define ASYNC_EVENT_CMPL_PF_DRVR_LOAD_OPAQUE_SFT 1
u8 timestamp_lo;
__le16 timestamp_hi;
__le32 event_data1;
- #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_EVENT_DATA1_FUNC_ID_MASK 0xffffUL
- #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_EVENT_DATA1_FUNC_ID_SFT 0
- #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_EVENT_DATA1_PORT_MASK 0x70000UL
- #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_EVENT_DATA1_PORT_SFT 16
+ #define ASYNC_EVENT_CMPL_PF_DRVR_LOAD_EVENT_DATA1_FUNC_ID_MASK 0xffffUL
+ #define ASYNC_EVENT_CMPL_PF_DRVR_LOAD_EVENT_DATA1_FUNC_ID_SFT 0
+ #define ASYNC_EVENT_CMPL_PF_DRVR_LOAD_EVENT_DATA1_PORT_MASK 0x70000UL
+ #define ASYNC_EVENT_CMPL_PF_DRVR_LOAD_EVENT_DATA1_PORT_SFT 16
};
/* HWRM Asynchronous Event Completion Record for VF FLR (16 bytes) */
struct hwrm_async_event_cmpl_vf_flr {
__le16 type;
- #define HWRM_ASYNC_EVENT_CMPL_VF_FLR_TYPE_MASK 0x3fUL
- #define HWRM_ASYNC_EVENT_CMPL_VF_FLR_TYPE_SFT 0
- #define HWRM_ASYNC_EVENT_CMPL_VF_FLR_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define ASYNC_EVENT_CMPL_VF_FLR_TYPE_MASK 0x3fUL
+ #define ASYNC_EVENT_CMPL_VF_FLR_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_VF_FLR_TYPE_HWRM_ASYNC_EVENT 0x2eUL
__le16 event_id;
- #define HWRM_ASYNC_EVENT_CMPL_VF_FLR_EVENT_ID_VF_FLR 0x30UL
+ #define ASYNC_EVENT_CMPL_VF_FLR_EVENT_ID_VF_FLR 0x30UL
__le32 event_data2;
u8 opaque_v;
- #define HWRM_ASYNC_EVENT_CMPL_VF_FLR_V 0x1UL
- #define HWRM_ASYNC_EVENT_CMPL_VF_FLR_OPAQUE_MASK 0xfeUL
- #define HWRM_ASYNC_EVENT_CMPL_VF_FLR_OPAQUE_SFT 1
+ #define ASYNC_EVENT_CMPL_VF_FLR_V 0x1UL
+ #define ASYNC_EVENT_CMPL_VF_FLR_OPAQUE_MASK 0xfeUL
+ #define ASYNC_EVENT_CMPL_VF_FLR_OPAQUE_SFT 1
u8 timestamp_lo;
__le16 timestamp_hi;
__le32 event_data1;
- #define HWRM_ASYNC_EVENT_CMPL_VF_FLR_EVENT_DATA1_VF_ID_MASK 0xffffUL
- #define HWRM_ASYNC_EVENT_CMPL_VF_FLR_EVENT_DATA1_VF_ID_SFT 0
+ #define ASYNC_EVENT_CMPL_VF_FLR_EVENT_DATA1_VF_ID_MASK 0xffffUL
+ #define ASYNC_EVENT_CMPL_VF_FLR_EVENT_DATA1_VF_ID_SFT 0
};
/* HWRM Asynchronous Event Completion Record for VF MAC Addr change (16 bytes) */
struct hwrm_async_event_cmpl_vf_mac_addr_change {
__le16 type;
- #define HWRM_ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_TYPE_MASK 0x3fUL
- #define HWRM_ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_TYPE_SFT 0
- #define HWRM_ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_TYPE_MASK 0x3fUL
+ #define ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_TYPE_HWRM_ASYNC_EVENT 0x2eUL
__le16 event_id;
- #define HWRM_ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_EVENT_ID_VF_MAC_ADDR_CHANGE 0x31UL
+ #define ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_EVENT_ID_VF_MAC_ADDR_CHANGE 0x31UL
__le32 event_data2;
u8 opaque_v;
- #define HWRM_ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_V 0x1UL
- #define HWRM_ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_OPAQUE_MASK 0xfeUL
- #define HWRM_ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_OPAQUE_SFT 1
+ #define ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_V 0x1UL
+ #define ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_OPAQUE_MASK 0xfeUL
+ #define ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_OPAQUE_SFT 1
u8 timestamp_lo;
__le16 timestamp_hi;
__le32 event_data1;
- #define HWRM_ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_EVENT_DATA1_VF_ID_MASK 0xffffUL
- #define HWRM_ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_EVENT_DATA1_VF_ID_SFT 0
+ #define ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_EVENT_DATA1_VF_ID_MASK 0xffffUL
+ #define ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_EVENT_DATA1_VF_ID_SFT 0
};
/* HWRM Asynchronous Event Completion Record for PF-VF communication status change (16 bytes) */
struct hwrm_async_event_cmpl_pf_vf_comm_status_change {
__le16 type;
- #define HWRM_ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_TYPE_MASK 0x3fUL
- #define HWRM_ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_TYPE_SFT 0
- #define HWRM_ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_TYPE_MASK 0x3fUL
+ #define ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_TYPE_HWRM_ASYNC_EVENT 0x2eUL
__le16 event_id;
- #define HWRM_ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_EVENT_ID_PF_VF_COMM_STATUS_CHANGE 0x32UL
+ #define ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_EVENT_ID_PF_VF_COMM_STATUS_CHANGE 0x32UL
__le32 event_data2;
u8 opaque_v;
- #define HWRM_ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_V 0x1UL
- #define HWRM_ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_OPAQUE_MASK 0xfeUL
- #define HWRM_ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_OPAQUE_SFT 1
+ #define ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_V 0x1UL
+ #define ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_OPAQUE_MASK 0xfeUL
+ #define ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_OPAQUE_SFT 1
u8 timestamp_lo;
__le16 timestamp_hi;
__le32 event_data1;
- #define HWRM_ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_EVENT_DATA1_COMM_ESTABLISHED 0x1UL
+ #define ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_EVENT_DATA1_COMM_ESTABLISHED 0x1UL
};
/* HWRM Asynchronous Event Completion Record for VF configuration change (16 bytes) */
struct hwrm_async_event_cmpl_vf_cfg_change {
__le16 type;
- #define HWRM_ASYNC_EVENT_CMPL_VF_CFG_CHANGE_TYPE_MASK 0x3fUL
- #define HWRM_ASYNC_EVENT_CMPL_VF_CFG_CHANGE_TYPE_SFT 0
- #define HWRM_ASYNC_EVENT_CMPL_VF_CFG_CHANGE_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_TYPE_MASK 0x3fUL
+ #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_TYPE_HWRM_ASYNC_EVENT 0x2eUL
__le16 event_id;
- #define HWRM_ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_ID_VF_CFG_CHANGE 0x33UL
+ #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_ID_VF_CFG_CHANGE 0x33UL
__le32 event_data2;
u8 opaque_v;
- #define HWRM_ASYNC_EVENT_CMPL_VF_CFG_CHANGE_V 0x1UL
- #define HWRM_ASYNC_EVENT_CMPL_VF_CFG_CHANGE_OPAQUE_MASK 0xfeUL
- #define HWRM_ASYNC_EVENT_CMPL_VF_CFG_CHANGE_OPAQUE_SFT 1
+ #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_V 0x1UL
+ #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_OPAQUE_MASK 0xfeUL
+ #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_OPAQUE_SFT 1
u8 timestamp_lo;
__le16 timestamp_hi;
__le32 event_data1;
- #define HWRM_ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_MTU_CHANGE 0x1UL
- #define HWRM_ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_MRU_CHANGE 0x2UL
- #define HWRM_ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_DFLT_MAC_ADDR_CHANGE 0x4UL
- #define HWRM_ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_DFLT_VLAN_CHANGE 0x8UL
+ #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_MTU_CHANGE 0x1UL
+ #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_MRU_CHANGE 0x2UL
+ #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_DFLT_MAC_ADDR_CHANGE 0x4UL
+ #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_DFLT_VLAN_CHANGE 0x8UL
};
/* HWRM Asynchronous Event Completion Record for HWRM Error (16 bytes) */
struct hwrm_async_event_cmpl_hwrm_error {
__le16 type;
- #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_TYPE_MASK 0x3fUL
- #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_TYPE_SFT 0
- #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define ASYNC_EVENT_CMPL_HWRM_ERROR_TYPE_MASK 0x3fUL
+ #define ASYNC_EVENT_CMPL_HWRM_ERROR_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_HWRM_ERROR_TYPE_HWRM_ASYNC_EVENT 0x2eUL
__le16 event_id;
- #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_ID_HWRM_ERROR 0xffUL
+ #define ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_ID_HWRM_ERROR 0xffUL
__le32 event_data2;
- #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_MASK 0xffUL
- #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_SFT 0
- #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_WARNING 0x0UL
- #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_NONFATAL 0x1UL
- #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_FATAL 0x2UL
- #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_LAST HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_FATAL
+ #define ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_MASK 0xffUL
+ #define ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_SFT 0
+ #define ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_WARNING 0x0UL
+ #define ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_NONFATAL 0x1UL
+ #define ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_FATAL 0x2UL
+ #define ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_LAST ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_FATAL
u8 opaque_v;
- #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_V 0x1UL
- #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_OPAQUE_MASK 0xfeUL
- #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_OPAQUE_SFT 1
+ #define ASYNC_EVENT_CMPL_HWRM_ERROR_V 0x1UL
+ #define ASYNC_EVENT_CMPL_HWRM_ERROR_OPAQUE_MASK 0xfeUL
+ #define ASYNC_EVENT_CMPL_HWRM_ERROR_OPAQUE_SFT 1
u8 timestamp_lo;
__le16 timestamp_hi;
__le32 event_data1;
- #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA1_TIMESTAMP 0x1UL
-};
-
-/* HW Resource Manager Specification 1.5.1 */
-#define HWRM_VERSION_MAJOR 1
-#define HWRM_VERSION_MINOR 5
-#define HWRM_VERSION_UPDATE 1
-
-#define HWRM_VERSION_STR "1.5.1"
-/*
- * Following is the signature for HWRM message field that indicates not
- * applicable (All F's). Need to cast it the size of the field if needed.
- */
-#define HWRM_NA_SIGNATURE ((__le32)(-1))
-#define HWRM_MAX_REQ_LEN (128) /* hwrm_func_buf_rgtr */
-#define HWRM_MAX_RESP_LEN (176) /* hwrm_func_qstats */
-#define HW_HASH_INDEX_SIZE 0x80 /* 7 bit indirection table index. */
-#define HW_HASH_KEY_SIZE 40
-#define HWRM_RESP_VALID_KEY 1 /* valid key for HWRM response */
-/* Input (16 bytes) */
-struct input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
-};
-
-/* Output (8 bytes) */
-struct output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
-};
-
-/* Command numbering (8 bytes) */
-struct cmd_nums {
- __le16 req_type;
- #define HWRM_VER_GET (0x0UL)
- #define HWRM_FUNC_BUF_UNRGTR (0xeUL)
- #define HWRM_FUNC_VF_CFG (0xfUL)
- #define RESERVED1 (0x10UL)
- #define HWRM_FUNC_RESET (0x11UL)
- #define HWRM_FUNC_GETFID (0x12UL)
- #define HWRM_FUNC_VF_ALLOC (0x13UL)
- #define HWRM_FUNC_VF_FREE (0x14UL)
- #define HWRM_FUNC_QCAPS (0x15UL)
- #define HWRM_FUNC_QCFG (0x16UL)
- #define HWRM_FUNC_CFG (0x17UL)
- #define HWRM_FUNC_QSTATS (0x18UL)
- #define HWRM_FUNC_CLR_STATS (0x19UL)
- #define HWRM_FUNC_DRV_UNRGTR (0x1aUL)
- #define HWRM_FUNC_VF_RESC_FREE (0x1bUL)
- #define HWRM_FUNC_VF_VNIC_IDS_QUERY (0x1cUL)
- #define HWRM_FUNC_DRV_RGTR (0x1dUL)
- #define HWRM_FUNC_DRV_QVER (0x1eUL)
- #define HWRM_FUNC_BUF_RGTR (0x1fUL)
- #define HWRM_PORT_PHY_CFG (0x20UL)
- #define HWRM_PORT_MAC_CFG (0x21UL)
- #define HWRM_PORT_TS_QUERY (0x22UL)
- #define HWRM_PORT_QSTATS (0x23UL)
- #define HWRM_PORT_LPBK_QSTATS (0x24UL)
- #define HWRM_PORT_CLR_STATS (0x25UL)
- #define HWRM_PORT_LPBK_CLR_STATS (0x26UL)
- #define HWRM_PORT_PHY_QCFG (0x27UL)
- #define HWRM_PORT_MAC_QCFG (0x28UL)
- #define HWRM_PORT_BLINK_LED (0x29UL)
- #define HWRM_PORT_PHY_QCAPS (0x2aUL)
- #define HWRM_PORT_PHY_I2C_WRITE (0x2bUL)
- #define HWRM_PORT_PHY_I2C_READ (0x2cUL)
- #define HWRM_QUEUE_QPORTCFG (0x30UL)
- #define HWRM_QUEUE_QCFG (0x31UL)
- #define HWRM_QUEUE_CFG (0x32UL)
- #define RESERVED2 (0x33UL)
- #define RESERVED3 (0x34UL)
- #define HWRM_QUEUE_PFCENABLE_QCFG (0x35UL)
- #define HWRM_QUEUE_PFCENABLE_CFG (0x36UL)
- #define HWRM_QUEUE_PRI2COS_QCFG (0x37UL)
- #define HWRM_QUEUE_PRI2COS_CFG (0x38UL)
- #define HWRM_QUEUE_COS2BW_QCFG (0x39UL)
- #define HWRM_QUEUE_COS2BW_CFG (0x3aUL)
- #define HWRM_VNIC_ALLOC (0x40UL)
- #define HWRM_VNIC_FREE (0x41UL)
- #define HWRM_VNIC_CFG (0x42UL)
- #define HWRM_VNIC_QCFG (0x43UL)
- #define HWRM_VNIC_TPA_CFG (0x44UL)
- #define HWRM_VNIC_TPA_QCFG (0x45UL)
- #define HWRM_VNIC_RSS_CFG (0x46UL)
- #define HWRM_VNIC_RSS_QCFG (0x47UL)
- #define HWRM_VNIC_PLCMODES_CFG (0x48UL)
- #define HWRM_VNIC_PLCMODES_QCFG (0x49UL)
- #define HWRM_VNIC_QCAPS (0x4aUL)
- #define HWRM_RING_ALLOC (0x50UL)
- #define HWRM_RING_FREE (0x51UL)
- #define HWRM_RING_CMPL_RING_QAGGINT_PARAMS (0x52UL)
- #define HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS (0x53UL)
- #define HWRM_RING_RESET (0x5eUL)
- #define HWRM_RING_GRP_ALLOC (0x60UL)
- #define HWRM_RING_GRP_FREE (0x61UL)
- #define RESERVED5 (0x64UL)
- #define RESERVED6 (0x65UL)
- #define HWRM_VNIC_RSS_COS_LB_CTX_ALLOC (0x70UL)
- #define HWRM_VNIC_RSS_COS_LB_CTX_FREE (0x71UL)
- #define HWRM_CFA_L2_FILTER_ALLOC (0x90UL)
- #define HWRM_CFA_L2_FILTER_FREE (0x91UL)
- #define HWRM_CFA_L2_FILTER_CFG (0x92UL)
- #define HWRM_CFA_L2_SET_RX_MASK (0x93UL)
- #define RESERVED4 (0x94UL)
- #define HWRM_CFA_TUNNEL_FILTER_ALLOC (0x95UL)
- #define HWRM_CFA_TUNNEL_FILTER_FREE (0x96UL)
- #define HWRM_CFA_ENCAP_RECORD_ALLOC (0x97UL)
- #define HWRM_CFA_ENCAP_RECORD_FREE (0x98UL)
- #define HWRM_CFA_NTUPLE_FILTER_ALLOC (0x99UL)
- #define HWRM_CFA_NTUPLE_FILTER_FREE (0x9aUL)
- #define HWRM_CFA_NTUPLE_FILTER_CFG (0x9bUL)
- #define HWRM_CFA_EM_FLOW_ALLOC (0x9cUL)
- #define HWRM_CFA_EM_FLOW_FREE (0x9dUL)
- #define HWRM_CFA_EM_FLOW_CFG (0x9eUL)
- #define HWRM_TUNNEL_DST_PORT_QUERY (0xa0UL)
- #define HWRM_TUNNEL_DST_PORT_ALLOC (0xa1UL)
- #define HWRM_TUNNEL_DST_PORT_FREE (0xa2UL)
- #define HWRM_STAT_CTX_ALLOC (0xb0UL)
- #define HWRM_STAT_CTX_FREE (0xb1UL)
- #define HWRM_STAT_CTX_QUERY (0xb2UL)
- #define HWRM_STAT_CTX_CLR_STATS (0xb3UL)
- #define HWRM_FW_RESET (0xc0UL)
- #define HWRM_FW_QSTATUS (0xc1UL)
- #define HWRM_FW_SET_TIME (0xc8UL)
- #define HWRM_FW_GET_TIME (0xc9UL)
- #define HWRM_EXEC_FWD_RESP (0xd0UL)
- #define HWRM_REJECT_FWD_RESP (0xd1UL)
- #define HWRM_FWD_RESP (0xd2UL)
- #define HWRM_FWD_ASYNC_EVENT_CMPL (0xd3UL)
- #define HWRM_TEMP_MONITOR_QUERY (0xe0UL)
- #define HWRM_WOL_FILTER_ALLOC (0xf0UL)
- #define HWRM_WOL_FILTER_FREE (0xf1UL)
- #define HWRM_WOL_FILTER_QCFG (0xf2UL)
- #define HWRM_WOL_REASON_QCFG (0xf3UL)
- #define HWRM_DBG_READ_DIRECT (0xff10UL)
- #define HWRM_DBG_READ_INDIRECT (0xff11UL)
- #define HWRM_DBG_WRITE_DIRECT (0xff12UL)
- #define HWRM_DBG_WRITE_INDIRECT (0xff13UL)
- #define HWRM_DBG_DUMP (0xff14UL)
- #define HWRM_NVM_INSTALL_UPDATE (0xfff3UL)
- #define HWRM_NVM_MODIFY (0xfff4UL)
- #define HWRM_NVM_VERIFY_UPDATE (0xfff5UL)
- #define HWRM_NVM_GET_DEV_INFO (0xfff6UL)
- #define HWRM_NVM_ERASE_DIR_ENTRY (0xfff7UL)
- #define HWRM_NVM_MOD_DIR_ENTRY (0xfff8UL)
- #define HWRM_NVM_FIND_DIR_ENTRY (0xfff9UL)
- #define HWRM_NVM_GET_DIR_ENTRIES (0xfffaUL)
- #define HWRM_NVM_GET_DIR_INFO (0xfffbUL)
- #define HWRM_NVM_RAW_DUMP (0xfffcUL)
- #define HWRM_NVM_READ (0xfffdUL)
- #define HWRM_NVM_WRITE (0xfffeUL)
- #define HWRM_NVM_RAW_WRITE_BLK (0xffffUL)
- __le16 unused_0[3];
-};
-
-/* Return Codes (8 bytes) */
-struct ret_codes {
- __le16 error_code;
- #define HWRM_ERR_CODE_SUCCESS (0x0UL)
- #define HWRM_ERR_CODE_FAIL (0x1UL)
- #define HWRM_ERR_CODE_INVALID_PARAMS (0x2UL)
- #define HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED (0x3UL)
- #define HWRM_ERR_CODE_RESOURCE_ALLOC_ERROR (0x4UL)
- #define HWRM_ERR_CODE_INVALID_FLAGS (0x5UL)
- #define HWRM_ERR_CODE_INVALID_ENABLES (0x6UL)
- #define HWRM_ERR_CODE_HWRM_ERROR (0xfUL)
- #define HWRM_ERR_CODE_UNKNOWN_ERR (0xfffeUL)
- #define HWRM_ERR_CODE_CMD_NOT_SUPPORTED (0xffffUL)
- __le16 unused_0[3];
-};
-
-/* Output (16 bytes) */
-struct hwrm_err_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 opaque_0;
- __le16 opaque_1;
- u8 cmd_err;
- u8 valid;
-};
-
-/* Port Tx Statistics Formats (408 bytes) */
-struct tx_port_stats {
- __le64 tx_64b_frames;
- __le64 tx_65b_127b_frames;
- __le64 tx_128b_255b_frames;
- __le64 tx_256b_511b_frames;
- __le64 tx_512b_1023b_frames;
- __le64 tx_1024b_1518_frames;
- __le64 tx_good_vlan_frames;
- __le64 tx_1519b_2047_frames;
- __le64 tx_2048b_4095b_frames;
- __le64 tx_4096b_9216b_frames;
- __le64 tx_9217b_16383b_frames;
- __le64 tx_good_frames;
- __le64 tx_total_frames;
- __le64 tx_ucast_frames;
- __le64 tx_mcast_frames;
- __le64 tx_bcast_frames;
- __le64 tx_pause_frames;
- __le64 tx_pfc_frames;
- __le64 tx_jabber_frames;
- __le64 tx_fcs_err_frames;
- __le64 tx_control_frames;
- __le64 tx_oversz_frames;
- __le64 tx_single_dfrl_frames;
- __le64 tx_multi_dfrl_frames;
- __le64 tx_single_coll_frames;
- __le64 tx_multi_coll_frames;
- __le64 tx_late_coll_frames;
- __le64 tx_excessive_coll_frames;
- __le64 tx_frag_frames;
- __le64 tx_err;
- __le64 tx_tagged_frames;
- __le64 tx_dbl_tagged_frames;
- __le64 tx_runt_frames;
- __le64 tx_fifo_underruns;
- __le64 tx_pfc_ena_frames_pri0;
- __le64 tx_pfc_ena_frames_pri1;
- __le64 tx_pfc_ena_frames_pri2;
- __le64 tx_pfc_ena_frames_pri3;
- __le64 tx_pfc_ena_frames_pri4;
- __le64 tx_pfc_ena_frames_pri5;
- __le64 tx_pfc_ena_frames_pri6;
- __le64 tx_pfc_ena_frames_pri7;
- __le64 tx_eee_lpi_events;
- __le64 tx_eee_lpi_duration;
- __le64 tx_llfc_logical_msgs;
- __le64 tx_hcfc_msgs;
- __le64 tx_total_collisions;
- __le64 tx_bytes;
- __le64 tx_xthol_frames;
- __le64 tx_stat_discard;
- __le64 tx_stat_error;
-};
-
-/* Port Rx Statistics Formats (528 bytes) */
-struct rx_port_stats {
- __le64 rx_64b_frames;
- __le64 rx_65b_127b_frames;
- __le64 rx_128b_255b_frames;
- __le64 rx_256b_511b_frames;
- __le64 rx_512b_1023b_frames;
- __le64 rx_1024b_1518_frames;
- __le64 rx_good_vlan_frames;
- __le64 rx_1519b_2047b_frames;
- __le64 rx_2048b_4095b_frames;
- __le64 rx_4096b_9216b_frames;
- __le64 rx_9217b_16383b_frames;
- __le64 rx_total_frames;
- __le64 rx_ucast_frames;
- __le64 rx_mcast_frames;
- __le64 rx_bcast_frames;
- __le64 rx_fcs_err_frames;
- __le64 rx_ctrl_frames;
- __le64 rx_pause_frames;
- __le64 rx_pfc_frames;
- __le64 rx_unsupported_opcode_frames;
- __le64 rx_unsupported_da_pausepfc_frames;
- __le64 rx_wrong_sa_frames;
- __le64 rx_align_err_frames;
- __le64 rx_oor_len_frames;
- __le64 rx_code_err_frames;
- __le64 rx_false_carrier_frames;
- __le64 rx_ovrsz_frames;
- __le64 rx_jbr_frames;
- __le64 rx_mtu_err_frames;
- __le64 rx_match_crc_frames;
- __le64 rx_promiscuous_frames;
- __le64 rx_tagged_frames;
- __le64 rx_double_tagged_frames;
- __le64 rx_trunc_frames;
- __le64 rx_good_frames;
- __le64 rx_pfc_xon2xoff_frames_pri0;
- __le64 rx_pfc_xon2xoff_frames_pri1;
- __le64 rx_pfc_xon2xoff_frames_pri2;
- __le64 rx_pfc_xon2xoff_frames_pri3;
- __le64 rx_pfc_xon2xoff_frames_pri4;
- __le64 rx_pfc_xon2xoff_frames_pri5;
- __le64 rx_pfc_xon2xoff_frames_pri6;
- __le64 rx_pfc_xon2xoff_frames_pri7;
- __le64 rx_pfc_ena_frames_pri0;
- __le64 rx_pfc_ena_frames_pri1;
- __le64 rx_pfc_ena_frames_pri2;
- __le64 rx_pfc_ena_frames_pri3;
- __le64 rx_pfc_ena_frames_pri4;
- __le64 rx_pfc_ena_frames_pri5;
- __le64 rx_pfc_ena_frames_pri6;
- __le64 rx_pfc_ena_frames_pri7;
- __le64 rx_sch_crc_err_frames;
- __le64 rx_undrsz_frames;
- __le64 rx_frag_frames;
- __le64 rx_eee_lpi_events;
- __le64 rx_eee_lpi_duration;
- __le64 rx_llfc_physical_msgs;
- __le64 rx_llfc_logical_msgs;
- __le64 rx_llfc_msgs_with_crc_err;
- __le64 rx_hcfc_msgs;
- __le64 rx_hcfc_msgs_with_crc_err;
- __le64 rx_bytes;
- __le64 rx_runt_bytes;
- __le64 rx_runt_frames;
- __le64 rx_stat_discard;
- __le64 rx_stat_err;
+ #define ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA1_TIMESTAMP 0x1UL
};
/* hwrm_ver_get */
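/*
 * A minimal illustrative sketch (not part of this patch): the bumped
 * HWRM_VERSION_* constants describe the spec this header was generated
 * from; at probe time they are typically compared against the interface
 * version the firmware reports via hwrm_ver_get. The response field
 * names (hwrm_intf_maj/min/upd) are assumed here for illustration.
 */
static bool example_fw_newer_than_driver(struct hwrm_ver_get_output *resp)
{
	u32 fw = resp->hwrm_intf_maj << 16 |
		 resp->hwrm_intf_min << 8 |
		 resp->hwrm_intf_upd;
	u32 drv = HWRM_VERSION_MAJOR << 16 |
		  HWRM_VERSION_MINOR << 8 |
		  HWRM_VERSION_UPDATE;

	return fw > drv;
}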
@@ -1041,6 +756,7 @@ struct hwrm_func_qcaps_output {
#define FUNC_QCAPS_RESP_FLAGS_TX_BW_CFG_SUPPORTED 0x100UL
#define FUNC_QCAPS_RESP_FLAGS_VF_TX_RING_RL_SUPPORTED 0x200UL
#define FUNC_QCAPS_RESP_FLAGS_VF_BW_CFG_SUPPORTED 0x400UL
+ #define FUNC_QCAPS_RESP_FLAGS_STD_TX_RING_MODE_SUPPORTED 0x800UL
u8 mac_address[6];
__le16 max_rsscos_ctx;
__le16 max_cmpl_rings;
@@ -1090,6 +806,7 @@ struct hwrm_func_qcfg_output {
#define FUNC_QCFG_RESP_FLAGS_OOB_WOL_MAGICPKT_ENABLED 0x1UL
#define FUNC_QCFG_RESP_FLAGS_OOB_WOL_BMP_ENABLED 0x2UL
#define FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED 0x4UL
+ #define FUNC_QCFG_RESP_FLAGS_STD_TX_RING_MODE_ENABLED 0x8UL
u8 mac_address[6];
__le16 pci_id;
__le16 alloc_rsscos_ctx;
@@ -1166,6 +883,7 @@ struct hwrm_func_cfg_input {
#define FUNC_CFG_REQ_FLAGS_DISABLE_STP 0x40UL
#define FUNC_CFG_REQ_FLAGS_DISABLE_LLDP 0x80UL
#define FUNC_CFG_REQ_FLAGS_DISABLE_PTPV2 0x100UL
+ #define FUNC_CFG_REQ_FLAGS_STD_TX_RING_MODE 0x200UL
__le32 enables;
#define FUNC_CFG_REQ_ENABLES_MTU 0x1UL
#define FUNC_CFG_REQ_ENABLES_MRU 0x2UL
@@ -1399,6 +1117,7 @@ struct hwrm_func_drv_rgtr_input {
#define FUNC_DRV_RGTR_REQ_OS_TYPE_ESXI 0x68UL
#define FUNC_DRV_RGTR_REQ_OS_TYPE_WIN864 0x73UL
#define FUNC_DRV_RGTR_REQ_OS_TYPE_WIN2012R2 0x74UL
+ #define FUNC_DRV_RGTR_REQ_OS_TYPE_UEFI 0x8000UL
u8 ver_maj;
u8 ver_min;
u8 ver_upd;
@@ -1531,6 +1250,7 @@ struct hwrm_func_drv_qver_output {
#define FUNC_DRV_QVER_RESP_OS_TYPE_ESXI 0x68UL
#define FUNC_DRV_QVER_RESP_OS_TYPE_WIN864 0x73UL
#define FUNC_DRV_QVER_RESP_OS_TYPE_WIN2012R2 0x74UL
+ #define FUNC_DRV_QVER_RESP_OS_TYPE_UEFI 0x8000UL
u8 ver_maj;
u8 ver_min;
u8 ver_upd;
@@ -1549,7 +1269,7 @@ struct hwrm_port_phy_cfg_input {
__le64 resp_addr;
__le32 flags;
#define PORT_PHY_CFG_REQ_FLAGS_RESET_PHY 0x1UL
- #define PORT_PHY_CFG_REQ_FLAGS_FORCE_LINK_DOWN 0x2UL
+ #define PORT_PHY_CFG_REQ_FLAGS_DEPRECATED 0x2UL
#define PORT_PHY_CFG_REQ_FLAGS_FORCE 0x4UL
#define PORT_PHY_CFG_REQ_FLAGS_RESTART_AUTONEG 0x8UL
#define PORT_PHY_CFG_REQ_FLAGS_EEE_ENABLE 0x10UL
@@ -1562,6 +1282,7 @@ struct hwrm_port_phy_cfg_input {
#define PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE74_DISABLE 0x800UL
#define PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE91_ENABLE 0x1000UL
#define PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE91_DISABLE 0x2000UL
+ #define PORT_PHY_CFG_REQ_FLAGS_FORCE_LINK_DWN 0x4000UL
__le32 enables;
#define PORT_PHY_CFG_REQ_ENABLES_AUTO_MODE 0x1UL
#define PORT_PHY_CFG_REQ_ENABLES_AUTO_DUPLEX 0x2UL
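/*
 * A minimal illustrative sketch (not part of this patch): bit 1 of the
 * request flags is now marked deprecated and the dedicated
 * FORCE_LINK_DWN bit (0x4000) takes over, so forcing the link down
 * becomes:
 */
static void example_force_link_down(struct hwrm_port_phy_cfg_input *req)
{
	req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE_LINK_DWN);
}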
@@ -2091,31 +1812,6 @@ struct hwrm_port_lpbk_clr_stats_output {
u8 valid;
};
-/* hwrm_port_blink_led */
-/* Input (24 bytes) */
-struct hwrm_port_blink_led_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 num_blinks;
- __le32 unused_0;
-};
-
-/* Output (16 bytes) */
-struct hwrm_port_blink_led_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
/* hwrm_port_phy_qcaps */
/* Input (24 bytes) */
struct hwrm_port_phy_qcaps_input {
@@ -2337,6 +2033,39 @@ struct hwrm_queue_cfg_output {
u8 valid;
};
+/* hwrm_queue_pfcenable_qcfg */
+/* Input (24 bytes) */
+struct hwrm_queue_pfcenable_qcfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 port_id;
+ __le16 unused_0[3];
+};
+
+/* Output (16 bytes) */
+struct hwrm_queue_pfcenable_qcfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 flags;
+ #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI0_PFC_ENABLED 0x1UL
+ #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI1_PFC_ENABLED 0x2UL
+ #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI2_PFC_ENABLED 0x4UL
+ #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI3_PFC_ENABLED 0x8UL
+ #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI4_PFC_ENABLED 0x10UL
+ #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI5_PFC_ENABLED 0x20UL
+ #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI6_PFC_ENABLED 0x40UL
+ #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI7_PFC_ENABLED 0x80UL
+ u8 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 valid;
+};
+
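/*
 * A minimal illustrative sketch (not part of this patch): the new qcfg
 * response encodes PFC enablement as one flag bit per priority (bit n
 * corresponds to priority n), so it collapses naturally into an 8-bit
 * DCB-style priority map.
 */
static u8 example_pfc_bitmap(struct hwrm_queue_pfcenable_qcfg_output *resp)
{
	/* PRI0..PRI7 flags occupy bits 0..7 of the flags word. */
	return le32_to_cpu(resp->flags) & 0xff;
}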
/* hwrm_queue_pfcenable_cfg */
/* Input (24 bytes) */
struct hwrm_queue_pfcenable_cfg_input {
@@ -2371,6 +2100,48 @@ struct hwrm_queue_pfcenable_cfg_output {
u8 valid;
};
+/* hwrm_queue_pri2cos_qcfg */
+/* Input (24 bytes) */
+struct hwrm_queue_pri2cos_qcfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define QUEUE_PRI2COS_QCFG_REQ_FLAGS_PATH 0x1UL
+ #define QUEUE_PRI2COS_QCFG_REQ_FLAGS_PATH_TX (0x0UL << 0)
+ #define QUEUE_PRI2COS_QCFG_REQ_FLAGS_PATH_RX (0x1UL << 0)
+ #define QUEUE_PRI2COS_QCFG_REQ_FLAGS_PATH_LAST QUEUE_PRI2COS_QCFG_REQ_FLAGS_PATH_RX
+ #define QUEUE_PRI2COS_QCFG_REQ_FLAGS_IVLAN 0x2UL
+ u8 port_id;
+ u8 unused_0[3];
+};
+
+/* Output (24 bytes) */
+struct hwrm_queue_pri2cos_qcfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 pri0_cos_queue_id;
+ u8 pri1_cos_queue_id;
+ u8 pri2_cos_queue_id;
+ u8 pri3_cos_queue_id;
+ u8 pri4_cos_queue_id;
+ u8 pri5_cos_queue_id;
+ u8 pri6_cos_queue_id;
+ u8 pri7_cos_queue_id;
+ u8 queue_cfg_info;
+ #define QUEUE_PRI2COS_QCFG_RESP_QUEUE_CFG_INFO_ASYM_CFG 0x1UL
+ u8 unused_0;
+ __le16 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 unused_4;
+ u8 valid;
+};
+
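/*
 * A minimal illustrative sketch (not part of this patch): the eight
 * priN_cos_queue_id bytes in the qcfg response above are laid out
 * consecutively, so a caller can walk them as an array.
 */
static void example_read_pri2cos(struct hwrm_queue_pri2cos_qcfg_output *resp,
				 u8 *pri2cos)
{
	u8 *ids = &resp->pri0_cos_queue_id;
	int i;

	for (i = 0; i < 8; i++)
		pri2cos[i] = ids[i];
}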
/* hwrm_queue_pri2cos_cfg */
/* Input (40 bytes) */
struct hwrm_queue_pri2cos_cfg_input {
@@ -2421,6 +2192,257 @@ struct hwrm_queue_pri2cos_cfg_output {
u8 valid;
};
+/* hwrm_queue_cos2bw_qcfg */
+/* Input (24 bytes) */
+struct hwrm_queue_cos2bw_qcfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 port_id;
+ __le16 unused_0[3];
+};
+
+/* Output (112 bytes) */
+struct hwrm_queue_cos2bw_qcfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 queue_id0;
+ u8 unused_0;
+ __le16 unused_1;
+ __le32 queue_id0_min_bw;
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_RSVD 0x10000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_INVALID
+ __le32 queue_id0_max_bw;
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_RSVD 0x10000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_INVALID
+ u8 queue_id0_tsa_assign;
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_TSA_ASSIGN_SP 0x0UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_TSA_ASSIGN_ETS 0x1UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_TSA_ASSIGN_RESERVED_FIRST 0x2UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_TSA_ASSIGN_RESERVED_LAST 0xffUL
+ u8 queue_id0_pri_lvl;
+ u8 queue_id0_bw_weight;
+ u8 queue_id1;
+ __le32 queue_id1_min_bw;
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_RSVD 0x10000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_INVALID
+ __le32 queue_id1_max_bw;
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_RSVD 0x10000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_INVALID
+ u8 queue_id1_tsa_assign;
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_TSA_ASSIGN_SP 0x0UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_TSA_ASSIGN_ETS 0x1UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_TSA_ASSIGN_RESERVED_FIRST 0x2UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_TSA_ASSIGN_RESERVED_LAST 0xffUL
+ u8 queue_id1_pri_lvl;
+ u8 queue_id1_bw_weight;
+ u8 queue_id2;
+ __le32 queue_id2_min_bw;
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_RSVD 0x10000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_INVALID
+ __le32 queue_id2_max_bw;
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_RSVD 0x10000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_INVALID
+ u8 queue_id2_tsa_assign;
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_TSA_ASSIGN_SP 0x0UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_TSA_ASSIGN_ETS 0x1UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_TSA_ASSIGN_RESERVED_FIRST 0x2UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_TSA_ASSIGN_RESERVED_LAST 0xffUL
+ u8 queue_id2_pri_lvl;
+ u8 queue_id2_bw_weight;
+ u8 queue_id3;
+ __le32 queue_id3_min_bw;
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_RSVD 0x10000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_INVALID
+ __le32 queue_id3_max_bw;
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_RSVD 0x10000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_INVALID
+ u8 queue_id3_tsa_assign;
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_TSA_ASSIGN_SP 0x0UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_TSA_ASSIGN_ETS 0x1UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_TSA_ASSIGN_RESERVED_FIRST 0x2UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_TSA_ASSIGN_RESERVED_LAST 0xffUL
+ u8 queue_id3_pri_lvl;
+ u8 queue_id3_bw_weight;
+ u8 queue_id4;
+ __le32 queue_id4_min_bw;
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_RSVD 0x10000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_INVALID
+ __le32 queue_id4_max_bw;
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_RSVD 0x10000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_INVALID
+ u8 queue_id4_tsa_assign;
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_TSA_ASSIGN_SP 0x0UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_TSA_ASSIGN_ETS 0x1UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_TSA_ASSIGN_RESERVED_FIRST 0x2UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_TSA_ASSIGN_RESERVED_LAST 0xffUL
+ u8 queue_id4_pri_lvl;
+ u8 queue_id4_bw_weight;
+ u8 queue_id5;
+ __le32 queue_id5_min_bw;
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_RSVD 0x10000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_INVALID
+ __le32 queue_id5_max_bw;
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_RSVD 0x10000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_INVALID
+ u8 queue_id5_tsa_assign;
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_TSA_ASSIGN_SP 0x0UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_TSA_ASSIGN_ETS 0x1UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_TSA_ASSIGN_RESERVED_FIRST 0x2UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_TSA_ASSIGN_RESERVED_LAST 0xffUL
+ u8 queue_id5_pri_lvl;
+ u8 queue_id5_bw_weight;
+ u8 queue_id6;
+ __le32 queue_id6_min_bw;
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_RSVD 0x10000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_INVALID
+ __le32 queue_id6_max_bw;
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_RSVD 0x10000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_INVALID
+ u8 queue_id6_tsa_assign;
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_TSA_ASSIGN_SP 0x0UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_TSA_ASSIGN_ETS 0x1UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_TSA_ASSIGN_RESERVED_FIRST 0x2UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_TSA_ASSIGN_RESERVED_LAST 0xffUL
+ u8 queue_id6_pri_lvl;
+ u8 queue_id6_bw_weight;
+ u8 queue_id7;
+ __le32 queue_id7_min_bw;
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_RSVD 0x10000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_INVALID
+ __le32 queue_id7_max_bw;
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_RSVD 0x10000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_INVALID
+ u8 queue_id7_tsa_assign;
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_TSA_ASSIGN_SP 0x0UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_TSA_ASSIGN_ETS 0x1UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_TSA_ASSIGN_RESERVED_FIRST 0x2UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_TSA_ASSIGN_RESERVED_LAST 0xffUL
+ u8 queue_id7_pri_lvl;
+ u8 queue_id7_bw_weight;
+ u8 unused_2;
+ u8 unused_3;
+ u8 unused_4;
+ u8 unused_5;
+ u8 valid;
+};
+
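/* A minimal decode sketch for the min_bw/max_bw words above: each packs a
 * 28-bit rate and a 3-bit unit selector into one little-endian u32. The
 * helper below is illustrative only (not part of the driver) and reuses
 * the queue_id0 masks; the other queue IDs decode identically.
 */
static void sketch_decode_bw(__le32 bw)
{
	u32 v = le32_to_cpu(bw);
	u32 rate = (v & QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_MASK) >>
		   QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_SFT;
	u32 unit = v & QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_MASK;

	if (unit == QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_MBPS)
		pr_debug("bw: %u Mbps\n", rate);
	else if (unit == QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_PERCENT1_100)
		pr_debug("bw: %u.%02u%%\n", rate / 100, rate % 100);
}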
/* hwrm_queue_cos2bw_cfg */
/* Input (128 bytes) */
struct hwrm_queue_cos2bw_cfg_input {
@@ -3802,7 +3824,9 @@ struct hwrm_stat_ctx_alloc_input {
__le64 resp_addr;
__le64 stats_dma_addr;
__le32 update_period_ms;
- __le32 unused_0;
+ u8 stat_ctx_flags;
+ #define STAT_CTX_ALLOC_REQ_STAT_CTX_FLAGS_ROCE 0x1UL
+ u8 unused_0[3];
};
/* Output (16 bytes) */
@@ -4023,6 +4047,75 @@ struct hwrm_fw_set_time_output {
u8 valid;
};
+/* hwrm_fw_set_structured_data */
+/* Input (32 bytes) */
+struct hwrm_fw_set_structured_data_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le64 src_data_addr;
+ __le16 data_len;
+ u8 hdr_cnt;
+ u8 unused_0;
+ __le16 port_id;
+ __le16 unused_1;
+};
+
+/* Output (16 bytes) */
+struct hwrm_fw_set_structured_data_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
+/* hwrm_fw_get_structured_data */
+/* Input (40 bytes) */
+struct hwrm_fw_get_structured_data_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le64 dest_data_addr;
+ __le16 data_len;
+ __le16 structure_id;
+ __le16 subtype;
+ #define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_ALL 0xffffUL
+ #define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_NEAR_BRIDGE_ADMIN 0x100UL
+ #define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_NEAR_BRIDGE_PEER 0x101UL
+ #define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_NEAR_BRIDGE_OPERATIONAL 0x102UL
+ #define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_NON_TPMR_ADMIN 0x200UL
+ #define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_NON_TPMR_PEER 0x201UL
+ #define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_NON_TPMR_OPERATIONAL 0x202UL
+ u8 count;
+ u8 unused_0;
+ __le16 port_id;
+ __le16 unused_1[3];
+};
+
+/* Output (16 bytes) */
+struct hwrm_fw_get_structured_data_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 hdr_cnt;
+ u8 unused_0;
+ __le16 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 unused_4;
+ u8 valid;
+};
+
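/* Sketch: how a request for the near-bridge admin LLDP block might be built
 * with the message above. bnxt_hwrm_cmd_hdr_init() and hwrm_send_message()
 * follow the driver's usual request pattern; the function name and the
 * caller-supplied DMA buffer are illustrative assumptions.
 */
static int sketch_get_lldp_admin(struct bnxt *bp, dma_addr_t data_dma_addr,
				 u16 data_len)
{
	struct hwrm_fw_get_structured_data_input req = {0};

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FW_GET_STRUCTURED_DATA, -1, -1);
	req.dest_data_addr = cpu_to_le64(data_dma_addr);
	req.data_len = cpu_to_le16(data_len);
	req.structure_id = cpu_to_le16(STRUCT_HDR_STRUCT_ID_LLDP_CFG);
	req.subtype =
		cpu_to_le16(FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_NEAR_BRIDGE_ADMIN);
	req.count = 1;
	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}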
/* hwrm_exec_fwd_resp */
/* Input (128 bytes) */
struct hwrm_exec_fwd_resp_input {
@@ -4515,4 +4608,363 @@ struct hwrm_nvm_install_update_output {
u8 valid;
};
+/* Hardware Resource Manager Specification */
+/* Input (16 bytes) */
+struct input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+};
+
+/* Output (8 bytes) */
+struct output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+};
+
+/* Command numbering (8 bytes) */
+struct cmd_nums {
+ __le16 req_type;
+ #define HWRM_VER_GET (0x0UL)
+ #define HWRM_FUNC_BUF_UNRGTR (0xeUL)
+ #define HWRM_FUNC_VF_CFG (0xfUL)
+ #define RESERVED1 (0x10UL)
+ #define HWRM_FUNC_RESET (0x11UL)
+ #define HWRM_FUNC_GETFID (0x12UL)
+ #define HWRM_FUNC_VF_ALLOC (0x13UL)
+ #define HWRM_FUNC_VF_FREE (0x14UL)
+ #define HWRM_FUNC_QCAPS (0x15UL)
+ #define HWRM_FUNC_QCFG (0x16UL)
+ #define HWRM_FUNC_CFG (0x17UL)
+ #define HWRM_FUNC_QSTATS (0x18UL)
+ #define HWRM_FUNC_CLR_STATS (0x19UL)
+ #define HWRM_FUNC_DRV_UNRGTR (0x1aUL)
+ #define HWRM_FUNC_VF_RESC_FREE (0x1bUL)
+ #define HWRM_FUNC_VF_VNIC_IDS_QUERY (0x1cUL)
+ #define HWRM_FUNC_DRV_RGTR (0x1dUL)
+ #define HWRM_FUNC_DRV_QVER (0x1eUL)
+ #define HWRM_FUNC_BUF_RGTR (0x1fUL)
+ #define HWRM_PORT_PHY_CFG (0x20UL)
+ #define HWRM_PORT_MAC_CFG (0x21UL)
+ #define HWRM_PORT_TS_QUERY (0x22UL)
+ #define HWRM_PORT_QSTATS (0x23UL)
+ #define HWRM_PORT_LPBK_QSTATS (0x24UL)
+ #define HWRM_PORT_CLR_STATS (0x25UL)
+ #define HWRM_PORT_LPBK_CLR_STATS (0x26UL)
+ #define HWRM_PORT_PHY_QCFG (0x27UL)
+ #define HWRM_PORT_MAC_QCFG (0x28UL)
+ #define RESERVED7 (0x29UL)
+ #define HWRM_PORT_PHY_QCAPS (0x2aUL)
+ #define HWRM_PORT_PHY_I2C_WRITE (0x2bUL)
+ #define HWRM_PORT_PHY_I2C_READ (0x2cUL)
+ #define HWRM_PORT_LED_CFG (0x2dUL)
+ #define HWRM_PORT_LED_QCFG (0x2eUL)
+ #define HWRM_PORT_LED_QCAPS (0x2fUL)
+ #define HWRM_QUEUE_QPORTCFG (0x30UL)
+ #define HWRM_QUEUE_QCFG (0x31UL)
+ #define HWRM_QUEUE_CFG (0x32UL)
+ #define RESERVED2 (0x33UL)
+ #define RESERVED3 (0x34UL)
+ #define HWRM_QUEUE_PFCENABLE_QCFG (0x35UL)
+ #define HWRM_QUEUE_PFCENABLE_CFG (0x36UL)
+ #define HWRM_QUEUE_PRI2COS_QCFG (0x37UL)
+ #define HWRM_QUEUE_PRI2COS_CFG (0x38UL)
+ #define HWRM_QUEUE_COS2BW_QCFG (0x39UL)
+ #define HWRM_QUEUE_COS2BW_CFG (0x3aUL)
+ #define HWRM_VNIC_ALLOC (0x40UL)
+ #define HWRM_VNIC_FREE (0x41UL)
+ #define HWRM_VNIC_CFG (0x42UL)
+ #define HWRM_VNIC_QCFG (0x43UL)
+ #define HWRM_VNIC_TPA_CFG (0x44UL)
+ #define HWRM_VNIC_TPA_QCFG (0x45UL)
+ #define HWRM_VNIC_RSS_CFG (0x46UL)
+ #define HWRM_VNIC_RSS_QCFG (0x47UL)
+ #define HWRM_VNIC_PLCMODES_CFG (0x48UL)
+ #define HWRM_VNIC_PLCMODES_QCFG (0x49UL)
+ #define HWRM_VNIC_QCAPS (0x4aUL)
+ #define HWRM_RING_ALLOC (0x50UL)
+ #define HWRM_RING_FREE (0x51UL)
+ #define HWRM_RING_CMPL_RING_QAGGINT_PARAMS (0x52UL)
+ #define HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS (0x53UL)
+ #define HWRM_RING_RESET (0x5eUL)
+ #define HWRM_RING_GRP_ALLOC (0x60UL)
+ #define HWRM_RING_GRP_FREE (0x61UL)
+ #define RESERVED5 (0x64UL)
+ #define RESERVED6 (0x65UL)
+ #define HWRM_VNIC_RSS_COS_LB_CTX_ALLOC (0x70UL)
+ #define HWRM_VNIC_RSS_COS_LB_CTX_FREE (0x71UL)
+ #define HWRM_CFA_L2_FILTER_ALLOC (0x90UL)
+ #define HWRM_CFA_L2_FILTER_FREE (0x91UL)
+ #define HWRM_CFA_L2_FILTER_CFG (0x92UL)
+ #define HWRM_CFA_L2_SET_RX_MASK (0x93UL)
+ #define RESERVED4 (0x94UL)
+ #define HWRM_CFA_TUNNEL_FILTER_ALLOC (0x95UL)
+ #define HWRM_CFA_TUNNEL_FILTER_FREE (0x96UL)
+ #define HWRM_CFA_ENCAP_RECORD_ALLOC (0x97UL)
+ #define HWRM_CFA_ENCAP_RECORD_FREE (0x98UL)
+ #define HWRM_CFA_NTUPLE_FILTER_ALLOC (0x99UL)
+ #define HWRM_CFA_NTUPLE_FILTER_FREE (0x9aUL)
+ #define HWRM_CFA_NTUPLE_FILTER_CFG (0x9bUL)
+ #define HWRM_CFA_EM_FLOW_ALLOC (0x9cUL)
+ #define HWRM_CFA_EM_FLOW_FREE (0x9dUL)
+ #define HWRM_CFA_EM_FLOW_CFG (0x9eUL)
+ #define HWRM_TUNNEL_DST_PORT_QUERY (0xa0UL)
+ #define HWRM_TUNNEL_DST_PORT_ALLOC (0xa1UL)
+ #define HWRM_TUNNEL_DST_PORT_FREE (0xa2UL)
+ #define HWRM_STAT_CTX_ALLOC (0xb0UL)
+ #define HWRM_STAT_CTX_FREE (0xb1UL)
+ #define HWRM_STAT_CTX_QUERY (0xb2UL)
+ #define HWRM_STAT_CTX_CLR_STATS (0xb3UL)
+ #define HWRM_FW_RESET (0xc0UL)
+ #define HWRM_FW_QSTATUS (0xc1UL)
+ #define HWRM_FW_SET_TIME (0xc8UL)
+ #define HWRM_FW_GET_TIME (0xc9UL)
+ #define HWRM_FW_SET_STRUCTURED_DATA (0xcaUL)
+ #define HWRM_FW_GET_STRUCTURED_DATA (0xcbUL)
+ #define HWRM_FW_IPC_MAILBOX (0xccUL)
+ #define HWRM_EXEC_FWD_RESP (0xd0UL)
+ #define HWRM_REJECT_FWD_RESP (0xd1UL)
+ #define HWRM_FWD_RESP (0xd2UL)
+ #define HWRM_FWD_ASYNC_EVENT_CMPL (0xd3UL)
+ #define HWRM_TEMP_MONITOR_QUERY (0xe0UL)
+ #define HWRM_WOL_FILTER_ALLOC (0xf0UL)
+ #define HWRM_WOL_FILTER_FREE (0xf1UL)
+ #define HWRM_WOL_FILTER_QCFG (0xf2UL)
+ #define HWRM_WOL_REASON_QCFG (0xf3UL)
+ #define HWRM_DBG_READ_DIRECT (0xff10UL)
+ #define HWRM_DBG_READ_INDIRECT (0xff11UL)
+ #define HWRM_DBG_WRITE_DIRECT (0xff12UL)
+ #define HWRM_DBG_WRITE_INDIRECT (0xff13UL)
+ #define HWRM_DBG_DUMP (0xff14UL)
+ #define HWRM_NVM_GET_VARIABLE (0xfff1UL)
+ #define HWRM_NVM_SET_VARIABLE (0xfff2UL)
+ #define HWRM_NVM_INSTALL_UPDATE (0xfff3UL)
+ #define HWRM_NVM_MODIFY (0xfff4UL)
+ #define HWRM_NVM_VERIFY_UPDATE (0xfff5UL)
+ #define HWRM_NVM_GET_DEV_INFO (0xfff6UL)
+ #define HWRM_NVM_ERASE_DIR_ENTRY (0xfff7UL)
+ #define HWRM_NVM_MOD_DIR_ENTRY (0xfff8UL)
+ #define HWRM_NVM_FIND_DIR_ENTRY (0xfff9UL)
+ #define HWRM_NVM_GET_DIR_ENTRIES (0xfffaUL)
+ #define HWRM_NVM_GET_DIR_INFO (0xfffbUL)
+ #define HWRM_NVM_RAW_DUMP (0xfffcUL)
+ #define HWRM_NVM_READ (0xfffdUL)
+ #define HWRM_NVM_WRITE (0xfffeUL)
+ #define HWRM_NVM_RAW_WRITE_BLK (0xffffUL)
+ __le16 unused_0[3];
+};
+
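/* Because every HWRM request begins with the fields of struct input above,
 * generic code can stamp the common header without knowing the concrete
 * message type; bnxt_send_msg() in bnxt_ulp.c later in this patch relies on
 * exactly this layout. A sketch (function name and values are illustrative):
 */
static void sketch_init_hdr(void *msg, u16 req_type, u64 resp_dma_addr)
{
	struct input *req = msg;	/* any HWRM request body */

	req->req_type = cpu_to_le16(req_type);
	req->resp_addr = cpu_to_le64(resp_dma_addr);
}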
+/* Return Codes (8 bytes) */
+struct ret_codes {
+ __le16 error_code;
+ #define HWRM_ERR_CODE_SUCCESS (0x0UL)
+ #define HWRM_ERR_CODE_FAIL (0x1UL)
+ #define HWRM_ERR_CODE_INVALID_PARAMS (0x2UL)
+ #define HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED (0x3UL)
+ #define HWRM_ERR_CODE_RESOURCE_ALLOC_ERROR (0x4UL)
+ #define HWRM_ERR_CODE_INVALID_FLAGS (0x5UL)
+ #define HWRM_ERR_CODE_INVALID_ENABLES (0x6UL)
+ #define HWRM_ERR_CODE_HWRM_ERROR (0xfUL)
+ #define HWRM_ERR_CODE_UNKNOWN_ERR (0xfffeUL)
+ #define HWRM_ERR_CODE_CMD_NOT_SUPPORTED (0xffffUL)
+ __le16 unused_0[3];
+};
+
+/* Output (16 bytes) */
+struct hwrm_err_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 opaque_0;
+ __le16 opaque_1;
+ u8 cmd_err;
+ u8 valid;
+};
+
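/* Sketch of the completion check these formats imply: firmware DMAs the
 * response into host memory and writes the `valid` byte last, so a driver
 * polls `valid` before trusting error_code or resp_len. Simplified and
 * illustrative; the real polling loop lives in bnxt.c.
 */
static int sketch_check_resp(struct hwrm_err_output *resp)
{
	if (!resp->valid)	/* firmware writes `valid` last */
		return -ETIMEDOUT;
	if (le16_to_cpu(resp->error_code) != HWRM_ERR_CODE_SUCCESS)
		return -EIO;
	return 0;
}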
+/* Port Tx Statistics Formats (408 bytes) */
+struct tx_port_stats {
+ __le64 tx_64b_frames;
+ __le64 tx_65b_127b_frames;
+ __le64 tx_128b_255b_frames;
+ __le64 tx_256b_511b_frames;
+ __le64 tx_512b_1023b_frames;
+ __le64 tx_1024b_1518_frames;
+ __le64 tx_good_vlan_frames;
+ __le64 tx_1519b_2047_frames;
+ __le64 tx_2048b_4095b_frames;
+ __le64 tx_4096b_9216b_frames;
+ __le64 tx_9217b_16383b_frames;
+ __le64 tx_good_frames;
+ __le64 tx_total_frames;
+ __le64 tx_ucast_frames;
+ __le64 tx_mcast_frames;
+ __le64 tx_bcast_frames;
+ __le64 tx_pause_frames;
+ __le64 tx_pfc_frames;
+ __le64 tx_jabber_frames;
+ __le64 tx_fcs_err_frames;
+ __le64 tx_control_frames;
+ __le64 tx_oversz_frames;
+ __le64 tx_single_dfrl_frames;
+ __le64 tx_multi_dfrl_frames;
+ __le64 tx_single_coll_frames;
+ __le64 tx_multi_coll_frames;
+ __le64 tx_late_coll_frames;
+ __le64 tx_excessive_coll_frames;
+ __le64 tx_frag_frames;
+ __le64 tx_err;
+ __le64 tx_tagged_frames;
+ __le64 tx_dbl_tagged_frames;
+ __le64 tx_runt_frames;
+ __le64 tx_fifo_underruns;
+ __le64 tx_pfc_ena_frames_pri0;
+ __le64 tx_pfc_ena_frames_pri1;
+ __le64 tx_pfc_ena_frames_pri2;
+ __le64 tx_pfc_ena_frames_pri3;
+ __le64 tx_pfc_ena_frames_pri4;
+ __le64 tx_pfc_ena_frames_pri5;
+ __le64 tx_pfc_ena_frames_pri6;
+ __le64 tx_pfc_ena_frames_pri7;
+ __le64 tx_eee_lpi_events;
+ __le64 tx_eee_lpi_duration;
+ __le64 tx_llfc_logical_msgs;
+ __le64 tx_hcfc_msgs;
+ __le64 tx_total_collisions;
+ __le64 tx_bytes;
+ __le64 tx_xthol_frames;
+ __le64 tx_stat_discard;
+ __le64 tx_stat_error;
+};
+
+/* Port Rx Statistics Formats (528 bytes) */
+struct rx_port_stats {
+ __le64 rx_64b_frames;
+ __le64 rx_65b_127b_frames;
+ __le64 rx_128b_255b_frames;
+ __le64 rx_256b_511b_frames;
+ __le64 rx_512b_1023b_frames;
+ __le64 rx_1024b_1518_frames;
+ __le64 rx_good_vlan_frames;
+ __le64 rx_1519b_2047b_frames;
+ __le64 rx_2048b_4095b_frames;
+ __le64 rx_4096b_9216b_frames;
+ __le64 rx_9217b_16383b_frames;
+ __le64 rx_total_frames;
+ __le64 rx_ucast_frames;
+ __le64 rx_mcast_frames;
+ __le64 rx_bcast_frames;
+ __le64 rx_fcs_err_frames;
+ __le64 rx_ctrl_frames;
+ __le64 rx_pause_frames;
+ __le64 rx_pfc_frames;
+ __le64 rx_unsupported_opcode_frames;
+ __le64 rx_unsupported_da_pausepfc_frames;
+ __le64 rx_wrong_sa_frames;
+ __le64 rx_align_err_frames;
+ __le64 rx_oor_len_frames;
+ __le64 rx_code_err_frames;
+ __le64 rx_false_carrier_frames;
+ __le64 rx_ovrsz_frames;
+ __le64 rx_jbr_frames;
+ __le64 rx_mtu_err_frames;
+ __le64 rx_match_crc_frames;
+ __le64 rx_promiscuous_frames;
+ __le64 rx_tagged_frames;
+ __le64 rx_double_tagged_frames;
+ __le64 rx_trunc_frames;
+ __le64 rx_good_frames;
+ __le64 rx_pfc_xon2xoff_frames_pri0;
+ __le64 rx_pfc_xon2xoff_frames_pri1;
+ __le64 rx_pfc_xon2xoff_frames_pri2;
+ __le64 rx_pfc_xon2xoff_frames_pri3;
+ __le64 rx_pfc_xon2xoff_frames_pri4;
+ __le64 rx_pfc_xon2xoff_frames_pri5;
+ __le64 rx_pfc_xon2xoff_frames_pri6;
+ __le64 rx_pfc_xon2xoff_frames_pri7;
+ __le64 rx_pfc_ena_frames_pri0;
+ __le64 rx_pfc_ena_frames_pri1;
+ __le64 rx_pfc_ena_frames_pri2;
+ __le64 rx_pfc_ena_frames_pri3;
+ __le64 rx_pfc_ena_frames_pri4;
+ __le64 rx_pfc_ena_frames_pri5;
+ __le64 rx_pfc_ena_frames_pri6;
+ __le64 rx_pfc_ena_frames_pri7;
+ __le64 rx_sch_crc_err_frames;
+ __le64 rx_undrsz_frames;
+ __le64 rx_frag_frames;
+ __le64 rx_eee_lpi_events;
+ __le64 rx_eee_lpi_duration;
+ __le64 rx_llfc_physical_msgs;
+ __le64 rx_llfc_logical_msgs;
+ __le64 rx_llfc_msgs_with_crc_err;
+ __le64 rx_hcfc_msgs;
+ __le64 rx_hcfc_msgs_with_crc_err;
+ __le64 rx_bytes;
+ __le64 rx_runt_bytes;
+ __le64 rx_runt_frames;
+ __le64 rx_stat_discard;
+ __le64 rx_stat_err;
+};
+
+/* Periodic Statistics Context DMA to host (160 bytes) */
+struct ctx_hw_stats {
+ __le64 rx_ucast_pkts;
+ __le64 rx_mcast_pkts;
+ __le64 rx_bcast_pkts;
+ __le64 rx_discard_pkts;
+ __le64 rx_drop_pkts;
+ __le64 rx_ucast_bytes;
+ __le64 rx_mcast_bytes;
+ __le64 rx_bcast_bytes;
+ __le64 tx_ucast_pkts;
+ __le64 tx_mcast_pkts;
+ __le64 tx_bcast_pkts;
+ __le64 tx_discard_pkts;
+ __le64 tx_drop_pkts;
+ __le64 tx_ucast_bytes;
+ __le64 tx_mcast_bytes;
+ __le64 tx_bcast_bytes;
+ __le64 tpa_pkts;
+ __le64 tpa_bytes;
+ __le64 tpa_events;
+ __le64 tpa_aborts;
+};
+
+/* Structure data header (16 bytes) */
+struct hwrm_struct_hdr {
+ __le16 struct_id;
+ #define STRUCT_HDR_STRUCT_ID_LLDP_CFG 0x41bUL
+ #define STRUCT_HDR_STRUCT_ID_DCBX_ETS_CFG 0x41dUL
+ #define STRUCT_HDR_STRUCT_ID_DCBX_PFC_CFG 0x41fUL
+ #define STRUCT_HDR_STRUCT_ID_DCBX_APP_CFG 0x421UL
+ #define STRUCT_HDR_STRUCT_ID_DCBX_STATE_CFG 0x422UL
+ #define STRUCT_HDR_STRUCT_ID_LLDP_GENERIC_CFG 0x424UL
+ #define STRUCT_HDR_STRUCT_ID_LLDP_DEVICE_CFG 0x426UL
+ __le16 len;
+ u8 version;
+ u8 count;
+ __le16 subtype;
+ __le16 next_offset;
+ #define STRUCT_HDR_NEXT_OFFSET_LAST 0x0UL
+ __le16 unused_0[3];
+};
+
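/* Sketch: walking a chain of structured-data blocks. Each block starts with
 * a hwrm_struct_hdr; this sketch assumes next_offset is a byte offset from
 * the current header, with STRUCT_HDR_NEXT_OFFSET_LAST (0) ending the
 * chain. process_struct() is a hypothetical consumer.
 */
static void sketch_walk_structs(void *buf)
{
	struct hwrm_struct_hdr *hdr = buf;

	for (;;) {
		u16 next = le16_to_cpu(hdr->next_offset);

		process_struct(hdr);	/* hypothetical consumer */
		if (next == STRUCT_HDR_NEXT_OFFSET_LAST)
			break;
		hdr = (void *)hdr + next;
	}
}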
+/* DCBX Application configuration structure (8 bytes) */
+struct hwrm_struct_data_dcbx_app_cfg {
+ __le16 protocol_id;
+ u8 protocol_selector;
+ #define STRUCT_DATA_DCBX_APP_CFG_PROTOCOL_SELECTOR_ETHER_TYPE 0x1UL
+ #define STRUCT_DATA_DCBX_APP_CFG_PROTOCOL_SELECTOR_TCP_PORT 0x2UL
+ #define STRUCT_DATA_DCBX_APP_CFG_PROTOCOL_SELECTOR_UDP_PORT 0x3UL
+ #define STRUCT_DATA_DCBX_APP_CFG_PROTOCOL_SELECTOR_TCP_UDP_PORT 0x4UL
+ u8 priority;
+ u8 valid;
+ u8 unused_0[3];
+};
+
#endif
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
index 60e2af8678bd..c69602508666 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
@@ -34,8 +34,7 @@ static int bnxt_hwrm_fwd_async_event_cmpl(struct bnxt *bp,
/* broadcast this async event to all VFs */
req.encap_async_event_target_id = cpu_to_le16(0xffff);
async_cmpl = (struct hwrm_async_event_cmpl *)req.encap_async_event_cmpl;
- async_cmpl->type =
- cpu_to_le16(HWRM_ASYNC_EVENT_CMPL_TYPE_HWRM_ASYNC_EVENT);
+ async_cmpl->type = cpu_to_le16(ASYNC_EVENT_CMPL_TYPE_HWRM_ASYNC_EVENT);
async_cmpl->event_id = cpu_to_le16(event_id);
mutex_lock(&bp->hwrm_cmd_lock);
@@ -288,7 +287,7 @@ int bnxt_set_vf_link_state(struct net_device *dev, int vf_id, int link)
}
if (vf->flags & (BNXT_VF_LINK_UP | BNXT_VF_LINK_FORCED))
rc = bnxt_hwrm_fwd_async_event_cmpl(bp, vf,
- HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE);
+ ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE);
return rc;
}
@@ -421,15 +420,7 @@ static int bnxt_hwrm_func_cfg(struct bnxt *bp, int num_vfs)
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
	/* Remaining rings are distributed equally amongst VFs for now */
- /* TODO: the following workaroud is needed to restrict total number
- * of vf_cp_rings not exceed number of HW ring groups. This WA should
- * be removed once new HWRM provides HW ring groups capability in
- * hwrm_func_qcap.
- */
- vf_cp_rings = min_t(u16, pf->max_cp_rings, pf->max_stat_ctxs);
- vf_cp_rings = (vf_cp_rings - bp->cp_nr_rings) / num_vfs;
- /* TODO: restore this logic below once the WA above is removed */
- /* vf_cp_rings = (pf->max_cp_rings - bp->cp_nr_rings) / num_vfs; */
+ vf_cp_rings = (pf->max_cp_rings - bp->cp_nr_rings) / num_vfs;
vf_stat_ctx = (pf->max_stat_ctxs - bp->num_stat_ctxs) / num_vfs;
if (bp->flags & BNXT_FLAG_AGG_RINGS)
vf_rx_rings = (pf->max_rx_rings - bp->rx_nr_rings * 2) /
@@ -578,8 +569,7 @@ void bnxt_sriov_disable(struct bnxt *bp)
if (pci_vfs_assigned(bp->pdev)) {
bnxt_hwrm_fwd_async_event_cmpl(
- bp, NULL,
- HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD);
+ bp, NULL, ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD);
netdev_warn(bp->dev, "Unable to free %d VFs because some are assigned to VMs.\n",
num_vfs);
} else {
@@ -592,7 +582,9 @@ void bnxt_sriov_disable(struct bnxt *bp)
bp->pf.active_vfs = 0;
/* Reclaim all resources for the PF. */
- bnxt_hwrm_func_qcaps(bp);
+ rtnl_lock();
+ bnxt_restore_pf_fw_resources(bp);
+ rtnl_unlock();
}
int bnxt_sriov_configure(struct pci_dev *pdev, int num_vfs)
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
new file mode 100644
index 000000000000..8b7464b76501
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
@@ -0,0 +1,346 @@
+/* Broadcom NetXtreme-C/E network driver.
+ *
+ * Copyright (c) 2016 Broadcom Limited
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/rtnetlink.h>
+#include <linux/bitops.h>
+#include <linux/irq.h>
+#include <asm/byteorder.h>
+#include <linux/bitmap.h>
+
+#include "bnxt_hsi.h"
+#include "bnxt.h"
+#include "bnxt_ulp.h"
+
+static int bnxt_register_dev(struct bnxt_en_dev *edev, int ulp_id,
+ struct bnxt_ulp_ops *ulp_ops, void *handle)
+{
+ struct net_device *dev = edev->net;
+ struct bnxt *bp = netdev_priv(dev);
+ struct bnxt_ulp *ulp;
+
+ ASSERT_RTNL();
+ if (ulp_id >= BNXT_MAX_ULP)
+ return -EINVAL;
+
+ ulp = &edev->ulp_tbl[ulp_id];
+ if (rcu_access_pointer(ulp->ulp_ops)) {
+ netdev_err(bp->dev, "ulp id %d already registered\n", ulp_id);
+ return -EBUSY;
+ }
+ if (ulp_id == BNXT_ROCE_ULP) {
+ unsigned int max_stat_ctxs;
+
+ max_stat_ctxs = bnxt_get_max_func_stat_ctxs(bp);
+ if (max_stat_ctxs <= BNXT_MIN_ROCE_STAT_CTXS ||
+ bp->num_stat_ctxs == max_stat_ctxs)
+ return -ENOMEM;
+ bnxt_set_max_func_stat_ctxs(bp, max_stat_ctxs -
+ BNXT_MIN_ROCE_STAT_CTXS);
+ }
+
+ atomic_set(&ulp->ref_count, 0);
+ ulp->handle = handle;
+ rcu_assign_pointer(ulp->ulp_ops, ulp_ops);
+
+ if (ulp_id == BNXT_ROCE_ULP) {
+ if (test_bit(BNXT_STATE_OPEN, &bp->state))
+ bnxt_hwrm_vnic_cfg(bp, 0);
+ }
+
+ return 0;
+}
+
+static int bnxt_unregister_dev(struct bnxt_en_dev *edev, int ulp_id)
+{
+ struct net_device *dev = edev->net;
+ struct bnxt *bp = netdev_priv(dev);
+ struct bnxt_ulp *ulp;
+ int i = 0;
+
+ ASSERT_RTNL();
+ if (ulp_id >= BNXT_MAX_ULP)
+ return -EINVAL;
+
+ ulp = &edev->ulp_tbl[ulp_id];
+ if (!rcu_access_pointer(ulp->ulp_ops)) {
+ netdev_err(bp->dev, "ulp id %d not registered\n", ulp_id);
+ return -EINVAL;
+ }
+ if (ulp_id == BNXT_ROCE_ULP) {
+ unsigned int max_stat_ctxs;
+
+ max_stat_ctxs = bnxt_get_max_func_stat_ctxs(bp);
+ bnxt_set_max_func_stat_ctxs(bp, max_stat_ctxs + 1);
+ }
+ if (ulp->max_async_event_id)
+ bnxt_hwrm_func_rgtr_async_events(bp, NULL, 0);
+
+ RCU_INIT_POINTER(ulp->ulp_ops, NULL);
+ synchronize_rcu();
+ ulp->max_async_event_id = 0;
+ ulp->async_events_bmap = NULL;
+ while (atomic_read(&ulp->ref_count) != 0 && i < 10) {
+ msleep(100);
+ i++;
+ }
+ return 0;
+}
+
+static int bnxt_req_msix_vecs(struct bnxt_en_dev *edev, int ulp_id,
+ struct bnxt_msix_entry *ent, int num_msix)
+{
+ struct net_device *dev = edev->net;
+ struct bnxt *bp = netdev_priv(dev);
+ int max_idx, max_cp_rings;
+ int avail_msix, i, idx;
+
+ ASSERT_RTNL();
+ if (ulp_id != BNXT_ROCE_ULP)
+ return -EINVAL;
+
+ if (!(bp->flags & BNXT_FLAG_USING_MSIX))
+ return -ENODEV;
+
+ max_cp_rings = bnxt_get_max_func_cp_rings(bp);
+ max_idx = min_t(int, bp->total_irqs, max_cp_rings);
+ avail_msix = max_idx - bp->cp_nr_rings;
+ if (!avail_msix)
+ return -ENOMEM;
+ if (avail_msix > num_msix)
+ avail_msix = num_msix;
+
+ idx = max_idx - avail_msix;
+ for (i = 0; i < avail_msix; i++) {
+ ent[i].vector = bp->irq_tbl[idx + i].vector;
+ ent[i].ring_idx = idx + i;
+ ent[i].db_offset = (idx + i) * 0x80;
+ }
+ bnxt_set_max_func_irqs(bp, max_idx - avail_msix);
+ bnxt_set_max_func_cp_rings(bp, max_cp_rings - avail_msix);
+ edev->ulp_tbl[ulp_id].msix_requested = avail_msix;
+ return avail_msix;
+}
+
+static int bnxt_free_msix_vecs(struct bnxt_en_dev *edev, int ulp_id)
+{
+ struct net_device *dev = edev->net;
+ struct bnxt *bp = netdev_priv(dev);
+ int max_cp_rings, msix_requested;
+
+ ASSERT_RTNL();
+ if (ulp_id != BNXT_ROCE_ULP)
+ return -EINVAL;
+
+ max_cp_rings = bnxt_get_max_func_cp_rings(bp);
+ msix_requested = edev->ulp_tbl[ulp_id].msix_requested;
+ bnxt_set_max_func_cp_rings(bp, max_cp_rings + msix_requested);
+ edev->ulp_tbl[ulp_id].msix_requested = 0;
+ bnxt_set_max_func_irqs(bp, bp->total_irqs);
+ return 0;
+}
+
+void bnxt_subtract_ulp_resources(struct bnxt *bp, int ulp_id)
+{
+ ASSERT_RTNL();
+ if (bnxt_ulp_registered(bp->edev, ulp_id)) {
+ struct bnxt_en_dev *edev = bp->edev;
+ unsigned int msix_req, max;
+
+ msix_req = edev->ulp_tbl[ulp_id].msix_requested;
+ max = bnxt_get_max_func_cp_rings(bp);
+ bnxt_set_max_func_cp_rings(bp, max - msix_req);
+ max = bnxt_get_max_func_stat_ctxs(bp);
+ bnxt_set_max_func_stat_ctxs(bp, max - 1);
+ }
+}
+
+static int bnxt_send_msg(struct bnxt_en_dev *edev, int ulp_id,
+ struct bnxt_fw_msg *fw_msg)
+{
+ struct net_device *dev = edev->net;
+ struct bnxt *bp = netdev_priv(dev);
+ struct input *req;
+ int rc;
+
+ mutex_lock(&bp->hwrm_cmd_lock);
+ req = fw_msg->msg;
+ req->resp_addr = cpu_to_le64(bp->hwrm_cmd_resp_dma_addr);
+ rc = _hwrm_send_message(bp, fw_msg->msg, fw_msg->msg_len,
+ fw_msg->timeout);
+ if (!rc) {
+ struct output *resp = bp->hwrm_cmd_resp_addr;
+ u32 len = le16_to_cpu(resp->resp_len);
+
+ if (fw_msg->resp_max_len < len)
+ len = fw_msg->resp_max_len;
+
+ memcpy(fw_msg->resp, resp, len);
+ }
+ mutex_unlock(&bp->hwrm_cmd_lock);
+ return rc;
+}
+
+static void bnxt_ulp_get(struct bnxt_ulp *ulp)
+{
+ atomic_inc(&ulp->ref_count);
+}
+
+static void bnxt_ulp_put(struct bnxt_ulp *ulp)
+{
+ atomic_dec(&ulp->ref_count);
+}
+
+void bnxt_ulp_stop(struct bnxt *bp)
+{
+ struct bnxt_en_dev *edev = bp->edev;
+ struct bnxt_ulp_ops *ops;
+ int i;
+
+ if (!edev)
+ return;
+
+ for (i = 0; i < BNXT_MAX_ULP; i++) {
+ struct bnxt_ulp *ulp = &edev->ulp_tbl[i];
+
+ ops = rtnl_dereference(ulp->ulp_ops);
+ if (!ops || !ops->ulp_stop)
+ continue;
+ ops->ulp_stop(ulp->handle);
+ }
+}
+
+void bnxt_ulp_start(struct bnxt *bp)
+{
+ struct bnxt_en_dev *edev = bp->edev;
+ struct bnxt_ulp_ops *ops;
+ int i;
+
+ if (!edev)
+ return;
+
+ for (i = 0; i < BNXT_MAX_ULP; i++) {
+ struct bnxt_ulp *ulp = &edev->ulp_tbl[i];
+
+ ops = rtnl_dereference(ulp->ulp_ops);
+ if (!ops || !ops->ulp_start)
+ continue;
+ ops->ulp_start(ulp->handle);
+ }
+}
+
+void bnxt_ulp_sriov_cfg(struct bnxt *bp, int num_vfs)
+{
+ struct bnxt_en_dev *edev = bp->edev;
+ struct bnxt_ulp_ops *ops;
+ int i;
+
+ if (!edev)
+ return;
+
+ for (i = 0; i < BNXT_MAX_ULP; i++) {
+ struct bnxt_ulp *ulp = &edev->ulp_tbl[i];
+
+ rcu_read_lock();
+ ops = rcu_dereference(ulp->ulp_ops);
+ if (!ops || !ops->ulp_sriov_config) {
+ rcu_read_unlock();
+ continue;
+ }
+ bnxt_ulp_get(ulp);
+ rcu_read_unlock();
+ ops->ulp_sriov_config(ulp->handle, num_vfs);
+ bnxt_ulp_put(ulp);
+ }
+}
+
+void bnxt_ulp_async_events(struct bnxt *bp, struct hwrm_async_event_cmpl *cmpl)
+{
+ u16 event_id = le16_to_cpu(cmpl->event_id);
+ struct bnxt_en_dev *edev = bp->edev;
+ struct bnxt_ulp_ops *ops;
+ int i;
+
+ if (!edev)
+ return;
+
+ rcu_read_lock();
+ for (i = 0; i < BNXT_MAX_ULP; i++) {
+ struct bnxt_ulp *ulp = &edev->ulp_tbl[i];
+
+ ops = rcu_dereference(ulp->ulp_ops);
+ if (!ops || !ops->ulp_async_notifier)
+ continue;
+ if (!ulp->async_events_bmap ||
+ event_id > ulp->max_async_event_id)
+ continue;
+
+ /* Read max_async_event_id first before testing the bitmap. */
+ smp_rmb();
+ if (test_bit(event_id, ulp->async_events_bmap))
+ ops->ulp_async_notifier(ulp->handle, cmpl);
+ }
+ rcu_read_unlock();
+}
+
+static int bnxt_register_async_events(struct bnxt_en_dev *edev, int ulp_id,
+ unsigned long *events_bmap, u16 max_id)
+{
+ struct net_device *dev = edev->net;
+ struct bnxt *bp = netdev_priv(dev);
+ struct bnxt_ulp *ulp;
+
+ if (ulp_id >= BNXT_MAX_ULP)
+ return -EINVAL;
+
+ ulp = &edev->ulp_tbl[ulp_id];
+ ulp->async_events_bmap = events_bmap;
+ /* Make sure bnxt_ulp_async_events() sees this order */
+ smp_wmb();
+ ulp->max_async_event_id = max_id;
+ bnxt_hwrm_func_rgtr_async_events(bp, events_bmap, max_id + 1);
+ return 0;
+}
+
+static const struct bnxt_en_ops bnxt_en_ops_tbl = {
+ .bnxt_register_device = bnxt_register_dev,
+ .bnxt_unregister_device = bnxt_unregister_dev,
+ .bnxt_request_msix = bnxt_req_msix_vecs,
+ .bnxt_free_msix = bnxt_free_msix_vecs,
+ .bnxt_send_fw_msg = bnxt_send_msg,
+ .bnxt_register_fw_async_events = bnxt_register_async_events,
+};
+
+struct bnxt_en_dev *bnxt_ulp_probe(struct net_device *dev)
+{
+ struct bnxt *bp = netdev_priv(dev);
+ struct bnxt_en_dev *edev;
+
+ edev = bp->edev;
+ if (!edev) {
+ edev = kzalloc(sizeof(*edev), GFP_KERNEL);
+ if (!edev)
+ return ERR_PTR(-ENOMEM);
+ edev->en_ops = &bnxt_en_ops_tbl;
+ if (bp->flags & BNXT_FLAG_ROCEV1_CAP)
+ edev->flags |= BNXT_EN_FLAG_ROCEV1_CAP;
+ if (bp->flags & BNXT_FLAG_ROCEV2_CAP)
+ edev->flags |= BNXT_EN_FLAG_ROCEV2_CAP;
+ edev->net = dev;
+ edev->pdev = bp->pdev;
+ bp->edev = edev;
+ }
+ return bp->edev;
+}
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h
new file mode 100644
index 000000000000..74f816e46a33
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h
@@ -0,0 +1,93 @@
+/* Broadcom NetXtreme-C/E network driver.
+ *
+ * Copyright (c) 2016 Broadcom Limited
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ */
+
+#ifndef BNXT_ULP_H
+#define BNXT_ULP_H
+
+#define BNXT_ROCE_ULP 0
+#define BNXT_OTHER_ULP 1
+#define BNXT_MAX_ULP 2
+
+#define BNXT_MIN_ROCE_CP_RINGS 2
+#define BNXT_MIN_ROCE_STAT_CTXS 1
+
+struct hwrm_async_event_cmpl;
+struct bnxt;
+
+struct bnxt_ulp_ops {
+ /* async_notifier() cannot sleep (in BH context) */
+ void (*ulp_async_notifier)(void *, struct hwrm_async_event_cmpl *);
+ void (*ulp_stop)(void *);
+ void (*ulp_start)(void *);
+ void (*ulp_sriov_config)(void *, int);
+};
+
+struct bnxt_msix_entry {
+ u32 vector;
+ u32 ring_idx;
+ u32 db_offset;
+};
+
+struct bnxt_fw_msg {
+ void *msg;
+ int msg_len;
+ void *resp;
+ int resp_max_len;
+ int timeout;
+};
+
+struct bnxt_ulp {
+ void *handle;
+ struct bnxt_ulp_ops __rcu *ulp_ops;
+ unsigned long *async_events_bmap;
+ u16 max_async_event_id;
+ u16 msix_requested;
+ atomic_t ref_count;
+};
+
+struct bnxt_en_dev {
+ struct net_device *net;
+ struct pci_dev *pdev;
+ u32 flags;
+ #define BNXT_EN_FLAG_ROCEV1_CAP 0x1
+ #define BNXT_EN_FLAG_ROCEV2_CAP 0x2
+ #define BNXT_EN_FLAG_ROCE_CAP (BNXT_EN_FLAG_ROCEV1_CAP | \
+ BNXT_EN_FLAG_ROCEV2_CAP)
+ const struct bnxt_en_ops *en_ops;
+ struct bnxt_ulp ulp_tbl[BNXT_MAX_ULP];
+};
+
+struct bnxt_en_ops {
+ int (*bnxt_register_device)(struct bnxt_en_dev *, int,
+ struct bnxt_ulp_ops *, void *);
+ int (*bnxt_unregister_device)(struct bnxt_en_dev *, int);
+ int (*bnxt_request_msix)(struct bnxt_en_dev *, int,
+ struct bnxt_msix_entry *, int);
+ int (*bnxt_free_msix)(struct bnxt_en_dev *, int);
+ int (*bnxt_send_fw_msg)(struct bnxt_en_dev *, int,
+ struct bnxt_fw_msg *);
+ int (*bnxt_register_fw_async_events)(struct bnxt_en_dev *, int,
+ unsigned long *, u16);
+};
+
+static inline bool bnxt_ulp_registered(struct bnxt_en_dev *edev, int ulp_id)
+{
+ if (edev && rcu_access_pointer(edev->ulp_tbl[ulp_id].ulp_ops))
+ return true;
+ return false;
+}
+
+void bnxt_subtract_ulp_resources(struct bnxt *bp, int ulp_id);
+void bnxt_ulp_stop(struct bnxt *bp);
+void bnxt_ulp_start(struct bnxt *bp);
+void bnxt_ulp_sriov_cfg(struct bnxt *bp, int num_vfs);
+void bnxt_ulp_async_events(struct bnxt *bp, struct hwrm_async_event_cmpl *cmpl);
+struct bnxt_en_dev *bnxt_ulp_probe(struct net_device *dev);
+
+#endif
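/* Usage sketch for the interface above: how an upper-layer driver (for
 * example a RoCE driver) might attach. Everything outside the bnxt_ulp.h
 * API, i.e. the callback names, my_handle and the function name, is a
 * hypothetical placeholder; registration must run under RTNL, which
 * bnxt_register_dev() asserts.
 */
static struct bnxt_ulp_ops my_ulp_ops = {
	.ulp_stop  = my_ulp_stop,	/* hypothetical callbacks */
	.ulp_start = my_ulp_start,
};

static int sketch_attach(struct net_device *netdev, void *my_handle)
{
	struct bnxt_en_dev *edev = bnxt_ulp_probe(netdev);
	int rc;

	if (IS_ERR(edev))
		return PTR_ERR(edev);
	rtnl_lock();
	rc = edev->en_ops->bnxt_register_device(edev, BNXT_ROCE_ULP,
						&my_ulp_ops, my_handle);
	rtnl_unlock();
	return rc;
}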
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index a4e60e56c14f..f92896835d2a 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -971,13 +971,6 @@ static int bcmgenet_set_eee(struct net_device *dev, struct ethtool_eee *e)
return phy_ethtool_set_eee(priv->phydev, e);
}
-static int bcmgenet_nway_reset(struct net_device *dev)
-{
- struct bcmgenet_priv *priv = netdev_priv(dev);
-
- return genphy_restart_aneg(priv->phydev);
-}
-
/* standard ethtool support functions. */
static const struct ethtool_ops bcmgenet_ethtool_ops = {
.get_strings = bcmgenet_get_strings,
@@ -991,7 +984,7 @@ static const struct ethtool_ops bcmgenet_ethtool_ops = {
.set_wol = bcmgenet_set_wol,
.get_eee = bcmgenet_get_eee,
.set_eee = bcmgenet_set_eee,
- .nway_reset = bcmgenet_nway_reset,
+ .nway_reset = phy_ethtool_nway_reset,
.get_coalesce = bcmgenet_get_coalesce,
.set_coalesce = bcmgenet_set_coalesce,
.get_link_ksettings = bcmgenet_get_link_ksettings,
diff --git a/drivers/net/ethernet/broadcom/sb1250-mac.c b/drivers/net/ethernet/broadcom/sb1250-mac.c
index f1b81187a201..435a2e4739d1 100644
--- a/drivers/net/ethernet/broadcom/sb1250-mac.c
+++ b/drivers/net/ethernet/broadcom/sb1250-mac.c
@@ -2147,15 +2147,6 @@ static void sbmac_setmulti(struct sbmac_softc *sc)
}
}
-static int sb1250_change_mtu(struct net_device *_dev, int new_mtu)
-{
- if (new_mtu > ENET_PACKET_SIZE)
- return -EINVAL;
- _dev->mtu = new_mtu;
- pr_info("changing the mtu to %d\n", new_mtu);
- return 0;
-}
-
static const struct net_device_ops sbmac_netdev_ops = {
.ndo_open = sbmac_open,
.ndo_stop = sbmac_close,
@@ -2163,7 +2154,6 @@ static const struct net_device_ops sbmac_netdev_ops = {
.ndo_set_rx_mode = sbmac_set_rx_mode,
.ndo_tx_timeout = sbmac_tx_timeout,
.ndo_do_ioctl = sbmac_mii_ioctl,
- .ndo_change_mtu = sb1250_change_mtu,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
@@ -2229,6 +2219,8 @@ static int sbmac_init(struct platform_device *pldev, long long base)
dev->netdev_ops = &sbmac_netdev_ops;
dev->watchdog_timeo = TX_TIMEOUT;
+ dev->min_mtu = 0;
+ dev->max_mtu = ENET_PACKET_SIZE;
netif_napi_add(dev, &sc->napi, sbmac_poll, 16);
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index a927a730da10..185e9e047aa9 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -124,7 +124,7 @@ static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
#define TG3_TX_TIMEOUT (5 * HZ)
/* hardware minimum and maximum for a single frame's data payload */
-#define TG3_MIN_MTU 60
+#define TG3_MIN_MTU ETH_ZLEN
#define TG3_MAX_MTU(tp) \
(tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)
@@ -14199,9 +14199,6 @@ static int tg3_change_mtu(struct net_device *dev, int new_mtu)
int err;
bool reset_phy = false;
- if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
- return -EINVAL;
-
if (!netif_running(dev)) {
/* We'll just catch it later when the
* device is up'd.
@@ -17799,6 +17796,10 @@ static int tg3_init_one(struct pci_dev *pdev,
dev->hw_features |= features;
dev->priv_flags |= IFF_UNICAST_FLT;
+ /* MTU range: 60 - 9000 or 1500, depending on hardware */
+ dev->min_mtu = TG3_MIN_MTU;
+ dev->max_mtu = TG3_MAX_MTU(tp);
+
if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 &&
!tg3_flag(tp, TSO_CAPABLE) &&
!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
diff --git a/drivers/net/ethernet/brocade/bna/bna_enet.c b/drivers/net/ethernet/brocade/bna/bna_enet.c
index 4e5c3874a50f..bba81735ce87 100644
--- a/drivers/net/ethernet/brocade/bna/bna_enet.c
+++ b/drivers/net/ethernet/brocade/bna/bna_enet.c
@@ -1676,10 +1676,10 @@ bna_cb_ioceth_reset(void *arg)
}
static struct bfa_ioc_cbfn bna_ioceth_cbfn = {
- bna_cb_ioceth_enable,
- bna_cb_ioceth_disable,
- bna_cb_ioceth_hbfail,
- bna_cb_ioceth_reset
+ .enable_cbfn = bna_cb_ioceth_enable,
+ .disable_cbfn = bna_cb_ioceth_disable,
+ .hbfail_cbfn = bna_cb_ioceth_hbfail,
+ .reset_cbfn = bna_cb_ioceth_reset
};
static void bna_attr_init(struct bna_ioceth *ioceth)
diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c
index f42f672b0e7e..112030828c4b 100644
--- a/drivers/net/ethernet/brocade/bna/bnad.c
+++ b/drivers/net/ethernet/brocade/bna/bnad.c
@@ -3296,9 +3296,6 @@ bnad_change_mtu(struct net_device *netdev, int new_mtu)
struct bnad *bnad = netdev_priv(netdev);
u32 rx_count = 0, frame, new_frame;
- if (new_mtu + ETH_HLEN < ETH_ZLEN || new_mtu > BNAD_JUMBO_MTU)
- return -EINVAL;
-
mutex_lock(&bnad->conf_mutex);
mtu = netdev->mtu;
@@ -3465,6 +3462,10 @@ bnad_netdev_init(struct bnad *bnad, bool using_dac)
netdev->mem_start = bnad->mmio_start;
netdev->mem_end = bnad->mmio_start + bnad->mmio_len - 1;
+ /* MTU range: 46 - 9000 */
+ netdev->min_mtu = ETH_ZLEN - ETH_HLEN;
+ netdev->max_mtu = BNAD_JUMBO_MTU;
+
netdev->netdev_ops = &bnad_netdev_ops;
bnad_set_ethtool_ops(netdev);
}
diff --git a/drivers/net/ethernet/brocade/bna/bnad_ethtool.c b/drivers/net/ethernet/brocade/bna/bnad_ethtool.c
index 31f61a744d66..286593922139 100644
--- a/drivers/net/ethernet/brocade/bna/bnad_ethtool.c
+++ b/drivers/net/ethernet/brocade/bna/bnad_ethtool.c
@@ -240,40 +240,46 @@ static const char *bnad_net_stats_strings[] = {
#define BNAD_ETHTOOL_STATS_NUM ARRAY_SIZE(bnad_net_stats_strings)
static int
-bnad_get_settings(struct net_device *netdev, struct ethtool_cmd *cmd)
+bnad_get_link_ksettings(struct net_device *netdev,
+ struct ethtool_link_ksettings *cmd)
{
- cmd->supported = SUPPORTED_10000baseT_Full;
- cmd->advertising = ADVERTISED_10000baseT_Full;
- cmd->autoneg = AUTONEG_DISABLE;
- cmd->supported |= SUPPORTED_FIBRE;
- cmd->advertising |= ADVERTISED_FIBRE;
- cmd->port = PORT_FIBRE;
- cmd->phy_address = 0;
+ u32 supported, advertising;
+
+ supported = SUPPORTED_10000baseT_Full;
+ advertising = ADVERTISED_10000baseT_Full;
+ cmd->base.autoneg = AUTONEG_DISABLE;
+ supported |= SUPPORTED_FIBRE;
+ advertising |= ADVERTISED_FIBRE;
+ cmd->base.port = PORT_FIBRE;
+ cmd->base.phy_address = 0;
if (netif_carrier_ok(netdev)) {
- ethtool_cmd_speed_set(cmd, SPEED_10000);
- cmd->duplex = DUPLEX_FULL;
+ cmd->base.speed = SPEED_10000;
+ cmd->base.duplex = DUPLEX_FULL;
} else {
- ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
- cmd->duplex = DUPLEX_UNKNOWN;
+ cmd->base.speed = SPEED_UNKNOWN;
+ cmd->base.duplex = DUPLEX_UNKNOWN;
}
- cmd->transceiver = XCVR_EXTERNAL;
- cmd->maxtxpkt = 0;
- cmd->maxrxpkt = 0;
+
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
+ supported);
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
+ advertising);
return 0;
}
static int
-bnad_set_settings(struct net_device *netdev, struct ethtool_cmd *cmd)
+bnad_set_link_ksettings(struct net_device *netdev,
+ const struct ethtool_link_ksettings *cmd)
{
	/* Only the 10G full-duplex setting is supported */
- if (cmd->autoneg == AUTONEG_ENABLE)
- return -EOPNOTSUPP; else {
- if ((ethtool_cmd_speed(cmd) == SPEED_10000)
- && (cmd->duplex == DUPLEX_FULL))
- return 0;
- }
+ if (cmd->base.autoneg == AUTONEG_ENABLE)
+ return -EOPNOTSUPP;
+
+ if ((cmd->base.speed == SPEED_10000) &&
+ (cmd->base.duplex == DUPLEX_FULL))
+ return 0;
return -EOPNOTSUPP;
}
@@ -1118,8 +1124,6 @@ out:
}
static const struct ethtool_ops bnad_ethtool_ops = {
- .get_settings = bnad_get_settings,
- .set_settings = bnad_set_settings,
.get_drvinfo = bnad_get_drvinfo,
.get_wol = bnad_get_wol,
.get_link = ethtool_op_get_link,
@@ -1137,6 +1141,8 @@ static const struct ethtool_ops bnad_ethtool_ops = {
.set_eeprom = bnad_set_eeprom,
.flash_device = bnad_flash_device,
.get_ts_info = ethtool_op_get_ts_info,
+ .get_link_ksettings = bnad_get_link_ksettings,
+ .set_link_ksettings = bnad_set_link_ksettings,
};
void
diff --git a/drivers/net/ethernet/cadence/Kconfig b/drivers/net/ethernet/cadence/Kconfig
index f0bcb15d3fec..608bea171956 100644
--- a/drivers/net/ethernet/cadence/Kconfig
+++ b/drivers/net/ethernet/cadence/Kconfig
@@ -31,4 +31,13 @@ config MACB
To compile this driver as a module, choose M here: the module
will be called macb.
+config MACB_PCI
+ tristate "Cadence PCI MACB/GEM support"
+ depends on MACB && PCI && COMMON_CLK
+ ---help---
+	  This is a PCI wrapper for the MACB driver.
+
+ To compile this driver as a module, choose M here: the module
+ will be called macb_pci.
+
endif # NET_CADENCE
diff --git a/drivers/net/ethernet/cadence/Makefile b/drivers/net/ethernet/cadence/Makefile
index 91f79b1f0505..4ba75594d5c5 100644
--- a/drivers/net/ethernet/cadence/Makefile
+++ b/drivers/net/ethernet/cadence/Makefile
@@ -3,3 +3,4 @@
#
obj-$(CONFIG_MACB) += macb.o
+obj-$(CONFIG_MACB_PCI) += macb_pci.o
diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c
index ec09fcece711..c0fb80acc2da 100644
--- a/drivers/net/ethernet/cadence/macb.c
+++ b/drivers/net/ethernet/cadence/macb.c
@@ -32,19 +32,28 @@
#include <linux/of_gpio.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
-
+#include <linux/ip.h>
+#include <linux/udp.h>
+#include <linux/tcp.h>
#include "macb.h"
#define MACB_RX_BUFFER_SIZE 128
#define RX_BUFFER_MULTIPLE 64 /* bytes */
-#define RX_RING_SIZE 512 /* must be power of 2 */
-#define RX_RING_BYTES (sizeof(struct macb_dma_desc) * RX_RING_SIZE)
-#define TX_RING_SIZE 128 /* must be power of 2 */
-#define TX_RING_BYTES (sizeof(struct macb_dma_desc) * TX_RING_SIZE)
+#define DEFAULT_RX_RING_SIZE 512 /* must be power of 2 */
+#define MIN_RX_RING_SIZE 64
+#define MAX_RX_RING_SIZE 8192
+#define RX_RING_BYTES(bp) (sizeof(struct macb_dma_desc) \
+ * (bp)->rx_ring_size)
+
+#define DEFAULT_TX_RING_SIZE 512 /* must be power of 2 */
+#define MIN_TX_RING_SIZE 64
+#define MAX_TX_RING_SIZE 4096
+#define TX_RING_BYTES(bp) (sizeof(struct macb_dma_desc) \
+ * (bp)->tx_ring_size)
/* level of occupied TX descriptors under which we wake up TX process */
-#define MACB_TX_WAKEUP_THRESH (3 * TX_RING_SIZE / 4)
+#define MACB_TX_WAKEUP_THRESH(bp) (3 * (bp)->tx_ring_size / 4)
#define MACB_RX_INT_FLAGS (MACB_BIT(RCOMP) | MACB_BIT(RXUBR) \
| MACB_BIT(ISR_ROVR))
@@ -53,10 +62,13 @@
| MACB_BIT(TXERR))
#define MACB_TX_INT_FLAGS (MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP))
-#define MACB_MAX_TX_LEN ((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1))
-#define GEM_MAX_TX_LEN ((unsigned int)((1 << GEM_TX_FRMLEN_SIZE) - 1))
+/* Max length of transmit frame must be a multiple of 8 bytes */
+#define MACB_TX_LEN_ALIGN 8
+#define MACB_MAX_TX_LEN ((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1) & ~((unsigned int)(MACB_TX_LEN_ALIGN - 1)))
+#define GEM_MAX_TX_LEN ((unsigned int)((1 << GEM_TX_FRMLEN_SIZE) - 1) & ~((unsigned int)(MACB_TX_LEN_ALIGN - 1)))
-#define GEM_MTU_MIN_SIZE 68
+#define GEM_MTU_MIN_SIZE ETH_MIN_MTU
+#define MACB_NETIF_LSO (NETIF_F_TSO | NETIF_F_UFO)
#define MACB_WOL_HAS_MAGIC_PACKET (0x1 << 0)
#define MACB_WOL_ENABLED (0x1 << 1)
@@ -67,45 +79,47 @@
#define MACB_HALT_TIMEOUT 1230
/* Ring buffer accessors */
-static unsigned int macb_tx_ring_wrap(unsigned int index)
+static unsigned int macb_tx_ring_wrap(struct macb *bp, unsigned int index)
{
- return index & (TX_RING_SIZE - 1);
+ return index & (bp->tx_ring_size - 1);
}
static struct macb_dma_desc *macb_tx_desc(struct macb_queue *queue,
unsigned int index)
{
- return &queue->tx_ring[macb_tx_ring_wrap(index)];
+ return &queue->tx_ring[macb_tx_ring_wrap(queue->bp, index)];
}
static struct macb_tx_skb *macb_tx_skb(struct macb_queue *queue,
unsigned int index)
{
- return &queue->tx_skb[macb_tx_ring_wrap(index)];
+ return &queue->tx_skb[macb_tx_ring_wrap(queue->bp, index)];
}
static dma_addr_t macb_tx_dma(struct macb_queue *queue, unsigned int index)
{
dma_addr_t offset;
- offset = macb_tx_ring_wrap(index) * sizeof(struct macb_dma_desc);
+ offset = macb_tx_ring_wrap(queue->bp, index) *
+ sizeof(struct macb_dma_desc);
return queue->tx_ring_dma + offset;
}
-static unsigned int macb_rx_ring_wrap(unsigned int index)
+static unsigned int macb_rx_ring_wrap(struct macb *bp, unsigned int index)
{
- return index & (RX_RING_SIZE - 1);
+ return index & (bp->rx_ring_size - 1);
}
static struct macb_dma_desc *macb_rx_desc(struct macb *bp, unsigned int index)
{
- return &bp->rx_ring[macb_rx_ring_wrap(index)];
+ return &bp->rx_ring[macb_rx_ring_wrap(bp, index)];
}
static void *macb_rx_buffer(struct macb *bp, unsigned int index)
{
- return bp->rx_buffers + bp->rx_buffer_size * macb_rx_ring_wrap(index);
+ return bp->rx_buffers + bp->rx_buffer_size *
+ macb_rx_ring_wrap(bp, index);
}
/* I/O accessors */
@@ -390,6 +404,8 @@ static int macb_mii_probe(struct net_device *dev)
phy_irq = gpio_to_irq(pdata->phy_irq_pin);
phydev->irq = (phy_irq < 0) ? PHY_POLL : phy_irq;
}
+ } else {
+ phydev->irq = PHY_POLL;
}
/* attach the mac to the phy */
@@ -468,6 +484,9 @@ static int macb_mii_init(struct macb *bp)
goto err_out_unregister_bus;
}
} else {
+ for (i = 0; i < PHY_MAX_ADDR; i++)
+ bp->mii_bus->irq[i] = PHY_POLL;
+
if (pdata)
bp->mii_bus->phy_mask = pdata->phy_mask;
@@ -608,7 +627,8 @@ static void macb_tx_error_task(struct work_struct *work)
*/
if (!(ctrl & MACB_BIT(TX_BUF_EXHAUSTED))) {
netdev_vdbg(bp->dev, "txerr skb %u (data %p) TX complete\n",
- macb_tx_ring_wrap(tail), skb->data);
+ macb_tx_ring_wrap(bp, tail),
+ skb->data);
bp->stats.tx_packets++;
bp->stats.tx_bytes += skb->len;
}
@@ -700,7 +720,8 @@ static void macb_tx_interrupt(struct macb_queue *queue)
/* First, update TX stats if needed */
if (skb) {
netdev_vdbg(bp->dev, "skb %u (data %p) TX complete\n",
- macb_tx_ring_wrap(tail), skb->data);
+ macb_tx_ring_wrap(bp, tail),
+ skb->data);
bp->stats.tx_packets++;
bp->stats.tx_bytes += skb->len;
}
@@ -720,7 +741,7 @@ static void macb_tx_interrupt(struct macb_queue *queue)
queue->tx_tail = tail;
if (__netif_subqueue_stopped(bp->dev, queue_index) &&
CIRC_CNT(queue->tx_head, queue->tx_tail,
- TX_RING_SIZE) <= MACB_TX_WAKEUP_THRESH)
+ bp->tx_ring_size) <= MACB_TX_WAKEUP_THRESH(bp))
netif_wake_subqueue(bp->dev, queue_index);
}
@@ -731,8 +752,8 @@ static void gem_rx_refill(struct macb *bp)
dma_addr_t paddr;
while (CIRC_SPACE(bp->rx_prepared_head, bp->rx_tail,
- RX_RING_SIZE) > 0) {
- entry = macb_rx_ring_wrap(bp->rx_prepared_head);
+ bp->rx_ring_size) > 0) {
+ entry = macb_rx_ring_wrap(bp, bp->rx_prepared_head);
/* Make hw descriptor updates visible to CPU */
rmb();
@@ -759,7 +780,7 @@ static void gem_rx_refill(struct macb *bp)
bp->rx_skbuff[entry] = skb;
- if (entry == RX_RING_SIZE - 1)
+ if (entry == bp->rx_ring_size - 1)
paddr |= MACB_BIT(RX_WRAP);
macb_set_addr(&(bp->rx_ring[entry]), paddr);
bp->rx_ring[entry].ctrl = 0;
@@ -813,7 +834,7 @@ static int gem_rx(struct macb *bp, int budget)
dma_addr_t addr;
bool rxused;
- entry = macb_rx_ring_wrap(bp->rx_tail);
+ entry = macb_rx_ring_wrap(bp, bp->rx_tail);
desc = &bp->rx_ring[entry];
/* Make hw descriptor updates visible to CPU */
@@ -895,8 +916,8 @@ static int macb_rx_frame(struct macb *bp, unsigned int first_frag,
len = desc->ctrl & bp->rx_frm_len_mask;
netdev_vdbg(bp->dev, "macb_rx_frame frags %u - %u (len %u)\n",
- macb_rx_ring_wrap(first_frag),
- macb_rx_ring_wrap(last_frag), len);
+ macb_rx_ring_wrap(bp, first_frag),
+ macb_rx_ring_wrap(bp, last_frag), len);
/* The ethernet header starts NET_IP_ALIGN bytes into the
* first buffer. Since the header is 14 bytes, this makes the
@@ -969,12 +990,12 @@ static inline void macb_init_rx_ring(struct macb *bp)
int i;
addr = bp->rx_buffers_dma;
- for (i = 0; i < RX_RING_SIZE; i++) {
+ for (i = 0; i < bp->rx_ring_size; i++) {
bp->rx_ring[i].addr = addr;
bp->rx_ring[i].ctrl = 0;
addr += bp->rx_buffer_size;
}
- bp->rx_ring[RX_RING_SIZE - 1].addr |= MACB_BIT(RX_WRAP);
+ bp->rx_ring[bp->rx_ring_size - 1].addr |= MACB_BIT(RX_WRAP);
bp->rx_tail = 0;
}
@@ -1214,7 +1235,8 @@ static void macb_poll_controller(struct net_device *dev)
static unsigned int macb_tx_map(struct macb *bp,
struct macb_queue *queue,
- struct sk_buff *skb)
+ struct sk_buff *skb,
+ unsigned int hdrlen)
{
dma_addr_t mapping;
unsigned int len, entry, i, tx_head = queue->tx_head;
@@ -1222,15 +1244,28 @@ static unsigned int macb_tx_map(struct macb *bp,
struct macb_dma_desc *desc;
unsigned int offset, size, count = 0;
unsigned int f, nr_frags = skb_shinfo(skb)->nr_frags;
- unsigned int eof = 1;
- u32 ctrl;
+ unsigned int eof = 1, mss_mfs = 0;
+ u32 ctrl, lso_ctrl = 0, seq_ctrl = 0;
+
+ /* LSO */
+ if (skb_shinfo(skb)->gso_size != 0) {
+ if (ip_hdr(skb)->protocol == IPPROTO_UDP)
+ /* UDP - UFO */
+ lso_ctrl = MACB_LSO_UFO_ENABLE;
+ else
+ /* TCP - TSO */
+ lso_ctrl = MACB_LSO_TSO_ENABLE;
+ }
/* First, map non-paged data */
len = skb_headlen(skb);
+
+ /* first buffer length */
+ size = hdrlen;
+
offset = 0;
while (len) {
- size = min(len, bp->max_tx_length);
- entry = macb_tx_ring_wrap(tx_head);
+ entry = macb_tx_ring_wrap(bp, tx_head);
tx_skb = &queue->tx_skb[entry];
mapping = dma_map_single(&bp->pdev->dev,
@@ -1249,6 +1284,8 @@ static unsigned int macb_tx_map(struct macb *bp,
offset += size;
count++;
tx_head++;
+
+ size = min(len, bp->max_tx_length);
}
/* Then, map paged data from fragments */
@@ -1259,7 +1296,7 @@ static unsigned int macb_tx_map(struct macb *bp,
offset = 0;
while (len) {
size = min(len, bp->max_tx_length);
- entry = macb_tx_ring_wrap(tx_head);
+ entry = macb_tx_ring_wrap(bp, tx_head);
tx_skb = &queue->tx_skb[entry];
mapping = skb_frag_dma_map(&bp->pdev->dev, frag,
@@ -1297,14 +1334,29 @@ static unsigned int macb_tx_map(struct macb *bp,
* to set the end of TX queue
*/
i = tx_head;
- entry = macb_tx_ring_wrap(i);
+ entry = macb_tx_ring_wrap(bp, i);
ctrl = MACB_BIT(TX_USED);
desc = &queue->tx_ring[entry];
desc->ctrl = ctrl;
+ if (lso_ctrl) {
+ if (lso_ctrl == MACB_LSO_UFO_ENABLE)
+ /* include header and FCS in value given to h/w */
+ mss_mfs = skb_shinfo(skb)->gso_size +
+ skb_transport_offset(skb) +
+ ETH_FCS_LEN;
+ else /* TSO */ {
+ mss_mfs = skb_shinfo(skb)->gso_size;
+ /* TCP Sequence Number Source Select
+ * can be set only for TSO
+ */
+ seq_ctrl = 0;
+ }
+ }
+
do {
i--;
- entry = macb_tx_ring_wrap(i);
+ entry = macb_tx_ring_wrap(bp, i);
tx_skb = &queue->tx_skb[entry];
desc = &queue->tx_ring[entry];
@@ -1313,9 +1365,19 @@ static unsigned int macb_tx_map(struct macb *bp,
ctrl |= MACB_BIT(TX_LAST);
eof = 0;
}
- if (unlikely(entry == (TX_RING_SIZE - 1)))
+ if (unlikely(entry == (bp->tx_ring_size - 1)))
ctrl |= MACB_BIT(TX_WRAP);
+ /* First descriptor is header descriptor */
+ if (i == queue->tx_head) {
+ ctrl |= MACB_BF(TX_LSO, lso_ctrl);
+ ctrl |= MACB_BF(TX_TCP_SEQ_SRC, seq_ctrl);
+ } else
+ /* Only set MSS/MFS on payload descriptors
+ * (second or later descriptor)
+ */
+ ctrl |= MACB_BF(MSS_MFS, mss_mfs);
+
/* Set TX buffer descriptor */
macb_set_addr(desc, tx_skb->mapping);
/* desc->addr must be visible to hardware before clearing
@@ -1341,6 +1403,43 @@ dma_error:
return 0;
}
+static netdev_features_t macb_features_check(struct sk_buff *skb,
+ struct net_device *dev,
+ netdev_features_t features)
+{
+ unsigned int nr_frags, f;
+ unsigned int hdrlen;
+
+ /* Validate LSO compatibility */
+
+ /* there is only one buffer */
+ if (!skb_is_nonlinear(skb))
+ return features;
+
+ /* length of header */
+ hdrlen = skb_transport_offset(skb);
+ if (ip_hdr(skb)->protocol == IPPROTO_TCP)
+ hdrlen += tcp_hdrlen(skb);
+
+ /* For LSO:
+ * When software supplies two or more payload buffers, all payload buffers
+ * apart from the last must be a multiple of 8 bytes in size.
+ */
+ if (!IS_ALIGNED(skb_headlen(skb) - hdrlen, MACB_TX_LEN_ALIGN))
+ return features & ~MACB_NETIF_LSO;
+
+ nr_frags = skb_shinfo(skb)->nr_frags;
+ /* No need to check last fragment */
+ nr_frags--;
+ for (f = 0; f < nr_frags; f++) {
+ const skb_frag_t *frag = &skb_shinfo(skb)->frags[f];
+
+ if (!IS_ALIGNED(skb_frag_size(frag), MACB_TX_LEN_ALIGN))
+ return features & ~MACB_NETIF_LSO;
+ }
+ return features;
+}
+
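
macb_features_check() is wired up below as .ndo_features_check, the hook the core calls per skb so a driver can veto offloads it cannot honor for that particular packet; here the LSO bits are cleared whenever the linear header buffer or any non-final fragment breaks the multiple-of-8-bytes rule. A userspace mirror of just the alignment rule, with LEN_ALIGN standing in for MACB_TX_LEN_ALIGN (assumed to be 8, per the comment above):

#include <stdbool.h>
#include <stddef.h>

#define LEN_ALIGN 8
#define ALIGNED(x) (((x) & (LEN_ALIGN - 1)) == 0)

/* head_payload: linear bytes after the headers; frag_len[]: the nfrags
 * paged-fragment lengths. Only the final buffer is exempt.
 */
static bool lso_lengths_ok(size_t head_payload,
			   const size_t *frag_len, int nfrags)
{
	int f;

	if (!ALIGNED(head_payload))
		return false;
	for (f = 0; f < nfrags - 1; f++)
		if (!ALIGNED(frag_len[f]))
			return false;
	return true;
}
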
static inline int macb_clear_csum(struct sk_buff *skb)
{
/* no change for packets without checksum offloading */
@@ -1365,7 +1464,28 @@ static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
struct macb *bp = netdev_priv(dev);
struct macb_queue *queue = &bp->queues[queue_index];
unsigned long flags;
- unsigned int count, nr_frags, frag_size, f;
+ unsigned int desc_cnt, nr_frags, frag_size, f;
+ unsigned int hdrlen;
+ bool is_lso, is_udp = 0;
+
+ is_lso = (skb_shinfo(skb)->gso_size != 0);
+
+ if (is_lso) {
+ is_udp = !!(ip_hdr(skb)->protocol == IPPROTO_UDP);
+
+ /* length of headers */
+ if (is_udp)
+ /* only queue eth + ip headers separately for UDP */
+ hdrlen = skb_transport_offset(skb);
+ else
+ hdrlen = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ if (skb_headlen(skb) < hdrlen) {
+ netdev_err(bp->dev, "Error - LSO headers fragmented\n");
+ /* if this is required, would need to copy to single buffer */
+ return NETDEV_TX_BUSY;
+ }
+ } else
+ hdrlen = min(skb_headlen(skb), bp->max_tx_length);
#if defined(DEBUG) && defined(VERBOSE_DEBUG)
netdev_vdbg(bp->dev,
@@ -1380,17 +1500,22 @@ static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
* socket buffer: skb fragments of jumbo frames may need to be
* split into many buffer descriptors.
*/
- count = DIV_ROUND_UP(skb_headlen(skb), bp->max_tx_length);
+ if (is_lso && (skb_headlen(skb) > hdrlen))
+ /* extra header descriptor if also payload in first buffer */
+ desc_cnt = DIV_ROUND_UP((skb_headlen(skb) - hdrlen), bp->max_tx_length) + 1;
+ else
+ desc_cnt = DIV_ROUND_UP(skb_headlen(skb), bp->max_tx_length);
nr_frags = skb_shinfo(skb)->nr_frags;
for (f = 0; f < nr_frags; f++) {
frag_size = skb_frag_size(&skb_shinfo(skb)->frags[f]);
- count += DIV_ROUND_UP(frag_size, bp->max_tx_length);
+ desc_cnt += DIV_ROUND_UP(frag_size, bp->max_tx_length);
}
spin_lock_irqsave(&bp->lock, flags);
/* This is a hard error, log it. */
- if (CIRC_SPACE(queue->tx_head, queue->tx_tail, TX_RING_SIZE) < count) {
+ if (CIRC_SPACE(queue->tx_head, queue->tx_tail,
+ bp->tx_ring_size) < desc_cnt) {
netif_stop_subqueue(dev, queue_index);
spin_unlock_irqrestore(&bp->lock, flags);
netdev_dbg(bp->dev, "tx_head = %u, tx_tail = %u\n",
@@ -1404,7 +1529,7 @@ static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
}
/* Map socket buffer for DMA transfer */
- if (!macb_tx_map(bp, queue, skb)) {
+ if (!macb_tx_map(bp, queue, skb, hdrlen)) {
dev_kfree_skb_any(skb);
goto unlock;
}
@@ -1416,7 +1541,7 @@ static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART));
- if (CIRC_SPACE(queue->tx_head, queue->tx_tail, TX_RING_SIZE) < 1)
+ if (CIRC_SPACE(queue->tx_head, queue->tx_tail, bp->tx_ring_size) < 1)
netif_stop_subqueue(dev, queue_index);
unlock:
@@ -1455,7 +1580,7 @@ static void gem_free_rx_buffers(struct macb *bp)
if (!bp->rx_skbuff)
return;
- for (i = 0; i < RX_RING_SIZE; i++) {
+ for (i = 0; i < bp->rx_ring_size; i++) {
skb = bp->rx_skbuff[i];
if (!skb)
@@ -1480,7 +1605,7 @@ static void macb_free_rx_buffers(struct macb *bp)
{
if (bp->rx_buffers) {
dma_free_coherent(&bp->pdev->dev,
- RX_RING_SIZE * bp->rx_buffer_size,
+ bp->rx_ring_size * bp->rx_buffer_size,
bp->rx_buffers, bp->rx_buffers_dma);
bp->rx_buffers = NULL;
}
@@ -1493,7 +1618,7 @@ static void macb_free_consistent(struct macb *bp)
bp->macbgem_ops.mog_free_rx_buffers(bp);
if (bp->rx_ring) {
- dma_free_coherent(&bp->pdev->dev, RX_RING_BYTES,
+ dma_free_coherent(&bp->pdev->dev, RX_RING_BYTES(bp),
bp->rx_ring, bp->rx_ring_dma);
bp->rx_ring = NULL;
}
@@ -1502,7 +1627,7 @@ static void macb_free_consistent(struct macb *bp)
kfree(queue->tx_skb);
queue->tx_skb = NULL;
if (queue->tx_ring) {
- dma_free_coherent(&bp->pdev->dev, TX_RING_BYTES,
+ dma_free_coherent(&bp->pdev->dev, TX_RING_BYTES(bp),
queue->tx_ring, queue->tx_ring_dma);
queue->tx_ring = NULL;
}
@@ -1513,14 +1638,14 @@ static int gem_alloc_rx_buffers(struct macb *bp)
{
int size;
- size = RX_RING_SIZE * sizeof(struct sk_buff *);
+ size = bp->rx_ring_size * sizeof(struct sk_buff *);
bp->rx_skbuff = kzalloc(size, GFP_KERNEL);
if (!bp->rx_skbuff)
return -ENOMEM;
-
- netdev_dbg(bp->dev,
- "Allocated %d RX struct sk_buff entries at %p\n",
- RX_RING_SIZE, bp->rx_skbuff);
+ else
+ netdev_dbg(bp->dev,
+ "Allocated %d RX struct sk_buff entries at %p\n",
+ bp->rx_ring_size, bp->rx_skbuff);
return 0;
}
@@ -1528,7 +1653,7 @@ static int macb_alloc_rx_buffers(struct macb *bp)
{
int size;
- size = RX_RING_SIZE * bp->rx_buffer_size;
+ size = bp->rx_ring_size * bp->rx_buffer_size;
bp->rx_buffers = dma_alloc_coherent(&bp->pdev->dev, size,
&bp->rx_buffers_dma, GFP_KERNEL);
if (!bp->rx_buffers)
@@ -1547,7 +1672,7 @@ static int macb_alloc_consistent(struct macb *bp)
int size;
for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
- size = TX_RING_BYTES;
+ size = TX_RING_BYTES(bp);
queue->tx_ring = dma_alloc_coherent(&bp->pdev->dev, size,
&queue->tx_ring_dma,
GFP_KERNEL);
@@ -1558,13 +1683,13 @@ static int macb_alloc_consistent(struct macb *bp)
q, size, (unsigned long)queue->tx_ring_dma,
queue->tx_ring);
- size = TX_RING_SIZE * sizeof(struct macb_tx_skb);
+ size = bp->tx_ring_size * sizeof(struct macb_tx_skb);
queue->tx_skb = kmalloc(size, GFP_KERNEL);
if (!queue->tx_skb)
goto out_err;
}
- size = RX_RING_BYTES;
+ size = RX_RING_BYTES(bp);
bp->rx_ring = dma_alloc_coherent(&bp->pdev->dev, size,
&bp->rx_ring_dma, GFP_KERNEL);
if (!bp->rx_ring)
@@ -1590,11 +1715,11 @@ static void gem_init_rings(struct macb *bp)
int i;
for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
- for (i = 0; i < TX_RING_SIZE; i++) {
- macb_set_addr(&(queue->tx_ring[i]), 0);
+ for (i = 0; i < bp->tx_ring_size; i++) {
+ queue->tx_ring[i].addr = 0;
queue->tx_ring[i].ctrl = MACB_BIT(TX_USED);
}
- queue->tx_ring[TX_RING_SIZE - 1].ctrl |= MACB_BIT(TX_WRAP);
+ queue->tx_ring[bp->tx_ring_size - 1].ctrl |= MACB_BIT(TX_WRAP);
queue->tx_head = 0;
queue->tx_tail = 0;
}
@@ -1611,13 +1736,13 @@ static void macb_init_rings(struct macb *bp)
macb_init_rx_ring(bp);
- for (i = 0; i < TX_RING_SIZE; i++) {
+ for (i = 0; i < bp->tx_ring_size; i++) {
bp->queues[0].tx_ring[i].addr = 0;
bp->queues[0].tx_ring[i].ctrl = MACB_BIT(TX_USED);
}
bp->queues[0].tx_head = 0;
bp->queues[0].tx_tail = 0;
- bp->queues[0].tx_ring[TX_RING_SIZE - 1].ctrl |= MACB_BIT(TX_WRAP);
+ bp->queues[0].tx_ring[bp->tx_ring_size - 1].ctrl |= MACB_BIT(TX_WRAP);
}
static void macb_reset_hw(struct macb *bp)
@@ -1986,19 +2111,9 @@ static int macb_close(struct net_device *dev)
static int macb_change_mtu(struct net_device *dev, int new_mtu)
{
- struct macb *bp = netdev_priv(dev);
- u32 max_mtu;
-
if (netif_running(dev))
return -EBUSY;
- max_mtu = ETH_DATA_LEN;
- if (bp->caps & MACB_CAPS_JUMBO)
- max_mtu = gem_readl(bp, JML) - ETH_HLEN - ETH_FCS_LEN;
-
- if ((new_mtu > max_mtu) || (new_mtu < GEM_MTU_MIN_SIZE))
- return -EINVAL;
-
dev->mtu = new_mtu;
return 0;
@@ -2158,8 +2273,8 @@ static void macb_get_regs(struct net_device *dev, struct ethtool_regs *regs,
regs->version = (macb_readl(bp, MID) & ((1 << MACB_REV_SIZE) - 1))
| MACB_GREGS_VERSION;
- tail = macb_tx_ring_wrap(bp->queues[0].tx_tail);
- head = macb_tx_ring_wrap(bp->queues[0].tx_head);
+ tail = macb_tx_ring_wrap(bp, bp->queues[0].tx_tail);
+ head = macb_tx_ring_wrap(bp, bp->queues[0].tx_head);
regs_buff[0] = macb_readl(bp, NCR);
regs_buff[1] = macb_or_gem_readl(bp, NCFGR);
@@ -2214,6 +2329,56 @@ static int macb_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
return 0;
}
+static void macb_get_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *ring)
+{
+ struct macb *bp = netdev_priv(netdev);
+
+ ring->rx_max_pending = MAX_RX_RING_SIZE;
+ ring->tx_max_pending = MAX_TX_RING_SIZE;
+
+ ring->rx_pending = bp->rx_ring_size;
+ ring->tx_pending = bp->tx_ring_size;
+}
+
+static int macb_set_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *ring)
+{
+ struct macb *bp = netdev_priv(netdev);
+ u32 new_rx_size, new_tx_size;
+ unsigned int reset = 0;
+
+ if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
+ return -EINVAL;
+
+ new_rx_size = clamp_t(u32, ring->rx_pending,
+ MIN_RX_RING_SIZE, MAX_RX_RING_SIZE);
+ new_rx_size = roundup_pow_of_two(new_rx_size);
+
+ new_tx_size = clamp_t(u32, ring->tx_pending,
+ MIN_TX_RING_SIZE, MAX_TX_RING_SIZE);
+ new_tx_size = roundup_pow_of_two(new_tx_size);
+
+ if ((new_tx_size == bp->tx_ring_size) &&
+ (new_rx_size == bp->rx_ring_size)) {
+ /* nothing to do */
+ return 0;
+ }
+
+ if (netif_running(bp->dev)) {
+ reset = 1;
+ macb_close(bp->dev);
+ }
+
+ bp->rx_ring_size = new_rx_size;
+ bp->tx_ring_size = new_tx_size;
+
+ if (reset)
+ macb_open(bp->dev);
+
+ return 0;
+}
+
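
macb_set_ringparam() clamps the requested counts between the driver's MIN_*/MAX_* bounds and then rounds up to the next power of two, so the mask-based wrap above keeps working; changing the sizes on a running interface closes and reopens it so the rings are reallocated. A sketch of the size mapping (the bounds here are placeholders, not the driver's values):

#include <stdint.h>
#include <stdio.h>

static uint32_t clamp_u32(uint32_t v, uint32_t lo, uint32_t hi)
{
	return v < lo ? lo : (v > hi ? hi : v);
}

static uint32_t roundup_pow2(uint32_t v)
{
	uint32_t p = 1;

	while (p < v)
		p <<= 1;
	return p;
}

int main(void)
{
	uint32_t lo = 64, hi = 4096;	/* placeholder ring bounds */
	uint32_t req[] = { 100, 2000, 9999 };
	unsigned int i;

	for (i = 0; i < 3; i++)
		printf("%u -> %u\n", req[i],
		       roundup_pow2(clamp_u32(req[i], lo, hi)));
	return 0;
}
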
static const struct ethtool_ops macb_ethtool_ops = {
.get_regs_len = macb_get_regs_len,
.get_regs = macb_get_regs,
@@ -2223,6 +2388,8 @@ static const struct ethtool_ops macb_ethtool_ops = {
.set_wol = macb_set_wol,
.get_link_ksettings = phy_ethtool_get_link_ksettings,
.set_link_ksettings = phy_ethtool_set_link_ksettings,
+ .get_ringparam = macb_get_ringparam,
+ .set_ringparam = macb_set_ringparam,
};
static const struct ethtool_ops gem_ethtool_ops = {
@@ -2235,6 +2402,8 @@ static const struct ethtool_ops gem_ethtool_ops = {
.get_sset_count = gem_get_sset_count,
.get_link_ksettings = phy_ethtool_get_link_ksettings,
.set_link_ksettings = phy_ethtool_set_link_ksettings,
+ .get_ringparam = macb_get_ringparam,
+ .set_ringparam = macb_set_ringparam,
};
static int macb_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
@@ -2298,6 +2467,7 @@ static const struct net_device_ops macb_netdev_ops = {
.ndo_poll_controller = macb_poll_controller,
#endif
.ndo_set_features = macb_set_features,
+ .ndo_features_check = macb_features_check,
};
/* Configure peripheral capabilities according to device tree
@@ -2358,16 +2528,24 @@ static int macb_clk_init(struct platform_device *pdev, struct clk **pclk,
struct clk **hclk, struct clk **tx_clk,
struct clk **rx_clk)
{
+ struct macb_platform_data *pdata;
int err;
- *pclk = devm_clk_get(&pdev->dev, "pclk");
+ pdata = dev_get_platdata(&pdev->dev);
+ if (pdata) {
+ *pclk = pdata->pclk;
+ *hclk = pdata->hclk;
+ } else {
+ *pclk = devm_clk_get(&pdev->dev, "pclk");
+ *hclk = devm_clk_get(&pdev->dev, "hclk");
+ }
+
if (IS_ERR(*pclk)) {
err = PTR_ERR(*pclk);
dev_err(&pdev->dev, "failed to get macb_clk (%u)\n", err);
return err;
}
- *hclk = devm_clk_get(&pdev->dev, "hclk");
if (IS_ERR(*hclk)) {
err = PTR_ERR(*hclk);
dev_err(&pdev->dev, "failed to get hclk (%u)\n", err);
@@ -2429,6 +2607,9 @@ static int macb_init(struct platform_device *pdev)
int err;
u32 val;
+ bp->tx_ring_size = DEFAULT_TX_RING_SIZE;
+ bp->rx_ring_size = DEFAULT_RX_RING_SIZE;
+
/* set the queue register mapping once for all: queue0 has a special
* register mapping but we don't want to test the queue index then
* compute the corresponding register offset at run time.
@@ -2501,6 +2682,11 @@ static int macb_init(struct platform_device *pdev)
/* Set features */
dev->hw_features = NETIF_F_SG;
+
+ /* Check LSO capability */
+ if (GEM_BFEXT(PBUF_LSO, gem_readl(bp, DCFG6)))
+ dev->hw_features |= MACB_NETIF_LSO;
+
/* Checksum offload is only available on gem with packet buffer */
if (macb_is_gem(bp) && !(bp->caps & MACB_CAPS_FIFO_MODE))
dev->hw_features |= NETIF_F_HW_CSUM | NETIF_F_RXCSUM;
@@ -2800,7 +2986,6 @@ static const struct net_device_ops at91ether_netdev_ops = {
.ndo_set_mac_address = eth_mac_addr,
.ndo_do_ioctl = macb_ioctl,
.ndo_validate_addr = eth_validate_addr,
- .ndo_change_mtu = eth_change_mtu,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = at91ether_poll_controller,
#endif
@@ -2935,15 +3120,23 @@ static const struct of_device_id macb_dt_ids[] = {
MODULE_DEVICE_TABLE(of, macb_dt_ids);
#endif /* CONFIG_OF */
+static const struct macb_config default_gem_config = {
+ .caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE | MACB_CAPS_JUMBO,
+ .dma_burst_length = 16,
+ .clk_init = macb_clk_init,
+ .init = macb_init,
+ .jumbo_max_len = 10240,
+};
+
static int macb_probe(struct platform_device *pdev)
{
+ const struct macb_config *macb_config = &default_gem_config;
int (*clk_init)(struct platform_device *, struct clk **,
struct clk **, struct clk **, struct clk **)
- = macb_clk_init;
- int (*init)(struct platform_device *) = macb_init;
+ = macb_config->clk_init;
+ int (*init)(struct platform_device *) = macb_config->init;
struct device_node *np = pdev->dev.of_node;
struct device_node *phy_node;
- const struct macb_config *macb_config = NULL;
struct clk *pclk, *hclk = NULL, *tx_clk = NULL, *rx_clk = NULL;
unsigned int queue_mask, num_queues;
struct macb_platform_data *pdata;
@@ -3035,6 +3228,13 @@ static int macb_probe(struct platform_device *pdev)
goto err_out_free_netdev;
}
+ /* MTU range: 68 - 1500 or 10240 */
+ dev->min_mtu = GEM_MTU_MIN_SIZE;
+ if (bp->caps & MACB_CAPS_JUMBO)
+ dev->max_mtu = gem_readl(bp, JML) - ETH_HLEN - ETH_FCS_LEN;
+ else
+ dev->max_mtu = ETH_DATA_LEN;
+
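
With dev->min_mtu and dev->max_mtu filled in at probe time, the range check that macb_change_mtu() used to do by hand (removed above) moves into the networking core; the driver now only reports its limits. A simplified sketch of the check the core can perform on the driver's behalf (the real dev_set_mtu() path does more than this):

/* Simplified range check; -22 stands in for -EINVAL. */
static int mtu_in_range(unsigned int new_mtu,
			unsigned int min_mtu, unsigned int max_mtu)
{
	if (new_mtu < min_mtu)
		return -22;
	if (max_mtu && new_mtu > max_mtu)
		return -22;
	return 0;
}
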
mac = of_get_mac_address(np);
if (mac)
ether_addr_copy(bp->dev->dev_addr, mac);
diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h
index 8bed4b52fef5..d67adad67be1 100644
--- a/drivers/net/ethernet/cadence/macb.h
+++ b/drivers/net/ethernet/cadence/macb.h
@@ -382,6 +382,10 @@
#define GEM_TX_PKT_BUFF_OFFSET 21
#define GEM_TX_PKT_BUFF_SIZE 1
+/* Bitfields in DCFG6. */
+#define GEM_PBUF_LSO_OFFSET 27
+#define GEM_PBUF_LSO_SIZE 1
+
/* Constants for CLK */
#define MACB_CLK_DIV8 0
#define MACB_CLK_DIV16 1
@@ -414,6 +418,10 @@
#define MACB_CAPS_SG_DISABLED 0x40000000
#define MACB_CAPS_MACB_IS_GEM 0x80000000
+/* LSO settings */
+#define MACB_LSO_UFO_ENABLE 0x01
+#define MACB_LSO_TSO_ENABLE 0x02
+
/* Bit manipulation macros */
#define MACB_BIT(name) \
(1 << MACB_##name##_OFFSET)
@@ -545,6 +553,12 @@ struct macb_dma_desc {
#define MACB_TX_LAST_SIZE 1
#define MACB_TX_NOCRC_OFFSET 16
#define MACB_TX_NOCRC_SIZE 1
+#define MACB_MSS_MFS_OFFSET 16
+#define MACB_MSS_MFS_SIZE 14
+#define MACB_TX_LSO_OFFSET 17
+#define MACB_TX_LSO_SIZE 2
+#define MACB_TX_TCP_SEQ_SRC_OFFSET 19
+#define MACB_TX_TCP_SEQ_SRC_SIZE 1
#define MACB_TX_BUF_EXHAUSTED_OFFSET 27
#define MACB_TX_BUF_EXHAUSTED_SIZE 1
#define MACB_TX_UNDERRUN_OFFSET 28
@@ -811,6 +825,9 @@ struct macb {
void *rx_buffers;
size_t rx_buffer_size;
+ unsigned int rx_ring_size;
+ unsigned int tx_ring_size;
+
unsigned int num_queues;
unsigned int queue_mask;
struct macb_queue queues[MACB_MAX_QUEUES];
diff --git a/drivers/net/ethernet/cadence/macb_pci.c b/drivers/net/ethernet/cadence/macb_pci.c
new file mode 100644
index 000000000000..92be2cd8f817
--- /dev/null
+++ b/drivers/net/ethernet/cadence/macb_pci.c
@@ -0,0 +1,153 @@
+/**
+ * macb_pci.c - Cadence GEM PCI wrapper.
+ *
+ * Copyright (C) 2016 Cadence Design Systems - http://www.cadence.com
+ *
+ * Authors: Rafal Ozieblo <rafalo@cadence.com>
+ * Bartosz Folta <bfolta@cadence.com>
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 of
+ * the License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/etherdevice.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/platform_data/macb.h>
+#include <linux/platform_device.h>
+#include "macb.h"
+
+#define PCI_DRIVER_NAME "macb_pci"
+#define PLAT_DRIVER_NAME "macb"
+
+#define CDNS_VENDOR_ID 0x17cd
+#define CDNS_DEVICE_ID 0xe007
+
+#define GEM_PCLK_RATE 50000000
+#define GEM_HCLK_RATE 50000000
+
+static int macb_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ int err;
+ struct platform_device *plat_dev;
+ struct platform_device_info plat_info;
+ struct macb_platform_data plat_data;
+ struct resource res[2];
+
+ /* sanity check */
+ if (!id)
+ return -EINVAL;
+
+ /* enable pci device */
+ err = pci_enable_device(pdev);
+ if (err < 0) {
+ dev_err(&pdev->dev, "Enabling PCI device has failed: 0x%04X",
+ err);
+ return -EACCES;
+ }
+
+ pci_set_master(pdev);
+
+ /* set up resources */
+ memset(res, 0x00, sizeof(struct resource) * ARRAY_SIZE(res));
+ res[0].start = pdev->resource[0].start;
+ res[0].end = pdev->resource[0].end;
+ res[0].name = PCI_DRIVER_NAME;
+ res[0].flags = IORESOURCE_MEM;
+ res[1].start = pdev->irq;
+ res[1].name = PCI_DRIVER_NAME;
+ res[1].flags = IORESOURCE_IRQ;
+
+ dev_info(&pdev->dev, "EMAC physical base addr = 0x%p\n",
+ (void *)(uintptr_t)pci_resource_start(pdev, 0));
+
+ /* set up macb platform data */
+ memset(&plat_data, 0, sizeof(plat_data));
+
+ /* initialize clocks */
+ plat_data.pclk = clk_register_fixed_rate(&pdev->dev, "pclk", NULL, 0,
+ GEM_PCLK_RATE);
+ if (IS_ERR(plat_data.pclk)) {
+ err = PTR_ERR(plat_data.pclk);
+ goto err_pclk_register;
+ }
+
+ plat_data.hclk = clk_register_fixed_rate(&pdev->dev, "hclk", NULL, 0,
+ GEM_HCLK_RATE);
+ if (IS_ERR(plat_data.hclk)) {
+ err = PTR_ERR(plat_data.hclk);
+ goto err_hclk_register;
+ }
+
+ /* set up platform device info */
+ memset(&plat_info, 0, sizeof(plat_info));
+ plat_info.parent = &pdev->dev;
+ plat_info.fwnode = pdev->dev.fwnode;
+ plat_info.name = PLAT_DRIVER_NAME;
+ plat_info.id = pdev->devfn;
+ plat_info.res = res;
+ plat_info.num_res = ARRAY_SIZE(res);
+ plat_info.data = &plat_data;
+ plat_info.size_data = sizeof(plat_data);
+ plat_info.dma_mask = DMA_BIT_MASK(32);
+
+ /* register platform device */
+ plat_dev = platform_device_register_full(&plat_info);
+ if (IS_ERR(plat_dev)) {
+ err = PTR_ERR(plat_dev);
+ goto err_plat_dev_register;
+ }
+
+ pci_set_drvdata(pdev, plat_dev);
+
+ return 0;
+
+err_plat_dev_register:
+ clk_unregister(plat_data.hclk);
+
+err_hclk_register:
+ clk_unregister(plat_data.pclk);
+
+err_pclk_register:
+ pci_disable_device(pdev);
+ return err;
+}
+
+static void macb_remove(struct pci_dev *pdev)
+{
+ struct platform_device *plat_dev = pci_get_drvdata(pdev);
+ struct macb_platform_data *plat_data = dev_get_platdata(&plat_dev->dev);
+
+ platform_device_unregister(plat_dev);
+ pci_disable_device(pdev);
+ clk_unregister(plat_data->pclk);
+ clk_unregister(plat_data->hclk);
+}
+
+static struct pci_device_id dev_id_table[] = {
+ { PCI_DEVICE(CDNS_VENDOR_ID, CDNS_DEVICE_ID), },
+ { 0, }
+};
+
+static struct pci_driver macb_pci_driver = {
+ .name = PCI_DRIVER_NAME,
+ .id_table = dev_id_table,
+ .probe = macb_probe,
+ .remove = macb_remove,
+};
+
+module_pci_driver(macb_pci_driver);
+MODULE_DEVICE_TABLE(pci, dev_id_table);
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Cadence NIC PCI wrapper");
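
macb_remove() above fetches plat_data out of the child platform device and then unregisters the clocks after platform_device_unregister() has run; since that platform-data copy is owned by the child device, a more defensive ordering (illustrative sketch only, not a claim about the merged code's lifetime guarantees) would snapshot the clock handles first:

static void macb_remove_sketch(struct pci_dev *pdev)
{
	struct platform_device *plat_dev = pci_get_drvdata(pdev);
	struct macb_platform_data *plat_data =
		dev_get_platdata(&plat_dev->dev);
	/* copy the handles out before the device (and the platform
	 * data it owns) can go away
	 */
	struct clk *pclk = plat_data->pclk;
	struct clk *hclk = plat_data->hclk;

	platform_device_unregister(plat_dev);
	clk_unregister(pclk);
	clk_unregister(hclk);
	pci_disable_device(pdev);
}
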
diff --git a/drivers/net/ethernet/calxeda/xgmac.c b/drivers/net/ethernet/calxeda/xgmac.c
index 63efa0dc45ba..ce7de6f72512 100644
--- a/drivers/net/ethernet/calxeda/xgmac.c
+++ b/drivers/net/ethernet/calxeda/xgmac.c
@@ -394,7 +394,7 @@ struct xgmac_priv {
};
/* XGMAC Configuration Settings */
-#define MAX_MTU 9000
+#define XGMAC_MAX_MTU 9000
#define PAUSE_TIME 0x400
#define DMA_RX_RING_SZ 256
@@ -1360,20 +1360,6 @@ out:
*/
static int xgmac_change_mtu(struct net_device *dev, int new_mtu)
{
- struct xgmac_priv *priv = netdev_priv(dev);
- int old_mtu;
-
- if ((new_mtu < 46) || (new_mtu > MAX_MTU)) {
- netdev_err(priv->dev, "invalid MTU, max MTU is: %d\n", MAX_MTU);
- return -EINVAL;
- }
-
- old_mtu = dev->mtu;
-
- /* return early if the buffer sizes will not change */
- if (old_mtu == new_mtu)
- return 0;
-
/* Stop everything, get ready to change the MTU */
if (!netif_running(dev))
return 0;
@@ -1544,15 +1530,14 @@ static const struct net_device_ops xgmac_netdev_ops = {
.ndo_set_features = xgmac_set_features,
};
-static int xgmac_ethtool_getsettings(struct net_device *dev,
- struct ethtool_cmd *cmd)
+static int xgmac_ethtool_get_link_ksettings(struct net_device *dev,
+ struct ethtool_link_ksettings *cmd)
{
- cmd->autoneg = 0;
- cmd->duplex = DUPLEX_FULL;
- ethtool_cmd_speed_set(cmd, 10000);
- cmd->supported = 0;
- cmd->advertising = 0;
- cmd->transceiver = XCVR_INTERNAL;
+ cmd->base.autoneg = 0;
+ cmd->base.duplex = DUPLEX_FULL;
+ cmd->base.speed = 10000;
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported, 0);
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising, 0);
return 0;
}
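
This converts xgmac from the legacy ethtool get_settings (fixed u32 supported/advertising masks) to the link_ksettings API, where link modes live in bitmaps wide enough for modes past bit 31; ethtool_convert_legacy_u32_to_link_mode() maps each legacy bit onto the new bitmap. A userspace mirror of that conversion idea (not the kernel helper itself):

#include <stdint.h>
#include <string.h>

#define MODE_BITS 128
#define LONGS (MODE_BITS / (8 * (int)sizeof(unsigned long)))

static void legacy_u32_to_bitmap(unsigned long bitmap[LONGS],
				 uint32_t legacy)
{
	unsigned int i;

	memset(bitmap, 0, sizeof(unsigned long) * LONGS);
	for (i = 0; i < 32; i++)
		if (legacy & (1u << i))
			bitmap[i / (8 * sizeof(unsigned long))] |=
				1ul << (i % (8 * sizeof(unsigned long)));
}
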
@@ -1695,7 +1680,6 @@ static int xgmac_set_wol(struct net_device *dev,
}
static const struct ethtool_ops xgmac_ethtool_ops = {
- .get_settings = xgmac_ethtool_getsettings,
.get_link = ethtool_op_get_link,
.get_pauseparam = xgmac_get_pauseparam,
.set_pauseparam = xgmac_set_pauseparam,
@@ -1704,6 +1688,7 @@ static const struct ethtool_ops xgmac_ethtool_ops = {
.get_wol = xgmac_get_wol,
.set_wol = xgmac_set_wol,
.get_sset_count = xgmac_get_sset_count,
+ .get_link_ksettings = xgmac_ethtool_get_link_ksettings,
};
/**
@@ -1804,6 +1789,10 @@ static int xgmac_probe(struct platform_device *pdev)
ndev->features |= ndev->hw_features;
ndev->priv_flags |= IFF_UNICAST_FLT;
+ /* MTU range: 46 - 9000 */
+ ndev->min_mtu = ETH_ZLEN - ETH_HLEN;
+ ndev->max_mtu = XGMAC_MAX_MTU;
+
/* Get the MAC address */
xgmac_get_mac_addr(priv->base, ndev->dev_addr, 0);
if (!is_valid_ether_addr(ndev->dev_addr))
diff --git a/drivers/net/ethernet/cavium/Kconfig b/drivers/net/ethernet/cavium/Kconfig
index 92f411c9f0df..bbc8bd16cb97 100644
--- a/drivers/net/ethernet/cavium/Kconfig
+++ b/drivers/net/ethernet/cavium/Kconfig
@@ -53,7 +53,7 @@ config THUNDER_NIC_RGX
config LIQUIDIO
tristate "Cavium LiquidIO support"
depends on 64BIT
- select PTP_1588_CLOCK
+ imply PTP_1588_CLOCK
select FW_LOADER
select LIBCRC32C
---help---
@@ -74,4 +74,16 @@ config OCTEON_MGMT_ETHERNET
port on Cavium Networks' Octeon CN57XX, CN56XX, CN55XX,
CN54XX, CN52XX, and CN6XXX chips.
+config LIQUIDIO_VF
+ tristate "Cavium LiquidIO VF support"
+ depends on 64BIT && PCI_MSI
+ select PTP_1588_CLOCK
+ ---help---
+ This driver supports Cavium LiquidIO Intelligent Server Adapter
+ based on CN23XX chips.
+
+ To compile this driver as a module, choose M here: The module
+ will be called liquidio_vf. MSI-X interrupt support is required
+	  for this driver to work correctly.
+
endif # NET_VENDOR_CAVIUM
diff --git a/drivers/net/ethernet/cavium/liquidio/Makefile b/drivers/net/ethernet/cavium/liquidio/Makefile
index 5a27b2a44039..c4d411d1aa28 100644
--- a/drivers/net/ethernet/cavium/liquidio/Makefile
+++ b/drivers/net/ethernet/cavium/liquidio/Makefile
@@ -11,8 +11,32 @@ liquidio-$(CONFIG_LIQUIDIO) += lio_ethtool.o \
cn66xx_device.o \
cn68xx_device.o \
cn23xx_pf_device.o \
+ cn23xx_vf_device.o \
+ octeon_mailbox.o \
octeon_mem_ops.o \
octeon_droq.o \
octeon_nic.o
liquidio-objs := lio_main.o octeon_console.o $(liquidio-y)
+
+obj-$(CONFIG_LIQUIDIO_VF) += liquidio_vf.o
+
+ifeq ($(CONFIG_LIQUIDIO)$(CONFIG_LIQUIDIO_VF), yy)
+ liquidio_vf-objs := lio_vf_main.o
+else
+liquidio_vf-$(CONFIG_LIQUIDIO_VF) += lio_ethtool.o \
+ lio_core.o \
+ request_manager.o \
+ response_manager.o \
+ octeon_device.o \
+ cn66xx_device.o \
+ cn68xx_device.o \
+ cn23xx_pf_device.o \
+ cn23xx_vf_device.o \
+ octeon_mailbox.o \
+ octeon_mem_ops.o \
+ octeon_droq.o \
+ octeon_nic.o
+
+liquidio_vf-objs := lio_vf_main.o $(liquidio_vf-y)
+endif
diff --git a/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c
index 380a64115a98..962dcbcef8b5 100644
--- a/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c
+++ b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c
@@ -1,28 +1,23 @@
/**********************************************************************
-* Author: Cavium, Inc.
-*
-* Contact: support@cavium.com
-* Please include "LiquidIO" in the subject.
-*
-* Copyright (c) 2003-2015 Cavium, Inc.
-*
-* This file is free software; you can redistribute it and/or modify
-* it under the terms of the GNU General Public License, Version 2, as
-* published by the Free Software Foundation.
-*
-* This file is distributed in the hope that it will be useful, but
-* AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
-* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
-* NONINFRINGEMENT. See the GNU General Public License for more
-* details.
-*
-* This file may also be available under a different license from Cavium.
-* Contact Cavium, Inc. for more information
-**********************************************************************/
-
+ * Author: Cavium, Inc.
+ *
+ * Contact: support@cavium.com
+ * Please include "LiquidIO" in the subject.
+ *
+ * Copyright (c) 2003-2016 Cavium, Inc.
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more details.
+ ***********************************************************************/
#include <linux/pci.h>
-#include <linux/netdevice.h>
#include <linux/vmalloc.h>
+#include <linux/etherdevice.h>
#include "liquidio_common.h"
#include "octeon_droq.h"
#include "octeon_iq.h"
@@ -30,6 +25,7 @@
#include "octeon_device.h"
#include "cn23xx_pf_device.h"
#include "octeon_main.h"
+#include "octeon_mailbox.h"
#define RESET_NOTDONE 0
#define RESET_DONE 1
@@ -40,11 +36,6 @@
*/
#define CN23XX_INPUT_JABBER 64600
-#define LIOLUT_RING_DISTRIBUTION 9
-const int liolut_num_vfs_to_rings_per_vf[LIOLUT_RING_DISTRIBUTION] = {
- 0, 8, 4, 2, 2, 2, 1, 1, 1
-};
-
void cn23xx_dump_pf_initialized_regs(struct octeon_device *oct)
{
int i = 0;
@@ -309,9 +300,10 @@ u32 cn23xx_pf_get_oq_ticks(struct octeon_device *oct, u32 time_intr_in_us)
static void cn23xx_setup_global_mac_regs(struct octeon_device *oct)
{
- u64 reg_val;
u16 mac_no = oct->pcie_port;
u16 pf_num = oct->pf_num;
+ u64 reg_val;
+ u64 temp;
/* programming SRN and TRS for each MAC(0..3) */
@@ -333,6 +325,14 @@ static void cn23xx_setup_global_mac_regs(struct octeon_device *oct)
/* setting TRS <23:16> */
reg_val = reg_val |
(oct->sriov_info.trs << CN23XX_PKT_MAC_CTL_RINFO_TRS_BIT_POS);
+ /* setting RPVF <39:32> */
+ temp = oct->sriov_info.rings_per_vf & 0xff;
+ reg_val |= (temp << CN23XX_PKT_MAC_CTL_RINFO_RPVF_BIT_POS);
+
+ /* setting NVFS <55:48> */
+ temp = oct->sriov_info.max_vfs & 0xff;
+ reg_val |= (temp << CN23XX_PKT_MAC_CTL_RINFO_NVFS_BIT_POS);
+
/* write these settings to MAC register */
octeon_write_csr64(oct, CN23XX_SLI_PKT_MAC_RINFO64(mac_no, pf_num),
reg_val);
@@ -399,11 +399,12 @@ static int cn23xx_reset_io_queues(struct octeon_device *oct)
static int cn23xx_pf_setup_global_input_regs(struct octeon_device *oct)
{
+ struct octeon_cn23xx_pf *cn23xx = (struct octeon_cn23xx_pf *)oct->chip;
+ struct octeon_instr_queue *iq;
+ u64 intr_threshold, reg_val;
u32 q_no, ern, srn;
u64 pf_num;
- u64 intr_threshold, reg_val;
- struct octeon_instr_queue *iq;
- struct octeon_cn23xx_pf *cn23xx = (struct octeon_cn23xx_pf *)oct->chip;
+ u64 vf_num;
pf_num = oct->pf_num;
@@ -414,12 +415,22 @@ static int cn23xx_pf_setup_global_input_regs(struct octeon_device *oct)
return -1;
/** Set the MAC_NUM and PVF_NUM in IQ_PKT_CONTROL reg
- * for all queues.Only PF can set these bits.
- * bits 29:30 indicate the MAC num.
- * bits 32:47 indicate the PVF num.
- */
+ * for all queues. Only the PF can set these bits.
+ * bits 29:30 indicate the MAC num.
+ * bits 32:47 indicate the PVF num.
+ */
for (q_no = 0; q_no < ern; q_no++) {
reg_val = oct->pcie_port << CN23XX_PKT_INPUT_CTL_MAC_NUM_POS;
+
+ /* for VF assigned queues. */
+ if (q_no < oct->sriov_info.pf_srn) {
+ vf_num = q_no / oct->sriov_info.rings_per_vf;
+ vf_num += 1; /* VF1, VF2, ... */
+ } else {
+ vf_num = 0;
+ }
+
+ reg_val |= vf_num << CN23XX_PKT_INPUT_CTL_VF_NUM_POS;
reg_val |= pf_num << CN23XX_PKT_INPUT_CTL_PF_NUM_POS;
octeon_write_csr64(oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no),
@@ -530,8 +541,8 @@ static void cn23xx_pf_setup_global_output_regs(struct octeon_device *oct)
writeq(0x40, (u8 *)oct->mmio[0].hw_addr + CN23XX_SLI_OQ_WMARK);
/** Disabling setting OQs in reset when ring has no doorbells
- * enabling this will cause of head of line blocking
- */
+ * enabling this will cause head-of-line blocking
+ */
/* Do it only for pass1.1. and pass1.2 */
if ((oct->rev_id == OCTEON_CN23XX_REV_1_0) ||
(oct->rev_id == OCTEON_CN23XX_REV_1_1))
@@ -662,6 +673,118 @@ static void cn23xx_setup_oq_regs(struct octeon_device *oct, u32 oq_no)
}
}
+static void cn23xx_pf_mbox_thread(struct work_struct *work)
+{
+ struct cavium_wk *wk = (struct cavium_wk *)work;
+ struct octeon_mbox *mbox = (struct octeon_mbox *)wk->ctxptr;
+ struct octeon_device *oct = mbox->oct_dev;
+ u64 mbox_int_val, val64;
+ u32 q_no, i;
+
+ if (oct->rev_id < OCTEON_CN23XX_REV_1_1) {
+ /* read and clear by writing 1 */
+ mbox_int_val = readq(mbox->mbox_int_reg);
+ writeq(mbox_int_val, mbox->mbox_int_reg);
+
+ for (i = 0; i < oct->sriov_info.num_vfs_alloced; i++) {
+ q_no = i * oct->sriov_info.rings_per_vf;
+
+ val64 = readq(oct->mbox[q_no]->mbox_write_reg);
+
+ if (val64 && (val64 != OCTEON_PFVFACK)) {
+ if (octeon_mbox_read(oct->mbox[q_no]))
+ octeon_mbox_process_message(
+ oct->mbox[q_no]);
+ }
+ }
+
+ schedule_delayed_work(&wk->work, msecs_to_jiffies(10));
+ } else {
+ octeon_mbox_process_message(mbox);
+ }
+}
+
+static int cn23xx_setup_pf_mbox(struct octeon_device *oct)
+{
+ struct octeon_mbox *mbox = NULL;
+ u16 mac_no = oct->pcie_port;
+ u16 pf_num = oct->pf_num;
+ u32 q_no, i;
+
+ if (!oct->sriov_info.max_vfs)
+ return 0;
+
+ for (i = 0; i < oct->sriov_info.max_vfs; i++) {
+ q_no = i * oct->sriov_info.rings_per_vf;
+
+ mbox = vmalloc(sizeof(*mbox));
+ if (!mbox)
+ goto free_mbox;
+
+ memset(mbox, 0, sizeof(struct octeon_mbox));
+
+ spin_lock_init(&mbox->lock);
+
+ mbox->oct_dev = oct;
+
+ mbox->q_no = q_no;
+
+ mbox->state = OCTEON_MBOX_STATE_IDLE;
+
+ /* PF mbox interrupt reg */
+ mbox->mbox_int_reg = (u8 *)oct->mmio[0].hw_addr +
+ CN23XX_SLI_MAC_PF_MBOX_INT(mac_no, pf_num);
+
+ /* PF writes into SIG0 reg */
+ mbox->mbox_write_reg = (u8 *)oct->mmio[0].hw_addr +
+ CN23XX_SLI_PKT_PF_VF_MBOX_SIG(q_no, 0);
+
+ /* PF reads from SIG1 reg */
+ mbox->mbox_read_reg = (u8 *)oct->mmio[0].hw_addr +
+ CN23XX_SLI_PKT_PF_VF_MBOX_SIG(q_no, 1);
+
+ /* Mailbox thread creation */
+ INIT_DELAYED_WORK(&mbox->mbox_poll_wk.work,
+ cn23xx_pf_mbox_thread);
+ mbox->mbox_poll_wk.ctxptr = (void *)mbox;
+
+ oct->mbox[q_no] = mbox;
+
+ writeq(OCTEON_PFVFSIG, mbox->mbox_read_reg);
+ }
+
+ if (oct->rev_id < OCTEON_CN23XX_REV_1_1)
+ schedule_delayed_work(&oct->mbox[0]->mbox_poll_wk.work,
+ msecs_to_jiffies(0));
+
+ return 0;
+
+free_mbox:
+ while (i) {
+ i--;
+ vfree(oct->mbox[i]);
+ }
+
+ return 1;
+}
+
+static int cn23xx_free_pf_mbox(struct octeon_device *oct)
+{
+ u32 q_no, i;
+
+ if (!oct->sriov_info.max_vfs)
+ return 0;
+
+ for (i = 0; i < oct->sriov_info.max_vfs; i++) {
+ q_no = i * oct->sriov_info.rings_per_vf;
+ cancel_delayed_work_sync(
+ &oct->mbox[q_no]->mbox_poll_wk.work);
+ vfree(oct->mbox[q_no]);
+ }
+
+ return 0;
+}
+
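
The PF keeps one mailbox per VF, and each VF's mailbox registers hang off that VF's first ring, so the PF indexes mailboxes with q_no = vf_index * rings_per_vf; on pre-rev-1.1 silicon, where the mailbox interrupt is not used, a delayed work instead polls every mailbox on a 10 ms tick. A trivial mirror of the indexing:

#include <stdio.h>

int main(void)
{
	unsigned int rings_per_vf = 1;	/* one ring per VF, per sriov_config */
	unsigned int num_vfs = 4, i;

	for (i = 0; i < num_vfs; i++)
		printf("VF%u -> mailbox on ring %u\n",
		       i + 1, i * rings_per_vf);
	return 0;
}
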
static int cn23xx_enable_io_queues(struct octeon_device *oct)
{
u64 reg_val;
@@ -856,6 +979,29 @@ static u64 cn23xx_pf_msix_interrupt_handler(void *dev)
return ret;
}
+static void cn23xx_handle_pf_mbox_intr(struct octeon_device *oct)
+{
+ struct delayed_work *work;
+ u64 mbox_int_val;
+ u32 i, q_no;
+
+ mbox_int_val = readq(oct->mbox[0]->mbox_int_reg);
+
+ for (i = 0; i < oct->sriov_info.num_vfs_alloced; i++) {
+ q_no = i * oct->sriov_info.rings_per_vf;
+
+ if (mbox_int_val & BIT_ULL(q_no)) {
+ writeq(BIT_ULL(q_no),
+ oct->mbox[0]->mbox_int_reg);
+ if (octeon_mbox_read(oct->mbox[q_no])) {
+ work = &oct->mbox[q_no]->mbox_poll_wk.work;
+ schedule_delayed_work(work,
+ msecs_to_jiffies(0));
+ }
+ }
+ }
+}
+
static irqreturn_t cn23xx_interrupt_handler(void *dev)
{
struct octeon_device *oct = (struct octeon_device *)dev;
@@ -871,6 +1017,10 @@ static irqreturn_t cn23xx_interrupt_handler(void *dev)
dev_err(&oct->pci_dev->dev, "OCTEON[%d]: Error Intr: 0x%016llx\n",
oct->octeon_id, CVM_CAST64(intr64));
+ /* When a VF writes into its MBOX_SIG2 reg, this interrupt is set in the PF */
+ if (intr64 & CN23XX_INTR_VF_MBOX)
+ cn23xx_handle_pf_mbox_intr(oct);
+
if (oct->msix_on != LIO_FLAG_MSIX_ENABLED) {
if (intr64 & CN23XX_INTR_PKT_DATA)
oct->int_status |= OCT_DEV_INTR_PKT_DATA;
@@ -961,6 +1111,13 @@ static void cn23xx_enable_pf_interrupt(struct octeon_device *oct, u8 intr_flag)
intr_val = readq(cn23xx->intr_enb_reg64);
intr_val |= CN23XX_INTR_PKT_DATA;
writeq(intr_val, cn23xx->intr_enb_reg64);
+ } else if ((intr_flag & OCTEON_MBOX_INTR) &&
+ (oct->sriov_info.max_vfs > 0)) {
+ if (oct->rev_id >= OCTEON_CN23XX_REV_1_1) {
+ intr_val = readq(cn23xx->intr_enb_reg64);
+ intr_val |= CN23XX_INTR_VF_MBOX;
+ writeq(intr_val, cn23xx->intr_enb_reg64);
+ }
}
}
@@ -976,6 +1133,13 @@ static void cn23xx_disable_pf_interrupt(struct octeon_device *oct, u8 intr_flag)
intr_val = readq(cn23xx->intr_enb_reg64);
intr_val &= ~CN23XX_INTR_PKT_DATA;
writeq(intr_val, cn23xx->intr_enb_reg64);
+ } else if ((intr_flag & OCTEON_MBOX_INTR) &&
+ (oct->sriov_info.max_vfs > 0)) {
+ if (oct->rev_id >= OCTEON_CN23XX_REV_1_1) {
+ intr_val = readq(cn23xx->intr_enb_reg64);
+ intr_val &= ~CN23XX_INTR_VF_MBOX;
+ writeq(intr_val, cn23xx->intr_enb_reg64);
+ }
}
}
@@ -1048,50 +1212,59 @@ static void cn23xx_setup_reg_address(struct octeon_device *oct)
static int cn23xx_sriov_config(struct octeon_device *oct)
{
- u32 total_rings;
struct octeon_cn23xx_pf *cn23xx = (struct octeon_cn23xx_pf *)oct->chip;
- /* num_vfs is already filled for us */
+ u32 max_rings, total_rings, max_vfs, rings_per_vf;
u32 pf_srn, num_pf_rings;
+ u32 max_possible_vfs;
cn23xx->conf =
- (struct octeon_config *)oct_get_config_info(oct, LIO_23XX);
+ (struct octeon_config *)oct_get_config_info(oct, LIO_23XX);
switch (oct->rev_id) {
case OCTEON_CN23XX_REV_1_0:
- total_rings = CN23XX_MAX_RINGS_PER_PF_PASS_1_0;
+ max_rings = CN23XX_MAX_RINGS_PER_PF_PASS_1_0;
+ max_possible_vfs = CN23XX_MAX_VFS_PER_PF_PASS_1_0;
break;
case OCTEON_CN23XX_REV_1_1:
- total_rings = CN23XX_MAX_RINGS_PER_PF_PASS_1_1;
+ max_rings = CN23XX_MAX_RINGS_PER_PF_PASS_1_1;
+ max_possible_vfs = CN23XX_MAX_VFS_PER_PF_PASS_1_1;
break;
default:
- total_rings = CN23XX_MAX_RINGS_PER_PF;
+ max_rings = CN23XX_MAX_RINGS_PER_PF;
+ max_possible_vfs = CN23XX_MAX_VFS_PER_PF;
break;
}
- if (!oct->sriov_info.num_pf_rings) {
- if (total_rings > num_present_cpus())
- num_pf_rings = num_present_cpus();
- else
- num_pf_rings = total_rings;
- } else {
- num_pf_rings = oct->sriov_info.num_pf_rings;
- if (num_pf_rings > total_rings) {
- dev_warn(&oct->pci_dev->dev,
- "num_queues_per_pf requested %u is more than available rings. Reducing to %u\n",
- num_pf_rings, total_rings);
- num_pf_rings = total_rings;
- }
- }
+ if (max_rings <= num_present_cpus())
+ num_pf_rings = 1;
+ else
+ num_pf_rings = num_present_cpus();
+
+#ifdef CONFIG_PCI_IOV
+ max_vfs = min_t(u32,
+ (max_rings - num_pf_rings), max_possible_vfs);
+ rings_per_vf = 1;
+#else
+ max_vfs = 0;
+ rings_per_vf = 0;
+#endif
+
+ total_rings = num_pf_rings + max_vfs;
- total_rings = num_pf_rings;
/* the first ring of the pf */
pf_srn = total_rings - num_pf_rings;
oct->sriov_info.trs = total_rings;
+ oct->sriov_info.max_vfs = max_vfs;
+ oct->sriov_info.rings_per_vf = rings_per_vf;
oct->sriov_info.pf_srn = pf_srn;
oct->sriov_info.num_pf_rings = num_pf_rings;
- dev_dbg(&oct->pci_dev->dev, "trs:%d pf_srn:%d num_pf_rings:%d\n",
- oct->sriov_info.trs, oct->sriov_info.pf_srn,
- oct->sriov_info.num_pf_rings);
+ dev_notice(&oct->pci_dev->dev, "trs:%d max_vfs:%d rings_per_vf:%d pf_srn:%d num_pf_rings:%d\n",
+ oct->sriov_info.trs, oct->sriov_info.max_vfs,
+ oct->sriov_info.rings_per_vf, oct->sriov_info.pf_srn,
+ oct->sriov_info.num_pf_rings);
+
+ oct->sriov_info.sriov_enabled = 0;
+
return 0;
}
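
The reworked cn23xx_sriov_config() splits the ring space instead of giving everything to the PF: with one ring per VF, the VF rings occupy ring numbers 0 .. max_vfs-1 and the PF's rings start at pf_srn = total_rings - num_pf_rings, i.e. at the top of the range. A sketch of the partitioning with illustrative numbers (the real maxima come from the CN23XX_MAX_* constants):

#include <stdio.h>

int main(void)
{
	unsigned int max_rings = 64, cpus = 8, max_possible_vfs = 64;
	unsigned int num_pf_rings = (max_rings <= cpus) ? 1 : cpus;
	unsigned int max_vfs = max_rings - num_pf_rings;
	unsigned int total, pf_srn;

	if (max_vfs > max_possible_vfs)
		max_vfs = max_possible_vfs;
	total = num_pf_rings + max_vfs;		/* rings_per_vf == 1 */
	pf_srn = total - num_pf_rings;
	printf("total %u: VF rings 0..%u, PF rings %u..%u\n",
	       total, pf_srn - 1, pf_srn, total - 1);
	return 0;
}
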
@@ -1119,6 +1292,9 @@ int setup_cn23xx_octeon_pf_device(struct octeon_device *oct)
oct->fn_list.setup_iq_regs = cn23xx_setup_iq_regs;
oct->fn_list.setup_oq_regs = cn23xx_setup_oq_regs;
+ oct->fn_list.setup_mbox = cn23xx_setup_pf_mbox;
+ oct->fn_list.free_mbox = cn23xx_free_pf_mbox;
+
oct->fn_list.process_interrupt_regs = cn23xx_interrupt_handler;
oct->fn_list.msix_interrupt_handler = cn23xx_pf_msix_interrupt_handler;
@@ -1209,8 +1385,7 @@ void cn23xx_dump_iq_regs(struct octeon_device *oct)
dev_dbg(&oct->pci_dev->dev, "SLI_PKT[%d]_INPUT_CTL [0x%x]: 0x%016llx\n",
q_no, CN23XX_SLI_IQ_PKT_CONTROL64(q_no),
CVM_CAST64(octeon_read_csr64
- (oct,
- CN23XX_SLI_IQ_PKT_CONTROL64(q_no))));
+ (oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no))));
}
pci_read_config_dword(oct->pci_dev, CN23XX_CONFIG_PCIE_DEVCTL, &regval);
@@ -1235,3 +1410,24 @@ int cn23xx_fw_loaded(struct octeon_device *oct)
val = octeon_read_csr64(oct, CN23XX_SLI_SCRATCH1);
return (val >> 1) & 1ULL;
}
+
+void cn23xx_tell_vf_its_macaddr_changed(struct octeon_device *oct, int vfidx,
+ u8 *mac)
+{
+ if (oct->sriov_info.vf_drv_loaded_mask & BIT_ULL(vfidx)) {
+ struct octeon_mbox_cmd mbox_cmd;
+
+ mbox_cmd.msg.u64 = 0;
+ mbox_cmd.msg.s.type = OCTEON_MBOX_REQUEST;
+ mbox_cmd.msg.s.resp_needed = 0;
+ mbox_cmd.msg.s.cmd = OCTEON_PF_CHANGED_VF_MACADDR;
+ mbox_cmd.msg.s.len = 1;
+ mbox_cmd.recv_len = 0;
+ mbox_cmd.recv_status = 0;
+ mbox_cmd.fn = NULL;
+ mbox_cmd.fn_arg = 0;
+ ether_addr_copy(mbox_cmd.msg.s.params, mac);
+ mbox_cmd.q_no = vfidx * oct->sriov_info.rings_per_vf;
+ octeon_mbox_write(oct, &mbox_cmd);
+ }
+}
diff --git a/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.h b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.h
index 21b5c9051967..2fedd91f3df8 100644
--- a/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.h
+++ b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.h
@@ -1,34 +1,31 @@
/**********************************************************************
-* Author: Cavium, Inc.
-*
-* Contact: support@cavium.com
-* Please include "LiquidIO" in the subject.
-*
-* Copyright (c) 2003-2015 Cavium, Inc.
-*
-* This file is free software; you can redistribute it and/or modify
-* it under the terms of the GNU General Public License, Version 2, as
-* published by the Free Software Foundation.
-*
-* This file is distributed in the hope that it will be useful, but
-* AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
-* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
-* NONINFRINGEMENT. See the GNU General Public License for more
-* details.
-*
-* This file may also be available under a different license from Cavium.
-* Contact Cavium, Inc. for more information
-**********************************************************************/
-
+ * Author: Cavium, Inc.
+ *
+ * Contact: support@cavium.com
+ * Please include "LiquidIO" in the subject.
+ *
+ * Copyright (c) 2003-2016 Cavium, Inc.
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more details.
+ ***********************************************************************/
/*! \file cn23xx_device.h
* \brief Host Driver: Routines that perform CN23XX specific operations.
-*/
+ */
#ifndef __CN23XX_PF_DEVICE_H__
#define __CN23XX_PF_DEVICE_H__
#include "cn23xx_pf_regs.h"
+#define LIO_CMD_WAIT_TM 100
+
/* Register address and configuration for a CN23XX devices.
* If device specific changes need to be made then add a struct to include
* device specific fields as shown in the commented section
@@ -56,4 +53,7 @@ u32 cn23xx_pf_get_oq_ticks(struct octeon_device *oct, u32 time_intr_in_us);
void cn23xx_dump_pf_initialized_regs(struct octeon_device *oct);
int cn23xx_fw_loaded(struct octeon_device *oct);
+
+void cn23xx_tell_vf_its_macaddr_changed(struct octeon_device *oct, int vfidx,
+ u8 *mac);
#endif
diff --git a/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_regs.h b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_regs.h
index 03d79d95ab75..e6d4ad99cc38 100644
--- a/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_regs.h
+++ b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_regs.h
@@ -1,29 +1,24 @@
/**********************************************************************
-* Author: Cavium, Inc.
-*
-* Contact: support@cavium.com
-* Please include "LiquidIO" in the subject.
-*
-* Copyright (c) 2003-2015 Cavium, Inc.
-*
-* This file is free software; you can redistribute it and/or modify
-* it under the terms of the GNU General Public License, Version 2, as
-* published by the Free Software Foundation.
-*
-* This file is distributed in the hope that it will be useful, but
-* AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
-* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
-* NONINFRINGEMENT. See the GNU General Public License for more
-* details.
-*
-* This file may also be available under a different license from Cavium.
-* Contact Cavium, Inc. for more information
-**********************************************************************/
-
+ * Author: Cavium, Inc.
+ *
+ * Contact: support@cavium.com
+ * Please include "LiquidIO" in the subject.
+ *
+ * Copyright (c) 2003-2016 Cavium, Inc.
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more details.
+ ***********************************************************************/
/*! \file cn23xx_regs.h
* \brief Host Driver: Register Address and Register Mask values for
* Octeon CN23XX devices.
-*/
+ */
#ifndef __CN23XX_PF_REGS_H__
#define __CN23XX_PF_REGS_H__
@@ -63,7 +58,7 @@
#define CN23XX_CONFIG_SRIOV_BAR_START 0x19C
#define CN23XX_CONFIG_SRIOV_BARX(i) \
- (CN23XX_CONFIG_SRIOV_BAR_START + (i * 4))
+ (CN23XX_CONFIG_SRIOV_BAR_START + ((i) * 4))
#define CN23XX_CONFIG_SRIOV_BAR_PF 0x08
#define CN23XX_CONFIG_SRIOV_BAR_64BIT 0x04
#define CN23XX_CONFIG_SRIOV_BAR_IO 0x01
@@ -513,7 +508,7 @@
/* 4 Registers (64 - bit) */
#define CN23XX_SLI_S2M_PORT_CTL_START 0x23D80
#define CN23XX_SLI_S2M_PORTX_CTL(port) \
- (CN23XX_SLI_S2M_PORT_CTL_START + (port * 0x10))
+ (CN23XX_SLI_S2M_PORT_CTL_START + ((port) * 0x10))
#define CN23XX_SLI_MAC_NUMBER 0x20050
@@ -554,26 +549,26 @@
* Provides DMA Engine Queue Enable
*/
#define CN23XX_DPI_DMA_ENG0_ENB 0x0001df0000000080ULL
-#define CN23XX_DPI_DMA_ENG_ENB(eng) (CN23XX_DPI_DMA_ENG0_ENB + (eng * 8))
+#define CN23XX_DPI_DMA_ENG_ENB(eng) (CN23XX_DPI_DMA_ENG0_ENB + ((eng) * 8))
/* 8 register (64-bit) - DPI_DMA(0..7)_REQQ_CTL
* Provides control bits for transaction on 8 Queues
*/
#define CN23XX_DPI_DMA_REQQ0_CTL 0x0001df0000000180ULL
#define CN23XX_DPI_DMA_REQQ_CTL(q_no) \
- (CN23XX_DPI_DMA_REQQ0_CTL + (q_no * 8))
+ (CN23XX_DPI_DMA_REQQ0_CTL + ((q_no) * 8))
/* 6 register (64-bit) - DPI_ENG(0..5)_BUF
* Provides DMA Engine FIFO (Queue) Size
*/
#define CN23XX_DPI_DMA_ENG0_BUF 0x0001df0000000880ULL
#define CN23XX_DPI_DMA_ENG_BUF(eng) \
- (CN23XX_DPI_DMA_ENG0_BUF + (eng * 8))
+ (CN23XX_DPI_DMA_ENG0_BUF + ((eng) * 8))
/* 4 Registers (64-bit) */
#define CN23XX_DPI_SLI_PRT_CFG_START 0x0001df0000000900ULL
#define CN23XX_DPI_SLI_PRTX_CFG(port) \
- (CN23XX_DPI_SLI_PRT_CFG_START + (port * 0x8))
+ (CN23XX_DPI_SLI_PRT_CFG_START + ((port) * 0x8))
/* Masks for DPI_DMA_CONTROL Register */
#define CN23XX_DPI_DMA_COMMIT_MODE BIT_ULL(58)
diff --git a/drivers/net/ethernet/cavium/liquidio/cn23xx_vf_device.c b/drivers/net/ethernet/cavium/liquidio/cn23xx_vf_device.c
new file mode 100644
index 000000000000..b6117b6a1de2
--- /dev/null
+++ b/drivers/net/ethernet/cavium/liquidio/cn23xx_vf_device.c
@@ -0,0 +1,722 @@
+/**********************************************************************
+ * Author: Cavium, Inc.
+ *
+ * Contact: support@cavium.com
+ * Please include "LiquidIO" in the subject.
+ *
+ * Copyright (c) 2003-2016 Cavium, Inc.
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more details.
+ ***********************************************************************/
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/vmalloc.h>
+#include "liquidio_common.h"
+#include "octeon_droq.h"
+#include "octeon_iq.h"
+#include "response_manager.h"
+#include "octeon_device.h"
+#include "cn23xx_vf_device.h"
+#include "octeon_main.h"
+#include "octeon_mailbox.h"
+
+u32 cn23xx_vf_get_oq_ticks(struct octeon_device *oct, u32 time_intr_in_us)
+{
+ /* This gives the SLI clock per microsec */
+ u32 oqticks_per_us = (u32)oct->pfvf_hsword.coproc_tics_per_us;
+
+ /* This gives the clock cycles per millisecond */
+ oqticks_per_us *= 1000;
+
+ /* This gives the oq ticks (1024 core clock cycles) per millisecond */
+ oqticks_per_us /= 1024;
+
+ /* time_intr is in microseconds. The next 2 steps give the oq ticks
+ * corresponding to time_intr.
+ */
+ oqticks_per_us *= time_intr_in_us;
+ oqticks_per_us /= 1000;
+
+ return oqticks_per_us;
+}
+
+static int cn23xx_vf_reset_io_queues(struct octeon_device *oct, u32 num_queues)
+{
+ u32 loop = BUSY_READING_REG_VF_LOOP_COUNT;
+ int ret_val = 0;
+ u32 q_no;
+ u64 d64;
+
+ for (q_no = 0; q_no < num_queues; q_no++) {
+ /* set RST bit to 1. This bit applies to both IQ and OQ */
+ d64 = octeon_read_csr64(oct,
+ CN23XX_VF_SLI_IQ_PKT_CONTROL64(q_no));
+ d64 |= CN23XX_PKT_INPUT_CTL_RST;
+ octeon_write_csr64(oct, CN23XX_VF_SLI_IQ_PKT_CONTROL64(q_no),
+ d64);
+ }
+
+ /* wait until the RST bit is clear or the RST and QUIET bits are set */
+ for (q_no = 0; q_no < num_queues; q_no++) {
+ u64 reg_val = octeon_read_csr64(oct,
+ CN23XX_VF_SLI_IQ_PKT_CONTROL64(q_no));
+ while ((READ_ONCE(reg_val) & CN23XX_PKT_INPUT_CTL_RST) &&
+ !(READ_ONCE(reg_val) & CN23XX_PKT_INPUT_CTL_QUIET) &&
+ loop) {
+ WRITE_ONCE(reg_val, octeon_read_csr64(
+ oct, CN23XX_VF_SLI_IQ_PKT_CONTROL64(q_no)));
+ loop--;
+ }
+ if (!loop) {
+ dev_err(&oct->pci_dev->dev,
+ "clearing the reset reg failed or setting the quiet reg failed for qno: %u\n",
+ q_no);
+ return -1;
+ }
+ WRITE_ONCE(reg_val, READ_ONCE(reg_val) &
+ ~CN23XX_PKT_INPUT_CTL_RST);
+ octeon_write_csr64(oct, CN23XX_VF_SLI_IQ_PKT_CONTROL64(q_no),
+ READ_ONCE(reg_val));
+
+ WRITE_ONCE(reg_val, octeon_read_csr64(
+ oct, CN23XX_VF_SLI_IQ_PKT_CONTROL64(q_no)));
+ if (READ_ONCE(reg_val) & CN23XX_PKT_INPUT_CTL_RST) {
+ dev_err(&oct->pci_dev->dev,
+ "clearing the reset failed for qno: %u\n",
+ q_no);
+ ret_val = -1;
+ }
+ }
+
+ return ret_val;
+}
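
The reset handshake above sets the RST bit, then spins until RST reads back clear or both RST and QUIET are set, bounded by BUSY_READING_REG_VF_LOOP_COUNT. The bounded-poll pattern in isolation (read_reg() is a stand-in for octeon_read_csr64 on the queue's control register):

static int wait_reset_done(unsigned long long (*read_reg)(void),
			   unsigned long long rst_bit,
			   unsigned long long quiet_bit,
			   unsigned int loops)
{
	unsigned long long v;

	while (loops--) {
		v = read_reg();
		/* done when RST has cleared, or RST+QUIET both read back */
		if (!(v & rst_bit) || (v & quiet_bit))
			return 0;
	}
	return -1;	/* timed out */
}
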
+
+static int cn23xx_vf_setup_global_input_regs(struct octeon_device *oct)
+{
+ struct octeon_cn23xx_vf *cn23xx = (struct octeon_cn23xx_vf *)oct->chip;
+ struct octeon_instr_queue *iq;
+ u64 q_no, intr_threshold;
+ u64 d64;
+
+ if (cn23xx_vf_reset_io_queues(oct, oct->sriov_info.rings_per_vf))
+ return -1;
+
+ for (q_no = 0; q_no < (oct->sriov_info.rings_per_vf); q_no++) {
+ void __iomem *inst_cnt_reg;
+
+ octeon_write_csr64(oct, CN23XX_VF_SLI_IQ_DOORBELL(q_no),
+ 0xFFFFFFFF);
+ iq = oct->instr_queue[q_no];
+
+ if (iq)
+ inst_cnt_reg = iq->inst_cnt_reg;
+ else
+ inst_cnt_reg = (u8 *)oct->mmio[0].hw_addr +
+ CN23XX_VF_SLI_IQ_INSTR_COUNT64(q_no);
+
+ d64 = octeon_read_csr64(oct,
+ CN23XX_VF_SLI_IQ_INSTR_COUNT64(q_no));
+
+ d64 &= 0xEFFFFFFFFFFFFFFFL;
+
+ octeon_write_csr64(oct, CN23XX_VF_SLI_IQ_INSTR_COUNT64(q_no),
+ d64);
+
+ /* Select ES, RO, NS, RDSIZE, DPTR Format #0 for
+ * the Input Queues
+ */
+ octeon_write_csr64(oct, CN23XX_VF_SLI_IQ_PKT_CONTROL64(q_no),
+ CN23XX_PKT_INPUT_CTL_MASK);
+
+ /* set the wmark level to trigger PI_INT */
+ intr_threshold = CFG_GET_IQ_INTR_PKT(cn23xx->conf) &
+ CN23XX_PKT_IN_DONE_WMARK_MASK;
+
+ writeq((readq(inst_cnt_reg) &
+ ~(CN23XX_PKT_IN_DONE_WMARK_MASK <<
+ CN23XX_PKT_IN_DONE_WMARK_BIT_POS)) |
+ (intr_threshold << CN23XX_PKT_IN_DONE_WMARK_BIT_POS),
+ inst_cnt_reg);
+ }
+ return 0;
+}
+
+static void cn23xx_vf_setup_global_output_regs(struct octeon_device *oct)
+{
+ u32 reg_val;
+ u32 q_no;
+
+ for (q_no = 0; q_no < (oct->sriov_info.rings_per_vf); q_no++) {
+ octeon_write_csr(oct, CN23XX_VF_SLI_OQ_PKTS_CREDIT(q_no),
+ 0xFFFFFFFF);
+
+ reg_val =
+ octeon_read_csr(oct, CN23XX_VF_SLI_OQ_PKTS_SENT(q_no));
+
+ reg_val &= 0xEFFFFFFFFFFFFFFFL;
+
+ reg_val =
+ octeon_read_csr(oct, CN23XX_VF_SLI_OQ_PKT_CONTROL(q_no));
+
+ /* set IPTR & DPTR */
+ reg_val |=
+ (CN23XX_PKT_OUTPUT_CTL_IPTR | CN23XX_PKT_OUTPUT_CTL_DPTR);
+
+ /* reset BMODE */
+ reg_val &= ~(CN23XX_PKT_OUTPUT_CTL_BMODE);
+
+ /* No Relaxed Ordering, No Snoop, 64-bit Byte swap
+ * for Output Queue ScatterList reset ROR_P, NSR_P
+ */
+ reg_val &= ~(CN23XX_PKT_OUTPUT_CTL_ROR_P);
+ reg_val &= ~(CN23XX_PKT_OUTPUT_CTL_NSR_P);
+
+#ifdef __LITTLE_ENDIAN_BITFIELD
+ reg_val &= ~(CN23XX_PKT_OUTPUT_CTL_ES_P);
+#else
+ reg_val |= (CN23XX_PKT_OUTPUT_CTL_ES_P);
+#endif
+ /* No Relaxed Ordering, No Snoop, 64-bit Byte swap
+ * for Output Queue Data reset ROR, NSR
+ */
+ reg_val &= ~(CN23XX_PKT_OUTPUT_CTL_ROR);
+ reg_val &= ~(CN23XX_PKT_OUTPUT_CTL_NSR);
+ /* set the ES bit */
+ reg_val |= (CN23XX_PKT_OUTPUT_CTL_ES);
+
+ /* write all the selected settings */
+ octeon_write_csr(oct, CN23XX_VF_SLI_OQ_PKT_CONTROL(q_no),
+ reg_val);
+ }
+}
+
+static int cn23xx_setup_vf_device_regs(struct octeon_device *oct)
+{
+ if (cn23xx_vf_setup_global_input_regs(oct))
+ return -1;
+
+ cn23xx_vf_setup_global_output_regs(oct);
+
+ return 0;
+}
+
+static void cn23xx_setup_vf_iq_regs(struct octeon_device *oct, u32 iq_no)
+{
+ struct octeon_instr_queue *iq = oct->instr_queue[iq_no];
+ u64 pkt_in_done;
+
+ /* Write the start of the input queue's ring and its size */
+ octeon_write_csr64(oct, CN23XX_VF_SLI_IQ_BASE_ADDR64(iq_no),
+ iq->base_addr_dma);
+ octeon_write_csr(oct, CN23XX_VF_SLI_IQ_SIZE(iq_no), iq->max_count);
+
+ /* Remember the doorbell & instruction count register addr
+ * for this queue
+ */
+ iq->doorbell_reg =
+ (u8 *)oct->mmio[0].hw_addr + CN23XX_VF_SLI_IQ_DOORBELL(iq_no);
+ iq->inst_cnt_reg =
+ (u8 *)oct->mmio[0].hw_addr + CN23XX_VF_SLI_IQ_INSTR_COUNT64(iq_no);
+ dev_dbg(&oct->pci_dev->dev, "InstQ[%d]:dbell reg @ 0x%p instcnt_reg @ 0x%p\n",
+ iq_no, iq->doorbell_reg, iq->inst_cnt_reg);
+
+ /* Store the current instruction counter (used in flush_iq
+ * calculation)
+ */
+ pkt_in_done = readq(iq->inst_cnt_reg);
+
+ if (oct->msix_on) {
+ /* Set CINT_ENB to enable IQ interrupt */
+ writeq((pkt_in_done | CN23XX_INTR_CINT_ENB),
+ iq->inst_cnt_reg);
+ }
+ iq->reset_instr_cnt = 0;
+}
+
+static void cn23xx_setup_vf_oq_regs(struct octeon_device *oct, u32 oq_no)
+{
+ struct octeon_droq *droq = oct->droq[oq_no];
+
+ octeon_write_csr64(oct, CN23XX_VF_SLI_OQ_BASE_ADDR64(oq_no),
+ droq->desc_ring_dma);
+ octeon_write_csr(oct, CN23XX_VF_SLI_OQ_SIZE(oq_no), droq->max_count);
+
+ octeon_write_csr(oct, CN23XX_VF_SLI_OQ_BUFF_INFO_SIZE(oq_no),
+ (droq->buffer_size | (OCT_RH_SIZE << 16)));
+
+ /* Get the mapped address of the pkt_sent and pkts_credit regs */
+ droq->pkts_sent_reg =
+ (u8 *)oct->mmio[0].hw_addr + CN23XX_VF_SLI_OQ_PKTS_SENT(oq_no);
+ droq->pkts_credit_reg =
+ (u8 *)oct->mmio[0].hw_addr + CN23XX_VF_SLI_OQ_PKTS_CREDIT(oq_no);
+}
+
+static void cn23xx_vf_mbox_thread(struct work_struct *work)
+{
+ struct cavium_wk *wk = (struct cavium_wk *)work;
+ struct octeon_mbox *mbox = (struct octeon_mbox *)wk->ctxptr;
+
+ octeon_mbox_process_message(mbox);
+}
+
+static int cn23xx_free_vf_mbox(struct octeon_device *oct)
+{
+ cancel_delayed_work_sync(&oct->mbox[0]->mbox_poll_wk.work);
+ vfree(oct->mbox[0]);
+ return 0;
+}
+
+static int cn23xx_setup_vf_mbox(struct octeon_device *oct)
+{
+ struct octeon_mbox *mbox = NULL;
+
+ mbox = vmalloc(sizeof(*mbox));
+ if (!mbox)
+ return 1;
+
+ memset(mbox, 0, sizeof(struct octeon_mbox));
+
+ spin_lock_init(&mbox->lock);
+
+ mbox->oct_dev = oct;
+
+ mbox->q_no = 0;
+
+ mbox->state = OCTEON_MBOX_STATE_IDLE;
+
+ /* VF mbox interrupt reg */
+ mbox->mbox_int_reg =
+ (u8 *)oct->mmio[0].hw_addr + CN23XX_VF_SLI_PKT_MBOX_INT(0);
+ /* VF reads from SIG0 reg */
+ mbox->mbox_read_reg =
+ (u8 *)oct->mmio[0].hw_addr + CN23XX_SLI_PKT_PF_VF_MBOX_SIG(0, 0);
+ /* VF writes into SIG1 reg */
+ mbox->mbox_write_reg =
+ (u8 *)oct->mmio[0].hw_addr + CN23XX_SLI_PKT_PF_VF_MBOX_SIG(0, 1);
+
+ INIT_DELAYED_WORK(&mbox->mbox_poll_wk.work,
+ cn23xx_vf_mbox_thread);
+
+ mbox->mbox_poll_wk.ctxptr = mbox;
+
+ oct->mbox[0] = mbox;
+
+ writeq(OCTEON_PFVFSIG, mbox->mbox_read_reg);
+
+ return 0;
+}
+
+static int cn23xx_enable_vf_io_queues(struct octeon_device *oct)
+{
+ u32 q_no;
+
+ for (q_no = 0; q_no < oct->num_iqs; q_no++) {
+ u64 reg_val;
+
+ /* set the corresponding IQ IS_64B bit */
+ if (oct->io_qmask.iq64B & BIT_ULL(q_no)) {
+ reg_val = octeon_read_csr64(
+ oct, CN23XX_VF_SLI_IQ_PKT_CONTROL64(q_no));
+ reg_val |= CN23XX_PKT_INPUT_CTL_IS_64B;
+ octeon_write_csr64(
+ oct, CN23XX_VF_SLI_IQ_PKT_CONTROL64(q_no), reg_val);
+ }
+
+ /* set the corresponding IQ ENB bit */
+ if (oct->io_qmask.iq & BIT_ULL(q_no)) {
+ reg_val = octeon_read_csr64(
+ oct, CN23XX_VF_SLI_IQ_PKT_CONTROL64(q_no));
+ reg_val |= CN23XX_PKT_INPUT_CTL_RING_ENB;
+ octeon_write_csr64(
+ oct, CN23XX_VF_SLI_IQ_PKT_CONTROL64(q_no), reg_val);
+ }
+ }
+ for (q_no = 0; q_no < oct->num_oqs; q_no++) {
+ u32 reg_val;
+
+ /* set the corresponding OQ ENB bit */
+ if (oct->io_qmask.oq & BIT_ULL(q_no)) {
+ reg_val = octeon_read_csr(
+ oct, CN23XX_VF_SLI_OQ_PKT_CONTROL(q_no));
+ reg_val |= CN23XX_PKT_OUTPUT_CTL_RING_ENB;
+ octeon_write_csr(
+ oct, CN23XX_VF_SLI_OQ_PKT_CONTROL(q_no), reg_val);
+ }
+ }
+
+ return 0;
+}
+
+static void cn23xx_disable_vf_io_queues(struct octeon_device *oct)
+{
+ u32 num_queues = oct->num_iqs;
+
+ /* per HRM, rings can only be disabled via reset operation,
+ * NOT via SLI_PKT()_INPUT/OUTPUT_CONTROL[ENB]
+ */
+ if (num_queues < oct->num_oqs)
+ num_queues = oct->num_oqs;
+
+ cn23xx_vf_reset_io_queues(oct, num_queues);
+}
+
+void cn23xx_vf_ask_pf_to_do_flr(struct octeon_device *oct)
+{
+ struct octeon_mbox_cmd mbox_cmd;
+
+ mbox_cmd.msg.u64 = 0;
+ mbox_cmd.msg.s.type = OCTEON_MBOX_REQUEST;
+ mbox_cmd.msg.s.resp_needed = 0;
+ mbox_cmd.msg.s.cmd = OCTEON_VF_FLR_REQUEST;
+ mbox_cmd.msg.s.len = 1;
+ mbox_cmd.q_no = 0;
+ mbox_cmd.recv_len = 0;
+ mbox_cmd.recv_status = 0;
+ mbox_cmd.fn = NULL;
+ mbox_cmd.fn_arg = NULL;
+
+ octeon_mbox_write(oct, &mbox_cmd);
+}
+
+static void octeon_pfvf_hs_callback(struct octeon_device *oct,
+ struct octeon_mbox_cmd *cmd,
+ void *arg)
+{
+ u32 major = 0;
+
+ memcpy((u8 *)&oct->pfvf_hsword, cmd->msg.s.params,
+ CN23XX_MAILBOX_MSGPARAM_SIZE);
+ if (cmd->recv_len > 1) {
+ major = ((struct lio_version *)(cmd->data))->major;
+ major = major << 16;
+ }
+
+ atomic_set((atomic_t *)arg, major | 1);
+}
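
The callback packs two facts into one atomic word: the PF's major version in the upper 16 bits and a completion flag in bit 0, so a zero value can only mean "no response yet". A sketch of the matching decode on the waiting side (hypothetical helper, not part of the patch):

	static bool pfvf_hs_done(atomic_t *status, u32 *pf_major)
	{
		u32 v = atomic_read(status);

		*pf_major = v >> 16;	/* 0 if the PF sent no version info */
		return v & 1;		/* completion flag set by the callback */
	}
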
+
+int cn23xx_octeon_pfvf_handshake(struct octeon_device *oct)
+{
+ struct octeon_mbox_cmd mbox_cmd;
+ u32 q_no, count = 0;
+ atomic_t status;
+ u32 pfmajor;
+ u32 vfmajor;
+ u32 ret;
+
+ /* Sending VF_ACTIVE indication to the PF driver */
+ dev_dbg(&oct->pci_dev->dev, "requesting info from pf\n");
+
+ mbox_cmd.msg.u64 = 0;
+ mbox_cmd.msg.s.type = OCTEON_MBOX_REQUEST;
+ mbox_cmd.msg.s.resp_needed = 1;
+ mbox_cmd.msg.s.cmd = OCTEON_VF_ACTIVE;
+ mbox_cmd.msg.s.len = 2;
+ mbox_cmd.data[0] = 0;
+ ((struct lio_version *)&mbox_cmd.data[0])->major =
+ LIQUIDIO_BASE_MAJOR_VERSION;
+ ((struct lio_version *)&mbox_cmd.data[0])->minor =
+ LIQUIDIO_BASE_MINOR_VERSION;
+ ((struct lio_version *)&mbox_cmd.data[0])->micro =
+ LIQUIDIO_BASE_MICRO_VERSION;
+ mbox_cmd.q_no = 0;
+ mbox_cmd.recv_len = 0;
+ mbox_cmd.recv_status = 0;
+ mbox_cmd.fn = (octeon_mbox_callback_t)octeon_pfvf_hs_callback;
+ mbox_cmd.fn_arg = &status;
+
+ /* Interrupts are not enabled at this point.
+ * Enable them with default oq ticks
+ */
+ oct->fn_list.enable_interrupt(oct, OCTEON_ALL_INTR);
+
+ octeon_mbox_write(oct, &mbox_cmd);
+
+ atomic_set(&status, 0);
+
+ do {
+ schedule_timeout_uninterruptible(1);
+ } while ((!atomic_read(&status)) && (count++ < 100000));
+
+ /* Disable the interrupts so that they will be re-enabled
+ * with the oq ticks received from the PF
+ */
+ oct->fn_list.disable_interrupt(oct, OCTEON_ALL_INTR);
+
+ ret = atomic_read(&status);
+ if (!ret) {
+ dev_err(&oct->pci_dev->dev, "octeon_pfvf_handshake timeout\n");
+ return 1;
+ }
+
+ for (q_no = 0 ; q_no < oct->num_iqs ; q_no++)
+ oct->instr_queue[q_no]->txpciq.s.pkind = oct->pfvf_hsword.pkind;
+
+ vfmajor = LIQUIDIO_BASE_MAJOR_VERSION;
+ pfmajor = ret >> 16;
+ if (pfmajor != vfmajor) {
+ dev_err(&oct->pci_dev->dev,
+ "VF Liquidio driver (major version %d) is not compatible with Liquidio PF driver (major version %d)\n",
+ vfmajor, pfmajor);
+ return 1;
+ }
+
+ dev_dbg(&oct->pci_dev->dev,
+ "VF Liquidio driver (major version %d), Liquidio PF driver (major version %d)\n",
+ vfmajor, pfmajor);
+
+ dev_dbg(&oct->pci_dev->dev, "got data from pf pkind is %d\n",
+ oct->pfvf_hsword.pkind);
+
+ return 0;
+}
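
The wait loop above bounds the handshake at 100000 one-jiffy sleeps, so the effective timeout depends on CONFIG_HZ: roughly 100 s at HZ=1000 and 400 s at HZ=250. A hypothetical helper that would express the bound in wall-clock terms:

	#include <linux/jiffies.h>

	/* Worst-case handshake wait in milliseconds, assuming each poll
	 * iteration sleeps exactly one jiffy (hypothetical helper).
	 */
	static inline unsigned int pfvf_handshake_timeout_ms(void)
	{
		return jiffies_to_msecs(100000);
	}
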
+
+static void cn23xx_handle_vf_mbox_intr(struct octeon_ioq_vector *ioq_vector)
+{
+ struct octeon_device *oct = ioq_vector->oct_dev;
+ u64 mbox_int_val;
+
+ if (!ioq_vector->droq_index) {
+ /* read and clear by writing 1 */
+ mbox_int_val = readq(oct->mbox[0]->mbox_int_reg);
+ writeq(mbox_int_val, oct->mbox[0]->mbox_int_reg);
+ if (octeon_mbox_read(oct->mbox[0]))
+ schedule_delayed_work(&oct->mbox[0]->mbox_poll_wk.work,
+ msecs_to_jiffies(0));
+ }
+}
+
+static u64 cn23xx_vf_msix_interrupt_handler(void *dev)
+{
+ struct octeon_ioq_vector *ioq_vector = (struct octeon_ioq_vector *)dev;
+ struct octeon_device *oct = ioq_vector->oct_dev;
+ struct octeon_droq *droq = oct->droq[ioq_vector->droq_index];
+ u64 pkts_sent;
+ u64 ret = 0;
+
+ dev_dbg(&oct->pci_dev->dev, "In %s octeon_dev @ %p\n", __func__, oct);
+ pkts_sent = readq(droq->pkts_sent_reg);
+
+ /* If our device has interrupted, then proceed. Also check
+ * for all f's, which is returned if the interrupt was
+ * triggered on an error and the PCI read failed.
+ */
+ if (!pkts_sent || (pkts_sent == 0xFFFFFFFFFFFFFFFFULL))
+ return ret;
+
+ /* Write the count reg in sli_pkt_cnts to clear these interrupts. */
+ if (pkts_sent & CN23XX_INTR_PO_INT)
+ ret |= MSIX_PO_INT;
+
+ if (pkts_sent & CN23XX_INTR_PI_INT)
+ /* We will clear the count when we update the read_index. */
+ ret |= MSIX_PI_INT;
+
+ if (pkts_sent & CN23XX_INTR_MBOX_INT) {
+ cn23xx_handle_vf_mbox_intr(ioq_vector);
+ ret |= MSIX_MBOX_INT;
+ }
+
+ return ret;
+}
+
+static u32 cn23xx_update_read_index(struct octeon_instr_queue *iq)
+{
+ u32 pkt_in_done = readl(iq->inst_cnt_reg);
+ u32 last_done;
+ u32 new_idx;
+
+ last_done = pkt_in_done - iq->pkt_in_done;
+ iq->pkt_in_done = pkt_in_done;
+
+ /* The old read index plus the newly completed instructions,
+ * modulo the IQ size, gives the new read index. The
+ * iq->reset_instr_cnt is always zero for cn23xx, so no extra
+ * adjustments are needed.
+ */
+ new_idx = (iq->octeon_read_index +
+ (u32)(last_done & CN23XX_PKT_IN_DONE_CNT_MASK)) %
+ iq->max_count;
+
+ return new_idx;
+}
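
The u32 subtraction above is wrap-safe: unsigned arithmetic is modular, so the delta stays correct even when the hardware counter rolls over. A worked example with hypothetical values:

	/* Suppose the counter wrapped between two reads:
	 *
	 *	iq->pkt_in_done = 0xFFFFFFF0   (previous read)
	 *	pkt_in_done     = 0x00000010   (current read)
	 *	last_done       = 0x00000010 - 0xFFFFFFF0 = 0x20  (32 new)
	 *
	 * The read index then advances by (0x20 % iq->max_count).
	 */
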
+
+static void cn23xx_enable_vf_interrupt(struct octeon_device *oct, u8 intr_flag)
+{
+ struct octeon_cn23xx_vf *cn23xx = (struct octeon_cn23xx_vf *)oct->chip;
+ u32 q_no, time_threshold;
+
+ if (intr_flag & OCTEON_OUTPUT_INTR) {
+ for (q_no = 0; q_no < oct->num_oqs; q_no++) {
+ /* Set up interrupt packet and time thresholds
+ * for all the OQs
+ */
+ time_threshold = cn23xx_vf_get_oq_ticks(
+ oct, (u32)CFG_GET_OQ_INTR_TIME(cn23xx->conf));
+
+ octeon_write_csr64(
+ oct, CN23XX_VF_SLI_OQ_PKT_INT_LEVELS(q_no),
+ (CFG_GET_OQ_INTR_PKT(cn23xx->conf) |
+ ((u64)time_threshold << 32)));
+ }
+ }
+
+ if (intr_flag & OCTEON_INPUT_INTR) {
+ for (q_no = 0; q_no < oct->num_iqs; q_no++) {
+ /* Set CINT_ENB to enable IQ interrupt */
+ octeon_write_csr64(
+ oct, CN23XX_VF_SLI_IQ_INSTR_COUNT64(q_no),
+ ((octeon_read_csr64(
+ oct, CN23XX_VF_SLI_IQ_INSTR_COUNT64(q_no)) &
+ ~CN23XX_PKT_IN_DONE_CNT_MASK) |
+ CN23XX_INTR_CINT_ENB));
+ }
+ }
+
+ /* Set queue-0 MBOX_ENB to enable VF mailbox interrupt */
+ if (intr_flag & OCTEON_MBOX_INTR) {
+ octeon_write_csr64(
+ oct, CN23XX_VF_SLI_PKT_MBOX_INT(0),
+ (octeon_read_csr64(oct, CN23XX_VF_SLI_PKT_MBOX_INT(0)) |
+ CN23XX_INTR_MBOX_ENB));
+ }
+}
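
The output loop above programs a single 64-bit CSR that carries two thresholds: the packet count in the low 32 bits and the timer value in the high 32. A minimal sketch of the packing, assuming oct and q_no are in scope as in the loop above (the threshold values are illustrative):

	u32 pkt_thresh = 64;	/* fire after 64 packets (illustrative) */
	u32 time_thresh = cn23xx_vf_get_oq_ticks(oct, 100);	/* ~100 us */

	octeon_write_csr64(oct, CN23XX_VF_SLI_OQ_PKT_INT_LEVELS(q_no),
			   (u64)pkt_thresh | ((u64)time_thresh << 32));
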
+
+static void cn23xx_disable_vf_interrupt(struct octeon_device *oct, u8 intr_flag)
+{
+ u32 q_no;
+
+ if (intr_flag & OCTEON_OUTPUT_INTR) {
+ for (q_no = 0; q_no < oct->num_oqs; q_no++) {
+ /* Write max thresholds to the INT_LEVELS reg to disable PO_INT */
+ octeon_write_csr64(
+ oct, CN23XX_VF_SLI_OQ_PKT_INT_LEVELS(q_no),
+ 0x3fffffffffffff);
+ }
+ }
+ if (intr_flag & OCTEON_INPUT_INTR) {
+ for (q_no = 0; q_no < oct->num_iqs; q_no++) {
+ octeon_write_csr64(
+ oct, CN23XX_VF_SLI_IQ_INSTR_COUNT64(q_no),
+ (octeon_read_csr64(
+ oct, CN23XX_VF_SLI_IQ_INSTR_COUNT64(q_no)) &
+ ~(CN23XX_INTR_CINT_ENB |
+ CN23XX_PKT_IN_DONE_CNT_MASK)));
+ }
+ }
+
+ if (intr_flag & OCTEON_MBOX_INTR) {
+ octeon_write_csr64(
+ oct, CN23XX_VF_SLI_PKT_MBOX_INT(0),
+ (octeon_read_csr64(oct, CN23XX_VF_SLI_PKT_MBOX_INT(0)) &
+ ~CN23XX_INTR_MBOX_ENB));
+ }
+}
+
+int cn23xx_setup_octeon_vf_device(struct octeon_device *oct)
+{
+ struct octeon_cn23xx_vf *cn23xx = (struct octeon_cn23xx_vf *)oct->chip;
+ u32 rings_per_vf, ring_flag;
+ u64 reg_val;
+
+ if (octeon_map_pci_barx(oct, 0, 0))
+ return 1;
+
+ /* INPUT_CONTROL[RPVF] gives the VF IOq count */
+ reg_val = octeon_read_csr64(oct, CN23XX_VF_SLI_IQ_PKT_CONTROL64(0));
+
+ oct->pf_num = (reg_val >> CN23XX_PKT_INPUT_CTL_PF_NUM_POS) &
+ CN23XX_PKT_INPUT_CTL_PF_NUM_MASK;
+ oct->vf_num = (reg_val >> CN23XX_PKT_INPUT_CTL_VF_NUM_POS) &
+ CN23XX_PKT_INPUT_CTL_VF_NUM_MASK;
+
+ reg_val = reg_val >> CN23XX_PKT_INPUT_CTL_RPVF_POS;
+
+ rings_per_vf = reg_val & CN23XX_PKT_INPUT_CTL_RPVF_MASK;
+
+ ring_flag = 0;
+
+ cn23xx->conf = oct_get_config_info(oct, LIO_23XX);
+ if (!cn23xx->conf) {
+ dev_err(&oct->pci_dev->dev, "%s No Config found for CN23XX\n",
+ __func__);
+ octeon_unmap_pci_barx(oct, 0);
+ return 1;
+ }
+
+ if (oct->sriov_info.rings_per_vf > rings_per_vf) {
+ dev_warn(&oct->pci_dev->dev,
+ "num_queues:%d greater than PF configured rings_per_vf:%d. Reducing to %d.\n",
+ oct->sriov_info.rings_per_vf, rings_per_vf,
+ rings_per_vf);
+ oct->sriov_info.rings_per_vf = rings_per_vf;
+ } else {
+ if (rings_per_vf > num_present_cpus()) {
+ dev_warn(&oct->pci_dev->dev,
+ "PF configured rings_per_vf:%d greater than num_cpu:%d. Using rings_per_vf:%d equal to num cpus\n",
+ rings_per_vf,
+ num_present_cpus(),
+ num_present_cpus());
+ oct->sriov_info.rings_per_vf =
+ num_present_cpus();
+ } else {
+ oct->sriov_info.rings_per_vf = rings_per_vf;
+ }
+ }
+
+ oct->fn_list.setup_iq_regs = cn23xx_setup_vf_iq_regs;
+ oct->fn_list.setup_oq_regs = cn23xx_setup_vf_oq_regs;
+ oct->fn_list.setup_mbox = cn23xx_setup_vf_mbox;
+ oct->fn_list.free_mbox = cn23xx_free_vf_mbox;
+
+ oct->fn_list.msix_interrupt_handler = cn23xx_vf_msix_interrupt_handler;
+
+ oct->fn_list.setup_device_regs = cn23xx_setup_vf_device_regs;
+ oct->fn_list.update_iq_read_idx = cn23xx_update_read_index;
+
+ oct->fn_list.enable_interrupt = cn23xx_enable_vf_interrupt;
+ oct->fn_list.disable_interrupt = cn23xx_disable_vf_interrupt;
+
+ oct->fn_list.enable_io_queues = cn23xx_enable_vf_io_queues;
+ oct->fn_list.disable_io_queues = cn23xx_disable_vf_io_queues;
+
+ return 0;
+}
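
The field extraction at the top of this function can be summarized with a worked example. Under the mask/position definitions in cn23xx_vf_regs.h, a hypothetical INPUT_CONTROL value for PF 1, VF 3, with 4 rings per VF decodes as:

	/*
	 *	pf_num       = (reg_val >> 45) & 0x7;     -> 1
	 *	vf_num       = (reg_val >> 32) & 0x1FFF;  -> 3
	 *	rings_per_vf = (reg_val >> 48) & 0x3F;    -> 4
	 */
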
+
+void cn23xx_dump_vf_iq_regs(struct octeon_device *oct)
+{
+ u32 regval, q_no;
+
+ dev_dbg(&oct->pci_dev->dev, "SLI_IQ_DOORBELL_0 [0x%x]: 0x%016llx\n",
+ CN23XX_VF_SLI_IQ_DOORBELL(0),
+ CVM_CAST64(octeon_read_csr64(
+ oct, CN23XX_VF_SLI_IQ_DOORBELL(0))));
+
+ dev_dbg(&oct->pci_dev->dev, "SLI_IQ_BASEADDR_0 [0x%x]: 0x%016llx\n",
+ CN23XX_VF_SLI_IQ_BASE_ADDR64(0),
+ CVM_CAST64(octeon_read_csr64(
+ oct, CN23XX_VF_SLI_IQ_BASE_ADDR64(0))));
+
+ dev_dbg(&oct->pci_dev->dev, "SLI_IQ_FIFO_RSIZE_0 [0x%x]: 0x%016llx\n",
+ CN23XX_VF_SLI_IQ_SIZE(0),
+ CVM_CAST64(octeon_read_csr64(oct, CN23XX_VF_SLI_IQ_SIZE(0))));
+
+ for (q_no = 0; q_no < oct->sriov_info.rings_per_vf; q_no++) {
+ dev_dbg(&oct->pci_dev->dev, "SLI_PKT[%d]_INPUT_CTL [0x%x]: 0x%016llx\n",
+ q_no, CN23XX_VF_SLI_IQ_PKT_CONTROL64(q_no),
+ CVM_CAST64(octeon_read_csr64(
+ oct, CN23XX_VF_SLI_IQ_PKT_CONTROL64(q_no))));
+ }
+
+ pci_read_config_dword(oct->pci_dev, CN23XX_CONFIG_PCIE_DEVCTL, &regval);
+ dev_dbg(&oct->pci_dev->dev, "Config DevCtl [0x%x]: 0x%08x\n",
+ CN23XX_CONFIG_PCIE_DEVCTL, regval);
+}
diff --git a/drivers/net/ethernet/cavium/liquidio/cn23xx_vf_device.h b/drivers/net/ethernet/cavium/liquidio/cn23xx_vf_device.h
new file mode 100644
index 000000000000..3f98c7334957
--- /dev/null
+++ b/drivers/net/ethernet/cavium/liquidio/cn23xx_vf_device.h
@@ -0,0 +1,50 @@
+/**********************************************************************
+ * Author: Cavium, Inc.
+ *
+ * Contact: support@cavium.com
+ * Please include "LiquidIO" in the subject.
+ *
+ * Copyright (c) 2003-2016 Cavium, Inc.
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more details.
+ ***********************************************************************/
+/*! \file cn23xx_vf_device.h
+ * \brief Host Driver: Routines that perform CN23XX VF specific operations.
+ */
+
+#ifndef __CN23XX_VF_DEVICE_H__
+#define __CN23XX_VF_DEVICE_H__
+
+#include "cn23xx_vf_regs.h"
+
+/* Register address and configuration for CN23XX devices.
+ * If device-specific changes need to be made, add a struct to include
+ * device-specific fields as shown in the commented section.
+ */
+struct octeon_cn23xx_vf {
+ struct octeon_config *conf;
+};
+
+#define BUSY_READING_REG_VF_LOOP_COUNT 10000
+
+#define CN23XX_MAILBOX_MSGPARAM_SIZE 6
+
+#define MAX_VF_IP_OP_PENDING_PKT_COUNT 100
+
+void cn23xx_vf_ask_pf_to_do_flr(struct octeon_device *oct);
+
+int cn23xx_octeon_pfvf_handshake(struct octeon_device *oct);
+
+int cn23xx_setup_octeon_vf_device(struct octeon_device *oct);
+
+u32 cn23xx_vf_get_oq_ticks(struct octeon_device *oct, u32 time_intr_in_us);
+
+void cn23xx_dump_vf_initialized_regs(struct octeon_device *oct);
+#endif
diff --git a/drivers/net/ethernet/cavium/liquidio/cn23xx_vf_regs.h b/drivers/net/ethernet/cavium/liquidio/cn23xx_vf_regs.h
new file mode 100644
index 000000000000..d33dd8f4226f
--- /dev/null
+++ b/drivers/net/ethernet/cavium/liquidio/cn23xx_vf_regs.h
@@ -0,0 +1,274 @@
+/**********************************************************************
+ * Author: Cavium, Inc.
+ *
+ * Contact: support@cavium.com
+ * Please include "LiquidIO" in the subject.
+ *
+ * Copyright (c) 2003-2016 Cavium, Inc.
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more details.
+ ***********************************************************************/
+/*! \file cn23xx_vf_regs.h
+ * \brief Host Driver: Register Address and Register Mask values for
+ * Octeon CN23XX VF functions.
+ */
+
+#ifndef __CN23XX_VF_REGS_H__
+#define __CN23XX_VF_REGS_H__
+
+#define CN23XX_CONFIG_XPANSION_BAR 0x38
+
+#define CN23XX_CONFIG_PCIE_CAP 0x70
+#define CN23XX_CONFIG_PCIE_DEVCAP 0x74
+#define CN23XX_CONFIG_PCIE_DEVCTL 0x78
+#define CN23XX_CONFIG_PCIE_LINKCAP 0x7C
+#define CN23XX_CONFIG_PCIE_LINKCTL 0x80
+#define CN23XX_CONFIG_PCIE_SLOTCAP 0x84
+#define CN23XX_CONFIG_PCIE_SLOTCTL 0x88
+
+#define CN23XX_CONFIG_PCIE_FLTMSK 0x720
+
+/* The input jabber is used to determine the TSO max size.
+ * Due to a H/W limitation, this needs to be reduced to 60000
+ * in order to do H/W TSO and avoid the WQE malformation
+ * (PKO_BUG_24989_WQE_LEN).
+ */
+#define CN23XX_DEFAULT_INPUT_JABBER 0xEA60 /*60000*/
+
+/* ############## BAR0 Registers ################ */
+
+/* Each Input Queue register is at a 16-byte Offset in BAR0 */
+#define CN23XX_VF_IQ_OFFSET 0x20000
+
+/*###################### REQUEST QUEUE #########################*/
+
+/* 64 registers for Input Queue Instr Count - SLI_PKT_IN_DONE0_CNTS */
+#define CN23XX_VF_SLI_IQ_INSTR_COUNT_START64 0x10040
+
+/* 64 registers for Input Queues Start Addr - SLI_PKT0_INSTR_BADDR */
+#define CN23XX_VF_SLI_IQ_BASE_ADDR_START64 0x10010
+
+/* 64 registers for Input Doorbell - SLI_PKT0_INSTR_BAOFF_DBELL */
+#define CN23XX_VF_SLI_IQ_DOORBELL_START 0x10020
+
+/* 64 registers for Input Queue size - SLI_PKT0_INSTR_FIFO_RSIZE */
+#define CN23XX_VF_SLI_IQ_SIZE_START 0x10030
+
+/* 64 registers (64-bit) - ES, RO, NS, Arbitration for Input Queue Data &
+ * gather list fetches. SLI_PKT(0..63)_INPUT_CONTROL.
+ */
+#define CN23XX_VF_SLI_IQ_PKT_CONTROL_START64 0x10000
+
+/*------- Request Queue Macros ---------*/
+#define CN23XX_VF_SLI_IQ_PKT_CONTROL64(iq) \
+ (CN23XX_VF_SLI_IQ_PKT_CONTROL_START64 + ((iq) * CN23XX_VF_IQ_OFFSET))
+
+#define CN23XX_VF_SLI_IQ_BASE_ADDR64(iq) \
+ (CN23XX_VF_SLI_IQ_BASE_ADDR_START64 + ((iq) * CN23XX_VF_IQ_OFFSET))
+
+#define CN23XX_VF_SLI_IQ_SIZE(iq) \
+ (CN23XX_VF_SLI_IQ_SIZE_START + ((iq) * CN23XX_VF_IQ_OFFSET))
+
+#define CN23XX_VF_SLI_IQ_DOORBELL(iq) \
+ (CN23XX_VF_SLI_IQ_DOORBELL_START + ((iq) * CN23XX_VF_IQ_OFFSET))
+
+#define CN23XX_VF_SLI_IQ_INSTR_COUNT64(iq) \
+ (CN23XX_VF_SLI_IQ_INSTR_COUNT_START64 + ((iq) * CN23XX_VF_IQ_OFFSET))
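
Each accessor macro scales the queue number by the fixed 0x20000 per-queue BAR0 stride. A worked expansion for two doorbell registers:

	CN23XX_VF_SLI_IQ_DOORBELL(0) = 0x10020 + 0 * 0x20000 = 0x10020
	CN23XX_VF_SLI_IQ_DOORBELL(2) = 0x10020 + 2 * 0x20000 = 0x50020
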
+
+/*------------------ Masks ----------------*/
+#define CN23XX_PKT_INPUT_CTL_VF_NUM BIT_ULL(32)
+#define CN23XX_PKT_INPUT_CTL_MAC_NUM BIT(29)
+/* Number of instructions to be read in one MAC read request;
+ * set to the max value (4).
+ */
+#define CN23XX_PKT_INPUT_CTL_RDSIZE (3 << 25)
+#define CN23XX_PKT_INPUT_CTL_IS_64B BIT(24)
+#define CN23XX_PKT_INPUT_CTL_RST BIT(23)
+#define CN23XX_PKT_INPUT_CTL_QUIET BIT(28)
+#define CN23XX_PKT_INPUT_CTL_RING_ENB BIT(22)
+#define CN23XX_PKT_INPUT_CTL_DATA_NS BIT(8)
+#define CN23XX_PKT_INPUT_CTL_DATA_ES_64B_SWAP BIT(6)
+#define CN23XX_PKT_INPUT_CTL_DATA_RO BIT(5)
+#define CN23XX_PKT_INPUT_CTL_USE_CSR BIT(4)
+#define CN23XX_PKT_INPUT_CTL_GATHER_NS BIT(3)
+#define CN23XX_PKT_INPUT_CTL_GATHER_ES_64B_SWAP (2)
+#define CN23XX_PKT_INPUT_CTL_GATHER_RO (1)
+
+/* Rings per Virtual Function [RO] */
+#define CN23XX_PKT_INPUT_CTL_RPVF_MASK (0x3F)
+#define CN23XX_PKT_INPUT_CTL_RPVF_POS (48)
+/* These bits[47:45][RO] give the Physical function number info within the MAC*/
+#define CN23XX_PKT_INPUT_CTL_PF_NUM_MASK (0x7)
+#define CN23XX_PKT_INPUT_CTL_PF_NUM_POS (45)
+/* These bits[44:32][RO] give the virtual function number info within the PF */
+#define CN23XX_PKT_INPUT_CTL_VF_NUM_MASK (0x1FFF)
+#define CN23XX_PKT_INPUT_CTL_VF_NUM_POS (32)
+#define CN23XX_PKT_INPUT_CTL_MAC_NUM_MASK (0x3)
+#define CN23XX_PKT_INPUT_CTL_MAC_NUM_POS (29)
+#define CN23XX_PKT_IN_DONE_WMARK_MASK (0xFFFFULL)
+#define CN23XX_PKT_IN_DONE_WMARK_BIT_POS (32)
+#define CN23XX_PKT_IN_DONE_CNT_MASK (0x00000000FFFFFFFFULL)
+
+#ifdef __LITTLE_ENDIAN_BITFIELD
+#define CN23XX_PKT_INPUT_CTL_MASK \
+ (CN23XX_PKT_INPUT_CTL_RDSIZE \
+ | CN23XX_PKT_INPUT_CTL_DATA_ES_64B_SWAP \
+ | CN23XX_PKT_INPUT_CTL_USE_CSR)
+#else
+#define CN23XX_PKT_INPUT_CTL_MASK \
+ (CN23XX_PKT_INPUT_CTL_RDSIZE \
+ | CN23XX_PKT_INPUT_CTL_DATA_ES_64B_SWAP \
+ | CN23XX_PKT_INPUT_CTL_USE_CSR \
+ | CN23XX_PKT_INPUT_CTL_GATHER_ES_64B_SWAP)
+#endif
+
+/** Masks for SLI_PKT_IN_DONE(0..63)_CNTS Register */
+#define CN23XX_IN_DONE_CNTS_PI_INT BIT_ULL(62)
+#define CN23XX_IN_DONE_CNTS_CINT_ENB BIT_ULL(48)
+
+/*############################ OUTPUT QUEUE #########################*/
+
+/* 64 registers for Output queue control - SLI_PKT(0..63)_OUTPUT_CONTROL */
+#define CN23XX_VF_SLI_OQ_PKT_CONTROL_START 0x10050
+
+/* 64 registers for Output queue buffer and info size - SLI_PKT0_OUT_SIZE */
+#define CN23XX_VF_SLI_OQ0_BUFF_INFO_SIZE 0x10060
+
+/* 64 registers for Output Queue Start Addr - SLI_PKT0_SLIST_BADDR */
+#define CN23XX_VF_SLI_OQ_BASE_ADDR_START64 0x10070
+
+/* 64 registers for Output Queue Packet Credits - SLI_PKT0_SLIST_BAOFF_DBELL */
+#define CN23XX_VF_SLI_OQ_PKT_CREDITS_START 0x10080
+
+/* 64 registers for Output Queue size - SLI_PKT0_SLIST_FIFO_RSIZE */
+#define CN23XX_VF_SLI_OQ_SIZE_START 0x10090
+
+/* 64 registers for Output Queue Packet Count - SLI_PKT0_CNTS */
+#define CN23XX_VF_SLI_OQ_PKT_SENT_START 0x100B0
+
+/* 64 registers for Output Queue INT Levels - SLI_PKT0_INT_LEVELS */
+#define CN23XX_VF_SLI_OQ_PKT_INT_LEVELS_START64 0x100A0
+
+/* Each Output Queue register is at a 16-byte Offset in BAR0 */
+#define CN23XX_VF_OQ_OFFSET 0x20000
+
+/*------- Output Queue Macros ---------*/
+
+#define CN23XX_VF_SLI_OQ_PKT_CONTROL(oq) \
+ (CN23XX_VF_SLI_OQ_PKT_CONTROL_START + ((oq) * CN23XX_VF_OQ_OFFSET))
+
+#define CN23XX_VF_SLI_OQ_BASE_ADDR64(oq) \
+ (CN23XX_VF_SLI_OQ_BASE_ADDR_START64 + ((oq) * CN23XX_VF_OQ_OFFSET))
+
+#define CN23XX_VF_SLI_OQ_SIZE(oq) \
+ (CN23XX_VF_SLI_OQ_SIZE_START + ((oq) * CN23XX_VF_OQ_OFFSET))
+
+#define CN23XX_VF_SLI_OQ_BUFF_INFO_SIZE(oq) \
+ (CN23XX_VF_SLI_OQ0_BUFF_INFO_SIZE + ((oq) * CN23XX_VF_OQ_OFFSET))
+
+#define CN23XX_VF_SLI_OQ_PKTS_SENT(oq) \
+ (CN23XX_VF_SLI_OQ_PKT_SENT_START + ((oq) * CN23XX_VF_OQ_OFFSET))
+
+#define CN23XX_VF_SLI_OQ_PKTS_CREDIT(oq) \
+ (CN23XX_VF_SLI_OQ_PKT_CREDITS_START + ((oq) * CN23XX_VF_OQ_OFFSET))
+
+#define CN23XX_VF_SLI_OQ_PKT_INT_LEVELS(oq) \
+ (CN23XX_VF_SLI_OQ_PKT_INT_LEVELS_START64 + ((oq) * CN23XX_VF_OQ_OFFSET))
+
+/* Macro's for accessing CNT and TIME separately from INT_LEVELS */
+#define CN23XX_VF_SLI_OQ_PKT_INT_LEVELS_CNT(oq) \
+ (CN23XX_VF_SLI_OQ_PKT_INT_LEVELS_START64 + ((oq) * CN23XX_VF_OQ_OFFSET))
+
+#define CN23XX_VF_SLI_OQ_PKT_INT_LEVELS_TIME(oq) \
+ (CN23XX_VF_SLI_OQ_PKT_INT_LEVELS_START64 + \
+ ((oq) * CN23XX_VF_OQ_OFFSET) + 4)
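
INT_LEVELS is one 64-bit register holding the packet-count threshold in bits [31:0] and the time threshold in bits [63:32]; the +4 byte offset in the TIME macro addresses the upper word directly (assuming little-endian CSR layout). A sketch of programming the two halves with separate 32-bit writes, assuming octeon_write_csr() performs a 32-bit access as its use for the SIZE registers suggests:

	octeon_write_csr(oct, CN23XX_VF_SLI_OQ_PKT_INT_LEVELS_CNT(oq_no),
			 pkt_thresh);
	octeon_write_csr(oct, CN23XX_VF_SLI_OQ_PKT_INT_LEVELS_TIME(oq_no),
			 time_thresh);
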
+
+/*------------------ Masks ----------------*/
+#define CN23XX_PKT_OUTPUT_CTL_TENB BIT(13)
+#define CN23XX_PKT_OUTPUT_CTL_CENB BIT(12)
+#define CN23XX_PKT_OUTPUT_CTL_IPTR BIT(11)
+#define CN23XX_PKT_OUTPUT_CTL_ES BIT(9)
+#define CN23XX_PKT_OUTPUT_CTL_NSR BIT(8)
+#define CN23XX_PKT_OUTPUT_CTL_ROR BIT(7)
+#define CN23XX_PKT_OUTPUT_CTL_DPTR BIT(6)
+#define CN23XX_PKT_OUTPUT_CTL_BMODE BIT(5)
+#define CN23XX_PKT_OUTPUT_CTL_ES_P BIT(3)
+#define CN23XX_PKT_OUTPUT_CTL_NSR_P BIT(2)
+#define CN23XX_PKT_OUTPUT_CTL_ROR_P BIT(1)
+#define CN23XX_PKT_OUTPUT_CTL_RING_ENB BIT(0)
+
+/*######################### Mailbox Reg Macros ########################*/
+#define CN23XX_VF_SLI_PKT_MBOX_INT_START 0x10210
+#define CN23XX_SLI_PKT_PF_VF_MBOX_SIG_START 0x10200
+
+#define CN23XX_SLI_MBOX_OFFSET 0x20000
+#define CN23XX_SLI_MBOX_SIG_IDX_OFFSET 0x8
+
+#define CN23XX_VF_SLI_PKT_MBOX_INT(q) \
+ (CN23XX_VF_SLI_PKT_MBOX_INT_START + ((q) * CN23XX_SLI_MBOX_OFFSET))
+
+#define CN23XX_SLI_PKT_PF_VF_MBOX_SIG(q, idx) \
+ (CN23XX_SLI_PKT_PF_VF_MBOX_SIG_START + \
+ ((q) * CN23XX_SLI_MBOX_OFFSET + \
+ (idx) * CN23XX_SLI_MBOX_SIG_IDX_OFFSET))
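
A worked expansion ties these macros back to cn23xx_setup_vf_mbox(): for queue 0, SIG0 (the register the VF reads) sits at 0x10200 and SIG1 (the register the VF writes) 8 bytes above it:

	CN23XX_SLI_PKT_PF_VF_MBOX_SIG(0, 0) = 0x10200 + 0 * 0x8 = 0x10200
	CN23XX_SLI_PKT_PF_VF_MBOX_SIG(0, 1) = 0x10200 + 1 * 0x8 = 0x10208
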
+
+/*######################## INTERRUPTS #########################*/
+
+#define CN23XX_VF_SLI_INT_SUM_START 0x100D0
+
+#define CN23XX_VF_SLI_INT_SUM(q) \
+ (CN23XX_VF_SLI_INT_SUM_START + ((q) * CN23XX_VF_IQ_OFFSET))
+
+/*------------------ Interrupt Masks ----------------*/
+
+#define CN23XX_INTR_PO_INT BIT_ULL(63)
+#define CN23XX_INTR_PI_INT BIT_ULL(62)
+#define CN23XX_INTR_MBOX_INT BIT_ULL(61)
+#define CN23XX_INTR_RESEND BIT_ULL(60)
+
+#define CN23XX_INTR_CINT_ENB BIT_ULL(48)
+#define CN23XX_INTR_MBOX_ENB BIT(0)
+
+/*############################ MIO #########################*/
+#define CN23XX_MIO_PTP_CLOCK_CFG 0x0001070000000f00ULL
+#define CN23XX_MIO_PTP_CLOCK_LO 0x0001070000000f08ULL
+#define CN23XX_MIO_PTP_CLOCK_HI 0x0001070000000f10ULL
+#define CN23XX_MIO_PTP_CLOCK_COMP 0x0001070000000f18ULL
+#define CN23XX_MIO_PTP_TIMESTAMP 0x0001070000000f20ULL
+#define CN23XX_MIO_PTP_EVT_CNT 0x0001070000000f28ULL
+#define CN23XX_MIO_PTP_CKOUT_THRESH_LO 0x0001070000000f30ULL
+#define CN23XX_MIO_PTP_CKOUT_THRESH_HI 0x0001070000000f38ULL
+#define CN23XX_MIO_PTP_CKOUT_HI_INCR 0x0001070000000f40ULL
+#define CN23XX_MIO_PTP_CKOUT_LO_INCR 0x0001070000000f48ULL
+#define CN23XX_MIO_PTP_PPS_THRESH_LO 0x0001070000000f50ULL
+#define CN23XX_MIO_PTP_PPS_THRESH_HI 0x0001070000000f58ULL
+#define CN23XX_MIO_PTP_PPS_HI_INCR 0x0001070000000f60ULL
+#define CN23XX_MIO_PTP_PPS_LO_INCR 0x0001070000000f68ULL
+
+/*############################ RST #########################*/
+#define CN23XX_RST_BOOT 0x0001180006001600ULL
+
+/*######################## MSIX TABLE #########################*/
+
+#define CN23XX_MSIX_TABLE_ADDR_START 0x0
+#define CN23XX_MSIX_TABLE_DATA_START 0x8
+
+#define CN23XX_MSIX_TABLE_SIZE 0x10
+#define CN23XX_MSIX_TABLE_ENTRIES 0x41
+
+#define CN23XX_MSIX_ENTRY_VECTOR_CTL BIT_ULL(32)
+
+#define CN23XX_MSIX_TABLE_ADDR(idx) \
+ (CN23XX_MSIX_TABLE_ADDR_START + ((idx) * CN23XX_MSIX_TABLE_SIZE))
+
+#define CN23XX_MSIX_TABLE_DATA(idx) \
+ (CN23XX_MSIX_TABLE_DATA_START + ((idx) * CN23XX_MSIX_TABLE_SIZE))
+
+#endif
diff --git a/drivers/net/ethernet/cavium/liquidio/cn66xx_device.c b/drivers/net/ethernet/cavium/liquidio/cn66xx_device.c
index e779af88621b..bdec051107a6 100644
--- a/drivers/net/ethernet/cavium/liquidio/cn66xx_device.c
+++ b/drivers/net/ethernet/cavium/liquidio/cn66xx_device.c
@@ -1,24 +1,20 @@
/**********************************************************************
-* Author: Cavium, Inc.
-*
-* Contact: support@cavium.com
-* Please include "LiquidIO" in the subject.
-*
-* Copyright (c) 2003-2015 Cavium, Inc.
-*
-* This file is free software; you can redistribute it and/or modify
-* it under the terms of the GNU General Public License, Version 2, as
-* published by the Free Software Foundation.
-*
-* This file is distributed in the hope that it will be useful, but
-* AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
-* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
-* NONINFRINGEMENT. See the GNU General Public License for more
-* details.
-*
-* This file may also be available under a different license from Cavium.
-* Contact Cavium, Inc. for more information
-**********************************************************************/
+ * Author: Cavium, Inc.
+ *
+ * Contact: support@cavium.com
+ * Please include "LiquidIO" in the subject.
+ *
+ * Copyright (c) 2003-2016 Cavium, Inc.
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more details.
+ ***********************************************************************/
#include <linux/pci.h>
#include <linux/netdevice.h>
#include "liquidio_common.h"
@@ -275,7 +271,6 @@ void lio_cn6xxx_setup_iq_regs(struct octeon_device *oct, u32 iq_no)
{
struct octeon_instr_queue *iq = oct->instr_queue[iq_no];
- /* Disable Packet-by-Packet mode; No Parse Mode or Skip length */
octeon_write_csr64(oct, CN6XXX_SLI_IQ_PKT_INSTR_HDR64(iq_no), 0);
/* Write the start of the input queue's ring and its size */
@@ -378,7 +373,7 @@ void lio_cn6xxx_disable_io_queues(struct octeon_device *oct)
/* Reset the doorbell register for each Input queue. */
for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) {
- if (!(oct->io_qmask.iq & (1ULL << i)))
+ if (!(oct->io_qmask.iq & BIT_ULL(i)))
continue;
octeon_write_csr(oct, CN6XXX_SLI_IQ_DOORBELL(i), 0xFFFFFFFF);
d32 = octeon_read_csr(oct, CN6XXX_SLI_IQ_DOORBELL(i));
@@ -400,9 +395,8 @@ void lio_cn6xxx_disable_io_queues(struct octeon_device *oct)
;
/* Reset the doorbell register for each Output queue. */
- /* for (i = 0; i < oct->num_oqs; i++) { */
for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) {
- if (!(oct->io_qmask.oq & (1ULL << i)))
+ if (!(oct->io_qmask.oq & BIT_ULL(i)))
continue;
octeon_write_csr(oct, CN6XXX_SLI_OQ_PKTS_CREDIT(i), 0xFFFFFFFF);
d32 = octeon_read_csr(oct, CN6XXX_SLI_OQ_PKTS_CREDIT(i));
@@ -537,15 +531,14 @@ static int lio_cn6xxx_process_droq_intr_regs(struct octeon_device *oct)
oct->droq_intr = 0;
- /* for (oq_no = 0; oq_no < oct->num_oqs; oq_no++) { */
for (oq_no = 0; oq_no < MAX_OCTEON_OUTPUT_QUEUES(oct); oq_no++) {
- if (!(droq_mask & (1ULL << oq_no)))
+ if (!(droq_mask & BIT_ULL(oq_no)))
continue;
droq = oct->droq[oq_no];
pkt_count = octeon_droq_check_hw_for_pkts(droq);
if (pkt_count) {
- oct->droq_intr |= (1ULL << oq_no);
+ oct->droq_intr |= BIT_ULL(oq_no);
if (droq->ops.poll_mode) {
u32 value;
u32 reg;
@@ -721,8 +714,6 @@ int lio_setup_cn66xx_octeon_device(struct octeon_device *oct)
int lio_validate_cn6xxx_config_info(struct octeon_device *oct,
struct octeon_config *conf6xxx)
{
- /* int total_instrs = 0; */
-
if (CFG_GET_IQ_MAX_Q(conf6xxx) > CN6XXX_MAX_INPUT_QUEUES) {
dev_err(&oct->pci_dev->dev, "%s: Num IQ (%d) exceeds Max (%d)\n",
__func__, CFG_GET_IQ_MAX_Q(conf6xxx),
diff --git a/drivers/net/ethernet/cavium/liquidio/cn66xx_device.h b/drivers/net/ethernet/cavium/liquidio/cn66xx_device.h
index a40a91394079..8ed57134ee0c 100644
--- a/drivers/net/ethernet/cavium/liquidio/cn66xx_device.h
+++ b/drivers/net/ethernet/cavium/liquidio/cn66xx_device.h
@@ -1,25 +1,20 @@
/**********************************************************************
-* Author: Cavium, Inc.
-*
-* Contact: support@cavium.com
-* Please include "LiquidIO" in the subject.
-*
-* Copyright (c) 2003-2015 Cavium, Inc.
-*
-* This file is free software; you can redistribute it and/or modify
-* it under the terms of the GNU General Public License, Version 2, as
-* published by the Free Software Foundation.
-*
-* This file is distributed in the hope that it will be useful, but
-* AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
-* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
-* NONINFRINGEMENT. See the GNU General Public License for more
-* details.
-*
-* This file may also be available under a different license from Cavium.
-* Contact Cavium, Inc. for more information
-**********************************************************************/
-
+ * Author: Cavium, Inc.
+ *
+ * Contact: support@cavium.com
+ * Please include "LiquidIO" in the subject.
+ *
+ * Copyright (c) 2003-2016 Cavium, Inc.
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more details.
+ ***********************************************************************/
/*! \file cn66xx_device.h
* \brief Host Driver: Routines that perform CN66XX specific operations.
*/
@@ -96,8 +91,8 @@ void lio_cn6xxx_setup_reg_address(struct octeon_device *oct, void *chip,
struct octeon_reg_list *reg_list);
u32 lio_cn6xxx_coprocessor_clock(struct octeon_device *oct);
u32 lio_cn6xxx_get_oq_ticks(struct octeon_device *oct, u32 time_intr_in_us);
-int lio_setup_cn66xx_octeon_device(struct octeon_device *);
+int lio_setup_cn66xx_octeon_device(struct octeon_device *oct);
int lio_validate_cn6xxx_config_info(struct octeon_device *oct,
- struct octeon_config *);
+ struct octeon_config *conf6xxx);
#endif
diff --git a/drivers/net/ethernet/cavium/liquidio/cn66xx_regs.h b/drivers/net/ethernet/cavium/liquidio/cn66xx_regs.h
index 5e3aff242ad3..b248966837b4 100644
--- a/drivers/net/ethernet/cavium/liquidio/cn66xx_regs.h
+++ b/drivers/net/ethernet/cavium/liquidio/cn66xx_regs.h
@@ -1,25 +1,20 @@
/**********************************************************************
-* Author: Cavium, Inc.
-*
-* Contact: support@cavium.com
-* Please include "LiquidIO" in the subject.
-*
-* Copyright (c) 2003-2015 Cavium, Inc.
-*
-* This file is free software; you can redistribute it and/or modify
-* it under the terms of the GNU General Public License, Version 2, as
-* published by the Free Software Foundation.
-*
-* This file is distributed in the hope that it will be useful, but
-* AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
-* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
-* NONINFRINGEMENT. See the GNU General Public License for more
-* details.
-*
-* This file may also be available under a different license from Cavium.
-* Contact Cavium, Inc. for more information
-**********************************************************************/
-
+ * Author: Cavium, Inc.
+ *
+ * Contact: support@cavium.com
+ * Please include "LiquidIO" in the subject.
+ *
+ * Copyright (c) 2003-2016 Cavium, Inc.
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more details.
+ ***********************************************************************/
/*! \file cn66xx_regs.h
* \brief Host Driver: Register Address and Register Mask values for
* Octeon CN66XX devices.
@@ -443,10 +438,10 @@
#define CN6XXX_SLI_S2M_PORT0_CTL 0x3D80
#define CN6XXX_SLI_S2M_PORT1_CTL 0x3D90
#define CN6XXX_SLI_S2M_PORTX_CTL(port) \
- (CN6XXX_SLI_S2M_PORT0_CTL + (port * 0x10))
+ (CN6XXX_SLI_S2M_PORT0_CTL + ((port) * 0x10))
#define CN6XXX_SLI_INT_ENB64(port) \
- (CN6XXX_SLI_INT_ENB64_PORT0 + (port * 0x10))
+ (CN6XXX_SLI_INT_ENB64_PORT0 + ((port) * 0x10))
#define CN6XXX_SLI_MAC_NUMBER 0x3E00
@@ -458,7 +453,7 @@
#define CN6XXX_PCI_BAR1_OFFSET 0x8
#define CN6XXX_BAR1_REG(idx, port) \
- (CN6XXX_BAR1_INDEX_START + (port * CN6XXX_PEM_OFFSET) + \
+ (CN6XXX_BAR1_INDEX_START + ((port) * CN6XXX_PEM_OFFSET) + \
(CN6XXX_PCI_BAR1_OFFSET * (idx)))
/*############################ DPI #########################*/
@@ -476,17 +471,17 @@
#define CN6XXX_DPI_DMA_ENG0_ENB 0x0001df0000000080ULL
#define CN6XXX_DPI_DMA_ENG_ENB(q_no) \
- (CN6XXX_DPI_DMA_ENG0_ENB + (q_no * 8))
+ (CN6XXX_DPI_DMA_ENG0_ENB + ((q_no) * 8))
#define CN6XXX_DPI_DMA_ENG0_BUF 0x0001df0000000880ULL
#define CN6XXX_DPI_DMA_ENG_BUF(q_no) \
- (CN6XXX_DPI_DMA_ENG0_BUF + (q_no * 8))
+ (CN6XXX_DPI_DMA_ENG0_BUF + ((q_no) * 8))
#define CN6XXX_DPI_SLI_PRT0_CFG 0x0001df0000000900ULL
#define CN6XXX_DPI_SLI_PRT1_CFG 0x0001df0000000908ULL
#define CN6XXX_DPI_SLI_PRTX_CFG(port) \
- (CN6XXX_DPI_SLI_PRT0_CFG + (port * 0x10))
+ (CN6XXX_DPI_SLI_PRT0_CFG + ((port) * 0x10))
#define CN6XXX_DPI_DMA_COMMIT_MODE BIT_ULL(58)
#define CN6XXX_DPI_DMA_PKT_HP BIT_ULL(57)
diff --git a/drivers/net/ethernet/cavium/liquidio/cn68xx_device.c b/drivers/net/ethernet/cavium/liquidio/cn68xx_device.c
index dbf3566ead53..50b533ff58e6 100644
--- a/drivers/net/ethernet/cavium/liquidio/cn68xx_device.c
+++ b/drivers/net/ethernet/cavium/liquidio/cn68xx_device.c
@@ -1,24 +1,20 @@
/**********************************************************************
-* Author: Cavium, Inc.
-*
-* Contact: support@cavium.com
-* Please include "LiquidIO" in the subject.
-*
-* Copyright (c) 2003-2015 Cavium, Inc.
-*
-* This file is free software; you can redistribute it and/or modify
-* it under the terms of the GNU General Public License, Version 2, as
-* published by the Free Software Foundation.
-*
-* This file is distributed in the hope that it will be useful, but
-* AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
-* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
-* NONINFRINGEMENT. See the GNU General Public License for more
-* details.
-*
-* This file may also be available under a different license from Cavium.
-* Contact Cavium, Inc. for more information
-**********************************************************************/
+ * Author: Cavium, Inc.
+ *
+ * Contact: support@cavium.com
+ * Please include "LiquidIO" in the subject.
+ *
+ * Copyright (c) 2003-2016 Cavium, Inc.
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more details.
+ ***********************************************************************/
#include <linux/pci.h>
#include <linux/netdevice.h>
#include "liquidio_common.h"
@@ -76,7 +72,7 @@ static void lio_cn68xx_setup_pkt_ctl_regs(struct octeon_device *oct)
pktctl = octeon_read_csr64(oct, CN6XXX_SLI_PKT_CTL);
/* 68XX specific */
- max_oqs = CFG_GET_OQ_MAX_Q(CHIP_FIELD(oct, cn6xxx, conf));
+ max_oqs = CFG_GET_OQ_MAX_Q(CHIP_CONF(oct, cn6xxx));
tx_pipe = octeon_read_csr64(oct, CN68XX_SLI_TX_PIPE);
tx_pipe &= 0xffffffffff00ffffULL; /* clear out NUMP field */
tx_pipe |= max_oqs << 16; /* put max_oqs in NUMP field */
diff --git a/drivers/net/ethernet/cavium/liquidio/cn68xx_device.h b/drivers/net/ethernet/cavium/liquidio/cn68xx_device.h
index ea7bdcce6044..66b8d6bf5ec4 100644
--- a/drivers/net/ethernet/cavium/liquidio/cn68xx_device.h
+++ b/drivers/net/ethernet/cavium/liquidio/cn68xx_device.h
@@ -1,25 +1,20 @@
/**********************************************************************
-* Author: Cavium, Inc.
-*
-* Contact: support@cavium.com
-* Please include "LiquidIO" in the subject.
-*
-* Copyright (c) 2003-2015 Cavium, Inc.
-*
-* This file is free software; you can redistribute it and/or modify
-* it under the terms of the GNU General Public License, Version 2, as
-* published by the Free Software Foundation.
-*
-* This file is distributed in the hope that it will be useful, but
-* AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
-* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
-* NONINFRINGEMENT. See the GNU General Public License for more
-* details.
-*
-* This file may also be available under a different license from Cavium.
-* Contact Cavium, Inc. for more information
-**********************************************************************/
-
+ * Author: Cavium, Inc.
+ *
+ * Contact: support@cavium.com
+ * Please include "LiquidIO" in the subject.
+ *
+ * Copyright (c) 2003-2016 Cavium, Inc.
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more details.
+ ***********************************************************************/
/*! \file cn68xx_device.h
* \brief Host Driver: Routines that perform CN68XX specific operations.
*/
diff --git a/drivers/net/ethernet/cavium/liquidio/cn68xx_regs.h b/drivers/net/ethernet/cavium/liquidio/cn68xx_regs.h
index d45a0f4aaf1f..0b742f09e49d 100644
--- a/drivers/net/ethernet/cavium/liquidio/cn68xx_regs.h
+++ b/drivers/net/ethernet/cavium/liquidio/cn68xx_regs.h
@@ -1,25 +1,20 @@
/**********************************************************************
-* Author: Cavium, Inc.
-*
-* Contact: support@cavium.com
-* Please include "LiquidIO" in the subject.
-*
-* Copyright (c) 2003-2015 Cavium, Inc.
-*
-* This file is free software; you can redistribute it and/or modify
-* it under the terms of the GNU General Public License, Version 2, as
-* published by the Free Software Foundation.
-*
-* This file is distributed in the hope that it will be useful, but
-* AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
-* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
-* NONINFRINGEMENT. See the GNU General Public License for more
-* details.
-*
-* This file may also be available under a different license from Cavium.
-* Contact Cavium, Inc. for more information
-**********************************************************************/
-
+ * Author: Cavium, Inc.
+ *
+ * Contact: support@cavium.com
+ * Please include "LiquidIO" in the subject.
+ *
+ * Copyright (c) 2003-2016 Cavium, Inc.
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more details.
+ ***********************************************************************/
/*! \file cn68xx_regs.h
* \brief Host Driver: Register Address and Register Mask values for
* Octeon CN68XX devices. The register map for CN66XX is the same
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_core.c b/drivers/net/ethernet/cavium/liquidio/lio_core.c
index 201eddb3013a..f629c2fe04a4 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_core.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_core.c
@@ -1,24 +1,20 @@
/**********************************************************************
-* Author: Cavium, Inc.
-*
-* Contact: support@cavium.com
-* Please include "LiquidIO" in the subject.
-*
-* Copyright (c) 2003-2015 Cavium, Inc.
-*
-* This file is free software; you can redistribute it and/or modify
-* it under the terms of the GNU General Public License, Version 2, as
-* published by the Free Software Foundation.
-*
-* This file is distributed in the hope that it will be useful, but
-* AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
-* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
-* NONINFRINGEMENT. See the GNU General Public License for more
-* details.
-*
-* This file may also be available under a different license from Cavium.
-* Contact Cavium, Inc. for more information
-**********************************************************************/
+ * Author: Cavium, Inc.
+ *
+ * Contact: support@cavium.com
+ * Please include "LiquidIO" in the subject.
+ *
+ * Copyright (c) 2003-2016 Cavium, Inc.
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more details.
+ ***********************************************************************/
#include <linux/pci.h>
#include <linux/if_vlan.h>
#include "liquidio_common.h"
@@ -89,13 +85,6 @@ void octeon_update_tx_completion_counters(void *buf, int reqtype,
}
(*pkts_compl)++;
-/*TODO, Use some other pound define to suggest
- * the fact that iqs are not tied to netdevs
- * and can take traffic from different netdevs
- * hence bql reporting is done per packet
- * than in bulk. Usage of NO_NAPI in txq completion is
- * a little confusing
- */
*bytes_compl += skb->len;
}
@@ -264,3 +253,34 @@ void liquidio_link_ctrl_cmd_completion(void *nctrl_ptr)
nctrl->ncmd.s.cmd);
}
}
+
+void octeon_pf_changed_vf_macaddr(struct octeon_device *oct, u8 *mac)
+{
+ bool macaddr_changed = false;
+ struct net_device *netdev;
+ struct lio *lio;
+
+ rtnl_lock();
+
+ netdev = oct->props[0].netdev;
+ lio = GET_LIO(netdev);
+
+ lio->linfo.macaddr_is_admin_asgnd = true;
+
+ if (!ether_addr_equal(netdev->dev_addr, mac)) {
+ macaddr_changed = true;
+ ether_addr_copy(netdev->dev_addr, mac);
+ ether_addr_copy(((u8 *)&lio->linfo.hw_addr) + 2, mac);
+ call_netdevice_notifiers(NETDEV_CHANGEADDR, netdev);
+ }
+
+ rtnl_unlock();
+
+ if (macaddr_changed)
+ dev_info(&oct->pci_dev->dev,
+ "PF changed VF's MAC address to %pM\n", mac);
+
+ /* no need to notify the firmware of the macaddr change because
+ * the PF did that already
+ */
+}
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c b/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c
index f163e0abbeb2..b00c3002360e 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c
@@ -1,24 +1,20 @@
/**********************************************************************
-* Author: Cavium, Inc.
-*
-* Contact: support@cavium.com
-* Please include "LiquidIO" in the subject.
-*
-* Copyright (c) 2003-2015 Cavium, Inc.
-*
-* This file is free software; you can redistribute it and/or modify
-* it under the terms of the GNU General Public License, Version 2, as
-* published by the Free Software Foundation.
-*
-* This file is distributed in the hope that it will be useful, but
-* AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
-* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
-* NONINFRINGEMENT. See the GNU General Public License for more
-* details.
-*
-* This file may also be available under a different license from Cavium.
-* Contact Cavium, Inc. for more information
-**********************************************************************/
+ * Author: Cavium, Inc.
+ *
+ * Contact: support@cavium.com
+ * Please include "LiquidIO" in the subject.
+ *
+ * Copyright (c) 2003-2016 Cavium, Inc.
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more details.
+ ***********************************************************************/
#include <linux/netdevice.h>
#include <linux/net_tstamp.h>
#include <linux/pci.h>
@@ -33,6 +29,7 @@
#include "cn66xx_regs.h"
#include "cn66xx_device.h"
#include "cn23xx_pf_device.h"
+#include "cn23xx_vf_device.h"
static int octnet_get_link_stats(struct net_device *netdev);
@@ -74,9 +71,9 @@ enum {
INTERFACE_MODE_MIXED,
};
-#define ARRAY_LENGTH(a) (sizeof(a) / sizeof((a)[0]))
#define OCT_ETHTOOL_REGDUMP_LEN 4096
#define OCT_ETHTOOL_REGDUMP_LEN_23XX (4096 * 11)
+#define OCT_ETHTOOL_REGDUMP_LEN_23XX_VF (4096 * 2)
#define OCT_ETHTOOL_REGSVER 1
/* statistics of PF */
@@ -87,9 +84,9 @@ static const char oct_stats_strings[][ETH_GSTRING_LEN] = {
"tx_bytes",
"rx_errors", /*jabber_err+l2_err+frame_err */
"tx_errors", /*fw_err_pko+fw_err_link+fw_err_drop */
- "rx_dropped", /*st->fromwire.total_rcvd - st->fromwire.fw_total_rcvd
- *+st->fromwire.dmac_drop + st->fromwire.fw_err_drop
- */
+ "rx_dropped", /*st->fromwire.total_rcvd - st->fromwire.fw_total_rcvd +
+ *st->fromwire.dmac_drop + st->fromwire.fw_err_drop
+ */
"tx_dropped",
"tx_total_sent",
@@ -152,6 +149,19 @@ static const char oct_stats_strings[][ETH_GSTRING_LEN] = {
"link_state_changes",
};
+/* statistics of VF */
+static const char oct_vf_stats_strings[][ETH_GSTRING_LEN] = {
+ "rx_packets",
+ "tx_packets",
+ "rx_bytes",
+ "tx_bytes",
+ "rx_errors", /* jabber_err + l2_err+frame_err */
+ "tx_errors", /* fw_err_pko + fw_err_link+fw_err_drop */
+ "rx_dropped", /* total_rcvd - fw_total_rcvd + dmac_drop + fw_err_drop */
+ "tx_dropped",
+ "link_state_changes",
+};
+
/* statistics of host tx queue */
static const char oct_iq_stats_strings[][ETH_GSTRING_LEN] = {
"packets", /*oct->instr_queue[iq_no]->stats.tx_done*/
@@ -197,25 +207,28 @@ static const char oct_priv_flags_strings[][ETH_GSTRING_LEN] = {
#define OCTNIC_NCMD_AUTONEG_ON 0x1
#define OCTNIC_NCMD_PHY_ON 0x2
-static int lio_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
+static int lio_get_link_ksettings(struct net_device *netdev,
+ struct ethtool_link_ksettings *ecmd)
{
struct lio *lio = GET_LIO(netdev);
struct octeon_device *oct = lio->oct_dev;
struct oct_link_info *linfo;
+ u32 supported, advertising;
linfo = &lio->linfo;
if (linfo->link.s.if_mode == INTERFACE_MODE_XAUI ||
linfo->link.s.if_mode == INTERFACE_MODE_RXAUI ||
linfo->link.s.if_mode == INTERFACE_MODE_XFI) {
- ecmd->port = PORT_FIBRE;
- ecmd->supported =
- (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE |
- SUPPORTED_Pause);
- ecmd->advertising =
- (ADVERTISED_10000baseT_Full | ADVERTISED_Pause);
- ecmd->transceiver = XCVR_EXTERNAL;
- ecmd->autoneg = AUTONEG_DISABLE;
+ ecmd->base.port = PORT_FIBRE;
+ supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE |
+ SUPPORTED_Pause);
+ advertising = (ADVERTISED_10000baseT_Full | ADVERTISED_Pause);
+ ethtool_convert_legacy_u32_to_link_mode(
+ ecmd->link_modes.supported, supported);
+ ethtool_convert_legacy_u32_to_link_mode(
+ ecmd->link_modes.advertising, advertising);
+ ecmd->base.autoneg = AUTONEG_DISABLE;
} else {
dev_err(&oct->pci_dev->dev, "Unknown link interface reported %d\n",
@@ -223,11 +236,11 @@ static int lio_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
}
if (linfo->link.s.link_up) {
- ethtool_cmd_speed_set(ecmd, linfo->link.s.speed);
- ecmd->duplex = linfo->link.s.duplex;
+ ecmd->base.speed = linfo->link.s.speed;
+ ecmd->base.duplex = linfo->link.s.duplex;
} else {
- ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN);
- ecmd->duplex = DUPLEX_UNKNOWN;
+ ecmd->base.speed = SPEED_UNKNOWN;
+ ecmd->base.duplex = DUPLEX_UNKNOWN;
}
return 0;
@@ -251,6 +264,23 @@ lio_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
}
static void
+lio_get_vf_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
+{
+ struct octeon_device *oct;
+ struct lio *lio;
+
+ lio = GET_LIO(netdev);
+ oct = lio->oct_dev;
+
+ memset(drvinfo, 0, sizeof(struct ethtool_drvinfo));
+ strcpy(drvinfo->driver, "liquidio_vf");
+ strcpy(drvinfo->version, LIQUIDIO_VERSION);
+ strncpy(drvinfo->fw_version, oct->fw_info.liquidio_firmware_version,
+ ETHTOOL_FWVERS_LEN);
+ strncpy(drvinfo->bus_info, pci_name(oct->pci_dev), 32);
+}
+
+static void
lio_ethtool_get_channels(struct net_device *dev,
struct ethtool_channels *channel)
{
@@ -259,14 +289,14 @@ lio_ethtool_get_channels(struct net_device *dev,
u32 max_rx = 0, max_tx = 0, tx_count = 0, rx_count = 0;
if (OCTEON_CN6XXX(oct)) {
- struct octeon_config *conf6x = CHIP_FIELD(oct, cn6xxx, conf);
+ struct octeon_config *conf6x = CHIP_CONF(oct, cn6xxx);
max_rx = CFG_GET_OQ_MAX_Q(conf6x);
max_tx = CFG_GET_IQ_MAX_Q(conf6x);
rx_count = CFG_GET_NUM_RXQS_NIC_IF(conf6x, lio->ifidx);
tx_count = CFG_GET_NUM_TXQS_NIC_IF(conf6x, lio->ifidx);
} else if (OCTEON_CN23XX_PF(oct)) {
- struct octeon_config *conf23 = CHIP_FIELD(oct, cn23xx_pf, conf);
+ struct octeon_config *conf23 = CHIP_CONF(oct, cn23xx_pf);
max_rx = CFG_GET_OQ_MAX_Q(conf23);
max_tx = CFG_GET_IQ_MAX_Q(conf23);
@@ -589,14 +619,14 @@ lio_ethtool_get_ringparam(struct net_device *netdev,
rx_pending = 0;
if (OCTEON_CN6XXX(oct)) {
- struct octeon_config *conf6x = CHIP_FIELD(oct, cn6xxx, conf);
+ struct octeon_config *conf6x = CHIP_CONF(oct, cn6xxx);
tx_max_pending = CN6XXX_MAX_IQ_DESCRIPTORS;
rx_max_pending = CN6XXX_MAX_OQ_DESCRIPTORS;
rx_pending = CFG_GET_NUM_RX_DESCS_NIC_IF(conf6x, lio->ifidx);
tx_pending = CFG_GET_NUM_TX_DESCS_NIC_IF(conf6x, lio->ifidx);
} else if (OCTEON_CN23XX_PF(oct)) {
- struct octeon_config *conf23 = CHIP_FIELD(oct, cn23xx_pf, conf);
+ struct octeon_config *conf23 = CHIP_CONF(oct, cn23xx_pf);
tx_max_pending = CN23XX_MAX_IQ_DESCRIPTORS;
rx_max_pending = CN23XX_MAX_OQ_DESCRIPTORS;
@@ -757,9 +787,6 @@ lio_get_ethtool_stats(struct net_device *netdev,
/*sum of oct->instr_queue[iq_no]->stats.tx_dropped */
data[i++] = CVM_CAST64(netstats->tx_dropped);
- /*data[i++] = CVM_CAST64(stats->multicast); */
- /*data[i++] = CVM_CAST64(stats->collisions); */
-
/* firmware tx stats */
/*per_core_stats[cvmx_get_core_num()].link_stats[mdata->from_ifidx].
*fromhost.fw_total_sent
@@ -910,9 +937,8 @@ lio_get_ethtool_stats(struct net_device *netdev,
/*lio->link_changes*/
data[i++] = CVM_CAST64(lio->link_changes);
- /* TX -- lio_update_stats(lio); */
for (j = 0; j < MAX_OCTEON_INSTR_QUEUES(oct_dev); j++) {
- if (!(oct_dev->io_qmask.iq & (1ULL << j)))
+ if (!(oct_dev->io_qmask.iq & BIT_ULL(j)))
continue;
/*packets to network port*/
/*# of packets tx to network */
@@ -954,9 +980,8 @@ lio_get_ethtool_stats(struct net_device *netdev,
}
/* RX */
- /* for (j = 0; j < oct_dev->num_oqs; j++) { */
for (j = 0; j < MAX_OCTEON_OUTPUT_QUEUES(oct_dev); j++) {
- if (!(oct_dev->io_qmask.oq & (1ULL << j)))
+ if (!(oct_dev->io_qmask.oq & BIT_ULL(j)))
continue;
/*packets send to TCP/IP network stack */
@@ -992,6 +1017,109 @@ lio_get_ethtool_stats(struct net_device *netdev,
}
}
+static void lio_vf_get_ethtool_stats(struct net_device *netdev,
+ struct ethtool_stats *stats
+ __attribute__((unused)),
+ u64 *data)
+{
+ struct net_device_stats *netstats = &netdev->stats;
+ struct lio *lio = GET_LIO(netdev);
+ struct octeon_device *oct_dev = lio->oct_dev;
+ int i = 0, j, vj;
+
+ netdev->netdev_ops->ndo_get_stats(netdev);
+ /* sum of oct->droq[oq_no]->stats->rx_pkts_received */
+ data[i++] = CVM_CAST64(netstats->rx_packets);
+ /* sum of oct->instr_queue[iq_no]->stats.tx_done */
+ data[i++] = CVM_CAST64(netstats->tx_packets);
+ /* sum of oct->droq[oq_no]->stats->rx_bytes_received */
+ data[i++] = CVM_CAST64(netstats->rx_bytes);
+ /* sum of oct->instr_queue[iq_no]->stats.tx_tot_bytes */
+ data[i++] = CVM_CAST64(netstats->tx_bytes);
+ data[i++] = CVM_CAST64(netstats->rx_errors);
+ data[i++] = CVM_CAST64(netstats->tx_errors);
+ /* sum of oct->droq[oq_no]->stats->rx_dropped +
+ * oct->droq[oq_no]->stats->dropped_nodispatch +
+ * oct->droq[oq_no]->stats->dropped_toomany +
+ * oct->droq[oq_no]->stats->dropped_nomem
+ */
+ data[i++] = CVM_CAST64(netstats->rx_dropped);
+ /* sum of oct->instr_queue[iq_no]->stats.tx_dropped */
+ data[i++] = CVM_CAST64(netstats->tx_dropped);
+ /* lio->link_changes */
+ data[i++] = CVM_CAST64(lio->link_changes);
+
+ for (vj = 0; vj < lio->linfo.num_txpciq; vj++) {
+ j = lio->linfo.txpciq[vj].s.q_no;
+
+ /* packets to network port */
+ /* # of packets tx to network */
+ data[i++] = CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_done);
+ /* # of bytes tx to network */
+ data[i++] = CVM_CAST64(
+ oct_dev->instr_queue[j]->stats.tx_tot_bytes);
+ /* # of packets dropped */
+ data[i++] = CVM_CAST64(
+ oct_dev->instr_queue[j]->stats.tx_dropped);
+ /* # of tx fails due to queue full */
+ data[i++] = CVM_CAST64(
+ oct_dev->instr_queue[j]->stats.tx_iq_busy);
+ /* XXX gather entries sent */
+ data[i++] = CVM_CAST64(
+ oct_dev->instr_queue[j]->stats.sgentry_sent);
+
+ /* instruction to firmware: data and control */
+ /* # of instructions to the queue */
+ data[i++] = CVM_CAST64(
+ oct_dev->instr_queue[j]->stats.instr_posted);
+ /* # of instructions processed */
+ data[i++] =
+ CVM_CAST64(oct_dev->instr_queue[j]->stats.instr_processed);
+ /* # of instructions could not be processed */
+ data[i++] =
+ CVM_CAST64(oct_dev->instr_queue[j]->stats.instr_dropped);
+ /* bytes sent through the queue */
+ data[i++] = CVM_CAST64(
+ oct_dev->instr_queue[j]->stats.bytes_sent);
+ /* tso request */
+ data[i++] = CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_gso);
+ /* vxlan request */
+ data[i++] = CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_vxlan);
+ /* txq restart */
+ data[i++] = CVM_CAST64(
+ oct_dev->instr_queue[j]->stats.tx_restart);
+ }
+
+ /* RX */
+ for (vj = 0; vj < lio->linfo.num_rxpciq; vj++) {
+ j = lio->linfo.rxpciq[vj].s.q_no;
+
+ /* packets send to TCP/IP network stack */
+ /* # of packets to network stack */
+ data[i++] = CVM_CAST64(
+ oct_dev->droq[j]->stats.rx_pkts_received);
+ /* # of bytes to network stack */
+ data[i++] = CVM_CAST64(
+ oct_dev->droq[j]->stats.rx_bytes_received);
+ data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.dropped_nomem +
+ oct_dev->droq[j]->stats.dropped_toomany +
+ oct_dev->droq[j]->stats.rx_dropped);
+ data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.dropped_nomem);
+ data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.dropped_toomany);
+ data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.rx_dropped);
+
+ /* control and data path */
+ data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.pkts_received);
+ data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.bytes_received);
+ data[i++] =
+ CVM_CAST64(oct_dev->droq[j]->stats.dropped_nodispatch);
+
+ data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.rx_vxlan);
+ data[i++] =
+ CVM_CAST64(oct_dev->droq[j]->stats.rx_alloc_failure);
+ }
+}
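
The values written here must stay in lockstep with the string tables, or `ethtool -S` output is misattributed. A hypothetical sanity check (not in the patch) that could be placed at the end of the function above, assuming num_txpciq/num_rxpciq match the queue counts reported by lio_vf_get_sset_count():

	WARN_ON(i != lio_vf_get_sset_count(netdev, ETH_SS_STATS));
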
+
static void lio_get_priv_flags_strings(struct lio *lio, u8 *data)
{
struct octeon_device *oct_dev = lio->oct_dev;
@@ -999,6 +1127,7 @@ static void lio_get_priv_flags_strings(struct lio *lio, u8 *data)
switch (oct_dev->chip_id) {
case OCTEON_CN23XX_PF_VID:
+ case OCTEON_CN23XX_VF_VID:
for (i = 0; i < ARRAY_SIZE(oct_priv_flags_strings); i++) {
sprintf(data, "%s", oct_priv_flags_strings[i]);
data += ETH_GSTRING_LEN;
@@ -1030,7 +1159,55 @@ static void lio_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
num_iq_stats = ARRAY_SIZE(oct_iq_stats_strings);
for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct_dev); i++) {
- if (!(oct_dev->io_qmask.iq & (1ULL << i)))
+ if (!(oct_dev->io_qmask.iq & BIT_ULL(i)))
+ continue;
+ for (j = 0; j < num_iq_stats; j++) {
+ sprintf(data, "tx-%d-%s", i,
+ oct_iq_stats_strings[j]);
+ data += ETH_GSTRING_LEN;
+ }
+ }
+
+ num_oq_stats = ARRAY_SIZE(oct_droq_stats_strings);
+ for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct_dev); i++) {
+ if (!(oct_dev->io_qmask.oq & BIT_ULL(i)))
+ continue;
+ for (j = 0; j < num_oq_stats; j++) {
+ sprintf(data, "rx-%d-%s", i,
+ oct_droq_stats_strings[j]);
+ data += ETH_GSTRING_LEN;
+ }
+ }
+ break;
+
+ case ETH_SS_PRIV_FLAGS:
+ lio_get_priv_flags_strings(lio, data);
+ break;
+ default:
+ netif_info(lio, drv, lio->netdev, "Unknown stringset\n");
+ break;
+ }
+}
+
+static void lio_vf_get_strings(struct net_device *netdev, u32 stringset,
+ u8 *data)
+{
+ int num_iq_stats, num_oq_stats, i, j;
+ struct lio *lio = GET_LIO(netdev);
+ struct octeon_device *oct_dev = lio->oct_dev;
+ int num_stats;
+
+ switch (stringset) {
+ case ETH_SS_STATS:
+ num_stats = ARRAY_SIZE(oct_vf_stats_strings);
+ for (j = 0; j < num_stats; j++) {
+ sprintf(data, "%s", oct_vf_stats_strings[j]);
+ data += ETH_GSTRING_LEN;
+ }
+
+ num_iq_stats = ARRAY_SIZE(oct_iq_stats_strings);
+ for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct_dev); i++) {
+ if (!(oct_dev->io_qmask.iq & BIT_ULL(i)))
continue;
for (j = 0; j < num_iq_stats; j++) {
sprintf(data, "tx-%d-%s", i,
@@ -1040,9 +1217,8 @@ static void lio_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
}
num_oq_stats = ARRAY_SIZE(oct_droq_stats_strings);
- /* for (i = 0; i < oct_dev->num_oqs; i++) { */
for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct_dev); i++) {
- if (!(oct_dev->io_qmask.oq & (1ULL << i)))
+ if (!(oct_dev->io_qmask.oq & BIT_ULL(i)))
continue;
for (j = 0; j < num_oq_stats; j++) {
sprintf(data, "rx-%d-%s", i,
@@ -1067,6 +1243,7 @@ static int lio_get_priv_flags_ss_count(struct lio *lio)
switch (oct_dev->chip_id) {
case OCTEON_CN23XX_PF_VID:
+ case OCTEON_CN23XX_VF_VID:
return ARRAY_SIZE(oct_priv_flags_strings);
case OCTEON_CN68XX:
case OCTEON_CN66XX:
@@ -1094,6 +1271,23 @@ static int lio_get_sset_count(struct net_device *netdev, int sset)
}
}
+static int lio_vf_get_sset_count(struct net_device *netdev, int sset)
+{
+ struct lio *lio = GET_LIO(netdev);
+ struct octeon_device *oct_dev = lio->oct_dev;
+
+ switch (sset) {
+ case ETH_SS_STATS:
+ return (ARRAY_SIZE(oct_vf_stats_strings) +
+ ARRAY_SIZE(oct_iq_stats_strings) * oct_dev->num_iqs +
+ ARRAY_SIZE(oct_droq_stats_strings) * oct_dev->num_oqs);
+ case ETH_SS_PRIV_FLAGS:
+ return lio_get_priv_flags_ss_count(lio);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
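
As a worked example (the array sizes here are hypothetical, since the string tables are defined earlier in the file): with 12 entries in oct_vf_stats_strings, 14 in oct_iq_stats_strings and 14 in oct_droq_stats_strings, a VF running with num_iqs = num_oqs = 8 reports 12 + 14 * 8 + 14 * 8 = 236 values for ETH_SS_STATS, which is exactly the number of u64 slots lio_vf_get_ethtool_stats must fill, in the order lio_vf_get_strings emits the names.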
+
static int lio_get_intr_coalesce(struct net_device *netdev,
struct ethtool_coalesce *intr_coal)
{
@@ -1106,6 +1300,7 @@ static int lio_get_intr_coalesce(struct net_device *netdev,
switch (oct->chip_id) {
case OCTEON_CN23XX_PF_VID:
+ case OCTEON_CN23XX_VF_VID:
if (!intrmod_cfg->rx_enable) {
intr_coal->rx_coalesce_usecs = intrmod_cfg->rx_usecs;
intr_coal->rx_max_coalesced_frames =
@@ -1152,7 +1347,7 @@ static int lio_get_intr_coalesce(struct net_device *netdev,
intr_coal->rx_max_coalesced_frames_low =
intrmod_cfg->rx_mincnt_trigger;
}
- if (OCTEON_CN23XX_PF(oct) &&
+ if ((OCTEON_CN23XX_PF(oct) || OCTEON_CN23XX_VF(oct)) &&
(intrmod_cfg->tx_enable)) {
intr_coal->use_adaptive_tx_coalesce = intrmod_cfg->tx_enable;
intr_coal->tx_max_coalesced_frames_high =
@@ -1510,6 +1705,26 @@ oct_cfg_rx_intrcnt(struct lio *lio, struct ethtool_coalesce *intr_coal)
oct->intrmod.rx_frames = rx_max_coalesced_frames;
break;
}
+ case OCTEON_CN23XX_VF_VID: {
+ int q_no;
+
+ if (!intr_coal->rx_max_coalesced_frames)
+ rx_max_coalesced_frames = oct->intrmod.rx_frames;
+ else
+ rx_max_coalesced_frames =
+ intr_coal->rx_max_coalesced_frames;
+ for (q_no = 0; q_no < oct->num_oqs; q_no++) {
+ octeon_write_csr64(
+ oct, CN23XX_VF_SLI_OQ_PKT_INT_LEVELS(q_no),
+ (octeon_read_csr64(
+ oct, CN23XX_VF_SLI_OQ_PKT_INT_LEVELS(q_no)) &
+ (0x3fffff00000000UL)) |
+ rx_max_coalesced_frames);
+ /* consider writing to resend bit here */
+ }
+ oct->intrmod.rx_frames = rx_max_coalesced_frames;
+ break;
+ }
default:
return -EINVAL;
}
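
The CN23XX VF case above is a read-modify-write: the mask 0x3fffff00000000UL preserves the time-threshold field of SLI_OQ_PKT_INT_LEVELS while the low bits take the new packet-count trigger. The same pattern in isolation (a sketch reusing the register accessors seen above; the helper name is hypothetical):

    /* Update only the packet-count field of the OQ interrupt-levels
     * register, leaving the time-threshold bits untouched.
     */
    static void set_oq_pkt_threshold(struct octeon_device *oct, int q_no,
                                     u32 frames)
    {
        u64 reg = CN23XX_VF_SLI_OQ_PKT_INT_LEVELS(q_no);
        u64 val = octeon_read_csr64(oct, reg);

        val &= 0x3fffff00000000UL;      /* keep time threshold */
        val |= frames;                  /* new packet-count trigger */
        octeon_write_csr64(oct, reg, val);
    }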
@@ -1563,6 +1778,27 @@ static int oct_cfg_rx_intrtime(struct lio *lio,
oct->intrmod.rx_usecs = rx_coalesce_usecs;
break;
}
+ case OCTEON_CN23XX_VF_VID: {
+ u64 time_threshold;
+ int q_no;
+
+ if (!intr_coal->rx_coalesce_usecs)
+ rx_coalesce_usecs = oct->intrmod.rx_usecs;
+ else
+ rx_coalesce_usecs = intr_coal->rx_coalesce_usecs;
+
+ time_threshold =
+ cn23xx_vf_get_oq_ticks(oct, (u32)rx_coalesce_usecs);
+ for (q_no = 0; q_no < oct->num_oqs; q_no++) {
+ octeon_write_csr64(
+ oct, CN23XX_VF_SLI_OQ_PKT_INT_LEVELS(q_no),
+ (oct->intrmod.rx_frames |
+ (time_threshold << 32)));
+ /* consider setting resend bit */
+ }
+ oct->intrmod.rx_usecs = rx_coalesce_usecs;
+ break;
+ }
default:
return -EINVAL;
}
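
The companion time-threshold path above packs both triggers into a single 64-bit register write: the packet-count trigger in the low 32 bits and the tick-converted time threshold in the upper bits. A one-line sketch of that packing:

    /* Sketch: INT_LEVELS = frames (low 32 bits) | ticks << 32. */
    static u64 pack_int_levels(u32 frames, u64 time_threshold_ticks)
    {
        return (u64)frames | (time_threshold_ticks << 32);
    }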
@@ -1584,6 +1820,7 @@ oct_cfg_tx_intrcnt(struct lio *lio, struct ethtool_coalesce *intr_coal
case OCTEON_CN68XX:
case OCTEON_CN66XX:
break;
+ case OCTEON_CN23XX_VF_VID:
case OCTEON_CN23XX_PF_VID: {
int q_no;
@@ -1642,6 +1879,7 @@ static int lio_set_intr_coalesce(struct net_device *netdev,
}
break;
case OCTEON_CN23XX_PF_VID:
+ case OCTEON_CN23XX_VF_VID:
break;
default:
return -EINVAL;
@@ -1704,86 +1942,6 @@ static int lio_get_ts_info(struct net_device *netdev,
return 0;
}
-static int lio_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
-{
- struct lio *lio = GET_LIO(netdev);
- struct octeon_device *oct = lio->oct_dev;
- struct oct_link_info *linfo;
- struct octnic_ctrl_pkt nctrl;
- int ret = 0;
-
- /* get the link info */
- linfo = &lio->linfo;
-
- if (ecmd->autoneg != AUTONEG_ENABLE && ecmd->autoneg != AUTONEG_DISABLE)
- return -EINVAL;
-
- if (ecmd->autoneg == AUTONEG_DISABLE && ((ecmd->speed != SPEED_100 &&
- ecmd->speed != SPEED_10) ||
- (ecmd->duplex != DUPLEX_HALF &&
- ecmd->duplex != DUPLEX_FULL)))
- return -EINVAL;
-
- /* Ethtool Support is not provided for XAUI, RXAUI, and XFI Interfaces
- * as they operate at fixed Speed and Duplex settings
- */
- if (linfo->link.s.if_mode == INTERFACE_MODE_XAUI ||
- linfo->link.s.if_mode == INTERFACE_MODE_RXAUI ||
- linfo->link.s.if_mode == INTERFACE_MODE_XFI) {
- dev_info(&oct->pci_dev->dev,
- "Autonegotiation, duplex and speed settings cannot be modified.\n");
- return -EINVAL;
- }
-
- memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
-
- nctrl.ncmd.u64 = 0;
- nctrl.ncmd.s.cmd = OCTNET_CMD_SET_SETTINGS;
- nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
- nctrl.wait_time = 1000;
- nctrl.netpndev = (u64)netdev;
- nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
-
- /* Passing the parameters sent by ethtool like Speed, Autoneg & Duplex
- * to SE core application using ncmd.s.more & ncmd.s.param
- */
- if (ecmd->autoneg == AUTONEG_ENABLE) {
- /* Autoneg ON */
- nctrl.ncmd.s.more = OCTNIC_NCMD_PHY_ON |
- OCTNIC_NCMD_AUTONEG_ON;
- nctrl.ncmd.s.param1 = ecmd->advertising;
- } else {
- /* Autoneg OFF */
- nctrl.ncmd.s.more = OCTNIC_NCMD_PHY_ON;
-
- nctrl.ncmd.s.param2 = ecmd->duplex;
-
- nctrl.ncmd.s.param1 = ecmd->speed;
- }
-
- ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
- if (ret < 0) {
- dev_err(&oct->pci_dev->dev, "Failed to set settings\n");
- return -1;
- }
-
- return 0;
-}
-
-static int lio_nway_reset(struct net_device *netdev)
-{
- if (netif_running(netdev)) {
- struct ethtool_cmd ecmd;
-
- memset(&ecmd, 0, sizeof(struct ethtool_cmd));
- ecmd.autoneg = 0;
- ecmd.speed = 0;
- ecmd.duplex = 0;
- lio_set_settings(netdev, &ecmd);
- }
- return 0;
-}
-
/* Return register dump len. */
static int lio_get_regs_len(struct net_device *dev)
{
@@ -1793,6 +1951,8 @@ static int lio_get_regs_len(struct net_device *dev)
switch (oct->chip_id) {
case OCTEON_CN23XX_PF_VID:
return OCT_ETHTOOL_REGDUMP_LEN_23XX;
+ case OCTEON_CN23XX_VF_VID:
+ return OCT_ETHTOOL_REGDUMP_LEN_23XX_VF;
default:
return OCT_ETHTOOL_REGDUMP_LEN;
}
@@ -2018,6 +2178,123 @@ static int cn23xx_read_csr_reg(char *s, struct octeon_device *oct)
return len;
}
+static int cn23xx_vf_read_csr_reg(char *s, struct octeon_device *oct)
+{
+ int len = 0;
+ u32 reg;
+ int i;
+
+ /* PCI Window Registers */
+
+ len += sprintf(s + len, "\n\t Octeon CSR Registers\n\n");
+
+ for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
+ reg = CN23XX_VF_SLI_OQ_BUFF_INFO_SIZE(i);
+ len += sprintf(s + len,
+ "\n[%08x] (SLI_PKT%d_OUT_SIZE): %016llx\n",
+ reg, i, (u64)octeon_read_csr64(oct, reg));
+ }
+
+ for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
+ reg = CN23XX_VF_SLI_IQ_INSTR_COUNT64(i);
+ len += sprintf(s + len,
+ "\n[%08x] (SLI_PKT_IN_DONE%d_CNTS): %016llx\n",
+ reg, i, (u64)octeon_read_csr64(oct, reg));
+ }
+
+ for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
+ reg = CN23XX_VF_SLI_OQ_PKTS_CREDIT(i);
+ len += sprintf(s + len,
+ "\n[%08x] (SLI_PKT%d_SLIST_BAOFF_DBELL): %016llx\n",
+ reg, i, (u64)octeon_read_csr64(oct, reg));
+ }
+
+ for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
+ reg = CN23XX_VF_SLI_OQ_SIZE(i);
+ len += sprintf(s + len,
+ "\n[%08x] (SLI_PKT%d_SLIST_FIFO_RSIZE): %016llx\n",
+ reg, i, (u64)octeon_read_csr64(oct, reg));
+ }
+
+ for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
+ reg = CN23XX_VF_SLI_OQ_PKT_CONTROL(i);
+ len += sprintf(s + len,
+ "\n[%08x] (SLI_PKT%d__OUTPUT_CONTROL): %016llx\n",
+ reg, i, (u64)octeon_read_csr64(oct, reg));
+ }
+
+ for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
+ reg = CN23XX_VF_SLI_OQ_BASE_ADDR64(i);
+ len += sprintf(s + len,
+ "\n[%08x] (SLI_PKT%d_SLIST_BADDR): %016llx\n",
+ reg, i, (u64)octeon_read_csr64(oct, reg));
+ }
+
+ for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
+ reg = CN23XX_VF_SLI_OQ_PKT_INT_LEVELS(i);
+ len += sprintf(s + len,
+ "\n[%08x] (SLI_PKT%d_INT_LEVELS): %016llx\n",
+ reg, i, (u64)octeon_read_csr64(oct, reg));
+ }
+
+ for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
+ reg = CN23XX_VF_SLI_OQ_PKTS_SENT(i);
+ len += sprintf(s + len, "\n[%08x] (SLI_PKT%d_CNTS): %016llx\n",
+ reg, i, (u64)octeon_read_csr64(oct, reg));
+ }
+
+ for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
+ reg = 0x100c0 + i * CN23XX_VF_OQ_OFFSET;
+ len += sprintf(s + len,
+ "\n[%08x] (SLI_PKT%d_ERROR_INFO): %016llx\n",
+ reg, i, (u64)octeon_read_csr64(oct, reg));
+ }
+
+ for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
+ reg = 0x100d0 + i * CN23XX_VF_IQ_OFFSET;
+ len += sprintf(s + len,
+ "\n[%08x] (SLI_PKT%d_VF_INT_SUM): %016llx\n",
+ reg, i, (u64)octeon_read_csr64(oct, reg));
+ }
+
+ for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
+ reg = CN23XX_VF_SLI_IQ_PKT_CONTROL64(i);
+ len += sprintf(s + len,
+ "\n[%08x] (SLI_PKT%d_INPUT_CONTROL): %016llx\n",
+ reg, i, (u64)octeon_read_csr64(oct, reg));
+ }
+
+ for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
+ reg = CN23XX_VF_SLI_IQ_BASE_ADDR64(i);
+ len += sprintf(s + len,
+ "\n[%08x] (SLI_PKT%d_INSTR_BADDR): %016llx\n",
+ reg, i, (u64)octeon_read_csr64(oct, reg));
+ }
+
+ for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
+ reg = CN23XX_VF_SLI_IQ_DOORBELL(i);
+ len += sprintf(s + len,
+ "\n[%08x] (SLI_PKT%d_INSTR_BAOFF_DBELL): %016llx\n",
+ reg, i, (u64)octeon_read_csr64(oct, reg));
+ }
+
+ for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
+ reg = CN23XX_VF_SLI_IQ_SIZE(i);
+ len += sprintf(s + len,
+ "\n[%08x] (SLI_PKT%d_INSTR_FIFO_RSIZE): %016llx\n",
+ reg, i, (u64)octeon_read_csr64(oct, reg));
+ }
+
+ for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
+ reg = CN23XX_VF_SLI_IQ_INSTR_COUNT64(i);
+ len += sprintf(s + len,
+ "\n[%08x] (SLI_PKT_IN_DONE%d_CNTS): %016llx\n",
+ reg, i, (u64)octeon_read_csr64(oct, reg));
+ }
+
+ return len;
+}
+
static int cn6xxx_read_csr_reg(char *s, struct octeon_device *oct)
{
u32 reg;
@@ -2164,6 +2441,10 @@ static void lio_get_regs(struct net_device *dev,
memset(regbuf, 0, OCT_ETHTOOL_REGDUMP_LEN_23XX);
len += cn23xx_read_csr_reg(regbuf + len, oct);
break;
+ case OCTEON_CN23XX_VF_VID:
+ memset(regbuf, 0, OCT_ETHTOOL_REGDUMP_LEN_23XX_VF);
+ len += cn23xx_vf_read_csr_reg(regbuf + len, oct);
+ break;
case OCTEON_CN68XX:
case OCTEON_CN66XX:
memset(regbuf, 0, OCT_ETHTOOL_REGDUMP_LEN);
@@ -2194,7 +2475,7 @@ static int lio_set_priv_flags(struct net_device *netdev, u32 flags)
}
static const struct ethtool_ops lio_ethtool_ops = {
- .get_settings = lio_get_settings,
+ .get_link_ksettings = lio_get_link_ksettings,
.get_link = ethtool_op_get_link,
.get_drvinfo = lio_get_drvinfo,
.get_ringparam = lio_ethtool_get_ringparam,
@@ -2211,8 +2492,26 @@ static const struct ethtool_ops lio_ethtool_ops = {
.get_msglevel = lio_get_msglevel,
.set_msglevel = lio_set_msglevel,
.get_sset_count = lio_get_sset_count,
- .nway_reset = lio_nway_reset,
- .set_settings = lio_set_settings,
+ .get_coalesce = lio_get_intr_coalesce,
+ .set_coalesce = lio_set_intr_coalesce,
+ .get_priv_flags = lio_get_priv_flags,
+ .set_priv_flags = lio_set_priv_flags,
+ .get_ts_info = lio_get_ts_info,
+};
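
This table (and the VF variant that follows) completes the move away from the legacy get_settings/set_settings pair, removed above, to the link_ksettings API. For orientation, a minimal sketch of what a get_link_ksettings callback looks like in general (the field values are hypothetical; lio_get_link_ksettings itself is defined elsewhere in this file):

    static int my_get_link_ksettings(struct net_device *dev,
                                     struct ethtool_link_ksettings *ks)
    {
        ethtool_link_ksettings_zero_link_mode(ks, supported);
        ethtool_link_ksettings_add_link_mode(ks, supported,
                                             10000baseT_Full);

        ks->base.speed = SPEED_10000;
        ks->base.duplex = DUPLEX_FULL;
        ks->base.autoneg = AUTONEG_DISABLE;
        ks->base.port = PORT_FIBRE;
        return 0;
    }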
+
+static const struct ethtool_ops lio_vf_ethtool_ops = {
+ .get_link_ksettings = lio_get_link_ksettings,
+ .get_link = ethtool_op_get_link,
+ .get_drvinfo = lio_get_vf_drvinfo,
+ .get_ringparam = lio_ethtool_get_ringparam,
+ .get_channels = lio_ethtool_get_channels,
+ .get_strings = lio_vf_get_strings,
+ .get_ethtool_stats = lio_vf_get_ethtool_stats,
+ .get_regs_len = lio_get_regs_len,
+ .get_regs = lio_get_regs,
+ .get_msglevel = lio_get_msglevel,
+ .set_msglevel = lio_set_msglevel,
+ .get_sset_count = lio_vf_get_sset_count,
.get_coalesce = lio_get_intr_coalesce,
.set_coalesce = lio_set_intr_coalesce,
.get_priv_flags = lio_get_priv_flags,
@@ -2222,5 +2521,11 @@ static const struct ethtool_ops lio_ethtool_ops = {
void liquidio_set_ethtool_ops(struct net_device *netdev)
{
- netdev->ethtool_ops = &lio_ethtool_ops;
+ struct lio *lio = GET_LIO(netdev);
+ struct octeon_device *oct = lio->oct_dev;
+
+ if (OCTEON_CN23XX_VF(oct))
+ netdev->ethtool_ops = &lio_vf_ethtool_ops;
+ else
+ netdev->ethtool_ops = &lio_ethtool_ops;
}
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c
index afc6f9dc8119..39a9665c9d00 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_main.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c
@@ -1,28 +1,22 @@
/**********************************************************************
-* Author: Cavium, Inc.
-*
-* Contact: support@cavium.com
-* Please include "LiquidIO" in the subject.
-*
-* Copyright (c) 2003-2015 Cavium, Inc.
-*
-* This file is free software; you can redistribute it and/or modify
-* it under the terms of the GNU General Public License, Version 2, as
-* published by the Free Software Foundation.
-*
-* This file is distributed in the hope that it will be useful, but
-* AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
-* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
-* NONINFRINGEMENT. See the GNU General Public License for more
-* details.
-*
-* This file may also be available under a different license from Cavium.
-* Contact Cavium, Inc. for more information
-**********************************************************************/
-#include <linux/version.h>
+ * Author: Cavium, Inc.
+ *
+ * Contact: support@cavium.com
+ * Please include "LiquidIO" in the subject.
+ *
+ * Copyright (c) 2003-2016 Cavium, Inc.
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more details.
+ ***********************************************************************/
#include <linux/pci.h>
#include <linux/firmware.h>
-#include <linux/ptp_clock_kernel.h>
#include <net/vxlan.h>
#include <linux/kthread.h>
#include "liquidio_common.h"
@@ -46,6 +40,7 @@ MODULE_VERSION(LIQUIDIO_VERSION);
MODULE_FIRMWARE(LIO_FW_DIR LIO_FW_BASE_NAME LIO_210SV_NAME LIO_FW_NAME_SUFFIX);
MODULE_FIRMWARE(LIO_FW_DIR LIO_FW_BASE_NAME LIO_210NV_NAME LIO_FW_NAME_SUFFIX);
MODULE_FIRMWARE(LIO_FW_DIR LIO_FW_BASE_NAME LIO_410NV_NAME LIO_FW_NAME_SUFFIX);
+MODULE_FIRMWARE(LIO_FW_DIR LIO_FW_BASE_NAME LIO_23XX_NAME LIO_FW_NAME_SUFFIX);
static int ddr_timeout = 10000;
module_param(ddr_timeout, int, 0644);
@@ -54,9 +49,6 @@ MODULE_PARM_DESC(ddr_timeout,
#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK)
-#define INCR_INSTRQUEUE_PKT_COUNT(octeon_dev_ptr, iq_no, field, count) \
- (octeon_dev_ptr->instr_queue[iq_no]->stats.field += count)
-
static int debug = -1;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "NETIF_MSG debug bits");
@@ -65,10 +57,6 @@ static char fw_type[LIO_MAX_FW_TYPE_LEN];
module_param_string(fw_type, fw_type, sizeof(fw_type), 0000);
MODULE_PARM_DESC(fw_type, "Type of firmware to be loaded. Default \"nic\"");
-static int conf_type;
-module_param(conf_type, int, 0);
-MODULE_PARM_DESC(conf_type, "select octeon configuration 0 default 1 ovs");
-
static int ptp_enable = 1;
/* Bit mask values for lio->ifstate */
@@ -180,6 +168,10 @@ struct octeon_device_priv {
unsigned long napi_mask;
};
+#ifdef CONFIG_PCI_IOV
+static int liquidio_enable_sriov(struct pci_dev *dev, int num_vfs);
+#endif
+
static int octeon_device_init(struct octeon_device *);
static int liquidio_stop(struct net_device *netdev);
static void liquidio_remove(struct pci_dev *pdev);
@@ -197,9 +189,8 @@ static void octeon_droq_bh(unsigned long pdev)
struct octeon_device_priv *oct_priv =
(struct octeon_device_priv *)oct->priv;
- /* for (q_no = 0; q_no < oct->num_oqs; q_no++) { */
for (q_no = 0; q_no < MAX_OCTEON_OUTPUT_QUEUES(oct); q_no++) {
- if (!(oct->io_qmask.oq & (1ULL << q_no)))
+ if (!(oct->io_qmask.oq & BIT_ULL(q_no)))
continue;
reschedule |= octeon_droq_process_packets(oct, oct->droq[q_no],
MAX_PACKET_BUDGET);
@@ -234,7 +225,7 @@ static int lio_wait_for_oq_pkts(struct octeon_device *oct)
pending_pkts = 0;
for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) {
- if (!(oct->io_qmask.oq & (1ULL << i)))
+ if (!(oct->io_qmask.oq & BIT_ULL(i)))
continue;
pkt_cnt += octeon_droq_check_hw_for_pkts(oct->droq[i]);
}
@@ -316,7 +307,7 @@ static inline void pcierror_quiesce_device(struct octeon_device *oct)
for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) {
struct octeon_instr_queue *iq;
- if (!(oct->io_qmask.iq & (1ULL << i)))
+ if (!(oct->io_qmask.iq & BIT_ULL(i)))
continue;
iq = oct->instr_queue[i];
@@ -382,7 +373,6 @@ static void stop_pci_io(struct octeon_device *oct)
dev_dbg(&oct->pci_dev->dev, "Device state is now %s\n",
lio_get_state_string(&oct->status));
- /* cn63xx_cleanup_aer_uncorrect_error_status(oct->pci_dev); */
/* making it a common function for all OCTEON models */
cleanup_aer_uncorrect_error_status(oct->pci_dev);
}
@@ -518,6 +508,9 @@ static struct pci_driver liquidio_pci_driver = {
.suspend = liquidio_suspend,
.resume = liquidio_resume,
#endif
+#ifdef CONFIG_PCI_IOV
+ .sriov_configure = liquidio_enable_sriov,
+#endif
};
/**
@@ -763,6 +756,7 @@ static void delete_glists(struct lio *lio)
}
kfree((void *)lio->glist);
+ kfree((void *)lio->glist_lock);
}
/**
@@ -933,7 +927,6 @@ static inline void update_link_status(struct net_device *netdev,
if (lio->linfo.link.s.link_up) {
netif_carrier_on(netdev);
- /* start_txq(netdev); */
txqs_wake(netdev);
} else {
netif_carrier_off(netdev);
@@ -1011,7 +1004,7 @@ static void liquidio_schedule_droq_pkt_handlers(struct octeon_device *oct)
if (oct->int_status & OCT_DEV_INTR_PKT_DATA) {
for (oq_no = 0; oq_no < MAX_OCTEON_OUTPUT_QUEUES(oct);
oq_no++) {
- if (!(oct->droq_intr & (1ULL << oq_no)))
+ if (!(oct->droq_intr & BIT_ULL(oq_no)))
continue;
droq = oct->droq[oq_no];
@@ -1322,6 +1315,7 @@ liquidio_probe(struct pci_dev *pdev,
complete(&first_stage);
if (octeon_device_init(oct_dev)) {
+ complete(&hs->init);
liquidio_remove(pdev);
return -ENOMEM;
}
@@ -1346,7 +1340,15 @@ liquidio_probe(struct pci_dev *pdev,
oct_dev->watchdog_task = kthread_create(
liquidio_watchdog, oct_dev,
"liowd/%02hhx:%02hhx.%hhx", bus, device, function);
- wake_up_process(oct_dev->watchdog_task);
+ if (!IS_ERR(oct_dev->watchdog_task)) {
+ wake_up_process(oct_dev->watchdog_task);
+ } else {
+ oct_dev->watchdog_task = NULL;
+ dev_err(&oct_dev->pci_dev->dev,
+ "failed to create kernel_thread\n");
+ liquidio_remove(pdev);
+ return -1;
+ }
}
}
@@ -1410,6 +1412,8 @@ static void octeon_destroy_resources(struct octeon_device *oct)
if (lio_wait_for_oq_pkts(oct))
dev_err(&oct->pci_dev->dev, "OQ had pending packets\n");
+ /* fallthrough */
+ case OCT_DEV_INTR_SET_DONE:
/* Disable interrupts */
oct->fn_list.disable_interrupt(oct, OCTEON_ALL_INTR);
@@ -1436,12 +1440,20 @@ static void octeon_destroy_resources(struct octeon_device *oct)
pci_disable_msi(oct->pci_dev);
}
+ /* fallthrough */
+ case OCT_DEV_MSIX_ALLOC_VECTOR_DONE:
if (OCTEON_CN23XX_PF(oct))
octeon_free_ioq_vector(oct);
+
+ /* fallthrough */
+ case OCT_DEV_MBOX_SETUP_DONE:
+ if (OCTEON_CN23XX_PF(oct))
+ oct->fn_list.free_mbox(oct);
+
/* fallthrough */
case OCT_DEV_IN_RESET:
case OCT_DEV_DROQ_INIT_DONE:
- /*atomic_set(&oct->status, OCT_DEV_DROQ_INIT_DONE);*/
+ /* Wait for any pending operations */
mdelay(100);
for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) {
if (!(oct->io_qmask.oq & BIT_ULL(i)))
@@ -1472,6 +1484,10 @@ static void octeon_destroy_resources(struct octeon_device *oct)
continue;
octeon_delete_instr_queue(oct, i);
}
+#ifdef CONFIG_PCI_IOV
+ if (oct->sriov_info.sriov_enabled)
+ pci_disable_sriov(oct->pci_dev);
+#endif
/* fallthrough */
case OCT_DEV_SC_BUFF_POOL_INIT_DONE:
octeon_free_sc_buffer_pool(oct);
@@ -1491,10 +1507,13 @@ static void octeon_destroy_resources(struct octeon_device *oct)
octeon_unmap_pci_barx(oct, 1);
/* fallthrough */
- case OCT_DEV_BEGIN_STATE:
+ case OCT_DEV_PCI_ENABLE_DONE:
+ pci_clear_master(oct->pci_dev);
/* Disable the device, releasing the PCI INT */
pci_disable_device(oct->pci_dev);
+ /* fallthrough */
+ case OCT_DEV_BEGIN_STATE:
/* Nothing to be done here either */
break;
} /* end switch (oct->status) */
@@ -1764,6 +1783,7 @@ static int octeon_pci_os_setup(struct octeon_device *oct)
if (dma_set_mask_and_coherent(&oct->pci_dev->dev, DMA_BIT_MASK(64))) {
dev_err(&oct->pci_dev->dev, "Unexpected DMA device capability\n");
+ pci_disable_device(oct->pci_dev);
return 1;
}
@@ -2426,7 +2446,6 @@ static int liquidio_napi_poll(struct napi_struct *napi, int budget)
* Return back if tx_done is false.
*/
update_txq_status(oct, iq_no);
- /*tx_done = (iq->flush_index == iq->octeon_read_index);*/
} else {
dev_err(&oct->pci_dev->dev, "%s: iq (%d) num invalid\n",
__func__, iq_no);
@@ -2868,17 +2887,6 @@ static int liquidio_change_mtu(struct net_device *netdev, int new_mtu)
struct octnic_ctrl_pkt nctrl;
int ret = 0;
- /* Limit the MTU to make sure the ethernet packets are between 68 bytes
- * and 16000 bytes
- */
- if ((new_mtu < LIO_MIN_MTU_SIZE) ||
- (new_mtu > LIO_MAX_MTU_SIZE)) {
- dev_err(&oct->pci_dev->dev, "Invalid MTU: %d\n", new_mtu);
- dev_err(&oct->pci_dev->dev, "Valid range %d and %d\n",
- LIO_MIN_MTU_SIZE, LIO_MAX_MTU_SIZE);
- return -EINVAL;
- }
-
memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
nctrl.ncmd.u64 = 0;
@@ -3567,7 +3575,152 @@ static void liquidio_del_vxlan_port(struct net_device *netdev,
OCTNET_CMD_VXLAN_PORT_DEL);
}
-static struct net_device_ops lionetdevops = {
+static int __liquidio_set_vf_mac(struct net_device *netdev, int vfidx,
+ u8 *mac, bool is_admin_assigned)
+{
+ struct lio *lio = GET_LIO(netdev);
+ struct octeon_device *oct = lio->oct_dev;
+ struct octnic_ctrl_pkt nctrl;
+
+ if (!is_valid_ether_addr(mac))
+ return -EINVAL;
+
+ if (vfidx < 0 || vfidx >= oct->sriov_info.max_vfs)
+ return -EINVAL;
+
+ memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
+
+ nctrl.ncmd.u64 = 0;
+ nctrl.ncmd.s.cmd = OCTNET_CMD_CHANGE_MACADDR;
+ /* vfidx is 0 based, but vf_num (param1) is 1 based */
+ nctrl.ncmd.s.param1 = vfidx + 1;
+ nctrl.ncmd.s.param2 = (is_admin_assigned ? 1 : 0);
+ nctrl.ncmd.s.more = 1;
+ nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
+ nctrl.cb_fn = 0;
+ nctrl.wait_time = LIO_CMD_WAIT_TM;
+
+ nctrl.udd[0] = 0;
+ /* The MAC Address is presented in network byte order. */
+ ether_addr_copy((u8 *)&nctrl.udd[0] + 2, mac);
+
+ oct->sriov_info.vf_macaddr[vfidx] = nctrl.udd[0];
+
+ octnet_send_nic_ctrl_pkt(oct, &nctrl);
+
+ return 0;
+}
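
__liquidio_set_vf_mac stores the six MAC bytes in the last six bytes of the 64-bit udd[0] word, which is why the copy above starts at byte offset 2 and why liquidio_get_vf_config below reads the address back starting two bytes into vf_macaddr[vfidx]. The layout in isolation (a standalone sketch):

    #include <linux/etherdevice.h>

    /* Pack a MAC into bytes 2..7 of a u64, network byte order. */
    static u64 mac_to_u64(const u8 *mac)
    {
        u64 word = 0;

        ether_addr_copy((u8 *)&word + 2, mac);
        return word;
    }

    /* Recover it the way liquidio_get_vf_config does. */
    static void u64_to_mac(const u64 *word, u8 *mac)
    {
        ether_addr_copy(mac, (const u8 *)word + 2);
    }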
+
+static int liquidio_set_vf_mac(struct net_device *netdev, int vfidx, u8 *mac)
+{
+ struct lio *lio = GET_LIO(netdev);
+ struct octeon_device *oct = lio->oct_dev;
+ int retval;
+
+ retval = __liquidio_set_vf_mac(netdev, vfidx, mac, true);
+ if (!retval)
+ cn23xx_tell_vf_its_macaddr_changed(oct, vfidx, mac);
+
+ return retval;
+}
+
+static int liquidio_set_vf_vlan(struct net_device *netdev, int vfidx,
+ u16 vlan, u8 qos, __be16 vlan_proto)
+{
+ struct lio *lio = GET_LIO(netdev);
+ struct octeon_device *oct = lio->oct_dev;
+ struct octnic_ctrl_pkt nctrl;
+ u16 vlantci;
+
+ if (vfidx < 0 || vfidx >= oct->sriov_info.num_vfs_alloced)
+ return -EINVAL;
+
+ if (vlan_proto != htons(ETH_P_8021Q))
+ return -EPROTONOSUPPORT;
+
+ if (vlan >= VLAN_N_VID || qos > 7)
+ return -EINVAL;
+
+ if (vlan)
+ vlantci = vlan | (u16)qos << VLAN_PRIO_SHIFT;
+ else
+ vlantci = 0;
+
+ if (oct->sriov_info.vf_vlantci[vfidx] == vlantci)
+ return 0;
+
+ memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
+
+ if (vlan)
+ nctrl.ncmd.s.cmd = OCTNET_CMD_ADD_VLAN_FILTER;
+ else
+ nctrl.ncmd.s.cmd = OCTNET_CMD_DEL_VLAN_FILTER;
+
+ nctrl.ncmd.s.param1 = vlantci;
+ nctrl.ncmd.s.param2 =
+ vfidx + 1; /* vfidx is 0 based, but vf_num (param2) is 1 based */
+ nctrl.ncmd.s.more = 0;
+ nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
+ nctrl.cb_fn = 0;
+ nctrl.wait_time = LIO_CMD_WAIT_TM;
+
+ octnet_send_nic_ctrl_pkt(oct, &nctrl);
+
+ oct->sriov_info.vf_vlantci[vfidx] = vlantci;
+
+ return 0;
+}
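
The vlantci word above follows the 802.1Q TCI layout: the VLAN ID occupies the low 12 bits and the priority (qos) is shifted up by VLAN_PRIO_SHIFT (13). As a worked example, vlan = 100 with qos = 3 encodes as 100 | (3 << 13) = 0x6064, and liquidio_get_vf_config below recovers the two fields with VLAN_VID_MASK and a shift by VLAN_PRIO_SHIFT.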
+
+static int liquidio_get_vf_config(struct net_device *netdev, int vfidx,
+ struct ifla_vf_info *ivi)
+{
+ struct lio *lio = GET_LIO(netdev);
+ struct octeon_device *oct = lio->oct_dev;
+ u8 *macaddr;
+
+ if (vfidx < 0 || vfidx >= oct->sriov_info.num_vfs_alloced)
+ return -EINVAL;
+
+ ivi->vf = vfidx;
+ macaddr = 2 + (u8 *)&oct->sriov_info.vf_macaddr[vfidx];
+ ether_addr_copy(&ivi->mac[0], macaddr);
+ ivi->vlan = oct->sriov_info.vf_vlantci[vfidx] & VLAN_VID_MASK;
+ ivi->qos = oct->sriov_info.vf_vlantci[vfidx] >> VLAN_PRIO_SHIFT;
+ ivi->linkstate = oct->sriov_info.vf_linkstate[vfidx];
+ return 0;
+}
+
+static int liquidio_set_vf_link_state(struct net_device *netdev, int vfidx,
+ int linkstate)
+{
+ struct lio *lio = GET_LIO(netdev);
+ struct octeon_device *oct = lio->oct_dev;
+ struct octnic_ctrl_pkt nctrl;
+
+ if (vfidx < 0 || vfidx >= oct->sriov_info.num_vfs_alloced)
+ return -EINVAL;
+
+ if (oct->sriov_info.vf_linkstate[vfidx] == linkstate)
+ return 0;
+
+ memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
+ nctrl.ncmd.s.cmd = OCTNET_CMD_SET_VF_LINKSTATE;
+ nctrl.ncmd.s.param1 =
+ vfidx + 1; /* vfidx is 0 based, but vf_num (param1) is 1 based */
+ nctrl.ncmd.s.param2 = linkstate;
+ nctrl.ncmd.s.more = 0;
+ nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
+ nctrl.cb_fn = 0;
+ nctrl.wait_time = LIO_CMD_WAIT_TM;
+
+ octnet_send_nic_ctrl_pkt(oct, &nctrl);
+
+ oct->sriov_info.vf_linkstate[vfidx] = linkstate;
+
+ return 0;
+}
+
+static const struct net_device_ops lionetdevops = {
.ndo_open = liquidio_open,
.ndo_stop = liquidio_stop,
.ndo_start_xmit = liquidio_xmit,
@@ -3584,6 +3737,11 @@ static struct net_device_ops lionetdevops = {
.ndo_set_features = liquidio_set_features,
.ndo_udp_tunnel_add = liquidio_add_vxlan_port,
.ndo_udp_tunnel_del = liquidio_del_vxlan_port,
+ .ndo_set_vf_mac = liquidio_set_vf_mac,
+ .ndo_set_vf_vlan = liquidio_set_vf_vlan,
+ .ndo_get_vf_config = liquidio_get_vf_config,
+ .ndo_set_vf_link_state = liquidio_set_vf_link_state,
+ .ndo_select_queue = select_q
};
/** \brief Entry point for the liquidio module
@@ -3595,7 +3753,7 @@ static int __init liquidio_init(void)
init_completion(&first_stage);
- octeon_init_device_list(conf_type);
+ octeon_init_device_list(OCTEON_CONFIG_TYPE_DEFAULT);
if (liquidio_init_pci())
return -EINVAL;
@@ -3816,9 +3974,6 @@ static int setup_nic_devices(struct octeon_device *octeon_dev)
SET_NETDEV_DEV(netdev, &octeon_dev->pci_dev->dev);
- if (num_iqueues > 1)
- lionetdevops.ndo_select_queue = select_q;
-
/* Associate the routines that will handle different
* netdev tasks.
*/
@@ -3891,6 +4046,10 @@ static int setup_nic_devices(struct octeon_device *octeon_dev)
netdev->hw_features = netdev->hw_features &
~NETIF_F_HW_VLAN_CTAG_RX;
+ /* MTU range: 68 - 16000 */
+ netdev->min_mtu = LIO_MIN_MTU_SIZE;
+ netdev->max_mtu = LIO_MAX_MTU_SIZE;
+
/* Point to the properties for octeon device to which this
* interface belongs.
*/
@@ -3902,6 +4061,19 @@ static int setup_nic_devices(struct octeon_device *octeon_dev)
"if%d gmx: %d hw_addr: 0x%llx\n", i,
lio->linfo.gmxport, CVM_CAST64(lio->linfo.hw_addr));
+ for (j = 0; j < octeon_dev->sriov_info.max_vfs; j++) {
+ u8 vfmac[ETH_ALEN];
+
+ random_ether_addr(&vfmac[0]);
+ if (__liquidio_set_vf_mac(netdev, j,
+ &vfmac[0], false)) {
+ dev_err(&octeon_dev->pci_dev->dev,
+ "Error setting VF%d MAC address\n",
+ j);
+ goto setup_nic_dev_fail;
+ }
+ }
+
/* 64-bit swap required on LE machines */
octeon_swap_8B_data(&lio->linfo.hw_addr, 1);
for (j = 0; j < 6; j++)
@@ -3997,6 +4169,101 @@ setup_nic_wait_intr:
return -ENODEV;
}
+#ifdef CONFIG_PCI_IOV
+static int octeon_enable_sriov(struct octeon_device *oct)
+{
+ unsigned int num_vfs_alloced = oct->sriov_info.num_vfs_alloced;
+ struct pci_dev *vfdev;
+ int err;
+ u32 u;
+
+ if (OCTEON_CN23XX_PF(oct) && num_vfs_alloced) {
+ err = pci_enable_sriov(oct->pci_dev,
+ oct->sriov_info.num_vfs_alloced);
+ if (err) {
+ dev_err(&oct->pci_dev->dev,
+ "OCTEON: Failed to enable PCI sriov: %d\n",
+ err);
+ oct->sriov_info.num_vfs_alloced = 0;
+ return err;
+ }
+ oct->sriov_info.sriov_enabled = 1;
+
+ /* init lookup table that maps DPI ring number to VF pci_dev
+ * struct pointer
+ */
+ u = 0;
+ vfdev = pci_get_device(PCI_VENDOR_ID_CAVIUM,
+ OCTEON_CN23XX_VF_VID, NULL);
+ while (vfdev) {
+ if (vfdev->is_virtfn &&
+ (vfdev->physfn == oct->pci_dev)) {
+ oct->sriov_info.dpiring_to_vfpcidev_lut[u] =
+ vfdev;
+ u += oct->sriov_info.rings_per_vf;
+ }
+ vfdev = pci_get_device(PCI_VENDOR_ID_CAVIUM,
+ OCTEON_CN23XX_VF_VID, vfdev);
+ }
+ }
+
+ return num_vfs_alloced;
+}
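
The lookup table filled in above is indexed by DPI ring number with a stride of rings_per_vf: each VF's pci_dev pointer sits at its first ring's index. A sketch of the reverse lookup this layout enables (the helper name is hypothetical):

    /* Map a DPI ring number back to the owning VF's pci_dev. */
    static struct pci_dev *ring_to_vf_pcidev(struct octeon_device *oct,
                                             u32 ring)
    {
        u32 first = ring - (ring % oct->sriov_info.rings_per_vf);

        return oct->sriov_info.dpiring_to_vfpcidev_lut[first];
    }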
+
+static int lio_pci_sriov_disable(struct octeon_device *oct)
+{
+ int u;
+
+ if (pci_vfs_assigned(oct->pci_dev)) {
+ dev_err(&oct->pci_dev->dev, "VFs are still assigned to VMs.\n");
+ return -EPERM;
+ }
+
+ pci_disable_sriov(oct->pci_dev);
+
+ u = 0;
+ while (u < MAX_POSSIBLE_VFS) {
+ oct->sriov_info.dpiring_to_vfpcidev_lut[u] = NULL;
+ u += oct->sriov_info.rings_per_vf;
+ }
+
+ oct->sriov_info.num_vfs_alloced = 0;
+ dev_info(&oct->pci_dev->dev, "oct->pf_num:%d disabled VFs\n",
+ oct->pf_num);
+
+ return 0;
+}
+
+static int liquidio_enable_sriov(struct pci_dev *dev, int num_vfs)
+{
+ struct octeon_device *oct = pci_get_drvdata(dev);
+ int ret = 0;
+
+ if ((num_vfs == oct->sriov_info.num_vfs_alloced) &&
+ (oct->sriov_info.sriov_enabled)) {
+ dev_info(&oct->pci_dev->dev, "oct->pf_num:%d already enabled num_vfs:%d\n",
+ oct->pf_num, num_vfs);
+ return 0;
+ }
+
+ if (!num_vfs) {
+ ret = lio_pci_sriov_disable(oct);
+ } else if (num_vfs > oct->sriov_info.max_vfs) {
+ dev_err(&oct->pci_dev->dev,
+ "OCTEON: Max allowed VFs:%d user requested:%d",
+ oct->sriov_info.max_vfs, num_vfs);
+ ret = -EPERM;
+ } else {
+ oct->sriov_info.num_vfs_alloced = num_vfs;
+ ret = octeon_enable_sriov(oct);
+ dev_info(&oct->pci_dev->dev, "oct->pf_num:%d num_vfs:%d\n",
+ oct->pf_num, num_vfs);
+ }
+
+ return ret;
+}
+#endif
+
/**
* \brief initialize the NIC
* @param oct octeon device
@@ -4102,6 +4369,52 @@ static void nic_starter(struct work_struct *work)
complete(&handshake[oct->octeon_id].started);
}
+static int
+octeon_recv_vf_drv_notice(struct octeon_recv_info *recv_info, void *buf)
+{
+ struct octeon_device *oct = (struct octeon_device *)buf;
+ struct octeon_recv_pkt *recv_pkt = recv_info->recv_pkt;
+ int i, notice, vf_idx;
+ u64 *data, vf_num;
+
+ notice = recv_pkt->rh.r.ossp;
+ data = (u64 *)get_rbd(recv_pkt->buffer_ptr[0]);
+
+ /* the first 64-bit word of data is the vf_num */
+ vf_num = data[0];
+ octeon_swap_8B_data(&vf_num, 1);
+ vf_idx = (int)vf_num - 1;
+
+ if (notice == VF_DRV_LOADED) {
+ if (!(oct->sriov_info.vf_drv_loaded_mask & BIT_ULL(vf_idx))) {
+ oct->sriov_info.vf_drv_loaded_mask |= BIT_ULL(vf_idx);
+ dev_info(&oct->pci_dev->dev,
+ "driver for VF%d was loaded\n", vf_idx);
+ try_module_get(THIS_MODULE);
+ }
+ } else if (notice == VF_DRV_REMOVED) {
+ if (oct->sriov_info.vf_drv_loaded_mask & BIT_ULL(vf_idx)) {
+ oct->sriov_info.vf_drv_loaded_mask &= ~BIT_ULL(vf_idx);
+ dev_info(&oct->pci_dev->dev,
+ "driver for VF%d was removed\n", vf_idx);
+ module_put(THIS_MODULE);
+ }
+ } else if (notice == VF_DRV_MACADDR_CHANGED) {
+ u8 *b = (u8 *)&data[1];
+
+ oct->sriov_info.vf_macaddr[vf_idx] = data[1];
+ dev_info(&oct->pci_dev->dev,
+ "VF driver changed VF%d's MAC address to %pM\n",
+ vf_idx, b + 2);
+ }
+
+ for (i = 0; i < recv_pkt->buffer_count; i++)
+ recv_buffer_free(recv_pkt->buffer_ptr[i]);
+ octeon_free_recv_info(recv_info);
+
+ return 0;
+}
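
The handler above decodes a small fixed layout: the receive header's ossp field carries the notice code, data[0] carries the 1-based VF number (byte-swapped from Octeon order), and, for VF_DRV_MACADDR_CHANGED, data[1] carries the MAC in the same bytes-2..7 packing used by __liquidio_set_vf_mac. A struct sketch of that payload, inferred from the code rather than from a published format:

    struct vf_drv_notice_payload {
        u64 vf_num;     /* data[0]: 1-based VF number, Octeon byte order */
        u64 macaddr;    /* data[1]: MAC in bytes 2..7 (MACADDR_CHANGED) */
    };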
+
/**
* \brief Device initialization for each Octeon device that is probed
* @param octeon_dev octeon device
@@ -4121,6 +4434,8 @@ static int octeon_device_init(struct octeon_device *octeon_dev)
if (octeon_pci_os_setup(octeon_dev))
return 1;
+ atomic_set(&octeon_dev->status, OCT_DEV_PCI_ENABLE_DONE);
+
/* Identify the Octeon type and map the BAR address space. */
if (octeon_chip_specific_setup(octeon_dev)) {
dev_err(&octeon_dev->pci_dev->dev, "Chip specific setup failed\n");
@@ -4160,6 +4475,9 @@ static int octeon_device_init(struct octeon_device *octeon_dev)
octeon_core_drv_init,
octeon_dev);
+ octeon_register_dispatch_fn(octeon_dev, OPCODE_NIC,
+ OPCODE_NIC_VF_DRV_NOTICE,
+ octeon_recv_vf_drv_notice, octeon_dev);
INIT_DELAYED_WORK(&octeon_dev->nic_poll_work.work, nic_starter);
octeon_dev->nic_poll_work.ctxptr = (void *)octeon_dev;
schedule_delayed_work(&octeon_dev->nic_poll_work.work,
@@ -4167,7 +4485,10 @@ static int octeon_device_init(struct octeon_device *octeon_dev)
atomic_set(&octeon_dev->status, OCT_DEV_DISPATCH_INIT_DONE);
- octeon_set_io_queues_off(octeon_dev);
+ if (octeon_set_io_queues_off(octeon_dev)) {
+ dev_err(&octeon_dev->pci_dev->dev, "setting io queues off failed\n");
+ return 1;
+ }
if (OCTEON_CN23XX_PF(octeon_dev)) {
ret = octeon_dev->fn_list.setup_device_regs(octeon_dev);
@@ -4189,9 +4510,6 @@ static int octeon_device_init(struct octeon_device *octeon_dev)
if (octeon_setup_instr_queues(octeon_dev)) {
dev_err(&octeon_dev->pci_dev->dev,
"instruction queue initialization failed\n");
- /* On error, release any previously allocated queues */
- for (j = 0; j < octeon_dev->num_iqs; j++)
- octeon_delete_instr_queue(octeon_dev, j);
return 1;
}
atomic_set(&octeon_dev->status, OCT_DEV_INSTR_QUEUE_INIT_DONE);
@@ -4207,19 +4525,23 @@ static int octeon_device_init(struct octeon_device *octeon_dev)
if (octeon_setup_output_queues(octeon_dev)) {
dev_err(&octeon_dev->pci_dev->dev, "Output queue initialization failed\n");
- /* Release any previously allocated queues */
- for (j = 0; j < octeon_dev->num_oqs; j++)
- octeon_delete_droq(octeon_dev, j);
return 1;
}
atomic_set(&octeon_dev->status, OCT_DEV_DROQ_INIT_DONE);
if (OCTEON_CN23XX_PF(octeon_dev)) {
+ if (octeon_dev->fn_list.setup_mbox(octeon_dev)) {
+ dev_err(&octeon_dev->pci_dev->dev, "OCTEON: Mailbox setup failed\n");
+ return 1;
+ }
+ atomic_set(&octeon_dev->status, OCT_DEV_MBOX_SETUP_DONE);
+
if (octeon_allocate_ioq_vector(octeon_dev)) {
dev_err(&octeon_dev->pci_dev->dev, "OCTEON: ioq vector allocation failed\n");
return 1;
}
+ atomic_set(&octeon_dev->status, OCT_DEV_MSIX_ALLOC_VECTOR_DONE);
} else {
/* The input and output queue registers were setup earlier (the
@@ -4247,6 +4569,8 @@ static int octeon_device_init(struct octeon_device *octeon_dev)
/* Enable Octeon device interrupts */
octeon_dev->fn_list.enable_interrupt(octeon_dev, OCTEON_ALL_INTR);
+ atomic_set(&octeon_dev->status, OCT_DEV_INTR_SET_DONE);
+
/* Enable the input and output queues for this Octeon device */
ret = octeon_dev->fn_list.enable_io_queues(octeon_dev);
if (ret) {
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
new file mode 100644
index 000000000000..70d96c10c673
--- /dev/null
+++ b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
@@ -0,0 +1,3251 @@
+/**********************************************************************
+ * Author: Cavium, Inc.
+ *
+ * Contact: support@cavium.com
+ * Please include "LiquidIO" in the subject.
+ *
+ * Copyright (c) 2003-2016 Cavium, Inc.
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more details.
+ ***********************************************************************/
+#include <linux/pci.h>
+#include <net/vxlan.h>
+#include "liquidio_common.h"
+#include "octeon_droq.h"
+#include "octeon_iq.h"
+#include "response_manager.h"
+#include "octeon_device.h"
+#include "octeon_nic.h"
+#include "octeon_main.h"
+#include "octeon_network.h"
+#include "cn23xx_vf_device.h"
+
+MODULE_AUTHOR("Cavium Networks, <support@cavium.com>");
+MODULE_DESCRIPTION("Cavium LiquidIO Intelligent Server Adapter Virtual Function Driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(LIQUIDIO_VERSION);
+
+static int debug = -1;
+module_param(debug, int, 0644);
+MODULE_PARM_DESC(debug, "NETIF_MSG debug bits");
+
+#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK)
+
+/* Bit mask values for lio->ifstate */
+#define LIO_IFSTATE_DROQ_OPS 0x01
+#define LIO_IFSTATE_REGISTERED 0x02
+#define LIO_IFSTATE_RUNNING 0x04
+#define LIO_IFSTATE_RX_TIMESTAMP_ENABLED 0x08
+
+struct liquidio_if_cfg_context {
+ int octeon_id;
+
+ wait_queue_head_t wc;
+
+ int cond;
+};
+
+struct liquidio_if_cfg_resp {
+ u64 rh;
+ struct liquidio_if_cfg_info cfg_info;
+ u64 status;
+};
+
+struct liquidio_rx_ctl_context {
+ int octeon_id;
+
+ wait_queue_head_t wc;
+
+ int cond;
+};
+
+struct oct_timestamp_resp {
+ u64 rh;
+ u64 timestamp;
+ u64 status;
+};
+
+union tx_info {
+ u64 u64;
+ struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+ u16 gso_size;
+ u16 gso_segs;
+ u32 reserved;
+#else
+ u32 reserved;
+ u16 gso_segs;
+ u16 gso_size;
+#endif
+ } s;
+};
+
+#define OCTNIC_MAX_SG (MAX_SKB_FRAGS)
+
+#define OCTNIC_GSO_MAX_HEADER_SIZE 128
+#define OCTNIC_GSO_MAX_SIZE \
+ (CN23XX_DEFAULT_INPUT_JABBER - OCTNIC_GSO_MAX_HEADER_SIZE)
+
+struct octnic_gather {
+ /* List manipulation. Next and prev pointers. */
+ struct list_head list;
+
+ /* Size of the gather component at sg in bytes. */
+ int sg_size;
+
+ /* Number of bytes that sg was adjusted to make it 8B-aligned. */
+ int adjust;
+
+ /* Gather component that can accommodate max sized fragment list
+ * received from the IP layer.
+ */
+ struct octeon_sg_entry *sg;
+};
+
+struct octeon_device_priv {
+ /* Tasklet structures for this device. */
+ struct tasklet_struct droq_tasklet;
+ unsigned long napi_mask;
+};
+
+static int
+liquidio_vf_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
+static void liquidio_vf_remove(struct pci_dev *pdev);
+static int octeon_device_init(struct octeon_device *oct);
+static int liquidio_stop(struct net_device *netdev);
+
+static int lio_wait_for_oq_pkts(struct octeon_device *oct)
+{
+ struct octeon_device_priv *oct_priv =
+ (struct octeon_device_priv *)oct->priv;
+ int retry = MAX_VF_IP_OP_PENDING_PKT_COUNT;
+ int pkt_cnt = 0, pending_pkts;
+ int i;
+
+ do {
+ pending_pkts = 0;
+
+ for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) {
+ if (!(oct->io_qmask.oq & BIT_ULL(i)))
+ continue;
+ pkt_cnt += octeon_droq_check_hw_for_pkts(oct->droq[i]);
+ }
+ if (pkt_cnt > 0) {
+ pending_pkts += pkt_cnt;
+ tasklet_schedule(&oct_priv->droq_tasklet);
+ }
+ pkt_cnt = 0;
+ schedule_timeout_uninterruptible(1);
+
+ } while (retry-- && pending_pkts);
+
+ return pkt_cnt;
+}
+
+/**
+ * \brief wait for all pending requests to complete
+ * @param oct Pointer to Octeon device
+ *
+ * Called during shutdown sequence
+ */
+static int wait_for_pending_requests(struct octeon_device *oct)
+{
+ int i, pcount = 0;
+
+ for (i = 0; i < MAX_VF_IP_OP_PENDING_PKT_COUNT; i++) {
+ pcount = atomic_read(
+ &oct->response_list[OCTEON_ORDERED_SC_LIST]
+ .pending_req_count);
+ if (pcount)
+ schedule_timeout_uninterruptible(HZ / 10);
+ else
+ break;
+ }
+
+ if (pcount)
+ return 1;
+
+ return 0;
+}
+
+/**
+ * \brief Cause device to go quiet so it can be safely removed/reset/etc
+ * @param oct Pointer to Octeon device
+ */
+static void pcierror_quiesce_device(struct octeon_device *oct)
+{
+ int i;
+
+ /* Disable the input and output queues now. No more packets will
+ * arrive from Octeon, but we should wait for all packet processing
+ * to finish.
+ */
+
+ /* To allow for in-flight requests */
+ schedule_timeout_uninterruptible(100);
+
+ if (wait_for_pending_requests(oct))
+ dev_err(&oct->pci_dev->dev, "There were pending requests\n");
+
+ /* Force all requests waiting to be fetched by OCTEON to complete. */
+ for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) {
+ struct octeon_instr_queue *iq;
+
+ if (!(oct->io_qmask.iq & BIT_ULL(i)))
+ continue;
+ iq = oct->instr_queue[i];
+
+ if (atomic_read(&iq->instr_pending)) {
+ spin_lock_bh(&iq->lock);
+ iq->fill_cnt = 0;
+ iq->octeon_read_index = iq->host_write_index;
+ iq->stats.instr_processed +=
+ atomic_read(&iq->instr_pending);
+ lio_process_iq_request_list(oct, iq, 0);
+ spin_unlock_bh(&iq->lock);
+ }
+ }
+
+ /* Force all pending ordered list requests to time out. */
+ lio_process_ordered_list(oct, 1);
+
+ /* We do not need to wait for output queue packets to be processed. */
+}
+
+/**
+ * \brief Cleanup PCI AER uncorrectable error status
+ * @param dev Pointer to PCI device
+ */
+static void cleanup_aer_uncorrect_error_status(struct pci_dev *dev)
+{
+ u32 status, mask;
+ int pos = 0x100;
+
+ pr_info("%s :\n", __func__);
+
+ pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, &status);
+ pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, &mask);
+ if (dev->error_state == pci_channel_io_normal)
+ status &= ~mask; /* Clear corresponding nonfatal bits */
+ else
+ status &= mask; /* Clear corresponding fatal bits */
+ pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, status);
+}
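
This helper hard-codes the AER capability at config offset 0x100, the first extended-capability slot. A more defensive variant (a sketch of an alternative, not what this patch does) would discover the offset at run time; note that it clears every latched bit rather than filtering by severity as the function above does:

    #include <linux/pci.h>

    static void clear_aer_uncorrect_status(struct pci_dev *dev)
    {
        int pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
        u32 status;

        if (!pos)
            return;     /* device has no AER capability */

        pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, &status);
        pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, status);
    }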
+
+/**
+ * \brief Stop all PCI IO to a given device
+ * @param dev Pointer to Octeon device
+ */
+static void stop_pci_io(struct octeon_device *oct)
+{
+ struct msix_entry *msix_entries;
+ int i;
+
+ /* No more instructions will be forwarded. */
+ atomic_set(&oct->status, OCT_DEV_IN_RESET);
+
+ for (i = 0; i < oct->ifcount; i++)
+ netif_device_detach(oct->props[i].netdev);
+
+ /* Disable interrupts */
+ oct->fn_list.disable_interrupt(oct, OCTEON_ALL_INTR);
+
+ pcierror_quiesce_device(oct);
+ if (oct->msix_on) {
+ msix_entries = (struct msix_entry *)oct->msix_entries;
+ for (i = 0; i < oct->num_msix_irqs; i++) {
+ /* clear the affinity_cpumask */
+ irq_set_affinity_hint(msix_entries[i].vector,
+ NULL);
+ free_irq(msix_entries[i].vector,
+ &oct->ioq_vector[i]);
+ }
+ pci_disable_msix(oct->pci_dev);
+ kfree(oct->msix_entries);
+ oct->msix_entries = NULL;
+ octeon_free_ioq_vector(oct);
+ }
+ dev_dbg(&oct->pci_dev->dev, "Device state is now %s\n",
+ lio_get_state_string(&oct->status));
+
+ /* making it a common function for all OCTEON models */
+ cleanup_aer_uncorrect_error_status(oct->pci_dev);
+
+ pci_disable_device(oct->pci_dev);
+}
+
+/**
+ * \brief called when PCI error is detected
+ * @param pdev Pointer to PCI device
+ * @param state The current pci connection state
+ *
+ * This function is called after a PCI bus error affecting
+ * this device has been detected.
+ */
+static pci_ers_result_t liquidio_pcie_error_detected(struct pci_dev *pdev,
+ pci_channel_state_t state)
+{
+ struct octeon_device *oct = pci_get_drvdata(pdev);
+
+ /* Non-correctable Non-fatal errors */
+ if (state == pci_channel_io_normal) {
+ dev_err(&oct->pci_dev->dev, "Non-correctable non-fatal error reported:\n");
+ cleanup_aer_uncorrect_error_status(oct->pci_dev);
+ return PCI_ERS_RESULT_CAN_RECOVER;
+ }
+
+ /* Non-correctable Fatal errors */
+ dev_err(&oct->pci_dev->dev, "Non-correctable FATAL reported by PCI AER driver\n");
+ stop_pci_io(oct);
+
+ return PCI_ERS_RESULT_DISCONNECT;
+}
+
+/* For PCI-E Advanced Error Recovery (AER) Interface */
+static const struct pci_error_handlers liquidio_vf_err_handler = {
+ .error_detected = liquidio_pcie_error_detected,
+};
+
+static const struct pci_device_id liquidio_vf_pci_tbl[] = {
+ {
+ PCI_VENDOR_ID_CAVIUM, OCTEON_CN23XX_VF_VID,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0
+ },
+ {
+ 0, 0, 0, 0, 0, 0, 0
+ }
+};
+MODULE_DEVICE_TABLE(pci, liquidio_vf_pci_tbl);
+
+static struct pci_driver liquidio_vf_pci_driver = {
+ .name = "LiquidIO_VF",
+ .id_table = liquidio_vf_pci_tbl,
+ .probe = liquidio_vf_probe,
+ .remove = liquidio_vf_remove,
+ .err_handler = &liquidio_vf_err_handler, /* For AER */
+};
+
+/**
+ * \brief check interface state
+ * @param lio per-network private data
+ * @param state_flag flag state to check
+ */
+static int ifstate_check(struct lio *lio, int state_flag)
+{
+ return atomic_read(&lio->ifstate) & state_flag;
+}
+
+/**
+ * \brief set interface state
+ * @param lio per-network private data
+ * @param state_flag flag state to set
+ */
+static void ifstate_set(struct lio *lio, int state_flag)
+{
+ atomic_set(&lio->ifstate, (atomic_read(&lio->ifstate) | state_flag));
+}
+
+/**
+ * \brief clear interface state
+ * @param lio per-network private data
+ * @param state_flag flag state to clear
+ */
+static void ifstate_reset(struct lio *lio, int state_flag)
+{
+ atomic_set(&lio->ifstate, (atomic_read(&lio->ifstate) & ~(state_flag)));
+}
+
+/**
+ * \brief Stop Tx queues
+ * @param netdev network device
+ */
+static void txqs_stop(struct net_device *netdev)
+{
+ if (netif_is_multiqueue(netdev)) {
+ int i;
+
+ for (i = 0; i < netdev->num_tx_queues; i++)
+ netif_stop_subqueue(netdev, i);
+ } else {
+ netif_stop_queue(netdev);
+ }
+}
+
+/**
+ * \brief Start Tx queues
+ * @param netdev network device
+ */
+static void txqs_start(struct net_device *netdev)
+{
+ if (netif_is_multiqueue(netdev)) {
+ int i;
+
+ for (i = 0; i < netdev->num_tx_queues; i++)
+ netif_start_subqueue(netdev, i);
+ } else {
+ netif_start_queue(netdev);
+ }
+}
+
+/**
+ * \brief Wake Tx queues
+ * @param netdev network device
+ */
+static void txqs_wake(struct net_device *netdev)
+{
+ struct lio *lio = GET_LIO(netdev);
+
+ if (netif_is_multiqueue(netdev)) {
+ int i;
+
+ for (i = 0; i < netdev->num_tx_queues; i++) {
+ int qno = lio->linfo.txpciq[i % (lio->linfo.num_txpciq)]
+ .s.q_no;
+ if (__netif_subqueue_stopped(netdev, i)) {
+ INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, qno,
+ tx_restart, 1);
+ netif_wake_subqueue(netdev, i);
+ }
+ }
+ } else {
+ INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, lio->txq,
+ tx_restart, 1);
+ netif_wake_queue(netdev);
+ }
+}
+
+/**
+ * \brief Start Tx queue
+ * @param netdev network device
+ */
+static void start_txq(struct net_device *netdev)
+{
+ struct lio *lio = GET_LIO(netdev);
+
+ if (lio->linfo.link.s.link_up) {
+ txqs_start(netdev);
+ return;
+ }
+}
+
+/**
+ * \brief Wake a queue
+ * @param netdev network device
+ * @param q which queue to wake
+ */
+static void wake_q(struct net_device *netdev, int q)
+{
+ if (netif_is_multiqueue(netdev))
+ netif_wake_subqueue(netdev, q);
+ else
+ netif_wake_queue(netdev);
+}
+
+/**
+ * \brief Stop a queue
+ * @param netdev network device
+ * @param q which queue to stop
+ */
+static void stop_q(struct net_device *netdev, int q)
+{
+ if (netif_is_multiqueue(netdev))
+ netif_stop_subqueue(netdev, q);
+ else
+ netif_stop_queue(netdev);
+}
+
+/**
+ * Remove the node at the head of the list. The list would be empty at
+ * the end of this call if there are no more nodes in the list.
+ */
+static struct list_head *list_delete_head(struct list_head *root)
+{
+ struct list_head *node;
+
+ if ((root->prev == root) && (root->next == root))
+ node = NULL;
+ else
+ node = root->next;
+
+ if (node)
+ list_del(node);
+
+ return node;
+}
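
The emptiness test here, (root->prev == root) && (root->next == root), is exactly what list_empty() expresses. An equivalent sketch using the stock <linux/list.h> helpers:

    #include <linux/list.h>

    static struct list_head *list_delete_head_alt(struct list_head *root)
    {
        struct list_head *node = NULL;

        if (!list_empty(root)) {
            node = root->next;
            list_del(node);
        }
        return node;
    }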
+
+/**
+ * \brief Delete gather lists
+ * @param lio per-network private data
+ */
+static void delete_glists(struct lio *lio)
+{
+ struct octnic_gather *g;
+ int i;
+
+ if (!lio->glist)
+ return;
+
+ for (i = 0; i < lio->linfo.num_txpciq; i++) {
+ do {
+ g = (struct octnic_gather *)
+ list_delete_head(&lio->glist[i]);
+ if (g) {
+ if (g->sg)
+ kfree((void *)((unsigned long)g->sg -
+ g->adjust));
+ kfree(g);
+ }
+ } while (g);
+ }
+
+ kfree(lio->glist);
+ kfree(lio->glist_lock);
+}
+
+/**
+ * \brief Setup gather lists
+ * @param lio per-network private data
+ */
+static int setup_glists(struct lio *lio, int num_iqs)
+{
+ struct octnic_gather *g;
+ int i, j;
+
+ lio->glist_lock =
+ kzalloc(sizeof(*lio->glist_lock) * num_iqs, GFP_KERNEL);
+ if (!lio->glist_lock)
+ return 1;
+
+ lio->glist =
+ kzalloc(sizeof(*lio->glist) * num_iqs, GFP_KERNEL);
+ if (!lio->glist) {
+ kfree(lio->glist_lock);
+ return 1;
+ }
+
+ for (i = 0; i < num_iqs; i++) {
+ spin_lock_init(&lio->glist_lock[i]);
+
+ INIT_LIST_HEAD(&lio->glist[i]);
+
+ for (j = 0; j < lio->tx_qsize; j++) {
+ g = kzalloc(sizeof(*g), GFP_KERNEL);
+ if (!g)
+ break;
+
+ g->sg_size = ((ROUNDUP4(OCTNIC_MAX_SG) >> 2) *
+ OCT_SG_ENTRY_SIZE);
+
+ g->sg = kmalloc(g->sg_size + 8, GFP_KERNEL);
+ if (!g->sg) {
+ kfree(g);
+ break;
+ }
+
+ /* The gather component should be aligned on 64-bit
+ * boundary
+ */
+ if (((unsigned long)g->sg) & 7) {
+ g->adjust = 8 - (((unsigned long)g->sg) & 7);
+ g->sg = (struct octeon_sg_entry *)
+ ((unsigned long)g->sg + g->adjust);
+ }
+ list_add_tail(&g->list, &lio->glist[i]);
+ }
+
+ if (j != lio->tx_qsize) {
+ delete_glists(lio);
+ return 1;
+ }
+ }
+
+ return 0;
+}
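
setup_glists over-allocates each gather buffer by 8 bytes so the component can be nudged onto an 8-byte boundary; g->adjust records the shift so delete_glists can reconstruct the original kmalloc pointer before freeing. The alignment step in isolation (helper name hypothetical; assumes the buffer was allocated with at least 8 spare bytes):

    /* Align an 8-byte over-allocated buffer to an 8-byte boundary,
     * recording the adjustment needed to recover the original pointer.
     */
    static void *align8(void *buf, int *adjust)
    {
        unsigned long addr = (unsigned long)buf;

        *adjust = (addr & 7) ? 8 - (int)(addr & 7) : 0;
        return (void *)(addr + *adjust);
    }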
+
+/**
+ * \brief Print link information
+ * @param netdev network device
+ */
+static void print_link_info(struct net_device *netdev)
+{
+ struct lio *lio = GET_LIO(netdev);
+
+ if (atomic_read(&lio->ifstate) & LIO_IFSTATE_REGISTERED) {
+ struct oct_link_info *linfo = &lio->linfo;
+
+ if (linfo->link.s.link_up) {
+ netif_info(lio, link, lio->netdev, "%d Mbps %s Duplex UP\n",
+ linfo->link.s.speed,
+ (linfo->link.s.duplex) ? "Full" : "Half");
+ } else {
+ netif_info(lio, link, lio->netdev, "Link Down\n");
+ }
+ }
+}
+
+/**
+ * \brief Routine to notify MTU change
+ * @param work work_struct data structure
+ */
+static void octnet_link_status_change(struct work_struct *work)
+{
+ struct cavium_wk *wk = (struct cavium_wk *)work;
+ struct lio *lio = (struct lio *)wk->ctxptr;
+
+ rtnl_lock();
+ call_netdevice_notifiers(NETDEV_CHANGEMTU, lio->netdev);
+ rtnl_unlock();
+}
+
+/**
+ * \brief Sets up the mtu status change work
+ * @param netdev network device
+ */
+static int setup_link_status_change_wq(struct net_device *netdev)
+{
+ struct lio *lio = GET_LIO(netdev);
+ struct octeon_device *oct = lio->oct_dev;
+
+ lio->link_status_wq.wq = alloc_workqueue("link-status",
+ WQ_MEM_RECLAIM, 0);
+ if (!lio->link_status_wq.wq) {
+ dev_err(&oct->pci_dev->dev, "unable to create cavium link status wq\n");
+ return -1;
+ }
+ INIT_DELAYED_WORK(&lio->link_status_wq.wk.work,
+ octnet_link_status_change);
+ lio->link_status_wq.wk.ctxptr = lio;
+
+ return 0;
+}
+
+static void cleanup_link_status_change_wq(struct net_device *netdev)
+{
+ struct lio *lio = GET_LIO(netdev);
+
+ if (lio->link_status_wq.wq) {
+ cancel_delayed_work_sync(&lio->link_status_wq.wk.work);
+ destroy_workqueue(lio->link_status_wq.wq);
+ }
+}
+
+/**
+ * \brief Update link status
+ * @param netdev network device
+ * @param ls link status structure
+ *
+ * Called on receipt of a link status response from the core application to
+ * update each interface's link status.
+ */
+static void update_link_status(struct net_device *netdev,
+ union oct_link_status *ls)
+{
+ struct lio *lio = GET_LIO(netdev);
+ struct octeon_device *oct = lio->oct_dev;
+
+ if ((lio->intf_open) && (lio->linfo.link.u64 != ls->u64)) {
+ lio->linfo.link.u64 = ls->u64;
+
+ print_link_info(netdev);
+ lio->link_changes++;
+
+ if (lio->linfo.link.s.link_up) {
+ netif_carrier_on(netdev);
+ txqs_wake(netdev);
+ } else {
+ netif_carrier_off(netdev);
+ txqs_stop(netdev);
+ }
+
+ if (lio->linfo.link.s.mtu < netdev->mtu) {
+ dev_warn(&oct->pci_dev->dev,
+ "PF has changed the MTU for gmx port. Reducing the mtu from %d to %d\n",
+ netdev->mtu, lio->linfo.link.s.mtu);
+ lio->mtu = lio->linfo.link.s.mtu;
+ netdev->mtu = lio->linfo.link.s.mtu;
+ queue_delayed_work(lio->link_status_wq.wq,
+ &lio->link_status_wq.wk.work, 0);
+ }
+ }
+}
+
+static void update_txq_status(struct octeon_device *oct, int iq_num)
+{
+ struct octeon_instr_queue *iq = oct->instr_queue[iq_num];
+ struct net_device *netdev;
+ struct lio *lio;
+
+ netdev = oct->props[iq->ifidx].netdev;
+ lio = GET_LIO(netdev);
+ if (netif_is_multiqueue(netdev)) {
+ if (__netif_subqueue_stopped(netdev, iq->q_index) &&
+ lio->linfo.link.s.link_up &&
+ (!octnet_iq_is_full(oct, iq_num))) {
+ netif_wake_subqueue(netdev, iq->q_index);
+ INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, iq_num,
+ tx_restart, 1);
+ } else {
+ if (!octnet_iq_is_full(oct, lio->txq)) {
+ INCR_INSTRQUEUE_PKT_COUNT(
+ lio->oct_dev, lio->txq, tx_restart, 1);
+ wake_q(netdev, lio->txq);
+ }
+ }
+ }
+}
+
+static
+int liquidio_schedule_msix_droq_pkt_handler(struct octeon_droq *droq, u64 ret)
+{
+ struct octeon_device *oct = droq->oct_dev;
+ struct octeon_device_priv *oct_priv =
+ (struct octeon_device_priv *)oct->priv;
+
+ if (droq->ops.poll_mode) {
+ droq->ops.napi_fn(droq);
+ } else {
+ if (ret & MSIX_PO_INT) {
+ dev_err(&oct->pci_dev->dev,
+ "should not come here should not get rx when poll mode = 0 for vf\n");
+ tasklet_schedule(&oct_priv->droq_tasklet);
+ return 1;
+ }
+ /* input queues are flushed periodically by the IQ doorbell check */
+ if (ret & MSIX_PI_INT)
+ return 0;
+ }
+ return 0;
+}
+
+static irqreturn_t
+liquidio_msix_intr_handler(int irq __attribute__((unused)), void *dev)
+{
+ struct octeon_ioq_vector *ioq_vector = (struct octeon_ioq_vector *)dev;
+ struct octeon_device *oct = ioq_vector->oct_dev;
+ struct octeon_droq *droq = oct->droq[ioq_vector->droq_index];
+ u64 ret;
+
+ ret = oct->fn_list.msix_interrupt_handler(ioq_vector);
+
+ if ((ret & MSIX_PO_INT) || (ret & MSIX_PI_INT))
+ liquidio_schedule_msix_droq_pkt_handler(droq, ret);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * \brief Setup interrupt for octeon device
+ * @param oct octeon device
+ *
+ * Enable interrupt in Octeon device as given in the PCI interrupt mask.
+ */
+static int octeon_setup_interrupt(struct octeon_device *oct)
+{
+ struct msix_entry *msix_entries;
+ int num_alloc_ioq_vectors;
+ int num_ioq_vectors;
+ int irqret;
+ int i;
+
+ if (oct->msix_on) {
+ oct->num_msix_irqs = oct->sriov_info.rings_per_vf;
+
+ oct->msix_entries = kcalloc(
+ oct->num_msix_irqs, sizeof(struct msix_entry), GFP_KERNEL);
+ if (!oct->msix_entries)
+ return 1;
+
+ msix_entries = (struct msix_entry *)oct->msix_entries;
+
+ for (i = 0; i < oct->num_msix_irqs; i++)
+ msix_entries[i].entry = i;
+ num_alloc_ioq_vectors = pci_enable_msix_range(
+ oct->pci_dev, msix_entries,
+ oct->num_msix_irqs,
+ oct->num_msix_irqs);
+ if (num_alloc_ioq_vectors < 0) {
+ dev_err(&oct->pci_dev->dev, "unable to allocate MSI-X interrupts\n");
+ kfree(oct->msix_entries);
+ oct->msix_entries = NULL;
+ return 1;
+ }
+ dev_dbg(&oct->pci_dev->dev, "OCTEON: Enough MSI-X interrupts are allocated...\n");
+
+ num_ioq_vectors = oct->num_msix_irqs;
+
+ for (i = 0; i < num_ioq_vectors; i++) {
+ irqret = request_irq(msix_entries[i].vector,
+ liquidio_msix_intr_handler, 0,
+ "octeon", &oct->ioq_vector[i]);
+ if (irqret) {
+ dev_err(&oct->pci_dev->dev,
+ "OCTEON: Request_irq failed for MSIX interrupt Error: %d\n",
+ irqret);
+
+ while (i) {
+ i--;
+ irq_set_affinity_hint(
+ msix_entries[i].vector, NULL);
+ free_irq(msix_entries[i].vector,
+ &oct->ioq_vector[i]);
+ }
+ pci_disable_msix(oct->pci_dev);
+ kfree(oct->msix_entries);
+ oct->msix_entries = NULL;
+ return 1;
+ }
+ oct->ioq_vector[i].vector = msix_entries[i].vector;
+ /* assign the cpu mask for this msix interrupt vector */
+ irq_set_affinity_hint(
+ msix_entries[i].vector,
+ (&oct->ioq_vector[i].affinity_mask));
+ }
+ dev_dbg(&oct->pci_dev->dev,
+ "OCTEON[%d]: MSI-X enabled\n", oct->octeon_id);
+ }
+ return 0;
+}
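+
+/* pci_enable_msix_range() is called with min == max == num_msix_irqs, so
+ * vector allocation is all-or-nothing: it returns the number of vectors
+ * granted (num_msix_irqs) or a negative errno. A hypothetical caller only
+ * needs the boolean result:
+ *
+ *   if (octeon_setup_interrupt(oct))
+ *       return 1;   // MSI-X entries are already freed on failure
+ *   atomic_set(&oct->status, OCT_DEV_INTR_SET_DONE);
+ */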
+
+/**
+ * \brief PCI probe handler
+ * @param pdev PCI device structure
+ * @param ent unused
+ */
+static int
+liquidio_vf_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent __attribute__((unused)))
+{
+ struct octeon_device *oct_dev = NULL;
+
+ oct_dev = octeon_allocate_device(pdev->device,
+ sizeof(struct octeon_device_priv));
+
+ if (!oct_dev) {
+ dev_err(&pdev->dev, "Unable to allocate device\n");
+ return -ENOMEM;
+ }
+ oct_dev->msix_on = LIO_FLAG_MSIX_ENABLED;
+
+ dev_info(&pdev->dev, "Initializing device %x:%x.\n",
+ (u32)pdev->vendor, (u32)pdev->device);
+
+ /* Assign octeon_device for this device to the private data area. */
+ pci_set_drvdata(pdev, oct_dev);
+
+ /* set linux specific device pointer */
+ oct_dev->pci_dev = pdev;
+
+ if (octeon_device_init(oct_dev)) {
+ liquidio_vf_remove(pdev);
+ return -ENOMEM;
+ }
+
+ dev_dbg(&oct_dev->pci_dev->dev, "Device is ready\n");
+
+ return 0;
+}
+
+/**
+ * \brief PCI FLR for each Octeon device.
+ * @param oct octeon device
+ */
+static void octeon_pci_flr(struct octeon_device *oct)
+{
+ u16 status;
+
+ pci_save_state(oct->pci_dev);
+
+ pci_cfg_access_lock(oct->pci_dev);
+
+ /* Quiesce the device completely */
+ pci_write_config_word(oct->pci_dev, PCI_COMMAND,
+ PCI_COMMAND_INTX_DISABLE);
+
+ /* Wait for the Transaction Pending bit to clear */
+ msleep(100);
+ pcie_capability_read_word(oct->pci_dev, PCI_EXP_DEVSTA, &status);
+ if (status & PCI_EXP_DEVSTA_TRPND) {
+ dev_info(&oct->pci_dev->dev, "Transactions still pending after 100ms, sleeping for 5 seconds\n");
+ ssleep(5);
+ pcie_capability_read_word(oct->pci_dev, PCI_EXP_DEVSTA,
+ &status);
+ if (status & PCI_EXP_DEVSTA_TRPND)
+ dev_info(&oct->pci_dev->dev, "Transactions still pending after 5s; issuing FLR anyway\n");
+ }
+ pcie_capability_set_word(oct->pci_dev, PCI_EXP_DEVCTL,
+ PCI_EXP_DEVCTL_BCR_FLR);
+ mdelay(100);
+
+ pci_cfg_access_unlock(oct->pci_dev);
+
+ pci_restore_state(oct->pci_dev);
+}
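+
+/* The FLR sequence above follows the usual PCIe recipe: quiesce the
+ * function (mask INTx, stop new requests), poll DEVSTA.TRPND until
+ * outstanding transactions drain, initiate the reset through
+ * DEVCTL.BCR_FLR, and allow roughly 100ms before touching the function
+ * again. Config space is saved and restored around the reset because FLR
+ * clears it.
+ */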
+
+/**
+ * \brief Destroy resources associated with octeon device
+ * @param oct octeon device
+ */
+static void octeon_destroy_resources(struct octeon_device *oct)
+{
+ struct msix_entry *msix_entries;
+ int i;
+
+ switch (atomic_read(&oct->status)) {
+ case OCT_DEV_RUNNING:
+ case OCT_DEV_CORE_OK:
+ /* No more instructions will be forwarded. */
+ atomic_set(&oct->status, OCT_DEV_IN_RESET);
+
+ oct->app_mode = CVM_DRV_INVALID_APP;
+ dev_dbg(&oct->pci_dev->dev, "Device state is now %s\n",
+ lio_get_state_string(&oct->status));
+
+ schedule_timeout_uninterruptible(HZ / 10);
+
+ /* fallthrough */
+ case OCT_DEV_HOST_OK:
+ /* fallthrough */
+ case OCT_DEV_IO_QUEUES_DONE:
+ if (wait_for_pending_requests(oct))
+ dev_err(&oct->pci_dev->dev, "There were pending requests\n");
+
+ if (lio_wait_for_instr_fetch(oct))
+ dev_err(&oct->pci_dev->dev, "IQ had pending instructions\n");
+
+ /* Disable the input and output queues now. No more packets will
+ * arrive from Octeon, but we should wait for all packet
+ * processing to finish.
+ */
+ oct->fn_list.disable_io_queues(oct);
+
+ if (lio_wait_for_oq_pkts(oct))
+ dev_err(&oct->pci_dev->dev, "OQ had pending packets\n");
+
+ /* fallthrough */
+ case OCT_DEV_INTR_SET_DONE:
+ /* Disable interrupts */
+ oct->fn_list.disable_interrupt(oct, OCTEON_ALL_INTR);
+
+ if (oct->msix_on) {
+ msix_entries = (struct msix_entry *)oct->msix_entries;
+ for (i = 0; i < oct->num_msix_irqs; i++) {
+ irq_set_affinity_hint(msix_entries[i].vector,
+ NULL);
+ free_irq(msix_entries[i].vector,
+ &oct->ioq_vector[i]);
+ }
+ pci_disable_msix(oct->pci_dev);
+ kfree(oct->msix_entries);
+ oct->msix_entries = NULL;
+ }
+ /* Soft reset the octeon device before exiting */
+ if (oct->pci_dev->reset_fn)
+ octeon_pci_flr(oct);
+ else
+ cn23xx_vf_ask_pf_to_do_flr(oct);
+
+ /* fallthrough */
+ case OCT_DEV_MSIX_ALLOC_VECTOR_DONE:
+ octeon_free_ioq_vector(oct);
+
+ /* fallthrough */
+ case OCT_DEV_MBOX_SETUP_DONE:
+ oct->fn_list.free_mbox(oct);
+
+ /* fallthrough */
+ case OCT_DEV_IN_RESET:
+ case OCT_DEV_DROQ_INIT_DONE:
+ mdelay(100);
+ for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) {
+ if (!(oct->io_qmask.oq & BIT_ULL(i)))
+ continue;
+ octeon_delete_droq(oct, i);
+ }
+
+ /* fallthrough */
+ case OCT_DEV_RESP_LIST_INIT_DONE:
+ octeon_delete_response_list(oct);
+
+ /* fallthrough */
+ case OCT_DEV_INSTR_QUEUE_INIT_DONE:
+ for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) {
+ if (!(oct->io_qmask.iq & BIT_ULL(i)))
+ continue;
+ octeon_delete_instr_queue(oct, i);
+ }
+
+ /* fallthrough */
+ case OCT_DEV_SC_BUFF_POOL_INIT_DONE:
+ octeon_free_sc_buffer_pool(oct);
+
+ /* fallthrough */
+ case OCT_DEV_DISPATCH_INIT_DONE:
+ octeon_delete_dispatch_list(oct);
+ cancel_delayed_work_sync(&oct->nic_poll_work.work);
+
+ /* fallthrough */
+ case OCT_DEV_PCI_MAP_DONE:
+ octeon_unmap_pci_barx(oct, 0);
+ octeon_unmap_pci_barx(oct, 1);
+
+ /* fallthrough */
+ case OCT_DEV_PCI_ENABLE_DONE:
+ pci_clear_master(oct->pci_dev);
+ /* Disable the device, releasing the PCI INT */
+ pci_disable_device(oct->pci_dev);
+
+ /* fallthrough */
+ case OCT_DEV_BEGIN_STATE:
+ /* Nothing to be done here either */
+ break;
+ }
+}
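+
+/* The switch above is a reverse-order teardown state machine: each case
+ * undoes one init stage and falls through to the stage below it. For
+ * example, a device that only reached OCT_DEV_MBOX_SETUP_DONE unwinds as
+ *
+ *   free_mbox -> delete DROQs -> delete response list -> delete IQs
+ *   -> free sc buffer pool -> delete dispatch list -> unmap BARs
+ *   -> pci_disable_device
+ *
+ * so partial initializations are cleaned up by the same code path as a
+ * full teardown.
+ */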
+
+/**
+ * \brief Callback for rx ctrl
+ * @param oct octeon device
+ * @param status status of request
+ * @param buf pointer to the soft command
+ */
+static void rx_ctl_callback(struct octeon_device *oct,
+ u32 status, void *buf)
+{
+ struct octeon_soft_command *sc = (struct octeon_soft_command *)buf;
+ struct liquidio_rx_ctl_context *ctx;
+
+ ctx = (struct liquidio_rx_ctl_context *)sc->ctxptr;
+
+ oct = lio_get_device(ctx->octeon_id);
+ if (status)
+ dev_err(&oct->pci_dev->dev, "rx ctl instruction failed. Status: %llx\n",
+ CVM_CAST64(status));
+ WRITE_ONCE(ctx->cond, 1);
+
+ /* This barrier is required to be sure that the response has been
+ * written fully before waking up the handler
+ */
+ wmb();
+
+ wake_up_interruptible(&ctx->wc);
+}
+
+/**
+ * \brief Send Rx control command
+ * @param lio per-network private data
+ * @param start_stop whether to start or stop
+ */
+static void send_rx_ctrl_cmd(struct lio *lio, int start_stop)
+{
+ struct octeon_device *oct = (struct octeon_device *)lio->oct_dev;
+ int ctx_size = sizeof(struct liquidio_rx_ctl_context);
+ struct liquidio_rx_ctl_context *ctx;
+ struct octeon_soft_command *sc;
+ union octnet_cmd *ncmd;
+ int retval;
+
+ if (oct->props[lio->ifidx].rx_on == start_stop)
+ return;
+
+ sc = (struct octeon_soft_command *)
+ octeon_alloc_soft_command(oct, OCTNET_CMD_SIZE,
+ 16, ctx_size);
+ if (!sc)
+ return;
+
+ ncmd = (union octnet_cmd *)sc->virtdptr;
+ ctx = (struct liquidio_rx_ctl_context *)sc->ctxptr;
+
+ WRITE_ONCE(ctx->cond, 0);
+ ctx->octeon_id = lio_get_device_id(oct);
+ init_waitqueue_head(&ctx->wc);
+
+ ncmd->u64 = 0;
+ ncmd->s.cmd = OCTNET_CMD_RX_CTL;
+ ncmd->s.param1 = start_stop;
+
+ octeon_swap_8B_data((u64 *)ncmd, (OCTNET_CMD_SIZE >> 3));
+
+ sc->iq_no = lio->linfo.txpciq[0].s.q_no;
+
+ octeon_prepare_soft_command(oct, sc, OPCODE_NIC,
+ OPCODE_NIC_CMD, 0, 0, 0);
+
+ sc->callback = rx_ctl_callback;
+ sc->callback_arg = sc;
+ sc->wait_time = 5000;
+
+ retval = octeon_send_soft_command(oct, sc);
+ if (retval == IQ_SEND_FAILED) {
+ netif_info(lio, rx_err, lio->netdev, "Failed to send RX Control message\n");
+ } else {
+ /* Sleep on a wait queue till the cond flag indicates that the
+ * response arrived or timed-out.
+ */
+ if (sleep_cond(&ctx->wc, &ctx->cond) == -EINTR)
+ return;
+ oct->props[lio->ifidx].rx_on = start_stop;
+ }
+
+ octeon_free_soft_command(oct, sc);
+}
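+
+/* The soft-command pattern used above (and again in setup_nic_devices):
+ * allocate a command with a context area, arm a waitqueue in the context,
+ * send the command, and sleep on ctx->cond; the completion callback sets
+ * ctx->cond behind a write barrier and wakes the waiter. A minimal sketch
+ * with names from this file:
+ *
+ *   WRITE_ONCE(ctx->cond, 0);
+ *   init_waitqueue_head(&ctx->wc);
+ *   octeon_send_soft_command(oct, sc);
+ *   sleep_cond(&ctx->wc, &ctx->cond);   // woken by rx_ctl_callback()
+ */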
+
+/**
+ * \brief Destroy NIC device interface
+ * @param oct octeon device
+ * @param ifidx which interface to destroy
+ *
+ * Cleanup associated with each interface for an Octeon device when NIC
+ * module is being unloaded or if initialization fails during load.
+ */
+static void liquidio_destroy_nic_device(struct octeon_device *oct, int ifidx)
+{
+ struct net_device *netdev = oct->props[ifidx].netdev;
+ struct napi_struct *napi, *n;
+ struct lio *lio;
+
+ if (!netdev) {
+ dev_err(&oct->pci_dev->dev, "%s No netdevice ptr for index %d\n",
+ __func__, ifidx);
+ return;
+ }
+
+ lio = GET_LIO(netdev);
+
+ dev_dbg(&oct->pci_dev->dev, "NIC device cleanup\n");
+
+ if (atomic_read(&lio->ifstate) & LIO_IFSTATE_RUNNING)
+ liquidio_stop(netdev);
+
+ if (oct->props[lio->ifidx].napi_enabled == 1) {
+ list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
+ napi_disable(napi);
+
+ oct->props[lio->ifidx].napi_enabled = 0;
+
+ oct->droq[0]->ops.poll_mode = 0;
+ }
+
+ if (atomic_read(&lio->ifstate) & LIO_IFSTATE_REGISTERED)
+ unregister_netdev(netdev);
+
+ cleanup_link_status_change_wq(netdev);
+
+ delete_glists(lio);
+
+ free_netdev(netdev);
+
+ oct->props[ifidx].gmxport = -1;
+
+ oct->props[ifidx].netdev = NULL;
+}
+
+/**
+ * \brief Stop complete NIC functionality
+ * @param oct octeon device
+ */
+static int liquidio_stop_nic_module(struct octeon_device *oct)
+{
+ struct lio *lio;
+ int i, j;
+
+ dev_dbg(&oct->pci_dev->dev, "Stopping network interfaces\n");
+ if (!oct->ifcount) {
+ dev_err(&oct->pci_dev->dev, "Init for Octeon was not completed\n");
+ return 1;
+ }
+
+ spin_lock_bh(&oct->cmd_resp_wqlock);
+ oct->cmd_resp_state = OCT_DRV_OFFLINE;
+ spin_unlock_bh(&oct->cmd_resp_wqlock);
+
+ for (i = 0; i < oct->ifcount; i++) {
+ lio = GET_LIO(oct->props[i].netdev);
+ for (j = 0; j < lio->linfo.num_rxpciq; j++)
+ octeon_unregister_droq_ops(oct,
+ lio->linfo.rxpciq[j].s.q_no);
+ }
+
+ for (i = 0; i < oct->ifcount; i++)
+ liquidio_destroy_nic_device(oct, i);
+
+ dev_dbg(&oct->pci_dev->dev, "Network interfaces stopped\n");
+ return 0;
+}
+
+/**
+ * \brief Cleans up resources at unload time
+ * @param pdev PCI device structure
+ */
+static void liquidio_vf_remove(struct pci_dev *pdev)
+{
+ struct octeon_device *oct_dev = pci_get_drvdata(pdev);
+
+ dev_dbg(&oct_dev->pci_dev->dev, "Stopping device\n");
+
+ if (oct_dev->app_mode == CVM_DRV_NIC_APP)
+ liquidio_stop_nic_module(oct_dev);
+
+ /* Reset the octeon device and cleanup all memory allocated for
+ * the octeon device by driver.
+ */
+ octeon_destroy_resources(oct_dev);
+
+ dev_info(&oct_dev->pci_dev->dev, "Device removed\n");
+
+ /* This octeon device has been removed. Update the global
+ * data structure to reflect this. Free the device structure.
+ */
+ octeon_free_device_mem(oct_dev);
+}
+
+/**
+ * \brief PCI initialization for each Octeon device.
+ * @param oct octeon device
+ */
+static int octeon_pci_os_setup(struct octeon_device *oct)
+{
+#ifdef CONFIG_PCI_IOV
+ /* setup PCI stuff first */
+ if (!oct->pci_dev->physfn)
+ octeon_pci_flr(oct);
+#endif
+
+ if (pci_enable_device(oct->pci_dev)) {
+ dev_err(&oct->pci_dev->dev, "pci_enable_device failed\n");
+ return 1;
+ }
+
+ if (dma_set_mask_and_coherent(&oct->pci_dev->dev, DMA_BIT_MASK(64))) {
+ dev_err(&oct->pci_dev->dev, "Unexpected DMA device capability\n");
+ pci_disable_device(oct->pci_dev);
+ return 1;
+ }
+
+ /* Enable PCI DMA Master. */
+ pci_set_master(oct->pci_dev);
+
+ return 0;
+}
+
+static int skb_iq(struct lio *lio, struct sk_buff *skb)
+{
+ int q = 0;
+
+ if (netif_is_multiqueue(lio->netdev))
+ q = skb->queue_mapping % lio->linfo.num_txpciq;
+
+ return q;
+}
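+
+/* Example: with num_txpciq = 4 and skb->queue_mapping = 6, skb_iq()
+ * returns 6 % 4 = 2; stack queues beyond the ring count wrap onto the
+ * available IQ rings.
+ */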
+
+/**
+ * \brief Check Tx queue state for a given network buffer
+ * @param lio per-network private data
+ * @param skb network buffer
+ */
+static int check_txq_state(struct lio *lio, struct sk_buff *skb)
+{
+ int q = 0, iq = 0;
+
+ if (netif_is_multiqueue(lio->netdev)) {
+ q = skb->queue_mapping;
+ iq = lio->linfo.txpciq[(q % (lio->linfo.num_txpciq))].s.q_no;
+ } else {
+ iq = lio->txq;
+ q = iq;
+ }
+
+ if (octnet_iq_is_full(lio->oct_dev, iq))
+ return 0;
+
+ if (__netif_subqueue_stopped(lio->netdev, q)) {
+ INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, iq, tx_restart, 1);
+ wake_q(lio->netdev, q);
+ }
+
+ return 1;
+}
+
+/**
+ * \brief Unmap and free network buffer
+ * @param buf buffer
+ */
+static void free_netbuf(void *buf)
+{
+ struct octnet_buf_free_info *finfo;
+ struct sk_buff *skb;
+ struct lio *lio;
+
+ finfo = (struct octnet_buf_free_info *)buf;
+ skb = finfo->skb;
+ lio = finfo->lio;
+
+ dma_unmap_single(&lio->oct_dev->pci_dev->dev, finfo->dptr, skb->len,
+ DMA_TO_DEVICE);
+
+ check_txq_state(lio, skb);
+
+ tx_buffer_free(skb);
+}
+
+/**
+ * \brief Unmap and free gather buffer
+ * @param buf buffer
+ */
+static void free_netsgbuf(void *buf)
+{
+ struct octnet_buf_free_info *finfo;
+ struct octnic_gather *g;
+ struct sk_buff *skb;
+ int i, frags, iq;
+ struct lio *lio;
+
+ finfo = (struct octnet_buf_free_info *)buf;
+ skb = finfo->skb;
+ lio = finfo->lio;
+ g = finfo->g;
+ frags = skb_shinfo(skb)->nr_frags;
+
+ dma_unmap_single(&lio->oct_dev->pci_dev->dev,
+ g->sg[0].ptr[0], (skb->len - skb->data_len),
+ DMA_TO_DEVICE);
+
+ i = 1;
+ while (frags--) {
+ struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i - 1];
+
+ pci_unmap_page((lio->oct_dev)->pci_dev,
+ g->sg[(i >> 2)].ptr[(i & 3)],
+ frag->size, DMA_TO_DEVICE);
+ i++;
+ }
+
+ dma_unmap_single(&lio->oct_dev->pci_dev->dev,
+ finfo->dptr, g->sg_size,
+ DMA_TO_DEVICE);
+
+ iq = skb_iq(lio, skb);
+
+ spin_lock(&lio->glist_lock[iq]);
+ list_add_tail(&g->list, &lio->glist[iq]);
+ spin_unlock(&lio->glist_lock[iq]);
+
+ check_txq_state(lio, skb); /* mq support: sub-queue state check */
+
+ tx_buffer_free(skb);
+}
+
+/**
+ * \brief Unmap and free gather buffer with response
+ * @param buf buffer
+ */
+static void free_netsgbuf_with_resp(void *buf)
+{
+ struct octnet_buf_free_info *finfo;
+ struct octeon_soft_command *sc;
+ struct octnic_gather *g;
+ struct sk_buff *skb;
+ int i, frags, iq;
+ struct lio *lio;
+
+ sc = (struct octeon_soft_command *)buf;
+ skb = (struct sk_buff *)sc->callback_arg;
+ finfo = (struct octnet_buf_free_info *)&skb->cb;
+
+ lio = finfo->lio;
+ g = finfo->g;
+ frags = skb_shinfo(skb)->nr_frags;
+
+ dma_unmap_single(&lio->oct_dev->pci_dev->dev,
+ g->sg[0].ptr[0], (skb->len - skb->data_len),
+ DMA_TO_DEVICE);
+
+ i = 1;
+ while (frags--) {
+ struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i - 1];
+
+ pci_unmap_page((lio->oct_dev)->pci_dev,
+ g->sg[(i >> 2)].ptr[(i & 3)],
+ frag->size, DMA_TO_DEVICE);
+ i++;
+ }
+
+ dma_unmap_single(&lio->oct_dev->pci_dev->dev,
+ finfo->dptr, g->sg_size,
+ DMA_TO_DEVICE);
+
+ iq = skb_iq(lio, skb);
+
+ spin_lock(&lio->glist_lock[iq]);
+ list_add_tail(&g->list, &lio->glist[iq]);
+ spin_unlock(&lio->glist_lock[iq]);
+
+ /* Don't free the skb yet */
+
+ check_txq_state(lio, skb);
+}
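+
+/* The g->sg[i >> 2].ptr[i & 3] indexing above reflects the gather-list
+ * layout: each octeon_sg_entry packs four DMA pointers, so logical
+ * pointer i lives in entry i / 4, slot i % 4. Slot 0 of entry 0 holds the
+ * linear skb data and frag k occupies logical pointer k + 1; for example,
+ * frag 4 (i = 5) is unmapped from g->sg[1].ptr[1].
+ */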
+
+/**
+ * \brief Setup output queue
+ * @param oct octeon device
+ * @param q_no which queue
+ * @param num_descs how many descriptors
+ * @param desc_size size of each descriptor
+ * @param app_ctx application context
+ */
+static int octeon_setup_droq(struct octeon_device *oct, int q_no, int num_descs,
+ int desc_size, void *app_ctx)
+{
+ int ret_val;
+
+ dev_dbg(&oct->pci_dev->dev, "Creating Droq: %d\n", q_no);
+ /* droq creation and local register settings. */
+ ret_val = octeon_create_droq(oct, q_no, num_descs, desc_size, app_ctx);
+ if (ret_val < 0)
+ return ret_val;
+
+ if (ret_val == 1) {
+ dev_dbg(&oct->pci_dev->dev, "Using default droq %d\n", q_no);
+ return 0;
+ }
+
+ /* Enable the droq queues */
+ octeon_set_droq_pkt_op(oct, q_no, 1);
+
+ /* Send Credit for Octeon Output queues. Credits are always
+ * sent after the output queue is enabled.
+ */
+ writel(oct->droq[q_no]->max_count, oct->droq[q_no]->pkts_credit_reg);
+
+ return ret_val;
+}
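+
+/* Writing max_count to pkts_credit_reg advertises every descriptor in the
+ * ring to the hardware as Rx credit; presumably the refill path returns
+ * credit the same way as buffers are reposted.
+ */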
+
+/**
+ * \brief Callback for getting interface configuration
+ * @param oct octeon device
+ * @param status status of request
+ * @param buf pointer to the soft command
+ */
+static void if_cfg_callback(struct octeon_device *oct,
+ u32 status __attribute__((unused)), void *buf)
+{
+ struct octeon_soft_command *sc = (struct octeon_soft_command *)buf;
+ struct liquidio_if_cfg_context *ctx;
+ struct liquidio_if_cfg_resp *resp;
+
+ resp = (struct liquidio_if_cfg_resp *)sc->virtrptr;
+ ctx = (struct liquidio_if_cfg_context *)sc->ctxptr;
+
+ oct = lio_get_device(ctx->octeon_id);
+ if (resp->status)
+ dev_err(&oct->pci_dev->dev, "nic if cfg instruction failed. Status: %llx\n",
+ CVM_CAST64(resp->status));
+ WRITE_ONCE(ctx->cond, 1);
+
+ snprintf(oct->fw_info.liquidio_firmware_version, 32, "%s",
+ resp->cfg_info.liquidio_firmware_version);
+
+ /* This barrier is required to be sure that the response has been
+ * written fully before waking up the handler
+ */
+ wmb();
+
+ wake_up_interruptible(&ctx->wc);
+}
+
+/**
+ * \brief Select queue based on hash
+ * @param dev Net device
+ * @param skb sk_buff structure
+ * @returns selected queue number
+ */
+static u16 select_q(struct net_device *dev, struct sk_buff *skb,
+ void *accel_priv __attribute__((unused)),
+ select_queue_fallback_t fallback __attribute__((unused)))
+{
+ struct lio *lio;
+ u32 qindex;
+
+ lio = GET_LIO(dev);
+
+ qindex = skb_tx_hash(dev, skb);
+
+ return (u16)(qindex % (lio->linfo.num_txpciq));
+}
+
+/** Routine to push packets arriving on the Octeon interface up to the network layer.
+ * @param oct_id - octeon device id.
+ * @param skbuff - skbuff struct to be passed to network layer.
+ * @param len - size of total data received.
+ * @param rh - Control header associated with the packet
+ * @param param - additional control data with the packet
+ * @param arg - farg registered in droq_ops
+ */
+static void
+liquidio_push_packet(u32 octeon_id __attribute__((unused)),
+ void *skbuff,
+ u32 len,
+ union octeon_rh *rh,
+ void *param,
+ void *arg)
+{
+ struct napi_struct *napi = param;
+ struct octeon_droq *droq =
+ container_of(param, struct octeon_droq, napi);
+ struct net_device *netdev = (struct net_device *)arg;
+ struct sk_buff *skb = (struct sk_buff *)skbuff;
+ u16 vtag = 0;
+
+ if (netdev) {
+ struct lio *lio = GET_LIO(netdev);
+ int packet_was_received;
+
+ /* Do not proceed if the interface is not in RUNNING state. */
+ if (!ifstate_check(lio, LIO_IFSTATE_RUNNING)) {
+ recv_buffer_free(skb);
+ droq->stats.rx_dropped++;
+ return;
+ }
+
+ skb->dev = netdev;
+
+ skb_record_rx_queue(skb, droq->q_no);
+ if (likely(len > MIN_SKB_SIZE)) {
+ struct octeon_skb_page_info *pg_info;
+ unsigned char *va;
+
+ pg_info = ((struct octeon_skb_page_info *)(skb->cb));
+ if (pg_info->page) {
+ /* For Paged allocation use the frags */
+ va = page_address(pg_info->page) +
+ pg_info->page_offset;
+ memcpy(skb->data, va, MIN_SKB_SIZE);
+ skb_put(skb, MIN_SKB_SIZE);
+ skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
+ pg_info->page,
+ pg_info->page_offset +
+ MIN_SKB_SIZE,
+ len - MIN_SKB_SIZE,
+ LIO_RXBUFFER_SZ);
+ }
+ } else {
+ struct octeon_skb_page_info *pg_info =
+ ((struct octeon_skb_page_info *)(skb->cb));
+ skb_copy_to_linear_data(skb,
+ page_address(pg_info->page) +
+ pg_info->page_offset, len);
+ skb_put(skb, len);
+ put_page(pg_info->page);
+ }
+
+ skb_pull(skb, rh->r_dh.len * 8);
+ skb->protocol = eth_type_trans(skb, skb->dev);
+
+ if ((netdev->features & NETIF_F_RXCSUM) &&
+ (((rh->r_dh.encap_on) &&
+ (rh->r_dh.csum_verified & CNNIC_TUN_CSUM_VERIFIED)) ||
+ (!(rh->r_dh.encap_on) &&
+ (rh->r_dh.csum_verified & CNNIC_CSUM_VERIFIED))))
+ /* checksum has already been verified */
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ else
+ skb->ip_summed = CHECKSUM_NONE;
+
+ /* Setting Encapsulation field on basis of status received
+ * from the firmware
+ */
+ if (rh->r_dh.encap_on) {
+ skb->encapsulation = 1;
+ skb->csum_level = 1;
+ droq->stats.rx_vxlan++;
+ }
+
+ /* inbound VLAN tag */
+ if ((netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
+ rh->r_dh.vlan) {
+ u16 priority = rh->r_dh.priority;
+ u16 vid = rh->r_dh.vlan;
+
+ vtag = (priority << VLAN_PRIO_SHIFT) | vid;
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vtag);
+ }
+
+ packet_was_received = (napi_gro_receive(napi, skb) != GRO_DROP);
+
+ if (packet_was_received) {
+ droq->stats.rx_bytes_received += len;
+ droq->stats.rx_pkts_received++;
+ netdev->last_rx = jiffies;
+ } else {
+ droq->stats.rx_dropped++;
+ netif_info(lio, rx_err, lio->netdev,
+ "droq:%d error rx_dropped:%llu\n",
+ droq->q_no, droq->stats.rx_dropped);
+ }
+
+ } else {
+ recv_buffer_free(skb);
+ }
+}
+
+/**
+ * \brief callback when receive interrupt occurs and we are in NAPI mode
+ * @param arg pointer to octeon output queue
+ */
+static void liquidio_vf_napi_drv_callback(void *arg)
+{
+ struct octeon_droq *droq = arg;
+
+ napi_schedule_irqoff(&droq->napi);
+}
+
+/**
+ * \brief Entry point for NAPI polling
+ * @param napi NAPI structure
+ * @param budget maximum number of items to process
+ */
+static int liquidio_napi_poll(struct napi_struct *napi, int budget)
+{
+ struct octeon_instr_queue *iq;
+ struct octeon_device *oct;
+ struct octeon_droq *droq;
+ int tx_done = 0, iq_no;
+ int work_done;
+
+ droq = container_of(napi, struct octeon_droq, napi);
+ oct = droq->oct_dev;
+ iq_no = droq->q_no;
+
+ /* Handle Droq descriptors */
+ work_done = octeon_process_droq_poll_cmd(oct, droq->q_no,
+ POLL_EVENT_PROCESS_PKTS,
+ budget);
+
+ /* Flush the instruction queue */
+ iq = oct->instr_queue[iq_no];
+ if (iq) {
+ /* Process IQ buffers within the budget limit */
+ tx_done = octeon_flush_iq(oct, iq, 1, budget);
+ /* Update the IQ read index rather than waiting for the next
+ * interrupt. tx_done is false if the budget was exhausted.
+ */
+ update_txq_status(oct, iq_no);
+ } else {
+ dev_err(&oct->pci_dev->dev, "%s: iq (%d) num invalid\n",
+ __func__, iq_no);
+ }
+
+ if ((work_done < budget) && (tx_done)) {
+ napi_complete(napi);
+ octeon_process_droq_poll_cmd(droq->oct_dev, droq->q_no,
+ POLL_EVENT_ENABLE_INTR, 0);
+ return 0;
+ }
+
+ return (!tx_done) ? (budget) : (work_done);
+}
+
+/**
+ * \brief Setup input and output queues
+ * @param octeon_dev octeon device
+ * @param ifidx Interface index
+ *
+ * Note: Queues are with respect to the octeon device. Thus
+ * an input queue is for egress packets, and output queues
+ * are for ingress packets.
+ */
+static int setup_io_queues(struct octeon_device *octeon_dev, int ifidx)
+{
+ struct octeon_droq_ops droq_ops;
+ struct net_device *netdev;
+ static int cpu_id_modulus;
+ struct octeon_droq *droq;
+ struct napi_struct *napi;
+ static int cpu_id;
+ int num_tx_descs;
+ struct lio *lio;
+ int retval = 0;
+ int q, q_no;
+
+ netdev = octeon_dev->props[ifidx].netdev;
+
+ lio = GET_LIO(netdev);
+
+ memset(&droq_ops, 0, sizeof(struct octeon_droq_ops));
+
+ droq_ops.fptr = liquidio_push_packet;
+ droq_ops.farg = netdev;
+
+ droq_ops.poll_mode = 1;
+ droq_ops.napi_fn = liquidio_vf_napi_drv_callback;
+ cpu_id = 0;
+ cpu_id_modulus = num_present_cpus();
+
+ /* set up DROQs. */
+ for (q = 0; q < lio->linfo.num_rxpciq; q++) {
+ q_no = lio->linfo.rxpciq[q].s.q_no;
+
+ retval = octeon_setup_droq(
+ octeon_dev, q_no,
+ CFG_GET_NUM_RX_DESCS_NIC_IF(octeon_get_conf(octeon_dev),
+ lio->ifidx),
+ CFG_GET_NUM_RX_BUF_SIZE_NIC_IF(octeon_get_conf(octeon_dev),
+ lio->ifidx),
+ NULL);
+ if (retval) {
+ dev_err(&octeon_dev->pci_dev->dev,
+ "%s : Runtime DROQ(RxQ) creation failed.\n",
+ __func__);
+ return 1;
+ }
+
+ droq = octeon_dev->droq[q_no];
+ napi = &droq->napi;
+ netif_napi_add(netdev, napi, liquidio_napi_poll, 64);
+
+ /* designate a CPU for this droq */
+ droq->cpu_id = cpu_id;
+ cpu_id++;
+ if (cpu_id >= cpu_id_modulus)
+ cpu_id = 0;
+
+ octeon_register_droq_ops(octeon_dev, q_no, &droq_ops);
+ }
+
+ /* 23XX VF can send/recv control messages (via the first VF-owned
+ * droq) from the firmware even if the ethX interface is down,
+ * which is why poll_mode must be off for the first droq.
+ */
+ octeon_dev->droq[0]->ops.poll_mode = 0;
+
+ /* set up IQs. */
+ for (q = 0; q < lio->linfo.num_txpciq; q++) {
+ num_tx_descs = CFG_GET_NUM_TX_DESCS_NIC_IF(
+ octeon_get_conf(octeon_dev), lio->ifidx);
+ retval = octeon_setup_iq(octeon_dev, ifidx, q,
+ lio->linfo.txpciq[q], num_tx_descs,
+ netdev_get_tx_queue(netdev, q));
+ if (retval) {
+ dev_err(&octeon_dev->pci_dev->dev,
+ " %s : Runtime IQ(TxQ) creation failed.\n",
+ __func__);
+ return 1;
+ }
+ }
+
+ return 0;
+}
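+
+/* Sketch of the result for a VF with rings_per_vf = 4: DROQs
+ * rxpciq[0..3] each get their own NAPI context, IQs txpciq[0..3] are
+ * bound to the matching netdev Tx queues, and only droq[0] has poll_mode
+ * cleared so firmware control messages on that ring are still serviced
+ * while the interface is down.
+ */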
+
+/**
+ * \brief Net device open for LiquidIO
+ * @param netdev network device
+ */
+static int liquidio_open(struct net_device *netdev)
+{
+ struct lio *lio = GET_LIO(netdev);
+ struct octeon_device *oct = lio->oct_dev;
+ struct napi_struct *napi, *n;
+
+ if (!oct->props[lio->ifidx].napi_enabled) {
+ list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
+ napi_enable(napi);
+
+ oct->props[lio->ifidx].napi_enabled = 1;
+
+ oct->droq[0]->ops.poll_mode = 1;
+ }
+
+ ifstate_set(lio, LIO_IFSTATE_RUNNING);
+
+ /* Ready for link status updates */
+ lio->intf_open = 1;
+
+ netif_info(lio, ifup, lio->netdev, "Interface Open, ready for traffic\n");
+ start_txq(netdev);
+
+ /* tell Octeon to start forwarding packets to host */
+ send_rx_ctrl_cmd(lio, 1);
+
+ dev_info(&oct->pci_dev->dev, "%s interface is opened\n", netdev->name);
+
+ return 0;
+}
+
+/**
+ * \brief Net device stop for LiquidIO
+ * @param netdev network device
+ */
+static int liquidio_stop(struct net_device *netdev)
+{
+ struct lio *lio = GET_LIO(netdev);
+ struct octeon_device *oct = lio->oct_dev;
+
+ netif_info(lio, ifdown, lio->netdev, "Stopping interface!\n");
+ /* Inform that netif carrier is down */
+ lio->intf_open = 0;
+ lio->linfo.link.s.link_up = 0;
+
+ netif_carrier_off(netdev);
+ lio->link_changes++;
+
+ /* tell Octeon to stop forwarding packets to host */
+ send_rx_ctrl_cmd(lio, 0);
+
+ ifstate_reset(lio, LIO_IFSTATE_RUNNING);
+
+ txqs_stop(netdev);
+
+ dev_info(&oct->pci_dev->dev, "%s interface is stopped\n", netdev->name);
+
+ return 0;
+}
+
+/**
+ * \brief Convert net device flags to an octnet_ifflags mask
+ * @param netdev network device
+ *
+ * This routine generates an octnet_ifflags mask from the net device flags
+ * received from the OS.
+ */
+static enum octnet_ifflags get_new_flags(struct net_device *netdev)
+{
+ enum octnet_ifflags f = OCTNET_IFFLAG_UNICAST;
+
+ if (netdev->flags & IFF_PROMISC)
+ f |= OCTNET_IFFLAG_PROMISC;
+
+ if (netdev->flags & IFF_ALLMULTI)
+ f |= OCTNET_IFFLAG_ALLMULTI;
+
+ if (netdev->flags & IFF_MULTICAST) {
+ f |= OCTNET_IFFLAG_MULTICAST;
+
+ /* Accept all multicast addresses if there are more than we
+ * can handle
+ */
+ if (netdev_mc_count(netdev) > MAX_OCTEON_MULTICAST_ADDR)
+ f |= OCTNET_IFFLAG_ALLMULTI;
+ }
+
+ if (netdev->flags & IFF_BROADCAST)
+ f |= OCTNET_IFFLAG_BROADCAST;
+
+ return f;
+}
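+
+/* The resulting mapping, for reference:
+ *
+ *   IFF_PROMISC   -> OCTNET_IFFLAG_PROMISC
+ *   IFF_ALLMULTI  -> OCTNET_IFFLAG_ALLMULTI
+ *   IFF_MULTICAST -> OCTNET_IFFLAG_MULTICAST (plus ALLMULTI on overflow)
+ *   IFF_BROADCAST -> OCTNET_IFFLAG_BROADCAST
+ *
+ * with OCTNET_IFFLAG_UNICAST always set.
+ */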
+
+static void liquidio_set_uc_list(struct net_device *netdev)
+{
+ struct lio *lio = GET_LIO(netdev);
+ struct octeon_device *oct = lio->oct_dev;
+ struct octnic_ctrl_pkt nctrl;
+ struct netdev_hw_addr *ha;
+ u64 *mac;
+
+ if (lio->netdev_uc_count == netdev_uc_count(netdev))
+ return;
+
+ if (netdev_uc_count(netdev) > MAX_NCTRL_UDD) {
+ dev_err(&oct->pci_dev->dev, "too many MAC addresses in netdev uc list\n");
+ return;
+ }
+
+ lio->netdev_uc_count = netdev_uc_count(netdev);
+
+ memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
+ nctrl.ncmd.s.cmd = OCTNET_CMD_SET_UC_LIST;
+ nctrl.ncmd.s.more = lio->netdev_uc_count;
+ nctrl.ncmd.s.param1 = oct->vf_num;
+ nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
+ nctrl.netpndev = (u64)netdev;
+ nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
+
+ /* copy all the addresses into the udd */
+ mac = &nctrl.udd[0];
+ netdev_for_each_uc_addr(ha, netdev) {
+ ether_addr_copy(((u8 *)mac) + 2, ha->addr);
+ mac++;
+ }
+
+ octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
+}
+
+/**
+ * \brief Net device set_multicast_list
+ * @param netdev network device
+ */
+static void liquidio_set_mcast_list(struct net_device *netdev)
+{
+ int mc_count = min(netdev_mc_count(netdev), MAX_OCTEON_MULTICAST_ADDR);
+ struct lio *lio = GET_LIO(netdev);
+ struct octeon_device *oct = lio->oct_dev;
+ struct octnic_ctrl_pkt nctrl;
+ struct netdev_hw_addr *ha;
+ u64 *mc;
+ int ret;
+
+ memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
+
+ /* Create a ctrl pkt command to be sent to core app. */
+ nctrl.ncmd.u64 = 0;
+ nctrl.ncmd.s.cmd = OCTNET_CMD_SET_MULTI_LIST;
+ nctrl.ncmd.s.param1 = get_new_flags(netdev);
+ nctrl.ncmd.s.param2 = mc_count;
+ nctrl.ncmd.s.more = mc_count;
+ nctrl.netpndev = (u64)netdev;
+ nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
+
+ /* copy all the addresses into the udd */
+ mc = &nctrl.udd[0];
+ netdev_for_each_mc_addr(ha, netdev) {
+ *mc = 0;
+ ether_addr_copy(((u8 *)mc) + 2, ha->addr);
+ /* no need to swap bytes */
+ if (++mc > &nctrl.udd[mc_count])
+ break;
+ }
+
+ nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
+
+ /* The ndo_set_rx_mode callback runs in atomic context, so we
+ * cannot wait for the response.
+ */
+ nctrl.wait_time = 0;
+
+ ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
+ if (ret < 0) {
+ dev_err(&oct->pci_dev->dev, "DEVFLAGS change failed in core (ret: 0x%x)\n",
+ ret);
+ }
+
+ liquidio_set_uc_list(netdev);
+}
+
+/**
+ * \brief Net device set_mac_address
+ * @param netdev network device
+ */
+static int liquidio_set_mac(struct net_device *netdev, void *p)
+{
+ struct sockaddr *addr = (struct sockaddr *)p;
+ struct lio *lio = GET_LIO(netdev);
+ struct octeon_device *oct = lio->oct_dev;
+ struct octnic_ctrl_pkt nctrl;
+ int ret = 0;
+
+ if (!is_valid_ether_addr(addr->sa_data))
+ return -EADDRNOTAVAIL;
+
+ if (ether_addr_equal(addr->sa_data, netdev->dev_addr))
+ return 0;
+
+ if (lio->linfo.macaddr_is_admin_asgnd)
+ return -EPERM;
+
+ memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
+
+ nctrl.ncmd.u64 = 0;
+ nctrl.ncmd.s.cmd = OCTNET_CMD_CHANGE_MACADDR;
+ nctrl.ncmd.s.param1 = 0;
+ nctrl.ncmd.s.more = 1;
+ nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
+ nctrl.netpndev = (u64)netdev;
+ nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
+ nctrl.wait_time = 100;
+
+ nctrl.udd[0] = 0;
+ /* The MAC Address is presented in network byte order. */
+ ether_addr_copy((u8 *)&nctrl.udd[0] + 2, addr->sa_data);
+
+ ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
+ if (ret < 0) {
+ dev_err(&oct->pci_dev->dev, "MAC Address change failed\n");
+ return -ENOMEM;
+ }
+ memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
+ ether_addr_copy(((u8 *)&lio->linfo.hw_addr) + 2, addr->sa_data);
+
+ return 0;
+}
+
+/**
+ * \brief Net device get_stats
+ * @param netdev network device
+ */
+static struct net_device_stats *liquidio_get_stats(struct net_device *netdev)
+{
+ struct lio *lio = GET_LIO(netdev);
+ struct net_device_stats *stats = &netdev->stats;
+ u64 pkts = 0, drop = 0, bytes = 0;
+ struct oct_droq_stats *oq_stats;
+ struct oct_iq_stats *iq_stats;
+ struct octeon_device *oct;
+ int i, iq_no, oq_no;
+
+ oct = lio->oct_dev;
+
+ for (i = 0; i < lio->linfo.num_txpciq; i++) {
+ iq_no = lio->linfo.txpciq[i].s.q_no;
+ iq_stats = &oct->instr_queue[iq_no]->stats;
+ pkts += iq_stats->tx_done;
+ drop += iq_stats->tx_dropped;
+ bytes += iq_stats->tx_tot_bytes;
+ }
+
+ stats->tx_packets = pkts;
+ stats->tx_bytes = bytes;
+ stats->tx_dropped = drop;
+
+ pkts = 0;
+ drop = 0;
+ bytes = 0;
+
+ for (i = 0; i < lio->linfo.num_rxpciq; i++) {
+ oq_no = lio->linfo.rxpciq[i].s.q_no;
+ oq_stats = &oct->droq[oq_no]->stats;
+ pkts += oq_stats->rx_pkts_received;
+ drop += (oq_stats->rx_dropped +
+ oq_stats->dropped_nodispatch +
+ oq_stats->dropped_toomany +
+ oq_stats->dropped_nomem);
+ bytes += oq_stats->rx_bytes_received;
+ }
+
+ stats->rx_bytes = bytes;
+ stats->rx_packets = pkts;
+ stats->rx_dropped = drop;
+
+ return stats;
+}
+
+/**
+ * \brief Net device change_mtu
+ * @param netdev network device
+ */
+static int liquidio_change_mtu(struct net_device *netdev, int new_mtu)
+{
+ struct lio *lio = GET_LIO(netdev);
+ struct octeon_device *oct = lio->oct_dev;
+
+ lio->mtu = new_mtu;
+
+ netif_info(lio, probe, lio->netdev, "MTU Changed from %d to %d\n",
+ netdev->mtu, new_mtu);
+ dev_info(&oct->pci_dev->dev, "%s MTU Changed from %d to %d\n",
+ netdev->name, netdev->mtu, new_mtu);
+
+ netdev->mtu = new_mtu;
+
+ return 0;
+}
+
+/**
+ * \brief Handler for SIOCSHWTSTAMP ioctl
+ * @param netdev network device
+ * @param ifr interface request
+ */
+static int hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr)
+{
+ struct lio *lio = GET_LIO(netdev);
+ struct hwtstamp_config conf;
+
+ if (copy_from_user(&conf, ifr->ifr_data, sizeof(conf)))
+ return -EFAULT;
+
+ if (conf.flags)
+ return -EINVAL;
+
+ switch (conf.tx_type) {
+ case HWTSTAMP_TX_ON:
+ case HWTSTAMP_TX_OFF:
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ switch (conf.rx_filter) {
+ case HWTSTAMP_FILTER_NONE:
+ break;
+ case HWTSTAMP_FILTER_ALL:
+ case HWTSTAMP_FILTER_SOME:
+ case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+ conf.rx_filter = HWTSTAMP_FILTER_ALL;
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ if (conf.rx_filter == HWTSTAMP_FILTER_ALL)
+ ifstate_set(lio, LIO_IFSTATE_RX_TIMESTAMP_ENABLED);
+ else
+ ifstate_reset(lio, LIO_IFSTATE_RX_TIMESTAMP_ENABLED);
+
+ return copy_to_user(ifr->ifr_data, &conf, sizeof(conf)) ? -EFAULT : 0;
+}
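+
+/* A userspace sketch of driving this handler (hypothetical socket and
+ * interface name; the SIOCSHWTSTAMP plumbing itself is standard):
+ *
+ *   struct hwtstamp_config cfg = {
+ *       .tx_type   = HWTSTAMP_TX_ON,
+ *       .rx_filter = HWTSTAMP_FILTER_ALL,
+ *   };
+ *   struct ifreq ifr;
+ *
+ *   strncpy(ifr.ifr_name, "eth0", IFNAMSIZ);
+ *   ifr.ifr_data = (void *)&cfg;
+ *   ioctl(sock, SIOCSHWTSTAMP, &ifr);   // lands in hwtstamp_ioctl()
+ *
+ * Any PTP rx_filter is coerced to HWTSTAMP_FILTER_ALL and reported back
+ * through the copy_to_user() above.
+ */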
+
+/**
+ * \brief ioctl handler
+ * @param netdev network device
+ * @param ifr interface request
+ * @param cmd command
+ */
+static int liquidio_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
+{
+ switch (cmd) {
+ case SIOCSHWTSTAMP:
+ return hwtstamp_ioctl(netdev, ifr);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static void handle_timestamp(struct octeon_device *oct, u32 status, void *buf)
+{
+ struct sk_buff *skb = (struct sk_buff *)buf;
+ struct octnet_buf_free_info *finfo;
+ struct oct_timestamp_resp *resp;
+ struct octeon_soft_command *sc;
+ struct lio *lio;
+
+ finfo = (struct octnet_buf_free_info *)skb->cb;
+ lio = finfo->lio;
+ sc = finfo->sc;
+ oct = lio->oct_dev;
+ resp = (struct oct_timestamp_resp *)sc->virtrptr;
+
+ if (status != OCTEON_REQUEST_DONE) {
+ dev_err(&oct->pci_dev->dev, "Tx timestamp instruction failed. Status: %llx\n",
+ CVM_CAST64(status));
+ resp->timestamp = 0;
+ }
+
+ octeon_swap_8B_data(&resp->timestamp, 1);
+
+ if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
+ struct skb_shared_hwtstamps ts;
+ u64 ns = resp->timestamp;
+
+ netif_info(lio, tx_done, lio->netdev,
+ "Got resulting SKBTX_HW_TSTAMP skb=%p ns=%016llu\n",
+ skb, (unsigned long long)ns);
+ ts.hwtstamp = ns_to_ktime(ns + lio->ptp_adjust);
+ skb_tstamp_tx(skb, &ts);
+ }
+
+ octeon_free_soft_command(oct, sc);
+ tx_buffer_free(skb);
+}
+
+/** \brief Send a data packet that will be timestamped
+ * @param oct octeon device
+ * @param ndata pointer to network data
+ * @param finfo pointer to private network data
+ */
+static int send_nic_timestamp_pkt(struct octeon_device *oct,
+ struct octnic_data_pkt *ndata,
+ struct octnet_buf_free_info *finfo)
+{
+ struct octeon_soft_command *sc;
+ int ring_doorbell;
+ struct lio *lio;
+ int retval;
+ u32 len;
+
+ lio = finfo->lio;
+
+ sc = octeon_alloc_soft_command_resp(oct, &ndata->cmd,
+ sizeof(struct oct_timestamp_resp));
+ finfo->sc = sc;
+
+ if (!sc) {
+ dev_err(&oct->pci_dev->dev, "No memory for timestamped data packet\n");
+ return IQ_SEND_FAILED;
+ }
+
+ if (ndata->reqtype == REQTYPE_NORESP_NET)
+ ndata->reqtype = REQTYPE_RESP_NET;
+ else if (ndata->reqtype == REQTYPE_NORESP_NET_SG)
+ ndata->reqtype = REQTYPE_RESP_NET_SG;
+
+ sc->callback = handle_timestamp;
+ sc->callback_arg = finfo->skb;
+ sc->iq_no = ndata->q_no;
+
+ len = (u32)((struct octeon_instr_ih3 *)(&sc->cmd.cmd3.ih3))->dlengsz;
+
+ ring_doorbell = 1;
+
+ retval = octeon_send_command(oct, sc->iq_no, ring_doorbell, &sc->cmd,
+ sc, len, ndata->reqtype);
+
+ if (retval == IQ_SEND_FAILED) {
+ dev_err(&oct->pci_dev->dev, "timestamp data packet failed status: %x\n",
+ retval);
+ octeon_free_soft_command(oct, sc);
+ } else {
+ netif_info(lio, tx_queued, lio->netdev, "Queued timestamp packet\n");
+ }
+
+ return retval;
+}
+
+/** \brief Transmit network packets to the Octeon interface
+ * @param skbuff skbuff struct to be passed to network layer.
+ * @param netdev pointer to network device
+ * @returns whether the packet was transmitted to the device okay or not
+ * (NETDEV_TX_OK or NETDEV_TX_BUSY)
+ */
+static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
+{
+ struct octnet_buf_free_info *finfo;
+ union octnic_cmd_setup cmdsetup;
+ struct octnic_data_pkt ndata;
+ struct octeon_instr_irh *irh;
+ struct oct_iq_stats *stats;
+ struct octeon_device *oct;
+ int q_idx = 0, iq_no = 0;
+ union tx_info *tx_info;
+ struct lio *lio;
+ int status = 0;
+ u64 dptr = 0;
+ u32 tag = 0;
+ int j;
+
+ lio = GET_LIO(netdev);
+ oct = lio->oct_dev;
+
+ if (netif_is_multiqueue(netdev)) {
+ q_idx = skb->queue_mapping;
+ q_idx = (q_idx % (lio->linfo.num_txpciq));
+ tag = q_idx;
+ iq_no = lio->linfo.txpciq[q_idx].s.q_no;
+ } else {
+ iq_no = lio->txq;
+ }
+
+ stats = &oct->instr_queue[iq_no]->stats;
+
+ /* Check for all conditions in which the current packet cannot be
+ * transmitted.
+ */
+ if (!(atomic_read(&lio->ifstate) & LIO_IFSTATE_RUNNING) ||
+ (!lio->linfo.link.s.link_up) || (skb->len <= 0)) {
+ netif_info(lio, tx_err, lio->netdev, "Transmit failed link_status : %d\n",
+ lio->linfo.link.s.link_up);
+ goto lio_xmit_failed;
+ }
+
+ /* Use space in skb->cb to store info used to unmap and
+ * free the buffers.
+ */
+ finfo = (struct octnet_buf_free_info *)skb->cb;
+ finfo->lio = lio;
+ finfo->skb = skb;
+ finfo->sc = NULL;
+
+ /* Prepare the attributes for the data to be passed to OSI. */
+ memset(&ndata, 0, sizeof(struct octnic_data_pkt));
+
+ ndata.buf = finfo;
+
+ ndata.q_no = iq_no;
+
+ if (netif_is_multiqueue(netdev)) {
+ if (octnet_iq_is_full(oct, ndata.q_no)) {
+ /* defer sending if queue is full */
+ netif_info(lio, tx_err, lio->netdev, "Transmit failed iq:%d full\n",
+ ndata.q_no);
+ stats->tx_iq_busy++;
+ return NETDEV_TX_BUSY;
+ }
+ } else {
+ if (octnet_iq_is_full(oct, lio->txq)) {
+ /* defer sending if queue is full */
+ stats->tx_iq_busy++;
+ netif_info(lio, tx_err, lio->netdev, "Transmit failed iq:%d full\n",
+ ndata.q_no);
+ return NETDEV_TX_BUSY;
+ }
+ }
+
+ ndata.datasize = skb->len;
+
+ cmdsetup.u64 = 0;
+ cmdsetup.s.iq_no = iq_no;
+
+ if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ if (skb->encapsulation) {
+ cmdsetup.s.tnl_csum = 1;
+ stats->tx_vxlan++;
+ } else {
+ cmdsetup.s.transport_csum = 1;
+ }
+ }
+ if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+ cmdsetup.s.timestamp = 1;
+ }
+
+ if (!skb_shinfo(skb)->nr_frags) {
+ cmdsetup.s.u.datasize = skb->len;
+ octnet_prepare_pci_cmd(oct, &ndata.cmd, &cmdsetup, tag);
+ /* Map the linear skb data for DMA to the device */
+ dptr = dma_map_single(&oct->pci_dev->dev,
+ skb->data,
+ skb->len,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(&oct->pci_dev->dev, dptr)) {
+ dev_err(&oct->pci_dev->dev, "%s DMA mapping error 1\n",
+ __func__);
+ return NETDEV_TX_BUSY;
+ }
+
+ ndata.cmd.cmd3.dptr = dptr;
+ finfo->dptr = dptr;
+ ndata.reqtype = REQTYPE_NORESP_NET;
+
+ } else {
+ struct skb_frag_struct *frag;
+ struct octnic_gather *g;
+ int i, frags;
+
+ spin_lock(&lio->glist_lock[q_idx]);
+ g = (struct octnic_gather *)list_delete_head(
+ &lio->glist[q_idx]);
+ spin_unlock(&lio->glist_lock[q_idx]);
+
+ if (!g) {
+ netif_info(lio, tx_err, lio->netdev,
+ "Transmit scatter gather: glist null!\n");
+ goto lio_xmit_failed;
+ }
+
+ cmdsetup.s.gather = 1;
+ cmdsetup.s.u.gatherptrs = (skb_shinfo(skb)->nr_frags + 1);
+ octnet_prepare_pci_cmd(oct, &ndata.cmd, &cmdsetup, tag);
+
+ memset(g->sg, 0, g->sg_size);
+
+ g->sg[0].ptr[0] = dma_map_single(&oct->pci_dev->dev,
+ skb->data,
+ (skb->len - skb->data_len),
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(&oct->pci_dev->dev, g->sg[0].ptr[0])) {
+ dev_err(&oct->pci_dev->dev, "%s DMA mapping error 2\n",
+ __func__);
+ return NETDEV_TX_BUSY;
+ }
+ add_sg_size(&g->sg[0], (skb->len - skb->data_len), 0);
+
+ frags = skb_shinfo(skb)->nr_frags;
+ i = 1;
+ while (frags--) {
+ frag = &skb_shinfo(skb)->frags[i - 1];
+
+ g->sg[(i >> 2)].ptr[(i & 3)] =
+ dma_map_page(&oct->pci_dev->dev,
+ frag->page.p,
+ frag->page_offset,
+ frag->size,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(&oct->pci_dev->dev,
+ g->sg[i >> 2].ptr[i & 3])) {
+ dma_unmap_single(&oct->pci_dev->dev,
+ g->sg[0].ptr[0],
+ skb->len - skb->data_len,
+ DMA_TO_DEVICE);
+ for (j = 1; j < i; j++) {
+ frag = &skb_shinfo(skb)->frags[j - 1];
+ dma_unmap_page(&oct->pci_dev->dev,
+ g->sg[j >> 2].ptr[j & 3],
+ frag->size,
+ DMA_TO_DEVICE);
+ }
+ dev_err(&oct->pci_dev->dev, "%s DMA mapping error 3\n",
+ __func__);
+ return NETDEV_TX_BUSY;
+ }
+
+ add_sg_size(&g->sg[(i >> 2)], frag->size, (i & 3));
+ i++;
+ }
+
+ dptr = dma_map_single(&oct->pci_dev->dev,
+ g->sg, g->sg_size,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(&oct->pci_dev->dev, dptr)) {
+ dev_err(&oct->pci_dev->dev, "%s DMA mapping error 4\n",
+ __func__);
+ dma_unmap_single(&oct->pci_dev->dev, g->sg[0].ptr[0],
+ skb->len - skb->data_len,
+ DMA_TO_DEVICE);
+ for (j = 1; j <= frags; j++) {
+ frag = &skb_shinfo(skb)->frags[j - 1];
+ dma_unmap_page(&oct->pci_dev->dev,
+ g->sg[j >> 2].ptr[j & 3],
+ frag->size, DMA_TO_DEVICE);
+ }
+ return NETDEV_TX_BUSY;
+ }
+
+ ndata.cmd.cmd3.dptr = dptr;
+ finfo->dptr = dptr;
+ finfo->g = g;
+
+ ndata.reqtype = REQTYPE_NORESP_NET_SG;
+ }
+
+ irh = (struct octeon_instr_irh *)&ndata.cmd.cmd3.irh;
+ tx_info = (union tx_info *)&ndata.cmd.cmd3.ossp[0];
+
+ if (skb_shinfo(skb)->gso_size) {
+ tx_info->s.gso_size = skb_shinfo(skb)->gso_size;
+ tx_info->s.gso_segs = skb_shinfo(skb)->gso_segs;
+ }
+
+ /* HW insert VLAN tag */
+ if (skb_vlan_tag_present(skb)) {
+ irh->priority = skb_vlan_tag_get(skb) >> VLAN_PRIO_SHIFT;
+ irh->vlan = skb_vlan_tag_get(skb) & VLAN_VID_MASK;
+ }
+
+ if (unlikely(cmdsetup.s.timestamp))
+ status = send_nic_timestamp_pkt(oct, &ndata, finfo);
+ else
+ status = octnet_send_nic_data_pkt(oct, &ndata);
+ if (status == IQ_SEND_FAILED)
+ goto lio_xmit_failed;
+
+ netif_info(lio, tx_queued, lio->netdev, "Transmit queued successfully\n");
+
+ if (status == IQ_SEND_STOP) {
+ dev_err(&oct->pci_dev->dev, "Rcvd IQ_SEND_STOP signal; stopping IQ-%d\n",
+ iq_no);
+ stop_q(lio->netdev, q_idx);
+ }
+
+ netif_trans_update(netdev);
+
+ if (skb_shinfo(skb)->gso_size)
+ stats->tx_done += skb_shinfo(skb)->gso_segs;
+ else
+ stats->tx_done++;
+ stats->tx_tot_bytes += skb->len;
+
+ return NETDEV_TX_OK;
+
+lio_xmit_failed:
+ stats->tx_dropped++;
+ netif_info(lio, tx_err, lio->netdev, "IQ%d Transmit dropped:%llu\n",
+ iq_no, stats->tx_dropped);
+ if (dptr)
+ dma_unmap_single(&oct->pci_dev->dev, dptr,
+ ndata.datasize, DMA_TO_DEVICE);
+ tx_buffer_free(skb);
+ return NETDEV_TX_OK;
+}
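+
+/* Note the two failure styles in liquidio_xmit(): queue-full and DMA
+ * mapping errors return NETDEV_TX_BUSY so the stack will requeue the skb,
+ * while post-send failures land at lio_xmit_failed, which counts the
+ * drop, unmaps dptr if it was set, frees the skb, and returns
+ * NETDEV_TX_OK.
+ */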
+
+/** \brief Network device Tx timeout
+ * @param netdev pointer to network device
+ */
+static void liquidio_tx_timeout(struct net_device *netdev)
+{
+ struct lio *lio;
+
+ lio = GET_LIO(netdev);
+
+ netif_info(lio, tx_err, lio->netdev,
+ "Transmit timeout tx_dropped:%ld, waking up queues now!!\n",
+ netdev->stats.tx_dropped);
+ netif_trans_update(netdev);
+ txqs_wake(netdev);
+}
+
+static int
+liquidio_vlan_rx_add_vid(struct net_device *netdev,
+ __be16 proto __attribute__((unused)), u16 vid)
+{
+ struct lio *lio = GET_LIO(netdev);
+ struct octeon_device *oct = lio->oct_dev;
+ struct octnic_ctrl_pkt nctrl;
+ int ret = 0;
+
+ memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
+
+ nctrl.ncmd.u64 = 0;
+ nctrl.ncmd.s.cmd = OCTNET_CMD_ADD_VLAN_FILTER;
+ nctrl.ncmd.s.param1 = vid;
+ nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
+ nctrl.wait_time = 100;
+ nctrl.netpndev = (u64)netdev;
+ nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
+
+ ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
+ if (ret < 0) {
+ dev_err(&oct->pci_dev->dev, "Add VLAN filter failed in core (ret: 0x%x)\n",
+ ret);
+ }
+
+ return ret;
+}
+
+static int
+liquidio_vlan_rx_kill_vid(struct net_device *netdev,
+ __be16 proto __attribute__((unused)), u16 vid)
+{
+ struct lio *lio = GET_LIO(netdev);
+ struct octeon_device *oct = lio->oct_dev;
+ struct octnic_ctrl_pkt nctrl;
+ int ret = 0;
+
+ memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
+
+ nctrl.ncmd.u64 = 0;
+ nctrl.ncmd.s.cmd = OCTNET_CMD_DEL_VLAN_FILTER;
+ nctrl.ncmd.s.param1 = vid;
+ nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
+ nctrl.wait_time = 100;
+ nctrl.netpndev = (u64)netdev;
+ nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
+
+ ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
+ if (ret < 0) {
+ dev_err(&oct->pci_dev->dev, "Del VLAN filter failed in core (ret: 0x%x)\n",
+ ret);
+ }
+ return ret;
+}
+
+/** Send command to enable/disable RX checksum offload
+ * @param netdev pointer to network device
+ * @param command OCTNET_CMD_TNL_RX_CSUM_CTL
+ * @param rx_cmd OCTNET_CMD_RXCSUM_ENABLE/
+ * OCTNET_CMD_RXCSUM_DISABLE
+ * @returns SUCCESS or FAILURE
+ */
+static int liquidio_set_rxcsum_command(struct net_device *netdev, int command,
+ u8 rx_cmd)
+{
+ struct lio *lio = GET_LIO(netdev);
+ struct octeon_device *oct = lio->oct_dev;
+ struct octnic_ctrl_pkt nctrl;
+ int ret = 0;
+
+ memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
+
+ nctrl.ncmd.u64 = 0;
+ nctrl.ncmd.s.cmd = command;
+ nctrl.ncmd.s.param1 = rx_cmd;
+ nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
+ nctrl.wait_time = 100;
+ nctrl.netpndev = (u64)netdev;
+ nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
+
+ ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
+ if (ret < 0) {
+ dev_err(&oct->pci_dev->dev, "DEVFLAGS RXCSUM change failed in core (ret:0x%x)\n",
+ ret);
+ }
+ return ret;
+}
+
+/** Send command to add/delete a VxLAN UDP port to the firmware
+ * @param netdev pointer to network device
+ * @param command OCTNET_CMD_VXLAN_PORT_CONFIG
+ * @param vxlan_port VxLAN port to be added or deleted
+ * @param vxlan_cmd_bit OCTNET_CMD_VXLAN_PORT_ADD,
+ * OCTNET_CMD_VXLAN_PORT_DEL
+ * @returns SUCCESS or FAILURE
+ */
+static int liquidio_vxlan_port_command(struct net_device *netdev, int command,
+ u16 vxlan_port, u8 vxlan_cmd_bit)
+{
+ struct lio *lio = GET_LIO(netdev);
+ struct octeon_device *oct = lio->oct_dev;
+ struct octnic_ctrl_pkt nctrl;
+ int ret = 0;
+
+ memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
+
+ nctrl.ncmd.u64 = 0;
+ nctrl.ncmd.s.cmd = command;
+ nctrl.ncmd.s.more = vxlan_cmd_bit;
+ nctrl.ncmd.s.param1 = vxlan_port;
+ nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
+ nctrl.wait_time = 100;
+ nctrl.netpndev = (u64)netdev;
+ nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
+
+ ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
+ if (ret < 0) {
+ dev_err(&oct->pci_dev->dev,
+ "DEVFLAGS VxLAN port add/delete failed in core (ret : 0x%x)\n",
+ ret);
+ }
+ return ret;
+}
+
+/** \brief Net device fix features
+ * @param netdev pointer to network device
+ * @param request features requested
+ * @returns updated features list
+ */
+static netdev_features_t liquidio_fix_features(struct net_device *netdev,
+ netdev_features_t request)
+{
+ struct lio *lio = netdev_priv(netdev);
+
+ if ((request & NETIF_F_RXCSUM) &&
+ !(lio->dev_capability & NETIF_F_RXCSUM))
+ request &= ~NETIF_F_RXCSUM;
+
+ if ((request & NETIF_F_HW_CSUM) &&
+ !(lio->dev_capability & NETIF_F_HW_CSUM))
+ request &= ~NETIF_F_HW_CSUM;
+
+ if ((request & NETIF_F_TSO) && !(lio->dev_capability & NETIF_F_TSO))
+ request &= ~NETIF_F_TSO;
+
+ if ((request & NETIF_F_TSO6) && !(lio->dev_capability & NETIF_F_TSO6))
+ request &= ~NETIF_F_TSO6;
+
+ if ((request & NETIF_F_LRO) && !(lio->dev_capability & NETIF_F_LRO))
+ request &= ~NETIF_F_LRO;
+
+ /* Disable LRO if RXCSUM is off */
+ if (!(request & NETIF_F_RXCSUM) && (netdev->features & NETIF_F_LRO) &&
+ (lio->dev_capability & NETIF_F_LRO))
+ request &= ~NETIF_F_LRO;
+
+ return request;
+}
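+
+/* Example: a request that clears NETIF_F_RXCSUM while LRO is currently on
+ * also loses NETIF_F_LRO via the last test above, so "ethtool -K ethX
+ * rx off" implicitly disables LRO as well.
+ */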
+
+/** \brief Net device set features
+ * @param netdev pointer to network device
+ * @param features features to enable/disable
+ */
+static int liquidio_set_features(struct net_device *netdev,
+ netdev_features_t features)
+{
+ struct lio *lio = netdev_priv(netdev);
+
+ if (!((netdev->features ^ features) & NETIF_F_LRO))
+ return 0;
+
+ if ((features & NETIF_F_LRO) && (lio->dev_capability & NETIF_F_LRO))
+ liquidio_set_feature(netdev, OCTNET_CMD_LRO_ENABLE,
+ OCTNIC_LROIPV4 | OCTNIC_LROIPV6);
+ else if (!(features & NETIF_F_LRO) &&
+ (lio->dev_capability & NETIF_F_LRO))
+ liquidio_set_feature(netdev, OCTNET_CMD_LRO_DISABLE,
+ OCTNIC_LROIPV4 | OCTNIC_LROIPV6);
+ if (!(netdev->features & NETIF_F_RXCSUM) &&
+ (lio->enc_dev_capability & NETIF_F_RXCSUM) &&
+ (features & NETIF_F_RXCSUM))
+ liquidio_set_rxcsum_command(netdev, OCTNET_CMD_TNL_RX_CSUM_CTL,
+ OCTNET_CMD_RXCSUM_ENABLE);
+ else if ((netdev->features & NETIF_F_RXCSUM) &&
+ (lio->enc_dev_capability & NETIF_F_RXCSUM) &&
+ !(features & NETIF_F_RXCSUM))
+ liquidio_set_rxcsum_command(netdev, OCTNET_CMD_TNL_RX_CSUM_CTL,
+ OCTNET_CMD_RXCSUM_DISABLE);
+
+ return 0;
+}
+
+static void liquidio_add_vxlan_port(struct net_device *netdev,
+ struct udp_tunnel_info *ti)
+{
+ if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
+ return;
+
+ liquidio_vxlan_port_command(netdev,
+ OCTNET_CMD_VXLAN_PORT_CONFIG,
+ htons(ti->port),
+ OCTNET_CMD_VXLAN_PORT_ADD);
+}
+
+static void liquidio_del_vxlan_port(struct net_device *netdev,
+ struct udp_tunnel_info *ti)
+{
+ if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
+ return;
+
+ liquidio_vxlan_port_command(netdev,
+ OCTNET_CMD_VXLAN_PORT_CONFIG,
+ htons(ti->port),
+ OCTNET_CMD_VXLAN_PORT_DEL);
+}
+
+static const struct net_device_ops lionetdevops = {
+ .ndo_open = liquidio_open,
+ .ndo_stop = liquidio_stop,
+ .ndo_start_xmit = liquidio_xmit,
+ .ndo_get_stats = liquidio_get_stats,
+ .ndo_set_mac_address = liquidio_set_mac,
+ .ndo_set_rx_mode = liquidio_set_mcast_list,
+ .ndo_tx_timeout = liquidio_tx_timeout,
+ .ndo_vlan_rx_add_vid = liquidio_vlan_rx_add_vid,
+ .ndo_vlan_rx_kill_vid = liquidio_vlan_rx_kill_vid,
+ .ndo_change_mtu = liquidio_change_mtu,
+ .ndo_do_ioctl = liquidio_ioctl,
+ .ndo_fix_features = liquidio_fix_features,
+ .ndo_set_features = liquidio_set_features,
+ .ndo_udp_tunnel_add = liquidio_add_vxlan_port,
+ .ndo_udp_tunnel_del = liquidio_del_vxlan_port,
+ .ndo_select_queue = select_q,
+};
+
+static int lio_nic_info(struct octeon_recv_info *recv_info, void *buf)
+{
+ struct octeon_device *oct = (struct octeon_device *)buf;
+ struct octeon_recv_pkt *recv_pkt = recv_info->recv_pkt;
+ union oct_link_status *ls;
+ int gmxport = 0;
+ int i;
+
+ if (recv_pkt->buffer_size[0] != sizeof(*ls)) {
+ dev_err(&oct->pci_dev->dev, "Malformed NIC_INFO, len=%d, gmxport=%d\n",
+ recv_pkt->buffer_size[0],
+ recv_pkt->rh.r_nic_info.gmxport);
+ goto nic_info_err;
+ }
+
+ gmxport = recv_pkt->rh.r_nic_info.gmxport;
+ ls = (union oct_link_status *)get_rbd(recv_pkt->buffer_ptr[0]);
+
+ octeon_swap_8B_data((u64 *)ls, (sizeof(union oct_link_status)) >> 3);
+
+ for (i = 0; i < oct->ifcount; i++) {
+ if (oct->props[i].gmxport == gmxport) {
+ update_link_status(oct->props[i].netdev, ls);
+ break;
+ }
+ }
+
+nic_info_err:
+ for (i = 0; i < recv_pkt->buffer_count; i++)
+ recv_buffer_free(recv_pkt->buffer_ptr[i]);
+ octeon_free_recv_info(recv_info);
+ return 0;
+}
+
+/**
+ * \brief Setup network interfaces
+ * @param octeon_dev octeon device
+ *
+ * Called during init time for each device. It assumes the NIC
+ * is already up and running. The link information for each
+ * interface is passed in link_info.
+ */
+static int setup_nic_devices(struct octeon_device *octeon_dev)
+{
+ int retval, num_iqueues, num_oqueues;
+ struct liquidio_if_cfg_context *ctx;
+ u32 resp_size, ctx_size, data_size;
+ struct liquidio_if_cfg_resp *resp;
+ struct octeon_soft_command *sc;
+ union oct_nic_if_cfg if_cfg;
+ struct octdev_props *props;
+ struct net_device *netdev;
+ struct lio_version *vdata;
+ struct lio *lio = NULL;
+ u8 mac[ETH_ALEN], i, j;
+ u32 ifidx_or_pfnum;
+
+ ifidx_or_pfnum = octeon_dev->pf_num;
+
+ /* This is to handle link status changes */
+ octeon_register_dispatch_fn(octeon_dev, OPCODE_NIC, OPCODE_NIC_INFO,
+ lio_nic_info, octeon_dev);
+
+ /* REQTYPE_RESP_NET and REQTYPE_SOFT_COMMAND do not have free functions.
+ * They are handled directly.
+ */
+ octeon_register_reqtype_free_fn(octeon_dev, REQTYPE_NORESP_NET,
+ free_netbuf);
+
+ octeon_register_reqtype_free_fn(octeon_dev, REQTYPE_NORESP_NET_SG,
+ free_netsgbuf);
+
+ octeon_register_reqtype_free_fn(octeon_dev, REQTYPE_RESP_NET_SG,
+ free_netsgbuf_with_resp);
+
+ for (i = 0; i < octeon_dev->ifcount; i++) {
+ resp_size = sizeof(struct liquidio_if_cfg_resp);
+ ctx_size = sizeof(struct liquidio_if_cfg_context);
+ data_size = sizeof(struct lio_version);
+ sc = (struct octeon_soft_command *)
+ octeon_alloc_soft_command(octeon_dev, data_size,
+ resp_size, ctx_size);
+ if (!sc) {
+ dev_err(&octeon_dev->pci_dev->dev, "Soft command allocation failed\n");
+ goto setup_nic_wait_intr;
+ }
+ resp = (struct liquidio_if_cfg_resp *)sc->virtrptr;
+ ctx = (struct liquidio_if_cfg_context *)sc->ctxptr;
+ vdata = (struct lio_version *)sc->virtdptr;
+
+ *((u64 *)vdata) = 0;
+ vdata->major = cpu_to_be16(LIQUIDIO_BASE_MAJOR_VERSION);
+ vdata->minor = cpu_to_be16(LIQUIDIO_BASE_MINOR_VERSION);
+ vdata->micro = cpu_to_be16(LIQUIDIO_BASE_MICRO_VERSION);
+
+ WRITE_ONCE(ctx->cond, 0);
+ ctx->octeon_id = lio_get_device_id(octeon_dev);
+ init_waitqueue_head(&ctx->wc);
+
+ if_cfg.u64 = 0;
+
+ if_cfg.s.num_iqueues = octeon_dev->sriov_info.rings_per_vf;
+ if_cfg.s.num_oqueues = octeon_dev->sriov_info.rings_per_vf;
+ if_cfg.s.base_queue = 0;
+
+ sc->iq_no = 0;
+
+ octeon_prepare_soft_command(octeon_dev, sc, OPCODE_NIC,
+ OPCODE_NIC_IF_CFG, 0, if_cfg.u64,
+ 0);
+
+ sc->callback = if_cfg_callback;
+ sc->callback_arg = sc;
+ sc->wait_time = 5000;
+
+ retval = octeon_send_soft_command(octeon_dev, sc);
+ if (retval == IQ_SEND_FAILED) {
+ dev_err(&octeon_dev->pci_dev->dev,
+ "iq/oq config failed status: %x\n", retval);
+ /* Soft instr is freed by driver in case of failure. */
+ goto setup_nic_dev_fail;
+ }
+
+ /* Sleep on a wait queue till the cond flag indicates that the
+ * response arrived or timed-out.
+ */
+ if (sleep_cond(&ctx->wc, &ctx->cond) == -EINTR) {
+ dev_err(&octeon_dev->pci_dev->dev, "Wait interrupted\n");
+ goto setup_nic_wait_intr;
+ }
+
+ retval = resp->status;
+ if (retval) {
+ dev_err(&octeon_dev->pci_dev->dev, "iq/oq config failed\n");
+ goto setup_nic_dev_fail;
+ }
+
+ octeon_swap_8B_data((u64 *)(&resp->cfg_info),
+ (sizeof(struct liquidio_if_cfg_info)) >> 3);
+
+ num_iqueues = hweight64(resp->cfg_info.iqmask);
+ num_oqueues = hweight64(resp->cfg_info.oqmask);
+
+ if (!(num_iqueues) || !(num_oqueues)) {
+ dev_err(&octeon_dev->pci_dev->dev,
+ "Got bad iqueues (%016llx) or oqueues (%016llx) from firmware.\n",
+ resp->cfg_info.iqmask, resp->cfg_info.oqmask);
+ goto setup_nic_dev_fail;
+ }
+ dev_dbg(&octeon_dev->pci_dev->dev,
+ "interface %d, iqmask %016llx, oqmask %016llx, numiqueues %d, numoqueues %d\n",
+ i, resp->cfg_info.iqmask, resp->cfg_info.oqmask,
+ num_iqueues, num_oqueues);
+
+ netdev = alloc_etherdev_mq(LIO_SIZE, num_iqueues);
+
+ if (!netdev) {
+ dev_err(&octeon_dev->pci_dev->dev, "Device allocation failed\n");
+ goto setup_nic_dev_fail;
+ }
+
+ SET_NETDEV_DEV(netdev, &octeon_dev->pci_dev->dev);
+
+ /* Associate the routines that will handle different
+ * netdev tasks.
+ */
+ netdev->netdev_ops = &lionetdevops;
+
+ lio = GET_LIO(netdev);
+
+ memset(lio, 0, sizeof(struct lio));
+
+ lio->ifidx = ifidx_or_pfnum;
+
+ props = &octeon_dev->props[i];
+ props->gmxport = resp->cfg_info.linfo.gmxport;
+ props->netdev = netdev;
+
+ lio->linfo.num_rxpciq = num_oqueues;
+ lio->linfo.num_txpciq = num_iqueues;
+
+ for (j = 0; j < num_oqueues; j++) {
+ lio->linfo.rxpciq[j].u64 =
+ resp->cfg_info.linfo.rxpciq[j].u64;
+ }
+ for (j = 0; j < num_iqueues; j++) {
+ lio->linfo.txpciq[j].u64 =
+ resp->cfg_info.linfo.txpciq[j].u64;
+ }
+
+ lio->linfo.hw_addr = resp->cfg_info.linfo.hw_addr;
+ lio->linfo.gmxport = resp->cfg_info.linfo.gmxport;
+ lio->linfo.link.u64 = resp->cfg_info.linfo.link.u64;
+ lio->linfo.macaddr_is_admin_asgnd =
+ resp->cfg_info.linfo.macaddr_is_admin_asgnd;
+
+ lio->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
+
+ lio->dev_capability = NETIF_F_HIGHDMA
+ | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM
+ | NETIF_F_SG | NETIF_F_RXCSUM
+ | NETIF_F_TSO | NETIF_F_TSO6
+ | NETIF_F_GRO
+ | NETIF_F_LRO;
+ netif_set_gso_max_size(netdev, OCTNIC_GSO_MAX_SIZE);
+
+ /* Copy of transmit encapsulation capabilities:
+ * TSO, TSO6, Checksums for this device
+ */
+ lio->enc_dev_capability = NETIF_F_IP_CSUM
+ | NETIF_F_IPV6_CSUM
+ | NETIF_F_GSO_UDP_TUNNEL
+ | NETIF_F_HW_CSUM | NETIF_F_SG
+ | NETIF_F_RXCSUM
+ | NETIF_F_TSO | NETIF_F_TSO6
+ | NETIF_F_LRO;
+
+ netdev->hw_enc_features =
+ (lio->enc_dev_capability & ~NETIF_F_LRO);
+ netdev->vlan_features = lio->dev_capability;
+ /* Add any unchangeable hw features */
+ lio->dev_capability |= NETIF_F_HW_VLAN_CTAG_FILTER |
+ NETIF_F_HW_VLAN_CTAG_RX |
+ NETIF_F_HW_VLAN_CTAG_TX;
+
+ netdev->features = (lio->dev_capability & ~NETIF_F_LRO);
+
+ netdev->hw_features = lio->dev_capability;
+
+ /* MTU range: 68 - 16000 */
+ netdev->min_mtu = LIO_MIN_MTU_SIZE;
+ netdev->max_mtu = LIO_MAX_MTU_SIZE;
+
+ /* Point to the properties for octeon device to which this
+ * interface belongs.
+ */
+ lio->oct_dev = octeon_dev;
+ lio->octprops = props;
+ lio->netdev = netdev;
+
+ dev_dbg(&octeon_dev->pci_dev->dev,
+ "if%d gmx: %d hw_addr: 0x%llx\n", i,
+ lio->linfo.gmxport, CVM_CAST64(lio->linfo.hw_addr));
+
+ /* 64-bit swap required on LE machines */
+ octeon_swap_8B_data(&lio->linfo.hw_addr, 1);
+ for (j = 0; j < ETH_ALEN; j++)
+ mac[j] = *((u8 *)(((u8 *)&lio->linfo.hw_addr) + 2 + j));
+
+ /* Copy MAC Address to OS network device structure */
+ ether_addr_copy(netdev->dev_addr, mac);
+
+ if (setup_io_queues(octeon_dev, i)) {
+ dev_err(&octeon_dev->pci_dev->dev, "I/O queues creation failed\n");
+ goto setup_nic_dev_fail;
+ }
+
+ ifstate_set(lio, LIO_IFSTATE_DROQ_OPS);
+
+ /* For VFs, enable Octeon device interrupts here,
+ * as this is contingent upon IO queue setup
+ */
+ octeon_dev->fn_list.enable_interrupt(octeon_dev,
+ OCTEON_ALL_INTR);
+
+ /* By default all interfaces on a single Octeon use the same
+ * tx and rx queues
+ */
+ lio->txq = lio->linfo.txpciq[0].s.q_no;
+ lio->rxq = lio->linfo.rxpciq[0].s.q_no;
+
+ lio->tx_qsize = octeon_get_tx_qsize(octeon_dev, lio->txq);
+ lio->rx_qsize = octeon_get_rx_qsize(octeon_dev, lio->rxq);
+
+ if (setup_glists(lio, num_iqueues)) {
+ dev_err(&octeon_dev->pci_dev->dev,
+ "Gather list allocation failed\n");
+ goto setup_nic_dev_fail;
+ }
+
+ /* Register ethtool support */
+ liquidio_set_ethtool_ops(netdev);
+ if (lio->oct_dev->chip_id == OCTEON_CN23XX_VF_VID)
+ octeon_dev->priv_flags = OCT_PRIV_FLAG_DEFAULT;
+ else
+ octeon_dev->priv_flags = 0x0;
+
+ if (netdev->features & NETIF_F_LRO)
+ liquidio_set_feature(netdev, OCTNET_CMD_LRO_ENABLE,
+ OCTNIC_LROIPV4 | OCTNIC_LROIPV6);
+
+ if ((debug != -1) && (debug & NETIF_MSG_HW))
+ liquidio_set_feature(netdev, OCTNET_CMD_VERBOSE_ENABLE,
+ 0);
+
+ if (setup_link_status_change_wq(netdev))
+ goto setup_nic_dev_fail;
+
+ /* Register the network device with the OS */
+ if (register_netdev(netdev)) {
+ dev_err(&octeon_dev->pci_dev->dev, "Device registration failed\n");
+ goto setup_nic_dev_fail;
+ }
+
+ dev_dbg(&octeon_dev->pci_dev->dev,
+ "Setup NIC ifidx:%d mac:%02x%02x%02x%02x%02x%02x\n",
+ i, mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
+ netif_carrier_off(netdev);
+ lio->link_changes++;
+
+ ifstate_set(lio, LIO_IFSTATE_REGISTERED);
+
+ /* Send a command to the firmware to enable Rx checksum offload
+ * by default at the time this device is set up by the LiquidIO
+ * driver
+ */
+ liquidio_set_rxcsum_command(netdev, OCTNET_CMD_TNL_RX_CSUM_CTL,
+ OCTNET_CMD_RXCSUM_ENABLE);
+ liquidio_set_feature(netdev, OCTNET_CMD_TNL_TX_CSUM_CTL,
+ OCTNET_CMD_TXCSUM_ENABLE);
+
+ dev_dbg(&octeon_dev->pci_dev->dev,
+ "NIC ifidx:%d Setup successful\n", i);
+
+ octeon_free_soft_command(octeon_dev, sc);
+ }
+
+ return 0;
+
+setup_nic_dev_fail:
+
+ octeon_free_soft_command(octeon_dev, sc);
+
+setup_nic_wait_intr:
+
+ while (i--) {
+ dev_err(&octeon_dev->pci_dev->dev,
+ "NIC ifidx:%d Setup failed\n", i);
+ liquidio_destroy_nic_device(octeon_dev, i);
+ }
+ return -ENODEV;
+}
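For reference, one plausible shape for the sleep_cond() helper used above (an assumption for illustration, not part of this patch): the caller parks on the wait queue until the soft-command callback sets the condition flag, and -EINTR is returned if a signal arrives first, matching the check at the call site.

	static inline int sleep_cond(wait_queue_head_t *wait_queue, int *condition)
	{
		int errno = 0;
		wait_queue_t we;

		init_waitqueue_entry(&we, current);
		add_wait_queue(wait_queue, &we);
		while (!(READ_ONCE(*condition))) {
			set_current_state(TASK_INTERRUPTIBLE);
			if (signal_pending(current)) {
				errno = -EINTR;	/* wait was interrupted */
				goto out;
			}
			schedule();	/* woken by the soft-command callback */
		}
	out:
		set_current_state(TASK_RUNNING);
		remove_wait_queue(wait_queue, &we);
		return errno;
	}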
+
+/**
+ * \brief initialize the NIC
+ * @param oct octeon device
+ *
+ * This initialization routine is called once the Octeon device application is
+ * up and running
+ */
+static int liquidio_init_nic_module(struct octeon_device *oct)
+{
+ struct oct_intrmod_cfg *intrmod_cfg;
+ int num_nic_ports = 1;
+ int i, retval = 0;
+
+ dev_dbg(&oct->pci_dev->dev, "Initializing network interfaces\n");
+
+ /* Only the default iq and oq were initialized; initialize the
+ * rest as well, and run the port_config command for each port.
+ */
+ oct->ifcount = num_nic_ports;
+ memset(oct->props, 0,
+ sizeof(struct octdev_props) * num_nic_ports);
+
+ for (i = 0; i < MAX_OCTEON_LINKS; i++)
+ oct->props[i].gmxport = -1;
+
+ retval = setup_nic_devices(oct);
+ if (retval) {
+ dev_err(&oct->pci_dev->dev, "Setup NIC devices failed\n");
+ goto octnet_init_failure;
+ }
+
+ /* Initialize interrupt moderation params */
+ intrmod_cfg = &oct->intrmod;
+ intrmod_cfg->rx_enable = 1;
+ intrmod_cfg->check_intrvl = LIO_INTRMOD_CHECK_INTERVAL;
+ intrmod_cfg->maxpkt_ratethr = LIO_INTRMOD_MAXPKT_RATETHR;
+ intrmod_cfg->minpkt_ratethr = LIO_INTRMOD_MINPKT_RATETHR;
+ intrmod_cfg->rx_maxcnt_trigger = LIO_INTRMOD_RXMAXCNT_TRIGGER;
+ intrmod_cfg->rx_maxtmr_trigger = LIO_INTRMOD_RXMAXTMR_TRIGGER;
+ intrmod_cfg->rx_mintmr_trigger = LIO_INTRMOD_RXMINTMR_TRIGGER;
+ intrmod_cfg->rx_mincnt_trigger = LIO_INTRMOD_RXMINCNT_TRIGGER;
+ intrmod_cfg->tx_enable = 1;
+ intrmod_cfg->tx_maxcnt_trigger = LIO_INTRMOD_TXMAXCNT_TRIGGER;
+ intrmod_cfg->tx_mincnt_trigger = LIO_INTRMOD_TXMINCNT_TRIGGER;
+ intrmod_cfg->rx_frames = CFG_GET_OQ_INTR_PKT(octeon_get_conf(oct));
+ intrmod_cfg->rx_usecs = CFG_GET_OQ_INTR_TIME(octeon_get_conf(oct));
+ intrmod_cfg->tx_frames = CFG_GET_IQ_INTR_PKT(octeon_get_conf(oct));
+ dev_dbg(&oct->pci_dev->dev, "Network interfaces ready\n");
+
+ return retval;
+
+octnet_init_failure:
+
+ oct->ifcount = 0;
+
+ return retval;
+}
+
+/**
+ * \brief Device initialization for each Octeon device that is probed
+ * @param octeon_dev octeon device
+ */
+static int octeon_device_init(struct octeon_device *oct)
+{
+ u32 rev_id;
+ int j;
+
+ atomic_set(&oct->status, OCT_DEV_BEGIN_STATE);
+
+ /* Enable access to the octeon device and make its DMA capability
+ * known to the OS.
+ */
+ if (octeon_pci_os_setup(oct))
+ return 1;
+ atomic_set(&oct->status, OCT_DEV_PCI_ENABLE_DONE);
+
+ oct->chip_id = OCTEON_CN23XX_VF_VID;
+ pci_read_config_dword(oct->pci_dev, 8, &rev_id);
+ oct->rev_id = rev_id & 0xff;
+
+ if (cn23xx_setup_octeon_vf_device(oct))
+ return 1;
+
+ atomic_set(&oct->status, OCT_DEV_PCI_MAP_DONE);
+
+ oct->app_mode = CVM_DRV_NIC_APP;
+
+ /* Initialize the dispatch mechanism used to push packets arriving on
+ * Octeon Output queues.
+ */
+ if (octeon_init_dispatch_list(oct))
+ return 1;
+
+ atomic_set(&oct->status, OCT_DEV_DISPATCH_INIT_DONE);
+
+ if (octeon_set_io_queues_off(oct)) {
+ dev_err(&oct->pci_dev->dev, "setting io queues off failed\n");
+ return 1;
+ }
+
+ if (oct->fn_list.setup_device_regs(oct)) {
+ dev_err(&oct->pci_dev->dev, "device registers configuration failed\n");
+ return 1;
+ }
+
+ /* Initialize soft command buffer pool */
+ if (octeon_setup_sc_buffer_pool(oct)) {
+ dev_err(&oct->pci_dev->dev, "sc buffer pool allocation failed\n");
+ return 1;
+ }
+ atomic_set(&oct->status, OCT_DEV_SC_BUFF_POOL_INIT_DONE);
+
+ /* Setup the data structures that manage this Octeon's Input queues. */
+ if (octeon_setup_instr_queues(oct)) {
+ dev_err(&oct->pci_dev->dev, "instruction queue initialization failed\n");
+ return 1;
+ }
+ atomic_set(&oct->status, OCT_DEV_INSTR_QUEUE_INIT_DONE);
+
+ /* Initialize lists to manage the requests of different types that
+ * arrive from user & kernel applications for this octeon device.
+ */
+ if (octeon_setup_response_list(oct)) {
+ dev_err(&oct->pci_dev->dev, "Response list allocation failed\n");
+ return 1;
+ }
+ atomic_set(&oct->status, OCT_DEV_RESP_LIST_INIT_DONE);
+
+ if (octeon_setup_output_queues(oct)) {
+ dev_err(&oct->pci_dev->dev, "Output queue initialization failed\n");
+ return 1;
+ }
+ atomic_set(&oct->status, OCT_DEV_DROQ_INIT_DONE);
+
+ if (oct->fn_list.setup_mbox(oct)) {
+ dev_err(&oct->pci_dev->dev, "Mailbox setup failed\n");
+ return 1;
+ }
+ atomic_set(&oct->status, OCT_DEV_MBOX_SETUP_DONE);
+
+ if (octeon_allocate_ioq_vector(oct)) {
+ dev_err(&oct->pci_dev->dev, "ioq vector allocation failed\n");
+ return 1;
+ }
+ atomic_set(&oct->status, OCT_DEV_MSIX_ALLOC_VECTOR_DONE);
+
+ dev_info(&oct->pci_dev->dev, "OCTEON_CN23XX VF Version: %s, %d ioqs\n",
+ LIQUIDIO_VERSION, oct->sriov_info.rings_per_vf);
+
+ /* Set up the interrupt handler and record the INT SUM register address */
+ if (octeon_setup_interrupt(oct))
+ return 1;
+
+ if (cn23xx_octeon_pfvf_handshake(oct))
+ return 1;
+
+ /* Enable Octeon device interrupts */
+ oct->fn_list.enable_interrupt(oct, OCTEON_ALL_INTR);
+
+ atomic_set(&oct->status, OCT_DEV_INTR_SET_DONE);
+
+ /* Enable the input and output queues for this Octeon device */
+ if (oct->fn_list.enable_io_queues(oct)) {
+ dev_err(&oct->pci_dev->dev, "enabling io queues failed\n");
+ return 1;
+ }
+
+ atomic_set(&oct->status, OCT_DEV_IO_QUEUES_DONE);
+
+ atomic_set(&oct->status, OCT_DEV_HOST_OK);
+
+ /* Send Credit for Octeon Output queues. Credits are always sent after
+ * the output queue is enabled.
+ */
+ for (j = 0; j < oct->num_oqs; j++)
+ writel(oct->droq[j]->max_count, oct->droq[j]->pkts_credit_reg);
+
+ /* Packets can start arriving on the output queues from this point. */
+
+ atomic_set(&oct->status, OCT_DEV_CORE_OK);
+
+ atomic_set(&oct->status, OCT_DEV_RUNNING);
+
+ if (liquidio_init_nic_module(oct))
+ return 1;
+
+ return 0;
+}
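The OCT_DEV_* states recorded at each step above exist so that teardown can unwind exactly as far as init progressed. A minimal sketch of that pattern follows; the function and the fn_list/free hooks named here are assumptions for illustration, not part of this patch:

	static void example_unwind(struct octeon_device *oct)
	{
		switch (atomic_read(&oct->status)) {
		case OCT_DEV_RUNNING:
		case OCT_DEV_CORE_OK:
		case OCT_DEV_HOST_OK:
		case OCT_DEV_CONSOLE_INIT_DONE:
		case OCT_DEV_IO_QUEUES_DONE:
			/* undo enable_io_queues() */
			oct->fn_list.disable_io_queues(oct);
			/* fall through */
		case OCT_DEV_INTR_SET_DONE:
			/* undo enable_interrupt() */
			oct->fn_list.disable_interrupt(oct, OCTEON_ALL_INTR);
			/* fall through */
		case OCT_DEV_MSIX_ALLOC_VECTOR_DONE:
			octeon_free_ioq_vector(oct);
			/* fall through */
		default:
			/* earlier states: nothing enabled yet */
			break;
		}
	}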
+
+static int __init liquidio_vf_init(void)
+{
+ octeon_init_device_list(0);
+ return pci_register_driver(&liquidio_vf_pci_driver);
+}
+
+static void __exit liquidio_vf_exit(void)
+{
+ pci_unregister_driver(&liquidio_vf_pci_driver);
+
+ pr_info("LiquidIO_VF network module is now unloaded\n");
+}
+
+module_init(liquidio_vf_init);
+module_exit(liquidio_vf_exit);
diff --git a/drivers/net/ethernet/cavium/liquidio/liquidio_common.h b/drivers/net/ethernet/cavium/liquidio/liquidio_common.h
index 0d990accb65e..ba329f6ca779 100644
--- a/drivers/net/ethernet/cavium/liquidio/liquidio_common.h
+++ b/drivers/net/ethernet/cavium/liquidio/liquidio_common.h
@@ -1,25 +1,20 @@
/**********************************************************************
-* Author: Cavium, Inc.
-*
-* Contact: support@cavium.com
-* Please include "LiquidIO" in the subject.
-*
-* Copyright (c) 2003-2015 Cavium, Inc.
-*
-* This file is free software; you can redistribute it and/or modify
-* it under the terms of the GNU General Public License, Version 2, as
-* published by the Free Software Foundation.
-*
-* This file is distributed in the hope that it will be useful, but
-* AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
-* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
-* NONINFRINGEMENT. See the GNU General Public License for more
-* details.
-*
-* This file may also be available under a different license from Cavium.
-* Contact Cavium, Inc. for more information
-**********************************************************************/
-
+ * Author: Cavium, Inc.
+ *
+ * Contact: support@cavium.com
+ * Please include "LiquidIO" in the subject.
+ *
+ * Copyright (c) 2003-2016 Cavium, Inc.
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more details.
+ ***********************************************************************/
/*! \file liquidio_common.h
* \brief Common: Structures and macros used in PCI-NIC package by core and
* host driver.
@@ -68,12 +63,10 @@ enum octeon_tag_type {
*/
#define OPCODE_CORE 0 /* used for generic core operations */
#define OPCODE_NIC 1 /* used for NIC operations */
-#define OPCODE_LAST OPCODE_NIC
-
/* Subcodes are used by host driver/apps to identify the sub-operation
* for the core. They only need to by unique for a given subsystem.
*/
-#define OPCODE_SUBCODE(op, sub) (((op & 0x0f) << 8) | ((sub) & 0x7f))
+#define OPCODE_SUBCODE(op, sub) ((((op) & 0x0f) << 8) | ((sub) & 0x7f))
/** OPCODE_CORE subcodes. For future use. */
@@ -89,13 +82,13 @@ enum octeon_tag_type {
#define OPCODE_NIC_TIMESTAMP 0x07
#define OPCODE_NIC_INTRMOD_CFG 0x08
#define OPCODE_NIC_IF_CFG 0x09
+#define OPCODE_NIC_VF_DRV_NOTICE 0x0A
+#define VF_DRV_LOADED 1
+#define VF_DRV_REMOVED -1
+#define VF_DRV_MACADDR_CHANGED 2
#define CORE_DRV_TEST_SCATTER_OP 0xFFF5
-#define OPCODE_SLOW_PATH(rh) \
- (OPCODE_SUBCODE(rh->r.opcode, rh->r.subcode) != \
- OPCODE_SUBCODE(OPCODE_NIC, OPCODE_NIC_NW_DATA))
-
/* Application codes advertised by the core driver initialization packet. */
#define CVM_DRV_APP_START 0x0
#define CVM_DRV_NO_APP 0
@@ -105,31 +98,15 @@ enum octeon_tag_type {
#define CVM_DRV_INVALID_APP (CVM_DRV_APP_START + 0x2)
#define CVM_DRV_APP_END (CVM_DRV_INVALID_APP - 1)
-/* Macro to increment index.
- * Index is incremented by count; if the sum exceeds
- * max, index is wrapped-around to the start.
- */
-#define INCR_INDEX(index, count, max) \
-do { \
- if (((index) + (count)) >= (max)) \
- index = ((index) + (count)) - (max); \
- else \
- index += (count); \
-} while (0)
-
-#define INCR_INDEX_BY1(index, max) \
-do { \
- if ((++(index)) == (max)) \
- index = 0; \
-} while (0)
-
-#define DECR_INDEX(index, count, max) \
-do { \
- if ((count) > (index)) \
- index = ((max) - ((count - index))); \
- else \
- index -= count; \
-} while (0)
+static inline u32 incr_index(u32 index, u32 count, u32 max)
+{
+ if ((index + count) >= max)
+ index = index + count - max;
+ else
+ index += count;
+
+ return index;
+}
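A quick self-contained check of the wrap-around behaviour the new helper inherits from the macros it replaces (standalone user-space C, not driver code):

	#include <assert.h>
	#include <stdint.h>

	typedef uint32_t u32;

	static inline u32 incr_index(u32 index, u32 count, u32 max)
	{
		if ((index + count) >= max)
			index = index + count - max;
		else
			index += count;

		return index;
	}

	int main(void)
	{
		assert(incr_index(30, 5, 32) == 3);	/* wraps past the ring end */
		assert(incr_index(10, 5, 32) == 15);	/* stays inside the ring */
		return 0;
	}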
#define OCT_BOARD_NAME 32
#define OCT_SERIAL_LEN 64
@@ -235,6 +212,8 @@ static inline void add_sg_size(struct octeon_sg_entry *sg_entry,
#define OCTNET_CMD_ID_ACTIVE 0x1a
+#define OCTNET_CMD_SET_UC_LIST 0x1b
+#define OCTNET_CMD_SET_VF_LINKSTATE 0x1c
#define OCTNET_CMD_VXLAN_PORT_ADD 0x0
#define OCTNET_CMD_VXLAN_PORT_DEL 0x1
#define OCTNET_CMD_RXCSUM_ENABLE 0x0
@@ -731,13 +710,15 @@ struct oct_link_info {
#ifdef __BIG_ENDIAN_BITFIELD
u64 gmxport:16;
- u64 rsvd:32;
+ u64 macaddr_is_admin_asgnd:1;
+ u64 rsvd:31;
u64 num_txpciq:8;
u64 num_rxpciq:8;
#else
u64 num_rxpciq:8;
u64 num_txpciq:8;
- u64 rsvd:32;
+ u64 rsvd:31;
+ u64 macaddr_is_admin_asgnd:1;
u64 gmxport:16;
#endif
@@ -827,6 +808,16 @@ struct oct_link_stats {
};
+static inline int opcode_slow_path(union octeon_rh *rh)
+{
+ u16 subcode1, subcode2;
+
+ subcode1 = OPCODE_SUBCODE((rh)->r.opcode, (rh)->r.subcode);
+ subcode2 = OPCODE_SUBCODE(OPCODE_NIC, OPCODE_NIC_NW_DATA);
+
+ return (subcode2 != subcode1);
+}
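The packing compared by opcode_slow_path() can be sanity-checked in isolation. With the values defined earlier in this header (OPCODE_NIC is 1, OPCODE_NIC_IF_CFG is 0x09), the opcode occupies bits 8-11 and the subcode bits 0-6:

	#include <assert.h>

	#define OPCODE_NIC		1
	#define OPCODE_NIC_IF_CFG	0x09
	#define OPCODE_SUBCODE(op, sub)	((((op) & 0x0f) << 8) | ((sub) & 0x7f))

	int main(void)
	{
		/* opcode 1 in bits 8-11, subcode 0x09 in bits 0-6 */
		assert(OPCODE_SUBCODE(OPCODE_NIC, OPCODE_NIC_IF_CFG) == 0x109);
		return 0;
	}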
+
#define LIO68XX_LED_CTRL_ADDR 0x3501
#define LIO68XX_LED_CTRL_CFGON 0x1f
#define LIO68XX_LED_CTRL_CFGOFF 0x100
diff --git a/drivers/net/ethernet/cavium/liquidio/liquidio_image.h b/drivers/net/ethernet/cavium/liquidio/liquidio_image.h
index 93819bd8602b..78a3685f6fe0 100644
--- a/drivers/net/ethernet/cavium/liquidio/liquidio_image.h
+++ b/drivers/net/ethernet/cavium/liquidio/liquidio_image.h
@@ -1,24 +1,20 @@
/**********************************************************************
-* Author: Cavium, Inc.
-*
-* Contact: support@cavium.com
-* Please include "LiquidIO" in the subject.
-*
-* Copyright (c) 2003-2015 Cavium, Inc.
-*
-* This file is free software; you can redistribute it and/or modify
-* it under the terms of the GNU General Public License, Version 2, as
-* published by the Free Software Foundation.
-*
-* This file is distributed in the hope that it will be useful, but
-* AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
-* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
-* NONINFRINGEMENT. See the GNU General Public License for more
-* details.
-*
-* This file may also be available under a different license from Cavium.
-* Contact Cavium, Inc. for more information
-**********************************************************************/
+ * Author: Cavium, Inc.
+ *
+ * Contact: support@cavium.com
+ * Please include "LiquidIO" in the subject.
+ *
+ * Copyright (c) 2003-2016 Cavium, Inc.
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more details.
+ ***********************************************************************/
#ifndef _LIQUIDIO_IMAGE_H_
#define _LIQUIDIO_IMAGE_H_
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_config.h b/drivers/net/ethernet/cavium/liquidio/octeon_config.h
index c76556809ed1..1cb3514fc949 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_config.h
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_config.h
@@ -1,25 +1,20 @@
/**********************************************************************
-* Author: Cavium, Inc.
-*
-* Contact: support@cavium.com
-* Please include "LiquidIO" in the subject.
-*
-* Copyright (c) 2003-2015 Cavium, Inc.
-*
-* This file is free software; you can redistribute it and/or modify
-* it under the terms of the GNU General Public License, Version 2, as
-* published by the Free Software Foundation.
-*
-* This file is distributed in the hope that it will be useful, but
-* AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
-* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
-* NONINFRINGEMENT. See the GNU General Public License for more
-* details.
-*
-* This file may also be available under a different license from Cavium.
-* Contact Cavium, Inc. for more information
-**********************************************************************/
-
+ * Author: Cavium, Inc.
+ *
+ * Contact: support@cavium.com
+ * Please include "LiquidIO" in the subject.
+ *
+ * Copyright (c) 2003-2016 Cavium, Inc.
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more details.
+ ***********************************************************************/
/*! \file octeon_config.h
* \brief Host Driver: Configuration data structures for the host driver.
*/
@@ -65,9 +60,15 @@
#define DEFAULT_NUM_NIC_PORTS_68XX_210NV 2
/* CN23xx IQ configuration macros */
+#define CN23XX_MAX_VFS_PER_PF_PASS_1_0 8
+#define CN23XX_MAX_VFS_PER_PF_PASS_1_1 31
+#define CN23XX_MAX_VFS_PER_PF 63
+#define CN23XX_MAX_RINGS_PER_VF 8
+
#define CN23XX_MAX_RINGS_PER_PF_PASS_1_0 12
#define CN23XX_MAX_RINGS_PER_PF_PASS_1_1 32
#define CN23XX_MAX_RINGS_PER_PF 64
+#define CN23XX_MAX_RINGS_PER_VF 8
#define CN23XX_MAX_INPUT_QUEUES CN23XX_MAX_RINGS_PER_PF
#define CN23XX_MAX_IQ_DESCRIPTORS 2048
@@ -466,4 +467,7 @@ struct octeon_config {
#define MAX_POSSIBLE_OCTEON_INSTR_QUEUES CN23XX_MAX_INPUT_QUEUES
#define MAX_POSSIBLE_OCTEON_OUTPUT_QUEUES CN23XX_MAX_OUTPUT_QUEUES
+
+#define MAX_POSSIBLE_VFS 64
+
#endif /* __OCTEON_CONFIG_H__ */
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_console.c b/drivers/net/ethernet/cavium/liquidio/octeon_console.c
index 01a50f3b0c8e..3265e0b7923e 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_console.c
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_console.c
@@ -1,25 +1,20 @@
/**********************************************************************
-* Author: Cavium, Inc.
-*
-* Contact: support@cavium.com
-* Please include "LiquidIO" in the subject.
-*
-* Copyright (c) 2003-2015 Cavium, Inc.
-*
-* This file is free software; you can redistribute it and/or modify
-* it under the terms of the GNU General Public License, Version 2, as
-* published by the Free Software Foundation.
-*
-* This file is distributed in the hope that it will be useful, but
-* AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
-* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
-* NONINFRINGEMENT. See the GNU General Public License for more
-* details.
-*
-* This file may also be available under a different license from Cavium.
-* Contact Cavium, Inc. for more information
-**********************************************************************/
-
+ * Author: Cavium, Inc.
+ *
+ * Contact: support@cavium.com
+ * Please include "LiquidIO" in the subject.
+ *
+ * Copyright (c) 2003-2016 Cavium, Inc.
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more details.
+ ***********************************************************************/
/**
* @file octeon_console.c
*/
@@ -76,9 +71,9 @@ MODULE_PARM_DESC(console_bitmask,
#define OCTEON_CONSOLE_POLL_INTERVAL_MS 100 /* 10 times per second */
/* First three members of cvmx_bootmem_desc are left in original
-** positions for backwards compatibility.
-** Assumes big endian target
-*/
+ * positions for backwards compatibility.
+ * Assumes big endian target
+ */
struct cvmx_bootmem_desc {
/** spinlock to control access to list */
u32 lock;
@@ -143,46 +138,6 @@ struct octeon_pci_console_desc {
};
/**
- * This macro returns the size of a member of a structure.
- * Logically it is the same as "sizeof(s::field)" in C++, but
- * C lacks the "::" operator.
- */
-#define SIZEOF_FIELD(s, field) sizeof(((s *)NULL)->field)
-
-/**
- * This macro returns a member of the cvmx_bootmem_desc
- * structure. These members can't be directly addressed as
- * they might be in memory not directly reachable. In the case
- * where bootmem is compiled with LINUX_HOST, the structure
- * itself might be located on a remote Octeon. The argument
- * "field" is the member name of the cvmx_bootmem_desc to read.
- * Regardless of the type of the field, the return type is always
- * a u64.
- */
-#define CVMX_BOOTMEM_DESC_GET_FIELD(oct, field) \
- __cvmx_bootmem_desc_get(oct, oct->bootmem_desc_addr, \
- offsetof(struct cvmx_bootmem_desc, field), \
- SIZEOF_FIELD(struct cvmx_bootmem_desc, field))
-
-#define __cvmx_bootmem_lock(flags) (flags = flags)
-#define __cvmx_bootmem_unlock(flags) (flags = flags)
-
-/**
- * This macro returns a member of the
- * cvmx_bootmem_named_block_desc structure. These members can't
- * be directly addressed as they might be in memory not directly
- * reachable. In the case where bootmem is compiled with
- * LINUX_HOST, the structure itself might be located on a remote
- * Octeon. The argument "field" is the member name of the
- * cvmx_bootmem_named_block_desc to read. Regardless of the type
- * of the field, the return type is always a u64. The "addr"
- * parameter is the physical address of the structure.
- */
-#define CVMX_BOOTMEM_NAMED_GET_FIELD(oct, addr, field) \
- __cvmx_bootmem_desc_get(oct, addr, \
- offsetof(struct cvmx_bootmem_named_block_desc, field), \
- SIZEOF_FIELD(struct cvmx_bootmem_named_block_desc, field))
-/**
* \brief determines if a given console has debug enabled.
* @param console console to check
* @returns 1 = enabled. 0 otherwise
@@ -263,10 +218,15 @@ static int __cvmx_bootmem_check_version(struct octeon_device *oct,
oct->bootmem_desc_addr =
octeon_read_device_mem64(oct,
BOOTLOADER_PCI_READ_DESC_ADDR);
- major_version =
- (u32)CVMX_BOOTMEM_DESC_GET_FIELD(oct, major_version);
- minor_version =
- (u32)CVMX_BOOTMEM_DESC_GET_FIELD(oct, minor_version);
+ major_version = (u32)__cvmx_bootmem_desc_get(
+ oct, oct->bootmem_desc_addr,
+ offsetof(struct cvmx_bootmem_desc, major_version),
+ FIELD_SIZEOF(struct cvmx_bootmem_desc, major_version));
+ minor_version = (u32)__cvmx_bootmem_desc_get(
+ oct, oct->bootmem_desc_addr,
+ offsetof(struct cvmx_bootmem_desc, minor_version),
+ FIELD_SIZEOF(struct cvmx_bootmem_desc, minor_version));
+
dev_dbg(&oct->pci_dev->dev, "%s: major_version=%d\n", __func__,
major_version);
if ((major_version > 3) ||
@@ -289,10 +249,20 @@ static const struct cvmx_bootmem_named_block_desc
u64 named_addr = cvmx_bootmem_phy_named_block_find(oct, name, flags);
if (named_addr) {
- desc->base_addr = CVMX_BOOTMEM_NAMED_GET_FIELD(oct, named_addr,
- base_addr);
- desc->size =
- CVMX_BOOTMEM_NAMED_GET_FIELD(oct, named_addr, size);
+ desc->base_addr = __cvmx_bootmem_desc_get(
+ oct, named_addr,
+ offsetof(struct cvmx_bootmem_named_block_desc,
+ base_addr),
+ FIELD_SIZEOF(
+ struct cvmx_bootmem_named_block_desc,
+ base_addr));
+ desc->size = __cvmx_bootmem_desc_get(oct, named_addr,
+ offsetof(struct cvmx_bootmem_named_block_desc,
+ size),
+ FIELD_SIZEOF(
+ struct cvmx_bootmem_named_block_desc,
+ size));
+
strncpy(desc->name, name, sizeof(desc->name));
desc->name[sizeof(desc->name) - 1] = 0;
return &oct->bootmem_named_block_desc;
@@ -307,22 +277,41 @@ static u64 cvmx_bootmem_phy_named_block_find(struct octeon_device *oct,
{
u64 result = 0;
- __cvmx_bootmem_lock(flags);
if (!__cvmx_bootmem_check_version(oct, 3)) {
u32 i;
- u64 named_block_array_addr =
- CVMX_BOOTMEM_DESC_GET_FIELD(oct,
- named_block_array_addr);
- u32 num_blocks = (u32)
- CVMX_BOOTMEM_DESC_GET_FIELD(oct, nb_num_blocks);
- u32 name_length = (u32)
- CVMX_BOOTMEM_DESC_GET_FIELD(oct, named_block_name_len);
+
+ u64 named_block_array_addr = __cvmx_bootmem_desc_get(
+ oct, oct->bootmem_desc_addr,
+ offsetof(struct cvmx_bootmem_desc,
+ named_block_array_addr),
+ FIELD_SIZEOF(struct cvmx_bootmem_desc,
+ named_block_array_addr));
+ u32 num_blocks = (u32)__cvmx_bootmem_desc_get(
+ oct, oct->bootmem_desc_addr,
+ offsetof(struct cvmx_bootmem_desc,
+ nb_num_blocks),
+ FIELD_SIZEOF(struct cvmx_bootmem_desc,
+ nb_num_blocks));
+
+ u32 name_length = (u32)__cvmx_bootmem_desc_get(
+ oct, oct->bootmem_desc_addr,
+ offsetof(struct cvmx_bootmem_desc,
+ named_block_name_len),
+ FIELD_SIZEOF(struct cvmx_bootmem_desc,
+ named_block_name_len));
+
u64 named_addr = named_block_array_addr;
for (i = 0; i < num_blocks; i++) {
- u64 named_size =
- CVMX_BOOTMEM_NAMED_GET_FIELD(oct, named_addr,
- size);
+ u64 named_size = __cvmx_bootmem_desc_get(
+ oct, named_addr,
+ offsetof(
+ struct cvmx_bootmem_named_block_desc,
+ size),
+ FIELD_SIZEOF(
+ struct cvmx_bootmem_named_block_desc,
+ size));
+
if (name && named_size) {
char *name_tmp =
kmalloc(name_length + 1, GFP_KERNEL);
@@ -347,7 +336,6 @@ static u64 cvmx_bootmem_phy_named_block_find(struct octeon_device *oct,
sizeof(struct cvmx_bootmem_named_block_desc);
}
}
- __cvmx_bootmem_unlock(flags);
return result;
}
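Each open-coded __cvmx_bootmem_desc_get() call above follows one shape: an (offset, size) pair computed with offsetof() and FIELD_SIZEOF() fully describes the remote field to fetch. A standalone illustration, using a simplified stand-in struct (assuming natural alignment), not the driver's full cvmx_bootmem_desc:

	#include <assert.h>
	#include <stddef.h>
	#include <stdint.h>

	#define FIELD_SIZEOF(t, f)	(sizeof(((t *)0)->f))

	struct desc_example {
		uint32_t lock;
		uint32_t flags;
		uint64_t head_addr;
		uint32_t major_version;
		uint32_t minor_version;
	};

	int main(void)
	{
		/* the (offset, size) pair locates major_version in the remote struct */
		assert(offsetof(struct desc_example, major_version) == 16);
		assert(FIELD_SIZEOF(struct desc_example, major_version) == 4);
		return 0;
	}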
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_device.c b/drivers/net/ethernet/cavium/liquidio/octeon_device.c
index 586b68899b06..a8df493a5012 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_device.c
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_device.c
@@ -1,24 +1,20 @@
/**********************************************************************
-* Author: Cavium, Inc.
-*
-* Contact: support@cavium.com
-* Please include "LiquidIO" in the subject.
-*
-* Copyright (c) 2003-2015 Cavium, Inc.
-*
-* This file is free software; you can redistribute it and/or modify
-* it under the terms of the GNU General Public License, Version 2, as
-* published by the Free Software Foundation.
-*
-* This file is distributed in the hope that it will be useful, but
-* AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
-* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
-* NONINFRINGEMENT. See the GNU General Public License for more
-* details.
-*
-* This file may also be available under a different license from Cavium.
-* Contact Cavium, Inc. for more information
-**********************************************************************/
+ * Author: Cavium, Inc.
+ *
+ * Contact: support@cavium.com
+ * Please include "LiquidIO" in the subject.
+ *
+ * Copyright (c) 2003-2016 Cavium, Inc.
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more details.
+ ***********************************************************************/
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
@@ -32,6 +28,7 @@
#include "cn66xx_regs.h"
#include "cn66xx_device.h"
#include "cn23xx_pf_device.h"
+#include "cn23xx_vf_device.h"
/** Default configuration
* for CN66XX OCTEON Models.
@@ -520,11 +517,6 @@ static struct octeon_config default_cn23xx_conf = {
}
};
-enum {
- OCTEON_CONFIG_TYPE_DEFAULT = 0,
- NUM_OCTEON_CONFS,
-};
-
static struct octeon_config_ptr {
u32 conf_type;
} oct_conf_info[MAX_OCTEON_DEVICES] = {
@@ -580,15 +572,17 @@ static void *__retrieve_octeon_config_info(struct octeon_device *oct,
switch (oct_conf_info[oct_id].conf_type) {
case OCTEON_CONFIG_TYPE_DEFAULT:
if (oct->chip_id == OCTEON_CN66XX) {
- ret = (void *)&default_cn66xx_conf;
+ ret = &default_cn66xx_conf;
} else if ((oct->chip_id == OCTEON_CN68XX) &&
(card_type == LIO_210NV)) {
- ret = (void *)&default_cn68xx_210nv_conf;
+ ret = &default_cn68xx_210nv_conf;
} else if ((oct->chip_id == OCTEON_CN68XX) &&
(card_type == LIO_410NV)) {
- ret = (void *)&default_cn68xx_conf;
+ ret = &default_cn68xx_conf;
} else if (oct->chip_id == OCTEON_CN23XX_PF_VID) {
- ret = (void *)&default_cn23xx_conf;
+ ret = &default_cn23xx_conf;
+ } else if (oct->chip_id == OCTEON_CN23XX_VF_VID) {
+ ret = &default_cn23xx_conf;
}
break;
default:
@@ -604,6 +598,7 @@ static int __verify_octeon_config_info(struct octeon_device *oct, void *conf)
case OCTEON_CN68XX:
return lio_validate_cn6xxx_config_info(oct, conf);
case OCTEON_CN23XX_PF_VID:
+ case OCTEON_CN23XX_VF_VID:
return 0;
default:
break;
@@ -649,12 +644,12 @@ void octeon_free_device_mem(struct octeon_device *oct)
int i;
for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) {
- if (oct->io_qmask.oq & (1ULL << i))
+ if (oct->io_qmask.oq & BIT_ULL(i))
vfree(oct->droq[i]);
}
for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) {
- if (oct->io_qmask.iq & (1ULL << i))
+ if (oct->io_qmask.iq & BIT_ULL(i))
vfree(oct->instr_queue[i]);
}
@@ -681,6 +676,9 @@ static struct octeon_device *octeon_allocate_device_mem(u32 pci_id,
case OCTEON_CN23XX_PF_VID:
configsize = sizeof(struct octeon_cn23xx_pf);
break;
+ case OCTEON_CN23XX_VF_VID:
+ configsize = sizeof(struct octeon_cn23xx_vf);
+ break;
default:
pr_err("%s: Unknown PCI Device: 0x%x\n",
__func__,
@@ -756,6 +754,9 @@ octeon_allocate_ioq_vector(struct octeon_device *oct)
if (OCTEON_CN23XX_PF(oct))
num_ioqs = oct->sriov_info.num_pf_rings;
+ else if (OCTEON_CN23XX_VF(oct))
+ num_ioqs = oct->sriov_info.rings_per_vf;
+
size = sizeof(struct octeon_ioq_vector) * num_ioqs;
oct->ioq_vector = vmalloc(size);
@@ -767,6 +768,7 @@ octeon_allocate_ioq_vector(struct octeon_device *oct)
ioq_vector->oct_dev = oct;
ioq_vector->iq_index = i;
ioq_vector->droq_index = i;
+ ioq_vector->mbox = oct->mbox[i];
cpu_num = i % num_online_cpus();
cpumask_set_cpu(cpu_num, &ioq_vector->affinity_mask);
@@ -795,10 +797,11 @@ int octeon_setup_instr_queues(struct octeon_device *oct)
if (OCTEON_CN6XXX(oct))
num_descs =
- CFG_GET_NUM_DEF_TX_DESCS(CHIP_FIELD(oct, cn6xxx, conf));
+ CFG_GET_NUM_DEF_TX_DESCS(CHIP_CONF(oct, cn6xxx));
else if (OCTEON_CN23XX_PF(oct))
- num_descs = CFG_GET_NUM_DEF_TX_DESCS(CHIP_FIELD(oct, cn23xx_pf,
- conf));
+ num_descs = CFG_GET_NUM_DEF_TX_DESCS(CHIP_CONF(oct, cn23xx_pf));
+ else if (OCTEON_CN23XX_VF(oct))
+ num_descs = CFG_GET_NUM_DEF_TX_DESCS(CHIP_CONF(oct, cn23xx_vf));
oct->num_iqs = 0;
@@ -821,6 +824,7 @@ int octeon_setup_instr_queues(struct octeon_device *oct)
if (octeon_init_instr_queue(oct, txpciq, num_descs)) {
/* prevent memory leak */
vfree(oct->instr_queue[0]);
+ oct->instr_queue[0] = NULL;
return 1;
}
@@ -837,14 +841,15 @@ int octeon_setup_output_queues(struct octeon_device *oct)
if (OCTEON_CN6XXX(oct)) {
num_descs =
- CFG_GET_NUM_DEF_RX_DESCS(CHIP_FIELD(oct, cn6xxx, conf));
+ CFG_GET_NUM_DEF_RX_DESCS(CHIP_CONF(oct, cn6xxx));
desc_size =
- CFG_GET_DEF_RX_BUF_SIZE(CHIP_FIELD(oct, cn6xxx, conf));
+ CFG_GET_DEF_RX_BUF_SIZE(CHIP_CONF(oct, cn6xxx));
} else if (OCTEON_CN23XX_PF(oct)) {
- num_descs = CFG_GET_NUM_DEF_RX_DESCS(CHIP_FIELD(oct, cn23xx_pf,
- conf));
- desc_size = CFG_GET_DEF_RX_BUF_SIZE(CHIP_FIELD(oct, cn23xx_pf,
- conf));
+ num_descs = CFG_GET_NUM_DEF_RX_DESCS(CHIP_CONF(oct, cn23xx_pf));
+ desc_size = CFG_GET_DEF_RX_BUF_SIZE(CHIP_CONF(oct, cn23xx_pf));
+ } else if (OCTEON_CN23XX_VF(oct)) {
+ num_descs = CFG_GET_NUM_DEF_RX_DESCS(CHIP_CONF(oct, cn23xx_vf));
+ desc_size = CFG_GET_DEF_RX_BUF_SIZE(CHIP_CONF(oct, cn23xx_vf));
}
oct->num_oqs = 0;
oct->droq[0] = vmalloc_node(sizeof(*oct->droq[0]), numa_node);
@@ -853,19 +858,63 @@ int octeon_setup_output_queues(struct octeon_device *oct)
if (!oct->droq[0])
return 1;
- if (octeon_init_droq(oct, oq_no, num_descs, desc_size, NULL))
+ if (octeon_init_droq(oct, oq_no, num_descs, desc_size, NULL)) {
+ vfree(oct->droq[oq_no]);
+ oct->droq[oq_no] = NULL;
return 1;
+ }
oct->num_oqs++;
return 0;
}
-void octeon_set_io_queues_off(struct octeon_device *oct)
+int octeon_set_io_queues_off(struct octeon_device *oct)
{
+ int loop = BUSY_READING_REG_VF_LOOP_COUNT;
+
if (OCTEON_CN6XXX(oct)) {
octeon_write_csr(oct, CN6XXX_SLI_PKT_INSTR_ENB, 0);
octeon_write_csr(oct, CN6XXX_SLI_PKT_OUT_ENB, 0);
+ } else if (oct->chip_id == OCTEON_CN23XX_VF_VID) {
+ u32 q_no;
+
+ /* IOQs will already be in reset.
+ * If RST bit is set, wait for quiet bit to be set.
+ * Once quiet bit is set, clear the RST bit.
+ */
+ for (q_no = 0; q_no < oct->sriov_info.rings_per_vf; q_no++) {
+ u64 reg_val = octeon_read_csr64(
+ oct, CN23XX_VF_SLI_IQ_PKT_CONTROL64(q_no));
+
+ while ((reg_val & CN23XX_PKT_INPUT_CTL_RST) &&
+ !(reg_val & CN23XX_PKT_INPUT_CTL_QUIET) &&
+ loop) {
+ reg_val = octeon_read_csr64(
+ oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no));
+ loop--;
+ }
+ if (!loop) {
+ dev_err(&oct->pci_dev->dev,
+ "clearing the reset reg failed or setting the quiet reg failed for qno: %u\n",
+ q_no);
+ return -1;
+ }
+
+ reg_val = reg_val & ~CN23XX_PKT_INPUT_CTL_RST;
+ octeon_write_csr64(oct,
+ CN23XX_SLI_IQ_PKT_CONTROL64(q_no),
+ reg_val);
+
+ reg_val = octeon_read_csr64(
+ oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no));
+ if (reg_val & CN23XX_PKT_INPUT_CTL_RST) {
+ dev_err(&oct->pci_dev->dev,
+ "unable to reset qno %u\n", q_no);
+ return -1;
+ }
+ }
}
+ return 0;
}
void octeon_set_droq_pkt_op(struct octeon_device *oct,
@@ -1070,10 +1119,10 @@ int octeon_core_drv_init(struct octeon_recv_info *recv_info, void *buf)
if (OCTEON_CN6XXX(oct))
num_nic_ports =
- CFG_GET_NUM_NIC_PORTS(CHIP_FIELD(oct, cn6xxx, conf));
+ CFG_GET_NUM_NIC_PORTS(CHIP_CONF(oct, cn6xxx));
else if (OCTEON_CN23XX_PF(oct))
num_nic_ports =
- CFG_GET_NUM_NIC_PORTS(CHIP_FIELD(oct, cn23xx_pf, conf));
+ CFG_GET_NUM_NIC_PORTS(CHIP_CONF(oct, cn23xx_pf));
if (atomic_read(&oct->status) >= OCT_DEV_RUNNING) {
dev_err(&oct->pci_dev->dev, "Received CORE OK when device state is 0x%x\n",
@@ -1143,7 +1192,7 @@ int octeon_get_tx_qsize(struct octeon_device *oct, u32 q_no)
{
if (oct && (q_no < MAX_OCTEON_INSTR_QUEUES(oct)) &&
- (oct->io_qmask.iq & (1ULL << q_no)))
+ (oct->io_qmask.iq & BIT_ULL(q_no)))
return oct->instr_queue[q_no]->max_count;
return -1;
@@ -1152,7 +1201,7 @@ int octeon_get_tx_qsize(struct octeon_device *oct, u32 q_no)
int octeon_get_rx_qsize(struct octeon_device *oct, u32 q_no)
{
if (oct && (q_no < MAX_OCTEON_OUTPUT_QUEUES(oct)) &&
- (oct->io_qmask.oq & (1ULL << q_no)))
+ (oct->io_qmask.oq & BIT_ULL(q_no)))
return oct->droq[q_no]->max_count;
return -1;
}
@@ -1168,10 +1217,13 @@ struct octeon_config *octeon_get_conf(struct octeon_device *oct)
if (OCTEON_CN6XXX(oct)) {
default_oct_conf =
- (struct octeon_config *)(CHIP_FIELD(oct, cn6xxx, conf));
+ (struct octeon_config *)(CHIP_CONF(oct, cn6xxx));
} else if (OCTEON_CN23XX_PF(oct)) {
default_oct_conf = (struct octeon_config *)
- (CHIP_FIELD(oct, cn23xx_pf, conf));
+ (CHIP_CONF(oct, cn23xx_pf));
+ } else if (OCTEON_CN23XX_VF(oct)) {
+ default_oct_conf = (struct octeon_config *)
+ (CHIP_CONF(oct, cn23xx_vf));
}
return default_oct_conf;
}
@@ -1322,7 +1374,7 @@ void lio_enable_irq(struct octeon_droq *droq, struct octeon_instr_queue *iq)
/* write resend. Writing RESEND in SLI_PKTX_CNTS should be enough
 * to trigger tx interrupts as well, if they are pending.
 */
- if (oct && OCTEON_CN23XX_PF(oct)) {
+ if (oct && (OCTEON_CN23XX_PF(oct) || OCTEON_CN23XX_VF(oct))) {
if (droq)
writeq(CN23XX_INTR_RESEND, droq->pkts_sent_reg);
/*we race with firmrware here. read and write the IN_DONE_CNTS*/
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_device.h b/drivers/net/ethernet/cavium/liquidio/octeon_device.h
index da15c2ae9330..18f6836250a6 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_device.h
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_device.h
@@ -1,25 +1,20 @@
/**********************************************************************
-* Author: Cavium, Inc.
-*
-* Contact: support@cavium.com
-* Please include "LiquidIO" in the subject.
-*
-* Copyright (c) 2003-2015 Cavium, Inc.
-*
-* This file is free software; you can redistribute it and/or modify
-* it under the terms of the GNU General Public License, Version 2, as
-* published by the Free Software Foundation.
-*
-* This file is distributed in the hope that it will be useful, but
-* AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
-* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
-* NONINFRINGEMENT. See the GNU General Public License for more
-* details.
-*
-* This file may also be available under a different license from Cavium.
-* Contact Cavium, Inc. for more information
-**********************************************************************/
-
+ * Author: Cavium, Inc.
+ *
+ * Contact: support@cavium.com
+ * Please include "LiquidIO" in the subject.
+ *
+ * Copyright (c) 2003-2016 Cavium, Inc.
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more details.
+ ***********************************************************************/
/*! \file octeon_device.h
* \brief Host Driver: This file defines the octeon device structure.
*/
@@ -38,6 +33,7 @@
#define OCTEON_CN68XX 0x0091
#define OCTEON_CN66XX 0x0092
#define OCTEON_CN23XX_PF_VID 0x9702
+#define OCTEON_CN23XX_VF_VID 0x9712
/**RevisionId for the chips */
#define OCTEON_CN23XX_REV_1_0 0x00
@@ -52,7 +48,14 @@ enum octeon_pci_swap_mode {
OCTEON_PCI_32BIT_LW_SWAP = 3
};
+enum {
+ OCTEON_CONFIG_TYPE_DEFAULT = 0,
+ NUM_OCTEON_CONFS,
+};
+
+#define OCTEON_INPUT_INTR (1)
#define OCTEON_OUTPUT_INTR (2)
+#define OCTEON_MBOX_INTR (4)
#define OCTEON_ALL_INTR 0xff
/*--------------- PCI BAR1 index registers -------------*/
@@ -70,26 +73,30 @@ enum octeon_pci_swap_mode {
* as it is initialized.
*/
#define OCT_DEV_BEGIN_STATE 0x0
-#define OCT_DEV_PCI_MAP_DONE 0x1
-#define OCT_DEV_DISPATCH_INIT_DONE 0x2
-#define OCT_DEV_INSTR_QUEUE_INIT_DONE 0x3
-#define OCT_DEV_SC_BUFF_POOL_INIT_DONE 0x4
-#define OCT_DEV_RESP_LIST_INIT_DONE 0x5
-#define OCT_DEV_DROQ_INIT_DONE 0x6
-#define OCT_DEV_IO_QUEUES_DONE 0x7
-#define OCT_DEV_CONSOLE_INIT_DONE 0x8
-#define OCT_DEV_HOST_OK 0x9
-#define OCT_DEV_CORE_OK 0xa
-#define OCT_DEV_RUNNING 0xb
-#define OCT_DEV_IN_RESET 0xc
-#define OCT_DEV_STATE_INVALID 0xd
+#define OCT_DEV_PCI_ENABLE_DONE 0x1
+#define OCT_DEV_PCI_MAP_DONE 0x2
+#define OCT_DEV_DISPATCH_INIT_DONE 0x3
+#define OCT_DEV_INSTR_QUEUE_INIT_DONE 0x4
+#define OCT_DEV_SC_BUFF_POOL_INIT_DONE 0x5
+#define OCT_DEV_RESP_LIST_INIT_DONE 0x6
+#define OCT_DEV_DROQ_INIT_DONE 0x7
+#define OCT_DEV_MBOX_SETUP_DONE 0x8
+#define OCT_DEV_MSIX_ALLOC_VECTOR_DONE 0x9
+#define OCT_DEV_INTR_SET_DONE 0xa
+#define OCT_DEV_IO_QUEUES_DONE 0xb
+#define OCT_DEV_CONSOLE_INIT_DONE 0xc
+#define OCT_DEV_HOST_OK 0xd
+#define OCT_DEV_CORE_OK 0xe
+#define OCT_DEV_RUNNING 0xf
+#define OCT_DEV_IN_RESET 0x10
+#define OCT_DEV_STATE_INVALID 0x11
#define OCT_DEV_STATES OCT_DEV_STATE_INVALID
/** Octeon Device interrupts
- * These interrupt bits are set in int_status filed of
- * octeon_device structure
- */
+ * These interrupt bits are set in the int_status field of
+ * the octeon_device structure
+ */
#define OCT_DEV_INTR_DMA0_FORCE 0x01
#define OCT_DEV_INTR_DMA1_FORCE 0x02
#define OCT_DEV_INTR_PKT_DATA 0x04
@@ -208,6 +215,10 @@ struct octeon_fn_list {
irqreturn_t (*process_interrupt_regs)(void *);
u64 (*msix_interrupt_handler)(void *);
+
+ int (*setup_mbox)(struct octeon_device *);
+ int (*free_mbox)(struct octeon_device *);
+
int (*soft_reset)(struct octeon_device *);
int (*setup_device_regs)(struct octeon_device *);
void (*bar1_idx_setup)(struct octeon_device *, u64, u32, int);
@@ -284,6 +295,7 @@ struct octdev_props {
#define LIO_FLAG_MSIX_ENABLED 0x1
#define MSIX_PO_INT 0x1
#define MSIX_PI_INT 0x2
+#define MSIX_MBOX_INT 0x4
struct octeon_pf_vf_hs_word {
#ifdef __LITTLE_ENDIAN_BITFIELD
@@ -322,14 +334,39 @@ struct octeon_pf_vf_hs_word {
};
struct octeon_sriov_info {
+ /* Number of rings assigned to VF */
+ u32 rings_per_vf;
+
+ /** Max number of VF devices that can be enabled. This variable can
+ * be specified at load time, or it will be derived after allocating
+ * the PF queues; when max_vfs is derived, each VF gets one queue.
+ */
+ u32 max_vfs;
+
+ /** Number of VF devices enabled using sysfs. */
+ u32 num_vfs_alloced;
+
/* Actual rings left for PF device */
u32 num_pf_rings;
- /* SRN of PF usable IO queues */
+ /* SRN of PF usable IO queues */
u32 pf_srn;
+
/* total pf rings */
u32 trs;
+ u32 sriov_enabled;
+
+ /* lookup table that maps DPI ring number to VF pci_dev struct pointer */
+ struct pci_dev *dpiring_to_vfpcidev_lut[MAX_POSSIBLE_VFS];
+
+ u64 vf_macaddr[MAX_POSSIBLE_VFS];
+
+ u16 vf_vlantci[MAX_POSSIBLE_VFS];
+
+ int vf_linkstate[MAX_POSSIBLE_VFS];
+
+ u64 vf_drv_loaded_mask;
};
struct octeon_ioq_vector {
@@ -337,6 +374,7 @@ struct octeon_ioq_vector {
int iq_index;
int droq_index;
int vector;
+ struct octeon_mbox *mbox;
struct cpumask affinity_mask;
u32 ioq_num;
};
@@ -365,8 +403,13 @@ struct octeon_device {
/** Octeon Chip type. */
u16 chip_id;
+
u16 rev_id;
+
u16 pf_num;
+
+ u16 vf_num;
+
/** This device's id - set by the driver. */
u32 octeon_id;
@@ -474,6 +517,9 @@ struct octeon_device {
int msix_on;
+ /** Mail Box details of each octeon queue. */
+ struct octeon_mbox *mbox[MAX_POSSIBLE_VFS];
+
/** IOq information of it's corresponding MSI-X interrupt. */
struct octeon_ioq_vector *ioq_vector;
@@ -490,11 +536,14 @@ struct octeon_device {
#define OCT_DRV_ONLINE 1
#define OCT_DRV_OFFLINE 2
-#define OCTEON_CN6XXX(oct) ((oct->chip_id == OCTEON_CN66XX) || \
- (oct->chip_id == OCTEON_CN68XX))
-#define OCTEON_CN23XX_PF(oct) (oct->chip_id == OCTEON_CN23XX_PF_VID)
-#define CHIP_FIELD(oct, TYPE, field) \
- (((struct octeon_ ## TYPE *)(oct->chip))->field)
+#define OCTEON_CN6XXX(oct) ({ \
+ typeof(oct) _oct = (oct); \
+ ((_oct->chip_id == OCTEON_CN66XX) || \
+ (_oct->chip_id == OCTEON_CN68XX)); })
+#define OCTEON_CN23XX_PF(oct) ((oct)->chip_id == OCTEON_CN23XX_PF_VID)
+#define OCTEON_CN23XX_VF(oct) ((oct)->chip_id == OCTEON_CN23XX_VF_VID)
+#define CHIP_CONF(oct, TYPE) \
+ (((struct octeon_ ## TYPE *)((oct)->chip))->conf)
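Call sites converted elsewhere in this patch show the new spelling, for example:

	struct octeon_config *conf23 = CHIP_CONF(oct, cn23xx_vf);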
struct oct_intrmod_cmd {
struct octeon_device *oct_dev;
@@ -508,7 +557,7 @@ struct oct_intrmod_cmd {
void octeon_init_device_list(int conf_type);
/** Free memory for Input and Output queue structures for a octeon device */
-void octeon_free_device_mem(struct octeon_device *);
+void octeon_free_device_mem(struct octeon_device *oct);
/* Look up a free entry in the octeon_device table and allocate resources
* for the octeon_device structure for an octeon device. Called at init
@@ -606,16 +655,16 @@ void lio_pci_writeq(struct octeon_device *oct, u64 val, u64 addr);
/* Routines for reading and writing CSRs */
#define octeon_write_csr(oct_dev, reg_off, value) \
- writel(value, oct_dev->mmio[0].hw_addr + reg_off)
+ writel(value, (oct_dev)->mmio[0].hw_addr + (reg_off))
#define octeon_write_csr64(oct_dev, reg_off, val64) \
- writeq(val64, oct_dev->mmio[0].hw_addr + reg_off)
+ writeq(val64, (oct_dev)->mmio[0].hw_addr + (reg_off))
#define octeon_read_csr(oct_dev, reg_off) \
- readl(oct_dev->mmio[0].hw_addr + reg_off)
+ readl((oct_dev)->mmio[0].hw_addr + (reg_off))
#define octeon_read_csr64(oct_dev, reg_off) \
- readq(oct_dev->mmio[0].hw_addr + reg_off)
+ readq((oct_dev)->mmio[0].hw_addr + (reg_off))
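The added parentheses around the macro arguments are not cosmetic; an unparenthesised reg_off mis-parses when a caller passes an expression. A standalone demonstration of the hazard (illustrative macros, not the CSR accessors themselves):

	#include <assert.h>

	#define OLD_ADDR(base, off)	((base) + off)		/* off unparenthesised */
	#define NEW_ADDR(base, off)	((base) + (off))

	int main(void)
	{
		int cond = 1;

		/* '+' captures the ternary condition in the old expansion */
		assert(OLD_ADDR(100, cond ? 8 : 16) == 8);
		assert(NEW_ADDR(100, cond ? 8 : 16) == 108);
		return 0;
	}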
/**
* Checks if memory access is okay
@@ -724,7 +773,7 @@ int octeon_get_rx_qsize(struct octeon_device *oct, u32 q_no);
/** Turns off the input and output queues for the device
* @param oct which octeon to disable
*/
-void octeon_set_io_queues_off(struct octeon_device *oct);
+int octeon_set_io_queues_off(struct octeon_device *oct);
/** Turns on or off the given output queue for the device
* @param oct which octeon to change
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_droq.c b/drivers/net/ethernet/cavium/liquidio/octeon_droq.c
index f60e5320daf4..0be87d119a97 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_droq.c
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_droq.c
@@ -1,24 +1,20 @@
/**********************************************************************
-* Author: Cavium, Inc.
-*
-* Contact: support@cavium.com
-* Please include "LiquidIO" in the subject.
-*
-* Copyright (c) 2003-2015 Cavium, Inc.
-*
-* This file is free software; you can redistribute it and/or modify
-* it under the terms of the GNU General Public License, Version 2, as
-* published by the Free Software Foundation.
-*
-* This file is distributed in the hope that it will be useful, but
-* AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
-* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
-* NONINFRINGEMENT. See the GNU General Public License for more
-* details.
-*
-* This file may also be available under a different license from Cavium.
-* Contact Cavium, Inc. for more information
-**********************************************************************/
+ * Author: Cavium, Inc.
+ *
+ * Contact: support@cavium.com
+ * Please include "LiquidIO" in the subject.
+ *
+ * Copyright (c) 2003-2016 Cavium, Inc.
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more details.
+ ***********************************************************************/
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
@@ -32,9 +28,7 @@
#include "cn66xx_regs.h"
#include "cn66xx_device.h"
#include "cn23xx_pf_device.h"
-
-#define CVM_MIN(d1, d2) (((d1) < (d2)) ? (d1) : (d2))
-#define CVM_MAX(d1, d2) (((d1) > (d2)) ? (d1) : (d2))
+#include "cn23xx_vf_device.h"
struct niclist {
struct list_head list;
@@ -258,13 +252,18 @@ int octeon_init_droq(struct octeon_device *oct,
c_num_descs = num_descs;
c_buf_size = desc_size;
if (OCTEON_CN6XXX(oct)) {
- struct octeon_config *conf6x = CHIP_FIELD(oct, cn6xxx, conf);
+ struct octeon_config *conf6x = CHIP_CONF(oct, cn6xxx);
c_pkts_per_intr = (u32)CFG_GET_OQ_PKTS_PER_INTR(conf6x);
c_refill_threshold =
(u32)CFG_GET_OQ_REFILL_THRESHOLD(conf6x);
} else if (OCTEON_CN23XX_PF(oct)) {
- struct octeon_config *conf23 = CHIP_FIELD(oct, cn23xx_pf, conf);
+ struct octeon_config *conf23 = CHIP_CONF(oct, cn23xx_pf);
+
+ c_pkts_per_intr = (u32)CFG_GET_OQ_PKTS_PER_INTR(conf23);
+ c_refill_threshold = (u32)CFG_GET_OQ_REFILL_THRESHOLD(conf23);
+ } else if (OCTEON_CN23XX_VF(oct)) {
+ struct octeon_config *conf23 = CHIP_CONF(oct, cn23xx_vf);
c_pkts_per_intr = (u32)CFG_GET_OQ_PKTS_PER_INTR(conf23);
c_refill_threshold = (u32)CFG_GET_OQ_REFILL_THRESHOLD(conf23);
@@ -337,7 +336,7 @@ int octeon_init_droq(struct octeon_device *oct,
/* For 56xx Pass1, this function won't be called, so no checks. */
oct->fn_list.setup_oq_regs(oct, q_no);
- oct->io_qmask.oq |= (1ULL << q_no);
+ oct->io_qmask.oq |= BIT_ULL(q_no);
return 0;
@@ -409,7 +408,7 @@ static inline struct octeon_recv_info *octeon_create_recv_info(
recv_pkt->buffer_ptr[i] = droq->recv_buf_list[idx].buffer;
droq->recv_buf_list[idx].buffer = NULL;
- INCR_INDEX_BY1(idx, droq->max_count);
+ idx = incr_index(idx, 1, droq->max_count);
bytes_left -= droq->buffer_size;
i++;
buf_cnt--;
@@ -440,14 +439,15 @@ octeon_droq_refill_pullup_descs(struct octeon_droq *droq,
droq->recv_buf_list[refill_index].buffer = NULL;
desc_ring[refill_index].buffer_ptr = 0;
do {
- INCR_INDEX_BY1(droq->refill_idx,
- droq->max_count);
+ droq->refill_idx = incr_index(droq->refill_idx,
+ 1,
+ droq->max_count);
desc_refilled++;
droq->refill_count--;
} while (droq->recv_buf_list[droq->refill_idx].
buffer);
}
- INCR_INDEX_BY1(refill_index, droq->max_count);
+ refill_index = incr_index(refill_index, 1, droq->max_count);
} /* while */
return desc_refilled;
}
@@ -514,7 +514,8 @@ octeon_droq_refill(struct octeon_device *octeon_dev, struct octeon_droq *droq)
/* Reset any previous values in the length field. */
droq->info_list[droq->refill_idx].length = 0;
- INCR_INDEX_BY1(droq->refill_idx, droq->max_count);
+ droq->refill_idx = incr_index(droq->refill_idx, 1,
+ droq->max_count);
desc_refilled++;
droq->refill_count--;
}
@@ -599,7 +600,8 @@ static inline void octeon_droq_drop_packets(struct octeon_device *oct,
buf_cnt = 1;
}
- INCR_INDEX(droq->read_idx, buf_cnt, droq->max_count);
+ droq->read_idx = incr_index(droq->read_idx, buf_cnt,
+ droq->max_count);
droq->refill_count += buf_cnt;
}
}
@@ -639,11 +641,12 @@ octeon_droq_fast_process_packets(struct octeon_device *oct,
rh = &info->rh;
total_len += (u32)info->length;
- if (OPCODE_SLOW_PATH(rh)) {
+ if (opcode_slow_path(rh)) {
u32 buf_cnt;
buf_cnt = octeon_droq_dispatch_pkt(oct, droq, rh, info);
- INCR_INDEX(droq->read_idx, buf_cnt, droq->max_count);
+ droq->read_idx = incr_index(droq->read_idx,
+ buf_cnt, droq->max_count);
droq->refill_count += buf_cnt;
} else {
if (info->length <= droq->buffer_size) {
@@ -657,7 +660,8 @@ octeon_droq_fast_process_packets(struct octeon_device *oct,
droq->recv_buf_list[droq->read_idx].buffer =
NULL;
- INCR_INDEX_BY1(droq->read_idx, droq->max_count);
+ droq->read_idx = incr_index(droq->read_idx, 1,
+ droq->max_count);
droq->refill_count++;
} else {
nicbuf = octeon_fast_packet_alloc((u32)
@@ -689,8 +693,9 @@ octeon_droq_fast_process_packets(struct octeon_device *oct,
}
pkt_len += cpy_len;
- INCR_INDEX_BY1(droq->read_idx,
- droq->max_count);
+ droq->read_idx =
+ incr_index(droq->read_idx, 1,
+ droq->max_count);
droq->refill_count++;
}
}
@@ -804,9 +809,8 @@ octeon_droq_process_poll_pkts(struct octeon_device *oct,
while (total_pkts_processed < budget) {
octeon_droq_check_hw_for_pkts(droq);
- pkts_available =
- CVM_MIN((budget - total_pkts_processed),
- (u32)(atomic_read(&droq->pkts_pending)));
+ pkts_available = min((budget - total_pkts_processed),
+ (u32)(atomic_read(&droq->pkts_pending)));
if (pkts_available == 0)
break;
@@ -891,6 +895,10 @@ octeon_process_droq_poll_cmd(struct octeon_device *oct, u32 q_no, int cmd,
lio_enable_irq(oct->droq[q_no], oct->instr_queue[q_no]);
}
break;
+
+ case OCTEON_CN23XX_VF_VID:
+ lio_enable_irq(oct->droq[q_no], oct->instr_queue[q_no]);
+ break;
}
return 0;
}
@@ -988,7 +996,8 @@ int octeon_create_droq(struct octeon_device *oct,
if (!droq)
droq = vmalloc(sizeof(*droq));
if (!droq)
- goto create_droq_fail;
+ return -1;
+
memset(droq, 0, sizeof(struct octeon_droq));
/*Disable the pkt o/p for this Q */
@@ -996,7 +1005,11 @@ int octeon_create_droq(struct octeon_device *oct,
oct->droq[q_no] = droq;
/* Initialize the Droq */
- octeon_init_droq(oct, q_no, num_descs, desc_size, app_ctx);
+ if (octeon_init_droq(oct, q_no, num_descs, desc_size, app_ctx)) {
+ vfree(oct->droq[q_no]);
+ oct->droq[q_no] = NULL;
+ return -1;
+ }
oct->num_oqs++;
@@ -1009,8 +1022,4 @@ int octeon_create_droq(struct octeon_device *oct,
* the same time.
*/
return 0;
-
-create_droq_fail:
- octeon_delete_droq(oct, q_no);
- return -ENOMEM;
}
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_droq.h b/drivers/net/ethernet/cavium/liquidio/octeon_droq.h
index 5be002d5dba4..e62074090681 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_droq.h
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_droq.h
@@ -4,7 +4,7 @@
* Contact: support@cavium.com
* Please include "LiquidIO" in the subject.
*
- * Copyright (c) 2003-2015 Cavium, Inc.
+ * Copyright (c) 2003-2016 Cavium, Inc.
*
* This file is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, Version 2, as
@@ -13,13 +13,8 @@
* This file is distributed in the hope that it will be useful, but
* AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
- * NONINFRINGEMENT. See the GNU General Public License for more
- * details.
- *
- * This file may also be available under a different license from Cavium.
- * Contact Cavium, Inc. for more information
- **********************************************************************/
-
+ * NONINFRINGEMENT. See the GNU General Public License for more details.
+ ***********************************************************************/
/*! \file octeon_droq.h
* \brief Implementation of Octeon Output queues. "Output" is with
* respect to the Octeon device on the NIC. From this driver's point of
@@ -81,7 +76,7 @@ struct octeon_skb_page_info {
* the Octeon device. Since the descriptor ring keeps physical (bus)
* addresses, this field is required for the driver to keep track of
* the virtual address pointers.
-*/
+ */
struct octeon_recv_buffer {
/** Packet buffer, including metadata. */
void *buffer;
@@ -121,7 +116,6 @@ struct oct_droq_stats {
/** Num of Packets dropped due to receive path failures. */
u64 rx_dropped;
- /** Num of vxlan packets received; */
u64 rx_vxlan;
/** Num of failures of recv_buffer_alloc() */
@@ -359,7 +353,7 @@ struct octeon_droq {
* @param q_no - droq no. ranges from 0 - 3.
* @param app_ctx - pointer to application context
* @return Success: 0 Failure: 1
-*/
+ */
int octeon_init_droq(struct octeon_device *oct_dev,
u32 q_no,
u32 num_descs,
@@ -372,7 +366,7 @@ int octeon_init_droq(struct octeon_device *oct_dev,
* @param oct_dev - pointer to the octeon device structure
* @param q_no - droq no. ranges from 0 - 3.
* @return: Success: 0 Failure: 1
-*/
+ */
int octeon_delete_droq(struct octeon_device *oct_dev, u32 q_no);
/** Register a change in droq operations. The ops field has a pointer to a
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_iq.h b/drivers/net/ethernet/cavium/liquidio/octeon_iq.h
index e4d426ba18dc..e04ca8f0b4a7 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_iq.h
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_iq.h
@@ -4,7 +4,7 @@
* Contact: support@cavium.com
* Please include "LiquidIO" in the subject.
*
- * Copyright (c) 2003-2015 Cavium, Inc.
+ * Copyright (c) 2003-2016 Cavium, Inc.
*
* This file is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, Version 2, as
@@ -13,13 +13,8 @@
* This file is distributed in the hope that it will be useful, but
* AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
- * NONINFRINGEMENT. See the GNU General Public License for more
- * details.
- *
- * This file may also be available under a different license from Cavium.
- * Contact Cavium, Inc. for more information
- **********************************************************************/
-
+ * NONINFRINGEMENT. See the GNU General Public License for more details.
+ ***********************************************************************/
/*! \file octeon_iq.h
* \brief Host Driver: Implementation of Octeon input queues. "Input" is
* with respect to the Octeon device on the NIC. From this driver's
@@ -69,7 +64,6 @@ struct oct_iq_stats {
u64 tx_vxlan; /* tunnel */
u64 tx_dmamap_fail;
u64 tx_restart;
- /*u64 tx_timeout_count;*/
};
#define OCT_IQ_STATS_SIZE (sizeof(struct oct_iq_stats))
@@ -78,7 +72,7 @@ struct oct_iq_stats {
* The input queue is used to post raw (instruction) mode data or packet
* data to the Octeon device from the host. Each input queue (up to 4) for
* an Octeon device has one such structure to represent it.
-*/
+ */
struct octeon_instr_queue {
struct octeon_device *oct_dev;
@@ -118,8 +112,8 @@ struct octeon_instr_queue {
u32 octeon_read_index;
/** This index aids in finding the window in the queue where Octeon
- * has read the commands.
- */
+ * has read the commands.
+ */
u32 flush_index;
/** This field keeps track of the instructions pending in this queue. */
@@ -150,8 +144,8 @@ struct octeon_instr_queue {
u64 last_db_time;
/** The doorbell timeout. If the doorbell was not rung for this time and
- * fill_cnt is non-zero, ring the doorbell again.
- */
+ * fill_cnt is non-zero, ring the doorbell again.
+ */
u32 db_timeout;
/** Statistics for this input queue. */
@@ -309,6 +303,9 @@ struct octeon_sc_buffer_pool {
atomic_t alloc_buf_count;
};
+#define INCR_INSTRQUEUE_PKT_COUNT(octeon_dev_ptr, iq_no, field, count) \
+ (((octeon_dev_ptr)->instr_queue[iq_no]->stats.field) += count)
+
int octeon_setup_sc_buffer_pool(struct octeon_device *oct);
int octeon_free_sc_buffer_pool(struct octeon_device *oct);
struct octeon_soft_command *
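
An aside on the extra parentheses the INCR_INSTRQUEUE_PKT_COUNT macro gains on its move into octeon_iq.h: without them, a pointer-arithmetic argument mis-binds to the "->" operator. A minimal standalone sketch, using illustrative stand-in types rather than the driver's real structures:

#include <stdio.h>

/* Illustrative stand-ins for the driver's types. */
struct iq_stats { unsigned long long instr_posted; };
struct instr_queue { struct iq_stats stats; };
struct octeon_device { struct instr_queue *instr_queue[4]; };

/* Same shape as the macro in octeon_iq.h: parenthesizing octeon_dev_ptr
 * keeps an argument such as "devs + i" from mis-binding to "->". */
#define INCR_INSTRQUEUE_PKT_COUNT(octeon_dev_ptr, iq_no, field, count)	\
	(((octeon_dev_ptr)->instr_queue[iq_no]->stats.field) += count)

int main(void)
{
	struct instr_queue iq = { { 0 } };
	struct octeon_device devs[2] = { { { &iq } }, { { &iq } } };
	int i = 1;

	/* Without the outer parentheses this would expand to
	 * "devs + i->instr_queue[...]" and fail to compile. */
	INCR_INSTRQUEUE_PKT_COUNT(devs + i, 0, instr_posted, 1);
	printf("instr_posted = %llu\n", iq.stats.instr_posted);
	return 0;
}
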
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_mailbox.c b/drivers/net/ethernet/cavium/liquidio/octeon_mailbox.c
new file mode 100644
index 000000000000..73696b427f06
--- /dev/null
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_mailbox.c
@@ -0,0 +1,318 @@
+/**********************************************************************
+ * Author: Cavium, Inc.
+ *
+ * Contact: support@cavium.com
+ * Please include "LiquidIO" in the subject.
+ *
+ * Copyright (c) 2003-2016 Cavium, Inc.
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more details.
+ ***********************************************************************/
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include "liquidio_common.h"
+#include "octeon_droq.h"
+#include "octeon_iq.h"
+#include "response_manager.h"
+#include "octeon_device.h"
+#include "octeon_main.h"
+#include "octeon_mailbox.h"
+
+/**
+ * octeon_mbox_read:
+ * @mbox: Pointer to the mailbox
+ *
+ * Reads the 8 bytes of data from the mbox register.
+ * Writes back the acknowledgement indicating completion of the read.
+ */
+int octeon_mbox_read(struct octeon_mbox *mbox)
+{
+ union octeon_mbox_message msg;
+ int ret = 0;
+
+ spin_lock(&mbox->lock);
+
+ msg.u64 = readq(mbox->mbox_read_reg);
+
+ if ((msg.u64 == OCTEON_PFVFACK) || (msg.u64 == OCTEON_PFVFSIG)) {
+ spin_unlock(&mbox->lock);
+ return 0;
+ }
+
+ if (mbox->state & OCTEON_MBOX_STATE_REQUEST_RECEIVING) {
+ mbox->mbox_req.data[mbox->mbox_req.recv_len - 1] = msg.u64;
+ mbox->mbox_req.recv_len++;
+ } else if (mbox->state & OCTEON_MBOX_STATE_RESPONSE_RECEIVING) {
+ mbox->mbox_resp.data[mbox->mbox_resp.recv_len - 1] = msg.u64;
+ mbox->mbox_resp.recv_len++;
+ } else if ((mbox->state & OCTEON_MBOX_STATE_IDLE) &&
+ (msg.s.type == OCTEON_MBOX_REQUEST)) {
+ mbox->state &= ~OCTEON_MBOX_STATE_IDLE;
+ mbox->state |= OCTEON_MBOX_STATE_REQUEST_RECEIVING;
+ mbox->mbox_req.msg.u64 = msg.u64;
+ mbox->mbox_req.q_no = mbox->q_no;
+ mbox->mbox_req.recv_len = 1;
+ } else if ((mbox->state & OCTEON_MBOX_STATE_RESPONSE_PENDING) &&
+ (msg.s.type == OCTEON_MBOX_RESPONSE)) {
+ mbox->state &= ~OCTEON_MBOX_STATE_RESPONSE_PENDING;
+ mbox->state |= OCTEON_MBOX_STATE_RESPONSE_RECEIVING;
+ mbox->mbox_resp.msg.u64 = msg.u64;
+ mbox->mbox_resp.q_no = mbox->q_no;
+ mbox->mbox_resp.recv_len = 1;
+ } else {
+ writeq(OCTEON_PFVFERR, mbox->mbox_read_reg);
+ mbox->state |= OCTEON_MBOX_STATE_ERROR;
+ spin_unlock(&mbox->lock);
+ return 1;
+ }
+
+ if (mbox->state & OCTEON_MBOX_STATE_REQUEST_RECEIVING) {
+ if (mbox->mbox_req.recv_len < msg.s.len) {
+ ret = 0;
+ } else {
+ mbox->state &= ~OCTEON_MBOX_STATE_REQUEST_RECEIVING;
+ mbox->state |= OCTEON_MBOX_STATE_REQUEST_RECEIVED;
+ ret = 1;
+ }
+ } else if (mbox->state & OCTEON_MBOX_STATE_RESPONSE_RECEIVING) {
+ if (mbox->mbox_resp.recv_len < msg.s.len) {
+ ret = 0;
+ } else {
+ mbox->state &= ~OCTEON_MBOX_STATE_RESPONSE_RECEIVING;
+ mbox->state |= OCTEON_MBOX_STATE_RESPONSE_RECEIVED;
+ ret = 1;
+ }
+ } else {
+ WARN_ON(1);
+ }
+
+ writeq(OCTEON_PFVFACK, mbox->mbox_read_reg);
+
+ spin_unlock(&mbox->lock);
+
+ return ret;
+}
+
+/**
+ * octeon_mbox_write:
+ * @oct: Pointer to the Octeon device
+ * @mbox_cmd: Cmd to send to mailbox.
+ *
+ * Populates the queue-specific mbox structure
+ * with cmd information and writes the cmd to
+ * the mbox register.
+ */
+int octeon_mbox_write(struct octeon_device *oct,
+ struct octeon_mbox_cmd *mbox_cmd)
+{
+ struct octeon_mbox *mbox = oct->mbox[mbox_cmd->q_no];
+ u32 count, i, ret = OCTEON_MBOX_STATUS_SUCCESS;
+ unsigned long flags;
+
+ spin_lock_irqsave(&mbox->lock, flags);
+
+ if ((mbox_cmd->msg.s.type == OCTEON_MBOX_RESPONSE) &&
+ !(mbox->state & OCTEON_MBOX_STATE_REQUEST_RECEIVED)) {
+ spin_unlock_irqrestore(&mbox->lock, flags);
+ return OCTEON_MBOX_STATUS_FAILED;
+ }
+
+ if ((mbox_cmd->msg.s.type == OCTEON_MBOX_REQUEST) &&
+ !(mbox->state & OCTEON_MBOX_STATE_IDLE)) {
+ spin_unlock_irqrestore(&mbox->lock, flags);
+ return OCTEON_MBOX_STATUS_BUSY;
+ }
+
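+ /* A request that expects a response is parked in mbox_resp, so its
+ * callback (fn/fn_arg) can be found there when the response arrives.
+ */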
+ if (mbox_cmd->msg.s.type == OCTEON_MBOX_REQUEST) {
+ memcpy(&mbox->mbox_resp, mbox_cmd,
+ sizeof(struct octeon_mbox_cmd));
+ mbox->state = OCTEON_MBOX_STATE_RESPONSE_PENDING;
+ }
+
+ spin_unlock_irqrestore(&mbox->lock, flags);
+
+ count = 0;
+
+ while (readq(mbox->mbox_write_reg) != OCTEON_PFVFSIG) {
+ schedule_timeout_uninterruptible(LIO_MBOX_WRITE_WAIT_TIME);
+ if (count++ == LIO_MBOX_WRITE_WAIT_CNT) {
+ ret = OCTEON_MBOX_STATUS_FAILED;
+ break;
+ }
+ }
+
+ if (ret == OCTEON_MBOX_STATUS_SUCCESS) {
+ writeq(mbox_cmd->msg.u64, mbox->mbox_write_reg);
+ for (i = 0; i < (u32)(mbox_cmd->msg.s.len - 1); i++) {
+ count = 0;
+ while (readq(mbox->mbox_write_reg) !=
+ OCTEON_PFVFACK) {
+ schedule_timeout_uninterruptible(LIO_MBOX_WRITE_WAIT_TIME);
+ if (count++ == LIO_MBOX_WRITE_WAIT_CNT) {
+ ret = OCTEON_MBOX_STATUS_FAILED;
+ break;
+ }
+ }
+ writeq(mbox_cmd->data[i], mbox->mbox_write_reg);
+ }
+ }
+
+ spin_lock_irqsave(&mbox->lock, flags);
+ if (mbox_cmd->msg.s.type == OCTEON_MBOX_RESPONSE) {
+ mbox->state = OCTEON_MBOX_STATE_IDLE;
+ writeq(OCTEON_PFVFSIG, mbox->mbox_read_reg);
+ } else {
+ if ((!mbox_cmd->msg.s.resp_needed) ||
+ (ret == OCTEON_MBOX_STATUS_FAILED)) {
+ mbox->state &= ~OCTEON_MBOX_STATE_RESPONSE_PENDING;
+ if (!(mbox->state &
+ (OCTEON_MBOX_STATE_REQUEST_RECEIVING |
+ OCTEON_MBOX_STATE_REQUEST_RECEIVED)))
+ mbox->state = OCTEON_MBOX_STATE_IDLE;
+ }
+ }
+ spin_unlock_irqrestore(&mbox->lock, flags);
+
+ return ret;
+}
+
+/**
+ * octeon_mbox_process_cmd:
+ * @mbox: Pointer to the mailbox
+ * @mbox_cmd: Pointer to the command received
+ *
+ * Process the cmd received in the mbox.
+ */
+static int octeon_mbox_process_cmd(struct octeon_mbox *mbox,
+ struct octeon_mbox_cmd *mbox_cmd)
+{
+ struct octeon_device *oct = mbox->oct_dev;
+
+ switch (mbox_cmd->msg.s.cmd) {
+ case OCTEON_VF_ACTIVE:
+ dev_dbg(&oct->pci_dev->dev, "got vfactive sending data back\n");
+ mbox_cmd->msg.s.type = OCTEON_MBOX_RESPONSE;
+ mbox_cmd->msg.s.resp_needed = 1;
+ mbox_cmd->msg.s.len = 2;
+ mbox_cmd->data[0] = 0; /* VF version is in mbox_cmd->data[0] */
+ ((struct lio_version *)&mbox_cmd->data[0])->major =
+ LIQUIDIO_BASE_MAJOR_VERSION;
+ ((struct lio_version *)&mbox_cmd->data[0])->minor =
+ LIQUIDIO_BASE_MINOR_VERSION;
+ ((struct lio_version *)&mbox_cmd->data[0])->micro =
+ LIQUIDIO_BASE_MICRO_VERSION;
+ memcpy(mbox_cmd->msg.s.params, (uint8_t *)&oct->pfvf_hsword, 6);
+ /* Send core config info to the corresponding active VF. */
+ octeon_mbox_write(oct, mbox_cmd);
+ break;
+
+ case OCTEON_VF_FLR_REQUEST:
+ dev_info(&oct->pci_dev->dev,
+ "got a request for FLR from VF that owns DPI ring %u\n",
+ mbox->q_no);
+ pcie_capability_set_word(
+ oct->sriov_info.dpiring_to_vfpcidev_lut[mbox->q_no],
+ PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_BCR_FLR);
+ break;
+
+ case OCTEON_PF_CHANGED_VF_MACADDR:
+ if (OCTEON_CN23XX_VF(oct))
+ octeon_pf_changed_vf_macaddr(oct,
+ mbox_cmd->msg.s.params);
+ break;
+
+ default:
+ break;
+ }
+ return 0;
+}
+
+/**
+ * octeon_mbox_process_message:
+ * @mbox: Pointer to the mailbox
+ *
+ * Process the received mbox message.
+ */
+int octeon_mbox_process_message(struct octeon_mbox *mbox)
+{
+ struct octeon_mbox_cmd mbox_cmd;
+ unsigned long flags;
+
+ spin_lock_irqsave(&mbox->lock, flags);
+
+ if (mbox->state & OCTEON_MBOX_STATE_ERROR) {
+ if (mbox->state & (OCTEON_MBOX_STATE_RESPONSE_PENDING |
+ OCTEON_MBOX_STATE_RESPONSE_RECEIVING)) {
+ memcpy(&mbox_cmd, &mbox->mbox_resp,
+ sizeof(struct octeon_mbox_cmd));
+ mbox->state = OCTEON_MBOX_STATE_IDLE;
+ writeq(OCTEON_PFVFSIG, mbox->mbox_read_reg);
+ spin_unlock_irqrestore(&mbox->lock, flags);
+ mbox_cmd.recv_status = 1;
+ if (mbox_cmd.fn)
+ mbox_cmd.fn(mbox->oct_dev, &mbox_cmd,
+ mbox_cmd.fn_arg);
+ return 0;
+ }
+
+ mbox->state = OCTEON_MBOX_STATE_IDLE;
+ writeq(OCTEON_PFVFSIG, mbox->mbox_read_reg);
+ spin_unlock_irqrestore(&mbox->lock, flags);
+ return 0;
+ }
+
+ if (mbox->state & OCTEON_MBOX_STATE_RESPONSE_RECEIVED) {
+ memcpy(&mbox_cmd, &mbox->mbox_resp,
+ sizeof(struct octeon_mbox_cmd));
+ mbox->state = OCTEON_MBOX_STATE_IDLE;
+ writeq(OCTEON_PFVFSIG, mbox->mbox_read_reg);
+ spin_unlock_irqrestore(&mbox->lock, flags);
+ mbox_cmd.recv_status = 0;
+ if (mbox_cmd.fn)
+ mbox_cmd.fn(mbox->oct_dev, &mbox_cmd, mbox_cmd.fn_arg);
+ return 0;
+ }
+
+ if (mbox->state & OCTEON_MBOX_STATE_REQUEST_RECEIVED) {
+ memcpy(&mbox_cmd, &mbox->mbox_req,
+ sizeof(struct octeon_mbox_cmd));
+ if (!mbox_cmd.msg.s.resp_needed) {
+ mbox->state &= ~OCTEON_MBOX_STATE_REQUEST_RECEIVED;
+ if (!(mbox->state &
+ OCTEON_MBOX_STATE_RESPONSE_PENDING))
+ mbox->state = OCTEON_MBOX_STATE_IDLE;
+ writeq(OCTEON_PFVFSIG, mbox->mbox_read_reg);
+ }
+
+ spin_unlock_irqrestore(&mbox->lock, flags);
+ octeon_mbox_process_cmd(mbox, &mbox_cmd);
+ return 0;
+ }
+
+ WARN_ON(1);
+
+ return 0;
+}
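
In short, the write side of the protocol is: the sender may place a word in the signalling register only when the peer has left a marker there (the mailbox-free signature for the first word, an ACK for each subsequent data word). A rough userspace sketch of that handshake, with the register emulated by a plain variable and the scheduler-based waits reduced to a counter:

#include <stdint.h>
#include <stdio.h>

#define OCTEON_PFVFSIG 0x1122334455667788ULL	/* mailbox-free marker */
#define OCTEON_PFVFACK 0xffffffffffffffffULL	/* word-consumed marker */

static uint64_t mbox_reg = OCTEON_PFVFSIG;	/* stands in for the HW register */

/* Wait until the peer has left "expect" in the register, then post "word".
 * Returns 0 on success, 1 on timeout (OCTEON_MBOX_STATUS_FAILED analogue). */
static int mbox_put_word(uint64_t word, uint64_t expect)
{
	int tries = 1000;	/* LIO_MBOX_WRITE_WAIT_CNT analogue */

	while (mbox_reg != expect)
		if (--tries == 0)
			return 1;
	mbox_reg = word;	/* the driver's writeq() */
	return 0;
}

int main(void)
{
	/* The header word goes out against the signature; each data word
	 * would then wait for the peer's ACK, as in octeon_mbox_write(). */
	if (mbox_put_word(0x1, OCTEON_PFVFSIG) == 0)
		puts("header word posted");
	return 0;
}
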
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_mailbox.h b/drivers/net/ethernet/cavium/liquidio/octeon_mailbox.h
new file mode 100644
index 000000000000..fe60a3e6247b
--- /dev/null
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_mailbox.h
@@ -0,0 +1,115 @@
+/**********************************************************************
+ * Author: Cavium, Inc.
+ *
+ * Contact: support@cavium.com
+ * Please include "LiquidIO" in the subject.
+ *
+ * Copyright (c) 2003-2016 Cavium, Inc.
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more details.
+ ***********************************************************************/
+#ifndef __MAILBOX_H__
+#define __MAILBOX_H__
+
+/* Macros for Mail Box Communication */
+
+#define OCTEON_MBOX_DATA_MAX 32
+
+#define OCTEON_VF_ACTIVE 0x1
+#define OCTEON_VF_FLR_REQUEST 0x2
+#define OCTEON_PF_CHANGED_VF_MACADDR 0x4
+
+/* Markers for read acknowledgement, mailbox-free signature and error */
+#define OCTEON_PFVFACK 0xffffffffffffffff
+#define OCTEON_PFVFSIG 0x1122334455667788
+#define OCTEON_PFVFERR 0xDEADDEADDEADDEAD
+
+#define LIO_MBOX_WRITE_WAIT_CNT 1000
+#define LIO_MBOX_WRITE_WAIT_TIME 10
+
+enum octeon_mbox_cmd_status {
+ OCTEON_MBOX_STATUS_SUCCESS = 0,
+ OCTEON_MBOX_STATUS_FAILED = 1,
+ OCTEON_MBOX_STATUS_BUSY = 2
+};
+
+enum octeon_mbox_message_type {
+ OCTEON_MBOX_REQUEST = 0,
+ OCTEON_MBOX_RESPONSE = 1
+};
+
+union octeon_mbox_message {
+ u64 u64;
+ struct {
+ u16 type : 1;
+ u16 resp_needed : 1;
+ u16 cmd : 6;
+ u16 len : 8;
+ u8 params[6];
+ } s;
+};
+
+typedef void (*octeon_mbox_callback_t)(void *, void *, void *);
+
+struct octeon_mbox_cmd {
+ union octeon_mbox_message msg;
+ u64 data[OCTEON_MBOX_DATA_MAX];
+ u32 q_no;
+ u32 recv_len;
+ u32 recv_status;
+ octeon_mbox_callback_t fn;
+ void *fn_arg;
+};
+
+enum octeon_mbox_state {
+ OCTEON_MBOX_STATE_IDLE = 1,
+ OCTEON_MBOX_STATE_REQUEST_RECEIVING = 2,
+ OCTEON_MBOX_STATE_REQUEST_RECEIVED = 4,
+ OCTEON_MBOX_STATE_RESPONSE_PENDING = 8,
+ OCTEON_MBOX_STATE_RESPONSE_RECEIVING = 16,
+ OCTEON_MBOX_STATE_RESPONSE_RECEIVED = 32,
+ OCTEON_MBOX_STATE_ERROR = 64
+};
+
+struct octeon_mbox {
+ /** A spinlock to protect access to this q_mbox. */
+ spinlock_t lock;
+
+ struct octeon_device *oct_dev;
+
+ u32 q_no;
+
+ enum octeon_mbox_state state;
+
+ struct cavium_wk mbox_poll_wk;
+
+ /** SLI_MAC_PF_MBOX_INT for PF, SLI_PKT_MBOX_INT for VF. */
+ void *mbox_int_reg;
+
+ /** SLI_PKT_PF_VF_MBOX_SIG(0) for PF, SLI_PKT_PF_VF_MBOX_SIG(1) for VF.
+ */
+ void *mbox_write_reg;
+
+ /** SLI_PKT_PF_VF_MBOX_SIG(1) for PF, SLI_PKT_PF_VF_MBOX_SIG(0) for VF.
+ */
+ void *mbox_read_reg;
+
+ struct octeon_mbox_cmd mbox_req;
+
+ struct octeon_mbox_cmd mbox_resp;
+
+};
+
+int octeon_mbox_read(struct octeon_mbox *mbox);
+int octeon_mbox_write(struct octeon_device *oct,
+ struct octeon_mbox_cmd *mbox_cmd);
+int octeon_mbox_process_message(struct octeon_mbox *mbox);
+
+#endif
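
To make the message layout above concrete: the first 64-bit word carries the type bit, the resp_needed bit, a 6-bit command and an 8-bit total length (header plus data words), with six parameter bytes alongside. A standalone sketch of packing a request header; note that C bitfield ordering is compiler/ABI dependent, so the raw value printed is illustrative only:

#include <stdint.h>
#include <stdio.h>

/* Same field widths as union octeon_mbox_message above. */
union mbox_message {
	uint64_t u64;
	struct {
		uint16_t type : 1;
		uint16_t resp_needed : 1;
		uint16_t cmd : 6;
		uint16_t len : 8;
		uint8_t params[6];
	} s;
};

int main(void)
{
	union mbox_message msg = { .u64 = 0 };

	msg.s.type = 0;		/* OCTEON_MBOX_REQUEST */
	msg.s.resp_needed = 1;	/* ask the peer to answer */
	msg.s.cmd = 0x1;	/* OCTEON_VF_ACTIVE */
	msg.s.len = 2;		/* header word + one data word */

	printf("header word: 0x%016llx\n", (unsigned long long)msg.u64);
	return 0;
}
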
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_main.h b/drivers/net/ethernet/cavium/liquidio/octeon_main.h
index 366298f7bcb2..8cd389148166 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_main.h
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_main.h
@@ -4,7 +4,7 @@
* Contact: support@cavium.com
* Please include "LiquidIO" in the subject.
*
- * Copyright (c) 2003-2015 Cavium, Inc.
+ * Copyright (c) 2003-2016 Cavium, Inc.
*
* This file is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, Version 2, as
@@ -13,13 +13,8 @@
* This file is distributed in the hope that it will be useful, but
* AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
- * NONINFRINGEMENT. See the GNU General Public License for more
- * details.
- *
- * This file may also be available under a different license from Cavium.
- * Contact Cavium, Inc. for more information
- **********************************************************************/
-
+ * NONINFRINGEMENT. See the GNU General Public License for more details.
+ ***********************************************************************/
/*! \file octeon_main.h
* \brief Host Driver: This file is included by all host driver source files
* to include common definitions.
@@ -66,7 +61,7 @@ void octeon_update_tx_completion_counters(void *buf, int reqtype,
unsigned int *bytes_compl);
void octeon_report_tx_completion_to_bql(void *txq, unsigned int pkts_compl,
unsigned int bytes_compl);
-
+void octeon_pf_changed_vf_macaddr(struct octeon_device *oct, u8 *mac);
/** Swap 8B blocks */
static inline void octeon_swap_8B_data(u64 *data, u32 blocks)
{
@@ -78,10 +73,10 @@ static inline void octeon_swap_8B_data(u64 *data, u32 blocks)
}
/**
- * \brief unmaps a PCI BAR
- * @param oct Pointer to Octeon device
- * @param baridx bar index
- */
+ * \brief unmaps a PCI BAR
+ * @param oct Pointer to Octeon device
+ * @param baridx bar index
+ */
static inline void octeon_unmap_pci_barx(struct octeon_device *oct, int baridx)
{
dev_dbg(&oct->pci_dev->dev, "Freeing PCI mapped regions for Bar%d\n",
@@ -116,7 +111,7 @@ static inline int octeon_map_pci_barx(struct octeon_device *oct,
mapped_len = oct->mmio[baridx].len;
if (!mapped_len)
- return 1;
+ goto err_release_region;
if (max_map_len && (mapped_len > max_map_len))
mapped_len = max_map_len;
@@ -132,11 +127,15 @@ static inline int octeon_map_pci_barx(struct octeon_device *oct,
if (!oct->mmio[baridx].hw_addr) {
dev_err(&oct->pci_dev->dev, "error ioremap for bar %d\n",
baridx);
- return 1;
+ goto err_release_region;
}
oct->mmio[baridx].done = 1;
return 0;
+
+err_release_region:
+ pci_release_region(oct->pci_dev, baridx * 2);
+ return 1;
}
static inline void *
@@ -203,24 +202,6 @@ out:
return errno;
}
-static inline void
-sleep_atomic_cond(wait_queue_head_t *waitq, atomic_t *pcond)
-{
- wait_queue_t we;
-
- init_waitqueue_entry(&we, current);
- add_wait_queue(waitq, &we);
- while (!atomic_read(pcond)) {
- set_current_state(TASK_INTERRUPTIBLE);
- if (signal_pending(current))
- goto out;
- schedule();
- }
-out:
- set_current_state(TASK_RUNNING);
- remove_wait_queue(waitq, &we);
-}
-
/* Gives up the CPU for a timeout period.
* Check that the condition is not true before we go to sleep for a
* timeout period.
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_mem_ops.c b/drivers/net/ethernet/cavium/liquidio/octeon_mem_ops.c
index 0dc081a99b30..13a18c9a7a51 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_mem_ops.c
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_mem_ops.c
@@ -4,7 +4,7 @@
* Contact: support@cavium.com
* Please include "LiquidIO" in the subject.
*
- * Copyright (c) 2003-2015 Cavium, Inc.
+ * Copyright (c) 2003-2016 Cavium, Inc.
*
* This file is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, Version 2, as
@@ -15,9 +15,6 @@
* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
* NONINFRINGEMENT. See the GNU General Public License for more
* details.
- *
- * This file may also be available under a different license from Cavium.
- * Contact Cavium, Inc. for more information
**********************************************************************/
#include <linux/netdevice.h>
#include "liquidio_common.h"
@@ -39,7 +36,7 @@ octeon_toggle_bar1_swapmode(struct octeon_device *oct, u32 idx)
oct->fn_list.bar1_idx_write(oct, idx, mask);
}
#else
-#define octeon_toggle_bar1_swapmode(oct, idx) (oct = oct)
+#define octeon_toggle_bar1_swapmode(oct, idx)
#endif
static void
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_mem_ops.h b/drivers/net/ethernet/cavium/liquidio/octeon_mem_ops.h
index 11b183377b44..bae2fdd89503 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_mem_ops.h
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_mem_ops.h
@@ -4,7 +4,7 @@
* Contact: support@cavium.com
* Please include "LiquidIO" in the subject.
*
- * Copyright (c) 2003-2015 Cavium, Inc.
+ * Copyright (c) 2003-2016 Cavium, Inc.
*
* This file is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, Version 2, as
@@ -15,9 +15,6 @@
* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
* NONINFRINGEMENT. See the GNU General Public License for more
* details.
- *
- * This file may also be available under a different license from Cavium.
- * Contact Cavium, Inc. for more information
**********************************************************************/
/*! \file octeon_mem_ops.h
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_network.h b/drivers/net/ethernet/cavium/liquidio/octeon_network.h
index e5d1debd05ad..6bb89419006e 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_network.h
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_network.h
@@ -4,7 +4,7 @@
* Contact: support@cavium.com
* Please include "LiquidIO" in the subject.
*
- * Copyright (c) 2003-2015 Cavium, Inc.
+ * Copyright (c) 2003-2016 Cavium, Inc.
*
* This file is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, Version 2, as
@@ -15,9 +15,6 @@
* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
* NONINFRINGEMENT. See the GNU General Public License for more
* details.
- *
- * This file may also be available under a different license from Cavium.
- * Contact Cavium, Inc. for more information
**********************************************************************/
/*! \file octeon_network.h
@@ -29,7 +26,7 @@
#include <linux/ptp_clock_kernel.h>
#define LIO_MAX_MTU_SIZE (OCTNET_MAX_FRM_SIZE - OCTNET_FRM_HEADER_SIZE)
-#define LIO_MIN_MTU_SIZE 68
+#define LIO_MIN_MTU_SIZE ETH_MIN_MTU
struct oct_nic_stats_resp {
u64 rh;
@@ -126,12 +123,13 @@ struct lio {
/* work queue for link status */
struct cavium_wq link_status_wq;
+ int netdev_uc_count;
};
#define LIO_SIZE (sizeof(struct lio))
#define GET_LIO(netdev) ((struct lio *)netdev_priv(netdev))
-#define CIU3_WDOG(c) (0x1010000020000ULL + (c << 3))
+#define CIU3_WDOG(c) (0x1010000020000ULL + ((c) << 3))
#define CIU3_WDOG_MASK 12ULL
#define LIO_MONITOR_WDOG_EXPIRE 1
#define LIO_MONITOR_CORE_STUCK_MSGD 2
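
The parentheses added around the CIU3_WDOG() argument above are not cosmetic; an argument containing an operator of lower precedence than << would otherwise be shifted incorrectly. A quick standalone demonstration (the _OLD/_NEW names are local to this sketch):

#include <stdio.h>

/* Before the patch: only "c" itself is guaranteed to be shifted. */
#define CIU3_WDOG_OLD(c) (0x1010000020000ULL + (c << 3))
/* After the patch: the whole argument expression is shifted. */
#define CIU3_WDOG_NEW(c) (0x1010000020000ULL + ((c) << 3))

int main(void)
{
	int core = 5;

	/* "core & 3" expands to "core & 3 << 3" == "core & 24" in the old
	 * form, because << binds tighter than &. */
	printf("old: 0x%llx\n", CIU3_WDOG_OLD(core & 3)); /* 0x1010000020000 */
	printf("new: 0x%llx\n", CIU3_WDOG_NEW(core & 3)); /* 0x1010000020008 */
	return 0;
}
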
@@ -342,9 +340,9 @@ static inline void tx_buffer_free(void *buffer)
}
#define lio_dma_alloc(oct, size, dma_addr) \
- dma_alloc_coherent(&oct->pci_dev->dev, size, dma_addr, GFP_KERNEL)
+ dma_alloc_coherent(&(oct)->pci_dev->dev, size, dma_addr, GFP_KERNEL)
#define lio_dma_free(oct, size, virt_addr, dma_addr) \
- dma_free_coherent(&oct->pci_dev->dev, size, virt_addr, dma_addr)
+ dma_free_coherent(&(oct)->pci_dev->dev, size, virt_addr, dma_addr)
static inline
void *get_rbd(struct sk_buff *skb)
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_nic.c b/drivers/net/ethernet/cavium/liquidio/octeon_nic.c
index 40ac1fe88956..c3d6a8228362 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_nic.c
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_nic.c
@@ -4,7 +4,7 @@
* Contact: support@cavium.com
* Please include "LiquidIO" in the subject.
*
- * Copyright (c) 2003-2015 Cavium, Inc.
+ * Copyright (c) 2003-2016 Cavium, Inc.
*
* This file is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, Version 2, as
@@ -15,9 +15,6 @@
* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
* NONINFRINGEMENT. See the GNU General Public License for more
* details.
- *
- * This file may also be available under a different license from Cavium.
- * Contact Cavium, Inc. for more information
**********************************************************************/
#include <linux/pci.h>
#include <linux/netdevice.h>
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_nic.h b/drivers/net/ethernet/cavium/liquidio/octeon_nic.h
index 4b8da67b995f..0c7a5c9b2932 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_nic.h
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_nic.h
@@ -4,7 +4,7 @@
* Contact: support@cavium.com
* Please include "LiquidIO" in the subject.
*
- * Copyright (c) 2003-2015 Cavium, Inc.
+ * Copyright (c) 2003-2016 Cavium, Inc.
*
* This file is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, Version 2, as
@@ -15,9 +15,6 @@
* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
* NONINFRINGEMENT. See the GNU General Public License for more
* details.
- *
- * This file may also be available under a different license from Cavium.
- * Contact Cavium, Inc. for more information
**********************************************************************/
/*! \file octeon_nic.h
@@ -67,7 +64,7 @@ struct octnic_ctrl_pkt {
octnic_ctrl_pkt_cb_fn_t cb_fn;
};
-#define MAX_UDD_SIZE(nctrl) (sizeof(nctrl->udd))
+#define MAX_UDD_SIZE(nctrl) (sizeof((nctrl)->udd))
/** Structure of data information passed by the NIC module to the OSI
* layer when forwarding data to Octeon device software.
diff --git a/drivers/net/ethernet/cavium/liquidio/request_manager.c b/drivers/net/ethernet/cavium/liquidio/request_manager.c
index 90866bb50033..3ce66759e80a 100644
--- a/drivers/net/ethernet/cavium/liquidio/request_manager.c
+++ b/drivers/net/ethernet/cavium/liquidio/request_manager.c
@@ -4,7 +4,7 @@
* Contact: support@cavium.com
* Please include "LiquidIO" in the subject.
*
- * Copyright (c) 2003-2015 Cavium, Inc.
+ * Copyright (c) 2003-2016 Cavium, Inc.
*
* This file is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, Version 2, as
@@ -15,9 +15,6 @@
* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
* NONINFRINGEMENT. See the GNU General Public License for more
* details.
- *
- * This file may also be available under a different license from Cavium.
- * Contact Cavium, Inc. for more information
**********************************************************************/
#include <linux/pci.h>
#include <linux/netdevice.h>
@@ -31,9 +28,7 @@
#include "octeon_network.h"
#include "cn66xx_device.h"
#include "cn23xx_pf_device.h"
-
-#define INCR_INSTRQUEUE_PKT_COUNT(octeon_dev_ptr, iq_no, field, count) \
- (octeon_dev_ptr->instr_queue[iq_no]->stats.field += count)
+#include "cn23xx_vf_device.h"
struct iq_post_status {
int status;
@@ -71,9 +66,12 @@ int octeon_init_instr_queue(struct octeon_device *oct,
int numa_node = cpu_to_node(iq_no % num_online_cpus());
if (OCTEON_CN6XXX(oct))
- conf = &(CFG_GET_IQ_CFG(CHIP_FIELD(oct, cn6xxx, conf)));
+ conf = &(CFG_GET_IQ_CFG(CHIP_CONF(oct, cn6xxx)));
else if (OCTEON_CN23XX_PF(oct))
- conf = &(CFG_GET_IQ_CFG(CHIP_FIELD(oct, cn23xx_pf, conf)));
+ conf = &(CFG_GET_IQ_CFG(CHIP_CONF(oct, cn23xx_pf)));
+ else if (OCTEON_CN23XX_VF(oct))
+ conf = &(CFG_GET_IQ_CFG(CHIP_CONF(oct, cn23xx_vf)));
+
if (!conf) {
dev_err(&oct->pci_dev->dev, "Unsupported Chip %x\n",
oct->chip_id);
@@ -145,7 +143,7 @@ int octeon_init_instr_queue(struct octeon_device *oct,
spin_lock_init(&iq->iq_flush_running_lock);
- oct->io_qmask.iq |= (1ULL << iq_no);
+ oct->io_qmask.iq |= BIT_ULL(iq_no);
/* Set the 32B/64B mode for each input queue */
oct->io_qmask.iq64B |= ((conf->instr_type == 64) << iq_no);
@@ -157,6 +155,8 @@ int octeon_init_instr_queue(struct octeon_device *oct,
WQ_MEM_RECLAIM,
0);
if (!oct->check_db_wq[iq_no].wq) {
+ vfree(iq->request_list);
+ iq->request_list = NULL;
lio_dma_free(oct, q_size, iq->base_addr, iq->base_addr_dma);
dev_err(&oct->pci_dev->dev, "check db wq create failed for iq %d\n",
iq_no);
@@ -183,10 +183,13 @@ int octeon_delete_instr_queue(struct octeon_device *oct, u32 iq_no)
if (OCTEON_CN6XXX(oct))
desc_size =
- CFG_GET_IQ_INSTR_TYPE(CHIP_FIELD(oct, cn6xxx, conf));
+ CFG_GET_IQ_INSTR_TYPE(CHIP_CONF(oct, cn6xxx));
else if (OCTEON_CN23XX_PF(oct))
desc_size =
- CFG_GET_IQ_INSTR_TYPE(CHIP_FIELD(oct, cn23xx_pf, conf));
+ CFG_GET_IQ_INSTR_TYPE(CHIP_CONF(oct, cn23xx_pf));
+ else if (OCTEON_CN23XX_VF(oct))
+ desc_size =
+ CFG_GET_IQ_INSTR_TYPE(CHIP_CONF(oct, cn23xx_vf));
vfree(iq->request_list);
@@ -239,7 +242,9 @@ int octeon_setup_iq(struct octeon_device *oct,
}
oct->num_iqs++;
- oct->fn_list.enable_io_queues(oct);
+ if (oct->fn_list.enable_io_queues(oct))
+ return 1;
+
return 0;
}
@@ -250,9 +255,8 @@ int lio_wait_for_instr_fetch(struct octeon_device *oct)
do {
instr_cnt = 0;
- /*for (i = 0; i < oct->num_iqs; i++) {*/
for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) {
- if (!(oct->io_qmask.iq & (1ULL << i)))
+ if (!(oct->io_qmask.iq & BIT_ULL(i)))
continue;
pending =
atomic_read(&oct->
@@ -319,7 +323,8 @@ __post_command2(struct octeon_instr_queue *iq, u8 *cmd)
/* "index" is returned, host_write_index is modified. */
st.index = iq->host_write_index;
- INCR_INDEX_BY1(iq->host_write_index, iq->max_count);
+ iq->host_write_index = incr_index(iq->host_write_index, 1,
+ iq->max_count);
iq->fill_cnt++;
/* Flush the command into memory. We need to be sure the data is in
@@ -389,7 +394,7 @@ lio_process_iq_request_list(struct octeon_device *oct,
case REQTYPE_SOFT_COMMAND:
sc = buf;
- if (OCTEON_CN23XX_PF(oct))
+ if (OCTEON_CN23XX_PF(oct) || OCTEON_CN23XX_VF(oct))
irh = (struct octeon_instr_irh *)
&sc->cmd.cmd3.irh;
else
@@ -434,7 +439,7 @@ lio_process_iq_request_list(struct octeon_device *oct,
skip_this:
inst_count++;
- INCR_INDEX_BY1(old, iq->max_count);
+ old = incr_index(old, 1, iq->max_count);
if ((napi_budget) && (inst_count >= napi_budget))
break;
@@ -577,8 +582,6 @@ octeon_send_command(struct octeon_device *oct, u32 iq_no,
/* This is only done here to expedite packets being flushed
* for cases where there are no IQ completion interrupts.
*/
- /*if (iq->do_auto_flush)*/
- /* octeon_flush_iq(oct, iq, 2, 0);*/
return st.status;
}
@@ -604,7 +607,7 @@ octeon_prepare_soft_command(struct octeon_device *oct,
oct_cfg = octeon_get_conf(oct);
- if (OCTEON_CN23XX_PF(oct)) {
+ if (OCTEON_CN23XX_PF(oct) || OCTEON_CN23XX_VF(oct)) {
ih3 = (struct octeon_instr_ih3 *)&sc->cmd.cmd3.ih3;
ih3->pkind = oct->instr_queue[sc->iq_no]->txpciq.s.pkind;
@@ -697,7 +700,7 @@ int octeon_send_soft_command(struct octeon_device *oct,
struct octeon_instr_irh *irh;
u32 len;
- if (OCTEON_CN23XX_PF(oct)) {
+ if (OCTEON_CN23XX_PF(oct) || OCTEON_CN23XX_VF(oct)) {
ih3 = (struct octeon_instr_ih3 *)&sc->cmd.cmd3.ih3;
if (ih3->dlengsz) {
WARN_ON(!sc->dmadptr);
@@ -749,8 +752,10 @@ int octeon_setup_sc_buffer_pool(struct octeon_device *oct)
lio_dma_alloc(oct,
SOFT_COMMAND_BUFFER_SIZE,
(dma_addr_t *)&dma_addr);
- if (!sc)
+ if (!sc) {
+ octeon_free_sc_buffer_pool(oct);
return 1;
+ }
sc->dma_addr = dma_addr;
sc->size = SOFT_COMMAND_BUFFER_SIZE;
diff --git a/drivers/net/ethernet/cavium/liquidio/response_manager.c b/drivers/net/ethernet/cavium/liquidio/response_manager.c
index be52178d8cb6..2fbaae96b505 100644
--- a/drivers/net/ethernet/cavium/liquidio/response_manager.c
+++ b/drivers/net/ethernet/cavium/liquidio/response_manager.c
@@ -4,7 +4,7 @@
* Contact: support@cavium.com
* Please include "LiquidIO" in the subject.
*
- * Copyright (c) 2003-2015 Cavium, Inc.
+ * Copyright (c) 2003-2016 Cavium, Inc.
*
* This file is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, Version 2, as
@@ -15,9 +15,6 @@
* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
* NONINFRINGEMENT. See the GNU General Public License for more
* details.
- *
- * This file may also be available under a different license from Cavium.
- * Contact Cavium, Inc. for more information
**********************************************************************/
#include <linux/pci.h>
#include <linux/netdevice.h>
@@ -81,17 +78,14 @@ int lio_process_ordered_list(struct octeon_device *octeon_dev,
spin_lock_bh(&ordered_sc_list->lock);
if (ordered_sc_list->head.next == &ordered_sc_list->head) {
- /* ordered_sc_list is empty; there is
- * nothing to process
- */
- spin_unlock_bh
- (&ordered_sc_list->lock);
+ spin_unlock_bh(&ordered_sc_list->lock);
return 1;
}
sc = (struct octeon_soft_command *)ordered_sc_list->
head.next;
- if (OCTEON_CN23XX_PF(octeon_dev)) {
+ if (OCTEON_CN23XX_PF(octeon_dev) ||
+ OCTEON_CN23XX_VF(octeon_dev)) {
rdp = (struct octeon_instr_rdp *)&sc->cmd.cmd3.rdp;
rptr = sc->cmd.cmd3.rptr;
} else {
diff --git a/drivers/net/ethernet/cavium/liquidio/response_manager.h b/drivers/net/ethernet/cavium/liquidio/response_manager.h
index 7a48752dcb10..cbb2d84e8932 100644
--- a/drivers/net/ethernet/cavium/liquidio/response_manager.h
+++ b/drivers/net/ethernet/cavium/liquidio/response_manager.h
@@ -4,7 +4,7 @@
* Contact: support@cavium.com
* Please include "LiquidIO" in the subject.
*
- * Copyright (c) 2003-2015 Cavium, Inc.
+ * Copyright (c) 2003-2016 Cavium, Inc.
*
* This file is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, Version 2, as
@@ -15,9 +15,6 @@
* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
* NONINFRINGEMENT. See the GNU General Public License for more
* details.
- *
- * This file may also be available under a different license from Cavium.
- * Contact Cavium, Inc. for more information
**********************************************************************/
/*! \file response_manager.h
@@ -85,7 +82,6 @@ enum {
/** A value of 0x00000000 indicates no error i.e. success */
#define DRIVER_ERROR_NONE 0x00000000
-/** (Major number: 0x0000; Minor Number: 0x0001) */
#define DRIVER_ERROR_REQ_PENDING 0x00000001
#define DRIVER_ERROR_REQ_TIMEOUT 0x00000003
#define DRIVER_ERROR_REQ_EINTR 0x00000004
diff --git a/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c b/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c
index 4ab404f45b21..21f80f5744ba 100644
--- a/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c
+++ b/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c
@@ -645,16 +645,6 @@ static int octeon_mgmt_change_mtu(struct net_device *netdev, int new_mtu)
struct octeon_mgmt *p = netdev_priv(netdev);
int size_without_fcs = new_mtu + OCTEON_MGMT_RX_HEADROOM;
- /* Limit the MTU to make sure the ethernet packets are between
- * 64 bytes and 16383 bytes.
- */
- if (size_without_fcs < 64 || size_without_fcs > 16383) {
- dev_warn(p->dev, "MTU must be between %d and %d.\n",
- 64 - OCTEON_MGMT_RX_HEADROOM,
- 16383 - OCTEON_MGMT_RX_HEADROOM);
- return -EINVAL;
- }
-
netdev->mtu = new_mtu;
cvmx_write_csr(p->agl + AGL_GMX_RX_FRM_MAX, size_without_fcs);
@@ -1479,6 +1469,12 @@ static int octeon_mgmt_probe(struct platform_device *pdev)
p->agl = (u64)devm_ioremap(&pdev->dev, p->agl_phys, p->agl_size);
p->agl_prt_ctl = (u64)devm_ioremap(&pdev->dev, p->agl_prt_ctl_phys,
p->agl_prt_ctl_size);
+ if (!p->mix || !p->agl || !p->agl_prt_ctl) {
+ dev_err(&pdev->dev, "failed to map I/O memory\n");
+ result = -ENOMEM;
+ goto err;
+ }
+
spin_lock_init(&p->lock);
skb_queue_head_init(&p->tx_list);
@@ -1491,6 +1487,9 @@ static int octeon_mgmt_probe(struct platform_device *pdev)
netdev->netdev_ops = &octeon_mgmt_ops;
netdev->ethtool_ops = &octeon_mgmt_ethtool_ops;
+ netdev->min_mtu = 64 - OCTEON_MGMT_RX_HEADROOM;
+ netdev->max_mtu = 16383 - OCTEON_MGMT_RX_HEADROOM;
+
mac = of_get_mac_address(pdev->dev.of_node);
if (mac)
diff --git a/drivers/net/ethernet/cavium/thunder/nic.h b/drivers/net/ethernet/cavium/thunder/nic.h
index 86bd93ce2ea3..e739c7153562 100644
--- a/drivers/net/ethernet/cavium/thunder/nic.h
+++ b/drivers/net/ethernet/cavium/thunder/nic.h
@@ -149,6 +149,12 @@ struct nicvf_rss_info {
u64 key[RSS_HASH_KEY_SIZE];
} ____cacheline_aligned_in_smp;
+struct nicvf_pfc {
+ u8 autoneg;
+ u8 fc_rx;
+ u8 fc_tx;
+};
+
enum rx_stats_reg_offset {
RX_OCTS = 0x0,
RX_UCAST = 0x1,
@@ -292,11 +298,13 @@ struct nicvf {
u8 node;
u8 cpi_alg;
bool link_up;
+ u8 mac_type;
u8 duplex;
u32 speed;
bool tns_mode;
bool loopback_supported;
struct nicvf_rss_info rss_info;
+ struct nicvf_pfc pfc;
struct tasklet_struct qs_err_task;
struct work_struct reset_task;
@@ -357,6 +365,7 @@ struct nicvf {
#define NIC_MBOX_MSG_SNICVF_PTR 0x15 /* Send sqet nicvf ptr to PVF */
#define NIC_MBOX_MSG_LOOPBACK 0x16 /* Set interface in loopback */
#define NIC_MBOX_MSG_RESET_STAT_COUNTER 0x17 /* Reset statistics counters */
+#define NIC_MBOX_MSG_PFC 0x18 /* Pause frame control */
#define NIC_MBOX_MSG_CFG_DONE 0xF0 /* VF configuration done */
#define NIC_MBOX_MSG_SHUTDOWN 0xF1 /* VF is being shutdown */
@@ -446,6 +455,7 @@ struct bgx_stats_msg {
/* Physical interface link status */
struct bgx_link_status {
u8 msg;
+ u8 mac_type;
u8 link_up;
u8 duplex;
u32 speed;
@@ -498,6 +508,14 @@ struct reset_stat_cfg {
u16 sq_stat_mask;
};
+struct pfc {
+ u8 msg;
+ u8 get; /* Get or set PFC settings */
+ u8 autoneg;
+ u8 fc_rx;
+ u8 fc_tx;
+};
+
/* 128 bit shared memory between PF and each VF */
union nic_mbx {
struct { u8 msg; } msg;
@@ -516,6 +534,7 @@ union nic_mbx {
struct nicvf_ptr nicvf;
struct set_loopback lbk;
struct reset_stat_cfg reset_stat;
+ struct pfc pfc;
};
#define NIC_NODE_ID_MASK 0x03
diff --git a/drivers/net/ethernet/cavium/thunder/nic_main.c b/drivers/net/ethernet/cavium/thunder/nic_main.c
index 6677b96e1f3f..767234e2e8f9 100644
--- a/drivers/net/ethernet/cavium/thunder/nic_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nic_main.c
@@ -809,6 +809,15 @@ static int nic_config_loopback(struct nicpf *nic, struct set_loopback *lbk)
bgx_lmac_internal_loopback(nic->node, bgx_idx, lmac_idx, lbk->enable);
+ /* Enable moving average calculation.
+ * Keep the LVL/AVG delay at the HW-enforced minimum so that not too many
+ * packets sneak in between average calculations.
+ */
+ nic_reg_write(nic, NIC_PF_CQ_AVG_CFG,
+ (BIT_ULL(20) | 0x2ull << 14 | 0x1));
+ nic_reg_write(nic, NIC_PF_RRM_AVG_CFG,
+ (BIT_ULL(20) | 0x3ull << 14 | 0x1));
+
return 0;
}
@@ -889,6 +898,30 @@ static void nic_enable_vf(struct nicpf *nic, int vf, bool enable)
bgx_lmac_rx_tx_enable(nic->node, bgx, lmac, enable);
}
+static void nic_pause_frame(struct nicpf *nic, int vf, struct pfc *cfg)
+{
+ int bgx, lmac;
+ struct pfc pfc;
+ union nic_mbx mbx = {};
+
+ if (vf >= nic->num_vf_en)
+ return;
+ bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
+ lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
+
+ if (cfg->get) {
+ bgx_lmac_get_pfc(nic->node, bgx, lmac, &pfc);
+ mbx.pfc.msg = NIC_MBOX_MSG_PFC;
+ mbx.pfc.autoneg = pfc.autoneg;
+ mbx.pfc.fc_rx = pfc.fc_rx;
+ mbx.pfc.fc_tx = pfc.fc_tx;
+ nic_send_msg_to_vf(nic, vf, &mbx);
+ } else {
+ bgx_lmac_set_pfc(nic->node, bgx, lmac, cfg);
+ nic_mbx_send_ack(nic, vf);
+ }
+}
+
/* Interrupt handler to handle mailbox messages from VFs */
static void nic_handle_mbx_intr(struct nicpf *nic, int vf)
{
@@ -1028,6 +1061,9 @@ static void nic_handle_mbx_intr(struct nicpf *nic, int vf)
case NIC_MBOX_MSG_RESET_STAT_COUNTER:
ret = nic_reset_stat_counters(nic, vf, &mbx.reset_stat);
break;
+ case NIC_MBOX_MSG_PFC:
+ nic_pause_frame(nic, vf, &mbx.pfc);
+ goto unlock;
default:
dev_err(&nic->pdev->dev,
"Invalid msg from VF%d, msg 0x%x\n", vf, mbx.msg.msg);
@@ -1258,6 +1294,7 @@ static void nic_poll_for_link(struct work_struct *work)
mbx.link_status.link_up = link.link_up;
mbx.link_status.duplex = link.duplex;
mbx.link_status.speed = link.speed;
+ mbx.link_status.mac_type = link.mac_type;
nic_send_msg_to_vf(nic, vf, &mbx);
}
}
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c b/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
index 432bf6be57cb..2e74bbaa38e1 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
@@ -116,29 +116,65 @@ static const unsigned int nicvf_n_hw_stats = ARRAY_SIZE(nicvf_hw_stats);
static const unsigned int nicvf_n_drv_stats = ARRAY_SIZE(nicvf_drv_stats);
static const unsigned int nicvf_n_queue_stats = ARRAY_SIZE(nicvf_queue_stats);
-static int nicvf_get_settings(struct net_device *netdev,
- struct ethtool_cmd *cmd)
+static int nicvf_get_link_ksettings(struct net_device *netdev,
+ struct ethtool_link_ksettings *cmd)
{
struct nicvf *nic = netdev_priv(netdev);
+ u32 supported, advertising;
- cmd->supported = 0;
- cmd->transceiver = XCVR_EXTERNAL;
+ supported = 0;
+ advertising = 0;
if (!nic->link_up) {
- cmd->duplex = DUPLEX_UNKNOWN;
- ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
+ cmd->base.duplex = DUPLEX_UNKNOWN;
+ cmd->base.speed = SPEED_UNKNOWN;
return 0;
}
- if (nic->speed <= 1000) {
- cmd->port = PORT_MII;
- cmd->autoneg = AUTONEG_ENABLE;
- } else {
- cmd->port = PORT_FIBRE;
- cmd->autoneg = AUTONEG_DISABLE;
+ switch (nic->speed) {
+ case SPEED_1000:
+ cmd->base.port = PORT_MII; /* single value; TP is reported via supported */
+ cmd->base.autoneg = AUTONEG_ENABLE;
+ supported |= SUPPORTED_MII | SUPPORTED_TP;
+ supported |= SUPPORTED_1000baseT_Full |
+ SUPPORTED_1000baseT_Half |
+ SUPPORTED_100baseT_Full |
+ SUPPORTED_100baseT_Half |
+ SUPPORTED_10baseT_Full |
+ SUPPORTED_10baseT_Half;
+ supported |= SUPPORTED_Autoneg;
+ advertising |= ADVERTISED_1000baseT_Full |
+ ADVERTISED_1000baseT_Half |
+ ADVERTISED_100baseT_Full |
+ ADVERTISED_100baseT_Half |
+ ADVERTISED_10baseT_Full |
+ ADVERTISED_10baseT_Half;
+ break;
+ case SPEED_10000:
+ if (nic->mac_type == BGX_MODE_RXAUI) {
+ cmd->base.port = PORT_TP;
+ supported |= SUPPORTED_TP;
+ } else {
+ cmd->base.port = PORT_FIBRE;
+ supported |= SUPPORTED_FIBRE;
+ }
+ cmd->base.autoneg = AUTONEG_DISABLE;
+ supported |= SUPPORTED_10000baseT_Full;
+ break;
+ case SPEED_40000:
+ cmd->base.port = PORT_FIBRE;
+ cmd->base.autoneg = AUTONEG_DISABLE;
+ supported |= SUPPORTED_FIBRE;
+ supported |= SUPPORTED_40000baseCR4_Full;
+ break;
}
- cmd->duplex = nic->duplex;
- ethtool_cmd_speed_set(cmd, nic->speed);
+ cmd->base.duplex = nic->duplex;
+ cmd->base.speed = nic->speed;
+
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
+ supported);
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
+ advertising);
return 0;
}
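
The pattern above — fill in the legacy SUPPORTED_*/ADVERTISED_* u32 masks and cmd->base, then translate the masks into the new link-mode bitmaps — is the standard get_settings-to-get_link_ksettings conversion. A minimal sketch of the same shape, with a hypothetical my_get_link_ksettings() and fixed 10G-fibre values that are not part of this patch:

#include <linux/ethtool.h>
#include <linux/netdevice.h>

static int my_get_link_ksettings(struct net_device *netdev,
				 struct ethtool_link_ksettings *cmd)
{
	u32 supported = SUPPORTED_FIBRE | SUPPORTED_10000baseT_Full;
	u32 advertising = ADVERTISED_FIBRE | ADVERTISED_10000baseT_Full;

	cmd->base.speed = SPEED_10000;
	cmd->base.duplex = DUPLEX_FULL;
	cmd->base.port = PORT_FIBRE;
	cmd->base.autoneg = AUTONEG_DISABLE;

	/* Translate the legacy u32 masks into the bitmap representation. */
	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
						supported);
	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
						advertising);
	return 0;
}
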
@@ -690,8 +726,56 @@ static int nicvf_set_channels(struct net_device *dev,
return err;
}
+static void nicvf_get_pauseparam(struct net_device *dev,
+ struct ethtool_pauseparam *pause)
+{
+ struct nicvf *nic = netdev_priv(dev);
+ union nic_mbx mbx = {};
+
+ /* Supported only for 10G/40G interfaces */
+ if ((nic->mac_type == BGX_MODE_SGMII) ||
+ (nic->mac_type == BGX_MODE_QSGMII) ||
+ (nic->mac_type == BGX_MODE_RGMII))
+ return;
+
+ mbx.pfc.msg = NIC_MBOX_MSG_PFC;
+ mbx.pfc.get = 1;
+ if (!nicvf_send_msg_to_pf(nic, &mbx)) {
+ pause->autoneg = nic->pfc.autoneg;
+ pause->rx_pause = nic->pfc.fc_rx;
+ pause->tx_pause = nic->pfc.fc_tx;
+ }
+}
+
+static int nicvf_set_pauseparam(struct net_device *dev,
+ struct ethtool_pauseparam *pause)
+{
+ struct nicvf *nic = netdev_priv(dev);
+ union nic_mbx mbx = {};
+
+ /* Supported only for 10G/40G interfaces */
+ if ((nic->mac_type == BGX_MODE_SGMII) ||
+ (nic->mac_type == BGX_MODE_QSGMII) ||
+ (nic->mac_type == BGX_MODE_RGMII))
+ return -EOPNOTSUPP;
+
+ if (pause->autoneg)
+ return -EOPNOTSUPP;
+
+ mbx.pfc.msg = NIC_MBOX_MSG_PFC;
+ mbx.pfc.get = 0;
+ mbx.pfc.fc_rx = pause->rx_pause;
+ mbx.pfc.fc_tx = pause->tx_pause;
+ if (nicvf_send_msg_to_pf(nic, &mbx))
+ return -EAGAIN;
+
+ nic->pfc.fc_rx = pause->rx_pause;
+ nic->pfc.fc_tx = pause->tx_pause;
+
+ return 0;
+}
+
static const struct ethtool_ops nicvf_ethtool_ops = {
- .get_settings = nicvf_get_settings,
.get_link = nicvf_get_link,
.get_drvinfo = nicvf_get_drvinfo,
.get_msglevel = nicvf_get_msglevel,
@@ -711,7 +795,10 @@ static const struct ethtool_ops nicvf_ethtool_ops = {
.set_rxfh = nicvf_set_rxfh,
.get_channels = nicvf_get_channels,
.set_channels = nicvf_set_channels,
+ .get_pauseparam = nicvf_get_pauseparam,
+ .set_pauseparam = nicvf_set_pauseparam,
.get_ts_info = ethtool_op_get_ts_info,
+ .get_link_ksettings = nicvf_get_link_ksettings,
};
void nicvf_set_ethtool_ops(struct net_device *netdev)
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
index 8a37012c9c89..2006f58b14b1 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
@@ -221,6 +221,7 @@ static void nicvf_handle_mbx_intr(struct nicvf *nic)
nic->link_up = mbx.link_status.link_up;
nic->duplex = mbx.link_status.duplex;
nic->speed = mbx.link_status.speed;
+ nic->mac_type = mbx.link_status.mac_type;
if (nic->link_up) {
netdev_info(nic->netdev, "%s: Link is Up %d Mbps %s\n",
nic->netdev->name, nic->speed,
@@ -255,6 +256,12 @@ static void nicvf_handle_mbx_intr(struct nicvf *nic)
nic->pnicvf = (struct nicvf *)mbx.nicvf.nicvf;
nic->pf_acked = true;
break;
+ case NIC_MBOX_MSG_PFC:
+ nic->pfc.autoneg = mbx.pfc.autoneg;
+ nic->pfc.fc_rx = mbx.pfc.fc_rx;
+ nic->pfc.fc_tx = mbx.pfc.fc_tx;
+ nic->pf_acked = true;
+ break;
default:
netdev_err(nic->netdev,
"Invalid message from PF, msg 0x%x\n", mbx.msg.msg);
@@ -637,6 +644,7 @@ static int nicvf_cq_intr_handler(struct net_device *netdev, u8 cq_idx,
struct cmp_queue *cq = &qs->cq[cq_idx];
struct cqe_rx_t *cq_desc;
struct netdev_queue *txq;
+ struct snd_queue *sq;
unsigned int tx_pkts = 0, tx_bytes = 0;
spin_lock_bh(&cq->lock);
@@ -702,16 +710,20 @@ loop:
done:
/* Wakeup TXQ if its stopped earlier due to SQ full */
- if (tx_done) {
+ sq = &nic->qs->sq[cq_idx];
+ if (tx_done ||
+ (atomic_read(&sq->free_cnt) >= MIN_SQ_DESC_PER_PKT_XMIT)) {
netdev = nic->pnicvf->netdev;
txq = netdev_get_tx_queue(netdev,
nicvf_netdev_qidx(nic, cq_idx));
if (tx_pkts)
netdev_tx_completed_queue(txq, tx_pkts, tx_bytes);
- nic = nic->pnicvf;
+ /* To read updated queue and carrier status */
+ smp_mb();
if (netif_tx_queue_stopped(txq) && netif_carrier_ok(netdev)) {
- netif_tx_start_queue(txq);
+ netif_tx_wake_queue(txq);
+ nic = nic->pnicvf;
this_cpu_inc(nic->drv_stats->txq_wake);
if (netif_msg_tx_err(nic))
netdev_warn(netdev,
@@ -1047,6 +1059,9 @@ static netdev_tx_t nicvf_xmit(struct sk_buff *skb, struct net_device *netdev)
struct nicvf *nic = netdev_priv(netdev);
int qid = skb_get_queue_mapping(skb);
struct netdev_queue *txq = netdev_get_tx_queue(netdev, qid);
+ struct nicvf *snic;
+ struct snd_queue *sq;
+ int tmp;
/* Check for minimum packet length */
if (skb->len <= ETH_HLEN) {
@@ -1054,13 +1069,39 @@ static netdev_tx_t nicvf_xmit(struct sk_buff *skb, struct net_device *netdev)
return NETDEV_TX_OK;
}
- if (!netif_tx_queue_stopped(txq) && !nicvf_sq_append_skb(nic, skb)) {
+ snic = nic;
+ /* Get secondary Qset's SQ structure */
+ if (qid >= MAX_SND_QUEUES_PER_QS) {
+ tmp = qid / MAX_SND_QUEUES_PER_QS;
+ snic = (struct nicvf *)nic->snicvf[tmp - 1];
+ if (!snic) {
+ netdev_warn(nic->netdev,
+ "Secondary Qset#%d's ptr not initialized\n",
+ tmp - 1);
+ dev_kfree_skb(skb);
+ return NETDEV_TX_OK;
+ }
+ qid = qid % MAX_SND_QUEUES_PER_QS;
+ }
+
+ sq = &snic->qs->sq[qid];
+ if (!netif_tx_queue_stopped(txq) &&
+ !nicvf_sq_append_skb(snic, sq, skb, qid)) {
netif_tx_stop_queue(txq);
- this_cpu_inc(nic->drv_stats->txq_stop);
- if (netif_msg_tx_err(nic))
- netdev_warn(netdev,
- "%s: Transmit ring full, stopping SQ%d\n",
- netdev->name, qid);
+
+ /* Barrier, so that stop_queue is visible to other CPUs */
+ smp_mb();
+
+ /* Check again, in case another CPU freed descriptors */
+ if (atomic_read(&sq->free_cnt) > MIN_SQ_DESC_PER_PKT_XMIT) {
+ netif_tx_wake_queue(txq);
+ } else {
+ this_cpu_inc(nic->drv_stats->txq_stop);
+ if (netif_msg_tx_err(nic))
+ netdev_warn(netdev,
+ "%s: Transmit ring full, stopping SQ%d\n",
+ netdev->name, qid);
+ }
return NETDEV_TX_BUSY;
}
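
A quick check of the queue-index arithmetic above, which splits a flat transmit queue id into a secondary Qset pointer and a local SQ number (MAX_SND_QUEUES_PER_QS is taken as 8, per the driver headers):

#include <stdio.h>

#define MAX_SND_QUEUES_PER_QS 8		/* per the driver headers */

int main(void)
{
	int qid = 10;
	int qset = qid / MAX_SND_QUEUES_PER_QS;		/* 1 -> use snicvf[0] */
	int local = qid % MAX_SND_QUEUES_PER_QS;	/* SQ 2 on that Qset */

	printf("qid %d -> secondary Qset #%d, local SQ %d\n",
	       qid, qset - 1, local);
	return 0;
}
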
@@ -1291,20 +1332,17 @@ napi_del:
static int nicvf_change_mtu(struct net_device *netdev, int new_mtu)
{
struct nicvf *nic = netdev_priv(netdev);
-
- if (new_mtu > NIC_HW_MAX_FRS)
- return -EINVAL;
-
- if (new_mtu < NIC_HW_MIN_FRS)
- return -EINVAL;
+ int orig_mtu = netdev->mtu;
netdev->mtu = new_mtu;
if (!netif_running(netdev))
return 0;
- if (nicvf_update_hw_max_frs(nic, new_mtu))
+ if (nicvf_update_hw_max_frs(nic, new_mtu)) {
+ netdev->mtu = orig_mtu;
return -EINVAL;
+ }
return 0;
}
@@ -1631,6 +1669,10 @@ static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
netdev->netdev_ops = &nicvf_netdev_ops;
netdev->watchdog_timeo = NICVF_TX_TIMEOUT;
+ /* MTU range: 64 - 9200 */
+ netdev->min_mtu = NIC_HW_MIN_FRS;
+ netdev->max_mtu = NIC_HW_MAX_FRS;
+
INIT_WORK(&nic->reset_task, nicvf_reset_task);
err = register_netdev(netdev);
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
index 747ef0882976..d2ac133e36f1 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
@@ -544,14 +544,18 @@ static void nicvf_rcv_queue_config(struct nicvf *nic, struct queue_set *qs,
nicvf_send_msg_to_pf(nic, &mbx);
mbx.rq.msg = NIC_MBOX_MSG_RQ_BP_CFG;
- mbx.rq.cfg = (1ULL << 63) | (1ULL << 62) | (qs->vnic_id << 0);
+ mbx.rq.cfg = BIT_ULL(63) | BIT_ULL(62) |
+ (RQ_PASS_RBDR_LVL << 16) | (RQ_PASS_CQ_LVL << 8) |
+ (qs->vnic_id << 0);
nicvf_send_msg_to_pf(nic, &mbx);
/* RQ drop config
* Enable CQ drop to reserve sufficient CQEs for all tx packets
*/
mbx.rq.msg = NIC_MBOX_MSG_RQ_DROP_CFG;
- mbx.rq.cfg = (1ULL << 62) | (RQ_CQ_DROP << 8);
+ mbx.rq.cfg = BIT_ULL(63) | BIT_ULL(62) |
+ (RQ_PASS_RBDR_LVL << 40) | (RQ_DROP_RBDR_LVL << 32) |
+ (RQ_PASS_CQ_LVL << 16) | (RQ_DROP_CQ_LVL << 8);
nicvf_send_msg_to_pf(nic, &mbx);
if (!nic->sqs_mode && (qidx == 0)) {
@@ -650,6 +654,7 @@ static void nicvf_snd_queue_config(struct nicvf *nic, struct queue_set *qs,
sq_cfg.ldwb = 0;
sq_cfg.qsize = SND_QSIZE;
sq_cfg.tstmp_bgx_intf = 0;
+ sq_cfg.cq_limit = 0;
nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, *(u64 *)&sq_cfg);
/* Set threshold value for interrupt generation */
@@ -1185,30 +1190,12 @@ static int nicvf_sq_append_tso(struct nicvf *nic, struct snd_queue *sq,
}
/* Append an skb to a SQ for packet transfer. */
-int nicvf_sq_append_skb(struct nicvf *nic, struct sk_buff *skb)
+int nicvf_sq_append_skb(struct nicvf *nic, struct snd_queue *sq,
+ struct sk_buff *skb, u8 sq_num)
{
int i, size;
int subdesc_cnt, tso_sqe = 0;
- int sq_num, qentry;
- struct queue_set *qs;
- struct snd_queue *sq;
-
- sq_num = skb_get_queue_mapping(skb);
- if (sq_num >= MAX_SND_QUEUES_PER_QS) {
- /* Get secondary Qset's SQ structure */
- i = sq_num / MAX_SND_QUEUES_PER_QS;
- if (!nic->snicvf[i - 1]) {
- netdev_warn(nic->netdev,
- "Secondary Qset#%d's ptr not initialized\n",
- i - 1);
- return 1;
- }
- nic = (struct nicvf *)nic->snicvf[i - 1];
- sq_num = sq_num % MAX_SND_QUEUES_PER_QS;
- }
-
- qs = nic->qs;
- sq = &qs->sq[sq_num];
+ int qentry;
subdesc_cnt = nicvf_sq_subdesc_required(nic, skb);
if (subdesc_cnt > atomic_read(&sq->free_cnt))
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.h b/drivers/net/ethernet/cavium/thunder/nicvf_queues.h
index 2e3c940c1093..9e2104675bc9 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.h
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.h
@@ -85,12 +85,26 @@
#define MAX_CQES_FOR_TX ((SND_QUEUE_LEN / MIN_SQ_DESC_PER_PKT_XMIT) * \
MAX_CQE_PER_PKT_XMIT)
-/* Calculate number of CQEs to reserve for all SQEs.
- * Its 1/256th level of CQ size.
- * '+ 1' to account for pipelining
+
+/* RED and Backpressure levels of CQ for pkt reception
+ * For CQ, level is a measure of emptiness, i.e. 0x0 means full
+ * e.g.: for a CQ of size 4K and pass/drop levels of 160/144,
+ * HW accepts pkt if unused CQE >= 2560
+ * RED accepts pkt if unused CQE < 2560 & >= 2304
+ * DROPs pkts if unused CQE < 2304
+ */
+#define RQ_PASS_CQ_LVL 160ULL
+#define RQ_DROP_CQ_LVL 144ULL
+
+/* RED and Backpressure levels of RBDR for pkt reception
+ * For RBDR, level is a measure of fullness, i.e. 0x0 means empty
+ * e.g.: for an RBDR of size 8K and pass/drop levels of 8/0,
+ * HW accepts pkt if unused RBs >= 256
+ * RED accepts pkt if unused RBs < 256 & >= 0
+ * DROPs pkts if unused RBs < 0
*/
-#define RQ_CQ_DROP ((256 / (CMP_QUEUE_LEN / \
- (CMP_QUEUE_LEN - MAX_CQES_FOR_TX))) + 1)
+#define RQ_PASS_RBDR_LVL 8ULL
+#define RQ_DROP_RBDR_LVL 0ULL
/* Descriptor size in bytes */
#define SND_QUEUE_DESC_SIZE 16
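
Cross-checking the pass/drop comments above against the new defines, under the assumption (inferred from the 4K-CQ example, not stated in the patch) that a level is counted in units of queue_size/256 entries:

#include <stdio.h>

int main(void)
{
	unsigned int cq_size = 4096, rbdr_size = 8192;
	unsigned int cq_unit = cq_size / 256;		/* 16 CQEs per level */
	unsigned int rbdr_unit = rbdr_size / 256;	/* 32 RBs per level */

	printf("CQ pass threshold:   %u unused CQEs\n", 160 * cq_unit); /* 2560 */
	printf("CQ drop threshold:   %u unused CQEs\n", 144 * cq_unit); /* 2304 */
	printf("RBDR pass threshold: %u unused RBs\n", 8 * rbdr_unit);  /* 256 */
	return 0;
}
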
@@ -292,7 +306,8 @@ void nicvf_sq_disable(struct nicvf *nic, int qidx);
void nicvf_put_sq_desc(struct snd_queue *sq, int desc_cnt);
void nicvf_sq_free_used_descs(struct net_device *netdev,
struct snd_queue *sq, int qidx);
-int nicvf_sq_append_skb(struct nicvf *nic, struct sk_buff *skb);
+int nicvf_sq_append_skb(struct nicvf *nic, struct snd_queue *sq,
+ struct sk_buff *skb, u8 sq_num);
struct sk_buff *nicvf_get_rcv_skb(struct nicvf *nic, struct cqe_rx_t *cqe_rx);
void nicvf_rbdr_task(unsigned long data);
diff --git a/drivers/net/ethernet/cavium/thunder/q_struct.h b/drivers/net/ethernet/cavium/thunder/q_struct.h
index 9e6d9876bfd0..f36347237a54 100644
--- a/drivers/net/ethernet/cavium/thunder/q_struct.h
+++ b/drivers/net/ethernet/cavium/thunder/q_struct.h
@@ -624,7 +624,9 @@ struct cq_cfg {
struct sq_cfg {
#if defined(__BIG_ENDIAN_BITFIELD)
- u64 reserved_20_63:44;
+ u64 reserved_32_63:32;
+ u64 cq_limit:8;
+ u64 reserved_20_23:4;
u64 ena:1;
u64 reserved_18_18:1;
u64 reset:1;
@@ -642,7 +644,9 @@ struct sq_cfg {
u64 reset:1;
u64 reserved_18_18:1;
u64 ena:1;
- u64 reserved_20_63:44;
+ u64 reserved_20_23:4;
+ u64 cq_limit:8;
+ u64 reserved_32_63:32;
#endif
};
diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
index 050e21fbb147..9211c750e064 100644
--- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
+++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
@@ -161,6 +161,7 @@ void bgx_get_lmac_link_state(int node, int bgx_idx, int lmacid, void *status)
return;
lmac = &bgx->lmac[lmacid];
+ link->mac_type = lmac->lmac_type;
link->link_up = lmac->link_up;
link->duplex = lmac->last_duplex;
link->speed = lmac->last_speed;
@@ -211,6 +212,47 @@ void bgx_lmac_rx_tx_enable(int node, int bgx_idx, int lmacid, bool enable)
}
EXPORT_SYMBOL(bgx_lmac_rx_tx_enable);
+void bgx_lmac_get_pfc(int node, int bgx_idx, int lmacid, void *pause)
+{
+ struct pfc *pfc = (struct pfc *)pause;
+ struct bgx *bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx];
+ struct lmac *lmac;
+ u64 cfg;
+
+ if (!bgx)
+ return;
+ lmac = &bgx->lmac[lmacid];
+ if (lmac->is_sgmii)
+ return;
+
+ cfg = bgx_reg_read(bgx, lmacid, BGX_SMUX_CBFC_CTL);
+ pfc->fc_rx = cfg & RX_EN;
+ pfc->fc_tx = cfg & TX_EN;
+ pfc->autoneg = 0;
+}
+EXPORT_SYMBOL(bgx_lmac_get_pfc);
+
+void bgx_lmac_set_pfc(int node, int bgx_idx, int lmacid, void *pause)
+{
+ struct pfc *pfc = (struct pfc *)pause;
+ struct bgx *bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx];
+ struct lmac *lmac;
+ u64 cfg;
+
+ if (!bgx)
+ return;
+ lmac = &bgx->lmac[lmacid];
+ if (lmac->is_sgmii)
+ return;
+
+ cfg = bgx_reg_read(bgx, lmacid, BGX_SMUX_CBFC_CTL);
+ cfg &= ~(RX_EN | TX_EN);
+ cfg |= (pfc->fc_rx ? RX_EN : 0x00);
+ cfg |= (pfc->fc_tx ? TX_EN : 0x00);
+ bgx_reg_write(bgx, lmacid, BGX_SMUX_CBFC_CTL, cfg);
+}
+EXPORT_SYMBOL(bgx_lmac_set_pfc);
+
static void bgx_sgmii_change_link_state(struct lmac *lmac)
{
struct bgx *bgx = lmac->bgx;
@@ -524,6 +566,18 @@ static int bgx_lmac_xaui_init(struct bgx *bgx, struct lmac *lmac)
cfg |= SMU_TX_CTL_DIC_EN;
bgx_reg_write(bgx, lmacid, BGX_SMUX_TX_CTL, cfg);
+ /* Enable receive and transmission of pause frames */
+ bgx_reg_write(bgx, lmacid, BGX_SMUX_CBFC_CTL, ((0xffffULL << 32) |
+ BCK_EN | DRP_EN | TX_EN | RX_EN));
+ /* Configure pause time and interval */
+ bgx_reg_write(bgx, lmacid,
+ BGX_SMUX_TX_PAUSE_PKT_TIME, DEFAULT_PAUSE_TIME);
+ cfg = bgx_reg_read(bgx, lmacid, BGX_SMUX_TX_PAUSE_PKT_INTERVAL);
+ cfg &= ~0xFFFFull;
+ bgx_reg_write(bgx, lmacid, BGX_SMUX_TX_PAUSE_PKT_INTERVAL,
+ cfg | (DEFAULT_PAUSE_TIME - 0x1000));
+ bgx_reg_write(bgx, lmacid, BGX_SMUX_TX_PAUSE_ZERO, 0x01);
+
/* take lmac_count into account */
bgx_reg_modify(bgx, lmacid, BGX_SMUX_TX_THRESH, (0x100 - 1));
/* max packet size */
@@ -970,11 +1024,25 @@ static void bgx_set_lmac_config(struct bgx *bgx, u8 idx)
lmac_set_training(bgx, lmac, lmac->lmacid);
lmac_set_lane2sds(bgx, lmac);
- /* Set LMAC type of other lmac on same DLM i.e LMAC 1/3 */
olmac = &bgx->lmac[idx + 1];
- olmac->lmac_type = lmac->lmac_type;
+ /* Check if the other LMAC on the same DLM is already configured by
+ * firmware; if so, use the same config, else set it the same as
+ * that of LMAC 0/2.
+ * This check is needed because on 80xx only one lane of each DLM
+ * of BGX0 is used, so we have to rely on firmware to distinguish
+ * 80xx from 81xx.
+ */
+ cmr_cfg = bgx_reg_read(bgx, idx + 1, BGX_CMRX_CFG);
+ lmac_type = (u8)((cmr_cfg >> 8) & 0x07);
+ lane_to_sds = (u8)(cmr_cfg & 0xFF);
+ if ((lmac_type == 0) && (lane_to_sds == 0xE4)) {
+ olmac->lmac_type = lmac->lmac_type;
+ lmac_set_lane2sds(bgx, olmac);
+ } else {
+ olmac->lmac_type = lmac_type;
+ olmac->lane_to_sds = lane_to_sds;
+ }
lmac_set_training(bgx, olmac, olmac->lmacid);
- lmac_set_lane2sds(bgx, olmac);
}
}
diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.h b/drivers/net/ethernet/cavium/thunder/thunder_bgx.h
index 01cc7c859131..c18ebfeb2039 100644
--- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.h
+++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.h
@@ -27,6 +27,7 @@
#define MAX_BGX_CHANS_PER_LMAC 16
#define MAX_DMAC_PER_LMAC 8
#define MAX_FRAME_SIZE 9216
+#define DEFAULT_PAUSE_TIME 0xFFFF
#define BGX_ID_MASK 0x3
@@ -126,7 +127,10 @@
#define SMU_RX_CTL_STATUS (3ull << 0)
#define BGX_SMUX_TX_APPEND 0x20100
#define SMU_TX_APPEND_FCS_D BIT_ULL(2)
+#define BGX_SMUX_TX_PAUSE_PKT_TIME 0x20110
#define BGX_SMUX_TX_MIN_PKT 0x20118
+#define BGX_SMUX_TX_PAUSE_PKT_INTERVAL 0x20120
+#define BGX_SMUX_TX_PAUSE_ZERO 0x20138
#define BGX_SMUX_TX_INT 0x20140
#define BGX_SMUX_TX_CTL 0x20178
#define SMU_TX_CTL_DIC_EN BIT_ULL(0)
@@ -136,6 +140,11 @@
#define BGX_SMUX_CTL 0x20200
#define SMU_CTL_RX_IDLE BIT_ULL(0)
#define SMU_CTL_TX_IDLE BIT_ULL(1)
+#define BGX_SMUX_CBFC_CTL 0x20218
+#define RX_EN BIT_ULL(0)
+#define TX_EN BIT_ULL(1)
+#define BCK_EN BIT_ULL(2)
+#define DRP_EN BIT_ULL(3)
#define BGX_GMP_PCS_MRX_CTL 0x30000
#define PCS_MRX_CTL_RST_AN BIT_ULL(9)
@@ -207,6 +216,9 @@ void bgx_set_lmac_mac(int node, int bgx_idx, int lmacid, const u8 *mac);
void bgx_get_lmac_link_state(int node, int bgx_idx, int lmacid, void *status);
void bgx_lmac_internal_loopback(int node, int bgx_idx,
int lmac_idx, bool enable);
+void bgx_lmac_get_pfc(int node, int bgx_idx, int lmacid, void *pause);
+void bgx_lmac_set_pfc(int node, int bgx_idx, int lmacid, void *pause);
+
void xcv_init_hw(void);
void xcv_setup_link(bool link_up, int link_speed);
diff --git a/drivers/net/ethernet/chelsio/cxgb/common.h b/drivers/net/ethernet/chelsio/cxgb/common.h
index 53b1f9478383..6916c62f2487 100644
--- a/drivers/net/ethernet/chelsio/cxgb/common.h
+++ b/drivers/net/ethernet/chelsio/cxgb/common.h
@@ -85,6 +85,11 @@ struct t1_rx_mode {
#define SPEED_INVALID 0xffff
#define DUPLEX_INVALID 0xff
+/* Max frame size PM3393 can handle. Includes Ethernet header and CRC. */
+#define PM3393_MAX_FRAME_SIZE 9600
+
+#define VSC7326_MAX_MTU 9600
+
enum {
CHBT_BOARD_N110,
CHBT_BOARD_N210,
diff --git a/drivers/net/ethernet/chelsio/cxgb/cxgb2.c b/drivers/net/ethernet/chelsio/cxgb/cxgb2.c
index f5f1b0b51ebd..3a05f9098e75 100644
--- a/drivers/net/ethernet/chelsio/cxgb/cxgb2.c
+++ b/drivers/net/ethernet/chelsio/cxgb/cxgb2.c
@@ -568,28 +568,33 @@ static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
reg_block_dump(ap, buf, A_MC5_CONFIG, A_MC5_MASK_WRITE_CMD);
}
-static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+static int get_link_ksettings(struct net_device *dev,
+ struct ethtool_link_ksettings *cmd)
{
struct adapter *adapter = dev->ml_priv;
struct port_info *p = &adapter->port[dev->if_port];
+ u32 supported, advertising;
- cmd->supported = p->link_config.supported;
- cmd->advertising = p->link_config.advertising;
+ supported = p->link_config.supported;
+ advertising = p->link_config.advertising;
if (netif_carrier_ok(dev)) {
- ethtool_cmd_speed_set(cmd, p->link_config.speed);
- cmd->duplex = p->link_config.duplex;
+ cmd->base.speed = p->link_config.speed;
+ cmd->base.duplex = p->link_config.duplex;
} else {
- ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
- cmd->duplex = DUPLEX_UNKNOWN;
+ cmd->base.speed = SPEED_UNKNOWN;
+ cmd->base.duplex = DUPLEX_UNKNOWN;
}
- cmd->port = (cmd->supported & SUPPORTED_TP) ? PORT_TP : PORT_FIBRE;
- cmd->phy_address = p->phy->mdio.prtad;
- cmd->transceiver = XCVR_EXTERNAL;
- cmd->autoneg = p->link_config.autoneg;
- cmd->maxtxpkt = 0;
- cmd->maxrxpkt = 0;
+ cmd->base.port = (supported & SUPPORTED_TP) ? PORT_TP : PORT_FIBRE;
+ cmd->base.phy_address = p->phy->mdio.prtad;
+ cmd->base.autoneg = p->link_config.autoneg;
+
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
+ supported);
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
+ advertising);
+
return 0;
}
@@ -628,36 +633,41 @@ static int speed_duplex_to_caps(int speed, int duplex)
ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full | \
ADVERTISED_10000baseT_Full)
-static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+static int set_link_ksettings(struct net_device *dev,
+ const struct ethtool_link_ksettings *cmd)
{
struct adapter *adapter = dev->ml_priv;
struct port_info *p = &adapter->port[dev->if_port];
struct link_config *lc = &p->link_config;
+ u32 advertising;
+
+ ethtool_convert_link_mode_to_legacy_u32(&advertising,
+ cmd->link_modes.advertising);
if (!(lc->supported & SUPPORTED_Autoneg))
return -EOPNOTSUPP; /* can't change speed/duplex */
- if (cmd->autoneg == AUTONEG_DISABLE) {
- u32 speed = ethtool_cmd_speed(cmd);
- int cap = speed_duplex_to_caps(speed, cmd->duplex);
+ if (cmd->base.autoneg == AUTONEG_DISABLE) {
+ u32 speed = cmd->base.speed;
+ int cap = speed_duplex_to_caps(speed, cmd->base.duplex);
if (!(lc->supported & cap) || (speed == SPEED_1000))
return -EINVAL;
lc->requested_speed = speed;
- lc->requested_duplex = cmd->duplex;
+ lc->requested_duplex = cmd->base.duplex;
lc->advertising = 0;
} else {
- cmd->advertising &= ADVERTISED_MASK;
- if (cmd->advertising & (cmd->advertising - 1))
- cmd->advertising = lc->supported;
- cmd->advertising &= lc->supported;
- if (!cmd->advertising)
+ advertising &= ADVERTISED_MASK;
+ if (advertising & (advertising - 1))
+ advertising = lc->supported;
+ advertising &= lc->supported;
+ if (!advertising)
return -EINVAL;
lc->requested_speed = SPEED_INVALID;
lc->requested_duplex = DUPLEX_INVALID;
- lc->advertising = cmd->advertising | ADVERTISED_Autoneg;
+ lc->advertising = advertising | ADVERTISED_Autoneg;
}
- lc->autoneg = cmd->autoneg;
+ lc->autoneg = cmd->base.autoneg;
if (netif_running(dev))
t1_link_start(p->phy, p->mac, lc);
return 0;
@@ -788,8 +798,6 @@ static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
}
static const struct ethtool_ops t1_ethtool_ops = {
- .get_settings = get_settings,
- .set_settings = set_settings,
.get_drvinfo = get_drvinfo,
.get_msglevel = get_msglevel,
.set_msglevel = set_msglevel,
@@ -807,6 +815,8 @@ static const struct ethtool_ops t1_ethtool_ops = {
.get_ethtool_stats = get_stats,
.get_regs_len = get_regs_len,
.get_regs = get_regs,
+ .get_link_ksettings = get_link_ksettings,
+ .set_link_ksettings = set_link_ksettings,
};
static int t1_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
@@ -825,8 +835,6 @@ static int t1_change_mtu(struct net_device *dev, int new_mtu)
if (!mac->ops->set_mtu)
return -EOPNOTSUPP;
- if (new_mtu < 68)
- return -EINVAL;
if ((ret = mac->ops->set_mtu(mac, new_mtu)))
return ret;
dev->mtu = new_mtu;
@@ -1101,6 +1109,22 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
netif_napi_add(netdev, &adapter->napi, t1_poll, 64);
netdev->ethtool_ops = &t1_ethtool_ops;
+
+ switch (bi->board) {
+ case CHBT_BOARD_CHT110:
+ case CHBT_BOARD_N110:
+ case CHBT_BOARD_N210:
+ case CHBT_BOARD_CHT210:
+ netdev->max_mtu = PM3393_MAX_FRAME_SIZE -
+ (ETH_HLEN + ETH_FCS_LEN);
+ break;
+ case CHBT_BOARD_CHN204:
+ netdev->max_mtu = VSC7326_MAX_MTU;
+ break;
+ default:
+ netdev->max_mtu = ETH_DATA_LEN;
+ break;
+ }
}
if (t1_init_sw_modules(adapter, bi) < 0) {
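The conversion above is the standard get_settings → get_link_ksettings migration: legacy u32 SUPPORTED_*/ADVERTISED_* bitmaps are widened into the new link-mode bitmaps using two helpers from the ethtool core. A minimal round-trip sketch:

#include <linux/ethtool.h>

static void link_mode_roundtrip(struct ethtool_link_ksettings *cmd)
{
	u32 legacy = ADVERTISED_10000baseT_Full | ADVERTISED_Autoneg;
	u32 back;

	/* widen the legacy u32 bitmap into the link-mode bitmap */
	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
						legacy);
	/* narrow it back; returns false if modes beyond u32 were set */
	ethtool_convert_link_mode_to_legacy_u32(&back,
						cmd->link_modes.advertising);
}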
diff --git a/drivers/net/ethernet/chelsio/cxgb/pm3393.c b/drivers/net/ethernet/chelsio/cxgb/pm3393.c
index eb462d7db427..c27908e66f5e 100644
--- a/drivers/net/ethernet/chelsio/cxgb/pm3393.c
+++ b/drivers/net/ethernet/chelsio/cxgb/pm3393.c
@@ -47,9 +47,6 @@
#define OFFSET(REG_ADDR) ((REG_ADDR) << 2)
-/* Max frame size PM3393 can handle. Includes Ethernet header and CRC. */
-#define MAX_FRAME_SIZE 9600
-
#define IPG 12
#define TXXG_CONF1_VAL ((IPG << SUNI1x10GEXP_BITOFF_TXXG_IPGT) | \
SUNI1x10GEXP_BITMSK_TXXG_32BIT_ALIGN | SUNI1x10GEXP_BITMSK_TXXG_CRCEN | \
@@ -331,10 +328,7 @@ static int pm3393_set_mtu(struct cmac *cmac, int mtu)
{
int enabled = cmac->instance->enabled;
- /* MAX_FRAME_SIZE includes header + FCS, mtu doesn't */
- mtu += 14 + 4;
- if (mtu > MAX_FRAME_SIZE)
- return -EINVAL;
+ mtu += ETH_HLEN + ETH_FCS_LEN;
/* Disable Rx/Tx MAC before configuring it. */
if (enabled)
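With the bound moved to netdev->max_mtu at probe time, pm3393_set_mtu() only converts MTU to frame length. The arithmetic, spelled out as a sketch (ETH_HLEN and ETH_FCS_LEN come from <linux/if_ether.h>):

#include <linux/if_ether.h>	/* ETH_HLEN = 14, ETH_FCS_LEN = 4 */

#define PM3393_MAX_FRAME_SIZE 9600	/* full frame: header + payload + FCS */

/* probe time: the largest MTU the MAC can carry */
static const int pm3393_max_mtu =
	PM3393_MAX_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN);	/* 9582 */

/* set_mtu time: back from MTU to the frame length programmed in the MAC */
static inline int pm3393_frame_len(int mtu)
{
	return mtu + ETH_HLEN + ETH_FCS_LEN;
}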
diff --git a/drivers/net/ethernet/chelsio/cxgb/vsc7326.c b/drivers/net/ethernet/chelsio/cxgb/vsc7326.c
index 6f30b6f78553..bdc895bd2a46 100644
--- a/drivers/net/ethernet/chelsio/cxgb/vsc7326.c
+++ b/drivers/net/ethernet/chelsio/cxgb/vsc7326.c
@@ -11,8 +11,6 @@
/* 30 minutes for full statistics update */
#define MAJOR_UPDATE_TICKS (1800 / STATS_TICK_SECS)
-#define MAX_MTU 9600
-
/* The egress WM value 0x01a01fff should be used only when the
* interface is down (MAC port disabled). This is a workaround
* for disabling the T2/MAC flow-control. When the interface is
@@ -452,9 +450,6 @@ static int mac_set_mtu(struct cmac *mac, int mtu)
{
int port = mac->instance->index;
- if (mtu > MAX_MTU)
- return -EINVAL;
-
/* max_len includes header and FCS */
vsc_write(mac->adapter, REG_MAX_LEN(port), mtu + 14 + 4);
return 0;
diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
index 43da891fab97..7b2224ae72f2 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
@@ -1801,27 +1801,31 @@ static int set_phys_id(struct net_device *dev,
return 0;
}
-static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+static int get_link_ksettings(struct net_device *dev,
+ struct ethtool_link_ksettings *cmd)
{
struct port_info *p = netdev_priv(dev);
+ u32 supported;
- cmd->supported = p->link_config.supported;
- cmd->advertising = p->link_config.advertising;
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
+ p->link_config.supported);
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
+ p->link_config.advertising);
if (netif_carrier_ok(dev)) {
- ethtool_cmd_speed_set(cmd, p->link_config.speed);
- cmd->duplex = p->link_config.duplex;
+ cmd->base.speed = p->link_config.speed;
+ cmd->base.duplex = p->link_config.duplex;
} else {
- ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
- cmd->duplex = DUPLEX_UNKNOWN;
+ cmd->base.speed = SPEED_UNKNOWN;
+ cmd->base.duplex = DUPLEX_UNKNOWN;
}
- cmd->port = (cmd->supported & SUPPORTED_TP) ? PORT_TP : PORT_FIBRE;
- cmd->phy_address = p->phy.mdio.prtad;
- cmd->transceiver = XCVR_EXTERNAL;
- cmd->autoneg = p->link_config.autoneg;
- cmd->maxtxpkt = 0;
- cmd->maxrxpkt = 0;
+ ethtool_convert_link_mode_to_legacy_u32(&supported,
+ cmd->link_modes.supported);
+
+ cmd->base.port = (supported & SUPPORTED_TP) ? PORT_TP : PORT_FIBRE;
+ cmd->base.phy_address = p->phy.mdio.prtad;
+ cmd->base.autoneg = p->link_config.autoneg;
return 0;
}
@@ -1860,44 +1864,49 @@ static int speed_duplex_to_caps(int speed, int duplex)
ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full | \
ADVERTISED_10000baseT_Full)
-static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+static int set_link_ksettings(struct net_device *dev,
+ const struct ethtool_link_ksettings *cmd)
{
struct port_info *p = netdev_priv(dev);
struct link_config *lc = &p->link_config;
+ u32 advertising;
+
+ ethtool_convert_link_mode_to_legacy_u32(&advertising,
+ cmd->link_modes.advertising);
if (!(lc->supported & SUPPORTED_Autoneg)) {
/*
* PHY offers a single speed/duplex. See if that's what's
* being requested.
*/
- if (cmd->autoneg == AUTONEG_DISABLE) {
- u32 speed = ethtool_cmd_speed(cmd);
- int cap = speed_duplex_to_caps(speed, cmd->duplex);
+ if (cmd->base.autoneg == AUTONEG_DISABLE) {
+ u32 speed = cmd->base.speed;
+ int cap = speed_duplex_to_caps(speed, cmd->base.duplex);
if (lc->supported & cap)
return 0;
}
return -EINVAL;
}
- if (cmd->autoneg == AUTONEG_DISABLE) {
- u32 speed = ethtool_cmd_speed(cmd);
- int cap = speed_duplex_to_caps(speed, cmd->duplex);
+ if (cmd->base.autoneg == AUTONEG_DISABLE) {
+ u32 speed = cmd->base.speed;
+ int cap = speed_duplex_to_caps(speed, cmd->base.duplex);
if (!(lc->supported & cap) || (speed == SPEED_1000))
return -EINVAL;
lc->requested_speed = speed;
- lc->requested_duplex = cmd->duplex;
+ lc->requested_duplex = cmd->base.duplex;
lc->advertising = 0;
} else {
- cmd->advertising &= ADVERTISED_MASK;
- cmd->advertising &= lc->supported;
- if (!cmd->advertising)
+ advertising &= ADVERTISED_MASK;
+ advertising &= lc->supported;
+ if (!advertising)
return -EINVAL;
lc->requested_speed = SPEED_INVALID;
lc->requested_duplex = DUPLEX_INVALID;
- lc->advertising = cmd->advertising | ADVERTISED_Autoneg;
+ lc->advertising = advertising | ADVERTISED_Autoneg;
}
- lc->autoneg = cmd->autoneg;
+ lc->autoneg = cmd->base.autoneg;
if (netif_running(dev))
t3_link_start(&p->phy, &p->mac, lc);
return 0;
@@ -2097,8 +2106,6 @@ static void get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
}
static const struct ethtool_ops cxgb_ethtool_ops = {
- .get_settings = get_settings,
- .set_settings = set_settings,
.get_drvinfo = get_drvinfo,
.get_msglevel = get_msglevel,
.set_msglevel = set_msglevel,
@@ -2120,6 +2127,8 @@ static const struct ethtool_ops cxgb_ethtool_ops = {
.get_regs_len = get_regs_len,
.get_regs = get_regs,
.get_wol = get_wol,
+ .get_link_ksettings = get_link_ksettings,
+ .set_link_ksettings = set_link_ksettings,
};
static int in_range(int val, int lo, int hi)
@@ -2531,8 +2540,6 @@ static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
struct adapter *adapter = pi->adapter;
int ret;
- if (new_mtu < 81) /* accommodate SACK */
- return -EINVAL;
if ((ret = t3_mac_set_mtu(&pi->mac, new_mtu)))
return ret;
dev->mtu = new_mtu;
@@ -3295,6 +3302,8 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
netdev->netdev_ops = &cxgb_netdev_ops;
netdev->ethtool_ops = &cxgb_ethtool_ops;
+ netdev->min_mtu = 81;
+ netdev->max_mtu = ETH_MAX_MTU;
}
pci_set_drvdata(pdev, adapter);
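The dropped range checks are not lost: once min_mtu/max_mtu are populated, the networking core rejects out-of-range values in dev_set_mtu() before ndo_change_mtu is ever invoked. A simplified sketch of that core-side validation (not the verbatim net/core/dev.c source):

static int dev_mtu_in_range(const struct net_device *dev, int new_mtu)
{
	if (new_mtu < 0 || new_mtu < dev->min_mtu)
		return -EINVAL;		/* below driver-declared minimum */
	if (dev->max_mtu > 0 && new_mtu > dev->max_mtu)
		return -EINVAL;		/* above driver-declared maximum */
	return 0;
}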
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
index 2125903043fb..0bce1bf9ca0f 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
@@ -635,6 +635,7 @@ struct tx_sw_desc;
struct sge_txq {
unsigned int in_use; /* # of in-use Tx descriptors */
+ unsigned int q_type; /* Q type Eth/Ctrl/Ofld */
unsigned int size; /* # of descriptors */
unsigned int cidx; /* SW consumer index */
unsigned int pidx; /* producer index */
@@ -665,7 +666,7 @@ struct sge_eth_txq { /* state for an SGE Ethernet Tx queue */
unsigned long mapping_err; /* # of I/O MMU packet mapping errors */
} ____cacheline_aligned_in_smp;
-struct sge_ofld_txq { /* state for an SGE offload Tx queue */
+struct sge_uld_txq { /* state for an SGE offload Tx queue */
struct sge_txq q;
struct adapter *adap;
struct sk_buff_head sendq; /* list of backpressured packets */
@@ -693,14 +694,20 @@ struct sge_uld_rxq_info {
u8 uld; /* uld type */
};
+struct sge_uld_txq_info {
+ struct sge_uld_txq *uldtxq; /* Txq's for ULD */
+ atomic_t users; /* num users */
+ u16 ntxq; /* # of egress uld queues */
+};
+
struct sge {
struct sge_eth_txq ethtxq[MAX_ETH_QSETS];
- struct sge_ofld_txq ofldtxq[MAX_OFLD_QSETS];
struct sge_ctrl_txq ctrlq[MAX_CTRL_QUEUES];
struct sge_eth_rxq ethrxq[MAX_ETH_QSETS];
struct sge_rspq fw_evtq ____cacheline_aligned_in_smp;
struct sge_uld_rxq_info **uld_rxq_info;
+ struct sge_uld_txq_info **uld_txq_info;
struct sge_rspq intrq ____cacheline_aligned_in_smp;
spinlock_t intrq_lock;
@@ -1298,8 +1305,9 @@ int t4_sge_alloc_ctrl_txq(struct adapter *adap, struct sge_ctrl_txq *txq,
unsigned int cmplqid);
int t4_sge_mod_ctrl_txq(struct adapter *adap, unsigned int eqid,
unsigned int cmplqid);
-int t4_sge_alloc_ofld_txq(struct adapter *adap, struct sge_ofld_txq *txq,
- struct net_device *dev, unsigned int iqid);
+int t4_sge_alloc_uld_txq(struct adapter *adap, struct sge_uld_txq *txq,
+ struct net_device *dev, unsigned int iqid,
+ unsigned int uld_type);
irqreturn_t t4_sge_intr_msix(int irq, void *cookie);
int t4_sge_init(struct adapter *adap);
void t4_sge_start(struct adapter *adap);
@@ -1661,4 +1669,7 @@ int t4_uld_mem_alloc(struct adapter *adap);
void t4_uld_clean_up(struct adapter *adap);
void t4_register_netevent_notifier(void);
void free_rspq_fl(struct adapter *adap, struct sge_rspq *rq, struct sge_fl *fl);
+void free_tx_desc(struct adapter *adap, struct sge_txq *q,
+ unsigned int n, bool unmap);
+void free_txq(struct adapter *adap, struct sge_txq *q);
#endif /* __CXGB4_H__ */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
index 20455d082cb8..acc231293e4d 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
@@ -2512,18 +2512,6 @@ do { \
RL("FLLow:", fl.low);
RL("FLStarving:", fl.starving);
- } else if (ofld_idx < ofld_entries) {
- const struct sge_ofld_txq *tx =
- &adap->sge.ofldtxq[ofld_idx * 4];
- int n = min(4, adap->sge.ofldqsets - 4 * ofld_idx);
-
- S("QType:", "OFLD-Txq");
- T("TxQ ID:", q.cntxt_id);
- T("TxQ size:", q.size);
- T("TxQ inuse:", q.in_use);
- T("TxQ CIDX:", q.cidx);
- T("TxQ PIDX:", q.pidx);
-
} else if (ctrl_idx < ctrl_entries) {
const struct sge_ctrl_txq *tx = &adap->sge.ctrlq[ctrl_idx * 4];
int n = min(4, adap->params.nports - 4 * ctrl_idx);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 19dc9e25aa72..66c37fac59b2 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -134,24 +134,6 @@ MODULE_FIRMWARE(FW5_FNAME);
MODULE_FIRMWARE(FW6_FNAME);
/*
- * Normally we're willing to become the firmware's Master PF but will be happy
- * if another PF has already become the Master and initialized the adapter.
- * Setting "force_init" will cause this driver to forcibly establish itself as
- * the Master PF and initialize the adapter.
- */
-static uint force_init;
-
-module_param(force_init, uint, 0644);
-MODULE_PARM_DESC(force_init, "Forcibly become Master PF and initialize adapter,"
- "deprecated parameter");
-
-static int dflt_msg_enable = DFLT_MSG_ENABLE;
-
-module_param(dflt_msg_enable, int, 0644);
-MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T4 default message enable bitmap, "
- "deprecated parameter");
-
-/*
* The driver uses the best interrupt scheme available on a platform in the
* order MSI-X, MSI, legacy INTx interrupts. This parameter determines which
* of these schemes the driver may consider as follows:
@@ -179,16 +161,6 @@ MODULE_PARM_DESC(msi, "whether to use INTx (0), MSI (1) or MSI-X (2)");
*/
static int rx_dma_offset = 2;
-#ifdef CONFIG_PCI_IOV
-/* Configure the number of PCI-E Virtual Function which are to be instantiated
- * on SR-IOV Capable Physical Functions.
- */
-static unsigned int num_vf[NUM_OF_PF_WITH_SRIOV];
-
-module_param_array(num_vf, uint, NULL, 0644);
-MODULE_PARM_DESC(num_vf, "number of VFs for each of PFs 0-3, deprecated parameter - please use the pci sysfs interface.");
-#endif
-
/* TX Queue select used to determine what algorithm to use for selecting TX
* queue. Select between the kernel provided function (select_queue=0) or user
* cxgb_select_queue function (select_queue=1)
@@ -530,15 +502,15 @@ static int fwevtq_handler(struct sge_rspq *q, const __be64 *rsp,
txq = q->adap->sge.egr_map[qid - q->adap->sge.egr_start];
txq->restarts++;
- if ((u8 *)txq < (u8 *)q->adap->sge.ofldtxq) {
+ if (txq->q_type == CXGB4_TXQ_ETH) {
struct sge_eth_txq *eq;
eq = container_of(txq, struct sge_eth_txq, q);
netif_tx_wake_queue(eq->txq);
} else {
- struct sge_ofld_txq *oq;
+ struct sge_uld_txq *oq;
- oq = container_of(txq, struct sge_ofld_txq, q);
+ oq = container_of(txq, struct sge_uld_txq, q);
tasklet_schedule(&oq->qresume_tsk);
}
} else if (opcode == CPL_FW6_MSG || opcode == CPL_FW4_MSG) {
@@ -885,15 +857,6 @@ static int setup_sge_queues(struct adapter *adap)
}
}
- j = s->ofldqsets / adap->params.nports; /* iscsi queues per channel */
- for_each_ofldtxq(s, i) {
- err = t4_sge_alloc_ofld_txq(adap, &s->ofldtxq[i],
- adap->port[i / j],
- s->fw_evtq.cntxt_id);
- if (err)
- goto freeout;
- }
-
for_each_port(adap, i) {
/* Note that cmplqid below is 0 if we don't
* have RDMA queues, and that's the right value.
@@ -1922,8 +1885,18 @@ static void disable_dbs(struct adapter *adap)
for_each_ethrxq(&adap->sge, i)
disable_txq_db(&adap->sge.ethtxq[i].q);
- for_each_ofldtxq(&adap->sge, i)
- disable_txq_db(&adap->sge.ofldtxq[i].q);
+ if (is_offload(adap)) {
+ struct sge_uld_txq_info *txq_info =
+ adap->sge.uld_txq_info[CXGB4_TX_OFLD];
+
+ if (txq_info) {
+ for_each_ofldtxq(&adap->sge, i) {
+ struct sge_uld_txq *txq = &txq_info->uldtxq[i];
+
+ disable_txq_db(&txq->q);
+ }
+ }
+ }
for_each_port(adap, i)
disable_txq_db(&adap->sge.ctrlq[i].q);
}
@@ -1934,8 +1907,18 @@ static void enable_dbs(struct adapter *adap)
for_each_ethrxq(&adap->sge, i)
enable_txq_db(adap, &adap->sge.ethtxq[i].q);
- for_each_ofldtxq(&adap->sge, i)
- enable_txq_db(adap, &adap->sge.ofldtxq[i].q);
+ if (is_offload(adap)) {
+ struct sge_uld_txq_info *txq_info =
+ adap->sge.uld_txq_info[CXGB4_TX_OFLD];
+
+ if (txq_info) {
+ for_each_ofldtxq(&adap->sge, i) {
+ struct sge_uld_txq *txq = &txq_info->uldtxq[i];
+
+ enable_txq_db(adap, &txq->q);
+ }
+ }
+ }
for_each_port(adap, i)
enable_txq_db(adap, &adap->sge.ctrlq[i].q);
}
@@ -2006,8 +1989,17 @@ static void recover_all_queues(struct adapter *adap)
for_each_ethrxq(&adap->sge, i)
sync_txq_pidx(adap, &adap->sge.ethtxq[i].q);
- for_each_ofldtxq(&adap->sge, i)
- sync_txq_pidx(adap, &adap->sge.ofldtxq[i].q);
+ if (is_offload(adap)) {
+ struct sge_uld_txq_info *txq_info =
+ adap->sge.uld_txq_info[CXGB4_TX_OFLD];
+ if (txq_info) {
+ for_each_ofldtxq(&adap->sge, i) {
+ struct sge_uld_txq *txq = &txq_info->uldtxq[i];
+
+ sync_txq_pidx(adap, &txq->q);
+ }
+ }
+ }
for_each_port(adap, i)
sync_txq_pidx(adap, &adap->sge.ctrlq[i].q);
}
@@ -2502,8 +2494,6 @@ static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
int ret;
struct port_info *pi = netdev_priv(dev);
- if (new_mtu < 81 || new_mtu > MAX_MTU) /* accommodate SACK */
- return -EINVAL;
ret = t4_set_rxmode(pi->adapter, pi->adapter->pf, pi->viid, new_mtu, -1,
-1, -1, -1, true);
if (!ret)
@@ -3993,7 +3983,7 @@ static inline bool is_x_10g_port(const struct link_config *lc)
static void cfg_queues(struct adapter *adap)
{
struct sge *s = &adap->sge;
- int i, n10g = 0, qidx = 0;
+ int i = 0, n10g = 0, qidx = 0;
#ifndef CONFIG_CHELSIO_T4_DCB
int q10g = 0;
#endif
@@ -4008,8 +3998,7 @@ static void cfg_queues(struct adapter *adap)
adap->params.crypto = 0;
}
for_each_port(adap, i)
n10g += is_x_10g_port(&adap2pinfo(adap, i)->link_cfg);
#ifdef CONFIG_CHELSIO_T4_DCB
/* For Data Center Bridging support we need to be able to support up
* to 8 Traffic Priorities; each of which will be assigned to its
@@ -4077,9 +4066,6 @@ static void cfg_queues(struct adapter *adap)
for (i = 0; i < ARRAY_SIZE(s->ctrlq); i++)
s->ctrlq[i].q.size = 512;
- for (i = 0; i < ARRAY_SIZE(s->ofldtxq); i++)
- s->ofldtxq[i].q.size = 1024;
-
init_rspq(adap, &s->fw_evtq, 0, 1, 1024, 64);
init_rspq(adap, &s->intrq, 0, 1, 512, 64);
}
@@ -4715,7 +4701,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
adapter->name = pci_name(pdev);
adapter->mbox = func;
adapter->pf = func;
- adapter->msg_enable = dflt_msg_enable;
+ adapter->msg_enable = DFLT_MSG_ENABLE;
memset(adapter->chan_map, 0xff, sizeof(adapter->chan_map));
spin_lock_init(&adapter->stats_lock);
@@ -4803,6 +4789,10 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
netdev->priv_flags |= IFF_UNICAST_FLT;
+ /* MTU range: 81 - 9600 */
+ netdev->min_mtu = 81;
+ netdev->max_mtu = MAX_MTU;
+
netdev->netdev_ops = &cxgb4_netdev_ops;
#ifdef CONFIG_CHELSIO_T4_DCB
netdev->dcbnl_ops = &cxgb4_dcb_ops;
@@ -4971,17 +4961,6 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
sriov:
#ifdef CONFIG_PCI_IOV
- if (func < ARRAY_SIZE(num_vf) && num_vf[func] > 0) {
- dev_warn(&pdev->dev,
- "Enabling SR-IOV VFs using the num_vf module "
- "parameter is deprecated - please use the pci sysfs "
- "interface instead.\n");
- if (pci_enable_sriov(pdev, num_vf[func]) == 0)
- dev_info(&pdev->dev,
- "instantiated %u virtual functions\n",
- num_vf[func]);
- }
-
adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
if (!adapter) {
err = -ENOMEM;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c
index 49d2debb334e..52af62e0ecb6 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c
@@ -113,7 +113,7 @@ static int fill_action_fields(struct adapter *adap,
}
/* Re-direct to specified port in hardware. */
- if (is_tcf_mirred_redirect(a)) {
+ if (is_tcf_mirred_egress_redirect(a)) {
struct net_device *n_dev;
unsigned int i, index;
bool found = false;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
index 2471ff465d5c..8098902c094a 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
@@ -447,6 +447,106 @@ static void quiesce_rx_uld(struct adapter *adap, unsigned int uld_type)
quiesce_rx(adap, &rxq_info->uldrxq[idx].rspq);
}
+static void
+free_sge_txq_uld(struct adapter *adap, struct sge_uld_txq_info *txq_info)
+{
+ int nq = txq_info->ntxq;
+ int i;
+
+ for (i = 0; i < nq; i++) {
+ struct sge_uld_txq *txq = &txq_info->uldtxq[i];
+
+ if (txq && txq->q.desc) {
+ tasklet_kill(&txq->qresume_tsk);
+ t4_ofld_eq_free(adap, adap->mbox, adap->pf, 0,
+ txq->q.cntxt_id);
+ free_tx_desc(adap, &txq->q, txq->q.in_use, false);
+ kfree(txq->q.sdesc);
+ __skb_queue_purge(&txq->sendq);
+ free_txq(adap, &txq->q);
+ }
+ }
+}
+
+static int
+alloc_sge_txq_uld(struct adapter *adap, struct sge_uld_txq_info *txq_info,
+ unsigned int uld_type)
+{
+ struct sge *s = &adap->sge;
+ int nq = txq_info->ntxq;
+ int i, j, err;
+
+ j = nq / adap->params.nports;
+ for (i = 0; i < nq; i++) {
+ struct sge_uld_txq *txq = &txq_info->uldtxq[i];
+
+ txq->q.size = 1024;
+ err = t4_sge_alloc_uld_txq(adap, txq, adap->port[i / j],
+ s->fw_evtq.cntxt_id, uld_type);
+ if (err)
+ goto freeout;
+ }
+ return 0;
+freeout:
+ free_sge_txq_uld(adap, txq_info);
+ return err;
+}
+
+static void
+release_sge_txq_uld(struct adapter *adap, unsigned int uld_type)
+{
+ struct sge_uld_txq_info *txq_info = NULL;
+ int tx_uld_type = TX_ULD(uld_type);
+
+ txq_info = adap->sge.uld_txq_info[tx_uld_type];
+
+ if (txq_info && atomic_dec_and_test(&txq_info->users)) {
+ free_sge_txq_uld(adap, txq_info);
+ kfree(txq_info->uldtxq);
+ kfree(txq_info);
+ adap->sge.uld_txq_info[tx_uld_type] = NULL;
+ }
+}
+
+static int
+setup_sge_txq_uld(struct adapter *adap, unsigned int uld_type,
+ const struct cxgb4_uld_info *uld_info)
+{
+ struct sge_uld_txq_info *txq_info = NULL;
+ int tx_uld_type, i;
+
+ tx_uld_type = TX_ULD(uld_type);
+ txq_info = adap->sge.uld_txq_info[tx_uld_type];
+
+ if ((tx_uld_type == CXGB4_TX_OFLD) && txq_info &&
+ (atomic_inc_return(&txq_info->users) > 1))
+ return 0;
+
+ txq_info = kzalloc(sizeof(*txq_info), GFP_KERNEL);
+ if (!txq_info)
+ return -ENOMEM;
+
+ i = min_t(int, uld_info->ntxq, num_online_cpus());
+ txq_info->ntxq = roundup(i, adap->params.nports);
+
+ txq_info->uldtxq = kcalloc(txq_info->ntxq, sizeof(struct sge_uld_txq),
+ GFP_KERNEL);
+ if (!txq_info->uldtxq) {
+ kfree(txq_info);
+ return -ENOMEM;
+ }
+
+ if (alloc_sge_txq_uld(adap, txq_info, tx_uld_type)) {
+ kfree(txq_info->uldtxq);
+ kfree(txq_info);
+ return -ENOMEM;
+ }
+
+ atomic_inc(&txq_info->users);
+ adap->sge.uld_txq_info[tx_uld_type] = txq_info;
+ return 0;
+}
+
static void uld_queue_init(struct adapter *adap, unsigned int uld_type,
struct cxgb4_lld_info *lli)
{
@@ -472,7 +572,15 @@ int t4_uld_mem_alloc(struct adapter *adap)
if (!s->uld_rxq_info)
goto err_uld;
+ s->uld_txq_info = kzalloc(CXGB4_TX_MAX *
+ sizeof(struct sge_uld_txq_info *),
+ GFP_KERNEL);
+ if (!s->uld_txq_info)
+ goto err_uld_rx;
return 0;
+
+err_uld_rx:
+ kfree(s->uld_rxq_info);
err_uld:
kfree(adap->uld);
return -ENOMEM;
@@ -482,6 +590,7 @@ void t4_uld_mem_free(struct adapter *adap)
{
struct sge *s = &adap->sge;
+ kfree(s->uld_txq_info);
kfree(s->uld_rxq_info);
kfree(adap->uld);
}
@@ -616,6 +725,9 @@ int cxgb4_register_uld(enum cxgb4_uld type,
ret = -EBUSY;
goto free_irq;
}
+ ret = setup_sge_txq_uld(adap, type, p);
+ if (ret)
+ goto free_irq;
adap->uld[type] = *p;
uld_attach(adap, type);
adap_idx++;
@@ -644,6 +756,7 @@ out:
break;
adap->uld[type].handle = NULL;
adap->uld[type].add = NULL;
+ release_sge_txq_uld(adap, type);
if (adap->flags & FULL_INIT_DONE)
quiesce_rx_uld(adap, type);
if (adap->flags & USING_MSIX)
@@ -679,6 +792,7 @@ int cxgb4_unregister_uld(enum cxgb4_uld type)
continue;
adap->uld[type].handle = NULL;
adap->uld[type].add = NULL;
+ release_sge_txq_uld(adap, type);
if (adap->flags & FULL_INIT_DONE)
quiesce_rx_uld(adap, type);
if (adap->flags & USING_MSIX)
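setup_sge_txq_uld()/release_sge_txq_uld() above form a refcount-guarded singleton: the first offload ULD to register allocates the shared CXGB4_TX_OFLD queue set, later registrations only bump users, and the last unregister frees it. The same pattern in isolation, with hypothetical names:

#include <linux/atomic.h>
#include <linux/slab.h>

struct shared_txqs {		/* stand-in for struct sge_uld_txq_info */
	atomic_t users;
	/* ... queue state ... */
};

static struct shared_txqs *slot;	/* like sge.uld_txq_info[type] */

static int shared_txqs_get(void)
{
	if (slot && atomic_inc_return(&slot->users) > 1)
		return 0;			/* already set up; took a ref */

	slot = kzalloc(sizeof(*slot), GFP_KERNEL);
	if (!slot)
		return -ENOMEM;
	atomic_inc(&slot->users);		/* first user owns the alloc */
	return 0;
}

static void shared_txqs_put(void)
{
	if (slot && atomic_dec_and_test(&slot->users)) {
		kfree(slot);
		slot = NULL;
	}
}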
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
index 2996793b1aaa..4c856605fdfa 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
@@ -77,6 +77,8 @@ enum {
/* Special asynchronous notification message */
#define CXGB4_MSG_AN ((void *)1)
+#define TX_ULD(uld) (((uld) != CXGB4_ULD_CRYPTO) ? CXGB4_TX_OFLD : \
+ CXGB4_TX_CRYPTO)
struct serv_entry {
void *data;
@@ -223,6 +225,19 @@ enum cxgb4_uld {
CXGB4_ULD_MAX
};
+enum cxgb4_tx_uld {
+ CXGB4_TX_OFLD,
+ CXGB4_TX_CRYPTO,
+ CXGB4_TX_MAX
+};
+
+enum cxgb4_txq_type {
+ CXGB4_TXQ_ETH,
+ CXGB4_TXQ_ULD,
+ CXGB4_TXQ_CTRL,
+ CXGB4_TXQ_MAX
+};
+
enum cxgb4_state {
CXGB4_STATE_UP,
CXGB4_STATE_START_RECOVERY,
@@ -316,6 +331,7 @@ struct cxgb4_uld_info {
void *handle;
unsigned int nrxq;
unsigned int rxq_size;
+ unsigned int ntxq;
bool ciq;
bool lro;
void *(*add)(const struct cxgb4_lld_info *p);
@@ -333,6 +349,7 @@ struct cxgb4_uld_info {
int cxgb4_register_uld(enum cxgb4_uld type, const struct cxgb4_uld_info *p);
int cxgb4_unregister_uld(enum cxgb4_uld type);
int cxgb4_ofld_send(struct net_device *dev, struct sk_buff *skb);
+int cxgb4_crypto_send(struct net_device *dev, struct sk_buff *skb);
unsigned int cxgb4_dbfifo_count(const struct net_device *dev, int lpfifo);
unsigned int cxgb4_port_chan(const struct net_device *dev);
unsigned int cxgb4_port_viid(const struct net_device *dev);
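TX_ULD() folds the full cxgb4_uld enum onto the two Tx queue classes: only CXGB4_ULD_CRYPTO gets the dedicated CXGB4_TX_CRYPTO set; every other ULD shares CXGB4_TX_OFLD. A compile-time sketch of that mapping (BUILD_BUG_ON from <linux/bug.h>, placed in any function scope):

static inline void tx_uld_mapping_sketch(void)
{
	BUILD_BUG_ON(TX_ULD(CXGB4_ULD_RDMA) != CXGB4_TX_OFLD);
	BUILD_BUG_ON(TX_ULD(CXGB4_ULD_ISCSI) != CXGB4_TX_OFLD);
	BUILD_BUG_ON(TX_ULD(CXGB4_ULD_CRYPTO) != CXGB4_TX_CRYPTO);
}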
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
index e19a0ca8e5dd..9f606478c29c 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
@@ -377,8 +377,8 @@ unmap: dma_unmap_page(dev, be64_to_cpu(p->addr[0]),
* Reclaims Tx descriptors from an SGE Tx queue and frees the associated
* Tx buffers. Called with the Tx queue lock held.
*/
-static void free_tx_desc(struct adapter *adap, struct sge_txq *q,
- unsigned int n, bool unmap)
+void free_tx_desc(struct adapter *adap, struct sge_txq *q,
+ unsigned int n, bool unmap)
{
struct tx_sw_desc *d;
unsigned int cidx = q->cidx;
@@ -1543,7 +1543,7 @@ static inline unsigned int calc_tx_flits_ofld(const struct sk_buff *skb)
* inability to map packets. A periodic timer attempts to restart
* queues so marked.
*/
-static void txq_stop_maperr(struct sge_ofld_txq *q)
+static void txq_stop_maperr(struct sge_uld_txq *q)
{
q->mapping_err++;
q->q.stops++;
@@ -1559,7 +1559,7 @@ static void txq_stop_maperr(struct sge_ofld_txq *q)
* Stops an offload Tx queue that has become full and modifies the packet
* being written to request a wakeup.
*/
-static void ofldtxq_stop(struct sge_ofld_txq *q, struct sk_buff *skb)
+static void ofldtxq_stop(struct sge_uld_txq *q, struct sk_buff *skb)
{
struct fw_wr_hdr *wr = (struct fw_wr_hdr *)skb->data;
@@ -1586,7 +1586,7 @@ static void ofldtxq_stop(struct sge_ofld_txq *q, struct sk_buff *skb)
* boolean "service_ofldq_running" to make sure that only one instance
* is ever running at a time ...
*/
-static void service_ofldq(struct sge_ofld_txq *q)
+static void service_ofldq(struct sge_uld_txq *q)
{
u64 *pos, *before, *end;
int credits;
@@ -1706,7 +1706,7 @@ static void service_ofldq(struct sge_ofld_txq *q)
*
* Send an offload packet through an SGE offload queue.
*/
-static int ofld_xmit(struct sge_ofld_txq *q, struct sk_buff *skb)
+static int ofld_xmit(struct sge_uld_txq *q, struct sk_buff *skb)
{
skb->priority = calc_tx_flits_ofld(skb); /* save for restart */
spin_lock(&q->sendq.lock);
@@ -1735,7 +1735,7 @@ static int ofld_xmit(struct sge_ofld_txq *q, struct sk_buff *skb)
*/
static void restart_ofldq(unsigned long data)
{
- struct sge_ofld_txq *q = (struct sge_ofld_txq *)data;
+ struct sge_uld_txq *q = (struct sge_uld_txq *)data;
spin_lock(&q->sendq.lock);
q->full = 0; /* the queue actually is completely empty now */
@@ -1767,17 +1767,23 @@ static inline unsigned int is_ctrl_pkt(const struct sk_buff *skb)
return skb->queue_mapping & 1;
}
-static inline int ofld_send(struct adapter *adap, struct sk_buff *skb)
+static inline int uld_send(struct adapter *adap, struct sk_buff *skb,
+ unsigned int tx_uld_type)
{
+ struct sge_uld_txq_info *txq_info;
+ struct sge_uld_txq *txq;
unsigned int idx = skb_txq(skb);
+ txq_info = adap->sge.uld_txq_info[tx_uld_type];
+ txq = &txq_info->uldtxq[idx];
+
if (unlikely(is_ctrl_pkt(skb))) {
/* Single ctrl queue is a requirement for LE workaround path */
if (adap->tids.nsftids)
idx = 0;
return ctrl_xmit(&adap->sge.ctrlq[idx], skb);
}
- return ofld_xmit(&adap->sge.ofldtxq[idx], skb);
+ return ofld_xmit(txq, skb);
}
/**
@@ -1794,7 +1800,7 @@ int t4_ofld_send(struct adapter *adap, struct sk_buff *skb)
int ret;
local_bh_disable();
- ret = ofld_send(adap, skb);
+ ret = uld_send(adap, skb, CXGB4_TX_OFLD);
local_bh_enable();
return ret;
}
@@ -1813,6 +1819,39 @@ int cxgb4_ofld_send(struct net_device *dev, struct sk_buff *skb)
}
EXPORT_SYMBOL(cxgb4_ofld_send);
+/**
+ * t4_crypto_send - send crypto packet
+ * @adap: the adapter
+ * @skb: the packet
+ *
+ * Sends a crypto packet. We use the packet's queue_mapping to select the
+ * appropriate Tx queue as follows: bit 0 indicates whether the packet
+ * should be sent as regular or control, bits 1-15 select the queue.
+ */
+static int t4_crypto_send(struct adapter *adap, struct sk_buff *skb)
+{
+ int ret;
+
+ local_bh_disable();
+ ret = uld_send(adap, skb, CXGB4_TX_CRYPTO);
+ local_bh_enable();
+ return ret;
+}
+
+/**
+ * cxgb4_crypto_send - send crypto packet
+ * @dev: the net device
+ * @skb: the packet
+ *
+ * Sends a crypto packet. This is an exported version of t4_crypto_send(),
+ * intended for ULDs.
+ */
+int cxgb4_crypto_send(struct net_device *dev, struct sk_buff *skb)
+{
+ return t4_crypto_send(netdev2adap(dev), skb);
+}
+EXPORT_SYMBOL(cxgb4_crypto_send);
+
static inline void copy_frags(struct sk_buff *skb,
const struct pkt_gl *gl, unsigned int offset)
{
@@ -2479,7 +2518,7 @@ static void sge_tx_timer_cb(unsigned long data)
for (i = 0; i < BITS_TO_LONGS(s->egr_sz); i++)
for (m = s->txq_maperr[i]; m; m &= m - 1) {
unsigned long id = __ffs(m) + i * BITS_PER_LONG;
- struct sge_ofld_txq *txq = s->egr_map[id];
+ struct sge_uld_txq *txq = s->egr_map[id];
clear_bit(id, s->txq_maperr);
tasklet_schedule(&txq->qresume_tsk);
@@ -2799,6 +2838,7 @@ int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq,
return ret;
}
+ txq->q.q_type = CXGB4_TXQ_ETH;
init_txq(adap, &txq->q, FW_EQ_ETH_CMD_EQID_G(ntohl(c.eqid_pkd)));
txq->txq = netdevq;
txq->tso = txq->tx_cso = txq->vlan_ins = 0;
@@ -2852,6 +2892,7 @@ int t4_sge_alloc_ctrl_txq(struct adapter *adap, struct sge_ctrl_txq *txq,
return ret;
}
+ txq->q.q_type = CXGB4_TXQ_CTRL;
init_txq(adap, &txq->q, FW_EQ_CTRL_CMD_EQID_G(ntohl(c.cmpliqid_eqid)));
txq->adap = adap;
skb_queue_head_init(&txq->sendq);
@@ -2872,13 +2913,15 @@ int t4_sge_mod_ctrl_txq(struct adapter *adap, unsigned int eqid,
return t4_set_params(adap, adap->mbox, adap->pf, 0, 1, &param, &val);
}
-int t4_sge_alloc_ofld_txq(struct adapter *adap, struct sge_ofld_txq *txq,
- struct net_device *dev, unsigned int iqid)
+int t4_sge_alloc_uld_txq(struct adapter *adap, struct sge_uld_txq *txq,
+ struct net_device *dev, unsigned int iqid,
+ unsigned int uld_type)
{
int ret, nentries;
struct fw_eq_ofld_cmd c;
struct sge *s = &adap->sge;
struct port_info *pi = netdev_priv(dev);
+ int cmd = FW_EQ_OFLD_CMD;
/* Add status entries */
nentries = txq->q.size + s->stat_len / sizeof(struct tx_desc);
@@ -2891,7 +2934,9 @@ int t4_sge_alloc_ofld_txq(struct adapter *adap, struct sge_ofld_txq *txq,
return -ENOMEM;
memset(&c, 0, sizeof(c));
- c.op_to_vfn = htonl(FW_CMD_OP_V(FW_EQ_OFLD_CMD) | FW_CMD_REQUEST_F |
+ if (unlikely(uld_type == CXGB4_TX_CRYPTO))
+ cmd = FW_EQ_CTRL_CMD;
+ c.op_to_vfn = htonl(FW_CMD_OP_V(cmd) | FW_CMD_REQUEST_F |
FW_CMD_WRITE_F | FW_CMD_EXEC_F |
FW_EQ_OFLD_CMD_PFN_V(adap->pf) |
FW_EQ_OFLD_CMD_VFN_V(0));
@@ -2919,6 +2964,7 @@ int t4_sge_alloc_ofld_txq(struct adapter *adap, struct sge_ofld_txq *txq,
return ret;
}
+ txq->q.q_type = CXGB4_TXQ_ULD;
init_txq(adap, &txq->q, FW_EQ_OFLD_CMD_EQID_G(ntohl(c.eqid_pkd)));
txq->adap = adap;
skb_queue_head_init(&txq->sendq);
@@ -2928,7 +2974,7 @@ int t4_sge_alloc_ofld_txq(struct adapter *adap, struct sge_ofld_txq *txq,
return 0;
}
-static void free_txq(struct adapter *adap, struct sge_txq *q)
+void free_txq(struct adapter *adap, struct sge_txq *q)
{
struct sge *s = &adap->sge;
@@ -3025,21 +3071,6 @@ void t4_free_sge_resources(struct adapter *adap)
}
}
- /* clean up offload Tx queues */
- for (i = 0; i < ARRAY_SIZE(adap->sge.ofldtxq); i++) {
- struct sge_ofld_txq *q = &adap->sge.ofldtxq[i];
-
- if (q->q.desc) {
- tasklet_kill(&q->qresume_tsk);
- t4_ofld_eq_free(adap, adap->mbox, adap->pf, 0,
- q->q.cntxt_id);
- free_tx_desc(adap, &q->q, q->q.in_use, false);
- kfree(q->q.sdesc);
- __skb_queue_purge(&q->sendq);
- free_txq(adap, &q->q);
- }
- }
-
/* clean up control Tx queues */
for (i = 0; i < ARRAY_SIZE(adap->sge.ctrlq); i++) {
struct sge_ctrl_txq *cq = &adap->sge.ctrlq[i];
@@ -3092,12 +3123,34 @@ void t4_sge_stop(struct adapter *adap)
if (s->tx_timer.function)
del_timer_sync(&s->tx_timer);
- for (i = 0; i < ARRAY_SIZE(s->ofldtxq); i++) {
- struct sge_ofld_txq *q = &s->ofldtxq[i];
+ if (is_offload(adap)) {
+ struct sge_uld_txq_info *txq_info;
+
+ txq_info = adap->sge.uld_txq_info[CXGB4_TX_OFLD];
+ if (txq_info) {
+ struct sge_uld_txq *txq = txq_info->uldtxq;
- if (q->q.desc)
- tasklet_kill(&q->qresume_tsk);
+ for_each_ofldtxq(&adap->sge, i) {
+ if (txq[i].q.desc)
+ tasklet_kill(&txq[i].qresume_tsk);
+ }
+ }
}
+
+ if (is_pci_uld(adap)) {
+ struct sge_uld_txq_info *txq_info;
+
+ txq_info = adap->sge.uld_txq_info[CXGB4_TX_CRYPTO];
+ if (txq_info) {
+ struct sge_uld_txq *txq = txq_info->uldtxq;
+
+ for_each_ofldtxq(&adap->sge, i) {
+ if (txq[i].q.desc)
+ tasklet_kill(&txq[i].qresume_tsk);
+ }
+ }
+ }
+
for (i = 0; i < ARRAY_SIZE(s->ctrlq); i++) {
struct sge_ctrl_txq *cq = &s->ctrlq[i];
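With q_type stamped at allocation time (CXGB4_TXQ_ETH/CTRL/ULD in the three t4_sge_alloc_* paths above), the old pointer-arithmetic test against the static ofldtxq array disappears; dispatch reduces to a switch plus container_of(). A sketch of the pattern used in fwevtq_handler():

/* sketch: dispatch on the embedded queue's stamped type */
static void restart_txq_sketch(struct sge_txq *txq)
{
	switch (txq->q_type) {
	case CXGB4_TXQ_ETH:
		netif_tx_wake_queue(container_of(txq, struct sge_eth_txq,
						 q)->txq);
		break;
	case CXGB4_TXQ_ULD:
		tasklet_schedule(&container_of(txq, struct sge_uld_txq,
					       q)->qresume_tsk);
		break;
	default:	/* CXGB4_TXQ_CTRL is restarted elsewhere */
		break;
	}
}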
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
index fba3b2ad382d..a267173f5997 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
@@ -76,6 +76,7 @@ enum {
CPL_PASS_ESTABLISH = 0x41,
CPL_RX_DATA_DDP = 0x42,
CPL_PASS_ACCEPT_REQ = 0x44,
+ CPL_RX_ISCSI_CMP = 0x45,
CPL_TRACE_PKT_T5 = 0x48,
CPL_RX_ISCSI_DDP = 0x49,
@@ -934,6 +935,18 @@ struct cpl_iscsi_data {
__u8 status;
};
+struct cpl_rx_iscsi_cmp {
+ union opcode_tid ot;
+ __be16 pdu_len_ddp;
+ __be16 len;
+ __be32 seq;
+ __be16 urg;
+ __u8 rsvd;
+ __u8 status;
+ __be32 ulp_crc;
+ __be32 ddpvld;
+};
+
struct cpl_tx_data_iso {
__be32 op_to_scsi;
__u8 reserved1;
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
index a37481c04a87..0d1a134c8174 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
@@ -70,13 +70,6 @@
NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)
-static int dflt_msg_enable = DFLT_MSG_ENABLE;
-
-module_param(dflt_msg_enable, int, 0644);
-MODULE_PARM_DESC(dflt_msg_enable,
- "default adapter ethtool message level bitmap, "
- "deprecated parameter");
-
/*
* The driver uses the best interrupt scheme available on a platform in the
* order MSI-X then MSI. This parameter determines which of these schemes the
@@ -1108,10 +1101,6 @@ static int cxgb4vf_change_mtu(struct net_device *dev, int new_mtu)
int ret;
struct port_info *pi = netdev_priv(dev);
- /* accommodate SACK */
- if (new_mtu < 81)
- return -EINVAL;
-
ret = t4vf_set_rxmode(pi->adapter, pi->viid, new_mtu,
-1, -1, -1, -1, true);
if (!ret)
@@ -2895,7 +2884,7 @@ static int cxgb4vf_pci_probe(struct pci_dev *pdev,
* Initialize adapter level features.
*/
adapter->name = pci_name(pdev);
- adapter->msg_enable = dflt_msg_enable;
+ adapter->msg_enable = DFLT_MSG_ENABLE;
err = adap_init0(adapter);
if (err)
goto err_unmap_bar;
@@ -2966,6 +2955,8 @@ static int cxgb4vf_pci_probe(struct pci_dev *pdev,
netdev->features |= NETIF_F_HIGHDMA;
netdev->priv_flags |= IFF_UNICAST_FLT;
+ netdev->min_mtu = 81;
+ netdev->max_mtu = ETH_MAX_MTU;
netdev->netdev_ops = &cxgb4vf_netdev_ops;
netdev->ethtool_ops = &cxgb4vf_ethtool_ops;
diff --git a/drivers/net/ethernet/cirrus/cs89x0.c b/drivers/net/ethernet/cirrus/cs89x0.c
index c363b58552e9..3647b28e8de0 100644
--- a/drivers/net/ethernet/cirrus/cs89x0.c
+++ b/drivers/net/ethernet/cirrus/cs89x0.c
@@ -1266,7 +1266,6 @@ static const struct net_device_ops net_ops = {
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = net_poll_controller,
#endif
- .ndo_change_mtu = eth_change_mtu,
.ndo_validate_addr = eth_validate_addr,
};
diff --git a/drivers/net/ethernet/cirrus/ep93xx_eth.c b/drivers/net/ethernet/cirrus/ep93xx_eth.c
index 9a161e981529..396c88678eab 100644
--- a/drivers/net/ethernet/cirrus/ep93xx_eth.c
+++ b/drivers/net/ethernet/cirrus/ep93xx_eth.c
@@ -715,16 +715,18 @@ static void ep93xx_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *i
strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
}
-static int ep93xx_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+static int ep93xx_get_link_ksettings(struct net_device *dev,
+ struct ethtool_link_ksettings *cmd)
{
struct ep93xx_priv *ep = netdev_priv(dev);
- return mii_ethtool_gset(&ep->mii, cmd);
+ return mii_ethtool_get_link_ksettings(&ep->mii, cmd);
}
-static int ep93xx_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+static int ep93xx_set_link_ksettings(struct net_device *dev,
+ const struct ethtool_link_ksettings *cmd)
{
struct ep93xx_priv *ep = netdev_priv(dev);
- return mii_ethtool_sset(&ep->mii, cmd);
+ return mii_ethtool_set_link_ksettings(&ep->mii, cmd);
}
static int ep93xx_nway_reset(struct net_device *dev)
@@ -741,10 +743,10 @@ static u32 ep93xx_get_link(struct net_device *dev)
static const struct ethtool_ops ep93xx_ethtool_ops = {
.get_drvinfo = ep93xx_get_drvinfo,
- .get_settings = ep93xx_get_settings,
- .set_settings = ep93xx_set_settings,
.nway_reset = ep93xx_nway_reset,
.get_link = ep93xx_get_link,
+ .get_link_ksettings = ep93xx_get_link_ksettings,
+ .set_link_ksettings = ep93xx_set_link_ksettings,
};
static const struct net_device_ops ep93xx_netdev_ops = {
@@ -753,7 +755,6 @@ static const struct net_device_ops ep93xx_netdev_ops = {
.ndo_start_xmit = ep93xx_xmit,
.ndo_do_ioctl = ep93xx_ioctl,
.ndo_validate_addr = eth_validate_addr,
- .ndo_change_mtu = eth_change_mtu,
.ndo_set_mac_address = eth_mac_addr,
};
diff --git a/drivers/net/ethernet/cirrus/mac89x0.c b/drivers/net/ethernet/cirrus/mac89x0.c
index 07719676c305..b600fbbbf679 100644
--- a/drivers/net/ethernet/cirrus/mac89x0.c
+++ b/drivers/net/ethernet/cirrus/mac89x0.c
@@ -172,7 +172,6 @@ static const struct net_device_ops mac89x0_netdev_ops = {
.ndo_set_rx_mode = set_multicast_list,
.ndo_set_mac_address = set_mac_address,
.ndo_validate_addr = eth_validate_addr,
- .ndo_change_mtu = eth_change_mtu,
};
/* Probe for the CS8900 card in slot E. We won't bother looking
diff --git a/drivers/net/ethernet/cisco/enic/enic.h b/drivers/net/ethernet/cisco/enic/enic.h
index 130f910e4785..9023c858715d 100644
--- a/drivers/net/ethernet/cisco/enic/enic.h
+++ b/drivers/net/ethernet/cisco/enic/enic.h
@@ -33,7 +33,7 @@
#define DRV_NAME "enic"
#define DRV_DESCRIPTION "Cisco VIC Ethernet NIC Driver"
-#define DRV_VERSION "2.3.0.20"
+#define DRV_VERSION "2.3.0.31"
#define DRV_COPYRIGHT "Copyright 2008-2013 Cisco Systems, Inc"
#define ENIC_BARS_MAX 6
diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
index 48f82ab6c25b..cdd7a1a59aa7 100644
--- a/drivers/net/ethernet/cisco/enic/enic_main.c
+++ b/drivers/net/ethernet/cisco/enic/enic_main.c
@@ -1166,12 +1166,18 @@ static void enic_rq_indicate_buf(struct vnic_rq *rq,
skb->protocol = eth_type_trans(skb, netdev);
skb_record_rx_queue(skb, q_number);
if (netdev->features & NETIF_F_RXHASH) {
- skb_set_hash(skb, rss_hash,
- (rss_type &
- (NIC_CFG_RSS_HASH_TYPE_TCP_IPV6_EX |
- NIC_CFG_RSS_HASH_TYPE_TCP_IPV6 |
- NIC_CFG_RSS_HASH_TYPE_TCP_IPV4)) ?
- PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3);
+ switch (rss_type) {
+ case CQ_ENET_RQ_DESC_RSS_TYPE_TCP_IPv4:
+ case CQ_ENET_RQ_DESC_RSS_TYPE_TCP_IPv6:
+ case CQ_ENET_RQ_DESC_RSS_TYPE_TCP_IPv6_EX:
+ skb_set_hash(skb, rss_hash, PKT_HASH_TYPE_L4);
+ break;
+ case CQ_ENET_RQ_DESC_RSS_TYPE_IPv4:
+ case CQ_ENET_RQ_DESC_RSS_TYPE_IPv6:
+ case CQ_ENET_RQ_DESC_RSS_TYPE_IPv6_EX:
+ skb_set_hash(skb, rss_hash, PKT_HASH_TYPE_L3);
+ break;
+ }
}
/* Hardware does not provide whole packet checksum. It only
@@ -1843,9 +1849,6 @@ static int enic_change_mtu(struct net_device *netdev, int new_mtu)
struct enic *enic = netdev_priv(netdev);
int running = netif_running(netdev);
- if (new_mtu < ENIC_MIN_MTU || new_mtu > ENIC_MAX_MTU)
- return -EINVAL;
-
if (enic_is_dynamic(enic) || enic_is_sriov_vf(enic))
return -EOPNOTSUPP;
@@ -2751,6 +2754,10 @@ static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
netdev->priv_flags |= IFF_UNICAST_FLT;
+ /* MTU range: 68 - 9000 */
+ netdev->min_mtu = ENIC_MIN_MTU;
+ netdev->max_mtu = ENIC_MAX_MTU;
+
err = register_netdev(netdev);
if (err) {
dev_err(dev, "Cannot register net device, aborting\n");
diff --git a/drivers/net/ethernet/cisco/enic/enic_res.h b/drivers/net/ethernet/cisco/enic/enic_res.h
index 69f60afd6577..81f98a8b60e9 100644
--- a/drivers/net/ethernet/cisco/enic/enic_res.h
+++ b/drivers/net/ethernet/cisco/enic/enic_res.h
@@ -30,7 +30,7 @@
#define ENIC_MIN_RQ_DESCS 64
#define ENIC_MAX_RQ_DESCS 4096
-#define ENIC_MIN_MTU 68
+#define ENIC_MIN_MTU ETH_MIN_MTU
#define ENIC_MAX_MTU 9000
#define ENIC_MULTICAST_PERFECT_FILTERS 32
diff --git a/drivers/net/ethernet/davicom/dm9000.c b/drivers/net/ethernet/davicom/dm9000.c
index f45385f5c6e5..008dc8161775 100644
--- a/drivers/net/ethernet/davicom/dm9000.c
+++ b/drivers/net/ethernet/davicom/dm9000.c
@@ -570,19 +570,21 @@ static void dm9000_set_msglevel(struct net_device *dev, u32 value)
dm->msg_enable = value;
}
-static int dm9000_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+static int dm9000_get_link_ksettings(struct net_device *dev,
+ struct ethtool_link_ksettings *cmd)
{
struct board_info *dm = to_dm9000_board(dev);
- mii_ethtool_gset(&dm->mii, cmd);
+ mii_ethtool_get_link_ksettings(&dm->mii, cmd);
return 0;
}
-static int dm9000_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+static int dm9000_set_link_ksettings(struct net_device *dev,
+ const struct ethtool_link_ksettings *cmd)
{
struct board_info *dm = to_dm9000_board(dev);
- return mii_ethtool_sset(&dm->mii, cmd);
+ return mii_ethtool_set_link_ksettings(&dm->mii, cmd);
}
static int dm9000_nway_reset(struct net_device *dev)
@@ -741,8 +743,6 @@ static int dm9000_set_wol(struct net_device *dev, struct ethtool_wolinfo *w)
static const struct ethtool_ops dm9000_ethtool_ops = {
.get_drvinfo = dm9000_get_drvinfo,
- .get_settings = dm9000_get_settings,
- .set_settings = dm9000_set_settings,
.get_msglevel = dm9000_get_msglevel,
.set_msglevel = dm9000_set_msglevel,
.nway_reset = dm9000_nway_reset,
@@ -752,6 +752,8 @@ static const struct ethtool_ops dm9000_ethtool_ops = {
.get_eeprom_len = dm9000_get_eeprom_len,
.get_eeprom = dm9000_get_eeprom,
.set_eeprom = dm9000_set_eeprom,
+ .get_link_ksettings = dm9000_get_link_ksettings,
+ .set_link_ksettings = dm9000_set_link_ksettings,
};
static void dm9000_show_carrier(struct board_info *db,
@@ -1382,7 +1384,6 @@ static const struct net_device_ops dm9000_netdev_ops = {
.ndo_tx_timeout = dm9000_timeout,
.ndo_set_rx_mode = dm9000_hash_table,
.ndo_do_ioctl = dm9000_ioctl,
- .ndo_change_mtu = eth_change_mtu,
.ndo_set_features = dm9000_set_features,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
diff --git a/drivers/net/ethernet/dec/tulip/de2104x.c b/drivers/net/ethernet/dec/tulip/de2104x.c
index cadcee645f74..90c573b8ccaf 100644
--- a/drivers/net/ethernet/dec/tulip/de2104x.c
+++ b/drivers/net/ethernet/dec/tulip/de2104x.c
@@ -1956,7 +1956,6 @@ static const struct net_device_ops de_netdev_ops = {
.ndo_start_xmit = de_start_xmit,
.ndo_get_stats = de_get_stats,
.ndo_tx_timeout = de_tx_timeout,
- .ndo_change_mtu = eth_change_mtu,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
};
diff --git a/drivers/net/ethernet/dec/tulip/de4x5.c b/drivers/net/ethernet/dec/tulip/de4x5.c
index 6620fc861c47..51fda3a6b13f 100644
--- a/drivers/net/ethernet/dec/tulip/de4x5.c
+++ b/drivers/net/ethernet/dec/tulip/de4x5.c
@@ -1085,7 +1085,6 @@ static const struct net_device_ops de4x5_netdev_ops = {
.ndo_get_stats = de4x5_get_stats,
.ndo_set_rx_mode = set_multicast_list,
.ndo_do_ioctl = de4x5_ioctl,
- .ndo_change_mtu = eth_change_mtu,
.ndo_set_mac_address= eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
};
diff --git a/drivers/net/ethernet/dec/tulip/dmfe.c b/drivers/net/ethernet/dec/tulip/dmfe.c
index 8ed0fd8b1dda..df4994919456 100644
--- a/drivers/net/ethernet/dec/tulip/dmfe.c
+++ b/drivers/net/ethernet/dec/tulip/dmfe.c
@@ -352,7 +352,6 @@ static const struct net_device_ops netdev_ops = {
.ndo_stop = dmfe_stop,
.ndo_start_xmit = dmfe_start_xmit,
.ndo_set_rx_mode = dmfe_set_filter_mode,
- .ndo_change_mtu = eth_change_mtu,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
diff --git a/drivers/net/ethernet/dec/tulip/tulip_core.c b/drivers/net/ethernet/dec/tulip/tulip_core.c
index bbde90bc74fe..5f1377449b8f 100644
--- a/drivers/net/ethernet/dec/tulip/tulip_core.c
+++ b/drivers/net/ethernet/dec/tulip/tulip_core.c
@@ -1282,7 +1282,6 @@ static const struct net_device_ops tulip_netdev_ops = {
.ndo_get_stats = tulip_get_stats,
.ndo_do_ioctl = private_ioctl,
.ndo_set_rx_mode = set_rx_mode,
- .ndo_change_mtu = eth_change_mtu,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
diff --git a/drivers/net/ethernet/dec/tulip/uli526x.c b/drivers/net/ethernet/dec/tulip/uli526x.c
index e750b5ddc0fb..e1c4133b8787 100644
--- a/drivers/net/ethernet/dec/tulip/uli526x.c
+++ b/drivers/net/ethernet/dec/tulip/uli526x.c
@@ -269,7 +269,6 @@ static const struct net_device_ops netdev_ops = {
.ndo_stop = uli526x_stop,
.ndo_start_xmit = uli526x_start_xmit,
.ndo_set_rx_mode = uli526x_set_filter_mode,
- .ndo_change_mtu = eth_change_mtu,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
diff --git a/drivers/net/ethernet/dec/tulip/winbond-840.c b/drivers/net/ethernet/dec/tulip/winbond-840.c
index 1f62b9423851..feda96d585e7 100644
--- a/drivers/net/ethernet/dec/tulip/winbond-840.c
+++ b/drivers/net/ethernet/dec/tulip/winbond-840.c
@@ -353,7 +353,6 @@ static const struct net_device_ops netdev_ops = {
.ndo_set_rx_mode = set_rx_mode,
.ndo_do_ioctl = netdev_ioctl,
.ndo_tx_timeout = tx_timeout,
- .ndo_change_mtu = eth_change_mtu,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
};
diff --git a/drivers/net/ethernet/dec/tulip/xircom_cb.c b/drivers/net/ethernet/dec/tulip/xircom_cb.c
index 0e721cedfa67..19e4ea15b504 100644
--- a/drivers/net/ethernet/dec/tulip/xircom_cb.c
+++ b/drivers/net/ethernet/dec/tulip/xircom_cb.c
@@ -174,7 +174,6 @@ static const struct net_device_ops netdev_ops = {
.ndo_open = xircom_open,
.ndo_stop = xircom_close,
.ndo_start_xmit = xircom_start_xmit,
- .ndo_change_mtu = eth_change_mtu,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
diff --git a/drivers/net/ethernet/dlink/dl2k.c b/drivers/net/ethernet/dlink/dl2k.c
index 78f144696d6b..8c95a8a81e3c 100644
--- a/drivers/net/ethernet/dlink/dl2k.c
+++ b/drivers/net/ethernet/dlink/dl2k.c
@@ -76,7 +76,6 @@ static void rio_free_tx (struct net_device *dev, int irq);
static void tx_error (struct net_device *dev, int tx_status);
static int receive_packet (struct net_device *dev);
static void rio_error (struct net_device *dev, int int_status);
-static int change_mtu (struct net_device *dev, int new_mtu);
static void set_multicast (struct net_device *dev);
static struct net_device_stats *get_stats (struct net_device *dev);
static int clear_stats (struct net_device *dev);
@@ -106,7 +105,6 @@ static const struct net_device_ops netdev_ops = {
.ndo_set_rx_mode = set_multicast,
.ndo_do_ioctl = rio_ioctl,
.ndo_tx_timeout = rio_tx_timeout,
- .ndo_change_mtu = change_mtu,
};
static int
@@ -230,6 +228,10 @@ rio_probe1 (struct pci_dev *pdev, const struct pci_device_id *ent)
#if 0
dev->features = NETIF_F_IP_CSUM;
#endif
+ /* MTU range: 68 - 1536 or 8000 */
+ dev->min_mtu = ETH_MIN_MTU;
+ dev->max_mtu = np->jumbo ? MAX_JUMBO : PACKET_SIZE;
+
pci_set_drvdata (pdev, dev);
ring_space = pci_alloc_consistent (pdev, TX_TOTAL_SIZE, &ring_dma);
@@ -1198,22 +1200,6 @@ clear_stats (struct net_device *dev)
return 0;
}
-
-static int
-change_mtu (struct net_device *dev, int new_mtu)
-{
- struct netdev_private *np = netdev_priv(dev);
- int max = (np->jumbo) ? MAX_JUMBO : 1536;
-
- if ((new_mtu < 68) || (new_mtu > max)) {
- return -EINVAL;
- }
-
- dev->mtu = new_mtu;
-
- return 0;
-}
-
static void
set_multicast (struct net_device *dev)
{
diff --git a/drivers/net/ethernet/dlink/sundance.c b/drivers/net/ethernet/dlink/sundance.c
index 79d80090eac8..eab36acfc0d1 100644
--- a/drivers/net/ethernet/dlink/sundance.c
+++ b/drivers/net/ethernet/dlink/sundance.c
@@ -580,6 +580,10 @@ static int sundance_probe1(struct pci_dev *pdev,
dev->ethtool_ops = &ethtool_ops;
dev->watchdog_timeo = TX_TIMEOUT;
+ /* MTU range: 68 - 8191 */
+ dev->min_mtu = ETH_MIN_MTU;
+ dev->max_mtu = 8191;
+
pci_set_drvdata(pdev, dev);
i = register_netdev(dev);
@@ -713,8 +717,6 @@ err_out_netdev:
static int change_mtu(struct net_device *dev, int new_mtu)
{
- if ((new_mtu < 68) || (new_mtu > 8191)) /* Set by RxDMAFrameLen */
- return -EINVAL;
if (netif_running(dev))
return -EBUSY;
dev->mtu = new_mtu;
diff --git a/drivers/net/ethernet/dnet.c b/drivers/net/ethernet/dnet.c
index c3b64cdd0dec..2a17c59f69f9 100644
--- a/drivers/net/ethernet/dnet.c
+++ b/drivers/net/ethernet/dnet.c
@@ -767,7 +767,6 @@ static const struct net_device_ops dnet_netdev_ops = {
.ndo_do_ioctl = dnet_ioctl,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
- .ndo_change_mtu = eth_change_mtu,
};
static int dnet_probe(struct platform_device *pdev)
diff --git a/drivers/net/ethernet/ec_bhf.c b/drivers/net/ethernet/ec_bhf.c
index f7b42483921c..57650953ff83 100644
--- a/drivers/net/ethernet/ec_bhf.c
+++ b/drivers/net/ethernet/ec_bhf.c
@@ -482,7 +482,6 @@ static const struct net_device_ops ec_bhf_netdev_ops = {
.ndo_open = ec_bhf_open,
.ndo_stop = ec_bhf_stop,
.ndo_get_stats64 = ec_bhf_get_stats,
- .ndo_change_mtu = eth_change_mtu,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr
};
diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h
index 6cfa63a5e9b4..4c30c44b242e 100644
--- a/drivers/net/ethernet/emulex/benet/be.h
+++ b/drivers/net/ethernet/emulex/benet/be.h
@@ -65,7 +65,7 @@
/* Number of bytes of an RX frame that are copied to skb->data */
#define BE_HDR_LEN ((u16) 64)
/* allocate extra space to allow tunneling decapsulation without head reallocation */
-#define BE_RX_SKB_ALLOC_SIZE (BE_HDR_LEN + 64)
+#define BE_RX_SKB_ALLOC_SIZE 256
#define BE_MAX_JUMBO_FRAME_SIZE 9018
#define BE_MIN_MTU 256
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 93aa2939142a..7e1633bf5a22 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -1406,23 +1406,6 @@ drop:
return NETDEV_TX_OK;
}
-static int be_change_mtu(struct net_device *netdev, int new_mtu)
-{
- struct be_adapter *adapter = netdev_priv(netdev);
- struct device *dev = &adapter->pdev->dev;
-
- if (new_mtu < BE_MIN_MTU || new_mtu > BE_MAX_MTU) {
- dev_info(dev, "MTU must be between %d and %d bytes\n",
- BE_MIN_MTU, BE_MAX_MTU);
- return -EINVAL;
- }
-
- dev_info(dev, "MTU changed from %d to %d bytes\n",
- netdev->mtu, new_mtu);
- netdev->mtu = new_mtu;
- return 0;
-}
-
static inline bool be_in_all_promisc(struct be_adapter *adapter)
{
return (adapter->if_flags & BE_IF_FLAGS_ALL_PROMISCUOUS) ==
@@ -5215,7 +5198,6 @@ static const struct net_device_ops be_netdev_ops = {
.ndo_start_xmit = be_xmit,
.ndo_set_rx_mode = be_set_rx_mode,
.ndo_set_mac_address = be_mac_addr_set,
- .ndo_change_mtu = be_change_mtu,
.ndo_get_stats64 = be_get_stats64,
.ndo_validate_addr = eth_validate_addr,
.ndo_vlan_rx_add_vid = be_vlan_add_vid,
@@ -5265,6 +5247,10 @@ static void be_netdev_init(struct net_device *netdev)
netdev->netdev_ops = &be_netdev_ops;
netdev->ethtool_ops = &be_ethtool_ops;
+
+ /* MTU range: 256 - 9000 */
+ netdev->min_mtu = BE_MIN_MTU;
+ netdev->max_mtu = BE_MAX_MTU;
}
static void be_cleanup(struct be_adapter *adapter)
diff --git a/drivers/net/ethernet/ethoc.c b/drivers/net/ethernet/ethoc.c
index c044667a0a25..45abc81f6f55 100644
--- a/drivers/net/ethernet/ethoc.c
+++ b/drivers/net/ethernet/ethoc.c
@@ -23,6 +23,7 @@
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/of.h>
+#include <linux/of_net.h>
#include <linux/module.h>
#include <net/ethoc.h>
@@ -221,6 +222,9 @@ struct ethoc {
struct mii_bus *mdio;
struct clk *clk;
s8 phy_id;
+
+ int old_link;
+ int old_duplex;
};
/**
@@ -572,7 +576,7 @@ static irqreturn_t ethoc_interrupt(int irq, void *dev_id)
/* We always handle the dropped packet interrupt */
if (pending & INT_MASK_BUSY) {
- dev_err(&dev->dev, "packet dropped\n");
+ dev_dbg(&dev->dev, "packet dropped\n");
dev->stats.rx_dropped++;
}
@@ -667,6 +671,32 @@ static int ethoc_mdio_write(struct mii_bus *bus, int phy, int reg, u16 val)
static void ethoc_mdio_poll(struct net_device *dev)
{
+ struct ethoc *priv = netdev_priv(dev);
+ struct phy_device *phydev = dev->phydev;
+ bool changed = false;
+ u32 mode;
+
+ if (priv->old_link != phydev->link) {
+ changed = true;
+ priv->old_link = phydev->link;
+ }
+
+ if (priv->old_duplex != phydev->duplex) {
+ changed = true;
+ priv->old_duplex = phydev->duplex;
+ }
+
+ if (!changed)
+ return;
+
+ mode = ethoc_read(priv, MODER);
+ if (phydev->duplex == DUPLEX_FULL)
+ mode |= MODER_FULLD;
+ else
+ mode &= ~MODER_FULLD;
+ ethoc_write(priv, MODER, mode);
+
+ phy_print_status(phydev);
}
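The ethoc_mdio_poll body added above follows the standard phylib adjust_link pattern: phylib invokes the callback on PHY state changes, the driver caches the last observed link and duplex (old_link/old_duplex, reset to -1 at probe and open so the first notification always counts as a change), returns early when nothing changed, and only then rewrites the MODER full-duplex bit and logs the transition via phy_print_status().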
static int ethoc_mdio_probe(struct net_device *dev)
@@ -685,6 +715,9 @@ static int ethoc_mdio_probe(struct net_device *dev)
return -ENXIO;
}
+ priv->old_duplex = -1;
+ priv->old_link = -1;
+
err = phy_connect_direct(dev, phy, ethoc_mdio_poll,
PHY_INTERFACE_MODE_GMII);
if (err) {
@@ -721,6 +754,9 @@ static int ethoc_open(struct net_device *dev)
netif_start_queue(dev);
}
+ priv->old_link = -1;
+ priv->old_duplex = -1;
+
phy_start(dev->phydev);
napi_enable(&priv->napi);
@@ -966,6 +1002,7 @@ static int ethoc_set_ringparam(struct net_device *dev,
const struct ethtool_ops ethoc_ethtool_ops = {
.get_regs_len = ethoc_get_regs_len,
.get_regs = ethoc_get_regs,
+ .nway_reset = phy_ethtool_nway_reset,
.get_link = ethtool_op_get_link,
.get_ringparam = ethoc_get_ringparam,
.set_ringparam = ethoc_set_ringparam,
@@ -1122,11 +1159,9 @@ static int ethoc_probe(struct platform_device *pdev)
memcpy(netdev->dev_addr, pdata->hwaddr, IFHWADDRLEN);
priv->phy_id = pdata->phy_id;
} else {
- const uint8_t *mac;
+ const void *mac;
- mac = of_get_property(pdev->dev.of_node,
- "local-mac-address",
- NULL);
+ mac = of_get_mac_address(pdev->dev.of_node);
if (mac)
memcpy(netdev->dev_addr, mac, IFHWADDRLEN);
priv->phy_id = -1;
diff --git a/drivers/net/ethernet/fealnx.c b/drivers/net/ethernet/fealnx.c
index c08bd763172a..6967b287b6e7 100644
--- a/drivers/net/ethernet/fealnx.c
+++ b/drivers/net/ethernet/fealnx.c
@@ -472,7 +472,6 @@ static const struct net_device_ops netdev_ops = {
.ndo_set_rx_mode = set_rx_mode,
.ndo_do_ioctl = mii_ioctl,
.ndo_tx_timeout = fealnx_tx_timeout,
- .ndo_change_mtu = eth_change_mtu,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
};
diff --git a/drivers/net/ethernet/freescale/Kconfig b/drivers/net/ethernet/freescale/Kconfig
index d1ca45fbb164..6e490fd2345d 100644
--- a/drivers/net/ethernet/freescale/Kconfig
+++ b/drivers/net/ethernet/freescale/Kconfig
@@ -8,7 +8,7 @@ config NET_VENDOR_FREESCALE
depends on FSL_SOC || QUICC_ENGINE || CPM1 || CPM2 || PPC_MPC512x || \
M523x || M527x || M5272 || M528x || M520x || M532x || \
ARCH_MXC || ARCH_MXS || (PPC_MPC52xx && PPC_BESTCOMM) || \
- ARCH_LAYERSCAPE
+ ARCH_LAYERSCAPE || COMPILE_TEST
---help---
If you have a network (Ethernet) card belonging to this class, say Y.
@@ -25,7 +25,7 @@ config FEC
ARCH_MXC || SOC_IMX28)
default ARCH_MXC || SOC_IMX28 if ARM
select PHYLIB
- select PTP_1588_CLOCK
+ imply PTP_1588_CLOCK
---help---
Say Y here if you want to use the built-in 10/100 Fast ethernet
controller on some Motorola ColdFire and Freescale i.MX processors.
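Switching FEC from select PTP_1588_CLOCK to imply PTP_1588_CLOCK uses the then-new Kconfig imply keyword: PTP_1588_CLOCK still defaults on when FEC is enabled, but unlike select it can be switched off or constrained by its own dependencies, with the driver expected to cope at runtime if the PTP clock subsystem is absent.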
@@ -65,6 +65,7 @@ config FSL_PQ_MDIO
config FSL_XGMAC_MDIO
tristate "Freescale XGMAC MDIO"
select PHYLIB
+ depends on OF
select OF_MDIO
---help---
This driver supports the MDIO bus on the Fman 10G Ethernet MACs, and
@@ -85,6 +86,7 @@ config UGETH_TX_ON_DEMAND
config GIANFAR
tristate "Gianfar Ethernet"
+ depends on HAS_DMA
select FSL_PQ_MDIO
select PHYLIB
select CRC32
@@ -93,4 +95,6 @@ config GIANFAR
and MPC86xx family of chips, the eTSEC on LS1021A and the FEC
on the 8540.
+source "drivers/net/ethernet/freescale/dpaa/Kconfig"
+
endif # NET_VENDOR_FREESCALE
diff --git a/drivers/net/ethernet/freescale/Makefile b/drivers/net/ethernet/freescale/Makefile
index cbe21dc7e37e..c46df5c82af5 100644
--- a/drivers/net/ethernet/freescale/Makefile
+++ b/drivers/net/ethernet/freescale/Makefile
@@ -4,8 +4,6 @@
obj-$(CONFIG_FEC) += fec.o
fec-objs :=fec_main.o fec_ptp.o
-CFLAGS_fec_main.o := -D__CHECK_ENDIAN__
-CFLAGS_fec_ptp.o := -D__CHECK_ENDIAN__
obj-$(CONFIG_FEC_MPC52xx) += fec_mpc52xx.o
ifeq ($(CONFIG_FEC_MPC52xx_MDIO),y)
@@ -22,3 +20,4 @@ obj-$(CONFIG_UCC_GETH) += ucc_geth_driver.o
ucc_geth_driver-objs := ucc_geth.o ucc_geth_ethtool.o
obj-$(CONFIG_FSL_FMAN) += fman/
+obj-$(CONFIG_FSL_DPAA_ETH) += dpaa/
diff --git a/drivers/net/ethernet/freescale/dpaa/Kconfig b/drivers/net/ethernet/freescale/dpaa/Kconfig
new file mode 100644
index 000000000000..a654736237a9
--- /dev/null
+++ b/drivers/net/ethernet/freescale/dpaa/Kconfig
@@ -0,0 +1,10 @@
+menuconfig FSL_DPAA_ETH
+ tristate "DPAA Ethernet"
+ depends on FSL_DPAA && FSL_FMAN
+ select PHYLIB
+ select FSL_FMAN_MAC
+ ---help---
+ Data Path Acceleration Architecture Ethernet driver,
+ supporting the Freescale QorIQ chips.
+ Depends on the Freescale Buffer Manager, Queue Manager
+ and Frame Manager drivers.
diff --git a/drivers/net/ethernet/freescale/dpaa/Makefile b/drivers/net/ethernet/freescale/dpaa/Makefile
new file mode 100644
index 000000000000..7db50bccb137
--- /dev/null
+++ b/drivers/net/ethernet/freescale/dpaa/Makefile
@@ -0,0 +1,12 @@
+#
+# Makefile for the Freescale DPAA Ethernet controllers
+#
+
+# Include FMan headers
+FMAN = $(srctree)/drivers/net/ethernet/freescale/fman
+ccflags-y += -I$(FMAN)
+
+obj-$(CONFIG_FSL_DPAA_ETH) += fsl_dpa.o
+
+fsl_dpa-objs += dpaa_eth.o dpaa_ethtool.o dpaa_eth_sysfs.o
+CFLAGS_dpaa_eth.o := -I$(src)
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
new file mode 100644
index 000000000000..624ba9058dc4
--- /dev/null
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
@@ -0,0 +1,2756 @@
+/* Copyright 2008 - 2016 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/of_mdio.h>
+#include <linux/of_net.h>
+#include <linux/io.h>
+#include <linux/if_arp.h>
+#include <linux/if_vlan.h>
+#include <linux/icmp.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/udp.h>
+#include <linux/tcp.h>
+#include <linux/net.h>
+#include <linux/skbuff.h>
+#include <linux/etherdevice.h>
+#include <linux/if_ether.h>
+#include <linux/highmem.h>
+#include <linux/percpu.h>
+#include <linux/dma-mapping.h>
+#include <linux/sort.h>
+#include <soc/fsl/bman.h>
+#include <soc/fsl/qman.h>
+
+#include "fman.h"
+#include "fman_port.h"
+#include "mac.h"
+#include "dpaa_eth.h"
+
+/* CREATE_TRACE_POINTS only needs to be defined once. Other dpaa files
+ * using trace events only need to #include <trace/events/sched.h>
+ */
+#define CREATE_TRACE_POINTS
+#include "dpaa_eth_trace.h"
+
+static int debug = -1;
+module_param(debug, int, 0444);
+MODULE_PARM_DESC(debug, "Module/Driver verbosity level (0=none,...,16=all)");
+
+static u16 tx_timeout = 1000;
+module_param(tx_timeout, ushort, 0444);
+MODULE_PARM_DESC(tx_timeout, "The Tx timeout in ms");
+
+#define FM_FD_STAT_RX_ERRORS \
+ (FM_FD_ERR_DMA | FM_FD_ERR_PHYSICAL | \
+ FM_FD_ERR_SIZE | FM_FD_ERR_CLS_DISCARD | \
+ FM_FD_ERR_EXTRACTION | FM_FD_ERR_NO_SCHEME | \
+ FM_FD_ERR_PRS_TIMEOUT | FM_FD_ERR_PRS_ILL_INSTRUCT | \
+ FM_FD_ERR_PRS_HDR_ERR)
+
+#define FM_FD_STAT_TX_ERRORS \
+ (FM_FD_ERR_UNSUPPORTED_FORMAT | \
+ FM_FD_ERR_LENGTH | FM_FD_ERR_DMA)
+
+#define DPAA_MSG_DEFAULT (NETIF_MSG_DRV | NETIF_MSG_PROBE | \
+ NETIF_MSG_LINK | NETIF_MSG_IFUP | \
+ NETIF_MSG_IFDOWN)
+
+#define DPAA_INGRESS_CS_THRESHOLD 0x10000000
+/* Ingress congestion threshold on FMan ports
+ * The size in bytes of the ingress tail-drop threshold on FMan ports.
+ * Traffic piling up above this value will be rejected by QMan and discarded
+ * by FMan.
+ */
+
+/* Size in bytes of the FQ taildrop threshold */
+#define DPAA_FQ_TD 0x200000
+
+#define DPAA_CS_THRESHOLD_1G 0x06000000
+/* Egress congestion threshold on 1G ports, range 0x1000 .. 0x10000000
+ * The size in bytes of the egress Congestion State notification threshold on
+ * 1G ports. The 1G dTSECs can quite easily be flooded by cores doing Tx in a
+ * tight loop (e.g. by sending UDP datagrams at "while(1) speed"),
+ * and the larger the frame size, the more acute the problem.
+ * So we have to find a balance between these factors:
+ * - avoiding the device staying congested for a prolonged time (risking
+ * the netdev watchdog firing - see also the tx_timeout module param);
+ * - affecting performance of protocols such as TCP, which otherwise
+ * behave well under the congestion notification mechanism;
+ * - preventing the Tx cores from tightly-looping (as if the congestion
+ * threshold was too low to be effective);
+ * - running out of memory if the CS threshold is set too high.
+ */
+
+#define DPAA_CS_THRESHOLD_10G 0x10000000
+/* The size in bytes of the egress Congestion State notification threshold on
+ * 10G ports, range 0x1000 .. 0x10000000
+ */
+
+/* Largest value that the FQD's OAL field can hold */
+#define FSL_QMAN_MAX_OAL 127
+
+/* Default alignment for start of data in an Rx FD */
+#define DPAA_FD_DATA_ALIGNMENT 16
+
+/* Values for the L3R field of the FM Parse Results
+ */
+/* L3 Type field: First IP Present IPv4 */
+#define FM_L3_PARSE_RESULT_IPV4 0x8000
+/* L3 Type field: First IP Present IPv6 */
+#define FM_L3_PARSE_RESULT_IPV6 0x4000
+/* Values for the L4R field of the FM Parse Results */
+/* L4 Type field: UDP */
+#define FM_L4_PARSE_RESULT_UDP 0x40
+/* L4 Type field: TCP */
+#define FM_L4_PARSE_RESULT_TCP 0x20
+
+#define DPAA_SGT_MAX_ENTRIES 16 /* maximum number of entries in SG Table */
+#define DPAA_BUFF_RELEASE_MAX 8 /* maximum number of buffers released at once */
+
+#define FSL_DPAA_BPID_INV 0xff
+#define FSL_DPAA_ETH_MAX_BUF_COUNT 128
+#define FSL_DPAA_ETH_REFILL_THRESHOLD 80
+
+#define DPAA_TX_PRIV_DATA_SIZE 16
+#define DPAA_PARSE_RESULTS_SIZE sizeof(struct fman_prs_result)
+#define DPAA_TIME_STAMP_SIZE 8
+#define DPAA_HASH_RESULTS_SIZE 8
+#define DPAA_RX_PRIV_DATA_SIZE (u16)(DPAA_TX_PRIV_DATA_SIZE + \
+ dpaa_rx_extra_headroom)
+
+#define DPAA_ETH_RX_QUEUES 128
+
+#define DPAA_ENQUEUE_RETRIES 100000
+
+enum port_type {RX, TX};
+
+struct fm_port_fqs {
+ struct dpaa_fq *tx_defq;
+ struct dpaa_fq *tx_errq;
+ struct dpaa_fq *rx_defq;
+ struct dpaa_fq *rx_errq;
+};
+
+/* All the dpa bps in use at any moment */
+static struct dpaa_bp *dpaa_bp_array[BM_MAX_NUM_OF_POOLS];
+
+/* The raw buffer size must be cacheline aligned */
+#define DPAA_BP_RAW_SIZE 4096
+/* When using more than one buffer pool, the raw sizes are as follows:
+ * 1 bp: 4KB
+ * 2 bp: 2KB, 4KB
+ * 3 bp: 1KB, 2KB, 4KB
+ * 4 bp: 1KB, 2KB, 4KB, 8KB
+ */
+static inline size_t bpool_buffer_raw_size(u8 index, u8 cnt)
+{
+ size_t res = DPAA_BP_RAW_SIZE / 4;
+ u8 i;
+
+ for (i = (cnt < 3) ? cnt : 3; i < 3 + index; i++)
+ res *= 2;
+ return res;
+}
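A worked trace of bpool_buffer_raw_size() against the table above: res starts at 4096/4 = 1KB and i starts at min(cnt, 3). For cnt = 3, index 0 runs zero doublings (1KB), index 1 one (2KB) and index 2 two (4KB); for cnt = 4, index 3 additionally gets three doublings (8KB); for cnt = 1, the single pool starts at i = 1 and doubles twice, giving the 4KB of the one-pool row.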
+
+/* FMan-DMA requires 16-byte alignment for Rx buffers, but SKB_DATA_ALIGN is
+ * even stronger (SMP_CACHE_BYTES-aligned), so we just get away with that,
+ * via SKB_WITH_OVERHEAD(). We can't rely on netdev_alloc_frag() giving us
+ * half-page-aligned buffers, so we reserve some more space for start-of-buffer
+ * alignment.
+ */
+#define dpaa_bp_size(raw_size) SKB_WITH_OVERHEAD((raw_size) - SMP_CACHE_BYTES)
+
+static int dpaa_max_frm;
+
+static int dpaa_rx_extra_headroom;
+
+#define dpaa_get_max_mtu() \
+ (dpaa_max_frm - (VLAN_ETH_HLEN + ETH_FCS_LEN))
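As a worked example: with the FMan configured for a standard 1522-byte maximum frame, dpaa_get_max_mtu() yields 1522 - (VLAN_ETH_HLEN + ETH_FCS_LEN) = 1522 - (18 + 4) = 1500. The advertised max_mtu therefore tracks whatever maximum frame length the hardware was set up with, minus the VLAN-tagged Ethernet header and the FCS.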
+
+static int dpaa_netdev_init(struct net_device *net_dev,
+ const struct net_device_ops *dpaa_ops,
+ u16 tx_timeout)
+{
+ struct dpaa_priv *priv = netdev_priv(net_dev);
+ struct device *dev = net_dev->dev.parent;
+ struct dpaa_percpu_priv *percpu_priv;
+ const u8 *mac_addr;
+ int i, err;
+
+ /* Although we access another CPU's private data here
+ * we do it at initialization so it is safe
+ */
+ for_each_possible_cpu(i) {
+ percpu_priv = per_cpu_ptr(priv->percpu_priv, i);
+ percpu_priv->net_dev = net_dev;
+ }
+
+ net_dev->netdev_ops = dpaa_ops;
+ mac_addr = priv->mac_dev->addr;
+
+ net_dev->mem_start = priv->mac_dev->res->start;
+ net_dev->mem_end = priv->mac_dev->res->end;
+
+ net_dev->min_mtu = ETH_MIN_MTU;
+ net_dev->max_mtu = dpaa_get_max_mtu();
+
+ net_dev->hw_features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
+ NETIF_F_LLTX);
+
+ net_dev->hw_features |= NETIF_F_SG | NETIF_F_HIGHDMA;
+ /* The kernel enables GSO automatically if we declare NETIF_F_SG.
+ * For conformity, we'll still declare GSO explicitly.
+ */
+ net_dev->features |= NETIF_F_GSO;
+
+ net_dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
+ /* we do not want shared skbs on TX */
+ net_dev->priv_flags &= ~IFF_TX_SKB_SHARING;
+
+ net_dev->features |= net_dev->hw_features;
+ net_dev->vlan_features = net_dev->features;
+
+ memcpy(net_dev->perm_addr, mac_addr, net_dev->addr_len);
+ memcpy(net_dev->dev_addr, mac_addr, net_dev->addr_len);
+
+ net_dev->ethtool_ops = &dpaa_ethtool_ops;
+
+ net_dev->needed_headroom = priv->tx_headroom;
+ net_dev->watchdog_timeo = msecs_to_jiffies(tx_timeout);
+
+ /* start without the RUNNING flag, phylib controls it later */
+ netif_carrier_off(net_dev);
+
+ err = register_netdev(net_dev);
+ if (err < 0) {
+ dev_err(dev, "register_netdev() = %d\n", err);
+ return err;
+ }
+
+ return 0;
+}
+
+static int dpaa_stop(struct net_device *net_dev)
+{
+ struct mac_device *mac_dev;
+ struct dpaa_priv *priv;
+ int i, err, error;
+
+ priv = netdev_priv(net_dev);
+ mac_dev = priv->mac_dev;
+
+ netif_tx_stop_all_queues(net_dev);
+ /* Allow the Fman (Tx) port to process in-flight frames before we
+ * try switching it off.
+ */
+ usleep_range(5000, 10000);
+
+ err = mac_dev->stop(mac_dev);
+ if (err < 0)
+ netif_err(priv, ifdown, net_dev, "mac_dev->stop() = %d\n",
+ err);
+
+ for (i = 0; i < ARRAY_SIZE(mac_dev->port); i++) {
+ error = fman_port_disable(mac_dev->port[i]);
+ if (error)
+ err = error;
+ }
+
+ if (net_dev->phydev)
+ phy_disconnect(net_dev->phydev);
+ net_dev->phydev = NULL;
+
+ return err;
+}
+
+static void dpaa_tx_timeout(struct net_device *net_dev)
+{
+ struct dpaa_percpu_priv *percpu_priv;
+ const struct dpaa_priv *priv;
+
+ priv = netdev_priv(net_dev);
+ percpu_priv = this_cpu_ptr(priv->percpu_priv);
+
+ netif_crit(priv, timer, net_dev, "Transmit timeout latency: %u ms\n",
+ jiffies_to_msecs(jiffies - dev_trans_start(net_dev)));
+
+ percpu_priv->stats.tx_errors++;
+}
+
+/* Calculates the statistics for the given device by adding the statistics
+ * collected by each CPU.
+ */
+static struct rtnl_link_stats64 *dpaa_get_stats64(struct net_device *net_dev,
+ struct rtnl_link_stats64 *s)
+{
+ int numstats = sizeof(struct rtnl_link_stats64) / sizeof(u64);
+ struct dpaa_priv *priv = netdev_priv(net_dev);
+ struct dpaa_percpu_priv *percpu_priv;
+ u64 *netstats = (u64 *)s;
+ u64 *cpustats;
+ int i, j;
+
+ for_each_possible_cpu(i) {
+ percpu_priv = per_cpu_ptr(priv->percpu_priv, i);
+
+ cpustats = (u64 *)&percpu_priv->stats;
+
+ /* add stats from all CPUs */
+ for (j = 0; j < numstats; j++)
+ netstats[j] += cpustats[j];
+ }
+
+ return s;
+}
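The summation above walks struct rtnl_link_stats64 as a flat array of u64 counters, which is safe because every field of that struct is a __u64. A compile-time guard for that assumption (a sketch, not part of the patch) could read:

	BUILD_BUG_ON(sizeof(struct rtnl_link_stats64) % sizeof(u64) != 0);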
+
+static struct mac_device *dpaa_mac_dev_get(struct platform_device *pdev)
+{
+ struct platform_device *of_dev;
+ struct dpaa_eth_data *eth_data;
+ struct device *dpaa_dev, *dev;
+ struct device_node *mac_node;
+ struct mac_device *mac_dev;
+
+ dpaa_dev = &pdev->dev;
+ eth_data = dpaa_dev->platform_data;
+ if (!eth_data)
+ return ERR_PTR(-ENODEV);
+
+ mac_node = eth_data->mac_node;
+
+ of_dev = of_find_device_by_node(mac_node);
+ if (!of_dev) {
+ dev_err(dpaa_dev, "of_find_device_by_node(%s) failed\n",
+ mac_node->full_name);
+ of_node_put(mac_node);
+ return ERR_PTR(-EINVAL);
+ }
+ of_node_put(mac_node);
+
+ dev = &of_dev->dev;
+
+ mac_dev = dev_get_drvdata(dev);
+ if (!mac_dev) {
+ dev_err(dpaa_dev, "dev_get_drvdata(%s) failed\n",
+ dev_name(dev));
+ return ERR_PTR(-EINVAL);
+ }
+
+ return mac_dev;
+}
+
+static int dpaa_set_mac_address(struct net_device *net_dev, void *addr)
+{
+ const struct dpaa_priv *priv;
+ struct mac_device *mac_dev;
+ struct sockaddr old_addr;
+ int err;
+
+ priv = netdev_priv(net_dev);
+
+ memcpy(old_addr.sa_data, net_dev->dev_addr, ETH_ALEN);
+
+ err = eth_mac_addr(net_dev, addr);
+ if (err < 0) {
+ netif_err(priv, drv, net_dev, "eth_mac_addr() = %d\n", err);
+ return err;
+ }
+
+ mac_dev = priv->mac_dev;
+
+ err = mac_dev->change_addr(mac_dev->fman_mac,
+ (enet_addr_t *)net_dev->dev_addr);
+ if (err < 0) {
+ netif_err(priv, drv, net_dev, "mac_dev->change_addr() = %d\n",
+ err);
+ /* reverting to previous address */
+ eth_mac_addr(net_dev, &old_addr);
+
+ return err;
+ }
+
+ return 0;
+}
+
+static void dpaa_set_rx_mode(struct net_device *net_dev)
+{
+ const struct dpaa_priv *priv;
+ int err;
+
+ priv = netdev_priv(net_dev);
+
+ if (!!(net_dev->flags & IFF_PROMISC) != priv->mac_dev->promisc) {
+ priv->mac_dev->promisc = !priv->mac_dev->promisc;
+ err = priv->mac_dev->set_promisc(priv->mac_dev->fman_mac,
+ priv->mac_dev->promisc);
+ if (err < 0)
+ netif_err(priv, drv, net_dev,
+ "mac_dev->set_promisc() = %d\n",
+ err);
+ }
+
+ err = priv->mac_dev->set_multi(net_dev, priv->mac_dev);
+ if (err < 0)
+ netif_err(priv, drv, net_dev, "mac_dev->set_multi() = %d\n",
+ err);
+}
+
+static struct dpaa_bp *dpaa_bpid2pool(int bpid)
+{
+ if (WARN_ON(bpid < 0 || bpid >= BM_MAX_NUM_OF_POOLS))
+ return NULL;
+
+ return dpaa_bp_array[bpid];
+}
+
+/* checks if this bpool is already allocated */
+static bool dpaa_bpid2pool_use(int bpid)
+{
+ if (dpaa_bpid2pool(bpid)) {
+ atomic_inc(&dpaa_bp_array[bpid]->refs);
+ return true;
+ }
+
+ return false;
+}
+
+/* called only once per bpid by dpaa_bp_alloc_pool() */
+static void dpaa_bpid2pool_map(int bpid, struct dpaa_bp *dpaa_bp)
+{
+ dpaa_bp_array[bpid] = dpaa_bp;
+ atomic_set(&dpaa_bp->refs, 1);
+}
+
+static int dpaa_bp_alloc_pool(struct dpaa_bp *dpaa_bp)
+{
+ int err;
+
+ if (dpaa_bp->size == 0 || dpaa_bp->config_count == 0) {
+ pr_err("%s: Buffer pool is not properly initialized! Missing size or initial number of buffers\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ /* If the pool is already specified, we only create one per bpid */
+ if (dpaa_bp->bpid != FSL_DPAA_BPID_INV &&
+ dpaa_bpid2pool_use(dpaa_bp->bpid))
+ return 0;
+
+ if (dpaa_bp->bpid == FSL_DPAA_BPID_INV) {
+ dpaa_bp->pool = bman_new_pool();
+ if (!dpaa_bp->pool) {
+ pr_err("%s: bman_new_pool() failed\n",
+ __func__);
+ return -ENODEV;
+ }
+
+ dpaa_bp->bpid = (u8)bman_get_bpid(dpaa_bp->pool);
+ }
+
+ if (dpaa_bp->seed_cb) {
+ err = dpaa_bp->seed_cb(dpaa_bp);
+ if (err)
+ goto pool_seed_failed;
+ }
+
+ dpaa_bpid2pool_map(dpaa_bp->bpid, dpaa_bp);
+
+ return 0;
+
+pool_seed_failed:
+ pr_err("%s: pool seeding failed\n", __func__);
+ bman_free_pool(dpaa_bp->pool);
+
+ return err;
+}
+
+/* remove and free all the buffers from the given buffer pool */
+static void dpaa_bp_drain(struct dpaa_bp *bp)
+{
+ u8 num = 8;
+ int ret;
+
+ do {
+ struct bm_buffer bmb[8];
+ int i;
+
+ ret = bman_acquire(bp->pool, bmb, num);
+ if (ret < 0) {
+ if (num == 8) {
+ /* we have fewer than 8 buffers left;
+ * drain them one by one
+ */
+ num = 1;
+ ret = 1;
+ continue;
+ } else {
+ /* Pool is fully drained */
+ break;
+ }
+ }
+
+ if (bp->free_buf_cb)
+ for (i = 0; i < num; i++)
+ bp->free_buf_cb(bp, &bmb[i]);
+ } while (ret > 0);
+}
+
+static void dpaa_bp_free(struct dpaa_bp *dpaa_bp)
+{
+ struct dpaa_bp *bp = dpaa_bpid2pool(dpaa_bp->bpid);
+
+ /* the mapping between bpid and dpaa_bp is done very late in the
+ * allocation procedure; if something failed before the mapping, the bp
+ * was not configured, therefore we don't need the below instructions
+ */
+ if (!bp)
+ return;
+
+ if (!atomic_dec_and_test(&bp->refs))
+ return;
+
+ if (bp->free_buf_cb)
+ dpaa_bp_drain(bp);
+
+ dpaa_bp_array[bp->bpid] = NULL;
+ bman_free_pool(bp->pool);
+}
+
+static void dpaa_bps_free(struct dpaa_priv *priv)
+{
+ int i;
+
+ for (i = 0; i < DPAA_BPS_NUM; i++)
+ dpaa_bp_free(priv->dpaa_bps[i]);
+}
+
+/* Use multiple WQs for FQ assignment:
+ * - Tx Confirmation queues go to WQ1.
+ * - Rx Error and Tx Error queues go to WQ2 (giving them a better chance
+ * to be scheduled, in case there are many more FQs in WQ3).
+ * - Rx Default and Tx queues go to WQ3 (no differentiation between
+ * Rx and Tx traffic).
+ * This ensures that Tx-confirmed buffers are timely released. In particular,
+ * it avoids congestion on the Tx Confirm FQs, which can pile up PFDRs if they
+ * are greatly outnumbered by other FQs in the system, while
+ * dequeue scheduling is round-robin.
+ */
+static inline void dpaa_assign_wq(struct dpaa_fq *fq)
+{
+ switch (fq->fq_type) {
+ case FQ_TYPE_TX_CONFIRM:
+ case FQ_TYPE_TX_CONF_MQ:
+ fq->wq = 1;
+ break;
+ case FQ_TYPE_RX_ERROR:
+ case FQ_TYPE_TX_ERROR:
+ fq->wq = 2;
+ break;
+ case FQ_TYPE_RX_DEFAULT:
+ case FQ_TYPE_TX:
+ fq->wq = 3;
+ break;
+ default:
+ WARN(1, "Invalid FQ type %d for FQID %d!\n",
+ fq->fq_type, fq->fqid);
+ }
+}
+
+static struct dpaa_fq *dpaa_fq_alloc(struct device *dev,
+ u32 start, u32 count,
+ struct list_head *list,
+ enum dpaa_fq_type fq_type)
+{
+ struct dpaa_fq *dpaa_fq;
+ int i;
+
+ dpaa_fq = devm_kzalloc(dev, sizeof(*dpaa_fq) * count,
+ GFP_KERNEL);
+ if (!dpaa_fq)
+ return NULL;
+
+ for (i = 0; i < count; i++) {
+ dpaa_fq[i].fq_type = fq_type;
+ dpaa_fq[i].fqid = start ? start + i : 0;
+ list_add_tail(&dpaa_fq[i].list, list);
+ }
+
+ for (i = 0; i < count; i++)
+ dpaa_assign_wq(dpaa_fq + i);
+
+ return dpaa_fq;
+}
+
+static int dpaa_alloc_all_fqs(struct device *dev, struct list_head *list,
+ struct fm_port_fqs *port_fqs)
+{
+ struct dpaa_fq *dpaa_fq;
+
+ dpaa_fq = dpaa_fq_alloc(dev, 0, 1, list, FQ_TYPE_RX_ERROR);
+ if (!dpaa_fq)
+ goto fq_alloc_failed;
+
+ port_fqs->rx_errq = &dpaa_fq[0];
+
+ dpaa_fq = dpaa_fq_alloc(dev, 0, 1, list, FQ_TYPE_RX_DEFAULT);
+ if (!dpaa_fq)
+ goto fq_alloc_failed;
+
+ port_fqs->rx_defq = &dpaa_fq[0];
+
+ if (!dpaa_fq_alloc(dev, 0, DPAA_ETH_TXQ_NUM, list, FQ_TYPE_TX_CONF_MQ))
+ goto fq_alloc_failed;
+
+ dpaa_fq = dpaa_fq_alloc(dev, 0, 1, list, FQ_TYPE_TX_ERROR);
+ if (!dpaa_fq)
+ goto fq_alloc_failed;
+
+ port_fqs->tx_errq = &dpaa_fq[0];
+
+ dpaa_fq = dpaa_fq_alloc(dev, 0, 1, list, FQ_TYPE_TX_CONFIRM);
+ if (!dpaa_fq)
+ goto fq_alloc_failed;
+
+ port_fqs->tx_defq = &dpaa_fq[0];
+
+ if (!dpaa_fq_alloc(dev, 0, DPAA_ETH_TXQ_NUM, list, FQ_TYPE_TX))
+ goto fq_alloc_failed;
+
+ return 0;
+
+fq_alloc_failed:
+ dev_err(dev, "dpaa_fq_alloc() failed\n");
+ return -ENOMEM;
+}
+
+static u32 rx_pool_channel;
+static DEFINE_SPINLOCK(rx_pool_channel_init);
+
+static int dpaa_get_channel(void)
+{
+ spin_lock(&rx_pool_channel_init);
+ if (!rx_pool_channel) {
+ u32 pool;
+ int ret;
+
+ ret = qman_alloc_pool(&pool);
+
+ if (!ret)
+ rx_pool_channel = pool;
+ }
+ spin_unlock(&rx_pool_channel_init);
+ if (!rx_pool_channel)
+ return -ENOMEM;
+ return rx_pool_channel;
+}
+
+static void dpaa_release_channel(void)
+{
+ qman_release_pool(rx_pool_channel);
+}
+
+static void dpaa_eth_add_channel(u16 channel)
+{
+ u32 pool = QM_SDQCR_CHANNELS_POOL_CONV(channel);
+ const cpumask_t *cpus = qman_affine_cpus();
+ struct qman_portal *portal;
+ int cpu;
+
+ for_each_cpu(cpu, cpus) {
+ portal = qman_get_affine_portal(cpu);
+ qman_p_static_dequeue_add(portal, pool);
+ }
+}
+
+/* Congestion group state change notification callback.
+ * Stops the device's egress queues while they are congested and
+ * wakes them upon exiting congested state.
+ * Also updates some CGR-related stats.
+ */
+static void dpaa_eth_cgscn(struct qman_portal *qm, struct qman_cgr *cgr,
+ int congested)
+{
+ struct dpaa_priv *priv = (struct dpaa_priv *)container_of(cgr,
+ struct dpaa_priv, cgr_data.cgr);
+
+ if (congested) {
+ priv->cgr_data.congestion_start_jiffies = jiffies;
+ netif_tx_stop_all_queues(priv->net_dev);
+ priv->cgr_data.cgr_congested_count++;
+ } else {
+ priv->cgr_data.congested_jiffies +=
+ (jiffies - priv->cgr_data.congestion_start_jiffies);
+ netif_tx_wake_all_queues(priv->net_dev);
+ }
+}
+
+static int dpaa_eth_cgr_init(struct dpaa_priv *priv)
+{
+ struct qm_mcc_initcgr initcgr;
+ u32 cs_th;
+ int err;
+
+ err = qman_alloc_cgrid(&priv->cgr_data.cgr.cgrid);
+ if (err < 0) {
+ if (netif_msg_drv(priv))
+ pr_err("%s: Error %d allocating CGR ID\n",
+ __func__, err);
+ goto out_error;
+ }
+ priv->cgr_data.cgr.cb = dpaa_eth_cgscn;
+
+ /* Enable Congestion State Change Notifications and CS taildrop */
+ initcgr.we_mask = cpu_to_be16(QM_CGR_WE_CSCN_EN | QM_CGR_WE_CS_THRES);
+ initcgr.cgr.cscn_en = QM_CGR_EN;
+
+ /* Set different thresholds based on the MAC speed.
+ * This may turn suboptimal if the MAC is reconfigured at a speed
+ * lower than its max, e.g. if a dTSEC later negotiates a 100Mbps link.
+ * In such cases, we ought to reconfigure the threshold, too.
+ */
+ if (priv->mac_dev->if_support & SUPPORTED_10000baseT_Full)
+ cs_th = DPAA_CS_THRESHOLD_10G;
+ else
+ cs_th = DPAA_CS_THRESHOLD_1G;
+ qm_cgr_cs_thres_set64(&initcgr.cgr.cs_thres, cs_th, 1);
+
+ initcgr.we_mask |= cpu_to_be16(QM_CGR_WE_CSTD_EN);
+ initcgr.cgr.cstd_en = QM_CGR_EN;
+
+ err = qman_create_cgr(&priv->cgr_data.cgr, QMAN_CGR_FLAG_USE_INIT,
+ &initcgr);
+ if (err < 0) {
+ if (netif_msg_drv(priv))
+ pr_err("%s: Error %d creating CGR with ID %d\n",
+ __func__, err, priv->cgr_data.cgr.cgrid);
+ qman_release_cgrid(priv->cgr_data.cgr.cgrid);
+ goto out_error;
+ }
+ if (netif_msg_drv(priv))
+ pr_debug("Created CGR %d for netdev with hwaddr %pM on QMan channel %d\n",
+ priv->cgr_data.cgr.cgrid, priv->mac_dev->addr,
+ priv->cgr_data.cgr.chan);
+
+out_error:
+ return err;
+}
+
+static inline void dpaa_setup_ingress(const struct dpaa_priv *priv,
+ struct dpaa_fq *fq,
+ const struct qman_fq *template)
+{
+ fq->fq_base = *template;
+ fq->net_dev = priv->net_dev;
+
+ fq->flags = QMAN_FQ_FLAG_NO_ENQUEUE;
+ fq->channel = priv->channel;
+}
+
+static inline void dpaa_setup_egress(const struct dpaa_priv *priv,
+ struct dpaa_fq *fq,
+ struct fman_port *port,
+ const struct qman_fq *template)
+{
+ fq->fq_base = *template;
+ fq->net_dev = priv->net_dev;
+
+ if (port) {
+ fq->flags = QMAN_FQ_FLAG_TO_DCPORTAL;
+ fq->channel = (u16)fman_port_get_qman_channel_id(port);
+ } else {
+ fq->flags = QMAN_FQ_FLAG_NO_MODIFY;
+ }
+}
+
+static void dpaa_fq_setup(struct dpaa_priv *priv,
+ const struct dpaa_fq_cbs *fq_cbs,
+ struct fman_port *tx_port)
+{
+ int egress_cnt = 0, conf_cnt = 0, num_portals = 0, cpu;
+ const cpumask_t *affine_cpus = qman_affine_cpus();
+ u16 portals[NR_CPUS];
+ struct dpaa_fq *fq;
+
+ for_each_cpu(cpu, affine_cpus)
+ portals[num_portals++] = qman_affine_channel(cpu);
+ if (num_portals == 0)
+ dev_err(priv->net_dev->dev.parent,
+ "No Qman software (affine) channels found");
+
+ /* Initialize each FQ in the list */
+ list_for_each_entry(fq, &priv->dpaa_fq_list, list) {
+ switch (fq->fq_type) {
+ case FQ_TYPE_RX_DEFAULT:
+ dpaa_setup_ingress(priv, fq, &fq_cbs->rx_defq);
+ break;
+ case FQ_TYPE_RX_ERROR:
+ dpaa_setup_ingress(priv, fq, &fq_cbs->rx_errq);
+ break;
+ case FQ_TYPE_TX:
+ dpaa_setup_egress(priv, fq, tx_port,
+ &fq_cbs->egress_ern);
+ /* If we have more Tx queues than the number of cores,
+ * just ignore the extra ones.
+ */
+ if (egress_cnt < DPAA_ETH_TXQ_NUM)
+ priv->egress_fqs[egress_cnt++] = &fq->fq_base;
+ break;
+ case FQ_TYPE_TX_CONF_MQ:
+ priv->conf_fqs[conf_cnt++] = &fq->fq_base;
+ /* fall through */
+ case FQ_TYPE_TX_CONFIRM:
+ dpaa_setup_ingress(priv, fq, &fq_cbs->tx_defq);
+ break;
+ case FQ_TYPE_TX_ERROR:
+ dpaa_setup_ingress(priv, fq, &fq_cbs->tx_errq);
+ break;
+ default:
+ dev_warn(priv->net_dev->dev.parent,
+ "Unknown FQ type detected!\n");
+ break;
+ }
+ }
+
+ /* Make sure all CPUs receive a corresponding Tx queue. */
+ while (egress_cnt < DPAA_ETH_TXQ_NUM) {
+ list_for_each_entry(fq, &priv->dpaa_fq_list, list) {
+ if (fq->fq_type != FQ_TYPE_TX)
+ continue;
+ priv->egress_fqs[egress_cnt++] = &fq->fq_base;
+ if (egress_cnt == DPAA_ETH_TXQ_NUM)
+ break;
+ }
+ }
+}
+
+static inline int dpaa_tx_fq_to_id(const struct dpaa_priv *priv,
+ struct qman_fq *tx_fq)
+{
+ int i;
+
+ for (i = 0; i < DPAA_ETH_TXQ_NUM; i++)
+ if (priv->egress_fqs[i] == tx_fq)
+ return i;
+
+ return -EINVAL;
+}
+
+static int dpaa_fq_init(struct dpaa_fq *dpaa_fq, bool td_enable)
+{
+ const struct dpaa_priv *priv;
+ struct qman_fq *confq = NULL;
+ struct qm_mcc_initfq initfq;
+ struct device *dev;
+ struct qman_fq *fq;
+ int queue_id;
+ int err;
+
+ priv = netdev_priv(dpaa_fq->net_dev);
+ dev = dpaa_fq->net_dev->dev.parent;
+
+ if (dpaa_fq->fqid == 0)
+ dpaa_fq->flags |= QMAN_FQ_FLAG_DYNAMIC_FQID;
+
+ dpaa_fq->init = !(dpaa_fq->flags & QMAN_FQ_FLAG_NO_MODIFY);
+
+ err = qman_create_fq(dpaa_fq->fqid, dpaa_fq->flags, &dpaa_fq->fq_base);
+ if (err) {
+ dev_err(dev, "qman_create_fq() failed\n");
+ return err;
+ }
+ fq = &dpaa_fq->fq_base;
+
+ if (dpaa_fq->init) {
+ memset(&initfq, 0, sizeof(initfq));
+
+ initfq.we_mask = cpu_to_be16(QM_INITFQ_WE_FQCTRL);
+ /* Note: we may get to keep an empty FQ in cache */
+ initfq.fqd.fq_ctrl = cpu_to_be16(QM_FQCTRL_PREFERINCACHE);
+
+ /* Try to reduce the number of portal interrupts for
+ * Tx Confirmation FQs.
+ */
+ if (dpaa_fq->fq_type == FQ_TYPE_TX_CONFIRM)
+ initfq.fqd.fq_ctrl |= cpu_to_be16(QM_FQCTRL_HOLDACTIVE);
+
+ /* FQ placement */
+ initfq.we_mask |= cpu_to_be16(QM_INITFQ_WE_DESTWQ);
+
+ qm_fqd_set_destwq(&initfq.fqd, dpaa_fq->channel, dpaa_fq->wq);
+
+ /* Put all egress queues in a congestion group of their own.
+ * Sensu stricto, the Tx confirmation queues are Rx FQs,
+ * rather than Tx - but they nonetheless account for the
+ * memory footprint on behalf of egress traffic. We therefore
+ * place them in the netdev's CGR, along with the Tx FQs.
+ */
+ if (dpaa_fq->fq_type == FQ_TYPE_TX ||
+ dpaa_fq->fq_type == FQ_TYPE_TX_CONFIRM ||
+ dpaa_fq->fq_type == FQ_TYPE_TX_CONF_MQ) {
+ initfq.we_mask |= cpu_to_be16(QM_INITFQ_WE_CGID);
+ initfq.fqd.fq_ctrl |= cpu_to_be16(QM_FQCTRL_CGE);
+ initfq.fqd.cgid = (u8)priv->cgr_data.cgr.cgrid;
+ /* Set a fixed overhead accounting, in an attempt to
+ * reduce the impact of fixed-size skb shells and the
+ * driver's needed headroom on system memory. This is
+ * especially the case when the egress traffic is
+ * composed of small datagrams.
+ * Unfortunately, QMan's OAL value is capped to an
+ * insufficient value, but even that is better than
+ * no overhead accounting at all.
+ */
+ initfq.we_mask |= cpu_to_be16(QM_INITFQ_WE_OAC);
+ qm_fqd_set_oac(&initfq.fqd, QM_OAC_CG);
+ qm_fqd_set_oal(&initfq.fqd,
+ min(sizeof(struct sk_buff) +
+ priv->tx_headroom,
+ (size_t)FSL_QMAN_MAX_OAL));
+ }
+
+ if (td_enable) {
+ initfq.we_mask |= cpu_to_be16(QM_INITFQ_WE_TDTHRESH);
+ qm_fqd_set_taildrop(&initfq.fqd, DPAA_FQ_TD, 1);
+ initfq.fqd.fq_ctrl = cpu_to_be16(QM_FQCTRL_TDE);
+ }
+
+ if (dpaa_fq->fq_type == FQ_TYPE_TX) {
+ queue_id = dpaa_tx_fq_to_id(priv, &dpaa_fq->fq_base);
+ if (queue_id >= 0)
+ confq = priv->conf_fqs[queue_id];
+ if (confq) {
+ initfq.we_mask |=
+ cpu_to_be16(QM_INITFQ_WE_CONTEXTA);
+ /* ContextA: OVOM=1(use contextA2 bits instead of ICAD)
+ * A2V=1 (contextA A2 field is valid)
+ * A0V=1 (contextA A0 field is valid)
+ * B0V=1 (contextB field is valid)
+ * ContextA A2: EBD=1 (deallocate buffers inside FMan)
+ * ContextB B0(ASPID): 0 (absolute Virtual Storage ID)
+ */
+ qm_fqd_context_a_set64(&initfq.fqd,
+ 0x1e00000080000000ULL);
+ }
+ }
+
+ /* Put all the ingress queues in our "ingress CGR". */
+ if (priv->use_ingress_cgr &&
+ (dpaa_fq->fq_type == FQ_TYPE_RX_DEFAULT ||
+ dpaa_fq->fq_type == FQ_TYPE_RX_ERROR)) {
+ initfq.we_mask |= cpu_to_be16(QM_INITFQ_WE_CGID);
+ initfq.fqd.fq_ctrl |= cpu_to_be16(QM_FQCTRL_CGE);
+ initfq.fqd.cgid = (u8)priv->ingress_cgr.cgrid;
+ /* Set a fixed overhead accounting, just like for the
+ * egress CGR.
+ */
+ initfq.we_mask |= cpu_to_be16(QM_INITFQ_WE_OAC);
+ qm_fqd_set_oac(&initfq.fqd, QM_OAC_CG);
+ qm_fqd_set_oal(&initfq.fqd,
+ min(sizeof(struct sk_buff) +
+ priv->tx_headroom,
+ (size_t)FSL_QMAN_MAX_OAL));
+ }
+
+ /* Initialization common to all ingress queues */
+ if (dpaa_fq->flags & QMAN_FQ_FLAG_NO_ENQUEUE) {
+ initfq.we_mask |= cpu_to_be16(QM_INITFQ_WE_CONTEXTA);
+ initfq.fqd.fq_ctrl |= cpu_to_be16(QM_FQCTRL_HOLDACTIVE);
+ initfq.fqd.context_a.stashing.exclusive =
+ QM_STASHING_EXCL_DATA | QM_STASHING_EXCL_CTX |
+ QM_STASHING_EXCL_ANNOTATION;
+ qm_fqd_set_stashing(&initfq.fqd, 1, 2,
+ DIV_ROUND_UP(sizeof(struct qman_fq),
+ 64));
+ }
+
+ err = qman_init_fq(fq, QMAN_INITFQ_FLAG_SCHED, &initfq);
+ if (err < 0) {
+ dev_err(dev, "qman_init_fq(%u) = %d\n",
+ qman_fq_fqid(fq), err);
+ qman_destroy_fq(fq);
+ return err;
+ }
+ }
+
+ dpaa_fq->fqid = qman_fq_fqid(fq);
+
+ return 0;
+}
+
+static int dpaa_fq_free_entry(struct device *dev, struct qman_fq *fq)
+{
+ const struct dpaa_priv *priv;
+ struct dpaa_fq *dpaa_fq;
+ int err, error;
+
+ err = 0;
+
+ dpaa_fq = container_of(fq, struct dpaa_fq, fq_base);
+ priv = netdev_priv(dpaa_fq->net_dev);
+
+ if (dpaa_fq->init) {
+ err = qman_retire_fq(fq, NULL);
+ if (err < 0 && netif_msg_drv(priv))
+ dev_err(dev, "qman_retire_fq(%u) = %d\n",
+ qman_fq_fqid(fq), err);
+
+ error = qman_oos_fq(fq);
+ if (error < 0 && netif_msg_drv(priv)) {
+ dev_err(dev, "qman_oos_fq(%u) = %d\n",
+ qman_fq_fqid(fq), error);
+ if (err >= 0)
+ err = error;
+ }
+ }
+
+ qman_destroy_fq(fq);
+ list_del(&dpaa_fq->list);
+
+ return err;
+}
+
+static int dpaa_fq_free(struct device *dev, struct list_head *list)
+{
+ struct dpaa_fq *dpaa_fq, *tmp;
+ int err, error;
+
+ err = 0;
+ list_for_each_entry_safe(dpaa_fq, tmp, list, list) {
+ error = dpaa_fq_free_entry(dev, (struct qman_fq *)dpaa_fq);
+ if (error < 0 && err >= 0)
+ err = error;
+ }
+
+ return err;
+}
+
+static void dpaa_eth_init_tx_port(struct fman_port *port, struct dpaa_fq *errq,
+ struct dpaa_fq *defq,
+ struct dpaa_buffer_layout *buf_layout)
+{
+ struct fman_buffer_prefix_content buf_prefix_content;
+ struct fman_port_params params;
+ int err;
+
+ memset(&params, 0, sizeof(params));
+ memset(&buf_prefix_content, 0, sizeof(buf_prefix_content));
+
+ buf_prefix_content.priv_data_size = buf_layout->priv_data_size;
+ buf_prefix_content.pass_prs_result = true;
+ buf_prefix_content.pass_hash_result = true;
+ buf_prefix_content.pass_time_stamp = false;
+ buf_prefix_content.data_align = DPAA_FD_DATA_ALIGNMENT;
+
+ params.specific_params.non_rx_params.err_fqid = errq->fqid;
+ params.specific_params.non_rx_params.dflt_fqid = defq->fqid;
+
+ err = fman_port_config(port, &params);
+ if (err)
+ pr_err("%s: fman_port_config failed\n", __func__);
+
+ err = fman_port_cfg_buf_prefix_content(port, &buf_prefix_content);
+ if (err)
+ pr_err("%s: fman_port_cfg_buf_prefix_content failed\n",
+ __func__);
+
+ err = fman_port_init(port);
+ if (err)
+ pr_err("%s: fm_port_init failed\n", __func__);
+}
+
+static void dpaa_eth_init_rx_port(struct fman_port *port, struct dpaa_bp **bps,
+ size_t count, struct dpaa_fq *errq,
+ struct dpaa_fq *defq,
+ struct dpaa_buffer_layout *buf_layout)
+{
+ struct fman_buffer_prefix_content buf_prefix_content;
+ struct fman_port_rx_params *rx_p;
+ struct fman_port_params params;
+ int i, err;
+
+ memset(&params, 0, sizeof(params));
+ memset(&buf_prefix_content, 0, sizeof(buf_prefix_content));
+
+ buf_prefix_content.priv_data_size = buf_layout->priv_data_size;
+ buf_prefix_content.pass_prs_result = true;
+ buf_prefix_content.pass_hash_result = true;
+ buf_prefix_content.pass_time_stamp = false;
+ buf_prefix_content.data_align = DPAA_FD_DATA_ALIGNMENT;
+
+ rx_p = &params.specific_params.rx_params;
+ rx_p->err_fqid = errq->fqid;
+ rx_p->dflt_fqid = defq->fqid;
+
+ count = min(ARRAY_SIZE(rx_p->ext_buf_pools.ext_buf_pool), count);
+ rx_p->ext_buf_pools.num_of_pools_used = (u8)count;
+ for (i = 0; i < count; i++) {
+ rx_p->ext_buf_pools.ext_buf_pool[i].id = bps[i]->bpid;
+ rx_p->ext_buf_pools.ext_buf_pool[i].size = (u16)bps[i]->size;
+ }
+
+ err = fman_port_config(port, &params);
+ if (err)
+ pr_err("%s: fman_port_config failed\n", __func__);
+
+ err = fman_port_cfg_buf_prefix_content(port, &buf_prefix_content);
+ if (err)
+ pr_err("%s: fman_port_cfg_buf_prefix_content failed\n",
+ __func__);
+
+ err = fman_port_init(port);
+ if (err)
+ pr_err("%s: fm_port_init failed\n", __func__);
+}
+
+static void dpaa_eth_init_ports(struct mac_device *mac_dev,
+ struct dpaa_bp **bps, size_t count,
+ struct fm_port_fqs *port_fqs,
+ struct dpaa_buffer_layout *buf_layout,
+ struct device *dev)
+{
+ struct fman_port *rxport = mac_dev->port[RX];
+ struct fman_port *txport = mac_dev->port[TX];
+
+ dpaa_eth_init_tx_port(txport, port_fqs->tx_errq,
+ port_fqs->tx_defq, &buf_layout[TX]);
+ dpaa_eth_init_rx_port(rxport, bps, count, port_fqs->rx_errq,
+ port_fqs->rx_defq, &buf_layout[RX]);
+}
+
+static int dpaa_bman_release(const struct dpaa_bp *dpaa_bp,
+ struct bm_buffer *bmb, int cnt)
+{
+ int err;
+
+ err = bman_release(dpaa_bp->pool, bmb, cnt);
+ /* Should never occur, address anyway to avoid leaking the buffers */
+ if (unlikely(WARN_ON(err)) && dpaa_bp->free_buf_cb)
+ while (cnt-- > 0)
+ dpaa_bp->free_buf_cb(dpaa_bp, &bmb[cnt]);
+
+ return cnt;
+}
+
+static void dpaa_release_sgt_members(struct qm_sg_entry *sgt)
+{
+ struct bm_buffer bmb[DPAA_BUFF_RELEASE_MAX];
+ struct dpaa_bp *dpaa_bp;
+ int i = 0, j;
+
+ memset(bmb, 0, sizeof(bmb));
+
+ do {
+ dpaa_bp = dpaa_bpid2pool(sgt[i].bpid);
+ if (!dpaa_bp)
+ return;
+
+ j = 0;
+ do {
+ WARN_ON(qm_sg_entry_is_ext(&sgt[i]));
+
+ bm_buffer_set64(&bmb[j], qm_sg_entry_get64(&sgt[i]));
+
+ j++; i++;
+ } while (j < ARRAY_SIZE(bmb) &&
+ !qm_sg_entry_is_final(&sgt[i - 1]) &&
+ sgt[i - 1].bpid == sgt[i].bpid);
+
+ dpaa_bman_release(dpaa_bp, bmb, j);
+ } while (!qm_sg_entry_is_final(&sgt[i - 1]));
+}
+
+static void dpaa_fd_release(const struct net_device *net_dev,
+ const struct qm_fd *fd)
+{
+ struct qm_sg_entry *sgt;
+ struct dpaa_bp *dpaa_bp;
+ struct bm_buffer bmb;
+ dma_addr_t addr;
+ void *vaddr;
+
+ bmb.data = 0;
+ bm_buffer_set64(&bmb, qm_fd_addr(fd));
+
+ dpaa_bp = dpaa_bpid2pool(fd->bpid);
+ if (!dpaa_bp)
+ return;
+
+ if (qm_fd_get_format(fd) == qm_fd_sg) {
+ vaddr = phys_to_virt(qm_fd_addr(fd));
+ sgt = vaddr + qm_fd_get_offset(fd);
+
+ dma_unmap_single(dpaa_bp->dev, qm_fd_addr(fd), dpaa_bp->size,
+ DMA_FROM_DEVICE);
+
+ dpaa_release_sgt_members(sgt);
+
+ addr = dma_map_single(dpaa_bp->dev, vaddr, dpaa_bp->size,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(dpaa_bp->dev, addr)) {
+ dev_err(dpaa_bp->dev, "DMA mapping failed");
+ return;
+ }
+ bm_buffer_set64(&bmb, addr);
+ }
+
+ dpaa_bman_release(dpaa_bp, &bmb, 1);
+}
+
+static void count_ern(struct dpaa_percpu_priv *percpu_priv,
+ const union qm_mr_entry *msg)
+{
+ switch (msg->ern.rc & QM_MR_RC_MASK) {
+ case QM_MR_RC_CGR_TAILDROP:
+ percpu_priv->ern_cnt.cg_tdrop++;
+ break;
+ case QM_MR_RC_WRED:
+ percpu_priv->ern_cnt.wred++;
+ break;
+ case QM_MR_RC_ERROR:
+ percpu_priv->ern_cnt.err_cond++;
+ break;
+ case QM_MR_RC_ORPWINDOW_EARLY:
+ percpu_priv->ern_cnt.early_window++;
+ break;
+ case QM_MR_RC_ORPWINDOW_LATE:
+ percpu_priv->ern_cnt.late_window++;
+ break;
+ case QM_MR_RC_FQ_TAILDROP:
+ percpu_priv->ern_cnt.fq_tdrop++;
+ break;
+ case QM_MR_RC_ORPWINDOW_RETIRED:
+ percpu_priv->ern_cnt.fq_retired++;
+ break;
+ case QM_MR_RC_ORP_ZERO:
+ percpu_priv->ern_cnt.orp_zero++;
+ break;
+ }
+}
+
+/* Turn on HW checksum computation for this outgoing frame.
+ * If the current protocol is not something we support in this regard
+ * (or if the stack has already computed the SW checksum), we do nothing.
+ *
+ * Returns 0 if all goes well (or HW csum doesn't apply), and a negative value
+ * otherwise.
+ *
+ * Note that this function may modify the fd->cmd field and the skb data buffer
+ * (the Parse Results area).
+ */
+static int dpaa_enable_tx_csum(struct dpaa_priv *priv,
+ struct sk_buff *skb,
+ struct qm_fd *fd,
+ char *parse_results)
+{
+ struct fman_prs_result *parse_result;
+ u16 ethertype = ntohs(skb->protocol);
+ struct ipv6hdr *ipv6h = NULL;
+ struct iphdr *iph;
+ int retval = 0;
+ u8 l4_proto;
+
+ if (skb->ip_summed != CHECKSUM_PARTIAL)
+ return 0;
+
+ /* Note: L3 csum seems to be already computed in sw, but we can't choose
+ * L4 alone from the FM configuration anyway.
+ */
+
+ /* Fill in some fields of the Parse Results array, so the FMan
+ * can find them as if they came from the FMan Parser.
+ */
+ parse_result = (struct fman_prs_result *)parse_results;
+
+ /* If we're dealing with VLAN, get the real Ethernet type */
+ if (ethertype == ETH_P_8021Q) {
+ /* We can't always assume the MAC header is set correctly
+ * by the stack, so reset to beginning of skb->data
+ */
+ skb_reset_mac_header(skb);
+ ethertype = ntohs(vlan_eth_hdr(skb)->h_vlan_encapsulated_proto);
+ }
+
+ /* Fill in the relevant L3 parse result fields
+ * and read the L4 protocol type
+ */
+ switch (ethertype) {
+ case ETH_P_IP:
+ parse_result->l3r = cpu_to_be16(FM_L3_PARSE_RESULT_IPV4);
+ iph = ip_hdr(skb);
+ WARN_ON(!iph);
+ l4_proto = iph->protocol;
+ break;
+ case ETH_P_IPV6:
+ parse_result->l3r = cpu_to_be16(FM_L3_PARSE_RESULT_IPV6);
+ ipv6h = ipv6_hdr(skb);
+ WARN_ON(!ipv6h);
+ l4_proto = ipv6h->nexthdr;
+ break;
+ default:
+ /* We shouldn't even be here */
+ if (net_ratelimit())
+ netif_alert(priv, tx_err, priv->net_dev,
+ "Can't compute HW csum for L3 proto 0x%x\n",
+ ntohs(skb->protocol));
+ retval = -EIO;
+ goto return_error;
+ }
+
+ /* Fill in the relevant L4 parse result fields */
+ switch (l4_proto) {
+ case IPPROTO_UDP:
+ parse_result->l4r = FM_L4_PARSE_RESULT_UDP;
+ break;
+ case IPPROTO_TCP:
+ parse_result->l4r = FM_L4_PARSE_RESULT_TCP;
+ break;
+ default:
+ if (net_ratelimit())
+ netif_alert(priv, tx_err, priv->net_dev,
+ "Can't compute HW csum for L4 proto 0x%x\n",
+ l4_proto);
+ retval = -EIO;
+ goto return_error;
+ }
+
+ /* At index 0 is IPOffset_1 as defined in the Parse Results */
+ parse_result->ip_off[0] = (u8)skb_network_offset(skb);
+ parse_result->l4_off = (u8)skb_transport_offset(skb);
+
+ /* Enable L3 (and L4, if TCP or UDP) HW checksum. */
+ fd->cmd |= cpu_to_be32(FM_FD_CMD_RPD | FM_FD_CMD_DTC);
+
+ /* On P1023 and similar platforms fd->cmd interpretation could
+ * be disabled by setting CONTEXT_A bit ICMD; currently this bit
+ * is not set so we do not need to check; in the future, if/when
+ * using context_a we need to check this bit
+ */
+
+return_error:
+ return retval;
+}
+
+static int dpaa_bp_add_8_bufs(const struct dpaa_bp *dpaa_bp)
+{
+ struct device *dev = dpaa_bp->dev;
+ struct bm_buffer bmb[8];
+ dma_addr_t addr;
+ void *new_buf;
+ u8 i;
+
+ for (i = 0; i < 8; i++) {
+ new_buf = netdev_alloc_frag(dpaa_bp->raw_size);
+ if (unlikely(!new_buf)) {
+ dev_err(dev, "netdev_alloc_frag() failed, size %zu\n",
+ dpaa_bp->raw_size);
+ goto release_previous_buffs;
+ }
+ new_buf = PTR_ALIGN(new_buf, SMP_CACHE_BYTES);
+
+ addr = dma_map_single(dev, new_buf,
+ dpaa_bp->size, DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(dev, addr))) {
+ dev_err(dpaa_bp->dev, "DMA map failed");
+ goto release_previous_buffs;
+ }
+
+ bmb[i].data = 0;
+ bm_buffer_set64(&bmb[i], addr);
+ }
+
+release_bufs:
+ return dpaa_bman_release(dpaa_bp, bmb, i);
+
+release_previous_buffs:
+ WARN_ONCE(1, "dpaa_eth: failed to add buffers on Rx\n");
+
+ bm_buffer_set64(&bmb[i], 0);
+ /* Avoid releasing a completely null buffer; bman_release() requires
+ * at least one buffer.
+ */
+ if (likely(i))
+ goto release_bufs;
+
+ return 0;
+}
+
+static int dpaa_bp_seed(struct dpaa_bp *dpaa_bp)
+{
+ int i;
+
+ /* Give each CPU an allotment of "config_count" buffers */
+ for_each_possible_cpu(i) {
+ int *count_ptr = per_cpu_ptr(dpaa_bp->percpu_count, i);
+ int j;
+
+ /* Although we access another CPU's counters here
+ * we do it at boot time so it is safe
+ */
+ for (j = 0; j < dpaa_bp->config_count; j += 8)
+ *count_ptr += dpaa_bp_add_8_bufs(dpaa_bp);
+ }
+ return 0;
+}
+
+/* Add buffers/(pages) for Rx processing whenever bpool count falls below
+ * REFILL_THRESHOLD.
+ */
+static int dpaa_eth_refill_bpool(struct dpaa_bp *dpaa_bp, int *countptr)
+{
+ int count = *countptr;
+ int new_bufs;
+
+ if (unlikely(count < FSL_DPAA_ETH_REFILL_THRESHOLD)) {
+ do {
+ new_bufs = dpaa_bp_add_8_bufs(dpaa_bp);
+ if (unlikely(!new_bufs)) {
+ /* Avoid looping forever if we've temporarily
+ * run out of memory. We'll try again at the
+ * next NAPI cycle.
+ */
+ break;
+ }
+ count += new_bufs;
+ } while (count < FSL_DPAA_ETH_MAX_BUF_COUNT);
+
+ *countptr = count;
+ if (unlikely(count < FSL_DPAA_ETH_MAX_BUF_COUNT))
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static int dpaa_eth_refill_bpools(struct dpaa_priv *priv)
+{
+ struct dpaa_bp *dpaa_bp;
+ int *countptr;
+ int res, i;
+
+ for (i = 0; i < DPAA_BPS_NUM; i++) {
+ dpaa_bp = priv->dpaa_bps[i];
+ if (!dpaa_bp)
+ return -EINVAL;
+ countptr = this_cpu_ptr(dpaa_bp->percpu_count);
+ res = dpaa_eth_refill_bpool(dpaa_bp, countptr);
+ if (res)
+ return res;
+ }
+ return 0;
+}
+
+/* Cleanup function for outgoing frame descriptors that were built on Tx path,
+ * either contiguous frames or scatter/gather ones.
+ * Skb freeing is not handled here.
+ *
+ * This function may be called on error paths in the Tx function, so guard
+ * against cases when not all fd relevant fields were filled in.
+ *
+ * Return the skb backpointer, since for S/G frames the buffer containing it
+ * gets freed here.
+ */
+static struct sk_buff *dpaa_cleanup_tx_fd(const struct dpaa_priv *priv,
+ const struct qm_fd *fd)
+{
+ const enum dma_data_direction dma_dir = DMA_TO_DEVICE;
+ struct device *dev = priv->net_dev->dev.parent;
+ dma_addr_t addr = qm_fd_addr(fd);
+ const struct qm_sg_entry *sgt;
+ struct sk_buff **skbh, *skb;
+ int nr_frags, i;
+
+ skbh = (struct sk_buff **)phys_to_virt(addr);
+ skb = *skbh;
+
+ if (unlikely(qm_fd_get_format(fd) == qm_fd_sg)) {
+ nr_frags = skb_shinfo(skb)->nr_frags;
+ dma_unmap_single(dev, addr, qm_fd_get_offset(fd) +
+ sizeof(struct qm_sg_entry) * (1 + nr_frags),
+ dma_dir);
+
+ /* The sgt buffer has been allocated with netdev_alloc_frag(),
+ * it's from lowmem.
+ */
+ sgt = phys_to_virt(addr + qm_fd_get_offset(fd));
+
+ /* sgt[0] is from lowmem, was dma_map_single()-ed */
+ dma_unmap_single(dev, qm_sg_addr(&sgt[0]),
+ qm_sg_entry_get_len(&sgt[0]), dma_dir);
+
+ /* remaining pages were mapped with skb_frag_dma_map() */
+ for (i = 1; i < nr_frags; i++) {
+ WARN_ON(qm_sg_entry_is_ext(&sgt[i]));
+
+ dma_unmap_page(dev, qm_sg_addr(&sgt[i]),
+ qm_sg_entry_get_len(&sgt[i]), dma_dir);
+ }
+
+ /* Free the page frag that we allocated on Tx */
+ skb_free_frag(phys_to_virt(addr));
+ } else {
+ dma_unmap_single(dev, addr,
+ skb_tail_pointer(skb) - (u8 *)skbh, dma_dir);
+ }
+
+ return skb;
+}
+
+/* Build a linear skb around the received buffer.
+ * We are guaranteed there is enough room at the end of the data buffer to
+ * accommodate the shared info area of the skb.
+ */
+static struct sk_buff *contig_fd_to_skb(const struct dpaa_priv *priv,
+ const struct qm_fd *fd)
+{
+ ssize_t fd_off = qm_fd_get_offset(fd);
+ dma_addr_t addr = qm_fd_addr(fd);
+ struct dpaa_bp *dpaa_bp;
+ struct sk_buff *skb;
+ void *vaddr;
+
+ vaddr = phys_to_virt(addr);
+ WARN_ON(!IS_ALIGNED((unsigned long)vaddr, SMP_CACHE_BYTES));
+
+ dpaa_bp = dpaa_bpid2pool(fd->bpid);
+ if (!dpaa_bp)
+ goto free_buffer;
+
+ skb = build_skb(vaddr, dpaa_bp->size +
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
+ if (unlikely(!skb)) {
+ WARN_ONCE(1, "Build skb failure on Rx\n");
+ goto free_buffer;
+ }
+ WARN_ON(fd_off != priv->rx_headroom);
+ skb_reserve(skb, fd_off);
+ skb_put(skb, qm_fd_get_length(fd));
+
+ skb->ip_summed = CHECKSUM_NONE;
+
+ return skb;
+
+free_buffer:
+ skb_free_frag(vaddr);
+ return NULL;
+}
+
+/* Build an skb with the data of the first S/G entry in the linear portion and
+ * the rest of the frame as skb fragments.
+ *
+ * The page fragment holding the S/G Table is recycled here.
+ */
+static struct sk_buff *sg_fd_to_skb(const struct dpaa_priv *priv,
+ const struct qm_fd *fd)
+{
+ ssize_t fd_off = qm_fd_get_offset(fd);
+ dma_addr_t addr = qm_fd_addr(fd);
+ const struct qm_sg_entry *sgt;
+ struct page *page, *head_page;
+ struct dpaa_bp *dpaa_bp;
+ void *vaddr, *sg_vaddr;
+ int frag_off, frag_len;
+ struct sk_buff *skb;
+ dma_addr_t sg_addr;
+ int page_offset;
+ unsigned int sz;
+ int *count_ptr;
+ int i;
+
+ vaddr = phys_to_virt(addr);
+ WARN_ON(!IS_ALIGNED((unsigned long)vaddr, SMP_CACHE_BYTES));
+
+ /* Iterate through the SGT entries and add data buffers to the skb */
+ sgt = vaddr + fd_off;
+ for (i = 0; i < DPAA_SGT_MAX_ENTRIES; i++) {
+ /* Extension bit is not supported */
+ WARN_ON(qm_sg_entry_is_ext(&sgt[i]));
+
+ sg_addr = qm_sg_addr(&sgt[i]);
+ sg_vaddr = phys_to_virt(sg_addr);
+ WARN_ON(!IS_ALIGNED((unsigned long)sg_vaddr,
+ SMP_CACHE_BYTES));
+
+ /* We may use multiple Rx pools */
+ dpaa_bp = dpaa_bpid2pool(sgt[i].bpid);
+ if (!dpaa_bp)
+ goto free_buffers;
+
+ count_ptr = this_cpu_ptr(dpaa_bp->percpu_count);
+ dma_unmap_single(dpaa_bp->dev, sg_addr, dpaa_bp->size,
+ DMA_FROM_DEVICE);
+ if (i == 0) {
+ sz = dpaa_bp->size +
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+ skb = build_skb(sg_vaddr, sz);
+ if (WARN_ON(unlikely(!skb)))
+ goto free_buffers;
+
+ skb->ip_summed = CHECKSUM_NONE;
+
+ /* Make sure forwarded skbs will have enough space
+ * on Tx, if extra headers are added.
+ */
+ WARN_ON(fd_off != priv->rx_headroom);
+ skb_reserve(skb, fd_off);
+ skb_put(skb, qm_sg_entry_get_len(&sgt[i]));
+ } else {
+ /* Not the first S/G entry; all data from buffer will
+ * be added in an skb fragment; fragment index is offset
+ * by one since first S/G entry was incorporated in the
+ * linear part of the skb.
+ *
+ * Caution: 'page' may be a tail page.
+ */
+ page = virt_to_page(sg_vaddr);
+ head_page = virt_to_head_page(sg_vaddr);
+
+ /* Compute offset in (possibly tail) page */
+ page_offset = ((unsigned long)sg_vaddr &
+ (PAGE_SIZE - 1)) +
+ (page_address(page) - page_address(head_page));
+ /* page_offset only refers to the beginning of sgt[i];
+ * but the buffer itself may have an internal offset.
+ */
+ frag_off = qm_sg_entry_get_off(&sgt[i]) + page_offset;
+ frag_len = qm_sg_entry_get_len(&sgt[i]);
+ /* skb_add_rx_frag() does no checking on the page; if
+ * we pass it a tail page, we'll end up with
+ * bad page accounting and eventually with segfaults.
+ */
+ skb_add_rx_frag(skb, i - 1, head_page, frag_off,
+ frag_len, dpaa_bp->size);
+ }
+ /* Update the pool count for the current {cpu x bpool} */
+ (*count_ptr)--;
+
+ if (qm_sg_entry_is_final(&sgt[i]))
+ break;
+ }
+ WARN_ONCE(i == DPAA_SGT_MAX_ENTRIES, "No final bit on SGT\n");
+
+ /* free the SG table buffer */
+ skb_free_frag(vaddr);
+
+ return skb;
+
+free_buffers:
+ /* compensate sw bpool counter changes */
+ for (i--; i > 0; i--) {
+ dpaa_bp = dpaa_bpid2pool(sgt[i].bpid);
+ if (dpaa_bp) {
+ count_ptr = this_cpu_ptr(dpaa_bp->percpu_count);
+ (*count_ptr)++;
+ }
+ }
+ /* free all the SG entries */
+ for (i = 0; i < DPAA_SGT_MAX_ENTRIES ; i++) {
+ sg_addr = qm_sg_addr(&sgt[i]);
+ sg_vaddr = phys_to_virt(sg_addr);
+ skb_free_frag(sg_vaddr);
+ dpaa_bp = dpaa_bpid2pool(sgt[i].bpid);
+ if (dpaa_bp) {
+ count_ptr = this_cpu_ptr(dpaa_bp->percpu_count);
+ (*count_ptr)--;
+ }
+
+ if (qm_sg_entry_is_final(&sgt[i]))
+ break;
+ }
+ /* free the SGT fragment */
+ skb_free_frag(vaddr);
+
+ return NULL;
+}
+
+static int skb_to_contig_fd(struct dpaa_priv *priv,
+ struct sk_buff *skb, struct qm_fd *fd,
+ int *offset)
+{
+ struct net_device *net_dev = priv->net_dev;
+ struct device *dev = net_dev->dev.parent;
+ enum dma_data_direction dma_dir;
+ unsigned char *buffer_start;
+ struct sk_buff **skbh;
+ dma_addr_t addr;
+ int err;
+
+ /* We are guaranteed to have at least tx_headroom bytes
+ * available, so just use that for offset.
+ */
+ fd->bpid = FSL_DPAA_BPID_INV;
+ buffer_start = skb->data - priv->tx_headroom;
+ dma_dir = DMA_TO_DEVICE;
+
+ skbh = (struct sk_buff **)buffer_start;
+ *skbh = skb;
+
+ /* Enable L3/L4 hardware checksum computation.
+ *
+ * We must do this before dma_map_single(DMA_TO_DEVICE), because we may
+ * need to write into the skb.
+ */
+ err = dpaa_enable_tx_csum(priv, skb, fd,
+ ((char *)skbh) + DPAA_TX_PRIV_DATA_SIZE);
+ if (unlikely(err < 0)) {
+ if (net_ratelimit())
+ netif_err(priv, tx_err, net_dev, "HW csum error: %d\n",
+ err);
+ return err;
+ }
+
+ /* Fill in the rest of the FD fields */
+ qm_fd_set_contig(fd, priv->tx_headroom, skb->len);
+ fd->cmd |= cpu_to_be32(FM_FD_CMD_FCO);
+
+ /* Map the entire buffer size that may be seen by FMan, but no more */
+ addr = dma_map_single(dev, skbh,
+ skb_tail_pointer(skb) - buffer_start, dma_dir);
+ if (unlikely(dma_mapping_error(dev, addr))) {
+ if (net_ratelimit())
+ netif_err(priv, tx_err, net_dev, "dma_map_single() failed\n");
+ return -EINVAL;
+ }
+ qm_fd_addr_set64(fd, addr);
+
+ return 0;
+}
+
+static int skb_to_sg_fd(struct dpaa_priv *priv,
+ struct sk_buff *skb, struct qm_fd *fd)
+{
+ const enum dma_data_direction dma_dir = DMA_TO_DEVICE;
+ const int nr_frags = skb_shinfo(skb)->nr_frags;
+ struct net_device *net_dev = priv->net_dev;
+ struct device *dev = net_dev->dev.parent;
+ struct qm_sg_entry *sgt;
+ struct sk_buff **skbh;
+ int i, j, err, sz;
+ void *buffer_start;
+ skb_frag_t *frag;
+ dma_addr_t addr;
+ size_t frag_len;
+ void *sgt_buf;
+
+ /* get a page frag to store the SGTable */
+ sz = SKB_DATA_ALIGN(priv->tx_headroom +
+ sizeof(struct qm_sg_entry) * (1 + nr_frags));
+ sgt_buf = netdev_alloc_frag(sz);
+ if (unlikely(!sgt_buf)) {
+ netdev_err(net_dev, "netdev_alloc_frag() failed for size %d\n",
+ sz);
+ return -ENOMEM;
+ }
+
+ /* Enable L3/L4 hardware checksum computation.
+ *
+ * We must do this before dma_map_single(DMA_TO_DEVICE), because we may
+ * need to write into the skb.
+ */
+ err = dpaa_enable_tx_csum(priv, skb, fd,
+ sgt_buf + DPAA_TX_PRIV_DATA_SIZE);
+ if (unlikely(err < 0)) {
+ if (net_ratelimit())
+ netif_err(priv, tx_err, net_dev, "HW csum error: %d\n",
+ err);
+ goto csum_failed;
+ }
+
+ sgt = (struct qm_sg_entry *)(sgt_buf + priv->tx_headroom);
+ qm_sg_entry_set_len(&sgt[0], skb_headlen(skb));
+ sgt[0].bpid = FSL_DPAA_BPID_INV;
+ sgt[0].offset = 0;
+ addr = dma_map_single(dev, skb->data,
+ skb_headlen(skb), dma_dir);
+ if (unlikely(dma_mapping_error(dev, addr))) {
+ dev_err(dev, "DMA mapping failed");
+ err = -EINVAL;
+ goto sg0_map_failed;
+ }
+ qm_sg_entry_set64(&sgt[0], addr);
+
+ /* populate the rest of SGT entries */
+ frag = &skb_shinfo(skb)->frags[0];
+ frag_len = frag->size;
+ for (i = 1; i <= nr_frags; i++, frag++) {
+ WARN_ON(!skb_frag_page(frag));
+ addr = skb_frag_dma_map(dev, frag, 0,
+ frag_len, dma_dir);
+ if (unlikely(dma_mapping_error(dev, addr))) {
+ dev_err(dev, "DMA mapping failed");
+ err = -EINVAL;
+ goto sg_map_failed;
+ }
+
+ qm_sg_entry_set_len(&sgt[i], frag_len);
+ sgt[i].bpid = FSL_DPAA_BPID_INV;
+ sgt[i].offset = 0;
+
+ /* keep the offset in the address */
+ qm_sg_entry_set64(&sgt[i], addr);
+ frag_len = frag->size;
+ }
+ qm_sg_entry_set_f(&sgt[i - 1], frag_len);
+
+ qm_fd_set_sg(fd, priv->tx_headroom, skb->len);
+
+ /* DMA map the SGT page */
+ buffer_start = (void *)sgt - priv->tx_headroom;
+ skbh = (struct sk_buff **)buffer_start;
+ *skbh = skb;
+
+ addr = dma_map_single(dev, buffer_start, priv->tx_headroom +
+ sizeof(struct qm_sg_entry) * (1 + nr_frags),
+ dma_dir);
+ if (unlikely(dma_mapping_error(dev, addr))) {
+ dev_err(dev, "DMA mapping failed");
+ err = -EINVAL;
+ goto sgt_map_failed;
+ }
+
+ fd->bpid = FSL_DPAA_BPID_INV;
+ fd->cmd |= cpu_to_be32(FM_FD_CMD_FCO);
+ qm_fd_addr_set64(fd, addr);
+
+ return 0;
+
+sgt_map_failed:
+sg_map_failed:
+ for (j = 0; j < i; j++)
+ dma_unmap_page(dev, qm_sg_addr(&sgt[j]),
+ qm_sg_entry_get_len(&sgt[j]), dma_dir);
+sg0_map_failed:
+csum_failed:
+ skb_free_frag(sgt_buf);
+
+ return err;
+}
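+
+/* The S/G table buffer above holds tx_headroom bytes (skb backpointer at
+ * offset 0, checksum parse results further in), followed by 1 + nr_frags
+ * qm_sg_entry structures: entry 0 covers the linear part of the skb, the
+ * following entries cover the page fragments, and the Final bit set on the
+ * last entry tells FMan where the table ends.
+ */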
+
+static inline int dpaa_xmit(struct dpaa_priv *priv,
+ struct rtnl_link_stats64 *percpu_stats,
+ int queue,
+ struct qm_fd *fd)
+{
+ struct qman_fq *egress_fq;
+ int err, i;
+
+ egress_fq = priv->egress_fqs[queue];
+ if (fd->bpid == FSL_DPAA_BPID_INV)
+ fd->cmd |= cpu_to_be32(qman_fq_fqid(priv->conf_fqs[queue]));
+
+ /* Trace this Tx fd */
+ trace_dpaa_tx_fd(priv->net_dev, egress_fq, fd);
+
+ for (i = 0; i < DPAA_ENQUEUE_RETRIES; i++) {
+ err = qman_enqueue(egress_fq, fd);
+ if (err != -EBUSY)
+ break;
+ }
+
+ if (unlikely(err < 0)) {
+ percpu_stats->tx_errors++;
+ percpu_stats->tx_fifo_errors++;
+ return err;
+ }
+
+ percpu_stats->tx_packets++;
+ percpu_stats->tx_bytes += qm_fd_get_length(fd);
+
+ return 0;
+}
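+
+/* Two details worth noting above: a bpid of FSL_DPAA_BPID_INV marks the
+ * buffer as driver-owned rather than pool-owned, so the Tx confirmation
+ * FQID is folded into fd->cmd and FMan hands the buffer back on that queue
+ * instead of releasing it to a buffer pool; and qman_enqueue() may
+ * transiently return -EBUSY when the enqueue ring is full, hence the
+ * bounded DPAA_ENQUEUE_RETRIES loop.
+ */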
+
+static int dpaa_start_xmit(struct sk_buff *skb, struct net_device *net_dev)
+{
+ const int queue_mapping = skb_get_queue_mapping(skb);
+ bool nonlinear = skb_is_nonlinear(skb);
+ struct rtnl_link_stats64 *percpu_stats;
+ struct dpaa_percpu_priv *percpu_priv;
+ struct dpaa_priv *priv;
+ struct qm_fd fd;
+ int offset = 0;
+ int err = 0;
+
+ priv = netdev_priv(net_dev);
+ percpu_priv = this_cpu_ptr(priv->percpu_priv);
+ percpu_stats = &percpu_priv->stats;
+
+ qm_fd_clear_fd(&fd);
+
+ if (!nonlinear) {
+ /* We're going to store the skb backpointer at the beginning
+ * of the data buffer, so we need a privately owned skb.
+ *
+ * We've made sure the skb is not shared via dev->priv_flags;
+ * we still need to verify that the skb head is not cloned.
+ */
+ if (skb_cow_head(skb, priv->tx_headroom))
+ goto enomem;
+
+ WARN_ON(skb_is_nonlinear(skb));
+ }
+
+ /* MAX_SKB_FRAGS is equal to or larger than DPAA_SGT_MAX_ENTRIES;
+ * make sure we don't feed FMan with more fragments than it supports.
+ */
+ if (nonlinear &&
+ likely(skb_shinfo(skb)->nr_frags < DPAA_SGT_MAX_ENTRIES)) {
+ /* Just create a S/G fd based on the skb */
+ err = skb_to_sg_fd(priv, skb, &fd);
+ percpu_priv->tx_frag_skbuffs++;
+ } else {
+ /* If the egress skb contains more fragments than we support
+ * we have no choice but to linearize it ourselves.
+ */
+ if (unlikely(nonlinear) && __skb_linearize(skb))
+ goto enomem;
+
+ /* Finally, create a contig FD from this skb */
+ err = skb_to_contig_fd(priv, skb, &fd, &offset);
+ }
+ if (unlikely(err < 0))
+ goto skb_to_fd_failed;
+
+ if (likely(dpaa_xmit(priv, percpu_stats, queue_mapping, &fd) == 0))
+ return NETDEV_TX_OK;
+
+ dpaa_cleanup_tx_fd(priv, &fd);
+skb_to_fd_failed:
+enomem:
+ percpu_stats->tx_errors++;
+ dev_kfree_skb(skb);
+ return NETDEV_TX_OK;
+}
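+
+/* The transmit path thus has three cases: linear skbs get a contiguous FD
+ * (after skb_cow_head() guarantees a writable, private head), nonlinear
+ * skbs with fewer than DPAA_SGT_MAX_ENTRIES fragments get an S/G FD, and
+ * anything more fragmented is linearized first as a fallback.
+ */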
+
+static void dpaa_rx_error(struct net_device *net_dev,
+ const struct dpaa_priv *priv,
+ struct dpaa_percpu_priv *percpu_priv,
+ const struct qm_fd *fd,
+ u32 fqid)
+{
+ if (net_ratelimit())
+ netif_err(priv, hw, net_dev, "Err FD status = 0x%08x\n",
+ be32_to_cpu(fd->status) & FM_FD_STAT_RX_ERRORS);
+
+ percpu_priv->stats.rx_errors++;
+
+ if (be32_to_cpu(fd->status) & FM_FD_ERR_DMA)
+ percpu_priv->rx_errors.dme++;
+ if (be32_to_cpu(fd->status) & FM_FD_ERR_PHYSICAL)
+ percpu_priv->rx_errors.fpe++;
+ if (be32_to_cpu(fd->status) & FM_FD_ERR_SIZE)
+ percpu_priv->rx_errors.fse++;
+ if (be32_to_cpu(fd->status) & FM_FD_ERR_PRS_HDR_ERR)
+ percpu_priv->rx_errors.phe++;
+
+ dpaa_fd_release(net_dev, fd);
+}
+
+static void dpaa_tx_error(struct net_device *net_dev,
+ const struct dpaa_priv *priv,
+ struct dpaa_percpu_priv *percpu_priv,
+ const struct qm_fd *fd,
+ u32 fqid)
+{
+ struct sk_buff *skb;
+
+ if (net_ratelimit())
+ netif_warn(priv, hw, net_dev, "FD status = 0x%08x\n",
+ be32_to_cpu(fd->status) & FM_FD_STAT_TX_ERRORS);
+
+ percpu_priv->stats.tx_errors++;
+
+ skb = dpaa_cleanup_tx_fd(priv, fd);
+ dev_kfree_skb(skb);
+}
+
+static int dpaa_eth_poll(struct napi_struct *napi, int budget)
+{
+ struct dpaa_napi_portal *np =
+ container_of(napi, struct dpaa_napi_portal, napi);
+
+ int cleaned = qman_p_poll_dqrr(np->p, budget);
+
+ if (cleaned < budget) {
+ napi_complete(napi);
+ qman_p_irqsource_add(np->p, QM_PIRQ_DQRI);
+ } else if (np->down) {
+ qman_p_irqsource_add(np->p, QM_PIRQ_DQRI);
+ }
+
+ return cleaned;
+}
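+
+/* NAPI and the QMan portal cooperate here: dequeue interrupts
+ * (QM_PIRQ_DQRI) stay masked while polling, and once a poll consumes less
+ * than its budget the driver completes NAPI and re-arms DQRI. The np->down
+ * branch re-arms the interrupt source even while the interface is going
+ * down, so the portal is not left with dequeue interrupts masked.
+ */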
+
+static void dpaa_tx_conf(struct net_device *net_dev,
+ const struct dpaa_priv *priv,
+ struct dpaa_percpu_priv *percpu_priv,
+ const struct qm_fd *fd,
+ u32 fqid)
+{
+ struct sk_buff *skb;
+
+ if (unlikely(be32_to_cpu(fd->status) & FM_FD_STAT_TX_ERRORS)) {
+ if (net_ratelimit())
+ netif_warn(priv, hw, net_dev, "FD status = 0x%08x\n",
+ be32_to_cpu(fd->status) &
+ FM_FD_STAT_TX_ERRORS);
+
+ percpu_priv->stats.tx_errors++;
+ }
+
+ percpu_priv->tx_confirm++;
+
+ skb = dpaa_cleanup_tx_fd(priv, fd);
+
+ consume_skb(skb);
+}
+
+static inline int dpaa_eth_napi_schedule(struct dpaa_percpu_priv *percpu_priv,
+ struct qman_portal *portal)
+{
+ if (unlikely(in_irq() || !in_serving_softirq())) {
+ /* Disable QMan IRQ and invoke NAPI */
+ qman_p_irqsource_remove(portal, QM_PIRQ_DQRI);
+
+ percpu_priv->np.p = portal;
+ napi_schedule(&percpu_priv->np.napi);
+ percpu_priv->in_interrupt++;
+ return 1;
+ }
+ return 0;
+}
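+
+/* QMan may invoke the DQRR callbacks from hard interrupt context. In that
+ * case the helper above masks DQRI and defers all processing to NAPI; the
+ * callers translate its nonzero return into qman_cb_dqrr_stop, leaving the
+ * entry to be handled later from qman_p_poll_dqrr().
+ */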
+
+static enum qman_cb_dqrr_result rx_error_dqrr(struct qman_portal *portal,
+ struct qman_fq *fq,
+ const struct qm_dqrr_entry *dq)
+{
+ struct dpaa_fq *dpaa_fq = container_of(fq, struct dpaa_fq, fq_base);
+ struct dpaa_percpu_priv *percpu_priv;
+ struct net_device *net_dev;
+ struct dpaa_bp *dpaa_bp;
+ struct dpaa_priv *priv;
+
+ net_dev = dpaa_fq->net_dev;
+ priv = netdev_priv(net_dev);
+ dpaa_bp = dpaa_bpid2pool(dq->fd.bpid);
+ if (!dpaa_bp)
+ return qman_cb_dqrr_consume;
+
+ percpu_priv = this_cpu_ptr(priv->percpu_priv);
+
+ if (dpaa_eth_napi_schedule(percpu_priv, portal))
+ return qman_cb_dqrr_stop;
+
+ if (dpaa_eth_refill_bpools(priv))
+ /* Unable to refill the buffer pool due to insufficient
+ * system memory. Just release the frame back into the pool,
+ * otherwise we'll soon end up with an empty buffer pool.
+ */
+ dpaa_fd_release(net_dev, &dq->fd);
+ else
+ dpaa_rx_error(net_dev, priv, percpu_priv, &dq->fd, fq->fqid);
+
+ return qman_cb_dqrr_consume;
+}
+
+static enum qman_cb_dqrr_result rx_default_dqrr(struct qman_portal *portal,
+ struct qman_fq *fq,
+ const struct qm_dqrr_entry *dq)
+{
+ struct rtnl_link_stats64 *percpu_stats;
+ struct dpaa_percpu_priv *percpu_priv;
+ const struct qm_fd *fd = &dq->fd;
+ dma_addr_t addr = qm_fd_addr(fd);
+ enum qm_fd_format fd_format;
+ struct net_device *net_dev;
+ u32 fd_status;
+ struct dpaa_bp *dpaa_bp;
+ struct dpaa_priv *priv;
+ unsigned int skb_len;
+ struct sk_buff *skb;
+ int *count_ptr;
+
+ fd_status = be32_to_cpu(fd->status);
+ fd_format = qm_fd_get_format(fd);
+ net_dev = ((struct dpaa_fq *)fq)->net_dev;
+ priv = netdev_priv(net_dev);
+ dpaa_bp = dpaa_bpid2pool(dq->fd.bpid);
+ if (!dpaa_bp)
+ return qman_cb_dqrr_consume;
+
+ /* Trace the Rx fd */
+ trace_dpaa_rx_fd(net_dev, fq, &dq->fd);
+
+ percpu_priv = this_cpu_ptr(priv->percpu_priv);
+ percpu_stats = &percpu_priv->stats;
+
+ if (unlikely(dpaa_eth_napi_schedule(percpu_priv, portal)))
+ return qman_cb_dqrr_stop;
+
+ /* Make sure we didn't run out of buffers */
+ if (unlikely(dpaa_eth_refill_bpools(priv))) {
+ /* Unable to refill the buffer pool due to insufficient
+ * system memory. Just release the frame back into the pool,
+ * otherwise we'll soon end up with an empty buffer pool.
+ */
+ dpaa_fd_release(net_dev, &dq->fd);
+ return qman_cb_dqrr_consume;
+ }
+
+ if (unlikely(fd_status & FM_FD_STAT_RX_ERRORS)) {
+ if (net_ratelimit())
+ netif_warn(priv, hw, net_dev, "FD status = 0x%08x\n",
+ fd_status & FM_FD_STAT_RX_ERRORS);
+
+ percpu_stats->rx_errors++;
+ dpaa_fd_release(net_dev, fd);
+ return qman_cb_dqrr_consume;
+ }
+
+ dpaa_bp = dpaa_bpid2pool(fd->bpid);
+ if (!dpaa_bp)
+ return qman_cb_dqrr_consume;
+
+ dma_unmap_single(dpaa_bp->dev, addr, dpaa_bp->size, DMA_FROM_DEVICE);
+
+ /* prefetch the first 64 bytes of the frame or the SGT start */
+ prefetch(phys_to_virt(addr) + qm_fd_get_offset(fd));
+
+ /* The only FD types that we may receive are contig and S/G */
+ WARN_ON((fd_format != qm_fd_contig) && (fd_format != qm_fd_sg));
+
+ /* Account for either the contig buffer or the SGT buffer (depending on
+ * which case we were in) having been removed from the pool.
+ */
+ count_ptr = this_cpu_ptr(dpaa_bp->percpu_count);
+ (*count_ptr)--;
+
+ if (likely(fd_format == qm_fd_contig))
+ skb = contig_fd_to_skb(priv, fd);
+ else
+ skb = sg_fd_to_skb(priv, fd);
+ if (!skb)
+ return qman_cb_dqrr_consume;
+
+ skb->protocol = eth_type_trans(skb, net_dev);
+
+ skb_len = skb->len;
+
+ if (unlikely(netif_receive_skb(skb) == NET_RX_DROP))
+ return qman_cb_dqrr_consume;
+
+ percpu_stats->rx_packets++;
+ percpu_stats->rx_bytes += skb_len;
+
+ return qman_cb_dqrr_consume;
+}
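+
+/* The Rx fast path above proceeds in a fixed order: schedule NAPI if
+ * needed, refill the buffer pools, drop errored frames, unmap the buffer,
+ * build an skb from the contiguous or S/G frame and hand it to the stack
+ * via netif_receive_skb(). The per-CPU buffer count is decremented because
+ * the frame's buffer left the pool when FMan consumed it.
+ */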
+
+static enum qman_cb_dqrr_result conf_error_dqrr(struct qman_portal *portal,
+ struct qman_fq *fq,
+ const struct qm_dqrr_entry *dq)
+{
+ struct dpaa_percpu_priv *percpu_priv;
+ struct net_device *net_dev;
+ struct dpaa_priv *priv;
+
+ net_dev = ((struct dpaa_fq *)fq)->net_dev;
+ priv = netdev_priv(net_dev);
+
+ percpu_priv = this_cpu_ptr(priv->percpu_priv);
+
+ if (dpaa_eth_napi_schedule(percpu_priv, portal))
+ return qman_cb_dqrr_stop;
+
+ dpaa_tx_error(net_dev, priv, percpu_priv, &dq->fd, fq->fqid);
+
+ return qman_cb_dqrr_consume;
+}
+
+static enum qman_cb_dqrr_result conf_dflt_dqrr(struct qman_portal *portal,
+ struct qman_fq *fq,
+ const struct qm_dqrr_entry *dq)
+{
+ struct dpaa_percpu_priv *percpu_priv;
+ struct net_device *net_dev;
+ struct dpaa_priv *priv;
+
+ net_dev = ((struct dpaa_fq *)fq)->net_dev;
+ priv = netdev_priv(net_dev);
+
+ /* Trace the fd */
+ trace_dpaa_tx_conf_fd(net_dev, fq, &dq->fd);
+
+ percpu_priv = this_cpu_ptr(priv->percpu_priv);
+
+ if (dpaa_eth_napi_schedule(percpu_priv, portal))
+ return qman_cb_dqrr_stop;
+
+ dpaa_tx_conf(net_dev, priv, percpu_priv, &dq->fd, fq->fqid);
+
+ return qman_cb_dqrr_consume;
+}
+
+static void egress_ern(struct qman_portal *portal,
+ struct qman_fq *fq,
+ const union qm_mr_entry *msg)
+{
+ const struct qm_fd *fd = &msg->ern.fd;
+ struct dpaa_percpu_priv *percpu_priv;
+ const struct dpaa_priv *priv;
+ struct net_device *net_dev;
+ struct sk_buff *skb;
+
+ net_dev = ((struct dpaa_fq *)fq)->net_dev;
+ priv = netdev_priv(net_dev);
+ percpu_priv = this_cpu_ptr(priv->percpu_priv);
+
+ percpu_priv->stats.tx_dropped++;
+ percpu_priv->stats.tx_fifo_errors++;
+ count_ern(percpu_priv, msg);
+
+ skb = dpaa_cleanup_tx_fd(priv, fd);
+ dev_kfree_skb_any(skb);
+}
+
+static const struct dpaa_fq_cbs dpaa_fq_cbs = {
+ .rx_defq = { .cb = { .dqrr = rx_default_dqrr } },
+ .tx_defq = { .cb = { .dqrr = conf_dflt_dqrr } },
+ .rx_errq = { .cb = { .dqrr = rx_error_dqrr } },
+ .tx_errq = { .cb = { .dqrr = conf_error_dqrr } },
+ .egress_ern = { .cb = { .ern = egress_ern } }
+};
+
+static void dpaa_eth_napi_enable(struct dpaa_priv *priv)
+{
+ struct dpaa_percpu_priv *percpu_priv;
+ int i;
+
+ for_each_possible_cpu(i) {
+ percpu_priv = per_cpu_ptr(priv->percpu_priv, i);
+
+ percpu_priv->np.down = 0;
+ napi_enable(&percpu_priv->np.napi);
+ }
+}
+
+static void dpaa_eth_napi_disable(struct dpaa_priv *priv)
+{
+ struct dpaa_percpu_priv *percpu_priv;
+ int i;
+
+ for_each_possible_cpu(i) {
+ percpu_priv = per_cpu_ptr(priv->percpu_priv, i);
+
+ percpu_priv->np.down = 1;
+ napi_disable(&percpu_priv->np.napi);
+ }
+}
+
+static int dpaa_open(struct net_device *net_dev)
+{
+ struct mac_device *mac_dev;
+ struct dpaa_priv *priv;
+ int err, i;
+
+ priv = netdev_priv(net_dev);
+ mac_dev = priv->mac_dev;
+ dpaa_eth_napi_enable(priv);
+
+ net_dev->phydev = mac_dev->init_phy(net_dev, priv->mac_dev);
+ if (!net_dev->phydev) {
+ netif_err(priv, ifup, net_dev, "init_phy() failed\n");
+ err = -ENODEV;
+ goto phy_init_failed;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(mac_dev->port); i++) {
+ err = fman_port_enable(mac_dev->port[i]);
+ if (err)
+ goto mac_start_failed;
+ }
+
+ err = priv->mac_dev->start(mac_dev);
+ if (err < 0) {
+ netif_err(priv, ifup, net_dev, "mac_dev->start() = %d\n", err);
+ goto mac_start_failed;
+ }
+
+ netif_tx_start_all_queues(net_dev);
+
+ return 0;
+
+mac_start_failed:
+ for (i = 0; i < ARRAY_SIZE(mac_dev->port); i++)
+ fman_port_disable(mac_dev->port[i]);
+
+phy_init_failed:
+ dpaa_eth_napi_disable(priv);
+
+ return err;
+}
+
+static int dpaa_eth_stop(struct net_device *net_dev)
+{
+ struct dpaa_priv *priv;
+ int err;
+
+ err = dpaa_stop(net_dev);
+
+ priv = netdev_priv(net_dev);
+ dpaa_eth_napi_disable(priv);
+
+ return err;
+}
+
+static const struct net_device_ops dpaa_ops = {
+ .ndo_open = dpaa_open,
+ .ndo_start_xmit = dpaa_start_xmit,
+ .ndo_stop = dpaa_eth_stop,
+ .ndo_tx_timeout = dpaa_tx_timeout,
+ .ndo_get_stats64 = dpaa_get_stats64,
+ .ndo_set_mac_address = dpaa_set_mac_address,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_set_rx_mode = dpaa_set_rx_mode,
+};
+
+static int dpaa_napi_add(struct net_device *net_dev)
+{
+ struct dpaa_priv *priv = netdev_priv(net_dev);
+ struct dpaa_percpu_priv *percpu_priv;
+ int cpu;
+
+ for_each_possible_cpu(cpu) {
+ percpu_priv = per_cpu_ptr(priv->percpu_priv, cpu);
+
+ netif_napi_add(net_dev, &percpu_priv->np.napi,
+ dpaa_eth_poll, NAPI_POLL_WEIGHT);
+ }
+
+ return 0;
+}
+
+static void dpaa_napi_del(struct net_device *net_dev)
+{
+ struct dpaa_priv *priv = netdev_priv(net_dev);
+ struct dpaa_percpu_priv *percpu_priv;
+ int cpu;
+
+ for_each_possible_cpu(cpu) {
+ percpu_priv = per_cpu_ptr(priv->percpu_priv, cpu);
+
+ netif_napi_del(&percpu_priv->np.napi);
+ }
+}
+
+static inline void dpaa_bp_free_pf(const struct dpaa_bp *bp,
+ struct bm_buffer *bmb)
+{
+ dma_addr_t addr = bm_buf_addr(bmb);
+
+ dma_unmap_single(bp->dev, addr, bp->size, DMA_FROM_DEVICE);
+
+ skb_free_frag(phys_to_virt(addr));
+}
+
+/* Alloc the dpaa_bp struct and configure default values */
+static struct dpaa_bp *dpaa_bp_alloc(struct device *dev)
+{
+ struct dpaa_bp *dpaa_bp;
+
+ dpaa_bp = devm_kzalloc(dev, sizeof(*dpaa_bp), GFP_KERNEL);
+ if (!dpaa_bp)
+ return ERR_PTR(-ENOMEM);
+
+ dpaa_bp->bpid = FSL_DPAA_BPID_INV;
+ dpaa_bp->percpu_count = devm_alloc_percpu(dev, *dpaa_bp->percpu_count);
+ if (!dpaa_bp->percpu_count)
+ return ERR_PTR(-ENOMEM);
+
+ dpaa_bp->config_count = FSL_DPAA_ETH_MAX_BUF_COUNT;
+
+ dpaa_bp->seed_cb = dpaa_bp_seed;
+ dpaa_bp->free_buf_cb = dpaa_bp_free_pf;
+
+ return dpaa_bp;
+}
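+
+/* The bpid starts out as FSL_DPAA_BPID_INV, i.e. no hardware buffer pool
+ * ID has been assigned yet; dpaa_bp_alloc_pool() is expected to obtain one
+ * when the BMan pool is actually created. config_count is the per-CPU
+ * buffer level that the pool is continually replenished back to.
+ */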
+
+/* Place all ingress FQs (Rx Default, Rx Error) in a dedicated CGR.
+ * We won't be sending congestion notifications to FMan; for now, we just use
+ * this CGR to generate enqueue rejections to FMan in order to drop the frames
+ * before they reach our ingress queues and eat up memory.
+ */
+static int dpaa_ingress_cgr_init(struct dpaa_priv *priv)
+{
+ struct qm_mcc_initcgr initcgr;
+ u32 cs_th;
+ int err;
+
+ err = qman_alloc_cgrid(&priv->ingress_cgr.cgrid);
+ if (err < 0) {
+ if (netif_msg_drv(priv))
+ pr_err("Error %d allocating CGR ID\n", err);
+ goto out_error;
+ }
+
+ memset(&initcgr, 0, sizeof(initcgr));
+
+ /* Enable CS TD, but disable Congestion State Change Notifications. */
+ initcgr.we_mask = cpu_to_be16(QM_CGR_WE_CS_THRES);
+ initcgr.cgr.cscn_en = QM_CGR_EN;
+ cs_th = DPAA_INGRESS_CS_THRESHOLD;
+ qm_cgr_cs_thres_set64(&initcgr.cgr.cs_thres, cs_th, 1);
+
+ initcgr.we_mask |= cpu_to_be16(QM_CGR_WE_CSTD_EN);
+ initcgr.cgr.cstd_en = QM_CGR_EN;
+
+ /* This CGR will be associated with the SWP affined to the current CPU.
+ * However, we'll place all our ingress FQs in it.
+ */
+ err = qman_create_cgr(&priv->ingress_cgr, QMAN_CGR_FLAG_USE_INIT,
+ &initcgr);
+ if (err < 0) {
+ if (netif_msg_drv(priv))
+ pr_err("Error %d creating ingress CGR with ID %d\n",
+ err, priv->ingress_cgr.cgrid);
+ qman_release_cgrid(priv->ingress_cgr.cgrid);
+ goto out_error;
+ }
+ if (netif_msg_drv(priv))
+ pr_debug("Created ingress CGR %d for netdev with hwaddr %pM\n",
+ priv->ingress_cgr.cgrid, priv->mac_dev->addr);
+
+ priv->use_ingress_cgr = true;
+
+out_error:
+ return err;
+}
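+
+/* Since only QM_CGR_WE_CS_THRES and QM_CGR_WE_CSTD_EN are set in the
+ * write-enable mask, just the congestion state threshold and the tail-drop
+ * enable are committed; congestion state change notifications stay off,
+ * matching the comment above. The net effect is a pure drop mechanism:
+ * once the ingress FQs hold more than DPAA_INGRESS_CS_THRESHOLD worth of
+ * frames, further enqueues are rejected before they can exhaust memory.
+ */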
+
+static const struct of_device_id dpaa_match[];
+
+static inline u16 dpaa_get_headroom(struct dpaa_buffer_layout *bl)
+{
+ u16 headroom;
+
+ /* The frame headroom must accommodate:
+ * - the driver private data area
+ * - parse results, hash results, timestamp if selected
+ * If either hash results or timestamp is selected, both will
+ * be copied to/from the frame headroom, as TS is located between PR and
+ * HR in the IC, and the IC copy size has a granularity of 16 bytes
+ * (see the description of the FMBM_RICP and FMBM_TICP registers in the
+ * DPAARM).
+ *
+ * Also make sure the headroom is a multiple of data_align bytes.
+ */
+ headroom = (u16)(bl->priv_data_size + DPAA_PARSE_RESULTS_SIZE +
+ DPAA_TIME_STAMP_SIZE + DPAA_HASH_RESULTS_SIZE);
+
+ return DPAA_FD_DATA_ALIGNMENT ? ALIGN(headroom,
+ DPAA_FD_DATA_ALIGNMENT) :
+ headroom;
+}
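+
+/* A worked example with purely hypothetical sizes: if priv_data_size is 16
+ * and the parse results, timestamp and hash results areas add up to 48,
+ * the raw headroom is 64; with a 16-byte DPAA_FD_DATA_ALIGNMENT, ALIGN()
+ * keeps it at 64, whereas a raw value of 72 would round up to 80. The
+ * numbers are illustrative only; the real constants are defined elsewhere
+ * in the driver.
+ */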
+
+static int dpaa_eth_probe(struct platform_device *pdev)
+{
+ struct dpaa_bp *dpaa_bps[DPAA_BPS_NUM] = {NULL};
+ struct dpaa_percpu_priv *percpu_priv;
+ struct net_device *net_dev = NULL;
+ struct dpaa_fq *dpaa_fq, *tmp;
+ struct dpaa_priv *priv = NULL;
+ struct fm_port_fqs port_fqs;
+ struct mac_device *mac_dev;
+ int err = 0, i, channel;
+ struct device *dev;
+
+ dev = &pdev->dev;
+
+ /* Allocate this early, so we can store relevant information in
+ * the private area
+ */
+ net_dev = alloc_etherdev_mq(sizeof(*priv), DPAA_ETH_TXQ_NUM);
+ if (!net_dev) {
+ dev_err(dev, "alloc_etherdev_mq() failed\n");
+ err = -ENOMEM;
+ goto alloc_etherdev_mq_failed;
+ }
+
+ /* Do this here, so we can be verbose early */
+ SET_NETDEV_DEV(net_dev, dev);
+ dev_set_drvdata(dev, net_dev);
+
+ priv = netdev_priv(net_dev);
+ priv->net_dev = net_dev;
+
+ priv->msg_enable = netif_msg_init(debug, DPAA_MSG_DEFAULT);
+
+ mac_dev = dpaa_mac_dev_get(pdev);
+ if (IS_ERR(mac_dev)) {
+ dev_err(dev, "dpaa_mac_dev_get() failed\n");
+ err = PTR_ERR(mac_dev);
+ goto mac_probe_failed;
+ }
+
+ /* If fsl_fm_max_frm is set to a higher value than the common 1500,
+ * we choose conservatively and let the user explicitly set a higher
+ * MTU via ifconfig. Otherwise, the user may end up with different MTUs
+ * in the same LAN.
+ * If, on the other hand, fsl_fm_max_frm has been chosen below 1500,
+ * start with the maximum allowed.
+ */
+ net_dev->mtu = min(dpaa_get_max_mtu(), ETH_DATA_LEN);
+
+ netdev_dbg(net_dev, "Setting initial MTU on net device: %d\n",
+ net_dev->mtu);
+
+ priv->buf_layout[RX].priv_data_size = DPAA_RX_PRIV_DATA_SIZE; /* Rx */
+ priv->buf_layout[TX].priv_data_size = DPAA_TX_PRIV_DATA_SIZE; /* Tx */
+
+ /* device used for DMA mapping */
+ arch_setup_dma_ops(dev, 0, 0, NULL, false);
+ err = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(40));
+ if (err) {
+ dev_err(dev, "dma_coerce_mask_and_coherent() failed\n");
+ goto dev_mask_failed;
+ }
+
+ /* bp init */
+ for (i = 0; i < DPAA_BPS_NUM; i++) {
+ dpaa_bps[i] = dpaa_bp_alloc(dev);
+ if (IS_ERR(dpaa_bps[i])) {
+ err = PTR_ERR(dpaa_bps[i]);
+ dpaa_bps_free(priv);
+ goto bp_create_failed;
+ }
+ /* the raw size of the buffers used for reception */
+ dpaa_bps[i]->raw_size = bpool_buffer_raw_size(i, DPAA_BPS_NUM);
+ /* avoid runtime computations by keeping the usable size here */
+ dpaa_bps[i]->size = dpaa_bp_size(dpaa_bps[i]->raw_size);
+ dpaa_bps[i]->dev = dev;
+
+ err = dpaa_bp_alloc_pool(dpaa_bps[i]);
+ if (err < 0) {
+ dpaa_bps_free(priv);
+ priv->dpaa_bps[i] = NULL;
+ goto bp_create_failed;
+ }
+ priv->dpaa_bps[i] = dpaa_bps[i];
+ }
+
+ INIT_LIST_HEAD(&priv->dpaa_fq_list);
+
+ memset(&port_fqs, 0, sizeof(port_fqs));
+
+ err = dpaa_alloc_all_fqs(dev, &priv->dpaa_fq_list, &port_fqs);
+ if (err < 0) {
+ dev_err(dev, "dpaa_alloc_all_fqs() failed\n");
+ goto fq_probe_failed;
+ }
+
+ priv->mac_dev = mac_dev;
+
+ channel = dpaa_get_channel();
+ if (channel < 0) {
+ dev_err(dev, "dpaa_get_channel() failed\n");
+ err = channel;
+ goto get_channel_failed;
+ }
+
+ priv->channel = (u16)channel;
+
+ /* Start a thread that will walk the CPUs with affine portals
+ * and add this pool channel to each one's dequeue mask.
+ */
+ dpaa_eth_add_channel(priv->channel);
+
+ dpaa_fq_setup(priv, &dpaa_fq_cbs, priv->mac_dev->port[TX]);
+
+ /* Create a congestion group for this netdev, with
+ * dynamically-allocated CGR ID.
+ * Must be executed after probing the MAC, but before
+ * assigning the egress FQs to the CGRs.
+ */
+ err = dpaa_eth_cgr_init(priv);
+ if (err < 0) {
+ dev_err(dev, "Error initializing CGR\n");
+ goto tx_cgr_init_failed;
+ }
+
+ err = dpaa_ingress_cgr_init(priv);
+ if (err < 0) {
+ dev_err(dev, "Error initializing ingress CGR\n");
+ goto rx_cgr_init_failed;
+ }
+
+ /* Add the FQs to the interface, and make them active */
+ list_for_each_entry_safe(dpaa_fq, tmp, &priv->dpaa_fq_list, list) {
+ err = dpaa_fq_init(dpaa_fq, false);
+ if (err < 0)
+ goto fq_alloc_failed;
+ }
+
+ priv->tx_headroom = dpaa_get_headroom(&priv->buf_layout[TX]);
+ priv->rx_headroom = dpaa_get_headroom(&priv->buf_layout[RX]);
+
+ /* All real interfaces need their ports initialized */
+ dpaa_eth_init_ports(mac_dev, dpaa_bps, DPAA_BPS_NUM, &port_fqs,
+ &priv->buf_layout[0], dev);
+
+ priv->percpu_priv = devm_alloc_percpu(dev, *priv->percpu_priv);
+ if (!priv->percpu_priv) {
+ dev_err(dev, "devm_alloc_percpu() failed\n");
+ err = -ENOMEM;
+ goto alloc_percpu_failed;
+ }
+ for_each_possible_cpu(i) {
+ percpu_priv = per_cpu_ptr(priv->percpu_priv, i);
+ memset(percpu_priv, 0, sizeof(*percpu_priv));
+ }
+
+ /* Initialize NAPI */
+ err = dpaa_napi_add(net_dev);
+ if (err < 0)
+ goto napi_add_failed;
+
+ err = dpaa_netdev_init(net_dev, &dpaa_ops, tx_timeout);
+ if (err < 0)
+ goto netdev_init_failed;
+
+ dpaa_eth_sysfs_init(&net_dev->dev);
+
+ netif_info(priv, probe, net_dev, "Probed interface %s\n",
+ net_dev->name);
+
+ return 0;
+
+netdev_init_failed:
+napi_add_failed:
+ dpaa_napi_del(net_dev);
+alloc_percpu_failed:
+ dpaa_fq_free(dev, &priv->dpaa_fq_list);
+fq_alloc_failed:
+ qman_delete_cgr_safe(&priv->ingress_cgr);
+ qman_release_cgrid(priv->ingress_cgr.cgrid);
+rx_cgr_init_failed:
+ qman_delete_cgr_safe(&priv->cgr_data.cgr);
+ qman_release_cgrid(priv->cgr_data.cgr.cgrid);
+tx_cgr_init_failed:
+get_channel_failed:
+ dpaa_bps_free(priv);
+bp_create_failed:
+fq_probe_failed:
+dev_mask_failed:
+mac_probe_failed:
+ dev_set_drvdata(dev, NULL);
+ free_netdev(net_dev);
+alloc_etherdev_mq_failed:
+ for (i = 0; i < DPAA_BPS_NUM && dpaa_bps[i]; i++) {
+ if (atomic_read(&dpaa_bps[i]->refs) == 0)
+ devm_kfree(dev, dpaa_bps[i]);
+ }
+ return err;
+}
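+
+/* The error labels above unwind in reverse order of acquisition: NAPI
+ * instances, then the FQ list, then the ingress and egress CGRs, then the
+ * buffer pools, and finally the netdev itself. Keeping each label right
+ * after the last successfully completed step is what makes the single
+ * fall-through chain safe.
+ */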
+
+static int dpaa_remove(struct platform_device *pdev)
+{
+ struct net_device *net_dev;
+ struct dpaa_priv *priv;
+ struct device *dev;
+ int err;
+
+ dev = &pdev->dev;
+ net_dev = dev_get_drvdata(dev);
+
+ priv = netdev_priv(net_dev);
+
+ dpaa_eth_sysfs_remove(dev);
+
+ dev_set_drvdata(dev, NULL);
+ unregister_netdev(net_dev);
+
+ err = dpaa_fq_free(dev, &priv->dpaa_fq_list);
+
+ qman_delete_cgr_safe(&priv->ingress_cgr);
+ qman_release_cgrid(priv->ingress_cgr.cgrid);
+ qman_delete_cgr_safe(&priv->cgr_data.cgr);
+ qman_release_cgrid(priv->cgr_data.cgr.cgrid);
+
+ dpaa_napi_del(net_dev);
+
+ dpaa_bps_free(priv);
+
+ free_netdev(net_dev);
+
+ return err;
+}
+
+static const struct platform_device_id dpaa_devtype[] = {
+ {
+ .name = "dpaa-ethernet",
+ .driver_data = 0,
+ }, {
+ }
+};
+MODULE_DEVICE_TABLE(platform, dpaa_devtype);
+
+static struct platform_driver dpaa_driver = {
+ .driver = {
+ .name = KBUILD_MODNAME,
+ },
+ .id_table = dpaa_devtype,
+ .probe = dpaa_eth_probe,
+ .remove = dpaa_remove
+};
+
+static int __init dpaa_load(void)
+{
+ int err;
+
+ pr_debug("FSL DPAA Ethernet driver\n");
+
+ /* initialize dpaa_eth mirror values */
+ dpaa_rx_extra_headroom = fman_get_rx_extra_headroom();
+ dpaa_max_frm = fman_get_max_frm();
+
+ err = platform_driver_register(&dpaa_driver);
+ if (err < 0)
+ pr_err("Error, platform_driver_register() = %d\n", err);
+
+ return err;
+}
+module_init(dpaa_load);
+
+static void __exit dpaa_unload(void)
+{
+ platform_driver_unregister(&dpaa_driver);
+
+ /* Only one channel is used and needs to be released after all
+ * interfaces are removed
+ */
+ dpaa_release_channel();
+}
+module_exit(dpaa_unload);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DESCRIPTION("FSL DPAA Ethernet driver");
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.h b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.h
new file mode 100644
index 000000000000..1f9aebf3f3c5
--- /dev/null
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.h
@@ -0,0 +1,185 @@
+/* Copyright 2008 - 2016 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __DPAA_H
+#define __DPAA_H
+
+#include <linux/netdevice.h>
+#include <soc/fsl/qman.h>
+#include <soc/fsl/bman.h>
+
+#include "fman.h"
+#include "mac.h"
+#include "dpaa_eth_trace.h"
+
+#define DPAA_ETH_TXQ_NUM NR_CPUS
+
+#define DPAA_BPS_NUM 3 /* number of bpools per interface */
+
+/* More detailed FQ types - used for fine-grained WQ assignments */
+enum dpaa_fq_type {
+ FQ_TYPE_RX_DEFAULT = 1, /* Rx Default FQs */
+ FQ_TYPE_RX_ERROR, /* Rx Error FQs */
+ FQ_TYPE_TX, /* "Real" Tx FQs */
+ FQ_TYPE_TX_CONFIRM, /* Tx default Conf FQ (actually an Rx FQ) */
+ FQ_TYPE_TX_CONF_MQ, /* Tx conf FQs (one for each Tx FQ) */
+ FQ_TYPE_TX_ERROR, /* Tx Error FQs (these are actually Rx FQs) */
+};
+
+struct dpaa_fq {
+ struct qman_fq fq_base;
+ struct list_head list;
+ struct net_device *net_dev;
+ bool init;
+ u32 fqid;
+ u32 flags;
+ u16 channel;
+ u8 wq;
+ enum dpaa_fq_type fq_type;
+};
+
+struct dpaa_fq_cbs {
+ struct qman_fq rx_defq;
+ struct qman_fq tx_defq;
+ struct qman_fq rx_errq;
+ struct qman_fq tx_errq;
+ struct qman_fq egress_ern;
+};
+
+struct dpaa_bp {
+ /* device used in the DMA mapping operations */
+ struct device *dev;
+ /* current number of buffers in the buffer pool allotted to each CPU */
+ int __percpu *percpu_count;
+ /* all buffers allocated for this pool have this raw size */
+ size_t raw_size;
+ /* all buffers in this pool have this same usable size */
+ size_t size;
+ /* the buffer pools are initialized with config_count buffers for each
+ * CPU; at runtime the number of buffers per CPU is constantly brought
+ * back to this level
+ */
+ u16 config_count;
+ u8 bpid;
+ struct bman_pool *pool;
+ /* bpool can be seeded before use by this cb */
+ int (*seed_cb)(struct dpaa_bp *);
+ /* bpool can be emptied before freeing by this cb */
+ void (*free_buf_cb)(const struct dpaa_bp *, struct bm_buffer *);
+ atomic_t refs;
+};
+
+struct dpaa_rx_errors {
+ u64 dme; /* DMA Error */
+ u64 fpe; /* Frame Physical Error */
+ u64 fse; /* Frame Size Error */
+ u64 phe; /* Header Error */
+};
+
+/* Counters for QMan ERN frames - one counter per rejection code */
+struct dpaa_ern_cnt {
+ u64 cg_tdrop; /* Congestion group taildrop */
+ u64 wred; /* WRED congestion */
+ u64 err_cond; /* Error condition */
+ u64 early_window; /* Order restoration, frame too early */
+ u64 late_window; /* Order restoration, frame too late */
+ u64 fq_tdrop; /* FQ taildrop */
+ u64 fq_retired; /* FQ is retired */
+ u64 orp_zero; /* ORP disabled */
+};
+
+struct dpaa_napi_portal {
+ struct napi_struct napi;
+ struct qman_portal *p;
+ bool down;
+};
+
+struct dpaa_percpu_priv {
+ struct net_device *net_dev;
+ struct dpaa_napi_portal np;
+ u64 in_interrupt;
+ u64 tx_confirm;
+ /* fragmented (non-linear) skbuffs received from the stack */
+ u64 tx_frag_skbuffs;
+ struct rtnl_link_stats64 stats;
+ struct dpaa_rx_errors rx_errors;
+ struct dpaa_ern_cnt ern_cnt;
+};
+
+struct dpaa_buffer_layout {
+ u16 priv_data_size;
+};
+
+struct dpaa_priv {
+ struct dpaa_percpu_priv __percpu *percpu_priv;
+ struct dpaa_bp *dpaa_bps[DPAA_BPS_NUM];
+ /* Store here the needed Tx headroom for convenience and speed
+ * (even though it can be computed based on the fields of buf_layout)
+ */
+ u16 tx_headroom;
+ struct net_device *net_dev;
+ struct mac_device *mac_dev;
+ struct qman_fq *egress_fqs[DPAA_ETH_TXQ_NUM];
+ struct qman_fq *conf_fqs[DPAA_ETH_TXQ_NUM];
+
+ u16 channel;
+ struct list_head dpaa_fq_list;
+
+ u32 msg_enable; /* net_device message level */
+
+ struct {
+ /* All egress queues to a given net device belong to one
+ * (and the same) congestion group.
+ */
+ struct qman_cgr cgr;
+ /* If congested, when it began. Used for performance stats. */
+ u32 congestion_start_jiffies;
+ /* Number of jiffies the Tx port was congested. */
+ u32 congested_jiffies;
+ /* Counter for the number of times the CGR
+ * entered congestion state
+ */
+ u32 cgr_congested_count;
+ } cgr_data;
+ /* Use a per-port CGR for ingress traffic. */
+ bool use_ingress_cgr;
+ struct qman_cgr ingress_cgr;
+
+ struct dpaa_buffer_layout buf_layout[2];
+ u16 rx_headroom;
+};
+
+/* from dpaa_ethtool.c */
+extern const struct ethtool_ops dpaa_ethtool_ops;
+
+/* from dpaa_eth_sysfs.c */
+void dpaa_eth_sysfs_remove(struct device *dev);
+void dpaa_eth_sysfs_init(struct device *dev);
+#endif /* __DPAA_H */
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth_sysfs.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth_sysfs.c
new file mode 100644
index 000000000000..ec75d1c6fa89
--- /dev/null
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth_sysfs.c
@@ -0,0 +1,165 @@
+/* Copyright 2008-2016 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/io.h>
+#include <linux/of_net.h>
+#include "dpaa_eth.h"
+#include "mac.h"
+
+static ssize_t dpaa_eth_show_addr(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct dpaa_priv *priv = netdev_priv(to_net_dev(dev));
+ struct mac_device *mac_dev = priv->mac_dev;
+
+ if (mac_dev)
+ return sprintf(buf, "%llx",
+ (unsigned long long)mac_dev->res->start);
+ else
+ return sprintf(buf, "none");
+}
+
+static ssize_t dpaa_eth_show_fqids(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct dpaa_priv *priv = netdev_priv(to_net_dev(dev));
+ struct dpaa_fq *prev = NULL;
+ char *prevstr = NULL;
+ struct dpaa_fq *tmp;
+ struct dpaa_fq *fq;
+ u32 first_fqid = 0;
+ u32 last_fqid = 0;
+ ssize_t bytes = 0;
+ char *str;
+ int i = 0;
+
+ list_for_each_entry_safe(fq, tmp, &priv->dpaa_fq_list, list) {
+ switch (fq->fq_type) {
+ case FQ_TYPE_RX_DEFAULT:
+ str = "Rx default";
+ break;
+ case FQ_TYPE_RX_ERROR:
+ str = "Rx error";
+ break;
+ case FQ_TYPE_TX_CONFIRM:
+ str = "Tx default confirmation";
+ break;
+ case FQ_TYPE_TX_CONF_MQ:
+ str = "Tx confirmation (mq)";
+ break;
+ case FQ_TYPE_TX_ERROR:
+ str = "Tx error";
+ break;
+ case FQ_TYPE_TX:
+ str = "Tx";
+ break;
+ default:
+ str = "Unknown";
+ }
+
+ if (prev && (abs(fq->fqid - prev->fqid) != 1 ||
+ str != prevstr)) {
+ if (last_fqid == first_fqid)
+ bytes += sprintf(buf + bytes,
+ "%s: %d\n", prevstr, prev->fqid);
+ else
+ bytes += sprintf(buf + bytes,
+ "%s: %d - %d\n", prevstr,
+ first_fqid, last_fqid);
+ }
+
+ if (prev && abs(fq->fqid - prev->fqid) == 1 &&
+ str == prevstr) {
+ last_fqid = fq->fqid;
+ } else {
+ first_fqid = fq->fqid;
+ last_fqid = fq->fqid;
+ }
+
+ prev = fq;
+ prevstr = str;
+ i++;
+ }
+
+ if (prev) {
+ if (last_fqid == first_fqid)
+ bytes += sprintf(buf + bytes, "%s: %d\n", prevstr,
+ prev->fqid);
+ else
+ bytes += sprintf(buf + bytes, "%s: %d - %d\n", prevstr,
+ first_fqid, last_fqid);
+ }
+
+ return bytes;
+}
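+
+/* The loop above compresses the FQ list for display: consecutive FQIDs of
+ * the same type are folded into a "first - last" range, and a range (or a
+ * lone FQID) is flushed whenever the type changes or the numbering breaks.
+ * The trailing block flushes whatever range is still open at the end of
+ * the list.
+ */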
+
+static ssize_t dpaa_eth_show_bpids(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct dpaa_priv *priv = netdev_priv(to_net_dev(dev));
+ ssize_t bytes = 0;
+ int i = 0;
+
+ for (i = 0; i < DPAA_BPS_NUM; i++)
+ bytes += snprintf(buf + bytes, PAGE_SIZE - bytes, "%u\n",
+ priv->dpaa_bps[i]->bpid);
+
+ return bytes;
+}
+
+static struct device_attribute dpaa_eth_attrs[] = {
+ __ATTR(device_addr, 0444, dpaa_eth_show_addr, NULL),
+ __ATTR(fqids, 0444, dpaa_eth_show_fqids, NULL),
+ __ATTR(bpids, 0444, dpaa_eth_show_bpids, NULL),
+};
+
+void dpaa_eth_sysfs_init(struct device *dev)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(dpaa_eth_attrs); i++)
+ if (device_create_file(dev, &dpaa_eth_attrs[i])) {
+ dev_err(dev, "Error creating sysfs file\n");
+ while (i > 0)
+ device_remove_file(dev, &dpaa_eth_attrs[--i]);
+ return;
+ }
+}
+
+void dpaa_eth_sysfs_remove(struct device *dev)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(dpaa_eth_attrs); i++)
+ device_remove_file(dev, &dpaa_eth_attrs[i]);
+}
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth_trace.h b/drivers/net/ethernet/freescale/dpaa/dpaa_eth_trace.h
new file mode 100644
index 000000000000..409c1dc39430
--- /dev/null
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth_trace.h
@@ -0,0 +1,141 @@
+/* Copyright 2013-2015 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM dpaa_eth
+
+#if !defined(_DPAA_ETH_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _DPAA_ETH_TRACE_H
+
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include "dpaa_eth.h"
+#include <linux/tracepoint.h>
+
+#define fd_format_name(format) { qm_fd_##format, #format }
+#define fd_format_list \
+ fd_format_name(contig), \
+ fd_format_name(sg)
+
+/* This is used to declare a class of events.
+ * Individual events of this type will be defined below.
+ */
+
+/* Store details about a frame descriptor and the FQ on which it was
+ * transmitted/received.
+ */
+DECLARE_EVENT_CLASS(dpaa_eth_fd,
+ /* Trace function prototype */
+ TP_PROTO(struct net_device *netdev,
+ struct qman_fq *fq,
+ const struct qm_fd *fd),
+
+ /* Repeat argument list here */
+ TP_ARGS(netdev, fq, fd),
+
+ /* A structure containing the relevant information we want to record.
+ * Declare name and type for each normal element, name, type and size
+ * for arrays. Use __string for variable length strings.
+ */
+ TP_STRUCT__entry(
+ __field(u32, fqid)
+ __field(u64, fd_addr)
+ __field(u8, fd_format)
+ __field(u16, fd_offset)
+ __field(u32, fd_length)
+ __field(u32, fd_status)
+ __string(name, netdev->name)
+ ),
+
+ /* The function that assigns values to the above declared fields */
+ TP_fast_assign(
+ __entry->fqid = fq->fqid;
+ __entry->fd_addr = qm_fd_addr_get64(fd);
+ __entry->fd_format = qm_fd_get_format(fd);
+ __entry->fd_offset = qm_fd_get_offset(fd);
+ __entry->fd_length = qm_fd_get_length(fd);
+ __entry->fd_status = fd->status;
+ __assign_str(name, netdev->name);
+ ),
+
+ /* This is what gets printed when the trace event is triggered */
+ TP_printk("[%s] fqid=%d, fd: addr=0x%llx, format=%s, off=%u, len=%u, status=0x%08x",
+ __get_str(name), __entry->fqid, __entry->fd_addr,
+ __print_symbolic(__entry->fd_format, fd_format_list),
+ __entry->fd_offset, __entry->fd_length, __entry->fd_status)
+);
+
+/* Now declare events of the above type. Format is:
+ * DEFINE_EVENT(class, name, proto, args), with proto and args same as for class
+ */
+
+/* Tx (egress) fd */
+DEFINE_EVENT(dpaa_eth_fd, dpaa_tx_fd,
+
+ TP_PROTO(struct net_device *netdev,
+ struct qman_fq *fq,
+ const struct qm_fd *fd),
+
+ TP_ARGS(netdev, fq, fd)
+);
+
+/* Rx fd */
+DEFINE_EVENT(dpaa_eth_fd, dpaa_rx_fd,
+
+ TP_PROTO(struct net_device *netdev,
+ struct qman_fq *fq,
+ const struct qm_fd *fd),
+
+ TP_ARGS(netdev, fq, fd)
+);
+
+/* Tx confirmation fd */
+DEFINE_EVENT(dpaa_eth_fd, dpaa_tx_conf_fd,
+
+ TP_PROTO(struct net_device *netdev,
+ struct qman_fq *fq,
+ const struct qm_fd *fd),
+
+ TP_ARGS(netdev, fq, fd)
+);
+
+/* If only one event of a certain type needs to be declared, use TRACE_EVENT().
+ * The syntax is the same as for DECLARE_EVENT_CLASS().
+ */
+
+#endif /* _DPAA_ETH_TRACE_H */
+
+/* This must be outside ifdef _DPAA_ETH_TRACE_H */
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE dpaa_eth_trace
+#include <trace/define_trace.h>
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c b/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c
new file mode 100644
index 000000000000..27e7044667d1
--- /dev/null
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c
@@ -0,0 +1,417 @@
+/* Copyright 2008-2016 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/string.h>
+
+#include "dpaa_eth.h"
+#include "mac.h"
+
+static const char dpaa_stats_percpu[][ETH_GSTRING_LEN] = {
+ "interrupts",
+ "rx packets",
+ "tx packets",
+ "tx confirm",
+ "tx S/G",
+ "tx error",
+ "rx error",
+};
+
+static char dpaa_stats_global[][ETH_GSTRING_LEN] = {
+ /* dpa rx errors */
+ "rx dma error",
+ "rx frame physical error",
+ "rx frame size error",
+ "rx header error",
+
+ /* demultiplexing errors */
+ "qman cg_tdrop",
+ "qman wred",
+ "qman error cond",
+ "qman early window",
+ "qman late window",
+ "qman fq tdrop",
+ "qman fq retired",
+ "qman orp disabled",
+
+ /* congestion related stats */
+ "congestion time (ms)",
+ "entered congestion",
+ "congested (0/1)"
+};
+
+#define DPAA_STATS_PERCPU_LEN ARRAY_SIZE(dpaa_stats_percpu)
+#define DPAA_STATS_GLOBAL_LEN ARRAY_SIZE(dpaa_stats_global)
+
+static int dpaa_get_settings(struct net_device *net_dev,
+ struct ethtool_cmd *et_cmd)
+{
+ int err;
+
+ if (!net_dev->phydev) {
+ netdev_dbg(net_dev, "phy device not initialized\n");
+ return 0;
+ }
+
+ err = phy_ethtool_gset(net_dev->phydev, et_cmd);
+
+ return err;
+}
+
+static int dpaa_set_settings(struct net_device *net_dev,
+ struct ethtool_cmd *et_cmd)
+{
+ int err;
+
+ if (!net_dev->phydev) {
+ netdev_err(net_dev, "phy device not initialized\n");
+ return -ENODEV;
+ }
+
+ err = phy_ethtool_sset(net_dev->phydev, et_cmd);
+ if (err < 0)
+ netdev_err(net_dev, "phy_ethtool_sset() = %d\n", err);
+
+ return err;
+}
+
+static void dpaa_get_drvinfo(struct net_device *net_dev,
+ struct ethtool_drvinfo *drvinfo)
+{
+ int len;
+
+ strlcpy(drvinfo->driver, KBUILD_MODNAME,
+ sizeof(drvinfo->driver));
+ len = snprintf(drvinfo->version, sizeof(drvinfo->version),
+ "%X", 0);
+ len = snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
+ "%X", 0);
+
+ if (len >= sizeof(drvinfo->fw_version)) {
+ /* Truncated output */
+ netdev_notice(net_dev, "snprintf() = %d\n", len);
+ }
+ strlcpy(drvinfo->bus_info, dev_name(net_dev->dev.parent->parent),
+ sizeof(drvinfo->bus_info));
+}
+
+static u32 dpaa_get_msglevel(struct net_device *net_dev)
+{
+ return ((struct dpaa_priv *)netdev_priv(net_dev))->msg_enable;
+}
+
+static void dpaa_set_msglevel(struct net_device *net_dev,
+ u32 msg_enable)
+{
+ ((struct dpaa_priv *)netdev_priv(net_dev))->msg_enable = msg_enable;
+}
+
+static int dpaa_nway_reset(struct net_device *net_dev)
+{
+ int err;
+
+ if (!net_dev->phydev) {
+ netdev_err(net_dev, "phy device not initialized\n");
+ return -ENODEV;
+ }
+
+ err = 0;
+ if (net_dev->phydev->autoneg) {
+ err = phy_start_aneg(net_dev->phydev);
+ if (err < 0)
+ netdev_err(net_dev, "phy_start_aneg() = %d\n",
+ err);
+ }
+
+ return err;
+}
+
+static void dpaa_get_pauseparam(struct net_device *net_dev,
+ struct ethtool_pauseparam *epause)
+{
+ struct mac_device *mac_dev;
+ struct dpaa_priv *priv;
+
+ priv = netdev_priv(net_dev);
+ mac_dev = priv->mac_dev;
+
+ if (!net_dev->phydev) {
+ netdev_err(net_dev, "phy device not initialized\n");
+ return;
+ }
+
+ epause->autoneg = mac_dev->autoneg_pause;
+ epause->rx_pause = mac_dev->rx_pause_active;
+ epause->tx_pause = mac_dev->tx_pause_active;
+}
+
+static int dpaa_set_pauseparam(struct net_device *net_dev,
+ struct ethtool_pauseparam *epause)
+{
+ struct mac_device *mac_dev;
+ struct phy_device *phydev;
+ bool rx_pause, tx_pause;
+ struct dpaa_priv *priv;
+ u32 newadv, oldadv;
+ int err;
+
+ priv = netdev_priv(net_dev);
+ mac_dev = priv->mac_dev;
+
+ phydev = net_dev->phydev;
+ if (!phydev) {
+ netdev_err(net_dev, "phy device not initialized\n");
+ return -ENODEV;
+ }
+
+ if (!(phydev->supported & SUPPORTED_Pause) ||
+ (!(phydev->supported & SUPPORTED_Asym_Pause) &&
+ (epause->rx_pause != epause->tx_pause)))
+ return -EINVAL;
+
+ /* The MAC should know how to handle PAUSE frame autonegotiation before
+ * adjust_link is triggered by a forced renegotiation of sym/asym PAUSE
+ * settings.
+ */
+ mac_dev->autoneg_pause = !!epause->autoneg;
+ mac_dev->rx_pause_req = !!epause->rx_pause;
+ mac_dev->tx_pause_req = !!epause->tx_pause;
+
+ /* Determine the sym/asym advertised PAUSE capabilities from the desired
+ * rx/tx pause settings.
+ */
+ newadv = 0;
+ if (epause->rx_pause)
+ newadv = ADVERTISED_Pause | ADVERTISED_Asym_Pause;
+ if (epause->tx_pause)
+ newadv |= ADVERTISED_Asym_Pause;
+
+ oldadv = phydev->advertising &
+ (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
+
+ /* If there are differences between the old and the new advertised
+ * values, restart PHY autonegotiation and advertise the new values.
+ */
+ if (oldadv != newadv) {
+ phydev->advertising &= ~(ADVERTISED_Pause
+ | ADVERTISED_Asym_Pause);
+ phydev->advertising |= newadv;
+ if (phydev->autoneg) {
+ err = phy_start_aneg(phydev);
+ if (err < 0)
+ netdev_err(net_dev, "phy_start_aneg() = %d\n",
+ err);
+ }
+ }
+
+ fman_get_pause_cfg(mac_dev, &rx_pause, &tx_pause);
+ err = fman_set_mac_active_pause(mac_dev, rx_pause, tx_pause);
+ if (err < 0)
+ netdev_err(net_dev, "set_mac_active_pause() = %d\n", err);
+
+ return err;
+}
+
+static int dpaa_get_sset_count(struct net_device *net_dev, int type)
+{
+ unsigned int total_stats, num_stats;
+
+ num_stats = num_online_cpus() + 1;
+ total_stats = num_stats * (DPAA_STATS_PERCPU_LEN + DPAA_BPS_NUM) +
+ DPAA_STATS_GLOBAL_LEN;
+
+ switch (type) {
+ case ETH_SS_STATS:
+ return total_stats;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static void copy_stats(struct dpaa_percpu_priv *percpu_priv, int num_cpus,
+ int crr_cpu, u64 *bp_count, u64 *data)
+{
+ int num_values = num_cpus + 1;
+ int crr = 0, j;
+
+ /* update current CPU's stats and also add them to the total values */
+ data[crr * num_values + crr_cpu] = percpu_priv->in_interrupt;
+ data[crr++ * num_values + num_cpus] += percpu_priv->in_interrupt;
+
+ data[crr * num_values + crr_cpu] = percpu_priv->stats.rx_packets;
+ data[crr++ * num_values + num_cpus] += percpu_priv->stats.rx_packets;
+
+ data[crr * num_values + crr_cpu] = percpu_priv->stats.tx_packets;
+ data[crr++ * num_values + num_cpus] += percpu_priv->stats.tx_packets;
+
+ data[crr * num_values + crr_cpu] = percpu_priv->tx_confirm;
+ data[crr++ * num_values + num_cpus] += percpu_priv->tx_confirm;
+
+ data[crr * num_values + crr_cpu] = percpu_priv->tx_frag_skbuffs;
+ data[crr++ * num_values + num_cpus] += percpu_priv->tx_frag_skbuffs;
+
+ data[crr * num_values + crr_cpu] = percpu_priv->stats.tx_errors;
+ data[crr++ * num_values + num_cpus] += percpu_priv->stats.tx_errors;
+
+ data[crr * num_values + crr_cpu] = percpu_priv->stats.rx_errors;
+ data[crr++ * num_values + num_cpus] += percpu_priv->stats.rx_errors;
+
+ for (j = 0; j < DPAA_BPS_NUM; j++) {
+ data[crr * num_values + crr_cpu] = bp_count[j];
+ data[crr++ * num_values + num_cpus] += bp_count[j];
+ }
+}
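+
+/* copy_stats() fills 'data' as a matrix with num_cpus + 1 columns per
+ * statistic: column crr_cpu receives this CPU's value and the final column
+ * accumulates the running total, which is why the totals are written with
+ * += while the per-CPU cells are plain assignments.
+ */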
+
+static void dpaa_get_ethtool_stats(struct net_device *net_dev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ u64 bp_count[DPAA_BPS_NUM], cg_time, cg_num;
+ struct dpaa_percpu_priv *percpu_priv;
+ struct dpaa_rx_errors rx_errors;
+ unsigned int num_cpus, offset;
+ struct dpaa_ern_cnt ern_cnt;
+ struct dpaa_bp *dpaa_bp;
+ struct dpaa_priv *priv;
+ int total_stats, i, j;
+ bool cg_status;
+
+ total_stats = dpaa_get_sset_count(net_dev, ETH_SS_STATS);
+ priv = netdev_priv(net_dev);
+ num_cpus = num_online_cpus();
+
+ memset(&bp_count, 0, sizeof(bp_count));
+ memset(&rx_errors, 0, sizeof(struct dpaa_rx_errors));
+ memset(&ern_cnt, 0, sizeof(struct dpaa_ern_cnt));
+ memset(data, 0, total_stats * sizeof(u64));
+
+ for_each_online_cpu(i) {
+ percpu_priv = per_cpu_ptr(priv->percpu_priv, i);
+ for (j = 0; j < DPAA_BPS_NUM; j++) {
+ dpaa_bp = priv->dpaa_bps[j];
+ if (!dpaa_bp->percpu_count)
+ continue;
+ bp_count[j] = *(per_cpu_ptr(dpaa_bp->percpu_count, i));
+ }
+ rx_errors.dme += percpu_priv->rx_errors.dme;
+ rx_errors.fpe += percpu_priv->rx_errors.fpe;
+ rx_errors.fse += percpu_priv->rx_errors.fse;
+ rx_errors.phe += percpu_priv->rx_errors.phe;
+
+ ern_cnt.cg_tdrop += percpu_priv->ern_cnt.cg_tdrop;
+ ern_cnt.wred += percpu_priv->ern_cnt.wred;
+ ern_cnt.err_cond += percpu_priv->ern_cnt.err_cond;
+ ern_cnt.early_window += percpu_priv->ern_cnt.early_window;
+ ern_cnt.late_window += percpu_priv->ern_cnt.late_window;
+ ern_cnt.fq_tdrop += percpu_priv->ern_cnt.fq_tdrop;
+ ern_cnt.fq_retired += percpu_priv->ern_cnt.fq_retired;
+ ern_cnt.orp_zero += percpu_priv->ern_cnt.orp_zero;
+
+ copy_stats(percpu_priv, num_cpus, i, bp_count, data);
+ }
+
+ offset = (num_cpus + 1) * (DPAA_STATS_PERCPU_LEN + DPAA_BPS_NUM);
+ memcpy(data + offset, &rx_errors, sizeof(struct dpaa_rx_errors));
+
+ offset += sizeof(struct dpaa_rx_errors) / sizeof(u64);
+ memcpy(data + offset, &ern_cnt, sizeof(struct dpaa_ern_cnt));
+
+ /* gather congestion related counters */
+ cg_num = 0;
+ cg_status = 0;
+ cg_time = jiffies_to_msecs(priv->cgr_data.congested_jiffies);
+ if (qman_query_cgr_congested(&priv->cgr_data.cgr, &cg_status) == 0) {
+ cg_num = priv->cgr_data.cgr_congested_count;
+
+ /* reset congestion stats (like the QMan API does) */
+ priv->cgr_data.congested_jiffies = 0;
+ priv->cgr_data.cgr_congested_count = 0;
+ }
+
+ offset += sizeof(struct dpaa_ern_cnt) / sizeof(u64);
+ data[offset++] = cg_time;
+ data[offset++] = cg_num;
+ data[offset++] = cg_status;
+}
+
+static void dpaa_get_strings(struct net_device *net_dev, u32 stringset,
+ u8 *data)
+{
+ unsigned int i, j, num_cpus, size;
+ char string_cpu[ETH_GSTRING_LEN];
+ u8 *strings;
+
+ memset(string_cpu, 0, sizeof(string_cpu));
+ strings = data;
+ num_cpus = num_online_cpus();
+ size = DPAA_STATS_GLOBAL_LEN * ETH_GSTRING_LEN;
+
+ for (i = 0; i < DPAA_STATS_PERCPU_LEN; i++) {
+ for (j = 0; j < num_cpus; j++) {
+ snprintf(string_cpu, ETH_GSTRING_LEN, "%s [CPU %d]",
+ dpaa_stats_percpu[i], j);
+ memcpy(strings, string_cpu, ETH_GSTRING_LEN);
+ strings += ETH_GSTRING_LEN;
+ }
+ snprintf(string_cpu, ETH_GSTRING_LEN, "%s [TOTAL]",
+ dpaa_stats_percpu[i]);
+ memcpy(strings, string_cpu, ETH_GSTRING_LEN);
+ strings += ETH_GSTRING_LEN;
+ }
+ for (i = 0; i < DPAA_BPS_NUM; i++) {
+ for (j = 0; j < num_cpus; j++) {
+ snprintf(string_cpu, ETH_GSTRING_LEN,
+ "bpool %c [CPU %d]", 'a' + i, j);
+ memcpy(strings, string_cpu, ETH_GSTRING_LEN);
+ strings += ETH_GSTRING_LEN;
+ }
+ snprintf(string_cpu, ETH_GSTRING_LEN, "bpool %c [TOTAL]",
+ 'a' + i);
+ memcpy(strings, string_cpu, ETH_GSTRING_LEN);
+ strings += ETH_GSTRING_LEN;
+ }
+ memcpy(strings, dpaa_stats_global, size);
+}
+
+const struct ethtool_ops dpaa_ethtool_ops = {
+ .get_settings = dpaa_get_settings,
+ .set_settings = dpaa_set_settings,
+ .get_drvinfo = dpaa_get_drvinfo,
+ .get_msglevel = dpaa_get_msglevel,
+ .set_msglevel = dpaa_set_msglevel,
+ .nway_reset = dpaa_nway_reset,
+ .get_pauseparam = dpaa_get_pauseparam,
+ .set_pauseparam = dpaa_set_pauseparam,
+ .get_link = ethtool_op_get_link,
+ .get_sset_count = dpaa_get_sset_count,
+ .get_ethtool_stats = dpaa_get_ethtool_stats,
+ .get_strings = dpaa_get_strings,
+};
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 12aef1b15356..38160c2bebcb 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -1841,11 +1841,11 @@ static int fec_enet_clk_enable(struct net_device *ndev, bool enable)
ret = clk_prepare_enable(fep->clk_ahb);
if (ret)
return ret;
- if (fep->clk_enet_out) {
- ret = clk_prepare_enable(fep->clk_enet_out);
- if (ret)
- goto failed_clk_enet_out;
- }
+
+ ret = clk_prepare_enable(fep->clk_enet_out);
+ if (ret)
+ goto failed_clk_enet_out;
+
if (fep->clk_ptp) {
mutex_lock(&fep->ptp_clk_mutex);
ret = clk_prepare_enable(fep->clk_ptp);
@@ -1857,23 +1857,20 @@ static int fec_enet_clk_enable(struct net_device *ndev, bool enable)
}
mutex_unlock(&fep->ptp_clk_mutex);
}
- if (fep->clk_ref) {
- ret = clk_prepare_enable(fep->clk_ref);
- if (ret)
- goto failed_clk_ref;
- }
+
+ ret = clk_prepare_enable(fep->clk_ref);
+ if (ret)
+ goto failed_clk_ref;
} else {
clk_disable_unprepare(fep->clk_ahb);
- if (fep->clk_enet_out)
- clk_disable_unprepare(fep->clk_enet_out);
+ clk_disable_unprepare(fep->clk_enet_out);
if (fep->clk_ptp) {
mutex_lock(&fep->ptp_clk_mutex);
clk_disable_unprepare(fep->clk_ptp);
fep->ptp_clk_on = false;
mutex_unlock(&fep->ptp_clk_mutex);
}
- if (fep->clk_ref)
- clk_disable_unprepare(fep->clk_ref);
+ clk_disable_unprepare(fep->clk_ref);
}
return 0;
@@ -2365,16 +2362,6 @@ static inline void fec_enet_update_ethtool_stats(struct net_device *dev)
}
#endif /* !defined(CONFIG_M5272) */
-static int fec_enet_nway_reset(struct net_device *dev)
-{
- struct phy_device *phydev = dev->phydev;
-
- if (!phydev)
- return -ENODEV;
-
- return genphy_restart_aneg(phydev);
-}
-
/* ITR clock source is enet system clock (clk_ahb).
* TCTT unit is cycle_ns * 64 cycle
* So, the ICTT value = X us / (cycle_ns * 64)
@@ -2574,7 +2561,7 @@ static const struct ethtool_ops fec_enet_ethtool_ops = {
.get_drvinfo = fec_enet_get_drvinfo,
.get_regs_len = fec_enet_get_regs_len,
.get_regs = fec_enet_get_regs,
- .nway_reset = fec_enet_nway_reset,
+ .nway_reset = phy_ethtool_nway_reset,
.get_link = ethtool_op_get_link,
.get_coalesce = fec_enet_get_coalesce,
.set_coalesce = fec_enet_set_coalesce,
@@ -3075,7 +3062,6 @@ static const struct net_device_ops fec_netdev_ops = {
.ndo_stop = fec_enet_close,
.ndo_start_xmit = fec_enet_start_xmit,
.ndo_set_rx_mode = set_multicast_list,
- .ndo_change_mtu = eth_change_mtu,
.ndo_validate_addr = eth_validate_addr,
.ndo_tx_timeout = fec_timeout,
.ndo_set_mac_address = fec_set_mac_address,
diff --git a/drivers/net/ethernet/freescale/fec_mpc52xx.c b/drivers/net/ethernet/freescale/fec_mpc52xx.c
index 446ae9d60c71..aa8cf5d2a53c 100644
--- a/drivers/net/ethernet/freescale/fec_mpc52xx.c
+++ b/drivers/net/ethernet/freescale/fec_mpc52xx.c
@@ -802,7 +802,6 @@ static const struct net_device_ops mpc52xx_fec_netdev_ops = {
.ndo_set_mac_address = mpc52xx_fec_set_mac_address,
.ndo_validate_addr = eth_validate_addr,
.ndo_do_ioctl = mpc52xx_fec_ioctl,
- .ndo_change_mtu = eth_change_mtu,
.ndo_tx_timeout = mpc52xx_fec_tx_timeout,
.ndo_get_stats = mpc52xx_fec_get_stats,
#ifdef CONFIG_NET_POLL_CONTROLLER
diff --git a/drivers/net/ethernet/freescale/fman/Kconfig b/drivers/net/ethernet/freescale/fman/Kconfig
index 79b7c84b7869..dc0850b3b517 100644
--- a/drivers/net/ethernet/freescale/fman/Kconfig
+++ b/drivers/net/ethernet/freescale/fman/Kconfig
@@ -1,6 +1,6 @@
config FSL_FMAN
tristate "FMan support"
- depends on FSL_SOC || COMPILE_TEST
+ depends on FSL_SOC || ARCH_LAYERSCAPE || COMPILE_TEST
select GENERIC_ALLOCATOR
select PHYLIB
default n
diff --git a/drivers/net/ethernet/freescale/fman/fman.c b/drivers/net/ethernet/freescale/fman/fman.c
index dafd9e1baba2..f60845f0c6ca 100644
--- a/drivers/net/ethernet/freescale/fman/fman.c
+++ b/drivers/net/ethernet/freescale/fman/fman.c
@@ -1890,6 +1890,7 @@ static int fman_reset(struct fman *fman)
goto _return;
} else {
+#ifdef CONFIG_PPC
struct device_node *guts_node;
struct ccsr_guts __iomem *guts_regs;
u32 devdisr2, reg;
@@ -1921,6 +1922,7 @@ static int fman_reset(struct fman *fman)
/* Enable all MACs */
iowrite32be(reg, &guts_regs->devdisr2);
+#endif
/* Perform FMan reset */
iowrite32be(FPM_RSTC_FM_RESET, &fman->fpm_regs->fm_rstc);
@@ -1932,25 +1934,31 @@ static int fman_reset(struct fman *fman)
} while (((ioread32be(&fman->fpm_regs->fm_rstc)) &
FPM_RSTC_FM_RESET) && --count);
if (count == 0) {
+#ifdef CONFIG_PPC
iounmap(guts_regs);
of_node_put(guts_node);
+#endif
err = -EBUSY;
goto _return;
}
+#ifdef CONFIG_PPC
/* Restore devdisr2 value */
iowrite32be(devdisr2, &guts_regs->devdisr2);
iounmap(guts_regs);
of_node_put(guts_node);
+#endif
goto _return;
+#ifdef CONFIG_PPC
guts_regs:
of_node_put(guts_node);
guts_node:
dev_dbg(fman->dev, "%s: Didn't perform FManV3 reset due to Errata A007273!\n",
__func__);
+#endif
}
_return:
return err;
@@ -2868,6 +2876,13 @@ static struct fman *read_dts_node(struct platform_device *of_dev)
fman->dev = &of_dev->dev;
+ err = of_platform_populate(fm_node, NULL, NULL, &of_dev->dev);
+ if (err) {
+ dev_err(&of_dev->dev, "%s: of_platform_populate() failed\n",
+ __func__);
+ goto fman_free;
+ }
+
return fman;
fman_node_put:
diff --git a/drivers/net/ethernet/freescale/fman/mac.c b/drivers/net/ethernet/freescale/fman/mac.c
index 736db9d9b0ad..0b31f8502ada 100644
--- a/drivers/net/ethernet/freescale/fman/mac.c
+++ b/drivers/net/ethernet/freescale/fman/mac.c
@@ -594,6 +594,7 @@ static const u16 phy2speed[] = {
[PHY_INTERFACE_MODE_RGMII_RXID] = SPEED_1000,
[PHY_INTERFACE_MODE_RGMII_TXID] = SPEED_1000,
[PHY_INTERFACE_MODE_RTBI] = SPEED_1000,
+ [PHY_INTERFACE_MODE_QSGMII] = SPEED_1000,
[PHY_INTERFACE_MODE_XGMII] = SPEED_10000
};
@@ -879,13 +880,17 @@ static int mac_probe(struct platform_device *_of_dev)
priv->fixed_link = kzalloc(sizeof(*priv->fixed_link),
GFP_KERNEL);
- if (!priv->fixed_link)
+ if (!priv->fixed_link) {
+ err = -ENOMEM;
goto _return_dev_set_drvdata;
+ }
priv->phy_node = of_node_get(mac_node);
phy = of_phy_find_device(priv->phy_node);
- if (!phy)
+ if (!phy) {
+ err = -EINVAL;
goto _return_dev_set_drvdata;
+ }
priv->fixed_link->link = phy->link;
priv->fixed_link->speed = phy->speed;
diff --git a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
index 4b86260584a0..d9f3a480ca1b 100644
--- a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
+++ b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
@@ -118,22 +118,22 @@ static int fs_enet_napi(struct napi_struct *napi, int budget)
BD_ENET_TX_RL | BD_ENET_TX_UN | BD_ENET_TX_CSL)) {
if (sc & BD_ENET_TX_HB) /* No heartbeat */
- fep->stats.tx_heartbeat_errors++;
+ dev->stats.tx_heartbeat_errors++;
if (sc & BD_ENET_TX_LC) /* Late collision */
- fep->stats.tx_window_errors++;
+ dev->stats.tx_window_errors++;
if (sc & BD_ENET_TX_RL) /* Retrans limit */
- fep->stats.tx_aborted_errors++;
+ dev->stats.tx_aborted_errors++;
if (sc & BD_ENET_TX_UN) /* Underrun */
- fep->stats.tx_fifo_errors++;
+ dev->stats.tx_fifo_errors++;
if (sc & BD_ENET_TX_CSL) /* Carrier lost */
- fep->stats.tx_carrier_errors++;
+ dev->stats.tx_carrier_errors++;
if (sc & (BD_ENET_TX_LC | BD_ENET_TX_RL | BD_ENET_TX_UN)) {
- fep->stats.tx_errors++;
+ dev->stats.tx_errors++;
do_restart = 1;
}
} else
- fep->stats.tx_packets++;
+ dev->stats.tx_packets++;
if (sc & BD_ENET_TX_READY) {
dev_warn(fep->dev,
@@ -145,7 +145,7 @@ static int fs_enet_napi(struct napi_struct *napi, int budget)
* but we eventually sent the packet OK.
*/
if (sc & BD_ENET_TX_DEF)
- fep->stats.collisions++;
+ dev->stats.collisions++;
/* unmap */
if (fep->mapped_as_page[dirtyidx])
@@ -212,19 +212,19 @@ static int fs_enet_napi(struct napi_struct *napi, int budget)
*/
if (sc & (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_CL |
BD_ENET_RX_NO | BD_ENET_RX_CR | BD_ENET_RX_OV)) {
- fep->stats.rx_errors++;
+ dev->stats.rx_errors++;
/* Frame too long or too short. */
if (sc & (BD_ENET_RX_LG | BD_ENET_RX_SH))
- fep->stats.rx_length_errors++;
+ dev->stats.rx_length_errors++;
/* Frame alignment */
if (sc & (BD_ENET_RX_NO | BD_ENET_RX_CL))
- fep->stats.rx_frame_errors++;
+ dev->stats.rx_frame_errors++;
/* CRC Error */
if (sc & BD_ENET_RX_CR)
- fep->stats.rx_crc_errors++;
+ dev->stats.rx_crc_errors++;
/* FIFO overrun */
if (sc & BD_ENET_RX_OV)
- fep->stats.rx_crc_errors++;
+ dev->stats.rx_crc_errors++;
skbn = fep->rx_skbuff[curidx];
} else {
@@ -233,9 +233,9 @@ static int fs_enet_napi(struct napi_struct *napi, int budget)
/*
* Process the incoming frame.
*/
- fep->stats.rx_packets++;
+ dev->stats.rx_packets++;
pkt_len = CBDR_DATLEN(bdp) - 4; /* remove CRC */
- fep->stats.rx_bytes += pkt_len + 4;
+ dev->stats.rx_bytes += pkt_len + 4;
if (pkt_len <= fpi->rx_copybreak) {
/* +2 to make IP header L1 cache aligned */
@@ -277,7 +277,7 @@ static int fs_enet_napi(struct napi_struct *napi, int budget)
received++;
netif_receive_skb(skb);
} else {
- fep->stats.rx_dropped++;
+ dev->stats.rx_dropped++;
skbn = skb;
}
}
@@ -543,7 +543,7 @@ static int fs_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
curidx = bdp - fep->tx_bd_base;
len = skb->len;
- fep->stats.tx_bytes += len;
+ dev->stats.tx_bytes += len;
if (nr_frags)
len -= skb->data_len;
fep->tx_free -= nr_frags + 1;
@@ -619,7 +619,7 @@ static void fs_timeout(struct net_device *dev)
unsigned long flags;
int wake = 0;
- fep->stats.tx_errors++;
+ dev->stats.tx_errors++;
spin_lock_irqsave(&fep->lock, flags);
@@ -774,12 +774,6 @@ static int fs_enet_close(struct net_device *dev)
return 0;
}
-static struct net_device_stats *fs_enet_get_stats(struct net_device *dev)
-{
- struct fs_enet_private *fep = netdev_priv(dev);
- return &fep->stats;
-}
-
/*************************************************************************/
static void fs_get_drvinfo(struct net_device *dev,
@@ -813,11 +807,6 @@ static void fs_get_regs(struct net_device *dev, struct ethtool_regs *regs,
regs->version = 0;
}
-static int fs_nway_reset(struct net_device *dev)
-{
- return 0;
-}
-
static u32 fs_get_msglevel(struct net_device *dev)
{
struct fs_enet_private *fep = netdev_priv(dev);
@@ -871,7 +860,7 @@ static int fs_set_tunable(struct net_device *dev,
static const struct ethtool_ops fs_ethtool_ops = {
.get_drvinfo = fs_get_drvinfo,
.get_regs_len = fs_get_regs_len,
- .nway_reset = fs_nway_reset,
+ .nway_reset = phy_ethtool_nway_reset,
.get_link = ethtool_op_get_link,
.get_msglevel = fs_get_msglevel,
.set_msglevel = fs_set_msglevel,
@@ -905,14 +894,12 @@ extern void fs_mii_disconnect(struct net_device *dev);
static const struct net_device_ops fs_enet_netdev_ops = {
.ndo_open = fs_enet_open,
.ndo_stop = fs_enet_close,
- .ndo_get_stats = fs_enet_get_stats,
.ndo_start_xmit = fs_enet_start_xmit,
.ndo_tx_timeout = fs_timeout,
.ndo_set_rx_mode = fs_set_multicast_list,
.ndo_do_ioctl = fs_ioctl,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
- .ndo_change_mtu = eth_change_mtu,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = fs_enet_netpoll,
#endif
diff --git a/drivers/net/ethernet/freescale/fs_enet/fs_enet.h b/drivers/net/ethernet/freescale/fs_enet/fs_enet.h
index fee24c822fad..5ce516c8a62a 100644
--- a/drivers/net/ethernet/freescale/fs_enet/fs_enet.h
+++ b/drivers/net/ethernet/freescale/fs_enet/fs_enet.h
@@ -137,7 +137,6 @@ struct fs_enet_private {
cbd_t __iomem *cur_rx;
cbd_t __iomem *cur_tx;
int tx_free;
- struct net_device_stats stats;
struct timer_list phy_timer_list;
const struct phy_info *phy;
u32 msg_enable;
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index 9061c2f82b9c..756f7e763d5f 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -1339,7 +1339,10 @@ static int gfar_probe(struct platform_device *ofdev)
/* Fill in the dev structure */
dev->watchdog_timeo = TX_TIMEOUT;
+ /* MTU range: 50 - 9586 */
dev->mtu = 1500;
+ dev->min_mtu = 50;
+ dev->max_mtu = GFAR_JUMBO_FRAME_SIZE - ETH_HLEN;
dev->netdev_ops = &gfar_netdev_ops;
dev->ethtool_ops = &gfar_ethtool_ops;
@@ -2600,12 +2603,6 @@ static int gfar_set_mac_address(struct net_device *dev)
static int gfar_change_mtu(struct net_device *dev, int new_mtu)
{
struct gfar_private *priv = netdev_priv(dev);
- int frame_size = new_mtu + ETH_HLEN;
-
- if ((frame_size < 64) || (frame_size > GFAR_JUMBO_FRAME_SIZE)) {
- netif_err(priv, drv, dev, "Invalid MTU setting\n");
- return -EINVAL;
- }
while (test_and_set_bit_lock(GFAR_RESETTING, &priv->state))
cpu_relax();
diff --git a/drivers/net/ethernet/freescale/gianfar_ptp.c b/drivers/net/ethernet/freescale/gianfar_ptp.c
index 57798814160d..721be13081f9 100644
--- a/drivers/net/ethernet/freescale/gianfar_ptp.c
+++ b/drivers/net/ethernet/freescale/gianfar_ptp.c
@@ -72,7 +72,7 @@ struct gianfar_ptp_registers {
/* Bit definitions for the TMR_CTRL register */
#define ALM1P (1<<31) /* Alarm1 output polarity */
#define ALM2P (1<<30) /* Alarm2 output polarity */
-#define FS (1<<28) /* FIPER start indication */
+#define FIPERST (1<<28) /* FIPER start indication */
#define PP1L (1<<27) /* Fiper1 pulse loopback mode enabled. */
#define PP2L (1<<26) /* Fiper2 pulse loopback mode enabled. */
#define TCLK_PERIOD_SHIFT (16) /* 1588 timer reference clock period. */
@@ -280,21 +280,26 @@ static irqreturn_t isr(int irq, void *priv)
* PTP clock operations
*/
-static int ptp_gianfar_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
+static int ptp_gianfar_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
{
- u64 adj;
- u32 diff, tmr_add;
+ u64 adj, diff;
+ u32 tmr_add;
int neg_adj = 0;
struct etsects *etsects = container_of(ptp, struct etsects, caps);
- if (ppb < 0) {
+ if (scaled_ppm < 0) {
neg_adj = 1;
- ppb = -ppb;
+ scaled_ppm = -scaled_ppm;
}
tmr_add = etsects->tmr_add;
adj = tmr_add;
- adj *= ppb;
- diff = div_u64(adj, 1000000000ULL);
+
+ /* calculate diff as adj*(scaled_ppm/65536)/1000000
+ * and round() to the nearest integer
+ */
+ adj *= scaled_ppm;
+ diff = div_u64(adj, 8000000);
+ diff = (diff >> 13) + ((diff >> 12) & 1);
tmr_add = neg_adj ? tmr_add - diff : tmr_add + diff;
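
The two-stage divide above exists because div_u64() only takes a 32-bit divisor and 65536 * 1000000 does not fit in one; the driver instead divides by 8000000 and applies the remaining factor of 8192 as a shift, with bit 12 deciding round-to-nearest (8000000 * 8192 == 65536 * 1000000). A quick user-space check of the arithmetic, with assumed values:

#include <stdint.h>
#include <stdio.h>

/* diff = tmr_add * (scaled_ppm / 65536) / 1000000, rounded to nearest */
static uint64_t adjfine_diff(uint64_t tmr_add, uint64_t scaled_ppm)
{
	uint64_t diff = (tmr_add * scaled_ppm) / 8000000;

	return (diff >> 13) + ((diff >> 12) & 1);
}

int main(void)
{
	/* 1 ppm is scaled_ppm == 65536; expect 100000000 / 1000000 == 100 */
	printf("%llu\n", (unsigned long long)adjfine_diff(100000000, 65536));
	return 0;
}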
@@ -415,7 +420,7 @@ static struct ptp_clock_info ptp_gianfar_caps = {
.n_per_out = 0,
.n_pins = 0,
.pps = 1,
- .adjfreq = ptp_gianfar_adjfreq,
+ .adjfine = ptp_gianfar_adjfine,
.adjtime = ptp_gianfar_adjtime,
.gettime64 = ptp_gianfar_gettime,
.settime64 = ptp_gianfar_settime,
@@ -502,7 +507,7 @@ static int gianfar_ptp_probe(struct platform_device *dev)
gfar_write(&etsects->regs->tmr_fiper1, etsects->tmr_fiper1);
gfar_write(&etsects->regs->tmr_fiper2, etsects->tmr_fiper2);
set_alarm(etsects);
- gfar_write(&etsects->regs->tmr_ctrl, tmr_ctrl|FS|RTPE|TE|FRD);
+ gfar_write(&etsects->regs->tmr_ctrl, tmr_ctrl|FIPERST|RTPE|TE|FRD);
spin_unlock_irqrestore(&etsects->lock, flags);
diff --git a/drivers/net/ethernet/freescale/ucc_geth.c b/drivers/net/ethernet/freescale/ucc_geth.c
index f76d33279454..53c5fcf1436c 100644
--- a/drivers/net/ethernet/freescale/ucc_geth.c
+++ b/drivers/net/ethernet/freescale/ucc_geth.c
@@ -3681,7 +3681,6 @@ static const struct net_device_ops ucc_geth_netdev_ops = {
.ndo_start_xmit = ucc_geth_start_xmit,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = ucc_geth_set_mac_addr,
- .ndo_change_mtu = eth_change_mtu,
.ndo_set_rx_mode = ucc_geth_set_multi,
.ndo_tx_timeout = ucc_geth_timeout,
.ndo_do_ioctl = ucc_geth_ioctl,
diff --git a/drivers/net/ethernet/freescale/ucc_geth_ethtool.c b/drivers/net/ethernet/freescale/ucc_geth_ethtool.c
index 812a968a78e9..8ba636f61b50 100644
--- a/drivers/net/ethernet/freescale/ucc_geth_ethtool.c
+++ b/drivers/net/ethernet/freescale/ucc_geth_ethtool.c
@@ -332,13 +332,6 @@ static void uec_get_ethtool_stats(struct net_device *netdev,
}
}
-static int uec_nway_reset(struct net_device *netdev)
-{
- struct ucc_geth_private *ugeth = netdev_priv(netdev);
-
- return phy_start_aneg(ugeth->phydev);
-}
-
/* Report driver information */
static void
uec_get_drvinfo(struct net_device *netdev,
@@ -394,7 +387,7 @@ static const struct ethtool_ops uec_ethtool_ops = {
.get_regs = uec_get_regs,
.get_msglevel = uec_get_msglevel,
.set_msglevel = uec_set_msglevel,
- .nway_reset = uec_nway_reset,
+ .nway_reset = phy_ethtool_nway_reset,
.get_link = ethtool_op_get_link,
.get_ringparam = uec_get_ringparam,
.set_ringparam = uec_set_ringparam,
diff --git a/drivers/net/ethernet/fujitsu/fmvj18x_cs.c b/drivers/net/ethernet/fujitsu/fmvj18x_cs.c
index 399cfd217288..51c4abc51bf4 100644
--- a/drivers/net/ethernet/fujitsu/fmvj18x_cs.c
+++ b/drivers/net/ethernet/fujitsu/fmvj18x_cs.c
@@ -225,7 +225,6 @@ static const struct net_device_ops fjn_netdev_ops = {
.ndo_tx_timeout = fjn_tx_timeout,
.ndo_set_config = fjn_config,
.ndo_set_rx_mode = set_rx_mode,
- .ndo_change_mtu = eth_change_mtu,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
};
diff --git a/drivers/net/ethernet/hisilicon/hip04_eth.c b/drivers/net/ethernet/hisilicon/hip04_eth.c
index 39778892b3b3..97b184774784 100644
--- a/drivers/net/ethernet/hisilicon/hip04_eth.c
+++ b/drivers/net/ethernet/hisilicon/hip04_eth.c
@@ -769,7 +769,6 @@ static const struct net_device_ops hip04_netdev_ops = {
.ndo_set_mac_address = hip04_set_mac_address,
.ndo_tx_timeout = hip04_timeout,
.ndo_validate_addr = eth_validate_addr,
- .ndo_change_mtu = eth_change_mtu,
};
static int hip04_alloc_ring(struct net_device *ndev, struct device *d)
@@ -829,6 +828,7 @@ static int hip04_mac_probe(struct platform_device *pdev)
priv = netdev_priv(ndev);
priv->ndev = ndev;
platform_set_drvdata(pdev, ndev);
+ SET_NETDEV_DEV(ndev, &pdev->dev);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
priv->base = devm_ioremap_resource(d, res);
@@ -898,14 +898,12 @@ static int hip04_mac_probe(struct platform_device *pdev)
INIT_WORK(&priv->tx_timeout_task, hip04_tx_timeout_task);
- ether_setup(ndev);
ndev->netdev_ops = &hip04_netdev_ops;
ndev->ethtool_ops = &hip04_ethtool_ops;
ndev->watchdog_timeo = TX_TIMEOUT;
ndev->priv_flags |= IFF_UNICAST_FLT;
ndev->irq = irq;
netif_napi_add(ndev, &priv->napi, hip04_rx_poll, NAPI_POLL_WEIGHT);
- SET_NETDEV_DEV(ndev, &pdev->dev);
hip04_reset_ppe(priv);
if (priv->phy_mode == PHY_INTERFACE_MODE_MII)
diff --git a/drivers/net/ethernet/hisilicon/hisi_femac.c b/drivers/net/ethernet/hisilicon/hisi_femac.c
index ced185962ef8..979852d56f31 100644
--- a/drivers/net/ethernet/hisilicon/hisi_femac.c
+++ b/drivers/net/ethernet/hisilicon/hisi_femac.c
@@ -712,7 +712,6 @@ static const struct net_device_ops hisi_femac_netdev_ops = {
.ndo_do_ioctl = hisi_femac_net_ioctl,
.ndo_set_mac_address = hisi_femac_set_mac_address,
.ndo_set_rx_mode = hisi_femac_net_set_rx_mode,
- .ndo_change_mtu = eth_change_mtu,
};
static void hisi_femac_core_reset(struct hisi_femac_priv *priv)
@@ -806,6 +805,7 @@ static int hisi_femac_drv_probe(struct platform_device *pdev)
return -ENOMEM;
platform_set_drvdata(pdev, ndev);
+ SET_NETDEV_DEV(ndev, &pdev->dev);
priv = netdev_priv(ndev);
priv->dev = dev;
@@ -883,7 +883,6 @@ static int hisi_femac_drv_probe(struct platform_device *pdev)
ndev->netdev_ops = &hisi_femac_netdev_ops;
ndev->ethtool_ops = &hisi_femac_ethtools_ops;
netif_napi_add(ndev, &priv->napi, hisi_femac_poll, FEMAC_POLL_WEIGHT);
- SET_NETDEV_DEV(ndev, &pdev->dev);
hisi_femac_port_init(priv);
diff --git a/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c b/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c
index e69a6bed31a9..418ca1f3774a 100644
--- a/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c
+++ b/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c
@@ -11,8 +11,10 @@
#include <linux/interrupt.h>
#include <linux/etherdevice.h>
#include <linux/platform_device.h>
+#include <linux/of_device.h>
#include <linux/of_net.h>
#include <linux/of_mdio.h>
+#include <linux/reset.h>
#include <linux/clk.h>
#include <linux/circ_buf.h>
@@ -183,12 +185,28 @@
#define DESC_DATA_LEN_OFF 16
#define DESC_BUFF_LEN_OFF 0
#define DESC_DATA_MASK 0x7ff
+#define DESC_SG BIT(30)
+#define DESC_FRAGS_NUM_OFF 11
/* DMA descriptor ring helpers */
#define dma_ring_incr(n, s) (((n) + 1) & ((s) - 1))
#define dma_cnt(n) ((n) >> 5)
#define dma_byte(n) ((n) << 5)
+#define HW_CAP_TSO BIT(0)
+#define GEMAC_V1 0
+#define GEMAC_V2 (GEMAC_V1 | HW_CAP_TSO)
+#define HAS_CAP_TSO(hw_cap) ((hw_cap) & HW_CAP_TSO)
+
+#define PHY_RESET_DELAYS_PROPERTY "hisilicon,phy-reset-delays-us"
+
+enum phy_reset_delays {
+ PRE_DELAY,
+ PULSE,
+ POST_DELAY,
+ DELAYS_NUM,
+};
+
struct hix5hd2_desc {
__le32 buff_addr;
__le32 cmd;
@@ -201,6 +219,27 @@ struct hix5hd2_desc_sw {
unsigned int size;
};
+struct hix5hd2_sg_desc_ring {
+ struct sg_desc *desc;
+ dma_addr_t phys_addr;
+};
+
+struct frags_info {
+ __le32 addr;
+ __le32 size;
+};
+
+/* max number of skb frags supported by the hardware */
+#define SG_MAX_SKB_FRAGS 17
+struct sg_desc {
+ __le32 total_len;
+ __le32 resvd0;
+ __le32 linear_addr;
+ __le32 linear_len;
+ /* reserve one more frag for memory alignment */
+ struct frags_info frags[SG_MAX_SKB_FRAGS + 1];
+};
+
#define QUEUE_NUMS 4
struct hix5hd2_priv {
struct hix5hd2_desc_sw pool[QUEUE_NUMS];
@@ -208,6 +247,7 @@ struct hix5hd2_priv {
#define rx_bq pool[1]
#define tx_bq pool[2]
#define tx_rq pool[3]
+ struct hix5hd2_sg_desc_ring tx_ring;
void __iomem *base;
void __iomem *ctrl_base;
@@ -221,15 +261,30 @@ struct hix5hd2_priv {
struct device_node *phy_node;
phy_interface_t phy_mode;
+ unsigned long hw_cap;
unsigned int speed;
unsigned int duplex;
- struct clk *clk;
+ struct clk *mac_core_clk;
+ struct clk *mac_ifc_clk;
+ struct reset_control *mac_core_rst;
+ struct reset_control *mac_ifc_rst;
+ struct reset_control *phy_rst;
+ u32 phy_reset_delays[DELAYS_NUM];
struct mii_bus *bus;
struct napi_struct napi;
struct work_struct tx_timeout_task;
};
+static inline void hix5hd2_mac_interface_reset(struct hix5hd2_priv *priv)
+{
+ if (!priv->mac_ifc_rst)
+ return;
+
+ reset_control_assert(priv->mac_ifc_rst);
+ reset_control_deassert(priv->mac_ifc_rst);
+}
+
static void hix5hd2_config_port(struct net_device *dev, u32 speed, u32 duplex)
{
struct hix5hd2_priv *priv = netdev_priv(dev);
@@ -262,6 +317,7 @@ static void hix5hd2_config_port(struct net_device *dev, u32 speed, u32 duplex)
if (duplex)
val |= GMAC_FULL_DUPLEX;
writel_relaxed(val, priv->ctrl_base);
+ hix5hd2_mac_interface_reset(priv);
writel_relaxed(BIT_MODE_CHANGE_EN, priv->base + MODE_CHANGE_EN);
if (speed == SPEED_1000)
@@ -511,6 +567,27 @@ next:
return num;
}
+static void hix5hd2_clean_sg_desc(struct hix5hd2_priv *priv,
+ struct sk_buff *skb, u32 pos)
+{
+ struct sg_desc *desc;
+ dma_addr_t addr;
+ u32 len;
+ int i;
+
+ desc = priv->tx_ring.desc + pos;
+
+ addr = le32_to_cpu(desc->linear_addr);
+ len = le32_to_cpu(desc->linear_len);
+ dma_unmap_single(priv->dev, addr, len, DMA_TO_DEVICE);
+
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ addr = le32_to_cpu(desc->frags[i].addr);
+ len = le32_to_cpu(desc->frags[i].size);
+ dma_unmap_page(priv->dev, addr, len, DMA_TO_DEVICE);
+ }
+}
+
static void hix5hd2_xmit_reclaim(struct net_device *dev)
{
struct sk_buff *skb;
@@ -538,8 +615,15 @@ static void hix5hd2_xmit_reclaim(struct net_device *dev)
pkts_compl++;
bytes_compl += skb->len;
desc = priv->tx_rq.desc + pos;
- addr = le32_to_cpu(desc->buff_addr);
- dma_unmap_single(priv->dev, addr, skb->len, DMA_TO_DEVICE);
+
+ if (skb_shinfo(skb)->nr_frags) {
+ hix5hd2_clean_sg_desc(priv, skb, pos);
+ } else {
+ addr = le32_to_cpu(desc->buff_addr);
+ dma_unmap_single(priv->dev, addr, skb->len,
+ DMA_TO_DEVICE);
+ }
+
priv->tx_skb[pos] = NULL;
dev_consume_skb_any(skb);
pos = dma_ring_incr(pos, TX_DESC_NUM);
@@ -600,12 +684,66 @@ static irqreturn_t hix5hd2_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
}
+static u32 hix5hd2_get_desc_cmd(struct sk_buff *skb, unsigned long hw_cap)
+{
+ u32 cmd = 0;
+
+ if (HAS_CAP_TSO(hw_cap)) {
+ if (skb_shinfo(skb)->nr_frags)
+ cmd |= DESC_SG;
+ cmd |= skb_shinfo(skb)->nr_frags << DESC_FRAGS_NUM_OFF;
+ } else {
+ cmd |= DESC_FL_FULL |
+ ((skb->len & DESC_DATA_MASK) << DESC_BUFF_LEN_OFF);
+ }
+
+ cmd |= (skb->len & DESC_DATA_MASK) << DESC_DATA_LEN_OFF;
+ cmd |= DESC_VLD_BUSY;
+
+ return cmd;
+}
+
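
For a concrete instance of the command word assembled above, take an assumed 1400-byte skb with three frags on TSO-capable hardware:

/* illustration only, values assumed: skb->len == 1400, nr_frags == 3 */
cmd = DESC_SG				/* scatter-gather descriptor */
    | (3 << DESC_FRAGS_NUM_OFF)		/* number of fragments */
    | ((1400 & DESC_DATA_MASK) << DESC_DATA_LEN_OFF)
    | DESC_VLD_BUSY;			/* hand ownership to hardware */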
+static int hix5hd2_fill_sg_desc(struct hix5hd2_priv *priv,
+ struct sk_buff *skb, u32 pos)
+{
+ struct sg_desc *desc;
+ dma_addr_t addr;
+ int ret;
+ int i;
+
+ desc = priv->tx_ring.desc + pos;
+
+ desc->total_len = cpu_to_le32(skb->len);
+ addr = dma_map_single(priv->dev, skb->data, skb_headlen(skb),
+ DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(priv->dev, addr)))
+ return -EINVAL;
+ desc->linear_addr = cpu_to_le32(addr);
+ desc->linear_len = cpu_to_le32(skb_headlen(skb));
+
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+ int len = frag->size;
+
+ addr = skb_frag_dma_map(priv->dev, frag, 0, len, DMA_TO_DEVICE);
+ ret = dma_mapping_error(priv->dev, addr);
+ if (unlikely(ret))
+ return -EINVAL;
+ desc->frags[i].addr = cpu_to_le32(addr);
+ desc->frags[i].size = cpu_to_le32(len);
+ }
+
+ return 0;
+}
+
static int hix5hd2_net_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct hix5hd2_priv *priv = netdev_priv(dev);
struct hix5hd2_desc *desc;
dma_addr_t addr;
u32 pos;
+ u32 cmd;
+ int ret;
/* software write pointer */
pos = dma_cnt(readl_relaxed(priv->base + TX_BQ_WR_ADDR));
@@ -616,18 +754,31 @@ static int hix5hd2_net_xmit(struct sk_buff *skb, struct net_device *dev)
return NETDEV_TX_BUSY;
}
- addr = dma_map_single(priv->dev, skb->data, skb->len, DMA_TO_DEVICE);
- if (dma_mapping_error(priv->dev, addr)) {
- dev_kfree_skb_any(skb);
- return NETDEV_TX_OK;
- }
-
desc = priv->tx_bq.desc + pos;
+
+ cmd = hix5hd2_get_desc_cmd(skb, priv->hw_cap);
+ desc->cmd = cpu_to_le32(cmd);
+
+ if (skb_shinfo(skb)->nr_frags) {
+ ret = hix5hd2_fill_sg_desc(priv, skb, pos);
+ if (unlikely(ret)) {
+ dev_kfree_skb_any(skb);
+ dev->stats.tx_dropped++;
+ return NETDEV_TX_OK;
+ }
+ addr = priv->tx_ring.phys_addr + pos * sizeof(struct sg_desc);
+ } else {
+ addr = dma_map_single(priv->dev, skb->data, skb->len,
+ DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(priv->dev, addr))) {
+ dev_kfree_skb_any(skb);
+ dev->stats.tx_dropped++;
+ return NETDEV_TX_OK;
+ }
+ }
desc->buff_addr = cpu_to_le32(addr);
+
priv->tx_skb[pos] = skb;
- desc->cmd = cpu_to_le32(DESC_VLD_BUSY | DESC_FL_FULL |
- (skb->len & DESC_DATA_MASK) << DESC_DATA_LEN_OFF |
- (skb->len & DESC_DATA_MASK) << DESC_BUFF_LEN_OFF);
/* ensure desc updated */
wmb();
@@ -681,16 +832,26 @@ static int hix5hd2_net_open(struct net_device *dev)
struct phy_device *phy;
int ret;
- ret = clk_prepare_enable(priv->clk);
+ ret = clk_prepare_enable(priv->mac_core_clk);
+ if (ret < 0) {
+ netdev_err(dev, "failed to enable mac core clk %d\n", ret);
+ return ret;
+ }
+
+ ret = clk_prepare_enable(priv->mac_ifc_clk);
if (ret < 0) {
- netdev_err(dev, "failed to enable clk %d\n", ret);
+ clk_disable_unprepare(priv->mac_core_clk);
+ netdev_err(dev, "failed to enable mac ifc clk %d\n", ret);
return ret;
}
phy = of_phy_connect(dev, priv->phy_node,
&hix5hd2_adjust_link, 0, priv->phy_mode);
- if (!phy)
+ if (!phy) {
+ clk_disable_unprepare(priv->mac_ifc_clk);
+ clk_disable_unprepare(priv->mac_core_clk);
return -ENODEV;
+ }
phy_start(phy);
hix5hd2_hw_init(priv);
@@ -721,7 +882,8 @@ static int hix5hd2_net_close(struct net_device *dev)
phy_disconnect(dev->phydev);
}
- clk_disable_unprepare(priv->clk);
+ clk_disable_unprepare(priv->mac_ifc_clk);
+ clk_disable_unprepare(priv->mac_core_clk);
return 0;
}
@@ -862,10 +1024,82 @@ error_free_pool:
return -ENOMEM;
}
+static int hix5hd2_init_sg_desc_queue(struct hix5hd2_priv *priv)
+{
+ struct sg_desc *desc;
+ dma_addr_t phys_addr;
+
+ desc = (struct sg_desc *)dma_alloc_coherent(priv->dev,
+ TX_DESC_NUM * sizeof(struct sg_desc),
+ &phys_addr, GFP_KERNEL);
+ if (!desc)
+ return -ENOMEM;
+
+ priv->tx_ring.desc = desc;
+ priv->tx_ring.phys_addr = phys_addr;
+
+ return 0;
+}
+
+static void hix5hd2_destroy_sg_desc_queue(struct hix5hd2_priv *priv)
+{
+ if (priv->tx_ring.desc) {
+ dma_free_coherent(priv->dev,
+ TX_DESC_NUM * sizeof(struct sg_desc),
+ priv->tx_ring.desc, priv->tx_ring.phys_addr);
+ priv->tx_ring.desc = NULL;
+ }
+}
+
+static inline void hix5hd2_mac_core_reset(struct hix5hd2_priv *priv)
+{
+ if (!priv->mac_core_rst)
+ return;
+
+ reset_control_assert(priv->mac_core_rst);
+ reset_control_deassert(priv->mac_core_rst);
+}
+
+static void hix5hd2_sleep_us(u32 time_us)
+{
+ u32 time_ms;
+
+ if (!time_us)
+ return;
+
+ time_ms = DIV_ROUND_UP(time_us, 1000);
+ if (time_ms < 20)
+ usleep_range(time_us, time_us + 500);
+ else
+ msleep(time_ms);
+}
+
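+
The 20 ms cutoff in hix5hd2_sleep_us() follows the usual kernel guidance that usleep_range() suits short waits while msleep() is preferred once the delay is long enough for scheduling jitter not to matter. Two assumed delay values showing both paths:

hix5hd2_sleep_us(10);		/* rounds to 1 ms, < 20: usleep_range(10, 510) */
hix5hd2_sleep_us(30000);	/* 30 ms, >= 20: msleep(30) */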
+static void hix5hd2_phy_reset(struct hix5hd2_priv *priv)
+{
+ /* To make sure the PHY hardware reset succeeds, we must
+ * first hold the PHY in the deasserted state and only then
+ * perform the actual reset pulse
+ */
+ reset_control_deassert(priv->phy_rst);
+ hix5hd2_sleep_us(priv->phy_reset_delays[PRE_DELAY]);
+
+ reset_control_assert(priv->phy_rst);
+ /* delay some time to ensure the reset completes;
+ * the required pulse width depends on the PHY hardware
+ */
+ hix5hd2_sleep_us(priv->phy_reset_delays[PULSE]);
+ reset_control_deassert(priv->phy_rst);
+ /* delay some time so that later MDIO accesses succeed */
+ hix5hd2_sleep_us(priv->phy_reset_delays[POST_DELAY]);
+}
+
+static const struct of_device_id hix5hd2_of_match[];
+
static int hix5hd2_dev_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct device_node *node = dev->of_node;
+ const struct of_device_id *of_id = NULL;
struct net_device *ndev;
struct hix5hd2_priv *priv;
struct resource *res;
@@ -883,6 +1117,13 @@ static int hix5hd2_dev_probe(struct platform_device *pdev)
priv->dev = dev;
priv->netdev = ndev;
+ of_id = of_match_device(hix5hd2_of_match, dev);
+ if (!of_id) {
+ ret = -EINVAL;
+ goto out_free_netdev;
+ }
+ priv->hw_cap = (unsigned long)of_id->data;
+
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
priv->base = devm_ioremap_resource(dev, res);
if (IS_ERR(priv->base)) {
@@ -897,23 +1138,55 @@ static int hix5hd2_dev_probe(struct platform_device *pdev)
goto out_free_netdev;
}
- priv->clk = devm_clk_get(&pdev->dev, NULL);
- if (IS_ERR(priv->clk)) {
- netdev_err(ndev, "failed to get clk\n");
+ priv->mac_core_clk = devm_clk_get(&pdev->dev, "mac_core");
+ if (IS_ERR(priv->mac_core_clk)) {
+ netdev_err(ndev, "failed to get mac core clk\n");
ret = -ENODEV;
goto out_free_netdev;
}
- ret = clk_prepare_enable(priv->clk);
+ ret = clk_prepare_enable(priv->mac_core_clk);
if (ret < 0) {
- netdev_err(ndev, "failed to enable clk %d\n", ret);
+ netdev_err(ndev, "failed to enable mac core clk %d\n", ret);
goto out_free_netdev;
}
+ priv->mac_ifc_clk = devm_clk_get(&pdev->dev, "mac_ifc");
+ if (IS_ERR(priv->mac_ifc_clk))
+ priv->mac_ifc_clk = NULL;
+
+ ret = clk_prepare_enable(priv->mac_ifc_clk);
+ if (ret < 0) {
+ netdev_err(ndev, "failed to enable mac ifc clk %d\n", ret);
+ goto out_disable_mac_core_clk;
+ }
+
+ priv->mac_core_rst = devm_reset_control_get(dev, "mac_core");
+ if (IS_ERR(priv->mac_core_rst))
+ priv->mac_core_rst = NULL;
+ hix5hd2_mac_core_reset(priv);
+
+ priv->mac_ifc_rst = devm_reset_control_get(dev, "mac_ifc");
+ if (IS_ERR(priv->mac_ifc_rst))
+ priv->mac_ifc_rst = NULL;
+
+ priv->phy_rst = devm_reset_control_get(dev, "phy");
+ if (IS_ERR(priv->phy_rst)) {
+ priv->phy_rst = NULL;
+ } else {
+ ret = of_property_read_u32_array(node,
+ PHY_RESET_DELAYS_PROPERTY,
+ priv->phy_reset_delays,
+ DELAYS_NUM);
+ if (ret)
+ goto out_disable_clk;
+ hix5hd2_phy_reset(priv);
+ }
+
bus = mdiobus_alloc();
if (bus == NULL) {
ret = -ENOMEM;
- goto out_free_netdev;
+ goto out_disable_clk;
}
bus->priv = priv;
@@ -972,22 +1245,38 @@ static int hix5hd2_dev_probe(struct platform_device *pdev)
ndev->ethtool_ops = &hix5hd2_ethtools_ops;
SET_NETDEV_DEV(ndev, dev);
+ if (HAS_CAP_TSO(priv->hw_cap))
+ ndev->hw_features |= NETIF_F_SG;
+
+ ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
+ ndev->vlan_features |= ndev->features;
+
ret = hix5hd2_init_hw_desc_queue(priv);
if (ret)
goto out_phy_node;
netif_napi_add(ndev, &priv->napi, hix5hd2_poll, NAPI_POLL_WEIGHT);
+
+ if (HAS_CAP_TSO(priv->hw_cap)) {
+ ret = hix5hd2_init_sg_desc_queue(priv);
+ if (ret)
+ goto out_destroy_queue;
+ }
+
ret = register_netdev(priv->netdev);
if (ret) {
netdev_err(ndev, "register_netdev failed!");
goto out_destroy_queue;
}
- clk_disable_unprepare(priv->clk);
+ clk_disable_unprepare(priv->mac_ifc_clk);
+ clk_disable_unprepare(priv->mac_core_clk);
return ret;
out_destroy_queue:
+ if (HAS_CAP_TSO(priv->hw_cap))
+ hix5hd2_destroy_sg_desc_queue(priv);
netif_napi_del(&priv->napi);
hix5hd2_destroy_hw_desc_queue(priv);
out_phy_node:
@@ -996,6 +1285,10 @@ err_mdiobus:
mdiobus_unregister(bus);
err_free_mdio:
mdiobus_free(bus);
+out_disable_clk:
+ clk_disable_unprepare(priv->mac_ifc_clk);
+out_disable_mac_core_clk:
+ clk_disable_unprepare(priv->mac_core_clk);
out_free_netdev:
free_netdev(ndev);
@@ -1012,6 +1305,8 @@ static int hix5hd2_dev_remove(struct platform_device *pdev)
mdiobus_unregister(priv->bus);
mdiobus_free(priv->bus);
+ if (HAS_CAP_TSO(priv->hw_cap))
+ hix5hd2_destroy_sg_desc_queue(priv);
hix5hd2_destroy_hw_desc_queue(priv);
of_node_put(priv->phy_node);
cancel_work_sync(&priv->tx_timeout_task);
@@ -1021,7 +1316,11 @@ static int hix5hd2_dev_remove(struct platform_device *pdev)
}
static const struct of_device_id hix5hd2_of_match[] = {
- {.compatible = "hisilicon,hix5hd2-gmac",},
+ { .compatible = "hisilicon,hisi-gmac-v1", .data = (void *)GEMAC_V1 },
+ { .compatible = "hisilicon,hisi-gmac-v2", .data = (void *)GEMAC_V2 },
+ { .compatible = "hisilicon,hix5hd2-gmac", .data = (void *)GEMAC_V1 },
+ { .compatible = "hisilicon,hi3798cv200-gmac", .data = (void *)GEMAC_V2 },
+ { .compatible = "hisilicon,hi3516a-gmac", .data = (void *)GEMAC_V2 },
{},
};
@@ -1029,7 +1328,7 @@ MODULE_DEVICE_TABLE(of, hix5hd2_of_match);
static struct platform_driver hix5hd2_dev_driver = {
.driver = {
- .name = "hix5hd2-gmac",
+ .name = "hisi-gmac",
.of_match_table = hix5hd2_of_match,
},
.probe = hix5hd2_dev_probe,
@@ -1038,6 +1337,6 @@ static struct platform_driver hix5hd2_dev_driver = {
module_platform_driver(hix5hd2_dev_driver);
-MODULE_DESCRIPTION("HISILICON HIX5HD2 Ethernet driver");
+MODULE_DESCRIPTION("HISILICON Gigabit Ethernet MAC driver");
MODULE_LICENSE("GPL v2");
-MODULE_ALIAS("platform:hix5hd2-gmac");
+MODULE_ALIAS("platform:hisi-gmac");
diff --git a/drivers/net/ethernet/hisilicon/hns/hnae.h b/drivers/net/ethernet/hisilicon/hns/hnae.h
index e093cbf26c8c..8016854796fb 100644
--- a/drivers/net/ethernet/hisilicon/hns/hnae.h
+++ b/drivers/net/ethernet/hisilicon/hns/hnae.h
@@ -99,6 +99,8 @@ enum hnae_led_state {
#define HNS_RX_FLAG_L3ID_IPV6 0x1
#define HNS_RX_FLAG_L4ID_UDP 0x0
#define HNS_RX_FLAG_L4ID_TCP 0x1
+#define HNS_RX_FLAG_L4ID_SCTP 0x3
+
#define HNS_TXD_ASID_S 0
#define HNS_TXD_ASID_M (0xff << HNS_TXD_ASID_S)
@@ -426,8 +428,14 @@ enum hnae_media_type {
* get mac address
* set_mac_addr()
* set mac address
+ * clr_mc_addr()
+ * clear mcast tcam table
* set_mc_addr()
* set multicast mode
+ * add_uc_addr()
+ * add ucast address
+ * rm_uc_addr()
+ * remove ucast address
* set_mtu()
* set mtu
* update_stats()
@@ -488,6 +496,11 @@ struct hnae_ae_ops {
void (*set_promisc_mode)(struct hnae_handle *handle, u32 en);
int (*get_mac_addr)(struct hnae_handle *handle, void **p);
int (*set_mac_addr)(struct hnae_handle *handle, void *p);
+ int (*add_uc_addr)(struct hnae_handle *handle,
+ const unsigned char *addr);
+ int (*rm_uc_addr)(struct hnae_handle *handle,
+ const unsigned char *addr);
+ int (*clr_mc_addr)(struct hnae_handle *handle);
int (*set_mc_addr)(struct hnae_handle *handle, void *addr);
int (*set_mtu)(struct hnae_handle *handle, int new_mtu);
void (*set_tso_stats)(struct hnae_handle *handle, int enable);
@@ -590,7 +603,7 @@ static inline int hnae_alloc_buffer_attach(struct hnae_ring *ring, int i)
if (ret)
return ret;
- ring->desc[i].addr = (__le64)ring->desc_cb[i].dma;
+ ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma);
return 0;
}
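
The change from a bare (__le64) cast to cpu_to_le64() only matters on big-endian kernels: the cast reinterprets the value without swapping bytes, whereas cpu_to_le64() byte-swaps on big-endian and compiles away on little-endian. A sketch of the difference, assuming a big-endian CPU and dma == 0x12345678:

__le64 wrong = (__le64)dma;		/* memory: 00 00 00 00 12 34 56 78 */
__le64 right = cpu_to_le64(dma);	/* memory: 78 56 34 12 00 00 00 00 */
/* A device that reads the address little-endian sees the intended
 * value only in the cpu_to_le64() case.
 */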
@@ -621,14 +634,14 @@ static inline void hnae_replace_buffer(struct hnae_ring *ring, int i,
bops->unmap_buffer(ring, &ring->desc_cb[i]);
ring->desc_cb[i] = *res_cb;
- ring->desc[i].addr = (__le64)ring->desc_cb[i].dma;
+ ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma);
ring->desc[i].rx.ipoff_bnum_pid_flag = 0;
}
static inline void hnae_reuse_buffer(struct hnae_ring *ring, int i)
{
ring->desc_cb[i].reuse_flag = 0;
- ring->desc[i].addr = (__le64)(ring->desc_cb[i].dma
+ ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma
+ ring->desc_cb[i].page_offset);
ring->desc[i].rx.ipoff_bnum_pid_flag = 0;
}
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c b/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
index 2d0cb609adc3..0a9cdf00b31a 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
@@ -18,9 +18,6 @@
#include "hns_dsaf_rcb.h"
#define AE_NAME_PORT_ID_IDX 6
-#define ETH_STATIC_REG 1
-#define ETH_DUMP_REG 5
-#define ETH_GSTRING_LEN 32
static struct hns_mac_cb *hns_get_mac_cb(struct hnae_handle *handle)
{
@@ -202,6 +199,28 @@ static int hns_ae_set_mac_address(struct hnae_handle *handle, void *p)
return 0;
}
+static int hns_ae_add_uc_address(struct hnae_handle *handle,
+ const unsigned char *addr)
+{
+ struct hns_mac_cb *mac_cb = hns_get_mac_cb(handle);
+
+ if (mac_cb->mac_type != HNAE_PORT_SERVICE)
+ return -ENOSPC;
+
+ return hns_mac_add_uc_addr(mac_cb, handle->vf_id, addr);
+}
+
+static int hns_ae_rm_uc_address(struct hnae_handle *handle,
+ const unsigned char *addr)
+{
+ struct hns_mac_cb *mac_cb = hns_get_mac_cb(handle);
+
+ if (mac_cb->mac_type != HNAE_PORT_SERVICE)
+ return -ENOSPC;
+
+ return hns_mac_rm_uc_addr(mac_cb, handle->vf_id, addr);
+}
+
static int hns_ae_set_multicast_one(struct hnae_handle *handle, void *addr)
{
int ret;
@@ -235,6 +254,16 @@ static int hns_ae_set_multicast_one(struct hnae_handle *handle, void *addr)
return ret;
}
+static int hns_ae_clr_multicast(struct hnae_handle *handle)
+{
+ struct hns_mac_cb *mac_cb = hns_get_mac_cb(handle);
+
+ if (mac_cb->mac_type != HNAE_PORT_SERVICE)
+ return 0;
+
+ return hns_mac_clr_multicast(mac_cb, handle->vf_id);
+}
+
static int hns_ae_set_mtu(struct hnae_handle *handle, int new_mtu)
{
struct hns_mac_cb *mac_cb = hns_get_mac_cb(handle);
@@ -823,7 +852,10 @@ static struct hnae_ae_ops hns_dsaf_ops = {
.get_coalesce_range = hns_ae_get_coalesce_range,
.set_promisc_mode = hns_ae_set_promisc_mode,
.set_mac_addr = hns_ae_set_mac_address,
+ .add_uc_addr = hns_ae_add_uc_address,
+ .rm_uc_addr = hns_ae_rm_uc_address,
.set_mc_addr = hns_ae_set_multicast_one,
+ .clr_mc_addr = hns_ae_clr_multicast,
.set_mtu = hns_ae_set_mtu,
.update_stats = hns_ae_update_stats,
.set_tso_stats = hns_ae_set_tso_stats,
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c
index 1e1eb92998fb..3382441fe7b5 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c
@@ -37,8 +37,8 @@ static const struct mac_stats_string g_gmac_stats_string[] = {
{"gmac_rx_very_long_err", MAC_STATS_FIELD_OFF(rx_long_err)},
{"gmac_rx_runt_err", MAC_STATS_FIELD_OFF(rx_minto64)},
{"gmac_rx_short_err", MAC_STATS_FIELD_OFF(rx_under_min)},
- {"gmac_rx_filt_pkt", MAC_STATS_FIELD_OFF(rx_filter_bytes)},
- {"gmac_rx_octets_total_filt", MAC_STATS_FIELD_OFF(rx_filter_pkts)},
+ {"gmac_rx_filt_pkt", MAC_STATS_FIELD_OFF(rx_filter_pkts)},
+ {"gmac_rx_octets_total_filt", MAC_STATS_FIELD_OFF(rx_filter_bytes)},
{"gmac_rx_overrun_cnt", MAC_STATS_FIELD_OFF(rx_fifo_overrun_err)},
{"gmac_rx_length_err", MAC_STATS_FIELD_OFF(rx_len_err)},
{"gmac_rx_fail_comma", MAC_STATS_FIELD_OFF(rx_comma_err)},
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
index ec8c738af726..3239d27143b9 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
@@ -263,6 +263,46 @@ int hns_mac_change_vf_addr(struct hns_mac_cb *mac_cb,
return 0;
}
+int hns_mac_add_uc_addr(struct hns_mac_cb *mac_cb, u8 vf_id,
+ const unsigned char *addr)
+{
+ struct dsaf_device *dsaf_dev = mac_cb->dsaf_dev;
+ struct dsaf_drv_mac_single_dest_entry mac_entry;
+ int ret;
+
+ if (HNS_DSAF_IS_DEBUG(dsaf_dev))
+ return -ENOSPC;
+
+ memset(&mac_entry, 0, sizeof(mac_entry));
+ memcpy(mac_entry.addr, addr, sizeof(mac_entry.addr));
+ mac_entry.in_port_num = mac_cb->mac_id;
+ ret = hns_mac_get_inner_port_num(mac_cb, vf_id, &mac_entry.port_num);
+ if (ret)
+ return ret;
+
+ return hns_dsaf_set_mac_uc_entry(dsaf_dev, &mac_entry);
+}
+
+int hns_mac_rm_uc_addr(struct hns_mac_cb *mac_cb, u8 vf_id,
+ const unsigned char *addr)
+{
+ struct dsaf_device *dsaf_dev = mac_cb->dsaf_dev;
+ struct dsaf_drv_mac_single_dest_entry mac_entry;
+ int ret;
+
+ if (HNS_DSAF_IS_DEBUG(dsaf_dev))
+ return -ENOSPC;
+
+ memset(&mac_entry, 0, sizeof(mac_entry));
+ memcpy(mac_entry.addr, addr, sizeof(mac_entry.addr));
+ mac_entry.in_port_num = mac_cb->mac_id;
+ ret = hns_mac_get_inner_port_num(mac_cb, vf_id, &mac_entry.port_num);
+ if (ret)
+ return ret;
+
+ return hns_dsaf_rm_mac_addr(dsaf_dev, &mac_entry);
+}
+
int hns_mac_set_multi(struct hns_mac_cb *mac_cb,
u32 port_num, char *addr, bool enable)
{
@@ -330,13 +370,24 @@ int hns_mac_del_mac(struct hns_mac_cb *mac_cb, u32 vfn, char *mac)
return 0;
}
+int hns_mac_clr_multicast(struct hns_mac_cb *mac_cb, int vfn)
+{
+ struct dsaf_device *dsaf_dev = mac_cb->dsaf_dev;
+ u8 port_num;
+ int ret = hns_mac_get_inner_port_num(mac_cb, vfn, &port_num);
+
+ if (ret)
+ return ret;
+
+ return hns_dsaf_clr_mac_mc_port(dsaf_dev, mac_cb->mac_id, port_num);
+}
+
static void hns_mac_param_get(struct mac_params *param,
struct hns_mac_cb *mac_cb)
{
param->vaddr = (void *)mac_cb->vaddr;
param->mac_mode = hns_get_enet_interface(mac_cb);
- memcpy(param->addr, mac_cb->addr_entry_idx[0].addr,
- MAC_NUM_OCTETS_PER_ADDR);
+ ether_addr_copy(param->addr, mac_cb->addr_entry_idx[0].addr);
param->mac_id = mac_cb->mac_id;
param->dev = mac_cb->dev;
}
@@ -353,8 +404,7 @@ static int hns_mac_port_config_bc_en(struct hns_mac_cb *mac_cb,
{
int ret;
struct dsaf_device *dsaf_dev = mac_cb->dsaf_dev;
- u8 addr[MAC_NUM_OCTETS_PER_ADDR]
- = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
+ u8 addr[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
struct dsaf_drv_mac_single_dest_entry mac_entry;
 /* directly return ok in debug network mode */
@@ -389,8 +439,7 @@ int hns_mac_vm_config_bc_en(struct hns_mac_cb *mac_cb, u32 vmid, bool enable)
int ret;
struct dsaf_device *dsaf_dev = mac_cb->dsaf_dev;
u8 port_num;
- u8 addr[MAC_NUM_OCTETS_PER_ADDR]
- = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
+ u8 addr[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
struct mac_entry_idx *uc_mac_entry;
struct dsaf_drv_mac_single_dest_entry mac_entry;
@@ -453,8 +502,7 @@ int hns_mac_set_mtu(struct hns_mac_cb *mac_cb, u32 new_mtu)
if (mac_cb->mac_type == HNAE_PORT_DEBUG)
max_frm = MAC_MAX_MTU_DBG;
- if ((new_mtu < MAC_MIN_MTU) || (new_frm > max_frm) ||
- (new_frm > HNS_RCB_RING_MAX_BD_PER_PKT * buf_size))
+ if (new_frm > HNS_RCB_RING_MAX_BD_PER_PKT * buf_size)
return -EINVAL;
if (!drv->config_max_frame_length)
@@ -869,6 +917,13 @@ static int hns_mac_get_info(struct hns_mac_cb *mac_cb)
}
}
+ if (fwnode_property_read_u8_array(mac_cb->fw_port, "mc-mac-mask",
+ mac_cb->mc_mask, ETH_ALEN)) {
+ dev_warn(mac_cb->dev,
+ "no mc-mac-mask property, set to default value.\n");
+ eth_broadcast_addr(mac_cb->mc_mask);
+ }
+
return 0;
}
@@ -1082,6 +1137,8 @@ void hns_mac_set_promisc(struct hns_mac_cb *mac_cb, u8 en)
{
struct mac_driver *mac_ctrl_drv = hns_mac_get_drv(mac_cb);
+ hns_dsaf_set_promisc_tcam(mac_cb->dsaf_dev, mac_cb->mac_id, !!en);
+
if (mac_ctrl_drv->set_promiscuous)
mac_ctrl_drv->set_promiscuous(mac_ctrl_drv, en);
}
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h
index d3a1f72ece0e..2bb3d1e93c64 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h
@@ -31,7 +31,7 @@ struct dsaf_device;
#define MAC_MIN_MTU 68
#define MAC_MAX_MTU_DBG MAC_DEFAULT_MTU
-#define MAC_DEFAULT_PAUSE_TIME 0xff
+#define MAC_DEFAULT_PAUSE_TIME 0xffff
#define MAC_GMAC_IDX 0
#define MAC_XGMAC_IDX 1
@@ -56,9 +56,6 @@ struct dsaf_device;
/*check mac addr multicast*/
#define MAC_IS_MULTICAST(p) ((*((u8 *)((p) + 0)) & 0x01) ? (1) : (0))
-/**< Number of octets (8-bit bytes) in an ethernet address */
-#define MAC_NUM_OCTETS_PER_ADDR 6
-
struct mac_priv {
void *mac;
};
@@ -189,7 +186,7 @@ struct mac_statistics {
/*mac para struct ,mac get param from nic or dsaf when initialize*/
struct mac_params {
- char addr[MAC_NUM_OCTETS_PER_ADDR];
+ char addr[ETH_ALEN];
void *vaddr; /*virtual address*/
struct device *dev;
u8 mac_id;
@@ -214,7 +211,7 @@ struct mac_info {
};
struct mac_entry_idx {
- u8 addr[MAC_NUM_OCTETS_PER_ADDR];
+ u8 addr[ETH_ALEN];
u16 vlan_id:12;
u16 valid:1;
u16 qos:3;
@@ -317,6 +314,7 @@ struct hns_mac_cb {
u8 __iomem *serdes_vaddr;
struct regmap *serdes_ctrl;
struct regmap *cpld_ctrl;
+ char mc_mask[ETH_ALEN];
u32 cpld_ctrl_reg;
u32 port_rst_off;
u32 port_mode_off;
@@ -409,7 +407,7 @@ struct mac_driver {
};
struct mac_stats_string {
- char desc[64];
+ char desc[ETH_GSTRING_LEN];
unsigned long offset;
};
@@ -463,5 +461,10 @@ int hns_cpld_led_set_id(struct hns_mac_cb *mac_cb,
void hns_mac_set_promisc(struct hns_mac_cb *mac_cb, u8 en);
int hns_mac_get_inner_port_num(struct hns_mac_cb *mac_cb,
u8 vmid, u8 *port_num);
+int hns_mac_add_uc_addr(struct hns_mac_cb *mac_cb, u8 vf_id,
+ const unsigned char *addr);
+int hns_mac_rm_uc_addr(struct hns_mac_cb *mac_cb, u8 vf_id,
+ const unsigned char *addr);
+int hns_mac_clr_multicast(struct hns_mac_cb *mac_cb, int vfn);
#endif /* _HNS_DSAF_MAC_H */
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
index 8ea3d95fa483..90dbda792614 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
@@ -591,6 +591,16 @@ static void hns_dsaf_voq_bp_all_thrd_cfg(struct dsaf_device *dsaf_dev)
}
}
+static void hns_dsaf_tbl_tcam_match_cfg(
+ struct dsaf_device *dsaf_dev,
+ struct dsaf_tbl_tcam_data *ptbl_tcam_data)
+{
+ dsaf_write_dev(dsaf_dev, DSAF_TBL_TCAM_MATCH_CFG_L_REG,
+ ptbl_tcam_data->tbl_tcam_data_low);
+ dsaf_write_dev(dsaf_dev, DSAF_TBL_TCAM_MATCH_CFG_H_REG,
+ ptbl_tcam_data->tbl_tcam_data_high);
+}
+
/**
* hns_dsaf_tbl_tcam_data_cfg - tbl
* @dsaf_id: dsa fabric id
@@ -755,7 +765,7 @@ static void hns_dsaf_tbl_tcam_data_ucast_pul(
void hns_dsaf_set_promisc_mode(struct dsaf_device *dsaf_dev, u32 en)
{
- if (!HNS_DSAF_IS_DEBUG(dsaf_dev))
+ if (AE_IS_VER1(dsaf_dev->dsaf_ver) && !HNS_DSAF_IS_DEBUG(dsaf_dev))
dsaf_set_dev_bit(dsaf_dev, DSAF_CFG_0_REG,
DSAF_CFG_MIX_MODE_S, !!en);
}
@@ -894,15 +904,16 @@ static void hns_dsaf_tcam_uc_cfg(
}
/**
- * hns_dsaf_tcam_mc_cfg - INT
- * @dsaf_id: dsa fabric id
- * @address,
- * @ptbl_tcam_data,
- * @ptbl_tcam_mcast,
+ * hns_dsaf_tcam_mc_cfg - cfg the tcam for mc
+ * @dsaf_dev: dsa fabric device struct pointer
+ * @address: tcam index
+ * @ptbl_tcam_data: tcam data struct pointer
+ * @ptbl_tcam_mask: tcam mask struct pointer, it must be NULL for HNSv1
+ * @ptbl_tcam_mcast: tcam mcast cfg struct pointer
*/
static void hns_dsaf_tcam_mc_cfg(
struct dsaf_device *dsaf_dev, u32 address,
struct dsaf_tbl_tcam_data *ptbl_tcam_data,
+ struct dsaf_tbl_tcam_data *ptbl_tcam_mask,
struct dsaf_tbl_tcam_mcast_cfg *ptbl_tcam_mcast)
{
spin_lock_bh(&dsaf_dev->tcam_lock);
@@ -913,7 +924,11 @@ static void hns_dsaf_tcam_mc_cfg(
hns_dsaf_tbl_tcam_data_cfg(dsaf_dev, ptbl_tcam_data);
/*Write Tcam Mcast*/
hns_dsaf_tbl_tcam_mcast_cfg(dsaf_dev, ptbl_tcam_mcast);
- /*Write Plus*/
+ /* Write Match Data */
+ if (ptbl_tcam_mask)
+ hns_dsaf_tbl_tcam_match_cfg(dsaf_dev, ptbl_tcam_mask);
+
+ /* Write Pulse */
hns_dsaf_tbl_tcam_data_mcast_pul(dsaf_dev);
spin_unlock_bh(&dsaf_dev->tcam_lock);
@@ -944,6 +959,16 @@ static void hns_dsaf_tcam_mc_invld(struct dsaf_device *dsaf_dev, u32 address)
spin_unlock_bh(&dsaf_dev->tcam_lock);
}
+void hns_dsaf_tcam_addr_get(struct dsaf_drv_tbl_tcam_key *mac_key, u8 *addr)
+{
+ addr[0] = mac_key->high.bits.mac_0;
+ addr[1] = mac_key->high.bits.mac_1;
+ addr[2] = mac_key->high.bits.mac_2;
+ addr[3] = mac_key->high.bits.mac_3;
+ addr[4] = mac_key->low.bits.mac_4;
+ addr[5] = mac_key->low.bits.mac_5;
+}
+
/**
* hns_dsaf_tcam_uc_get - INT
* @dsaf_id: dsa fabric id
@@ -1369,6 +1394,12 @@ static int hns_dsaf_init(struct dsaf_device *dsaf_dev)
if (HNS_DSAF_IS_DEBUG(dsaf_dev))
return 0;
+ if (AE_IS_VER1(dsaf_dev->dsaf_ver))
+ dsaf_dev->tcam_max_num = DSAF_TCAM_SUM;
+ else
+ dsaf_dev->tcam_max_num =
+ DSAF_TCAM_SUM - DSAFV2_MAC_FUZZY_TCAM_NUM;
+
spin_lock_init(&dsaf_dev->tcam_lock);
ret = hns_dsaf_init_hw(dsaf_dev);
if (ret)
@@ -1424,7 +1455,7 @@ static u16 hns_dsaf_find_soft_mac_entry(
u32 i;
soft_mac_entry = priv->soft_mac_tbl;
- for (i = 0; i < DSAF_TCAM_SUM; i++) {
+ for (i = 0; i < dsaf_dev->tcam_max_num; i++) {
 /* invalid table entry */
if ((soft_mac_entry->index != DSAF_INVALID_ENTRY_IDX) &&
(soft_mac_entry->tcam_key.high.val == mac_key->high.val) &&
@@ -1449,7 +1480,7 @@ static u16 hns_dsaf_find_empty_mac_entry(struct dsaf_device *dsaf_dev)
u32 i;
soft_mac_entry = priv->soft_mac_tbl;
- for (i = 0; i < DSAF_TCAM_SUM; i++) {
+ for (i = 0; i < dsaf_dev->tcam_max_num; i++) {
 /* invalid entry */
if (soft_mac_entry->index == DSAF_INVALID_ENTRY_IDX)
/* return find result --soft index */
@@ -1488,8 +1519,12 @@ static void hns_dsaf_set_mac_key(
mac_key->high.bits.mac_3 = addr[3];
mac_key->low.bits.mac_4 = addr[4];
mac_key->low.bits.mac_5 = addr[5];
- mac_key->low.bits.vlan = vlan_id;
- mac_key->low.bits.port = port;
+ dsaf_set_field(mac_key->low.bits.port_vlan, DSAF_TBL_TCAM_KEY_VLAN_M,
+ DSAF_TBL_TCAM_KEY_VLAN_S, vlan_id);
+ dsaf_set_field(mac_key->low.bits.port_vlan, DSAF_TBL_TCAM_KEY_PORT_M,
+ DSAF_TBL_TCAM_KEY_PORT_S, port);
+
+ mac_key->low.bits.port_vlan = le16_to_cpu(mac_key->low.bits.port_vlan);
}
/**
@@ -1507,6 +1542,7 @@ int hns_dsaf_set_mac_uc_entry(
struct dsaf_drv_priv *priv =
(struct dsaf_drv_priv *)hns_dsaf_dev_priv(dsaf_dev);
struct dsaf_drv_soft_mac_tbl *soft_mac_entry = priv->soft_mac_tbl;
+ struct dsaf_tbl_tcam_data tcam_data;
/* mac addr check */
if (MAC_IS_ALL_ZEROS(mac_entry->addr) ||
@@ -1548,9 +1584,10 @@ int hns_dsaf_set_mac_uc_entry(
/* default config dvc to 0 */
mac_data.tbl_ucast_dvc = 0;
mac_data.tbl_ucast_out_port = mac_entry->port_num;
- hns_dsaf_tcam_uc_cfg(
- dsaf_dev, entry_index,
- (struct dsaf_tbl_tcam_data *)(&mac_key), &mac_data);
+ tcam_data.tbl_tcam_data_high = cpu_to_le32(mac_key.high.val);
+ tcam_data.tbl_tcam_data_low = cpu_to_le32(mac_key.low.val);
+
+ hns_dsaf_tcam_uc_cfg(dsaf_dev, entry_index, &tcam_data, &mac_data);
/* config software entry */
soft_mac_entry += entry_index;
@@ -1561,6 +1598,55 @@ int hns_dsaf_set_mac_uc_entry(
return 0;
}
+int hns_dsaf_rm_mac_addr(
+ struct dsaf_device *dsaf_dev,
+ struct dsaf_drv_mac_single_dest_entry *mac_entry)
+{
+ u16 entry_index = DSAF_INVALID_ENTRY_IDX;
+ struct dsaf_tbl_tcam_ucast_cfg mac_data;
+ struct dsaf_drv_tbl_tcam_key mac_key;
+
+ /* mac addr check */
+ if (!is_valid_ether_addr(mac_entry->addr)) {
+ dev_err(dsaf_dev->dev, "rm_uc_addr %s Mac %pM err!\n",
+ dsaf_dev->ae_dev.name, mac_entry->addr);
+ return -EINVAL;
+ }
+
+ /* config key */
+ hns_dsaf_set_mac_key(dsaf_dev, &mac_key, mac_entry->in_vlan_id,
+ mac_entry->in_port_num, mac_entry->addr);
+
+ entry_index = hns_dsaf_find_soft_mac_entry(dsaf_dev, &mac_key);
+ if (entry_index == DSAF_INVALID_ENTRY_IDX) {
+ /* cannot find the tcam entry, return 0 */
+ dev_info(dsaf_dev->dev,
+ "rm_uc_addr no tcam, %s Mac key(%#x:%#x)\n",
+ dsaf_dev->ae_dev.name,
+ mac_key.high.val, mac_key.low.val);
+ return 0;
+ }
+
+ dev_dbg(dsaf_dev->dev,
+ "rm_uc_addr, %s Mac key(%#x:%#x) entry_index%d\n",
+ dsaf_dev->ae_dev.name, mac_key.high.val,
+ mac_key.low.val, entry_index);
+
+ hns_dsaf_tcam_uc_get(
+ dsaf_dev, entry_index,
+ (struct dsaf_tbl_tcam_data *)&mac_key,
+ &mac_data);
+
+ /* a unicast entry not used locally must not be cleared */
+ if (mac_entry->port_num != mac_data.tbl_ucast_out_port)
+ return -EFAULT;
+
+ return hns_dsaf_del_mac_entry(dsaf_dev,
+ mac_entry->in_vlan_id,
+ mac_entry->in_port_num,
+ mac_entry->addr);
+}
+
/**
* hns_dsaf_set_mac_mc_entry - set mac mc-entry
* @dsaf_dev: dsa fabric device struct pointer
@@ -1577,6 +1663,7 @@ int hns_dsaf_set_mac_mc_entry(
(struct dsaf_drv_priv *)hns_dsaf_dev_priv(dsaf_dev);
struct dsaf_drv_soft_mac_tbl *soft_mac_entry = priv->soft_mac_tbl;
struct dsaf_drv_tbl_tcam_key tmp_mac_key;
+ struct dsaf_tbl_tcam_data tcam_data;
/* mac addr check */
if (MAC_IS_ALL_ZEROS(mac_entry->addr)) {
@@ -1609,9 +1696,12 @@ int hns_dsaf_set_mac_mc_entry(
0, sizeof(mac_data.tbl_mcast_port_msk));
} else {
/* config hardware entry */
- hns_dsaf_tcam_mc_get(
- dsaf_dev, entry_index,
- (struct dsaf_tbl_tcam_data *)(&tmp_mac_key), &mac_data);
+ hns_dsaf_tcam_mc_get(dsaf_dev, entry_index, &tcam_data,
+ &mac_data);
+
+ tmp_mac_key.high.val =
+ le32_to_cpu(tcam_data.tbl_tcam_data_high);
+ tmp_mac_key.low.val = le32_to_cpu(tcam_data.tbl_tcam_data_low);
}
mac_data.tbl_mcast_old_en = 0;
mac_data.tbl_mcast_item_vld = 1;
@@ -1623,9 +1713,11 @@ int hns_dsaf_set_mac_mc_entry(
dsaf_dev->ae_dev.name, mac_key.high.val,
mac_key.low.val, entry_index);
- hns_dsaf_tcam_mc_cfg(
- dsaf_dev, entry_index,
- (struct dsaf_tbl_tcam_data *)(&mac_key), &mac_data);
+ tcam_data.tbl_tcam_data_high = cpu_to_le32(mac_key.high.val);
+ tcam_data.tbl_tcam_data_low = cpu_to_le32(mac_key.low.val);
+
+ hns_dsaf_tcam_mc_cfg(dsaf_dev, entry_index, &tcam_data, NULL,
+ &mac_data);
/* config software entry */
soft_mac_entry += entry_index;
@@ -1636,6 +1728,16 @@ int hns_dsaf_set_mac_mc_entry(
return 0;
}
+static void hns_dsaf_mc_mask_bit_clear(char *dst, const char *src)
+{
+ u16 *a = (u16 *)dst;
+ const u16 *b = (const u16 *)src;
+
+ a[0] &= b[0];
+ a[1] &= b[1];
+ a[2] &= b[2];
+}
+
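+
hns_dsaf_mc_mask_bit_clear() ANDs the 6-byte multicast address against the mask as three u16 words; a byte-wise equivalent, shown only for clarity (assumes ETH_ALEN == 6 and that both buffers are at least 2-byte aligned in the u16 version):

/* equivalent loop form of the u16-based AND above */
int i;

for (i = 0; i < ETH_ALEN; i++)
	dst[i] &= src[i];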
/**
* hns_dsaf_add_mac_mc_port - add mac mc-port
* @dsaf_dev: dsa fabric device struct pointer
@@ -1646,11 +1748,15 @@ int hns_dsaf_add_mac_mc_port(struct dsaf_device *dsaf_dev,
{
u16 entry_index = DSAF_INVALID_ENTRY_IDX;
struct dsaf_drv_tbl_tcam_key mac_key;
+ struct dsaf_drv_tbl_tcam_key mask_key;
+ struct dsaf_tbl_tcam_data *pmask_key = NULL;
struct dsaf_tbl_tcam_mcast_cfg mac_data;
- struct dsaf_drv_priv *priv =
- (struct dsaf_drv_priv *)hns_dsaf_dev_priv(dsaf_dev);
+ struct dsaf_drv_priv *priv = hns_dsaf_dev_priv(dsaf_dev);
struct dsaf_drv_soft_mac_tbl *soft_mac_entry = priv->soft_mac_tbl;
struct dsaf_drv_tbl_tcam_key tmp_mac_key;
+ struct dsaf_tbl_tcam_data tcam_data;
+ u8 mc_addr[ETH_ALEN];
+ u8 *mc_mask;
int mskid;
 /* check mac addr */
@@ -1660,14 +1766,32 @@ int hns_dsaf_add_mac_mc_port(struct dsaf_device *dsaf_dev,
return -EINVAL;
}
+ ether_addr_copy(mc_addr, mac_entry->addr);
+ mc_mask = dsaf_dev->mac_cb[mac_entry->in_port_num]->mc_mask;
+ if (!AE_IS_VER1(dsaf_dev->dsaf_ver)) {
+ /* prepare for key data setting */
+ hns_dsaf_mc_mask_bit_clear(mc_addr, mc_mask);
+
+ /* config key mask */
+ hns_dsaf_set_mac_key(dsaf_dev, &mask_key,
+ 0x0,
+ 0xff,
+ mc_mask);
+
+ mask_key.high.val = le32_to_cpu(mask_key.high.val);
+ mask_key.low.val = le32_to_cpu(mask_key.low.val);
+
+ pmask_key = (struct dsaf_tbl_tcam_data *)(&mask_key);
+ }
+
/*config key */
hns_dsaf_set_mac_key(
dsaf_dev, &mac_key, mac_entry->in_vlan_id,
- mac_entry->in_port_num, mac_entry->addr);
+ mac_entry->in_port_num, mc_addr);
memset(&mac_data, 0, sizeof(struct dsaf_tbl_tcam_mcast_cfg));
- /*check exist? */
+ /* check if the tcam entry exists */
entry_index = hns_dsaf_find_soft_mac_entry(dsaf_dev, &mac_key);
if (entry_index == DSAF_INVALID_ENTRY_IDX) {
 /* if not found, find an empty entry */
@@ -1681,11 +1805,15 @@ int hns_dsaf_add_mac_mc_port(struct dsaf_device *dsaf_dev,
return -EINVAL;
}
} else {
- /*if exist, add in */
- hns_dsaf_tcam_mc_get(
- dsaf_dev, entry_index,
- (struct dsaf_tbl_tcam_data *)(&tmp_mac_key), &mac_data);
+ /* if the entry exists, add to it */
+ hns_dsaf_tcam_mc_get(dsaf_dev, entry_index, &tcam_data,
+ &mac_data);
+
+ tmp_mac_key.high.val =
+ le32_to_cpu(tcam_data.tbl_tcam_data_high);
+ tmp_mac_key.low.val = le32_to_cpu(tcam_data.tbl_tcam_data_low);
}
+
/* config hardware entry */
if (mac_entry->port_num < DSAF_SERVICE_NW_NUM) {
mskid = mac_entry->port_num;
@@ -1708,9 +1836,12 @@ int hns_dsaf_add_mac_mc_port(struct dsaf_device *dsaf_dev,
dsaf_dev->ae_dev.name, mac_key.high.val,
mac_key.low.val, entry_index);
- hns_dsaf_tcam_mc_cfg(
- dsaf_dev, entry_index,
- (struct dsaf_tbl_tcam_data *)(&mac_key), &mac_data);
+ tcam_data.tbl_tcam_data_high = cpu_to_le32(mac_key.high.val);
+ tcam_data.tbl_tcam_data_low = cpu_to_le32(mac_key.low.val);
+
+ /* config mc entry with mask */
+ hns_dsaf_tcam_mc_cfg(dsaf_dev, entry_index, &tcam_data,
+ pmask_key, &mac_data);
/*config software entry */
soft_mac_entry += entry_index;
@@ -1782,15 +1913,18 @@ int hns_dsaf_del_mac_mc_port(struct dsaf_device *dsaf_dev,
{
u16 entry_index = DSAF_INVALID_ENTRY_IDX;
struct dsaf_drv_tbl_tcam_key mac_key;
- struct dsaf_drv_priv *priv =
- (struct dsaf_drv_priv *)hns_dsaf_dev_priv(dsaf_dev);
+ struct dsaf_drv_priv *priv = hns_dsaf_dev_priv(dsaf_dev);
struct dsaf_drv_soft_mac_tbl *soft_mac_entry = priv->soft_mac_tbl;
u16 vlan_id;
u8 in_port_num;
struct dsaf_tbl_tcam_mcast_cfg mac_data;
- struct dsaf_drv_tbl_tcam_key tmp_mac_key;
+ struct dsaf_tbl_tcam_data tcam_data;
int mskid;
const u8 empty_msk[sizeof(mac_data.tbl_mcast_port_msk)] = {0};
+ struct dsaf_drv_tbl_tcam_key mask_key, tmp_mac_key;
+ struct dsaf_tbl_tcam_data *pmask_key = NULL;
+ u8 mc_addr[ETH_ALEN];
+ u8 *mc_mask;
if (!(void *)mac_entry) {
dev_err(dsaf_dev->dev,
@@ -1798,10 +1932,6 @@ int hns_dsaf_del_mac_mc_port(struct dsaf_device *dsaf_dev,
return -EINVAL;
}
- /*get key info*/
- vlan_id = mac_entry->in_vlan_id;
- in_port_num = mac_entry->in_port_num;
-
/*check mac addr */
if (MAC_IS_ALL_ZEROS(mac_entry->addr)) {
dev_err(dsaf_dev->dev, "del_port failed, addr %pM!\n",
@@ -1809,11 +1939,31 @@ int hns_dsaf_del_mac_mc_port(struct dsaf_device *dsaf_dev,
return -EINVAL;
}
- /*config key */
- hns_dsaf_set_mac_key(dsaf_dev, &mac_key, vlan_id, in_port_num,
- mac_entry->addr);
+ /* always mask vlan_id field */
+ ether_addr_copy(mc_addr, mac_entry->addr);
+ mc_mask = dsaf_dev->mac_cb[mac_entry->in_port_num]->mc_mask;
+
+ if (!AE_IS_VER1(dsaf_dev->dsaf_ver)) {
+ /* prepare for key data setting */
+ hns_dsaf_mc_mask_bit_clear(mc_addr, mc_mask);
+
+ /* config key mask */
+ hns_dsaf_set_mac_key(dsaf_dev, &mask_key, 0x00, 0xff, mc_addr);
+
+ mask_key.high.val = le32_to_cpu(mask_key.high.val);
+ mask_key.low.val = le32_to_cpu(mask_key.low.val);
- /*check is exist? */
+ pmask_key = (struct dsaf_tbl_tcam_data *)(&mask_key);
+ }
+
+ /* get key info */
+ vlan_id = mac_entry->in_vlan_id;
+ in_port_num = mac_entry->in_port_num;
+
+ /* config key */
+ hns_dsaf_set_mac_key(dsaf_dev, &mac_key, vlan_id, in_port_num, mc_addr);
+
+ /* check if the tcam entry exists */
entry_index = hns_dsaf_find_soft_mac_entry(dsaf_dev, &mac_key);
if (entry_index == DSAF_INVALID_ENTRY_IDX) {
/*find none */
@@ -1829,10 +1979,11 @@ int hns_dsaf_del_mac_mc_port(struct dsaf_device *dsaf_dev,
dsaf_dev->ae_dev.name, mac_key.high.val,
mac_key.low.val, entry_index);
- /*read entry*/
- hns_dsaf_tcam_mc_get(
- dsaf_dev, entry_index,
- (struct dsaf_tbl_tcam_data *)(&tmp_mac_key), &mac_data);
+ /* read entry */
+ hns_dsaf_tcam_mc_get(dsaf_dev, entry_index, &tcam_data, &mac_data);
+
+ tmp_mac_key.high.val = le32_to_cpu(tcam_data.tbl_tcam_data_high);
+ tmp_mac_key.low.val = le32_to_cpu(tcam_data.tbl_tcam_data_low);
/*del the port*/
if (mac_entry->port_num < DSAF_SERVICE_NW_NUM) {
@@ -1857,15 +2008,87 @@ int hns_dsaf_del_mac_mc_port(struct dsaf_device *dsaf_dev,
/* del soft entry */
soft_mac_entry += entry_index;
soft_mac_entry->index = DSAF_INVALID_ENTRY_IDX;
- } else { /* not zer, just del port, updata*/
- hns_dsaf_tcam_mc_cfg(
- dsaf_dev, entry_index,
- (struct dsaf_tbl_tcam_data *)(&mac_key), &mac_data);
+ } else { /* not zero, just del port, update */
+ tcam_data.tbl_tcam_data_high = cpu_to_le32(mac_key.high.val);
+ tcam_data.tbl_tcam_data_low = cpu_to_le32(mac_key.low.val);
+
+ hns_dsaf_tcam_mc_cfg(dsaf_dev, entry_index,
+ &tcam_data,
+ pmask_key, &mac_data);
}
return 0;
}
+int hns_dsaf_clr_mac_mc_port(struct dsaf_device *dsaf_dev, u8 mac_id,
+ u8 port_num)
+{
+ struct dsaf_drv_priv *priv = hns_dsaf_dev_priv(dsaf_dev);
+ struct dsaf_drv_soft_mac_tbl *soft_mac_entry;
+ struct dsaf_tbl_tcam_mcast_cfg mac_data;
+ int ret = 0, i;
+
+ if (HNS_DSAF_IS_DEBUG(dsaf_dev))
+ return 0;
+
+ for (i = 0; i < DSAF_TCAM_SUM - DSAFV2_MAC_FUZZY_TCAM_NUM; i++) {
+ u8 addr[ETH_ALEN];
+ u8 port;
+
+ soft_mac_entry = priv->soft_mac_tbl + i;
+
+ hns_dsaf_tcam_addr_get(&soft_mac_entry->tcam_key, addr);
+ port = dsaf_get_field(
+ soft_mac_entry->tcam_key.low.bits.port_vlan,
+ DSAF_TBL_TCAM_KEY_PORT_M,
+ DSAF_TBL_TCAM_KEY_PORT_S);
+ /* check valid tcam mc entry */
+ if (soft_mac_entry->index != DSAF_INVALID_ENTRY_IDX &&
+ port == mac_id &&
+ is_multicast_ether_addr(addr) &&
+ !is_broadcast_ether_addr(addr)) {
+ const u32 empty_msk[DSAF_PORT_MSK_NUM] = {0};
+ struct dsaf_drv_mac_single_dest_entry mac_entry;
+
+ /* disable receiving of this multicast address for
+ * the VF.
+ */
+ ether_addr_copy(mac_entry.addr, addr);
+ mac_entry.in_vlan_id = dsaf_get_field(
+ soft_mac_entry->tcam_key.low.bits.port_vlan,
+ DSAF_TBL_TCAM_KEY_VLAN_M,
+ DSAF_TBL_TCAM_KEY_VLAN_S);
+ mac_entry.in_port_num = mac_id;
+ mac_entry.port_num = port_num;
+ if (hns_dsaf_del_mac_mc_port(dsaf_dev, &mac_entry)) {
+ ret = -EINVAL;
+ continue;
+ }
+
+ /* disable receiving of this multicast address for
+ * the mac port if all VFs are disabled
+ */
+ hns_dsaf_tcam_mc_get(dsaf_dev, i,
+ (struct dsaf_tbl_tcam_data *)
+ (&soft_mac_entry->tcam_key),
+ &mac_data);
+ dsaf_set_bit(mac_data.tbl_mcast_port_msk[mac_id / 32],
+ mac_id % 32, 0);
+ if (!memcmp(mac_data.tbl_mcast_port_msk, empty_msk,
+ sizeof(u32) * DSAF_PORT_MSK_NUM)) {
+ mac_entry.port_num = mac_id;
+ if (hns_dsaf_del_mac_mc_port(dsaf_dev,
+ &mac_entry)) {
+ ret = -EINVAL;
+ continue;
+ }
+ }
+ }
+ }
+
+ return ret;
+}
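The port-mask bookkeeping above treats tbl_mcast_port_msk[] as one flat bitmap: a port's bit sits in word mac_id / 32 at bit mac_id % 32 (so mac_id 5 is word 0, bit 5), and the entry is torn down only once every word reads zero. An equivalent standalone sketch (the helper name is hypothetical):

/* Clear one port bit in the multicast port mask and report whether any
 * port is still enabled -- the same test the loop above performs with
 * dsaf_set_bit() and memcmp() against empty_msk.
 */
static bool mc_mask_empty_after_clear(u32 *msk, unsigned int nwords,
                                      unsigned int mac_id)
{
        unsigned int i;

        msk[mac_id / 32] &= ~BIT(mac_id % 32);
        for (i = 0; i < nwords; i++)
                if (msk[i])
                        return false;
        return true;
}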
+
/**
* hns_dsaf_get_mac_uc_entry - get mac uc entry
* @dsaf_dev: dsa fabric device struct pointer
@@ -1878,6 +2101,7 @@ int hns_dsaf_get_mac_uc_entry(struct dsaf_device *dsaf_dev,
struct dsaf_drv_tbl_tcam_key mac_key;
struct dsaf_tbl_tcam_ucast_cfg mac_data;
+ struct dsaf_tbl_tcam_data tcam_data;
/* check macaddr */
if (MAC_IS_ALL_ZEROS(mac_entry->addr) ||
@@ -1906,9 +2130,12 @@ int hns_dsaf_get_mac_uc_entry(struct dsaf_device *dsaf_dev,
dsaf_dev->ae_dev.name, mac_key.high.val,
mac_key.low.val, entry_index);
- /*read entry*/
- hns_dsaf_tcam_uc_get(dsaf_dev, entry_index,
- (struct dsaf_tbl_tcam_data *)&mac_key, &mac_data);
+ /* read entry */
+ hns_dsaf_tcam_uc_get(dsaf_dev, entry_index, &tcam_data, &mac_data);
+
+ mac_key.high.val = le32_to_cpu(tcam_data.tbl_tcam_data_high);
+ mac_key.low.val = le32_to_cpu(tcam_data.tbl_tcam_data_low);
+
mac_entry->port_num = mac_data.tbl_ucast_out_port;
return 0;
@@ -1926,6 +2153,7 @@ int hns_dsaf_get_mac_mc_entry(struct dsaf_device *dsaf_dev,
struct dsaf_drv_tbl_tcam_key mac_key;
struct dsaf_tbl_tcam_mcast_cfg mac_data;
+ struct dsaf_tbl_tcam_data tcam_data;
/*check mac addr */
if (MAC_IS_ALL_ZEROS(mac_entry->addr) ||
@@ -1955,8 +2183,10 @@ int hns_dsaf_get_mac_mc_entry(struct dsaf_device *dsaf_dev,
mac_key.low.val, entry_index);
/*read entry */
- hns_dsaf_tcam_mc_get(dsaf_dev, entry_index,
- (struct dsaf_tbl_tcam_data *)&mac_key, &mac_data);
+ hns_dsaf_tcam_mc_get(dsaf_dev, entry_index, &tcam_data, &mac_data);
+
+ mac_key.high.val = le32_to_cpu(tcam_data.tbl_tcam_data_high);
+ mac_key.low.val = le32_to_cpu(tcam_data.tbl_tcam_data_low);
mac_entry->port_mask[0] = mac_data.tbl_mcast_port_msk[0] & 0x3F;
return 0;
@@ -1976,9 +2206,10 @@ int hns_dsaf_get_mac_entry_by_index(
struct dsaf_tbl_tcam_mcast_cfg mac_data;
struct dsaf_tbl_tcam_ucast_cfg mac_uc_data;
- char mac_addr[MAC_NUM_OCTETS_PER_ADDR] = {0};
+ struct dsaf_tbl_tcam_data tcam_data;
+ char mac_addr[ETH_ALEN] = {0};
- if (entry_index >= DSAF_TCAM_SUM) {
+ if (entry_index >= dsaf_dev->tcam_max_num) {
/* find none, del error */
dev_err(dsaf_dev->dev, "get_uc_entry failed, %s\n",
dsaf_dev->ae_dev.name);
@@ -1986,8 +2217,10 @@ int hns_dsaf_get_mac_entry_by_index(
}
/* mc entry, do read opt */
- hns_dsaf_tcam_mc_get(dsaf_dev, entry_index,
- (struct dsaf_tbl_tcam_data *)&mac_key, &mac_data);
+ hns_dsaf_tcam_mc_get(dsaf_dev, entry_index, &tcam_data, &mac_data);
+
+ mac_key.high.val = le32_to_cpu(tcam_data.tbl_tcam_data_high);
+ mac_key.low.val = le32_to_cpu(tcam_data.tbl_tcam_data_low);
mac_entry->port_mask[0] = mac_data.tbl_mcast_port_msk[0] & 0x3F;
@@ -2004,9 +2237,12 @@ int hns_dsaf_get_mac_entry_by_index(
/**mc donot do*/
} else {
/*is not mc, just uc... */
- hns_dsaf_tcam_uc_get(dsaf_dev, entry_index,
- (struct dsaf_tbl_tcam_data *)&mac_key,
+ hns_dsaf_tcam_uc_get(dsaf_dev, entry_index, &tcam_data,
&mac_uc_data);
+
+ mac_key.high.val = le32_to_cpu(tcam_data.tbl_tcam_data_high);
+ mac_key.low.val = le32_to_cpu(tcam_data.tbl_tcam_data_low);
+
mac_entry->port_mask[0] = (1 << mac_uc_data.tbl_ucast_out_port);
}
@@ -2670,6 +2906,59 @@ int hns_dsaf_get_regs_count(void)
return DSAF_DUMP_REGS_NUM;
}
+/* Reserve the last TCAM entries (one per port) for promisc support */
+#define dsaf_promisc_tcam_entry(port) \
+ (DSAF_TCAM_SUM - DSAFV2_MAC_FUZZY_TCAM_NUM + (port))
+void hns_dsaf_set_promisc_tcam(struct dsaf_device *dsaf_dev,
+ u32 port, bool enable)
+{
+ struct dsaf_drv_priv *priv = hns_dsaf_dev_priv(dsaf_dev);
+ struct dsaf_drv_soft_mac_tbl *soft_mac_entry = priv->soft_mac_tbl;
+ u16 entry_index;
+ struct dsaf_drv_tbl_tcam_key tbl_tcam_data, tbl_tcam_mask;
+ struct dsaf_tbl_tcam_mcast_cfg mac_data = {0};
+
+ if ((AE_IS_VER1(dsaf_dev->dsaf_ver)) || HNS_DSAF_IS_DEBUG(dsaf_dev))
+ return;
+
+ /* find the tcam entry index for promisc */
+ entry_index = dsaf_promisc_tcam_entry(port);
+
+ /* config key mask */
+ if (enable) {
+ memset(&tbl_tcam_data, 0, sizeof(tbl_tcam_data));
+ memset(&tbl_tcam_mask, 0, sizeof(tbl_tcam_mask));
+ dsaf_set_field(tbl_tcam_data.low.bits.port_vlan,
+ DSAF_TBL_TCAM_KEY_PORT_M,
+ DSAF_TBL_TCAM_KEY_PORT_S, port);
+ dsaf_set_field(tbl_tcam_mask.low.bits.port_vlan,
+ DSAF_TBL_TCAM_KEY_PORT_M,
+ DSAF_TBL_TCAM_KEY_PORT_S, 0xf);
+
+ /* SUB_QID */
+ dsaf_set_bit(mac_data.tbl_mcast_port_msk[0],
+ DSAF_SERVICE_NW_NUM, true);
+ mac_data.tbl_mcast_item_vld = true; /* item_vld bit */
+ } else {
+ mac_data.tbl_mcast_item_vld = false; /* item_vld bit */
+ }
+
+ dev_dbg(dsaf_dev->dev,
+ "set_promisc_entry, %s Mac key(%#x:%#x) entry_index%d\n",
+ dsaf_dev->ae_dev.name, tbl_tcam_data.high.val,
+ tbl_tcam_data.low.val, entry_index);
+
+ /* config promisc entry with mask */
+ hns_dsaf_tcam_mc_cfg(dsaf_dev, entry_index,
+ (struct dsaf_tbl_tcam_data *)&tbl_tcam_data,
+ (struct dsaf_tbl_tcam_data *)&tbl_tcam_mask,
+ &mac_data);
+
+ /* config software entry */
+ soft_mac_entry += entry_index;
+ soft_mac_entry->index = enable ? entry_index : DSAF_INVALID_ENTRY_IDX;
+}
+
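The reserved window at the top of the TCAM is what makes this safe: one fuzzy-match entry per port, carved out of the same table the MAC entries use. A worked sketch, assuming DSAF_MAX_PORT_NUM is 6 (its definition sits outside this diff):

/* Assuming DSAF_MAX_PORT_NUM == 6, probe caps user entries below the
 * reserved window: tcam_max_num = 512 - 6 = 506, so ordinary MAC
 * entries occupy indices 0..505 while dsaf_promisc_tcam_entry(port)
 * yields 506..511 for ports 0..5.
 */
dsaf_dev->tcam_max_num = DSAF_TCAM_SUM - DSAFV2_MAC_FUZZY_TCAM_NUM;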
/**
* dsaf_probe - probe dsaf dev
* @pdev: dsaf platform device
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h
index c494fc52be74..cef6bf46ae93 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h
@@ -35,8 +35,6 @@ struct hns_mac_cb;
#define DSAF_CFG_READ_CNT 30
-#define MAC_NUM_OCTETS_PER_ADDR 6
-
#define DSAF_DUMP_REGS_NUM 504
#define DSAF_STATIC_NUM 28
#define DSAF_V2_STATIC_NUM 44
@@ -165,7 +163,7 @@ enum dsaf_mode {
/*mac entry, mc or uc entry*/
struct dsaf_drv_mac_single_dest_entry {
/* mac addr, match the entry*/
- u8 addr[MAC_NUM_OCTETS_PER_ADDR];
+ u8 addr[ETH_ALEN];
u16 in_vlan_id; /* value of VlanId */
/* the vld input port num, dsaf-mode fix 0, */
@@ -179,7 +177,7 @@ struct dsaf_drv_mac_single_dest_entry {
/*only mc entry*/
struct dsaf_drv_mac_multi_dest_entry {
/* mac addr, match the entry*/
- u8 addr[MAC_NUM_OCTETS_PER_ADDR];
+ u8 addr[ETH_ALEN];
u16 in_vlan_id;
/* this mac addr output port,*/
/* bit0-bit5 means Port0-Port5(1bit is vld)**/
@@ -308,8 +306,6 @@ struct dsaf_misc_op {
/* reset series function, it will be reset if the dereset is 0 */
void (*dsaf_reset)(struct dsaf_device *dsaf_dev, bool dereset);
void (*xge_srst)(struct dsaf_device *dsaf_dev, u32 port, bool dereset);
- void (*xge_core_srst)(struct dsaf_device *dsaf_dev, u32 port,
- bool dereset);
void (*ge_srst)(struct dsaf_device *dsaf_dev, u32 port, bool dereset);
void (*ppe_srst)(struct dsaf_device *dsaf_dev, u32 port, bool dereset);
void (*ppe_comm_srst)(struct dsaf_device *dsaf_dev, bool dereset);
@@ -343,6 +339,7 @@ struct dsaf_device {
enum hal_dsaf_mode dsaf_en;
enum hal_dsaf_tc_mode dsaf_tc_mode;
u32 dsaf_ver;
+ u16 tcam_max_num; /* max TCAM entry for user except promisc */
struct ppe_common_cb *ppe_common[DSAF_COMM_DEV_NUM];
struct rcb_common_cb *rcb_common[DSAF_COMM_DEV_NUM];
@@ -360,6 +357,11 @@ static inline void *hns_dsaf_dev_priv(const struct dsaf_device *dsaf_dev)
return (void *)((u8 *)dsaf_dev + sizeof(*dsaf_dev));
}
+#define DSAF_TBL_TCAM_KEY_PORT_S 0
+#define DSAF_TBL_TCAM_KEY_PORT_M (((1ULL << 4) - 1) << 0)
+#define DSAF_TBL_TCAM_KEY_VLAN_S 4
+#define DSAF_TBL_TCAM_KEY_VLAN_M (((1ULL << 12) - 1) << 4)
+
struct dsaf_drv_tbl_tcam_key {
union {
struct {
@@ -373,11 +375,9 @@ struct dsaf_drv_tbl_tcam_key {
} high;
union {
struct {
- u32 port:4; /* port id, */
- /* dsaf-mode fixed 0, non-dsaf-mode port id*/
- u32 vlan:12; /* vlan id */
- u32 mac_5:8;
- u32 mac_4:8;
+ u16 port_vlan;
+ u8 mac_5;
+ u8 mac_4;
} bits;
u32 val;
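Replacing the old port:4/vlan:12 bitfields with an explicit u16 plus the mask/shift macros above removes any dependence on compiler bitfield layout, which differs between little- and big-endian builds. A sketch of field access under the new scheme; the two wrappers are hypothetical, the driver calls dsaf_get_field()/dsaf_set_field() directly:

static inline u32 example_key_port(const struct dsaf_drv_tbl_tcam_key *key)
{
        return (key->low.bits.port_vlan & DSAF_TBL_TCAM_KEY_PORT_M) >>
               DSAF_TBL_TCAM_KEY_PORT_S;
}

static inline u32 example_key_vlan(const struct dsaf_drv_tbl_tcam_key *key)
{
        return (key->low.bits.port_vlan & DSAF_TBL_TCAM_KEY_VLAN_M) >>
               DSAF_TBL_TCAM_KEY_VLAN_S;
}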
@@ -461,10 +461,19 @@ void hns_dsaf_get_strings(int stringset, u8 *data, int port,
void hns_dsaf_get_regs(struct dsaf_device *ddev, u32 port, void *data);
int hns_dsaf_get_regs_count(void);
void hns_dsaf_set_promisc_mode(struct dsaf_device *dsaf_dev, u32 en);
+void hns_dsaf_set_promisc_tcam(struct dsaf_device *dsaf_dev,
+ u32 port, bool enable);
void hns_dsaf_get_rx_mac_pause_en(struct dsaf_device *dsaf_dev, int mac_id,
u32 *en);
int hns_dsaf_set_rx_mac_pause_en(struct dsaf_device *dsaf_dev, int mac_id,
u32 en);
+int hns_dsaf_rm_mac_addr(
+ struct dsaf_device *dsaf_dev,
+ struct dsaf_drv_mac_single_dest_entry *mac_entry);
+
+int hns_dsaf_clr_mac_mc_port(struct dsaf_device *dsaf_dev,
+ u8 mac_id, u8 port_num);
+
#endif /* __HNS_DSAF_MAIN_H__ */
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c
index 67accce1d33d..a2c22d084ce9 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c
@@ -23,7 +23,6 @@ enum _dsm_op_index {
enum _dsm_rst_type {
HNS_DSAF_RESET_FUNC = 0x1,
HNS_PPE_RESET_FUNC = 0x2,
- HNS_XGE_CORE_RESET_FUNC = 0x3,
HNS_XGE_RESET_FUNC = 0x4,
HNS_GE_RESET_FUNC = 0x5,
HNS_DSAF_CHN_RESET_FUNC = 0x6,
@@ -213,26 +212,6 @@ static void hns_dsaf_xge_srst_by_port_acpi(struct dsaf_device *dsaf_dev,
HNS_XGE_RESET_FUNC, port, dereset);
}
-static void hns_dsaf_xge_core_srst_by_port(struct dsaf_device *dsaf_dev,
- u32 port, bool dereset)
-{
- u32 reg_val = 0;
- u32 reg_addr;
-
- if (port >= DSAF_XGE_NUM)
- return;
-
- reg_val |= XGMAC_TRX_CORE_SRST_M
- << dsaf_dev->mac_cb[port]->port_rst_off;
-
- if (!dereset)
- reg_addr = DSAF_SUB_SC_XGE_RESET_REQ_REG;
- else
- reg_addr = DSAF_SUB_SC_XGE_RESET_DREQ_REG;
-
- dsaf_write_sub(dsaf_dev, reg_addr, reg_val);
-}
-
/**
* hns_dsaf_srst_chns - reset dsaf channels
* @dsaf_dev: dsaf device struct pointer
@@ -293,14 +272,6 @@ void hns_dsaf_roce_srst_acpi(struct dsaf_device *dsaf_dev, bool dereset)
HNS_ROCE_RESET_FUNC, 0, dereset);
}
-static void
-hns_dsaf_xge_core_srst_by_port_acpi(struct dsaf_device *dsaf_dev,
- u32 port, bool dereset)
-{
- hns_dsaf_acpi_srst_by_port(dsaf_dev, HNS_OP_RESET_FUNC,
- HNS_XGE_CORE_RESET_FUNC, port, dereset);
-}
-
static void hns_dsaf_ge_srst_by_port(struct dsaf_device *dsaf_dev, u32 port,
bool dereset)
{
@@ -597,7 +568,6 @@ struct dsaf_misc_op *hns_misc_op_get(struct dsaf_device *dsaf_dev)
misc_op->dsaf_reset = hns_dsaf_rst;
misc_op->xge_srst = hns_dsaf_xge_srst_by_port;
- misc_op->xge_core_srst = hns_dsaf_xge_core_srst_by_port;
misc_op->ge_srst = hns_dsaf_ge_srst_by_port;
misc_op->ppe_srst = hns_ppe_srst_by_port;
misc_op->ppe_comm_srst = hns_ppe_com_srst;
@@ -615,7 +585,6 @@ struct dsaf_misc_op *hns_misc_op_get(struct dsaf_device *dsaf_dev)
misc_op->dsaf_reset = hns_dsaf_rst_acpi;
misc_op->xge_srst = hns_dsaf_xge_srst_by_port_acpi;
- misc_op->xge_core_srst = hns_dsaf_xge_core_srst_by_port_acpi;
misc_op->ge_srst = hns_dsaf_ge_srst_by_port_acpi;
misc_op->ppe_srst = hns_ppe_srst_by_port_acpi;
misc_op->ppe_comm_srst = hns_ppe_com_srst;
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h
index 878950a42e6c..87226685f742 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h
@@ -41,6 +41,9 @@
#define DSAF_SW_PORT_NUM 8
#define DSAF_TOTAL_QUEUE_NUM 129
+/* reserved a tcam entry for each port to support promisc by fuzzy match */
+#define DSAFV2_MAC_FUZZY_TCAM_NUM DSAF_MAX_PORT_NUM
+
#define DSAF_TCAM_SUM 512
#define DSAF_LINE_SUM (2048 * 14)
@@ -297,6 +300,8 @@
#define DSAF_TBL_LKUP_NUM_I_0_REG 0x50C0
#define DSAF_TBL_LKUP_NUM_O_0_REG 0x50E0
#define DSAF_TBL_UCAST_BCAST_MIS_INFO_0_0_REG 0x510C
+#define DSAF_TBL_TCAM_MATCH_CFG_H_REG 0x5130
+#define DSAF_TBL_TCAM_MATCH_CFG_L_REG 0x5134
#define DSAF_INODE_FIFO_WL_0_REG 0x6000
#define DSAF_ONODE_FIFO_WL_0_REG 0x6020
@@ -309,7 +314,6 @@
#define PPE_COM_INTEN_REG 0x110
#define PPE_COM_RINT_REG 0x114
#define PPE_COM_INTSTS_REG 0x118
-#define PPE_COM_COMMON_CNT_CLR_CE_REG 0x1120
#define PPE_COM_HIS_RX_PKT_QID_DROP_CNT_REG 0x300
#define PPE_COM_HIS_RX_PKT_QID_OK_CNT_REG 0x600
#define PPE_COM_HIS_TX_PKT_QID_ERR_CNT_REG 0x900
@@ -698,8 +702,6 @@
#define XGMAC_RX_SYMBOLERRPKTS 0x0210
#define XGMAC_RX_FCSERRPKTS 0x0218
-#define XGMAC_TRX_CORE_SRST_M 0x2080
-
#define DSAF_SRAM_INIT_OVER_M 0xff
#define DSAFV2_SRAM_INIT_OVER_M 0x3ff
#define DSAF_SRAM_INIT_OVER_S 0
@@ -978,6 +980,11 @@
#define XGMAC_ENABLE_TX_B 0
#define XGMAC_ENABLE_RX_B 1
+#define XGMAC_UNIDIR_EN_B 0
+#define XGMAC_RF_TX_EN_B 1
+#define XGMAC_LF_RF_INSERT_S 2
+#define XGMAC_LF_RF_INSERT_M (0x3 << XGMAC_LF_RF_INSERT_S)
+
#define XGMAC_CTL_TX_FCS_B 0
#define XGMAC_CTL_TX_PAD_B 1
#define XGMAC_CTL_TX_PREAMBLE_TRANS_B 3
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c
index 8f4f0e8da984..aae830a93050 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c
@@ -108,6 +108,31 @@ static void hns_xgmac_rx_enable(struct mac_driver *drv, u32 value)
}
/**
+ * hns_xgmac_lf_rf_insert - insert the lf or rf control word for xgmac
+ * @mac_drv: mac driver
+ * @mode: insert rf or lf
+ */
+static void hns_xgmac_lf_rf_insert(struct mac_driver *mac_drv, u32 mode)
+{
+ dsaf_set_dev_field(mac_drv, XGMAC_MAC_TX_LF_RF_CONTROL_REG,
+ XGMAC_LF_RF_INSERT_M, XGMAC_LF_RF_INSERT_S, mode);
+}
+
+/**
+ * hns_xgmac_lf_rf_control_init - initialize the lf rf control register
+ * @mac_drv: mac driver
+ */
+static void hns_xgmac_lf_rf_control_init(struct mac_driver *mac_drv)
+{
+ u32 val = 0;
+
+ dsaf_set_bit(val, XGMAC_UNIDIR_EN_B, 0);
+ dsaf_set_bit(val, XGMAC_RF_TX_EN_B, 1);
+ dsaf_set_field(val, XGMAC_LF_RF_INSERT_M, XGMAC_LF_RF_INSERT_S, 0);
+ dsaf_write_reg(mac_drv, XGMAC_MAC_TX_LF_RF_CONTROL_REG, val);
+}
+
+/**
*hns_xgmac_enable - enable xgmac port
*@drv: mac driver
*@mode: mode of mac port
@@ -115,12 +140,8 @@ static void hns_xgmac_rx_enable(struct mac_driver *drv, u32 value)
static void hns_xgmac_enable(void *mac_drv, enum mac_commom_mode mode)
{
struct mac_driver *drv = (struct mac_driver *)mac_drv;
- struct dsaf_device *dsaf_dev
- = (struct dsaf_device *)dev_get_drvdata(drv->dev);
- u32 port = drv->mac_id;
- dsaf_dev->misc_op->xge_core_srst(dsaf_dev, port, 1);
- mdelay(10);
+ hns_xgmac_lf_rf_insert(drv, HNS_XGMAC_NO_LF_RF_INSERT);
/*enable XGE rX/tX */
if (mode == MAC_COMM_MODE_TX) {
@@ -143,9 +164,6 @@ static void hns_xgmac_enable(void *mac_drv, enum mac_commom_mode mode)
static void hns_xgmac_disable(void *mac_drv, enum mac_commom_mode mode)
{
struct mac_driver *drv = (struct mac_driver *)mac_drv;
- struct dsaf_device *dsaf_dev
- = (struct dsaf_device *)dev_get_drvdata(drv->dev);
- u32 port = drv->mac_id;
if (mode == MAC_COMM_MODE_TX) {
hns_xgmac_tx_enable(drv, 0);
@@ -155,9 +173,7 @@ static void hns_xgmac_disable(void *mac_drv, enum mac_commom_mode mode)
hns_xgmac_tx_enable(drv, 0);
hns_xgmac_rx_enable(drv, 0);
}
-
- mdelay(10);
- dsaf_dev->misc_op->xge_core_srst(dsaf_dev, port, 0);
+ hns_xgmac_lf_rf_insert(drv, HNS_XGMAC_LF_INSERT);
}
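The design change here: instead of resetting the XGE core around enable/disable (the removed xge_core_srst calls and mdelay()s), link state is now signalled in-band -- fault insertion is cleared on enable and a local fault is forced on disable so the link partner sees the port drop. A condensed sketch of the new semantics (example_xgmac_set_link() is a hypothetical wrapper):

static void example_xgmac_set_link(struct mac_driver *drv, bool up)
{
        /* up: stop inserting faults, link becomes usable;
         * down: insert local fault so the peer drops the link
         */
        hns_xgmac_lf_rf_insert(drv, up ? HNS_XGMAC_NO_LF_RF_INSERT
                                       : HNS_XGMAC_LF_INSERT);
}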
/**
@@ -203,6 +219,7 @@ static void hns_xgmac_init(void *mac_drv)
dsaf_dev->misc_op->xge_srst(dsaf_dev, port, 1);
mdelay(100);
+ hns_xgmac_lf_rf_control_init(drv);
hns_xgmac_exc_irq_en(drv, 0);
hns_xgmac_pma_fec_enable(drv, 0x0, 0x0);
@@ -788,7 +805,7 @@ static int hns_xgmac_get_sset_count(int stringset)
*/
static int hns_xgmac_get_regs_count(void)
{
- return ETH_XGMAC_DUMP_NUM;
+ return HNS_XGMAC_DUMP_NUM;
}
void *hns_xgmac_config(struct hns_mac_cb *mac_cb, struct mac_params *mac_param)
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.h
index 139f7297c7b4..da6c5343d3e1 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.h
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.h
@@ -10,6 +10,7 @@
#ifndef _HNS_XGMAC_H
#define _HNS_XGMAC_H
-#define ETH_XGMAC_DUMP_NUM (214)
-
+#define HNS_XGMAC_DUMP_NUM 214
+#define HNS_XGMAC_NO_LF_RF_INSERT 0x0
+#define HNS_XGMAC_LF_INSERT 0x2
#endif
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
index dff7b60345d8..672b64606321 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
@@ -22,6 +22,7 @@
#include "hnae.h"
#include "hns_enet.h"
+#include "hns_dsaf_mac.h"
#define NIC_MAX_Q_PER_VF 16
#define HNS_NIC_TX_TIMEOUT (5 * HZ)
@@ -565,6 +566,71 @@ static void get_rx_desc_bnum(u32 bnum_flag, int *out_bnum)
HNS_RXD_BUFNUM_M, HNS_RXD_BUFNUM_S);
}
+static void hns_nic_rx_checksum(struct hns_nic_ring_data *ring_data,
+ struct sk_buff *skb, u32 flag)
+{
+ struct net_device *netdev = ring_data->napi.dev;
+ u32 l3id;
+ u32 l4id;
+
+ /* check if RX checksum offload is enabled */
+ if (unlikely(!(netdev->features & NETIF_F_RXCSUM)))
+ return;
+
+ /* In hardware, we only support checksum for the following protocols:
+ * 1) IPv4,
+ * 2) TCP(over IPv4 or IPv6),
+ * 3) UDP(over IPv4 or IPv6),
+ * 4) SCTP(over IPv4 or IPv6)
+ * but we support many L3 (IPv4, IPv6, MPLS, PPPoE, etc.) and L4 (TCP,
+ * UDP, GRE, SCTP, IGMP, ICMP, etc.) protocols.
+ *
+ * Hardware limitation:
+ * Our present hardware RX Descriptor lacks L3/L4 checksum "Status &
+ * Error" bit (which usually can be used to indicate whether checksum
+ * was calculated by the hardware and if there was any error encountered
+ * during checksum calculation).
+ *
+ * Software workaround:
+ * We do get info within the RX descriptor about the kind of L3/L4
+ * protocol coming in the packet and the error status. These errors
+ * might not just be checksum errors but could be related to version,
+ * length of IPv4, UDP, TCP etc.
+ * Because there is no way of knowing if it is an L3/L4 error due to bad
+ * checksum or any other L3/L4 error, we will not (cannot) convey
+ * checksum status for such cases to upper stack and will not maintain
+ * the RX L3/L4 checksum counters as well.
+ */
+
+ l3id = hnae_get_field(flag, HNS_RXD_L3ID_M, HNS_RXD_L3ID_S);
+ l4id = hnae_get_field(flag, HNS_RXD_L4ID_M, HNS_RXD_L4ID_S);
+
+ /* check L3 protocol for which checksum is supported */
+ if ((l3id != HNS_RX_FLAG_L3ID_IPV4) && (l3id != HNS_RX_FLAG_L3ID_IPV6))
+ return;
+
+ /* check for any (not just checksum) flagged L3 protocol errors */
+ if (unlikely(hnae_get_bit(flag, HNS_RXD_L3E_B)))
+ return;
+
+ /* we do not support checksum of fragmented packets */
+ if (unlikely(hnae_get_bit(flag, HNS_RXD_FRAG_B)))
+ return;
+
+ /* check L4 protocol for which checksum is supported */
+ if ((l4id != HNS_RX_FLAG_L4ID_TCP) &&
+ (l4id != HNS_RX_FLAG_L4ID_UDP) &&
+ (l4id != HNS_RX_FLAG_L4ID_SCTP))
+ return;
+
+ /* check for any (not just checksum) flagged L4 protocol errors */
+ if (unlikely(hnae_get_bit(flag, HNS_RXD_L4E_B)))
+ return;
+
+ /* now, this has to be a packet with valid RX checksum */
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+}
+
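Since the descriptor has no per-packet "checksum valid" bit, the function above is effectively a whitelist predicate: supported L3, no flagged L3/L4 error, not a fragment, supported L4. The same decision, restated as a standalone sketch using the bit/field accessors from the diff:

static bool example_rx_csum_ok(u32 flag)
{
        u32 l3id = hnae_get_field(flag, HNS_RXD_L3ID_M, HNS_RXD_L3ID_S);
        u32 l4id = hnae_get_field(flag, HNS_RXD_L4ID_M, HNS_RXD_L4ID_S);

        if (l3id != HNS_RX_FLAG_L3ID_IPV4 && l3id != HNS_RX_FLAG_L3ID_IPV6)
                return false;   /* unsupported L3 protocol */
        if (hnae_get_bit(flag, HNS_RXD_L3E_B) ||
            hnae_get_bit(flag, HNS_RXD_L4E_B) ||
            hnae_get_bit(flag, HNS_RXD_FRAG_B))
                return false;   /* flagged error or fragmented packet */
        return l4id == HNS_RX_FLAG_L4ID_TCP ||
               l4id == HNS_RX_FLAG_L4ID_UDP ||
               l4id == HNS_RX_FLAG_L4ID_SCTP;
}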
static int hns_nic_poll_rx_skb(struct hns_nic_ring_data *ring_data,
struct sk_buff **out_skb, int *out_bnum)
{
@@ -683,13 +749,10 @@ out_bnum_err:
ring->stats.rx_pkts++;
ring->stats.rx_bytes += skb->len;
- if (unlikely(hnae_get_bit(bnum_flag, HNS_RXD_L3E_B) ||
- hnae_get_bit(bnum_flag, HNS_RXD_L4E_B))) {
- ring->stats.l3l4_csum_err++;
- return 0;
- }
-
- skb->ip_summed = CHECKSUM_UNNECESSARY;
+ /* indicate to upper stack if our hardware has already calculated
+ * the RX checksum
+ */
+ hns_nic_rx_checksum(ring_data, skb, bnum_flag);
return 0;
}
@@ -1426,10 +1489,6 @@ static int hns_nic_change_mtu(struct net_device *ndev, int new_mtu)
struct hnae_handle *h = priv->ae_handle;
int ret;
- /* MTU < 68 is an error and causes problems on some kernels */
- if (new_mtu < 68)
- return -EINVAL;
-
if (!h->dev->ops->set_mtu)
return -ENOTSUPP;
@@ -1496,6 +1555,29 @@ static netdev_features_t hns_nic_fix_features(
return features;
}
+static int hns_nic_uc_sync(struct net_device *netdev, const unsigned char *addr)
+{
+ struct hns_nic_priv *priv = netdev_priv(netdev);
+ struct hnae_handle *h = priv->ae_handle;
+
+ if (h->dev->ops->add_uc_addr)
+ return h->dev->ops->add_uc_addr(h, addr);
+
+ return 0;
+}
+
+static int hns_nic_uc_unsync(struct net_device *netdev,
+ const unsigned char *addr)
+{
+ struct hns_nic_priv *priv = netdev_priv(netdev);
+ struct hnae_handle *h = priv->ae_handle;
+
+ if (h->dev->ops->rm_uc_addr)
+ return h->dev->ops->rm_uc_addr(h, addr);
+
+ return 0;
+}
+
/**
* nic_set_multicast_list - set multicast mac address
* @netdev: net device
@@ -1514,6 +1596,10 @@ void hns_set_multicast_list(struct net_device *ndev)
return;
}
+ if (h->dev->ops->clr_mc_addr)
+ if (h->dev->ops->clr_mc_addr(h))
+ netdev_err(ndev, "clear multicast address fail\n");
+
if (h->dev->ops->set_mc_addr) {
netdev_for_each_mc_addr(ha, ndev)
if (h->dev->ops->set_mc_addr(h, ha->addr))
@@ -1534,6 +1620,9 @@ void hns_nic_set_rx_mode(struct net_device *ndev)
}
hns_set_multicast_list(ndev);
+
+ if (__dev_uc_sync(ndev, hns_nic_uc_sync, hns_nic_uc_unsync))
+ netdev_err(ndev, "sync uc address fail\n");
}
struct rtnl_link_stats64 *hns_nic_get_stats64(struct net_device *ndev,
@@ -1992,14 +2081,20 @@ static int hns_nic_dev_probe(struct platform_device *pdev)
NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM;
ndev->vlan_features |= NETIF_F_SG | NETIF_F_GSO | NETIF_F_GRO;
+ /* MTU range: 68 - 9578 (v1) or 9706 (v2) */
+ ndev->min_mtu = MAC_MIN_MTU;
switch (priv->enet_ver) {
case AE_VERSION_2:
ndev->features |= NETIF_F_TSO | NETIF_F_TSO6;
ndev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO |
NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6;
+ ndev->max_mtu = MAC_MAX_MTU_V2 -
+ (ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
break;
default:
+ ndev->max_mtu = MAC_MAX_MTU -
+ (ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
break;
}
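The max_mtu arithmetic follows from the MAC frame-size limits minus 22 bytes of overhead (14-byte Ethernet header, 4-byte FCS, 4-byte VLAN tag). The frame sizes below are inferred from the 9578/9706 figures in the comment; the MAC_MAX_MTU constants themselves live in hns_dsaf_mac.h, outside this diff:

/* overhead = ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN = 14 + 4 + 4 = 22
 * v1: MAC_MAX_MTU    (inferred 9600) - 22 = 9578
 * v2: MAC_MAX_MTU_V2 (inferred 9728) - 22 = 9706
 */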
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
index 87d5c94b2810..3ac2183dbd21 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
@@ -1178,7 +1178,8 @@ static int hns_nic_nway_reset(struct net_device *netdev)
struct phy_device *phy = netdev->phydev;
if (netif_running(netdev)) {
- if (phy)
+ /* if autoneg is disabled, don't restart auto-negotiation */
+ if (phy && phy->autoneg == AUTONEG_ENABLE)
ret = genphy_restart_aneg(phy);
}
diff --git a/drivers/net/ethernet/hp/hp100.c b/drivers/net/ethernet/hp/hp100.c
index 631dbc7b4dbb..1a31bee6e728 100644
--- a/drivers/net/ethernet/hp/hp100.c
+++ b/drivers/net/ethernet/hp/hp100.c
@@ -427,7 +427,6 @@ static const struct net_device_ops hp100_bm_netdev_ops = {
.ndo_start_xmit = hp100_start_xmit_bm,
.ndo_get_stats = hp100_get_stats,
.ndo_set_rx_mode = hp100_set_multicast_list,
- .ndo_change_mtu = eth_change_mtu,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
};
@@ -438,7 +437,6 @@ static const struct net_device_ops hp100_netdev_ops = {
.ndo_start_xmit = hp100_start_xmit,
.ndo_get_stats = hp100_get_stats,
.ndo_set_rx_mode = hp100_set_multicast_list,
- .ndo_change_mtu = eth_change_mtu,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
};
diff --git a/drivers/net/ethernet/i825xx/82596.c b/drivers/net/ethernet/i825xx/82596.c
index ce235b776793..945883842533 100644
--- a/drivers/net/ethernet/i825xx/82596.c
+++ b/drivers/net/ethernet/i825xx/82596.c
@@ -1118,7 +1118,6 @@ static const struct net_device_ops i596_netdev_ops = {
.ndo_start_xmit = i596_start_xmit,
.ndo_set_rx_mode = set_multicast_list,
.ndo_tx_timeout = i596_tx_timeout,
- .ndo_change_mtu = eth_change_mtu,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
};
diff --git a/drivers/net/ethernet/i825xx/ether1.c b/drivers/net/ethernet/i825xx/ether1.c
index 5d353c660068..dc983450354b 100644
--- a/drivers/net/ethernet/i825xx/ether1.c
+++ b/drivers/net/ethernet/i825xx/ether1.c
@@ -981,7 +981,6 @@ static const struct net_device_ops ether1_netdev_ops = {
.ndo_set_rx_mode = ether1_setmulticastlist,
.ndo_tx_timeout = ether1_timeout,
.ndo_validate_addr = eth_validate_addr,
- .ndo_change_mtu = eth_change_mtu,
.ndo_set_mac_address = eth_mac_addr,
};
diff --git a/drivers/net/ethernet/i825xx/lib82596.c b/drivers/net/ethernet/i825xx/lib82596.c
index 3dbc53c21baa..e86773325cbe 100644
--- a/drivers/net/ethernet/i825xx/lib82596.c
+++ b/drivers/net/ethernet/i825xx/lib82596.c
@@ -1037,7 +1037,6 @@ static const struct net_device_ops i596_netdev_ops = {
.ndo_start_xmit = i596_start_xmit,
.ndo_set_rx_mode = set_multicast_list,
.ndo_tx_timeout = i596_tx_timeout,
- .ndo_change_mtu = eth_change_mtu,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
diff --git a/drivers/net/ethernet/i825xx/sun3_82586.c b/drivers/net/ethernet/i825xx/sun3_82586.c
index 21c84cc9c871..8bb15a8c2a40 100644
--- a/drivers/net/ethernet/i825xx/sun3_82586.c
+++ b/drivers/net/ethernet/i825xx/sun3_82586.c
@@ -337,7 +337,6 @@ static const struct net_device_ops sun3_82586_netdev_ops = {
.ndo_get_stats = sun3_82586_get_stats,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
- .ndo_change_mtu = eth_change_mtu,
};
static int __init sun3_82586_probe1(struct net_device *dev,int ioaddr)
diff --git a/drivers/net/ethernet/ibm/ehea/ehea_main.c b/drivers/net/ethernet/ibm/ehea/ehea_main.c
index bd719e25dd76..702446a93697 100644
--- a/drivers/net/ethernet/ibm/ehea/ehea_main.c
+++ b/drivers/net/ethernet/ibm/ehea/ehea_main.c
@@ -1981,14 +1981,6 @@ out:
ehea_update_bcmc_registrations();
}
-static int ehea_change_mtu(struct net_device *dev, int new_mtu)
-{
- if ((new_mtu < 68) || (new_mtu > EHEA_MAX_PACKET_SIZE))
- return -EINVAL;
- dev->mtu = new_mtu;
- return 0;
-}
-
static void xmit_common(struct sk_buff *skb, struct ehea_swqe *swqe)
{
swqe->tx_control |= EHEA_SWQE_IMM_DATA_PRESENT | EHEA_SWQE_CRC;
@@ -2970,7 +2962,6 @@ static const struct net_device_ops ehea_netdev_ops = {
.ndo_set_mac_address = ehea_set_mac_addr,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_rx_mode = ehea_set_multicast_list,
- .ndo_change_mtu = ehea_change_mtu,
.ndo_vlan_rx_add_vid = ehea_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = ehea_vlan_rx_kill_vid,
.ndo_tx_timeout = ehea_tx_watchdog,
@@ -3043,13 +3034,16 @@ static struct ehea_port *ehea_setup_single_port(struct ehea_adapter *adapter,
NETIF_F_IP_CSUM;
dev->watchdog_timeo = EHEA_WATCH_DOG_TIMEOUT;
+ /* MTU range: 68 - 9022 */
+ dev->min_mtu = ETH_MIN_MTU;
+ dev->max_mtu = EHEA_MAX_PACKET_SIZE;
+
INIT_WORK(&port->reset_task, ehea_reset_port);
INIT_DELAYED_WORK(&port->stats_work, ehea_update_stats);
init_waitqueue_head(&port->swqe_avail_wq);
init_waitqueue_head(&port->restart_wq);
- memset(&port->stats, 0, sizeof(struct net_device_stats));
ret = register_netdev(dev);
if (ret) {
pr_err("register_netdev failed. ret=%d\n", ret);
diff --git a/drivers/net/ethernet/ibm/emac/core.c b/drivers/net/ethernet/ibm/emac/core.c
index 8f139197f1aa..52a69c925965 100644
--- a/drivers/net/ethernet/ibm/emac/core.c
+++ b/drivers/net/ethernet/ibm/emac/core.c
@@ -1099,9 +1099,6 @@ static int emac_change_mtu(struct net_device *ndev, int new_mtu)
struct emac_instance *dev = netdev_priv(ndev);
int ret = 0;
- if (new_mtu < EMAC_MIN_MTU || new_mtu > dev->max_mtu)
- return -EINVAL;
-
DBG(dev, "change_mtu(%d)" NL, new_mtu);
if (netif_running(ndev)) {
@@ -2564,7 +2561,7 @@ static int emac_init_config(struct emac_instance *dev)
if (emac_read_uint_prop(np, "cell-index", &dev->cell_index, 1))
return -ENXIO;
if (emac_read_uint_prop(np, "max-frame-size", &dev->max_mtu, 0))
- dev->max_mtu = 1500;
+ dev->max_mtu = ETH_DATA_LEN;
if (emac_read_uint_prop(np, "rx-fifo-size", &dev->rx_fifo_size, 0))
dev->rx_fifo_size = 2048;
if (emac_read_uint_prop(np, "tx-fifo-size", &dev->tx_fifo_size, 0))
@@ -2718,7 +2715,6 @@ static const struct net_device_ops emac_netdev_ops = {
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = emac_set_mac_address,
.ndo_start_xmit = emac_start_xmit,
- .ndo_change_mtu = eth_change_mtu,
};
static const struct net_device_ops emac_gige_netdev_ops = {
@@ -2891,6 +2887,10 @@ static int emac_probe(struct platform_device *ofdev)
ndev->netdev_ops = &emac_netdev_ops;
ndev->ethtool_ops = &emac_ethtool_ops;
+ /* MTU range: 46 - 1500 or whatever is in OF */
+ ndev->min_mtu = EMAC_MIN_MTU;
+ ndev->max_mtu = dev->max_mtu;
+
netif_carrier_off(ndev);
err = register_netdev(ndev);
diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c
index a36022ba4e42..a831f947ca8c 100644
--- a/drivers/net/ethernet/ibm/ibmveth.c
+++ b/drivers/net/ethernet/ibm/ibmveth.c
@@ -1181,7 +1181,9 @@ map_failed:
static void ibmveth_rx_mss_helper(struct sk_buff *skb, u16 mss, int lrg_pkt)
{
+ struct tcphdr *tcph;
int offset = 0;
+ int hdr_len;
/* only TCP packets will be aggregated */
if (skb->protocol == htons(ETH_P_IP)) {
@@ -1208,14 +1210,20 @@ static void ibmveth_rx_mss_helper(struct sk_buff *skb, u16 mss, int lrg_pkt)
/* if mss is not set through Large Packet bit/mss in rx buffer,
* expect that the mss will be written to the tcp header checksum.
*/
+ tcph = (struct tcphdr *)(skb->data + offset);
if (lrg_pkt) {
skb_shinfo(skb)->gso_size = mss;
} else if (offset) {
- struct tcphdr *tcph = (struct tcphdr *)(skb->data + offset);
-
skb_shinfo(skb)->gso_size = ntohs(tcph->check);
tcph->check = 0;
}
+
+ if (skb_shinfo(skb)->gso_size) {
+ hdr_len = offset + tcph->doff * 4;
+ skb_shinfo(skb)->gso_segs =
+ DIV_ROUND_UP(skb->len - hdr_len,
+ skb_shinfo(skb)->gso_size);
+ }
}
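gso_segs has to be rebuilt here because the hypervisor hands over one aggregated skb; dividing the TCP payload by the MSS recovers the original segment count. A worked example, assuming skb->data starts at the IP header as the earlier part of this helper implies:

/* IPv4 without options: offset == 20; doff == 5 adds a 20-byte TCP
 * header, so hdr_len == 40.  For an aggregated skb:
 *   skb->len == 14040, gso_size == 1400
 *   gso_segs = DIV_ROUND_UP(14040 - 40, 1400) = 10
 */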
static int ibmveth_poll(struct napi_struct *napi, int budget)
@@ -1410,9 +1418,6 @@ static int ibmveth_change_mtu(struct net_device *dev, int new_mtu)
int i, rc;
int need_restart = 0;
- if (new_mtu < IBMVETH_MIN_MTU)
- return -EINVAL;
-
for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++)
if (new_mtu_oh <= adapter->rx_buff_pool[i].buff_size)
break;
@@ -1612,6 +1617,9 @@ static int ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id)
netdev->hw_features |= NETIF_F_TSO;
}
+ netdev->min_mtu = IBMVETH_MIN_MTU;
+ netdev->max_mtu = ETH_MAX_MTU;
+
memcpy(netdev->dev_addr, mac_addr_p, ETH_ALEN);
if (firmware_has_feature(FW_FEATURE_CMO))
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index 0fbf686f5e7c..c12596676bbb 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -901,17 +901,6 @@ static int ibmvnic_set_mac(struct net_device *netdev, void *p)
return 0;
}
-static int ibmvnic_change_mtu(struct net_device *netdev, int new_mtu)
-{
- struct ibmvnic_adapter *adapter = netdev_priv(netdev);
-
- if (new_mtu > adapter->req_mtu || new_mtu < adapter->min_mtu)
- return -EINVAL;
-
- netdev->mtu = new_mtu;
- return 0;
-}
-
static void ibmvnic_tx_timeout(struct net_device *dev)
{
struct ibmvnic_adapter *adapter = netdev_priv(dev);
@@ -1028,7 +1017,6 @@ static const struct net_device_ops ibmvnic_netdev_ops = {
.ndo_set_rx_mode = ibmvnic_set_multi,
.ndo_set_mac_address = ibmvnic_set_mac,
.ndo_validate_addr = eth_validate_addr,
- .ndo_change_mtu = ibmvnic_change_mtu,
.ndo_tx_timeout = ibmvnic_tx_timeout,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = ibmvnic_netpoll_controller,
@@ -2638,10 +2626,12 @@ static void handle_query_cap_rsp(union ibmvnic_crq *crq,
break;
case MIN_MTU:
adapter->min_mtu = be64_to_cpu(crq->query_capability.number);
+ netdev->min_mtu = adapter->min_mtu;
netdev_dbg(netdev, "min_mtu = %lld\n", adapter->min_mtu);
break;
case MAX_MTU:
adapter->max_mtu = be64_to_cpu(crq->query_capability.number);
+ netdev->max_mtu = adapter->max_mtu;
netdev_dbg(netdev, "max_mtu = %lld\n", adapter->max_mtu);
break;
case MAX_MULTICAST_FILTERS:
@@ -3667,6 +3657,8 @@ static void handle_crq_init_rsp(struct work_struct *work)
netdev->real_num_tx_queues = adapter->req_tx_queues;
netdev->mtu = adapter->req_mtu;
+ netdev->min_mtu = adapter->min_mtu;
+ netdev->max_mtu = adapter->max_mtu;
if (adapter->failover) {
adapter->failover = false;
diff --git a/drivers/net/ethernet/intel/Kconfig b/drivers/net/ethernet/intel/Kconfig
index c0e17433f623..1349b45f014d 100644
--- a/drivers/net/ethernet/intel/Kconfig
+++ b/drivers/net/ethernet/intel/Kconfig
@@ -58,7 +58,7 @@ config E1000E
tristate "Intel(R) PRO/1000 PCI-Express Gigabit Ethernet support"
depends on PCI && (!SPARC32 || BROKEN)
select CRC32
- select PTP_1588_CLOCK
+ imply PTP_1588_CLOCK
---help---
This driver supports the PCI-Express Intel(R) PRO/1000 gigabit
ethernet family of adapters. For PCI or PCI-X e1000 adapters,
@@ -83,7 +83,7 @@ config E1000E_HWTS
config IGB
tristate "Intel(R) 82575/82576 PCI-Express Gigabit Ethernet support"
depends on PCI
- select PTP_1588_CLOCK
+ imply PTP_1588_CLOCK
select I2C
select I2C_ALGOBIT
---help---
@@ -156,7 +156,7 @@ config IXGBE
tristate "Intel(R) 10GbE PCI Express adapters support"
depends on PCI
select MDIO
- select PTP_1588_CLOCK
+ imply PTP_1588_CLOCK
---help---
This driver supports Intel(R) 10GbE PCI Express family of
adapters. For more information on how to identify your adapter, go
@@ -213,7 +213,7 @@ config IXGBEVF
config I40E
tristate "Intel(R) Ethernet Controller XL710 Family support"
- select PTP_1588_CLOCK
+ imply PTP_1588_CLOCK
depends on PCI
---help---
This driver supports Intel(R) Ethernet Controller XL710 Family of
@@ -264,7 +264,7 @@ config FM10K
tristate "Intel(R) FM10000 Ethernet Switch Host Interface Support"
default n
depends on PCI_MSI
- select PTP_1588_CLOCK
+ imply PTP_1588_CLOCK
---help---
This driver supports Intel(R) FM10000 Ethernet Switch Host
Interface. For more information on how to identify your adapter,
diff --git a/drivers/net/ethernet/intel/e100.c b/drivers/net/ethernet/intel/e100.c
index 068789e694c9..25c6dfd500b4 100644
--- a/drivers/net/ethernet/intel/e100.c
+++ b/drivers/net/ethernet/intel/e100.c
@@ -2286,14 +2286,6 @@ static int e100_set_mac_address(struct net_device *netdev, void *p)
return 0;
}
-static int e100_change_mtu(struct net_device *netdev, int new_mtu)
-{
- if (new_mtu < ETH_ZLEN || new_mtu > ETH_DATA_LEN)
- return -EINVAL;
- netdev->mtu = new_mtu;
- return 0;
-}
-
static int e100_asf(struct nic *nic)
{
/* ASF can be enabled from eeprom */
@@ -2834,7 +2826,6 @@ static const struct net_device_ops e100_netdev_ops = {
.ndo_validate_addr = eth_validate_addr,
.ndo_set_rx_mode = e100_set_multicast_list,
.ndo_set_mac_address = e100_set_mac_address,
- .ndo_change_mtu = e100_change_mtu,
.ndo_do_ioctl = e100_do_ioctl,
.ndo_tx_timeout = e100_tx_timeout,
#ifdef CONFIG_NET_POLL_CONTROLLER
diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
index f42129d09e2c..93fc6c67306b 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_main.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
@@ -1085,6 +1085,10 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
hw->subsystem_vendor_id != PCI_VENDOR_ID_VMWARE)
netdev->priv_flags |= IFF_UNICAST_FLT;
+ /* MTU range: 46 - 16110 */
+ netdev->min_mtu = ETH_ZLEN - ETH_HLEN;
+ netdev->max_mtu = MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN);
+
adapter->en_mng_pt = e1000_enable_mng_pass_thru(hw);
/* initialize eeprom parameters */
@@ -3549,13 +3553,7 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
- int max_frame = new_mtu + ENET_HEADER_SIZE + ETHERNET_FCS_SIZE;
-
- if ((max_frame < MINIMUM_ETHERNET_FRAME_SIZE) ||
- (max_frame > MAX_JUMBO_FRAME_SIZE)) {
- e_err(probe, "Invalid MTU setting\n");
- return -EINVAL;
- }
+ int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
/* Adapter-specific max frame size limits. */
switch (hw->mac_type) {
@@ -5257,8 +5255,8 @@ static void e1000_netpoll(struct net_device *netdev)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
- disable_irq(adapter->pdev->irq);
- e1000_intr(adapter->pdev->irq, netdev);
+ if (disable_hardirq(adapter->pdev->irq))
+ e1000_intr(adapter->pdev->irq, netdev);
enable_irq(adapter->pdev->irq);
}
#endif
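The disable_irq() to disable_hardirq() switch matters because netpoll runs in atomic context: disable_hardirq() waits only for the hard-IRQ part of the handler and returns false when a threaded handler may still be running, which the caller must not block on, hence the handler invocation becomes conditional. The pattern, as a sketch with placeholder names (irq, handler, dev):

/* Guarded-poll pattern: only call the handler if the line was masked
 * without having to wait on a threaded handler.
 */
if (disable_hardirq(irq))
        handler(irq, dev);
enable_irq(irq);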
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index 7017281ba2dc..ffcf35af4881 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -5974,19 +5974,12 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
int max_frame = new_mtu + VLAN_ETH_HLEN + ETH_FCS_LEN;
/* Jumbo frame support */
- if ((max_frame > (VLAN_ETH_FRAME_LEN + ETH_FCS_LEN)) &&
+ if ((new_mtu > ETH_DATA_LEN) &&
!(adapter->flags & FLAG_HAS_JUMBO_FRAMES)) {
e_err("Jumbo Frames not supported.\n");
return -EINVAL;
}
- /* Supported frame sizes */
- if ((new_mtu < (VLAN_ETH_ZLEN + ETH_FCS_LEN)) ||
- (max_frame > adapter->max_hw_frame_size)) {
- e_err("Unsupported MTU setting\n");
- return -EINVAL;
- }
-
/* Jumbo frame workaround on 82579 and newer requires CRC be stripped */
if ((adapter->hw.mac.type >= e1000_pch2lan) &&
!(adapter->flags2 & FLAG2_CRC_STRIPPING) &&
@@ -6762,13 +6755,13 @@ static void e1000_netpoll(struct net_device *netdev)
e1000_intr_msix(adapter->pdev->irq, netdev);
break;
case E1000E_INT_MODE_MSI:
- disable_irq(adapter->pdev->irq);
- e1000_intr_msi(adapter->pdev->irq, netdev);
+ if (disable_hardirq(adapter->pdev->irq))
+ e1000_intr_msi(adapter->pdev->irq, netdev);
enable_irq(adapter->pdev->irq);
break;
default: /* E1000E_INT_MODE_LEGACY */
- disable_irq(adapter->pdev->irq);
- e1000_intr(adapter->pdev->irq, netdev);
+ if (disable_hardirq(adapter->pdev->irq))
+ e1000_intr(adapter->pdev->irq, netdev);
enable_irq(adapter->pdev->irq);
break;
}
@@ -7187,6 +7180,11 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
netdev->vlan_features |= NETIF_F_HIGHDMA;
}
+ /* MTU range: 68 - max_hw_frame_size */
+ netdev->min_mtu = ETH_MIN_MTU;
+ netdev->max_mtu = adapter->max_hw_frame_size -
+ (VLAN_ETH_HLEN + ETH_FCS_LEN);
+
if (e1000e_enable_mng_pass_thru(&adapter->hw))
adapter->flags |= FLAG_MNG_PT_ENABLED;
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
index 05629381be6b..bc5ef6eb3dd6 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
@@ -706,16 +706,6 @@ static netdev_tx_t fm10k_xmit_frame(struct sk_buff *skb, struct net_device *dev)
return err;
}
-static int fm10k_change_mtu(struct net_device *dev, int new_mtu)
-{
- if (new_mtu < 68 || new_mtu > FM10K_MAX_JUMBO_FRAME_SIZE)
- return -EINVAL;
-
- dev->mtu = new_mtu;
-
- return 0;
-}
-
/**
* fm10k_tx_timeout - Respond to a Tx Hang
* @netdev: network interface device structure
@@ -1405,7 +1395,6 @@ static const struct net_device_ops fm10k_netdev_ops = {
.ndo_validate_addr = eth_validate_addr,
.ndo_start_xmit = fm10k_xmit_frame,
.ndo_set_mac_address = fm10k_set_mac,
- .ndo_change_mtu = fm10k_change_mtu,
.ndo_tx_timeout = fm10k_tx_timeout,
.ndo_vlan_rx_add_vid = fm10k_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = fm10k_vlan_rx_kill_vid,
@@ -1490,5 +1479,9 @@ struct net_device *fm10k_alloc_netdev(const struct fm10k_info *info)
dev->hw_features |= hw_features;
+ /* MTU range: 68 - 15342 */
+ dev->min_mtu = ETH_MIN_MTU;
+ dev->max_mtu = FM10K_MAX_JUMBO_FRAME_SIZE;
+
return dev;
}
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
index 6d61e443bdf8..ba8d30984bee 100644
--- a/drivers/net/ethernet/intel/i40e/i40e.h
+++ b/drivers/net/ethernet/intel/i40e/i40e.h
@@ -39,6 +39,7 @@
#include <linux/iommu.h>
#include <linux/slab.h>
#include <linux/list.h>
+#include <linux/hashtable.h>
#include <linux/string.h>
#include <linux/in.h>
#include <linux/ip.h>
@@ -355,9 +356,11 @@ struct i40e_pf {
#define I40E_FLAG_NO_DCB_SUPPORT BIT_ULL(45)
#define I40E_FLAG_USE_SET_LLDP_MIB BIT_ULL(46)
#define I40E_FLAG_STOP_FW_LLDP BIT_ULL(47)
-#define I40E_FLAG_HAVE_10GBASET_PHY BIT_ULL(48)
+#define I40E_FLAG_PHY_CONTROLS_LEDS BIT_ULL(48)
#define I40E_FLAG_PF_MAC BIT_ULL(50)
#define I40E_FLAG_TRUE_PROMISC_SUPPORT BIT_ULL(51)
+#define I40E_FLAG_HAVE_CRT_RETIMER BIT_ULL(52)
+#define I40E_FLAG_PTP_L4_CAPABLE BIT_ULL(53)
/* tracks features that get auto disabled by errors */
u64 auto_disable_flags;
@@ -428,11 +431,13 @@ struct i40e_pf {
struct ptp_clock_info ptp_caps;
struct sk_buff *ptp_tx_skb;
struct hwtstamp_config tstamp_config;
- unsigned long last_rx_ptp_check;
- spinlock_t tmreg_lock; /* Used to protect the device time registers. */
+ struct mutex tmreg_lock; /* Used to protect the SYSTIME registers. */
u64 ptp_base_adj;
u32 tx_hwtstamp_timeouts;
u32 rx_hwtstamp_cleared;
+ u32 latch_event_flags;
+ spinlock_t ptp_rx_lock; /* Used to protect Rx timestamp registers. */
+ unsigned long latch_events[4];
bool ptp_tx;
bool ptp_rx;
u16 rss_table_size; /* HW RSS table size */
@@ -445,6 +450,20 @@ struct i40e_pf {
u16 phy_led_val;
};
+/**
+ * i40e_addr_to_hkey - Convert a 6-byte MAC Address to a u64 hash key
+ * @macaddr: the MAC Address as the base key
+ *
+ * Simply copies the address and returns it as a u64 for hashing
+ **/
+static inline u64 i40e_addr_to_hkey(const u8 *macaddr)
+{
+ u64 key = 0;
+
+ ether_addr_copy((u8 *)&key, macaddr);
+ return key;
+}
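This key feeds the DECLARE_HASHTABLE(mac_filter_hash, 8) table that replaces the old mac_filter_list, turning filter lookup from O(n) list walks into hashed bucket scans. A minimal sketch of insert and lookup under mac_filter_hash_lock (declarations and error handling elided):

/* f is a struct i40e_mac_filter *; macaddr and vlan are the search terms */
u64 key = i40e_addr_to_hkey(macaddr);

hash_add(vsi->mac_filter_hash, &f->hlist, key);         /* insert */

hash_for_each_possible(vsi->mac_filter_hash, f, hlist, key)
        if (ether_addr_equal(macaddr, f->macaddr) && f->vlan == vlan)
                return f;                               /* lookup hit */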
+
enum i40e_filter_state {
I40E_FILTER_INVALID = 0, /* Invalid state */
I40E_FILTER_NEW, /* New, not sent to FW yet */
@@ -454,13 +473,10 @@ enum i40e_filter_state {
/* There is no 'removed' state; the filter struct is freed */
};
struct i40e_mac_filter {
- struct list_head list;
+ struct hlist_node hlist;
u8 macaddr[ETH_ALEN];
#define I40E_VLAN_ANY -1
s16 vlan;
- u8 counter; /* number of instances of this filter */
- bool is_vf; /* filter belongs to a VF */
- bool is_netdev; /* filter belongs to a netdev */
enum i40e_filter_state state;
};
@@ -501,9 +517,11 @@ struct i40e_vsi {
#define I40E_VSI_FLAG_VEB_OWNER BIT(1)
unsigned long flags;
- /* Per VSI lock to protect elements/list (MAC filter) */
- spinlock_t mac_filter_list_lock;
- struct list_head mac_filter_list;
+ /* Per VSI lock to protect elements/hash (MAC filter) */
+ spinlock_t mac_filter_hash_lock;
+ /* Fixed size hash table with 2^8 buckets for MAC filters */
+ DECLARE_HASHTABLE(mac_filter_hash, 8);
+ bool has_vlan_filter;
/* VSI stats */
struct rtnl_link_stats64 net_stats;
@@ -579,6 +597,7 @@ struct i40e_vsi {
u16 veb_idx; /* index of VEB parent */
struct kobject *kobj; /* sysfs object */
bool current_isup; /* Sync 'link up' logging */
+ enum i40e_aq_link_speed current_speed; /* Sync link speed logging */
void *priv; /* client driver data reference. */
@@ -608,6 +627,8 @@ struct i40e_q_vector {
unsigned long hung_detected; /* Set/Reset for hung_detection logic */
cpumask_t affinity_mask;
+ struct irq_affinity_notify affinity_notify;
+
struct rcu_head rcu; /* to avoid race with update stats on free */
char name[I40E_INT_NAME_STR_LEN];
bool arm_wb_state;
@@ -705,6 +726,25 @@ int i40e_get_rss(struct i40e_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size);
void i40e_fill_rss_lut(struct i40e_pf *pf, u8 *lut,
u16 rss_table_size, u16 rss_size);
struct i40e_vsi *i40e_find_vsi_from_id(struct i40e_pf *pf, u16 id);
+/**
+ * i40e_find_vsi_by_type - Find and return the first VSI of a given type
+ * @pf: PF to search for VSI
+ * @type: Value indicating type of VSI we are looking for
+ **/
+static inline struct i40e_vsi *
+i40e_find_vsi_by_type(struct i40e_pf *pf, u16 type)
+{
+ int i;
+
+ for (i = 0; i < pf->num_alloc_vsi; i++) {
+ struct i40e_vsi *vsi = pf->vsi[i];
+
+ if (vsi && vsi->type == type)
+ return vsi;
+ }
+
+ return NULL;
+}
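This inline replaces the more general i40e_vsi_lookup() removed from i40e_client.c further down; callers that only ever wanted the first VSI of a type, typically the Flow Director one, would now do something like the following sketch (I40E_VSI_FDIR is the existing VSI type enumerator):

struct i40e_vsi *fdir_vsi = i40e_find_vsi_by_type(pf, I40E_VSI_FDIR);

if (fdir_vsi)
        dev_info(&pf->pdev->dev, "FDIR VSI seid %d\n", fdir_vsi->seid);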
void i40e_update_stats(struct i40e_vsi *vsi);
void i40e_update_eth_stats(struct i40e_vsi *vsi);
struct rtnl_link_stats64 *i40e_get_vsi_stats_struct(struct i40e_vsi *vsi);
@@ -721,16 +761,12 @@ u32 i40e_get_global_fd_count(struct i40e_pf *pf);
bool i40e_set_ntuple(struct i40e_pf *pf, netdev_features_t features);
void i40e_set_ethtool_ops(struct net_device *netdev);
struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi,
- u8 *macaddr, s16 vlan,
- bool is_vf, bool is_netdev);
-void i40e_del_filter(struct i40e_vsi *vsi, u8 *macaddr, s16 vlan,
- bool is_vf, bool is_netdev);
+ const u8 *macaddr, s16 vlan);
+void i40e_del_filter(struct i40e_vsi *vsi, const u8 *macaddr, s16 vlan);
int i40e_sync_vsi_filters(struct i40e_vsi *vsi);
struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type,
u16 uplink, u32 param1);
int i40e_vsi_release(struct i40e_vsi *vsi);
-struct i40e_vsi *i40e_vsi_lookup(struct i40e_pf *pf, enum i40e_vsi_type type,
- struct i40e_vsi *start_vsi);
#ifdef I40E_FCOE
void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
struct i40e_vsi_context *ctxt,
@@ -740,7 +776,8 @@ void i40e_service_event_schedule(struct i40e_pf *pf);
void i40e_notify_client_of_vf_msg(struct i40e_vsi *vsi, u32 vf_id,
u8 *msg, u16 len);
-int i40e_vsi_control_rings(struct i40e_vsi *vsi, bool enable);
+int i40e_vsi_start_rings(struct i40e_vsi *vsi);
+void i40e_vsi_stop_rings(struct i40e_vsi *vsi);
int i40e_reconfig_rss_queues(struct i40e_pf *pf, int queue_count);
struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf, u16 flags, u16 uplink_seid,
u16 downlink_seid, u8 enabled_tc);
@@ -815,15 +852,15 @@ int i40e_open(struct net_device *netdev);
int i40e_close(struct net_device *netdev);
int i40e_vsi_open(struct i40e_vsi *vsi);
void i40e_vlan_stripping_disable(struct i40e_vsi *vsi);
+int i40e_add_vlan_all_mac(struct i40e_vsi *vsi, s16 vid);
int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid);
-int i40e_vsi_kill_vlan(struct i40e_vsi *vsi, s16 vid);
-struct i40e_mac_filter *i40e_put_mac_in_vlan(struct i40e_vsi *vsi, u8 *macaddr,
- bool is_vf, bool is_netdev);
-int i40e_del_mac_all_vlan(struct i40e_vsi *vsi, u8 *macaddr,
- bool is_vf, bool is_netdev);
+void i40e_rm_vlan_all_mac(struct i40e_vsi *vsi, s16 vid);
+void i40e_vsi_kill_vlan(struct i40e_vsi *vsi, s16 vid);
+struct i40e_mac_filter *i40e_put_mac_in_vlan(struct i40e_vsi *vsi,
+ const u8 *macaddr);
+int i40e_del_mac_all_vlan(struct i40e_vsi *vsi, const u8 *macaddr);
bool i40e_is_vsi_in_vlan(struct i40e_vsi *vsi);
-struct i40e_mac_filter *i40e_find_mac(struct i40e_vsi *vsi, u8 *macaddr,
- bool is_vf, bool is_netdev);
+struct i40e_mac_filter *i40e_find_mac(struct i40e_vsi *vsi, const u8 *macaddr);
#ifdef I40E_FCOE
int __i40e_setup_tc(struct net_device *netdev, u32 handle, __be16 proto,
struct tc_to_netdev *tc);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.c b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
index 738b42a44f20..56fb27298936 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
@@ -964,11 +964,11 @@ i40e_status i40e_clean_arq_element(struct i40e_hw *hw,
desc = I40E_ADMINQ_DESC(hw->aq.arq, ntc);
desc_idx = ntc;
+ hw->aq.arq_last_status =
+ (enum i40e_admin_queue_err)le16_to_cpu(desc->retval);
flags = le16_to_cpu(desc->flags);
if (flags & I40E_AQ_FLAG_ERR) {
ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;
- hw->aq.arq_last_status =
- (enum i40e_admin_queue_err)le16_to_cpu(desc->retval);
i40e_debug(hw,
I40E_DEBUG_AQ_MESSAGE,
"AQRX: Event received with error 0x%X.\n",
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
index 67e396b2b347..b2101a51534c 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
@@ -1642,6 +1642,10 @@ enum i40e_aq_phy_type {
I40E_PHY_TYPE_1000BASE_LX = 0x1C,
I40E_PHY_TYPE_1000BASE_T_OPTICAL = 0x1D,
I40E_PHY_TYPE_20GBASE_KR2 = 0x1E,
+ I40E_PHY_TYPE_25GBASE_KR = 0x1F,
+ I40E_PHY_TYPE_25GBASE_CR = 0x20,
+ I40E_PHY_TYPE_25GBASE_SR = 0x21,
+ I40E_PHY_TYPE_25GBASE_LR = 0x22,
I40E_PHY_TYPE_MAX
};
@@ -1650,6 +1654,7 @@ enum i40e_aq_phy_type {
#define I40E_LINK_SPEED_10GB_SHIFT 0x3
#define I40E_LINK_SPEED_40GB_SHIFT 0x4
#define I40E_LINK_SPEED_20GB_SHIFT 0x5
+#define I40E_LINK_SPEED_25GB_SHIFT 0x6
enum i40e_aq_link_speed {
I40E_LINK_SPEED_UNKNOWN = 0,
@@ -1657,7 +1662,8 @@ enum i40e_aq_link_speed {
I40E_LINK_SPEED_1GB = BIT(I40E_LINK_SPEED_1000MB_SHIFT),
I40E_LINK_SPEED_10GB = BIT(I40E_LINK_SPEED_10GB_SHIFT),
I40E_LINK_SPEED_40GB = BIT(I40E_LINK_SPEED_40GB_SHIFT),
- I40E_LINK_SPEED_20GB = BIT(I40E_LINK_SPEED_20GB_SHIFT)
+ I40E_LINK_SPEED_20GB = BIT(I40E_LINK_SPEED_20GB_SHIFT),
+ I40E_LINK_SPEED_25GB = BIT(I40E_LINK_SPEED_25GB_SHIFT),
};
struct i40e_aqc_module_desc {
@@ -1680,6 +1686,8 @@ struct i40e_aq_get_phy_abilities_resp {
#define I40E_AQ_PHY_LINK_ENABLED 0x08
#define I40E_AQ_PHY_AN_ENABLED 0x10
#define I40E_AQ_PHY_FLAG_MODULE_QUAL 0x20
+#define I40E_AQ_PHY_FEC_ABILITY_KR 0x40
+#define I40E_AQ_PHY_FEC_ABILITY_RS 0x80
__le16 eee_capability;
#define I40E_AQ_EEE_100BASE_TX 0x0002
#define I40E_AQ_EEE_1000BASE_T 0x0004
@@ -1690,7 +1698,22 @@ struct i40e_aq_get_phy_abilities_resp {
__le32 eeer_val;
u8 d3_lpan;
#define I40E_AQ_SET_PHY_D3_LPAN_ENA 0x01
- u8 reserved[3];
+ u8 phy_type_ext;
+#define I40E_AQ_PHY_TYPE_EXT_25G_KR 0X01
+#define I40E_AQ_PHY_TYPE_EXT_25G_CR 0X02
+#define I40E_AQ_PHY_TYPE_EXT_25G_SR 0x04
+#define I40E_AQ_PHY_TYPE_EXT_25G_LR 0x08
+ u8 fec_cfg_curr_mod_ext_info;
+#define I40E_AQ_ENABLE_FEC_KR 0x01
+#define I40E_AQ_ENABLE_FEC_RS 0x02
+#define I40E_AQ_REQUEST_FEC_KR 0x04
+#define I40E_AQ_REQUEST_FEC_RS 0x08
+#define I40E_AQ_ENABLE_FEC_AUTO 0x10
+#define I40E_AQ_FEC
+#define I40E_AQ_MODULE_TYPE_EXT_MASK 0xE0
+#define I40E_AQ_MODULE_TYPE_EXT_SHIFT 5
+
+ u8 ext_comp_code;
u8 phy_id[4];
u8 module_type[3];
u8 qualified_module_count;
@@ -1712,7 +1735,20 @@ struct i40e_aq_set_phy_config { /* same bits as above in all */
__le16 eee_capability;
__le32 eeer;
u8 low_power_ctrl;
- u8 reserved[3];
+ u8 phy_type_ext;
+#define I40E_AQ_PHY_TYPE_EXT_25G_KR 0X01
+#define I40E_AQ_PHY_TYPE_EXT_25G_CR 0X02
+#define I40E_AQ_PHY_TYPE_EXT_25G_SR 0x04
+#define I40E_AQ_PHY_TYPE_EXT_25G_LR 0x08
+ u8 fec_config;
+#define I40E_AQ_SET_FEC_ABILITY_KR BIT(0)
+#define I40E_AQ_SET_FEC_ABILITY_RS BIT(1)
+#define I40E_AQ_SET_FEC_REQUEST_KR BIT(2)
+#define I40E_AQ_SET_FEC_REQUEST_RS BIT(3)
+#define I40E_AQ_SET_FEC_AUTO BIT(4)
+#define I40E_AQ_PHY_FEC_CONFIG_SHIFT 0x0
+#define I40E_AQ_PHY_FEC_CONFIG_MASK (0x1F << I40E_AQ_PHY_FEC_CONFIG_SHIFT)
+ u8 reserved;
};
I40E_CHECK_CMD_LENGTH(i40e_aq_set_phy_config);
@@ -1792,9 +1828,18 @@ struct i40e_aqc_get_link_status {
#define I40E_AQ_LINK_TX_DRAINED 0x01
#define I40E_AQ_LINK_TX_FLUSHED 0x03
#define I40E_AQ_LINK_FORCED_40G 0x10
+/* 25G Error Codes */
+#define I40E_AQ_25G_NO_ERR 0X00
+#define I40E_AQ_25G_NOT_PRESENT 0X01
+#define I40E_AQ_25G_NVM_CRC_ERR 0X02
+#define I40E_AQ_25G_SBUS_UCODE_ERR 0X03
+#define I40E_AQ_25G_SERDES_UCODE_ERR 0X04
+#define I40E_AQ_25G_NIMB_UCODE_ERR 0X05
u8 loopback; /* use defines from i40e_aqc_set_lb_mode */
__le16 max_frame_size;
u8 config;
+#define I40E_AQ_CONFIG_FEC_KR_ENA 0x01
+#define I40E_AQ_CONFIG_FEC_RS_ENA 0x02
#define I40E_AQ_CONFIG_CRC_ENA 0x04
#define I40E_AQ_CONFIG_PACING_MASK 0x78
u8 external_power_ability;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_client.c b/drivers/net/ethernet/intel/i40e/i40e_client.c
index 250db0b244b7..7fe72abc0b4a 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_client.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_client.c
@@ -287,6 +287,7 @@ void i40e_notify_client_of_netdev_close(struct i40e_vsi *vsi, bool reset)
}
cdev->client->ops->close(&cdev->lan_info, cdev->client,
reset);
+ clear_bit(__I40E_CLIENT_INSTANCE_OPENED, &cdev->state);
i40e_client_release_qvlist(&cdev->lan_info);
}
}
@@ -406,37 +407,6 @@ int i40e_vf_client_capable(struct i40e_pf *pf, u32 vf_id,
}
/**
- * i40e_vsi_lookup - finds a matching VSI from the PF list starting at start_vsi
- * @pf: board private structure
- * @type: vsi type
- * @start_vsi: a VSI pointer from where to start the search
- *
- * Returns non NULL on success or NULL for failure
- **/
-struct i40e_vsi *i40e_vsi_lookup(struct i40e_pf *pf,
- enum i40e_vsi_type type,
- struct i40e_vsi *start_vsi)
-{
- struct i40e_vsi *vsi;
- int i = 0;
-
- if (start_vsi) {
- for (i = 0; i < pf->num_alloc_vsi; i++) {
- vsi = pf->vsi[i];
- if (vsi == start_vsi)
- break;
- }
- }
- for (; i < pf->num_alloc_vsi; i++) {
- vsi = pf->vsi[i];
- if (vsi && vsi->type == type)
- return vsi;
- }
-
- return NULL;
-}
-
-/**
* i40e_client_add_instance - add a client instance struct to the instance list
* @pf: pointer to the board struct
* @client: pointer to a client struct in the client list.
@@ -565,7 +535,7 @@ void i40e_client_subtask(struct i40e_pf *pf)
if (test_bit(__I40E_DOWN, &pf->vsi[pf->lan_vsi]->state))
continue;
} else {
- dev_warn(&pf->pdev->dev, "This client %s is being instanciated at probe\n",
+ dev_warn(&pf->pdev->dev, "This client %s is being instantiated at probe\n",
client->name);
}
@@ -575,29 +545,25 @@ void i40e_client_subtask(struct i40e_pf *pf)
continue;
if (!existing) {
- /* Also up the ref_cnt for no. of instances of this
- * client.
- */
- atomic_inc(&client->ref_cnt);
dev_info(&pf->pdev->dev, "Added instance of Client %s to PF%d bus=0x%02x func=0x%02x\n",
client->name, pf->hw.pf_id,
pf->hw.bus.device, pf->hw.bus.func);
}
mutex_lock(&i40e_client_instance_mutex);
- /* Send an Open request to the client */
- atomic_inc(&cdev->ref_cnt);
- if (client->ops && client->ops->open)
- ret = client->ops->open(&cdev->lan_info, client);
- atomic_dec(&cdev->ref_cnt);
- if (!ret) {
- set_bit(__I40E_CLIENT_INSTANCE_OPENED, &cdev->state);
- } else {
- /* remove client instance */
- mutex_unlock(&i40e_client_instance_mutex);
- i40e_client_del_instance(pf, client);
- atomic_dec(&client->ref_cnt);
- continue;
+ if (!test_bit(__I40E_CLIENT_INSTANCE_OPENED,
+ &cdev->state)) {
+ /* Send an Open request to the client */
+ if (client->ops && client->ops->open)
+ ret = client->ops->open(&cdev->lan_info,
+ client);
+ if (!ret) {
+ set_bit(__I40E_CLIENT_INSTANCE_OPENED,
+ &cdev->state);
+ } else {
+ /* remove client instance */
+ i40e_client_del_instance(pf, client);
+ }
}
mutex_unlock(&i40e_client_instance_mutex);
}
@@ -694,10 +660,6 @@ static int i40e_client_release(struct i40e_client *client)
continue;
pf = (struct i40e_pf *)cdev->lan_info.pf;
if (test_bit(__I40E_CLIENT_INSTANCE_OPENED, &cdev->state)) {
- if (atomic_read(&cdev->ref_cnt) > 0) {
- ret = I40E_ERR_NOT_READY;
- goto out;
- }
if (client->ops && client->ops->close)
client->ops->close(&cdev->lan_info, client,
false);
@@ -710,11 +672,9 @@ static int i40e_client_release(struct i40e_client *client)
}
/* delete the client instance from the list */
list_move(&cdev->list, &cdevs_tmp);
- atomic_dec(&client->ref_cnt);
dev_info(&pf->pdev->dev, "Deleted client instance of Client %s\n",
client->name);
}
-out:
mutex_unlock(&i40e_client_instance_mutex);
/* free the client device and release its vsi */
@@ -1040,17 +1000,10 @@ int i40e_unregister_client(struct i40e_client *client)
ret = -ENODEV;
goto out;
}
- if (atomic_read(&client->ref_cnt) == 0) {
- clear_bit(__I40E_CLIENT_REGISTERED, &client->state);
- list_del(&client->list);
- pr_info("i40e: Unregistered client %s with return code %d\n",
- client->name, ret);
- } else {
- ret = I40E_ERR_NOT_READY;
- pr_err("i40e: Client %s failed unregister - client has open instances\n",
- client->name);
- }
-
+ clear_bit(__I40E_CLIENT_REGISTERED, &client->state);
+ list_del(&client->list);
+ pr_info("i40e: Unregistered client %s with return code %d\n",
+ client->name, ret);
out:
mutex_unlock(&i40e_client_mutex);
return ret;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_client.h b/drivers/net/ethernet/intel/i40e/i40e_client.h
index 38a6c36a6a0e..528bd79b05fe 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_client.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_client.h
@@ -203,8 +203,6 @@ struct i40e_client_instance {
struct i40e_info lan_info;
struct i40e_client *client;
unsigned long state;
- /* A count of all the in-progress calls to the client */
- atomic_t ref_cnt;
};
struct i40e_client {
diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c
index 2154a34c1dd8..128735975caa 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_common.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_common.c
@@ -53,6 +53,8 @@ static i40e_status i40e_set_mac_type(struct i40e_hw *hw)
case I40E_DEV_ID_10G_BASE_T4:
case I40E_DEV_ID_20G_KR2:
case I40E_DEV_ID_20G_KR2_A:
+ case I40E_DEV_ID_25G_B:
+ case I40E_DEV_ID_25G_SFP28:
hw->mac.type = I40E_MAC_XL710;
break;
case I40E_DEV_ID_KX_X722:
@@ -1183,6 +1185,8 @@ static enum i40e_media_type i40e_get_media_type(struct i40e_hw *hw)
case I40E_PHY_TYPE_1000BASE_LX:
case I40E_PHY_TYPE_40GBASE_SR4:
case I40E_PHY_TYPE_40GBASE_LR4:
+ case I40E_PHY_TYPE_25GBASE_LR:
+ case I40E_PHY_TYPE_25GBASE_SR:
media = I40E_MEDIA_TYPE_FIBER;
break;
case I40E_PHY_TYPE_100BASE_TX:
@@ -1197,6 +1201,7 @@ static enum i40e_media_type i40e_get_media_type(struct i40e_hw *hw)
case I40E_PHY_TYPE_10GBASE_SFPP_CU:
case I40E_PHY_TYPE_40GBASE_AOC:
case I40E_PHY_TYPE_10GBASE_AOC:
+ case I40E_PHY_TYPE_25GBASE_CR:
media = I40E_MEDIA_TYPE_DA;
break;
case I40E_PHY_TYPE_1000BASE_KX:
@@ -1204,6 +1209,7 @@ static enum i40e_media_type i40e_get_media_type(struct i40e_hw *hw)
case I40E_PHY_TYPE_10GBASE_KR:
case I40E_PHY_TYPE_40GBASE_KR4:
case I40E_PHY_TYPE_20GBASE_KR2:
+ case I40E_PHY_TYPE_25GBASE_KR:
media = I40E_MEDIA_TYPE_BACKPLANE;
break;
case I40E_PHY_TYPE_SGMII:
@@ -1608,8 +1614,10 @@ i40e_status i40e_aq_get_phy_capabilities(struct i40e_hw *hw,
if (hw->aq.asq_last_status == I40E_AQ_RC_EIO)
status = I40E_ERR_UNKNOWN_PHY;
- if (report_init)
+ if (report_init) {
hw->phy.phy_types = le32_to_cpu(abilities->phy_type);
+ hw->phy.phy_types |= ((u64)abilities->phy_type_ext << 32);
+ }
return status;
}
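
With the 25G additions the PHY capability set no longer fits in 32 bits, which is why report_init now folds phy_type_ext into the upper half of a 64-bit phy_types. A small standalone sketch of the packing, with made-up values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t phy_type = 0x00000008;	/* low word from the AQ response */
	uint8_t phy_type_ext = 0x01;	/* extended byte, e.g. a 25G type */
	uint64_t phy_types;

	phy_types = phy_type;
	phy_types |= (uint64_t)phy_type_ext << 32;

	/* bits >= 32 would silently vanish without the u64 cast above */
	printf("bit 32 set: %d\n", (int)((phy_types >> 32) & 1));
	return 0;
}
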
@@ -1701,10 +1709,13 @@ enum i40e_status_code i40e_set_fc(struct i40e_hw *hw, u8 *aq_failures,
config.abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
/* Copy over all the old settings */
config.phy_type = abilities.phy_type;
+ config.phy_type_ext = abilities.phy_type_ext;
config.link_speed = abilities.link_speed;
config.eee_capability = abilities.eee_capability;
config.eeer = abilities.eeer_val;
config.low_power_ctrl = abilities.d3_lpan;
+ config.fec_config = abilities.fec_cfg_curr_mod_ext_info &
+ I40E_AQ_PHY_FEC_CONFIG_MASK;
status = i40e_aq_set_phy_config(hw, &config, NULL);
if (status)
@@ -1849,12 +1860,13 @@ i40e_status i40e_aq_get_link_info(struct i40e_hw *hw,
else
hw_link_info->crc_enable = false;
- if (resp->command_flags & cpu_to_le16(I40E_AQ_LSE_ENABLE))
+ if (resp->command_flags & cpu_to_le16(I40E_AQ_LSE_IS_ENABLED))
hw_link_info->lse_enable = true;
else
hw_link_info->lse_enable = false;
- if ((hw->aq.fw_maj_ver < 4 || (hw->aq.fw_maj_ver == 4 &&
+ if ((hw->mac.type == I40E_MAC_XL710) &&
+ (hw->aq.fw_maj_ver < 4 || (hw->aq.fw_maj_ver == 4 &&
hw->aq.fw_min_ver < 40)) && hw_link_info->phy_type == 0xE)
hw_link_info->phy_type = I40E_PHY_TYPE_10GBASE_SFPP_CU;
@@ -2169,6 +2181,40 @@ enum i40e_status_code i40e_aq_set_vsi_uc_promisc_on_vlan(struct i40e_hw *hw,
}
/**
+ * i40e_aq_set_vsi_bc_promisc_on_vlan
+ * @hw: pointer to the hw struct
+ * @seid: vsi number
+ * @enable: true to enable, false to disable broadcast promiscuous on the VLAN
+ * @vid: The VLAN tag filter - capture any broadcast packet with this VLAN tag
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+i40e_status i40e_aq_set_vsi_bc_promisc_on_vlan(struct i40e_hw *hw,
+ u16 seid, bool enable, u16 vid,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
+ (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
+ i40e_status status;
+ u16 flags = 0;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_set_vsi_promiscuous_modes);
+
+ if (enable)
+ flags |= I40E_AQC_SET_VSI_PROMISC_BROADCAST;
+
+ cmd->promiscuous_flags = cpu_to_le16(flags);
+ cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_BROADCAST);
+ cmd->seid = cpu_to_le16(seid);
+ cmd->vlan_tag = cpu_to_le16(vid | I40E_AQC_SET_VSI_VLAN_VALID);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
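
The helper builds the descriptor fields in host order and converts with cpu_to_le16 on assignment; note that the VLAN tag field carries both the VID and a validity marker. A hedged sketch of that composition (the struct layout and constant values below are stand-ins; only the field and flag names come from the code above):

#include <stdint.h>
#include <stdio.h>

#define PROMISC_BROADCAST 0x0004u	/* illustrative value */
#define VLAN_VALID        0x8000u	/* illustrative value */

struct promisc_cmd {
	uint16_t promiscuous_flags;
	uint16_t valid_flags;
	uint16_t seid;
	uint16_t vlan_tag;
};

int main(void)
{
	struct promisc_cmd cmd = { 0 };
	uint16_t vid = 42;
	int enable = 1;

	if (enable)
		cmd.promiscuous_flags |= PROMISC_BROADCAST;
	cmd.valid_flags = PROMISC_BROADCAST;	/* which flag FW should honor */
	cmd.vlan_tag = vid | VLAN_VALID;	/* VID plus "tag is valid" bit */

	printf("flags=0x%04x vlan_tag=0x%04x\n",
	       cmd.promiscuous_flags, cmd.vlan_tag);
	return 0;
}
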
+/**
* i40e_aq_set_vsi_broadcast
* @hw: pointer to the hw struct
* @seid: vsi number
@@ -2494,7 +2540,10 @@ i40e_status i40e_update_link_info(struct i40e_hw *hw)
if (status)
return status;
- if (hw->phy.link_info.link_info & I40E_AQ_MEDIA_AVAILABLE) {
+ /* extra checking needed to ensure link info to user is timely */
+ if ((hw->phy.link_info.link_info & I40E_AQ_MEDIA_AVAILABLE) &&
+ ((hw->phy.link_info.link_info & I40E_AQ_LINK_UP) ||
+ !(hw->phy.link_info_old.link_info & I40E_AQ_LINK_UP))) {
status = i40e_aq_get_phy_capabilities(hw, false, false,
&abilities, NULL);
if (status)
@@ -3144,6 +3193,14 @@ static void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff,
break;
case I40E_AQ_CAP_ID_MNG_MODE:
p->management_mode = number;
+ if (major_rev > 1) {
+ p->mng_protocols_over_mctp = logical_id;
+ i40e_debug(hw, I40E_DEBUG_INIT,
+ "HW Capability: Protocols over MCTP = %d\n",
+ p->mng_protocols_over_mctp);
+ } else {
+ p->mng_protocols_over_mctp = 0;
+ }
break;
case I40E_AQ_CAP_ID_NPAR_ACTIVE:
p->npar_enable = number;
@@ -3310,8 +3367,10 @@ static void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff,
/* partition id is 1-based, and functions are evenly spread
* across the ports as partitions
*/
- hw->partition_id = (hw->pf_id / hw->num_ports) + 1;
- hw->num_partitions = num_functions / hw->num_ports;
+ if (hw->num_ports != 0) {
+ hw->partition_id = (hw->pf_id / hw->num_ports) + 1;
+ hw->num_partitions = num_functions / hw->num_ports;
+ }
/* additional HW specific goodies that might
* someday be HW version specific
@@ -4391,7 +4450,92 @@ i40e_status i40e_aq_configure_partition_bw(struct i40e_hw *hw,
}
/**
- * i40e_read_phy_register
+ * i40e_read_phy_register_clause22
+ * @hw: pointer to the HW structure
+ * @reg: register address in the page
+ * @phy_addr: PHY address on MDIO interface
+ * @value: PHY register value
+ *
+ * Reads specified PHY register value
+ **/
+i40e_status i40e_read_phy_register_clause22(struct i40e_hw *hw,
+ u16 reg, u8 phy_addr, u16 *value)
+{
+ i40e_status status = I40E_ERR_TIMEOUT;
+ u8 port_num = (u8)hw->func_caps.mdio_port_num;
+ u32 command = 0;
+ u16 retry = 1000;
+
+ command = (reg << I40E_GLGEN_MSCA_DEVADD_SHIFT) |
+ (phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) |
+ (I40E_MDIO_CLAUSE22_OPCODE_READ_MASK) |
+ (I40E_MDIO_CLAUSE22_STCODE_MASK) |
+ (I40E_GLGEN_MSCA_MDICMD_MASK);
+ wr32(hw, I40E_GLGEN_MSCA(port_num), command);
+ do {
+ command = rd32(hw, I40E_GLGEN_MSCA(port_num));
+ if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) {
+ status = 0;
+ break;
+ }
+ udelay(10);
+ retry--;
+ } while (retry);
+
+ if (status) {
+ i40e_debug(hw, I40E_DEBUG_PHY,
+ "PHY: Can't write command to external PHY.\n");
+ } else {
+ command = rd32(hw, I40E_GLGEN_MSRWD(port_num));
+ *value = (command & I40E_GLGEN_MSRWD_MDIRDDATA_MASK) >>
+ I40E_GLGEN_MSRWD_MDIRDDATA_SHIFT;
+ }
+
+ return status;
+}
+
+/**
+ * i40e_write_phy_register_clause22
+ * @hw: pointer to the HW structure
+ * @reg: register address in the page
+ * @phy_addr: PHY address on MDIO interface
+ * @value: PHY register value
+ *
+ * Writes specified PHY register value
+ **/
+i40e_status i40e_write_phy_register_clause22(struct i40e_hw *hw,
+ u16 reg, u8 phy_addr, u16 value)
+{
+ i40e_status status = I40E_ERR_TIMEOUT;
+ u8 port_num = (u8)hw->func_caps.mdio_port_num;
+ u32 command = 0;
+ u16 retry = 1000;
+
+ command = value << I40E_GLGEN_MSRWD_MDIWRDATA_SHIFT;
+ wr32(hw, I40E_GLGEN_MSRWD(port_num), command);
+
+ command = (reg << I40E_GLGEN_MSCA_DEVADD_SHIFT) |
+ (phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) |
+ (I40E_MDIO_CLAUSE22_OPCODE_WRITE_MASK) |
+ (I40E_MDIO_CLAUSE22_STCODE_MASK) |
+ (I40E_GLGEN_MSCA_MDICMD_MASK);
+
+ wr32(hw, I40E_GLGEN_MSCA(port_num), command);
+ do {
+ command = rd32(hw, I40E_GLGEN_MSCA(port_num));
+ if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) {
+ status = 0;
+ break;
+ }
+ udelay(10);
+ retry--;
+ } while (retry);
+
+ return status;
+}
+
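
Both clause-22 helpers bound their completion poll at 1000 iterations with udelay(10) between reads, i.e. roughly 1000 x 10 us = 10 ms before I40E_ERR_TIMEOUT stands. A standalone sketch of the same bounded-poll shape:

#include <stdio.h>

/* Bounded poll: ~1000 iterations x 10us = ~10ms budget, matching
 * the clause-22 read/write loops above.
 */
static int poll_done(int *hw_busy_counter)
{
	/* stand-in for rd32() plus the MDICMD mask test */
	return --(*hw_busy_counter) <= 0;
}

int main(void)
{
	int busy = 3;		/* pretend HW finishes after 3 polls */
	int retry = 1000;
	int status = -1;	/* assume timeout until proven otherwise */

	do {
		if (poll_done(&busy)) {
			status = 0;
			break;
		}
		/* udelay(10) would sit here in the driver */
		retry--;
	} while (retry);

	printf("status=%d retries_left=%d\n", status, retry);
	return 0;
}
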
+/**
+ * i40e_read_phy_register_clause45
* @hw: pointer to the HW structure
* @page: registers page number
* @reg: register address in the page
@@ -4400,9 +4544,8 @@ i40e_status i40e_aq_configure_partition_bw(struct i40e_hw *hw,
*
* Reads specified PHY register value
**/
-i40e_status i40e_read_phy_register(struct i40e_hw *hw,
- u8 page, u16 reg, u8 phy_addr,
- u16 *value)
+i40e_status i40e_read_phy_register_clause45(struct i40e_hw *hw,
+ u8 page, u16 reg, u8 phy_addr, u16 *value)
{
i40e_status status = I40E_ERR_TIMEOUT;
u32 command = 0;
@@ -4412,8 +4555,8 @@ i40e_status i40e_read_phy_register(struct i40e_hw *hw,
command = (reg << I40E_GLGEN_MSCA_MDIADD_SHIFT) |
(page << I40E_GLGEN_MSCA_DEVADD_SHIFT) |
(phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) |
- (I40E_MDIO_OPCODE_ADDRESS) |
- (I40E_MDIO_STCODE) |
+ (I40E_MDIO_CLAUSE45_OPCODE_ADDRESS_MASK) |
+ (I40E_MDIO_CLAUSE45_STCODE_MASK) |
(I40E_GLGEN_MSCA_MDICMD_MASK) |
(I40E_GLGEN_MSCA_MDIINPROGEN_MASK);
wr32(hw, I40E_GLGEN_MSCA(port_num), command);
@@ -4435,8 +4578,8 @@ i40e_status i40e_read_phy_register(struct i40e_hw *hw,
command = (page << I40E_GLGEN_MSCA_DEVADD_SHIFT) |
(phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) |
- (I40E_MDIO_OPCODE_READ) |
- (I40E_MDIO_STCODE) |
+ (I40E_MDIO_CLAUSE45_OPCODE_READ_MASK) |
+ (I40E_MDIO_CLAUSE45_STCODE_MASK) |
(I40E_GLGEN_MSCA_MDICMD_MASK) |
(I40E_GLGEN_MSCA_MDIINPROGEN_MASK);
status = I40E_ERR_TIMEOUT;
@@ -4466,7 +4609,7 @@ phy_read_end:
}
/**
- * i40e_write_phy_register
+ * i40e_write_phy_register_clause45
* @hw: pointer to the HW structure
* @page: registers page number
* @reg: register address in the page
@@ -4475,9 +4618,8 @@ phy_read_end:
*
* Writes value to specified PHY register
**/
-i40e_status i40e_write_phy_register(struct i40e_hw *hw,
- u8 page, u16 reg, u8 phy_addr,
- u16 value)
+i40e_status i40e_write_phy_register_clause45(struct i40e_hw *hw,
+ u8 page, u16 reg, u8 phy_addr, u16 value)
{
i40e_status status = I40E_ERR_TIMEOUT;
u32 command = 0;
@@ -4487,8 +4629,8 @@ i40e_status i40e_write_phy_register(struct i40e_hw *hw,
command = (reg << I40E_GLGEN_MSCA_MDIADD_SHIFT) |
(page << I40E_GLGEN_MSCA_DEVADD_SHIFT) |
(phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) |
- (I40E_MDIO_OPCODE_ADDRESS) |
- (I40E_MDIO_STCODE) |
+ (I40E_MDIO_CLAUSE45_OPCODE_ADDRESS_MASK) |
+ (I40E_MDIO_CLAUSE45_STCODE_MASK) |
(I40E_GLGEN_MSCA_MDICMD_MASK) |
(I40E_GLGEN_MSCA_MDIINPROGEN_MASK);
wr32(hw, I40E_GLGEN_MSCA(port_num), command);
@@ -4512,8 +4654,8 @@ i40e_status i40e_write_phy_register(struct i40e_hw *hw,
command = (page << I40E_GLGEN_MSCA_DEVADD_SHIFT) |
(phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) |
- (I40E_MDIO_OPCODE_WRITE) |
- (I40E_MDIO_STCODE) |
+ (I40E_MDIO_CLAUSE45_OPCODE_WRITE_MASK) |
+ (I40E_MDIO_CLAUSE45_STCODE_MASK) |
(I40E_GLGEN_MSCA_MDICMD_MASK) |
(I40E_GLGEN_MSCA_MDIINPROGEN_MASK);
status = I40E_ERR_TIMEOUT;
@@ -4534,6 +4676,78 @@ phy_write_end:
}
/**
+ * i40e_write_phy_register
+ * @hw: pointer to the HW structure
+ * @page: registers page number
+ * @reg: register address in the page
+ * @phy_addr: PHY address on MDIO interface
+ * @value: PHY register value
+ *
+ * Writes value to specified PHY register
+ **/
+i40e_status i40e_write_phy_register(struct i40e_hw *hw,
+ u8 page, u16 reg, u8 phy_addr, u16 value)
+{
+ i40e_status status;
+
+ switch (hw->device_id) {
+ case I40E_DEV_ID_1G_BASE_T_X722:
+ status = i40e_write_phy_register_clause22(hw, reg, phy_addr,
+ value);
+ break;
+ case I40E_DEV_ID_10G_BASE_T:
+ case I40E_DEV_ID_10G_BASE_T4:
+ case I40E_DEV_ID_10G_BASE_T_X722:
+ case I40E_DEV_ID_25G_B:
+ case I40E_DEV_ID_25G_SFP28:
+ status = i40e_write_phy_register_clause45(hw, page, reg,
+ phy_addr, value);
+ break;
+ default:
+ status = I40E_ERR_UNKNOWN_PHY;
+ break;
+ }
+
+ return status;
+}
+
+/**
+ * i40e_read_phy_register
+ * @hw: pointer to the HW structure
+ * @page: registers page number
+ * @reg: register address in the page
+ * @phy_addr: PHY address on MDIO interface
+ * @value: PHY register value
+ *
+ * Reads specified PHY register value
+ **/
+i40e_status i40e_read_phy_register(struct i40e_hw *hw,
+ u8 page, u16 reg, u8 phy_addr, u16 *value)
+{
+ i40e_status status;
+
+ switch (hw->device_id) {
+ case I40E_DEV_ID_1G_BASE_T_X722:
+ status = i40e_read_phy_register_clause22(hw, reg, phy_addr,
+ value);
+ break;
+ case I40E_DEV_ID_10G_BASE_T:
+ case I40E_DEV_ID_10G_BASE_T4:
+ case I40E_DEV_ID_10G_BASE_T_X722:
+ case I40E_DEV_ID_25G_B:
+ case I40E_DEV_ID_25G_SFP28:
+ status = i40e_read_phy_register_clause45(hw, page, reg,
+ phy_addr, value);
+ break;
+ default:
+ status = I40E_ERR_UNKNOWN_PHY;
+ break;
+ }
+
+ return status;
+}
+
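
The old read/write entry points survive as thin wrappers that pick the MDIO clause from the device ID: the 1G X722 part speaks clause 22, the BASE-T and 25G parts clause 45, and anything else reports an unknown PHY. A reduced standalone model of that dispatch (device names abbreviated):

#include <stdio.h>

enum dev_id { DEV_1G_X722, DEV_10G_BASE_T, DEV_25G_SFP28, DEV_OTHER };

static int read_clause22(void) { return 22; }
static int read_clause45(void) { return 45; }

/* Mirrors i40e_read_phy_register's switch: pick the MDIO clause by device */
static int read_phy(enum dev_id id)
{
	switch (id) {
	case DEV_1G_X722:
		return read_clause22();
	case DEV_10G_BASE_T:
	case DEV_25G_SFP28:
		return read_clause45();
	default:
		return -1;	/* I40E_ERR_UNKNOWN_PHY in the driver */
	}
}

int main(void)
{
	printf("%d %d %d\n", read_phy(DEV_1G_X722),
	       read_phy(DEV_25G_SFP28), read_phy(DEV_OTHER));
	return 0;
}
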
+/**
* i40e_get_phy_address
* @hw: pointer to the HW structure
* @dev_num: PHY port num that address we want
@@ -4575,14 +4789,16 @@ i40e_status i40e_blink_phy_link_led(struct i40e_hw *hw,
for (gpio_led_port = 0; gpio_led_port < 3; gpio_led_port++,
led_addr++) {
- status = i40e_read_phy_register(hw, I40E_PHY_COM_REG_PAGE,
- led_addr, phy_addr, &led_reg);
+ status = i40e_read_phy_register_clause45(hw,
+ I40E_PHY_COM_REG_PAGE,
+ led_addr, phy_addr,
+ &led_reg);
if (status)
goto phy_blinking_end;
led_ctl = led_reg;
if (led_reg & I40E_PHY_LED_LINK_MODE_MASK) {
led_reg = 0;
- status = i40e_write_phy_register(hw,
+ status = i40e_write_phy_register_clause45(hw,
I40E_PHY_COM_REG_PAGE,
led_addr, phy_addr,
led_reg);
@@ -4594,20 +4810,18 @@ i40e_status i40e_blink_phy_link_led(struct i40e_hw *hw,
if (time > 0 && interval > 0) {
for (i = 0; i < time * 1000; i += interval) {
- status = i40e_read_phy_register(hw,
- I40E_PHY_COM_REG_PAGE,
- led_addr, phy_addr,
- &led_reg);
+ status = i40e_read_phy_register_clause45(hw,
+ I40E_PHY_COM_REG_PAGE,
+ led_addr, phy_addr, &led_reg);
if (status)
goto restore_config;
if (led_reg & I40E_PHY_LED_MANUAL_ON)
led_reg = 0;
else
led_reg = I40E_PHY_LED_MANUAL_ON;
- status = i40e_write_phy_register(hw,
- I40E_PHY_COM_REG_PAGE,
- led_addr, phy_addr,
- led_reg);
+ status = i40e_write_phy_register_clause45(hw,
+ I40E_PHY_COM_REG_PAGE,
+ led_addr, phy_addr, led_reg);
if (status)
goto restore_config;
msleep(interval);
@@ -4615,8 +4829,9 @@ i40e_status i40e_blink_phy_link_led(struct i40e_hw *hw,
}
restore_config:
- status = i40e_write_phy_register(hw, I40E_PHY_COM_REG_PAGE, led_addr,
- phy_addr, led_ctl);
+ status = i40e_write_phy_register_clause45(hw,
+ I40E_PHY_COM_REG_PAGE,
+ led_addr, phy_addr, led_ctl);
phy_blinking_end:
return status;
@@ -4647,8 +4862,10 @@ i40e_status i40e_led_get_phy(struct i40e_hw *hw, u16 *led_addr,
for (gpio_led_port = 0; gpio_led_port < 3; gpio_led_port++,
temp_addr++) {
- status = i40e_read_phy_register(hw, I40E_PHY_COM_REG_PAGE,
- temp_addr, phy_addr, &reg_val);
+ status = i40e_read_phy_register_clause45(hw,
+ I40E_PHY_COM_REG_PAGE,
+ temp_addr, phy_addr,
+ &reg_val);
if (status)
return status;
*val = reg_val;
@@ -4681,41 +4898,42 @@ i40e_status i40e_led_set_phy(struct i40e_hw *hw, bool on,
i = rd32(hw, I40E_PFGEN_PORTNUM);
port_num = (u8)(i & I40E_PFGEN_PORTNUM_PORT_NUM_MASK);
phy_addr = i40e_get_phy_address(hw, port_num);
-
- status = i40e_read_phy_register(hw, I40E_PHY_COM_REG_PAGE, led_addr,
- phy_addr, &led_reg);
+ status = i40e_read_phy_register_clause45(hw, I40E_PHY_COM_REG_PAGE,
+ led_addr, phy_addr, &led_reg);
if (status)
return status;
led_ctl = led_reg;
if (led_reg & I40E_PHY_LED_LINK_MODE_MASK) {
led_reg = 0;
- status = i40e_write_phy_register(hw, I40E_PHY_COM_REG_PAGE,
- led_addr, phy_addr, led_reg);
+ status = i40e_write_phy_register_clause45(hw,
+ I40E_PHY_COM_REG_PAGE,
+ led_addr, phy_addr,
+ led_reg);
if (status)
return status;
}
- status = i40e_read_phy_register(hw, I40E_PHY_COM_REG_PAGE,
- led_addr, phy_addr, &led_reg);
+ status = i40e_read_phy_register_clause45(hw, I40E_PHY_COM_REG_PAGE,
+ led_addr, phy_addr, &led_reg);
if (status)
goto restore_config;
if (on)
led_reg = I40E_PHY_LED_MANUAL_ON;
else
led_reg = 0;
- status = i40e_write_phy_register(hw, I40E_PHY_COM_REG_PAGE,
- led_addr, phy_addr, led_reg);
+ status = i40e_write_phy_register_clause45(hw, I40E_PHY_COM_REG_PAGE,
+ led_addr, phy_addr, led_reg);
if (status)
goto restore_config;
if (mode & I40E_PHY_LED_MODE_ORIG) {
led_ctl = (mode & I40E_PHY_LED_MODE_MASK);
- status = i40e_write_phy_register(hw,
+ status = i40e_write_phy_register_clause45(hw,
I40E_PHY_COM_REG_PAGE,
led_addr, phy_addr, led_ctl);
}
return status;
restore_config:
- status = i40e_write_phy_register(hw, I40E_PHY_COM_REG_PAGE, led_addr,
- phy_addr, led_ctl);
+ status = i40e_write_phy_register_clause45(hw, I40E_PHY_COM_REG_PAGE,
+ led_addr, phy_addr, led_ctl);
return status;
}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
index 0c1875b5b16d..f1f41f12902f 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
@@ -134,7 +134,7 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid)
struct rtnl_link_stats64 *nstat;
struct i40e_mac_filter *f;
struct i40e_vsi *vsi;
- int i;
+ int i, bkt;
vsi = i40e_dbg_find_vsi(pf, seid);
if (!vsi) {
@@ -166,13 +166,13 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid)
pf->hw.mac.addr,
pf->hw.mac.san_addr,
pf->hw.mac.port_addr);
- list_for_each_entry(f, &vsi->mac_filter_list, list) {
+ hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) {
dev_info(&pf->pdev->dev,
- " mac_filter_list: %pM vid=%d, is_netdev=%d is_vf=%d counter=%d, state %s\n",
- f->macaddr, f->vlan, f->is_netdev, f->is_vf,
- f->counter, i40e_filter_state_string[f->state]);
+ " mac_filter_hash: %pM vid=%d, state %s\n",
+ f->macaddr, f->vlan,
+ i40e_filter_state_string[f->state]);
}
- dev_info(&pf->pdev->dev, " active_filters %d, promisc_threshold %d, overflow promisc %s\n",
+ dev_info(&pf->pdev->dev, " active_filters %u, promisc_threshold %u, overflow promisc %s\n",
vsi->active_filters, vsi->promisc_threshold,
(test_bit(__I40E_FILTER_OVERFLOW_PROMISC, &vsi->state) ?
"ON" : "OFF"));
@@ -867,86 +867,6 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
dev_info(&pf->pdev->dev, "deleting relay %d\n", veb_seid);
i40e_veb_release(pf->veb[i]);
-
- } else if (strncmp(cmd_buf, "add macaddr", 11) == 0) {
- struct i40e_mac_filter *f;
- int vlan = 0;
- u8 ma[6];
- int ret;
-
- cnt = sscanf(&cmd_buf[11],
- "%i %hhx:%hhx:%hhx:%hhx:%hhx:%hhx %i",
- &vsi_seid,
- &ma[0], &ma[1], &ma[2], &ma[3], &ma[4], &ma[5],
- &vlan);
- if (cnt == 7) {
- vlan = 0;
- } else if (cnt != 8) {
- dev_info(&pf->pdev->dev,
- "add macaddr: bad command string, cnt=%d\n",
- cnt);
- goto command_write_done;
- }
-
- vsi = i40e_dbg_find_vsi(pf, vsi_seid);
- if (!vsi) {
- dev_info(&pf->pdev->dev,
- "add macaddr: VSI %d not found\n", vsi_seid);
- goto command_write_done;
- }
-
- spin_lock_bh(&vsi->mac_filter_list_lock);
- f = i40e_add_filter(vsi, ma, vlan, false, false);
- spin_unlock_bh(&vsi->mac_filter_list_lock);
- ret = i40e_sync_vsi_filters(vsi);
- if (f && !ret)
- dev_info(&pf->pdev->dev,
- "add macaddr: %pM vlan=%d added to VSI %d\n",
- ma, vlan, vsi_seid);
- else
- dev_info(&pf->pdev->dev,
- "add macaddr: %pM vlan=%d to VSI %d failed, f=%p ret=%d\n",
- ma, vlan, vsi_seid, f, ret);
-
- } else if (strncmp(cmd_buf, "del macaddr", 11) == 0) {
- int vlan = 0;
- u8 ma[6];
- int ret;
-
- cnt = sscanf(&cmd_buf[11],
- "%i %hhx:%hhx:%hhx:%hhx:%hhx:%hhx %i",
- &vsi_seid,
- &ma[0], &ma[1], &ma[2], &ma[3], &ma[4], &ma[5],
- &vlan);
- if (cnt == 7) {
- vlan = 0;
- } else if (cnt != 8) {
- dev_info(&pf->pdev->dev,
- "del macaddr: bad command string, cnt=%d\n",
- cnt);
- goto command_write_done;
- }
-
- vsi = i40e_dbg_find_vsi(pf, vsi_seid);
- if (!vsi) {
- dev_info(&pf->pdev->dev,
- "del macaddr: VSI %d not found\n", vsi_seid);
- goto command_write_done;
- }
-
- spin_lock_bh(&vsi->mac_filter_list_lock);
- i40e_del_filter(vsi, ma, vlan, false, false);
- spin_unlock_bh(&vsi->mac_filter_list_lock);
- ret = i40e_sync_vsi_filters(vsi);
- if (!ret)
- dev_info(&pf->pdev->dev,
- "del macaddr: %pM vlan=%d removed from VSI %d\n",
- ma, vlan, vsi_seid);
- else
- dev_info(&pf->pdev->dev,
- "del macaddr: %pM vlan=%d from VSI %d failed, ret=%d\n",
- ma, vlan, vsi_seid, ret);
-
} else if (strncmp(cmd_buf, "add pvid", 8) == 0) {
i40e_status ret;
u16 vid;
@@ -1210,24 +1130,6 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
dev_info(&pf->pdev->dev,
"dump debug fwdata <cluster_id> <table_id> <index>\n");
}
-
- } else if (strncmp(cmd_buf, "msg_enable", 10) == 0) {
- u32 level;
- cnt = sscanf(&cmd_buf[10], "%i", &level);
- if (cnt) {
- if (I40E_DEBUG_USER & level) {
- pf->hw.debug_mask = level;
- dev_info(&pf->pdev->dev,
- "set hw.debug_mask = 0x%08x\n",
- pf->hw.debug_mask);
- }
- pf->msg_enable = level;
- dev_info(&pf->pdev->dev, "set msg_enable = 0x%08x\n",
- pf->msg_enable);
- } else {
- dev_info(&pf->pdev->dev, "msg_enable = 0x%08x\n",
- pf->msg_enable);
- }
} else if (strncmp(cmd_buf, "pfr", 3) == 0) {
dev_info(&pf->pdev->dev, "debugfs: forcing PFR\n");
i40e_do_reset_safe(pf, BIT(__I40E_PF_RESET_REQUESTED));
@@ -1633,8 +1535,6 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
dev_info(&pf->pdev->dev, " del vsi [vsi_seid]\n");
dev_info(&pf->pdev->dev, " add relay <uplink_seid> <vsi_seid>\n");
dev_info(&pf->pdev->dev, " del relay <relay_seid>\n");
- dev_info(&pf->pdev->dev, " add macaddr <vsi_seid> <aa:bb:cc:dd:ee:ff> [vlan]\n");
- dev_info(&pf->pdev->dev, " del macaddr <vsi_seid> <aa:bb:cc:dd:ee:ff> [vlan]\n");
dev_info(&pf->pdev->dev, " add pvid <vsi_seid> <vid>\n");
dev_info(&pf->pdev->dev, " del pvid <vsi_seid>\n");
dev_info(&pf->pdev->dev, " dump switch\n");
@@ -1644,7 +1544,6 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
dev_info(&pf->pdev->dev, " dump desc aq\n");
dev_info(&pf->pdev->dev, " dump reset stats\n");
dev_info(&pf->pdev->dev, " dump debug fwdata <cluster_id> <table_id> <index>\n");
- dev_info(&pf->pdev->dev, " msg_enable [level]\n");
dev_info(&pf->pdev->dev, " read <reg>\n");
dev_info(&pf->pdev->dev, " write <reg> <value>\n");
dev_info(&pf->pdev->dev, " clear_stats vsi [seid]\n");
diff --git a/drivers/net/ethernet/intel/i40e/i40e_devids.h b/drivers/net/ethernet/intel/i40e/i40e_devids.h
index dd4457d29e98..8e46098bad57 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_devids.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_devids.h
@@ -39,6 +39,8 @@
#define I40E_DEV_ID_20G_KR2 0x1587
#define I40E_DEV_ID_20G_KR2_A 0x1588
#define I40E_DEV_ID_10G_BASE_T4 0x1589
+#define I40E_DEV_ID_25G_B 0x158A
+#define I40E_DEV_ID_25G_SFP28 0x158B
#define I40E_DEV_ID_KX_X722 0x37CE
#define I40E_DEV_ID_QSFP_X722 0x37CF
#define I40E_DEV_ID_SFP_X722 0x37D0
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
index 92bc8846f1ba..cc1465aac2ef 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
@@ -104,7 +104,7 @@ static const struct i40e_stats i40e_gstrings_misc_stats[] = {
* The PF_STATs are appended to the netdev stats only when ethtool -S
* is queried on the base PF netdev, not on the VMDq or FCoE netdev.
*/
-static struct i40e_stats i40e_gstrings_stats[] = {
+static const struct i40e_stats i40e_gstrings_stats[] = {
I40E_PF_STAT("rx_bytes", stats.eth.rx_bytes),
I40E_PF_STAT("tx_bytes", stats.eth.tx_bytes),
I40E_PF_STAT("rx_unicast", stats.eth.rx_unicast),
@@ -216,7 +216,6 @@ enum i40e_ethtool_test_id {
I40E_ETH_TEST_REG = 0,
I40E_ETH_TEST_EEPROM,
I40E_ETH_TEST_INTR,
- I40E_ETH_TEST_LOOPBACK,
I40E_ETH_TEST_LINK,
};
@@ -224,32 +223,27 @@ static const char i40e_gstrings_test[][ETH_GSTRING_LEN] = {
"Register test (offline)",
"Eeprom test (offline)",
"Interrupt test (offline)",
- "Loopback test (offline)",
"Link test (on/offline)"
};
#define I40E_TEST_LEN (sizeof(i40e_gstrings_test) / ETH_GSTRING_LEN)
-static const char i40e_priv_flags_strings_gl[][ETH_GSTRING_LEN] = {
+static const char i40e_priv_flags_strings[][ETH_GSTRING_LEN] = {
"MFP",
"LinkPolling",
"flow-director-atr",
"veb-stats",
"hw-atr-eviction",
- "vf-true-promisc-support",
};
-#define I40E_PRIV_FLAGS_GL_STR_LEN ARRAY_SIZE(i40e_priv_flags_strings_gl)
+#define I40E_PRIV_FLAGS_STR_LEN ARRAY_SIZE(i40e_priv_flags_strings)
-static const char i40e_priv_flags_strings[][ETH_GSTRING_LEN] = {
- "NPAR",
- "LinkPolling",
- "flow-director-atr",
- "veb-stats",
- "hw-atr-eviction",
+/* Private flags with a global effect, restricted to PF 0 */
+static const char i40e_gl_priv_flags_strings[][ETH_GSTRING_LEN] = {
+ "vf-true-promisc-support",
};
-#define I40E_PRIV_FLAGS_STR_LEN ARRAY_SIZE(i40e_priv_flags_strings)
+#define I40E_GL_PRIV_FLAGS_STR_LEN ARRAY_SIZE(i40e_gl_priv_flags_strings)
/**
* i40e_partition_setting_complaint - generic complaint for MFP restriction
@@ -271,8 +265,9 @@ static void i40e_partition_setting_complaint(struct i40e_pf *pf)
static void i40e_phy_type_to_ethtool(struct i40e_pf *pf, u32 *supported,
u32 *advertising)
{
- enum i40e_aq_capabilities_phy_type phy_types = pf->hw.phy.phy_types;
struct i40e_link_status *hw_link_info = &pf->hw.phy.link_info;
+ u64 phy_types = pf->hw.phy.phy_types;
+
*supported = 0x0;
*advertising = 0x0;
@@ -351,11 +346,13 @@ static void i40e_phy_type_to_ethtool(struct i40e_pf *pf, u32 *supported,
*advertising |= ADVERTISED_20000baseKR2_Full;
}
if (phy_types & I40E_CAP_PHY_TYPE_10GBASE_KR) {
- *supported |= SUPPORTED_10000baseKR_Full |
- SUPPORTED_Autoneg;
+ if (!(pf->flags & I40E_FLAG_HAVE_CRT_RETIMER))
+ *supported |= SUPPORTED_10000baseKR_Full |
+ SUPPORTED_Autoneg;
*advertising |= ADVERTISED_Autoneg;
if (hw_link_info->requested_speeds & I40E_LINK_SPEED_10GB)
- *advertising |= ADVERTISED_10000baseKR_Full;
+ if (!(pf->flags & I40E_FLAG_HAVE_CRT_RETIMER))
+ *advertising |= ADVERTISED_10000baseKR_Full;
}
if (phy_types & I40E_CAP_PHY_TYPE_10GBASE_KX4) {
*supported |= SUPPORTED_10000baseKX4_Full |
@@ -365,11 +362,20 @@ static void i40e_phy_type_to_ethtool(struct i40e_pf *pf, u32 *supported,
*advertising |= ADVERTISED_10000baseKX4_Full;
}
if (phy_types & I40E_CAP_PHY_TYPE_1000BASE_KX) {
- *supported |= SUPPORTED_1000baseKX_Full |
- SUPPORTED_Autoneg;
+ if (!(pf->flags & I40E_FLAG_HAVE_CRT_RETIMER))
+ *supported |= SUPPORTED_1000baseKX_Full |
+ SUPPORTED_Autoneg;
*advertising |= ADVERTISED_Autoneg;
if (hw_link_info->requested_speeds & I40E_LINK_SPEED_1GB)
- *advertising |= ADVERTISED_1000baseKX_Full;
+ if (!(pf->flags & I40E_FLAG_HAVE_CRT_RETIMER))
+ *advertising |= ADVERTISED_1000baseKX_Full;
+ }
+ if (phy_types & I40E_CAP_PHY_TYPE_25GBASE_KR ||
+ phy_types & I40E_CAP_PHY_TYPE_25GBASE_CR ||
+ phy_types & I40E_CAP_PHY_TYPE_25GBASE_SR ||
+ phy_types & I40E_CAP_PHY_TYPE_25GBASE_LR) {
+ *supported |= SUPPORTED_Autoneg;
+ *advertising |= ADVERTISED_Autoneg;
}
}
@@ -493,6 +499,14 @@ static void i40e_get_settings_link_up(struct i40e_hw *hw,
ADVERTISED_1000baseKX_Full |
ADVERTISED_Autoneg;
break;
+ case I40E_PHY_TYPE_25GBASE_KR:
+ case I40E_PHY_TYPE_25GBASE_CR:
+ case I40E_PHY_TYPE_25GBASE_SR:
+ case I40E_PHY_TYPE_25GBASE_LR:
+ ecmd->supported = SUPPORTED_Autoneg;
+ ecmd->advertising = ADVERTISED_Autoneg;
+ /* TODO: add speeds when ethtool is ready to support them */
+ break;
default:
/* if we got here and link is up something bad is afoot */
netdev_info(netdev, "WARNING: Link is up but PHY type 0x%x is not recognized.\n",
@@ -514,6 +528,14 @@ static void i40e_get_settings_link_up(struct i40e_hw *hw,
case I40E_LINK_SPEED_40GB:
ethtool_cmd_speed_set(ecmd, SPEED_40000);
break;
+ case I40E_LINK_SPEED_25GB:
+#ifdef SPEED_25000
+ ethtool_cmd_speed_set(ecmd, SPEED_25000);
+#else
+ netdev_info(netdev,
+ "Speed is 25G, display not supported by this version of ethtool.\n");
+#endif
+ break;
case I40E_LINK_SPEED_20GB:
ethtool_cmd_speed_set(ecmd, SPEED_20000);
break;
@@ -978,6 +1000,10 @@ static u32 i40e_get_msglevel(struct net_device *netdev)
{
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_pf *pf = np->vsi->back;
+ u32 debug_mask = pf->hw.debug_mask;
+
+ if (debug_mask)
+ netdev_info(netdev, "i40e debug_mask: 0x%08X\n", debug_mask);
return pf->msg_enable;
}
@@ -989,7 +1015,8 @@ static void i40e_set_msglevel(struct net_device *netdev, u32 data)
if (I40E_DEBUG_USER & data)
pf->hw.debug_mask = data;
- pf->msg_enable = data;
+ else
+ pf->msg_enable = data;
}
static int i40e_get_regs_len(struct net_device *netdev)
@@ -1191,10 +1218,9 @@ static void i40e_get_drvinfo(struct net_device *netdev,
sizeof(drvinfo->fw_version));
strlcpy(drvinfo->bus_info, pci_name(pf->pdev),
sizeof(drvinfo->bus_info));
+ drvinfo->n_priv_flags = I40E_PRIV_FLAGS_STR_LEN;
if (pf->hw.pf_id == 0)
- drvinfo->n_priv_flags = I40E_PRIV_FLAGS_GL_STR_LEN;
- else
- drvinfo->n_priv_flags = I40E_PRIV_FLAGS_STR_LEN;
+ drvinfo->n_priv_flags += I40E_GL_PRIV_FLAGS_STR_LEN;
}
static void i40e_get_ringparam(struct net_device *netdev,
@@ -1219,6 +1245,7 @@ static int i40e_set_ringparam(struct net_device *netdev,
{
struct i40e_ring *tx_rings = NULL, *rx_rings = NULL;
struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_hw *hw = &np->vsi->back->hw;
struct i40e_vsi *vsi = np->vsi;
struct i40e_pf *pf = vsi->back;
u32 new_rx_count, new_tx_count;
@@ -1311,10 +1338,6 @@ static int i40e_set_ringparam(struct net_device *netdev,
}
for (i = 0; i < vsi->num_queue_pairs; i++) {
- /* this is to allow wr32 to have something to write to
- * during early allocation of Rx buffers
- */
- u32 __iomem faketail = 0;
struct i40e_ring *ring;
u16 unused;
@@ -1326,7 +1349,10 @@ static int i40e_set_ringparam(struct net_device *netdev,
*/
rx_rings[i].desc = NULL;
rx_rings[i].rx_bi = NULL;
- rx_rings[i].tail = (u8 __iomem *)&faketail;
+ /* this is to allow wr32 to have something to write to
+ * during early allocation of Rx buffers
+ */
+ rx_rings[i].tail = hw->hw_addr + I40E_PRTGEN_STATUS;
err = i40e_setup_rx_descriptors(&rx_rings[i]);
if (err)
goto rx_unwind;
@@ -1422,10 +1448,8 @@ static int i40e_get_sset_count(struct net_device *netdev, int sset)
return I40E_VSI_STATS_LEN(netdev);
}
case ETH_SS_PRIV_FLAGS:
- if (pf->hw.pf_id == 0)
- return I40E_PRIV_FLAGS_GL_STR_LEN;
- else
- return I40E_PRIV_FLAGS_STR_LEN;
+ return I40E_PRIV_FLAGS_STR_LEN +
+ (pf->hw.pf_id == 0 ? I40E_GL_PRIV_FLAGS_STR_LEN : 0);
default:
return -EOPNOTSUPP;
}
@@ -1536,10 +1560,8 @@ static void i40e_get_strings(struct net_device *netdev, u32 stringset,
switch (stringset) {
case ETH_SS_TEST:
- for (i = 0; i < I40E_TEST_LEN; i++) {
- memcpy(data, i40e_gstrings_test[i], ETH_GSTRING_LEN);
- data += ETH_GSTRING_LEN;
- }
+ memcpy(data, i40e_gstrings_test,
+ I40E_TEST_LEN * ETH_GSTRING_LEN);
break;
case ETH_SS_STATS:
for (i = 0; i < I40E_NETDEV_STATS_LEN; i++) {
@@ -1623,19 +1645,12 @@ static void i40e_get_strings(struct net_device *netdev, u32 stringset,
/* BUG_ON(p - data != I40E_STATS_LEN * ETH_GSTRING_LEN); */
break;
case ETH_SS_PRIV_FLAGS:
- if (pf->hw.pf_id == 0) {
- for (i = 0; i < I40E_PRIV_FLAGS_GL_STR_LEN; i++) {
- memcpy(data, i40e_priv_flags_strings_gl[i],
- ETH_GSTRING_LEN);
- data += ETH_GSTRING_LEN;
- }
- } else {
- for (i = 0; i < I40E_PRIV_FLAGS_STR_LEN; i++) {
- memcpy(data, i40e_priv_flags_strings[i],
- ETH_GSTRING_LEN);
- data += ETH_GSTRING_LEN;
- }
- }
+ memcpy(data, i40e_priv_flags_strings,
+ I40E_PRIV_FLAGS_STR_LEN * ETH_GSTRING_LEN);
+ data += I40E_PRIV_FLAGS_STR_LEN * ETH_GSTRING_LEN;
+ if (pf->hw.pf_id == 0)
+ memcpy(data, i40e_gl_priv_flags_strings,
+ I40E_GL_PRIV_FLAGS_STR_LEN * ETH_GSTRING_LEN);
break;
default:
break;
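
After this consolidation every PF exposes the base private flags and PF 0 alone appends the global ones, so the string table handed to ethtool is base flags first, global flags after, and the count returned by get_sset_count must match the same arithmetic. A standalone sketch of that layout (GSTRING_LEN is a stand-in for ETH_GSTRING_LEN):

#include <stdio.h>
#include <string.h>

#define GSTRING_LEN 32

static const char base_flags[][GSTRING_LEN] = {
	"MFP", "LinkPolling", "flow-director-atr", "veb-stats",
	"hw-atr-eviction",
};
static const char gl_flags[][GSTRING_LEN] = {
	"vf-true-promisc-support",
};

int main(void)
{
	int pf_id = 0;
	int n = 5 + (pf_id == 0 ? 1 : 0);	/* must agree with the copy */
	char data[6][GSTRING_LEN];

	memcpy(data, base_flags, sizeof(base_flags));
	if (pf_id == 0)				/* global flags: PF 0 only */
		memcpy(data[5], gl_flags, sizeof(gl_flags));

	for (int i = 0; i < n; i++)
		printf("%s\n", data[i]);
	return 0;
}
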
@@ -1666,8 +1681,19 @@ static int i40e_get_ts_info(struct net_device *dev,
info->tx_types = BIT(HWTSTAMP_TX_OFF) | BIT(HWTSTAMP_TX_ON);
info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) |
- BIT(HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
- BIT(HWTSTAMP_FILTER_PTP_V2_EVENT);
+ BIT(HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_L2_SYNC) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ);
+
+ if (pf->flags & I40E_FLAG_PTP_L4_CAPABLE)
+ info->rx_filters |= BIT(HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
+ BIT(HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_EVENT) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_L4_EVENT) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_SYNC) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_L4_SYNC) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_DELAY_REQ) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ);
return 0;
}
@@ -1739,17 +1765,6 @@ static int i40e_intr_test(struct net_device *netdev, u64 *data)
return *data;
}
-static int i40e_loopback_test(struct net_device *netdev, u64 *data)
-{
- struct i40e_netdev_priv *np = netdev_priv(netdev);
- struct i40e_pf *pf = np->vsi->back;
-
- netif_info(pf, hw, netdev, "loopback test not implemented\n");
- *data = 0;
-
- return *data;
-}
-
static inline bool i40e_active_vfs(struct i40e_pf *pf)
{
struct i40e_vf *vfs = pf->vf;
@@ -1763,17 +1778,7 @@ static inline bool i40e_active_vfs(struct i40e_pf *pf)
static inline bool i40e_active_vmdqs(struct i40e_pf *pf)
{
- struct i40e_vsi **vsi = pf->vsi;
- int i;
-
- for (i = 0; i < pf->num_alloc_vsi; i++) {
- if (!vsi[i])
- continue;
- if (vsi[i]->type == I40E_VSI_VMDQ2)
- return true;
- }
-
- return false;
+ return !!i40e_find_vsi_by_type(pf, I40E_VSI_VMDQ2);
}
static void i40e_diag_test(struct net_device *netdev,
@@ -1795,7 +1800,6 @@ static void i40e_diag_test(struct net_device *netdev,
data[I40E_ETH_TEST_REG] = 1;
data[I40E_ETH_TEST_EEPROM] = 1;
data[I40E_ETH_TEST_INTR] = 1;
- data[I40E_ETH_TEST_LOOPBACK] = 1;
data[I40E_ETH_TEST_LINK] = 1;
eth_test->flags |= ETH_TEST_FL_FAILED;
clear_bit(__I40E_TESTING, &pf->state);
@@ -1823,9 +1827,6 @@ static void i40e_diag_test(struct net_device *netdev,
if (i40e_intr_test(netdev, &data[I40E_ETH_TEST_INTR]))
eth_test->flags |= ETH_TEST_FL_FAILED;
- if (i40e_loopback_test(netdev, &data[I40E_ETH_TEST_LOOPBACK]))
- eth_test->flags |= ETH_TEST_FL_FAILED;
-
/* run reg test last, a reset is required after it */
if (i40e_reg_test(netdev, &data[I40E_ETH_TEST_REG]))
eth_test->flags |= ETH_TEST_FL_FAILED;
@@ -1846,7 +1847,6 @@ static void i40e_diag_test(struct net_device *netdev,
data[I40E_ETH_TEST_REG] = 0;
data[I40E_ETH_TEST_EEPROM] = 0;
data[I40E_ETH_TEST_INTR] = 0;
- data[I40E_ETH_TEST_LOOPBACK] = 0;
}
skip_ol_tests:
@@ -1925,7 +1925,7 @@ static int i40e_set_phys_id(struct net_device *netdev,
switch (state) {
case ETHTOOL_ID_ACTIVE:
- if (!(pf->flags & I40E_FLAG_HAVE_10GBASET_PHY)) {
+ if (!(pf->flags & I40E_FLAG_PHY_CONTROLS_LEDS)) {
pf->led_status = i40e_led_get(hw);
} else {
i40e_aq_set_phy_debug(hw, I40E_PHY_DEBUG_ALL, NULL);
@@ -1935,20 +1935,20 @@ static int i40e_set_phys_id(struct net_device *netdev,
}
return blink_freq;
case ETHTOOL_ID_ON:
- if (!(pf->flags & I40E_FLAG_HAVE_10GBASET_PHY))
+ if (!(pf->flags & I40E_FLAG_PHY_CONTROLS_LEDS))
i40e_led_set(hw, 0xf, false);
else
ret = i40e_led_set_phy(hw, true, pf->led_status, 0);
break;
case ETHTOOL_ID_OFF:
- if (!(pf->flags & I40E_FLAG_HAVE_10GBASET_PHY))
+ if (!(pf->flags & I40E_FLAG_PHY_CONTROLS_LEDS))
i40e_led_set(hw, 0x0, false);
else
ret = i40e_led_set_phy(hw, false, pf->led_status, 0);
break;
case ETHTOOL_ID_INACTIVE:
- if (!(pf->flags & I40E_FLAG_HAVE_10GBASET_PHY)) {
- i40e_led_set(hw, false, pf->led_status);
+ if (!(pf->flags & I40E_FLAG_PHY_CONTROLS_LEDS)) {
+ i40e_led_set(hw, pf->led_status, false);
} else {
ret = i40e_led_set_phy(hw, false, pf->led_status,
(pf->phy_led_val |
diff --git a/drivers/net/ethernet/intel/i40e/i40e_fcoe.c b/drivers/net/ethernet/intel/i40e/i40e_fcoe.c
index 58e6c1570335..b077ef8b00fa 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_fcoe.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_fcoe.c
@@ -1522,12 +1522,12 @@ void i40e_fcoe_config_netdev(struct net_device *netdev, struct i40e_vsi *vsi)
* same PCI function.
*/
netdev->dev_port = 1;
- spin_lock_bh(&vsi->mac_filter_list_lock);
- i40e_add_filter(vsi, hw->mac.san_addr, 0, false, false);
- i40e_add_filter(vsi, (u8[6]) FC_FCOE_FLOGI_MAC, 0, false, false);
- i40e_add_filter(vsi, FIP_ALL_FCOE_MACS, 0, false, false);
- i40e_add_filter(vsi, FIP_ALL_ENODE_MACS, 0, false, false);
- spin_unlock_bh(&vsi->mac_filter_list_lock);
+ spin_lock_bh(&vsi->mac_filter_hash_lock);
+ i40e_add_filter(vsi, hw->mac.san_addr, 0);
+ i40e_add_filter(vsi, (u8[6]) FC_FCOE_FLOGI_MAC, 0);
+ i40e_add_filter(vsi, FIP_ALL_FCOE_MACS, 0);
+ i40e_add_filter(vsi, FIP_ALL_ENODE_MACS, 0);
+ spin_unlock_bh(&vsi->mac_filter_hash_lock);
/* use san mac */
ether_addr_copy(netdev->dev_addr, hw->mac.san_addr);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 31c97e3937a4..ad4cf639430e 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -41,7 +41,7 @@ static const char i40e_driver_string[] =
#define DRV_VERSION_MAJOR 1
#define DRV_VERSION_MINOR 6
-#define DRV_VERSION_BUILD 16
+#define DRV_VERSION_BUILD 25
#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
__stringify(DRV_VERSION_MINOR) "." \
__stringify(DRV_VERSION_BUILD) DRV_KERN
@@ -86,6 +86,8 @@ static const struct pci_device_id i40e_pci_tbl[] = {
{PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_I_X722), 0},
{PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2), 0},
{PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2_A), 0},
+ {PCI_VDEVICE(INTEL, I40E_DEV_ID_25G_B), 0},
+ {PCI_VDEVICE(INTEL, I40E_DEV_ID_25G_SFP28), 0},
/* required last entry */
{0, }
};
@@ -93,8 +95,8 @@ MODULE_DEVICE_TABLE(pci, i40e_pci_tbl);
#define I40E_MAX_VF_COUNT 128
static int debug = -1;
-module_param(debug, int, 0);
-MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
+module_param(debug, uint, 0);
+MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all), Debug mask (0x8XXXXXXX)");
MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>");
MODULE_DESCRIPTION("Intel(R) Ethernet Connection XL710 Network Driver");
@@ -286,8 +288,7 @@ struct i40e_vsi *i40e_find_vsi_from_id(struct i40e_pf *pf, u16 id)
void i40e_service_event_schedule(struct i40e_pf *pf)
{
if (!test_bit(__I40E_DOWN, &pf->state) &&
- !test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state) &&
- !test_and_set_bit(__I40E_SERVICE_SCHED, &pf->state))
+ !test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state))
queue_work(i40e_wq, &pf->service_task);
}
@@ -1145,25 +1146,22 @@ void i40e_update_stats(struct i40e_vsi *vsi)
* @vsi: the VSI to be searched
* @macaddr: the MAC address
* @vlan: the vlan
- * @is_vf: make sure its a VF filter, else doesn't matter
- * @is_netdev: make sure its a netdev filter, else doesn't matter
*
* Returns ptr to the filter object or NULL
**/
static struct i40e_mac_filter *i40e_find_filter(struct i40e_vsi *vsi,
- u8 *macaddr, s16 vlan,
- bool is_vf, bool is_netdev)
+ const u8 *macaddr, s16 vlan)
{
struct i40e_mac_filter *f;
+ u64 key;
if (!vsi || !macaddr)
return NULL;
- list_for_each_entry(f, &vsi->mac_filter_list, list) {
+ key = i40e_addr_to_hkey(macaddr);
+ hash_for_each_possible(vsi->mac_filter_hash, f, hlist, key) {
if ((ether_addr_equal(macaddr, f->macaddr)) &&
- (vlan == f->vlan) &&
- (!is_vf || f->is_vf) &&
- (!is_netdev || f->is_netdev))
+ (vlan == f->vlan))
return f;
}
return NULL;
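
Filter lookup now hashes into a single bucket instead of walking the whole list; i40e_addr_to_hkey is presumably the six MAC bytes packed into a u64 (an assumption, since its body is not in this diff). A reduced standalone model of the key derivation and single-chain probe:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define NBUCKETS 64	/* stand-in for the driver's hash table size */

/* Assumed shape of i40e_addr_to_hkey: pack the 6 MAC bytes into a u64 */
static uint64_t addr_to_hkey(const uint8_t *mac)
{
	uint64_t key = 0;

	memcpy(&key, mac, 6);
	return key;
}

struct filter {
	uint8_t mac[6];
	int16_t vlan;
	struct filter *next;
};

static struct filter *buckets[NBUCKETS];

static struct filter *find_filter(const uint8_t *mac, int16_t vlan)
{
	struct filter *f = buckets[addr_to_hkey(mac) % NBUCKETS];

	/* like hash_for_each_possible(): walk one chain, match MAC+VLAN */
	for (; f; f = f->next)
		if (!memcmp(f->mac, mac, 6) && f->vlan == vlan)
			return f;
	return NULL;
}

int main(void)
{
	uint8_t mac[6] = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 };
	struct filter f = { .vlan = 10 };

	memcpy(f.mac, mac, 6);
	buckets[addr_to_hkey(mac) % NBUCKETS] = &f;

	printf("found: %d\n", find_filter(mac, 10) != NULL);
	return 0;
}
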
@@ -1173,24 +1171,21 @@ static struct i40e_mac_filter *i40e_find_filter(struct i40e_vsi *vsi,
* i40e_find_mac - Find a mac addr in the macvlan filters list
* @vsi: the VSI to be searched
* @macaddr: the MAC address we are searching for
- * @is_vf: make sure its a VF filter, else doesn't matter
- * @is_netdev: make sure its a netdev filter, else doesn't matter
*
* Returns the first filter with the provided MAC address or NULL if
* MAC address was not found
**/
-struct i40e_mac_filter *i40e_find_mac(struct i40e_vsi *vsi, u8 *macaddr,
- bool is_vf, bool is_netdev)
+struct i40e_mac_filter *i40e_find_mac(struct i40e_vsi *vsi, const u8 *macaddr)
{
struct i40e_mac_filter *f;
+ u64 key;
if (!vsi || !macaddr)
return NULL;
- list_for_each_entry(f, &vsi->mac_filter_list, list) {
- if ((ether_addr_equal(macaddr, f->macaddr)) &&
- (!is_vf || f->is_vf) &&
- (!is_netdev || f->is_netdev))
+ key = i40e_addr_to_hkey(macaddr);
+ hash_for_each_possible(vsi->mac_filter_hash, f, hlist, key) {
+ if ((ether_addr_equal(macaddr, f->macaddr)))
return f;
}
return NULL;
@@ -1204,86 +1199,132 @@ struct i40e_mac_filter *i40e_find_mac(struct i40e_vsi *vsi, u8 *macaddr,
**/
bool i40e_is_vsi_in_vlan(struct i40e_vsi *vsi)
{
- struct i40e_mac_filter *f;
+ /* If we have a PVID, always operate in VLAN mode */
+ if (vsi->info.pvid)
+ return true;
- /* Only -1 for all the filters denotes not in vlan mode
- * so we have to go through all the list in order to make sure
+ /* We need to operate in VLAN mode whenever we have any filters with
+ * a VLAN other than I40E_VLAN_ANY. We could check the table each
+ * time, incurring search cost repeatedly. However, we can notice two
+ * things:
+ *
+ * 1) the only place where we can gain a VLAN filter is in
+ * i40e_add_filter.
+ *
+ * 2) the only place where filters are actually removed is in
+ * i40e_sync_filters_subtask.
+ *
+ * Thus, we can simply use a boolean value, has_vlan_filter, which we
+ * will set to true when we add a VLAN filter in i40e_add_filter. Then
+ * we have to perform the full search after deleting filters in
+ * i40e_sync_filters_subtask, but we already have to search
+ * filters here and can perform the check at the same time. This
+ * results in avoiding embedding a loop for VLAN mode inside another
+ * loop over all the filters, and should maintain correctness as noted
+ * above.
*/
- list_for_each_entry(f, &vsi->mac_filter_list, list) {
- if (f->vlan >= 0 || vsi->info.pvid)
- return true;
- }
-
- return false;
+ return vsi->has_vlan_filter;
}
/**
- * i40e_put_mac_in_vlan - Make macvlan filters from macaddrs and vlans
- * @vsi: the VSI to be searched
- * @macaddr: the mac address to be filtered
- * @is_vf: true if it is a VF
- * @is_netdev: true if it is a netdev
+ * i40e_correct_mac_vlan_filters - Correct non-VLAN filters if necessary
+ * @vsi: the VSI to configure
+ * @tmp_add_list: list of filters ready to be added
+ * @tmp_del_list: list of filters ready to be deleted
+ * @vlan_filters: the number of active VLAN filters
*
- * Goes through all the macvlan filters and adds a
- * macvlan filter for each unique vlan that already exists
+ * Update VLAN=0 and VLAN=-1 (I40E_VLAN_ANY) filters properly so that they
+ * behave as expected. If we have any active VLAN filters remaining or about
+ * to be added then we need to update non-VLAN filters to be marked as VLAN=0
+ * so that they only match against untagged traffic. If we no longer have any
+ * active VLAN filters, we need to make all non-VLAN filters marked as VLAN=-1
+ * so that they match against both tagged and untagged traffic. In this way,
+ * we ensure that we correctly receive the desired traffic. This ensures that
+ * when we have an active VLAN we will receive only untagged traffic and
+ * traffic matching active VLANs. If we have no active VLANs then we will
+ * operate in non-VLAN mode and receive all traffic, tagged or untagged.
*
- * Returns first filter found on success, else NULL
- **/
-struct i40e_mac_filter *i40e_put_mac_in_vlan(struct i40e_vsi *vsi, u8 *macaddr,
- bool is_vf, bool is_netdev)
+ * Finally, in a similar fashion, this function also corrects filters when
+ * there is an active PVID assigned to this VSI.
+ *
+ * In case of memory allocation failure return -ENOMEM. Otherwise, return 0.
+ *
+ * This function is only expected to be called from within
+ * i40e_sync_vsi_filters.
+ *
+ * NOTE: This function expects to be called while under the
+ * mac_filter_hash_lock
+ */
+static int i40e_correct_mac_vlan_filters(struct i40e_vsi *vsi,
+ struct hlist_head *tmp_add_list,
+ struct hlist_head *tmp_del_list,
+ int vlan_filters)
{
- struct i40e_mac_filter *f;
+ struct i40e_mac_filter *f, *add_head;
+ struct hlist_node *h;
+ int bkt, new_vlan;
- list_for_each_entry(f, &vsi->mac_filter_list, list) {
- if (vsi->info.pvid)
- f->vlan = le16_to_cpu(vsi->info.pvid);
- if (!i40e_find_filter(vsi, macaddr, f->vlan,
- is_vf, is_netdev)) {
- if (!i40e_add_filter(vsi, macaddr, f->vlan,
- is_vf, is_netdev))
- return NULL;
- }
- }
+ /* To determine if a particular filter needs to be replaced we
+ * have the three following conditions:
+ *
+ * a) if we have a PVID assigned, then all filters which are
+ * not marked as VLAN=PVID must be replaced with filters that
+ * are.
+ * b) otherwise, if we have any active VLANS, all filters
+ * which are marked as VLAN=-1 must be replaced with
+ * filters marked as VLAN=0
+ * c) finally, if we do not have any active VLANS, all filters
+ * which are marked as VLAN=0 must be replaced with filters
+ * marked as VLAN=-1
+ */
- return list_first_entry_or_null(&vsi->mac_filter_list,
- struct i40e_mac_filter, list);
-}
+ /* Update the filters about to be added in place */
+ hlist_for_each_entry(f, tmp_add_list, hlist) {
+ if (vsi->info.pvid && f->vlan != vsi->info.pvid)
+ f->vlan = vsi->info.pvid;
+ else if (vlan_filters && f->vlan == I40E_VLAN_ANY)
+ f->vlan = 0;
+ else if (!vlan_filters && f->vlan == 0)
+ f->vlan = I40E_VLAN_ANY;
+ }
+
+ /* Update the remaining active filters */
+ hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
+ /* Combine the checks for whether a filter needs to be changed
+ * and then determine the new VLAN inside the if block, in
+ * order to avoid duplicating code for adding the new filter
+ * then deleting the old filter.
+ */
+ if ((vsi->info.pvid && f->vlan != vsi->info.pvid) ||
+ (vlan_filters && f->vlan == I40E_VLAN_ANY) ||
+ (!vlan_filters && f->vlan == 0)) {
+ /* Determine the new vlan we will be adding */
+ if (vsi->info.pvid)
+ new_vlan = vsi->info.pvid;
+ else if (vlan_filters)
+ new_vlan = 0;
+ else
+ new_vlan = I40E_VLAN_ANY;
-/**
- * i40e_del_mac_all_vlan - Remove a MAC filter from all VLANS
- * @vsi: the VSI to be searched
- * @macaddr: the mac address to be removed
- * @is_vf: true if it is a VF
- * @is_netdev: true if it is a netdev
- *
- * Removes a given MAC address from a VSI, regardless of VLAN
- *
- * Returns 0 for success, or error
- **/
-int i40e_del_mac_all_vlan(struct i40e_vsi *vsi, u8 *macaddr,
- bool is_vf, bool is_netdev)
-{
- struct i40e_mac_filter *f = NULL;
- int changed = 0;
+ /* Create the new filter */
+ add_head = i40e_add_filter(vsi, f->macaddr, new_vlan);
+ if (!add_head)
+ return -ENOMEM;
- WARN(!spin_is_locked(&vsi->mac_filter_list_lock),
- "Missing mac_filter_list_lock\n");
- list_for_each_entry(f, &vsi->mac_filter_list, list) {
- if ((ether_addr_equal(macaddr, f->macaddr)) &&
- (is_vf == f->is_vf) &&
- (is_netdev == f->is_netdev)) {
- f->counter--;
- changed = 1;
- if (f->counter == 0)
- f->state = I40E_FILTER_REMOVE;
+ /* Put the replacement filter into the add list */
+ hash_del(&add_head->hlist);
+ hlist_add_head(&add_head->hlist, tmp_add_list);
+
+ /* Put the original filter into the delete list */
+ f->state = I40E_FILTER_REMOVE;
+ hash_del(&f->hlist);
+ hlist_add_head(&f->hlist, tmp_del_list);
}
}
- if (changed) {
- vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
- vsi->back->flags |= I40E_FLAG_FILTER_SYNC;
- return 0;
- }
- return -ENOENT;
+
+ vsi->has_vlan_filter = !!vlan_filters;
+
+ return 0;
}
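
The three correction rules collapse to a single target VLAN per situation: a PVID forces everything onto the PVID, otherwise active VLAN filters force untagged filters to VLAN=0, otherwise filters fall back to VLAN=-1 (match any). A worked standalone check of that decision table:

#include <assert.h>
#include <stdio.h>

#define VLAN_ANY (-1)	/* stand-in for I40E_VLAN_ANY */

/* Decide whether a filter at 'vlan' must be replaced, and with what,
 * following i40e_correct_mac_vlan_filters' rules a/b/c above.
 */
static int needs_fixup(int pvid, int vlan_filters, int vlan, int *new_vlan)
{
	if ((pvid && vlan != pvid) ||
	    (vlan_filters && vlan == VLAN_ANY) ||
	    (!vlan_filters && vlan == 0)) {
		*new_vlan = pvid ? pvid : (vlan_filters ? 0 : VLAN_ANY);
		return 1;
	}
	return 0;
}

int main(void)
{
	int nv;

	assert(needs_fixup(100, 0, VLAN_ANY, &nv) && nv == 100);	/* rule a */
	assert(needs_fixup(0, 1, VLAN_ANY, &nv) && nv == 0);		/* rule b */
	assert(needs_fixup(0, 0, 0, &nv) && nv == VLAN_ANY);		/* rule c */
	assert(!needs_fixup(0, 1, 0, &nv));	/* already correct */
	printf("all cases ok\n");
	return 0;
}
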
/**
@@ -1324,36 +1365,32 @@ static void i40e_rm_default_mac_filter(struct i40e_vsi *vsi, u8 *macaddr)
* @vsi: the VSI to be searched
* @macaddr: the MAC address
* @vlan: the vlan
- * @is_vf: make sure its a VF filter, else doesn't matter
- * @is_netdev: make sure its a netdev filter, else doesn't matter
*
* Returns ptr to the filter object or NULL when no memory available.
*
- * NOTE: This function is expected to be called with mac_filter_list_lock
+ * NOTE: This function is expected to be called with mac_filter_hash_lock
* being held.
**/
struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi,
- u8 *macaddr, s16 vlan,
- bool is_vf, bool is_netdev)
+ const u8 *macaddr, s16 vlan)
{
struct i40e_mac_filter *f;
- int changed = false;
+ u64 key;
if (!vsi || !macaddr)
return NULL;
- /* Do not allow broadcast filter to be added since broadcast filter
- * is added as part of add VSI for any newly created VSI except
- * FDIR VSI
- */
- if (is_broadcast_ether_addr(macaddr))
- return NULL;
-
- f = i40e_find_filter(vsi, macaddr, vlan, is_vf, is_netdev);
+ f = i40e_find_filter(vsi, macaddr, vlan);
if (!f) {
f = kzalloc(sizeof(*f), GFP_ATOMIC);
if (!f)
- goto add_filter_out;
+ return NULL;
+
+ /* Update the boolean indicating if we need to function in
+ * VLAN mode.
+ */
+ if (vlan >= 0)
+ vsi->has_vlan_filter = true;
ether_addr_copy(f->macaddr, macaddr);
f->vlan = vlan;
@@ -1365,100 +1402,148 @@ struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi,
f->state = I40E_FILTER_FAILED;
else
f->state = I40E_FILTER_NEW;
- changed = true;
- INIT_LIST_HEAD(&f->list);
- list_add_tail(&f->list, &vsi->mac_filter_list);
- }
+ INIT_HLIST_NODE(&f->hlist);
- /* increment counter and add a new flag if needed */
- if (is_vf) {
- if (!f->is_vf) {
- f->is_vf = true;
- f->counter++;
- }
- } else if (is_netdev) {
- if (!f->is_netdev) {
- f->is_netdev = true;
- f->counter++;
- }
- } else {
- f->counter++;
- }
+ key = i40e_addr_to_hkey(macaddr);
+ hash_add(vsi->mac_filter_hash, &f->hlist, key);
- if (changed) {
vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
vsi->back->flags |= I40E_FLAG_FILTER_SYNC;
}
-add_filter_out:
+ /* If we're asked to add a filter that has been marked for removal, it
+ * is safe to simply restore it to active state. __i40e_del_filter
+ * will have simply deleted any filters which were previously marked
+ * NEW or FAILED, so if it is currently marked REMOVE it must have
+ * previously been ACTIVE. Since we haven't yet run the sync filters
+ * task, just restore this filter to the ACTIVE state so that the
+ * sync task leaves it in place
+ */
+ if (f->state == I40E_FILTER_REMOVE)
+ f->state = I40E_FILTER_ACTIVE;
+
return f;
}
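
Filter lifetime now follows a small state machine: NEW and FAILED entries were never accepted by firmware and can be freed on the spot, anything else marked REMOVE waits for the sync task, and re-adding a REMOVE-marked filter simply revives it to ACTIVE as the block above does. A standalone sketch of those transitions:

#include <stdio.h>

enum fstate { F_NEW, F_ACTIVE, F_FAILED, F_REMOVE, F_FREED };

/* __i40e_del_filter's rule: never-synced filters are freed immediately,
 * synced ones are only marked for the next sync pass.
 */
static enum fstate del(enum fstate s)
{
	return (s == F_NEW || s == F_FAILED) ? F_FREED : F_REMOVE;
}

/* i40e_add_filter's rule: re-adding a REMOVE-marked filter revives it */
static enum fstate add_existing(enum fstate s)
{
	return (s == F_REMOVE) ? F_ACTIVE : s;
}

int main(void)
{
	printf("del(NEW)=%d del(ACTIVE)=%d add(REMOVE)=%d\n",
	       del(F_NEW), del(F_ACTIVE), add_existing(F_REMOVE));
	return 0;
}
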
/**
- * i40e_del_filter - Remove a mac/vlan filter from the VSI
+ * __i40e_del_filter - Remove a specific filter from the VSI
+ * @vsi: VSI to remove from
+ * @f: the filter to remove from the list
+ *
+ * This function should be called instead of i40e_del_filter only if you know
+ * the exact filter you will remove already, such as via i40e_find_filter or
+ * i40e_find_mac.
+ *
+ * NOTE: This function is expected to be called with mac_filter_hash_lock
+ * being held.
+ * ANOTHER NOTE: This function MUST be called from within the context of
+ * the "safe" variants of any list iterators, e.g. list_for_each_entry_safe()
+ * instead of list_for_each_entry().
+ **/
+static void __i40e_del_filter(struct i40e_vsi *vsi, struct i40e_mac_filter *f)
+{
+ if (!f)
+ return;
+
+ if ((f->state == I40E_FILTER_FAILED) ||
+ (f->state == I40E_FILTER_NEW)) {
+ /* this one never got added by the FW. Just remove it,
+ * no need to sync anything.
+ */
+ hash_del(&f->hlist);
+ kfree(f);
+ } else {
+ f->state = I40E_FILTER_REMOVE;
+ vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
+ vsi->back->flags |= I40E_FLAG_FILTER_SYNC;
+ }
+}
+
+/**
+ * i40e_del_filter - Remove a MAC/VLAN filter from the VSI
* @vsi: the VSI to be searched
* @macaddr: the MAC address
- * @vlan: the vlan
- * @is_vf: make sure it's a VF filter, else doesn't matter
- * @is_netdev: make sure it's a netdev filter, else doesn't matter
+ * @vlan: the VLAN
*
- * NOTE: This function is expected to be called with mac_filter_list_lock
+ * NOTE: This function is expected to be called with mac_filter_hash_lock
* being held.
* ANOTHER NOTE: This function MUST be called from within the context of
* the "safe" variants of any list iterators, e.g. list_for_each_entry_safe()
* instead of list_for_each_entry().
**/
-void i40e_del_filter(struct i40e_vsi *vsi,
- u8 *macaddr, s16 vlan,
- bool is_vf, bool is_netdev)
+void i40e_del_filter(struct i40e_vsi *vsi, const u8 *macaddr, s16 vlan)
{
struct i40e_mac_filter *f;
if (!vsi || !macaddr)
return;
- f = i40e_find_filter(vsi, macaddr, vlan, is_vf, is_netdev);
- if (!f || f->counter == 0)
- return;
+ f = i40e_find_filter(vsi, macaddr, vlan);
+ __i40e_del_filter(vsi, f);
+}
- if (is_vf) {
- if (f->is_vf) {
- f->is_vf = false;
- f->counter--;
- }
- } else if (is_netdev) {
- if (f->is_netdev) {
- f->is_netdev = false;
- f->counter--;
- }
- } else {
- /* make sure we don't remove a filter in use by VF or netdev */
- int min_f = 0;
+/**
+ * i40e_put_mac_in_vlan - Make macvlan filters from macaddrs and vlans
+ * @vsi: the VSI to be searched
+ * @macaddr: the mac address to be filtered
+ *
+ * Goes through all the macvlan filters and adds a macvlan filter for each
+ * unique vlan that already exists. If a PVID has been assigned, instead only
+ * add the macaddr to that VLAN.
+ *
+ * Returns last filter added on success, else NULL
+ **/
+struct i40e_mac_filter *i40e_put_mac_in_vlan(struct i40e_vsi *vsi,
+ const u8 *macaddr)
+{
+ struct i40e_mac_filter *f, *add = NULL;
+ struct hlist_node *h;
+ int bkt;
- min_f += (f->is_vf ? 1 : 0);
- min_f += (f->is_netdev ? 1 : 0);
+ if (vsi->info.pvid)
+ return i40e_add_filter(vsi, macaddr,
+ le16_to_cpu(vsi->info.pvid));
- if (f->counter > min_f)
- f->counter--;
+ hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
+ if (f->state == I40E_FILTER_REMOVE)
+ continue;
+ add = i40e_add_filter(vsi, macaddr, f->vlan);
+ if (!add)
+ return NULL;
}
- /* counter == 0 tells sync_filters_subtask to
- * remove the filter from the firmware's list
- */
- if (f->counter == 0) {
- if ((f->state == I40E_FILTER_FAILED) ||
- (f->state == I40E_FILTER_NEW)) {
- /* this one never got added by the FW. Just remove it,
- * no need to sync anything.
- */
- list_del(&f->list);
- kfree(f);
- } else {
- f->state = I40E_FILTER_REMOVE;
- vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
- vsi->back->flags |= I40E_FLAG_FILTER_SYNC;
+ return add;
+}
+
+/**
+ * i40e_del_mac_all_vlan - Remove a MAC filter from all VLANS
+ * @vsi: the VSI to be searched
+ * @macaddr: the mac address to be removed
+ *
+ * Removes a given MAC address from a VSI, regardless of VLAN
+ *
+ * Returns 0 for success, or error
+ **/
+int i40e_del_mac_all_vlan(struct i40e_vsi *vsi, const u8 *macaddr)
+{
+ struct i40e_mac_filter *f;
+ struct hlist_node *h;
+ bool found = false;
+ int bkt;
+
+ WARN(!spin_is_locked(&vsi->mac_filter_hash_lock),
+ "Missing mac_filter_hash_lock\n");
+ hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
+ if (ether_addr_equal(macaddr, f->macaddr)) {
+ __i40e_del_filter(vsi, f);
+ found = true;
}
}
+
+ if (found)
+ return 0;
+ else
+ return -ENOENT;
}
/**
@@ -1499,10 +1584,10 @@ static int i40e_set_mac(struct net_device *netdev, void *p)
else
netdev_info(netdev, "set new mac address %pM\n", addr->sa_data);
- spin_lock_bh(&vsi->mac_filter_list_lock);
- i40e_del_mac_all_vlan(vsi, netdev->dev_addr, false, true);
- i40e_put_mac_in_vlan(vsi, addr->sa_data, false, true);
- spin_unlock_bh(&vsi->mac_filter_list_lock);
+ spin_lock_bh(&vsi->mac_filter_hash_lock);
+ i40e_del_mac_all_vlan(vsi, netdev->dev_addr);
+ i40e_put_mac_in_vlan(vsi, addr->sa_data);
+ spin_unlock_bh(&vsi->mac_filter_hash_lock);
ether_addr_copy(netdev->dev_addr, addr->sa_data);
if (vsi->type == I40E_VSI_MAIN) {
i40e_status ret;
@@ -1666,6 +1751,52 @@ static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
}
/**
+ * i40e_addr_sync - Callback for dev_(mc|uc)_sync to add address
+ * @netdev: the netdevice
+ * @addr: address to add
+ *
+ * Called by __dev_(mc|uc)_sync when an address needs to be added. We call
+ * __dev_(uc|mc)_sync from .set_rx_mode and guarantee to hold the hash lock.
+ */
+static int i40e_addr_sync(struct net_device *netdev, const u8 *addr)
+{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_vsi *vsi = np->vsi;
+ struct i40e_mac_filter *f;
+
+ if (i40e_is_vsi_in_vlan(vsi))
+ f = i40e_put_mac_in_vlan(vsi, addr);
+ else
+ f = i40e_add_filter(vsi, addr, I40E_VLAN_ANY);
+
+ if (f)
+ return 0;
+ else
+ return -ENOMEM;
+}
+
+/**
+ * i40e_addr_unsync - Callback for dev_(mc|uc)_sync to remove address
+ * @netdev: the netdevice
+ * @addr: address to remove
+ *
+ * Called by __dev_(mc|uc)_sync when an address needs to be removed. We call
+ * __dev_(uc|mc)_sync from .set_rx_mode and guarantee to hold the hash lock.
+ */
+static int i40e_addr_unsync(struct net_device *netdev, const u8 *addr)
+{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_vsi *vsi = np->vsi;
+
+ if (i40e_is_vsi_in_vlan(vsi))
+ i40e_del_mac_all_vlan(vsi, addr);
+ else
+ i40e_del_filter(vsi, addr, I40E_VLAN_ANY);
+
+ return 0;
+}
+
+/**
* i40e_set_rx_mode - NDO callback to set the netdev filters
* @netdev: network interface device structure
**/
@@ -1676,62 +1807,14 @@ static void i40e_set_rx_mode(struct net_device *netdev)
#endif
{
struct i40e_netdev_priv *np = netdev_priv(netdev);
- struct i40e_mac_filter *f, *ftmp;
struct i40e_vsi *vsi = np->vsi;
- struct netdev_hw_addr *uca;
- struct netdev_hw_addr *mca;
- struct netdev_hw_addr *ha;
-
- spin_lock_bh(&vsi->mac_filter_list_lock);
-
- /* add addr if not already in the filter list */
- netdev_for_each_uc_addr(uca, netdev) {
- if (!i40e_find_mac(vsi, uca->addr, false, true)) {
- if (i40e_is_vsi_in_vlan(vsi))
- i40e_put_mac_in_vlan(vsi, uca->addr,
- false, true);
- else
- i40e_add_filter(vsi, uca->addr, I40E_VLAN_ANY,
- false, true);
- }
- }
- netdev_for_each_mc_addr(mca, netdev) {
- if (!i40e_find_mac(vsi, mca->addr, false, true)) {
- if (i40e_is_vsi_in_vlan(vsi))
- i40e_put_mac_in_vlan(vsi, mca->addr,
- false, true);
- else
- i40e_add_filter(vsi, mca->addr, I40E_VLAN_ANY,
- false, true);
- }
- }
-
- /* remove filter if not in netdev list */
- list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
-
- if (!f->is_netdev)
- continue;
+ spin_lock_bh(&vsi->mac_filter_hash_lock);
- netdev_for_each_mc_addr(mca, netdev)
- if (ether_addr_equal(mca->addr, f->macaddr))
- goto bottom_of_search_loop;
+ __dev_uc_sync(netdev, i40e_addr_sync, i40e_addr_unsync);
+ __dev_mc_sync(netdev, i40e_addr_sync, i40e_addr_unsync);
- netdev_for_each_uc_addr(uca, netdev)
- if (ether_addr_equal(uca->addr, f->macaddr))
- goto bottom_of_search_loop;
-
- for_each_dev_addr(netdev, ha)
- if (ether_addr_equal(ha->addr, f->macaddr))
- goto bottom_of_search_loop;
-
- /* f->macaddr wasn't found in uc, mc, or ha list so delete it */
- i40e_del_filter(vsi, f->macaddr, I40E_VLAN_ANY, false, true);
-
-bottom_of_search_loop:
- continue;
- }
- spin_unlock_bh(&vsi->mac_filter_list_lock);
+ spin_unlock_bh(&vsi->mac_filter_hash_lock);
/* check for other flag changes */
if (vsi->current_netdev_flags != vsi->netdev->flags) {
@@ -1746,21 +1829,26 @@ bottom_of_search_loop:
}
/**
- * i40e_undo_del_filter_entries - Undo the changes made to MAC filter entries
- * @vsi: pointer to vsi struct
+ * i40e_undo_filter_entries - Undo the changes made to MAC filter entries
+ * @vsi: Pointer to VSI struct
* @from: Pointer to list which contains MAC filter entries - changes to
 * those entries need to be undone.
*
- * MAC filter entries from list were slated to be removed from device.
+ * MAC filter entries from list were slated to be sent to firmware, either for
+ * addition or deletion.
**/
-static void i40e_undo_del_filter_entries(struct i40e_vsi *vsi,
- struct list_head *from)
+static void i40e_undo_filter_entries(struct i40e_vsi *vsi,
+ struct hlist_head *from)
{
- struct i40e_mac_filter *f, *ftmp;
+ struct i40e_mac_filter *f;
+ struct hlist_node *h;
+
+ hlist_for_each_entry_safe(f, h, from, hlist) {
+ u64 key = i40e_addr_to_hkey(f->macaddr);
- list_for_each_entry_safe(f, ftmp, from, list) {
/* Move the element back into MAC filter list*/
- list_move_tail(&f->list, &vsi->mac_filter_list);
+ hlist_del(&f->hlist);
+ hash_add(vsi->mac_filter_hash, &f->hlist, key);
}
}
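
The undo path above simply pulls each entry off the temporary hlist and re-adds it to the bucketed hash under a key derived from its MAC address. Below is a minimal userspace sketch of that move pattern; the bucket count, toy_key() and the struct layout are illustrative stand-ins, not i40e_addr_to_hkey() or the driver's real types.

#include <stdio.h>

#define NBKT 8

struct filt {
	unsigned char mac[6];
	struct filt *next;
};

/* illustrative stand-in for i40e_addr_to_hkey(); not the real key fn */
static unsigned int toy_key(const unsigned char *mac)
{
	return mac[5] % NBKT;
}

/* pull every node off the temporary list, re-add it to its bucket */
static void undo_entries(struct filt **tmp, struct filt *hash[NBKT])
{
	struct filt *f;

	while ((f = *tmp)) {
		unsigned int b = toy_key(f->mac);

		*tmp = f->next;		/* hlist_del() analogue */
		f->next = hash[b];	/* hash_add() analogue  */
		hash[b] = f;
	}
}

int main(void)
{
	struct filt a = { { 0, 0, 0, 0, 0, 1 }, NULL };
	struct filt *tmp = &a, *hash[NBKT] = { NULL };

	undo_entries(&tmp, hash);
	printf("bucket %u populated: %d\n", toy_key(a.mac),
	       hash[toy_key(a.mac)] == &a);
	return 0;
}
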
@@ -1770,7 +1858,6 @@ static void i40e_undo_del_filter_entries(struct i40e_vsi *vsi,
* @count: Number of filters added
* @add_list: return data from fw
* @head: pointer to first filter in current batch
- * @aq_err: status from fw
*
* MAC filter entries from list were slated to be added to device. Returns
* number of successful filters. Note that 0 does NOT mean success!
@@ -1778,45 +1865,146 @@ static void i40e_undo_del_filter_entries(struct i40e_vsi *vsi,
static int
i40e_update_filter_state(int count,
struct i40e_aqc_add_macvlan_element_data *add_list,
- struct i40e_mac_filter *add_head, int aq_err)
+ struct i40e_mac_filter *add_head)
{
int retval = 0;
int i;
-
- if (!aq_err) {
- retval = count;
- /* Everything's good, mark all filters active. */
- for (i = 0; i < count ; i++) {
- add_head->state = I40E_FILTER_ACTIVE;
- add_head = list_next_entry(add_head, list);
- }
- } else if (aq_err == I40E_AQ_RC_ENOSPC) {
- /* Device ran out of filter space. Check the return value
- * for each filter to see which ones are active.
+ for (i = 0; i < count; i++) {
+ /* Always check status of each filter. We don't need to check
+ * the firmware return status because we pre-set the filter
+ * status to I40E_AQC_MM_ERR_NO_RES when sending the filter
+ * request to the adminq. Thus, if it no longer matches then
+ * we know the filter is active.
*/
- for (i = 0; i < count ; i++) {
- if (add_list[i].match_method ==
- I40E_AQC_MM_ERR_NO_RES) {
- add_head->state = I40E_FILTER_FAILED;
- } else {
- add_head->state = I40E_FILTER_ACTIVE;
- retval++;
- }
- add_head = list_next_entry(add_head, list);
- }
- } else {
- /* Some other horrible thing happened, fail all filters */
- retval = 0;
- for (i = 0; i < count ; i++) {
+ if (add_list[i].match_method == I40E_AQC_MM_ERR_NO_RES) {
add_head->state = I40E_FILTER_FAILED;
- add_head = list_next_entry(add_head, list);
+ } else {
+ add_head->state = I40E_FILTER_ACTIVE;
+ retval++;
}
+
+ add_head = hlist_entry(add_head->hlist.next,
+ typeof(struct i40e_mac_filter),
+ hlist);
}
+
return retval;
}
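
The loop above relies on a sentinel trick: every element's match_method is pre-set to I40E_AQC_MM_ERR_NO_RES before the AdminQ request goes out, so any element still carrying that value afterwards is known to have been rejected, with no need to inspect the firmware's return code. A minimal standalone sketch of the pattern (all names below are illustrative, not the driver's):

#include <stdio.h>

#define MM_ERR_NO_RES	0	/* sentinel pre-set before the request */
#define MM_PERFECT	1	/* "firmware" overwrites this on success */

struct elem {
	int match_method;
};

/* count entries the firmware accepted: anything still holding the
 * sentinel was never overwritten, so it must have failed
 */
static int count_active(const struct elem *list, int count)
{
	int i, active = 0;

	for (i = 0; i < count; i++)
		if (list[i].match_method != MM_ERR_NO_RES)
			active++;
	return active;
}

int main(void)
{
	struct elem list[3] = {
		{ MM_PERFECT }, { MM_ERR_NO_RES }, { MM_PERFECT },
	};

	printf("%d of 3 filters active\n", count_active(list, 3));
	return 0;
}
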
/**
+ * i40e_aqc_del_filters - Request firmware to delete a set of filters
+ * @vsi: ptr to the VSI
+ * @vsi_name: name to display in messages
+ * @list: the list of filters to send to firmware
+ * @num_del: the number of filters to delete
+ * @retval: Set to -EIO on failure to delete
+ *
+ * Send a request to firmware via AdminQ to delete a set of filters. Uses
+ * *retval instead of a return value so that success does not force *retval to
+ * be set to 0. This ensures that a sequence of calls to this function
+ * preserves the previous value of *retval on each successful delete.
+ */
+static
+void i40e_aqc_del_filters(struct i40e_vsi *vsi, const char *vsi_name,
+ struct i40e_aqc_remove_macvlan_element_data *list,
+ int num_del, int *retval)
+{
+ struct i40e_hw *hw = &vsi->back->hw;
+ i40e_status aq_ret;
+ int aq_err;
+
+ aq_ret = i40e_aq_remove_macvlan(hw, vsi->seid, list, num_del, NULL);
+ aq_err = hw->aq.asq_last_status;
+
+ /* Explicitly ignore and do not report when firmware returns ENOENT */
+ if (aq_ret && !(aq_err == I40E_AQ_RC_ENOENT)) {
+ *retval = -EIO;
+ dev_info(&vsi->back->pdev->dev,
+ "ignoring delete macvlan error on %s, err %s, aq_err %s\n",
+ vsi_name, i40e_stat_str(hw, aq_ret),
+ i40e_aq_str(hw, aq_err));
+ }
+}
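
The *retval out-parameter is only ever written on failure, so a series of calls accumulates the first error while later successes leave it untouched. A tiny sketch of that accumulate-on-failure convention (hypothetical names, -5 standing in for -EIO):

#include <stdio.h>

/* only ever write *retval on failure, never on success */
static void try_op(int ok, int *retval)
{
	if (!ok)
		*retval = -5;	/* stand-in for -EIO */
}

int main(void)
{
	int retval = 0;

	try_op(1, &retval);	/* success: retval stays 0    */
	try_op(0, &retval);	/* failure: retval becomes -5 */
	try_op(1, &retval);	/* success: retval stays -5   */
	printf("retval = %d\n", retval);
	return 0;
}
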
+
+/**
+ * i40e_aqc_add_filters - Request firmware to add a set of filters
+ * @vsi: ptr to the VSI
+ * @vsi_name: name to display in messages
+ * @list: the list of filters to send to firmware
+ * @add_head: Position in the add hlist
+ * @num_add: the number of filters to add
+ * @promisc_changed: set to true on exit if promiscuous mode was forced on
+ *
+ * Send a request to firmware via AdminQ to add a chunk of filters. Will set
+ * promisc_changed to true if the firmware has run out of space for more
+ * filters.
+ */
+static
+void i40e_aqc_add_filters(struct i40e_vsi *vsi, const char *vsi_name,
+ struct i40e_aqc_add_macvlan_element_data *list,
+ struct i40e_mac_filter *add_head,
+ int num_add, bool *promisc_changed)
+{
+ struct i40e_hw *hw = &vsi->back->hw;
+ int aq_err, fcnt;
+
+ i40e_aq_add_macvlan(hw, vsi->seid, list, num_add, NULL);
+ aq_err = hw->aq.asq_last_status;
+ fcnt = i40e_update_filter_state(num_add, list, add_head);
+
+ if (fcnt != num_add) {
+ *promisc_changed = true;
+ set_bit(__I40E_FILTER_OVERFLOW_PROMISC, &vsi->state);
+ dev_warn(&vsi->back->pdev->dev,
+ "Error %s adding RX filters on %s, promiscuous mode forced on\n",
+ i40e_aq_str(hw, aq_err),
+ vsi_name);
+ }
+}
+
+/**
+ * i40e_aqc_broadcast_filter - Set promiscuous broadcast flags
+ * @vsi: pointer to the VSI
+ * @vsi_name: name to display in messages
+ * @f: filter data
+ *
+ * This function sets or clears the promiscuous broadcast flags for VLAN
+ * filters in order to properly receive broadcast frames. Assumes that only
+ * broadcast filters are passed.
+ **/
+static
+void i40e_aqc_broadcast_filter(struct i40e_vsi *vsi, const char *vsi_name,
+ struct i40e_mac_filter *f)
+{
+ bool enable = f->state == I40E_FILTER_NEW;
+ struct i40e_hw *hw = &vsi->back->hw;
+ i40e_status aq_ret;
+
+ if (f->vlan == I40E_VLAN_ANY) {
+ aq_ret = i40e_aq_set_vsi_broadcast(hw,
+ vsi->seid,
+ enable,
+ NULL);
+ } else {
+ aq_ret = i40e_aq_set_vsi_bc_promisc_on_vlan(hw,
+ vsi->seid,
+ enable,
+ f->vlan,
+ NULL);
+ }
+
+ if (aq_ret) {
+ dev_warn(&vsi->back->pdev->dev,
+ "Error %s setting broadcast promiscuous mode on %s\n",
+ i40e_aq_str(hw, hw->aq.asq_last_status),
+ vsi_name);
+ f->state = I40E_FILTER_FAILED;
+ } else if (enable) {
+ f->state = I40E_FILTER_ACTIVE;
+ }
+}
+
+/**
* i40e_sync_vsi_filters - Update the VSI filter list to the HW
* @vsi: ptr to the VSI
*
@@ -1826,22 +2014,24 @@ i40e_update_filter_state(int count,
**/
int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
{
- struct i40e_mac_filter *f, *ftmp, *add_head = NULL;
- struct list_head tmp_add_list, tmp_del_list;
+ struct hlist_head tmp_add_list, tmp_del_list;
+ struct i40e_mac_filter *f, *add_head = NULL;
struct i40e_hw *hw = &vsi->back->hw;
+ unsigned int failed_filters = 0;
+ unsigned int vlan_filters = 0;
bool promisc_changed = false;
char vsi_name[16] = "PF";
int filter_list_len = 0;
- u32 changed_flags = 0;
i40e_status aq_ret = 0;
- int retval = 0;
+ u32 changed_flags = 0;
+ struct hlist_node *h;
struct i40e_pf *pf;
int num_add = 0;
int num_del = 0;
- int aq_err = 0;
+ int retval = 0;
u16 cmd_flags;
int list_size;
- int fcnt;
+ int bkt;
/* empty array typed pointers, kcalloc later */
struct i40e_aqc_add_macvlan_element_data *add_list;
@@ -1856,8 +2046,8 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
vsi->current_netdev_flags = vsi->netdev->flags;
}
- INIT_LIST_HEAD(&tmp_add_list);
- INIT_LIST_HEAD(&tmp_del_list);
+ INIT_HLIST_HEAD(&tmp_add_list);
+ INIT_HLIST_HEAD(&tmp_del_list);
if (vsi->type == I40E_VSI_SRIOV)
snprintf(vsi_name, sizeof(vsi_name) - 1, "VF %d", vsi->vf_id);
@@ -1867,43 +2057,64 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
if (vsi->flags & I40E_VSI_FLAG_FILTER_CHANGED) {
vsi->flags &= ~I40E_VSI_FLAG_FILTER_CHANGED;
- spin_lock_bh(&vsi->mac_filter_list_lock);
+ spin_lock_bh(&vsi->mac_filter_hash_lock);
/* Create a list of filters to delete. */
- list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
+ hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
if (f->state == I40E_FILTER_REMOVE) {
- WARN_ON(f->counter != 0);
/* Move the element into temporary del_list */
- list_move_tail(&f->list, &tmp_del_list);
- vsi->active_filters--;
+ hash_del(&f->hlist);
+ hlist_add_head(&f->hlist, &tmp_del_list);
+
+ /* Avoid counting removed filters */
+ continue;
}
if (f->state == I40E_FILTER_NEW) {
- WARN_ON(f->counter == 0);
- /* Move the element into temporary add_list */
- list_move_tail(&f->list, &tmp_add_list);
+ hash_del(&f->hlist);
+ hlist_add_head(&f->hlist, &tmp_add_list);
}
+
+ /* Count the number of active (current and new) VLAN
+ * filters we have now. Does not count filters which
+ * are marked for deletion.
+ */
+ if (f->vlan > 0)
+ vlan_filters++;
}
- spin_unlock_bh(&vsi->mac_filter_list_lock);
+
+ retval = i40e_correct_mac_vlan_filters(vsi,
+ &tmp_add_list,
+ &tmp_del_list,
+ vlan_filters);
+ if (retval)
+ goto err_no_memory_locked;
+
+ spin_unlock_bh(&vsi->mac_filter_hash_lock);
}
/* Now process 'del_list' outside the lock */
- if (!list_empty(&tmp_del_list)) {
+ if (!hlist_empty(&tmp_del_list)) {
filter_list_len = hw->aq.asq_buf_size /
sizeof(struct i40e_aqc_remove_macvlan_element_data);
list_size = filter_list_len *
sizeof(struct i40e_aqc_remove_macvlan_element_data);
del_list = kzalloc(list_size, GFP_ATOMIC);
- if (!del_list) {
- /* Undo VSI's MAC filter entry element updates */
- spin_lock_bh(&vsi->mac_filter_list_lock);
- i40e_undo_del_filter_entries(vsi, &tmp_del_list);
- spin_unlock_bh(&vsi->mac_filter_list_lock);
- retval = -ENOMEM;
- goto out;
- }
+ if (!del_list)
+ goto err_no_memory;
- list_for_each_entry_safe(f, ftmp, &tmp_del_list, list) {
+ hlist_for_each_entry_safe(f, h, &tmp_del_list, hlist) {
cmd_flags = 0;
+ /* handle broadcast filters by updating the broadcast
+ * promiscuous flag instead of deleting a MAC filter.
+ */
+ if (is_broadcast_ether_addr(f->macaddr)) {
+ i40e_aqc_broadcast_filter(vsi, vsi_name, f);
+
+ hlist_del(&f->hlist);
+ kfree(f);
+ continue;
+ }
+
/* add to delete list */
ether_addr_copy(del_list[num_del].mac_addr, f->macaddr);
if (f->vlan == I40E_VLAN_ANY) {
@@ -1920,73 +2131,57 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
/* flush a full buffer */
if (num_del == filter_list_len) {
- aq_ret = i40e_aq_remove_macvlan(hw, vsi->seid,
- del_list,
- num_del, NULL);
- aq_err = hw->aq.asq_last_status;
- num_del = 0;
+ i40e_aqc_del_filters(vsi, vsi_name, del_list,
+ num_del, &retval);
memset(del_list, 0, list_size);
-
- /* Explicitly ignore and do not report when
- * firmware returns ENOENT.
- */
- if (aq_ret && !(aq_err == I40E_AQ_RC_ENOENT)) {
- retval = -EIO;
- dev_info(&pf->pdev->dev,
- "ignoring delete macvlan error on %s, err %s, aq_err %s\n",
- vsi_name,
- i40e_stat_str(hw, aq_ret),
- i40e_aq_str(hw, aq_err));
- }
+ num_del = 0;
}
/* Release memory for MAC filter entries which were
* synced up with HW.
*/
- list_del(&f->list);
+ hlist_del(&f->hlist);
kfree(f);
}
if (num_del) {
- aq_ret = i40e_aq_remove_macvlan(hw, vsi->seid, del_list,
- num_del, NULL);
- aq_err = hw->aq.asq_last_status;
- num_del = 0;
-
- /* Explicitly ignore and do not report when firmware
- * returns ENOENT.
- */
- if (aq_ret && !(aq_err == I40E_AQ_RC_ENOENT)) {
- retval = -EIO;
- dev_info(&pf->pdev->dev,
- "ignoring delete macvlan error on %s, err %s aq_err %s\n",
- vsi_name,
- i40e_stat_str(hw, aq_ret),
- i40e_aq_str(hw, aq_err));
- }
+ i40e_aqc_del_filters(vsi, vsi_name, del_list,
+ num_del, &retval);
}
kfree(del_list);
del_list = NULL;
}
- if (!list_empty(&tmp_add_list)) {
+ if (!hlist_empty(&tmp_add_list)) {
/* Do all the adds now. */
filter_list_len = hw->aq.asq_buf_size /
sizeof(struct i40e_aqc_add_macvlan_element_data);
list_size = filter_list_len *
sizeof(struct i40e_aqc_add_macvlan_element_data);
add_list = kzalloc(list_size, GFP_ATOMIC);
- if (!add_list) {
- retval = -ENOMEM;
- goto out;
- }
+ if (!add_list)
+ goto err_no_memory;
+
num_add = 0;
- list_for_each_entry(f, &tmp_add_list, list) {
+ hlist_for_each_entry_safe(f, h, &tmp_add_list, hlist) {
if (test_bit(__I40E_FILTER_OVERFLOW_PROMISC,
&vsi->state)) {
f->state = I40E_FILTER_FAILED;
continue;
}
+
+ /* handle broadcast filters by updating the broadcast
+ * promiscuous flag instead of adding a MAC filter.
+ */
+ if (is_broadcast_ether_addr(f->macaddr)) {
+ u64 key = i40e_addr_to_hkey(f->macaddr);
+ i40e_aqc_broadcast_filter(vsi, vsi_name, f);
+
+ hlist_del(&f->hlist);
+ hash_add(vsi->mac_filter_hash, &f->hlist, key);
+ continue;
+ }
+
/* add to add array */
if (num_add == 0)
add_head = f;
@@ -2000,88 +2195,70 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
cpu_to_le16((u16)(f->vlan));
}
add_list[num_add].queue_number = 0;
+ /* set invalid match method for later detection */
+ add_list[num_add].match_method = I40E_AQC_MM_ERR_NO_RES;
cmd_flags |= I40E_AQC_MACVLAN_ADD_PERFECT_MATCH;
add_list[num_add].flags = cpu_to_le16(cmd_flags);
num_add++;
/* flush a full buffer */
if (num_add == filter_list_len) {
- aq_ret = i40e_aq_add_macvlan(hw, vsi->seid,
- add_list, num_add,
- NULL);
- aq_err = hw->aq.asq_last_status;
- fcnt = i40e_update_filter_state(num_add,
- add_list,
- add_head,
- aq_ret);
- vsi->active_filters += fcnt;
-
- if (fcnt != num_add) {
- promisc_changed = true;
- set_bit(__I40E_FILTER_OVERFLOW_PROMISC,
- &vsi->state);
- vsi->promisc_threshold =
- (vsi->active_filters * 3) / 4;
- dev_warn(&pf->pdev->dev,
- "Error %s adding RX filters on %s, promiscuous mode forced on\n",
- i40e_aq_str(hw, aq_err),
- vsi_name);
- }
+ i40e_aqc_add_filters(vsi, vsi_name, add_list,
+ add_head, num_add,
+ &promisc_changed);
memset(add_list, 0, list_size);
num_add = 0;
}
}
if (num_add) {
- aq_ret = i40e_aq_add_macvlan(hw, vsi->seid,
- add_list, num_add, NULL);
- aq_err = hw->aq.asq_last_status;
- fcnt = i40e_update_filter_state(num_add, add_list,
- add_head, aq_ret);
- vsi->active_filters += fcnt;
- if (fcnt != num_add) {
- promisc_changed = true;
- set_bit(__I40E_FILTER_OVERFLOW_PROMISC,
- &vsi->state);
- vsi->promisc_threshold =
- (vsi->active_filters * 3) / 4;
- dev_warn(&pf->pdev->dev,
- "Error %s adding RX filters on %s, promiscuous mode forced on\n",
- i40e_aq_str(hw, aq_err), vsi_name);
- }
+ i40e_aqc_add_filters(vsi, vsi_name, add_list, add_head,
+ num_add, &promisc_changed);
}
/* Now move all of the filters from the temp add list back to
* the VSI's list.
*/
- spin_lock_bh(&vsi->mac_filter_list_lock);
- list_for_each_entry_safe(f, ftmp, &tmp_add_list, list) {
- list_move_tail(&f->list, &vsi->mac_filter_list);
+ spin_lock_bh(&vsi->mac_filter_hash_lock);
+ hlist_for_each_entry_safe(f, h, &tmp_add_list, hlist) {
+ u64 key = i40e_addr_to_hkey(f->macaddr);
+
+ hlist_del(&f->hlist);
+ hash_add(vsi->mac_filter_hash, &f->hlist, key);
}
- spin_unlock_bh(&vsi->mac_filter_list_lock);
+ spin_unlock_bh(&vsi->mac_filter_hash_lock);
kfree(add_list);
add_list = NULL;
}
- /* Check to see if we can drop out of overflow promiscuous mode. */
+ /* Determine the number of active and failed filters. */
+ spin_lock_bh(&vsi->mac_filter_hash_lock);
+ vsi->active_filters = 0;
+ hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) {
+ if (f->state == I40E_FILTER_ACTIVE)
+ vsi->active_filters++;
+ else if (f->state == I40E_FILTER_FAILED)
+ failed_filters++;
+ }
+ spin_unlock_bh(&vsi->mac_filter_hash_lock);
+
+ /* If promiscuous mode has changed, we need to calculate a new
+ * threshold for when we are safe to exit
+ */
+ if (promisc_changed)
+ vsi->promisc_threshold = (vsi->active_filters * 3) / 4;
+
+ /* Check if we are able to exit overflow promiscuous mode. We can
+ * safely exit if we didn't just enter, we no longer have any failed
+ * filters, and we have reduced filters below the threshold value.
+ */
if (test_bit(__I40E_FILTER_OVERFLOW_PROMISC, &vsi->state) &&
+ !promisc_changed && !failed_filters &&
(vsi->active_filters < vsi->promisc_threshold)) {
- int failed_count = 0;
- /* See if we have any failed filters. We can't drop out of
- * promiscuous until these have all been deleted.
- */
- spin_lock_bh(&vsi->mac_filter_list_lock);
- list_for_each_entry(f, &vsi->mac_filter_list, list) {
- if (f->state == I40E_FILTER_FAILED)
- failed_count++;
- }
- spin_unlock_bh(&vsi->mac_filter_list_lock);
- if (!failed_count) {
- dev_info(&pf->pdev->dev,
- "filter logjam cleared on %s, leaving overflow promiscuous mode\n",
- vsi_name);
- clear_bit(__I40E_FILTER_OVERFLOW_PROMISC, &vsi->state);
- promisc_changed = true;
- vsi->promisc_threshold = 0;
- }
+ dev_info(&pf->pdev->dev,
+ "filter logjam cleared on %s, leaving overflow promiscuous mode\n",
+ vsi_name);
+ clear_bit(__I40E_FILTER_OVERFLOW_PROMISC, &vsi->state);
+ promisc_changed = true;
+ vsi->promisc_threshold = 0;
}
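
For the exit rule above: the threshold is recomputed as 3/4 of the active filter count whenever promiscuous mode is forced on, and the driver only leaves overflow promiscuous mode once it did not just enter it, no filters are failed, and the active count has dropped below that threshold. A small sketch of the predicate, with illustrative numbers (32 active at overflow gives a threshold of 24):

#include <stdbool.h>
#include <stdio.h>

static bool can_exit_promisc(unsigned int active, unsigned int failed,
			     unsigned int threshold, bool just_entered)
{
	return !just_entered && !failed && active < threshold;
}

int main(void)
{
	unsigned int threshold = (32 * 3) / 4;	/* 24 */

	/* 20 active, nothing failed: safe to leave promiscuous mode */
	printf("exit: %d\n", can_exit_promisc(20, 0, threshold, false));
	/* still 30 active: stay in promiscuous mode */
	printf("exit: %d\n", can_exit_promisc(30, 0, threshold, false));
	return 0;
}
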
/* if the VF is not trusted do not do promisc */
@@ -2201,6 +2378,18 @@ out:
clear_bit(__I40E_CONFIG_BUSY, &vsi->state);
return retval;
+
+err_no_memory:
+ /* Restore elements on the temporary add and delete lists */
+ spin_lock_bh(&vsi->mac_filter_hash_lock);
+err_no_memory_locked:
+ i40e_undo_filter_entries(vsi, &tmp_del_list);
+ i40e_undo_filter_entries(vsi, &tmp_add_list);
+ spin_unlock_bh(&vsi->mac_filter_hash_lock);
+
+ vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
+ clear_bit(__I40E_CONFIG_BUSY, &vsi->state);
+ return -ENOMEM;
}
/**
@@ -2239,13 +2428,8 @@ static void i40e_sync_filters_subtask(struct i40e_pf *pf)
static int i40e_change_mtu(struct net_device *netdev, int new_mtu)
{
struct i40e_netdev_priv *np = netdev_priv(netdev);
- int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
struct i40e_vsi *vsi = np->vsi;
- /* MTU < 68 is an error and causes problems on some kernels */
- if ((new_mtu < 68) || (max_frame > I40E_MAX_RXBUFFER))
- return -EINVAL;
-
netdev_info(netdev, "changing MTU from %d to %d\n",
netdev->mtu, new_mtu);
netdev->mtu = new_mtu;
@@ -2354,88 +2538,54 @@ static void i40e_vlan_rx_register(struct net_device *netdev, u32 features)
}
/**
- * i40e_vsi_add_vlan - Add vsi membership for given vlan
+ * i40e_add_vlan_all_mac - Add a MAC/VLAN filter for each existing MAC address
* @vsi: the vsi being configured
 * @vid: vlan id to be added (0 = untagged only, -1 = any)
+ *
+ * This is a helper function for adding a new MAC/VLAN filter with the
+ * specified VLAN for each existing MAC address already in the hash table.
+ * This function does *not* perform any accounting to update filters based on
+ * VLAN mode.
+ *
+ * NOTE: this function expects to be called while under the
+ * mac_filter_hash_lock
**/
-int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid)
+int i40e_add_vlan_all_mac(struct i40e_vsi *vsi, s16 vid)
{
- struct i40e_mac_filter *f, *ftmp, *add_f;
- bool is_netdev, is_vf;
-
- is_vf = (vsi->type == I40E_VSI_SRIOV);
- is_netdev = !!(vsi->netdev);
-
- /* Locked once because all functions invoked below iterates list*/
- spin_lock_bh(&vsi->mac_filter_list_lock);
-
- if (is_netdev) {
- add_f = i40e_add_filter(vsi, vsi->netdev->dev_addr, vid,
- is_vf, is_netdev);
- if (!add_f) {
- dev_info(&vsi->back->pdev->dev,
- "Could not add vlan filter %d for %pM\n",
- vid, vsi->netdev->dev_addr);
- spin_unlock_bh(&vsi->mac_filter_list_lock);
- return -ENOMEM;
- }
- }
+ struct i40e_mac_filter *f, *add_f;
+ struct hlist_node *h;
+ int bkt;
- list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
- add_f = i40e_add_filter(vsi, f->macaddr, vid, is_vf, is_netdev);
+ hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
+ if (f->state == I40E_FILTER_REMOVE)
+ continue;
+ add_f = i40e_add_filter(vsi, f->macaddr, vid);
if (!add_f) {
dev_info(&vsi->back->pdev->dev,
"Could not add vlan filter %d for %pM\n",
vid, f->macaddr);
- spin_unlock_bh(&vsi->mac_filter_list_lock);
return -ENOMEM;
}
}
- /* Now if we add a vlan tag, make sure to check if it is the first
- * tag (i.e. a "tag" -1 does exist) and if so replace the -1 "tag"
- * with 0, so we now accept untagged and specified tagged traffic
- * (and not all tags along with untagged)
- */
- if (vid > 0) {
- if (is_netdev && i40e_find_filter(vsi, vsi->netdev->dev_addr,
- I40E_VLAN_ANY,
- is_vf, is_netdev)) {
- i40e_del_filter(vsi, vsi->netdev->dev_addr,
- I40E_VLAN_ANY, is_vf, is_netdev);
- add_f = i40e_add_filter(vsi, vsi->netdev->dev_addr, 0,
- is_vf, is_netdev);
- if (!add_f) {
- dev_info(&vsi->back->pdev->dev,
- "Could not add filter 0 for %pM\n",
- vsi->netdev->dev_addr);
- spin_unlock_bh(&vsi->mac_filter_list_lock);
- return -ENOMEM;
- }
- }
- }
+ return 0;
+}
- /* Do not assume that I40E_VLAN_ANY should be reset to VLAN 0 */
- if (vid > 0 && !vsi->info.pvid) {
- list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
- if (!i40e_find_filter(vsi, f->macaddr, I40E_VLAN_ANY,
- is_vf, is_netdev))
- continue;
- i40e_del_filter(vsi, f->macaddr, I40E_VLAN_ANY,
- is_vf, is_netdev);
- add_f = i40e_add_filter(vsi, f->macaddr,
- 0, is_vf, is_netdev);
- if (!add_f) {
- dev_info(&vsi->back->pdev->dev,
- "Could not add filter 0 for %pM\n",
- f->macaddr);
- spin_unlock_bh(&vsi->mac_filter_list_lock);
- return -ENOMEM;
- }
- }
- }
+/**
+ * i40e_vsi_add_vlan - Add VSI membership for given VLAN
+ * @vsi: the VSI being configured
+ * @vid: VLAN id to be added (0 = untagged only, -1 = any)
+ **/
+int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid)
+{
+ int err;
- spin_unlock_bh(&vsi->mac_filter_list_lock);
+	/* Locked once because the function invoked below iterates the hash */
+ spin_lock_bh(&vsi->mac_filter_hash_lock);
+ err = i40e_add_vlan_all_mac(vsi, vid);
+ spin_unlock_bh(&vsi->mac_filter_hash_lock);
+ if (err)
+ return err;
/* schedule our worker thread which will take care of
* applying the new filter changes
@@ -2445,82 +2595,45 @@ int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid)
}
/**
- * i40e_vsi_kill_vlan - Remove vsi membership for given vlan
+ * i40e_rm_vlan_all_mac - Remove MAC/VLAN pair for all MACs with the given VLAN
* @vsi: the vsi being configured
 * @vid: vlan id to be removed (0 = untagged only, -1 = any)
*
- * Return: 0 on success or negative otherwise
- **/
-int i40e_vsi_kill_vlan(struct i40e_vsi *vsi, s16 vid)
+ * This function should be used to remove all VLAN filters which match the
+ * given VID. It does not schedule the service event and does not take the
+ * mac_filter_hash_lock so it may be combined with other operations under
+ * a single invocation of the mac_filter_hash_lock.
+ *
+ * NOTE: this function expects to be called while under the
+ * mac_filter_hash_lock
+ */
+void i40e_rm_vlan_all_mac(struct i40e_vsi *vsi, s16 vid)
{
- struct net_device *netdev = vsi->netdev;
- struct i40e_mac_filter *f, *ftmp, *add_f;
- bool is_vf, is_netdev;
- int filter_count = 0;
-
- is_vf = (vsi->type == I40E_VSI_SRIOV);
- is_netdev = !!(netdev);
-
- /* Locked once because all functions invoked below iterates list */
- spin_lock_bh(&vsi->mac_filter_list_lock);
-
- if (is_netdev)
- i40e_del_filter(vsi, netdev->dev_addr, vid, is_vf, is_netdev);
-
- list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list)
- i40e_del_filter(vsi, f->macaddr, vid, is_vf, is_netdev);
-
- /* go through all the filters for this VSI and if there is only
- * vid == 0 it means there are no other filters, so vid 0 must
- * be replaced with -1. This signifies that we should from now
- * on accept any traffic (with any tag present, or untagged)
- */
- list_for_each_entry(f, &vsi->mac_filter_list, list) {
- if (is_netdev) {
- if (f->vlan &&
- ether_addr_equal(netdev->dev_addr, f->macaddr))
- filter_count++;
- }
-
- if (f->vlan)
- filter_count++;
- }
-
- if (!filter_count && is_netdev) {
- i40e_del_filter(vsi, netdev->dev_addr, 0, is_vf, is_netdev);
- f = i40e_add_filter(vsi, netdev->dev_addr, I40E_VLAN_ANY,
- is_vf, is_netdev);
- if (!f) {
- dev_info(&vsi->back->pdev->dev,
- "Could not add filter %d for %pM\n",
- I40E_VLAN_ANY, netdev->dev_addr);
- spin_unlock_bh(&vsi->mac_filter_list_lock);
- return -ENOMEM;
- }
- }
+ struct i40e_mac_filter *f;
+ struct hlist_node *h;
+ int bkt;
- if (!filter_count) {
- list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
- i40e_del_filter(vsi, f->macaddr, 0, is_vf, is_netdev);
- add_f = i40e_add_filter(vsi, f->macaddr, I40E_VLAN_ANY,
- is_vf, is_netdev);
- if (!add_f) {
- dev_info(&vsi->back->pdev->dev,
- "Could not add filter %d for %pM\n",
- I40E_VLAN_ANY, f->macaddr);
- spin_unlock_bh(&vsi->mac_filter_list_lock);
- return -ENOMEM;
- }
- }
+ hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
+ if (f->vlan == vid)
+ __i40e_del_filter(vsi, f);
}
+}
- spin_unlock_bh(&vsi->mac_filter_list_lock);
+/**
+ * i40e_vsi_kill_vlan - Remove VSI membership for given VLAN
+ * @vsi: the VSI being configured
+ * @vid: VLAN id to be removed (0 = untagged only, -1 = any)
+ **/
+void i40e_vsi_kill_vlan(struct i40e_vsi *vsi, s16 vid)
+{
+ spin_lock_bh(&vsi->mac_filter_hash_lock);
+ i40e_rm_vlan_all_mac(vsi, vid);
+ spin_unlock_bh(&vsi->mac_filter_hash_lock);
/* schedule our worker thread which will take care of
* applying the new filter changes
*/
i40e_service_event_schedule(vsi->back);
- return 0;
}
/**
@@ -2542,7 +2655,7 @@ static int i40e_vlan_rx_add_vid(struct net_device *netdev,
struct i40e_vsi *vsi = np->vsi;
int ret = 0;
- if (vid > 4095)
+ if (vid >= VLAN_N_VID)
return -EINVAL;
/* If the network stack called us with vid = 0 then
@@ -2554,7 +2667,7 @@ static int i40e_vlan_rx_add_vid(struct net_device *netdev,
if (vid)
ret = i40e_vsi_add_vlan(vsi, vid);
- if (!ret && (vid < VLAN_N_VID))
+ if (!ret)
set_bit(vid, vsi->active_vlans);
return ret;
@@ -3322,6 +3435,33 @@ static irqreturn_t i40e_msix_clean_rings(int irq, void *data)
}
/**
+ * i40e_irq_affinity_notify - Callback for affinity changes
+ * @notify: context for the IRQ whose affinity changed
+ * @mask: the new affinity mask
+ *
+ * This is a callback function used by the irq_set_affinity_notifier function
+ * so that we may register to receive changes to the irq affinity masks.
+ **/
+static void i40e_irq_affinity_notify(struct irq_affinity_notify *notify,
+ const cpumask_t *mask)
+{
+ struct i40e_q_vector *q_vector =
+ container_of(notify, struct i40e_q_vector, affinity_notify);
+
+ q_vector->affinity_mask = *mask;
+}
+
+/**
+ * i40e_irq_affinity_release - Callback for affinity notifier release
+ * @ref: internal core kernel usage
+ *
+ * This is a callback function used by the irq_set_affinity_notifier function
+ * to inform the current notification subscriber that they will no longer
+ * receive notifications.
+ **/
+static void i40e_irq_affinity_release(struct kref *ref) {}
+
+/**
* i40e_vsi_request_irq_msix - Initialize MSI-X interrupts
* @vsi: the VSI being configured
* @basename: name for the vector
@@ -3336,10 +3476,13 @@ static int i40e_vsi_request_irq_msix(struct i40e_vsi *vsi, char *basename)
int rx_int_idx = 0;
int tx_int_idx = 0;
int vector, err;
+ int irq_num;
for (vector = 0; vector < q_vectors; vector++) {
struct i40e_q_vector *q_vector = vsi->q_vectors[vector];
+ irq_num = pf->msix_entries[base + vector].vector;
+
if (q_vector->tx.ring && q_vector->rx.ring) {
snprintf(q_vector->name, sizeof(q_vector->name) - 1,
"%s-%s-%d", basename, "TxRx", rx_int_idx++);
@@ -3354,7 +3497,7 @@ static int i40e_vsi_request_irq_msix(struct i40e_vsi *vsi, char *basename)
/* skip this unused q_vector */
continue;
}
- err = request_irq(pf->msix_entries[base + vector].vector,
+ err = request_irq(irq_num,
vsi->irq_handler,
0,
q_vector->name,
@@ -3364,9 +3507,13 @@ static int i40e_vsi_request_irq_msix(struct i40e_vsi *vsi, char *basename)
"MSIX request_irq failed, error: %d\n", err);
goto free_queue_irqs;
}
+
+ /* register for affinity change notifications */
+ q_vector->affinity_notify.notify = i40e_irq_affinity_notify;
+ q_vector->affinity_notify.release = i40e_irq_affinity_release;
+ irq_set_affinity_notifier(irq_num, &q_vector->affinity_notify);
/* assign the mask for this irq */
- irq_set_affinity_hint(pf->msix_entries[base + vector].vector,
- &q_vector->affinity_mask);
+ irq_set_affinity_hint(irq_num, &q_vector->affinity_mask);
}
vsi->irqs_ready = true;
@@ -3375,10 +3522,10 @@ static int i40e_vsi_request_irq_msix(struct i40e_vsi *vsi, char *basename)
free_queue_irqs:
while (vector) {
vector--;
- irq_set_affinity_hint(pf->msix_entries[base + vector].vector,
- NULL);
- free_irq(pf->msix_entries[base + vector].vector,
- &(vsi->q_vectors[vector]));
+ irq_num = pf->msix_entries[base + vector].vector;
+ irq_set_affinity_notifier(irq_num, NULL);
+ irq_set_affinity_hint(irq_num, NULL);
+ free_irq(irq_num, &vsi->q_vectors[vector]);
}
return err;
}
@@ -3480,7 +3627,7 @@ static irqreturn_t i40e_intr(int irq, void *data)
(ena_mask & I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK)) {
ena_mask &= ~I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK;
icr0 &= ~I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK;
- dev_info(&pf->pdev->dev, "cleared PE_CRITERR\n");
+ dev_dbg(&pf->pdev->dev, "cleared PE_CRITERR\n");
}
/* only q0 is used in MSI/Legacy mode, and none are used in MSIX */
@@ -3973,30 +4120,36 @@ static int i40e_vsi_control_rx(struct i40e_vsi *vsi, bool enable)
}
/**
- * i40e_vsi_control_rings - Start or stop a VSI's rings
+ * i40e_vsi_start_rings - Start a VSI's rings
* @vsi: the VSI being configured
- * @enable: start or stop the rings
**/
-int i40e_vsi_control_rings(struct i40e_vsi *vsi, bool request)
+int i40e_vsi_start_rings(struct i40e_vsi *vsi)
{
int ret = 0;
/* do rx first for enable and last for disable */
- if (request) {
- ret = i40e_vsi_control_rx(vsi, request);
- if (ret)
- return ret;
- ret = i40e_vsi_control_tx(vsi, request);
- } else {
- /* Ignore return value, we need to shutdown whatever we can */
- i40e_vsi_control_tx(vsi, request);
- i40e_vsi_control_rx(vsi, request);
- }
+ ret = i40e_vsi_control_rx(vsi, true);
+ if (ret)
+ return ret;
+ ret = i40e_vsi_control_tx(vsi, true);
return ret;
}
/**
+ * i40e_vsi_stop_rings - Stop a VSI's rings
+ * @vsi: the VSI being configured
+ **/
+void i40e_vsi_stop_rings(struct i40e_vsi *vsi)
+{
+	/* do Rx last for disable.
+	 * Ignore return values; we need to shut down whatever we can.
+ */
+ i40e_vsi_control_tx(vsi, false);
+ i40e_vsi_control_rx(vsi, false);
+}
+
+/**
* i40e_vsi_free_irq - Free the irq association with the OS
* @vsi: the VSI being configured
**/
@@ -4017,19 +4170,23 @@ static void i40e_vsi_free_irq(struct i40e_vsi *vsi)
vsi->irqs_ready = false;
for (i = 0; i < vsi->num_q_vectors; i++) {
- u16 vector = i + base;
+ int irq_num;
+ u16 vector;
+
+ vector = i + base;
+ irq_num = pf->msix_entries[vector].vector;
/* free only the irqs that were actually requested */
if (!vsi->q_vectors[i] ||
!vsi->q_vectors[i]->num_ringpairs)
continue;
+ /* clear the affinity notifier in the IRQ descriptor */
+ irq_set_affinity_notifier(irq_num, NULL);
/* clear the affinity_mask in the IRQ descriptor */
- irq_set_affinity_hint(pf->msix_entries[vector].vector,
- NULL);
- synchronize_irq(pf->msix_entries[vector].vector);
- free_irq(pf->msix_entries[vector].vector,
- vsi->q_vectors[i]);
+ irq_set_affinity_hint(irq_num, NULL);
+ synchronize_irq(irq_num);
+ free_irq(irq_num, vsi->q_vectors[i]);
/* Tear down the interrupt queue link list
*
@@ -5116,12 +5273,16 @@ out:
*/
void i40e_print_link_message(struct i40e_vsi *vsi, bool isup)
{
+ enum i40e_aq_link_speed new_speed;
char *speed = "Unknown";
char *fc = "Unknown";
- if (vsi->current_isup == isup)
+ new_speed = vsi->back->hw.phy.link_info.link_speed;
+
+ if ((vsi->current_isup == isup) && (vsi->current_speed == new_speed))
return;
vsi->current_isup = isup;
+ vsi->current_speed = new_speed;
if (!isup) {
netdev_info(vsi->netdev, "NIC Link is Down\n");
return;
@@ -5143,6 +5304,9 @@ void i40e_print_link_message(struct i40e_vsi *vsi, bool isup)
case I40E_LINK_SPEED_20GB:
speed = "20 G";
break;
+ case I40E_LINK_SPEED_25GB:
+ speed = "25 G";
+ break;
case I40E_LINK_SPEED_10GB:
speed = "10 G";
break;
@@ -5190,7 +5354,7 @@ static int i40e_up_complete(struct i40e_vsi *vsi)
i40e_configure_msi_and_legacy(vsi);
/* start rings */
- err = i40e_vsi_control_rings(vsi, true);
+ err = i40e_vsi_start_rings(vsi);
if (err)
return err;
@@ -5287,7 +5451,7 @@ void i40e_down(struct i40e_vsi *vsi)
netif_tx_disable(vsi->netdev);
}
i40e_vsi_disable_irq(vsi);
- i40e_vsi_control_rings(vsi, false);
+ i40e_vsi_stop_rings(vsi);
i40e_napi_disable_all(vsi);
for (i = 0; i < vsi->num_queue_pairs; i++) {
@@ -5833,19 +5997,6 @@ static void i40e_handle_lan_overflow_event(struct i40e_pf *pf,
}
/**
- * i40e_service_event_complete - Finish up the service event
- * @pf: board private structure
- **/
-static void i40e_service_event_complete(struct i40e_pf *pf)
-{
- WARN_ON(!test_bit(__I40E_SERVICE_SCHED, &pf->state));
-
- /* flush memory to make sure state is correct before next watchog */
- smp_mb__before_atomic();
- clear_bit(__I40E_SERVICE_SCHED, &pf->state);
-}
-
-/**
* i40e_get_cur_guaranteed_fd_count - Get the consumed guaranteed FD filters
* @pf: board private structure
**/
@@ -6670,7 +6821,6 @@ static int i40e_vsi_clear(struct i40e_vsi *vsi);
static void i40e_fdir_sb_setup(struct i40e_pf *pf)
{
struct i40e_vsi *vsi;
- int i;
/* quick workaround for an NVM issue that leaves a critical register
* uninitialized
@@ -6681,6 +6831,7 @@ static void i40e_fdir_sb_setup(struct i40e_pf *pf)
0xeacb7d61, 0xaa4f05b6, 0x9c5c89ed, 0xfc425ddb,
0xa4654832, 0xfc7461d4, 0x8f827619, 0xf5c63c21,
0x95b3a76d};
+ int i;
for (i = 0; i <= I40E_GLQF_HKEY_MAX_INDEX; i++)
wr32(&pf->hw, I40E_GLQF_HKEY(i), hkey[i]);
@@ -6690,13 +6841,7 @@ static void i40e_fdir_sb_setup(struct i40e_pf *pf)
return;
/* find existing VSI and see if it needs configuring */
- vsi = NULL;
- for (i = 0; i < pf->num_alloc_vsi; i++) {
- if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) {
- vsi = pf->vsi[i];
- break;
- }
- }
+ vsi = i40e_find_vsi_by_type(pf, I40E_VSI_FDIR);
/* create a new VSI if none exists */
if (!vsi) {
@@ -6718,15 +6863,12 @@ static void i40e_fdir_sb_setup(struct i40e_pf *pf)
**/
static void i40e_fdir_teardown(struct i40e_pf *pf)
{
- int i;
+ struct i40e_vsi *vsi;
i40e_fdir_filter_exit(pf);
- for (i = 0; i < pf->num_alloc_vsi; i++) {
- if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) {
- i40e_vsi_release(pf->vsi[i]);
- break;
- }
- }
+ vsi = i40e_find_vsi_by_type(pf, I40E_VSI_FDIR);
+ if (vsi)
+ i40e_vsi_release(vsi);
}
/**
@@ -7163,10 +7305,12 @@ static void i40e_service_task(struct work_struct *work)
/* don't bother with service tasks if a reset is in progress */
if (test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state)) {
- i40e_service_event_complete(pf);
return;
}
+ if (test_and_set_bit(__I40E_SERVICE_SCHED, &pf->state))
+ return;
+
i40e_detect_recover_hung(pf);
i40e_sync_filters_subtask(pf);
i40e_reset_subtask(pf);
@@ -7179,7 +7323,9 @@ static void i40e_service_task(struct work_struct *work)
i40e_sync_udp_filters_subtask(pf);
i40e_clean_adminq_subtask(pf);
- i40e_service_event_complete(pf);
+ /* flush memory to make sure state is correct before next watchdog */
+ smp_mb__before_atomic();
+ clear_bit(__I40E_SERVICE_SCHED, &pf->state);
/* If the tasks have taken longer than one timer cycle or there
* is more work to be done, reschedule the service task now
@@ -7354,7 +7500,7 @@ static int i40e_vsi_mem_alloc(struct i40e_pf *pf, enum i40e_vsi_type type)
pf->rss_table_size : 64;
vsi->netdev_registered = false;
vsi->work_limit = I40E_DEFAULT_IRQ_WORK;
- INIT_LIST_HEAD(&vsi->mac_filter_list);
+ hash_init(vsi->mac_filter_hash);
vsi->irqs_ready = false;
ret = i40e_set_num_rings_in_vsi(vsi);
@@ -7369,7 +7515,7 @@ static int i40e_vsi_mem_alloc(struct i40e_pf *pf, enum i40e_vsi_type type)
i40e_vsi_setup_irqhandler(vsi, i40e_msix_clean_rings);
/* Initialize VSI lock */
- spin_lock_init(&vsi->mac_filter_list_lock);
+ spin_lock_init(&vsi->mac_filter_hash_lock);
pf->vsi[vsi_idx] = vsi;
ret = vsi_idx;
goto unlock_pf;
@@ -8345,8 +8491,8 @@ int i40e_reconfig_rss_queues(struct i40e_pf *pf, int queue_count)
i40e_pf_config_rss(pf);
}
- dev_info(&pf->pdev->dev, "RSS count/HW max RSS count: %d/%d\n",
- pf->alloc_rss_size, pf->rss_size_max);
+ dev_info(&pf->pdev->dev, "User requested queue count/HW max RSS count: %d/%d\n",
+ vsi->req_queue_pairs, pf->rss_size_max);
return pf->alloc_rss_size;
}
@@ -8489,15 +8635,6 @@ static int i40e_sw_init(struct i40e_pf *pf)
int err = 0;
int size;
- pf->msg_enable = netif_msg_init(I40E_DEFAULT_MSG_ENABLE,
- (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK));
- if (debug != -1 && debug != I40E_DEFAULT_MSG_ENABLE) {
- if (I40E_DEBUG_USER & debug)
- pf->hw.debug_mask = debug;
- pf->msg_enable = netif_msg_init((debug & ~I40E_DEBUG_USER),
- I40E_DEFAULT_MSG_ENABLE);
- }
-
/* Set default capability flags */
pf->flags = I40E_FLAG_RX_CSUM_ENABLED |
I40E_FLAG_MSI_ENABLED |
@@ -8605,7 +8742,8 @@ static int i40e_sw_init(struct i40e_pf *pf)
I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE |
I40E_FLAG_NO_PCI_LINK_CHECK |
I40E_FLAG_USE_SET_LLDP_MIB |
- I40E_FLAG_GENEVE_OFFLOAD_CAPABLE;
+ I40E_FLAG_GENEVE_OFFLOAD_CAPABLE |
+ I40E_FLAG_PTP_L4_CAPABLE;
} else if ((pf->hw.aq.api_maj_ver > 1) ||
((pf->hw.aq.api_maj_ver == 1) &&
(pf->hw.aq.api_min_ver > 4))) {
@@ -9037,10 +9175,6 @@ static int i40e_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
0, 0, nlflags, filter_mask, NULL);
}
-/* Hardware supports L4 tunnel length of 128B (=2^7) which includes
- * inner mac plus all inner ethertypes.
- */
-#define I40E_MAX_TUNNEL_HDR_LEN 128
/**
* i40e_features_check - Validate encapsulated packet conforms to limits
* @skb: skb buff
@@ -9051,12 +9185,52 @@ static netdev_features_t i40e_features_check(struct sk_buff *skb,
struct net_device *dev,
netdev_features_t features)
{
- if (skb->encapsulation &&
- ((skb_inner_network_header(skb) - skb_transport_header(skb)) >
- I40E_MAX_TUNNEL_HDR_LEN))
- return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
+ size_t len;
+
+ /* No point in doing any of this if neither checksum nor GSO are
+ * being requested for this frame. We can rule out both by just
+ * checking for CHECKSUM_PARTIAL
+ */
+ if (skb->ip_summed != CHECKSUM_PARTIAL)
+ return features;
+
+ /* We cannot support GSO if the MSS is going to be less than
+ * 64 bytes. If it is then we need to drop support for GSO.
+ */
+ if (skb_is_gso(skb) && (skb_shinfo(skb)->gso_size < 64))
+ features &= ~NETIF_F_GSO_MASK;
+
+ /* MACLEN can support at most 63 words */
+ len = skb_network_header(skb) - skb->data;
+ if (len & ~(63 * 2))
+ goto out_err;
+
+ /* IPLEN and EIPLEN can support at most 127 dwords */
+ len = skb_transport_header(skb) - skb_network_header(skb);
+ if (len & ~(127 * 4))
+ goto out_err;
+
+ if (skb->encapsulation) {
+ /* L4TUNLEN can support 127 words */
+ len = skb_inner_network_header(skb) - skb_transport_header(skb);
+ if (len & ~(127 * 2))
+ goto out_err;
+
+ /* IPLEN can support at most 127 dwords */
+ len = skb_inner_transport_header(skb) -
+ skb_inner_network_header(skb);
+ if (len & ~(127 * 4))
+ goto out_err;
+ }
+
+	/* No need to validate L4LEN as TCP is the only protocol with a
+	 * flexible value, and we support all possible values supported
+	 * by TCP, which is at most 15 dwords
+ */
return features;
+out_err:
+ return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
}
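
Each of the masks above encodes "at most N units of size S, and a multiple of S": because N*S is a contiguous run of one bits (63*2 = 126, 127*2 = 254, 127*4 = 508), len & ~(N*S) is zero exactly for the encodable lengths. A minimal standalone check for the MACLEN case (illustrative, not driver code):

#include <stdio.h>

/* MACLEN is 63 two-byte words: len must be even and at most 126 */
static int maclen_ok(unsigned int len)
{
	return (len & ~(63u * 2)) == 0;
}

int main(void)
{
	printf("len=14:  %d\n", maclen_ok(14));		/* plain Ethernet: ok */
	printf("len=15:  %d\n", maclen_ok(15));		/* odd: rejected      */
	printf("len=128: %d\n", maclen_ok(128));	/* too long: rejected */
	return 0;
}
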
static const struct net_device_ops i40e_netdev_ops = {
@@ -9109,6 +9283,7 @@ static int i40e_config_netdev(struct i40e_vsi *vsi)
struct i40e_hw *hw = &pf->hw;
struct i40e_netdev_priv *np;
struct net_device *netdev;
+ u8 broadcast[ETH_ALEN];
u8 mac_addr[ETH_ALEN];
int etherdev_size;
@@ -9169,20 +9344,38 @@ static int i40e_config_netdev(struct i40e_vsi *vsi)
* which must be replaced by a normal filter.
*/
i40e_rm_default_mac_filter(vsi, mac_addr);
- spin_lock_bh(&vsi->mac_filter_list_lock);
- i40e_add_filter(vsi, mac_addr, I40E_VLAN_ANY, false, true);
- spin_unlock_bh(&vsi->mac_filter_list_lock);
+ spin_lock_bh(&vsi->mac_filter_hash_lock);
+ i40e_add_filter(vsi, mac_addr, I40E_VLAN_ANY);
+ spin_unlock_bh(&vsi->mac_filter_hash_lock);
} else {
/* relate the VSI_VMDQ name to the VSI_MAIN name */
snprintf(netdev->name, IFNAMSIZ, "%sv%%d",
pf->vsi[pf->lan_vsi]->netdev->name);
random_ether_addr(mac_addr);
- spin_lock_bh(&vsi->mac_filter_list_lock);
- i40e_add_filter(vsi, mac_addr, I40E_VLAN_ANY, false, false);
- spin_unlock_bh(&vsi->mac_filter_list_lock);
+ spin_lock_bh(&vsi->mac_filter_hash_lock);
+ i40e_add_filter(vsi, mac_addr, I40E_VLAN_ANY);
+ spin_unlock_bh(&vsi->mac_filter_hash_lock);
}
+ /* Add the broadcast filter so that we initially will receive
+ * broadcast packets. Note that when a new VLAN is first added the
+ * driver will convert all filters marked I40E_VLAN_ANY into VLAN
+ * specific filters as part of transitioning into "vlan" operation.
+ * When more VLANs are added, the driver will copy each existing MAC
+ * filter and add it for the new VLAN.
+ *
+ * Broadcast filters are handled specially by
+ * i40e_sync_filters_subtask, as the driver must set the broadcast
+ * promiscuous bit instead of adding this directly as a MAC/VLAN
+ * filter. The subtask will update the correct broadcast promiscuous
+ * bits as VLANs become active or inactive.
+ */
+ eth_broadcast_addr(broadcast);
+ spin_lock_bh(&vsi->mac_filter_hash_lock);
+ i40e_add_filter(vsi, broadcast, I40E_VLAN_ANY);
+ spin_unlock_bh(&vsi->mac_filter_hash_lock);
+
ether_addr_copy(netdev->dev_addr, mac_addr);
ether_addr_copy(netdev->perm_addr, mac_addr);
@@ -9198,6 +9391,11 @@ static int i40e_config_netdev(struct i40e_vsi *vsi)
i40e_fcoe_config_netdev(netdev, vsi);
#endif
+ /* MTU range: 68 - 9706 */
+ netdev->min_mtu = ETH_MIN_MTU;
+ netdev->max_mtu = I40E_MAX_RXBUFFER -
+ (ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
+
return 0;
}
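
The 9706 in the comment above falls out of the arithmetic, assuming I40E_MAX_RXBUFFER is 9728 and the usual Linux header sizes (ETH_HLEN 14, ETH_FCS_LEN 4, VLAN_HLEN 4). A one-line check, with those values restated locally as assumptions:

#include <stdio.h>

#define MAX_RXBUFFER	9728	/* assumed value of I40E_MAX_RXBUFFER */
#define ETH_HLEN	14
#define ETH_FCS_LEN	4
#define VLAN_HLEN	4

int main(void)
{
	printf("max_mtu = %d\n",
	       MAX_RXBUFFER - (ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN));
	return 0;	/* prints 9706 */
}
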
@@ -9260,11 +9458,12 @@ int i40e_is_vsi_uplink_mode_veb(struct i40e_vsi *vsi)
static int i40e_add_vsi(struct i40e_vsi *vsi)
{
int ret = -ENODEV;
- i40e_status aq_ret = 0;
struct i40e_pf *pf = vsi->back;
struct i40e_hw *hw = &pf->hw;
struct i40e_vsi_context ctxt;
- struct i40e_mac_filter *f, *ftmp;
+ struct i40e_mac_filter *f;
+ struct hlist_node *h;
+ int bkt;
u8 enabled_tc = 0x1; /* TC0 enabled */
int f_count = 0;
@@ -9448,28 +9647,16 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
vsi->seid = ctxt.seid;
vsi->id = ctxt.vsi_number;
}
- /* Except FDIR VSI, for all othet VSI set the broadcast filter */
- if (vsi->type != I40E_VSI_FDIR) {
- aq_ret = i40e_aq_set_vsi_broadcast(hw, vsi->seid, true, NULL);
- if (aq_ret) {
- ret = i40e_aq_rc_to_posix(aq_ret,
- hw->aq.asq_last_status);
- dev_info(&pf->pdev->dev,
- "set brdcast promisc failed, err %s, aq_err %s\n",
- i40e_stat_str(hw, aq_ret),
- i40e_aq_str(hw, hw->aq.asq_last_status));
- }
- }
vsi->active_filters = 0;
clear_bit(__I40E_FILTER_OVERFLOW_PROMISC, &vsi->state);
- spin_lock_bh(&vsi->mac_filter_list_lock);
+ spin_lock_bh(&vsi->mac_filter_hash_lock);
/* If macvlan filters already exist, force them to get loaded */
- list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
+ hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
f->state = I40E_FILTER_NEW;
f_count++;
}
- spin_unlock_bh(&vsi->mac_filter_list_lock);
+ spin_unlock_bh(&vsi->mac_filter_hash_lock);
if (f_count) {
vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
@@ -9499,11 +9686,12 @@ err:
**/
int i40e_vsi_release(struct i40e_vsi *vsi)
{
- struct i40e_mac_filter *f, *ftmp;
+ struct i40e_mac_filter *f;
+ struct hlist_node *h;
struct i40e_veb *veb = NULL;
struct i40e_pf *pf;
u16 uplink_seid;
- int i, n;
+ int i, n, bkt;
pf = vsi->back;
@@ -9533,11 +9721,19 @@ int i40e_vsi_release(struct i40e_vsi *vsi)
i40e_vsi_disable_irq(vsi);
}
- spin_lock_bh(&vsi->mac_filter_list_lock);
- list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list)
- i40e_del_filter(vsi, f->macaddr, f->vlan,
- f->is_vf, f->is_netdev);
- spin_unlock_bh(&vsi->mac_filter_list_lock);
+ spin_lock_bh(&vsi->mac_filter_hash_lock);
+
+ /* clear the sync flag on all filters */
+ if (vsi->netdev) {
+ __dev_uc_unsync(vsi->netdev, NULL);
+ __dev_mc_unsync(vsi->netdev, NULL);
+ }
+
+ /* make sure any remaining filters are marked for deletion */
+ hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist)
+ __i40e_del_filter(vsi, f);
+
+ spin_unlock_bh(&vsi->mac_filter_hash_lock);
i40e_sync_vsi_filters(vsi);
@@ -10806,10 +11002,12 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
mutex_init(&hw->aq.asq_mutex);
mutex_init(&hw->aq.arq_mutex);
- if (debug != -1) {
- pf->msg_enable = pf->hw.debug_mask;
- pf->msg_enable = debug;
- }
+ pf->msg_enable = netif_msg_init(debug,
+ NETIF_MSG_DRV |
+ NETIF_MSG_PROBE |
+ NETIF_MSG_LINK);
+ if (debug < -1)
+ pf->hw.debug_mask = debug;
/* do a special CORER for clearing PXE mode once at init */
if (hw->revision_id == 0 &&
@@ -10951,7 +11149,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
err = i40e_init_pf_dcb(pf);
if (err) {
dev_info(&pdev->dev, "DCB init failed %d, disabled\n", err);
- pf->flags &= ~(I40E_FLAG_DCB_CAPABLE & I40E_FLAG_DCB_ENABLED);
+ pf->flags &= ~(I40E_FLAG_DCB_CAPABLE | I40E_FLAG_DCB_ENABLED);
/* Continue without DCB enabled */
}
#endif /* CONFIG_I40E_DCB */
@@ -11209,7 +11407,6 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
dev_dbg(&pf->pdev->dev, "get supported phy types ret = %s last_status = %s\n",
i40e_stat_str(&pf->hw, err),
i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
- pf->hw.phy.phy_types = le32_to_cpu(abilities.phy_type);
/* Add a filter to drop all Flow control frames from any VSI from being
* transmitted. By doing so we stop a malicious VF from sending out
@@ -11221,9 +11418,10 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
pf->main_vsi_seid);
if ((pf->hw.device_id == I40E_DEV_ID_10G_BASE_T) ||
- (pf->hw.device_id == I40E_DEV_ID_10G_BASE_T4))
- pf->flags |= I40E_FLAG_HAVE_10GBASET_PHY;
-
+ (pf->hw.device_id == I40E_DEV_ID_10G_BASE_T4))
+ pf->flags |= I40E_FLAG_PHY_CONTROLS_LEDS;
+ if (pf->hw.device_id == I40E_DEV_ID_SFP_I_X722)
+ pf->flags |= I40E_FLAG_HAVE_CRT_RETIMER;
/* print a string summarizing features */
i40e_print_features(pf);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_nvm.c b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
index 954efe3118db..38ee18f11124 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_nvm.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
@@ -722,9 +722,20 @@ i40e_status i40e_nvmupd_command(struct i40e_hw *hw,
*((u16 *)&bytes[2]) = hw->nvm_wait_opcode;
}
+ /* Clear error status on read */
+ if (hw->nvmupd_state == I40E_NVMUPD_STATE_ERROR)
+ hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
+
return 0;
}
	/* Clear the status even if it is not read, and log it */
+ if (hw->nvmupd_state == I40E_NVMUPD_STATE_ERROR) {
+ i40e_debug(hw, I40E_DEBUG_NVM,
+ "Clearing I40E_NVMUPD_STATE_ERROR state without reading\n");
+ hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
+ }
+
switch (hw->nvmupd_state) {
case I40E_NVMUPD_STATE_INIT:
status = i40e_nvmupd_state_init(hw, cmd, bytes, perrno);
@@ -1074,6 +1085,11 @@ void i40e_nvmupd_check_wait_event(struct i40e_hw *hw, u16 opcode)
}
hw->nvm_wait_opcode = 0;
+ if (hw->aq.arq_last_status) {
+ hw->nvmupd_state = I40E_NVMUPD_STATE_ERROR;
+ return;
+ }
+
switch (hw->nvmupd_state) {
case I40E_NVMUPD_STATE_INIT_WAIT:
hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_prototype.h b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
index 4660c5abc855..2551fc827444 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_prototype.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
@@ -144,6 +144,9 @@ enum i40e_status_code i40e_aq_set_vsi_uc_promisc_on_vlan(struct i40e_hw *hw,
u16 seid, bool enable,
u16 vid,
struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_set_vsi_bc_promisc_on_vlan(struct i40e_hw *hw,
+ u16 seid, bool enable, u16 vid,
+ struct i40e_asq_cmd_details *cmd_details);
i40e_status i40e_aq_set_vsi_vlan_promisc(struct i40e_hw *hw,
u16 seid, bool enable,
struct i40e_asq_cmd_details *cmd_details);
@@ -362,10 +365,18 @@ i40e_status i40e_aq_rx_ctl_write_register(struct i40e_hw *hw,
u32 reg_addr, u32 reg_val,
struct i40e_asq_cmd_details *cmd_details);
void i40e_write_rx_ctl(struct i40e_hw *hw, u32 reg_addr, u32 reg_val);
-i40e_status i40e_read_phy_register(struct i40e_hw *hw, u8 page,
- u16 reg, u8 phy_addr, u16 *value);
-i40e_status i40e_write_phy_register(struct i40e_hw *hw, u8 page,
- u16 reg, u8 phy_addr, u16 value);
+i40e_status i40e_read_phy_register_clause22(struct i40e_hw *hw,
+ u16 reg, u8 phy_addr, u16 *value);
+i40e_status i40e_write_phy_register_clause22(struct i40e_hw *hw,
+ u16 reg, u8 phy_addr, u16 value);
+i40e_status i40e_read_phy_register_clause45(struct i40e_hw *hw,
+ u8 page, u16 reg, u8 phy_addr, u16 *value);
+i40e_status i40e_write_phy_register_clause45(struct i40e_hw *hw,
+ u8 page, u16 reg, u8 phy_addr, u16 value);
+i40e_status i40e_read_phy_register(struct i40e_hw *hw, u8 page, u16 reg,
+ u8 phy_addr, u16 *value);
+i40e_status i40e_write_phy_register(struct i40e_hw *hw, u8 page, u16 reg,
+ u8 phy_addr, u16 value);
u8 i40e_get_phy_address(struct i40e_hw *hw, u8 dev_num);
i40e_status i40e_blink_phy_link_led(struct i40e_hw *hw,
u32 time, u32 interval);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ptp.c b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
index f1feceab758a..9e49ffafce28 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ptp.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
@@ -159,16 +159,15 @@ static int i40e_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
{
struct i40e_pf *pf = container_of(ptp, struct i40e_pf, ptp_caps);
struct timespec64 now, then;
- unsigned long flags;
then = ns_to_timespec64(delta);
- spin_lock_irqsave(&pf->tmreg_lock, flags);
+ mutex_lock(&pf->tmreg_lock);
i40e_ptp_read(pf, &now);
now = timespec64_add(now, then);
i40e_ptp_write(pf, (const struct timespec64 *)&now);
- spin_unlock_irqrestore(&pf->tmreg_lock, flags);
+ mutex_unlock(&pf->tmreg_lock);
return 0;
}
@@ -184,11 +183,10 @@ static int i40e_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
static int i40e_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
{
struct i40e_pf *pf = container_of(ptp, struct i40e_pf, ptp_caps);
- unsigned long flags;
- spin_lock_irqsave(&pf->tmreg_lock, flags);
+ mutex_lock(&pf->tmreg_lock);
i40e_ptp_read(pf, ts);
- spin_unlock_irqrestore(&pf->tmreg_lock, flags);
+ mutex_unlock(&pf->tmreg_lock);
return 0;
}
@@ -205,11 +203,10 @@ static int i40e_ptp_settime(struct ptp_clock_info *ptp,
const struct timespec64 *ts)
{
struct i40e_pf *pf = container_of(ptp, struct i40e_pf, ptp_caps);
- unsigned long flags;
- spin_lock_irqsave(&pf->tmreg_lock, flags);
+ mutex_lock(&pf->tmreg_lock);
i40e_ptp_write(pf, ts);
- spin_unlock_irqrestore(&pf->tmreg_lock, flags);
+ mutex_unlock(&pf->tmreg_lock);
return 0;
}
@@ -230,6 +227,47 @@ static int i40e_ptp_feature_enable(struct ptp_clock_info *ptp,
}
/**
+ * i40e_ptp_update_latch_events - Read I40E_PRTTSYN_STAT_1 and latch events
+ * @pf: the PF data structure
+ *
+ * This function reads I40E_PRTTSYN_STAT_1 and updates the corresponding timers
+ * for noticed latch events. This allows the driver to keep track of the first
+ * time a latch event was noticed which will be used to help clear out Rx
+ * timestamps for packets that got dropped or lost.
+ *
+ * This function will return the current value of I40E_PRTTSYN_STAT_1 and is
+ * expected to be called only while under the ptp_rx_lock.
+ **/
+static u32 i40e_ptp_get_rx_events(struct i40e_pf *pf)
+{
+ struct i40e_hw *hw = &pf->hw;
+ u32 prttsyn_stat, new_latch_events;
+ int i;
+
+ prttsyn_stat = rd32(hw, I40E_PRTTSYN_STAT_1);
+ new_latch_events = prttsyn_stat & ~pf->latch_event_flags;
+
+ /* Update the jiffies time for any newly latched timestamp. This
+ * ensures that we store the time that we first discovered a timestamp
+ * was latched by the hardware. The service task will later determine
+ * whether we should free the latch and drop that timestamp if too much
+ * time passes. This flow ensures that we only update jiffies for new
+ * events latched since the last time we checked, and not all events
+ * currently latched, so that the service task accounting remains
+ * accurate.
+ */
+ for (i = 0; i < 4; i++) {
+ if (new_latch_events & BIT(i))
+ pf->latch_events[i] = jiffies;
+ }
+
+ /* Finally, we store the current status of the Rx timestamp latches */
+ pf->latch_event_flags = prttsyn_stat;
+
+ return prttsyn_stat;
+}
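
The key step above is "stat & ~latch_event_flags": only bits newly set since the last read get their jiffies stamp, so the recorded time is when each latch was first noticed rather than the last poll. A small standalone sketch of that first-seen pattern, with time simulated by a counter (names are illustrative):

#include <stdio.h>

int main(void)
{
	unsigned int prev = 0, stat, new_bits, i, b;
	unsigned long seen[4] = { 0 }, now = 0;
	unsigned int samples[] = { 0x1, 0x3, 0x3 };	/* latch 0, then 1 */

	for (i = 0; i < 3; i++) {
		now++;				/* simulated jiffies */
		stat = samples[i];
		new_bits = stat & ~prev;	/* bits set since last read */
		for (b = 0; b < 4; b++)
			if (new_bits & (1u << b))
				seen[b] = now;	/* first time noticed */
		prev = stat;
	}
	printf("latch0 first seen t=%lu, latch1 t=%lu\n", seen[0], seen[1]);
	return 0;
}
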
+
+/**
* i40e_ptp_rx_hang - Detect error case when Rx timestamp registers are hung
* @vsi: The VSI with the rings relevant to 1588
*
@@ -242,10 +280,7 @@ void i40e_ptp_rx_hang(struct i40e_vsi *vsi)
{
struct i40e_pf *pf = vsi->back;
struct i40e_hw *hw = &pf->hw;
- struct i40e_ring *rx_ring;
- unsigned long rx_event;
- u32 prttsyn_stat;
- int n;
+ int i;
/* Since we cannot turn off the Rx timestamp logic if the device is
* configured for Tx timestamping, we check if Rx timestamping is
@@ -255,42 +290,30 @@ void i40e_ptp_rx_hang(struct i40e_vsi *vsi)
if (!(pf->flags & I40E_FLAG_PTP) || !pf->ptp_rx)
return;
- prttsyn_stat = rd32(hw, I40E_PRTTSYN_STAT_1);
+ spin_lock_bh(&pf->ptp_rx_lock);
- /* Unless all four receive timestamp registers are latched, we are not
- * concerned about a possible PTP Rx hang, so just update the timeout
- * counter and exit.
- */
- if (!(prttsyn_stat & ((I40E_PRTTSYN_STAT_1_RXT0_MASK <<
- I40E_PRTTSYN_STAT_1_RXT0_SHIFT) |
- (I40E_PRTTSYN_STAT_1_RXT1_MASK <<
- I40E_PRTTSYN_STAT_1_RXT1_SHIFT) |
- (I40E_PRTTSYN_STAT_1_RXT2_MASK <<
- I40E_PRTTSYN_STAT_1_RXT2_SHIFT) |
- (I40E_PRTTSYN_STAT_1_RXT3_MASK <<
- I40E_PRTTSYN_STAT_1_RXT3_SHIFT)))) {
- pf->last_rx_ptp_check = jiffies;
- return;
- }
+ /* Update current latch times for Rx events */
+ i40e_ptp_get_rx_events(pf);
- /* Determine the most recent watchdog or rx_timestamp event. */
- rx_event = pf->last_rx_ptp_check;
- for (n = 0; n < vsi->num_queue_pairs; n++) {
- rx_ring = vsi->rx_rings[n];
- if (time_after(rx_ring->last_rx_timestamp, rx_event))
- rx_event = rx_ring->last_rx_timestamp;
+ /* Check all the currently latched Rx events and see whether they have
+ * been latched for over a second. It is assumed that any timestamp
+ * should have been cleared within this time, or else it was captured
+ * for a dropped frame that the driver never received. Thus, we will
+ * clear any timestamp that has been latched for over 1 second.
+ */
+ for (i = 0; i < 4; i++) {
+ if ((pf->latch_event_flags & BIT(i)) &&
+ time_is_before_jiffies(pf->latch_events[i] + HZ)) {
+ rd32(hw, I40E_PRTTSYN_RXTIME_H(i));
+ pf->latch_event_flags &= ~BIT(i);
+ pf->rx_hwtstamp_cleared++;
+ dev_warn(&pf->pdev->dev,
+ "Clearing a missed Rx timestamp event for RXTIME[%d]\n",
+ i);
+ }
}
- /* Only need to read the high RXSTMP register to clear the lock */
- if (time_is_before_jiffies(rx_event + 5 * HZ)) {
- rd32(hw, I40E_PRTTSYN_RXTIME_H(0));
- rd32(hw, I40E_PRTTSYN_RXTIME_H(1));
- rd32(hw, I40E_PRTTSYN_RXTIME_H(2));
- rd32(hw, I40E_PRTTSYN_RXTIME_H(3));
- pf->last_rx_ptp_check = jiffies;
- pf->rx_hwtstamp_cleared++;
- WARN_ONCE(1, "Detected Rx timestamp register hang\n");
- }
+ spin_unlock_bh(&pf->ptp_rx_lock);
}
/**
@@ -353,14 +376,25 @@ void i40e_ptp_rx_hwtstamp(struct i40e_pf *pf, struct sk_buff *skb, u8 index)
hw = &pf->hw;
- prttsyn_stat = rd32(hw, I40E_PRTTSYN_STAT_1);
+ spin_lock_bh(&pf->ptp_rx_lock);
- if (!(prttsyn_stat & BIT(index)))
+ /* Get current Rx events and update latch times */
+ prttsyn_stat = i40e_ptp_get_rx_events(pf);
+
+ /* TODO: Should we warn about missing Rx timestamp event? */
+ if (!(prttsyn_stat & BIT(index))) {
+ spin_unlock_bh(&pf->ptp_rx_lock);
return;
+ }
+
+ /* Clear the latched event since we're about to read its register */
+ pf->latch_event_flags &= ~BIT(index);
lo = rd32(hw, I40E_PRTTSYN_RXTIME_L(index));
hi = rd32(hw, I40E_PRTTSYN_RXTIME_H(index));
+ spin_unlock_bh(&pf->ptp_rx_lock);
+
ns = (((u64)hi) << 32) | lo;
i40e_ptp_convert_to_hwtstamp(skb_hwtstamps(skb), ns);
@@ -487,6 +521,8 @@ static int i40e_ptp_set_timestamp_mode(struct i40e_pf *pf,
case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
+ if (!(pf->flags & I40E_FLAG_PTP_L4_CAPABLE))
+ return -ERANGE;
pf->ptp_rx = true;
tsyntype = I40E_PRTTSYN_CTL1_V1MESSTYPE0_MASK |
I40E_PRTTSYN_CTL1_TSYNTYPE_V1 |
@@ -494,19 +530,26 @@ static int i40e_ptp_set_timestamp_mode(struct i40e_pf *pf,
config->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
break;
case HWTSTAMP_FILTER_PTP_V2_EVENT:
- case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
case HWTSTAMP_FILTER_PTP_V2_SYNC:
- case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
- case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
+ if (!(pf->flags & I40E_FLAG_PTP_L4_CAPABLE))
+ return -ERANGE;
+ /* fall through */
+ case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
pf->ptp_rx = true;
tsyntype = I40E_PRTTSYN_CTL1_V2MESSTYPE0_MASK |
- I40E_PRTTSYN_CTL1_TSYNTYPE_V2 |
- I40E_PRTTSYN_CTL1_UDP_ENA_MASK;
- config->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
+ I40E_PRTTSYN_CTL1_TSYNTYPE_V2;
+ if (pf->flags & I40E_FLAG_PTP_L4_CAPABLE) {
+ tsyntype |= I40E_PRTTSYN_CTL1_UDP_ENA_MASK;
+ config->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
+ } else {
+ config->rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
+ }
break;
case HWTSTAMP_FILTER_ALL:
default:
@@ -514,12 +557,15 @@ static int i40e_ptp_set_timestamp_mode(struct i40e_pf *pf,
}
/* Clear out all 1588-related registers to clear and unlatch them. */
+ spin_lock_bh(&pf->ptp_rx_lock);
rd32(hw, I40E_PRTTSYN_STAT_0);
rd32(hw, I40E_PRTTSYN_TXTIME_H);
rd32(hw, I40E_PRTTSYN_RXTIME_H(0));
rd32(hw, I40E_PRTTSYN_RXTIME_H(1));
rd32(hw, I40E_PRTTSYN_RXTIME_H(2));
rd32(hw, I40E_PRTTSYN_RXTIME_H(3));
+ pf->latch_event_flags = 0;
+ spin_unlock_bh(&pf->ptp_rx_lock);
/* Enable/disable the Tx timestamp interrupt based on user input. */
regval = rd32(hw, I40E_PRTTSYN_CTL0);
@@ -658,10 +704,8 @@ void i40e_ptp_init(struct i40e_pf *pf)
return;
}
- /* we have to initialize the lock first, since we can't control
- * when the user will enter the PHC device entry points
- */
- spin_lock_init(&pf->tmreg_lock);
+ mutex_init(&pf->tmreg_lock);
+ spin_lock_init(&pf->ptp_rx_lock);
/* ensure we have a clock device */
err = i40e_ptp_create_clock(pf);
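The lock split above follows the usual context rules: the Rx latch state is updated from the NAPI receive path, which runs in softirq context and must not sleep, so it gets a BH-disabling spinlock, while tmreg_lock guards register access reached from sleepable PHC entry points and can therefore become a mutex. A minimal sketch of the two usage patterns, assuming those contexts hold and the driver headers are in scope:

	/* softirq (NAPI) side: no sleeping, so disable BHs around the
	 * latch bookkeeping
	 */
	static void example_rx_side(struct i40e_pf *pf)
	{
		spin_lock_bh(&pf->ptp_rx_lock);
		i40e_ptp_get_rx_events(pf);
		spin_unlock_bh(&pf->ptp_rx_lock);
	}

	/* process-context side (PHC entry points): a mutex is fine here */
	static void example_phc_side(struct i40e_pf *pf)
	{
		mutex_lock(&pf->tmreg_lock);
		/* read or adjust the device clock registers here */
		mutex_unlock(&pf->tmreg_lock);
	}
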
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index 6287bf63c43c..352cf7cd2ef4 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -122,14 +122,10 @@ static int i40e_program_fdir_filter(struct i40e_fdir_filter *fdir_data,
struct device *dev;
dma_addr_t dma;
u32 td_cmd = 0;
- u16 delay = 0;
u16 i;
/* find existing FDIR VSI */
- vsi = NULL;
- for (i = 0; i < pf->num_alloc_vsi; i++)
- if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR)
- vsi = pf->vsi[i];
+ vsi = i40e_find_vsi_by_type(pf, I40E_VSI_FDIR);
if (!vsi)
return -ENOENT;
@@ -137,15 +133,11 @@ static int i40e_program_fdir_filter(struct i40e_fdir_filter *fdir_data,
dev = tx_ring->dev;
/* we need two descriptors to add/del a filter and we can wait */
- do {
- if (I40E_DESC_UNUSED(tx_ring) > 1)
- break;
+ for (i = I40E_FD_CLEAN_DELAY; I40E_DESC_UNUSED(tx_ring) < 2; i--) {
+ if (!i)
+ return -EAGAIN;
msleep_interruptible(1);
- delay++;
- } while (delay < I40E_FD_CLEAN_DELAY);
-
- if (!(I40E_DESC_UNUSED(tx_ring) > 1))
- return -EAGAIN;
+ }
dma = dma_map_single(dev, raw_packet,
I40E_FDIR_MAX_RAW_PACKET_SIZE, DMA_TO_DEVICE);
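The rewritten loop folds the retry budget into the for statement itself. The same bounded-wait idiom in isolation, with wait_for_room() as a hypothetical wrapper around the patch's logic (assumes the driver headers for I40E_DESC_UNUSED are in scope):

	#include <linux/delay.h>

	/* Wait up to max_tries milliseconds for at least two free
	 * descriptors, giving up with -EAGAIN once the budget reaches zero.
	 */
	static int wait_for_room(struct i40e_ring *tx_ring, int max_tries)
	{
		int i;

		for (i = max_tries; I40E_DESC_UNUSED(tx_ring) < 2; i--) {
			if (!i)
				return -EAGAIN;
			msleep_interruptible(1);
		}

		return 0;
	}
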
@@ -335,22 +327,6 @@ static int i40e_add_del_fdir_tcpv4(struct i40e_vsi *vsi,
return err ? -EOPNOTSUPP : 0;
}
-/**
- * i40e_add_del_fdir_sctpv4 - Add/Remove SCTPv4 Flow Director filters for
- * a specific flow spec
- * @vsi: pointer to the targeted VSI
- * @fd_data: the flow director data required for the FDir descriptor
- * @add: true adds a filter, false removes it
- *
- * Returns 0 if the filters were successfully added or removed
- **/
-static int i40e_add_del_fdir_sctpv4(struct i40e_vsi *vsi,
- struct i40e_fdir_filter *fd_data,
- bool add)
-{
- return -EOPNOTSUPP;
-}
-
#define I40E_IP_DUMMY_PACKET_LEN 34
/**
* i40e_add_del_fdir_ipv4 - Add/Remove IPv4 Flow Director filters for
@@ -433,12 +409,6 @@ int i40e_add_del_fdir(struct i40e_vsi *vsi,
case UDP_V4_FLOW:
ret = i40e_add_del_fdir_udpv4(vsi, input, add);
break;
- case SCTP_V4_FLOW:
- ret = i40e_add_del_fdir_sctpv4(vsi, input, add);
- break;
- case IPV4_FLOW:
- ret = i40e_add_del_fdir_ipv4(vsi, input, add);
- break;
case IP_USER_FLOW:
switch (input->ip4_proto) {
case IPPROTO_TCP:
@@ -447,15 +417,16 @@ int i40e_add_del_fdir(struct i40e_vsi *vsi,
case IPPROTO_UDP:
ret = i40e_add_del_fdir_udpv4(vsi, input, add);
break;
- case IPPROTO_SCTP:
- ret = i40e_add_del_fdir_sctpv4(vsi, input, add);
- break;
- default:
+ case IPPROTO_IP:
ret = i40e_add_del_fdir_ipv4(vsi, input, add);
break;
+ default:
+ /* We cannot support masking based on protocol */
+ goto unsupported_flow;
}
break;
default:
+unsupported_flow:
dev_info(&pf->pdev->dev, "Could not specify spec type %d\n",
input->flow_type);
ret = -EINVAL;
@@ -645,7 +616,7 @@ u32 i40e_get_tx_pending(struct i40e_ring *ring, bool in_sw)
return 0;
}
-#define WB_STRIDE 0x3
+#define WB_STRIDE 4
/**
* i40e_clean_tx_irq - Reclaim resources after transmit completes
@@ -761,7 +732,7 @@ static bool i40e_clean_tx_irq(struct i40e_vsi *vsi,
unsigned int j = i40e_get_tx_pending(tx_ring, false);
if (budget &&
- ((j / (WB_STRIDE + 1)) == 0) && (j != 0) &&
+ ((j / WB_STRIDE) == 0) && (j > 0) &&
!test_bit(__I40E_DOWN, &vsi->state) &&
(I40E_DESC_UNUSED(tx_ring) != tx_ring->count))
tx_ring->arm_wb = true;
@@ -1246,7 +1217,6 @@ bool i40e_alloc_rx_buffers(struct i40e_ring *rx_ring, u16 cleaned_count)
* because each write-back erases this info.
*/
rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);
- rx_desc->read.hdr_addr = 0;
rx_desc++;
bi++;
@@ -1437,13 +1407,12 @@ void i40e_process_skb_fields(struct i40e_ring *rx_ring,
u64 qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
u32 rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
I40E_RXD_QW1_STATUS_SHIFT;
- u32 rsyn = (rx_status & I40E_RXD_QW1_STATUS_TSYNINDX_MASK) >>
+ u32 tsynvalid = rx_status & I40E_RXD_QW1_STATUS_TSYNVALID_MASK;
+ u32 tsyn = (rx_status & I40E_RXD_QW1_STATUS_TSYNINDX_MASK) >>
I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT;
- if (unlikely(rsyn)) {
- i40e_ptp_rx_hwtstamp(rx_ring->vsi->back, skb, rsyn);
- rx_ring->last_rx_timestamp = jiffies;
- }
+ if (unlikely(tsynvalid))
+ i40e_ptp_rx_hwtstamp(rx_ring->vsi->back, skb, tsyn);
i40e_rx_hash(rx_ring, rx_desc, skb, rx_ptype);
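The switch from keying on the latch index (rsyn) to the TSYNVALID bit matters because latch index 0 is a legitimate index: a non-zero test on the index silently drops timestamps latched in slot 0. A self-contained model of the corrected check, using the mask names from this patch:

	/* Return the latch index (0-3) when a timestamp is present, or -1
	 * when it is not. Testing the index alone would wrongly report
	 * "absent" for slot 0.
	 */
	static int rx_tsyn_index(u32 rx_status)
	{
		if (!(rx_status & I40E_RXD_QW1_STATUS_TSYNVALID_MASK))
			return -1;

		return (rx_status & I40E_RXD_QW1_STATUS_TSYNINDX_MASK) >>
		       I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT;
	}
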
@@ -1767,7 +1736,6 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
while (likely(total_rx_packets < budget)) {
union i40e_rx_desc *rx_desc;
struct sk_buff *skb;
- u32 rx_status;
u16 vlan_tag;
u8 rx_ptype;
u64 qword;
@@ -1781,21 +1749,13 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
rx_desc = I40E_RX_DESC(rx_ring, rx_ring->next_to_clean);
- qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
- rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >>
- I40E_RXD_QW1_PTYPE_SHIFT;
- rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
- I40E_RXD_QW1_STATUS_SHIFT;
-
- if (!(rx_status & BIT(I40E_RX_DESC_STATUS_DD_SHIFT)))
- break;
-
/* status_error_len will always be zero for unused descriptors
* because it's cleared in cleanup, and overlaps with hdr_addr
* which is always zero because packet split isn't used; if the
* hardware wrote DD then it will be non-zero
*/
- if (!rx_desc->wb.qword1.status_error_len)
+ if (!i40e_test_staterr(rx_desc,
+ BIT(I40E_RX_DESC_STATUS_DD_SHIFT)))
break;
/* This memory barrier is needed to keep us from reading
@@ -1829,6 +1789,10 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
/* probably a little skewed due to removing CRC */
total_rx_bytes += skb->len;
+ qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
+ rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >>
+ I40E_RXD_QW1_PTYPE_SHIFT;
+
/* populate checksum, VLAN, and protocol */
i40e_process_skb_fields(rx_ring, rx_desc, skb, rx_ptype);
@@ -2025,12 +1989,25 @@ int i40e_napi_poll(struct napi_struct *napi, int budget)
/* If work not completed, return budget and polling will return */
if (!clean_complete) {
+ const cpumask_t *aff_mask = &q_vector->affinity_mask;
+ int cpu_id = smp_processor_id();
+
+ /* It is possible that the interrupt affinity has changed but,
+ * if the cpu is pegged at 100%, polling will never exit while
+ * traffic continues and the interrupt will be stuck on this
+ * cpu. We check to make sure affinity is correct before we
+ * continue to poll; otherwise we must stop polling so the
+ * interrupt can move to the correct cpu.
+ */
+ if (likely(cpumask_test_cpu(cpu_id, aff_mask) ||
+ !(vsi->back->flags & I40E_FLAG_MSIX_ENABLED))) {
tx_only:
- if (arm_wb) {
- q_vector->tx.ring[0].tx_stats.tx_force_wb++;
- i40e_enable_wb_on_itr(vsi, q_vector);
+ if (arm_wb) {
+ q_vector->tx.ring[0].tx_stats.tx_force_wb++;
+ i40e_enable_wb_on_itr(vsi, q_vector);
+ }
+ return budget;
}
- return budget;
}
if (vsi->back->flags & I40E_TXR_FLAGS_WB_ON_ITR)
@@ -2038,12 +2015,19 @@ tx_only:
/* Work is done so exit the polling mode and re-enable the interrupt */
napi_complete_done(napi, work_done);
- if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) {
- i40e_update_enable_itr(vsi, q_vector);
- } else { /* Legacy mode */
+
+ /* If we're prematurely stopping polling to fix the interrupt
+ * affinity we want to make sure polling starts back up so we
+ * issue a call to i40e_force_wb which triggers a SW interrupt.
+ */
+ if (!clean_complete)
+ i40e_force_wb(vsi, q_vector);
+ else if (!(vsi->back->flags & I40E_FLAG_MSIX_ENABLED))
i40e_irq_dynamic_enable_icr0(vsi->back, false);
- }
- return 0;
+ else
+ i40e_update_enable_itr(vsi, q_vector);
+
+ return min(work_done, budget - 1);
}
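The new return value honors the NAPI contract: returning a value equal to budget tells the core to keep polling, so once napi_complete_done() has been called the driver must return strictly less than budget, hence the clamp. A condensed sketch of the resulting poll shape; do_poll_work() and reenable_interrupt() are hypothetical stand-ins for the driver's clean and re-arm paths:

	static int do_poll_work(struct napi_struct *napi, int budget)
	{
		return 0;	/* pretend all pending work completed */
	}

	static void reenable_interrupt(struct napi_struct *napi)
	{
	}

	static int example_poll(struct napi_struct *napi, int budget)
	{
		int work_done = do_poll_work(napi, budget);

		/* work remains: returning budget keeps us in polling mode */
		if (work_done >= budget)
			return budget;

		/* done: leave polling mode, then never return budget */
		napi_complete_done(napi, work_done);
		reenable_interrupt(napi);

		return min(work_done, budget - 1);
	}
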
/**
@@ -2716,9 +2700,7 @@ static inline void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
u32 td_tag = 0;
dma_addr_t dma;
u16 gso_segs;
- u16 desc_count = 0;
- bool tail_bump = true;
- bool do_rs = false;
+ u16 desc_count = 1;
if (tx_flags & I40E_TX_FLAGS_HW_VLAN) {
td_cmd |= I40E_TX_DESC_CMD_IL2TAG1;
@@ -2801,8 +2783,7 @@ static inline void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
tx_bi = &tx_ring->tx_bi[i];
}
- /* set next_to_watch value indicating a packet is present */
- first->next_to_watch = tx_desc;
+ netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);
i++;
if (i == tx_ring->count)
@@ -2810,66 +2791,72 @@ static inline void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
tx_ring->next_to_use = i;
- netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);
i40e_maybe_stop_tx(tx_ring, DESC_NEEDED);
+ /* write last descriptor with EOP bit */
+ td_cmd |= I40E_TX_DESC_CMD_EOP;
+
+ /* We can OR these values together as both are checked against
+ * WB_STRIDE (4) below, and desc_count is only used as a boolean
+ * value after this if/else block.
+ */
+ desc_count |= ++tx_ring->packet_stride;
+
/* Algorithm to optimize tail and RS bit setting:
- * if xmit_more is supported
- * if xmit_more is true
- * do not update tail and do not mark RS bit.
- * if xmit_more is false and last xmit_more was false
- * if every packet spanned less than 4 desc
- * then set RS bit on 4th packet and update tail
- * on every packet
- * else
- * update tail and set RS bit on every packet.
- * if xmit_more is false and last_xmit_more was true
- * update tail and set RS bit.
+ * if queue is stopped
+ * mark RS bit
+ * reset packet counter
+ * else if xmit_more is supported and is true
+ * advance packet counter to 4
+ * reset desc_count to 0
*
- * Optimization: wmb to be issued only in case of tail update.
- * Also optimize the Descriptor WB path for RS bit with the same
- * algorithm.
+ * if desc_count >= 4
+ * mark RS bit
+ * reset packet counter
+ * if desc_count > 0
+ * update tail
*
- * Note: If there are less than 4 packets
+ * Note: If there are fewer than 4 descriptors
* pending and interrupts were disabled the service task will
* trigger a force WB.
*/
- if (skb->xmit_more &&
- !netif_xmit_stopped(txring_txq(tx_ring))) {
- tx_ring->flags |= I40E_TXR_FLAGS_LAST_XMIT_MORE_SET;
- tail_bump = false;
- } else if (!skb->xmit_more &&
- !netif_xmit_stopped(txring_txq(tx_ring)) &&
- (!(tx_ring->flags & I40E_TXR_FLAGS_LAST_XMIT_MORE_SET)) &&
- (tx_ring->packet_stride < WB_STRIDE) &&
- (desc_count < WB_STRIDE)) {
- tx_ring->packet_stride++;
- } else {
+ if (netif_xmit_stopped(txring_txq(tx_ring))) {
+ goto do_rs;
+ } else if (skb->xmit_more) {
+ /* set stride to arm on next packet and reset desc_count */
+ tx_ring->packet_stride = WB_STRIDE;
+ desc_count = 0;
+ } else if (desc_count >= WB_STRIDE) {
+do_rs:
+ /* write last descriptor with RS bit set */
+ td_cmd |= I40E_TX_DESC_CMD_RS;
tx_ring->packet_stride = 0;
- tx_ring->flags &= ~I40E_TXR_FLAGS_LAST_XMIT_MORE_SET;
- do_rs = true;
}
- if (do_rs)
- tx_ring->packet_stride = 0;
tx_desc->cmd_type_offset_bsz =
- build_ctob(td_cmd, td_offset, size, td_tag) |
- cpu_to_le64((u64)(do_rs ? I40E_TXD_CMD :
- I40E_TX_DESC_CMD_EOP) <<
- I40E_TXD_QW1_CMD_SHIFT);
+ build_ctob(td_cmd, td_offset, size, td_tag);
+
+ /* Force memory writes to complete before letting h/w know there
+ * are new descriptors to fetch.
+ *
+ * We also use this memory barrier to make certain all of the
+ * status bits have been updated before next_to_watch is written.
+ */
+ wmb();
+
+ /* set next_to_watch value indicating a packet is present */
+ first->next_to_watch = tx_desc;
/* notify HW of packet */
- if (!tail_bump) {
- prefetchw(tx_desc + 1);
- } else {
- /* Force memory writes to complete before letting h/w
- * know there are new descriptors to fetch. (Only
- * applicable for weak-ordered memory model archs,
- * such as IA-64).
- */
- wmb();
+ if (desc_count) {
writel(i, tx_ring->tail);
+
+ /* we need this if more than one processor can write to our tail
+ * at a time; it synchronizes IO on IA64/Altix systems
+ */
+ mmiowb();
}
+
return;
dma_error:
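The restructured tail/RS logic leans on desc_count doing double duty: it counts descriptors while the frame is mapped, and after the OR with the incremented packet_stride only two things matter, whether it is zero (skip the tail bump) and whether it reached WB_STRIDE (set RS). A standalone model of the decision, with stopped and xmit_more reduced to plain inputs:

	#include <linux/types.h>

	#define WB_STRIDE 4

	struct ring_sketch {
		u8 packet_stride;
	};

	/* Returns true when the tail should be bumped; *mark_rs is set when
	 * the last descriptor should carry the RS bit.
	 */
	static bool tail_and_rs(struct ring_sketch *r, u16 desc_count,
				bool stopped, bool xmit_more, bool *mark_rs)
	{
		*mark_rs = false;
		desc_count |= ++r->packet_stride;

		if (stopped) {
			*mark_rs = true;	/* flush everything */
			r->packet_stride = 0;
		} else if (xmit_more) {
			/* arm RS for the next packet, skip the tail bump */
			r->packet_stride = WB_STRIDE;
			desc_count = 0;
		} else if (desc_count >= WB_STRIDE) {
			*mark_rs = true;
			r->packet_stride = 0;
		}

		return desc_count != 0;
	}

Note also that first->next_to_watch is now written only after the wmb(), so the completion path cannot observe the packet before its descriptors are globally visible.
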
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
index 508840585645..e065321ce8ed 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
@@ -173,26 +173,37 @@ static inline bool i40e_test_staterr(union i40e_rx_desc *rx_desc,
#define I40E_MAX_DATA_PER_TXD_ALIGNED \
(I40E_MAX_DATA_PER_TXD & ~(I40E_MAX_READ_REQ_SIZE - 1))
-/* This ugly bit of math is equivalent to DIV_ROUNDUP(size, X) where X is
- * the value I40E_MAX_DATA_PER_TXD_ALIGNED. It is needed due to the fact
- * that 12K is not a power of 2 and division is expensive. It is used to
- * approximate the number of descriptors used per linear buffer. Note
- * that this will overestimate in some cases as it doesn't account for the
- * fact that we will add up to 4K - 1 in aligning the 12K buffer, however
- * the error should not impact things much as large buffers usually mean
- * we will use fewer descriptors then there are frags in an skb.
+/**
+ * i40e_txd_use_count - estimate the number of descriptors needed for Tx
+ * @size: transmit request size in bytes
+ *
+ * Due to hardware alignment restrictions (4K alignment), we need to
+ * assume that we can have no more than 12K of data per descriptor, even
+ * though each descriptor can take up to 16K - 1 bytes of aligned memory.
+ * Thus, we need to divide by 12K. But division is slow! Instead,
+ * we decompose the operation into shifts and one relatively cheap
+ * multiply operation.
+ *
+ * To divide by 12K, we first divide by 4K, then divide by 3:
+ * To divide by 4K, shift right by 12 bits
+ * To divide by 3, multiply by 85, then divide by 256
+ * (Divide by 256 is done by shifting right by 8 bits)
+ * Finally, we add one to round up. Because 256 isn't an exact multiple of
+ * 3, we'll underestimate near each multiple of 12K. This is actually more
+ * accurate as we have 4K - 1 of wiggle room that we can fit into the last
+ * segment. For our purposes this is accurate out to 1M which is orders of
+ * magnitude greater than our largest possible GSO size.
+ *
+ * This would then be implemented as:
+ * return (((size >> 12) * 85) >> 8) + 1;
+ *
+ * Since multiplication and division are commutative, we can reorder
+ * operations into:
+ * return ((size * 85) >> 20) + 1;
*/
static inline unsigned int i40e_txd_use_count(unsigned int size)
{
- const unsigned int max = I40E_MAX_DATA_PER_TXD_ALIGNED;
- const unsigned int reciprocal = ((1ull << 32) - 1 + (max / 2)) / max;
- unsigned int adjust = ~(u32)0;
-
- /* if we rounded up on the reciprocal pull down the adjustment */
- if ((max * reciprocal) > adjust)
- adjust = ~(u32)(reciprocal - 1);
-
- return (u32)((((u64)size * reciprocal) + adjust) >> 32);
+ return ((size * 85) >> 20) + 1;
}
/* Tx Descriptors needed, worst case */
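The reciprocal trick replaced here is easy to sanity-check in userspace; the divergence at 12K + 1 is the deliberate underestimate the comment describes, covered by the 4K - 1 alignment slack:

	#include <stdio.h>

	/* same arithmetic as the new i40e_txd_use_count() */
	static unsigned int txd_use_count(unsigned int size)
	{
		return ((size * 85) >> 20) + 1;
	}

	int main(void)
	{
		unsigned int sizes[] = { 1, 4096, 12288, 12289, 16384, 65536 };
		unsigned int i, n = sizeof(sizes) / sizeof(sizes[0]);

		for (i = 0; i < n; i++)
			printf("%6u bytes -> %u descriptors (exact %u)\n",
			       sizes[i], txd_use_count(sizes[i]),
			       (sizes[i] + 12287) / 12288);
		return 0;
	}
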
@@ -307,15 +318,12 @@ struct i40e_ring {
u8 atr_sample_rate;
u8 atr_count;
- unsigned long last_rx_timestamp;
-
bool ring_active; /* is ring online or not */
bool arm_wb; /* do something to arm write back */
u8 packet_stride;
u16 flags;
#define I40E_TXR_FLAGS_WB_ON_ITR BIT(0)
-#define I40E_TXR_FLAGS_LAST_XMIT_MORE_SET BIT(2)
/* stats structs */
struct i40e_queue_stats stats;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_type.h b/drivers/net/ethernet/intel/i40e/i40e_type.h
index bd5f13bef83c..edc0abdf4783 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_type.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_type.h
@@ -90,14 +90,23 @@ enum i40e_debug_mask {
I40E_DEBUG_ALL = 0xFFFFFFFF
};
-#define I40E_MDIO_STCODE 0
-#define I40E_MDIO_OPCODE_ADDRESS 0
-#define I40E_MDIO_OPCODE_WRITE I40E_MASK(1, \
+#define I40E_MDIO_CLAUSE22_STCODE_MASK I40E_MASK(1, \
+ I40E_GLGEN_MSCA_STCODE_SHIFT)
+#define I40E_MDIO_CLAUSE22_OPCODE_WRITE_MASK I40E_MASK(1, \
I40E_GLGEN_MSCA_OPCODE_SHIFT)
-#define I40E_MDIO_OPCODE_READ_INC_ADDR I40E_MASK(2, \
+#define I40E_MDIO_CLAUSE22_OPCODE_READ_MASK I40E_MASK(2, \
I40E_GLGEN_MSCA_OPCODE_SHIFT)
-#define I40E_MDIO_OPCODE_READ I40E_MASK(3, \
+
+#define I40E_MDIO_CLAUSE45_STCODE_MASK I40E_MASK(0, \
+ I40E_GLGEN_MSCA_STCODE_SHIFT)
+#define I40E_MDIO_CLAUSE45_OPCODE_ADDRESS_MASK I40E_MASK(0, \
+ I40E_GLGEN_MSCA_OPCODE_SHIFT)
+#define I40E_MDIO_CLAUSE45_OPCODE_WRITE_MASK I40E_MASK(1, \
I40E_GLGEN_MSCA_OPCODE_SHIFT)
+#define I40E_MDIO_CLAUSE45_OPCODE_READ_INC_ADDR_MASK I40E_MASK(2, \
+ I40E_GLGEN_MSCA_OPCODE_SHIFT)
+#define I40E_MDIO_CLAUSE45_OPCODE_READ_MASK I40E_MASK(3, \
+ I40E_GLGEN_MSCA_OPCODE_SHIFT)
#define I40E_PHY_COM_REG_PAGE 0x1E
#define I40E_PHY_LED_LINK_MODE_MASK 0xF0
@@ -204,47 +213,59 @@ struct i40e_link_status {
#define I40E_MODULE_TYPE_1000BASE_T 0x08
};
-enum i40e_aq_capabilities_phy_type {
- I40E_CAP_PHY_TYPE_SGMII = BIT(I40E_PHY_TYPE_SGMII),
- I40E_CAP_PHY_TYPE_1000BASE_KX = BIT(I40E_PHY_TYPE_1000BASE_KX),
- I40E_CAP_PHY_TYPE_10GBASE_KX4 = BIT(I40E_PHY_TYPE_10GBASE_KX4),
- I40E_CAP_PHY_TYPE_10GBASE_KR = BIT(I40E_PHY_TYPE_10GBASE_KR),
- I40E_CAP_PHY_TYPE_40GBASE_KR4 = BIT(I40E_PHY_TYPE_40GBASE_KR4),
- I40E_CAP_PHY_TYPE_XAUI = BIT(I40E_PHY_TYPE_XAUI),
- I40E_CAP_PHY_TYPE_XFI = BIT(I40E_PHY_TYPE_XFI),
- I40E_CAP_PHY_TYPE_SFI = BIT(I40E_PHY_TYPE_SFI),
- I40E_CAP_PHY_TYPE_XLAUI = BIT(I40E_PHY_TYPE_XLAUI),
- I40E_CAP_PHY_TYPE_XLPPI = BIT(I40E_PHY_TYPE_XLPPI),
- I40E_CAP_PHY_TYPE_40GBASE_CR4_CU = BIT(I40E_PHY_TYPE_40GBASE_CR4_CU),
- I40E_CAP_PHY_TYPE_10GBASE_CR1_CU = BIT(I40E_PHY_TYPE_10GBASE_CR1_CU),
- I40E_CAP_PHY_TYPE_10GBASE_AOC = BIT(I40E_PHY_TYPE_10GBASE_AOC),
- I40E_CAP_PHY_TYPE_40GBASE_AOC = BIT(I40E_PHY_TYPE_40GBASE_AOC),
- I40E_CAP_PHY_TYPE_100BASE_TX = BIT(I40E_PHY_TYPE_100BASE_TX),
- I40E_CAP_PHY_TYPE_1000BASE_T = BIT(I40E_PHY_TYPE_1000BASE_T),
- I40E_CAP_PHY_TYPE_10GBASE_T = BIT(I40E_PHY_TYPE_10GBASE_T),
- I40E_CAP_PHY_TYPE_10GBASE_SR = BIT(I40E_PHY_TYPE_10GBASE_SR),
- I40E_CAP_PHY_TYPE_10GBASE_LR = BIT(I40E_PHY_TYPE_10GBASE_LR),
- I40E_CAP_PHY_TYPE_10GBASE_SFPP_CU = BIT(I40E_PHY_TYPE_10GBASE_SFPP_CU),
- I40E_CAP_PHY_TYPE_10GBASE_CR1 = BIT(I40E_PHY_TYPE_10GBASE_CR1),
- I40E_CAP_PHY_TYPE_40GBASE_CR4 = BIT(I40E_PHY_TYPE_40GBASE_CR4),
- I40E_CAP_PHY_TYPE_40GBASE_SR4 = BIT(I40E_PHY_TYPE_40GBASE_SR4),
- I40E_CAP_PHY_TYPE_40GBASE_LR4 = BIT(I40E_PHY_TYPE_40GBASE_LR4),
- I40E_CAP_PHY_TYPE_1000BASE_SX = BIT(I40E_PHY_TYPE_1000BASE_SX),
- I40E_CAP_PHY_TYPE_1000BASE_LX = BIT(I40E_PHY_TYPE_1000BASE_LX),
- I40E_CAP_PHY_TYPE_1000BASE_T_OPTICAL =
- BIT(I40E_PHY_TYPE_1000BASE_T_OPTICAL),
- I40E_CAP_PHY_TYPE_20GBASE_KR2 = BIT(I40E_PHY_TYPE_20GBASE_KR2)
-};
-
struct i40e_phy_info {
struct i40e_link_status link_info;
struct i40e_link_status link_info_old;
bool get_link_info;
enum i40e_media_type media_type;
/* all the phy types the NVM is capable of */
- enum i40e_aq_capabilities_phy_type phy_types;
-};
-
+ u64 phy_types;
+};
+
+#define I40E_CAP_PHY_TYPE_SGMII BIT_ULL(I40E_PHY_TYPE_SGMII)
+#define I40E_CAP_PHY_TYPE_1000BASE_KX BIT_ULL(I40E_PHY_TYPE_1000BASE_KX)
+#define I40E_CAP_PHY_TYPE_10GBASE_KX4 BIT_ULL(I40E_PHY_TYPE_10GBASE_KX4)
+#define I40E_CAP_PHY_TYPE_10GBASE_KR BIT_ULL(I40E_PHY_TYPE_10GBASE_KR)
+#define I40E_CAP_PHY_TYPE_40GBASE_KR4 BIT_ULL(I40E_PHY_TYPE_40GBASE_KR4)
+#define I40E_CAP_PHY_TYPE_XAUI BIT_ULL(I40E_PHY_TYPE_XAUI)
+#define I40E_CAP_PHY_TYPE_XFI BIT_ULL(I40E_PHY_TYPE_XFI)
+#define I40E_CAP_PHY_TYPE_SFI BIT_ULL(I40E_PHY_TYPE_SFI)
+#define I40E_CAP_PHY_TYPE_XLAUI BIT_ULL(I40E_PHY_TYPE_XLAUI)
+#define I40E_CAP_PHY_TYPE_XLPPI BIT_ULL(I40E_PHY_TYPE_XLPPI)
+#define I40E_CAP_PHY_TYPE_40GBASE_CR4_CU BIT_ULL(I40E_PHY_TYPE_40GBASE_CR4_CU)
+#define I40E_CAP_PHY_TYPE_10GBASE_CR1_CU BIT_ULL(I40E_PHY_TYPE_10GBASE_CR1_CU)
+#define I40E_CAP_PHY_TYPE_10GBASE_AOC BIT_ULL(I40E_PHY_TYPE_10GBASE_AOC)
+#define I40E_CAP_PHY_TYPE_40GBASE_AOC BIT_ULL(I40E_PHY_TYPE_40GBASE_AOC)
+#define I40E_CAP_PHY_TYPE_100BASE_TX BIT_ULL(I40E_PHY_TYPE_100BASE_TX)
+#define I40E_CAP_PHY_TYPE_1000BASE_T BIT_ULL(I40E_PHY_TYPE_1000BASE_T)
+#define I40E_CAP_PHY_TYPE_10GBASE_T BIT_ULL(I40E_PHY_TYPE_10GBASE_T)
+#define I40E_CAP_PHY_TYPE_10GBASE_SR BIT_ULL(I40E_PHY_TYPE_10GBASE_SR)
+#define I40E_CAP_PHY_TYPE_10GBASE_LR BIT_ULL(I40E_PHY_TYPE_10GBASE_LR)
+#define I40E_CAP_PHY_TYPE_10GBASE_SFPP_CU BIT_ULL(I40E_PHY_TYPE_10GBASE_SFPP_CU)
+#define I40E_CAP_PHY_TYPE_10GBASE_CR1 BIT_ULL(I40E_PHY_TYPE_10GBASE_CR1)
+#define I40E_CAP_PHY_TYPE_40GBASE_CR4 BIT_ULL(I40E_PHY_TYPE_40GBASE_CR4)
+#define I40E_CAP_PHY_TYPE_40GBASE_SR4 BIT_ULL(I40E_PHY_TYPE_40GBASE_SR4)
+#define I40E_CAP_PHY_TYPE_40GBASE_LR4 BIT_ULL(I40E_PHY_TYPE_40GBASE_LR4)
+#define I40E_CAP_PHY_TYPE_1000BASE_SX BIT_ULL(I40E_PHY_TYPE_1000BASE_SX)
+#define I40E_CAP_PHY_TYPE_1000BASE_LX BIT_ULL(I40E_PHY_TYPE_1000BASE_LX)
+#define I40E_CAP_PHY_TYPE_1000BASE_T_OPTICAL \
+ BIT_ULL(I40E_PHY_TYPE_1000BASE_T_OPTICAL)
+#define I40E_CAP_PHY_TYPE_20GBASE_KR2 BIT_ULL(I40E_PHY_TYPE_20GBASE_KR2)
+/* The macro I40E_PHY_TYPE_OFFSET implements a bit shift for some PHY
+ * types. There is an unused bit (31) in the I40E_CAP_PHY_TYPE_* bit
+ * fields but no corresponding gap in the i40e_aq_phy_type enumeration,
+ * so a shift is needed to adjust values larger than 31. The only
+ * affected values are the I40E_PHY_TYPE_25GBASE_* types.
+ */
+#define I40E_PHY_TYPE_OFFSET 1
+#define I40E_CAP_PHY_TYPE_25GBASE_KR BIT_ULL(I40E_PHY_TYPE_25GBASE_KR + \
+ I40E_PHY_TYPE_OFFSET)
+#define I40E_CAP_PHY_TYPE_25GBASE_CR BIT_ULL(I40E_PHY_TYPE_25GBASE_CR + \
+ I40E_PHY_TYPE_OFFSET)
+#define I40E_CAP_PHY_TYPE_25GBASE_SR BIT_ULL(I40E_PHY_TYPE_25GBASE_SR + \
+ I40E_PHY_TYPE_OFFSET)
+#define I40E_CAP_PHY_TYPE_25GBASE_LR BIT_ULL(I40E_PHY_TYPE_25GBASE_LR + \
+ I40E_PHY_TYPE_OFFSET)
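A small hypothetical helper shows how the offset would be applied when translating an AQ phy_type value into its capability bit; only types at or above I40E_PHY_TYPE_25GBASE_KR (0x1F, i.e. 31) need to skip the unused bit. This is a stand-in for illustration, not the driver's actual conversion path:

	static u64 phy_type_to_cap_bit(u8 phy_type)
	{
		if (phy_type >= I40E_PHY_TYPE_25GBASE_KR)
			return BIT_ULL(phy_type + I40E_PHY_TYPE_OFFSET);

		return BIT_ULL(phy_type);
	}
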
#define I40E_HW_CAP_MAX_GPIO 30
/* Capabilities of a PF or a VF or the whole device */
struct i40e_hw_capabilities {
@@ -254,6 +275,10 @@ struct i40e_hw_capabilities {
#define I40E_NVM_IMAGE_TYPE_UDP_CLOUD 0x3
u32 management_mode;
+ u32 mng_protocols_over_mctp;
+#define I40E_MNG_PROTOCOL_PLDM 0x2
+#define I40E_MNG_PROTOCOL_OEM_COMMANDS 0x4
+#define I40E_MNG_PROTOCOL_NCSI 0x8
u32 npar_enable;
u32 os2bmc;
u32 valid_functions;
@@ -366,6 +391,7 @@ enum i40e_nvmupd_state {
I40E_NVMUPD_STATE_WRITING,
I40E_NVMUPD_STATE_INIT_WAIT,
I40E_NVMUPD_STATE_WRITE_WAIT,
+ I40E_NVMUPD_STATE_ERROR
};
/* nvm_access definition and its masks/shifts need to be accessible to
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h
index f861d3109d1a..974ba2baf6ea 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h
@@ -165,6 +165,10 @@ struct i40e_virtchnl_vsi_resource {
#define I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF 0x00080000
#define I40E_VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM 0x00100000
+#define I40E_VF_BASE_MODE_OFFLOADS (I40E_VIRTCHNL_VF_OFFLOAD_L2 | \
+ I40E_VIRTCHNL_VF_OFFLOAD_VLAN | \
+ I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF)
+
struct i40e_virtchnl_vf_resource {
u16 num_vsis;
u16 num_queue_pairs;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
index 54b8ee2583f1..a6198b727e24 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -674,6 +674,7 @@ static int i40e_alloc_vsi_res(struct i40e_vf *vf, enum i40e_vsi_type type)
}
if (type == I40E_VSI_SRIOV) {
u64 hena = i40e_pf_get_default_rss_hena(pf);
+ u8 broadcast[ETH_ALEN];
vf->lan_vsi_idx = vsi->idx;
vf->lan_vsi_id = vsi->id;
@@ -686,17 +687,23 @@ static int i40e_alloc_vsi_res(struct i40e_vf *vf, enum i40e_vsi_type type)
if (vf->port_vlan_id)
i40e_vsi_add_pvid(vsi, vf->port_vlan_id);
- spin_lock_bh(&vsi->mac_filter_list_lock);
+ spin_lock_bh(&vsi->mac_filter_hash_lock);
if (is_valid_ether_addr(vf->default_lan_addr.addr)) {
f = i40e_add_filter(vsi, vf->default_lan_addr.addr,
- vf->port_vlan_id ? vf->port_vlan_id : -1,
- true, false);
+ vf->port_vlan_id ?
+ vf->port_vlan_id : -1);
if (!f)
dev_info(&pf->pdev->dev,
"Could not add MAC filter %pM for VF %d\n",
vf->default_lan_addr.addr, vf->vf_id);
}
- spin_unlock_bh(&vsi->mac_filter_list_lock);
+ eth_broadcast_addr(broadcast);
+ f = i40e_add_filter(vsi, broadcast,
+ vf->port_vlan_id ? vf->port_vlan_id : -1);
+ if (!f)
+ dev_info(&pf->pdev->dev,
+ "Could not allocate VF broadcast filter\n");
+ spin_unlock_bh(&vsi->mac_filter_hash_lock);
i40e_write_rx_ctl(&pf->hw, I40E_VFQF_HENA1(0, vf->vf_id),
(u32)hena);
i40e_write_rx_ctl(&pf->hw, I40E_VFQF_HENA1(1, vf->vf_id),
@@ -811,6 +818,7 @@ static void i40e_free_vf_res(struct i40e_vf *vf)
i40e_vsi_release(pf->vsi[vf->lan_vsi_idx]);
vf->lan_vsi_idx = 0;
vf->lan_vsi_id = 0;
+ vf->num_mac = 0;
}
msix_vf = pf->hw.func_caps.num_msix_vectors_vf;
@@ -990,7 +998,7 @@ void i40e_reset_vf(struct i40e_vf *vf, bool flr)
if (vf->lan_vsi_idx == 0)
goto complete_reset;
- i40e_vsi_control_rings(pf->vsi[vf->lan_vsi_idx], false);
+ i40e_vsi_stop_rings(pf->vsi[vf->lan_vsi_idx]);
complete_reset:
/* reallocate VF resources to reset the VSI state */
i40e_free_vf_res(vf);
@@ -1031,8 +1039,7 @@ void i40e_free_vfs(struct i40e_pf *pf)
i40e_notify_client_of_vf_enable(pf, 0);
for (i = 0; i < pf->num_alloc_vfs; i++)
if (test_bit(I40E_VF_STAT_INIT, &pf->vf[i].vf_states))
- i40e_vsi_control_rings(pf->vsi[pf->vf[i].lan_vsi_idx],
- false);
+ i40e_vsi_stop_rings(pf->vsi[pf->vf[i].lan_vsi_idx]);
/* Disable IOV before freeing resources. This lets any VF drivers
* running in the host get themselves cleaned up before we yank
@@ -1449,9 +1456,9 @@ static void i40e_vc_reset_vf_msg(struct i40e_vf *vf)
static inline int i40e_getnum_vf_vsi_vlan_filters(struct i40e_vsi *vsi)
{
struct i40e_mac_filter *f;
- int num_vlans = 0;
+ int num_vlans = 0, bkt;
- list_for_each_entry(f, &vsi->mac_filter_list, list) {
+ hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) {
if (f->vlan >= 0 && f->vlan <= I40E_MAX_VLANID)
num_vlans++;
}
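This hunk is part of the wider list-to-hashtable migration for MAC filters; hash_for_each() walks every bucket and object, which is all the VLAN counting needs. A toy version of the pattern with a self-contained table (ex_filter and count_vlan_filters() are illustrative, not driver code):

	#include <linux/hashtable.h>

	struct ex_filter {
		int vlan;
		struct hlist_node hlist;
	};

	static DEFINE_HASHTABLE(ex_filters, 8);	/* 2^8 buckets */

	static int count_vlan_filters(void)
	{
		struct ex_filter *f;
		int bkt, n = 0;

		hash_for_each(ex_filters, bkt, f, hlist)
			if (f->vlan >= 0)
				n++;

		return n;
	}
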
@@ -1481,6 +1488,7 @@ static int i40e_vc_config_promiscuous_mode_msg(struct i40e_vf *vf,
struct i40e_vsi *vsi;
bool alluni = false;
int aq_err = 0;
+ int bkt;
vsi = i40e_find_vsi_from_id(pf, info->vsi_id);
if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) ||
@@ -1507,7 +1515,7 @@ static int i40e_vc_config_promiscuous_mode_msg(struct i40e_vf *vf,
vf->port_vlan_id,
NULL);
} else if (i40e_getnum_vf_vsi_vlan_filters(vsi)) {
- list_for_each_entry(f, &vsi->mac_filter_list, list) {
+ hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) {
if (f->vlan < 0 || f->vlan > I40E_MAX_VLANID)
continue;
aq_ret = i40e_aq_set_vsi_mc_promisc_on_vlan(hw,
@@ -1535,7 +1543,7 @@ static int i40e_vc_config_promiscuous_mode_msg(struct i40e_vf *vf,
vf->vf_id,
i40e_stat_str(&pf->hw, aq_ret),
i40e_aq_str(&pf->hw, aq_err));
- goto error_param_int;
+ goto error_param;
}
}
@@ -1557,7 +1565,7 @@ static int i40e_vc_config_promiscuous_mode_msg(struct i40e_vf *vf,
vf->port_vlan_id,
NULL);
} else if (i40e_getnum_vf_vsi_vlan_filters(vsi)) {
- list_for_each_entry(f, &vsi->mac_filter_list, list) {
+ hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) {
aq_ret = 0;
if (f->vlan >= 0 && f->vlan <= I40E_MAX_VLANID) {
aq_ret =
@@ -1580,15 +1588,16 @@ static int i40e_vc_config_promiscuous_mode_msg(struct i40e_vf *vf,
allmulti, NULL,
true);
aq_err = pf->hw.aq.asq_last_status;
- if (aq_ret)
+ if (aq_ret) {
dev_err(&pf->pdev->dev,
"VF %d failed to set unicast promiscuous mode %8.8x err %s aq_err %s\n",
vf->vf_id, info->flags,
i40e_stat_str(&pf->hw, aq_ret),
i40e_aq_str(&pf->hw, aq_err));
+ goto error_param;
+ }
}
-error_param_int:
if (!aq_ret) {
dev_info(&pf->pdev->dev,
"VF %d successfully set unicast promiscuous mode\n",
@@ -1757,7 +1766,7 @@ static int i40e_vc_enable_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
goto error_param;
}
- if (i40e_vsi_control_rings(pf->vsi[vf->lan_vsi_idx], true))
+ if (i40e_vsi_start_rings(pf->vsi[vf->lan_vsi_idx]))
aq_ret = I40E_ERR_TIMEOUT;
error_param:
/* send the response to the VF */
@@ -1796,8 +1805,7 @@ static int i40e_vc_disable_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
goto error_param;
}
- if (i40e_vsi_control_rings(pf->vsi[vf->lan_vsi_idx], false))
- aq_ret = I40E_ERR_TIMEOUT;
+ i40e_vsi_stop_rings(pf->vsi[vf->lan_vsi_idx]);
error_param:
/* send the response to the VF */
@@ -1927,20 +1935,18 @@ static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
/* Lock once, because every function inside the for loop accesses the
* VSI's MAC filter list, which must be protected by the same lock.
*/
- spin_lock_bh(&vsi->mac_filter_list_lock);
+ spin_lock_bh(&vsi->mac_filter_hash_lock);
/* add new addresses to the list */
for (i = 0; i < al->num_elements; i++) {
struct i40e_mac_filter *f;
- f = i40e_find_mac(vsi, al->list[i].addr, true, false);
+ f = i40e_find_mac(vsi, al->list[i].addr);
if (!f) {
if (i40e_is_vsi_in_vlan(vsi))
- f = i40e_put_mac_in_vlan(vsi, al->list[i].addr,
- true, false);
+ f = i40e_put_mac_in_vlan(vsi, al->list[i].addr);
else
- f = i40e_add_filter(vsi, al->list[i].addr, -1,
- true, false);
+ f = i40e_add_filter(vsi, al->list[i].addr, -1);
}
if (!f) {
@@ -1948,13 +1954,13 @@ static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
"Unable to add MAC filter %pM for VF %d\n",
al->list[i].addr, vf->vf_id);
ret = I40E_ERR_PARAM;
- spin_unlock_bh(&vsi->mac_filter_list_lock);
+ spin_unlock_bh(&vsi->mac_filter_hash_lock);
goto error_param;
} else {
vf->num_mac++;
}
}
- spin_unlock_bh(&vsi->mac_filter_list_lock);
+ spin_unlock_bh(&vsi->mac_filter_hash_lock);
/* program the updated filter list */
ret = i40e_sync_vsi_filters(vsi);
@@ -2003,18 +2009,18 @@ static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
}
vsi = pf->vsi[vf->lan_vsi_idx];
- spin_lock_bh(&vsi->mac_filter_list_lock);
+ spin_lock_bh(&vsi->mac_filter_hash_lock);
/* delete addresses from the list */
for (i = 0; i < al->num_elements; i++)
- if (i40e_del_mac_all_vlan(vsi, al->list[i].addr, true, false)) {
+ if (i40e_del_mac_all_vlan(vsi, al->list[i].addr)) {
ret = I40E_ERR_INVALID_MAC_ADDR;
- spin_unlock_bh(&vsi->mac_filter_list_lock);
+ spin_unlock_bh(&vsi->mac_filter_hash_lock);
goto error_param;
} else {
vf->num_mac--;
}
- spin_unlock_bh(&vsi->mac_filter_list_lock);
+ spin_unlock_bh(&vsi->mac_filter_hash_lock);
/* program the updated filter list */
ret = i40e_sync_vsi_filters(vsi);
@@ -2139,9 +2145,8 @@ static int i40e_vc_remove_vlan_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
}
for (i = 0; i < vfl->num_elements; i++) {
- int ret = i40e_vsi_kill_vlan(vsi, vfl->vlan_id[i]);
- if (!ret)
- vf->num_vlan--;
+ i40e_vsi_kill_vlan(vsi, vfl->vlan_id[i]);
+ vf->num_vlan--;
if (test_bit(I40E_VF_STAT_UC_PROMISC, &vf->vf_states))
i40e_aq_set_vsi_uc_promisc_on_vlan(&pf->hw, vsi->seid,
@@ -2153,11 +2158,6 @@ static int i40e_vc_remove_vlan_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
false,
vfl->vlan_id[i],
NULL);
-
- if (ret)
- dev_err(&pf->pdev->dev,
- "Unable to delete VLAN filter %d for VF %d, error %d\n",
- vfl->vlan_id[i], vf->vf_id, ret);
}
error_param:
@@ -2689,6 +2689,7 @@ int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
struct i40e_mac_filter *f;
struct i40e_vf *vf;
int ret = 0;
+ int bkt;
/* validate the request */
if (vf_id >= pf->num_alloc_vfs) {
@@ -2715,23 +2716,22 @@ int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
}
/* Lock once because the add/del_filter functions invoked below require
- * mac_filter_list_lock to be held
+ * mac_filter_hash_lock to be held
*/
- spin_lock_bh(&vsi->mac_filter_list_lock);
+ spin_lock_bh(&vsi->mac_filter_hash_lock);
/* delete the temporary mac address */
if (!is_zero_ether_addr(vf->default_lan_addr.addr))
i40e_del_filter(vsi, vf->default_lan_addr.addr,
- vf->port_vlan_id ? vf->port_vlan_id : -1,
- true, false);
+ vf->port_vlan_id ? vf->port_vlan_id : -1);
/* Delete all the filters for this VSI - we're going to kill it
* anyway.
*/
- list_for_each_entry(f, &vsi->mac_filter_list, list)
- i40e_del_filter(vsi, f->macaddr, f->vlan, true, false);
+ hash_for_each(vsi->mac_filter_hash, bkt, f, hlist)
+ i40e_del_filter(vsi, f->macaddr, f->vlan);
- spin_unlock_bh(&vsi->mac_filter_list_lock);
+ spin_unlock_bh(&vsi->mac_filter_hash_lock);
dev_info(&pf->pdev->dev, "Setting MAC %pM on VF %d\n", mac, vf_id);
/* program mac filter */
@@ -2766,7 +2766,6 @@ int i40e_ndo_set_vf_port_vlan(struct net_device *netdev, int vf_id,
u16 vlanprio = vlan_id | (qos << I40E_VLAN_PRIORITY_SHIFT);
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_pf *pf = np->vsi->back;
- bool is_vsi_in_vlan = false;
struct i40e_vsi *vsi;
struct i40e_vf *vf;
int ret = 0;
@@ -2803,11 +2802,10 @@ int i40e_ndo_set_vf_port_vlan(struct net_device *netdev, int vf_id,
/* duplicate request, so just return success */
goto error_pvid;
- spin_lock_bh(&vsi->mac_filter_list_lock);
- is_vsi_in_vlan = i40e_is_vsi_in_vlan(vsi);
- spin_unlock_bh(&vsi->mac_filter_list_lock);
+ /* Locked once because multiple functions below iterate list */
+ spin_lock_bh(&vsi->mac_filter_hash_lock);
- if (le16_to_cpu(vsi->info.pvid) == 0 && is_vsi_in_vlan) {
+ if (le16_to_cpu(vsi->info.pvid) == 0 && i40e_is_vsi_in_vlan(vsi)) {
dev_err(&pf->pdev->dev,
"VF %d has already configured VLAN filters and the administrator is requesting a port VLAN override.\nPlease unload and reload the VF driver for this change to take effect.\n",
vf_id);
@@ -2830,19 +2828,23 @@ int i40e_ndo_set_vf_port_vlan(struct net_device *netdev, int vf_id,
*/
if ((!(vlan_id || qos) ||
vlanprio != le16_to_cpu(vsi->info.pvid)) &&
- vsi->info.pvid)
- ret = i40e_vsi_add_vlan(vsi, I40E_VLAN_ANY);
-
- if (vsi->info.pvid) {
- /* kill old VLAN */
- ret = i40e_vsi_kill_vlan(vsi, (le16_to_cpu(vsi->info.pvid) &
- VLAN_VID_MASK));
+ vsi->info.pvid) {
+ ret = i40e_add_vlan_all_mac(vsi, I40E_VLAN_ANY);
if (ret) {
dev_info(&vsi->back->pdev->dev,
- "remove VLAN failed, ret=%d, aq_err=%d\n",
- ret, pf->hw.aq.asq_last_status);
+ "add VF VLAN failed, ret=%d aq_err=%d\n", ret,
+ vsi->back->hw.aq.asq_last_status);
+ spin_unlock_bh(&vsi->mac_filter_hash_lock);
+ goto error_pvid;
}
}
+
+ if (vsi->info.pvid) {
+ /* remove all filters on the old VLAN */
+ i40e_rm_vlan_all_mac(vsi, (le16_to_cpu(vsi->info.pvid) &
+ VLAN_VID_MASK));
+ }
+
if (vlan_id || qos)
ret = i40e_vsi_add_pvid(vsi, vlanprio);
else
@@ -2852,24 +2854,30 @@ int i40e_ndo_set_vf_port_vlan(struct net_device *netdev, int vf_id,
dev_info(&pf->pdev->dev, "Setting VLAN %d, QOS 0x%x on VF %d\n",
vlan_id, qos, vf_id);
- /* add new VLAN filter */
- ret = i40e_vsi_add_vlan(vsi, vlan_id);
+ /* add new VLAN filter for each MAC */
+ ret = i40e_add_vlan_all_mac(vsi, vlan_id);
if (ret) {
dev_info(&vsi->back->pdev->dev,
"add VF VLAN failed, ret=%d aq_err=%d\n", ret,
vsi->back->hw.aq.asq_last_status);
+ spin_unlock_bh(&vsi->mac_filter_hash_lock);
goto error_pvid;
}
- /* Kill non-vlan MAC filters - ignore error return since
- * there might not be any non-vlan MAC filters.
- */
- i40e_vsi_kill_vlan(vsi, I40E_VLAN_ANY);
+
+ /* remove the previously added non-VLAN MAC filters */
+ i40e_rm_vlan_all_mac(vsi, I40E_VLAN_ANY);
}
+ spin_unlock_bh(&vsi->mac_filter_hash_lock);
+
+ /* Schedule the worker thread to take care of applying changes */
+ i40e_service_event_schedule(vsi->back);
+
if (ret) {
dev_err(&pf->pdev->dev, "Unable to update VF vsi context\n");
goto error_pvid;
}
+
/* The Port VLAN needs to be saved across resets the same as the
* default LAN MAC address.
*/
@@ -2926,6 +2934,9 @@ int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate,
case I40E_LINK_SPEED_40GB:
speed = 40000;
break;
+ case I40E_LINK_SPEED_25GB:
+ speed = 25000;
+ break;
case I40E_LINK_SPEED_20GB:
speed = 20000;
break;
@@ -2940,7 +2951,7 @@ int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate,
}
if (max_tx_rate > speed) {
- dev_err(&pf->pdev->dev, "Invalid max tx rate %d specified for VF %d.",
+ dev_err(&pf->pdev->dev, "Invalid max tx rate %d specified for VF %d.\n",
max_tx_rate, vf->vf_id);
ret = -EINVAL;
goto error;
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq.c b/drivers/net/ethernet/intel/i40evf/i40e_adminq.c
index 44f7ed7583dd..96385156b824 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_adminq.c
+++ b/drivers/net/ethernet/intel/i40evf/i40e_adminq.c
@@ -912,11 +912,11 @@ i40e_status i40evf_clean_arq_element(struct i40e_hw *hw,
desc = I40E_ADMINQ_DESC(hw->aq.arq, ntc);
desc_idx = ntc;
+ hw->aq.arq_last_status =
+ (enum i40e_admin_queue_err)le16_to_cpu(desc->retval);
flags = le16_to_cpu(desc->flags);
if (flags & I40E_AQ_FLAG_ERR) {
ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;
- hw->aq.arq_last_status =
- (enum i40e_admin_queue_err)le16_to_cpu(desc->retval);
i40e_debug(hw,
I40E_DEBUG_AQ_MESSAGE,
"AQRX: Event received with error 0x%X.\n",
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h
index 40b0eafd0c71..eeb9864bc5b1 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h
@@ -1639,6 +1639,10 @@ enum i40e_aq_phy_type {
I40E_PHY_TYPE_1000BASE_LX = 0x1C,
I40E_PHY_TYPE_1000BASE_T_OPTICAL = 0x1D,
I40E_PHY_TYPE_20GBASE_KR2 = 0x1E,
+ I40E_PHY_TYPE_25GBASE_KR = 0x1F,
+ I40E_PHY_TYPE_25GBASE_CR = 0x20,
+ I40E_PHY_TYPE_25GBASE_SR = 0x21,
+ I40E_PHY_TYPE_25GBASE_LR = 0x22,
I40E_PHY_TYPE_MAX
};
@@ -1647,6 +1651,7 @@ enum i40e_aq_phy_type {
#define I40E_LINK_SPEED_10GB_SHIFT 0x3
#define I40E_LINK_SPEED_40GB_SHIFT 0x4
#define I40E_LINK_SPEED_20GB_SHIFT 0x5
+#define I40E_LINK_SPEED_25GB_SHIFT 0x6
enum i40e_aq_link_speed {
I40E_LINK_SPEED_UNKNOWN = 0,
@@ -1654,7 +1659,8 @@ enum i40e_aq_link_speed {
I40E_LINK_SPEED_1GB = BIT(I40E_LINK_SPEED_1000MB_SHIFT),
I40E_LINK_SPEED_10GB = BIT(I40E_LINK_SPEED_10GB_SHIFT),
I40E_LINK_SPEED_40GB = BIT(I40E_LINK_SPEED_40GB_SHIFT),
- I40E_LINK_SPEED_20GB = BIT(I40E_LINK_SPEED_20GB_SHIFT)
+ I40E_LINK_SPEED_20GB = BIT(I40E_LINK_SPEED_20GB_SHIFT),
+ I40E_LINK_SPEED_25GB = BIT(I40E_LINK_SPEED_25GB_SHIFT),
};
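Each speed is a distinct bit so a link report can carry several supported speeds at once; 25G lands on bit 6 (0x40). A hypothetical helper in the style of the i40e_ndo_set_vf_bw() switch earlier in this patch, covering only the speeds shown here:

	static int link_speed_to_mbps(enum i40e_aq_link_speed speed)
	{
		switch (speed) {
		case I40E_LINK_SPEED_40GB:
			return 40000;
		case I40E_LINK_SPEED_25GB:
			return 25000;
		case I40E_LINK_SPEED_20GB:
			return 20000;
		case I40E_LINK_SPEED_10GB:
			return 10000;
		case I40E_LINK_SPEED_1GB:
			return 1000;
		default:
			return 0;
		}
	}
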
struct i40e_aqc_module_desc {
@@ -1677,6 +1683,8 @@ struct i40e_aq_get_phy_abilities_resp {
#define I40E_AQ_PHY_LINK_ENABLED 0x08
#define I40E_AQ_PHY_AN_ENABLED 0x10
#define I40E_AQ_PHY_FLAG_MODULE_QUAL 0x20
+#define I40E_AQ_PHY_FEC_ABILITY_KR 0x40
+#define I40E_AQ_PHY_FEC_ABILITY_RS 0x80
__le16 eee_capability;
#define I40E_AQ_EEE_100BASE_TX 0x0002
#define I40E_AQ_EEE_1000BASE_T 0x0004
@@ -1687,7 +1695,22 @@ struct i40e_aq_get_phy_abilities_resp {
__le32 eeer_val;
u8 d3_lpan;
#define I40E_AQ_SET_PHY_D3_LPAN_ENA 0x01
- u8 reserved[3];
+ u8 phy_type_ext;
+#define I40E_AQ_PHY_TYPE_EXT_25G_KR 0x01
+#define I40E_AQ_PHY_TYPE_EXT_25G_CR 0x02
+#define I40E_AQ_PHY_TYPE_EXT_25G_SR 0x04
+#define I40E_AQ_PHY_TYPE_EXT_25G_LR 0x08
+ u8 fec_cfg_curr_mod_ext_info;
+#define I40E_AQ_ENABLE_FEC_KR 0x01
+#define I40E_AQ_ENABLE_FEC_RS 0x02
+#define I40E_AQ_REQUEST_FEC_KR 0x04
+#define I40E_AQ_REQUEST_FEC_RS 0x08
+#define I40E_AQ_ENABLE_FEC_AUTO 0x10
+#define I40E_AQ_FEC
+#define I40E_AQ_MODULE_TYPE_EXT_MASK 0xE0
+#define I40E_AQ_MODULE_TYPE_EXT_SHIFT 5
+
+ u8 ext_comp_code;
u8 phy_id[4];
u8 module_type[3];
u8 qualified_module_count;
@@ -1709,7 +1732,20 @@ struct i40e_aq_set_phy_config { /* same bits as above in all */
__le16 eee_capability;
__le32 eeer;
u8 low_power_ctrl;
- u8 reserved[3];
+ u8 phy_type_ext;
+#define I40E_AQ_PHY_TYPE_EXT_25G_KR 0x01
+#define I40E_AQ_PHY_TYPE_EXT_25G_CR 0x02
+#define I40E_AQ_PHY_TYPE_EXT_25G_SR 0x04
+#define I40E_AQ_PHY_TYPE_EXT_25G_LR 0x08
+ u8 fec_config;
+#define I40E_AQ_SET_FEC_ABILITY_KR BIT(0)
+#define I40E_AQ_SET_FEC_ABILITY_RS BIT(1)
+#define I40E_AQ_SET_FEC_REQUEST_KR BIT(2)
+#define I40E_AQ_SET_FEC_REQUEST_RS BIT(3)
+#define I40E_AQ_SET_FEC_AUTO BIT(4)
+#define I40E_AQ_PHY_FEC_CONFIG_SHIFT 0x0
+#define I40E_AQ_PHY_FEC_CONFIG_MASK (0x1F << I40E_AQ_PHY_FEC_CONFIG_SHIFT)
+ u8 reserved;
};
I40E_CHECK_CMD_LENGTH(i40e_aq_set_phy_config);
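For illustration, reading the new FEC state out of fec_cfg_curr_mod_ext_info might look like the following; ex_fec_enabled() is a hypothetical helper, not a driver function:

	#include <linux/types.h>

	/* true when any FEC enable bit (KR, RS, or auto) is set in the
	 * byte reported by get_phy_abilities
	 */
	static bool ex_fec_enabled(u8 fec_cfg)
	{
		return fec_cfg & (I40E_AQ_ENABLE_FEC_KR |
				  I40E_AQ_ENABLE_FEC_RS |
				  I40E_AQ_ENABLE_FEC_AUTO);
	}
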
@@ -1789,9 +1825,18 @@ struct i40e_aqc_get_link_status {
#define I40E_AQ_LINK_TX_DRAINED 0x01
#define I40E_AQ_LINK_TX_FLUSHED 0x03
#define I40E_AQ_LINK_FORCED_40G 0x10
+/* 25G Error Codes */
+#define I40E_AQ_25G_NO_ERR 0x00
+#define I40E_AQ_25G_NOT_PRESENT 0x01
+#define I40E_AQ_25G_NVM_CRC_ERR 0x02
+#define I40E_AQ_25G_SBUS_UCODE_ERR 0x03
+#define I40E_AQ_25G_SERDES_UCODE_ERR 0x04
+#define I40E_AQ_25G_NIMB_UCODE_ERR 0x05
u8 loopback; /* use defines from i40e_aqc_set_lb_mode */
__le16 max_frame_size;
u8 config;
+#define I40E_AQ_CONFIG_FEC_KR_ENA 0x01
+#define I40E_AQ_CONFIG_FEC_RS_ENA 0x02
#define I40E_AQ_CONFIG_CRC_ENA 0x04
#define I40E_AQ_CONFIG_PACING_MASK 0x78
u8 external_power_ability;
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_common.c b/drivers/net/ethernet/intel/i40evf/i40e_common.c
index 7953c13451b9..aa63b7fb993d 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_common.c
+++ b/drivers/net/ethernet/intel/i40evf/i40e_common.c
@@ -53,6 +53,8 @@ i40e_status i40e_set_mac_type(struct i40e_hw *hw)
case I40E_DEV_ID_10G_BASE_T4:
case I40E_DEV_ID_20G_KR2:
case I40E_DEV_ID_20G_KR2_A:
+ case I40E_DEV_ID_25G_B:
+ case I40E_DEV_ID_25G_SFP28:
hw->mac.type = I40E_MAC_XL710;
break;
case I40E_DEV_ID_SFP_X722:
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_devids.h b/drivers/net/ethernet/intel/i40evf/i40e_devids.h
index 70235706915e..21dcaee1ad1d 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_devids.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_devids.h
@@ -39,6 +39,8 @@
#define I40E_DEV_ID_20G_KR2 0x1587
#define I40E_DEV_ID_20G_KR2_A 0x1588
#define I40E_DEV_ID_10G_BASE_T4 0x1589
+#define I40E_DEV_ID_25G_B 0x158A
+#define I40E_DEV_ID_25G_SFP28 0x158B
#define I40E_DEV_ID_VF 0x154C
#define I40E_DEV_ID_VF_HV 0x1571
#define I40E_DEV_ID_SFP_X722 0x37D0
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_prototype.h b/drivers/net/ethernet/intel/i40evf/i40e_prototype.h
index d89d52109efa..ba6c6bda0e22 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_prototype.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_prototype.h
@@ -115,6 +115,10 @@ i40e_status i40e_read_phy_register(struct i40e_hw *hw, u8 page,
u16 reg, u8 phy_addr, u16 *value);
i40e_status i40e_write_phy_register(struct i40e_hw *hw, u8 page,
u16 reg, u8 phy_addr, u16 value);
+i40e_status i40e_read_phy_register(struct i40e_hw *hw, u8 page, u16 reg,
+ u8 phy_addr, u16 *value);
+i40e_status i40e_write_phy_register(struct i40e_hw *hw, u8 page, u16 reg,
+ u8 phy_addr, u16 value);
u8 i40e_get_phy_address(struct i40e_hw *hw, u8 dev_num);
i40e_status i40e_blink_phy_link_led(struct i40e_hw *hw,
u32 time, u32 interval);
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
index 75f2a2cdd738..df67ef37b7f3 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
@@ -150,7 +150,7 @@ u32 i40evf_get_tx_pending(struct i40e_ring *ring, bool in_sw)
return 0;
}
-#define WB_STRIDE 0x3
+#define WB_STRIDE 4
/**
* i40e_clean_tx_irq - Reclaim resources after transmit completes
@@ -266,7 +266,7 @@ static bool i40e_clean_tx_irq(struct i40e_vsi *vsi,
unsigned int j = i40evf_get_tx_pending(tx_ring, false);
if (budget &&
- ((j / (WB_STRIDE + 1)) == 0) && (j > 0) &&
+ ((j / WB_STRIDE) == 0) && (j > 0) &&
!test_bit(__I40E_DOWN, &vsi->state) &&
(I40E_DESC_UNUSED(tx_ring) != tx_ring->count))
tx_ring->arm_wb = true;
@@ -705,7 +705,6 @@ bool i40evf_alloc_rx_buffers(struct i40e_ring *rx_ring, u16 cleaned_count)
* because each write-back erases this info.
*/
rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);
- rx_desc->read.hdr_addr = 0;
rx_desc++;
bi++;
@@ -1209,7 +1208,6 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
while (likely(total_rx_packets < budget)) {
union i40e_rx_desc *rx_desc;
struct sk_buff *skb;
- u32 rx_status;
u16 vlan_tag;
u8 rx_ptype;
u64 qword;
@@ -1223,21 +1221,13 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
rx_desc = I40E_RX_DESC(rx_ring, rx_ring->next_to_clean);
- qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
- rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >>
- I40E_RXD_QW1_PTYPE_SHIFT;
- rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
- I40E_RXD_QW1_STATUS_SHIFT;
-
- if (!(rx_status & BIT(I40E_RX_DESC_STATUS_DD_SHIFT)))
- break;
-
/* status_error_len will always be zero for unused descriptors
* because it's cleared in cleanup, and overlaps with hdr_addr
* which is always zero because packet split isn't used; if the
* hardware wrote DD then it will be non-zero
*/
- if (!rx_desc->wb.qword1.status_error_len)
+ if (!i40e_test_staterr(rx_desc,
+ BIT(I40E_RX_DESC_STATUS_DD_SHIFT)))
break;
/* This memory barrier is needed to keep us from reading
@@ -1271,6 +1261,10 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
/* probably a little skewed due to removing CRC */
total_rx_bytes += skb->len;
+ qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
+ rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >>
+ I40E_RXD_QW1_PTYPE_SHIFT;
+
/* populate checksum, VLAN, and protocol */
i40evf_process_skb_fields(rx_ring, rx_desc, skb, rx_ptype);
@@ -1461,12 +1455,24 @@ int i40evf_napi_poll(struct napi_struct *napi, int budget)
/* If work not completed, return budget and polling will return */
if (!clean_complete) {
+ const cpumask_t *aff_mask = &q_vector->affinity_mask;
+ int cpu_id = smp_processor_id();
+
+ /* It is possible that the interrupt affinity has changed but,
+ * if the cpu is pegged at 100%, polling will never exit while
+ * traffic continues and the interrupt will be stuck on this
+ * cpu. We check to make sure affinity is correct before we
+ * continue to poll; otherwise we must stop polling so the
+ * interrupt can move to the correct cpu.
+ */
+ if (likely(cpumask_test_cpu(cpu_id, aff_mask))) {
tx_only:
- if (arm_wb) {
- q_vector->tx.ring[0].tx_stats.tx_force_wb++;
- i40e_enable_wb_on_itr(vsi, q_vector);
+ if (arm_wb) {
+ q_vector->tx.ring[0].tx_stats.tx_force_wb++;
+ i40e_enable_wb_on_itr(vsi, q_vector);
+ }
+ return budget;
}
- return budget;
}
if (vsi->back->flags & I40E_TXR_FLAGS_WB_ON_ITR)
@@ -1474,8 +1480,17 @@ tx_only:
/* Work is done so exit the polling mode and re-enable the interrupt */
napi_complete_done(napi, work_done);
- i40e_update_enable_itr(vsi, q_vector);
- return 0;
+
+ /* If we're prematurely stopping polling to fix the interrupt
+ * affinity we want to make sure polling starts back up so we
+ * issue a call to i40evf_force_wb which triggers a SW interrupt.
+ */
+ if (!clean_complete)
+ i40evf_force_wb(vsi, q_vector);
+ else
+ i40e_update_enable_itr(vsi, q_vector);
+
+ return min(work_done, budget - 1);
}
/**
@@ -1935,9 +1950,7 @@ static inline void i40evf_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
u32 td_tag = 0;
dma_addr_t dma;
u16 gso_segs;
- u16 desc_count = 0;
- bool tail_bump = true;
- bool do_rs = false;
+ u16 desc_count = 1;
if (tx_flags & I40E_TX_FLAGS_HW_VLAN) {
td_cmd |= I40E_TX_DESC_CMD_IL2TAG1;
@@ -2020,8 +2033,7 @@ static inline void i40evf_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
tx_bi = &tx_ring->tx_bi[i];
}
- /* set next_to_watch value indicating a packet is present */
- first->next_to_watch = tx_desc;
+ netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);
i++;
if (i == tx_ring->count)
@@ -2029,66 +2041,72 @@ static inline void i40evf_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
tx_ring->next_to_use = i;
- netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);
i40e_maybe_stop_tx(tx_ring, DESC_NEEDED);
+ /* write last descriptor with EOP bit */
+ td_cmd |= I40E_TX_DESC_CMD_EOP;
+
+ /* We can OR these values together as both are checked against
+ * WB_STRIDE (4) below, and desc_count is only used as a boolean
+ * value after this if/else block.
+ */
+ desc_count |= ++tx_ring->packet_stride;
+
/* Algorithm to optimize tail and RS bit setting:
- * if xmit_more is supported
- * if xmit_more is true
- * do not update tail and do not mark RS bit.
- * if xmit_more is false and last xmit_more was false
- * if every packet spanned less than 4 desc
- * then set RS bit on 4th packet and update tail
- * on every packet
- * else
- * update tail and set RS bit on every packet.
- * if xmit_more is false and last_xmit_more was true
- * update tail and set RS bit.
+ * if queue is stopped
+ * mark RS bit
+ * reset packet counter
+ * else if xmit_more is supported and is true
+ * advance packet counter to 4
+ * reset desc_count to 0
*
- * Optimization: wmb to be issued only in case of tail update.
- * Also optimize the Descriptor WB path for RS bit with the same
- * algorithm.
+ * if desc_count >= 4
+ * mark RS bit
+ * reset packet counter
+ * if desc_count > 0
+ * update tail
*
- * Note: If there are less than 4 packets
+ * Note: If there are fewer than 4 descriptors
* pending and interrupts were disabled the service task will
* trigger a force WB.
*/
- if (skb->xmit_more &&
- !netif_xmit_stopped(txring_txq(tx_ring))) {
- tx_ring->flags |= I40E_TXR_FLAGS_LAST_XMIT_MORE_SET;
- tail_bump = false;
- } else if (!skb->xmit_more &&
- !netif_xmit_stopped(txring_txq(tx_ring)) &&
- (!(tx_ring->flags & I40E_TXR_FLAGS_LAST_XMIT_MORE_SET)) &&
- (tx_ring->packet_stride < WB_STRIDE) &&
- (desc_count < WB_STRIDE)) {
- tx_ring->packet_stride++;
- } else {
+ if (netif_xmit_stopped(txring_txq(tx_ring))) {
+ goto do_rs;
+ } else if (skb->xmit_more) {
+ /* set stride to arm on next packet and reset desc_count */
+ tx_ring->packet_stride = WB_STRIDE;
+ desc_count = 0;
+ } else if (desc_count >= WB_STRIDE) {
+do_rs:
+ /* write last descriptor with RS bit set */
+ td_cmd |= I40E_TX_DESC_CMD_RS;
tx_ring->packet_stride = 0;
- tx_ring->flags &= ~I40E_TXR_FLAGS_LAST_XMIT_MORE_SET;
- do_rs = true;
}
- if (do_rs)
- tx_ring->packet_stride = 0;
tx_desc->cmd_type_offset_bsz =
- build_ctob(td_cmd, td_offset, size, td_tag) |
- cpu_to_le64((u64)(do_rs ? I40E_TXD_CMD :
- I40E_TX_DESC_CMD_EOP) <<
- I40E_TXD_QW1_CMD_SHIFT);
+ build_ctob(td_cmd, td_offset, size, td_tag);
+
+ /* Force memory writes to complete before letting h/w know there
+ * are new descriptors to fetch.
+ *
+ * We also use this memory barrier to make certain all of the
+ * status bits have been updated before next_to_watch is written.
+ */
+ wmb();
+
+ /* set next_to_watch value indicating a packet is present */
+ first->next_to_watch = tx_desc;
/* notify HW of packet */
- if (!tail_bump) {
- prefetchw(tx_desc + 1);
- } else {
- /* Force memory writes to complete before letting h/w
- * know there are new descriptors to fetch. (Only
- * applicable for weak-ordered memory model archs,
- * such as IA-64).
- */
- wmb();
+ if (desc_count) {
writel(i, tx_ring->tail);
+
+ /* we need this if more than one processor can write to our tail
+ * at a time; it synchronizes IO on IA64/Altix systems
+ */
+ mmiowb();
}
+
return;
dma_error:
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
index abcdecabbc56..a5fc789f78eb 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
@@ -173,26 +173,37 @@ static inline bool i40e_test_staterr(union i40e_rx_desc *rx_desc,
#define I40E_MAX_DATA_PER_TXD_ALIGNED \
(I40E_MAX_DATA_PER_TXD & ~(I40E_MAX_READ_REQ_SIZE - 1))
-/* This ugly bit of math is equivalent to DIV_ROUNDUP(size, X) where X is
- * the value I40E_MAX_DATA_PER_TXD_ALIGNED. It is needed due to the fact
- * that 12K is not a power of 2 and division is expensive. It is used to
- * approximate the number of descriptors used per linear buffer. Note
- * that this will overestimate in some cases as it doesn't account for the
- * fact that we will add up to 4K - 1 in aligning the 12K buffer, however
- * the error should not impact things much as large buffers usually mean
- * we will use fewer descriptors then there are frags in an skb.
+/**
+ * i40e_txd_use_count - estimate the number of descriptors needed for Tx
+ * @size: transmit request size in bytes
+ *
+ * Due to hardware alignment restrictions (4K alignment), we need to
+ * assume that we can have no more than 12K of data per descriptor, even
+ * though each descriptor can take up to 16K - 1 bytes of aligned memory.
+ * Thus, we need to divide by 12K. But division is slow! Instead,
+ * we decompose the operation into shifts and one relatively cheap
+ * multiply operation.
+ *
+ * To divide by 12K, we first divide by 4K, then divide by 3:
+ * To divide by 4K, shift right by 12 bits
+ * To divide by 3, multiply by 85, then divide by 256
+ * (Divide by 256 is done by shifting right by 8 bits)
+ * Finally, we add one to round up. Because 256 isn't an exact multiple of
+ * 3, we'll underestimate near each multiple of 12K. This is actually more
+ * accurate as we have 4K - 1 of wiggle room that we can fit into the last
+ * segment. For our purposes this is accurate out to 1M which is orders of
+ * magnitude greater than our largest possible GSO size.
+ *
+ * This would then be implemented as:
+ * return (((size >> 12) * 85) >> 8) + 1;
+ *
+ * Since multiplication and division are commutative, we can reorder
+ * operations into:
+ * return ((size * 85) >> 20) + 1;
*/
static inline unsigned int i40e_txd_use_count(unsigned int size)
{
- const unsigned int max = I40E_MAX_DATA_PER_TXD_ALIGNED;
- const unsigned int reciprocal = ((1ull << 32) - 1 + (max / 2)) / max;
- unsigned int adjust = ~(u32)0;
-
- /* if we rounded up on the reciprocal pull down the adjustment */
- if ((max * reciprocal) > adjust)
- adjust = ~(u32)(reciprocal - 1);
-
- return (u32)((((u64)size * reciprocal) + adjust) >> 32);
+ return ((size * 85) >> 20) + 1;
}
/* Tx Descriptors needed, worst case */
@@ -309,7 +320,6 @@ struct i40e_ring {
bool ring_active; /* is ring online or not */
bool arm_wb; /* do something to arm write back */
u8 packet_stride;
-#define I40E_TXR_FLAGS_LAST_XMIT_MORE_SET BIT(2)
u16 flags;
#define I40E_TXR_FLAGS_WB_ON_ITR BIT(0)
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_type.h b/drivers/net/ethernet/intel/i40evf/i40e_type.h
index 97f96e0d9c4c..c85e8a31c072 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_type.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_type.h
@@ -187,47 +187,59 @@ struct i40e_link_status {
#define I40E_MODULE_TYPE_1000BASE_T 0x08
};
-enum i40e_aq_capabilities_phy_type {
- I40E_CAP_PHY_TYPE_SGMII = BIT(I40E_PHY_TYPE_SGMII),
- I40E_CAP_PHY_TYPE_1000BASE_KX = BIT(I40E_PHY_TYPE_1000BASE_KX),
- I40E_CAP_PHY_TYPE_10GBASE_KX4 = BIT(I40E_PHY_TYPE_10GBASE_KX4),
- I40E_CAP_PHY_TYPE_10GBASE_KR = BIT(I40E_PHY_TYPE_10GBASE_KR),
- I40E_CAP_PHY_TYPE_40GBASE_KR4 = BIT(I40E_PHY_TYPE_40GBASE_KR4),
- I40E_CAP_PHY_TYPE_XAUI = BIT(I40E_PHY_TYPE_XAUI),
- I40E_CAP_PHY_TYPE_XFI = BIT(I40E_PHY_TYPE_XFI),
- I40E_CAP_PHY_TYPE_SFI = BIT(I40E_PHY_TYPE_SFI),
- I40E_CAP_PHY_TYPE_XLAUI = BIT(I40E_PHY_TYPE_XLAUI),
- I40E_CAP_PHY_TYPE_XLPPI = BIT(I40E_PHY_TYPE_XLPPI),
- I40E_CAP_PHY_TYPE_40GBASE_CR4_CU = BIT(I40E_PHY_TYPE_40GBASE_CR4_CU),
- I40E_CAP_PHY_TYPE_10GBASE_CR1_CU = BIT(I40E_PHY_TYPE_10GBASE_CR1_CU),
- I40E_CAP_PHY_TYPE_10GBASE_AOC = BIT(I40E_PHY_TYPE_10GBASE_AOC),
- I40E_CAP_PHY_TYPE_40GBASE_AOC = BIT(I40E_PHY_TYPE_40GBASE_AOC),
- I40E_CAP_PHY_TYPE_100BASE_TX = BIT(I40E_PHY_TYPE_100BASE_TX),
- I40E_CAP_PHY_TYPE_1000BASE_T = BIT(I40E_PHY_TYPE_1000BASE_T),
- I40E_CAP_PHY_TYPE_10GBASE_T = BIT(I40E_PHY_TYPE_10GBASE_T),
- I40E_CAP_PHY_TYPE_10GBASE_SR = BIT(I40E_PHY_TYPE_10GBASE_SR),
- I40E_CAP_PHY_TYPE_10GBASE_LR = BIT(I40E_PHY_TYPE_10GBASE_LR),
- I40E_CAP_PHY_TYPE_10GBASE_SFPP_CU = BIT(I40E_PHY_TYPE_10GBASE_SFPP_CU),
- I40E_CAP_PHY_TYPE_10GBASE_CR1 = BIT(I40E_PHY_TYPE_10GBASE_CR1),
- I40E_CAP_PHY_TYPE_40GBASE_CR4 = BIT(I40E_PHY_TYPE_40GBASE_CR4),
- I40E_CAP_PHY_TYPE_40GBASE_SR4 = BIT(I40E_PHY_TYPE_40GBASE_SR4),
- I40E_CAP_PHY_TYPE_40GBASE_LR4 = BIT(I40E_PHY_TYPE_40GBASE_LR4),
- I40E_CAP_PHY_TYPE_1000BASE_SX = BIT(I40E_PHY_TYPE_1000BASE_SX),
- I40E_CAP_PHY_TYPE_1000BASE_LX = BIT(I40E_PHY_TYPE_1000BASE_LX),
- I40E_CAP_PHY_TYPE_1000BASE_T_OPTICAL =
- BIT(I40E_PHY_TYPE_1000BASE_T_OPTICAL),
- I40E_CAP_PHY_TYPE_20GBASE_KR2 = BIT(I40E_PHY_TYPE_20GBASE_KR2)
-};
-
struct i40e_phy_info {
struct i40e_link_status link_info;
struct i40e_link_status link_info_old;
bool get_link_info;
enum i40e_media_type media_type;
/* all the phy types the NVM is capable of */
- enum i40e_aq_capabilities_phy_type phy_types;
-};
-
+ u64 phy_types;
+};
+
+#define I40E_CAP_PHY_TYPE_SGMII BIT_ULL(I40E_PHY_TYPE_SGMII)
+#define I40E_CAP_PHY_TYPE_1000BASE_KX BIT_ULL(I40E_PHY_TYPE_1000BASE_KX)
+#define I40E_CAP_PHY_TYPE_10GBASE_KX4 BIT_ULL(I40E_PHY_TYPE_10GBASE_KX4)
+#define I40E_CAP_PHY_TYPE_10GBASE_KR BIT_ULL(I40E_PHY_TYPE_10GBASE_KR)
+#define I40E_CAP_PHY_TYPE_40GBASE_KR4 BIT_ULL(I40E_PHY_TYPE_40GBASE_KR4)
+#define I40E_CAP_PHY_TYPE_XAUI BIT_ULL(I40E_PHY_TYPE_XAUI)
+#define I40E_CAP_PHY_TYPE_XFI BIT_ULL(I40E_PHY_TYPE_XFI)
+#define I40E_CAP_PHY_TYPE_SFI BIT_ULL(I40E_PHY_TYPE_SFI)
+#define I40E_CAP_PHY_TYPE_XLAUI BIT_ULL(I40E_PHY_TYPE_XLAUI)
+#define I40E_CAP_PHY_TYPE_XLPPI BIT_ULL(I40E_PHY_TYPE_XLPPI)
+#define I40E_CAP_PHY_TYPE_40GBASE_CR4_CU BIT_ULL(I40E_PHY_TYPE_40GBASE_CR4_CU)
+#define I40E_CAP_PHY_TYPE_10GBASE_CR1_CU BIT_ULL(I40E_PHY_TYPE_10GBASE_CR1_CU)
+#define I40E_CAP_PHY_TYPE_10GBASE_AOC BIT_ULL(I40E_PHY_TYPE_10GBASE_AOC)
+#define I40E_CAP_PHY_TYPE_40GBASE_AOC BIT_ULL(I40E_PHY_TYPE_40GBASE_AOC)
+#define I40E_CAP_PHY_TYPE_100BASE_TX BIT_ULL(I40E_PHY_TYPE_100BASE_TX)
+#define I40E_CAP_PHY_TYPE_1000BASE_T BIT_ULL(I40E_PHY_TYPE_1000BASE_T)
+#define I40E_CAP_PHY_TYPE_10GBASE_T BIT_ULL(I40E_PHY_TYPE_10GBASE_T)
+#define I40E_CAP_PHY_TYPE_10GBASE_SR BIT_ULL(I40E_PHY_TYPE_10GBASE_SR)
+#define I40E_CAP_PHY_TYPE_10GBASE_LR BIT_ULL(I40E_PHY_TYPE_10GBASE_LR)
+#define I40E_CAP_PHY_TYPE_10GBASE_SFPP_CU BIT_ULL(I40E_PHY_TYPE_10GBASE_SFPP_CU)
+#define I40E_CAP_PHY_TYPE_10GBASE_CR1 BIT_ULL(I40E_PHY_TYPE_10GBASE_CR1)
+#define I40E_CAP_PHY_TYPE_40GBASE_CR4 BIT_ULL(I40E_PHY_TYPE_40GBASE_CR4)
+#define I40E_CAP_PHY_TYPE_40GBASE_SR4 BIT_ULL(I40E_PHY_TYPE_40GBASE_SR4)
+#define I40E_CAP_PHY_TYPE_40GBASE_LR4 BIT_ULL(I40E_PHY_TYPE_40GBASE_LR4)
+#define I40E_CAP_PHY_TYPE_1000BASE_SX BIT_ULL(I40E_PHY_TYPE_1000BASE_SX)
+#define I40E_CAP_PHY_TYPE_1000BASE_LX BIT_ULL(I40E_PHY_TYPE_1000BASE_LX)
+#define I40E_CAP_PHY_TYPE_1000BASE_T_OPTICAL \
+ BIT_ULL(I40E_PHY_TYPE_1000BASE_T_OPTICAL)
+#define I40E_CAP_PHY_TYPE_20GBASE_KR2 BIT_ULL(I40E_PHY_TYPE_20GBASE_KR2)
+/* The macro I40E_PHY_TYPE_OFFSET implements a bit shift for some
+ * PHY types. There is an unused bit (31) in the I40E_CAP_PHY_TYPE_* bit
+ * fields but no corresponding gap in the i40e_aq_phy_type enumeration. So,
+ * a shift is needed to adjust for this with values larger than 31. The
+ * only affected values are I40E_PHY_TYPE_25GBASE_*.
+ */
+#define I40E_PHY_TYPE_OFFSET 1
+#define I40E_CAP_PHY_TYPE_25GBASE_KR BIT_ULL(I40E_PHY_TYPE_25GBASE_KR + \
+ I40E_PHY_TYPE_OFFSET)
+#define I40E_CAP_PHY_TYPE_25GBASE_CR BIT_ULL(I40E_PHY_TYPE_25GBASE_CR + \
+ I40E_PHY_TYPE_OFFSET)
+#define I40E_CAP_PHY_TYPE_25GBASE_SR BIT_ULL(I40E_PHY_TYPE_25GBASE_SR + \
+ I40E_PHY_TYPE_OFFSET)
+#define I40E_CAP_PHY_TYPE_25GBASE_LR BIT_ULL(I40E_PHY_TYPE_25GBASE_LR + \
+ I40E_PHY_TYPE_OFFSET)
#define I40E_HW_CAP_MAX_GPIO 30
/* Capabilities of a PF or a VF or the whole device */
struct i40e_hw_capabilities {
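A small illustration of the offset comment above, assuming purely for illustration that I40E_PHY_TYPE_25GBASE_KR has the enum value 31 (the real value lives in the adminq definitions, not in this hunk):

/* With capability bit 31 unused, an AQ PHY type of 31 must land on
 * capability bit 32, which is exactly what the +1 offset does:
 *
 *   BIT_ULL(31 + I40E_PHY_TYPE_OFFSET) == BIT_ULL(32) == 1ULL << 32
 *
 * Without BIT_ULL (i.e. with a 32-bit BIT()), a shift by 32 would be
 * undefined behaviour on a 32-bit type — which is also why the
 * phy_types field above became a u64.
 */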
@@ -237,6 +249,10 @@ struct i40e_hw_capabilities {
#define I40E_NVM_IMAGE_TYPE_UDP_CLOUD 0x3
u32 management_mode;
+ u32 mng_protocols_over_mctp;
+#define I40E_MNG_PROTOCOL_PLDM 0x2
+#define I40E_MNG_PROTOCOL_OEM_COMMANDS 0x4
+#define I40E_MNG_PROTOCOL_NCSI 0x8
u32 npar_enable;
u32 os2bmc;
u32 valid_functions;
@@ -348,6 +364,7 @@ enum i40e_nvmupd_state {
I40E_NVMUPD_STATE_WRITING,
I40E_NVMUPD_STATE_INIT_WAIT,
I40E_NVMUPD_STATE_WRITE_WAIT,
+ I40E_NVMUPD_STATE_ERROR
};
/* nvm_access definition and its masks/shifts need to be accessible to
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h b/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h
index bd691ad86673..fc374f833aa9 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h
@@ -162,6 +162,10 @@ struct i40e_virtchnl_vsi_resource {
#define I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF 0X00080000
#define I40E_VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM 0X00100000
+#define I40E_VF_BASE_MODE_OFFLOADS (I40E_VIRTCHNL_VF_OFFLOAD_L2 | \
+ I40E_VIRTCHNL_VF_OFFLOAD_VLAN | \
+ I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF)
+
struct i40e_virtchnl_vf_resource {
u16 num_vsis;
u16 num_queue_pairs;
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf.h b/drivers/net/ethernet/intel/i40evf/i40evf.h
index c5fd724313c7..fffe4cf2c20b 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf.h
+++ b/drivers/net/ethernet/intel/i40evf/i40evf.h
@@ -107,7 +107,8 @@ struct i40e_q_vector {
int v_idx; /* vector index in list */
char name[IFNAMSIZ + 9];
bool arm_wb_state;
- cpumask_var_t affinity_mask;
+ cpumask_t affinity_mask;
+ struct irq_affinity_notify affinity_notify;
};
/* Helper macros to switch between ints/sec and what the register uses.
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c b/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c
index a9940154eead..272d600c1ed0 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c
@@ -85,6 +85,14 @@ static int i40evf_get_settings(struct net_device *netdev,
case I40E_LINK_SPEED_40GB:
ethtool_cmd_speed_set(ecmd, SPEED_40000);
break;
+ case I40E_LINK_SPEED_25GB:
+#ifdef SPEED_25000
+ ethtool_cmd_speed_set(ecmd, SPEED_25000);
+#else
+ netdev_info(netdev,
+ "Speed is 25G, display not supported by this version of ethtool.\n");
+#endif
+ break;
case I40E_LINK_SPEED_20GB:
ethtool_cmd_speed_set(ecmd, SPEED_20000);
break;
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_main.c b/drivers/net/ethernet/intel/i40evf/i40evf_main.c
index 14372810fc27..c0fc53361800 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_main.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_main.c
@@ -38,7 +38,7 @@ static const char i40evf_driver_string[] =
#define DRV_VERSION_MAJOR 1
#define DRV_VERSION_MINOR 6
-#define DRV_VERSION_BUILD 16
+#define DRV_VERSION_BUILD 25
#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
__stringify(DRV_VERSION_MINOR) "." \
__stringify(DRV_VERSION_BUILD) \
@@ -207,6 +207,9 @@ static void i40evf_misc_irq_disable(struct i40evf_adapter *adapter)
{
struct i40e_hw *hw = &adapter->hw;
+ if (!adapter->msix_entries)
+ return;
+
wr32(hw, I40E_VFINT_DYN_CTL01, 0);
/* read flush */
@@ -496,6 +499,33 @@ static void i40evf_netpoll(struct net_device *netdev)
#endif
/**
+ * i40evf_irq_affinity_notify - Callback for affinity changes
+ * @notify: context as to what irq was changed
+ * @mask: the new affinity mask
+ *
+ * This is a callback function used by the irq_set_affinity_notifier function
+ * so that we may register to receive changes to the irq affinity masks.
+ **/
+static void i40evf_irq_affinity_notify(struct irq_affinity_notify *notify,
+ const cpumask_t *mask)
+{
+ struct i40e_q_vector *q_vector =
+ container_of(notify, struct i40e_q_vector, affinity_notify);
+
+ q_vector->affinity_mask = *mask;
+}
+
+/**
+ * i40evf_irq_affinity_release - Callback for affinity notifier release
+ * @ref: internal core kernel usage
+ *
+ * This is a callback function used by the irq_set_affinity_notifier function
+ * to inform the current notification subscriber that they will no longer
+ * receive notifications.
+ **/
+static void i40evf_irq_affinity_release(struct kref *ref) {}
+
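The empty release callback above is deliberate: the irq core keeps a kref on the notify structure and calls ->release() when the notifier is replaced or torn down, so the hook must exist; since the q_vector embedding it is freed through the driver's own teardown path, there is nothing left for the release to do.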
+/**
* i40evf_request_traffic_irqs - Initialize MSI-X interrupts
* @adapter: board private structure
*
@@ -507,6 +537,7 @@ i40evf_request_traffic_irqs(struct i40evf_adapter *adapter, char *basename)
{
int vector, err, q_vectors;
int rx_int_idx = 0, tx_int_idx = 0;
+ int irq_num;
i40evf_irq_disable(adapter);
/* Decrement for Other and TCP Timer vectors */
@@ -514,6 +545,7 @@ i40evf_request_traffic_irqs(struct i40evf_adapter *adapter, char *basename)
for (vector = 0; vector < q_vectors; vector++) {
struct i40e_q_vector *q_vector = &adapter->q_vectors[vector];
+ irq_num = adapter->msix_entries[vector + NONQ_VECS].vector;
if (q_vector->tx.ring && q_vector->rx.ring) {
snprintf(q_vector->name, sizeof(q_vector->name) - 1,
@@ -532,21 +564,23 @@ i40evf_request_traffic_irqs(struct i40evf_adapter *adapter, char *basename)
/* skip this unused q_vector */
continue;
}
- err = request_irq(
- adapter->msix_entries[vector + NONQ_VECS].vector,
- i40evf_msix_clean_rings,
- 0,
- q_vector->name,
- q_vector);
+ err = request_irq(irq_num,
+ i40evf_msix_clean_rings,
+ 0,
+ q_vector->name,
+ q_vector);
if (err) {
dev_info(&adapter->pdev->dev,
"Request_irq failed, error: %d\n", err);
goto free_queue_irqs;
}
+ /* register for affinity change notifications */
+ q_vector->affinity_notify.notify = i40evf_irq_affinity_notify;
+ q_vector->affinity_notify.release =
+ i40evf_irq_affinity_release;
+ irq_set_affinity_notifier(irq_num, &q_vector->affinity_notify);
/* assign the mask for this irq */
- irq_set_affinity_hint(
- adapter->msix_entries[vector + NONQ_VECS].vector,
- q_vector->affinity_mask);
+ irq_set_affinity_hint(irq_num, &q_vector->affinity_mask);
}
return 0;
@@ -554,11 +588,10 @@ i40evf_request_traffic_irqs(struct i40evf_adapter *adapter, char *basename)
free_queue_irqs:
while (vector) {
vector--;
- irq_set_affinity_hint(
- adapter->msix_entries[vector + NONQ_VECS].vector,
- NULL);
- free_irq(adapter->msix_entries[vector + NONQ_VECS].vector,
- &adapter->q_vectors[vector]);
+ irq_num = adapter->msix_entries[vector + NONQ_VECS].vector;
+ irq_set_affinity_notifier(irq_num, NULL);
+ irq_set_affinity_hint(irq_num, NULL);
+ free_irq(irq_num, &adapter->q_vectors[vector]);
}
return err;
}
@@ -599,16 +632,18 @@ static int i40evf_request_misc_irq(struct i40evf_adapter *adapter)
**/
static void i40evf_free_traffic_irqs(struct i40evf_adapter *adapter)
{
- int i;
- int q_vectors;
+ int vector, irq_num, q_vectors;
+
+ if (!adapter->msix_entries)
+ return;
q_vectors = adapter->num_msix_vectors - NONQ_VECS;
- for (i = 0; i < q_vectors; i++) {
- irq_set_affinity_hint(adapter->msix_entries[i+1].vector,
- NULL);
- free_irq(adapter->msix_entries[i+1].vector,
- &adapter->q_vectors[i]);
+ for (vector = 0; vector < q_vectors; vector++) {
+ irq_num = adapter->msix_entries[vector + NONQ_VECS].vector;
+ irq_set_affinity_notifier(irq_num, NULL);
+ irq_set_affinity_hint(irq_num, NULL);
+ free_irq(irq_num, &adapter->q_vectors[vector]);
}
}
@@ -622,6 +657,9 @@ static void i40evf_free_misc_irq(struct i40evf_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
+ if (!adapter->msix_entries)
+ return;
+
free_irq(adapter->msix_entries[0].vector, netdev);
}
@@ -1396,6 +1434,9 @@ static void i40evf_free_q_vectors(struct i40evf_adapter *adapter)
int q_idx, num_q_vectors;
int napi_vectors;
+ if (!adapter->q_vectors)
+ return;
+
num_q_vectors = adapter->num_msix_vectors - NONQ_VECS;
napi_vectors = adapter->num_active_queues;
@@ -1405,6 +1446,7 @@ static void i40evf_free_q_vectors(struct i40evf_adapter *adapter)
netif_napi_del(&q_vector->napi);
}
kfree(adapter->q_vectors);
+ adapter->q_vectors = NULL;
}
/**
@@ -1414,6 +1456,9 @@ static void i40evf_free_q_vectors(struct i40evf_adapter *adapter)
**/
void i40evf_reset_interrupt_capability(struct i40evf_adapter *adapter)
{
+ if (!adapter->msix_entries)
+ return;
+
pci_disable_msix(adapter->pdev);
kfree(adapter->msix_entries);
adapter->msix_entries = NULL;
@@ -1664,6 +1709,49 @@ restart_watchdog:
schedule_work(&adapter->adminq_task);
}
+static void i40evf_disable_vf(struct i40evf_adapter *adapter)
+{
+ struct i40evf_mac_filter *f, *ftmp;
+ struct i40evf_vlan_filter *fv, *fvtmp;
+
+ adapter->flags |= I40EVF_FLAG_PF_COMMS_FAILED;
+
+ if (netif_running(adapter->netdev)) {
+ set_bit(__I40E_DOWN, &adapter->vsi.state);
+ netif_carrier_off(adapter->netdev);
+ netif_tx_disable(adapter->netdev);
+ adapter->link_up = false;
+ i40evf_napi_disable_all(adapter);
+ i40evf_irq_disable(adapter);
+ i40evf_free_traffic_irqs(adapter);
+ i40evf_free_all_tx_resources(adapter);
+ i40evf_free_all_rx_resources(adapter);
+ }
+
+ /* Delete all of the filters, both MAC and VLAN. */
+ list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
+ list_del(&f->list);
+ kfree(f);
+ }
+
+ list_for_each_entry_safe(fv, fvtmp, &adapter->vlan_filter_list, list) {
+ list_del(&fv->list);
+ kfree(fv);
+ }
+
+ i40evf_free_misc_irq(adapter);
+ i40evf_reset_interrupt_capability(adapter);
+ i40evf_free_queues(adapter);
+ i40evf_free_q_vectors(adapter);
+ kfree(adapter->vf_res);
+ i40evf_shutdown_adminq(&adapter->hw);
+ adapter->netdev->flags &= ~IFF_UP;
+ clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
+ adapter->flags &= ~I40EVF_FLAG_RESET_PENDING;
+ adapter->state = __I40EVF_DOWN;
+ dev_info(&adapter->pdev->dev, "Reset task did not complete, VF disabled\n");
+}
+
#define I40EVF_RESET_WAIT_MS 10
#define I40EVF_RESET_WAIT_COUNT 500
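Taken together these constants bound the poll loop below at 500 x 10 ms: the driver waits up to roughly five seconds for I40E_VFR_VFACTIVE before concluding the reset never finished and calling i40evf_disable_vf().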
/**
@@ -1717,60 +1805,21 @@ static void i40evf_reset_task(struct work_struct *work)
/* wait until the reset is complete and the PF is responding to us */
for (i = 0; i < I40EVF_RESET_WAIT_COUNT; i++) {
+ /* sleep first to make sure a minimum wait time is met */
+ msleep(I40EVF_RESET_WAIT_MS);
+
reg_val = rd32(hw, I40E_VFGEN_RSTAT) &
I40E_VFGEN_RSTAT_VFR_STATE_MASK;
if (reg_val == I40E_VFR_VFACTIVE)
break;
- msleep(I40EVF_RESET_WAIT_MS);
}
+
pci_set_master(adapter->pdev);
- /* extra wait to make sure minimum wait is met */
- msleep(I40EVF_RESET_WAIT_MS);
- if (i == I40EVF_RESET_WAIT_COUNT) {
- struct i40evf_mac_filter *ftmp;
- struct i40evf_vlan_filter *fv, *fvtmp;
- /* reset never finished */
+ if (i == I40EVF_RESET_WAIT_COUNT) {
dev_err(&adapter->pdev->dev, "Reset never finished (%x)\n",
reg_val);
- adapter->flags |= I40EVF_FLAG_PF_COMMS_FAILED;
-
- if (netif_running(adapter->netdev)) {
- set_bit(__I40E_DOWN, &adapter->vsi.state);
- netif_carrier_off(netdev);
- netif_tx_disable(netdev);
- adapter->link_up = false;
- i40evf_napi_disable_all(adapter);
- i40evf_irq_disable(adapter);
- i40evf_free_traffic_irqs(adapter);
- i40evf_free_all_tx_resources(adapter);
- i40evf_free_all_rx_resources(adapter);
- }
-
- /* Delete all of the filters, both MAC and VLAN. */
- list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list,
- list) {
- list_del(&f->list);
- kfree(f);
- }
-
- list_for_each_entry_safe(fv, fvtmp, &adapter->vlan_filter_list,
- list) {
- list_del(&fv->list);
- kfree(fv);
- }
-
- i40evf_free_misc_irq(adapter);
- i40evf_reset_interrupt_capability(adapter);
- i40evf_free_queues(adapter);
- i40evf_free_q_vectors(adapter);
- kfree(adapter->vf_res);
- i40evf_shutdown_adminq(hw);
- adapter->netdev->flags &= ~IFF_UP;
- clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
- adapter->flags &= ~I40EVF_FLAG_RESET_PENDING;
- adapter->state = __I40EVF_DOWN;
- dev_info(&adapter->pdev->dev, "Reset task did not complete, VF disabled\n");
+ i40evf_disable_vf(adapter);
return; /* Do not attempt to reinit. It's dead, Jim. */
}
@@ -2133,10 +2182,6 @@ static struct net_device_stats *i40evf_get_stats(struct net_device *netdev)
static int i40evf_change_mtu(struct net_device *netdev, int new_mtu)
{
struct i40evf_adapter *adapter = netdev_priv(netdev);
- int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
-
- if ((new_mtu < 68) || (max_frame > I40E_MAX_RXBUFFER))
- return -EINVAL;
netdev->mtu = new_mtu;
adapter->flags |= I40EVF_FLAG_RESET_NEEDED;
@@ -2145,6 +2190,64 @@ static int i40evf_change_mtu(struct net_device *netdev, int new_mtu)
return 0;
}
+/**
+ * i40evf_features_check - Validate encapsulated packet conforms to limits
+ * @skb: skb buff
+ * @dev: This physical port's netdev
+ * @features: Offload features that the stack believes apply
+ **/
+static netdev_features_t i40evf_features_check(struct sk_buff *skb,
+ struct net_device *dev,
+ netdev_features_t features)
+{
+ size_t len;
+
+ /* No point in doing any of this if neither checksum nor GSO are
+ * being requested for this frame. We can rule out both by just
+ * checking for CHECKSUM_PARTIAL
+ */
+ if (skb->ip_summed != CHECKSUM_PARTIAL)
+ return features;
+
+ /* We cannot support GSO if the MSS is going to be less than
+ * 64 bytes. If it is then we need to drop support for GSO.
+ */
+ if (skb_is_gso(skb) && (skb_shinfo(skb)->gso_size < 64))
+ features &= ~NETIF_F_GSO_MASK;
+
+ /* MACLEN can support at most 63 words */
+ len = skb_network_header(skb) - skb->data;
+ if (len & ~(63 * 2))
+ goto out_err;
+
+ /* IPLEN and EIPLEN can support at most 127 dwords */
+ len = skb_transport_header(skb) - skb_network_header(skb);
+ if (len & ~(127 * 4))
+ goto out_err;
+
+ if (skb->encapsulation) {
+ /* L4TUNLEN can support at most 127 words */
+ len = skb_inner_network_header(skb) - skb_transport_header(skb);
+ if (len & ~(127 * 2))
+ goto out_err;
+
+ /* IPLEN can support at most 127 dwords */
+ len = skb_inner_transport_header(skb) -
+ skb_inner_network_header(skb);
+ if (len & ~(127 * 4))
+ goto out_err;
+ }
+
+ /* No need to validate L4LEN as TCP is the only protocol with
+ * a flexible value and we support all possible values supported
+ * by TCP, which is at most 15 dwords
+ */
+
+ return features;
+out_err:
+ return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
+}
+
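The length checks above use masking rather than range comparison; a standalone sketch of the MACLEN case (constants from the comments above, helper name invented):

#include <assert.h>

/* MACLEN is encoded in 2-byte words in a 6-bit descriptor field, so a
 * MAC header must be even and no longer than 63 * 2 = 126 bytes.
 * "len & ~(63 * 2)" is non-zero exactly when bit 0 is set (odd length)
 * or any bit at or above bit 7 is set (length >= 128).
 */
static int maclen_ok(unsigned long len)
{
	return (len & ~(63UL * 2)) == 0;
}

int main(void)
{
	assert(maclen_ok(14));		/* plain Ethernet header */
	assert(maclen_ok(126));		/* largest encodable length */
	assert(!maclen_ok(15));		/* odd lengths are not encodable */
	assert(!maclen_ok(128));	/* too long for 6 bits of words */
	return 0;
}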
#define I40EVF_VLAN_FEATURES (NETIF_F_HW_VLAN_CTAG_TX |\
NETIF_F_HW_VLAN_CTAG_RX |\
NETIF_F_HW_VLAN_CTAG_FILTER)
@@ -2179,6 +2282,7 @@ static const struct net_device_ops i40evf_netdev_ops = {
.ndo_tx_timeout = i40evf_tx_timeout,
.ndo_vlan_rx_add_vid = i40evf_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = i40evf_vlan_rx_kill_vid,
+ .ndo_features_check = i40evf_features_check,
.ndo_fix_features = i40evf_fix_features,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = i40evf_netpoll,
@@ -2424,6 +2528,10 @@ static void i40evf_init_task(struct work_struct *work)
i40evf_set_ethtool_ops(netdev);
netdev->watchdog_timeo = 5 * HZ;
+ /* MTU range: 68 - 9710 */
+ netdev->min_mtu = ETH_MIN_MTU;
+ netdev->max_mtu = I40E_MAX_RXBUFFER - (ETH_HLEN + ETH_FCS_LEN);
+
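This pattern repeats across the series: each driver drops its private range check from ndo_change_mtu() (as in the hunk above) and instead publishes the range, because the networking core now rejects out-of-range values before the driver callback runs. Roughly, the core-side check looks like this (a sketch, not code from this patch):

/* Sketch of the validation dev_set_mtu() performs before calling
 * ->ndo_change_mtu(); out-of-range values never reach the driver.
 */
if (new_mtu < dev->min_mtu || new_mtu > dev->max_mtu)
	return -EINVAL;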
if (!is_valid_ether_addr(adapter->hw.mac.addr)) {
dev_info(&pdev->dev, "Invalid MAC address %pM, using random\n",
adapter->hw.mac.addr);
@@ -2764,12 +2872,10 @@ static void i40evf_remove(struct pci_dev *pdev)
msleep(50);
}
- if (adapter->msix_entries) {
- i40evf_misc_irq_disable(adapter);
- i40evf_free_misc_irq(adapter);
- i40evf_reset_interrupt_capability(adapter);
- i40evf_free_q_vectors(adapter);
- }
+ i40evf_misc_irq_disable(adapter);
+ i40evf_free_misc_irq(adapter);
+ i40evf_reset_interrupt_capability(adapter);
+ i40evf_free_q_vectors(adapter);
if (adapter->watchdog_timer.function)
del_timer_sync(&adapter->watchdog_timer);
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
index ddf478d6322b..2059a8e88908 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
@@ -836,6 +836,9 @@ static void i40evf_print_link_message(struct i40evf_adapter *adapter)
case I40E_LINK_SPEED_40GB:
speed = "40 G";
break;
+ case I40E_LINK_SPEED_25GB:
+ speed = "25 G";
+ break;
case I40E_LINK_SPEED_20GB:
speed = "20 G";
break;
diff --git a/drivers/net/ethernet/intel/igb/e1000_defines.h b/drivers/net/ethernet/intel/igb/e1000_defines.h
index 2688180a7acd..8aee314332a8 100644
--- a/drivers/net/ethernet/intel/igb/e1000_defines.h
+++ b/drivers/net/ethernet/intel/igb/e1000_defines.h
@@ -357,7 +357,8 @@
#define ETHERNET_IEEE_VLAN_TYPE 0x8100 /* 802.3ac packet */
/* As per the EAS the maximum supported size is 9.5KB (9728 bytes) */
-#define MAX_JUMBO_FRAME_SIZE 0x2600
+#define MAX_JUMBO_FRAME_SIZE 0x2600
+#define MAX_STD_JUMBO_FRAME_SIZE 9216
/* PBA constants */
#define E1000_PBA_34K 0x0022
diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h
index d11093dce1b9..acbc3abe2ddd 100644
--- a/drivers/net/ethernet/intel/igb/igb.h
+++ b/drivers/net/ethernet/intel/igb/igb.h
@@ -210,7 +210,12 @@ struct igb_tx_buffer {
struct igb_rx_buffer {
dma_addr_t dma;
struct page *page;
- unsigned int page_offset;
+#if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536)
+ __u32 page_offset;
+#else
+ __u16 page_offset;
+#endif
+ __u16 pagecnt_bias;
};
struct igb_tx_queue_stats {
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index 9affd7c198bd..a761001308dc 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -2468,6 +2468,10 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
netdev->priv_flags |= IFF_UNICAST_FLT;
+ /* MTU range: 68 - 9216 */
+ netdev->min_mtu = ETH_MIN_MTU;
+ netdev->max_mtu = MAX_STD_JUMBO_FRAME_SIZE;
+
adapter->en_mng_pt = igb_enable_mng_pass_thru(hw);
/* before reading the NVM, reset the controller to put the device in a
@@ -3943,11 +3947,23 @@ static void igb_clean_rx_ring(struct igb_ring *rx_ring)
if (!buffer_info->page)
continue;
- dma_unmap_page(rx_ring->dev,
- buffer_info->dma,
- PAGE_SIZE,
- DMA_FROM_DEVICE);
- __free_page(buffer_info->page);
+ /* Invalidate cache lines that may have been written to by the
+ * device so that we avoid corrupting memory.
+ */
+ dma_sync_single_range_for_cpu(rx_ring->dev,
+ buffer_info->dma,
+ buffer_info->page_offset,
+ IGB_RX_BUFSZ,
+ DMA_FROM_DEVICE);
+
+ /* free resources associated with mapping */
+ dma_unmap_page_attrs(rx_ring->dev,
+ buffer_info->dma,
+ PAGE_SIZE,
+ DMA_FROM_DEVICE,
+ DMA_ATTR_SKIP_CPU_SYNC);
+ __page_frag_drain(buffer_info->page, 0,
+ buffer_info->pagecnt_bias);
buffer_info->page = NULL;
}
@@ -5412,17 +5428,6 @@ static int igb_change_mtu(struct net_device *netdev, int new_mtu)
struct pci_dev *pdev = adapter->pdev;
int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
- if ((new_mtu < 68) || (max_frame > MAX_JUMBO_FRAME_SIZE)) {
- dev_err(&pdev->dev, "Invalid MTU setting\n");
- return -EINVAL;
- }
-
-#define MAX_STD_JUMBO_FRAME_SIZE 9238
- if (max_frame > MAX_STD_JUMBO_FRAME_SIZE) {
- dev_err(&pdev->dev, "MTU > 9216 not supported.\n");
- return -EINVAL;
- }
-
/* adjust max frame to be at least the size of a standard frame */
if (max_frame < (ETH_FRAME_LEN + ETH_FCS_LEN))
max_frame = ETH_FRAME_LEN + ETH_FCS_LEN;
@@ -6819,12 +6824,6 @@ static void igb_reuse_rx_page(struct igb_ring *rx_ring,
/* transfer page from old buffer to new buffer */
*new_buff = *old_buff;
-
- /* sync the buffer for use by the device */
- dma_sync_single_range_for_device(rx_ring->dev, old_buff->dma,
- old_buff->page_offset,
- IGB_RX_BUFSZ,
- DMA_FROM_DEVICE);
}
static inline bool igb_page_is_reserved(struct page *page)
@@ -6836,13 +6835,15 @@ static bool igb_can_reuse_rx_page(struct igb_rx_buffer *rx_buffer,
struct page *page,
unsigned int truesize)
{
+ unsigned int pagecnt_bias = rx_buffer->pagecnt_bias--;
+
/* avoid re-using remote pages */
if (unlikely(igb_page_is_reserved(page)))
return false;
#if (PAGE_SIZE < 8192)
/* if we are only owner of page we can reuse it */
- if (unlikely(page_count(page) != 1))
+ if (unlikely(page_ref_count(page) != pagecnt_bias))
return false;
/* flip page offset to other buffer */
@@ -6855,10 +6856,14 @@ static bool igb_can_reuse_rx_page(struct igb_rx_buffer *rx_buffer,
return false;
#endif
- /* Even if we own the page, we are not allowed to use atomic_set()
- * This would break get_page_unless_zero() users.
+ /* If we have drained the page fragment pool we need to update
+ * the pagecnt_bias and page count so that we fully restock the
+ * number of references the driver holds.
*/
- page_ref_inc(page);
+ if (unlikely(pagecnt_bias == 1)) {
+ page_ref_add(page, USHRT_MAX);
+ rx_buffer->pagecnt_bias = USHRT_MAX;
+ }
return true;
}
@@ -6910,7 +6915,6 @@ static bool igb_add_rx_frag(struct igb_ring *rx_ring,
return true;
/* this page cannot be reused so discard it */
- __free_page(page);
return false;
}
@@ -6945,6 +6949,13 @@ static struct sk_buff *igb_fetch_rx_buffer(struct igb_ring *rx_ring,
page = rx_buffer->page;
prefetchw(page);
+ /* we are reusing so sync this buffer for CPU use */
+ dma_sync_single_range_for_cpu(rx_ring->dev,
+ rx_buffer->dma,
+ rx_buffer->page_offset,
+ size,
+ DMA_FROM_DEVICE);
+
if (likely(!skb)) {
void *page_addr = page_address(page) +
rx_buffer->page_offset;
@@ -6969,21 +6980,18 @@ static struct sk_buff *igb_fetch_rx_buffer(struct igb_ring *rx_ring,
prefetchw(skb->data);
}
- /* we are reusing so sync this buffer for CPU use */
- dma_sync_single_range_for_cpu(rx_ring->dev,
- rx_buffer->dma,
- rx_buffer->page_offset,
- size,
- DMA_FROM_DEVICE);
-
/* pull page into skb */
if (igb_add_rx_frag(rx_ring, rx_buffer, size, rx_desc, skb)) {
/* hand second half of page back to the ring */
igb_reuse_rx_page(rx_ring, rx_buffer);
} else {
- /* we are not reusing the buffer so unmap it */
- dma_unmap_page(rx_ring->dev, rx_buffer->dma,
- PAGE_SIZE, DMA_FROM_DEVICE);
+ /* We are not reusing the buffer so unmap it and free
+ * any references we are holding to it
+ */
+ dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma,
+ PAGE_SIZE, DMA_FROM_DEVICE,
+ DMA_ATTR_SKIP_CPU_SYNC);
+ __page_frag_drain(page, 0, rx_buffer->pagecnt_bias);
}
/* clear contents of rx_buffer */
@@ -7241,7 +7249,8 @@ static bool igb_alloc_mapped_page(struct igb_ring *rx_ring,
}
/* map page for use */
- dma = dma_map_page(rx_ring->dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
+ dma = dma_map_page_attrs(rx_ring->dev, page, 0, PAGE_SIZE,
+ DMA_FROM_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
/* if mapping failed free memory back to system since
* there isn't much point in holding memory we can't use
@@ -7256,6 +7265,7 @@ static bool igb_alloc_mapped_page(struct igb_ring *rx_ring,
bi->dma = dma;
bi->page = page;
bi->page_offset = 0;
+ bi->pagecnt_bias = 1;
return true;
}
@@ -7282,6 +7292,12 @@ void igb_alloc_rx_buffers(struct igb_ring *rx_ring, u16 cleaned_count)
if (!igb_alloc_mapped_page(rx_ring, bi))
break;
+ /* sync the buffer for use by the device */
+ dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
+ bi->page_offset,
+ IGB_RX_BUFSZ,
+ DMA_FROM_DEVICE);
+
/* Refresh the desc even if buffer_addrs didn't change
* because each write-back erases this info.
*/
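Taken together, the igb hunks in this file switch to an explicit DMA ownership handoff: map once without an implicit sync, sync only the region the hardware actually owns, and skip the sync again at unmap. Condensed from the calls above (dev, page, dma, offset and size are placeholders):

/* map once, deferring all cache maintenance to explicit syncs */
dma = dma_map_page_attrs(dev, page, 0, PAGE_SIZE,
			 DMA_FROM_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);

/* before re-arming a descriptor: hand the buffer to the device */
dma_sync_single_range_for_device(dev, dma, offset, IGB_RX_BUFSZ,
				 DMA_FROM_DEVICE);

/* after the device wrote a frame: hand that region back to the CPU */
dma_sync_single_range_for_cpu(dev, dma, offset, size, DMA_FROM_DEVICE);

/* teardown: unmap without a CPU sync, so a page the stack may still
 * be reading is not clobbered by cache invalidation
 */
dma_unmap_page_attrs(dev, dma, PAGE_SIZE, DMA_FROM_DEVICE,
		     DMA_ATTR_SKIP_CPU_SYNC);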
diff --git a/drivers/net/ethernet/intel/igb/igb_ptp.c b/drivers/net/ethernet/intel/igb/igb_ptp.c
index a7895c4cbcc3..c30eea8399a7 100644
--- a/drivers/net/ethernet/intel/igb/igb_ptp.c
+++ b/drivers/net/ethernet/intel/igb/igb_ptp.c
@@ -226,7 +226,7 @@ static int igb_ptp_adjfreq_82576(struct ptp_clock_info *ptp, s32 ppb)
return 0;
}
-static int igb_ptp_adjfreq_82580(struct ptp_clock_info *ptp, s32 ppb)
+static int igb_ptp_adjfine_82580(struct ptp_clock_info *ptp, long scaled_ppm)
{
struct igb_adapter *igb = container_of(ptp, struct igb_adapter,
ptp_caps);
@@ -235,13 +235,13 @@ static int igb_ptp_adjfreq_82580(struct ptp_clock_info *ptp, s32 ppb)
u64 rate;
u32 inca;
- if (ppb < 0) {
+ if (scaled_ppm < 0) {
neg_adj = 1;
- ppb = -ppb;
+ scaled_ppm = -scaled_ppm;
}
- rate = ppb;
- rate <<= 26;
- rate = div_u64(rate, 1953125);
+ rate = scaled_ppm;
+ rate <<= 13;
+ rate = div_u64(rate, 15625);
inca = rate & INCVALUE_MASK;
if (neg_adj)
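The constant swap above is an exact rescaling, not a behavioural change: adjfine receives scaled_ppm in units of 2^-16 ppm, so ppb = scaled_ppm * 1000 / 2^16, and substituting that into the old (ppb << 26) / 1953125 simplifies to (scaled_ppm << 13) / 15625. A userspace check of the identity (range chosen to stay inside max_adj):

#include <assert.h>
#include <stdint.h>

/* Identity check: for any whole-ppm adjustment, the old ppb-based
 * formula and the new scaled_ppm-based formula produce the same
 * value (both are floor(ppm * 2^29 / 15625)).
 */
int main(void)
{
	int64_t ppm;

	for (ppm = 0; ppm <= 62499; ppm++) {	/* max_adj is 62499999 ppb */
		uint64_t old_rate = ((uint64_t)(ppm * 1000) << 26) / 1953125;
		uint64_t new_rate = (((uint64_t)ppm << 16) << 13) / 15625;

		assert(old_rate == new_rate);
	}
	return 0;
}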
@@ -1103,7 +1103,7 @@ void igb_ptp_init(struct igb_adapter *adapter)
adapter->ptp_caps.max_adj = 62499999;
adapter->ptp_caps.n_ext_ts = 0;
adapter->ptp_caps.pps = 0;
- adapter->ptp_caps.adjfreq = igb_ptp_adjfreq_82580;
+ adapter->ptp_caps.adjfine = igb_ptp_adjfine_82580;
adapter->ptp_caps.adjtime = igb_ptp_adjtime_82576;
adapter->ptp_caps.gettime64 = igb_ptp_gettime_82576;
adapter->ptp_caps.settime64 = igb_ptp_settime_82576;
@@ -1131,7 +1131,7 @@ void igb_ptp_init(struct igb_adapter *adapter)
adapter->ptp_caps.n_pins = IGB_N_SDP;
adapter->ptp_caps.pps = 1;
adapter->ptp_caps.pin_config = adapter->sdp_config;
- adapter->ptp_caps.adjfreq = igb_ptp_adjfreq_82580;
+ adapter->ptp_caps.adjfine = igb_ptp_adjfine_82580;
adapter->ptp_caps.adjtime = igb_ptp_adjtime_i210;
adapter->ptp_caps.gettime64 = igb_ptp_gettime_i210;
adapter->ptp_caps.settime64 = igb_ptp_settime_i210;
diff --git a/drivers/net/ethernet/intel/igbvf/defines.h b/drivers/net/ethernet/intel/igbvf/defines.h
index ee1ef08d7fc4..f1789d192e24 100644
--- a/drivers/net/ethernet/intel/igbvf/defines.h
+++ b/drivers/net/ethernet/intel/igbvf/defines.h
@@ -85,7 +85,8 @@
#define E1000_TXD_CMD_DEXT 0x20000000 /* Desc extension (0 = legacy) */
#define E1000_TXD_STAT_DD 0x00000001 /* Desc Done */
-#define MAX_JUMBO_FRAME_SIZE 0x3F00
+#define MAX_JUMBO_FRAME_SIZE 0x3F00
+#define MAX_STD_JUMBO_FRAME_SIZE 9216
/* 802.1q VLAN Packet Size */
#define VLAN_TAG_SIZE 4 /* 802.3ac tag (not DMA'd) */
diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c
index 7dff7f6239cd..839ba110f7fb 100644
--- a/drivers/net/ethernet/intel/igbvf/netdev.c
+++ b/drivers/net/ethernet/intel/igbvf/netdev.c
@@ -2360,16 +2360,6 @@ static int igbvf_change_mtu(struct net_device *netdev, int new_mtu)
struct igbvf_adapter *adapter = netdev_priv(netdev);
int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
- if (new_mtu < 68 || new_mtu > INT_MAX - ETH_HLEN - ETH_FCS_LEN ||
- max_frame > MAX_JUMBO_FRAME_SIZE)
- return -EINVAL;
-
-#define MAX_STD_JUMBO_FRAME_SIZE 9234
- if (max_frame > MAX_STD_JUMBO_FRAME_SIZE) {
- dev_err(&adapter->pdev->dev, "MTU > 9216 not supported.\n");
- return -EINVAL;
- }
-
while (test_and_set_bit(__IGBVF_RESETTING, &adapter->state))
usleep_range(1000, 2000);
/* igbvf_down has a dependency on max_frame_size */
@@ -2790,6 +2780,10 @@ static int igbvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
NETIF_F_HW_VLAN_CTAG_RX |
NETIF_F_HW_VLAN_CTAG_TX;
+ /* MTU range: 68 - 9216 */
+ netdev->min_mtu = ETH_MIN_MTU;
+ netdev->max_mtu = MAX_STD_JUMBO_FRAME_SIZE;
+
/*reset the controller to put the device in a known good state */
err = hw->mac.ops.reset_hw(hw);
if (err) {
diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_main.c b/drivers/net/ethernet/intel/ixgb/ixgb_main.c
index 31f91459312f..5826b1ddedcf 100644
--- a/drivers/net/ethernet/intel/ixgb/ixgb_main.c
+++ b/drivers/net/ethernet/intel/ixgb/ixgb_main.c
@@ -487,6 +487,10 @@ ixgb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
netdev->vlan_features |= NETIF_F_HIGHDMA;
}
+ /* MTU range: 68 - 16114 */
+ netdev->min_mtu = ETH_MIN_MTU;
+ netdev->max_mtu = IXGB_MAX_JUMBO_FRAME_SIZE - ETH_HLEN;
+
/* make sure the EEPROM is good */
if (!ixgb_validate_eeprom_checksum(&adapter->hw)) {
@@ -1619,18 +1623,6 @@ ixgb_change_mtu(struct net_device *netdev, int new_mtu)
{
struct ixgb_adapter *adapter = netdev_priv(netdev);
int max_frame = new_mtu + ENET_HEADER_SIZE + ENET_FCS_LENGTH;
- int old_max_frame = netdev->mtu + ENET_HEADER_SIZE + ENET_FCS_LENGTH;
-
- /* MTU < 68 is an error for IPv4 traffic, just don't allow it */
- if ((new_mtu < 68) ||
- (max_frame > IXGB_MAX_JUMBO_FRAME_SIZE + ENET_FCS_LENGTH)) {
- netif_err(adapter, probe, adapter->netdev,
- "Invalid MTU setting %d\n", new_mtu);
- return -EINVAL;
- }
-
- if (old_max_frame == max_frame)
- return 0;
if (netif_running(netdev))
ixgb_down(adapter, true);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
index b06e32d0d22a..ef81c3d8c295 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
@@ -1027,4 +1027,6 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
struct ixgbe_ring *tx_ring);
u32 ixgbe_rss_indir_tbl_entries(struct ixgbe_adapter *adapter);
void ixgbe_store_reta(struct ixgbe_adapter *adapter);
+s32 ixgbe_negotiate_fc(struct ixgbe_hw *hw, u32 adv_reg, u32 lp_reg,
+ u32 adv_sym, u32 adv_asm, u32 lp_sym, u32 lp_asm);
#endif /* _IXGBE_H_ */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
index fb51be74dd4c..805ab319e578 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
@@ -367,7 +367,7 @@ static s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw)
}
/* Negotiate the fc mode to use */
- ixgbe_fc_autoneg(hw);
+ hw->mac.ops.fc_autoneg(hw);
/* Disable any previous flow control settings */
fctrl_reg = IXGBE_READ_REG(hw, IXGBE_FCTRL);
@@ -1179,6 +1179,7 @@ static const struct ixgbe_mac_operations mac_ops_82598 = {
.get_link_capabilities = &ixgbe_get_link_capabilities_82598,
.led_on = &ixgbe_led_on_generic,
.led_off = &ixgbe_led_off_generic,
+ .init_led_link_act = ixgbe_init_led_link_act_generic,
.blink_led_start = &ixgbe_blink_led_start_generic,
.blink_led_stop = &ixgbe_blink_led_stop_generic,
.set_rar = &ixgbe_set_rar_generic,
@@ -1193,6 +1194,7 @@ static const struct ixgbe_mac_operations mac_ops_82598 = {
.set_vfta = &ixgbe_set_vfta_82598,
.fc_enable = &ixgbe_fc_enable_82598,
.setup_fc = ixgbe_setup_fc_generic,
+ .fc_autoneg = ixgbe_fc_autoneg,
.set_fw_drv_ver = NULL,
.acquire_swfw_sync = &ixgbe_acquire_swfw_sync,
.release_swfw_sync = &ixgbe_release_swfw_sync,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
index 63b25006ac90..e00aaeb91827 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
@@ -2204,6 +2204,7 @@ static const struct ixgbe_mac_operations mac_ops_82599 = {
.get_link_capabilities = &ixgbe_get_link_capabilities_82599,
.led_on = &ixgbe_led_on_generic,
.led_off = &ixgbe_led_off_generic,
+ .init_led_link_act = ixgbe_init_led_link_act_generic,
.blink_led_start = &ixgbe_blink_led_start_generic,
.blink_led_stop = &ixgbe_blink_led_stop_generic,
.set_rar = &ixgbe_set_rar_generic,
@@ -2219,6 +2220,7 @@ static const struct ixgbe_mac_operations mac_ops_82599 = {
.set_vfta = &ixgbe_set_vfta_generic,
.fc_enable = &ixgbe_fc_enable_generic,
.setup_fc = ixgbe_setup_fc_generic,
+ .fc_autoneg = ixgbe_fc_autoneg,
.set_fw_drv_ver = &ixgbe_set_fw_drv_ver_generic,
.init_uta_tables = &ixgbe_init_uta_tables_generic,
.setup_sfp = &ixgbe_setup_sfp_modules_82599,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
index 77d3039283f6..8832df3eba25 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
@@ -298,10 +298,12 @@ s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw)
IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
IXGBE_WRITE_FLUSH(hw);
- /* Setup flow control */
- ret_val = hw->mac.ops.setup_fc(hw);
- if (ret_val)
- return ret_val;
+ /* Set up flow control if a method exists for doing so */
+ if (hw->mac.ops.setup_fc) {
+ ret_val = hw->mac.ops.setup_fc(hw);
+ if (ret_val)
+ return ret_val;
+ }
/* Cashe bit indicating need for crosstalk fix */
switch (hw->mac.type) {
@@ -390,6 +392,9 @@ s32 ixgbe_init_hw_generic(struct ixgbe_hw *hw)
status = hw->mac.ops.start_hw(hw);
}
+ /* Initialize the LED link active for LED blink support */
+ hw->mac.ops.init_led_link_act(hw);
+
return status;
}
@@ -773,6 +778,49 @@ s32 ixgbe_stop_adapter_generic(struct ixgbe_hw *hw)
}
/**
+ * ixgbe_init_led_link_act_generic - Store the LED index for link/activity.
+ * @hw: pointer to hardware structure
+ *
+ * Store the index for the link active LED. This will be used to support
+ * blinking the LED.
+ **/
+s32 ixgbe_init_led_link_act_generic(struct ixgbe_hw *hw)
+{
+ struct ixgbe_mac_info *mac = &hw->mac;
+ u32 led_reg, led_mode;
+ u16 i;
+
+ led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
+
+ /* Get LED link active from the LEDCTL register */
+ for (i = 0; i < 4; i++) {
+ led_mode = led_reg >> IXGBE_LED_MODE_SHIFT(i);
+
+ if ((led_mode & IXGBE_LED_MODE_MASK_BASE) ==
+ IXGBE_LED_LINK_ACTIVE) {
+ mac->led_link_act = i;
+ return 0;
+ }
+ }
+
+ /* If the LEDCTL register does not have the LED link active set, then use
+ * known MAC defaults.
+ */
+ switch (hw->mac.type) {
+ case ixgbe_mac_x550em_a:
+ mac->led_link_act = 0;
+ break;
+ case ixgbe_mac_X550EM_x:
+ mac->led_link_act = 1;
+ break;
+ default:
+ mac->led_link_act = 2;
+ }
+
+ return 0;
+}
+
+/**
* ixgbe_led_on_generic - Turns on the software controllable LEDs.
* @hw: pointer to hardware structure
* @index: led number to turn on
@@ -2127,7 +2175,7 @@ s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw)
}
/* Negotiate the fc mode to use */
- ixgbe_fc_autoneg(hw);
+ hw->mac.ops.fc_autoneg(hw);
/* Disable any previous flow control settings */
mflcn_reg = IXGBE_READ_REG(hw, IXGBE_MFLCN);
@@ -2231,8 +2279,8 @@ s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw)
* Find the intersection between advertised settings and link partner's
* advertised settings
**/
-static s32 ixgbe_negotiate_fc(struct ixgbe_hw *hw, u32 adv_reg, u32 lp_reg,
- u32 adv_sym, u32 adv_asm, u32 lp_sym, u32 lp_asm)
+s32 ixgbe_negotiate_fc(struct ixgbe_hw *hw, u32 adv_reg, u32 lp_reg,
+ u32 adv_sym, u32 adv_asm, u32 lp_sym, u32 lp_asm)
{
if ((!(adv_reg)) || (!(lp_reg)))
return IXGBE_ERR_FC_NOT_NEGOTIATED;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
index 6d4c260d0cbd..5b3e3c65927e 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
@@ -49,6 +49,7 @@ s32 ixgbe_stop_adapter_generic(struct ixgbe_hw *hw);
s32 ixgbe_led_on_generic(struct ixgbe_hw *hw, u32 index);
s32 ixgbe_led_off_generic(struct ixgbe_hw *hw, u32 index);
+s32 ixgbe_init_led_link_act_generic(struct ixgbe_hw *hw);
s32 ixgbe_init_eeprom_params_generic(struct ixgbe_hw *hw);
s32 ixgbe_write_eeprom_generic(struct ixgbe_hw *hw, u16 offset, u16 data);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
index f49f80380aa5..fd192bf29b26 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
@@ -2225,11 +2225,11 @@ static int ixgbe_set_phys_id(struct net_device *netdev,
return 2;
case ETHTOOL_ID_ON:
- hw->mac.ops.led_on(hw, hw->bus.func);
+ hw->mac.ops.led_on(hw, hw->mac.led_link_act);
break;
case ETHTOOL_ID_OFF:
- hw->mac.ops.led_off(hw, hw->bus.func);
+ hw->mac.ops.led_off(hw, hw->mac.led_link_act);
break;
case ETHTOOL_ID_INACTIVE:
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index fee1f2918ead..1e2f39ebd824 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -54,6 +54,7 @@
#include <net/pkt_cls.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_mirred.h>
+#include <net/vxlan.h>
#include "ixgbe.h"
#include "ixgbe_common.h"
@@ -3070,6 +3071,9 @@ static void ixgbe_free_irq(struct ixgbe_adapter *adapter)
return;
}
+ if (!adapter->msix_entries)
+ return;
+
for (vector = 0; vector < adapter->num_q_vectors; vector++) {
struct ixgbe_q_vector *q_vector = adapter->q_vector[vector];
struct msix_entry *entry = &adapter->msix_entries[vector];
@@ -5012,24 +5016,23 @@ fwd_queue_err:
return err;
}
-static void ixgbe_configure_dfwd(struct ixgbe_adapter *adapter)
+static int ixgbe_upper_dev_walk(struct net_device *upper, void *data)
{
- struct net_device *upper;
- struct list_head *iter;
- int err;
+ if (netif_is_macvlan(upper)) {
+ struct macvlan_dev *dfwd = netdev_priv(upper);
+ struct ixgbe_fwd_adapter *vadapter = dfwd->fwd_priv;
- netdev_for_each_all_upper_dev_rcu(adapter->netdev, upper, iter) {
- if (netif_is_macvlan(upper)) {
- struct macvlan_dev *dfwd = netdev_priv(upper);
- struct ixgbe_fwd_adapter *vadapter = dfwd->fwd_priv;
-
- if (dfwd->fwd_priv) {
- err = ixgbe_fwd_ring_up(upper, vadapter);
- if (err)
- continue;
- }
- }
+ if (dfwd->fwd_priv)
+ ixgbe_fwd_ring_up(upper, vadapter);
}
+
+ return 0;
+}
+
+static void ixgbe_configure_dfwd(struct ixgbe_adapter *adapter)
+{
+ netdev_walk_all_upper_dev_rcu(adapter->netdev,
+ ixgbe_upper_dev_walk, NULL);
}
static void ixgbe_configure(struct ixgbe_adapter *adapter)
@@ -5448,12 +5451,25 @@ static void ixgbe_fdir_filter_exit(struct ixgbe_adapter *adapter)
spin_unlock(&adapter->fdir_perfect_lock);
}
+static int ixgbe_disable_macvlan(struct net_device *upper, void *data)
+{
+ if (netif_is_macvlan(upper)) {
+ struct macvlan_dev *vlan = netdev_priv(upper);
+
+ if (vlan->fwd_priv) {
+ netif_tx_stop_all_queues(upper);
+ netif_carrier_off(upper);
+ netif_tx_disable(upper);
+ }
+ }
+
+ return 0;
+}
+
void ixgbe_down(struct ixgbe_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
struct ixgbe_hw *hw = &adapter->hw;
- struct net_device *upper;
- struct list_head *iter;
int i;
/* signal that we are down to the interrupt handler */
@@ -5477,17 +5493,8 @@ void ixgbe_down(struct ixgbe_adapter *adapter)
netif_tx_disable(netdev);
/* disable any upper devices */
- netdev_for_each_all_upper_dev_rcu(adapter->netdev, upper, iter) {
- if (netif_is_macvlan(upper)) {
- struct macvlan_dev *vlan = netdev_priv(upper);
-
- if (vlan->fwd_priv) {
- netif_tx_stop_all_queues(upper);
- netif_carrier_off(upper);
- netif_tx_disable(upper);
- }
- }
- }
+ netdev_walk_all_upper_dev_rcu(adapter->netdev,
+ ixgbe_disable_macvlan, NULL);
ixgbe_irq_disable(adapter);
@@ -5618,7 +5625,8 @@ static void ixgbe_init_dcb(struct ixgbe_adapter *adapter)
* Fields are initialized based on PCI device information and
* OS network device settings (MTU size).
**/
-static int ixgbe_sw_init(struct ixgbe_adapter *adapter)
+static int ixgbe_sw_init(struct ixgbe_adapter *adapter,
+ const struct ixgbe_info *ii)
{
struct ixgbe_hw *hw = &adapter->hw;
struct pci_dev *pdev = adapter->pdev;
@@ -5634,6 +5642,9 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter)
hw->subsystem_vendor_id = pdev->subsystem_vendor;
hw->subsystem_device_id = pdev->subsystem_device;
+ /* get_invariants needs the device IDs */
+ ii->get_invariants(hw);
+
/* Set common capability flags and settings */
rss = min_t(int, ixgbe_max_rss_indices(adapter), num_online_cpus());
adapter->ring_feature[RING_F_RSS].limit = rss;
@@ -6049,11 +6060,6 @@ static void ixgbe_free_all_rx_resources(struct ixgbe_adapter *adapter)
static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
- int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
-
- /* MTU < 68 is an error and causes problems on some kernels */
- if ((new_mtu < 68) || (max_frame > IXGBE_MAX_JUMBO_FRAME_SIZE))
- return -EINVAL;
/*
* For 82599EB we cannot allow legacy VFs to enable their receive
@@ -6062,7 +6068,7 @@ static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu)
*/
if ((adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) &&
(adapter->hw.mac.type == ixgbe_mac_82599EB) &&
- (max_frame > (ETH_FRAME_LEN + ETH_FCS_LEN)))
+ (new_mtu > ETH_DATA_LEN))
e_warn(probe, "Setting MTU > 1500 will disable legacy VFs\n");
e_info(probe, "changing MTU from %d to %d\n", netdev->mtu, new_mtu);
@@ -6728,6 +6734,18 @@ static void ixgbe_update_default_up(struct ixgbe_adapter *adapter)
#endif
}
+static int ixgbe_enable_macvlan(struct net_device *upper, void *data)
+{
+ if (netif_is_macvlan(upper)) {
+ struct macvlan_dev *vlan = netdev_priv(upper);
+
+ if (vlan->fwd_priv)
+ netif_tx_wake_all_queues(upper);
+ }
+
+ return 0;
+}
+
/**
* ixgbe_watchdog_link_is_up - update netif_carrier status and
* print link up message
@@ -6737,8 +6755,6 @@ static void ixgbe_watchdog_link_is_up(struct ixgbe_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
struct ixgbe_hw *hw = &adapter->hw;
- struct net_device *upper;
- struct list_head *iter;
u32 link_speed = adapter->link_speed;
const char *speed_str;
bool flow_rx, flow_tx;
@@ -6809,14 +6825,8 @@ static void ixgbe_watchdog_link_is_up(struct ixgbe_adapter *adapter)
/* enable any upper devices */
rtnl_lock();
- netdev_for_each_all_upper_dev_rcu(adapter->netdev, upper, iter) {
- if (netif_is_macvlan(upper)) {
- struct macvlan_dev *vlan = netdev_priv(upper);
-
- if (vlan->fwd_priv)
- netif_tx_wake_all_queues(upper);
- }
- }
+ netdev_walk_all_upper_dev_rcu(adapter->netdev,
+ ixgbe_enable_macvlan, NULL);
rtnl_unlock();
/* update the default user priority for VFs */
@@ -7655,11 +7665,17 @@ static void ixgbe_atr(struct ixgbe_ring *ring,
/* snag network header to get L4 type and address */
skb = first->skb;
hdr.network = skb_network_header(skb);
+ if (unlikely(hdr.network <= skb->data))
+ return;
if (skb->encapsulation &&
first->protocol == htons(ETH_P_IP) &&
- hdr.ipv4->protocol != IPPROTO_UDP) {
+ hdr.ipv4->protocol == IPPROTO_UDP) {
struct ixgbe_adapter *adapter = q_vector->adapter;
+ if (unlikely(skb_tail_pointer(skb) < hdr.network +
+ VXLAN_HEADROOM))
+ return;
+
/* verify the port is recognized as VXLAN */
if (adapter->vxlan_port &&
udp_hdr(skb)->dest == adapter->vxlan_port)
@@ -7670,6 +7686,12 @@ static void ixgbe_atr(struct ixgbe_ring *ring,
hdr.network = skb_inner_network_header(skb);
}
+ /* Make sure we have at least [minimum IPv4 header + TCP]
+ * or [IPv6 header] bytes
+ */
+ if (unlikely(skb_tail_pointer(skb) < hdr.network + 40))
+ return;
+
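The bound of 40 bytes above is the minimum IPv4 header plus the minimum TCP header (20 + 20), which also happens to equal sizeof(struct ipv6hdr), so a single check covers both address families before hdr.ipv4->version is dereferenced below.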
/* Currently only IPv4/IPv6 with TCP is supported */
switch (hdr.ipv4->version) {
case IPVERSION:
@@ -7689,6 +7711,10 @@ static void ixgbe_atr(struct ixgbe_ring *ring,
if (l4_proto != IPPROTO_TCP)
return;
+ if (unlikely(skb_tail_pointer(skb) < hdr.network +
+ hlen + sizeof(struct tcphdr)))
+ return;
+
th = (struct tcphdr *)(hdr.network + hlen);
/* skip this packet since the socket is closing */
@@ -8354,12 +8380,38 @@ static int ixgbe_configure_clsu32_del_hnode(struct ixgbe_adapter *adapter,
}
#ifdef CONFIG_NET_CLS_ACT
+struct upper_walk_data {
+ struct ixgbe_adapter *adapter;
+ u64 action;
+ int ifindex;
+ u8 queue;
+};
+
+static int get_macvlan_queue(struct net_device *upper, void *_data)
+{
+ if (netif_is_macvlan(upper)) {
+ struct macvlan_dev *dfwd = netdev_priv(upper);
+ struct ixgbe_fwd_adapter *vadapter = dfwd->fwd_priv;
+ struct upper_walk_data *data = _data;
+ struct ixgbe_adapter *adapter = data->adapter;
+ int ifindex = data->ifindex;
+
+ if (vadapter && vadapter->netdev->ifindex == ifindex) {
+ data->queue = adapter->rx_ring[vadapter->rx_base_queue]->reg_idx;
+ data->action = data->queue;
+ return 1;
+ }
+ }
+
+ return 0;
+}
+
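The int return value is what lets this callback replace the open-coded loop: netdev_walk_all_upper_dev_rcu() stops walking as soon as a callback returns non-zero and propagates that value to its caller, so handle_redirect_action() below can treat a non-zero result as "match found" and read the queue and action back out of the upper_walk_data cookie.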
static int handle_redirect_action(struct ixgbe_adapter *adapter, int ifindex,
u8 *queue, u64 *action)
{
unsigned int num_vfs = adapter->num_vfs, vf;
+ struct upper_walk_data data;
struct net_device *upper;
- struct list_head *iter;
/* redirect to a SRIOV VF */
for (vf = 0; vf < num_vfs; ++vf) {
@@ -8377,17 +8429,16 @@ static int handle_redirect_action(struct ixgbe_adapter *adapter, int ifindex,
}
/* redirect to a offloaded macvlan netdev */
- netdev_for_each_all_upper_dev_rcu(adapter->netdev, upper, iter) {
- if (netif_is_macvlan(upper)) {
- struct macvlan_dev *dfwd = netdev_priv(upper);
- struct ixgbe_fwd_adapter *vadapter = dfwd->fwd_priv;
-
- if (vadapter && vadapter->netdev->ifindex == ifindex) {
- *queue = adapter->rx_ring[vadapter->rx_base_queue]->reg_idx;
- *action = *queue;
- return 0;
- }
- }
+ data.adapter = adapter;
+ data.ifindex = ifindex;
+ data.action = 0;
+ data.queue = 0;
+ if (netdev_walk_all_upper_dev_rcu(adapter->netdev,
+ get_macvlan_queue, &data)) {
+ *action = data.action;
+ *queue = data.queue;
+
+ return 0;
}
return -EINVAL;
@@ -8414,7 +8465,7 @@ static int parse_tc_actions(struct ixgbe_adapter *adapter,
}
/* Redirect to a VF or a offloaded macvlan */
- if (is_tcf_mirred_redirect(a)) {
+ if (is_tcf_mirred_egress_redirect(a)) {
int ifindex = tcf_mirred_ifindex(a);
err = handle_redirect_action(adapter, ifindex, queue,
@@ -9481,6 +9532,8 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
hw->mac.ops = *ii->mac_ops;
hw->mac.type = ii->mac;
hw->mvals = ii->mvals;
+ if (ii->link_ops)
+ hw->link.ops = *ii->link_ops;
/* EEPROM */
hw->eeprom.ops = *ii->eeprom_ops;
@@ -9504,10 +9557,8 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
hw->phy.mdio.mdio_read = ixgbe_mdio_read;
hw->phy.mdio.mdio_write = ixgbe_mdio_write;
- ii->get_invariants(hw);
-
/* setup the private structure */
- err = ixgbe_sw_init(adapter);
+ err = ixgbe_sw_init(adapter, ii);
if (err)
goto err_sw_init;
@@ -9616,6 +9667,10 @@ skip_sriov:
netdev->priv_flags |= IFF_UNICAST_FLT;
netdev->priv_flags |= IFF_SUPP_NOFCS;
+ /* MTU range: 68 - 9710 */
+ netdev->min_mtu = ETH_MIN_MTU;
+ netdev->max_mtu = IXGBE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN);
+
#ifdef CONFIG_IXGBE_DCB
if (adapter->flags & IXGBE_FLAG_DCB_CAPABLE)
netdev->dcbnl_ops = &dcbnl_ops;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
index 021ab9b89c71..3b8362085f57 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
@@ -109,8 +109,8 @@ static u8 ixgbe_ones_comp_byte_add(u8 add1, u8 add2)
*
* Returns an error code on error.
*/
-static s32 ixgbe_read_i2c_combined_generic_int(struct ixgbe_hw *hw, u8 addr,
- u16 reg, u16 *val, bool lock)
+s32 ixgbe_read_i2c_combined_generic_int(struct ixgbe_hw *hw, u8 addr,
+ u16 reg, u16 *val, bool lock)
{
u32 swfw_mask = hw->phy.phy_semaphore_mask;
int max_retry = 10;
@@ -178,36 +178,6 @@ fail:
}
/**
- * ixgbe_read_i2c_combined_generic - Perform I2C read combined operation
- * @hw: pointer to the hardware structure
- * @addr: I2C bus address to read from
- * @reg: I2C device register to read from
- * @val: pointer to location to receive read value
- *
- * Returns an error code on error.
- */
-s32 ixgbe_read_i2c_combined_generic(struct ixgbe_hw *hw, u8 addr,
- u16 reg, u16 *val)
-{
- return ixgbe_read_i2c_combined_generic_int(hw, addr, reg, val, true);
-}
-
-/**
- * ixgbe_read_i2c_combined_generic_unlocked - Unlocked I2C read combined
- * @hw: pointer to the hardware structure
- * @addr: I2C bus address to read from
- * @reg: I2C device register to read from
- * @val: pointer to location to receive read value
- *
- * Returns an error code on error.
- */
-s32 ixgbe_read_i2c_combined_generic_unlocked(struct ixgbe_hw *hw, u8 addr,
- u16 reg, u16 *val)
-{
- return ixgbe_read_i2c_combined_generic_int(hw, addr, reg, val, false);
-}
-
-/**
* ixgbe_write_i2c_combined_generic_int - Perform I2C write combined operation
* @hw: pointer to the hardware structure
* @addr: I2C bus address to write to
@@ -217,8 +187,8 @@ s32 ixgbe_read_i2c_combined_generic_unlocked(struct ixgbe_hw *hw, u8 addr,
*
* Returns an error code on error.
*/
-static s32 ixgbe_write_i2c_combined_generic_int(struct ixgbe_hw *hw, u8 addr,
- u16 reg, u16 val, bool lock)
+s32 ixgbe_write_i2c_combined_generic_int(struct ixgbe_hw *hw, u8 addr,
+ u16 reg, u16 val, bool lock)
{
u32 swfw_mask = hw->phy.phy_semaphore_mask;
int max_retry = 1;
@@ -273,33 +243,39 @@ fail:
}
/**
- * ixgbe_write_i2c_combined_generic - Perform I2C write combined operation
- * @hw: pointer to the hardware structure
- * @addr: I2C bus address to write to
- * @reg: I2C device register to write to
- * @val: value to write
+ * ixgbe_probe_phy - Probe a single address for a PHY
+ * @hw: pointer to hardware structure
+ * @phy_addr: PHY address to probe
*
- * Returns an error code on error.
- */
-s32 ixgbe_write_i2c_combined_generic(struct ixgbe_hw *hw,
- u8 addr, u16 reg, u16 val)
+ * Returns true if PHY found
+ **/
+static bool ixgbe_probe_phy(struct ixgbe_hw *hw, u16 phy_addr)
{
- return ixgbe_write_i2c_combined_generic_int(hw, addr, reg, val, true);
-}
+ u16 ext_ability = 0;
-/**
- * ixgbe_write_i2c_combined_generic_unlocked - Unlocked I2C write combined
- * @hw: pointer to the hardware structure
- * @addr: I2C bus address to write to
- * @reg: I2C device register to write to
- * @val: value to write
- *
- * Returns an error code on error.
- */
-s32 ixgbe_write_i2c_combined_generic_unlocked(struct ixgbe_hw *hw,
- u8 addr, u16 reg, u16 val)
-{
- return ixgbe_write_i2c_combined_generic_int(hw, addr, reg, val, false);
+ hw->phy.mdio.prtad = phy_addr;
+ if (mdio45_probe(&hw->phy.mdio, phy_addr) != 0)
+ return false;
+
+ if (ixgbe_get_phy_id(hw))
+ return false;
+
+ hw->phy.type = ixgbe_get_phy_type_from_id(hw->phy.id);
+
+ if (hw->phy.type == ixgbe_phy_unknown) {
+ hw->phy.ops.read_reg(hw,
+ MDIO_PMA_EXTABLE,
+ MDIO_MMD_PMAPMD,
+ &ext_ability);
+ if (ext_ability &
+ (MDIO_PMA_EXTABLE_10GBT |
+ MDIO_PMA_EXTABLE_1000BT))
+ hw->phy.type = ixgbe_phy_cu_unknown;
+ else
+ hw->phy.type = ixgbe_phy_generic;
+ }
+
+ return true;
}
/**
@@ -311,7 +287,7 @@ s32 ixgbe_write_i2c_combined_generic_unlocked(struct ixgbe_hw *hw,
s32 ixgbe_identify_phy_generic(struct ixgbe_hw *hw)
{
u32 phy_addr;
- u16 ext_ability = 0;
+ u32 status = IXGBE_ERR_PHY_ADDR_INVALID;
if (!hw->phy.phy_semaphore_mask) {
if (hw->bus.lan_id)
@@ -320,37 +296,34 @@ s32 ixgbe_identify_phy_generic(struct ixgbe_hw *hw)
hw->phy.phy_semaphore_mask = IXGBE_GSSR_PHY0_SM;
}
- if (hw->phy.type == ixgbe_phy_unknown) {
- for (phy_addr = 0; phy_addr < IXGBE_MAX_PHY_ADDR; phy_addr++) {
- hw->phy.mdio.prtad = phy_addr;
- if (mdio45_probe(&hw->phy.mdio, phy_addr) == 0) {
- ixgbe_get_phy_id(hw);
- hw->phy.type =
- ixgbe_get_phy_type_from_id(hw->phy.id);
-
- if (hw->phy.type == ixgbe_phy_unknown) {
- hw->phy.ops.read_reg(hw,
- MDIO_PMA_EXTABLE,
- MDIO_MMD_PMAPMD,
- &ext_ability);
- if (ext_ability &
- (MDIO_PMA_EXTABLE_10GBT |
- MDIO_PMA_EXTABLE_1000BT))
- hw->phy.type =
- ixgbe_phy_cu_unknown;
- else
- hw->phy.type =
- ixgbe_phy_generic;
- }
+ if (hw->phy.type != ixgbe_phy_unknown)
+ return 0;
- return 0;
- }
+ if (hw->phy.nw_mng_if_sel) {
+ phy_addr = (hw->phy.nw_mng_if_sel &
+ IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD) >>
+ IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD_SHIFT;
+ if (ixgbe_probe_phy(hw, phy_addr))
+ return 0;
+ else
+ return IXGBE_ERR_PHY_ADDR_INVALID;
+ }
+
+ for (phy_addr = 0; phy_addr < IXGBE_MAX_PHY_ADDR; phy_addr++) {
+ if (ixgbe_probe_phy(hw, phy_addr)) {
+ status = 0;
+ break;
}
- /* indicate no PHY found */
- hw->phy.mdio.prtad = MDIO_PRTAD_NONE;
- return IXGBE_ERR_PHY_ADDR_INVALID;
}
- return 0;
+
+ /* Certain media types do not have a phy so an address will not
+ * be found and the code will take this path. Caller has to
+ * decide if it is an error or not.
+ */
+ if (status)
+ hw->phy.mdio.prtad = MDIO_PRTAD_NONE;
+
+ return status;
}
/**
@@ -416,7 +389,8 @@ static enum ixgbe_phy_type ixgbe_get_phy_type_from_id(u32 phy_id)
case TN1010_PHY_ID:
phy_type = ixgbe_phy_tn;
break;
- case X550_PHY_ID:
+ case X550_PHY_ID2:
+ case X550_PHY_ID3:
case X540_PHY_ID:
phy_type = ixgbe_phy_aq;
break;
@@ -427,6 +401,7 @@ static enum ixgbe_phy_type ixgbe_get_phy_type_from_id(u32 phy_id)
phy_type = ixgbe_phy_nl;
break;
case X557_PHY_ID:
+ case X557_PHY_ID2:
phy_type = ixgbe_phy_x550em_ext_t;
break;
default:
@@ -477,8 +452,7 @@ s32 ixgbe_reset_phy_generic(struct ixgbe_hw *hw)
*/
for (i = 0; i < 30; i++) {
msleep(100);
- hw->phy.ops.read_reg(hw, MDIO_CTRL1,
- MDIO_MMD_PHYXS, &ctrl);
+ hw->phy.ops.read_reg(hw, MDIO_CTRL1, MDIO_MMD_PHYXS, &ctrl);
if (!(ctrl & MDIO_CTRL1_RESET)) {
udelay(2);
break;
@@ -705,53 +679,52 @@ s32 ixgbe_setup_phy_link_generic(struct ixgbe_hw *hw)
ixgbe_get_copper_link_capabilities_generic(hw, &speed, &autoneg);
- if (speed & IXGBE_LINK_SPEED_10GB_FULL) {
- /* Set or unset auto-negotiation 10G advertisement */
- hw->phy.ops.read_reg(hw, MDIO_AN_10GBT_CTRL,
- MDIO_MMD_AN,
- &autoneg_reg);
+ /* Set or unset auto-negotiation 10G advertisement */
+ hw->phy.ops.read_reg(hw, MDIO_AN_10GBT_CTRL, MDIO_MMD_AN, &autoneg_reg);
- autoneg_reg &= ~MDIO_AN_10GBT_CTRL_ADV10G;
- if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL)
- autoneg_reg |= MDIO_AN_10GBT_CTRL_ADV10G;
-
- hw->phy.ops.write_reg(hw, MDIO_AN_10GBT_CTRL,
- MDIO_MMD_AN,
- autoneg_reg);
- }
+ autoneg_reg &= ~MDIO_AN_10GBT_CTRL_ADV10G;
+ if ((hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL) &&
+ (speed & IXGBE_LINK_SPEED_10GB_FULL))
+ autoneg_reg |= MDIO_AN_10GBT_CTRL_ADV10G;
- if (speed & IXGBE_LINK_SPEED_1GB_FULL) {
- /* Set or unset auto-negotiation 1G advertisement */
- hw->phy.ops.read_reg(hw,
- IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG,
- MDIO_MMD_AN,
- &autoneg_reg);
+ hw->phy.ops.write_reg(hw, MDIO_AN_10GBT_CTRL, MDIO_MMD_AN, autoneg_reg);
- autoneg_reg &= ~IXGBE_MII_1GBASE_T_ADVERTISE;
- if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL)
- autoneg_reg |= IXGBE_MII_1GBASE_T_ADVERTISE;
+ hw->phy.ops.read_reg(hw, IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG,
+ MDIO_MMD_AN, &autoneg_reg);
- hw->phy.ops.write_reg(hw,
- IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG,
- MDIO_MMD_AN,
- autoneg_reg);
+ if (hw->mac.type == ixgbe_mac_X550) {
+ /* Set or unset auto-negotiation 5G advertisement */
+ autoneg_reg &= ~IXGBE_MII_5GBASE_T_ADVERTISE;
+ if ((hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_5GB_FULL) &&
+ (speed & IXGBE_LINK_SPEED_5GB_FULL))
+ autoneg_reg |= IXGBE_MII_5GBASE_T_ADVERTISE;
+
+ /* Set or unset auto-negotiation 2.5G advertisement */
+ autoneg_reg &= ~IXGBE_MII_2_5GBASE_T_ADVERTISE;
+ if ((hw->phy.autoneg_advertised &
+ IXGBE_LINK_SPEED_2_5GB_FULL) &&
+ (speed & IXGBE_LINK_SPEED_2_5GB_FULL))
+ autoneg_reg |= IXGBE_MII_2_5GBASE_T_ADVERTISE;
}
- if (speed & IXGBE_LINK_SPEED_100_FULL) {
- /* Set or unset auto-negotiation 100M advertisement */
- hw->phy.ops.read_reg(hw, MDIO_AN_ADVERTISE,
- MDIO_MMD_AN,
- &autoneg_reg);
+ /* Set or unset auto-negotiation 1G advertisement */
+ autoneg_reg &= ~IXGBE_MII_1GBASE_T_ADVERTISE;
+ if ((hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL) &&
+ (speed & IXGBE_LINK_SPEED_1GB_FULL))
+ autoneg_reg |= IXGBE_MII_1GBASE_T_ADVERTISE;
- autoneg_reg &= ~(ADVERTISE_100FULL |
- ADVERTISE_100HALF);
- if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_100_FULL)
- autoneg_reg |= ADVERTISE_100FULL;
+ hw->phy.ops.write_reg(hw, IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG,
+ MDIO_MMD_AN, autoneg_reg);
- hw->phy.ops.write_reg(hw, MDIO_AN_ADVERTISE,
- MDIO_MMD_AN,
- autoneg_reg);
- }
+ /* Set or unset auto-negotiation 100M advertisement */
+ hw->phy.ops.read_reg(hw, MDIO_AN_ADVERTISE, MDIO_MMD_AN, &autoneg_reg);
+
+ autoneg_reg &= ~(ADVERTISE_100FULL | ADVERTISE_100HALF);
+ if ((hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_100_FULL) &&
+ (speed & IXGBE_LINK_SPEED_100_FULL))
+ autoneg_reg |= ADVERTISE_100FULL;
+
+ hw->phy.ops.write_reg(hw, MDIO_AN_ADVERTISE, MDIO_MMD_AN, autoneg_reg);
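
All of the rewritten advertisement blocks above follow one idiom: clear the bit, then set it only when the speed is both requested and supported. As a sketch (adv_bit() is an illustrative helper, not in the patch):

	static u16 adv_bit(u16 reg, u16 bit, u32 requested, u32 supported,
			   u32 speed)
	{
		reg &= ~bit;		/* always start from a cleared bit */
		if ((requested & speed) && (supported & speed))
			reg |= bit;	/* advertise only if both agree */
		return reg;
	}
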
/* Blocked by MNG FW so don't reset PHY */
if (ixgbe_check_reset_blocked(hw))
@@ -830,6 +803,7 @@ static s32 ixgbe_get_copper_speeds_supported(struct ixgbe_hw *hw)
hw->phy.speeds_supported |= IXGBE_LINK_SPEED_5GB_FULL;
break;
case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_x550em_a:
hw->phy.speeds_supported &= ~IXGBE_LINK_SPEED_100_FULL;
break;
default:
@@ -2396,9 +2370,7 @@ s32 ixgbe_set_copper_phy_power(struct ixgbe_hw *hw, bool on)
if (!on && ixgbe_mng_present(hw))
return 0;
- status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_VENDOR_SPECIFIC_1_CONTROL,
- IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
- &reg);
+ status = hw->phy.ops.read_reg(hw, MDIO_CTRL1, MDIO_MMD_VEND1, &reg);
if (status)
return status;
@@ -2410,8 +2382,6 @@ s32 ixgbe_set_copper_phy_power(struct ixgbe_hw *hw, bool on)
reg |= IXGBE_MDIO_PHY_SET_LOW_POWER_MODE;
}
- status = hw->phy.ops.write_reg(hw, IXGBE_MDIO_VENDOR_SPECIFIC_1_CONTROL,
- IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
- reg);
+ status = hw->phy.ops.write_reg(hw, MDIO_CTRL1, MDIO_MMD_VEND1, reg);
return status;
}
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h
index cc735ec3e045..ecf05f838fc5 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h
@@ -195,12 +195,8 @@ s32 ixgbe_read_i2c_sff8472_generic(struct ixgbe_hw *hw, u8 byte_offset,
u8 *sff8472_data);
s32 ixgbe_write_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset,
u8 eeprom_data);
-s32 ixgbe_read_i2c_combined_generic(struct ixgbe_hw *hw, u8 addr,
- u16 reg, u16 *val);
-s32 ixgbe_read_i2c_combined_generic_unlocked(struct ixgbe_hw *hw, u8 addr,
- u16 reg, u16 *val);
-s32 ixgbe_write_i2c_combined_generic(struct ixgbe_hw *hw, u8 addr,
- u16 reg, u16 val);
-s32 ixgbe_write_i2c_combined_generic_unlocked(struct ixgbe_hw *hw, u8 addr,
- u16 reg, u16 val);
+s32 ixgbe_read_i2c_combined_generic_int(struct ixgbe_hw *, u8 addr, u16 reg,
+ u16 *val, bool lock);
+s32 ixgbe_write_i2c_combined_generic_int(struct ixgbe_hw *, u8 addr, u16 reg,
+ u16 val, bool lock);
#endif /* _IXGBE_PHY_H_ */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
index 31d82e3abac8..cf21273db201 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
@@ -874,19 +874,13 @@ struct ixgbe_thermal_sensor_data {
#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_1GB 0x4 /* 1Gb/s */
#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_10GB 0x6 /* 10Gb/s */
-#define IXGBE_MII_10GBASE_T_AUTONEG_CTRL_REG 0x20 /* 10G Control Reg */
#define IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG 0xC400 /* 1G Provisioning 1 */
#define IXGBE_MII_AUTONEG_XNP_TX_REG 0x17 /* 1G XNP Transmit */
-#define IXGBE_MII_AUTONEG_ADVERTISE_REG 0x10 /* 100M Advertisement */
-#define IXGBE_MII_10GBASE_T_ADVERTISE 0x1000 /* full duplex, bit:12*/
#define IXGBE_MII_1GBASE_T_ADVERTISE_XNP_TX 0x4000 /* full duplex, bit:14*/
#define IXGBE_MII_1GBASE_T_ADVERTISE 0x8000 /* full duplex, bit:15*/
#define IXGBE_MII_2_5GBASE_T_ADVERTISE 0x0400
#define IXGBE_MII_5GBASE_T_ADVERTISE 0x0800
-#define IXGBE_MII_100BASE_T_ADVERTISE 0x0100 /* full duplex, bit:8 */
-#define IXGBE_MII_100BASE_T_ADVERTISE_HALF 0x0080 /* half duplex, bit:7 */
#define IXGBE_MII_RESTART 0x200
-#define IXGBE_MII_AUTONEG_COMPLETE 0x20
#define IXGBE_MII_AUTONEG_LINK_UP 0x04
#define IXGBE_MII_AUTONEG_REG 0x0
@@ -1320,30 +1314,20 @@ struct ixgbe_thermal_sensor_data {
/* MDIO definitions */
#define IXGBE_MDIO_ZERO_DEV_TYPE 0x0
-#define IXGBE_MDIO_PMA_PMD_DEV_TYPE 0x1
#define IXGBE_MDIO_PCS_DEV_TYPE 0x3
-#define IXGBE_MDIO_PHY_XS_DEV_TYPE 0x4
-#define IXGBE_MDIO_AUTO_NEG_DEV_TYPE 0x7
-#define IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE 0x1E /* Device 30 */
#define IXGBE_TWINAX_DEV 1
#define IXGBE_MDIO_COMMAND_TIMEOUT 100 /* PHY Timeout for 1 GB mode */
-#define IXGBE_MDIO_VENDOR_SPECIFIC_1_CONTROL 0x0 /* VS1 Control Reg */
-#define IXGBE_MDIO_VENDOR_SPECIFIC_1_STATUS 0x1 /* VS1 Status Reg */
#define IXGBE_MDIO_VENDOR_SPECIFIC_1_LINK_STATUS 0x0008 /* 1 = Link Up */
#define IXGBE_MDIO_VENDOR_SPECIFIC_1_SPEED_STATUS 0x0010 /* 0 - 10G, 1 - 1G */
#define IXGBE_MDIO_VENDOR_SPECIFIC_1_10G_SPEED 0x0018
#define IXGBE_MDIO_VENDOR_SPECIFIC_1_1G_SPEED 0x0010
-#define IXGBE_MDIO_AUTO_NEG_CONTROL 0x0 /* AUTO_NEG Control Reg */
-#define IXGBE_MDIO_AUTO_NEG_STATUS 0x1 /* AUTO_NEG Status Reg */
#define IXGBE_MDIO_AUTO_NEG_VENDOR_STAT 0xC800 /* AUTO_NEG Vendor Status Reg */
#define IXGBE_MDIO_AUTO_NEG_VENDOR_TX_ALARM 0xCC00 /* AUTO_NEG Vendor TX Reg */
#define IXGBE_MDIO_AUTO_NEG_VENDOR_TX_ALARM2 0xCC01 /* AUTO_NEG Vendor Tx Reg */
#define IXGBE_MDIO_AUTO_NEG_VEN_LSC 0x1 /* AUTO_NEG Vendor Tx LSC */
-#define IXGBE_MDIO_AUTO_NEG_ADVT 0x10 /* AUTO_NEG Advt Reg */
-#define IXGBE_MDIO_AUTO_NEG_LP 0x13 /* AUTO_NEG LP Status Reg */
#define IXGBE_MDIO_AUTO_NEG_EEE_ADVT 0x3C /* AUTO_NEG EEE Advt Reg */
#define IXGBE_MDIO_PHY_SET_LOW_POWER_MODE 0x0800 /* Set low power mode */
@@ -1393,8 +1377,10 @@ struct ixgbe_thermal_sensor_data {
#define TN1010_PHY_ID 0x00A19410
#define TNX_FW_REV 0xB
#define X540_PHY_ID 0x01540200
-#define X550_PHY_ID 0x01540220
+#define X550_PHY_ID2 0x01540223
+#define X550_PHY_ID3 0x01540221
#define X557_PHY_ID 0x01540240
+#define X557_PHY_ID2 0x01540250
#define QT2022_PHY_ID 0x0043A400
#define ATH_PHY_ID 0x03429050
#define AQ_FW_REV 0x20
@@ -3352,6 +3338,7 @@ struct ixgbe_mac_operations {
s32 (*led_off)(struct ixgbe_hw *, u32);
s32 (*blink_led_start)(struct ixgbe_hw *, u32);
s32 (*blink_led_stop)(struct ixgbe_hw *, u32);
+ s32 (*init_led_link_act)(struct ixgbe_hw *);
/* RAR, Multicast, VLAN */
s32 (*set_rar)(struct ixgbe_hw *, u32, u8 *, u32, u32);
@@ -3372,6 +3359,7 @@ struct ixgbe_mac_operations {
/* Flow Control */
s32 (*fc_enable)(struct ixgbe_hw *);
s32 (*setup_fc)(struct ixgbe_hw *);
+ void (*fc_autoneg)(struct ixgbe_hw *);
/* Manageability interface */
s32 (*set_fw_drv_ver)(struct ixgbe_hw *, u8, u8, u8, u8);
@@ -3410,16 +3398,28 @@ struct ixgbe_phy_operations {
s32 (*read_i2c_sff8472)(struct ixgbe_hw *, u8 , u8 *);
s32 (*read_i2c_eeprom)(struct ixgbe_hw *, u8 , u8 *);
s32 (*write_i2c_eeprom)(struct ixgbe_hw *, u8, u8);
- s32 (*read_i2c_combined)(struct ixgbe_hw *, u8 addr, u16 reg, u16 *val);
- s32 (*write_i2c_combined)(struct ixgbe_hw *, u8 addr, u16 reg, u16 val);
s32 (*check_overtemp)(struct ixgbe_hw *);
s32 (*set_phy_power)(struct ixgbe_hw *, bool on);
s32 (*enter_lplu)(struct ixgbe_hw *);
s32 (*handle_lasi)(struct ixgbe_hw *hw);
- s32 (*read_i2c_combined_unlocked)(struct ixgbe_hw *, u8 addr, u16 reg,
- u16 *value);
- s32 (*write_i2c_combined_unlocked)(struct ixgbe_hw *, u8 addr, u16 reg,
- u16 value);
+ s32 (*read_i2c_byte_unlocked)(struct ixgbe_hw *, u8 offset, u8 addr,
+ u8 *value);
+ s32 (*write_i2c_byte_unlocked)(struct ixgbe_hw *, u8 offset, u8 addr,
+ u8 value);
+};
+
+struct ixgbe_link_operations {
+ s32 (*read_link)(struct ixgbe_hw *, u8 addr, u16 reg, u16 *val);
+ s32 (*read_link_unlocked)(struct ixgbe_hw *, u8 addr, u16 reg,
+ u16 *val);
+ s32 (*write_link)(struct ixgbe_hw *, u8 addr, u16 reg, u16 val);
+ s32 (*write_link_unlocked)(struct ixgbe_hw *, u8 addr, u16 reg,
+ u16 val);
+};
+
+struct ixgbe_link_info {
+ struct ixgbe_link_operations ops;
+ u8 addr;
};
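
A sketch of what call sites gain from the new struct: the device address and the locked/unlocked access routines are resolved through hw->link instead of being hard-coded (compare ixgbe_read_cs4227() later in this patch):

	static s32 read_link_reg_sketch(struct ixgbe_hw *hw, u16 reg, u16 *val)
	{
		/* hw->link.addr is set up by get_invariants (IXGBE_CS4227 on
		 * X550EM_x), so no call site names the device directly. */
		return hw->link.ops.read_link(hw, hw->link.addr, reg, val);
	}
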
struct ixgbe_eeprom_info {
@@ -3462,6 +3462,7 @@ struct ixgbe_mac_info {
u8 san_mac_rar_index;
struct ixgbe_thermal_sensor_data thermal_sensor_data;
bool set_lben;
+ u8 led_link_act;
};
struct ixgbe_phy_info {
@@ -3523,6 +3524,7 @@ struct ixgbe_hw {
struct ixgbe_addr_filter_info addr_ctrl;
struct ixgbe_fc_info fc;
struct ixgbe_phy_info phy;
+ struct ixgbe_link_info link;
struct ixgbe_eeprom_info eeprom;
struct ixgbe_bus_info bus;
struct ixgbe_mbx_info mbx;
@@ -3546,6 +3548,7 @@ struct ixgbe_info {
const struct ixgbe_eeprom_operations *eeprom_ops;
const struct ixgbe_phy_operations *phy_ops;
const struct ixgbe_mbx_operations *mbx_ops;
+ const struct ixgbe_link_operations *link_ops;
const u32 *mvals;
};
@@ -3593,17 +3596,35 @@ struct ixgbe_info {
#define IXGBE_FUSES0_REV_MASK (3u << 6)
#define IXGBE_KRM_PORT_CAR_GEN_CTRL(P) ((P) ? 0x8010 : 0x4010)
+#define IXGBE_KRM_LINK_S1(P) ((P) ? 0x8200 : 0x4200)
#define IXGBE_KRM_LINK_CTRL_1(P) ((P) ? 0x820C : 0x420C)
#define IXGBE_KRM_AN_CNTL_1(P) ((P) ? 0x822C : 0x422C)
#define IXGBE_KRM_AN_CNTL_8(P) ((P) ? 0x8248 : 0x4248)
#define IXGBE_KRM_SGMII_CTRL(P) ((P) ? 0x82A0 : 0x42A0)
+#define IXGBE_KRM_LP_BASE_PAGE_HIGH(P) ((P) ? 0x836C : 0x436C)
#define IXGBE_KRM_DSP_TXFFE_STATE_4(P) ((P) ? 0x8634 : 0x4634)
#define IXGBE_KRM_DSP_TXFFE_STATE_5(P) ((P) ? 0x8638 : 0x4638)
#define IXGBE_KRM_RX_TRN_LINKUP_CTRL(P) ((P) ? 0x8B00 : 0x4B00)
#define IXGBE_KRM_PMD_DFX_BURNIN(P) ((P) ? 0x8E00 : 0x4E00)
+#define IXGBE_KRM_PMD_FLX_MASK_ST20(P) ((P) ? 0x9054 : 0x5054)
#define IXGBE_KRM_TX_COEFF_CTRL_1(P) ((P) ? 0x9520 : 0x5520)
#define IXGBE_KRM_RX_ANA_CTL(P) ((P) ? 0x9A00 : 0x5A00)
+#define IXGBE_KRM_PMD_FLX_MASK_ST20_SFI_10G_DA ~(0x3 << 20)
+#define IXGBE_KRM_PMD_FLX_MASK_ST20_SFI_10G_SR BIT(20)
+#define IXGBE_KRM_PMD_FLX_MASK_ST20_SFI_10G_LR (0x2 << 20)
+#define IXGBE_KRM_PMD_FLX_MASK_ST20_SGMII_EN BIT(25)
+#define IXGBE_KRM_PMD_FLX_MASK_ST20_AN37_EN BIT(26)
+#define IXGBE_KRM_PMD_FLX_MASK_ST20_AN_EN BIT(27)
+#define IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_10M ~(0x7 << 28)
+#define IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_100M BIT(28)
+#define IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_1G (0x2 << 28)
+#define IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_10G (0x3 << 28)
+#define IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_AN (0x4 << 28)
+#define IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_2_5G (0x7 << 28)
+#define IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_MASK (0x7 << 28)
+#define IXGBE_KRM_PMD_FLX_MASK_ST20_FW_AN_RESTART BIT(31)
+
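Since ..._SPEED_MASK above covers bits 30:28, programming a forced lane speed is a masked insert; a sketch, with an illustrative helper name:

	static u32 st20_force_speed(u32 reg, u32 speed_code)
	{
		reg &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_MASK;	/* bits 30:28 */
		return reg | speed_code;	/* e.g. ..._SPEED_10G = 0x3 << 28 */
	}
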
#define IXGBE_KRM_PORT_CAR_GEN_CTRL_NELB_32B BIT(9)
#define IXGBE_KRM_PORT_CAR_GEN_CTRL_NELB_KRPCS BIT(11)
@@ -3618,6 +3639,7 @@ struct ixgbe_info {
#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KR BIT(18)
#define IXGBE_KRM_LINK_CTRL_1_TETH_EEE_CAP_KX BIT(24)
#define IXGBE_KRM_LINK_CTRL_1_TETH_EEE_CAP_KR BIT(26)
+#define IXGBE_KRM_LINK_S1_MAC_AN_COMPLETE BIT(28)
#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE BIT(29)
#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_RESTART BIT(31)
@@ -3627,6 +3649,8 @@ struct ixgbe_info {
#define IXGBE_KRM_AN_CNTL_8_LINEAR BIT(0)
#define IXGBE_KRM_AN_CNTL_8_LIMITING BIT(1)
+#define IXGBE_KRM_LP_BASE_PAGE_HIGH_SYM_PAUSE BIT(10)
+#define IXGBE_KRM_LP_BASE_PAGE_HIGH_ASM_PAUSE BIT(11)
#define IXGBE_KRM_SGMII_CTRL_MAC_TAR_FORCE_100_D BIT(12)
#define IXGBE_KRM_SGMII_CTRL_MAC_TAR_FORCE_10_D BIT(19)
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
index f2b1d48a16c3..e2ff823ee202 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
@@ -851,6 +851,7 @@ static const struct ixgbe_mac_operations mac_ops_X540 = {
.get_link_capabilities = &ixgbe_get_copper_link_capabilities_generic,
.led_on = &ixgbe_led_on_generic,
.led_off = &ixgbe_led_off_generic,
+ .init_led_link_act = ixgbe_init_led_link_act_generic,
.blink_led_start = &ixgbe_blink_led_start_X540,
.blink_led_stop = &ixgbe_blink_led_stop_X540,
.set_rar = &ixgbe_set_rar_generic,
@@ -866,6 +867,7 @@ static const struct ixgbe_mac_operations mac_ops_X540 = {
.set_vfta = &ixgbe_set_vfta_generic,
.fc_enable = &ixgbe_fc_enable_generic,
.setup_fc = ixgbe_setup_fc_generic,
+ .fc_autoneg = ixgbe_fc_autoneg,
.set_fw_drv_ver = &ixgbe_set_fw_drv_ver_generic,
.init_uta_tables = &ixgbe_init_uta_tables_generic,
.setup_sfp = NULL,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
index 7e6b9267ca9d..11fb433eb924 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
@@ -28,11 +28,31 @@
static s32 ixgbe_setup_kr_speed_x550em(struct ixgbe_hw *, ixgbe_link_speed);
static s32 ixgbe_setup_fc_x550em(struct ixgbe_hw *);
+static void ixgbe_fc_autoneg_fiber_x550em_a(struct ixgbe_hw *);
+static void ixgbe_fc_autoneg_backplane_x550em_a(struct ixgbe_hw *);
+static s32 ixgbe_setup_fc_backplane_x550em_a(struct ixgbe_hw *);
static s32 ixgbe_get_invariants_X550_x(struct ixgbe_hw *hw)
{
struct ixgbe_mac_info *mac = &hw->mac;
struct ixgbe_phy_info *phy = &hw->phy;
+ struct ixgbe_link_info *link = &hw->link;
+
+ /* Start with X540 invariants, since so similar */
+ ixgbe_get_invariants_X540(hw);
+
+ if (mac->ops.get_media_type(hw) != ixgbe_media_type_copper)
+ phy->ops.set_phy_power = NULL;
+
+ link->addr = IXGBE_CS4227;
+
+ return 0;
+}
+
+static s32 ixgbe_get_invariants_X550_a(struct ixgbe_hw *hw)
+{
+ struct ixgbe_mac_info *mac = &hw->mac;
+ struct ixgbe_phy_info *phy = &hw->phy;
/* Start with X540 invariants, since so similar */
ixgbe_get_invariants_X540(hw);
@@ -69,8 +89,7 @@ static void ixgbe_setup_mux_ctl(struct ixgbe_hw *hw)
*/
static s32 ixgbe_read_cs4227(struct ixgbe_hw *hw, u16 reg, u16 *value)
{
- return hw->phy.ops.read_i2c_combined_unlocked(hw, IXGBE_CS4227, reg,
- value);
+ return hw->link.ops.read_link_unlocked(hw, hw->link.addr, reg, value);
}
/**
@@ -83,8 +102,7 @@ static s32 ixgbe_read_cs4227(struct ixgbe_hw *hw, u16 reg, u16 *value)
*/
static s32 ixgbe_write_cs4227(struct ixgbe_hw *hw, u16 reg, u16 value)
{
- return hw->phy.ops.write_i2c_combined_unlocked(hw, IXGBE_CS4227, reg,
- value);
+ return hw->link.ops.write_link_unlocked(hw, hw->link.addr, reg, value);
}
/**
@@ -322,6 +340,68 @@ static s32 ixgbe_write_phy_reg_x550em(struct ixgbe_hw *hw, u32 reg_addr,
return IXGBE_NOT_IMPLEMENTED;
}
+/**
+ * ixgbe_read_i2c_combined_generic - Perform I2C read combined operation
+ * @hw: pointer to the hardware structure
+ * @addr: I2C bus address to read from
+ * @reg: I2C device register to read from
+ * @val: pointer to location to receive read value
+ *
+ * Returns an error code on error.
+ **/
+static s32 ixgbe_read_i2c_combined_generic(struct ixgbe_hw *hw, u8 addr,
+ u16 reg, u16 *val)
+{
+ return ixgbe_read_i2c_combined_generic_int(hw, addr, reg, val, true);
+}
+
+/**
+ * ixgbe_read_i2c_combined_generic_unlocked - Do I2C read combined operation
+ * @hw: pointer to the hardware structure
+ * @addr: I2C bus address to read from
+ * @reg: I2C device register to read from
+ * @val: pointer to location to receive read value
+ *
+ * Returns an error code on error.
+ **/
+static s32
+ixgbe_read_i2c_combined_generic_unlocked(struct ixgbe_hw *hw, u8 addr,
+ u16 reg, u16 *val)
+{
+ return ixgbe_read_i2c_combined_generic_int(hw, addr, reg, val, false);
+}
+
+/**
+ * ixgbe_write_i2c_combined_generic - Perform I2C write combined operation
+ * @hw: pointer to the hardware structure
+ * @addr: I2C bus address to write to
+ * @reg: I2C device register to write to
+ * @val: value to write
+ *
+ * Returns an error code on error.
+ **/
+static s32 ixgbe_write_i2c_combined_generic(struct ixgbe_hw *hw,
+ u8 addr, u16 reg, u16 val)
+{
+ return ixgbe_write_i2c_combined_generic_int(hw, addr, reg, val, true);
+}
+
+/**
+ * ixgbe_write_i2c_combined_generic_unlocked - Do I2C write combined operation
+ * @hw: pointer to the hardware structure
+ * @addr: I2C bus address to write to
+ * @reg: I2C device register to write to
+ * @val: value to write
+ *
+ * Returns an error code on error.
+ **/
+static s32
+ixgbe_write_i2c_combined_generic_unlocked(struct ixgbe_hw *hw,
+ u8 addr, u16 reg, u16 val)
+{
+ return ixgbe_write_i2c_combined_generic_int(hw, addr, reg, val, false);
+}
+
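The consolidation pattern behind these four wrappers, in miniature: one worker takes a lock flag, and each public variant becomes a one-liner (names shortened here for illustration):

	s32 combined_read_int(struct ixgbe_hw *hw, u8 addr, u16 reg, u16 *val,
			      bool lock);	/* the shared worker */

	static s32 combined_read(struct ixgbe_hw *hw, u8 addr, u16 reg, u16 *val)
	{
		return combined_read_int(hw, addr, reg, val, true);  /* takes sem */
	}

	static s32 combined_read_unlocked(struct ixgbe_hw *hw, u8 addr, u16 reg,
					  u16 *val)
	{
		return combined_read_int(hw, addr, reg, val, false); /* caller holds */
	}
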
/** ixgbe_init_eeprom_params_X550 - Initialize EEPROM params
* @hw: pointer to hardware structure
*
@@ -1128,47 +1208,17 @@ out:
return ret;
}
-/** ixgbe_setup_ixfi_x550em - Configure the KR PHY for iXFI mode.
+/**
+ * ixgbe_setup_ixfi_x550em_x - MAC specific iXFI configuration
* @hw: pointer to hardware structure
- * @speed: the link speed to force
*
- * Configures the integrated KR PHY to use iXFI mode. Used to connect an
- * internal and external PHY at a specific speed, without autonegotiation.
+ * iXFI configuration needed for ixgbe_mac_X550EM_x devices.
**/
-static s32 ixgbe_setup_ixfi_x550em(struct ixgbe_hw *hw, ixgbe_link_speed *speed)
+static s32 ixgbe_setup_ixfi_x550em_x(struct ixgbe_hw *hw)
{
s32 status;
u32 reg_val;
- /* Disable AN and force speed to 10G Serial. */
- status = ixgbe_read_iosf_sb_reg_x550(hw,
- IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
- IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
- if (status)
- return status;
-
- reg_val &= ~IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE;
- reg_val &= ~IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_MASK;
-
- /* Select forced link speed for internal PHY. */
- switch (*speed) {
- case IXGBE_LINK_SPEED_10GB_FULL:
- reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_10G;
- break;
- case IXGBE_LINK_SPEED_1GB_FULL:
- reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_1G;
- break;
- default:
- /* Other link speeds are not supported by internal KR PHY. */
- return IXGBE_ERR_LINK_SETUP;
- }
-
- status = ixgbe_write_iosf_sb_reg_x550(hw,
- IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
- IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
- if (status)
- return status;
-
/* Disable training protocol FSM. */
status = ixgbe_read_iosf_sb_reg_x550(hw,
IXGBE_KRM_RX_TRN_LINKUP_CTRL(hw->bus.lan_id),
@@ -1228,20 +1278,106 @@ static s32 ixgbe_setup_ixfi_x550em(struct ixgbe_hw *hw, ixgbe_link_speed *speed)
status = ixgbe_write_iosf_sb_reg_x550(hw,
IXGBE_KRM_TX_COEFF_CTRL_1(hw->bus.lan_id),
IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
- if (status)
+ return status;
+}
+
+/**
+ * ixgbe_restart_an_internal_phy_x550em - restart autonegotiation for the
+ * internal PHY
+ * @hw: pointer to hardware structure
+ **/
+static s32 ixgbe_restart_an_internal_phy_x550em(struct ixgbe_hw *hw)
+{
+ s32 status;
+ u32 link_ctrl;
+
+ /* Restart auto-negotiation. */
+ status = hw->mac.ops.read_iosf_sb_reg(hw,
+ IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, &link_ctrl);
+
+ if (status) {
+ hw_dbg(hw, "Auto-negotiation did not complete\n");
return status;
+ }
- /* Toggle port SW reset by AN reset. */
- status = ixgbe_read_iosf_sb_reg_x550(hw,
+ link_ctrl |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_RESTART;
+ status = hw->mac.ops.write_iosf_sb_reg(hw,
IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
- IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
+ IXGBE_SB_IOSF_TARGET_KR_PHY, link_ctrl);
+
+ if (hw->mac.type == ixgbe_mac_x550em_a) {
+ u32 flx_mask_st20;
+
+ /* Indicate to FW that AN restart has been asserted */
+ status = hw->mac.ops.read_iosf_sb_reg(hw,
+ IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, &flx_mask_st20);
+
+ if (status) {
+ hw_dbg(hw, "Auto-negotiation did not complete\n");
+ return status;
+ }
+
+ flx_mask_st20 |= IXGBE_KRM_PMD_FLX_MASK_ST20_FW_AN_RESTART;
+ status = hw->mac.ops.write_iosf_sb_reg(hw,
+ IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, flx_mask_st20);
+ }
+
+ return status;
+}
+
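The helper above is one instance of the read-modify-write shape used for IOSF sideband registers throughout this file; as a sketch (iosf_set_bits() is an illustrative name):

	static s32 iosf_set_bits(struct ixgbe_hw *hw, u32 reg, u32 bits)
	{
		u32 val;
		s32 rc;

		rc = hw->mac.ops.read_iosf_sb_reg(hw, reg,
						  IXGBE_SB_IOSF_TARGET_KR_PHY,
						  &val);
		if (rc)
			return rc;
		val |= bits;			/* e.g. ..._TETH_AN_RESTART */
		return hw->mac.ops.write_iosf_sb_reg(hw, reg,
						     IXGBE_SB_IOSF_TARGET_KR_PHY,
						     val);
	}
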
+/** ixgbe_setup_ixfi_x550em - Configure the KR PHY for iXFI mode.
+ * @hw: pointer to hardware structure
+ * @speed: the link speed to force
+ *
+ * Configures the integrated KR PHY to use iXFI mode. Used to connect an
+ * internal and external PHY at a specific speed, without autonegotiation.
+ **/
+static s32 ixgbe_setup_ixfi_x550em(struct ixgbe_hw *hw, ixgbe_link_speed *speed)
+{
+ s32 status;
+ u32 reg_val;
+
+ /* Disable AN and force speed to 10G Serial. */
+ status = ixgbe_read_iosf_sb_reg_x550(hw,
+ IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
if (status)
return status;
- reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_RESTART;
+ reg_val &= ~IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE;
+ reg_val &= ~IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_MASK;
+
+ /* Select forced link speed for internal PHY. */
+ switch (*speed) {
+ case IXGBE_LINK_SPEED_10GB_FULL:
+ reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_10G;
+ break;
+ case IXGBE_LINK_SPEED_1GB_FULL:
+ reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_1G;
+ break;
+ default:
+ /* Other link speeds are not supported by internal KR PHY. */
+ return IXGBE_ERR_LINK_SETUP;
+ }
+
status = ixgbe_write_iosf_sb_reg_x550(hw,
IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
+ if (status)
+ return status;
+
+ /* Additional configuration needed for x550em_x */
+ if (hw->mac.type == ixgbe_mac_X550EM_x) {
+ status = ixgbe_setup_ixfi_x550em_x(hw);
+ if (status)
+ return status;
+ }
+
+ /* Toggle port SW reset by AN reset. */
+ status = ixgbe_restart_an_internal_phy_x550em(hw);
return status;
}
@@ -1292,7 +1428,7 @@ ixgbe_setup_mac_link_sfp_x550em(struct ixgbe_hw *hw,
__always_unused bool autoneg_wait_to_complete)
{
s32 status;
- u16 slice, value;
+ u16 reg_slice, reg_val;
bool setup_linear = false;
/* Check if SFP module is supported and linear */
@@ -1308,71 +1444,68 @@ ixgbe_setup_mac_link_sfp_x550em(struct ixgbe_hw *hw,
if (status)
return status;
- if (!(hw->phy.nw_mng_if_sel & IXGBE_NW_MNG_IF_SEL_INT_PHY_MODE)) {
- /* Configure CS4227 LINE side to 10G SR. */
- slice = IXGBE_CS4227_LINE_SPARE22_MSB + (hw->bus.lan_id << 12);
- value = IXGBE_CS4227_SPEED_10G;
- status = ixgbe_write_i2c_combined_generic(hw, IXGBE_CS4227,
- slice, value);
- if (status)
- goto i2c_err;
+ /* Configure internal PHY for KR/KX. */
+ ixgbe_setup_kr_speed_x550em(hw, speed);
- slice = IXGBE_CS4227_LINE_SPARE24_LSB + (hw->bus.lan_id << 12);
- value = (IXGBE_CS4227_EDC_MODE_SR << 1) | 1;
- status = ixgbe_write_i2c_combined_generic(hw, IXGBE_CS4227,
- slice, value);
- if (status)
- goto i2c_err;
-
- /* Configure CS4227 for HOST connection rate then type. */
- slice = IXGBE_CS4227_HOST_SPARE22_MSB + (hw->bus.lan_id << 12);
- value = speed & IXGBE_LINK_SPEED_10GB_FULL ?
- IXGBE_CS4227_SPEED_10G : IXGBE_CS4227_SPEED_1G;
- status = ixgbe_write_i2c_combined_generic(hw, IXGBE_CS4227,
- slice, value);
- if (status)
- goto i2c_err;
+ /* Configure CS4227 LINE side to proper mode. */
+ reg_slice = IXGBE_CS4227_LINE_SPARE24_LSB + (hw->bus.lan_id << 12);
+ if (setup_linear)
+ reg_val = (IXGBE_CS4227_EDC_MODE_CX1 << 1) | 0x1;
+ else
+ reg_val = (IXGBE_CS4227_EDC_MODE_SR << 1) | 0x1;
- slice = IXGBE_CS4227_HOST_SPARE24_LSB + (hw->bus.lan_id << 12);
- if (setup_linear)
- value = (IXGBE_CS4227_EDC_MODE_CX1 << 1) | 1;
- else
- value = (IXGBE_CS4227_EDC_MODE_SR << 1) | 1;
- status = ixgbe_write_i2c_combined_generic(hw, IXGBE_CS4227,
- slice, value);
- if (status)
- goto i2c_err;
+ status = hw->link.ops.write_link(hw, hw->link.addr, reg_slice,
+ reg_val);
- /* Setup XFI internal link. */
- status = ixgbe_setup_ixfi_x550em(hw, &speed);
- if (status) {
- hw_dbg(hw, "setup_ixfi failed with %d\n", status);
- return status;
- }
- } else {
- /* Configure internal PHY for KR/KX. */
- status = ixgbe_setup_kr_speed_x550em(hw, speed);
- if (status) {
- hw_dbg(hw, "setup_kr_speed failed with %d\n", status);
- return status;
- }
+ return status;
+}
- /* Configure CS4227 LINE side to proper mode. */
- slice = IXGBE_CS4227_LINE_SPARE24_LSB + (hw->bus.lan_id << 12);
- if (setup_linear)
- value = (IXGBE_CS4227_EDC_MODE_CX1 << 1) | 1;
- else
- value = (IXGBE_CS4227_EDC_MODE_SR << 1) | 1;
- status = ixgbe_write_i2c_combined_generic(hw, IXGBE_CS4227,
- slice, value);
- if (status)
- goto i2c_err;
+/**
+ * ixgbe_setup_sfi_x550a - Configure the internal PHY for native SFI mode
+ * @hw: pointer to hardware structure
+ * @speed: the link speed to force
+ *
+ * Configures the integrated PHY for native SFI mode. Used to connect the
+ * internal PHY directly to an SFP cage, without autonegotiation.
+ **/
+static s32 ixgbe_setup_sfi_x550a(struct ixgbe_hw *hw, ixgbe_link_speed *speed)
+{
+ struct ixgbe_mac_info *mac = &hw->mac;
+ s32 status;
+ u32 reg_val;
+
+ /* Disable all AN and force speed to 10G Serial. */
+ status = mac->ops.read_iosf_sb_reg(hw,
+ IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
+ if (status)
+ return status;
+
+ reg_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_AN_EN;
+ reg_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_AN37_EN;
+ reg_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_SGMII_EN;
+ reg_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_MASK;
+
+ /* Select forced link speed for internal PHY. */
+ switch (*speed) {
+ case IXGBE_LINK_SPEED_10GB_FULL:
+ reg_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_10G;
+ break;
+ case IXGBE_LINK_SPEED_1GB_FULL:
+ reg_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_1G;
+ break;
+ default:
+ /* Other link speeds are not supported by internal PHY. */
+ return IXGBE_ERR_LINK_SETUP;
}
- return 0;
+ status = mac->ops.write_iosf_sb_reg(hw,
+ IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
+
+ /* Toggle port SW reset by AN reset. */
+ status = ixgbe_restart_an_internal_phy_x550em(hw);
-i2c_err:
- hw_dbg(hw, "combined i2c access failed with %d\n", status);
return status;
}
@@ -1388,45 +1521,39 @@ ixgbe_setup_mac_link_sfp_n(struct ixgbe_hw *hw, ixgbe_link_speed speed,
{
bool setup_linear = false;
u32 reg_phy_int;
- s32 rc;
+ s32 ret_val;
/* Check if SFP module is supported and linear */
- rc = ixgbe_supported_sfp_modules_X550em(hw, &setup_linear);
+ ret_val = ixgbe_supported_sfp_modules_X550em(hw, &setup_linear);
/* If no SFP module present, then return success. Return success since
* SFP not present is not treated as an error in the setup MAC link flow.
*/
- if (rc == IXGBE_ERR_SFP_NOT_PRESENT)
+ if (ret_val == IXGBE_ERR_SFP_NOT_PRESENT)
return 0;
- if (rc)
- return rc;
+ if (ret_val)
+ return ret_val;
- /* Configure internal PHY for native SFI */
- rc = hw->mac.ops.read_iosf_sb_reg(hw,
- IXGBE_KRM_AN_CNTL_8(hw->bus.lan_id),
- IXGBE_SB_IOSF_TARGET_KR_PHY,
- &reg_phy_int);
- if (rc)
- return rc;
+ /* Configure internal PHY for native SFI based on module type */
+ ret_val = hw->mac.ops.read_iosf_sb_reg(hw,
+ IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_phy_int);
+ if (ret_val)
+ return ret_val;
- if (setup_linear) {
- reg_phy_int &= ~IXGBE_KRM_AN_CNTL_8_LIMITING;
- reg_phy_int |= IXGBE_KRM_AN_CNTL_8_LINEAR;
- } else {
- reg_phy_int |= IXGBE_KRM_AN_CNTL_8_LIMITING;
- reg_phy_int &= ~IXGBE_KRM_AN_CNTL_8_LINEAR;
- }
+ reg_phy_int &= IXGBE_KRM_PMD_FLX_MASK_ST20_SFI_10G_DA;
+ if (!setup_linear)
+ reg_phy_int |= IXGBE_KRM_PMD_FLX_MASK_ST20_SFI_10G_SR;
- rc = hw->mac.ops.write_iosf_sb_reg(hw,
- IXGBE_KRM_AN_CNTL_8(hw->bus.lan_id),
- IXGBE_SB_IOSF_TARGET_KR_PHY,
- reg_phy_int);
- if (rc)
- return rc;
+ ret_val = hw->mac.ops.write_iosf_sb_reg(hw,
+ IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, reg_phy_int);
+ if (ret_val)
+ return ret_val;
- /* Setup XFI/SFI internal link */
- return ixgbe_setup_ixfi_x550em(hw, &speed);
+ /* Setup SFI internal link. */
+ return ixgbe_setup_sfi_x550a(hw, &speed);
}
/**
@@ -1442,19 +1569,19 @@ ixgbe_setup_mac_link_sfp_x550a(struct ixgbe_hw *hw, ixgbe_link_speed speed,
u32 reg_slice, slice_offset;
bool setup_linear = false;
u16 reg_phy_ext;
- s32 rc;
+ s32 ret_val;
/* Check if SFP module is supported and linear */
- rc = ixgbe_supported_sfp_modules_X550em(hw, &setup_linear);
+ ret_val = ixgbe_supported_sfp_modules_X550em(hw, &setup_linear);
/* If no SFP module present, then return success. Return success since
* SFP not present is not treated as an error in the setup MAC link flow.
*/
- if (rc == IXGBE_ERR_SFP_NOT_PRESENT)
+ if (ret_val == IXGBE_ERR_SFP_NOT_PRESENT)
return 0;
- if (rc)
- return rc;
+ if (ret_val)
+ return ret_val;
/* Configure internal PHY for KR/KX. */
ixgbe_setup_kr_speed_x550em(hw, speed);
@@ -1463,10 +1590,10 @@ ixgbe_setup_mac_link_sfp_x550a(struct ixgbe_hw *hw, ixgbe_link_speed speed,
return IXGBE_ERR_PHY_ADDR_INVALID;
/* Get external PHY device id */
- rc = hw->phy.ops.read_reg(hw, IXGBE_CS4227_GLOBAL_ID_MSB,
+ ret_val = hw->phy.ops.read_reg(hw, IXGBE_CS4227_GLOBAL_ID_MSB,
IXGBE_MDIO_ZERO_DEV_TYPE, &reg_phy_ext);
- if (rc)
- return rc;
+ if (ret_val)
+ return ret_val;
/* When configuring quad port CS4223, the MAC instance is part
* of the slice offset.
@@ -1538,7 +1665,7 @@ static s32 ixgbe_check_link_t_X550em(struct ixgbe_hw *hw,
bool link_up_wait_to_complete)
{
u32 status;
- u16 autoneg_status;
+ u16 i, autoneg_status;
if (hw->mac.ops.get_media_type(hw) != ixgbe_media_type_copper)
return IXGBE_ERR_CONFIG;
@@ -1550,14 +1677,18 @@ static s32 ixgbe_check_link_t_X550em(struct ixgbe_hw *hw,
if (status || !(*link_up))
return status;
- /* MAC link is up, so check external PHY link.
- * Read this twice back to back to indicate current status.
- */
- status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_STATUS,
- IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
- &autoneg_status);
- if (status)
- return status;
+ /* MAC link is up, so check external PHY link.
+ * Link status is latching low, and can only be used to detect link
+ * drop, and not the current status of the link without performing
+ * back-to-back reads.
+ */
+ for (i = 0; i < 2; i++) {
+ status = hw->phy.ops.read_reg(hw, MDIO_STAT1, MDIO_MMD_AN,
+ &autoneg_status);
+
+ if (status)
+ return status;
+ }
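
The two-read loop above isolates a clause 45 detail: the MDIO_STAT1 link bit latches low on any link drop, so only a second back-to-back read reports the current state. A sketch:

	static s32 read_current_link_sketch(struct ixgbe_hw *hw, u16 *stat)
	{
		s32 rc = hw->phy.ops.read_reg(hw, MDIO_STAT1, MDIO_MMD_AN, stat);

		if (rc)
			return rc;
		/* first read may still show a stale, latched link drop */
		return hw->phy.ops.read_reg(hw, MDIO_STAT1, MDIO_MMD_AN, stat);
	}
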
/* If external PHY link is not up, then indicate link not up */
if (!(autoneg_status & IXGBE_MDIO_AUTO_NEG_LINK_STATUS))
@@ -1575,7 +1706,7 @@ ixgbe_setup_sgmii(struct ixgbe_hw *hw, __always_unused ixgbe_link_speed speed,
__always_unused bool autoneg_wait_to_complete)
{
struct ixgbe_mac_info *mac = &hw->mac;
- u32 lval, sval;
+ u32 lval, sval, flx_val;
s32 rc;
rc = mac->ops.read_iosf_sb_reg(hw,
@@ -1609,14 +1740,55 @@ ixgbe_setup_sgmii(struct ixgbe_hw *hw, __always_unused ixgbe_link_speed speed,
if (rc)
return rc;
- lval |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_RESTART;
+ rc = mac->ops.read_iosf_sb_reg(hw,
+ IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, &flx_val);
+ if (rc)
+ return rc;
+
+ flx_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_MASK;
+ flx_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_1G;
+ flx_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_AN_EN;
+ flx_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_SGMII_EN;
+ flx_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_AN37_EN;
+
rc = mac->ops.write_iosf_sb_reg(hw,
- IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
- IXGBE_SB_IOSF_TARGET_KR_PHY, lval);
+ IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, flx_val);
+ if (rc)
+ return rc;
+ rc = ixgbe_restart_an_internal_phy_x550em(hw);
return rc;
}
+/** ixgbe_init_mac_link_ops_X550em_a - Init mac link function pointers
+ * @hw: pointer to hardware structure
+ **/
+static void ixgbe_init_mac_link_ops_X550em_a(struct ixgbe_hw *hw)
+{
+ struct ixgbe_mac_info *mac = &hw->mac;
+
+ switch (mac->ops.get_media_type(hw)) {
+ case ixgbe_media_type_fiber:
+ mac->ops.setup_fc = NULL;
+ mac->ops.fc_autoneg = ixgbe_fc_autoneg_fiber_x550em_a;
+ break;
+ case ixgbe_media_type_backplane:
+ mac->ops.fc_autoneg = ixgbe_fc_autoneg_backplane_x550em_a;
+ mac->ops.setup_fc = ixgbe_setup_fc_backplane_x550em_a;
+ break;
+ default:
+ break;
+ }
+}
+
/** ixgbe_init_mac_link_ops_X550em - init mac link function pointers
* @hw: pointer to hardware structure
**/
@@ -1664,6 +1836,10 @@ static void ixgbe_init_mac_link_ops_X550em(struct ixgbe_hw *hw)
default:
break;
}
+
+ /* Additional modification for X550em_a devices */
+ if (hw->mac.type == ixgbe_mac_x550em_a)
+ ixgbe_init_mac_link_ops_X550em_a(hw);
}
/** ixgbe_setup_sfp_modules_X550em - Setup SFP module
@@ -1740,7 +1916,7 @@ static s32 ixgbe_get_lasi_ext_t_x550em(struct ixgbe_hw *hw, bool *lsc)
/* Vendor alarm triggered */
status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_CHIP_STD_INT_FLAG,
- IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
+ MDIO_MMD_VEND1,
&reg);
if (status || !(reg & IXGBE_MDIO_GLOBAL_VEN_ALM_INT_EN))
@@ -1748,7 +1924,7 @@ static s32 ixgbe_get_lasi_ext_t_x550em(struct ixgbe_hw *hw, bool *lsc)
/* Vendor Auto-Neg alarm triggered or Global alarm 1 triggered */
status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_INT_CHIP_VEN_FLAG,
- IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
+ MDIO_MMD_VEND1,
&reg);
if (status || !(reg & (IXGBE_MDIO_GLOBAL_AN_VEN_ALM_INT_EN |
@@ -1757,7 +1933,7 @@ static s32 ixgbe_get_lasi_ext_t_x550em(struct ixgbe_hw *hw, bool *lsc)
/* Global alarm triggered */
status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_ALARM_1,
- IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
+ MDIO_MMD_VEND1,
&reg);
if (status)
@@ -1772,7 +1948,7 @@ static s32 ixgbe_get_lasi_ext_t_x550em(struct ixgbe_hw *hw, bool *lsc)
if (reg & IXGBE_MDIO_GLOBAL_ALM_1_DEV_FAULT) {
/* device fault alarm triggered */
status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_FAULT_MSG,
- IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
+ MDIO_MMD_VEND1,
&reg);
if (status)
return status;
@@ -1787,14 +1963,14 @@ static s32 ixgbe_get_lasi_ext_t_x550em(struct ixgbe_hw *hw, bool *lsc)
/* Vendor alarm 2 triggered */
status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_CHIP_STD_INT_FLAG,
- IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &reg);
+ MDIO_MMD_AN, &reg);
if (status || !(reg & IXGBE_MDIO_GLOBAL_STD_ALM2_INT))
return status;
/* link connect/disconnect event occurred */
status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_VENDOR_TX_ALARM2,
- IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &reg);
+ MDIO_MMD_AN, &reg);
if (status)
return status;
@@ -1826,20 +2002,20 @@ static s32 ixgbe_enable_lasi_ext_t_x550em(struct ixgbe_hw *hw)
/* Enable link status change alarm */
status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_PMA_TX_VEN_LASI_INT_MASK,
- IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &reg);
+ MDIO_MMD_AN, &reg);
if (status)
return status;
reg |= IXGBE_MDIO_PMA_TX_VEN_LASI_INT_EN;
status = hw->phy.ops.write_reg(hw, IXGBE_MDIO_PMA_TX_VEN_LASI_INT_MASK,
- IXGBE_MDIO_AUTO_NEG_DEV_TYPE, reg);
+ MDIO_MMD_AN, reg);
if (status)
return status;
/* Enable high temperature failure and global fault alarms */
status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_INT_MASK,
- IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
+ MDIO_MMD_VEND1,
&reg);
if (status)
return status;
@@ -1848,14 +2024,14 @@ static s32 ixgbe_enable_lasi_ext_t_x550em(struct ixgbe_hw *hw)
IXGBE_MDIO_GLOBAL_INT_DEV_FAULT_EN);
status = hw->phy.ops.write_reg(hw, IXGBE_MDIO_GLOBAL_INT_MASK,
- IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
+ MDIO_MMD_VEND1,
reg);
if (status)
return status;
/* Enable vendor Auto-Neg alarm and Global Interrupt Mask 1 alarm */
status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_INT_CHIP_VEN_MASK,
- IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
+ MDIO_MMD_VEND1,
&reg);
if (status)
return status;
@@ -1864,14 +2040,14 @@ static s32 ixgbe_enable_lasi_ext_t_x550em(struct ixgbe_hw *hw)
IXGBE_MDIO_GLOBAL_ALARM_1_INT);
status = hw->phy.ops.write_reg(hw, IXGBE_MDIO_GLOBAL_INT_CHIP_VEN_MASK,
- IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
+ MDIO_MMD_VEND1,
reg);
if (status)
return status;
/* Enable chip-wide vendor alarm */
status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_INT_CHIP_STD_MASK,
- IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
+ MDIO_MMD_VEND1,
&reg);
if (status)
return status;
@@ -1879,7 +2055,7 @@ static s32 ixgbe_enable_lasi_ext_t_x550em(struct ixgbe_hw *hw)
reg |= IXGBE_MDIO_GLOBAL_VEN_ALM_INT_EN;
status = hw->phy.ops.write_reg(hw, IXGBE_MDIO_GLOBAL_INT_CHIP_STD_MASK,
- IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
+ MDIO_MMD_VEND1,
reg);
return status;
@@ -1945,13 +2121,31 @@ static s32 ixgbe_setup_kr_speed_x550em(struct ixgbe_hw *hw,
if (speed & IXGBE_LINK_SPEED_1GB_FULL)
reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KX;
- /* Restart auto-negotiation. */
- reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_RESTART;
status = hw->mac.ops.write_iosf_sb_reg(hw,
IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
- return status;
+ if (hw->mac.type == ixgbe_mac_x550em_a) {
+ /* Set lane mode to KR auto negotiation */
+ status = hw->mac.ops.read_iosf_sb_reg(hw,
+ IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
+
+ if (status)
+ return status;
+
+ reg_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_MASK;
+ reg_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_AN;
+ reg_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_AN_EN;
+ reg_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_AN37_EN;
+ reg_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_SGMII_EN;
+
+ status = hw->mac.ops.write_iosf_sb_reg(hw,
+ IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
+ }
+
+ return ixgbe_restart_an_internal_phy_x550em(hw);
}
/** ixgbe_setup_kx4_x550em - Configure the KX4 PHY.
@@ -2020,14 +2214,12 @@ static s32 ixgbe_ext_phy_t_x550em_get_link(struct ixgbe_hw *hw, bool *link_up)
*link_up = false;
/* read this twice back to back to indicate current status */
- ret = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_STATUS,
- IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
+ ret = hw->phy.ops.read_reg(hw, MDIO_STAT1, MDIO_MMD_AN,
&autoneg_status);
if (ret)
return ret;
- ret = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_STATUS,
- IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
+ ret = hw->phy.ops.read_reg(hw, MDIO_STAT1, MDIO_MMD_AN,
&autoneg_status);
if (ret)
return ret;
@@ -2073,7 +2265,7 @@ static s32 ixgbe_setup_internal_phy_t_x550em(struct ixgbe_hw *hw)
return 0;
status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_VENDOR_STAT,
- IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
+ MDIO_MMD_AN,
&speed);
if (status)
return status;
@@ -2134,10 +2326,10 @@ static s32 ixgbe_led_on_t_x550em(struct ixgbe_hw *hw, u32 led_idx)
/* To turn on the LED, set mode to ON. */
hw->phy.ops.read_reg(hw, IXGBE_X557_LED_PROVISIONING + led_idx,
- IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, &phy_data);
+ MDIO_MMD_VEND1, &phy_data);
phy_data |= IXGBE_X557_LED_MANUAL_SET_MASK;
hw->phy.ops.write_reg(hw, IXGBE_X557_LED_PROVISIONING + led_idx,
- IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, phy_data);
+ MDIO_MMD_VEND1, phy_data);
return 0;
}
@@ -2156,10 +2348,10 @@ static s32 ixgbe_led_off_t_x550em(struct ixgbe_hw *hw, u32 led_idx)
/* To turn off the LED, set mode to OFF. */
hw->phy.ops.read_reg(hw, IXGBE_X557_LED_PROVISIONING + led_idx,
- IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, &phy_data);
+ MDIO_MMD_VEND1, &phy_data);
phy_data &= ~IXGBE_X557_LED_MANUAL_SET_MASK;
hw->phy.ops.write_reg(hw, IXGBE_X557_LED_PROVISIONING + led_idx,
- IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, phy_data);
+ MDIO_MMD_VEND1, phy_data);
return 0;
}
@@ -2180,7 +2372,7 @@ static s32 ixgbe_get_lcd_t_x550em(struct ixgbe_hw *hw,
*lcd_speed = IXGBE_LINK_SPEED_UNKNOWN;
status = hw->phy.ops.read_reg(hw, IXGBE_AUTO_NEG_LP_STATUS,
- IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
+ MDIO_MMD_AN,
&an_lp_status);
if (status)
return status;
@@ -2281,6 +2473,90 @@ static s32 ixgbe_setup_fc_x550em(struct ixgbe_hw *hw)
return rc;
}
+/**
+ * ixgbe_fc_autoneg_backplane_x550em_a - Enable flow control IEEE clause 37
+ * @hw: pointer to hardware structure
+ **/
+static void ixgbe_fc_autoneg_backplane_x550em_a(struct ixgbe_hw *hw)
+{
+ u32 link_s1, lp_an_page_low, an_cntl_1;
+ s32 status = IXGBE_ERR_FC_NOT_NEGOTIATED;
+ ixgbe_link_speed speed;
+ bool link_up;
+
+ /* AN should have completed when the cable was plugged in.
+ * Look for reasons to bail out. Bail out if:
+ * - FC autoneg is disabled, or if
+ * - link is not up.
+ */
+ if (hw->fc.disable_fc_autoneg) {
+ hw_err(hw, "Flow control autoneg is disabled");
+ goto out;
+ }
+
+ hw->mac.ops.check_link(hw, &speed, &link_up, false);
+ if (!link_up) {
+ hw_err(hw, "The link is down");
+ goto out;
+ }
+
+ /* Check that auto-negotiation has completed */
+ status = hw->mac.ops.read_iosf_sb_reg(hw,
+ IXGBE_KRM_LINK_S1(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, &link_s1);
+
+ if (status || (link_s1 & IXGBE_KRM_LINK_S1_MAC_AN_COMPLETE) == 0) {
+ hw_dbg(hw, "Auto-Negotiation did not complete\n");
+ status = IXGBE_ERR_FC_NOT_NEGOTIATED;
+ goto out;
+ }
+
+ /* Read the 10g AN autoc and LP ability registers and resolve
+ * local flow control settings accordingly
+ */
+ status = hw->mac.ops.read_iosf_sb_reg(hw,
+ IXGBE_KRM_AN_CNTL_1(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, &an_cntl_1);
+
+ if (status) {
+ hw_dbg(hw, "Auto-Negotiation did not complete\n");
+ goto out;
+ }
+
+ status = hw->mac.ops.read_iosf_sb_reg(hw,
+ IXGBE_KRM_LP_BASE_PAGE_HIGH(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, &lp_an_page_low);
+
+ if (status) {
+ hw_dbg(hw, "Auto-Negotiation did not complete\n");
+ goto out;
+ }
+
+ status = ixgbe_negotiate_fc(hw, an_cntl_1, lp_an_page_low,
+ IXGBE_KRM_AN_CNTL_1_SYM_PAUSE,
+ IXGBE_KRM_AN_CNTL_1_ASM_PAUSE,
+ IXGBE_KRM_LP_BASE_PAGE_HIGH_SYM_PAUSE,
+ IXGBE_KRM_LP_BASE_PAGE_HIGH_ASM_PAUSE);
+
+out:
+ if (!status) {
+ hw->fc.fc_was_autonegged = true;
+ } else {
+ hw->fc.fc_was_autonegged = false;
+ hw->fc.current_mode = hw->fc.requested_mode;
+ }
+}
+
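The SYM/ASM bits gathered above feed ixgbe_negotiate_fc(), which applies the usual IEEE 802.3 Annex 28B pause resolution; simplified here as a standalone sketch (local and link-partner advertisements in, resolved mode out):

	enum fc_mode { FC_NONE, FC_RX_PAUSE, FC_TX_PAUSE, FC_FULL };

	static enum fc_mode resolve_pause(bool loc_sym, bool loc_asm,
					  bool lp_sym, bool lp_asm,
					  bool want_full)
	{
		if (loc_sym && lp_sym)		/* both advertise symmetric */
			return want_full ? FC_FULL : FC_RX_PAUSE;
		if (!loc_sym && loc_asm && lp_sym && lp_asm)
			return FC_TX_PAUSE;	/* we send, partner honors */
		if (loc_sym && loc_asm && !lp_sym && lp_asm)
			return FC_RX_PAUSE;	/* partner sends, we honor */
		return FC_NONE;
	}
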
+/**
+ * ixgbe_fc_autoneg_fiber_x550em_a - passthrough FC settings
+ * @hw: pointer to hardware structure
+ **/
+static void ixgbe_fc_autoneg_fiber_x550em_a(struct ixgbe_hw *hw)
+{
+ hw->fc.fc_was_autonegged = false;
+ hw->fc.current_mode = hw->fc.requested_mode;
+}
+
/** ixgbe_enter_lplu_x550em - Transition to low power states
* @hw: pointer to hardware structure
*
@@ -2327,7 +2603,7 @@ static s32 ixgbe_enter_lplu_t_x550em(struct ixgbe_hw *hw)
return ixgbe_set_copper_phy_power(hw, false);
status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_VENDOR_STAT,
- IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
+ MDIO_MMD_AN,
&speed);
if (status)
return status;
@@ -2349,20 +2625,20 @@ static s32 ixgbe_enter_lplu_t_x550em(struct ixgbe_hw *hw)
/* Clear AN completed indication */
status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_VENDOR_TX_ALARM,
- IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
+ MDIO_MMD_AN,
&autoneg_reg);
if (status)
return status;
- status = hw->phy.ops.read_reg(hw, IXGBE_MII_10GBASE_T_AUTONEG_CTRL_REG,
- IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
+ status = hw->phy.ops.read_reg(hw, MDIO_AN_10GBT_CTRL,
+ MDIO_MMD_AN,
&an_10g_cntl_reg);
if (status)
return status;
status = hw->phy.ops.read_reg(hw,
IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG,
- IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
+ MDIO_MMD_AN,
&autoneg_reg);
if (status)
return status;
@@ -2520,7 +2796,7 @@ static s32 ixgbe_init_ext_t_x550em(struct ixgbe_hw *hw)
status = hw->phy.ops.read_reg(hw,
IXGBE_MDIO_TX_VENDOR_ALARMS_3,
- IXGBE_MDIO_PMA_PMD_DEV_TYPE,
+ MDIO_MMD_PMAPMD,
&reg);
if (status)
return status;
@@ -2531,7 +2807,7 @@ static s32 ixgbe_init_ext_t_x550em(struct ixgbe_hw *hw)
if (reg & IXGBE_MDIO_TX_VENDOR_ALARMS_3_RST_MASK) {
status = hw->phy.ops.read_reg(hw,
IXGBE_MDIO_GLOBAL_RES_PR_10,
- IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
+ MDIO_MMD_VEND1,
&reg);
if (status)
return status;
@@ -2540,7 +2816,7 @@ static s32 ixgbe_init_ext_t_x550em(struct ixgbe_hw *hw)
status = hw->phy.ops.write_reg(hw,
IXGBE_MDIO_GLOBAL_RES_PR_10,
- IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
+ MDIO_MMD_VEND1,
reg);
if (status)
return status;
@@ -2729,6 +3005,90 @@ static void ixgbe_set_source_address_pruning_X550(struct ixgbe_hw *hw,
}
/**
+ * ixgbe_setup_fc_backplane_x550em_a - Set up flow control
+ * @hw: pointer to hardware structure
+ *
+ * Called at init time to set up flow control.
+ **/
+static s32 ixgbe_setup_fc_backplane_x550em_a(struct ixgbe_hw *hw)
+{
+ s32 status = 0;
+ u32 an_cntl = 0;
+
+ /* Validate the requested mode */
+ if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) {
+ hw_err(hw, "ixgbe_fc_rx_pause not valid in strict IEEE mode\n");
+ return IXGBE_ERR_INVALID_LINK_SETTINGS;
+ }
+
+ if (hw->fc.requested_mode == ixgbe_fc_default)
+ hw->fc.requested_mode = ixgbe_fc_full;
+
+ /* Set up the 1G and 10G flow control advertisement registers so the
+ * HW will be able to do FC autoneg once the cable is plugged in. If
+ * we link at 10G, the 1G advertisement is harmless and vice versa.
+ */
+ status = hw->mac.ops.read_iosf_sb_reg(hw,
+ IXGBE_KRM_AN_CNTL_1(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, &an_cntl);
+
+ if (status) {
+ hw_dbg(hw, "Auto-Negotiation did not complete\n");
+ return status;
+ }
+
+ /* The possible values of fc.requested_mode are:
+ * 0: Flow control is completely disabled
+ * 1: Rx flow control is enabled (we can receive pause frames,
+ * but not send pause frames).
+ * 2: Tx flow control is enabled (we can send pause frames but
+ * we do not support receiving pause frames).
+ * 3: Both Rx and Tx flow control (symmetric) are enabled.
+ * other: Invalid.
+ */
+ switch (hw->fc.requested_mode) {
+ case ixgbe_fc_none:
+ /* Flow control completely disabled by software override. */
+ an_cntl &= ~(IXGBE_KRM_AN_CNTL_1_SYM_PAUSE |
+ IXGBE_KRM_AN_CNTL_1_ASM_PAUSE);
+ break;
+ case ixgbe_fc_tx_pause:
+ /* Tx Flow control is enabled, and Rx Flow control is
+ * disabled by software override.
+ */
+ an_cntl |= IXGBE_KRM_AN_CNTL_1_ASM_PAUSE;
+ an_cntl &= ~IXGBE_KRM_AN_CNTL_1_SYM_PAUSE;
+ break;
+ case ixgbe_fc_rx_pause:
+ /* Rx Flow control is enabled and Tx Flow control is
+ * disabled by software override. Since there really
+ * isn't a way to advertise that we are capable of RX
+ * Pause ONLY, we will advertise that we support both
+ * symmetric and asymmetric Rx PAUSE, as such we fall
+ * through to the fc_full statement. Later, we will
+ * disable the adapter's ability to send PAUSE frames.
+ */
+ case ixgbe_fc_full:
+ /* Flow control (both Rx and Tx) is enabled by SW override. */
+ an_cntl |= IXGBE_KRM_AN_CNTL_1_SYM_PAUSE |
+ IXGBE_KRM_AN_CNTL_1_ASM_PAUSE;
+ break;
+ default:
+ hw_err(hw, "Flow control param set incorrectly\n");
+ return IXGBE_ERR_CONFIG;
+ }
+
+ status = hw->mac.ops.write_iosf_sb_reg(hw,
+ IXGBE_KRM_AN_CNTL_1(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, an_cntl);
+
+ /* Restart auto-negotiation. */
+ status = ixgbe_restart_an_internal_phy_x550em(hw);
+
+ return status;
+}
+
+/**
* ixgbe_set_mux - Set mux for port 1 access with CS4227
* @hw: pointer to hardware structure
* @state: set mux if 1, clear if 0
@@ -2934,6 +3294,7 @@ static const struct ixgbe_mac_operations mac_ops_X550 = {
X550_COMMON_MAC
.led_on = ixgbe_led_on_generic,
.led_off = ixgbe_led_off_generic,
+ .init_led_link_act = ixgbe_init_led_link_act_generic,
.reset_hw = &ixgbe_reset_hw_X540,
.get_media_type = &ixgbe_get_media_type_X540,
.get_san_mac_addr = &ixgbe_get_san_mac_addr_generic,
@@ -2948,12 +3309,14 @@ static const struct ixgbe_mac_operations mac_ops_X550 = {
.prot_autoc_read = prot_autoc_read_generic,
.prot_autoc_write = prot_autoc_write_generic,
.setup_fc = ixgbe_setup_fc_generic,
+ .fc_autoneg = ixgbe_fc_autoneg,
};
static const struct ixgbe_mac_operations mac_ops_X550EM_x = {
X550_COMMON_MAC
.led_on = ixgbe_led_on_t_x550em,
.led_off = ixgbe_led_off_t_x550em,
+ .init_led_link_act = ixgbe_init_led_link_act_generic,
.reset_hw = &ixgbe_reset_hw_X550em,
.get_media_type = &ixgbe_get_media_type_X550em,
.get_san_mac_addr = NULL,
@@ -2966,6 +3329,7 @@ static const struct ixgbe_mac_operations mac_ops_X550EM_x = {
.release_swfw_sync = &ixgbe_release_swfw_sync_X550em,
.init_swfw_sync = &ixgbe_init_swfw_sync_X540,
.setup_fc = NULL, /* defined later */
+ .fc_autoneg = ixgbe_fc_autoneg,
.read_iosf_sb_reg = ixgbe_read_iosf_sb_reg_x550,
.write_iosf_sb_reg = ixgbe_write_iosf_sb_reg_x550,
};
@@ -2974,6 +3338,7 @@ static struct ixgbe_mac_operations mac_ops_x550em_a = {
X550_COMMON_MAC
.led_on = ixgbe_led_on_t_x550em,
.led_off = ixgbe_led_off_t_x550em,
+ .init_led_link_act = ixgbe_init_led_link_act_generic,
.reset_hw = ixgbe_reset_hw_X550em,
.get_media_type = ixgbe_get_media_type_X550em,
.get_san_mac_addr = NULL,
@@ -2985,6 +3350,7 @@ static struct ixgbe_mac_operations mac_ops_x550em_a = {
.acquire_swfw_sync = ixgbe_acquire_swfw_sync_x550em_a,
.release_swfw_sync = ixgbe_release_swfw_sync_x550em_a,
.setup_fc = ixgbe_setup_fc_x550em,
+ .fc_autoneg = ixgbe_fc_autoneg,
.read_iosf_sb_reg = ixgbe_read_iosf_sb_reg_x550a,
.write_iosf_sb_reg = ixgbe_write_iosf_sb_reg_x550a,
};
@@ -3036,11 +3402,6 @@ static const struct ixgbe_phy_operations phy_ops_X550EM_x = {
.identify = &ixgbe_identify_phy_x550em,
.read_reg = &ixgbe_read_phy_reg_generic,
.write_reg = &ixgbe_write_phy_reg_generic,
- .read_i2c_combined = &ixgbe_read_i2c_combined_generic,
- .write_i2c_combined = &ixgbe_write_i2c_combined_generic,
- .read_i2c_combined_unlocked = &ixgbe_read_i2c_combined_generic_unlocked,
- .write_i2c_combined_unlocked =
- &ixgbe_write_i2c_combined_generic_unlocked,
};
static const struct ixgbe_phy_operations phy_ops_x550em_a = {
@@ -3053,6 +3414,13 @@ static const struct ixgbe_phy_operations phy_ops_x550em_a = {
.write_reg_mdi = &ixgbe_write_phy_reg_mdi,
};
+static const struct ixgbe_link_operations link_ops_x550em_x = {
+ .read_link = &ixgbe_read_i2c_combined_generic,
+ .read_link_unlocked = &ixgbe_read_i2c_combined_generic_unlocked,
+ .write_link = &ixgbe_write_i2c_combined_generic,
+ .write_link_unlocked = &ixgbe_write_i2c_combined_generic_unlocked,
+};
+
static const u32 ixgbe_mvals_X550[IXGBE_MVALS_IDX_LIMIT] = {
IXGBE_MVALS_INIT(X550)
};
@@ -3083,11 +3451,12 @@ const struct ixgbe_info ixgbe_X550EM_x_info = {
.phy_ops = &phy_ops_X550EM_x,
.mbx_ops = &mbx_ops_generic,
.mvals = ixgbe_mvals_X550EM_x,
+ .link_ops = &link_ops_x550em_x,
};
const struct ixgbe_info ixgbe_x550em_a_info = {
.mac = ixgbe_mac_x550em_a,
- .get_invariants = &ixgbe_get_invariants_X550_x,
+ .get_invariants = &ixgbe_get_invariants_X550_a,
.mac_ops = &mac_ops_x550em_a,
.eeprom_ops = &eeprom_ops_X550EM_x,
.phy_ops = &phy_ops_x550em_a,
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index cbf70fe4028a..6d4bef5803f2 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -1498,6 +1498,9 @@ static void ixgbevf_free_irq(struct ixgbevf_adapter *adapter)
{
int i, q_vectors;
+ if (!adapter->msix_entries)
+ return;
+
q_vectors = adapter->num_msix_vectors;
i = q_vectors - 1;
@@ -2552,6 +2555,9 @@ static void ixgbevf_free_q_vectors(struct ixgbevf_adapter *adapter)
**/
static void ixgbevf_reset_interrupt_capability(struct ixgbevf_adapter *adapter)
{
+ if (!adapter->msix_entries)
+ return;
+
pci_disable_msix(adapter->pdev);
kfree(adapter->msix_entries);
adapter->msix_entries = NULL;
@@ -3746,24 +3752,8 @@ static int ixgbevf_change_mtu(struct net_device *netdev, int new_mtu)
struct ixgbevf_adapter *adapter = netdev_priv(netdev);
struct ixgbe_hw *hw = &adapter->hw;
int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
- int max_possible_frame = MAXIMUM_ETHERNET_VLAN_SIZE;
int ret;
- switch (adapter->hw.api_version) {
- case ixgbe_mbox_api_11:
- case ixgbe_mbox_api_12:
- max_possible_frame = IXGBE_MAX_JUMBO_FRAME_SIZE;
- break;
- default:
- if (adapter->hw.mac.type != ixgbe_mac_82599_vf)
- max_possible_frame = IXGBE_MAX_JUMBO_FRAME_SIZE;
- break;
- }
-
- /* MTU < 68 is an error and causes problems on some kernels */
- if ((new_mtu < 68) || (max_frame > max_possible_frame))
- return -EINVAL;
-
spin_lock_bh(&adapter->mbx_lock);
/* notify the PF of our intent to use this size of frame */
ret = hw->mac.ops.set_rlpml(hw, max_frame);
@@ -3814,11 +3804,10 @@ static int ixgbevf_suspend(struct pci_dev *pdev, pm_message_t state)
ixgbevf_free_irq(adapter);
ixgbevf_free_all_tx_resources(adapter);
ixgbevf_free_all_rx_resources(adapter);
+ ixgbevf_clear_interrupt_scheme(adapter);
rtnl_unlock();
}
- ixgbevf_clear_interrupt_scheme(adapter);
-
#ifdef CONFIG_PM
retval = pci_save_state(pdev);
if (retval)
@@ -4108,6 +4097,23 @@ static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
netdev->priv_flags |= IFF_UNICAST_FLT;
+ /* MTU range: 68 - 1504 or 9710 */
+ netdev->min_mtu = ETH_MIN_MTU;
+ switch (adapter->hw.api_version) {
+ case ixgbe_mbox_api_11:
+ case ixgbe_mbox_api_12:
+ netdev->max_mtu = IXGBE_MAX_JUMBO_FRAME_SIZE -
+ (ETH_HLEN + ETH_FCS_LEN);
+ break;
+ default:
+ if (adapter->hw.mac.type != ixgbe_mac_82599_vf)
+ netdev->max_mtu = IXGBE_MAX_JUMBO_FRAME_SIZE -
+ (ETH_HLEN + ETH_FCS_LEN);
+ else
+ netdev->max_mtu = ETH_DATA_LEN + ETH_FCS_LEN;
+ break;
+ }
+
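For the record, the bounds above follow from the 4.10-era constants: IXGBE_MAX_JUMBO_FRAME_SIZE (9728) minus ETH_HLEN + ETH_FCS_LEN (14 + 4) gives 9710, while the 82599 VF on an old mailbox API gets ETH_DATA_LEN + ETH_FCS_LEN = 1500 + 4 = 1504, which is where the "68 - 1504 or 9710" comment's numbers come from. The jme hunk below works the same way: 9216 - 14 = 9202.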
if (IXGBE_REMOVED(hw->hw_addr)) {
err = -EIO;
goto err_sw_init;
diff --git a/drivers/net/ethernet/jme.c b/drivers/net/ethernet/jme.c
index 836ebd8ee768..f9fcab54783c 100644
--- a/drivers/net/ethernet/jme.c
+++ b/drivers/net/ethernet/jme.c
@@ -2357,14 +2357,6 @@ jme_change_mtu(struct net_device *netdev, int new_mtu)
{
struct jme_adapter *jme = netdev_priv(netdev);
- if (new_mtu == jme->old_mtu)
- return 0;
-
- if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) ||
- ((new_mtu) < IPV6_MIN_MTU))
- return -EINVAL;
-
-
netdev->mtu = new_mtu;
netdev_update_features(netdev);
@@ -3063,6 +3055,10 @@ jme_init_one(struct pci_dev *pdev,
if (using_dac)
netdev->features |= NETIF_F_HIGHDMA;
+ /* MTU range: 1280 - 9202 */
+ netdev->min_mtu = IPV6_MIN_MTU;
+ netdev->max_mtu = MAX_ETHERNET_JUMBO_PACKET_SIZE - ETH_HLEN;
+
SET_NETDEV_DEV(netdev, &pdev->dev);
pci_set_drvdata(pdev, netdev);
diff --git a/drivers/net/ethernet/korina.c b/drivers/net/ethernet/korina.c
index 1799fe1415df..cbeea915f026 100644
--- a/drivers/net/ethernet/korina.c
+++ b/drivers/net/ethernet/korina.c
@@ -1085,7 +1085,6 @@ static const struct net_device_ops korina_netdev_ops = {
.ndo_set_rx_mode = korina_multicast_list,
.ndo_tx_timeout = korina_tx_timeout,
.ndo_do_ioctl = korina_ioctl,
- .ndo_change_mtu = eth_change_mtu,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
diff --git a/drivers/net/ethernet/lantiq_etop.c b/drivers/net/ethernet/lantiq_etop.c
index a167fd7ee13e..faea52da8dae 100644
--- a/drivers/net/ethernet/lantiq_etop.c
+++ b/drivers/net/ethernet/lantiq_etop.c
@@ -303,15 +303,9 @@ ltq_etop_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
strlcpy(info->version, DRV_VERSION, sizeof(info->version));
}
-static int
-ltq_etop_nway_reset(struct net_device *dev)
-{
- return phy_start_aneg(dev->phydev);
-}
-
static const struct ethtool_ops ltq_etop_ethtool_ops = {
.get_drvinfo = ltq_etop_get_drvinfo,
- .nway_reset = ltq_etop_nway_reset,
+ .nway_reset = phy_ethtool_nway_reset,
.get_link_ksettings = phy_ethtool_get_link_ksettings,
.set_link_ksettings = phy_ethtool_set_link_ksettings,
};
@@ -519,18 +513,16 @@ ltq_etop_tx(struct sk_buff *skb, struct net_device *dev)
static int
ltq_etop_change_mtu(struct net_device *dev, int new_mtu)
{
- int ret = eth_change_mtu(dev, new_mtu);
+ struct ltq_etop_priv *priv = netdev_priv(dev);
+ unsigned long flags;
- if (!ret) {
- struct ltq_etop_priv *priv = netdev_priv(dev);
- unsigned long flags;
+ dev->mtu = new_mtu;
- spin_lock_irqsave(&priv->lock, flags);
- ltq_etop_w32((ETOP_PLEN_UNDER << 16) | new_mtu,
- LTQ_ETOP_IGPLEN);
- spin_unlock_irqrestore(&priv->lock, flags);
- }
- return ret;
+ spin_lock_irqsave(&priv->lock, flags);
+ ltq_etop_w32((ETOP_PLEN_UNDER << 16) | new_mtu, LTQ_ETOP_IGPLEN);
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ return 0;
}
static int
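
Dropping the driver-side bounds checks here (and in the korina and jme hunks above) is safe because, once min_mtu/max_mtu are populated, the 4.10 core rejects out-of-range values before any ndo_change_mtu runs; roughly, simplified from net/core/dev.c:dev_set_mtu():

	if (dev->min_mtu > 0 && new_mtu < dev->min_mtu)
		return -EINVAL;		/* driver callback never sees it */
	if (dev->max_mtu > 0 && new_mtu > dev->max_mtu)
		return -EINVAL;
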
diff --git a/drivers/net/ethernet/marvell/Kconfig b/drivers/net/ethernet/marvell/Kconfig
index 2664827ddecd..f4b7cf18fb0f 100644
--- a/drivers/net/ethernet/marvell/Kconfig
+++ b/drivers/net/ethernet/marvell/Kconfig
@@ -5,7 +5,7 @@
config NET_VENDOR_MARVELL
bool "Marvell devices"
default y
- depends on PCI || CPU_PXA168 || MV64X60 || PPC32 || PLAT_ORION || INET
+ depends on PCI || CPU_PXA168 || MV64X60 || PPC32 || PLAT_ORION || INET || COMPILE_TEST
---help---
If you have a network (Ethernet) card belonging to this class, say Y.
@@ -18,7 +18,8 @@ if NET_VENDOR_MARVELL
config MV643XX_ETH
tristate "Marvell Discovery (643XX) and Orion ethernet support"
- depends on (MV64X60 || PPC32 || PLAT_ORION) && INET
+ depends on (MV64X60 || PPC32 || PLAT_ORION || COMPILE_TEST) && INET
+ depends on HAS_DMA
select PHYLIB
select MVMDIO
---help---
@@ -43,6 +44,7 @@ config MVMDIO
config MVNETA_BM_ENABLE
tristate "Marvell Armada 38x/XP network interface BM support"
depends on MVNETA
+ depends on !64BIT
---help---
This driver supports the auxiliary block of the network
interface units in the Marvell ARMADA XP and ARMADA 38x SoC
@@ -54,13 +56,15 @@ config MVNETA_BM_ENABLE
buffer management.
config MVNETA
- tristate "Marvell Armada 370/38x/XP network interface support"
- depends on PLAT_ORION
+ tristate "Marvell Armada 370/38x/XP/37xx network interface support"
+ depends on ARCH_MVEBU || COMPILE_TEST
+ depends on HAS_DMA
select MVMDIO
select FIXED_PHY
---help---
This driver supports the network interface units in the
- Marvell ARMADA XP, ARMADA 370 and ARMADA 38x SoC family.
+ Marvell ARMADA XP, ARMADA 370, ARMADA 38x and
+ ARMADA 37xx SoC family.
Note that this driver is distinct from the mv643xx_eth
driver, which should be used for the older Marvell SoCs
@@ -68,16 +72,20 @@ config MVNETA
config MVNETA_BM
tristate
+ depends on !64BIT
default y if MVNETA=y && MVNETA_BM_ENABLE!=n
default MVNETA_BM_ENABLE
select HWBM
+ select GENERIC_ALLOCATOR
help
MVNETA_BM must not be 'm' if MVNETA=y, so this symbol ensures
that all dependencies are met.
config MVPP2
tristate "Marvell Armada 375 network interface support"
- depends on MACH_ARMADA_375
+ depends on MACH_ARMADA_375 || COMPILE_TEST
+ depends on HAS_DMA
+ depends on !64BIT
select MVMDIO
---help---
This driver supports the network interface units in the
diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c
index 5b12022adf1f..1fa7c03edec2 100644
--- a/drivers/net/ethernet/marvell/mv643xx_eth.c
+++ b/drivers/net/ethernet/marvell/mv643xx_eth.c
@@ -384,8 +384,6 @@ struct mv643xx_eth_private {
struct net_device *dev;
- struct phy_device *phy;
-
struct timer_list mib_counters_timer;
spinlock_t mib_counters_lock;
struct mib_counters mib_counters;
@@ -1236,7 +1234,7 @@ static void mv643xx_eth_adjust_link(struct net_device *dev)
DISABLE_AUTO_NEG_FOR_FLOW_CTRL |
DISABLE_AUTO_NEG_FOR_DUPLEX;
- if (mp->phy->autoneg == AUTONEG_ENABLE) {
+ if (dev->phydev->autoneg == AUTONEG_ENABLE) {
/* enable auto negotiation */
pscr &= ~autoneg_disable;
goto out_write;
@@ -1244,7 +1242,7 @@ static void mv643xx_eth_adjust_link(struct net_device *dev)
pscr |= autoneg_disable;
- if (mp->phy->speed == SPEED_1000) {
+ if (dev->phydev->speed == SPEED_1000) {
/* force gigabit, half duplex not supported */
pscr |= SET_GMII_SPEED_TO_1000;
pscr |= SET_FULL_DUPLEX_MODE;
@@ -1253,12 +1251,12 @@ static void mv643xx_eth_adjust_link(struct net_device *dev)
pscr &= ~SET_GMII_SPEED_TO_1000;
- if (mp->phy->speed == SPEED_100)
+ if (dev->phydev->speed == SPEED_100)
pscr |= SET_MII_SPEED_TO_100;
else
pscr &= ~SET_MII_SPEED_TO_100;
- if (mp->phy->duplex == DUPLEX_FULL)
+ if (dev->phydev->duplex == DUPLEX_FULL)
pscr |= SET_FULL_DUPLEX_MODE;
else
pscr &= ~SET_FULL_DUPLEX_MODE;
@@ -1499,55 +1497,69 @@ static const struct mv643xx_eth_stats mv643xx_eth_stats[] = {
};
static int
-mv643xx_eth_get_settings_phy(struct mv643xx_eth_private *mp,
- struct ethtool_cmd *cmd)
+mv643xx_eth_get_link_ksettings_phy(struct mv643xx_eth_private *mp,
+ struct ethtool_link_ksettings *cmd)
{
+ struct net_device *dev = mp->dev;
int err;
+ u32 supported, advertising;
- err = phy_read_status(mp->phy);
+ err = phy_read_status(dev->phydev);
if (err == 0)
- err = phy_ethtool_gset(mp->phy, cmd);
+ err = phy_ethtool_ksettings_get(dev->phydev, cmd);
/*
* The MAC does not support 1000baseT_Half.
*/
- cmd->supported &= ~SUPPORTED_1000baseT_Half;
- cmd->advertising &= ~ADVERTISED_1000baseT_Half;
+ ethtool_convert_link_mode_to_legacy_u32(&supported,
+ cmd->link_modes.supported);
+ ethtool_convert_link_mode_to_legacy_u32(&advertising,
+ cmd->link_modes.advertising);
+ supported &= ~SUPPORTED_1000baseT_Half;
+ advertising &= ~ADVERTISED_1000baseT_Half;
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
+ supported);
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
+ advertising);
return err;
}
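
Note: the 1000baseT_Half masking survives the ksettings conversion via a round trip through the legacy u32 masks. Isolated, the pattern is the sketch below; the wrapper function is hypothetical, and ethtool_convert_link_mode_to_legacy_u32() silently drops modes that do not fit in 32 bits, which is acceptable here because the mode being cleared is an old one:

	static void drop_legacy_advertised_mode(struct ethtool_link_ksettings *cmd,
						u32 legacy_bit)
	{
		u32 advertising;

		ethtool_convert_link_mode_to_legacy_u32(&advertising,
							cmd->link_modes.advertising);
		advertising &= ~legacy_bit;	/* e.g. ADVERTISED_1000baseT_Half */
		ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
							advertising);
	}
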
static int
-mv643xx_eth_get_settings_phyless(struct mv643xx_eth_private *mp,
- struct ethtool_cmd *cmd)
+mv643xx_eth_get_link_ksettings_phyless(struct mv643xx_eth_private *mp,
+ struct ethtool_link_ksettings *cmd)
{
u32 port_status;
+ u32 supported, advertising;
port_status = rdlp(mp, PORT_STATUS);
- cmd->supported = SUPPORTED_MII;
- cmd->advertising = ADVERTISED_MII;
+ supported = SUPPORTED_MII;
+ advertising = ADVERTISED_MII;
switch (port_status & PORT_SPEED_MASK) {
case PORT_SPEED_10:
- ethtool_cmd_speed_set(cmd, SPEED_10);
+ cmd->base.speed = SPEED_10;
break;
case PORT_SPEED_100:
- ethtool_cmd_speed_set(cmd, SPEED_100);
+ cmd->base.speed = SPEED_100;
break;
case PORT_SPEED_1000:
- ethtool_cmd_speed_set(cmd, SPEED_1000);
+ cmd->base.speed = SPEED_1000;
break;
default:
- cmd->speed = -1;
+ cmd->base.speed = -1;
break;
}
- cmd->duplex = (port_status & FULL_DUPLEX) ? DUPLEX_FULL : DUPLEX_HALF;
- cmd->port = PORT_MII;
- cmd->phy_address = 0;
- cmd->transceiver = XCVR_INTERNAL;
- cmd->autoneg = AUTONEG_DISABLE;
- cmd->maxtxpkt = 1;
- cmd->maxrxpkt = 1;
+ cmd->base.duplex = (port_status & FULL_DUPLEX) ?
+ DUPLEX_FULL : DUPLEX_HALF;
+ cmd->base.port = PORT_MII;
+ cmd->base.phy_address = 0;
+ cmd->base.autoneg = AUTONEG_DISABLE;
+
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
+ supported);
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
+ advertising);
return 0;
}
@@ -1555,23 +1567,21 @@ mv643xx_eth_get_settings_phyless(struct mv643xx_eth_private *mp,
static void
mv643xx_eth_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
- struct mv643xx_eth_private *mp = netdev_priv(dev);
wol->supported = 0;
wol->wolopts = 0;
- if (mp->phy)
- phy_ethtool_get_wol(mp->phy, wol);
+ if (dev->phydev)
+ phy_ethtool_get_wol(dev->phydev, wol);
}
static int
mv643xx_eth_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
- struct mv643xx_eth_private *mp = netdev_priv(dev);
int err;
- if (mp->phy == NULL)
+ if (!dev->phydev)
return -EOPNOTSUPP;
- err = phy_ethtool_set_wol(mp->phy, wol);
+ err = phy_ethtool_set_wol(dev->phydev, wol);
/* Given that mv643xx_eth works without the marvell-specific PHY driver,
* this debugging hint is useful to have.
*/
@@ -1581,31 +1591,38 @@ mv643xx_eth_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
}
static int
-mv643xx_eth_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+mv643xx_eth_get_link_ksettings(struct net_device *dev,
+ struct ethtool_link_ksettings *cmd)
{
struct mv643xx_eth_private *mp = netdev_priv(dev);
- if (mp->phy != NULL)
- return mv643xx_eth_get_settings_phy(mp, cmd);
+ if (dev->phydev)
+ return mv643xx_eth_get_link_ksettings_phy(mp, cmd);
else
- return mv643xx_eth_get_settings_phyless(mp, cmd);
+ return mv643xx_eth_get_link_ksettings_phyless(mp, cmd);
}
static int
-mv643xx_eth_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+mv643xx_eth_set_link_ksettings(struct net_device *dev,
+ const struct ethtool_link_ksettings *cmd)
{
- struct mv643xx_eth_private *mp = netdev_priv(dev);
+ struct ethtool_link_ksettings c = *cmd;
+ u32 advertising;
int ret;
- if (mp->phy == NULL)
+ if (!dev->phydev)
return -EINVAL;
/*
* The MAC does not support 1000baseT_Half.
*/
- cmd->advertising &= ~ADVERTISED_1000baseT_Half;
+ ethtool_convert_link_mode_to_legacy_u32(&advertising,
+ c.link_modes.advertising);
+ advertising &= ~ADVERTISED_1000baseT_Half;
+ ethtool_convert_legacy_u32_to_link_mode(c.link_modes.advertising,
+ advertising);
- ret = phy_ethtool_sset(mp->phy, cmd);
+ ret = phy_ethtool_ksettings_set(dev->phydev, &c);
if (!ret)
mv643xx_eth_adjust_link(dev);
return ret;
@@ -1622,16 +1639,6 @@ static void mv643xx_eth_get_drvinfo(struct net_device *dev,
strlcpy(drvinfo->bus_info, "platform", sizeof(drvinfo->bus_info));
}
-static int mv643xx_eth_nway_reset(struct net_device *dev)
-{
- struct mv643xx_eth_private *mp = netdev_priv(dev);
-
- if (mp->phy == NULL)
- return -EINVAL;
-
- return genphy_restart_aneg(mp->phy);
-}
-
static int
mv643xx_eth_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
{
@@ -1754,10 +1761,8 @@ static int mv643xx_eth_get_sset_count(struct net_device *dev, int sset)
}
static const struct ethtool_ops mv643xx_eth_ethtool_ops = {
- .get_settings = mv643xx_eth_get_settings,
- .set_settings = mv643xx_eth_set_settings,
.get_drvinfo = mv643xx_eth_get_drvinfo,
- .nway_reset = mv643xx_eth_nway_reset,
+ .nway_reset = phy_ethtool_nway_reset,
.get_link = ethtool_op_get_link,
.get_coalesce = mv643xx_eth_get_coalesce,
.set_coalesce = mv643xx_eth_set_coalesce,
@@ -1769,6 +1774,8 @@ static const struct ethtool_ops mv643xx_eth_ethtool_ops = {
.get_ts_info = ethtool_op_get_ts_info,
.get_wol = mv643xx_eth_get_wol,
.set_wol = mv643xx_eth_set_wol,
+ .get_link_ksettings = mv643xx_eth_get_link_ksettings,
+ .set_link_ksettings = mv643xx_eth_set_link_ksettings,
};
@@ -2328,19 +2335,21 @@ static inline void oom_timer_wrapper(unsigned long data)
static void port_start(struct mv643xx_eth_private *mp)
{
+ struct net_device *dev = mp->dev;
u32 pscr;
int i;
/*
* Perform PHY reset, if there is a PHY.
*/
- if (mp->phy != NULL) {
- struct ethtool_cmd cmd;
+ if (dev->phydev) {
+ struct ethtool_link_ksettings cmd;
- mv643xx_eth_get_settings(mp->dev, &cmd);
- phy_init_hw(mp->phy);
- mv643xx_eth_set_settings(mp->dev, &cmd);
- phy_start(mp->phy);
+ mv643xx_eth_get_link_ksettings(dev, &cmd);
+ phy_init_hw(dev->phydev);
+ mv643xx_eth_set_link_ksettings(dev, &cmd);
+ phy_start(dev->phydev);
}
/*
@@ -2352,7 +2361,7 @@ static void port_start(struct mv643xx_eth_private *mp)
wrlp(mp, PORT_SERIAL_CONTROL, pscr);
pscr |= DO_NOT_FORCE_LINK_FAIL;
- if (mp->phy == NULL)
+ if (!dev->phydev)
pscr |= FORCE_LINK_PASS;
wrlp(mp, PORT_SERIAL_CONTROL, pscr);
@@ -2536,8 +2545,8 @@ static int mv643xx_eth_stop(struct net_device *dev)
del_timer_sync(&mp->rx_oom);
netif_carrier_off(dev);
- if (mp->phy)
- phy_stop(mp->phy);
+ if (dev->phydev)
+ phy_stop(dev->phydev);
free_irq(dev->irq, dev);
port_reset(mp);
@@ -2555,13 +2564,12 @@ static int mv643xx_eth_stop(struct net_device *dev)
static int mv643xx_eth_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
- struct mv643xx_eth_private *mp = netdev_priv(dev);
int ret;
- if (mp->phy == NULL)
+ if (!dev->phydev)
return -ENOTSUPP;
- ret = phy_mii_ioctl(mp->phy, ifr, cmd);
+ ret = phy_mii_ioctl(dev->phydev, ifr, cmd);
if (!ret)
mv643xx_eth_adjust_link(dev);
return ret;
@@ -2571,9 +2579,6 @@ static int mv643xx_eth_change_mtu(struct net_device *dev, int new_mtu)
{
struct mv643xx_eth_private *mp = netdev_priv(dev);
- if (new_mtu < 64 || new_mtu > 9500)
- return -EINVAL;
-
dev->mtu = new_mtu;
mv643xx_eth_recalc_skb_size(mp);
tx_set_rate(mp, 1000000000, 16777216);
@@ -2708,7 +2713,7 @@ static const struct of_device_id mv643xx_eth_shared_ids[] = {
MODULE_DEVICE_TABLE(of, mv643xx_eth_shared_ids);
#endif
-#if defined(CONFIG_OF) && !defined(CONFIG_MV64X60)
+#if defined(CONFIG_OF_IRQ) && !defined(CONFIG_MV64X60)
#define mv643xx_eth_property(_np, _name, _v) \
do { \
u32 tmp; \
@@ -3024,7 +3029,8 @@ static struct phy_device *phy_scan(struct mv643xx_eth_private *mp,
static void phy_init(struct mv643xx_eth_private *mp, int speed, int duplex)
{
- struct phy_device *phy = mp->phy;
+ struct net_device *dev = mp->dev;
+ struct phy_device *phy = dev->phydev;
if (speed == 0) {
phy->autoneg = AUTONEG_ENABLE;
@@ -3042,6 +3048,7 @@ static void phy_init(struct mv643xx_eth_private *mp, int speed, int duplex)
static void init_pscr(struct mv643xx_eth_private *mp, int speed, int duplex)
{
+ struct net_device *dev = mp->dev;
u32 pscr;
pscr = rdlp(mp, PORT_SERIAL_CONTROL);
@@ -3051,7 +3058,7 @@ static void init_pscr(struct mv643xx_eth_private *mp, int speed, int duplex)
}
pscr = MAX_RX_PACKET_9700BYTE | SERIAL_PORT_CONTROL_RESERVED;
- if (mp->phy == NULL) {
+ if (!dev->phydev) {
pscr |= DISABLE_AUTO_NEG_SPEED_GMII;
if (speed == SPEED_1000)
pscr |= SET_GMII_SPEED_TO_1000;
@@ -3090,6 +3097,7 @@ static int mv643xx_eth_probe(struct platform_device *pdev)
struct mv643xx_eth_platform_data *pd;
struct mv643xx_eth_private *mp;
struct net_device *dev;
+ struct phy_device *phydev = NULL;
struct resource *res;
int err;
@@ -3146,18 +3154,18 @@ static int mv643xx_eth_probe(struct platform_device *pdev)
err = 0;
if (pd->phy_node) {
- mp->phy = of_phy_connect(mp->dev, pd->phy_node,
- mv643xx_eth_adjust_link, 0,
- get_phy_mode(mp));
- if (!mp->phy)
+ phydev = of_phy_connect(mp->dev, pd->phy_node,
+ mv643xx_eth_adjust_link, 0,
+ get_phy_mode(mp));
+ if (!phydev)
err = -ENODEV;
else
- phy_addr_set(mp, mp->phy->mdio.addr);
+ phy_addr_set(mp, phydev->mdio.addr);
} else if (pd->phy_addr != MV643XX_ETH_PHY_NONE) {
- mp->phy = phy_scan(mp, pd->phy_addr);
+ phydev = phy_scan(mp, pd->phy_addr);
- if (IS_ERR(mp->phy))
- err = PTR_ERR(mp->phy);
+ if (IS_ERR(phydev))
+ err = PTR_ERR(phydev);
else
phy_init(mp, pd->speed, pd->duplex);
}
@@ -3206,6 +3214,10 @@ static int mv643xx_eth_probe(struct platform_device *pdev)
dev->priv_flags |= IFF_UNICAST_FLT;
dev->gso_max_segs = MV643XX_MAX_TSO_SEGS;
+ /* MTU range: 64 - 9500 */
+ dev->min_mtu = 64;
+ dev->max_mtu = 9500;
+
if (mp->shared->win_protect)
wrl(mp, WINDOW_PROTECT(mp->port_num), mp->shared->win_protect);
@@ -3239,10 +3251,11 @@ out:
static int mv643xx_eth_remove(struct platform_device *pdev)
{
struct mv643xx_eth_private *mp = platform_get_drvdata(pdev);
+ struct net_device *dev = mp->dev;
unregister_netdev(mp->dev);
- if (mp->phy != NULL)
- phy_disconnect(mp->phy);
+ if (dev->phydev)
+ phy_disconnect(dev->phydev);
cancel_work_sync(&mp->tx_timeout_task);
if (!IS_ERR(mp->clk))
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index 707bc4680b9b..e05e22705cf7 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -296,6 +296,12 @@
/* descriptor aligned size */
#define MVNETA_DESC_ALIGNED_SIZE 32
+/* Number of bytes to be taken into account by HW when putting incoming data
+ * into the buffers. It is needed in case NET_SKB_PAD exceeds the maximum
+ * packet offset supported by the MVNETA_RXQ_CONFIG_REG(q) registers.
+ */
+#define MVNETA_RX_PKT_OFFSET_CORRECTION 64
+
#define MVNETA_RX_PKT_SIZE(mtu) \
ALIGN((mtu) + MVNETA_MH_SIZE + MVNETA_VLAN_TAG_LEN + \
ETH_HLEN + ETH_FCS_LEN, \
@@ -391,6 +397,9 @@ struct mvneta_port {
spinlock_t lock;
bool is_stopped;
+ u32 cause_rx_tx;
+ struct napi_struct napi;
+
/* Core clock */
struct clk *clk;
/* AXI clock */
@@ -416,6 +425,10 @@ struct mvneta_port {
u64 ethtool_stats[ARRAY_SIZE(mvneta_statistics)];
u32 indir[MVNETA_RSS_LU_TABLE_SIZE];
+
+ /* Flags for special SoC configurations */
+ bool neta_armada3700;
+ u16 rx_offset_correction;
};
/* The mvneta_tx_desc and mvneta_rx_desc structures describe the
@@ -561,6 +574,9 @@ struct mvneta_rx_queue {
u32 pkts_coal;
u32 time_coal;
+ /* Virtual address of the RX buffer */
+ void **buf_virt_addr;
+
/* Virtual address of the RX DMA descriptors array */
struct mvneta_rx_desc *descs;
@@ -955,14 +971,9 @@ static int mvneta_mbus_io_win_set(struct mvneta_port *pp, u32 base, u32 wsize,
return 0;
}
-/* Assign and initialize pools for port. In case of fail
- * buffer manager will remain disabled for current port.
- */
-static int mvneta_bm_port_init(struct platform_device *pdev,
- struct mvneta_port *pp)
+static int mvneta_bm_port_mbus_init(struct mvneta_port *pp)
{
- struct device_node *dn = pdev->dev.of_node;
- u32 long_pool_id, short_pool_id, wsize;
+ u32 wsize;
u8 target, attr;
int err;
@@ -981,6 +992,25 @@ static int mvneta_bm_port_init(struct platform_device *pdev,
netdev_info(pp->dev, "fail to configure mbus window to BM\n");
return err;
}
+ return 0;
+}
+
+/* Assign and initialize pools for the port. In case of failure, the
+ * buffer manager will remain disabled for the current port.
+ */
+static int mvneta_bm_port_init(struct platform_device *pdev,
+ struct mvneta_port *pp)
+{
+ struct device_node *dn = pdev->dev.of_node;
+ u32 long_pool_id, short_pool_id;
+
+ if (!pp->neta_armada3700) {
+ int ret;
+
+ ret = mvneta_bm_port_mbus_init(pp);
+ if (ret)
+ return ret;
+ }
if (of_property_read_u32(dn, "bm,pool-long", &long_pool_id)) {
netdev_info(pp->dev, "missing long pool id\n");
@@ -1349,22 +1379,27 @@ static void mvneta_defaults_set(struct mvneta_port *pp)
for_each_present_cpu(cpu) {
int rxq_map = 0, txq_map = 0;
int rxq, txq;
+ if (!pp->neta_armada3700) {
+ for (rxq = 0; rxq < rxq_number; rxq++)
+ if ((rxq % max_cpu) == cpu)
+ rxq_map |= MVNETA_CPU_RXQ_ACCESS(rxq);
+
+ for (txq = 0; txq < txq_number; txq++)
+ if ((txq % max_cpu) == cpu)
+ txq_map |= MVNETA_CPU_TXQ_ACCESS(txq);
+
+ /* With only one TX queue we configure a special case
+ * which allows all the IRQs to be handled on a
+ * single CPU
+ */
+ if (txq_number == 1)
+ txq_map = (cpu == pp->rxq_def) ?
+ MVNETA_CPU_TXQ_ACCESS(1) : 0;
- for (rxq = 0; rxq < rxq_number; rxq++)
- if ((rxq % max_cpu) == cpu)
- rxq_map |= MVNETA_CPU_RXQ_ACCESS(rxq);
-
- for (txq = 0; txq < txq_number; txq++)
- if ((txq % max_cpu) == cpu)
- txq_map |= MVNETA_CPU_TXQ_ACCESS(txq);
-
- /* With only one TX queue we configure a special case
- * which will allow to get all the irq on a single
- * CPU
- */
- if (txq_number == 1)
- txq_map = (cpu == pp->rxq_def) ?
- MVNETA_CPU_TXQ_ACCESS(1) : 0;
+ } else {
+ txq_map = MVNETA_CPU_TXQ_ACCESS_ALL_MASK;
+ rxq_map = MVNETA_CPU_RXQ_ACCESS_ALL_MASK;
+ }
mvreg_write(pp, MVNETA_CPU_MAP(cpu), rxq_map | txq_map);
}
@@ -1573,10 +1608,14 @@ static void mvneta_tx_done_pkts_coal_set(struct mvneta_port *pp,
/* Handle rx descriptor fill by setting buf_cookie and buf_phys_addr */
static void mvneta_rx_desc_fill(struct mvneta_rx_desc *rx_desc,
- u32 phys_addr, u32 cookie)
+ u32 phys_addr, void *virt_addr,
+ struct mvneta_rx_queue *rxq)
{
- rx_desc->buf_cookie = cookie;
+ int i;
+
rx_desc->buf_phys_addr = phys_addr;
+ i = rx_desc - rxq->descs;
+ rxq->buf_virt_addr[i] = virt_addr;
}
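
Note: mvneta_rx_desc_fill() now recovers the descriptor index with pointer subtraction instead of stashing the buffer pointer in the descriptor; buf_cookie is a u32 (the old code cast with `(u32)data`), which cannot hold a kernel pointer on the 64-bit platforms this series enables. Pointer difference in C counts elements, not bytes; a self-contained illustration:

	#include <stdio.h>

	struct desc { unsigned int status, size; };

	int main(void)
	{
		struct desc ring[8];
		struct desc *cur = &ring[5];

		/* pointer difference is in elements: prints "index = 5" */
		printf("index = %td\n", cur - ring);
		return 0;
	}
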
/* Decrement sent descriptors counter */
@@ -1781,7 +1820,8 @@ EXPORT_SYMBOL_GPL(mvneta_frag_free);
/* Refill processing for SW buffer management */
static int mvneta_rx_refill(struct mvneta_port *pp,
- struct mvneta_rx_desc *rx_desc)
+ struct mvneta_rx_desc *rx_desc,
+ struct mvneta_rx_queue *rxq)
{
dma_addr_t phys_addr;
@@ -1799,7 +1839,8 @@ static int mvneta_rx_refill(struct mvneta_port *pp,
return -ENOMEM;
}
- mvneta_rx_desc_fill(rx_desc, phys_addr, (u32)data);
+ phys_addr += pp->rx_offset_correction;
+ mvneta_rx_desc_fill(rx_desc, phys_addr, data, rxq);
return 0;
}
@@ -1861,7 +1902,7 @@ static void mvneta_rxq_drop_pkts(struct mvneta_port *pp,
for (i = 0; i < rxq->size; i++) {
struct mvneta_rx_desc *rx_desc = rxq->descs + i;
- void *data = (void *)rx_desc->buf_cookie;
+ void *data = rxq->buf_virt_addr[i];
dma_unmap_single(pp->dev->dev.parent, rx_desc->buf_phys_addr,
MVNETA_RX_BUF_SIZE(pp->pkt_size), DMA_FROM_DEVICE);
@@ -1894,12 +1935,13 @@ static int mvneta_rx_swbm(struct mvneta_port *pp, int rx_todo,
unsigned char *data;
dma_addr_t phys_addr;
u32 rx_status, frag_size;
- int rx_bytes, err;
+ int rx_bytes, err, index;
rx_done++;
rx_status = rx_desc->status;
rx_bytes = rx_desc->data_size - (ETH_FCS_LEN + MVNETA_MH_SIZE);
- data = (unsigned char *)rx_desc->buf_cookie;
+ index = rx_desc - rxq->descs;
+ data = rxq->buf_virt_addr[index];
phys_addr = rx_desc->buf_phys_addr;
if (!mvneta_rxq_desc_is_first_last(rx_status) ||
@@ -1918,7 +1960,7 @@ err_drop_frame:
goto err_drop_frame;
dma_sync_single_range_for_cpu(dev->dev.parent,
- rx_desc->buf_phys_addr,
+ phys_addr,
MVNETA_MH_SIZE + NET_SKB_PAD,
rx_bytes,
DMA_FROM_DEVICE);
@@ -1938,7 +1980,7 @@ err_drop_frame:
}
/* Refill processing */
- err = mvneta_rx_refill(pp, rx_desc);
+ err = mvneta_rx_refill(pp, rx_desc, rxq);
if (err) {
netdev_err(dev, "Linux processing - Can't refill\n");
rxq->missed++;
@@ -2020,7 +2062,7 @@ static int mvneta_rx_hwbm(struct mvneta_port *pp, int rx_todo,
rx_done++;
rx_status = rx_desc->status;
rx_bytes = rx_desc->data_size - (ETH_FCS_LEN + MVNETA_MH_SIZE);
- data = (unsigned char *)rx_desc->buf_cookie;
+ data = (u8 *)(uintptr_t)rx_desc->buf_cookie;
phys_addr = rx_desc->buf_phys_addr;
pool_id = MVNETA_RX_GET_BM_POOL_ID(rx_desc);
bm_pool = &pp->bm_priv->bm_pools[pool_id];
@@ -2610,6 +2652,17 @@ static void mvneta_set_rx_mode(struct net_device *dev)
/* Interrupt handling - the callback for request_irq() */
static irqreturn_t mvneta_isr(int irq, void *dev_id)
{
+ struct mvneta_port *pp = (struct mvneta_port *)dev_id;
+
+ mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0);
+ napi_schedule(&pp->napi);
+
+ return IRQ_HANDLED;
+}
+
+/* Interrupt handling - the callback for request_percpu_irq() */
+static irqreturn_t mvneta_percpu_isr(int irq, void *dev_id)
+{
struct mvneta_pcpu_port *port = (struct mvneta_pcpu_port *)dev_id;
disable_percpu_irq(port->pp->dev->irq);
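
Note: mvneta_isr() is the new single-interrupt path for Armada 3700, while the per-CPU path keeps its old body under the new mvneta_percpu_isr name. The two registration styles must be paired with the matching teardown; condensed from the mvneta_open()/mvneta_stop() hunks further down, with signatures as in <linux/interrupt.h>:

	if (pp->neta_armada3700)	/* one shared interrupt line */
		ret = request_irq(pp->dev->irq, mvneta_isr, 0, dev->name, pp);
	else				/* per-CPU banked interrupt */
		ret = request_percpu_irq(pp->dev->irq, mvneta_percpu_isr,
					 dev->name, pp->ports);

	/* ...and on teardown the dev_id cookie must match: */
	if (pp->neta_armada3700)
		free_irq(pp->dev->irq, pp);
	else
		free_percpu_irq(pp->dev->irq, pp->ports);
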
@@ -2657,7 +2710,7 @@ static int mvneta_poll(struct napi_struct *napi, int budget)
struct mvneta_pcpu_port *port = this_cpu_ptr(pp->ports);
if (!netif_running(pp->dev)) {
- napi_complete(&port->napi);
+ napi_complete(napi);
return rx_done;
}
@@ -2686,7 +2739,8 @@ static int mvneta_poll(struct napi_struct *napi, int budget)
*/
rx_queue = fls(((cause_rx_tx >> 8) & 0xff));
- cause_rx_tx |= port->cause_rx_tx;
+ cause_rx_tx |= pp->neta_armada3700 ? pp->cause_rx_tx :
+ port->cause_rx_tx;
if (rx_queue) {
rx_queue = rx_queue - 1;
@@ -2700,11 +2754,27 @@ static int mvneta_poll(struct napi_struct *napi, int budget)
if (budget > 0) {
cause_rx_tx = 0;
- napi_complete(&port->napi);
- enable_percpu_irq(pp->dev->irq, 0);
+ napi_complete(napi);
+
+ if (pp->neta_armada3700) {
+ unsigned long flags;
+
+ local_irq_save(flags);
+ mvreg_write(pp, MVNETA_INTR_NEW_MASK,
+ MVNETA_RX_INTR_MASK(rxq_number) |
+ MVNETA_TX_INTR_MASK(txq_number) |
+ MVNETA_MISCINTR_INTR_MASK);
+ local_irq_restore(flags);
+ } else {
+ enable_percpu_irq(pp->dev->irq, 0);
+ }
}
- port->cause_rx_tx = cause_rx_tx;
+ if (pp->neta_armada3700)
+ pp->cause_rx_tx = cause_rx_tx;
+ else
+ port->cause_rx_tx = cause_rx_tx;
+
return rx_done;
}
@@ -2716,7 +2786,7 @@ static int mvneta_rxq_fill(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
for (i = 0; i < num; i++) {
memset(rxq->descs + i, 0, sizeof(struct mvneta_rx_desc));
- if (mvneta_rx_refill(pp, rxq->descs + i) != 0) {
+ if (mvneta_rx_refill(pp, rxq->descs + i, rxq) != 0) {
netdev_err(pp->dev, "%s:rxq %d, %d of %d buffs filled\n",
__func__, rxq->id, i, num);
break;
@@ -2773,7 +2843,7 @@ static int mvneta_rxq_init(struct mvneta_port *pp,
mvreg_write(pp, MVNETA_RXQ_SIZE_REG(rxq->id), rxq->size);
/* Set Offset */
- mvneta_rxq_offset_set(pp, rxq, NET_SKB_PAD);
+ mvneta_rxq_offset_set(pp, rxq, NET_SKB_PAD - pp->rx_offset_correction);
/* Set coalescing pkts and time */
mvneta_rx_pkts_coal_set(pp, rxq, rxq->pkts_coal);
@@ -2784,14 +2854,14 @@ static int mvneta_rxq_init(struct mvneta_port *pp,
mvneta_rxq_buf_size_set(pp, rxq,
MVNETA_RX_BUF_SIZE(pp->pkt_size));
mvneta_rxq_bm_disable(pp, rxq);
+ mvneta_rxq_fill(pp, rxq, rxq->size);
} else {
mvneta_rxq_bm_enable(pp, rxq);
mvneta_rxq_long_pool_set(pp, rxq);
mvneta_rxq_short_pool_set(pp, rxq);
+ mvneta_rxq_non_occup_desc_add(pp, rxq, rxq->size);
}
- mvneta_rxq_fill(pp, rxq, rxq->size);
-
return 0;
}
@@ -2974,11 +3044,16 @@ static void mvneta_start_dev(struct mvneta_port *pp)
/* start the Rx/Tx activity */
mvneta_port_enable(pp);
- /* Enable polling on the port */
- for_each_online_cpu(cpu) {
- struct mvneta_pcpu_port *port = per_cpu_ptr(pp->ports, cpu);
+ if (!pp->neta_armada3700) {
+ /* Enable polling on the port */
+ for_each_online_cpu(cpu) {
+ struct mvneta_pcpu_port *port =
+ per_cpu_ptr(pp->ports, cpu);
- napi_enable(&port->napi);
+ napi_enable(&port->napi);
+ }
+ } else {
+ napi_enable(&pp->napi);
}
/* Unmask interrupts. It has to be done from each CPU */
@@ -3000,10 +3075,15 @@ static void mvneta_stop_dev(struct mvneta_port *pp)
phy_stop(ndev->phydev);
- for_each_online_cpu(cpu) {
- struct mvneta_pcpu_port *port = per_cpu_ptr(pp->ports, cpu);
+ if (!pp->neta_armada3700) {
+ for_each_online_cpu(cpu) {
+ struct mvneta_pcpu_port *port =
+ per_cpu_ptr(pp->ports, cpu);
- napi_disable(&port->napi);
+ napi_disable(&port->napi);
+ }
+ } else {
+ napi_disable(&pp->napi);
}
netif_carrier_off(pp->dev);
@@ -3024,29 +3104,6 @@ static void mvneta_stop_dev(struct mvneta_port *pp)
mvneta_rx_reset(pp);
}
-/* Return positive if MTU is valid */
-static int mvneta_check_mtu_valid(struct net_device *dev, int mtu)
-{
- if (mtu < 68) {
- netdev_err(dev, "cannot change mtu to less than 68\n");
- return -EINVAL;
- }
-
- /* 9676 == 9700 - 20 and rounding to 8 */
- if (mtu > 9676) {
- netdev_info(dev, "Illegal MTU value %d, round to 9676\n", mtu);
- mtu = 9676;
- }
-
- if (!IS_ALIGNED(MVNETA_RX_PKT_SIZE(mtu), 8)) {
- netdev_info(dev, "Illegal MTU value %d, rounding to %d\n",
- mtu, ALIGN(MVNETA_RX_PKT_SIZE(mtu), 8));
- mtu = ALIGN(MVNETA_RX_PKT_SIZE(mtu), 8);
- }
-
- return mtu;
-}
-
static void mvneta_percpu_enable(void *arg)
{
struct mvneta_port *pp = arg;
@@ -3067,9 +3124,11 @@ static int mvneta_change_mtu(struct net_device *dev, int mtu)
struct mvneta_port *pp = netdev_priv(dev);
int ret;
- mtu = mvneta_check_mtu_valid(dev, mtu);
- if (mtu < 0)
- return -EINVAL;
+ if (!IS_ALIGNED(MVNETA_RX_PKT_SIZE(mtu), 8)) {
+ netdev_info(dev, "Illegal MTU value %d, rounding to %d\n",
+ mtu, ALIGN(MVNETA_RX_PKT_SIZE(mtu), 8));
+ mtu = ALIGN(MVNETA_RX_PKT_SIZE(mtu), 8);
+ }
dev->mtu = mtu;
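
Note: with the 68/9676 bounds now enforced by the core (see the min_mtu/max_mtu assignment in the probe hunk below), the driver keeps only the 8-byte rounding of the derived RX packet size. ALIGN() and IS_ALIGNED() operate on power-of-two boundaries; minimal stand-ins for illustration (the kernel's own versions live in include/linux/kernel.h):

	#define EX_ALIGN(x, a)       (((x) + ((a) - 1)) & ~((a) - 1))
	#define EX_IS_ALIGNED(x, a)  (((x) & ((a) - 1)) == 0)

	/* EX_ALIGN(9698, 8) == 9704, and EX_IS_ALIGNED(9704, 8) holds */
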
@@ -3434,31 +3493,37 @@ static int mvneta_open(struct net_device *dev)
goto err_cleanup_rxqs;
/* Connect to port interrupt line */
- ret = request_percpu_irq(pp->dev->irq, mvneta_isr,
- MVNETA_DRIVER_NAME, pp->ports);
+ if (pp->neta_armada3700)
+ ret = request_irq(pp->dev->irq, mvneta_isr, 0,
+ dev->name, pp);
+ else
+ ret = request_percpu_irq(pp->dev->irq, mvneta_percpu_isr,
+ dev->name, pp->ports);
if (ret) {
netdev_err(pp->dev, "cannot request irq %d\n", pp->dev->irq);
goto err_cleanup_txqs;
}
- /* Enable per-CPU interrupt on all the CPU to handle our RX
- * queue interrupts
- */
- on_each_cpu(mvneta_percpu_enable, pp, true);
+ if (!pp->neta_armada3700) {
+ /* Enable per-CPU interrupt on all the CPUs to handle our RX
+ * queue interrupts
+ */
+ on_each_cpu(mvneta_percpu_enable, pp, true);
- pp->is_stopped = false;
- /* Register a CPU notifier to handle the case where our CPU
- * might be taken offline.
- */
- ret = cpuhp_state_add_instance_nocalls(online_hpstate,
- &pp->node_online);
- if (ret)
- goto err_free_irq;
+ pp->is_stopped = false;
+ /* Register a CPU notifier to handle the case where our CPU
+ * might be taken offline.
+ */
+ ret = cpuhp_state_add_instance_nocalls(online_hpstate,
+ &pp->node_online);
+ if (ret)
+ goto err_free_irq;
- ret = cpuhp_state_add_instance_nocalls(CPUHP_NET_MVNETA_DEAD,
- &pp->node_dead);
- if (ret)
- goto err_free_online_hp;
+ ret = cpuhp_state_add_instance_nocalls(CPUHP_NET_MVNETA_DEAD,
+ &pp->node_dead);
+ if (ret)
+ goto err_free_online_hp;
+ }
/* In default link is down */
netif_carrier_off(pp->dev);
@@ -3474,13 +3539,20 @@ static int mvneta_open(struct net_device *dev)
return 0;
err_free_dead_hp:
- cpuhp_state_remove_instance_nocalls(CPUHP_NET_MVNETA_DEAD,
- &pp->node_dead);
+ if (!pp->neta_armada3700)
+ cpuhp_state_remove_instance_nocalls(CPUHP_NET_MVNETA_DEAD,
+ &pp->node_dead);
err_free_online_hp:
- cpuhp_state_remove_instance_nocalls(online_hpstate, &pp->node_online);
+ if (!pp->neta_armada3700)
+ cpuhp_state_remove_instance_nocalls(online_hpstate,
+ &pp->node_online);
err_free_irq:
- on_each_cpu(mvneta_percpu_disable, pp, true);
- free_percpu_irq(pp->dev->irq, pp->ports);
+ if (pp->neta_armada3700) {
+ free_irq(pp->dev->irq, pp);
+ } else {
+ on_each_cpu(mvneta_percpu_disable, pp, true);
+ free_percpu_irq(pp->dev->irq, pp->ports);
+ }
err_cleanup_txqs:
mvneta_cleanup_txqs(pp);
err_cleanup_rxqs:
@@ -3493,23 +3565,31 @@ static int mvneta_stop(struct net_device *dev)
{
struct mvneta_port *pp = netdev_priv(dev);
- /* Inform that we are stopping so we don't want to setup the
- * driver for new CPUs in the notifiers. The code of the
- * notifier for CPU online is protected by the same spinlock,
- * so when we get the lock, the notifer work is done.
- */
- spin_lock(&pp->lock);
- pp->is_stopped = true;
- spin_unlock(&pp->lock);
+ if (!pp->neta_armada3700) {
+ /* Inform that we are stopping so we don't want to set up the
+ * driver for new CPUs in the notifiers. The code of the
+ * notifier for CPU online is protected by the same spinlock,
+ * so when we get the lock, the notifier work is done.
+ */
+ spin_lock(&pp->lock);
+ pp->is_stopped = true;
+ spin_unlock(&pp->lock);
- mvneta_stop_dev(pp);
- mvneta_mdio_remove(pp);
+ mvneta_stop_dev(pp);
+ mvneta_mdio_remove(pp);
+
+ cpuhp_state_remove_instance_nocalls(online_hpstate,
+ &pp->node_online);
+ cpuhp_state_remove_instance_nocalls(CPUHP_NET_MVNETA_DEAD,
+ &pp->node_dead);
+ on_each_cpu(mvneta_percpu_disable, pp, true);
+ free_percpu_irq(dev->irq, pp->ports);
+ } else {
+ mvneta_stop_dev(pp);
+ mvneta_mdio_remove(pp);
+ free_irq(dev->irq, pp);
+ }
- cpuhp_state_remove_instance_nocalls(online_hpstate, &pp->node_online);
- cpuhp_state_remove_instance_nocalls(CPUHP_NET_MVNETA_DEAD,
- &pp->node_dead);
- on_each_cpu(mvneta_percpu_disable, pp, true);
- free_percpu_irq(dev->irq, pp->ports);
mvneta_cleanup_rxqs(pp);
mvneta_cleanup_txqs(pp);
@@ -3788,6 +3868,11 @@ static int mvneta_ethtool_set_rxfh(struct net_device *dev, const u32 *indir,
const u8 *key, const u8 hfunc)
{
struct mvneta_port *pp = netdev_priv(dev);
+
+ /* Current code for Armada 3700 doesn't support RSS features yet */
+ if (pp->neta_armada3700)
+ return -EOPNOTSUPP;
+
/* We require at least one supported parameter to be changed
* and no change in any of the unsupported parameters
*/
@@ -3808,6 +3893,10 @@ static int mvneta_ethtool_get_rxfh(struct net_device *dev, u32 *indir, u8 *key,
{
struct mvneta_port *pp = netdev_priv(dev);
+ /* Current code for Armada 3700 doesn't support RSS features yet */
+ if (pp->neta_armada3700)
+ return -EOPNOTSUPP;
+
if (hfunc)
*hfunc = ETH_RSS_HASH_TOP;
@@ -3832,6 +3921,7 @@ static const struct net_device_ops mvneta_netdev_ops = {
};
const struct ethtool_ops mvneta_eth_tool_ops = {
+ .nway_reset = phy_ethtool_nway_reset,
.get_link = ethtool_op_get_link,
.set_coalesce = mvneta_ethtool_set_coalesce,
.get_coalesce = mvneta_ethtool_get_coalesce,
@@ -3885,6 +3975,11 @@ static int mvneta_init(struct device *dev, struct mvneta_port *pp)
rxq->size = pp->rx_ring_size;
rxq->pkts_coal = MVNETA_RX_COAL_PKTS;
rxq->time_coal = MVNETA_RX_COAL_USEC;
+ rxq->buf_virt_addr = devm_kmalloc(pp->dev->dev.parent,
+ rxq->size * sizeof(void *),
+ GFP_KERNEL);
+ if (!rxq->buf_virt_addr)
+ return -ENOMEM;
}
return 0;
@@ -3909,16 +4004,29 @@ static void mvneta_conf_mbus_windows(struct mvneta_port *pp,
win_enable = 0x3f;
win_protect = 0;
- for (i = 0; i < dram->num_cs; i++) {
- const struct mbus_dram_window *cs = dram->cs + i;
- mvreg_write(pp, MVNETA_WIN_BASE(i), (cs->base & 0xffff0000) |
- (cs->mbus_attr << 8) | dram->mbus_dram_target_id);
+ if (dram) {
+ for (i = 0; i < dram->num_cs; i++) {
+ const struct mbus_dram_window *cs = dram->cs + i;
+
+ mvreg_write(pp, MVNETA_WIN_BASE(i),
+ (cs->base & 0xffff0000) |
+ (cs->mbus_attr << 8) |
+ dram->mbus_dram_target_id);
- mvreg_write(pp, MVNETA_WIN_SIZE(i),
- (cs->size - 1) & 0xffff0000);
+ mvreg_write(pp, MVNETA_WIN_SIZE(i),
+ (cs->size - 1) & 0xffff0000);
- win_enable &= ~(1 << i);
- win_protect |= 3 << (2 * i);
+ win_enable &= ~(1 << i);
+ win_protect |= 3 << (2 * i);
+ }
+ } else {
+ /* For Armada3700, open the default 4GB Mbus window, leaving
+ * arbitration of target/attribute to a different layer
+ * of configuration.
+ */
+ mvreg_write(pp, MVNETA_WIN_SIZE(0), 0xffff0000);
+ win_enable &= ~BIT(0);
+ win_protect = 3;
}
mvreg_write(pp, MVNETA_BASE_ADDR_ENABLE, win_enable);
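
Note: the loop encodes one active-low enable bit plus a two-bit protect field per window; for the new Armada 3700 branch the final register values can be computed by hand (a worked example using the constants visible above):

	u32 win_enable = 0x3f;	/* six windows, all disabled (bits are active low) */
	u32 win_protect = 0;

	win_enable &= ~BIT(0);	/* 0x3e: only the default 4GB window 0 enabled */
	win_protect = 3;	/* 0b11: full read/write access on window 0 */
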
@@ -4039,8 +4147,19 @@ static int mvneta_probe(struct platform_device *pdev)
pp->rxq_def = rxq_def;
+ /* Set RX packet offset correction for platforms whose
+ * NET_SKB_PAD exceeds 64B. It should be 64B for 64-bit
+ * platforms and 0B for 32-bit ones.
+ */
+ pp->rx_offset_correction =
+ max(0, NET_SKB_PAD - MVNETA_RX_PKT_OFFSET_CORRECTION);
+
pp->indir[0] = rxq_def;
+ /* Get special SoC configurations */
+ if (of_device_is_compatible(dn, "marvell,armada-3700-neta"))
+ pp->neta_armada3700 = true;
+
pp->clk = devm_clk_get(&pdev->dev, "core");
if (IS_ERR(pp->clk))
pp->clk = devm_clk_get(&pdev->dev, NULL);
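
Note: the correction keeps the programmed RX offset within what the MVNETA_RXQ_CONFIG_REG offset field can encode. A worked example, assuming NET_SKB_PAD = max(32, L1_CACHE_BYTES) and the cache-line sizes of this kernel generation (arm64 then used 128-byte L1_CACHE_BYTES, 32-bit mvebu 64-byte):

	/* correction = max(0, NET_SKB_PAD - 64); offset = NET_SKB_PAD - correction */

	/* arm64: NET_SKB_PAD = 128 -> correction = 64 -> offset = 128 - 64 = 64 */
	/* arm:   NET_SKB_PAD =  64 -> correction =  0 -> offset =  64 -  0 = 64 */

Either way the hardware offset stays at 64 bytes; the remaining headroom is recovered in software by adding the correction back to the DMA address at refill time (see the mvneta_rx_refill() hunk above).
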
@@ -4108,7 +4227,11 @@ static int mvneta_probe(struct platform_device *pdev)
pp->tx_csum_limit = tx_csum_limit;
dram_target_info = mv_mbus_dram_info();
- if (dram_target_info)
+ /* Armada3700 requires setting the default Mbus window
+ * configuration, but without using a filled
+ * mbus_dram_target_info structure.
+ */
+ if (dram_target_info || pp->neta_armada3700)
mvneta_conf_mbus_windows(pp, dram_target_info);
pp->tx_ring_size = MVNETA_MAX_TXD;
@@ -4141,11 +4264,20 @@ static int mvneta_probe(struct platform_device *pdev)
goto err_netdev;
}
- for_each_present_cpu(cpu) {
- struct mvneta_pcpu_port *port = per_cpu_ptr(pp->ports, cpu);
+ /* The Armada3700 network controller does not support per-cpu
+ * operation, so only a single NAPI should be initialized.
+ */
+ if (pp->neta_armada3700) {
+ netif_napi_add(dev, &pp->napi, mvneta_poll, NAPI_POLL_WEIGHT);
+ } else {
+ for_each_present_cpu(cpu) {
+ struct mvneta_pcpu_port *port =
+ per_cpu_ptr(pp->ports, cpu);
- netif_napi_add(dev, &port->napi, mvneta_poll, NAPI_POLL_WEIGHT);
- port->pp = pp;
+ netif_napi_add(dev, &port->napi, mvneta_poll,
+ NAPI_POLL_WEIGHT);
+ port->pp = pp;
+ }
}
dev->features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO;
@@ -4154,6 +4286,11 @@ static int mvneta_probe(struct platform_device *pdev)
dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
dev->gso_max_segs = MVNETA_MAX_TSO_SEGS;
+ /* MTU range: 68 - 9676 */
+ dev->min_mtu = ETH_MIN_MTU;
+ /* 9676 == 9700 - 20 and rounding to 8 */
+ dev->max_mtu = 9676;
+
err = register_netdev(dev);
if (err < 0) {
dev_err(&pdev->dev, "failed to register\n");
@@ -4230,6 +4367,7 @@ static int mvneta_remove(struct platform_device *pdev)
static const struct of_device_id mvneta_match[] = {
{ .compatible = "marvell,armada-370-neta" },
{ .compatible = "marvell,armada-xp-neta" },
+ { .compatible = "marvell,armada-3700-neta" },
{ }
};
MODULE_DEVICE_TABLE(of, mvneta_match);
diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c
index 1026c452e39d..cda04b3126bc 100644
--- a/drivers/net/ethernet/marvell/mvpp2.c
+++ b/drivers/net/ethernet/marvell/mvpp2.c
@@ -770,6 +770,17 @@ struct mvpp2_rx_desc {
u32 reserved8;
};
+struct mvpp2_txq_pcpu_buf {
+ /* Transmitted SKB */
+ struct sk_buff *skb;
+
+ /* Physical address of transmitted buffer */
+ dma_addr_t phys;
+
+ /* Size transmitted */
+ size_t size;
+};
+
/* Per-CPU Tx queue control */
struct mvpp2_txq_pcpu {
int cpu;
@@ -785,11 +796,8 @@ struct mvpp2_txq_pcpu {
/* Number of Tx DMA descriptors reserved for each CPU */
int reserved_num;
- /* Array of transmitted skb */
- struct sk_buff **tx_skb;
-
- /* Array of transmitted buffers' physical addresses */
- dma_addr_t *tx_buffs;
+ /* Infos about transmitted buffers */
+ struct mvpp2_txq_pcpu_buf *buffs;
/* Index of last TX DMA descriptor that was inserted */
int txq_put_index;
@@ -979,10 +987,11 @@ static void mvpp2_txq_inc_put(struct mvpp2_txq_pcpu *txq_pcpu,
struct sk_buff *skb,
struct mvpp2_tx_desc *tx_desc)
{
- txq_pcpu->tx_skb[txq_pcpu->txq_put_index] = skb;
- if (skb)
- txq_pcpu->tx_buffs[txq_pcpu->txq_put_index] =
- tx_desc->buf_phys_addr;
+ struct mvpp2_txq_pcpu_buf *tx_buf =
+ txq_pcpu->buffs + txq_pcpu->txq_put_index;
+ tx_buf->skb = skb;
+ tx_buf->size = tx_desc->data_size;
+ tx_buf->phys = tx_desc->buf_phys_addr;
txq_pcpu->txq_put_index++;
if (txq_pcpu->txq_put_index == txq_pcpu->size)
txq_pcpu->txq_put_index = 0;
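
Note: merging the two parallel per-CPU arrays into one mvpp2_txq_pcpu_buf also records the mapped length at queue time; the release path previously derived it from skb_headlen() of whatever skb was stored on the descriptor, which is wrong for descriptors that do not carry the linear part of an skb. After the change the unmap is self-describing (condensed from the mvpp2_txq_bufs_free() hunk below):

	struct mvpp2_txq_pcpu_buf *tx_buf =
		txq_pcpu->buffs + txq_pcpu->txq_get_index;

	dma_unmap_single(dev, tx_buf->phys, tx_buf->size, DMA_TO_DEVICE);
	if (tx_buf->skb)	/* NULL when no skb completes on this descriptor */
		dev_kfree_skb_any(tx_buf->skb);
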
@@ -4401,17 +4410,16 @@ static void mvpp2_txq_bufs_free(struct mvpp2_port *port,
int i;
for (i = 0; i < num; i++) {
- dma_addr_t buf_phys_addr =
- txq_pcpu->tx_buffs[txq_pcpu->txq_get_index];
- struct sk_buff *skb = txq_pcpu->tx_skb[txq_pcpu->txq_get_index];
+ struct mvpp2_txq_pcpu_buf *tx_buf =
+ txq_pcpu->buffs + txq_pcpu->txq_get_index;
mvpp2_txq_inc_get(txq_pcpu);
- dma_unmap_single(port->dev->dev.parent, buf_phys_addr,
- skb_headlen(skb), DMA_TO_DEVICE);
- if (!skb)
+ dma_unmap_single(port->dev->dev.parent, tx_buf->phys,
+ tx_buf->size, DMA_TO_DEVICE);
+ if (!tx_buf->skb)
continue;
- dev_kfree_skb_any(skb);
+ dev_kfree_skb_any(tx_buf->skb);
}
}
@@ -4651,15 +4659,10 @@ static int mvpp2_txq_init(struct mvpp2_port *port,
for_each_present_cpu(cpu) {
txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
txq_pcpu->size = txq->size;
- txq_pcpu->tx_skb = kmalloc(txq_pcpu->size *
- sizeof(*txq_pcpu->tx_skb),
- GFP_KERNEL);
- if (!txq_pcpu->tx_skb)
- goto error;
-
- txq_pcpu->tx_buffs = kmalloc(txq_pcpu->size *
- sizeof(dma_addr_t), GFP_KERNEL);
- if (!txq_pcpu->tx_buffs)
+ txq_pcpu->buffs = kmalloc(txq_pcpu->size *
+ sizeof(struct mvpp2_txq_pcpu_buf),
+ GFP_KERNEL);
+ if (!txq_pcpu->buffs)
goto error;
txq_pcpu->count = 0;
@@ -4673,8 +4676,7 @@ static int mvpp2_txq_init(struct mvpp2_port *port,
error:
for_each_present_cpu(cpu) {
txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
- kfree(txq_pcpu->tx_skb);
- kfree(txq_pcpu->tx_buffs);
+ kfree(txq_pcpu->buffs);
}
dma_free_coherent(port->dev->dev.parent,
@@ -4693,8 +4695,7 @@ static void mvpp2_txq_deinit(struct mvpp2_port *port,
for_each_present_cpu(cpu) {
txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
- kfree(txq_pcpu->tx_skb);
- kfree(txq_pcpu->tx_buffs);
+ kfree(txq_pcpu->buffs);
}
if (txq->descs)
@@ -5453,29 +5454,6 @@ static void mvpp2_stop_dev(struct mvpp2_port *port)
phy_stop(ndev->phydev);
}
-/* Return positive if MTU is valid */
-static inline int mvpp2_check_mtu_valid(struct net_device *dev, int mtu)
-{
- if (mtu < 68) {
- netdev_err(dev, "cannot change mtu to less than 68\n");
- return -EINVAL;
- }
-
- /* 9676 == 9700 - 20 and rounding to 8 */
- if (mtu > 9676) {
- netdev_info(dev, "illegal MTU value %d, round to 9676\n", mtu);
- mtu = 9676;
- }
-
- if (!IS_ALIGNED(MVPP2_RX_PKT_SIZE(mtu), 8)) {
- netdev_info(dev, "illegal MTU value %d, round to %d\n", mtu,
- ALIGN(MVPP2_RX_PKT_SIZE(mtu), 8));
- mtu = ALIGN(MVPP2_RX_PKT_SIZE(mtu), 8);
- }
-
- return mtu;
-}
-
static int mvpp2_check_ringparam_valid(struct net_device *dev,
struct ethtool_ringparam *ring)
{
@@ -5717,10 +5695,10 @@ static int mvpp2_change_mtu(struct net_device *dev, int mtu)
struct mvpp2_port *port = netdev_priv(dev);
int err;
- mtu = mvpp2_check_mtu_valid(dev, mtu);
- if (mtu < 0) {
- err = mtu;
- goto error;
+ if (!IS_ALIGNED(MVPP2_RX_PKT_SIZE(mtu), 8)) {
+ netdev_info(dev, "illegal MTU value %d, round to %d\n", mtu,
+ ALIGN(MVPP2_RX_PKT_SIZE(mtu), 8));
+ mtu = ALIGN(MVPP2_RX_PKT_SIZE(mtu), 8);
}
if (!netif_running(dev)) {
@@ -5946,6 +5924,7 @@ static const struct net_device_ops mvpp2_netdev_ops = {
};
static const struct ethtool_ops mvpp2_eth_tool_ops = {
+ .nway_reset = phy_ethtool_nway_reset,
.get_link = ethtool_op_get_link,
.set_coalesce = mvpp2_ethtool_set_coalesce,
.get_coalesce = mvpp2_ethtool_get_coalesce,
@@ -6212,6 +6191,11 @@ static int mvpp2_port_probe(struct platform_device *pdev,
dev->hw_features |= features | NETIF_F_RXCSUM | NETIF_F_GRO;
dev->vlan_features |= features;
+ /* MTU range: 68 - 9676 */
+ dev->min_mtu = ETH_MIN_MTU;
+ /* 9676 == 9700 - 20 and rounding to 8 */
+ dev->max_mtu = 9676;
+
err = register_netdev(dev);
if (err < 0) {
dev_err(&pdev->dev, "failed to register netdev\n");
diff --git a/drivers/net/ethernet/marvell/pxa168_eth.c b/drivers/net/ethernet/marvell/pxa168_eth.c
index 5d5000c8edf1..3af2814ada23 100644
--- a/drivers/net/ethernet/marvell/pxa168_eth.c
+++ b/drivers/net/ethernet/marvell/pxa168_eth.c
@@ -1209,9 +1209,6 @@ static int pxa168_eth_change_mtu(struct net_device *dev, int mtu)
int retval;
struct pxa168_eth_private *pep = netdev_priv(dev);
- if ((mtu > 9500) || (mtu < 68))
- return -EINVAL;
-
dev->mtu = mtu;
retval = set_port_config_ext(pep);
@@ -1396,6 +1393,7 @@ static void pxa168_get_drvinfo(struct net_device *dev,
static const struct ethtool_ops pxa168_ethtool_ops = {
.get_drvinfo = pxa168_get_drvinfo,
+ .nway_reset = phy_ethtool_nway_reset,
.get_link = ethtool_op_get_link,
.get_ts_info = ethtool_op_get_ts_info,
.get_link_ksettings = pxa168_get_link_ksettings,
@@ -1459,6 +1457,10 @@ static int pxa168_eth_probe(struct platform_device *pdev)
dev->base_addr = 0;
dev->ethtool_ops = &pxa168_ethtool_ops;
+ /* MTU range: 68 - 9500 */
+ dev->min_mtu = ETH_MIN_MTU;
+ dev->max_mtu = 9500;
+
INIT_WORK(&pep->tx_timeout_task, pxa168_eth_tx_timeout_task);
if (pdev->dev.of_node)
diff --git a/drivers/net/ethernet/marvell/skge.c b/drivers/net/ethernet/marvell/skge.c
index 7173836fe361..9146a514fb33 100644
--- a/drivers/net/ethernet/marvell/skge.c
+++ b/drivers/net/ethernet/marvell/skge.c
@@ -1048,7 +1048,7 @@ static const char *skge_pause(enum pause_status status)
static void skge_link_up(struct skge_port *skge)
{
skge_write8(skge->hw, SK_REG(skge->port, LNK_LED_REG),
- LED_BLK_OFF|LED_SYNC_OFF|LED_ON);
+ LED_BLK_OFF|LED_SYNC_OFF|LED_REG_ON);
netif_carrier_on(skge->netdev);
netif_wake_queue(skge->netdev);
@@ -1062,7 +1062,7 @@ static void skge_link_up(struct skge_port *skge)
static void skge_link_down(struct skge_port *skge)
{
- skge_write8(skge->hw, SK_REG(skge->port, LNK_LED_REG), LED_OFF);
+ skge_write8(skge->hw, SK_REG(skge->port, LNK_LED_REG), LED_REG_OFF);
netif_carrier_off(skge->netdev);
netif_stop_queue(skge->netdev);
@@ -2668,7 +2668,7 @@ static int skge_down(struct net_device *dev)
if (hw->ports == 1)
free_irq(hw->pdev->irq, hw);
- skge_write8(skge->hw, SK_REG(skge->port, LNK_LED_REG), LED_OFF);
+ skge_write8(skge->hw, SK_REG(skge->port, LNK_LED_REG), LED_REG_OFF);
if (is_genesis(hw))
genesis_stop(skge);
else
@@ -2900,9 +2900,6 @@ static int skge_change_mtu(struct net_device *dev, int new_mtu)
{
int err;
- if (new_mtu < ETH_ZLEN || new_mtu > ETH_JUMBO_MTU)
- return -EINVAL;
-
if (!netif_running(dev)) {
dev->mtu = new_mtu;
return 0;
@@ -3857,6 +3854,10 @@ static struct net_device *skge_devinit(struct skge_hw *hw, int port,
dev->watchdog_timeo = TX_WATCHDOG;
dev->irq = hw->pdev->irq;
+ /* MTU range: 60 - 9000 */
+ dev->min_mtu = ETH_ZLEN;
+ dev->max_mtu = ETH_JUMBO_MTU;
+
if (highmem)
dev->features |= NETIF_F_HIGHDMA;
diff --git a/drivers/net/ethernet/marvell/skge.h b/drivers/net/ethernet/marvell/skge.h
index a2eb34115844..3ea151ff9c43 100644
--- a/drivers/net/ethernet/marvell/skge.h
+++ b/drivers/net/ethernet/marvell/skge.h
@@ -662,8 +662,8 @@ enum {
LED_BLK_OFF = 1<<4, /* Link LED Blinking Off */
LED_SYNC_ON = 1<<3, /* Use Sync Wire to switch LED */
LED_SYNC_OFF = 1<<2, /* Disable Sync Wire Input */
- LED_ON = 1<<1, /* switch LED on */
- LED_OFF = 1<<0, /* switch LED off */
+ LED_REG_ON = 1<<1, /* switch LED on */
+ LED_REG_OFF = 1<<0, /* switch LED off */
};
/* Receive GMAC FIFO (YUKON) */
diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c
index 941c8e2c944e..b60ad0e56a9f 100644
--- a/drivers/net/ethernet/marvell/sky2.c
+++ b/drivers/net/ethernet/marvell/sky2.c
@@ -2398,16 +2398,6 @@ static int sky2_change_mtu(struct net_device *dev, int new_mtu)
u16 ctl, mode;
u32 imask;
- /* MTU size outside the spec */
- if (new_mtu < ETH_ZLEN || new_mtu > ETH_JUMBO_MTU)
- return -EINVAL;
-
- /* MTU > 1500 on yukon FE and FE+ not allowed */
- if (new_mtu > ETH_DATA_LEN &&
- (hw->chip_id == CHIP_ID_YUKON_FE ||
- hw->chip_id == CHIP_ID_YUKON_FE_P))
- return -EINVAL;
-
if (!netif_running(dev)) {
dev->mtu = new_mtu;
netdev_update_features(dev);
@@ -4808,6 +4798,14 @@ static struct net_device *sky2_init_netdev(struct sky2_hw *hw, unsigned port,
dev->features |= dev->hw_features;
+ /* MTU range: 60 - 1500 or 9000 */
+ dev->min_mtu = ETH_ZLEN;
+ if (hw->chip_id == CHIP_ID_YUKON_FE ||
+ hw->chip_id == CHIP_ID_YUKON_FE_P)
+ dev->max_mtu = ETH_DATA_LEN;
+ else
+ dev->max_mtu = ETH_JUMBO_MTU;
+
/* try to get mac address in the following order:
* 1) from device tree data
* 2) from internal registers set by bootloader
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
index 86a89cbd3ec9..3dd87889e67e 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -845,7 +845,7 @@ static int mtk_start_xmit(struct sk_buff *skb, struct net_device *dev)
drop:
spin_unlock(&eth->page_lock);
stats->tx_dropped++;
- dev_kfree_skb(skb);
+ dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
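
Note: ndo_start_xmit can be entered with hard interrupts disabled (netpoll is the usual case), where plain dev_kfree_skb() is not allowed; the _any variant chooses a context-safe path. Roughly, sketched after net/core/dev.c's __dev_kfree_skb_any and not verbatim:

	static inline void kfree_skb_any_sketch(struct sk_buff *skb)
	{
		if (in_irq() || irqs_disabled())
			dev_kfree_skb_irq(skb);	/* queue for softirq-time free */
		else
			dev_kfree_skb(skb);	/* immediate free is safe here */
	}
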
@@ -2247,7 +2247,6 @@ static const struct net_device_ops mtk_netdev_ops = {
.ndo_set_mac_address = mtk_set_mac_address,
.ndo_validate_addr = eth_validate_addr,
.ndo_do_ioctl = mtk_do_ioctl,
- .ndo_change_mtu = eth_change_mtu,
.ndo_tx_timeout = mtk_tx_timeout,
.ndo_get_stats64 = mtk_get_stats64,
.ndo_fix_features = mtk_fix_features,
diff --git a/drivers/net/ethernet/mellanox/mlx4/Kconfig b/drivers/net/ethernet/mellanox/mlx4/Kconfig
index 5098e7f21987..22b1cc012bc9 100644
--- a/drivers/net/ethernet/mellanox/mlx4/Kconfig
+++ b/drivers/net/ethernet/mellanox/mlx4/Kconfig
@@ -7,7 +7,7 @@ config MLX4_EN
depends on MAY_USE_DEVLINK
depends on PCI
select MLX4_CORE
- select PTP_1588_CLOCK
+ imply PTP_1588_CLOCK
---help---
This driver supports Mellanox Technologies ConnectX Ethernet
devices.
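
Note: Kconfig's imply, unlike select, only defaults PTP_1588_CLOCK on and still lets the user turn it off (or build it as a module while mlx4 is built in), so the driver must compile and run with PTP absent. The usual call-site guard looks like this illustrative sketch; mlx4's actual timestamping hooks differ:

	#if IS_REACHABLE(CONFIG_PTP_1588_CLOCK)
		mdev->ptp_clock = ptp_clock_register(&mdev->ptp_clock_info,
						     &mdev->pdev->dev);
	#else
		mdev->ptp_clock = NULL;	/* timestamping quietly unavailable */
	#endif
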
diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c
index e36bebcab3f2..a49072b4fa52 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c
@@ -2679,15 +2679,13 @@ struct mlx4_cmd_mailbox *mlx4_alloc_cmd_mailbox(struct mlx4_dev *dev)
if (!mailbox)
return ERR_PTR(-ENOMEM);
- mailbox->buf = pci_pool_alloc(mlx4_priv(dev)->cmd.pool, GFP_KERNEL,
- &mailbox->dma);
+ mailbox->buf = pci_pool_zalloc(mlx4_priv(dev)->cmd.pool, GFP_KERNEL,
+ &mailbox->dma);
if (!mailbox->buf) {
kfree(mailbox);
return ERR_PTR(-ENOMEM);
}
- memset(mailbox->buf, 0, MLX4_MAILBOX_SIZE);
-
return mailbox;
}
EXPORT_SYMBOL_GPL(mlx4_alloc_cmd_mailbox);
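
Note: pci_pool_zalloc() (a thin wrapper over dma_pool_zalloc()) folds the allocate-then-clear sequence into one call. The equivalence, spelled out, assuming the pool was created with MLX4_MAILBOX_SIZE entries, which the deleted memset implies:

	/* before: two steps */
	buf = pci_pool_alloc(pool, GFP_KERNEL, &dma);
	if (buf)
		memset(buf, 0, MLX4_MAILBOX_SIZE);

	/* after: one step, same result */
	buf = pci_pool_zalloc(pool, GFP_KERNEL, &dma);
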
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_cq.c b/drivers/net/ethernet/mellanox/mlx4/en_cq.c
index e3be7e44ff51..09dd3776db76 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_cq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_cq.c
@@ -65,7 +65,7 @@ int mlx4_en_create_cq(struct mlx4_en_priv *priv,
cq->buf_size = cq->size * mdev->dev->caps.cqe_size;
cq->ring = ring;
- cq->is_tx = mode;
+ cq->type = mode;
cq->vector = mdev->dev->caps.num_comp_vectors;
/* Allocate HW buffers on provided NUMA node.
@@ -104,7 +104,7 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
*cq->mcq.arm_db = 0;
memset(cq->buf, 0, cq->buf_size);
- if (cq->is_tx == RX) {
+ if (cq->type == RX) {
if (!mlx4_is_eq_vector_valid(mdev->dev, priv->port,
cq->vector)) {
cq->vector = cpumask_first(priv->rx_ring[cq->ring]->affinity_mask);
@@ -127,25 +127,17 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
/* For TX we use the same irq per
ring we assigned for the RX */
struct mlx4_en_cq *rx_cq;
- int xdp_index;
-
- /* The xdp tx irq must align with the rx ring that forwards to
- * it, so reindex these from 0. This should only happen when
- * tx_ring_num is not a multiple of rx_ring_num.
- */
- xdp_index = (priv->xdp_ring_num - priv->tx_ring_num) + cq_idx;
- if (xdp_index >= 0)
- cq_idx = xdp_index;
+
cq_idx = cq_idx % priv->rx_ring_num;
rx_cq = priv->rx_cq[cq_idx];
cq->vector = rx_cq->vector;
}
- if (!cq->is_tx)
+ if (cq->type == RX)
cq->size = priv->rx_ring[cq->ring]->actual_size;
- if ((cq->is_tx && priv->hwtstamp_config.tx_type) ||
- (!cq->is_tx && priv->hwtstamp_config.rx_filter))
+ if ((cq->type != RX && priv->hwtstamp_config.tx_type) ||
+ (cq->type == RX && priv->hwtstamp_config.rx_filter))
timestamp_en = 1;
err = mlx4_cq_alloc(mdev->dev, cq->size, &cq->wqres.mtt,
@@ -154,10 +146,10 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
if (err)
goto free_eq;
- cq->mcq.comp = cq->is_tx ? mlx4_en_tx_irq : mlx4_en_rx_irq;
+ cq->mcq.comp = cq->type != RX ? mlx4_en_tx_irq : mlx4_en_rx_irq;
cq->mcq.event = mlx4_en_cq_event;
- if (cq->is_tx)
+ if (cq->type != RX)
netif_tx_napi_add(cq->dev, &cq->napi, mlx4_en_poll_tx_cq,
NAPI_POLL_WEIGHT);
else
@@ -181,7 +173,7 @@ void mlx4_en_destroy_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq **pcq)
mlx4_free_hwq_res(mdev->dev, &cq->wqres, cq->buf_size);
if (mlx4_is_eq_vector_valid(mdev->dev, priv->port, cq->vector) &&
- cq->is_tx == RX)
+ cq->type == RX)
mlx4_release_eq(priv->mdev->dev, cq->vector);
cq->vector = 0;
cq->buf_size = 0;
@@ -193,10 +185,6 @@ void mlx4_en_destroy_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq **pcq)
void mlx4_en_deactivate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
{
napi_disable(&cq->napi);
- if (!cq->is_tx) {
- napi_hash_del(&cq->napi);
- synchronize_rcu();
- }
netif_napi_del(&cq->napi);
mlx4_cq_free(priv->mdev->dev, &cq->mcq);
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
index bdda17d2ea0f..d9c9f86a30df 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
@@ -49,16 +49,19 @@
static int mlx4_en_moderation_update(struct mlx4_en_priv *priv)
{
- int i;
+ int i, t;
int err = 0;
- for (i = 0; i < priv->tx_ring_num; i++) {
- priv->tx_cq[i]->moder_cnt = priv->tx_frames;
- priv->tx_cq[i]->moder_time = priv->tx_usecs;
- if (priv->port_up) {
- err = mlx4_en_set_cq_moder(priv, priv->tx_cq[i]);
- if (err)
- return err;
+ for (t = 0; t < MLX4_EN_NUM_TX_TYPES; t++) {
+ for (i = 0; i < priv->tx_ring_num[t]; i++) {
+ priv->tx_cq[t][i]->moder_cnt = priv->tx_frames;
+ priv->tx_cq[t][i]->moder_time = priv->tx_usecs;
+ if (priv->port_up) {
+ err = mlx4_en_set_cq_moder(priv,
+ priv->tx_cq[t][i]);
+ if (err)
+ return err;
+ }
}
}
@@ -192,6 +195,10 @@ static const char main_strings[][ETH_GSTRING_LEN] = {
"tx_prio_7_packets", "tx_prio_7_bytes",
"tx_novlan_packets", "tx_novlan_bytes",
+ /* xdp statistics */
+ "rx_xdp_drop",
+ "rx_xdp_tx",
+ "rx_xdp_tx_full",
};
static const char mlx4_en_test_names[][ETH_GSTRING_LEN]= {
@@ -336,8 +343,8 @@ static int mlx4_en_get_sset_count(struct net_device *dev, int sset)
switch (sset) {
case ETH_SS_STATS:
return bitmap_iterator_count(&it) +
- (priv->tx_ring_num * 2) +
- (priv->rx_ring_num * 3);
+ (priv->tx_ring_num[TX] * 2) +
+ (priv->rx_ring_num * (3 + NUM_XDP_STATS));
case ETH_SS_TEST:
return MLX4_EN_NUM_SELF_TEST - !(priv->mdev->dev->caps.flags
& MLX4_DEV_CAP_FLAG_UC_LOOPBACK) * 2;
@@ -360,6 +367,8 @@ static void mlx4_en_get_ethtool_stats(struct net_device *dev,
spin_lock_bh(&priv->stats_lock);
+ mlx4_en_fold_software_stats(dev);
+
for (i = 0; i < NUM_MAIN_STATS; i++, bitmap_iterator_inc(&it))
if (bitmap_iterator_test(&it))
data[index++] = ((unsigned long *)&dev->stats)[i];
@@ -397,14 +406,21 @@ static void mlx4_en_get_ethtool_stats(struct net_device *dev,
if (bitmap_iterator_test(&it))
data[index++] = ((unsigned long *)&priv->pkstats)[i];
- for (i = 0; i < priv->tx_ring_num; i++) {
- data[index++] = priv->tx_ring[i]->packets;
- data[index++] = priv->tx_ring[i]->bytes;
+ for (i = 0; i < NUM_XDP_STATS; i++, bitmap_iterator_inc(&it))
+ if (bitmap_iterator_test(&it))
+ data[index++] = ((unsigned long *)&priv->xdp_stats)[i];
+
+ for (i = 0; i < priv->tx_ring_num[TX]; i++) {
+ data[index++] = priv->tx_ring[TX][i]->packets;
+ data[index++] = priv->tx_ring[TX][i]->bytes;
}
for (i = 0; i < priv->rx_ring_num; i++) {
data[index++] = priv->rx_ring[i]->packets;
data[index++] = priv->rx_ring[i]->bytes;
data[index++] = priv->rx_ring[i]->dropped;
+ data[index++] = priv->rx_ring[i]->xdp_drop;
+ data[index++] = priv->rx_ring[i]->xdp_tx;
+ data[index++] = priv->rx_ring[i]->xdp_tx_full;
}
spin_unlock_bh(&priv->stats_lock);
@@ -467,7 +483,13 @@ static void mlx4_en_get_strings(struct net_device *dev,
strcpy(data + (index++) * ETH_GSTRING_LEN,
main_strings[strings]);
- for (i = 0; i < priv->tx_ring_num; i++) {
+ for (i = 0; i < NUM_XDP_STATS; i++, strings++,
+ bitmap_iterator_inc(&it))
+ if (bitmap_iterator_test(&it))
+ strcpy(data + (index++) * ETH_GSTRING_LEN,
+ main_strings[strings]);
+
+ for (i = 0; i < priv->tx_ring_num[TX]; i++) {
sprintf(data + (index++) * ETH_GSTRING_LEN,
"tx%d_packets", i);
sprintf(data + (index++) * ETH_GSTRING_LEN,
@@ -480,6 +502,12 @@ static void mlx4_en_get_strings(struct net_device *dev,
"rx%d_bytes", i);
sprintf(data + (index++) * ETH_GSTRING_LEN,
"rx%d_dropped", i);
+ sprintf(data + (index++) * ETH_GSTRING_LEN,
+ "rx%d_xdp_drop", i);
+ sprintf(data + (index++) * ETH_GSTRING_LEN,
+ "rx%d_xdp_tx", i);
+ sprintf(data + (index++) * ETH_GSTRING_LEN,
+ "rx%d_xdp_tx_full", i);
}
break;
case ETH_SS_PRIV_FLAGS:
@@ -1060,7 +1088,7 @@ static int mlx4_en_set_ringparam(struct net_device *dev,
if (rx_size == (priv->port_up ? priv->rx_ring[0]->actual_size :
priv->rx_ring[0]->size) &&
- tx_size == priv->tx_ring[0]->size)
+ tx_size == priv->tx_ring[TX][0]->size)
return 0;
tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
@@ -1105,7 +1133,7 @@ static void mlx4_en_get_ringparam(struct net_device *dev,
param->tx_max_pending = MLX4_EN_MAX_TX_SIZE;
param->rx_pending = priv->port_up ?
priv->rx_ring[0]->actual_size : priv->rx_ring[0]->size;
- param->tx_pending = priv->tx_ring[0]->size;
+ param->tx_pending = priv->tx_ring[TX][0]->size;
}
static u32 mlx4_en_get_rxfh_indir_size(struct net_device *dev)
@@ -1710,7 +1738,7 @@ static void mlx4_en_get_channels(struct net_device *dev,
channel->max_tx = MLX4_EN_MAX_TX_RING_P_UP;
channel->rx_count = priv->rx_ring_num;
- channel->tx_count = priv->tx_ring_num / MLX4_EN_NUM_UP;
+ channel->tx_count = priv->tx_ring_num[TX] / MLX4_EN_NUM_UP;
}
static int mlx4_en_set_channels(struct net_device *dev,
@@ -1721,6 +1749,7 @@ static int mlx4_en_set_channels(struct net_device *dev,
struct mlx4_en_port_profile new_prof;
struct mlx4_en_priv *tmp;
int port_up = 0;
+ int xdp_count;
int err = 0;
if (channel->other_count || channel->combined_count ||
@@ -1729,20 +1758,25 @@ static int mlx4_en_set_channels(struct net_device *dev,
!channel->tx_count || !channel->rx_count)
return -EINVAL;
- if (channel->tx_count * MLX4_EN_NUM_UP <= priv->xdp_ring_num) {
- en_err(priv, "Minimum %d tx channels required with XDP on\n",
- priv->xdp_ring_num / MLX4_EN_NUM_UP + 1);
- return -EINVAL;
- }
-
tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
if (!tmp)
return -ENOMEM;
mutex_lock(&mdev->state_lock);
+ xdp_count = priv->tx_ring_num[TX_XDP] ? channel->rx_count : 0;
+ if (channel->tx_count * MLX4_EN_NUM_UP + xdp_count > MAX_TX_RINGS) {
+ err = -EINVAL;
+ en_err(priv,
+ "Total number of TX and XDP rings (%d) exceeds the maximum supported (%d)\n",
+ channel->tx_count * MLX4_EN_NUM_UP + xdp_count,
+ MAX_TX_RINGS);
+ goto out;
+ }
+
memcpy(&new_prof, priv->prof, sizeof(struct mlx4_en_port_profile));
new_prof.num_tx_rings_p_up = channel->tx_count;
- new_prof.tx_ring_num = channel->tx_count * MLX4_EN_NUM_UP;
+ new_prof.tx_ring_num[TX] = channel->tx_count * MLX4_EN_NUM_UP;
+ new_prof.tx_ring_num[TX_XDP] = xdp_count;
new_prof.rx_ring_num = channel->rx_count;
err = mlx4_en_try_alloc_resources(priv, tmp, &new_prof);
@@ -1756,14 +1790,13 @@ static int mlx4_en_set_channels(struct net_device *dev,
mlx4_en_safe_replace_resources(priv, tmp);
- netif_set_real_num_tx_queues(dev, priv->tx_ring_num -
- priv->xdp_ring_num);
+ netif_set_real_num_tx_queues(dev, priv->tx_ring_num[TX]);
netif_set_real_num_rx_queues(dev, priv->rx_ring_num);
if (dev->num_tc)
mlx4_en_setup_tc(dev, MLX4_EN_NUM_UP);
- en_warn(priv, "Using %d TX rings\n", priv->tx_ring_num);
+ en_warn(priv, "Using %d TX rings\n", priv->tx_ring_num[TX]);
en_warn(priv, "Using %d RX rings\n", priv->rx_ring_num);
if (port_up) {
@@ -1774,8 +1807,8 @@ static int mlx4_en_set_channels(struct net_device *dev,
err = mlx4_en_moderation_update(priv);
out:
- kfree(tmp);
mutex_unlock(&mdev->state_lock);
+ kfree(tmp);
return err;
}
@@ -1823,11 +1856,15 @@ static int mlx4_en_set_priv_flags(struct net_device *dev, u32 flags)
int ret = 0;
if (bf_enabled_new != bf_enabled_old) {
+ int t;
+
if (bf_enabled_new) {
bool bf_supported = true;
- for (i = 0; i < priv->tx_ring_num; i++)
- bf_supported &= priv->tx_ring[i]->bf_alloced;
+ for (t = 0; t < MLX4_EN_NUM_TX_TYPES; t++)
+ for (i = 0; i < priv->tx_ring_num[t]; i++)
+ bf_supported &=
+ priv->tx_ring[t][i]->bf_alloced;
if (!bf_supported) {
en_err(priv, "BlueFlame is not supported\n");
@@ -1839,8 +1876,10 @@ static int mlx4_en_set_priv_flags(struct net_device *dev, u32 flags)
priv->pflags &= ~MLX4_EN_PRIV_FLAGS_BLUEFLAME;
}
- for (i = 0; i < priv->tx_ring_num; i++)
- priv->tx_ring[i]->bf_enabled = bf_enabled_new;
+ for (t = 0; t < MLX4_EN_NUM_TX_TYPES; t++)
+ for (i = 0; i < priv->tx_ring_num[t]; i++)
+ priv->tx_ring[t][i]->bf_enabled =
+ bf_enabled_new;
en_info(priv, "BlueFlame %s\n",
bf_enabled_new ? "Enabled" : "Disabled");
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_main.c b/drivers/net/ethernet/mellanox/mlx4/en_main.c
index bf7628db098a..36a7a54bbb82 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_main.c
@@ -169,7 +169,7 @@ static int mlx4_en_get_profile(struct mlx4_en_dev *mdev)
params->prof[i].tx_ppp = pfctx;
params->prof[i].tx_ring_size = MLX4_EN_DEF_TX_RING_SIZE;
params->prof[i].rx_ring_size = MLX4_EN_DEF_RX_RING_SIZE;
- params->prof[i].tx_ring_num = params->num_tx_rings_p_up *
+ params->prof[i].tx_ring_num[TX] = params->num_tx_rings_p_up *
MLX4_EN_NUM_UP;
params->prof[i].rss_rings = 0;
params->prof[i].inline_thold = inline_thold;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index fb8bb027b69c..bcd955339058 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -51,6 +51,9 @@
#include "mlx4_en.h"
#include "en_port.h"
+#define MLX4_EN_MAX_XDP_MTU ((int)(PAGE_SIZE - ETH_HLEN - (2 * VLAN_HLEN) - \
+ XDP_PACKET_HEADROOM))
+
int mlx4_en_setup_tc(struct net_device *dev, u8 up)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
@@ -1217,8 +1220,8 @@ static void mlx4_en_netpoll(struct net_device *dev)
struct mlx4_en_cq *cq;
int i;
- for (i = 0; i < priv->tx_ring_num; i++) {
- cq = priv->tx_cq[i];
+ for (i = 0; i < priv->tx_ring_num[TX]; i++) {
+ cq = priv->tx_cq[TX][i];
napi_schedule(&cq->napi);
}
}
@@ -1302,12 +1305,14 @@ static void mlx4_en_tx_timeout(struct net_device *dev)
if (netif_msg_timer(priv))
en_warn(priv, "Tx timeout called on port:%d\n", priv->port);
- for (i = 0; i < priv->tx_ring_num; i++) {
+ for (i = 0; i < priv->tx_ring_num[TX]; i++) {
+ struct mlx4_en_tx_ring *tx_ring = priv->tx_ring[TX][i];
+
if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, i)))
continue;
en_warn(priv, "TX timeout on queue: %d, QP: 0x%x, CQ: 0x%x, Cons: 0x%x, Prod: 0x%x\n",
- i, priv->tx_ring[i]->qpn, priv->tx_ring[i]->cqn,
- priv->tx_ring[i]->cons, priv->tx_ring[i]->prod);
+ i, tx_ring->qpn, tx_ring->sp_cqn,
+ tx_ring->cons, tx_ring->prod);
}
priv->port_stats.tx_timeout++;
@@ -1322,6 +1327,7 @@ mlx4_en_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
struct mlx4_en_priv *priv = netdev_priv(dev);
spin_lock_bh(&priv->stats_lock);
+ mlx4_en_fold_software_stats(dev);
netdev_stats_to_stats64(stats, &dev->stats);
spin_unlock_bh(&priv->stats_lock);
@@ -1331,7 +1337,7 @@ mlx4_en_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
static void mlx4_en_set_default_moderation(struct mlx4_en_priv *priv)
{
struct mlx4_en_cq *cq;
- int i;
+ int i, t;
/* If we haven't received a specific coalescing setting
* (module param), we set the moderation parameters as follows:
@@ -1356,10 +1362,12 @@ static void mlx4_en_set_default_moderation(struct mlx4_en_priv *priv)
priv->last_moder_bytes[i] = 0;
}
- for (i = 0; i < priv->tx_ring_num; i++) {
- cq = priv->tx_cq[i];
- cq->moder_cnt = priv->tx_frames;
- cq->moder_time = priv->tx_usecs;
+ for (t = 0; t < MLX4_EN_NUM_TX_TYPES; t++) {
+ for (i = 0; i < priv->tx_ring_num[t]; i++) {
+ cq = priv->tx_cq[t][i];
+ cq->moder_cnt = priv->tx_frames;
+ cq->moder_time = priv->tx_usecs;
+ }
}
/* Reset auto-moderation params */
@@ -1390,10 +1398,8 @@ static void mlx4_en_auto_moderation(struct mlx4_en_priv *priv)
return;
for (ring = 0; ring < priv->rx_ring_num; ring++) {
- spin_lock_bh(&priv->stats_lock);
- rx_packets = priv->rx_ring[ring]->packets;
- rx_bytes = priv->rx_ring[ring]->bytes;
- spin_unlock_bh(&priv->stats_lock);
+ rx_packets = READ_ONCE(priv->rx_ring[ring]->packets);
+ rx_bytes = READ_ONCE(priv->rx_ring[ring]->bytes);
rx_pkt_diff = ((unsigned long) (rx_packets -
priv->last_moder_packets[ring]));
@@ -1529,19 +1535,13 @@ static void mlx4_en_free_affinity_hint(struct mlx4_en_priv *priv, int ring_idx)
static void mlx4_en_init_recycle_ring(struct mlx4_en_priv *priv,
int tx_ring_idx)
{
- struct mlx4_en_tx_ring *tx_ring = priv->tx_ring[tx_ring_idx];
- int rr_index;
+ struct mlx4_en_tx_ring *tx_ring = priv->tx_ring[TX_XDP][tx_ring_idx];
+ int rr_index = tx_ring_idx;
- rr_index = (priv->xdp_ring_num - priv->tx_ring_num) + tx_ring_idx;
- if (rr_index >= 0) {
- tx_ring->free_tx_desc = mlx4_en_recycle_tx_desc;
- tx_ring->recycle_ring = priv->rx_ring[rr_index];
- en_dbg(DRV, priv,
- "Set tx_ring[%d]->recycle_ring = rx_ring[%d]\n",
- tx_ring_idx, rr_index);
- } else {
- tx_ring->recycle_ring = NULL;
- }
+ tx_ring->free_tx_desc = mlx4_en_recycle_tx_desc;
+ tx_ring->recycle_ring = priv->rx_ring[rr_index];
+ en_dbg(DRV, priv, "Set tx_ring[%d][%d]->recycle_ring = rx_ring[%d]\n",
+ TX_XDP, tx_ring_idx, rr_index);
}
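With the XDP TX rings split into their own array, the recycle-ring lookup collapses to an identity mapping, and the old negative-offset computation with its rr_index >= 0 guard disappears. A sketch of the before/after index math, with illustrative names:

	/* Before: XDP rings sat at the tail of one flat array, so the
	 * paired RX ring had to be computed and could come out negative.
	 */
	static int old_recycle_index(int xdp_ring_num, int tx_ring_num, int tx_idx)
	{
		return (xdp_ring_num - tx_ring_num) + tx_idx;	/* may be < 0 */
	}

	/* After: tx_ring[TX_XDP][i] pairs 1:1 with rx_ring[i]. */
	static int new_recycle_index(int tx_idx)
	{
		return tx_idx;	/* always valid */
	}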
int mlx4_en_start_port(struct net_device *dev)
@@ -1551,9 +1551,8 @@ int mlx4_en_start_port(struct net_device *dev)
struct mlx4_en_cq *cq;
struct mlx4_en_tx_ring *tx_ring;
int rx_index = 0;
- int tx_index = 0;
int err = 0;
- int i;
+ int i, t;
int j;
u8 mc_list[16] = {0};
@@ -1638,43 +1637,51 @@ int mlx4_en_start_port(struct net_device *dev)
goto rss_err;
/* Configure tx cq's and rings */
- for (i = 0; i < priv->tx_ring_num; i++) {
- /* Configure cq */
- cq = priv->tx_cq[i];
- err = mlx4_en_activate_cq(priv, cq, i);
- if (err) {
- en_err(priv, "Failed allocating Tx CQ\n");
- goto tx_err;
- }
- err = mlx4_en_set_cq_moder(priv, cq);
- if (err) {
- en_err(priv, "Failed setting cq moderation parameters\n");
- mlx4_en_deactivate_cq(priv, cq);
- goto tx_err;
- }
- en_dbg(DRV, priv, "Resetting index of collapsed CQ:%d to -1\n", i);
- cq->buf->wqe_index = cpu_to_be16(0xffff);
-
- /* Configure ring */
- tx_ring = priv->tx_ring[i];
- err = mlx4_en_activate_tx_ring(priv, tx_ring, cq->mcq.cqn,
- i / priv->num_tx_rings_p_up);
- if (err) {
- en_err(priv, "Failed allocating Tx ring\n");
- mlx4_en_deactivate_cq(priv, cq);
- goto tx_err;
- }
- tx_ring->tx_queue = netdev_get_tx_queue(dev, i);
+ for (t = 0; t < MLX4_EN_NUM_TX_TYPES; t++) {
+ u8 num_tx_rings_p_up = t == TX ? priv->num_tx_rings_p_up : 1;
- mlx4_en_init_recycle_ring(priv, i);
+ for (i = 0; i < priv->tx_ring_num[t]; i++) {
+ /* Configure cq */
+ cq = priv->tx_cq[t][i];
+ err = mlx4_en_activate_cq(priv, cq, i);
+ if (err) {
+ en_err(priv, "Failed allocating Tx CQ\n");
+ goto tx_err;
+ }
+ err = mlx4_en_set_cq_moder(priv, cq);
+ if (err) {
+ en_err(priv, "Failed setting cq moderation parameters\n");
+ mlx4_en_deactivate_cq(priv, cq);
+ goto tx_err;
+ }
+ en_dbg(DRV, priv,
+ "Resetting index of collapsed CQ:%d to -1\n", i);
+ cq->buf->wqe_index = cpu_to_be16(0xffff);
+
+ /* Configure ring */
+ tx_ring = priv->tx_ring[t][i];
+ err = mlx4_en_activate_tx_ring(priv, tx_ring,
+ cq->mcq.cqn,
+ i / num_tx_rings_p_up);
+ if (err) {
+ en_err(priv, "Failed allocating Tx ring\n");
+ mlx4_en_deactivate_cq(priv, cq);
+ goto tx_err;
+ }
+ if (t != TX_XDP) {
+ tx_ring->tx_queue = netdev_get_tx_queue(dev, i);
+ tx_ring->recycle_ring = NULL;
+ } else {
+ mlx4_en_init_recycle_ring(priv, i);
+ }
- /* Arm CQ for TX completions */
- mlx4_en_arm_cq(priv, cq);
+ /* Arm CQ for TX completions */
+ mlx4_en_arm_cq(priv, cq);
- /* Set initial ownership of all Tx TXBBs to SW (1) */
- for (j = 0; j < tx_ring->buf_size; j += STAMP_STRIDE)
- *((u32 *) (tx_ring->buf + j)) = 0xffffffff;
- ++tx_index;
+ /* Set initial ownership of all Tx TXBBs to SW (1) */
+ for (j = 0; j < tx_ring->buf_size; j += STAMP_STRIDE)
+ *((u32 *)(tx_ring->buf + j)) = 0xffffffff;
+ }
}
/* Configure port */
@@ -1749,9 +1756,18 @@ int mlx4_en_start_port(struct net_device *dev)
return 0;
tx_err:
- while (tx_index--) {
- mlx4_en_deactivate_tx_ring(priv, priv->tx_ring[tx_index]);
- mlx4_en_deactivate_cq(priv, priv->tx_cq[tx_index]);
+ if (t == MLX4_EN_NUM_TX_TYPES) {
+ t--;
+ i = priv->tx_ring_num[t];
+ }
+ while (t >= 0) {
+ while (i--) {
+ mlx4_en_deactivate_tx_ring(priv, priv->tx_ring[t][i]);
+ mlx4_en_deactivate_cq(priv, priv->tx_cq[t][i]);
+ }
+ if (!t--)
+ break;
+ i = priv->tx_ring_num[t];
}
mlx4_en_destroy_drop_qp(priv);
rss_err:
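The reworked tx_err path has to unwind a two-dimensional set of rings rather than a single counter. A generic, self-contained sketch of the same unwind pattern (the names and NUM_TYPES are illustrative, not driver symbols):

	#define NUM_TYPES 2	/* stand-in for MLX4_EN_NUM_TX_TYPES */

	static void unwind(int t, int i, const int num[], void (*undo)(int, int))
	{
		if (t == NUM_TYPES) {	/* loops completed; failure came later */
			t--;
			i = num[t];
		}
		while (t >= 0) {
			while (i--)
				undo(t, i);	/* deactivate ring i of type t */
			if (!t--)
				break;
			i = num[t];
		}
	}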
@@ -1776,7 +1792,7 @@ void mlx4_en_stop_port(struct net_device *dev, int detach)
struct mlx4_en_dev *mdev = priv->mdev;
struct mlx4_en_mc_list *mclist, *tmp;
struct ethtool_flow_id *flow, *tmp_flow;
- int i;
+ int i, t;
u8 mc_list[16] = {0};
if (!priv->port_up) {
@@ -1796,8 +1812,12 @@ void mlx4_en_stop_port(struct net_device *dev, int detach)
netif_tx_disable(dev);
+ spin_lock_bh(&priv->stats_lock);
+ mlx4_en_fold_software_stats(dev);
/* Set port as not active */
priv->port_up = false;
+ spin_unlock_bh(&priv->stats_lock);
+
priv->counter_index = MLX4_SINK_COUNTER_INDEX(mdev->dev);
/* Promiscuous mode */
@@ -1862,14 +1882,17 @@ void mlx4_en_stop_port(struct net_device *dev, int detach)
mlx4_en_destroy_drop_qp(priv);
/* Free TX Rings */
- for (i = 0; i < priv->tx_ring_num; i++) {
- mlx4_en_deactivate_tx_ring(priv, priv->tx_ring[i]);
- mlx4_en_deactivate_cq(priv, priv->tx_cq[i]);
+ for (t = 0; t < MLX4_EN_NUM_TX_TYPES; t++) {
+ for (i = 0; i < priv->tx_ring_num[t]; i++) {
+ mlx4_en_deactivate_tx_ring(priv, priv->tx_ring[t][i]);
+ mlx4_en_deactivate_cq(priv, priv->tx_cq[t][i]);
+ }
}
msleep(10);
- for (i = 0; i < priv->tx_ring_num; i++)
- mlx4_en_free_tx_buf(dev, priv->tx_ring[i]);
+ for (t = 0; t < MLX4_EN_NUM_TX_TYPES; t++)
+ for (i = 0; i < priv->tx_ring_num[t]; i++)
+ mlx4_en_free_tx_buf(dev, priv->tx_ring[t][i]);
if (mdev->dev->caps.steering_mode != MLX4_STEERING_MODE_A0)
mlx4_en_delete_rss_steer_rules(priv);
@@ -1918,6 +1941,7 @@ static void mlx4_en_clear_stats(struct net_device *dev)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_en_dev *mdev = priv->mdev;
+ struct mlx4_en_tx_ring **tx_ring;
int i;
if (!mlx4_is_slave(mdev->dev))
@@ -1935,15 +1959,16 @@ static void mlx4_en_clear_stats(struct net_device *dev)
sizeof(priv->tx_priority_flowstats));
memset(&priv->pf_stats, 0, sizeof(priv->pf_stats));
- for (i = 0; i < priv->tx_ring_num; i++) {
- priv->tx_ring[i]->bytes = 0;
- priv->tx_ring[i]->packets = 0;
- priv->tx_ring[i]->tx_csum = 0;
- priv->tx_ring[i]->tx_dropped = 0;
- priv->tx_ring[i]->queue_stopped = 0;
- priv->tx_ring[i]->wake_queue = 0;
- priv->tx_ring[i]->tso_packets = 0;
- priv->tx_ring[i]->xmit_more = 0;
+ tx_ring = priv->tx_ring[TX];
+ for (i = 0; i < priv->tx_ring_num[TX]; i++) {
+ tx_ring[i]->bytes = 0;
+ tx_ring[i]->packets = 0;
+ tx_ring[i]->tx_csum = 0;
+ tx_ring[i]->tx_dropped = 0;
+ tx_ring[i]->queue_stopped = 0;
+ tx_ring[i]->wake_queue = 0;
+ tx_ring[i]->tso_packets = 0;
+ tx_ring[i]->xmit_more = 0;
}
for (i = 0; i < priv->rx_ring_num; i++) {
priv->rx_ring[i]->bytes = 0;
@@ -1999,17 +2024,20 @@ static int mlx4_en_close(struct net_device *dev)
static void mlx4_en_free_resources(struct mlx4_en_priv *priv)
{
- int i;
+ int i, t;
#ifdef CONFIG_RFS_ACCEL
priv->dev->rx_cpu_rmap = NULL;
#endif
- for (i = 0; i < priv->tx_ring_num; i++) {
- if (priv->tx_ring && priv->tx_ring[i])
- mlx4_en_destroy_tx_ring(priv, &priv->tx_ring[i]);
- if (priv->tx_cq && priv->tx_cq[i])
- mlx4_en_destroy_cq(priv, &priv->tx_cq[i]);
+ for (t = 0; t < MLX4_EN_NUM_TX_TYPES; t++) {
+ for (i = 0; i < priv->tx_ring_num[t]; i++) {
+ if (priv->tx_ring[t] && priv->tx_ring[t][i])
+ mlx4_en_destroy_tx_ring(priv,
+ &priv->tx_ring[t][i]);
+ if (priv->tx_cq[t] && priv->tx_cq[t][i])
+ mlx4_en_destroy_cq(priv, &priv->tx_cq[t][i]);
+ }
}
for (i = 0; i < priv->rx_ring_num; i++) {
@@ -2025,20 +2053,22 @@ static void mlx4_en_free_resources(struct mlx4_en_priv *priv)
static int mlx4_en_alloc_resources(struct mlx4_en_priv *priv)
{
struct mlx4_en_port_profile *prof = priv->prof;
- int i;
+ int i, t;
int node;
/* Create tx Rings */
- for (i = 0; i < priv->tx_ring_num; i++) {
- node = cpu_to_node(i % num_online_cpus());
- if (mlx4_en_create_cq(priv, &priv->tx_cq[i],
- prof->tx_ring_size, i, TX, node))
- goto err;
-
- if (mlx4_en_create_tx_ring(priv, &priv->tx_ring[i],
- prof->tx_ring_size, TXBB_SIZE,
- node, i))
- goto err;
+ for (t = 0; t < MLX4_EN_NUM_TX_TYPES; t++) {
+ for (i = 0; i < priv->tx_ring_num[t]; i++) {
+ node = cpu_to_node(i % num_online_cpus());
+ if (mlx4_en_create_cq(priv, &priv->tx_cq[t][i],
+ prof->tx_ring_size, i, t, node))
+ goto err;
+
+ if (mlx4_en_create_tx_ring(priv, &priv->tx_ring[t][i],
+ prof->tx_ring_size,
+ TXBB_SIZE, node, i))
+ goto err;
+ }
}
/* Create rx Rings */
@@ -2070,11 +2100,14 @@ err:
if (priv->rx_cq[i])
mlx4_en_destroy_cq(priv, &priv->rx_cq[i]);
}
- for (i = 0; i < priv->tx_ring_num; i++) {
- if (priv->tx_ring[i])
- mlx4_en_destroy_tx_ring(priv, &priv->tx_ring[i]);
- if (priv->tx_cq[i])
- mlx4_en_destroy_cq(priv, &priv->tx_cq[i]);
+ for (t = 0; t < MLX4_EN_NUM_TX_TYPES; t++) {
+ for (i = 0; i < priv->tx_ring_num[t]; i++) {
+ if (priv->tx_ring[t][i])
+ mlx4_en_destroy_tx_ring(priv,
+ &priv->tx_ring[t][i]);
+ if (priv->tx_cq[t][i])
+ mlx4_en_destroy_cq(priv, &priv->tx_cq[t][i]);
+ }
}
return -ENOMEM;
}
@@ -2084,10 +2117,11 @@ static int mlx4_en_copy_priv(struct mlx4_en_priv *dst,
struct mlx4_en_priv *src,
struct mlx4_en_port_profile *prof)
{
+ int t;
+
memcpy(&dst->hwtstamp_config, &prof->hwtstamp_config,
sizeof(dst->hwtstamp_config));
dst->num_tx_rings_p_up = src->mdev->profile.num_tx_rings_p_up;
- dst->tx_ring_num = prof->tx_ring_num;
dst->rx_ring_num = prof->rx_ring_num;
dst->flags = prof->flags;
dst->mdev = src->mdev;
@@ -2097,33 +2131,50 @@ static int mlx4_en_copy_priv(struct mlx4_en_priv *dst,
dst->stride = roundup_pow_of_two(sizeof(struct mlx4_en_rx_desc) +
DS_SIZE * MLX4_EN_MAX_RX_FRAGS);
- dst->tx_ring = kzalloc(sizeof(struct mlx4_en_tx_ring *) * MAX_TX_RINGS,
- GFP_KERNEL);
- if (!dst->tx_ring)
- return -ENOMEM;
+ for (t = 0; t < MLX4_EN_NUM_TX_TYPES; t++) {
+ dst->tx_ring_num[t] = prof->tx_ring_num[t];
+ if (!dst->tx_ring_num[t])
+ continue;
- dst->tx_cq = kzalloc(sizeof(struct mlx4_en_cq *) * MAX_TX_RINGS,
- GFP_KERNEL);
- if (!dst->tx_cq) {
- kfree(dst->tx_ring);
- return -ENOMEM;
+ dst->tx_ring[t] = kzalloc(sizeof(struct mlx4_en_tx_ring *) *
+ MAX_TX_RINGS, GFP_KERNEL);
+ if (!dst->tx_ring[t])
+ goto err_free_tx;
+
+ dst->tx_cq[t] = kzalloc(sizeof(struct mlx4_en_cq *) *
+ MAX_TX_RINGS, GFP_KERNEL);
+ if (!dst->tx_cq[t]) {
+ kfree(dst->tx_ring[t]);
+ goto err_free_tx;
+ }
}
+
return 0;
+
+err_free_tx:
+ while (t--) {
+ kfree(dst->tx_ring[t]);
+ kfree(dst->tx_cq[t]);
+ }
+ return -ENOMEM;
}
static void mlx4_en_update_priv(struct mlx4_en_priv *dst,
struct mlx4_en_priv *src)
{
+ int t;
memcpy(dst->rx_ring, src->rx_ring,
sizeof(struct mlx4_en_rx_ring *) * src->rx_ring_num);
memcpy(dst->rx_cq, src->rx_cq,
sizeof(struct mlx4_en_cq *) * src->rx_ring_num);
memcpy(&dst->hwtstamp_config, &src->hwtstamp_config,
sizeof(dst->hwtstamp_config));
- dst->tx_ring_num = src->tx_ring_num;
+ for (t = 0; t < MLX4_EN_NUM_TX_TYPES; t++) {
+ dst->tx_ring_num[t] = src->tx_ring_num[t];
+ dst->tx_ring[t] = src->tx_ring[t];
+ dst->tx_cq[t] = src->tx_cq[t];
+ }
dst->rx_ring_num = src->rx_ring_num;
- dst->tx_ring = src->tx_ring;
- dst->tx_cq = src->tx_cq;
memcpy(dst->prof, src->prof, sizeof(struct mlx4_en_port_profile));
}
@@ -2131,14 +2182,18 @@ int mlx4_en_try_alloc_resources(struct mlx4_en_priv *priv,
struct mlx4_en_priv *tmp,
struct mlx4_en_port_profile *prof)
{
+ int t;
+
mlx4_en_copy_priv(tmp, priv, prof);
if (mlx4_en_alloc_resources(tmp)) {
en_warn(priv,
"%s: Resource allocation failed, using previous configuration\n",
__func__);
- kfree(tmp->tx_ring);
- kfree(tmp->tx_cq);
+ for (t = 0; t < MLX4_EN_NUM_TX_TYPES; t++) {
+ kfree(tmp->tx_ring[t]);
+ kfree(tmp->tx_cq[t]);
+ }
return -ENOMEM;
}
return 0;
@@ -2155,6 +2210,7 @@ void mlx4_en_destroy_netdev(struct net_device *dev)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_en_dev *mdev = priv->mdev;
+ int t;
en_dbg(DRV, priv, "Destroying netdev on port:%d\n", priv->port);
@@ -2188,12 +2244,27 @@ void mlx4_en_destroy_netdev(struct net_device *dev)
mlx4_en_free_resources(priv);
mutex_unlock(&mdev->state_lock);
- kfree(priv->tx_ring);
- kfree(priv->tx_cq);
+ for (t = 0; t < MLX4_EN_NUM_TX_TYPES; t++) {
+ kfree(priv->tx_ring[t]);
+ kfree(priv->tx_cq[t]);
+ }
free_netdev(dev);
}
+static bool mlx4_en_check_xdp_mtu(struct net_device *dev, int mtu)
+{
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+
+ if (mtu > MLX4_EN_MAX_XDP_MTU) {
+ en_err(priv, "mtu:%d > max:%d when XDP prog is attached\n",
+ mtu, MLX4_EN_MAX_XDP_MTU);
+ return false;
+ }
+
+ return true;
+}
+
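MLX4_EN_MAX_XDP_MTU pins the XDP MTU cap to what still fits in one page after the Ethernet header, two VLAN tags, and the XDP headroom. A worked example, assuming 4 KiB pages and the 256-byte XDP_PACKET_HEADROOM of this era:

	#include <stdio.h>

	#define PAGE_SIZE		4096	/* assumed for illustration */
	#define ETH_HLEN		14
	#define VLAN_HLEN		4
	#define XDP_PACKET_HEADROOM	256	/* assumed for illustration */

	int main(void)
	{
		int max_xdp_mtu = PAGE_SIZE - ETH_HLEN - (2 * VLAN_HLEN) -
				  XDP_PACKET_HEADROOM;

		printf("max XDP MTU: %d\n", max_xdp_mtu);	/* 3818 */
		return 0;
	}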
static int mlx4_en_change_mtu(struct net_device *dev, int new_mtu)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
@@ -2203,15 +2274,10 @@ static int mlx4_en_change_mtu(struct net_device *dev, int new_mtu)
en_dbg(DRV, priv, "Change MTU called - current:%d new:%d\n",
dev->mtu, new_mtu);
- if ((new_mtu < MLX4_EN_MIN_MTU) || (new_mtu > priv->max_mtu)) {
- en_err(priv, "Bad MTU size:%d.\n", new_mtu);
- return -EPERM;
- }
- if (priv->xdp_ring_num && MLX4_EN_EFF_MTU(new_mtu) > FRAG_SZ0) {
- en_err(priv, "MTU size:%d requires frags but XDP running\n",
- new_mtu);
- return -EOPNOTSUPP;
- }
+ if (priv->tx_ring_num[TX_XDP] &&
+ !mlx4_en_check_xdp_mtu(dev, new_mtu))
+ return -ENOTSUPP;
+
dev->mtu = new_mtu;
if (netif_running(dev)) {
@@ -2598,7 +2664,7 @@ static netdev_features_t mlx4_en_features_check(struct sk_buff *skb,
static int mlx4_en_set_tx_maxrate(struct net_device *dev, int queue_index, u32 maxrate)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
- struct mlx4_en_tx_ring *tx_ring = priv->tx_ring[queue_index];
+ struct mlx4_en_tx_ring *tx_ring = priv->tx_ring[TX][queue_index];
struct mlx4_update_qp_params params;
int err;
@@ -2626,18 +2692,21 @@ static int mlx4_xdp_set(struct net_device *dev, struct bpf_prog *prog)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_en_dev *mdev = priv->mdev;
+ struct mlx4_en_port_profile new_prof;
struct bpf_prog *old_prog;
+ struct mlx4_en_priv *tmp;
+ int tx_changed = 0;
int xdp_ring_num;
int port_up = 0;
int err;
int i;
- xdp_ring_num = prog ? ALIGN(priv->rx_ring_num, MLX4_EN_NUM_UP) : 0;
+ xdp_ring_num = prog ? priv->rx_ring_num : 0;
/* No need to reconfigure buffers when simply swapping the
* program for a new one.
*/
- if (priv->xdp_ring_num == xdp_ring_num) {
+ if (priv->tx_ring_num[TX_XDP] == xdp_ring_num) {
if (prog) {
prog = bpf_prog_add(prog, priv->rx_ring_num - 1);
if (IS_ERR(prog))
@@ -2656,33 +2725,47 @@ static int mlx4_xdp_set(struct net_device *dev, struct bpf_prog *prog)
return 0;
}
- if (priv->num_frags > 1) {
- en_err(priv, "Cannot set XDP if MTU requires multiple frags\n");
+ if (!mlx4_en_check_xdp_mtu(dev, dev->mtu))
return -EOPNOTSUPP;
- }
- if (priv->tx_ring_num < xdp_ring_num + MLX4_EN_NUM_UP) {
- en_err(priv,
- "Minimum %d tx channels required to run XDP\n",
- (xdp_ring_num + MLX4_EN_NUM_UP) / MLX4_EN_NUM_UP);
- return -EINVAL;
- }
+ tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
+ if (!tmp)
+ return -ENOMEM;
if (prog) {
prog = bpf_prog_add(prog, priv->rx_ring_num - 1);
- if (IS_ERR(prog))
- return PTR_ERR(prog);
+ if (IS_ERR(prog)) {
+ err = PTR_ERR(prog);
+ goto out;
+ }
}
mutex_lock(&mdev->state_lock);
+ memcpy(&new_prof, priv->prof, sizeof(struct mlx4_en_port_profile));
+ new_prof.tx_ring_num[TX_XDP] = xdp_ring_num;
+
+ if (priv->tx_ring_num[TX] + xdp_ring_num > MAX_TX_RINGS) {
+ tx_changed = 1;
+ new_prof.tx_ring_num[TX] =
+ MAX_TX_RINGS - ALIGN(xdp_ring_num, MLX4_EN_NUM_UP);
+ en_warn(priv, "Reducing the number of TX rings, to not exceed the max total rings number.\n");
+ }
+
+ err = mlx4_en_try_alloc_resources(priv, tmp, &new_prof);
+ if (err) {
+ if (prog)
+ bpf_prog_sub(prog, priv->rx_ring_num - 1);
+ goto unlock_out;
+ }
+
if (priv->port_up) {
port_up = 1;
mlx4_en_stop_port(dev, 1);
}
- priv->xdp_ring_num = xdp_ring_num;
- netif_set_real_num_tx_queues(dev, priv->tx_ring_num -
- priv->xdp_ring_num);
+ mlx4_en_safe_replace_resources(priv, tmp);
+ if (tx_changed)
+ netif_set_real_num_tx_queues(dev, priv->tx_ring_num[TX]);
for (i = 0; i < priv->rx_ring_num; i++) {
old_prog = rcu_dereference_protected(
@@ -2702,15 +2785,18 @@ static int mlx4_xdp_set(struct net_device *dev, struct bpf_prog *prog)
}
}
+unlock_out:
mutex_unlock(&mdev->state_lock);
- return 0;
+out:
+ kfree(tmp);
+ return err;
}
static bool mlx4_xdp_attached(struct net_device *dev)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
- return !!priv->xdp_ring_num;
+ return !!priv->tx_ring_num[TX_XDP];
}
static int mlx4_xdp(struct net_device *dev, struct netdev_xdp *xdp)
@@ -3047,6 +3133,10 @@ void mlx4_en_set_stats_bitmap(struct mlx4_dev *dev,
if (!mlx4_is_slave(dev))
bitmap_set(stats_bitmap->bitmap, last_i, NUM_PKT_STATS);
+ last_i += NUM_PKT_STATS;
+
+ bitmap_set(stats_bitmap->bitmap, last_i, NUM_XDP_STATS);
+ last_i += NUM_XDP_STATS;
}
int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
@@ -3054,7 +3144,7 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
{
struct net_device *dev;
struct mlx4_en_priv *priv;
- int i;
+ int i, t;
int err;
dev = alloc_etherdev_mqs(sizeof(struct mlx4_en_priv),
@@ -3062,7 +3152,7 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
if (dev == NULL)
return -ENOMEM;
- netif_set_real_num_tx_queues(dev, prof->tx_ring_num);
+ netif_set_real_num_tx_queues(dev, prof->tx_ring_num[TX]);
netif_set_real_num_rx_queues(dev, prof->rx_ring_num);
SET_NETDEV_DEV(dev, &mdev->dev->persist->pdev->dev);
@@ -3099,21 +3189,27 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
priv->ctrl_flags = cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE |
MLX4_WQE_CTRL_SOLICITED);
priv->num_tx_rings_p_up = mdev->profile.num_tx_rings_p_up;
- priv->tx_ring_num = prof->tx_ring_num;
priv->tx_work_limit = MLX4_EN_DEFAULT_TX_WORK;
netdev_rss_key_fill(priv->rss_key, sizeof(priv->rss_key));
- priv->tx_ring = kzalloc(sizeof(struct mlx4_en_tx_ring *) * MAX_TX_RINGS,
- GFP_KERNEL);
- if (!priv->tx_ring) {
- err = -ENOMEM;
- goto out;
- }
- priv->tx_cq = kzalloc(sizeof(struct mlx4_en_cq *) * MAX_TX_RINGS,
- GFP_KERNEL);
- if (!priv->tx_cq) {
- err = -ENOMEM;
- goto out;
+ for (t = 0; t < MLX4_EN_NUM_TX_TYPES; t++) {
+ priv->tx_ring_num[t] = prof->tx_ring_num[t];
+ if (!priv->tx_ring_num[t])
+ continue;
+
+ priv->tx_ring[t] = kzalloc(sizeof(struct mlx4_en_tx_ring *) *
+ MAX_TX_RINGS, GFP_KERNEL);
+ if (!priv->tx_ring[t]) {
+ err = -ENOMEM;
+ goto err_free_tx;
+ }
+ priv->tx_cq[t] = kzalloc(sizeof(struct mlx4_en_cq *) *
+ MAX_TX_RINGS, GFP_KERNEL);
+ if (!priv->tx_cq[t]) {
+ kfree(priv->tx_ring[t]);
+ err = -ENOMEM;
+ goto out;
+ }
}
priv->rx_ring_num = prof->rx_ring_num;
priv->cqe_factor = (mdev->dev->caps.cqe_size == 64) ? 1 : 0;
@@ -3196,7 +3292,7 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
else
dev->netdev_ops = &mlx4_netdev_ops;
dev->watchdog_timeo = MLX4_EN_WATCHDOG_TIMEOUT;
- netif_set_real_num_tx_queues(dev, priv->tx_ring_num);
+ netif_set_real_num_tx_queues(dev, priv->tx_ring_num[TX]);
netif_set_real_num_rx_queues(dev, priv->rx_ring_num);
dev->ethtool_ops = &mlx4_en_ethtool_ops;
@@ -3286,13 +3382,17 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
dev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM;
}
+ /* MTU range: 46 - hw-specific max */
+ dev->min_mtu = MLX4_EN_MIN_MTU;
+ dev->max_mtu = priv->max_mtu;
+
mdev->pndev[port] = dev;
mdev->upper[port] = NULL;
netif_carrier_off(dev);
mlx4_en_set_default_moderation(priv);
- en_warn(priv, "Using %d TX rings\n", prof->tx_ring_num);
+ en_warn(priv, "Using %d TX rings\n", prof->tx_ring_num[TX]);
en_warn(priv, "Using %d RX rings\n", prof->rx_ring_num);
mlx4_en_update_loopback_state(priv->dev, priv->dev->features);
@@ -3352,6 +3452,11 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
return 0;
+err_free_tx:
+ while (t--) {
+ kfree(priv->tx_ring[t]);
+ kfree(priv->tx_cq[t]);
+ }
out:
mlx4_en_destroy_netdev(dev);
return err;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_port.c b/drivers/net/ethernet/mellanox/mlx4/en_port.c
index 59473a0ebcdf..9166d90e7328 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_port.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_port.c
@@ -147,6 +147,39 @@ static unsigned long en_stats_adder(__be64 *start, __be64 *next, int num)
return ret;
}
+void mlx4_en_fold_software_stats(struct net_device *dev)
+{
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+ struct mlx4_en_dev *mdev = priv->mdev;
+ unsigned long packets, bytes;
+ int i;
+
+ if (!priv->port_up || mlx4_is_master(mdev->dev))
+ return;
+
+ packets = 0;
+ bytes = 0;
+ for (i = 0; i < priv->rx_ring_num; i++) {
+ const struct mlx4_en_rx_ring *ring = priv->rx_ring[i];
+
+ packets += READ_ONCE(ring->packets);
+ bytes += READ_ONCE(ring->bytes);
+ }
+ dev->stats.rx_packets = packets;
+ dev->stats.rx_bytes = bytes;
+
+ packets = 0;
+ bytes = 0;
+ for (i = 0; i < priv->tx_ring_num[TX]; i++) {
+ const struct mlx4_en_tx_ring *ring = priv->tx_ring[TX][i];
+
+ packets += READ_ONCE(ring->packets);
+ bytes += READ_ONCE(ring->bytes);
+ }
+ dev->stats.tx_packets = packets;
+ dev->stats.tx_bytes = bytes;
+}
+
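mlx4_en_fold_software_stats() relies on each ring counter having a single writer (its ring's NAPI context), so the reader only needs tearing-free loads instead of the spinlock the old code took. A minimal userspace sketch of the pattern, with a stand-in READ_ONCE:

	#include <stdio.h>

	#define READ_ONCE(x) (*(const volatile typeof(x) *)&(x))

	struct ring { unsigned long packets, bytes; };

	static void fold(const struct ring *rings, int n,
			 unsigned long *packets, unsigned long *bytes)
	{
		int i;

		*packets = 0;
		*bytes = 0;
		for (i = 0; i < n; i++) {
			*packets += READ_ONCE(rings[i].packets);
			*bytes += READ_ONCE(rings[i].bytes);
		}
	}

	int main(void)
	{
		struct ring r[2] = { { 3, 300 }, { 4, 400 } };
		unsigned long p, b;

		fold(r, 2, &p, &b);
		printf("%lu packets, %lu bytes\n", p, b);	/* 7, 700 */
		return 0;
	}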
int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
{
struct mlx4_counter tmp_counter_stats;
@@ -159,6 +192,7 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
u64 in_mod = reset << 8 | port;
int err;
int i, counter_index;
+ unsigned long sw_tx_dropped = 0;
unsigned long sw_rx_dropped = 0;
mailbox = mlx4_alloc_cmd_mailbox(mdev->dev);
@@ -174,40 +208,42 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
spin_lock_bh(&priv->stats_lock);
- stats->rx_packets = 0;
- stats->rx_bytes = 0;
+ mlx4_en_fold_software_stats(dev);
+
priv->port_stats.rx_chksum_good = 0;
priv->port_stats.rx_chksum_none = 0;
priv->port_stats.rx_chksum_complete = 0;
+ priv->xdp_stats.rx_xdp_drop = 0;
+ priv->xdp_stats.rx_xdp_tx = 0;
+ priv->xdp_stats.rx_xdp_tx_full = 0;
for (i = 0; i < priv->rx_ring_num; i++) {
- stats->rx_packets += priv->rx_ring[i]->packets;
- stats->rx_bytes += priv->rx_ring[i]->bytes;
- sw_rx_dropped += priv->rx_ring[i]->dropped;
- priv->port_stats.rx_chksum_good += priv->rx_ring[i]->csum_ok;
- priv->port_stats.rx_chksum_none += priv->rx_ring[i]->csum_none;
- priv->port_stats.rx_chksum_complete += priv->rx_ring[i]->csum_complete;
+ const struct mlx4_en_rx_ring *ring = priv->rx_ring[i];
+
+ sw_rx_dropped += READ_ONCE(ring->dropped);
+ priv->port_stats.rx_chksum_good += READ_ONCE(ring->csum_ok);
+ priv->port_stats.rx_chksum_none += READ_ONCE(ring->csum_none);
+ priv->port_stats.rx_chksum_complete += READ_ONCE(ring->csum_complete);
+ priv->xdp_stats.rx_xdp_drop += READ_ONCE(ring->xdp_drop);
+ priv->xdp_stats.rx_xdp_tx += READ_ONCE(ring->xdp_tx);
+ priv->xdp_stats.rx_xdp_tx_full += READ_ONCE(ring->xdp_tx_full);
}
- stats->tx_packets = 0;
- stats->tx_bytes = 0;
- stats->tx_dropped = 0;
priv->port_stats.tx_chksum_offload = 0;
priv->port_stats.queue_stopped = 0;
priv->port_stats.wake_queue = 0;
priv->port_stats.tso_packets = 0;
priv->port_stats.xmit_more = 0;
- for (i = 0; i < priv->tx_ring_num; i++) {
- const struct mlx4_en_tx_ring *ring = priv->tx_ring[i];
-
- stats->tx_packets += ring->packets;
- stats->tx_bytes += ring->bytes;
- stats->tx_dropped += ring->tx_dropped;
- priv->port_stats.tx_chksum_offload += ring->tx_csum;
- priv->port_stats.queue_stopped += ring->queue_stopped;
- priv->port_stats.wake_queue += ring->wake_queue;
- priv->port_stats.tso_packets += ring->tso_packets;
- priv->port_stats.xmit_more += ring->xmit_more;
+ for (i = 0; i < priv->tx_ring_num[TX]; i++) {
+ const struct mlx4_en_tx_ring *ring = priv->tx_ring[TX][i];
+
+ sw_tx_dropped += READ_ONCE(ring->tx_dropped);
+ priv->port_stats.tx_chksum_offload += READ_ONCE(ring->tx_csum);
+ priv->port_stats.queue_stopped += READ_ONCE(ring->queue_stopped);
+ priv->port_stats.wake_queue += READ_ONCE(ring->wake_queue);
+ priv->port_stats.tso_packets += READ_ONCE(ring->tso_packets);
+ priv->port_stats.xmit_more += READ_ONCE(ring->xmit_more);
}
+
if (mlx4_is_master(mdev->dev)) {
stats->rx_packets = en_stats_adder(&mlx4_en_stats->RTOT_prio_0,
&mlx4_en_stats->RTOT_prio_1,
@@ -245,7 +281,8 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
stats->rx_length_errors = be32_to_cpu(mlx4_en_stats->RdropLength);
stats->rx_crc_errors = be32_to_cpu(mlx4_en_stats->RCRC);
stats->rx_fifo_errors = be32_to_cpu(mlx4_en_stats->RdropOvflw);
- stats->tx_dropped += be32_to_cpu(mlx4_en_stats->TDROP);
+ stats->tx_dropped = be32_to_cpu(mlx4_en_stats->TDROP) +
+ sw_tx_dropped;
/* RX stats */
priv->pkstats.rx_multicast_packets = stats->multicast;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index f2e8beddcf44..3c37e216bbf3 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -96,7 +96,6 @@ static int mlx4_en_alloc_frags(struct mlx4_en_priv *priv,
struct mlx4_en_rx_alloc page_alloc[MLX4_EN_MAX_RX_FRAGS];
const struct mlx4_en_frag_info *frag_info;
struct page *page;
- dma_addr_t dma;
int i;
for (i = 0; i < priv->num_frags; i++) {
@@ -115,9 +114,10 @@ static int mlx4_en_alloc_frags(struct mlx4_en_priv *priv,
for (i = 0; i < priv->num_frags; i++) {
frags[i] = ring_alloc[i];
- dma = ring_alloc[i].dma + ring_alloc[i].page_offset;
+ frags[i].page_offset += priv->frag_info[i].rx_headroom;
+ rx_desc->data[i].addr = cpu_to_be64(frags[i].dma +
+ frags[i].page_offset);
ring_alloc[i] = page_alloc[i];
- rx_desc->data[i].addr = cpu_to_be64(dma);
}
return 0;
@@ -250,7 +250,8 @@ static int mlx4_en_prepare_rx_desc(struct mlx4_en_priv *priv,
if (ring->page_cache.index > 0) {
frags[0] = ring->page_cache.buf[--ring->page_cache.index];
- rx_desc->data[0].addr = cpu_to_be64(frags[0].dma);
+ rx_desc->data[0].addr = cpu_to_be64(frags[0].dma +
+ frags[0].page_offset);
return 0;
}
@@ -688,18 +689,23 @@ out_loopback:
dev_kfree_skb_any(skb);
}
-static void mlx4_en_refill_rx_buffers(struct mlx4_en_priv *priv,
- struct mlx4_en_rx_ring *ring)
+static bool mlx4_en_refill_rx_buffers(struct mlx4_en_priv *priv,
+ struct mlx4_en_rx_ring *ring)
{
- int index = ring->prod & ring->size_mask;
+ u32 missing = ring->actual_size - (ring->prod - ring->cons);
- while ((u32) (ring->prod - ring->cons) < ring->actual_size) {
- if (mlx4_en_prepare_rx_desc(priv, ring, index,
+ /* Try to batch allocations, but not too much. */
+ if (missing < 8)
+ return false;
+ do {
+ if (mlx4_en_prepare_rx_desc(priv, ring,
+ ring->prod & ring->size_mask,
GFP_ATOMIC | __GFP_COLD))
break;
ring->prod++;
- index = ring->prod & ring->size_mask;
- }
+ } while (--missing);
+
+ return true;
}
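The "missing" computation works because prod and cons are free-running u32 counters: unsigned subtraction yields the in-flight count even after either counter wraps past 2^32. A tiny demonstration:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint32_t prod = 5;		/* wrapped: 0xffffffff + 6 */
		uint32_t cons = 0xfffffffb;	/* 2^32 - 5 */

		printf("in flight: %u\n", prod - cons);	/* 10 */
		return 0;
	}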
/* When hardware doesn't strip the vlan, we need to calculate the checksum
@@ -788,7 +794,6 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
struct bpf_prog *xdp_prog;
int doorbell_pending;
struct sk_buff *skb;
- int tx_index;
int index;
int nr;
unsigned int length;
@@ -808,7 +813,6 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
rcu_read_lock();
xdp_prog = rcu_dereference(ring->xdp_prog);
doorbell_pending = 0;
- tx_index = (priv->tx_ring_num - priv->xdp_ring_num) + cq->ring;
/* We assume a 1:1 mapping between CQEs and Rx descriptors, so Rx
* descriptor offset can be deduced from the CQE index instead of
@@ -877,8 +881,6 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
*/
length = be32_to_cpu(cqe->byte_cnt);
length -= ring->fcs_del;
- ring->bytes += length;
- ring->packets++;
l2_tunnel = (dev->hw_enc_features & NETIF_F_RXCSUM) &&
(cqe->vlan_my_qpn & cpu_to_be32(MLX4_CQE_L2_TUNNEL));
@@ -888,6 +890,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
if (xdp_prog) {
struct xdp_buff xdp;
dma_addr_t dma;
+ void *orig_data;
u32 act;
dma = be64_to_cpu(rx_desc->data[0].addr);
@@ -895,31 +898,43 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
priv->frag_info[0].frag_size,
DMA_FROM_DEVICE);
- xdp.data = page_address(frags[0].page) +
- frags[0].page_offset;
+ xdp.data_hard_start = page_address(frags[0].page);
+ xdp.data = xdp.data_hard_start + frags[0].page_offset;
xdp.data_end = xdp.data + length;
+ orig_data = xdp.data;
act = bpf_prog_run_xdp(xdp_prog, &xdp);
+
+ if (xdp.data != orig_data) {
+ length = xdp.data_end - xdp.data;
+ frags[0].page_offset = xdp.data -
+ xdp.data_hard_start;
+ }
+
switch (act) {
case XDP_PASS:
break;
case XDP_TX:
- if (likely(!mlx4_en_xmit_frame(frags, dev,
- length, tx_index,
+ if (likely(!mlx4_en_xmit_frame(ring, frags, dev,
+ length, cq->ring,
&doorbell_pending)))
goto consumed;
- goto xdp_drop; /* Drop on xmit failure */
+ goto xdp_drop_no_cnt; /* Drop on xmit failure */
default:
bpf_warn_invalid_xdp_action(act);
case XDP_ABORTED:
case XDP_DROP:
-xdp_drop:
+ ring->xdp_drop++;
+xdp_drop_no_cnt:
if (likely(mlx4_en_rx_recycle(ring, frags)))
goto consumed;
goto next;
}
}
+ ring->bytes += length;
+ ring->packets++;
+
if (likely(dev->features & NETIF_F_RXCSUM)) {
if (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_TCP |
MLX4_CQE_STATUS_UDP)) {
@@ -1081,15 +1096,20 @@ consumed:
out:
rcu_read_unlock();
- if (doorbell_pending)
- mlx4_en_xmit_doorbell(priv->tx_ring[tx_index]);
+ if (polled) {
+ if (doorbell_pending)
+ mlx4_en_xmit_doorbell(priv->tx_ring[TX_XDP][cq->ring]);
+
+ mlx4_cq_set_ci(&cq->mcq);
+ wmb(); /* ensure HW sees CQ consumer before we post new buffers */
+ ring->cons = cq->mcq.cons_index;
+ }
AVG_PERF_COUNTER(priv->pstats.rx_coal_avg, polled);
- mlx4_cq_set_ci(&cq->mcq);
- wmb(); /* ensure HW sees CQ consumer before we post new buffers */
- ring->cons = cq->mcq.cons_index;
- mlx4_en_refill_rx_buffers(priv, ring);
- mlx4_en_update_rx_prod_db(ring);
+
+ if (mlx4_en_refill_rx_buffers(priv, ring))
+ mlx4_en_update_rx_prod_db(ring);
+
return polled;
}
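doorbell_pending defers the XDP TX doorbell so that a whole NAPI poll's worth of forwarded frames costs a single MMIO write. An illustrative sketch of the batching shape (stub declarations, not driver code):

	struct cq;
	int has_work(struct cq *cq);
	int xdp_tx_one(struct cq *cq);	/* nonzero if a frame was queued */
	void ring_doorbell(struct cq *cq);

	static void poll_loop(struct cq *cq, int budget)
	{
		int doorbell_pending = 0;

		while (budget-- && has_work(cq)) {
			if (xdp_tx_one(cq))
				doorbell_pending = 1;
		}
		if (doorbell_pending)
			ring_doorbell(cq);	/* one MMIO write per poll */
	}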
@@ -1131,14 +1151,17 @@ int mlx4_en_poll_rx_cq(struct napi_struct *napi, int budget)
return budget;
/* Current cpu is not according to smp_irq_affinity -
- * probably affinity changed. need to stop this NAPI
- * poll, and restart it on the right CPU
+ * probably affinity changed. Need to stop this NAPI
+ * poll, and restart it on the right CPU.
+ * Try to avoid returning a value that is too small (like 0),
+ * so as not to fool net_rx_action() and its netdev_budget.
*/
- done = 0;
+ if (done)
+ done--;
}
/* Done for now */
- napi_complete_done(napi, done);
- mlx4_en_arm_cq(priv, cq);
+ if (napi_complete_done(napi, done))
+ mlx4_en_arm_cq(priv, cq);
return done;
}
@@ -1151,37 +1174,41 @@ static const int frag_sizes[] = {
void mlx4_en_calc_rx_buf(struct net_device *dev)
{
- enum dma_data_direction dma_dir = PCI_DMA_FROMDEVICE;
struct mlx4_en_priv *priv = netdev_priv(dev);
int eff_mtu = MLX4_EN_EFF_MTU(dev->mtu);
- int order = MLX4_EN_ALLOC_PREFER_ORDER;
- u32 align = SMP_CACHE_BYTES;
- int buf_size = 0;
int i = 0;
/* bpf requires buffers to be set up as 1 packet per page.
* This only works when num_frags == 1.
*/
- if (priv->xdp_ring_num) {
- dma_dir = PCI_DMA_BIDIRECTIONAL;
- /* This will gain efficient xdp frame recycling at the expense
- * of more costly truesize accounting
+ if (priv->tx_ring_num[TX_XDP]) {
+ priv->frag_info[0].order = 0;
+ priv->frag_info[0].frag_size = eff_mtu;
+ priv->frag_info[0].frag_prefix_size = 0;
+ /* This will gain efficient xdp frame recycling at the
+ * expense of more costly truesize accounting
*/
- align = PAGE_SIZE;
- order = 0;
- }
-
- while (buf_size < eff_mtu) {
- priv->frag_info[i].order = order;
- priv->frag_info[i].frag_size =
- (eff_mtu > buf_size + frag_sizes[i]) ?
- frag_sizes[i] : eff_mtu - buf_size;
- priv->frag_info[i].frag_prefix_size = buf_size;
- priv->frag_info[i].frag_stride =
- ALIGN(priv->frag_info[i].frag_size, align);
- priv->frag_info[i].dma_dir = dma_dir;
- buf_size += priv->frag_info[i].frag_size;
- i++;
+ priv->frag_info[0].frag_stride = PAGE_SIZE;
+ priv->frag_info[0].dma_dir = PCI_DMA_BIDIRECTIONAL;
+ priv->frag_info[0].rx_headroom = XDP_PACKET_HEADROOM;
+ i = 1;
+ } else {
+ int buf_size = 0;
+
+ while (buf_size < eff_mtu) {
+ priv->frag_info[i].order = MLX4_EN_ALLOC_PREFER_ORDER;
+ priv->frag_info[i].frag_size =
+ (eff_mtu > buf_size + frag_sizes[i]) ?
+ frag_sizes[i] : eff_mtu - buf_size;
+ priv->frag_info[i].frag_prefix_size = buf_size;
+ priv->frag_info[i].frag_stride =
+ ALIGN(priv->frag_info[i].frag_size,
+ SMP_CACHE_BYTES);
+ priv->frag_info[i].dma_dir = PCI_DMA_FROMDEVICE;
+ priv->frag_info[i].rx_headroom = 0;
+ buf_size += priv->frag_info[i].frag_size;
+ i++;
+ }
}
priv->num_frags = i;
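In the non-XDP branch the effective MTU is carved into fragments drawn from frag_sizes[]. A worked sketch with stand-in sizes (the real FRAG_SZ* constants may differ), showing a 9000-byte MTU splitting into three fragments:

	#include <stdio.h>

	static const int frag_sizes[] = { 1536, 4096, 4096, 4096 };	/* assumed */

	int main(void)
	{
		int eff_mtu = 9000 + 14 + 8;	/* MTU + ETH_HLEN + 2 * VLAN_HLEN */
		int buf_size = 0, i = 0;

		while (buf_size < eff_mtu) {
			int sz = eff_mtu > buf_size + frag_sizes[i] ?
				 frag_sizes[i] : eff_mtu - buf_size;

			printf("frag %d: %d bytes at prefix %d\n", i, sz, buf_size);
			buf_size += sz;
			i++;
		}
		/* frag 0: 1536, frag 1: 4096, frag 2: 3390 -> num_frags = 3 */
		return 0;
	}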
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_selftest.c b/drivers/net/ethernet/mellanox/mlx4/en_selftest.c
index c06346a82496..95290e1fc9fe 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_selftest.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_selftest.c
@@ -68,7 +68,7 @@ static int mlx4_en_test_loopback_xmit(struct mlx4_en_priv *priv)
memcpy(ethh->h_dest, priv->dev->dev_addr, ETH_ALEN);
eth_zero_addr(ethh->h_source);
ethh->h_proto = htons(ETH_P_ARP);
- skb_set_mac_header(skb, 0);
+ skb_reset_mac_header(skb);
for (i = 0; i < packet_size; ++i) /* fill our packet */
packet[i] = (unsigned char)(i & 0xff);
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index e2509bba3e7c..5886ad78058f 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -66,7 +66,7 @@ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
ring->size = size;
ring->size_mask = size - 1;
- ring->stride = stride;
+ ring->sp_stride = stride;
ring->full_size = ring->size - HEADROOM - MAX_DESC_TXBBS;
tmp = size * sizeof(struct mlx4_en_tx_info);
@@ -90,22 +90,22 @@ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
goto err_info;
}
}
- ring->buf_size = ALIGN(size * ring->stride, MLX4_EN_PAGE_SIZE);
+ ring->buf_size = ALIGN(size * ring->sp_stride, MLX4_EN_PAGE_SIZE);
/* Allocate HW buffers on provided NUMA node */
set_dev_node(&mdev->dev->persist->pdev->dev, node);
- err = mlx4_alloc_hwq_res(mdev->dev, &ring->wqres, ring->buf_size);
+ err = mlx4_alloc_hwq_res(mdev->dev, &ring->sp_wqres, ring->buf_size);
set_dev_node(&mdev->dev->persist->pdev->dev, mdev->dev->numa_node);
if (err) {
en_err(priv, "Failed allocating hwq resources\n");
goto err_bounce;
}
- ring->buf = ring->wqres.buf.direct.buf;
+ ring->buf = ring->sp_wqres.buf.direct.buf;
en_dbg(DRV, priv, "Allocated TX ring (addr:%p) - buf:%p size:%d buf_size:%d dma:%llx\n",
ring, ring->buf, ring->size, ring->buf_size,
- (unsigned long long) ring->wqres.buf.direct.map);
+ (unsigned long long) ring->sp_wqres.buf.direct.map);
err = mlx4_qp_reserve_range(mdev->dev, 1, 1, &ring->qpn,
MLX4_RESERVE_ETH_BF_QP);
@@ -114,12 +114,12 @@ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
goto err_hwq_res;
}
- err = mlx4_qp_alloc(mdev->dev, ring->qpn, &ring->qp, GFP_KERNEL);
+ err = mlx4_qp_alloc(mdev->dev, ring->qpn, &ring->sp_qp, GFP_KERNEL);
if (err) {
en_err(priv, "Failed allocating qp %d\n", ring->qpn);
goto err_reserve;
}
- ring->qp.event = mlx4_en_sqp_event;
+ ring->sp_qp.event = mlx4_en_sqp_event;
err = mlx4_bf_alloc(mdev->dev, &ring->bf, node);
if (err) {
@@ -141,7 +141,7 @@ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
if (queue_index < priv->num_tx_rings_p_up)
cpumask_set_cpu(cpumask_local_spread(queue_index,
priv->mdev->dev->numa_node),
- &ring->affinity_mask);
+ &ring->sp_affinity_mask);
*pring = ring;
return 0;
@@ -149,7 +149,7 @@ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
err_reserve:
mlx4_qp_release_range(mdev->dev, ring->qpn, 1);
err_hwq_res:
- mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size);
+ mlx4_free_hwq_res(mdev->dev, &ring->sp_wqres, ring->buf_size);
err_bounce:
kfree(ring->bounce_buf);
ring->bounce_buf = NULL;
@@ -171,10 +171,10 @@ void mlx4_en_destroy_tx_ring(struct mlx4_en_priv *priv,
if (ring->bf_alloced)
mlx4_bf_free(mdev->dev, &ring->bf);
- mlx4_qp_remove(mdev->dev, &ring->qp);
- mlx4_qp_free(mdev->dev, &ring->qp);
+ mlx4_qp_remove(mdev->dev, &ring->sp_qp);
+ mlx4_qp_free(mdev->dev, &ring->sp_qp);
mlx4_qp_release_range(priv->mdev->dev, ring->qpn, 1);
- mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size);
+ mlx4_free_hwq_res(mdev->dev, &ring->sp_wqres, ring->buf_size);
kfree(ring->bounce_buf);
ring->bounce_buf = NULL;
kvfree(ring->tx_info);
@@ -190,7 +190,7 @@ int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv,
struct mlx4_en_dev *mdev = priv->mdev;
int err;
- ring->cqn = cq;
+ ring->sp_cqn = cq;
ring->prod = 0;
ring->cons = 0xffffffff;
ring->last_nr_txbb = 1;
@@ -198,21 +198,21 @@ int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv,
memset(ring->buf, 0, ring->buf_size);
ring->free_tx_desc = mlx4_en_free_tx_desc;
- ring->qp_state = MLX4_QP_STATE_RST;
- ring->doorbell_qpn = cpu_to_be32(ring->qp.qpn << 8);
+ ring->sp_qp_state = MLX4_QP_STATE_RST;
+ ring->doorbell_qpn = cpu_to_be32(ring->sp_qp.qpn << 8);
ring->mr_key = cpu_to_be32(mdev->mr.key);
- mlx4_en_fill_qp_context(priv, ring->size, ring->stride, 1, 0, ring->qpn,
- ring->cqn, user_prio, &ring->context);
+ mlx4_en_fill_qp_context(priv, ring->size, ring->sp_stride, 1, 0, ring->qpn,
+ ring->sp_cqn, user_prio, &ring->sp_context);
if (ring->bf_alloced)
- ring->context.usr_page =
+ ring->sp_context.usr_page =
cpu_to_be32(mlx4_to_hw_uar_index(mdev->dev,
ring->bf.uar->index));
- err = mlx4_qp_to_ready(mdev->dev, &ring->wqres.mtt, &ring->context,
- &ring->qp, &ring->qp_state);
- if (!cpumask_empty(&ring->affinity_mask))
- netif_set_xps_queue(priv->dev, &ring->affinity_mask,
+ err = mlx4_qp_to_ready(mdev->dev, &ring->sp_wqres.mtt, &ring->sp_context,
+ &ring->sp_qp, &ring->sp_qp_state);
+ if (!cpumask_empty(&ring->sp_affinity_mask))
+ netif_set_xps_queue(priv->dev, &ring->sp_affinity_mask,
ring->queue_index);
return err;
@@ -223,8 +223,8 @@ void mlx4_en_deactivate_tx_ring(struct mlx4_en_priv *priv,
{
struct mlx4_en_dev *mdev = priv->mdev;
- mlx4_qp_modify(mdev->dev, NULL, ring->qp_state,
- MLX4_QP_STATE_RST, NULL, 0, 0, &ring->qp);
+ mlx4_qp_modify(mdev->dev, NULL, ring->sp_qp_state,
+ MLX4_QP_STATE_RST, NULL, 0, 0, &ring->sp_qp);
}
static inline bool mlx4_en_is_tx_ring_full(struct mlx4_en_tx_ring *ring)
@@ -354,7 +354,7 @@ u32 mlx4_en_recycle_tx_desc(struct mlx4_en_priv *priv,
struct mlx4_en_rx_alloc frame = {
.page = tx_info->page,
.dma = tx_info->map0_dma,
- .page_offset = 0,
+ .page_offset = XDP_PACKET_HEADROOM,
.page_size = PAGE_SIZE,
};
@@ -392,7 +392,8 @@ int mlx4_en_free_tx_buf(struct net_device *dev, struct mlx4_en_tx_ring *ring)
cnt++;
}
- netdev_tx_reset_queue(ring->tx_queue);
+ if (ring->tx_queue)
+ netdev_tx_reset_queue(ring->tx_queue);
if (cnt)
en_dbg(DRV, priv, "Freed %d uncompleted tx descriptors\n", cnt);
@@ -405,7 +406,7 @@ static bool mlx4_en_process_tx_cq(struct net_device *dev,
{
struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_cq *mcq = &cq->mcq;
- struct mlx4_en_tx_ring *ring = priv->tx_ring[cq->ring];
+ struct mlx4_en_tx_ring *ring = priv->tx_ring[cq->type][cq->ring];
struct mlx4_cqe *cqe;
u16 index;
u16 new_index, ring_index, stamp_index;
@@ -807,7 +808,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
bool bf_ok;
tx_ind = skb_get_queue_mapping(skb);
- ring = priv->tx_ring[tx_ind];
+ ring = priv->tx_ring[TX][tx_ind];
if (!priv->port_up)
goto tx_drop;
@@ -1078,7 +1079,8 @@ tx_drop:
return NETDEV_TX_OK;
}
-netdev_tx_t mlx4_en_xmit_frame(struct mlx4_en_rx_alloc *frame,
+netdev_tx_t mlx4_en_xmit_frame(struct mlx4_en_rx_ring *rx_ring,
+ struct mlx4_en_rx_alloc *frame,
struct net_device *dev, unsigned int length,
int tx_ind, int *doorbell_pending)
{
@@ -1101,7 +1103,7 @@ netdev_tx_t mlx4_en_xmit_frame(struct mlx4_en_rx_alloc *frame,
BUILD_BUG_ON_MSG(ALIGN(CTRL_SIZE + DS_SIZE, TXBB_SIZE) != TXBB_SIZE,
"mlx4_en_xmit_frame requires minimum size tx desc");
- ring = priv->tx_ring[tx_ind];
+ ring = priv->tx_ring[TX_XDP][tx_ind];
if (!priv->port_up)
goto tx_drop;
@@ -1130,7 +1132,7 @@ netdev_tx_t mlx4_en_xmit_frame(struct mlx4_en_rx_alloc *frame,
tx_info->page = frame->page;
frame->page = NULL;
tx_info->map0_dma = dma;
- tx_info->map0_byte_count = length;
+ tx_info->map0_byte_count = PAGE_SIZE;
tx_info->nr_txbb = nr_txbb;
tx_info->nr_bytes = max_t(unsigned int, length, ETH_ZLEN);
tx_info->data_offset = (void *)data - (void *)tx_desc;
@@ -1139,9 +1141,10 @@ netdev_tx_t mlx4_en_xmit_frame(struct mlx4_en_rx_alloc *frame,
tx_info->linear = 1;
tx_info->inl = 0;
- dma_sync_single_for_device(priv->ddev, dma, length, PCI_DMA_TODEVICE);
+ dma_sync_single_range_for_device(priv->ddev, dma, frame->page_offset,
+ length, PCI_DMA_TODEVICE);
- data->addr = cpu_to_be64(dma);
+ data->addr = cpu_to_be64(dma + frame->page_offset);
data->lkey = ring->mr_key;
dma_wmb();
data->byte_count = cpu_to_be32(length);
@@ -1153,8 +1156,7 @@ netdev_tx_t mlx4_en_xmit_frame(struct mlx4_en_rx_alloc *frame,
((ring->prod & ring->size) ?
cpu_to_be32(MLX4_EN_BIT_DESC_OWN) : 0);
- ring->packets++;
- ring->bytes += tx_info->nr_bytes;
+ rx_ring->xdp_tx++;
AVG_PERF_COUNTER(priv->pstats.tx_pktsz_avg, length);
ring->prod += nr_txbb;
@@ -1178,7 +1180,7 @@ netdev_tx_t mlx4_en_xmit_frame(struct mlx4_en_rx_alloc *frame,
return NETDEV_TX_OK;
tx_drop_count:
- ring->tx_dropped++;
+ rx_ring->xdp_tx_full++;
tx_drop:
return NETDEV_TX_BUSY;
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index 75d07fa9d0b1..b2ca8a635b2e 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -4020,49 +4020,51 @@ int mlx4_restart_one(struct pci_dev *pdev)
return err;
}
+#define MLX_SP(id) { PCI_VDEVICE(MELLANOX, id), MLX4_PCI_DEV_FORCE_SENSE_PORT }
+#define MLX_VF(id) { PCI_VDEVICE(MELLANOX, id), MLX4_PCI_DEV_IS_VF }
+#define MLX_GN(id) { PCI_VDEVICE(MELLANOX, id), 0 }
+
static const struct pci_device_id mlx4_pci_table[] = {
- /* MT25408 "Hermon" SDR */
- { PCI_VDEVICE(MELLANOX, 0x6340), MLX4_PCI_DEV_FORCE_SENSE_PORT },
- /* MT25408 "Hermon" DDR */
- { PCI_VDEVICE(MELLANOX, 0x634a), MLX4_PCI_DEV_FORCE_SENSE_PORT },
- /* MT25408 "Hermon" QDR */
- { PCI_VDEVICE(MELLANOX, 0x6354), MLX4_PCI_DEV_FORCE_SENSE_PORT },
- /* MT25408 "Hermon" DDR PCIe gen2 */
- { PCI_VDEVICE(MELLANOX, 0x6732), MLX4_PCI_DEV_FORCE_SENSE_PORT },
- /* MT25408 "Hermon" QDR PCIe gen2 */
- { PCI_VDEVICE(MELLANOX, 0x673c), MLX4_PCI_DEV_FORCE_SENSE_PORT },
- /* MT25408 "Hermon" EN 10GigE */
- { PCI_VDEVICE(MELLANOX, 0x6368), MLX4_PCI_DEV_FORCE_SENSE_PORT },
- /* MT25408 "Hermon" EN 10GigE PCIe gen2 */
- { PCI_VDEVICE(MELLANOX, 0x6750), MLX4_PCI_DEV_FORCE_SENSE_PORT },
- /* MT25458 ConnectX EN 10GBASE-T 10GigE */
- { PCI_VDEVICE(MELLANOX, 0x6372), MLX4_PCI_DEV_FORCE_SENSE_PORT },
- /* MT25458 ConnectX EN 10GBASE-T+Gen2 10GigE */
- { PCI_VDEVICE(MELLANOX, 0x675a), MLX4_PCI_DEV_FORCE_SENSE_PORT },
- /* MT26468 ConnectX EN 10GigE PCIe gen2*/
- { PCI_VDEVICE(MELLANOX, 0x6764), MLX4_PCI_DEV_FORCE_SENSE_PORT },
- /* MT26438 ConnectX EN 40GigE PCIe gen2 5GT/s */
- { PCI_VDEVICE(MELLANOX, 0x6746), MLX4_PCI_DEV_FORCE_SENSE_PORT },
- /* MT26478 ConnectX2 40GigE PCIe gen2 */
- { PCI_VDEVICE(MELLANOX, 0x676e), MLX4_PCI_DEV_FORCE_SENSE_PORT },
- /* MT25400 Family [ConnectX-2 Virtual Function] */
- { PCI_VDEVICE(MELLANOX, 0x1002), MLX4_PCI_DEV_IS_VF },
+ /* MT25408 "Hermon" */
+ MLX_SP(PCI_DEVICE_ID_MELLANOX_HERMON_SDR), /* SDR */
+ MLX_SP(PCI_DEVICE_ID_MELLANOX_HERMON_DDR), /* DDR */
+ MLX_SP(PCI_DEVICE_ID_MELLANOX_HERMON_QDR), /* QDR */
+ MLX_SP(PCI_DEVICE_ID_MELLANOX_HERMON_DDR_GEN2), /* DDR Gen2 */
+ MLX_SP(PCI_DEVICE_ID_MELLANOX_HERMON_QDR_GEN2), /* QDR Gen2 */
+ MLX_SP(PCI_DEVICE_ID_MELLANOX_HERMON_EN), /* EN 10GigE */
+ MLX_SP(PCI_DEVICE_ID_MELLANOX_HERMON_EN_GEN2), /* EN 10GigE Gen2 */
+ /* MT25458 ConnectX EN 10GBASE-T */
+ MLX_SP(PCI_DEVICE_ID_MELLANOX_CONNECTX_EN),
+ MLX_SP(PCI_DEVICE_ID_MELLANOX_CONNECTX_EN_T_GEN2), /* Gen2 */
+ /* MT26468 ConnectX EN 10GigE PCIe Gen2 */
+ MLX_SP(PCI_DEVICE_ID_MELLANOX_CONNECTX_EN_GEN2),
+ /* MT26438 ConnectX EN 40GigE PCIe Gen2 5GT/s */
+ MLX_SP(PCI_DEVICE_ID_MELLANOX_CONNECTX_EN_5_GEN2),
+ /* MT26478 ConnectX2 40GigE PCIe Gen2 */
+ MLX_SP(PCI_DEVICE_ID_MELLANOX_CONNECTX2),
+ /* MT25400 Family [ConnectX-2] */
+ MLX_VF(0x1002), /* Virtual Function */
/* MT27500 Family [ConnectX-3] */
- { PCI_VDEVICE(MELLANOX, 0x1003), 0 },
- /* MT27500 Family [ConnectX-3 Virtual Function] */
- { PCI_VDEVICE(MELLANOX, 0x1004), MLX4_PCI_DEV_IS_VF },
- { PCI_VDEVICE(MELLANOX, 0x1005), 0 }, /* MT27510 Family */
- { PCI_VDEVICE(MELLANOX, 0x1006), 0 }, /* MT27511 Family */
- { PCI_VDEVICE(MELLANOX, 0x1007), 0 }, /* MT27520 Family */
- { PCI_VDEVICE(MELLANOX, 0x1008), 0 }, /* MT27521 Family */
- { PCI_VDEVICE(MELLANOX, 0x1009), 0 }, /* MT27530 Family */
- { PCI_VDEVICE(MELLANOX, 0x100a), 0 }, /* MT27531 Family */
- { PCI_VDEVICE(MELLANOX, 0x100b), 0 }, /* MT27540 Family */
- { PCI_VDEVICE(MELLANOX, 0x100c), 0 }, /* MT27541 Family */
- { PCI_VDEVICE(MELLANOX, 0x100d), 0 }, /* MT27550 Family */
- { PCI_VDEVICE(MELLANOX, 0x100e), 0 }, /* MT27551 Family */
- { PCI_VDEVICE(MELLANOX, 0x100f), 0 }, /* MT27560 Family */
- { PCI_VDEVICE(MELLANOX, 0x1010), 0 }, /* MT27561 Family */
+ MLX_GN(PCI_DEVICE_ID_MELLANOX_CONNECTX3),
+ MLX_VF(0x1004), /* Virtual Function */
+ MLX_GN(0x1005), /* MT27510 Family */
+ MLX_GN(0x1006), /* MT27511 Family */
+ MLX_GN(PCI_DEVICE_ID_MELLANOX_CONNECTX3_PRO), /* MT27520 Family */
+ MLX_GN(0x1008), /* MT27521 Family */
+ MLX_GN(0x1009), /* MT27530 Family */
+ MLX_GN(0x100a), /* MT27531 Family */
+ MLX_GN(0x100b), /* MT27540 Family */
+ MLX_GN(0x100c), /* MT27541 Family */
+ MLX_GN(0x100d), /* MT27550 Family */
+ MLX_GN(0x100e), /* MT27551 Family */
+ MLX_GN(0x100f), /* MT27560 Family */
+ MLX_GN(0x1010), /* MT27561 Family */
+
+ /*
+ * See the mellanox_check_broken_intx_masking() quirk when
+ * adding devices
+ */
+
{ 0, }
};
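The MLX_SP/MLX_VF/MLX_GN helpers merely collapse the repetitive table entries; roughly, one entry expands as follows (PCI_VDEVICE supplies the Mellanox vendor ID and wildcards the subsystem IDs):

	/*
	 *   MLX_GN(0x1005)
	 *     => { PCI_VDEVICE(MELLANOX, 0x1005), 0 }
	 *     => { PCI_VENDOR_ID_MELLANOX, 0x1005,
	 *          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }
	 *
	 *   MLX_VF(0x1004)
	 *     => { PCI_VDEVICE(MELLANOX, 0x1004), MLX4_PCI_DEV_IS_VF }
	 */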
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index a3528dd1e72e..ba1c6cd0cc79 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -207,8 +207,11 @@ enum {
*/
enum cq_type {
- RX = 0,
- TX = 1,
+ /* keep tx types first */
+ TX,
+ TX_XDP,
+#define MLX4_EN_NUM_TX_TYPES (TX_XDP + 1)
+ RX,
};
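The "keep tx types first" comment is load-bearing: every loop bounded by MLX4_EN_NUM_TX_TYPES assumes TX and TX_XDP occupy values 0 and 1. If one wanted to enforce that at build time, a purely illustrative guard inside some init function could be:

	BUILD_BUG_ON(TX != 0);
	BUILD_BUG_ON(TX_XDP != MLX4_EN_NUM_TX_TYPES - 1);
	BUILD_BUG_ON(RX != MLX4_EN_NUM_TX_TYPES);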
@@ -278,46 +281,50 @@ struct mlx4_en_tx_ring {
u32 last_nr_txbb;
u32 cons;
unsigned long wake_queue;
+ struct netdev_queue *tx_queue;
+ u32 (*free_tx_desc)(struct mlx4_en_priv *priv,
+ struct mlx4_en_tx_ring *ring,
+ int index, u8 owner,
+ u64 timestamp, int napi_mode);
+ struct mlx4_en_rx_ring *recycle_ring;
/* cache line used and dirtied in mlx4_en_xmit() */
u32 prod ____cacheline_aligned_in_smp;
+ unsigned int tx_dropped;
unsigned long bytes;
unsigned long packets;
unsigned long tx_csum;
unsigned long tso_packets;
unsigned long xmit_more;
- unsigned int tx_dropped;
struct mlx4_bf bf;
- unsigned long queue_stopped;
/* Following part should be mostly read */
- cpumask_t affinity_mask;
- struct mlx4_qp qp;
- struct mlx4_hwq_resources wqres;
+ __be32 doorbell_qpn;
+ __be32 mr_key;
u32 size; /* number of TXBBs */
u32 size_mask;
- u16 stride;
u32 full_size;
- u16 cqn; /* index of port CQ associated with this ring */
u32 buf_size;
- __be32 doorbell_qpn;
- __be32 mr_key;
void *buf;
struct mlx4_en_tx_info *tx_info;
- struct mlx4_en_rx_ring *recycle_ring;
- u32 (*free_tx_desc)(struct mlx4_en_priv *priv,
- struct mlx4_en_tx_ring *ring,
- int index, u8 owner,
- u64 timestamp, int napi_mode);
- u8 *bounce_buf;
- struct mlx4_qp_context context;
int qpn;
- enum mlx4_qp_state qp_state;
u8 queue_index;
bool bf_enabled;
bool bf_alloced;
- struct netdev_queue *tx_queue;
- int hwtstamp_tx_type;
+ u8 hwtstamp_tx_type;
+ u8 *bounce_buf;
+
+ /* Not used in fast path
+ * Only queue_stopped might be used if BQL is not working properly.
+ */
+ unsigned long queue_stopped;
+ struct mlx4_hwq_resources sp_wqres;
+ struct mlx4_qp sp_qp;
+ struct mlx4_qp_context sp_context;
+ cpumask_t sp_affinity_mask;
+ enum mlx4_qp_state sp_qp_state;
+ u16 sp_stride;
+ u16 sp_cqn; /* index of port CQ associated with this ring */
} ____cacheline_aligned_in_smp;
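The reshuffle above groups fast-path fields at the front, gives the producer side its own cache line, and pushes setup-only state (now sp_-prefixed) to the cold tail. A minimal sketch of the layout pattern, using the kernel's ____cacheline_aligned_in_smp (not the real struct):

	struct ring_layout {
		/* completion (consumer) path */
		u32 cons;
		unsigned long wake_queue;

		/* transmit (producer) path starts on its own cache line */
		u32 prod ____cacheline_aligned_in_smp;
		unsigned long bytes;
		unsigned long packets;

		/* slow path: touched only at setup/teardown, kept cold */
		struct mlx4_qp sp_qp;
		u16 sp_stride;
		u16 sp_cqn;
	} ____cacheline_aligned_in_smp;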
struct mlx4_en_rx_desc {
@@ -347,6 +354,9 @@ struct mlx4_en_rx_ring {
unsigned long csum_ok;
unsigned long csum_none;
unsigned long csum_complete;
+ unsigned long xdp_drop;
+ unsigned long xdp_tx;
+ unsigned long xdp_tx_full;
unsigned long dropped;
int hwtstamp_rx_filter;
cpumask_var_t affinity_mask;
@@ -361,7 +371,7 @@ struct mlx4_en_cq {
int size;
int buf_size;
int vector;
- enum cq_type is_tx;
+ enum cq_type type;
u16 moder_time;
u16 moder_cnt;
struct mlx4_cqe *buf;
@@ -372,7 +382,7 @@ struct mlx4_en_cq {
struct mlx4_en_port_profile {
u32 flags;
- u32 tx_ring_num;
+ u32 tx_ring_num[MLX4_EN_NUM_TX_TYPES];
u32 rx_ring_num;
u32 tx_ring_size;
u32 rx_ring_size;
@@ -465,7 +475,8 @@ struct mlx4_en_frag_info {
u16 frag_prefix_size;
u32 frag_stride;
enum dma_data_direction dma_dir;
- int order;
+ u16 order;
+ u16 rx_headroom;
};
#ifdef CONFIG_MLX4_EN_DCB
@@ -569,17 +580,16 @@ struct mlx4_en_priv {
u32 flags;
u8 num_tx_rings_p_up;
u32 tx_work_limit;
- u32 tx_ring_num;
+ u32 tx_ring_num[MLX4_EN_NUM_TX_TYPES];
u32 rx_ring_num;
u32 rx_skb_size;
struct mlx4_en_frag_info frag_info[MLX4_EN_MAX_RX_FRAGS];
u16 num_frags;
u16 log_rx_info;
- int xdp_ring_num;
- struct mlx4_en_tx_ring **tx_ring;
+ struct mlx4_en_tx_ring **tx_ring[MLX4_EN_NUM_TX_TYPES];
struct mlx4_en_rx_ring *rx_ring[MAX_RX_RINGS];
- struct mlx4_en_cq **tx_cq;
+ struct mlx4_en_cq **tx_cq[MLX4_EN_NUM_TX_TYPES];
struct mlx4_en_cq *rx_cq[MAX_RX_RINGS];
struct mlx4_qp drop_qp;
struct work_struct rx_mode_task;
@@ -597,6 +607,7 @@ struct mlx4_en_priv {
struct mlx4_en_flow_stats_rx rx_flowstats;
struct mlx4_en_flow_stats_tx tx_flowstats;
struct mlx4_en_port_stats port_stats;
+ struct mlx4_en_xdp_stats xdp_stats;
struct mlx4_en_stats_bitmap stats_bitmap;
struct list_head mc_list;
struct list_head curr_list;
@@ -685,7 +696,8 @@ void mlx4_en_tx_irq(struct mlx4_cq *mcq);
u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb,
void *accel_priv, select_queue_fallback_t fallback);
netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev);
-netdev_tx_t mlx4_en_xmit_frame(struct mlx4_en_rx_alloc *frame,
+netdev_tx_t mlx4_en_xmit_frame(struct mlx4_en_rx_ring *rx_ring,
+ struct mlx4_en_rx_alloc *frame,
struct net_device *dev, unsigned int length,
int tx_ind, int *doorbell_pending);
void mlx4_en_xmit_doorbell(struct mlx4_en_tx_ring *ring);
@@ -744,6 +756,7 @@ void mlx4_en_rx_irq(struct mlx4_cq *mcq);
int mlx4_SET_MCAST_FLTR(struct mlx4_dev *dev, u8 port, u64 mac, u64 clear, u8 mode);
int mlx4_SET_VLAN_FLTR(struct mlx4_dev *dev, struct mlx4_en_priv *priv);
+void mlx4_en_fold_software_stats(struct net_device *dev);
int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset);
int mlx4_en_QUERY_PORT(struct mlx4_en_dev *mdev, u8 port);
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_stats.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_stats.h
index 7fd466c0b929..48641cb0367f 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_stats.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_stats.h
@@ -55,6 +55,13 @@ struct mlx4_en_perf_stats {
#define NUM_PERF_COUNTERS 6
};
+struct mlx4_en_xdp_stats {
+ unsigned long rx_xdp_drop;
+ unsigned long rx_xdp_tx;
+ unsigned long rx_xdp_tx_full;
+#define NUM_XDP_STATS 3
+};
+
#define NUM_MAIN_STATS 21
#define MLX4_NUM_PRIORITIES 8
@@ -107,7 +114,8 @@ enum {
};
#define NUM_ALL_STATS (NUM_MAIN_STATS + NUM_PORT_STATS + NUM_PKT_STATS + \
- NUM_FLOW_STATS + NUM_PERF_STATS + NUM_PF_STATS)
+ NUM_FLOW_STATS + NUM_PERF_STATS + NUM_PF_STATS + \
+ NUM_XDP_STATS)
#define MLX4_FIND_NETDEV_STAT(n) (offsetof(struct net_device_stats, n) / \
sizeof(((struct net_device_stats *)0)->n))
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
index 521cfdb7d11e..ddb4ca4ff930 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
@@ -14,7 +14,7 @@ config MLX5_CORE
config MLX5_CORE_EN
bool "Mellanox Technologies ConnectX-4 Ethernet support"
depends on NETDEVICES && ETHERNET && PCI && MLX5_CORE
- select PTP_1588_CLOCK
+ imply PTP_1588_CLOCK
default n
---help---
Ethernet support in Mellanox Technologies ConnectX-4 NIC.
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
index 0343725d7f44..9f43beb86250 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
@@ -8,6 +8,6 @@ mlx5_core-y := main.o cmd.o debugfs.o fw.o eq.o uar.o pagealloc.o \
mlx5_core-$(CONFIG_MLX5_CORE_EN) += wq.o eswitch.o eswitch_offloads.o \
en_main.o en_common.o en_fs.o en_ethtool.o en_tx.o \
en_rx.o en_rx_am.o en_txrx.o en_clock.o vxlan.o \
- en_tc.o en_arfs.o en_rep.o en_fs_ethtool.o
+ en_tc.o en_arfs.o en_rep.o en_fs_ethtool.o en_selftest.o
mlx5_core-$(CONFIG_MLX5_CORE_EN_DCB) += en_dcbnl.o
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/alloc.c b/drivers/net/ethernet/mellanox/mlx5/core/alloc.c
index 2c6e3c7b7417..66bd213f35ce 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/alloc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/alloc.c
@@ -106,6 +106,63 @@ void mlx5_buf_free(struct mlx5_core_dev *dev, struct mlx5_buf *buf)
}
EXPORT_SYMBOL_GPL(mlx5_buf_free);
+int mlx5_frag_buf_alloc_node(struct mlx5_core_dev *dev, int size,
+ struct mlx5_frag_buf *buf, int node)
+{
+ int i;
+
+ buf->size = size;
+ buf->npages = 1 << get_order(size);
+ buf->page_shift = PAGE_SHIFT;
+ buf->frags = kcalloc(buf->npages, sizeof(struct mlx5_buf_list),
+ GFP_KERNEL);
+ if (!buf->frags)
+ goto err_out;
+
+ for (i = 0; i < buf->npages; i++) {
+ struct mlx5_buf_list *frag = &buf->frags[i];
+ int frag_sz = min_t(int, size, PAGE_SIZE);
+
+ frag->buf = mlx5_dma_zalloc_coherent_node(dev, frag_sz,
+ &frag->map, node);
+ if (!frag->buf)
+ goto err_free_buf;
+ if (frag->map & ((1 << buf->page_shift) - 1)) {
+ dma_free_coherent(&dev->pdev->dev, frag_sz,
+ buf->frags[i].buf, buf->frags[i].map);
+ mlx5_core_warn(dev, "unexpected map alignment: %pad, page_shift=%d\n",
+ &frag->map, buf->page_shift);
+ goto err_free_buf;
+ }
+ size -= frag_sz;
+ }
+
+ return 0;
+
+err_free_buf:
+ while (i--)
+ dma_free_coherent(&dev->pdev->dev, PAGE_SIZE, buf->frags[i].buf,
+ buf->frags[i].map);
+ kfree(buf->frags);
+err_out:
+ return -ENOMEM;
+}
+
+void mlx5_frag_buf_free(struct mlx5_core_dev *dev, struct mlx5_frag_buf *buf)
+{
+ int size = buf->size;
+ int i;
+
+ for (i = 0; i < buf->npages; i++) {
+ int frag_sz = min_t(int, size, PAGE_SIZE);
+
+ dma_free_coherent(&dev->pdev->dev, frag_sz, buf->frags[i].buf,
+ buf->frags[i].map);
+ size -= frag_sz;
+ }
+ kfree(buf->frags);
+}
+
static struct mlx5_db_pgdir *mlx5_alloc_db_pgdir(struct mlx5_core_dev *dev,
int node)
{
@@ -230,3 +287,12 @@ void mlx5_fill_page_array(struct mlx5_buf *buf, __be64 *pas)
}
}
EXPORT_SYMBOL_GPL(mlx5_fill_page_array);
+
+void mlx5_fill_page_frag_array(struct mlx5_frag_buf *buf, __be64 *pas)
+{
+ int i;
+
+ for (i = 0; i < buf->npages; i++)
+ pas[i] = cpu_to_be64(buf->frags[i].map);
+}
+EXPORT_SYMBOL_GPL(mlx5_fill_page_frag_array);
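
A hedged usage sketch of the fragmented-buffer API added above: per-page coherent DMA chunks replace one large contiguous allocation, and the PAS array handed to firmware is filled one page address at a time (the priv.numa_node field is assumed from mlx5_priv):

	static int example_frag_buf(struct mlx5_core_dev *dev, int bytes,
				    __be64 *pas)
	{
		struct mlx5_frag_buf buf;
		int err;

		err = mlx5_frag_buf_alloc_node(dev, bytes, &buf,
					       dev->priv.numa_node);
		if (err)
			return err;

		mlx5_fill_page_frag_array(&buf, pas);	/* one PA per page */
		mlx5_frag_buf_free(dev, &buf);
		return 0;
	}
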
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
index bfe410e8a469..3797cc7c1288 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
@@ -54,14 +54,6 @@ enum {
};
enum {
- NUM_LONG_LISTS = 2,
- NUM_MED_LISTS = 64,
- LONG_LIST_SIZE = (2ULL * 1024 * 1024 * 1024 / PAGE_SIZE) * 8 + 16 +
- MLX5_CMD_DATA_BLOCK_SIZE,
- MED_LIST_SIZE = 16 + MLX5_CMD_DATA_BLOCK_SIZE,
-};
-
-enum {
MLX5_CMD_DELIVERY_STAT_OK = 0x0,
MLX5_CMD_DELIVERY_STAT_SIGNAT_ERR = 0x1,
MLX5_CMD_DELIVERY_STAT_TOK_ERR = 0x2,
@@ -313,6 +305,8 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY:
case MLX5_CMD_OP_SET_FLOW_TABLE_ROOT:
case MLX5_CMD_OP_DEALLOC_ENCAP_HEADER:
+ case MLX5_CMD_OP_DESTROY_SCHEDULING_ELEMENT:
+ case MLX5_CMD_OP_DESTROY_QOS_PARA_VPORT:
return MLX5_CMD_STAT_OK;
case MLX5_CMD_OP_QUERY_HCA_CAP:
@@ -414,11 +408,14 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
case MLX5_CMD_OP_QUERY_FLOW_TABLE:
case MLX5_CMD_OP_CREATE_FLOW_GROUP:
case MLX5_CMD_OP_QUERY_FLOW_GROUP:
-
case MLX5_CMD_OP_QUERY_FLOW_TABLE_ENTRY:
case MLX5_CMD_OP_ALLOC_FLOW_COUNTER:
case MLX5_CMD_OP_QUERY_FLOW_COUNTER:
case MLX5_CMD_OP_ALLOC_ENCAP_HEADER:
+ case MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT:
+ case MLX5_CMD_OP_QUERY_SCHEDULING_ELEMENT:
+ case MLX5_CMD_OP_MODIFY_SCHEDULING_ELEMENT:
+ case MLX5_CMD_OP_CREATE_QOS_PARA_VPORT:
*status = MLX5_DRIVER_STATUS_ABORTED;
*synd = MLX5_DRIVER_SYND;
return -EIO;
@@ -575,6 +572,12 @@ const char *mlx5_command_str(int command)
MLX5_COMMAND_STR_CASE(MODIFY_FLOW_TABLE);
MLX5_COMMAND_STR_CASE(ALLOC_ENCAP_HEADER);
MLX5_COMMAND_STR_CASE(DEALLOC_ENCAP_HEADER);
+ MLX5_COMMAND_STR_CASE(CREATE_SCHEDULING_ELEMENT);
+ MLX5_COMMAND_STR_CASE(DESTROY_SCHEDULING_ELEMENT);
+ MLX5_COMMAND_STR_CASE(QUERY_SCHEDULING_ELEMENT);
+ MLX5_COMMAND_STR_CASE(MODIFY_SCHEDULING_ELEMENT);
+ MLX5_COMMAND_STR_CASE(CREATE_QOS_PARA_VPORT);
+ MLX5_COMMAND_STR_CASE(DESTROY_QOS_PARA_VPORT);
default: return "unknown command opcode";
}
}
@@ -1058,14 +1061,13 @@ static struct mlx5_cmd_mailbox *alloc_cmd_box(struct mlx5_core_dev *dev,
if (!mailbox)
return ERR_PTR(-ENOMEM);
- mailbox->buf = pci_pool_alloc(dev->cmd.pool, flags,
- &mailbox->dma);
+ mailbox->buf = pci_pool_zalloc(dev->cmd.pool, flags,
+ &mailbox->dma);
if (!mailbox->buf) {
mlx5_core_dbg(dev, "failed allocation\n");
kfree(mailbox);
return ERR_PTR(-ENOMEM);
}
- memset(mailbox->buf, 0, sizeof(struct mlx5_cmd_prot_block));
mailbox->next = NULL;
return mailbox;
@@ -1356,10 +1358,10 @@ static void free_msg(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *msg)
{
unsigned long flags;
- if (msg->cache) {
- spin_lock_irqsave(&msg->cache->lock, flags);
- list_add_tail(&msg->list, &msg->cache->head);
- spin_unlock_irqrestore(&msg->cache->lock, flags);
+ if (msg->parent) {
+ spin_lock_irqsave(&msg->parent->lock, flags);
+ list_add_tail(&msg->list, &msg->parent->head);
+ spin_unlock_irqrestore(&msg->parent->lock, flags);
} else {
mlx5_free_cmd_msg(dev, msg);
}
@@ -1456,30 +1458,37 @@ static struct mlx5_cmd_msg *alloc_msg(struct mlx5_core_dev *dev, int in_size,
gfp_t gfp)
{
struct mlx5_cmd_msg *msg = ERR_PTR(-ENOMEM);
+ struct cmd_msg_cache *ch = NULL;
struct mlx5_cmd *cmd = &dev->cmd;
- struct cache_ent *ent = NULL;
-
- if (in_size > MED_LIST_SIZE && in_size <= LONG_LIST_SIZE)
- ent = &cmd->cache.large;
- else if (in_size > 16 && in_size <= MED_LIST_SIZE)
- ent = &cmd->cache.med;
-
- if (ent) {
- spin_lock_irq(&ent->lock);
- if (!list_empty(&ent->head)) {
- msg = list_entry(ent->head.next, typeof(*msg), list);
- /* For cached lists, we must explicitly state what is
- * the real size
- */
- msg->len = in_size;
- list_del(&msg->list);
+ int i;
+
+ if (in_size <= 16)
+ goto cache_miss;
+
+ for (i = 0; i < MLX5_NUM_COMMAND_CACHES; i++) {
+ ch = &cmd->cache[i];
+ if (in_size > ch->max_inbox_size)
+ continue;
+ spin_lock_irq(&ch->lock);
+ if (list_empty(&ch->head)) {
+ spin_unlock_irq(&ch->lock);
+ continue;
}
- spin_unlock_irq(&ent->lock);
+ msg = list_entry(ch->head.next, typeof(*msg), list);
+ /* For cached lists, we must explicitly state what is
+ * the real size
+ */
+ msg->len = in_size;
+ list_del(&msg->list);
+ spin_unlock_irq(&ch->lock);
+ break;
}
- if (IS_ERR(msg))
- msg = mlx5_alloc_cmd_msg(dev, gfp, in_size, 0);
+ if (!IS_ERR(msg))
+ return msg;
+cache_miss:
+ msg = mlx5_alloc_cmd_msg(dev, gfp, in_size, 0);
return msg;
}
@@ -1577,58 +1586,56 @@ EXPORT_SYMBOL(mlx5_cmd_exec_cb);
static void destroy_msg_cache(struct mlx5_core_dev *dev)
{
- struct mlx5_cmd *cmd = &dev->cmd;
+ struct cmd_msg_cache *ch;
struct mlx5_cmd_msg *msg;
struct mlx5_cmd_msg *n;
+ int i;
- list_for_each_entry_safe(msg, n, &cmd->cache.large.head, list) {
- list_del(&msg->list);
- mlx5_free_cmd_msg(dev, msg);
- }
-
- list_for_each_entry_safe(msg, n, &cmd->cache.med.head, list) {
- list_del(&msg->list);
- mlx5_free_cmd_msg(dev, msg);
+ for (i = 0; i < MLX5_NUM_COMMAND_CACHES; i++) {
+ ch = &dev->cmd.cache[i];
+ list_for_each_entry_safe(msg, n, &ch->head, list) {
+ list_del(&msg->list);
+ mlx5_free_cmd_msg(dev, msg);
+ }
}
}
-static int create_msg_cache(struct mlx5_core_dev *dev)
+static unsigned cmd_cache_num_ent[MLX5_NUM_COMMAND_CACHES] = {
+ 512, 32, 16, 8, 2
+};
+
+static unsigned cmd_cache_ent_size[MLX5_NUM_COMMAND_CACHES] = {
+ 16 + MLX5_CMD_DATA_BLOCK_SIZE,
+ 16 + MLX5_CMD_DATA_BLOCK_SIZE * 2,
+ 16 + MLX5_CMD_DATA_BLOCK_SIZE * 16,
+ 16 + MLX5_CMD_DATA_BLOCK_SIZE * 256,
+ 16 + MLX5_CMD_DATA_BLOCK_SIZE * 512,
+};
+
+static void create_msg_cache(struct mlx5_core_dev *dev)
{
struct mlx5_cmd *cmd = &dev->cmd;
+ struct cmd_msg_cache *ch;
struct mlx5_cmd_msg *msg;
- int err;
int i;
-
- spin_lock_init(&cmd->cache.large.lock);
- INIT_LIST_HEAD(&cmd->cache.large.head);
- spin_lock_init(&cmd->cache.med.lock);
- INIT_LIST_HEAD(&cmd->cache.med.head);
-
- for (i = 0; i < NUM_LONG_LISTS; i++) {
- msg = mlx5_alloc_cmd_msg(dev, GFP_KERNEL, LONG_LIST_SIZE, 0);
- if (IS_ERR(msg)) {
- err = PTR_ERR(msg);
- goto ex_err;
- }
- msg->cache = &cmd->cache.large;
- list_add_tail(&msg->list, &cmd->cache.large.head);
- }
-
- for (i = 0; i < NUM_MED_LISTS; i++) {
- msg = mlx5_alloc_cmd_msg(dev, GFP_KERNEL, MED_LIST_SIZE, 0);
- if (IS_ERR(msg)) {
- err = PTR_ERR(msg);
- goto ex_err;
+ int k;
+
+ /* Initialize and fill the caches with initial entries */
+ for (k = 0; k < MLX5_NUM_COMMAND_CACHES; k++) {
+ ch = &cmd->cache[k];
+ spin_lock_init(&ch->lock);
+ INIT_LIST_HEAD(&ch->head);
+ ch->num_ent = cmd_cache_num_ent[k];
+ ch->max_inbox_size = cmd_cache_ent_size[k];
+ for (i = 0; i < ch->num_ent; i++) {
+ msg = mlx5_alloc_cmd_msg(dev, GFP_KERNEL | __GFP_NOWARN,
+ ch->max_inbox_size, 0);
+ if (IS_ERR(msg))
+ break;
+ msg->parent = ch;
+ list_add_tail(&msg->list, &ch->head);
}
- msg->cache = &cmd->cache.med;
- list_add_tail(&msg->list, &cmd->cache.med.head);
}
-
- return 0;
-
-ex_err:
- destroy_msg_cache(dev);
- return err;
}
static int alloc_cmd_page(struct mlx5_core_dev *dev, struct mlx5_cmd *cmd)
@@ -1751,11 +1758,7 @@ int mlx5_cmd_init(struct mlx5_core_dev *dev)
cmd->mode = CMD_MODE_POLLING;
- err = create_msg_cache(dev);
- if (err) {
- dev_err(&dev->pdev->dev, "failed to create command cache\n");
- goto err_free_page;
- }
+ create_msg_cache(dev);
set_wqname(dev);
cmd->wq = create_singlethread_workqueue(cmd->wq_name);
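
With MLX5_CMD_DATA_BLOCK_SIZE at 512 bytes, the five caches cover inboxes of roughly 528 B, 1 KB, 8 KB, 128 KB and 256 KB, with entry counts shrinking as sizes grow. A sketch of the size-to-bucket mapping alloc_msg() now performs, under those assumed sizes:

	static int cmd_cache_index(int in_size)
	{
		static const int max_inbox[] = {
			16 + 512,	/* 512 entries */
			16 + 512 * 2,	/*  32 entries */
			16 + 512 * 16,	/*  16 entries */
			16 + 512 * 256,	/*   8 entries */
			16 + 512 * 512,	/*   2 entries */
		};
		int i;

		if (in_size <= 16)	/* tiny inboxes skip the cache */
			return -1;
		for (i = 0; i < ARRAY_SIZE(max_inbox); i++)
			if (in_size <= max_inbox[i])
				return i;	/* smallest cache that fits */
		return -1;		/* larger than any cached entry */
	}

Note that alloc_msg() additionally falls through to the next larger cache when the best-fit one is empty, before finally allocating a fresh message.
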
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index 71382df59fc0..951dbd58594d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -77,9 +77,9 @@
MLX5_MPWRQ_WQE_PAGE_ORDER)
#define MLX5_MTT_OCTW(npages) (ALIGN(npages, 8) / 2)
-#define MLX5E_REQUIRED_MTTS(rqs, wqes)\
- (rqs * wqes * ALIGN(MLX5_MPWRQ_PAGES_PER_WQE, 8))
-#define MLX5E_VALID_NUM_MTTS(num_mtts) (MLX5_MTT_OCTW(num_mtts) <= U16_MAX)
+#define MLX5E_REQUIRED_MTTS(wqes) \
+ (wqes * ALIGN(MLX5_MPWRQ_PAGES_PER_WQE, 8))
+#define MLX5E_VALID_NUM_MTTS(num_mtts) (MLX5_MTT_OCTW(num_mtts) - 1 <= U16_MAX)
#define MLX5_UMR_ALIGN (2048)
#define MLX5_MPWRQ_SMALL_PACKET_THRESHOLD (128)
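
MLX5E_REQUIRED_MTTS() now depends only on the WQE count of a single RQ, because the UMR mkey moves from the device to the RQ later in this series. A worked example under assumed sizes:

	/* Assumed values:
	 *   MLX5_MPWRQ_PAGES_PER_WQE = 64   (256 KB WQE / 4 KB pages)
	 *   log_rq_size              = 8    (256 multi-packet WQEs)
	 *
	 *   MLX5E_REQUIRED_MTTS(256) = 256 * ALIGN(64, 8) = 16384 MTTs
	 *   MLX5_MTT_OCTW(16384)     = ALIGN(16384, 8) / 2 = 8192 octwords
	 *
	 * 8192 - 1 <= U16_MAX, so one UMR mkey per RQ is valid; the old
	 * per-device product (rqs * wqes) no longer caps the channel count.
	 */
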
@@ -150,12 +150,6 @@ static inline int mlx5_max_log_rq_size(int wq_type)
}
}
-enum {
- MLX5E_INLINE_MODE_L2,
- MLX5E_INLINE_MODE_VPORT_CONTEXT,
- MLX5_INLINE_MODE_NOT_REQUIRED,
-};
-
struct mlx5e_tx_wqe {
struct mlx5_wqe_ctrl_seg ctrl;
struct mlx5_wqe_eth_seg eth;
@@ -173,22 +167,28 @@ struct mlx5e_umr_wqe {
struct mlx5_wqe_data_seg data;
};
+extern const char mlx5e_self_tests[][ETH_GSTRING_LEN];
+
static const char mlx5e_priv_flags[][ETH_GSTRING_LEN] = {
"rx_cqe_moder",
+ "rx_cqe_compress",
};
enum mlx5e_priv_flag {
MLX5E_PFLAG_RX_CQE_BASED_MODER = (1 << 0),
+ MLX5E_PFLAG_RX_CQE_COMPRESS = (1 << 1),
};
-#define MLX5E_SET_PRIV_FLAG(priv, pflag, enable) \
- do { \
- if (enable) \
- priv->pflags |= pflag; \
- else \
- priv->pflags &= ~pflag; \
+#define MLX5E_SET_PFLAG(priv, pflag, enable) \
+ do { \
+ if (enable) \
+ (priv)->params.pflags |= (pflag); \
+ else \
+ (priv)->params.pflags &= ~(pflag); \
} while (0)
+#define MLX5E_GET_PFLAG(priv, pflag) (!!((priv)->params.pflags & (pflag)))
+
#ifdef CONFIG_MLX5_CORE_EN_DCB
#define MLX5E_MAX_BW_ALLOC 100 /* Max percentage of BW allocation */
#endif
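
Private flags move into params.pflags so they persist with the rest of the channel parameters across detach/attach. Usage sketch of the renamed helpers:

	/* set, then test, a flag from enum mlx5e_priv_flag */
	MLX5E_SET_PFLAG(priv, MLX5E_PFLAG_RX_CQE_COMPRESS, true);

	if (MLX5E_GET_PFLAG(priv, MLX5E_PFLAG_RX_CQE_COMPRESS))
		log_stride_sz = MLX5_MPWRQ_LOG_STRIDE_SIZE_CQE_COMPRESS;
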
@@ -207,8 +207,7 @@ struct mlx5e_params {
u16 num_channels;
u8 num_tc;
u8 rx_cq_period_mode;
- bool rx_cqe_compress_admin;
- bool rx_cqe_compress;
+ bool rx_cqe_compress_def;
struct mlx5e_cq_moder rx_cq_moderation;
struct mlx5e_cq_moder tx_cq_moderation;
u16 min_rx_wqes;
@@ -220,12 +219,34 @@ struct mlx5e_params {
u8 toeplitz_hash_key[40];
u32 indirection_rqt[MLX5E_INDIR_RQT_SIZE];
bool vlan_strip_disable;
-#ifdef CONFIG_MLX5_CORE_EN_DCB
- struct ieee_ets ets;
-#endif
bool rx_am_enabled;
u32 lro_timeout;
+ u32 pflags;
+};
+
+#ifdef CONFIG_MLX5_CORE_EN_DCB
+struct mlx5e_cee_config {
+ /* bw pct for priority group */
+ u8 pg_bw_pct[CEE_DCBX_MAX_PGS];
+ u8 prio_to_pg_map[CEE_DCBX_MAX_PRIO];
+ bool pfc_setting[CEE_DCBX_MAX_PRIO];
+ bool pfc_enable;
+};
+
+enum {
+ MLX5_DCB_CHG_RESET,
+ MLX5_DCB_NO_CHG,
+ MLX5_DCB_CHG_NO_RESET,
+};
+
+struct mlx5e_dcbx {
+ enum mlx5_dcbx_oper_mode mode;
+ struct mlx5e_cee_config cee_cfg; /* pending configuration */
+
+ /* The only setting that cannot be read from FW */
+ u8 tc_tsa[IEEE_8021QAZ_MAX_TCS];
};
+#endif
struct mlx5e_tstamp {
rwlock_t lock;
@@ -265,7 +286,7 @@ struct mlx5e_cq {
u16 decmprs_wqe_counter;
/* control */
- struct mlx5_wq_ctrl wq_ctrl;
+ struct mlx5_frag_wq_ctrl wq_ctrl;
} ____cacheline_aligned_in_smp;
struct mlx5e_rq;
@@ -326,7 +347,6 @@ struct mlx5e_rq {
struct {
struct mlx5e_mpw_info *info;
void *mtt_no_align;
- u32 mtt_offset;
} mpwqe;
};
struct {
@@ -361,6 +381,7 @@ struct mlx5e_rq {
u32 rqn;
struct mlx5e_channel *channel;
struct mlx5e_priv *priv;
+ struct mlx5_core_mkey umr_mkey;
} ____cacheline_aligned_in_smp;
struct mlx5e_umr_dma_info {
@@ -524,7 +545,7 @@ struct mlx5e_vxlan_db {
struct mlx5e_l2_rule {
u8 addr[ETH_ALEN + 2];
- struct mlx5_flow_rule *rule;
+ struct mlx5_flow_handle *rule;
};
struct mlx5e_flow_table {
@@ -545,10 +566,10 @@ struct mlx5e_tc_table {
struct mlx5e_vlan_table {
struct mlx5e_flow_table ft;
unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
- struct mlx5_flow_rule *active_vlans_rule[VLAN_N_VID];
- struct mlx5_flow_rule *untagged_rule;
- struct mlx5_flow_rule *any_vlan_rule;
- bool filter_disabled;
+ struct mlx5_flow_handle *active_vlans_rule[VLAN_N_VID];
+ struct mlx5_flow_handle *untagged_rule;
+ struct mlx5_flow_handle *any_vlan_rule;
+ bool filter_disabled;
};
struct mlx5e_l2_table {
@@ -566,14 +587,14 @@ struct mlx5e_l2_table {
/* L3/L4 traffic type classifier */
struct mlx5e_ttc_table {
struct mlx5e_flow_table ft;
- struct mlx5_flow_rule *rules[MLX5E_NUM_TT];
+ struct mlx5_flow_handle *rules[MLX5E_NUM_TT];
};
#define ARFS_HASH_SHIFT BITS_PER_BYTE
#define ARFS_HASH_SIZE BIT(BITS_PER_BYTE)
struct arfs_table {
struct mlx5e_flow_table ft;
- struct mlx5_flow_rule *default_rule;
+ struct mlx5_flow_handle *default_rule;
struct hlist_head rules_hash[ARFS_HASH_SIZE];
};
@@ -668,7 +689,6 @@ struct mlx5e_priv {
unsigned long state;
struct mutex state_lock; /* Protects Interface state */
- struct mlx5_core_mkey umr_mkey;
struct mlx5e_rq drop_rq;
struct mlx5e_channel **channel;
@@ -688,12 +708,15 @@ struct mlx5e_priv {
struct work_struct tx_timeout_work;
struct delayed_work update_stats_work;
- u32 pflags;
struct mlx5_core_dev *mdev;
struct net_device *netdev;
struct mlx5e_stats stats;
struct mlx5e_tstamp tstamp;
u16 q_counter;
+#ifdef CONFIG_MLX5_CORE_EN_DCB
+ struct mlx5e_dcbx dcbx;
+#endif
+
const struct mlx5e_profile *profile;
void *ppriv;
};
@@ -735,6 +758,9 @@ int mlx5e_create_flow_steering(struct mlx5e_priv *priv);
void mlx5e_destroy_flow_steering(struct mlx5e_priv *priv);
void mlx5e_init_l2_addr(struct mlx5e_priv *priv);
void mlx5e_destroy_flow_table(struct mlx5e_flow_table *ft);
+int mlx5e_self_test_num(struct mlx5e_priv *priv);
+void mlx5e_self_test(struct net_device *ndev, struct ethtool_test *etest,
+ u64 *buf);
int mlx5e_ethtool_get_flow(struct mlx5e_priv *priv, struct ethtool_rxnfc *info,
int location);
int mlx5e_ethtool_get_all_flows(struct mlx5e_priv *priv,
@@ -811,8 +837,7 @@ static inline void mlx5e_cq_arm(struct mlx5e_cq *cq)
static inline u32 mlx5e_get_wqe_mtt_offset(struct mlx5e_rq *rq, u16 wqe_ix)
{
- return rq->mpwqe.mtt_offset +
- wqe_ix * ALIGN(MLX5_MPWRQ_PAGES_PER_WQE, 8);
+ return wqe_ix * ALIGN(MLX5_MPWRQ_PAGES_PER_WQE, 8);
}
static inline int mlx5e_get_max_num_channels(struct mlx5_core_dev *mdev)
@@ -825,6 +850,7 @@ extern const struct ethtool_ops mlx5e_ethtool_ops;
#ifdef CONFIG_MLX5_CORE_EN_DCB
extern const struct dcbnl_rtnl_ops mlx5e_dcbnl_ops;
int mlx5e_dcbnl_ieee_setets_core(struct mlx5e_priv *priv, struct ieee_ets *ets);
+void mlx5e_dcbnl_initialize(struct mlx5e_priv *priv);
#endif
#ifndef CONFIG_RFS_ACCEL
@@ -860,7 +886,8 @@ void mlx5e_destroy_tir(struct mlx5_core_dev *mdev,
struct mlx5e_tir *tir);
int mlx5e_create_mdev_resources(struct mlx5_core_dev *mdev);
void mlx5e_destroy_mdev_resources(struct mlx5_core_dev *mdev);
-int mlx5e_refresh_tirs_self_loopback_enable(struct mlx5_core_dev *mdev);
+int mlx5e_refresh_tirs_self_loopback(struct mlx5_core_dev *mdev,
+ bool enable_uc_lb);
struct mlx5_eswitch_rep;
int mlx5e_vport_rep_load(struct mlx5_eswitch *esw,
@@ -874,6 +901,7 @@ int mlx5e_add_sqs_fwd_rules(struct mlx5e_priv *priv);
void mlx5e_remove_sqs_fwd_rules(struct mlx5e_priv *priv);
int mlx5e_attr_get(struct net_device *dev, struct switchdev_attr *attr);
void mlx5e_handle_rx_cqe_rep(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
+void mlx5e_update_hw_rep_counters(struct mlx5e_priv *priv);
int mlx5e_create_direct_rqts(struct mlx5e_priv *priv);
void mlx5e_destroy_rqt(struct mlx5e_priv *priv, struct mlx5e_rqt *rqt);
@@ -890,8 +918,16 @@ struct net_device *mlx5e_create_netdev(struct mlx5_core_dev *mdev,
void mlx5e_destroy_netdev(struct mlx5_core_dev *mdev, struct mlx5e_priv *priv);
int mlx5e_attach_netdev(struct mlx5_core_dev *mdev, struct net_device *netdev);
void mlx5e_detach_netdev(struct mlx5_core_dev *mdev, struct net_device *netdev);
-struct rtnl_link_stats64 *
-mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats);
u32 mlx5e_choose_lro_timeout(struct mlx5_core_dev *mdev, u32 wanted_timeout);
+void mlx5e_add_vxlan_port(struct net_device *netdev,
+ struct udp_tunnel_info *ti);
+void mlx5e_del_vxlan_port(struct net_device *netdev,
+ struct udp_tunnel_info *ti);
+
+int mlx5e_get_offload_stats(int attr_id, const struct net_device *dev,
+ void *sp);
+bool mlx5e_has_offload_stats(const struct net_device *dev, int attr_id);
+bool mlx5e_is_uplink_rep(struct mlx5e_priv *priv);
+bool mlx5e_is_vf_vport_rep(struct mlx5e_priv *priv);
#endif /* __MLX5_EN_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
index a8cb38789774..68419a01db36 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
@@ -56,7 +56,7 @@ struct arfs_tuple {
struct arfs_rule {
struct mlx5e_priv *priv;
struct work_struct arfs_work;
- struct mlx5_flow_rule *rule;
+ struct mlx5_flow_handle *rule;
struct hlist_node hlist;
int rxq;
/* Flow ID passed to ndo_rx_flow_steer */
@@ -104,7 +104,7 @@ static int arfs_disable(struct mlx5e_priv *priv)
tt = arfs_get_tt(i);
/* Modify ttc rules destination to bypass the aRFS tables */
err = mlx5_modify_rule_destination(priv->fs.ttc.rules[tt],
- &dest);
+ &dest, NULL);
if (err) {
netdev_err(priv->netdev,
"%s: modify ttc destination failed\n",
@@ -137,7 +137,7 @@ int mlx5e_arfs_enable(struct mlx5e_priv *priv)
tt = arfs_get_tt(i);
/* Modify ttc rules destination to point to the aRFS FTs */
err = mlx5_modify_rule_destination(priv->fs.ttc.rules[tt],
- &dest);
+ &dest, NULL);
if (err) {
netdev_err(priv->netdev,
"%s: modify ttc destination failed err=%d\n",
@@ -151,7 +151,7 @@ int mlx5e_arfs_enable(struct mlx5e_priv *priv)
static void arfs_destroy_table(struct arfs_table *arfs_t)
{
- mlx5_del_flow_rule(arfs_t->default_rule);
+ mlx5_del_flow_rules(arfs_t->default_rule);
mlx5e_destroy_flow_table(&arfs_t->ft);
}
@@ -174,6 +174,11 @@ static int arfs_add_default_rule(struct mlx5e_priv *priv,
enum arfs_type type)
{
struct arfs_table *arfs_t = &priv->fs.arfs.arfs_tables[type];
+ struct mlx5_flow_act flow_act = {
+ .action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
+ .flow_tag = MLX5_FS_DEFAULT_FLOW_TAG,
+ .encap_id = 0,
+ };
struct mlx5_flow_destination dest;
struct mlx5e_tir *tir = priv->indir_tir;
struct mlx5_flow_spec *spec;
@@ -205,10 +210,9 @@ static int arfs_add_default_rule(struct mlx5e_priv *priv,
goto out;
}
- arfs_t->default_rule = mlx5_add_flow_rule(arfs_t->ft.t, spec,
- MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
- MLX5_FS_DEFAULT_FLOW_TAG,
- &dest);
+ arfs_t->default_rule = mlx5_add_flow_rules(arfs_t->ft.t, spec,
+ &flow_act,
+ &dest, 1);
if (IS_ERR(arfs_t->default_rule)) {
err = PTR_ERR(arfs_t->default_rule);
arfs_t->default_rule = NULL;
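
The steering API conversion repeated throughout this file packs action, flow tag and encap id into struct mlx5_flow_act, returns an opaque mlx5_flow_handle, and takes destinations as an array plus a count. Side by side:

	/* old: rule = mlx5_add_flow_rule(ft, spec, action, flow_tag, &dest); */
	struct mlx5_flow_act flow_act = {
		.action   = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
		.flow_tag = MLX5_FS_DEFAULT_FLOW_TAG,
		.encap_id = 0,
	};
	struct mlx5_flow_handle *rule;

	rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
	if (IS_ERR(rule))
		return PTR_ERR(rule);
	/* teardown: mlx5_del_flow_rules(rule), was mlx5_del_flow_rule() */
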
@@ -324,7 +328,7 @@ static int arfs_create_table(struct mlx5e_priv *priv,
int err;
ft->t = mlx5_create_flow_table(priv->fs.ns, MLX5E_NIC_PRIO,
- MLX5E_ARFS_TABLE_SIZE, MLX5E_ARFS_FT_LEVEL);
+ MLX5E_ARFS_TABLE_SIZE, MLX5E_ARFS_FT_LEVEL, 0);
if (IS_ERR(ft->t)) {
err = PTR_ERR(ft->t);
ft->t = NULL;
@@ -396,7 +400,7 @@ static void arfs_may_expire_flow(struct mlx5e_priv *priv)
spin_unlock_bh(&priv->fs.arfs.arfs_lock);
hlist_for_each_entry_safe(arfs_rule, htmp, &del_list, hlist) {
if (arfs_rule->rule)
- mlx5_del_flow_rule(arfs_rule->rule);
+ mlx5_del_flow_rules(arfs_rule->rule);
hlist_del(&arfs_rule->hlist);
kfree(arfs_rule);
}
@@ -420,7 +424,7 @@ static void arfs_del_rules(struct mlx5e_priv *priv)
hlist_for_each_entry_safe(rule, htmp, &del_list, hlist) {
cancel_work_sync(&rule->arfs_work);
if (rule->rule)
- mlx5_del_flow_rule(rule->rule);
+ mlx5_del_flow_rules(rule->rule);
hlist_del(&rule->hlist);
kfree(rule);
}
@@ -462,12 +466,17 @@ static struct arfs_table *arfs_get_table(struct mlx5e_arfs_tables *arfs,
return NULL;
}
-static struct mlx5_flow_rule *arfs_add_rule(struct mlx5e_priv *priv,
- struct arfs_rule *arfs_rule)
+static struct mlx5_flow_handle *arfs_add_rule(struct mlx5e_priv *priv,
+ struct arfs_rule *arfs_rule)
{
+ struct mlx5_flow_act flow_act = {
+ .action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
+ .flow_tag = MLX5_FS_DEFAULT_FLOW_TAG,
+ .encap_id = 0,
+ };
struct mlx5e_arfs_tables *arfs = &priv->fs.arfs;
struct arfs_tuple *tuple = &arfs_rule->tuple;
- struct mlx5_flow_rule *rule = NULL;
+ struct mlx5_flow_handle *rule = NULL;
struct mlx5_flow_destination dest;
struct arfs_table *arfs_table;
struct mlx5_flow_spec *spec;
@@ -544,9 +553,7 @@ static struct mlx5_flow_rule *arfs_add_rule(struct mlx5e_priv *priv,
}
dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
dest.tir_num = priv->direct_tir[arfs_rule->rxq].tirn;
- rule = mlx5_add_flow_rule(ft, spec, MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
- MLX5_FS_DEFAULT_FLOW_TAG,
- &dest);
+ rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
if (IS_ERR(rule)) {
err = PTR_ERR(rule);
netdev_err(priv->netdev, "%s: add rule(filter id=%d, rq idx=%d) failed, err=%d\n",
@@ -559,14 +566,14 @@ out:
}
static void arfs_modify_rule_rq(struct mlx5e_priv *priv,
- struct mlx5_flow_rule *rule, u16 rxq)
+ struct mlx5_flow_handle *rule, u16 rxq)
{
struct mlx5_flow_destination dst;
int err = 0;
dst.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
dst.tir_num = priv->direct_tir[rxq].tirn;
- err = mlx5_modify_rule_destination(rule, &dst);
+ err = mlx5_modify_rule_destination(rule, &dst, NULL);
if (err)
netdev_warn(priv->netdev,
"Failed to modfiy aRFS rule destination to rq=%d\n", rxq);
@@ -578,7 +585,7 @@ static void arfs_handle_work(struct work_struct *work)
struct arfs_rule,
arfs_work);
struct mlx5e_priv *priv = arfs_rule->priv;
- struct mlx5_flow_rule *rule;
+ struct mlx5_flow_handle *rule;
mutex_lock(&priv->state_lock);
if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_clock.c b/drivers/net/ethernet/mellanox/mlx5/core/en_clock.c
index 13dc388667b6..2cd8e56a573b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_clock.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_clock.c
@@ -94,7 +94,7 @@ int mlx5e_hwstamp_set(struct net_device *dev, struct ifreq *ifr)
switch (config.rx_filter) {
case HWTSTAMP_FILTER_NONE:
/* Reset CQE compression to Admin default */
- mlx5e_modify_rx_cqe_compression(priv, priv->params.rx_cqe_compress_admin);
+ mlx5e_modify_rx_cqe_compression(priv, priv->params.rx_cqe_compress_def);
break;
case HWTSTAMP_FILTER_ALL:
case HWTSTAMP_FILTER_SOME:
@@ -111,6 +111,7 @@ int mlx5e_hwstamp_set(struct net_device *dev, struct ifreq *ifr)
case HWTSTAMP_FILTER_PTP_V2_SYNC:
case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
/* Disable CQE compression */
+ netdev_warn(dev, "Disabling cqe compression\n");
mlx5e_modify_rx_cqe_compression(priv, false);
config.rx_filter = HWTSTAMP_FILTER_ALL;
break;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_common.c b/drivers/net/ethernet/mellanox/mlx5/core/en_common.c
index 029e856f72a0..f175518ff07a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_common.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_common.c
@@ -137,7 +137,8 @@ void mlx5e_destroy_mdev_resources(struct mlx5_core_dev *mdev)
mlx5_unmap_free_uar(mdev, &res->cq_uar);
}
-int mlx5e_refresh_tirs_self_loopback_enable(struct mlx5_core_dev *mdev)
+int mlx5e_refresh_tirs_self_loopback(struct mlx5_core_dev *mdev,
+ bool enable_uc_lb)
{
struct mlx5e_tir *tir;
void *in;
@@ -149,6 +150,10 @@ int mlx5e_refresh_tirs_self_loopback_enable(struct mlx5_core_dev *mdev)
if (!in)
return -ENOMEM;
+ if (enable_uc_lb)
+ MLX5_SET(modify_tir_in, in, ctx.self_lb_block,
+ MLX5_TIRC_SELF_LB_BLOCK_BLOCK_UNICAST_);
+
MLX5_SET(modify_tir_in, in, bitmask.self_lb_en, 1);
list_for_each_entry(tir, &mdev->mlx5e_res.td.tirs_list, list) {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
index 762af16ed021..7f6c225666c1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
@@ -38,16 +38,77 @@
#define MLX5E_100MB (100000)
#define MLX5E_1GB (1000000)
+#define MLX5E_CEE_STATE_UP 1
+#define MLX5E_CEE_STATE_DOWN 0
+
+/* If dcbx mode is non-host, set the dcbx mode to host.
+ */
+static int mlx5e_dcbnl_set_dcbx_mode(struct mlx5e_priv *priv,
+ enum mlx5_dcbx_oper_mode mode)
+{
+ struct mlx5_core_dev *mdev = priv->mdev;
+ u32 param[MLX5_ST_SZ_DW(dcbx_param)];
+ int err;
+
+ err = mlx5_query_port_dcbx_param(mdev, param);
+ if (err)
+ return err;
+
+ MLX5_SET(dcbx_param, param, version_admin, mode);
+ if (mode != MLX5E_DCBX_PARAM_VER_OPER_HOST)
+ MLX5_SET(dcbx_param, param, willing_admin, 1);
+
+ return mlx5_set_port_dcbx_param(mdev, param);
+}
+
+static int mlx5e_dcbnl_switch_to_host_mode(struct mlx5e_priv *priv)
+{
+ struct mlx5e_dcbx *dcbx = &priv->dcbx;
+ int err;
+
+ if (!MLX5_CAP_GEN(priv->mdev, dcbx))
+ return 0;
+
+ if (dcbx->mode == MLX5E_DCBX_PARAM_VER_OPER_HOST)
+ return 0;
+
+ err = mlx5e_dcbnl_set_dcbx_mode(priv, MLX5E_DCBX_PARAM_VER_OPER_HOST);
+ if (err)
+ return err;
+
+ dcbx->mode = MLX5E_DCBX_PARAM_VER_OPER_HOST;
+ return 0;
+}
+
static int mlx5e_dcbnl_ieee_getets(struct net_device *netdev,
struct ieee_ets *ets)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
+ struct mlx5_core_dev *mdev = priv->mdev;
+ int err = 0;
+ int i;
if (!MLX5_CAP_GEN(priv->mdev, ets))
return -ENOTSUPP;
- memcpy(ets, &priv->params.ets, sizeof(*ets));
- return 0;
+ ets->ets_cap = mlx5_max_tc(priv->mdev) + 1;
+ for (i = 0; i < ets->ets_cap; i++) {
+ err = mlx5_query_port_prio_tc(mdev, i, &ets->prio_tc[i]);
+ if (err)
+ return err;
+ }
+
+ for (i = 0; i < ets->ets_cap; i++) {
+ err = mlx5_query_port_tc_bw_alloc(mdev, i, &ets->tc_tx_bw[i]);
+ if (err)
+ return err;
+ if (ets->tc_tx_bw[i] < MLX5E_MAX_BW_ALLOC)
+ priv->dcbx.tc_tsa[i] = IEEE_8021QAZ_TSA_ETS;
+ }
+
+ memcpy(ets->tc_tsa, priv->dcbx.tc_tsa, sizeof(ets->tc_tsa));
+
+ return err;
}
enum {
@@ -110,9 +171,6 @@ int mlx5e_dcbnl_ieee_setets_core(struct mlx5e_priv *priv, struct ieee_ets *ets)
int max_tc = mlx5_max_tc(mdev);
int err;
- if (!MLX5_CAP_GEN(mdev, ets))
- return -ENOTSUPP;
-
mlx5e_build_tc_group(ets, tc_group, max_tc);
mlx5e_build_tc_tx_bw(ets, tc_tx_bw, tc_group, max_tc);
@@ -124,7 +182,14 @@ int mlx5e_dcbnl_ieee_setets_core(struct mlx5e_priv *priv, struct ieee_ets *ets)
if (err)
return err;
- return mlx5_set_port_tc_bw_alloc(mdev, tc_tx_bw);
+ err = mlx5_set_port_tc_bw_alloc(mdev, tc_tx_bw);
+
+ if (err)
+ return err;
+
+ memcpy(priv->dcbx.tc_tsa, ets->tc_tsa, sizeof(ets->tc_tsa));
+
+ return err;
}
static int mlx5e_dbcnl_validate_ets(struct net_device *netdev,
@@ -170,6 +235,9 @@ static int mlx5e_dcbnl_ieee_setets(struct net_device *netdev,
struct mlx5e_priv *priv = netdev_priv(netdev);
int err;
+ if (!MLX5_CAP_GEN(priv->mdev, ets))
+ return -ENOTSUPP;
+
err = mlx5e_dbcnl_validate_ets(netdev, ets);
if (err)
return err;
@@ -178,9 +246,6 @@ static int mlx5e_dcbnl_ieee_setets(struct net_device *netdev,
if (err)
return err;
- memcpy(&priv->params.ets, ets, sizeof(*ets));
- priv->params.ets.ets_cap = mlx5_max_tc(priv->mdev) + 1;
-
return 0;
}
@@ -222,13 +287,39 @@ static int mlx5e_dcbnl_ieee_setpfc(struct net_device *dev,
static u8 mlx5e_dcbnl_getdcbx(struct net_device *dev)
{
- return DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_IEEE;
+ struct mlx5e_priv *priv = netdev_priv(dev);
+ struct mlx5e_dcbx *dcbx = &priv->dcbx;
+ u8 mode = DCB_CAP_DCBX_VER_IEEE | DCB_CAP_DCBX_VER_CEE;
+
+ if (dcbx->mode == MLX5E_DCBX_PARAM_VER_OPER_HOST)
+ mode |= DCB_CAP_DCBX_HOST;
+
+ return mode;
}
static u8 mlx5e_dcbnl_setdcbx(struct net_device *dev, u8 mode)
{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+ struct mlx5e_dcbx *dcbx = &priv->dcbx;
+
+ if ((!mode) && MLX5_CAP_GEN(priv->mdev, dcbx)) {
+ if (dcbx->mode == MLX5E_DCBX_PARAM_VER_OPER_AUTO)
+ return 0;
+
+ /* set dcbx to fw controlled */
+ if (!mlx5e_dcbnl_set_dcbx_mode(priv, MLX5E_DCBX_PARAM_VER_OPER_AUTO)) {
+ dcbx->mode = MLX5E_DCBX_PARAM_VER_OPER_AUTO;
+ return 0;
+ }
+
+ return 1;
+ }
+
+ if (mlx5e_dcbnl_switch_to_host_mode(netdev_priv(dev)))
+ return 1;
+
if ((mode & DCB_CAP_DCBX_LLD_MANAGED) ||
- (mode & DCB_CAP_DCBX_VER_CEE) ||
+ !(mode & DCB_CAP_DCBX_VER_CEE) ||
!(mode & DCB_CAP_DCBX_VER_IEEE) ||
!(mode & DCB_CAP_DCBX_HOST))
return 1;
@@ -304,6 +395,284 @@ static int mlx5e_dcbnl_ieee_setmaxrate(struct net_device *netdev,
return mlx5_modify_port_ets_rate_limit(mdev, max_bw_value, max_bw_unit);
}
+static u8 mlx5e_dcbnl_setall(struct net_device *netdev)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+ struct mlx5e_cee_config *cee_cfg = &priv->dcbx.cee_cfg;
+ struct mlx5_core_dev *mdev = priv->mdev;
+ struct ieee_ets ets;
+ struct ieee_pfc pfc;
+ int err = -ENOTSUPP;
+ int i;
+
+ if (!MLX5_CAP_GEN(mdev, ets))
+ goto out;
+
+ memset(&ets, 0, sizeof(ets));
+ memset(&pfc, 0, sizeof(pfc));
+
+ ets.ets_cap = IEEE_8021QAZ_MAX_TCS;
+ for (i = 0; i < CEE_DCBX_MAX_PGS; i++) {
+ ets.tc_tx_bw[i] = cee_cfg->pg_bw_pct[i];
+ ets.tc_rx_bw[i] = cee_cfg->pg_bw_pct[i];
+ ets.tc_tsa[i] = IEEE_8021QAZ_TSA_ETS;
+ ets.prio_tc[i] = cee_cfg->prio_to_pg_map[i];
+ }
+
+ err = mlx5e_dbcnl_validate_ets(netdev, &ets);
+ if (err) {
+ netdev_err(netdev,
+ "%s, Failed to validate ETS: %d\n", __func__, err);
+ goto out;
+ }
+
+ err = mlx5e_dcbnl_ieee_setets_core(priv, &ets);
+ if (err) {
+ netdev_err(netdev,
+ "%s, Failed to set ETS: %d\n", __func__, err);
+ goto out;
+ }
+
+ /* Set PFC */
+ pfc.pfc_cap = mlx5_max_tc(mdev) + 1;
+ if (!cee_cfg->pfc_enable)
+ pfc.pfc_en = 0;
+ else
+ for (i = 0; i < CEE_DCBX_MAX_PRIO; i++)
+ pfc.pfc_en |= cee_cfg->pfc_setting[i] << i;
+
+ err = mlx5e_dcbnl_ieee_setpfc(netdev, &pfc);
+ if (err) {
+ netdev_err(netdev,
+ "%s, Failed to set PFC: %d\n", __func__, err);
+ goto out;
+ }
+out:
+ return err ? MLX5_DCB_NO_CHG : MLX5_DCB_CHG_RESET;
+}
+
+static u8 mlx5e_dcbnl_getstate(struct net_device *netdev)
+{
+ return MLX5E_CEE_STATE_UP;
+}
+
+static void mlx5e_dcbnl_getpermhwaddr(struct net_device *netdev,
+ u8 *perm_addr)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+
+ if (!perm_addr)
+ return;
+
+ mlx5_query_nic_vport_mac_address(priv->mdev, 0, perm_addr);
+}
+
+static void mlx5e_dcbnl_setpgtccfgtx(struct net_device *netdev,
+ int priority, u8 prio_type,
+ u8 pgid, u8 bw_pct, u8 up_map)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+ struct mlx5e_cee_config *cee_cfg = &priv->dcbx.cee_cfg;
+
+ if (priority >= CEE_DCBX_MAX_PRIO) {
+ netdev_err(netdev,
+ "%s, priority is out of range\n", __func__);
+ return;
+ }
+
+ if (pgid >= CEE_DCBX_MAX_PGS) {
+ netdev_err(netdev,
+ "%s, priority group is out of range\n", __func__);
+ return;
+ }
+
+ cee_cfg->prio_to_pg_map[priority] = pgid;
+}
+
+static void mlx5e_dcbnl_setpgbwgcfgtx(struct net_device *netdev,
+ int pgid, u8 bw_pct)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+ struct mlx5e_cee_config *cee_cfg = &priv->dcbx.cee_cfg;
+
+ if (pgid >= CEE_DCBX_MAX_PGS) {
+ netdev_err(netdev,
+ "%s, priority group is out of range\n", __func__);
+ return;
+ }
+
+ cee_cfg->pg_bw_pct[pgid] = bw_pct;
+}
+
+static void mlx5e_dcbnl_getpgtccfgtx(struct net_device *netdev,
+ int priority, u8 *prio_type,
+ u8 *pgid, u8 *bw_pct, u8 *up_map)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+ struct mlx5_core_dev *mdev = priv->mdev;
+
+ if (priority >= CEE_DCBX_MAX_PRIO) {
+ netdev_err(netdev,
+ "%s, priority is out of range\n", __func__);
+ return;
+ }
+
+ *prio_type = 0;
+ *bw_pct = 0;
+ *up_map = 0;
+
+ if (mlx5_query_port_prio_tc(mdev, priority, pgid))
+ *pgid = 0;
+}
+
+static void mlx5e_dcbnl_getpgbwgcfgtx(struct net_device *netdev,
+ int pgid, u8 *bw_pct)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+ struct mlx5_core_dev *mdev = priv->mdev;
+
+ if (pgid >= CEE_DCBX_MAX_PGS) {
+ netdev_err(netdev,
+ "%s, priority group is out of range\n", __func__);
+ return;
+ }
+
+ if (mlx5_query_port_tc_bw_alloc(mdev, pgid, bw_pct))
+ *bw_pct = 0;
+}
+
+static void mlx5e_dcbnl_setpfccfg(struct net_device *netdev,
+ int priority, u8 setting)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+ struct mlx5e_cee_config *cee_cfg = &priv->dcbx.cee_cfg;
+
+ if (priority >= CEE_DCBX_MAX_PRIO) {
+ netdev_err(netdev,
+ "%s, priority is out of range\n", __func__);
+ return;
+ }
+
+ if (setting > 1)
+ return;
+
+ cee_cfg->pfc_setting[priority] = setting;
+}
+
+static int
+mlx5e_dcbnl_get_priority_pfc(struct net_device *netdev,
+ int priority, u8 *setting)
+{
+ struct ieee_pfc pfc;
+ int err;
+
+ err = mlx5e_dcbnl_ieee_getpfc(netdev, &pfc);
+
+ if (err)
+ *setting = 0;
+ else
+ *setting = (pfc.pfc_en >> priority) & 0x01;
+
+ return err;
+}
+
+static void mlx5e_dcbnl_getpfccfg(struct net_device *netdev,
+ int priority, u8 *setting)
+{
+ if (priority >= CEE_DCBX_MAX_PRIO) {
+ netdev_err(netdev,
+ "%s, priority is out of range\n", __func__);
+ return;
+ }
+
+ if (!setting)
+ return;
+
+ mlx5e_dcbnl_get_priority_pfc(netdev, priority, setting);
+}
+
+static u8 mlx5e_dcbnl_getcap(struct net_device *netdev,
+ int capid, u8 *cap)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+ struct mlx5_core_dev *mdev = priv->mdev;
+ u8 rval = 0;
+
+ switch (capid) {
+ case DCB_CAP_ATTR_PG:
+ *cap = true;
+ break;
+ case DCB_CAP_ATTR_PFC:
+ *cap = true;
+ break;
+ case DCB_CAP_ATTR_UP2TC:
+ *cap = false;
+ break;
+ case DCB_CAP_ATTR_PG_TCS:
+ *cap = 1 << mlx5_max_tc(mdev);
+ break;
+ case DCB_CAP_ATTR_PFC_TCS:
+ *cap = 1 << mlx5_max_tc(mdev);
+ break;
+ case DCB_CAP_ATTR_GSP:
+ *cap = false;
+ break;
+ case DCB_CAP_ATTR_BCN:
+ *cap = false;
+ break;
+ case DCB_CAP_ATTR_DCBX:
+ *cap = (DCB_CAP_DCBX_LLD_MANAGED |
+ DCB_CAP_DCBX_VER_CEE |
+ DCB_CAP_DCBX_STATIC);
+ break;
+ default:
+ *cap = 0;
+ rval = 1;
+ break;
+ }
+
+ return rval;
+}
+
+static int mlx5e_dcbnl_getnumtcs(struct net_device *netdev,
+ int tcs_id, u8 *num)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+ struct mlx5_core_dev *mdev = priv->mdev;
+
+ switch (tcs_id) {
+ case DCB_NUMTCS_ATTR_PG:
+ case DCB_NUMTCS_ATTR_PFC:
+ *num = mlx5_max_tc(mdev) + 1;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static u8 mlx5e_dcbnl_getpfcstate(struct net_device *netdev)
+{
+ struct ieee_pfc pfc;
+
+ if (mlx5e_dcbnl_ieee_getpfc(netdev, &pfc))
+ return MLX5E_CEE_STATE_DOWN;
+
+ return pfc.pfc_en ? MLX5E_CEE_STATE_UP : MLX5E_CEE_STATE_DOWN;
+}
+
+static void mlx5e_dcbnl_setpfcstate(struct net_device *netdev, u8 state)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+ struct mlx5e_cee_config *cee_cfg = &priv->dcbx.cee_cfg;
+
+ if ((state != MLX5E_CEE_STATE_UP) && (state != MLX5E_CEE_STATE_DOWN))
+ return;
+
+ cee_cfg->pfc_enable = state;
+}
+
const struct dcbnl_rtnl_ops mlx5e_dcbnl_ops = {
.ieee_getets = mlx5e_dcbnl_ieee_getets,
.ieee_setets = mlx5e_dcbnl_ieee_setets,
@@ -313,4 +682,70 @@ const struct dcbnl_rtnl_ops mlx5e_dcbnl_ops = {
.ieee_setpfc = mlx5e_dcbnl_ieee_setpfc,
.getdcbx = mlx5e_dcbnl_getdcbx,
.setdcbx = mlx5e_dcbnl_setdcbx,
+
+/* CEE interfaces */
+ .setall = mlx5e_dcbnl_setall,
+ .getstate = mlx5e_dcbnl_getstate,
+ .getpermhwaddr = mlx5e_dcbnl_getpermhwaddr,
+
+ .setpgtccfgtx = mlx5e_dcbnl_setpgtccfgtx,
+ .setpgbwgcfgtx = mlx5e_dcbnl_setpgbwgcfgtx,
+ .getpgtccfgtx = mlx5e_dcbnl_getpgtccfgtx,
+ .getpgbwgcfgtx = mlx5e_dcbnl_getpgbwgcfgtx,
+
+ .setpfccfg = mlx5e_dcbnl_setpfccfg,
+ .getpfccfg = mlx5e_dcbnl_getpfccfg,
+ .getcap = mlx5e_dcbnl_getcap,
+ .getnumtcs = mlx5e_dcbnl_getnumtcs,
+ .getpfcstate = mlx5e_dcbnl_getpfcstate,
+ .setpfcstate = mlx5e_dcbnl_setpfcstate,
};
+
+static void mlx5e_dcbnl_query_dcbx_mode(struct mlx5e_priv *priv,
+ enum mlx5_dcbx_oper_mode *mode)
+{
+ u32 out[MLX5_ST_SZ_DW(dcbx_param)];
+
+ *mode = MLX5E_DCBX_PARAM_VER_OPER_HOST;
+
+ if (!mlx5_query_port_dcbx_param(priv->mdev, out))
+ *mode = MLX5_GET(dcbx_param, out, version_oper);
+
+ /* From driver's point of view, we only care if the mode
+ * is host (HOST) or non-host (AUTO)
+ */
+ if (*mode != MLX5E_DCBX_PARAM_VER_OPER_HOST)
+ *mode = MLX5E_DCBX_PARAM_VER_OPER_AUTO;
+}
+
+static void mlx5e_ets_init(struct mlx5e_priv *priv)
+{
+ int i;
+ struct ieee_ets ets;
+
+ memset(&ets, 0, sizeof(ets));
+ ets.ets_cap = mlx5_max_tc(priv->mdev) + 1;
+ for (i = 0; i < ets.ets_cap; i++) {
+ ets.tc_tx_bw[i] = MLX5E_MAX_BW_ALLOC;
+ ets.tc_tsa[i] = IEEE_8021QAZ_TSA_VENDOR;
+ ets.prio_tc[i] = i;
+ }
+
+ memcpy(priv->dcbx.tc_tsa, ets.tc_tsa, sizeof(ets.tc_tsa));
+
+ /* tclass[prio=0]=1, tclass[prio=1]=0, tclass[prio=i]=i (for i>1) */
+ ets.prio_tc[0] = 1;
+ ets.prio_tc[1] = 0;
+
+ mlx5e_dcbnl_ieee_setets_core(priv, &ets);
+}
+
+void mlx5e_dcbnl_initialize(struct mlx5e_priv *priv)
+{
+ struct mlx5e_dcbx *dcbx = &priv->dcbx;
+
+ if (MLX5_CAP_GEN(priv->mdev, dcbx))
+ mlx5e_dcbnl_query_dcbx_mode(priv, &dcbx->mode);
+
+ mlx5e_ets_init(priv);
+}
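
Hook-up sketch: the ops table is attached while the netdev is built, and the new initializer runs during priv setup. Roughly, with the placement assumed to match how mlx5e wires its other ops:

	#ifdef CONFIG_MLX5_CORE_EN_DCB
		netdev->dcbnl_ops = &mlx5e_dcbnl_ops;	/* IEEE + CEE ops */
		mlx5e_dcbnl_initialize(priv);	/* query FW dcbx mode, seed ETS */
	#endif
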
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index 27ff401cec20..352462af8d51 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -171,11 +171,17 @@ static int mlx5e_get_sset_count(struct net_device *dev, int sset)
return NUM_SW_COUNTERS +
MLX5E_NUM_Q_CNTRS(priv) +
NUM_VPORT_COUNTERS + NUM_PPORT_COUNTERS +
+ NUM_PCIE_COUNTERS +
MLX5E_NUM_RQ_STATS(priv) +
MLX5E_NUM_SQ_STATS(priv) +
- MLX5E_NUM_PFC_COUNTERS(priv);
+ MLX5E_NUM_PFC_COUNTERS(priv) +
+ ARRAY_SIZE(mlx5e_pme_status_desc) +
+ ARRAY_SIZE(mlx5e_pme_error_desc);
+
case ETH_SS_PRIV_FLAGS:
return ARRAY_SIZE(mlx5e_priv_flags);
+ case ETH_SS_TEST:
+ return mlx5e_self_test_num(priv);
/* fallthrough */
default:
return -EOPNOTSUPP;
@@ -213,6 +219,14 @@ static void mlx5e_fill_stats_strings(struct mlx5e_priv *priv, uint8_t *data)
strcpy(data + (idx++) * ETH_GSTRING_LEN,
pport_2819_stats_desc[i].format);
+ for (i = 0; i < NUM_PCIE_PERF_COUNTERS; i++)
+ strcpy(data + (idx++) * ETH_GSTRING_LEN,
+ pcie_perf_stats_desc[i].format);
+
+ for (i = 0; i < NUM_PCIE_TAS_COUNTERS; i++)
+ strcpy(data + (idx++) * ETH_GSTRING_LEN,
+ pcie_tas_stats_desc[i].format);
+
for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
for (i = 0; i < NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS; i++)
sprintf(data + (idx++) * ETH_GSTRING_LEN,
@@ -237,6 +251,13 @@ static void mlx5e_fill_stats_strings(struct mlx5e_priv *priv, uint8_t *data)
}
}
+ /* port module event counters */
+ for (i = 0; i < ARRAY_SIZE(mlx5e_pme_status_desc); i++)
+ strcpy(data + (idx++) * ETH_GSTRING_LEN, mlx5e_pme_status_desc[i].format);
+
+ for (i = 0; i < ARRAY_SIZE(mlx5e_pme_error_desc); i++)
+ strcpy(data + (idx++) * ETH_GSTRING_LEN, mlx5e_pme_error_desc[i].format);
+
if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
return;
@@ -267,6 +288,9 @@ static void mlx5e_get_strings(struct net_device *dev,
break;
case ETH_SS_TEST:
+ for (i = 0; i < mlx5e_self_test_num(priv); i++)
+ strcpy(data + i * ETH_GSTRING_LEN,
+ mlx5e_self_tests[i]);
break;
case ETH_SS_STATS:
@@ -279,6 +303,7 @@ static void mlx5e_get_ethtool_stats(struct net_device *dev,
struct ethtool_stats *stats, u64 *data)
{
struct mlx5e_priv *priv = netdev_priv(dev);
+ struct mlx5_priv *mlx5_priv;
int i, j, tc, prio, idx = 0;
unsigned long pfc_combined;
@@ -314,6 +339,14 @@ static void mlx5e_get_ethtool_stats(struct net_device *dev,
data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.RFC_2819_counters,
pport_2819_stats_desc, i);
+ for (i = 0; i < NUM_PCIE_PERF_COUNTERS; i++)
+ data[idx++] = MLX5E_READ_CTR32_BE(&priv->stats.pcie.pcie_perf_counters,
+ pcie_perf_stats_desc, i);
+
+ for (i = 0; i < NUM_PCIE_TAS_COUNTERS; i++)
+ data[idx++] = MLX5E_READ_CTR32_BE(&priv->stats.pcie.pcie_tas_counters,
+ pcie_tas_stats_desc, i);
+
for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
for (i = 0; i < NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS; i++)
data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.per_prio_counters[prio],
@@ -335,6 +368,16 @@ static void mlx5e_get_ethtool_stats(struct net_device *dev,
}
}
+ /* port module event counters */
+ mlx5_priv = &priv->mdev->priv;
+ for (i = 0; i < ARRAY_SIZE(mlx5e_pme_status_desc); i++)
+ data[idx++] = MLX5E_READ_CTR64_CPU(mlx5_priv->pme_stats.status_counters,
+ mlx5e_pme_status_desc, i);
+
+ for (i = 0; i < ARRAY_SIZE(mlx5e_pme_error_desc); i++)
+ data[idx++] = MLX5E_READ_CTR64_CPU(mlx5_priv->pme_stats.error_counters,
+ mlx5e_pme_error_desc, i);
+
if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
return;
@@ -456,8 +499,7 @@ static int mlx5e_set_ringparam(struct net_device *dev,
return -EINVAL;
}
- num_mtts = MLX5E_REQUIRED_MTTS(priv->params.num_channels,
- rx_pending_wqes);
+ num_mtts = MLX5E_REQUIRED_MTTS(rx_pending_wqes);
if (priv->params.rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ &&
!MLX5E_VALID_NUM_MTTS(num_mtts)) {
netdev_info(dev, "%s: rx_pending (%d) request can't be satisfied, try to reduce.\n",
@@ -522,7 +564,6 @@ static int mlx5e_set_channels(struct net_device *dev,
unsigned int count = ch->combined_count;
bool arfs_enabled;
bool was_opened;
- u32 num_mtts;
int err = 0;
if (!count) {
@@ -541,14 +582,6 @@ static int mlx5e_set_channels(struct net_device *dev,
return -EINVAL;
}
- num_mtts = MLX5E_REQUIRED_MTTS(count, BIT(priv->params.log_rq_size));
- if (priv->params.rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ &&
- !MLX5E_VALID_NUM_MTTS(num_mtts)) {
- netdev_info(dev, "%s: rx count (%d) request can't be satisfied, try to reduce.\n",
- __func__, count);
- return -EINVAL;
- }
-
if (priv->params.num_channels == count)
return 0;
@@ -1438,6 +1471,35 @@ static int set_pflag_rx_cqe_based_moder(struct net_device *netdev, bool enable)
return err;
}
+static int set_pflag_rx_cqe_compress(struct net_device *netdev,
+ bool enable)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+ struct mlx5_core_dev *mdev = priv->mdev;
+ int err = 0;
+ bool reset;
+
+ if (!MLX5_CAP_GEN(mdev, cqe_compression))
+ return -ENOTSUPP;
+
+ if (enable && priv->tstamp.hwtstamp_config.rx_filter != HWTSTAMP_FILTER_NONE) {
+ netdev_err(netdev, "Can't enable cqe compression while timestamping is enabled.\n");
+ return -EINVAL;
+ }
+
+ reset = test_bit(MLX5E_STATE_OPENED, &priv->state);
+
+ if (reset)
+ mlx5e_close_locked(netdev);
+
+ MLX5E_SET_PFLAG(priv, MLX5E_PFLAG_RX_CQE_COMPRESS, enable);
+ priv->params.rx_cqe_compress_def = enable;
+
+ if (reset)
+ err = mlx5e_open_locked(netdev);
+ return err;
+}
+
static int mlx5e_handle_pflag(struct net_device *netdev,
u32 wanted_flags,
enum mlx5e_priv_flag flag,
@@ -1445,7 +1507,7 @@ static int mlx5e_handle_pflag(struct net_device *netdev,
{
struct mlx5e_priv *priv = netdev_priv(netdev);
bool enable = !!(wanted_flags & flag);
- u32 changes = wanted_flags ^ priv->pflags;
+ u32 changes = wanted_flags ^ priv->params.pflags;
int err;
if (!(changes & flag))
@@ -1458,7 +1520,7 @@ static int mlx5e_handle_pflag(struct net_device *netdev,
return err;
}
- MLX5E_SET_PRIV_FLAG(priv, flag, enable);
+ MLX5E_SET_PFLAG(priv, flag, enable);
return 0;
}
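
From user space the new flag is reachable through the standard ETHTOOL_GPFLAGS/ETHTOOL_SPFLAGS ioctls, equivalently "ethtool --set-priv-flags <dev> rx_cqe_compress on". A hedged sketch; bit 1 follows the mlx5e_priv_flags ordering above, and sock is an ordinary AF_INET datagram socket:

	#include <string.h>
	#include <sys/ioctl.h>
	#include <net/if.h>
	#include <linux/ethtool.h>
	#include <linux/sockios.h>

	static int set_rx_cqe_compress(int sock, const char *ifname, int on)
	{
		struct ethtool_value ev = { .cmd = ETHTOOL_GPFLAGS };
		struct ifreq ifr;

		memset(&ifr, 0, sizeof(ifr));
		strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
		ifr.ifr_data = (void *)&ev;
		if (ioctl(sock, SIOCETHTOOL, &ifr) < 0)	/* read flags */
			return -1;

		if (on)
			ev.data |= 1u << 1;	/* MLX5E_PFLAG_RX_CQE_COMPRESS */
		else
			ev.data &= ~(1u << 1);
		ev.cmd = ETHTOOL_SPFLAGS;
		return ioctl(sock, SIOCETHTOOL, &ifr);	/* write back */
	}
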
@@ -1468,20 +1530,26 @@ static int mlx5e_set_priv_flags(struct net_device *netdev, u32 pflags)
int err;
mutex_lock(&priv->state_lock);
-
err = mlx5e_handle_pflag(netdev, pflags,
MLX5E_PFLAG_RX_CQE_BASED_MODER,
set_pflag_rx_cqe_based_moder);
+ if (err)
+ goto out;
+
+ err = mlx5e_handle_pflag(netdev, pflags,
+ MLX5E_PFLAG_RX_CQE_COMPRESS,
+ set_pflag_rx_cqe_compress);
+out:
mutex_unlock(&priv->state_lock);
- return err ? -EINVAL : 0;
+ return err;
}
static u32 mlx5e_get_priv_flags(struct net_device *netdev)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
- return priv->pflags;
+ return priv->params.pflags;
}
static int mlx5e_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
@@ -1535,5 +1603,6 @@ const struct ethtool_ops mlx5e_ethtool_ops = {
.get_module_info = mlx5e_get_module_info,
.get_module_eeprom = mlx5e_get_module_eeprom,
.get_priv_flags = mlx5e_get_priv_flags,
- .set_priv_flags = mlx5e_set_priv_flags
+ .set_priv_flags = mlx5e_set_priv_flags,
+ .self_test = mlx5e_self_test,
};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
index 36fbc6b21a33..1fe80de5d68f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
@@ -158,9 +158,14 @@ static int __mlx5e_add_vlan_rule(struct mlx5e_priv *priv,
enum mlx5e_vlan_rule_type rule_type,
u16 vid, struct mlx5_flow_spec *spec)
{
+ struct mlx5_flow_act flow_act = {
+ .action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
+ .flow_tag = MLX5_FS_DEFAULT_FLOW_TAG,
+ .encap_id = 0,
+ };
struct mlx5_flow_table *ft = priv->fs.vlan.ft.t;
struct mlx5_flow_destination dest;
- struct mlx5_flow_rule **rule_p;
+ struct mlx5_flow_handle **rule_p;
int err = 0;
dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
@@ -187,10 +192,7 @@ static int __mlx5e_add_vlan_rule(struct mlx5e_priv *priv,
break;
}
- *rule_p = mlx5_add_flow_rule(ft, spec,
- MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
- MLX5_FS_DEFAULT_FLOW_TAG,
- &dest);
+ *rule_p = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
if (IS_ERR(*rule_p)) {
err = PTR_ERR(*rule_p);
@@ -229,20 +231,20 @@ static void mlx5e_del_vlan_rule(struct mlx5e_priv *priv,
switch (rule_type) {
case MLX5E_VLAN_RULE_TYPE_UNTAGGED:
if (priv->fs.vlan.untagged_rule) {
- mlx5_del_flow_rule(priv->fs.vlan.untagged_rule);
+ mlx5_del_flow_rules(priv->fs.vlan.untagged_rule);
priv->fs.vlan.untagged_rule = NULL;
}
break;
case MLX5E_VLAN_RULE_TYPE_ANY_VID:
if (priv->fs.vlan.any_vlan_rule) {
- mlx5_del_flow_rule(priv->fs.vlan.any_vlan_rule);
+ mlx5_del_flow_rules(priv->fs.vlan.any_vlan_rule);
priv->fs.vlan.any_vlan_rule = NULL;
}
break;
case MLX5E_VLAN_RULE_TYPE_MATCH_VID:
mlx5e_vport_context_update_vlans(priv);
if (priv->fs.vlan.active_vlans_rule[vid]) {
- mlx5_del_flow_rule(priv->fs.vlan.active_vlans_rule[vid]);
+ mlx5_del_flow_rules(priv->fs.vlan.active_vlans_rule[vid]);
priv->fs.vlan.active_vlans_rule[vid] = NULL;
}
mlx5e_vport_context_update_vlans(priv);
@@ -560,7 +562,7 @@ static void mlx5e_cleanup_ttc_rules(struct mlx5e_ttc_table *ttc)
for (i = 0; i < MLX5E_NUM_TT; i++) {
if (!IS_ERR_OR_NULL(ttc->rules[i])) {
- mlx5_del_flow_rule(ttc->rules[i]);
+ mlx5_del_flow_rules(ttc->rules[i]);
ttc->rules[i] = NULL;
}
}
@@ -616,13 +618,19 @@ static struct {
},
};
-static struct mlx5_flow_rule *mlx5e_generate_ttc_rule(struct mlx5e_priv *priv,
- struct mlx5_flow_table *ft,
- struct mlx5_flow_destination *dest,
- u16 etype,
- u8 proto)
+static struct mlx5_flow_handle *
+mlx5e_generate_ttc_rule(struct mlx5e_priv *priv,
+ struct mlx5_flow_table *ft,
+ struct mlx5_flow_destination *dest,
+ u16 etype,
+ u8 proto)
{
- struct mlx5_flow_rule *rule;
+ struct mlx5_flow_act flow_act = {
+ .action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
+ .flow_tag = MLX5_FS_DEFAULT_FLOW_TAG,
+ .encap_id = 0,
+ };
+ struct mlx5_flow_handle *rule;
struct mlx5_flow_spec *spec;
int err = 0;
@@ -643,10 +651,7 @@ static struct mlx5_flow_rule *mlx5e_generate_ttc_rule(struct mlx5e_priv *priv,
MLX5_SET(fte_match_param, spec->match_value, outer_headers.ethertype, etype);
}
- rule = mlx5_add_flow_rule(ft, spec,
- MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
- MLX5_FS_DEFAULT_FLOW_TAG,
- dest);
+ rule = mlx5_add_flow_rules(ft, spec, &flow_act, dest, 1);
if (IS_ERR(rule)) {
err = PTR_ERR(rule);
netdev_err(priv->netdev, "%s: add rule failed\n", __func__);
@@ -660,7 +665,7 @@ static int mlx5e_generate_ttc_table_rules(struct mlx5e_priv *priv)
{
struct mlx5_flow_destination dest;
struct mlx5e_ttc_table *ttc;
- struct mlx5_flow_rule **rules;
+ struct mlx5_flow_handle **rules;
struct mlx5_flow_table *ft;
int tt;
int err;
@@ -776,7 +781,7 @@ static int mlx5e_create_ttc_table(struct mlx5e_priv *priv)
int err;
ft->t = mlx5_create_flow_table(priv->fs.ns, MLX5E_NIC_PRIO,
- MLX5E_TTC_TABLE_SIZE, MLX5E_TTC_FT_LEVEL);
+ MLX5E_TTC_TABLE_SIZE, MLX5E_TTC_FT_LEVEL, 0);
if (IS_ERR(ft->t)) {
err = PTR_ERR(ft->t);
ft->t = NULL;
@@ -801,7 +806,7 @@ static void mlx5e_del_l2_flow_rule(struct mlx5e_priv *priv,
struct mlx5e_l2_rule *ai)
{
if (!IS_ERR_OR_NULL(ai->rule)) {
- mlx5_del_flow_rule(ai->rule);
+ mlx5_del_flow_rules(ai->rule);
ai->rule = NULL;
}
}
@@ -809,6 +814,11 @@ static void mlx5e_del_l2_flow_rule(struct mlx5e_priv *priv,
static int mlx5e_add_l2_flow_rule(struct mlx5e_priv *priv,
struct mlx5e_l2_rule *ai, int type)
{
+ struct mlx5_flow_act flow_act = {
+ .action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
+ .flow_tag = MLX5_FS_DEFAULT_FLOW_TAG,
+ .encap_id = 0,
+ };
struct mlx5_flow_table *ft = priv->fs.l2.ft.t;
struct mlx5_flow_destination dest;
struct mlx5_flow_spec *spec;
@@ -847,9 +857,7 @@ static int mlx5e_add_l2_flow_rule(struct mlx5e_priv *priv,
break;
}
- ai->rule = mlx5_add_flow_rule(ft, spec,
- MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
- MLX5_FS_DEFAULT_FLOW_TAG, &dest);
+ ai->rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
if (IS_ERR(ai->rule)) {
netdev_err(priv->netdev, "%s: add l2 rule(mac:%pM) failed\n",
__func__, mv_dmac);
@@ -947,7 +955,7 @@ static int mlx5e_create_l2_table(struct mlx5e_priv *priv)
ft->num_groups = 0;
ft->t = mlx5_create_flow_table(priv->fs.ns, MLX5E_NIC_PRIO,
- MLX5E_L2_TABLE_SIZE, MLX5E_L2_FT_LEVEL);
+ MLX5E_L2_TABLE_SIZE, MLX5E_L2_FT_LEVEL, 0);
if (IS_ERR(ft->t)) {
err = PTR_ERR(ft->t);
@@ -1037,7 +1045,7 @@ static int mlx5e_create_vlan_table(struct mlx5e_priv *priv)
ft->num_groups = 0;
ft->t = mlx5_create_flow_table(priv->fs.ns, MLX5E_NIC_PRIO,
- MLX5E_VLAN_TABLE_SIZE, MLX5E_VLAN_FT_LEVEL);
+ MLX5E_VLAN_TABLE_SIZE, MLX5E_VLAN_FT_LEVEL, 0);
if (IS_ERR(ft->t)) {
err = PTR_ERR(ft->t);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
index d17c24227900..3691451c728c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
@@ -36,7 +36,7 @@
struct mlx5e_ethtool_rule {
struct list_head list;
struct ethtool_rx_flow_spec flow_spec;
- struct mlx5_flow_rule *rule;
+ struct mlx5_flow_handle *rule;
struct mlx5e_ethtool_table *eth_ft;
};
@@ -99,7 +99,7 @@ static struct mlx5e_ethtool_table *get_flow_table(struct mlx5e_priv *priv,
MLX5E_ETHTOOL_NUM_ENTRIES);
ft = mlx5_create_auto_grouped_flow_table(ns, prio,
table_size,
- MLX5E_ETHTOOL_NUM_GROUPS, 0);
+ MLX5E_ETHTOOL_NUM_GROUPS, 0, 0);
if (IS_ERR(ft))
return (void *)ft;
@@ -284,15 +284,16 @@ static bool outer_header_zero(u32 *match_criteria)
size - 1);
}
-static struct mlx5_flow_rule *add_ethtool_flow_rule(struct mlx5e_priv *priv,
- struct mlx5_flow_table *ft,
- struct ethtool_rx_flow_spec *fs)
+static struct mlx5_flow_handle *
+add_ethtool_flow_rule(struct mlx5e_priv *priv,
+ struct mlx5_flow_table *ft,
+ struct ethtool_rx_flow_spec *fs)
{
struct mlx5_flow_destination *dst = NULL;
+ struct mlx5_flow_act flow_act = {0};
struct mlx5_flow_spec *spec;
- struct mlx5_flow_rule *rule;
+ struct mlx5_flow_handle *rule;
int err = 0;
- u32 action;
spec = mlx5_vzalloc(sizeof(*spec));
if (!spec)
@@ -303,7 +304,7 @@ static struct mlx5_flow_rule *add_ethtool_flow_rule(struct mlx5e_priv *priv,
goto free;
if (fs->ring_cookie == RX_CLS_FLOW_DISC) {
- action = MLX5_FLOW_CONTEXT_ACTION_DROP;
+ flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP;
} else {
dst = kzalloc(sizeof(*dst), GFP_KERNEL);
if (!dst) {
@@ -313,12 +314,12 @@ static struct mlx5_flow_rule *add_ethtool_flow_rule(struct mlx5e_priv *priv,
dst->type = MLX5_FLOW_DESTINATION_TYPE_TIR;
dst->tir_num = priv->direct_tir[fs->ring_cookie].tirn;
- action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+ flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
}
spec->match_criteria_enable = (!outer_header_zero(spec->match_criteria));
- rule = mlx5_add_flow_rule(ft, spec, action,
- MLX5_FS_DEFAULT_FLOW_TAG, dst);
+ flow_act.flow_tag = MLX5_FS_DEFAULT_FLOW_TAG;
+ rule = mlx5_add_flow_rules(ft, spec, &flow_act, dst, 1);
if (IS_ERR(rule)) {
err = PTR_ERR(rule);
netdev_err(priv->netdev, "%s: failed to add ethtool steering rule: %d\n",
@@ -335,7 +336,7 @@ static void del_ethtool_rule(struct mlx5e_priv *priv,
struct mlx5e_ethtool_rule *eth_rule)
{
if (eth_rule->rule)
- mlx5_del_flow_rule(eth_rule->rule);
+ mlx5_del_flow_rules(eth_rule->rule);
list_del(&eth_rule->list);
priv->fs.ethtool.tot_num_rules--;
put_flow_table(eth_rule->eth_ft);
@@ -475,7 +476,7 @@ int mlx5e_ethtool_flow_replace(struct mlx5e_priv *priv,
{
struct mlx5e_ethtool_table *eth_ft;
struct mlx5e_ethtool_rule *eth_rule;
- struct mlx5_flow_rule *rule;
+ struct mlx5_flow_handle *rule;
int num_tuples;
int err;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 246d98ebb588..cbfa38fc72c0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -84,7 +84,8 @@ static void mlx5e_set_rq_type_params(struct mlx5e_priv *priv, u8 rq_type)
switch (priv->params.rq_wq_type) {
case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
priv->params.log_rq_size = MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE_MPW;
- priv->params.mpwqe_log_stride_sz = priv->params.rx_cqe_compress ?
+ priv->params.mpwqe_log_stride_sz =
+ MLX5E_GET_PFLAG(priv, MLX5E_PFLAG_RX_CQE_COMPRESS) ?
MLX5_MPWRQ_LOG_STRIDE_SIZE_CQE_COMPRESS :
MLX5_MPWRQ_LOG_STRIDE_SIZE;
priv->params.mpwqe_log_num_strides = MLX5_MPWRQ_LOG_WQE_SZ -
@@ -101,7 +102,7 @@ static void mlx5e_set_rq_type_params(struct mlx5e_priv *priv, u8 rq_type)
priv->params.rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ,
BIT(priv->params.log_rq_size),
BIT(priv->params.mpwqe_log_stride_sz),
- priv->params.rx_cqe_compress_admin);
+ MLX5E_GET_PFLAG(priv, MLX5E_PFLAG_RX_CQE_COMPRESS));
}
static void mlx5e_set_rq_priv_params(struct mlx5e_priv *priv)
@@ -290,12 +291,36 @@ static void mlx5e_update_q_counter(struct mlx5e_priv *priv)
&qcnt->rx_out_of_buffer);
}
+static void mlx5e_update_pcie_counters(struct mlx5e_priv *priv)
+{
+ struct mlx5e_pcie_stats *pcie_stats = &priv->stats.pcie;
+ struct mlx5_core_dev *mdev = priv->mdev;
+ int sz = MLX5_ST_SZ_BYTES(mpcnt_reg);
+ void *out;
+ u32 *in;
+
+ in = mlx5_vzalloc(sz);
+ if (!in)
+ return;
+
+ out = pcie_stats->pcie_perf_counters;
+ MLX5_SET(mpcnt_reg, in, grp, MLX5_PCIE_PERFORMANCE_COUNTERS_GROUP);
+ mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_MPCNT, 0, 0);
+
+ out = pcie_stats->pcie_tas_counters;
+ MLX5_SET(mpcnt_reg, in, grp, MLX5_PCIE_TIMERS_AND_STATES_COUNTERS_GROUP);
+ mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_MPCNT, 0, 0);
+
+ kvfree(in);
+}
+
void mlx5e_update_stats(struct mlx5e_priv *priv)
{
mlx5e_update_q_counter(priv);
mlx5e_update_vport_counters(priv);
mlx5e_update_pport_counters(priv);
mlx5e_update_sw_counters(priv);
+ mlx5e_update_pcie_counters(priv);
}
void mlx5e_update_stats_work(struct work_struct *work)
@@ -446,14 +471,50 @@ static void mlx5e_rq_free_mpwqe_info(struct mlx5e_rq *rq)
kfree(rq->mpwqe.info);
}
-static bool mlx5e_is_vf_vport_rep(struct mlx5e_priv *priv)
+static int mlx5e_create_umr_mkey(struct mlx5e_priv *priv,
+ u64 npages, u8 page_shift,
+ struct mlx5_core_mkey *umr_mkey)
{
- struct mlx5_eswitch_rep *rep = (struct mlx5_eswitch_rep *)priv->ppriv;
+ struct mlx5_core_dev *mdev = priv->mdev;
+ int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
+ void *mkc;
+ u32 *in;
+ int err;
- if (rep && rep->vport != FDB_UPLINK_VPORT)
- return true;
+ if (!MLX5E_VALID_NUM_MTTS(npages))
+ return -EINVAL;
- return false;
+ in = mlx5_vzalloc(inlen);
+ if (!in)
+ return -ENOMEM;
+
+ mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
+
+ MLX5_SET(mkc, mkc, free, 1);
+ MLX5_SET(mkc, mkc, umr_en, 1);
+ MLX5_SET(mkc, mkc, lw, 1);
+ MLX5_SET(mkc, mkc, lr, 1);
+ MLX5_SET(mkc, mkc, access_mode, MLX5_MKC_ACCESS_MODE_MTT);
+
+ MLX5_SET(mkc, mkc, qpn, 0xffffff);
+ MLX5_SET(mkc, mkc, pd, mdev->mlx5e_res.pdn);
+ MLX5_SET64(mkc, mkc, len, npages << page_shift);
+ MLX5_SET(mkc, mkc, translations_octword_size,
+ MLX5_MTT_OCTW(npages));
+ MLX5_SET(mkc, mkc, log_page_size, page_shift);
+
+ err = mlx5_core_create_mkey(mdev, umr_mkey, in, inlen);
+
+ kvfree(in);
+ return err;
+}
+
+static int mlx5e_create_rq_umr_mkey(struct mlx5e_rq *rq)
+{
+ struct mlx5e_priv *priv = rq->priv;
+ u64 num_mtts = MLX5E_REQUIRED_MTTS(BIT(priv->params.log_rq_size));
+
+ return mlx5e_create_umr_mkey(priv, num_mtts, PAGE_SHIFT, &rq->umr_mkey);
}
static int mlx5e_create_rq(struct mlx5e_channel *c,
@@ -489,7 +550,13 @@ static int mlx5e_create_rq(struct mlx5e_channel *c,
rq->channel = c;
rq->ix = c->ix;
rq->priv = c->priv;
- rq->xdp_prog = priv->xdp_prog;
+
+ rq->xdp_prog = priv->xdp_prog ? bpf_prog_inc(priv->xdp_prog) : NULL;
+ if (IS_ERR(rq->xdp_prog)) {
+ err = PTR_ERR(rq->xdp_prog);
+ rq->xdp_prog = NULL;
+ goto err_rq_wq_destroy;
+ }
rq->buff.map_dir = DMA_FROM_DEVICE;
if (rq->xdp_prog)
@@ -506,18 +573,20 @@ static int mlx5e_create_rq(struct mlx5e_channel *c,
rq->alloc_wqe = mlx5e_alloc_rx_mpwqe;
rq->dealloc_wqe = mlx5e_dealloc_rx_mpwqe;
- rq->mpwqe.mtt_offset = c->ix *
- MLX5E_REQUIRED_MTTS(1, BIT(priv->params.log_rq_size));
-
rq->mpwqe_stride_sz = BIT(priv->params.mpwqe_log_stride_sz);
rq->mpwqe_num_strides = BIT(priv->params.mpwqe_log_num_strides);
rq->buff.wqe_sz = rq->mpwqe_stride_sz * rq->mpwqe_num_strides;
byte_count = rq->buff.wqe_sz;
- rq->mkey_be = cpu_to_be32(c->priv->umr_mkey.key);
- err = mlx5e_rq_alloc_mpwqe_info(rq, c);
+
+ err = mlx5e_create_rq_umr_mkey(rq);
if (err)
goto err_rq_wq_destroy;
+ rq->mkey_be = cpu_to_be32(rq->umr_mkey.key);
+
+ err = mlx5e_rq_alloc_mpwqe_info(rq, c);
+ if (err)
+ goto err_destroy_umr_mkey;
break;
default: /* MLX5_WQ_TYPE_LINKED_LIST */
rq->dma_info = kzalloc_node(wq_sz * sizeof(*rq->dma_info),
@@ -566,12 +635,14 @@ static int mlx5e_create_rq(struct mlx5e_channel *c,
rq->page_cache.head = 0;
rq->page_cache.tail = 0;
- if (rq->xdp_prog)
- bpf_prog_add(rq->xdp_prog, 1);
-
return 0;
+err_destroy_umr_mkey:
+ mlx5_core_destroy_mkey(mdev, &rq->umr_mkey);
+
err_rq_wq_destroy:
+ if (rq->xdp_prog)
+ bpf_prog_put(rq->xdp_prog);
mlx5_wq_destroy(&rq->wq_ctrl);
return err;
@@ -587,6 +658,7 @@ static void mlx5e_destroy_rq(struct mlx5e_rq *rq)
switch (rq->wq_type) {
case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
mlx5e_rq_free_mpwqe_info(rq);
+ mlx5_core_destroy_mkey(rq->priv->mdev, &rq->umr_mkey);
break;
default: /* MLX5_WQ_TYPE_LINKED_LIST */
kfree(rq->dma_info);
@@ -940,7 +1012,7 @@ static int mlx5e_create_sq(struct mlx5e_channel *c,
sq->bf_buf_size = (1 << MLX5_CAP_GEN(mdev, log_bf_reg_size)) / 2;
sq->max_inline = param->max_inline;
sq->min_inline_mode =
- MLX5_CAP_ETH(mdev, wqe_inline_mode) == MLX5E_INLINE_MODE_VPORT_CONTEXT ?
+ MLX5_CAP_ETH(mdev, wqe_inline_mode) == MLX5_CAP_INLINE_MODE_VPORT_CONTEXT ?
param->min_inline_mode : 0;
err = mlx5e_alloc_sq_db(sq, cpu_to_node(c->cpu));
@@ -1184,7 +1256,7 @@ static int mlx5e_create_cq(struct mlx5e_channel *c,
static void mlx5e_destroy_cq(struct mlx5e_cq *cq)
{
- mlx5_wq_destroy(&cq->wq_ctrl);
+ mlx5_cqwq_destroy(&cq->wq_ctrl);
}
static int mlx5e_enable_cq(struct mlx5e_cq *cq, struct mlx5e_cq_param *param)
@@ -1201,7 +1273,7 @@ static int mlx5e_enable_cq(struct mlx5e_cq *cq, struct mlx5e_cq_param *param)
int err;
inlen = MLX5_ST_SZ_BYTES(create_cq_in) +
- sizeof(u64) * cq->wq_ctrl.buf.npages;
+ sizeof(u64) * cq->wq_ctrl.frag_buf.npages;
in = mlx5_vzalloc(inlen);
if (!in)
return -ENOMEM;
@@ -1210,15 +1282,15 @@ static int mlx5e_enable_cq(struct mlx5e_cq *cq, struct mlx5e_cq_param *param)
memcpy(cqc, param->cqc, sizeof(param->cqc));
- mlx5_fill_page_array(&cq->wq_ctrl.buf,
- (__be64 *)MLX5_ADDR_OF(create_cq_in, in, pas));
+ mlx5_fill_page_frag_array(&cq->wq_ctrl.frag_buf,
+ (__be64 *)MLX5_ADDR_OF(create_cq_in, in, pas));
mlx5_vector2eqn(mdev, param->eq_ix, &eqn, &irqn_not_used);
MLX5_SET(cqc, cqc, cq_period_mode, param->cq_period_mode);
MLX5_SET(cqc, cqc, c_eqn, eqn);
MLX5_SET(cqc, cqc, uar_page, mcq->uar->index);
- MLX5_SET(cqc, cqc, log_page_size, cq->wq_ctrl.buf.page_shift -
+ MLX5_SET(cqc, cqc, log_page_size, cq->wq_ctrl.frag_buf.page_shift -
MLX5_ADAPTER_PAGE_SHIFT);
MLX5_SET64(cqc, cqc, dbr_addr, cq->wq_ctrl.db.dma);
@@ -1536,7 +1608,6 @@ err_close_icosq_cq:
err_napi_del:
netif_napi_del(&c->napi);
- napi_hash_del(&c->napi);
kfree(c);
return err;
@@ -1557,9 +1628,6 @@ static void mlx5e_close_channel(struct mlx5e_channel *c)
mlx5e_close_cq(&c->icosq.cq);
netif_napi_del(&c->napi);
- napi_hash_del(&c->napi);
- synchronize_rcu();
-
kfree(c);
}
@@ -1652,7 +1720,7 @@ static void mlx5e_build_rx_cq_param(struct mlx5e_priv *priv,
}
MLX5_SET(cqc, cqc, log_cq_size, log_cq_size);
- if (priv->params.rx_cqe_compress) {
+ if (MLX5E_GET_PFLAG(priv, MLX5E_PFLAG_RX_CQE_COMPRESS)) {
MLX5_SET(cqc, cqc, mini_cqe_res_format, MLX5_CQE_FORMAT_CSUM);
MLX5_SET(cqc, cqc, cqe_comp_en, 1);
}
@@ -2124,7 +2192,7 @@ int mlx5e_open_locked(struct net_device *netdev)
goto err_clear_state_opened_flag;
}
- err = mlx5e_refresh_tirs_self_loopback_enable(priv->mdev);
+ err = mlx5e_refresh_tirs_self_loopback(priv->mdev, false);
if (err) {
netdev_err(netdev, "%s: mlx5e_refresh_tirs_self_loopback_enable failed, %d\n",
__func__, err);
@@ -2642,7 +2710,7 @@ mqprio:
return mlx5e_setup_tc(dev, tc->tc);
}
-struct rtnl_link_stats64 *
+static struct rtnl_link_stats64 *
mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
struct mlx5e_priv *priv = netdev_priv(dev);
@@ -2650,13 +2718,20 @@ mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
struct mlx5e_vport_stats *vstats = &priv->stats.vport;
struct mlx5e_pport_stats *pstats = &priv->stats.pport;
- stats->rx_packets = sstats->rx_packets;
- stats->rx_bytes = sstats->rx_bytes;
- stats->tx_packets = sstats->tx_packets;
- stats->tx_bytes = sstats->tx_bytes;
+ if (mlx5e_is_uplink_rep(priv)) {
+ stats->rx_packets = PPORT_802_3_GET(pstats, a_frames_received_ok);
+ stats->rx_bytes = PPORT_802_3_GET(pstats, a_octets_received_ok);
+ stats->tx_packets = PPORT_802_3_GET(pstats, a_frames_transmitted_ok);
+ stats->tx_bytes = PPORT_802_3_GET(pstats, a_octets_transmitted_ok);
+ } else {
+ stats->rx_packets = sstats->rx_packets;
+ stats->rx_bytes = sstats->rx_bytes;
+ stats->tx_packets = sstats->tx_packets;
+ stats->tx_bytes = sstats->tx_bytes;
+ stats->tx_dropped = sstats->tx_queue_dropped;
+ }
stats->rx_dropped = priv->stats.qcnt.rx_out_of_buffer;
- stats->tx_dropped = sstats->tx_queue_dropped;
stats->rx_length_errors =
PPORT_802_3_GET(pstats, a_in_range_length_errors) +
@@ -2853,31 +2928,13 @@ static int mlx5e_set_features(struct net_device *netdev,
return err ? -EINVAL : 0;
}
-#define MXL5_HW_MIN_MTU 64
-#define MXL5E_MIN_MTU (MXL5_HW_MIN_MTU + ETH_FCS_LEN)
-
static int mlx5e_change_mtu(struct net_device *netdev, int new_mtu)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
- struct mlx5_core_dev *mdev = priv->mdev;
bool was_opened;
- u16 max_mtu;
- u16 min_mtu;
int err = 0;
bool reset;
- mlx5_query_port_max_mtu(mdev, &max_mtu, 1);
-
- max_mtu = MLX5E_HW2SW_MTU(max_mtu);
- min_mtu = MLX5E_HW2SW_MTU(MXL5E_MIN_MTU);
-
- if (new_mtu > max_mtu || new_mtu < min_mtu) {
- netdev_err(netdev,
- "%s: Bad MTU (%d), valid range is: [%d..%d]\n",
- __func__, new_mtu, min_mtu, max_mtu);
- return -EINVAL;
- }
-
mutex_lock(&priv->state_lock);
reset = !priv->params.lro_en &&
@@ -2947,6 +3004,20 @@ static int mlx5e_set_vf_trust(struct net_device *dev, int vf, bool setting)
return mlx5_eswitch_set_vport_trust(mdev->priv.eswitch, vf + 1, setting);
}
+
+static int mlx5e_set_vf_rate(struct net_device *dev, int vf, int min_tx_rate,
+ int max_tx_rate)
+{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+ struct mlx5_core_dev *mdev = priv->mdev;
+
+ if (min_tx_rate)
+ return -EOPNOTSUPP;
+
+ return mlx5_eswitch_set_vport_rate(mdev->priv.eswitch, vf + 1,
+ max_tx_rate);
+}
+
static int mlx5_vport_link2ifla(u8 esw_link)
{
switch (esw_link) {
@@ -3003,8 +3074,8 @@ static int mlx5e_get_vf_stats(struct net_device *dev,
vf_stats);
}
-static void mlx5e_add_vxlan_port(struct net_device *netdev,
- struct udp_tunnel_info *ti)
+void mlx5e_add_vxlan_port(struct net_device *netdev,
+ struct udp_tunnel_info *ti)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
@@ -3017,8 +3088,8 @@ static void mlx5e_add_vxlan_port(struct net_device *netdev,
mlx5e_vxlan_queue_work(priv, ti->sa_family, be16_to_cpu(ti->port), 1);
}
-static void mlx5e_del_vxlan_port(struct net_device *netdev,
- struct udp_tunnel_info *ti)
+void mlx5e_del_vxlan_port(struct net_device *netdev,
+ struct udp_tunnel_info *ti)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
@@ -3112,6 +3183,11 @@ static int mlx5e_xdp_set(struct net_device *netdev, struct bpf_prog *prog)
bool reset, was_opened;
int i;
+ if (prog && prog->xdp_adjust_head) {
+ netdev_err(netdev, "Does not support bpf_xdp_adjust_head()\n");
+ return -EOPNOTSUPP;
+ }
+
mutex_lock(&priv->state_lock);
if ((netdev->features & NETIF_F_LRO) && prog) {
@@ -3126,11 +3202,21 @@ static int mlx5e_xdp_set(struct net_device *netdev, struct bpf_prog *prog)
if (was_opened && reset)
mlx5e_close_locked(netdev);
+ if (was_opened && !reset) {
+ /* num_channels is invariant here, so we can take the
+ * batched reference right upfront.
+ */
+ prog = bpf_prog_add(prog, priv->params.num_channels);
+ if (IS_ERR(prog)) {
+ err = PTR_ERR(prog);
+ goto unlock;
+ }
+ }
- /* exchange programs */
+ /* exchange programs; the extra prog reference taken from the caller
+ * is kept as long as we don't fail from this point onwards.
+ */
old_prog = xchg(&priv->xdp_prog, prog);
- if (prog)
- bpf_prog_add(prog, 1);
if (old_prog)
bpf_prog_put(old_prog);
@@ -3146,7 +3232,6 @@ static int mlx5e_xdp_set(struct net_device *netdev, struct bpf_prog *prog)
/* exchanging programs w/o reset, we update ref counts on behalf
* of the channels RQs here.
*/
- bpf_prog_add(prog, priv->params.num_channels);
for (i = 0; i < priv->params.num_channels; i++) {
struct mlx5e_channel *c = priv->channel[i];
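
These XDP hunks tighten the refcount discipline: bpf_prog_inc()/bpf_prog_add() return the program (or an ERR_PTR), so the batched per-channel reference is taken before the program is published, and the old program is swapped out with xchg() before its reference is dropped. A compilable userspace sketch of that swap-then-put pattern using C11 atomics; all names are illustrative, not kernel API:

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct prog {
	atomic_int refcnt;
	int id;
};

static struct prog *prog_get(struct prog *p, int n)
{
	atomic_fetch_add(&p->refcnt, n);	/* take n references at once */
	return p;
}

static void prog_put(struct prog *p)
{
	if (atomic_fetch_sub(&p->refcnt, 1) == 1)
		free(p);			/* last reference gone */
}

int main(void)
{
	_Atomic(struct prog *) installed = NULL;
	struct prog *old, *p = malloc(sizeof(*p));

	if (!p)
		return 1;
	p->id = 1;
	atomic_init(&p->refcnt, 1);		/* creator's reference */

	prog_get(p, 1);				/* reference for the installed slot */
	old = atomic_exchange(&installed, p);	/* like xchg(&priv->xdp_prog, prog) */
	if (old)
		prog_put(old);			/* release the previous program */

	printf("installed prog %d\n", atomic_load(&installed)->id);

	prog_put(atomic_exchange(&installed, NULL));	/* uninstall */
	prog_put(p);					/* drop creator's reference */
	return 0;
}
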
@@ -3254,6 +3339,7 @@ static const struct net_device_ops mlx5e_netdev_ops_sriov = {
.ndo_set_vf_vlan = mlx5e_set_vf_vlan,
.ndo_set_vf_spoofchk = mlx5e_set_vf_spoofchk,
.ndo_set_vf_trust = mlx5e_set_vf_trust,
+ .ndo_set_vf_rate = mlx5e_set_vf_rate,
.ndo_get_vf_config = mlx5e_get_vf_config,
.ndo_set_vf_link_state = mlx5e_set_vf_link_state,
.ndo_get_vf_stats = mlx5e_get_vf_stats,
@@ -3262,6 +3348,8 @@ static const struct net_device_ops mlx5e_netdev_ops_sriov = {
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = mlx5e_netpoll,
#endif
+ .ndo_has_offload_stats = mlx5e_has_offload_stats,
+ .ndo_get_offload_stats = mlx5e_get_offload_stats,
};
static int mlx5e_check_required_hca_cap(struct mlx5_core_dev *mdev)
@@ -3298,24 +3386,6 @@ u16 mlx5e_get_max_inline_cap(struct mlx5_core_dev *mdev)
2 /*sizeof(mlx5e_tx_wqe.inline_hdr_start)*/;
}
-#ifdef CONFIG_MLX5_CORE_EN_DCB
-static void mlx5e_ets_init(struct mlx5e_priv *priv)
-{
- int i;
-
- priv->params.ets.ets_cap = mlx5_max_tc(priv->mdev) + 1;
- for (i = 0; i < priv->params.ets.ets_cap; i++) {
- priv->params.ets.tc_tx_bw[i] = MLX5E_MAX_BW_ALLOC;
- priv->params.ets.tc_tsa[i] = IEEE_8021QAZ_TSA_VENDOR;
- priv->params.ets.prio_tc[i] = i;
- }
-
- /* tclass[prio=0]=1, tclass[prio=1]=0, tclass[prio=i]=i (for i>1) */
- priv->params.ets.prio_tc[0] = 1;
- priv->params.ets.prio_tc[1] = 0;
-}
-#endif
-
void mlx5e_build_default_indir_rqt(struct mlx5_core_dev *mdev,
u32 *indirection_rqt, int len,
int num_channels)
@@ -3390,14 +3460,13 @@ static void mlx5e_query_min_inline(struct mlx5_core_dev *mdev,
u8 *min_inline_mode)
{
switch (MLX5_CAP_ETH(mdev, wqe_inline_mode)) {
- case MLX5E_INLINE_MODE_L2:
+ case MLX5_CAP_INLINE_MODE_L2:
*min_inline_mode = MLX5_INLINE_MODE_L2;
break;
- case MLX5E_INLINE_MODE_VPORT_CONTEXT:
- mlx5_query_nic_vport_min_inline(mdev,
- min_inline_mode);
+ case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT:
+ mlx5_query_nic_vport_min_inline(mdev, 0, min_inline_mode);
break;
- case MLX5_INLINE_MODE_NOT_REQUIRED:
+ case MLX5_CAP_INLINE_MODE_NOT_REQUIRED:
*min_inline_mode = MLX5_INLINE_MODE_NONE;
break;
}
@@ -3439,17 +3508,16 @@ static void mlx5e_build_nic_netdev_priv(struct mlx5_core_dev *mdev,
priv->params.log_sq_size = MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE;
/* set CQE compression */
- priv->params.rx_cqe_compress_admin = false;
+ priv->params.rx_cqe_compress_def = false;
if (MLX5_CAP_GEN(mdev, cqe_compression) &&
MLX5_CAP_GEN(mdev, vport_group_manager)) {
mlx5e_get_max_linkspeed(mdev, &link_speed);
mlx5e_get_pci_bw(mdev, &pci_bw);
mlx5_core_dbg(mdev, "Max link speed = %d, PCI BW = %d\n",
link_speed, pci_bw);
- priv->params.rx_cqe_compress_admin =
+ priv->params.rx_cqe_compress_def =
cqe_compress_heuristic(link_speed, pci_bw);
}
- priv->params.rx_cqe_compress = priv->params.rx_cqe_compress_admin;
mlx5e_set_rq_priv_params(priv);
if (priv->params.rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ)
@@ -3480,12 +3548,9 @@ static void mlx5e_build_nic_netdev_priv(struct mlx5_core_dev *mdev,
SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
/* Initialize pflags */
- MLX5E_SET_PRIV_FLAG(priv, MLX5E_PFLAG_RX_CQE_BASED_MODER,
- priv->params.rx_cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE);
-
-#ifdef CONFIG_MLX5_CORE_EN_DCB
- mlx5e_ets_init(priv);
-#endif
+ MLX5E_SET_PFLAG(priv, MLX5E_PFLAG_RX_CQE_BASED_MODER,
+ priv->params.rx_cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE);
+ MLX5E_SET_PFLAG(priv, MLX5E_PFLAG_RX_CQE_COMPRESS, priv->params.rx_cqe_compress_def);
mutex_init(&priv->state_lock);
@@ -3523,7 +3588,8 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
if (MLX5_CAP_GEN(mdev, vport_group_manager)) {
netdev->netdev_ops = &mlx5e_netdev_ops_sriov;
#ifdef CONFIG_MLX5_CORE_EN_DCB
- netdev->dcbnl_ops = &mlx5e_dcbnl_ops;
+ if (MLX5_CAP_GEN(mdev, qos))
+ netdev->dcbnl_ops = &mlx5e_dcbnl_ops;
#endif
} else {
netdev->netdev_ops = &mlx5e_netdev_ops_basic;
@@ -3619,43 +3685,6 @@ static void mlx5e_destroy_q_counter(struct mlx5e_priv *priv)
mlx5_core_dealloc_q_counter(priv->mdev, priv->q_counter);
}
-static int mlx5e_create_umr_mkey(struct mlx5e_priv *priv)
-{
- struct mlx5_core_dev *mdev = priv->mdev;
- u64 npages = MLX5E_REQUIRED_MTTS(priv->profile->max_nch(mdev),
- BIT(MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE_MPW));
- int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
- void *mkc;
- u32 *in;
- int err;
-
- in = mlx5_vzalloc(inlen);
- if (!in)
- return -ENOMEM;
-
- mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
-
- npages = min_t(u32, ALIGN(U16_MAX, 4) * 2, npages);
-
- MLX5_SET(mkc, mkc, free, 1);
- MLX5_SET(mkc, mkc, umr_en, 1);
- MLX5_SET(mkc, mkc, lw, 1);
- MLX5_SET(mkc, mkc, lr, 1);
- MLX5_SET(mkc, mkc, access_mode, MLX5_MKC_ACCESS_MODE_MTT);
-
- MLX5_SET(mkc, mkc, qpn, 0xffffff);
- MLX5_SET(mkc, mkc, pd, mdev->mlx5e_res.pdn);
- MLX5_SET64(mkc, mkc, len, npages << PAGE_SHIFT);
- MLX5_SET(mkc, mkc, translations_octword_size,
- MLX5_MTT_OCTW(npages));
- MLX5_SET(mkc, mkc, log_page_size, PAGE_SHIFT);
-
- err = mlx5_core_create_mkey(mdev, &priv->umr_mkey, in, inlen);
-
- kvfree(in);
- return err;
-}
-
static void mlx5e_nic_init(struct mlx5_core_dev *mdev,
struct net_device *netdev,
const struct mlx5e_profile *profile,
@@ -3677,6 +3706,9 @@ static void mlx5e_nic_cleanup(struct mlx5e_priv *priv)
if (MLX5_CAP_GEN(mdev, vport_group_manager))
mlx5_eswitch_unregister_vport_rep(esw, 0);
+
+ if (priv->xdp_prog)
+ bpf_prog_put(priv->xdp_prog);
}
static int mlx5e_init_nic_rx(struct mlx5e_priv *priv)
@@ -3759,7 +3791,7 @@ static int mlx5e_init_nic_tx(struct mlx5e_priv *priv)
}
#ifdef CONFIG_MLX5_CORE_EN_DCB
- mlx5e_dcbnl_ieee_setets_core(priv, &priv->params.ets);
+ mlx5e_dcbnl_initialize(priv);
#endif
return 0;
}
@@ -3787,7 +3819,7 @@ static void mlx5e_nic_enable(struct mlx5e_priv *priv)
rep.load = mlx5e_nic_rep_load;
rep.unload = mlx5e_nic_rep_unload;
rep.vport = FDB_UPLINK_VPORT;
- rep.priv_data = priv;
+ rep.netdev = netdev;
mlx5_eswitch_register_vport_rep(esw, 0, &rep);
}
}
@@ -3852,21 +3884,16 @@ int mlx5e_attach_netdev(struct mlx5_core_dev *mdev, struct net_device *netdev)
{
const struct mlx5e_profile *profile;
struct mlx5e_priv *priv;
+ u16 max_mtu;
int err;
priv = netdev_priv(netdev);
profile = priv->profile;
clear_bit(MLX5E_STATE_DESTROYING, &priv->state);
- err = mlx5e_create_umr_mkey(priv);
- if (err) {
- mlx5_core_err(mdev, "create umr mkey failed, %d\n", err);
- goto out;
- }
-
err = profile->init_tx(priv);
if (err)
- goto err_destroy_umr_mkey;
+ goto out;
err = mlx5e_open_drop_rq(priv);
if (err) {
@@ -3882,6 +3909,11 @@ int mlx5e_attach_netdev(struct mlx5_core_dev *mdev, struct net_device *netdev)
mlx5e_init_l2_addr(priv);
+ /* MTU range: 68 - hw-specific max */
+ netdev->min_mtu = ETH_MIN_MTU;
+ mlx5_query_port_max_mtu(priv->mdev, &max_mtu, 1);
+ netdev->max_mtu = MLX5E_HW2SW_MTU(max_mtu);
+
mlx5e_set_dev_port_mtu(netdev);
if (profile->enable)
@@ -3901,9 +3933,6 @@ err_close_drop_rq:
err_cleanup_tx:
profile->cleanup_tx(priv);
-err_destroy_umr_mkey:
- mlx5_core_destroy_mkey(mdev, &priv->umr_mkey);
-
out:
return err;
}
@@ -3952,7 +3981,6 @@ void mlx5e_detach_netdev(struct mlx5_core_dev *mdev, struct net_device *netdev)
profile->cleanup_rx(priv);
mlx5e_close_drop_rq(priv);
profile->cleanup_tx(priv);
- mlx5_core_destroy_mkey(priv->mdev, &priv->umr_mkey);
cancel_delayed_work_sync(&priv->update_stats_work);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
index bf1c09ca73c0..850378893b25 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
@@ -72,7 +72,29 @@ static void mlx5e_rep_get_strings(struct net_device *dev,
}
}
-static void mlx5e_update_sw_rep_counters(struct mlx5e_priv *priv)
+static void mlx5e_rep_update_hw_counters(struct mlx5e_priv *priv)
+{
+ struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+ struct mlx5_eswitch_rep *rep = priv->ppriv;
+ struct rtnl_link_stats64 *vport_stats;
+ struct ifla_vf_stats vf_stats;
+ int err;
+
+ err = mlx5_eswitch_get_vport_stats(esw, rep->vport, &vf_stats);
+ if (err) {
+ pr_warn("vport %d error %d reading stats\n", rep->vport, err);
+ return;
+ }
+
+ vport_stats = &priv->stats.vf_vport;
+ /* flip tx/rx as we are reporting the counters for the switch vport */
+ vport_stats->rx_packets = vf_stats.tx_packets;
+ vport_stats->rx_bytes = vf_stats.tx_bytes;
+ vport_stats->tx_packets = vf_stats.rx_packets;
+ vport_stats->tx_bytes = vf_stats.rx_bytes;
+}
+
+static void mlx5e_rep_update_sw_counters(struct mlx5e_priv *priv)
{
struct mlx5e_sw_stats *s = &priv->stats.sw;
struct mlx5e_rq_stats *rq_stats;
@@ -95,6 +117,12 @@ static void mlx5e_update_sw_rep_counters(struct mlx5e_priv *priv)
}
}
+static void mlx5e_rep_update_stats(struct mlx5e_priv *priv)
+{
+ mlx5e_rep_update_sw_counters(priv);
+ mlx5e_rep_update_hw_counters(priv);
+}
+
static void mlx5e_rep_get_ethtool_stats(struct net_device *dev,
struct ethtool_stats *stats, u64 *data)
{
@@ -106,7 +134,7 @@ static void mlx5e_rep_get_ethtool_stats(struct net_device *dev,
mutex_lock(&priv->state_lock);
if (test_bit(MLX5E_STATE_OPENED, &priv->state))
- mlx5e_update_sw_rep_counters(priv);
+ mlx5e_rep_update_sw_counters(priv);
mutex_unlock(&priv->state_lock);
for (i = 0; i < NUM_VPORT_REP_COUNTERS; i++)
@@ -180,7 +208,8 @@ int mlx5e_add_sqs_fwd_rules(struct mlx5e_priv *priv)
int mlx5e_nic_rep_load(struct mlx5_eswitch *esw, struct mlx5_eswitch_rep *rep)
{
- struct mlx5e_priv *priv = rep->priv_data;
+ struct net_device *netdev = rep->netdev;
+ struct mlx5e_priv *priv = netdev_priv(netdev);
if (test_bit(MLX5E_STATE_OPENED, &priv->state))
return mlx5e_add_sqs_fwd_rules(priv);
@@ -198,7 +227,8 @@ void mlx5e_remove_sqs_fwd_rules(struct mlx5e_priv *priv)
void mlx5e_nic_rep_unload(struct mlx5_eswitch *esw,
struct mlx5_eswitch_rep *rep)
{
- struct mlx5e_priv *priv = rep->priv_data;
+ struct net_device *netdev = rep->netdev;
+ struct mlx5e_priv *priv = netdev_priv(netdev);
if (test_bit(MLX5E_STATE_OPENED, &priv->state))
mlx5e_remove_sqs_fwd_rules(priv);
@@ -208,6 +238,35 @@ void mlx5e_nic_rep_unload(struct mlx5_eswitch *esw,
mlx5e_tc_init(priv);
}
+static int mlx5e_rep_open(struct net_device *dev)
+{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+ struct mlx5_eswitch_rep *rep = priv->ppriv;
+ struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+ int err;
+
+ err = mlx5e_open(dev);
+ if (err)
+ return err;
+
+ err = mlx5_eswitch_set_vport_state(esw, rep->vport, MLX5_ESW_VPORT_ADMIN_STATE_UP);
+ if (!err)
+ netif_carrier_on(dev);
+
+ return 0;
+}
+
+static int mlx5e_rep_close(struct net_device *dev)
+{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+ struct mlx5_eswitch_rep *rep = priv->ppriv;
+ struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+
+ (void)mlx5_eswitch_set_vport_state(esw, rep->vport, MLX5_ESW_VPORT_ADMIN_STATE_DOWN);
+
+ return mlx5e_close(dev);
+}
+
static int mlx5e_rep_get_phys_port_name(struct net_device *dev,
char *buf, size_t len)
{
@@ -230,6 +289,14 @@ static int mlx5e_rep_ndo_setup_tc(struct net_device *dev, u32 handle,
if (TC_H_MAJ(handle) != TC_H_MAJ(TC_H_INGRESS))
return -EOPNOTSUPP;
+ if (tc->egress_dev) {
+ struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+ struct net_device *uplink_dev = mlx5_eswitch_get_uplink_netdev(esw);
+
+ return uplink_dev->netdev_ops->ndo_setup_tc(uplink_dev, handle,
+ proto, tc);
+ }
+
switch (tc->type) {
case TC_SETUP_CLSFLOWER:
switch (tc->cls_flower->command) {
@@ -245,17 +312,92 @@ static int mlx5e_rep_ndo_setup_tc(struct net_device *dev, u32 handle,
}
}
+bool mlx5e_is_uplink_rep(struct mlx5e_priv *priv)
+{
+ struct mlx5_eswitch_rep *rep = (struct mlx5_eswitch_rep *)priv->ppriv;
+ struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+
+ if (rep && rep->vport == FDB_UPLINK_VPORT && esw->mode == SRIOV_OFFLOADS)
+ return true;
+
+ return false;
+}
+
+bool mlx5e_is_vf_vport_rep(struct mlx5e_priv *priv)
+{
+ struct mlx5_eswitch_rep *rep = (struct mlx5_eswitch_rep *)priv->ppriv;
+
+ if (rep && rep->vport != FDB_UPLINK_VPORT)
+ return true;
+
+ return false;
+}
+
+bool mlx5e_has_offload_stats(const struct net_device *dev, int attr_id)
+{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+
+ switch (attr_id) {
+ case IFLA_OFFLOAD_XSTATS_CPU_HIT:
+ if (mlx5e_is_vf_vport_rep(priv) || mlx5e_is_uplink_rep(priv))
+ return true;
+ }
+
+ return false;
+}
+
+static int
+mlx5e_get_sw_stats64(const struct net_device *dev,
+ struct rtnl_link_stats64 *stats)
+{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+ struct mlx5e_sw_stats *sstats = &priv->stats.sw;
+
+ stats->rx_packets = sstats->rx_packets;
+ stats->rx_bytes = sstats->rx_bytes;
+ stats->tx_packets = sstats->tx_packets;
+ stats->tx_bytes = sstats->tx_bytes;
+
+ stats->tx_dropped = sstats->tx_queue_dropped;
+
+ return 0;
+}
+
+int mlx5e_get_offload_stats(int attr_id, const struct net_device *dev,
+ void *sp)
+{
+ switch (attr_id) {
+ case IFLA_OFFLOAD_XSTATS_CPU_HIT:
+ return mlx5e_get_sw_stats64(dev, sp);
+ }
+
+ return -EINVAL;
+}
+
+static struct rtnl_link_stats64 *
+mlx5e_rep_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
+{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+
+ memcpy(stats, &priv->stats.vf_vport, sizeof(*stats));
+ return stats;
+}
+
static const struct switchdev_ops mlx5e_rep_switchdev_ops = {
.switchdev_port_attr_get = mlx5e_attr_get,
};
static const struct net_device_ops mlx5e_netdev_ops_rep = {
- .ndo_open = mlx5e_open,
- .ndo_stop = mlx5e_close,
+ .ndo_open = mlx5e_rep_open,
+ .ndo_stop = mlx5e_rep_close,
.ndo_start_xmit = mlx5e_xmit,
.ndo_get_phys_port_name = mlx5e_rep_get_phys_port_name,
.ndo_setup_tc = mlx5e_rep_ndo_setup_tc,
- .ndo_get_stats64 = mlx5e_get_stats,
+ .ndo_get_stats64 = mlx5e_rep_get_stats,
+ .ndo_udp_tunnel_add = mlx5e_add_vxlan_port,
+ .ndo_udp_tunnel_del = mlx5e_del_vxlan_port,
+ .ndo_has_offload_stats = mlx5e_has_offload_stats,
+ .ndo_get_offload_stats = mlx5e_get_offload_stats,
};
static void mlx5e_build_rep_netdev_priv(struct mlx5_core_dev *mdev,
@@ -328,7 +470,7 @@ static int mlx5e_init_rep_rx(struct mlx5e_priv *priv)
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
struct mlx5_eswitch_rep *rep = priv->ppriv;
struct mlx5_core_dev *mdev = priv->mdev;
- struct mlx5_flow_rule *flow_rule;
+ struct mlx5_flow_handle *flow_rule;
int err;
int i;
@@ -360,7 +502,7 @@ static int mlx5e_init_rep_rx(struct mlx5e_priv *priv)
return 0;
err_del_flow_rule:
- mlx5_del_flow_rule(rep->vport_rx_rule);
+ mlx5_del_flow_rules(rep->vport_rx_rule);
err_destroy_direct_tirs:
mlx5e_destroy_direct_tirs(priv);
err_destroy_direct_rqts:
@@ -375,7 +517,7 @@ static void mlx5e_cleanup_rep_rx(struct mlx5e_priv *priv)
int i;
mlx5e_tc_cleanup(priv);
- mlx5_del_flow_rule(rep->vport_rx_rule);
+ mlx5_del_flow_rules(rep->vport_rx_rule);
mlx5e_destroy_direct_tirs(priv);
for (i = 0; i < priv->params.num_channels; i++)
mlx5e_destroy_rqt(priv, &priv->direct_tir[i].rqt);
@@ -405,7 +547,7 @@ static struct mlx5e_profile mlx5e_rep_profile = {
.cleanup_rx = mlx5e_cleanup_rep_rx,
.init_tx = mlx5e_init_rep_tx,
.cleanup_tx = mlx5e_cleanup_nic_tx,
- .update_stats = mlx5e_update_sw_rep_counters,
+ .update_stats = mlx5e_rep_update_stats,
.max_nch = mlx5e_get_rep_max_num_channels,
.max_tc = 1,
};
@@ -423,7 +565,7 @@ int mlx5e_vport_rep_load(struct mlx5_eswitch *esw,
return -EINVAL;
}
- rep->priv_data = netdev_priv(netdev);
+ rep->netdev = netdev;
err = mlx5e_attach_netdev(esw->dev, netdev);
if (err) {
@@ -445,7 +587,7 @@ err_detach_netdev:
mlx5e_detach_netdev(esw->dev, netdev);
err_destroy_netdev:
- mlx5e_destroy_netdev(esw->dev, rep->priv_data);
+ mlx5e_destroy_netdev(esw->dev, netdev_priv(netdev));
return err;
@@ -454,10 +596,9 @@ err_destroy_netdev:
void mlx5e_vport_rep_unload(struct mlx5_eswitch *esw,
struct mlx5_eswitch_rep *rep)
{
- struct mlx5e_priv *priv = rep->priv_data;
- struct net_device *netdev = priv->netdev;
+ struct net_device *netdev = rep->netdev;
unregister_netdev(netdev);
mlx5e_detach_netdev(esw->dev, netdev);
- mlx5e_destroy_netdev(esw->dev, priv);
+ mlx5e_destroy_netdev(esw->dev, netdev_priv(netdev));
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index 33495d88aeb2..0e2fb3ed1790 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -164,14 +164,14 @@ void mlx5e_modify_rx_cqe_compression(struct mlx5e_priv *priv, bool val)
mutex_lock(&priv->state_lock);
- if (priv->params.rx_cqe_compress == val)
+ if (MLX5E_GET_PFLAG(priv, MLX5E_PFLAG_RX_CQE_COMPRESS) == val)
goto unlock;
was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
if (was_opened)
mlx5e_close_locked(priv->netdev);
- priv->params.rx_cqe_compress = val;
+ MLX5E_SET_PFLAG(priv, MLX5E_PFLAG_RX_CQE_COMPRESS, val);
if (was_opened)
mlx5e_open_locked(priv->netdev);
@@ -737,10 +737,10 @@ static inline
struct sk_buff *skb_from_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
u16 wqe_counter, u32 cqe_bcnt)
{
- struct bpf_prog *xdp_prog = READ_ONCE(rq->xdp_prog);
struct mlx5e_dma_info *di;
struct sk_buff *skb;
void *va, *data;
+ bool consumed;
di = &rq->dma_info[wqe_counter];
va = page_address(di->page);
@@ -759,7 +759,11 @@ struct sk_buff *skb_from_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
return NULL;
}
- if (mlx5e_xdp_handle(rq, xdp_prog, di, data, cqe_bcnt))
+ rcu_read_lock();
+ consumed = mlx5e_xdp_handle(rq, READ_ONCE(rq->xdp_prog), di, data,
+ cqe_bcnt);
+ rcu_read_unlock();
+ if (consumed)
return NULL; /* page/packet was consumed by XDP */
skb = build_skb(va, RQ_PAGE_SIZE(rq));
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c b/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c
new file mode 100644
index 000000000000..65442c36a6e1
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c
@@ -0,0 +1,352 @@
+/*
+ * Copyright (c) 2016, Mellanox Technologies, Ltd. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/ip.h>
+#include <linux/udp.h>
+#include <net/udp.h>
+#include "en.h"
+
+enum {
+ MLX5E_ST_LINK_STATE,
+ MLX5E_ST_LINK_SPEED,
+ MLX5E_ST_HEALTH_INFO,
+#ifdef CONFIG_INET
+ MLX5E_ST_LOOPBACK,
+#endif
+ MLX5E_ST_NUM,
+};
+
+const char mlx5e_self_tests[MLX5E_ST_NUM][ETH_GSTRING_LEN] = {
+ "Link Test",
+ "Speed Test",
+ "Health Test",
+#ifdef CONFIG_INET
+ "Loopback Test",
+#endif
+};
+
+int mlx5e_self_test_num(struct mlx5e_priv *priv)
+{
+ return ARRAY_SIZE(mlx5e_self_tests);
+}
+
+static int mlx5e_test_health_info(struct mlx5e_priv *priv)
+{
+ struct mlx5_core_health *health = &priv->mdev->priv.health;
+
+ return health->sick ? 1 : 0;
+}
+
+static int mlx5e_test_link_state(struct mlx5e_priv *priv)
+{
+ u8 port_state;
+
+ if (!netif_carrier_ok(priv->netdev))
+ return 1;
+
+ port_state = mlx5_query_vport_state(priv->mdev, MLX5_QUERY_VPORT_STATE_IN_OP_MOD_VNIC_VPORT, 0);
+ return port_state == VPORT_STATE_UP ? 0 : 1;
+}
+
+static int mlx5e_test_link_speed(struct mlx5e_priv *priv)
+{
+ u32 out[MLX5_ST_SZ_DW(ptys_reg)];
+ u32 eth_proto_oper;
+ int i;
+
+ if (!netif_carrier_ok(priv->netdev))
+ return 1;
+
+ if (mlx5_query_port_ptys(priv->mdev, out, sizeof(out), MLX5_PTYS_EN, 1))
+ return 1;
+
+ eth_proto_oper = MLX5_GET(ptys_reg, out, eth_proto_oper);
+ for (i = 0; i < MLX5E_LINK_MODES_NUMBER; i++) {
+ if (eth_proto_oper & MLX5E_PROT_MASK(i))
+ return 0;
+ }
+ return 1;
+}
+
+#ifdef CONFIG_INET
+/* loopback test */
+#define MLX5E_TEST_PKT_SIZE (MLX5_MPWRQ_SMALL_PACKET_THRESHOLD - NET_IP_ALIGN)
+static const char mlx5e_test_text[ETH_GSTRING_LEN] = "MLX5E SELF TEST";
+#define MLX5E_TEST_MAGIC 0x5AEED15C001ULL
+
+struct mlx5ehdr {
+ __be32 version;
+ __be64 magic;
+ char text[ETH_GSTRING_LEN];
+};
+
+static struct sk_buff *mlx5e_test_get_udp_skb(struct mlx5e_priv *priv)
+{
+ struct sk_buff *skb = NULL;
+ struct mlx5ehdr *mlxh;
+ struct ethhdr *ethh;
+ struct udphdr *udph;
+ struct iphdr *iph;
+ int datalen, iplen;
+
+ datalen = MLX5E_TEST_PKT_SIZE -
+ (sizeof(*ethh) + sizeof(*iph) + sizeof(*udph));
+
+ skb = netdev_alloc_skb(priv->netdev, MLX5E_TEST_PKT_SIZE);
+ if (!skb) {
+ netdev_err(priv->netdev, "\tFailed to alloc loopback skb\n");
+ return NULL;
+ }
+
+ prefetchw(skb->data);
+ skb_reserve(skb, NET_IP_ALIGN);
+
+ /* Reserve for ethernet and IP header */
+ ethh = (struct ethhdr *)skb_push(skb, ETH_HLEN);
+ skb_reset_mac_header(skb);
+
+ skb_set_network_header(skb, skb->len);
+ iph = (struct iphdr *)skb_put(skb, sizeof(struct iphdr));
+
+ skb_set_transport_header(skb, skb->len);
+ udph = (struct udphdr *)skb_put(skb, sizeof(struct udphdr));
+
+ /* Fill ETH header */
+ ether_addr_copy(ethh->h_dest, priv->netdev->dev_addr);
+ eth_zero_addr(ethh->h_source);
+ ethh->h_proto = htons(ETH_P_IP);
+
+ /* Fill UDP header */
+ udph->source = htons(9);
+ udph->dest = htons(9); /* Discard Protocol */
+ udph->len = htons(datalen + sizeof(struct udphdr));
+ udph->check = 0;
+
+ /* Fill IP header */
+ iph->ihl = 5;
+ iph->ttl = 32;
+ iph->version = 4;
+ iph->protocol = IPPROTO_UDP;
+ iplen = sizeof(struct iphdr) + sizeof(struct udphdr) + datalen;
+ iph->tot_len = htons(iplen);
+ iph->frag_off = 0;
+ iph->saddr = 0;
+ iph->daddr = 0;
+ iph->tos = 0;
+ iph->id = 0;
+ ip_send_check(iph);
+
+ /* Fill test header and data */
+ mlxh = (struct mlx5ehdr *)skb_put(skb, sizeof(*mlxh));
+ mlxh->version = 0;
+ mlxh->magic = cpu_to_be64(MLX5E_TEST_MAGIC);
+ strlcpy(mlxh->text, mlx5e_test_text, sizeof(mlxh->text));
+ datalen -= sizeof(*mlxh);
+ memset(skb_put(skb, datalen), 0, datalen);
+
+ skb->csum = 0;
+ skb->ip_summed = CHECKSUM_PARTIAL;
+ udp4_hwcsum(skb, iph->saddr, iph->daddr);
+
+ skb->protocol = htons(ETH_P_IP);
+ skb->pkt_type = PACKET_HOST;
+ skb->dev = priv->netdev;
+
+ return skb;
+}
+
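
mlx5e_test_get_udp_skb() above hand-builds an Ethernet/IPv4/UDP frame addressed to the device's own MAC so the loopback validator can recognize it. A compilable userspace sketch of the same header stacking into a flat buffer, assuming Linux netinet headers; checksums are omitted, the 2-byte offset mirrors NET_IP_ALIGN, and port 9 (Discard) matches the test, everything else is illustrative:

#include <stdio.h>
#include <arpa/inet.h>
#include <net/ethernet.h>
#include <netinet/ip.h>
#include <netinet/udp.h>

#define TEST_PKT_SIZE 128

int main(void)
{
	unsigned char raw[TEST_PKT_SIZE + 2] = { 0 };
	unsigned char *buf = raw + 2;			/* NET_IP_ALIGN: align the IP header */
	struct ether_header *eth = (struct ether_header *)buf;
	struct iphdr *iph = (struct iphdr *)(eth + 1);
	struct udphdr *udph = (struct udphdr *)(iph + 1);
	int datalen = TEST_PKT_SIZE -
		      (int)(sizeof(*eth) + sizeof(*iph) + sizeof(*udph));

	eth->ether_type = htons(ETHERTYPE_IP);		/* MACs left zeroed here */

	iph->version = 4;
	iph->ihl = 5;
	iph->ttl = 32;
	iph->protocol = IPPROTO_UDP;
	iph->tot_len = htons(sizeof(*iph) + sizeof(*udph) + datalen);

	udph->source = htons(9);
	udph->dest = htons(9);				/* Discard Protocol */
	udph->len = htons(sizeof(*udph) + datalen);

	printf("built %d-byte test frame, %d bytes of payload\n",
	       TEST_PKT_SIZE, datalen);
	return 0;
}
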
+struct mlx5e_lbt_priv {
+ struct packet_type pt;
+ struct completion comp;
+ bool loopback_ok;
+};
+
+static int
+mlx5e_test_loopback_validate(struct sk_buff *skb,
+ struct net_device *ndev,
+ struct packet_type *pt,
+ struct net_device *orig_ndev)
+{
+ struct mlx5e_lbt_priv *lbtp = pt->af_packet_priv;
+ struct mlx5ehdr *mlxh;
+ struct ethhdr *ethh;
+ struct udphdr *udph;
+ struct iphdr *iph;
+
+ /* We are only going to peek, no need to clone the SKB */
+ if (skb->protocol != htons(ETH_P_IP))
+ goto out;
+
+ if (MLX5E_TEST_PKT_SIZE - ETH_HLEN > skb_headlen(skb))
+ goto out;
+
+ ethh = (struct ethhdr *)skb_mac_header(skb);
+ if (!ether_addr_equal(ethh->h_dest, orig_ndev->dev_addr))
+ goto out;
+
+ iph = ip_hdr(skb);
+ if (iph->protocol != IPPROTO_UDP)
+ goto out;
+
+ udph = udp_hdr(skb);
+ if (udph->dest != htons(9))
+ goto out;
+
+ mlxh = (struct mlx5ehdr *)((char *)udph + sizeof(*udph));
+ if (mlxh->magic != cpu_to_be64(MLX5E_TEST_MAGIC))
+ goto out; /* so close ! */
+
+ /* bingo */
+ lbtp->loopback_ok = true;
+ complete(&lbtp->comp);
+out:
+ kfree_skb(skb);
+ return 0;
+}
+
+static int mlx5e_test_loopback_setup(struct mlx5e_priv *priv,
+ struct mlx5e_lbt_priv *lbtp)
+{
+ int err = 0;
+
+ err = mlx5e_refresh_tirs_self_loopback(priv->mdev, true);
+ if (err) {
+ netdev_err(priv->netdev,
+ "\tFailed to enable UC loopback err(%d)\n", err);
+ return err;
+ }
+
+ lbtp->loopback_ok = false;
+ init_completion(&lbtp->comp);
+
+ lbtp->pt.type = htons(ETH_P_ALL);
+ lbtp->pt.func = mlx5e_test_loopback_validate;
+ lbtp->pt.dev = priv->netdev;
+ lbtp->pt.af_packet_priv = lbtp;
+ dev_add_pack(&lbtp->pt);
+ return err;
+}
+
+static void mlx5e_test_loopback_cleanup(struct mlx5e_priv *priv,
+ struct mlx5e_lbt_priv *lbtp)
+{
+ dev_remove_pack(&lbtp->pt);
+ mlx5e_refresh_tirs_self_loopback(priv->mdev, false);
+}
+
+#define MLX5E_LB_VERIFY_TIMEOUT (msecs_to_jiffies(200))
+static int mlx5e_test_loopback(struct mlx5e_priv *priv)
+{
+ struct mlx5e_lbt_priv *lbtp;
+ struct sk_buff *skb = NULL;
+ int err;
+
+ if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
+ netdev_err(priv->netdev,
+ "\tCan't perform loopback test while device is down\n");
+ return -ENODEV;
+ }
+
+ lbtp = kzalloc(sizeof(*lbtp), GFP_KERNEL);
+ if (!lbtp)
+ return -ENOMEM;
+ lbtp->loopback_ok = false;
+
+ err = mlx5e_test_loopback_setup(priv, lbtp);
+ if (err)
+ goto out;
+
+ skb = mlx5e_test_get_udp_skb(priv);
+ if (!skb) {
+ err = -ENOMEM;
+ goto cleanup;
+ }
+
+ skb_set_queue_mapping(skb, 0);
+ err = dev_queue_xmit(skb);
+ if (err) {
+ netdev_err(priv->netdev,
+ "\tFailed to xmit loopback packet err(%d)\n",
+ err);
+ goto cleanup;
+ }
+
+ wait_for_completion_timeout(&lbtp->comp, MLX5E_LB_VERIFY_TIMEOUT);
+ err = !lbtp->loopback_ok;
+
+cleanup:
+ mlx5e_test_loopback_cleanup(priv, lbtp);
+out:
+ kfree(lbtp);
+ return err;
+}
+#endif
+
+static int (*mlx5e_st_func[MLX5E_ST_NUM])(struct mlx5e_priv *) = {
+ mlx5e_test_link_state,
+ mlx5e_test_link_speed,
+ mlx5e_test_health_info,
+#ifdef CONFIG_INET
+ mlx5e_test_loopback,
+#endif
+};
+
+void mlx5e_self_test(struct net_device *ndev, struct ethtool_test *etest,
+ u64 *buf)
+{
+ struct mlx5e_priv *priv = netdev_priv(ndev);
+ int i;
+
+ memset(buf, 0, sizeof(u64) * MLX5E_ST_NUM);
+
+ mutex_lock(&priv->state_lock);
+ netdev_info(ndev, "Self test begin..\n");
+
+ for (i = 0; i < MLX5E_ST_NUM; i++) {
+ netdev_info(ndev, "\t[%d] %s start..\n",
+ i, mlx5e_self_tests[i]);
+ buf[i] = mlx5e_st_func[i](priv);
+ netdev_info(ndev, "\t[%d] %s end: result(%lld)\n",
+ i, mlx5e_self_tests[i], buf[i]);
+ }
+
+ mutex_unlock(&priv->state_lock);
+
+ for (i = 0; i < MLX5E_ST_NUM; i++) {
+ if (buf[i]) {
+ etest->flags |= ETH_TEST_FL_FAILED;
+ break;
+ }
+ }
+ netdev_info(ndev, "Self test out: status flags(0x%x)\n",
+ etest->flags);
+}
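
mlx5e_self_test() walks the mlx5e_st_func[] table in order and raises ETH_TEST_FL_FAILED on any non-zero return. A minimal compilable sketch of that table-driven dispatch; the test names and functions are stand-ins:

#include <stdio.h>

static int test_link(void)   { return 0; }	/* 0 == pass */
static int test_speed(void)  { return 0; }
static int test_health(void) { return 1; }	/* simulate one failure */

static const struct {
	const char *name;
	int (*fn)(void);
} tests[] = {
	{ "Link Test",   test_link },
	{ "Speed Test",  test_speed },
	{ "Health Test", test_health },
};

int main(void)
{
	unsigned int flags = 0;
	size_t i;

	for (i = 0; i < sizeof(tests) / sizeof(tests[0]); i++) {
		int res = tests[i].fn();

		printf("[%zu] %s: %s\n", i, tests[i].name, res ? "FAIL" : "ok");
		if (res)
			flags |= 1;		/* like ETH_TEST_FL_FAILED */
	}
	printf("status flags: 0x%x\n", flags);
	return 0;
}
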
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
index 57452fdc5154..f202f872f57f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
@@ -39,7 +39,7 @@
#define MLX5E_READ_CTR32_CPU(ptr, dsc, i) \
(*(u32 *)((char *)ptr + dsc[i].offset))
#define MLX5E_READ_CTR32_BE(ptr, dsc, i) \
- be64_to_cpu(*(__be32 *)((char *)ptr + dsc[i].offset))
+ be32_to_cpu(*(__be32 *)((char *)ptr + dsc[i].offset))
#define MLX5E_DECLARE_STAT(type, fld) #fld, offsetof(type, fld)
#define MLX5E_DECLARE_RX_STAT(type, fld) "rx%d_"#fld, offsetof(type, fld)
@@ -276,6 +276,32 @@ static const struct counter_desc pport_per_prio_pfc_stats_desc[] = {
{ "rx_%s_pause_transition", PPORT_PER_PRIO_OFF(rx_pause_transition) },
};
+#define PCIE_PERF_OFF(c) \
+ MLX5_BYTE_OFF(mpcnt_reg, counter_set.pcie_perf_cntrs_grp_data_layout.c)
+#define PCIE_PERF_GET(pcie_stats, c) \
+ MLX5_GET(mpcnt_reg, pcie_stats->pcie_perf_counters, \
+ counter_set.pcie_perf_cntrs_grp_data_layout.c)
+#define PCIE_TAS_OFF(c) \
+ MLX5_BYTE_OFF(mpcnt_reg, counter_set.pcie_tas_cntrs_grp_data_layout.c)
+#define PCIE_TAS_GET(pcie_stats, c) \
+ MLX5_GET(mpcnt_reg, pcie_stats->pcie_tas_counters, \
+ counter_set.pcie_tas_cntrs_grp_data_layout.c)
+
+struct mlx5e_pcie_stats {
+ __be64 pcie_perf_counters[MLX5_ST_SZ_QW(mpcnt_reg)];
+ __be64 pcie_tas_counters[MLX5_ST_SZ_QW(mpcnt_reg)];
+};
+
+static const struct counter_desc pcie_perf_stats_desc[] = {
+ { "rx_pci_signal_integrity", PCIE_PERF_OFF(rx_errors) },
+ { "tx_pci_signal_integrity", PCIE_PERF_OFF(tx_errors) },
+};
+
+static const struct counter_desc pcie_tas_stats_desc[] = {
+ { "tx_pci_transport_nonfatal_msg", PCIE_TAS_OFF(non_fatal_err_msg_sent) },
+ { "tx_pci_transport_fatal_msg", PCIE_TAS_OFF(fatal_err_msg_sent) },
+};
+
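
The new PCIe counters follow the stats layer's descriptor convention: each counter is a (name, byte offset) pair read back through pointer arithmetic, as in MLX5E_READ_CTR32_BE above. A compilable sketch of the offsetof()-based descriptor pattern; the structure and field names are illustrative:

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

struct sw_stats {
	uint64_t rx_packets;
	uint64_t rx_bytes;
	uint64_t tx_packets;
};

struct counter_desc {
	const char *name;
	size_t offset;
};

#define DECLARE_STAT(type, fld) { #fld, offsetof(type, fld) }

static const struct counter_desc sw_stats_desc[] = {
	DECLARE_STAT(struct sw_stats, rx_packets),
	DECLARE_STAT(struct sw_stats, rx_bytes),
	DECLARE_STAT(struct sw_stats, tx_packets),
};

/* fetch a counter by descriptor offset, like MLX5E_READ_CTR64_CPU */
static uint64_t read_ctr64(const void *base, const struct counter_desc *d)
{
	return *(const uint64_t *)((const char *)base + d->offset);
}

int main(void)
{
	struct sw_stats s = { .rx_packets = 10, .rx_bytes = 1500, .tx_packets = 7 };
	size_t i;

	for (i = 0; i < sizeof(sw_stats_desc) / sizeof(sw_stats_desc[0]); i++)
		printf("%s = %llu\n", sw_stats_desc[i].name,
		       (unsigned long long)read_ctr64(&s, &sw_stats_desc[i]));
	return 0;
}
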
struct mlx5e_rq_stats {
u64 packets;
u64 bytes;
@@ -360,6 +386,8 @@ static const struct counter_desc sq_stats_desc[] = {
#define NUM_PPORT_802_3_COUNTERS ARRAY_SIZE(pport_802_3_stats_desc)
#define NUM_PPORT_2863_COUNTERS ARRAY_SIZE(pport_2863_stats_desc)
#define NUM_PPORT_2819_COUNTERS ARRAY_SIZE(pport_2819_stats_desc)
+#define NUM_PCIE_PERF_COUNTERS ARRAY_SIZE(pcie_perf_stats_desc)
+#define NUM_PCIE_TAS_COUNTERS ARRAY_SIZE(pcie_tas_stats_desc)
#define NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS \
ARRAY_SIZE(pport_per_prio_traffic_stats_desc)
#define NUM_PPORT_PER_PRIO_PFC_COUNTERS \
@@ -369,6 +397,7 @@ static const struct counter_desc sq_stats_desc[] = {
NUM_PPORT_2819_COUNTERS + \
NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS * \
NUM_PPORT_PRIO)
+#define NUM_PCIE_COUNTERS (NUM_PCIE_PERF_COUNTERS + NUM_PCIE_TAS_COUNTERS)
#define NUM_RQ_STATS ARRAY_SIZE(rq_stats_desc)
#define NUM_SQ_STATS ARRAY_SIZE(sq_stats_desc)
@@ -377,6 +406,25 @@ struct mlx5e_stats {
struct mlx5e_qcounter_stats qcnt;
struct mlx5e_vport_stats vport;
struct mlx5e_pport_stats pport;
+ struct mlx5e_pcie_stats pcie;
+ struct rtnl_link_stats64 vf_vport;
+};
+
+static const struct counter_desc mlx5e_pme_status_desc[] = {
+ { "module_plug", 0 },
+ { "module_unplug", 8 },
+};
+
+static const struct counter_desc mlx5e_pme_error_desc[] = {
+ { "module_pwr_budget_exd", 0 }, /* power budget exceeded */
+ { "module_long_range", 8 }, /* long range for non-MLNX cable */
+ { "module_bus_stuck", 16 }, /* bus stuck (I2C or data shorted) */
+ { "module_no_eeprom", 24 }, /* no eeprom/retry time out */
+ { "module_enforce_part", 32 }, /* enforce part number list */
+ { "module_unknown_id", 40 }, /* unknown identifier */
+ { "module_high_temp", 48 }, /* high temperature */
+ { "module_bad_shorted", 56 }, /* bad or shorted cable/module */
+ { "module_unknown_status", 64 },
};
#endif /* __MLX5_EN_STATS_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index 6bb21b31cfeb..f8829b517156 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -31,6 +31,7 @@
*/
#include <net/flow_dissector.h>
+#include <net/sch_generic.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_skbedit.h>
@@ -40,28 +41,43 @@
#include <net/switchdev.h>
#include <net/tc_act/tc_mirred.h>
#include <net/tc_act/tc_vlan.h>
+#include <net/tc_act/tc_tunnel_key.h>
+#include <net/vxlan.h>
#include "en.h"
#include "en_tc.h"
#include "eswitch.h"
+#include "vxlan.h"
struct mlx5e_tc_flow {
struct rhash_head node;
u64 cookie;
- struct mlx5_flow_rule *rule;
+ struct mlx5_flow_handle *rule;
+ struct list_head encap; /* flows sharing the same encap */
struct mlx5_esw_flow_attr *attr;
};
+enum {
+ MLX5_HEADER_TYPE_VXLAN = 0x0,
+ MLX5_HEADER_TYPE_NVGRE = 0x1,
+};
+
#define MLX5E_TC_TABLE_NUM_ENTRIES 1024
#define MLX5E_TC_TABLE_NUM_GROUPS 4
-static struct mlx5_flow_rule *mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
- struct mlx5_flow_spec *spec,
- u32 action, u32 flow_tag)
+static struct mlx5_flow_handle *
+mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
+ struct mlx5_flow_spec *spec,
+ u32 action, u32 flow_tag)
{
struct mlx5_core_dev *dev = priv->mdev;
struct mlx5_flow_destination dest = { 0 };
+ struct mlx5_flow_act flow_act = {
+ .action = action,
+ .flow_tag = flow_tag,
+ .encap_id = 0,
+ };
struct mlx5_fc *counter = NULL;
- struct mlx5_flow_rule *rule;
+ struct mlx5_flow_handle *rule;
bool table_created = false;
if (action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
@@ -82,7 +98,7 @@ static struct mlx5_flow_rule *mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
MLX5E_TC_PRIO,
MLX5E_TC_TABLE_NUM_ENTRIES,
MLX5E_TC_TABLE_NUM_GROUPS,
- 0);
+ 0, 0);
if (IS_ERR(priv->fs.tc.t)) {
netdev_err(priv->netdev,
"Failed to create tc offload table\n");
@@ -94,9 +110,7 @@ static struct mlx5_flow_rule *mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
}
spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
- rule = mlx5_add_flow_rule(priv->fs.tc.t, spec,
- action, flow_tag,
- &dest);
+ rule = mlx5_add_flow_rules(priv->fs.tc.t, spec, &flow_act, &dest, 1);
if (IS_ERR(rule))
goto err_add_rule;
@@ -114,9 +128,10 @@ err_create_ft:
return rule;
}
-static struct mlx5_flow_rule *mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
- struct mlx5_flow_spec *spec,
- struct mlx5_esw_flow_attr *attr)
+static struct mlx5_flow_handle *
+mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
+ struct mlx5_flow_spec *spec,
+ struct mlx5_esw_flow_attr *attr)
{
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
int err;
@@ -128,19 +143,39 @@ static struct mlx5_flow_rule *mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
return mlx5_eswitch_add_offloaded_rule(esw, spec, attr);
}
+static void mlx5e_detach_encap(struct mlx5e_priv *priv,
+ struct mlx5e_tc_flow *flow) {
+ struct list_head *next = flow->encap.next;
+
+ list_del(&flow->encap);
+ if (list_empty(next)) {
+ struct mlx5_encap_entry *e;
+
+ e = list_entry(next, struct mlx5_encap_entry, flows);
+ if (e->n) {
+ mlx5_encap_dealloc(priv->mdev, e->encap_id);
+ neigh_release(e->n);
+ }
+ hlist_del_rcu(&e->encap_hlist);
+ kfree(e);
+ }
+}
+
static void mlx5e_tc_del_flow(struct mlx5e_priv *priv,
- struct mlx5_flow_rule *rule,
- struct mlx5_esw_flow_attr *attr)
+ struct mlx5e_tc_flow *flow)
{
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
struct mlx5_fc *counter = NULL;
- counter = mlx5_flow_rule_counter(rule);
+ counter = mlx5_flow_rule_counter(flow->rule);
- if (esw && esw->mode == SRIOV_OFFLOADS)
- mlx5_eswitch_del_vlan_action(esw, attr);
+ mlx5_del_flow_rules(flow->rule);
- mlx5_del_flow_rule(rule);
+ if (esw && esw->mode == SRIOV_OFFLOADS) {
+ mlx5_eswitch_del_vlan_action(esw, flow->attr);
+ if (flow->attr->action & MLX5_FLOW_CONTEXT_ACTION_ENCAP)
+ mlx5e_detach_encap(priv, flow);
+ }
mlx5_fc_destroy(priv->mdev, counter);
@@ -150,8 +185,125 @@ static void mlx5e_tc_del_flow(struct mlx5e_priv *priv,
}
}
-static int parse_cls_flower(struct mlx5e_priv *priv, struct mlx5_flow_spec *spec,
- struct tc_cls_flower_offload *f)
+static void parse_vxlan_attr(struct mlx5_flow_spec *spec,
+ struct tc_cls_flower_offload *f)
+{
+ void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
+ outer_headers);
+ void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
+ outer_headers);
+ void *misc_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
+ misc_parameters);
+ void *misc_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
+ misc_parameters);
+
+ MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ip_protocol);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_UDP);
+
+ if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
+ struct flow_dissector_key_keyid *key =
+ skb_flow_dissector_target(f->dissector,
+ FLOW_DISSECTOR_KEY_ENC_KEYID,
+ f->key);
+ struct flow_dissector_key_keyid *mask =
+ skb_flow_dissector_target(f->dissector,
+ FLOW_DISSECTOR_KEY_ENC_KEYID,
+ f->mask);
+ MLX5_SET(fte_match_set_misc, misc_c, vxlan_vni,
+ be32_to_cpu(mask->keyid));
+ MLX5_SET(fte_match_set_misc, misc_v, vxlan_vni,
+ be32_to_cpu(key->keyid));
+ }
+}
+
+static int parse_tunnel_attr(struct mlx5e_priv *priv,
+ struct mlx5_flow_spec *spec,
+ struct tc_cls_flower_offload *f)
+{
+ void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
+ outer_headers);
+ void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
+ outer_headers);
+
+ if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_PORTS)) {
+ struct flow_dissector_key_ports *key =
+ skb_flow_dissector_target(f->dissector,
+ FLOW_DISSECTOR_KEY_ENC_PORTS,
+ f->key);
+ struct flow_dissector_key_ports *mask =
+ skb_flow_dissector_target(f->dissector,
+ FLOW_DISSECTOR_KEY_ENC_PORTS,
+ f->mask);
+
+ /* Full udp dst port must be given */
+ if (memchr_inv(&mask->dst, 0xff, sizeof(mask->dst)))
+ return -EOPNOTSUPP;
+
+ /* udp src port isn't supported */
+ if (memchr_inv(&mask->src, 0, sizeof(mask->src)))
+ return -EOPNOTSUPP;
+
+ if (mlx5e_vxlan_lookup_port(priv, be16_to_cpu(key->dst)) &&
+ MLX5_CAP_ESW(priv->mdev, vxlan_encap_decap))
+ parse_vxlan_attr(spec, f);
+ else
+ return -EOPNOTSUPP;
+
+ MLX5_SET(fte_match_set_lyr_2_4, headers_c,
+ udp_dport, ntohs(mask->dst));
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v,
+ udp_dport, ntohs(key->dst));
+
+ } else { /* udp dst port must be given */
+ return -EOPNOTSUPP;
+ }
+
+ if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS)) {
+ struct flow_dissector_key_ipv4_addrs *key =
+ skb_flow_dissector_target(f->dissector,
+ FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS,
+ f->key);
+ struct flow_dissector_key_ipv4_addrs *mask =
+ skb_flow_dissector_target(f->dissector,
+ FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS,
+ f->mask);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_c,
+ src_ipv4_src_ipv6.ipv4_layout.ipv4,
+ ntohl(mask->src));
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v,
+ src_ipv4_src_ipv6.ipv4_layout.ipv4,
+ ntohl(key->src));
+
+ MLX5_SET(fte_match_set_lyr_2_4, headers_c,
+ dst_ipv4_dst_ipv6.ipv4_layout.ipv4,
+ ntohl(mask->dst));
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v,
+ dst_ipv4_dst_ipv6.ipv4_layout.ipv4,
+ ntohl(key->dst));
+ }
+
+ MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ethertype);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, ETH_P_IP);
+
+ /* Enforce DMAC when offloading incoming tunneled flows.
+ * Flow counters require a match on the DMAC.
+ */
+ MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, dmac_47_16);
+ MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, dmac_15_0);
+ ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
+ dmac_47_16), priv->netdev->dev_addr);
+
+ /* let software handle IP fragments */
+ MLX5_SET(fte_match_set_lyr_2_4, headers_c, frag, 1);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag, 0);
+
+ return 0;
+}
+
+static int __parse_cls_flower(struct mlx5e_priv *priv,
+ struct mlx5_flow_spec *spec,
+ struct tc_cls_flower_offload *f,
+ u8 *min_inline)
{
void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
outer_headers);
@@ -160,6 +312,8 @@ static int parse_cls_flower(struct mlx5e_priv *priv, struct mlx5_flow_spec *spec
u16 addr_type = 0;
u8 ip_proto = 0;
+ *min_inline = MLX5_INLINE_MODE_L2;
+
if (f->dissector->used_keys &
~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
BIT(FLOW_DISSECTOR_KEY_BASIC) |
@@ -167,18 +321,61 @@ static int parse_cls_flower(struct mlx5e_priv *priv, struct mlx5_flow_spec *spec
BIT(FLOW_DISSECTOR_KEY_VLAN) |
BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
- BIT(FLOW_DISSECTOR_KEY_PORTS))) {
+ BIT(FLOW_DISSECTOR_KEY_PORTS) |
+ BIT(FLOW_DISSECTOR_KEY_ENC_KEYID) |
+ BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) |
+ BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) |
+ BIT(FLOW_DISSECTOR_KEY_ENC_PORTS) |
+ BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL))) {
netdev_warn(priv->netdev, "Unsupported key used: 0x%x\n",
f->dissector->used_keys);
return -EOPNOTSUPP;
}
+ if ((dissector_uses_key(f->dissector,
+ FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) ||
+ dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_KEYID) ||
+ dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_PORTS)) &&
+ dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_CONTROL)) {
+ struct flow_dissector_key_control *key =
+ skb_flow_dissector_target(f->dissector,
+ FLOW_DISSECTOR_KEY_ENC_CONTROL,
+ f->key);
+ switch (key->addr_type) {
+ case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
+ if (parse_tunnel_attr(priv, spec, f))
+ return -EOPNOTSUPP;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ /* In decap flows, the header pointers should point to the inner
+ * headers; the outer headers were already set by parse_tunnel_attr.
+ */
+ headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
+ inner_headers);
+ headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
+ inner_headers);
+ }
+
if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_CONTROL)) {
struct flow_dissector_key_control *key =
skb_flow_dissector_target(f->dissector,
FLOW_DISSECTOR_KEY_CONTROL,
f->key);
+
+ struct flow_dissector_key_control *mask =
+ skb_flow_dissector_target(f->dissector,
+ FLOW_DISSECTOR_KEY_CONTROL,
+ f->mask);
addr_type = key->addr_type;
+
+ if (mask->flags & FLOW_DIS_IS_FRAGMENT) {
+ MLX5_SET(fte_match_set_lyr_2_4, headers_c, frag, 1);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag,
+ key->flags & FLOW_DIS_IS_FRAGMENT);
+ }
}
if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
@@ -201,6 +398,9 @@ static int parse_cls_flower(struct mlx5e_priv *priv, struct mlx5_flow_spec *spec
mask->ip_proto);
MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
key->ip_proto);
+
+ if (mask->ip_proto)
+ *min_inline = MLX5_INLINE_MODE_IP;
}
if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
@@ -271,6 +471,9 @@ static int parse_cls_flower(struct mlx5e_priv *priv, struct mlx5_flow_spec *spec
memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
&key->dst, sizeof(key->dst));
+
+ if (mask->src || mask->dst)
+ *min_inline = MLX5_INLINE_MODE_IP;
}
if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
@@ -296,6 +499,10 @@ static int parse_cls_flower(struct mlx5e_priv *priv, struct mlx5_flow_spec *spec
memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
&key->dst, sizeof(key->dst));
+
+ if (ipv6_addr_type(&mask->src) != IPV6_ADDR_ANY ||
+ ipv6_addr_type(&mask->dst) != IPV6_ADDR_ANY)
+ *min_inline = MLX5_INLINE_MODE_IP;
}
if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_PORTS)) {
@@ -336,11 +543,39 @@ static int parse_cls_flower(struct mlx5e_priv *priv, struct mlx5_flow_spec *spec
"Only UDP and TCP transport are supported\n");
return -EINVAL;
}
+
+ if (mask->src || mask->dst)
+ *min_inline = MLX5_INLINE_MODE_TCP_UDP;
}
return 0;
}
+static int parse_cls_flower(struct mlx5e_priv *priv,
+ struct mlx5_flow_spec *spec,
+ struct tc_cls_flower_offload *f)
+{
+ struct mlx5_core_dev *dev = priv->mdev;
+ struct mlx5_eswitch *esw = dev->priv.eswitch;
+ struct mlx5_eswitch_rep *rep = priv->ppriv;
+ u8 min_inline;
+ int err;
+
+ err = __parse_cls_flower(priv, spec, f, &min_inline);
+
+ if (!err && esw->mode == SRIOV_OFFLOADS &&
+ rep->vport != FDB_UPLINK_VPORT) {
+ if (min_inline > esw->offloads.inline_mode) {
+ netdev_warn(priv->netdev,
+ "Flow is not offloaded due to min inline setting, required %d actual %d\n",
+ min_inline, esw->offloads.inline_mode);
+ return -EOPNOTSUPP;
+ }
+ }
+
+ return err;
+}
+
static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
u32 *action, u32 *flow_tag)
{
@@ -387,11 +622,243 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
return 0;
}
+static inline int cmp_encap_info(struct mlx5_encap_info *a,
+ struct mlx5_encap_info *b)
+{
+ return memcmp(a, b, sizeof(*a));
+}
+
+static inline int hash_encap_info(struct mlx5_encap_info *info)
+{
+ return jhash(info, sizeof(*info), 0);
+}
+
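
mlx5e_attach_encap() further below hashes the tunnel key with jhash(), walks the matching bucket of esw->offloads.encap_tbl comparing candidates with cmp_encap_info(), and reuses an existing encap entry on a hit. A minimal userspace sketch of that hash-then-memcmp reuse, with FNV-1a standing in for jhash and no collision chaining; all structures are illustrative:

#include <stdio.h>
#include <string.h>
#include <stdint.h>

struct encap_info {
	uint32_t daddr;
	uint16_t tp_dst;
	uint16_t pad;				/* keep the key memcmp()-able */
};

#define NBUCKETS 16

static struct encap_entry {
	struct encap_info info;
	int in_use;
} tbl[NBUCKETS];

static uint32_t hash_info(const struct encap_info *info)
{
	const unsigned char *p = (const unsigned char *)info;
	uint32_t h = 2166136261u;
	size_t i;

	for (i = 0; i < sizeof(*info); i++)	/* FNV-1a over the whole key */
		h = (h ^ p[i]) * 16777619u;
	return h;
}

/* return the entry for info, reusing a matching one when it exists */
static struct encap_entry *attach_encap(const struct encap_info *info, int *found)
{
	struct encap_entry *e = &tbl[hash_info(info) % NBUCKETS];

	*found = e->in_use && !memcmp(&e->info, info, sizeof(*info));
	if (!*found) {
		e->info = *info;
		e->in_use = 1;
	}
	return e;
}

int main(void)
{
	struct encap_info key = { .daddr = 0x0a000001, .tp_dst = 4789 };
	int found;

	attach_encap(&key, &found);
	printf("first attach: found=%d\n", found);	/* 0: entry created */
	attach_encap(&key, &found);
	printf("second attach: found=%d\n", found);	/* 1: entry reused */
	return 0;
}
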
+static int mlx5e_route_lookup_ipv4(struct mlx5e_priv *priv,
+ struct net_device *mirred_dev,
+ struct net_device **out_dev,
+ struct flowi4 *fl4,
+ struct neighbour **out_n,
+ __be32 *saddr,
+ int *out_ttl)
+{
+ struct rtable *rt;
+ struct neighbour *n = NULL;
+ int ttl;
+
+#if IS_ENABLED(CONFIG_INET)
+ rt = ip_route_output_key(dev_net(mirred_dev), fl4);
+ if (IS_ERR(rt)) {
+ pr_warn("%s: no route to %pI4\n", __func__, &fl4->daddr);
+ return -EOPNOTSUPP;
+ }
+#else
+ return -EOPNOTSUPP;
+#endif
+
+ if (!switchdev_port_same_parent_id(priv->netdev, rt->dst.dev)) {
+ pr_warn("%s: Can't offload the flow, netdevices aren't on the same HW e-switch\n",
+ __func__);
+ ip_rt_put(rt);
+ return -EOPNOTSUPP;
+ }
+
+ ttl = ip4_dst_hoplimit(&rt->dst);
+ n = dst_neigh_lookup(&rt->dst, &fl4->daddr);
+ ip_rt_put(rt);
+ if (!n)
+ return -ENOMEM;
+
+ *out_n = n;
+ *saddr = fl4->saddr;
+ *out_ttl = ttl;
+ *out_dev = rt->dst.dev;
+
+ return 0;
+}
+
+static int gen_vxlan_header_ipv4(struct net_device *out_dev,
+ char buf[],
+ unsigned char h_dest[ETH_ALEN],
+ int ttl,
+ __be32 daddr,
+ __be32 saddr,
+ __be16 udp_dst_port,
+ __be32 vx_vni)
+{
+ int encap_size = VXLAN_HLEN + sizeof(struct iphdr) + ETH_HLEN;
+ struct ethhdr *eth = (struct ethhdr *)buf;
+ struct iphdr *ip = (struct iphdr *)((char *)eth + sizeof(struct ethhdr));
+ struct udphdr *udp = (struct udphdr *)((char *)ip + sizeof(struct iphdr));
+ struct vxlanhdr *vxh = (struct vxlanhdr *)((char *)udp + sizeof(struct udphdr));
+
+ memset(buf, 0, encap_size);
+
+ ether_addr_copy(eth->h_dest, h_dest);
+ ether_addr_copy(eth->h_source, out_dev->dev_addr);
+ eth->h_proto = htons(ETH_P_IP);
+
+ ip->daddr = daddr;
+ ip->saddr = saddr;
+
+ ip->ttl = ttl;
+ ip->protocol = IPPROTO_UDP;
+ ip->version = 0x4;
+ ip->ihl = 0x5;
+
+ udp->dest = udp_dst_port;
+ vxh->vx_flags = VXLAN_HF_VNI;
+ vxh->vx_vni = vxlan_vni_field(vx_vni);
+
+ return encap_size;
+}
+
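
gen_vxlan_header_ipv4() above lays the Ethernet, IPv4, UDP and VXLAN headers back to back, so the encap size later handed to mlx5_encap_alloc() is ETH_HLEN plus the IPv4 header plus VXLAN_HLEN (UDP header plus the 8-byte VXLAN header). A small sketch of that size arithmetic under those standard header sizes; the types and constants are local stand-ins, and the VNI shift shown is the usual placement in the upper 24 bits of the field:

#include <stdio.h>
#include <stdint.h>

#define ETH_HLEN	14		/* no VLAN tag */
#define IPV4_HLEN	20		/* ihl == 5, no options */
#define UDP_HLEN	8

struct vxlanhdr {			/* standard 8-byte VXLAN header */
	uint32_t vx_flags;		/* VNI-present flag */
	uint32_t vx_vni;		/* 24-bit VNI in the upper bits */
};

int main(void)
{
	int vxlan_hlen = UDP_HLEN + (int)sizeof(struct vxlanhdr);
	int encap_size = ETH_HLEN + IPV4_HLEN + vxlan_hlen;
	uint32_t vni = 42;

	printf("encap header: %d bytes (VXLAN_HLEN=%d), vni field=0x%08x\n",
	       encap_size, vxlan_hlen, vni << 8);
	return 0;
}
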
+static int mlx5e_create_encap_header_ipv4(struct mlx5e_priv *priv,
+ struct net_device *mirred_dev,
+ struct mlx5_encap_entry *e,
+ struct net_device **out_dev)
+{
+ int max_encap_size = MLX5_CAP_ESW(priv->mdev, max_encap_header_size);
+ struct flowi4 fl4 = {};
+ struct neighbour *n;
+ char *encap_header;
+ int encap_size;
+ __be32 saddr;
+ int ttl;
+ int err;
+
+ encap_header = kzalloc(max_encap_size, GFP_KERNEL);
+ if (!encap_header)
+ return -ENOMEM;
+
+ switch (e->tunnel_type) {
+ case MLX5_HEADER_TYPE_VXLAN:
+ fl4.flowi4_proto = IPPROTO_UDP;
+ fl4.fl4_dport = e->tun_info.tp_dst;
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ goto out;
+ }
+ fl4.daddr = e->tun_info.daddr;
+
+ err = mlx5e_route_lookup_ipv4(priv, mirred_dev, out_dev,
+ &fl4, &n, &saddr, &ttl);
+ if (err)
+ goto out;
+
+ e->n = n;
+ e->out_dev = *out_dev;
+
+ if (!(n->nud_state & NUD_VALID)) {
+ err = -ENOTSUPP;
+ goto out;
+ }
+
+ neigh_ha_snapshot(e->h_dest, n, *out_dev);
+
+ switch (e->tunnel_type) {
+ case MLX5_HEADER_TYPE_VXLAN:
+ encap_size = gen_vxlan_header_ipv4(*out_dev, encap_header,
+ e->h_dest, ttl,
+ e->tun_info.daddr,
+ saddr, e->tun_info.tp_dst,
+ e->tun_info.tun_id);
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ goto out;
+ }
+
+ err = mlx5_encap_alloc(priv->mdev, e->tunnel_type,
+ encap_size, encap_header, &e->encap_id);
+out:
+ kfree(encap_header);
+ return err;
+}
+
+static int mlx5e_attach_encap(struct mlx5e_priv *priv,
+ struct ip_tunnel_info *tun_info,
+ struct net_device *mirred_dev,
+ struct mlx5_esw_flow_attr *attr)
+{
+ struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+ unsigned short family = ip_tunnel_info_af(tun_info);
+ struct ip_tunnel_key *key = &tun_info->key;
+ struct mlx5_encap_info info = {}; /* zeroed so padding doesn't skew hash/memcmp */
+ struct mlx5_encap_entry *e;
+ struct net_device *out_dev;
+ uintptr_t hash_key;
+ bool found = false;
+ int tunnel_type;
+ int err;
+
+ /* a UDP tunnel destination port must be given */
+ if (!memchr_inv(&key->tp_dst, 0, sizeof(key->tp_dst)))
+ return -EOPNOTSUPP;
+
+ if (mlx5e_vxlan_lookup_port(priv, be16_to_cpu(key->tp_dst)) &&
+ MLX5_CAP_ESW(priv->mdev, vxlan_encap_decap)) {
+ info.tp_dst = key->tp_dst;
+ info.tun_id = tunnel_id_to_key32(key->tun_id);
+ tunnel_type = MLX5_HEADER_TYPE_VXLAN;
+ } else {
+ return -EOPNOTSUPP;
+ }
+
+ switch (family) {
+ case AF_INET:
+ info.daddr = key->u.ipv4.dst;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ hash_key = hash_encap_info(&info);
+
+ hash_for_each_possible_rcu(esw->offloads.encap_tbl, e,
+ encap_hlist, hash_key) {
+ if (!cmp_encap_info(&e->tun_info, &info)) {
+ found = true;
+ break;
+ }
+ }
+
+ if (found) {
+ attr->encap = e;
+ return 0;
+ }
+
+ e = kzalloc(sizeof(*e), GFP_KERNEL);
+ if (!e)
+ return -ENOMEM;
+
+ e->tun_info = info;
+ e->tunnel_type = tunnel_type;
+ INIT_LIST_HEAD(&e->flows);
+
+ err = mlx5e_create_encap_header_ipv4(priv, mirred_dev, e, &out_dev);
+ if (err)
+ goto out_err;
+
+ attr->encap = e;
+ hash_add_rcu(esw->offloads.encap_tbl, &e->encap_hlist, hash_key);
+
+ return err;
+
+out_err:
+ kfree(e);
+ return err;
+}
+
static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
- struct mlx5_esw_flow_attr *attr)
+ struct mlx5e_tc_flow *flow)
{
+ struct mlx5_esw_flow_attr *attr = flow->attr;
+ struct ip_tunnel_info *info = NULL;
const struct tc_action *a;
LIST_HEAD(actions);
+ bool encap = false;
+ int err;
if (tc_no_actions(exts))
return -EINVAL;
@@ -407,22 +874,44 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
continue;
}
- if (is_tcf_mirred_redirect(a)) {
+ if (is_tcf_mirred_egress_redirect(a)) {
int ifindex = tcf_mirred_ifindex(a);
struct net_device *out_dev;
struct mlx5e_priv *out_priv;
out_dev = __dev_get_by_index(dev_net(priv->netdev), ifindex);
- if (!switchdev_port_same_parent_id(priv->netdev, out_dev)) {
+ if (switchdev_port_same_parent_id(priv->netdev,
+ out_dev)) {
+ attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
+ MLX5_FLOW_CONTEXT_ACTION_COUNT;
+ out_priv = netdev_priv(out_dev);
+ attr->out_rep = out_priv->ppriv;
+ } else if (encap) {
+ err = mlx5e_attach_encap(priv, info,
+ out_dev, attr);
+ if (err)
+ return err;
+ list_add(&flow->encap, &attr->encap->flows);
+ attr->action |= MLX5_FLOW_CONTEXT_ACTION_ENCAP |
+ MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
+ MLX5_FLOW_CONTEXT_ACTION_COUNT;
+ out_priv = netdev_priv(attr->encap->out_dev);
+ attr->out_rep = out_priv->ppriv;
+ } else {
pr_err("devices %s %s not on same switch HW, can't offload forwarding\n",
priv->netdev->name, out_dev->name);
return -EINVAL;
}
+ continue;
+ }
- attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
- out_priv = netdev_priv(out_dev);
- attr->out_rep = out_priv->ppriv;
+ if (is_tcf_tunnel_set(a)) {
+ info = tcf_tunnel_info(a);
+ if (info)
+ encap = true;
+ else
+ return -EOPNOTSUPP;
continue;
}
@@ -439,6 +928,11 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
continue;
}
+ if (is_tcf_tunnel_release(a)) {
+ attr->action |= MLX5_FLOW_CONTEXT_ACTION_DECAP;
+ continue;
+ }
+
return -EINVAL;
}
return 0;
@@ -453,25 +947,17 @@ int mlx5e_configure_flower(struct mlx5e_priv *priv, __be16 protocol,
u32 flow_tag, action;
struct mlx5e_tc_flow *flow;
struct mlx5_flow_spec *spec;
- struct mlx5_flow_rule *old = NULL;
- struct mlx5_esw_flow_attr *old_attr = NULL;
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
if (esw && esw->mode == SRIOV_OFFLOADS)
fdb_flow = true;
- flow = rhashtable_lookup_fast(&tc->ht, &f->cookie,
- tc->ht_params);
- if (flow) {
- old = flow->rule;
- old_attr = flow->attr;
- } else {
- if (fdb_flow)
- flow = kzalloc(sizeof(*flow) + sizeof(struct mlx5_esw_flow_attr),
- GFP_KERNEL);
- else
- flow = kzalloc(sizeof(*flow), GFP_KERNEL);
- }
+ if (fdb_flow)
+ flow = kzalloc(sizeof(*flow) +
+ sizeof(struct mlx5_esw_flow_attr),
+ GFP_KERNEL);
+ else
+ flow = kzalloc(sizeof(*flow), GFP_KERNEL);
spec = mlx5_vzalloc(sizeof(*spec));
if (!spec || !flow) {
@@ -487,7 +973,7 @@ int mlx5e_configure_flower(struct mlx5e_priv *priv, __be16 protocol,
if (fdb_flow) {
flow->attr = (struct mlx5_esw_flow_attr *)(flow + 1);
- err = parse_tc_fdb_actions(priv, f->exts, flow->attr);
+ err = parse_tc_fdb_actions(priv, f->exts, flow);
if (err < 0)
goto err_free;
flow->rule = mlx5e_tc_add_fdb_flow(priv, spec, flow->attr);
@@ -508,17 +994,13 @@ int mlx5e_configure_flower(struct mlx5e_priv *priv, __be16 protocol,
if (err)
goto err_del_rule;
- if (old)
- mlx5e_tc_del_flow(priv, old, old_attr);
-
goto out;
err_del_rule:
- mlx5_del_flow_rule(flow->rule);
+ mlx5_del_flow_rules(flow->rule);
err_free:
- if (!old)
- kfree(flow);
+ kfree(flow);
out:
kvfree(spec);
return err;
@@ -537,7 +1019,8 @@ int mlx5e_delete_flower(struct mlx5e_priv *priv,
rhashtable_remove_fast(&tc->ht, &flow->node, tc->ht_params);
- mlx5e_tc_del_flow(priv, flow->rule, flow->attr);
+ mlx5e_tc_del_flow(priv, flow);
+
kfree(flow);
@@ -594,7 +1077,7 @@ static void _mlx5e_tc_del_flow(void *ptr, void *arg)
struct mlx5e_tc_flow *flow = ptr;
struct mlx5e_priv *priv = arg;
- mlx5e_tc_del_flow(priv, flow->rule, flow->attr);
+ mlx5e_tc_del_flow(priv, flow);
kfree(flow);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
index aaca09002ca6..8ffcc8808e50 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
@@ -139,6 +139,8 @@ static const char *eqe_type_str(u8 type)
return "MLX5_EVENT_TYPE_PORT_CHANGE";
case MLX5_EVENT_TYPE_GPIO_EVENT:
return "MLX5_EVENT_TYPE_GPIO_EVENT";
+ case MLX5_EVENT_TYPE_PORT_MODULE_EVENT:
+ return "MLX5_EVENT_TYPE_PORT_MODULE_EVENT";
case MLX5_EVENT_TYPE_REMOTE_CONFIG:
return "MLX5_EVENT_TYPE_REMOTE_CONFIG";
case MLX5_EVENT_TYPE_DB_BF_CONGESTION:
@@ -285,6 +287,11 @@ static int mlx5_eq_int(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
mlx5_eswitch_vport_event(dev->priv.eswitch, eqe);
break;
#endif
+
+ case MLX5_EVENT_TYPE_PORT_MODULE_EVENT:
+ mlx5_port_module_event(dev, eqe);
+ break;
+
default:
mlx5_core_warn(dev, "Unhandled event 0x%x on EQ 0x%x\n",
eqe->type, eq->eqn);
@@ -469,7 +476,7 @@ void mlx5_eq_cleanup(struct mlx5_core_dev *dev)
int mlx5_start_eqs(struct mlx5_core_dev *dev)
{
struct mlx5_eq_table *table = &dev->priv.eq_table;
- u32 async_event_mask = MLX5_ASYNC_EVENT_MASK;
+ u64 async_event_mask = MLX5_ASYNC_EVENT_MASK;
int err;
if (MLX5_CAP_GEN(dev, pg))
@@ -480,6 +487,11 @@ int mlx5_start_eqs(struct mlx5_core_dev *dev)
mlx5_core_is_pf(dev))
async_event_mask |= (1ull << MLX5_EVENT_TYPE_NIC_VPORT_CHANGE);
+ if (MLX5_CAP_GEN(dev, port_module_event))
+ async_event_mask |= (1ull << MLX5_EVENT_TYPE_PORT_MODULE_EVENT);
+ else
+ mlx5_core_dbg(dev, "port_module_event is not set\n");
+
err = mlx5_create_map_eq(dev, &table->cmd_eq, MLX5_EQ_VEC_CMD,
MLX5_NUM_CMD_EQE, 1ull << MLX5_EVENT_TYPE_CMD,
"mlx5_cmd_eq", &dev->priv.uuari.uars[0]);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
index be1f7333ab7f..d6807c3cc461 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -56,7 +56,7 @@ struct esw_uc_addr {
/* E-Switch MC FDB table hash node */
struct esw_mc_addr { /* SRIOV only */
struct l2addr_node node;
- struct mlx5_flow_rule *uplink_rule; /* Forward to uplink rule */
+ struct mlx5_flow_handle *uplink_rule; /* Forward to uplink rule */
u32 refcnt;
};
@@ -65,7 +65,7 @@ struct vport_addr {
struct l2addr_node node;
u8 action;
u32 vport;
- struct mlx5_flow_rule *flow_rule; /* SRIOV only */
+ struct mlx5_flow_handle *flow_rule; /* SRIOV only */
/* A flag indicating that mac was added due to mc promiscuous vport */
bool mc_promisc;
};
@@ -237,13 +237,14 @@ static void del_l2_table_entry(struct mlx5_core_dev *dev, u32 index)
}
/* E-Switch FDB */
-static struct mlx5_flow_rule *
+static struct mlx5_flow_handle *
__esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u32 vport, bool rx_rule,
u8 mac_c[ETH_ALEN], u8 mac_v[ETH_ALEN])
{
int match_header = (is_zero_ether_addr(mac_c) ? 0 :
MLX5_MATCH_OUTER_HEADERS);
- struct mlx5_flow_rule *flow_rule = NULL;
+ struct mlx5_flow_handle *flow_rule = NULL;
+ struct mlx5_flow_act flow_act = {0};
struct mlx5_flow_destination dest;
struct mlx5_flow_spec *spec;
void *mv_misc = NULL;
@@ -285,10 +286,10 @@ __esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u32 vport, bool rx_rule,
"\tFDB add rule dmac_v(%pM) dmac_c(%pM) -> vport(%d)\n",
dmac_v, dmac_c, vport);
spec->match_criteria_enable = match_header;
+ flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
flow_rule =
- mlx5_add_flow_rule(esw->fdb_table.fdb, spec,
- MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
- 0, &dest);
+ mlx5_add_flow_rules(esw->fdb_table.fdb, spec,
+ &flow_act, &dest, 1);
if (IS_ERR(flow_rule)) {
esw_warn(esw->dev,
"FDB: Failed to add flow rule: dmac_v(%pM) dmac_c(%pM) -> vport(%d), err(%ld)\n",
@@ -300,7 +301,7 @@ __esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u32 vport, bool rx_rule,
return flow_rule;
}
-static struct mlx5_flow_rule *
+static struct mlx5_flow_handle *
esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u8 mac[ETH_ALEN], u32 vport)
{
u8 mac_c[ETH_ALEN];
@@ -309,7 +310,7 @@ esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u8 mac[ETH_ALEN], u32 vport)
return __esw_fdb_set_vport_rule(esw, vport, false, mac_c, mac);
}
-static struct mlx5_flow_rule *
+static struct mlx5_flow_handle *
esw_fdb_set_vport_allmulti_rule(struct mlx5_eswitch *esw, u32 vport)
{
u8 mac_c[ETH_ALEN];
@@ -322,7 +323,7 @@ esw_fdb_set_vport_allmulti_rule(struct mlx5_eswitch *esw, u32 vport)
return __esw_fdb_set_vport_rule(esw, vport, false, mac_c, mac_v);
}
-static struct mlx5_flow_rule *
+static struct mlx5_flow_handle *
esw_fdb_set_vport_promisc_rule(struct mlx5_eswitch *esw, u32 vport)
{
u8 mac_c[ETH_ALEN];
@@ -361,7 +362,7 @@ static int esw_create_legacy_fdb_table(struct mlx5_eswitch *esw, int nvports)
memset(flow_group_in, 0, inlen);
table_size = BIT(MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size));
- fdb = mlx5_create_flow_table(root_ns, 0, table_size, 0);
+ fdb = mlx5_create_flow_table(root_ns, 0, table_size, 0, 0);
if (IS_ERR(fdb)) {
err = PTR_ERR(fdb);
esw_warn(dev, "Failed to create FDB Table err %d\n", err);
@@ -515,7 +516,7 @@ static int esw_del_uc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr)
del_l2_table_entry(esw->dev, esw_uc->table_index);
if (vaddr->flow_rule)
- mlx5_del_flow_rule(vaddr->flow_rule);
+ mlx5_del_flow_rules(vaddr->flow_rule);
vaddr->flow_rule = NULL;
l2addr_hash_del(esw_uc);
@@ -562,7 +563,7 @@ static void update_allmulti_vports(struct mlx5_eswitch *esw,
case MLX5_ACTION_DEL:
if (!iter_vaddr)
continue;
- mlx5_del_flow_rule(iter_vaddr->flow_rule);
+ mlx5_del_flow_rules(iter_vaddr->flow_rule);
l2addr_hash_del(iter_vaddr);
break;
}
@@ -632,7 +633,7 @@ static int esw_del_mc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr)
esw_mc->uplink_rule);
if (vaddr->flow_rule)
- mlx5_del_flow_rule(vaddr->flow_rule);
+ mlx5_del_flow_rules(vaddr->flow_rule);
vaddr->flow_rule = NULL;
/* If the multicast mac is added as a result of mc promiscuous vport,
@@ -645,7 +646,7 @@ static int esw_del_mc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr)
update_allmulti_vports(esw, vaddr, esw_mc);
if (esw_mc->uplink_rule)
- mlx5_del_flow_rule(esw_mc->uplink_rule);
+ mlx5_del_flow_rules(esw_mc->uplink_rule);
l2addr_hash_del(esw_mc);
return 0;
@@ -828,14 +829,14 @@ static void esw_apply_vport_rx_mode(struct mlx5_eswitch *esw, u32 vport_num,
UPLINK_VPORT);
allmulti_addr->refcnt++;
} else if (vport->allmulti_rule) {
- mlx5_del_flow_rule(vport->allmulti_rule);
+ mlx5_del_flow_rules(vport->allmulti_rule);
vport->allmulti_rule = NULL;
if (--allmulti_addr->refcnt > 0)
goto promisc;
if (allmulti_addr->uplink_rule)
- mlx5_del_flow_rule(allmulti_addr->uplink_rule);
+ mlx5_del_flow_rules(allmulti_addr->uplink_rule);
allmulti_addr->uplink_rule = NULL;
}
@@ -847,7 +848,7 @@ promisc:
vport->promisc_rule = esw_fdb_set_vport_promisc_rule(esw,
vport_num);
} else if (vport->promisc_rule) {
- mlx5_del_flow_rule(vport->promisc_rule);
+ mlx5_del_flow_rules(vport->promisc_rule);
vport->promisc_rule = NULL;
}
}
@@ -1018,10 +1019,10 @@ static void esw_vport_cleanup_egress_rules(struct mlx5_eswitch *esw,
struct mlx5_vport *vport)
{
if (!IS_ERR_OR_NULL(vport->egress.allowed_vlan))
- mlx5_del_flow_rule(vport->egress.allowed_vlan);
+ mlx5_del_flow_rules(vport->egress.allowed_vlan);
if (!IS_ERR_OR_NULL(vport->egress.drop_rule))
- mlx5_del_flow_rule(vport->egress.drop_rule);
+ mlx5_del_flow_rules(vport->egress.drop_rule);
vport->egress.allowed_vlan = NULL;
vport->egress.drop_rule = NULL;
@@ -1179,10 +1180,10 @@ static void esw_vport_cleanup_ingress_rules(struct mlx5_eswitch *esw,
struct mlx5_vport *vport)
{
if (!IS_ERR_OR_NULL(vport->ingress.drop_rule))
- mlx5_del_flow_rule(vport->ingress.drop_rule);
+ mlx5_del_flow_rules(vport->ingress.drop_rule);
if (!IS_ERR_OR_NULL(vport->ingress.allow_rule))
- mlx5_del_flow_rule(vport->ingress.allow_rule);
+ mlx5_del_flow_rules(vport->ingress.allow_rule);
vport->ingress.drop_rule = NULL;
vport->ingress.allow_rule = NULL;
@@ -1212,6 +1213,7 @@ static void esw_vport_disable_ingress_acl(struct mlx5_eswitch *esw,
static int esw_vport_ingress_config(struct mlx5_eswitch *esw,
struct mlx5_vport *vport)
{
+ struct mlx5_flow_act flow_act = {0};
struct mlx5_flow_spec *spec;
int err = 0;
u8 *smac_v;
@@ -1264,10 +1266,10 @@ static int esw_vport_ingress_config(struct mlx5_eswitch *esw,
}
spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+ flow_act.action = MLX5_FLOW_CONTEXT_ACTION_ALLOW;
vport->ingress.allow_rule =
- mlx5_add_flow_rule(vport->ingress.acl, spec,
- MLX5_FLOW_CONTEXT_ACTION_ALLOW,
- 0, NULL);
+ mlx5_add_flow_rules(vport->ingress.acl, spec,
+ &flow_act, NULL, 0);
if (IS_ERR(vport->ingress.allow_rule)) {
err = PTR_ERR(vport->ingress.allow_rule);
esw_warn(esw->dev,
@@ -1278,10 +1280,10 @@ static int esw_vport_ingress_config(struct mlx5_eswitch *esw,
}
memset(spec, 0, sizeof(*spec));
+ flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP;
vport->ingress.drop_rule =
- mlx5_add_flow_rule(vport->ingress.acl, spec,
- MLX5_FLOW_CONTEXT_ACTION_DROP,
- 0, NULL);
+ mlx5_add_flow_rules(vport->ingress.acl, spec,
+ &flow_act, NULL, 0);
if (IS_ERR(vport->ingress.drop_rule)) {
err = PTR_ERR(vport->ingress.drop_rule);
esw_warn(esw->dev,
@@ -1301,6 +1303,7 @@ out:
static int esw_vport_egress_config(struct mlx5_eswitch *esw,
struct mlx5_vport *vport)
{
+ struct mlx5_flow_act flow_act = {0};
struct mlx5_flow_spec *spec;
int err = 0;
@@ -1338,10 +1341,10 @@ static int esw_vport_egress_config(struct mlx5_eswitch *esw,
MLX5_SET(fte_match_param, spec->match_value, outer_headers.first_vid, vport->info.vlan);
spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+ flow_act.action = MLX5_FLOW_CONTEXT_ACTION_ALLOW;
vport->egress.allowed_vlan =
- mlx5_add_flow_rule(vport->egress.acl, spec,
- MLX5_FLOW_CONTEXT_ACTION_ALLOW,
- 0, NULL);
+ mlx5_add_flow_rules(vport->egress.acl, spec,
+ &flow_act, NULL, 0);
if (IS_ERR(vport->egress.allowed_vlan)) {
err = PTR_ERR(vport->egress.allowed_vlan);
esw_warn(esw->dev,
@@ -1353,10 +1356,10 @@ static int esw_vport_egress_config(struct mlx5_eswitch *esw,
/* Drop others rule (star rule) */
memset(spec, 0, sizeof(*spec));
+ flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP;
vport->egress.drop_rule =
- mlx5_add_flow_rule(vport->egress.acl, spec,
- MLX5_FLOW_CONTEXT_ACTION_DROP,
- 0, NULL);
+ mlx5_add_flow_rules(vport->egress.acl, spec,
+ &flow_act, NULL, 0);
if (IS_ERR(vport->egress.drop_rule)) {
err = PTR_ERR(vport->egress.drop_rule);
esw_warn(esw->dev,
@@ -1369,6 +1372,147 @@ out:
return err;
}
+/* Vport QoS management */
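+/* Rate limiting is modeled as a two-level scheduling hierarchy: one root
+ * TSAR (transmit scheduling arbiter) per e-switch with a vport scheduling
+ * element per enabled vport below it.
+ */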
+static int esw_create_tsar(struct mlx5_eswitch *esw)
+{
+ u32 tsar_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {0};
+ struct mlx5_core_dev *dev = esw->dev;
+ int err;
+
+ if (!MLX5_CAP_GEN(dev, qos) || !MLX5_CAP_QOS(dev, esw_scheduling))
+ return 0;
+
+ if (esw->qos.enabled)
+ return -EEXIST;
+
+ err = mlx5_create_scheduling_element_cmd(dev,
+ SCHEDULING_HIERARCHY_E_SWITCH,
+ &tsar_ctx,
+ &esw->qos.root_tsar_id);
+ if (err) {
+ esw_warn(esw->dev, "E-Switch create TSAR failed (%d)\n", err);
+ return err;
+ }
+
+ esw->qos.enabled = true;
+ return 0;
+}
+
+static void esw_destroy_tsar(struct mlx5_eswitch *esw)
+{
+ int err;
+
+ if (!esw->qos.enabled)
+ return;
+
+ err = mlx5_destroy_scheduling_element_cmd(esw->dev,
+ SCHEDULING_HIERARCHY_E_SWITCH,
+ esw->qos.root_tsar_id);
+ if (err)
+ esw_warn(esw->dev, "E-Switch destroy TSAR failed (%d)\n", err);
+
+ esw->qos.enabled = false;
+}
+
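+/* Add a vport scheduling element under the root TSAR, capping the vport's
+ * transmit rate at initial_max_rate; a no-op when e-switch QoS is disabled.
+ */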
+static int esw_vport_enable_qos(struct mlx5_eswitch *esw, int vport_num,
+ u32 initial_max_rate)
+{
+ u32 sched_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {0};
+ struct mlx5_vport *vport = &esw->vports[vport_num];
+ struct mlx5_core_dev *dev = esw->dev;
+ void *vport_elem;
+ int err = 0;
+
+ if (!esw->qos.enabled || !MLX5_CAP_GEN(dev, qos) ||
+ !MLX5_CAP_QOS(dev, esw_scheduling))
+ return 0;
+
+ if (vport->qos.enabled)
+ return -EEXIST;
+
+ MLX5_SET(scheduling_context, &sched_ctx, element_type,
+ SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT);
+ vport_elem = MLX5_ADDR_OF(scheduling_context, &sched_ctx,
+ element_attributes);
+ MLX5_SET(vport_element, vport_elem, vport_number, vport_num);
+ MLX5_SET(scheduling_context, &sched_ctx, parent_element_id,
+ esw->qos.root_tsar_id);
+ MLX5_SET(scheduling_context, &sched_ctx, max_average_bw,
+ initial_max_rate);
+
+ err = mlx5_create_scheduling_element_cmd(dev,
+ SCHEDULING_HIERARCHY_E_SWITCH,
+ &sched_ctx,
+ &vport->qos.esw_tsar_ix);
+ if (err) {
+ esw_warn(esw->dev, "E-Switch create TSAR vport element failed (vport=%d,err=%d)\n",
+ vport_num, err);
+ return err;
+ }
+
+ vport->qos.enabled = true;
+ return 0;
+}
+
+static void esw_vport_disable_qos(struct mlx5_eswitch *esw, int vport_num)
+{
+ struct mlx5_vport *vport = &esw->vports[vport_num];
+ int err = 0;
+
+ if (!vport->qos.enabled)
+ return;
+
+ err = mlx5_destroy_scheduling_element_cmd(esw->dev,
+ SCHEDULING_HIERARCHY_E_SWITCH,
+ vport->qos.esw_tsar_ix);
+ if (err)
+ esw_warn(esw->dev, "E-Switch destroy TSAR vport element failed (vport=%d,err=%d)\n",
+ vport_num, err);
+
+ vport->qos.enabled = false;
+}
+
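+/* Update max_average_bw of an existing vport element; the bitmask tells
+ * firmware which scheduling context fields are being modified.
+ */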
+static int esw_vport_qos_config(struct mlx5_eswitch *esw, int vport_num,
+ u32 max_rate)
+{
+ u32 sched_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {0};
+ struct mlx5_vport *vport = &esw->vports[vport_num];
+ struct mlx5_core_dev *dev = esw->dev;
+ void *vport_elem;
+ u32 bitmask = 0;
+ int err = 0;
+
+ if (!MLX5_CAP_GEN(dev, qos) || !MLX5_CAP_QOS(dev, esw_scheduling))
+ return -EOPNOTSUPP;
+
+ if (!vport->qos.enabled)
+ return -EIO;
+
+ MLX5_SET(scheduling_context, &sched_ctx, element_type,
+ SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT);
+ vport_elem = MLX5_ADDR_OF(scheduling_context, &sched_ctx,
+ element_attributes);
+ MLX5_SET(vport_element, vport_elem, vport_number, vport_num);
+ MLX5_SET(scheduling_context, &sched_ctx, parent_element_id,
+ esw->qos.root_tsar_id);
+ MLX5_SET(scheduling_context, &sched_ctx, max_average_bw,
+ max_rate);
+ bitmask |= MODIFY_SCHEDULING_ELEMENT_IN_MODIFY_BITMASK_MAX_AVERAGE_BW;
+
+ err = mlx5_modify_scheduling_element_cmd(dev,
+ SCHEDULING_HIERARCHY_E_SWITCH,
+ &sched_ctx,
+ vport->qos.esw_tsar_ix,
+ bitmask);
+ if (err) {
+ esw_warn(esw->dev, "E-Switch modify TSAR vport element failed (vport=%d,err=%d)\n",
+ vport_num, err);
+ return err;
+ }
+
+ return 0;
+}
+
static void node_guid_gen_from_mac(u64 *node_guid, u8 mac[ETH_ALEN])
{
((u8 *)node_guid)[7] = mac[0];
@@ -1404,6 +1548,7 @@ static void esw_apply_vport_conf(struct mlx5_eswitch *esw,
esw_vport_egress_config(esw, vport);
}
}
+
static void esw_enable_vport(struct mlx5_eswitch *esw, int vport_num,
int enable_events)
{
@@ -1417,6 +1562,10 @@ static void esw_enable_vport(struct mlx5_eswitch *esw, int vport_num,
/* Restore old vport configuration */
esw_apply_vport_conf(esw, vport);
+ /* Attach vport to the eswitch rate limiter */
+ if (esw_vport_enable_qos(esw, vport_num, vport->info.max_rate))
+ esw_warn(esw->dev, "Failed to attach vport %d to eswitch rate limiter", vport_num);
+
/* Sync with current vport context */
vport->enabled_events = enable_events;
vport->enabled = true;
@@ -1455,7 +1604,7 @@ static void esw_disable_vport(struct mlx5_eswitch *esw, int vport_num)
*/
esw_vport_change_handle_locked(vport);
vport->enabled_events = 0;
-
+ esw_vport_disable_qos(esw, vport_num);
if (vport_num && esw->mode == SRIOV_LEGACY) {
mlx5_modify_vport_admin_state(esw->dev,
MLX5_QUERY_VPORT_STATE_IN_OP_MOD_ESW_VPORT,
@@ -1501,6 +1650,10 @@ int mlx5_eswitch_enable_sriov(struct mlx5_eswitch *esw, int nvfs, int mode)
if (err)
goto abort;
+ err = esw_create_tsar(esw);
+ if (err)
+ esw_warn(esw->dev, "Failed to create eswitch TSAR");
+
enabled_events = (mode == SRIOV_LEGACY) ? SRIOV_VPORT_EVENTS : UC_ADDR_CHANGE;
for (i = 0; i <= nvfs; i++)
esw_enable_vport(esw, i, enabled_events);
@@ -1535,7 +1688,9 @@ void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw)
esw_disable_vport(esw, i);
if (mc_promisc && mc_promisc->uplink_rule)
- mlx5_del_flow_rule(mc_promisc->uplink_rule);
+ mlx5_del_flow_rules(mc_promisc->uplink_rule);
+
+ esw_destroy_tsar(esw);
if (esw->mode == SRIOV_LEGACY)
esw_destroy_legacy_fdb_table(esw);
@@ -1627,6 +1782,7 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev)
goto abort;
}
+ hash_init(esw->offloads.encap_tbl);
mutex_init(&esw->state_lock);
for (vport_num = 0; vport_num < total_vports; vport_num++) {
@@ -1642,6 +1798,7 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev)
esw->total_vports = total_vports;
esw->enabled_vports = 0;
esw->mode = SRIOV_NONE;
+ esw->offloads.inline_mode = MLX5_INLINE_MODE_NONE;
dev->priv.eswitch = esw;
return 0;
@@ -1795,6 +1952,7 @@ int mlx5_eswitch_get_vport_config(struct mlx5_eswitch *esw,
ivi->qos = evport->info.qos;
ivi->spoofchk = evport->info.spoofchk;
ivi->trusted = evport->info.trusted;
+ ivi->max_tx_rate = evport->info.max_rate;
mutex_unlock(&esw->state_lock);
return 0;
@@ -1888,6 +2046,27 @@ int mlx5_eswitch_set_vport_trust(struct mlx5_eswitch *esw,
return 0;
}
+int mlx5_eswitch_set_vport_rate(struct mlx5_eswitch *esw,
+ int vport, u32 max_rate)
+{
+ struct mlx5_vport *evport;
+ int err = 0;
+
+ if (!ESW_ALLOWED(esw))
+ return -EPERM;
+ if (!LEGAL_VPORT(esw, vport))
+ return -EINVAL;
+
+ mutex_lock(&esw->state_lock);
+ evport = &esw->vports[vport];
+ err = esw_vport_qos_config(esw, vport, max_rate);
+ if (!err)
+ evport->info.max_rate = max_rate;
+
+ mutex_unlock(&esw->state_lock);
+ return err;
+}
+
int mlx5_eswitch_get_vport_stats(struct mlx5_eswitch *esw,
int vport,
struct ifla_vf_stats *vf_stats)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
index 2e2938e08cda..8661dd3f542c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
@@ -97,16 +97,16 @@ struct vport_ingress {
struct mlx5_flow_group *allow_spoofchk_only_grp;
struct mlx5_flow_group *allow_untagged_only_grp;
struct mlx5_flow_group *drop_grp;
- struct mlx5_flow_rule *allow_rule;
- struct mlx5_flow_rule *drop_rule;
+ struct mlx5_flow_handle *allow_rule;
+ struct mlx5_flow_handle *drop_rule;
};
struct vport_egress {
struct mlx5_flow_table *acl;
struct mlx5_flow_group *allowed_vlans_grp;
struct mlx5_flow_group *drop_grp;
- struct mlx5_flow_rule *allowed_vlan;
- struct mlx5_flow_rule *drop_rule;
+ struct mlx5_flow_handle *allowed_vlan;
+ struct mlx5_flow_handle *drop_rule;
};
struct mlx5_vport_info {
@@ -115,6 +115,7 @@ struct mlx5_vport_info {
u8 qos;
u64 node_guid;
int link_state;
+ u32 max_rate;
bool spoofchk;
bool trusted;
};
@@ -124,8 +125,8 @@ struct mlx5_vport {
int vport;
struct hlist_head uc_list[MLX5_L2_ADDR_HASH_SIZE];
struct hlist_head mc_list[MLX5_L2_ADDR_HASH_SIZE];
- struct mlx5_flow_rule *promisc_rule;
- struct mlx5_flow_rule *allmulti_rule;
+ struct mlx5_flow_handle *promisc_rule;
+ struct mlx5_flow_handle *allmulti_rule;
struct work_struct vport_change_handler;
struct vport_ingress ingress;
@@ -133,6 +134,11 @@ struct mlx5_vport {
struct mlx5_vport_info info;
+ struct {
+ bool enabled;
+ u32 esw_tsar_ix;
+ } qos;
+
bool enabled;
u16 enabled_events;
};
@@ -156,7 +162,7 @@ struct mlx5_eswitch_fdb {
struct mlx5_flow_table *fdb;
struct mlx5_flow_group *send_to_vport_grp;
struct mlx5_flow_group *miss_grp;
- struct mlx5_flow_rule *miss_rule;
+ struct mlx5_flow_handle *miss_rule;
int vlan_push_pop_refcount;
} offloads;
};
@@ -169,7 +175,7 @@ enum {
};
struct mlx5_esw_sq {
- struct mlx5_flow_rule *send_to_vport_rule;
+ struct mlx5_flow_handle *send_to_vport_rule;
struct list_head list;
};
@@ -180,9 +186,9 @@ struct mlx5_eswitch_rep {
struct mlx5_eswitch_rep *rep);
u16 vport;
u8 hw_id[ETH_ALEN];
- void *priv_data;
+ struct net_device *netdev;
- struct mlx5_flow_rule *vport_rx_rule;
+ struct mlx5_flow_handle *vport_rx_rule;
struct list_head vport_sqs_list;
u16 vlan;
u32 vlan_refcount;
@@ -193,6 +199,8 @@ struct mlx5_esw_offload {
struct mlx5_flow_table *ft_offloads;
struct mlx5_flow_group *vport_rx_group;
struct mlx5_eswitch_rep *vport_reps;
+ DECLARE_HASHTABLE(encap_tbl, 8);
+ u8 inline_mode;
};
struct mlx5_eswitch {
@@ -209,6 +217,12 @@ struct mlx5_eswitch {
*/
struct mutex state_lock;
struct esw_mc_addr *mc_promisc;
+
+ struct {
+ bool enabled;
+ u32 root_tsar_id;
+ } qos;
+
struct mlx5_esw_offload offloads;
int mode;
};
@@ -234,6 +248,8 @@ int mlx5_eswitch_set_vport_spoofchk(struct mlx5_eswitch *esw,
int vport, bool spoofchk);
int mlx5_eswitch_set_vport_trust(struct mlx5_eswitch *esw,
int vport_num, bool setting);
+int mlx5_eswitch_set_vport_rate(struct mlx5_eswitch *esw,
+ int vport, u32 max_rate);
int mlx5_eswitch_get_vport_config(struct mlx5_eswitch *esw,
int vport, struct ifla_vf_info *ivi);
int mlx5_eswitch_get_vport_stats(struct mlx5_eswitch *esw,
@@ -243,11 +259,11 @@ int mlx5_eswitch_get_vport_stats(struct mlx5_eswitch *esw,
struct mlx5_flow_spec;
struct mlx5_esw_flow_attr;
-struct mlx5_flow_rule *
+struct mlx5_flow_handle *
mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
struct mlx5_flow_spec *spec,
struct mlx5_esw_flow_attr *attr);
-struct mlx5_flow_rule *
+struct mlx5_flow_handle *
mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, int vport, u32 tirn);
enum {
@@ -258,6 +274,24 @@ enum {
#define MLX5_FLOW_CONTEXT_ACTION_VLAN_POP 0x40
#define MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH 0x80
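+/* Tuple identifying a tunnel encapsulation; entries comparing equal share
+ * one device encap_id. The struct is hashed and memcmp()ed as raw bytes, so
+ * any padding must be zero-initialized by users.
+ */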
+struct mlx5_encap_info {
+ __be32 daddr;
+ __be32 tun_id;
+ __be16 tp_dst;
+};
+
+struct mlx5_encap_entry {
+ struct hlist_node encap_hlist;
+ struct list_head flows;
+ u32 encap_id;
+ struct neighbour *n;
+ struct mlx5_encap_info tun_info;
+ unsigned char h_dest[ETH_ALEN]; /* destination eth addr */
+
+ struct net_device *out_dev;
+ int tunnel_type;
+};
+
struct mlx5_esw_flow_attr {
struct mlx5_eswitch_rep *in_rep;
struct mlx5_eswitch_rep *out_rep;
@@ -265,6 +299,7 @@ struct mlx5_esw_flow_attr {
int action;
u16 vlan;
bool vlan_handled;
+ struct mlx5_encap_entry *encap;
};
int mlx5_eswitch_sqs2vport_start(struct mlx5_eswitch *esw,
@@ -275,11 +310,15 @@ void mlx5_eswitch_sqs2vport_stop(struct mlx5_eswitch *esw,
int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode);
int mlx5_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode);
+int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode);
+int mlx5_devlink_eswitch_inline_mode_get(struct devlink *devlink, u8 *mode);
+int mlx5_eswitch_inline_mode_get(struct mlx5_eswitch *esw, int nvfs, u8 *mode);
void mlx5_eswitch_register_vport_rep(struct mlx5_eswitch *esw,
int vport_index,
struct mlx5_eswitch_rep *rep);
void mlx5_eswitch_unregister_vport_rep(struct mlx5_eswitch *esw,
int vport_index);
+struct net_device *mlx5_eswitch_get_uplink_netdev(struct mlx5_eswitch *esw);
int mlx5_eswitch_add_vlan_action(struct mlx5_eswitch *esw,
struct mlx5_esw_flow_attr *attr);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index d239f5d0ea36..466e161010f7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -43,33 +43,36 @@ enum {
FDB_SLOW_PATH
};
-struct mlx5_flow_rule *
+struct mlx5_flow_handle *
mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
struct mlx5_flow_spec *spec,
struct mlx5_esw_flow_attr *attr)
{
- struct mlx5_flow_destination dest = { 0 };
+ struct mlx5_flow_destination dest[2] = {};
+ struct mlx5_flow_act flow_act = {0};
struct mlx5_fc *counter = NULL;
- struct mlx5_flow_rule *rule;
+ struct mlx5_flow_handle *rule;
void *misc;
- int action;
+ int i = 0;
if (esw->mode != SRIOV_OFFLOADS)
return ERR_PTR(-EOPNOTSUPP);
/* per flow vlan pop/push is emulated, don't set that into the firmware */
- action = attr->action & ~(MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH | MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);
+ flow_act.action = attr->action & ~(MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH | MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);
- if (action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
- dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
- dest.vport_num = attr->out_rep->vport;
- action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
- } else if (action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
+ if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
+ dest[i].type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
+ dest[i].vport_num = attr->out_rep->vport;
+ i++;
+ }
+ if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
counter = mlx5_fc_create(esw->dev, true);
if (IS_ERR(counter))
return ERR_CAST(counter);
- dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
- dest.counter = counter;
+ dest[i].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
+ dest[i].counter = counter;
+ i++;
}
misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
@@ -80,10 +83,14 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS |
MLX5_MATCH_MISC_PARAMETERS;
+ if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_DECAP)
+ spec->match_criteria_enable |= MLX5_MATCH_INNER_HEADERS;
- rule = mlx5_add_flow_rule((struct mlx5_flow_table *)esw->fdb_table.fdb,
- spec, action, 0, &dest);
+ if (attr->encap)
+ flow_act.encap_id = attr->encap->encap_id;
+ rule = mlx5_add_flow_rules((struct mlx5_flow_table *)esw->fdb_table.fdb,
+ spec, &flow_act, dest, i);
if (IS_ERR(rule))
mlx5_fc_destroy(esw->dev, counter);
@@ -270,11 +277,12 @@ out:
return err;
}
-static struct mlx5_flow_rule *
+static struct mlx5_flow_handle *
mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *esw, int vport, u32 sqn)
{
+ struct mlx5_flow_act flow_act = {0};
struct mlx5_flow_destination dest;
- struct mlx5_flow_rule *flow_rule;
+ struct mlx5_flow_handle *flow_rule;
struct mlx5_flow_spec *spec;
void *misc;
@@ -296,10 +304,10 @@ mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *esw, int vport, u32 sqn
spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
dest.vport_num = vport;
+ flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
- flow_rule = mlx5_add_flow_rule(esw->fdb_table.offloads.fdb, spec,
- MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
- 0, &dest);
+ flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.fdb, spec,
+ &flow_act, &dest, 1);
if (IS_ERR(flow_rule))
esw_warn(esw->dev, "FDB: Failed to add send to vport rule err %ld\n", PTR_ERR(flow_rule));
out:
@@ -316,7 +324,7 @@ void mlx5_eswitch_sqs2vport_stop(struct mlx5_eswitch *esw,
return;
list_for_each_entry_safe(esw_sq, tmp, &rep->vport_sqs_list, list) {
- mlx5_del_flow_rule(esw_sq->send_to_vport_rule);
+ mlx5_del_flow_rules(esw_sq->send_to_vport_rule);
list_del(&esw_sq->list);
kfree(esw_sq);
}
@@ -326,7 +334,7 @@ int mlx5_eswitch_sqs2vport_start(struct mlx5_eswitch *esw,
struct mlx5_eswitch_rep *rep,
u16 *sqns_array, int sqns_num)
{
- struct mlx5_flow_rule *flow_rule;
+ struct mlx5_flow_handle *flow_rule;
struct mlx5_esw_sq *esw_sq;
int err;
int i;
@@ -362,8 +370,9 @@ out_err:
static int esw_add_fdb_miss_rule(struct mlx5_eswitch *esw)
{
+ struct mlx5_flow_act flow_act = {0};
struct mlx5_flow_destination dest;
- struct mlx5_flow_rule *flow_rule = NULL;
+ struct mlx5_flow_handle *flow_rule = NULL;
struct mlx5_flow_spec *spec;
int err = 0;
@@ -376,10 +385,10 @@ static int esw_add_fdb_miss_rule(struct mlx5_eswitch *esw)
dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
dest.vport_num = 0;
+ flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
- flow_rule = mlx5_add_flow_rule(esw->fdb_table.offloads.fdb, spec,
- MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
- 0, &dest);
+ flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.fdb, spec,
+ &flow_act, &dest, 1);
if (IS_ERR(flow_rule)) {
err = PTR_ERR(flow_rule);
esw_warn(esw->dev, "FDB: Failed to add miss flow rule err %d\n", err);
@@ -406,6 +415,7 @@ static int esw_create_offloads_fdb_table(struct mlx5_eswitch *esw, int nvports)
u32 *flow_group_in;
void *match_criteria;
int table_size, ix, err = 0;
+ u32 flags = 0;
flow_group_in = mlx5_vzalloc(inlen);
if (!flow_group_in)
@@ -420,9 +430,14 @@ static int esw_create_offloads_fdb_table(struct mlx5_eswitch *esw, int nvports)
esw_debug(dev, "Create offloads FDB table, log_max_size(%d)\n",
MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size));
+ if (MLX5_CAP_ESW_FLOWTABLE_FDB(dev, encap) &&
+ MLX5_CAP_ESW_FLOWTABLE_FDB(dev, decap))
+ flags |= MLX5_FLOW_TABLE_TUNNEL_EN;
+
fdb = mlx5_create_auto_grouped_flow_table(root_ns, FDB_FAST_PATH,
ESW_OFFLOADS_NUM_ENTRIES,
- ESW_OFFLOADS_NUM_GROUPS, 0);
+ ESW_OFFLOADS_NUM_GROUPS, 0,
+ flags);
if (IS_ERR(fdb)) {
err = PTR_ERR(fdb);
esw_warn(dev, "Failed to create Fast path FDB Table err %d\n", err);
@@ -431,7 +446,7 @@ static int esw_create_offloads_fdb_table(struct mlx5_eswitch *esw, int nvports)
esw->fdb_table.fdb = fdb;
table_size = nvports + MAX_PF_SQ + 1;
- fdb = mlx5_create_flow_table(root_ns, FDB_SLOW_PATH, table_size, 0);
+ fdb = mlx5_create_flow_table(root_ns, FDB_SLOW_PATH, table_size, 0, 0);
if (IS_ERR(fdb)) {
err = PTR_ERR(fdb);
esw_warn(dev, "Failed to create slow path FDB Table err %d\n", err);
@@ -502,7 +517,7 @@ static void esw_destroy_offloads_fdb_table(struct mlx5_eswitch *esw)
return;
esw_debug(esw->dev, "Destroy offloads FDB Table\n");
- mlx5_del_flow_rule(esw->fdb_table.offloads.miss_rule);
+ mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule);
mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp);
mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp);
@@ -523,7 +538,7 @@ static int esw_create_offloads_table(struct mlx5_eswitch *esw)
return -ENOMEM;
}
- ft_offloads = mlx5_create_flow_table(ns, 0, dev->priv.sriov.num_vfs + 2, 0);
+ ft_offloads = mlx5_create_flow_table(ns, 0, dev->priv.sriov.num_vfs + 2, 0, 0);
if (IS_ERR(ft_offloads)) {
err = PTR_ERR(ft_offloads);
esw_warn(esw->dev, "Failed to create offloads table, err %d\n", err);
@@ -586,11 +601,12 @@ static void esw_destroy_vport_rx_group(struct mlx5_eswitch *esw)
mlx5_destroy_flow_group(esw->offloads.vport_rx_group);
}
-struct mlx5_flow_rule *
+struct mlx5_flow_handle *
mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, int vport, u32 tirn)
{
+ struct mlx5_flow_act flow_act = {0};
struct mlx5_flow_destination dest;
- struct mlx5_flow_rule *flow_rule;
+ struct mlx5_flow_handle *flow_rule;
struct mlx5_flow_spec *spec;
void *misc;
@@ -611,9 +627,9 @@ mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, int vport, u32 tirn)
dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
dest.tir_num = tirn;
- flow_rule = mlx5_add_flow_rule(esw->offloads.ft_offloads, spec,
- MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
- 0, &dest);
+ flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+ flow_rule = mlx5_add_flow_rules(esw->offloads.ft_offloads, spec,
+ &flow_act, &dest, 1);
if (IS_ERR(flow_rule)) {
esw_warn(esw->dev, "fs offloads: Failed to add vport rx rule err %ld\n", PTR_ERR(flow_rule));
goto out;
@@ -641,6 +657,14 @@ static int esw_offloads_start(struct mlx5_eswitch *esw)
if (err1)
esw_warn(esw->dev, "Failed setting eswitch back to legacy, err %d\n", err);
}
+ if (esw->offloads.inline_mode == MLX5_INLINE_MODE_NONE) {
+ if (mlx5_eswitch_inline_mode_get(esw,
+ num_vfs,
+ &esw->offloads.inline_mode)) {
+ esw->offloads.inline_mode = MLX5_INLINE_MODE_L2;
+ esw_warn(esw->dev, "Inline mode is different between vports\n");
+ }
+ }
return err;
}
@@ -755,6 +779,50 @@ static int esw_mode_to_devlink(u16 mlx5_mode, u16 *mode)
return 0;
}
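+/* Translate between the generic devlink inline-mode values and the mlx5
+ * min-inline modes programmed into the NIC vport context.
+ */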
+static int esw_inline_mode_from_devlink(u8 mode, u8 *mlx5_mode)
+{
+ switch (mode) {
+ case DEVLINK_ESWITCH_INLINE_MODE_NONE:
+ *mlx5_mode = MLX5_INLINE_MODE_NONE;
+ break;
+ case DEVLINK_ESWITCH_INLINE_MODE_LINK:
+ *mlx5_mode = MLX5_INLINE_MODE_L2;
+ break;
+ case DEVLINK_ESWITCH_INLINE_MODE_NETWORK:
+ *mlx5_mode = MLX5_INLINE_MODE_IP;
+ break;
+ case DEVLINK_ESWITCH_INLINE_MODE_TRANSPORT:
+ *mlx5_mode = MLX5_INLINE_MODE_TCP_UDP;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int esw_inline_mode_to_devlink(u8 mlx5_mode, u8 *mode)
+{
+ switch (mlx5_mode) {
+ case MLX5_INLINE_MODE_NONE:
+ *mode = DEVLINK_ESWITCH_INLINE_MODE_NONE;
+ break;
+ case MLX5_INLINE_MODE_L2:
+ *mode = DEVLINK_ESWITCH_INLINE_MODE_LINK;
+ break;
+ case MLX5_INLINE_MODE_IP:
+ *mode = DEVLINK_ESWITCH_INLINE_MODE_NETWORK;
+ break;
+ case MLX5_INLINE_MODE_TCP_UDP:
+ *mode = DEVLINK_ESWITCH_INLINE_MODE_TRANSPORT;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode)
{
struct mlx5_core_dev *dev;
@@ -799,6 +867,95 @@ int mlx5_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode)
return esw_mode_to_devlink(dev->priv.eswitch->mode, mode);
}
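+/* Apply the requested minimal WQE inline mode to all VF vports; on a
+ * per-vport failure, vports already updated are rolled back to the
+ * previous mode.
+ */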
+int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode)
+{
+ struct mlx5_core_dev *dev = devlink_priv(devlink);
+ struct mlx5_eswitch *esw = dev->priv.eswitch;
+ int num_vports = esw->enabled_vports;
+ int err;
+ int vport;
+ u8 mlx5_mode;
+
+ if (!MLX5_CAP_GEN(dev, vport_group_manager))
+ return -EOPNOTSUPP;
+
+ if (esw->mode == SRIOV_NONE)
+ return -EOPNOTSUPP;
+
+ if (MLX5_CAP_ETH(dev, wqe_inline_mode) !=
+ MLX5_CAP_INLINE_MODE_VPORT_CONTEXT)
+ return -EOPNOTSUPP;
+
+ err = esw_inline_mode_from_devlink(mode, &mlx5_mode);
+ if (err)
+ goto out;
+
+ for (vport = 1; vport < num_vports; vport++) {
+ err = mlx5_modify_nic_vport_min_inline(dev, vport, mlx5_mode);
+ if (err) {
+ esw_warn(dev, "Failed to set min inline on vport %d\n",
+ vport);
+ goto revert_inline_mode;
+ }
+ }
+
+ esw->offloads.inline_mode = mlx5_mode;
+ return 0;
+
+revert_inline_mode:
+ while (--vport > 0)
+ mlx5_modify_nic_vport_min_inline(dev,
+ vport,
+ esw->offloads.inline_mode);
+out:
+ return err;
+}
+
+int mlx5_devlink_eswitch_inline_mode_get(struct devlink *devlink, u8 *mode)
+{
+ struct mlx5_core_dev *dev = devlink_priv(devlink);
+ struct mlx5_eswitch *esw = dev->priv.eswitch;
+
+ if (!MLX5_CAP_GEN(dev, vport_group_manager))
+ return -EOPNOTSUPP;
+
+ if (esw->mode == SRIOV_NONE)
+ return -EOPNOTSUPP;
+
+ if (MLX5_CAP_ETH(dev, wqe_inline_mode) !=
+ MLX5_CAP_INLINE_MODE_VPORT_CONTEXT)
+ return -EOPNOTSUPP;
+
+ return esw_inline_mode_to_devlink(esw->offloads.inline_mode, mode);
+}
+
+int mlx5_eswitch_inline_mode_get(struct mlx5_eswitch *esw, int nvfs, u8 *mode)
+{
+ struct mlx5_core_dev *dev = esw->dev;
+ int vport;
+ u8 prev_mlx5_mode, mlx5_mode = MLX5_INLINE_MODE_L2;
+
+ if (!MLX5_CAP_GEN(dev, vport_group_manager))
+ return -EOPNOTSUPP;
+
+ if (esw->mode == SRIOV_NONE)
+ return -EOPNOTSUPP;
+
+ if (MLX5_CAP_ETH(dev, wqe_inline_mode) !=
+ MLX5_CAP_INLINE_MODE_VPORT_CONTEXT)
+ return -EOPNOTSUPP;
+
+ for (vport = 1; vport <= nvfs; vport++) {
+ mlx5_query_nic_vport_min_inline(dev, vport, &mlx5_mode);
+ if (vport > 1 && prev_mlx5_mode != mlx5_mode)
+ return -EINVAL;
+ prev_mlx5_mode = mlx5_mode;
+ }
+
+ *mode = mlx5_mode;
+ return 0;
+}
+
void mlx5_eswitch_register_vport_rep(struct mlx5_eswitch *esw,
int vport_index,
struct mlx5_eswitch_rep *__rep)
@@ -813,7 +970,7 @@ void mlx5_eswitch_register_vport_rep(struct mlx5_eswitch *esw,
rep->load = __rep->load;
rep->unload = __rep->unload;
rep->vport = __rep->vport;
- rep->priv_data = __rep->priv_data;
+ rep->netdev = __rep->netdev;
ether_addr_copy(rep->hw_id, __rep->hw_id);
INIT_LIST_HEAD(&rep->vport_sqs_list);
@@ -833,3 +990,13 @@ void mlx5_eswitch_unregister_vport_rep(struct mlx5_eswitch *esw,
rep->valid = false;
}
+
+struct net_device *mlx5_eswitch_get_uplink_netdev(struct mlx5_eswitch *esw)
+{
+#define UPLINK_REP_INDEX 0
+ struct mlx5_esw_offload *offloads = &esw->offloads;
+ struct mlx5_eswitch_rep *rep;
+
+ rep = &offloads->vport_reps[UPLINK_REP_INDEX];
+ return rep->netdev;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
index 113c32326333..c4478ecd8056 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
@@ -37,6 +37,7 @@
#include "fs_core.h"
#include "fs_cmd.h"
#include "mlx5_core.h"
+#include "eswitch.h"
int mlx5_cmd_update_root_ft(struct mlx5_core_dev *dev,
struct mlx5_flow_table *ft)
@@ -61,8 +62,9 @@ int mlx5_cmd_create_flow_table(struct mlx5_core_dev *dev,
enum fs_flow_table_op_mod op_mod,
enum fs_flow_table_type type, unsigned int level,
unsigned int log_size, struct mlx5_flow_table
- *next_ft, unsigned int *table_id)
+ *next_ft, unsigned int *table_id, u32 flags)
{
+ int en_encap_decap = !!(flags & MLX5_FLOW_TABLE_TUNNEL_EN);
u32 out[MLX5_ST_SZ_DW(create_flow_table_out)] = {0};
u32 in[MLX5_ST_SZ_DW(create_flow_table_in)] = {0};
int err;
@@ -78,6 +80,9 @@ int mlx5_cmd_create_flow_table(struct mlx5_core_dev *dev,
MLX5_SET(create_flow_table_in, in, other_vport, 1);
}
+ MLX5_SET(create_flow_table_in, in, decap_en, en_encap_decap);
+ MLX5_SET(create_flow_table_in, in, encap_en, en_encap_decap);
+
switch (op_mod) {
case FS_FT_OP_MOD_NORMAL:
if (next_ft) {
@@ -243,6 +248,7 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev,
MLX5_SET(flow_context, in_flow_context, group_id, group_id);
MLX5_SET(flow_context, in_flow_context, flow_tag, fte->flow_tag);
MLX5_SET(flow_context, in_flow_context, action, fte->action);
+ MLX5_SET(flow_context, in_flow_context, encap_id, fte->encap_id);
in_match_value = MLX5_ADDR_OF(flow_context, in_flow_context,
match_value);
memcpy(in_match_value, &fte->val, MLX5_ST_SZ_BYTES(fte_match_param));
@@ -453,27 +459,32 @@ void mlx5_cmd_fc_bulk_get(struct mlx5_core_dev *dev,
*bytes = MLX5_GET64(traffic_counter, stats, octets);
}
-#define MAX_ENCAP_SIZE (128)
-
-int mlx5_cmd_alloc_encap(struct mlx5_core_dev *dev,
- int header_type,
- size_t size,
- void *encap_header,
- u32 *encap_id)
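+/* The command input is sized from the device's max_encap_header_size
+ * capability and allocated dynamically rather than using a fixed
+ * on-stack maximum.
+ */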
+int mlx5_encap_alloc(struct mlx5_core_dev *dev,
+ int header_type,
+ size_t size,
+ void *encap_header,
+ u32 *encap_id)
{
+ int max_encap_size = MLX5_CAP_ESW(dev, max_encap_header_size);
u32 out[MLX5_ST_SZ_DW(alloc_encap_header_out)];
- u32 in[MLX5_ST_SZ_DW(alloc_encap_header_in) +
- (MAX_ENCAP_SIZE / sizeof(u32))];
- void *encap_header_in = MLX5_ADDR_OF(alloc_encap_header_in, in,
- encap_header);
- void *header = MLX5_ADDR_OF(encap_header_in, encap_header_in,
- encap_header);
- int inlen = header - (void *)in + size;
+ void *encap_header_in;
+ void *header;
+ int inlen;
int err;
+ u32 *in;
- if (size > MAX_ENCAP_SIZE)
+ if (size > MLX5_CAP_ESW(dev, max_encap_header_size))
return -EINVAL;
+ in = kzalloc(MLX5_ST_SZ_BYTES(alloc_encap_header_in) + max_encap_size,
+ GFP_KERNEL);
+ if (!in)
+ return -ENOMEM;
+
+ encap_header_in = MLX5_ADDR_OF(alloc_encap_header_in, in, encap_header);
+ header = MLX5_ADDR_OF(encap_header_in, encap_header_in, encap_header);
+ inlen = header - (void *)in + size;
+
memset(in, 0, inlen);
MLX5_SET(alloc_encap_header_in, in, opcode,
MLX5_CMD_OP_ALLOC_ENCAP_HEADER);
@@ -485,10 +496,11 @@ int mlx5_cmd_alloc_encap(struct mlx5_core_dev *dev,
err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
*encap_id = MLX5_GET(alloc_encap_header_out, out, encap_id);
+ kfree(in);
return err;
}
-void mlx5_cmd_dealloc_encap(struct mlx5_core_dev *dev, u32 encap_id)
+void mlx5_encap_dealloc(struct mlx5_core_dev *dev, u32 encap_id)
{
u32 in[MLX5_ST_SZ_DW(dealloc_encap_header_in)];
u32 out[MLX5_ST_SZ_DW(dealloc_encap_header_out)];
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h
index c5bc4686c832..8fad80688536 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h
@@ -38,7 +38,7 @@ int mlx5_cmd_create_flow_table(struct mlx5_core_dev *dev,
enum fs_flow_table_op_mod op_mod,
enum fs_flow_table_type type, unsigned int level,
unsigned int log_size, struct mlx5_flow_table
- *next_ft, unsigned int *table_id);
+ *next_ft, unsigned int *table_id, u32 flags);
int mlx5_cmd_destroy_flow_table(struct mlx5_core_dev *dev,
struct mlx5_flow_table *ft);
@@ -89,11 +89,4 @@ void mlx5_cmd_fc_bulk_get(struct mlx5_core_dev *dev,
struct mlx5_cmd_fc_bulk *b, u16 id,
u64 *packets, u64 *bytes);
-int mlx5_cmd_alloc_encap(struct mlx5_core_dev *dev,
- int header_type,
- size_t size,
- void *encap_header,
- u32 *encap_id);
-void mlx5_cmd_dealloc_encap(struct mlx5_core_dev *dev, u32 encap_id);
-
#endif
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index 914e5466f729..a263d8904a4c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -153,6 +153,11 @@ static void del_rule(struct fs_node *node);
static void del_flow_table(struct fs_node *node);
static void del_flow_group(struct fs_node *node);
static void del_fte(struct fs_node *node);
+static bool mlx5_flow_dests_cmp(struct mlx5_flow_destination *d1,
+ struct mlx5_flow_destination *d2);
+static struct mlx5_flow_rule *
+find_flow_rule(struct fs_fte *fte,
+ struct mlx5_flow_destination *dest);
static void tree_init_node(struct fs_node *node,
unsigned int refcount,
@@ -369,6 +374,7 @@ static void del_rule(struct fs_node *node)
struct mlx5_core_dev *dev = get_dev(node);
int match_len = MLX5_ST_SZ_BYTES(fte_match_param);
int err;
+ bool update_fte = false;
match_value = mlx5_vzalloc(match_len);
if (!match_value) {
@@ -387,13 +393,23 @@ static void del_rule(struct fs_node *node)
list_del(&rule->next_ft);
mutex_unlock(&rule->dest_attr.ft->lock);
}
+
+ if (rule->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_COUNTER &&
+ --fte->dests_size) {
+ modify_mask = BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_ACTION);
+ fte->action &= ~MLX5_FLOW_CONTEXT_ACTION_COUNT;
+ update_fte = true;
+ goto out;
+ }
+
if ((fte->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) &&
--fte->dests_size) {
modify_mask = BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_DESTINATION_LIST),
- err = mlx5_cmd_update_fte(dev, ft,
- fg->id,
- modify_mask,
- fte);
+ update_fte = true;
+ }
+out:
+ if (update_fte && fte->dests_size) {
+ err = mlx5_cmd_update_fte(dev, ft, fg->id, modify_mask, fte);
if (err)
mlx5_core_warn(dev,
"%s can't del rule fg id=%d fte_index=%d\n",
@@ -444,8 +460,7 @@ static void del_flow_group(struct fs_node *node)
fg->id, ft->id);
}
-static struct fs_fte *alloc_fte(u8 action,
- u32 flow_tag,
+static struct fs_fte *alloc_fte(struct mlx5_flow_act *flow_act,
u32 *match_value,
unsigned int index)
{
@@ -457,9 +472,10 @@ static struct fs_fte *alloc_fte(u8 action,
memcpy(fte->val, match_value, sizeof(fte->val));
fte->node.type = FS_TYPE_FLOW_ENTRY;
- fte->flow_tag = flow_tag;
+ fte->flow_tag = flow_act->flow_tag;
fte->index = index;
- fte->action = action;
+ fte->action = flow_act->action;
+ fte->encap_id = flow_act->encap_id;
return fte;
}
@@ -489,7 +505,8 @@ static struct mlx5_flow_group *alloc_flow_group(u32 *create_fg_in)
static struct mlx5_flow_table *alloc_flow_table(int level, u16 vport, int max_fte,
enum fs_flow_table_type table_type,
- enum fs_flow_table_op_mod op_mod)
+ enum fs_flow_table_op_mod op_mod,
+ u32 flags)
{
struct mlx5_flow_table *ft;
@@ -503,6 +520,7 @@ static struct mlx5_flow_table *alloc_flow_table(int level, u16 vport, int max_ft
ft->type = table_type;
ft->vport = vport;
ft->max_fte = max_fte;
+ ft->flags = flags;
INIT_LIST_HEAD(&ft->fwd_rules);
mutex_init(&ft->lock);
@@ -641,8 +659,8 @@ static int update_root_ft_create(struct mlx5_flow_table *ft, struct fs_prio
return err;
}
-int mlx5_modify_rule_destination(struct mlx5_flow_rule *rule,
- struct mlx5_flow_destination *dest)
+static int _mlx5_modify_rule_destination(struct mlx5_flow_rule *rule,
+ struct mlx5_flow_destination *dest)
{
struct mlx5_flow_table *ft;
struct mlx5_flow_group *fg;
@@ -667,6 +685,28 @@ int mlx5_modify_rule_destination(struct mlx5_flow_rule *rule,
return err;
}
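+/* Modify one destination of a handle. Without old_dest the handle must hold
+ * exactly one rule; otherwise the rule currently carrying the matching
+ * destination is repointed at new_dest.
+ */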
+int mlx5_modify_rule_destination(struct mlx5_flow_handle *handle,
+ struct mlx5_flow_destination *new_dest,
+ struct mlx5_flow_destination *old_dest)
+{
+ int i;
+
+ if (!old_dest) {
+ if (handle->num_rules != 1)
+ return -EINVAL;
+ return _mlx5_modify_rule_destination(handle->rule[0],
+ new_dest);
+ }
+
+ for (i = 0; i < handle->num_rules; i++) {
+ if (mlx5_flow_dests_cmp(old_dest, &handle->rule[i]->dest_attr))
+ return _mlx5_modify_rule_destination(handle->rule[i],
+ new_dest);
+ }
+
+ return -EINVAL;
+}
+
/* Modify/set FWD rules that point on old_next_ft to point on new_next_ft */
static int connect_fwd_rules(struct mlx5_core_dev *dev,
struct mlx5_flow_table *new_next_ft,
@@ -689,7 +729,7 @@ static int connect_fwd_rules(struct mlx5_core_dev *dev,
list_splice_init(&old_next_ft->fwd_rules, &new_next_ft->fwd_rules);
mutex_unlock(&old_next_ft->lock);
list_for_each_entry(iter, &new_next_ft->fwd_rules, next_ft) {
- err = mlx5_modify_rule_destination(iter, &dest);
+ err = _mlx5_modify_rule_destination(iter, &dest);
if (err)
pr_err("mlx5_core: failed to modify rule to point on flow table %d\n",
new_next_ft->id);
@@ -739,7 +779,8 @@ static void list_add_flow_table(struct mlx5_flow_table *ft,
static struct mlx5_flow_table *__mlx5_create_flow_table(struct mlx5_flow_namespace *ns,
enum fs_flow_table_op_mod op_mod,
u16 vport, int prio,
- int max_fte, u32 level)
+ int max_fte, u32 level,
+ u32 flags)
{
struct mlx5_flow_table *next_ft = NULL;
struct mlx5_flow_table *ft;
@@ -772,7 +813,7 @@ static struct mlx5_flow_table *__mlx5_create_flow_table(struct mlx5_flow_namespa
vport,
max_fte ? roundup_pow_of_two(max_fte) : 0,
root->table_type,
- op_mod);
+ op_mod, flags);
if (!ft) {
err = -ENOMEM;
goto unlock_root;
@@ -782,7 +823,8 @@ static struct mlx5_flow_table *__mlx5_create_flow_table(struct mlx5_flow_namespa
log_table_sz = ft->max_fte ? ilog2(ft->max_fte) : 0;
next_ft = find_next_chained_ft(fs_prio);
err = mlx5_cmd_create_flow_table(root->dev, ft->vport, ft->op_mod, ft->type,
- ft->level, log_table_sz, next_ft, &ft->id);
+ ft->level, log_table_sz, next_ft, &ft->id,
+ ft->flags);
if (err)
goto free_ft;
@@ -807,10 +849,11 @@ unlock_root:
struct mlx5_flow_table *mlx5_create_flow_table(struct mlx5_flow_namespace *ns,
int prio, int max_fte,
- u32 level)
+ u32 level,
+ u32 flags)
{
return __mlx5_create_flow_table(ns, FS_FT_OP_MOD_NORMAL, 0, prio,
- max_fte, level);
+ max_fte, level, flags);
}
struct mlx5_flow_table *mlx5_create_vport_flow_table(struct mlx5_flow_namespace *ns,
@@ -818,7 +861,7 @@ struct mlx5_flow_table *mlx5_create_vport_flow_table(struct mlx5_flow_namespace
u32 level, u16 vport)
{
return __mlx5_create_flow_table(ns, FS_FT_OP_MOD_NORMAL, vport, prio,
- max_fte, level);
+ max_fte, level, 0);
}
struct mlx5_flow_table *mlx5_create_lag_demux_flow_table(
@@ -826,7 +869,7 @@ struct mlx5_flow_table *mlx5_create_lag_demux_flow_table(
int prio, u32 level)
{
return __mlx5_create_flow_table(ns, FS_FT_OP_MOD_LAG_DEMUX, 0, prio, 0,
- level);
+ level, 0);
}
EXPORT_SYMBOL(mlx5_create_lag_demux_flow_table);
@@ -834,14 +877,15 @@ struct mlx5_flow_table *mlx5_create_auto_grouped_flow_table(struct mlx5_flow_nam
int prio,
int num_flow_table_entries,
int max_num_groups,
- u32 level)
+ u32 level,
+ u32 flags)
{
struct mlx5_flow_table *ft;
if (max_num_groups > num_flow_table_entries)
return ERR_PTR(-EINVAL);
- ft = mlx5_create_flow_table(ns, prio, num_flow_table_entries, level);
+ ft = mlx5_create_flow_table(ns, prio, num_flow_table_entries, level, flags);
if (IS_ERR(ft))
return ft;
@@ -918,55 +962,133 @@ static struct mlx5_flow_rule *alloc_rule(struct mlx5_flow_destination *dest)
return rule;
}
-/* fte should not be deleted while calling this function */
-static struct mlx5_flow_rule *add_rule_fte(struct fs_fte *fte,
- struct mlx5_flow_group *fg,
- struct mlx5_flow_destination *dest)
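+/* A flow handle bundles the rules (one per destination) created for a single
+ * add request, so callers get one object per FTE insertion.
+ */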
+static struct mlx5_flow_handle *alloc_handle(int num_rules)
+{
+ struct mlx5_flow_handle *handle;
+
+ handle = kzalloc(sizeof(*handle) + sizeof(handle->rule[0]) *
+ num_rules, GFP_KERNEL);
+ if (!handle)
+ return NULL;
+
+ handle->num_rules = num_rules;
+
+ return handle;
+}
+
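+/* Drop the references create_flow_handle() took on the first i rules and
+ * free the handle; rules whose refcount reaches zero are unlinked and freed.
+ */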
+static void destroy_flow_handle(struct fs_fte *fte,
+ struct mlx5_flow_handle *handle,
+ struct mlx5_flow_destination *dest,
+ int i)
+{
+ for (; --i >= 0;) {
+ if (atomic_dec_and_test(&handle->rule[i]->node.refcount)) {
+ fte->dests_size--;
+ list_del(&handle->rule[i]->node.list);
+ kfree(handle->rule[i]);
+ }
+ }
+ kfree(handle);
+}
+
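+/* Build a handle over dest_num destinations: reuse an existing rule when the
+ * FTE already forwards to a destination, otherwise allocate a new rule and
+ * mark in *modify_mask whether the destination list or the flow counters of
+ * the FTE need a firmware update.
+ */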
+static struct mlx5_flow_handle *
+create_flow_handle(struct fs_fte *fte,
+ struct mlx5_flow_destination *dest,
+ int dest_num,
+ int *modify_mask,
+ bool *new_rule)
{
+ struct mlx5_flow_handle *handle;
+ struct mlx5_flow_rule *rule = NULL;
+ static int count = BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_FLOW_COUNTERS);
+ static int dst = BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_DESTINATION_LIST);
+ int type;
+ int i = 0;
+
+ handle = alloc_handle((dest_num) ? dest_num : 1);
+ if (!handle)
+ return ERR_PTR(-ENOMEM);
+
+ do {
+ if (dest) {
+ rule = find_flow_rule(fte, dest + i);
+ if (rule) {
+ atomic_inc(&rule->node.refcount);
+ goto rule_found;
+ }
+ }
+
+ *new_rule = true;
+ rule = alloc_rule(dest + i);
+ if (!rule)
+ goto free_rules;
+
+ /* Add dest to dests list- we need flow tables to be in the
+ * end of the list for forward to next prio rules.
+ */
+ tree_init_node(&rule->node, 1, del_rule);
+ if (dest &&
+ dest[i].type != MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE)
+ list_add(&rule->node.list, &fte->node.children);
+ else
+ list_add_tail(&rule->node.list, &fte->node.children);
+ if (dest) {
+ fte->dests_size++;
+
+ type = dest[i].type ==
+ MLX5_FLOW_DESTINATION_TYPE_COUNTER;
+ *modify_mask |= type ? count : dst;
+ }
+rule_found:
+ handle->rule[i] = rule;
+ } while (++i < dest_num);
+
+ return handle;
+
+free_rules:
+ destroy_flow_handle(fte, handle, dest, i);
+ return ERR_PTR(-ENOMEM);
+}
+
+/* fte should not be deleted while calling this function */
+static struct mlx5_flow_handle *
+add_rule_fte(struct fs_fte *fte,
+ struct mlx5_flow_group *fg,
+ struct mlx5_flow_destination *dest,
+ int dest_num,
+ bool update_action)
+{
+ struct mlx5_flow_handle *handle;
struct mlx5_flow_table *ft;
- struct mlx5_flow_rule *rule;
int modify_mask = 0;
int err;
+ bool new_rule = false;
- rule = alloc_rule(dest);
- if (!rule)
- return ERR_PTR(-ENOMEM);
+ handle = create_flow_handle(fte, dest, dest_num, &modify_mask,
+ &new_rule);
+ if (IS_ERR(handle) || !new_rule)
+ goto out;
- fs_get_obj(ft, fg->node.parent);
- /* Add dest to dests list- we need flow tables to be in the
- * end of the list for forward to next prio rules.
- */
- tree_init_node(&rule->node, 1, del_rule);
- if (dest && dest->type != MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE)
- list_add(&rule->node.list, &fte->node.children);
- else
- list_add_tail(&rule->node.list, &fte->node.children);
- if (dest) {
- fte->dests_size++;
+ if (update_action)
+ modify_mask |= BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_ACTION);
- modify_mask |= dest->type == MLX5_FLOW_DESTINATION_TYPE_COUNTER ?
- BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_FLOW_COUNTERS) :
- BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_DESTINATION_LIST);
- }
-
- if (fte->dests_size == 1 || !dest)
+ fs_get_obj(ft, fg->node.parent);
+ if (!(fte->status & FS_FTE_STATUS_EXISTING))
err = mlx5_cmd_create_fte(get_dev(&ft->node),
ft, fg->id, fte);
else
err = mlx5_cmd_update_fte(get_dev(&ft->node),
ft, fg->id, modify_mask, fte);
if (err)
- goto free_rule;
+ goto free_handle;
fte->status |= FS_FTE_STATUS_EXISTING;
- return rule;
+out:
+ return handle;
-free_rule:
- list_del(&rule->node.list);
- kfree(rule);
- if (dest)
- fte->dests_size--;
+free_handle:
+ destroy_flow_handle(fte, handle, dest, handle->num_rules);
return ERR_PTR(err);
}
@@ -995,15 +1117,14 @@ static unsigned int get_free_fte_index(struct mlx5_flow_group *fg,
/* prev is output, prev->next = new_fte */
static struct fs_fte *create_fte(struct mlx5_flow_group *fg,
u32 *match_value,
- u8 action,
- u32 flow_tag,
+ struct mlx5_flow_act *flow_act,
struct list_head **prev)
{
struct fs_fte *fte;
int index;
index = get_free_fte_index(fg, prev);
- fte = alloc_fte(action, flow_tag, match_value, index);
+ fte = alloc_fte(flow_act, match_value, index);
if (IS_ERR(fte))
return fte;
@@ -1067,71 +1188,81 @@ out:
return fg;
}
+static bool mlx5_flow_dests_cmp(struct mlx5_flow_destination *d1,
+ struct mlx5_flow_destination *d2)
+{
+ if (d1->type == d2->type) {
+ if ((d1->type == MLX5_FLOW_DESTINATION_TYPE_VPORT &&
+ d1->vport_num == d2->vport_num) ||
+ (d1->type == MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE &&
+ d1->ft == d2->ft) ||
+ (d1->type == MLX5_FLOW_DESTINATION_TYPE_TIR &&
+ d1->tir_num == d2->tir_num))
+ return true;
+ }
+
+ return false;
+}
+
static struct mlx5_flow_rule *find_flow_rule(struct fs_fte *fte,
struct mlx5_flow_destination *dest)
{
struct mlx5_flow_rule *rule;
list_for_each_entry(rule, &fte->node.children, node.list) {
- if (rule->dest_attr.type == dest->type) {
- if ((dest->type == MLX5_FLOW_DESTINATION_TYPE_VPORT &&
- dest->vport_num == rule->dest_attr.vport_num) ||
- (dest->type == MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE &&
- dest->ft == rule->dest_attr.ft) ||
- (dest->type == MLX5_FLOW_DESTINATION_TYPE_TIR &&
- dest->tir_num == rule->dest_attr.tir_num))
- return rule;
- }
+ if (mlx5_flow_dests_cmp(&rule->dest_attr, dest))
+ return rule;
}
return NULL;
}
-static struct mlx5_flow_rule *add_rule_fg(struct mlx5_flow_group *fg,
- u32 *match_value,
- u8 action,
- u32 flow_tag,
- struct mlx5_flow_destination *dest)
+static struct mlx5_flow_handle *add_rule_fg(struct mlx5_flow_group *fg,
+ u32 *match_value,
+ struct mlx5_flow_act *flow_act,
+ struct mlx5_flow_destination *dest,
+ int dest_num)
{
- struct fs_fte *fte;
- struct mlx5_flow_rule *rule;
+ struct mlx5_flow_handle *handle;
struct mlx5_flow_table *ft;
struct list_head *prev;
+ struct fs_fte *fte;
+ int i;
nested_lock_ref_node(&fg->node, FS_MUTEX_PARENT);
fs_for_each_fte(fte, fg) {
nested_lock_ref_node(&fte->node, FS_MUTEX_CHILD);
if (compare_match_value(&fg->mask, match_value, &fte->val) &&
- action == fte->action && flow_tag == fte->flow_tag) {
- rule = find_flow_rule(fte, dest);
- if (rule) {
- atomic_inc(&rule->node.refcount);
- unlock_ref_node(&fte->node);
- unlock_ref_node(&fg->node);
- return rule;
+ (flow_act->action & fte->action) &&
+ flow_act->flow_tag == fte->flow_tag) {
+ int old_action = fte->action;
+
+ fte->action |= flow_act->action;
+ handle = add_rule_fte(fte, fg, dest, dest_num,
+ old_action != flow_act->action);
+ if (IS_ERR(handle)) {
+ fte->action = old_action;
+ goto unlock_fte;
+ } else {
+ goto add_rules;
}
- rule = add_rule_fte(fte, fg, dest);
- unlock_ref_node(&fte->node);
- if (IS_ERR(rule))
- goto unlock_fg;
- else
- goto add_rule;
}
unlock_ref_node(&fte->node);
}
fs_get_obj(ft, fg->node.parent);
if (fg->num_ftes >= fg->max_ftes) {
- rule = ERR_PTR(-ENOSPC);
+ handle = ERR_PTR(-ENOSPC);
goto unlock_fg;
}
- fte = create_fte(fg, match_value, action, flow_tag, &prev);
+ fte = create_fte(fg, match_value, flow_act, &prev);
if (IS_ERR(fte)) {
- rule = (void *)fte;
+ handle = (void *)fte;
goto unlock_fg;
}
tree_init_node(&fte->node, 0, del_fte);
- rule = add_rule_fte(fte, fg, dest);
- if (IS_ERR(rule)) {
+ nested_lock_ref_node(&fte->node, FS_MUTEX_CHILD);
+ handle = add_rule_fte(fte, fg, dest, dest_num, false);
+ if (IS_ERR(handle)) {
kfree(fte);
goto unlock_fg;
}
@@ -1140,19 +1271,24 @@ static struct mlx5_flow_rule *add_rule_fg(struct mlx5_flow_group *fg,
tree_add_node(&fte->node, &fg->node);
list_add(&fte->node.list, prev);
-add_rule:
- tree_add_node(&rule->node, &fte->node);
+add_rules:
+ for (i = 0; i < handle->num_rules; i++) {
+ if (atomic_read(&handle->rule[i]->node.refcount) == 1)
+ tree_add_node(&handle->rule[i]->node, &fte->node);
+ }
+unlock_fte:
+ unlock_ref_node(&fte->node);
unlock_fg:
unlock_ref_node(&fg->node);
- return rule;
+ return handle;
}
-struct mlx5_fc *mlx5_flow_rule_counter(struct mlx5_flow_rule *rule)
+struct mlx5_fc *mlx5_flow_rule_counter(struct mlx5_flow_handle *handle)
{
struct mlx5_flow_rule *dst;
struct fs_fte *fte;
- fs_get_obj(fte, rule->node.parent);
+ fs_get_obj(fte, handle->rule[0]->node.parent);
fs_for_each_dst(dst, fte) {
if (dst->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_COUNTER)
@@ -1170,8 +1306,8 @@ static bool counter_is_valid(struct mlx5_fc *counter, u32 action)
if (!counter)
return false;
- /* Hardware support counter for a drop action only */
- return action == (MLX5_FLOW_CONTEXT_ACTION_DROP | MLX5_FLOW_CONTEXT_ACTION_COUNT);
+ return (action & (MLX5_FLOW_CONTEXT_ACTION_DROP |
+ MLX5_FLOW_CONTEXT_ACTION_FWD_DEST));
}
static bool dest_is_valid(struct mlx5_flow_destination *dest,
@@ -1191,18 +1327,22 @@ static bool dest_is_valid(struct mlx5_flow_destination *dest,
return true;
}
-static struct mlx5_flow_rule *
-_mlx5_add_flow_rule(struct mlx5_flow_table *ft,
- struct mlx5_flow_spec *spec,
- u32 action,
- u32 flow_tag,
- struct mlx5_flow_destination *dest)
+static struct mlx5_flow_handle *
+_mlx5_add_flow_rules(struct mlx5_flow_table *ft,
+ struct mlx5_flow_spec *spec,
+ struct mlx5_flow_act *flow_act,
+ struct mlx5_flow_destination *dest,
+ int dest_num)
{
struct mlx5_flow_group *g;
- struct mlx5_flow_rule *rule;
+ struct mlx5_flow_handle *rule;
+ int i;
- if (!dest_is_valid(dest, action, ft))
- return ERR_PTR(-EINVAL);
+ for (i = 0; i < dest_num; i++) {
+ if (!dest_is_valid(&dest[i], flow_act->action, ft))
+ return ERR_PTR(-EINVAL);
+ }
nested_lock_ref_node(&ft->node, FS_MUTEX_GRANDPARENT);
fs_for_each_fg(g, ft)
@@ -1211,7 +1351,7 @@ _mlx5_add_flow_rule(struct mlx5_flow_table *ft,
g->mask.match_criteria,
spec->match_criteria)) {
rule = add_rule_fg(g, spec->match_value,
- action, flow_tag, dest);
+ flow_act, dest, dest_num);
if (!IS_ERR(rule) || PTR_ERR(rule) != -ENOSPC)
goto unlock;
}
@@ -1223,8 +1363,7 @@ _mlx5_add_flow_rule(struct mlx5_flow_table *ft,
goto unlock;
}
- rule = add_rule_fg(g, spec->match_value,
- action, flow_tag, dest);
+ rule = add_rule_fg(g, spec->match_value, flow_act, dest, dest_num);
if (IS_ERR(rule)) {
/* Remove assumes refcount > 0 and autogroup creates a group
* with a refcount = 0.
@@ -1245,22 +1384,22 @@ static bool fwd_next_prio_supported(struct mlx5_flow_table *ft)
(MLX5_CAP_FLOWTABLE(get_dev(&ft->node), nic_rx_multi_path_tirs)));
}
-struct mlx5_flow_rule *
-mlx5_add_flow_rule(struct mlx5_flow_table *ft,
- struct mlx5_flow_spec *spec,
- u32 action,
- u32 flow_tag,
- struct mlx5_flow_destination *dest)
+struct mlx5_flow_handle *
+mlx5_add_flow_rules(struct mlx5_flow_table *ft,
+ struct mlx5_flow_spec *spec,
+ struct mlx5_flow_act *flow_act,
+ struct mlx5_flow_destination *dest,
+ int dest_num)
{
struct mlx5_flow_root_namespace *root = find_root(&ft->node);
struct mlx5_flow_destination gen_dest;
struct mlx5_flow_table *next_ft = NULL;
- struct mlx5_flow_rule *rule = NULL;
- u32 sw_action = action;
+ struct mlx5_flow_handle *handle = NULL;
+ u32 sw_action = flow_act->action;
struct fs_prio *prio;
fs_get_obj(prio, ft->node.parent);
- if (action == MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO) {
+ if (flow_act->action == MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO) {
if (!fwd_next_prio_supported(ft))
return ERR_PTR(-EOPNOTSUPP);
if (dest)
@@ -1271,34 +1410,40 @@ mlx5_add_flow_rule(struct mlx5_flow_table *ft,
gen_dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
gen_dest.ft = next_ft;
dest = &gen_dest;
- action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+ dest_num = 1;
+ flow_act->action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
} else {
mutex_unlock(&root->chain_lock);
return ERR_PTR(-EOPNOTSUPP);
}
}
- rule = _mlx5_add_flow_rule(ft, spec, action, flow_tag, dest);
+ handle = _mlx5_add_flow_rules(ft, spec, flow_act, dest, dest_num);
if (sw_action == MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO) {
- if (!IS_ERR_OR_NULL(rule) &&
- (list_empty(&rule->next_ft))) {
+ if (!IS_ERR_OR_NULL(handle) &&
+ (list_empty(&handle->rule[0]->next_ft))) {
mutex_lock(&next_ft->lock);
- list_add(&rule->next_ft, &next_ft->fwd_rules);
+ list_add(&handle->rule[0]->next_ft,
+ &next_ft->fwd_rules);
mutex_unlock(&next_ft->lock);
- rule->sw_action = MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO;
+ handle->rule[0]->sw_action = MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO;
}
mutex_unlock(&root->chain_lock);
}
- return rule;
+ return handle;
}
-EXPORT_SYMBOL(mlx5_add_flow_rule);
+EXPORT_SYMBOL(mlx5_add_flow_rules);
-void mlx5_del_flow_rule(struct mlx5_flow_rule *rule)
+void mlx5_del_flow_rules(struct mlx5_flow_handle *handle)
{
- tree_remove_node(&rule->node);
+ int i;
+
+ for (i = handle->num_rules - 1; i >= 0; i--)
+ tree_remove_node(&handle->rule[i]->node);
+ kfree(handle);
}
-EXPORT_SYMBOL(mlx5_del_flow_rule);
+EXPORT_SYMBOL(mlx5_del_flow_rules);
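
With both symbols converted, a caller now builds one handle covering several destinations and tears it down in a single call. A hedged caller-side sketch; ft, spec, tir_num and fc stand for objects created elsewhere, and the action/destination mix is illustrative:

	struct mlx5_flow_act flow_act = {
		.action   = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
			    MLX5_FLOW_CONTEXT_ACTION_COUNT,
		.flow_tag = MLX5_FS_DEFAULT_FLOW_TAG,
	};
	struct mlx5_flow_destination dest[2] = {};
	struct mlx5_flow_handle *handle;

	dest[0].type = MLX5_FLOW_DESTINATION_TYPE_TIR;
	dest[0].tir_num = tir_num;
	dest[1].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
	dest[1].counter = fc;

	/* one handle covering both destinations */
	handle = mlx5_add_flow_rules(ft, spec, &flow_act, dest, 2);
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	/* ... */

	mlx5_del_flow_rules(handle);
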
/* Assuming prio->node.children (flow tables) is sorted by level */
static struct mlx5_flow_table *find_next_ft(struct mlx5_flow_table *ft)
@@ -1678,7 +1823,7 @@ static int create_anchor_flow_table(struct mlx5_flow_steering *steering)
ns = mlx5_get_flow_namespace(steering->dev, MLX5_FLOW_NAMESPACE_ANCHOR);
if (!ns)
return -EINVAL;
- ft = mlx5_create_flow_table(ns, ANCHOR_PRIO, ANCHOR_SIZE, ANCHOR_LEVEL);
+ ft = mlx5_create_flow_table(ns, ANCHOR_PRIO, ANCHOR_SIZE, ANCHOR_LEVEL, 0);
if (IS_ERR(ft)) {
mlx5_core_err(steering->dev, "Failed to create last anchor flow table");
return PTR_ERR(ft);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
index 71ff03bceabb..8e668c63f69e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
@@ -94,6 +94,11 @@ struct mlx5_flow_rule {
u32 sw_action;
};
+struct mlx5_flow_handle {
+ int num_rules;
+ struct mlx5_flow_rule *rule[];
+};
+
/* Type of children is mlx5_flow_group */
struct mlx5_flow_table {
struct fs_node node;
@@ -112,6 +117,7 @@ struct mlx5_flow_table {
struct mutex lock;
/* FWD rules that point on this flow table */
struct list_head fwd_rules;
+ u32 flags;
};
struct mlx5_fc_cache {
@@ -145,6 +151,7 @@ struct fs_fte {
u32 flow_tag;
u32 index;
u32 action;
+ u32 encap_id;
enum fs_fte_status status;
struct mlx5_fc *counter;
};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
index 3b026c151cf2..7431f633de31 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
@@ -75,7 +75,7 @@ static void mlx5_fc_stats_insert(struct rb_root *root, struct mlx5_fc *counter)
struct rb_node *parent = NULL;
while (*new) {
- struct mlx5_fc *this = container_of(*new, struct mlx5_fc, node);
+ struct mlx5_fc *this = rb_entry(*new, struct mlx5_fc, node);
int result = counter->id - this->id;
parent = *new;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index ada24e103b02..7b4c339a8a9a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -174,6 +174,41 @@ static int wait_fw_init(struct mlx5_core_dev *dev, u32 max_wait_mili)
return err;
}
+static void mlx5_set_driver_version(struct mlx5_core_dev *dev)
+{
+ int driver_ver_sz = MLX5_FLD_SZ_BYTES(set_driver_version_in,
+ driver_version);
+ u8 in[MLX5_ST_SZ_BYTES(set_driver_version_in)] = {0};
+ u8 out[MLX5_ST_SZ_BYTES(set_driver_version_out)] = {0};
+ int remaining_size = driver_ver_sz;
+ char *string;
+
+ if (!MLX5_CAP_GEN(dev, driver_version))
+ return;
+
+ string = MLX5_ADDR_OF(set_driver_version_in, in, driver_version);
+
+ strncpy(string, "Linux", remaining_size);
+
+ remaining_size = max_t(int, 0, driver_ver_sz - strlen(string));
+ strncat(string, ",", remaining_size);
+
+ remaining_size = max_t(int, 0, driver_ver_sz - strlen(string));
+ strncat(string, DRIVER_NAME, remaining_size);
+
+ remaining_size = max_t(int, 0, driver_ver_sz - strlen(string));
+ strncat(string, ",", remaining_size);
+
+ remaining_size = max_t(int, 0, driver_ver_sz - strlen(string));
+ strncat(string, DRIVER_VERSION, remaining_size);
+
+ /* Send the command */
+ MLX5_SET(set_driver_version_in, in, opcode,
+ MLX5_CMD_OP_SET_DRIVER_VERSION);
+
+ mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+}
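
The strncpy()/strncat() chain above assembles one comma-separated version string inside the fixed-size FW field. An illustrative equivalent (not byte-identical in truncation corner cases, since snprintf() always NUL-terminates):

	snprintf(string, driver_ver_sz, "Linux,%s,%s",
		 DRIVER_NAME, DRIVER_VERSION);
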
+
static int set_dma_caps(struct pci_dev *pdev)
{
int err;
@@ -1017,6 +1052,8 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
goto err_pagealloc_stop;
}
+ mlx5_set_driver_version(dev);
+
mlx5_start_health_poll(dev);
err = mlx5_query_hca_caps(dev);
@@ -1204,6 +1241,8 @@ static const struct devlink_ops mlx5_devlink_ops = {
#ifdef CONFIG_MLX5_CORE_EN
.eswitch_mode_set = mlx5_devlink_eswitch_mode_set,
.eswitch_mode_get = mlx5_devlink_eswitch_mode_get,
+ .eswitch_inline_mode_set = mlx5_devlink_eswitch_inline_mode_set,
+ .eswitch_inline_mode_get = mlx5_devlink_eswitch_inline_mode_get,
#endif
};
@@ -1419,6 +1458,7 @@ static const struct pci_device_id mlx5_core_pci_table[] = {
{ PCI_VDEVICE(MELLANOX, 0x1017) }, /* ConnectX-5, PCIe 3.0 */
{ PCI_VDEVICE(MELLANOX, 0x1018), MLX5_PCI_DEV_IS_VF}, /* ConnectX-5 VF */
{ PCI_VDEVICE(MELLANOX, 0x1019) }, /* ConnectX-5, PCIe 4.0 */
+ { PCI_VDEVICE(MELLANOX, 0x101a), MLX5_PCI_DEV_IS_VF}, /* ConnectX-5, PCIe 4.0 VF */
{ 0, }
};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
index 63b9a0dba885..e0a8fbdd1446 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
@@ -86,6 +86,7 @@ int mlx5_cmd_init_hca(struct mlx5_core_dev *dev);
int mlx5_cmd_teardown_hca(struct mlx5_core_dev *dev);
void mlx5_core_event(struct mlx5_core_dev *dev, enum mlx5_dev_event event,
unsigned long param);
+void mlx5_port_module_event(struct mlx5_core_dev *dev, struct mlx5_eqe *eqe);
void mlx5_enter_error_state(struct mlx5_core_dev *dev);
void mlx5_disable_device(struct mlx5_core_dev *dev);
void mlx5_recover_device(struct mlx5_core_dev *dev);
@@ -97,6 +98,13 @@ int mlx5_core_sriov_configure(struct pci_dev *dev, int num_vfs);
bool mlx5_sriov_is_enabled(struct mlx5_core_dev *dev);
int mlx5_core_enable_hca(struct mlx5_core_dev *dev, u16 func_id);
int mlx5_core_disable_hca(struct mlx5_core_dev *dev, u16 func_id);
+int mlx5_create_scheduling_element_cmd(struct mlx5_core_dev *dev, u8 hierarchy,
+ void *context, u32 *element_id);
+int mlx5_modify_scheduling_element_cmd(struct mlx5_core_dev *dev, u8 hierarchy,
+ void *context, u32 element_id,
+ u32 modify_bitmask);
+int mlx5_destroy_scheduling_element_cmd(struct mlx5_core_dev *dev, u8 hierarchy,
+ u32 element_id);
int mlx5_wait_for_vf_pages(struct mlx5_core_dev *dev);
cycle_t mlx5_read_internal_timer(struct mlx5_core_dev *dev);
u32 mlx5_get_msix_vec(struct mlx5_core_dev *dev, int vecidx);
@@ -119,6 +127,12 @@ struct mlx5_core_dev *mlx5_get_next_phys_dev(struct mlx5_core_dev *dev);
void mlx5_dev_list_lock(void);
void mlx5_dev_list_unlock(void);
int mlx5_dev_list_trylock(void);
+int mlx5_encap_alloc(struct mlx5_core_dev *dev,
+ int header_type,
+ size_t size,
+ void *encap_header,
+ u32 *encap_id);
+void mlx5_encap_dealloc(struct mlx5_core_dev *dev, u32 encap_id);
bool mlx5_lag_intf_add(struct mlx5_interface *intf, struct mlx5_priv *priv);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/port.c b/drivers/net/ethernet/mellanox/mlx5/core/port.c
index 34e7184e23c9..d2ec9d232a70 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/port.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/port.c
@@ -548,6 +548,26 @@ int mlx5_max_tc(struct mlx5_core_dev *mdev)
return num_tc - 1;
}
+int mlx5_query_port_dcbx_param(struct mlx5_core_dev *mdev, u32 *out)
+{
+ u32 in[MLX5_ST_SZ_DW(dcbx_param)] = {0};
+
+ MLX5_SET(dcbx_param, in, port_number, 1);
+
+ return mlx5_core_access_reg(mdev, in, sizeof(in), out,
+ sizeof(in), MLX5_REG_DCBX_PARAM, 0, 0);
+}
+
+int mlx5_set_port_dcbx_param(struct mlx5_core_dev *mdev, u32 *in)
+{
+ u32 out[MLX5_ST_SZ_DW(dcbx_param)];
+
+ MLX5_SET(dcbx_param, in, port_number, 1);
+
+ return mlx5_core_access_reg(mdev, in, sizeof(out), out,
+ sizeof(out), MLX5_REG_DCBX_PARAM, 0, 1);
+}
+
int mlx5_set_port_prio_tc(struct mlx5_core_dev *mdev, u8 *prio_tc)
{
u32 in[MLX5_ST_SZ_DW(qtct_reg)] = {0};
@@ -572,6 +592,28 @@ int mlx5_set_port_prio_tc(struct mlx5_core_dev *mdev, u8 *prio_tc)
}
EXPORT_SYMBOL_GPL(mlx5_set_port_prio_tc);
+int mlx5_query_port_prio_tc(struct mlx5_core_dev *mdev,
+ u8 prio, u8 *tc)
+{
+ u32 in[MLX5_ST_SZ_DW(qtct_reg)];
+ u32 out[MLX5_ST_SZ_DW(qtct_reg)];
+ int err;
+
+ memset(in, 0, sizeof(in));
+ memset(out, 0, sizeof(out));
+
+ MLX5_SET(qtct_reg, in, port_number, 1);
+ MLX5_SET(qtct_reg, in, prio, prio);
+
+ err = mlx5_core_access_reg(mdev, in, sizeof(in), out,
+ sizeof(out), MLX5_REG_QTCT, 0, 0);
+ if (!err)
+ *tc = MLX5_GET(qtct_reg, out, tclass);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(mlx5_query_port_prio_tc);
+
static int mlx5_set_port_qetcr_reg(struct mlx5_core_dev *mdev, u32 *in,
int inlen)
{
@@ -625,6 +667,27 @@ int mlx5_set_port_tc_bw_alloc(struct mlx5_core_dev *mdev, u8 *tc_bw)
}
EXPORT_SYMBOL_GPL(mlx5_set_port_tc_bw_alloc);
+int mlx5_query_port_tc_bw_alloc(struct mlx5_core_dev *mdev,
+ u8 tc, u8 *bw_pct)
+{
+ u32 out[MLX5_ST_SZ_DW(qetc_reg)];
+ void *ets_tcn_conf;
+ int err;
+
+ err = mlx5_query_port_qetcr_reg(mdev, out, sizeof(out));
+ if (err)
+ return err;
+
+ ets_tcn_conf = MLX5_ADDR_OF(qetc_reg, out,
+ tc_configuration[tc]);
+
+ *bw_pct = MLX5_GET(ets_tcn_config_reg, ets_tcn_conf,
+ bw_allocation);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mlx5_query_port_tc_bw_alloc);
+
int mlx5_modify_port_ets_rate_limit(struct mlx5_core_dev *mdev,
u8 *max_bw_value,
u8 *max_bw_units)
@@ -746,3 +809,60 @@ void mlx5_query_port_fcs(struct mlx5_core_dev *mdev, bool *supported,
*supported = !!(MLX5_GET(pcmr_reg, out, fcs_cap));
*enabled = !!(MLX5_GET(pcmr_reg, out, fcs_chk));
}
+
+static const char *mlx5_pme_status[MLX5_MODULE_STATUS_NUM] = {
+ "Cable plugged", /* MLX5_MODULE_STATUS_PLUGGED = 0x1 */
+ "Cable unplugged", /* MLX5_MODULE_STATUS_UNPLUGGED = 0x2 */
+ "Cable error", /* MLX5_MODULE_STATUS_ERROR = 0x3 */
+};
+
+static const char *mlx5_pme_error[MLX5_MODULE_EVENT_ERROR_NUM] = {
+ "Power budget exceeded",
+ "Long Range for non MLNX cable",
+ "Bus stuck(I2C or data shorted)",
+ "No EEPROM/retry timeout",
+ "Enforce part number list",
+ "Unknown identifier",
+ "High Temperature",
+ "Bad or shorted cable/module",
+ "Unknown status",
+};
+
+void mlx5_port_module_event(struct mlx5_core_dev *dev, struct mlx5_eqe *eqe)
+{
+ enum port_module_event_status_type module_status;
+ enum port_module_event_error_type error_type;
+ struct mlx5_eqe_port_module *module_event_eqe;
+ struct mlx5_priv *priv = &dev->priv;
+ u8 module_num;
+
+ module_event_eqe = &eqe->data.port_module;
+ module_num = module_event_eqe->module;
+ module_status = module_event_eqe->module_status &
+ PORT_MODULE_EVENT_MODULE_STATUS_MASK;
+ error_type = module_event_eqe->error_type &
+ PORT_MODULE_EVENT_ERROR_TYPE_MASK;
+
+ if (module_status < MLX5_MODULE_STATUS_ERROR) {
+ priv->pme_stats.status_counters[module_status - 1]++;
+ } else if (module_status == MLX5_MODULE_STATUS_ERROR) {
+ if (error_type >= MLX5_MODULE_EVENT_ERROR_UNKNOWN)
+ /* Unknown error type */
+ error_type = MLX5_MODULE_EVENT_ERROR_UNKNOWN;
+ priv->pme_stats.error_counters[error_type]++;
+ }
+
+ if (!printk_ratelimit())
+ return;
+
+ if (module_status < MLX5_MODULE_STATUS_ERROR)
+ mlx5_core_info(dev,
+ "Port module event: module %u, %s\n",
+ module_num, mlx5_pme_status[module_status - 1]);
+
+ else if (module_status == MLX5_MODULE_STATUS_ERROR)
+ mlx5_core_info(dev,
+ "Port module event[error]: module %u, %s, %s\n",
+ module_num, mlx5_pme_status[module_status - 1],
+ mlx5_pme_error[error_type]);
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/rl.c b/drivers/net/ethernet/mellanox/mlx5/core/rl.c
index 104902a93a0b..e651e4c02867 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/rl.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/rl.c
@@ -36,6 +36,71 @@
#include <linux/mlx5/cmd.h>
#include "mlx5_core.h"
+/* Scheduling element fw management */
+int mlx5_create_scheduling_element_cmd(struct mlx5_core_dev *dev, u8 hierarchy,
+ void *ctx, u32 *element_id)
+{
+ u32 in[MLX5_ST_SZ_DW(create_scheduling_element_in)] = {0};
+ u32 out[MLX5_ST_SZ_DW(create_scheduling_element_in)] = {0};
+ void *schedc;
+ int err;
+
+ schedc = MLX5_ADDR_OF(create_scheduling_element_in, in,
+ scheduling_context);
+ MLX5_SET(create_scheduling_element_in, in, opcode,
+ MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT);
+ MLX5_SET(create_scheduling_element_in, in, scheduling_hierarchy,
+ hierarchy);
+ memcpy(schedc, ctx, MLX5_ST_SZ_BYTES(scheduling_context));
+
+ err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ if (err)
+ return err;
+
+ *element_id = MLX5_GET(create_scheduling_element_out, out,
+ scheduling_element_id);
+ return 0;
+}
+
+int mlx5_modify_scheduling_element_cmd(struct mlx5_core_dev *dev, u8 hierarchy,
+ void *ctx, u32 element_id,
+ u32 modify_bitmask)
+{
+ u32 in[MLX5_ST_SZ_DW(modify_scheduling_element_in)] = {0};
+ u32 out[MLX5_ST_SZ_DW(modify_scheduling_element_in)] = {0};
+ void *schedc;
+
+ schedc = MLX5_ADDR_OF(modify_scheduling_element_in, in,
+ scheduling_context);
+ MLX5_SET(modify_scheduling_element_in, in, opcode,
+ MLX5_CMD_OP_MODIFY_SCHEDULING_ELEMENT);
+ MLX5_SET(modify_scheduling_element_in, in, scheduling_element_id,
+ element_id);
+ MLX5_SET(modify_scheduling_element_in, in, modify_bitmask,
+ modify_bitmask);
+ MLX5_SET(modify_scheduling_element_in, in, scheduling_hierarchy,
+ hierarchy);
+ memcpy(schedc, ctx, MLX5_ST_SZ_BYTES(scheduling_context));
+
+ return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+}
+
+int mlx5_destroy_scheduling_element_cmd(struct mlx5_core_dev *dev, u8 hierarchy,
+ u32 element_id)
+{
+ u32 in[MLX5_ST_SZ_DW(destroy_scheduling_element_in)] = {0};
+ u32 out[MLX5_ST_SZ_DW(destroy_scheduling_element_in)] = {0};
+
+ MLX5_SET(destroy_scheduling_element_in, in, opcode,
+ MLX5_CMD_OP_DESTROY_SCHEDULING_ELEMENT);
+ MLX5_SET(destroy_scheduling_element_in, in, scheduling_element_id,
+ element_id);
+ MLX5_SET(destroy_scheduling_element_in, in, scheduling_hierarchy,
+ hierarchy);
+
+ return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+}
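
A hedged usage sketch for the three commands above; dev, hierarchy, sched_ctx and modify_bitmask stand for values a real caller derives from its own state:

	u32 element_id;
	int err;

	err = mlx5_create_scheduling_element_cmd(dev, hierarchy, sched_ctx,
						 &element_id);
	if (err)
		return err;

	/* adjust rate/weight fields in sched_ctx, then push the change */
	err = mlx5_modify_scheduling_element_cmd(dev, hierarchy, sched_ctx,
						 element_id, modify_bitmask);

	/* tear down once the element is no longer needed */
	mlx5_destroy_scheduling_element_cmd(dev, hierarchy, element_id);
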
+
/* Finds an entry where we can register the given rate
* If the rate already exists, return the entry where it is registered,
* otherwise return the first available entry.
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vport.c b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
index 525f17af108e..269e4401c342 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/vport.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
@@ -113,15 +113,17 @@ static int mlx5_modify_nic_vport_context(struct mlx5_core_dev *mdev, void *in,
return mlx5_cmd_exec(mdev, in, inlen, out, sizeof(out));
}
-void mlx5_query_nic_vport_min_inline(struct mlx5_core_dev *mdev,
- u8 *min_inline_mode)
+int mlx5_query_nic_vport_min_inline(struct mlx5_core_dev *mdev,
+ u16 vport, u8 *min_inline)
{
u32 out[MLX5_ST_SZ_DW(query_nic_vport_context_out)] = {0};
+ int err;
- mlx5_query_nic_vport_context(mdev, 0, out, sizeof(out));
-
- *min_inline_mode = MLX5_GET(query_nic_vport_context_out, out,
- nic_vport_context.min_wqe_inline_mode);
+ err = mlx5_query_nic_vport_context(mdev, vport, out, sizeof(out));
+ if (!err)
+ *min_inline = MLX5_GET(query_nic_vport_context_out, out,
+ nic_vport_context.min_wqe_inline_mode);
+ return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_min_inline);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/wq.c b/drivers/net/ethernet/mellanox/mlx5/core/wq.c
index 821a087c7ae2..921673c42bc9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/wq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/wq.c
@@ -101,13 +101,15 @@ err_db_free:
int mlx5_cqwq_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
void *cqc, struct mlx5_cqwq *wq,
- struct mlx5_wq_ctrl *wq_ctrl)
+ struct mlx5_frag_wq_ctrl *wq_ctrl)
{
int err;
- wq->log_stride = 6 + MLX5_GET(cqc, cqc, cqe_sz);
- wq->log_sz = MLX5_GET(cqc, cqc, log_cq_size);
- wq->sz_m1 = (1 << wq->log_sz) - 1;
+ wq->log_stride = 6 + MLX5_GET(cqc, cqc, cqe_sz);
+ wq->log_sz = MLX5_GET(cqc, cqc, log_cq_size);
+ wq->sz_m1 = (1 << wq->log_sz) - 1;
+ wq->log_frag_strides = PAGE_SHIFT - wq->log_stride;
+ wq->frag_sz_m1 = (1 << wq->log_frag_strides) - 1;
err = mlx5_db_alloc_node(mdev, &wq_ctrl->db, param->db_numa_node);
if (err) {
@@ -115,14 +117,16 @@ int mlx5_cqwq_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
return err;
}
- err = mlx5_buf_alloc_node(mdev, mlx5_cqwq_get_byte_size(wq),
- &wq_ctrl->buf, param->buf_numa_node);
+ err = mlx5_frag_buf_alloc_node(mdev, mlx5_cqwq_get_byte_size(wq),
+ &wq_ctrl->frag_buf,
+ param->buf_numa_node);
if (err) {
- mlx5_core_warn(mdev, "mlx5_buf_alloc_node() failed, %d\n", err);
+ mlx5_core_warn(mdev, "mlx5_frag_buf_alloc_node() failed, %d\n",
+ err);
goto err_db_free;
}
- wq->buf = wq_ctrl->buf.direct.buf;
+ wq->frag_buf = wq_ctrl->frag_buf;
wq->db = wq_ctrl->db.db;
wq_ctrl->mdev = mdev;
@@ -184,3 +188,9 @@ void mlx5_wq_destroy(struct mlx5_wq_ctrl *wq_ctrl)
mlx5_buf_free(wq_ctrl->mdev, &wq_ctrl->buf);
mlx5_db_free(wq_ctrl->mdev, &wq_ctrl->db);
}
+
+void mlx5_cqwq_destroy(struct mlx5_frag_wq_ctrl *wq_ctrl)
+{
+ mlx5_frag_buf_free(wq_ctrl->mdev, &wq_ctrl->frag_buf);
+ mlx5_db_free(wq_ctrl->mdev, &wq_ctrl->db);
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/wq.h b/drivers/net/ethernet/mellanox/mlx5/core/wq.h
index 6c2a8f95093c..d8afed898c31 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/wq.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/wq.h
@@ -47,6 +47,12 @@ struct mlx5_wq_ctrl {
struct mlx5_db db;
};
+struct mlx5_frag_wq_ctrl {
+ struct mlx5_core_dev *mdev;
+ struct mlx5_frag_buf frag_buf;
+ struct mlx5_db db;
+};
+
struct mlx5_wq_cyc {
void *buf;
__be32 *db;
@@ -55,12 +61,14 @@ struct mlx5_wq_cyc {
};
struct mlx5_cqwq {
- void *buf;
+ struct mlx5_frag_buf frag_buf;
__be32 *db;
u32 sz_m1;
+ u32 frag_sz_m1;
u32 cc; /* consumer counter */
u8 log_sz;
u8 log_stride;
+ u8 log_frag_strides;
};
struct mlx5_wq_ll {
@@ -81,7 +89,7 @@ u32 mlx5_wq_cyc_get_size(struct mlx5_wq_cyc *wq);
int mlx5_cqwq_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
void *cqc, struct mlx5_cqwq *wq,
- struct mlx5_wq_ctrl *wq_ctrl);
+ struct mlx5_frag_wq_ctrl *wq_ctrl);
u32 mlx5_cqwq_get_size(struct mlx5_cqwq *wq);
int mlx5_wq_ll_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
@@ -90,6 +98,7 @@ int mlx5_wq_ll_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
u32 mlx5_wq_ll_get_size(struct mlx5_wq_ll *wq);
void mlx5_wq_destroy(struct mlx5_wq_ctrl *wq_ctrl);
+void mlx5_cqwq_destroy(struct mlx5_frag_wq_ctrl *wq_ctrl);
static inline u16 mlx5_wq_cyc_ctr2ix(struct mlx5_wq_cyc *wq, u16 ctr)
{
@@ -116,7 +125,10 @@ static inline u32 mlx5_cqwq_get_ci(struct mlx5_cqwq *wq)
static inline void *mlx5_cqwq_get_wqe(struct mlx5_cqwq *wq, u32 ix)
{
- return wq->buf + (ix << wq->log_stride);
+ unsigned int frag = (ix >> wq->log_frag_strides);
+
+ return wq->frag_buf.frags[frag].buf +
+ ((wq->frag_sz_m1 & ix) << wq->log_stride);
}
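
A worked example of the fragment arithmetic above, assuming 64-byte CQEs (log_stride = 6) and 4 KiB pages (PAGE_SHIFT = 12):

	/* log_frag_strides = 12 - 6 = 6, frag_sz_m1 = 63:
	 * each page-sized fragment holds 64 CQEs. For ix = 70:
	 *   frag   = 70 >> 6        = 1    (second fragment)
	 *   offset = (70 & 63) << 6 = 384  (CQE 6 within that fragment)
	 */
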
static inline u32 mlx5_cqwq_get_wrap_cnt(struct mlx5_cqwq *wq)
diff --git a/drivers/net/ethernet/mellanox/mlxsw/Kconfig b/drivers/net/ethernet/mellanox/mlxsw/Kconfig
index 5989f7cb5462..16f44b9aa076 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/Kconfig
+++ b/drivers/net/ethernet/mellanox/mlxsw/Kconfig
@@ -19,6 +19,15 @@ config MLXSW_CORE_HWMON
---help---
Say Y here if you want to expose HWMON interface on mlxsw devices.
+config MLXSW_CORE_THERMAL
+ bool "Thermal zone support for Mellanox Technologies Switch ASICs"
+ depends on MLXSW_CORE && THERMAL
+ depends on !(MLXSW_CORE=y && THERMAL=m)
+ default y
+ ---help---
+ Say Y here if you want fan speed to be controlled automatically
+ according to the ambient temperature reported by the ASIC.
+
config MLXSW_PCI
tristate "PCI bus implementation for Mellanox Technologies Switch ASICs"
depends on PCI && HAS_DMA && HAS_IOMEM && MLXSW_CORE
@@ -29,9 +38,30 @@ config MLXSW_PCI
To compile this driver as a module, choose M here: the
module will be called mlxsw_pci.
+config MLXSW_I2C
+ tristate "I2C bus implementation for Mellanox Technologies Switch ASICs"
+ depends on I2C && MLXSW_CORE
+ default m
+ ---help---
+ This is the I2C bus implementation for Mellanox Technologies Switch ASICs.
+
+ To compile this driver as a module, choose M here: the
+ module will be called mlxsw_i2c.
+
+config MLXSW_SWITCHIB
+ tristate "Mellanox Technologies SwitchIB and SwitchIB-2 support"
+ depends on MLXSW_CORE && MLXSW_PCI && NET_SWITCHDEV
+ default m
+ ---help---
+ This driver supports Mellanox Technologies SwitchIB and SwitchIB-2
+ InfiniBand Switch ASICs.
+
+ To compile this driver as a module, choose M here: the
+ module will be called mlxsw_switchib.
+
config MLXSW_SWITCHX2
tristate "Mellanox Technologies SwitchX-2 support"
- depends on MLXSW_CORE && NET_SWITCHDEV
+ depends on MLXSW_CORE && MLXSW_PCI && NET_SWITCHDEV
default m
---help---
This driver supports Mellanox Technologies SwitchX-2 Ethernet
@@ -42,7 +72,7 @@ config MLXSW_SWITCHX2
config MLXSW_SPECTRUM
tristate "Mellanox Technologies Spectrum support"
- depends on MLXSW_CORE && NET_SWITCHDEV && VLAN_8021Q
+ depends on MLXSW_CORE && MLXSW_PCI && NET_SWITCHDEV && VLAN_8021Q
default m
---help---
This driver supports Mellanox Technologies Spectrum Ethernet
@@ -58,3 +88,14 @@ config MLXSW_SPECTRUM_DCB
---help---
Say Y here if you want to use Data Center Bridging (DCB) in the
driver.
+
+config MLXSW_MINIMAL
+ tristate "Mellanox Technologies minimal I2C support"
+ depends on MLXSW_CORE && MLXSW_I2C
+ default m
+ ---help---
+ This driver supports I2C access for Mellanox Technologies Switch
+ ASICs.
+
+ To compile this driver as a module, choose M here: the
+ module will be called mlxsw_minimal.
diff --git a/drivers/net/ethernet/mellanox/mlxsw/Makefile b/drivers/net/ethernet/mellanox/mlxsw/Makefile
index d20ae1838a64..fe8dadba15ab 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/Makefile
+++ b/drivers/net/ethernet/mellanox/mlxsw/Makefile
@@ -1,8 +1,13 @@
obj-$(CONFIG_MLXSW_CORE) += mlxsw_core.o
mlxsw_core-objs := core.o
mlxsw_core-$(CONFIG_MLXSW_CORE_HWMON) += core_hwmon.o
+mlxsw_core-$(CONFIG_MLXSW_CORE_THERMAL) += core_thermal.o
obj-$(CONFIG_MLXSW_PCI) += mlxsw_pci.o
mlxsw_pci-objs := pci.o
+obj-$(CONFIG_MLXSW_I2C) += mlxsw_i2c.o
+mlxsw_i2c-objs := i2c.o
+obj-$(CONFIG_MLXSW_SWITCHIB) += mlxsw_switchib.o
+mlxsw_switchib-objs := switchib.o
obj-$(CONFIG_MLXSW_SWITCHX2) += mlxsw_switchx2.o
mlxsw_switchx2-objs := switchx2.o
obj-$(CONFIG_MLXSW_SPECTRUM) += mlxsw_spectrum.o
@@ -10,3 +15,5 @@ mlxsw_spectrum-objs := spectrum.o spectrum_buffers.o \
spectrum_switchdev.o spectrum_router.o \
spectrum_kvdl.o
mlxsw_spectrum-$(CONFIG_MLXSW_SPECTRUM_DCB) += spectrum_dcb.o
+obj-$(CONFIG_MLXSW_MINIMAL) += mlxsw_minimal.o
+mlxsw_minimal-objs := minimal.o
diff --git a/drivers/net/ethernet/mellanox/mlxsw/cmd.h b/drivers/net/ethernet/mellanox/mlxsw/cmd.h
index 28271bedd957..56e19b0d2f8f 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/cmd.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/cmd.h
@@ -513,6 +513,11 @@ static inline int mlxsw_cmd_unmap_fa(struct mlxsw_core *mlxsw_core)
* are no more sources in the table, will return resource id 0xFFFF to indicate
* it.
*/
+
+#define MLXSW_CMD_QUERY_RESOURCES_TABLE_END_ID 0xffff
+#define MLXSW_CMD_QUERY_RESOURCES_MAX_QUERIES 100
+#define MLXSW_CMD_QUERY_RESOURCES_PER_QUERY 32
+
static inline int mlxsw_cmd_query_resources(struct mlxsw_core *mlxsw_core,
char *out_mbox, int index)
{
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c
index aa33d58b9f81..57a98849551b 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.c
@@ -67,6 +67,7 @@
#include "trap.h"
#include "emad.h"
#include "reg.h"
+#include "resources.h"
static LIST_HEAD(mlxsw_core_driver_list);
static DEFINE_SPINLOCK(mlxsw_core_driver_list_lock);
@@ -76,6 +77,7 @@ static const char mlxsw_core_driver_name[] = "mlxsw_core";
static struct dentry *mlxsw_core_dbg_root;
static struct workqueue_struct *mlxsw_wq;
+static struct workqueue_struct *mlxsw_owq;
struct mlxsw_core_pcpu_stats {
u64 trap_rx_packets[MLXSW_TRAP_ID_MAX];
@@ -89,6 +91,23 @@ struct mlxsw_core_pcpu_stats {
u32 port_rx_invalid;
};
+struct mlxsw_core_port {
+ struct devlink_port devlink_port;
+ void *port_driver_priv;
+ u8 local_port;
+};
+
+void *mlxsw_core_port_driver_priv(struct mlxsw_core_port *mlxsw_core_port)
+{
+ return mlxsw_core_port->port_driver_priv;
+}
+EXPORT_SYMBOL(mlxsw_core_port_driver_priv);
+
+static bool mlxsw_core_port_check(struct mlxsw_core_port *mlxsw_core_port)
+{
+ return mlxsw_core_port->port_driver_priv != NULL;
+}
+
struct mlxsw_core {
struct mlxsw_driver *driver;
const struct mlxsw_bus *bus;
@@ -111,8 +130,10 @@ struct mlxsw_core {
struct {
u8 *mapping; /* lag_id+port_index to local_port mapping */
} lag;
- struct mlxsw_resources resources;
+ struct mlxsw_res res;
struct mlxsw_hwmon *hwmon;
+ struct mlxsw_thermal *thermal;
+ struct mlxsw_core_port ports[MLXSW_PORT_MAX_PORTS];
unsigned long driver_priv[0];
/* driver_priv has to be always the last item */
};
@@ -552,33 +573,18 @@ free_skb:
dev_kfree_skb(skb);
}
-static const struct mlxsw_rx_listener mlxsw_emad_rx_listener = {
- .func = mlxsw_emad_rx_listener_func,
- .local_port = MLXSW_PORT_DONT_CARE,
- .trap_id = MLXSW_TRAP_ID_ETHEMAD,
-};
-
-static int mlxsw_emad_traps_set(struct mlxsw_core *mlxsw_core)
-{
- char htgt_pl[MLXSW_REG_HTGT_LEN];
- char hpkt_pl[MLXSW_REG_HPKT_LEN];
- int err;
-
- mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_EMAD);
- err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl);
- if (err)
- return err;
-
- mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_TRAP_TO_CPU,
- MLXSW_TRAP_ID_ETHEMAD);
- return mlxsw_reg_write(mlxsw_core, MLXSW_REG(hpkt), hpkt_pl);
-}
+static const struct mlxsw_listener mlxsw_emad_rx_listener =
+ MLXSW_RXL(mlxsw_emad_rx_listener_func, ETHEMAD, TRAP_TO_CPU, false,
+ EMAD, DISCARD);
static int mlxsw_emad_init(struct mlxsw_core *mlxsw_core)
{
u64 tid;
int err;
+ if (!(mlxsw_core->bus->features & MLXSW_BUS_F_TXRX))
+ return 0;
+
/* Set the upper 32 bits of the transaction ID field to a random
* number. This allows us to discard EMADs addressed to other
* devices.
@@ -590,39 +596,33 @@ static int mlxsw_emad_init(struct mlxsw_core *mlxsw_core)
INIT_LIST_HEAD(&mlxsw_core->emad.trans_list);
spin_lock_init(&mlxsw_core->emad.trans_list_lock);
- err = mlxsw_core_rx_listener_register(mlxsw_core,
- &mlxsw_emad_rx_listener,
- mlxsw_core);
+ err = mlxsw_core_trap_register(mlxsw_core, &mlxsw_emad_rx_listener,
+ mlxsw_core);
if (err)
return err;
- err = mlxsw_emad_traps_set(mlxsw_core);
+ err = mlxsw_core->driver->basic_trap_groups_set(mlxsw_core);
if (err)
goto err_emad_trap_set;
-
mlxsw_core->emad.use_emad = true;
return 0;
err_emad_trap_set:
- mlxsw_core_rx_listener_unregister(mlxsw_core,
- &mlxsw_emad_rx_listener,
- mlxsw_core);
+ mlxsw_core_trap_unregister(mlxsw_core, &mlxsw_emad_rx_listener,
+ mlxsw_core);
return err;
}
static void mlxsw_emad_fini(struct mlxsw_core *mlxsw_core)
{
- char hpkt_pl[MLXSW_REG_HPKT_LEN];
- mlxsw_core->emad.use_emad = false;
- mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_DISCARD,
- MLXSW_TRAP_ID_ETHEMAD);
- mlxsw_reg_write(mlxsw_core, MLXSW_REG(hpkt), hpkt_pl);
+ if (!(mlxsw_core->bus->features & MLXSW_BUS_F_TXRX))
+ return;
- mlxsw_core_rx_listener_unregister(mlxsw_core,
- &mlxsw_emad_rx_listener,
- mlxsw_core);
+ mlxsw_core->emad.use_emad = false;
+ mlxsw_core_trap_unregister(mlxsw_core, &mlxsw_emad_rx_listener,
+ mlxsw_core);
}
static struct sk_buff *mlxsw_emad_alloc(const struct mlxsw_core *mlxsw_core,
@@ -822,17 +822,6 @@ static struct mlxsw_driver *mlxsw_core_driver_get(const char *kind)
spin_lock(&mlxsw_core_driver_list_lock);
mlxsw_driver = __driver_find(kind);
- if (!mlxsw_driver) {
- spin_unlock(&mlxsw_core_driver_list_lock);
- request_module(MLXSW_MODULE_ALIAS_PREFIX "%s", kind);
- spin_lock(&mlxsw_core_driver_list_lock);
- mlxsw_driver = __driver_find(kind);
- }
- if (mlxsw_driver) {
- if (!try_module_get(mlxsw_driver->owner))
- mlxsw_driver = NULL;
- }
-
spin_unlock(&mlxsw_core_driver_list_lock);
return mlxsw_driver;
}
@@ -844,9 +833,6 @@ static void mlxsw_core_driver_put(const char *kind)
spin_lock(&mlxsw_core_driver_list_lock);
mlxsw_driver = __driver_find(kind);
spin_unlock(&mlxsw_core_driver_list_lock);
- if (!mlxsw_driver)
- return;
- module_put(mlxsw_driver->owner);
}
static int mlxsw_core_debugfs_init(struct mlxsw_core *mlxsw_core)
@@ -933,6 +919,21 @@ static void *__dl_port(struct devlink_port *devlink_port)
return container_of(devlink_port, struct mlxsw_core_port, devlink_port);
}
+static int mlxsw_devlink_port_type_set(struct devlink_port *devlink_port,
+ enum devlink_port_type port_type)
+{
+ struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink);
+ struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
+ struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port);
+
+ if (!mlxsw_driver->port_type_set)
+ return -EOPNOTSUPP;
+
+ return mlxsw_driver->port_type_set(mlxsw_core,
+ mlxsw_core_port->local_port,
+ port_type);
+}
+
static int mlxsw_devlink_sb_port_pool_get(struct devlink_port *devlink_port,
unsigned int sb_index, u16 pool_index,
u32 *p_threshold)
@@ -941,7 +942,8 @@ static int mlxsw_devlink_sb_port_pool_get(struct devlink_port *devlink_port,
struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port);
- if (!mlxsw_driver->sb_port_pool_get)
+ if (!mlxsw_driver->sb_port_pool_get ||
+ !mlxsw_core_port_check(mlxsw_core_port))
return -EOPNOTSUPP;
return mlxsw_driver->sb_port_pool_get(mlxsw_core_port, sb_index,
pool_index, p_threshold);
@@ -955,7 +957,8 @@ static int mlxsw_devlink_sb_port_pool_set(struct devlink_port *devlink_port,
struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port);
- if (!mlxsw_driver->sb_port_pool_set)
+ if (!mlxsw_driver->sb_port_pool_set ||
+ !mlxsw_core_port_check(mlxsw_core_port))
return -EOPNOTSUPP;
return mlxsw_driver->sb_port_pool_set(mlxsw_core_port, sb_index,
pool_index, threshold);
@@ -971,7 +974,8 @@ mlxsw_devlink_sb_tc_pool_bind_get(struct devlink_port *devlink_port,
struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port);
- if (!mlxsw_driver->sb_tc_pool_bind_get)
+ if (!mlxsw_driver->sb_tc_pool_bind_get ||
+ !mlxsw_core_port_check(mlxsw_core_port))
return -EOPNOTSUPP;
return mlxsw_driver->sb_tc_pool_bind_get(mlxsw_core_port, sb_index,
tc_index, pool_type,
@@ -988,7 +992,8 @@ mlxsw_devlink_sb_tc_pool_bind_set(struct devlink_port *devlink_port,
struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port);
- if (!mlxsw_driver->sb_tc_pool_bind_set)
+ if (!mlxsw_driver->sb_tc_pool_bind_set ||
+ !mlxsw_core_port_check(mlxsw_core_port))
return -EOPNOTSUPP;
return mlxsw_driver->sb_tc_pool_bind_set(mlxsw_core_port, sb_index,
tc_index, pool_type,
@@ -1026,7 +1031,8 @@ mlxsw_devlink_sb_occ_port_pool_get(struct devlink_port *devlink_port,
struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port);
- if (!mlxsw_driver->sb_occ_port_pool_get)
+ if (!mlxsw_driver->sb_occ_port_pool_get ||
+ !mlxsw_core_port_check(mlxsw_core_port))
return -EOPNOTSUPP;
return mlxsw_driver->sb_occ_port_pool_get(mlxsw_core_port, sb_index,
pool_index, p_cur, p_max);
@@ -1042,7 +1048,8 @@ mlxsw_devlink_sb_occ_tc_port_bind_get(struct devlink_port *devlink_port,
struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port);
- if (!mlxsw_driver->sb_occ_tc_port_bind_get)
+ if (!mlxsw_driver->sb_occ_tc_port_bind_get ||
+ !mlxsw_core_port_check(mlxsw_core_port))
return -EOPNOTSUPP;
return mlxsw_driver->sb_occ_tc_port_bind_get(mlxsw_core_port,
sb_index, tc_index,
@@ -1050,6 +1057,7 @@ mlxsw_devlink_sb_occ_tc_port_bind_get(struct devlink_port *devlink_port,
}
static const struct devlink_ops mlxsw_devlink_ops = {
+ .port_type_set = mlxsw_devlink_port_type_set,
.port_split = mlxsw_devlink_port_split,
.port_unsplit = mlxsw_devlink_port_unsplit,
.sb_pool_get = mlxsw_devlink_sb_pool_get,
@@ -1101,14 +1109,15 @@ int mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
}
err = mlxsw_bus->init(bus_priv, mlxsw_core, mlxsw_driver->profile,
- &mlxsw_core->resources);
+ &mlxsw_core->res);
if (err)
goto err_bus_init;
- if (mlxsw_core->resources.max_lag_valid &&
- mlxsw_core->resources.max_ports_in_lag_valid) {
- alloc_size = sizeof(u8) * mlxsw_core->resources.max_lag *
- mlxsw_core->resources.max_ports_in_lag;
+ if (MLXSW_CORE_RES_VALID(mlxsw_core, MAX_LAG) &&
+ MLXSW_CORE_RES_VALID(mlxsw_core, MAX_LAG_MEMBERS)) {
+ alloc_size = sizeof(u8) *
+ MLXSW_CORE_RES_GET(mlxsw_core, MAX_LAG) *
+ MLXSW_CORE_RES_GET(mlxsw_core, MAX_LAG_MEMBERS);
mlxsw_core->lag.mapping = kzalloc(alloc_size, GFP_KERNEL);
if (!mlxsw_core->lag.mapping) {
err = -ENOMEM;
@@ -1128,9 +1137,16 @@ int mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
if (err)
goto err_hwmon_init;
- err = mlxsw_driver->init(mlxsw_core, mlxsw_bus_info);
+ err = mlxsw_thermal_init(mlxsw_core, mlxsw_bus_info,
+ &mlxsw_core->thermal);
if (err)
- goto err_driver_init;
+ goto err_thermal_init;
+
+ if (mlxsw_driver->init) {
+ err = mlxsw_driver->init(mlxsw_core, mlxsw_bus_info);
+ if (err)
+ goto err_driver_init;
+ }
err = mlxsw_core_debugfs_init(mlxsw_core);
if (err)
@@ -1139,8 +1155,11 @@ int mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
return 0;
err_debugfs_init:
- mlxsw_core->driver->fini(mlxsw_core);
+ if (mlxsw_core->driver->fini)
+ mlxsw_core->driver->fini(mlxsw_core);
err_driver_init:
+ mlxsw_thermal_fini(mlxsw_core->thermal);
+err_thermal_init:
err_hwmon_init:
devlink_unregister(devlink);
err_devlink_register:
@@ -1165,11 +1184,13 @@ void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core)
struct devlink *devlink = priv_to_devlink(mlxsw_core);
mlxsw_core_debugfs_fini(mlxsw_core);
- mlxsw_core->driver->fini(mlxsw_core);
+ if (mlxsw_core->driver->fini)
+ mlxsw_core->driver->fini(mlxsw_core);
+ mlxsw_thermal_fini(mlxsw_core->thermal);
devlink_unregister(devlink);
mlxsw_emad_fini(mlxsw_core);
- mlxsw_core->bus->fini(mlxsw_core->bus_priv);
kfree(mlxsw_core->lag.mapping);
+ mlxsw_core->bus->fini(mlxsw_core->bus_priv);
free_percpu(mlxsw_core->pcpu_stats);
devlink_free(devlink);
mlxsw_core_driver_put(device_kind);
@@ -1346,6 +1367,75 @@ void mlxsw_core_event_listener_unregister(struct mlxsw_core *mlxsw_core,
}
EXPORT_SYMBOL(mlxsw_core_event_listener_unregister);
+static int mlxsw_core_listener_register(struct mlxsw_core *mlxsw_core,
+ const struct mlxsw_listener *listener,
+ void *priv)
+{
+ if (listener->is_event)
+ return mlxsw_core_event_listener_register(mlxsw_core,
+ &listener->u.event_listener,
+ priv);
+ else
+ return mlxsw_core_rx_listener_register(mlxsw_core,
+ &listener->u.rx_listener,
+ priv);
+}
+
+static void mlxsw_core_listener_unregister(struct mlxsw_core *mlxsw_core,
+ const struct mlxsw_listener *listener,
+ void *priv)
+{
+ if (listener->is_event)
+ mlxsw_core_event_listener_unregister(mlxsw_core,
+ &listener->u.event_listener,
+ priv);
+ else
+ mlxsw_core_rx_listener_unregister(mlxsw_core,
+ &listener->u.rx_listener,
+ priv);
+}
+
+int mlxsw_core_trap_register(struct mlxsw_core *mlxsw_core,
+ const struct mlxsw_listener *listener, void *priv)
+{
+ char hpkt_pl[MLXSW_REG_HPKT_LEN];
+ int err;
+
+ err = mlxsw_core_listener_register(mlxsw_core, listener, priv);
+ if (err)
+ return err;
+
+ mlxsw_reg_hpkt_pack(hpkt_pl, listener->action, listener->trap_id,
+ listener->trap_group, listener->is_ctrl);
+ err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(hpkt), hpkt_pl);
+ if (err)
+ goto err_trap_set;
+
+ return 0;
+
+err_trap_set:
+ mlxsw_core_listener_unregister(mlxsw_core, listener, priv);
+ return err;
+}
+EXPORT_SYMBOL(mlxsw_core_trap_register);
+
+void mlxsw_core_trap_unregister(struct mlxsw_core *mlxsw_core,
+ const struct mlxsw_listener *listener,
+ void *priv)
+{
+ char hpkt_pl[MLXSW_REG_HPKT_LEN];
+
+ if (!listener->is_event) {
+ mlxsw_reg_hpkt_pack(hpkt_pl, listener->unreg_action,
+ listener->trap_id, listener->trap_group,
+ listener->is_ctrl);
+ mlxsw_reg_write(mlxsw_core, MLXSW_REG(hpkt), hpkt_pl);
+ }
+
+ mlxsw_core_listener_unregister(mlxsw_core, listener, priv);
+}
+EXPORT_SYMBOL(mlxsw_core_trap_unregister);
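
A hedged sketch of the register/unregister pairing these helpers are meant for; listeners is a hypothetical array of struct mlxsw_listener and priv the driver's private context:

	int i, err;

	for (i = 0; i < ARRAY_SIZE(listeners); i++) {
		err = mlxsw_core_trap_register(mlxsw_core, &listeners[i],
					       priv);
		if (err)
			goto err_listener_register;
	}
	return 0;

err_listener_register:
	/* unwind only the listeners registered so far */
	for (i--; i >= 0; i--)
		mlxsw_core_trap_unregister(mlxsw_core, &listeners[i], priv);
	return err;
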
+
static u64 mlxsw_core_tid_get(struct mlxsw_core *mlxsw_core)
{
return atomic64_inc_return(&mlxsw_core->emad.tid);
@@ -1615,7 +1705,7 @@ EXPORT_SYMBOL(mlxsw_core_skb_receive);
static int mlxsw_core_lag_mapping_index(struct mlxsw_core *mlxsw_core,
u16 lag_id, u8 port_index)
{
- return mlxsw_core->resources.max_ports_in_lag * lag_id +
+ return MLXSW_CORE_RES_GET(mlxsw_core, MAX_LAG_MEMBERS) * lag_id +
port_index;
}
@@ -1644,7 +1734,7 @@ void mlxsw_core_lag_mapping_clear(struct mlxsw_core *mlxsw_core,
{
int i;
- for (i = 0; i < mlxsw_core->resources.max_ports_in_lag; i++) {
+ for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_core, MAX_LAG_MEMBERS); i++) {
int index = mlxsw_core_lag_mapping_index(mlxsw_core,
lag_id, i);
@@ -1654,34 +1744,97 @@ void mlxsw_core_lag_mapping_clear(struct mlxsw_core *mlxsw_core,
}
EXPORT_SYMBOL(mlxsw_core_lag_mapping_clear);
-struct mlxsw_resources *mlxsw_core_resources_get(struct mlxsw_core *mlxsw_core)
+bool mlxsw_core_res_valid(struct mlxsw_core *mlxsw_core,
+ enum mlxsw_res_id res_id)
{
- return &mlxsw_core->resources;
+ return mlxsw_res_valid(&mlxsw_core->res, res_id);
}
-EXPORT_SYMBOL(mlxsw_core_resources_get);
+EXPORT_SYMBOL(mlxsw_core_res_valid);
-int mlxsw_core_port_init(struct mlxsw_core *mlxsw_core,
- struct mlxsw_core_port *mlxsw_core_port, u8 local_port,
- struct net_device *dev, bool split, u32 split_group)
+u64 mlxsw_core_res_get(struct mlxsw_core *mlxsw_core,
+ enum mlxsw_res_id res_id)
+{
+ return mlxsw_res_get(&mlxsw_core->res, res_id);
+}
+EXPORT_SYMBOL(mlxsw_core_res_get);
+
+int mlxsw_core_port_init(struct mlxsw_core *mlxsw_core, u8 local_port)
{
struct devlink *devlink = priv_to_devlink(mlxsw_core);
+ struct mlxsw_core_port *mlxsw_core_port =
+ &mlxsw_core->ports[local_port];
struct devlink_port *devlink_port = &mlxsw_core_port->devlink_port;
+ int err;
- if (split)
- devlink_port_split_set(devlink_port, split_group);
- devlink_port_type_eth_set(devlink_port, dev);
- return devlink_port_register(devlink, devlink_port, local_port);
+ mlxsw_core_port->local_port = local_port;
+ err = devlink_port_register(devlink, devlink_port, local_port);
+ if (err)
+ memset(mlxsw_core_port, 0, sizeof(*mlxsw_core_port));
+ return err;
}
EXPORT_SYMBOL(mlxsw_core_port_init);
-void mlxsw_core_port_fini(struct mlxsw_core_port *mlxsw_core_port)
+void mlxsw_core_port_fini(struct mlxsw_core *mlxsw_core, u8 local_port)
{
+ struct mlxsw_core_port *mlxsw_core_port =
+ &mlxsw_core->ports[local_port];
struct devlink_port *devlink_port = &mlxsw_core_port->devlink_port;
devlink_port_unregister(devlink_port);
+ memset(mlxsw_core_port, 0, sizeof(*mlxsw_core_port));
}
EXPORT_SYMBOL(mlxsw_core_port_fini);
+void mlxsw_core_port_eth_set(struct mlxsw_core *mlxsw_core, u8 local_port,
+ void *port_driver_priv, struct net_device *dev,
+ bool split, u32 split_group)
+{
+ struct mlxsw_core_port *mlxsw_core_port =
+ &mlxsw_core->ports[local_port];
+ struct devlink_port *devlink_port = &mlxsw_core_port->devlink_port;
+
+ mlxsw_core_port->port_driver_priv = port_driver_priv;
+ if (split)
+ devlink_port_split_set(devlink_port, split_group);
+ devlink_port_type_eth_set(devlink_port, dev);
+}
+EXPORT_SYMBOL(mlxsw_core_port_eth_set);
+
+void mlxsw_core_port_ib_set(struct mlxsw_core *mlxsw_core, u8 local_port,
+ void *port_driver_priv)
+{
+ struct mlxsw_core_port *mlxsw_core_port =
+ &mlxsw_core->ports[local_port];
+ struct devlink_port *devlink_port = &mlxsw_core_port->devlink_port;
+
+ mlxsw_core_port->port_driver_priv = port_driver_priv;
+ devlink_port_type_ib_set(devlink_port, NULL);
+}
+EXPORT_SYMBOL(mlxsw_core_port_ib_set);
+
+void mlxsw_core_port_clear(struct mlxsw_core *mlxsw_core, u8 local_port,
+ void *port_driver_priv)
+{
+ struct mlxsw_core_port *mlxsw_core_port =
+ &mlxsw_core->ports[local_port];
+ struct devlink_port *devlink_port = &mlxsw_core_port->devlink_port;
+
+ mlxsw_core_port->port_driver_priv = port_driver_priv;
+ devlink_port_type_clear(devlink_port);
+}
+EXPORT_SYMBOL(mlxsw_core_port_clear);
+
+enum devlink_port_type mlxsw_core_port_type_get(struct mlxsw_core *mlxsw_core,
+ u8 local_port)
+{
+ struct mlxsw_core_port *mlxsw_core_port =
+ &mlxsw_core->ports[local_port];
+ struct devlink_port *devlink_port = &mlxsw_core_port->devlink_port;
+
+ return devlink_port->type;
+}
+EXPORT_SYMBOL(mlxsw_core_port_type_get);
+
static void mlxsw_core_buf_dump_dbg(struct mlxsw_core *mlxsw_core,
const char *buf, size_t size)
{
@@ -1748,6 +1901,18 @@ int mlxsw_core_schedule_dw(struct delayed_work *dwork, unsigned long delay)
}
EXPORT_SYMBOL(mlxsw_core_schedule_dw);
+int mlxsw_core_schedule_odw(struct delayed_work *dwork, unsigned long delay)
+{
+ return queue_delayed_work(mlxsw_owq, dwork, delay);
+}
+EXPORT_SYMBOL(mlxsw_core_schedule_odw);
+
+void mlxsw_core_flush_owq(void)
+{
+ flush_workqueue(mlxsw_owq);
+}
+EXPORT_SYMBOL(mlxsw_core_flush_owq);
+
static int __init mlxsw_core_module_init(void)
{
int err;
@@ -1755,6 +1920,12 @@ static int __init mlxsw_core_module_init(void)
mlxsw_wq = alloc_workqueue(mlxsw_core_driver_name, WQ_MEM_RECLAIM, 0);
if (!mlxsw_wq)
return -ENOMEM;
+ mlxsw_owq = alloc_ordered_workqueue("%s_ordered", WQ_MEM_RECLAIM,
+ mlxsw_core_driver_name);
+ if (!mlxsw_owq) {
+ err = -ENOMEM;
+ goto err_alloc_ordered_workqueue;
+ }
mlxsw_core_dbg_root = debugfs_create_dir(mlxsw_core_driver_name, NULL);
if (!mlxsw_core_dbg_root) {
err = -ENOMEM;
@@ -1763,6 +1934,8 @@ static int __init mlxsw_core_module_init(void)
return 0;
err_debugfs_create_dir:
+ destroy_workqueue(mlxsw_owq);
+err_alloc_ordered_workqueue:
destroy_workqueue(mlxsw_wq);
return err;
}
@@ -1770,6 +1943,7 @@ err_debugfs_create_dir:
static void __exit mlxsw_core_module_exit(void)
{
debugfs_remove_recursive(mlxsw_core_dbg_root);
+ destroy_workqueue(mlxsw_owq);
destroy_workqueue(mlxsw_wq);
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.h b/drivers/net/ethernet/mellanox/mlxsw/core.h
index c4f550b6f783..a7f94fbc898b 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.h
@@ -48,17 +48,11 @@
#include "trap.h"
#include "reg.h"
-
#include "cmd.h"
-
-#define MLXSW_MODULE_ALIAS_PREFIX "mlxsw-driver-"
-#define MODULE_MLXSW_DRIVER_ALIAS(kind) \
- MODULE_ALIAS(MLXSW_MODULE_ALIAS_PREFIX kind)
-
-#define MLXSW_DEVICE_KIND_SWITCHX2 "switchx2"
-#define MLXSW_DEVICE_KIND_SPECTRUM "spectrum"
+#include "resources.h"
struct mlxsw_core;
+struct mlxsw_core_port;
struct mlxsw_driver;
struct mlxsw_bus;
struct mlxsw_bus_info;
@@ -96,6 +90,50 @@ struct mlxsw_event_listener {
enum mlxsw_event_trap_id trap_id;
};
+struct mlxsw_listener {
+ u16 trap_id;
+ union {
+ struct mlxsw_rx_listener rx_listener;
+ struct mlxsw_event_listener event_listener;
+ } u;
+ enum mlxsw_reg_hpkt_action action;
+ enum mlxsw_reg_hpkt_action unreg_action;
+ u8 trap_group;
+ bool is_ctrl; /* should go via control buffer or not */
+ bool is_event;
+};
+
+#define MLXSW_RXL(_func, _trap_id, _action, _is_ctrl, _trap_group, \
+ _unreg_action) \
+ { \
+ .trap_id = MLXSW_TRAP_ID_##_trap_id, \
+ .u.rx_listener = \
+ { \
+ .func = _func, \
+ .local_port = MLXSW_PORT_DONT_CARE, \
+ .trap_id = MLXSW_TRAP_ID_##_trap_id, \
+ }, \
+ .action = MLXSW_REG_HPKT_ACTION_##_action, \
+ .unreg_action = MLXSW_REG_HPKT_ACTION_##_unreg_action, \
+ .trap_group = MLXSW_REG_HTGT_TRAP_GROUP_##_trap_group, \
+ .is_ctrl = _is_ctrl, \
+ .is_event = false, \
+ }
+
+#define MLXSW_EVENTL(_func, _trap_id, _trap_group) \
+ { \
+ .trap_id = MLXSW_TRAP_ID_##_trap_id, \
+ .u.event_listener = \
+ { \
+ .func = _func, \
+ .trap_id = MLXSW_TRAP_ID_##_trap_id, \
+ }, \
+ .action = MLXSW_REG_HPKT_ACTION_TRAP_TO_CPU, \
+ .trap_group = MLXSW_REG_HTGT_TRAP_GROUP_##_trap_group, \
+ .is_ctrl = false, \
+ .is_event = true, \
+ }
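
A hedged example of declaring listeners with the new macros; the handler names are hypothetical, while ETHEMAD, PUDE, EMAD and DISCARD expand through the MLXSW_TRAP_ID_*, MLXSW_REG_HTGT_TRAP_GROUP_* and MLXSW_REG_HPKT_ACTION_* namespaces the macros paste into:

	static const struct mlxsw_listener mlxsw_foo_listeners[] = {
		/* packet trap: to CPU while registered, discarded after */
		MLXSW_RXL(mlxsw_foo_rx_func, ETHEMAD, TRAP_TO_CPU, false,
			  EMAD, DISCARD),
		/* firmware event: always trap-to-CPU, no unregister action */
		MLXSW_EVENTL(mlxsw_foo_pude_func, PUDE, EMAD),
	};
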
+
int mlxsw_core_rx_listener_register(struct mlxsw_core *mlxsw_core,
const struct mlxsw_rx_listener *rxl,
void *priv);
@@ -110,6 +148,13 @@ void mlxsw_core_event_listener_unregister(struct mlxsw_core *mlxsw_core,
const struct mlxsw_event_listener *el,
void *priv);
+int mlxsw_core_trap_register(struct mlxsw_core *mlxsw_core,
+ const struct mlxsw_listener *listener,
+ void *priv);
+void mlxsw_core_trap_unregister(struct mlxsw_core *mlxsw_core,
+ const struct mlxsw_listener *listener,
+ void *priv);
+
typedef void mlxsw_reg_trans_cb_t(struct mlxsw_core *mlxsw_core, char *payload,
size_t payload_len, unsigned long cb_priv);
@@ -148,25 +193,22 @@ u8 mlxsw_core_lag_mapping_get(struct mlxsw_core *mlxsw_core,
void mlxsw_core_lag_mapping_clear(struct mlxsw_core *mlxsw_core,
u16 lag_id, u8 local_port);
-struct mlxsw_core_port {
- struct devlink_port devlink_port;
-};
-
-static inline void *
-mlxsw_core_port_driver_priv(struct mlxsw_core_port *mlxsw_core_port)
-{
- /* mlxsw_core_port is ensured to always be the first field in driver
- * port structure.
- */
- return mlxsw_core_port;
-}
-
-int mlxsw_core_port_init(struct mlxsw_core *mlxsw_core,
- struct mlxsw_core_port *mlxsw_core_port, u8 local_port,
- struct net_device *dev, bool split, u32 split_group);
-void mlxsw_core_port_fini(struct mlxsw_core_port *mlxsw_core_port);
+void *mlxsw_core_port_driver_priv(struct mlxsw_core_port *mlxsw_core_port);
+int mlxsw_core_port_init(struct mlxsw_core *mlxsw_core, u8 local_port);
+void mlxsw_core_port_fini(struct mlxsw_core *mlxsw_core, u8 local_port);
+void mlxsw_core_port_eth_set(struct mlxsw_core *mlxsw_core, u8 local_port,
+ void *port_driver_priv, struct net_device *dev,
+ bool split, u32 split_group);
+void mlxsw_core_port_ib_set(struct mlxsw_core *mlxsw_core, u8 local_port,
+ void *port_driver_priv);
+void mlxsw_core_port_clear(struct mlxsw_core *mlxsw_core, u8 local_port,
+ void *port_driver_priv);
+enum devlink_port_type mlxsw_core_port_type_get(struct mlxsw_core *mlxsw_core,
+ u8 local_port);
int mlxsw_core_schedule_dw(struct delayed_work *dwork, unsigned long delay);
+int mlxsw_core_schedule_odw(struct delayed_work *dwork, unsigned long delay);
+void mlxsw_core_flush_owq(void);
#define MLXSW_CONFIG_PROFILE_SWID_COUNT 8
@@ -221,11 +263,13 @@ struct mlxsw_config_profile {
struct mlxsw_driver {
struct list_head list;
const char *kind;
- struct module *owner;
size_t priv_size;
int (*init)(struct mlxsw_core *mlxsw_core,
const struct mlxsw_bus_info *mlxsw_bus_info);
void (*fini)(struct mlxsw_core *mlxsw_core);
+ int (*basic_trap_groups_set)(struct mlxsw_core *mlxsw_core);
+ int (*port_type_set)(struct mlxsw_core *mlxsw_core, u8 local_port,
+ enum devlink_port_type new_type);
int (*port_split)(struct mlxsw_core *mlxsw_core, u8 local_port,
unsigned int count);
int (*port_unsplit)(struct mlxsw_core *mlxsw_core, u8 local_port);
@@ -266,45 +310,25 @@ struct mlxsw_driver {
const struct mlxsw_config_profile *profile;
};
-struct mlxsw_resources {
- u32 max_span_valid:1,
- max_lag_valid:1,
- max_ports_in_lag_valid:1,
- kvd_size_valid:1,
- kvd_single_min_size_valid:1,
- kvd_double_min_size_valid:1,
- max_virtual_routers_valid:1,
- max_system_ports_valid:1,
- max_vlan_groups_valid:1,
- max_regions_valid:1,
- max_rif_valid:1;
- u8 max_span;
- u8 max_lag;
- u8 max_ports_in_lag;
- u32 kvd_size;
- u32 kvd_single_min_size;
- u32 kvd_double_min_size;
- u16 max_virtual_routers;
- u16 max_system_ports;
- u16 max_vlan_groups;
- u16 max_regions;
- u16 max_rif;
+bool mlxsw_core_res_valid(struct mlxsw_core *mlxsw_core,
+ enum mlxsw_res_id res_id);
- /* Internal resources.
- * Determined by the SW, not queried from the HW.
- */
- u32 kvd_single_size;
- u32 kvd_double_size;
- u32 kvd_linear_size;
-};
+#define MLXSW_CORE_RES_VALID(res, short_res_id) \
+ mlxsw_core_res_valid(res, MLXSW_RES_ID_##short_res_id)
+
+u64 mlxsw_core_res_get(struct mlxsw_core *mlxsw_core,
+ enum mlxsw_res_id res_id);
-struct mlxsw_resources *mlxsw_core_resources_get(struct mlxsw_core *mlxsw_core);
+#define MLXSW_CORE_RES_GET(res, short_res_id) \
+ mlxsw_core_res_get(res, MLXSW_RES_ID_##short_res_id)
+
+#define MLXSW_BUS_F_TXRX BIT(0)
struct mlxsw_bus {
const char *kind;
int (*init)(void *bus_priv, struct mlxsw_core *mlxsw_core,
const struct mlxsw_config_profile *profile,
- struct mlxsw_resources *resources);
+ struct mlxsw_res *res);
void (*fini)(void *bus_priv);
bool (*skb_transmit_busy)(void *bus_priv,
const struct mlxsw_tx_info *tx_info);
@@ -315,6 +339,7 @@ struct mlxsw_bus {
char *in_mbox, size_t in_mbox_size,
char *out_mbox, size_t out_mbox_size,
u8 *p_status);
+ u8 features;
};
struct mlxsw_bus_info {
@@ -350,4 +375,28 @@ static inline int mlxsw_hwmon_init(struct mlxsw_core *mlxsw_core,
#endif
+struct mlxsw_thermal;
+
+#ifdef CONFIG_MLXSW_CORE_THERMAL
+
+int mlxsw_thermal_init(struct mlxsw_core *mlxsw_core,
+ const struct mlxsw_bus_info *mlxsw_bus_info,
+ struct mlxsw_thermal **p_thermal);
+void mlxsw_thermal_fini(struct mlxsw_thermal *thermal);
+
+#else
+
+static inline int mlxsw_thermal_init(struct mlxsw_core *mlxsw_core,
+ const struct mlxsw_bus_info *mlxsw_bus_info,
+ struct mlxsw_thermal **p_thermal)
+{
+ return 0;
+}
+
+static inline void mlxsw_thermal_fini(struct mlxsw_thermal *thermal)
+{
+}
+
+#endif
+
#endif
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c b/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c
index 1ac8bf187168..ab710e37af99 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c
@@ -262,7 +262,7 @@ static void mlxsw_hwmon_attr_add(struct mlxsw_hwmon *mlxsw_hwmon,
static int mlxsw_hwmon_temp_init(struct mlxsw_hwmon *mlxsw_hwmon)
{
- char mtcap_pl[MLXSW_REG_MTCAP_LEN];
+ char mtcap_pl[MLXSW_REG_MTCAP_LEN] = {0};
char mtmp_pl[MLXSW_REG_MTMP_LEN];
u8 sensor_count;
int i;
@@ -295,7 +295,7 @@ static int mlxsw_hwmon_temp_init(struct mlxsw_hwmon *mlxsw_hwmon)
static int mlxsw_hwmon_fans_init(struct mlxsw_hwmon *mlxsw_hwmon)
{
- char mfcr_pl[MLXSW_REG_MFCR_LEN];
+ char mfcr_pl[MLXSW_REG_MFCR_LEN] = {0};
enum mlxsw_reg_mfcr_pwm_frequency freq;
unsigned int type_index;
unsigned int num;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c b/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c
new file mode 100644
index 000000000000..d866c98c1a97
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c
@@ -0,0 +1,442 @@
+/*
+ * drivers/net/ethernet/mellanox/mlxsw/core_thermal.c
+ * Copyright (c) 2016 Ivan Vecera <cera@cera.cz>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/device.h>
+#include <linux/sysfs.h>
+#include <linux/thermal.h>
+#include <linux/err.h>
+
+#include "core.h"
+
+#define MLXSW_THERMAL_POLL_INT 1000 /* ms */
+#define MLXSW_THERMAL_MAX_TEMP 110000 /* 110C */
+#define MLXSW_THERMAL_MAX_STATE 10
+#define MLXSW_THERMAL_MAX_DUTY 255
+
+struct mlxsw_thermal_trip {
+ int type;
+ int temp;
+ int min_state;
+ int max_state;
+};
+
+static const struct mlxsw_thermal_trip default_thermal_trips[] = {
+ { /* In range - 0-40% PWM */
+ .type = THERMAL_TRIP_ACTIVE,
+ .temp = 75000,
+ .min_state = 0,
+ .max_state = (4 * MLXSW_THERMAL_MAX_STATE) / 10,
+ },
+ { /* High - 40-100% PWM */
+ .type = THERMAL_TRIP_ACTIVE,
+ .temp = 80000,
+ .min_state = (4 * MLXSW_THERMAL_MAX_STATE) / 10,
+ .max_state = MLXSW_THERMAL_MAX_STATE,
+ },
+ {
+ /* Very high - 100% PWM */
+ .type = THERMAL_TRIP_ACTIVE,
+ .temp = 85000,
+ .min_state = MLXSW_THERMAL_MAX_STATE,
+ .max_state = MLXSW_THERMAL_MAX_STATE,
+ },
+ { /* Warning */
+ .type = THERMAL_TRIP_HOT,
+ .temp = 105000,
+ .min_state = MLXSW_THERMAL_MAX_STATE,
+ .max_state = MLXSW_THERMAL_MAX_STATE,
+ },
+ { /* Critical - soft poweroff */
+ .type = THERMAL_TRIP_CRITICAL,
+ .temp = MLXSW_THERMAL_MAX_TEMP,
+ .min_state = MLXSW_THERMAL_MAX_STATE,
+ .max_state = MLXSW_THERMAL_MAX_STATE,
+ }
+};
+
+#define MLXSW_THERMAL_NUM_TRIPS ARRAY_SIZE(default_thermal_trips)
+
+/* Make sure all trips are writable */
+#define MLXSW_THERMAL_TRIP_MASK (BIT(MLXSW_THERMAL_NUM_TRIPS) - 1)
+
+struct mlxsw_thermal {
+ struct mlxsw_core *core;
+ const struct mlxsw_bus_info *bus_info;
+ struct thermal_zone_device *tzdev;
+ struct thermal_cooling_device *cdevs[MLXSW_MFCR_PWMS_MAX];
+ struct mlxsw_thermal_trip trips[MLXSW_THERMAL_NUM_TRIPS];
+ enum thermal_device_mode mode;
+};
+
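+/* Linear conversion between cooling states (0..MLXSW_THERMAL_MAX_STATE) and
+ * PWM duty cycles (0..MLXSW_THERMAL_MAX_DUTY); e.g. state 4 maps to duty
+ * DIV_ROUND_CLOSEST(4 * 255, 10) = 102, and duty 102 maps back to state 4.
+ */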
+static inline u8 mlxsw_state_to_duty(int state)
+{
+ return DIV_ROUND_CLOSEST(state * MLXSW_THERMAL_MAX_DUTY,
+ MLXSW_THERMAL_MAX_STATE);
+}
+
+static inline int mlxsw_duty_to_state(u8 duty)
+{
+ return DIV_ROUND_CLOSEST(duty * MLXSW_THERMAL_MAX_STATE,
+ MLXSW_THERMAL_MAX_DUTY);
+}
+
+static int mlxsw_get_cooling_device_idx(struct mlxsw_thermal *thermal,
+ struct thermal_cooling_device *cdev)
+{
+ int i;
+
+ for (i = 0; i < MLXSW_MFCR_PWMS_MAX; i++)
+ if (thermal->cdevs[i] == cdev)
+ return i;
+
+ return -ENODEV;
+}
+
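+/* Bind one of our PWM cooling devices to every trip point, using the
+ * per-trip min/max cooling states from the trip table.
+ */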
+static int mlxsw_thermal_bind(struct thermal_zone_device *tzdev,
+ struct thermal_cooling_device *cdev)
+{
+ struct mlxsw_thermal *thermal = tzdev->devdata;
+ struct device *dev = thermal->bus_info->dev;
+ int i, err;
+
+ /* If the cooling device is one of ours, bind it. */
+ if (mlxsw_get_cooling_device_idx(thermal, cdev) < 0)
+ return 0;
+
+ for (i = 0; i < MLXSW_THERMAL_NUM_TRIPS; i++) {
+ const struct mlxsw_thermal_trip *trip = &thermal->trips[i];
+
+ err = thermal_zone_bind_cooling_device(tzdev, i, cdev,
+ trip->max_state,
+ trip->min_state,
+ THERMAL_WEIGHT_DEFAULT);
+ if (err < 0) {
+ dev_err(dev, "Failed to bind cooling device to trip %d\n", i);
+ return err;
+ }
+ }
+ return 0;
+}
+
+static int mlxsw_thermal_unbind(struct thermal_zone_device *tzdev,
+ struct thermal_cooling_device *cdev)
+{
+ struct mlxsw_thermal *thermal = tzdev->devdata;
+ struct device *dev = thermal->bus_info->dev;
+ int i;
+ int err;
+
+ /* If the cooling device is one of ours, unbind it. */
+ if (mlxsw_get_cooling_device_idx(thermal, cdev) < 0)
+ return 0;
+
+ for (i = 0; i < MLXSW_THERMAL_NUM_TRIPS; i++) {
+ err = thermal_zone_unbind_cooling_device(tzdev, i, cdev);
+ if (err < 0) {
+ dev_err(dev, "Failed to unbind cooling device\n");
+ return err;
+ }
+ }
+ return 0;
+}
+
+static int mlxsw_thermal_get_mode(struct thermal_zone_device *tzdev,
+ enum thermal_device_mode *mode)
+{
+ struct mlxsw_thermal *thermal = tzdev->devdata;
+
+ *mode = thermal->mode;
+
+ return 0;
+}
+
+static int mlxsw_thermal_set_mode(struct thermal_zone_device *tzdev,
+ enum thermal_device_mode mode)
+{
+ struct mlxsw_thermal *thermal = tzdev->devdata;
+
+ mutex_lock(&tzdev->lock);
+
+ if (mode == THERMAL_DEVICE_ENABLED)
+ tzdev->polling_delay = MLXSW_THERMAL_POLL_INT;
+ else
+ tzdev->polling_delay = 0;
+
+ mutex_unlock(&tzdev->lock);
+
+ thermal->mode = mode;
+ thermal_zone_device_update(tzdev, THERMAL_EVENT_UNSPECIFIED);
+
+ return 0;
+}
+
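+/* Read the current temperature from the ASIC's first sensor (index 0) via
+ * the MTMP register.
+ */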
+static int mlxsw_thermal_get_temp(struct thermal_zone_device *tzdev,
+ int *p_temp)
+{
+ struct mlxsw_thermal *thermal = tzdev->devdata;
+ struct device *dev = thermal->bus_info->dev;
+ char mtmp_pl[MLXSW_REG_MTMP_LEN];
+ unsigned int temp;
+ int err;
+
+ mlxsw_reg_mtmp_pack(mtmp_pl, 0, false, false);
+
+ err = mlxsw_reg_query(thermal->core, MLXSW_REG(mtmp), mtmp_pl);
+ if (err) {
+ dev_err(dev, "Failed to query temp sensor\n");
+ return err;
+ }
+ mlxsw_reg_mtmp_unpack(mtmp_pl, &temp, NULL, NULL);
+
+ *p_temp = (int) temp;
+ return 0;
+}
+
+static int mlxsw_thermal_get_trip_type(struct thermal_zone_device *tzdev,
+ int trip,
+ enum thermal_trip_type *p_type)
+{
+ struct mlxsw_thermal *thermal = tzdev->devdata;
+
+ if (trip < 0 || trip >= MLXSW_THERMAL_NUM_TRIPS)
+ return -EINVAL;
+
+ *p_type = thermal->trips[trip].type;
+ return 0;
+}
+
+static int mlxsw_thermal_get_trip_temp(struct thermal_zone_device *tzdev,
+ int trip, int *p_temp)
+{
+ struct mlxsw_thermal *thermal = tzdev->devdata;
+
+ if (trip < 0 || trip >= MLXSW_THERMAL_NUM_TRIPS)
+ return -EINVAL;
+
+ *p_temp = thermal->trips[trip].temp;
+ return 0;
+}
+
+static int mlxsw_thermal_set_trip_temp(struct thermal_zone_device *tzdev,
+ int trip, int temp)
+{
+ struct mlxsw_thermal *thermal = tzdev->devdata;
+
+ if (trip < 0 || trip >= MLXSW_THERMAL_NUM_TRIPS ||
+ temp > MLXSW_THERMAL_MAX_TEMP)
+ return -EINVAL;
+
+ thermal->trips[trip].temp = temp;
+ return 0;
+}
+
+static struct thermal_zone_device_ops mlxsw_thermal_ops = {
+ .bind = mlxsw_thermal_bind,
+ .unbind = mlxsw_thermal_unbind,
+ .get_mode = mlxsw_thermal_get_mode,
+ .set_mode = mlxsw_thermal_set_mode,
+ .get_temp = mlxsw_thermal_get_temp,
+ .get_trip_type = mlxsw_thermal_get_trip_type,
+ .get_trip_temp = mlxsw_thermal_get_trip_temp,
+ .set_trip_temp = mlxsw_thermal_set_trip_temp,
+};
+
+static int mlxsw_thermal_get_max_state(struct thermal_cooling_device *cdev,
+ unsigned long *p_state)
+{
+ *p_state = MLXSW_THERMAL_MAX_STATE;
+ return 0;
+}
+
+static int mlxsw_thermal_get_cur_state(struct thermal_cooling_device *cdev,
+ unsigned long *p_state)
+{
+ struct mlxsw_thermal *thermal = cdev->devdata;
+ struct device *dev = thermal->bus_info->dev;
+ char mfsc_pl[MLXSW_REG_MFSC_LEN];
+ int err, idx;
+ u8 duty;
+
+ idx = mlxsw_get_cooling_device_idx(thermal, cdev);
+ if (idx < 0)
+ return idx;
+
+ mlxsw_reg_mfsc_pack(mfsc_pl, idx, 0);
+ err = mlxsw_reg_query(thermal->core, MLXSW_REG(mfsc), mfsc_pl);
+ if (err) {
+ dev_err(dev, "Failed to query PWM duty\n");
+ return err;
+ }
+
+ duty = mlxsw_reg_mfsc_pwm_duty_cycle_get(mfsc_pl);
+ *p_state = mlxsw_duty_to_state(duty);
+ return 0;
+}
+
+static int mlxsw_thermal_set_cur_state(struct thermal_cooling_device *cdev,
+ unsigned long state)
+{
+ struct mlxsw_thermal *thermal = cdev->devdata;
+ struct device *dev = thermal->bus_info->dev;
+ char mfsc_pl[MLXSW_REG_MFSC_LEN];
+ int err, idx;
+
+ idx = mlxsw_get_cooling_device_idx(thermal, cdev);
+ if (idx < 0)
+ return idx;
+
+ mlxsw_reg_mfsc_pack(mfsc_pl, idx, mlxsw_state_to_duty(state));
+ err = mlxsw_reg_write(thermal->core, MLXSW_REG(mfsc), mfsc_pl);
+ if (err) {
+ dev_err(dev, "Failed to write PWM duty\n");
+ return err;
+ }
+ return 0;
+}
+
+static const struct thermal_cooling_device_ops mlxsw_cooling_ops = {
+ .get_max_state = mlxsw_thermal_get_max_state,
+ .get_cur_state = mlxsw_thermal_get_cur_state,
+ .set_cur_state = mlxsw_thermal_set_cur_state,
+};
+
+int mlxsw_thermal_init(struct mlxsw_core *core,
+ const struct mlxsw_bus_info *bus_info,
+ struct mlxsw_thermal **p_thermal)
+{
+ char mfcr_pl[MLXSW_REG_MFCR_LEN] = { 0 };
+ enum mlxsw_reg_mfcr_pwm_frequency freq;
+ struct device *dev = bus_info->dev;
+ struct mlxsw_thermal *thermal;
+ u16 tacho_active;
+ u8 pwm_active;
+ int err, i;
+
+ thermal = devm_kzalloc(dev, sizeof(*thermal),
+ GFP_KERNEL);
+ if (!thermal)
+ return -ENOMEM;
+
+ thermal->core = core;
+ thermal->bus_info = bus_info;
+ memcpy(thermal->trips, default_thermal_trips, sizeof(thermal->trips));
+
+ err = mlxsw_reg_query(thermal->core, MLXSW_REG(mfcr), mfcr_pl);
+ if (err) {
+ dev_err(dev, "Failed to probe PWMs\n");
+ goto err_free_thermal;
+ }
+ mlxsw_reg_mfcr_unpack(mfcr_pl, &freq, &tacho_active, &pwm_active);
+
+ for (i = 0; i < MLXSW_MFCR_TACHOS_MAX; i++) {
+ if (tacho_active & BIT(i)) {
+ char mfsl_pl[MLXSW_REG_MFSL_LEN];
+
+ mlxsw_reg_mfsl_pack(mfsl_pl, i, 0, 0);
+
+ /* Query the register first to preserve the maximum RPM setting. */
+ err = mlxsw_reg_query(thermal->core, MLXSW_REG(mfsl),
+ mfsl_pl);
+ if (err)
+ goto err_free_thermal;
+
+ /* Set the minimal RPM to 0. */
+ mlxsw_reg_mfsl_tach_min_set(mfsl_pl, 0);
+ err = mlxsw_reg_write(thermal->core, MLXSW_REG(mfsl),
+ mfsl_pl);
+ if (err)
+ goto err_free_thermal;
+ }
+ }
+ for (i = 0; i < MLXSW_MFCR_PWMS_MAX; i++) {
+ if (pwm_active & BIT(i)) {
+ struct thermal_cooling_device *cdev;
+
+ cdev = thermal_cooling_device_register("Fan", thermal,
+ &mlxsw_cooling_ops);
+ if (IS_ERR(cdev)) {
+ err = PTR_ERR(cdev);
+ dev_err(dev, "Failed to register cooling device\n");
+ goto err_unreg_cdevs;
+ }
+ thermal->cdevs[i] = cdev;
+ }
+ }
+
+ thermal->tzdev = thermal_zone_device_register("mlxsw",
+ MLXSW_THERMAL_NUM_TRIPS,
+ MLXSW_THERMAL_TRIP_MASK,
+ thermal,
+ &mlxsw_thermal_ops,
+ NULL, 0,
+ MLXSW_THERMAL_POLL_INT);
+ if (IS_ERR(thermal->tzdev)) {
+ err = PTR_ERR(thermal->tzdev);
+ dev_err(dev, "Failed to register thermal zone\n");
+ goto err_unreg_cdevs;
+ }
+
+ thermal->mode = THERMAL_DEVICE_ENABLED;
+ *p_thermal = thermal;
+ return 0;
+err_unreg_cdevs:
+ for (i = 0; i < MLXSW_MFCR_PWMS_MAX; i++)
+ if (thermal->cdevs[i])
+ thermal_cooling_device_unregister(thermal->cdevs[i]);
+err_free_thermal:
+ devm_kfree(dev, thermal);
+ return err;
+}
+
+void mlxsw_thermal_fini(struct mlxsw_thermal *thermal)
+{
+ int i;
+
+ if (thermal->tzdev) {
+ thermal_zone_device_unregister(thermal->tzdev);
+ thermal->tzdev = NULL;
+ }
+
+ for (i = 0; i < MLXSW_MFCR_PWMS_MAX; i++) {
+ if (thermal->cdevs[i]) {
+ thermal_cooling_device_unregister(thermal->cdevs[i]);
+ thermal->cdevs[i] = NULL;
+ }
+ }
+
+ devm_kfree(thermal->bus_info->dev, thermal);
+}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/i2c.c b/drivers/net/ethernet/mellanox/mlxsw/i2c.c
new file mode 100644
index 000000000000..e50c8db2602a
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/i2c.c
@@ -0,0 +1,582 @@
+/*
+ * drivers/net/ethernet/mellanox/mlxsw/i2c.c
+ * Copyright (c) 2016 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2016 Vadim Pasternak <vadimp@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/jiffies.h>
+#include <linux/kernel.h>
+#include <linux/mutex.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/slab.h>
+
+#include "cmd.h"
+#include "core.h"
+#include "i2c.h"
+
+static const char mlxsw_i2c_driver_name[] = "mlxsw_i2c";
+
+#define MLXSW_I2C_CIR2_BASE 0x72000
+#define MLXSW_I2C_CIR_STATUS_OFF 0x18
+#define MLXSW_I2C_CIR2_OFF_STATUS (MLXSW_I2C_CIR2_BASE + \
+ MLXSW_I2C_CIR_STATUS_OFF)
+#define MLXSW_I2C_OPMOD_SHIFT 12
+#define MLXSW_I2C_GO_BIT_SHIFT 23
+#define MLXSW_I2C_CIR_CTRL_STATUS_SHIFT 24
+#define MLXSW_I2C_GO_BIT BIT(MLXSW_I2C_GO_BIT_SHIFT)
+#define MLXSW_I2C_GO_OPMODE BIT(MLXSW_I2C_OPMOD_SHIFT)
+#define MLXSW_I2C_SET_IMM_CMD (MLXSW_I2C_GO_OPMODE | \
+ MLXSW_CMD_OPCODE_QUERY_FW)
+#define MLXSW_I2C_PUSH_IMM_CMD (MLXSW_I2C_GO_BIT | \
+ MLXSW_I2C_SET_IMM_CMD)
+#define MLXSW_I2C_SET_CMD (MLXSW_CMD_OPCODE_ACCESS_REG)
+#define MLXSW_I2C_PUSH_CMD (MLXSW_I2C_GO_BIT | MLXSW_I2C_SET_CMD)
+#define MLXSW_I2C_TLV_HDR_SIZE 0x10
+#define MLXSW_I2C_ADDR_WIDTH 4
+#define MLXSW_I2C_PUSH_CMD_SIZE (MLXSW_I2C_ADDR_WIDTH + 4)
+#define MLXSW_I2C_READ_SEMA_SIZE 4
+#define MLXSW_I2C_PREP_SIZE (MLXSW_I2C_ADDR_WIDTH + 28)
+#define MLXSW_I2C_MBOX_SIZE 20
+#define MLXSW_I2C_MBOX_OUT_PARAM_OFF 12
+#define MLXSW_I2C_MAX_BUFF_SIZE 32
+#define MLXSW_I2C_MBOX_OFFSET_BITS 20
+#define MLXSW_I2C_MBOX_SIZE_BITS 12
+#define MLXSW_I2C_ADDR_BUF_SIZE 4
+#define MLXSW_I2C_BLK_MAX 32
+#define MLXSW_I2C_RETRY 5
+#define MLXSW_I2C_TIMEOUT_MSECS 5000
+
+/**
+ * struct mlxsw_i2c - device private data:
+ * @cmd.mb_size_in: input mailbox size;
+ * @cmd.mb_off_in: input mailbox offset in register space;
+ * @cmd.mb_size_out: output mailbox size;
+ * @cmd.mb_off_out: output mailbox offset in register space;
+ * @cmd.lock: command execution lock;
+ * @dev: I2C device;
+ * @core: switch core pointer;
+ * @bus_info: bus info block;
+ */
+struct mlxsw_i2c {
+ struct {
+ u32 mb_size_in;
+ u32 mb_off_in;
+ u32 mb_size_out;
+ u32 mb_off_out;
+ struct mutex lock;
+ } cmd;
+ struct device *dev;
+ struct mlxsw_core *core;
+ struct mlxsw_bus_info bus_info;
+};
+
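+/* Helpers for building i2c_msg transactions: a read is a two-message
+ * transfer (write the 4-byte register address, then read the payload back),
+ * while a write is a single message carrying address and data together.
+ */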
+#define MLXSW_I2C_READ_MSG(_client, _addr_buf, _buf, _len) { \
+ { .addr = (_client)->addr, \
+ .buf = (_addr_buf), \
+ .len = MLXSW_I2C_ADDR_BUF_SIZE, \
+ .flags = 0 }, \
+ { .addr = (_client)->addr, \
+ .buf = (_buf), \
+ .len = (_len), \
+ .flags = I2C_M_RD } }
+
+#define MLXSW_I2C_WRITE_MSG(_client, _buf, _len) \
+ { .addr = (_client)->addr, \
+ .buf = (u8 *)(_buf), \
+ .len = (_len), \
+ .flags = 0 }
+
+/* Routine parses the input and output mailbox offsets and sizes. */
+static inline void
+mlxsw_i2c_convert_mbox(struct mlxsw_i2c *mlxsw_i2c, u8 *buf)
+{
+ u32 tmp;
+
+ /* Local in/out mailboxes: 20 bits for offset, 12 for size */
+ tmp = be32_to_cpup((__be32 *) buf);
+ mlxsw_i2c->cmd.mb_off_in = tmp &
+ GENMASK(MLXSW_I2C_MBOX_OFFSET_BITS - 1, 0);
+ mlxsw_i2c->cmd.mb_size_in = (tmp & GENMASK(31,
+ MLXSW_I2C_MBOX_OFFSET_BITS)) >>
+ MLXSW_I2C_MBOX_OFFSET_BITS;
+
+ tmp = be32_to_cpup((__be32 *) (buf + MLXSW_I2C_ADDR_WIDTH));
+ mlxsw_i2c->cmd.mb_off_out = tmp &
+ GENMASK(MLXSW_I2C_MBOX_OFFSET_BITS - 1, 0);
+ mlxsw_i2c->cmd.mb_size_out = (tmp & GENMASK(31,
+ MLXSW_I2C_MBOX_OFFSET_BITS)) >>
+ MLXSW_I2C_MBOX_OFFSET_BITS;
+}
+
+/* Routine obtains the register size from the mailbox buffer: the low 11 bits
+ * of the 16-bit word following the TLV header hold the register payload
+ * length in 32-bit words.
+ */
+static inline int mlxsw_i2c_get_reg_size(u8 *in_mbox)
+{
+ u16 tmp = be16_to_cpup((__be16 *) (in_mbox + MLXSW_I2C_TLV_HDR_SIZE));
+
+ return (tmp & 0x7ff) * 4 + MLXSW_I2C_TLV_HDR_SIZE;
+}
+
+/* Routine sets the I2C device's internal offset in the transaction buffer. */
+static inline void mlxsw_i2c_set_slave_addr(u8 *buf, u32 off)
+{
+ __be32 *val = (__be32 *) buf;
+
+ *val = htonl(off);
+}
+
+/* Routine waits until go bit is cleared. */
+static int mlxsw_i2c_wait_go_bit(struct i2c_client *client,
+ struct mlxsw_i2c *mlxsw_i2c, u8 *p_status)
+{
+ u8 addr_buf[MLXSW_I2C_ADDR_BUF_SIZE];
+ u8 buf[MLXSW_I2C_READ_SEMA_SIZE];
+ int len = MLXSW_I2C_READ_SEMA_SIZE;
+ struct i2c_msg read_sema[] =
+ MLXSW_I2C_READ_MSG(client, addr_buf, buf, len);
+ bool wait_done = false;
+ unsigned long end;
+ int i = 0, err;
+
+ mlxsw_i2c_set_slave_addr(addr_buf, MLXSW_I2C_CIR2_OFF_STATUS);
+
+ end = jiffies + msecs_to_jiffies(MLXSW_I2C_TIMEOUT_MSECS);
+ do {
+ u32 ctrl;
+
+ err = i2c_transfer(client->adapter, read_sema,
+ ARRAY_SIZE(read_sema));
+
+ ctrl = be32_to_cpu(*(__be32 *) buf);
+ if (err == ARRAY_SIZE(read_sema)) {
+ if (!(ctrl & MLXSW_I2C_GO_BIT)) {
+ wait_done = true;
+ *p_status = ctrl >>
+ MLXSW_I2C_CIR_CTRL_STATUS_SHIFT;
+ break;
+ }
+ }
+ cond_resched();
+ } while ((time_before(jiffies, end)) || (i++ < MLXSW_I2C_RETRY));
+
+ if (wait_done) {
+ if (*p_status)
+ err = -EIO;
+ } else {
+ return -ETIMEDOUT;
+ }
+
+ return err > 0 ? 0 : err;
+}
+
+/* Routine posts a command to the ASIC through the mailbox. */
+static int mlxsw_i2c_write_cmd(struct i2c_client *client,
+ struct mlxsw_i2c *mlxsw_i2c,
+ int immediate)
+{
+ __be32 push_cmd_buf[MLXSW_I2C_PUSH_CMD_SIZE / 4] = {
+ 0, cpu_to_be32(MLXSW_I2C_PUSH_IMM_CMD)
+ };
+ __be32 prep_cmd_buf[MLXSW_I2C_PREP_SIZE / 4] = {
+ 0, 0, 0, 0, 0, 0,
+ cpu_to_be32(client->adapter->nr & 0xffff),
+ cpu_to_be32(MLXSW_I2C_SET_IMM_CMD)
+ };
+ struct i2c_msg push_cmd =
+ MLXSW_I2C_WRITE_MSG(client, push_cmd_buf,
+ MLXSW_I2C_PUSH_CMD_SIZE);
+ struct i2c_msg prep_cmd =
+ MLXSW_I2C_WRITE_MSG(client, prep_cmd_buf, MLXSW_I2C_PREP_SIZE);
+ int err;
+
+ if (!immediate) {
+ push_cmd_buf[1] = cpu_to_be32(MLXSW_I2C_PUSH_CMD);
+ prep_cmd_buf[7] = cpu_to_be32(MLXSW_I2C_SET_CMD);
+ }
+ mlxsw_i2c_set_slave_addr((u8 *)prep_cmd_buf,
+ MLXSW_I2C_CIR2_BASE);
+ mlxsw_i2c_set_slave_addr((u8 *)push_cmd_buf,
+ MLXSW_I2C_CIR2_OFF_STATUS);
+
+ /* Prepare Command Interface Register for transaction */
+ err = i2c_transfer(client->adapter, &prep_cmd, 1);
+ if (err < 0)
+ return err;
+ else if (err != 1)
+ return -EIO;
+
+ /* Write out Command Interface Register GO bit to push transaction */
+ err = i2c_transfer(client->adapter, &push_cmd, 1);
+ if (err < 0)
+ return err;
+ else if (err != 1)
+ return -EIO;
+
+ return 0;
+}
+
+/* Routine obtains the mailbox offsets from the ASIC register space. */
+static int mlxsw_i2c_get_mbox(struct i2c_client *client,
+ struct mlxsw_i2c *mlxsw_i2c)
+{
+ u8 addr_buf[MLXSW_I2C_ADDR_BUF_SIZE];
+ u8 buf[MLXSW_I2C_MBOX_SIZE];
+ struct i2c_msg mbox_cmd[] =
+ MLXSW_I2C_READ_MSG(client, addr_buf, buf, MLXSW_I2C_MBOX_SIZE);
+ int err;
+
+ /* Read mailbox offsets. */
+ mlxsw_i2c_set_slave_addr(addr_buf, MLXSW_I2C_CIR2_BASE);
+ err = i2c_transfer(client->adapter, mbox_cmd, 2);
+ if (err != 2) {
+ dev_err(&client->dev, "Could not obtain mail boxes\n");
+ if (!err)
+ return -EIO;
+ else
+ return err;
+ }
+
+ /* Parse mailbox offsets and sizes. */
+ mlxsw_i2c_convert_mbox(mlxsw_i2c, &buf[MLXSW_I2C_MBOX_OUT_PARAM_OFF]);
+
+ return err;
+}
+
+/* Routine sends an I2C write transaction to the ASIC device. */
+static int
+mlxsw_i2c_write(struct device *dev, size_t in_mbox_size, u8 *in_mbox, int num,
+ u8 *p_status)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct mlxsw_i2c *mlxsw_i2c = i2c_get_clientdata(client);
+ unsigned long timeout = msecs_to_jiffies(MLXSW_I2C_TIMEOUT_MSECS);
+ u8 tran_buf[MLXSW_I2C_MAX_BUFF_SIZE + MLXSW_I2C_ADDR_BUF_SIZE];
+ int off = mlxsw_i2c->cmd.mb_off_in, chunk_size, i, j;
+ unsigned long end;
+ struct i2c_msg write_tran =
+ MLXSW_I2C_WRITE_MSG(client, tran_buf, MLXSW_I2C_PUSH_CMD_SIZE);
+ int err;
+
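+ /* Write the input mailbox content in chunks of at most
+ * MLXSW_I2C_BLK_MAX bytes, each prefixed with its target address.
+ */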
+ for (i = 0; i < num; i++) {
+ chunk_size = (in_mbox_size > MLXSW_I2C_BLK_MAX) ?
+ MLXSW_I2C_BLK_MAX : in_mbox_size;
+ write_tran.len = MLXSW_I2C_ADDR_WIDTH + chunk_size;
+ mlxsw_i2c_set_slave_addr(tran_buf, off);
+ memcpy(&tran_buf[MLXSW_I2C_ADDR_BUF_SIZE], in_mbox +
+ chunk_size * i, chunk_size);
+
+ j = 0;
+ end = jiffies + timeout;
+ do {
+ err = i2c_transfer(client->adapter, &write_tran, 1);
+ if (err == 1)
+ break;
+
+ cond_resched();
+ } while ((time_before(jiffies, end)) ||
+ (j++ < MLXSW_I2C_RETRY));
+
+ if (err != 1) {
+ if (!err)
+ err = -EIO;
+ return err;
+ }
+
+ off += chunk_size;
+ in_mbox_size -= chunk_size;
+ }
+
+ /* Prepare and write out Command Interface Register for transaction. */
+ err = mlxsw_i2c_write_cmd(client, mlxsw_i2c, 0);
+ if (err) {
+ dev_err(&client->dev, "Could not start transaction");
+ return -EIO;
+ }
+
+ /* Wait until go bit is cleared. */
+ err = mlxsw_i2c_wait_go_bit(client, mlxsw_i2c, p_status);
+ if (err) {
+ dev_err(&client->dev, "HW semaphore is not released");
+ return err;
+ }
+
+ /* Validate transaction completion status. */
+ if (*p_status) {
+ dev_err(&client->dev, "Bad transaction completion status %x\n",
+ *p_status);
+ return -EIO;
+ }
+
+ return err > 0 ? 0 : err;
+}
+
+/* Routine executes an I2C command. */
+static int
+mlxsw_i2c_cmd(struct device *dev, size_t in_mbox_size, u8 *in_mbox,
+ size_t out_mbox_size, u8 *out_mbox, u8 *status)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct mlxsw_i2c *mlxsw_i2c = i2c_get_clientdata(client);
+ unsigned long timeout = msecs_to_jiffies(MLXSW_I2C_TIMEOUT_MSECS);
+ u8 tran_buf[MLXSW_I2C_ADDR_BUF_SIZE];
+ int num, chunk_size, reg_size, i, j;
+ int off = mlxsw_i2c->cmd.mb_off_out;
+ unsigned long end;
+ struct i2c_msg read_tran[] =
+ MLXSW_I2C_READ_MSG(client, tran_buf, NULL, 0);
+ int err;
+
+ WARN_ON(in_mbox_size % sizeof(u32) || out_mbox_size % sizeof(u32));
+
+ reg_size = mlxsw_i2c_get_reg_size(in_mbox);
+ num = reg_size / MLXSW_I2C_BLK_MAX;
+ if (reg_size % MLXSW_I2C_BLK_MAX)
+ num++;
+
+ if (mutex_lock_interruptible(&mlxsw_i2c->cmd.lock) < 0) {
+ dev_err(&client->dev, "Could not acquire lock");
+ return -EINVAL;
+ }
+
+ err = mlxsw_i2c_write(dev, reg_size, in_mbox, num, status);
+ if (err)
+ goto cmd_fail;
+
+ /* There is no output mailbox in the case of a write transaction. */
+ if (!out_mbox) {
+ mutex_unlock(&mlxsw_i2c->cmd.lock);
+ return 0;
+ }
+
+ /* Send read transaction to get output mailbox content. */
+ read_tran[1].buf = out_mbox;
+ for (i = 0; i < num; i++) {
+ chunk_size = (reg_size > MLXSW_I2C_BLK_MAX) ?
+ MLXSW_I2C_BLK_MAX : reg_size;
+ read_tran[1].len = chunk_size;
+ mlxsw_i2c_set_slave_addr(tran_buf, off);
+
+ j = 0;
+ end = jiffies + timeout;
+ do {
+ err = i2c_transfer(client->adapter, read_tran,
+ ARRAY_SIZE(read_tran));
+ if (err == ARRAY_SIZE(read_tran))
+ break;
+
+ cond_resched();
+ } while ((time_before(jiffies, end)) ||
+ (j++ < MLXSW_I2C_RETRY));
+
+ if (err != ARRAY_SIZE(read_tran)) {
+ if (!err)
+ err = -EIO;
+
+ goto cmd_fail;
+ }
+
+ off += chunk_size;
+ reg_size -= chunk_size;
+ read_tran[1].buf += chunk_size;
+ }
+
+ mutex_unlock(&mlxsw_i2c->cmd.lock);
+
+ return 0;
+
+cmd_fail:
+ mutex_unlock(&mlxsw_i2c->cmd.lock);
+ return err;
+}
+
+static int mlxsw_i2c_cmd_exec(void *bus_priv, u16 opcode, u8 opcode_mod,
+ u32 in_mod, bool out_mbox_direct,
+ char *in_mbox, size_t in_mbox_size,
+ char *out_mbox, size_t out_mbox_size,
+ u8 *status)
+{
+ struct mlxsw_i2c *mlxsw_i2c = bus_priv;
+
+ return mlxsw_i2c_cmd(mlxsw_i2c->dev, in_mbox_size, in_mbox,
+ out_mbox_size, out_mbox, status);
+}
+
+static bool mlxsw_i2c_skb_transmit_busy(void *bus_priv,
+ const struct mlxsw_tx_info *tx_info)
+{
+ return false;
+}
+
+static int mlxsw_i2c_skb_transmit(void *bus_priv, struct sk_buff *skb,
+ const struct mlxsw_tx_info *tx_info)
+{
+ return 0;
+}
+
+static int
+mlxsw_i2c_init(void *bus_priv, struct mlxsw_core *mlxsw_core,
+ const struct mlxsw_config_profile *profile,
+ struct mlxsw_res *resources)
+{
+ struct mlxsw_i2c *mlxsw_i2c = bus_priv;
+
+ mlxsw_i2c->core = mlxsw_core;
+
+ return 0;
+}
+
+static void mlxsw_i2c_fini(void *bus_priv)
+{
+ struct mlxsw_i2c *mlxsw_i2c = bus_priv;
+
+ mlxsw_i2c->core = NULL;
+}
+
+static const struct mlxsw_bus mlxsw_i2c_bus = {
+ .kind = "i2c",
+ .init = mlxsw_i2c_init,
+ .fini = mlxsw_i2c_fini,
+ .skb_transmit_busy = mlxsw_i2c_skb_transmit_busy,
+ .skb_transmit = mlxsw_i2c_skb_transmit,
+ .cmd_exec = mlxsw_i2c_cmd_exec,
+};
+
+static int mlxsw_i2c_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct mlxsw_i2c *mlxsw_i2c;
+ u8 status;
+ int err;
+
+ mlxsw_i2c = devm_kzalloc(&client->dev, sizeof(*mlxsw_i2c), GFP_KERNEL);
+ if (!mlxsw_i2c)
+ return -ENOMEM;
+
+ i2c_set_clientdata(client, mlxsw_i2c);
+ mutex_init(&mlxsw_i2c->cmd.lock);
+
+ /* In order to use mailboxes through the i2c, a special area is
+ * reserved in the i2c address space that can be used for input and
+ * output mailboxes. Such mailboxes are called local mailboxes. When
+ * using a local mailbox, software should specify 0 as the Input/Output
+ * parameters. The location of the local mailbox addresses in the i2c
+ * space can be retrieved through the QUERY_FW command.
+ * For this purpose QUERY_FW is to be issued with opcode modifier equal
+ * to 0x01. For such a command the output parameter is an immediate
+ * value.
+ * Here the QUERY_FW command is invoked for ASIC probing and for
+ * getting the local mailbox addresses from the immediate output
+ * parameters.
+ */
+
+ /* Prepare and write out Command Interface Register for transaction */
+ err = mlxsw_i2c_write_cmd(client, mlxsw_i2c, 1);
+ if (err) {
+ dev_err(&client->dev, "Could not start transaction");
+ goto errout;
+ }
+
+ /* Wait until go bit is cleared. */
+ err = mlxsw_i2c_wait_go_bit(client, mlxsw_i2c, &status);
+ if (err) {
+ dev_err(&client->dev, "HW semaphore is not released");
+ goto errout;
+ }
+
+ /* Validate transaction completion status. */
+ if (status) {
+ dev_err(&client->dev, "Bad transaction completion status %x\n",
+ status);
+ err = -EIO;
+ goto errout;
+ }
+
+ /* Get mailbox offsets. */
+ err = mlxsw_i2c_get_mbox(client, mlxsw_i2c);
+ if (err < 0) {
+ dev_err(&client->dev, "Fail to get mailboxes\n");
+ goto errout;
+ }
+
+ dev_info(&client->dev, "%s mb size=%x off=0x%08x out mb size=%x off=0x%08x\n",
+ id->name, mlxsw_i2c->cmd.mb_size_in,
+ mlxsw_i2c->cmd.mb_off_in, mlxsw_i2c->cmd.mb_size_out,
+ mlxsw_i2c->cmd.mb_off_out);
+
+ /* Register device bus. */
+ mlxsw_i2c->bus_info.device_kind = id->name;
+ mlxsw_i2c->bus_info.device_name = client->name;
+ mlxsw_i2c->bus_info.dev = &client->dev;
+ mlxsw_i2c->dev = &client->dev;
+
+ err = mlxsw_core_bus_device_register(&mlxsw_i2c->bus_info,
+ &mlxsw_i2c_bus, mlxsw_i2c);
+ if (err) {
+ dev_err(&client->dev, "Fail to register core bus\n");
+ return err;
+ }
+
+ return 0;
+
+errout:
+ i2c_set_clientdata(client, NULL);
+
+ return err;
+}
+
+static int mlxsw_i2c_remove(struct i2c_client *client)
+{
+ struct mlxsw_i2c *mlxsw_i2c = i2c_get_clientdata(client);
+
+ mlxsw_core_bus_device_unregister(mlxsw_i2c->core);
+ mutex_destroy(&mlxsw_i2c->cmd.lock);
+
+ return 0;
+}
+
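+/* Fill in the common probe/remove callbacks and register the I2C driver on
+ * behalf of a switch-specific module (the caller owns the i2c_driver
+ * structure).
+ */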
+int mlxsw_i2c_driver_register(struct i2c_driver *i2c_driver)
+{
+ i2c_driver->probe = mlxsw_i2c_probe;
+ i2c_driver->remove = mlxsw_i2c_remove;
+ return i2c_add_driver(i2c_driver);
+}
+EXPORT_SYMBOL(mlxsw_i2c_driver_register);
+
+void mlxsw_i2c_driver_unregister(struct i2c_driver *i2c_driver)
+{
+ i2c_del_driver(i2c_driver);
+}
+EXPORT_SYMBOL(mlxsw_i2c_driver_unregister);
+
+MODULE_AUTHOR("Vadim Pasternak <vadimp@mellanox.com>");
+MODULE_DESCRIPTION("Mellanox switch I2C interface driver");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/ethernet/mellanox/mlxsw/i2c.h b/drivers/net/ethernet/mellanox/mlxsw/i2c.h
new file mode 100644
index 000000000000..daa24b213ea4
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/i2c.h
@@ -0,0 +1,60 @@
+/*
+ * drivers/net/ethernet/mellanox/mlxsw/i2c.h
+ * Copyright (c) 2016 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2016 Vadim Pasternak <vadimp@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _MLXSW_I2C_H
+#define _MLXSW_I2C_H
+
+#include <linux/i2c.h>
+
+#if IS_ENABLED(CONFIG_MLXSW_I2C)
+
+int mlxsw_i2c_driver_register(struct i2c_driver *i2c_driver);
+void mlxsw_i2c_driver_unregister(struct i2c_driver *i2c_driver);
+
+#else
+
+static inline int
+mlxsw_i2c_driver_register(struct i2c_driver *i2c_driver)
+{
+ return -ENODEV;
+}
+
+static inline void
+mlxsw_i2c_driver_unregister(struct i2c_driver *i2c_driver)
+{
+}
+
+#endif
+
+#endif
diff --git a/drivers/net/ethernet/mellanox/mlxsw/ib.h b/drivers/net/ethernet/mellanox/mlxsw/ib.h
new file mode 100644
index 000000000000..ce313aaa6336
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/ib.h
@@ -0,0 +1,39 @@
+/*
+ * drivers/net/ethernet/mellanox/mlxsw/ib.h
+ * Copyright (c) 2016 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2016 Elad Raz <eladr@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef _MLXSW_IB_H
+#define _MLXSW_IB_H
+
+#define MLXSW_IB_DEFAULT_MTU 4096
+
+#endif /* _MLXSW_IB_H */
diff --git a/drivers/net/ethernet/mellanox/mlxsw/item.h b/drivers/net/ethernet/mellanox/mlxsw/item.h
index a94dbda6590b..3c95e3ddd9c2 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/item.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/item.h
@@ -55,7 +55,7 @@ struct mlxsw_item {
};
static inline unsigned int
-__mlxsw_item_offset(struct mlxsw_item *item, unsigned short index,
+__mlxsw_item_offset(const struct mlxsw_item *item, unsigned short index,
size_t typesize)
{
BUG_ON(index && !item->step);
@@ -72,7 +72,8 @@ __mlxsw_item_offset(struct mlxsw_item *item, unsigned short index,
typesize);
}
-static inline u16 __mlxsw_item_get16(char *buf, struct mlxsw_item *item,
+static inline u16 __mlxsw_item_get16(const char *buf,
+ const struct mlxsw_item *item,
unsigned short index)
{
unsigned int offset = __mlxsw_item_offset(item, index, sizeof(u16));
@@ -87,7 +88,7 @@ static inline u16 __mlxsw_item_get16(char *buf, struct mlxsw_item *item,
return tmp;
}
-static inline void __mlxsw_item_set16(char *buf, struct mlxsw_item *item,
+static inline void __mlxsw_item_set16(char *buf, const struct mlxsw_item *item,
unsigned short index, u16 val)
{
unsigned int offset = __mlxsw_item_offset(item, index,
@@ -105,7 +106,8 @@ static inline void __mlxsw_item_set16(char *buf, struct mlxsw_item *item,
b[offset] = cpu_to_be16(tmp);
}
-static inline u32 __mlxsw_item_get32(char *buf, struct mlxsw_item *item,
+static inline u32 __mlxsw_item_get32(const char *buf,
+ const struct mlxsw_item *item,
unsigned short index)
{
unsigned int offset = __mlxsw_item_offset(item, index, sizeof(u32));
@@ -120,7 +122,7 @@ static inline u32 __mlxsw_item_get32(char *buf, struct mlxsw_item *item,
return tmp;
}
-static inline void __mlxsw_item_set32(char *buf, struct mlxsw_item *item,
+static inline void __mlxsw_item_set32(char *buf, const struct mlxsw_item *item,
unsigned short index, u32 val)
{
unsigned int offset = __mlxsw_item_offset(item, index,
@@ -138,7 +140,8 @@ static inline void __mlxsw_item_set32(char *buf, struct mlxsw_item *item,
b[offset] = cpu_to_be32(tmp);
}
-static inline u64 __mlxsw_item_get64(char *buf, struct mlxsw_item *item,
+static inline u64 __mlxsw_item_get64(const char *buf,
+ const struct mlxsw_item *item,
unsigned short index)
{
unsigned int offset = __mlxsw_item_offset(item, index, sizeof(u64));
@@ -153,7 +156,7 @@ static inline u64 __mlxsw_item_get64(char *buf, struct mlxsw_item *item,
return tmp;
}
-static inline void __mlxsw_item_set64(char *buf, struct mlxsw_item *item,
+static inline void __mlxsw_item_set64(char *buf, const struct mlxsw_item *item,
unsigned short index, u64 val)
{
unsigned int offset = __mlxsw_item_offset(item, index, sizeof(u64));
@@ -170,8 +173,8 @@ static inline void __mlxsw_item_set64(char *buf, struct mlxsw_item *item,
b[offset] = cpu_to_be64(tmp);
}
-static inline void __mlxsw_item_memcpy_from(char *buf, char *dst,
- struct mlxsw_item *item,
+static inline void __mlxsw_item_memcpy_from(const char *buf, char *dst,
+ const struct mlxsw_item *item,
unsigned short index)
{
unsigned int offset = __mlxsw_item_offset(item, index, sizeof(char));
@@ -180,7 +183,7 @@ static inline void __mlxsw_item_memcpy_from(char *buf, char *dst,
}
static inline void __mlxsw_item_memcpy_to(char *buf, const char *src,
- struct mlxsw_item *item,
+ const struct mlxsw_item *item,
unsigned short index)
{
unsigned int offset = __mlxsw_item_offset(item, index, sizeof(char));
@@ -189,7 +192,8 @@ static inline void __mlxsw_item_memcpy_to(char *buf, const char *src,
}
static inline u16
-__mlxsw_item_bit_array_offset(struct mlxsw_item *item, u16 index, u8 *shift)
+__mlxsw_item_bit_array_offset(const struct mlxsw_item *item,
+ u16 index, u8 *shift)
{
u16 max_index, be_index;
u16 offset; /* byte offset inside the array */
@@ -212,7 +216,8 @@ __mlxsw_item_bit_array_offset(struct mlxsw_item *item, u16 index, u8 *shift)
return item->offset + offset;
}
-static inline u8 __mlxsw_item_bit_array_get(char *buf, struct mlxsw_item *item,
+static inline u8 __mlxsw_item_bit_array_get(const char *buf,
+ const struct mlxsw_item *item,
u16 index)
{
u8 shift, tmp;
@@ -224,7 +229,8 @@ static inline u8 __mlxsw_item_bit_array_get(char *buf, struct mlxsw_item *item,
return tmp;
}
-static inline void __mlxsw_item_bit_array_set(char *buf, struct mlxsw_item *item,
+static inline void __mlxsw_item_bit_array_set(char *buf,
+ const struct mlxsw_item *item,
u16 index, u8 val)
{
u8 shift, tmp;
@@ -254,7 +260,7 @@ static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = { \
.size = {.bits = _sizebits,}, \
.name = #_type "_" #_cname "_" #_iname, \
}; \
-static inline u16 mlxsw_##_type##_##_cname##_##_iname##_get(char *buf) \
+static inline u16 mlxsw_##_type##_##_cname##_##_iname##_get(const char *buf) \
{ \
return __mlxsw_item_get16(buf, &__ITEM_NAME(_type, _cname, _iname), 0); \
} \
@@ -275,7 +281,7 @@ static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = { \
.name = #_type "_" #_cname "_" #_iname, \
}; \
static inline u16 \
-mlxsw_##_type##_##_cname##_##_iname##_get(char *buf, unsigned short index) \
+mlxsw_##_type##_##_cname##_##_iname##_get(const char *buf, unsigned short index)\
{ \
return __mlxsw_item_get16(buf, &__ITEM_NAME(_type, _cname, _iname), \
index); \
@@ -295,7 +301,7 @@ static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = { \
.size = {.bits = _sizebits,}, \
.name = #_type "_" #_cname "_" #_iname, \
}; \
-static inline u32 mlxsw_##_type##_##_cname##_##_iname##_get(char *buf) \
+static inline u32 mlxsw_##_type##_##_cname##_##_iname##_get(const char *buf) \
{ \
return __mlxsw_item_get32(buf, &__ITEM_NAME(_type, _cname, _iname), 0); \
} \
@@ -316,7 +322,7 @@ static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = { \
.name = #_type "_" #_cname "_" #_iname, \
}; \
static inline u32 \
-mlxsw_##_type##_##_cname##_##_iname##_get(char *buf, unsigned short index) \
+mlxsw_##_type##_##_cname##_##_iname##_get(const char *buf, unsigned short index)\
{ \
return __mlxsw_item_get32(buf, &__ITEM_NAME(_type, _cname, _iname), \
index); \
@@ -336,7 +342,7 @@ static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = { \
.size = {.bits = _sizebits,}, \
.name = #_type "_" #_cname "_" #_iname, \
}; \
-static inline u64 mlxsw_##_type##_##_cname##_##_iname##_get(char *buf) \
+static inline u64 mlxsw_##_type##_##_cname##_##_iname##_get(const char *buf) \
{ \
return __mlxsw_item_get64(buf, &__ITEM_NAME(_type, _cname, _iname), 0); \
} \
@@ -357,7 +363,7 @@ static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = { \
.name = #_type "_" #_cname "_" #_iname, \
}; \
static inline u64 \
-mlxsw_##_type##_##_cname##_##_iname##_get(char *buf, unsigned short index) \
+mlxsw_##_type##_##_cname##_##_iname##_get(const char *buf, unsigned short index)\
{ \
return __mlxsw_item_get64(buf, &__ITEM_NAME(_type, _cname, _iname), \
index); \
@@ -377,7 +383,7 @@ static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = { \
.name = #_type "_" #_cname "_" #_iname, \
}; \
static inline void \
-mlxsw_##_type##_##_cname##_##_iname##_memcpy_from(char *buf, char *dst) \
+mlxsw_##_type##_##_cname##_##_iname##_memcpy_from(const char *buf, char *dst) \
{ \
__mlxsw_item_memcpy_from(buf, dst, \
&__ITEM_NAME(_type, _cname, _iname), 0); \
@@ -399,7 +405,7 @@ static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = { \
.name = #_type "_" #_cname "_" #_iname, \
}; \
static inline void \
-mlxsw_##_type##_##_cname##_##_iname##_memcpy_from(char *buf, \
+mlxsw_##_type##_##_cname##_##_iname##_memcpy_from(const char *buf, \
unsigned short index, \
char *dst) \
{ \
@@ -424,7 +430,7 @@ static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = { \
.name = #_type "_" #_cname "_" #_iname, \
}; \
static inline u8 \
-mlxsw_##_type##_##_cname##_##_iname##_get(char *buf, u16 index) \
+mlxsw_##_type##_##_cname##_##_iname##_get(const char *buf, u16 index) \
{ \
return __mlxsw_item_bit_array_get(buf, \
&__ITEM_NAME(_type, _cname, _iname), \
diff --git a/drivers/net/ethernet/mellanox/mlxsw/minimal.c b/drivers/net/ethernet/mellanox/mlxsw/minimal.c
new file mode 100644
index 000000000000..3dd16267b76c
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/minimal.c
@@ -0,0 +1,97 @@
+/*
+ * drivers/net/ethernet/mellanox/mlxsw/minimal.c
+ * Copyright (c) 2016 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2016 Vadim Pasternak <vadimp@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/i2c.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/types.h>
+
+#include "core.h"
+#include "i2c.h"
+
+static const char mlxsw_minimal_driver_name[] = "mlxsw_minimal";
+
+static const struct mlxsw_config_profile mlxsw_minimal_config_profile;
+
+static struct mlxsw_driver mlxsw_minimal_driver = {
+ .kind = mlxsw_minimal_driver_name,
+ .priv_size = 1,
+ .profile = &mlxsw_minimal_config_profile,
+};
+
+static const struct i2c_device_id mlxsw_minimal_i2c_id[] = {
+ { "mlxsw_minimal", 0},
+ { },
+};
+
+static struct i2c_driver mlxsw_minimal_i2c_driver = {
+ .driver.name = "mlxsw_minimal",
+ .class = I2C_CLASS_HWMON,
+ .id_table = mlxsw_minimal_i2c_id,
+};
+
+static int __init mlxsw_minimal_module_init(void)
+{
+ int err;
+
+ err = mlxsw_core_driver_register(&mlxsw_minimal_driver);
+ if (err)
+ return err;
+
+ err = mlxsw_i2c_driver_register(&mlxsw_minimal_i2c_driver);
+ if (err)
+ goto err_i2c_driver_register;
+
+ return 0;
+
+err_i2c_driver_register:
+ mlxsw_core_driver_unregister(&mlxsw_minimal_driver);
+
+ return err;
+}
+
+static void __exit mlxsw_minimal_module_exit(void)
+{
+ mlxsw_i2c_driver_unregister(&mlxsw_minimal_i2c_driver);
+ mlxsw_core_driver_unregister(&mlxsw_minimal_driver);
+}
+
+module_init(mlxsw_minimal_module_init);
+module_exit(mlxsw_minimal_module_exit);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_AUTHOR("Vadim Pasternak <vadimp@mellanox.com>");
+MODULE_DESCRIPTION("Mellanox minimal driver");
+MODULE_DEVICE_TABLE(i2c, mlxsw_minimal_i2c_id);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.c b/drivers/net/ethernet/mellanox/mlxsw/pci.c
index 912f71f84209..a223c85dfde0 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/pci.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/pci.c
@@ -48,33 +48,17 @@
#include <linux/seq_file.h>
#include <linux/string.h>
+#include "pci_hw.h"
#include "pci.h"
#include "core.h"
#include "cmd.h"
#include "port.h"
+#include "resources.h"
static const char mlxsw_pci_driver_name[] = "mlxsw_pci";
-static const struct pci_device_id mlxsw_pci_id_table[] = {
- {PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SWITCHX2), 0},
- {PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM), 0},
- {0, }
-};
-
static struct dentry *mlxsw_pci_dbg_root;
-static const char *mlxsw_pci_device_kind_get(const struct pci_device_id *id)
-{
- switch (id->device) {
- case PCI_DEVICE_ID_MELLANOX_SWITCHX2:
- return MLXSW_DEVICE_KIND_SWITCHX2;
- case PCI_DEVICE_ID_MELLANOX_SPECTRUM:
- return MLXSW_DEVICE_KIND_SPECTRUM;
- default:
- BUG();
- }
-}
-
#define mlxsw_pci_write32(mlxsw_pci, reg, val) \
iowrite32be(val, (mlxsw_pci)->hw_addr + (MLXSW_PCI_ ## reg))
#define mlxsw_pci_read32(mlxsw_pci, reg) \
@@ -238,8 +222,9 @@ static bool mlxsw_pci_elem_hw_owned(struct mlxsw_pci_queue *q, bool owner_bit)
return owner_bit != !!(q->consumer_counter & q->count);
}
-static char *mlxsw_pci_queue_sw_elem_get(struct mlxsw_pci_queue *q,
- u32 (*get_elem_owner_func)(char *))
+static char *
+mlxsw_pci_queue_sw_elem_get(struct mlxsw_pci_queue *q,
+ u32 (*get_elem_owner_func)(const char *))
{
struct mlxsw_pci_queue_elem_info *elem_info;
char *elem;
@@ -1154,76 +1139,8 @@ mlxsw_pci_config_profile_swid_config(struct mlxsw_pci *mlxsw_pci,
mlxsw_cmd_mbox_config_profile_swid_config_mask_set(mbox, index, mask);
}
-#define MLXSW_RESOURCES_TABLE_END_ID 0xffff
-#define MLXSW_MAX_SPAN_ID 0x2420
-#define MLXSW_MAX_LAG_ID 0x2520
-#define MLXSW_MAX_PORTS_IN_LAG_ID 0x2521
-#define MLXSW_KVD_SIZE_ID 0x1001
-#define MLXSW_KVD_SINGLE_MIN_SIZE_ID 0x1002
-#define MLXSW_KVD_DOUBLE_MIN_SIZE_ID 0x1003
-#define MLXSW_MAX_VIRTUAL_ROUTERS_ID 0x2C01
-#define MLXSW_MAX_SYSTEM_PORT_ID 0x2502
-#define MLXSW_MAX_VLAN_GROUPS_ID 0x2906
-#define MLXSW_MAX_REGIONS_ID 0x2901
-#define MLXSW_MAX_RIF_ID 0x2C02
-#define MLXSW_RESOURCES_QUERY_MAX_QUERIES 100
-#define MLXSW_RESOURCES_PER_QUERY 32
-
-static void mlxsw_pci_resources_query_parse(int id, u64 val,
- struct mlxsw_resources *resources)
-{
- switch (id) {
- case MLXSW_MAX_SPAN_ID:
- resources->max_span = val;
- resources->max_span_valid = 1;
- break;
- case MLXSW_MAX_LAG_ID:
- resources->max_lag = val;
- resources->max_lag_valid = 1;
- break;
- case MLXSW_MAX_PORTS_IN_LAG_ID:
- resources->max_ports_in_lag = val;
- resources->max_ports_in_lag_valid = 1;
- break;
- case MLXSW_KVD_SIZE_ID:
- resources->kvd_size = val;
- resources->kvd_size_valid = 1;
- break;
- case MLXSW_KVD_SINGLE_MIN_SIZE_ID:
- resources->kvd_single_min_size = val;
- resources->kvd_single_min_size_valid = 1;
- break;
- case MLXSW_KVD_DOUBLE_MIN_SIZE_ID:
- resources->kvd_double_min_size = val;
- resources->kvd_double_min_size_valid = 1;
- break;
- case MLXSW_MAX_VIRTUAL_ROUTERS_ID:
- resources->max_virtual_routers = val;
- resources->max_virtual_routers_valid = 1;
- break;
- case MLXSW_MAX_SYSTEM_PORT_ID:
- resources->max_system_ports = val;
- resources->max_system_ports_valid = 1;
- break;
- case MLXSW_MAX_VLAN_GROUPS_ID:
- resources->max_vlan_groups = val;
- resources->max_vlan_groups_valid = 1;
- break;
- case MLXSW_MAX_REGIONS_ID:
- resources->max_regions = val;
- resources->max_regions_valid = 1;
- break;
- case MLXSW_MAX_RIF_ID:
- resources->max_rif = val;
- resources->max_rif_valid = 1;
- break;
- default:
- break;
- }
-}
-
static int mlxsw_pci_resources_query(struct mlxsw_pci *mlxsw_pci, char *mbox,
- struct mlxsw_resources *resources,
+ struct mlxsw_res *res,
u8 query_enabled)
{
int index, i;
@@ -1237,19 +1154,20 @@ static int mlxsw_pci_resources_query(struct mlxsw_pci *mlxsw_pci, char *mbox,
mlxsw_cmd_mbox_zero(mbox);
- for (index = 0; index < MLXSW_RESOURCES_QUERY_MAX_QUERIES; index++) {
+ for (index = 0; index < MLXSW_CMD_QUERY_RESOURCES_MAX_QUERIES;
+ index++) {
err = mlxsw_cmd_query_resources(mlxsw_pci->core, mbox, index);
if (err)
return err;
- for (i = 0; i < MLXSW_RESOURCES_PER_QUERY; i++) {
+ for (i = 0; i < MLXSW_CMD_QUERY_RESOURCES_PER_QUERY; i++) {
id = mlxsw_cmd_mbox_query_resource_id_get(mbox, i);
data = mlxsw_cmd_mbox_query_resource_data_get(mbox, i);
- if (id == MLXSW_RESOURCES_TABLE_END_ID)
+ if (id == MLXSW_CMD_QUERY_RESOURCES_TABLE_END_ID)
return 0;
- mlxsw_pci_resources_query_parse(id, data, resources);
+ mlxsw_res_parse(res, id, data);
}
}
@@ -1259,13 +1177,14 @@ static int mlxsw_pci_resources_query(struct mlxsw_pci *mlxsw_pci, char *mbox,
return -EIO;
}
-static int mlxsw_pci_profile_get_kvd_sizes(const struct mlxsw_config_profile *profile,
- struct mlxsw_resources *resources)
+static int
+mlxsw_pci_profile_get_kvd_sizes(const struct mlxsw_config_profile *profile,
+ struct mlxsw_res *res)
{
- u32 singles_size, doubles_size, linear_size;
+ u32 single_size, double_size, linear_size;
- if (!resources->kvd_single_min_size_valid ||
- !resources->kvd_double_min_size_valid ||
+ if (!MLXSW_RES_VALID(res, KVD_SINGLE_MIN_SIZE) ||
+ !MLXSW_RES_VALID(res, KVD_DOUBLE_MIN_SIZE) ||
!profile->used_kvd_split_data)
return -EIO;
@@ -1277,31 +1196,31 @@ static int mlxsw_pci_profile_get_kvd_sizes(const struct mlxsw_config_profile *pr
 * Both sizes must be multiples of the
 * granularity from the profile.
*/
- doubles_size = (resources->kvd_size - linear_size);
- doubles_size *= profile->kvd_hash_double_parts;
- doubles_size /= (profile->kvd_hash_double_parts +
- profile->kvd_hash_single_parts);
- doubles_size /= profile->kvd_hash_granularity;
- doubles_size *= profile->kvd_hash_granularity;
- singles_size = resources->kvd_size - doubles_size -
- linear_size;
+ double_size = MLXSW_RES_GET(res, KVD_SIZE) - linear_size;
+ double_size *= profile->kvd_hash_double_parts;
+ double_size /= profile->kvd_hash_double_parts +
+ profile->kvd_hash_single_parts;
+ double_size /= profile->kvd_hash_granularity;
+ double_size *= profile->kvd_hash_granularity;
+ single_size = MLXSW_RES_GET(res, KVD_SIZE) - double_size -
+ linear_size;
/* Check results are legal. */
- if (singles_size < resources->kvd_single_min_size ||
- doubles_size < resources->kvd_double_min_size ||
- resources->kvd_size < linear_size)
+ if (single_size < MLXSW_RES_GET(res, KVD_SINGLE_MIN_SIZE) ||
+ double_size < MLXSW_RES_GET(res, KVD_DOUBLE_MIN_SIZE) ||
+ MLXSW_RES_GET(res, KVD_SIZE) < linear_size)
return -EIO;
- resources->kvd_single_size = singles_size;
- resources->kvd_double_size = doubles_size;
- resources->kvd_linear_size = linear_size;
+ MLXSW_RES_SET(res, KVD_SINGLE_SIZE, single_size);
+ MLXSW_RES_SET(res, KVD_DOUBLE_SIZE, double_size);
+ MLXSW_RES_SET(res, KVD_LINEAR_SIZE, linear_size);
return 0;
}
static int mlxsw_pci_config_profile(struct mlxsw_pci *mlxsw_pci, char *mbox,
const struct mlxsw_config_profile *profile,
- struct mlxsw_resources *resources)
+ struct mlxsw_res *res)
{
int i;
int err;
@@ -1390,22 +1309,22 @@ static int mlxsw_pci_config_profile(struct mlxsw_pci *mlxsw_pci, char *mbox,
mlxsw_cmd_mbox_config_profile_adaptive_routing_group_cap_set(
mbox, profile->adaptive_routing_group_cap);
}
- if (resources->kvd_size_valid) {
- err = mlxsw_pci_profile_get_kvd_sizes(profile, resources);
+ if (MLXSW_RES_VALID(res, KVD_SIZE)) {
+ err = mlxsw_pci_profile_get_kvd_sizes(profile, res);
if (err)
return err;
mlxsw_cmd_mbox_config_profile_set_kvd_linear_size_set(mbox, 1);
mlxsw_cmd_mbox_config_profile_kvd_linear_size_set(mbox,
- resources->kvd_linear_size);
+ MLXSW_RES_GET(res, KVD_LINEAR_SIZE));
mlxsw_cmd_mbox_config_profile_set_kvd_hash_single_size_set(mbox,
1);
mlxsw_cmd_mbox_config_profile_kvd_hash_single_size_set(mbox,
- resources->kvd_single_size);
+ MLXSW_RES_GET(res, KVD_SINGLE_SIZE));
mlxsw_cmd_mbox_config_profile_set_kvd_hash_double_size_set(
mbox, 1);
mlxsw_cmd_mbox_config_profile_kvd_hash_double_size_set(mbox,
- resources->kvd_double_size);
+ MLXSW_RES_GET(res, KVD_DOUBLE_SIZE));
}
for (i = 0; i < MLXSW_CONFIG_PROFILE_SWID_COUNT; i++)
@@ -1543,7 +1462,7 @@ static void mlxsw_pci_mbox_free(struct mlxsw_pci *mlxsw_pci,
static int mlxsw_pci_init(void *bus_priv, struct mlxsw_core *mlxsw_core,
const struct mlxsw_config_profile *profile,
- struct mlxsw_resources *resources)
+ struct mlxsw_res *res)
{
struct mlxsw_pci *mlxsw_pci = bus_priv;
struct pci_dev *pdev = mlxsw_pci->pdev;
@@ -1602,12 +1521,12 @@ static int mlxsw_pci_init(void *bus_priv, struct mlxsw_core *mlxsw_core,
if (err)
goto err_boardinfo;
- err = mlxsw_pci_resources_query(mlxsw_pci, mbox, resources,
+ err = mlxsw_pci_resources_query(mlxsw_pci, mbox, res,
profile->resource_query_enable);
if (err)
goto err_query_resources;
- err = mlxsw_pci_config_profile(mlxsw_pci, mbox, profile, resources);
+ err = mlxsw_pci_config_profile(mlxsw_pci, mbox, profile, res);
if (err)
goto err_config_profile;
@@ -1617,7 +1536,7 @@ static int mlxsw_pci_init(void *bus_priv, struct mlxsw_core *mlxsw_core,
err = request_irq(mlxsw_pci->msix_entry.vector,
mlxsw_pci_eq_irq_handler, 0,
- mlxsw_pci_driver_name, mlxsw_pci);
+ mlxsw_pci->bus_info.device_kind, mlxsw_pci);
if (err) {
dev_err(&pdev->dev, "IRQ request failed\n");
goto err_request_eq_irq;
@@ -1836,6 +1755,7 @@ static const struct mlxsw_bus mlxsw_pci_bus = {
.skb_transmit_busy = mlxsw_pci_skb_transmit_busy,
.skb_transmit = mlxsw_pci_skb_transmit,
.cmd_exec = mlxsw_pci_cmd_exec,
+ .features = MLXSW_BUS_F_TXRX,
};
static int mlxsw_pci_sw_reset(struct mlxsw_pci *mlxsw_pci,
@@ -1863,6 +1783,7 @@ static int mlxsw_pci_sw_reset(struct mlxsw_pci *mlxsw_pci,
static int mlxsw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
+ const char *driver_name = pdev->driver->name;
struct mlxsw_pci *mlxsw_pci;
int err;
@@ -1876,7 +1797,7 @@ static int mlxsw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto err_pci_enable_device;
}
- err = pci_request_regions(pdev, mlxsw_pci_driver_name);
+ err = pci_request_regions(pdev, driver_name);
if (err) {
dev_err(&pdev->dev, "pci_request_regions failed\n");
goto err_pci_request_regions;
@@ -1927,7 +1848,7 @@ static int mlxsw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto err_msix_init;
}
- mlxsw_pci->bus_info.device_kind = mlxsw_pci_device_kind_get(id);
+ mlxsw_pci->bus_info.device_kind = driver_name;
mlxsw_pci->bus_info.device_name = pci_name(mlxsw_pci->pdev);
mlxsw_pci->bus_info.dev = &pdev->dev;
@@ -1979,33 +1900,30 @@ static void mlxsw_pci_remove(struct pci_dev *pdev)
kfree(mlxsw_pci);
}
-static struct pci_driver mlxsw_pci_driver = {
- .name = mlxsw_pci_driver_name,
- .id_table = mlxsw_pci_id_table,
- .probe = mlxsw_pci_probe,
- .remove = mlxsw_pci_remove,
-};
+int mlxsw_pci_driver_register(struct pci_driver *pci_driver)
+{
+ pci_driver->probe = mlxsw_pci_probe;
+ pci_driver->remove = mlxsw_pci_remove;
+ return pci_register_driver(pci_driver);
+}
+EXPORT_SYMBOL(mlxsw_pci_driver_register);
-static int __init mlxsw_pci_module_init(void)
+void mlxsw_pci_driver_unregister(struct pci_driver *pci_driver)
{
- int err;
+ pci_unregister_driver(pci_driver);
+}
+EXPORT_SYMBOL(mlxsw_pci_driver_unregister);
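With probe/remove now filled in by the helper, a switch driver only supplies its name and ID table. A sketch (the driver name, table name, and init function are illustrative):

static const struct pci_device_id mlxsw_sp_pci_id_table[] = {
	{PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM), 0},
	{0, },
};

static struct pci_driver mlxsw_sp_pci_driver = {
	.name = "mlxsw_spectrum",
	.id_table = mlxsw_sp_pci_id_table,
	/* .probe and .remove are set by mlxsw_pci_driver_register() */
};

static int __init mlxsw_sp_module_init(void)
{
	return mlxsw_pci_driver_register(&mlxsw_sp_pci_driver);
}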
+static int __init mlxsw_pci_module_init(void)
+{
mlxsw_pci_dbg_root = debugfs_create_dir(mlxsw_pci_driver_name, NULL);
if (!mlxsw_pci_dbg_root)
return -ENOMEM;
- err = pci_register_driver(&mlxsw_pci_driver);
- if (err)
- goto err_register_driver;
return 0;
-
-err_register_driver:
- debugfs_remove_recursive(mlxsw_pci_dbg_root);
- return err;
}
static void __exit mlxsw_pci_module_exit(void)
{
- pci_unregister_driver(&mlxsw_pci_driver);
debugfs_remove_recursive(mlxsw_pci_dbg_root);
}
@@ -2015,4 +1933,3 @@ module_exit(mlxsw_pci_module_exit);
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
MODULE_DESCRIPTION("Mellanox switch PCI interface driver");
-MODULE_DEVICE_TABLE(pci, mlxsw_pci_id_table);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.h b/drivers/net/ethernet/mellanox/mlxsw/pci.h
index d942a3e6fa41..d65582325cd5 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/pci.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/pci.h
@@ -1,7 +1,7 @@
/*
* drivers/net/ethernet/mellanox/mlxsw/pci.h
- * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
- * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
+ * Copyright (c) 2016 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2016 Jiri Pirko <jiri@mellanox.com>
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -35,197 +35,31 @@
#ifndef _MLXSW_PCI_H
#define _MLXSW_PCI_H
-#include <linux/bitops.h>
+#include <linux/pci.h>
-#include "item.h"
+#define PCI_DEVICE_ID_MELLANOX_SWITCHX2 0xc738
+#define PCI_DEVICE_ID_MELLANOX_SPECTRUM 0xcb84
+#define PCI_DEVICE_ID_MELLANOX_SWITCHIB 0xcb20
+#define PCI_DEVICE_ID_MELLANOX_SWITCHIB2 0xcf08
-#define PCI_DEVICE_ID_MELLANOX_SWITCHX2 0xc738
-#define PCI_DEVICE_ID_MELLANOX_SPECTRUM 0xcb84
-#define MLXSW_PCI_BAR0_SIZE (1024 * 1024) /* 1MB */
-#define MLXSW_PCI_PAGE_SIZE 4096
+#if IS_ENABLED(CONFIG_MLXSW_PCI)
-#define MLXSW_PCI_CIR_BASE 0x71000
-#define MLXSW_PCI_CIR_IN_PARAM_HI MLXSW_PCI_CIR_BASE
-#define MLXSW_PCI_CIR_IN_PARAM_LO (MLXSW_PCI_CIR_BASE + 0x04)
-#define MLXSW_PCI_CIR_IN_MODIFIER (MLXSW_PCI_CIR_BASE + 0x08)
-#define MLXSW_PCI_CIR_OUT_PARAM_HI (MLXSW_PCI_CIR_BASE + 0x0C)
-#define MLXSW_PCI_CIR_OUT_PARAM_LO (MLXSW_PCI_CIR_BASE + 0x10)
-#define MLXSW_PCI_CIR_TOKEN (MLXSW_PCI_CIR_BASE + 0x14)
-#define MLXSW_PCI_CIR_CTRL (MLXSW_PCI_CIR_BASE + 0x18)
-#define MLXSW_PCI_CIR_CTRL_GO_BIT BIT(23)
-#define MLXSW_PCI_CIR_CTRL_EVREQ_BIT BIT(22)
-#define MLXSW_PCI_CIR_CTRL_OPCODE_MOD_SHIFT 12
-#define MLXSW_PCI_CIR_CTRL_STATUS_SHIFT 24
-#define MLXSW_PCI_CIR_TIMEOUT_MSECS 1000
+int mlxsw_pci_driver_register(struct pci_driver *pci_driver);
+void mlxsw_pci_driver_unregister(struct pci_driver *pci_driver);
-#define MLXSW_PCI_SW_RESET 0xF0010
-#define MLXSW_PCI_SW_RESET_RST_BIT BIT(0)
-#define MLXSW_PCI_SW_RESET_TIMEOUT_MSECS 5000
-#define MLXSW_PCI_FW_READY 0xA1844
-#define MLXSW_PCI_FW_READY_MASK 0xFF
-#define MLXSW_PCI_FW_READY_MAGIC 0x5E
+#else
-#define MLXSW_PCI_DOORBELL_SDQ_OFFSET 0x000
-#define MLXSW_PCI_DOORBELL_RDQ_OFFSET 0x200
-#define MLXSW_PCI_DOORBELL_CQ_OFFSET 0x400
-#define MLXSW_PCI_DOORBELL_EQ_OFFSET 0x600
-#define MLXSW_PCI_DOORBELL_ARM_CQ_OFFSET 0x800
-#define MLXSW_PCI_DOORBELL_ARM_EQ_OFFSET 0xA00
+static inline int
+mlxsw_pci_driver_register(struct pci_driver *pci_driver)
+{
+ return 0;
+}
-#define MLXSW_PCI_DOORBELL(offset, type_offset, num) \
- ((offset) + (type_offset) + (num) * 4)
+static inline void
+mlxsw_pci_driver_unregister(struct pci_driver *pci_driver)
+{
+}
-#define MLXSW_PCI_CQS_MAX 96
-#define MLXSW_PCI_EQS_COUNT 2
-#define MLXSW_PCI_EQ_ASYNC_NUM 0
-#define MLXSW_PCI_EQ_COMP_NUM 1
-
-#define MLXSW_PCI_AQ_PAGES 8
-#define MLXSW_PCI_AQ_SIZE (MLXSW_PCI_PAGE_SIZE * MLXSW_PCI_AQ_PAGES)
-#define MLXSW_PCI_WQE_SIZE 32 /* 32 bytes per element */
-#define MLXSW_PCI_CQE_SIZE 16 /* 16 bytes per element */
-#define MLXSW_PCI_EQE_SIZE 16 /* 16 bytes per element */
-#define MLXSW_PCI_WQE_COUNT (MLXSW_PCI_AQ_SIZE / MLXSW_PCI_WQE_SIZE)
-#define MLXSW_PCI_CQE_COUNT (MLXSW_PCI_AQ_SIZE / MLXSW_PCI_CQE_SIZE)
-#define MLXSW_PCI_EQE_COUNT (MLXSW_PCI_AQ_SIZE / MLXSW_PCI_EQE_SIZE)
-#define MLXSW_PCI_EQE_UPDATE_COUNT 0x80
-
-#define MLXSW_PCI_WQE_SG_ENTRIES 3
-#define MLXSW_PCI_WQE_TYPE_ETHERNET 0xA
-
-/* pci_wqe_c
- * If set it indicates that a completion should be reported upon
- * execution of this descriptor.
- */
-MLXSW_ITEM32(pci, wqe, c, 0x00, 31, 1);
-
-/* pci_wqe_lp
- * Local Processing, set if packet should be processed by the local
- * switch hardware:
- * For Ethernet EMAD (Direct Route and non Direct Route) -
- * must be set if packet destination is local device
- * For InfiniBand CTL - must be set if packet destination is local device
- * Otherwise it must be clear
- * Local Process packets must not exceed the size of 2K (including payload
- * and headers).
- */
-MLXSW_ITEM32(pci, wqe, lp, 0x00, 30, 1);
-
-/* pci_wqe_type
- * Packet type.
- */
-MLXSW_ITEM32(pci, wqe, type, 0x00, 23, 4);
-
-/* pci_wqe_byte_count
- * Size of i-th scatter/gather entry, 0 if entry is unused.
- */
-MLXSW_ITEM16_INDEXED(pci, wqe, byte_count, 0x02, 0, 14, 0x02, 0x00, false);
-
-/* pci_wqe_address
- * Physical address of i-th scatter/gather entry.
- * Gather Entries must be 2Byte aligned.
- */
-MLXSW_ITEM64_INDEXED(pci, wqe, address, 0x08, 0, 64, 0x8, 0x0, false);
-
-/* pci_cqe_lag
- * Packet arrives from a port which is a LAG
- */
-MLXSW_ITEM32(pci, cqe, lag, 0x00, 23, 1);
-
-/* pci_cqe_system_port/lag_id
- * When lag=0: System port on which the packet was received
- * When lag=1:
- * bits [15:4] LAG ID on which the packet was received
- * bits [3:0] sub_port on which the packet was received
- */
-MLXSW_ITEM32(pci, cqe, system_port, 0x00, 0, 16);
-MLXSW_ITEM32(pci, cqe, lag_id, 0x00, 4, 12);
-MLXSW_ITEM32(pci, cqe, lag_port_index, 0x00, 0, 4);
-
-/* pci_cqe_wqe_counter
- * WQE count of the WQEs completed on the associated dqn
- */
-MLXSW_ITEM32(pci, cqe, wqe_counter, 0x04, 16, 16);
-
-/* pci_cqe_byte_count
- * Byte count of received packets including additional two
- * Reserved Bytes that are append to the end of the frame.
- * Reserved for Send CQE.
- */
-MLXSW_ITEM32(pci, cqe, byte_count, 0x04, 0, 14);
-
-/* pci_cqe_trap_id
- * Trap ID that captured the packet.
- */
-MLXSW_ITEM32(pci, cqe, trap_id, 0x08, 0, 8);
-
-/* pci_cqe_crc
- * Length include CRC. Indicates the length field includes
- * the packet's CRC.
- */
-MLXSW_ITEM32(pci, cqe, crc, 0x0C, 8, 1);
-
-/* pci_cqe_e
- * CQE with Error.
- */
-MLXSW_ITEM32(pci, cqe, e, 0x0C, 7, 1);
-
-/* pci_cqe_sr
- * 1 - Send Queue
- * 0 - Receive Queue
- */
-MLXSW_ITEM32(pci, cqe, sr, 0x0C, 6, 1);
-
-/* pci_cqe_dqn
- * Descriptor Queue (DQ) Number.
- */
-MLXSW_ITEM32(pci, cqe, dqn, 0x0C, 1, 5);
-
-/* pci_cqe_owner
- * Ownership bit.
- */
-MLXSW_ITEM32(pci, cqe, owner, 0x0C, 0, 1);
-
-/* pci_eqe_event_type
- * Event type.
- */
-MLXSW_ITEM32(pci, eqe, event_type, 0x0C, 24, 8);
-#define MLXSW_PCI_EQE_EVENT_TYPE_COMP 0x00
-#define MLXSW_PCI_EQE_EVENT_TYPE_CMD 0x0A
-
-/* pci_eqe_event_sub_type
- * Event type.
- */
-MLXSW_ITEM32(pci, eqe, event_sub_type, 0x0C, 16, 8);
-
-/* pci_eqe_cqn
- * Completion Queue that triggeret this EQE.
- */
-MLXSW_ITEM32(pci, eqe, cqn, 0x0C, 8, 7);
-
-/* pci_eqe_owner
- * Ownership bit.
- */
-MLXSW_ITEM32(pci, eqe, owner, 0x0C, 0, 1);
-
-/* pci_eqe_cmd_token
- * Command completion event - token
- */
-MLXSW_ITEM32(pci, eqe, cmd_token, 0x08, 16, 16);
-
-/* pci_eqe_cmd_status
- * Command completion event - status
- */
-MLXSW_ITEM32(pci, eqe, cmd_status, 0x08, 0, 8);
-
-/* pci_eqe_cmd_out_param_h
- * Command completion event - output parameter - higher part
- */
-MLXSW_ITEM32(pci, eqe, cmd_out_param_h, 0x0C, 0, 32);
-
-/* pci_eqe_cmd_out_param_l
- * Command completion event - output parameter - lower part
- */
-MLXSW_ITEM32(pci, eqe, cmd_out_param_l, 0x10, 0, 32);
+#endif
#endif
diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h b/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h
new file mode 100644
index 000000000000..d147ddd97997
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h
@@ -0,0 +1,229 @@
+/*
+ * drivers/net/ethernet/mellanox/mlxsw/pci_hw.h
+ * Copyright (c) 2015-2016 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2015-2016 Jiri Pirko <jiri@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _MLXSW_PCI_HW_H
+#define _MLXSW_PCI_HW_H
+
+#include <linux/bitops.h>
+
+#include "item.h"
+
+#define MLXSW_PCI_BAR0_SIZE (1024 * 1024) /* 1MB */
+#define MLXSW_PCI_PAGE_SIZE 4096
+
+#define MLXSW_PCI_CIR_BASE 0x71000
+#define MLXSW_PCI_CIR_IN_PARAM_HI MLXSW_PCI_CIR_BASE
+#define MLXSW_PCI_CIR_IN_PARAM_LO (MLXSW_PCI_CIR_BASE + 0x04)
+#define MLXSW_PCI_CIR_IN_MODIFIER (MLXSW_PCI_CIR_BASE + 0x08)
+#define MLXSW_PCI_CIR_OUT_PARAM_HI (MLXSW_PCI_CIR_BASE + 0x0C)
+#define MLXSW_PCI_CIR_OUT_PARAM_LO (MLXSW_PCI_CIR_BASE + 0x10)
+#define MLXSW_PCI_CIR_TOKEN (MLXSW_PCI_CIR_BASE + 0x14)
+#define MLXSW_PCI_CIR_CTRL (MLXSW_PCI_CIR_BASE + 0x18)
+#define MLXSW_PCI_CIR_CTRL_GO_BIT BIT(23)
+#define MLXSW_PCI_CIR_CTRL_EVREQ_BIT BIT(22)
+#define MLXSW_PCI_CIR_CTRL_OPCODE_MOD_SHIFT 12
+#define MLXSW_PCI_CIR_CTRL_STATUS_SHIFT 24
+#define MLXSW_PCI_CIR_TIMEOUT_MSECS 1000
+
+#define MLXSW_PCI_SW_RESET 0xF0010
+#define MLXSW_PCI_SW_RESET_RST_BIT BIT(0)
+#define MLXSW_PCI_SW_RESET_TIMEOUT_MSECS 5000
+#define MLXSW_PCI_FW_READY 0xA1844
+#define MLXSW_PCI_FW_READY_MASK 0xFFFF
+#define MLXSW_PCI_FW_READY_MAGIC 0x5E
+
+#define MLXSW_PCI_DOORBELL_SDQ_OFFSET 0x000
+#define MLXSW_PCI_DOORBELL_RDQ_OFFSET 0x200
+#define MLXSW_PCI_DOORBELL_CQ_OFFSET 0x400
+#define MLXSW_PCI_DOORBELL_EQ_OFFSET 0x600
+#define MLXSW_PCI_DOORBELL_ARM_CQ_OFFSET 0x800
+#define MLXSW_PCI_DOORBELL_ARM_EQ_OFFSET 0xA00
+
+#define MLXSW_PCI_DOORBELL(offset, type_offset, num) \
+ ((offset) + (type_offset) + (num) * 4)
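As a quick worked example of the macro above, the doorbell of SDQ number 3 within a doorbell page at offset off is:

/* MLXSW_PCI_DOORBELL(off, MLXSW_PCI_DOORBELL_SDQ_OFFSET, 3)
 *	== off + 0x000 + 3 * 4
 *	== off + 12
 */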
+
+#define MLXSW_PCI_CQS_MAX 96
+#define MLXSW_PCI_EQS_COUNT 2
+#define MLXSW_PCI_EQ_ASYNC_NUM 0
+#define MLXSW_PCI_EQ_COMP_NUM 1
+
+#define MLXSW_PCI_AQ_PAGES 8
+#define MLXSW_PCI_AQ_SIZE (MLXSW_PCI_PAGE_SIZE * MLXSW_PCI_AQ_PAGES)
+#define MLXSW_PCI_WQE_SIZE 32 /* 32 bytes per element */
+#define MLXSW_PCI_CQE_SIZE 16 /* 16 bytes per element */
+#define MLXSW_PCI_EQE_SIZE 16 /* 16 bytes per element */
+#define MLXSW_PCI_WQE_COUNT (MLXSW_PCI_AQ_SIZE / MLXSW_PCI_WQE_SIZE)
+#define MLXSW_PCI_CQE_COUNT (MLXSW_PCI_AQ_SIZE / MLXSW_PCI_CQE_SIZE)
+#define MLXSW_PCI_EQE_COUNT (MLXSW_PCI_AQ_SIZE / MLXSW_PCI_EQE_SIZE)
+#define MLXSW_PCI_EQE_UPDATE_COUNT 0x80
+
+#define MLXSW_PCI_WQE_SG_ENTRIES 3
+#define MLXSW_PCI_WQE_TYPE_ETHERNET 0xA
+
+/* pci_wqe_c
+ * If set, it indicates that a completion should be reported upon

+ * execution of this descriptor.
+ */
+MLXSW_ITEM32(pci, wqe, c, 0x00, 31, 1);
+
+/* pci_wqe_lp
+ * Local Processing, set if packet should be processed by the local
+ * switch hardware:
+ * For Ethernet EMAD (Direct Route and non Direct Route) -
+ * must be set if packet destination is local device
+ * For InfiniBand CTL - must be set if packet destination is local device
+ * Otherwise it must be clear
+ * Local Process packets must not exceed the size of 2K (including payload
+ * and headers).
+ */
+MLXSW_ITEM32(pci, wqe, lp, 0x00, 30, 1);
+
+/* pci_wqe_type
+ * Packet type.
+ */
+MLXSW_ITEM32(pci, wqe, type, 0x00, 23, 4);
+
+/* pci_wqe_byte_count
+ * Size of i-th scatter/gather entry, 0 if entry is unused.
+ */
+MLXSW_ITEM16_INDEXED(pci, wqe, byte_count, 0x02, 0, 14, 0x02, 0x00, false);
+
+/* pci_wqe_address
+ * Physical address of i-th scatter/gather entry.
+ * Gather Entries must be 2Byte aligned.
+ */
+MLXSW_ITEM64_INDEXED(pci, wqe, address, 0x08, 0, 64, 0x8, 0x0, false);
+
+/* pci_cqe_lag
+ * Packet arrives from a port which is a LAG
+ */
+MLXSW_ITEM32(pci, cqe, lag, 0x00, 23, 1);
+
+/* pci_cqe_system_port/lag_id
+ * When lag=0: System port on which the packet was received
+ * When lag=1:
+ * bits [15:4] LAG ID on which the packet was received
+ * bits [3:0] sub_port on which the packet was received
+ */
+MLXSW_ITEM32(pci, cqe, system_port, 0x00, 0, 16);
+MLXSW_ITEM32(pci, cqe, lag_id, 0x00, 4, 12);
+MLXSW_ITEM32(pci, cqe, lag_port_index, 0x00, 0, 4);
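The MLXSW_ITEM32() definitions above generate mlxsw_pci_cqe_*_get() accessors; a sketch of how an RX path could decode the packet source with them (the function and variable names are illustrative):

static void mlxsw_pci_cqe_rx_src(const char *cqe)
{
	if (mlxsw_pci_cqe_lag_get(cqe)) {
		/* bits [15:4] LAG ID, bits [3:0] member port index */
		u16 lag_id = mlxsw_pci_cqe_lag_id_get(cqe);
		u8 port_index = mlxsw_pci_cqe_lag_port_index_get(cqe);

		(void) lag_id; (void) port_index;
	} else {
		/* plain system port */
		u16 system_port = mlxsw_pci_cqe_system_port_get(cqe);

		(void) system_port;
	}
}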
+
+/* pci_cqe_wqe_counter
+ * WQE count of the WQEs completed on the associated dqn
+ */
+MLXSW_ITEM32(pci, cqe, wqe_counter, 0x04, 16, 16);
+
+/* pci_cqe_byte_count
+ * Byte count of received packets including additional two
+ * Reserved Bytes that are appended to the end of the frame.
+ * Reserved for Send CQE.
+ */
+MLXSW_ITEM32(pci, cqe, byte_count, 0x04, 0, 14);
+
+/* pci_cqe_trap_id
+ * Trap ID that captured the packet.
+ */
+MLXSW_ITEM32(pci, cqe, trap_id, 0x08, 0, 8);
+
+/* pci_cqe_crc
+ * Length includes CRC. Indicates the length field includes
+ * the packet's CRC.
+ */
+MLXSW_ITEM32(pci, cqe, crc, 0x0C, 8, 1);
+
+/* pci_cqe_e
+ * CQE with Error.
+ */
+MLXSW_ITEM32(pci, cqe, e, 0x0C, 7, 1);
+
+/* pci_cqe_sr
+ * 1 - Send Queue
+ * 0 - Receive Queue
+ */
+MLXSW_ITEM32(pci, cqe, sr, 0x0C, 6, 1);
+
+/* pci_cqe_dqn
+ * Descriptor Queue (DQ) Number.
+ */
+MLXSW_ITEM32(pci, cqe, dqn, 0x0C, 1, 5);
+
+/* pci_cqe_owner
+ * Ownership bit.
+ */
+MLXSW_ITEM32(pci, cqe, owner, 0x0C, 0, 1);
+
+/* pci_eqe_event_type
+ * Event type.
+ */
+MLXSW_ITEM32(pci, eqe, event_type, 0x0C, 24, 8);
+#define MLXSW_PCI_EQE_EVENT_TYPE_COMP 0x00
+#define MLXSW_PCI_EQE_EVENT_TYPE_CMD 0x0A
+
+/* pci_eqe_event_sub_type
+ * Event sub-type.
+ */
+MLXSW_ITEM32(pci, eqe, event_sub_type, 0x0C, 16, 8);
+
+/* pci_eqe_cqn
+ * Completion Queue that triggered this EQE.
+ */
+MLXSW_ITEM32(pci, eqe, cqn, 0x0C, 8, 7);
+
+/* pci_eqe_owner
+ * Ownership bit.
+ */
+MLXSW_ITEM32(pci, eqe, owner, 0x0C, 0, 1);
+
+/* pci_eqe_cmd_token
+ * Command completion event - token
+ */
+MLXSW_ITEM32(pci, eqe, cmd_token, 0x08, 16, 16);
+
+/* pci_eqe_cmd_status
+ * Command completion event - status
+ */
+MLXSW_ITEM32(pci, eqe, cmd_status, 0x08, 0, 8);
+
+/* pci_eqe_cmd_out_param_h
+ * Command completion event - output parameter - higher part
+ */
+MLXSW_ITEM32(pci, eqe, cmd_out_param_h, 0x0C, 0, 32);
+
+/* pci_eqe_cmd_out_param_l
+ * Command completion event - output parameter - lower part
+ */
+MLXSW_ITEM32(pci, eqe, cmd_out_param_l, 0x10, 0, 32);
+
+#endif
diff --git a/drivers/net/ethernet/mellanox/mlxsw/port.h b/drivers/net/ethernet/mellanox/mlxsw/port.h
index af371a82c35b..3d42146473b3 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/port.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/port.h
@@ -44,6 +44,7 @@
#define MLXSW_PORT_SWID_DISABLED_PORT 255
#define MLXSW_PORT_SWID_ALL_SWIDS 254
+#define MLXSW_PORT_SWID_TYPE_IB 1
#define MLXSW_PORT_SWID_TYPE_ETH 2
#define MLXSW_PORT_MID 0xd000
@@ -51,6 +52,9 @@
#define MLXSW_PORT_MAX_PHY_PORTS 0x40
#define MLXSW_PORT_MAX_PORTS (MLXSW_PORT_MAX_PHY_PORTS + 1)
+#define MLXSW_PORT_MAX_IB_PHY_PORTS 36
+#define MLXSW_PORT_MAX_IB_PORTS (MLXSW_PORT_MAX_IB_PHY_PORTS + 1)
+
#define MLXSW_PORT_DEVID_BITS_OFFSET 10
#define MLXSW_PORT_PHY_BITS_OFFSET 4
#define MLXSW_PORT_PHY_BITS_MASK (MLXSW_PORT_MAX_PHY_PORTS - 1)
diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h
index 6460c7256f2b..1357fe04391b 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/reg.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h
@@ -48,8 +48,16 @@
struct mlxsw_reg_info {
u16 id;
u16 len; /* In u8 */
+ const char *name;
};
+#define MLXSW_REG_DEFINE(_name, _id, _len) \
+static const struct mlxsw_reg_info mlxsw_reg_##_name = { \
+ .id = _id, \
+ .len = _len, \
+ .name = #_name, \
+}
+
#define MLXSW_REG(type) (&mlxsw_reg_##type)
#define MLXSW_REG_LEN(type) MLXSW_REG(type)->len
#define MLXSW_REG_ZERO(type, payload) memset(payload, 0, MLXSW_REG(type)->len)
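For reference, MLXSW_REG_DEFINE(sgcr, MLXSW_REG_SGCR_ID, MLXSW_REG_SGCR_LEN) expands to exactly the open-coded struct it replaces, plus the new name field:

static const struct mlxsw_reg_info mlxsw_reg_sgcr = {
	.id = MLXSW_REG_SGCR_ID,
	.len = MLXSW_REG_SGCR_LEN,
	.name = "sgcr",
};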
@@ -61,10 +69,7 @@ struct mlxsw_reg_info {
#define MLXSW_REG_SGCR_ID 0x2000
#define MLXSW_REG_SGCR_LEN 0x10
-static const struct mlxsw_reg_info mlxsw_reg_sgcr = {
- .id = MLXSW_REG_SGCR_ID,
- .len = MLXSW_REG_SGCR_LEN,
-};
+MLXSW_REG_DEFINE(sgcr, MLXSW_REG_SGCR_ID, MLXSW_REG_SGCR_LEN);
/* reg_sgcr_llb
* Link Local Broadcast (Default=0)
@@ -87,10 +92,7 @@ static inline void mlxsw_reg_sgcr_pack(char *payload, bool llb)
#define MLXSW_REG_SPAD_ID 0x2002
#define MLXSW_REG_SPAD_LEN 0x10
-static const struct mlxsw_reg_info mlxsw_reg_spad = {
- .id = MLXSW_REG_SPAD_ID,
- .len = MLXSW_REG_SPAD_LEN,
-};
+MLXSW_REG_DEFINE(spad, MLXSW_REG_SPAD_ID, MLXSW_REG_SPAD_LEN);
/* reg_spad_base_mac
* Base MAC address for the switch partitions.
@@ -109,10 +111,7 @@ MLXSW_ITEM_BUF(reg, spad, base_mac, 0x02, 6);
#define MLXSW_REG_SMID_ID 0x2007
#define MLXSW_REG_SMID_LEN 0x240
-static const struct mlxsw_reg_info mlxsw_reg_smid = {
- .id = MLXSW_REG_SMID_ID,
- .len = MLXSW_REG_SMID_LEN,
-};
+MLXSW_REG_DEFINE(smid, MLXSW_REG_SMID_ID, MLXSW_REG_SMID_LEN);
/* reg_smid_swid
* Switch partition ID.
@@ -156,10 +155,7 @@ static inline void mlxsw_reg_smid_pack(char *payload, u16 mid,
#define MLXSW_REG_SSPR_ID 0x2008
#define MLXSW_REG_SSPR_LEN 0x8
-static const struct mlxsw_reg_info mlxsw_reg_sspr = {
- .id = MLXSW_REG_SSPR_ID,
- .len = MLXSW_REG_SSPR_LEN,
-};
+MLXSW_REG_DEFINE(sspr, MLXSW_REG_SSPR_ID, MLXSW_REG_SSPR_LEN);
/* reg_sspr_m
* Master - if set, then the record describes the master system port.
@@ -215,10 +211,7 @@ static inline void mlxsw_reg_sspr_pack(char *payload, u8 local_port)
#define MLXSW_REG_SFDAT_ID 0x2009
#define MLXSW_REG_SFDAT_LEN 0x8
-static const struct mlxsw_reg_info mlxsw_reg_sfdat = {
- .id = MLXSW_REG_SFDAT_ID,
- .len = MLXSW_REG_SFDAT_LEN,
-};
+MLXSW_REG_DEFINE(sfdat, MLXSW_REG_SFDAT_ID, MLXSW_REG_SFDAT_LEN);
/* reg_sfdat_swid
* Switch partition ID.
@@ -256,10 +249,7 @@ static inline void mlxsw_reg_sfdat_pack(char *payload, u32 age_time)
#define MLXSW_REG_SFD_LEN (MLXSW_REG_SFD_BASE_LEN + \
MLXSW_REG_SFD_REC_LEN * MLXSW_REG_SFD_REC_MAX_COUNT)
-static const struct mlxsw_reg_info mlxsw_reg_sfd = {
- .id = MLXSW_REG_SFD_ID,
- .len = MLXSW_REG_SFD_LEN,
-};
+MLXSW_REG_DEFINE(sfd, MLXSW_REG_SFD_ID, MLXSW_REG_SFD_LEN);
/* reg_sfd_swid
* Switch partition ID for queries. Reserved on Write.
@@ -580,10 +570,7 @@ mlxsw_reg_sfd_mc_pack(char *payload, int rec_index,
#define MLXSW_REG_SFN_LEN (MLXSW_REG_SFN_BASE_LEN + \
MLXSW_REG_SFN_REC_LEN * MLXSW_REG_SFN_REC_MAX_COUNT)
-static const struct mlxsw_reg_info mlxsw_reg_sfn = {
- .id = MLXSW_REG_SFN_ID,
- .len = MLXSW_REG_SFN_LEN,
-};
+MLXSW_REG_DEFINE(sfn, MLXSW_REG_SFN_ID, MLXSW_REG_SFN_LEN);
/* reg_sfn_swid
* Switch partition ID.
@@ -701,10 +688,7 @@ static inline void mlxsw_reg_sfn_mac_lag_unpack(char *payload, int rec_index,
#define MLXSW_REG_SPMS_ID 0x200D
#define MLXSW_REG_SPMS_LEN 0x404
-static const struct mlxsw_reg_info mlxsw_reg_spms = {
- .id = MLXSW_REG_SPMS_ID,
- .len = MLXSW_REG_SPMS_LEN,
-};
+MLXSW_REG_DEFINE(spms, MLXSW_REG_SPMS_ID, MLXSW_REG_SPMS_LEN);
/* reg_spms_local_port
* Local port number.
@@ -748,10 +732,7 @@ static inline void mlxsw_reg_spms_vid_pack(char *payload, u16 vid,
#define MLXSW_REG_SPVID_ID 0x200E
#define MLXSW_REG_SPVID_LEN 0x08
-static const struct mlxsw_reg_info mlxsw_reg_spvid = {
- .id = MLXSW_REG_SPVID_ID,
- .len = MLXSW_REG_SPVID_LEN,
-};
+MLXSW_REG_DEFINE(spvid, MLXSW_REG_SPVID_ID, MLXSW_REG_SPVID_LEN);
/* reg_spvid_local_port
* Local port number.
@@ -792,10 +773,7 @@ static inline void mlxsw_reg_spvid_pack(char *payload, u8 local_port, u16 pvid)
#define MLXSW_REG_SPVM_LEN (MLXSW_REG_SPVM_BASE_LEN + \
MLXSW_REG_SPVM_REC_LEN * MLXSW_REG_SPVM_REC_MAX_COUNT)
-static const struct mlxsw_reg_info mlxsw_reg_spvm = {
- .id = MLXSW_REG_SPVM_ID,
- .len = MLXSW_REG_SPVM_LEN,
-};
+MLXSW_REG_DEFINE(spvm, MLXSW_REG_SPVM_ID, MLXSW_REG_SPVM_LEN);
/* reg_spvm_pt
* Priority tagged. If this bit is set, packets forwarded to the port with
@@ -891,10 +869,7 @@ static inline void mlxsw_reg_spvm_pack(char *payload, u8 local_port,
#define MLXSW_REG_SPAFT_ID 0x2010
#define MLXSW_REG_SPAFT_LEN 0x08
-static const struct mlxsw_reg_info mlxsw_reg_spaft = {
- .id = MLXSW_REG_SPAFT_ID,
- .len = MLXSW_REG_SPAFT_LEN,
-};
+MLXSW_REG_DEFINE(spaft, MLXSW_REG_SPAFT_ID, MLXSW_REG_SPAFT_LEN);
/* reg_spaft_local_port
* Local port number.
@@ -947,10 +922,7 @@ static inline void mlxsw_reg_spaft_pack(char *payload, u8 local_port,
#define MLXSW_REG_SFGC_ID 0x2011
#define MLXSW_REG_SFGC_LEN 0x10
-static const struct mlxsw_reg_info mlxsw_reg_sfgc = {
- .id = MLXSW_REG_SFGC_ID,
- .len = MLXSW_REG_SFGC_LEN,
-};
+MLXSW_REG_DEFINE(sfgc, MLXSW_REG_SFGC_ID, MLXSW_REG_SFGC_LEN);
enum mlxsw_reg_sfgc_type {
MLXSW_REG_SFGC_TYPE_BROADCAST,
@@ -1045,10 +1017,7 @@ mlxsw_reg_sfgc_pack(char *payload, enum mlxsw_reg_sfgc_type type,
#define MLXSW_REG_SFTR_ID 0x2012
#define MLXSW_REG_SFTR_LEN 0x420
-static const struct mlxsw_reg_info mlxsw_reg_sftr = {
- .id = MLXSW_REG_SFTR_ID,
- .len = MLXSW_REG_SFTR_LEN,
-};
+MLXSW_REG_DEFINE(sftr, MLXSW_REG_SFTR_ID, MLXSW_REG_SFTR_LEN);
/* reg_sftr_swid
* Switch partition ID with which to associate the port.
@@ -1118,10 +1087,7 @@ static inline void mlxsw_reg_sftr_pack(char *payload,
#define MLXSW_REG_SFDF_ID 0x2013
#define MLXSW_REG_SFDF_LEN 0x14
-static const struct mlxsw_reg_info mlxsw_reg_sfdf = {
- .id = MLXSW_REG_SFDF_ID,
- .len = MLXSW_REG_SFDF_LEN,
-};
+MLXSW_REG_DEFINE(sfdf, MLXSW_REG_SFDF_ID, MLXSW_REG_SFDF_LEN);
/* reg_sfdf_swid
* Switch partition ID.
@@ -1205,10 +1171,7 @@ MLXSW_ITEM32(reg, sfdf, lag_fid_lag_id, 0x08, 0, 10);
#define MLXSW_REG_SLDR_ID 0x2014
#define MLXSW_REG_SLDR_LEN 0x0C /* counting in only one port in list */
-static const struct mlxsw_reg_info mlxsw_reg_sldr = {
- .id = MLXSW_REG_SLDR_ID,
- .len = MLXSW_REG_SLDR_LEN,
-};
+MLXSW_REG_DEFINE(sldr, MLXSW_REG_SLDR_ID, MLXSW_REG_SLDR_LEN);
enum mlxsw_reg_sldr_op {
/* Indicates a creation of a new LAG-ID, lag_id must be valid */
@@ -1288,10 +1251,7 @@ static inline void mlxsw_reg_sldr_lag_remove_port_pack(char *payload, u8 lag_id,
#define MLXSW_REG_SLCR_ID 0x2015
#define MLXSW_REG_SLCR_LEN 0x10
-static const struct mlxsw_reg_info mlxsw_reg_slcr = {
- .id = MLXSW_REG_SLCR_ID,
- .len = MLXSW_REG_SLCR_LEN,
-};
+MLXSW_REG_DEFINE(slcr, MLXSW_REG_SLCR_ID, MLXSW_REG_SLCR_LEN);
enum mlxsw_reg_slcr_pp {
/* Global Configuration (for all ports) */
@@ -1404,10 +1364,7 @@ static inline void mlxsw_reg_slcr_pack(char *payload, u16 lag_hash)
#define MLXSW_REG_SLCOR_ID 0x2016
#define MLXSW_REG_SLCOR_LEN 0x10
-static const struct mlxsw_reg_info mlxsw_reg_slcor = {
- .id = MLXSW_REG_SLCOR_ID,
- .len = MLXSW_REG_SLCOR_LEN,
-};
+MLXSW_REG_DEFINE(slcor, MLXSW_REG_SLCOR_ID, MLXSW_REG_SLCOR_LEN);
enum mlxsw_reg_slcor_col {
/* Port is added with collector disabled */
@@ -1490,10 +1447,7 @@ static inline void mlxsw_reg_slcor_col_disable_pack(char *payload,
#define MLXSW_REG_SPMLR_ID 0x2018
#define MLXSW_REG_SPMLR_LEN 0x8
-static const struct mlxsw_reg_info mlxsw_reg_spmlr = {
- .id = MLXSW_REG_SPMLR_ID,
- .len = MLXSW_REG_SPMLR_LEN,
-};
+MLXSW_REG_DEFINE(spmlr, MLXSW_REG_SPMLR_ID, MLXSW_REG_SPMLR_LEN);
/* reg_spmlr_local_port
* Local port number.
@@ -1544,10 +1498,7 @@ static inline void mlxsw_reg_spmlr_pack(char *payload, u8 local_port,
#define MLXSW_REG_SVFA_ID 0x201C
#define MLXSW_REG_SVFA_LEN 0x10
-static const struct mlxsw_reg_info mlxsw_reg_svfa = {
- .id = MLXSW_REG_SVFA_ID,
- .len = MLXSW_REG_SVFA_LEN,
-};
+MLXSW_REG_DEFINE(svfa, MLXSW_REG_SVFA_ID, MLXSW_REG_SVFA_LEN);
/* reg_svfa_swid
* Switch partition ID.
@@ -1636,10 +1587,7 @@ static inline void mlxsw_reg_svfa_pack(char *payload, u8 local_port,
#define MLXSW_REG_SVPE_ID 0x201E
#define MLXSW_REG_SVPE_LEN 0x4
-static const struct mlxsw_reg_info mlxsw_reg_svpe = {
- .id = MLXSW_REG_SVPE_ID,
- .len = MLXSW_REG_SVPE_LEN,
-};
+MLXSW_REG_DEFINE(svpe, MLXSW_REG_SVPE_ID, MLXSW_REG_SVPE_LEN);
/* reg_svpe_local_port
* Local port number
@@ -1672,10 +1620,7 @@ static inline void mlxsw_reg_svpe_pack(char *payload, u8 local_port,
#define MLXSW_REG_SFMR_ID 0x201F
#define MLXSW_REG_SFMR_LEN 0x18
-static const struct mlxsw_reg_info mlxsw_reg_sfmr = {
- .id = MLXSW_REG_SFMR_ID,
- .len = MLXSW_REG_SFMR_LEN,
-};
+MLXSW_REG_DEFINE(sfmr, MLXSW_REG_SFMR_ID, MLXSW_REG_SFMR_LEN);
enum mlxsw_reg_sfmr_op {
MLXSW_REG_SFMR_OP_CREATE_FID,
@@ -1762,10 +1707,7 @@ static inline void mlxsw_reg_sfmr_pack(char *payload,
MLXSW_REG_SPVMLR_REC_LEN * \
MLXSW_REG_SPVMLR_REC_MAX_COUNT)
-static const struct mlxsw_reg_info mlxsw_reg_spvmlr = {
- .id = MLXSW_REG_SPVMLR_ID,
- .len = MLXSW_REG_SPVMLR_LEN,
-};
+MLXSW_REG_DEFINE(spvmlr, MLXSW_REG_SPVMLR_ID, MLXSW_REG_SPVMLR_LEN);
/* reg_spvmlr_local_port
* Local ingress port.
@@ -1815,6 +1757,146 @@ static inline void mlxsw_reg_spvmlr_pack(char *payload, u8 local_port,
}
}
+/* QPCR - QoS Policer Configuration Register
+ * -----------------------------------------
+ * The QPCR register is used to create policers that limit
+ * the rate of bytes or packets via some trap group.
+ */
+#define MLXSW_REG_QPCR_ID 0x4004
+#define MLXSW_REG_QPCR_LEN 0x28
+
+MLXSW_REG_DEFINE(qpcr, MLXSW_REG_QPCR_ID, MLXSW_REG_QPCR_LEN);
+
+enum mlxsw_reg_qpcr_g {
+ MLXSW_REG_QPCR_G_GLOBAL = 2,
+ MLXSW_REG_QPCR_G_STORM_CONTROL = 3,
+};
+
+/* reg_qpcr_g
+ * The policer type.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, qpcr, g, 0x00, 14, 2);
+
+/* reg_qpcr_pid
+ * Policer ID.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, qpcr, pid, 0x00, 0, 14);
+
+/* reg_qpcr_color_aware
+ * Indicates whether the policer is color-aware.
+ * Must be 0 (unaware) for the CPU port.
+ * Access: RW for unbounded policer. RO for bounded policer.
+ */
+MLXSW_ITEM32(reg, qpcr, color_aware, 0x04, 15, 1);
+
+/* reg_qpcr_bytes
+ * Whether the policer limit is for bytes per sec or packets per sec.
+ * 0 - packets
+ * 1 - bytes
+ * Access: RW for unbounded policer. RO for bounded policer.
+ */
+MLXSW_ITEM32(reg, qpcr, bytes, 0x04, 14, 1);
+
+enum mlxsw_reg_qpcr_ir_units {
+ MLXSW_REG_QPCR_IR_UNITS_M,
+ MLXSW_REG_QPCR_IR_UNITS_K,
+};
+
+/* reg_qpcr_ir_units
+ * Policer's units for cir and eir fields (for bytes limits only)
+ * 1 - 10^3
+ * 0 - 10^6
+ * Access: OP
+ */
+MLXSW_ITEM32(reg, qpcr, ir_units, 0x04, 12, 1);
+
+enum mlxsw_reg_qpcr_rate_type {
+ MLXSW_REG_QPCR_RATE_TYPE_SINGLE = 1,
+ MLXSW_REG_QPCR_RATE_TYPE_DOUBLE = 2,
+};
+
+/* reg_qpcr_rate_type
+ * A policer can have one limit (single rate) or two limits, with a specific
+ * action for packets that exceed the lower rate but not the upper one.
+ * (Must be single rate for the CPU port.)
+ * Access: RW for unbounded policer. RO for bounded policer.
+ */
+MLXSW_ITEM32(reg, qpcr, rate_type, 0x04, 8, 2);
+
+/* reg_qpcr_cbs
+ * Policer's committed burst size.
+ * The policer works with time slices of 50 nanoseconds. By default, every
+ * slice is granted the proportionate share of the committed rate. To allow
+ * a slice to exceed that share (while still keeping the rate per sec), a
+ * burst can be allowed. The burst size ranges from the default proportionate
+ * share (and no lower than 8) up to 32Gb (although a value higher than the
+ * committed rate will result in exceeding the rate). The burst size must be
+ * a power of 2, determined by 2^cbs.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, qpcr, cbs, 0x08, 24, 6);
+
+/* reg_qpcr_cir
+ * Policer's committed rate.
+ * The rate used for single rate; the lower rate for double rate.
+ * For bytes limits, the rate will be this value * the unit from ir_units.
+ * (Resolution error is up to 1%).
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, qpcr, cir, 0x0C, 0, 32);
+
+/* reg_qpcr_eir
+ * Policer's exceed rate.
+ * The higher rate for a double rate policer; reserved for single rate.
+ * For bytes limits, the rate will be this value * the unit from ir_units.
+ * (Resolution error is up to 1%).
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, qpcr, eir, 0x10, 0, 32);
+
+#define MLXSW_REG_QPCR_DOUBLE_RATE_ACTION 2
+
+/* reg_qpcr_exceed_action
+ * What to do with packets between the two limits for double rate.
+ * Access: RW for unbounded policer. RO for bounded policer.
+ */
+MLXSW_ITEM32(reg, qpcr, exceed_action, 0x14, 0, 4);
+
+enum mlxsw_reg_qpcr_action {
+ /* Discard */
+ MLXSW_REG_QPCR_ACTION_DISCARD = 1,
+ /* Forward and set color to red.
+ * If the packet is destined to the CPU port, it will be dropped.
+ */
+ MLXSW_REG_QPCR_ACTION_FORWARD = 2,
+};
+
+/* reg_qpcr_violate_action
+ * What to do with packets that cross the cir limit (for single rate) or the eir
+ * limit (for double rate).
+ * Access: RW for unbounded policer. RO for bounded policer.
+ */
+MLXSW_ITEM32(reg, qpcr, violate_action, 0x18, 0, 4);
+
+static inline void mlxsw_reg_qpcr_pack(char *payload, u16 pid,
+ enum mlxsw_reg_qpcr_ir_units ir_units,
+ bool bytes, u32 cir, u16 cbs)
+{
+ MLXSW_REG_ZERO(qpcr, payload);
+ mlxsw_reg_qpcr_pid_set(payload, pid);
+ mlxsw_reg_qpcr_g_set(payload, MLXSW_REG_QPCR_G_GLOBAL);
+ mlxsw_reg_qpcr_rate_type_set(payload, MLXSW_REG_QPCR_RATE_TYPE_SINGLE);
+ mlxsw_reg_qpcr_violate_action_set(payload,
+ MLXSW_REG_QPCR_ACTION_DISCARD);
+ mlxsw_reg_qpcr_cir_set(payload, cir);
+ mlxsw_reg_qpcr_ir_units_set(payload, ir_units);
+ mlxsw_reg_qpcr_bytes_set(payload, bytes);
+ mlxsw_reg_qpcr_cbs_set(payload, cbs);
+}
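A hypothetical caller, creating a single-rate packet policer with ID 1, a rate of 100 packets per second and a 2^7 = 128 packet burst (mlxsw_reg_write() is the existing core helper; the wrapper name and values are illustrative):

static int mlxsw_sp_policer_init(struct mlxsw_core *mlxsw_core)
{
	char qpcr_pl[MLXSW_REG_QPCR_LEN];

	mlxsw_reg_qpcr_pack(qpcr_pl, 1, MLXSW_REG_QPCR_IR_UNITS_M,
			    false, 100, 7);
	return mlxsw_reg_write(mlxsw_core, MLXSW_REG(qpcr), qpcr_pl);
}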
+
/* QTCT - QoS Switch Traffic Class Table
* -------------------------------------
* Configures the mapping between the packet switch priority and the
@@ -1823,10 +1905,7 @@ static inline void mlxsw_reg_spvmlr_pack(char *payload, u8 local_port,
#define MLXSW_REG_QTCT_ID 0x400A
#define MLXSW_REG_QTCT_LEN 0x08
-static const struct mlxsw_reg_info mlxsw_reg_qtct = {
- .id = MLXSW_REG_QTCT_ID,
- .len = MLXSW_REG_QTCT_LEN,
-};
+MLXSW_REG_DEFINE(qtct, MLXSW_REG_QTCT_ID, MLXSW_REG_QTCT_LEN);
/* reg_qtct_local_port
* Local port number.
@@ -1875,10 +1954,7 @@ static inline void mlxsw_reg_qtct_pack(char *payload, u8 local_port,
#define MLXSW_REG_QEEC_ID 0x400D
#define MLXSW_REG_QEEC_LEN 0x1C
-static const struct mlxsw_reg_info mlxsw_reg_qeec = {
- .id = MLXSW_REG_QEEC_ID,
- .len = MLXSW_REG_QEEC_LEN,
-};
+MLXSW_REG_DEFINE(qeec, MLXSW_REG_QEEC_ID, MLXSW_REG_QEEC_LEN);
/* reg_qeec_local_port
* Local port number.
@@ -2000,10 +2076,7 @@ static inline void mlxsw_reg_qeec_pack(char *payload, u8 local_port,
#define MLXSW_REG_PMLP_ID 0x5002
#define MLXSW_REG_PMLP_LEN 0x40
-static const struct mlxsw_reg_info mlxsw_reg_pmlp = {
- .id = MLXSW_REG_PMLP_ID,
- .len = MLXSW_REG_PMLP_LEN,
-};
+MLXSW_REG_DEFINE(pmlp, MLXSW_REG_PMLP_ID, MLXSW_REG_PMLP_LEN);
/* reg_pmlp_rxtx
* 0 - Tx value is used for both Tx and Rx.
@@ -2059,10 +2132,7 @@ static inline void mlxsw_reg_pmlp_pack(char *payload, u8 local_port)
#define MLXSW_REG_PMTU_ID 0x5003
#define MLXSW_REG_PMTU_LEN 0x10
-static const struct mlxsw_reg_info mlxsw_reg_pmtu = {
- .id = MLXSW_REG_PMTU_ID,
- .len = MLXSW_REG_PMTU_LEN,
-};
+MLXSW_REG_DEFINE(pmtu, MLXSW_REG_PMTU_ID, MLXSW_REG_PMTU_LEN);
/* reg_pmtu_local_port
* Local port number.
@@ -2116,10 +2186,7 @@ static inline void mlxsw_reg_pmtu_pack(char *payload, u8 local_port,
#define MLXSW_REG_PTYS_ID 0x5004
#define MLXSW_REG_PTYS_LEN 0x40
-static const struct mlxsw_reg_info mlxsw_reg_ptys = {
- .id = MLXSW_REG_PTYS_ID,
- .len = MLXSW_REG_PTYS_LEN,
-};
+MLXSW_REG_DEFINE(ptys, MLXSW_REG_PTYS_ID, MLXSW_REG_PTYS_LEN);
/* reg_ptys_local_port
* Local port number.
@@ -2127,6 +2194,7 @@ static const struct mlxsw_reg_info mlxsw_reg_ptys = {
*/
MLXSW_ITEM32(reg, ptys, local_port, 0x00, 16, 8);
+#define MLXSW_REG_PTYS_PROTO_MASK_IB BIT(0)
#define MLXSW_REG_PTYS_PROTO_MASK_ETH BIT(2)
/* reg_ptys_proto_mask
@@ -2185,18 +2253,61 @@ MLXSW_ITEM32(reg, ptys, an_status, 0x04, 28, 4);
*/
MLXSW_ITEM32(reg, ptys, eth_proto_cap, 0x0C, 0, 32);
+/* reg_ptys_ib_link_width_cap
+ * IB port supported widths.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, ptys, ib_link_width_cap, 0x10, 16, 16);
+
+#define MLXSW_REG_PTYS_IB_SPEED_SDR BIT(0)
+#define MLXSW_REG_PTYS_IB_SPEED_DDR BIT(1)
+#define MLXSW_REG_PTYS_IB_SPEED_QDR BIT(2)
+#define MLXSW_REG_PTYS_IB_SPEED_FDR10 BIT(3)
+#define MLXSW_REG_PTYS_IB_SPEED_FDR BIT(4)
+#define MLXSW_REG_PTYS_IB_SPEED_EDR BIT(5)
+
+/* reg_ptys_ib_proto_cap
+ * IB port supported speeds and protocols.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, ptys, ib_proto_cap, 0x10, 0, 16);
+
/* reg_ptys_eth_proto_admin
* Speed and protocol to set port to.
* Access: RW
*/
MLXSW_ITEM32(reg, ptys, eth_proto_admin, 0x18, 0, 32);
+/* reg_ptys_ib_link_width_admin
+ * IB width to set port to.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ptys, ib_link_width_admin, 0x1C, 16, 16);
+
+/* reg_ptys_ib_proto_admin
+ * IB speeds and protocols to set port to.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ptys, ib_proto_admin, 0x1C, 0, 16);
+
/* reg_ptys_eth_proto_oper
* The current speed and protocol configured for the port.
* Access: RO
*/
MLXSW_ITEM32(reg, ptys, eth_proto_oper, 0x24, 0, 32);
+/* reg_ptys_ib_link_width_oper
+ * The current IB width to set port to.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, ptys, ib_link_width_oper, 0x28, 16, 16);
+
+/* reg_ptys_ib_proto_oper
+ * The current IB speed and protocol.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, ptys, ib_proto_oper, 0x28, 0, 16);
+
/* reg_ptys_eth_proto_lp_advertise
* The protocols that were advertised by the link partner during
* autonegotiation.
@@ -2204,8 +2315,8 @@ MLXSW_ITEM32(reg, ptys, eth_proto_oper, 0x24, 0, 32);
*/
MLXSW_ITEM32(reg, ptys, eth_proto_lp_advertise, 0x30, 0, 32);
-static inline void mlxsw_reg_ptys_pack(char *payload, u8 local_port,
- u32 proto_admin)
+static inline void mlxsw_reg_ptys_eth_pack(char *payload, u8 local_port,
+ u32 proto_admin)
{
MLXSW_REG_ZERO(ptys, payload);
mlxsw_reg_ptys_local_port_set(payload, local_port);
@@ -2213,9 +2324,10 @@ static inline void mlxsw_reg_ptys_pack(char *payload, u8 local_port,
mlxsw_reg_ptys_eth_proto_admin_set(payload, proto_admin);
}
-static inline void mlxsw_reg_ptys_unpack(char *payload, u32 *p_eth_proto_cap,
- u32 *p_eth_proto_adm,
- u32 *p_eth_proto_oper)
+static inline void mlxsw_reg_ptys_eth_unpack(char *payload,
+ u32 *p_eth_proto_cap,
+ u32 *p_eth_proto_adm,
+ u32 *p_eth_proto_oper)
{
if (p_eth_proto_cap)
*p_eth_proto_cap = mlxsw_reg_ptys_eth_proto_cap_get(payload);
@@ -2225,6 +2337,33 @@ static inline void mlxsw_reg_ptys_unpack(char *payload, u32 *p_eth_proto_cap,
*p_eth_proto_oper = mlxsw_reg_ptys_eth_proto_oper_get(payload);
}
+static inline void mlxsw_reg_ptys_ib_pack(char *payload, u8 local_port,
+ u16 proto_admin, u16 link_width)
+{
+ MLXSW_REG_ZERO(ptys, payload);
+ mlxsw_reg_ptys_local_port_set(payload, local_port);
+ mlxsw_reg_ptys_proto_mask_set(payload, MLXSW_REG_PTYS_PROTO_MASK_IB);
+ mlxsw_reg_ptys_ib_proto_admin_set(payload, proto_admin);
+ mlxsw_reg_ptys_ib_link_width_admin_set(payload, link_width);
+}
+
+static inline void mlxsw_reg_ptys_ib_unpack(char *payload, u16 *p_ib_proto_cap,
+ u16 *p_ib_link_width_cap,
+ u16 *p_ib_proto_oper,
+ u16 *p_ib_link_width_oper)
+{
+ if (p_ib_proto_cap)
+ *p_ib_proto_cap = mlxsw_reg_ptys_ib_proto_cap_get(payload);
+ if (p_ib_link_width_cap)
+ *p_ib_link_width_cap =
+ mlxsw_reg_ptys_ib_link_width_cap_get(payload);
+ if (p_ib_proto_oper)
+ *p_ib_proto_oper = mlxsw_reg_ptys_ib_proto_oper_get(payload);
+ if (p_ib_link_width_oper)
+ *p_ib_link_width_oper =
+ mlxsw_reg_ptys_ib_link_width_oper_get(payload);
+}
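A sketch of querying a port's IB capabilities with the new helpers: pack with zeroed admin fields, query the register, then unpack the read-only capability fields (the surrounding function is hypothetical; mlxsw_reg_query() is the existing core helper):

static int mlxsw_sib_port_ib_caps_get(struct mlxsw_core *mlxsw_core,
				      u8 local_port, u16 *p_proto_cap,
				      u16 *p_width_cap)
{
	char ptys_pl[MLXSW_REG_PTYS_LEN];
	int err;

	mlxsw_reg_ptys_ib_pack(ptys_pl, local_port, 0, 0);
	err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(ptys), ptys_pl);
	if (err)
		return err;
	mlxsw_reg_ptys_ib_unpack(ptys_pl, p_proto_cap, p_width_cap,
				 NULL, NULL);
	return 0;
}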
+
/* PPAD - Port Physical Address Register
* -------------------------------------
* The PPAD register configures the per port physical MAC address.
@@ -2232,10 +2371,7 @@ static inline void mlxsw_reg_ptys_unpack(char *payload, u32 *p_eth_proto_cap,
#define MLXSW_REG_PPAD_ID 0x5005
#define MLXSW_REG_PPAD_LEN 0x10
-static const struct mlxsw_reg_info mlxsw_reg_ppad = {
- .id = MLXSW_REG_PPAD_ID,
- .len = MLXSW_REG_PPAD_LEN,
-};
+MLXSW_REG_DEFINE(ppad, MLXSW_REG_PPAD_ID, MLXSW_REG_PPAD_LEN);
/* reg_ppad_single_base_mac
* 0: base_mac, local port should be 0 and mac[7:0] is
@@ -2273,10 +2409,7 @@ static inline void mlxsw_reg_ppad_pack(char *payload, bool single_base_mac,
#define MLXSW_REG_PAOS_ID 0x5006
#define MLXSW_REG_PAOS_LEN 0x10
-static const struct mlxsw_reg_info mlxsw_reg_paos = {
- .id = MLXSW_REG_PAOS_ID,
- .len = MLXSW_REG_PAOS_LEN,
-};
+MLXSW_REG_DEFINE(paos, MLXSW_REG_PAOS_ID, MLXSW_REG_PAOS_LEN);
/* reg_paos_swid
* Switch partition ID with which to associate the port.
@@ -2356,10 +2489,7 @@ static inline void mlxsw_reg_paos_pack(char *payload, u8 local_port,
#define MLXSW_REG_PFCC_ID 0x5007
#define MLXSW_REG_PFCC_LEN 0x20
-static const struct mlxsw_reg_info mlxsw_reg_pfcc = {
- .id = MLXSW_REG_PFCC_ID,
- .len = MLXSW_REG_PFCC_LEN,
-};
+MLXSW_REG_DEFINE(pfcc, MLXSW_REG_PFCC_ID, MLXSW_REG_PFCC_LEN);
/* reg_pfcc_local_port
* Local port number.
@@ -2495,10 +2625,7 @@ static inline void mlxsw_reg_pfcc_pack(char *payload, u8 local_port)
#define MLXSW_REG_PPCNT_ID 0x5008
#define MLXSW_REG_PPCNT_LEN 0x100
-static const struct mlxsw_reg_info mlxsw_reg_ppcnt = {
- .id = MLXSW_REG_PPCNT_ID,
- .len = MLXSW_REG_PPCNT_LEN,
-};
+MLXSW_REG_DEFINE(ppcnt, MLXSW_REG_PPCNT_ID, MLXSW_REG_PPCNT_LEN);
/* reg_ppcnt_swid
* For HCA: must be always 0.
@@ -2761,6 +2888,27 @@ static inline void mlxsw_reg_ppcnt_pack(char *payload, u8 local_port,
mlxsw_reg_ppcnt_prio_tc_set(payload, prio_tc);
}
+/* PLIB - Port Local to InfiniBand Port
+ * ------------------------------------
+ * The PLIB register performs mapping from Local Port into InfiniBand Port.
+ */
+#define MLXSW_REG_PLIB_ID 0x500A
+#define MLXSW_REG_PLIB_LEN 0x10
+
+MLXSW_REG_DEFINE(plib, MLXSW_REG_PLIB_ID, MLXSW_REG_PLIB_LEN);
+
+/* reg_plib_local_port
+ * Local port number.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, plib, local_port, 0x00, 16, 8);
+
+/* reg_plib_ib_port
+ * InfiniBand port remapping for local_port.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, plib, ib_port, 0x00, 0, 8);
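No dedicated pack helper is added for PLIB; the setters generated by the item definitions above are enough. A sketch of remapping a local port to an IB port (the surrounding function is hypothetical):

static int mlxsw_sib_port_ib_remap(struct mlxsw_core *mlxsw_core,
				   u8 local_port, u8 ib_port)
{
	char plib_pl[MLXSW_REG_PLIB_LEN];

	MLXSW_REG_ZERO(plib, plib_pl);
	mlxsw_reg_plib_local_port_set(plib_pl, local_port);
	mlxsw_reg_plib_ib_port_set(plib_pl, ib_port);
	return mlxsw_reg_write(mlxsw_core, MLXSW_REG(plib), plib_pl);
}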
+
/* PPTB - Port Prio To Buffer Register
* -----------------------------------
* Configures the switch priority to buffer table.
@@ -2768,10 +2916,7 @@ static inline void mlxsw_reg_ppcnt_pack(char *payload, u8 local_port,
#define MLXSW_REG_PPTB_ID 0x500B
#define MLXSW_REG_PPTB_LEN 0x10
-static const struct mlxsw_reg_info mlxsw_reg_pptb = {
- .id = MLXSW_REG_PPTB_ID,
- .len = MLXSW_REG_PPTB_LEN,
-};
+MLXSW_REG_DEFINE(pptb, MLXSW_REG_PPTB_ID, MLXSW_REG_PPTB_LEN);
enum {
MLXSW_REG_PPTB_MM_UM,
@@ -2865,10 +3010,7 @@ static inline void mlxsw_reg_pptb_prio_to_buff_pack(char *payload, u8 prio,
#define MLXSW_REG_PBMC_ID 0x500C
#define MLXSW_REG_PBMC_LEN 0x6C
-static const struct mlxsw_reg_info mlxsw_reg_pbmc = {
- .id = MLXSW_REG_PBMC_ID,
- .len = MLXSW_REG_PBMC_LEN,
-};
+MLXSW_REG_DEFINE(pbmc, MLXSW_REG_PBMC_ID, MLXSW_REG_PBMC_LEN);
/* reg_pbmc_local_port
* Local port number.
@@ -2978,10 +3120,7 @@ static inline void mlxsw_reg_pbmc_lossless_buffer_pack(char *payload,
#define MLXSW_REG_PSPA_ID 0x500D
#define MLXSW_REG_PSPA_LEN 0x8
-static const struct mlxsw_reg_info mlxsw_reg_pspa = {
- .id = MLXSW_REG_PSPA_ID,
- .len = MLXSW_REG_PSPA_LEN,
-};
+MLXSW_REG_DEFINE(pspa, MLXSW_REG_PSPA_ID, MLXSW_REG_PSPA_LEN);
/* reg_pspa_swid
* Switch partition ID.
@@ -3017,10 +3156,7 @@ static inline void mlxsw_reg_pspa_pack(char *payload, u8 swid, u8 local_port)
#define MLXSW_REG_HTGT_ID 0x7002
#define MLXSW_REG_HTGT_LEN 0x100
-static const struct mlxsw_reg_info mlxsw_reg_htgt = {
- .id = MLXSW_REG_HTGT_ID,
- .len = MLXSW_REG_HTGT_LEN,
-};
+MLXSW_REG_DEFINE(htgt, MLXSW_REG_HTGT_ID, MLXSW_REG_HTGT_LEN);
/* reg_htgt_swid
* Switch partition ID.
@@ -3038,8 +3174,21 @@ MLXSW_ITEM32(reg, htgt, type, 0x00, 8, 4);
enum mlxsw_reg_htgt_trap_group {
MLXSW_REG_HTGT_TRAP_GROUP_EMAD,
- MLXSW_REG_HTGT_TRAP_GROUP_RX,
- MLXSW_REG_HTGT_TRAP_GROUP_CTRL,
+ MLXSW_REG_HTGT_TRAP_GROUP_SX2_RX,
+ MLXSW_REG_HTGT_TRAP_GROUP_SX2_CTRL,
+ MLXSW_REG_HTGT_TRAP_GROUP_SP_STP,
+ MLXSW_REG_HTGT_TRAP_GROUP_SP_LACP,
+ MLXSW_REG_HTGT_TRAP_GROUP_SP_LLDP,
+ MLXSW_REG_HTGT_TRAP_GROUP_SP_IGMP,
+ MLXSW_REG_HTGT_TRAP_GROUP_SP_BGP_IPV4,
+ MLXSW_REG_HTGT_TRAP_GROUP_SP_OSPF,
+ MLXSW_REG_HTGT_TRAP_GROUP_SP_ARP,
+ MLXSW_REG_HTGT_TRAP_GROUP_SP_ARP_MISS,
+ MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP,
+ MLXSW_REG_HTGT_TRAP_GROUP_SP_REMOTE_ROUTE,
+ MLXSW_REG_HTGT_TRAP_GROUP_SP_IP2ME,
+ MLXSW_REG_HTGT_TRAP_GROUP_SP_DHCP,
+ MLXSW_REG_HTGT_TRAP_GROUP_SP_EVENT,
};
/* reg_htgt_trap_group
@@ -3061,6 +3210,8 @@ enum {
*/
MLXSW_ITEM32(reg, htgt, pide, 0x04, 15, 1);
+#define MLXSW_REG_HTGT_INVALID_POLICER 0xff
+
/* reg_htgt_pid
* Policer ID for the trap group.
* Access: RW
@@ -3086,6 +3237,8 @@ MLXSW_ITEM32(reg, htgt, mirror_action, 0x08, 8, 2);
*/
MLXSW_ITEM32(reg, htgt, mirroring_agent, 0x08, 0, 3);
+#define MLXSW_REG_HTGT_DEFAULT_PRIORITY 0
+
/* reg_htgt_priority
* Trap group priority.
* In case a packet matches multiple classification rules, the packet will
@@ -3099,52 +3252,47 @@ MLXSW_ITEM32(reg, htgt, mirroring_agent, 0x08, 0, 3);
*/
MLXSW_ITEM32(reg, htgt, priority, 0x0C, 0, 4);
+#define MLXSW_REG_HTGT_DEFAULT_TC 7
+
/* reg_htgt_local_path_cpu_tclass
* CPU ingress traffic class for the trap group.
* Access: RW
*/
MLXSW_ITEM32(reg, htgt, local_path_cpu_tclass, 0x10, 16, 6);
-#define MLXSW_REG_HTGT_LOCAL_PATH_RDQ_EMAD 0x15
-#define MLXSW_REG_HTGT_LOCAL_PATH_RDQ_RX 0x14
-#define MLXSW_REG_HTGT_LOCAL_PATH_RDQ_CTRL 0x13
-
+enum mlxsw_reg_htgt_local_path_rdq {
+ MLXSW_REG_HTGT_LOCAL_PATH_RDQ_SX2_CTRL = 0x13,
+ MLXSW_REG_HTGT_LOCAL_PATH_RDQ_SX2_RX = 0x14,
+ MLXSW_REG_HTGT_LOCAL_PATH_RDQ_SX2_EMAD = 0x15,
+ MLXSW_REG_HTGT_LOCAL_PATH_RDQ_SIB_EMAD = 0x15,
+};
/* reg_htgt_local_path_rdq
* Receive descriptor queue (RDQ) to use for the trap group.
* Access: RW
*/
MLXSW_ITEM32(reg, htgt, local_path_rdq, 0x10, 0, 6);
-static inline void mlxsw_reg_htgt_pack(char *payload,
- enum mlxsw_reg_htgt_trap_group group)
+static inline void mlxsw_reg_htgt_pack(char *payload, u8 group, u8 policer_id,
+ u8 priority, u8 tc)
{
- u8 swid, rdq;
-
MLXSW_REG_ZERO(htgt, payload);
- switch (group) {
- case MLXSW_REG_HTGT_TRAP_GROUP_EMAD:
- swid = MLXSW_PORT_SWID_ALL_SWIDS;
- rdq = MLXSW_REG_HTGT_LOCAL_PATH_RDQ_EMAD;
- break;
- case MLXSW_REG_HTGT_TRAP_GROUP_RX:
- swid = 0;
- rdq = MLXSW_REG_HTGT_LOCAL_PATH_RDQ_RX;
- break;
- case MLXSW_REG_HTGT_TRAP_GROUP_CTRL:
- swid = 0;
- rdq = MLXSW_REG_HTGT_LOCAL_PATH_RDQ_CTRL;
- break;
+
+ if (policer_id == MLXSW_REG_HTGT_INVALID_POLICER) {
+ mlxsw_reg_htgt_pide_set(payload,
+ MLXSW_REG_HTGT_POLICER_DISABLE);
+ } else {
+ mlxsw_reg_htgt_pide_set(payload,
+ MLXSW_REG_HTGT_POLICER_ENABLE);
+ mlxsw_reg_htgt_pid_set(payload, policer_id);
}
- mlxsw_reg_htgt_swid_set(payload, swid);
+
mlxsw_reg_htgt_type_set(payload, MLXSW_REG_HTGT_PATH_TYPE_LOCAL);
mlxsw_reg_htgt_trap_group_set(payload, group);
- mlxsw_reg_htgt_pide_set(payload, MLXSW_REG_HTGT_POLICER_DISABLE);
- mlxsw_reg_htgt_pid_set(payload, 0);
mlxsw_reg_htgt_mirror_action_set(payload, MLXSW_REG_HTGT_TRAP_TO_CPU);
mlxsw_reg_htgt_mirroring_agent_set(payload, 0);
- mlxsw_reg_htgt_priority_set(payload, 0);
- mlxsw_reg_htgt_local_path_cpu_tclass_set(payload, 7);
- mlxsw_reg_htgt_local_path_rdq_set(payload, rdq);
+ mlxsw_reg_htgt_priority_set(payload, priority);
+ mlxsw_reg_htgt_local_path_cpu_tclass_set(payload, tc);
+ mlxsw_reg_htgt_local_path_rdq_set(payload, group);
}
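With the new calling convention, configuring e.g. the EMAD trap group with no policer and the default priority and traffic class looks like this (a sketch; the wrapper function name is illustrative):

static int mlxsw_emad_trap_group_set(struct mlxsw_core *mlxsw_core)
{
	char htgt_pl[MLXSW_REG_HTGT_LEN];

	mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_EMAD,
			    MLXSW_REG_HTGT_INVALID_POLICER,
			    MLXSW_REG_HTGT_DEFAULT_PRIORITY,
			    MLXSW_REG_HTGT_DEFAULT_TC);
	return mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl);
}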
/* HPKT - Host Packet Trap
@@ -3154,10 +3302,7 @@ static inline void mlxsw_reg_htgt_pack(char *payload,
#define MLXSW_REG_HPKT_ID 0x7003
#define MLXSW_REG_HPKT_LEN 0x10
-static const struct mlxsw_reg_info mlxsw_reg_hpkt = {
- .id = MLXSW_REG_HPKT_ID,
- .len = MLXSW_REG_HPKT_LEN,
-};
+MLXSW_REG_DEFINE(hpkt, MLXSW_REG_HPKT_ID, MLXSW_REG_HPKT_LEN);
enum {
MLXSW_REG_HPKT_ACK_NOT_REQUIRED,
@@ -3221,6 +3366,7 @@ enum {
/* reg_hpkt_ctrl
* Configure dedicated buffer resources for control packets.
+ * Ignored by SwitchX-2.
* 0 - Keep factory defaults.
* 1 - Do not use control buffer for this trap ID.
* 2 - Use control buffer for this trap ID.
@@ -3228,25 +3374,18 @@ enum {
*/
MLXSW_ITEM32(reg, hpkt, ctrl, 0x04, 16, 2);
-static inline void mlxsw_reg_hpkt_pack(char *payload, u8 action, u16 trap_id)
+static inline void mlxsw_reg_hpkt_pack(char *payload, u8 action, u16 trap_id,
+ enum mlxsw_reg_htgt_trap_group trap_group,
+ bool is_ctrl)
{
- enum mlxsw_reg_htgt_trap_group trap_group;
-
MLXSW_REG_ZERO(hpkt, payload);
mlxsw_reg_hpkt_ack_set(payload, MLXSW_REG_HPKT_ACK_NOT_REQUIRED);
mlxsw_reg_hpkt_action_set(payload, action);
- switch (trap_id) {
- case MLXSW_TRAP_ID_ETHEMAD:
- case MLXSW_TRAP_ID_PUDE:
- trap_group = MLXSW_REG_HTGT_TRAP_GROUP_EMAD;
- break;
- default:
- trap_group = MLXSW_REG_HTGT_TRAP_GROUP_RX;
- break;
- }
mlxsw_reg_hpkt_trap_group_set(payload, trap_group);
mlxsw_reg_hpkt_trap_id_set(payload, trap_id);
- mlxsw_reg_hpkt_ctrl_set(payload, MLXSW_REG_HPKT_CTRL_PACKET_DEFAULT);
+ mlxsw_reg_hpkt_ctrl_set(payload, is_ctrl ?
+ MLXSW_REG_HPKT_CTRL_PACKET_USE_BUFFER :
+ MLXSW_REG_HPKT_CTRL_PACKET_NO_BUFFER);
}
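Callers now choose the trap group and control-buffer usage explicitly; for example, trapping STP packets to the CPU via the dedicated STP group with the control buffer (a sketch; MLXSW_TRAP_ID_STP and MLXSW_REG_HPKT_ACTION_TRAP_TO_CPU come from the existing mlxsw headers, the wrapper name is illustrative):

static int mlxsw_sp_stp_trap_set(struct mlxsw_core *mlxsw_core)
{
	char hpkt_pl[MLXSW_REG_HPKT_LEN];

	mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_TRAP_TO_CPU,
			    MLXSW_TRAP_ID_STP,
			    MLXSW_REG_HTGT_TRAP_GROUP_SP_STP, true);
	return mlxsw_reg_write(mlxsw_core, MLXSW_REG(hpkt), hpkt_pl);
}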
/* RGCR - Router General Configuration Register
@@ -3256,10 +3395,7 @@ static inline void mlxsw_reg_hpkt_pack(char *payload, u8 action, u16 trap_id)
#define MLXSW_REG_RGCR_ID 0x8001
#define MLXSW_REG_RGCR_LEN 0x28
-static const struct mlxsw_reg_info mlxsw_reg_rgcr = {
- .id = MLXSW_REG_RGCR_ID,
- .len = MLXSW_REG_RGCR_LEN,
-};
+MLXSW_REG_DEFINE(rgcr, MLXSW_REG_RGCR_ID, MLXSW_REG_RGCR_LEN);
/* reg_rgcr_ipv4_en
* IPv4 router enable.
@@ -3330,10 +3466,7 @@ static inline void mlxsw_reg_rgcr_pack(char *payload, bool ipv4_en)
#define MLXSW_REG_RITR_ID 0x8002
#define MLXSW_REG_RITR_LEN 0x40
-static const struct mlxsw_reg_info mlxsw_reg_ritr = {
- .id = MLXSW_REG_RITR_ID,
- .len = MLXSW_REG_RITR_LEN,
-};
+MLXSW_REG_DEFINE(ritr, MLXSW_REG_RITR_ID, MLXSW_REG_RITR_LEN);
/* reg_ritr_enable
* Enables routing on the router interface.
@@ -3533,10 +3666,7 @@ static inline void mlxsw_reg_ritr_pack(char *payload, bool enable,
#define MLXSW_REG_RATR_ID 0x8008
#define MLXSW_REG_RATR_LEN 0x2C
-static const struct mlxsw_reg_info mlxsw_reg_ratr = {
- .id = MLXSW_REG_RATR_ID,
- .len = MLXSW_REG_RATR_LEN,
-};
+MLXSW_REG_DEFINE(ratr, MLXSW_REG_RATR_ID, MLXSW_REG_RATR_LEN);
enum mlxsw_reg_ratr_op {
/* Read */
@@ -3663,10 +3793,7 @@ static inline void mlxsw_reg_ratr_eth_entry_pack(char *payload,
#define MLXSW_REG_RALTA_ID 0x8010
#define MLXSW_REG_RALTA_LEN 0x04
-static const struct mlxsw_reg_info mlxsw_reg_ralta = {
- .id = MLXSW_REG_RALTA_ID,
- .len = MLXSW_REG_RALTA_LEN,
-};
+MLXSW_REG_DEFINE(ralta, MLXSW_REG_RALTA_ID, MLXSW_REG_RALTA_LEN);
/* reg_ralta_op
* opcode (valid for Write, must be 0 on Read)
@@ -3718,10 +3845,7 @@ static inline void mlxsw_reg_ralta_pack(char *payload, bool alloc,
#define MLXSW_REG_RALST_ID 0x8011
#define MLXSW_REG_RALST_LEN 0x104
-static const struct mlxsw_reg_info mlxsw_reg_ralst = {
- .id = MLXSW_REG_RALST_ID,
- .len = MLXSW_REG_RALST_LEN,
-};
+MLXSW_REG_DEFINE(ralst, MLXSW_REG_RALST_ID, MLXSW_REG_RALST_LEN);
/* reg_ralst_root_bin
* The bin number of the root bin.
@@ -3788,10 +3912,7 @@ static inline void mlxsw_reg_ralst_bin_pack(char *payload, u8 bin_number,
#define MLXSW_REG_RALTB_ID 0x8012
#define MLXSW_REG_RALTB_LEN 0x04
-static const struct mlxsw_reg_info mlxsw_reg_raltb = {
- .id = MLXSW_REG_RALTB_ID,
- .len = MLXSW_REG_RALTB_LEN,
-};
+MLXSW_REG_DEFINE(raltb, MLXSW_REG_RALTB_ID, MLXSW_REG_RALTB_LEN);
/* reg_raltb_virtual_router
* Virtual Router ID
@@ -3832,10 +3953,7 @@ static inline void mlxsw_reg_raltb_pack(char *payload, u16 virtual_router,
#define MLXSW_REG_RALUE_ID 0x8013
#define MLXSW_REG_RALUE_LEN 0x38
-static const struct mlxsw_reg_info mlxsw_reg_ralue = {
- .id = MLXSW_REG_RALUE_ID,
- .len = MLXSW_REG_RALUE_LEN,
-};
+MLXSW_REG_DEFINE(ralue, MLXSW_REG_RALUE_ID, MLXSW_REG_RALUE_LEN);
/* reg_ralue_protocol
* Protocol.
@@ -4095,10 +4213,7 @@ mlxsw_reg_ralue_act_ip2me_pack(char *payload)
#define MLXSW_REG_RAUHT_ID 0x8014
#define MLXSW_REG_RAUHT_LEN 0x74
-static const struct mlxsw_reg_info mlxsw_reg_rauht = {
- .id = MLXSW_REG_RAUHT_ID,
- .len = MLXSW_REG_RAUHT_LEN,
-};
+MLXSW_REG_DEFINE(rauht, MLXSW_REG_RAUHT_ID, MLXSW_REG_RAUHT_LEN);
enum mlxsw_reg_rauht_type {
MLXSW_REG_RAUHT_TYPE_IPV4,
@@ -4234,10 +4349,7 @@ static inline void mlxsw_reg_rauht_pack4(char *payload,
#define MLXSW_REG_RALEU_ID 0x8015
#define MLXSW_REG_RALEU_LEN 0x28
-static const struct mlxsw_reg_info mlxsw_reg_raleu = {
- .id = MLXSW_REG_RALEU_ID,
- .len = MLXSW_REG_RALEU_LEN,
-};
+MLXSW_REG_DEFINE(raleu, MLXSW_REG_RALEU_ID, MLXSW_REG_RALEU_LEN);
/* reg_raleu_protocol
* Protocol.
@@ -4309,10 +4421,7 @@ static inline void mlxsw_reg_raleu_pack(char *payload,
MLXSW_REG_RAUHTD_REC_MAX_NUM * MLXSW_REG_RAUHTD_REC_LEN)
#define MLXSW_REG_RAUHTD_IPV4_ENT_PER_REC 4
-static const struct mlxsw_reg_info mlxsw_reg_rauhtd = {
- .id = MLXSW_REG_RAUHTD_ID,
- .len = MLXSW_REG_RAUHTD_LEN,
-};
+MLXSW_REG_DEFINE(rauhtd, MLXSW_REG_RAUHTD_ID, MLXSW_REG_RAUHTD_LEN);
#define MLXSW_REG_RAUHTD_FILTER_A BIT(0)
#define MLXSW_REG_RAUHTD_FILTER_RIF BIT(3)
@@ -4444,10 +4553,7 @@ static inline void mlxsw_reg_rauhtd_ent_ipv4_unpack(char *payload,
#define MLXSW_REG_MFCR_ID 0x9001
#define MLXSW_REG_MFCR_LEN 0x08
-static const struct mlxsw_reg_info mlxsw_reg_mfcr = {
- .id = MLXSW_REG_MFCR_ID,
- .len = MLXSW_REG_MFCR_LEN,
-};
+MLXSW_REG_DEFINE(mfcr, MLXSW_REG_MFCR_ID, MLXSW_REG_MFCR_LEN);
enum mlxsw_reg_mfcr_pwm_frequency {
MLXSW_REG_MFCR_PWM_FEQ_11HZ = 0x00,
@@ -4464,7 +4570,7 @@ enum mlxsw_reg_mfcr_pwm_frequency {
* Controls the frequency of the PWM signal.
* Access: RW
*/
-MLXSW_ITEM32(reg, mfcr, pwm_frequency, 0x00, 0, 6);
+MLXSW_ITEM32(reg, mfcr, pwm_frequency, 0x00, 0, 7);
#define MLXSW_MFCR_TACHOS_MAX 10
@@ -4507,10 +4613,7 @@ mlxsw_reg_mfcr_unpack(char *payload,
#define MLXSW_REG_MFSC_ID 0x9002
#define MLXSW_REG_MFSC_LEN 0x08
-static const struct mlxsw_reg_info mlxsw_reg_mfsc = {
- .id = MLXSW_REG_MFSC_ID,
- .len = MLXSW_REG_MFSC_LEN,
-};
+MLXSW_REG_DEFINE(mfsc, MLXSW_REG_MFSC_ID, MLXSW_REG_MFSC_LEN);
/* reg_mfsc_pwm
* Fan pwm to control / monitor.
@@ -4541,10 +4644,7 @@ static inline void mlxsw_reg_mfsc_pack(char *payload, u8 pwm,
#define MLXSW_REG_MFSM_ID 0x9003
#define MLXSW_REG_MFSM_LEN 0x08
-static const struct mlxsw_reg_info mlxsw_reg_mfsm = {
- .id = MLXSW_REG_MFSM_ID,
- .len = MLXSW_REG_MFSM_LEN,
-};
+MLXSW_REG_DEFINE(mfsm, MLXSW_REG_MFSM_ID, MLXSW_REG_MFSM_LEN);
/* reg_mfsm_tacho
* Fan tachometer index.
@@ -4564,6 +4664,54 @@ static inline void mlxsw_reg_mfsm_pack(char *payload, u8 tacho)
mlxsw_reg_mfsm_tacho_set(payload, tacho);
}
+/* MFSL - Management Fan Speed Limit Register
+ * ------------------------------------------
+ * The Fan Speed Limit register is used to configure the fan speed
+ * event / interrupt notification mechanism. Fan speed thresholds are
+ * defined for both under-speed and over-speed.
+ */
+#define MLXSW_REG_MFSL_ID 0x9004
+#define MLXSW_REG_MFSL_LEN 0x0C
+
+MLXSW_REG_DEFINE(mfsl, MLXSW_REG_MFSL_ID, MLXSW_REG_MFSL_LEN);
+
+/* reg_mfsl_tacho
+ * Fan tachometer index.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, mfsl, tacho, 0x00, 24, 4);
+
+/* reg_mfsl_tach_min
+ * Tachometer minimum value (minimum RPM).
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, mfsl, tach_min, 0x04, 0, 16);
+
+/* reg_mfsl_tach_max
+ * Tachometer maximum value (maximum RPM).
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, mfsl, tach_max, 0x08, 0, 16);
+
+static inline void mlxsw_reg_mfsl_pack(char *payload, u8 tacho,
+ u16 tach_min, u16 tach_max)
+{
+ MLXSW_REG_ZERO(mfsl, payload);
+ mlxsw_reg_mfsl_tacho_set(payload, tacho);
+ mlxsw_reg_mfsl_tach_min_set(payload, tach_min);
+ mlxsw_reg_mfsl_tach_max_set(payload, tach_max);
+}
+
+static inline void mlxsw_reg_mfsl_unpack(char *payload, u8 tacho,
+ u16 *p_tach_min, u16 *p_tach_max)
+{
+ if (p_tach_min)
+ *p_tach_min = mlxsw_reg_mfsl_tach_min_get(payload);
+
+ if (p_tach_max)
+ *p_tach_max = mlxsw_reg_mfsl_tach_max_get(payload);
+}
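A usage sketch, programming under- and over-speed thresholds for tachometer 0 (the wrapper name and RPM values are illustrative):

static int mlxsw_fan_limits_set(struct mlxsw_core *mlxsw_core)
{
	char mfsl_pl[MLXSW_REG_MFSL_LEN];

	mlxsw_reg_mfsl_pack(mfsl_pl, 0, 2000, 17000);
	return mlxsw_reg_write(mlxsw_core, MLXSW_REG(mfsl), mfsl_pl);
}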
+
/* MTCAP - Management Temperature Capabilities
* -------------------------------------------
* This register exposes the capabilities of the device and
@@ -4572,10 +4720,7 @@ static inline void mlxsw_reg_mfsm_pack(char *payload, u8 tacho)
#define MLXSW_REG_MTCAP_ID 0x9009
#define MLXSW_REG_MTCAP_LEN 0x08
-static const struct mlxsw_reg_info mlxsw_reg_mtcap = {
- .id = MLXSW_REG_MTCAP_ID,
- .len = MLXSW_REG_MTCAP_LEN,
-};
+MLXSW_REG_DEFINE(mtcap, MLXSW_REG_MTCAP_ID, MLXSW_REG_MTCAP_LEN);
/* reg_mtcap_sensor_count
* Number of sensors supported by the device.
@@ -4593,10 +4738,7 @@ MLXSW_ITEM32(reg, mtcap, sensor_count, 0x00, 0, 7);
#define MLXSW_REG_MTMP_ID 0x900A
#define MLXSW_REG_MTMP_LEN 0x20
-static const struct mlxsw_reg_info mlxsw_reg_mtmp = {
- .id = MLXSW_REG_MTMP_ID,
- .len = MLXSW_REG_MTMP_LEN,
-};
+MLXSW_REG_DEFINE(mtmp, MLXSW_REG_MTMP_ID, MLXSW_REG_MTMP_LEN);
/* reg_mtmp_sensor_index
* Sensors index to access.
@@ -4679,10 +4821,7 @@ static inline void mlxsw_reg_mtmp_unpack(char *payload, unsigned int *p_temp,
#define MLXSW_REG_MPAT_ID 0x901A
#define MLXSW_REG_MPAT_LEN 0x78
-static const struct mlxsw_reg_info mlxsw_reg_mpat = {
- .id = MLXSW_REG_MPAT_ID,
- .len = MLXSW_REG_MPAT_LEN,
-};
+MLXSW_REG_DEFINE(mpat, MLXSW_REG_MPAT_ID, MLXSW_REG_MPAT_LEN);
/* reg_mpat_pa_id
* Port Analyzer ID.
@@ -4742,10 +4881,7 @@ static inline void mlxsw_reg_mpat_pack(char *payload, u8 pa_id,
#define MLXSW_REG_MPAR_ID 0x901B
#define MLXSW_REG_MPAR_LEN 0x08
-static const struct mlxsw_reg_info mlxsw_reg_mpar = {
- .id = MLXSW_REG_MPAR_ID,
- .len = MLXSW_REG_MPAR_LEN,
-};
+MLXSW_REG_DEFINE(mpar, MLXSW_REG_MPAR_ID, MLXSW_REG_MPAR_LEN);
/* reg_mpar_local_port
* The local port to mirror the packets from.
@@ -4795,10 +4931,7 @@ static inline void mlxsw_reg_mpar_pack(char *payload, u8 local_port,
#define MLXSW_REG_MLCR_ID 0x902B
#define MLXSW_REG_MLCR_LEN 0x0C
-static const struct mlxsw_reg_info mlxsw_reg_mlcr = {
- .id = MLXSW_REG_MLCR_ID,
- .len = MLXSW_REG_MLCR_LEN,
-};
+MLXSW_REG_DEFINE(mlcr, MLXSW_REG_MLCR_ID, MLXSW_REG_MLCR_LEN);
/* reg_mlcr_local_port
* Local port number.
@@ -4839,10 +4972,7 @@ static inline void mlxsw_reg_mlcr_pack(char *payload, u8 local_port,
#define MLXSW_REG_SBPR_ID 0xB001
#define MLXSW_REG_SBPR_LEN 0x14
-static const struct mlxsw_reg_info mlxsw_reg_sbpr = {
- .id = MLXSW_REG_SBPR_ID,
- .len = MLXSW_REG_SBPR_LEN,
-};
+MLXSW_REG_DEFINE(sbpr, MLXSW_REG_SBPR_ID, MLXSW_REG_SBPR_LEN);
/* shared direction enum for SBPR, SBCM, SBPM */
enum mlxsw_reg_sbxx_dir {
@@ -4899,10 +5029,7 @@ static inline void mlxsw_reg_sbpr_pack(char *payload, u8 pool,
#define MLXSW_REG_SBCM_ID 0xB002
#define MLXSW_REG_SBCM_LEN 0x28
-static const struct mlxsw_reg_info mlxsw_reg_sbcm = {
- .id = MLXSW_REG_SBCM_ID,
- .len = MLXSW_REG_SBCM_LEN,
-};
+MLXSW_REG_DEFINE(sbcm, MLXSW_REG_SBCM_ID, MLXSW_REG_SBCM_LEN);
/* reg_sbcm_local_port
* Local port number.
@@ -4979,10 +5106,7 @@ static inline void mlxsw_reg_sbcm_pack(char *payload, u8 local_port, u8 pg_buff,
#define MLXSW_REG_SBPM_ID 0xB003
#define MLXSW_REG_SBPM_LEN 0x28
-static const struct mlxsw_reg_info mlxsw_reg_sbpm = {
- .id = MLXSW_REG_SBPM_ID,
- .len = MLXSW_REG_SBPM_LEN,
-};
+MLXSW_REG_DEFINE(sbpm, MLXSW_REG_SBPM_ID, MLXSW_REG_SBPM_LEN);
/* reg_sbpm_local_port
* Local port number.
@@ -5073,10 +5197,7 @@ static inline void mlxsw_reg_sbpm_unpack(char *payload, u32 *p_buff_occupancy,
#define MLXSW_REG_SBMM_ID 0xB004
#define MLXSW_REG_SBMM_LEN 0x28
-static const struct mlxsw_reg_info mlxsw_reg_sbmm = {
- .id = MLXSW_REG_SBMM_ID,
- .len = MLXSW_REG_SBMM_LEN,
-};
+MLXSW_REG_DEFINE(sbmm, MLXSW_REG_SBMM_ID, MLXSW_REG_SBMM_LEN);
/* reg_sbmm_prio
* Switch Priority.
@@ -5135,10 +5256,7 @@ static inline void mlxsw_reg_sbmm_pack(char *payload, u8 prio, u32 min_buff,
MLXSW_REG_SBSR_REC_LEN * \
MLXSW_REG_SBSR_REC_MAX_COUNT)
-static const struct mlxsw_reg_info mlxsw_reg_sbsr = {
- .id = MLXSW_REG_SBSR_ID,
- .len = MLXSW_REG_SBSR_LEN,
-};
+MLXSW_REG_DEFINE(sbsr, MLXSW_REG_SBSR_ID, MLXSW_REG_SBSR_LEN);
/* reg_sbsr_clr
* Clear Max Buffer Occupancy. When this bit is set, the max_buff_occupancy
@@ -5228,10 +5346,7 @@ static inline void mlxsw_reg_sbsr_rec_unpack(char *payload, int rec_index,
#define MLXSW_REG_SBIB_ID 0xB006
#define MLXSW_REG_SBIB_LEN 0x10
-static const struct mlxsw_reg_info mlxsw_reg_sbib = {
- .id = MLXSW_REG_SBIB_ID,
- .len = MLXSW_REG_SBIB_LEN,
-};
+MLXSW_REG_DEFINE(sbib, MLXSW_REG_SBIB_ID, MLXSW_REG_SBIB_LEN);
/* reg_sbib_local_port
* Local port number
@@ -5256,132 +5371,83 @@ static inline void mlxsw_reg_sbib_pack(char *payload, u8 local_port,
mlxsw_reg_sbib_buff_size_set(payload, buff_size);
}
+static const struct mlxsw_reg_info *mlxsw_reg_infos[] = {
+ MLXSW_REG(sgcr),
+ MLXSW_REG(spad),
+ MLXSW_REG(smid),
+ MLXSW_REG(sspr),
+ MLXSW_REG(sfdat),
+ MLXSW_REG(sfd),
+ MLXSW_REG(sfn),
+ MLXSW_REG(spms),
+ MLXSW_REG(spvid),
+ MLXSW_REG(spvm),
+ MLXSW_REG(spaft),
+ MLXSW_REG(sfgc),
+ MLXSW_REG(sftr),
+ MLXSW_REG(sfdf),
+ MLXSW_REG(sldr),
+ MLXSW_REG(slcr),
+ MLXSW_REG(slcor),
+ MLXSW_REG(spmlr),
+ MLXSW_REG(svfa),
+ MLXSW_REG(svpe),
+ MLXSW_REG(sfmr),
+ MLXSW_REG(spvmlr),
+ MLXSW_REG(qpcr),
+ MLXSW_REG(qtct),
+ MLXSW_REG(qeec),
+ MLXSW_REG(pmlp),
+ MLXSW_REG(pmtu),
+ MLXSW_REG(ptys),
+ MLXSW_REG(ppad),
+ MLXSW_REG(paos),
+ MLXSW_REG(pfcc),
+ MLXSW_REG(ppcnt),
+ MLXSW_REG(plib),
+ MLXSW_REG(pptb),
+ MLXSW_REG(pbmc),
+ MLXSW_REG(pspa),
+ MLXSW_REG(htgt),
+ MLXSW_REG(hpkt),
+ MLXSW_REG(rgcr),
+ MLXSW_REG(ritr),
+ MLXSW_REG(ratr),
+ MLXSW_REG(ralta),
+ MLXSW_REG(ralst),
+ MLXSW_REG(raltb),
+ MLXSW_REG(ralue),
+ MLXSW_REG(rauht),
+ MLXSW_REG(raleu),
+ MLXSW_REG(rauhtd),
+ MLXSW_REG(mfcr),
+ MLXSW_REG(mfsc),
+ MLXSW_REG(mfsm),
+ MLXSW_REG(mfsl),
+ MLXSW_REG(mtcap),
+ MLXSW_REG(mtmp),
+ MLXSW_REG(mpat),
+ MLXSW_REG(mpar),
+ MLXSW_REG(mlcr),
+ MLXSW_REG(sbpr),
+ MLXSW_REG(sbcm),
+ MLXSW_REG(sbpm),
+ MLXSW_REG(sbmm),
+ MLXSW_REG(sbsr),
+ MLXSW_REG(sbib),
+};
+
static inline const char *mlxsw_reg_id_str(u16 reg_id)
{
- switch (reg_id) {
- case MLXSW_REG_SGCR_ID:
- return "SGCR";
- case MLXSW_REG_SPAD_ID:
- return "SPAD";
- case MLXSW_REG_SMID_ID:
- return "SMID";
- case MLXSW_REG_SSPR_ID:
- return "SSPR";
- case MLXSW_REG_SFDAT_ID:
- return "SFDAT";
- case MLXSW_REG_SFD_ID:
- return "SFD";
- case MLXSW_REG_SFN_ID:
- return "SFN";
- case MLXSW_REG_SPMS_ID:
- return "SPMS";
- case MLXSW_REG_SPVID_ID:
- return "SPVID";
- case MLXSW_REG_SPVM_ID:
- return "SPVM";
- case MLXSW_REG_SPAFT_ID:
- return "SPAFT";
- case MLXSW_REG_SFGC_ID:
- return "SFGC";
- case MLXSW_REG_SFTR_ID:
- return "SFTR";
- case MLXSW_REG_SFDF_ID:
- return "SFDF";
- case MLXSW_REG_SLDR_ID:
- return "SLDR";
- case MLXSW_REG_SLCR_ID:
- return "SLCR";
- case MLXSW_REG_SLCOR_ID:
- return "SLCOR";
- case MLXSW_REG_SPMLR_ID:
- return "SPMLR";
- case MLXSW_REG_SVFA_ID:
- return "SVFA";
- case MLXSW_REG_SVPE_ID:
- return "SVPE";
- case MLXSW_REG_SFMR_ID:
- return "SFMR";
- case MLXSW_REG_SPVMLR_ID:
- return "SPVMLR";
- case MLXSW_REG_QTCT_ID:
- return "QTCT";
- case MLXSW_REG_QEEC_ID:
- return "QEEC";
- case MLXSW_REG_PMLP_ID:
- return "PMLP";
- case MLXSW_REG_PMTU_ID:
- return "PMTU";
- case MLXSW_REG_PTYS_ID:
- return "PTYS";
- case MLXSW_REG_PPAD_ID:
- return "PPAD";
- case MLXSW_REG_PAOS_ID:
- return "PAOS";
- case MLXSW_REG_PFCC_ID:
- return "PFCC";
- case MLXSW_REG_PPCNT_ID:
- return "PPCNT";
- case MLXSW_REG_PPTB_ID:
- return "PPTB";
- case MLXSW_REG_PBMC_ID:
- return "PBMC";
- case MLXSW_REG_PSPA_ID:
- return "PSPA";
- case MLXSW_REG_HTGT_ID:
- return "HTGT";
- case MLXSW_REG_HPKT_ID:
- return "HPKT";
- case MLXSW_REG_RGCR_ID:
- return "RGCR";
- case MLXSW_REG_RITR_ID:
- return "RITR";
- case MLXSW_REG_RATR_ID:
- return "RATR";
- case MLXSW_REG_RALTA_ID:
- return "RALTA";
- case MLXSW_REG_RALST_ID:
- return "RALST";
- case MLXSW_REG_RALTB_ID:
- return "RALTB";
- case MLXSW_REG_RALUE_ID:
- return "RALUE";
- case MLXSW_REG_RAUHT_ID:
- return "RAUHT";
- case MLXSW_REG_RALEU_ID:
- return "RALEU";
- case MLXSW_REG_RAUHTD_ID:
- return "RAUHTD";
- case MLXSW_REG_MFCR_ID:
- return "MFCR";
- case MLXSW_REG_MFSC_ID:
- return "MFSC";
- case MLXSW_REG_MFSM_ID:
- return "MFSM";
- case MLXSW_REG_MTCAP_ID:
- return "MTCAP";
- case MLXSW_REG_MPAT_ID:
- return "MPAT";
- case MLXSW_REG_MPAR_ID:
- return "MPAR";
- case MLXSW_REG_MTMP_ID:
- return "MTMP";
- case MLXSW_REG_MLCR_ID:
- return "MLCR";
- case MLXSW_REG_SBPR_ID:
- return "SBPR";
- case MLXSW_REG_SBCM_ID:
- return "SBCM";
- case MLXSW_REG_SBPM_ID:
- return "SBPM";
- case MLXSW_REG_SBMM_ID:
- return "SBMM";
- case MLXSW_REG_SBSR_ID:
- return "SBSR";
- case MLXSW_REG_SBIB_ID:
- return "SBIB";
- default:
- return "*UNKNOWN*";
+ const struct mlxsw_reg_info *reg_info;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(mlxsw_reg_infos); i++) {
+ reg_info = mlxsw_reg_infos[i];
+ if (reg_info->id == reg_id)
+ return reg_info->name;
}
+ return "*UNKNOWN*";
}
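The lookup above depends on MLXSW_REG_DEFINE recording a name alongside each register's id and length. Its definition is not part of this hunk; a presumed sketch of its shape (an assumption, shown only to clarify where reg_info->name comes from):

/* Presumed expansion; the real definition lives earlier in reg.h. */
#define MLXSW_REG_DEFINE(_name, _id, _len)			\
static const struct mlxsw_reg_info mlxsw_reg_##_name = {	\
	.id = _id,						\
	.len = _len,						\
	.name = #_name,						\
}

#define MLXSW_REG(type) (&mlxsw_reg_##type)

Under that assumption, mlxsw_reg_id_str() now returns the stringified table name instead of relying on the hand-maintained switch statement it replaces.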
/* PUDE - Port Up / Down Event
diff --git a/drivers/net/ethernet/mellanox/mlxsw/resources.h b/drivers/net/ethernet/mellanox/mlxsw/resources.h
new file mode 100644
index 000000000000..3c2171dbdba4
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/resources.h
@@ -0,0 +1,127 @@
+/*
+ * drivers/net/ethernet/mellanox/mlxsw/resources.h
+ * Copyright (c) 2016 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2016 Jiri Pirko <jiri@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _MLXSW_RESOURCES_H
+#define _MLXSW_RESOURCES_H
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+
+enum mlxsw_res_id {
+ MLXSW_RES_ID_KVD_SIZE,
+ MLXSW_RES_ID_KVD_SINGLE_MIN_SIZE,
+ MLXSW_RES_ID_KVD_DOUBLE_MIN_SIZE,
+ MLXSW_RES_ID_MAX_TRAP_GROUPS,
+ MLXSW_RES_ID_MAX_SPAN,
+ MLXSW_RES_ID_MAX_SYSTEM_PORT,
+ MLXSW_RES_ID_MAX_LAG,
+ MLXSW_RES_ID_MAX_LAG_MEMBERS,
+ MLXSW_RES_ID_MAX_BUFFER_SIZE,
+ MLXSW_RES_ID_MAX_CPU_POLICERS,
+ MLXSW_RES_ID_MAX_VRS,
+ MLXSW_RES_ID_MAX_RIFS,
+
+ /* Internal resources.
+ * Determined by the SW, not queried from the HW.
+ */
+ MLXSW_RES_ID_KVD_SINGLE_SIZE,
+ MLXSW_RES_ID_KVD_DOUBLE_SIZE,
+ MLXSW_RES_ID_KVD_LINEAR_SIZE,
+
+ __MLXSW_RES_ID_MAX,
+};
+
+static u16 mlxsw_res_ids[] = {
+ [MLXSW_RES_ID_KVD_SIZE] = 0x1001,
+ [MLXSW_RES_ID_KVD_SINGLE_MIN_SIZE] = 0x1002,
+ [MLXSW_RES_ID_KVD_DOUBLE_MIN_SIZE] = 0x1003,
+ [MLXSW_RES_ID_MAX_TRAP_GROUPS] = 0x2201,
+ [MLXSW_RES_ID_MAX_SPAN] = 0x2420,
+ [MLXSW_RES_ID_MAX_SYSTEM_PORT] = 0x2502,
+ [MLXSW_RES_ID_MAX_LAG] = 0x2520,
+ [MLXSW_RES_ID_MAX_LAG_MEMBERS] = 0x2521,
+ [MLXSW_RES_ID_MAX_BUFFER_SIZE] = 0x2802, /* Bytes */
+ [MLXSW_RES_ID_MAX_CPU_POLICERS] = 0x2A13,
+ [MLXSW_RES_ID_MAX_VRS] = 0x2C01,
+ [MLXSW_RES_ID_MAX_RIFS] = 0x2C02,
+};
+
+struct mlxsw_res {
+ bool valid[__MLXSW_RES_ID_MAX];
+ u64 values[__MLXSW_RES_ID_MAX];
+};
+
+static inline bool mlxsw_res_valid(struct mlxsw_res *res,
+ enum mlxsw_res_id res_id)
+{
+ return res->valid[res_id];
+}
+
+#define MLXSW_RES_VALID(res, short_res_id) \
+ mlxsw_res_valid(res, MLXSW_RES_ID_##short_res_id)
+
+static inline u64 mlxsw_res_get(struct mlxsw_res *res,
+ enum mlxsw_res_id res_id)
+{
+ if (WARN_ON(!res->valid[res_id]))
+ return 0;
+ return res->values[res_id];
+}
+
+#define MLXSW_RES_GET(res, short_res_id) \
+ mlxsw_res_get(res, MLXSW_RES_ID_##short_res_id)
+
+static inline void mlxsw_res_set(struct mlxsw_res *res,
+ enum mlxsw_res_id res_id, u64 value)
+{
+ res->valid[res_id] = true;
+ res->values[res_id] = value;
+}
+
+#define MLXSW_RES_SET(res, short_res_id, value) \
+ mlxsw_res_set(res, MLXSW_RES_ID_##short_res_id, value)
+
+static inline void mlxsw_res_parse(struct mlxsw_res *res, u16 id, u64 value)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(mlxsw_res_ids); i++) {
+ if (mlxsw_res_ids[i] == id) {
+ mlxsw_res_set(res, i, value);
+ return;
+ }
+ }
+}
+
+#endif
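A hypothetical usage sketch of the helpers above: (id, value) pairs from the hardware resource query are fed through mlxsw_res_parse(), internal resources are set directly by the driver, and both are read back through the short-name macros (numeric values are placeholders):

static u64 example_res_usage(struct mlxsw_res *res)
{
	/* As if returned by the HW query: id 0x2520 (MAX_LAG) = 64 */
	mlxsw_res_parse(res, 0x2520, 64);

	/* Internal resource: determined by SW, not queried from HW */
	MLXSW_RES_SET(res, KVD_LINEAR_SIZE, 0x10000);

	if (!MLXSW_RES_VALID(res, MAX_LAG))
		return 0;
	return MLXSW_RES_GET(res, MAX_LAG);
}

Note that mlxsw_res_parse() silently ignores ids it does not recognize, so resources reported by newer firmware are skipped rather than rejected.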
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index dda5761e91bc..d768c7b6c6d6 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -37,6 +37,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
+#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
@@ -53,12 +54,12 @@
#include <linux/dcbnl.h>
#include <linux/inetdevice.h>
#include <net/switchdev.h>
-#include <generated/utsrelease.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_mirred.h>
#include <net/netevent.h>
#include "spectrum.h"
+#include "pci.h"
#include "core.h"
#include "reg.h"
#include "port.h"
@@ -156,7 +157,7 @@ static void mlxsw_sp_txhdr_construct(struct sk_buff *skb,
static int mlxsw_sp_base_mac_get(struct mlxsw_sp *mlxsw_sp)
{
- char spad_pl[MLXSW_REG_SPAD_LEN];
+ char spad_pl[MLXSW_REG_SPAD_LEN] = {0};
int err;
err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(spad), spad_pl);
@@ -168,14 +169,13 @@ static int mlxsw_sp_base_mac_get(struct mlxsw_sp *mlxsw_sp)
static int mlxsw_sp_span_init(struct mlxsw_sp *mlxsw_sp)
{
- struct mlxsw_resources *resources;
int i;
- resources = mlxsw_core_resources_get(mlxsw_sp->core);
- if (!resources->max_span_valid)
+ if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_SPAN))
return -EIO;
- mlxsw_sp->span.entries_count = resources->max_span;
+ mlxsw_sp->span.entries_count = MLXSW_CORE_RES_GET(mlxsw_sp->core,
+ MAX_SPAN);
mlxsw_sp->span.entries = kcalloc(mlxsw_sp->span.entries_count,
sizeof(struct mlxsw_sp_span_entry),
GFP_KERNEL);
@@ -857,7 +857,7 @@ mlxsw_sp_port_get_sw_stats64(const struct net_device *dev,
return 0;
}
-static bool mlxsw_sp_port_has_offload_stats(int attr_id)
+static bool mlxsw_sp_port_has_offload_stats(const struct net_device *dev, int attr_id)
{
switch (attr_id) {
case IFLA_OFFLOAD_XSTATS_CPU_HIT:
@@ -1239,8 +1239,10 @@ static int mlxsw_sp_port_add_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port,
tcf_exts_to_list(cls->exts, &actions);
list_for_each_entry(a, &actions, list) {
- if (!is_tcf_mirred_mirror(a) || protocol != htons(ETH_P_ALL))
+ if (!is_tcf_mirred_egress_mirror(a) ||
+ protocol != htons(ETH_P_ALL)) {
return -ENOTSUPP;
+ }
err = mlxsw_sp_port_add_cls_matchall_mirror(mlxsw_sp_port, cls,
a, ingress);
@@ -1413,7 +1415,7 @@ err_port_pause_configure:
struct mlxsw_sp_port_hw_stats {
char str[ETH_GSTRING_LEN];
- u64 (*getter)(char *payload);
+ u64 (*getter)(const char *payload);
};
static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_stats[] = {
@@ -1534,7 +1536,7 @@ static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_prio_stats[] = {
#define MLXSW_SP_PORT_HW_PRIO_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_prio_stats)
-static u64 mlxsw_reg_ppcnt_tc_transmit_queue_bytes_get(char *ppcnt_pl)
+static u64 mlxsw_reg_ppcnt_tc_transmit_queue_bytes_get(const char *ppcnt_pl)
{
u64 transmit_queue = mlxsw_reg_ppcnt_tc_transmit_queue_get(ppcnt_pl);
@@ -2002,12 +2004,12 @@ static int mlxsw_sp_port_get_link_ksettings(struct net_device *dev,
int err;
autoneg = mlxsw_sp_port->link.autoneg;
- mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sp_port->local_port, 0);
+ mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sp_port->local_port, 0);
err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
if (err)
return err;
- mlxsw_reg_ptys_unpack(ptys_pl, &eth_proto_cap, &eth_proto_admin,
- &eth_proto_oper);
+ mlxsw_reg_ptys_eth_unpack(ptys_pl, &eth_proto_cap, &eth_proto_admin,
+ &eth_proto_oper);
mlxsw_sp_port_get_link_supported(eth_proto_cap, cmd);
@@ -2036,11 +2038,11 @@ mlxsw_sp_port_set_link_ksettings(struct net_device *dev,
bool autoneg;
int err;
- mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sp_port->local_port, 0);
+ mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sp_port->local_port, 0);
err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
if (err)
return err;
- mlxsw_reg_ptys_unpack(ptys_pl, &eth_proto_cap, NULL, NULL);
+ mlxsw_reg_ptys_eth_unpack(ptys_pl, &eth_proto_cap, NULL, NULL);
autoneg = cmd->base.autoneg == AUTONEG_ENABLE;
eth_proto_new = autoneg ?
@@ -2053,7 +2055,8 @@ mlxsw_sp_port_set_link_ksettings(struct net_device *dev,
return -EINVAL;
}
- mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sp_port->local_port, eth_proto_new);
+ mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sp_port->local_port,
+ eth_proto_new);
err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
if (err)
return err;
@@ -2091,8 +2094,8 @@ mlxsw_sp_port_speed_by_width_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 width)
u32 eth_proto_admin;
eth_proto_admin = mlxsw_sp_to_ptys_upper_speed(upper_speed);
- mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sp_port->local_port,
- eth_proto_admin);
+ mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sp_port->local_port,
+ eth_proto_admin);
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
}
@@ -2210,8 +2213,8 @@ static int mlxsw_sp_port_pvid_vport_destroy(struct mlxsw_sp_port *mlxsw_sp_port)
return mlxsw_sp_port_kill_vid(mlxsw_sp_port->dev, 0, 1);
}
-static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
- bool split, u8 module, u8 width, u8 lane)
+static int __mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
+ bool split, u8 module, u8 width, u8 lane)
{
struct mlxsw_sp_port *mlxsw_sp_port;
struct net_device *dev;
@@ -2221,6 +2224,7 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
dev = alloc_etherdev(sizeof(struct mlxsw_sp_port));
if (!dev)
return -ENOMEM;
+ SET_NETDEV_DEV(dev, mlxsw_sp->bus_info->dev);
mlxsw_sp_port = netdev_priv(dev);
mlxsw_sp_port->dev = dev;
mlxsw_sp_port->mlxsw_sp = mlxsw_sp;
@@ -2284,6 +2288,9 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_TC;
dev->hw_features |= NETIF_F_HW_TC;
+ dev->min_mtu = 0;
+ dev->max_mtu = ETH_MAX_MTU;
+
/* Each packet needs to have a Tx header (metadata) on top all other
* headers.
*/
@@ -2352,20 +2359,12 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
goto err_register_netdev;
}
- err = mlxsw_core_port_init(mlxsw_sp->core, &mlxsw_sp_port->core_port,
- mlxsw_sp_port->local_port, dev,
- mlxsw_sp_port->split, module);
- if (err) {
- dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to init core port\n",
- mlxsw_sp_port->local_port);
- goto err_core_port_init;
- }
-
+ mlxsw_core_port_eth_set(mlxsw_sp->core, mlxsw_sp_port->local_port,
+ mlxsw_sp_port, dev, mlxsw_sp_port->split,
+ module);
mlxsw_core_schedule_dw(&mlxsw_sp_port->hw_stats.update_dw, 0);
return 0;
-err_core_port_init:
- unregister_netdev(dev);
err_register_netdev:
mlxsw_sp->ports[local_port] = NULL;
mlxsw_sp_port_switchdev_fini(mlxsw_sp_port);
@@ -2394,14 +2393,34 @@ err_port_active_vlans_alloc:
return err;
}
-static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port)
+static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
+ bool split, u8 module, u8 width, u8 lane)
+{
+ int err;
+
+ err = mlxsw_core_port_init(mlxsw_sp->core, local_port);
+ if (err) {
+ dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to init core port\n",
+ local_port);
+ return err;
+ }
+ err = __mlxsw_sp_port_create(mlxsw_sp, local_port, split,
+ module, width, lane);
+ if (err)
+ goto err_port_create;
+ return 0;
+
+err_port_create:
+ mlxsw_core_port_fini(mlxsw_sp->core, local_port);
+ return err;
+}
+
+static void __mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port)
{
struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
- if (!mlxsw_sp_port)
- return;
cancel_delayed_work_sync(&mlxsw_sp_port->hw_stats.update_dw);
- mlxsw_core_port_fini(&mlxsw_sp_port->core_port);
+ mlxsw_core_port_clear(mlxsw_sp->core, local_port, mlxsw_sp);
unregister_netdev(mlxsw_sp_port->dev); /* This calls ndo_stop */
mlxsw_sp->ports[local_port] = NULL;
mlxsw_sp_port_switchdev_fini(mlxsw_sp_port);
@@ -2417,12 +2436,24 @@ static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port)
free_netdev(mlxsw_sp_port->dev);
}
+static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port)
+{
+ __mlxsw_sp_port_remove(mlxsw_sp, local_port);
+ mlxsw_core_port_fini(mlxsw_sp->core, local_port);
+}
+
+static bool mlxsw_sp_port_created(struct mlxsw_sp *mlxsw_sp, u8 local_port)
+{
+ return mlxsw_sp->ports[local_port] != NULL;
+}
+
static void mlxsw_sp_ports_remove(struct mlxsw_sp *mlxsw_sp)
{
int i;
for (i = 1; i < MLXSW_PORT_MAX_PORTS; i++)
- mlxsw_sp_port_remove(mlxsw_sp, i);
+ if (mlxsw_sp_port_created(mlxsw_sp, i))
+ mlxsw_sp_port_remove(mlxsw_sp, i);
kfree(mlxsw_sp->ports);
}
@@ -2446,8 +2477,8 @@ static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp)
if (!width)
continue;
mlxsw_sp->port_to_module[i] = module;
- err = mlxsw_sp_port_create(mlxsw_sp, i, false, module, width,
- lane);
+ err = mlxsw_sp_port_create(mlxsw_sp, i, false,
+ module, width, lane);
if (err)
goto err_port_create;
}
@@ -2456,7 +2487,8 @@ static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp)
err_port_create:
err_port_module_info_get:
for (i--; i >= 1; i--)
- mlxsw_sp_port_remove(mlxsw_sp, i);
+ if (mlxsw_sp_port_created(mlxsw_sp, i))
+ mlxsw_sp_port_remove(mlxsw_sp, i);
kfree(mlxsw_sp->ports);
return err;
}
@@ -2498,7 +2530,8 @@ static int mlxsw_sp_port_split_create(struct mlxsw_sp *mlxsw_sp, u8 base_port,
err_port_create:
for (i--; i >= 0; i--)
- mlxsw_sp_port_remove(mlxsw_sp, base_port + i);
+ if (mlxsw_sp_port_created(mlxsw_sp, base_port + i))
+ mlxsw_sp_port_remove(mlxsw_sp, base_port + i);
i = count;
err_port_swid_set:
for (i--; i >= 0; i--)
@@ -2588,7 +2621,8 @@ static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u8 local_port,
}
for (i = 0; i < count; i++)
- mlxsw_sp_port_remove(mlxsw_sp, base_port + i);
+ if (mlxsw_sp_port_created(mlxsw_sp, base_port + i))
+ mlxsw_sp_port_remove(mlxsw_sp, base_port + i);
err = mlxsw_sp_port_split_create(mlxsw_sp, base_port, module, count);
if (err) {
@@ -2633,7 +2667,8 @@ static int mlxsw_sp_port_unsplit(struct mlxsw_core *mlxsw_core, u8 local_port)
base_port = base_port + 2;
for (i = 0; i < count; i++)
- mlxsw_sp_port_remove(mlxsw_sp, base_port + i);
+ if (mlxsw_sp_port_created(mlxsw_sp, base_port + i))
+ mlxsw_sp_port_remove(mlxsw_sp, base_port + i);
mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count);
@@ -2663,54 +2698,8 @@ static void mlxsw_sp_pude_event_func(const struct mlxsw_reg_info *reg,
}
}
-static struct mlxsw_event_listener mlxsw_sp_pude_event = {
- .func = mlxsw_sp_pude_event_func,
- .trap_id = MLXSW_TRAP_ID_PUDE,
-};
-
-static int mlxsw_sp_event_register(struct mlxsw_sp *mlxsw_sp,
- enum mlxsw_event_trap_id trap_id)
-{
- struct mlxsw_event_listener *el;
- char hpkt_pl[MLXSW_REG_HPKT_LEN];
- int err;
-
- switch (trap_id) {
- case MLXSW_TRAP_ID_PUDE:
- el = &mlxsw_sp_pude_event;
- break;
- }
- err = mlxsw_core_event_listener_register(mlxsw_sp->core, el, mlxsw_sp);
- if (err)
- return err;
-
- mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_FORWARD, trap_id);
- err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(hpkt), hpkt_pl);
- if (err)
- goto err_event_trap_set;
-
- return 0;
-
-err_event_trap_set:
- mlxsw_core_event_listener_unregister(mlxsw_sp->core, el, mlxsw_sp);
- return err;
-}
-
-static void mlxsw_sp_event_unregister(struct mlxsw_sp *mlxsw_sp,
- enum mlxsw_event_trap_id trap_id)
-{
- struct mlxsw_event_listener *el;
-
- switch (trap_id) {
- case MLXSW_TRAP_ID_PUDE:
- el = &mlxsw_sp_pude_event;
- break;
- }
- mlxsw_core_event_listener_unregister(mlxsw_sp->core, el, mlxsw_sp);
-}
-
-static void mlxsw_sp_rx_listener_func(struct sk_buff *skb, u8 local_port,
- void *priv)
+static void mlxsw_sp_rx_listener_no_mark_func(struct sk_buff *skb,
+ u8 local_port, void *priv)
{
struct mlxsw_sp *mlxsw_sp = priv;
struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
@@ -2738,107 +2727,212 @@ static void mlxsw_sp_rx_listener_mark_func(struct sk_buff *skb, u8 local_port,
void *priv)
{
skb->offload_fwd_mark = 1;
- return mlxsw_sp_rx_listener_func(skb, local_port, priv);
+ return mlxsw_sp_rx_listener_no_mark_func(skb, local_port, priv);
+}
+
+#define MLXSW_SP_RXL_NO_MARK(_trap_id, _action, _trap_group, _is_ctrl) \
+ MLXSW_RXL(mlxsw_sp_rx_listener_no_mark_func, _trap_id, _action, \
+ _is_ctrl, SP_##_trap_group, DISCARD)
+
+#define MLXSW_SP_RXL_MARK(_trap_id, _action, _trap_group, _is_ctrl) \
+ MLXSW_RXL(mlxsw_sp_rx_listener_mark_func, _trap_id, _action, \
+ _is_ctrl, SP_##_trap_group, DISCARD)
+
+#define MLXSW_SP_EVENTL(_func, _trap_id) \
+ MLXSW_EVENTL(_func, _trap_id, SP_EVENT)
+
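To make the table below easier to read, here is how its first L2 entry expands under the three macros just defined (the inner MLXSW_RXL macro is defined in core.h and not shown in this hunk):

/* MLXSW_SP_RXL_NO_MARK(STP, TRAP_TO_CPU, STP, true) becomes:
 *
 *	MLXSW_RXL(mlxsw_sp_rx_listener_no_mark_func, STP, TRAP_TO_CPU,
 *		  true, SP_STP, DISCARD)
 */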
+static const struct mlxsw_listener mlxsw_sp_listener[] = {
+ /* Events */
+ MLXSW_SP_EVENTL(mlxsw_sp_pude_event_func, PUDE),
+ /* L2 traps */
+ MLXSW_SP_RXL_NO_MARK(STP, TRAP_TO_CPU, STP, true),
+ MLXSW_SP_RXL_NO_MARK(LACP, TRAP_TO_CPU, LACP, true),
+ MLXSW_SP_RXL_NO_MARK(LLDP, TRAP_TO_CPU, LLDP, true),
+ MLXSW_SP_RXL_MARK(DHCP, MIRROR_TO_CPU, DHCP, false),
+ MLXSW_SP_RXL_MARK(IGMP_QUERY, MIRROR_TO_CPU, IGMP, false),
+ MLXSW_SP_RXL_NO_MARK(IGMP_V1_REPORT, TRAP_TO_CPU, IGMP, false),
+ MLXSW_SP_RXL_NO_MARK(IGMP_V2_REPORT, TRAP_TO_CPU, IGMP, false),
+ MLXSW_SP_RXL_NO_MARK(IGMP_V2_LEAVE, TRAP_TO_CPU, IGMP, false),
+ MLXSW_SP_RXL_NO_MARK(IGMP_V3_REPORT, TRAP_TO_CPU, IGMP, false),
+ MLXSW_SP_RXL_MARK(ARPBC, MIRROR_TO_CPU, ARP, false),
+ MLXSW_SP_RXL_MARK(ARPUC, MIRROR_TO_CPU, ARP, false),
+ /* L3 traps */
+ MLXSW_SP_RXL_NO_MARK(MTUERROR, TRAP_TO_CPU, ROUTER_EXP, false),
+ MLXSW_SP_RXL_NO_MARK(TTLERROR, TRAP_TO_CPU, ROUTER_EXP, false),
+ MLXSW_SP_RXL_NO_MARK(LBERROR, TRAP_TO_CPU, ROUTER_EXP, false),
+ MLXSW_SP_RXL_MARK(OSPF, TRAP_TO_CPU, OSPF, false),
+ MLXSW_SP_RXL_NO_MARK(IP2ME, TRAP_TO_CPU, IP2ME, false),
+ MLXSW_SP_RXL_NO_MARK(RTR_INGRESS0, TRAP_TO_CPU, REMOTE_ROUTE, false),
+ MLXSW_SP_RXL_NO_MARK(HOST_MISS_IPV4, TRAP_TO_CPU, ARP_MISS, false),
+ MLXSW_SP_RXL_NO_MARK(BGP_IPV4, TRAP_TO_CPU, BGP_IPV4, false),
+};
+
+static int mlxsw_sp_cpu_policers_set(struct mlxsw_core *mlxsw_core)
+{
+ char qpcr_pl[MLXSW_REG_QPCR_LEN];
+ enum mlxsw_reg_qpcr_ir_units ir_units;
+ int max_cpu_policers;
+ bool is_bytes;
+ u8 burst_size;
+ u32 rate;
+ int i, err;
+
+ if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_CPU_POLICERS))
+ return -EIO;
+
+ max_cpu_policers = MLXSW_CORE_RES_GET(mlxsw_core, MAX_CPU_POLICERS);
+
+ ir_units = MLXSW_REG_QPCR_IR_UNITS_M;
+ for (i = 0; i < max_cpu_policers; i++) {
+ is_bytes = false;
+ switch (i) {
+ case MLXSW_REG_HTGT_TRAP_GROUP_SP_STP:
+ case MLXSW_REG_HTGT_TRAP_GROUP_SP_LACP:
+ case MLXSW_REG_HTGT_TRAP_GROUP_SP_LLDP:
+ case MLXSW_REG_HTGT_TRAP_GROUP_SP_OSPF:
+ rate = 128;
+ burst_size = 7;
+ break;
+ case MLXSW_REG_HTGT_TRAP_GROUP_SP_IGMP:
+ rate = 16 * 1024;
+ burst_size = 10;
+ break;
+ case MLXSW_REG_HTGT_TRAP_GROUP_SP_BGP_IPV4:
+ case MLXSW_REG_HTGT_TRAP_GROUP_SP_ARP:
+ case MLXSW_REG_HTGT_TRAP_GROUP_SP_DHCP:
+ case MLXSW_REG_HTGT_TRAP_GROUP_SP_ARP_MISS:
+ case MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP:
+ case MLXSW_REG_HTGT_TRAP_GROUP_SP_REMOTE_ROUTE:
+ rate = 1024;
+ burst_size = 7;
+ break;
+ case MLXSW_REG_HTGT_TRAP_GROUP_SP_IP2ME:
+ is_bytes = true;
+ rate = 4 * 1024;
+ burst_size = 4;
+ break;
+ default:
+ continue;
+ }
+
+ mlxsw_reg_qpcr_pack(qpcr_pl, i, ir_units, is_bytes, rate,
+ burst_size);
+ err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(qpcr), qpcr_pl);
+ if (err)
+ return err;
+ }
+
+ return 0;
}
-#define MLXSW_SP_RXL(_func, _trap_id, _action) \
- { \
- .func = _func, \
- .local_port = MLXSW_PORT_DONT_CARE, \
- .trap_id = MLXSW_TRAP_ID_##_trap_id, \
- .action = MLXSW_REG_HPKT_ACTION_##_action, \
+static int mlxsw_sp_trap_groups_set(struct mlxsw_core *mlxsw_core)
+{
+ char htgt_pl[MLXSW_REG_HTGT_LEN];
+ enum mlxsw_reg_htgt_trap_group i;
+ int max_cpu_policers;
+ int max_trap_groups;
+ u8 priority, tc;
+ u16 policer_id;
+ int err;
+
+ if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_TRAP_GROUPS))
+ return -EIO;
+
+ max_trap_groups = MLXSW_CORE_RES_GET(mlxsw_core, MAX_TRAP_GROUPS);
+ max_cpu_policers = MLXSW_CORE_RES_GET(mlxsw_core, MAX_CPU_POLICERS);
+
+ for (i = 0; i < max_trap_groups; i++) {
+ policer_id = i;
+ switch (i) {
+ case MLXSW_REG_HTGT_TRAP_GROUP_SP_STP:
+ case MLXSW_REG_HTGT_TRAP_GROUP_SP_LACP:
+ case MLXSW_REG_HTGT_TRAP_GROUP_SP_LLDP:
+ case MLXSW_REG_HTGT_TRAP_GROUP_SP_OSPF:
+ priority = 5;
+ tc = 5;
+ break;
+ case MLXSW_REG_HTGT_TRAP_GROUP_SP_BGP_IPV4:
+ case MLXSW_REG_HTGT_TRAP_GROUP_SP_DHCP:
+ priority = 4;
+ tc = 4;
+ break;
+ case MLXSW_REG_HTGT_TRAP_GROUP_SP_IGMP:
+ case MLXSW_REG_HTGT_TRAP_GROUP_SP_IP2ME:
+ priority = 3;
+ tc = 3;
+ break;
+ case MLXSW_REG_HTGT_TRAP_GROUP_SP_ARP:
+ priority = 2;
+ tc = 2;
+ break;
+ case MLXSW_REG_HTGT_TRAP_GROUP_SP_ARP_MISS:
+ case MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP:
+ case MLXSW_REG_HTGT_TRAP_GROUP_SP_REMOTE_ROUTE:
+ priority = 1;
+ tc = 1;
+ break;
+ case MLXSW_REG_HTGT_TRAP_GROUP_SP_EVENT:
+ priority = MLXSW_REG_HTGT_DEFAULT_PRIORITY;
+ tc = MLXSW_REG_HTGT_DEFAULT_TC;
+ policer_id = MLXSW_REG_HTGT_INVALID_POLICER;
+ break;
+ default:
+ continue;
+ }
+
+ if (max_cpu_policers <= policer_id &&
+ policer_id != MLXSW_REG_HTGT_INVALID_POLICER)
+ return -EIO;
+
+ mlxsw_reg_htgt_pack(htgt_pl, i, policer_id, priority, tc);
+ err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl);
+ if (err)
+ return err;
}
-static const struct mlxsw_rx_listener mlxsw_sp_rx_listener[] = {
- MLXSW_SP_RXL(mlxsw_sp_rx_listener_func, FDB_MC, TRAP_TO_CPU),
- /* Traps for specific L2 packet types, not trapped as FDB MC */
- MLXSW_SP_RXL(mlxsw_sp_rx_listener_func, STP, TRAP_TO_CPU),
- MLXSW_SP_RXL(mlxsw_sp_rx_listener_func, LACP, TRAP_TO_CPU),
- MLXSW_SP_RXL(mlxsw_sp_rx_listener_func, EAPOL, TRAP_TO_CPU),
- MLXSW_SP_RXL(mlxsw_sp_rx_listener_func, LLDP, TRAP_TO_CPU),
- MLXSW_SP_RXL(mlxsw_sp_rx_listener_func, MMRP, TRAP_TO_CPU),
- MLXSW_SP_RXL(mlxsw_sp_rx_listener_func, MVRP, TRAP_TO_CPU),
- MLXSW_SP_RXL(mlxsw_sp_rx_listener_func, RPVST, TRAP_TO_CPU),
- MLXSW_SP_RXL(mlxsw_sp_rx_listener_mark_func, DHCP, MIRROR_TO_CPU),
- MLXSW_SP_RXL(mlxsw_sp_rx_listener_mark_func, IGMP_QUERY, MIRROR_TO_CPU),
- MLXSW_SP_RXL(mlxsw_sp_rx_listener_func, IGMP_V1_REPORT, TRAP_TO_CPU),
- MLXSW_SP_RXL(mlxsw_sp_rx_listener_func, IGMP_V2_REPORT, TRAP_TO_CPU),
- MLXSW_SP_RXL(mlxsw_sp_rx_listener_func, IGMP_V2_LEAVE, TRAP_TO_CPU),
- MLXSW_SP_RXL(mlxsw_sp_rx_listener_func, IGMP_V3_REPORT, TRAP_TO_CPU),
- MLXSW_SP_RXL(mlxsw_sp_rx_listener_mark_func, ARPBC, MIRROR_TO_CPU),
- MLXSW_SP_RXL(mlxsw_sp_rx_listener_mark_func, ARPUC, MIRROR_TO_CPU),
- /* L3 traps */
- MLXSW_SP_RXL(mlxsw_sp_rx_listener_func, MTUERROR, TRAP_TO_CPU),
- MLXSW_SP_RXL(mlxsw_sp_rx_listener_func, TTLERROR, TRAP_TO_CPU),
- MLXSW_SP_RXL(mlxsw_sp_rx_listener_func, LBERROR, TRAP_TO_CPU),
- MLXSW_SP_RXL(mlxsw_sp_rx_listener_mark_func, OSPF, TRAP_TO_CPU),
- MLXSW_SP_RXL(mlxsw_sp_rx_listener_func, IP2ME, TRAP_TO_CPU),
- MLXSW_SP_RXL(mlxsw_sp_rx_listener_func, RTR_INGRESS0, TRAP_TO_CPU),
- MLXSW_SP_RXL(mlxsw_sp_rx_listener_func, HOST_MISS_IPV4, TRAP_TO_CPU),
-};
+ return 0;
+}
static int mlxsw_sp_traps_init(struct mlxsw_sp *mlxsw_sp)
{
- char htgt_pl[MLXSW_REG_HTGT_LEN];
- char hpkt_pl[MLXSW_REG_HPKT_LEN];
int i;
int err;
- mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_RX);
- err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(htgt), htgt_pl);
+ err = mlxsw_sp_cpu_policers_set(mlxsw_sp->core);
if (err)
return err;
- mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_CTRL);
- err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(htgt), htgt_pl);
+ err = mlxsw_sp_trap_groups_set(mlxsw_sp->core);
if (err)
return err;
- for (i = 0; i < ARRAY_SIZE(mlxsw_sp_rx_listener); i++) {
- err = mlxsw_core_rx_listener_register(mlxsw_sp->core,
- &mlxsw_sp_rx_listener[i],
- mlxsw_sp);
+ for (i = 0; i < ARRAY_SIZE(mlxsw_sp_listener); i++) {
+ err = mlxsw_core_trap_register(mlxsw_sp->core,
+ &mlxsw_sp_listener[i],
+ mlxsw_sp);
if (err)
- goto err_rx_listener_register;
+ goto err_listener_register;
- mlxsw_reg_hpkt_pack(hpkt_pl, mlxsw_sp_rx_listener[i].action,
- mlxsw_sp_rx_listener[i].trap_id);
- err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(hpkt), hpkt_pl);
- if (err)
- goto err_rx_trap_set;
}
return 0;
-err_rx_trap_set:
- mlxsw_core_rx_listener_unregister(mlxsw_sp->core,
- &mlxsw_sp_rx_listener[i],
- mlxsw_sp);
-err_rx_listener_register:
+err_listener_register:
for (i--; i >= 0; i--) {
- mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_DISCARD,
- mlxsw_sp_rx_listener[i].trap_id);
- mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(hpkt), hpkt_pl);
-
- mlxsw_core_rx_listener_unregister(mlxsw_sp->core,
- &mlxsw_sp_rx_listener[i],
- mlxsw_sp);
+ mlxsw_core_trap_unregister(mlxsw_sp->core,
+ &mlxsw_sp_listener[i],
+ mlxsw_sp);
}
return err;
}
static void mlxsw_sp_traps_fini(struct mlxsw_sp *mlxsw_sp)
{
- char hpkt_pl[MLXSW_REG_HPKT_LEN];
int i;
- for (i = 0; i < ARRAY_SIZE(mlxsw_sp_rx_listener); i++) {
- mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_DISCARD,
- mlxsw_sp_rx_listener[i].trap_id);
- mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(hpkt), hpkt_pl);
-
- mlxsw_core_rx_listener_unregister(mlxsw_sp->core,
- &mlxsw_sp_rx_listener[i],
- mlxsw_sp);
+ for (i = 0; i < ARRAY_SIZE(mlxsw_sp_listener); i++) {
+ mlxsw_core_trap_unregister(mlxsw_sp->core,
+ &mlxsw_sp_listener[i],
+ mlxsw_sp);
}
}
@@ -2889,7 +2983,6 @@ static int mlxsw_sp_flood_init(struct mlxsw_sp *mlxsw_sp)
static int mlxsw_sp_lag_init(struct mlxsw_sp *mlxsw_sp)
{
- struct mlxsw_resources *resources;
char slcr_pl[MLXSW_REG_SLCR_LEN];
int err;
@@ -2906,11 +2999,11 @@ static int mlxsw_sp_lag_init(struct mlxsw_sp *mlxsw_sp)
if (err)
return err;
- resources = mlxsw_core_resources_get(mlxsw_sp->core);
- if (!(resources->max_lag_valid && resources->max_ports_in_lag_valid))
+ if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LAG) ||
+ !MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LAG_MEMBERS))
return -EIO;
- mlxsw_sp->lags = kcalloc(resources->max_lag,
+ mlxsw_sp->lags = kcalloc(MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_LAG),
sizeof(struct mlxsw_sp_upper),
GFP_KERNEL);
if (!mlxsw_sp->lags)
@@ -2924,6 +3017,17 @@ static void mlxsw_sp_lag_fini(struct mlxsw_sp *mlxsw_sp)
kfree(mlxsw_sp->lags);
}
+static int mlxsw_sp_basic_trap_groups_set(struct mlxsw_core *mlxsw_core)
+{
+ char htgt_pl[MLXSW_REG_HTGT_LEN];
+
+ mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_EMAD,
+ MLXSW_REG_HTGT_INVALID_POLICER,
+ MLXSW_REG_HTGT_DEFAULT_PRIORITY,
+ MLXSW_REG_HTGT_DEFAULT_TC);
+ return mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl);
+}
+
static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
const struct mlxsw_bus_info *mlxsw_bus_info)
{
@@ -2942,16 +3046,10 @@ static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
return err;
}
- err = mlxsw_sp_event_register(mlxsw_sp, MLXSW_TRAP_ID_PUDE);
- if (err) {
- dev_err(mlxsw_sp->bus_info->dev, "Failed to register for PUDE events\n");
- return err;
- }
-
err = mlxsw_sp_traps_init(mlxsw_sp);
if (err) {
- dev_err(mlxsw_sp->bus_info->dev, "Failed to set traps for RX\n");
- goto err_rx_listener_register;
+ dev_err(mlxsw_sp->bus_info->dev, "Failed to set traps\n");
+ return err;
}
err = mlxsw_sp_flood_init(mlxsw_sp);
@@ -3011,8 +3109,6 @@ err_lag_init:
err_buffers_init:
err_flood_init:
mlxsw_sp_traps_fini(mlxsw_sp);
-err_rx_listener_register:
- mlxsw_sp_event_unregister(mlxsw_sp, MLXSW_TRAP_ID_PUDE);
return err;
}
@@ -3027,7 +3123,6 @@ static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core)
mlxsw_sp_lag_fini(mlxsw_sp);
mlxsw_sp_buffers_fini(mlxsw_sp);
mlxsw_sp_traps_fini(mlxsw_sp);
- mlxsw_sp_event_unregister(mlxsw_sp, MLXSW_TRAP_ID_PUDE);
WARN_ON(!list_empty(&mlxsw_sp->vfids.list));
WARN_ON(!list_empty(&mlxsw_sp->fids));
}
@@ -3065,11 +3160,11 @@ static struct mlxsw_config_profile mlxsw_sp_config_profile = {
};
static struct mlxsw_driver mlxsw_sp_driver = {
- .kind = MLXSW_DEVICE_KIND_SPECTRUM,
- .owner = THIS_MODULE,
+ .kind = mlxsw_sp_driver_name,
.priv_size = sizeof(struct mlxsw_sp),
.init = mlxsw_sp_init,
.fini = mlxsw_sp_fini,
+ .basic_trap_groups_set = mlxsw_sp_basic_trap_groups_set,
.port_split = mlxsw_sp_port_split,
.port_unsplit = mlxsw_sp_port_unsplit,
.sb_pool_get = mlxsw_sp_sb_pool_get,
@@ -3092,19 +3187,30 @@ static bool mlxsw_sp_port_dev_check(const struct net_device *dev)
return dev->netdev_ops == &mlxsw_sp_port_netdev_ops;
}
+static int mlxsw_lower_dev_walk(struct net_device *lower_dev, void *data)
+{
+ struct mlxsw_sp_port **port = data;
+ int ret = 0;
+
+ if (mlxsw_sp_port_dev_check(lower_dev)) {
+ *port = netdev_priv(lower_dev);
+ ret = 1;
+ }
+
+ return ret;
+}
+
static struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find(struct net_device *dev)
{
- struct net_device *lower_dev;
- struct list_head *iter;
+ struct mlxsw_sp_port *port;
if (mlxsw_sp_port_dev_check(dev))
return netdev_priv(dev);
- netdev_for_each_all_lower_dev(dev, lower_dev, iter) {
- if (mlxsw_sp_port_dev_check(lower_dev))
- return netdev_priv(lower_dev);
- }
- return NULL;
+ port = NULL;
+ netdev_walk_all_lower_dev(dev, mlxsw_lower_dev_walk, &port);
+
+ return port;
}
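netdev_walk_all_lower_dev() calls the supplied function for each lower device and stops as soon as it returns non-zero, which is why mlxsw_lower_dev_walk() returns 1 on a match. A hypothetical caller stays a one-liner:

/* Find the mlxsw port (if any) beneath an upper device such as a
 * bridge or LAG; upper_dev is a placeholder name.
 */
struct mlxsw_sp_port *port = mlxsw_sp_port_dev_lower_find(upper_dev);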
static struct mlxsw_sp *mlxsw_sp_lower_get(struct net_device *dev)
@@ -3117,17 +3223,15 @@ static struct mlxsw_sp *mlxsw_sp_lower_get(struct net_device *dev)
static struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find_rcu(struct net_device *dev)
{
- struct net_device *lower_dev;
- struct list_head *iter;
+ struct mlxsw_sp_port *port;
if (mlxsw_sp_port_dev_check(dev))
return netdev_priv(dev);
- netdev_for_each_all_lower_dev_rcu(dev, lower_dev, iter) {
- if (mlxsw_sp_port_dev_check(lower_dev))
- return netdev_priv(lower_dev);
- }
- return NULL;
+ port = NULL;
+ netdev_walk_all_lower_dev_rcu(dev, mlxsw_lower_dev_walk, &port);
+
+ return port;
}
struct mlxsw_sp_port *mlxsw_sp_port_lower_dev_hold(struct net_device *dev)
@@ -3171,11 +3275,9 @@ static bool mlxsw_sp_rif_should_config(struct mlxsw_sp_rif *r,
static int mlxsw_sp_avail_rif_get(struct mlxsw_sp *mlxsw_sp)
{
- struct mlxsw_resources *resources;
int i;
- resources = mlxsw_core_resources_get(mlxsw_sp->core);
- for (i = 0; i < resources->max_rif; i++)
+ for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++)
if (!mlxsw_sp->rifs[i])
return i;
@@ -3698,14 +3800,15 @@ static bool mlxsw_sp_port_fdb_should_flush(struct mlxsw_sp_port *mlxsw_sp_port,
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
u8 local_port = mlxsw_sp_port->local_port;
u16 lag_id = mlxsw_sp_port->lag_id;
- struct mlxsw_resources *resources;
+ u64 max_lag_members;
int i, count = 0;
if (!mlxsw_sp_port->lagged)
return true;
- resources = mlxsw_core_resources_get(mlxsw_sp->core);
- for (i = 0; i < resources->max_ports_in_lag; i++) {
+ max_lag_members = MLXSW_CORE_RES_GET(mlxsw_sp->core,
+ MAX_LAG_MEMBERS);
+ for (i = 0; i < max_lag_members; i++) {
struct mlxsw_sp_port *lag_port;
lag_port = mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i);
@@ -3911,13 +4014,13 @@ static int mlxsw_sp_lag_index_get(struct mlxsw_sp *mlxsw_sp,
struct net_device *lag_dev,
u16 *p_lag_id)
{
- struct mlxsw_resources *resources;
struct mlxsw_sp_upper *lag;
int free_lag_id = -1;
+ u64 max_lag;
int i;
- resources = mlxsw_core_resources_get(mlxsw_sp->core);
- for (i = 0; i < resources->max_lag; i++) {
+ max_lag = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_LAG);
+ for (i = 0; i < max_lag; i++) {
lag = mlxsw_sp_lag_get(mlxsw_sp, i);
if (lag->ref_count) {
if (lag->dev == lag_dev) {
@@ -3951,11 +4054,12 @@ mlxsw_sp_master_lag_check(struct mlxsw_sp *mlxsw_sp,
static int mlxsw_sp_port_lag_index_get(struct mlxsw_sp *mlxsw_sp,
u16 lag_id, u8 *p_port_index)
{
- struct mlxsw_resources *resources;
+ u64 max_lag_members;
int i;
- resources = mlxsw_core_resources_get(mlxsw_sp->core);
- for (i = 0; i < resources->max_ports_in_lag; i++) {
+ max_lag_members = MLXSW_CORE_RES_GET(mlxsw_sp->core,
+ MAX_LAG_MEMBERS);
+ for (i = 0; i < max_lag_members; i++) {
if (!mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i)) {
*p_port_index = i;
return 0;
@@ -4652,6 +4756,16 @@ static struct notifier_block mlxsw_sp_router_netevent_nb __read_mostly = {
.notifier_call = mlxsw_sp_router_netevent_event,
};
+static const struct pci_device_id mlxsw_sp_pci_id_table[] = {
+ {PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM), 0},
+ {0, },
+};
+
+static struct pci_driver mlxsw_sp_pci_driver = {
+ .name = mlxsw_sp_driver_name,
+ .id_table = mlxsw_sp_pci_id_table,
+};
+
static int __init mlxsw_sp_module_init(void)
{
int err;
@@ -4663,8 +4777,15 @@ static int __init mlxsw_sp_module_init(void)
err = mlxsw_core_driver_register(&mlxsw_sp_driver);
if (err)
goto err_core_driver_register;
+
+ err = mlxsw_pci_driver_register(&mlxsw_sp_pci_driver);
+ if (err)
+ goto err_pci_driver_register;
+
return 0;
+err_pci_driver_register:
+ mlxsw_core_driver_unregister(&mlxsw_sp_driver);
err_core_driver_register:
unregister_netevent_notifier(&mlxsw_sp_router_netevent_nb);
unregister_inetaddr_notifier(&mlxsw_sp_inetaddr_nb);
@@ -4674,6 +4795,7 @@ err_core_driver_register:
static void __exit mlxsw_sp_module_exit(void)
{
+ mlxsw_pci_driver_unregister(&mlxsw_sp_pci_driver);
mlxsw_core_driver_unregister(&mlxsw_sp_driver);
unregister_netevent_notifier(&mlxsw_sp_router_netevent_nb);
unregister_inetaddr_notifier(&mlxsw_sp_inetaddr_nb);
@@ -4686,4 +4808,4 @@ module_exit(mlxsw_sp_module_exit);
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
MODULE_DESCRIPTION("Mellanox Spectrum driver");
-MODULE_MLXSW_DRIVER_ALIAS(MLXSW_DEVICE_KIND_SPECTRUM);
+MODULE_DEVICE_TABLE(pci, mlxsw_sp_pci_id_table);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
index 97bbc1d21df8..cc1af19d699a 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
@@ -316,7 +316,6 @@ struct mlxsw_sp_port_pcpu_stats {
};
struct mlxsw_sp_port {
- struct mlxsw_core_port core_port; /* must be first */
struct net_device *dev;
struct mlxsw_sp_port_pcpu_stats __percpu *pcpu_stats;
struct mlxsw_sp *mlxsw_sp;
@@ -479,12 +478,9 @@ static inline struct mlxsw_sp_rif *
mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp *mlxsw_sp,
const struct net_device *dev)
{
- struct mlxsw_resources *resources;
int i;
- resources = mlxsw_core_resources_get(mlxsw_sp->core);
-
- for (i = 0; i < resources->max_rif; i++)
+ for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++)
if (mlxsw_sp->rifs[i] && mlxsw_sp->rifs[i]->dev == dev)
return mlxsw_sp->rifs[i];
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
index bcaed8a38037..a7468262f118 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
@@ -611,6 +611,9 @@ int mlxsw_sp_sb_pool_set(struct mlxsw_core *mlxsw_core,
u32 pool_size = MLXSW_SP_BYTES_TO_CELLS(size);
enum mlxsw_reg_sbpr_mode mode;
+ if (size > MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_BUFFER_SIZE))
+ return -EINVAL;
+
mode = (enum mlxsw_reg_sbpr_mode) threshold_type;
return mlxsw_sp_sb_pr_write(mlxsw_sp, pool, dir, mode, pool_size);
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
index e83072da6272..01d0efa9c5c7 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
@@ -382,12 +382,10 @@ static void mlxsw_sp_lpm_init(struct mlxsw_sp *mlxsw_sp)
static struct mlxsw_sp_vr *mlxsw_sp_vr_find_unused(struct mlxsw_sp *mlxsw_sp)
{
- struct mlxsw_resources *resources;
struct mlxsw_sp_vr *vr;
int i;
- resources = mlxsw_core_resources_get(mlxsw_sp->core);
- for (i = 0; i < resources->max_virtual_routers; i++) {
+ for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
vr = &mlxsw_sp->router.vrs[i];
if (!vr->used)
return vr;
@@ -429,14 +427,12 @@ static struct mlxsw_sp_vr *mlxsw_sp_vr_find(struct mlxsw_sp *mlxsw_sp,
u32 tb_id,
enum mlxsw_sp_l3proto proto)
{
- struct mlxsw_resources *resources;
struct mlxsw_sp_vr *vr;
int i;
tb_id = mlxsw_sp_fix_tb_id(tb_id);
- resources = mlxsw_core_resources_get(mlxsw_sp->core);
- for (i = 0; i < resources->max_virtual_routers; i++) {
+ for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
vr = &mlxsw_sp->router.vrs[i];
if (vr->used && vr->proto == proto && vr->tb_id == tb_id)
return vr;
@@ -572,21 +568,20 @@ static void mlxsw_sp_vr_put(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_vr *vr)
static int mlxsw_sp_vrs_init(struct mlxsw_sp *mlxsw_sp)
{
- struct mlxsw_resources *resources;
struct mlxsw_sp_vr *vr;
+ u64 max_vrs;
int i;
- resources = mlxsw_core_resources_get(mlxsw_sp->core);
- if (!resources->max_virtual_routers_valid)
+ if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_VRS))
return -EIO;
- mlxsw_sp->router.vrs = kcalloc(resources->max_virtual_routers,
- sizeof(struct mlxsw_sp_vr),
+ max_vrs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS);
+ mlxsw_sp->router.vrs = kcalloc(max_vrs, sizeof(struct mlxsw_sp_vr),
GFP_KERNEL);
if (!mlxsw_sp->router.vrs)
return -ENOMEM;
- for (i = 0; i < resources->max_virtual_routers; i++) {
+ for (i = 0; i < max_vrs; i++) {
vr = &mlxsw_sp->router.vrs[i];
vr->id = i;
}
@@ -598,6 +593,14 @@ static void mlxsw_sp_router_fib_flush(struct mlxsw_sp *mlxsw_sp);
static void mlxsw_sp_vrs_fini(struct mlxsw_sp *mlxsw_sp)
{
+ /* At this stage we're guaranteed not to have new incoming
+ * FIB notifications and the work queue is free from FIBs
+ * sitting on top of mlxsw netdevs. However, we can still
+ * have other FIBs queued. Flush the queue before flushing
+ * the device's tables. No need for locks, as we're the only
+ * writer.
+ */
+ mlxsw_core_flush_owq();
mlxsw_sp_router_fib_flush(mlxsw_sp);
kfree(mlxsw_sp->router.vrs);
}
@@ -939,7 +942,7 @@ static void mlxsw_sp_router_neigh_update_hw(struct work_struct *work)
char rauht_pl[MLXSW_REG_RAUHT_LEN];
struct net_device *dev;
bool entry_connected;
- u8 nud_state;
+ u8 nud_state, dead;
bool updating;
bool removing;
bool adding;
@@ -950,10 +953,11 @@ static void mlxsw_sp_router_neigh_update_hw(struct work_struct *work)
dip = ntohl(*((__be32 *) n->primary_key));
memcpy(neigh_entry->ha, n->ha, sizeof(neigh_entry->ha));
nud_state = n->nud_state;
+ dead = n->dead;
dev = n->dev;
read_unlock_bh(&n->lock);
- entry_connected = nud_state & NUD_VALID;
+ entry_connected = nud_state & NUD_VALID && !dead;
adding = (!neigh_entry->offloaded) && entry_connected;
updating = neigh_entry->offloaded && entry_connected;
removing = neigh_entry->offloaded && !entry_connected;
@@ -1348,7 +1352,7 @@ static int mlxsw_sp_nexthop_init(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_neigh_entry *neigh_entry;
struct net_device *dev = fib_nh->nh_dev;
struct neighbour *n;
- u8 nud_state;
+ u8 nud_state, dead;
/* Take a reference of neigh here ensuring that neigh would
* not be destroyed before the nexthop entry is finished.
@@ -1380,8 +1384,9 @@ static int mlxsw_sp_nexthop_init(struct mlxsw_sp *mlxsw_sp,
list_add_tail(&nh->neigh_list_node, &neigh_entry->nexthop_list);
read_lock_bh(&n->lock);
nud_state = n->nud_state;
+ dead = n->dead;
read_unlock_bh(&n->lock);
- __mlxsw_sp_nexthop_neigh_update(nh, !(nud_state & NUD_VALID));
+ __mlxsw_sp_nexthop_neigh_update(nh, !(nud_state & NUD_VALID && !dead));
return 0;
}
@@ -1391,6 +1396,7 @@ static void mlxsw_sp_nexthop_fini(struct mlxsw_sp *mlxsw_sp,
{
struct mlxsw_sp_neigh_entry *neigh_entry = nh->neigh_entry;
+ __mlxsw_sp_nexthop_neigh_update(nh, true);
list_del(&nh->neigh_list_node);
/* If that is the last nexthop connected to that neigh, remove from
@@ -1449,6 +1455,8 @@ mlxsw_sp_nexthop_group_destroy(struct mlxsw_sp *mlxsw_sp,
nh = &nh_grp->nexthops[i];
mlxsw_sp_nexthop_fini(mlxsw_sp, nh);
}
+ mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
+ WARN_ON_ONCE(nh_grp->adj_index_valid);
kfree(nh_grp);
}
@@ -1872,14 +1880,12 @@ static int mlxsw_sp_router_set_abort_trap(struct mlxsw_sp *mlxsw_sp)
static void mlxsw_sp_router_fib_flush(struct mlxsw_sp *mlxsw_sp)
{
- struct mlxsw_resources *resources;
struct mlxsw_sp_fib_entry *fib_entry;
struct mlxsw_sp_fib_entry *tmp;
struct mlxsw_sp_vr *vr;
int i;
- resources = mlxsw_core_resources_get(mlxsw_sp->core);
- for (i = 0; i < resources->max_virtual_routers; i++) {
+ for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
vr = &mlxsw_sp->router.vrs[i];
if (!vr->used)
@@ -1903,6 +1909,9 @@ static void mlxsw_sp_router_fib4_abort(struct mlxsw_sp *mlxsw_sp)
{
int err;
+ if (mlxsw_sp->router.aborted)
+ return;
+ dev_warn(mlxsw_sp->bus_info->dev, "FIB abort triggered. Note that FIB entries are no longer being offloaded to this device.\n");
mlxsw_sp_router_fib_flush(mlxsw_sp);
mlxsw_sp->router.aborted = true;
err = mlxsw_sp_router_set_abort_trap(mlxsw_sp);
@@ -1912,21 +1921,21 @@ static void mlxsw_sp_router_fib4_abort(struct mlxsw_sp *mlxsw_sp)
static int __mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
{
- struct mlxsw_resources *resources;
char rgcr_pl[MLXSW_REG_RGCR_LEN];
+ u64 max_rifs;
int err;
- resources = mlxsw_core_resources_get(mlxsw_sp->core);
- if (!resources->max_rif_valid)
+ if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_RIFS))
return -EIO;
- mlxsw_sp->rifs = kcalloc(resources->max_rif,
- sizeof(struct mlxsw_sp_rif *), GFP_KERNEL);
+ max_rifs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS);
+ mlxsw_sp->rifs = kcalloc(max_rifs, sizeof(struct mlxsw_sp_rif *),
+ GFP_KERNEL);
if (!mlxsw_sp->rifs)
return -ENOMEM;
mlxsw_reg_rgcr_pack(rgcr_pl, true);
- mlxsw_reg_rgcr_max_router_interfaces_set(rgcr_pl, resources->max_rif);
+ mlxsw_reg_rgcr_max_router_interfaces_set(rgcr_pl, max_rifs);
err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl);
if (err)
goto err_rgcr_fail;
@@ -1940,47 +1949,101 @@ err_rgcr_fail:
static void __mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
{
- struct mlxsw_resources *resources;
char rgcr_pl[MLXSW_REG_RGCR_LEN];
int i;
mlxsw_reg_rgcr_pack(rgcr_pl, false);
mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl);
- resources = mlxsw_core_resources_get(mlxsw_sp->core);
- for (i = 0; i < resources->max_rif; i++)
+ for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++)
WARN_ON_ONCE(mlxsw_sp->rifs[i]);
kfree(mlxsw_sp->rifs);
}
-static int mlxsw_sp_router_fib_event(struct notifier_block *nb,
- unsigned long event, void *ptr)
+struct mlxsw_sp_fib_event_work {
+ struct delayed_work dw;
+ struct fib_entry_notifier_info fen_info;
+ struct mlxsw_sp *mlxsw_sp;
+ unsigned long event;
+};
+
+static void mlxsw_sp_router_fib_event_work(struct work_struct *work)
{
- struct mlxsw_sp *mlxsw_sp = container_of(nb, struct mlxsw_sp, fib_nb);
- struct fib_entry_notifier_info *fen_info = ptr;
+ struct mlxsw_sp_fib_event_work *fib_work =
+ container_of(work, struct mlxsw_sp_fib_event_work, dw.work);
+ struct mlxsw_sp *mlxsw_sp = fib_work->mlxsw_sp;
int err;
- if (!net_eq(fen_info->info.net, &init_net))
- return NOTIFY_DONE;
-
- switch (event) {
+ /* Protect internal structures from changes */
+ rtnl_lock();
+ switch (fib_work->event) {
case FIB_EVENT_ENTRY_ADD:
- err = mlxsw_sp_router_fib4_add(mlxsw_sp, fen_info);
+ err = mlxsw_sp_router_fib4_add(mlxsw_sp, &fib_work->fen_info);
if (err)
mlxsw_sp_router_fib4_abort(mlxsw_sp);
+ fib_info_put(fib_work->fen_info.fi);
break;
case FIB_EVENT_ENTRY_DEL:
- mlxsw_sp_router_fib4_del(mlxsw_sp, fen_info);
+ mlxsw_sp_router_fib4_del(mlxsw_sp, &fib_work->fen_info);
+ fib_info_put(fib_work->fen_info.fi);
break;
case FIB_EVENT_RULE_ADD: /* fall through */
case FIB_EVENT_RULE_DEL:
mlxsw_sp_router_fib4_abort(mlxsw_sp);
break;
}
+ rtnl_unlock();
+ kfree(fib_work);
+}
+
+/* Called with rcu_read_lock() */
+static int mlxsw_sp_router_fib_event(struct notifier_block *nb,
+ unsigned long event, void *ptr)
+{
+ struct mlxsw_sp *mlxsw_sp = container_of(nb, struct mlxsw_sp, fib_nb);
+ struct mlxsw_sp_fib_event_work *fib_work;
+ struct fib_notifier_info *info = ptr;
+
+ if (!net_eq(info->net, &init_net))
+ return NOTIFY_DONE;
+
+ fib_work = kzalloc(sizeof(*fib_work), GFP_ATOMIC);
+ if (WARN_ON(!fib_work))
+ return NOTIFY_BAD;
+
+ INIT_DELAYED_WORK(&fib_work->dw, mlxsw_sp_router_fib_event_work);
+ fib_work->mlxsw_sp = mlxsw_sp;
+ fib_work->event = event;
+
+ switch (event) {
+ case FIB_EVENT_ENTRY_ADD: /* fall through */
+ case FIB_EVENT_ENTRY_DEL:
+ memcpy(&fib_work->fen_info, ptr, sizeof(fib_work->fen_info));
+ /* Take a reference on fib_info to prevent it from being
+ * freed while work is queued. Release it afterwards.
+ */
+ fib_info_hold(fib_work->fen_info.fi);
+ break;
+ }
+
+ mlxsw_core_schedule_odw(&fib_work->dw, 0);
+
return NOTIFY_DONE;
}
+static void mlxsw_sp_router_fib_dump_flush(struct notifier_block *nb)
+{
+ struct mlxsw_sp *mlxsw_sp = container_of(nb, struct mlxsw_sp, fib_nb);
+
+ /* Flush pending FIB notifications and then flush the device's
+ * table before requesting another dump. The FIB notification
+ * block is unregistered, so no need to take RTNL.
+ */
+ mlxsw_core_flush_owq();
+ mlxsw_sp_router_fib_flush(mlxsw_sp);
+}
+
int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
{
int err;
@@ -1996,14 +2059,20 @@ int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
if (err)
goto err_vrs_init;
- err = mlxsw_sp_neigh_init(mlxsw_sp);
+ err = mlxsw_sp_neigh_init(mlxsw_sp);
if (err)
goto err_neigh_init;
mlxsw_sp->fib_nb.notifier_call = mlxsw_sp_router_fib_event;
- register_fib_notifier(&mlxsw_sp->fib_nb);
+ err = register_fib_notifier(&mlxsw_sp->fib_nb,
+ mlxsw_sp_router_fib_dump_flush);
+ if (err)
+ goto err_register_fib_notifier;
+
return 0;
+err_register_fib_notifier:
+ mlxsw_sp_neigh_fini(mlxsw_sp);
err_neigh_init:
mlxsw_sp_vrs_fini(mlxsw_sp);
err_vrs_init:
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
index 1e2c8eca3af1..b87ba7d36bc4 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
@@ -1196,11 +1196,12 @@ static struct mlxsw_sp_port *mlxsw_sp_lag_rep_port(struct mlxsw_sp *mlxsw_sp,
u16 lag_id)
{
struct mlxsw_sp_port *mlxsw_sp_port;
- struct mlxsw_resources *resources;
+ u64 max_lag_members;
int i;
- resources = mlxsw_core_resources_get(mlxsw_sp->core);
- for (i = 0; i < resources->max_ports_in_lag; i++) {
+ max_lag_members = MLXSW_CORE_RES_GET(mlxsw_sp->core,
+ MAX_LAG_MEMBERS);
+ for (i = 0; i < max_lag_members; i++) {
mlxsw_sp_port = mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i);
if (mlxsw_sp_port)
return mlxsw_sp_port;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/switchib.c b/drivers/net/ethernet/mellanox/mlxsw/switchib.c
new file mode 100644
index 000000000000..74341fe0eb25
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/switchib.c
@@ -0,0 +1,605 @@
+/*
+ * drivers/net/ethernet/mellanox/mlxsw/switchib.c
+ * Copyright (c) 2016 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2016 Elad Raz <eladr@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/slab.h>
+#include <linux/device.h>
+#include <linux/skbuff.h>
+#include <linux/if_vlan.h>
+#include <net/switchdev.h>
+
+#include "pci.h"
+#include "core.h"
+#include "reg.h"
+#include "port.h"
+#include "trap.h"
+#include "txheader.h"
+#include "ib.h"
+
+static const char mlxsw_sib_driver_name[] = "mlxsw_switchib";
+static const char mlxsw_sib2_driver_name[] = "mlxsw_switchib2";
+
+struct mlxsw_sib_port;
+
+struct mlxsw_sib {
+ struct mlxsw_sib_port **ports;
+ struct mlxsw_core *core;
+ const struct mlxsw_bus_info *bus_info;
+};
+
+struct mlxsw_sib_port {
+ struct mlxsw_sib *mlxsw_sib;
+ u8 local_port;
+ struct {
+ u8 module;
+ } mapping;
+};
+
+/* tx_v1_hdr_version
+ * Tx header version.
+ * Must be set to 1.
+ */
+MLXSW_ITEM32(tx_v1, hdr, version, 0x00, 28, 4);
+
+/* tx_v1_hdr_ctl
+ * Packet control type.
+ * 0 - Ethernet control (e.g. EMADs, LACP)
+ * 1 - Ethernet data
+ */
+MLXSW_ITEM32(tx_v1, hdr, ctl, 0x00, 26, 2);
+
+/* tx_v1_hdr_proto
+ * Packet protocol type. Must be set to 1 (Ethernet).
+ */
+MLXSW_ITEM32(tx_v1, hdr, proto, 0x00, 21, 3);
+
+/* tx_v1_hdr_swid
+ * Switch partition ID. Must be set to 0.
+ */
+MLXSW_ITEM32(tx_v1, hdr, swid, 0x00, 12, 3);
+
+/* tx_v1_hdr_control_tclass
+ * Indicates if the packet should use the control TClass and not one
+ * of the data TClasses.
+ */
+MLXSW_ITEM32(tx_v1, hdr, control_tclass, 0x00, 6, 1);
+
+/* tx_v1_hdr_port_mid
+ * Destination local port for unicast packets.
+ * Destination multicast ID for multicast packets.
+ *
+ * Control packets are directed to a specific egress port, while data
+ * packets are transmitted through the CPU port (0) into the switch partition,
+ * where forwarding rules are applied.
+ */
+MLXSW_ITEM32(tx_v1, hdr, port_mid, 0x04, 16, 16);
+
+/* tx_v1_hdr_type
+ * 0 - Data packets
+ * 6 - Control packets
+ */
+MLXSW_ITEM32(tx_v1, hdr, type, 0x0C, 0, 4);
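The MLXSW_ITEM32() declarations above generate the mlxsw_tx_v1_hdr_*_set() helpers used in the constructor below, each packing a value into a bit-field of a big-endian 32-bit word inside the raw header buffer. A rough sketch of the kind of setter assumed to come out of MLXSW_ITEM32(tx_v1, hdr, version, 0x00, 28, 4) — illustrative only, not the real item.h macro:

static inline void example_tx_v1_hdr_version_set(char *buf, u32 val)
{
	__be32 *p = (__be32 *)(buf + 0x00);	/* word at byte offset 0x00 */
	u32 word = be32_to_cpu(*p);
	u32 mask = GENMASK(31, 28);		/* 4-bit field at bit offset 28 */

	word = (word & ~mask) | ((val << 28) & mask);
	*p = cpu_to_be32(word);
}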
+
+static void
+mlxsw_sib_tx_v1_hdr_construct(struct sk_buff *skb,
+ const struct mlxsw_tx_info *tx_info)
+{
+ char *txhdr = skb_push(skb, MLXSW_TXHDR_LEN);
+
+ memset(txhdr, 0, MLXSW_TXHDR_LEN);
+
+ mlxsw_tx_v1_hdr_version_set(txhdr, MLXSW_TXHDR_VERSION_1);
+ mlxsw_tx_v1_hdr_ctl_set(txhdr, MLXSW_TXHDR_ETH_CTL);
+ mlxsw_tx_v1_hdr_proto_set(txhdr, MLXSW_TXHDR_PROTO_ETH);
+ mlxsw_tx_v1_hdr_swid_set(txhdr, 0);
+ mlxsw_tx_v1_hdr_control_tclass_set(txhdr, 1);
+ mlxsw_tx_v1_hdr_port_mid_set(txhdr, tx_info->local_port);
+ mlxsw_tx_v1_hdr_type_set(txhdr, MLXSW_TXHDR_TYPE_CONTROL);
+}
+
+static int
+mlxsw_sib_port_admin_status_set(struct mlxsw_sib_port *mlxsw_sib_port,
+ bool is_up)
+{
+ struct mlxsw_sib *mlxsw_sib = mlxsw_sib_port->mlxsw_sib;
+ char paos_pl[MLXSW_REG_PAOS_LEN];
+
+ mlxsw_reg_paos_pack(paos_pl, mlxsw_sib_port->local_port,
+ is_up ? MLXSW_PORT_ADMIN_STATUS_UP :
+ MLXSW_PORT_ADMIN_STATUS_DOWN);
+ return mlxsw_reg_write(mlxsw_sib->core, MLXSW_REG(paos), paos_pl);
+}
+
+static int mlxsw_sib_port_mtu_set(struct mlxsw_sib_port *mlxsw_sib_port,
+ u16 mtu)
+{
+ struct mlxsw_sib *mlxsw_sib = mlxsw_sib_port->mlxsw_sib;
+ char pmtu_pl[MLXSW_REG_PMTU_LEN];
+ int max_mtu;
+ int err;
+
+ mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sib_port->local_port, 0);
+ err = mlxsw_reg_query(mlxsw_sib->core, MLXSW_REG(pmtu), pmtu_pl);
+ if (err)
+ return err;
+ max_mtu = mlxsw_reg_pmtu_max_mtu_get(pmtu_pl);
+
+ if (mtu > max_mtu)
+ return -EINVAL;
+
+ mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sib_port->local_port, mtu);
+ return mlxsw_reg_write(mlxsw_sib->core, MLXSW_REG(pmtu), pmtu_pl);
+}
+
+static int mlxsw_sib_port_set(struct mlxsw_sib_port *mlxsw_sib_port, u8 port)
+{
+ struct mlxsw_sib *mlxsw_sib = mlxsw_sib_port->mlxsw_sib;
+ char plib_pl[MLXSW_REG_PLIB_LEN] = {0};
+
+ mlxsw_reg_plib_local_port_set(plib_pl, mlxsw_sib_port->local_port);
+ mlxsw_reg_plib_ib_port_set(plib_pl, port);
+ return mlxsw_reg_write(mlxsw_sib->core, MLXSW_REG(plib), plib_pl);
+}
+
+static int mlxsw_sib_port_swid_set(struct mlxsw_sib_port *mlxsw_sib_port,
+ u8 swid)
+{
+ struct mlxsw_sib *mlxsw_sib = mlxsw_sib_port->mlxsw_sib;
+ char pspa_pl[MLXSW_REG_PSPA_LEN];
+
+ mlxsw_reg_pspa_pack(pspa_pl, swid, mlxsw_sib_port->local_port);
+ return mlxsw_reg_write(mlxsw_sib->core, MLXSW_REG(pspa), pspa_pl);
+}
+
+static int mlxsw_sib_port_module_info_get(struct mlxsw_sib *mlxsw_sib,
+ u8 local_port, u8 *p_module,
+ u8 *p_width)
+{
+ char pmlp_pl[MLXSW_REG_PMLP_LEN];
+ int err;
+
+ mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
+ err = mlxsw_reg_query(mlxsw_sib->core, MLXSW_REG(pmlp), pmlp_pl);
+ if (err)
+ return err;
+ *p_module = mlxsw_reg_pmlp_module_get(pmlp_pl, 0);
+ *p_width = mlxsw_reg_pmlp_width_get(pmlp_pl);
+ return 0;
+}
+
+static int mlxsw_sib_port_speed_set(struct mlxsw_sib_port *mlxsw_sib_port,
+ u16 speed, u16 width)
+{
+ struct mlxsw_sib *mlxsw_sib = mlxsw_sib_port->mlxsw_sib;
+ char ptys_pl[MLXSW_REG_PTYS_LEN];
+
+ mlxsw_reg_ptys_ib_pack(ptys_pl, mlxsw_sib_port->local_port, speed,
+ width);
+ return mlxsw_reg_write(mlxsw_sib->core, MLXSW_REG(ptys), ptys_pl);
+}
+
+static bool mlxsw_sib_port_created(struct mlxsw_sib *mlxsw_sib, u8 local_port)
+{
+ return mlxsw_sib->ports[local_port] != NULL;
+}
+
+static int __mlxsw_sib_port_create(struct mlxsw_sib *mlxsw_sib, u8 local_port,
+ u8 module, u8 width)
+{
+ struct mlxsw_sib_port *mlxsw_sib_port;
+ int err;
+
+ mlxsw_sib_port = kzalloc(sizeof(*mlxsw_sib_port), GFP_KERNEL);
+ if (!mlxsw_sib_port)
+ return -ENOMEM;
+ mlxsw_sib_port->mlxsw_sib = mlxsw_sib;
+ mlxsw_sib_port->local_port = local_port;
+ mlxsw_sib_port->mapping.module = module;
+
+ err = mlxsw_sib_port_swid_set(mlxsw_sib_port, 0);
+ if (err) {
+ dev_err(mlxsw_sib->bus_info->dev, "Port %d: Failed to set SWID\n",
+ mlxsw_sib_port->local_port);
+ goto err_port_swid_set;
+ }
+
+ /* Expose the IB port number as its front panel name */
+ err = mlxsw_sib_port_set(mlxsw_sib_port, module + 1);
+ if (err) {
+ dev_err(mlxsw_sib->bus_info->dev, "Port %d: Failed to set IB port\n",
+ mlxsw_sib_port->local_port);
+ goto err_port_ib_set;
+ }
+
+ /* Supports all speeds from SDR to FDR (bitmask) and bus widths of
+ * 1x, 2x and 4x (3-bit bitmask)
+ */
+ err = mlxsw_sib_port_speed_set(mlxsw_sib_port,
+ MLXSW_REG_PTYS_IB_SPEED_EDR - 1,
+ BIT(3) - 1);
+ if (err) {
+ dev_err(mlxsw_sib->bus_info->dev, "Port %d: Failed to set speed\n",
+ mlxsw_sib_port->local_port);
+ goto err_port_speed_set;
+ }
+
+ /* Change to the maximum MTU the device supports; the SMA will take
+ * care of the active MTU
+ */
+ err = mlxsw_sib_port_mtu_set(mlxsw_sib_port, MLXSW_IB_DEFAULT_MTU);
+ if (err) {
+ dev_err(mlxsw_sib->bus_info->dev, "Port %d: Failed to set MTU\n",
+ mlxsw_sib_port->local_port);
+ goto err_port_mtu_set;
+ }
+
+ err = mlxsw_sib_port_admin_status_set(mlxsw_sib_port, true);
+ if (err) {
+ dev_err(mlxsw_sib->bus_info->dev, "Port %d: Failed to change admin state to UP\n",
+ mlxsw_sib_port->local_port);
+ goto err_port_admin_set;
+ }
+
+ mlxsw_core_port_ib_set(mlxsw_sib->core, mlxsw_sib_port->local_port,
+ mlxsw_sib_port);
+ mlxsw_sib->ports[local_port] = mlxsw_sib_port;
+ return 0;
+
+err_port_admin_set:
+err_port_mtu_set:
+err_port_speed_set:
+err_port_ib_set:
+ mlxsw_sib_port_swid_set(mlxsw_sib_port, MLXSW_PORT_SWID_DISABLED_PORT);
+err_port_swid_set:
+ kfree(mlxsw_sib_port);
+ return err;
+}
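__mlxsw_sib_port_create() above is a textbook instance of the kernel's goto-unwind error handling: each failing step branches to a label that undoes only the steps that already succeeded, in reverse order, so the SWID is rolled back and the port freed no matter where setup stops. A minimal standalone sketch of the idiom (step_a/step_b and their undo helpers are invented names):

static int example_setup(void)
{
	int err;

	err = step_a();			/* hypothetical first resource */
	if (err)
		return err;		/* nothing acquired yet */

	err = step_b();			/* hypothetical second resource */
	if (err)
		goto err_step_b;

	return 0;

err_step_b:
	undo_step_a();			/* unwind in reverse order */
	return err;
}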
+
+static int mlxsw_sib_port_create(struct mlxsw_sib *mlxsw_sib, u8 local_port,
+ u8 module, u8 width)
+{
+ int err;
+
+ err = mlxsw_core_port_init(mlxsw_sib->core, local_port);
+ if (err) {
+ dev_err(mlxsw_sib->bus_info->dev, "Port %d: Failed to init core port\n",
+ local_port);
+ return err;
+ }
+ err = __mlxsw_sib_port_create(mlxsw_sib, local_port, module, width);
+ if (err)
+ goto err_port_create;
+
+ return 0;
+
+err_port_create:
+ mlxsw_core_port_fini(mlxsw_sib->core, local_port);
+ return err;
+}
+
+static void __mlxsw_sib_port_remove(struct mlxsw_sib *mlxsw_sib, u8 local_port)
+{
+ struct mlxsw_sib_port *mlxsw_sib_port = mlxsw_sib->ports[local_port];
+
+ mlxsw_core_port_clear(mlxsw_sib->core, local_port, mlxsw_sib);
+ mlxsw_sib->ports[local_port] = NULL;
+ mlxsw_sib_port_admin_status_set(mlxsw_sib_port, false);
+ mlxsw_sib_port_swid_set(mlxsw_sib_port, MLXSW_PORT_SWID_DISABLED_PORT);
+ kfree(mlxsw_sib_port);
+}
+
+static void mlxsw_sib_port_remove(struct mlxsw_sib *mlxsw_sib, u8 local_port)
+{
+ __mlxsw_sib_port_remove(mlxsw_sib, local_port);
+ mlxsw_core_port_fini(mlxsw_sib->core, local_port);
+}
+
+static void mlxsw_sib_ports_remove(struct mlxsw_sib *mlxsw_sib)
+{
+ int i;
+
+ for (i = 1; i < MLXSW_PORT_MAX_IB_PORTS; i++)
+ if (mlxsw_sib_port_created(mlxsw_sib, i))
+ mlxsw_sib_port_remove(mlxsw_sib, i);
+ kfree(mlxsw_sib->ports);
+}
+
+static int mlxsw_sib_ports_create(struct mlxsw_sib *mlxsw_sib)
+{
+ size_t alloc_size;
+ u8 module, width;
+ int i;
+ int err;
+
+ alloc_size = sizeof(struct mlxsw_sib_port *) * MLXSW_PORT_MAX_IB_PORTS;
+ mlxsw_sib->ports = kzalloc(alloc_size, GFP_KERNEL);
+ if (!mlxsw_sib->ports)
+ return -ENOMEM;
+
+ for (i = 1; i < MLXSW_PORT_MAX_IB_PORTS; i++) {
+ err = mlxsw_sib_port_module_info_get(mlxsw_sib, i, &module,
+ &width);
+ if (err)
+ goto err_port_module_info_get;
+ if (!width)
+ continue;
+ err = mlxsw_sib_port_create(mlxsw_sib, i, module, width);
+ if (err)
+ goto err_port_create;
+ }
+ return 0;
+
+err_port_create:
+err_port_module_info_get:
+ for (i--; i >= 1; i--)
+ if (mlxsw_sib_port_created(mlxsw_sib, i))
+ mlxsw_sib_port_remove(mlxsw_sib, i);
+ kfree(mlxsw_sib->ports);
+ return err;
+}
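The unwind in mlxsw_sib_ports_create() is the loop variant of the same idiom: failure at index i means only ports below i can exist, so the error path walks i-1 back down to 1 (local port 0 is the CPU port and is never created), checking mlxsw_sib_port_created() because zero-width entries were skipped. A condensed sketch with placeholder helpers:

static int example_create_all(int n)
{
	int i, err;

	for (i = 1; i < n; i++) {	/* index 0 reserved for the CPU port */
		err = create_one(i);	/* hypothetical helper */
		if (err)
			goto err_create;
	}
	return 0;

err_create:
	for (i--; i >= 1; i--)
		destroy_one(i);		/* undo only what was created */
	return err;
}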
+
+static void
+mlxsw_sib_pude_ib_event_func(struct mlxsw_sib_port *mlxsw_sib_port,
+ enum mlxsw_reg_pude_oper_status status)
+{
+ if (status == MLXSW_PORT_OPER_STATUS_UP)
+ pr_info("ib link for port %d - up\n",
+ mlxsw_sib_port->mapping.module + 1);
+ else
+ pr_info("ib link for port %d - down\n",
+ mlxsw_sib_port->mapping.module + 1);
+}
+
+static void mlxsw_sib_pude_event_func(const struct mlxsw_reg_info *reg,
+ char *pude_pl, void *priv)
+{
+ struct mlxsw_sib *mlxsw_sib = priv;
+ struct mlxsw_sib_port *mlxsw_sib_port;
+ enum mlxsw_reg_pude_oper_status status;
+ u8 local_port;
+
+ local_port = mlxsw_reg_pude_local_port_get(pude_pl);
+ mlxsw_sib_port = mlxsw_sib->ports[local_port];
+ if (!mlxsw_sib_port) {
+ dev_warn(mlxsw_sib->bus_info->dev, "Port %d: Link event received for non-existent port\n",
+ local_port);
+ return;
+ }
+
+ status = mlxsw_reg_pude_oper_status_get(pude_pl);
+ mlxsw_sib_pude_ib_event_func(mlxsw_sib_port, status);
+}
+
+static const struct mlxsw_listener mlxsw_sib_listener[] = {
+ MLXSW_EVENTL(mlxsw_sib_pude_event_func, PUDE, EMAD),
+};
+
+static int mlxsw_sib_traps_init(struct mlxsw_sib *mlxsw_sib)
+{
+ int i;
+ int err;
+
+ for (i = 0; i < ARRAY_SIZE(mlxsw_sib_listener); i++) {
+ err = mlxsw_core_trap_register(mlxsw_sib->core,
+ &mlxsw_sib_listener[i],
+ mlxsw_sib);
+ if (err)
+ goto err_rx_listener_register;
+ }
+
+ return 0;
+
+err_rx_listener_register:
+ for (i--; i >= 0; i--) {
+ mlxsw_core_trap_unregister(mlxsw_sib->core,
+ &mlxsw_sib_listener[i],
+ mlxsw_sib);
+ }
+
+ return err;
+}
+
+static void mlxsw_sib_traps_fini(struct mlxsw_sib *mlxsw_sib)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(mlxsw_sib_listener); i++) {
+ mlxsw_core_trap_unregister(mlxsw_sib->core,
+ &mlxsw_sib_listener[i], mlxsw_sib);
+ }
+}
+
+static int mlxsw_sib_basic_trap_groups_set(struct mlxsw_core *mlxsw_core)
+{
+ char htgt_pl[MLXSW_REG_HTGT_LEN];
+
+ mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_EMAD,
+ MLXSW_REG_HTGT_INVALID_POLICER,
+ MLXSW_REG_HTGT_DEFAULT_PRIORITY,
+ MLXSW_REG_HTGT_DEFAULT_TC);
+ mlxsw_reg_htgt_swid_set(htgt_pl, MLXSW_PORT_SWID_ALL_SWIDS);
+ mlxsw_reg_htgt_local_path_rdq_set(htgt_pl,
+ MLXSW_REG_HTGT_LOCAL_PATH_RDQ_SIB_EMAD);
+ return mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl);
+}
+
+static int mlxsw_sib_init(struct mlxsw_core *mlxsw_core,
+ const struct mlxsw_bus_info *mlxsw_bus_info)
+{
+ struct mlxsw_sib *mlxsw_sib = mlxsw_core_driver_priv(mlxsw_core);
+ int err;
+
+ mlxsw_sib->core = mlxsw_core;
+ mlxsw_sib->bus_info = mlxsw_bus_info;
+
+ err = mlxsw_sib_ports_create(mlxsw_sib);
+ if (err) {
+ dev_err(mlxsw_sib->bus_info->dev, "Failed to create ports\n");
+ return err;
+ }
+
+ err = mlxsw_sib_traps_init(mlxsw_sib);
+ if (err) {
+ dev_err(mlxsw_sib->bus_info->dev, "Failed to set traps\n");
+ goto err_traps_init;
+ }
+
+ return 0;
+
+err_traps_init:
+ mlxsw_sib_ports_remove(mlxsw_sib);
+ return err;
+}
+
+static void mlxsw_sib_fini(struct mlxsw_core *mlxsw_core)
+{
+ struct mlxsw_sib *mlxsw_sib = mlxsw_core_driver_priv(mlxsw_core);
+
+ mlxsw_sib_traps_fini(mlxsw_sib);
+ mlxsw_sib_ports_remove(mlxsw_sib);
+}
+
+static struct mlxsw_config_profile mlxsw_sib_config_profile = {
+ .used_max_system_port = 1,
+ .max_system_port = 48000,
+ .used_max_ib_mc = 1,
+ .max_ib_mc = 27,
+ .used_max_pkey = 1,
+ .max_pkey = 32,
+ .swid_config = {
+ {
+ .used_type = 1,
+ .type = MLXSW_PORT_SWID_TYPE_IB,
+ }
+ },
+ .resource_query_enable = 0,
+};
+
+static struct mlxsw_driver mlxsw_sib_driver = {
+ .kind = mlxsw_sib_driver_name,
+ .priv_size = sizeof(struct mlxsw_sib),
+ .init = mlxsw_sib_init,
+ .fini = mlxsw_sib_fini,
+ .basic_trap_groups_set = mlxsw_sib_basic_trap_groups_set,
+ .txhdr_construct = mlxsw_sib_tx_v1_hdr_construct,
+ .txhdr_len = MLXSW_TXHDR_LEN,
+ .profile = &mlxsw_sib_config_profile,
+};
+
+static struct mlxsw_driver mlxsw_sib2_driver = {
+ .kind = mlxsw_sib2_driver_name,
+ .priv_size = sizeof(struct mlxsw_sib),
+ .init = mlxsw_sib_init,
+ .fini = mlxsw_sib_fini,
+ .basic_trap_groups_set = mlxsw_sib_basic_trap_groups_set,
+ .txhdr_construct = mlxsw_sib_tx_v1_hdr_construct,
+ .txhdr_len = MLXSW_TXHDR_LEN,
+ .profile = &mlxsw_sib_config_profile,
+};
+
+static const struct pci_device_id mlxsw_sib_pci_id_table[] = {
+ {PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SWITCHIB), 0},
+ {0, },
+};
+
+static struct pci_driver mlxsw_sib_pci_driver = {
+ .name = mlxsw_sib_driver_name,
+ .id_table = mlxsw_sib_pci_id_table,
+};
+
+static const struct pci_device_id mlxsw_sib2_pci_id_table[] = {
+ {PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SWITCHIB2), 0},
+ {0, },
+};
+
+static struct pci_driver mlxsw_sib2_pci_driver = {
+ .name = mlxsw_sib2_driver_name,
+ .id_table = mlxsw_sib2_pci_id_table,
+};
+
+static int __init mlxsw_sib_module_init(void)
+{
+ int err;
+
+ err = mlxsw_core_driver_register(&mlxsw_sib_driver);
+ if (err)
+ return err;
+
+ err = mlxsw_core_driver_register(&mlxsw_sib2_driver);
+ if (err)
+ goto err_sib2_driver_register;
+
+ err = mlxsw_pci_driver_register(&mlxsw_sib_pci_driver);
+ if (err)
+ goto err_sib_pci_driver_register;
+
+ err = mlxsw_pci_driver_register(&mlxsw_sib2_pci_driver);
+ if (err)
+ goto err_sib2_pci_driver_register;
+
+ return 0;
+
+err_sib2_pci_driver_register:
+ mlxsw_pci_driver_unregister(&mlxsw_sib_pci_driver);
+err_sib_pci_driver_register:
+ mlxsw_core_driver_unregister(&mlxsw_sib2_driver);
+err_sib2_driver_register:
+ mlxsw_core_driver_unregister(&mlxsw_sib_driver);
+ return err;
+}
+
+static void __exit mlxsw_sib_module_exit(void)
+{
+ mlxsw_pci_driver_unregister(&mlxsw_sib2_pci_driver);
+ mlxsw_pci_driver_unregister(&mlxsw_sib_pci_driver);
+ mlxsw_core_driver_unregister(&mlxsw_sib2_driver);
+ mlxsw_core_driver_unregister(&mlxsw_sib_driver);
+}
+
+module_init(mlxsw_sib_module_init);
+module_exit(mlxsw_sib_module_exit);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_AUTHOR("Elad Raz <eladr@@mellanox.com>");
+MODULE_DESCRIPTION("Mellanox SwitchIB and SwitchIB-2 driver");
+MODULE_ALIAS("mlxsw_switchib2");
+MODULE_DEVICE_TABLE(pci, mlxsw_sib_pci_id_table);
+MODULE_DEVICE_TABLE(pci, mlxsw_sib2_pci_id_table);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
index 92bda8703f87..150ccf5192a9 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
@@ -3,7 +3,7 @@
* Copyright (c) 2015 Mellanox Technologies. All rights reserved.
* Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
* Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
- * Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
+ * Copyright (c) 2015-2016 Elad Raz <eladr@mellanox.com>
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -37,6 +37,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
+#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/slab.h>
@@ -44,13 +45,14 @@
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <net/switchdev.h>
-#include <generated/utsrelease.h>
+#include "pci.h"
#include "core.h"
#include "reg.h"
#include "port.h"
#include "trap.h"
#include "txheader.h"
+#include "ib.h"
static const char mlxsw_sx_driver_name[] = "mlxsw_switchx2";
static const char mlxsw_sx_driver_version[] = "1.0";
@@ -74,11 +76,13 @@ struct mlxsw_sx_port_pcpu_stats {
};
struct mlxsw_sx_port {
- struct mlxsw_core_port core_port; /* must be first */
struct net_device *dev;
struct mlxsw_sx_port_pcpu_stats __percpu *pcpu_stats;
struct mlxsw_sx *mlxsw_sx;
u8 local_port;
+ struct {
+ u8 module;
+ } mapping;
};
/* tx_hdr_version
@@ -214,14 +218,14 @@ static int mlxsw_sx_port_oper_status_get(struct mlxsw_sx_port *mlxsw_sx_port,
return 0;
}
-static int mlxsw_sx_port_mtu_set(struct mlxsw_sx_port *mlxsw_sx_port, u16 mtu)
+static int __mlxsw_sx_port_mtu_set(struct mlxsw_sx_port *mlxsw_sx_port,
+ u16 mtu)
{
struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
char pmtu_pl[MLXSW_REG_PMTU_LEN];
int max_mtu;
int err;
- mtu += MLXSW_TXHDR_LEN + ETH_HLEN;
mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sx_port->local_port, 0);
err = mlxsw_reg_query(mlxsw_sx->core, MLXSW_REG(pmtu), pmtu_pl);
if (err)
@@ -235,6 +239,32 @@ static int mlxsw_sx_port_mtu_set(struct mlxsw_sx_port *mlxsw_sx_port, u16 mtu)
return mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(pmtu), pmtu_pl);
}
+static int mlxsw_sx_port_mtu_eth_set(struct mlxsw_sx_port *mlxsw_sx_port,
+ u16 mtu)
+{
+ mtu += MLXSW_TXHDR_LEN + ETH_HLEN;
+ return __mlxsw_sx_port_mtu_set(mlxsw_sx_port, mtu);
+}
+
+static int mlxsw_sx_port_mtu_ib_set(struct mlxsw_sx_port *mlxsw_sx_port,
+ u16 mtu)
+{
+ return __mlxsw_sx_port_mtu_set(mlxsw_sx_port, mtu);
+}
+
+static int mlxsw_sx_port_ib_port_set(struct mlxsw_sx_port *mlxsw_sx_port,
+ u8 ib_port)
+{
+ struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
+ char plib_pl[MLXSW_REG_PLIB_LEN] = {0};
+
+ mlxsw_reg_plib_local_port_set(plib_pl, mlxsw_sx_port->local_port);
+ mlxsw_reg_plib_ib_port_set(plib_pl, ib_port);
+ return mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(plib), plib_pl);
+}
+
static int mlxsw_sx_port_swid_set(struct mlxsw_sx_port *mlxsw_sx_port, u8 swid)
{
struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
@@ -254,18 +284,19 @@ mlxsw_sx_port_system_port_mapping_set(struct mlxsw_sx_port *mlxsw_sx_port)
return mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(sspr), sspr_pl);
}
-static int mlxsw_sx_port_module_check(struct mlxsw_sx_port *mlxsw_sx_port,
- bool *p_usable)
+static int mlxsw_sx_port_module_info_get(struct mlxsw_sx *mlxsw_sx,
+ u8 local_port, u8 *p_module,
+ u8 *p_width)
{
- struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
char pmlp_pl[MLXSW_REG_PMLP_LEN];
int err;
- mlxsw_reg_pmlp_pack(pmlp_pl, mlxsw_sx_port->local_port);
+ mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
err = mlxsw_reg_query(mlxsw_sx->core, MLXSW_REG(pmlp), pmlp_pl);
if (err)
return err;
- *p_usable = mlxsw_reg_pmlp_width_get(pmlp_pl) ? true : false;
+ *p_module = mlxsw_reg_pmlp_module_get(pmlp_pl, 0);
+ *p_width = mlxsw_reg_pmlp_width_get(pmlp_pl);
return 0;
}
@@ -343,7 +374,7 @@ static int mlxsw_sx_port_change_mtu(struct net_device *dev, int mtu)
struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);
int err;
- err = mlxsw_sx_port_mtu_set(mlxsw_sx_port, mtu);
+ err = mlxsw_sx_port_mtu_eth_set(mlxsw_sx_port, mtu);
if (err)
return err;
dev->mtu = mtu;
@@ -382,12 +413,26 @@ mlxsw_sx_port_get_stats64(struct net_device *dev,
return stats;
}
+static int mlxsw_sx_port_get_phys_port_name(struct net_device *dev, char *name,
+ size_t len)
+{
+ struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);
+ int err;
+
+ err = snprintf(name, len, "p%d", mlxsw_sx_port->mapping.module + 1);
+ if (err >= len)
+ return -EINVAL;
+
+ return 0;
+}
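Implementing .ndo_get_phys_port_name lets userspace discover the front-panel label of a switch port — the same module + 1 numbering used for the IB ports elsewhere in this diff — typically via the phys_port_name sysfs attribute (/sys/class/net/<dev>/phys_port_name), which udev can in turn use for predictable interface naming.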
+
static const struct net_device_ops mlxsw_sx_port_netdev_ops = {
.ndo_open = mlxsw_sx_port_open,
.ndo_stop = mlxsw_sx_port_stop,
.ndo_start_xmit = mlxsw_sx_port_xmit,
.ndo_change_mtu = mlxsw_sx_port_change_mtu,
.ndo_get_stats64 = mlxsw_sx_port_get_stats64,
+ .ndo_get_phys_port_name = mlxsw_sx_port_get_phys_port_name,
};
static void mlxsw_sx_port_get_drvinfo(struct net_device *dev,
@@ -410,7 +455,7 @@ static void mlxsw_sx_port_get_drvinfo(struct net_device *dev,
struct mlxsw_sx_port_hw_stats {
char str[ETH_GSTRING_LEN];
- u64 (*getter)(char *payload);
+ u64 (*getter)(const char *payload);
};
static const struct mlxsw_sx_port_hw_stats mlxsw_sx_port_hw_stats[] = {
@@ -642,6 +687,7 @@ static const struct mlxsw_sx_port_link_mode mlxsw_sx_port_link_mode[] = {
};
#define MLXSW_SX_PORT_LINK_MODE_LEN ARRAY_SIZE(mlxsw_sx_port_link_mode)
+#define MLXSW_SX_PORT_BASE_SPEED 10000 /* Mb/s */
static u32 mlxsw_sx_from_ptys_supported_port(u32 ptys_eth_proto)
{
@@ -741,14 +787,14 @@ static int mlxsw_sx_port_get_settings(struct net_device *dev,
u32 eth_proto_oper;
int err;
- mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sx_port->local_port, 0);
+ mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sx_port->local_port, 0);
err = mlxsw_reg_query(mlxsw_sx->core, MLXSW_REG(ptys), ptys_pl);
if (err) {
netdev_err(dev, "Failed to get proto");
return err;
}
- mlxsw_reg_ptys_unpack(ptys_pl, &eth_proto_cap,
- &eth_proto_admin, &eth_proto_oper);
+ mlxsw_reg_ptys_eth_unpack(ptys_pl, &eth_proto_cap,
+ &eth_proto_admin, &eth_proto_oper);
cmd->supported = mlxsw_sx_from_ptys_supported_port(eth_proto_cap) |
mlxsw_sx_from_ptys_supported_link(eth_proto_cap) |
@@ -789,6 +835,18 @@ static u32 mlxsw_sx_to_ptys_speed(u32 speed)
return ptys_proto;
}
+static u32 mlxsw_sx_to_ptys_upper_speed(u32 upper_speed)
+{
+ u32 ptys_proto = 0;
+ int i;
+
+ for (i = 0; i < MLXSW_SX_PORT_LINK_MODE_LEN; i++) {
+ if (mlxsw_sx_port_link_mode[i].speed <= upper_speed)
+ ptys_proto |= mlxsw_sx_port_link_mode[i].mask;
+ }
+ return ptys_proto;
+}
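To make the width-based selection concrete: with MLXSW_SX_PORT_BASE_SPEED of 10000 Mb/s, a 4-lane port yields upper_speed = 40000, and every mlxsw_sx_port_link_mode[] entry whose .speed is at most 40000 is OR-ed into the advertised mask. A toy model of the filter — the table entries are invented for illustration, not the driver's real link modes:

struct toy_link_mode {
	u32 mask;	/* PTYS protocol bit */
	u32 speed;	/* Mb/s */
};

static const struct toy_link_mode toy_modes[] = {
	{ .mask = BIT(0), .speed = 10000 },
	{ .mask = BIT(1), .speed = 40000 },
	{ .mask = BIT(2), .speed = 100000 },
};

static u32 toy_upper_speed_mask(u32 upper_speed)
{
	u32 proto = 0;
	int i;

	for (i = 0; i < ARRAY_SIZE(toy_modes); i++)
		if (toy_modes[i].speed <= upper_speed)
			proto |= toy_modes[i].mask;
	return proto;	/* 40000 -> BIT(0) | BIT(1) */
}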
+
static int mlxsw_sx_port_set_settings(struct net_device *dev,
struct ethtool_cmd *cmd)
{
@@ -808,13 +866,14 @@ static int mlxsw_sx_port_set_settings(struct net_device *dev,
mlxsw_sx_to_ptys_advert_link(cmd->advertising) :
mlxsw_sx_to_ptys_speed(speed);
- mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sx_port->local_port, 0);
+ mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sx_port->local_port, 0);
err = mlxsw_reg_query(mlxsw_sx->core, MLXSW_REG(ptys), ptys_pl);
if (err) {
netdev_err(dev, "Failed to get proto");
return err;
}
- mlxsw_reg_ptys_unpack(ptys_pl, &eth_proto_cap, &eth_proto_admin, NULL);
+ mlxsw_reg_ptys_eth_unpack(ptys_pl, &eth_proto_cap, &eth_proto_admin,
+ NULL);
eth_proto_new = eth_proto_new & eth_proto_cap;
if (!eth_proto_new) {
@@ -824,7 +883,8 @@ static int mlxsw_sx_port_set_settings(struct net_device *dev,
if (eth_proto_new == eth_proto_admin)
return 0;
- mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sx_port->local_port, eth_proto_new);
+ mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sx_port->local_port,
+ eth_proto_new);
err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(ptys), ptys_pl);
if (err) {
netdev_err(dev, "Failed to set proto admin");
@@ -888,7 +948,7 @@ static const struct switchdev_ops mlxsw_sx_port_switchdev_ops = {
static int mlxsw_sx_hw_id_get(struct mlxsw_sx *mlxsw_sx)
{
- char spad_pl[MLXSW_REG_SPAD_LEN];
+ char spad_pl[MLXSW_REG_SPAD_LEN] = {0};
int err;
err = mlxsw_reg_query(mlxsw_sx->core, MLXSW_REG(spad), spad_pl);
@@ -935,13 +995,28 @@ static int mlxsw_sx_port_stp_state_set(struct mlxsw_sx_port *mlxsw_sx_port,
return err;
}
-static int mlxsw_sx_port_speed_set(struct mlxsw_sx_port *mlxsw_sx_port,
- u32 speed)
+static int mlxsw_sx_port_ib_speed_set(struct mlxsw_sx_port *mlxsw_sx_port,
+ u16 speed, u16 width)
{
struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
char ptys_pl[MLXSW_REG_PTYS_LEN];
- mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sx_port->local_port, speed);
+ mlxsw_reg_ptys_ib_pack(ptys_pl, mlxsw_sx_port->local_port, speed,
+ width);
+ return mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(ptys), ptys_pl);
+}
+
+static int
+mlxsw_sx_port_speed_by_width_set(struct mlxsw_sx_port *mlxsw_sx_port, u8 width)
+{
+ struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
+ u32 upper_speed = MLXSW_SX_PORT_BASE_SPEED * width;
+ char ptys_pl[MLXSW_REG_PTYS_LEN];
+ u32 eth_proto_admin;
+
+ eth_proto_admin = mlxsw_sx_to_ptys_upper_speed(upper_speed);
+ mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sx_port->local_port,
+ eth_proto_admin);
return mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(ptys), ptys_pl);
}
@@ -956,20 +1031,22 @@ mlxsw_sx_port_mac_learning_mode_set(struct mlxsw_sx_port *mlxsw_sx_port,
return mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(spmlr), spmlr_pl);
}
-static int mlxsw_sx_port_create(struct mlxsw_sx *mlxsw_sx, u8 local_port)
+static int __mlxsw_sx_port_eth_create(struct mlxsw_sx *mlxsw_sx, u8 local_port,
+ u8 module, u8 width)
{
struct mlxsw_sx_port *mlxsw_sx_port;
struct net_device *dev;
- bool usable;
int err;
dev = alloc_etherdev(sizeof(struct mlxsw_sx_port));
if (!dev)
return -ENOMEM;
+ SET_NETDEV_DEV(dev, mlxsw_sx->bus_info->dev);
mlxsw_sx_port = netdev_priv(dev);
mlxsw_sx_port->dev = dev;
mlxsw_sx_port->mlxsw_sx = mlxsw_sx;
mlxsw_sx_port->local_port = local_port;
+ mlxsw_sx_port->mapping.module = module;
mlxsw_sx_port->pcpu_stats =
netdev_alloc_pcpu_stats(struct mlxsw_sx_port_pcpu_stats);
@@ -994,24 +1071,14 @@ static int mlxsw_sx_port_create(struct mlxsw_sx *mlxsw_sx, u8 local_port)
dev->features |= NETIF_F_NETNS_LOCAL | NETIF_F_LLTX | NETIF_F_SG |
NETIF_F_VLAN_CHALLENGED;
+ dev->min_mtu = 0;
+ dev->max_mtu = ETH_MAX_MTU;
+
/* Each packet needs to have a Tx header (metadata) on top all other
* headers.
*/
dev->needed_headroom = MLXSW_TXHDR_LEN;
- err = mlxsw_sx_port_module_check(mlxsw_sx_port, &usable);
- if (err) {
- dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to check module\n",
- mlxsw_sx_port->local_port);
- goto err_port_module_check;
- }
-
- if (!usable) {
- dev_dbg(mlxsw_sx->bus_info->dev, "Port %d: Not usable, skipping initialization\n",
- mlxsw_sx_port->local_port);
- goto port_not_usable;
- }
-
err = mlxsw_sx_port_system_port_mapping_set(mlxsw_sx_port);
if (err) {
dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to set system port mapping\n",
@@ -1026,15 +1093,14 @@ static int mlxsw_sx_port_create(struct mlxsw_sx *mlxsw_sx, u8 local_port)
goto err_port_swid_set;
}
- err = mlxsw_sx_port_speed_set(mlxsw_sx_port,
- MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4);
+ err = mlxsw_sx_port_speed_by_width_set(mlxsw_sx_port, width);
if (err) {
dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to set speed\n",
mlxsw_sx_port->local_port);
goto err_port_speed_set;
}
- err = mlxsw_sx_port_mtu_set(mlxsw_sx_port, ETH_DATA_LEN);
+ err = mlxsw_sx_port_mtu_eth_set(mlxsw_sx_port, ETH_DATA_LEN);
if (err) {
dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to set MTU\n",
mlxsw_sx_port->local_port);
@@ -1069,19 +1135,11 @@ static int mlxsw_sx_port_create(struct mlxsw_sx *mlxsw_sx, u8 local_port)
goto err_register_netdev;
}
- err = mlxsw_core_port_init(mlxsw_sx->core, &mlxsw_sx_port->core_port,
- mlxsw_sx_port->local_port, dev, false, 0);
- if (err) {
- dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to init core port\n",
- mlxsw_sx_port->local_port);
- goto err_core_port_init;
- }
-
+ mlxsw_core_port_eth_set(mlxsw_sx->core, mlxsw_sx_port->local_port,
+ mlxsw_sx_port, dev, false, 0);
mlxsw_sx->ports[local_port] = mlxsw_sx_port;
return 0;
-err_core_port_init:
- unregister_netdev(dev);
err_register_netdev:
err_port_mac_learning_mode_set:
err_port_stp_state_set:
@@ -1091,8 +1149,6 @@ err_port_speed_set:
mlxsw_sx_port_swid_set(mlxsw_sx_port, MLXSW_PORT_SWID_DISABLED_PORT);
err_port_swid_set:
err_port_system_port_mapping_set:
-port_not_usable:
-err_port_module_check:
err_dev_addr_get:
free_percpu(mlxsw_sx_port->pcpu_stats);
err_alloc_stats:
@@ -1100,31 +1156,168 @@ err_alloc_stats:
return err;
}
-static void mlxsw_sx_port_remove(struct mlxsw_sx *mlxsw_sx, u8 local_port)
+static int mlxsw_sx_port_eth_create(struct mlxsw_sx *mlxsw_sx, u8 local_port,
+ u8 module, u8 width)
+{
+ int err;
+
+ err = mlxsw_core_port_init(mlxsw_sx->core, local_port);
+ if (err) {
+ dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to init core port\n",
+ local_port);
+ return err;
+ }
+ err = __mlxsw_sx_port_eth_create(mlxsw_sx, local_port, module, width);
+ if (err)
+ goto err_port_create;
+
+ return 0;
+
+err_port_create:
+ mlxsw_core_port_fini(mlxsw_sx->core, local_port);
+ return err;
+}
+
+static void __mlxsw_sx_port_eth_remove(struct mlxsw_sx *mlxsw_sx, u8 local_port)
{
struct mlxsw_sx_port *mlxsw_sx_port = mlxsw_sx->ports[local_port];
- if (!mlxsw_sx_port)
- return;
- mlxsw_core_port_fini(&mlxsw_sx_port->core_port);
+ mlxsw_core_port_clear(mlxsw_sx->core, local_port, mlxsw_sx);
unregister_netdev(mlxsw_sx_port->dev); /* This calls ndo_stop */
+ mlxsw_sx->ports[local_port] = NULL;
mlxsw_sx_port_swid_set(mlxsw_sx_port, MLXSW_PORT_SWID_DISABLED_PORT);
free_percpu(mlxsw_sx_port->pcpu_stats);
free_netdev(mlxsw_sx_port->dev);
}
+static bool mlxsw_sx_port_created(struct mlxsw_sx *mlxsw_sx, u8 local_port)
+{
+ return mlxsw_sx->ports[local_port] != NULL;
+}
+
+static int __mlxsw_sx_port_ib_create(struct mlxsw_sx *mlxsw_sx, u8 local_port,
+ u8 module, u8 width)
+{
+ struct mlxsw_sx_port *mlxsw_sx_port;
+ int err;
+
+ mlxsw_sx_port = kzalloc(sizeof(*mlxsw_sx_port), GFP_KERNEL);
+ if (!mlxsw_sx_port)
+ return -ENOMEM;
+ mlxsw_sx_port->mlxsw_sx = mlxsw_sx;
+ mlxsw_sx_port->local_port = local_port;
+ mlxsw_sx_port->mapping.module = module;
+
+ err = mlxsw_sx_port_system_port_mapping_set(mlxsw_sx_port);
+ if (err) {
+ dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to set system port mapping\n",
+ mlxsw_sx_port->local_port);
+ goto err_port_system_port_mapping_set;
+ }
+
+ /* Add the port to the InfiniBand SWID (1) */
+ err = mlxsw_sx_port_swid_set(mlxsw_sx_port, 1);
+ if (err) {
+ dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to set SWID\n",
+ mlxsw_sx_port->local_port);
+ goto err_port_swid_set;
+ }
+
+ /* Expose the IB port number as its front panel name */
+ err = mlxsw_sx_port_ib_port_set(mlxsw_sx_port, module + 1);
+ if (err) {
+ dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to set IB port\n",
+ mlxsw_sx_port->local_port);
+ goto err_port_ib_set;
+ }
+
+ /* Supports all speeds from SDR to FDR (bitmask) and bus widths of
+ * 1x, 2x and 4x (3-bit bitmask)
+ */
+ err = mlxsw_sx_port_ib_speed_set(mlxsw_sx_port,
+ MLXSW_REG_PTYS_IB_SPEED_EDR - 1,
+ BIT(3) - 1);
+ if (err) {
+ dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to set speed\n",
+ mlxsw_sx_port->local_port);
+ goto err_port_speed_set;
+ }
+
+ /* Change to the maximum MTU the device supports; the SMA will take
+ * care of the active MTU
+ */
+ err = mlxsw_sx_port_mtu_ib_set(mlxsw_sx_port, MLXSW_IB_DEFAULT_MTU);
+ if (err) {
+ dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to set MTU\n",
+ mlxsw_sx_port->local_port);
+ goto err_port_mtu_set;
+ }
+
+ err = mlxsw_sx_port_admin_status_set(mlxsw_sx_port, true);
+ if (err) {
+ dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to change admin state to UP\n",
+ mlxsw_sx_port->local_port);
+ goto err_port_admin_set;
+ }
+
+ mlxsw_core_port_ib_set(mlxsw_sx->core, mlxsw_sx_port->local_port,
+ mlxsw_sx_port);
+ mlxsw_sx->ports[local_port] = mlxsw_sx_port;
+ return 0;
+
+err_port_admin_set:
+err_port_mtu_set:
+err_port_speed_set:
+err_port_ib_set:
+ mlxsw_sx_port_swid_set(mlxsw_sx_port, MLXSW_PORT_SWID_DISABLED_PORT);
+err_port_swid_set:
+err_port_system_port_mapping_set:
+ kfree(mlxsw_sx_port);
+ return err;
+}
+
+static void __mlxsw_sx_port_ib_remove(struct mlxsw_sx *mlxsw_sx, u8 local_port)
+{
+ struct mlxsw_sx_port *mlxsw_sx_port = mlxsw_sx->ports[local_port];
+
+ mlxsw_core_port_clear(mlxsw_sx->core, local_port, mlxsw_sx);
+ mlxsw_sx->ports[local_port] = NULL;
+ mlxsw_sx_port_admin_status_set(mlxsw_sx_port, false);
+ mlxsw_sx_port_swid_set(mlxsw_sx_port, MLXSW_PORT_SWID_DISABLED_PORT);
+ kfree(mlxsw_sx_port);
+}
+
+static void __mlxsw_sx_port_remove(struct mlxsw_sx *mlxsw_sx, u8 local_port)
+{
+ enum devlink_port_type port_type =
+ mlxsw_core_port_type_get(mlxsw_sx->core, local_port);
+
+ if (port_type == DEVLINK_PORT_TYPE_ETH)
+ __mlxsw_sx_port_eth_remove(mlxsw_sx, local_port);
+ else if (port_type == DEVLINK_PORT_TYPE_IB)
+ __mlxsw_sx_port_ib_remove(mlxsw_sx, local_port);
+}
+
+static void mlxsw_sx_port_remove(struct mlxsw_sx *mlxsw_sx, u8 local_port)
+{
+ __mlxsw_sx_port_remove(mlxsw_sx, local_port);
+ mlxsw_core_port_fini(mlxsw_sx->core, local_port);
+}
+
static void mlxsw_sx_ports_remove(struct mlxsw_sx *mlxsw_sx)
{
int i;
for (i = 1; i < MLXSW_PORT_MAX_PORTS; i++)
- mlxsw_sx_port_remove(mlxsw_sx, i);
+ if (mlxsw_sx_port_created(mlxsw_sx, i))
+ mlxsw_sx_port_remove(mlxsw_sx, i);
kfree(mlxsw_sx->ports);
}
static int mlxsw_sx_ports_create(struct mlxsw_sx *mlxsw_sx)
{
size_t alloc_size;
+ u8 module, width;
int i;
int err;
@@ -1134,25 +1327,57 @@ static int mlxsw_sx_ports_create(struct mlxsw_sx *mlxsw_sx)
return -ENOMEM;
for (i = 1; i < MLXSW_PORT_MAX_PORTS; i++) {
- err = mlxsw_sx_port_create(mlxsw_sx, i);
+ err = mlxsw_sx_port_module_info_get(mlxsw_sx, i, &module,
+ &width);
+ if (err)
+ goto err_port_module_info_get;
+ if (!width)
+ continue;
+ err = mlxsw_sx_port_eth_create(mlxsw_sx, i, module, width);
if (err)
goto err_port_create;
}
return 0;
err_port_create:
+err_port_module_info_get:
for (i--; i >= 1; i--)
- mlxsw_sx_port_remove(mlxsw_sx, i);
+ if (mlxsw_sx_port_created(mlxsw_sx, i))
+ mlxsw_sx_port_remove(mlxsw_sx, i);
kfree(mlxsw_sx->ports);
return err;
}
+static void mlxsw_sx_pude_eth_event_func(struct mlxsw_sx_port *mlxsw_sx_port,
+ enum mlxsw_reg_pude_oper_status status)
+{
+ if (status == MLXSW_PORT_OPER_STATUS_UP) {
+ netdev_info(mlxsw_sx_port->dev, "link up\n");
+ netif_carrier_on(mlxsw_sx_port->dev);
+ } else {
+ netdev_info(mlxsw_sx_port->dev, "link down\n");
+ netif_carrier_off(mlxsw_sx_port->dev);
+ }
+}
+
+static void mlxsw_sx_pude_ib_event_func(struct mlxsw_sx_port *mlxsw_sx_port,
+ enum mlxsw_reg_pude_oper_status status)
+{
+ if (status == MLXSW_PORT_OPER_STATUS_UP)
+ pr_info("ib link for port %d - up\n",
+ mlxsw_sx_port->mapping.module + 1);
+ else
+ pr_info("ib link for port %d - down\n",
+ mlxsw_sx_port->mapping.module + 1);
+}
+
static void mlxsw_sx_pude_event_func(const struct mlxsw_reg_info *reg,
char *pude_pl, void *priv)
{
struct mlxsw_sx *mlxsw_sx = priv;
struct mlxsw_sx_port *mlxsw_sx_port;
enum mlxsw_reg_pude_oper_status status;
+ enum devlink_port_type port_type;
u8 local_port;
local_port = mlxsw_reg_pude_local_port_get(pude_pl);
@@ -1164,59 +1389,11 @@ static void mlxsw_sx_pude_event_func(const struct mlxsw_reg_info *reg,
}
status = mlxsw_reg_pude_oper_status_get(pude_pl);
- if (status == MLXSW_PORT_OPER_STATUS_UP) {
- netdev_info(mlxsw_sx_port->dev, "link up\n");
- netif_carrier_on(mlxsw_sx_port->dev);
- } else {
- netdev_info(mlxsw_sx_port->dev, "link down\n");
- netif_carrier_off(mlxsw_sx_port->dev);
- }
-}
-
-static struct mlxsw_event_listener mlxsw_sx_pude_event = {
- .func = mlxsw_sx_pude_event_func,
- .trap_id = MLXSW_TRAP_ID_PUDE,
-};
-
-static int mlxsw_sx_event_register(struct mlxsw_sx *mlxsw_sx,
- enum mlxsw_event_trap_id trap_id)
-{
- struct mlxsw_event_listener *el;
- char hpkt_pl[MLXSW_REG_HPKT_LEN];
- int err;
-
- switch (trap_id) {
- case MLXSW_TRAP_ID_PUDE:
- el = &mlxsw_sx_pude_event;
- break;
- }
- err = mlxsw_core_event_listener_register(mlxsw_sx->core, el, mlxsw_sx);
- if (err)
- return err;
-
- mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_FORWARD, trap_id);
- err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(hpkt), hpkt_pl);
- if (err)
- goto err_event_trap_set;
-
- return 0;
-
-err_event_trap_set:
- mlxsw_core_event_listener_unregister(mlxsw_sx->core, el, mlxsw_sx);
- return err;
-}
-
-static void mlxsw_sx_event_unregister(struct mlxsw_sx *mlxsw_sx,
- enum mlxsw_event_trap_id trap_id)
-{
- struct mlxsw_event_listener *el;
-
- switch (trap_id) {
- case MLXSW_TRAP_ID_PUDE:
- el = &mlxsw_sx_pude_event;
- break;
- }
- mlxsw_core_event_listener_unregister(mlxsw_sx->core, el, mlxsw_sx);
+ port_type = mlxsw_core_port_type_get(mlxsw_sx->core, local_port);
+ if (port_type == DEVLINK_PORT_TYPE_ETH)
+ mlxsw_sx_pude_eth_event_func(mlxsw_sx_port, status);
+ else if (port_type == DEVLINK_PORT_TYPE_IB)
+ mlxsw_sx_pude_ib_event_func(mlxsw_sx_port, status);
}
static void mlxsw_sx_rx_listener_func(struct sk_buff *skb, u8 local_port,
@@ -1244,142 +1421,110 @@ static void mlxsw_sx_rx_listener_func(struct sk_buff *skb, u8 local_port,
netif_receive_skb(skb);
}
-static const struct mlxsw_rx_listener mlxsw_sx_rx_listener[] = {
- {
- .func = mlxsw_sx_rx_listener_func,
- .local_port = MLXSW_PORT_DONT_CARE,
- .trap_id = MLXSW_TRAP_ID_FDB_MC,
- },
- /* Traps for specific L2 packet types, not trapped as FDB MC */
- {
- .func = mlxsw_sx_rx_listener_func,
- .local_port = MLXSW_PORT_DONT_CARE,
- .trap_id = MLXSW_TRAP_ID_STP,
- },
- {
- .func = mlxsw_sx_rx_listener_func,
- .local_port = MLXSW_PORT_DONT_CARE,
- .trap_id = MLXSW_TRAP_ID_LACP,
- },
- {
- .func = mlxsw_sx_rx_listener_func,
- .local_port = MLXSW_PORT_DONT_CARE,
- .trap_id = MLXSW_TRAP_ID_EAPOL,
- },
- {
- .func = mlxsw_sx_rx_listener_func,
- .local_port = MLXSW_PORT_DONT_CARE,
- .trap_id = MLXSW_TRAP_ID_LLDP,
- },
- {
- .func = mlxsw_sx_rx_listener_func,
- .local_port = MLXSW_PORT_DONT_CARE,
- .trap_id = MLXSW_TRAP_ID_MMRP,
- },
- {
- .func = mlxsw_sx_rx_listener_func,
- .local_port = MLXSW_PORT_DONT_CARE,
- .trap_id = MLXSW_TRAP_ID_MVRP,
- },
- {
- .func = mlxsw_sx_rx_listener_func,
- .local_port = MLXSW_PORT_DONT_CARE,
- .trap_id = MLXSW_TRAP_ID_RPVST,
- },
- {
- .func = mlxsw_sx_rx_listener_func,
- .local_port = MLXSW_PORT_DONT_CARE,
- .trap_id = MLXSW_TRAP_ID_DHCP,
- },
- {
- .func = mlxsw_sx_rx_listener_func,
- .local_port = MLXSW_PORT_DONT_CARE,
- .trap_id = MLXSW_TRAP_ID_IGMP_QUERY,
- },
- {
- .func = mlxsw_sx_rx_listener_func,
- .local_port = MLXSW_PORT_DONT_CARE,
- .trap_id = MLXSW_TRAP_ID_IGMP_V1_REPORT,
- },
- {
- .func = mlxsw_sx_rx_listener_func,
- .local_port = MLXSW_PORT_DONT_CARE,
- .trap_id = MLXSW_TRAP_ID_IGMP_V2_REPORT,
- },
- {
- .func = mlxsw_sx_rx_listener_func,
- .local_port = MLXSW_PORT_DONT_CARE,
- .trap_id = MLXSW_TRAP_ID_IGMP_V2_LEAVE,
- },
- {
- .func = mlxsw_sx_rx_listener_func,
- .local_port = MLXSW_PORT_DONT_CARE,
- .trap_id = MLXSW_TRAP_ID_IGMP_V3_REPORT,
- },
+static int mlxsw_sx_port_type_set(struct mlxsw_core *mlxsw_core, u8 local_port,
+ enum devlink_port_type new_type)
+{
+ struct mlxsw_sx *mlxsw_sx = mlxsw_core_driver_priv(mlxsw_core);
+ u8 module, width;
+ int err;
+
+ if (new_type == DEVLINK_PORT_TYPE_AUTO)
+ return -EOPNOTSUPP;
+
+ __mlxsw_sx_port_remove(mlxsw_sx, local_port);
+ err = mlxsw_sx_port_module_info_get(mlxsw_sx, local_port, &module,
+ &width);
+ if (err)
+ goto err_port_module_info_get;
+
+ if (new_type == DEVLINK_PORT_TYPE_ETH)
+ err = __mlxsw_sx_port_eth_create(mlxsw_sx, local_port, module,
+ width);
+ else if (new_type == DEVLINK_PORT_TYPE_IB)
+ err = __mlxsw_sx_port_ib_create(mlxsw_sx, local_port, module,
+ width);
+
+err_port_module_info_get:
+ return err;
+}
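With .port_type_set wired into the driver ops, the Ethernet/InfiniBand flavor of a SwitchX-2 port becomes runtime-switchable from userspace through devlink — something along the lines of devlink port set pci/0000:01:00.0/1 type ib (the device handle here is hypothetical) — and the function above services that request by tearing the port down in its old flavor and recreating it with the same module and width in the new one.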
+
+#define MLXSW_SX_RXL(_trap_id) \
+ MLXSW_RXL(mlxsw_sx_rx_listener_func, _trap_id, TRAP_TO_CPU, \
+ false, SX2_RX, FORWARD)
+
+static const struct mlxsw_listener mlxsw_sx_listener[] = {
+ MLXSW_EVENTL(mlxsw_sx_pude_event_func, PUDE, EMAD),
+ MLXSW_SX_RXL(FDB_MC),
+ MLXSW_SX_RXL(STP),
+ MLXSW_SX_RXL(LACP),
+ MLXSW_SX_RXL(EAPOL),
+ MLXSW_SX_RXL(LLDP),
+ MLXSW_SX_RXL(MMRP),
+ MLXSW_SX_RXL(MVRP),
+ MLXSW_SX_RXL(RPVST),
+ MLXSW_SX_RXL(DHCP),
+ MLXSW_SX_RXL(IGMP_QUERY),
+ MLXSW_SX_RXL(IGMP_V1_REPORT),
+ MLXSW_SX_RXL(IGMP_V2_REPORT),
+ MLXSW_SX_RXL(IGMP_V2_LEAVE),
+ MLXSW_SX_RXL(IGMP_V3_REPORT),
};
static int mlxsw_sx_traps_init(struct mlxsw_sx *mlxsw_sx)
{
char htgt_pl[MLXSW_REG_HTGT_LEN];
- char hpkt_pl[MLXSW_REG_HPKT_LEN];
int i;
int err;
- mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_RX);
+ mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_SX2_RX,
+ MLXSW_REG_HTGT_INVALID_POLICER,
+ MLXSW_REG_HTGT_DEFAULT_PRIORITY,
+ MLXSW_REG_HTGT_DEFAULT_TC);
+ mlxsw_reg_htgt_local_path_rdq_set(htgt_pl,
+ MLXSW_REG_HTGT_LOCAL_PATH_RDQ_SX2_RX);
+
err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(htgt), htgt_pl);
if (err)
return err;
- mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_CTRL);
+ mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_SX2_CTRL,
+ MLXSW_REG_HTGT_INVALID_POLICER,
+ MLXSW_REG_HTGT_DEFAULT_PRIORITY,
+ MLXSW_REG_HTGT_DEFAULT_TC);
+ mlxsw_reg_htgt_local_path_rdq_set(htgt_pl,
+ MLXSW_REG_HTGT_LOCAL_PATH_RDQ_SX2_CTRL);
+
err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(htgt), htgt_pl);
if (err)
return err;
- for (i = 0; i < ARRAY_SIZE(mlxsw_sx_rx_listener); i++) {
- err = mlxsw_core_rx_listener_register(mlxsw_sx->core,
- &mlxsw_sx_rx_listener[i],
- mlxsw_sx);
+ for (i = 0; i < ARRAY_SIZE(mlxsw_sx_listener); i++) {
+ err = mlxsw_core_trap_register(mlxsw_sx->core,
+ &mlxsw_sx_listener[i],
+ mlxsw_sx);
if (err)
- goto err_rx_listener_register;
+ goto err_listener_register;
- mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_TRAP_TO_CPU,
- mlxsw_sx_rx_listener[i].trap_id);
- err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(hpkt), hpkt_pl);
- if (err)
- goto err_rx_trap_set;
}
return 0;
-err_rx_trap_set:
- mlxsw_core_rx_listener_unregister(mlxsw_sx->core,
- &mlxsw_sx_rx_listener[i],
- mlxsw_sx);
-err_rx_listener_register:
+err_listener_register:
for (i--; i >= 0; i--) {
- mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_FORWARD,
- mlxsw_sx_rx_listener[i].trap_id);
- mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(hpkt), hpkt_pl);
-
- mlxsw_core_rx_listener_unregister(mlxsw_sx->core,
- &mlxsw_sx_rx_listener[i],
- mlxsw_sx);
+ mlxsw_core_trap_unregister(mlxsw_sx->core,
+ &mlxsw_sx_listener[i],
+ mlxsw_sx);
}
return err;
}
static void mlxsw_sx_traps_fini(struct mlxsw_sx *mlxsw_sx)
{
- char hpkt_pl[MLXSW_REG_HPKT_LEN];
int i;
- for (i = 0; i < ARRAY_SIZE(mlxsw_sx_rx_listener); i++) {
- mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_FORWARD,
- mlxsw_sx_rx_listener[i].trap_id);
- mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(hpkt), hpkt_pl);
-
- mlxsw_core_rx_listener_unregister(mlxsw_sx->core,
- &mlxsw_sx_rx_listener[i],
- mlxsw_sx);
+ for (i = 0; i < ARRAY_SIZE(mlxsw_sx_listener); i++) {
+ mlxsw_core_trap_unregister(mlxsw_sx->core,
+ &mlxsw_sx_listener[i],
+ mlxsw_sx);
}
}
@@ -1451,6 +1596,20 @@ static int mlxsw_sx_flood_init(struct mlxsw_sx *mlxsw_sx)
return mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(sgcr), sgcr_pl);
}
+static int mlxsw_sx_basic_trap_groups_set(struct mlxsw_core *mlxsw_core)
+{
+ char htgt_pl[MLXSW_REG_HTGT_LEN];
+
+ mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_EMAD,
+ MLXSW_REG_HTGT_INVALID_POLICER,
+ MLXSW_REG_HTGT_DEFAULT_PRIORITY,
+ MLXSW_REG_HTGT_DEFAULT_TC);
+ mlxsw_reg_htgt_swid_set(htgt_pl, MLXSW_PORT_SWID_ALL_SWIDS);
+ mlxsw_reg_htgt_local_path_rdq_set(htgt_pl,
+ MLXSW_REG_HTGT_LOCAL_PATH_RDQ_SX2_EMAD);
+ return mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl);
+}
+
static int mlxsw_sx_init(struct mlxsw_core *mlxsw_core,
const struct mlxsw_bus_info *mlxsw_bus_info)
{
@@ -1472,16 +1631,10 @@ static int mlxsw_sx_init(struct mlxsw_core *mlxsw_core,
return err;
}
- err = mlxsw_sx_event_register(mlxsw_sx, MLXSW_TRAP_ID_PUDE);
- if (err) {
- dev_err(mlxsw_sx->bus_info->dev, "Failed to register for PUDE events\n");
- goto err_event_register;
- }
-
err = mlxsw_sx_traps_init(mlxsw_sx);
if (err) {
- dev_err(mlxsw_sx->bus_info->dev, "Failed to set traps for RX\n");
- goto err_rx_listener_register;
+ dev_err(mlxsw_sx->bus_info->dev, "Failed to set traps\n");
+ goto err_listener_register;
}
err = mlxsw_sx_flood_init(mlxsw_sx);
@@ -1494,9 +1647,7 @@ static int mlxsw_sx_init(struct mlxsw_core *mlxsw_core,
err_flood_init:
mlxsw_sx_traps_fini(mlxsw_sx);
-err_rx_listener_register:
- mlxsw_sx_event_unregister(mlxsw_sx, MLXSW_TRAP_ID_PUDE);
-err_event_register:
+err_listener_register:
mlxsw_sx_ports_remove(mlxsw_sx);
return err;
}
@@ -1506,7 +1657,6 @@ static void mlxsw_sx_fini(struct mlxsw_core *mlxsw_core)
struct mlxsw_sx *mlxsw_sx = mlxsw_core_driver_priv(mlxsw_core);
mlxsw_sx_traps_fini(mlxsw_sx);
- mlxsw_sx_event_unregister(mlxsw_sx, MLXSW_TRAP_ID_PUDE);
mlxsw_sx_ports_remove(mlxsw_sx);
}
@@ -1529,36 +1679,66 @@ static struct mlxsw_config_profile mlxsw_sx_config_profile = {
.used_flood_mode = 1,
.flood_mode = 3,
.used_max_ib_mc = 1,
- .max_ib_mc = 0,
+ .max_ib_mc = 6,
.used_max_pkey = 1,
.max_pkey = 0,
.swid_config = {
{
.used_type = 1,
.type = MLXSW_PORT_SWID_TYPE_ETH,
+ },
+ {
+ .used_type = 1,
+ .type = MLXSW_PORT_SWID_TYPE_IB,
}
},
.resource_query_enable = 0,
};
static struct mlxsw_driver mlxsw_sx_driver = {
- .kind = MLXSW_DEVICE_KIND_SWITCHX2,
- .owner = THIS_MODULE,
+ .kind = mlxsw_sx_driver_name,
.priv_size = sizeof(struct mlxsw_sx),
.init = mlxsw_sx_init,
.fini = mlxsw_sx_fini,
+ .basic_trap_groups_set = mlxsw_sx_basic_trap_groups_set,
.txhdr_construct = mlxsw_sx_txhdr_construct,
.txhdr_len = MLXSW_TXHDR_LEN,
.profile = &mlxsw_sx_config_profile,
+ .port_type_set = mlxsw_sx_port_type_set,
+};
+
+static const struct pci_device_id mlxsw_sx_pci_id_table[] = {
+ {PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SWITCHX2), 0},
+ {0, },
+};
+
+static struct pci_driver mlxsw_sx_pci_driver = {
+ .name = mlxsw_sx_driver_name,
+ .id_table = mlxsw_sx_pci_id_table,
};
static int __init mlxsw_sx_module_init(void)
{
- return mlxsw_core_driver_register(&mlxsw_sx_driver);
+ int err;
+
+ err = mlxsw_core_driver_register(&mlxsw_sx_driver);
+ if (err)
+ return err;
+
+ err = mlxsw_pci_driver_register(&mlxsw_sx_pci_driver);
+ if (err)
+ goto err_pci_driver_register;
+
+ return 0;
+
+err_pci_driver_register:
+ mlxsw_core_driver_unregister(&mlxsw_sx_driver);
+ return err;
}
static void __exit mlxsw_sx_module_exit(void)
{
+ mlxsw_pci_driver_unregister(&mlxsw_sx_pci_driver);
mlxsw_core_driver_unregister(&mlxsw_sx_driver);
}
@@ -1568,4 +1748,4 @@ module_exit(mlxsw_sx_module_exit);
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
MODULE_DESCRIPTION("Mellanox SwitchX-2 driver");
-MODULE_MLXSW_DRIVER_ALIAS(MLXSW_DEVICE_KIND_SWITCHX2);
+MODULE_DEVICE_TABLE(pci, mlxsw_sx_pci_id_table);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/trap.h b/drivers/net/ethernet/mellanox/mlxsw/trap.h
index ed8e30186400..7ab275deacac 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/trap.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/trap.h
@@ -62,6 +62,7 @@ enum {
MLXSW_TRAP_ID_OSPF = 0x55,
MLXSW_TRAP_ID_IP2ME = 0x5F,
MLXSW_TRAP_ID_RTR_INGRESS0 = 0x70,
+ MLXSW_TRAP_ID_BGP_IPV4 = 0x88,
MLXSW_TRAP_ID_HOST_MISS_IPV4 = 0x90,
MLXSW_TRAP_ID_MAX = 0x1FF
diff --git a/drivers/net/ethernet/micrel/ks8851.c b/drivers/net/ethernet/micrel/ks8851.c
index 1edc973df4c4..e7e1aff40bd9 100644
--- a/drivers/net/ethernet/micrel/ks8851.c
+++ b/drivers/net/ethernet/micrel/ks8851.c
@@ -1063,7 +1063,6 @@ static const struct net_device_ops ks8851_netdev_ops = {
.ndo_start_xmit = ks8851_start_xmit,
.ndo_set_mac_address = ks8851_set_mac_address,
.ndo_set_rx_mode = ks8851_set_rx_mode,
- .ndo_change_mtu = eth_change_mtu,
.ndo_validate_addr = eth_validate_addr,
};
diff --git a/drivers/net/ethernet/micrel/ks8851_mll.c b/drivers/net/ethernet/micrel/ks8851_mll.c
index 2fc5cd56c0a8..db628078a4e6 100644
--- a/drivers/net/ethernet/micrel/ks8851_mll.c
+++ b/drivers/net/ethernet/micrel/ks8851_mll.c
@@ -1285,7 +1285,6 @@ static const struct net_device_ops ks_netdev_ops = {
.ndo_start_xmit = ks_start_xmit,
.ndo_set_mac_address = ks_set_mac_address,
.ndo_set_rx_mode = ks_set_rx_mode,
- .ndo_change_mtu = eth_change_mtu,
.ndo_validate_addr = eth_validate_addr,
};
diff --git a/drivers/net/ethernet/micrel/ksz884x.c b/drivers/net/ethernet/micrel/ksz884x.c
index 280e761d3a97..97f6ef1fa7d0 100644
--- a/drivers/net/ethernet/micrel/ksz884x.c
+++ b/drivers/net/ethernet/micrel/ksz884x.c
@@ -5807,24 +5807,19 @@ static int netdev_change_mtu(struct net_device *dev, int new_mtu)
if (hw->dev_count > 1)
if (dev != hw_priv->dev)
return 0;
- if (new_mtu < 60)
- return -EINVAL;
- if (dev->mtu != new_mtu) {
- hw_mtu = new_mtu + ETHERNET_HEADER_SIZE + 4;
- if (hw_mtu > MAX_RX_BUF_SIZE)
- return -EINVAL;
- if (hw_mtu > REGULAR_RX_BUF_SIZE) {
- hw->features |= RX_HUGE_FRAME;
- hw_mtu = MAX_RX_BUF_SIZE;
- } else {
- hw->features &= ~RX_HUGE_FRAME;
- hw_mtu = REGULAR_RX_BUF_SIZE;
- }
- hw_mtu = (hw_mtu + 3) & ~3;
- hw_priv->mtu = hw_mtu;
- dev->mtu = new_mtu;
+ hw_mtu = new_mtu + ETHERNET_HEADER_SIZE + 4;
+ if (hw_mtu > REGULAR_RX_BUF_SIZE) {
+ hw->features |= RX_HUGE_FRAME;
+ hw_mtu = MAX_RX_BUF_SIZE;
+ } else {
+ hw->features &= ~RX_HUGE_FRAME;
+ hw_mtu = REGULAR_RX_BUF_SIZE;
}
+ hw_mtu = (hw_mtu + 3) & ~3;
+ hw_priv->mtu = hw_mtu;
+ dev->mtu = new_mtu;
+
return 0;
}
@@ -7099,6 +7094,12 @@ static int pcidev_init(struct pci_dev *pdev, const struct pci_device_id *id)
dev->netdev_ops = &netdev_ops;
dev->ethtool_ops = &netdev_ethtool_ops;
+
+ /* MTU range: 60 - 1894 */
+ dev->min_mtu = ETH_ZLEN;
+ dev->max_mtu = MAX_RX_BUF_SIZE -
+ (ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
+
if (register_netdev(dev))
goto pcidev_init_reg_err;
port_set_power_saving(port, true);
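The ksz884x change above is one instance of a theme running through the rest of this diff (the micrel, microchip, moxart, myri10ge and natsemi hunks all follow): drivers stop range-checking MTUs themselves — and stop pointing .ndo_change_mtu at eth_change_mtu() — and instead publish their limits in dev->min_mtu/dev->max_mtu, which the networking core is assumed to validate once before the driver callback ever runs. Roughly, as a sketch rather than the literal net/core/dev.c code:

int example_dev_set_mtu(struct net_device *dev, int new_mtu)
{
	if (new_mtu < dev->min_mtu ||
	    (dev->max_mtu > 0 && new_mtu > dev->max_mtu))
		return -EINVAL;		/* rejected before the driver sees it */

	if (dev->netdev_ops->ndo_change_mtu)
		return dev->netdev_ops->ndo_change_mtu(dev, new_mtu);

	dev->mtu = new_mtu;
	return 0;
}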
diff --git a/drivers/net/ethernet/microchip/enc28j60.c b/drivers/net/ethernet/microchip/enc28j60.c
index 0a26b11ca8f6..045b9106c0ff 100644
--- a/drivers/net/ethernet/microchip/enc28j60.c
+++ b/drivers/net/ethernet/microchip/enc28j60.c
@@ -1544,7 +1544,6 @@ static const struct net_device_ops enc28j60_netdev_ops = {
.ndo_set_rx_mode = enc28j60_set_multicast_list,
.ndo_set_mac_address = enc28j60_set_mac_address,
.ndo_tx_timeout = enc28j60_tx_timeout,
- .ndo_change_mtu = eth_change_mtu,
.ndo_validate_addr = eth_validate_addr,
};
diff --git a/drivers/net/ethernet/microchip/encx24j600-regmap.c b/drivers/net/ethernet/microchip/encx24j600-regmap.c
index f3bb9055a292..44bb04d4d21b 100644
--- a/drivers/net/ethernet/microchip/encx24j600-regmap.c
+++ b/drivers/net/ethernet/microchip/encx24j600-regmap.c
@@ -26,11 +26,11 @@ static inline bool is_bits_set(int value, int mask)
}
static int encx24j600_switch_bank(struct encx24j600_context *ctx,
- int bank)
+ int bank)
{
int ret = 0;
-
int bank_opcode = BANK_SELECT(bank);
+
ret = spi_write(ctx->spi, &bank_opcode, 1);
if (ret == 0)
ctx->bank = bank;
@@ -39,7 +39,7 @@ static int encx24j600_switch_bank(struct encx24j600_context *ctx,
}
static int encx24j600_cmdn(struct encx24j600_context *ctx, u8 opcode,
- const void *buf, size_t len)
+ const void *buf, size_t len)
{
struct spi_message m;
struct spi_transfer t[2] = { { .tx_buf = &opcode, .len = 1, },
@@ -54,12 +54,14 @@ static int encx24j600_cmdn(struct encx24j600_context *ctx, u8 opcode,
static void regmap_lock_mutex(void *context)
{
struct encx24j600_context *ctx = context;
+
mutex_lock(&ctx->mutex);
}
static void regmap_unlock_mutex(void *context)
{
struct encx24j600_context *ctx = context;
+
mutex_unlock(&ctx->mutex);
}
@@ -128,6 +130,7 @@ static int regmap_encx24j600_sfr_update(struct encx24j600_context *ctx,
if (reg < 0x80) {
int ret = 0;
+
cmd = banked_code | banked_reg;
if ((banked_reg < 0x16) && (ctx->bank != bank))
ret = encx24j600_switch_bank(ctx, bank);
@@ -174,6 +177,7 @@ static int regmap_encx24j600_sfr_write(void *context, u8 reg, u8 *val,
size_t len)
{
struct encx24j600_context *ctx = context;
+
return regmap_encx24j600_sfr_update(ctx, reg, val, len, WCRU, WCRCODE);
}
@@ -228,9 +232,9 @@ int regmap_encx24j600_spi_write(void *context, u8 reg, const u8 *data,
if (reg < 0xc0)
return encx24j600_cmdn(ctx, reg, data, count);
- else
- /* SPI 1-byte command. Ignore data */
- return spi_write(ctx->spi, &reg, 1);
+
+ /* SPI 1-byte command. Ignore data */
+ return spi_write(ctx->spi, &reg, 1);
}
EXPORT_SYMBOL_GPL(regmap_encx24j600_spi_write);
@@ -495,6 +499,7 @@ static struct regmap_config phycfg = {
.writeable_reg = encx24j600_phymap_writeable,
.volatile_reg = encx24j600_phymap_volatile,
};
+
static struct regmap_bus phymap_encx24j600 = {
.reg_write = regmap_encx24j600_phy_reg_write,
.reg_read = regmap_encx24j600_phy_reg_read,
diff --git a/drivers/net/ethernet/microchip/encx24j600.c b/drivers/net/ethernet/microchip/encx24j600.c
index b14f0305aa31..fbce6166504e 100644
--- a/drivers/net/ethernet/microchip/encx24j600.c
+++ b/drivers/net/ethernet/microchip/encx24j600.c
@@ -30,7 +30,7 @@
#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK)
static int debug = -1;
-module_param(debug, int, 0);
+module_param(debug, int, 0000);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
/* SRAM memory layout:
@@ -105,6 +105,7 @@ static u16 encx24j600_read_reg(struct encx24j600_priv *priv, u8 reg)
struct net_device *dev = priv->ndev;
unsigned int val = 0;
int ret = regmap_read(priv->ctx.regmap, reg, &val);
+
if (unlikely(ret))
netif_err(priv, drv, dev, "%s: error %d reading reg %02x\n",
__func__, ret, reg);
@@ -115,6 +116,7 @@ static void encx24j600_write_reg(struct encx24j600_priv *priv, u8 reg, u16 val)
{
struct net_device *dev = priv->ndev;
int ret = regmap_write(priv->ctx.regmap, reg, val);
+
if (unlikely(ret))
netif_err(priv, drv, dev, "%s: error %d writing reg %02x=%04x\n",
__func__, ret, reg, val);
@@ -125,6 +127,7 @@ static void encx24j600_update_reg(struct encx24j600_priv *priv, u8 reg,
{
struct net_device *dev = priv->ndev;
int ret = regmap_update_bits(priv->ctx.regmap, reg, mask, val);
+
if (unlikely(ret))
netif_err(priv, drv, dev, "%s: error %d updating reg %02x=%04x~%04x\n",
__func__, ret, reg, val, mask);
@@ -135,6 +138,7 @@ static u16 encx24j600_read_phy(struct encx24j600_priv *priv, u8 reg)
struct net_device *dev = priv->ndev;
unsigned int val = 0;
int ret = regmap_read(priv->ctx.phymap, reg, &val);
+
if (unlikely(ret))
netif_err(priv, drv, dev, "%s: error %d reading %02x\n",
__func__, ret, reg);
@@ -145,6 +149,7 @@ static void encx24j600_write_phy(struct encx24j600_priv *priv, u8 reg, u16 val)
{
struct net_device *dev = priv->ndev;
int ret = regmap_write(priv->ctx.phymap, reg, val);
+
if (unlikely(ret))
netif_err(priv, drv, dev, "%s: error %d writing reg %02x=%04x\n",
__func__, ret, reg, val);
@@ -164,6 +169,7 @@ static void encx24j600_cmd(struct encx24j600_priv *priv, u8 cmd)
{
struct net_device *dev = priv->ndev;
int ret = regmap_write(priv->ctx.regmap, cmd, 0);
+
if (unlikely(ret))
netif_err(priv, drv, dev, "%s: error %d with cmd %02x\n",
__func__, ret, cmd);
@@ -173,6 +179,7 @@ static int encx24j600_raw_read(struct encx24j600_priv *priv, u8 reg, u8 *data,
size_t count)
{
int ret;
+
mutex_lock(&priv->ctx.mutex);
ret = regmap_encx24j600_spi_read(&priv->ctx, reg, data, count);
mutex_unlock(&priv->ctx.mutex);
@@ -184,6 +191,7 @@ static int encx24j600_raw_write(struct encx24j600_priv *priv, u8 reg,
const u8 *data, size_t count)
{
int ret;
+
mutex_lock(&priv->ctx.mutex);
ret = regmap_encx24j600_spi_write(&priv->ctx, reg, data, count);
mutex_unlock(&priv->ctx.mutex);
@@ -194,6 +202,7 @@ static int encx24j600_raw_write(struct encx24j600_priv *priv, u8 reg,
static void encx24j600_update_phcon1(struct encx24j600_priv *priv)
{
u16 phcon1 = encx24j600_read_phy(priv, PHCON1);
+
if (priv->autoneg == AUTONEG_ENABLE) {
phcon1 |= ANEN | RENEG;
} else {
@@ -328,6 +337,7 @@ static int encx24j600_receive_packet(struct encx24j600_priv *priv,
{
struct net_device *dev = priv->ndev;
struct sk_buff *skb = netdev_alloc_skb(dev, rsv->len + NET_IP_ALIGN);
+
if (!skb) {
pr_err_ratelimited("RX: OOM: packet dropped\n");
dev->stats.rx_dropped++;
@@ -346,7 +356,6 @@ static int encx24j600_receive_packet(struct encx24j600_priv *priv,
/* Maintain stats */
dev->stats.rx_packets++;
dev->stats.rx_bytes += rsv->len;
- priv->next_packet = rsv->next_packet;
netif_rx(skb);
@@ -383,6 +392,8 @@ static void encx24j600_rx_packets(struct encx24j600_priv *priv, u8 packet_count)
encx24j600_receive_packet(priv, &rsv);
}
+ priv->next_packet = rsv.next_packet;
+
newrxtail = priv->next_packet - 2;
if (newrxtail == ENC_RX_BUF_START)
newrxtail = SRAM_SIZE - 2;
@@ -827,6 +838,7 @@ static void encx24j600_set_multicast_list(struct net_device *dev)
static void encx24j600_hw_tx(struct encx24j600_priv *priv)
{
struct net_device *dev = priv->ndev;
+
netif_info(priv, tx_queued, dev, "TX Packet Len:%d\n",
priv->tx_skb->len);
@@ -894,7 +906,6 @@ static void encx24j600_tx_timeout(struct net_device *dev)
dev->stats.tx_errors++;
netif_wake_queue(dev);
- return;
}
static int encx24j600_get_regs_len(struct net_device *dev)
@@ -957,12 +968,14 @@ static int encx24j600_set_settings(struct net_device *dev,
static u32 encx24j600_get_msglevel(struct net_device *dev)
{
struct encx24j600_priv *priv = netdev_priv(dev);
+
return priv->msg_enable;
}
static void encx24j600_set_msglevel(struct net_device *dev, u32 val)
{
struct encx24j600_priv *priv = netdev_priv(dev);
+
priv->msg_enable = val;
}
diff --git a/drivers/net/ethernet/moxa/moxart_ether.c b/drivers/net/ethernet/moxa/moxart_ether.c
index 4367dd6879a2..9774b50cff6e 100644
--- a/drivers/net/ethernet/moxa/moxart_ether.c
+++ b/drivers/net/ethernet/moxa/moxart_ether.c
@@ -444,7 +444,6 @@ static struct net_device_ops moxart_netdev_ops = {
.ndo_set_rx_mode = moxart_mac_set_rx_mode,
.ndo_set_mac_address = moxart_set_mac_address,
.ndo_validate_addr = eth_validate_addr,
- .ndo_change_mtu = eth_change_mtu,
};
static int moxart_mac_probe(struct platform_device *pdev)
diff --git a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
index 6d1a956e3f77..e506ca876d0d 100644
--- a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
+++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
@@ -289,7 +289,7 @@ static char *myri10ge_fw_names[MYRI10GE_MAX_BOARDS] =
{[0 ... (MYRI10GE_MAX_BOARDS - 1)] = NULL };
module_param_array_named(myri10ge_fw_names, myri10ge_fw_names, charp, NULL,
0444);
-MODULE_PARM_DESC(myri10ge_fw_name, "Firmware image names per board");
+MODULE_PARM_DESC(myri10ge_fw_names, "Firmware image names per board");
static int myri10ge_ecrc_enable = 1;
module_param(myri10ge_ecrc_enable, int, S_IRUGO);
@@ -3232,10 +3232,6 @@ static int myri10ge_change_mtu(struct net_device *dev, int new_mtu)
struct myri10ge_priv *mgp = netdev_priv(dev);
int error = 0;
- if ((new_mtu < 68) || (ETH_HLEN + new_mtu > MYRI10GE_MAX_ETHER_MTU)) {
- netdev_err(dev, "new mtu (%d) is not valid\n", new_mtu);
- return -EINVAL;
- }
netdev_info(dev, "changing mtu from %d to %d\n", dev->mtu, new_mtu);
if (mgp->running) {
/* if we change the mtu on an active device, we must
@@ -4086,13 +4082,19 @@ static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
myri10ge_setup_dca(mgp);
#endif
pci_set_drvdata(pdev, mgp);
- if ((myri10ge_initial_mtu + ETH_HLEN) > MYRI10GE_MAX_ETHER_MTU)
- myri10ge_initial_mtu = MYRI10GE_MAX_ETHER_MTU - ETH_HLEN;
- if ((myri10ge_initial_mtu + ETH_HLEN) < 68)
- myri10ge_initial_mtu = 68;
- netdev->netdev_ops = &myri10ge_netdev_ops;
+ /* MTU range: 68 - 9000 */
+ netdev->min_mtu = ETH_MIN_MTU;
+ netdev->max_mtu = MYRI10GE_MAX_ETHER_MTU - ETH_HLEN;
+
+ if (myri10ge_initial_mtu > netdev->max_mtu)
+ myri10ge_initial_mtu = netdev->max_mtu;
+ if (myri10ge_initial_mtu < netdev->min_mtu)
+ myri10ge_initial_mtu = netdev->min_mtu;
+
netdev->mtu = myri10ge_initial_mtu;
+
+ netdev->netdev_ops = &myri10ge_netdev_ops;
netdev->hw_features = mgp->features | NETIF_F_RXCSUM;
/* fake NETIF_F_HW_VLAN_CTAG_RX for good GRO performance */
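myri10ge is one instance of the tree-wide MTU-range conversion: the open-coded bounds check leaves ndo_change_mtu and the driver instead publishes netdev->min_mtu/max_mtu, which the core validates before the callback ever runs (ns83820 further down advertises min_mtu = 0 to keep its old unbounded-low behaviour). A simplified sketch of the core-side check, modelled on dev_set_mtu(); treat it as an approximation, not the exact upstream code:

    #include <linux/netdevice.h>

    /* Sketch: what the core now rejects before ->ndo_change_mtu() is
     * called; a bound of zero means "driver did not advertise one".
     */
    static int validate_mtu_sketch(struct net_device *dev, int new_mtu)
    {
            if (new_mtu < 0)
                    return -EINVAL;
            if (dev->min_mtu && new_mtu < dev->min_mtu)
                    return -EINVAL;
            if (dev->max_mtu && new_mtu > dev->max_mtu)
                    return -EINVAL;
            return 0;
    }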
diff --git a/drivers/net/ethernet/natsemi/jazzsonic.c b/drivers/net/ethernet/natsemi/jazzsonic.c
index acf3f11e38cc..a6caeb567c0d 100644
--- a/drivers/net/ethernet/natsemi/jazzsonic.c
+++ b/drivers/net/ethernet/natsemi/jazzsonic.c
@@ -110,7 +110,6 @@ static const struct net_device_ops sonic_netdev_ops = {
.ndo_get_stats = sonic_get_stats,
.ndo_set_rx_mode = sonic_multicast_list,
.ndo_tx_timeout = sonic_tx_timeout,
- .ndo_change_mtu = eth_change_mtu,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
};
diff --git a/drivers/net/ethernet/natsemi/macsonic.c b/drivers/net/ethernet/natsemi/macsonic.c
index d98f5b8a1c66..3ca6ae7caf55 100644
--- a/drivers/net/ethernet/natsemi/macsonic.c
+++ b/drivers/net/ethernet/natsemi/macsonic.c
@@ -190,7 +190,6 @@ static const struct net_device_ops macsonic_netdev_ops = {
.ndo_tx_timeout = sonic_tx_timeout,
.ndo_get_stats = sonic_get_stats,
.ndo_validate_addr = eth_validate_addr,
- .ndo_change_mtu = eth_change_mtu,
.ndo_set_mac_address = eth_mac_addr,
};
diff --git a/drivers/net/ethernet/natsemi/natsemi.c b/drivers/net/ethernet/natsemi/natsemi.c
index ed89029ff75b..22b0821c1da0 100644
--- a/drivers/net/ethernet/natsemi/natsemi.c
+++ b/drivers/net/ethernet/natsemi/natsemi.c
@@ -929,6 +929,10 @@ static int natsemi_probe1(struct pci_dev *pdev, const struct pci_device_id *ent)
dev->ethtool_ops = &ethtool_ops;
+ /* MTU range: 64 - 2024 */
+ dev->min_mtu = ETH_ZLEN + ETH_FCS_LEN;
+ dev->max_mtu = NATSEMI_RX_LIMIT - NATSEMI_HEADERS;
+
if (mtu)
dev->mtu = mtu;
@@ -2526,9 +2530,6 @@ static void __set_rx_mode(struct net_device *dev)
static int natsemi_change_mtu(struct net_device *dev, int new_mtu)
{
- if (new_mtu < 64 || new_mtu > NATSEMI_RX_LIMIT-NATSEMI_HEADERS)
- return -EINVAL;
-
dev->mtu = new_mtu;
/* synchronized against open : rtnl_lock() held by caller */
diff --git a/drivers/net/ethernet/natsemi/ns83820.c b/drivers/net/ethernet/natsemi/ns83820.c
index 569ade6cf85c..93c4bdc0cdca 100644
--- a/drivers/net/ethernet/natsemi/ns83820.c
+++ b/drivers/net/ethernet/natsemi/ns83820.c
@@ -919,7 +919,7 @@ netdev_mangle_me_harder_failed:
ndev->stats.rx_dropped++;
}
} else {
- kfree_skb(skb);
+ dev_kfree_skb_irq(skb);
}
nr++;
@@ -1679,14 +1679,6 @@ static void ns83820_getmac(struct ns83820 *dev, u8 *mac)
}
}
-static int ns83820_change_mtu(struct net_device *ndev, int new_mtu)
-{
- if (new_mtu > RX_BUF_SIZE)
- return -EINVAL;
- ndev->mtu = new_mtu;
- return 0;
-}
-
static void ns83820_set_multicast(struct net_device *ndev)
{
struct ns83820 *dev = PRIV(ndev);
@@ -1933,7 +1925,6 @@ static const struct net_device_ops netdev_ops = {
.ndo_stop = ns83820_stop,
.ndo_start_xmit = ns83820_hard_start_xmit,
.ndo_get_stats = ns83820_get_stats,
- .ndo_change_mtu = ns83820_change_mtu,
.ndo_set_rx_mode = ns83820_set_multicast,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
@@ -2190,6 +2181,8 @@ static int ns83820_init_one(struct pci_dev *pci_dev,
ndev->features |= NETIF_F_SG;
ndev->features |= NETIF_F_IP_CSUM;
+ ndev->min_mtu = 0;
+
#ifdef NS83820_VLAN_ACCEL_SUPPORT
/* We also support hardware vlan acceleration */
ndev->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
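The kfree_skb() -> dev_kfree_skb_irq() switch above is a context fix, not cosmetics: that branch runs from the device interrupt, where kfree_skb() must not be called; dev_kfree_skb_irq() instead queues the skb on a per-CPU completion list and lets the NET_TX softirq do the actual free. Rule of thumb, with fetch_completed_skb() as a hypothetical stand-in for the driver's descriptor walk:

    #include <linux/interrupt.h>
    #include <linux/netdevice.h>

    struct sk_buff *fetch_completed_skb(void *data); /* hypothetical */

    /* process context, BHs on -> kfree_skb() / dev_kfree_skb()
     * hard IRQ or IRQs off    -> dev_kfree_skb_irq()
     * could be either         -> dev_kfree_skb_any()
     */
    static irqreturn_t example_isr(int irq, void *data)
    {
            struct sk_buff *skb = fetch_completed_skb(data);

            if (skb)
                    dev_kfree_skb_irq(skb); /* freed later in softirq */
            return IRQ_HANDLED;
    }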
diff --git a/drivers/net/ethernet/natsemi/xtsonic.c b/drivers/net/ethernet/natsemi/xtsonic.c
index 7007d212f3e4..9ee0f69a83c0 100644
--- a/drivers/net/ethernet/natsemi/xtsonic.c
+++ b/drivers/net/ethernet/natsemi/xtsonic.c
@@ -124,7 +124,6 @@ static const struct net_device_ops xtsonic_netdev_ops = {
.ndo_set_rx_mode = sonic_multicast_list,
.ndo_tx_timeout = sonic_tx_timeout,
.ndo_validate_addr = eth_validate_addr,
- .ndo_change_mtu = eth_change_mtu,
.ndo_set_mac_address = eth_mac_addr,
};
diff --git a/drivers/net/ethernet/neterion/s2io.c b/drivers/net/ethernet/neterion/s2io.c
index eaa37c079a7c..564f682fa4dc 100644
--- a/drivers/net/ethernet/neterion/s2io.c
+++ b/drivers/net/ethernet/neterion/s2io.c
@@ -6678,11 +6678,6 @@ static int s2io_change_mtu(struct net_device *dev, int new_mtu)
struct s2io_nic *sp = netdev_priv(dev);
int ret = 0;
- if ((new_mtu < MIN_MTU) || (new_mtu > S2IO_JUMBO_SIZE)) {
- DBG_PRINT(ERR_DBG, "%s: MTU size is invalid.\n", dev->name);
- return -EPERM;
- }
-
dev->mtu = new_mtu;
if (netif_running(dev)) {
s2io_stop_all_tx_queue(sp);
@@ -8019,6 +8014,10 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
config->mc_start_offset = S2IO_HERC_MC_ADDR_START_OFFSET;
}
+ /* MTU range: 46 - 9600 */
+ dev->min_mtu = MIN_MTU;
+ dev->max_mtu = S2IO_JUMBO_SIZE;
+
/* store mac addresses from CAM to s2io_nic structure */
do_s2io_store_unicast_mc(sp);
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-config.h b/drivers/net/ethernet/neterion/vxge/vxge-config.h
index 6ce4412fcc1a..cfa970417f81 100644
--- a/drivers/net/ethernet/neterion/vxge/vxge-config.h
+++ b/drivers/net/ethernet/neterion/vxge/vxge-config.h
@@ -27,7 +27,7 @@
(((size) - (((u64)adrs) & ((size)-1))) & ((size)-1))
#endif
-#define VXGE_HW_MIN_MTU 68
+#define VXGE_HW_MIN_MTU ETH_MIN_MTU
#define VXGE_HW_MAX_MTU 9600
#define VXGE_HW_DEFAULT_MTU 1500
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-main.c b/drivers/net/ethernet/neterion/vxge/vxge-main.c
index e0993eba5df3..e07b936f64ec 100644
--- a/drivers/net/ethernet/neterion/vxge/vxge-main.c
+++ b/drivers/net/ethernet/neterion/vxge/vxge-main.c
@@ -3074,11 +3074,6 @@ static int vxge_change_mtu(struct net_device *dev, int new_mtu)
vxge_debug_entryexit(vdev->level_trace,
"%s:%d", __func__, __LINE__);
- if ((new_mtu < VXGE_HW_MIN_MTU) || (new_mtu > VXGE_HW_MAX_MTU)) {
- vxge_debug_init(vdev->level_err,
- "%s: mtu size is invalid", dev->name);
- return -EPERM;
- }
/* check if device is down already */
if (unlikely(!is_vxge_card_up(vdev))) {
@@ -3462,6 +3457,10 @@ static int vxge_device_register(struct __vxge_hw_device *hldev,
"%s : using High DMA", __func__);
}
+ /* MTU range: 68 - 9600 */
+ ndev->min_mtu = VXGE_HW_MIN_MTU;
+ ndev->max_mtu = VXGE_HW_MAX_MTU;
+
ret = register_netdev(ndev);
if (ret) {
vxge_debug_init(vxge_hw_device_trace_level_get(hldev),
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_bpf.h b/drivers/net/ethernet/netronome/nfp/nfp_bpf.h
index 87aa8a3e9112..76a19f1796af 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_bpf.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_bpf.h
@@ -62,6 +62,7 @@ enum nfp_bpf_action_type {
NN_ACT_TC_DROP,
NN_ACT_TC_REDIR,
NN_ACT_DIRECT,
+ NN_ACT_XDP,
};
/* Software register representation, hardware encoding in asm.h */
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_bpf_jit.c b/drivers/net/ethernet/netronome/nfp/nfp_bpf_jit.c
index f8df5300f49c..335beb8b8b45 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_bpf_jit.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_bpf_jit.c
@@ -1126,7 +1126,7 @@ static int data_ind_ld4(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
meta->insn.src_reg * 2, true, 4);
}
-static int mem_ldx4(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+static int mem_ldx4_skb(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
if (meta->insn.off == offsetof(struct sk_buff, len))
emit_alu(nfp_prog, reg_both(meta->insn.dst_reg * 2),
@@ -1134,12 +1134,42 @@ static int mem_ldx4(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
else
return -ENOTSUPP;
- wrp_immed(nfp_prog, reg_both(meta->insn.dst_reg * 2 + 1), 0);
+ return 0;
+}
+
+static int mem_ldx4_xdp(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ u32 dst = reg_both(meta->insn.dst_reg * 2);
+
+ if (meta->insn.off != offsetof(struct xdp_md, data) &&
+ meta->insn.off != offsetof(struct xdp_md, data_end))
+ return -ENOTSUPP;
+
+ emit_alu(nfp_prog, dst, reg_none(), ALU_OP_NONE, NFP_BPF_ABI_PKT);
+
+ if (meta->insn.off == offsetof(struct xdp_md, data))
+ return 0;
+
+ emit_alu(nfp_prog, dst, dst, ALU_OP_ADD, NFP_BPF_ABI_LEN);
return 0;
}
-static int mem_stx4(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+static int mem_ldx4(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ int ret;
+
+ if (nfp_prog->act == NN_ACT_XDP)
+ ret = mem_ldx4_xdp(nfp_prog, meta);
+ else
+ ret = mem_ldx4_skb(nfp_prog, meta);
+
+ wrp_immed(nfp_prog, reg_both(meta->insn.dst_reg * 2 + 1), 0);
+
+ return ret;
+}
+
+static int mem_stx4_skb(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
if (meta->insn.off == offsetof(struct sk_buff, mark))
return wrp_set_mark(nfp_prog, meta->insn.src_reg * 2);
@@ -1147,6 +1177,18 @@ static int mem_stx4(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
return -ENOTSUPP;
}
+static int mem_stx4_xdp(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ return -ENOTSUPP;
+}
+
+static int mem_stx4(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ if (nfp_prog->act == NN_ACT_XDP)
+ return mem_stx4_xdp(nfp_prog, meta);
+ return mem_stx4_skb(nfp_prog, meta);
+}
+
static int jump(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
if (meta->insn.off < 0) /* TODO */
@@ -1530,6 +1572,47 @@ static void nfp_outro_tc_da(struct nfp_prog *nfp_prog)
emit_ld_field(nfp_prog, reg_a(0), 0xc, reg_b(2), SHF_SC_L_SHF, 16);
}
+static void nfp_outro_xdp(struct nfp_prog *nfp_prog)
+{
+ /* XDP return codes:
+ * 0 aborted 0x82 -> drop, count as stat3
+ * 1 drop 0x22 -> drop, count as stat1
+ * 2 pass 0x11 -> pass, count as stat0
+ * 3 tx 0x44 -> redir, count as stat2
+ * * unknown 0x82 -> drop, count as stat3
+ */
+ /* Target for aborts */
+ nfp_prog->tgt_abort = nfp_prog_current_offset(nfp_prog);
+
+ emit_br_def(nfp_prog, nfp_prog->tgt_done, 2);
+
+ emit_alu(nfp_prog, reg_a(0),
+ reg_none(), ALU_OP_NONE, NFP_BPF_ABI_FLAGS);
+ emit_ld_field(nfp_prog, reg_a(0), 0xc, reg_imm(0x82), SHF_SC_L_SHF, 16);
+
+ /* Target for normal exits */
+ nfp_prog->tgt_out = nfp_prog_current_offset(nfp_prog);
+
+ /* if R0 > 3 jump to abort */
+ emit_alu(nfp_prog, reg_none(), reg_imm(3), ALU_OP_SUB, reg_b(0));
+ emit_br(nfp_prog, BR_BLO, nfp_prog->tgt_abort, 0);
+
+ wrp_immed(nfp_prog, reg_b(2), 0x44112282);
+
+ emit_shf(nfp_prog, reg_a(1),
+ reg_none(), SHF_OP_NONE, reg_b(0), SHF_SC_L_SHF, 3);
+
+ emit_alu(nfp_prog, reg_none(), reg_a(1), ALU_OP_OR, reg_imm(0));
+ emit_shf(nfp_prog, reg_b(2),
+ reg_imm(0xff), SHF_OP_AND, reg_b(2), SHF_SC_R_SHF, 0);
+
+ emit_br_def(nfp_prog, nfp_prog->tgt_done, 2);
+
+ emit_alu(nfp_prog, reg_a(0),
+ reg_none(), ALU_OP_NONE, NFP_BPF_ABI_FLAGS);
+ emit_ld_field(nfp_prog, reg_a(0), 0xc, reg_b(2), SHF_SC_L_SHF, 16);
+}
+
static void nfp_outro(struct nfp_prog *nfp_prog)
{
switch (nfp_prog->act) {
@@ -1540,6 +1623,9 @@ static void nfp_outro(struct nfp_prog *nfp_prog)
case NN_ACT_TC_REDIR:
nfp_outro_tc_legacy(nfp_prog);
break;
+ case NN_ACT_XDP:
+ nfp_outro_xdp(nfp_prog);
+ break;
}
}
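The immediate 0x44112282 in nfp_outro_xdp() is the return-code table from the comment packed one byte per XDP verdict, and the shift-by-3 in the generated code turns the verdict into a bit offset into it. A host-side check of the mapping (plain C, independent of the JIT):

    #include <stdint.h>
    #include <stdio.h>

    /* byte 0 -> XDP_ABORTED -> 0x82, byte 1 -> XDP_DROP -> 0x22,
     * byte 2 -> XDP_PASS    -> 0x11, byte 3 -> XDP_TX   -> 0x44
     */
    static uint8_t nfp_result(uint32_t xdp_ret)
    {
            if (xdp_ret > 3)        /* unknown verdicts abort, like the JIT */
                    xdp_ret = 0;
            return (0x44112282u >> (xdp_ret * 8)) & 0xff;
    }

    int main(void)
    {
            for (uint32_t ret = 0; ret < 5; ret++)
                    printf("xdp ret %u -> 0x%02x\n", ret, nfp_result(ret));
            return 0;
    }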
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_bpf_verifier.c b/drivers/net/ethernet/netronome/nfp/nfp_bpf_verifier.c
index 144cae87f63a..b3361f9b8e5c 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_bpf_verifier.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_bpf_verifier.c
@@ -80,6 +80,9 @@ nfp_bpf_check_exit(struct nfp_prog *nfp_prog,
{
const struct bpf_reg_state *reg0 = &env->cur_state.regs[0];
+ if (nfp_prog->act == NN_ACT_XDP)
+ return 0;
+
if (reg0->type != CONST_IMM) {
pr_info("unsupported exit state: %d, imm: %llx\n",
reg0->type, reg0->imm);
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net.h b/drivers/net/ethernet/netronome/nfp/nfp_net.h
index ed824e11a1e3..2115f446031e 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net.h
@@ -75,7 +75,6 @@
/* Default size for MTU and freelist buffer sizes */
#define NFP_NET_DEFAULT_MTU 1500
-#define NFP_NET_DEFAULT_RX_BUFSZ 2048
/* Maximum number of bytes prepended to a packet */
#define NFP_NET_MAX_PREPEND 64
@@ -88,6 +87,9 @@
/* Queue/Ring definitions */
#define NFP_NET_MAX_TX_RINGS 64 /* Max. # of Tx rings per device */
#define NFP_NET_MAX_RX_RINGS 64 /* Max. # of Rx rings per device */
+#define NFP_NET_MAX_R_VECS (NFP_NET_MAX_TX_RINGS > NFP_NET_MAX_RX_RINGS ? \
+ NFP_NET_MAX_TX_RINGS : NFP_NET_MAX_RX_RINGS)
+#define NFP_NET_MAX_IRQS (NFP_NET_NON_Q_VECTORS + NFP_NET_MAX_R_VECS)
#define NFP_NET_MIN_TX_DESCS 256 /* Min. # of Tx descs per ring */
#define NFP_NET_MIN_RX_DESCS 256 /* Min. # of Rx descs per ring */
@@ -102,6 +104,10 @@
/* Offload definitions */
#define NFP_NET_N_VXLAN_PORTS (NFP_NET_CFG_VXLAN_SZ / sizeof(__be16))
+#define NFP_NET_RX_BUF_HEADROOM (NET_SKB_PAD + NET_IP_ALIGN)
+#define NFP_NET_RX_BUF_NON_DATA (NFP_NET_RX_BUF_HEADROOM + \
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
+
/* Forward declarations */
struct nfp_net;
struct nfp_net_r_vector;
@@ -165,7 +171,10 @@ struct nfp_net_tx_desc {
* on the head's buffer). Equal to skb->len for non-TSO packets.
*/
struct nfp_net_tx_buf {
- struct sk_buff *skb;
+ union {
+ struct sk_buff *skb;
+ void *frag;
+ };
dma_addr_t dma_addr;
short int fidx;
u16 pkt_cnt;
@@ -278,11 +287,11 @@ struct nfp_net_rx_hash {
/**
* struct nfp_net_rx_buf - software RX buffer descriptor
- * @skb: sk_buff associated with this buffer
+ * @frag: page fragment buffer
* @dma_addr: DMA mapping address of the buffer
*/
struct nfp_net_rx_buf {
- struct sk_buff *skb;
+ void *frag;
dma_addr_t dma_addr;
};
@@ -335,6 +344,7 @@ struct nfp_net_rx_ring {
* @napi: NAPI structure for this ring vec
* @tx_ring: Pointer to TX ring
* @rx_ring: Pointer to RX ring
+ * @xdp_ring: Pointer to an extra TX ring for XDP
* @irq_idx: Index into MSI-X table
* @rx_sync: Seqlock for atomic updates of RX stats
* @rx_pkts: Number of received packets
@@ -378,6 +388,8 @@ struct nfp_net_r_vector {
u64 hw_csum_rx_inner_ok;
u64 hw_csum_rx_error;
+ struct nfp_net_tx_ring *xdp_ring;
+
struct u64_stats_sync tx_sync;
u64 tx_pkts;
u64 tx_bytes;
@@ -421,12 +433,13 @@ struct nfp_stat_pair {
* @netdev: Backpointer to net_device structure
* @nfp_fallback: Is the driver used in fallback mode?
* @is_vf: Is the driver attached to a VF?
- * @is_nfp3200: Is the driver for a NFP-3200 card?
* @fw_loaded: Is the firmware loaded?
* @bpf_offload_skip_sw: Offloaded BPF program will not be rerun by cls_bpf
+ * @bpf_offload_xdp: Offloaded BPF program is XDP
* @ctrl: Local copy of the control register/word.
* @fl_bufsz: Currently configured size of the freelist buffers
* @rx_offset: Offset in the RX buffers where packet data starts
+ * @xdp_prog: Installed XDP program
* @cpp: Pointer to the CPP handle
* @nfp_dev_cpp: Pointer to the NFP Device handle
* @ctrl_area: Pointer to the CPP area for the control BAR
@@ -446,12 +459,13 @@ struct nfp_stat_pair {
* @max_tx_rings: Maximum number of TX rings supported by the Firmware
* @max_rx_rings: Maximum number of RX rings supported by the Firmware
* @num_tx_rings: Currently configured number of TX rings
+ * @num_stack_tx_rings: Number of TX rings used by the stack (not XDP)
* @num_rx_rings: Currently configured number of RX rings
* @txd_cnt: Size of the TX ring in number of descriptors
* @rxd_cnt: Size of the RX ring in number of descriptors
* @tx_rings: Array of pre-allocated TX ring structures
* @rx_rings: Array of pre-allocated RX ring structures
- * @num_irqs: Number of allocated interrupt vectors
+ * @max_r_vecs: Number of allocated interrupt vectors for RX/TX
* @num_r_vecs: Number of used ring vectors
* @r_vecs: Pre-allocated array of ring vectors
* @irq_entries: Pre-allocated array of MSI-X entries
@@ -487,15 +501,17 @@ struct nfp_net {
unsigned nfp_fallback:1;
unsigned is_vf:1;
- unsigned is_nfp3200:1;
unsigned fw_loaded:1;
unsigned bpf_offload_skip_sw:1;
+ unsigned bpf_offload_xdp:1;
u32 ctrl;
u32 fl_bufsz;
u32 rx_offset;
+ struct bpf_prog *xdp_prog;
+
struct nfp_net_tx_ring *tx_rings;
struct nfp_net_rx_ring *rx_rings;
@@ -524,11 +540,12 @@ struct nfp_net {
struct timer_list rx_filter_stats_timer;
spinlock_t rx_filter_lock;
- int max_tx_rings;
- int max_rx_rings;
+ unsigned int max_tx_rings;
+ unsigned int max_rx_rings;
- int num_tx_rings;
- int num_rx_rings;
+ unsigned int num_tx_rings;
+ unsigned int num_stack_tx_rings;
+ unsigned int num_rx_rings;
int stride_tx;
int stride_rx;
@@ -536,11 +553,10 @@ struct nfp_net {
int txd_cnt;
int rxd_cnt;
- u8 num_irqs;
- u8 num_r_vecs;
- struct nfp_net_r_vector r_vecs[NFP_NET_MAX_TX_RINGS];
- struct msix_entry irq_entries[NFP_NET_NON_Q_VECTORS +
- NFP_NET_MAX_TX_RINGS];
+ unsigned int max_r_vecs;
+ unsigned int num_r_vecs;
+ struct nfp_net_r_vector r_vecs[NFP_NET_MAX_R_VECS];
+ struct msix_entry irq_entries[NFP_NET_MAX_IRQS];
irq_handler_t lsc_handler;
char lsc_name[IFNAMSIZ + 8];
@@ -580,6 +596,13 @@ struct nfp_net {
struct dentry *debugfs_dir;
};
+struct nfp_net_ring_set {
+ unsigned int n_rings;
+ unsigned int mtu;
+ unsigned int dcnt;
+ void *rings;
+};
+
/* Functions to read/write from/to a BAR
* Performs any endian conversion necessary.
*/
@@ -593,16 +616,13 @@ static inline void nn_writeb(struct nfp_net *nn, int off, u8 val)
writeb(val, nn->ctrl_bar + off);
}
-/* NFP-3200 can't handle 16-bit accesses too well */
static inline u16 nn_readw(struct nfp_net *nn, int off)
{
- WARN_ON_ONCE(nn->is_nfp3200);
return readw(nn->ctrl_bar + off);
}
static inline void nn_writew(struct nfp_net *nn, int off, u16 val)
{
- WARN_ON_ONCE(nn->is_nfp3200);
writew(val, nn->ctrl_bar + off);
}
@@ -650,7 +670,7 @@ static inline void nn_pci_flush(struct nfp_net *nn)
#define NFP_QCP_QUEUE_STS_HI 0x000c
#define NFP_QCP_QUEUE_STS_HI_WRITEPTR_mask 0x3ffff
-/* The offset of a QCP queues in the PCIe Target (same on NFP3200 and NFP6000 */
+/* The offset of the QCP queues in the PCIe Target */
#define NFP_PCIE_QUEUE(_q) (0x80000 + (NFP_QCP_QUEUE_ADDR_SZ * ((_q) & 0xff)))
/* nfp_qcp_ptr - Read or Write Pointer of a queue */
@@ -757,8 +777,9 @@ extern const char nfp_net_driver_version[];
void nfp_net_get_fw_version(struct nfp_net_fw_version *fw_ver,
void __iomem *ctrl_bar);
-struct nfp_net *nfp_net_netdev_alloc(struct pci_dev *pdev,
- int max_tx_rings, int max_rx_rings);
+struct nfp_net *
+nfp_net_netdev_alloc(struct pci_dev *pdev,
+ unsigned int max_tx_rings, unsigned int max_rx_rings);
void nfp_net_netdev_free(struct nfp_net *nn);
int nfp_net_netdev_init(struct net_device *netdev);
void nfp_net_netdev_clean(struct net_device *netdev);
@@ -770,7 +791,9 @@ void nfp_net_rss_write_key(struct nfp_net *nn);
void nfp_net_coalesce_write_cfg(struct nfp_net *nn);
int nfp_net_irqs_alloc(struct nfp_net *nn);
void nfp_net_irqs_disable(struct nfp_net *nn);
-int nfp_net_set_ring_size(struct nfp_net *nn, u32 rxd_cnt, u32 txd_cnt);
+int
+nfp_net_ring_reconfig(struct nfp_net *nn, struct bpf_prog **xdp_prog,
+ struct nfp_net_ring_set *rx, struct nfp_net_ring_set *tx);
#ifdef CONFIG_NFP_NET_DEBUG
void nfp_net_debugfs_create(void);
@@ -796,8 +819,6 @@ static inline void nfp_net_debugfs_adapter_del(struct nfp_net *nn)
#endif /* CONFIG_NFP_NET_DEBUG */
void nfp_net_filter_stats_timer(unsigned long data);
-int
-nfp_net_bpf_offload(struct nfp_net *nn, u32 handle, __be16 proto,
- struct tc_cls_bpf_offload *cls_bpf);
+int nfp_net_bpf_offload(struct nfp_net *nn, struct tc_cls_bpf_offload *cls_bpf);
#endif /* _NFP_NET_H_ */
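NFP_NET_MAX_R_VECS above is the usual constant-folded max() so that r_vecs[] and irq_entries[] are sized for whichever ring count is larger, rather than silently assuming TX == RX as the old r_vecs[NFP_NET_MAX_TX_RINGS] did. A stand-alone illustration; the NON_Q_VECTORS value here is an assumption:

    #define MAX_TX_RINGS  64
    #define MAX_RX_RINGS  64
    #define NON_Q_VECTORS 2  /* assumed: link-state + exception vectors */

    /* A ?: on constants folds at compile time, so it is legal in array
     * bounds where a function call would not be.
     */
    #define MAX_R_VECS (MAX_TX_RINGS > MAX_RX_RINGS ? \
                        MAX_TX_RINGS : MAX_RX_RINGS)
    #define MAX_IRQS   (NON_Q_VECTORS + MAX_R_VECS)

    struct ring_vector { int placeholder; };

    static struct ring_vector r_vecs[MAX_R_VECS]; /* 64 entries */
    static int irq_slots[MAX_IRQS];               /* 66 entries */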
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
index aee3fd2b6538..e8d448109e03 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
@@ -41,6 +41,7 @@
* Chris Telfer <chris.telfer@netronome.com>
*/
+#include <linux/bpf.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
@@ -50,6 +51,7 @@
#include <linux/interrupt.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
+#include <linux/page_ref.h>
#include <linux/pci.h>
#include <linux/pci_regs.h>
#include <linux/msi.h>
@@ -80,6 +82,22 @@ void nfp_net_get_fw_version(struct nfp_net_fw_version *fw_ver,
put_unaligned_le32(reg, fw_ver);
}
+static dma_addr_t
+nfp_net_dma_map_rx(struct nfp_net *nn, void *frag, unsigned int bufsz,
+ int direction)
+{
+ return dma_map_single(&nn->pdev->dev, frag + NFP_NET_RX_BUF_HEADROOM,
+ bufsz - NFP_NET_RX_BUF_NON_DATA, direction);
+}
+
+static void
+nfp_net_dma_unmap_rx(struct nfp_net *nn, dma_addr_t dma_addr,
+ unsigned int bufsz, int direction)
+{
+ dma_unmap_single(&nn->pdev->dev, dma_addr,
+ bufsz - NFP_NET_RX_BUF_NON_DATA, direction);
+}
+
/* Firmware reconfig
*
* Firmware reconfig may take a while so we have two versions of it -
@@ -249,43 +267,14 @@ int nfp_net_reconfig(struct nfp_net *nn, u32 update)
*/
/**
- * nfp_net_irq_unmask_msix() - Unmask MSI-X after automasking
- * @nn: NFP Network structure
- * @entry_nr: MSI-X table entry
- *
- * Clear the MSI-X table mask bit for the given entry bypassing Linux irq
- * handling subsystem. Use *only* to reenable automasked vectors.
- */
-static void nfp_net_irq_unmask_msix(struct nfp_net *nn, unsigned int entry_nr)
-{
- struct list_head *msi_head = &nn->pdev->dev.msi_list;
- struct msi_desc *entry;
- u32 off;
-
- /* All MSI-Xs have the same mask_base */
- entry = list_first_entry(msi_head, struct msi_desc, list);
-
- off = (PCI_MSIX_ENTRY_SIZE * entry_nr) +
- PCI_MSIX_ENTRY_VECTOR_CTRL;
- writel(0, entry->mask_base + off);
- readl(entry->mask_base);
-}
-
-/**
* nfp_net_irq_unmask() - Unmask automasked interrupt
* @nn: NFP Network structure
* @entry_nr: MSI-X table entry
*
- * If MSI-X auto-masking is enabled clear the mask bit, otherwise
- * clear the ICR for the entry.
+ * Clear the ICR for the IRQ entry.
*/
static void nfp_net_irq_unmask(struct nfp_net *nn, unsigned int entry_nr)
{
- if (nn->ctrl & NFP_NET_CFG_CTRL_MSIXAUTO) {
- nfp_net_irq_unmask_msix(nn, entry_nr);
- return;
- }
-
nn_writeb(nn, NFP_NET_CFG_ICR(entry_nr), NFP_NET_CFG_ICR_UNMASKED);
nn_pci_flush(nn);
}
@@ -320,28 +309,6 @@ static int nfp_net_msix_alloc(struct nfp_net *nn, int nr_vecs)
}
/**
- * nfp_net_irqs_wanted() - Work out how many interrupt vectors we want
- * @nn: NFP Network structure
- *
- * We want a vector per CPU (or ring), whatever is smaller plus
- * NFP_NET_NON_Q_VECTORS for LSC etc.
- *
- * Return: Number of interrupts wanted
- */
-static int nfp_net_irqs_wanted(struct nfp_net *nn)
-{
- int ncpus;
- int vecs;
-
- ncpus = num_online_cpus();
-
- vecs = max_t(int, nn->num_tx_rings, nn->num_rx_rings);
- vecs = min_t(int, vecs, ncpus);
-
- return vecs + NFP_NET_NON_Q_VECTORS;
-}
-
-/**
* nfp_net_irqs_alloc() - allocates MSI-X irqs
* @nn: NFP Network structure
*
@@ -350,22 +317,24 @@ static int nfp_net_irqs_wanted(struct nfp_net *nn)
int nfp_net_irqs_alloc(struct nfp_net *nn)
{
int wanted_irqs;
+ unsigned int n;
- wanted_irqs = nfp_net_irqs_wanted(nn);
+ wanted_irqs = nn->num_r_vecs + NFP_NET_NON_Q_VECTORS;
- nn->num_irqs = nfp_net_msix_alloc(nn, wanted_irqs);
- if (nn->num_irqs == 0) {
+ n = nfp_net_msix_alloc(nn, wanted_irqs);
+ if (n == 0) {
nn_err(nn, "Failed to allocate MSI-X IRQs\n");
return 0;
}
- nn->num_r_vecs = nn->num_irqs - NFP_NET_NON_Q_VECTORS;
+ nn->max_r_vecs = n - NFP_NET_NON_Q_VECTORS;
+ nn->num_r_vecs = nn->max_r_vecs;
- if (nn->num_irqs < wanted_irqs)
+ if (n < wanted_irqs)
nn_warn(nn, "Unable to allocate %d vectors. Got %d instead\n",
- wanted_irqs, nn->num_irqs);
+ wanted_irqs, n);
- return nn->num_irqs;
+ return n;
}
/**
@@ -515,18 +484,19 @@ static void nfp_net_irqs_assign(struct net_device *netdev)
struct nfp_net_r_vector *r_vec;
int r;
- /* Assumes nn->num_tx_rings == nn->num_rx_rings */
- if (nn->num_tx_rings > nn->num_r_vecs) {
- nn_warn(nn, "More rings (%d) than vectors (%d).\n",
- nn->num_tx_rings, nn->num_r_vecs);
- nn->num_tx_rings = nn->num_r_vecs;
- nn->num_rx_rings = nn->num_r_vecs;
- }
+ if (nn->num_rx_rings > nn->num_r_vecs ||
+ nn->num_tx_rings > nn->num_r_vecs)
+ nn_warn(nn, "More rings (%d,%d) than vectors (%d).\n",
+ nn->num_rx_rings, nn->num_tx_rings, nn->num_r_vecs);
+
+ nn->num_rx_rings = min(nn->num_r_vecs, nn->num_rx_rings);
+ nn->num_tx_rings = min(nn->num_r_vecs, nn->num_tx_rings);
+ nn->num_stack_tx_rings = nn->num_tx_rings;
nn->lsc_handler = nfp_net_irq_lsc;
nn->exn_handler = nfp_net_irq_exn;
- for (r = 0; r < nn->num_r_vecs; r++) {
+ for (r = 0; r < nn->max_r_vecs; r++) {
r_vec = &nn->r_vecs[r];
r_vec->nfp_net = nn;
r_vec->handler = nfp_net_irq_rxtx;
@@ -605,7 +575,7 @@ static void nfp_net_aux_irq_free(struct nfp_net *nn, u32 ctrl_offset,
*
* Return: True if the ring is full.
*/
-static inline int nfp_net_tx_full(struct nfp_net_tx_ring *tx_ring, int dcnt)
+static int nfp_net_tx_full(struct nfp_net_tx_ring *tx_ring, int dcnt)
{
return (tx_ring->wr_p - tx_ring->rd_p) >= (tx_ring->cnt - dcnt);
}
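nfp_net_tx_full() and the `& (cnt - 1)` index calculations that replace `% cnt` throughout this patch lean on the same invariant: wr_p and rd_p are free-running u32 counters that are never reset, so occupancy is plain unsigned subtraction (correct even across the 2^32 wrap) and masking is valid whenever the ring size is a power of two. A small self-checking sketch:

    #include <assert.h>
    #include <stdint.h>

    #define RING_CNT 256u /* must be a power of two for the mask trick */

    struct ring { uint32_t wr_p, rd_p; }; /* free-running counters */

    static uint32_t ring_used(const struct ring *r)
    {
            return r->wr_p - r->rd_p; /* fine even when wr_p has wrapped */
    }

    static uint32_t ring_idx(uint32_t ptr)
    {
            return ptr & (RING_CNT - 1); /* == ptr % RING_CNT here */
    }

    int main(void)
    {
            struct ring r = { .wr_p = UINT32_MAX, .rd_p = UINT32_MAX - 3 };

            assert(ring_used(&r) == 3);
            r.wr_p += 2;                  /* producer crosses the u32 wrap */
            assert(ring_used(&r) == 5);
            assert(ring_idx(r.wr_p) == 1);
            return 0;
    }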
@@ -745,6 +715,13 @@ static void nfp_net_tx_csum(struct nfp_net *nn, struct nfp_net_r_vector *r_vec,
u64_stats_update_end(&r_vec->tx_sync);
}
+static void nfp_net_tx_xmit_more_flush(struct nfp_net_tx_ring *tx_ring)
+{
+ wmb();
+ nfp_qcp_wr_ptr_add(tx_ring->qcp_q, tx_ring->wr_ptr_add);
+ tx_ring->wr_ptr_add = 0;
+}
+
/**
* nfp_net_tx() - Main transmit entry point
* @skb: SKB to transmit
@@ -790,7 +767,7 @@ static int nfp_net_tx(struct sk_buff *skb, struct net_device *netdev)
if (dma_mapping_error(&nn->pdev->dev, dma_addr))
goto err_free;
- wr_idx = tx_ring->wr_p % tx_ring->cnt;
+ wr_idx = tx_ring->wr_p & (tx_ring->cnt - 1);
/* Stash the soft descriptor of the head then initialize it */
txbuf = &tx_ring->txbufs[wr_idx];
@@ -834,7 +811,7 @@ static int nfp_net_tx(struct sk_buff *skb, struct net_device *netdev)
if (dma_mapping_error(&nn->pdev->dev, dma_addr))
goto err_unmap;
- wr_idx = (wr_idx + 1) % tx_ring->cnt;
+ wr_idx = (wr_idx + 1) & (tx_ring->cnt - 1);
tx_ring->txbufs[wr_idx].skb = skb;
tx_ring->txbufs[wr_idx].dma_addr = dma_addr;
tx_ring->txbufs[wr_idx].fidx = f;
@@ -859,12 +836,8 @@ static int nfp_net_tx(struct sk_buff *skb, struct net_device *netdev)
nfp_net_tx_ring_stop(nd_q, tx_ring);
tx_ring->wr_ptr_add += nr_frags + 1;
- if (!skb->xmit_more || netif_xmit_stopped(nd_q)) {
- /* force memory write before we let HW know */
- wmb();
- nfp_qcp_wr_ptr_add(tx_ring->qcp_q, tx_ring->wr_ptr_add);
- tx_ring->wr_ptr_add = 0;
- }
+ if (!skb->xmit_more || netif_xmit_stopped(nd_q))
+ nfp_net_tx_xmit_more_flush(tx_ring);
skb_tx_timestamp(skb);
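Hoisting the doorbell write into nfp_net_tx_xmit_more_flush() keeps the xmit_more contract in one place: descriptors accumulate in wr_ptr_add and the MMIO write to the queue controller is issued only at the end of a batch, or when the queue is stopping and must make forward progress. A generic skeleton of the pattern; my_tx_ring, pending and ring_doorbell() are illustrative names, not driver API:

    #include <linux/netdevice.h>

    struct my_tx_ring { unsigned int pending; /* ... */ };
    void ring_doorbell(struct my_tx_ring *tx, unsigned int n); /* stand-in */

    static void tx_maybe_flush(struct my_tx_ring *tx, struct sk_buff *skb,
                               struct netdev_queue *nd_q)
    {
            tx->pending++; /* descriptor written, doorbell deferred */

            if (!skb->xmit_more || netif_xmit_stopped(nd_q)) {
                    wmb(); /* descriptors visible before the doorbell */
                    ring_doorbell(tx, tx->pending);
                    tx->pending = 0;
            }
    }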
@@ -929,7 +902,7 @@ static void nfp_net_tx_complete(struct nfp_net_tx_ring *tx_ring)
todo = qcp_rd_p + tx_ring->cnt - tx_ring->qcp_rd_p;
while (todo--) {
- idx = tx_ring->rd_p % tx_ring->cnt;
+ idx = tx_ring->rd_p & (tx_ring->cnt - 1);
tx_ring->rd_p++;
skb = tx_ring->txbufs[idx].skb;
@@ -986,6 +959,56 @@ static void nfp_net_tx_complete(struct nfp_net_tx_ring *tx_ring)
tx_ring->rd_p, tx_ring->wr_p, tx_ring->cnt);
}
+static void nfp_net_xdp_complete(struct nfp_net_tx_ring *tx_ring)
+{
+ struct nfp_net_r_vector *r_vec = tx_ring->r_vec;
+ struct nfp_net *nn = r_vec->nfp_net;
+ u32 done_pkts = 0, done_bytes = 0;
+ int idx, todo;
+ u32 qcp_rd_p;
+
+ /* Work out how many descriptors have been transmitted */
+ qcp_rd_p = nfp_qcp_rd_ptr_read(tx_ring->qcp_q);
+
+ if (qcp_rd_p == tx_ring->qcp_rd_p)
+ return;
+
+ if (qcp_rd_p > tx_ring->qcp_rd_p)
+ todo = qcp_rd_p - tx_ring->qcp_rd_p;
+ else
+ todo = qcp_rd_p + tx_ring->cnt - tx_ring->qcp_rd_p;
+
+ while (todo--) {
+ idx = tx_ring->rd_p & (tx_ring->cnt - 1);
+ tx_ring->rd_p++;
+
+ if (!tx_ring->txbufs[idx].frag)
+ continue;
+
+ nfp_net_dma_unmap_rx(nn, tx_ring->txbufs[idx].dma_addr,
+ nn->fl_bufsz, DMA_BIDIRECTIONAL);
+ __free_page(virt_to_page(tx_ring->txbufs[idx].frag));
+
+ done_pkts++;
+ done_bytes += tx_ring->txbufs[idx].real_len;
+
+ tx_ring->txbufs[idx].dma_addr = 0;
+ tx_ring->txbufs[idx].frag = NULL;
+ tx_ring->txbufs[idx].fidx = -2;
+ }
+
+ tx_ring->qcp_rd_p = qcp_rd_p;
+
+ u64_stats_update_begin(&r_vec->tx_sync);
+ r_vec->tx_bytes += done_bytes;
+ r_vec->tx_pkts += done_pkts;
+ u64_stats_update_end(&r_vec->tx_sync);
+
+ WARN_ONCE(tx_ring->wr_p - tx_ring->rd_p > tx_ring->cnt,
+ "TX ring corruption rd_p=%u wr_p=%u cnt=%u\n",
+ tx_ring->rd_p, tx_ring->wr_p, tx_ring->cnt);
+}
+
/**
* nfp_net_tx_ring_reset() - Free any untransmitted buffers and reset pointers
* @nn: NFP Net device
@@ -996,39 +1019,47 @@ static void nfp_net_tx_complete(struct nfp_net_tx_ring *tx_ring)
static void
nfp_net_tx_ring_reset(struct nfp_net *nn, struct nfp_net_tx_ring *tx_ring)
{
+ struct nfp_net_r_vector *r_vec = tx_ring->r_vec;
const struct skb_frag_struct *frag;
- struct netdev_queue *nd_q;
struct pci_dev *pdev = nn->pdev;
+ struct netdev_queue *nd_q;
while (tx_ring->rd_p != tx_ring->wr_p) {
- int nr_frags, fidx, idx;
- struct sk_buff *skb;
+ struct nfp_net_tx_buf *tx_buf;
+ int idx;
- idx = tx_ring->rd_p % tx_ring->cnt;
- skb = tx_ring->txbufs[idx].skb;
- nr_frags = skb_shinfo(skb)->nr_frags;
- fidx = tx_ring->txbufs[idx].fidx;
+ idx = tx_ring->rd_p & (tx_ring->cnt - 1);
+ tx_buf = &tx_ring->txbufs[idx];
- if (fidx == -1) {
- /* unmap head */
- dma_unmap_single(&pdev->dev,
- tx_ring->txbufs[idx].dma_addr,
- skb_headlen(skb), DMA_TO_DEVICE);
+ if (tx_ring == r_vec->xdp_ring) {
+ nfp_net_dma_unmap_rx(nn, tx_buf->dma_addr,
+ nn->fl_bufsz, DMA_BIDIRECTIONAL);
+ __free_page(virt_to_page(tx_ring->txbufs[idx].frag));
} else {
- /* unmap fragment */
- frag = &skb_shinfo(skb)->frags[fidx];
- dma_unmap_page(&pdev->dev,
- tx_ring->txbufs[idx].dma_addr,
- skb_frag_size(frag), DMA_TO_DEVICE);
- }
+ struct sk_buff *skb = tx_ring->txbufs[idx].skb;
+ int nr_frags = skb_shinfo(skb)->nr_frags;
+
+ if (tx_buf->fidx == -1) {
+ /* unmap head */
+ dma_unmap_single(&pdev->dev, tx_buf->dma_addr,
+ skb_headlen(skb),
+ DMA_TO_DEVICE);
+ } else {
+ /* unmap fragment */
+ frag = &skb_shinfo(skb)->frags[tx_buf->fidx];
+ dma_unmap_page(&pdev->dev, tx_buf->dma_addr,
+ skb_frag_size(frag),
+ DMA_TO_DEVICE);
+ }
- /* check for last gather fragment */
- if (fidx == nr_frags - 1)
- dev_kfree_skb_any(skb);
+ /* check for last gather fragment */
+ if (tx_buf->fidx == nr_frags - 1)
+ dev_kfree_skb_any(skb);
+ }
- tx_ring->txbufs[idx].dma_addr = 0;
- tx_ring->txbufs[idx].skb = NULL;
- tx_ring->txbufs[idx].fidx = -2;
+ tx_buf->dma_addr = 0;
+ tx_buf->skb = NULL;
+ tx_buf->fidx = -2;
tx_ring->qcp_rd_p++;
tx_ring->rd_p++;
@@ -1040,6 +1071,9 @@ nfp_net_tx_ring_reset(struct nfp_net *nn, struct nfp_net_tx_ring *tx_ring)
tx_ring->qcp_rd_p = 0;
tx_ring->wr_ptr_add = 0;
+ if (tx_ring == r_vec->xdp_ring)
+ return;
+
nd_q = netdev_get_tx_queue(nn->netdev, tx_ring->idx);
netdev_tx_reset_queue(nd_q);
}
@@ -1049,7 +1083,7 @@ static void nfp_net_tx_timeout(struct net_device *netdev)
struct nfp_net *nn = netdev_priv(netdev);
int i;
- for (i = 0; i < nn->num_tx_rings; i++) {
+ for (i = 0; i < nn->netdev->real_num_tx_queues; i++) {
if (!netif_tx_queue_stopped(netdev_get_tx_queue(netdev, i)))
continue;
nn_warn(nn, "TX timeout on ring: %d\n", i);
@@ -1059,69 +1093,112 @@ static void nfp_net_tx_timeout(struct net_device *netdev)
/* Receive processing
*/
+static unsigned int
+nfp_net_calc_fl_bufsz(struct nfp_net *nn, unsigned int mtu)
+{
+ unsigned int fl_bufsz;
-/**
- * nfp_net_rx_space() - return the number of free slots on the RX ring
- * @rx_ring: RX ring structure
- *
- * Make sure we leave at least one slot free.
- *
- * Return: True if there is space on the RX ring
- */
-static inline int nfp_net_rx_space(struct nfp_net_rx_ring *rx_ring)
+ fl_bufsz = NFP_NET_RX_BUF_HEADROOM;
+ if (nn->rx_offset == NFP_NET_CFG_RX_OFFSET_DYNAMIC)
+ fl_bufsz += NFP_NET_MAX_PREPEND;
+ else
+ fl_bufsz += nn->rx_offset;
+ fl_bufsz += ETH_HLEN + VLAN_HLEN * 2 + mtu;
+
+ fl_bufsz = SKB_DATA_ALIGN(fl_bufsz);
+ fl_bufsz += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+
+ return fl_bufsz;
+}
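Since the buffers now feed build_skb(), nfp_net_calc_fl_bufsz() must reserve skb headroom ahead of the packet and room for struct skb_shared_info behind it; NFP_NET_RX_BUF_NON_DATA in the DMA helpers earlier subtracts the same overhead so only the data area is mapped. A worked instance, every constant being a config- and arch-dependent assumption:

    /* Assumed: NET_SKB_PAD = 64, NET_IP_ALIGN = 2, rx_offset = 32,
     * ETH_HLEN = 14, VLAN_HLEN = 4, MTU = 1500, 64-byte cache lines,
     * skb_shared_info rounding to 320 bytes.
     *
     *   headroom         66   (NFP_NET_RX_BUF_HEADROOM)
     * + rx_offset        32
     * + ETH_HLEN         14
     * + 2 * VLAN_HLEN     8
     * + MTU            1500
     *                 -----
     *                  1620  -> SKB_DATA_ALIGN -> 1664
     * + shared_info     320  -> fl_bufsz = 1984 bytes per buffer
     */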
+
+static void
+nfp_net_free_frag(void *frag, bool xdp)
{
- return (rx_ring->cnt - 1) - (rx_ring->wr_p - rx_ring->rd_p);
+ if (!xdp)
+ skb_free_frag(frag);
+ else
+ __free_page(virt_to_page(frag));
}
/**
- * nfp_net_rx_alloc_one() - Allocate and map skb for RX
+ * nfp_net_rx_alloc_one() - Allocate and map page frag for RX
* @rx_ring: RX ring structure of the skb
* @dma_addr: Pointer to storage for DMA address (output param)
* @fl_bufsz: size of freelist buffers
+ * @xdp: Whether XDP is enabled
*
- * This function will allcate a new skb, map it for DMA.
+ * This function will allcate a new page frag, map it for DMA.
*
- * Return: allocated skb or NULL on failure.
+ * Return: allocated page frag or NULL on failure.
*/
-static struct sk_buff *
+static void *
nfp_net_rx_alloc_one(struct nfp_net_rx_ring *rx_ring, dma_addr_t *dma_addr,
- unsigned int fl_bufsz)
+ unsigned int fl_bufsz, bool xdp)
{
struct nfp_net *nn = rx_ring->r_vec->nfp_net;
- struct sk_buff *skb;
+ int direction;
+ void *frag;
- skb = netdev_alloc_skb(nn->netdev, fl_bufsz);
- if (!skb) {
- nn_warn_ratelimit(nn, "Failed to alloc receive SKB\n");
+ if (!xdp)
+ frag = netdev_alloc_frag(fl_bufsz);
+ else
+ frag = page_address(alloc_page(GFP_KERNEL | __GFP_COLD));
+ if (!frag) {
+ nn_warn_ratelimit(nn, "Failed to alloc receive page frag\n");
return NULL;
}
- *dma_addr = dma_map_single(&nn->pdev->dev, skb->data,
- fl_bufsz, DMA_FROM_DEVICE);
+ direction = xdp ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
+
+ *dma_addr = nfp_net_dma_map_rx(nn, frag, fl_bufsz, direction);
if (dma_mapping_error(&nn->pdev->dev, *dma_addr)) {
- dev_kfree_skb_any(skb);
+ nfp_net_free_frag(frag, xdp);
nn_warn_ratelimit(nn, "Failed to map DMA RX buffer\n");
return NULL;
}
- return skb;
+ return frag;
+}
+
+static void *
+nfp_net_napi_alloc_one(struct nfp_net *nn, int direction, dma_addr_t *dma_addr)
+{
+ void *frag;
+
+ if (!nn->xdp_prog)
+ frag = napi_alloc_frag(nn->fl_bufsz);
+ else
+ frag = page_address(alloc_page(GFP_ATOMIC | __GFP_COLD));
+ if (!frag) {
+ nn_warn_ratelimit(nn, "Failed to alloc receive page frag\n");
+ return NULL;
+ }
+
+ *dma_addr = nfp_net_dma_map_rx(nn, frag, nn->fl_bufsz, direction);
+ if (dma_mapping_error(&nn->pdev->dev, *dma_addr)) {
+ nfp_net_free_frag(frag, nn->xdp_prog);
+ nn_warn_ratelimit(nn, "Failed to map DMA RX buffer\n");
+ return NULL;
+ }
+
+ return frag;
}
/**
* nfp_net_rx_give_one() - Put mapped skb on the software and hardware rings
* @rx_ring: RX ring structure
- * @skb: Skb to put on rings
+ * @frag: page fragment buffer
* @dma_addr: DMA address of skb mapping
*/
static void nfp_net_rx_give_one(struct nfp_net_rx_ring *rx_ring,
- struct sk_buff *skb, dma_addr_t dma_addr)
+ void *frag, dma_addr_t dma_addr)
{
unsigned int wr_idx;
- wr_idx = rx_ring->wr_p % rx_ring->cnt;
+ wr_idx = rx_ring->wr_p & (rx_ring->cnt - 1);
/* Stash SKB and DMA address away */
- rx_ring->rxbufs[wr_idx].skb = skb;
+ rx_ring->rxbufs[wr_idx].frag = frag;
rx_ring->rxbufs[wr_idx].dma_addr = dma_addr;
/* Fill freelist descriptor */
@@ -1153,12 +1230,12 @@ static void nfp_net_rx_ring_reset(struct nfp_net_rx_ring *rx_ring)
unsigned int wr_idx, last_idx;
/* Move the empty entry to the end of the list */
- wr_idx = rx_ring->wr_p % rx_ring->cnt;
+ wr_idx = rx_ring->wr_p & (rx_ring->cnt - 1);
last_idx = rx_ring->cnt - 1;
rx_ring->rxbufs[wr_idx].dma_addr = rx_ring->rxbufs[last_idx].dma_addr;
- rx_ring->rxbufs[wr_idx].skb = rx_ring->rxbufs[last_idx].skb;
+ rx_ring->rxbufs[wr_idx].frag = rx_ring->rxbufs[last_idx].frag;
rx_ring->rxbufs[last_idx].dma_addr = 0;
- rx_ring->rxbufs[last_idx].skb = NULL;
+ rx_ring->rxbufs[last_idx].frag = NULL;
memset(rx_ring->rxds, 0, sizeof(*rx_ring->rxds) * rx_ring->cnt);
rx_ring->wr_p = 0;
@@ -1170,15 +1247,17 @@ static void nfp_net_rx_ring_reset(struct nfp_net_rx_ring *rx_ring)
* nfp_net_rx_ring_bufs_free() - Free any buffers currently on the RX ring
* @nn: NFP Net device
* @rx_ring: RX ring to remove buffers from
+ * @xdp: Whether XDP is enabled
*
* Assumes that the device is stopped and buffers are in [0, ring->cnt - 1)
* entries. After device is disabled nfp_net_rx_ring_reset() must be called
* to restore required ring geometry.
*/
static void
-nfp_net_rx_ring_bufs_free(struct nfp_net *nn, struct nfp_net_rx_ring *rx_ring)
+nfp_net_rx_ring_bufs_free(struct nfp_net *nn, struct nfp_net_rx_ring *rx_ring,
+ bool xdp)
{
- struct pci_dev *pdev = nn->pdev;
+ int direction = xdp ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
unsigned int i;
for (i = 0; i < rx_ring->cnt - 1; i++) {
@@ -1186,14 +1265,14 @@ nfp_net_rx_ring_bufs_free(struct nfp_net *nn, struct nfp_net_rx_ring *rx_ring)
* fails to allocate enough buffers and calls here to free
* already allocated ones.
*/
- if (!rx_ring->rxbufs[i].skb)
+ if (!rx_ring->rxbufs[i].frag)
continue;
- dma_unmap_single(&pdev->dev, rx_ring->rxbufs[i].dma_addr,
- rx_ring->bufsz, DMA_FROM_DEVICE);
- dev_kfree_skb_any(rx_ring->rxbufs[i].skb);
+ nfp_net_dma_unmap_rx(nn, rx_ring->rxbufs[i].dma_addr,
+ rx_ring->bufsz, direction);
+ nfp_net_free_frag(rx_ring->rxbufs[i].frag, xdp);
rx_ring->rxbufs[i].dma_addr = 0;
- rx_ring->rxbufs[i].skb = NULL;
+ rx_ring->rxbufs[i].frag = NULL;
}
}
@@ -1201,9 +1280,11 @@ nfp_net_rx_ring_bufs_free(struct nfp_net *nn, struct nfp_net_rx_ring *rx_ring)
* nfp_net_rx_ring_bufs_alloc() - Fill RX ring with buffers (don't give to FW)
* @nn: NFP Net device
* @rx_ring: RX ring to remove buffers from
+ * @xdp: Whether XDP is enabled
*/
static int
-nfp_net_rx_ring_bufs_alloc(struct nfp_net *nn, struct nfp_net_rx_ring *rx_ring)
+nfp_net_rx_ring_bufs_alloc(struct nfp_net *nn, struct nfp_net_rx_ring *rx_ring,
+ bool xdp)
{
struct nfp_net_rx_buf *rxbufs;
unsigned int i;
@@ -1211,11 +1292,11 @@ nfp_net_rx_ring_bufs_alloc(struct nfp_net *nn, struct nfp_net_rx_ring *rx_ring)
rxbufs = rx_ring->rxbufs;
for (i = 0; i < rx_ring->cnt - 1; i++) {
- rxbufs[i].skb =
+ rxbufs[i].frag =
nfp_net_rx_alloc_one(rx_ring, &rxbufs[i].dma_addr,
- rx_ring->bufsz);
- if (!rxbufs[i].skb) {
- nfp_net_rx_ring_bufs_free(nn, rx_ring);
+ rx_ring->bufsz, xdp);
+ if (!rxbufs[i].frag) {
+ nfp_net_rx_ring_bufs_free(nn, rx_ring, xdp);
return -ENOMEM;
}
}
@@ -1232,7 +1313,7 @@ static void nfp_net_rx_ring_fill_freelist(struct nfp_net_rx_ring *rx_ring)
unsigned int i;
for (i = 0; i < rx_ring->cnt - 1; i++)
- nfp_net_rx_give_one(rx_ring, rx_ring->rxbufs[i].skb,
+ nfp_net_rx_give_one(rx_ring, rx_ring->rxbufs[i].frag,
rx_ring->rxbufs[i].dma_addr);
}
@@ -1359,6 +1440,87 @@ nfp_net_parse_meta(struct net_device *netdev, struct sk_buff *skb,
return data;
}
+static void
+nfp_net_rx_drop(struct nfp_net_r_vector *r_vec, struct nfp_net_rx_ring *rx_ring,
+ struct nfp_net_rx_buf *rxbuf, struct sk_buff *skb)
+{
+ u64_stats_update_begin(&r_vec->rx_sync);
+ r_vec->rx_drops++;
+ u64_stats_update_end(&r_vec->rx_sync);
+
+ /* The skb is built on top of the frag; freeing the skb would also free
+ * the frag, so take an extra page reference to be able to reuse it.
+ */
+ if (skb && rxbuf && skb->head == rxbuf->frag)
+ page_ref_inc(virt_to_head_page(rxbuf->frag));
+ if (rxbuf)
+ nfp_net_rx_give_one(rx_ring, rxbuf->frag, rxbuf->dma_addr);
+ if (skb)
+ dev_kfree_skb_any(skb);
+}
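nfp_net_rx_drop() can recycle a frag that already has an skb built on it because build_skb() takes no page reference of its own; the extra page_ref_inc() leaves one reference for the skb to release and one for the freelist to keep. The refcount flow, as a sketch:

    /* page_ref == 1        frag owned by the RX freelist slot
     * build_skb(frag)      skb->head aliases the frag; kfree_skb() will
     *                      later drop one reference via skb_free_frag()
     * page_ref_inc()       page_ref == 2: one for the skb, one to reuse
     * nfp_net_rx_give_one()     frag re-posted to the freelist
     * dev_kfree_skb_any(skb)    page_ref == 1, freelist owns it again
     */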
+
+static void
+nfp_net_tx_xdp_buf(struct nfp_net *nn, struct nfp_net_rx_ring *rx_ring,
+ struct nfp_net_tx_ring *tx_ring,
+ struct nfp_net_rx_buf *rxbuf, unsigned int pkt_off,
+ unsigned int pkt_len)
+{
+ struct nfp_net_tx_buf *txbuf;
+ struct nfp_net_tx_desc *txd;
+ dma_addr_t new_dma_addr;
+ void *new_frag;
+ int wr_idx;
+
+ if (unlikely(nfp_net_tx_full(tx_ring, 1))) {
+ nfp_net_rx_drop(rx_ring->r_vec, rx_ring, rxbuf, NULL);
+ return;
+ }
+
+ new_frag = nfp_net_napi_alloc_one(nn, DMA_BIDIRECTIONAL, &new_dma_addr);
+ if (unlikely(!new_frag)) {
+ nfp_net_rx_drop(rx_ring->r_vec, rx_ring, rxbuf, NULL);
+ return;
+ }
+ nfp_net_rx_give_one(rx_ring, new_frag, new_dma_addr);
+
+ wr_idx = tx_ring->wr_p & (tx_ring->cnt - 1);
+
+ /* Stash the soft descriptor of the head then initialize it */
+ txbuf = &tx_ring->txbufs[wr_idx];
+ txbuf->frag = rxbuf->frag;
+ txbuf->dma_addr = rxbuf->dma_addr;
+ txbuf->fidx = -1;
+ txbuf->pkt_cnt = 1;
+ txbuf->real_len = pkt_len;
+
+ dma_sync_single_for_device(&nn->pdev->dev, rxbuf->dma_addr + pkt_off,
+ pkt_len, DMA_TO_DEVICE);
+
+ /* Build TX descriptor */
+ txd = &tx_ring->txds[wr_idx];
+ txd->offset_eop = PCIE_DESC_TX_EOP;
+ txd->dma_len = cpu_to_le16(pkt_len);
+ nfp_desc_set_dma_addr(txd, rxbuf->dma_addr + pkt_off);
+ txd->data_len = cpu_to_le16(pkt_len);
+
+ txd->flags = 0;
+ txd->mss = 0;
+ txd->l4_offset = 0;
+
+ tx_ring->wr_p++;
+ tx_ring->wr_ptr_add++;
+}
+
+static int nfp_net_run_xdp(struct bpf_prog *prog, void *data, unsigned int len)
+{
+ struct xdp_buff xdp;
+
+ xdp.data = data;
+ xdp.data_end = data + len;
+
+ return bpf_prog_run_xdp(prog, &xdp);
+}
+
/**
* nfp_net_rx() - receive up to @budget packets on @rx_ring
* @rx_ring: RX ring to receive from
@@ -1368,62 +1530,39 @@ nfp_net_parse_meta(struct net_device *netdev, struct sk_buff *skb,
* more cleanly separate packet receive code from other bookkeeping
* functions performed in the napi poll function.
*
- * There are differences between the NFP-3200 firmware and the
- * NFP-6000 firmware. The NFP-3200 firmware uses a dedicated RX queue
- * to indicate that new packets have arrived. The NFP-6000 does not
- * have this queue and uses the DD bit in the RX descriptor. This
- * method cannot be used on the NFP-3200 as it causes a race
- * condition: The RX ring write pointer on the NFP-3200 is updated
- * after packets (and descriptors) have been DMAed. If the DD bit is
- * used and subsequently the read pointer is updated this may lead to
- * the RX queue to underflow (if the firmware has not yet update the
- * write pointer). Therefore we use slightly ugly conditional code
- * below to handle the differences. We may, in the future update the
- * NFP-3200 firmware to behave the same as the firmware on the
- * NFP-6000.
- *
* Return: Number of packets received.
*/
static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget)
{
struct nfp_net_r_vector *r_vec = rx_ring->r_vec;
struct nfp_net *nn = r_vec->nfp_net;
- unsigned int data_len, meta_len;
- int avail = 0, pkts_polled = 0;
- struct sk_buff *skb, *new_skb;
- struct nfp_net_rx_desc *rxd;
- dma_addr_t new_dma_addr;
- u32 qcp_wr_p;
+ struct nfp_net_tx_ring *tx_ring;
+ struct bpf_prog *xdp_prog;
+ unsigned int true_bufsz;
+ struct sk_buff *skb;
+ int pkts_polled = 0;
+ int rx_dma_map_dir;
int idx;
- if (nn->is_nfp3200) {
- /* Work out how many packets arrived */
- qcp_wr_p = nfp_qcp_wr_ptr_read(rx_ring->qcp_rx);
- idx = rx_ring->rd_p % rx_ring->cnt;
+ rcu_read_lock();
+ xdp_prog = READ_ONCE(nn->xdp_prog);
+ rx_dma_map_dir = xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
+ true_bufsz = xdp_prog ? PAGE_SIZE : nn->fl_bufsz;
+ tx_ring = r_vec->xdp_ring;
- if (qcp_wr_p == idx)
- /* No new packets */
- return 0;
-
- if (qcp_wr_p > idx)
- avail = qcp_wr_p - idx;
- else
- avail = qcp_wr_p + rx_ring->cnt - idx;
- } else {
- avail = budget + 1;
- }
+ while (pkts_polled < budget) {
+ unsigned int meta_len, data_len, data_off, pkt_len, pkt_off;
+ struct nfp_net_rx_buf *rxbuf;
+ struct nfp_net_rx_desc *rxd;
+ dma_addr_t new_dma_addr;
+ void *new_frag;
- while (avail > 0 && pkts_polled < budget) {
- idx = rx_ring->rd_p % rx_ring->cnt;
+ idx = rx_ring->rd_p & (rx_ring->cnt - 1);
rxd = &rx_ring->rxds[idx];
- if (!(rxd->rxd.meta_len_dd & PCIE_DESC_RX_DD)) {
- if (nn->is_nfp3200)
- nn_dbg(nn, "RX descriptor not valid (DD)%d:%u rxd[0]=%#x rxd[1]=%#x\n",
- rx_ring->idx, idx,
- rxd->vals[0], rxd->vals[1]);
+ if (!(rxd->rxd.meta_len_dd & PCIE_DESC_RX_DD))
break;
- }
+
/* Memory barrier to ensure that we won't do other reads
* before the DD bit.
*/
@@ -1431,27 +1570,8 @@ static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget)
rx_ring->rd_p++;
pkts_polled++;
- avail--;
-
- skb = rx_ring->rxbufs[idx].skb;
-
- new_skb = nfp_net_rx_alloc_one(rx_ring, &new_dma_addr,
- nn->fl_bufsz);
- if (!new_skb) {
- nfp_net_rx_give_one(rx_ring, rx_ring->rxbufs[idx].skb,
- rx_ring->rxbufs[idx].dma_addr);
- u64_stats_update_begin(&r_vec->rx_sync);
- r_vec->rx_drops++;
- u64_stats_update_end(&r_vec->rx_sync);
- continue;
- }
-
- dma_unmap_single(&nn->pdev->dev,
- rx_ring->rxbufs[idx].dma_addr,
- nn->fl_bufsz, DMA_FROM_DEVICE);
-
- nfp_net_rx_give_one(rx_ring, new_skb, new_dma_addr);
+ rxbuf = &rx_ring->rxbufs[idx];
/* < meta_len >
* <-- [rx_offset] -->
* ---------------------------------------------------------
@@ -1466,19 +1586,66 @@ static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget)
*/
meta_len = rxd->rxd.meta_len_dd & PCIE_DESC_RX_META_LEN_MASK;
data_len = le16_to_cpu(rxd->rxd.data_len);
+ pkt_len = data_len - meta_len;
if (nn->rx_offset == NFP_NET_CFG_RX_OFFSET_DYNAMIC)
- skb_reserve(skb, meta_len);
+ pkt_off = meta_len;
else
- skb_reserve(skb, nn->rx_offset);
- skb_put(skb, data_len - meta_len);
+ pkt_off = nn->rx_offset;
+ data_off = NFP_NET_RX_BUF_HEADROOM + pkt_off;
/* Stats update */
u64_stats_update_begin(&r_vec->rx_sync);
r_vec->rx_pkts++;
- r_vec->rx_bytes += skb->len;
+ r_vec->rx_bytes += pkt_len;
u64_stats_update_end(&r_vec->rx_sync);
+ if (xdp_prog && !(rxd->rxd.flags & PCIE_DESC_RX_BPF &&
+ nn->bpf_offload_xdp)) {
+ int act;
+
+ dma_sync_single_for_cpu(&nn->pdev->dev,
+ rxbuf->dma_addr + pkt_off,
+ pkt_len, DMA_FROM_DEVICE);
+ act = nfp_net_run_xdp(xdp_prog, rxbuf->frag + data_off,
+ pkt_len);
+ switch (act) {
+ case XDP_PASS:
+ break;
+ case XDP_TX:
+ nfp_net_tx_xdp_buf(nn, rx_ring, tx_ring, rxbuf,
+ pkt_off, pkt_len);
+ continue;
+ default:
+ bpf_warn_invalid_xdp_action(act);
+ case XDP_ABORTED:
+ case XDP_DROP:
+ nfp_net_rx_give_one(rx_ring, rxbuf->frag,
+ rxbuf->dma_addr);
+ continue;
+ }
+ }
+
+ skb = build_skb(rxbuf->frag, true_bufsz);
+ if (unlikely(!skb)) {
+ nfp_net_rx_drop(r_vec, rx_ring, rxbuf, NULL);
+ continue;
+ }
+ new_frag = nfp_net_napi_alloc_one(nn, rx_dma_map_dir,
+ &new_dma_addr);
+ if (unlikely(!new_frag)) {
+ nfp_net_rx_drop(r_vec, rx_ring, rxbuf, skb);
+ continue;
+ }
+
+ nfp_net_dma_unmap_rx(nn, rxbuf->dma_addr, nn->fl_bufsz,
+ rx_dma_map_dir);
+
+ nfp_net_rx_give_one(rx_ring, new_frag, new_dma_addr);
+
+ skb_reserve(skb, data_off);
+ skb_put(skb, pkt_len);
+
if (nn->fw_ver.major <= 3) {
nfp_net_set_hash_desc(nn->netdev, skb, rxd);
} else if (meta_len) {
@@ -1486,12 +1653,8 @@ static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget)
end = nfp_net_parse_meta(nn->netdev, skb, meta_len);
if (unlikely(end != skb->data)) {
- u64_stats_update_begin(&r_vec->rx_sync);
- r_vec->rx_drops++;
- u64_stats_update_end(&r_vec->rx_sync);
-
- dev_kfree_skb_any(skb);
nn_warn_ratelimit(nn, "invalid RX packet metadata\n");
+ nfp_net_rx_drop(r_vec, rx_ring, NULL, skb);
continue;
}
}
@@ -1508,8 +1671,9 @@ static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget)
napi_gro_receive(&rx_ring->r_vec->napi, skb);
}
- if (nn->is_nfp3200)
- nfp_qcp_rd_ptr_add(rx_ring->qcp_rx, pkts_polled);
+ if (xdp_prog && tx_ring->wr_ptr_add)
+ nfp_net_tx_xmit_more_flush(tx_ring);
+ rcu_read_unlock();
return pkts_polled;
}
@@ -1525,21 +1689,19 @@ static int nfp_net_poll(struct napi_struct *napi, int budget)
{
struct nfp_net_r_vector *r_vec =
container_of(napi, struct nfp_net_r_vector, napi);
- struct nfp_net_rx_ring *rx_ring = r_vec->rx_ring;
- struct nfp_net_tx_ring *tx_ring = r_vec->tx_ring;
- struct nfp_net *nn = r_vec->nfp_net;
- struct netdev_queue *txq;
- unsigned int pkts_polled;
-
- tx_ring = &nn->tx_rings[rx_ring->idx];
- txq = netdev_get_tx_queue(nn->netdev, tx_ring->idx);
- nfp_net_tx_complete(tx_ring);
+ unsigned int pkts_polled = 0;
- pkts_polled = nfp_net_rx(rx_ring, budget);
+ if (r_vec->tx_ring)
+ nfp_net_tx_complete(r_vec->tx_ring);
+ if (r_vec->rx_ring) {
+ pkts_polled = nfp_net_rx(r_vec->rx_ring, budget);
+ if (r_vec->xdp_ring)
+ nfp_net_xdp_complete(r_vec->xdp_ring);
+ }
if (pkts_polled < budget) {
napi_complete_done(napi, pkts_polled);
- nfp_net_irq_unmask(nn, r_vec->irq_idx);
+ nfp_net_irq_unmask(r_vec->nfp_net, r_vec->irq_idx);
}
return pkts_polled;
@@ -1575,10 +1737,12 @@ static void nfp_net_tx_ring_free(struct nfp_net_tx_ring *tx_ring)
* nfp_net_tx_ring_alloc() - Allocate resource for a TX ring
* @tx_ring: TX Ring structure to allocate
* @cnt: Ring buffer count
+ * @is_xdp: True if ring will be used for XDP
*
* Return: 0 on success, negative errno otherwise.
*/
-static int nfp_net_tx_ring_alloc(struct nfp_net_tx_ring *tx_ring, u32 cnt)
+static int
+nfp_net_tx_ring_alloc(struct nfp_net_tx_ring *tx_ring, u32 cnt, bool is_xdp)
{
struct nfp_net_r_vector *r_vec = tx_ring->r_vec;
struct nfp_net *nn = r_vec->nfp_net;
@@ -1598,11 +1762,14 @@ static int nfp_net_tx_ring_alloc(struct nfp_net_tx_ring *tx_ring, u32 cnt)
if (!tx_ring->txbufs)
goto err_alloc;
- netif_set_xps_queue(nn->netdev, &r_vec->affinity_mask, tx_ring->idx);
+ if (!is_xdp)
+ netif_set_xps_queue(nn->netdev, &r_vec->affinity_mask,
+ tx_ring->idx);
- nn_dbg(nn, "TxQ%02d: QCidx=%02d cnt=%d dma=%#llx host=%p\n",
+ nn_dbg(nn, "TxQ%02d: QCidx=%02d cnt=%d dma=%#llx host=%p %s\n",
tx_ring->idx, tx_ring->qcidx,
- tx_ring->cnt, (unsigned long long)tx_ring->dma, tx_ring->txds);
+ tx_ring->cnt, (unsigned long long)tx_ring->dma, tx_ring->txds,
+ is_xdp ? "XDP" : "");
return 0;
@@ -1612,23 +1779,29 @@ err_alloc:
}
static struct nfp_net_tx_ring *
-nfp_net_shadow_tx_rings_prepare(struct nfp_net *nn, u32 buf_cnt)
+nfp_net_tx_ring_set_prepare(struct nfp_net *nn, struct nfp_net_ring_set *s,
+ unsigned int num_stack_tx_rings)
{
struct nfp_net_tx_ring *rings;
unsigned int r;
- rings = kcalloc(nn->num_tx_rings, sizeof(*rings), GFP_KERNEL);
+ rings = kcalloc(s->n_rings, sizeof(*rings), GFP_KERNEL);
if (!rings)
return NULL;
- for (r = 0; r < nn->num_tx_rings; r++) {
- nfp_net_tx_ring_init(&rings[r], nn->tx_rings[r].r_vec, r);
+ for (r = 0; r < s->n_rings; r++) {
+ int bias = 0;
+
+ if (r >= num_stack_tx_rings)
+ bias = num_stack_tx_rings;
- if (nfp_net_tx_ring_alloc(&rings[r], buf_cnt))
+ nfp_net_tx_ring_init(&rings[r], &nn->r_vecs[r - bias], r);
+
+ if (nfp_net_tx_ring_alloc(&rings[r], s->dcnt, bias))
goto err_free_prev;
}
- return rings;
+ return s->rings = rings;
err_free_prev:
while (r--)
@@ -1637,28 +1810,27 @@ err_free_prev:
return NULL;
}
-static struct nfp_net_tx_ring *
-nfp_net_shadow_tx_rings_swap(struct nfp_net *nn, struct nfp_net_tx_ring *rings)
+static void
+nfp_net_tx_ring_set_swap(struct nfp_net *nn, struct nfp_net_ring_set *s)
{
- struct nfp_net_tx_ring *old = nn->tx_rings;
- unsigned int r;
+ struct nfp_net_ring_set new = *s;
- for (r = 0; r < nn->num_tx_rings; r++)
- old[r].r_vec->tx_ring = &rings[r];
+ s->dcnt = nn->txd_cnt;
+ s->rings = nn->tx_rings;
+ s->n_rings = nn->num_tx_rings;
- nn->tx_rings = rings;
- return old;
+ nn->txd_cnt = new.dcnt;
+ nn->tx_rings = new.rings;
+ nn->num_tx_rings = new.n_rings;
}
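The shadow-ring helpers become a prepare/swap/free triple around struct nfp_net_ring_set: build the new rings while the old ones still carry traffic, quiesce, exchange the sets (the struct copy in _set_swap() means the set that came in describing the new geometry leaves holding the retired rings), resume, then free. A condensed sketch; the close/open calls stand in for the driver's actual quiesce and resume steps, and error handling is elided:

    static int reconfig_tx_sketch(struct nfp_net *nn,
                                  struct nfp_net_ring_set *s)
    {
            if (!nfp_net_tx_ring_set_prepare(nn, s, nn->num_stack_tx_rings))
                    return -ENOMEM;   /* old rings untouched on failure */

            /* quiesce the device (stand-in for the driver's stop path) */
            nfp_net_close_stack(nn);
            nfp_net_tx_ring_set_swap(nn, s); /* s now holds the old set */
            nfp_net_open_stack(nn);

            nfp_net_tx_ring_set_free(nn, s); /* retire the old rings */
            return 0;
    }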
static void
-nfp_net_shadow_tx_rings_free(struct nfp_net *nn, struct nfp_net_tx_ring *rings)
+nfp_net_tx_ring_set_free(struct nfp_net *nn, struct nfp_net_ring_set *s)
{
+ struct nfp_net_tx_ring *rings = s->rings;
unsigned int r;
- if (!rings)
- return;
-
- for (r = 0; r < nn->num_tx_rings; r++)
+ for (r = 0; r < s->n_rings; r++)
nfp_net_tx_ring_free(&rings[r]);
kfree(rings);
@@ -1730,31 +1902,32 @@ err_alloc:
}
static struct nfp_net_rx_ring *
-nfp_net_shadow_rx_rings_prepare(struct nfp_net *nn, unsigned int fl_bufsz,
- u32 buf_cnt)
+nfp_net_rx_ring_set_prepare(struct nfp_net *nn, struct nfp_net_ring_set *s,
+ bool xdp)
{
+ unsigned int fl_bufsz = nfp_net_calc_fl_bufsz(nn, s->mtu);
struct nfp_net_rx_ring *rings;
unsigned int r;
- rings = kcalloc(nn->num_rx_rings, sizeof(*rings), GFP_KERNEL);
+ rings = kcalloc(s->n_rings, sizeof(*rings), GFP_KERNEL);
if (!rings)
return NULL;
- for (r = 0; r < nn->num_rx_rings; r++) {
- nfp_net_rx_ring_init(&rings[r], nn->rx_rings[r].r_vec, r);
+ for (r = 0; r < s->n_rings; r++) {
+ nfp_net_rx_ring_init(&rings[r], &nn->r_vecs[r], r);
- if (nfp_net_rx_ring_alloc(&rings[r], fl_bufsz, buf_cnt))
+ if (nfp_net_rx_ring_alloc(&rings[r], fl_bufsz, s->dcnt))
goto err_free_prev;
- if (nfp_net_rx_ring_bufs_alloc(nn, &rings[r]))
+ if (nfp_net_rx_ring_bufs_alloc(nn, &rings[r], xdp))
goto err_free_ring;
}
- return rings;
+ return s->rings = rings;
err_free_prev:
while (r--) {
- nfp_net_rx_ring_bufs_free(nn, &rings[r]);
+ nfp_net_rx_ring_bufs_free(nn, &rings[r], xdp);
err_free_ring:
nfp_net_rx_ring_free(&rings[r]);
}
@@ -1762,35 +1935,50 @@ err_free_ring:
return NULL;
}
-static struct nfp_net_rx_ring *
-nfp_net_shadow_rx_rings_swap(struct nfp_net *nn, struct nfp_net_rx_ring *rings)
+static void
+nfp_net_rx_ring_set_swap(struct nfp_net *nn, struct nfp_net_ring_set *s)
{
- struct nfp_net_rx_ring *old = nn->rx_rings;
- unsigned int r;
+ struct nfp_net_ring_set new = *s;
- for (r = 0; r < nn->num_rx_rings; r++)
- old[r].r_vec->rx_ring = &rings[r];
+ s->mtu = nn->netdev->mtu;
+ s->dcnt = nn->rxd_cnt;
+ s->rings = nn->rx_rings;
+ s->n_rings = nn->num_rx_rings;
- nn->rx_rings = rings;
- return old;
+ nn->netdev->mtu = new.mtu;
+ nn->fl_bufsz = nfp_net_calc_fl_bufsz(nn, new.mtu);
+ nn->rxd_cnt = new.dcnt;
+ nn->rx_rings = new.rings;
+ nn->num_rx_rings = new.n_rings;
}
static void
-nfp_net_shadow_rx_rings_free(struct nfp_net *nn, struct nfp_net_rx_ring *rings)
+nfp_net_rx_ring_set_free(struct nfp_net *nn, struct nfp_net_ring_set *s,
+ bool xdp)
{
+ struct nfp_net_rx_ring *rings = s->rings;
unsigned int r;
- if (!rings)
- return;
-
- for (r = 0; r < nn->num_r_vecs; r++) {
- nfp_net_rx_ring_bufs_free(nn, &rings[r]);
+ for (r = 0; r < s->n_rings; r++) {
+ nfp_net_rx_ring_bufs_free(nn, &rings[r], xdp);
nfp_net_rx_ring_free(&rings[r]);
}
kfree(rings);
}
+static void
+nfp_net_vector_assign_rings(struct nfp_net *nn, struct nfp_net_r_vector *r_vec,
+ int idx)
+{
+ r_vec->rx_ring = idx < nn->num_rx_rings ? &nn->rx_rings[idx] : NULL;
+ r_vec->tx_ring =
+ idx < nn->num_stack_tx_rings ? &nn->tx_rings[idx] : NULL;
+
+ r_vec->xdp_ring = idx < nn->num_tx_rings - nn->num_stack_tx_rings ?
+ &nn->tx_rings[nn->num_stack_tx_rings + idx] : NULL;
+}
+
static int
nfp_net_prepare_vector(struct nfp_net *nn, struct nfp_net_r_vector *r_vec,
int idx)
@@ -1798,25 +1986,20 @@ nfp_net_prepare_vector(struct nfp_net *nn, struct nfp_net_r_vector *r_vec,
struct msix_entry *entry = &nn->irq_entries[r_vec->irq_idx];
int err;
- r_vec->tx_ring = &nn->tx_rings[idx];
- nfp_net_tx_ring_init(r_vec->tx_ring, r_vec, idx);
-
- r_vec->rx_ring = &nn->rx_rings[idx];
- nfp_net_rx_ring_init(r_vec->rx_ring, r_vec, idx);
+ /* Setup NAPI */
+ netif_napi_add(nn->netdev, &r_vec->napi,
+ nfp_net_poll, NAPI_POLL_WEIGHT);
snprintf(r_vec->name, sizeof(r_vec->name),
"%s-rxtx-%d", nn->netdev->name, idx);
err = request_irq(entry->vector, r_vec->handler, 0, r_vec->name, r_vec);
if (err) {
+ netif_napi_del(&r_vec->napi);
nn_err(nn, "Error requesting IRQ %d\n", entry->vector);
return err;
}
disable_irq(entry->vector);
- /* Setup NAPI */
- netif_napi_add(nn->netdev, &r_vec->napi,
- nfp_net_poll, NAPI_POLL_WEIGHT);
-
irq_set_affinity_hint(entry->vector, &r_vec->affinity_mask);
nn_dbg(nn, "RV%02d: irq=%03d/%03d\n", idx, entry->vector, entry->entry);
@@ -1879,13 +2062,13 @@ void nfp_net_coalesce_write_cfg(struct nfp_net *nn)
/* copy RX interrupt coalesce parameters */
value = (nn->rx_coalesce_max_frames << 16) |
(factor * nn->rx_coalesce_usecs);
- for (i = 0; i < nn->num_r_vecs; i++)
+ for (i = 0; i < nn->num_rx_rings; i++)
nn_writel(nn, NFP_NET_CFG_RXR_IRQ_MOD(i), value);
/* copy TX interrupt coalesce parameters */
value = (nn->tx_coalesce_max_frames << 16) |
(factor * nn->tx_coalesce_usecs);
- for (i = 0; i < nn->num_r_vecs; i++)
+ for (i = 0; i < nn->num_tx_rings; i++)
nn_writel(nn, NFP_NET_CFG_TXR_IRQ_MOD(i), value);
}
@@ -1901,9 +2084,8 @@ static void nfp_net_write_mac_addr(struct nfp_net *nn)
{
nn_writel(nn, NFP_NET_CFG_MACADDR + 0,
get_unaligned_be32(nn->netdev->dev_addr));
- /* We can't do writew for NFP-3200 compatibility */
- nn_writel(nn, NFP_NET_CFG_MACADDR + 4,
- get_unaligned_be16(nn->netdev->dev_addr + 4) << 16);
+ nn_writew(nn, NFP_NET_CFG_MACADDR + 6,
+ get_unaligned_be16(nn->netdev->dev_addr + 4));
}
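With the NFP-3200 restriction gone, the MAC tail is written as a natural 16-bit store at offset 6 instead of being widened into a shifted 32-bit store at offset 4; both forms land the same two bytes in the BAR. Byte layout sketch for dev_addr aa:bb:cc:dd:ee:ff on little-endian MMIO:

    /* nn_writel(MACADDR + 0, 0xaabbccdd)   -> BAR[0..3] = dd cc bb aa
     * nn_writew(MACADDR + 6, 0xeeff)       -> BAR[6..7] = ff ee
     *
     * removed form:
     * nn_writel(MACADDR + 4, 0xeeff << 16) -> BAR[4..7] = 00 00 ff ee
     */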
static void nfp_net_vec_clear_ring_data(struct nfp_net *nn, unsigned int idx)
@@ -1944,27 +2126,33 @@ static void nfp_net_clear_config_and_disable(struct nfp_net *nn)
if (err)
nn_err(nn, "Could not disable device: %d\n", err);
- for (r = 0; r < nn->num_r_vecs; r++) {
- nfp_net_rx_ring_reset(nn->r_vecs[r].rx_ring);
- nfp_net_tx_ring_reset(nn, nn->r_vecs[r].tx_ring);
+ for (r = 0; r < nn->num_rx_rings; r++)
+ nfp_net_rx_ring_reset(&nn->rx_rings[r]);
+ for (r = 0; r < nn->num_tx_rings; r++)
+ nfp_net_tx_ring_reset(nn, &nn->tx_rings[r]);
+ for (r = 0; r < nn->num_r_vecs; r++)
nfp_net_vec_clear_ring_data(nn, r);
- }
nn->ctrl = new_ctrl;
}
static void
-nfp_net_vec_write_ring_data(struct nfp_net *nn, struct nfp_net_r_vector *r_vec,
- unsigned int idx)
+nfp_net_rx_ring_hw_cfg_write(struct nfp_net *nn,
+ struct nfp_net_rx_ring *rx_ring, unsigned int idx)
{
/* Write the DMA address, size and MSI-X info to the device */
- nn_writeq(nn, NFP_NET_CFG_RXR_ADDR(idx), r_vec->rx_ring->dma);
- nn_writeb(nn, NFP_NET_CFG_RXR_SZ(idx), ilog2(r_vec->rx_ring->cnt));
- nn_writeb(nn, NFP_NET_CFG_RXR_VEC(idx), r_vec->irq_idx);
+ nn_writeq(nn, NFP_NET_CFG_RXR_ADDR(idx), rx_ring->dma);
+ nn_writeb(nn, NFP_NET_CFG_RXR_SZ(idx), ilog2(rx_ring->cnt));
+ nn_writeb(nn, NFP_NET_CFG_RXR_VEC(idx), rx_ring->r_vec->irq_idx);
+}
- nn_writeq(nn, NFP_NET_CFG_TXR_ADDR(idx), r_vec->tx_ring->dma);
- nn_writeb(nn, NFP_NET_CFG_TXR_SZ(idx), ilog2(r_vec->tx_ring->cnt));
- nn_writeb(nn, NFP_NET_CFG_TXR_VEC(idx), r_vec->irq_idx);
+static void
+nfp_net_tx_ring_hw_cfg_write(struct nfp_net *nn,
+ struct nfp_net_tx_ring *tx_ring, unsigned int idx)
+{
+ nn_writeq(nn, NFP_NET_CFG_TXR_ADDR(idx), tx_ring->dma);
+ nn_writeb(nn, NFP_NET_CFG_TXR_SZ(idx), ilog2(tx_ring->cnt));
+ nn_writeb(nn, NFP_NET_CFG_TXR_VEC(idx), tx_ring->r_vec->irq_idx);
}
static int __nfp_net_set_config_and_enable(struct nfp_net *nn)
@@ -1989,8 +2177,10 @@ static int __nfp_net_set_config_and_enable(struct nfp_net *nn)
update |= NFP_NET_CFG_UPDATE_IRQMOD;
}
- for (r = 0; r < nn->num_r_vecs; r++)
- nfp_net_vec_write_ring_data(nn, &nn->r_vecs[r], r);
+ for (r = 0; r < nn->num_tx_rings; r++)
+ nfp_net_tx_ring_hw_cfg_write(nn, &nn->tx_rings[r], r);
+ for (r = 0; r < nn->num_rx_rings; r++)
+ nfp_net_rx_ring_hw_cfg_write(nn, &nn->rx_rings[r], r);
nn_writeq(nn, NFP_NET_CFG_TXRS_ENABLE, nn->num_tx_rings == 64 ?
0xffffffffffffffffULL : ((u64)1 << nn->num_tx_rings) - 1);
@@ -2016,8 +2206,8 @@ static int __nfp_net_set_config_and_enable(struct nfp_net *nn)
nn->ctrl = new_ctrl;
- for (r = 0; r < nn->num_r_vecs; r++)
- nfp_net_rx_ring_fill_freelist(nn->r_vecs[r].rx_ring);
+ for (r = 0; r < nn->num_rx_rings; r++)
+ nfp_net_rx_ring_fill_freelist(&nn->rx_rings[r]);
/* Since reconfiguration requests while NFP is down are ignored, we
* have to wipe the entire VXLAN configuration and reinitialize it.
@@ -2068,6 +2258,15 @@ static void nfp_net_open_stack(struct nfp_net *nn)
static int nfp_net_netdev_open(struct net_device *netdev)
{
struct nfp_net *nn = netdev_priv(netdev);
+ struct nfp_net_ring_set rx = {
+ .n_rings = nn->num_rx_rings,
+ .mtu = nn->netdev->mtu,
+ .dcnt = nn->rxd_cnt,
+ };
+ struct nfp_net_ring_set tx = {
+ .n_rings = nn->num_tx_rings,
+ .dcnt = nn->txd_cnt,
+ };
int err, r;
if (nn->ctrl & NFP_NET_CFG_CTRL_ENABLE) {
@@ -2092,39 +2291,29 @@ static int nfp_net_netdev_open(struct net_device *netdev)
goto err_free_exn;
disable_irq(nn->irq_entries[NFP_NET_IRQ_LSC_IDX].vector);
- nn->rx_rings = kcalloc(nn->num_rx_rings, sizeof(*nn->rx_rings),
- GFP_KERNEL);
+ for (r = 0; r < nn->num_r_vecs; r++) {
+ err = nfp_net_prepare_vector(nn, &nn->r_vecs[r], r);
+ if (err)
+ goto err_cleanup_vec_p;
+ }
+
+ nn->rx_rings = nfp_net_rx_ring_set_prepare(nn, &rx, nn->xdp_prog);
if (!nn->rx_rings) {
err = -ENOMEM;
- goto err_free_lsc;
+ goto err_cleanup_vec;
}
- nn->tx_rings = kcalloc(nn->num_tx_rings, sizeof(*nn->tx_rings),
- GFP_KERNEL);
+
+ nn->tx_rings = nfp_net_tx_ring_set_prepare(nn, &tx,
+ nn->num_stack_tx_rings);
if (!nn->tx_rings) {
err = -ENOMEM;
goto err_free_rx_rings;
}
- for (r = 0; r < nn->num_r_vecs; r++) {
- err = nfp_net_prepare_vector(nn, &nn->r_vecs[r], r);
- if (err)
- goto err_free_prev_vecs;
+ for (r = 0; r < nn->max_r_vecs; r++)
+ nfp_net_vector_assign_rings(nn, &nn->r_vecs[r], r);
- err = nfp_net_tx_ring_alloc(nn->r_vecs[r].tx_ring, nn->txd_cnt);
- if (err)
- goto err_cleanup_vec_p;
-
- err = nfp_net_rx_ring_alloc(nn->r_vecs[r].rx_ring,
- nn->fl_bufsz, nn->rxd_cnt);
- if (err)
- goto err_free_tx_ring_p;
-
- err = nfp_net_rx_ring_bufs_alloc(nn, nn->r_vecs[r].rx_ring);
- if (err)
- goto err_flush_rx_ring_p;
- }
-
- err = netif_set_real_num_tx_queues(netdev, nn->num_tx_rings);
+ err = netif_set_real_num_tx_queues(netdev, nn->num_stack_tx_rings);
if (err)
goto err_free_rings;
@@ -2154,21 +2343,14 @@ static int nfp_net_netdev_open(struct net_device *netdev)
return 0;
err_free_rings:
+ nfp_net_tx_ring_set_free(nn, &tx);
+err_free_rx_rings:
+ nfp_net_rx_ring_set_free(nn, &rx, nn->xdp_prog);
+err_cleanup_vec:
r = nn->num_r_vecs;
-err_free_prev_vecs:
- while (r--) {
- nfp_net_rx_ring_bufs_free(nn, nn->r_vecs[r].rx_ring);
-err_flush_rx_ring_p:
- nfp_net_rx_ring_free(nn->r_vecs[r].rx_ring);
-err_free_tx_ring_p:
- nfp_net_tx_ring_free(nn->r_vecs[r].tx_ring);
err_cleanup_vec_p:
+ while (r--)
nfp_net_cleanup_vector(nn, &nn->r_vecs[r]);
- }
- kfree(nn->tx_rings);
-err_free_rx_rings:
- kfree(nn->rx_rings);
-err_free_lsc:
nfp_net_aux_irq_free(nn, NFP_NET_CFG_LSC, NFP_NET_IRQ_LSC_IDX);
err_free_exn:
nfp_net_aux_irq_free(nn, NFP_NET_CFG_EXN, NFP_NET_IRQ_EXN_IDX);
@@ -2203,12 +2385,14 @@ static void nfp_net_close_free_all(struct nfp_net *nn)
{
unsigned int r;
- for (r = 0; r < nn->num_r_vecs; r++) {
- nfp_net_rx_ring_bufs_free(nn, nn->r_vecs[r].rx_ring);
- nfp_net_rx_ring_free(nn->r_vecs[r].rx_ring);
- nfp_net_tx_ring_free(nn->r_vecs[r].tx_ring);
- nfp_net_cleanup_vector(nn, &nn->r_vecs[r]);
+ for (r = 0; r < nn->num_rx_rings; r++) {
+ nfp_net_rx_ring_bufs_free(nn, &nn->rx_rings[r], nn->xdp_prog);
+ nfp_net_rx_ring_free(&nn->rx_rings[r]);
}
+ for (r = 0; r < nn->num_tx_rings; r++)
+ nfp_net_tx_ring_free(&nn->tx_rings[r]);
+ for (r = 0; r < nn->num_r_vecs; r++)
+ nfp_net_cleanup_vector(nn, &nn->r_vecs[r]);
kfree(nn->rx_rings);
kfree(nn->tx_rings);
@@ -2271,94 +2455,135 @@ static void nfp_net_set_rx_mode(struct net_device *netdev)
nn->ctrl = new_ctrl;
}
-static int nfp_net_change_mtu(struct net_device *netdev, int new_mtu)
+static void nfp_net_rss_init_itbl(struct nfp_net *nn)
{
- unsigned int old_mtu, old_fl_bufsz, new_fl_bufsz;
- struct nfp_net *nn = netdev_priv(netdev);
- struct nfp_net_rx_ring *tmp_rings;
- int err;
-
- if (new_mtu < 68 || new_mtu > nn->max_mtu) {
- nn_err(nn, "New MTU (%d) is not valid\n", new_mtu);
- return -EINVAL;
- }
-
- old_mtu = netdev->mtu;
- old_fl_bufsz = nn->fl_bufsz;
- new_fl_bufsz = NFP_NET_MAX_PREPEND + ETH_HLEN + VLAN_HLEN * 2 + new_mtu;
-
- if (!netif_running(netdev)) {
- netdev->mtu = new_mtu;
- nn->fl_bufsz = new_fl_bufsz;
- return 0;
- }
+ int i;
- /* Prepare new rings */
- tmp_rings = nfp_net_shadow_rx_rings_prepare(nn, new_fl_bufsz,
- nn->rxd_cnt);
- if (!tmp_rings)
- return -ENOMEM;
+ for (i = 0; i < sizeof(nn->rss_itbl); i++)
+ nn->rss_itbl[i] =
+ ethtool_rxfh_indir_default(i, nn->num_rx_rings);
+}
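ethtool_rxfh_indir_default(i, n) reduces to i % n, so the indirection table is a plain round-robin over the RX rings, e.g. for num_rx_rings == 4:

    /* rss_itbl[] = 0, 1, 2, 3, 0, 1, 2, 3, ... */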
- /* Stop device, swap in new rings, try to start the firmware */
- nfp_net_close_stack(nn);
- nfp_net_clear_config_and_disable(nn);
+static int
+nfp_net_ring_swap_enable(struct nfp_net *nn, unsigned int *num_vecs,
+ unsigned int *stack_tx_rings,
+ struct bpf_prog **xdp_prog,
+ struct nfp_net_ring_set *rx,
+ struct nfp_net_ring_set *tx)
+{
+ unsigned int r;
+ int err;
- tmp_rings = nfp_net_shadow_rx_rings_swap(nn, tmp_rings);
+ if (rx)
+ nfp_net_rx_ring_set_swap(nn, rx);
+ if (tx)
+ nfp_net_tx_ring_set_swap(nn, tx);
- netdev->mtu = new_mtu;
- nn->fl_bufsz = new_fl_bufsz;
+ swap(*num_vecs, nn->num_r_vecs);
+ swap(*stack_tx_rings, nn->num_stack_tx_rings);
+ *xdp_prog = xchg(&nn->xdp_prog, *xdp_prog);
- err = nfp_net_set_config_and_enable(nn);
- if (err) {
- const int err_new = err;
+ for (r = 0; r < nn->max_r_vecs; r++)
+ nfp_net_vector_assign_rings(nn, &nn->r_vecs[r], r);
- /* Try with old configuration and old rings */
- tmp_rings = nfp_net_shadow_rx_rings_swap(nn, tmp_rings);
+ if (!netif_is_rxfh_configured(nn->netdev))
+ nfp_net_rss_init_itbl(nn);
- netdev->mtu = old_mtu;
- nn->fl_bufsz = old_fl_bufsz;
+ err = netif_set_real_num_rx_queues(nn->netdev,
+ nn->num_rx_rings);
+ if (err)
+ return err;
- err = __nfp_net_set_config_and_enable(nn);
+ if (nn->netdev->real_num_tx_queues != nn->num_stack_tx_rings) {
+ err = netif_set_real_num_tx_queues(nn->netdev,
+ nn->num_stack_tx_rings);
if (err)
- nn_err(nn, "Can't restore MTU - FW communication failed (%d,%d)\n",
- err_new, err);
+ return err;
}
- nfp_net_shadow_rx_rings_free(nn, tmp_rings);
+ return __nfp_net_set_config_and_enable(nn);
+}
- nfp_net_open_stack(nn);
+static int
+nfp_net_check_config(struct nfp_net *nn, struct bpf_prog *xdp_prog,
+ struct nfp_net_ring_set *rx, struct nfp_net_ring_set *tx)
+{
+ /* XDP-enabled tests */
+ if (!xdp_prog)
+ return 0;
+ if (rx && nfp_net_calc_fl_bufsz(nn, rx->mtu) > PAGE_SIZE) {
+ nn_warn(nn, "MTU too large w/ XDP enabled\n");
+ return -EINVAL;
+ }
+ if (tx && tx->n_rings > nn->max_tx_rings) {
+ nn_warn(nn, "Insufficient number of TX rings w/ XDP enabled\n");
+ return -EINVAL;
+ }
- return err;
+ return 0;
}
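A rough bound implied by the PAGE_SIZE test, assuming the freelist buffer size follows the pre-XDP formula used elsewhere in this patch (prepend + ETH_HLEN + 2 * VLAN_HLEN + MTU):

    /* With 4096 B pages the largest workable XDP MTU is about
     * 4096 - NFP_NET_MAX_PREPEND - ETH_HLEN - 2 * VLAN_HLEN.
     */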
-int nfp_net_set_ring_size(struct nfp_net *nn, u32 rxd_cnt, u32 txd_cnt)
-{
- struct nfp_net_tx_ring *tx_rings = NULL;
- struct nfp_net_rx_ring *rx_rings = NULL;
- u32 old_rxd_cnt, old_txd_cnt;
+static void
+nfp_net_ring_reconfig_down(struct nfp_net *nn, struct bpf_prog **xdp_prog,
+ struct nfp_net_ring_set *rx,
+ struct nfp_net_ring_set *tx,
+ unsigned int stack_tx_rings, unsigned int num_vecs)
+{
+ nn->netdev->mtu = rx ? rx->mtu : nn->netdev->mtu;
+ nn->fl_bufsz = nfp_net_calc_fl_bufsz(nn, nn->netdev->mtu);
+ nn->rxd_cnt = rx ? rx->dcnt : nn->rxd_cnt;
+ nn->txd_cnt = tx ? tx->dcnt : nn->txd_cnt;
+ nn->num_rx_rings = rx ? rx->n_rings : nn->num_rx_rings;
+ nn->num_tx_rings = tx ? tx->n_rings : nn->num_tx_rings;
+ nn->num_stack_tx_rings = stack_tx_rings;
+ nn->num_r_vecs = num_vecs;
+ *xdp_prog = xchg(&nn->xdp_prog, *xdp_prog);
+
+ if (!netif_is_rxfh_configured(nn->netdev))
+ nfp_net_rss_init_itbl(nn);
+}
+
+int
+nfp_net_ring_reconfig(struct nfp_net *nn, struct bpf_prog **xdp_prog,
+ struct nfp_net_ring_set *rx, struct nfp_net_ring_set *tx)
+{
+ unsigned int stack_tx_rings, num_vecs, r;
int err;
+ stack_tx_rings = tx ? tx->n_rings : nn->num_tx_rings;
+ if (*xdp_prog)
+ stack_tx_rings -= rx ? rx->n_rings : nn->num_rx_rings;
+
+ num_vecs = max(rx ? rx->n_rings : nn->num_rx_rings, stack_tx_rings);
+
+ err = nfp_net_check_config(nn, *xdp_prog, rx, tx);
+ if (err)
+ return err;
+
if (!netif_running(nn->netdev)) {
- nn->rxd_cnt = rxd_cnt;
- nn->txd_cnt = txd_cnt;
+ nfp_net_ring_reconfig_down(nn, xdp_prog, rx, tx,
+ stack_tx_rings, num_vecs);
return 0;
}
- old_rxd_cnt = nn->rxd_cnt;
- old_txd_cnt = nn->txd_cnt;
-
/* Prepare new rings */
- if (nn->rxd_cnt != rxd_cnt) {
- rx_rings = nfp_net_shadow_rx_rings_prepare(nn, nn->fl_bufsz,
- rxd_cnt);
- if (!rx_rings)
- return -ENOMEM;
+ for (r = nn->num_r_vecs; r < num_vecs; r++) {
+ err = nfp_net_prepare_vector(nn, &nn->r_vecs[r], r);
+ if (err) {
+ num_vecs = r;
+ goto err_cleanup_vecs;
+ }
}
- if (nn->txd_cnt != txd_cnt) {
- tx_rings = nfp_net_shadow_tx_rings_prepare(nn, txd_cnt);
- if (!tx_rings) {
- nfp_net_shadow_rx_rings_free(nn, rx_rings);
- return -ENOMEM;
+ if (rx) {
+ if (!nfp_net_rx_ring_set_prepare(nn, rx, *xdp_prog)) {
+ err = -ENOMEM;
+ goto err_cleanup_vecs;
+ }
+ }
+ if (tx) {
+ if (!nfp_net_tx_ring_set_prepare(nn, tx, stack_tx_rings)) {
+ err = -ENOMEM;
+ goto err_free_rx;
}
}
@@ -2366,39 +2591,51 @@ int nfp_net_set_ring_size(struct nfp_net *nn, u32 rxd_cnt, u32 txd_cnt)
nfp_net_close_stack(nn);
nfp_net_clear_config_and_disable(nn);
- if (rx_rings)
- rx_rings = nfp_net_shadow_rx_rings_swap(nn, rx_rings);
- if (tx_rings)
- tx_rings = nfp_net_shadow_tx_rings_swap(nn, tx_rings);
-
- nn->rxd_cnt = rxd_cnt;
- nn->txd_cnt = txd_cnt;
-
- err = nfp_net_set_config_and_enable(nn);
+ err = nfp_net_ring_swap_enable(nn, &num_vecs, &stack_tx_rings,
+ xdp_prog, rx, tx);
if (err) {
- const int err_new = err;
-
- /* Try with old configuration and old rings */
- if (rx_rings)
- rx_rings = nfp_net_shadow_rx_rings_swap(nn, rx_rings);
- if (tx_rings)
- tx_rings = nfp_net_shadow_tx_rings_swap(nn, tx_rings);
+ int err2;
- nn->rxd_cnt = old_rxd_cnt;
- nn->txd_cnt = old_txd_cnt;
+ nfp_net_clear_config_and_disable(nn);
- err = __nfp_net_set_config_and_enable(nn);
- if (err)
+ /* Try with old configuration and old rings */
+ err2 = nfp_net_ring_swap_enable(nn, &num_vecs, &stack_tx_rings,
+ xdp_prog, rx, tx);
+ if (err2)
nn_err(nn, "Can't restore ring config - FW communication failed (%d,%d)\n",
- err_new, err);
+ err, err2);
}
+ for (r = num_vecs - 1; r >= nn->num_r_vecs; r--)
+ nfp_net_cleanup_vector(nn, &nn->r_vecs[r]);
- nfp_net_shadow_rx_rings_free(nn, rx_rings);
- nfp_net_shadow_tx_rings_free(nn, tx_rings);
+ if (rx)
+ nfp_net_rx_ring_set_free(nn, rx, *xdp_prog);
+ if (tx)
+ nfp_net_tx_ring_set_free(nn, tx);
nfp_net_open_stack(nn);
return err;
+
+err_free_rx:
+ if (rx)
+ nfp_net_rx_ring_set_free(nn, rx, *xdp_prog);
+err_cleanup_vecs:
+ for (r = num_vecs - 1; r >= nn->num_r_vecs; r--)
+ nfp_net_cleanup_vector(nn, &nn->r_vecs[r]);
+ return err;
+}
+
+static int nfp_net_change_mtu(struct net_device *netdev, int new_mtu)
+{
+ struct nfp_net *nn = netdev_priv(netdev);
+ struct nfp_net_ring_set rx = {
+ .n_rings = nn->num_rx_rings,
+ .mtu = new_mtu,
+ .dcnt = nn->rxd_cnt,
+ };
+
+ return nfp_net_ring_reconfig(nn, &nn->xdp_prog, &rx, NULL);
}
static struct rtnl_link_stats64 *nfp_net_stat64(struct net_device *netdev,
@@ -2455,8 +2692,12 @@ nfp_net_setup_tc(struct net_device *netdev, u32 handle, __be16 proto,
if (proto != htons(ETH_P_ALL))
return -ENOTSUPP;
- if (tc->type == TC_SETUP_CLSBPF && nfp_net_ebpf_capable(nn))
- return nfp_net_bpf_offload(nn, handle, proto, tc->cls_bpf);
+ if (tc->type == TC_SETUP_CLSBPF && nfp_net_ebpf_capable(nn)) {
+ if (!nn->bpf_offload_xdp)
+ return nfp_net_bpf_offload(nn, tc->cls_bpf);
+ else
+ return -EBUSY;
+ }
return -EINVAL;
}
@@ -2664,6 +2905,91 @@ static void nfp_net_del_vxlan_port(struct net_device *netdev,
nfp_net_set_vxlan_port(nn, idx, 0);
}
+static int nfp_net_xdp_offload(struct nfp_net *nn, struct bpf_prog *prog)
+{
+ struct tc_cls_bpf_offload cmd = {
+ .prog = prog,
+ };
+ int ret;
+
+ if (!nfp_net_ebpf_capable(nn))
+ return -EINVAL;
+
+ if (nn->ctrl & NFP_NET_CFG_CTRL_BPF) {
+ if (!nn->bpf_offload_xdp)
+ return prog ? -EBUSY : 0;
+ cmd.command = prog ? TC_CLSBPF_REPLACE : TC_CLSBPF_DESTROY;
+ } else {
+ if (!prog)
+ return 0;
+ cmd.command = TC_CLSBPF_ADD;
+ }
+
+ ret = nfp_net_bpf_offload(nn, &cmd);
+ /* Stop offload if replace not possible */
+ if (ret && cmd.command == TC_CLSBPF_REPLACE)
+ nfp_net_xdp_offload(nn, NULL);
+ nn->bpf_offload_xdp = prog && !ret;
+ return ret;
+}
+
+static int nfp_net_xdp_setup(struct nfp_net *nn, struct bpf_prog *prog)
+{
+ struct nfp_net_ring_set rx = {
+ .n_rings = nn->num_rx_rings,
+ .mtu = nn->netdev->mtu,
+ .dcnt = nn->rxd_cnt,
+ };
+ struct nfp_net_ring_set tx = {
+ .n_rings = nn->num_tx_rings,
+ .dcnt = nn->txd_cnt,
+ };
+ int err;
+
+ if (prog && prog->xdp_adjust_head) {
+ nn_err(nn, "Does not support bpf_xdp_adjust_head()\n");
+ return -EOPNOTSUPP;
+ }
+ if (!prog && !nn->xdp_prog)
+ return 0;
+ if (prog && nn->xdp_prog) {
+ prog = xchg(&nn->xdp_prog, prog);
+ bpf_prog_put(prog);
+ nfp_net_xdp_offload(nn, nn->xdp_prog);
+ return 0;
+ }
+
+ tx.n_rings += prog ? nn->num_rx_rings : -nn->num_rx_rings;
+
+ /* We need RX reconfig to remap the buffers (BIDIR vs FROM_DEV) */
+ err = nfp_net_ring_reconfig(nn, &prog, &rx, &tx);
+ if (err)
+ return err;
+
+ /* @prog got swapped and is now the old one */
+ if (prog)
+ bpf_prog_put(prog);
+
+ nfp_net_xdp_offload(nn, nn->xdp_prog);
+
+ return 0;
+}
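Worked ring accounting for the reconfig call above, with assumed counts of 8 RX rings and 8 stack TX rings:

    /* enable XDP:  tx.n_rings = 8 + 8 = 16; nfp_net_ring_reconfig()
     *              derives stack_tx_rings = 16 - 8 = 8, leaving 8 rings
     *              for the stack and 8 for XDP_TX.
     * disable XDP: tx.n_rings = 16 - 8 = 8; all rings serve the stack.
     */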
+
+static int nfp_net_xdp(struct net_device *netdev, struct netdev_xdp *xdp)
+{
+ struct nfp_net *nn = netdev_priv(netdev);
+
+ switch (xdp->command) {
+ case XDP_SETUP_PROG:
+ return nfp_net_xdp_setup(nn, xdp->prog);
+ case XDP_QUERY_PROG:
+ xdp->prog_attached = !!nn->xdp_prog;
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
static const struct net_device_ops nfp_net_netdev_ops = {
.ndo_open = nfp_net_netdev_open,
.ndo_stop = nfp_net_netdev_close,
@@ -2678,6 +3004,7 @@ static const struct net_device_ops nfp_net_netdev_ops = {
.ndo_features_check = nfp_net_features_check,
.ndo_udp_tunnel_add = nfp_net_add_vxlan_port,
.ndo_udp_tunnel_del = nfp_net_del_vxlan_port,
+ .ndo_xdp = nfp_net_xdp,
};
/**
@@ -2686,8 +3013,7 @@ static const struct net_device_ops nfp_net_netdev_ops = {
*/
void nfp_net_info(struct nfp_net *nn)
{
- nn_info(nn, "Netronome %s %sNetdev: TxQs=%d/%d RxQs=%d/%d\n",
- nn->is_nfp3200 ? "NFP-32xx" : "NFP-6xxx",
+ nn_info(nn, "Netronome NFP-6xxx %sNetdev: TxQs=%d/%d RxQs=%d/%d\n",
nn->is_vf ? "VF " : "",
nn->num_tx_rings, nn->max_tx_rings,
nn->num_rx_rings, nn->max_rx_rings);
@@ -2728,11 +3054,11 @@ void nfp_net_info(struct nfp_net *nn)
* Return: NFP Net device structure, or ERR_PTR on error.
*/
struct nfp_net *nfp_net_netdev_alloc(struct pci_dev *pdev,
- int max_tx_rings, int max_rx_rings)
+ unsigned int max_tx_rings,
+ unsigned int max_rx_rings)
{
struct net_device *netdev;
struct nfp_net *nn;
- int nqs;
netdev = alloc_etherdev_mqs(sizeof(struct nfp_net),
max_tx_rings, max_rx_rings);
@@ -2748,9 +3074,12 @@ struct nfp_net *nfp_net_netdev_alloc(struct pci_dev *pdev,
nn->max_tx_rings = max_tx_rings;
nn->max_rx_rings = max_rx_rings;
- nqs = netif_get_num_default_rss_queues();
- nn->num_tx_rings = min_t(int, nqs, max_tx_rings);
- nn->num_rx_rings = min_t(int, nqs, max_rx_rings);
+ nn->num_tx_rings = min_t(unsigned int, max_tx_rings, num_online_cpus());
+ nn->num_rx_rings = min_t(unsigned int, max_rx_rings,
+ netif_get_num_default_rss_queues());
+
+ nn->num_r_vecs = max(nn->num_tx_rings, nn->num_rx_rings);
+ nn->num_r_vecs = min_t(unsigned int, nn->num_r_vecs, num_online_cpus());
nn->txd_cnt = NFP_NET_TX_DESCS_DEFAULT;
nn->rxd_cnt = NFP_NET_RX_DESCS_DEFAULT;
@@ -2782,13 +3111,9 @@ void nfp_net_netdev_free(struct nfp_net *nn)
*/
static void nfp_net_rss_init(struct nfp_net *nn)
{
- int i;
-
netdev_rss_key_fill(nn->rss_key, NFP_NET_CFG_RSS_KEY_SZ);
- for (i = 0; i < sizeof(nn->rss_itbl); i++)
- nn->rss_itbl[i] =
- ethtool_rxfh_indir_default(i, nn->num_rx_rings);
+ nfp_net_rss_init_itbl(nn);
/* Enable IPv4/IPv6 TCP by default */
nn->rss_cfg = NFP_NET_CFG_RSS_IPV4_TCP |
@@ -2826,12 +3151,18 @@ int nfp_net_netdev_init(struct net_device *netdev)
nfp_net_write_mac_addr(nn);
+ /* Determine RX packet/metadata boundary offset */
+ if (nn->fw_ver.major >= 2)
+ nn->rx_offset = nn_readl(nn, NFP_NET_CFG_RX_OFFSET);
+ else
+ nn->rx_offset = NFP_NET_RX_OFFSET;
+
/* Set default MTU and Freelist buffer size */
if (nn->max_mtu < NFP_NET_DEFAULT_MTU)
netdev->mtu = nn->max_mtu;
else
netdev->mtu = NFP_NET_DEFAULT_MTU;
- nn->fl_bufsz = NFP_NET_DEFAULT_RX_BUFSZ;
+ nn->fl_bufsz = nfp_net_calc_fl_bufsz(nn, netdev->mtu);
/* Advertise/enable offloads based on capabilities
*
@@ -2902,18 +3233,6 @@ int nfp_net_netdev_init(struct net_device *netdev)
nn->ctrl |= NFP_NET_CFG_CTRL_IRQMOD;
}
- /* On NFP-3200 enable MSI-X auto-masking, if supported and the
- * interrupts are not shared.
- */
- if (nn->is_nfp3200 && nn->cap & NFP_NET_CFG_CTRL_MSIXAUTO)
- nn->ctrl |= NFP_NET_CFG_CTRL_MSIXAUTO;
-
- /* On NFP4000/NFP6000, determine RX packet/metadata boundary offset */
- if (nn->fw_ver.major >= 2)
- nn->rx_offset = nn_readl(nn, NFP_NET_CFG_RX_OFFSET);
- else
- nn->rx_offset = NFP_NET_RX_OFFSET;
-
/* Stash the re-configuration queue away. First odd queue in TX Bar */
nn->qcp_cfg = nn->tx_bar + NFP_QCP_QUEUE_ADDR_SZ;
@@ -2927,9 +3246,13 @@ int nfp_net_netdev_init(struct net_device *netdev)
return err;
/* Finalise the netdev setup */
- ether_setup(netdev);
netdev->netdev_ops = &nfp_net_netdev_ops;
netdev->watchdog_timeo = msecs_to_jiffies(5 * 1000);
+
+ /* MTU range: 68 - hw-specific max */
+ netdev->min_mtu = ETH_MIN_MTU;
+ netdev->max_mtu = nn->max_mtu;
+
netif_carrier_off(netdev);
nfp_net_set_ethtool_ops(netdev);
@@ -2944,5 +3267,11 @@ int nfp_net_netdev_init(struct net_device *netdev)
*/
void nfp_net_netdev_clean(struct net_device *netdev)
{
- unregister_netdev(netdev);
+ struct nfp_net *nn = netdev_priv(netdev);
+
+ if (nn->xdp_prog)
+ bpf_prog_put(nn->xdp_prog);
+ if (nn->bpf_offload_xdp)
+ nfp_net_xdp_offload(nn, NULL);
+ unregister_netdev(nn->netdev);
}
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h b/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h
index 93b10b441acb..385ba355c965 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h
@@ -50,7 +50,7 @@
/**
* Configuration BAR size.
*
- * The configuration BAR is 8K in size, but on the NFP6000, due to
+ * The configuration BAR is 8K in size, but due to
* THB-350, 32k needs to be reserved.
*/
#define NFP_NET_CFG_BAR_SZ (32 * 1024)
@@ -186,18 +186,13 @@
#define NFP_NET_CFG_START_RXQ 0x004c
/**
- * NFP-3200 workaround (0x0050 - 0x0058)
- * @NFP_NET_CFG_SPARE_ADDR: DMA address for ME code to use (e.g. YDS-155 fix)
- */
-#define NFP_NET_CFG_SPARE_ADDR 0x0050
-/**
- * NFP6000/NFP4000 - Prepend configuration
+ * Prepend configuration
*/
#define NFP_NET_CFG_RX_OFFSET 0x0050
#define NFP_NET_CFG_RX_OFFSET_DYNAMIC 0 /* Prepend mode */
/**
- * NFP6000/NFP4000 - VXLAN/UDP encap configuration
+ * VXLAN/UDP encap configuration
* @NFP_NET_CFG_VXLAN_PORT: Base address of table of tunnels' UDP dst ports
* @NFP_NET_CFG_VXLAN_SZ: Size of the UDP port table in bytes
*/
@@ -205,7 +200,7 @@
#define NFP_NET_CFG_VXLAN_SZ 0x0008
/**
- * NFP6000 - BPF section
+ * BPF section
* @NFP_NET_CFG_BPF_ABI: BPF ABI version
* @NFP_NET_CFG_BPF_CAP: BPF capabilities
* @NFP_NET_CFG_BPF_MAX_LEN: Maximum size of JITed BPF code in bytes
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c b/drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c
index f7c9a5bc4aa3..c66f3f954aa8 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c
@@ -44,8 +44,8 @@ static int nfp_net_debugfs_rx_q_read(struct seq_file *file, void *data)
struct nfp_net_r_vector *r_vec = file->private;
struct nfp_net_rx_ring *rx_ring;
struct nfp_net_rx_desc *rxd;
- struct sk_buff *skb;
struct nfp_net *nn;
+ void *frag;
int i;
rtnl_lock();
@@ -73,10 +73,9 @@ static int nfp_net_debugfs_rx_q_read(struct seq_file *file, void *data)
seq_printf(file, "%04d: 0x%08x 0x%08x", i,
rxd->vals[0], rxd->vals[1]);
- skb = READ_ONCE(rx_ring->rxbufs[i].skb);
- if (skb)
- seq_printf(file, " skb->head=%p skb->data=%p",
- skb->head, skb->data);
+ frag = READ_ONCE(rx_ring->rxbufs[i].frag);
+ if (frag)
+ seq_printf(file, " frag=%p", frag);
if (rx_ring->rxbufs[i].dma_addr)
seq_printf(file, " dma_addr=%pad",
@@ -115,6 +114,16 @@ static const struct file_operations nfp_rx_q_fops = {
.llseek = seq_lseek
};
+static int nfp_net_debugfs_tx_q_open(struct inode *inode, struct file *f);
+
+static const struct file_operations nfp_tx_q_fops = {
+ .owner = THIS_MODULE,
+ .open = nfp_net_debugfs_tx_q_open,
+ .release = single_release,
+ .read = seq_read,
+ .llseek = seq_lseek
+};
+
static int nfp_net_debugfs_tx_q_read(struct seq_file *file, void *data)
{
struct nfp_net_r_vector *r_vec = file->private;
@@ -127,10 +136,13 @@ static int nfp_net_debugfs_tx_q_read(struct seq_file *file, void *data)
rtnl_lock();
- if (!r_vec->nfp_net || !r_vec->tx_ring)
+ if (debugfs_real_fops(file->file) == &nfp_tx_q_fops)
+ tx_ring = r_vec->tx_ring;
+ else
+ tx_ring = r_vec->xdp_ring;
+ if (!r_vec->nfp_net || !tx_ring)
goto out;
nn = r_vec->nfp_net;
- tx_ring = r_vec->tx_ring;
if (!netif_running(nn->netdev))
goto out;
@@ -149,9 +161,14 @@ static int nfp_net_debugfs_tx_q_read(struct seq_file *file, void *data)
txd->vals[2], txd->vals[3]);
skb = READ_ONCE(tx_ring->txbufs[i].skb);
- if (skb)
- seq_printf(file, " skb->head=%p skb->data=%p",
- skb->head, skb->data);
+ if (skb) {
+ if (tx_ring == r_vec->tx_ring)
+ seq_printf(file, " skb->head=%p skb->data=%p",
+ skb->head, skb->data);
+ else
+ seq_printf(file, " frag=%p", skb);
+ }
+
if (tx_ring->txbufs[i].dma_addr)
seq_printf(file, " dma_addr=%pad",
&tx_ring->txbufs[i].dma_addr);
@@ -177,7 +194,7 @@ static int nfp_net_debugfs_tx_q_open(struct inode *inode, struct file *f)
return single_open(f, nfp_net_debugfs_tx_q_read, inode->i_private);
}
-static const struct file_operations nfp_tx_q_fops = {
+static const struct file_operations nfp_xdp_q_fops = {
.owner = THIS_MODULE,
.open = nfp_net_debugfs_tx_q_open,
.release = single_release,
@@ -187,7 +204,7 @@ static const struct file_operations nfp_tx_q_fops = {
void nfp_net_debugfs_adapter_add(struct nfp_net *nn)
{
- struct dentry *queues, *tx, *rx;
+ struct dentry *queues, *tx, *rx, *xdp;
char int_name[16];
int i;
@@ -205,16 +222,19 @@ void nfp_net_debugfs_adapter_add(struct nfp_net *nn)
rx = debugfs_create_dir("rx", queues);
tx = debugfs_create_dir("tx", queues);
- if (IS_ERR_OR_NULL(rx) || IS_ERR_OR_NULL(tx))
+ xdp = debugfs_create_dir("xdp", queues);
+ if (IS_ERR_OR_NULL(rx) || IS_ERR_OR_NULL(tx) || IS_ERR_OR_NULL(xdp))
return;
- for (i = 0; i < nn->num_rx_rings; i++) {
+ for (i = 0; i < min(nn->max_rx_rings, nn->max_r_vecs); i++) {
sprintf(int_name, "%d", i);
debugfs_create_file(int_name, S_IRUSR, rx,
&nn->r_vecs[i], &nfp_rx_q_fops);
+ debugfs_create_file(int_name, S_IRUSR, xdp,
+ &nn->r_vecs[i], &nfp_xdp_q_fops);
}
- for (i = 0; i < nn->num_tx_rings; i++) {
+ for (i = 0; i < min(nn->max_tx_rings, nn->max_r_vecs); i++) {
sprintf(int_name, "%d", i);
debugfs_create_file(int_name, S_IRUSR, tx,
&nn->r_vecs[i], &nfp_tx_q_fops);
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
index 3418f2277e9d..1b26e9646574 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
@@ -158,6 +158,28 @@ static void nfp_net_get_ringparam(struct net_device *netdev,
ring->tx_pending = nn->txd_cnt;
}
+static int nfp_net_set_ring_size(struct nfp_net *nn, u32 rxd_cnt, u32 txd_cnt)
+{
+ struct nfp_net_ring_set *reconfig_rx = NULL, *reconfig_tx = NULL;
+ struct nfp_net_ring_set rx = {
+ .n_rings = nn->num_rx_rings,
+ .mtu = nn->netdev->mtu,
+ .dcnt = rxd_cnt,
+ };
+ struct nfp_net_ring_set tx = {
+ .n_rings = nn->num_tx_rings,
+ .dcnt = txd_cnt,
+ };
+
+ if (nn->rxd_cnt != rxd_cnt)
+ reconfig_rx = &rx;
+ if (nn->txd_cnt != txd_cnt)
+ reconfig_tx = &tx;
+
+ return nfp_net_ring_reconfig(nn, &nn->xdp_prog,
+ reconfig_rx, reconfig_tx);
+}
+
static int nfp_net_set_ringparam(struct net_device *netdev,
struct ethtool_ringparam *ring)
{
@@ -614,6 +636,76 @@ static int nfp_net_set_coalesce(struct net_device *netdev,
return nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_IRQMOD);
}
+static void nfp_net_get_channels(struct net_device *netdev,
+ struct ethtool_channels *channel)
+{
+ struct nfp_net *nn = netdev_priv(netdev);
+ unsigned int num_tx_rings;
+
+ num_tx_rings = nn->num_tx_rings;
+ if (nn->xdp_prog)
+ num_tx_rings -= nn->num_rx_rings;
+
+ channel->max_rx = min(nn->max_rx_rings, nn->max_r_vecs);
+ channel->max_tx = min(nn->max_tx_rings, nn->max_r_vecs);
+ channel->max_combined = min(channel->max_rx, channel->max_tx);
+ channel->max_other = NFP_NET_NON_Q_VECTORS;
+ channel->combined_count = min(nn->num_rx_rings, num_tx_rings);
+ channel->rx_count = nn->num_rx_rings - channel->combined_count;
+ channel->tx_count = num_tx_rings - channel->combined_count;
+ channel->other_count = NFP_NET_NON_Q_VECTORS;
+}
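How the ethtool channel counts fall out of this arithmetic, for assumed counts of 8 RX rings and 12 TX rings in use:

    /* no XDP:   combined_count = min(8, 12) = 8, rx_count = 0, tx_count = 4
     * with XDP: 8 TX rings are subtracted as XDP rings, so num_tx_rings
     *           reports 4, giving combined_count = 4, rx_count = 4,
     *           tx_count = 0
     */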
+
+static int nfp_net_set_num_rings(struct nfp_net *nn, unsigned int total_rx,
+ unsigned int total_tx)
+{
+ struct nfp_net_ring_set *reconfig_rx = NULL, *reconfig_tx = NULL;
+ struct nfp_net_ring_set rx = {
+ .n_rings = total_rx,
+ .mtu = nn->netdev->mtu,
+ .dcnt = nn->rxd_cnt,
+ };
+ struct nfp_net_ring_set tx = {
+ .n_rings = total_tx,
+ .dcnt = nn->txd_cnt,
+ };
+
+ if (nn->num_rx_rings != total_rx)
+ reconfig_rx = &rx;
+ if (nn->num_stack_tx_rings != total_tx ||
+ (nn->xdp_prog && reconfig_rx))
+ reconfig_tx = &tx;
+
+ /* nfp_net_check_config() will catch tx.n_rings > nn->max_tx_rings */
+ if (nn->xdp_prog)
+ tx.n_rings += total_rx;
+
+ return nfp_net_ring_reconfig(nn, &nn->xdp_prog,
+ reconfig_rx, reconfig_tx);
+}
+
+static int nfp_net_set_channels(struct net_device *netdev,
+ struct ethtool_channels *channel)
+{
+ struct nfp_net *nn = netdev_priv(netdev);
+ unsigned int total_rx, total_tx;
+
+ /* Reject unsupported */
+ if (!channel->combined_count ||
+ channel->other_count != NFP_NET_NON_Q_VECTORS ||
+ (channel->rx_count && channel->tx_count))
+ return -EINVAL;
+
+ total_rx = channel->combined_count + channel->rx_count;
+ total_tx = channel->combined_count + channel->tx_count;
+
+ if (total_rx > min(nn->max_rx_rings, nn->max_r_vecs) ||
+ total_tx > min(nn->max_tx_rings, nn->max_r_vecs))
+ return -EINVAL;
+
+ return nfp_net_set_num_rings(nn, total_rx, total_tx);
+}
+
static const struct ethtool_ops nfp_net_ethtool_ops = {
.get_drvinfo = nfp_net_get_drvinfo,
.get_link = ethtool_op_get_link,
@@ -632,6 +724,8 @@ static const struct ethtool_ops nfp_net_ethtool_ops = {
.get_regs = nfp_net_get_regs,
.get_coalesce = nfp_net_get_coalesce,
.set_coalesce = nfp_net_set_coalesce,
+ .get_channels = nfp_net_get_channels,
+ .set_channels = nfp_net_set_channels,
};
void nfp_net_set_ethtool_ops(struct net_device *netdev)
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_offload.c b/drivers/net/ethernet/netronome/nfp/nfp_net_offload.c
index 8acfb631a0ea..18a851eb3508 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_offload.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_offload.c
@@ -111,6 +111,9 @@ nfp_net_bpf_get_act(struct nfp_net *nn, struct tc_cls_bpf_offload *cls_bpf)
const struct tc_action *a;
LIST_HEAD(actions);
+ if (!cls_bpf->exts)
+ return NN_ACT_XDP;
+
/* TC direct action */
if (cls_bpf->exts_integrated) {
if (tc_no_actions(cls_bpf->exts))
@@ -128,7 +131,7 @@ nfp_net_bpf_get_act(struct nfp_net *nn, struct tc_cls_bpf_offload *cls_bpf)
if (is_tcf_gact_shot(a))
return NN_ACT_TC_DROP;
- if (is_tcf_mirred_redirect(a) &&
+ if (is_tcf_mirred_egress_redirect(a) &&
tcf_mirred_ifindex(a) == nn->netdev->ifindex)
return NN_ACT_TC_REDIR;
}
@@ -233,9 +236,7 @@ static int nfp_net_bpf_stop(struct nfp_net *nn)
return nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_GEN);
}
-int
-nfp_net_bpf_offload(struct nfp_net *nn, u32 handle, __be16 proto,
- struct tc_cls_bpf_offload *cls_bpf)
+int nfp_net_bpf_offload(struct nfp_net *nn, struct tc_cls_bpf_offload *cls_bpf)
{
struct nfp_bpf_result res;
dma_addr_t dma_addr;
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c b/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c
index 2800bbf65a89..d065235034d4 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c
@@ -63,9 +63,7 @@ static void nfp_netvf_get_mac_addr(struct nfp_net *nn)
u8 mac_addr[ETH_ALEN];
put_unaligned_be32(nn_readl(nn, NFP_NET_CFG_MACADDR + 0), &mac_addr[0]);
- /* We can't do readw for NFP-3200 compatibility */
- put_unaligned_be16(nn_readl(nn, NFP_NET_CFG_MACADDR + 4) >> 16,
- &mac_addr[4]);
+ put_unaligned_be16(nn_readw(nn, NFP_NET_CFG_MACADDR + 6), &mac_addr[4]);
if (!is_valid_ether_addr(mac_addr)) {
eth_hw_addr_random(nn->netdev);
@@ -86,7 +84,6 @@ static int nfp_netvf_pci_probe(struct pci_dev *pdev,
int tx_bar_no, rx_bar_no;
u8 __iomem *ctrl_bar;
struct nfp_net *nn;
- int is_nfp3200;
u32 startq;
int stride;
int err;
@@ -101,15 +98,6 @@ static int nfp_netvf_pci_probe(struct pci_dev *pdev,
goto err_pci_disable;
}
- switch (pdev->device) {
- case PCI_DEVICE_NFP6000VF:
- is_nfp3200 = 0;
- break;
- default:
- err = -ENODEV;
- goto err_pci_regions;
- }
-
pci_set_master(pdev);
err = dma_set_mask_and_coherent(&pdev->dev,
@@ -149,15 +137,9 @@ static int nfp_netvf_pci_probe(struct pci_dev *pdev,
} else {
switch (fw_ver.major) {
case 1 ... 4:
- if (is_nfp3200) {
- stride = 2;
- tx_bar_no = NFP_NET_Q0_BAR;
- rx_bar_no = NFP_NET_Q1_BAR;
- } else {
- stride = 4;
- tx_bar_no = NFP_NET_Q0_BAR;
- rx_bar_no = tx_bar_no;
- }
+ stride = 4;
+ tx_bar_no = NFP_NET_Q0_BAR;
+ rx_bar_no = tx_bar_no;
break;
default:
dev_err(&pdev->dev, "Unsupported Firmware ABI %d.%d.%d.%d\n",
@@ -189,20 +171,10 @@ static int nfp_netvf_pci_probe(struct pci_dev *pdev,
max_rx_rings = (rx_bar_sz / NFP_QCP_QUEUE_ADDR_SZ) / 2;
}
- /* XXX Implement a workaround for THB-350 here. Ideally, we
- * have a different PCI ID for A rev VFs.
- */
- switch (pdev->device) {
- case PCI_DEVICE_NFP6000VF:
- startq = readl(ctrl_bar + NFP_NET_CFG_START_TXQ);
- tx_bar_off = NFP_PCIE_QUEUE(startq);
- startq = readl(ctrl_bar + NFP_NET_CFG_START_RXQ);
- rx_bar_off = NFP_PCIE_QUEUE(startq);
- break;
- default:
- err = -ENODEV;
- goto err_ctrl_unmap;
- }
+ startq = readl(ctrl_bar + NFP_NET_CFG_START_TXQ);
+ tx_bar_off = NFP_PCIE_QUEUE(startq);
+ startq = readl(ctrl_bar + NFP_NET_CFG_START_RXQ);
+ rx_bar_off = NFP_PCIE_QUEUE(startq);
/* Allocate and initialise the netdev */
nn = nfp_net_netdev_alloc(pdev, max_tx_rings, max_rx_rings);
@@ -214,7 +186,6 @@ static int nfp_netvf_pci_probe(struct pci_dev *pdev,
nn->fw_ver = fw_ver;
nn->ctrl_bar = ctrl_bar;
nn->is_vf = 1;
- nn->is_nfp3200 = is_nfp3200;
nn->stride_tx = stride;
nn->stride_rx = stride;
diff --git a/drivers/net/ethernet/netx-eth.c b/drivers/net/ethernet/netx-eth.c
index adbc47f2d132..df4188cb43e0 100644
--- a/drivers/net/ethernet/netx-eth.c
+++ b/drivers/net/ethernet/netx-eth.c
@@ -304,7 +304,6 @@ static const struct net_device_ops netx_eth_netdev_ops = {
.ndo_start_xmit = netx_eth_hard_start_xmit,
.ndo_tx_timeout = netx_eth_timeout,
.ndo_set_rx_mode = netx_eth_set_multicast_list,
- .ndo_change_mtu = eth_change_mtu,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
};
diff --git a/drivers/net/ethernet/nuvoton/w90p910_ether.c b/drivers/net/ethernet/nuvoton/w90p910_ether.c
index 712d8bcb7d8c..119f6dca71f0 100644
--- a/drivers/net/ethernet/nuvoton/w90p910_ether.c
+++ b/drivers/net/ethernet/nuvoton/w90p910_ether.c
@@ -915,7 +915,6 @@ static const struct net_device_ops w90p910_ether_netdev_ops = {
.ndo_set_mac_address = w90p910_set_mac_address,
.ndo_do_ioctl = w90p910_ether_ioctl,
.ndo_validate_addr = eth_validate_addr,
- .ndo_change_mtu = eth_change_mtu,
};
static void __init get_mac_address(struct net_device *dev)
diff --git a/drivers/net/ethernet/nvidia/forcedeth.c b/drivers/net/ethernet/nvidia/forcedeth.c
index 9b0d7f463ff3..3913f07279d2 100644
--- a/drivers/net/ethernet/nvidia/forcedeth.c
+++ b/drivers/net/ethernet/nvidia/forcedeth.c
@@ -3008,17 +3008,12 @@ static int nv_change_mtu(struct net_device *dev, int new_mtu)
struct fe_priv *np = netdev_priv(dev);
int old_mtu;
- if (new_mtu < 64 || new_mtu > np->pkt_limit)
- return -EINVAL;
-
old_mtu = dev->mtu;
dev->mtu = new_mtu;
/* return early if the buffer sizes will not change */
if (old_mtu <= ETH_DATA_LEN && new_mtu <= ETH_DATA_LEN)
return 0;
- if (old_mtu == new_mtu)
- return 0;
/* synchronized against open : rtnl_lock() held by caller */
if (netif_running(dev)) {
@@ -5719,6 +5714,10 @@ static int nv_probe(struct pci_dev *pci_dev, const struct pci_device_id *id)
/* Add loopback capability to the device. */
dev->hw_features |= NETIF_F_LOOPBACK;
+ /* MTU range: 64 - 1500 or 9100 */
+ dev->min_mtu = ETH_ZLEN + ETH_FCS_LEN;
+ dev->max_mtu = np->pkt_limit;
+
np->pause_flags = NV_PAUSEFRAME_RX_CAPABLE | NV_PAUSEFRAME_RX_REQ | NV_PAUSEFRAME_AUTONEG;
if ((id->driver_data & DEV_HAS_PAUSEFRAME_TX_V1) ||
(id->driver_data & DEV_HAS_PAUSEFRAME_TX_V2) ||
diff --git a/drivers/net/ethernet/nxp/lpc_eth.c b/drivers/net/ethernet/nxp/lpc_eth.c
index 8e13ec84c538..dd6b0d0f7fa5 100644
--- a/drivers/net/ethernet/nxp/lpc_eth.c
+++ b/drivers/net/ethernet/nxp/lpc_eth.c
@@ -1256,7 +1256,6 @@ static const struct net_device_ops lpc_netdev_ops = {
.ndo_do_ioctl = lpc_eth_ioctl,
.ndo_set_mac_address = lpc_set_mac_address,
.ndo_validate_addr = eth_validate_addr,
- .ndo_change_mtu = eth_change_mtu,
};
static int lpc_eth_drv_probe(struct platform_device *pdev)
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
index 3cd87a41ac92..d461f419948e 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
@@ -2260,16 +2260,10 @@ static int pch_gbe_set_mac(struct net_device *netdev, void *addr)
static int pch_gbe_change_mtu(struct net_device *netdev, int new_mtu)
{
struct pch_gbe_adapter *adapter = netdev_priv(netdev);
- int max_frame;
+ int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
unsigned long old_rx_buffer_len = adapter->rx_buffer_len;
int err;
- max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
- if ((max_frame < ETH_ZLEN + ETH_FCS_LEN) ||
- (max_frame > PCH_GBE_MAX_JUMBO_FRAME_SIZE)) {
- netdev_err(netdev, "Invalid MTU setting\n");
- return -EINVAL;
- }
if (max_frame <= PCH_GBE_FRAME_SIZE_2048)
adapter->rx_buffer_len = PCH_GBE_FRAME_SIZE_2048;
else if (max_frame <= PCH_GBE_FRAME_SIZE_4096)
@@ -2633,6 +2627,11 @@ static int pch_gbe_probe(struct pci_dev *pdev,
netdev->features = netdev->hw_features;
pch_gbe_set_ethtool_ops(netdev);
+ /* MTU range: 46 - 10300 */
+ netdev->min_mtu = ETH_ZLEN - ETH_HLEN;
+ netdev->max_mtu = PCH_GBE_MAX_JUMBO_FRAME_SIZE -
+ (ETH_HLEN + ETH_FCS_LEN);
+
pch_gbe_mac_load_mac_addr(&adapter->hw);
pch_gbe_mac_reset_hw(&adapter->hw);
diff --git a/drivers/net/ethernet/packetengines/hamachi.c b/drivers/net/ethernet/packetengines/hamachi.c
index 91be2f02ef1c..2d04679a923a 100644
--- a/drivers/net/ethernet/packetengines/hamachi.c
+++ b/drivers/net/ethernet/packetengines/hamachi.c
@@ -568,7 +568,6 @@ static const struct net_device_ops hamachi_netdev_ops = {
.ndo_start_xmit = hamachi_start_xmit,
.ndo_get_stats = hamachi_get_stats,
.ndo_set_rx_mode = set_rx_mode,
- .ndo_change_mtu = eth_change_mtu,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
.ndo_tx_timeout = hamachi_tx_timeout,
diff --git a/drivers/net/ethernet/packetengines/yellowfin.c b/drivers/net/ethernet/packetengines/yellowfin.c
index fb1d1031b091..2a2ca5fa0c69 100644
--- a/drivers/net/ethernet/packetengines/yellowfin.c
+++ b/drivers/net/ethernet/packetengines/yellowfin.c
@@ -360,7 +360,6 @@ static const struct net_device_ops netdev_ops = {
.ndo_stop = yellowfin_close,
.ndo_start_xmit = yellowfin_start_xmit,
.ndo_set_rx_mode = set_rx_mode,
- .ndo_change_mtu = eth_change_mtu,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
.ndo_do_ioctl = netdev_ioctl,
diff --git a/drivers/net/ethernet/pasemi/pasemi_mac.c b/drivers/net/ethernet/pasemi/pasemi_mac.c
index 2f4a837f0d6a..badfa1d562a4 100644
--- a/drivers/net/ethernet/pasemi/pasemi_mac.c
+++ b/drivers/net/ethernet/pasemi/pasemi_mac.c
@@ -53,7 +53,7 @@
* - Multiqueue RX/TX
*/
-#define PE_MIN_MTU 64
+#define PE_MIN_MTU (ETH_ZLEN + ETH_HLEN)
#define PE_MAX_MTU 9000
#define PE_DEF_MTU ETH_DATA_LEN
@@ -1611,9 +1611,6 @@ static int pasemi_mac_change_mtu(struct net_device *dev, int new_mtu)
int running;
int ret = 0;
- if (new_mtu < PE_MIN_MTU || new_mtu > PE_MAX_MTU)
- return -EINVAL;
-
running = netif_running(dev);
if (running) {
@@ -1635,7 +1632,7 @@ static int pasemi_mac_change_mtu(struct net_device *dev, int new_mtu)
}
/* Setup checksum channels if large MTU and none already allocated */
- if (new_mtu > 1500 && !mac->num_cs) {
+ if (new_mtu > PE_DEF_MTU && !mac->num_cs) {
pasemi_mac_setup_csrings(mac);
if (!mac->num_cs) {
ret = -ENOMEM;
@@ -1757,6 +1754,11 @@ pasemi_mac_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
dev->netdev_ops = &pasemi_netdev_ops;
dev->mtu = PE_DEF_MTU;
+
+ /* MTU range: 64 - 9000 */
+ dev->min_mtu = PE_MIN_MTU;
+ dev->max_mtu = PE_MAX_MTU;
+
/* 1500 MTU + ETH_HLEN + VLAN_HLEN + 2 64B cachelines */
mac->bufsz = dev->mtu + ETH_HLEN + ETH_FCS_LEN + LOCAL_SKB_ALIGN + 128;
diff --git a/drivers/net/ethernet/qlogic/Kconfig b/drivers/net/ethernet/qlogic/Kconfig
index 32f2a45f4ab2..3cfd10503446 100644
--- a/drivers/net/ethernet/qlogic/Kconfig
+++ b/drivers/net/ethernet/qlogic/Kconfig
@@ -110,4 +110,7 @@ config QEDE
config QED_RDMA
bool
+config QED_ISCSI
+ bool
+
endif # NET_VENDOR_QLOGIC
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c
index 2b10f1bcd151..a996801d442d 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c
@@ -987,20 +987,8 @@ int netxen_send_lro_cleanup(struct netxen_adapter *adapter)
int netxen_nic_change_mtu(struct net_device *netdev, int mtu)
{
struct netxen_adapter *adapter = netdev_priv(netdev);
- int max_mtu;
int rc = 0;
- if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
- max_mtu = P3_MAX_MTU;
- else
- max_mtu = P2_MAX_MTU;
-
- if (mtu > max_mtu) {
- printk(KERN_ERR "%s: mtu > %d bytes unsupported\n",
- netdev->name, max_mtu);
- return -EINVAL;
- }
-
if (adapter->set_mtu)
rc = adapter->set_mtu(adapter, mtu);
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
index 7a0281a36c28..561fb94c7267 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
@@ -1572,6 +1572,13 @@ netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
adapter->physical_port = i;
}
+ /* MTU range: 0 - 8000 (P2) or 9600 (P3) */
+ netdev->min_mtu = 0;
+ if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
+ netdev->max_mtu = P3_MAX_MTU;
+ else
+ netdev->max_mtu = P2_MAX_MTU;
+
netxen_nic_clear_stats(adapter);
err = netxen_setup_intr(adapter);
diff --git a/drivers/net/ethernet/qlogic/qed/Makefile b/drivers/net/ethernet/qlogic/qed/Makefile
index 967acf322c09..729e43768e99 100644
--- a/drivers/net/ethernet/qlogic/qed/Makefile
+++ b/drivers/net/ethernet/qlogic/qed/Makefile
@@ -6,3 +6,4 @@ qed-y := qed_cxt.o qed_dev.o qed_hw.o qed_init_fw_funcs.o qed_init_ops.o \
qed-$(CONFIG_QED_SRIOV) += qed_sriov.o qed_vf.o
qed-$(CONFIG_QED_LL2) += qed_ll2.o
qed-$(CONFIG_QED_RDMA) += qed_roce.o
+qed-$(CONFIG_QED_ISCSI) += qed_iscsi.o qed_ooo.o
diff --git a/drivers/net/ethernet/qlogic/qed/qed.h b/drivers/net/ethernet/qlogic/qed/qed.h
index 653bb5735f0c..44c184ebe3b0 100644
--- a/drivers/net/ethernet/qlogic/qed/qed.h
+++ b/drivers/net/ethernet/qlogic/qed/qed.h
@@ -35,6 +35,7 @@ extern const struct qed_common_ops qed_common_ops_pass;
#define QED_WFQ_UNIT 100
+#define ISCSI_BDQ_ID(_port_id) (_port_id)
#define QED_WID_SIZE (1024)
#define QED_PF_DEMS_SIZE (4)
@@ -154,7 +155,10 @@ struct qed_qm_iids {
u32 tids;
};
-enum QED_RESOURCES {
+/* HW / FW resources; the features supported below are derived from these.
+ * Most of this information is received from the MFW.
+ */
+enum qed_resources {
QED_SB,
QED_L2_QUEUE,
QED_VPORT,
@@ -166,6 +170,7 @@ enum QED_RESOURCES {
QED_RDMA_CNQ_RAM,
QED_ILT,
QED_LL2_QUEUE,
+ QED_CMDQS_CQS,
QED_RDMA_STATS_QUEUE,
QED_MAX_RESC,
};
@@ -174,6 +179,7 @@ enum QED_FEATURE {
QED_PF_L2_QUE,
QED_VF,
QED_RDMA_CNQ,
+ QED_VF_L2_QUE,
QED_MAX_FEATURES,
};
@@ -195,6 +201,11 @@ enum qed_dev_cap {
QED_DEV_CAP_ROCE,
};
+enum qed_wol_support {
+ QED_WOL_SUPPORT_NONE,
+ QED_WOL_SUPPORT_PME,
+};
+
struct qed_hw_info {
/* PCI personality */
enum qed_pci_personality personality;
@@ -226,15 +237,9 @@ struct qed_hw_info {
u32 port_mode;
u32 hw_mode;
unsigned long device_capabilities;
-};
+ u16 mtu;
-struct qed_hw_cid_data {
- u32 cid;
- bool b_cid_allocated;
-
- /* Additional identifiers */
- u16 opaque_fid;
- u8 vport_id;
+ enum qed_wol_support b_wol_support;
};
/* maximum size of read/write commands (HW limit) */
@@ -378,7 +383,9 @@ struct qed_hwfn {
/* Protocol related */
bool using_ll2;
struct qed_ll2_info *p_ll2_info;
+ struct qed_ooo_info *p_ooo_info;
struct qed_rdma_info *p_rdma_info;
+ struct qed_iscsi_info *p_iscsi_info;
struct qed_pf_params pf_params;
bool b_rdma_enabled_in_prs;
@@ -403,9 +410,6 @@ struct qed_hwfn {
struct qed_dcbx_info *p_dcbx_info;
- struct qed_hw_cid_data *p_tx_cids;
- struct qed_hw_cid_data *p_rx_cids;
-
struct qed_dmae_info dmae_info;
/* QM init */
@@ -538,7 +542,9 @@ struct qed_dev {
u8 mcp_rev;
u8 boot_mode;
- u8 wol;
+ /* WoL related configurations */
+ u8 wol_config;
+ u8 wol_mac[ETH_ALEN];
u32 int_mode;
enum qed_coalescing_mode int_coalescing_mode;
@@ -578,6 +584,8 @@ struct qed_dev {
/* Linux specific here */
struct qede_dev *edev;
struct pci_dev *pdev;
+ u32 flags;
+#define QED_FLAG_STORAGE_STARTED (BIT(0))
int msg_enable;
struct pci_params pci_params;
@@ -591,6 +599,7 @@ struct qed_dev {
union {
struct qed_common_cb_ops *common;
struct qed_eth_cb_ops *eth;
+ struct qed_iscsi_cb_ops *iscsi;
} protocol_ops;
void *ops_cookie;
@@ -600,7 +609,7 @@ struct qed_dev {
struct qed_cb_ll2_info *ll2;
u8 ll2_mac_address[ETH_ALEN];
#endif
-
+ DECLARE_HASHTABLE(connections, 10);
const struct firmware *firmware;
u32 rdma_max_sge;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c
index edae5fc5fccd..3b2250021c5f 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dev.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c
@@ -29,8 +29,10 @@
#include "qed_hw.h"
#include "qed_init_ops.h"
#include "qed_int.h"
+#include "qed_iscsi.h"
#include "qed_ll2.h"
#include "qed_mcp.h"
+#include "qed_ooo.h"
#include "qed_reg_addr.h"
#include "qed_sp.h"
#include "qed_sriov.h"
@@ -137,15 +139,6 @@ void qed_resc_free(struct qed_dev *cdev)
for_each_hwfn(cdev, i) {
struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
- kfree(p_hwfn->p_tx_cids);
- p_hwfn->p_tx_cids = NULL;
- kfree(p_hwfn->p_rx_cids);
- p_hwfn->p_rx_cids = NULL;
- }
-
- for_each_hwfn(cdev, i) {
- struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
-
qed_cxt_mngr_free(p_hwfn);
qed_qm_info_free(p_hwfn);
qed_spq_free(p_hwfn);
@@ -155,6 +148,10 @@ void qed_resc_free(struct qed_dev *cdev)
#ifdef CONFIG_QED_LL2
qed_ll2_free(p_hwfn, p_hwfn->p_ll2_info);
#endif
+ if (p_hwfn->hw_info.personality == QED_PCI_ISCSI) {
+ qed_iscsi_free(p_hwfn, p_hwfn->p_iscsi_info);
+ qed_ooo_free(p_hwfn, p_hwfn->p_ooo_info);
+ }
qed_iov_free(p_hwfn);
qed_dmae_info_free(p_hwfn);
qed_dcbx_info_free(p_hwfn, p_hwfn->p_dcbx_info);
@@ -411,6 +408,8 @@ int qed_qm_reconf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
int qed_resc_alloc(struct qed_dev *cdev)
{
+ struct qed_iscsi_info *p_iscsi_info;
+ struct qed_ooo_info *p_ooo_info;
#ifdef CONFIG_QED_LL2
struct qed_ll2_info *p_ll2_info;
#endif
@@ -425,23 +424,6 @@ int qed_resc_alloc(struct qed_dev *cdev)
if (!cdev->fw_data)
return -ENOMEM;
- /* Allocate Memory for the Queue->CID mapping */
- for_each_hwfn(cdev, i) {
- struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
- int tx_size = sizeof(struct qed_hw_cid_data) *
- RESC_NUM(p_hwfn, QED_L2_QUEUE);
- int rx_size = sizeof(struct qed_hw_cid_data) *
- RESC_NUM(p_hwfn, QED_L2_QUEUE);
-
- p_hwfn->p_tx_cids = kzalloc(tx_size, GFP_KERNEL);
- if (!p_hwfn->p_tx_cids)
- goto alloc_no_mem;
-
- p_hwfn->p_rx_cids = kzalloc(rx_size, GFP_KERNEL);
- if (!p_hwfn->p_rx_cids)
- goto alloc_no_mem;
- }
-
for_each_hwfn(cdev, i) {
struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
u32 n_eqes, num_cons;
@@ -533,6 +515,16 @@ int qed_resc_alloc(struct qed_dev *cdev)
p_hwfn->p_ll2_info = p_ll2_info;
}
#endif
+ if (p_hwfn->hw_info.personality == QED_PCI_ISCSI) {
+ p_iscsi_info = qed_iscsi_alloc(p_hwfn);
+ if (!p_iscsi_info)
+ goto alloc_no_mem;
+ p_hwfn->p_iscsi_info = p_iscsi_info;
+ p_ooo_info = qed_ooo_alloc(p_hwfn);
+ if (!p_ooo_info)
+ goto alloc_no_mem;
+ p_hwfn->p_ooo_info = p_ooo_info;
+ }
/* DMA info initialization */
rc = qed_dmae_info_alloc(p_hwfn);
@@ -586,6 +578,10 @@ void qed_resc_setup(struct qed_dev *cdev)
if (p_hwfn->using_ll2)
qed_ll2_setup(p_hwfn, p_hwfn->p_ll2_info);
#endif
+ if (p_hwfn->hw_info.personality == QED_PCI_ISCSI) {
+ qed_iscsi_setup(p_hwfn, p_hwfn->p_iscsi_info);
+ qed_ooo_setup(p_hwfn, p_hwfn->p_ooo_info);
+ }
}
}
@@ -1057,8 +1053,10 @@ int qed_hw_init(struct qed_dev *cdev,
bool allow_npar_tx_switch,
const u8 *bin_fw_data)
{
- u32 load_code, param;
- int rc, mfw_rc, i;
+ u32 load_code, param, drv_mb_param;
+ bool b_default_mtu = true;
+ struct qed_hwfn *p_hwfn;
+ int rc = 0, mfw_rc, i;
if ((int_mode == QED_INT_MODE_MSI) && (cdev->num_hwfns > 1)) {
DP_NOTICE(cdev, "MSI mode is not supported for CMT devices\n");
@@ -1074,6 +1072,12 @@ int qed_hw_init(struct qed_dev *cdev,
for_each_hwfn(cdev, i) {
struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+ /* If management didn't provide a default, set one of our own */
+ if (!p_hwfn->hw_info.mtu) {
+ p_hwfn->hw_info.mtu = 1500;
+ b_default_mtu = false;
+ }
+
if (IS_VF(cdev)) {
p_hwfn->b_int_enabled = 1;
continue;
@@ -1157,6 +1161,38 @@ int qed_hw_init(struct qed_dev *cdev,
p_hwfn->hw_init_done = true;
}
+ if (IS_PF(cdev)) {
+ p_hwfn = QED_LEADING_HWFN(cdev);
+ drv_mb_param = (FW_MAJOR_VERSION << 24) |
+ (FW_MINOR_VERSION << 16) |
+ (FW_REVISION_VERSION << 8) |
+ (FW_ENGINEERING_VERSION);
+ rc = qed_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt,
+ DRV_MSG_CODE_OV_UPDATE_STORM_FW_VER,
+ drv_mb_param, &load_code, &param);
+ if (rc)
+ DP_INFO(p_hwfn, "Failed to update firmware version\n");
+
+ if (!b_default_mtu) {
+ rc = qed_mcp_ov_update_mtu(p_hwfn, p_hwfn->p_main_ptt,
+ p_hwfn->hw_info.mtu);
+ if (rc)
+ DP_INFO(p_hwfn,
+ "Failed to update default mtu\n");
+ }
+
+ rc = qed_mcp_ov_update_driver_state(p_hwfn,
+ p_hwfn->p_main_ptt,
+ QED_OV_DRIVER_STATE_DISABLED);
+ if (rc)
+ DP_INFO(p_hwfn, "Failed to update driver state\n");
+
+ rc = qed_mcp_ov_update_eswitch(p_hwfn, p_hwfn->p_main_ptt,
+ QED_OV_ESWITCH_VEB);
+ if (rc)
+ DP_INFO(p_hwfn, "Failed to update eswitch mode\n");
+ }
+
return 0;
}
@@ -1324,8 +1360,24 @@ int qed_hw_reset(struct qed_dev *cdev)
{
int rc = 0;
u32 unload_resp, unload_param;
+ u32 wol_param;
int i;
+ switch (cdev->wol_config) {
+ case QED_OV_WOL_DISABLED:
+ wol_param = DRV_MB_PARAM_UNLOAD_WOL_DISABLED;
+ break;
+ case QED_OV_WOL_ENABLED:
+ wol_param = DRV_MB_PARAM_UNLOAD_WOL_ENABLED;
+ break;
+ default:
+ DP_NOTICE(cdev,
+ "Unknown WoL configuration %02x\n", cdev->wol_config);
+ /* Fallthrough */
+ case QED_OV_WOL_DEFAULT:
+ wol_param = DRV_MB_PARAM_UNLOAD_WOL_MCP;
+ }
+
for_each_hwfn(cdev, i) {
struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
@@ -1354,8 +1406,7 @@ int qed_hw_reset(struct qed_dev *cdev)
/* Send unload command to MCP */
rc = qed_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt,
- DRV_MSG_CODE_UNLOAD_REQ,
- DRV_MB_PARAM_UNLOAD_WOL_MCP,
+ DRV_MSG_CODE_UNLOAD_REQ, wol_param,
&unload_resp, &unload_param);
if (rc) {
DP_NOTICE(p_hwfn, "qed_hw_reset: UNLOAD_REQ failed\n");
@@ -1421,6 +1472,7 @@ static void get_function_id(struct qed_hwfn *p_hwfn)
static void qed_hw_set_feat(struct qed_hwfn *p_hwfn)
{
u32 *feat_num = p_hwfn->hw_info.feat_num;
+ struct qed_sb_cnt_info sb_cnt_info;
int num_features = 1;
if (IS_ENABLED(CONFIG_QED_RDMA) &&
@@ -1439,53 +1491,257 @@ static void qed_hw_set_feat(struct qed_hwfn *p_hwfn)
feat_num[QED_PF_L2_QUE] = min_t(u32, RESC_NUM(p_hwfn, QED_SB) /
num_features,
RESC_NUM(p_hwfn, QED_L2_QUEUE));
- DP_VERBOSE(p_hwfn, NETIF_MSG_PROBE,
- "#PF_L2_QUEUES=%d #SBS=%d num_features=%d\n",
- feat_num[QED_PF_L2_QUE], RESC_NUM(p_hwfn, QED_SB),
- num_features);
+
+ memset(&sb_cnt_info, 0, sizeof(sb_cnt_info));
+ qed_int_get_num_sbs(p_hwfn, &sb_cnt_info);
+ feat_num[QED_VF_L2_QUE] =
+ min_t(u32,
+ RESC_NUM(p_hwfn, QED_L2_QUEUE) -
+ FEAT_NUM(p_hwfn, QED_PF_L2_QUE), sb_cnt_info.sb_iov_cnt);
+
+ DP_VERBOSE(p_hwfn,
+ NETIF_MSG_PROBE,
+ "#PF_L2_QUEUES=%d VF_L2_QUEUES=%d #ROCE_CNQ=%d #SBS=%d num_features=%d\n",
+ (int)FEAT_NUM(p_hwfn, QED_PF_L2_QUE),
+ (int)FEAT_NUM(p_hwfn, QED_VF_L2_QUE),
+ (int)FEAT_NUM(p_hwfn, QED_RDMA_CNQ),
+ RESC_NUM(p_hwfn, QED_SB), num_features);
}
-static int qed_hw_get_resc(struct qed_hwfn *p_hwfn)
+static enum resource_id_enum qed_hw_get_mfw_res_id(enum qed_resources res_id)
+{
+ enum resource_id_enum mfw_res_id = RESOURCE_NUM_INVALID;
+
+ switch (res_id) {
+ case QED_SB:
+ mfw_res_id = RESOURCE_NUM_SB_E;
+ break;
+ case QED_L2_QUEUE:
+ mfw_res_id = RESOURCE_NUM_L2_QUEUE_E;
+ break;
+ case QED_VPORT:
+ mfw_res_id = RESOURCE_NUM_VPORT_E;
+ break;
+ case QED_RSS_ENG:
+ mfw_res_id = RESOURCE_NUM_RSS_ENGINES_E;
+ break;
+ case QED_PQ:
+ mfw_res_id = RESOURCE_NUM_PQ_E;
+ break;
+ case QED_RL:
+ mfw_res_id = RESOURCE_NUM_RL_E;
+ break;
+ case QED_MAC:
+ case QED_VLAN:
+ /* Each VFC resource can accommodate both a MAC and a VLAN */
+ mfw_res_id = RESOURCE_VFC_FILTER_E;
+ break;
+ case QED_ILT:
+ mfw_res_id = RESOURCE_ILT_E;
+ break;
+ case QED_LL2_QUEUE:
+ mfw_res_id = RESOURCE_LL2_QUEUE_E;
+ break;
+ case QED_RDMA_CNQ_RAM:
+ case QED_CMDQS_CQS:
+ /* CNQ/CMDQS are the same resource */
+ mfw_res_id = RESOURCE_CQS_E;
+ break;
+ case QED_RDMA_STATS_QUEUE:
+ mfw_res_id = RESOURCE_RDMA_STATS_QUEUE_E;
+ break;
+ default:
+ break;
+ }
+
+ return mfw_res_id;
+}
+
+static u32 qed_hw_get_dflt_resc_num(struct qed_hwfn *p_hwfn,
+ enum qed_resources res_id)
{
- u8 enabled_func_idx = p_hwfn->enabled_func_idx;
- u32 *resc_start = p_hwfn->hw_info.resc_start;
u8 num_funcs = p_hwfn->num_funcs_on_engine;
- u32 *resc_num = p_hwfn->hw_info.resc_num;
struct qed_sb_cnt_info sb_cnt_info;
- int i, max_vf_vlan_filters;
+ u32 dflt_resc_num = 0;
- memset(&sb_cnt_info, 0, sizeof(sb_cnt_info));
+ switch (res_id) {
+ case QED_SB:
+ memset(&sb_cnt_info, 0, sizeof(sb_cnt_info));
+ qed_int_get_num_sbs(p_hwfn, &sb_cnt_info);
+ dflt_resc_num = sb_cnt_info.sb_cnt;
+ break;
+ case QED_L2_QUEUE:
+ dflt_resc_num = MAX_NUM_L2_QUEUES_BB / num_funcs;
+ break;
+ case QED_VPORT:
+ dflt_resc_num = MAX_NUM_VPORTS_BB / num_funcs;
+ break;
+ case QED_RSS_ENG:
+ dflt_resc_num = ETH_RSS_ENGINE_NUM_BB / num_funcs;
+ break;
+ case QED_PQ:
+ /* The granularity of the PQs is 8 */
+ dflt_resc_num = MAX_QM_TX_QUEUES_BB / num_funcs;
+ dflt_resc_num &= ~0x7;
+ break;
+ case QED_RL:
+ dflt_resc_num = MAX_QM_GLOBAL_RLS / num_funcs;
+ break;
+ case QED_MAC:
+ case QED_VLAN:
+ /* Each VFC resource can accommodate both a MAC and a VLAN */
+ dflt_resc_num = ETH_NUM_MAC_FILTERS / num_funcs;
+ break;
+ case QED_ILT:
+ dflt_resc_num = PXP_NUM_ILT_RECORDS_BB / num_funcs;
+ break;
+ case QED_LL2_QUEUE:
+ dflt_resc_num = MAX_NUM_LL2_RX_QUEUES / num_funcs;
+ break;
+ case QED_RDMA_CNQ_RAM:
+ case QED_CMDQS_CQS:
+ /* CNQ/CMDQS are the same resource */
+ dflt_resc_num = NUM_OF_CMDQS_CQS / num_funcs;
+ break;
+ case QED_RDMA_STATS_QUEUE:
+ dflt_resc_num = RDMA_NUM_STATISTIC_COUNTERS_BB / num_funcs;
+ break;
+ default:
+ break;
+ }
-#ifdef CONFIG_QED_SRIOV
- max_vf_vlan_filters = QED_ETH_MAX_VF_NUM_VLAN_FILTERS;
-#else
- max_vf_vlan_filters = 0;
-#endif
+ return dflt_resc_num;
+}
+
+static const char *qed_hw_get_resc_name(enum qed_resources res_id)
+{
+ switch (res_id) {
+ case QED_SB:
+ return "SB";
+ case QED_L2_QUEUE:
+ return "L2_QUEUE";
+ case QED_VPORT:
+ return "VPORT";
+ case QED_RSS_ENG:
+ return "RSS_ENG";
+ case QED_PQ:
+ return "PQ";
+ case QED_RL:
+ return "RL";
+ case QED_MAC:
+ return "MAC";
+ case QED_VLAN:
+ return "VLAN";
+ case QED_RDMA_CNQ_RAM:
+ return "RDMA_CNQ_RAM";
+ case QED_ILT:
+ return "ILT";
+ case QED_LL2_QUEUE:
+ return "LL2_QUEUE";
+ case QED_CMDQS_CQS:
+ return "CMDQS_CQS";
+ case QED_RDMA_STATS_QUEUE:
+ return "RDMA_STATS_QUEUE";
+ default:
+ return "UNKNOWN_RESOURCE";
+ }
+}
- qed_int_get_num_sbs(p_hwfn, &sb_cnt_info);
+static int qed_hw_set_resc_info(struct qed_hwfn *p_hwfn,
+ enum qed_resources res_id)
+{
+ u32 dflt_resc_num = 0, dflt_resc_start = 0, mcp_resp, mcp_param;
+ u32 *p_resc_num, *p_resc_start;
+ struct resource_info resc_info;
+ int rc;
+
+ p_resc_num = &RESC_NUM(p_hwfn, res_id);
+ p_resc_start = &RESC_START(p_hwfn, res_id);
+
+ /* Default values assume that each function receives an equal share */
+ dflt_resc_num = qed_hw_get_dflt_resc_num(p_hwfn, res_id);
+ if (!dflt_resc_num) {
+ DP_ERR(p_hwfn,
+ "Failed to get default amount for resource %d [%s]\n",
+ res_id, qed_hw_get_resc_name(res_id));
+ return -EINVAL;
+ }
+ dflt_resc_start = dflt_resc_num * p_hwfn->enabled_func_idx;
+
+ memset(&resc_info, 0, sizeof(resc_info));
+ resc_info.res_id = qed_hw_get_mfw_res_id(res_id);
+ if (resc_info.res_id == RESOURCE_NUM_INVALID) {
+ DP_ERR(p_hwfn,
+ "Failed to match resource %d [%s] with the MFW resources\n",
+ res_id, qed_hw_get_resc_name(res_id));
+ return -EINVAL;
+ }
+
+ rc = qed_mcp_get_resc_info(p_hwfn, p_hwfn->p_main_ptt, &resc_info,
+ &mcp_resp, &mcp_param);
+ if (rc) {
+ DP_NOTICE(p_hwfn,
+ "MFW response failure for an allocation request for resource %d [%s]\n",
+ res_id, qed_hw_get_resc_name(res_id));
+ return rc;
+ }
- resc_num[QED_SB] = min_t(u32,
- (MAX_SB_PER_PATH_BB / num_funcs),
- sb_cnt_info.sb_cnt);
- resc_num[QED_L2_QUEUE] = MAX_NUM_L2_QUEUES_BB / num_funcs;
- resc_num[QED_VPORT] = MAX_NUM_VPORTS_BB / num_funcs;
- resc_num[QED_RSS_ENG] = ETH_RSS_ENGINE_NUM_BB / num_funcs;
- resc_num[QED_PQ] = MAX_QM_TX_QUEUES_BB / num_funcs;
- resc_num[QED_RL] = min_t(u32, 64, resc_num[QED_VPORT]);
- resc_num[QED_MAC] = ETH_NUM_MAC_FILTERS / num_funcs;
- resc_num[QED_VLAN] = (ETH_NUM_VLAN_FILTERS - 1 /*For vlan0*/) /
- num_funcs;
- resc_num[QED_ILT] = PXP_NUM_ILT_RECORDS_BB / num_funcs;
- resc_num[QED_LL2_QUEUE] = MAX_NUM_LL2_RX_QUEUES / num_funcs;
- resc_num[QED_RDMA_CNQ_RAM] = NUM_OF_CMDQS_CQS / num_funcs;
- resc_num[QED_RDMA_STATS_QUEUE] = RDMA_NUM_STATISTIC_COUNTERS_BB /
- num_funcs;
-
- for (i = 0; i < QED_MAX_RESC; i++)
- resc_start[i] = resc_num[i] * enabled_func_idx;
+ /* Default driver values are applied in the following cases:
+ * - The resource allocation MB command is not supported by the MFW
+ * - There is an internal error in the MFW while processing the request
+ * - The resource ID is unknown to the MFW
+ */
+ if (mcp_resp != FW_MSG_CODE_RESOURCE_ALLOC_OK &&
+ mcp_resp != FW_MSG_CODE_RESOURCE_ALLOC_DEPRECATED) {
+ DP_NOTICE(p_hwfn,
+ "Resource %d [%s]: No allocation info was received [mcp_resp 0x%x]. Applying default values [num %d, start %d].\n",
+ res_id,
+ qed_hw_get_resc_name(res_id),
+ mcp_resp, dflt_resc_num, dflt_resc_start);
+ *p_resc_num = dflt_resc_num;
+ *p_resc_start = dflt_resc_start;
+ goto out;
+ }
+
+ /* Special handling for status blocks; would be revised in the future */
+ if (res_id == QED_SB) {
+ resc_info.size -= 1;
+ resc_info.offset -= p_hwfn->enabled_func_idx;
+ }
+
+ *p_resc_num = resc_info.size;
+ *p_resc_start = resc_info.offset;
+
+out:
+ /* PQ count and offset must be multiples of 8 [the HW granularity].
+ * Round the numbers down so they fit.
+ */
+ if ((res_id == QED_PQ) && ((*p_resc_num % 8) || (*p_resc_start % 8))) {
+ DP_INFO(p_hwfn,
+ "PQs need to align by 8; Number %08x --> %08x, Start %08x --> %08x\n",
+ *p_resc_num,
+ (*p_resc_num) & ~0x7,
+ *p_resc_start, (*p_resc_start) & ~0x7);
+ *p_resc_num &= ~0x7;
+ *p_resc_start &= ~0x7;
+ }
+
+ return 0;
+}
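
As a sanity check of the alignment fixup above, a minimal stand-alone sketch
of the & ~0x7 rounding (user-space C with hypothetical inputs, not part of
the patch):

	#include <stdio.h>

	int main(void)
	{
		/* Hypothetical PQ count/offset returned by the MFW */
		unsigned int num = 109, start = 12;

		/* Clearing the low three bits rounds down to a multiple of 8 */
		printf("num %u -> %u, start %u -> %u\n",
		       num, num & ~0x7u, start, start & ~0x7u);
		return 0;
	}

This prints "num 109 -> 104, start 12 -> 8", matching the DP_INFO message
format above.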
+
+static int qed_hw_get_resc(struct qed_hwfn *p_hwfn)
+{
+ u8 res_id;
+ int rc;
+
+ for (res_id = 0; res_id < QED_MAX_RESC; res_id++) {
+ rc = qed_hw_set_resc_info(p_hwfn, res_id);
+ if (rc)
+ return rc;
+ }
/* Sanity for ILT */
- if (RESC_END(p_hwfn, QED_ILT) > PXP_NUM_ILT_RECORDS_BB) {
+ if ((RESC_END(p_hwfn, QED_ILT) > PXP_NUM_ILT_RECORDS_BB)) {
DP_NOTICE(p_hwfn, "Can't assign ILT pages [%08x,...,%08x]\n",
RESC_START(p_hwfn, QED_ILT),
RESC_END(p_hwfn, QED_ILT) - 1);
@@ -1495,34 +1751,12 @@ static int qed_hw_get_resc(struct qed_hwfn *p_hwfn)
qed_hw_set_feat(p_hwfn);
DP_VERBOSE(p_hwfn, NETIF_MSG_PROBE,
- "The numbers for each resource are:\n"
- "SB = %d start = %d\n"
- "L2_QUEUE = %d start = %d\n"
- "VPORT = %d start = %d\n"
- "PQ = %d start = %d\n"
- "RL = %d start = %d\n"
- "MAC = %d start = %d\n"
- "VLAN = %d start = %d\n"
- "ILT = %d start = %d\n"
- "LL2_QUEUE = %d start = %d\n",
- p_hwfn->hw_info.resc_num[QED_SB],
- p_hwfn->hw_info.resc_start[QED_SB],
- p_hwfn->hw_info.resc_num[QED_L2_QUEUE],
- p_hwfn->hw_info.resc_start[QED_L2_QUEUE],
- p_hwfn->hw_info.resc_num[QED_VPORT],
- p_hwfn->hw_info.resc_start[QED_VPORT],
- p_hwfn->hw_info.resc_num[QED_PQ],
- p_hwfn->hw_info.resc_start[QED_PQ],
- p_hwfn->hw_info.resc_num[QED_RL],
- p_hwfn->hw_info.resc_start[QED_RL],
- p_hwfn->hw_info.resc_num[QED_MAC],
- p_hwfn->hw_info.resc_start[QED_MAC],
- p_hwfn->hw_info.resc_num[QED_VLAN],
- p_hwfn->hw_info.resc_start[QED_VLAN],
- p_hwfn->hw_info.resc_num[QED_ILT],
- p_hwfn->hw_info.resc_start[QED_ILT],
- RESC_NUM(p_hwfn, QED_LL2_QUEUE),
- RESC_START(p_hwfn, QED_LL2_QUEUE));
+ "The numbers for each resource are:\n");
+ for (res_id = 0; res_id < QED_MAX_RESC; res_id++)
+ DP_VERBOSE(p_hwfn, NETIF_MSG_PROBE, "%s = %d start = %d\n",
+ qed_hw_get_resc_name(res_id),
+ RESC_NUM(p_hwfn, res_id),
+ RESC_START(p_hwfn, res_id));
return 0;
}
@@ -1801,6 +2035,9 @@ qed_get_hw_info(struct qed_hwfn *p_hwfn,
qed_get_num_funcs(p_hwfn, p_ptt);
+ if (qed_mcp_is_init(p_hwfn))
+ p_hwfn->hw_info.mtu = p_hwfn->mcp_info->func_info.mtu;
+
return qed_hw_get_resc(p_hwfn);
}
@@ -1975,8 +2212,13 @@ int qed_hw_prepare(struct qed_dev *cdev,
void qed_hw_remove(struct qed_dev *cdev)
{
+ struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
int i;
+ if (IS_PF(cdev))
+ qed_mcp_ov_update_driver_state(p_hwfn, p_hwfn->p_main_ptt,
+ QED_OV_DRIVER_STATE_NOT_LOADED);
+
for_each_hwfn(cdev, i) {
struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
@@ -2037,12 +2279,12 @@ static void qed_chain_free_pbl(struct qed_dev *cdev, struct qed_chain *p_chain)
{
void **pp_virt_addr_tbl = p_chain->pbl.pp_virt_addr_tbl;
u32 page_cnt = p_chain->page_cnt, i, pbl_size;
- u8 *p_pbl_virt = p_chain->pbl.p_virt_table;
+ u8 *p_pbl_virt = p_chain->pbl_sp.p_virt_table;
if (!pp_virt_addr_tbl)
return;
- if (!p_chain->pbl.p_virt_table)
+ if (!p_pbl_virt)
goto out;
for (i = 0; i < page_cnt; i++) {
@@ -2060,7 +2302,8 @@ static void qed_chain_free_pbl(struct qed_dev *cdev, struct qed_chain *p_chain)
pbl_size = page_cnt * QED_CHAIN_PBL_ENTRY_SIZE;
dma_free_coherent(&cdev->pdev->dev,
pbl_size,
- p_chain->pbl.p_virt_table, p_chain->pbl.p_phys_table);
+ p_chain->pbl_sp.p_virt_table,
+ p_chain->pbl_sp.p_phys_table);
out:
vfree(p_chain->pbl.pp_virt_addr_tbl);
}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_hsi.h
index 2777d5bb4380..785ab03683eb 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_hsi.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_hsi.h
@@ -8526,6 +8526,41 @@ struct mdump_config_stc {
u32 valid_logs;
};
+enum resource_id_enum {
+ RESOURCE_NUM_SB_E = 0,
+ RESOURCE_NUM_L2_QUEUE_E = 1,
+ RESOURCE_NUM_VPORT_E = 2,
+ RESOURCE_NUM_VMQ_E = 3,
+ RESOURCE_FACTOR_NUM_RSS_PF_E = 4,
+ RESOURCE_FACTOR_RSS_PER_VF_E = 5,
+ RESOURCE_NUM_RL_E = 6,
+ RESOURCE_NUM_PQ_E = 7,
+ RESOURCE_NUM_VF_E = 8,
+ RESOURCE_VFC_FILTER_E = 9,
+ RESOURCE_ILT_E = 10,
+ RESOURCE_CQS_E = 11,
+ RESOURCE_GFT_PROFILES_E = 12,
+ RESOURCE_NUM_TC_E = 13,
+ RESOURCE_NUM_RSS_ENGINES_E = 14,
+ RESOURCE_LL2_QUEUE_E = 15,
+ RESOURCE_RDMA_STATS_QUEUE_E = 16,
+ RESOURCE_MAX_NUM,
+ RESOURCE_NUM_INVALID = 0xFFFFFFFF
+};
+
+/* Resource ID is to be filled by the driver in the MB request
+ * Size, offset & flags to be filled by the MFW in the MB response
+ */
+struct resource_info {
+ enum resource_id_enum res_id;
+ u32 size; /* number of allocated resources */
+ u32 offset; /* Offset of the 1st resource */
+ u32 vf_size;
+ u32 vf_offset;
+ u32 flags;
+#define RESOURCE_ELEMENT_STRICT (1 << 0)
+};
+
union drv_union_data {
u32 ver_str[MCP_DRV_VER_STR_SIZE_DWORD];
struct mcp_mac wol_mac;
@@ -8543,9 +8578,9 @@ union drv_union_data {
struct drv_version_stc drv_version;
struct lan_stats_stc lan_stats;
- u64 reserved_stats[11];
struct ocbb_data_stc ocbb_info;
struct temperature_status_stc temp_info;
+ struct resource_info resource;
struct bist_nvm_image_att nvm_image_att;
struct mdump_config_stc mdump_config;
};
@@ -8561,9 +8596,19 @@ struct public_drv_mb {
#define DRV_MSG_CODE_INIT_PHY 0x22000000
#define DRV_MSG_CODE_LINK_RESET 0x23000000
#define DRV_MSG_CODE_SET_DCBX 0x25000000
+#define DRV_MSG_CODE_OV_UPDATE_CURR_CFG 0x26000000
+#define DRV_MSG_CODE_OV_UPDATE_BUS_NUM 0x27000000
+#define DRV_MSG_CODE_OV_UPDATE_BOOT_PROGRESS 0x28000000
+#define DRV_MSG_CODE_OV_UPDATE_STORM_FW_VER 0x29000000
+#define DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE 0x31000000
+#define DRV_MSG_CODE_OV_UPDATE_MTU 0x33000000
+#define DRV_MSG_CODE_OV_UPDATE_WOL 0x38000000
+#define DRV_MSG_CODE_OV_UPDATE_ESWITCH_MODE 0x39000000
#define DRV_MSG_CODE_BW_UPDATE_ACK 0x32000000
#define DRV_MSG_CODE_NIG_DRAIN 0x30000000
+#define DRV_MSG_GET_RESOURCE_ALLOC_MSG 0x34000000
#define DRV_MSG_CODE_VF_DISABLED_DONE 0xc0000000
#define DRV_MSG_CODE_CFG_VF_MSIX 0xc0010000
#define DRV_MSG_CODE_NVM_GET_FILE_ATT 0x00030000
@@ -8571,6 +8616,13 @@ struct public_drv_mb {
#define DRV_MSG_CODE_MCP_RESET 0x00090000
#define DRV_MSG_CODE_SET_VERSION 0x000f0000
#define DRV_MSG_CODE_MCP_HALT 0x00100000
+#define DRV_MSG_CODE_SET_VMAC 0x00110000
+#define DRV_MSG_CODE_GET_VMAC 0x00120000
+#define DRV_MSG_CODE_VMAC_TYPE_SHIFT 4
+#define DRV_MSG_CODE_VMAC_TYPE_MASK 0x30
+#define DRV_MSG_CODE_VMAC_TYPE_MAC 1
+#define DRV_MSG_CODE_VMAC_TYPE_WWNN 2
+#define DRV_MSG_CODE_VMAC_TYPE_WWPN 3
#define DRV_MSG_CODE_GET_STATS 0x00130000
#define DRV_MSG_CODE_STATS_TYPE_LAN 1
@@ -8582,11 +8634,16 @@ struct public_drv_mb {
#define DRV_MSG_CODE_BIST_TEST 0x001e0000
#define DRV_MSG_CODE_SET_LED_MODE 0x00200000
+#define DRV_MSG_CODE_GET_PF_RDMA_PROTOCOL 0x002b0000
+#define DRV_MSG_CODE_OS_WOL 0x002e0000
#define DRV_MSG_SEQ_NUMBER_MASK 0x0000ffff
u32 drv_mb_param;
-#define DRV_MB_PARAM_UNLOAD_WOL_MCP 0x00000001
+#define DRV_MB_PARAM_UNLOAD_WOL_UNKNOWN 0x00000000
+#define DRV_MB_PARAM_UNLOAD_WOL_MCP 0x00000001
+#define DRV_MB_PARAM_UNLOAD_WOL_DISABLED 0x00000002
+#define DRV_MB_PARAM_UNLOAD_WOL_ENABLED 0x00000003
#define DRV_MB_PARAM_DCBX_NOTIFY_MASK 0x000000FF
#define DRV_MB_PARAM_DCBX_NOTIFY_SHIFT 3
@@ -8599,13 +8656,59 @@ struct public_drv_mb {
#define DRV_MB_PARAM_LLDP_SEND_MASK 0x00000001
#define DRV_MB_PARAM_LLDP_SEND_SHIFT 0
+#define DRV_MB_PARAM_OV_CURR_CFG_SHIFT 0
+#define DRV_MB_PARAM_OV_CURR_CFG_MASK 0x0000000F
+#define DRV_MB_PARAM_OV_CURR_CFG_NONE 0
+#define DRV_MB_PARAM_OV_CURR_CFG_OS 1
+#define DRV_MB_PARAM_OV_CURR_CFG_VENDOR_SPEC 2
+#define DRV_MB_PARAM_OV_CURR_CFG_OTHER 3
+
+#define DRV_MB_PARAM_OV_STORM_FW_VER_SHIFT 0
+#define DRV_MB_PARAM_OV_STORM_FW_VER_MASK 0xFFFFFFFF
+#define DRV_MB_PARAM_OV_STORM_FW_VER_MAJOR_MASK 0xFF000000
+#define DRV_MB_PARAM_OV_STORM_FW_VER_MINOR_MASK 0x00FF0000
+#define DRV_MB_PARAM_OV_STORM_FW_VER_BUILD_MASK 0x0000FF00
+#define DRV_MB_PARAM_OV_STORM_FW_VER_DROP_MASK 0x000000FF
+
+#define DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_SHIFT 0
+#define DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_MASK 0xF
+#define DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_UNKNOWN 0x1
+#define DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_NOT_LOADED 0x2
+#define DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_LOADING 0x3
+#define DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_DISABLED 0x4
+#define DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_ACTIVE 0x5
+
+#define DRV_MB_PARAM_OV_MTU_SIZE_SHIFT 0
+#define DRV_MB_PARAM_OV_MTU_SIZE_MASK 0xFFFFFFFF
+
+#define DRV_MB_PARAM_WOL_MASK (DRV_MB_PARAM_WOL_DEFAULT | \
+ DRV_MB_PARAM_WOL_DISABLED | \
+ DRV_MB_PARAM_WOL_ENABLED)
+#define DRV_MB_PARAM_WOL_DEFAULT DRV_MB_PARAM_UNLOAD_WOL_MCP
+#define DRV_MB_PARAM_WOL_DISABLED DRV_MB_PARAM_UNLOAD_WOL_DISABLED
+#define DRV_MB_PARAM_WOL_ENABLED DRV_MB_PARAM_UNLOAD_WOL_ENABLED
+
+#define DRV_MB_PARAM_ESWITCH_MODE_MASK (DRV_MB_PARAM_ESWITCH_MODE_NONE | \
+ DRV_MB_PARAM_ESWITCH_MODE_VEB | \
+ DRV_MB_PARAM_ESWITCH_MODE_VEPA)
+#define DRV_MB_PARAM_ESWITCH_MODE_NONE 0x0
+#define DRV_MB_PARAM_ESWITCH_MODE_VEB 0x1
+#define DRV_MB_PARAM_ESWITCH_MODE_VEPA 0x2
#define DRV_MB_PARAM_SET_LED_MODE_OPER 0x0
#define DRV_MB_PARAM_SET_LED_MODE_ON 0x1
#define DRV_MB_PARAM_SET_LED_MODE_OFF 0x2
+ /* Resource Allocation params - Driver version support */
+#define DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_MASK 0xFFFF0000
+#define DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_SHIFT 16
+#define DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR_MASK 0x0000FFFF
+#define DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR_SHIFT 0
+
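A hedged sketch of composing the mailbox parameter from these masks (the
1.0 major/minor pair is illustrative, not mandated by the interface):

	u32 drv_mb_param;

	/* Major in bits 31:16, minor in bits 15:0 */
	drv_mb_param = ((1 << DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_SHIFT) &
			DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_MASK) |
		       ((0 << DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR_SHIFT) &
			DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR_MASK);
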
#define DRV_MB_PARAM_BIST_REGISTER_TEST 1
#define DRV_MB_PARAM_BIST_CLOCK_TEST 2
+#define DRV_MB_PARAM_BIST_NVM_TEST_NUM_IMAGES 3
+#define DRV_MB_PARAM_BIST_NVM_TEST_IMAGE_BY_INDEX 4
#define DRV_MB_PARAM_BIST_RC_UNKNOWN 0
#define DRV_MB_PARAM_BIST_RC_PASSED 1
@@ -8614,6 +8717,8 @@ struct public_drv_mb {
#define DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT 0
#define DRV_MB_PARAM_BIST_TEST_INDEX_MASK 0x000000FF
+#define DRV_MB_PARAM_BIST_TEST_IMAGE_INDEX_SHIFT 8
+#define DRV_MB_PARAM_BIST_TEST_IMAGE_INDEX_MASK 0x0000FF00
u32 fw_mb_header;
#define FW_MSG_CODE_MASK 0xffff0000
@@ -8628,15 +8733,27 @@ struct public_drv_mb {
#define FW_MSG_CODE_DRV_UNLOAD_PORT 0x20120000
#define FW_MSG_CODE_DRV_UNLOAD_FUNCTION 0x20130000
#define FW_MSG_CODE_DRV_UNLOAD_DONE 0x21100000
+#define FW_MSG_CODE_RESOURCE_ALLOC_OK 0x34000000
+#define FW_MSG_CODE_RESOURCE_ALLOC_UNKNOWN 0x35000000
+#define FW_MSG_CODE_RESOURCE_ALLOC_DEPRECATED 0x36000000
#define FW_MSG_CODE_DRV_CFG_VF_MSIX_DONE 0xb0010000
#define FW_MSG_CODE_NVM_OK 0x00010000
#define FW_MSG_CODE_OK 0x00160000
+#define FW_MSG_CODE_OS_WOL_SUPPORTED 0x00800000
+#define FW_MSG_CODE_OS_WOL_NOT_SUPPORTED 0x00810000
+
#define FW_MSG_SEQ_NUMBER_MASK 0x0000ffff
u32 fw_mb_param;
+ /* Get PF RDMA protocol command response */
+#define FW_MB_PARAM_GET_PF_RDMA_NONE 0x0
+#define FW_MB_PARAM_GET_PF_RDMA_ROCE 0x1
+#define FW_MB_PARAM_GET_PF_RDMA_IWARP 0x2
+#define FW_MB_PARAM_GET_PF_RDMA_BOTH 0x3
+
u32 drv_pulse_mb;
#define DRV_PULSE_SEQ_MASK 0x00007fff
#define DRV_PULSE_SYSTEM_TIME_MASK 0xffff0000
diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.c b/drivers/net/ethernet/qlogic/qed/qed_int.c
index 2adedc6fb6cf..c68dbf7092b1 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_int.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_int.c
@@ -1377,7 +1377,7 @@ static const char *attn_master_to_str(u8 master)
case 9: return "DBU";
case 10: return "DMAE";
default:
- return "Unkown";
+ return "Unknown";
}
}
@@ -1555,7 +1555,7 @@ static int qed_dorq_attn_cb(struct qed_hwfn *p_hwfn)
DORQ_REG_DB_DROP_DETAILS);
DP_INFO(p_hwfn->cdev,
- "DORQ db_drop: adress 0x%08x Opaque FID 0x%04x Size [bytes] 0x%08x Reason: 0x%08x\n",
+ "DORQ db_drop: address 0x%08x Opaque FID 0x%04x Size [bytes] 0x%08x Reason: 0x%08x\n",
qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
DORQ_REG_DB_DROP_DETAILS_ADDRESS),
(u16)(details & QED_DORQ_ATTENTION_OPAQUE_MASK),
@@ -3030,6 +3030,31 @@ int qed_int_igu_read_cam(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
}
}
}
+
+ /* There's a possibility the igu_sb_cnt_iov doesn't properly reflect
+ * the number of VF SBs [especially for the first VF on the engine, as
+ * we can't differentiate between empty entries and its own entries].
+ * Since we don't really support more SBs than VFs today, prevent any
+ * such configuration by sanitizing the number of SBs to equal the
+ * number of VFs.
+ */
+ if (IS_PF_SRIOV(p_hwfn)) {
+ u16 total_vfs = p_hwfn->cdev->p_iov_info->total_vfs;
+
+ if (total_vfs < p_igu_info->free_blks) {
+ DP_VERBOSE(p_hwfn,
+ (NETIF_MSG_INTR | QED_MSG_IOV),
+ "Limiting number of SBs for IOV - %04x --> %04x\n",
+ p_igu_info->free_blks,
+ p_hwfn->cdev->p_iov_info->total_vfs);
+ p_igu_info->free_blks = total_vfs;
+ } else if (total_vfs > p_igu_info->free_blks) {
+ DP_NOTICE(p_hwfn,
+ "IGU has only %04x SBs for VFs while the device has %04x VFs\n",
+ p_igu_info->free_blks, total_vfs);
+ return -EINVAL;
+ }
+ }
p_igu_info->igu_sb_cnt_iov = p_igu_info->free_blks;
DP_VERBOSE(
@@ -3163,7 +3188,12 @@ u16 qed_int_queue_id_from_sb_id(struct qed_hwfn *p_hwfn, u16 sb_id)
return sb_id - p_info->igu_base_sb;
} else if ((sb_id >= p_info->igu_base_sb_iov) &&
(sb_id < p_info->igu_base_sb_iov + p_info->igu_sb_cnt_iov)) {
- return sb_id - p_info->igu_base_sb_iov + p_info->igu_sb_cnt;
+ /* We want the first VF queue to be adjacent to the
+ * last PF queue. Since there can be fewer L2 queues
+ * than SBs, use the L2-queue feature count instead.
+ */
+ return sb_id - p_info->igu_base_sb_iov +
+ FEAT_NUM(p_hwfn, QED_PF_L2_QUE);
} else {
DP_NOTICE(p_hwfn, "SB %d not in range for function\n", sb_id);
return 0;
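
Worked example of the mapping above (numbers hypothetical): with
igu_base_sb_iov = 16 and FEAT_NUM(p_hwfn, QED_PF_L2_QUE) = 8, a VF SB id of
20 yields queue id 20 - 16 + 8 = 12, placing the VF queues immediately after
the PF's L2-queue range even when that range is smaller than the PF's SB
count.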
diff --git a/drivers/net/ethernet/qlogic/qed/qed_iscsi.c b/drivers/net/ethernet/qlogic/qed/qed_iscsi.c
new file mode 100644
index 000000000000..17a70122df05
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_iscsi.c
@@ -0,0 +1,1277 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#include <linux/types.h>
+#include <asm/byteorder.h>
+#include <asm/param.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/etherdevice.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/log2.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include <linux/stddef.h>
+#include <linux/string.h>
+#include <linux/version.h>
+#include <linux/workqueue.h>
+#include <linux/errno.h>
+#include <linux/list.h>
+#include <linux/spinlock.h>
+#include <linux/qed/qed_iscsi_if.h>
+#include "qed.h"
+#include "qed_cxt.h"
+#include "qed_dev_api.h"
+#include "qed_hsi.h"
+#include "qed_hw.h"
+#include "qed_int.h"
+#include "qed_iscsi.h"
+#include "qed_ll2.h"
+#include "qed_mcp.h"
+#include "qed_sp.h"
+#include "qed_sriov.h"
+#include "qed_reg_addr.h"
+
+struct qed_iscsi_conn {
+ struct list_head list_entry;
+ bool free_on_delete;
+
+ u16 conn_id;
+ u32 icid;
+ u32 fw_cid;
+
+ u8 layer_code;
+ u8 offl_flags;
+ u8 connect_mode;
+ u32 initial_ack;
+ dma_addr_t sq_pbl_addr;
+ struct qed_chain r2tq;
+ struct qed_chain xhq;
+ struct qed_chain uhq;
+
+ struct tcp_upload_params *tcp_upload_params_virt_addr;
+ dma_addr_t tcp_upload_params_phys_addr;
+ struct scsi_terminate_extra_params *queue_cnts_virt_addr;
+ dma_addr_t queue_cnts_phys_addr;
+ dma_addr_t syn_phy_addr;
+
+ u16 syn_ip_payload_length;
+ u8 local_mac[6];
+ u8 remote_mac[6];
+ u16 vlan_id;
+ u8 tcp_flags;
+ u8 ip_version;
+ u32 remote_ip[4];
+ u32 local_ip[4];
+ u8 ka_max_probe_cnt;
+ u8 dup_ack_theshold;
+ u32 rcv_next;
+ u32 snd_una;
+ u32 snd_next;
+ u32 snd_max;
+ u32 snd_wnd;
+ u32 rcv_wnd;
+ u32 snd_wl1;
+ u32 cwnd;
+ u32 ss_thresh;
+ u16 srtt;
+ u16 rtt_var;
+ u32 ts_time;
+ u32 ts_recent;
+ u32 ts_recent_age;
+ u32 total_rt;
+ u32 ka_timeout_delta;
+ u32 rt_timeout_delta;
+ u8 dup_ack_cnt;
+ u8 snd_wnd_probe_cnt;
+ u8 ka_probe_cnt;
+ u8 rt_cnt;
+ u32 flow_label;
+ u32 ka_timeout;
+ u32 ka_interval;
+ u32 max_rt_time;
+ u32 initial_rcv_wnd;
+ u8 ttl;
+ u8 tos_or_tc;
+ u16 remote_port;
+ u16 local_port;
+ u16 mss;
+ u8 snd_wnd_scale;
+ u8 rcv_wnd_scale;
+ u32 ts_ticks_per_second;
+ u16 da_timeout_value;
+ u8 ack_frequency;
+
+ u8 update_flag;
+ u8 default_cq;
+ u32 max_seq_size;
+ u32 max_recv_pdu_length;
+ u32 max_send_pdu_length;
+ u32 first_seq_length;
+ u32 exp_stat_sn;
+ u32 stat_sn;
+ u16 physical_q0;
+ u16 physical_q1;
+ u8 abortive_dsconnect;
+};
+
+static int
+qed_sp_iscsi_func_start(struct qed_hwfn *p_hwfn,
+ enum spq_mode comp_mode,
+ struct qed_spq_comp_cb *p_comp_addr,
+ void *event_context, iscsi_event_cb_t async_event_cb)
+{
+ struct iscsi_init_ramrod_params *p_ramrod = NULL;
+ struct scsi_init_func_queues *p_queue = NULL;
+ struct qed_iscsi_pf_params *p_params = NULL;
+ struct iscsi_spe_func_init *p_init = NULL;
+ struct qed_spq_entry *p_ent = NULL;
+ struct qed_sp_init_data init_data;
+ int rc = 0;
+ u32 dval;
+ u16 val;
+ u8 i;
+
+ /* Get SPQ entry */
+ memset(&init_data, 0, sizeof(init_data));
+ init_data.cid = qed_spq_get_cid(p_hwfn);
+ init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ init_data.comp_mode = comp_mode;
+ init_data.p_comp_data = p_comp_addr;
+
+ rc = qed_sp_init_request(p_hwfn, &p_ent,
+ ISCSI_RAMROD_CMD_ID_INIT_FUNC,
+ PROTOCOLID_ISCSI, &init_data);
+ if (rc)
+ return rc;
+
+ p_ramrod = &p_ent->ramrod.iscsi_init;
+ p_init = &p_ramrod->iscsi_init_spe;
+ p_params = &p_hwfn->pf_params.iscsi_pf_params;
+ p_queue = &p_init->q_params;
+
+ SET_FIELD(p_init->hdr.flags,
+ ISCSI_SLOW_PATH_HDR_LAYER_CODE, ISCSI_SLOW_PATH_LAYER_CODE);
+ p_init->hdr.op_code = ISCSI_RAMROD_CMD_ID_INIT_FUNC;
+
+ val = p_params->half_way_close_timeout;
+ p_init->half_way_close_timeout = cpu_to_le16(val);
+ p_init->num_sq_pages_in_ring = p_params->num_sq_pages_in_ring;
+ p_init->num_r2tq_pages_in_ring = p_params->num_r2tq_pages_in_ring;
+ p_init->num_uhq_pages_in_ring = p_params->num_uhq_pages_in_ring;
+ p_init->func_params.log_page_size = p_params->log_page_size;
+ val = p_params->num_tasks;
+ p_init->func_params.num_tasks = cpu_to_le16(val);
+ p_init->debug_mode.flags = p_params->debug_mode;
+
+ DMA_REGPAIR_LE(p_queue->glbl_q_params_addr,
+ p_params->glbl_q_params_addr);
+
+ val = p_params->cq_num_entries;
+ p_queue->cq_num_entries = cpu_to_le16(val);
+ val = p_params->cmdq_num_entries;
+ p_queue->cmdq_num_entries = cpu_to_le16(val);
+ p_queue->num_queues = p_params->num_queues;
+ dval = (u8)p_hwfn->hw_info.resc_start[QED_CMDQS_CQS];
+ p_queue->queue_relative_offset = (u8)dval;
+ p_queue->cq_sb_pi = p_params->gl_rq_pi;
+ p_queue->cmdq_sb_pi = p_params->gl_cmd_pi;
+
+ for (i = 0; i < p_params->num_queues; i++) {
+ val = p_hwfn->sbs_info[i]->igu_sb_id;
+ p_queue->cq_cmdq_sb_num_arr[i] = cpu_to_le16(val);
+ }
+
+ p_queue->bdq_resource_id = ISCSI_BDQ_ID(p_hwfn->port_id);
+
+ DMA_REGPAIR_LE(p_queue->bdq_pbl_base_address[BDQ_ID_RQ],
+ p_params->bdq_pbl_base_addr[BDQ_ID_RQ]);
+ p_queue->bdq_pbl_num_entries[BDQ_ID_RQ] =
+ p_params->bdq_pbl_num_entries[BDQ_ID_RQ];
+ val = p_params->bdq_xoff_threshold[BDQ_ID_RQ];
+ p_queue->bdq_xoff_threshold[BDQ_ID_RQ] = cpu_to_le16(val);
+ val = p_params->bdq_xon_threshold[BDQ_ID_RQ];
+ p_queue->bdq_xon_threshold[BDQ_ID_RQ] = cpu_to_le16(val);
+
+ DMA_REGPAIR_LE(p_queue->bdq_pbl_base_address[BDQ_ID_IMM_DATA],
+ p_params->bdq_pbl_base_addr[BDQ_ID_IMM_DATA]);
+ p_queue->bdq_pbl_num_entries[BDQ_ID_IMM_DATA] =
+ p_params->bdq_pbl_num_entries[BDQ_ID_IMM_DATA];
+ val = p_params->bdq_xoff_threshold[BDQ_ID_IMM_DATA];
+ p_queue->bdq_xoff_threshold[BDQ_ID_IMM_DATA] = cpu_to_le16(val);
+ val = p_params->bdq_xon_threshold[BDQ_ID_IMM_DATA];
+ p_queue->bdq_xon_threshold[BDQ_ID_IMM_DATA] = cpu_to_le16(val);
+ val = p_params->rq_buffer_size;
+ p_queue->rq_buffer_size = cpu_to_le16(val);
+ if (p_params->is_target) {
+ SET_FIELD(p_queue->q_validity,
+ SCSI_INIT_FUNC_QUEUES_RQ_VALID, 1);
+ if (p_queue->bdq_pbl_num_entries[BDQ_ID_IMM_DATA])
+ SET_FIELD(p_queue->q_validity,
+ SCSI_INIT_FUNC_QUEUES_IMM_DATA_VALID, 1);
+ SET_FIELD(p_queue->q_validity,
+ SCSI_INIT_FUNC_QUEUES_CMD_VALID, 1);
+ } else {
+ SET_FIELD(p_queue->q_validity,
+ SCSI_INIT_FUNC_QUEUES_RQ_VALID, 1);
+ }
+ p_ramrod->tcp_init.two_msl_timer = cpu_to_le32(p_params->two_msl_timer);
+ val = p_params->tx_sws_timer;
+ p_ramrod->tcp_init.tx_sws_timer = cpu_to_le16(val);
+ p_ramrod->tcp_init.maxfinrt = p_params->max_fin_rt;
+
+ p_hwfn->p_iscsi_info->event_context = event_context;
+ p_hwfn->p_iscsi_info->event_cb = async_event_cb;
+
+ return qed_spq_post(p_hwfn, p_ent, NULL);
+}
+
+static int qed_sp_iscsi_conn_offload(struct qed_hwfn *p_hwfn,
+ struct qed_iscsi_conn *p_conn,
+ enum spq_mode comp_mode,
+ struct qed_spq_comp_cb *p_comp_addr)
+{
+ struct iscsi_spe_conn_offload *p_ramrod = NULL;
+ struct tcp_offload_params_opt2 *p_tcp2 = NULL;
+ struct tcp_offload_params *p_tcp = NULL;
+ struct qed_spq_entry *p_ent = NULL;
+ struct qed_sp_init_data init_data;
+ union qed_qm_pq_params pq_params;
+ u16 pq0_id = 0, pq1_id = 0;
+ dma_addr_t r2tq_pbl_addr;
+ dma_addr_t xhq_pbl_addr;
+ dma_addr_t uhq_pbl_addr;
+ int rc = 0;
+ u32 dval;
+ u16 wval;
+ u8 i;
+ u16 *p;
+
+ /* Get SPQ entry */
+ memset(&init_data, 0, sizeof(init_data));
+ init_data.cid = p_conn->icid;
+ init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ init_data.comp_mode = comp_mode;
+ init_data.p_comp_data = p_comp_addr;
+
+ rc = qed_sp_init_request(p_hwfn, &p_ent,
+ ISCSI_RAMROD_CMD_ID_OFFLOAD_CONN,
+ PROTOCOLID_ISCSI, &init_data);
+ if (rc)
+ return rc;
+
+ p_ramrod = &p_ent->ramrod.iscsi_conn_offload;
+
+ /* Transmission PQ is the first of the PF */
+ memset(&pq_params, 0, sizeof(pq_params));
+ pq0_id = qed_get_qm_pq(p_hwfn, PROTOCOLID_ISCSI, &pq_params);
+ p_conn->physical_q0 = cpu_to_le16(pq0_id);
+ p_ramrod->iscsi.physical_q0 = cpu_to_le16(pq0_id);
+
+ /* iSCSI Pure-ACK PQ */
+ pq_params.iscsi.q_idx = 1;
+ pq1_id = qed_get_qm_pq(p_hwfn, PROTOCOLID_ISCSI, &pq_params);
+ p_conn->physical_q1 = cpu_to_le16(pq1_id);
+ p_ramrod->iscsi.physical_q1 = cpu_to_le16(pq1_id);
+
+ p_ramrod->hdr.op_code = ISCSI_RAMROD_CMD_ID_OFFLOAD_CONN;
+ SET_FIELD(p_ramrod->hdr.flags, ISCSI_SLOW_PATH_HDR_LAYER_CODE,
+ p_conn->layer_code);
+
+ p_ramrod->conn_id = cpu_to_le16(p_conn->conn_id);
+ p_ramrod->fw_cid = cpu_to_le32(p_conn->icid);
+
+ DMA_REGPAIR_LE(p_ramrod->iscsi.sq_pbl_addr, p_conn->sq_pbl_addr);
+
+ r2tq_pbl_addr = qed_chain_get_pbl_phys(&p_conn->r2tq);
+ DMA_REGPAIR_LE(p_ramrod->iscsi.r2tq_pbl_addr, r2tq_pbl_addr);
+
+ xhq_pbl_addr = qed_chain_get_pbl_phys(&p_conn->xhq);
+ DMA_REGPAIR_LE(p_ramrod->iscsi.xhq_pbl_addr, xhq_pbl_addr);
+
+ uhq_pbl_addr = qed_chain_get_pbl_phys(&p_conn->uhq);
+ DMA_REGPAIR_LE(p_ramrod->iscsi.uhq_pbl_addr, uhq_pbl_addr);
+
+ p_ramrod->iscsi.initial_ack = cpu_to_le32(p_conn->initial_ack);
+ p_ramrod->iscsi.flags = p_conn->offl_flags;
+ p_ramrod->iscsi.default_cq = p_conn->default_cq;
+ p_ramrod->iscsi.stat_sn = cpu_to_le32(p_conn->stat_sn);
+
+ if (!GET_FIELD(p_ramrod->iscsi.flags,
+ ISCSI_CONN_OFFLOAD_PARAMS_TCP_ON_CHIP_1B)) {
+ p_tcp = &p_ramrod->tcp;
+
+ p = (u16 *)p_conn->local_mac;
+ p_tcp->local_mac_addr_hi = swab16(get_unaligned(p));
+ p_tcp->local_mac_addr_mid = swab16(get_unaligned(p + 1));
+ p_tcp->local_mac_addr_lo = swab16(get_unaligned(p + 2));
+
+ p = (u16 *)p_conn->remote_mac;
+ p_tcp->remote_mac_addr_hi = swab16(get_unaligned(p));
+ p_tcp->remote_mac_addr_mid = swab16(get_unaligned(p + 1));
+ p_tcp->remote_mac_addr_lo = swab16(get_unaligned(p + 2));
+
+ p_tcp->vlan_id = cpu_to_le16(p_conn->vlan_id);
+
+ p_tcp->flags = p_conn->tcp_flags;
+ p_tcp->ip_version = p_conn->ip_version;
+ for (i = 0; i < 4; i++) {
+ dval = p_conn->remote_ip[i];
+ p_tcp->remote_ip[i] = cpu_to_le32(dval);
+ dval = p_conn->local_ip[i];
+ p_tcp->local_ip[i] = cpu_to_le32(dval);
+ }
+ p_tcp->ka_max_probe_cnt = p_conn->ka_max_probe_cnt;
+ p_tcp->dup_ack_theshold = p_conn->dup_ack_theshold;
+
+ p_tcp->rcv_next = cpu_to_le32(p_conn->rcv_next);
+ p_tcp->snd_una = cpu_to_le32(p_conn->snd_una);
+ p_tcp->snd_next = cpu_to_le32(p_conn->snd_next);
+ p_tcp->snd_max = cpu_to_le32(p_conn->snd_max);
+ p_tcp->snd_wnd = cpu_to_le32(p_conn->snd_wnd);
+ p_tcp->rcv_wnd = cpu_to_le32(p_conn->rcv_wnd);
+ p_tcp->snd_wl1 = cpu_to_le32(p_conn->snd_wl1);
+ p_tcp->cwnd = cpu_to_le32(p_conn->cwnd);
+ p_tcp->ss_thresh = cpu_to_le32(p_conn->ss_thresh);
+ p_tcp->srtt = cpu_to_le16(p_conn->srtt);
+ p_tcp->rtt_var = cpu_to_le16(p_conn->rtt_var);
+ p_tcp->ts_time = cpu_to_le32(p_conn->ts_time);
+ p_tcp->ts_recent = cpu_to_le32(p_conn->ts_recent);
+ p_tcp->ts_recent_age = cpu_to_le32(p_conn->ts_recent_age);
+ p_tcp->total_rt = cpu_to_le32(p_conn->total_rt);
+ dval = p_conn->ka_timeout_delta;
+ p_tcp->ka_timeout_delta = cpu_to_le32(dval);
+ dval = p_conn->rt_timeout_delta;
+ p_tcp->rt_timeout_delta = cpu_to_le32(dval);
+ p_tcp->dup_ack_cnt = p_conn->dup_ack_cnt;
+ p_tcp->snd_wnd_probe_cnt = p_conn->snd_wnd_probe_cnt;
+ p_tcp->ka_probe_cnt = p_conn->ka_probe_cnt;
+ p_tcp->rt_cnt = p_conn->rt_cnt;
+ p_tcp->flow_label = cpu_to_le32(p_conn->flow_label);
+ p_tcp->ka_timeout = cpu_to_le32(p_conn->ka_timeout);
+ p_tcp->ka_interval = cpu_to_le32(p_conn->ka_interval);
+ p_tcp->max_rt_time = cpu_to_le32(p_conn->max_rt_time);
+ dval = p_conn->initial_rcv_wnd;
+ p_tcp->initial_rcv_wnd = cpu_to_le32(dval);
+ p_tcp->ttl = p_conn->ttl;
+ p_tcp->tos_or_tc = p_conn->tos_or_tc;
+ p_tcp->remote_port = cpu_to_le16(p_conn->remote_port);
+ p_tcp->local_port = cpu_to_le16(p_conn->local_port);
+ p_tcp->mss = cpu_to_le16(p_conn->mss);
+ p_tcp->snd_wnd_scale = p_conn->snd_wnd_scale;
+ p_tcp->rcv_wnd_scale = p_conn->rcv_wnd_scale;
+ dval = p_conn->ts_ticks_per_second;
+ p_tcp->ts_ticks_per_second = cpu_to_le32(dval);
+ wval = p_conn->da_timeout_value;
+ p_tcp->da_timeout_value = cpu_to_le16(wval);
+ p_tcp->ack_frequency = p_conn->ack_frequency;
+ p_tcp->connect_mode = p_conn->connect_mode;
+ } else {
+ p_tcp2 =
+ &((struct iscsi_spe_conn_offload_option2 *)p_ramrod)->tcp;
+
+ p = (u16 *)p_conn->local_mac;
+ p_tcp2->local_mac_addr_hi = swab16(get_unaligned(p));
+ p_tcp2->local_mac_addr_mid = swab16(get_unaligned(p + 1));
+ p_tcp2->local_mac_addr_lo = swab16(get_unaligned(p + 2));
+
+ p = (u16 *)p_conn->remote_mac;
+ p_tcp2->remote_mac_addr_hi = swab16(get_unaligned(p));
+ p_tcp2->remote_mac_addr_mid = swab16(get_unaligned(p + 1));
+ p_tcp2->remote_mac_addr_lo = swab16(get_unaligned(p + 2));
+
+ p_tcp2->vlan_id = cpu_to_le16(p_conn->vlan_id);
+ p_tcp2->flags = p_conn->tcp_flags;
+
+ p_tcp2->ip_version = p_conn->ip_version;
+ for (i = 0; i < 4; i++) {
+ dval = p_conn->remote_ip[i];
+ p_tcp2->remote_ip[i] = cpu_to_le32(dval);
+ dval = p_conn->local_ip[i];
+ p_tcp2->local_ip[i] = cpu_to_le32(dval);
+ }
+
+ p_tcp2->flow_label = cpu_to_le32(p_conn->flow_label);
+ p_tcp2->ttl = p_conn->ttl;
+ p_tcp2->tos_or_tc = p_conn->tos_or_tc;
+ p_tcp2->remote_port = cpu_to_le16(p_conn->remote_port);
+ p_tcp2->local_port = cpu_to_le16(p_conn->local_port);
+ p_tcp2->mss = cpu_to_le16(p_conn->mss);
+ p_tcp2->rcv_wnd_scale = p_conn->rcv_wnd_scale;
+ p_tcp2->connect_mode = p_conn->connect_mode;
+ wval = p_conn->syn_ip_payload_length;
+ p_tcp2->syn_ip_payload_length = cpu_to_le16(wval);
+ p_tcp2->syn_phy_addr_lo = DMA_LO_LE(p_conn->syn_phy_addr);
+ p_tcp2->syn_phy_addr_hi = DMA_HI_LE(p_conn->syn_phy_addr);
+ }
+
+ return qed_spq_post(p_hwfn, p_ent, NULL);
+}
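
The swab16(get_unaligned(p)) sequence above packs each pair of MAC bytes
into a 16-bit word in big-endian order (on a little-endian host); a minimal
user-space sketch of the equivalent shuffle, with hypothetical names:

	#include <stdint.h>
	#include <stdio.h>

	/* Pack mac[0..5] into hi/mid/lo words, two bytes each, big-endian */
	static void mac_to_words(const uint8_t *mac,
				 uint16_t *hi, uint16_t *mid, uint16_t *lo)
	{
		*hi  = (uint16_t)(mac[0] << 8) | mac[1];
		*mid = (uint16_t)(mac[2] << 8) | mac[3];
		*lo  = (uint16_t)(mac[4] << 8) | mac[5];
	}

	int main(void)
	{
		uint8_t mac[6] = { 0x00, 0x0e, 0x1e, 0xaa, 0xbb, 0xcc };
		uint16_t hi, mid, lo;

		mac_to_words(mac, &hi, &mid, &lo);
		printf("%04x %04x %04x\n", hi, mid, lo); /* 000e 1eaa bbcc */
		return 0;
	}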
+
+static int qed_sp_iscsi_conn_update(struct qed_hwfn *p_hwfn,
+ struct qed_iscsi_conn *p_conn,
+ enum spq_mode comp_mode,
+ struct qed_spq_comp_cb *p_comp_addr)
+{
+ struct iscsi_conn_update_ramrod_params *p_ramrod = NULL;
+ struct qed_spq_entry *p_ent = NULL;
+ struct qed_sp_init_data init_data;
+ int rc = -EINVAL;
+ u32 dval;
+
+ /* Get SPQ entry */
+ memset(&init_data, 0, sizeof(init_data));
+ init_data.cid = p_conn->icid;
+ init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ init_data.comp_mode = comp_mode;
+ init_data.p_comp_data = p_comp_addr;
+
+ rc = qed_sp_init_request(p_hwfn, &p_ent,
+ ISCSI_RAMROD_CMD_ID_UPDATE_CONN,
+ PROTOCOLID_ISCSI, &init_data);
+ if (rc)
+ return rc;
+
+ p_ramrod = &p_ent->ramrod.iscsi_conn_update;
+ p_ramrod->hdr.op_code = ISCSI_RAMROD_CMD_ID_UPDATE_CONN;
+ SET_FIELD(p_ramrod->hdr.flags,
+ ISCSI_SLOW_PATH_HDR_LAYER_CODE, p_conn->layer_code);
+
+ p_ramrod->conn_id = cpu_to_le16(p_conn->conn_id);
+ p_ramrod->fw_cid = cpu_to_le32(p_conn->icid);
+ p_ramrod->flags = p_conn->update_flag;
+ p_ramrod->max_seq_size = cpu_to_le32(p_conn->max_seq_size);
+ dval = p_conn->max_recv_pdu_length;
+ p_ramrod->max_recv_pdu_length = cpu_to_le32(dval);
+ dval = p_conn->max_send_pdu_length;
+ p_ramrod->max_send_pdu_length = cpu_to_le32(dval);
+ dval = p_conn->first_seq_length;
+ p_ramrod->first_seq_length = cpu_to_le32(dval);
+ p_ramrod->exp_stat_sn = cpu_to_le32(p_conn->exp_stat_sn);
+
+ return qed_spq_post(p_hwfn, p_ent, NULL);
+}
+
+static int qed_sp_iscsi_conn_terminate(struct qed_hwfn *p_hwfn,
+ struct qed_iscsi_conn *p_conn,
+ enum spq_mode comp_mode,
+ struct qed_spq_comp_cb *p_comp_addr)
+{
+ struct iscsi_spe_conn_termination *p_ramrod = NULL;
+ struct qed_spq_entry *p_ent = NULL;
+ struct qed_sp_init_data init_data;
+ int rc = -EINVAL;
+
+ /* Get SPQ entry */
+ memset(&init_data, 0, sizeof(init_data));
+ init_data.cid = p_conn->icid;
+ init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ init_data.comp_mode = comp_mode;
+ init_data.p_comp_data = p_comp_addr;
+
+ rc = qed_sp_init_request(p_hwfn, &p_ent,
+ ISCSI_RAMROD_CMD_ID_TERMINATION_CONN,
+ PROTOCOLID_ISCSI, &init_data);
+ if (rc)
+ return rc;
+
+ p_ramrod = &p_ent->ramrod.iscsi_conn_terminate;
+ p_ramrod->hdr.op_code = ISCSI_RAMROD_CMD_ID_TERMINATION_CONN;
+ SET_FIELD(p_ramrod->hdr.flags,
+ ISCSI_SLOW_PATH_HDR_LAYER_CODE, p_conn->layer_code);
+
+ p_ramrod->conn_id = cpu_to_le16(p_conn->conn_id);
+ p_ramrod->fw_cid = cpu_to_le32(p_conn->icid);
+ p_ramrod->abortive = p_conn->abortive_dsconnect;
+
+ DMA_REGPAIR_LE(p_ramrod->query_params_addr,
+ p_conn->tcp_upload_params_phys_addr);
+ DMA_REGPAIR_LE(p_ramrod->queue_cnts_addr, p_conn->queue_cnts_phys_addr);
+
+ return qed_spq_post(p_hwfn, p_ent, NULL);
+}
+
+static int qed_sp_iscsi_conn_clear_sq(struct qed_hwfn *p_hwfn,
+ struct qed_iscsi_conn *p_conn,
+ enum spq_mode comp_mode,
+ struct qed_spq_comp_cb *p_comp_addr)
+{
+ struct iscsi_slow_path_hdr *p_ramrod = NULL;
+ struct qed_spq_entry *p_ent = NULL;
+ struct qed_sp_init_data init_data;
+ int rc = -EINVAL;
+
+ /* Get SPQ entry */
+ memset(&init_data, 0, sizeof(init_data));
+ init_data.cid = p_conn->icid;
+ init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ init_data.comp_mode = comp_mode;
+ init_data.p_comp_data = p_comp_addr;
+
+ rc = qed_sp_init_request(p_hwfn, &p_ent,
+ ISCSI_RAMROD_CMD_ID_CLEAR_SQ,
+ PROTOCOLID_ISCSI, &init_data);
+ if (rc)
+ return rc;
+
+ p_ramrod = &p_ent->ramrod.iscsi_empty;
+ p_ramrod->op_code = ISCSI_RAMROD_CMD_ID_CLEAR_SQ;
+ SET_FIELD(p_ramrod->flags,
+ ISCSI_SLOW_PATH_HDR_LAYER_CODE, p_conn->layer_code);
+
+ return qed_spq_post(p_hwfn, p_ent, NULL);
+}
+
+static int qed_sp_iscsi_func_stop(struct qed_hwfn *p_hwfn,
+ enum spq_mode comp_mode,
+ struct qed_spq_comp_cb *p_comp_addr)
+{
+ struct iscsi_spe_func_dstry *p_ramrod = NULL;
+ struct qed_spq_entry *p_ent = NULL;
+ struct qed_sp_init_data init_data;
+ int rc = 0;
+
+ /* Get SPQ entry */
+ memset(&init_data, 0, sizeof(init_data));
+ init_data.cid = qed_spq_get_cid(p_hwfn);
+ init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ init_data.comp_mode = comp_mode;
+ init_data.p_comp_data = p_comp_addr;
+
+ rc = qed_sp_init_request(p_hwfn, &p_ent,
+ ISCSI_RAMROD_CMD_ID_DESTROY_FUNC,
+ PROTOCOLID_ISCSI, &init_data);
+ if (rc)
+ return rc;
+
+ p_ramrod = &p_ent->ramrod.iscsi_destroy;
+ p_ramrod->hdr.op_code = ISCSI_RAMROD_CMD_ID_DESTROY_FUNC;
+
+ return qed_spq_post(p_hwfn, p_ent, NULL);
+}
+
+static void __iomem *qed_iscsi_get_db_addr(struct qed_hwfn *p_hwfn, u32 cid)
+{
+ return (u8 __iomem *)p_hwfn->doorbells +
+ qed_db_addr(cid, DQ_DEMS_LEGACY);
+}
+
+static void __iomem *qed_iscsi_get_primary_bdq_prod(struct qed_hwfn *p_hwfn,
+ u8 bdq_id)
+{
+ u8 bdq_function_id = ISCSI_BDQ_ID(p_hwfn->port_id);
+
+ return (u8 __iomem *)p_hwfn->regview + GTT_BAR0_MAP_REG_MSDM_RAM +
+ MSTORM_SCSI_BDQ_EXT_PROD_OFFSET(bdq_function_id,
+ bdq_id);
+}
+
+static void __iomem *qed_iscsi_get_secondary_bdq_prod(struct qed_hwfn *p_hwfn,
+ u8 bdq_id)
+{
+ u8 bdq_function_id = ISCSI_BDQ_ID(p_hwfn->port_id);
+
+ return (u8 __iomem *)p_hwfn->regview + GTT_BAR0_MAP_REG_TSDM_RAM +
+ TSTORM_SCSI_BDQ_EXT_PROD_OFFSET(bdq_function_id,
+ bdq_id);
+}
+
+static int qed_iscsi_setup_connection(struct qed_hwfn *p_hwfn,
+ struct qed_iscsi_conn *p_conn)
+{
+ if (!p_conn->queue_cnts_virt_addr)
+ goto nomem;
+ memset(p_conn->queue_cnts_virt_addr, 0,
+ sizeof(*p_conn->queue_cnts_virt_addr));
+
+ if (!p_conn->tcp_upload_params_virt_addr)
+ goto nomem;
+ memset(p_conn->tcp_upload_params_virt_addr, 0,
+ sizeof(*p_conn->tcp_upload_params_virt_addr));
+
+ if (!p_conn->r2tq.p_virt_addr)
+ goto nomem;
+ qed_chain_pbl_zero_mem(&p_conn->r2tq);
+
+ if (!p_conn->uhq.p_virt_addr)
+ goto nomem;
+ qed_chain_pbl_zero_mem(&p_conn->uhq);
+
+ if (!p_conn->xhq.p_virt_addr)
+ goto nomem;
+ qed_chain_pbl_zero_mem(&p_conn->xhq);
+
+ return 0;
+nomem:
+ return -ENOMEM;
+}
+
+static int qed_iscsi_allocate_connection(struct qed_hwfn *p_hwfn,
+ struct qed_iscsi_conn **p_out_conn)
+{
+ u16 uhq_num_elements = 0, xhq_num_elements = 0, r2tq_num_elements = 0;
+ struct scsi_terminate_extra_params *p_q_cnts = NULL;
+ struct qed_iscsi_pf_params *p_params = NULL;
+ struct tcp_upload_params *p_tcp = NULL;
+ struct qed_iscsi_conn *p_conn = NULL;
+ int rc = 0;
+
+ /* Try finding a free connection that can be used */
+ spin_lock_bh(&p_hwfn->p_iscsi_info->lock);
+ if (!list_empty(&p_hwfn->p_iscsi_info->free_list))
+ p_conn = list_first_entry(&p_hwfn->p_iscsi_info->free_list,
+ struct qed_iscsi_conn, list_entry);
+ if (p_conn) {
+ list_del(&p_conn->list_entry);
+ spin_unlock_bh(&p_hwfn->p_iscsi_info->lock);
+ *p_out_conn = p_conn;
+ return 0;
+ }
+ spin_unlock_bh(&p_hwfn->p_iscsi_info->lock);
+
+ /* Need to allocate a new connection */
+ p_params = &p_hwfn->pf_params.iscsi_pf_params;
+
+ p_conn = kzalloc(sizeof(*p_conn), GFP_KERNEL);
+ if (!p_conn)
+ return -ENOMEM;
+
+ p_q_cnts = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
+ sizeof(*p_q_cnts),
+ &p_conn->queue_cnts_phys_addr,
+ GFP_KERNEL);
+ if (!p_q_cnts)
+ goto nomem_queue_cnts_param;
+ p_conn->queue_cnts_virt_addr = p_q_cnts;
+
+ p_tcp = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
+ sizeof(*p_tcp),
+ &p_conn->tcp_upload_params_phys_addr,
+ GFP_KERNEL);
+ if (!p_tcp)
+ goto nomem_upload_param;
+ p_conn->tcp_upload_params_virt_addr = p_tcp;
+
+ r2tq_num_elements = p_params->num_r2tq_pages_in_ring *
+ QED_CHAIN_PAGE_SIZE / 0x80;
+ rc = qed_chain_alloc(p_hwfn->cdev,
+ QED_CHAIN_USE_TO_CONSUME_PRODUCE,
+ QED_CHAIN_MODE_PBL,
+ QED_CHAIN_CNT_TYPE_U16,
+ r2tq_num_elements, 0x80, &p_conn->r2tq);
+ if (rc)
+ goto nomem_r2tq;
+
+ uhq_num_elements = p_params->num_uhq_pages_in_ring *
+ QED_CHAIN_PAGE_SIZE / sizeof(struct iscsi_uhqe);
+ rc = qed_chain_alloc(p_hwfn->cdev,
+ QED_CHAIN_USE_TO_CONSUME_PRODUCE,
+ QED_CHAIN_MODE_PBL,
+ QED_CHAIN_CNT_TYPE_U16,
+ uhq_num_elements,
+ sizeof(struct iscsi_uhqe), &p_conn->uhq);
+ if (rc)
+ goto nomem_uhq;
+
+ xhq_num_elements = uhq_num_elements;
+ rc = qed_chain_alloc(p_hwfn->cdev,
+ QED_CHAIN_USE_TO_CONSUME_PRODUCE,
+ QED_CHAIN_MODE_PBL,
+ QED_CHAIN_CNT_TYPE_U16,
+ xhq_num_elements,
+ sizeof(struct iscsi_xhqe), &p_conn->xhq);
+ if (rc)
+ goto nomem;
+
+ p_conn->free_on_delete = true;
+ *p_out_conn = p_conn;
+ return 0;
+
+nomem:
+ qed_chain_free(p_hwfn->cdev, &p_conn->uhq);
+nomem_uhq:
+ qed_chain_free(p_hwfn->cdev, &p_conn->r2tq);
+nomem_r2tq:
+ dma_free_coherent(&p_hwfn->cdev->pdev->dev,
+ sizeof(struct tcp_upload_params),
+ p_conn->tcp_upload_params_virt_addr,
+ p_conn->tcp_upload_params_phys_addr);
+nomem_upload_param:
+ dma_free_coherent(&p_hwfn->cdev->pdev->dev,
+ sizeof(struct scsi_terminate_extra_params),
+ p_conn->queue_cnts_virt_addr,
+ p_conn->queue_cnts_phys_addr);
+nomem_queue_cnts_param:
+ kfree(p_conn);
+
+ return -ENOMEM;
+}
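
As a worked instance of the ring sizing above (assuming the usual 4 KiB
QED_CHAIN_PAGE_SIZE): each R2TQ page holds 4096 / 0x80 = 32 elements, so
num_r2tq_pages_in_ring = 4 gives r2tq_num_elements = 128; the UHQ and XHQ
are sized the same way from sizeof(struct iscsi_uhqe).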
+
+static int qed_iscsi_acquire_connection(struct qed_hwfn *p_hwfn,
+ struct qed_iscsi_conn *p_in_conn,
+ struct qed_iscsi_conn **p_out_conn)
+{
+ struct qed_iscsi_conn *p_conn = NULL;
+ int rc = 0;
+ u32 icid;
+
+ spin_lock_bh(&p_hwfn->p_iscsi_info->lock);
+ rc = qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_ISCSI, &icid);
+ spin_unlock_bh(&p_hwfn->p_iscsi_info->lock);
+ if (rc)
+ return rc;
+
+ /* Use input connection or allocate a new one */
+ if (p_in_conn)
+ p_conn = p_in_conn;
+ else
+ rc = qed_iscsi_allocate_connection(p_hwfn, &p_conn);
+
+ if (!rc)
+ rc = qed_iscsi_setup_connection(p_hwfn, p_conn);
+
+ if (rc) {
+ spin_lock_bh(&p_hwfn->p_iscsi_info->lock);
+ qed_cxt_release_cid(p_hwfn, icid);
+ spin_unlock_bh(&p_hwfn->p_iscsi_info->lock);
+ return rc;
+ }
+
+ p_conn->icid = icid;
+ p_conn->conn_id = (u16)icid;
+ p_conn->fw_cid = (p_hwfn->hw_info.opaque_fid << 16) | icid;
+
+ *p_out_conn = p_conn;
+
+ return rc;
+}
+
+static void qed_iscsi_release_connection(struct qed_hwfn *p_hwfn,
+ struct qed_iscsi_conn *p_conn)
+{
+ spin_lock_bh(&p_hwfn->p_iscsi_info->lock);
+ list_add_tail(&p_conn->list_entry, &p_hwfn->p_iscsi_info->free_list);
+ qed_cxt_release_cid(p_hwfn, p_conn->icid);
+ spin_unlock_bh(&p_hwfn->p_iscsi_info->lock);
+}
+
+struct qed_iscsi_info *qed_iscsi_alloc(struct qed_hwfn *p_hwfn)
+{
+ struct qed_iscsi_info *p_iscsi_info;
+
+ p_iscsi_info = kzalloc(sizeof(*p_iscsi_info), GFP_KERNEL);
+ if (!p_iscsi_info)
+ return NULL;
+
+ INIT_LIST_HEAD(&p_iscsi_info->free_list);
+ return p_iscsi_info;
+}
+
+void qed_iscsi_setup(struct qed_hwfn *p_hwfn,
+ struct qed_iscsi_info *p_iscsi_info)
+{
+ spin_lock_init(&p_iscsi_info->lock);
+}
+
+void qed_iscsi_free(struct qed_hwfn *p_hwfn,
+ struct qed_iscsi_info *p_iscsi_info)
+{
+ kfree(p_iscsi_info);
+}
+
+static void _qed_iscsi_get_tstats(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_iscsi_stats *p_stats)
+{
+ struct tstorm_iscsi_stats_drv tstats;
+ u32 tstats_addr;
+
+ memset(&tstats, 0, sizeof(tstats));
+ tstats_addr = BAR0_MAP_REG_TSDM_RAM +
+ TSTORM_ISCSI_RX_STATS_OFFSET(p_hwfn->rel_pf_id);
+ qed_memcpy_from(p_hwfn, p_ptt, &tstats, tstats_addr, sizeof(tstats));
+
+ p_stats->iscsi_rx_bytes_cnt =
+ HILO_64_REGPAIR(tstats.iscsi_rx_bytes_cnt);
+ p_stats->iscsi_rx_packet_cnt =
+ HILO_64_REGPAIR(tstats.iscsi_rx_packet_cnt);
+ p_stats->iscsi_cmdq_threshold_cnt =
+ le32_to_cpu(tstats.iscsi_cmdq_threshold_cnt);
+ p_stats->iscsi_rq_threshold_cnt =
+ le32_to_cpu(tstats.iscsi_rq_threshold_cnt);
+ p_stats->iscsi_immq_threshold_cnt =
+ le32_to_cpu(tstats.iscsi_immq_threshold_cnt);
+}
+
+static void _qed_iscsi_get_mstats(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_iscsi_stats *p_stats)
+{
+ struct mstorm_iscsi_stats_drv mstats;
+ u32 mstats_addr;
+
+ memset(&mstats, 0, sizeof(mstats));
+ mstats_addr = BAR0_MAP_REG_MSDM_RAM +
+ MSTORM_ISCSI_RX_STATS_OFFSET(p_hwfn->rel_pf_id);
+ qed_memcpy_from(p_hwfn, p_ptt, &mstats, mstats_addr, sizeof(mstats));
+
+ p_stats->iscsi_rx_dropped_pdus_task_not_valid =
+ HILO_64_REGPAIR(mstats.iscsi_rx_dropped_pdus_task_not_valid);
+}
+
+static void _qed_iscsi_get_ustats(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_iscsi_stats *p_stats)
+{
+ struct ustorm_iscsi_stats_drv ustats;
+ u32 ustats_addr;
+
+ memset(&ustats, 0, sizeof(ustats));
+ ustats_addr = BAR0_MAP_REG_USDM_RAM +
+ USTORM_ISCSI_RX_STATS_OFFSET(p_hwfn->rel_pf_id);
+ qed_memcpy_from(p_hwfn, p_ptt, &ustats, ustats_addr, sizeof(ustats));
+
+ p_stats->iscsi_rx_data_pdu_cnt =
+ HILO_64_REGPAIR(ustats.iscsi_rx_data_pdu_cnt);
+ p_stats->iscsi_rx_r2t_pdu_cnt =
+ HILO_64_REGPAIR(ustats.iscsi_rx_r2t_pdu_cnt);
+ p_stats->iscsi_rx_total_pdu_cnt =
+ HILO_64_REGPAIR(ustats.iscsi_rx_total_pdu_cnt);
+}
+
+static void _qed_iscsi_get_xstats(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_iscsi_stats *p_stats)
+{
+ struct xstorm_iscsi_stats_drv xstats;
+ u32 xstats_addr;
+
+ memset(&xstats, 0, sizeof(xstats));
+ xstats_addr = BAR0_MAP_REG_XSDM_RAM +
+ XSTORM_ISCSI_TX_STATS_OFFSET(p_hwfn->rel_pf_id);
+ qed_memcpy_from(p_hwfn, p_ptt, &xstats, xstats_addr, sizeof(xstats));
+
+ p_stats->iscsi_tx_go_to_slow_start_event_cnt =
+ HILO_64_REGPAIR(xstats.iscsi_tx_go_to_slow_start_event_cnt);
+ p_stats->iscsi_tx_fast_retransmit_event_cnt =
+ HILO_64_REGPAIR(xstats.iscsi_tx_fast_retransmit_event_cnt);
+}
+
+static void _qed_iscsi_get_ystats(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_iscsi_stats *p_stats)
+{
+ struct ystorm_iscsi_stats_drv ystats;
+ u32 ystats_addr;
+
+ memset(&ystats, 0, sizeof(ystats));
+ ystats_addr = BAR0_MAP_REG_YSDM_RAM +
+ YSTORM_ISCSI_TX_STATS_OFFSET(p_hwfn->rel_pf_id);
+ qed_memcpy_from(p_hwfn, p_ptt, &ystats, ystats_addr, sizeof(ystats));
+
+ p_stats->iscsi_tx_data_pdu_cnt =
+ HILO_64_REGPAIR(ystats.iscsi_tx_data_pdu_cnt);
+ p_stats->iscsi_tx_r2t_pdu_cnt =
+ HILO_64_REGPAIR(ystats.iscsi_tx_r2t_pdu_cnt);
+ p_stats->iscsi_tx_total_pdu_cnt =
+ HILO_64_REGPAIR(ystats.iscsi_tx_total_pdu_cnt);
+}
+
+static void _qed_iscsi_get_pstats(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_iscsi_stats *p_stats)
+{
+ struct pstorm_iscsi_stats_drv pstats;
+ u32 pstats_addr;
+
+ memset(&pstats, 0, sizeof(pstats));
+ pstats_addr = BAR0_MAP_REG_PSDM_RAM +
+ PSTORM_ISCSI_TX_STATS_OFFSET(p_hwfn->rel_pf_id);
+ qed_memcpy_from(p_hwfn, p_ptt, &pstats, pstats_addr, sizeof(pstats));
+
+ p_stats->iscsi_tx_bytes_cnt =
+ HILO_64_REGPAIR(pstats.iscsi_tx_bytes_cnt);
+ p_stats->iscsi_tx_packet_cnt =
+ HILO_64_REGPAIR(pstats.iscsi_tx_packet_cnt);
+}
+
+static int qed_iscsi_get_stats(struct qed_hwfn *p_hwfn,
+ struct qed_iscsi_stats *stats)
+{
+ struct qed_ptt *p_ptt;
+
+ memset(stats, 0, sizeof(*stats));
+
+ p_ptt = qed_ptt_acquire(p_hwfn);
+ if (!p_ptt) {
+ DP_ERR(p_hwfn, "Failed to acquire ptt\n");
+ return -EAGAIN;
+ }
+
+ _qed_iscsi_get_tstats(p_hwfn, p_ptt, stats);
+ _qed_iscsi_get_mstats(p_hwfn, p_ptt, stats);
+ _qed_iscsi_get_ustats(p_hwfn, p_ptt, stats);
+
+ _qed_iscsi_get_xstats(p_hwfn, p_ptt, stats);
+ _qed_iscsi_get_ystats(p_hwfn, p_ptt, stats);
+ _qed_iscsi_get_pstats(p_hwfn, p_ptt, stats);
+
+ qed_ptt_release(p_hwfn, p_ptt);
+
+ return 0;
+}
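
Each 64-bit storm counter above arrives as a hi/lo pair of 32-bit words; a
hedged sketch of what the HILO_64_REGPAIR() combination amounts to (the
struct and helper names are stand-ins, assuming the macro simply
concatenates the two halves):

	struct regpair_sketch {		/* stand-in for the FW regpair layout */
		__le32 lo;
		__le32 hi;
	};

	static inline u64 hilo64_sketch(struct regpair_sketch r)
	{
		return ((u64)le32_to_cpu(r.hi) << 32) | le32_to_cpu(r.lo);
	}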
+
+struct qed_hash_iscsi_con {
+ struct hlist_node node;
+ struct qed_iscsi_conn *con;
+};
+
+static int qed_fill_iscsi_dev_info(struct qed_dev *cdev,
+ struct qed_dev_iscsi_info *info)
+{
+ struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+ int rc;
+
+ memset(info, 0, sizeof(*info));
+ rc = qed_fill_dev_info(cdev, &info->common);
+
+ info->primary_dbq_rq_addr =
+ qed_iscsi_get_primary_bdq_prod(hwfn, BDQ_ID_RQ);
+ info->secondary_bdq_rq_addr =
+ qed_iscsi_get_secondary_bdq_prod(hwfn, BDQ_ID_RQ);
+
+ return rc;
+}
+
+static void qed_register_iscsi_ops(struct qed_dev *cdev,
+ struct qed_iscsi_cb_ops *ops, void *cookie)
+{
+ cdev->protocol_ops.iscsi = ops;
+ cdev->ops_cookie = cookie;
+}
+
+static struct qed_hash_iscsi_con *qed_iscsi_get_hash(struct qed_dev *cdev,
+ u32 handle)
+{
+ struct qed_hash_iscsi_con *hash_con = NULL;
+
+ if (!(cdev->flags & QED_FLAG_STORAGE_STARTED))
+ return NULL;
+
+ hash_for_each_possible(cdev->connections, hash_con, node, handle) {
+ if (hash_con->con->icid == handle)
+ break;
+ }
+
+ if (!hash_con || (hash_con->con->icid != handle))
+ return NULL;
+
+ return hash_con;
+}
+
+static int qed_iscsi_stop(struct qed_dev *cdev)
+{
+ int rc;
+
+ if (!(cdev->flags & QED_FLAG_STORAGE_STARTED)) {
+ DP_NOTICE(cdev, "iscsi already stopped\n");
+ return 0;
+ }
+
+ if (!hash_empty(cdev->connections)) {
+ DP_NOTICE(cdev,
+ "Can't stop iscsi - not all connections were returned\n");
+ return -EINVAL;
+ }
+
+ /* Stop the iscsi */
+ rc = qed_sp_iscsi_func_stop(QED_LEADING_HWFN(cdev),
+ QED_SPQ_MODE_EBLOCK, NULL);
+ cdev->flags &= ~QED_FLAG_STORAGE_STARTED;
+
+ return rc;
+}
+
+static int qed_iscsi_start(struct qed_dev *cdev,
+ struct qed_iscsi_tid *tasks,
+ void *event_context,
+ iscsi_event_cb_t async_event_cb)
+{
+ int rc;
+ struct qed_tid_mem *tid_info;
+
+ if (cdev->flags & QED_FLAG_STORAGE_STARTED) {
+ DP_NOTICE(cdev, "iscsi already started;\n");
+ return 0;
+ }
+
+ rc = qed_sp_iscsi_func_start(QED_LEADING_HWFN(cdev),
+ QED_SPQ_MODE_EBLOCK, NULL, event_context,
+ async_event_cb);
+ if (rc) {
+ DP_NOTICE(cdev, "Failed to start iscsi\n");
+ return rc;
+ }
+
+ cdev->flags |= QED_FLAG_STORAGE_STARTED;
+ hash_init(cdev->connections);
+
+ if (!tasks)
+ return 0;
+
+ tid_info = kzalloc(sizeof(*tid_info), GFP_KERNEL);
+
+ if (!tid_info) {
+ qed_iscsi_stop(cdev);
+ return -ENOMEM;
+ }
+
+ rc = qed_cxt_get_tid_mem_info(QED_LEADING_HWFN(cdev),
+ tid_info);
+ if (rc) {
+ DP_NOTICE(cdev, "Failed to gather task information\n");
+ qed_iscsi_stop(cdev);
+ kfree(tid_info);
+ return rc;
+ }
+
+ /* Fill task information */
+ tasks->size = tid_info->tid_size;
+ tasks->num_tids_per_block = tid_info->num_tids_per_block;
+ memcpy(tasks->blocks, tid_info->blocks,
+ MAX_TID_BLOCKS_ISCSI * sizeof(u8 *));
+
+ kfree(tid_info);
+
+ return 0;
+}
+
+static int qed_iscsi_acquire_conn(struct qed_dev *cdev,
+ u32 *handle,
+ u32 *fw_cid, void __iomem **p_doorbell)
+{
+ struct qed_hash_iscsi_con *hash_con;
+ int rc;
+
+ /* Allocate a hashed connection */
+ hash_con = kzalloc(sizeof(*hash_con), GFP_ATOMIC);
+ if (!hash_con)
+ return -ENOMEM;
+
+ /* Acquire the connection */
+ rc = qed_iscsi_acquire_connection(QED_LEADING_HWFN(cdev), NULL,
+ &hash_con->con);
+ if (rc) {
+ DP_NOTICE(cdev, "Failed to acquire Connection\n");
+ kfree(hash_con);
+ return rc;
+ }
+
+ /* Add the connection to the hash table */
+ *handle = hash_con->con->icid;
+ *fw_cid = hash_con->con->fw_cid;
+ hash_add(cdev->connections, &hash_con->node, *handle);
+
+ if (p_doorbell)
+ *p_doorbell = qed_iscsi_get_db_addr(QED_LEADING_HWFN(cdev),
+ *handle);
+
+ return 0;
+}
+
+static int qed_iscsi_release_conn(struct qed_dev *cdev, u32 handle)
+{
+ struct qed_hash_iscsi_con *hash_con;
+
+ hash_con = qed_iscsi_get_hash(cdev, handle);
+ if (!hash_con) {
+ DP_NOTICE(cdev, "Failed to find connection for handle %d\n",
+ handle);
+ return -EINVAL;
+ }
+
+ hlist_del(&hash_con->node);
+ qed_iscsi_release_connection(QED_LEADING_HWFN(cdev), hash_con->con);
+ kfree(hash_con);
+
+ return 0;
+}
+
+static int qed_iscsi_offload_conn(struct qed_dev *cdev,
+ u32 handle,
+ struct qed_iscsi_params_offload *conn_info)
+{
+ struct qed_hash_iscsi_con *hash_con;
+ struct qed_iscsi_conn *con;
+
+ hash_con = qed_iscsi_get_hash(cdev, handle);
+ if (!hash_con) {
+ DP_NOTICE(cdev, "Failed to find connection for handle %d\n",
+ handle);
+ return -EINVAL;
+ }
+
+ /* Update the connection with information from the params */
+ con = hash_con->con;
+
+ ether_addr_copy(con->local_mac, conn_info->src.mac);
+ ether_addr_copy(con->remote_mac, conn_info->dst.mac);
+ memcpy(con->local_ip, conn_info->src.ip, sizeof(con->local_ip));
+ memcpy(con->remote_ip, conn_info->dst.ip, sizeof(con->remote_ip));
+ con->local_port = conn_info->src.port;
+ con->remote_port = conn_info->dst.port;
+
+ con->layer_code = conn_info->layer_code;
+ con->sq_pbl_addr = conn_info->sq_pbl_addr;
+ con->initial_ack = conn_info->initial_ack;
+ con->vlan_id = conn_info->vlan_id;
+ con->tcp_flags = conn_info->tcp_flags;
+ con->ip_version = conn_info->ip_version;
+ con->default_cq = conn_info->default_cq;
+ con->ka_max_probe_cnt = conn_info->ka_max_probe_cnt;
+ con->dup_ack_theshold = conn_info->dup_ack_theshold;
+ con->rcv_next = conn_info->rcv_next;
+ con->snd_una = conn_info->snd_una;
+ con->snd_next = conn_info->snd_next;
+ con->snd_max = conn_info->snd_max;
+ con->snd_wnd = conn_info->snd_wnd;
+ con->rcv_wnd = conn_info->rcv_wnd;
+ con->snd_wl1 = conn_info->snd_wl1;
+ con->cwnd = conn_info->cwnd;
+ con->ss_thresh = conn_info->ss_thresh;
+ con->srtt = conn_info->srtt;
+ con->rtt_var = conn_info->rtt_var;
+ con->ts_time = conn_info->ts_time;
+ con->ts_recent = conn_info->ts_recent;
+ con->ts_recent_age = conn_info->ts_recent_age;
+ con->total_rt = conn_info->total_rt;
+ con->ka_timeout_delta = conn_info->ka_timeout_delta;
+ con->rt_timeout_delta = conn_info->rt_timeout_delta;
+ con->dup_ack_cnt = conn_info->dup_ack_cnt;
+ con->snd_wnd_probe_cnt = conn_info->snd_wnd_probe_cnt;
+ con->ka_probe_cnt = conn_info->ka_probe_cnt;
+ con->rt_cnt = conn_info->rt_cnt;
+ con->flow_label = conn_info->flow_label;
+ con->ka_timeout = conn_info->ka_timeout;
+ con->ka_interval = conn_info->ka_interval;
+ con->max_rt_time = conn_info->max_rt_time;
+ con->initial_rcv_wnd = conn_info->initial_rcv_wnd;
+ con->ttl = conn_info->ttl;
+ con->tos_or_tc = conn_info->tos_or_tc;
+ con->remote_port = conn_info->remote_port;
+ con->local_port = conn_info->local_port;
+ con->mss = conn_info->mss;
+ con->snd_wnd_scale = conn_info->snd_wnd_scale;
+ con->rcv_wnd_scale = conn_info->rcv_wnd_scale;
+ con->ts_ticks_per_second = conn_info->ts_ticks_per_second;
+ con->da_timeout_value = conn_info->da_timeout_value;
+ con->ack_frequency = conn_info->ack_frequency;
+
+ /* Set default values on other connection fields */
+ con->offl_flags = 0x1;
+
+ return qed_sp_iscsi_conn_offload(QED_LEADING_HWFN(cdev), con,
+ QED_SPQ_MODE_EBLOCK, NULL);
+}
+
+static int qed_iscsi_update_conn(struct qed_dev *cdev,
+ u32 handle,
+ struct qed_iscsi_params_update *conn_info)
+{
+ struct qed_hash_iscsi_con *hash_con;
+ struct qed_iscsi_conn *con;
+
+ hash_con = qed_iscsi_get_hash(cdev, handle);
+ if (!hash_con) {
+ DP_NOTICE(cdev, "Failed to find connection for handle %d\n",
+ handle);
+ return -EINVAL;
+ }
+
+ /* Update the connection with information from the params */
+ con = hash_con->con;
+ con->update_flag = conn_info->update_flag;
+ con->max_seq_size = conn_info->max_seq_size;
+ con->max_recv_pdu_length = conn_info->max_recv_pdu_length;
+ con->max_send_pdu_length = conn_info->max_send_pdu_length;
+ con->first_seq_length = conn_info->first_seq_length;
+ con->exp_stat_sn = conn_info->exp_stat_sn;
+
+ return qed_sp_iscsi_conn_update(QED_LEADING_HWFN(cdev), con,
+ QED_SPQ_MODE_EBLOCK, NULL);
+}
+
+static int qed_iscsi_clear_conn_sq(struct qed_dev *cdev, u32 handle)
+{
+ struct qed_hash_iscsi_con *hash_con;
+
+ hash_con = qed_iscsi_get_hash(cdev, handle);
+ if (!hash_con) {
+ DP_NOTICE(cdev, "Failed to find connection for handle %d\n",
+ handle);
+ return -EINVAL;
+ }
+
+ return qed_sp_iscsi_conn_clear_sq(QED_LEADING_HWFN(cdev),
+ hash_con->con,
+ QED_SPQ_MODE_EBLOCK, NULL);
+}
+
+static int qed_iscsi_destroy_conn(struct qed_dev *cdev,
+ u32 handle, u8 abrt_conn)
+{
+ struct qed_hash_iscsi_con *hash_con;
+
+ hash_con = qed_iscsi_get_hash(cdev, handle);
+ if (!hash_con) {
+ DP_NOTICE(cdev, "Failed to find connection for handle %d\n",
+ handle);
+ return -EINVAL;
+ }
+
+ hash_con->con->abortive_dsconnect = abrt_conn;
+
+ return qed_sp_iscsi_conn_terminate(QED_LEADING_HWFN(cdev),
+ hash_con->con,
+ QED_SPQ_MODE_EBLOCK, NULL);
+}
+
+static int qed_iscsi_stats(struct qed_dev *cdev, struct qed_iscsi_stats *stats)
+{
+ return qed_iscsi_get_stats(QED_LEADING_HWFN(cdev), stats);
+}
+
+static const struct qed_iscsi_ops qed_iscsi_ops_pass = {
+ .common = &qed_common_ops_pass,
+ .ll2 = &qed_ll2_ops_pass,
+ .fill_dev_info = &qed_fill_iscsi_dev_info,
+ .register_ops = &qed_register_iscsi_ops,
+ .start = &qed_iscsi_start,
+ .stop = &qed_iscsi_stop,
+ .acquire_conn = &qed_iscsi_acquire_conn,
+ .release_conn = &qed_iscsi_release_conn,
+ .offload_conn = &qed_iscsi_offload_conn,
+ .update_conn = &qed_iscsi_update_conn,
+ .destroy_conn = &qed_iscsi_destroy_conn,
+ .clear_sq = &qed_iscsi_clear_conn_sq,
+ .get_stats = &qed_iscsi_stats,
+};
+
+const struct qed_iscsi_ops *qed_get_iscsi_ops(void)
+{
+ return &qed_iscsi_ops_pass;
+}
+EXPORT_SYMBOL(qed_get_iscsi_ops);
+
+void qed_put_iscsi_ops(void)
+{
+}
+EXPORT_SYMBOL(qed_put_iscsi_ops);
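
A hedged sketch of how a storage driver might consume this ops table (the
probe function and its error handling are placeholders, not from the patch):

	static const struct qed_iscsi_ops *ops;

	static int example_probe(struct qed_dev *cdev)
	{
		u32 handle, fw_cid;
		void __iomem *db;
		int rc;

		ops = qed_get_iscsi_ops();
		if (!ops)
			return -ENODEV;

		/* No task info and no async event callback in this sketch */
		rc = ops->start(cdev, NULL, NULL, NULL);
		if (rc)
			return rc;

		/* Acquire one offloaded connection and its doorbell address */
		rc = ops->acquire_conn(cdev, &handle, &fw_cid, &db);
		if (rc)
			ops->stop(cdev);
		return rc;
	}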
diff --git a/drivers/net/ethernet/qlogic/qed/qed_iscsi.h b/drivers/net/ethernet/qlogic/qed/qed_iscsi.h
new file mode 100644
index 000000000000..67c25f3db4d5
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_iscsi.h
@@ -0,0 +1,52 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#ifndef _QED_ISCSI_H
+#define _QED_ISCSI_H
+#include <linux/types.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/qed/tcp_common.h>
+#include <linux/qed/qed_iscsi_if.h>
+#include <linux/qed/qed_chain.h>
+#include "qed.h"
+#include "qed_hsi.h"
+#include "qed_mcp.h"
+#include "qed_sp.h"
+
+struct qed_iscsi_info {
+ spinlock_t lock; /* Connection resources. */
+ struct list_head free_list;
+ u16 max_num_outstanding_tasks;
+ void *event_context;
+ iscsi_event_cb_t event_cb;
+};
+
+#ifdef CONFIG_QED_LL2
+extern const struct qed_ll2_ops qed_ll2_ops_pass;
+#endif
+
+#if IS_ENABLED(CONFIG_QED_ISCSI)
+struct qed_iscsi_info *qed_iscsi_alloc(struct qed_hwfn *p_hwfn);
+
+void qed_iscsi_setup(struct qed_hwfn *p_hwfn,
+ struct qed_iscsi_info *p_iscsi_info);
+
+void qed_iscsi_free(struct qed_hwfn *p_hwfn,
+ struct qed_iscsi_info *p_iscsi_info);
+#else /* IS_ENABLED(CONFIG_QED_ISCSI) */
+static inline struct qed_iscsi_info *qed_iscsi_alloc(
+ struct qed_hwfn *p_hwfn) { return NULL; }
+static inline void qed_iscsi_setup(struct qed_hwfn *p_hwfn,
+ struct qed_iscsi_info *p_iscsi_info) {}
+static inline void qed_iscsi_free(struct qed_hwfn *p_hwfn,
+ struct qed_iscsi_info *p_iscsi_info) {}
+#endif /* IS_ENABLED(CONFIG_QED_ISCSI) */
+
+#endif
diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.c b/drivers/net/ethernet/qlogic/qed/qed_l2.c
index ddd410a91e13..6a3727c4c0c6 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_l2.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_l2.c
@@ -23,6 +23,7 @@
#include <linux/workqueue.h>
#include <linux/bitops.h>
#include <linux/bug.h>
+#include <linux/vmalloc.h>
#include "qed.h"
#include <linux/qed/qed_chain.h>
#include "qed_cxt.h"
@@ -41,6 +42,124 @@
#define QED_MAX_SGES_NUM 16
#define CRC32_POLY 0x1edc6f41
+void qed_eth_queue_cid_release(struct qed_hwfn *p_hwfn,
+ struct qed_queue_cid *p_cid)
+{
+ /* VFs' CIDs are 0-based in PF-view, and uninitialized on VF */
+ if (!p_cid->is_vf && IS_PF(p_hwfn->cdev))
+ qed_cxt_release_cid(p_hwfn, p_cid->cid);
+ vfree(p_cid);
+}
+
+/* This internal variant is only meant to be called directly by PFs
+ * initializing CIDs for their VFs.
+ */
+struct qed_queue_cid *
+_qed_eth_queue_to_cid(struct qed_hwfn *p_hwfn,
+ u16 opaque_fid,
+ u32 cid,
+ u8 vf_qid,
+ struct qed_queue_start_common_params *p_params)
+{
+ bool b_is_same = (p_hwfn->hw_info.opaque_fid == opaque_fid);
+ struct qed_queue_cid *p_cid;
+ int rc;
+
+ p_cid = vmalloc(sizeof(*p_cid));
+ if (!p_cid)
+ return NULL;
+ memset(p_cid, 0, sizeof(*p_cid));
+
+ p_cid->opaque_fid = opaque_fid;
+ p_cid->cid = cid;
+ p_cid->vf_qid = vf_qid;
+ p_cid->rel = *p_params;
+
+ /* Don't try calculating the absolute indices for VFs */
+ if (IS_VF(p_hwfn->cdev)) {
+ p_cid->abs = p_cid->rel;
+ goto out;
+ }
+
+ /* Calculate the engine-absolute indices of the resources.
+ * This guarantees they're valid later on.
+ * In some cases [SBs] we already have the right values.
+ */
+ rc = qed_fw_vport(p_hwfn, p_cid->rel.vport_id, &p_cid->abs.vport_id);
+ if (rc)
+ goto fail;
+
+ rc = qed_fw_l2_queue(p_hwfn, p_cid->rel.queue_id, &p_cid->abs.queue_id);
+ if (rc)
+ goto fail;
+
+ /* In case of a PF configuring its VF's queues, the stats-id is already
+ * absolute [since there's a single index that's suitable per-VF].
+ */
+ if (b_is_same) {
+ rc = qed_fw_vport(p_hwfn, p_cid->rel.stats_id,
+ &p_cid->abs.stats_id);
+ if (rc)
+ goto fail;
+ } else {
+ p_cid->abs.stats_id = p_cid->rel.stats_id;
+ }
+
+ /* SBs relevant information was already provided as absolute */
+ p_cid->abs.sb = p_cid->rel.sb;
+ p_cid->abs.sb_idx = p_cid->rel.sb_idx;
+
+ /* This is tricky - we're actually interested in whether this is a PF
+ * entry meant for the VF.
+ */
+ if (!b_is_same)
+ p_cid->is_vf = true;
+out:
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_SP,
+ "opaque_fid: %04x CID %08x vport %02x [%02x] qzone %04x [%04x] stats %02x [%02x] SB %04x PI %02x\n",
+ p_cid->opaque_fid,
+ p_cid->cid,
+ p_cid->rel.vport_id,
+ p_cid->abs.vport_id,
+ p_cid->rel.queue_id,
+ p_cid->abs.queue_id,
+ p_cid->rel.stats_id,
+ p_cid->abs.stats_id, p_cid->abs.sb, p_cid->abs.sb_idx);
+
+ return p_cid;
+
+fail:
+ vfree(p_cid);
+ return NULL;
+}
+
+static struct qed_queue_cid *
+qed_eth_queue_to_cid(struct qed_hwfn *p_hwfn, u16 opaque_fid,
+ struct qed_queue_start_common_params *p_params)
+{
+ struct qed_queue_cid *p_cid;
+ u32 cid = 0;
+
+ /* Get a unique firmware CID for this queue, in case it's a PF.
+ * VFs don't need a CID, as the queue configuration will be done
+ * by the PF.
+ */
+ if (IS_PF(p_hwfn->cdev)) {
+ if (qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_ETH, &cid)) {
+ DP_NOTICE(p_hwfn, "Failed to acquire cid\n");
+ return NULL;
+ }
+ }
+
+ p_cid = _qed_eth_queue_to_cid(p_hwfn, opaque_fid, cid, 0, p_params);
+ if (!p_cid && IS_PF(p_hwfn->cdev))
+ qed_cxt_release_cid(p_hwfn, cid);
+
+ return p_cid;
+}
+
int qed_sp_eth_vport_start(struct qed_hwfn *p_hwfn,
struct qed_sp_vport_start_params *p_params)
{
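
The new qed_eth_queue_to_cid() above pairs a hardware resource (the firmware CID) with a software handle, and releases the CID again if the handle allocation fails. Below is a small standalone sketch of that acquire/alloc/unwind ordering, with acquire_cid()/release_cid() standing in for qed_cxt_acquire_cid()/qed_cxt_release_cid(); all names here are illustrative.

#include <stdio.h>
#include <stdlib.h>

static int next_cid;

static int acquire_cid(unsigned int *cid)
{
	*cid = next_cid++;
	return 0;
}

static void release_cid(unsigned int cid)
{
	printf("released CID %u\n", cid);
}

struct queue_cid { unsigned int cid; };

static struct queue_cid *queue_to_cid(void)
{
	struct queue_cid *p;
	unsigned int cid;

	if (acquire_cid(&cid))		/* step 1: reserve the hw resource */
		return NULL;

	p = calloc(1, sizeof(*p));	/* step 2: allocate the sw handle */
	if (!p) {
		release_cid(cid);	/* undo step 1 on failure */
		return NULL;
	}
	p->cid = cid;
	return p;
}

int main(void)
{
	struct queue_cid *p = queue_to_cid();

	if (p)
		printf("got CID %u\n", p->cid);
	free(p);
	return 0;
}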
@@ -496,61 +615,26 @@ static int qed_filter_accept_cmd(struct qed_dev *cdev,
return 0;
}
-static int qed_sp_release_queue_cid(
- struct qed_hwfn *p_hwfn,
- struct qed_hw_cid_data *p_cid_data)
-{
- if (!p_cid_data->b_cid_allocated)
- return 0;
-
- qed_cxt_release_cid(p_hwfn, p_cid_data->cid);
-
- p_cid_data->b_cid_allocated = false;
-
- return 0;
-}
-
-int qed_sp_eth_rxq_start_ramrod(struct qed_hwfn *p_hwfn,
- u16 opaque_fid,
- u32 cid,
- struct qed_queue_start_common_params *p_params,
- u8 stats_id,
- u16 bd_max_bytes,
- dma_addr_t bd_chain_phys_addr,
- dma_addr_t cqe_pbl_addr,
- u16 cqe_pbl_size, bool b_use_zone_a_prod)
+int qed_eth_rxq_start_ramrod(struct qed_hwfn *p_hwfn,
+ struct qed_queue_cid *p_cid,
+ u16 bd_max_bytes,
+ dma_addr_t bd_chain_phys_addr,
+ dma_addr_t cqe_pbl_addr, u16 cqe_pbl_size)
{
struct rx_queue_start_ramrod_data *p_ramrod = NULL;
struct qed_spq_entry *p_ent = NULL;
struct qed_sp_init_data init_data;
- struct qed_hw_cid_data *p_rx_cid;
- u16 abs_rx_q_id = 0;
- u8 abs_vport_id = 0;
int rc = -EINVAL;
- /* Store information for the stop */
- p_rx_cid = &p_hwfn->p_rx_cids[p_params->queue_id];
- p_rx_cid->cid = cid;
- p_rx_cid->opaque_fid = opaque_fid;
- p_rx_cid->vport_id = p_params->vport_id;
-
- rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id);
- if (rc)
- return rc;
-
- rc = qed_fw_l2_queue(p_hwfn, p_params->queue_id, &abs_rx_q_id);
- if (rc)
- return rc;
-
DP_VERBOSE(p_hwfn, QED_MSG_SP,
- "opaque_fid=0x%x, cid=0x%x, rx_qid=0x%x, vport_id=0x%x, sb_id=0x%x\n",
- opaque_fid,
- cid, p_params->queue_id, p_params->vport_id, p_params->sb);
+ "opaque_fid=0x%x, cid=0x%x, rx_qzone=0x%x, vport_id=0x%x, sb_id=0x%x\n",
+ p_cid->opaque_fid, p_cid->cid,
+ p_cid->abs.queue_id, p_cid->abs.vport_id, p_cid->abs.sb);
/* Get SPQ entry */
memset(&init_data, 0, sizeof(init_data));
- init_data.cid = cid;
- init_data.opaque_fid = opaque_fid;
+ init_data.cid = p_cid->cid;
+ init_data.opaque_fid = p_cid->opaque_fid;
init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
rc = qed_sp_init_request(p_hwfn, &p_ent,
@@ -561,11 +645,11 @@ int qed_sp_eth_rxq_start_ramrod(struct qed_hwfn *p_hwfn,
p_ramrod = &p_ent->ramrod.rx_queue_start;
- p_ramrod->sb_id = cpu_to_le16(p_params->sb);
- p_ramrod->sb_index = p_params->sb_idx;
- p_ramrod->vport_id = abs_vport_id;
- p_ramrod->stats_counter_id = stats_id;
- p_ramrod->rx_queue_id = cpu_to_le16(abs_rx_q_id);
+ p_ramrod->sb_id = cpu_to_le16(p_cid->abs.sb);
+ p_ramrod->sb_index = p_cid->abs.sb_idx;
+ p_ramrod->vport_id = p_cid->abs.vport_id;
+ p_ramrod->stats_counter_id = p_cid->abs.stats_id;
+ p_ramrod->rx_queue_id = cpu_to_le16(p_cid->abs.queue_id);
p_ramrod->complete_cqe_flg = 0;
p_ramrod->complete_event_flg = 1;
@@ -575,85 +659,85 @@ int qed_sp_eth_rxq_start_ramrod(struct qed_hwfn *p_hwfn,
p_ramrod->num_of_pbl_pages = cpu_to_le16(cqe_pbl_size);
DMA_REGPAIR_LE(p_ramrod->cqe_pbl_addr, cqe_pbl_addr);
- if (p_params->vf_qid || b_use_zone_a_prod) {
- p_ramrod->vf_rx_prod_index = p_params->vf_qid;
+ if (p_cid->is_vf) {
+ p_ramrod->vf_rx_prod_index = p_cid->vf_qid;
DP_VERBOSE(p_hwfn, QED_MSG_SP,
"Queue%s is meant for VF rxq[%02x]\n",
- b_use_zone_a_prod ? " [legacy]" : "",
- p_params->vf_qid);
- p_ramrod->vf_rx_prod_use_zone_a = b_use_zone_a_prod;
+ !!p_cid->b_legacy_vf ? " [legacy]" : "",
+ p_cid->vf_qid);
+ p_ramrod->vf_rx_prod_use_zone_a = !!p_cid->b_legacy_vf;
}
return qed_spq_post(p_hwfn, p_ent, NULL);
}
static int
-qed_sp_eth_rx_queue_start(struct qed_hwfn *p_hwfn,
- u16 opaque_fid,
- struct qed_queue_start_common_params *p_params,
+qed_eth_pf_rx_queue_start(struct qed_hwfn *p_hwfn,
+ struct qed_queue_cid *p_cid,
u16 bd_max_bytes,
dma_addr_t bd_chain_phys_addr,
dma_addr_t cqe_pbl_addr,
u16 cqe_pbl_size, void __iomem **pp_prod)
{
- struct qed_hw_cid_data *p_rx_cid;
u32 init_prod_val = 0;
- u16 abs_l2_queue = 0;
- u8 abs_stats_id = 0;
- int rc;
- if (IS_VF(p_hwfn->cdev)) {
- return qed_vf_pf_rxq_start(p_hwfn,
- p_params->queue_id,
- p_params->sb,
- (u8)p_params->sb_idx,
- bd_max_bytes,
- bd_chain_phys_addr,
- cqe_pbl_addr, cqe_pbl_size, pp_prod);
- }
-
- rc = qed_fw_l2_queue(p_hwfn, p_params->queue_id, &abs_l2_queue);
- if (rc)
- return rc;
-
- rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_stats_id);
- if (rc)
- return rc;
-
- *pp_prod = (u8 __iomem *)p_hwfn->regview +
- GTT_BAR0_MAP_REG_MSDM_RAM +
- MSTORM_ETH_PF_PRODS_OFFSET(abs_l2_queue);
+ *pp_prod = p_hwfn->regview +
+ GTT_BAR0_MAP_REG_MSDM_RAM +
+ MSTORM_ETH_PF_PRODS_OFFSET(p_cid->abs.queue_id);
/* Init the rcq, rx bd and rx sge (if valid) producers to 0 */
__internal_ram_wr(p_hwfn, *pp_prod, sizeof(u32),
(u32 *)(&init_prod_val));
+ return qed_eth_rxq_start_ramrod(p_hwfn, p_cid,
+ bd_max_bytes,
+ bd_chain_phys_addr,
+ cqe_pbl_addr, cqe_pbl_size);
+}
+
+static int
+qed_eth_rx_queue_start(struct qed_hwfn *p_hwfn,
+ u16 opaque_fid,
+ struct qed_queue_start_common_params *p_params,
+ u16 bd_max_bytes,
+ dma_addr_t bd_chain_phys_addr,
+ dma_addr_t cqe_pbl_addr,
+ u16 cqe_pbl_size,
+ struct qed_rxq_start_ret_params *p_ret_params)
+{
+ struct qed_queue_cid *p_cid;
+ int rc;
+
/* Allocate a CID for the queue */
- p_rx_cid = &p_hwfn->p_rx_cids[p_params->queue_id];
- rc = qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_ETH, &p_rx_cid->cid);
- if (rc) {
- DP_NOTICE(p_hwfn, "Failed to acquire cid\n");
- return rc;
- }
- p_rx_cid->b_cid_allocated = true;
+ p_cid = qed_eth_queue_to_cid(p_hwfn, opaque_fid, p_params);
+ if (!p_cid)
+ return -ENOMEM;
- rc = qed_sp_eth_rxq_start_ramrod(p_hwfn,
- opaque_fid,
- p_rx_cid->cid,
- p_params,
- abs_stats_id,
+ if (IS_PF(p_hwfn->cdev)) {
+ rc = qed_eth_pf_rx_queue_start(p_hwfn, p_cid,
+ bd_max_bytes,
+ bd_chain_phys_addr,
+ cqe_pbl_addr, cqe_pbl_size,
+ &p_ret_params->p_prod);
+ } else {
+ rc = qed_vf_pf_rxq_start(p_hwfn, p_cid,
bd_max_bytes,
bd_chain_phys_addr,
- cqe_pbl_addr, cqe_pbl_size, false);
+ cqe_pbl_addr,
+ cqe_pbl_size, &p_ret_params->p_prod);
+ }
+ /* Provide the caller with a reference to the queue handle */
if (rc)
- qed_sp_release_queue_cid(p_hwfn, p_rx_cid);
+ qed_eth_queue_cid_release(p_hwfn, p_cid);
+ else
+ p_ret_params->p_handle = (void *)p_cid;
return rc;
}
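
qed_eth_rx_queue_start() above hands the queue-cid back to its caller as an opaque void * in the ret-params, and the matching stop path takes that pointer back, so upper layers never dereference struct qed_queue_cid. A toy model of that opaque-handle contract, under invented names:

#include <stdio.h>
#include <stdlib.h>

struct queue_cid { int cid; };
struct start_ret_params { void *p_handle; };

static int queue_start(int is_pf, struct start_ret_params *ret)
{
	struct queue_cid *p = calloc(1, sizeof(*p));

	if (!p)
		return -1;
	p->cid = is_pf ? 42 : 0;	/* VFs get their CID from the PF */
	ret->p_handle = p;		/* caller sees only an opaque handle */
	return 0;
}

static void queue_stop(void *handle)
{
	struct queue_cid *p = handle;	/* only this layer knows the type */

	printf("stopping CID %d\n", p->cid);
	free(p);
}

int main(void)
{
	struct start_ret_params ret;

	if (!queue_start(1, &ret))
		queue_stop(ret.p_handle);
	return 0;
}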
int qed_sp_eth_rx_queues_update(struct qed_hwfn *p_hwfn,
- u16 rx_queue_id,
+ void **pp_rxq_handles,
u8 num_rxqs,
u8 complete_cqe_flg,
u8 complete_event_flg,
@@ -663,8 +747,7 @@ int qed_sp_eth_rx_queues_update(struct qed_hwfn *p_hwfn,
struct rx_queue_update_ramrod_data *p_ramrod = NULL;
struct qed_spq_entry *p_ent = NULL;
struct qed_sp_init_data init_data;
- struct qed_hw_cid_data *p_rx_cid;
- u16 qid, abs_rx_q_id = 0;
+ struct qed_queue_cid *p_cid;
int rc = -EINVAL;
u8 i;
@@ -673,12 +756,11 @@ int qed_sp_eth_rx_queues_update(struct qed_hwfn *p_hwfn,
init_data.p_comp_data = p_comp_data;
for (i = 0; i < num_rxqs; i++) {
- qid = rx_queue_id + i;
- p_rx_cid = &p_hwfn->p_rx_cids[qid];
+ p_cid = ((struct qed_queue_cid **)pp_rxq_handles)[i];
/* Get SPQ entry */
- init_data.cid = p_rx_cid->cid;
- init_data.opaque_fid = p_rx_cid->opaque_fid;
+ init_data.cid = p_cid->cid;
+ init_data.opaque_fid = p_cid->opaque_fid;
rc = qed_sp_init_request(p_hwfn, &p_ent,
ETH_RAMROD_RX_QUEUE_UPDATE,
@@ -687,10 +769,9 @@ int qed_sp_eth_rx_queues_update(struct qed_hwfn *p_hwfn,
return rc;
p_ramrod = &p_ent->ramrod.rx_queue_update;
+ p_ramrod->vport_id = p_cid->abs.vport_id;
- qed_fw_vport(p_hwfn, p_rx_cid->vport_id, &p_ramrod->vport_id);
- qed_fw_l2_queue(p_hwfn, qid, &abs_rx_q_id);
- p_ramrod->rx_queue_id = cpu_to_le16(abs_rx_q_id);
+ p_ramrod->rx_queue_id = cpu_to_le16(p_cid->abs.queue_id);
p_ramrod->complete_cqe_flg = complete_cqe_flg;
p_ramrod->complete_event_flg = complete_event_flg;
@@ -702,24 +783,19 @@ int qed_sp_eth_rx_queues_update(struct qed_hwfn *p_hwfn,
return rc;
}
-int qed_sp_eth_rx_queue_stop(struct qed_hwfn *p_hwfn,
- u16 rx_queue_id,
- bool eq_completion_only, bool cqe_completion)
+static int
+qed_eth_pf_rx_queue_stop(struct qed_hwfn *p_hwfn,
+ struct qed_queue_cid *p_cid,
+ bool b_eq_completion_only, bool b_cqe_completion)
{
- struct qed_hw_cid_data *p_rx_cid = &p_hwfn->p_rx_cids[rx_queue_id];
struct rx_queue_stop_ramrod_data *p_ramrod = NULL;
struct qed_spq_entry *p_ent = NULL;
struct qed_sp_init_data init_data;
- u16 abs_rx_q_id = 0;
- int rc = -EINVAL;
-
- if (IS_VF(p_hwfn->cdev))
- return qed_vf_pf_rxq_stop(p_hwfn, rx_queue_id, cqe_completion);
+ int rc;
- /* Get SPQ entry */
memset(&init_data, 0, sizeof(init_data));
- init_data.cid = p_rx_cid->cid;
- init_data.opaque_fid = p_rx_cid->opaque_fid;
+ init_data.cid = p_cid->cid;
+ init_data.opaque_fid = p_cid->opaque_fid;
init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
rc = qed_sp_init_request(p_hwfn, &p_ent,
@@ -729,62 +805,53 @@ int qed_sp_eth_rx_queue_stop(struct qed_hwfn *p_hwfn,
return rc;
p_ramrod = &p_ent->ramrod.rx_queue_stop;
-
- qed_fw_vport(p_hwfn, p_rx_cid->vport_id, &p_ramrod->vport_id);
- qed_fw_l2_queue(p_hwfn, rx_queue_id, &abs_rx_q_id);
- p_ramrod->rx_queue_id = cpu_to_le16(abs_rx_q_id);
+ p_ramrod->vport_id = p_cid->abs.vport_id;
+ p_ramrod->rx_queue_id = cpu_to_le16(p_cid->abs.queue_id);
/* Cleaning the queue requires the completion to arrive there.
* In addition, VFs require the answer to come as eqe to PF.
*/
- p_ramrod->complete_cqe_flg =
- (!!(p_rx_cid->opaque_fid == p_hwfn->hw_info.opaque_fid) &&
- !eq_completion_only) || cqe_completion;
- p_ramrod->complete_event_flg =
- !(p_rx_cid->opaque_fid == p_hwfn->hw_info.opaque_fid) ||
- eq_completion_only;
+ p_ramrod->complete_cqe_flg = (!p_cid->is_vf &&
+ !b_eq_completion_only) ||
+ b_cqe_completion;
+ p_ramrod->complete_event_flg = p_cid->is_vf || b_eq_completion_only;
- rc = qed_spq_post(p_hwfn, p_ent, NULL);
- if (rc)
- return rc;
+ return qed_spq_post(p_hwfn, p_ent, NULL);
+}
+
+int qed_eth_rx_queue_stop(struct qed_hwfn *p_hwfn,
+ void *p_rxq,
+ bool eq_completion_only, bool cqe_completion)
+{
+ struct qed_queue_cid *p_cid = (struct qed_queue_cid *)p_rxq;
+ int rc = -EINVAL;
+
+ if (IS_PF(p_hwfn->cdev))
+ rc = qed_eth_pf_rx_queue_stop(p_hwfn, p_cid,
+ eq_completion_only,
+ cqe_completion);
+ else
+ rc = qed_vf_pf_rxq_stop(p_hwfn, p_cid, cqe_completion);
- return qed_sp_release_queue_cid(p_hwfn, p_rx_cid);
+ if (!rc)
+ qed_eth_queue_cid_release(p_hwfn, p_cid);
+ return rc;
}
-int qed_sp_eth_txq_start_ramrod(struct qed_hwfn *p_hwfn,
- u16 opaque_fid,
- u32 cid,
- struct qed_queue_start_common_params *p_params,
- u8 stats_id,
- dma_addr_t pbl_addr,
- u16 pbl_size,
- union qed_qm_pq_params *p_pq_params)
+int
+qed_eth_txq_start_ramrod(struct qed_hwfn *p_hwfn,
+ struct qed_queue_cid *p_cid,
+ dma_addr_t pbl_addr, u16 pbl_size, u16 pq_id)
{
struct tx_queue_start_ramrod_data *p_ramrod = NULL;
struct qed_spq_entry *p_ent = NULL;
struct qed_sp_init_data init_data;
- struct qed_hw_cid_data *p_tx_cid;
- u16 pq_id, abs_tx_q_id = 0;
int rc = -EINVAL;
- u8 abs_vport_id;
-
- /* Store information for the stop */
- p_tx_cid = &p_hwfn->p_tx_cids[p_params->queue_id];
- p_tx_cid->cid = cid;
- p_tx_cid->opaque_fid = opaque_fid;
-
- rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id);
- if (rc)
- return rc;
-
- rc = qed_fw_l2_queue(p_hwfn, p_params->queue_id, &abs_tx_q_id);
- if (rc)
- return rc;
/* Get SPQ entry */
memset(&init_data, 0, sizeof(init_data));
- init_data.cid = cid;
- init_data.opaque_fid = opaque_fid;
+ init_data.cid = p_cid->cid;
+ init_data.opaque_fid = p_cid->opaque_fid;
init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
rc = qed_sp_init_request(p_hwfn, &p_ent,
@@ -794,96 +861,92 @@ int qed_sp_eth_txq_start_ramrod(struct qed_hwfn *p_hwfn,
return rc;
p_ramrod = &p_ent->ramrod.tx_queue_start;
- p_ramrod->vport_id = abs_vport_id;
+ p_ramrod->vport_id = p_cid->abs.vport_id;
- p_ramrod->sb_id = cpu_to_le16(p_params->sb);
- p_ramrod->sb_index = p_params->sb_idx;
- p_ramrod->stats_counter_id = stats_id;
+ p_ramrod->sb_id = cpu_to_le16(p_cid->abs.sb);
+ p_ramrod->sb_index = p_cid->abs.sb_idx;
+ p_ramrod->stats_counter_id = p_cid->abs.stats_id;
- p_ramrod->queue_zone_id = cpu_to_le16(abs_tx_q_id);
+ p_ramrod->queue_zone_id = cpu_to_le16(p_cid->abs.queue_id);
+ p_ramrod->same_as_last_id = cpu_to_le16(p_cid->abs.queue_id);
p_ramrod->pbl_size = cpu_to_le16(pbl_size);
DMA_REGPAIR_LE(p_ramrod->pbl_base_addr, pbl_addr);
- pq_id = qed_get_qm_pq(p_hwfn, PROTOCOLID_ETH, p_pq_params);
p_ramrod->qm_pq_id = cpu_to_le16(pq_id);
return qed_spq_post(p_hwfn, p_ent, NULL);
}
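
Every multi-byte ramrod field above passes through cpu_to_le16() because the firmware parses little-endian structures regardless of host byte order. A quick userspace check of what that conversion guarantees; my_cpu_to_le16() is a portable stand-in for illustration, not the kernel macro:

#include <stdio.h>
#include <stdint.h>
#include <string.h>

/* Minimal cpu_to_le16 stand-in that works on either host endianness. */
static uint16_t my_cpu_to_le16(uint16_t v)
{
	uint8_t le[2] = { v & 0xff, v >> 8 };
	uint16_t out;

	memcpy(&out, le, 2);	/* reinterpret the LE byte pair */
	return out;
}

int main(void)
{
	uint16_t queue_zone = 0x1234;
	uint16_t wire = my_cpu_to_le16(queue_zone);
	uint8_t *b = (uint8_t *)&wire;

	/* In memory the firmware must always see 34 12, regardless of
	 * the host CPU's endianness.
	 */
	printf("%02x %02x\n", b[0], b[1]);
	return 0;
}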
static int
-qed_sp_eth_tx_queue_start(struct qed_hwfn *p_hwfn,
- u16 opaque_fid,
- struct qed_queue_start_common_params *p_params,
+qed_eth_pf_tx_queue_start(struct qed_hwfn *p_hwfn,
+ struct qed_queue_cid *p_cid,
+ u8 tc,
dma_addr_t pbl_addr,
u16 pbl_size, void __iomem **pp_doorbell)
{
- struct qed_hw_cid_data *p_tx_cid;
union qed_qm_pq_params pq_params;
- u8 abs_stats_id = 0;
int rc;
- if (IS_VF(p_hwfn->cdev)) {
- return qed_vf_pf_txq_start(p_hwfn,
- p_params->queue_id,
- p_params->sb,
- p_params->sb_idx,
- pbl_addr, pbl_size, pp_doorbell);
- }
+ memset(&pq_params, 0, sizeof(pq_params));
- rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_stats_id);
+ rc = qed_eth_txq_start_ramrod(p_hwfn, p_cid,
+ pbl_addr, pbl_size,
+ qed_get_qm_pq(p_hwfn, PROTOCOLID_ETH,
+ &pq_params));
if (rc)
return rc;
- p_tx_cid = &p_hwfn->p_tx_cids[p_params->queue_id];
- memset(p_tx_cid, 0, sizeof(*p_tx_cid));
- memset(&pq_params, 0, sizeof(pq_params));
+ /* Provide the caller with the necessary return values */
+ *pp_doorbell = p_hwfn->doorbells +
+ qed_db_addr(p_cid->cid, DQ_DEMS_LEGACY);
- /* Allocate a CID for the queue */
- rc = qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_ETH, &p_tx_cid->cid);
- if (rc) {
- DP_NOTICE(p_hwfn, "Failed to acquire cid\n");
- return rc;
- }
- p_tx_cid->b_cid_allocated = true;
+ return 0;
+}
- DP_VERBOSE(p_hwfn, QED_MSG_SP,
- "opaque_fid=0x%x, cid=0x%x, tx_qid=0x%x, vport_id=0x%x, sb_id=0x%x\n",
- opaque_fid, p_tx_cid->cid,
- p_params->queue_id, p_params->vport_id, p_params->sb);
-
- rc = qed_sp_eth_txq_start_ramrod(p_hwfn,
- opaque_fid,
- p_tx_cid->cid,
- p_params,
- abs_stats_id,
- pbl_addr,
- pbl_size,
- &pq_params);
-
- *pp_doorbell = (u8 __iomem *)p_hwfn->doorbells +
- qed_db_addr(p_tx_cid->cid, DQ_DEMS_LEGACY);
+static int
+qed_eth_tx_queue_start(struct qed_hwfn *p_hwfn,
+ u16 opaque_fid,
+ struct qed_queue_start_common_params *p_params,
+ u8 tc,
+ dma_addr_t pbl_addr,
+ u16 pbl_size,
+ struct qed_txq_start_ret_params *p_ret_params)
+{
+ struct qed_queue_cid *p_cid;
+ int rc;
+
+ p_cid = qed_eth_queue_to_cid(p_hwfn, opaque_fid, p_params);
+ if (!p_cid)
+ return -EINVAL;
+
+ if (IS_PF(p_hwfn->cdev))
+ rc = qed_eth_pf_tx_queue_start(p_hwfn, p_cid, tc,
+ pbl_addr, pbl_size,
+ &p_ret_params->p_doorbell);
+ else
+ rc = qed_vf_pf_txq_start(p_hwfn, p_cid,
+ pbl_addr, pbl_size,
+ &p_ret_params->p_doorbell);
if (rc)
- qed_sp_release_queue_cid(p_hwfn, p_tx_cid);
+ qed_eth_queue_cid_release(p_hwfn, p_cid);
+ else
+ p_ret_params->p_handle = (void *)p_cid;
return rc;
}
-int qed_sp_eth_tx_queue_stop(struct qed_hwfn *p_hwfn, u16 tx_queue_id)
+static int
+qed_eth_pf_tx_queue_stop(struct qed_hwfn *p_hwfn, struct qed_queue_cid *p_cid)
{
- struct qed_hw_cid_data *p_tx_cid = &p_hwfn->p_tx_cids[tx_queue_id];
struct qed_spq_entry *p_ent = NULL;
struct qed_sp_init_data init_data;
- int rc = -EINVAL;
-
- if (IS_VF(p_hwfn->cdev))
- return qed_vf_pf_txq_stop(p_hwfn, tx_queue_id);
+ int rc;
- /* Get SPQ entry */
memset(&init_data, 0, sizeof(init_data));
- init_data.cid = p_tx_cid->cid;
- init_data.opaque_fid = p_tx_cid->opaque_fid;
+ init_data.cid = p_cid->cid;
+ init_data.opaque_fid = p_cid->opaque_fid;
init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
rc = qed_sp_init_request(p_hwfn, &p_ent,
@@ -892,11 +955,22 @@ int qed_sp_eth_tx_queue_stop(struct qed_hwfn *p_hwfn, u16 tx_queue_id)
if (rc)
return rc;
- rc = qed_spq_post(p_hwfn, p_ent, NULL);
- if (rc)
- return rc;
+ return qed_spq_post(p_hwfn, p_ent, NULL);
+}
+
+int qed_eth_tx_queue_stop(struct qed_hwfn *p_hwfn, void *p_handle)
+{
+ struct qed_queue_cid *p_cid = (struct qed_queue_cid *)p_handle;
+ int rc;
+
+ if (IS_PF(p_hwfn->cdev))
+ rc = qed_eth_pf_tx_queue_stop(p_hwfn, p_cid);
+ else
+ rc = qed_vf_pf_txq_stop(p_hwfn, p_cid);
- return qed_sp_release_queue_cid(p_hwfn, p_tx_cid);
+ if (!rc)
+ qed_eth_queue_cid_release(p_hwfn, p_cid);
+ return rc;
}
static enum eth_filter_action qed_filter_action(enum qed_filter_opcode opcode)
@@ -1652,6 +1726,7 @@ static int qed_fill_eth_dev_info(struct qed_dev *cdev,
if (IS_PF(cdev)) {
int max_vf_vlan_filters = 0;
+ int max_vf_mac_filters = 0;
if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
for_each_hwfn(cdev, i)
@@ -1665,11 +1740,18 @@ static int qed_fill_eth_dev_info(struct qed_dev *cdev,
info->num_queues = cdev->num_hwfns;
}
- if (IS_QED_SRIOV(cdev))
+ if (IS_QED_SRIOV(cdev)) {
max_vf_vlan_filters = cdev->p_iov_info->total_vfs *
QED_ETH_VF_NUM_VLAN_FILTERS;
- info->num_vlan_filters = RESC_NUM(&cdev->hwfns[0], QED_VLAN) -
+ max_vf_mac_filters = cdev->p_iov_info->total_vfs *
+ QED_ETH_VF_NUM_MAC_FILTERS;
+ }
+ info->num_vlan_filters = RESC_NUM(QED_LEADING_HWFN(cdev),
+ QED_VLAN) -
max_vf_vlan_filters;
+ info->num_mac_filters = RESC_NUM(QED_LEADING_HWFN(cdev),
+ QED_MAC) -
+ max_vf_mac_filters;
ether_addr_copy(info->port_mac,
cdev->hwfns[0].hw_info.hw_mac_addr);
@@ -1683,7 +1765,9 @@ static int qed_fill_eth_dev_info(struct qed_dev *cdev,
}
qed_vf_get_num_vlan_filters(&cdev->hwfns[0],
- &info->num_vlan_filters);
+ (u8 *)&info->num_vlan_filters);
+ qed_vf_get_num_mac_filters(&cdev->hwfns[0],
+ (u8 *)&info->num_mac_filters);
qed_vf_get_port_mac(&cdev->hwfns[0], info->port_mac);
info->is_legacy = !!cdev->hwfns[0].vf_iov_info->b_pre_fp_hsi;
@@ -1870,58 +1954,53 @@ static int qed_update_vport(struct qed_dev *cdev,
}
static int qed_start_rxq(struct qed_dev *cdev,
- struct qed_queue_start_common_params *params,
+ u8 rss_num,
+ struct qed_queue_start_common_params *p_params,
u16 bd_max_bytes,
dma_addr_t bd_chain_phys_addr,
dma_addr_t cqe_pbl_addr,
u16 cqe_pbl_size,
- void __iomem **pp_prod)
+ struct qed_rxq_start_ret_params *ret_params)
{
struct qed_hwfn *p_hwfn;
int rc, hwfn_index;
- hwfn_index = params->rss_id % cdev->num_hwfns;
+ hwfn_index = rss_num % cdev->num_hwfns;
p_hwfn = &cdev->hwfns[hwfn_index];
- /* Fix queue ID in 100g mode */
- params->queue_id /= cdev->num_hwfns;
-
- rc = qed_sp_eth_rx_queue_start(p_hwfn,
- p_hwfn->hw_info.opaque_fid,
- params,
- bd_max_bytes,
- bd_chain_phys_addr,
- cqe_pbl_addr,
- cqe_pbl_size,
- pp_prod);
+ p_params->queue_id = p_params->queue_id / cdev->num_hwfns;
+ p_params->stats_id = p_params->vport_id;
+ rc = qed_eth_rx_queue_start(p_hwfn,
+ p_hwfn->hw_info.opaque_fid,
+ p_params,
+ bd_max_bytes,
+ bd_chain_phys_addr,
+ cqe_pbl_addr, cqe_pbl_size, ret_params);
if (rc) {
- DP_ERR(cdev, "Failed to start RXQ#%d\n", params->queue_id);
+ DP_ERR(cdev, "Failed to start RXQ#%d\n", p_params->queue_id);
return rc;
}
DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP),
- "Started RX-Q %d [rss %d] on V-PORT %d and SB %d\n",
- params->queue_id, params->rss_id, params->vport_id,
- params->sb);
+ "Started RX-Q %d [rss_num %d] on V-PORT %d and SB %d\n",
+ p_params->queue_id, rss_num, p_params->vport_id,
+ p_params->sb);
return 0;
}
-static int qed_stop_rxq(struct qed_dev *cdev,
- struct qed_stop_rxq_params *params)
+static int qed_stop_rxq(struct qed_dev *cdev, u8 rss_id, void *handle)
{
int rc, hwfn_index;
struct qed_hwfn *p_hwfn;
- hwfn_index = params->rss_id % cdev->num_hwfns;
- p_hwfn = &cdev->hwfns[hwfn_index];
+ hwfn_index = rss_id % cdev->num_hwfns;
+ p_hwfn = &cdev->hwfns[hwfn_index];
- rc = qed_sp_eth_rx_queue_stop(p_hwfn,
- params->rx_queue_id / cdev->num_hwfns,
- params->eq_completion_only, false);
+ rc = qed_eth_rx_queue_stop(p_hwfn, handle, false, false);
if (rc) {
- DP_ERR(cdev, "Failed to stop RXQ#%d\n", params->rx_queue_id);
+ DP_ERR(cdev, "Failed to stop RXQ#%02x\n", rss_id);
return rc;
}
@@ -1929,26 +2008,24 @@ static int qed_stop_rxq(struct qed_dev *cdev,
}
static int qed_start_txq(struct qed_dev *cdev,
+ u8 rss_num,
struct qed_queue_start_common_params *p_params,
dma_addr_t pbl_addr,
u16 pbl_size,
- void __iomem **pp_doorbell)
+ struct qed_txq_start_ret_params *ret_params)
{
struct qed_hwfn *p_hwfn;
int rc, hwfn_index;
- hwfn_index = p_params->rss_id % cdev->num_hwfns;
- p_hwfn = &cdev->hwfns[hwfn_index];
-
- /* Fix queue ID in 100g mode */
- p_params->queue_id /= cdev->num_hwfns;
+ hwfn_index = rss_num % cdev->num_hwfns;
+ p_hwfn = &cdev->hwfns[hwfn_index];
+ p_params->queue_id = p_params->queue_id / cdev->num_hwfns;
+ p_params->stats_id = p_params->vport_id;
- rc = qed_sp_eth_tx_queue_start(p_hwfn,
- p_hwfn->hw_info.opaque_fid,
- p_params,
- pbl_addr,
- pbl_size,
- pp_doorbell);
+ rc = qed_eth_tx_queue_start(p_hwfn,
+ p_hwfn->hw_info.opaque_fid,
+ p_params, 0,
+ pbl_addr, pbl_size, ret_params);
if (rc) {
DP_ERR(cdev, "Failed to start TXQ#%d\n", p_params->queue_id);
@@ -1956,8 +2033,8 @@ static int qed_start_txq(struct qed_dev *cdev,
}
DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP),
- "Started TX-Q %d [rss %d] on V-PORT %d and SB %d\n",
- p_params->queue_id, p_params->rss_id, p_params->vport_id,
+ "Started TX-Q %d [rss_num %d] on V-PORT %d and SB %d\n",
+ p_params->queue_id, rss_num, p_params->vport_id,
p_params->sb);
return 0;
@@ -1971,19 +2048,17 @@ static int qed_fastpath_stop(struct qed_dev *cdev)
return 0;
}
-static int qed_stop_txq(struct qed_dev *cdev,
- struct qed_stop_txq_params *params)
+static int qed_stop_txq(struct qed_dev *cdev, u8 rss_id, void *handle)
{
struct qed_hwfn *p_hwfn;
int rc, hwfn_index;
- hwfn_index = params->rss_id % cdev->num_hwfns;
- p_hwfn = &cdev->hwfns[hwfn_index];
+ hwfn_index = rss_id % cdev->num_hwfns;
+ p_hwfn = &cdev->hwfns[hwfn_index];
- rc = qed_sp_eth_tx_queue_stop(p_hwfn,
- params->tx_queue_id / cdev->num_hwfns);
+ rc = qed_eth_tx_queue_stop(p_hwfn, handle);
if (rc) {
- DP_ERR(cdev, "Failed to stop TXQ#%d\n", params->tx_queue_id);
+ DP_ERR(cdev, "Failed to stop TXQ#%02x\n", rss_id);
return rc;
}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.h b/drivers/net/ethernet/qlogic/qed/qed_l2.h
index e495d62fcc03..48c9bfc28140 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_l2.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_l2.h
@@ -78,11 +78,34 @@ struct qed_filter_mcast {
unsigned char mac[QED_MAX_MC_ADDRS][ETH_ALEN];
};
-int qed_sp_eth_rx_queue_stop(struct qed_hwfn *p_hwfn,
- u16 rx_queue_id,
- bool eq_completion_only, bool cqe_completion);
+/**
+ * @brief qed_eth_rx_queue_stop - This ramrod closes an Rx queue
+ *
+ * @param p_hwfn
+ * @param p_rxq Handle of queue to close
+ * @param eq_completion_only If True, completion will be on
+ * the EQe; if False, completion
+ * will be on the EQe if the p_hwfn
+ * opaque differs from the RXQ
+ * opaque, otherwise on the CQe.
+ * @param cqe_completion If True, completion will be
+ * received on the CQe.
+ * @return int
+ */
+int
+qed_eth_rx_queue_stop(struct qed_hwfn *p_hwfn,
+ void *p_rxq,
+ bool eq_completion_only, bool cqe_completion);
-int qed_sp_eth_tx_queue_stop(struct qed_hwfn *p_hwfn, u16 tx_queue_id);
+/**
+ * @brief qed_eth_tx_queue_stop - closes a Tx queue
+ *
+ * @param p_hwfn
+ * @param p_txq - handle of the Tx queue to be closed
+ *
+ * @return int
+ */
+int qed_eth_tx_queue_stop(struct qed_hwfn *p_hwfn, void *p_txq);
enum qed_tpa_mode {
QED_TPA_MODE_NONE,
@@ -196,19 +219,19 @@ int qed_sp_eth_filter_ucast(struct qed_hwfn *p_hwfn,
* @note At the moment - only used by non-Linux VFs.
*
* @param p_hwfn
- * @param rx_queue_id RX Queue ID
- * @param num_rxqs Allow to update multiple rx
- * queues, from rx_queue_id to
- * (rx_queue_id + num_rxqs)
+ * @param pp_rxq_handlers An array of queue handlers to be updated.
+ * @param num_rxqs Number of queues to update.
* @param complete_cqe_flg Post completion to the CQE Ring if set
* @param complete_event_flg Post completion to the Event Ring if set
+ * @param comp_mode
+ * @param p_comp_data
*
* @return int
*/
int
qed_sp_eth_rx_queues_update(struct qed_hwfn *p_hwfn,
- u16 rx_queue_id,
+ void **pp_rxq_handlers,
u8 num_rxqs,
u8 complete_cqe_flg,
u8 complete_event_flg,
@@ -217,27 +240,79 @@ qed_sp_eth_rx_queues_update(struct qed_hwfn *p_hwfn,
void qed_get_vport_stats(struct qed_dev *cdev, struct qed_eth_stats *stats);
-int qed_sp_eth_vport_start(struct qed_hwfn *p_hwfn,
- struct qed_sp_vport_start_params *p_params);
+void qed_reset_vport_stats(struct qed_dev *cdev);
+
+struct qed_queue_cid {
+ /* 'Relative' is a relative term ;-). Usually the indices [not counting
+ * SBs] would be PF-relative, but there are some cases where that isn't
+ * the case - specifically for a PF configuring its VF indices it's
+ * possible some fields [E.g., stats-id] in 'rel' would already be abs.
+ */
+ struct qed_queue_start_common_params rel;
+ struct qed_queue_start_common_params abs;
+ u32 cid;
+ u16 opaque_fid;
+
+ /* VF queues are mapped differently, so we need to know the
+ * relative queue associated with them [0-based].
+ * Notice this is relevant on the *PF* queue-cid of its VF's queues,
+ * and not on the VF itself.
+ */
+ bool is_vf;
+ u8 vf_qid;
+
+ /* Legacy VFs might have Rx producer located elsewhere */
+ bool b_legacy_vf;
+};
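
The rel/abs pair in struct qed_queue_cid keeps both views of the same queue: the function-relative indices the caller supplied and the engine-absolute ones the firmware needs. A toy translation is sketched below; the fixed base offsets are invented for illustration, whereas the real driver resolves indices through qed_fw_vport()/qed_fw_l2_queue().

#include <stdio.h>

struct queue_params { unsigned int vport_id, queue_id; };

struct queue_cid_model {
	struct queue_params rel;	/* function-relative view */
	struct queue_params abs;	/* engine-absolute view */
};

#define VPORT_BASE 8	/* hypothetical first absolute vport of this PF */
#define QZONE_BASE 64	/* hypothetical first absolute queue-zone */

static void resolve_abs(struct queue_cid_model *cid)
{
	cid->abs.vport_id = VPORT_BASE + cid->rel.vport_id;
	cid->abs.queue_id = QZONE_BASE + cid->rel.queue_id;
}

int main(void)
{
	struct queue_cid_model cid = { .rel = { .vport_id = 1, .queue_id = 3 } };

	resolve_abs(&cid);
	printf("vport %u -> %u, queue %u -> %u\n",
	       cid.rel.vport_id, cid.abs.vport_id,
	       cid.rel.queue_id, cid.abs.queue_id);
	return 0;
}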
-int qed_sp_eth_rxq_start_ramrod(struct qed_hwfn *p_hwfn,
- u16 opaque_fid,
- u32 cid,
- struct qed_queue_start_common_params *params,
- u8 stats_id,
- u16 bd_max_bytes,
- dma_addr_t bd_chain_phys_addr,
- dma_addr_t cqe_pbl_addr,
- u16 cqe_pbl_size, bool b_use_zone_a_prod);
-
-int qed_sp_eth_txq_start_ramrod(struct qed_hwfn *p_hwfn,
- u16 opaque_fid,
- u32 cid,
- struct qed_queue_start_common_params *p_params,
- u8 stats_id,
- dma_addr_t pbl_addr,
- u16 pbl_size,
- union qed_qm_pq_params *p_pq_params);
+void qed_eth_queue_cid_release(struct qed_hwfn *p_hwfn,
+ struct qed_queue_cid *p_cid);
+
+struct qed_queue_cid *
+_qed_eth_queue_to_cid(struct qed_hwfn *p_hwfn,
+ u16 opaque_fid,
+ u32 cid,
+ u8 vf_qid,
+ struct qed_queue_start_common_params *p_params);
+
+int
+qed_sp_eth_vport_start(struct qed_hwfn *p_hwfn,
+ struct qed_sp_vport_start_params *p_params);
+
+/**
+ * @brief - Starts an Rx queue, when queue_cid is already prepared
+ *
+ * @param p_hwfn
+ * @param p_cid
+ * @param bd_max_bytes
+ * @param bd_chain_phys_addr
+ * @param cqe_pbl_addr
+ * @param cqe_pbl_size
+ *
+ * @return int
+ */
+int
+qed_eth_rxq_start_ramrod(struct qed_hwfn *p_hwfn,
+ struct qed_queue_cid *p_cid,
+ u16 bd_max_bytes,
+ dma_addr_t bd_chain_phys_addr,
+ dma_addr_t cqe_pbl_addr, u16 cqe_pbl_size);
+
+/**
+ * @brief - Starts a Tx queue, where queue_cid is already prepared
+ *
+ * @param p_hwfn
+ * @param p_cid
+ * @param pbl_addr
+ * @param pbl_size
+ * @param pq_id - id of the PQ to use for this Tx queue
+ *
+ * @return int
+ */
+int
+qed_eth_txq_start_ramrod(struct qed_hwfn *p_hwfn,
+ struct qed_queue_cid *p_cid,
+ dma_addr_t pbl_addr, u16 pbl_size, u16 pq_id);
u8 qed_mcast_bin_from_mac(u8 *mac);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.c b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
index 62ae55bd81b8..8e5cb7605b0f 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_ll2.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
@@ -36,6 +36,7 @@
#include "qed_int.h"
#include "qed_ll2.h"
#include "qed_mcp.h"
+#include "qed_ooo.h"
#include "qed_reg_addr.h"
#include "qed_sp.h"
#include "qed_roce.h"
@@ -296,25 +297,34 @@ static void qed_ll2_txq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle)
list_del(&p_pkt->list_entry);
b_last_packet = list_empty(&p_tx->active_descq);
list_add_tail(&p_pkt->list_entry, &p_tx->free_descq);
- p_tx->cur_completing_packet = *p_pkt;
- p_tx->cur_completing_bd_idx = 1;
- b_last_frag = p_tx->cur_completing_bd_idx == p_pkt->bd_used;
- tx_frag = p_pkt->bds_set[0].tx_frag;
- if (p_ll2_conn->gsi_enable)
- qed_ll2b_release_tx_gsi_packet(p_hwfn,
- p_ll2_conn->my_id,
- p_pkt->cookie,
- tx_frag,
- b_last_frag,
- b_last_packet);
- else
- qed_ll2b_complete_tx_packet(p_hwfn,
- p_ll2_conn->my_id,
- p_pkt->cookie,
- tx_frag,
- b_last_frag,
- b_last_packet);
+ if (p_ll2_conn->conn_type == QED_LL2_TYPE_ISCSI_OOO) {
+ struct qed_ooo_buffer *p_buffer;
+ p_buffer = (struct qed_ooo_buffer *)p_pkt->cookie;
+ qed_ooo_put_free_buffer(p_hwfn, p_hwfn->p_ooo_info,
+ p_buffer);
+ } else {
+ p_tx->cur_completing_packet = *p_pkt;
+ p_tx->cur_completing_bd_idx = 1;
+ b_last_frag =
+ p_tx->cur_completing_bd_idx == p_pkt->bd_used;
+ tx_frag = p_pkt->bds_set[0].tx_frag;
+ if (p_ll2_conn->gsi_enable)
+ qed_ll2b_release_tx_gsi_packet(p_hwfn,
+ p_ll2_conn->my_id,
+ p_pkt->cookie,
+ tx_frag,
+ b_last_frag,
+ b_last_packet);
+ else
+ qed_ll2b_complete_tx_packet(p_hwfn,
+ p_ll2_conn->my_id,
+ p_pkt->cookie,
+ tx_frag,
+ b_last_frag,
+ b_last_packet);
+ }
}
}
@@ -540,12 +550,457 @@ static void qed_ll2_rxq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle)
list_move_tail(&p_pkt->list_entry, &p_rx->free_descq);
- rx_buf_addr = p_pkt->rx_buf_addr;
- cookie = p_pkt->cookie;
+ if (p_ll2_conn->conn_type == QED_LL2_TYPE_ISCSI_OOO) {
+ struct qed_ooo_buffer *p_buffer;
+
+ p_buffer = (struct qed_ooo_buffer *)p_pkt->cookie;
+ qed_ooo_put_free_buffer(p_hwfn, p_hwfn->p_ooo_info,
+ p_buffer);
+ } else {
+ rx_buf_addr = p_pkt->rx_buf_addr;
+ cookie = p_pkt->cookie;
+
+ b_last = list_empty(&p_rx->active_descq);
+ }
+ }
+}
+
+#if IS_ENABLED(CONFIG_QED_ISCSI)
+static u8 qed_ll2_convert_rx_parse_to_tx_flags(u16 parse_flags)
+{
+ u8 bd_flags = 0;
+
+ if (GET_FIELD(parse_flags, PARSING_AND_ERR_FLAGS_TAG8021QEXIST))
+ SET_FIELD(bd_flags, CORE_TX_BD_FLAGS_VLAN_INSERTION, 1);
+
+ return bd_flags;
+}
+
+static int qed_ll2_lb_rxq_handler(struct qed_hwfn *p_hwfn,
+ struct qed_ll2_info *p_ll2_conn)
+{
+ struct qed_ll2_rx_queue *p_rx = &p_ll2_conn->rx_queue;
+ u16 packet_length = 0, parse_flags = 0, vlan = 0;
+ struct qed_ll2_rx_packet *p_pkt = NULL;
+ u32 num_ooo_add_to_peninsula = 0, cid;
+ union core_rx_cqe_union *cqe = NULL;
+ u16 cq_new_idx = 0, cq_old_idx = 0;
+ struct qed_ooo_buffer *p_buffer;
+ struct ooo_opaque *iscsi_ooo;
+ u8 placement_offset = 0;
+ u8 cqe_type;
+
+ cq_new_idx = le16_to_cpu(*p_rx->p_fw_cons);
+ cq_old_idx = qed_chain_get_cons_idx(&p_rx->rcq_chain);
+ if (cq_new_idx == cq_old_idx)
+ return 0;
+
+ while (cq_new_idx != cq_old_idx) {
+ struct core_rx_fast_path_cqe *p_cqe_fp;
+
+ cqe = qed_chain_consume(&p_rx->rcq_chain);
+ cq_old_idx = qed_chain_get_cons_idx(&p_rx->rcq_chain);
+ cqe_type = cqe->rx_cqe_sp.type;
+
+ if (cqe_type != CORE_RX_CQE_TYPE_REGULAR) {
+ DP_NOTICE(p_hwfn,
+ "Got a non-regular LB LL2 completion [type 0x%02x]\n",
+ cqe_type);
+ return -EINVAL;
+ }
+ p_cqe_fp = &cqe->rx_cqe_fp;
+
+ placement_offset = p_cqe_fp->placement_offset;
+ parse_flags = le16_to_cpu(p_cqe_fp->parse_flags.flags);
+ packet_length = le16_to_cpu(p_cqe_fp->packet_length);
+ vlan = le16_to_cpu(p_cqe_fp->vlan);
+ iscsi_ooo = (struct ooo_opaque *)&p_cqe_fp->opaque_data;
+ qed_ooo_save_history_entry(p_hwfn, p_hwfn->p_ooo_info,
+ iscsi_ooo);
+ cid = le32_to_cpu(iscsi_ooo->cid);
+
+ /* Process delete isle first */
+ if (iscsi_ooo->drop_size)
+ qed_ooo_delete_isles(p_hwfn, p_hwfn->p_ooo_info, cid,
+ iscsi_ooo->drop_isle,
+ iscsi_ooo->drop_size);
+
+ if (iscsi_ooo->ooo_opcode == TCP_EVENT_NOP)
+ continue;
+
+ /* Now process create/add/join isles */
+ if (list_empty(&p_rx->active_descq)) {
+ DP_NOTICE(p_hwfn,
+ "LL2 OOO RX chain has no submitted buffers\n"
+ );
+ return -EIO;
+ }
+
+ p_pkt = list_first_entry(&p_rx->active_descq,
+ struct qed_ll2_rx_packet, list_entry);
+
+ if ((iscsi_ooo->ooo_opcode == TCP_EVENT_ADD_NEW_ISLE) ||
+ (iscsi_ooo->ooo_opcode == TCP_EVENT_ADD_ISLE_RIGHT) ||
+ (iscsi_ooo->ooo_opcode == TCP_EVENT_ADD_ISLE_LEFT) ||
+ (iscsi_ooo->ooo_opcode == TCP_EVENT_ADD_PEN) ||
+ (iscsi_ooo->ooo_opcode == TCP_EVENT_JOIN)) {
+ if (!p_pkt) {
+ DP_NOTICE(p_hwfn,
+ "LL2 OOO RX packet is not valid\n");
+ return -EIO;
+ }
+ list_del(&p_pkt->list_entry);
+ p_buffer = (struct qed_ooo_buffer *)p_pkt->cookie;
+ p_buffer->packet_length = packet_length;
+ p_buffer->parse_flags = parse_flags;
+ p_buffer->vlan = vlan;
+ p_buffer->placement_offset = placement_offset;
+ qed_chain_consume(&p_rx->rxq_chain);
+ list_add_tail(&p_pkt->list_entry, &p_rx->free_descq);
+
+ switch (iscsi_ooo->ooo_opcode) {
+ case TCP_EVENT_ADD_NEW_ISLE:
+ qed_ooo_add_new_isle(p_hwfn,
+ p_hwfn->p_ooo_info,
+ cid,
+ iscsi_ooo->ooo_isle,
+ p_buffer);
+ break;
+ case TCP_EVENT_ADD_ISLE_RIGHT:
+ qed_ooo_add_new_buffer(p_hwfn,
+ p_hwfn->p_ooo_info,
+ cid,
+ iscsi_ooo->ooo_isle,
+ p_buffer,
+ QED_OOO_RIGHT_BUF);
+ break;
+ case TCP_EVENT_ADD_ISLE_LEFT:
+ qed_ooo_add_new_buffer(p_hwfn,
+ p_hwfn->p_ooo_info,
+ cid,
+ iscsi_ooo->ooo_isle,
+ p_buffer,
+ QED_OOO_LEFT_BUF);
+ break;
+ case TCP_EVENT_JOIN:
+ qed_ooo_add_new_buffer(p_hwfn,
+ p_hwfn->p_ooo_info,
+ cid,
+ iscsi_ooo->ooo_isle +
+ 1,
+ p_buffer,
+ QED_OOO_LEFT_BUF);
+ qed_ooo_join_isles(p_hwfn,
+ p_hwfn->p_ooo_info,
+ cid, iscsi_ooo->ooo_isle);
+ break;
+ case TCP_EVENT_ADD_PEN:
+ num_ooo_add_to_peninsula++;
+ qed_ooo_put_ready_buffer(p_hwfn,
+ p_hwfn->p_ooo_info,
+ p_buffer, true);
+ break;
+ }
+ } else {
+ DP_NOTICE(p_hwfn,
+ "Unexpected event (%d) TX OOO completion\n",
+ iscsi_ooo->ooo_opcode);
+ }
+ }
+
+ return 0;
+}
+
+static void
+qed_ooo_submit_tx_buffers(struct qed_hwfn *p_hwfn,
+ struct qed_ll2_info *p_ll2_conn)
+{
+ struct qed_ooo_buffer *p_buffer;
+ int rc;
+ u16 l4_hdr_offset_w;
+ dma_addr_t first_frag;
+ u16 parse_flags;
+ u8 bd_flags;
+
+ /* Submit Tx buffers here */
+ while ((p_buffer = qed_ooo_get_ready_buffer(p_hwfn,
+ p_hwfn->p_ooo_info))) {
+ l4_hdr_offset_w = 0;
+ bd_flags = 0;
+
+ first_frag = p_buffer->rx_buffer_phys_addr +
+ p_buffer->placement_offset;
+ parse_flags = p_buffer->parse_flags;
+ bd_flags = qed_ll2_convert_rx_parse_to_tx_flags(parse_flags);
+ SET_FIELD(bd_flags, CORE_TX_BD_FLAGS_FORCE_VLAN_MODE, 1);
+ SET_FIELD(bd_flags, CORE_TX_BD_FLAGS_L4_PROTOCOL, 1);
+
+ rc = qed_ll2_prepare_tx_packet(p_hwfn, p_ll2_conn->my_id, 1,
+ p_buffer->vlan, bd_flags,
+ l4_hdr_offset_w,
+ p_ll2_conn->tx_dest, 0,
+ first_frag,
+ p_buffer->packet_length,
+ p_buffer, true);
+ if (rc) {
+ qed_ooo_put_ready_buffer(p_hwfn, p_hwfn->p_ooo_info,
+ p_buffer, false);
+ break;
+ }
+ }
+}
+
+static void
+qed_ooo_submit_rx_buffers(struct qed_hwfn *p_hwfn,
+ struct qed_ll2_info *p_ll2_conn)
+{
+ struct qed_ooo_buffer *p_buffer;
+ int rc;
+
+ while ((p_buffer = qed_ooo_get_free_buffer(p_hwfn,
+ p_hwfn->p_ooo_info))) {
+ rc = qed_ll2_post_rx_buffer(p_hwfn,
+ p_ll2_conn->my_id,
+ p_buffer->rx_buffer_phys_addr,
+ 0, p_buffer, true);
+ if (rc) {
+ qed_ooo_put_free_buffer(p_hwfn,
+ p_hwfn->p_ooo_info, p_buffer);
+ break;
+ }
+ }
+}
+
+static int qed_ll2_lb_rxq_completion(struct qed_hwfn *p_hwfn, void *p_cookie)
+{
+ struct qed_ll2_info *p_ll2_conn = (struct qed_ll2_info *)p_cookie;
+ int rc;
+
+ rc = qed_ll2_lb_rxq_handler(p_hwfn, p_ll2_conn);
+ if (rc)
+ return rc;
+
+ qed_ooo_submit_rx_buffers(p_hwfn, p_ll2_conn);
+ qed_ooo_submit_tx_buffers(p_hwfn, p_ll2_conn);
+
+ return 0;
+}
+
+static int qed_ll2_lb_txq_completion(struct qed_hwfn *p_hwfn, void *p_cookie)
+{
+ struct qed_ll2_info *p_ll2_conn = (struct qed_ll2_info *)p_cookie;
+ struct qed_ll2_tx_queue *p_tx = &p_ll2_conn->tx_queue;
+ struct qed_ll2_tx_packet *p_pkt = NULL;
+ struct qed_ooo_buffer *p_buffer;
+ bool b_dont_submit_rx = false;
+ u16 new_idx = 0, num_bds = 0;
+ int rc;
+
+ new_idx = le16_to_cpu(*p_tx->p_fw_cons);
+ num_bds = ((s16)new_idx - (s16)p_tx->bds_idx);
+
+ if (!num_bds)
+ return 0;
+
+ while (num_bds) {
+ if (list_empty(&p_tx->active_descq))
+ return -EINVAL;
+
+ p_pkt = list_first_entry(&p_tx->active_descq,
+ struct qed_ll2_tx_packet, list_entry);
+ if (!p_pkt)
+ return -EINVAL;
+
+ if (p_pkt->bd_used != 1) {
+ DP_NOTICE(p_hwfn,
+ "Unexpectedly many BDs(%d) in TX OOO completion\n",
+ p_pkt->bd_used);
+ return -EINVAL;
+ }
+
+ list_del(&p_pkt->list_entry);
+
+ num_bds--;
+ p_tx->bds_idx++;
+ qed_chain_consume(&p_tx->txq_chain);
+
+ p_buffer = (struct qed_ooo_buffer *)p_pkt->cookie;
+ list_add_tail(&p_pkt->list_entry, &p_tx->free_descq);
+
+ if (b_dont_submit_rx) {
+ qed_ooo_put_free_buffer(p_hwfn, p_hwfn->p_ooo_info,
+ p_buffer);
+ continue;
+ }
+
+ rc = qed_ll2_post_rx_buffer(p_hwfn, p_ll2_conn->my_id,
+ p_buffer->rx_buffer_phys_addr, 0,
+ p_buffer, true);
+ if (rc != 0) {
+ qed_ooo_put_free_buffer(p_hwfn,
+ p_hwfn->p_ooo_info, p_buffer);
+ b_dont_submit_rx = true;
+ }
+ }
+
+ qed_ooo_submit_tx_buffers(p_hwfn, p_ll2_conn);
+
+ return 0;
+}
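
qed_ll2_lb_txq_completion() computes the number of newly completed BDs as ((s16)new_idx - (s16)bds_idx), a signed 16-bit difference that stays correct when the chain index wraps past 0xffff. A standalone check of that arithmetic:

#include <stdio.h>
#include <stdint.h>

static uint16_t pending(uint16_t fw_cons, uint16_t drv_cons)
{
	/* Same idiom as ((s16)new_idx - (s16)p_tx->bds_idx) above. */
	return (uint16_t)((int16_t)fw_cons - (int16_t)drv_cons);
}

int main(void)
{
	/* No wrap: fw at 100, driver at 95 -> 5 BDs to complete. */
	printf("%u\n", (unsigned)pending(100, 95));
	/* Wrap: fw wrapped to 2, driver still at 65534 -> 4 BDs. */
	printf("%u\n", (unsigned)pending(2, 65534));
	return 0;
}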
+
+static int
+qed_ll2_acquire_connection_ooo(struct qed_hwfn *p_hwfn,
+ struct qed_ll2_info *p_ll2_info,
+ u16 rx_num_ooo_buffers, u16 mtu)
+{
+ struct qed_ooo_buffer *p_buf = NULL;
+ void *p_virt;
+ u16 buf_idx;
+ int rc = 0;
+
+ if (p_ll2_info->conn_type != QED_LL2_TYPE_ISCSI_OOO)
+ return rc;
+
+ if (!rx_num_ooo_buffers)
+ return -EINVAL;
+
+ for (buf_idx = 0; buf_idx < rx_num_ooo_buffers; buf_idx++) {
+ p_buf = kzalloc(sizeof(*p_buf), GFP_KERNEL);
+ if (!p_buf) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ p_buf->rx_buffer_size = mtu + 26 + ETH_CACHE_LINE_SIZE;
+ p_buf->rx_buffer_size = (p_buf->rx_buffer_size +
+ ETH_CACHE_LINE_SIZE - 1) &
+ ~(ETH_CACHE_LINE_SIZE - 1);
+ p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
+ p_buf->rx_buffer_size,
+ &p_buf->rx_buffer_phys_addr,
+ GFP_KERNEL);
+ if (!p_virt) {
+ kfree(p_buf);
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ p_buf->rx_buffer_virt_addr = p_virt;
+ qed_ooo_put_free_buffer(p_hwfn, p_hwfn->p_ooo_info, p_buf);
+ }
+
+ DP_VERBOSE(p_hwfn, QED_MSG_LL2,
+ "Allocated [%04x] LL2 OOO buffers [each of size 0x%08x]\n",
+ rx_num_ooo_buffers, p_buf->rx_buffer_size);
+
+out:
+ return rc;
+}
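
qed_ll2_acquire_connection_ooo() sizes each OOO buffer as mtu + 26 + ETH_CACHE_LINE_SIZE and then rounds up to a cache-line multiple with the (x + a - 1) & ~(a - 1) idiom, which is valid because the alignment is a power of two. A worked example, assuming a 64-byte cache line:

#include <stdio.h>

#define CACHE_LINE 64	/* assumed value of ETH_CACHE_LINE_SIZE */

static unsigned int align_up(unsigned int x)
{
	return (x + CACHE_LINE - 1) & ~(CACHE_LINE - 1);
}

int main(void)
{
	unsigned int mtu = 1500;
	unsigned int raw = mtu + 26 + CACHE_LINE;	/* 1590 */

	/* 1590 rounds up to 1600, the next multiple of 64. */
	printf("raw %u -> aligned %u\n", raw, align_up(raw));
	return 0;
}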
+
+static void
+qed_ll2_establish_connection_ooo(struct qed_hwfn *p_hwfn,
+ struct qed_ll2_info *p_ll2_conn)
+{
+ if (p_ll2_conn->conn_type != QED_LL2_TYPE_ISCSI_OOO)
+ return;
+
+ qed_ooo_release_all_isles(p_hwfn, p_hwfn->p_ooo_info);
+ qed_ooo_submit_rx_buffers(p_hwfn, p_ll2_conn);
+}
+
+static void qed_ll2_release_connection_ooo(struct qed_hwfn *p_hwfn,
+ struct qed_ll2_info *p_ll2_conn)
+{
+ struct qed_ooo_buffer *p_buffer;
+
+ if (p_ll2_conn->conn_type != QED_LL2_TYPE_ISCSI_OOO)
+ return;
+
+ qed_ooo_release_all_isles(p_hwfn, p_hwfn->p_ooo_info);
+ while ((p_buffer = qed_ooo_get_free_buffer(p_hwfn,
+ p_hwfn->p_ooo_info))) {
+ dma_free_coherent(&p_hwfn->cdev->pdev->dev,
+ p_buffer->rx_buffer_size,
+ p_buffer->rx_buffer_virt_addr,
+ p_buffer->rx_buffer_phys_addr);
+ kfree(p_buffer);
+ }
+}
+
+static void qed_ll2_stop_ooo(struct qed_dev *cdev)
+{
+ struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+ u8 *handle = &hwfn->pf_params.iscsi_pf_params.ll2_ooo_queue_id;
+
+ DP_VERBOSE(cdev, QED_MSG_STORAGE, "Stopping LL2 OOO queue [%02x]\n",
+ *handle);
+
+ qed_ll2_terminate_connection(hwfn, *handle);
+ qed_ll2_release_connection(hwfn, *handle);
+ *handle = QED_LL2_UNUSED_HANDLE;
+}
+
+static int qed_ll2_start_ooo(struct qed_dev *cdev,
+ struct qed_ll2_params *params)
+{
+ struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+ u8 *handle = &hwfn->pf_params.iscsi_pf_params.ll2_ooo_queue_id;
+ struct qed_ll2_info *ll2_info;
+ int rc;
+
+ ll2_info = kzalloc(sizeof(*ll2_info), GFP_KERNEL);
+ if (!ll2_info)
+ return -ENOMEM;
+ ll2_info->conn_type = QED_LL2_TYPE_ISCSI_OOO;
+ ll2_info->mtu = params->mtu;
+ ll2_info->rx_drop_ttl0_flg = params->drop_ttl0_packets;
+ ll2_info->rx_vlan_removal_en = params->rx_vlan_stripping;
+ ll2_info->tx_tc = OOO_LB_TC;
+ ll2_info->tx_dest = CORE_TX_DEST_LB;
+
+ rc = qed_ll2_acquire_connection(hwfn, ll2_info,
+ QED_LL2_RX_SIZE, QED_LL2_TX_SIZE,
+ handle);
+ kfree(ll2_info);
+ if (rc) {
+ DP_INFO(cdev, "Failed to acquire LL2 OOO connection\n");
+ goto out;
+ }
- b_last = list_empty(&p_rx->active_descq);
+ rc = qed_ll2_establish_connection(hwfn, *handle);
+ if (rc) {
+ DP_INFO(cdev, "Failed to establist LL2 OOO connection\n");
+ goto fail;
}
+
+ return 0;
+
+fail:
+ qed_ll2_release_connection(hwfn, *handle);
+out:
+ *handle = QED_LL2_UNUSED_HANDLE;
+ return rc;
}
+#else /* IS_ENABLED(CONFIG_QED_ISCSI) */
+static int qed_ll2_lb_rxq_completion(struct qed_hwfn *p_hwfn,
+ void *p_cookie) { return -EINVAL; }
+static int qed_ll2_lb_txq_completion(struct qed_hwfn *p_hwfn,
+ void *p_cookie) { return -EINVAL; }
+static inline int
+qed_ll2_acquire_connection_ooo(struct qed_hwfn *p_hwfn,
+ struct qed_ll2_info *p_ll2_info,
+ u16 rx_num_ooo_buffers, u16 mtu) { return 0; }
+static inline void
+qed_ll2_establish_connection_ooo(struct qed_hwfn *p_hwfn,
+ struct qed_ll2_info *p_ll2_conn) { return; }
+static inline void
+qed_ll2_release_connection_ooo(struct qed_hwfn *p_hwfn,
+ struct qed_ll2_info *p_ll2_conn) { return; }
+static inline void qed_ll2_stop_ooo(struct qed_dev *cdev) { return; }
+static inline int qed_ll2_start_ooo(struct qed_dev *cdev,
+ struct qed_ll2_params *params)
+ { return -EINVAL; }
+#endif /* IS_ENABLED(CONFIG_QED_ISCSI) */
static int qed_sp_ll2_rx_queue_start(struct qed_hwfn *p_hwfn,
struct qed_ll2_info *p_ll2_conn,
@@ -588,7 +1043,8 @@ static int qed_sp_ll2_rx_queue_start(struct qed_hwfn *p_hwfn,
p_ramrod->drop_ttl0_flg = p_ll2_conn->rx_drop_ttl0_flg;
p_ramrod->inner_vlan_removal_en = p_ll2_conn->rx_vlan_removal_en;
p_ramrod->queue_id = p_ll2_conn->queue_id;
- p_ramrod->main_func_queue = 1;
+ p_ramrod->main_func_queue = (conn_type == QED_LL2_TYPE_ISCSI_OOO) ? 0
+ : 1;
if ((IS_MF_DEFAULT(p_hwfn) || IS_MF_SI(p_hwfn)) &&
p_ramrod->main_func_queue && (conn_type != QED_LL2_TYPE_ROCE)) {
@@ -619,6 +1075,11 @@ static int qed_sp_ll2_tx_queue_start(struct qed_hwfn *p_hwfn,
if (!QED_LL2_TX_REGISTERED(p_ll2_conn))
return 0;
+ if (p_ll2_conn->conn_type == QED_LL2_TYPE_ISCSI_OOO)
+ p_ll2_conn->tx_stats_en = 0;
+ else
+ p_ll2_conn->tx_stats_en = 1;
+
/* Get SPQ entry */
memset(&init_data, 0, sizeof(init_data));
init_data.cid = p_ll2_conn->cid;
@@ -636,7 +1097,6 @@ static int qed_sp_ll2_tx_queue_start(struct qed_hwfn *p_hwfn,
p_ramrod->sb_id = cpu_to_le16(qed_int_get_sp_sb_id(p_hwfn));
p_ramrod->sb_index = p_tx->tx_sb_index;
p_ramrod->mtu = cpu_to_le16(p_ll2_conn->mtu);
- p_ll2_conn->tx_stats_en = 1;
p_ramrod->stats_en = p_ll2_conn->tx_stats_en;
p_ramrod->stats_id = p_ll2_conn->tx_stats_id;
@@ -860,9 +1320,19 @@ int qed_ll2_acquire_connection(struct qed_hwfn *p_hwfn,
if (rc)
goto q_allocate_fail;
+ rc = qed_ll2_acquire_connection_ooo(p_hwfn, p_ll2_info,
+ rx_num_desc * 2, p_params->mtu);
+ if (rc)
+ goto q_allocate_fail;
+
/* Register callbacks for the Rx/Tx queues */
- comp_rx_cb = qed_ll2_rxq_completion;
- comp_tx_cb = qed_ll2_txq_completion;
+ if (p_params->conn_type == QED_LL2_TYPE_ISCSI_OOO) {
+ comp_rx_cb = qed_ll2_lb_rxq_completion;
+ comp_tx_cb = qed_ll2_lb_txq_completion;
+ } else {
+ comp_rx_cb = qed_ll2_rxq_completion;
+ comp_tx_cb = qed_ll2_txq_completion;
+ }
if (rx_num_desc) {
qed_int_register_cb(p_hwfn, comp_rx_cb,
@@ -975,6 +1445,8 @@ int qed_ll2_establish_connection(struct qed_hwfn *p_hwfn, u8 connection_handle)
if (p_hwfn->hw_info.personality != QED_PCI_ETH_ROCE)
qed_wr(p_hwfn, p_hwfn->p_main_ptt, PRS_REG_USE_LIGHT_L2, 1);
+ qed_ll2_establish_connection_ooo(p_hwfn, p_ll2_conn);
+
return rc;
}
@@ -1213,6 +1685,7 @@ int qed_ll2_prepare_tx_packet(struct qed_hwfn *p_hwfn,
u16 vlan,
u8 bd_flags,
u16 l4_hdr_offset_w,
+ enum qed_ll2_tx_dest e_tx_dest,
enum qed_ll2_roce_flavor_type qed_roce_flavor,
dma_addr_t first_frag,
u16 first_frag_len, void *cookie, u8 notify_fw)
@@ -1222,6 +1695,7 @@ int qed_ll2_prepare_tx_packet(struct qed_hwfn *p_hwfn,
enum core_roce_flavor_type roce_flavor;
struct qed_ll2_tx_queue *p_tx;
struct qed_chain *p_tx_chain;
+ enum core_tx_dest tx_dest;
unsigned long flags;
int rc = 0;
@@ -1252,6 +1726,8 @@ int qed_ll2_prepare_tx_packet(struct qed_hwfn *p_hwfn,
goto out;
}
+ tx_dest = e_tx_dest == QED_LL2_TX_DEST_NW ? CORE_TX_DEST_NW :
+ CORE_TX_DEST_LB;
if (qed_roce_flavor == QED_LL2_ROCE) {
roce_flavor = CORE_ROCE;
} else if (qed_roce_flavor == QED_LL2_RROCE) {
@@ -1266,7 +1742,7 @@ int qed_ll2_prepare_tx_packet(struct qed_hwfn *p_hwfn,
num_of_bds, first_frag,
first_frag_len, cookie, notify_fw);
qed_ll2_prepare_tx_packet_set_bd(p_hwfn, p_ll2_conn, p_curp,
- num_of_bds, CORE_TX_DEST_NW,
+ num_of_bds, tx_dest,
vlan, bd_flags, l4_hdr_offset_w,
roce_flavor,
first_frag, first_frag_len);
@@ -1341,6 +1817,9 @@ int qed_ll2_terminate_connection(struct qed_hwfn *p_hwfn, u8 connection_handle)
qed_ll2_rxq_flush(p_hwfn, connection_handle);
}
+ if (p_ll2_conn->conn_type == QED_LL2_TYPE_ISCSI_OOO)
+ qed_ooo_release_all_isles(p_hwfn, p_hwfn->p_ooo_info);
+
return rc;
}
@@ -1371,6 +1850,8 @@ void qed_ll2_release_connection(struct qed_hwfn *p_hwfn, u8 connection_handle)
qed_cxt_release_cid(p_hwfn, p_ll2_conn->cid);
+ qed_ll2_release_connection_ooo(p_hwfn, p_ll2_conn);
+
mutex_lock(&p_ll2_conn->mutex);
p_ll2_conn->b_active = false;
mutex_unlock(&p_ll2_conn->mutex);
@@ -1517,6 +1998,7 @@ static int qed_ll2_start(struct qed_dev *cdev, struct qed_ll2_params *params)
enum qed_ll2_conn_type conn_type;
struct qed_ptt *p_ptt;
int rc, i;
+ u8 gsi_enable = 1;
/* Initialize LL2 locks & lists */
INIT_LIST_HEAD(&cdev->ll2->list);
@@ -1548,6 +2030,7 @@ static int qed_ll2_start(struct qed_dev *cdev, struct qed_ll2_params *params)
switch (QED_LEADING_HWFN(cdev)->hw_info.personality) {
case QED_PCI_ISCSI:
conn_type = QED_LL2_TYPE_ISCSI;
+ gsi_enable = 0;
break;
case QED_PCI_ETH_ROCE:
conn_type = QED_LL2_TYPE_ROCE;
@@ -1564,7 +2047,7 @@ static int qed_ll2_start(struct qed_dev *cdev, struct qed_ll2_params *params)
ll2_info.rx_vlan_removal_en = params->rx_vlan_stripping;
ll2_info.tx_tc = 0;
ll2_info.tx_dest = CORE_TX_DEST_NW;
- ll2_info.gsi_enable = 1;
+ ll2_info.gsi_enable = gsi_enable;
rc = qed_ll2_acquire_connection(QED_LEADING_HWFN(cdev), &ll2_info,
QED_LL2_RX_SIZE, QED_LL2_TX_SIZE,
@@ -1611,6 +2094,17 @@ static int qed_ll2_start(struct qed_dev *cdev, struct qed_ll2_params *params)
goto release_terminate;
}
+ if (cdev->hwfns[0].hw_info.personality == QED_PCI_ISCSI &&
+ cdev->hwfns[0].pf_params.iscsi_pf_params.ooo_enable) {
+ DP_VERBOSE(cdev, QED_MSG_STORAGE, "Starting OOO LL2 queue\n");
+ rc = qed_ll2_start_ooo(cdev, params);
+ if (rc) {
+ DP_INFO(cdev,
+ "Failed to initialize the OOO LL2 queue\n");
+ goto release_terminate;
+ }
+ }
+
p_ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev));
if (!p_ptt) {
DP_INFO(cdev, "Failed to acquire PTT\n");
@@ -1660,6 +2154,10 @@ static int qed_ll2_stop(struct qed_dev *cdev)
qed_ptt_release(QED_LEADING_HWFN(cdev), p_ptt);
eth_zero_addr(cdev->ll2_mac_address);
+ if (cdev->hwfns[0].hw_info.personality == QED_PCI_ISCSI &&
+ cdev->hwfns[0].pf_params.iscsi_pf_params.ooo_enable)
+ qed_ll2_stop_ooo(cdev);
+
rc = qed_ll2_terminate_connection(QED_LEADING_HWFN(cdev),
cdev->ll2->handle);
if (rc)
@@ -1714,7 +2212,8 @@ static int qed_ll2_start_xmit(struct qed_dev *cdev, struct sk_buff *skb)
rc = qed_ll2_prepare_tx_packet(QED_LEADING_HWFN(cdev),
cdev->ll2->handle,
1 + skb_shinfo(skb)->nr_frags,
- vlan, flags, 0, 0 /* RoCE FLAVOR */,
+ vlan, flags, 0, QED_LL2_TX_DEST_NW,
+ 0 /* RoCE FLAVOR */,
mapping, skb->len, skb, 1);
if (rc)
goto err;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.h b/drivers/net/ethernet/qlogic/qed/qed_ll2.h
index 4e3d62a16cab..6625a3ae5a33 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_ll2.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.h
@@ -41,6 +41,12 @@ enum qed_ll2_conn_type {
MAX_QED_LL2_RX_CONN_TYPE
};
+enum qed_ll2_tx_dest {
+ QED_LL2_TX_DEST_NW, /* Light L2 TX Destination to the Network */
+ QED_LL2_TX_DEST_LB, /* Light L2 TX Destination to the Loopback */
+ QED_LL2_TX_DEST_MAX
+};
+
struct qed_ll2_rx_packet {
struct list_head list_entry;
struct core_rx_bd_with_buff_len *rxq_bd;
@@ -192,6 +198,8 @@ int qed_ll2_post_rx_buffer(struct qed_hwfn *p_hwfn,
* @param l4_hdr_offset_w L4 Header Offset from start of packet
* (in words). This is needed if both l4_csum
* and ipv6_ext are set
+ * @param e_tx_dest indicates if the packet is to be transmitted via
+ * loopback or to the network
* @param first_frag
* @param first_frag_len
* @param cookie
@@ -206,6 +214,7 @@ int qed_ll2_prepare_tx_packet(struct qed_hwfn *p_hwfn,
u16 vlan,
u8 bd_flags,
u16 l4_hdr_offset_w,
+ enum qed_ll2_tx_dest e_tx_dest,
enum qed_ll2_roce_flavor_type qed_roce_flavor,
dma_addr_t first_frag,
u16 first_frag_len, void *cookie, u8 notify_fw);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c
index 333c7442e48a..aeb98d8c5626 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_main.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_main.c
@@ -221,6 +221,10 @@ int qed_fill_dev_info(struct qed_dev *cdev,
dev_info->fw_eng = FW_ENGINEERING_VERSION;
dev_info->mf_mode = cdev->mf_mode;
dev_info->tx_switching = true;
+
+ if (QED_LEADING_HWFN(cdev)->hw_info.b_wol_support ==
+ QED_WOL_SUPPORT_PME)
+ dev_info->wol_support = true;
} else {
qed_vf_get_fw_version(&cdev->hwfns[0], &dev_info->fw_major,
&dev_info->fw_minor, &dev_info->fw_rev,
@@ -243,6 +247,8 @@ int qed_fill_dev_info(struct qed_dev *cdev,
&dev_info->mfw_rev, NULL);
}
+ dev_info->mtu = QED_LEADING_HWFN(cdev)->hw_info.mtu;
+
return 0;
}
@@ -1430,11 +1436,106 @@ static int qed_set_led(struct qed_dev *cdev, enum qed_led_mode mode)
return status;
}
+static int qed_update_wol(struct qed_dev *cdev, bool enabled)
+{
+ struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_ptt *ptt;
+ int rc = 0;
+
+ if (IS_VF(cdev))
+ return 0;
+
+ ptt = qed_ptt_acquire(hwfn);
+ if (!ptt)
+ return -EAGAIN;
+
+ rc = qed_mcp_ov_update_wol(hwfn, ptt, enabled ? QED_OV_WOL_ENABLED
+ : QED_OV_WOL_DISABLED);
+ if (rc)
+ goto out;
+ rc = qed_mcp_ov_update_current_config(hwfn, ptt, QED_OV_CLIENT_DRV);
+
+out:
+ qed_ptt_release(hwfn, ptt);
+ return rc;
+}
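
qed_update_wol() and the three helpers that follow share one shape: acquire a PTT window, issue one or two mailbox commands, and release the window on every exit path through a single out: label. A userspace skeleton of that idiom; every name below is invented for illustration:

#include <stdio.h>

static int ptt_busy;

static int ptt_acquire(void)
{
	if (ptt_busy)
		return -1;
	ptt_busy = 1;
	return 0;
}

static void ptt_release(void)
{
	ptt_busy = 0;
}

static int mbox_cmd(const char *what)
{
	printf("mbox: %s\n", what);
	return 0;
}

static int update_setting(int enabled)
{
	int rc;

	if (ptt_acquire())
		return -11;	/* mirrors returning -EAGAIN */

	rc = mbox_cmd(enabled ? "enable" : "disable");
	if (rc)
		goto out;	/* single exit keeps the release unconditional */
	rc = mbox_cmd("mark config as driver-owned");
out:
	ptt_release();
	return rc;
}

int main(void)
{
	return update_setting(1);
}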
+
+static int qed_update_drv_state(struct qed_dev *cdev, bool active)
+{
+ struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_ptt *ptt;
+ int status = 0;
+
+ if (IS_VF(cdev))
+ return 0;
+
+ ptt = qed_ptt_acquire(hwfn);
+ if (!ptt)
+ return -EAGAIN;
+
+ status = qed_mcp_ov_update_driver_state(hwfn, ptt, active ?
+ QED_OV_DRIVER_STATE_ACTIVE :
+ QED_OV_DRIVER_STATE_DISABLED);
+
+ qed_ptt_release(hwfn, ptt);
+
+ return status;
+}
+
+static int qed_update_mac(struct qed_dev *cdev, u8 *mac)
+{
+ struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_ptt *ptt;
+ int status = 0;
+
+ if (IS_VF(cdev))
+ return 0;
+
+ ptt = qed_ptt_acquire(hwfn);
+ if (!ptt)
+ return -EAGAIN;
+
+ status = qed_mcp_ov_update_mac(hwfn, ptt, mac);
+ if (status)
+ goto out;
+
+ status = qed_mcp_ov_update_current_config(hwfn, ptt, QED_OV_CLIENT_DRV);
+
+out:
+ qed_ptt_release(hwfn, ptt);
+ return status;
+}
+
+static int qed_update_mtu(struct qed_dev *cdev, u16 mtu)
+{
+ struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_ptt *ptt;
+ int status = 0;
+
+ if (IS_VF(cdev))
+ return 0;
+
+ ptt = qed_ptt_acquire(hwfn);
+ if (!ptt)
+ return -EAGAIN;
+
+ status = qed_mcp_ov_update_mtu(hwfn, ptt, mtu);
+ if (status)
+ goto out;
+
+ status = qed_mcp_ov_update_current_config(hwfn, ptt, QED_OV_CLIENT_DRV);
+
+out:
+ qed_ptt_release(hwfn, ptt);
+ return status;
+}
+
static struct qed_selftest_ops qed_selftest_ops_pass = {
.selftest_memory = &qed_selftest_memory,
.selftest_interrupt = &qed_selftest_interrupt,
.selftest_register = &qed_selftest_register,
.selftest_clock = &qed_selftest_clock,
+ .selftest_nvram = &qed_selftest_nvram,
};
const struct qed_common_ops qed_common_ops_pass = {
@@ -1464,6 +1565,10 @@ const struct qed_common_ops qed_common_ops_pass = {
.get_coalesce = &qed_get_coalesce,
.set_coalesce = &qed_set_coalesce,
.set_led = &qed_set_led,
+ .update_drv_state = &qed_update_drv_state,
+ .update_mac = &qed_update_mac,
+ .update_mtu = &qed_update_mtu,
+ .update_wol = &qed_update_wol,
};
void qed_get_protocol_stats(struct qed_dev *cdev,
diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
index bdc9ba92f6d4..6dd3ce443484 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
@@ -14,6 +14,7 @@
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
+#include <linux/etherdevice.h>
#include "qed.h"
#include "qed_dcbx.h"
#include "qed_hsi.h"
@@ -329,6 +330,7 @@ static int qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn,
struct qed_mcp_mb_params *p_mb_params)
{
u32 union_data_addr;
+
int rc;
/* MCP not initialized */
@@ -374,11 +376,32 @@ int qed_mcp_cmd(struct qed_hwfn *p_hwfn,
u32 *o_mcp_param)
{
struct qed_mcp_mb_params mb_params;
+ union drv_union_data data_src;
int rc;
memset(&mb_params, 0, sizeof(mb_params));
+ memset(&data_src, 0, sizeof(data_src));
mb_params.cmd = cmd;
mb_params.param = param;
+
+ /* In case of UNLOAD_DONE, set the primary MAC */
+ if ((cmd == DRV_MSG_CODE_UNLOAD_DONE) &&
+ (p_hwfn->cdev->wol_config == QED_OV_WOL_ENABLED)) {
+ u8 *p_mac = p_hwfn->cdev->wol_mac;
+
+ data_src.wol_mac.mac_upper = p_mac[0] << 8 | p_mac[1];
+ data_src.wol_mac.mac_lower = p_mac[2] << 24 | p_mac[3] << 16 |
+ p_mac[4] << 8 | p_mac[5];
+
+ DP_VERBOSE(p_hwfn,
+ (QED_MSG_SP | NETIF_MSG_IFDOWN),
+ "Setting WoL MAC: %pM --> [%08x,%08x]\n",
+ p_mac, data_src.wol_mac.mac_upper,
+ data_src.wol_mac.mac_lower);
+
+ mb_params.p_data_src = &data_src;
+ }
+
rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
if (rc)
return rc;
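
The UNLOAD_DONE branch above packs the six MAC bytes into shmem's 16-bit mac_upper / 32-bit mac_lower layout, matching the unpacking done later in qed_mcp_fill_shmem_func_info(). A standalone check of both directions:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint8_t mac[6] = { 0x00, 0x0e, 0x1e, 0xaa, 0xbb, 0xcc };
	uint32_t upper, lower;

	upper = mac[0] << 8 | mac[1];
	lower = (uint32_t)mac[2] << 24 | mac[3] << 16 | mac[4] << 8 | mac[5];
	/* Expect [0000000e,1eaabbcc] for 00:0e:1e:aa:bb:cc. */
	printf("[%08x,%08x]\n", upper, lower);

	/* Unpacking mirrors the shmem read-back path. */
	printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
	       upper >> 8 & 0xff, upper & 0xff,
	       lower >> 24 & 0xff, lower >> 16 & 0xff,
	       lower >> 8 & 0xff, lower & 0xff);
	return 0;
}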
@@ -1001,28 +1024,89 @@ int qed_mcp_get_media_type(struct qed_dev *cdev, u32 *p_media_type)
return 0;
}
+/* Old MFW has a global configuration for all PFs regarding RDMA support */
+static void
+qed_mcp_get_shmem_proto_legacy(struct qed_hwfn *p_hwfn,
+ enum qed_pci_personality *p_proto)
+{
+ /* There was never a legacy MFW that published iWARP support,
+ * so at this point this is either plain L2 or RoCE.
+ */
+ if (test_bit(QED_DEV_CAP_ROCE, &p_hwfn->hw_info.device_capabilities))
+ *p_proto = QED_PCI_ETH_ROCE;
+ else
+ *p_proto = QED_PCI_ETH;
+
+ DP_VERBOSE(p_hwfn, NETIF_MSG_IFUP,
+ "According to Legacy capabilities, L2 personality is %08x\n",
+ (u32) *p_proto);
+}
+
+static int
+qed_mcp_get_shmem_proto_mfw(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ enum qed_pci_personality *p_proto)
+{
+ u32 resp = 0, param = 0;
+ int rc;
+
+ rc = qed_mcp_cmd(p_hwfn, p_ptt,
+ DRV_MSG_CODE_GET_PF_RDMA_PROTOCOL, 0, &resp, &param);
+ if (rc)
+ return rc;
+ if (resp != FW_MSG_CODE_OK) {
+ DP_VERBOSE(p_hwfn, NETIF_MSG_IFUP,
+ "MFW lacks support for command; Returns %08x\n",
+ resp);
+ return -EINVAL;
+ }
+
+ switch (param) {
+ case FW_MB_PARAM_GET_PF_RDMA_NONE:
+ *p_proto = QED_PCI_ETH;
+ break;
+ case FW_MB_PARAM_GET_PF_RDMA_ROCE:
+ *p_proto = QED_PCI_ETH_ROCE;
+ break;
+ case FW_MB_PARAM_GET_PF_RDMA_BOTH:
+ DP_NOTICE(p_hwfn,
+ "Current day drivers don't support RoCE & iWARP. Default to RoCE-only\n");
+ *p_proto = QED_PCI_ETH_ROCE;
+ break;
+ case FW_MB_PARAM_GET_PF_RDMA_IWARP:
+ default:
+ DP_NOTICE(p_hwfn,
+ "MFW answers GET_PF_RDMA_PROTOCOL but param is %08x\n",
+ param);
+ return -EINVAL;
+ }
+
+ DP_VERBOSE(p_hwfn,
+ NETIF_MSG_IFUP,
+ "According to capabilities, L2 personality is %08x [resp %08x param %08x]\n",
+ (u32) *p_proto, resp, param);
+ return 0;
+}
+
static int
qed_mcp_get_shmem_proto(struct qed_hwfn *p_hwfn,
struct public_func *p_info,
+ struct qed_ptt *p_ptt,
enum qed_pci_personality *p_proto)
{
int rc = 0;
switch (p_info->config & FUNC_MF_CFG_PROTOCOL_MASK) {
case FUNC_MF_CFG_PROTOCOL_ETHERNET:
- if (test_bit(QED_DEV_CAP_ROCE,
- &p_hwfn->hw_info.device_capabilities))
- *p_proto = QED_PCI_ETH_ROCE;
- else
- *p_proto = QED_PCI_ETH;
+ if (qed_mcp_get_shmem_proto_mfw(p_hwfn, p_ptt, p_proto))
+ qed_mcp_get_shmem_proto_legacy(p_hwfn, p_proto);
break;
case FUNC_MF_CFG_PROTOCOL_ISCSI:
*p_proto = QED_PCI_ISCSI;
break;
case FUNC_MF_CFG_PROTOCOL_ROCE:
DP_NOTICE(p_hwfn, "RoCE personality is not a valid value!\n");
- rc = -EINVAL;
- break;
+ /* Fallthrough */
default:
rc = -EINVAL;
}
@@ -1042,7 +1126,8 @@ int qed_mcp_fill_shmem_func_info(struct qed_hwfn *p_hwfn,
info->pause_on_host = (shmem_info.config &
FUNC_MF_CFG_PAUSE_ON_HOST_RING) ? 1 : 0;
- if (qed_mcp_get_shmem_proto(p_hwfn, &shmem_info, &info->protocol)) {
+ if (qed_mcp_get_shmem_proto(p_hwfn, &shmem_info, p_ptt,
+ &info->protocol)) {
DP_ERR(p_hwfn, "Unknown personality %08x\n",
(u32)(shmem_info.config & FUNC_MF_CFG_PROTOCOL_MASK));
return -EINVAL;
@@ -1057,6 +1142,9 @@ int qed_mcp_fill_shmem_func_info(struct qed_hwfn *p_hwfn,
info->mac[3] = (u8)(shmem_info.mac_lower >> 16);
info->mac[4] = (u8)(shmem_info.mac_lower >> 8);
info->mac[5] = (u8)(shmem_info.mac_lower);
+
+ /* Store primary MAC for later possible WoL */
+ memcpy(&p_hwfn->cdev->wol_mac, info->mac, ETH_ALEN);
} else {
DP_NOTICE(p_hwfn, "MAC is 0 in shmem\n");
}
@@ -1068,13 +1156,30 @@ int qed_mcp_fill_shmem_func_info(struct qed_hwfn *p_hwfn,
info->ovlan = (u16)(shmem_info.ovlan_stag & FUNC_MF_CFG_OV_STAG_MASK);
+ info->mtu = (u16)shmem_info.mtu_size;
+
+ p_hwfn->hw_info.b_wol_support = QED_WOL_SUPPORT_NONE;
+ p_hwfn->cdev->wol_config = (u8)QED_OV_WOL_DEFAULT;
+ if (qed_mcp_is_init(p_hwfn)) {
+ u32 resp = 0, param = 0;
+ int rc;
+
+ rc = qed_mcp_cmd(p_hwfn, p_ptt,
+ DRV_MSG_CODE_OS_WOL, 0, &resp, &param);
+ if (rc)
+ return rc;
+ if (resp == FW_MSG_CODE_OS_WOL_SUPPORTED)
+ p_hwfn->hw_info.b_wol_support = QED_WOL_SUPPORT_PME;
+ }
+
DP_VERBOSE(p_hwfn, (QED_MSG_SP | NETIF_MSG_IFUP),
- "Read configuration from shmem: pause_on_host %02x protocol %02x BW [%02x - %02x] MAC %02x:%02x:%02x:%02x:%02x:%02x wwn port %llx node %llx ovlan %04x\n",
+ "Read configuration from shmem: pause_on_host %02x protocol %02x BW [%02x - %02x] MAC %02x:%02x:%02x:%02x:%02x:%02x wwn port %llx node %llx ovlan %04x wol %02x\n",
info->pause_on_host, info->protocol,
info->bandwidth_min, info->bandwidth_max,
info->mac[0], info->mac[1], info->mac[2],
info->mac[3], info->mac[4], info->mac[5],
- info->wwn_port, info->wwn_node, info->ovlan);
+ info->wwn_port, info->wwn_node,
+ info->ovlan, (u8)p_hwfn->hw_info.b_wol_support);
return 0;
}
@@ -1223,6 +1328,178 @@ int qed_mcp_resume(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
return (cpu_mode & MCP_REG_CPU_MODE_SOFT_HALT) ? -EAGAIN : 0;
}
+int qed_mcp_ov_update_current_config(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ enum qed_ov_client client)
+{
+ u32 resp = 0, param = 0;
+ u32 drv_mb_param;
+ int rc;
+
+ switch (client) {
+ case QED_OV_CLIENT_DRV:
+ drv_mb_param = DRV_MB_PARAM_OV_CURR_CFG_OS;
+ break;
+ case QED_OV_CLIENT_USER:
+ drv_mb_param = DRV_MB_PARAM_OV_CURR_CFG_OTHER;
+ break;
+ case QED_OV_CLIENT_VENDOR_SPEC:
+ drv_mb_param = DRV_MB_PARAM_OV_CURR_CFG_VENDOR_SPEC;
+ break;
+ default:
+ DP_NOTICE(p_hwfn, "Invalid client type %d\n", client);
+ return -EINVAL;
+ }
+
+ rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_CURR_CFG,
+ drv_mb_param, &resp, &param);
+ if (rc)
+ DP_ERR(p_hwfn, "MCP response failure, aborting\n");
+
+ return rc;
+}
+
+int qed_mcp_ov_update_driver_state(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ enum qed_ov_driver_state drv_state)
+{
+ u32 resp = 0, param = 0;
+ u32 drv_mb_param;
+ int rc;
+
+ switch (drv_state) {
+ case QED_OV_DRIVER_STATE_NOT_LOADED:
+ drv_mb_param = DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_NOT_LOADED;
+ break;
+ case QED_OV_DRIVER_STATE_DISABLED:
+ drv_mb_param = DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_DISABLED;
+ break;
+ case QED_OV_DRIVER_STATE_ACTIVE:
+ drv_mb_param = DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_ACTIVE;
+ break;
+ default:
+ DP_NOTICE(p_hwfn, "Invalid driver state %d\n", drv_state);
+ return -EINVAL;
+ }
+
+ rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE,
+ drv_mb_param, &resp, &param);
+ if (rc)
+ DP_ERR(p_hwfn, "Failed to send driver state\n");
+
+ return rc;
+}
+
+int qed_mcp_ov_update_mtu(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u16 mtu)
+{
+ u32 resp = 0, param = 0;
+ u32 drv_mb_param;
+ int rc;
+
+ drv_mb_param = (u32)mtu << DRV_MB_PARAM_OV_MTU_SIZE_SHIFT;
+ rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_MTU,
+ drv_mb_param, &resp, &param);
+ if (rc)
+ DP_ERR(p_hwfn, "Failed to send mtu value, rc = %d\n", rc);
+
+ return rc;
+}
+
+int qed_mcp_ov_update_mac(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u8 *mac)
+{
+ struct qed_mcp_mb_params mb_params;
+ union drv_union_data union_data;
+ int rc;
+
+ memset(&mb_params, 0, sizeof(mb_params));
+ mb_params.cmd = DRV_MSG_CODE_SET_VMAC;
+ mb_params.param = DRV_MSG_CODE_VMAC_TYPE_MAC <<
+ DRV_MSG_CODE_VMAC_TYPE_SHIFT;
+ mb_params.param |= MCP_PF_ID(p_hwfn);
+ ether_addr_copy(&union_data.raw_data[0], mac);
+ mb_params.p_data_src = &union_data;
+ rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
+ if (rc)
+ DP_ERR(p_hwfn, "Failed to send mac address, rc = %d\n", rc);
+
+ /* Store primary MAC for later possible WoL */
+ memcpy(p_hwfn->cdev->wol_mac, mac, ETH_ALEN);
+
+ return rc;
+}
+
+int qed_mcp_ov_update_wol(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, enum qed_ov_wol wol)
+{
+ u32 resp = 0, param = 0;
+ u32 drv_mb_param;
+ int rc;
+
+ if (p_hwfn->hw_info.b_wol_support == QED_WOL_SUPPORT_NONE) {
+ DP_VERBOSE(p_hwfn, QED_MSG_SP,
+ "Can't change WoL configuration when WoL isn't supported\n");
+ return -EINVAL;
+ }
+
+ switch (wol) {
+ case QED_OV_WOL_DEFAULT:
+ drv_mb_param = DRV_MB_PARAM_WOL_DEFAULT;
+ break;
+ case QED_OV_WOL_DISABLED:
+ drv_mb_param = DRV_MB_PARAM_WOL_DISABLED;
+ break;
+ case QED_OV_WOL_ENABLED:
+ drv_mb_param = DRV_MB_PARAM_WOL_ENABLED;
+ break;
+ default:
+ DP_ERR(p_hwfn, "Invalid wol state %d\n", wol);
+ return -EINVAL;
+ }
+
+ rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_WOL,
+ drv_mb_param, &resp, &param);
+ if (rc)
+ DP_ERR(p_hwfn, "Failed to send wol mode, rc = %d\n", rc);
+
+ /* Store the WoL update for a future unload */
+ p_hwfn->cdev->wol_config = (u8)wol;
+
+ return rc;
+}
+
+int qed_mcp_ov_update_eswitch(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ enum qed_ov_eswitch eswitch)
+{
+ u32 resp = 0, param = 0;
+ u32 drv_mb_param;
+ int rc;
+
+ switch (eswitch) {
+ case QED_OV_ESWITCH_NONE:
+ drv_mb_param = DRV_MB_PARAM_ESWITCH_MODE_NONE;
+ break;
+ case QED_OV_ESWITCH_VEB:
+ drv_mb_param = DRV_MB_PARAM_ESWITCH_MODE_VEB;
+ break;
+ case QED_OV_ESWITCH_VEPA:
+ drv_mb_param = DRV_MB_PARAM_ESWITCH_MODE_VEPA;
+ break;
+ default:
+ DP_ERR(p_hwfn, "Invalid eswitch mode %d\n", eswitch);
+ return -EINVAL;
+ }
+
+ rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_ESWITCH_MODE,
+ drv_mb_param, &resp, &param);
+ if (rc)
+ DP_ERR(p_hwfn, "Failed to send eswitch mode, rc = %d\n", rc);
+
+ return rc;
+}
+
int qed_mcp_set_led(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, enum qed_led_mode mode)
{
@@ -1271,6 +1548,52 @@ int qed_mcp_mask_parities(struct qed_hwfn *p_hwfn,
return rc;
}
+int qed_mcp_nvm_read(struct qed_dev *cdev, u32 addr, u8 *p_buf, u32 len)
+{
+ u32 bytes_left = len, offset = 0, bytes_to_copy, read_len = 0;
+ struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
+ u32 resp = 0, resp_param = 0;
+ struct qed_ptt *p_ptt;
+ int rc = 0;
+
+ p_ptt = qed_ptt_acquire(p_hwfn);
+ if (!p_ptt)
+ return -EBUSY;
+
+ while (bytes_left > 0) {
+ bytes_to_copy = min_t(u32, bytes_left, MCP_DRV_NVM_BUF_LEN);
+
+ rc = qed_mcp_nvm_rd_cmd(p_hwfn, p_ptt,
+ DRV_MSG_CODE_NVM_READ_NVRAM,
+ addr + offset +
+ (bytes_to_copy <<
+ DRV_MB_PARAM_NVM_LEN_SHIFT),
+ &resp, &resp_param,
+ &read_len,
+ (u32 *)(p_buf + offset));
+
+ if (rc || (resp != FW_MSG_CODE_NVM_OK)) {
+ DP_NOTICE(cdev, "MCP command rc = %d\n", rc);
+ break;
+ }
+
+ /* This can be a lengthy process, and it's possible the scheduler
+ * isn't preemptible. Sleep roughly once per 4 KiB read to prevent
+ * CPU hogging.
+ */
+ if (bytes_left % 0x1000 <
+ (bytes_left - read_len) % 0x1000)
+ usleep_range(1000, 2000);
+
+ offset += read_len;
+ bytes_left -= read_len;
+ }
+
+ cdev->mcp_nvm_resp = resp;
+ qed_ptt_release(p_hwfn, p_ptt);
+
+ return rc;
+}
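
The sleep condition in the loop above is a modulo trick: it is true exactly when subtracting read_len makes bytes_left wrap past a multiple of 0x1000, so the thread yields roughly once per 4 KiB of NVM read rather than on every mailbox iteration. The same test as a standalone predicate (hypothetical helper name):

	/* True when the read just crossed a 4 KiB multiple of the
	 * remaining byte count; illustrative only.
	 */
	static bool crossed_4k_boundary(u32 bytes_left, u32 read_len)
	{
		return bytes_left % 0x1000 < (bytes_left - read_len) % 0x1000;
	}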
+
int qed_mcp_bist_register_test(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
u32 drv_mb_param = 0, rsp, param;
@@ -1312,3 +1635,101 @@ int qed_mcp_bist_clock_test(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
return rc;
}
+
+int qed_mcp_bist_nvm_test_get_num_images(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *num_images)
+{
+ u32 drv_mb_param = 0, rsp;
+ int rc = 0;
+
+ drv_mb_param = (DRV_MB_PARAM_BIST_NVM_TEST_NUM_IMAGES <<
+ DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT);
+
+ rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST,
+ drv_mb_param, &rsp, num_images);
+ if (rc)
+ return rc;
+
+ if ((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK)
+ rc = -EINVAL;
+
+ return rc;
+}
+
+int qed_mcp_bist_nvm_test_get_image_att(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct bist_nvm_image_att *p_image_att,
+ u32 image_index)
+{
+ u32 buf_size = 0, param, resp = 0, resp_param = 0;
+ int rc;
+
+ param = DRV_MB_PARAM_BIST_NVM_TEST_IMAGE_BY_INDEX <<
+ DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT;
+ param |= image_index << DRV_MB_PARAM_BIST_TEST_IMAGE_INDEX_SHIFT;
+
+ rc = qed_mcp_nvm_rd_cmd(p_hwfn, p_ptt,
+ DRV_MSG_CODE_BIST_TEST, param,
+ &resp, &resp_param,
+ &buf_size,
+ (u32 *)p_image_att);
+ if (rc)
+ return rc;
+
+ if (((resp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK) ||
+ (p_image_att->return_code != 1))
+ rc = -EINVAL;
+
+ return rc;
+}
+
+#define QED_RESC_ALLOC_VERSION_MAJOR 1
+#define QED_RESC_ALLOC_VERSION_MINOR 0
+#define QED_RESC_ALLOC_VERSION \
+ ((QED_RESC_ALLOC_VERSION_MAJOR << \
+ DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_SHIFT) | \
+ (QED_RESC_ALLOC_VERSION_MINOR << \
+ DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR_SHIFT))
+int qed_mcp_get_resc_info(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct resource_info *p_resc_info,
+ u32 *p_mcp_resp, u32 *p_mcp_param)
+{
+ struct qed_mcp_mb_params mb_params;
+ union drv_union_data union_data;
+ int rc;
+
+ memset(&mb_params, 0, sizeof(mb_params));
+ memset(&union_data, 0, sizeof(union_data));
+ mb_params.cmd = DRV_MSG_GET_RESOURCE_ALLOC_MSG;
+ mb_params.param = QED_RESC_ALLOC_VERSION;
+
+ /* Need a sufficiently large struct, as cmd_and_union
+ * is going to memcpy from and to it.
+ */
+ memcpy(&union_data.resource, p_resc_info, sizeof(*p_resc_info));
+
+ mb_params.p_data_src = &union_data;
+ mb_params.p_data_dst = &union_data;
+ rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
+ if (rc)
+ return rc;
+
+ /* Copy the data back */
+ memcpy(p_resc_info, &union_data.resource, sizeof(*p_resc_info));
+ *p_mcp_resp = mb_params.mcp_resp;
+ *p_mcp_param = mb_params.mcp_param;
+
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_SP,
+ "MFW resource_info: version 0x%x, res_id 0x%x, size 0x%x, offset 0x%x, vf_size 0x%x, vf_offset 0x%x, flags 0x%x\n",
+ *p_mcp_param,
+ p_resc_info->res_id,
+ p_resc_info->size,
+ p_resc_info->offset,
+ p_resc_info->vf_size,
+ p_resc_info->vf_offset, p_resc_info->flags);
+
+ return 0;
+}
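
A hedged sketch of how a caller might use the helper above; the resource id and the fallback label are assumptions for illustration, not taken from the driver:

	struct resource_info resc_info = { .res_id = 1 /* hypothetical */ };
	u32 mcp_resp = 0, mcp_param = 0;
	int rc;

	rc = qed_mcp_get_resc_info(p_hwfn, p_ptt, &resc_info,
				   &mcp_resp, &mcp_param);
	if (rc)
		goto use_defaults;	/* hypothetical fallback path */
	/* resc_info now holds the MFW view of size/offset for res_id */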
diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.h b/drivers/net/ethernet/qlogic/qed/qed_mcp.h
index dff520ed069b..407a2c1830fb 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_mcp.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.h
@@ -92,6 +92,8 @@ struct qed_mcp_function_info {
#define QED_MCP_VLAN_UNSET (0xffff)
u16 ovlan;
+
+ u16 mtu;
};
struct qed_mcp_nvm_common {
@@ -147,6 +149,30 @@ union qed_mcp_protocol_stats {
struct qed_mcp_rdma_stats rdma_stats;
};
+enum qed_ov_eswitch {
+ QED_OV_ESWITCH_NONE,
+ QED_OV_ESWITCH_VEB,
+ QED_OV_ESWITCH_VEPA
+};
+
+enum qed_ov_client {
+ QED_OV_CLIENT_DRV,
+ QED_OV_CLIENT_USER,
+ QED_OV_CLIENT_VENDOR_SPEC
+};
+
+enum qed_ov_driver_state {
+ QED_OV_DRIVER_STATE_NOT_LOADED,
+ QED_OV_DRIVER_STATE_DISABLED,
+ QED_OV_DRIVER_STATE_ACTIVE
+};
+
+enum qed_ov_wol {
+ QED_OV_WOL_DEFAULT,
+ QED_OV_WOL_DISABLED,
+ QED_OV_WOL_ENABLED
+};
+
/**
* @brief - returns the link params of the hw function
*
@@ -278,6 +304,69 @@ qed_mcp_send_drv_version(struct qed_hwfn *p_hwfn,
struct qed_mcp_drv_version *p_ver);
/**
+ * @brief Notify MFW about the change in base device properties
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param client - qed client type
+ *
+ * @return int - 0 - operation was successful.
+ */
+int qed_mcp_ov_update_current_config(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ enum qed_ov_client client);
+
+/**
+ * @brief Notify MFW about the driver state
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param drv_state - Driver state
+ *
+ * @return int - 0 - operation was successful.
+ */
+int qed_mcp_ov_update_driver_state(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ enum qed_ov_driver_state drv_state);
+
+/**
+ * @brief Send MTU size to MFW
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param mtu - MTU size
+ *
+ * @return int - 0 - operation was successful.
+ */
+int qed_mcp_ov_update_mtu(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u16 mtu);
+
+/**
+ * @brief Send MAC address to MFW
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param mac - MAC address
+ *
+ * @return int - 0 - operation was successful.
+ */
+int qed_mcp_ov_update_mac(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u8 *mac);
+
+/**
+ * @brief Send WOL mode to MFW
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param wol - WOL mode
+ *
+ * @return int - 0 - operation was successful.
+ */
+int qed_mcp_ov_update_wol(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ enum qed_ov_wol wol);
+
+/**
* @brief Set LED status
*
* @param p_hwfn
@@ -291,6 +380,18 @@ int qed_mcp_set_led(struct qed_hwfn *p_hwfn,
enum qed_led_mode mode);
/**
+ * @brief Read from nvm
+ *
+ * @param cdev
+ * @param addr - nvm offset
+ * @param p_buf - nvm read buffer
+ * @param len - buffer len
+ *
+ * @return int - 0 - operation was successful.
+ */
+int qed_mcp_nvm_read(struct qed_dev *cdev, u32 addr, u8 *p_buf, u32 len);
+
+/**
* @brief Bist register test
*
* @param p_hwfn - hw function
@@ -312,6 +413,35 @@ int qed_mcp_bist_register_test(struct qed_hwfn *p_hwfn,
int qed_mcp_bist_clock_test(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt);
+/**
+ * @brief Bist nvm test - get number of images
+ *
+ * @param p_hwfn - hw function
+ * @param p_ptt - PTT required for register access
+ * @param num_images - number of images if operation was
+ * successful. 0 if not.
+ *
+ * @return int - 0 - operation was successful.
+ */
+int qed_mcp_bist_nvm_test_get_num_images(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *num_images);
+
+/**
+ * @brief Bist nvm test - get image attributes by index
+ *
+ * @param p_hwfn - hw function
+ * @param p_ptt - PTT required for register access
+ * @param p_image_att - Attributes of image
+ * @param image_index - Index of image to get information for
+ *
+ * @return int - 0 - operation was successful.
+ */
+int qed_mcp_bist_nvm_test_get_image_att(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct bist_nvm_image_att *p_image_att,
+ u32 image_index);
+
/* Using hwfn number (and not pf_num) is required since in CMT mode,
* same pf_num may be used by two different hwfn
* TODO - this shouldn't really be in .h file, but until all fields
@@ -546,4 +676,32 @@ int __qed_configure_pf_min_bandwidth(struct qed_hwfn *p_hwfn,
int qed_mcp_mask_parities(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, u32 mask_parities);
+/**
+ * @brief Send eswitch mode to MFW
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param eswitch - eswitch mode
+ *
+ * @return int - 0 - operation was successful.
+ */
+int qed_mcp_ov_update_eswitch(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ enum qed_ov_eswitch eswitch);
+
+/**
+ * @brief - Gets the MFW allocation info for the given resource
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param p_resc_info - descriptor of requested resource
+ * @param p_mcp_resp
+ * @param p_mcp_param
+ *
+ * @return int - 0 - operation was successful.
+ */
+int qed_mcp_get_resc_info(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct resource_info *p_resc_info,
+ u32 *p_mcp_resp, u32 *p_mcp_param);
#endif
diff --git a/drivers/net/ethernet/qlogic/qed/qed_ooo.c b/drivers/net/ethernet/qlogic/qed/qed_ooo.c
new file mode 100644
index 000000000000..155abcb507fd
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_ooo.c
@@ -0,0 +1,501 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#include <linux/types.h>
+#include <linux/dma-mapping.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include "qed.h"
+#include "qed_iscsi.h"
+#include "qed_ll2.h"
+#include "qed_ooo.h"
+
+static struct qed_ooo_archipelago *
+qed_ooo_seek_archipelago(struct qed_hwfn *p_hwfn,
+ struct qed_ooo_info *p_ooo_info, u32 cid)
+{
+ struct qed_ooo_archipelago *p_archipelago = NULL;
+
+ list_for_each_entry(p_archipelago,
+ &p_ooo_info->archipelagos_list, list_entry) {
+ if (p_archipelago->cid == cid)
+ return p_archipelago;
+ }
+
+ return NULL;
+}
+
+static struct qed_ooo_isle *qed_ooo_seek_isle(struct qed_hwfn *p_hwfn,
+ struct qed_ooo_info *p_ooo_info,
+ u32 cid, u8 isle)
+{
+ struct qed_ooo_archipelago *p_archipelago = NULL;
+ struct qed_ooo_isle *p_isle = NULL;
+ u8 the_num_of_isle = 1;
+
+ p_archipelago = qed_ooo_seek_archipelago(p_hwfn, p_ooo_info, cid);
+ if (!p_archipelago) {
+ DP_NOTICE(p_hwfn,
+ "Connection %d is not found in OOO list\n", cid);
+ return NULL;
+ }
+
+ list_for_each_entry(p_isle, &p_archipelago->isles_list, list_entry) {
+ if (the_num_of_isle == isle)
+ return p_isle;
+ the_num_of_isle++;
+ }
+
+ return NULL;
+}
+
+void qed_ooo_save_history_entry(struct qed_hwfn *p_hwfn,
+ struct qed_ooo_info *p_ooo_info,
+ struct ooo_opaque *p_cqe)
+{
+ struct qed_ooo_history *p_history = &p_ooo_info->ooo_history;
+
+ if (p_history->head_idx == p_history->num_of_cqes)
+ p_history->head_idx = 0;
+ p_history->p_cqes[p_history->head_idx] = *p_cqe;
+ p_history->head_idx++;
+}
+
+struct qed_ooo_info *qed_ooo_alloc(struct qed_hwfn *p_hwfn)
+{
+ struct qed_ooo_info *p_ooo_info;
+ u16 max_num_archipelagos = 0;
+ u16 max_num_isles = 0;
+ u32 i;
+
+ if (p_hwfn->hw_info.personality != QED_PCI_ISCSI) {
+ DP_NOTICE(p_hwfn,
+ "Failed to allocate qed_ooo_info: unknown personality\n");
+ return NULL;
+ }
+
+ max_num_archipelagos = p_hwfn->pf_params.iscsi_pf_params.num_cons;
+ max_num_isles = QED_MAX_NUM_ISLES + max_num_archipelagos;
+
+ if (!max_num_archipelagos) {
+ DP_NOTICE(p_hwfn,
+ "Failed to allocate qed_ooo_info: unknown amount of connections\n");
+ return NULL;
+ }
+
+ p_ooo_info = kzalloc(sizeof(*p_ooo_info), GFP_KERNEL);
+ if (!p_ooo_info)
+ return NULL;
+
+ INIT_LIST_HEAD(&p_ooo_info->free_buffers_list);
+ INIT_LIST_HEAD(&p_ooo_info->ready_buffers_list);
+ INIT_LIST_HEAD(&p_ooo_info->free_isles_list);
+ INIT_LIST_HEAD(&p_ooo_info->free_archipelagos_list);
+ INIT_LIST_HEAD(&p_ooo_info->archipelagos_list);
+
+ p_ooo_info->p_isles_mem = kcalloc(max_num_isles,
+ sizeof(struct qed_ooo_isle),
+ GFP_KERNEL);
+ if (!p_ooo_info->p_isles_mem)
+ goto no_isles_mem;
+
+ for (i = 0; i < max_num_isles; i++) {
+ INIT_LIST_HEAD(&p_ooo_info->p_isles_mem[i].buffers_list);
+ list_add_tail(&p_ooo_info->p_isles_mem[i].list_entry,
+ &p_ooo_info->free_isles_list);
+ }
+
+ p_ooo_info->p_archipelagos_mem =
+ kcalloc(max_num_archipelagos,
+ sizeof(struct qed_ooo_archipelago),
+ GFP_KERNEL);
+ if (!p_ooo_info->p_archipelagos_mem)
+ goto no_archipelagos_mem;
+
+ for (i = 0; i < max_num_archipelagos; i++) {
+ INIT_LIST_HEAD(&p_ooo_info->p_archipelagos_mem[i].isles_list);
+ list_add_tail(&p_ooo_info->p_archipelagos_mem[i].list_entry,
+ &p_ooo_info->free_archipelagos_list);
+ }
+
+ p_ooo_info->ooo_history.p_cqes =
+ kcalloc(QED_MAX_NUM_OOO_HISTORY_ENTRIES,
+ sizeof(struct ooo_opaque),
+ GFP_KERNEL);
+ if (!p_ooo_info->ooo_history.p_cqes)
+ goto no_history_mem;
+
+ return p_ooo_info;
+
+no_history_mem:
+ kfree(p_ooo_info->p_archipelagos_mem);
+no_archipelagos_mem:
+ kfree(p_ooo_info->p_isles_mem);
+no_isles_mem:
+ kfree(p_ooo_info);
+ return NULL;
+}
+
+void qed_ooo_release_connection_isles(struct qed_hwfn *p_hwfn,
+ struct qed_ooo_info *p_ooo_info, u32 cid)
+{
+ struct qed_ooo_archipelago *p_archipelago;
+ struct qed_ooo_buffer *p_buffer;
+ struct qed_ooo_isle *p_isle;
+ bool b_found = false;
+
+ if (list_empty(&p_ooo_info->archipelagos_list))
+ return;
+
+ list_for_each_entry(p_archipelago,
+ &p_ooo_info->archipelagos_list, list_entry) {
+ if (p_archipelago->cid == cid) {
+ list_del(&p_archipelago->list_entry);
+ b_found = true;
+ break;
+ }
+ }
+
+ if (!b_found)
+ return;
+
+ while (!list_empty(&p_archipelago->isles_list)) {
+ p_isle = list_first_entry(&p_archipelago->isles_list,
+ struct qed_ooo_isle, list_entry);
+
+ list_del(&p_isle->list_entry);
+
+ while (!list_empty(&p_isle->buffers_list)) {
+ p_buffer = list_first_entry(&p_isle->buffers_list,
+ struct qed_ooo_buffer,
+ list_entry);
+
+ if (!p_buffer)
+ break;
+
+ list_del(&p_buffer->list_entry);
+ list_add_tail(&p_buffer->list_entry,
+ &p_ooo_info->free_buffers_list);
+ }
+ list_add_tail(&p_isle->list_entry,
+ &p_ooo_info->free_isles_list);
+ }
+
+ list_add_tail(&p_archipelago->list_entry,
+ &p_ooo_info->free_archipelagos_list);
+}
+
+void qed_ooo_release_all_isles(struct qed_hwfn *p_hwfn,
+ struct qed_ooo_info *p_ooo_info)
+{
+ struct qed_ooo_archipelago *p_arch;
+ struct qed_ooo_buffer *p_buffer;
+ struct qed_ooo_isle *p_isle;
+
+ while (!list_empty(&p_ooo_info->archipelagos_list)) {
+ p_arch = list_first_entry(&p_ooo_info->archipelagos_list,
+ struct qed_ooo_archipelago,
+ list_entry);
+
+ list_del(&p_arch->list_entry);
+
+ while (!list_empty(&p_arch->isles_list)) {
+ p_isle = list_first_entry(&p_arch->isles_list,
+ struct qed_ooo_isle,
+ list_entry);
+
+ list_del(&p_isle->list_entry);
+
+ while (!list_empty(&p_isle->buffers_list)) {
+ p_buffer =
+ list_first_entry(&p_isle->buffers_list,
+ struct qed_ooo_buffer,
+ list_entry);
+
+ if (!p_buffer)
+ break;
+
+ list_del(&p_buffer->list_entry);
+ list_add_tail(&p_buffer->list_entry,
+ &p_ooo_info->free_buffers_list);
+ }
+ list_add_tail(&p_isle->list_entry,
+ &p_ooo_info->free_isles_list);
+ }
+ list_add_tail(&p_arch->list_entry,
+ &p_ooo_info->free_archipelagos_list);
+ }
+ if (!list_empty(&p_ooo_info->ready_buffers_list))
+ list_splice_tail_init(&p_ooo_info->ready_buffers_list,
+ &p_ooo_info->free_buffers_list);
+}
+
+void qed_ooo_setup(struct qed_hwfn *p_hwfn, struct qed_ooo_info *p_ooo_info)
+{
+ qed_ooo_release_all_isles(p_hwfn, p_ooo_info);
+ memset(p_ooo_info->ooo_history.p_cqes, 0,
+ p_ooo_info->ooo_history.num_of_cqes *
+ sizeof(struct ooo_opaque));
+ p_ooo_info->ooo_history.head_idx = 0;
+}
+
+void qed_ooo_free(struct qed_hwfn *p_hwfn, struct qed_ooo_info *p_ooo_info)
+{
+ struct qed_ooo_buffer *p_buffer;
+
+ qed_ooo_release_all_isles(p_hwfn, p_ooo_info);
+ while (!list_empty(&p_ooo_info->free_buffers_list)) {
+ p_buffer = list_first_entry(&p_ooo_info->free_buffers_list,
+ struct qed_ooo_buffer, list_entry);
+
+ if (!p_buffer)
+ break;
+
+ list_del(&p_buffer->list_entry);
+ dma_free_coherent(&p_hwfn->cdev->pdev->dev,
+ p_buffer->rx_buffer_size,
+ p_buffer->rx_buffer_virt_addr,
+ p_buffer->rx_buffer_phys_addr);
+ kfree(p_buffer);
+ }
+
+ kfree(p_ooo_info->p_isles_mem);
+ kfree(p_ooo_info->p_archipelagos_mem);
+ kfree(p_ooo_info->ooo_history.p_cqes);
+ kfree(p_ooo_info);
+}
+
+void qed_ooo_put_free_buffer(struct qed_hwfn *p_hwfn,
+ struct qed_ooo_info *p_ooo_info,
+ struct qed_ooo_buffer *p_buffer)
+{
+ list_add_tail(&p_buffer->list_entry, &p_ooo_info->free_buffers_list);
+}
+
+struct qed_ooo_buffer *qed_ooo_get_free_buffer(struct qed_hwfn *p_hwfn,
+ struct qed_ooo_info *p_ooo_info)
+{
+ struct qed_ooo_buffer *p_buffer = NULL;
+
+ if (!list_empty(&p_ooo_info->free_buffers_list)) {
+ p_buffer = list_first_entry(&p_ooo_info->free_buffers_list,
+ struct qed_ooo_buffer, list_entry);
+
+ list_del(&p_buffer->list_entry);
+ }
+
+ return p_buffer;
+}
+
+void qed_ooo_put_ready_buffer(struct qed_hwfn *p_hwfn,
+ struct qed_ooo_info *p_ooo_info,
+ struct qed_ooo_buffer *p_buffer, u8 on_tail)
+{
+ if (on_tail)
+ list_add_tail(&p_buffer->list_entry,
+ &p_ooo_info->ready_buffers_list);
+ else
+ list_add(&p_buffer->list_entry,
+ &p_ooo_info->ready_buffers_list);
+}
+
+struct qed_ooo_buffer *qed_ooo_get_ready_buffer(struct qed_hwfn *p_hwfn,
+ struct qed_ooo_info *p_ooo_info)
+{
+ struct qed_ooo_buffer *p_buffer = NULL;
+
+ if (!list_empty(&p_ooo_info->ready_buffers_list)) {
+ p_buffer = list_first_entry(&p_ooo_info->ready_buffers_list,
+ struct qed_ooo_buffer, list_entry);
+
+ list_del(&p_buffer->list_entry);
+ }
+
+ return p_buffer;
+}
+
+void qed_ooo_delete_isles(struct qed_hwfn *p_hwfn,
+ struct qed_ooo_info *p_ooo_info,
+ u32 cid, u8 drop_isle, u8 drop_size)
+{
+ struct qed_ooo_archipelago *p_archipelago = NULL;
+ struct qed_ooo_isle *p_isle = NULL;
+ u8 isle_idx;
+
+ p_archipelago = qed_ooo_seek_archipelago(p_hwfn, p_ooo_info, cid);
+ for (isle_idx = 0; isle_idx < drop_size; isle_idx++) {
+ p_isle = qed_ooo_seek_isle(p_hwfn, p_ooo_info, cid, drop_isle);
+ if (!p_isle) {
+ DP_NOTICE(p_hwfn,
+ "Isle %d is not found(cid %d)\n",
+ drop_isle, cid);
+ return;
+ }
+ if (list_empty(&p_isle->buffers_list))
+ DP_NOTICE(p_hwfn,
+ "Isle %d is empty(cid %d)\n", drop_isle, cid);
+ else
+ list_splice_tail_init(&p_isle->buffers_list,
+ &p_ooo_info->free_buffers_list);
+
+ list_del(&p_isle->list_entry);
+ p_ooo_info->cur_isles_number--;
+ list_add(&p_isle->list_entry, &p_ooo_info->free_isles_list);
+ }
+
+ if (list_empty(&p_archipelago->isles_list)) {
+ list_del(&p_archipelago->list_entry);
+ list_add(&p_archipelago->list_entry,
+ &p_ooo_info->free_archipelagos_list);
+ }
+}
+
+void qed_ooo_add_new_isle(struct qed_hwfn *p_hwfn,
+ struct qed_ooo_info *p_ooo_info,
+ u32 cid, u8 ooo_isle,
+ struct qed_ooo_buffer *p_buffer)
+{
+ struct qed_ooo_archipelago *p_archipelago = NULL;
+ struct qed_ooo_isle *p_prev_isle = NULL;
+ struct qed_ooo_isle *p_isle = NULL;
+
+ if (ooo_isle > 1) {
+ p_prev_isle = qed_ooo_seek_isle(p_hwfn,
+ p_ooo_info, cid, ooo_isle - 1);
+ if (!p_prev_isle) {
+ DP_NOTICE(p_hwfn,
+ "Isle %d is not found(cid %d)\n",
+ ooo_isle - 1, cid);
+ return;
+ }
+ }
+ p_archipelago = qed_ooo_seek_archipelago(p_hwfn, p_ooo_info, cid);
+ if (!p_archipelago && (ooo_isle != 1)) {
+ DP_NOTICE(p_hwfn,
+ "Connection %d is not found in OOO list\n", cid);
+ return;
+ }
+
+ if (!list_empty(&p_ooo_info->free_isles_list)) {
+ p_isle = list_first_entry(&p_ooo_info->free_isles_list,
+ struct qed_ooo_isle, list_entry);
+
+ list_del(&p_isle->list_entry);
+ if (!list_empty(&p_isle->buffers_list)) {
+ DP_NOTICE(p_hwfn, "Free isle is not empty\n");
+ INIT_LIST_HEAD(&p_isle->buffers_list);
+ }
+ } else {
+ DP_NOTICE(p_hwfn, "No more free isles\n");
+ return;
+ }
+
+ if (!p_archipelago &&
+ !list_empty(&p_ooo_info->free_archipelagos_list)) {
+ p_archipelago =
+ list_first_entry(&p_ooo_info->free_archipelagos_list,
+ struct qed_ooo_archipelago, list_entry);
+
+ list_del(&p_archipelago->list_entry);
+ if (!list_empty(&p_archipelago->isles_list)) {
+ DP_NOTICE(p_hwfn,
+ "Free OOO connection is not empty\n");
+ INIT_LIST_HEAD(&p_archipelago->isles_list);
+ }
+ p_archipelago->cid = cid;
+ list_add(&p_archipelago->list_entry,
+ &p_ooo_info->archipelagos_list);
+ } else if (!p_archipelago) {
+ DP_NOTICE(p_hwfn, "No more free OOO connections\n");
+ list_add(&p_isle->list_entry,
+ &p_ooo_info->free_isles_list);
+ list_add(&p_buffer->list_entry,
+ &p_ooo_info->free_buffers_list);
+ return;
+ }
+
+ list_add(&p_buffer->list_entry, &p_isle->buffers_list);
+ p_ooo_info->cur_isles_number++;
+ p_ooo_info->gen_isles_number++;
+
+ if (p_ooo_info->cur_isles_number > p_ooo_info->max_isles_number)
+ p_ooo_info->max_isles_number = p_ooo_info->cur_isles_number;
+
+ if (!p_prev_isle)
+ list_add(&p_isle->list_entry, &p_archipelago->isles_list);
+ else
+ list_add(&p_isle->list_entry, &p_prev_isle->list_entry);
+}
+
+void qed_ooo_add_new_buffer(struct qed_hwfn *p_hwfn,
+ struct qed_ooo_info *p_ooo_info,
+ u32 cid,
+ u8 ooo_isle,
+ struct qed_ooo_buffer *p_buffer, u8 buffer_side)
+{
+ struct qed_ooo_isle *p_isle = NULL;
+
+ p_isle = qed_ooo_seek_isle(p_hwfn, p_ooo_info, cid, ooo_isle);
+ if (!p_isle) {
+ DP_NOTICE(p_hwfn,
+ "Isle %d is not found(cid %d)\n", ooo_isle, cid);
+ return;
+ }
+
+ if (buffer_side == QED_OOO_LEFT_BUF)
+ list_add(&p_buffer->list_entry, &p_isle->buffers_list);
+ else
+ list_add_tail(&p_buffer->list_entry, &p_isle->buffers_list);
+}
+
+void qed_ooo_join_isles(struct qed_hwfn *p_hwfn,
+ struct qed_ooo_info *p_ooo_info, u32 cid, u8 left_isle)
+{
+ struct qed_ooo_archipelago *p_archipelago = NULL;
+ struct qed_ooo_isle *p_right_isle = NULL;
+ struct qed_ooo_isle *p_left_isle = NULL;
+
+ p_right_isle = qed_ooo_seek_isle(p_hwfn, p_ooo_info, cid,
+ left_isle + 1);
+ if (!p_right_isle) {
+ DP_NOTICE(p_hwfn,
+ "Right isle %d is not found(cid %d)\n",
+ left_isle + 1, cid);
+ return;
+ }
+
+ p_archipelago = qed_ooo_seek_archipelago(p_hwfn, p_ooo_info, cid);
+ list_del(&p_right_isle->list_entry);
+ p_ooo_info->cur_isles_number--;
+ if (left_isle) {
+ p_left_isle = qed_ooo_seek_isle(p_hwfn, p_ooo_info, cid,
+ left_isle);
+ if (!p_left_isle) {
+ DP_NOTICE(p_hwfn,
+ "Left isle %d is not found(cid %d)\n",
+ left_isle, cid);
+ return;
+ }
+ list_splice_tail_init(&p_right_isle->buffers_list,
+ &p_left_isle->buffers_list);
+ } else {
+ list_splice_tail_init(&p_right_isle->buffers_list,
+ &p_ooo_info->ready_buffers_list);
+ if (list_empty(&p_archipelago->isles_list)) {
+ list_del(&p_archipelago->list_entry);
+ list_add(&p_archipelago->list_entry,
+ &p_ooo_info->free_archipelagos_list);
+ }
+ }
+ list_add_tail(&p_right_isle->list_entry, &p_ooo_info->free_isles_list);
+}
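
The new file models TCP out-of-order data as one "archipelago" per connection (cid), each holding an ordered list of "isles", and each isle an ordered list of buffers; joining isle N with isle N+1 splices the right isle's buffers onto the left one. A minimal sketch walking that hierarchy, using the structures declared in qed_ooo.h below (the helper itself is hypothetical):

	/* Hypothetical helper: count every buffer currently held
	 * out-of-order for a given connection.
	 */
	static u32 qed_ooo_count_buffers(struct qed_ooo_info *p_ooo_info,
					 u32 cid)
	{
		struct qed_ooo_archipelago *p_arch;
		struct qed_ooo_buffer *p_buf;
		struct qed_ooo_isle *p_isle;
		u32 count = 0;

		list_for_each_entry(p_arch, &p_ooo_info->archipelagos_list,
				    list_entry) {
			if (p_arch->cid != cid)
				continue;
			list_for_each_entry(p_isle, &p_arch->isles_list,
					    list_entry)
				list_for_each_entry(p_buf,
						    &p_isle->buffers_list,
						    list_entry)
					count++;
		}

		return count;
	}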
diff --git a/drivers/net/ethernet/qlogic/qed/qed_ooo.h b/drivers/net/ethernet/qlogic/qed/qed_ooo.h
new file mode 100644
index 000000000000..7a0670a9a074
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_ooo.h
@@ -0,0 +1,176 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#ifndef _QED_OOO_H
+#define _QED_OOO_H
+#include <linux/types.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+#include "qed.h"
+
+#define QED_MAX_NUM_ISLES 256
+#define QED_MAX_NUM_OOO_HISTORY_ENTRIES 512
+
+#define QED_OOO_LEFT_BUF 0
+#define QED_OOO_RIGHT_BUF 1
+
+struct qed_ooo_buffer {
+ struct list_head list_entry;
+ void *rx_buffer_virt_addr;
+ dma_addr_t rx_buffer_phys_addr;
+ u32 rx_buffer_size;
+ u16 packet_length;
+ u16 parse_flags;
+ u16 vlan;
+ u8 placement_offset;
+};
+
+struct qed_ooo_isle {
+ struct list_head list_entry;
+ struct list_head buffers_list;
+};
+
+struct qed_ooo_archipelago {
+ struct list_head list_entry;
+ struct list_head isles_list;
+ u32 cid;
+};
+
+struct qed_ooo_history {
+ struct ooo_opaque *p_cqes;
+ u32 head_idx;
+ u32 num_of_cqes;
+};
+
+struct qed_ooo_info {
+ struct list_head free_buffers_list;
+ struct list_head ready_buffers_list;
+ struct list_head free_isles_list;
+ struct list_head free_archipelagos_list;
+ struct list_head archipelagos_list;
+ struct qed_ooo_archipelago *p_archipelagos_mem;
+ struct qed_ooo_isle *p_isles_mem;
+ struct qed_ooo_history ooo_history;
+ u32 cur_isles_number;
+ u32 max_isles_number;
+ u32 gen_isles_number;
+};
+
+#if IS_ENABLED(CONFIG_QED_ISCSI)
+void qed_ooo_save_history_entry(struct qed_hwfn *p_hwfn,
+ struct qed_ooo_info *p_ooo_info,
+ struct ooo_opaque *p_cqe);
+
+struct qed_ooo_info *qed_ooo_alloc(struct qed_hwfn *p_hwfn);
+
+void qed_ooo_release_connection_isles(struct qed_hwfn *p_hwfn,
+ struct qed_ooo_info *p_ooo_info,
+ u32 cid);
+
+void qed_ooo_release_all_isles(struct qed_hwfn *p_hwfn,
+ struct qed_ooo_info *p_ooo_info);
+
+void qed_ooo_setup(struct qed_hwfn *p_hwfn, struct qed_ooo_info *p_ooo_info);
+
+void qed_ooo_free(struct qed_hwfn *p_hwfn, struct qed_ooo_info *p_ooo_info);
+
+void qed_ooo_put_free_buffer(struct qed_hwfn *p_hwfn,
+ struct qed_ooo_info *p_ooo_info,
+ struct qed_ooo_buffer *p_buffer);
+
+struct qed_ooo_buffer *
+qed_ooo_get_free_buffer(struct qed_hwfn *p_hwfn,
+ struct qed_ooo_info *p_ooo_info);
+
+void qed_ooo_put_ready_buffer(struct qed_hwfn *p_hwfn,
+ struct qed_ooo_info *p_ooo_info,
+ struct qed_ooo_buffer *p_buffer, u8 on_tail);
+
+struct qed_ooo_buffer *
+qed_ooo_get_ready_buffer(struct qed_hwfn *p_hwfn,
+ struct qed_ooo_info *p_ooo_info);
+
+void qed_ooo_delete_isles(struct qed_hwfn *p_hwfn,
+ struct qed_ooo_info *p_ooo_info,
+ u32 cid, u8 drop_isle, u8 drop_size);
+
+void qed_ooo_add_new_isle(struct qed_hwfn *p_hwfn,
+ struct qed_ooo_info *p_ooo_info,
+ u32 cid,
+ u8 ooo_isle, struct qed_ooo_buffer *p_buffer);
+
+void qed_ooo_add_new_buffer(struct qed_hwfn *p_hwfn,
+ struct qed_ooo_info *p_ooo_info,
+ u32 cid,
+ u8 ooo_isle,
+ struct qed_ooo_buffer *p_buffer, u8 buffer_side);
+
+void qed_ooo_join_isles(struct qed_hwfn *p_hwfn,
+ struct qed_ooo_info *p_ooo_info, u32 cid,
+ u8 left_isle);
+#else /* IS_ENABLED(CONFIG_QED_ISCSI) */
+static inline void qed_ooo_save_history_entry(struct qed_hwfn *p_hwfn,
+ struct qed_ooo_info *p_ooo_info,
+ struct ooo_opaque *p_cqe) {}
+
+static inline struct qed_ooo_info *
+qed_ooo_alloc(struct qed_hwfn *p_hwfn) { return NULL; }
+
+static inline void
+qed_ooo_release_connection_isles(struct qed_hwfn *p_hwfn,
+ struct qed_ooo_info *p_ooo_info,
+ u32 cid) {}
+
+static inline void qed_ooo_release_all_isles(struct qed_hwfn *p_hwfn,
+ struct qed_ooo_info *p_ooo_info)
+ {}
+
+static inline void qed_ooo_setup(struct qed_hwfn *p_hwfn,
+ struct qed_ooo_info *p_ooo_info) {}
+
+static inline void qed_ooo_free(struct qed_hwfn *p_hwfn,
+ struct qed_ooo_info *p_ooo_info) {}
+
+static inline void qed_ooo_put_free_buffer(struct qed_hwfn *p_hwfn,
+ struct qed_ooo_info *p_ooo_info,
+ struct qed_ooo_buffer *p_buffer) {}
+
+static inline struct qed_ooo_buffer *
+qed_ooo_get_free_buffer(struct qed_hwfn *p_hwfn,
+ struct qed_ooo_info *p_ooo_info) { return NULL; }
+
+static inline void qed_ooo_put_ready_buffer(struct qed_hwfn *p_hwfn,
+ struct qed_ooo_info *p_ooo_info,
+ struct qed_ooo_buffer *p_buffer,
+ u8 on_tail) {}
+
+static inline struct qed_ooo_buffer *
+qed_ooo_get_ready_buffer(struct qed_hwfn *p_hwfn,
+ struct qed_ooo_info *p_ooo_info) { return NULL; }
+
+static inline void qed_ooo_delete_isles(struct qed_hwfn *p_hwfn,
+ struct qed_ooo_info *p_ooo_info,
+ u32 cid, u8 drop_isle, u8 drop_size) {}
+
+static inline void qed_ooo_add_new_isle(struct qed_hwfn *p_hwfn,
+ struct qed_ooo_info *p_ooo_info,
+ u32 cid, u8 ooo_isle,
+ struct qed_ooo_buffer *p_buffer) {}
+
+static inline void qed_ooo_add_new_buffer(struct qed_hwfn *p_hwfn,
+ struct qed_ooo_info *p_ooo_info,
+ u32 cid, u8 ooo_isle,
+ struct qed_ooo_buffer *p_buffer,
+ u8 buffer_side) {}
+
+static inline void qed_ooo_join_isles(struct qed_hwfn *p_hwfn,
+ struct qed_ooo_info *p_ooo_info, u32 cid,
+ u8 left_isle) {}
+#endif /* IS_ENABLED(CONFIG_QED_ISCSI) */
+
+#endif
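
The #else branch above is the standard kernel pattern for optional subsystems: with CONFIG_QED_ISCSI disabled, static inline no-op stubs stand in for the real API so callers compile without #ifdef clutter. Reduced to its minimal (hypothetical) shape:

	#if IS_ENABLED(CONFIG_FOO)
	int foo_start(struct foo_dev *dev);	/* real version in foo.c */
	#else
	static inline int foo_start(struct foo_dev *dev) { return 0; }
	#endif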
diff --git a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h
index b414a0542177..97544205a8c1 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h
@@ -82,6 +82,8 @@
0x1c80000UL
#define BAR0_MAP_REG_XSDM_RAM \
0x1e00000UL
+#define BAR0_MAP_REG_YSDM_RAM \
+ 0x1e80000UL
#define NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF \
0x5011f4UL
#define PRS_REG_SEARCH_TCP \
diff --git a/drivers/net/ethernet/qlogic/qed/qed_roce.c b/drivers/net/ethernet/qlogic/qed/qed_roce.c
index f3a825a8f8d5..2a16547c8966 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_roce.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_roce.c
@@ -2658,7 +2658,6 @@ static int qed_roce_ll2_start(struct qed_dev *cdev,
DP_ERR(cdev, "qed roce ll2 start: failed memory allocation\n");
return -ENOMEM;
}
- memset(roce_ll2, 0, sizeof(*roce_ll2));
roce_ll2->handle = QED_LL2_UNUSED_HANDLE;
roce_ll2->cbs = params->cbs;
roce_ll2->cb_cookie = params->cb_cookie;
@@ -2772,6 +2771,7 @@ static int qed_roce_ll2_tx(struct qed_dev *cdev,
/* Tx header */
rc = qed_ll2_prepare_tx_packet(QED_LEADING_HWFN(cdev), roce_ll2->handle,
1 + pkt->n_seg, 0, flags, 0,
+ QED_LL2_TX_DEST_NW,
qed_roce_flavor, pkt->header.baddr,
pkt->header.len, pkt, 1);
if (rc) {
diff --git a/drivers/net/ethernet/qlogic/qed/qed_selftest.c b/drivers/net/ethernet/qlogic/qed/qed_selftest.c
index 9b7678f26909..48bfaecaf6dc 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_selftest.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_selftest.c
@@ -1,3 +1,4 @@
+#include <linux/crc32.h>
#include "qed.h"
#include "qed_dev_api.h"
#include "qed_mcp.h"
@@ -75,3 +76,103 @@ int qed_selftest_clock(struct qed_dev *cdev)
return rc;
}
+
+int qed_selftest_nvram(struct qed_dev *cdev)
+{
+ struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_ptt *p_ptt = qed_ptt_acquire(p_hwfn);
+ u32 num_images, i, j, nvm_crc, calc_crc;
+ struct bist_nvm_image_att image_att;
+ u8 *buf = NULL;
+ __be32 val;
+ int rc;
+
+ if (!p_ptt) {
+ DP_ERR(p_hwfn, "failed to acquire ptt\n");
+ return -EBUSY;
+ }
+
+ /* Acquire from MFW the amount of available images */
+ rc = qed_mcp_bist_nvm_test_get_num_images(p_hwfn, p_ptt, &num_images);
+ if (rc || !num_images) {
+ DP_ERR(p_hwfn, "Failed getting number of images\n");
+ /* Release the acquired PTT rather than leaking it */
+ rc = -EINVAL;
+ goto err0;
+ }
+
+ /* Iterate over images and validate CRC */
+ for (i = 0; i < num_images; i++) {
+ /* This mailbox returns the image attributes required for
+ * reading it.
+ */
+ rc = qed_mcp_bist_nvm_test_get_image_att(p_hwfn, p_ptt,
+ &image_att, i);
+ if (rc) {
+ DP_ERR(p_hwfn,
+ "Failed getting image index %d attributes\n",
+ i);
+ goto err0;
+ }
+
+ /* Once an MFW crash dump is collected, the image's CRC is
+ * no longer valid.
+ */
+ if (image_att.image_type == NVM_TYPE_MDUMP)
+ continue;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_SP, "image index %d, size %x\n",
+ i, image_att.len);
+
+ /* Allocate a buffer for holding the nvram image */
+ buf = kzalloc(image_att.len, GFP_KERNEL);
+ if (!buf) {
+ rc = -ENOMEM;
+ goto err0;
+ }
+
+ /* Read image into buffer */
+ rc = qed_mcp_nvm_read(p_hwfn->cdev, image_att.nvm_start_addr,
+ buf, image_att.len);
+ if (rc) {
+ DP_ERR(p_hwfn,
+ "Failed reading image index %d from nvm.\n", i);
+ goto err1;
+ }
+
+ /* Convert the buffer into big-endian format (excluding the
+ * closing 4 bytes of CRC).
+ */
+ for (j = 0; j < image_att.len - 4; j += 4) {
+ val = cpu_to_be32(*(u32 *)&buf[j]);
+ *(u32 *)&buf[j] = (__force u32)val;
+ }
+
+ /* Calc CRC for the "actual" image buffer, i.e. not including
+ * the last 4 CRC bytes.
+ */
+ nvm_crc = *(u32 *)(buf + image_att.len - 4);
+ calc_crc = crc32(0xffffffff, buf, image_att.len - 4);
+ calc_crc = (__force u32)~cpu_to_be32(calc_crc);
+ DP_VERBOSE(p_hwfn, QED_MSG_SP,
+ "nvm crc 0x%x, calc_crc 0x%x\n", nvm_crc, calc_crc);
+
+ if (calc_crc != nvm_crc) {
+ rc = -EINVAL;
+ goto err1;
+ }
+
+ /* Done with this image; free the buffer and clear the pointer
+ * to prevent a double free on a subsequent failure.
+ */
+ kfree(buf);
+ buf = NULL;
+ }
+
+ qed_ptt_release(p_hwfn, p_ptt);
+ return 0;
+
+err1:
+ kfree(buf);
+err0:
+ qed_ptt_release(p_hwfn, p_ptt);
+ return rc;
+}
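
The CRC convention verified above, condensed: images are stored big-endian with a trailing 4-byte CRC, crc32() is seeded with 0xffffffff, and the computed value is bit-inverted and byte-swapped before comparison. As a single predicate over an already byte-swapped buffer (illustrative helper, not part of the patch):

	static bool nvm_image_crc_ok(const u8 *be_buf, u32 len)
	{
		u32 stored = *(const u32 *)(be_buf + len - 4);
		u32 calc = crc32(0xffffffff, be_buf, len - 4);

		return stored == (__force u32)~cpu_to_be32(calc);
	}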
diff --git a/drivers/net/ethernet/qlogic/qed/qed_selftest.h b/drivers/net/ethernet/qlogic/qed/qed_selftest.h
index 50eb0b49950f..739ddb730967 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_selftest.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_selftest.h
@@ -37,4 +37,14 @@ int qed_selftest_register(struct qed_dev *cdev);
* @return int
*/
int qed_selftest_clock(struct qed_dev *cdev);
+
+/**
+ * @brief qed_selftest_nvram - Perform nvram test
+ *
+ * @param cdev
+ *
+ * @return int
+ */
+int qed_selftest_nvram(struct qed_dev *cdev);
+
#endif
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp.h b/drivers/net/ethernet/qlogic/qed/qed_sp.h
index b2c08e4d2a9b..9c897bc68d05 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sp.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_sp.h
@@ -110,8 +110,8 @@ union qed_spq_req_comp {
};
struct qed_spq_comp_done {
- u64 done;
- u8 fw_return_code;
+ unsigned int done;
+ u8 fw_return_code;
};
struct qed_spq_entry {
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c
index 2888eb0628f8..a39ef2e7a9a6 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c
@@ -347,11 +347,11 @@ int qed_sp_pf_start(struct qed_hwfn *p_hwfn,
/* Place EQ address in RAMROD */
DMA_REGPAIR_LE(p_ramrod->event_ring_pbl_addr,
- p_hwfn->p_eq->chain.pbl.p_phys_table);
+ p_hwfn->p_eq->chain.pbl_sp.p_phys_table);
page_cnt = (u8)qed_chain_get_page_cnt(&p_hwfn->p_eq->chain);
p_ramrod->event_ring_num_pages = page_cnt;
DMA_REGPAIR_LE(p_ramrod->consolid_q_pbl_addr,
- p_hwfn->p_consq->chain.pbl.p_phys_table);
+ p_hwfn->p_consq->chain.pbl_sp.p_phys_table);
qed_tunn_set_pf_start_params(p_hwfn, p_tunn, &p_ramrod->tunnel_config);
@@ -369,7 +369,7 @@ int qed_sp_pf_start(struct qed_hwfn *p_hwfn,
p_ramrod->personality = PERSONALITY_RDMA_AND_ETH;
break;
default:
- DP_NOTICE(p_hwfn, "Unkown personality %d\n",
+ DP_NOTICE(p_hwfn, "Unknown personality %d\n",
p_hwfn->hw_info.personality);
p_ramrod->personality = PERSONALITY_ETH;
}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_spq.c b/drivers/net/ethernet/qlogic/qed/qed_spq.c
index 9fbaf9429fd0..f022469bdcf8 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_spq.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_spq.c
@@ -24,7 +24,9 @@
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_int.h"
+#include "qed_iscsi.h"
#include "qed_mcp.h"
+#include "qed_ooo.h"
#include "qed_reg_addr.h"
#include "qed_sp.h"
#include "qed_sriov.h"
@@ -35,7 +37,11 @@
***************************************************************************/
#define SPQ_HIGH_PRI_RESERVE_DEFAULT (1)
-#define SPQ_BLOCK_SLEEP_LENGTH (1000)
+
+#define SPQ_BLOCK_DELAY_MAX_ITER (10)
+#define SPQ_BLOCK_DELAY_US (10)
+#define SPQ_BLOCK_SLEEP_MAX_ITER (1000)
+#define SPQ_BLOCK_SLEEP_MS (5)
/***************************************************************************
* Blocking Imp. (BLOCK/EBLOCK mode)
@@ -48,60 +54,88 @@ static void qed_spq_blocking_cb(struct qed_hwfn *p_hwfn,
comp_done = (struct qed_spq_comp_done *)cookie;
- comp_done->done = 0x1;
- comp_done->fw_return_code = fw_return_code;
+ comp_done->fw_return_code = fw_return_code;
- /* make update visible to waiting thread */
- smp_wmb();
+ /* Make sure completion done is visible on waiting thread */
+ smp_store_release(&comp_done->done, 0x1);
}
-static int qed_spq_block(struct qed_hwfn *p_hwfn,
- struct qed_spq_entry *p_ent,
- u8 *p_fw_ret)
+static int __qed_spq_block(struct qed_hwfn *p_hwfn,
+ struct qed_spq_entry *p_ent,
+ u8 *p_fw_ret, bool sleep_between_iter)
{
- int sleep_count = SPQ_BLOCK_SLEEP_LENGTH;
struct qed_spq_comp_done *comp_done;
- int rc;
+ u32 iter_cnt;
comp_done = (struct qed_spq_comp_done *)p_ent->comp_cb.cookie;
- while (sleep_count) {
- /* validate we receive completion update */
- smp_rmb();
- if (comp_done->done == 1) {
+ iter_cnt = sleep_between_iter ? SPQ_BLOCK_SLEEP_MAX_ITER
+ : SPQ_BLOCK_DELAY_MAX_ITER;
+
+ while (iter_cnt--) {
+ /* Validate we receive completion update */
+ if (READ_ONCE(comp_done->done) == 1) {
+ /* Read updated FW return value */
+ smp_read_barrier_depends();
if (p_fw_ret)
*p_fw_ret = comp_done->fw_return_code;
return 0;
}
- usleep_range(5000, 10000);
- sleep_count--;
+
+ if (sleep_between_iter)
+ msleep(SPQ_BLOCK_SLEEP_MS);
+ else
+ udelay(SPQ_BLOCK_DELAY_US);
+ }
+
+ return -EBUSY;
+}
+
+static int qed_spq_block(struct qed_hwfn *p_hwfn,
+ struct qed_spq_entry *p_ent,
+ u8 *p_fw_ret, bool skip_quick_poll)
+{
+ struct qed_spq_comp_done *comp_done;
+ int rc;
+
+ /* A relatively short polling period without sleeping, to allow
+ * the FW to complete the ramrod and thus possibly avoid the
+ * following sleeps.
+ */
+ if (!skip_quick_poll) {
+ rc = __qed_spq_block(p_hwfn, p_ent, p_fw_ret, false);
+ if (!rc)
+ return 0;
}
+ /* Move to polling with a sleeping period between iterations */
+ rc = __qed_spq_block(p_hwfn, p_ent, p_fw_ret, true);
+ if (!rc)
+ return 0;
+
DP_INFO(p_hwfn, "Ramrod is stuck, requesting MCP drain\n");
rc = qed_mcp_drain(p_hwfn, p_hwfn->p_main_ptt);
- if (rc != 0)
+ if (rc) {
DP_NOTICE(p_hwfn, "MCP drain failed\n");
+ goto err;
+ }
/* Retry after drain */
- sleep_count = SPQ_BLOCK_SLEEP_LENGTH;
- while (sleep_count) {
- /* validate we receive completion update */
- smp_rmb();
- if (comp_done->done == 1) {
- if (p_fw_ret)
- *p_fw_ret = comp_done->fw_return_code;
- return 0;
- }
- usleep_range(5000, 10000);
- sleep_count--;
- }
+ rc = __qed_spq_block(p_hwfn, p_ent, p_fw_ret, true);
+ if (!rc)
+ return 0;
+ comp_done = (struct qed_spq_comp_done *)p_ent->comp_cb.cookie;
if (comp_done->done == 1) {
if (p_fw_ret)
*p_fw_ret = comp_done->fw_return_code;
return 0;
}
-
- DP_NOTICE(p_hwfn, "Ramrod is stuck, MCP drain failed\n");
+err:
+ DP_NOTICE(p_hwfn,
+ "Ramrod is stuck [CID %08x cmd %02x protocol %02x echo %04x]\n",
+ le32_to_cpu(p_ent->elem.hdr.cid),
+ p_ent->elem.hdr.cmd_id,
+ p_ent->elem.hdr.protocol_id,
+ le16_to_cpu(p_ent->elem.hdr.echo));
return -EBUSY;
}
@@ -245,6 +279,28 @@ qed_async_event_completion(struct qed_hwfn *p_hwfn,
return qed_sriov_eqe_event(p_hwfn,
p_eqe->opcode,
p_eqe->echo, &p_eqe->data);
+ case PROTOCOLID_ISCSI:
+ if (!IS_ENABLED(CONFIG_QED_ISCSI))
+ return -EINVAL;
+ if (p_eqe->opcode == ISCSI_EVENT_TYPE_ASYN_DELETE_OOO_ISLES) {
+ u32 cid = le32_to_cpu(p_eqe->data.iscsi_info.cid);
+
+ qed_ooo_release_connection_isles(p_hwfn,
+ p_hwfn->p_ooo_info,
+ cid);
+ return 0;
+ }
+
+ if (p_hwfn->p_iscsi_info->event_cb) {
+ struct qed_iscsi_info *p_iscsi = p_hwfn->p_iscsi_info;
+
+ return p_iscsi->event_cb(p_iscsi->event_context,
+ p_eqe->opcode, &p_eqe->data);
+ } else {
+ DP_NOTICE(p_hwfn,
+ "iSCSI async completion is not set\n");
+ return -EINVAL;
+ }
default:
DP_NOTICE(p_hwfn,
"Unknown Async completion for protocol: %d\n",
@@ -725,7 +781,8 @@ int qed_spq_post(struct qed_hwfn *p_hwfn,
* access p_ent here to see whether it's successful or not.
* Thus, after gaining the answer perform the cleanup here.
*/
- rc = qed_spq_block(p_hwfn, p_ent, fw_return_code);
+ rc = qed_spq_block(p_hwfn, p_ent, fw_return_code,
+ p_ent->queue == &p_spq->unlimited_pending);
if (p_ent->queue == &p_spq->unlimited_pending) {
/* This is an allocated p_ent which does not need to
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.c b/drivers/net/ethernet/qlogic/qed/qed_sriov.c
index d2d6621fe0e5..85b09dd1787a 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sriov.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.c
@@ -109,7 +109,8 @@ static int qed_sp_vf_stop(struct qed_hwfn *p_hwfn,
}
static bool qed_iov_is_valid_vfid(struct qed_hwfn *p_hwfn,
- int rel_vf_id, bool b_enabled_only)
+ int rel_vf_id,
+ bool b_enabled_only, bool b_non_malicious)
{
if (!p_hwfn->pf_iov_info) {
DP_NOTICE(p_hwfn->cdev, "No iov info\n");
@@ -124,6 +125,10 @@ static bool qed_iov_is_valid_vfid(struct qed_hwfn *p_hwfn,
b_enabled_only)
return false;
+ if ((p_hwfn->pf_iov_info->vfs_array[rel_vf_id].b_malicious) &&
+ b_non_malicious)
+ return false;
+
return true;
}
@@ -138,7 +143,8 @@ static struct qed_vf_info *qed_iov_get_vf_info(struct qed_hwfn *p_hwfn,
return NULL;
}
- if (qed_iov_is_valid_vfid(p_hwfn, relative_vf_id, b_enabled_only))
+ if (qed_iov_is_valid_vfid(p_hwfn, relative_vf_id,
+ b_enabled_only, false))
vf = &p_hwfn->pf_iov_info->vfs_array[relative_vf_id];
else
DP_ERR(p_hwfn, "qed_iov_get_vf_info: VF[%d] is not enabled\n",
@@ -542,7 +548,8 @@ int qed_iov_hw_info(struct qed_hwfn *p_hwfn)
return 0;
}
-static bool qed_iov_pf_sanity_check(struct qed_hwfn *p_hwfn, int vfid)
+bool _qed_iov_pf_sanity_check(struct qed_hwfn *p_hwfn,
+ int vfid, bool b_fail_malicious)
{
/* Check PF supports sriov */
if (IS_VF(p_hwfn->cdev) || !IS_QED_SRIOV(p_hwfn->cdev) ||
@@ -550,12 +557,17 @@ static bool qed_iov_pf_sanity_check(struct qed_hwfn *p_hwfn, int vfid)
return false;
/* Check VF validity */
- if (!qed_iov_is_valid_vfid(p_hwfn, vfid, true))
+ if (!qed_iov_is_valid_vfid(p_hwfn, vfid, true, b_fail_malicious))
return false;
return true;
}
+bool qed_iov_pf_sanity_check(struct qed_hwfn *p_hwfn, int vfid)
+{
+ return _qed_iov_pf_sanity_check(p_hwfn, vfid, true);
+}
+
static void qed_iov_set_vf_to_disable(struct qed_dev *cdev,
u16 rel_vf_id, u8 to_disable)
{
@@ -652,6 +664,9 @@ static int qed_iov_enable_vf_access(struct qed_hwfn *p_hwfn,
qed_iov_vf_igu_reset(p_hwfn, p_ptt, vf);
+ /* It's possible VF was previously considered malicious */
+ vf->b_malicious = false;
+
rc = qed_mcp_config_vf_msix(p_hwfn, p_ptt, vf->abs_vf_id, vf->num_sbs);
if (rc)
return rc;
@@ -793,37 +808,70 @@ static void qed_iov_free_vf_igu_sbs(struct qed_hwfn *p_hwfn,
static int qed_iov_init_hw_for_vf(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
- u16 rel_vf_id, u16 num_rx_queues)
+ struct qed_iov_vf_init_params *p_params)
{
u8 num_of_vf_avaiable_chains = 0;
struct qed_vf_info *vf = NULL;
+ u16 qid, num_irqs;
int rc = 0;
u32 cids;
u8 i;
- vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, false);
+ vf = qed_iov_get_vf_info(p_hwfn, p_params->rel_vf_id, false);
if (!vf) {
DP_ERR(p_hwfn, "qed_iov_init_hw_for_vf : vf is NULL\n");
return -EINVAL;
}
if (vf->b_init) {
- DP_NOTICE(p_hwfn, "VF[%d] is already active.\n", rel_vf_id);
+ DP_NOTICE(p_hwfn, "VF[%d] is already active.\n",
+ p_params->rel_vf_id);
return -EINVAL;
}
+ /* Perform sanity checking on the requested queue_id */
+ for (i = 0; i < p_params->num_queues; i++) {
+ u16 min_vf_qzone = FEAT_NUM(p_hwfn, QED_PF_L2_QUE);
+ u16 max_vf_qzone = min_vf_qzone +
+ FEAT_NUM(p_hwfn, QED_VF_L2_QUE) - 1;
+
+ qid = p_params->req_rx_queue[i];
+ if (qid < min_vf_qzone || qid > max_vf_qzone) {
+ DP_NOTICE(p_hwfn,
+ "Can't enable Rx qid [%04x] for VF[%d]: qids [0x%04x,...,0x%04x] available\n",
+ qid,
+ p_params->rel_vf_id,
+ min_vf_qzone, max_vf_qzone);
+ return -EINVAL;
+ }
+
+ qid = p_params->req_tx_queue[i];
+ if (qid > max_vf_qzone) {
+ DP_NOTICE(p_hwfn,
+ "Can't enable Tx qid [%04x] for VF[%d]: max qid 0x%04x\n",
+ qid, p_params->rel_vf_id, max_vf_qzone);
+ return -EINVAL;
+ }
+
+ /* If client *really* wants, Tx qid can be shared with PF */
+ if (qid < min_vf_qzone)
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_IOV,
+ "VF[%d] is using PF qid [0x%04x] for Txq[0x%02x]\n",
+ p_params->rel_vf_id, qid, i);
+ }
+
/* Limit number of queues according to number of CIDs */
qed_cxt_get_proto_cid_count(p_hwfn, PROTOCOLID_ETH, &cids);
DP_VERBOSE(p_hwfn,
QED_MSG_IOV,
"VF[%d] - requesting to initialize for 0x%04x queues [0x%04x CIDs available]\n",
- vf->relative_vf_id, num_rx_queues, (u16) cids);
- num_rx_queues = min_t(u16, num_rx_queues, ((u16) cids));
+ vf->relative_vf_id, p_params->num_queues, (u16)cids);
+ num_irqs = min_t(u16, p_params->num_queues, ((u16)cids));
num_of_vf_avaiable_chains = qed_iov_alloc_vf_igu_sbs(p_hwfn,
p_ptt,
- vf,
- num_rx_queues);
+ vf, num_irqs);
if (!num_of_vf_avaiable_chains) {
DP_ERR(p_hwfn, "no available igu sbs\n");
return -ENOMEM;
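
The checks added above enforce the queue-zone split: a VF's Rx qids must fall inside [min_vf_qzone, max_vf_qzone], while Tx qids may additionally borrow from the PF zone below it. The same rule as one predicate (hypothetical helper):

	static bool vf_qids_ok(u16 rx_qid, u16 tx_qid,
			       u16 min_vf_qzone, u16 max_vf_qzone)
	{
		return rx_qid >= min_vf_qzone && rx_qid <= max_vf_qzone &&
		       tx_qid <= max_vf_qzone;	/* Tx may share PF qids */
	}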
@@ -834,25 +882,22 @@ static int qed_iov_init_hw_for_vf(struct qed_hwfn *p_hwfn,
vf->num_txqs = num_of_vf_avaiable_chains;
for (i = 0; i < vf->num_rxqs; i++) {
- u16 queue_id = qed_int_queue_id_from_sb_id(p_hwfn,
- vf->igu_sbs[i]);
+ struct qed_vf_q_info *p_queue = &vf->vf_queues[i];
- if (queue_id > RESC_NUM(p_hwfn, QED_L2_QUEUE)) {
- DP_NOTICE(p_hwfn,
- "VF[%d] will require utilizing of out-of-bounds queues - %04x\n",
- vf->relative_vf_id, queue_id);
- return -EINVAL;
- }
+ p_queue->fw_rx_qid = p_params->req_rx_queue[i];
+ p_queue->fw_tx_qid = p_params->req_tx_queue[i];
/* CIDs are per-VF, so no problem having them 0-based. */
- vf->vf_queues[i].fw_rx_qid = queue_id;
- vf->vf_queues[i].fw_tx_qid = queue_id;
- vf->vf_queues[i].fw_cid = i;
+ p_queue->fw_cid = i;
DP_VERBOSE(p_hwfn, QED_MSG_IOV,
- "VF[%d] - [%d] SB %04x, Tx/Rx queue %04x CID %04x\n",
- vf->relative_vf_id, i, vf->igu_sbs[i], queue_id, i);
+ "VF[%d] - Q[%d] SB %04x, qid [Rx %04x Tx %04x] CID %04x\n",
+ vf->relative_vf_id,
+ i, vf->igu_sbs[i],
+ p_queue->fw_rx_qid,
+ p_queue->fw_tx_qid, p_queue->fw_cid);
}
+
rc = qed_iov_enable_vf_access(p_hwfn, p_ptt, vf);
if (!rc) {
vf->b_init = true;
@@ -1172,8 +1217,19 @@ static void qed_iov_vf_cleanup(struct qed_hwfn *p_hwfn,
p_vf->num_active_rxqs = 0;
- for (i = 0; i < QED_MAX_VF_CHAINS_PER_PF; i++)
- p_vf->vf_queues[i].rxq_active = 0;
+ for (i = 0; i < QED_MAX_VF_CHAINS_PER_PF; i++) {
+ struct qed_vf_q_info *p_queue = &p_vf->vf_queues[i];
+
+ if (p_queue->p_rx_cid) {
+ qed_eth_queue_cid_release(p_hwfn, p_queue->p_rx_cid);
+ p_queue->p_rx_cid = NULL;
+ }
+
+ if (p_queue->p_tx_cid) {
+ qed_eth_queue_cid_release(p_hwfn, p_queue->p_tx_cid);
+ p_queue->p_tx_cid = NULL;
+ }
+ }
memset(&p_vf->shadow_config, 0, sizeof(p_vf->shadow_config));
memset(&p_vf->acquire, 0, sizeof(p_vf->acquire));
@@ -1579,21 +1635,21 @@ static int qed_iov_configure_vport_forced(struct qed_hwfn *p_hwfn,
/* Update all the Rx queues */
for (i = 0; i < QED_MAX_VF_CHAINS_PER_PF; i++) {
- u16 qid;
+ struct qed_queue_cid *p_cid;
- if (!p_vf->vf_queues[i].rxq_active)
+ p_cid = p_vf->vf_queues[i].p_rx_cid;
+ if (!p_cid)
continue;
- qid = p_vf->vf_queues[i].fw_rx_qid;
-
- rc = qed_sp_eth_rx_queues_update(p_hwfn, qid,
+ rc = qed_sp_eth_rx_queues_update(p_hwfn,
+ (void **)&p_cid,
1, 0, 1,
QED_SPQ_MODE_EBLOCK,
NULL);
if (rc) {
DP_NOTICE(p_hwfn,
"Failed to send Rx update fo queue[0x%04x]\n",
- qid);
+ p_cid->rel.queue_id);
return rc;
}
}
@@ -1767,23 +1823,34 @@ static void qed_iov_vf_mbx_start_rxq(struct qed_hwfn *p_hwfn,
struct qed_queue_start_common_params params;
struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
u8 status = PFVF_STATUS_NO_RESOURCE;
+ struct qed_vf_q_info *p_queue;
struct vfpf_start_rxq_tlv *req;
bool b_legacy_vf = false;
int rc;
- memset(&params, 0, sizeof(params));
req = &mbx->req_virt->start_rxq;
if (!qed_iov_validate_rxq(p_hwfn, vf, req->rx_qid) ||
!qed_iov_validate_sb(p_hwfn, vf, req->hw_sb))
goto out;
- params.queue_id = vf->vf_queues[req->rx_qid].fw_rx_qid;
- params.vf_qid = req->rx_qid;
+ /* Acquire a new queue-cid */
+ p_queue = &vf->vf_queues[req->rx_qid];
+
+ memset(&params, 0, sizeof(params));
+ params.queue_id = p_queue->fw_rx_qid;
params.vport_id = vf->vport_id;
+ params.stats_id = vf->abs_vf_id + 0x10;
params.sb = req->hw_sb;
params.sb_idx = req->sb_index;
+ p_queue->p_rx_cid = _qed_eth_queue_to_cid(p_hwfn,
+ vf->opaque_fid,
+ p_queue->fw_cid,
+ req->rx_qid, &params);
+ if (!p_queue->p_rx_cid)
+ goto out;
+
/* Legacy VFs have their Producers in a different location, which they
* calculate on their own and clean the producer prior to this.
*/
@@ -1796,21 +1863,19 @@ static void qed_iov_vf_mbx_start_rxq(struct qed_hwfn *p_hwfn,
MSTORM_ETH_VF_PRODS_OFFSET(vf->abs_vf_id, req->rx_qid),
0);
}
+ p_queue->p_rx_cid->b_legacy_vf = b_legacy_vf;
- rc = qed_sp_eth_rxq_start_ramrod(p_hwfn, vf->opaque_fid,
- vf->vf_queues[req->rx_qid].fw_cid,
- &params,
- vf->abs_vf_id + 0x10,
- req->bd_max_bytes,
- req->rxq_addr,
- req->cqe_pbl_addr, req->cqe_pbl_size,
- b_legacy_vf);
-
+ rc = qed_eth_rxq_start_ramrod(p_hwfn,
+ p_queue->p_rx_cid,
+ req->bd_max_bytes,
+ req->rxq_addr,
+ req->cqe_pbl_addr, req->cqe_pbl_size);
if (rc) {
status = PFVF_STATUS_FAILURE;
+ qed_eth_queue_cid_release(p_hwfn, p_queue->p_rx_cid);
+ p_queue->p_rx_cid = NULL;
} else {
status = PFVF_STATUS_SUCCESS;
- vf->vf_queues[req->rx_qid].rxq_active = true;
vf->num_active_rxqs++;
}
@@ -1867,7 +1932,9 @@ static void qed_iov_vf_mbx_start_txq(struct qed_hwfn *p_hwfn,
u8 status = PFVF_STATUS_NO_RESOURCE;
union qed_qm_pq_params pq_params;
struct vfpf_start_txq_tlv *req;
+ struct qed_vf_q_info *p_queue;
int rc;
+ u16 pq;
/* Prepare the parameters which would choose the right PQ */
memset(&pq_params, 0, sizeof(pq_params));
@@ -1881,24 +1948,31 @@ static void qed_iov_vf_mbx_start_txq(struct qed_hwfn *p_hwfn,
!qed_iov_validate_sb(p_hwfn, vf, req->hw_sb))
goto out;
- params.queue_id = vf->vf_queues[req->tx_qid].fw_tx_qid;
+ /* Acquire a new queue-cid */
+ p_queue = &vf->vf_queues[req->tx_qid];
+
+ params.queue_id = p_queue->fw_tx_qid;
params.vport_id = vf->vport_id;
+ params.stats_id = vf->abs_vf_id + 0x10;
params.sb = req->hw_sb;
params.sb_idx = req->sb_index;
- rc = qed_sp_eth_txq_start_ramrod(p_hwfn,
- vf->opaque_fid,
- vf->vf_queues[req->tx_qid].fw_cid,
- &params,
- vf->abs_vf_id + 0x10,
- req->pbl_addr,
- req->pbl_size, &pq_params);
+ p_queue->p_tx_cid = _qed_eth_queue_to_cid(p_hwfn,
+ vf->opaque_fid,
+ p_queue->fw_cid,
+ req->tx_qid, &params);
+ if (!p_queue->p_tx_cid)
+ goto out;
+ pq = qed_get_qm_pq(p_hwfn, PROTOCOLID_ETH, &pq_params);
+ rc = qed_eth_txq_start_ramrod(p_hwfn, p_queue->p_tx_cid,
+ req->pbl_addr, req->pbl_size, pq);
if (rc) {
status = PFVF_STATUS_FAILURE;
+ qed_eth_queue_cid_release(p_hwfn, p_queue->p_tx_cid);
+ p_queue->p_tx_cid = NULL;
} else {
status = PFVF_STATUS_SUCCESS;
- vf->vf_queues[req->tx_qid].txq_active = true;
}
out:
@@ -1909,6 +1983,7 @@ static int qed_iov_vf_stop_rxqs(struct qed_hwfn *p_hwfn,
struct qed_vf_info *vf,
u16 rxq_id, u8 num_rxqs, bool cqe_completion)
{
+ struct qed_vf_q_info *p_queue;
int rc = 0;
int qid;
@@ -1916,16 +1991,18 @@ static int qed_iov_vf_stop_rxqs(struct qed_hwfn *p_hwfn,
return -EINVAL;
for (qid = rxq_id; qid < rxq_id + num_rxqs; qid++) {
- if (vf->vf_queues[qid].rxq_active) {
- rc = qed_sp_eth_rx_queue_stop(p_hwfn,
- vf->vf_queues[qid].
- fw_rx_qid, false,
- cqe_completion);
+ p_queue = &vf->vf_queues[qid];
- if (rc)
- return rc;
- }
- vf->vf_queues[qid].rxq_active = false;
+ if (!p_queue->p_rx_cid)
+ continue;
+
+ rc = qed_eth_rx_queue_stop(p_hwfn,
+ p_queue->p_rx_cid,
+ false, cqe_completion);
+ if (rc)
+ return rc;
+
+ vf->vf_queues[qid].p_rx_cid = NULL;
vf->num_active_rxqs--;
}
@@ -1936,22 +2013,24 @@ static int qed_iov_vf_stop_txqs(struct qed_hwfn *p_hwfn,
struct qed_vf_info *vf, u16 txq_id, u8 num_txqs)
{
int rc = 0;
+ struct qed_vf_q_info *p_queue;
int qid;
if (txq_id + num_txqs > ARRAY_SIZE(vf->vf_queues))
return -EINVAL;
for (qid = txq_id; qid < txq_id + num_txqs; qid++) {
- if (vf->vf_queues[qid].txq_active) {
- rc = qed_sp_eth_tx_queue_stop(p_hwfn,
- vf->vf_queues[qid].
- fw_tx_qid);
+ p_queue = &vf->vf_queues[qid];
+ if (!p_queue->p_tx_cid)
+ continue;
- if (rc)
- return rc;
- }
- vf->vf_queues[qid].txq_active = false;
+ rc = qed_eth_tx_queue_stop(p_hwfn, p_queue->p_tx_cid);
+ if (rc)
+ return rc;
+
+ p_queue->p_tx_cid = NULL;
}
+
return rc;
}
@@ -2006,10 +2085,11 @@ static void qed_iov_vf_mbx_update_rxqs(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
struct qed_vf_info *vf)
{
+ struct qed_queue_cid *handlers[QED_MAX_VF_CHAINS_PER_PF];
u16 length = sizeof(struct pfvf_def_resp_tlv);
struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
struct vfpf_update_rxq_tlv *req;
- u8 status = PFVF_STATUS_SUCCESS;
+ u8 status = PFVF_STATUS_FAILURE;
u8 complete_event_flg;
u8 complete_cqe_flg;
u16 qid;
@@ -2020,29 +2100,36 @@ static void qed_iov_vf_mbx_update_rxqs(struct qed_hwfn *p_hwfn,
complete_cqe_flg = !!(req->flags & VFPF_RXQ_UPD_COMPLETE_CQE_FLAG);
complete_event_flg = !!(req->flags & VFPF_RXQ_UPD_COMPLETE_EVENT_FLAG);
+ /* Validate inputs */
+ if (req->num_rxqs + req->rx_qid > QED_MAX_VF_CHAINS_PER_PF ||
+ !qed_iov_validate_rxq(p_hwfn, vf, req->rx_qid)) {
+ DP_INFO(p_hwfn, "VF[%d]: Incorrect Rxqs [%04x, %02x]\n",
+ vf->relative_vf_id, req->rx_qid, req->num_rxqs);
+ goto out;
+ }
+
for (i = 0; i < req->num_rxqs; i++) {
qid = req->rx_qid + i;
-
- if (!vf->vf_queues[qid].rxq_active) {
- DP_NOTICE(p_hwfn, "VF rx_qid = %d isn`t active!\n",
- qid);
- status = PFVF_STATUS_FAILURE;
- break;
+ if (!vf->vf_queues[qid].p_rx_cid) {
+ DP_INFO(p_hwfn,
+ "VF[%d] rx_qid = %d isn`t active!\n",
+ vf->relative_vf_id, qid);
+ goto out;
}
- rc = qed_sp_eth_rx_queues_update(p_hwfn,
- vf->vf_queues[qid].fw_rx_qid,
- 1,
- complete_cqe_flg,
- complete_event_flg,
- QED_SPQ_MODE_EBLOCK, NULL);
-
- if (rc) {
- status = PFVF_STATUS_FAILURE;
- break;
- }
+ handlers[i] = vf->vf_queues[qid].p_rx_cid;
}
+ rc = qed_sp_eth_rx_queues_update(p_hwfn, (void **)&handlers,
+ req->num_rxqs,
+ complete_cqe_flg,
+ complete_event_flg,
+ QED_SPQ_MODE_EBLOCK, NULL);
+ if (rc)
+ goto out;
+
+ status = PFVF_STATUS_SUCCESS;
+out:
qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_UPDATE_RXQ,
length, status);
}
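The rework above also changes the update flow from one ramrod per queue to a single batched call: inputs are validated up front, the cid handles are collected into a local array, and qed_sp_eth_rx_queues_update() is issued once for the whole range. A hedged sketch of that shape, with made-up types in place of the qed structures:

#include <errno.h>
#include <stddef.h>

#define MAX_QUEUES 16

/* Stand-in type; the real code collects struct qed_queue_cid pointers. */
struct cid { int id; };

static int batch_update(struct cid **handlers, int count)
{
	return count > 0 ? 0 : -EINVAL;	/* pretend one ramrod covers all */
}

static int update_range(struct cid *table[MAX_QUEUES], int first, int num)
{
	struct cid *handlers[MAX_QUEUES];
	int i;

	/* Validate the whole range before touching anything */
	if (first + num > MAX_QUEUES)
		return -EINVAL;

	for (i = 0; i < num; i++) {
		if (!table[first + i])	/* queue was never started */
			return -EINVAL;
		handlers[i] = table[first + i];
	}

	/* One call instead of one update per queue */
	return batch_update(handlers, num);
}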
@@ -2253,7 +2340,7 @@ qed_iov_vp_update_rss_param(struct qed_hwfn *p_hwfn,
DP_NOTICE(p_hwfn,
"rss_ind_table[%d] = %d, rxq is out of range\n",
i, q_idx);
- else if (!vf->vf_queues[q_idx].rxq_active)
+ else if (!vf->vf_queues[q_idx].p_rx_cid)
DP_NOTICE(p_hwfn,
"rss_ind_table[%d] = %d, rxq is not active\n",
i, q_idx);
@@ -2804,6 +2891,13 @@ qed_iov_execute_vf_flr_cleanup(struct qed_hwfn *p_hwfn,
return rc;
}
+ /* Workaround to make VF-PF channel ready, as FW
+ * doesn't do that as a part of FLR.
+ */
+ REG_WR(p_hwfn,
+ GTT_BAR0_MAP_REG_USDM_RAM +
+ USTORM_VF_PF_CHANNEL_READY_OFFSET(vfid), 1);
+
/* VF_STOPPED has to be set only after final cleanup
* but prior to re-enabling the VF.
*/
@@ -2942,7 +3036,8 @@ static void qed_iov_process_mbx_req(struct qed_hwfn *p_hwfn,
mbx->first_tlv = mbx->req_virt->first_tlv;
/* check if tlv type is known */
- if (qed_iov_tlv_supported(mbx->first_tlv.tl.type)) {
+ if (qed_iov_tlv_supported(mbx->first_tlv.tl.type) &&
+ !p_vf->b_malicious) {
switch (mbx->first_tlv.tl.type) {
case CHANNEL_TLV_ACQUIRE:
qed_iov_vf_mbx_acquire(p_hwfn, p_ptt, p_vf);
@@ -2984,6 +3079,15 @@ static void qed_iov_process_mbx_req(struct qed_hwfn *p_hwfn,
qed_iov_vf_mbx_release(p_hwfn, p_ptt, p_vf);
break;
}
+ } else if (qed_iov_tlv_supported(mbx->first_tlv.tl.type)) {
+ DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+ "VF [%02x] - considered malicious; Ignoring TLV [%04x]\n",
+ p_vf->abs_vf_id, mbx->first_tlv.tl.type);
+
+ qed_iov_prepare_resp(p_hwfn, p_ptt, p_vf,
+ mbx->first_tlv.tl.type,
+ sizeof(struct pfvf_def_resp_tlv),
+ PFVF_STATUS_MALICIOUS);
} else {
/* unknown TLV - this may belong to a VF driver from the future
* - a version written after this PF driver was written, which
@@ -3033,20 +3137,30 @@ static void qed_iov_pf_get_and_clear_pending_events(struct qed_hwfn *p_hwfn,
memset(p_pending_events, 0, sizeof(u64) * QED_VF_ARRAY_LENGTH);
}
-static int qed_sriov_vfpf_msg(struct qed_hwfn *p_hwfn,
- u16 abs_vfid, struct regpair *vf_msg)
+static struct qed_vf_info *qed_sriov_get_vf_from_absid(struct qed_hwfn *p_hwfn,
+ u16 abs_vfid)
{
- u8 min = (u8)p_hwfn->cdev->p_iov_info->first_vf_in_pf;
- struct qed_vf_info *p_vf;
+ u8 min = (u8) p_hwfn->cdev->p_iov_info->first_vf_in_pf;
- if (!qed_iov_pf_sanity_check(p_hwfn, (int)abs_vfid - min)) {
+ if (!_qed_iov_pf_sanity_check(p_hwfn, (int)abs_vfid - min, false)) {
DP_VERBOSE(p_hwfn,
QED_MSG_IOV,
- "Got a message from VF [abs 0x%08x] that cannot be handled by PF\n",
+ "Got indication for VF [abs 0x%08x] that cannot be handled by PF\n",
abs_vfid);
- return 0;
+ return NULL;
}
- p_vf = &p_hwfn->pf_iov_info->vfs_array[(u8)abs_vfid - min];
+
+ return &p_hwfn->pf_iov_info->vfs_array[(u8) abs_vfid - min];
+}
+
+static int qed_sriov_vfpf_msg(struct qed_hwfn *p_hwfn,
+ u16 abs_vfid, struct regpair *vf_msg)
+{
+ struct qed_vf_info *p_vf = qed_sriov_get_vf_from_absid(p_hwfn,
+ abs_vfid);
+
+ if (!p_vf)
+ return 0;
/* List the physical address of the request so that handler
* could later on copy the message from it.
@@ -3060,6 +3174,23 @@ static int qed_sriov_vfpf_msg(struct qed_hwfn *p_hwfn,
return 0;
}
+static void qed_sriov_vfpf_malicious(struct qed_hwfn *p_hwfn,
+ struct malicious_vf_eqe_data *p_data)
+{
+ struct qed_vf_info *p_vf;
+
+ p_vf = qed_sriov_get_vf_from_absid(p_hwfn, p_data->vf_id);
+
+ if (!p_vf)
+ return;
+
+ DP_INFO(p_hwfn,
+ "VF [%d] - Malicious behavior [%02x]\n",
+ p_vf->abs_vf_id, p_data->err_id);
+
+ p_vf->b_malicious = true;
+}
+
int qed_sriov_eqe_event(struct qed_hwfn *p_hwfn,
u8 opcode, __le16 echo, union event_ring_data *data)
{
@@ -3067,6 +3198,9 @@ int qed_sriov_eqe_event(struct qed_hwfn *p_hwfn,
case COMMON_EVENT_VF_PF_CHANNEL:
return qed_sriov_vfpf_msg(p_hwfn, le16_to_cpu(echo),
&data->vf_pf_channel.msg_addr);
+ case COMMON_EVENT_MALICIOUS_VF:
+ qed_sriov_vfpf_malicious(p_hwfn, &data->malicious_vf);
+ return 0;
default:
DP_INFO(p_hwfn->cdev, "Unknown sriov eqe event 0x%02x\n",
opcode);
@@ -3083,7 +3217,7 @@ u16 qed_iov_get_next_active_vf(struct qed_hwfn *p_hwfn, u16 rel_vf_id)
goto out;
for (i = rel_vf_id; i < p_iov->total_vfs; i++)
- if (qed_iov_is_valid_vfid(p_hwfn, rel_vf_id, true))
+ if (qed_iov_is_valid_vfid(p_hwfn, rel_vf_id, true, false))
return i;
out:
@@ -3130,6 +3264,12 @@ static void qed_iov_bulletin_set_forced_mac(struct qed_hwfn *p_hwfn,
return;
}
+ if (vf_info->b_malicious) {
+ DP_NOTICE(p_hwfn->cdev,
+ "Can't set forced MAC to malicious VF [%d]\n", vfid);
+ return;
+ }
+
feature = 1 << MAC_ADDR_FORCED;
memcpy(vf_info->bulletin.p_virt->mac, mac, ETH_ALEN);
@@ -3153,6 +3293,12 @@ static void qed_iov_bulletin_set_forced_vlan(struct qed_hwfn *p_hwfn,
return;
}
+ if (vf_info->b_malicious) {
+ DP_NOTICE(p_hwfn->cdev,
+ "Can't set forced vlan to malicious VF [%d]\n", vfid);
+ return;
+ }
+
feature = 1 << VLAN_ADDR_FORCED;
vf_info->bulletin.p_virt->pvid = pvid;
if (pvid)
@@ -3367,7 +3513,7 @@ int qed_sriov_disable(struct qed_dev *cdev, bool pci_enabled)
qed_for_each_vf(hwfn, j) {
int k;
- if (!qed_iov_is_valid_vfid(hwfn, j, true))
+ if (!qed_iov_is_valid_vfid(hwfn, j, true, false))
continue;
/* Wait until VF is disabled before releasing */
@@ -3394,9 +3540,28 @@ int qed_sriov_disable(struct qed_dev *cdev, bool pci_enabled)
return 0;
}
+static void qed_sriov_enable_qid_config(struct qed_hwfn *hwfn,
+ u16 vfid,
+ struct qed_iov_vf_init_params *params)
+{
+ u16 base, i;
+
+ /* Since resources are distributed equally per-VF, and we assume the
+ * PF has acquired the first QED_PF_L2_QUE queues, we start assigning
+ * sequentially from there.
+ */
+ base = FEAT_NUM(hwfn, QED_PF_L2_QUE) + vfid * params->num_queues;
+
+ params->rel_vf_id = vfid;
+ for (i = 0; i < params->num_queues; i++) {
+ params->req_rx_queue[i] = base + i;
+ params->req_tx_queue[i] = base + i;
+ }
+}
+
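qed_sriov_enable_qid_config() assigns qzones linearly, so the range each VF receives can be computed by hand. A small standalone check of the arithmetic; the feature counts (64 PF queues, 4 queues per VF) are assumed example numbers, not values read from hardware:

#include <stdio.h>

#define PF_L2_QUEUES	64	/* assumed: qzones owned by the PF */
#define QUEUES_PER_VF	4	/* assumed: equal per-VF share */

int main(void)
{
	unsigned int vfid;

	for (vfid = 0; vfid < 3; vfid++) {
		unsigned int base = PF_L2_QUEUES + vfid * QUEUES_PER_VF;

		/* VF 0 -> 64..67, VF 1 -> 68..71, VF 2 -> 72..75 */
		printf("VF %u: qzones %u..%u\n",
		       vfid, base, base + QUEUES_PER_VF - 1);
	}

	return 0;
}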
static int qed_sriov_enable(struct qed_dev *cdev, int num)
{
- struct qed_sb_cnt_info sb_cnt_info;
+ struct qed_iov_vf_init_params params;
int i, j, rc;
if (num >= RESC_NUM(&cdev->hwfns[0], QED_VPORT)) {
@@ -3405,11 +3570,17 @@ static int qed_sriov_enable(struct qed_dev *cdev, int num)
return -EINVAL;
}
+ memset(&params, 0, sizeof(params));
+
/* Initialize HW for VF access */
for_each_hwfn(cdev, j) {
struct qed_hwfn *hwfn = &cdev->hwfns[j];
struct qed_ptt *ptt = qed_ptt_acquire(hwfn);
- int num_sbs = 0, limit = 16;
+
+ /* Make sure not to use more than 16 queues per VF */
+ params.num_queues = min_t(int,
+ FEAT_NUM(hwfn, QED_VF_L2_QUE) / num,
+ 16);
if (!ptt) {
DP_ERR(hwfn, "Failed to acquire ptt\n");
@@ -3417,19 +3588,12 @@ static int qed_sriov_enable(struct qed_dev *cdev, int num)
goto err;
}
- if (IS_MF_DEFAULT(hwfn))
- limit = MAX_NUM_VFS_BB / hwfn->num_funcs_on_engine;
-
- memset(&sb_cnt_info, 0, sizeof(sb_cnt_info));
- qed_int_get_num_sbs(hwfn, &sb_cnt_info);
- num_sbs = min_t(int, sb_cnt_info.sb_free_blk, limit);
-
for (i = 0; i < num; i++) {
- if (!qed_iov_is_valid_vfid(hwfn, i, false))
+ if (!qed_iov_is_valid_vfid(hwfn, i, false, true))
continue;
- rc = qed_iov_init_hw_for_vf(hwfn,
- ptt, i, num_sbs / num);
+ qed_sriov_enable_qid_config(hwfn, i, &params);
+ rc = qed_iov_init_hw_for_vf(hwfn, ptt, &params);
if (rc) {
DP_ERR(cdev, "Failed to enable VF[%d]\n", i);
qed_ptt_release(hwfn, ptt);
@@ -3477,7 +3641,7 @@ static int qed_sriov_pf_set_mac(struct qed_dev *cdev, u8 *mac, int vfid)
return -EINVAL;
}
- if (!qed_iov_is_valid_vfid(&cdev->hwfns[0], vfid, true)) {
+ if (!qed_iov_is_valid_vfid(&cdev->hwfns[0], vfid, true, true)) {
DP_VERBOSE(cdev, QED_MSG_IOV,
"Cannot set VF[%d] MAC (VF is not active)\n", vfid);
return -EINVAL;
@@ -3509,7 +3673,7 @@ static int qed_sriov_pf_set_vlan(struct qed_dev *cdev, u16 vid, int vfid)
return -EINVAL;
}
- if (!qed_iov_is_valid_vfid(&cdev->hwfns[0], vfid, true)) {
+ if (!qed_iov_is_valid_vfid(&cdev->hwfns[0], vfid, true, true)) {
DP_VERBOSE(cdev, QED_MSG_IOV,
"Cannot set VF[%d] MAC (VF is not active)\n", vfid);
return -EINVAL;
@@ -3543,7 +3707,7 @@ static int qed_get_vf_config(struct qed_dev *cdev,
if (IS_VF(cdev))
return -EINVAL;
- if (!qed_iov_is_valid_vfid(&cdev->hwfns[0], vf_id, true)) {
+ if (!qed_iov_is_valid_vfid(&cdev->hwfns[0], vf_id, true, false)) {
DP_VERBOSE(cdev, QED_MSG_IOV,
"VF index [%d] isn't active\n", vf_id);
return -EINVAL;
@@ -3647,7 +3811,7 @@ static int qed_set_vf_link_state(struct qed_dev *cdev,
if (IS_VF(cdev))
return -EINVAL;
- if (!qed_iov_is_valid_vfid(&cdev->hwfns[0], vf_id, true)) {
+ if (!qed_iov_is_valid_vfid(&cdev->hwfns[0], vf_id, true, true)) {
DP_VERBOSE(cdev, QED_MSG_IOV,
"VF index [%d] isn't active\n", vf_id);
return -EINVAL;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.h b/drivers/net/ethernet/qlogic/qed/qed_sriov.h
index 0dd23e409b3f..509c02b4772e 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sriov.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.h
@@ -58,6 +58,23 @@ struct qed_public_vf_info {
int tx_rate;
};
+struct qed_iov_vf_init_params {
+ u16 rel_vf_id;
+
+ /* Number of requested queues; we currently don't support a
+ * different number of Rx and Tx queues.
+ */
+
+ u16 num_queues;
+
+ /* Allow the client to choose which qzones to use for Rx/Tx,
+ * and which queue_base to use for Tx queues on a per-queue basis.
+ * Note that values should be relative to the PF resources.
+ */
+ u16 req_rx_queue[QED_MAX_VF_CHAINS_PER_PF];
+ u16 req_tx_queue[QED_MAX_VF_CHAINS_PER_PF];
+};
+
/* This struct is part of qed_dev and contains data relevant to all hwfns;
* Initialized only if SR-IOV capability is exposed in PCIe config space.
*/
@@ -99,10 +116,10 @@ struct qed_iov_vf_mbx {
struct qed_vf_q_info {
u16 fw_rx_qid;
+ struct qed_queue_cid *p_rx_cid;
u16 fw_tx_qid;
+ struct qed_queue_cid *p_tx_cid;
u8 fw_cid;
- u8 rxq_active;
- u8 txq_active;
};
enum vf_state {
@@ -132,6 +149,7 @@ struct qed_vf_info {
struct qed_iov_vf_mbx vf_mbx;
enum vf_state state;
bool b_init;
+ bool b_malicious;
u8 to_disable;
struct qed_bulletin bulletin;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_vf.c b/drivers/net/ethernet/qlogic/qed/qed_vf.c
index abf5bf11f865..60b31a8ede73 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_vf.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_vf.c
@@ -388,18 +388,18 @@ free_p_iov:
#define MSTORM_QZONE_START(dev) (TSTORM_QZONE_START + \
(TSTORM_QZONE_SIZE * NUM_OF_L2_QUEUES(dev)))
-int qed_vf_pf_rxq_start(struct qed_hwfn *p_hwfn,
- u8 rx_qid,
- u16 sb,
- u8 sb_index,
- u16 bd_max_bytes,
- dma_addr_t bd_chain_phys_addr,
- dma_addr_t cqe_pbl_addr,
- u16 cqe_pbl_size, void __iomem **pp_prod)
+int
+qed_vf_pf_rxq_start(struct qed_hwfn *p_hwfn,
+ struct qed_queue_cid *p_cid,
+ u16 bd_max_bytes,
+ dma_addr_t bd_chain_phys_addr,
+ dma_addr_t cqe_pbl_addr,
+ u16 cqe_pbl_size, void __iomem **pp_prod)
{
struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
struct pfvf_start_queue_resp_tlv *resp;
struct vfpf_start_rxq_tlv *req;
+ u8 rx_qid = p_cid->rel.queue_id;
int rc;
/* clear mailbox and prep first tlv */
@@ -409,21 +409,22 @@ int qed_vf_pf_rxq_start(struct qed_hwfn *p_hwfn,
req->cqe_pbl_addr = cqe_pbl_addr;
req->cqe_pbl_size = cqe_pbl_size;
req->rxq_addr = bd_chain_phys_addr;
- req->hw_sb = sb;
- req->sb_index = sb_index;
+ req->hw_sb = p_cid->rel.sb;
+ req->sb_index = p_cid->rel.sb_idx;
req->bd_max_bytes = bd_max_bytes;
req->stat_id = -1;
/* If PF is legacy, we'll need to calculate producers ourselves
* as well as clean them.
*/
- if (pp_prod && p_iov->b_pre_fp_hsi) {
+ if (p_iov->b_pre_fp_hsi) {
u8 hw_qid = p_iov->acquire_resp.resc.hw_qid[rx_qid];
u32 init_prod_val = 0;
- *pp_prod = (u8 __iomem *)p_hwfn->regview +
- MSTORM_QZONE_START(p_hwfn->cdev) +
- hw_qid * MSTORM_QZONE_SIZE;
+ *pp_prod = (u8 __iomem *)
+ p_hwfn->regview +
+ MSTORM_QZONE_START(p_hwfn->cdev) +
+ hw_qid * MSTORM_QZONE_SIZE;
/* Init the rcq, rx bd and rx sge (if valid) producers to 0 */
__internal_ram_wr(p_hwfn, *pp_prod, sizeof(u32),
@@ -444,7 +445,7 @@ int qed_vf_pf_rxq_start(struct qed_hwfn *p_hwfn,
}
/* Learn the address of the producer from the response */
- if (pp_prod && !p_iov->b_pre_fp_hsi) {
+ if (!p_iov->b_pre_fp_hsi) {
u32 init_prod_val = 0;
*pp_prod = (u8 __iomem *)p_hwfn->regview + resp->offset;
@@ -462,7 +463,8 @@ exit:
return rc;
}
-int qed_vf_pf_rxq_stop(struct qed_hwfn *p_hwfn, u16 rx_qid, bool cqe_completion)
+int qed_vf_pf_rxq_stop(struct qed_hwfn *p_hwfn,
+ struct qed_queue_cid *p_cid, bool cqe_completion)
{
struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
struct vfpf_stop_rxqs_tlv *req;
@@ -472,7 +474,7 @@ int qed_vf_pf_rxq_stop(struct qed_hwfn *p_hwfn, u16 rx_qid, bool cqe_completion)
/* clear mailbox and prep first tlv */
req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_STOP_RXQS, sizeof(*req));
- req->rx_qid = rx_qid;
+ req->rx_qid = p_cid->rel.queue_id;
req->num_rxqs = 1;
req->cqe_completion = cqe_completion;
@@ -496,28 +498,28 @@ exit:
return rc;
}
-int qed_vf_pf_txq_start(struct qed_hwfn *p_hwfn,
- u16 tx_queue_id,
- u16 sb,
- u8 sb_index,
- dma_addr_t pbl_addr,
- u16 pbl_size, void __iomem **pp_doorbell)
+int
+qed_vf_pf_txq_start(struct qed_hwfn *p_hwfn,
+ struct qed_queue_cid *p_cid,
+ dma_addr_t pbl_addr,
+ u16 pbl_size, void __iomem **pp_doorbell)
{
struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
struct pfvf_start_queue_resp_tlv *resp;
struct vfpf_start_txq_tlv *req;
+ u16 qid = p_cid->rel.queue_id;
int rc;
/* clear mailbox and prep first tlv */
req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_START_TXQ, sizeof(*req));
- req->tx_qid = tx_queue_id;
+ req->tx_qid = qid;
/* Tx */
req->pbl_addr = pbl_addr;
req->pbl_size = pbl_size;
- req->hw_sb = sb;
- req->sb_index = sb_index;
+ req->hw_sb = p_cid->rel.sb;
+ req->sb_index = p_cid->rel.sb_idx;
/* add list termination tlv */
qed_add_tlv(p_hwfn, &p_iov->offset,
@@ -533,33 +535,29 @@ int qed_vf_pf_txq_start(struct qed_hwfn *p_hwfn,
goto exit;
}
- if (pp_doorbell) {
- /* Modern PFs provide the actual offsets, while legacy
- * provided only the queue id.
- */
- if (!p_iov->b_pre_fp_hsi) {
- *pp_doorbell = (u8 __iomem *)p_hwfn->doorbells +
- resp->offset;
- } else {
- u8 cid = p_iov->acquire_resp.resc.cid[tx_queue_id];
- u32 db_addr;
-
- db_addr = qed_db_addr_vf(cid, DQ_DEMS_LEGACY);
- *pp_doorbell = (u8 __iomem *)p_hwfn->doorbells +
- db_addr;
- }
+ /* Modern PFs provide the actual offsets, while legacy PFs
+ * provided only the queue id.
+ */
+ if (!p_iov->b_pre_fp_hsi) {
+ *pp_doorbell = (u8 __iomem *)p_hwfn->doorbells + resp->offset;
+ } else {
+ u8 cid = p_iov->acquire_resp.resc.cid[qid];
- DP_VERBOSE(p_hwfn, QED_MSG_IOV,
- "Txq[0x%02x]: doorbell at %p [offset 0x%08x]\n",
- tx_queue_id, *pp_doorbell, resp->offset);
+ *pp_doorbell = (u8 __iomem *)p_hwfn->doorbells +
+ qed_db_addr_vf(cid,
+ DQ_DEMS_LEGACY);
}
+
+ DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+ "Txq[0x%02x]: doorbell at %p [offset 0x%08x]\n",
+ qid, *pp_doorbell, resp->offset);
exit:
qed_vf_pf_req_end(p_hwfn, rc);
return rc;
}
-int qed_vf_pf_txq_stop(struct qed_hwfn *p_hwfn, u16 tx_qid)
+int qed_vf_pf_txq_stop(struct qed_hwfn *p_hwfn, struct qed_queue_cid *p_cid)
{
struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
struct vfpf_stop_txqs_tlv *req;
@@ -569,7 +567,7 @@ int qed_vf_pf_txq_stop(struct qed_hwfn *p_hwfn, u16 tx_qid)
/* clear mailbox and prep first tlv */
req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_STOP_TXQS, sizeof(*req));
- req->tx_qid = tx_qid;
+ req->tx_qid = p_cid->rel.queue_id;
req->num_txqs = 1;
/* add list termination tlv */
@@ -1171,6 +1169,13 @@ void qed_vf_get_num_vlan_filters(struct qed_hwfn *p_hwfn, u8 *num_vlan_filters)
*num_vlan_filters = p_vf->acquire_resp.resc.num_vlan_filters;
}
+void qed_vf_get_num_mac_filters(struct qed_hwfn *p_hwfn, u8 *num_mac_filters)
+{
+ struct qed_vf_iov *p_vf = p_hwfn->vf_iov_info;
+
+ *num_mac_filters = p_vf->acquire_resp.resc.num_mac_filters;
+}
+
bool qed_vf_check_mac(struct qed_hwfn *p_hwfn, u8 *mac)
{
struct qed_bulletin_content *bulletin;
@@ -1230,8 +1235,8 @@ static void qed_handle_bulletin_change(struct qed_hwfn *hwfn)
is_mac_exist = qed_vf_bulletin_get_forced_mac(hwfn, mac,
&is_mac_forced);
- if (is_mac_exist && is_mac_forced && cookie)
- ops->force_mac(cookie, mac);
+ if (is_mac_exist && cookie)
+ ops->force_mac(cookie, mac, !!is_mac_forced);
/* Always update link configuration according to bulletin */
qed_link_update(hwfn);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_vf.h b/drivers/net/ethernet/qlogic/qed/qed_vf.h
index 35db7a28aa13..11eb3854e6f2 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_vf.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_vf.h
@@ -40,6 +40,7 @@ enum {
PFVF_STATUS_NOT_SUPPORTED,
PFVF_STATUS_NO_RESOURCE,
PFVF_STATUS_FORCED,
+ PFVF_STATUS_MALICIOUS,
};
/* vf pf channel tlvs */
@@ -622,6 +623,14 @@ void qed_vf_get_num_vlan_filters(struct qed_hwfn *p_hwfn,
u8 *num_vlan_filters);
/**
+ * @brief Get number of MAC filters allocated for VF by qed
+ *
+ * @param p_hwfn
+ * @param num_mac_filters - allocated MAC filters
+ */
+void qed_vf_get_num_mac_filters(struct qed_hwfn *p_hwfn, u8 *num_mac_filters);
+
+/**
* @brief Check if VF can set a MAC address
*
* @param p_hwfn
@@ -657,10 +666,7 @@ int qed_vf_hw_prepare(struct qed_hwfn *p_hwfn);
/**
* @brief VF - start the RX Queue by sending a message to the PF
* @param p_hwfn
- * @param cid - zero based within the VF
- * @param rx_queue_id - zero based within the VF
- * @param sb - VF status block for this queue
- * @param sb_index - Index within the status block
+ * @param p_cid - Only relative fields are relevant
* @param bd_max_bytes - maximum number of bytes per bd
* @param bd_chain_phys_addr - physical address of bd chain
* @param cqe_pbl_addr - physical address of pbl
@@ -671,9 +677,7 @@ int qed_vf_hw_prepare(struct qed_hwfn *p_hwfn);
* @return int
*/
int qed_vf_pf_rxq_start(struct qed_hwfn *p_hwfn,
- u8 rx_queue_id,
- u16 sb,
- u8 sb_index,
+ struct qed_queue_cid *p_cid,
u16 bd_max_bytes,
dma_addr_t bd_chain_phys_addr,
dma_addr_t cqe_pbl_addr,
@@ -693,24 +697,23 @@ int qed_vf_pf_rxq_start(struct qed_hwfn *p_hwfn,
*
* @return int
*/
-int qed_vf_pf_txq_start(struct qed_hwfn *p_hwfn,
- u16 tx_queue_id,
- u16 sb,
- u8 sb_index,
- dma_addr_t pbl_addr,
- u16 pbl_size, void __iomem **pp_doorbell);
+int
+qed_vf_pf_txq_start(struct qed_hwfn *p_hwfn,
+ struct qed_queue_cid *p_cid,
+ dma_addr_t pbl_addr,
+ u16 pbl_size, void __iomem **pp_doorbell);
/**
* @brief VF - stop the RX queue by sending a message to the PF
*
* @param p_hwfn
- * @param rx_qid
+ * @param p_cid
* @param cqe_completion
*
* @return int
*/
int qed_vf_pf_rxq_stop(struct qed_hwfn *p_hwfn,
- u16 rx_qid, bool cqe_completion);
+ struct qed_queue_cid *p_cid, bool cqe_completion);
/**
* @brief VF - stop the TX queue by sending a message to the PF
@@ -720,7 +723,7 @@ int qed_vf_pf_rxq_stop(struct qed_hwfn *p_hwfn,
*
* @return int
*/
-int qed_vf_pf_txq_stop(struct qed_hwfn *p_hwfn, u16 tx_qid);
+int qed_vf_pf_txq_stop(struct qed_hwfn *p_hwfn, struct qed_queue_cid *p_cid);
/**
* @brief VF - send a vport update command
@@ -871,6 +874,11 @@ static inline void qed_vf_get_num_vlan_filters(struct qed_hwfn *p_hwfn,
{
}
+static inline void qed_vf_get_num_mac_filters(struct qed_hwfn *p_hwfn,
+ u8 *num_mac_filters)
+{
+}
+
static inline bool qed_vf_check_mac(struct qed_hwfn *p_hwfn, u8 *mac)
{
return false;
@@ -888,9 +896,7 @@ static inline int qed_vf_hw_prepare(struct qed_hwfn *p_hwfn)
}
static inline int qed_vf_pf_rxq_start(struct qed_hwfn *p_hwfn,
- u8 rx_queue_id,
- u16 sb,
- u8 sb_index,
+ struct qed_queue_cid *p_cid,
u16 bd_max_bytes,
dma_addr_t bd_chain_phys_adr,
dma_addr_t cqe_pbl_addr,
@@ -900,9 +906,7 @@ static inline int qed_vf_pf_rxq_start(struct qed_hwfn *p_hwfn,
}
static inline int qed_vf_pf_txq_start(struct qed_hwfn *p_hwfn,
- u16 tx_queue_id,
- u16 sb,
- u8 sb_index,
+ struct qed_queue_cid *p_cid,
dma_addr_t pbl_addr,
u16 pbl_size, void __iomem **pp_doorbell)
{
@@ -910,12 +914,14 @@ static inline int qed_vf_pf_txq_start(struct qed_hwfn *p_hwfn,
}
static inline int qed_vf_pf_rxq_stop(struct qed_hwfn *p_hwfn,
- u16 rx_qid, bool cqe_completion)
+ struct qed_queue_cid *p_cid,
+ bool cqe_completion)
{
return -EINVAL;
}
-static inline int qed_vf_pf_txq_stop(struct qed_hwfn *p_hwfn, u16 tx_qid)
+static inline int qed_vf_pf_txq_stop(struct qed_hwfn *p_hwfn,
+ struct qed_queue_cid *p_cid)
{
return -EINVAL;
}
diff --git a/drivers/net/ethernet/qlogic/qede/qede.h b/drivers/net/ethernet/qlogic/qede/qede.h
index 974689a13337..c79dc78746fc 100644
--- a/drivers/net/ethernet/qlogic/qede/qede.h
+++ b/drivers/net/ethernet/qlogic/qede/qede.h
@@ -16,6 +16,7 @@
#include <linux/bitmap.h>
#include <linux/kernel.h>
#include <linux/mutex.h>
+#include <linux/bpf.h>
#include <linux/io.h>
#include <linux/qed/common_hsi.h>
#include <linux/qed/eth_common.h>
@@ -127,10 +128,9 @@ struct qede_dev {
const struct qed_eth_ops *ops;
- struct qed_dev_eth_info dev_info;
+ struct qed_dev_eth_info dev_info;
#define QEDE_MAX_RSS_CNT(edev) ((edev)->dev_info.num_queues)
-#define QEDE_MAX_TSS_CNT(edev) ((edev)->dev_info.num_queues * \
- (edev)->dev_info.num_tc)
+#define QEDE_MAX_TSS_CNT(edev) ((edev)->dev_info.num_queues)
struct qede_fastpath *fp_array;
u8 req_num_tx;
@@ -139,17 +139,9 @@ struct qede_dev {
u8 fp_num_rx;
u16 req_queues;
u16 num_queues;
- u8 num_tc;
#define QEDE_QUEUE_CNT(edev) ((edev)->num_queues)
#define QEDE_RSS_COUNT(edev) ((edev)->num_queues - (edev)->fp_num_tx)
-#define QEDE_TSS_COUNT(edev) (((edev)->num_queues - (edev)->fp_num_rx) * \
- (edev)->num_tc)
-#define QEDE_TX_IDX(edev, txqidx) ((edev)->fp_num_rx + (txqidx) % \
- QEDE_TSS_COUNT(edev))
-#define QEDE_TC_IDX(edev, txqidx) ((txqidx) / QEDE_TSS_COUNT(edev))
-#define QEDE_TX_QUEUE(edev, txqidx) \
- (&(edev)->fp_array[QEDE_TX_IDX((edev), (txqidx))].txqs[QEDE_TC_IDX(\
- (edev), (txqidx))])
+#define QEDE_TSS_COUNT(edev) ((edev)->num_queues - (edev)->fp_num_rx)
struct qed_int_info int_info;
unsigned char primary_mac[ETH_ALEN];
@@ -193,7 +185,11 @@ struct qede_dev {
u16 vxlan_dst_port;
u16 geneve_dst_port;
+ bool wol_enabled;
+
struct qede_rdma_dev rdma_info;
+
+ struct bpf_prog *xdp_prog;
};
enum QEDE_STATE {
@@ -223,39 +219,67 @@ enum qede_agg_state {
};
struct qede_agg_info {
- struct sw_rx_data replace_buf;
- dma_addr_t replace_buf_mapping;
- struct sw_rx_data start_buf;
- dma_addr_t start_buf_mapping;
- struct eth_fast_path_rx_tpa_start_cqe start_cqe;
- enum qede_agg_state agg_state;
+ /* buffer is a data buffer that can be placed on / consumed from the
+ * rx bd chain. It has two purposes: we will preallocate the data
+ * buffer for each aggregation when we open the interface and will
+ * place this buffer on the rx-bd-ring when we receive TPA_START.
+ * We don't want to be in a state where allocation fails, as we can't
+ * reuse the consumer buffer in the rx-chain since FW may still be
+ * writing to it (the header needs to be modified for TPA).
+ * The second purpose is to keep a pointer to the bd buffer during
+ * aggregation.
+ */
+ struct sw_rx_data buffer;
+ dma_addr_t buffer_mapping;
+
struct sk_buff *skb;
- int frag_id;
+
+ /* We need to keep some fields from the start CQE until termination */
u16 vlan_tag;
+ u16 start_cqe_bd_len;
+ u8 start_cqe_placement_offset;
+
+ u8 state;
+ u8 frag_id;
+
+ u8 tunnel_type;
};
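The comment above describes a swap, not an allocation, on the TPA_START path. A minimal sketch of that idea, with plain pointers standing in for the driver's sw_rx_data and DMA bookkeeping:

/* Illustrative only; the real driver also transfers DMA mappings. */
struct buf { void *data; };

struct agg {
	struct buf buffer;	/* preallocated when the interface opens */
};

/* On TPA_START: the consumed bd buffer stays with the aggregation
 * (FW may still write to it), and the preallocated buffer takes its
 * place on the rx-bd ring, so no allocation can fail here.
 */
static void tpa_start_swap(struct agg *agg, struct buf *ring_slot)
{
	struct buf consumed = *ring_slot;

	*ring_slot = agg->buffer;
	agg->buffer = consumed;
}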
struct qede_rx_queue {
- __le16 *hw_cons_ptr;
- struct sw_rx_data *sw_rx_ring;
- u16 sw_rx_cons;
- u16 sw_rx_prod;
- struct qed_chain rx_bd_ring;
- struct qed_chain rx_comp_ring;
- void __iomem *hw_rxq_prod_addr;
+ __le16 *hw_cons_ptr;
+ void __iomem *hw_rxq_prod_addr;
+
+ /* Required for the allocation of replacement buffers */
+ struct device *dev;
+
+ struct bpf_prog *xdp_prog;
+
+ u16 sw_rx_cons;
+ u16 sw_rx_prod;
+
+ u16 num_rx_buffers; /* Slowpath */
+ u8 data_direction;
+ u8 rxq_id;
+
+ u32 rx_buf_size;
+ u32 rx_buf_seg_size;
+
+ u64 rcv_pkts;
+
+ struct sw_rx_data *sw_rx_ring;
+ struct qed_chain rx_bd_ring;
+ struct qed_chain rx_comp_ring ____cacheline_aligned;
/* GRO */
- struct qede_agg_info tpa_info[ETH_TPA_MAX_AGGS_NUM];
+ struct qede_agg_info tpa_info[ETH_TPA_MAX_AGGS_NUM];
- int rx_buf_size;
- unsigned int rx_buf_seg_size;
+ u64 rx_hw_errors;
+ u64 rx_alloc_errors;
+ u64 rx_ip_frags;
- u16 num_rx_buffers;
- u16 rxq_id;
+ u64 xdp_no_pass;
- u64 rcv_pkts;
- u64 rx_hw_errors;
- u64 rx_alloc_errors;
- u64 rx_ip_frags;
+ void *handle;
};
union db_prod {
@@ -271,20 +295,39 @@ struct sw_tx_bd {
};
struct qede_tx_queue {
- int index; /* Queue index */
- __le16 *hw_cons_ptr;
- struct sw_tx_bd *sw_tx_ring;
- u16 sw_tx_cons;
- u16 sw_tx_prod;
- struct qed_chain tx_pbl;
- void __iomem *doorbell_addr;
- union db_prod tx_db;
-
- u16 num_tx_buffers;
- u64 xmit_pkts;
- u64 stopped_cnt;
-
- bool is_legacy;
+ u8 is_xdp;
+ bool is_legacy;
+ u16 sw_tx_cons;
+ u16 sw_tx_prod;
+ u16 num_tx_buffers; /* Slowpath only */
+
+ u64 xmit_pkts;
+ u64 stopped_cnt;
+
+ __le16 *hw_cons_ptr;
+
+ /* Needed for the mapping of packets */
+ struct device *dev;
+
+ void __iomem *doorbell_addr;
+ union db_prod tx_db;
+ int index; /* Slowpath only */
+#define QEDE_TXQ_XDP_TO_IDX(edev, txq) ((txq)->index - \
+ QEDE_MAX_TSS_CNT(edev))
+#define QEDE_TXQ_IDX_TO_XDP(edev, idx) ((idx) + QEDE_MAX_TSS_CNT(edev))
+
+ /* Regular Tx requires skb + metadata for release purposes,
+ * while XDP requires only the pages themselves.
+ */
+ union {
+ struct sw_tx_bd *skbs;
+ struct page **pages;
+ } sw_tx_ring;
+
+ struct qed_chain tx_pbl;
+
+ /* Slowpath; should be kept at the end [unless it fills missing padding] */
+ void *handle;
};
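The QEDE_TXQ_XDP_TO_IDX()/QEDE_TXQ_IDX_TO_XDP() pair defined in the struct above are plain shifts by the regular Tx queue count, so XDP Tx queues occupy the index space right after the regular ones. A quick standalone check of the mapping; the queue count of 8 is an assumed example:

#include <assert.h>

#define MAX_TSS_CNT	8	/* assumed number of regular Tx queues */

static int xdp_to_idx(int txq_index) { return txq_index - MAX_TSS_CNT; }
static int idx_to_xdp(int idx)       { return idx + MAX_TSS_CNT; }

int main(void)
{
	/* The two macros are inverses of each other */
	assert(xdp_to_idx(idx_to_xdp(3)) == 3);

	/* XDP queue 0 lands right after the last regular Tx queue */
	assert(idx_to_xdp(0) == MAX_TSS_CNT);

	return 0;
}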
#define BD_UNMAP_ADDR(bd) HILO_U64(le32_to_cpu((bd)->addr.hi), \
@@ -301,13 +344,16 @@ struct qede_fastpath {
struct qede_dev *edev;
#define QEDE_FASTPATH_TX BIT(0)
#define QEDE_FASTPATH_RX BIT(1)
+#define QEDE_FASTPATH_XDP BIT(2)
#define QEDE_FASTPATH_COMBINED (QEDE_FASTPATH_TX | QEDE_FASTPATH_RX)
u8 type;
u8 id;
+ u8 xdp_xmit;
struct napi_struct napi;
struct qed_sb_info *sb_info;
struct qede_rx_queue *rxq;
- struct qede_tx_queue *txqs;
+ struct qede_tx_queue *txq;
+ struct qede_tx_queue *xdp_tx;
#define VEC_NAME_SIZE (sizeof(((struct net_device *)0)->name) + 8)
char name[VEC_NAME_SIZE];
@@ -320,6 +366,7 @@ struct qede_fastpath {
#define XMIT_L4_CSUM BIT(0)
#define XMIT_LSO BIT(1)
#define XMIT_ENC BIT(2)
+#define XMIT_ENC_GSO_L4_CSUM BIT(3)
#define QEDE_CSUM_ERROR BIT(0)
#define QEDE_CSUM_UNNECESSARY BIT(1)
@@ -329,8 +376,13 @@ struct qede_fastpath {
#define QEDE_SP_VXLAN_PORT_CONFIG 2
#define QEDE_SP_GENEVE_PORT_CONFIG 3
-union qede_reload_args {
- u16 mtu;
+struct qede_reload_args {
+ void (*func)(struct qede_dev *edev, struct qede_reload_args *args);
+ union {
+ netdev_features_t features;
+ struct bpf_prog *new_prog;
+ u16 mtu;
+ } u;
};
#ifdef CONFIG_DCB
@@ -339,15 +391,14 @@ void qede_set_dcbnl_ops(struct net_device *ndev);
void qede_config_debug(uint debug, u32 *p_dp_module, u8 *p_dp_level);
void qede_set_ethtool_ops(struct net_device *netdev);
void qede_reload(struct qede_dev *edev,
- void (*func)(struct qede_dev *edev,
- union qede_reload_args *args),
- union qede_reload_args *args);
+ struct qede_reload_args *args, bool is_locked);
int qede_change_mtu(struct net_device *dev, int new_mtu);
void qede_fill_by_demand_stats(struct qede_dev *edev);
+void __qede_lock(struct qede_dev *edev);
+void __qede_unlock(struct qede_dev *edev);
bool qede_has_rx_work(struct qede_rx_queue *rxq);
int qede_txq_has_work(struct qede_tx_queue *txq);
-void qede_recycle_rx_bd_ring(struct qede_rx_queue *rxq, struct qede_dev *edev,
- u8 count);
+void qede_recycle_rx_bd_ring(struct qede_rx_queue *rxq, u8 count);
void qede_update_rx_prod(struct qede_dev *edev, struct qede_rx_queue *rxq);
#define RX_RING_SIZE_POW 13
@@ -362,8 +413,9 @@ void qede_update_rx_prod(struct qede_dev *edev, struct qede_rx_queue *rxq);
#define NUM_TX_BDS_MIN 128
#define NUM_TX_BDS_DEF NUM_TX_BDS_MAX
-#define QEDE_MIN_PKT_LEN 64
-#define QEDE_RX_HDR_SIZE 256
+#define QEDE_MIN_PKT_LEN 64
+#define QEDE_RX_HDR_SIZE 256
+#define QEDE_MAX_JUMBO_PACKET_SIZE 9600
#define for_each_queue(i) for (i = 0; i < edev->num_queues; i++)
#endif /* _QEDE_H_ */
diff --git a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
index 7567cc464b88..1c48f445c93b 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
@@ -16,13 +16,6 @@
#include <linux/capability.h>
#include "qede.h"
-#define QEDE_STAT_OFFSET(stat_name) (offsetof(struct qede_stats, stat_name))
-#define QEDE_STAT_STRING(stat_name) (#stat_name)
-#define _QEDE_STAT(stat_name, pf_only) \
- {QEDE_STAT_OFFSET(stat_name), QEDE_STAT_STRING(stat_name), pf_only}
-#define QEDE_PF_STAT(stat_name) _QEDE_STAT(stat_name, true)
-#define QEDE_STAT(stat_name) _QEDE_STAT(stat_name, false)
-
#define QEDE_RQSTAT_OFFSET(stat_name) \
(offsetof(struct qede_rx_queue, stat_name))
#define QEDE_RQSTAT_STRING(stat_name) (#stat_name)
@@ -39,12 +32,10 @@ static const struct {
QEDE_RQSTAT(rx_hw_errors),
QEDE_RQSTAT(rx_alloc_errors),
QEDE_RQSTAT(rx_ip_frags),
+ QEDE_RQSTAT(xdp_no_pass),
};
#define QEDE_NUM_RQSTATS ARRAY_SIZE(qede_rqstats_arr)
-#define QEDE_RQSTATS_DATA(dev, sindex, rqindex) \
- (*((u64 *)(((char *)(dev->fp_array[(rqindex)].rxq)) +\
- qede_rqstats_arr[(sindex)].offset)))
#define QEDE_TQSTAT_OFFSET(stat_name) \
(offsetof(struct qede_tx_queue, stat_name))
#define QEDE_TQSTAT_STRING(stat_name) (#stat_name)
@@ -59,10 +50,12 @@ static const struct {
QEDE_TQSTAT(stopped_cnt),
};
-#define QEDE_TQSTATS_DATA(dev, sindex, tssid, tcid) \
- (*((u64 *)(((void *)(&dev->fp_array[tssid].txqs[tcid])) +\
- qede_tqstats_arr[(sindex)].offset)))
-
+#define QEDE_STAT_OFFSET(stat_name) (offsetof(struct qede_stats, stat_name))
+#define QEDE_STAT_STRING(stat_name) (#stat_name)
+#define _QEDE_STAT(stat_name, pf_only) \
+ {QEDE_STAT_OFFSET(stat_name), QEDE_STAT_STRING(stat_name), pf_only}
+#define QEDE_PF_STAT(stat_name) _QEDE_STAT(stat_name, true)
+#define QEDE_STAT(stat_name) _QEDE_STAT(stat_name, false)
static const struct {
u64 offset;
char string[ETH_GSTRING_LEN];
@@ -136,10 +129,6 @@ static const struct {
QEDE_STAT(coalesced_bytes),
};
-#define QEDE_STATS_DATA(dev, index) \
- (*((u64 *)(((char *)(dev)) + offsetof(struct qede_dev, stats) \
- + qede_stats_arr[(index)].offset)))
-
#define QEDE_NUM_STATS ARRAY_SIZE(qede_stats_arr)
enum {
@@ -157,6 +146,7 @@ enum qede_ethtool_tests {
QEDE_ETHTOOL_MEMORY_TEST,
QEDE_ETHTOOL_REGISTER_TEST,
QEDE_ETHTOOL_CLOCK_TEST,
+ QEDE_ETHTOOL_NVRAM_TEST,
QEDE_ETHTOOL_TEST_MAX
};
@@ -166,41 +156,63 @@ static const char qede_tests_str_arr[QEDE_ETHTOOL_TEST_MAX][ETH_GSTRING_LEN] = {
"Memory (online)\t\t",
"Register (online)\t",
"Clock (online)\t\t",
+ "Nvram (online)\t\t",
};
+static void qede_get_strings_stats_txq(struct qede_dev *edev,
+ struct qede_tx_queue *txq, u8 **buf)
+{
+ int i;
+
+ for (i = 0; i < QEDE_NUM_TQSTATS; i++) {
+ if (txq->is_xdp)
+ sprintf(*buf, "%d [XDP]: %s",
+ QEDE_TXQ_XDP_TO_IDX(edev, txq),
+ qede_tqstats_arr[i].string);
+ else
+ sprintf(*buf, "%d: %s", txq->index,
+ qede_tqstats_arr[i].string);
+ *buf += ETH_GSTRING_LEN;
+ }
+}
+
+static void qede_get_strings_stats_rxq(struct qede_dev *edev,
+ struct qede_rx_queue *rxq, u8 **buf)
+{
+ int i;
+
+ for (i = 0; i < QEDE_NUM_RQSTATS; i++) {
+ sprintf(*buf, "%d: %s", rxq->rxq_id,
+ qede_rqstats_arr[i].string);
+ *buf += ETH_GSTRING_LEN;
+ }
+}
+
static void qede_get_strings_stats(struct qede_dev *edev, u8 *buf)
{
- int i, j, k;
+ struct qede_fastpath *fp;
+ int i;
- for (i = 0, k = 0; i < QEDE_QUEUE_CNT(edev); i++) {
- int tc;
+ /* Account for queue statistics */
+ for (i = 0; i < QEDE_QUEUE_CNT(edev); i++) {
+ fp = &edev->fp_array[i];
- if (edev->fp_array[i].type & QEDE_FASTPATH_RX) {
- for (j = 0; j < QEDE_NUM_RQSTATS; j++)
- sprintf(buf + (k + j) * ETH_GSTRING_LEN,
- "%d: %s", i,
- qede_rqstats_arr[j].string);
- k += QEDE_NUM_RQSTATS;
- }
+ if (fp->type & QEDE_FASTPATH_RX)
+ qede_get_strings_stats_rxq(edev, fp->rxq, &buf);
- if (edev->fp_array[i].type & QEDE_FASTPATH_TX) {
- for (tc = 0; tc < edev->num_tc; tc++) {
- for (j = 0; j < QEDE_NUM_TQSTATS; j++)
- sprintf(buf + (k + j) *
- ETH_GSTRING_LEN,
- "%d.%d: %s", i, tc,
- qede_tqstats_arr[j].string);
- k += QEDE_NUM_TQSTATS;
- }
- }
+ if (fp->type & QEDE_FASTPATH_XDP)
+ qede_get_strings_stats_txq(edev, fp->xdp_tx, &buf);
+
+ if (fp->type & QEDE_FASTPATH_TX)
+ qede_get_strings_stats_txq(edev, fp->txq, &buf);
}
- for (i = 0, j = 0; i < QEDE_NUM_STATS; i++) {
+ /* Account for non-queue statistics */
+ for (i = 0; i < QEDE_NUM_STATS; i++) {
if (IS_VF(edev) && qede_stats_arr[i].pf_only)
continue;
- strcpy(buf + (k + j) * ETH_GSTRING_LEN,
- qede_stats_arr[i].string);
- j++;
+ strcpy(buf, qede_stats_arr[i].string);
+ buf += ETH_GSTRING_LEN;
}
}
@@ -226,42 +238,61 @@ static void qede_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
}
}
+static void qede_get_ethtool_stats_txq(struct qede_tx_queue *txq, u64 **buf)
+{
+ int i;
+
+ for (i = 0; i < QEDE_NUM_TQSTATS; i++) {
+ **buf = *((u64 *)(((void *)txq) + qede_tqstats_arr[i].offset));
+ (*buf)++;
+ }
+}
+
+static void qede_get_ethtool_stats_rxq(struct qede_rx_queue *rxq, u64 **buf)
+{
+ int i;
+
+ for (i = 0; i < QEDE_NUM_RQSTATS; i++) {
+ **buf = *((u64 *)(((void *)rxq) + qede_rqstats_arr[i].offset));
+ (*buf)++;
+ }
+}
+
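The two helpers above flatten the per-queue statistics by walking an offsetof()-based table. A self-contained sketch of that table technique; the struct and its entries are invented for the example:

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

struct rxq {
	uint64_t rcv_pkts;
	uint64_t rx_hw_errors;
};

static const struct {
	size_t offset;
	const char *string;
} rqstats[] = {
	{ offsetof(struct rxq, rcv_pkts),     "rcv_pkts" },
	{ offsetof(struct rxq, rx_hw_errors), "rx_hw_errors" },
};

int main(void)
{
	struct rxq q = { .rcv_pkts = 42, .rx_hw_errors = 1 };
	size_t i;

	/* Each entry names a field and its byte offset inside the struct */
	for (i = 0; i < sizeof(rqstats) / sizeof(rqstats[0]); i++)
		printf("%s = %llu\n", rqstats[i].string,
		       (unsigned long long)
		       *(const uint64_t *)((const char *)&q +
					   rqstats[i].offset));

	return 0;
}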
static void qede_get_ethtool_stats(struct net_device *dev,
struct ethtool_stats *stats, u64 *buf)
{
struct qede_dev *edev = netdev_priv(dev);
- int sidx, cnt = 0;
- int qid;
+ struct qede_fastpath *fp;
+ int i;
qede_fill_by_demand_stats(edev);
- mutex_lock(&edev->qede_lock);
+ /* Need to protect the access to the fastpath array */
+ __qede_lock(edev);
- for (qid = 0; qid < QEDE_QUEUE_CNT(edev); qid++) {
- int tc;
+ for (i = 0; i < QEDE_QUEUE_CNT(edev); i++) {
+ fp = &edev->fp_array[i];
- if (edev->fp_array[qid].type & QEDE_FASTPATH_RX) {
- for (sidx = 0; sidx < QEDE_NUM_RQSTATS; sidx++)
- buf[cnt++] = QEDE_RQSTATS_DATA(edev, sidx, qid);
- }
+ if (fp->type & QEDE_FASTPATH_RX)
+ qede_get_ethtool_stats_rxq(fp->rxq, &buf);
- if (edev->fp_array[qid].type & QEDE_FASTPATH_TX) {
- for (tc = 0; tc < edev->num_tc; tc++) {
- for (sidx = 0; sidx < QEDE_NUM_TQSTATS; sidx++)
- buf[cnt++] = QEDE_TQSTATS_DATA(edev,
- sidx,
- qid, tc);
- }
- }
+ if (fp->type & QEDE_FASTPATH_XDP)
+ qede_get_ethtool_stats_txq(fp->xdp_tx, &buf);
+
+ if (fp->type & QEDE_FASTPATH_TX)
+ qede_get_ethtool_stats_txq(fp->txq, &buf);
}
- for (sidx = 0; sidx < QEDE_NUM_STATS; sidx++) {
- if (IS_VF(edev) && qede_stats_arr[sidx].pf_only)
+ for (i = 0; i < QEDE_NUM_STATS; i++) {
+ if (IS_VF(edev) && qede_stats_arr[i].pf_only)
continue;
- buf[cnt++] = QEDE_STATS_DATA(edev, sidx);
+ *buf = *((u64 *)(((void *)&edev->stats) +
+ qede_stats_arr[i].offset));
+
+ buf++;
}
- mutex_unlock(&edev->qede_lock);
+ __qede_unlock(edev);
}
static int qede_get_sset_count(struct net_device *dev, int stringset)
@@ -278,8 +309,18 @@ static int qede_get_sset_count(struct net_device *dev, int stringset)
if (qede_stats_arr[i].pf_only)
num_stats--;
}
- return num_stats + QEDE_RSS_COUNT(edev) * QEDE_NUM_RQSTATS +
- QEDE_TSS_COUNT(edev) * QEDE_NUM_TQSTATS * edev->num_tc;
+
+ /* Account for the Regular Tx statistics */
+ num_stats += QEDE_TSS_COUNT(edev) * QEDE_NUM_TQSTATS;
+
+ /* Account for the Regular Rx statistics */
+ num_stats += QEDE_RSS_COUNT(edev) * QEDE_NUM_RQSTATS;
+
+ /* Account for XDP statistics [if needed] */
+ if (edev->xdp_prog)
+ num_stats += QEDE_RSS_COUNT(edev) * QEDE_NUM_TQSTATS;
+ return num_stats;
+
case ETH_SS_PRIV_FLAGS:
return QEDE_PRI_FLAG_LEN;
case ETH_SS_TEST:
@@ -325,7 +366,7 @@ static const struct qede_link_mode_mapping qed_lm_map[] = {
{ \
int i; \
\
- for (i = 0; i < QED_LM_COUNT; i++) { \
+ for (i = 0; i < ARRAY_SIZE(qed_lm_map); i++) { \
if ((caps) & (qed_lm_map[i].qed_link_mode)) \
__set_bit(qed_lm_map[i].ethtool_link_mode,\
lk_ksettings->link_modes.name); \
@@ -336,7 +377,7 @@ static const struct qede_link_mode_mapping qed_lm_map[] = {
{ \
int i; \
\
- for (i = 0; i < QED_LM_COUNT; i++) { \
+ for (i = 0; i < ARRAY_SIZE(qed_lm_map); i++) { \
if (test_bit(qed_lm_map[i].ethtool_link_mode, \
lk_ksettings->link_modes.name)) \
caps |= qed_lm_map[i].qed_link_mode; \
@@ -350,6 +391,8 @@ static int qede_get_link_ksettings(struct net_device *dev,
struct qede_dev *edev = netdev_priv(dev);
struct qed_link_output current_link;
+ __qede_lock(edev);
+
memset(&current_link, 0, sizeof(current_link));
edev->ops->common->get_link(edev->cdev, &current_link);
@@ -369,6 +412,9 @@ static int qede_get_link_ksettings(struct net_device *dev,
base->speed = SPEED_UNKNOWN;
base->duplex = DUPLEX_UNKNOWN;
}
+
+ __qede_unlock(edev);
+
base->port = current_link.port;
base->autoneg = (current_link.autoneg) ? AUTONEG_ENABLE :
AUTONEG_DISABLE;
@@ -488,6 +534,45 @@ static void qede_get_drvinfo(struct net_device *ndev,
strlcpy(info->bus_info, pci_name(edev->pdev), sizeof(info->bus_info));
}
+static void qede_get_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
+{
+ struct qede_dev *edev = netdev_priv(ndev);
+
+ if (edev->dev_info.common.wol_support) {
+ wol->supported = WAKE_MAGIC;
+ wol->wolopts = edev->wol_enabled ? WAKE_MAGIC : 0;
+ }
+}
+
+static int qede_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
+{
+ struct qede_dev *edev = netdev_priv(ndev);
+ bool wol_requested;
+ int rc;
+
+ if (wol->wolopts & ~WAKE_MAGIC) {
+ DP_INFO(edev,
+ "Can't support WoL options other than magic-packet\n");
+ return -EINVAL;
+ }
+
+ wol_requested = !!(wol->wolopts & WAKE_MAGIC);
+ if (wol_requested == edev->wol_enabled)
+ return 0;
+
+ /* Need to actually change configuration */
+ if (!edev->dev_info.common.wol_support) {
+ DP_INFO(edev, "Device doesn't support WoL\n");
+ return -EINVAL;
+ }
+
+ rc = edev->ops->common->update_wol(edev->cdev, wol_requested);
+ if (!rc)
+ edev->wol_enabled = wol_requested;
+
+ return rc;
+}
+
static u32 qede_get_msglevel(struct net_device *ndev)
{
struct qede_dev *edev = netdev_priv(ndev);
@@ -638,8 +723,7 @@ static int qede_set_ringparam(struct net_device *dev,
edev->q_num_rx_buffers = ering->rx_pending;
edev->q_num_tx_buffers = ering->tx_pending;
- if (netif_running(edev->ndev))
- qede_reload(edev, NULL, NULL);
+ qede_reload(edev, NULL, false);
return 0;
}
@@ -724,35 +808,27 @@ static int qede_get_regs_len(struct net_device *ndev)
return -EINVAL;
}
-static void qede_update_mtu(struct qede_dev *edev, union qede_reload_args *args)
+static void qede_update_mtu(struct qede_dev *edev,
+ struct qede_reload_args *args)
{
- edev->ndev->mtu = args->mtu;
+ edev->ndev->mtu = args->u.mtu;
}
/* Netdevice NDOs */
-#define ETH_MAX_JUMBO_PACKET_SIZE 9600
-#define ETH_MIN_PACKET_SIZE 60
int qede_change_mtu(struct net_device *ndev, int new_mtu)
{
struct qede_dev *edev = netdev_priv(ndev);
- union qede_reload_args args;
-
- if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
- ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE)) {
- DP_ERR(edev, "Can't support requested MTU size\n");
- return -EINVAL;
- }
+ struct qede_reload_args args;
DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN),
"Configuring MTU size of %d\n", new_mtu);
- /* Set the mtu field and re-start the interface if needed*/
- args.mtu = new_mtu;
-
- if (netif_running(edev->ndev))
- qede_reload(edev, &qede_update_mtu, &args);
+ /* Set the mtu field and re-start the interface if needed */
+ args.u.mtu = new_mtu;
+ args.func = &qede_update_mtu;
+ qede_reload(edev, &args, false);
- qede_update_mtu(edev, &args);
+ edev->ops->common->update_mtu(edev->cdev, new_mtu);
return 0;
}
@@ -836,8 +912,7 @@ static int qede_set_channels(struct net_device *dev,
sizeof(edev->rss_params.rss_ind_table));
}
- if (netif_running(dev))
- qede_reload(edev, NULL, NULL);
+ qede_reload(edev, NULL, false);
return 0;
}
@@ -1143,7 +1218,7 @@ static int qede_selftest_transmit_traffic(struct qede_dev *edev,
for_each_queue(i) {
if (edev->fp_array[i].type & QEDE_FASTPATH_TX) {
- txq = edev->fp_array[i].txqs;
+ txq = edev->fp_array[i].txq;
break;
}
}
@@ -1155,7 +1230,7 @@ static int qede_selftest_transmit_traffic(struct qede_dev *edev,
/* Fill the entry in the SW ring and the BDs in the FW ring */
idx = txq->sw_tx_prod & NUM_TX_BDS_MAX;
- txq->sw_tx_ring[idx].skb = skb;
+ txq->sw_tx_ring.skbs[idx].skb = skb;
first_bd = qed_chain_produce(&txq->tx_pbl);
memset(first_bd, 0, sizeof(*first_bd));
val = 1 << ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT;
@@ -1209,7 +1284,7 @@ static int qede_selftest_transmit_traffic(struct qede_dev *edev,
dma_unmap_single(&edev->pdev->dev, BD_UNMAP_ADDR(first_bd),
BD_UNMAP_LEN(first_bd), DMA_TO_DEVICE);
txq->sw_tx_cons++;
- txq->sw_tx_ring[idx].skb = NULL;
+ txq->sw_tx_ring.skbs[idx].skb = NULL;
return 0;
}
@@ -1277,13 +1352,13 @@ static int qede_selftest_receive_traffic(struct qede_dev *edev)
break;
}
- qede_recycle_rx_bd_ring(rxq, edev, 1);
+ qede_recycle_rx_bd_ring(rxq, 1);
qed_chain_recycle_consumed(&rxq->rx_comp_ring);
break;
}
DP_INFO(edev, "Not the transmitted packet\n");
- qede_recycle_rx_bd_ring(rxq, edev, 1);
+ qede_recycle_rx_bd_ring(rxq, 1);
qed_chain_recycle_consumed(&rxq->rx_comp_ring);
}
@@ -1405,6 +1480,11 @@ static void qede_self_test(struct net_device *dev,
buf[QEDE_ETHTOOL_CLOCK_TEST] = 1;
etest->flags |= ETH_TEST_FL_FAILED;
}
+
+ if (edev->ops->common->selftest->selftest_nvram(edev->cdev)) {
+ buf[QEDE_ETHTOOL_NVRAM_TEST] = 1;
+ etest->flags |= ETH_TEST_FL_FAILED;
+ }
}
static int qede_set_tunable(struct net_device *dev,
@@ -1455,6 +1535,8 @@ static const struct ethtool_ops qede_ethtool_ops = {
.get_drvinfo = qede_get_drvinfo,
.get_regs_len = qede_get_regs_len,
.get_regs = qede_get_regs,
+ .get_wol = qede_get_wol,
+ .set_wol = qede_set_wol,
.get_msglevel = qede_get_msglevel,
.set_msglevel = qede_set_msglevel,
.nway_reset = qede_nway_reset,
diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c
index 85f46dbecd5b..aecdd1c5c0ea 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_main.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_main.c
@@ -94,11 +94,26 @@ static int qede_probe(struct pci_dev *pdev, const struct pci_device_id *id);
#define TX_TIMEOUT (5 * HZ)
+/* Utilize last protocol index for XDP */
+#define XDP_PI 11
+
static void qede_remove(struct pci_dev *pdev);
-static int qede_alloc_rx_buffer(struct qede_dev *edev,
- struct qede_rx_queue *rxq);
+static void qede_shutdown(struct pci_dev *pdev);
static void qede_link_update(void *dev, struct qed_link_output *link);
+/* The qede lock is used to protect driver state changes and driver flows that
+ * are not reentrant.
+ */
+void __qede_lock(struct qede_dev *edev)
+{
+ mutex_lock(&edev->qede_lock);
+}
+
+void __qede_unlock(struct qede_dev *edev)
+{
+ mutex_unlock(&edev->qede_lock);
+}
+
#ifdef CONFIG_QED_SRIOV
static int qede_set_vf_vlan(struct net_device *ndev, int vf, u16 vlan, u8 qos,
__be16 vlan_proto)
@@ -166,15 +181,20 @@ static struct pci_driver qede_pci_driver = {
.id_table = qede_pci_tbl,
.probe = qede_probe,
.remove = qede_remove,
+ .shutdown = qede_shutdown,
#ifdef CONFIG_QED_SRIOV
.sriov_configure = qede_sriov_configure,
#endif
};
-static void qede_force_mac(void *dev, u8 *mac)
+static void qede_force_mac(void *dev, u8 *mac, bool forced)
{
struct qede_dev *edev = dev;
+ /* MAC hints take effect only if we haven't set one already */
+ if (is_valid_ether_addr(edev->ndev->dev_addr) && !forced)
+ return;
+
ether_addr_copy(edev->ndev->dev_addr, mac);
ether_addr_copy(edev->primary_mac, mac);
}
@@ -284,12 +304,12 @@ static int qede_free_tx_pkt(struct qede_dev *edev,
struct qede_tx_queue *txq, int *len)
{
u16 idx = txq->sw_tx_cons & NUM_TX_BDS_MAX;
- struct sk_buff *skb = txq->sw_tx_ring[idx].skb;
+ struct sk_buff *skb = txq->sw_tx_ring.skbs[idx].skb;
struct eth_tx_1st_bd *first_bd;
struct eth_tx_bd *tx_data_bd;
int bds_consumed = 0;
int nbds;
- bool data_split = txq->sw_tx_ring[idx].flags & QEDE_TSO_SPLIT_BD;
+ bool data_split = txq->sw_tx_ring.skbs[idx].flags & QEDE_TSO_SPLIT_BD;
int i, split_bd_len = 0;
if (unlikely(!skb)) {
@@ -329,20 +349,19 @@ static int qede_free_tx_pkt(struct qede_dev *edev,
/* Free skb */
dev_kfree_skb_any(skb);
- txq->sw_tx_ring[idx].skb = NULL;
- txq->sw_tx_ring[idx].flags = 0;
+ txq->sw_tx_ring.skbs[idx].skb = NULL;
+ txq->sw_tx_ring.skbs[idx].flags = 0;
return 0;
}
/* Unmap the data and free skb when mapping failed during start_xmit */
-static void qede_free_failed_tx_pkt(struct qede_dev *edev,
- struct qede_tx_queue *txq,
+static void qede_free_failed_tx_pkt(struct qede_tx_queue *txq,
struct eth_tx_1st_bd *first_bd,
int nbd, bool data_split)
{
u16 idx = txq->sw_tx_prod & NUM_TX_BDS_MAX;
- struct sk_buff *skb = txq->sw_tx_ring[idx].skb;
+ struct sk_buff *skb = txq->sw_tx_ring.skbs[idx].skb;
struct eth_tx_bd *tx_data_bd;
int i, split_bd_len = 0;
@@ -359,7 +378,7 @@ static void qede_free_failed_tx_pkt(struct qede_dev *edev,
nbd--;
}
- dma_unmap_single(&edev->pdev->dev, BD_UNMAP_ADDR(first_bd),
+ dma_unmap_single(txq->dev, BD_UNMAP_ADDR(first_bd),
BD_UNMAP_LEN(first_bd) + split_bd_len, DMA_TO_DEVICE);
/* Unmap the data of the skb frags */
@@ -367,7 +386,7 @@ static void qede_free_failed_tx_pkt(struct qede_dev *edev,
tx_data_bd = (struct eth_tx_bd *)
qed_chain_produce(&txq->tx_pbl);
if (tx_data_bd->nbytes)
- dma_unmap_page(&edev->pdev->dev,
+ dma_unmap_page(txq->dev,
BD_UNMAP_ADDR(tx_data_bd),
BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
}
@@ -378,12 +397,11 @@ static void qede_free_failed_tx_pkt(struct qede_dev *edev,
/* Free skb */
dev_kfree_skb_any(skb);
- txq->sw_tx_ring[idx].skb = NULL;
- txq->sw_tx_ring[idx].flags = 0;
+ txq->sw_tx_ring.skbs[idx].skb = NULL;
+ txq->sw_tx_ring.skbs[idx].flags = 0;
}
-static u32 qede_xmit_type(struct qede_dev *edev,
- struct sk_buff *skb, int *ipv6_ext)
+static u32 qede_xmit_type(struct sk_buff *skb, int *ipv6_ext)
{
u32 rc = XMIT_L4_CSUM;
__be16 l3_proto;
@@ -396,8 +414,19 @@ static u32 qede_xmit_type(struct qede_dev *edev,
(ipv6_hdr(skb)->nexthdr == NEXTHDR_IPV6))
*ipv6_ext = 1;
- if (skb->encapsulation)
+ if (skb->encapsulation) {
rc |= XMIT_ENC;
+ if (skb_is_gso(skb)) {
+ unsigned short gso_type = skb_shinfo(skb)->gso_type;
+
+ if ((gso_type & SKB_GSO_UDP_TUNNEL_CSUM) ||
+ (gso_type & SKB_GSO_GRE_CSUM))
+ rc |= XMIT_ENC_GSO_L4_CSUM;
+
+ rc |= XMIT_LSO;
+ return rc;
+ }
+ }
if (skb_is_gso(skb))
rc |= XMIT_LSO;
@@ -439,18 +468,16 @@ static void qede_set_params_for_ipv6_ext(struct sk_buff *skb,
second_bd->data.bitfields2 = cpu_to_le16(bd2_bits2);
}
-static int map_frag_to_bd(struct qede_dev *edev,
+static int map_frag_to_bd(struct qede_tx_queue *txq,
skb_frag_t *frag, struct eth_tx_bd *bd)
{
dma_addr_t mapping;
/* Map skb non-linear frag data for DMA */
- mapping = skb_frag_dma_map(&edev->pdev->dev, frag, 0,
+ mapping = skb_frag_dma_map(txq->dev, frag, 0,
skb_frag_size(frag), DMA_TO_DEVICE);
- if (unlikely(dma_mapping_error(&edev->pdev->dev, mapping))) {
- DP_NOTICE(edev, "Unable to map frag - dropping packet\n");
+ if (unlikely(dma_mapping_error(txq->dev, mapping)))
return -ENOMEM;
- }
/* Setup the data pointer of the frag data */
BD_SET_UNMAP_ADDR_LEN(bd, mapping, skb_frag_size(frag));
@@ -470,8 +497,7 @@ static u16 qede_get_skb_hlen(struct sk_buff *skb, bool is_encap_pkt)
/* +2 for 1st BD for headers and 2nd BD for headlen (if required) */
#if ((MAX_SKB_FRAGS + 2) > ETH_TX_MAX_BDS_PER_NON_LSO_PACKET)
-static bool qede_pkt_req_lin(struct qede_dev *edev, struct sk_buff *skb,
- u8 xmit_type)
+static bool qede_pkt_req_lin(struct sk_buff *skb, u8 xmit_type)
{
int allowed_frags = ETH_TX_MAX_BDS_PER_NON_LSO_PACKET - 1;
@@ -507,6 +533,47 @@ static inline void qede_update_tx_producer(struct qede_tx_queue *txq)
mmiowb();
}
+static int qede_xdp_xmit(struct qede_dev *edev, struct qede_fastpath *fp,
+ struct sw_rx_data *metadata, u16 padding, u16 length)
+{
+ struct qede_tx_queue *txq = fp->xdp_tx;
+ u16 idx = txq->sw_tx_prod & NUM_TX_BDS_MAX;
+ struct eth_tx_1st_bd *first_bd;
+
+ if (!qed_chain_get_elem_left(&txq->tx_pbl)) {
+ txq->stopped_cnt++;
+ return -ENOMEM;
+ }
+
+ first_bd = (struct eth_tx_1st_bd *)qed_chain_produce(&txq->tx_pbl);
+
+ memset(first_bd, 0, sizeof(*first_bd));
+ first_bd->data.bd_flags.bitfields =
+ BIT(ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT);
+ first_bd->data.bitfields |=
+ (length & ETH_TX_DATA_1ST_BD_PKT_LEN_MASK) <<
+ ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT;
+ first_bd->data.nbds = 1;
+
+ /* We can safely ignore the offset, as it's 0 for XDP */
+ BD_SET_UNMAP_ADDR_LEN(first_bd, metadata->mapping + padding, length);
+
+ /* Synchronize the buffer back to the device, as the XDP program
+ * has probably changed it.
+ */
+ dma_sync_single_for_device(&edev->pdev->dev,
+ metadata->mapping + padding,
+ length, PCI_DMA_TODEVICE);
+
+ txq->sw_tx_ring.pages[idx] = metadata->data;
+ txq->sw_tx_prod++;
+
+ /* Mark the fastpath for future XDP doorbell */
+ fp->xdp_xmit = 1;
+
+ return 0;
+}
+
/* Main transmit function */
static netdev_tx_t qede_start_xmit(struct sk_buff *skb,
struct net_device *ndev)
@@ -530,15 +597,15 @@ static netdev_tx_t qede_start_xmit(struct sk_buff *skb,
/* Get tx-queue context and netdev index */
txq_index = skb_get_queue_mapping(skb);
WARN_ON(txq_index >= QEDE_TSS_COUNT(edev));
- txq = QEDE_TX_QUEUE(edev, txq_index);
+ txq = edev->fp_array[edev->fp_num_rx + txq_index].txq;
netdev_txq = netdev_get_tx_queue(ndev, txq_index);
WARN_ON(qed_chain_get_elem_left(&txq->tx_pbl) < (MAX_SKB_FRAGS + 1));
- xmit_type = qede_xmit_type(edev, skb, &ipv6_ext);
+ xmit_type = qede_xmit_type(skb, &ipv6_ext);
#if ((MAX_SKB_FRAGS + 2) > ETH_TX_MAX_BDS_PER_NON_LSO_PACKET)
- if (qede_pkt_req_lin(edev, skb, xmit_type)) {
+ if (qede_pkt_req_lin(skb, xmit_type)) {
if (skb_linearize(skb)) {
DP_NOTICE(edev,
"SKB linearization failed - silently dropping this SKB\n");
@@ -550,7 +617,7 @@ static netdev_tx_t qede_start_xmit(struct sk_buff *skb,
/* Fill the entry in the SW ring and the BDs in the FW ring */
idx = txq->sw_tx_prod & NUM_TX_BDS_MAX;
- txq->sw_tx_ring[idx].skb = skb;
+ txq->sw_tx_ring.skbs[idx].skb = skb;
first_bd = (struct eth_tx_1st_bd *)
qed_chain_produce(&txq->tx_pbl);
memset(first_bd, 0, sizeof(*first_bd));
@@ -558,11 +625,11 @@ static netdev_tx_t qede_start_xmit(struct sk_buff *skb,
1 << ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT;
/* Map skb linear data for DMA and set in the first BD */
- mapping = dma_map_single(&edev->pdev->dev, skb->data,
+ mapping = dma_map_single(txq->dev, skb->data,
skb_headlen(skb), DMA_TO_DEVICE);
- if (unlikely(dma_mapping_error(&edev->pdev->dev, mapping))) {
+ if (unlikely(dma_mapping_error(txq->dev, mapping))) {
DP_NOTICE(edev, "SKB mapping failed\n");
- qede_free_failed_tx_pkt(edev, txq, first_bd, 0, false);
+ qede_free_failed_tx_pkt(txq, first_bd, 0, false);
qede_update_tx_producer(txq);
return NETDEV_TX_OK;
}
@@ -633,6 +700,12 @@ static netdev_tx_t qede_start_xmit(struct sk_buff *skb,
if (unlikely(xmit_type & XMIT_ENC)) {
first_bd->data.bd_flags.bitfields |=
1 << ETH_TX_1ST_BD_FLAGS_TUNN_IP_CSUM_SHIFT;
+
+ if (xmit_type & XMIT_ENC_GSO_L4_CSUM) {
+ u8 tmp = ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_SHIFT;
+
+ first_bd->data.bd_flags.bitfields |= 1 << tmp;
+ }
hlen = qede_get_skb_hlen(skb, true);
} else {
first_bd->data.bd_flags.bitfields |=
@@ -664,7 +737,7 @@ static netdev_tx_t qede_start_xmit(struct sk_buff *skb,
/* this marks the BD as one that has no
* individual mapping
*/
- txq->sw_tx_ring[idx].flags |= QEDE_TSO_SPLIT_BD;
+ txq->sw_tx_ring.skbs[idx].flags |= QEDE_TSO_SPLIT_BD;
first_bd->nbytes = cpu_to_le16(hlen);
@@ -680,12 +753,11 @@ static netdev_tx_t qede_start_xmit(struct sk_buff *skb,
/* Handle fragmented skb */
/* special handle for frags inside 2nd and 3rd bds.. */
while (tx_data_bd && frag_idx < skb_shinfo(skb)->nr_frags) {
- rc = map_frag_to_bd(edev,
+ rc = map_frag_to_bd(txq,
&skb_shinfo(skb)->frags[frag_idx],
tx_data_bd);
if (rc) {
- qede_free_failed_tx_pkt(edev, txq, first_bd, nbd,
- data_split);
+ qede_free_failed_tx_pkt(txq, first_bd, nbd, data_split);
qede_update_tx_producer(txq);
return NETDEV_TX_OK;
}
@@ -705,12 +777,11 @@ static netdev_tx_t qede_start_xmit(struct sk_buff *skb,
memset(tx_data_bd, 0, sizeof(*tx_data_bd));
- rc = map_frag_to_bd(edev,
+ rc = map_frag_to_bd(txq,
&skb_shinfo(skb)->frags[frag_idx],
tx_data_bd);
if (rc) {
- qede_free_failed_tx_pkt(edev, txq, first_bd, nbd,
- data_split);
+ qede_free_failed_tx_pkt(txq, first_bd, nbd, data_split);
qede_update_tx_producer(txq);
return NETDEV_TX_OK;
}
@@ -775,6 +846,27 @@ int qede_txq_has_work(struct qede_tx_queue *txq)
return hw_bd_cons != qed_chain_get_cons_idx(&txq->tx_pbl);
}
+static void qede_xdp_tx_int(struct qede_dev *edev, struct qede_tx_queue *txq)
+{
+ struct eth_tx_1st_bd *bd;
+ u16 hw_bd_cons;
+
+ hw_bd_cons = le16_to_cpu(*txq->hw_cons_ptr);
+ barrier();
+
+ while (hw_bd_cons != qed_chain_get_cons_idx(&txq->tx_pbl)) {
+ bd = (struct eth_tx_1st_bd *)qed_chain_consume(&txq->tx_pbl);
+
+ dma_unmap_single(&edev->pdev->dev, BD_UNMAP_ADDR(bd),
+ PAGE_SIZE, DMA_BIDIRECTIONAL);
+ __free_page(txq->sw_tx_ring.pages[txq->sw_tx_cons &
+ NUM_TX_BDS_MAX]);
+
+ txq->sw_tx_cons++;
+ txq->xmit_pkts++;
+ }
+}
+
static int qede_tx_int(struct qede_dev *edev, struct qede_tx_queue *txq)
{
struct netdev_queue *netdev_txq;
@@ -858,16 +950,6 @@ bool qede_has_rx_work(struct qede_rx_queue *rxq)
return hw_comp_cons != sw_comp_cons;
}
-static bool qede_has_tx_work(struct qede_fastpath *fp)
-{
- u8 tc;
-
- for (tc = 0; tc < fp->edev->num_tc; tc++)
- if (qede_txq_has_work(&fp->txqs[tc]))
- return true;
- return false;
-}
-
static inline void qede_rx_bd_ring_consume(struct qede_rx_queue *rxq)
{
qed_chain_consume(&rxq->rx_bd_ring);
@@ -877,8 +959,7 @@ static inline void qede_rx_bd_ring_consume(struct qede_rx_queue *rxq)
/* This function reuses the buffer (from an offset) from
* consumer index to producer index in the bd ring
*/
-static inline void qede_reuse_page(struct qede_dev *edev,
- struct qede_rx_queue *rxq,
+static inline void qede_reuse_page(struct qede_rx_queue *rxq,
struct sw_rx_data *curr_cons)
{
struct eth_rx_bd *rx_bd_prod = qed_chain_produce(&rxq->rx_bd_ring);
@@ -900,27 +981,62 @@ static inline void qede_reuse_page(struct qede_dev *edev,
/* In case of allocation failures reuse buffers
* from consumer index to produce buffers for firmware
*/
-void qede_recycle_rx_bd_ring(struct qede_rx_queue *rxq,
- struct qede_dev *edev, u8 count)
+void qede_recycle_rx_bd_ring(struct qede_rx_queue *rxq, u8 count)
{
struct sw_rx_data *curr_cons;
for (; count > 0; count--) {
curr_cons = &rxq->sw_rx_ring[rxq->sw_rx_cons & NUM_RX_BDS_MAX];
- qede_reuse_page(edev, rxq, curr_cons);
+ qede_reuse_page(rxq, curr_cons);
qede_rx_bd_ring_consume(rxq);
}
}
-static inline int qede_realloc_rx_buffer(struct qede_dev *edev,
- struct qede_rx_queue *rxq,
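+/* Allocate and map a fresh page for the Rx BD ring and advance the
+ * producer; the mapping direction follows rxq->data_direction, which is
+ * bidirectional when XDP is attached.
+ */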
+static int qede_alloc_rx_buffer(struct qede_rx_queue *rxq)
+{
+ struct sw_rx_data *sw_rx_data;
+ struct eth_rx_bd *rx_bd;
+ dma_addr_t mapping;
+ struct page *data;
+
+ data = alloc_pages(GFP_ATOMIC, 0);
+ if (unlikely(!data))
+ return -ENOMEM;
+
+	/* Map the entire page, as it would be used
+	 * for multiple Rx buffer segment-size mappings.
+ */
+ mapping = dma_map_page(rxq->dev, data, 0,
+ PAGE_SIZE, rxq->data_direction);
+ if (unlikely(dma_mapping_error(rxq->dev, mapping))) {
+ __free_page(data);
+ return -ENOMEM;
+ }
+
+ sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_prod & NUM_RX_BDS_MAX];
+ sw_rx_data->page_offset = 0;
+ sw_rx_data->data = data;
+ sw_rx_data->mapping = mapping;
+
+ /* Advance PROD and get BD pointer */
+ rx_bd = (struct eth_rx_bd *)qed_chain_produce(&rxq->rx_bd_ring);
+ WARN_ON(!rx_bd);
+ rx_bd->addr.hi = cpu_to_le32(upper_32_bits(mapping));
+ rx_bd->addr.lo = cpu_to_le32(lower_32_bits(mapping));
+
+ rxq->sw_rx_prod++;
+
+ return 0;
+}
+
+static inline int qede_realloc_rx_buffer(struct qede_rx_queue *rxq,
struct sw_rx_data *curr_cons)
{
/* Move to the next segment in the page */
curr_cons->page_offset += rxq->rx_buf_seg_size;
if (curr_cons->page_offset == PAGE_SIZE) {
- if (unlikely(qede_alloc_rx_buffer(edev, rxq))) {
+ if (unlikely(qede_alloc_rx_buffer(rxq))) {
/* Since we failed to allocate new buffer
* current buffer can be used again.
*/
@@ -929,15 +1045,15 @@ static inline int qede_realloc_rx_buffer(struct qede_dev *edev,
return -ENOMEM;
}
- dma_unmap_page(&edev->pdev->dev, curr_cons->mapping,
- PAGE_SIZE, DMA_FROM_DEVICE);
+ dma_unmap_page(rxq->dev, curr_cons->mapping,
+ PAGE_SIZE, rxq->data_direction);
} else {
/* Increment refcount of the page as we don't want
* network stack to take the ownership of the page
* which can be recycled multiple times by the driver.
*/
page_ref_inc(curr_cons->data);
- qede_reuse_page(edev, rxq, curr_cons);
+ qede_reuse_page(rxq, curr_cons);
}
return 0;
@@ -971,22 +1087,20 @@ void qede_update_rx_prod(struct qede_dev *edev, struct qede_rx_queue *rxq)
mmiowb();
}
-static u32 qede_get_rxhash(struct qede_dev *edev,
- u8 bitfields,
- __le32 rss_hash, enum pkt_hash_types *rxhash_type)
+static void qede_get_rxhash(struct sk_buff *skb, u8 bitfields, __le32 rss_hash)
{
+ enum pkt_hash_types hash_type = PKT_HASH_TYPE_NONE;
enum rss_hash_type htype;
+ u32 hash = 0;
htype = GET_FIELD(bitfields, ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE);
-
- if ((edev->ndev->features & NETIF_F_RXHASH) && htype) {
- *rxhash_type = ((htype == RSS_HASH_TYPE_IPV4) ||
- (htype == RSS_HASH_TYPE_IPV6)) ?
- PKT_HASH_TYPE_L3 : PKT_HASH_TYPE_L4;
- return le32_to_cpu(rss_hash);
+ if (htype) {
+ hash_type = ((htype == RSS_HASH_TYPE_IPV4) ||
+ (htype == RSS_HASH_TYPE_IPV6)) ?
+ PKT_HASH_TYPE_L3 : PKT_HASH_TYPE_L4;
+ hash = le32_to_cpu(rss_hash);
}
- *rxhash_type = PKT_HASH_TYPE_NONE;
- return 0;
+ skb_set_hash(skb, hash, hash_type);
}
static void qede_set_skb_csum(struct sk_buff *skb, u8 csum_flag)
@@ -1002,12 +1116,14 @@ static void qede_set_skb_csum(struct sk_buff *skb, u8 csum_flag)
static inline void qede_skb_receive(struct qede_dev *edev,
struct qede_fastpath *fp,
+ struct qede_rx_queue *rxq,
struct sk_buff *skb, u16 vlan_tag)
{
if (vlan_tag)
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
napi_gro_receive(&fp->napi, skb);
+ fp->rxq->rcv_pkts++;
}
static void qede_set_gro_params(struct qede_dev *edev,
@@ -1035,7 +1151,7 @@ static int qede_fill_frag_skb(struct qede_dev *edev,
struct qede_agg_info *tpa_info = &rxq->tpa_info[tpa_agg_index];
struct sk_buff *skb = tpa_info->skb;
- if (unlikely(tpa_info->agg_state != QEDE_AGG_STATE_START))
+ if (unlikely(tpa_info->state != QEDE_AGG_STATE_START))
goto out;
/* Add one frag and update the appropriate fields in the skb */
@@ -1043,7 +1159,7 @@ static int qede_fill_frag_skb(struct qede_dev *edev,
current_bd->data, current_bd->page_offset,
len_on_bd);
- if (unlikely(qede_realloc_rx_buffer(edev, rxq, current_bd))) {
+ if (unlikely(qede_realloc_rx_buffer(rxq, current_bd))) {
/* Incr page ref count to reuse on allocation failure
* so that it doesn't get freed while freeing SKB.
*/
@@ -1061,8 +1177,9 @@ static int qede_fill_frag_skb(struct qede_dev *edev,
return 0;
out:
- tpa_info->agg_state = QEDE_AGG_STATE_ERROR;
- qede_recycle_rx_bd_ring(rxq, edev, 1);
+ tpa_info->state = QEDE_AGG_STATE_ERROR;
+ qede_recycle_rx_bd_ring(rxq, 1);
+
return -ENOMEM;
}
@@ -1073,12 +1190,10 @@ static void qede_tpa_start(struct qede_dev *edev,
struct qede_agg_info *tpa_info = &rxq->tpa_info[cqe->tpa_agg_index];
struct eth_rx_bd *rx_bd_cons = qed_chain_consume(&rxq->rx_bd_ring);
struct eth_rx_bd *rx_bd_prod = qed_chain_produce(&rxq->rx_bd_ring);
- struct sw_rx_data *replace_buf = &tpa_info->replace_buf;
- dma_addr_t mapping = tpa_info->replace_buf_mapping;
+ struct sw_rx_data *replace_buf = &tpa_info->buffer;
+ dma_addr_t mapping = tpa_info->buffer_mapping;
struct sw_rx_data *sw_rx_data_cons;
struct sw_rx_data *sw_rx_data_prod;
- enum pkt_hash_types rxhash_type;
- u32 rxhash;
sw_rx_data_cons = &rxq->sw_rx_ring[rxq->sw_rx_cons & NUM_RX_BDS_MAX];
sw_rx_data_prod = &rxq->sw_rx_ring[rxq->sw_rx_prod & NUM_RX_BDS_MAX];
@@ -1099,11 +1214,11 @@ static void qede_tpa_start(struct qede_dev *edev,
/* move partial skb from cons to pool (don't unmap yet)
	 * save mapping, in case we drop the packet later on.
*/
- tpa_info->start_buf = *sw_rx_data_cons;
+ tpa_info->buffer = *sw_rx_data_cons;
mapping = HILO_U64(le32_to_cpu(rx_bd_cons->addr.hi),
le32_to_cpu(rx_bd_cons->addr.lo));
- tpa_info->start_buf_mapping = mapping;
+ tpa_info->buffer_mapping = mapping;
rxq->sw_rx_cons++;
/* set tpa state to start only if we are able to allocate skb
@@ -1114,27 +1229,27 @@ static void qede_tpa_start(struct qede_dev *edev,
le16_to_cpu(cqe->len_on_first_bd));
if (unlikely(!tpa_info->skb)) {
DP_NOTICE(edev, "Failed to allocate SKB for gro\n");
- tpa_info->agg_state = QEDE_AGG_STATE_ERROR;
+ tpa_info->state = QEDE_AGG_STATE_ERROR;
goto cons_buf;
}
- skb_put(tpa_info->skb, le16_to_cpu(cqe->len_on_first_bd));
- memcpy(&tpa_info->start_cqe, cqe, sizeof(tpa_info->start_cqe));
-
/* Start filling in the aggregation info */
+ skb_put(tpa_info->skb, le16_to_cpu(cqe->len_on_first_bd));
tpa_info->frag_id = 0;
- tpa_info->agg_state = QEDE_AGG_STATE_START;
+ tpa_info->state = QEDE_AGG_STATE_START;
- rxhash = qede_get_rxhash(edev, cqe->bitfields,
- cqe->rss_hash, &rxhash_type);
- skb_set_hash(tpa_info->skb, rxhash, rxhash_type);
+ /* Store some information from first CQE */
+ tpa_info->start_cqe_placement_offset = cqe->placement_offset;
+ tpa_info->start_cqe_bd_len = le16_to_cpu(cqe->len_on_first_bd);
if ((le16_to_cpu(cqe->pars_flags.flags) >>
PARSING_AND_ERR_FLAGS_TAG8021QEXIST_SHIFT) &
- PARSING_AND_ERR_FLAGS_TAG8021QEXIST_MASK)
+ PARSING_AND_ERR_FLAGS_TAG8021QEXIST_MASK)
tpa_info->vlan_tag = le16_to_cpu(cqe->vlan_tag);
else
tpa_info->vlan_tag = 0;
+ qede_get_rxhash(tpa_info->skb, cqe->bitfields, cqe->rss_hash);
+
/* This is needed in order to enable forwarding support */
qede_set_gro_params(edev, tpa_info->skb, cqe);
@@ -1146,7 +1261,7 @@ cons_buf: /* We still need to handle bd_len_list to consume buffers */
if (unlikely(cqe->ext_bd_len_list[1])) {
DP_ERR(edev,
"Unlikely - got a TPA aggregation with more than one ext_bd_len_list entry in the TPA start\n");
- tpa_info->agg_state = QEDE_AGG_STATE_ERROR;
+ tpa_info->state = QEDE_AGG_STATE_ERROR;
}
}
@@ -1197,7 +1312,7 @@ static void qede_gro_receive(struct qede_dev *edev,
#ifdef CONFIG_INET
if (skb_shinfo(skb)->gso_size) {
- skb_set_network_header(skb, 0);
+ skb_reset_network_header(skb);
switch (skb->protocol) {
case htons(ETH_P_IP):
@@ -1216,7 +1331,7 @@ static void qede_gro_receive(struct qede_dev *edev,
send_skb:
skb_record_rx_queue(skb, fp->rxq->rxq_id);
- qede_skb_receive(edev, fp, skb, vlan_tag);
+ qede_skb_receive(edev, fp, fp->rxq, skb, vlan_tag);
}
static inline void qede_tpa_cont(struct qede_dev *edev,
@@ -1253,7 +1368,7 @@ static void qede_tpa_end(struct qede_dev *edev,
DP_ERR(edev,
"Strange - TPA emd with more than a single len_list entry\n");
- if (unlikely(tpa_info->agg_state != QEDE_AGG_STATE_START))
+ if (unlikely(tpa_info->state != QEDE_AGG_STATE_START))
goto err;
/* Sanity */
@@ -1267,14 +1382,9 @@ static void qede_tpa_end(struct qede_dev *edev,
le16_to_cpu(cqe->total_packet_len), skb->len);
memcpy(skb->data,
- page_address(tpa_info->start_buf.data) +
- tpa_info->start_cqe.placement_offset +
- tpa_info->start_buf.page_offset,
- le16_to_cpu(tpa_info->start_cqe.len_on_first_bd));
-
- /* Recycle [mapped] start buffer for the next replacement */
- tpa_info->replace_buf = tpa_info->start_buf;
- tpa_info->replace_buf_mapping = tpa_info->start_buf_mapping;
+ page_address(tpa_info->buffer.data) +
+ tpa_info->start_cqe_placement_offset +
+ tpa_info->buffer.page_offset, tpa_info->start_cqe_bd_len);
/* Finalize the SKB */
skb->protocol = eth_type_trans(skb, edev->ndev);
@@ -1287,18 +1397,11 @@ static void qede_tpa_end(struct qede_dev *edev,
qede_gro_receive(edev, fp, skb, tpa_info->vlan_tag);
- tpa_info->agg_state = QEDE_AGG_STATE_NONE;
+ tpa_info->state = QEDE_AGG_STATE_NONE;
return;
err:
- /* The BD starting the aggregation is still mapped; Re-use it for
- * future aggregations [as replacement buffer]
- */
- memcpy(&tpa_info->replace_buf, &tpa_info->start_buf,
- sizeof(struct sw_rx_data));
- tpa_info->replace_buf_mapping = tpa_info->start_buf_mapping;
- tpa_info->start_buf.data = NULL;
- tpa_info->agg_state = QEDE_AGG_STATE_NONE;
+ tpa_info->state = QEDE_AGG_STATE_NONE;
dev_kfree_skb_any(tpa_info->skb);
tpa_info->skb = NULL;
}
@@ -1380,238 +1483,364 @@ static bool qede_pkt_is_ip_fragmented(struct eth_fast_path_rx_reg_cqe *cqe,
return false;
}
-static int qede_rx_int(struct qede_fastpath *fp, int budget)
+/* Return true iff packet is to be passed to stack */
+static bool qede_rx_xdp(struct qede_dev *edev,
+ struct qede_fastpath *fp,
+ struct qede_rx_queue *rxq,
+ struct bpf_prog *prog,
+ struct sw_rx_data *bd,
+ struct eth_fast_path_rx_reg_cqe *cqe)
{
- struct qede_dev *edev = fp->edev;
- struct qede_rx_queue *rxq = fp->rxq;
-
- u16 hw_comp_cons, sw_comp_cons, sw_rx_index, parse_flag;
- int rx_pkt = 0;
- u8 csum_flag;
+ u16 len = le16_to_cpu(cqe->len_on_first_bd);
+ struct xdp_buff xdp;
+ enum xdp_action act;
- hw_comp_cons = le16_to_cpu(*rxq->hw_cons_ptr);
- sw_comp_cons = qed_chain_get_cons_idx(&rxq->rx_comp_ring);
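+	/* Point the XDP buffer at the frame as it lies inside the mapped page */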
+ xdp.data = page_address(bd->data) + cqe->placement_offset;
+ xdp.data_end = xdp.data + len;
- /* Memory barrier to prevent the CPU from doing speculative reads of CQE
- * / BD in the while-loop before reading hw_comp_cons. If the CQE is
- * read before it is written by FW, then FW writes CQE and SB, and then
- * the CPU reads the hw_comp_cons, it will use an old CQE.
+	/* Queues are always fully reset for now; until atomic program
+	 * replacement exists, just mark the RCU read side for the map
+	 * helpers.
*/
- rmb();
+ rcu_read_lock();
+ act = bpf_prog_run_xdp(prog, &xdp);
+ rcu_read_unlock();
- /* Loop to complete all indicated BDs */
- while (sw_comp_cons != hw_comp_cons) {
- struct eth_fast_path_rx_reg_cqe *fp_cqe;
- enum pkt_hash_types rxhash_type;
- enum eth_rx_cqe_type cqe_type;
- struct sw_rx_data *sw_rx_data;
- union eth_rx_cqe *cqe;
- struct sk_buff *skb;
- struct page *data;
- __le16 flags;
- u16 len, pad;
- u32 rx_hash;
-
- /* Get the CQE from the completion ring */
- cqe = (union eth_rx_cqe *)
- qed_chain_consume(&rxq->rx_comp_ring);
- cqe_type = cqe->fast_path_regular.type;
-
- if (unlikely(cqe_type == ETH_RX_CQE_TYPE_SLOW_PATH)) {
- edev->ops->eth_cqe_completion(
- edev->cdev, fp->id,
- (struct eth_slow_path_rx_cqe *)cqe);
- goto next_cqe;
+ if (act == XDP_PASS)
+ return true;
+
+ /* Count number of packets not to be passed to stack */
+ rxq->xdp_no_pass++;
+
+ switch (act) {
+ case XDP_TX:
+ /* We need the replacement buffer before transmit. */
+ if (qede_alloc_rx_buffer(rxq)) {
+ qede_recycle_rx_bd_ring(rxq, 1);
+ return false;
}
- if (cqe_type != ETH_RX_CQE_TYPE_REGULAR) {
- switch (cqe_type) {
- case ETH_RX_CQE_TYPE_TPA_START:
- qede_tpa_start(edev, rxq,
- &cqe->fast_path_tpa_start);
- goto next_cqe;
- case ETH_RX_CQE_TYPE_TPA_CONT:
- qede_tpa_cont(edev, rxq,
- &cqe->fast_path_tpa_cont);
- goto next_cqe;
- case ETH_RX_CQE_TYPE_TPA_END:
- qede_tpa_end(edev, fp,
- &cqe->fast_path_tpa_end);
- goto next_rx_only;
- default:
- break;
- }
+	/* If there's a transmission problem, we'd still have to
+	 * drop the current buffer, as its replacement was already allocated.
+ */
+ if (qede_xdp_xmit(edev, fp, bd, cqe->placement_offset, len)) {
+ dma_unmap_page(rxq->dev, bd->mapping,
+ PAGE_SIZE, DMA_BIDIRECTIONAL);
+ __free_page(bd->data);
}
- /* Get the data from the SW ring */
- sw_rx_index = rxq->sw_rx_cons & NUM_RX_BDS_MAX;
- sw_rx_data = &rxq->sw_rx_ring[sw_rx_index];
- data = sw_rx_data->data;
-
- fp_cqe = &cqe->fast_path_regular;
- len = le16_to_cpu(fp_cqe->len_on_first_bd);
- pad = fp_cqe->placement_offset;
- flags = cqe->fast_path_regular.pars_flags.flags;
-
- /* If this is an error packet then drop it */
- parse_flag = le16_to_cpu(flags);
-
- csum_flag = qede_check_csum(parse_flag);
- if (unlikely(csum_flag == QEDE_CSUM_ERROR)) {
- if (qede_pkt_is_ip_fragmented(&cqe->fast_path_regular,
- parse_flag)) {
- rxq->rx_ip_frags++;
- goto alloc_skb;
- }
+ /* Regardless, we've consumed an Rx BD */
+ qede_rx_bd_ring_consume(rxq);
+ return false;
- DP_NOTICE(edev,
- "CQE in CONS = %u has error, flags = %x, dropping incoming packet\n",
- sw_comp_cons, parse_flag);
- rxq->rx_hw_errors++;
- qede_recycle_rx_bd_ring(rxq, edev, fp_cqe->bd_num);
- goto next_cqe;
- }
+ default:
+ bpf_warn_invalid_xdp_action(act);
+ case XDP_ABORTED:
+ case XDP_DROP:
+ qede_recycle_rx_bd_ring(rxq, cqe->bd_num);
+ }
-alloc_skb:
- skb = netdev_alloc_skb(edev->ndev, QEDE_RX_HDR_SIZE);
- if (unlikely(!skb)) {
- DP_NOTICE(edev,
- "skb allocation failed, dropping incoming packet\n");
- qede_recycle_rx_bd_ring(rxq, edev, fp_cqe->bd_num);
- rxq->rx_alloc_errors++;
- goto next_cqe;
+ return false;
+}
+
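+/* Build an SKB for a regular [non-TPA] frame - small frames are copied into
+ * the SKB's linear area and the page is re-used, while larger frames are
+ * attached as a page fragment with only the headers pulled into the linear
+ * area.
+ */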
+static struct sk_buff *qede_rx_allocate_skb(struct qede_dev *edev,
+ struct qede_rx_queue *rxq,
+ struct sw_rx_data *bd, u16 len,
+ u16 pad)
+{
+ unsigned int offset = bd->page_offset;
+ struct skb_frag_struct *frag;
+ struct page *page = bd->data;
+ unsigned int pull_len;
+ struct sk_buff *skb;
+ unsigned char *va;
+
+	/* Allocate a new SKB with a sufficiently large header len */
+ skb = netdev_alloc_skb(edev->ndev, QEDE_RX_HDR_SIZE);
+ if (unlikely(!skb))
+ return NULL;
+
+ /* Copy data into SKB - if it's small, we can simply copy it and
+	 * re-use the already allocated & mapped memory.
+ */
+ if (len + pad <= edev->rx_copybreak) {
+ memcpy(skb_put(skb, len),
+ page_address(page) + pad + offset, len);
+ qede_reuse_page(rxq, bd);
+ goto out;
+ }
+
+ frag = &skb_shinfo(skb)->frags[0];
+
+ skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
+ page, pad + offset, len, rxq->rx_buf_seg_size);
+
+ va = skb_frag_address(frag);
+ pull_len = eth_get_headlen(va, QEDE_RX_HDR_SIZE);
+
+ /* Align the pull_len to optimize memcpy */
+ memcpy(skb->data, va, ALIGN(pull_len, sizeof(long)));
+
+ /* Correct the skb & frag sizes offset after the pull */
+ skb_frag_size_sub(frag, pull_len);
+ frag->page_offset += pull_len;
+ skb->data_len -= pull_len;
+ skb->tail += pull_len;
+
+ if (unlikely(qede_realloc_rx_buffer(rxq, bd))) {
+ /* Incr page ref count to reuse on allocation failure so
+		 * that it doesn't get freed while freeing the SKB [as it's
+		 * already mapped there].
+ */
+ page_ref_inc(page);
+ dev_kfree_skb_any(skb);
+ return NULL;
+ }
+
+out:
+ /* We've consumed the first BD and prepared an SKB */
+ qede_rx_bd_ring_consume(rxq);
+ return skb;
+}
+
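+/* Map the remaining BDs of a multi-BD [jumbo] frame into the SKB's frags;
+ * returns the number of BDs left unmapped on failure, 0 otherwise.
+ */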
+static int qede_rx_build_jumbo(struct qede_dev *edev,
+ struct qede_rx_queue *rxq,
+ struct sk_buff *skb,
+ struct eth_fast_path_rx_reg_cqe *cqe,
+ u16 first_bd_len)
+{
+ u16 pkt_len = le16_to_cpu(cqe->pkt_len);
+ struct sw_rx_data *bd;
+ u16 bd_cons_idx;
+ u8 num_frags;
+
+ pkt_len -= first_bd_len;
+
+ /* We've already used one BD for the SKB. Now take care of the rest */
+ for (num_frags = cqe->bd_num - 1; num_frags > 0; num_frags--) {
+ u16 cur_size = pkt_len > rxq->rx_buf_size ? rxq->rx_buf_size :
+ pkt_len;
+
+ if (unlikely(!cur_size)) {
+ DP_ERR(edev,
+ "Still got %d BDs for mapping jumbo, but length became 0\n",
+ num_frags);
+ goto out;
}
- /* Copy data into SKB */
- if (len + pad <= edev->rx_copybreak) {
- memcpy(skb_put(skb, len),
- page_address(data) + pad +
- sw_rx_data->page_offset, len);
- qede_reuse_page(edev, rxq, sw_rx_data);
+ /* We need a replacement buffer for each BD */
+ if (unlikely(qede_alloc_rx_buffer(rxq)))
+ goto out;
+
+ /* Now that we've allocated the replacement buffer,
+ * we can safely consume the next BD and map it to the SKB.
+ */
+ bd_cons_idx = rxq->sw_rx_cons & NUM_RX_BDS_MAX;
+ bd = &rxq->sw_rx_ring[bd_cons_idx];
+ qede_rx_bd_ring_consume(rxq);
+
+ dma_unmap_page(rxq->dev, bd->mapping,
+ PAGE_SIZE, DMA_FROM_DEVICE);
+
+ skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags++,
+ bd->data, 0, cur_size);
+
+ skb->truesize += PAGE_SIZE;
+ skb->data_len += cur_size;
+ skb->len += cur_size;
+ pkt_len -= cur_size;
+ }
+
+ if (unlikely(pkt_len))
+ DP_ERR(edev,
+ "Mapped all BDs of jumbo, but still have %d bytes\n",
+ pkt_len);
+
+out:
+ return num_frags;
+}
+
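+/* Handle the three TPA CQE types; returns 1 only for TPA_END, where an
+ * aggregated packet is completed and passed to the stack.
+ */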
+static int qede_rx_process_tpa_cqe(struct qede_dev *edev,
+ struct qede_fastpath *fp,
+ struct qede_rx_queue *rxq,
+ union eth_rx_cqe *cqe,
+ enum eth_rx_cqe_type type)
+{
+ switch (type) {
+ case ETH_RX_CQE_TYPE_TPA_START:
+ qede_tpa_start(edev, rxq, &cqe->fast_path_tpa_start);
+ return 0;
+ case ETH_RX_CQE_TYPE_TPA_CONT:
+ qede_tpa_cont(edev, rxq, &cqe->fast_path_tpa_cont);
+ return 0;
+ case ETH_RX_CQE_TYPE_TPA_END:
+ qede_tpa_end(edev, fp, &cqe->fast_path_tpa_end);
+ return 1;
+ default:
+ return 0;
+ }
+}
+
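+/* Handle a single Rx CQE - slowpath events, TPA aggregations and regular
+ * frames; returns 1 whenever a packet was consumed, i.e., passed to the
+ * stack or absorbed by XDP.
+ */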
+static int qede_rx_process_cqe(struct qede_dev *edev,
+ struct qede_fastpath *fp,
+ struct qede_rx_queue *rxq)
+{
+ struct bpf_prog *xdp_prog = READ_ONCE(rxq->xdp_prog);
+ struct eth_fast_path_rx_reg_cqe *fp_cqe;
+ u16 len, pad, bd_cons_idx, parse_flag;
+ enum eth_rx_cqe_type cqe_type;
+ union eth_rx_cqe *cqe;
+ struct sw_rx_data *bd;
+ struct sk_buff *skb;
+ __le16 flags;
+ u8 csum_flag;
+
+ /* Get the CQE from the completion ring */
+ cqe = (union eth_rx_cqe *)qed_chain_consume(&rxq->rx_comp_ring);
+ cqe_type = cqe->fast_path_regular.type;
+
+ /* Process an unlikely slowpath event */
+ if (unlikely(cqe_type == ETH_RX_CQE_TYPE_SLOW_PATH)) {
+ struct eth_slow_path_rx_cqe *sp_cqe;
+
+ sp_cqe = (struct eth_slow_path_rx_cqe *)cqe;
+ edev->ops->eth_cqe_completion(edev->cdev, fp->id, sp_cqe);
+ return 0;
+ }
+
+ /* Handle TPA cqes */
+ if (cqe_type != ETH_RX_CQE_TYPE_REGULAR)
+ return qede_rx_process_tpa_cqe(edev, fp, rxq, cqe, cqe_type);
+
+ /* Get the data from the SW ring; Consume it only after it's evident
+	 * we won't recycle it.
+ */
+ bd_cons_idx = rxq->sw_rx_cons & NUM_RX_BDS_MAX;
+ bd = &rxq->sw_rx_ring[bd_cons_idx];
+
+ fp_cqe = &cqe->fast_path_regular;
+ len = le16_to_cpu(fp_cqe->len_on_first_bd);
+ pad = fp_cqe->placement_offset;
+
+ /* Run eBPF program if one is attached */
+ if (xdp_prog)
+ if (!qede_rx_xdp(edev, fp, rxq, xdp_prog, bd, fp_cqe))
+ return 1;
+
+ /* If this is an error packet then drop it */
+ flags = cqe->fast_path_regular.pars_flags.flags;
+ parse_flag = le16_to_cpu(flags);
+
+ csum_flag = qede_check_csum(parse_flag);
+ if (unlikely(csum_flag == QEDE_CSUM_ERROR)) {
+ if (qede_pkt_is_ip_fragmented(fp_cqe, parse_flag)) {
+ rxq->rx_ip_frags++;
} else {
- struct skb_frag_struct *frag;
- unsigned int pull_len;
- unsigned char *va;
-
- frag = &skb_shinfo(skb)->frags[0];
-
- skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, data,
- pad + sw_rx_data->page_offset,
- len, rxq->rx_buf_seg_size);
-
- va = skb_frag_address(frag);
- pull_len = eth_get_headlen(va, QEDE_RX_HDR_SIZE);
-
- /* Align the pull_len to optimize memcpy */
- memcpy(skb->data, va, ALIGN(pull_len, sizeof(long)));
-
- skb_frag_size_sub(frag, pull_len);
- frag->page_offset += pull_len;
- skb->data_len -= pull_len;
- skb->tail += pull_len;
-
- if (unlikely(qede_realloc_rx_buffer(edev, rxq,
- sw_rx_data))) {
- DP_ERR(edev, "Failed to allocate rx buffer\n");
- /* Incr page ref count to reuse on allocation
- * failure so that it doesn't get freed while
- * freeing SKB.
- */
-
- page_ref_inc(sw_rx_data->data);
- rxq->rx_alloc_errors++;
- qede_recycle_rx_bd_ring(rxq, edev,
- fp_cqe->bd_num);
- dev_kfree_skb_any(skb);
- goto next_cqe;
- }
+ DP_NOTICE(edev,
+ "CQE has error, flags = %x, dropping incoming packet\n",
+ parse_flag);
+ rxq->rx_hw_errors++;
+ qede_recycle_rx_bd_ring(rxq, fp_cqe->bd_num);
+ return 0;
}
+ }
- qede_rx_bd_ring_consume(rxq);
+	/* Basic validation passed; we need to prepare an SKB. On success
+	 * this also guarantees the first BD is finally consumed.
+ */
+ skb = qede_rx_allocate_skb(edev, rxq, bd, len, pad);
+ if (!skb) {
+ rxq->rx_alloc_errors++;
+ qede_recycle_rx_bd_ring(rxq, fp_cqe->bd_num);
+ return 0;
+ }
- if (fp_cqe->bd_num != 1) {
- u16 pkt_len = le16_to_cpu(fp_cqe->pkt_len);
- u8 num_frags;
-
- pkt_len -= len;
-
- for (num_frags = fp_cqe->bd_num - 1; num_frags > 0;
- num_frags--) {
- u16 cur_size = pkt_len > rxq->rx_buf_size ?
- rxq->rx_buf_size : pkt_len;
- if (unlikely(!cur_size)) {
- DP_ERR(edev,
- "Still got %d BDs for mapping jumbo, but length became 0\n",
- num_frags);
- qede_recycle_rx_bd_ring(rxq, edev,
- num_frags);
- dev_kfree_skb_any(skb);
- goto next_cqe;
- }
-
- if (unlikely(qede_alloc_rx_buffer(edev, rxq))) {
- qede_recycle_rx_bd_ring(rxq, edev,
- num_frags);
- dev_kfree_skb_any(skb);
- goto next_cqe;
- }
-
- sw_rx_index = rxq->sw_rx_cons & NUM_RX_BDS_MAX;
- sw_rx_data = &rxq->sw_rx_ring[sw_rx_index];
- qede_rx_bd_ring_consume(rxq);
-
- dma_unmap_page(&edev->pdev->dev,
- sw_rx_data->mapping,
- PAGE_SIZE, DMA_FROM_DEVICE);
-
- skb_fill_page_desc(skb,
- skb_shinfo(skb)->nr_frags++,
- sw_rx_data->data, 0,
- cur_size);
-
- skb->truesize += PAGE_SIZE;
- skb->data_len += cur_size;
- skb->len += cur_size;
- pkt_len -= cur_size;
- }
+	/* In case of a jumbo packet, a single CQE points to several
+	 * PAGE_SIZE'd buffers.
+ */
+ if (fp_cqe->bd_num > 1) {
+ u16 unmapped_frags = qede_rx_build_jumbo(edev, rxq, skb,
+ fp_cqe, len);
- if (unlikely(pkt_len))
- DP_ERR(edev,
- "Mapped all BDs of jumbo, but still have %d bytes\n",
- pkt_len);
+ if (unlikely(unmapped_frags > 0)) {
+ qede_recycle_rx_bd_ring(rxq, unmapped_frags);
+ dev_kfree_skb_any(skb);
+ return 0;
}
+ }
- skb->protocol = eth_type_trans(skb, edev->ndev);
+ /* The SKB contains all the data. Now prepare meta-magic */
+ skb->protocol = eth_type_trans(skb, edev->ndev);
+ qede_get_rxhash(skb, fp_cqe->bitfields, fp_cqe->rss_hash);
+ qede_set_skb_csum(skb, csum_flag);
+ skb_record_rx_queue(skb, rxq->rxq_id);
- rx_hash = qede_get_rxhash(edev, fp_cqe->bitfields,
- fp_cqe->rss_hash, &rxhash_type);
+ /* SKB is prepared - pass it to stack */
+ qede_skb_receive(edev, fp, rxq, skb, le16_to_cpu(fp_cqe->vlan_tag));
- skb_set_hash(skb, rx_hash, rxhash_type);
+ return 1;
+}
- qede_set_skb_csum(skb, csum_flag);
+static int qede_rx_int(struct qede_fastpath *fp, int budget)
+{
+ struct qede_rx_queue *rxq = fp->rxq;
+ struct qede_dev *edev = fp->edev;
+ u16 hw_comp_cons, sw_comp_cons;
+ int work_done = 0;
- skb_record_rx_queue(skb, fp->rxq->rxq_id);
+ hw_comp_cons = le16_to_cpu(*rxq->hw_cons_ptr);
+ sw_comp_cons = qed_chain_get_cons_idx(&rxq->rx_comp_ring);
- qede_skb_receive(edev, fp, skb, le16_to_cpu(fp_cqe->vlan_tag));
-next_rx_only:
- rx_pkt++;
+ /* Memory barrier to prevent the CPU from doing speculative reads of CQE
+ * / BD in the while-loop before reading hw_comp_cons. If the CQE is
+ * read before it is written by FW, then FW writes CQE and SB, and then
+ * the CPU reads the hw_comp_cons, it will use an old CQE.
+ */
+ rmb();
-next_cqe: /* don't consume bd rx buffer */
+ /* Loop to complete all indicated BDs */
+ while ((sw_comp_cons != hw_comp_cons) && (work_done < budget)) {
+ qede_rx_process_cqe(edev, fp, rxq);
qed_chain_recycle_consumed(&rxq->rx_comp_ring);
sw_comp_cons = qed_chain_get_cons_idx(&rxq->rx_comp_ring);
- /* CR TPA - revisit how to handle budget in TPA perhaps
- * increase on "end"
- */
- if (rx_pkt == budget)
- break;
- } /* repeat while sw_comp_cons != hw_comp_cons... */
+ work_done++;
+ }
/* Update producers */
qede_update_rx_prod(edev, rxq);
- rxq->rcv_pkts += rx_pkt;
+ return work_done;
+}
+
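+/* Check all of the fastpath's rings for pending work before completing NAPI */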
+static bool qede_poll_is_more_work(struct qede_fastpath *fp)
+{
+ qed_sb_update_sb_idx(fp->sb_info);
+
+ /* *_has_*_work() reads the status block, thus we need to ensure that
+ * status block indices have been actually read (qed_sb_update_sb_idx)
+ * prior to this check (*_has_*_work) so that we won't write the
+ * "newer" value of the status block to HW (if there was a DMA right
+ * after qede_has_rx_work and if there is no rmb, the memory reading
+ * (qed_sb_update_sb_idx) may be postponed to right before *_ack_sb).
+ * In this case there will never be another interrupt until there is
+ * another update of the status block, while there is still unhandled
+ * work.
+ */
+ rmb();
+
+ if (likely(fp->type & QEDE_FASTPATH_RX))
+ if (qede_has_rx_work(fp->rxq))
+ return true;
- return rx_pkt;
+ if (fp->type & QEDE_FASTPATH_XDP)
+ if (qede_txq_has_work(fp->xdp_tx))
+ return true;
+
+ if (likely(fp->type & QEDE_FASTPATH_TX))
+ if (qede_txq_has_work(fp->txq))
+ return true;
+
+ return false;
}
static int qede_poll(struct napi_struct *napi, int budget)
@@ -1620,48 +1849,35 @@ static int qede_poll(struct napi_struct *napi, int budget)
napi);
struct qede_dev *edev = fp->edev;
int rx_work_done = 0;
- u8 tc;
- for (tc = 0; tc < edev->num_tc; tc++)
- if (likely(fp->type & QEDE_FASTPATH_TX) &&
- qede_txq_has_work(&fp->txqs[tc]))
- qede_tx_int(edev, &fp->txqs[tc]);
+ if (likely(fp->type & QEDE_FASTPATH_TX) && qede_txq_has_work(fp->txq))
+ qede_tx_int(edev, fp->txq);
+
+ if ((fp->type & QEDE_FASTPATH_XDP) && qede_txq_has_work(fp->xdp_tx))
+ qede_xdp_tx_int(edev, fp->xdp_tx);
rx_work_done = (likely(fp->type & QEDE_FASTPATH_RX) &&
qede_has_rx_work(fp->rxq)) ?
qede_rx_int(fp, budget) : 0;
if (rx_work_done < budget) {
- qed_sb_update_sb_idx(fp->sb_info);
- /* *_has_*_work() reads the status block,
- * thus we need to ensure that status block indices
- * have been actually read (qed_sb_update_sb_idx)
- * prior to this check (*_has_*_work) so that
- * we won't write the "newer" value of the status block
- * to HW (if there was a DMA right after
- * qede_has_rx_work and if there is no rmb, the memory
- * reading (qed_sb_update_sb_idx) may be postponed
- * to right before *_ack_sb). In this case there
- * will never be another interrupt until there is
- * another update of the status block, while there
- * is still unhandled work.
- */
- rmb();
-
- /* Fall out from the NAPI loop if needed */
- if (!((likely(fp->type & QEDE_FASTPATH_RX) &&
- qede_has_rx_work(fp->rxq)) ||
- (likely(fp->type & QEDE_FASTPATH_TX) &&
- qede_has_tx_work(fp)))) {
+ if (!qede_poll_is_more_work(fp)) {
napi_complete(napi);
/* Update and reenable interrupts */
- qed_sb_ack(fp->sb_info, IGU_INT_ENABLE,
- 1 /*update*/);
+ qed_sb_ack(fp->sb_info, IGU_INT_ENABLE, 1);
} else {
rx_work_done = budget;
}
}
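+	/* XDP transmissions are batched - the Tx doorbell is rung once per
+	 * NAPI poll for all frames forwarded meanwhile.
+	 */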
+ if (fp->xdp_xmit) {
+ u16 xdp_prod = qed_chain_get_prod_idx(&fp->xdp_tx->tx_pbl);
+
+ fp->xdp_xmit = 0;
+ fp->xdp_tx->tx_db.data.bd_prod = cpu_to_le16(xdp_prod);
+ qede_update_tx_producer(fp->xdp_tx);
+ }
+
return rx_work_done;
}
@@ -1912,7 +2128,7 @@ static int qede_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
{
struct qede_dev *edev = netdev_priv(dev);
struct qede_vlan *vlan, *tmp;
- int rc;
+ int rc = 0;
DP_VERBOSE(edev, NETIF_MSG_IFUP, "Adding vlan 0x%04x\n", vid);
@@ -1936,6 +2152,7 @@ static int qede_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
}
/* If interface is down, cache this VLAN ID and return */
+ __qede_lock(edev);
if (edev->state != QEDE_STATE_OPEN) {
DP_VERBOSE(edev, NETIF_MSG_IFDOWN,
"Interface is down, VLAN %d will be configured when interface is up\n",
@@ -1943,8 +2160,7 @@ static int qede_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
if (vid != 0)
edev->non_configured_vlans++;
list_add(&vlan->list, &edev->vlan_list);
-
- return 0;
+ goto out;
}
/* Check for the filter limit.
@@ -1960,7 +2176,7 @@ static int qede_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
DP_ERR(edev, "Failed to configure VLAN %d\n",
vlan->vid);
kfree(vlan);
- return -EINVAL;
+ goto out;
}
vlan->configured = true;
@@ -1977,7 +2193,9 @@ static int qede_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
list_add(&vlan->list, &edev->vlan_list);
- return 0;
+out:
+ __qede_unlock(edev);
+ return rc;
}
static void qede_del_vlan_from_list(struct qede_dev *edev,
@@ -2054,11 +2272,12 @@ static int qede_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
{
struct qede_dev *edev = netdev_priv(dev);
struct qede_vlan *vlan = NULL;
- int rc;
+ int rc = 0;
DP_VERBOSE(edev, NETIF_MSG_IFDOWN, "Removing vlan 0x%04x\n", vid);
/* Find whether entry exists */
+ __qede_lock(edev);
list_for_each_entry(vlan, &edev->vlan_list, list)
if (vlan->vid == vid)
break;
@@ -2066,7 +2285,7 @@ static int qede_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
if (!vlan || (vlan->vid != vid)) {
DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN),
"Vlan isn't configured\n");
- return 0;
+ goto out;
}
if (edev->state != QEDE_STATE_OPEN) {
@@ -2076,7 +2295,7 @@ static int qede_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
DP_VERBOSE(edev, NETIF_MSG_IFDOWN,
"Interface is down, removing VLAN from list only\n");
qede_del_vlan_from_list(edev, vlan);
- return 0;
+ goto out;
}
/* Remove vlan */
@@ -2085,7 +2304,7 @@ static int qede_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
vid);
if (rc) {
DP_ERR(edev, "Failed to remove VLAN %d\n", vid);
- return -EINVAL;
+ goto out;
}
}
@@ -2096,6 +2315,8 @@ static int qede_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
*/
rc = qede_configure_vlan_filters(edev);
+out:
+ __qede_unlock(edev);
return rc;
}
@@ -2125,7 +2346,13 @@ static void qede_vlan_mark_nonconfigured(struct qede_dev *edev)
edev->accept_any_vlan = false;
}
-static int qede_set_features(struct net_device *dev, netdev_features_t features)
+static void qede_set_features_reload(struct qede_dev *edev,
+ struct qede_reload_args *args)
+{
+ edev->ndev->features = args->u.features;
+}
+
+int qede_set_features(struct net_device *dev, netdev_features_t features)
{
struct qede_dev *edev = netdev_priv(dev);
netdev_features_t changes = features ^ dev->features;
@@ -2139,9 +2366,23 @@ static int qede_set_features(struct net_device *dev, netdev_features_t features)
need_reload = edev->gro_disable;
}
- if (need_reload && netif_running(edev->ndev)) {
- dev->features = features;
- qede_reload(edev, NULL, NULL);
+ if (need_reload) {
+ struct qede_reload_args args;
+
+ args.u.features = features;
+ args.func = &qede_set_features_reload;
+
+		/* Make sure we actually need to reload.
+		 * With an eBPF program attached there will be no FW
+		 * aggregations, so no actual reload is needed.
+ */
+ __qede_lock(edev);
+ if (edev->xdp_prog)
+ args.func(edev, &args);
+ else
+ qede_reload(edev, &args, true);
+ __qede_unlock(edev);
+
return 1;
}
@@ -2218,6 +2459,82 @@ static void qede_udp_tunnel_del(struct net_device *dev,
schedule_delayed_work(&edev->sp_task, 0);
}
+/* 8B udp header + 8B base tunnel header + 32B option length */
+#define QEDE_MAX_TUN_HDR_LEN 48
+
+static netdev_features_t qede_features_check(struct sk_buff *skb,
+ struct net_device *dev,
+ netdev_features_t features)
+{
+ if (skb->encapsulation) {
+ u8 l4_proto = 0;
+
+ switch (vlan_get_protocol(skb)) {
+ case htons(ETH_P_IP):
+ l4_proto = ip_hdr(skb)->protocol;
+ break;
+ case htons(ETH_P_IPV6):
+ l4_proto = ipv6_hdr(skb)->nexthdr;
+ break;
+ default:
+ return features;
+ }
+
+ /* Disable offloads for geneve tunnels, as HW can't parse
+		 * a geneve header whose option length exceeds 32B.
+ */
+ if ((l4_proto == IPPROTO_UDP) &&
+ ((skb_inner_mac_header(skb) -
+ skb_transport_header(skb)) > QEDE_MAX_TUN_HDR_LEN))
+ return features & ~(NETIF_F_CSUM_MASK |
+ NETIF_F_GSO_MASK);
+ }
+
+ return features;
+}
+
+static void qede_xdp_reload_func(struct qede_dev *edev,
+ struct qede_reload_args *args)
+{
+ struct bpf_prog *old;
+
+ old = xchg(&edev->xdp_prog, args->u.new_prog);
+ if (old)
+ bpf_prog_put(old);
+}
+
+static int qede_xdp_set(struct qede_dev *edev, struct bpf_prog *prog)
+{
+ struct qede_reload_args args;
+
+ if (prog && prog->xdp_adjust_head) {
+ DP_ERR(edev, "Does not support bpf_xdp_adjust_head()\n");
+ return -EOPNOTSUPP;
+ }
+
+	/* By the time we're called, a reference on the bpf program was already taken */
+ args.func = &qede_xdp_reload_func;
+ args.u.new_prog = prog;
+ qede_reload(edev, &args, false);
+
+ return 0;
+}
+
+static int qede_xdp(struct net_device *dev, struct netdev_xdp *xdp)
+{
+ struct qede_dev *edev = netdev_priv(dev);
+
+ switch (xdp->command) {
+ case XDP_SETUP_PROG:
+ return qede_xdp_set(edev, xdp->prog);
+ case XDP_QUERY_PROG:
+ xdp->prog_attached = !!edev->xdp_prog;
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
static const struct net_device_ops qede_netdev_ops = {
.ndo_open = qede_open,
.ndo_stop = qede_close,
@@ -2242,6 +2559,8 @@ static const struct net_device_ops qede_netdev_ops = {
#endif
.ndo_udp_tunnel_add = qede_udp_tunnel_add,
.ndo_udp_tunnel_del = qede_udp_tunnel_del,
+ .ndo_features_check = qede_features_check,
+ .ndo_xdp = qede_xdp,
};
/* -------------------------------------------------------------------------
@@ -2282,8 +2601,6 @@ static struct qede_dev *qede_alloc_etherdev(struct qed_dev *cdev,
memset(&edev->stats, 0, sizeof(edev->stats));
memcpy(&edev->dev_info, info, sizeof(*info));
- edev->num_tc = edev->dev_info.num_tc;
-
INIT_LIST_HEAD(&edev->vlan_list);
return edev;
@@ -2308,6 +2625,8 @@ static void qede_init_ndev(struct qede_dev *edev)
qede_set_ethtool_ops(ndev);
+ ndev->priv_flags |= IFF_UNICAST_FLT;
+
	/* user-changeable features */
hw_features = NETIF_F_GRO | NETIF_F_SG |
NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
@@ -2315,11 +2634,14 @@ static void qede_init_ndev(struct qede_dev *edev)
	/* Encap features */
hw_features |= NETIF_F_GSO_GRE | NETIF_F_GSO_UDP_TUNNEL |
- NETIF_F_TSO_ECN;
+ NETIF_F_TSO_ECN | NETIF_F_GSO_UDP_TUNNEL_CSUM |
+ NETIF_F_GSO_GRE_CSUM;
ndev->hw_enc_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO_ECN |
NETIF_F_TSO6 | NETIF_F_GSO_GRE |
- NETIF_F_GSO_UDP_TUNNEL | NETIF_F_RXCSUM;
+ NETIF_F_GSO_UDP_TUNNEL | NETIF_F_RXCSUM |
+ NETIF_F_GSO_UDP_TUNNEL_CSUM |
+ NETIF_F_GSO_GRE_CSUM;
ndev->vlan_features = hw_features | NETIF_F_RXHASH | NETIF_F_RXCSUM |
NETIF_F_HIGHDMA;
@@ -2329,8 +2651,14 @@ static void qede_init_ndev(struct qede_dev *edev)
ndev->hw_features = hw_features;
+ /* MTU range: 46 - 9600 */
+ ndev->min_mtu = ETH_ZLEN - ETH_HLEN;
+ ndev->max_mtu = QEDE_MAX_JUMBO_PACKET_SIZE;
+
/* Set network device HW mac */
ether_addr_copy(edev->ndev->dev_addr, edev->dev_info.common.hw_mac);
+
+ ndev->mtu = edev->dev_info.common.mtu;
}
/* This function converts from 32b param to two params of level and module
@@ -2370,7 +2698,8 @@ static void qede_free_fp_array(struct qede_dev *edev)
kfree(fp->sb_info);
kfree(fp->rxq);
- kfree(fp->txqs);
+ kfree(fp->xdp_tx);
+ kfree(fp->txq);
}
kfree(edev->fp_array);
}
@@ -2403,7 +2732,7 @@ static int qede_alloc_fp_array(struct qede_dev *edev)
for_each_queue(i) {
fp = &edev->fp_array[i];
- fp->sb_info = kcalloc(1, sizeof(*fp->sb_info), GFP_KERNEL);
+ fp->sb_info = kzalloc(sizeof(*fp->sb_info), GFP_KERNEL);
if (!fp->sb_info) {
DP_NOTICE(edev, "sb info struct allocation failed\n");
goto err;
@@ -2420,21 +2749,22 @@ static int qede_alloc_fp_array(struct qede_dev *edev)
}
if (fp->type & QEDE_FASTPATH_TX) {
- fp->txqs = kcalloc(edev->num_tc, sizeof(*fp->txqs),
- GFP_KERNEL);
- if (!fp->txqs) {
- DP_NOTICE(edev,
- "TXQ array allocation failed\n");
+ fp->txq = kzalloc(sizeof(*fp->txq), GFP_KERNEL);
+ if (!fp->txq)
goto err;
- }
}
if (fp->type & QEDE_FASTPATH_RX) {
- fp->rxq = kcalloc(1, sizeof(*fp->rxq), GFP_KERNEL);
- if (!fp->rxq) {
- DP_NOTICE(edev,
- "RXQ struct allocation failed\n");
+ fp->rxq = kzalloc(sizeof(*fp->rxq), GFP_KERNEL);
+ if (!fp->rxq)
goto err;
+
+ if (edev->xdp_prog) {
+ fp->xdp_tx = kzalloc(sizeof(*fp->xdp_tx),
+ GFP_KERNEL);
+ if (!fp->xdp_tx)
+ goto err;
+ fp->type |= QEDE_FASTPATH_XDP;
}
}
}
@@ -2451,12 +2781,11 @@ static void qede_sp_task(struct work_struct *work)
sp_task.work);
struct qed_dev *cdev = edev->cdev;
- mutex_lock(&edev->qede_lock);
+ __qede_lock(edev);
- if (edev->state == QEDE_STATE_OPEN) {
- if (test_and_clear_bit(QEDE_SP_RX_MODE, &edev->sp_flags))
+ if (test_and_clear_bit(QEDE_SP_RX_MODE, &edev->sp_flags))
+ if (edev->state == QEDE_STATE_OPEN)
qede_config_rx_mode(edev->ndev);
- }
if (test_and_clear_bit(QEDE_SP_VXLAN_PORT_CONFIG, &edev->sp_flags)) {
struct qed_tunn_params tunn_params;
@@ -2476,16 +2805,16 @@ static void qede_sp_task(struct work_struct *work)
qed_ops->tunn_config(cdev, &tunn_params);
}
- mutex_unlock(&edev->qede_lock);
+ __qede_unlock(edev);
}
static void qede_update_pf_params(struct qed_dev *cdev)
{
struct qed_pf_params pf_params;
- /* 64 rx + 64 tx */
+ /* 64 rx + 64 tx + 64 XDP */
memset(&pf_params, 0, sizeof(struct qed_pf_params));
- pf_params.eth_pf_params.num_cons = 128;
+ pf_params.eth_pf_params.num_cons = 192;
qed_ops->common->update_pf_params(cdev, &pf_params);
}
@@ -2634,10 +2963,16 @@ static void __qede_remove(struct pci_dev *pdev, enum qede_remove_mode mode)
pci_set_drvdata(pdev, NULL);
+	/* Release edev's reference to the XDP bpf program, if one exists */
+ if (edev->xdp_prog)
+ bpf_prog_put(edev->xdp_prog);
+
free_netdev(ndev);
/* Use global ops since we've freed edev */
qed_ops->common->slowpath_stop(cdev);
+ if (system_state == SYSTEM_POWER_OFF)
+ return;
qed_ops->common->remove(cdev);
dev_info(&pdev->dev, "Ending qede_remove successfully\n");
@@ -2648,6 +2983,11 @@ static void qede_remove(struct pci_dev *pdev)
__qede_remove(pdev, QEDE_REMOVE_NORMAL);
}
+static void qede_shutdown(struct pci_dev *pdev)
+{
+ __qede_remove(pdev, QEDE_REMOVE_NORMAL);
+}
+
/* -------------------------------------------------------------------------
* START OF LOAD / UNLOAD
* -------------------------------------------------------------------------
@@ -2731,7 +3071,7 @@ static void qede_free_rx_buffers(struct qede_dev *edev,
data = rx_buf->data;
dma_unmap_page(&edev->pdev->dev,
- rx_buf->mapping, PAGE_SIZE, DMA_FROM_DEVICE);
+ rx_buf->mapping, PAGE_SIZE, rxq->data_direction);
rx_buf->data = NULL;
__free_page(data);
@@ -2747,7 +3087,7 @@ static void qede_free_sge_mem(struct qede_dev *edev, struct qede_rx_queue *rxq)
for (i = 0; i < ETH_TPA_MAX_AGGS_NUM; i++) {
struct qede_agg_info *tpa_info = &rxq->tpa_info[i];
- struct sw_rx_data *replace_buf = &tpa_info->replace_buf;
+ struct sw_rx_data *replace_buf = &tpa_info->buffer;
if (replace_buf->data) {
dma_unmap_page(&edev->pdev->dev,
@@ -2773,52 +3113,15 @@ static void qede_free_mem_rxq(struct qede_dev *edev, struct qede_rx_queue *rxq)
edev->ops->common->chain_free(edev->cdev, &rxq->rx_comp_ring);
}
-static int qede_alloc_rx_buffer(struct qede_dev *edev,
- struct qede_rx_queue *rxq)
-{
- struct sw_rx_data *sw_rx_data;
- struct eth_rx_bd *rx_bd;
- dma_addr_t mapping;
- struct page *data;
-
- data = alloc_pages(GFP_ATOMIC, 0);
- if (unlikely(!data)) {
- DP_NOTICE(edev, "Failed to allocate Rx data [page]\n");
- return -ENOMEM;
- }
-
- /* Map the entire page as it would be used
- * for multiple RX buffer segment size mapping.
- */
- mapping = dma_map_page(&edev->pdev->dev, data, 0,
- PAGE_SIZE, DMA_FROM_DEVICE);
- if (unlikely(dma_mapping_error(&edev->pdev->dev, mapping))) {
- __free_page(data);
- DP_NOTICE(edev, "Failed to map Rx buffer\n");
- return -ENOMEM;
- }
-
- sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_prod & NUM_RX_BDS_MAX];
- sw_rx_data->page_offset = 0;
- sw_rx_data->data = data;
- sw_rx_data->mapping = mapping;
-
- /* Advance PROD and get BD pointer */
- rx_bd = (struct eth_rx_bd *)qed_chain_produce(&rxq->rx_bd_ring);
- WARN_ON(!rx_bd);
- rx_bd->addr.hi = cpu_to_le32(upper_32_bits(mapping));
- rx_bd->addr.lo = cpu_to_le32(lower_32_bits(mapping));
-
- rxq->sw_rx_prod++;
-
- return 0;
-}
-
static int qede_alloc_sge_mem(struct qede_dev *edev, struct qede_rx_queue *rxq)
{
dma_addr_t mapping;
int i;
+ /* Don't perform FW aggregations in case of XDP */
+ if (edev->xdp_prog)
+ edev->gro_disable = 1;
+
if (edev->gro_disable)
return 0;
@@ -2829,7 +3132,7 @@ static int qede_alloc_sge_mem(struct qede_dev *edev, struct qede_rx_queue *rxq)
for (i = 0; i < ETH_TPA_MAX_AGGS_NUM; i++) {
struct qede_agg_info *tpa_info = &rxq->tpa_info[i];
- struct sw_rx_data *replace_buf = &tpa_info->replace_buf;
+ struct sw_rx_data *replace_buf = &tpa_info->buffer;
replace_buf->data = alloc_pages(GFP_ATOMIC, 0);
if (unlikely(!replace_buf->data)) {
@@ -2847,10 +3150,9 @@ static int qede_alloc_sge_mem(struct qede_dev *edev, struct qede_rx_queue *rxq)
}
replace_buf->mapping = mapping;
- tpa_info->replace_buf.page_offset = 0;
-
- tpa_info->replace_buf_mapping = mapping;
- tpa_info->agg_state = QEDE_AGG_STATE_NONE;
+ tpa_info->buffer.page_offset = 0;
+ tpa_info->buffer_mapping = mapping;
+ tpa_info->state = QEDE_AGG_STATE_NONE;
}
return 0;
@@ -2872,8 +3174,13 @@ static int qede_alloc_mem_rxq(struct qede_dev *edev, struct qede_rx_queue *rxq)
if (rxq->rx_buf_size > PAGE_SIZE)
rxq->rx_buf_size = PAGE_SIZE;
- /* Segment size to spilt a page in multiple equal parts */
- rxq->rx_buf_seg_size = roundup_pow_of_two(rxq->rx_buf_size);
+	/* Segment size to split a page into multiple equal parts,
+	 * unless XDP is used, in which case we use the entire page.
+ */
+ if (!edev->xdp_prog)
+ rxq->rx_buf_seg_size = roundup_pow_of_two(rxq->rx_buf_size);
+ else
+ rxq->rx_buf_seg_size = PAGE_SIZE;
/* Allocate the parallel driver ring for Rx buffers */
size = sizeof(*rxq->sw_rx_ring) * RX_RING_SIZE;
@@ -2909,7 +3216,7 @@ static int qede_alloc_mem_rxq(struct qede_dev *edev, struct qede_rx_queue *rxq)
/* Allocate buffers for the Rx ring */
for (i = 0; i < rxq->num_rx_buffers; i++) {
- rc = qede_alloc_rx_buffer(edev, rxq);
+ rc = qede_alloc_rx_buffer(rxq);
if (rc) {
DP_ERR(edev,
"Rx buffers allocation failed at index %d\n", i);
@@ -2925,7 +3232,10 @@ err:
static void qede_free_mem_txq(struct qede_dev *edev, struct qede_tx_queue *txq)
{
/* Free the parallel SW ring */
- kfree(txq->sw_tx_ring);
+ if (txq->is_xdp)
+ kfree(txq->sw_tx_ring.pages);
+ else
+ kfree(txq->sw_tx_ring.skbs);
/* Free the real RQ ring used by FW */
edev->ops->common->chain_free(edev->cdev, &txq->tx_pbl);
@@ -2934,17 +3244,22 @@ static void qede_free_mem_txq(struct qede_dev *edev, struct qede_tx_queue *txq)
/* This function allocates all memory needed per Tx queue */
static int qede_alloc_mem_txq(struct qede_dev *edev, struct qede_tx_queue *txq)
{
- int size, rc;
union eth_tx_bd_types *p_virt;
+ int size, rc;
txq->num_tx_buffers = edev->q_num_tx_buffers;
/* Allocate the parallel driver ring for Tx buffers */
- size = sizeof(*txq->sw_tx_ring) * TX_RING_SIZE;
- txq->sw_tx_ring = kzalloc(size, GFP_KERNEL);
- if (!txq->sw_tx_ring) {
- DP_NOTICE(edev, "Tx buffers ring allocation failed\n");
- goto err;
+ if (txq->is_xdp) {
+ size = sizeof(*txq->sw_tx_ring.pages) * TX_RING_SIZE;
+ txq->sw_tx_ring.pages = kzalloc(size, GFP_KERNEL);
+ if (!txq->sw_tx_ring.pages)
+ goto err;
+ } else {
+ size = sizeof(*txq->sw_tx_ring.skbs) * TX_RING_SIZE;
+ txq->sw_tx_ring.skbs = kzalloc(size, GFP_KERNEL);
+ if (!txq->sw_tx_ring.skbs)
+ goto err;
}
rc = edev->ops->common->chain_alloc(edev->cdev,
@@ -2966,16 +3281,13 @@ err:
/* This function frees all memory of a single fp */
static void qede_free_mem_fp(struct qede_dev *edev, struct qede_fastpath *fp)
{
- int tc;
-
qede_free_mem_sb(edev, fp->sb_info);
if (fp->type & QEDE_FASTPATH_RX)
qede_free_mem_rxq(edev, fp->rxq);
if (fp->type & QEDE_FASTPATH_TX)
- for (tc = 0; tc < edev->num_tc; tc++)
- qede_free_mem_txq(edev, &fp->txqs[tc]);
+ qede_free_mem_txq(edev, fp->txq);
}
/* This function allocates all memory needed for a single fp (i.e. an entity
@@ -2983,28 +3295,31 @@ static void qede_free_mem_fp(struct qede_dev *edev, struct qede_fastpath *fp)
*/
static int qede_alloc_mem_fp(struct qede_dev *edev, struct qede_fastpath *fp)
{
- int rc, tc;
+ int rc = 0;
rc = qede_alloc_mem_sb(edev, fp->sb_info, fp->id);
if (rc)
- goto err;
+ goto out;
if (fp->type & QEDE_FASTPATH_RX) {
rc = qede_alloc_mem_rxq(edev, fp->rxq);
if (rc)
- goto err;
+ goto out;
+ }
+
+ if (fp->type & QEDE_FASTPATH_XDP) {
+ rc = qede_alloc_mem_txq(edev, fp->xdp_tx);
+ if (rc)
+ goto out;
}
if (fp->type & QEDE_FASTPATH_TX) {
- for (tc = 0; tc < edev->num_tc; tc++) {
- rc = qede_alloc_mem_txq(edev, &fp->txqs[tc]);
- if (rc)
- goto err;
- }
+ rc = qede_alloc_mem_txq(edev, fp->txq);
+ if (rc)
+ goto out;
}
- return 0;
-err:
+out:
return rc;
}
@@ -3043,7 +3358,7 @@ static int qede_alloc_mem_load(struct qede_dev *edev)
/* This function inits fp content and resets the SB, RXQ and TXQ structures */
static void qede_init_fp(struct qede_dev *edev)
{
- int queue_id, rxq_index = 0, txq_index = 0, tc;
+ int queue_id, rxq_index = 0, txq_index = 0;
struct qede_fastpath *fp;
for_each_queue(queue_id) {
@@ -3052,25 +3367,28 @@ static void qede_init_fp(struct qede_dev *edev)
fp->edev = edev;
fp->id = queue_id;
- memset((void *)&fp->napi, 0, sizeof(fp->napi));
-
- memset((void *)fp->sb_info, 0, sizeof(*fp->sb_info));
+ if (fp->type & QEDE_FASTPATH_XDP) {
+ fp->xdp_tx->index = QEDE_TXQ_IDX_TO_XDP(edev,
+ rxq_index);
+ fp->xdp_tx->is_xdp = 1;
+ }
if (fp->type & QEDE_FASTPATH_RX) {
- memset((void *)fp->rxq, 0, sizeof(*fp->rxq));
fp->rxq->rxq_id = rxq_index++;
+
+ /* Determine how to map buffers for this queue */
+ if (fp->type & QEDE_FASTPATH_XDP)
+ fp->rxq->data_direction = DMA_BIDIRECTIONAL;
+ else
+ fp->rxq->data_direction = DMA_FROM_DEVICE;
+ fp->rxq->dev = &edev->pdev->dev;
}
if (fp->type & QEDE_FASTPATH_TX) {
- memset((void *)fp->txqs, 0,
- (edev->num_tc * sizeof(*fp->txqs)));
- for (tc = 0; tc < edev->num_tc; tc++) {
- fp->txqs[tc].index = txq_index +
- tc * QEDE_TSS_COUNT(edev);
- if (edev->dev_info.is_legacy)
- fp->txqs[tc].is_legacy = true;
- }
- txq_index++;
+ fp->txq->index = txq_index++;
+ if (edev->dev_info.is_legacy)
+ fp->txq->is_legacy = 1;
+ fp->txq->dev = &edev->pdev->dev;
}
snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
@@ -3238,11 +3556,18 @@ static int qede_drain_txq(struct qede_dev *edev,
return 0;
}
+static int qede_stop_txq(struct qede_dev *edev,
+ struct qede_tx_queue *txq, int rss_id)
+{
+ return edev->ops->q_tx_stop(edev->cdev, rss_id, txq->handle);
+}
+
static int qede_stop_queues(struct qede_dev *edev)
{
struct qed_update_vport_params vport_update_params;
struct qed_dev *cdev = edev->cdev;
- int rc, tc, i;
+ struct qede_fastpath *fp;
+ int rc, i;
/* Disable the vport */
memset(&vport_update_params, 0, sizeof(vport_update_params));
@@ -3259,53 +3584,49 @@ static int qede_stop_queues(struct qede_dev *edev)
/* Flush Tx queues. If needed, request drain from MCP */
for_each_queue(i) {
- struct qede_fastpath *fp = &edev->fp_array[i];
+ fp = &edev->fp_array[i];
if (fp->type & QEDE_FASTPATH_TX) {
- for (tc = 0; tc < edev->num_tc; tc++) {
- struct qede_tx_queue *txq = &fp->txqs[tc];
+ rc = qede_drain_txq(edev, fp->txq, true);
+ if (rc)
+ return rc;
+ }
- rc = qede_drain_txq(edev, txq, true);
- if (rc)
- return rc;
- }
+ if (fp->type & QEDE_FASTPATH_XDP) {
+ rc = qede_drain_txq(edev, fp->xdp_tx, true);
+ if (rc)
+ return rc;
}
}
/* Stop all Queues in reverse order */
for (i = QEDE_QUEUE_CNT(edev) - 1; i >= 0; i--) {
- struct qed_stop_rxq_params rx_params;
+ fp = &edev->fp_array[i];
/* Stop the Tx Queue(s) */
- if (edev->fp_array[i].type & QEDE_FASTPATH_TX) {
- for (tc = 0; tc < edev->num_tc; tc++) {
- struct qed_stop_txq_params tx_params;
- u8 val;
-
- tx_params.rss_id = i;
- val = edev->fp_array[i].txqs[tc].index;
- tx_params.tx_queue_id = val;
- rc = edev->ops->q_tx_stop(cdev, &tx_params);
- if (rc) {
- DP_ERR(edev, "Failed to stop TXQ #%d\n",
- tx_params.tx_queue_id);
- return rc;
- }
- }
+ if (fp->type & QEDE_FASTPATH_TX) {
+ rc = qede_stop_txq(edev, fp->txq, i);
+ if (rc)
+ return rc;
}
/* Stop the Rx Queue */
- if (edev->fp_array[i].type & QEDE_FASTPATH_RX) {
- memset(&rx_params, 0, sizeof(rx_params));
- rx_params.rss_id = i;
- rx_params.rx_queue_id = edev->fp_array[i].rxq->rxq_id;
-
- rc = edev->ops->q_rx_stop(cdev, &rx_params);
+ if (fp->type & QEDE_FASTPATH_RX) {
+ rc = edev->ops->q_rx_stop(cdev, i, fp->rxq->handle);
if (rc) {
DP_ERR(edev, "Failed to stop RXQ #%d\n", i);
return rc;
}
}
+
+ /* Stop the XDP forwarding queue */
+ if (fp->type & QEDE_FASTPATH_XDP) {
+ rc = qede_stop_txq(edev, fp->xdp_tx, i);
+ if (rc)
+ return rc;
+
+ bpf_prog_put(fp->rxq->xdp_prog);
+ }
}
/* Stop the vport */
@@ -3316,9 +3637,55 @@ static int qede_stop_queues(struct qede_dev *edev)
return rc;
}
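+/* Start a Tx queue [regular or XDP forwarding] and prepare its doorbell */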
+static int qede_start_txq(struct qede_dev *edev,
+ struct qede_fastpath *fp,
+ struct qede_tx_queue *txq, u8 rss_id, u16 sb_idx)
+{
+ dma_addr_t phys_table = qed_chain_get_pbl_phys(&txq->tx_pbl);
+ u32 page_cnt = qed_chain_get_page_cnt(&txq->tx_pbl);
+ struct qed_queue_start_common_params params;
+ struct qed_txq_start_ret_params ret_params;
+ int rc;
+
+ memset(&params, 0, sizeof(params));
+ memset(&ret_params, 0, sizeof(ret_params));
+
+	/* Let the XDP queue share the queue-zone with one of the regular txqs.
+ * We don't really care about its coalescing.
+ */
+ if (txq->is_xdp)
+ params.queue_id = QEDE_TXQ_XDP_TO_IDX(edev, txq);
+ else
+ params.queue_id = txq->index;
+
+ params.sb = fp->sb_info->igu_sb_id;
+ params.sb_idx = sb_idx;
+
+ rc = edev->ops->q_tx_start(edev->cdev, rss_id, &params, phys_table,
+ page_cnt, &ret_params);
+ if (rc) {
+ DP_ERR(edev, "Start TXQ #%d failed %d\n", txq->index, rc);
+ return rc;
+ }
+
+ txq->doorbell_addr = ret_params.p_doorbell;
+ txq->handle = ret_params.p_handle;
+
+	/* Determine the associated FW consumer address */
+ txq->hw_cons_ptr = &fp->sb_info->sb_virt->pi_array[sb_idx];
+
+ /* Prepare the doorbell parameters */
+ SET_FIELD(txq->tx_db.data.params, ETH_DB_DATA_DEST, DB_DEST_XCM);
+ SET_FIELD(txq->tx_db.data.params, ETH_DB_DATA_AGG_CMD, DB_AGG_CMD_SET);
+ SET_FIELD(txq->tx_db.data.params, ETH_DB_DATA_AGG_VAL_SEL,
+ DQ_XCM_ETH_TX_BD_PROD_CMD);
+ txq->tx_db.data.agg_flags = DQ_XCM_ETH_DQ_CF_CMD;
+
+ return rc;
+}
+
static int qede_start_queues(struct qede_dev *edev, bool clear_stats)
{
- int rc, tc, i;
int vlan_removal_en = 1;
struct qed_dev *cdev = edev->cdev;
struct qed_update_vport_params vport_update_params;
@@ -3326,6 +3693,7 @@ static int qede_start_queues(struct qede_dev *edev, bool clear_stats)
struct qed_dev_info *qed_info = &edev->dev_info.common;
struct qed_start_vport_params start = {0};
bool reset_rss_indir = false;
+ int rc, i;
if (!edev->num_queues) {
DP_ERR(edev,
@@ -3357,11 +3725,12 @@ static int qede_start_queues(struct qede_dev *edev, bool clear_stats)
u32 page_cnt;
if (fp->type & QEDE_FASTPATH_RX) {
+ struct qed_rxq_start_ret_params ret_params;
struct qede_rx_queue *rxq = fp->rxq;
__le16 *val;
+ memset(&ret_params, 0, sizeof(ret_params));
memset(&q_params, 0, sizeof(q_params));
- q_params.rss_id = i;
q_params.queue_id = rxq->rxq_id;
q_params.vport_id = 0;
q_params.sb = fp->sb_info->igu_sb_id;
@@ -3371,60 +3740,44 @@ static int qede_start_queues(struct qede_dev *edev, bool clear_stats)
qed_chain_get_pbl_phys(&rxq->rx_comp_ring);
page_cnt = qed_chain_get_page_cnt(&rxq->rx_comp_ring);
- rc = edev->ops->q_rx_start(cdev, &q_params,
+ rc = edev->ops->q_rx_start(cdev, i, &q_params,
rxq->rx_buf_size,
rxq->rx_bd_ring.p_phys_addr,
p_phys_table,
- page_cnt,
- &rxq->hw_rxq_prod_addr);
+ page_cnt, &ret_params);
if (rc) {
DP_ERR(edev, "Start RXQ #%d failed %d\n", i,
rc);
return rc;
}
+ /* Use the return parameters */
+ rxq->hw_rxq_prod_addr = ret_params.p_prod;
+ rxq->handle = ret_params.p_handle;
+
val = &fp->sb_info->sb_virt->pi_array[RX_PI];
rxq->hw_cons_ptr = val;
qede_update_rx_prod(edev, rxq);
}
- if (!(fp->type & QEDE_FASTPATH_TX))
- continue;
-
- for (tc = 0; tc < edev->num_tc; tc++) {
- struct qede_tx_queue *txq = &fp->txqs[tc];
-
- p_phys_table = qed_chain_get_pbl_phys(&txq->tx_pbl);
- page_cnt = qed_chain_get_page_cnt(&txq->tx_pbl);
-
- memset(&q_params, 0, sizeof(q_params));
- q_params.rss_id = i;
- q_params.queue_id = txq->index;
- q_params.vport_id = 0;
- q_params.sb = fp->sb_info->igu_sb_id;
- q_params.sb_idx = TX_PI(tc);
+ if (fp->type & QEDE_FASTPATH_XDP) {
+ rc = qede_start_txq(edev, fp, fp->xdp_tx, i, XDP_PI);
+ if (rc)
+ return rc;
- rc = edev->ops->q_tx_start(cdev, &q_params,
- p_phys_table, page_cnt,
- &txq->doorbell_addr);
- if (rc) {
- DP_ERR(edev, "Start TXQ #%d failed %d\n",
- txq->index, rc);
+ fp->rxq->xdp_prog = bpf_prog_add(edev->xdp_prog, 1);
+ if (IS_ERR(fp->rxq->xdp_prog)) {
+ rc = PTR_ERR(fp->rxq->xdp_prog);
+ fp->rxq->xdp_prog = NULL;
return rc;
}
+ }
- txq->hw_cons_ptr =
- &fp->sb_info->sb_virt->pi_array[TX_PI(tc)];
- SET_FIELD(txq->tx_db.data.params,
- ETH_DB_DATA_DEST, DB_DEST_XCM);
- SET_FIELD(txq->tx_db.data.params, ETH_DB_DATA_AGG_CMD,
- DB_AGG_CMD_SET);
- SET_FIELD(txq->tx_db.data.params,
- ETH_DB_DATA_AGG_VAL_SEL,
- DQ_XCM_ETH_TX_BD_PROD_CMD);
-
- txq->tx_db.data.agg_flags = DQ_XCM_ETH_DQ_CF_CMD;
+ if (fp->type & QEDE_FASTPATH_TX) {
+ rc = qede_start_txq(edev, fp, fp->txq, i, TX_PI(0));
+ if (rc)
+ return rc;
}
}
@@ -3519,15 +3872,18 @@ enum qede_unload_mode {
QEDE_UNLOAD_NORMAL,
};
-static void qede_unload(struct qede_dev *edev, enum qede_unload_mode mode)
+static void qede_unload(struct qede_dev *edev, enum qede_unload_mode mode,
+ bool is_locked)
{
struct qed_link_params link_params;
int rc;
DP_INFO(edev, "Starting qede unload\n");
+ if (!is_locked)
+ __qede_lock(edev);
+
qede_roce_dev_event_close(edev);
- mutex_lock(&edev->qede_lock);
edev->state = QEDE_STATE_CLOSED;
/* Close OS Tx */
@@ -3559,7 +3915,8 @@ static void qede_unload(struct qede_dev *edev, enum qede_unload_mode mode)
qede_free_fp_array(edev);
out:
- mutex_unlock(&edev->qede_lock);
+ if (!is_locked)
+ __qede_unlock(edev);
DP_INFO(edev, "Ending qede unload\n");
}
@@ -3568,7 +3925,8 @@ enum qede_load_mode {
QEDE_LOAD_RELOAD,
};
-static int qede_load(struct qede_dev *edev, enum qede_load_mode mode)
+static int qede_load(struct qede_dev *edev, enum qede_load_mode mode,
+ bool is_locked)
{
struct qed_link_params link_params;
struct qed_link_output link_output;
@@ -3576,21 +3934,24 @@ static int qede_load(struct qede_dev *edev, enum qede_load_mode mode)
DP_INFO(edev, "Starting qede load\n");
+ if (!is_locked)
+ __qede_lock(edev);
+
rc = qede_set_num_queues(edev);
if (rc)
- goto err0;
+ goto out;
rc = qede_alloc_fp_array(edev);
if (rc)
- goto err0;
+ goto out;
qede_init_fp(edev);
rc = qede_alloc_mem_load(edev);
if (rc)
goto err1;
- DP_INFO(edev, "Allocated %d RSS queues on %d TC/s\n",
- QEDE_QUEUE_CNT(edev), edev->num_tc);
+ DP_INFO(edev, "Allocated %d Rx, %d Tx queues\n",
+ QEDE_RSS_COUNT(edev), QEDE_TSS_COUNT(edev));
rc = qede_set_real_num_queues(edev);
if (rc)
@@ -3612,10 +3973,6 @@ static int qede_load(struct qede_dev *edev, enum qede_load_mode mode)
/* Add primary mac and set Rx filters */
ether_addr_copy(edev->primary_mac, edev->ndev->dev_addr);
- mutex_lock(&edev->qede_lock);
- edev->state = QEDE_STATE_OPEN;
- mutex_unlock(&edev->qede_lock);
-
/* Program un-configured VLANs */
qede_configure_vlan_filters(edev);
@@ -3630,10 +3987,12 @@ static int qede_load(struct qede_dev *edev, enum qede_load_mode mode)
qede_roce_dev_event_open(edev);
qede_link_update(edev, &link_output);
+ edev->state = QEDE_STATE_OPEN;
+
DP_INFO(edev, "Ending successfully qede load\n");
- return 0;
+ goto out;
err4:
qede_sync_free_irqs(edev);
memset(&edev->int_info.msix_cnt, 0, sizeof(struct qed_int_info));
@@ -3647,26 +4006,40 @@ err1:
edev->num_queues = 0;
edev->fp_num_tx = 0;
edev->fp_num_rx = 0;
-err0:
+out:
+ if (!is_locked)
+ __qede_unlock(edev);
+
return rc;
}
+/* 'func' should be able to run either between unload and reload [when the
+ * interface is actually running] or afterwards [when it's currently DOWN].
+ */
void qede_reload(struct qede_dev *edev,
- void (*func)(struct qede_dev *, union qede_reload_args *),
- union qede_reload_args *args)
+ struct qede_reload_args *args, bool is_locked)
{
- qede_unload(edev, QEDE_UNLOAD_NORMAL);
- /* Call function handler to update parameters
- * needed for function load.
- */
- if (func)
- func(edev, args);
+ if (!is_locked)
+ __qede_lock(edev);
- qede_load(edev, QEDE_LOAD_RELOAD);
+ /* Since qede_lock is held, the internal state can't change even
+ * if the netdev state starts transitioning. Check whether the current
+ * internal configuration indicates the device is up, then reload.
+ */
+ if (edev->state == QEDE_STATE_OPEN) {
+ qede_unload(edev, QEDE_UNLOAD_NORMAL, true);
+ if (args)
+ args->func(edev, args);
+ qede_load(edev, QEDE_LOAD_RELOAD, true);
+
+ /* Since no one is going to do it for us, re-configure */
+ qede_config_rx_mode(edev->ndev);
+ } else if (args) {
+ args->func(edev, args);
+ }
- mutex_lock(&edev->qede_lock);
- qede_config_rx_mode(edev->ndev);
- mutex_unlock(&edev->qede_lock);
+ if (!is_locked)
+ __qede_unlock(edev);
}
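With the reworked signature, callers hand qede_reload() a struct qede_reload_args whose func member runs between the unload and the reload (or on its own if the device is down). A minimal caller sketch, assuming the args structure carries the callback plus a union of per-operation parameters; its exact layout is not shown in this hunk:

/* Hypothetical MTU-change path built on the new qede_reload() contract */
static void qede_update_mtu(struct qede_dev *edev,
			    struct qede_reload_args *args)
{
	edev->ndev->mtu = args->u.mtu;	/* assumed union member */
}

static int qede_change_mtu_sketch(struct net_device *ndev, int new_mtu)
{
	struct qede_dev *edev = netdev_priv(ndev);
	struct qede_reload_args args;

	args.func = qede_update_mtu;
	args.u.mtu = new_mtu;
	qede_reload(edev, &args, false);	/* takes qede_lock itself */
	return 0;
}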
/* called with rtnl_lock */
@@ -3679,13 +4052,14 @@ static int qede_open(struct net_device *ndev)
edev->ops->common->set_power_state(edev->cdev, PCI_D0);
- rc = qede_load(edev, QEDE_LOAD_NORMAL);
-
+ rc = qede_load(edev, QEDE_LOAD_NORMAL, false);
if (rc)
return rc;
udp_tunnel_get_rx_info(ndev);
+ edev->ops->common->update_drv_state(edev->cdev, true);
+
return 0;
}
@@ -3693,7 +4067,9 @@ static int qede_close(struct net_device *ndev)
{
struct qede_dev *edev = netdev_priv(ndev);
- qede_unload(edev, QEDE_UNLOAD_NORMAL);
+ qede_unload(edev, QEDE_UNLOAD_NORMAL, false);
+
+ edev->ops->common->update_drv_state(edev->cdev, false);
return 0;
}
@@ -3755,6 +4131,8 @@ static int qede_set_mac_addr(struct net_device *ndev, void *p)
if (rc)
return rc;
+ edev->ops->common->update_mac(edev->cdev, addr->sa_data);
+
/* Add MAC filter according to the new unicast HW MAC address */
ether_addr_copy(edev->primary_mac, ndev->dev_addr);
return qede_set_ucast_rx_mac(edev, QED_FILTER_XCAST_TYPE_ADD,
@@ -3821,15 +4199,8 @@ static void qede_set_rx_mode(struct net_device *ndev)
{
struct qede_dev *edev = netdev_priv(ndev);
- DP_INFO(edev, "qede_set_rx_mode called\n");
-
- if (edev->state != QEDE_STATE_OPEN) {
- DP_INFO(edev,
- "qede_set_rx_mode called while interface is down\n");
- } else {
- set_bit(QEDE_SP_RX_MODE, &edev->sp_flags);
- schedule_delayed_work(&edev->sp_task, 0);
- }
+ set_bit(QEDE_SP_RX_MODE, &edev->sp_flags);
+ schedule_delayed_work(&edev->sp_task, 0);
}
/* Must be called with qede_lock held */
@@ -3877,7 +4248,7 @@ static void qede_config_rx_mode(struct net_device *ndev)
/* Check for promiscuous */
if ((ndev->flags & IFF_PROMISC) ||
- (uc_count > 15)) { /* @@@TBD resource allocation - 1 */
+ (uc_count > edev->dev_info.num_mac_filters - 1)) {
accept_flags = QED_FILTER_RX_MODE_TYPE_PROMISC;
} else {
/* Add MAC filters according to the unicast secondary macs */
diff --git a/drivers/net/ethernet/qlogic/qede/qede_roce.c b/drivers/net/ethernet/qlogic/qede/qede_roce.c
index 9867f960b063..49272716a7c4 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_roce.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_roce.c
@@ -191,8 +191,8 @@ int qede_roce_register_driver(struct qedr_driver *drv)
}
mutex_unlock(&qedr_dev_list_lock);
- DP_INFO(edev, "qedr: discovered and registered %d RoCE funcs\n",
- qedr_counter);
+ pr_notice("qedr: discovered and registered %d RoCE funcs\n",
+ qedr_counter);
return 0;
}
diff --git a/drivers/net/ethernet/qlogic/qla3xxx.c b/drivers/net/ethernet/qlogic/qla3xxx.c
index b09a6b80d107..5c100ab86c00 100644
--- a/drivers/net/ethernet/qlogic/qla3xxx.c
+++ b/drivers/net/ethernet/qlogic/qla3xxx.c
@@ -3755,7 +3755,6 @@ static const struct net_device_ops ql3xxx_netdev_ops = {
.ndo_open = ql3xxx_open,
.ndo_start_xmit = ql3xxx_send,
.ndo_stop = ql3xxx_close,
- .ndo_change_mtu = eth_change_mtu,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = ql3xxx_set_mac_address,
.ndo_tx_timeout = ql3xxx_tx_timeout,
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
index 509b596cf1e8..838cc0ceafd8 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
@@ -1024,12 +1024,6 @@ int qlcnic_change_mtu(struct net_device *netdev, int mtu)
struct qlcnic_adapter *adapter = netdev_priv(netdev);
int rc = 0;
- if (mtu < P3P_MIN_MTU || mtu > P3P_MAX_MTU) {
- dev_err(&adapter->netdev->dev, "%d bytes < mtu < %d bytes"
- " not supported\n", P3P_MAX_MTU, P3P_MIN_MTU);
- return -EINVAL;
- }
-
rc = qlcnic_fw_cmd_set_mtu(adapter, mtu);
if (!rc)
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
index 3ae3968b0edf..4c0cce962585 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
@@ -2342,6 +2342,10 @@ qlcnic_setup_netdev(struct qlcnic_adapter *adapter, struct net_device *netdev,
netdev->priv_flags |= IFF_UNICAST_FLT;
netdev->irq = adapter->msix_entries[0].vector;
+ /* MTU range: 68 - 9600 */
+ netdev->min_mtu = P3P_MIN_MTU;
+ netdev->max_mtu = P3P_MAX_MTU;
+
err = qlcnic_set_real_num_queues(adapter, adapter->drv_tx_rings,
adapter->drv_sds_rings);
if (err)
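Dropping the open-coded range check from qlcnic_change_mtu() is safe because, once min_mtu/max_mtu are set, the networking core rejects out-of-range values before the driver's ndo_change_mtu is ever invoked; roughly (a sketch of the core-side check, not driver code):

/* Approximate validation done by dev_set_mtu() once the range is set */
if (new_mtu < dev->min_mtu || new_mtu > dev->max_mtu)
	return -EINVAL;	/* the driver never sees the bad value */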
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_main.c b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
index fd4a8e473f11..1409412ab39d 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge_main.c
+++ b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
@@ -4788,6 +4788,13 @@ static int qlge_probe(struct pci_dev *pdev,
ndev->ethtool_ops = &qlge_ethtool_ops;
ndev->watchdog_timeo = 10 * HZ;
+ /* MTU range: this driver only supports 1500 or 9000, so this only
+ * filters out values above or below, and we'll rely on
+ * qlge_change_mtu to make sure only 1500 or 9000 are allowed
+ */
+ ndev->min_mtu = ETH_DATA_LEN;
+ ndev->max_mtu = 9000;
+
err = register_netdev(ndev);
if (err) {
dev_err(&pdev->dev, "net device registration failed.\n");
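Since min_mtu/max_mtu can only express a contiguous range, the comment above defers the "exactly 1500 or 9000" rule to the driver. A sketch of the check qlge_change_mtu is expected to keep doing; that function is not shown in this diff, so this is only the shape of it:

/* Hypothetical remaining driver-side check in qlge_change_mtu() */
static int qlge_change_mtu_sketch(struct net_device *ndev, int new_mtu)
{
	if (new_mtu != ETH_DATA_LEN && new_mtu != 9000)
		return -EINVAL;	/* only the two supported sizes */

	ndev->mtu = new_mtu;
	return 0;
}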
diff --git a/drivers/net/ethernet/qualcomm/emac/Makefile b/drivers/net/ethernet/qualcomm/emac/Makefile
index 01ee144c6386..7a6687982dae 100644
--- a/drivers/net/ethernet/qualcomm/emac/Makefile
+++ b/drivers/net/ethernet/qualcomm/emac/Makefile
@@ -4,4 +4,6 @@
obj-$(CONFIG_QCOM_EMAC) += qcom-emac.o
-qcom-emac-objs := emac.o emac-mac.o emac-phy.o emac-sgmii.o
+qcom-emac-objs := emac.o emac-mac.o emac-phy.o emac-sgmii.o \
+ emac-sgmii-fsm9900.o emac-sgmii-qdf2432.o \
+ emac-sgmii-qdf2400.o
diff --git a/drivers/net/ethernet/qualcomm/emac/emac-sgmii-fsm9900.c b/drivers/net/ethernet/qualcomm/emac/emac-sgmii-fsm9900.c
new file mode 100644
index 000000000000..af690e1a6e7b
--- /dev/null
+++ b/drivers/net/ethernet/qualcomm/emac/emac-sgmii-fsm9900.c
@@ -0,0 +1,245 @@
+/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/* Qualcomm Technologies, Inc. FSM9900 EMAC SGMII Controller driver.
+ */
+
+#include <linux/iopoll.h>
+#include "emac.h"
+
+/* EMAC_QSERDES register offsets */
+#define EMAC_QSERDES_COM_SYS_CLK_CTRL 0x0000
+#define EMAC_QSERDES_COM_PLL_CNTRL 0x0014
+#define EMAC_QSERDES_COM_PLL_IP_SETI 0x0018
+#define EMAC_QSERDES_COM_PLL_CP_SETI 0x0024
+#define EMAC_QSERDES_COM_PLL_IP_SETP 0x0028
+#define EMAC_QSERDES_COM_PLL_CP_SETP 0x002c
+#define EMAC_QSERDES_COM_SYSCLK_EN_SEL 0x0038
+#define EMAC_QSERDES_COM_RESETSM_CNTRL 0x0040
+#define EMAC_QSERDES_COM_PLLLOCK_CMP1 0x0044
+#define EMAC_QSERDES_COM_PLLLOCK_CMP2 0x0048
+#define EMAC_QSERDES_COM_PLLLOCK_CMP3 0x004c
+#define EMAC_QSERDES_COM_PLLLOCK_CMP_EN 0x0050
+#define EMAC_QSERDES_COM_DEC_START1 0x0064
+#define EMAC_QSERDES_COM_DIV_FRAC_START1 0x0098
+#define EMAC_QSERDES_COM_DIV_FRAC_START2 0x009c
+#define EMAC_QSERDES_COM_DIV_FRAC_START3 0x00a0
+#define EMAC_QSERDES_COM_DEC_START2 0x00a4
+#define EMAC_QSERDES_COM_PLL_CRCTRL 0x00ac
+#define EMAC_QSERDES_COM_RESET_SM 0x00bc
+#define EMAC_QSERDES_TX_BIST_MODE_LANENO 0x0100
+#define EMAC_QSERDES_TX_TX_EMP_POST1_LVL 0x0108
+#define EMAC_QSERDES_TX_TX_DRV_LVL 0x010c
+#define EMAC_QSERDES_TX_LANE_MODE 0x0150
+#define EMAC_QSERDES_TX_TRAN_DRVR_EMP_EN 0x0170
+#define EMAC_QSERDES_RX_CDR_CONTROL 0x0200
+#define EMAC_QSERDES_RX_CDR_CONTROL2 0x0210
+#define EMAC_QSERDES_RX_RX_EQ_GAIN12 0x0230
+
+/* EMAC_SGMII register offsets */
+#define EMAC_SGMII_PHY_SERDES_START 0x0000
+#define EMAC_SGMII_PHY_CMN_PWR_CTRL 0x0004
+#define EMAC_SGMII_PHY_RX_PWR_CTRL 0x0008
+#define EMAC_SGMII_PHY_TX_PWR_CTRL 0x000C
+#define EMAC_SGMII_PHY_LANE_CTRL1 0x0018
+#define EMAC_SGMII_PHY_CDR_CTRL0 0x0058
+#define EMAC_SGMII_PHY_POW_DWN_CTRL0 0x0080
+#define EMAC_SGMII_PHY_INTERRUPT_MASK 0x00b4
+
+#define PLL_IPSETI(x) ((x) & 0x3f)
+
+#define PLL_CPSETI(x) ((x) & 0xff)
+
+#define PLL_IPSETP(x) ((x) & 0x3f)
+
+#define PLL_CPSETP(x) ((x) & 0x1f)
+
+#define PLL_RCTRL(x) (((x) & 0xf) << 4)
+#define PLL_CCTRL(x) ((x) & 0xf)
+
+#define LANE_MODE(x) ((x) & 0x1f)
+
+#define SYSCLK_CM BIT(4)
+#define SYSCLK_AC_COUPLE BIT(3)
+
+#define OCP_EN BIT(5)
+#define PLL_DIV_FFEN BIT(2)
+#define PLL_DIV_ORD BIT(1)
+
+#define SYSCLK_SEL_CMOS BIT(3)
+
+#define FRQ_TUNE_MODE BIT(4)
+
+#define PLLLOCK_CMP_EN BIT(0)
+
+#define DEC_START1_MUX BIT(7)
+#define DEC_START1(x) ((x) & 0x7f)
+
+#define DIV_FRAC_START_MUX BIT(7)
+#define DIV_FRAC_START(x) ((x) & 0x7f)
+
+#define DIV_FRAC_START3_MUX BIT(4)
+#define DIV_FRAC_START3(x) ((x) & 0xf)
+
+#define DEC_START2_MUX BIT(1)
+#define DEC_START2 BIT(0)
+
+#define READY BIT(5)
+
+#define TX_EMP_POST1_LVL_MUX BIT(5)
+#define TX_EMP_POST1_LVL(x) ((x) & 0x1f)
+
+#define TX_DRV_LVL_MUX BIT(4)
+#define TX_DRV_LVL(x) ((x) & 0xf)
+
+#define EMP_EN_MUX BIT(1)
+#define EMP_EN BIT(0)
+
+#define SECONDORDERENABLE BIT(6)
+#define FIRSTORDER_THRESH(x) (((x) & 0x7) << 3)
+#define SECONDORDERGAIN(x) ((x) & 0x7)
+
+#define RX_EQ_GAIN2(x) (((x) & 0xf) << 4)
+#define RX_EQ_GAIN1(x) ((x) & 0xf)
+
+#define SERDES_START BIT(0)
+
+#define BIAS_EN BIT(6)
+#define PLL_EN BIT(5)
+#define SYSCLK_EN BIT(4)
+#define CLKBUF_L_EN BIT(3)
+#define PLL_TXCLK_EN BIT(1)
+#define PLL_RXCLK_EN BIT(0)
+
+#define L0_RX_SIGDET_EN BIT(7)
+#define L0_RX_TERM_MODE(x) (((x) & 3) << 4)
+#define L0_RX_I_EN BIT(1)
+
+#define L0_TX_EN BIT(5)
+#define L0_CLKBUF_EN BIT(4)
+#define L0_TRAN_BIAS_EN BIT(1)
+
+#define L0_RX_EQUALIZE_ENABLE BIT(6)
+#define L0_RESET_TSYNC_EN BIT(4)
+#define L0_DRV_LVL(x) ((x) & 0xf)
+
+#define PWRDN_B BIT(0)
+#define CDR_MAX_CNT(x) ((x) & 0xff)
+
+#define PLLLOCK_CMP(x) ((x) & 0xff)
+
+#define SERDES_START_WAIT_TIMES 100
+
+struct emac_reg_write {
+ unsigned int offset;
+ u32 val;
+};
+
+static void emac_reg_write_all(void __iomem *base,
+ const struct emac_reg_write *itr, size_t size)
+{
+ size_t i;
+
+ for (i = 0; i < size; ++itr, ++i)
+ writel(itr->val, base + itr->offset);
+}
+
+static const struct emac_reg_write physical_coding_sublayer_programming[] = {
+ {EMAC_SGMII_PHY_CDR_CTRL0, CDR_MAX_CNT(15)},
+ {EMAC_SGMII_PHY_POW_DWN_CTRL0, PWRDN_B},
+ {EMAC_SGMII_PHY_CMN_PWR_CTRL,
+ BIAS_EN | SYSCLK_EN | CLKBUF_L_EN | PLL_TXCLK_EN | PLL_RXCLK_EN},
+ {EMAC_SGMII_PHY_TX_PWR_CTRL, L0_TX_EN | L0_CLKBUF_EN | L0_TRAN_BIAS_EN},
+ {EMAC_SGMII_PHY_RX_PWR_CTRL,
+ L0_RX_SIGDET_EN | L0_RX_TERM_MODE(1) | L0_RX_I_EN},
+ {EMAC_SGMII_PHY_CMN_PWR_CTRL,
+ BIAS_EN | PLL_EN | SYSCLK_EN | CLKBUF_L_EN | PLL_TXCLK_EN |
+ PLL_RXCLK_EN},
+ {EMAC_SGMII_PHY_LANE_CTRL1,
+ L0_RX_EQUALIZE_ENABLE | L0_RESET_TSYNC_EN | L0_DRV_LVL(15)},
+};
+
+static const struct emac_reg_write sysclk_refclk_setting[] = {
+ {EMAC_QSERDES_COM_SYSCLK_EN_SEL, SYSCLK_SEL_CMOS},
+ {EMAC_QSERDES_COM_SYS_CLK_CTRL, SYSCLK_CM | SYSCLK_AC_COUPLE},
+};
+
+static const struct emac_reg_write pll_setting[] = {
+ {EMAC_QSERDES_COM_PLL_IP_SETI, PLL_IPSETI(1)},
+ {EMAC_QSERDES_COM_PLL_CP_SETI, PLL_CPSETI(59)},
+ {EMAC_QSERDES_COM_PLL_IP_SETP, PLL_IPSETP(10)},
+ {EMAC_QSERDES_COM_PLL_CP_SETP, PLL_CPSETP(9)},
+ {EMAC_QSERDES_COM_PLL_CRCTRL, PLL_RCTRL(15) | PLL_CCTRL(11)},
+ {EMAC_QSERDES_COM_PLL_CNTRL, OCP_EN | PLL_DIV_FFEN | PLL_DIV_ORD},
+ {EMAC_QSERDES_COM_DEC_START1, DEC_START1_MUX | DEC_START1(2)},
+ {EMAC_QSERDES_COM_DEC_START2, DEC_START2_MUX | DEC_START2},
+ {EMAC_QSERDES_COM_DIV_FRAC_START1,
+ DIV_FRAC_START_MUX | DIV_FRAC_START(85)},
+ {EMAC_QSERDES_COM_DIV_FRAC_START2,
+ DIV_FRAC_START_MUX | DIV_FRAC_START(42)},
+ {EMAC_QSERDES_COM_DIV_FRAC_START3,
+ DIV_FRAC_START3_MUX | DIV_FRAC_START3(3)},
+ {EMAC_QSERDES_COM_PLLLOCK_CMP1, PLLLOCK_CMP(43)},
+ {EMAC_QSERDES_COM_PLLLOCK_CMP2, PLLLOCK_CMP(104)},
+ {EMAC_QSERDES_COM_PLLLOCK_CMP3, PLLLOCK_CMP(0)},
+ {EMAC_QSERDES_COM_PLLLOCK_CMP_EN, PLLLOCK_CMP_EN},
+ {EMAC_QSERDES_COM_RESETSM_CNTRL, FRQ_TUNE_MODE},
+};
+
+static const struct emac_reg_write cdr_setting[] = {
+ {EMAC_QSERDES_RX_CDR_CONTROL,
+ SECONDORDERENABLE | FIRSTORDER_THRESH(3) | SECONDORDERGAIN(2)},
+ {EMAC_QSERDES_RX_CDR_CONTROL2,
+ SECONDORDERENABLE | FIRSTORDER_THRESH(3) | SECONDORDERGAIN(4)},
+};
+
+static const struct emac_reg_write tx_rx_setting[] = {
+ {EMAC_QSERDES_TX_BIST_MODE_LANENO, 0},
+ {EMAC_QSERDES_TX_TX_DRV_LVL, TX_DRV_LVL_MUX | TX_DRV_LVL(15)},
+ {EMAC_QSERDES_TX_TRAN_DRVR_EMP_EN, EMP_EN_MUX | EMP_EN},
+ {EMAC_QSERDES_TX_TX_EMP_POST1_LVL,
+ TX_EMP_POST1_LVL_MUX | TX_EMP_POST1_LVL(1)},
+ {EMAC_QSERDES_RX_RX_EQ_GAIN12, RX_EQ_GAIN2(15) | RX_EQ_GAIN1(15)},
+ {EMAC_QSERDES_TX_LANE_MODE, LANE_MODE(8)},
+};
+
+int emac_sgmii_init_fsm9900(struct emac_adapter *adpt)
+{
+ struct emac_phy *phy = &adpt->phy;
+ unsigned int i;
+
+ emac_reg_write_all(phy->base, physical_coding_sublayer_programming,
+ ARRAY_SIZE(physical_coding_sublayer_programming));
+ emac_reg_write_all(phy->base, sysclk_refclk_setting,
+ ARRAY_SIZE(sysclk_refclk_setting));
+ emac_reg_write_all(phy->base, pll_setting, ARRAY_SIZE(pll_setting));
+ emac_reg_write_all(phy->base, cdr_setting, ARRAY_SIZE(cdr_setting));
+ emac_reg_write_all(phy->base, tx_rx_setting, ARRAY_SIZE(tx_rx_setting));
+
+ /* Power up the Ser/Des engine */
+ writel(SERDES_START, phy->base + EMAC_SGMII_PHY_SERDES_START);
+
+ for (i = 0; i < SERDES_START_WAIT_TIMES; i++) {
+ if (readl(phy->base + EMAC_QSERDES_COM_RESET_SM) & READY)
+ break;
+ usleep_range(100, 200);
+ }
+
+ if (i == SERDES_START_WAIT_TIMES) {
+ netdev_err(adpt->netdev, "error: ser/des failed to start\n");
+ return -EIO;
+ }
+ /* Mask out all the SGMII interrupts */
+ writel(0, phy->base + EMAC_SGMII_PHY_INTERRUPT_MASK);
+
+ return 0;
+}
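The file already includes <linux/iopoll.h>, so the open-coded READY wait above could equally be written with the generic poll helper. An equivalent sketch, keeping the same 100us step and total budget:

/* Equivalent wait using the iopoll helper */
u32 val;
int ret;

ret = readl_poll_timeout(phy->base + EMAC_QSERDES_COM_RESET_SM, val,
			 val & READY, 100,
			 100 * SERDES_START_WAIT_TIMES);
if (ret)
	netdev_err(adpt->netdev, "error: ser/des failed to start\n");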
diff --git a/drivers/net/ethernet/qualcomm/emac/emac-sgmii-qdf2400.c b/drivers/net/ethernet/qualcomm/emac/emac-sgmii-qdf2400.c
new file mode 100644
index 000000000000..5b8419498ef1
--- /dev/null
+++ b/drivers/net/ethernet/qualcomm/emac/emac-sgmii-qdf2400.c
@@ -0,0 +1,217 @@
+/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/* Qualcomm Technologies, Inc. QDF2400 EMAC SGMII Controller driver.
+ */
+
+#include <linux/iopoll.h>
+#include "emac.h"
+
+/* EMAC_SGMII register offsets */
+#define EMAC_SGMII_PHY_TX_PWR_CTRL 0x000C
+#define EMAC_SGMII_PHY_LANE_CTRL1 0x0018
+#define EMAC_SGMII_PHY_CDR_CTRL0 0x0058
+#define EMAC_SGMII_PHY_POW_DWN_CTRL0 0x0080
+#define EMAC_SGMII_PHY_RESET_CTRL 0x00a8
+#define EMAC_SGMII_PHY_INTERRUPT_MASK 0x00b4
+
+/* SGMII digital lane registers */
+#define EMAC_SGMII_LN_DRVR_CTRL0 0x000C
+#define EMAC_SGMII_LN_DRVR_TAP_EN 0x0018
+#define EMAC_SGMII_LN_TX_MARGINING 0x001C
+#define EMAC_SGMII_LN_TX_PRE 0x0020
+#define EMAC_SGMII_LN_TX_POST 0x0024
+#define EMAC_SGMII_LN_TX_BAND_MODE 0x0060
+#define EMAC_SGMII_LN_LANE_MODE 0x0064
+#define EMAC_SGMII_LN_PARALLEL_RATE 0x007C
+#define EMAC_SGMII_LN_CML_CTRL_MODE0 0x00C0
+#define EMAC_SGMII_LN_MIXER_CTRL_MODE0 0x00D8
+#define EMAC_SGMII_LN_VGA_INITVAL 0x013C
+#define EMAC_SGMII_LN_UCDR_FO_GAIN_MODE0 0x0184
+#define EMAC_SGMII_LN_UCDR_SO_GAIN_MODE0 0x0190
+#define EMAC_SGMII_LN_UCDR_SO_CONFIG 0x019C
+#define EMAC_SGMII_LN_RX_BAND 0x01A4
+#define EMAC_SGMII_LN_RX_RCVR_PATH1_MODE0 0x01C0
+#define EMAC_SGMII_LN_RSM_CONFIG 0x01F8
+#define EMAC_SGMII_LN_SIGDET_ENABLES 0x0230
+#define EMAC_SGMII_LN_SIGDET_CNTRL 0x0234
+#define EMAC_SGMII_LN_SIGDET_DEGLITCH_CNTRL 0x0238
+#define EMAC_SGMII_LN_RX_EN_SIGNAL 0x02AC
+#define EMAC_SGMII_LN_RX_MISC_CNTRL0 0x02B8
+#define EMAC_SGMII_LN_DRVR_LOGIC_CLKDIV 0x02C8
+
+/* SGMII digital lane register values */
+#define UCDR_STEP_BY_TWO_MODE0 BIT(7)
+#define UCDR_xO_GAIN_MODE(x) ((x) & 0x7f)
+#define UCDR_ENABLE BIT(6)
+#define UCDR_SO_SATURATION(x) ((x) & 0x3f)
+
+#define SIGDET_LP_BYP_PS4 BIT(7)
+#define SIGDET_EN_PS0_TO_PS2 BIT(6)
+
+#define TXVAL_VALID_INIT BIT(4)
+#define KR_PCIGEN3_MODE BIT(0)
+
+#define MAIN_EN BIT(0)
+
+#define TX_MARGINING_MUX BIT(6)
+#define TX_MARGINING(x) ((x) & 0x3f)
+
+#define TX_PRE_MUX BIT(6)
+
+#define TX_POST_MUX BIT(6)
+
+#define CML_GEAR_MODE(x) (((x) & 7) << 3)
+#define CML2CMOS_IBOOST_MODE(x) ((x) & 7)
+
+#define MIXER_LOADB_MODE(x) (((x) & 0xf) << 2)
+#define MIXER_DATARATE_MODE(x) ((x) & 3)
+
+#define VGA_THRESH_DFE(x) ((x) & 0x3f)
+
+#define SIGDET_LP_BYP_PS0_TO_PS2 BIT(5)
+#define SIGDET_FLT_BYP BIT(0)
+
+#define SIGDET_LVL(x) (((x) & 0xf) << 4)
+
+#define SIGDET_DEGLITCH_CTRL(x) (((x) & 0xf) << 1)
+
+#define INVERT_PCS_RX_CLK BIT(7)
+
+#define DRVR_LOGIC_CLK_EN BIT(4)
+#define DRVR_LOGIC_CLK_DIV(x) ((x) & 0xf)
+
+#define PARALLEL_RATE_MODE0(x) ((x) & 0x3)
+
+#define BAND_MODE0(x) ((x) & 0x3)
+
+#define LANE_MODE(x) ((x) & 0x1f)
+
+#define CDR_PD_SEL_MODE0(x) (((x) & 0x3) << 5)
+#define EN_DLL_MODE0 BIT(4)
+#define EN_IQ_DCC_MODE0 BIT(3)
+#define EN_IQCAL_MODE0 BIT(2)
+
+#define BYPASS_RSM_SAMP_CAL BIT(1)
+#define BYPASS_RSM_DLL_CAL BIT(0)
+
+#define L0_RX_EQUALIZE_ENABLE BIT(6)
+
+#define PWRDN_B BIT(0)
+
+#define CDR_MAX_CNT(x) ((x) & 0xff)
+
+#define SERDES_START_WAIT_TIMES 100
+
+struct emac_reg_write {
+ unsigned int offset;
+ u32 val;
+};
+
+static void emac_reg_write_all(void __iomem *base,
+ const struct emac_reg_write *itr, size_t size)
+{
+ size_t i;
+
+ for (i = 0; i < size; ++itr, ++i)
+ writel(itr->val, base + itr->offset);
+}
+
+static const struct emac_reg_write sgmii_laned[] = {
+ /* CDR Settings */
+ {EMAC_SGMII_LN_UCDR_FO_GAIN_MODE0,
+ UCDR_STEP_BY_TWO_MODE0 | UCDR_xO_GAIN_MODE(10)},
+ {EMAC_SGMII_LN_UCDR_SO_GAIN_MODE0, UCDR_xO_GAIN_MODE(0)},
+ {EMAC_SGMII_LN_UCDR_SO_CONFIG, UCDR_ENABLE | UCDR_SO_SATURATION(12)},
+
+ /* TX/RX Settings */
+ {EMAC_SGMII_LN_RX_EN_SIGNAL, SIGDET_LP_BYP_PS4 | SIGDET_EN_PS0_TO_PS2},
+
+ {EMAC_SGMII_LN_DRVR_CTRL0, TXVAL_VALID_INIT | KR_PCIGEN3_MODE},
+ {EMAC_SGMII_LN_DRVR_TAP_EN, MAIN_EN},
+ {EMAC_SGMII_LN_TX_MARGINING, TX_MARGINING_MUX | TX_MARGINING(25)},
+ {EMAC_SGMII_LN_TX_PRE, TX_PRE_MUX},
+ {EMAC_SGMII_LN_TX_POST, TX_POST_MUX},
+
+ {EMAC_SGMII_LN_CML_CTRL_MODE0,
+ CML_GEAR_MODE(1) | CML2CMOS_IBOOST_MODE(1)},
+ {EMAC_SGMII_LN_MIXER_CTRL_MODE0,
+ MIXER_LOADB_MODE(12) | MIXER_DATARATE_MODE(1)},
+ {EMAC_SGMII_LN_VGA_INITVAL, VGA_THRESH_DFE(31)},
+ {EMAC_SGMII_LN_SIGDET_ENABLES,
+ SIGDET_LP_BYP_PS0_TO_PS2 | SIGDET_FLT_BYP},
+ {EMAC_SGMII_LN_SIGDET_CNTRL, SIGDET_LVL(8)},
+
+ {EMAC_SGMII_LN_SIGDET_DEGLITCH_CNTRL, SIGDET_DEGLITCH_CTRL(4)},
+ {EMAC_SGMII_LN_RX_MISC_CNTRL0, INVERT_PCS_RX_CLK},
+ {EMAC_SGMII_LN_DRVR_LOGIC_CLKDIV,
+ DRVR_LOGIC_CLK_EN | DRVR_LOGIC_CLK_DIV(4)},
+
+ {EMAC_SGMII_LN_PARALLEL_RATE, PARALLEL_RATE_MODE0(1)},
+ {EMAC_SGMII_LN_TX_BAND_MODE, BAND_MODE0(1)},
+ {EMAC_SGMII_LN_RX_BAND, BAND_MODE0(2)},
+ {EMAC_SGMII_LN_LANE_MODE, LANE_MODE(26)},
+ {EMAC_SGMII_LN_RX_RCVR_PATH1_MODE0, CDR_PD_SEL_MODE0(2) |
+ EN_DLL_MODE0 | EN_IQ_DCC_MODE0 | EN_IQCAL_MODE0},
+ {EMAC_SGMII_LN_RSM_CONFIG, BYPASS_RSM_SAMP_CAL | BYPASS_RSM_DLL_CAL},
+};
+
+static const struct emac_reg_write physical_coding_sublayer_programming[] = {
+ {EMAC_SGMII_PHY_POW_DWN_CTRL0, PWRDN_B},
+ {EMAC_SGMII_PHY_CDR_CTRL0, CDR_MAX_CNT(15)},
+ {EMAC_SGMII_PHY_TX_PWR_CTRL, 0},
+ {EMAC_SGMII_PHY_LANE_CTRL1, L0_RX_EQUALIZE_ENABLE},
+};
+
+int emac_sgmii_init_qdf2400(struct emac_adapter *adpt)
+{
+ struct emac_phy *phy = &adpt->phy;
+ void __iomem *phy_regs = phy->base;
+ void __iomem *laned = phy->digital;
+ unsigned int i;
+ u32 lnstatus;
+
+ /* PCS lane-x init */
+ emac_reg_write_all(phy->base, physical_coding_sublayer_programming,
+ ARRAY_SIZE(physical_coding_sublayer_programming));
+
+ /* SGMII lane-x init */
+ emac_reg_write_all(phy->digital, sgmii_laned, ARRAY_SIZE(sgmii_laned));
+
+ /* Power up PCS and start reset lane state machine */
+
+ writel(0, phy_regs + EMAC_SGMII_PHY_RESET_CTRL);
+ writel(1, laned + SGMII_LN_RSM_START);
+
+ /* Wait for c_ready assertion */
+ for (i = 0; i < SERDES_START_WAIT_TIMES; i++) {
+ lnstatus = readl(phy_regs + SGMII_PHY_LN_LANE_STATUS);
+ if (lnstatus & BIT(1))
+ break;
+ usleep_range(100, 200);
+ }
+
+ if (i == SERDES_START_WAIT_TIMES) {
+ netdev_err(adpt->netdev, "SGMII failed to start\n");
+ return -EIO;
+ }
+
+ /* Disable digital and SERDES loopback */
+ writel(0, phy_regs + SGMII_PHY_LN_BIST_GEN0);
+ writel(0, phy_regs + SGMII_PHY_LN_BIST_GEN2);
+ writel(0, phy_regs + SGMII_PHY_LN_CDR_CTRL1);
+
+ /* Mask out all the SGMII interrupts */
+ writel(0, phy_regs + EMAC_SGMII_PHY_INTERRUPT_MASK);
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/qualcomm/emac/emac-sgmii-qdf2432.c b/drivers/net/ethernet/qualcomm/emac/emac-sgmii-qdf2432.c
new file mode 100644
index 000000000000..6170200d7479
--- /dev/null
+++ b/drivers/net/ethernet/qualcomm/emac/emac-sgmii-qdf2432.c
@@ -0,0 +1,210 @@
+/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/* Qualcomm Technologies, Inc. QDF2432 EMAC SGMII Controller driver.
+ */
+
+#include <linux/iopoll.h>
+#include "emac.h"
+
+/* EMAC_SGMII register offsets */
+#define EMAC_SGMII_PHY_TX_PWR_CTRL 0x000C
+#define EMAC_SGMII_PHY_LANE_CTRL1 0x0018
+#define EMAC_SGMII_PHY_CDR_CTRL0 0x0058
+#define EMAC_SGMII_PHY_POW_DWN_CTRL0 0x0080
+#define EMAC_SGMII_PHY_RESET_CTRL 0x00a8
+#define EMAC_SGMII_PHY_INTERRUPT_MASK 0x00b4
+
+/* SGMII digital lane registers */
+#define EMAC_SGMII_LN_DRVR_CTRL0 0x000C
+#define EMAC_SGMII_LN_DRVR_TAP_EN 0x0018
+#define EMAC_SGMII_LN_TX_MARGINING 0x001C
+#define EMAC_SGMII_LN_TX_PRE 0x0020
+#define EMAC_SGMII_LN_TX_POST 0x0024
+#define EMAC_SGMII_LN_TX_BAND_MODE 0x0060
+#define EMAC_SGMII_LN_LANE_MODE 0x0064
+#define EMAC_SGMII_LN_PARALLEL_RATE 0x0078
+#define EMAC_SGMII_LN_CML_CTRL_MODE0 0x00B8
+#define EMAC_SGMII_LN_MIXER_CTRL_MODE0 0x00D0
+#define EMAC_SGMII_LN_VGA_INITVAL 0x0134
+#define EMAC_SGMII_LN_UCDR_FO_GAIN_MODE0 0x017C
+#define EMAC_SGMII_LN_UCDR_SO_GAIN_MODE0 0x0188
+#define EMAC_SGMII_LN_UCDR_SO_CONFIG 0x0194
+#define EMAC_SGMII_LN_RX_BAND 0x019C
+#define EMAC_SGMII_LN_RX_RCVR_PATH1_MODE0 0x01B8
+#define EMAC_SGMII_LN_RSM_CONFIG 0x01F0
+#define EMAC_SGMII_LN_SIGDET_ENABLES 0x0224
+#define EMAC_SGMII_LN_SIGDET_CNTRL 0x0228
+#define EMAC_SGMII_LN_SIGDET_DEGLITCH_CNTRL 0x022C
+#define EMAC_SGMII_LN_RX_EN_SIGNAL 0x02A0
+#define EMAC_SGMII_LN_RX_MISC_CNTRL0 0x02AC
+#define EMAC_SGMII_LN_DRVR_LOGIC_CLKDIV 0x02BC
+
+/* SGMII digital lane register values */
+#define UCDR_STEP_BY_TWO_MODE0 BIT(7)
+#define UCDR_xO_GAIN_MODE(x) ((x) & 0x7f)
+#define UCDR_ENABLE BIT(6)
+#define UCDR_SO_SATURATION(x) ((x) & 0x3f)
+
+#define SIGDET_LP_BYP_PS4 BIT(7)
+#define SIGDET_EN_PS0_TO_PS2 BIT(6)
+
+#define TXVAL_VALID_INIT BIT(4)
+#define KR_PCIGEN3_MODE BIT(0)
+
+#define MAIN_EN BIT(0)
+
+#define TX_MARGINING_MUX BIT(6)
+#define TX_MARGINING(x) ((x) & 0x3f)
+
+#define TX_PRE_MUX BIT(6)
+
+#define TX_POST_MUX BIT(6)
+
+#define CML_GEAR_MODE(x) (((x) & 7) << 3)
+#define CML2CMOS_IBOOST_MODE(x) ((x) & 7)
+
+#define MIXER_LOADB_MODE(x) (((x) & 0xf) << 2)
+#define MIXER_DATARATE_MODE(x) ((x) & 3)
+
+#define VGA_THRESH_DFE(x) ((x) & 0x3f)
+
+#define SIGDET_LP_BYP_PS0_TO_PS2 BIT(5)
+#define SIGDET_FLT_BYP BIT(0)
+
+#define SIGDET_LVL(x) (((x) & 0xf) << 4)
+
+#define SIGDET_DEGLITCH_CTRL(x) (((x) & 0xf) << 1)
+
+#define DRVR_LOGIC_CLK_EN BIT(4)
+#define DRVR_LOGIC_CLK_DIV(x) ((x) & 0xf)
+
+#define PARALLEL_RATE_MODE0(x) ((x) & 0x3)
+
+#define BAND_MODE0(x) ((x) & 0x3)
+
+#define LANE_MODE(x) ((x) & 0x1f)
+
+#define CDR_PD_SEL_MODE0(x) (((x) & 0x3) << 5)
+#define BYPASS_RSM_SAMP_CAL BIT(1)
+#define BYPASS_RSM_DLL_CAL BIT(0)
+
+#define L0_RX_EQUALIZE_ENABLE BIT(6)
+
+#define PWRDN_B BIT(0)
+
+#define CDR_MAX_CNT(x) ((x) & 0xff)
+
+#define SERDES_START_WAIT_TIMES 100
+
+struct emac_reg_write {
+ unsigned int offset;
+ u32 val;
+};
+
+static void emac_reg_write_all(void __iomem *base,
+ const struct emac_reg_write *itr, size_t size)
+{
+ size_t i;
+
+ for (i = 0; i < size; ++itr, ++i)
+ writel(itr->val, base + itr->offset);
+}
+
+static const struct emac_reg_write sgmii_laned[] = {
+ /* CDR Settings */
+ {EMAC_SGMII_LN_UCDR_FO_GAIN_MODE0,
+ UCDR_STEP_BY_TWO_MODE0 | UCDR_xO_GAIN_MODE(10)},
+ {EMAC_SGMII_LN_UCDR_SO_GAIN_MODE0, UCDR_xO_GAIN_MODE(0)},
+ {EMAC_SGMII_LN_UCDR_SO_CONFIG, UCDR_ENABLE | UCDR_SO_SATURATION(12)},
+
+ /* TX/RX Settings */
+ {EMAC_SGMII_LN_RX_EN_SIGNAL, SIGDET_LP_BYP_PS4 | SIGDET_EN_PS0_TO_PS2},
+
+ {EMAC_SGMII_LN_DRVR_CTRL0, TXVAL_VALID_INIT | KR_PCIGEN3_MODE},
+ {EMAC_SGMII_LN_DRVR_TAP_EN, MAIN_EN},
+ {EMAC_SGMII_LN_TX_MARGINING, TX_MARGINING_MUX | TX_MARGINING(25)},
+ {EMAC_SGMII_LN_TX_PRE, TX_PRE_MUX},
+ {EMAC_SGMII_LN_TX_POST, TX_POST_MUX},
+
+ {EMAC_SGMII_LN_CML_CTRL_MODE0,
+ CML_GEAR_MODE(1) | CML2CMOS_IBOOST_MODE(1)},
+ {EMAC_SGMII_LN_MIXER_CTRL_MODE0,
+ MIXER_LOADB_MODE(12) | MIXER_DATARATE_MODE(1)},
+ {EMAC_SGMII_LN_VGA_INITVAL, VGA_THRESH_DFE(31)},
+ {EMAC_SGMII_LN_SIGDET_ENABLES,
+ SIGDET_LP_BYP_PS0_TO_PS2 | SIGDET_FLT_BYP},
+ {EMAC_SGMII_LN_SIGDET_CNTRL, SIGDET_LVL(8)},
+
+ {EMAC_SGMII_LN_SIGDET_DEGLITCH_CNTRL, SIGDET_DEGLITCH_CTRL(4)},
+ {EMAC_SGMII_LN_RX_MISC_CNTRL0, 0},
+ {EMAC_SGMII_LN_DRVR_LOGIC_CLKDIV,
+ DRVR_LOGIC_CLK_EN | DRVR_LOGIC_CLK_DIV(4)},
+
+ {EMAC_SGMII_LN_PARALLEL_RATE, PARALLEL_RATE_MODE0(1)},
+ {EMAC_SGMII_LN_TX_BAND_MODE, BAND_MODE0(2)},
+ {EMAC_SGMII_LN_RX_BAND, BAND_MODE0(3)},
+ {EMAC_SGMII_LN_LANE_MODE, LANE_MODE(26)},
+ {EMAC_SGMII_LN_RX_RCVR_PATH1_MODE0, CDR_PD_SEL_MODE0(3)},
+ {EMAC_SGMII_LN_RSM_CONFIG, BYPASS_RSM_SAMP_CAL | BYPASS_RSM_DLL_CAL},
+};
+
+static const struct emac_reg_write physical_coding_sublayer_programming[] = {
+ {EMAC_SGMII_PHY_POW_DWN_CTRL0, PWRDN_B},
+ {EMAC_SGMII_PHY_CDR_CTRL0, CDR_MAX_CNT(15)},
+ {EMAC_SGMII_PHY_TX_PWR_CTRL, 0},
+ {EMAC_SGMII_PHY_LANE_CTRL1, L0_RX_EQUALIZE_ENABLE},
+};
+
+int emac_sgmii_init_qdf2432(struct emac_adapter *adpt)
+{
+ struct emac_phy *phy = &adpt->phy;
+ void __iomem *phy_regs = phy->base;
+ void __iomem *laned = phy->digital;
+ unsigned int i;
+ u32 lnstatus;
+
+ /* PCS lane-x init */
+ emac_reg_write_all(phy->base, physical_coding_sublayer_programming,
+ ARRAY_SIZE(physical_coding_sublayer_programming));
+
+ /* SGMII lane-x init */
+ emac_reg_write_all(phy->digital, sgmii_laned, ARRAY_SIZE(sgmii_laned));
+
+ /* Power up PCS and start reset lane state machine */
+
+ writel(0, phy_regs + EMAC_SGMII_PHY_RESET_CTRL);
+ writel(1, laned + SGMII_LN_RSM_START);
+
+ /* Wait for c_ready assertion */
+ for (i = 0; i < SERDES_START_WAIT_TIMES; i++) {
+ lnstatus = readl(phy_regs + SGMII_PHY_LN_LANE_STATUS);
+ if (lnstatus & BIT(1))
+ break;
+ usleep_range(100, 200);
+ }
+
+ if (i == SERDES_START_WAIT_TIMES) {
+ netdev_err(adpt->netdev, "SGMII failed to start\n");
+ return -EIO;
+ }
+
+ /* Disable digital and SERDES loopback */
+ writel(0, phy_regs + SGMII_PHY_LN_BIST_GEN0);
+ writel(0, phy_regs + SGMII_PHY_LN_BIST_GEN2);
+ writel(0, phy_regs + SGMII_PHY_LN_CDR_CTRL1);
+
+ /* Mask out all the SGMII interrupts */
+ writel(0, phy_regs + EMAC_SGMII_PHY_INTERRUPT_MASK);
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/qualcomm/emac/emac-sgmii.c b/drivers/net/ethernet/qualcomm/emac/emac-sgmii.c
index 72fe343c7a36..bf722a9bb09d 100644
--- a/drivers/net/ethernet/qualcomm/emac/emac-sgmii.c
+++ b/drivers/net/ethernet/qualcomm/emac/emac-sgmii.c
@@ -20,448 +20,33 @@
#include "emac-mac.h"
#include "emac-sgmii.h"
-/* EMAC_QSERDES register offsets */
-#define EMAC_QSERDES_COM_SYS_CLK_CTRL 0x000000
-#define EMAC_QSERDES_COM_PLL_CNTRL 0x000014
-#define EMAC_QSERDES_COM_PLL_IP_SETI 0x000018
-#define EMAC_QSERDES_COM_PLL_CP_SETI 0x000024
-#define EMAC_QSERDES_COM_PLL_IP_SETP 0x000028
-#define EMAC_QSERDES_COM_PLL_CP_SETP 0x00002c
-#define EMAC_QSERDES_COM_SYSCLK_EN_SEL 0x000038
-#define EMAC_QSERDES_COM_RESETSM_CNTRL 0x000040
-#define EMAC_QSERDES_COM_PLLLOCK_CMP1 0x000044
-#define EMAC_QSERDES_COM_PLLLOCK_CMP2 0x000048
-#define EMAC_QSERDES_COM_PLLLOCK_CMP3 0x00004c
-#define EMAC_QSERDES_COM_PLLLOCK_CMP_EN 0x000050
-#define EMAC_QSERDES_COM_DEC_START1 0x000064
-#define EMAC_QSERDES_COM_DIV_FRAC_START1 0x000098
-#define EMAC_QSERDES_COM_DIV_FRAC_START2 0x00009c
-#define EMAC_QSERDES_COM_DIV_FRAC_START3 0x0000a0
-#define EMAC_QSERDES_COM_DEC_START2 0x0000a4
-#define EMAC_QSERDES_COM_PLL_CRCTRL 0x0000ac
-#define EMAC_QSERDES_COM_RESET_SM 0x0000bc
-#define EMAC_QSERDES_TX_BIST_MODE_LANENO 0x000100
-#define EMAC_QSERDES_TX_TX_EMP_POST1_LVL 0x000108
-#define EMAC_QSERDES_TX_TX_DRV_LVL 0x00010c
-#define EMAC_QSERDES_TX_LANE_MODE 0x000150
-#define EMAC_QSERDES_TX_TRAN_DRVR_EMP_EN 0x000170
-#define EMAC_QSERDES_RX_CDR_CONTROL 0x000200
-#define EMAC_QSERDES_RX_CDR_CONTROL2 0x000210
-#define EMAC_QSERDES_RX_RX_EQ_GAIN12 0x000230
-
/* EMAC_SGMII register offsets */
-#define EMAC_SGMII_PHY_SERDES_START 0x000000
-#define EMAC_SGMII_PHY_CMN_PWR_CTRL 0x000004
-#define EMAC_SGMII_PHY_RX_PWR_CTRL 0x000008
-#define EMAC_SGMII_PHY_TX_PWR_CTRL 0x00000C
-#define EMAC_SGMII_PHY_LANE_CTRL1 0x000018
-#define EMAC_SGMII_PHY_AUTONEG_CFG2 0x000048
-#define EMAC_SGMII_PHY_CDR_CTRL0 0x000058
-#define EMAC_SGMII_PHY_SPEED_CFG1 0x000074
-#define EMAC_SGMII_PHY_POW_DWN_CTRL0 0x000080
-#define EMAC_SGMII_PHY_RESET_CTRL 0x0000a8
-#define EMAC_SGMII_PHY_IRQ_CMD 0x0000ac
-#define EMAC_SGMII_PHY_INTERRUPT_CLEAR 0x0000b0
-#define EMAC_SGMII_PHY_INTERRUPT_MASK 0x0000b4
-#define EMAC_SGMII_PHY_INTERRUPT_STATUS 0x0000b8
-#define EMAC_SGMII_PHY_RX_CHK_STATUS 0x0000d4
-#define EMAC_SGMII_PHY_AUTONEG0_STATUS 0x0000e0
-#define EMAC_SGMII_PHY_AUTONEG1_STATUS 0x0000e4
-
-/* EMAC_QSERDES_COM_PLL_IP_SETI */
-#define PLL_IPSETI(x) ((x) & 0x3f)
-
-/* EMAC_QSERDES_COM_PLL_CP_SETI */
-#define PLL_CPSETI(x) ((x) & 0xff)
-
-/* EMAC_QSERDES_COM_PLL_IP_SETP */
-#define PLL_IPSETP(x) ((x) & 0x3f)
-
-/* EMAC_QSERDES_COM_PLL_CP_SETP */
-#define PLL_CPSETP(x) ((x) & 0x1f)
-
-/* EMAC_QSERDES_COM_PLL_CRCTRL */
-#define PLL_RCTRL(x) (((x) & 0xf) << 4)
-#define PLL_CCTRL(x) ((x) & 0xf)
-
-/* SGMII v2 PHY registers per lane */
-#define EMAC_SGMII_PHY_LN_OFFSET 0x0400
-
-/* SGMII v2 digital lane registers */
-#define EMAC_SGMII_LN_DRVR_CTRL0 0x00C
-#define EMAC_SGMII_LN_DRVR_TAP_EN 0x018
-#define EMAC_SGMII_LN_TX_MARGINING 0x01C
-#define EMAC_SGMII_LN_TX_PRE 0x020
-#define EMAC_SGMII_LN_TX_POST 0x024
-#define EMAC_SGMII_LN_TX_BAND_MODE 0x060
-#define EMAC_SGMII_LN_LANE_MODE 0x064
-#define EMAC_SGMII_LN_PARALLEL_RATE 0x078
-#define EMAC_SGMII_LN_CML_CTRL_MODE0 0x0B8
-#define EMAC_SGMII_LN_MIXER_CTRL_MODE0 0x0D0
-#define EMAC_SGMII_LN_VGA_INITVAL 0x134
-#define EMAC_SGMII_LN_UCDR_FO_GAIN_MODE0 0x17C
-#define EMAC_SGMII_LN_UCDR_SO_GAIN_MODE0 0x188
-#define EMAC_SGMII_LN_UCDR_SO_CONFIG 0x194
-#define EMAC_SGMII_LN_RX_BAND 0x19C
-#define EMAC_SGMII_LN_RX_RCVR_PATH1_MODE0 0x1B8
-#define EMAC_SGMII_LN_RSM_CONFIG 0x1F0
-#define EMAC_SGMII_LN_SIGDET_ENABLES 0x224
-#define EMAC_SGMII_LN_SIGDET_CNTRL 0x228
-#define EMAC_SGMII_LN_SIGDET_DEGLITCH_CNTRL 0x22C
-#define EMAC_SGMII_LN_RX_EN_SIGNAL 0x2A0
-#define EMAC_SGMII_LN_RX_MISC_CNTRL0 0x2AC
-#define EMAC_SGMII_LN_DRVR_LOGIC_CLKDIV 0x2BC
-
-/* SGMII v2 digital lane register values */
-#define UCDR_STEP_BY_TWO_MODE0 BIT(7)
-#define UCDR_xO_GAIN_MODE(x) ((x) & 0x7f)
-#define UCDR_ENABLE BIT(6)
-#define UCDR_SO_SATURATION(x) ((x) & 0x3f)
-#define SIGDET_LP_BYP_PS4 BIT(7)
-#define SIGDET_EN_PS0_TO_PS2 BIT(6)
-#define EN_ACCOUPLEVCM_SW_MUX BIT(5)
-#define EN_ACCOUPLEVCM_SW BIT(4)
-#define RX_SYNC_EN BIT(3)
-#define RXTERM_HIGHZ_PS5 BIT(2)
-#define SIGDET_EN_PS3 BIT(1)
-#define EN_ACCOUPLE_VCM_PS3 BIT(0)
-#define UFS_MODE BIT(5)
-#define TXVAL_VALID_INIT BIT(4)
-#define TXVAL_VALID_MUX BIT(3)
-#define TXVAL_VALID BIT(2)
-#define USB3P1_MODE BIT(1)
-#define KR_PCIGEN3_MODE BIT(0)
-#define PRE_EN BIT(3)
-#define POST_EN BIT(2)
-#define MAIN_EN_MUX BIT(1)
-#define MAIN_EN BIT(0)
-#define TX_MARGINING_MUX BIT(6)
-#define TX_MARGINING(x) ((x) & 0x3f)
-#define TX_PRE_MUX BIT(6)
-#define TX_PRE(x) ((x) & 0x3f)
-#define TX_POST_MUX BIT(6)
-#define TX_POST(x) ((x) & 0x3f)
-#define CML_GEAR_MODE(x) (((x) & 7) << 3)
-#define CML2CMOS_IBOOST_MODE(x) ((x) & 7)
-#define MIXER_LOADB_MODE(x) (((x) & 0xf) << 2)
-#define MIXER_DATARATE_MODE(x) ((x) & 3)
-#define VGA_THRESH_DFE(x) ((x) & 0x3f)
-#define SIGDET_LP_BYP_PS0_TO_PS2 BIT(5)
-#define SIGDET_LP_BYP_MUX BIT(4)
-#define SIGDET_LP_BYP BIT(3)
-#define SIGDET_EN_MUX BIT(2)
-#define SIGDET_EN BIT(1)
-#define SIGDET_FLT_BYP BIT(0)
-#define SIGDET_LVL(x) (((x) & 0xf) << 4)
-#define SIGDET_BW_CTRL(x) ((x) & 0xf)
-#define SIGDET_DEGLITCH_CTRL(x) (((x) & 0xf) << 1)
-#define SIGDET_DEGLITCH_BYP BIT(0)
-#define INVERT_PCS_RX_CLK BIT(7)
-#define PWM_EN BIT(6)
-#define RXBIAS_SEL(x) (((x) & 0x3) << 4)
-#define EBDAC_SIGN BIT(3)
-#define EDAC_SIGN BIT(2)
-#define EN_AUXTAP1SIGN_INVERT BIT(1)
-#define EN_DAC_CHOPPING BIT(0)
-#define DRVR_LOGIC_CLK_EN BIT(4)
-#define DRVR_LOGIC_CLK_DIV(x) ((x) & 0xf)
-#define PARALLEL_RATE_MODE2(x) (((x) & 0x3) << 4)
-#define PARALLEL_RATE_MODE1(x) (((x) & 0x3) << 2)
-#define PARALLEL_RATE_MODE0(x) ((x) & 0x3)
-#define BAND_MODE2(x) (((x) & 0x3) << 4)
-#define BAND_MODE1(x) (((x) & 0x3) << 2)
-#define BAND_MODE0(x) ((x) & 0x3)
-#define LANE_SYNC_MODE BIT(5)
-#define LANE_MODE(x) ((x) & 0x1f)
-#define CDR_PD_SEL_MODE0(x) (((x) & 0x3) << 5)
-#define EN_DLL_MODE0 BIT(4)
-#define EN_IQ_DCC_MODE0 BIT(3)
-#define EN_IQCAL_MODE0 BIT(2)
-#define EN_QPATH_MODE0 BIT(1)
-#define EN_EPATH_MODE0 BIT(0)
-#define FORCE_TSYNC_ACK BIT(7)
-#define FORCE_CMN_ACK BIT(6)
-#define FORCE_CMN_READY BIT(5)
-#define EN_RCLK_DEGLITCH BIT(4)
-#define BYPASS_RSM_CDR_RESET BIT(3)
-#define BYPASS_RSM_TSYNC BIT(2)
-#define BYPASS_RSM_SAMP_CAL BIT(1)
-#define BYPASS_RSM_DLL_CAL BIT(0)
-
-/* EMAC_QSERDES_COM_SYS_CLK_CTRL */
-#define SYSCLK_CM BIT(4)
-#define SYSCLK_AC_COUPLE BIT(3)
-
-/* EMAC_QSERDES_COM_PLL_CNTRL */
-#define OCP_EN BIT(5)
-#define PLL_DIV_FFEN BIT(2)
-#define PLL_DIV_ORD BIT(1)
-
-/* EMAC_QSERDES_COM_SYSCLK_EN_SEL */
-#define SYSCLK_SEL_CMOS BIT(3)
-
-/* EMAC_QSERDES_COM_RESETSM_CNTRL */
-#define FRQ_TUNE_MODE BIT(4)
-
-/* EMAC_QSERDES_COM_PLLLOCK_CMP_EN */
-#define PLLLOCK_CMP_EN BIT(0)
-
-/* EMAC_QSERDES_COM_DEC_START1 */
-#define DEC_START1_MUX BIT(7)
-#define DEC_START1(x) ((x) & 0x7f)
-
-/* EMAC_QSERDES_COM_DIV_FRAC_START1 * EMAC_QSERDES_COM_DIV_FRAC_START2 */
-#define DIV_FRAC_START_MUX BIT(7)
-#define DIV_FRAC_START(x) ((x) & 0x7f)
-
-/* EMAC_QSERDES_COM_DIV_FRAC_START3 */
-#define DIV_FRAC_START3_MUX BIT(4)
-#define DIV_FRAC_START3(x) ((x) & 0xf)
-
-/* EMAC_QSERDES_COM_DEC_START2 */
-#define DEC_START2_MUX BIT(1)
-#define DEC_START2 BIT(0)
-
-/* EMAC_QSERDES_COM_RESET_SM */
-#define READY BIT(5)
-
-/* EMAC_QSERDES_TX_TX_EMP_POST1_LVL */
-#define TX_EMP_POST1_LVL_MUX BIT(5)
-#define TX_EMP_POST1_LVL(x) ((x) & 0x1f)
-#define TX_EMP_POST1_LVL_BMSK 0x1f
-#define TX_EMP_POST1_LVL_SHFT 0
-
-/* EMAC_QSERDES_TX_TX_DRV_LVL */
-#define TX_DRV_LVL_MUX BIT(4)
-#define TX_DRV_LVL(x) ((x) & 0xf)
-
-/* EMAC_QSERDES_TX_TRAN_DRVR_EMP_EN */
-#define EMP_EN_MUX BIT(1)
-#define EMP_EN BIT(0)
-
-/* EMAC_QSERDES_RX_CDR_CONTROL & EMAC_QSERDES_RX_CDR_CONTROL2 */
-#define HBW_PD_EN BIT(7)
-#define SECONDORDERENABLE BIT(6)
-#define FIRSTORDER_THRESH(x) (((x) & 0x7) << 3)
-#define SECONDORDERGAIN(x) ((x) & 0x7)
-
-/* EMAC_QSERDES_RX_RX_EQ_GAIN12 */
-#define RX_EQ_GAIN2(x) (((x) & 0xf) << 4)
-#define RX_EQ_GAIN1(x) ((x) & 0xf)
-
-/* EMAC_SGMII_PHY_SERDES_START */
-#define SERDES_START BIT(0)
-
-/* EMAC_SGMII_PHY_CMN_PWR_CTRL */
-#define BIAS_EN BIT(6)
-#define PLL_EN BIT(5)
-#define SYSCLK_EN BIT(4)
-#define CLKBUF_L_EN BIT(3)
-#define PLL_TXCLK_EN BIT(1)
-#define PLL_RXCLK_EN BIT(0)
-
-/* EMAC_SGMII_PHY_RX_PWR_CTRL */
-#define L0_RX_SIGDET_EN BIT(7)
-#define L0_RX_TERM_MODE(x) (((x) & 3) << 4)
-#define L0_RX_I_EN BIT(1)
-
-/* EMAC_SGMII_PHY_TX_PWR_CTRL */
-#define L0_TX_EN BIT(5)
-#define L0_CLKBUF_EN BIT(4)
-#define L0_TRAN_BIAS_EN BIT(1)
-
-/* EMAC_SGMII_PHY_LANE_CTRL1 */
-#define L0_RX_EQUALIZE_ENABLE BIT(6)
-#define L0_RESET_TSYNC_EN BIT(4)
-#define L0_DRV_LVL(x) ((x) & 0xf)
-
-/* EMAC_SGMII_PHY_AUTONEG_CFG2 */
+#define EMAC_SGMII_PHY_AUTONEG_CFG2 0x0048
+#define EMAC_SGMII_PHY_SPEED_CFG1 0x0074
+#define EMAC_SGMII_PHY_IRQ_CMD 0x00ac
+#define EMAC_SGMII_PHY_INTERRUPT_CLEAR 0x00b0
+#define EMAC_SGMII_PHY_INTERRUPT_STATUS 0x00b8
+
#define FORCE_AN_TX_CFG BIT(5)
#define FORCE_AN_RX_CFG BIT(4)
#define AN_ENABLE BIT(0)
-/* EMAC_SGMII_PHY_SPEED_CFG1 */
#define DUPLEX_MODE BIT(4)
#define SPDMODE_1000 BIT(1)
#define SPDMODE_100 BIT(0)
#define SPDMODE_10 0
-#define SPDMODE_BMSK 3
-#define SPDMODE_SHFT 0
-
-/* EMAC_SGMII_PHY_POW_DWN_CTRL0 */
-#define PWRDN_B BIT(0)
-#define CDR_MAX_CNT(x) ((x) & 0xff)
-
-/* EMAC_QSERDES_TX_BIST_MODE_LANENO */
-#define BIST_LANE_NUMBER(x) (((x) & 3) << 5)
-#define BISTMODE(x) ((x) & 0x1f)
-
-/* EMAC_QSERDES_COM_PLLLOCK_CMPx */
-#define PLLLOCK_CMP(x) ((x) & 0xff)
-/* EMAC_SGMII_PHY_RESET_CTRL */
-#define PHY_SW_RESET BIT(0)
-
-/* EMAC_SGMII_PHY_IRQ_CMD */
#define IRQ_GLOBAL_CLEAR BIT(0)
-/* EMAC_SGMII_PHY_INTERRUPT_MASK */
#define DECODE_CODE_ERR BIT(7)
#define DECODE_DISP_ERR BIT(6)
-#define PLL_UNLOCK BIT(5)
-#define AN_ILLEGAL_TERM BIT(4)
-#define SYNC_FAIL BIT(3)
-#define AN_START BIT(2)
-#define AN_END BIT(1)
-#define AN_REQUEST BIT(0)
#define SGMII_PHY_IRQ_CLR_WAIT_TIME 10
-#define SGMII_PHY_INTERRUPT_ERR (\
- DECODE_CODE_ERR |\
- DECODE_DISP_ERR)
-
-#define SGMII_ISR_AN_MASK (\
- AN_REQUEST |\
- AN_START |\
- AN_END |\
- AN_ILLEGAL_TERM |\
- PLL_UNLOCK |\
- SYNC_FAIL)
-
-#define SGMII_ISR_MASK (\
- SGMII_PHY_INTERRUPT_ERR |\
- SGMII_ISR_AN_MASK)
-
-/* SGMII TX_CONFIG */
-#define TXCFG_LINK 0x8000
-#define TXCFG_MODE_BMSK 0x1c00
-#define TXCFG_1000_FULL 0x1800
-#define TXCFG_100_FULL 0x1400
-#define TXCFG_100_HALF 0x0400
-#define TXCFG_10_FULL 0x1000
-#define TXCFG_10_HALF 0x0000
+#define SGMII_PHY_INTERRUPT_ERR (DECODE_CODE_ERR | DECODE_DISP_ERR)
#define SERDES_START_WAIT_TIMES 100
-struct emac_reg_write {
- unsigned int offset;
- u32 val;
-};
-
-static void emac_reg_write_all(void __iomem *base,
- const struct emac_reg_write *itr, size_t size)
-{
- size_t i;
-
- for (i = 0; i < size; ++itr, ++i)
- writel(itr->val, base + itr->offset);
-}
-
-static const struct emac_reg_write physical_coding_sublayer_programming_v1[] = {
- {EMAC_SGMII_PHY_CDR_CTRL0, CDR_MAX_CNT(15)},
- {EMAC_SGMII_PHY_POW_DWN_CTRL0, PWRDN_B},
- {EMAC_SGMII_PHY_CMN_PWR_CTRL,
- BIAS_EN | SYSCLK_EN | CLKBUF_L_EN | PLL_TXCLK_EN | PLL_RXCLK_EN},
- {EMAC_SGMII_PHY_TX_PWR_CTRL, L0_TX_EN | L0_CLKBUF_EN | L0_TRAN_BIAS_EN},
- {EMAC_SGMII_PHY_RX_PWR_CTRL,
- L0_RX_SIGDET_EN | L0_RX_TERM_MODE(1) | L0_RX_I_EN},
- {EMAC_SGMII_PHY_CMN_PWR_CTRL,
- BIAS_EN | PLL_EN | SYSCLK_EN | CLKBUF_L_EN | PLL_TXCLK_EN |
- PLL_RXCLK_EN},
- {EMAC_SGMII_PHY_LANE_CTRL1,
- L0_RX_EQUALIZE_ENABLE | L0_RESET_TSYNC_EN | L0_DRV_LVL(15)},
-};
-
-static const struct emac_reg_write sysclk_refclk_setting[] = {
- {EMAC_QSERDES_COM_SYSCLK_EN_SEL, SYSCLK_SEL_CMOS},
- {EMAC_QSERDES_COM_SYS_CLK_CTRL, SYSCLK_CM | SYSCLK_AC_COUPLE},
-};
-
-static const struct emac_reg_write pll_setting[] = {
- {EMAC_QSERDES_COM_PLL_IP_SETI, PLL_IPSETI(1)},
- {EMAC_QSERDES_COM_PLL_CP_SETI, PLL_CPSETI(59)},
- {EMAC_QSERDES_COM_PLL_IP_SETP, PLL_IPSETP(10)},
- {EMAC_QSERDES_COM_PLL_CP_SETP, PLL_CPSETP(9)},
- {EMAC_QSERDES_COM_PLL_CRCTRL, PLL_RCTRL(15) | PLL_CCTRL(11)},
- {EMAC_QSERDES_COM_PLL_CNTRL, OCP_EN | PLL_DIV_FFEN | PLL_DIV_ORD},
- {EMAC_QSERDES_COM_DEC_START1, DEC_START1_MUX | DEC_START1(2)},
- {EMAC_QSERDES_COM_DEC_START2, DEC_START2_MUX | DEC_START2},
- {EMAC_QSERDES_COM_DIV_FRAC_START1,
- DIV_FRAC_START_MUX | DIV_FRAC_START(85)},
- {EMAC_QSERDES_COM_DIV_FRAC_START2,
- DIV_FRAC_START_MUX | DIV_FRAC_START(42)},
- {EMAC_QSERDES_COM_DIV_FRAC_START3,
- DIV_FRAC_START3_MUX | DIV_FRAC_START3(3)},
- {EMAC_QSERDES_COM_PLLLOCK_CMP1, PLLLOCK_CMP(43)},
- {EMAC_QSERDES_COM_PLLLOCK_CMP2, PLLLOCK_CMP(104)},
- {EMAC_QSERDES_COM_PLLLOCK_CMP3, PLLLOCK_CMP(0)},
- {EMAC_QSERDES_COM_PLLLOCK_CMP_EN, PLLLOCK_CMP_EN},
- {EMAC_QSERDES_COM_RESETSM_CNTRL, FRQ_TUNE_MODE},
-};
-
-static const struct emac_reg_write cdr_setting[] = {
- {EMAC_QSERDES_RX_CDR_CONTROL,
- SECONDORDERENABLE | FIRSTORDER_THRESH(3) | SECONDORDERGAIN(2)},
- {EMAC_QSERDES_RX_CDR_CONTROL2,
- SECONDORDERENABLE | FIRSTORDER_THRESH(3) | SECONDORDERGAIN(4)},
-};
-
-static const struct emac_reg_write tx_rx_setting[] = {
- {EMAC_QSERDES_TX_BIST_MODE_LANENO, 0},
- {EMAC_QSERDES_TX_TX_DRV_LVL, TX_DRV_LVL_MUX | TX_DRV_LVL(15)},
- {EMAC_QSERDES_TX_TRAN_DRVR_EMP_EN, EMP_EN_MUX | EMP_EN},
- {EMAC_QSERDES_TX_TX_EMP_POST1_LVL,
- TX_EMP_POST1_LVL_MUX | TX_EMP_POST1_LVL(1)},
- {EMAC_QSERDES_RX_RX_EQ_GAIN12, RX_EQ_GAIN2(15) | RX_EQ_GAIN1(15)},
- {EMAC_QSERDES_TX_LANE_MODE, LANE_MODE(8)},
-};
-
-static const struct emac_reg_write sgmii_v2_laned[] = {
- /* CDR Settings */
- {EMAC_SGMII_LN_UCDR_FO_GAIN_MODE0,
- UCDR_STEP_BY_TWO_MODE0 | UCDR_xO_GAIN_MODE(10)},
- {EMAC_SGMII_LN_UCDR_SO_GAIN_MODE0, UCDR_xO_GAIN_MODE(0)},
- {EMAC_SGMII_LN_UCDR_SO_CONFIG, UCDR_ENABLE | UCDR_SO_SATURATION(12)},
-
- /* TX/RX Settings */
- {EMAC_SGMII_LN_RX_EN_SIGNAL, SIGDET_LP_BYP_PS4 | SIGDET_EN_PS0_TO_PS2},
-
- {EMAC_SGMII_LN_DRVR_CTRL0, TXVAL_VALID_INIT | KR_PCIGEN3_MODE},
- {EMAC_SGMII_LN_DRVR_TAP_EN, MAIN_EN},
- {EMAC_SGMII_LN_TX_MARGINING, TX_MARGINING_MUX | TX_MARGINING(25)},
- {EMAC_SGMII_LN_TX_PRE, TX_PRE_MUX},
- {EMAC_SGMII_LN_TX_POST, TX_POST_MUX},
-
- {EMAC_SGMII_LN_CML_CTRL_MODE0,
- CML_GEAR_MODE(1) | CML2CMOS_IBOOST_MODE(1)},
- {EMAC_SGMII_LN_MIXER_CTRL_MODE0,
- MIXER_LOADB_MODE(12) | MIXER_DATARATE_MODE(1)},
- {EMAC_SGMII_LN_VGA_INITVAL, VGA_THRESH_DFE(31)},
- {EMAC_SGMII_LN_SIGDET_ENABLES,
- SIGDET_LP_BYP_PS0_TO_PS2 | SIGDET_FLT_BYP},
- {EMAC_SGMII_LN_SIGDET_CNTRL, SIGDET_LVL(8)},
-
- {EMAC_SGMII_LN_SIGDET_DEGLITCH_CNTRL, SIGDET_DEGLITCH_CTRL(4)},
- {EMAC_SGMII_LN_RX_MISC_CNTRL0, 0},
- {EMAC_SGMII_LN_DRVR_LOGIC_CLKDIV,
- DRVR_LOGIC_CLK_EN | DRVR_LOGIC_CLK_DIV(4)},
-
- {EMAC_SGMII_LN_PARALLEL_RATE, PARALLEL_RATE_MODE0(1)},
- {EMAC_SGMII_LN_TX_BAND_MODE, BAND_MODE0(2)},
- {EMAC_SGMII_LN_RX_BAND, BAND_MODE0(3)},
- {EMAC_SGMII_LN_LANE_MODE, LANE_MODE(26)},
- {EMAC_SGMII_LN_RX_RCVR_PATH1_MODE0, CDR_PD_SEL_MODE0(3)},
- {EMAC_SGMII_LN_RSM_CONFIG, BYPASS_RSM_SAMP_CAL | BYPASS_RSM_DLL_CAL},
-};
-
-static const struct emac_reg_write physical_coding_sublayer_programming_v2[] = {
- {EMAC_SGMII_PHY_POW_DWN_CTRL0, PWRDN_B},
- {EMAC_SGMII_PHY_CDR_CTRL0, CDR_MAX_CNT(15)},
- {EMAC_SGMII_PHY_TX_PWR_CTRL, 0},
- {EMAC_SGMII_PHY_LANE_CTRL1, L0_RX_EQUALIZE_ENABLE},
-};
-
static int emac_sgmii_link_init(struct emac_adapter *adpt)
{
struct phy_device *phydev = adpt->phydev;
@@ -536,98 +121,6 @@ static int emac_sgmii_irq_clear(struct emac_adapter *adpt, u32 irq_bits)
return 0;
}
-int emac_sgmii_init_v1(struct emac_adapter *adpt)
-{
- struct emac_phy *phy = &adpt->phy;
- unsigned int i;
- int ret;
-
- ret = emac_sgmii_link_init(adpt);
- if (ret)
- return ret;
-
- emac_reg_write_all(phy->base, physical_coding_sublayer_programming_v1,
- ARRAY_SIZE(physical_coding_sublayer_programming_v1));
- emac_reg_write_all(phy->base, sysclk_refclk_setting,
- ARRAY_SIZE(sysclk_refclk_setting));
- emac_reg_write_all(phy->base, pll_setting, ARRAY_SIZE(pll_setting));
- emac_reg_write_all(phy->base, cdr_setting, ARRAY_SIZE(cdr_setting));
- emac_reg_write_all(phy->base, tx_rx_setting,
- ARRAY_SIZE(tx_rx_setting));
-
- /* Power up the Ser/Des engine */
- writel(SERDES_START, phy->base + EMAC_SGMII_PHY_SERDES_START);
-
- for (i = 0; i < SERDES_START_WAIT_TIMES; i++) {
- if (readl(phy->base + EMAC_QSERDES_COM_RESET_SM) & READY)
- break;
- usleep_range(100, 200);
- }
-
- if (i == SERDES_START_WAIT_TIMES) {
- netdev_err(adpt->netdev, "error: ser/des failed to start\n");
- return -EIO;
- }
- /* Mask out all the SGMII Interrupt */
- writel(0, phy->base + EMAC_SGMII_PHY_INTERRUPT_MASK);
-
- emac_sgmii_irq_clear(adpt, SGMII_PHY_INTERRUPT_ERR);
-
- return 0;
-}
-
-int emac_sgmii_init_v2(struct emac_adapter *adpt)
-{
- struct emac_phy *phy = &adpt->phy;
- void __iomem *phy_regs = phy->base;
- void __iomem *laned = phy->digital;
- unsigned int i;
- u32 lnstatus;
- int ret;
-
- ret = emac_sgmii_link_init(adpt);
- if (ret)
- return ret;
-
- /* PCS lane-x init */
- emac_reg_write_all(phy->base, physical_coding_sublayer_programming_v2,
- ARRAY_SIZE(physical_coding_sublayer_programming_v2));
-
- /* SGMII lane-x init */
- emac_reg_write_all(phy->digital,
- sgmii_v2_laned, ARRAY_SIZE(sgmii_v2_laned));
-
- /* Power up PCS and start reset lane state machine */
-
- writel(0, phy_regs + EMAC_SGMII_PHY_RESET_CTRL);
- writel(1, laned + SGMII_LN_RSM_START);
-
- /* Wait for c_ready assertion */
- for (i = 0; i < SERDES_START_WAIT_TIMES; i++) {
- lnstatus = readl(phy_regs + SGMII_PHY_LN_LANE_STATUS);
- if (lnstatus & BIT(1))
- break;
- usleep_range(100, 200);
- }
-
- if (i == SERDES_START_WAIT_TIMES) {
- netdev_err(adpt->netdev, "SGMII failed to start\n");
- return -EIO;
- }
-
- /* Disable digital and SERDES loopback */
- writel(0, phy_regs + SGMII_PHY_LN_BIST_GEN0);
- writel(0, phy_regs + SGMII_PHY_LN_BIST_GEN2);
- writel(0, phy_regs + SGMII_PHY_LN_CDR_CTRL1);
-
- /* Mask out all the SGMII Interrupt */
- writel(0, phy_regs + EMAC_SGMII_PHY_INTERRUPT_MASK);
-
- emac_sgmii_irq_clear(adpt, SGMII_PHY_INTERRUPT_ERR);
-
- return 0;
-}
-
static void emac_sgmii_reset_prepare(struct emac_adapter *adpt)
{
struct emac_phy *phy = &adpt->phy;
@@ -651,44 +144,72 @@ void emac_sgmii_reset(struct emac_adapter *adpt)
{
int ret;
- clk_set_rate(adpt->clk[EMAC_CLK_HIGH_SPEED], 19200000);
emac_sgmii_reset_prepare(adpt);
+ ret = emac_sgmii_link_init(adpt);
+ if (ret) {
+ netdev_err(adpt->netdev, "unsupported link speed\n");
+ return;
+ }
+
ret = adpt->phy.initialize(adpt);
if (ret)
netdev_err(adpt->netdev,
"could not reinitialize internal PHY (error=%i)\n",
ret);
-
- clk_set_rate(adpt->clk[EMAC_CLK_HIGH_SPEED], 125000000);
}
static int emac_sgmii_acpi_match(struct device *dev, void *data)
{
+#ifdef CONFIG_ACPI
static const struct acpi_device_id match_table[] = {
{
.id = "QCOM8071",
- .driver_data = (kernel_ulong_t)emac_sgmii_init_v2,
},
{}
};
const struct acpi_device_id *id = acpi_match_device(match_table, dev);
emac_sgmii_initialize *initialize = data;
- if (id)
- *initialize = (emac_sgmii_initialize)id->driver_data;
+ if (id) {
+ acpi_handle handle = ACPI_HANDLE(dev);
+ unsigned long long hrv;
+ acpi_status status;
+
+ status = acpi_evaluate_integer(handle, "_HRV", NULL, &hrv);
+ if (status) {
+ if (status == AE_NOT_FOUND)
+ /* Older versions of the QDF2432 ACPI tables do
+ * not have an _HRV property.
+ */
+ hrv = 1;
+ else
+ /* Something is wrong with the tables */
+ return 0;
+ }
- return !!id;
+ switch (hrv) {
+ case 1:
+ *initialize = emac_sgmii_init_qdf2432;
+ return 1;
+ case 2:
+ *initialize = emac_sgmii_init_qdf2400;
+ return 1;
+ }
+ }
+#endif
+
+ return 0;
}
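emac_sgmii_acpi_match() is written as a device_find_child()-style match callback: it fills the emac_sgmii_initialize pointer passed through data and returns non-zero on a match, picking the init routine by the _HRV revision. A hedged sketch of the caller; the actual call site is outside this hunk:

/* Hypothetical ACPI lookup, assuming the helper is used as a child matcher */
emac_sgmii_initialize initialize;
struct device *sgmii_dev;

sgmii_dev = device_find_child(&pdev->dev, &initialize,
			      emac_sgmii_acpi_match);
if (!sgmii_dev)
	return -ENODEV;	/* no SGMII child with a usable _HRV */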
static const struct of_device_id emac_sgmii_dt_match[] = {
{
.compatible = "qcom,fsm9900-emac-sgmii",
- .data = emac_sgmii_init_v1,
+ .data = emac_sgmii_init_fsm9900,
},
{
.compatible = "qcom,qdf2432-emac-sgmii",
- .data = emac_sgmii_init_v2,
+ .data = emac_sgmii_init_qdf2432,
},
{}
};
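On device-tree platforms the same selection happens through the .data pointers in the table above. A sketch of how the probe path might resolve it, assuming a lookup via of_match_node() against a known SGMII node:

/* Hypothetical DT lookup mirroring the ACPI path (np: sgmii DT node) */
const struct of_device_id *match;

match = of_match_node(emac_sgmii_dt_match, np);
if (!match)
	return -ENODEV;
phy->initialize = (emac_sgmii_initialize)match->data;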
@@ -765,6 +286,8 @@ int emac_sgmii_config(struct platform_device *pdev, struct emac_adapter *adpt)
if (ret)
goto error;
+ emac_sgmii_irq_clear(adpt, SGMII_PHY_INTERRUPT_ERR);
+
/* We've remapped the addresses, so we don't need the device any
* more. of_find_device_by_node() says we should release it.
*/
diff --git a/drivers/net/ethernet/qualcomm/emac/emac-sgmii.h b/drivers/net/ethernet/qualcomm/emac/emac-sgmii.h
index ce79212ff403..80ed3dc3157a 100644
--- a/drivers/net/ethernet/qualcomm/emac/emac-sgmii.h
+++ b/drivers/net/ethernet/qualcomm/emac/emac-sgmii.h
@@ -16,9 +16,11 @@
struct emac_adapter;
struct platform_device;
-int emac_sgmii_init_v1(struct emac_adapter *adpt);
-int emac_sgmii_init_v2(struct emac_adapter *adpt);
int emac_sgmii_config(struct platform_device *pdev, struct emac_adapter *adpt);
void emac_sgmii_reset(struct emac_adapter *adpt);
+int emac_sgmii_init_fsm9900(struct emac_adapter *adpt);
+int emac_sgmii_init_qdf2432(struct emac_adapter *adpt);
+int emac_sgmii_init_qdf2400(struct emac_adapter *adpt);
+
#endif
diff --git a/drivers/net/ethernet/qualcomm/emac/emac.c b/drivers/net/ethernet/qualcomm/emac/emac.c
index 57b35aeac51a..422289c232bc 100644
--- a/drivers/net/ethernet/qualcomm/emac/emac.c
+++ b/drivers/net/ethernet/qualcomm/emac/emac.c
@@ -239,15 +239,8 @@ static void emac_rx_mode_set(struct net_device *netdev)
/* Change the Maximum Transfer Unit (MTU) */
static int emac_change_mtu(struct net_device *netdev, int new_mtu)
{
- unsigned int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
struct emac_adapter *adpt = netdev_priv(netdev);
- if ((max_frame < EMAC_MIN_ETH_FRAME_SIZE) ||
- (max_frame > EMAC_MAX_ETH_FRAME_SIZE)) {
- netdev_err(adpt->netdev, "error: invalid MTU setting\n");
- return -EINVAL;
- }
-
netif_info(adpt, hw, adpt->netdev,
"changing MTU from %d to %d\n", netdev->mtu,
new_mtu);
@@ -467,6 +460,12 @@ static int emac_clks_phase1_init(struct platform_device *pdev,
{
int ret;
+ /* On ACPI platforms, clocks are controlled by firmware and/or
+ * ACPI, not by drivers.
+ */
+ if (has_acpi_companion(&pdev->dev))
+ return 0;
+
ret = emac_clks_get(pdev, adpt);
if (ret)
return ret;
@@ -492,6 +491,9 @@ static int emac_clks_phase2_init(struct platform_device *pdev,
{
int ret;
+ if (has_acpi_companion(&pdev->dev))
+ return 0;
+
ret = clk_set_rate(adpt->clk[EMAC_CLK_TX], 125000000);
if (ret)
return ret;
@@ -680,6 +682,12 @@ static int emac_probe(struct platform_device *pdev)
netdev->vlan_features |= NETIF_F_SG | NETIF_F_HW_CSUM |
NETIF_F_TSO | NETIF_F_TSO6;
+ /* MTU range: 46 - 9194 */
+ netdev->min_mtu = EMAC_MIN_ETH_FRAME_SIZE -
+ (ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
+ netdev->max_mtu = EMAC_MAX_ETH_FRAME_SIZE -
+ (ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
+
INIT_WORK(&adpt->work_thread, emac_work_thread);
/* Initialize queues */
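The 46 - 9194 range follows from subtracting the 22 bytes of header, FCS and VLAN overhead (ETH_HLEN 14 + ETH_FCS_LEN 4 + VLAN_HLEN 4) from the frame-size constants, whose values (not shown in this diff) this arithmetic implies are 68 and 9216 bytes:

/* Arithmetic behind the MTU range comment above (frame sizes implied):
 *   min MTU: 68   - (14 + 4 + 4) = 46
 *   max MTU: 9216 - (14 + 4 + 4) = 9194
 */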
diff --git a/drivers/net/ethernet/qualcomm/qca_framing.h b/drivers/net/ethernet/qualcomm/qca_framing.h
index 5d965959c978..d5e795dcdf47 100644
--- a/drivers/net/ethernet/qualcomm/qca_framing.h
+++ b/drivers/net/ethernet/qualcomm/qca_framing.h
@@ -43,9 +43,9 @@
/* Frame length is invalid */
#define QCAFRM_INVFRAME (QCAFRM_ERR_BASE - 4)
-/* Min/Max Ethernet MTU */
-#define QCAFRM_ETHMINMTU 46
-#define QCAFRM_ETHMAXMTU 1500
+/* Min/Max Ethernet MTU: 46/1500 */
+#define QCAFRM_ETHMINMTU (ETH_ZLEN - ETH_HLEN)
+#define QCAFRM_ETHMAXMTU ETH_DATA_LEN
/* Min/Max frame lengths */
#define QCAFRM_ETHMINLEN (QCAFRM_ETHMINMTU + ETH_HLEN)
diff --git a/drivers/net/ethernet/qualcomm/qca_spi.c b/drivers/net/ethernet/qualcomm/qca_spi.c
index 6e2add979471..513e6c74e199 100644
--- a/drivers/net/ethernet/qualcomm/qca_spi.c
+++ b/drivers/net/ethernet/qualcomm/qca_spi.c
@@ -780,24 +780,12 @@ qcaspi_netdev_uninit(struct net_device *dev)
dev_kfree_skb(qca->rx_skb);
}
-static int
-qcaspi_netdev_change_mtu(struct net_device *dev, int new_mtu)
-{
- if ((new_mtu < QCAFRM_ETHMINMTU) || (new_mtu > QCAFRM_ETHMAXMTU))
- return -EINVAL;
-
- dev->mtu = new_mtu;
-
- return 0;
-}
-
static const struct net_device_ops qcaspi_netdev_ops = {
.ndo_init = qcaspi_netdev_init,
.ndo_uninit = qcaspi_netdev_uninit,
.ndo_open = qcaspi_netdev_open,
.ndo_stop = qcaspi_netdev_close,
.ndo_start_xmit = qcaspi_netdev_xmit,
- .ndo_change_mtu = qcaspi_netdev_change_mtu,
.ndo_set_mac_address = eth_mac_addr,
.ndo_tx_timeout = qcaspi_netdev_tx_timeout,
.ndo_validate_addr = eth_validate_addr,
@@ -814,6 +802,10 @@ qcaspi_netdev_setup(struct net_device *dev)
dev->priv_flags &= ~IFF_TX_SKB_SHARING;
dev->tx_queue_len = 100;
+ /* MTU range: 46 - 1500 */
+ dev->min_mtu = QCAFRM_ETHMINMTU;
+ dev->max_mtu = QCAFRM_ETHMAXMTU;
+
qca = netdev_priv(dev);
memset(qca, 0, sizeof(struct qcaspi));
diff --git a/drivers/net/ethernet/rdc/r6040.c b/drivers/net/ethernet/rdc/r6040.c
index 5ef5d728c250..aa11b70b9ca4 100644
--- a/drivers/net/ethernet/rdc/r6040.c
+++ b/drivers/net/ethernet/rdc/r6040.c
@@ -472,8 +472,6 @@ static void r6040_down(struct net_device *dev)
iowrite16(adrp[0], ioaddr + MID_0L);
iowrite16(adrp[1], ioaddr + MID_0M);
iowrite16(adrp[2], ioaddr + MID_0H);
-
- phy_stop(dev->phydev);
}
static int r6040_close(struct net_device *dev)
@@ -481,12 +479,12 @@ static int r6040_close(struct net_device *dev)
struct r6040_private *lp = netdev_priv(dev);
struct pci_dev *pdev = lp->pdev;
- spin_lock_irq(&lp->lock);
+ phy_stop(dev->phydev);
napi_disable(&lp->napi);
netif_stop_queue(dev);
- r6040_down(dev);
- free_irq(dev->irq, dev);
+ spin_lock_irq(&lp->lock);
+ r6040_down(dev);
/* Free RX buffer */
r6040_free_rxbufs(dev);
@@ -496,6 +494,8 @@ static int r6040_close(struct net_device *dev)
spin_unlock_irq(&lp->lock);
+ free_irq(dev->irq, dev);
+
/* Free Descriptor memory */
if (lp->rx_ring) {
pci_free_consistent(pdev,
@@ -969,7 +969,6 @@ static const struct net_device_ops r6040_netdev_ops = {
.ndo_start_xmit = r6040_start_xmit,
.ndo_get_stats = r6040_get_stats,
.ndo_set_rx_mode = r6040_multicast_list,
- .ndo_change_mtu = eth_change_mtu,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
.ndo_do_ioctl = r6040_ioctl,
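Taken together, the r6040_close()/r6040_down() hunks above reorder the teardown so that sleeping operations leave the spinlocked region: phy_stop() moves ahead of napi_disable(), the MAC shutdown and buffer freeing stay under the lock, and free_irq(), which can sleep waiting for a running handler, moves after the unlock. The resulting order, sketched:

/* Resulting teardown order in r6040_close() after this patch (sketch) */
phy_stop(dev->phydev);		/* stop the PHY state machine first */
napi_disable(&lp->napi);
netif_stop_queue(dev);

spin_lock_irq(&lp->lock);
r6040_down(dev);		/* MAC reset, MAC address restore */
r6040_free_rxbufs(dev);		/* free Rx buffers under the lock */
spin_unlock_irq(&lp->lock);

free_irq(dev->irq, dev);	/* may sleep: must run unlocked */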
diff --git a/drivers/net/ethernet/realtek/8139cp.c b/drivers/net/ethernet/realtek/8139cp.c
index 5297bf77211c..b7c89ebcf4a2 100644
--- a/drivers/net/ethernet/realtek/8139cp.c
+++ b/drivers/net/ethernet/realtek/8139cp.c
@@ -1277,10 +1277,6 @@ static int cp_change_mtu(struct net_device *dev, int new_mtu)
{
struct cp_private *cp = netdev_priv(dev);
- /* check for invalid MTU, according to hardware limits */
- if (new_mtu < CP_MIN_MTU || new_mtu > CP_MAX_MTU)
- return -EINVAL;
-
/* if network interface not up, no need for complexity */
if (!netif_running(dev)) {
dev->mtu = new_mtu;
@@ -2010,6 +2006,10 @@ static int cp_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
NETIF_F_HIGHDMA;
+ /* MTU range: 60 - 4096 */
+ dev->min_mtu = CP_MIN_MTU;
+ dev->max_mtu = CP_MAX_MTU;
+
rc = register_netdev(dev);
if (rc)
goto err_out_iomap;
diff --git a/drivers/net/ethernet/realtek/8139too.c b/drivers/net/ethernet/realtek/8139too.c
index da4c2d8a4173..9bc047ac883b 100644
--- a/drivers/net/ethernet/realtek/8139too.c
+++ b/drivers/net/ethernet/realtek/8139too.c
@@ -924,19 +924,10 @@ static int rtl8139_set_features(struct net_device *dev, netdev_features_t featur
return 0;
}
-static int rtl8139_change_mtu(struct net_device *dev, int new_mtu)
-{
- if (new_mtu < 68 || new_mtu > MAX_ETH_DATA_SIZE)
- return -EINVAL;
- dev->mtu = new_mtu;
- return 0;
-}
-
static const struct net_device_ops rtl8139_netdev_ops = {
.ndo_open = rtl8139_open,
.ndo_stop = rtl8139_close,
.ndo_get_stats64 = rtl8139_get_stats64,
- .ndo_change_mtu = rtl8139_change_mtu,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = rtl8139_set_mac_address,
.ndo_start_xmit = rtl8139_start_xmit,
@@ -1022,6 +1013,10 @@ static int rtl8139_init_one(struct pci_dev *pdev,
dev->hw_features |= NETIF_F_RXALL;
dev->hw_features |= NETIF_F_RXFCS;
+ /* MTU range: 68 - 1770 */
+ dev->min_mtu = ETH_MIN_MTU;
+ dev->max_mtu = MAX_ETH_DATA_SIZE;
+
/* tp zeroed and aligned in alloc_etherdev */
tp = netdev_priv(dev);
diff --git a/drivers/net/ethernet/realtek/atp.c b/drivers/net/ethernet/realtek/atp.c
index 5cb96785fb63..570ed3bd3cbf 100644
--- a/drivers/net/ethernet/realtek/atp.c
+++ b/drivers/net/ethernet/realtek/atp.c
@@ -245,7 +245,6 @@ static const struct net_device_ops atp_netdev_ops = {
.ndo_start_xmit = atp_send_packet,
.ndo_set_rx_mode = set_rx_mode,
.ndo_tx_timeout = tx_timeout,
- .ndo_change_mtu = eth_change_mtu,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
};
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index bf000d819a21..f9b97f5946f8 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -2344,6 +2344,13 @@ static void rtl8169_get_strings(struct net_device *dev, u32 stringset, u8 *data)
}
}
+static int rtl8169_nway_reset(struct net_device *dev)
+{
+ struct rtl8169_private *tp = netdev_priv(dev);
+
+ return mii_nway_restart(&tp->mii);
+}
+
static const struct ethtool_ops rtl8169_ethtool_ops = {
.get_drvinfo = rtl8169_get_drvinfo,
.get_regs_len = rtl8169_get_regs_len,
@@ -2359,6 +2366,7 @@ static const struct ethtool_ops rtl8169_ethtool_ops = {
.get_sset_count = rtl8169_get_sset_count,
.get_ethtool_stats = rtl8169_get_ethtool_stats,
.get_ts_info = ethtool_op_get_ts_info,
+ .nway_reset = rtl8169_nway_reset,
};
static void rtl8169_get_mac_version(struct rtl8169_private *tp,
@@ -6673,10 +6681,6 @@ static int rtl8169_change_mtu(struct net_device *dev, int new_mtu)
{
struct rtl8169_private *tp = netdev_priv(dev);
- if (new_mtu < ETH_ZLEN ||
- new_mtu > rtl_chip_infos[tp->mac_version].jumbo_max)
- return -EINVAL;
-
if (new_mtu > ETH_DATA_LEN)
rtl_hw_jumbo_enable(tp);
else
@@ -8431,6 +8435,10 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
dev->hw_features |= NETIF_F_RXALL;
dev->hw_features |= NETIF_F_RXFCS;
+ /* MTU range: 60 - hw-specific max */
+ dev->min_mtu = ETH_ZLEN;
+ dev->max_mtu = rtl_chip_infos[chipset].jumbo_max;
+
tp->hw_start = cfg->hw_start;
tp->event_slow = cfg->event_slow;
diff --git a/drivers/net/ethernet/renesas/Kconfig b/drivers/net/ethernet/renesas/Kconfig
index 85ec447c2d18..27be51f0a421 100644
--- a/drivers/net/ethernet/renesas/Kconfig
+++ b/drivers/net/ethernet/renesas/Kconfig
@@ -37,7 +37,7 @@ config RAVB
select MII
select MDIO_BITBANG
select PHYLIB
- select PTP_1588_CLOCK
+ imply PTP_1588_CLOCK
help
Renesas Ethernet AVB device driver.
This driver supports the following SoCs:
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
index d6a217874a8b..92d7692c840d 100644
--- a/drivers/net/ethernet/renesas/ravb_main.c
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -1789,7 +1789,6 @@ static const struct net_device_ops ravb_netdev_ops = {
.ndo_do_ioctl = ravb_do_ioctl,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
- .ndo_change_mtu = eth_change_mtu,
};
/* MDIO bus init function */
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index 1a92de705199..f341c1bc7001 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -2914,7 +2914,6 @@ static const struct net_device_ops sh_eth_netdev_ops = {
.ndo_do_ioctl = sh_eth_do_ioctl,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
- .ndo_change_mtu = eth_change_mtu,
};
static const struct net_device_ops sh_eth_netdev_ops_tsu = {
@@ -2929,7 +2928,6 @@ static const struct net_device_ops sh_eth_netdev_ops_tsu = {
.ndo_do_ioctl = sh_eth_do_ioctl,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
- .ndo_change_mtu = eth_change_mtu,
};
#ifdef CONFIG_OF
diff --git a/drivers/net/ethernet/rocker/rocker.h b/drivers/net/ethernet/rocker/rocker.h
index 2eb9b49569d5..ee9675db5bf9 100644
--- a/drivers/net/ethernet/rocker/rocker.h
+++ b/drivers/net/ethernet/rocker/rocker.h
@@ -72,6 +72,7 @@ struct rocker {
struct rocker_dma_ring_info event_ring;
struct notifier_block fib_nb;
struct rocker_world_ops *wops;
+ struct workqueue_struct *rocker_owq;
void *wpriv;
};
diff --git a/drivers/net/ethernet/rocker/rocker_main.c b/drivers/net/ethernet/rocker/rocker_main.c
index 24b746406bc7..7c450b5a1138 100644
--- a/drivers/net/ethernet/rocker/rocker_main.c
+++ b/drivers/net/ethernet/rocker/rocker_main.c
@@ -28,6 +28,7 @@
#include <linux/if_bridge.h>
#include <linux/bitops.h>
#include <linux/ctype.h>
+#include <linux/workqueue.h>
#include <net/switchdev.h>
#include <net/rtnetlink.h>
#include <net/netevent.h>
@@ -1953,12 +1954,6 @@ static int rocker_port_change_mtu(struct net_device *dev, int new_mtu)
int running = netif_running(dev);
int err;
-#define ROCKER_PORT_MIN_MTU 68
-#define ROCKER_PORT_MAX_MTU 9000
-
- if (new_mtu < ROCKER_PORT_MIN_MTU || new_mtu > ROCKER_PORT_MAX_MTU)
- return -EINVAL;
-
if (running)
rocker_port_stop(dev);
@@ -2171,28 +2166,70 @@ static const struct switchdev_ops rocker_port_switchdev_ops = {
.switchdev_port_obj_dump = rocker_port_obj_dump,
};
-static int rocker_router_fib_event(struct notifier_block *nb,
- unsigned long event, void *ptr)
+struct rocker_fib_event_work {
+ struct work_struct work;
+ struct fib_entry_notifier_info fen_info;
+ struct rocker *rocker;
+ unsigned long event;
+};
+
+static void rocker_router_fib_event_work(struct work_struct *work)
{
- struct rocker *rocker = container_of(nb, struct rocker, fib_nb);
- struct fib_entry_notifier_info *fen_info = ptr;
+ struct rocker_fib_event_work *fib_work =
+ container_of(work, struct rocker_fib_event_work, work);
+ struct rocker *rocker = fib_work->rocker;
int err;
- switch (event) {
+ /* Protect internal structures from changes */
+ rtnl_lock();
+ switch (fib_work->event) {
case FIB_EVENT_ENTRY_ADD:
- err = rocker_world_fib4_add(rocker, fen_info);
+ err = rocker_world_fib4_add(rocker, &fib_work->fen_info);
if (err)
rocker_world_fib4_abort(rocker);
- else
+ fib_info_put(fib_work->fen_info.fi);
break;
case FIB_EVENT_ENTRY_DEL:
- rocker_world_fib4_del(rocker, fen_info);
+ rocker_world_fib4_del(rocker, &fib_work->fen_info);
+ fib_info_put(fib_work->fen_info.fi);
break;
case FIB_EVENT_RULE_ADD: /* fall through */
case FIB_EVENT_RULE_DEL:
rocker_world_fib4_abort(rocker);
break;
}
+ rtnl_unlock();
+ kfree(fib_work);
+}
+
+/* Called with rcu_read_lock() */
+static int rocker_router_fib_event(struct notifier_block *nb,
+ unsigned long event, void *ptr)
+{
+ struct rocker *rocker = container_of(nb, struct rocker, fib_nb);
+ struct rocker_fib_event_work *fib_work;
+
+ fib_work = kzalloc(sizeof(*fib_work), GFP_ATOMIC);
+ if (WARN_ON(!fib_work))
+ return NOTIFY_BAD;
+
+ INIT_WORK(&fib_work->work, rocker_router_fib_event_work);
+ fib_work->rocker = rocker;
+ fib_work->event = event;
+
+ switch (event) {
+ case FIB_EVENT_ENTRY_ADD: /* fall through */
+ case FIB_EVENT_ENTRY_DEL:
+ memcpy(&fib_work->fen_info, ptr, sizeof(fib_work->fen_info));
+ /* Take reference on fib_info to prevent it from being
+ * freed while work is queued. Release it afterwards.
+ */
+ fib_info_hold(fib_work->fen_info.fi);
+ break;
+ }
+
+ queue_work(rocker->rocker_owq, &fib_work->work);
+
return NOTIFY_DONE;
}
@@ -2536,9 +2573,11 @@ static void rocker_port_dev_addr_init(struct rocker_port *rocker_port)
}
}
+#define ROCKER_PORT_MIN_MTU ETH_MIN_MTU
+#define ROCKER_PORT_MAX_MTU 9000
static int rocker_probe_port(struct rocker *rocker, unsigned int port_number)
{
- const struct pci_dev *pdev = rocker->pdev;
+ struct pci_dev *pdev = rocker->pdev;
struct rocker_port *rocker_port;
struct net_device *dev;
int err;
@@ -2546,6 +2585,7 @@ static int rocker_probe_port(struct rocker *rocker, unsigned int port_number)
dev = alloc_etherdev(sizeof(struct rocker_port));
if (!dev)
return -ENOMEM;
+ SET_NETDEV_DEV(dev, &pdev->dev);
rocker_port = netdev_priv(dev);
rocker_port->dev = dev;
rocker_port->rocker = rocker;
@@ -2570,6 +2610,10 @@ static int rocker_probe_port(struct rocker *rocker, unsigned int port_number)
dev->features |= NETIF_F_NETNS_LOCAL | NETIF_F_SG;
+ /* MTU range: 68 - 9000 */
+ dev->min_mtu = ROCKER_PORT_MIN_MTU;
+ dev->max_mtu = ROCKER_PORT_MAX_MTU;
+
err = rocker_world_port_pre_init(rocker_port);
if (err) {
dev_err(&pdev->dev, "port world pre-init failed\n");
@@ -2753,6 +2797,21 @@ static int rocker_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto err_request_event_irq;
}
+ rocker->rocker_owq = alloc_ordered_workqueue(rocker_driver_name,
+ WQ_MEM_RECLAIM);
+ if (!rocker->rocker_owq) {
+ err = -ENOMEM;
+ goto err_alloc_ordered_workqueue;
+ }
+
+ /* Only FIBs pointing to our own netdevs are programmed into
+ * the device, so no need to pass a callback.
+ */
+ rocker->fib_nb.notifier_call = rocker_router_fib_event;
+ err = register_fib_notifier(&rocker->fib_nb, NULL);
+ if (err)
+ goto err_register_fib_notifier;
+
rocker->hw.id = rocker_read64(rocker, SWITCH_ID);
err = rocker_probe_ports(rocker);
@@ -2761,15 +2820,16 @@ static int rocker_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto err_probe_ports;
}
- rocker->fib_nb.notifier_call = rocker_router_fib_event;
- register_fib_notifier(&rocker->fib_nb);
-
dev_info(&pdev->dev, "Rocker switch with id %*phN\n",
(int)sizeof(rocker->hw.id), &rocker->hw.id);
return 0;
err_probe_ports:
+ unregister_fib_notifier(&rocker->fib_nb);
+err_register_fib_notifier:
+ destroy_workqueue(rocker->rocker_owq);
+err_alloc_ordered_workqueue:
free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_EVENT), rocker);
err_request_event_irq:
free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_CMD), rocker);
@@ -2795,9 +2855,10 @@ static void rocker_remove(struct pci_dev *pdev)
{
struct rocker *rocker = pci_get_drvdata(pdev);
+ rocker_remove_ports(rocker);
unregister_fib_notifier(&rocker->fib_nb);
rocker_write32(rocker, CONTROL, ROCKER_CONTROL_RESET);
- rocker_remove_ports(rocker);
+ destroy_workqueue(rocker->rocker_owq);
free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_EVENT), rocker);
free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_CMD), rocker);
rocker_dma_rings_fini(rocker);
@@ -2839,20 +2900,37 @@ static bool rocker_port_dev_check_under(const struct net_device *dev,
return true;
}
+struct rocker_walk_data {
+ struct rocker *rocker;
+ struct rocker_port *port;
+};
+
+static int rocker_lower_dev_walk(struct net_device *lower_dev, void *_data)
+{
+ struct rocker_walk_data *data = _data;
+ int ret = 0;
+
+ if (rocker_port_dev_check_under(lower_dev, data->rocker)) {
+ data->port = netdev_priv(lower_dev);
+ ret = 1;
+ }
+
+ return ret;
+}
+
struct rocker_port *rocker_port_dev_lower_find(struct net_device *dev,
struct rocker *rocker)
{
- struct net_device *lower_dev;
- struct list_head *iter;
+ struct rocker_walk_data data;
if (rocker_port_dev_check_under(dev, rocker))
return netdev_priv(dev);
- netdev_for_each_all_lower_dev(dev, lower_dev, iter) {
- if (rocker_port_dev_check_under(lower_dev, rocker))
- return netdev_priv(lower_dev);
- }
- return NULL;
+ data.rocker = rocker;
+ data.port = NULL;
+ netdev_walk_all_lower_dev(dev, rocker_lower_dev_walk, &data);
+
+ return data.port;
}
static int rocker_netdevice_event(struct notifier_block *unused,
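
The FIB notifier rework above follows the standard pattern for notifier chains invoked under rcu_read_lock(): the atomic callback only snapshots the event and pins the fib_info, and an ordered workqueue replays it under rtnl_lock (with ofdpa_fini() flushing that queue before teardown, as the rocker_ofdpa.c hunk below adds). A condensed sketch using the hunk's own identifiers, error paths trimmed:

static int rocker_router_fib_event(struct notifier_block *nb,
				   unsigned long event, void *ptr)
{
	struct rocker *rocker = container_of(nb, struct rocker, fib_nb);
	struct rocker_fib_event_work *fib_work;

	fib_work = kzalloc(sizeof(*fib_work), GFP_ATOMIC);	/* atomic ctx */
	if (WARN_ON(!fib_work))
		return NOTIFY_BAD;

	INIT_WORK(&fib_work->work, rocker_router_fib_event_work);
	fib_work->rocker = rocker;
	fib_work->event = event;

	switch (event) {
	case FIB_EVENT_ENTRY_ADD:
	case FIB_EVENT_ENTRY_DEL:
		memcpy(&fib_work->fen_info, ptr, sizeof(fib_work->fen_info));
		/* Pinned here; released by the work item once it has run
		 * under rtnl_lock.
		 */
		fib_info_hold(fib_work->fen_info.fi);
		break;
	}

	queue_work(rocker->rocker_owq, &fib_work->work);	/* ordered wq */
	return NOTIFY_DONE;
}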
diff --git a/drivers/net/ethernet/rocker/rocker_ofdpa.c b/drivers/net/ethernet/rocker/rocker_ofdpa.c
index 4ca461322d60..7cd76b6b5cb9 100644
--- a/drivers/net/ethernet/rocker/rocker_ofdpa.c
+++ b/drivers/net/ethernet/rocker/rocker_ofdpa.c
@@ -2516,6 +2516,7 @@ static void ofdpa_fini(struct rocker *rocker)
int bkt;
del_timer_sync(&ofdpa->fdb_cleanup_timer);
+ flush_workqueue(rocker->rocker_owq);
spin_lock_irqsave(&ofdpa->flow_tbl_lock, flags);
hash_for_each_safe(ofdpa->flow_tbl, bkt, tmp, flow_entry, entry)
diff --git a/drivers/net/ethernet/samsung/Kconfig b/drivers/net/ethernet/samsung/Kconfig
index 2360d8150777..fbd5e06654c6 100644
--- a/drivers/net/ethernet/samsung/Kconfig
+++ b/drivers/net/ethernet/samsung/Kconfig
@@ -21,7 +21,7 @@ config SXGBE_ETH
depends on HAS_IOMEM && HAS_DMA
select PHYLIB
select CRC32
- select PTP_1588_CLOCK
+ imply PTP_1588_CLOCK
---help---
This is the driver for the SXGBE 10G Ethernet IP block found on
Samsung platforms.
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_common.h b/drivers/net/ethernet/samsung/sxgbe/sxgbe_common.h
index 5cb51b609f02..c61f260e18a4 100644
--- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_common.h
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_common.h
@@ -384,7 +384,6 @@ struct sxgbe_tx_queue {
dma_addr_t *tx_skbuff_dma;
struct sk_buff **tx_skbuff;
struct timer_list txtimer;
- spinlock_t tx_lock; /* lock for tx queues */
unsigned int cur_tx;
unsigned int dirty_tx;
u32 tx_count_frames;
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
index ea44a2456ce1..cddcff5a00a7 100644
--- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
@@ -426,9 +426,6 @@ static int init_tx_ring(struct device *dev, u8 queue_no,
tx_ring->dirty_tx = 0;
tx_ring->cur_tx = 0;
- /* initialise TX queue lock */
- spin_lock_init(&tx_ring->tx_lock);
-
return 0;
dmamem_err:
@@ -743,7 +740,7 @@ static void sxgbe_tx_queue_clean(struct sxgbe_tx_queue *tqueue)
dev_txq = netdev_get_tx_queue(priv->dev, queue_no);
- spin_lock(&tqueue->tx_lock);
+ __netif_tx_lock(dev_txq, smp_processor_id());
priv->xstats.tx_clean++;
while (tqueue->dirty_tx != tqueue->cur_tx) {
@@ -781,18 +778,13 @@ static void sxgbe_tx_queue_clean(struct sxgbe_tx_queue *tqueue)
/* wake up queue */
if (unlikely(netif_tx_queue_stopped(dev_txq) &&
- sxgbe_tx_avail(tqueue, tx_rsize) > SXGBE_TX_THRESH(priv))) {
- netif_tx_lock(priv->dev);
- if (netif_tx_queue_stopped(dev_txq) &&
- sxgbe_tx_avail(tqueue, tx_rsize) > SXGBE_TX_THRESH(priv)) {
- if (netif_msg_tx_done(priv))
- pr_debug("%s: restart transmit\n", __func__);
- netif_tx_wake_queue(dev_txq);
- }
- netif_tx_unlock(priv->dev);
+ sxgbe_tx_avail(tqueue, tx_rsize) > SXGBE_TX_THRESH(priv))) {
+ if (netif_msg_tx_done(priv))
+ pr_debug("%s: restart transmit\n", __func__);
+ netif_tx_wake_queue(dev_txq);
}
- spin_unlock(&tqueue->tx_lock);
+ __netif_tx_unlock(dev_txq);
}
/**
@@ -1304,9 +1296,6 @@ static netdev_tx_t sxgbe_xmit(struct sk_buff *skb, struct net_device *dev)
tqueue->hwts_tx_en)))
ctxt_desc_req = 1;
- /* get the spinlock */
- spin_lock(&tqueue->tx_lock);
-
if (priv->tx_path_in_lpi_mode)
sxgbe_disable_eee_mode(priv);
@@ -1316,8 +1305,6 @@ static netdev_tx_t sxgbe_xmit(struct sk_buff *skb, struct net_device *dev)
netdev_err(dev, "%s: Tx Ring is full when %d queue is awake\n",
__func__, txq_index);
}
- /* release the spin lock in case of BUSY */
- spin_unlock(&tqueue->tx_lock);
return NETDEV_TX_BUSY;
}
@@ -1436,8 +1423,6 @@ static netdev_tx_t sxgbe_xmit(struct sk_buff *skb, struct net_device *dev)
priv->hw->dma->enable_dma_transmission(priv->ioaddr, txq_index);
- spin_unlock(&tqueue->tx_lock);
-
return NETDEV_TX_OK;
}
@@ -1820,19 +1805,6 @@ static int sxgbe_set_features(struct net_device *dev,
*/
static int sxgbe_change_mtu(struct net_device *dev, int new_mtu)
{
- /* RFC 791, page 25, "Every internet module must be able to forward
- * a datagram of 68 octets without further fragmentation."
- */
- if (new_mtu < MIN_MTU || (new_mtu > MAX_MTU)) {
- netdev_err(dev, "invalid MTU, MTU should be in between %d and %d\n",
- MIN_MTU, MAX_MTU);
- return -EINVAL;
- }
-
- /* Return if the buffer sizes will not change */
- if (dev->mtu == new_mtu)
- return 0;
-
dev->mtu = new_mtu;
if (!netif_running(dev))
@@ -2144,6 +2116,10 @@ struct sxgbe_priv_data *sxgbe_drv_probe(struct device *device,
/* assign filtering support */
ndev->priv_flags |= IFF_UNICAST_FLT;
+ /* MTU range: 68 - 9000 */
+ ndev->min_mtu = MIN_MTU;
+ ndev->max_mtu = MAX_MTU;
+
priv->msg_enable = netif_msg_init(debug, default_msg_level);
/* Enable TCP segmentation offload for all DMA channels */
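
The sxgbe hunks replace the driver's private per-ring spinlock with the per-queue TX lock the core already holds around ndo_start_xmit, so the transmit path needs no extra locking and the completion path takes the same lock explicitly. A minimal sketch of the completion side ("foo_" identifiers hypothetical):

static void foo_tx_clean(struct net_device *ndev, unsigned int queue_no)
{
	struct netdev_queue *txq = netdev_get_tx_queue(ndev, queue_no);

	__netif_tx_lock(txq, smp_processor_id());   /* same lock as xmit path */

	/* ... reclaim completed descriptors ... */

	/* Waking under the queue lock removes the stop/re-check dance the
	 * old code needed after taking netif_tx_lock() separately.
	 */
	if (netif_tx_queue_stopped(txq) &&
	    foo_tx_avail(queue_no) > FOO_TX_THRESH)
		netif_tx_wake_queue(txq);

	__netif_tx_unlock(txq);
}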
diff --git a/drivers/net/ethernet/seeq/ether3.c b/drivers/net/ethernet/seeq/ether3.c
index bdac936a68bc..244c1e171017 100644
--- a/drivers/net/ethernet/seeq/ether3.c
+++ b/drivers/net/ethernet/seeq/ether3.c
@@ -745,7 +745,6 @@ static const struct net_device_ops ether3_netdev_ops = {
.ndo_set_rx_mode = ether3_setmulticastlist,
.ndo_tx_timeout = ether3_timeout,
.ndo_validate_addr = eth_validate_addr,
- .ndo_change_mtu = eth_change_mtu,
.ndo_set_mac_address = eth_mac_addr,
};
diff --git a/drivers/net/ethernet/seeq/sgiseeq.c b/drivers/net/ethernet/seeq/sgiseeq.c
index c2bd5378ffda..ed34196028b8 100644
--- a/drivers/net/ethernet/seeq/sgiseeq.c
+++ b/drivers/net/ethernet/seeq/sgiseeq.c
@@ -714,7 +714,6 @@ static const struct net_device_ops sgiseeq_netdev_ops = {
.ndo_tx_timeout = timeout,
.ndo_set_rx_mode = sgiseeq_set_multicast,
.ndo_set_mac_address = sgiseeq_set_mac_address,
- .ndo_change_mtu = eth_change_mtu,
.ndo_validate_addr = eth_validate_addr,
};
diff --git a/drivers/net/ethernet/sfc/Kconfig b/drivers/net/ethernet/sfc/Kconfig
index 4dd92b7b80f4..2c032629c369 100644
--- a/drivers/net/ethernet/sfc/Kconfig
+++ b/drivers/net/ethernet/sfc/Kconfig
@@ -1,20 +1,36 @@
+#
+# Solarflare device configuration
+#
+
+config NET_VENDOR_SOLARFLARE
+ bool "Solarflare devices"
+ default y
+ ---help---
+ If you have a network (Ethernet) card belonging to this class, say Y.
+
+ Note that the answer to this question doesn't directly affect the
+ kernel: saying N will just cause the configurator to skip all
+ the questions about Solarflare devices. If you say Y, you will be asked
+ for your specific card in the following questions.
+
+if NET_VENDOR_SOLARFLARE
+
config SFC
- tristate "Solarflare SFC4000/SFC9000/SFC9100-family support"
+ tristate "Solarflare SFC9000/SFC9100-family support"
depends on PCI
select MDIO
select CRC32
select I2C
select I2C_ALGOBIT
- select PTP_1588_CLOCK
+ imply PTP_1588_CLOCK
---help---
This driver supports 10/40-gigabit Ethernet cards based on
- the Solarflare SFC4000, SFC9000-family and SFC9100-family
- controllers.
+ the Solarflare SFC9000-family and SFC9100-family controllers.
To compile this driver as a module, choose M here. The module
will be called sfc.
config SFC_MTD
- bool "Solarflare SFC4000/SFC9000/SFC9100-family MTD support"
+ bool "Solarflare SFC9000/SFC9100-family MTD support"
depends on SFC && MTD && !(SFC=y && MTD=m)
default y
---help---
@@ -45,3 +61,7 @@ config SFC_MCDI_LOGGING
Driver-Interface) commands and responses, allowing debugging of
driver/firmware interaction. The tracing is actually enabled by
a sysfs file 'mcdi_logging' under the PCI device.
+
+source "drivers/net/ethernet/sfc/falcon/Kconfig"
+
+endif # NET_VENDOR_SOLARFLARE
diff --git a/drivers/net/ethernet/sfc/Makefile b/drivers/net/ethernet/sfc/Makefile
index ce8470fe79d5..520cfcc17785 100644
--- a/drivers/net/ethernet/sfc/Makefile
+++ b/drivers/net/ethernet/sfc/Makefile
@@ -1,7 +1,6 @@
-sfc-y += efx.o nic.o farch.o falcon.o siena.o ef10.o tx.o \
- rx.o selftest.o ethtool.o qt202x_phy.o mdio_10g.o \
- tenxpress.o txc43128_phy.o falcon_boards.o \
- mcdi.o mcdi_port.o mcdi_mon.o ptp.o
+sfc-y += efx.o nic.o farch.o siena.o ef10.o tx.o rx.o \
+ selftest.o ethtool.o ptp.o tx_tso.o \
+ mcdi.o mcdi_port.o mcdi_mon.o
sfc-$(CONFIG_SFC_MTD) += mtd.o
sfc-$(CONFIG_SFC_SRIOV) += sriov.o siena_sriov.o ef10_sriov.o
diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c
index 00279da6a1e8..de2947ccc5ad 100644
--- a/drivers/net/ethernet/sfc/ef10.c
+++ b/drivers/net/ethernet/sfc/ef10.c
@@ -2086,6 +2086,92 @@ static inline void efx_ef10_push_tx_desc(struct efx_tx_queue *tx_queue,
ER_DZ_TX_DESC_UPD, tx_queue->queue);
}
+/* Add Firmware-Assisted TSO v2 option descriptors to a queue.
+ */
+static int efx_ef10_tx_tso_desc(struct efx_tx_queue *tx_queue,
+ struct sk_buff *skb,
+ bool *data_mapped)
+{
+ struct efx_tx_buffer *buffer;
+ struct tcphdr *tcp;
+ struct iphdr *ip;
+
+ u16 ipv4_id;
+ u32 seqnum;
+ u32 mss;
+
+ EFX_WARN_ON_ONCE_PARANOID(tx_queue->tso_version != 2);
+
+ mss = skb_shinfo(skb)->gso_size;
+
+ if (unlikely(mss < 4)) {
+ WARN_ONCE(1, "MSS of %u is too small for TSO v2\n", mss);
+ return -EINVAL;
+ }
+
+ ip = ip_hdr(skb);
+ if (ip->version == 4) {
+ /* Modify IPv4 header if needed. */
+ ip->tot_len = 0;
+ ip->check = 0;
+ ipv4_id = ip->id;
+ } else {
+ /* Modify IPv6 header if needed. */
+ struct ipv6hdr *ipv6 = ipv6_hdr(skb);
+
+ ipv6->payload_len = 0;
+ ipv4_id = 0;
+ }
+
+ tcp = tcp_hdr(skb);
+ seqnum = ntohl(tcp->seq);
+
+ buffer = efx_tx_queue_get_insert_buffer(tx_queue);
+
+ buffer->flags = EFX_TX_BUF_OPTION;
+ buffer->len = 0;
+ buffer->unmap_len = 0;
+ EFX_POPULATE_QWORD_5(buffer->option,
+ ESF_DZ_TX_DESC_IS_OPT, 1,
+ ESF_DZ_TX_OPTION_TYPE, ESE_DZ_TX_OPTION_DESC_TSO,
+ ESF_DZ_TX_TSO_OPTION_TYPE,
+ ESE_DZ_TX_TSO_OPTION_DESC_FATSO2A,
+ ESF_DZ_TX_TSO_IP_ID, ipv4_id,
+ ESF_DZ_TX_TSO_TCP_SEQNO, seqnum
+ );
+ ++tx_queue->insert_count;
+
+ buffer = efx_tx_queue_get_insert_buffer(tx_queue);
+
+ buffer->flags = EFX_TX_BUF_OPTION;
+ buffer->len = 0;
+ buffer->unmap_len = 0;
+ EFX_POPULATE_QWORD_4(buffer->option,
+ ESF_DZ_TX_DESC_IS_OPT, 1,
+ ESF_DZ_TX_OPTION_TYPE, ESE_DZ_TX_OPTION_DESC_TSO,
+ ESF_DZ_TX_TSO_OPTION_TYPE,
+ ESE_DZ_TX_TSO_OPTION_DESC_FATSO2B,
+ ESF_DZ_TX_TSO_TCP_MSS, mss
+ );
+ ++tx_queue->insert_count;
+
+ return 0;
+}
+
+static u32 efx_ef10_tso_versions(struct efx_nic *efx)
+{
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ u32 tso_versions = 0;
+
+ if (nic_data->datapath_caps &
+ (1 << MC_CMD_GET_CAPABILITIES_OUT_TX_TSO_LBN))
+ tso_versions |= BIT(1);
+ if (nic_data->datapath_caps2 &
+ (1 << MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V2_LBN))
+ tso_versions |= BIT(2);
+ return tso_versions;
+}
+
static void efx_ef10_tx_init(struct efx_tx_queue *tx_queue)
{
MCDI_DECLARE_BUF(inbuf, MC_CMD_INIT_TXQ_IN_LEN(EFX_MAX_DMAQ_SIZE * 8 /
@@ -2095,6 +2181,7 @@ static void efx_ef10_tx_init(struct efx_tx_queue *tx_queue)
struct efx_channel *channel = tx_queue->channel;
struct efx_nic *efx = tx_queue->efx;
struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ bool tso_v2 = false;
size_t inlen;
dma_addr_t dma_addr;
efx_qword_t *txd;
@@ -2102,13 +2189,21 @@ static void efx_ef10_tx_init(struct efx_tx_queue *tx_queue)
int i;
BUILD_BUG_ON(MC_CMD_INIT_TXQ_OUT_LEN != 0);
+ /* TSOv2 is a limited resource that can only be configured on a limited
+ * number of queues. TSO without checksum offload is not used in
+ * practice, so we enable it only for queues with checksum offload.
+ */
+ if (csum_offload && (nic_data->datapath_caps2 &
+ (1 << MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V2_LBN))) {
+ tso_v2 = true;
+ netif_dbg(efx, hw, efx->net_dev, "Using TSOv2 for channel %u\n",
+ channel->channel);
+ }
+
MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_SIZE, tx_queue->ptr_mask + 1);
MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_TARGET_EVQ, channel->channel);
MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_LABEL, tx_queue->queue);
MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_INSTANCE, tx_queue->queue);
- MCDI_POPULATE_DWORD_2(inbuf, INIT_TXQ_IN_FLAGS,
- INIT_TXQ_IN_FLAG_IP_CSUM_DIS, !csum_offload,
- INIT_TXQ_IN_FLAG_TCP_CSUM_DIS, !csum_offload);
MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_OWNER_ID, 0);
MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_PORT_ID, nic_data->vport_id);
@@ -2124,10 +2219,30 @@ static void efx_ef10_tx_init(struct efx_tx_queue *tx_queue)
inlen = MC_CMD_INIT_TXQ_IN_LEN(entries);
- rc = efx_mcdi_rpc(efx, MC_CMD_INIT_TXQ, inbuf, inlen,
- NULL, 0, NULL);
- if (rc)
- goto fail;
+ do {
+ MCDI_POPULATE_DWORD_3(inbuf, INIT_TXQ_IN_FLAGS,
+ /* This flag was removed from mcdi_pcol.h for
+ * the non-_EXT version of INIT_TXQ. However,
+ * firmware still honours it.
+ */
+ INIT_TXQ_EXT_IN_FLAG_TSOV2_EN, tso_v2,
+ INIT_TXQ_IN_FLAG_IP_CSUM_DIS, !csum_offload,
+ INIT_TXQ_IN_FLAG_TCP_CSUM_DIS, !csum_offload);
+
+ rc = efx_mcdi_rpc_quiet(efx, MC_CMD_INIT_TXQ, inbuf, inlen,
+ NULL, 0, NULL);
+ if (rc == -ENOSPC && tso_v2) {
+ /* Retry without TSOv2 if we're short on contexts. */
+ tso_v2 = false;
+ netif_warn(efx, probe, efx->net_dev,
+ "TSOv2 context not available to segment in hardware. TCP performance may be reduced.\n");
+ } else if (rc) {
+ efx_mcdi_display_error(efx, MC_CMD_INIT_TXQ,
+ MC_CMD_INIT_TXQ_EXT_IN_LEN,
+ NULL, 0, rc);
+ goto fail;
+ }
+ } while (rc);
/* A previous user of this TX queue might have set us up the
* bomb by writing a descriptor to the TX push collector but
@@ -2146,8 +2261,11 @@ static void efx_ef10_tx_init(struct efx_tx_queue *tx_queue)
ESF_DZ_TX_OPTION_IP_CSUM, csum_offload);
tx_queue->write_count = 1;
- if (nic_data->datapath_caps &
- (1 << MC_CMD_GET_CAPABILITIES_OUT_TX_TSO_LBN)) {
+ if (tso_v2) {
+ tx_queue->handle_tso = efx_ef10_tx_tso_desc;
+ tx_queue->tso_version = 2;
+ } else if (nic_data->datapath_caps &
+ (1 << MC_CMD_GET_CAPABILITIES_OUT_TX_TSO_LBN)) {
tx_queue->tso_version = 1;
}
@@ -2202,6 +2320,25 @@ static inline void efx_ef10_notify_tx_desc(struct efx_tx_queue *tx_queue)
ER_DZ_TX_DESC_UPD_DWORD, tx_queue->queue);
}
+#define EFX_EF10_MAX_TX_DESCRIPTOR_LEN 0x3fff
+
+static unsigned int efx_ef10_tx_limit_len(struct efx_tx_queue *tx_queue,
+ dma_addr_t dma_addr, unsigned int len)
+{
+ if (len > EFX_EF10_MAX_TX_DESCRIPTOR_LEN) {
+ /* If we need to break across multiple descriptors we should
+ * stop at a page boundary. This assumes the length limit is
+ * greater than the page size.
+ */
+ dma_addr_t end = dma_addr + EFX_EF10_MAX_TX_DESCRIPTOR_LEN;
+
+ BUILD_BUG_ON(EFX_EF10_MAX_TX_DESCRIPTOR_LEN < EFX_PAGE_SIZE);
+ len = (end & (~(EFX_PAGE_SIZE - 1))) - dma_addr;
+ }
+
+ return len;
+}
+
static void efx_ef10_tx_write(struct efx_tx_queue *tx_queue)
{
unsigned int old_write_count = tx_queue->write_count;
@@ -2245,6 +2382,86 @@ static void efx_ef10_tx_write(struct efx_tx_queue *tx_queue)
}
}
+#define RSS_MODE_HASH_ADDRS (1 << RSS_MODE_HASH_SRC_ADDR_LBN |\
+ 1 << RSS_MODE_HASH_DST_ADDR_LBN)
+#define RSS_MODE_HASH_PORTS (1 << RSS_MODE_HASH_SRC_PORT_LBN |\
+ 1 << RSS_MODE_HASH_DST_PORT_LBN)
+#define RSS_CONTEXT_FLAGS_DEFAULT (1 << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_IPV4_EN_LBN |\
+ 1 << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_TCPV4_EN_LBN |\
+ 1 << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_IPV6_EN_LBN |\
+ 1 << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_TCPV6_EN_LBN |\
+ (RSS_MODE_HASH_ADDRS | RSS_MODE_HASH_PORTS) << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TCP_IPV4_RSS_MODE_LBN |\
+ RSS_MODE_HASH_ADDRS << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_UDP_IPV4_RSS_MODE_LBN |\
+ RSS_MODE_HASH_ADDRS << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_OTHER_IPV4_RSS_MODE_LBN |\
+ (RSS_MODE_HASH_ADDRS | RSS_MODE_HASH_PORTS) << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TCP_IPV6_RSS_MODE_LBN |\
+ RSS_MODE_HASH_ADDRS << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_UDP_IPV6_RSS_MODE_LBN |\
+ RSS_MODE_HASH_ADDRS << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_OTHER_IPV6_RSS_MODE_LBN)
+
+static int efx_ef10_get_rss_flags(struct efx_nic *efx, u32 context, u32 *flags)
+{
+ /* Firmware had a bug (sfc bug 61952) where it would not actually
+ * fill in the flags field in the response to MC_CMD_RSS_CONTEXT_GET_FLAGS.
+ * This meant that it would always contain whatever was previously
+ * in the MCDI buffer. Fortunately, all firmware versions with
+ * this bug have the same default flags value for a newly-allocated
+ * RSS context, and the only time we want to get the flags is just
+ * after allocating. Moreover, the response has a 32-bit hole
+ * where the context ID would be in the request, so we can use an
+ * overlength buffer in the request and pre-fill the flags field
+ * with what we believe the default to be. Thus if the firmware
+ * has the bug, it will leave our pre-filled value in the flags
+ * field of the response, and we will get the right answer.
+ *
+ * However, this does mean that this function should NOT be used if
+ * the RSS context flags might not be their defaults - it is ONLY
+ * reliably correct for a newly-allocated RSS context.
+ */
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_LEN);
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_LEN);
+ size_t outlen;
+ int rc;
+
+ /* Check we have a hole for the context ID */
+ BUILD_BUG_ON(MC_CMD_RSS_CONTEXT_GET_FLAGS_IN_LEN != MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_FLAGS_OFST);
+ MCDI_SET_DWORD(inbuf, RSS_CONTEXT_GET_FLAGS_IN_RSS_CONTEXT_ID, context);
+ MCDI_SET_DWORD(inbuf, RSS_CONTEXT_GET_FLAGS_OUT_FLAGS,
+ RSS_CONTEXT_FLAGS_DEFAULT);
+ rc = efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_GET_FLAGS, inbuf,
+ sizeof(inbuf), outbuf, sizeof(outbuf), &outlen);
+ if (rc == 0) {
+ if (outlen < MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_LEN)
+ rc = -EIO;
+ else
+ *flags = MCDI_DWORD(outbuf, RSS_CONTEXT_GET_FLAGS_OUT_FLAGS);
+ }
+ return rc;
+}
+
+/* Attempt to enable 4-tuple UDP hashing on the specified RSS context.
+ * If we fail, we just leave the RSS context at its default hash settings,
+ * which is safe but may slightly reduce performance.
+ * Defaults are 4-tuple for TCP and 2-tuple for UDP and other-IP, so we
+ * just need to set the UDP ports flags (for both IP versions).
+ */
+static void efx_ef10_set_rss_flags(struct efx_nic *efx, u32 context)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_LEN);
+ u32 flags;
+
+ BUILD_BUG_ON(MC_CMD_RSS_CONTEXT_SET_FLAGS_OUT_LEN != 0);
+
+ if (efx_ef10_get_rss_flags(efx, context, &flags) != 0)
+ return;
+ MCDI_SET_DWORD(inbuf, RSS_CONTEXT_SET_FLAGS_IN_RSS_CONTEXT_ID, context);
+ flags |= RSS_MODE_HASH_PORTS << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_UDP_IPV4_RSS_MODE_LBN;
+ flags |= RSS_MODE_HASH_PORTS << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_UDP_IPV6_RSS_MODE_LBN;
+ MCDI_SET_DWORD(inbuf, RSS_CONTEXT_SET_FLAGS_IN_FLAGS, flags);
+ if (!efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_SET_FLAGS, inbuf, sizeof(inbuf),
+ NULL, 0, NULL))
+ /* Succeeded, so UDP 4-tuple is now enabled */
+ efx->rx_hash_udp_4tuple = true;
+}
+
static int efx_ef10_alloc_rss_context(struct efx_nic *efx, u32 *context,
bool exclusive, unsigned *context_size)
{
@@ -2290,6 +2507,10 @@ static int efx_ef10_alloc_rss_context(struct efx_nic *efx, u32 *context,
if (context_size)
*context_size = rss_spread;
+ if (nic_data->datapath_caps &
+ 1 << MC_CMD_GET_CAPABILITIES_OUT_ADDITIONAL_RSS_MODES_LBN)
+ efx_ef10_set_rss_flags(efx, *context);
+
return 0;
}
@@ -5385,6 +5606,7 @@ const struct efx_nic_type efx_hunt_a0_vf_nic_type = {
.tx_init = efx_ef10_tx_init,
.tx_remove = efx_ef10_tx_remove,
.tx_write = efx_ef10_tx_write,
+ .tx_limit_len = efx_ef10_tx_limit_len,
.rx_push_rss_config = efx_ef10_vf_rx_push_rss_config,
.rx_probe = efx_ef10_rx_probe,
.rx_init = efx_ef10_rx_init,
@@ -5491,6 +5713,7 @@ const struct efx_nic_type efx_hunt_a0_nic_type = {
.tx_init = efx_ef10_tx_init,
.tx_remove = efx_ef10_tx_remove,
.tx_write = efx_ef10_tx_write,
+ .tx_limit_len = efx_ef10_tx_limit_len,
.rx_push_rss_config = efx_ef10_pf_rx_push_rss_config,
.rx_probe = efx_ef10_rx_probe,
.rx_init = efx_ef10_rx_init,
@@ -5550,6 +5773,7 @@ const struct efx_nic_type efx_hunt_a0_nic_type = {
#endif
.get_mac_address = efx_ef10_get_mac_address_pf,
.set_mac_address = efx_ef10_set_mac_address,
+ .tso_versions = efx_ef10_tso_versions,
.revision = EFX_REV_HUNT_A0,
.max_dma_mask = DMA_BIT_MASK(ESF_DZ_TX_KER_BUF_ADDR_WIDTH),
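
Of the ef10.c additions, efx_ef10_tx_limit_len() rewards a worked example: when a fragment exceeds the 0x3fff descriptor limit, it is cut at a page boundary so the next descriptor starts page-aligned. A self-contained illustration of the arithmetic (standalone C, not kernel code; the constants mirror the hunk):

#include <assert.h>

#define MAX_DESC_LEN 0x3fffu          /* EFX_EF10_MAX_TX_DESCRIPTOR_LEN */
#define PAGE_SZ      4096u            /* stand-in for EFX_PAGE_SIZE */

static unsigned int limit_len(unsigned long long dma_addr, unsigned int len)
{
	if (len > MAX_DESC_LEN) {
		/* End of a maximal descriptor, rounded down to a page
		 * boundary; assumes MAX_DESC_LEN >= PAGE_SZ, as the
		 * BUILD_BUG_ON in the hunk enforces.
		 */
		unsigned long long end = dma_addr + MAX_DESC_LEN;

		len = (unsigned int)((end & ~(unsigned long long)(PAGE_SZ - 1))
				     - dma_addr);
	}
	return len;
}

int main(void)
{
	/* 0x20000 bytes at offset 0x100: the first descriptor stops just
	 * below the page boundary at 0x4000, i.e. carries 0x3f00 bytes.
	 */
	assert(limit_len(0x100, 0x20000) == 0x3f00);
	assert(limit_len(0x0, 0x1000) == 0x1000);  /* short fragments pass */
	return 0;
}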
diff --git a/drivers/net/ethernet/sfc/ef10_regs.h b/drivers/net/ethernet/sfc/ef10_regs.h
index 62a55dde61d5..2c4bf9476c37 100644
--- a/drivers/net/ethernet/sfc/ef10_regs.h
+++ b/drivers/net/ethernet/sfc/ef10_regs.h
@@ -1,6 +1,6 @@
/****************************************************************************
* Driver for Solarflare network controllers and boards
- * Copyright 2012-2013 Solarflare Communications Inc.
+ * Copyright 2012-2015 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
@@ -147,8 +147,14 @@
#define ESF_DZ_RX_OVERRIDE_HOLDOFF_WIDTH 1
#define ESF_DZ_RX_DROP_EVENT_LBN 58
#define ESF_DZ_RX_DROP_EVENT_WIDTH 1
-#define ESF_DZ_RX_EV_RSVD2_LBN 54
-#define ESF_DZ_RX_EV_RSVD2_WIDTH 4
+#define ESF_DD_RX_EV_RSVD2_LBN 54
+#define ESF_DD_RX_EV_RSVD2_WIDTH 4
+#define ESF_EZ_RX_TCP_UDP_INNER_CHKSUM_ERR_LBN 57
+#define ESF_EZ_RX_TCP_UDP_INNER_CHKSUM_ERR_WIDTH 1
+#define ESF_EZ_RX_IP_INNER_CHKSUM_ERR_LBN 56
+#define ESF_EZ_RX_IP_INNER_CHKSUM_ERR_WIDTH 1
+#define ESF_EZ_RX_EV_RSVD2_LBN 54
+#define ESF_EZ_RX_EV_RSVD2_WIDTH 2
#define ESF_DZ_RX_EV_SOFT2_LBN 52
#define ESF_DZ_RX_EV_SOFT2_WIDTH 2
#define ESF_DZ_RX_DSC_PTR_LBITS_LBN 48
@@ -192,12 +198,21 @@
#define ESF_DZ_RX_MAC_CLASS_WIDTH 1
#define ESE_DZ_MAC_CLASS_MCAST 1
#define ESE_DZ_MAC_CLASS_UCAST 0
-#define ESF_DZ_RX_EV_SOFT1_LBN 32
-#define ESF_DZ_RX_EV_SOFT1_WIDTH 3
-#define ESF_DZ_RX_EV_RSVD1_LBN 31
-#define ESF_DZ_RX_EV_RSVD1_WIDTH 1
-#define ESF_DZ_RX_ABORT_LBN 30
-#define ESF_DZ_RX_ABORT_WIDTH 1
+#define ESF_DD_RX_EV_SOFT1_LBN 32
+#define ESF_DD_RX_EV_SOFT1_WIDTH 3
+#define ESF_EZ_RX_EV_SOFT1_LBN 34
+#define ESF_EZ_RX_EV_SOFT1_WIDTH 1
+#define ESF_EZ_RX_ENCAP_HDR_LBN 32
+#define ESF_EZ_RX_ENCAP_HDR_WIDTH 2
+#define ESE_EZ_ENCAP_HDR_GRE 2
+#define ESE_EZ_ENCAP_HDR_VXLAN 1
+#define ESE_EZ_ENCAP_HDR_NONE 0
+#define ESF_DD_RX_EV_RSVD1_LBN 30
+#define ESF_DD_RX_EV_RSVD1_WIDTH 2
+#define ESF_EZ_RX_EV_RSVD1_LBN 31
+#define ESF_EZ_RX_EV_RSVD1_WIDTH 1
+#define ESF_EZ_RX_ABORT_LBN 30
+#define ESF_EZ_RX_ABORT_WIDTH 1
#define ESF_DZ_RX_ECC_ERR_LBN 29
#define ESF_DZ_RX_ECC_ERR_WIDTH 1
#define ESF_DZ_RX_CRC1_ERR_LBN 28
@@ -235,6 +250,12 @@
#define ESE_DZ_TX_OPTION_DESC_TSO 7
#define ESE_DZ_TX_OPTION_DESC_VLAN 6
#define ESE_DZ_TX_OPTION_DESC_CRC_CSUM 0
+#define ESF_DZ_TX_OPTION_TS_AT_TXDP_LBN 8
+#define ESF_DZ_TX_OPTION_TS_AT_TXDP_WIDTH 1
+#define ESF_DZ_TX_OPTION_INNER_UDP_TCP_CSUM_LBN 7
+#define ESF_DZ_TX_OPTION_INNER_UDP_TCP_CSUM_WIDTH 1
+#define ESF_DZ_TX_OPTION_INNER_IP_CSUM_LBN 6
+#define ESF_DZ_TX_OPTION_INNER_IP_CSUM_WIDTH 1
#define ESF_DZ_TX_TIMESTAMP_LBN 5
#define ESF_DZ_TX_TIMESTAMP_WIDTH 1
#define ESF_DZ_TX_OPTION_CRC_MODE_LBN 2
@@ -257,14 +278,22 @@
#define ESF_DZ_TX_OVERRIDE_HOLDOFF_WIDTH 1
#define ESF_DZ_TX_DROP_EVENT_LBN 58
#define ESF_DZ_TX_DROP_EVENT_WIDTH 1
-#define ESF_DZ_TX_EV_RSVD_LBN 48
-#define ESF_DZ_TX_EV_RSVD_WIDTH 10
+#define ESF_DD_TX_EV_RSVD_LBN 48
+#define ESF_DD_TX_EV_RSVD_WIDTH 10
+#define ESF_EZ_TCP_UDP_INNER_CHKSUM_ERR_LBN 57
+#define ESF_EZ_TCP_UDP_INNER_CHKSUM_ERR_WIDTH 1
+#define ESF_EZ_IP_INNER_CHKSUM_ERR_LBN 56
+#define ESF_EZ_IP_INNER_CHKSUM_ERR_WIDTH 1
+#define ESF_EZ_TX_EV_RSVD_LBN 48
+#define ESF_EZ_TX_EV_RSVD_WIDTH 8
#define ESF_DZ_TX_SOFT2_LBN 32
#define ESF_DZ_TX_SOFT2_WIDTH 16
-#define ESF_DZ_TX_CAN_MERGE_LBN 31
-#define ESF_DZ_TX_CAN_MERGE_WIDTH 1
-#define ESF_DZ_TX_SOFT1_LBN 24
-#define ESF_DZ_TX_SOFT1_WIDTH 7
+#define ESF_DD_TX_SOFT1_LBN 24
+#define ESF_DD_TX_SOFT1_WIDTH 8
+#define ESF_EZ_TX_CAN_MERGE_LBN 31
+#define ESF_EZ_TX_CAN_MERGE_WIDTH 1
+#define ESF_EZ_TX_SOFT1_LBN 24
+#define ESF_EZ_TX_SOFT1_WIDTH 7
#define ESF_DZ_TX_QLABEL_LBN 16
#define ESF_DZ_TX_QLABEL_WIDTH 5
#define ESF_DZ_TX_DESCR_INDX_LBN 0
@@ -301,6 +330,10 @@
#define ESE_DZ_TX_OPTION_DESC_TSO 7
#define ESE_DZ_TX_OPTION_DESC_VLAN 6
#define ESE_DZ_TX_OPTION_DESC_CRC_CSUM 0
+#define ESF_DZ_TX_TSO_OPTION_TYPE_LBN 56
+#define ESF_DZ_TX_TSO_OPTION_TYPE_WIDTH 4
+#define ESE_DZ_TX_TSO_OPTION_DESC_ENCAP 1
+#define ESE_DZ_TX_TSO_OPTION_DESC_NORMAL 0
#define ESF_DZ_TX_TSO_TCP_FLAGS_LBN 48
#define ESF_DZ_TX_TSO_TCP_FLAGS_WIDTH 8
#define ESF_DZ_TX_TSO_IP_ID_LBN 32
@@ -308,6 +341,46 @@
#define ESF_DZ_TX_TSO_TCP_SEQNO_LBN 0
#define ESF_DZ_TX_TSO_TCP_SEQNO_WIDTH 32
+/* TX_TSO_FATSO2A_DESC */
+#define ESF_DZ_TX_DESC_IS_OPT_LBN 63
+#define ESF_DZ_TX_DESC_IS_OPT_WIDTH 1
+#define ESF_DZ_TX_OPTION_TYPE_LBN 60
+#define ESF_DZ_TX_OPTION_TYPE_WIDTH 3
+#define ESE_DZ_TX_OPTION_DESC_TSO 7
+#define ESE_DZ_TX_OPTION_DESC_VLAN 6
+#define ESE_DZ_TX_OPTION_DESC_CRC_CSUM 0
+#define ESF_DZ_TX_TSO_OPTION_TYPE_LBN 56
+#define ESF_DZ_TX_TSO_OPTION_TYPE_WIDTH 4
+#define ESE_DZ_TX_TSO_OPTION_DESC_FATSO2B 3
+#define ESE_DZ_TX_TSO_OPTION_DESC_FATSO2A 2
+#define ESE_DZ_TX_TSO_OPTION_DESC_ENCAP 1
+#define ESE_DZ_TX_TSO_OPTION_DESC_NORMAL 0
+#define ESF_DZ_TX_TSO_IP_ID_LBN 32
+#define ESF_DZ_TX_TSO_IP_ID_WIDTH 16
+#define ESF_DZ_TX_TSO_TCP_SEQNO_LBN 0
+#define ESF_DZ_TX_TSO_TCP_SEQNO_WIDTH 32
+
+
+/* TX_TSO_FATSO2B_DESC */
+#define ESF_DZ_TX_DESC_IS_OPT_LBN 63
+#define ESF_DZ_TX_DESC_IS_OPT_WIDTH 1
+#define ESF_DZ_TX_OPTION_TYPE_LBN 60
+#define ESF_DZ_TX_OPTION_TYPE_WIDTH 3
+#define ESE_DZ_TX_OPTION_DESC_TSO 7
+#define ESE_DZ_TX_OPTION_DESC_VLAN 6
+#define ESE_DZ_TX_OPTION_DESC_CRC_CSUM 0
+#define ESF_DZ_TX_TSO_OPTION_TYPE_LBN 56
+#define ESF_DZ_TX_TSO_OPTION_TYPE_WIDTH 4
+#define ESE_DZ_TX_TSO_OPTION_DESC_FATSO2B 3
+#define ESE_DZ_TX_TSO_OPTION_DESC_FATSO2A 2
+#define ESE_DZ_TX_TSO_OPTION_DESC_ENCAP 1
+#define ESE_DZ_TX_TSO_OPTION_DESC_NORMAL 0
+#define ESF_DZ_TX_TSO_OUTER_IP_ID_LBN 0
+#define ESF_DZ_TX_TSO_OUTER_IP_ID_WIDTH 16
+#define ESF_DZ_TX_TSO_TCP_MSS_LBN 32
+#define ESF_DZ_TX_TSO_TCP_MSS_WIDTH 16
+
+
/*************************************************************************/
/* TX_DESC_UPD_REG: Transmit descriptor update register.
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index 6b89e4a7b164..5a5dcad8c49a 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -82,7 +82,6 @@ const char *const efx_reset_type_names[] = {
[RESET_TYPE_DISABLE] = "DISABLE",
[RESET_TYPE_TX_WATCHDOG] = "TX_WATCHDOG",
[RESET_TYPE_INT_ERROR] = "INT_ERROR",
- [RESET_TYPE_RX_RECOVERY] = "RX_RECOVERY",
[RESET_TYPE_DMA_ERROR] = "DMA_ERROR",
[RESET_TYPE_TX_SKIP] = "TX_SKIP",
[RESET_TYPE_MC_FAILURE] = "MC_FAILURE",
@@ -356,7 +355,7 @@ static int efx_probe_eventq(struct efx_channel *channel)
/* Build an event queue with room for one event per tx and rx buffer,
* plus some extra for link state events and MCDI completions. */
entries = roundup_pow_of_two(efx->rxq_entries + efx->txq_entries + 128);
- EFX_BUG_ON_PARANOID(entries > EFX_MAX_EVQ_SIZE);
+ EFX_WARN_ON_PARANOID(entries > EFX_MAX_EVQ_SIZE);
channel->eventq_mask = max(entries, EFX_MIN_EVQ_SIZE) - 1;
return efx_nic_probe_eventq(channel);
@@ -733,16 +732,7 @@ static void efx_stop_datapath(struct efx_nic *efx)
}
rc = efx->type->fini_dmaq(efx);
- if (rc && EFX_WORKAROUND_7803(efx)) {
- /* Schedule a reset to recover from the flush failure. The
- * descriptor caches reference memory we're about to free,
- * but falcon_reconfigure_mac_wrapper() won't reconnect
- * the MACs because of the pending reset.
- */
- netif_err(efx, drv, efx->net_dev,
- "Resetting to recover from flush failure\n");
- efx_schedule_reset(efx, RESET_TYPE_ALL);
- } else if (rc) {
+ if (rc) {
netif_err(efx, drv, efx->net_dev, "failed to flush queues\n");
} else {
netif_dbg(efx, drv, efx->net_dev,
@@ -1892,15 +1882,13 @@ static void efx_start_all(struct efx_nic *efx)
queue_delayed_work(efx->workqueue, &efx->monitor_work,
efx_monitor_interval);
- /* If link state detection is normally event-driven, we have
+ /* Link state detection is normally event-driven; we have
* to poll now because we could have missed a change
*/
- if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0) {
- mutex_lock(&efx->mac_lock);
- if (efx->phy_op->poll(efx))
- efx_link_status_changed(efx);
- mutex_unlock(&efx->mac_lock);
- }
+ mutex_lock(&efx->mac_lock);
+ if (efx->phy_op->poll(efx))
+ efx_link_status_changed(efx);
+ mutex_unlock(&efx->mac_lock);
efx->type->start_stats(efx);
efx->type->pull_stats(efx);
@@ -2113,10 +2101,9 @@ static void efx_init_napi(struct efx_nic *efx)
static void efx_fini_napi_channel(struct efx_channel *channel)
{
- if (channel->napi_dev) {
+ if (channel->napi_dev)
netif_napi_del(&channel->napi_str);
- napi_hash_del(&channel->napi_str);
- }
+
channel->napi_dev = NULL;
}
@@ -2266,18 +2253,6 @@ static int efx_change_mtu(struct net_device *net_dev, int new_mtu)
rc = efx_check_disabled(efx);
if (rc)
return rc;
- if (new_mtu > EFX_MAX_MTU) {
- netif_err(efx, drv, efx->net_dev,
- "Requested MTU of %d too big (max: %d)\n",
- new_mtu, EFX_MAX_MTU);
- return -EINVAL;
- }
- if (new_mtu < EFX_MIN_MTU) {
- netif_err(efx, drv, efx->net_dev,
- "Requested MTU of %d too small (min: %d)\n",
- new_mtu, EFX_MIN_MTU);
- return -EINVAL;
- }
netif_dbg(efx, drv, efx->net_dev, "changing MTU to %d\n", new_mtu);
@@ -2481,6 +2456,8 @@ static int efx_register_netdev(struct efx_nic *efx)
net_dev->priv_flags |= IFF_UNICAST_FLT;
net_dev->ethtool_ops = &efx_ethtool_ops;
net_dev->gso_max_segs = EFX_TSO_MAX_SEGS;
+ net_dev->min_mtu = EFX_MIN_MTU;
+ net_dev->max_mtu = EFX_MAX_MTU;
rtnl_lock();
@@ -2853,12 +2830,6 @@ void efx_schedule_reset(struct efx_nic *efx, enum reset_type type)
/* PCI device ID table */
static const struct pci_device_id efx_pci_table[] = {
- {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE,
- PCI_DEVICE_ID_SOLARFLARE_SFC4000A_0),
- .driver_data = (unsigned long) &falcon_a1_nic_type},
- {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE,
- PCI_DEVICE_ID_SOLARFLARE_SFC4000B),
- .driver_data = (unsigned long) &falcon_b0_nic_type},
{PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x0803), /* SFC9020 */
.driver_data = (unsigned long) &siena_a0_nic_type},
{PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x0813), /* SFL9021 */
@@ -3211,23 +3182,6 @@ static int efx_pci_probe(struct pci_dev *pci_dev,
efx = netdev_priv(net_dev);
efx->type = (const struct efx_nic_type *) entry->driver_data;
efx->fixed_features |= NETIF_F_HIGHDMA;
- net_dev->features |= (efx->type->offload_features | NETIF_F_SG |
- NETIF_F_TSO | NETIF_F_RXCSUM);
- if (efx->type->offload_features & (NETIF_F_IPV6_CSUM | NETIF_F_HW_CSUM))
- net_dev->features |= NETIF_F_TSO6;
- /* Mask for features that also apply to VLAN devices */
- net_dev->vlan_features |= (NETIF_F_HW_CSUM | NETIF_F_SG |
- NETIF_F_HIGHDMA | NETIF_F_ALL_TSO |
- NETIF_F_RXCSUM);
-
- net_dev->hw_features = net_dev->features & ~efx->fixed_features;
-
- /* Disable VLAN filtering by default. It may be enforced if
- * the feature is fixed (i.e. VLAN filters are required to
- * receive VLAN tagged packets due to vPort restrictions).
- */
- net_dev->features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
- net_dev->features |= efx->fixed_features;
pci_set_drvdata(pci_dev, efx);
SET_NETDEV_DEV(net_dev, &pci_dev->dev);
@@ -3250,6 +3204,27 @@ static int efx_pci_probe(struct pci_dev *pci_dev,
if (rc)
goto fail3;
+ net_dev->features |= (efx->type->offload_features | NETIF_F_SG |
+ NETIF_F_TSO | NETIF_F_RXCSUM);
+ if (efx->type->offload_features & (NETIF_F_IPV6_CSUM | NETIF_F_HW_CSUM))
+ net_dev->features |= NETIF_F_TSO6;
+ /* Check whether device supports TSO */
+ if (!efx->type->tso_versions || !efx->type->tso_versions(efx))
+ net_dev->features &= ~NETIF_F_ALL_TSO;
+ /* Mask for features that also apply to VLAN devices */
+ net_dev->vlan_features |= (NETIF_F_HW_CSUM | NETIF_F_SG |
+ NETIF_F_HIGHDMA | NETIF_F_ALL_TSO |
+ NETIF_F_RXCSUM);
+
+ net_dev->hw_features = net_dev->features & ~efx->fixed_features;
+
+ /* Disable VLAN filtering by default. It may be enforced if
+ * the feature is fixed (i.e. VLAN filters are required to
+ * receive VLAN tagged packets due to vPort restrictions).
+ */
+ net_dev->features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
+ net_dev->features |= efx->fixed_features;
+
rc = efx_register_netdev(efx);
if (rc)
goto fail4;
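
A note on the reordering above: the feature flags are now set only after efx->type->probe() has run, because the new tso_versions() check consults the probed datapath capabilities. The gate, isolated from the hunk for clarity:

/* Advertise TSO only when the NIC reports at least one TSO engine;
 * efx_ef10_tso_versions() (earlier in this patch) returns BIT(1) for
 * firmware TSO v1 and BIT(2) for TSO v2.
 */
if (!efx->type->tso_versions || !efx->type->tso_versions(efx))
	net_dev->features &= ~NETIF_F_ALL_TSO;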
diff --git a/drivers/net/ethernet/sfc/enum.h b/drivers/net/ethernet/sfc/enum.h
index c94f56271dd4..6fa824211d91 100644
--- a/drivers/net/ethernet/sfc/enum.h
+++ b/drivers/net/ethernet/sfc/enum.h
@@ -148,7 +148,6 @@ enum efx_loopback_mode {
* @RESET_TYPE_DISABLE: Reset datapath, MAC and PHY; leave NIC disabled
* @RESET_TYPE_TX_WATCHDOG: reset due to TX watchdog
* @RESET_TYPE_INT_ERROR: reset due to internal error
- * @RESET_TYPE_RX_RECOVERY: reset to recover from RX datapath errors
* @RESET_TYPE_DMA_ERROR: DMA error
* @RESET_TYPE_TX_SKIP: hardware completed empty tx descriptors
* @RESET_TYPE_MC_FAILURE: MC reboot/assertion
@@ -166,15 +165,13 @@ enum reset_type {
RESET_TYPE_MAX_METHOD,
RESET_TYPE_TX_WATCHDOG,
RESET_TYPE_INT_ERROR,
- RESET_TYPE_RX_RECOVERY,
RESET_TYPE_DMA_ERROR,
RESET_TYPE_TX_SKIP,
RESET_TYPE_MC_FAILURE,
/* RESET_TYPE_MCDI_TIMEOUT is actually a method, not just a reason, but
* it doesn't fit the scope hierarchy (not well-ordered by inclusion).
* We encode this by having its enum value be greater than
- * RESET_TYPE_MAX_METHOD. This also prevents issuing it with
- * efx_ioctl_reset.
+ * RESET_TYPE_MAX_METHOD.
*/
RESET_TYPE_MCDI_TIMEOUT,
RESET_TYPE_MAX,
diff --git a/drivers/net/ethernet/sfc/ethtool.c b/drivers/net/ethernet/sfc/ethtool.c
index 445ccdb6bc67..87bdc56b4e3a 100644
--- a/drivers/net/ethernet/sfc/ethtool.c
+++ b/drivers/net/ethernet/sfc/ethtool.c
@@ -69,8 +69,10 @@ static const struct efx_sw_stat_desc efx_sw_stat_desc[] = {
EFX_ETHTOOL_UINT_TXQ_STAT(tso_bursts),
EFX_ETHTOOL_UINT_TXQ_STAT(tso_long_headers),
EFX_ETHTOOL_UINT_TXQ_STAT(tso_packets),
+ EFX_ETHTOOL_UINT_TXQ_STAT(tso_fallbacks),
EFX_ETHTOOL_UINT_TXQ_STAT(pushes),
EFX_ETHTOOL_UINT_TXQ_STAT(pio_packets),
+ EFX_ETHTOOL_UINT_TXQ_STAT(cb_packets),
EFX_ETHTOOL_ATOMIC_NIC_ERROR_STAT(rx_reset),
EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_tobe_disc),
EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_ip_hdr_chksum_err),
@@ -118,44 +120,53 @@ static int efx_ethtool_phys_id(struct net_device *net_dev,
}
/* This must be called with rtnl_lock held. */
-static int efx_ethtool_get_settings(struct net_device *net_dev,
- struct ethtool_cmd *ecmd)
+static int
+efx_ethtool_get_link_ksettings(struct net_device *net_dev,
+ struct ethtool_link_ksettings *cmd)
{
struct efx_nic *efx = netdev_priv(net_dev);
struct efx_link_state *link_state = &efx->link_state;
+ u32 supported;
mutex_lock(&efx->mac_lock);
- efx->phy_op->get_settings(efx, ecmd);
+ efx->phy_op->get_link_ksettings(efx, cmd);
mutex_unlock(&efx->mac_lock);
/* Both MACs support pause frames (bidirectional and respond-only) */
- ecmd->supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;
+ ethtool_convert_link_mode_to_legacy_u32(&supported,
+ cmd->link_modes.supported);
+
+ supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;
+
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
+ supported);
if (LOOPBACK_INTERNAL(efx)) {
- ethtool_cmd_speed_set(ecmd, link_state->speed);
- ecmd->duplex = link_state->fd ? DUPLEX_FULL : DUPLEX_HALF;
+ cmd->base.speed = link_state->speed;
+ cmd->base.duplex = link_state->fd ? DUPLEX_FULL : DUPLEX_HALF;
}
return 0;
}
/* This must be called with rtnl_lock held. */
-static int efx_ethtool_set_settings(struct net_device *net_dev,
- struct ethtool_cmd *ecmd)
+static int
+efx_ethtool_set_link_ksettings(struct net_device *net_dev,
+ const struct ethtool_link_ksettings *cmd)
{
struct efx_nic *efx = netdev_priv(net_dev);
int rc;
/* GMAC does not support 1000Mbps HD */
- if ((ethtool_cmd_speed(ecmd) == SPEED_1000) &&
- (ecmd->duplex != DUPLEX_FULL)) {
+ if ((cmd->base.speed == SPEED_1000) &&
+ (cmd->base.duplex != DUPLEX_FULL)) {
netif_dbg(efx, drv, efx->net_dev,
"rejecting unsupported 1000Mbps HD setting\n");
return -EINVAL;
}
mutex_lock(&efx->mac_lock);
- rc = efx->phy_op->set_settings(efx, ecmd);
+ rc = efx->phy_op->set_link_ksettings(efx, cmd);
mutex_unlock(&efx->mac_lock);
return rc;
}
@@ -167,9 +178,8 @@ static void efx_ethtool_get_drvinfo(struct net_device *net_dev,
strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
strlcpy(info->version, EFX_DRIVER_VERSION, sizeof(info->version));
- if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0)
- efx_mcdi_print_fwver(efx, info->fw_version,
- sizeof(info->fw_version));
+ efx_mcdi_print_fwver(efx, info->fw_version,
+ sizeof(info->fw_version));
strlcpy(info->bus_info, pci_name(efx->pci_dev), sizeof(info->bus_info));
}
@@ -332,12 +342,12 @@ static int efx_ethtool_fill_self_tests(struct efx_nic *efx,
"core", 0, "registers", NULL);
if (efx->phy_op->run_tests != NULL) {
- EFX_BUG_ON_PARANOID(efx->phy_op->test_name == NULL);
+ EFX_WARN_ON_PARANOID(efx->phy_op->test_name == NULL);
for (i = 0; true; ++i) {
const char *name;
- EFX_BUG_ON_PARANOID(i >= EFX_MAX_PHY_TESTS);
+ EFX_WARN_ON_PARANOID(i >= EFX_MAX_PHY_TESTS);
name = efx->phy_op->test_name(efx, i);
if (name == NULL)
break;
@@ -964,35 +974,33 @@ efx_ethtool_get_rxnfc(struct net_device *net_dev,
return 0;
case ETHTOOL_GRXFH: {
- unsigned min_revision = 0;
-
info->data = 0;
switch (info->flow_type) {
+ case UDP_V4_FLOW:
+ if (efx->rx_hash_udp_4tuple)
+ /* fall through */
case TCP_V4_FLOW:
- info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
/* fall through */
- case UDP_V4_FLOW:
case SCTP_V4_FLOW:
case AH_ESP_V4_FLOW:
case IPV4_FLOW:
info->data |= RXH_IP_SRC | RXH_IP_DST;
- min_revision = EFX_REV_FALCON_B0;
break;
+ case UDP_V6_FLOW:
+ if (efx->rx_hash_udp_4tuple)
+ /* fall through */
case TCP_V6_FLOW:
- info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
/* fall through */
- case UDP_V6_FLOW:
case SCTP_V6_FLOW:
case AH_ESP_V6_FLOW:
case IPV6_FLOW:
info->data |= RXH_IP_SRC | RXH_IP_DST;
- min_revision = EFX_REV_SIENA_A0;
break;
default:
break;
}
- if (efx_nic_rev(efx) < min_revision)
- info->data = 0;
return 0;
}
@@ -1265,9 +1273,7 @@ static u32 efx_ethtool_get_rxfh_indir_size(struct net_device *net_dev)
{
struct efx_nic *efx = netdev_priv(net_dev);
- return ((efx_nic_rev(efx) < EFX_REV_FALCON_B0 ||
- efx->n_rx_channels == 1) ?
- 0 : ARRAY_SIZE(efx->rx_indir_table));
+ return (efx->n_rx_channels == 1) ? 0 : ARRAY_SIZE(efx->rx_indir_table);
}
static int efx_ethtool_get_rxfh(struct net_device *net_dev, u32 *indir, u8 *key,
@@ -1345,8 +1351,6 @@ static int efx_ethtool_get_module_info(struct net_device *net_dev,
}
const struct ethtool_ops efx_ethtool_ops = {
- .get_settings = efx_ethtool_get_settings,
- .set_settings = efx_ethtool_set_settings,
.get_drvinfo = efx_ethtool_get_drvinfo,
.get_regs_len = efx_ethtool_get_regs_len,
.get_regs = efx_ethtool_get_regs,
@@ -1376,4 +1380,6 @@ const struct ethtool_ops efx_ethtool_ops = {
.get_ts_info = efx_ethtool_get_ts_info,
.get_module_info = efx_ethtool_get_module_info,
.get_module_eeprom = efx_ethtool_get_module_eeprom,
+ .get_link_ksettings = efx_ethtool_get_link_ksettings,
+ .set_link_ksettings = efx_ethtool_set_link_ksettings,
};
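
The ethtool conversion above is the stock get_settings/set_settings to link_ksettings migration: speed and duplex move into cmd->base, and legacy u32 masks are translated with the ethtool helpers, as the hunk does for the pause bits. A minimal sketch ("foo_" hypothetical, link modes chosen arbitrarily):

static int foo_get_link_ksettings(struct net_device *dev,
				  struct ethtool_link_ksettings *cmd)
{
	u32 supported = SUPPORTED_10000baseT_Full |
			SUPPORTED_Pause | SUPPORTED_Asym_Pause;

	/* Legacy u32 mask -> link-mode bitmap, as in the hunk. */
	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
						supported);
	cmd->base.speed = SPEED_10000;
	cmd->base.duplex = DUPLEX_FULL;
	return 0;
}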
diff --git a/drivers/net/ethernet/sfc/falcon/Kconfig b/drivers/net/ethernet/sfc/falcon/Kconfig
new file mode 100644
index 000000000000..6248e96253a2
--- /dev/null
+++ b/drivers/net/ethernet/sfc/falcon/Kconfig
@@ -0,0 +1,21 @@
+config SFC_FALCON
+ tristate "Solarflare SFC4000 support"
+ depends on PCI
+ select MDIO
+ select CRC32
+ select I2C
+ select I2C_ALGOBIT
+ ---help---
+ This driver supports 10-gigabit Ethernet cards based on
+ the Solarflare SFC4000 controller.
+
+ To compile this driver as a module, choose M here. The module
+ will be called sfc-falcon.
+config SFC_FALCON_MTD
+ bool "Solarflare SFC4000 MTD support"
+ depends on SFC_FALCON && MTD && !(SFC_FALCON=y && MTD=m)
+ default y
+ ---help---
+ This exposes the on-board flash and/or EEPROM as MTD devices
+ (e.g. /dev/mtd1). This is required to update the boot
+ configuration under Linux.
diff --git a/drivers/net/ethernet/sfc/falcon/Makefile b/drivers/net/ethernet/sfc/falcon/Makefile
new file mode 100644
index 000000000000..aa1b45979ca4
--- /dev/null
+++ b/drivers/net/ethernet/sfc/falcon/Makefile
@@ -0,0 +1,6 @@
+sfc-falcon-y += efx.o nic.o farch.o falcon.o tx.o rx.o selftest.o \
+ ethtool.o qt202x_phy.o mdio_10g.o tenxpress.o \
+ txc43128_phy.o falcon_boards.o
+
+sfc-falcon-$(CONFIG_SFC_FALCON_MTD) += mtd.o
+obj-$(CONFIG_SFC_FALCON) += sfc-falcon.o
diff --git a/drivers/net/ethernet/sfc/falcon/bitfield.h b/drivers/net/ethernet/sfc/falcon/bitfield.h
new file mode 100644
index 000000000000..230fd77bd311
--- /dev/null
+++ b/drivers/net/ethernet/sfc/falcon/bitfield.h
@@ -0,0 +1,542 @@
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2005-2006 Fen Systems Ltd.
+ * Copyright 2006-2013 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#ifndef EF4_BITFIELD_H
+#define EF4_BITFIELD_H
+
+/*
+ * Efx bitfield access
+ *
+ * Efx NICs make extensive use of bitfields up to 128 bits
+ * wide. Since there is no native 128-bit datatype on most systems,
+ * and since 64-bit datatypes are inefficient on 32-bit systems and
+ * vice versa, we wrap accesses in a way that uses the most efficient
+ * datatype.
+ *
+ * The NICs are PCI devices and therefore little-endian. Since most
+ * of the quantities that we deal with are DMAed to/from host memory,
+ * we define our datatypes (ef4_oword_t, ef4_qword_t and
+ * ef4_dword_t) to be little-endian.
+ */
+
+/* Lowest bit numbers and widths */
+#define EF4_DUMMY_FIELD_LBN 0
+#define EF4_DUMMY_FIELD_WIDTH 0
+#define EF4_WORD_0_LBN 0
+#define EF4_WORD_0_WIDTH 16
+#define EF4_WORD_1_LBN 16
+#define EF4_WORD_1_WIDTH 16
+#define EF4_DWORD_0_LBN 0
+#define EF4_DWORD_0_WIDTH 32
+#define EF4_DWORD_1_LBN 32
+#define EF4_DWORD_1_WIDTH 32
+#define EF4_DWORD_2_LBN 64
+#define EF4_DWORD_2_WIDTH 32
+#define EF4_DWORD_3_LBN 96
+#define EF4_DWORD_3_WIDTH 32
+#define EF4_QWORD_0_LBN 0
+#define EF4_QWORD_0_WIDTH 64
+
+/* Specified attribute (e.g. LBN) of the specified field */
+#define EF4_VAL(field, attribute) field ## _ ## attribute
+/* Low bit number of the specified field */
+#define EF4_LOW_BIT(field) EF4_VAL(field, LBN)
+/* Bit width of the specified field */
+#define EF4_WIDTH(field) EF4_VAL(field, WIDTH)
+/* High bit number of the specified field */
+#define EF4_HIGH_BIT(field) (EF4_LOW_BIT(field) + EF4_WIDTH(field) - 1)
+/* Mask equal in width to the specified field.
+ *
+ * For example, a field with width 5 would have a mask of 0x1f.
+ *
+ * The maximum width mask that can be generated is 64 bits.
+ */
+#define EF4_MASK64(width) \
+ ((width) == 64 ? ~((u64) 0) : \
+ (((((u64) 1) << (width))) - 1))
+
+/* Mask equal in width to the specified field.
+ *
+ * For example, a field with width 5 would have a mask of 0x1f.
+ *
+ * The maximum width mask that can be generated is 32 bits. Use
+ * EF4_MASK64 for higher width fields.
+ */
+#define EF4_MASK32(width) \
+ ((width) == 32 ? ~((u32) 0) : \
+ (((((u32) 1) << (width))) - 1))
+
+/* A doubleword (i.e. 4 byte) datatype - little-endian in HW */
+typedef union ef4_dword {
+ __le32 u32[1];
+} ef4_dword_t;
+
+/* A quadword (i.e. 8 byte) datatype - little-endian in HW */
+typedef union ef4_qword {
+ __le64 u64[1];
+ __le32 u32[2];
+ ef4_dword_t dword[2];
+} ef4_qword_t;
+
+/* An octword (eight-word, i.e. 16 byte) datatype - little-endian in HW */
+typedef union ef4_oword {
+ __le64 u64[2];
+ ef4_qword_t qword[2];
+ __le32 u32[4];
+ ef4_dword_t dword[4];
+} ef4_oword_t;
+
+/* Format string and value expanders for printk */
+#define EF4_DWORD_FMT "%08x"
+#define EF4_QWORD_FMT "%08x:%08x"
+#define EF4_OWORD_FMT "%08x:%08x:%08x:%08x"
+#define EF4_DWORD_VAL(dword) \
+ ((unsigned int) le32_to_cpu((dword).u32[0]))
+#define EF4_QWORD_VAL(qword) \
+ ((unsigned int) le32_to_cpu((qword).u32[1])), \
+ ((unsigned int) le32_to_cpu((qword).u32[0]))
+#define EF4_OWORD_VAL(oword) \
+ ((unsigned int) le32_to_cpu((oword).u32[3])), \
+ ((unsigned int) le32_to_cpu((oword).u32[2])), \
+ ((unsigned int) le32_to_cpu((oword).u32[1])), \
+ ((unsigned int) le32_to_cpu((oword).u32[0]))
+
+/*
+ * Extract bit field portion [low,high) from the native-endian element
+ * which contains bits [min,max).
+ *
+ * For example, suppose "element" represents the high 32 bits of a
+ * 64-bit value, and we wish to extract the bits belonging to the bit
+ * field occupying bits 28-45 of this 64-bit value.
+ *
+ * Then EF4_EXTRACT ( element, 32, 63, 28, 45 ) would give
+ *
+ * ( element ) << 4
+ *
+ * The result will contain the relevant bits filled in within the range
+ * [0,high-low), with garbage in bits [high-low+1,...).
+ */
+#define EF4_EXTRACT_NATIVE(native_element, min, max, low, high) \
+ ((low) > (max) || (high) < (min) ? 0 : \
+ (low) > (min) ? \
+ (native_element) >> ((low) - (min)) : \
+ (native_element) << ((min) - (low)))
+
+/*
+ * Extract bit field portion [low,high) from the 64-bit little-endian
+ * element which contains bits [min,max)
+ */
+#define EF4_EXTRACT64(element, min, max, low, high) \
+ EF4_EXTRACT_NATIVE(le64_to_cpu(element), min, max, low, high)
+
+/*
+ * Extract bit field portion [low,high) from the 32-bit little-endian
+ * element which contains bits [min,max)
+ */
+#define EF4_EXTRACT32(element, min, max, low, high) \
+ EF4_EXTRACT_NATIVE(le32_to_cpu(element), min, max, low, high)
+
+#define EF4_EXTRACT_OWORD64(oword, low, high) \
+ ((EF4_EXTRACT64((oword).u64[0], 0, 63, low, high) | \
+ EF4_EXTRACT64((oword).u64[1], 64, 127, low, high)) & \
+ EF4_MASK64((high) + 1 - (low)))
+
+#define EF4_EXTRACT_QWORD64(qword, low, high) \
+ (EF4_EXTRACT64((qword).u64[0], 0, 63, low, high) & \
+ EF4_MASK64((high) + 1 - (low)))
+
+#define EF4_EXTRACT_OWORD32(oword, low, high) \
+ ((EF4_EXTRACT32((oword).u32[0], 0, 31, low, high) | \
+ EF4_EXTRACT32((oword).u32[1], 32, 63, low, high) | \
+ EF4_EXTRACT32((oword).u32[2], 64, 95, low, high) | \
+ EF4_EXTRACT32((oword).u32[3], 96, 127, low, high)) & \
+ EF4_MASK32((high) + 1 - (low)))
+
+#define EF4_EXTRACT_QWORD32(qword, low, high) \
+ ((EF4_EXTRACT32((qword).u32[0], 0, 31, low, high) | \
+ EF4_EXTRACT32((qword).u32[1], 32, 63, low, high)) & \
+ EF4_MASK32((high) + 1 - (low)))
+
+#define EF4_EXTRACT_DWORD(dword, low, high) \
+ (EF4_EXTRACT32((dword).u32[0], 0, 31, low, high) & \
+ EF4_MASK32((high) + 1 - (low)))
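+
+/* Worked example (a sketch with made-up numbers): extracting the
+ * inclusive bit range [60,67] from an octword with
+ * EF4_EXTRACT_OWORD64() combines two partial extractions:
+ *
+ *   EF4_EXTRACT64(u64[0], 0, 63, 60, 67)   == u64[0] >> 60
+ *   EF4_EXTRACT64(u64[1], 64, 127, 60, 67) == u64[1] << 4
+ *
+ * ORing the two and masking with EF4_MASK64(8) leaves the 8-bit field
+ * in bits [0,7] of the result.
+ */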
+
+#define EF4_OWORD_FIELD64(oword, field) \
+ EF4_EXTRACT_OWORD64(oword, EF4_LOW_BIT(field), \
+ EF4_HIGH_BIT(field))
+
+#define EF4_QWORD_FIELD64(qword, field) \
+ EF4_EXTRACT_QWORD64(qword, EF4_LOW_BIT(field), \
+ EF4_HIGH_BIT(field))
+
+#define EF4_OWORD_FIELD32(oword, field) \
+ EF4_EXTRACT_OWORD32(oword, EF4_LOW_BIT(field), \
+ EF4_HIGH_BIT(field))
+
+#define EF4_QWORD_FIELD32(qword, field) \
+ EF4_EXTRACT_QWORD32(qword, EF4_LOW_BIT(field), \
+ EF4_HIGH_BIT(field))
+
+#define EF4_DWORD_FIELD(dword, field) \
+ EF4_EXTRACT_DWORD(dword, EF4_LOW_BIT(field), \
+ EF4_HIGH_BIT(field))
+
+#define EF4_OWORD_IS_ZERO64(oword) \
+ (((oword).u64[0] | (oword).u64[1]) == (__force __le64) 0)
+
+#define EF4_QWORD_IS_ZERO64(qword) \
+ (((qword).u64[0]) == (__force __le64) 0)
+
+#define EF4_OWORD_IS_ZERO32(oword) \
+ (((oword).u32[0] | (oword).u32[1] | (oword).u32[2] | (oword).u32[3]) \
+ == (__force __le32) 0)
+
+#define EF4_QWORD_IS_ZERO32(qword) \
+ (((qword).u32[0] | (qword).u32[1]) == (__force __le32) 0)
+
+#define EF4_DWORD_IS_ZERO(dword) \
+ (((dword).u32[0]) == (__force __le32) 0)
+
+#define EF4_OWORD_IS_ALL_ONES64(oword) \
+ (((oword).u64[0] & (oword).u64[1]) == ~((__force __le64) 0))
+
+#define EF4_QWORD_IS_ALL_ONES64(qword) \
+ ((qword).u64[0] == ~((__force __le64) 0))
+
+#define EF4_OWORD_IS_ALL_ONES32(oword) \
+ (((oword).u32[0] & (oword).u32[1] & (oword).u32[2] & (oword).u32[3]) \
+ == ~((__force __le32) 0))
+
+#define EF4_QWORD_IS_ALL_ONES32(qword) \
+ (((qword).u32[0] & (qword).u32[1]) == ~((__force __le32) 0))
+
+#define EF4_DWORD_IS_ALL_ONES(dword) \
+ ((dword).u32[0] == ~((__force __le32) 0))
+
+#if BITS_PER_LONG == 64
+#define EF4_OWORD_FIELD EF4_OWORD_FIELD64
+#define EF4_QWORD_FIELD EF4_QWORD_FIELD64
+#define EF4_OWORD_IS_ZERO EF4_OWORD_IS_ZERO64
+#define EF4_QWORD_IS_ZERO EF4_QWORD_IS_ZERO64
+#define EF4_OWORD_IS_ALL_ONES EF4_OWORD_IS_ALL_ONES64
+#define EF4_QWORD_IS_ALL_ONES EF4_QWORD_IS_ALL_ONES64
+#else
+#define EF4_OWORD_FIELD EF4_OWORD_FIELD32
+#define EF4_QWORD_FIELD EF4_QWORD_FIELD32
+#define EF4_OWORD_IS_ZERO EF4_OWORD_IS_ZERO32
+#define EF4_QWORD_IS_ZERO EF4_QWORD_IS_ZERO32
+#define EF4_OWORD_IS_ALL_ONES EF4_OWORD_IS_ALL_ONES32
+#define EF4_QWORD_IS_ALL_ONES EF4_QWORD_IS_ALL_ONES32
+#endif
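+
+/* Usage sketch. Field names follow the <field>_LBN/<field>_WIDTH
+ * convention consumed by EF4_LOW_BIT()/EF4_HIGH_BIT() earlier in this
+ * header; FRF_EXAMPLE is hypothetical:
+ *
+ *   #define FRF_EXAMPLE_LBN 12
+ *   #define FRF_EXAMPLE_WIDTH 4
+ *
+ *   unsigned int v = EF4_OWORD_FIELD(reg, FRF_EXAMPLE);
+ *
+ * This reads bits [12,15] of "reg" whatever the host word size, since
+ * the 64-bit or 32-bit variant is selected above by BITS_PER_LONG.
+ */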
+
+/*
+ * Construct bit field portion
+ *
+ * Creates the portion of the bit field [low,high] that lies within
+ * the range [min,max].
+ */
+#define EF4_INSERT_NATIVE64(min, max, low, high, value) \
+ (((low > max) || (high < min)) ? 0 : \
+ ((low > min) ? \
+ (((u64) (value)) << (low - min)) : \
+ (((u64) (value)) >> (min - low))))
+
+#define EF4_INSERT_NATIVE32(min, max, low, high, value) \
+ (((low > max) || (high < min)) ? 0 : \
+ ((low > min) ? \
+ (((u32) (value)) << (low - min)) : \
+ (((u32) (value)) >> (min - low))))
+
+#define EF4_INSERT_NATIVE(min, max, low, high, value) \
+ ((((max - min) >= 32) || ((high - low) >= 32)) ? \
+ EF4_INSERT_NATIVE64(min, max, low, high, value) : \
+ EF4_INSERT_NATIVE32(min, max, low, high, value))
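+
+/* For instance (illustrative), inserting a value into the 18-bit field
+ * [28,45] of the 64-bit word covering bits [0,63]:
+ *
+ *   EF4_INSERT_NATIVE(0, 63, 28, 45, value)
+ *
+ * selects the 64-bit variant (max - min >= 32) and evaluates to
+ * ((u64) (value)) << 28.
+ */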
+
+/*
+ * Construct bit field portion
+ *
+ * Creates the portion of the named bit field that lies within the
+ * range [min,max].
+ */
+#define EF4_INSERT_FIELD_NATIVE(min, max, field, value) \
+ EF4_INSERT_NATIVE(min, max, EF4_LOW_BIT(field), \
+ EF4_HIGH_BIT(field), value)
+
+/*
+ * Construct bit field
+ *
+ * Creates the portion of the named bit fields that lie within the
+ * range [min,max].
+ */
+#define EF4_INSERT_FIELDS_NATIVE(min, max, \
+ field1, value1, \
+ field2, value2, \
+ field3, value3, \
+ field4, value4, \
+ field5, value5, \
+ field6, value6, \
+ field7, value7, \
+ field8, value8, \
+ field9, value9, \
+ field10, value10) \
+ (EF4_INSERT_FIELD_NATIVE((min), (max), field1, (value1)) | \
+ EF4_INSERT_FIELD_NATIVE((min), (max), field2, (value2)) | \
+ EF4_INSERT_FIELD_NATIVE((min), (max), field3, (value3)) | \
+ EF4_INSERT_FIELD_NATIVE((min), (max), field4, (value4)) | \
+ EF4_INSERT_FIELD_NATIVE((min), (max), field5, (value5)) | \
+ EF4_INSERT_FIELD_NATIVE((min), (max), field6, (value6)) | \
+ EF4_INSERT_FIELD_NATIVE((min), (max), field7, (value7)) | \
+ EF4_INSERT_FIELD_NATIVE((min), (max), field8, (value8)) | \
+ EF4_INSERT_FIELD_NATIVE((min), (max), field9, (value9)) | \
+ EF4_INSERT_FIELD_NATIVE((min), (max), field10, (value10)))
+
+#define EF4_INSERT_FIELDS64(...) \
+ cpu_to_le64(EF4_INSERT_FIELDS_NATIVE(__VA_ARGS__))
+
+#define EF4_INSERT_FIELDS32(...) \
+ cpu_to_le32(EF4_INSERT_FIELDS_NATIVE(__VA_ARGS__))
+
+#define EF4_POPULATE_OWORD64(oword, ...) do { \
+ (oword).u64[0] = EF4_INSERT_FIELDS64(0, 63, __VA_ARGS__); \
+ (oword).u64[1] = EF4_INSERT_FIELDS64(64, 127, __VA_ARGS__); \
+ } while (0)
+
+#define EF4_POPULATE_QWORD64(qword, ...) do { \
+ (qword).u64[0] = EF4_INSERT_FIELDS64(0, 63, __VA_ARGS__); \
+ } while (0)
+
+#define EF4_POPULATE_OWORD32(oword, ...) do { \
+ (oword).u32[0] = EF4_INSERT_FIELDS32(0, 31, __VA_ARGS__); \
+ (oword).u32[1] = EF4_INSERT_FIELDS32(32, 63, __VA_ARGS__); \
+ (oword).u32[2] = EF4_INSERT_FIELDS32(64, 95, __VA_ARGS__); \
+ (oword).u32[3] = EF4_INSERT_FIELDS32(96, 127, __VA_ARGS__); \
+ } while (0)
+
+#define EF4_POPULATE_QWORD32(qword, ...) do { \
+ (qword).u32[0] = EF4_INSERT_FIELDS32(0, 31, __VA_ARGS__); \
+ (qword).u32[1] = EF4_INSERT_FIELDS32(32, 63, __VA_ARGS__); \
+ } while (0)
+
+#define EF4_POPULATE_DWORD(dword, ...) do { \
+ (dword).u32[0] = EF4_INSERT_FIELDS32(0, 31, __VA_ARGS__); \
+ } while (0)
+
+#if BITS_PER_LONG == 64
+#define EF4_POPULATE_OWORD EF4_POPULATE_OWORD64
+#define EF4_POPULATE_QWORD EF4_POPULATE_QWORD64
+#else
+#define EF4_POPULATE_OWORD EF4_POPULATE_OWORD32
+#define EF4_POPULATE_QWORD EF4_POPULATE_QWORD32
+#endif
+
+/* Populate an octword field with various numbers of arguments */
+#define EF4_POPULATE_OWORD_10 EF4_POPULATE_OWORD
+#define EF4_POPULATE_OWORD_9(oword, ...) \
+ EF4_POPULATE_OWORD_10(oword, EF4_DUMMY_FIELD, 0, __VA_ARGS__)
+#define EF4_POPULATE_OWORD_8(oword, ...) \
+ EF4_POPULATE_OWORD_9(oword, EF4_DUMMY_FIELD, 0, __VA_ARGS__)
+#define EF4_POPULATE_OWORD_7(oword, ...) \
+ EF4_POPULATE_OWORD_8(oword, EF4_DUMMY_FIELD, 0, __VA_ARGS__)
+#define EF4_POPULATE_OWORD_6(oword, ...) \
+ EF4_POPULATE_OWORD_7(oword, EF4_DUMMY_FIELD, 0, __VA_ARGS__)
+#define EF4_POPULATE_OWORD_5(oword, ...) \
+ EF4_POPULATE_OWORD_6(oword, EF4_DUMMY_FIELD, 0, __VA_ARGS__)
+#define EF4_POPULATE_OWORD_4(oword, ...) \
+ EF4_POPULATE_OWORD_5(oword, EF4_DUMMY_FIELD, 0, __VA_ARGS__)
+#define EF4_POPULATE_OWORD_3(oword, ...) \
+ EF4_POPULATE_OWORD_4(oword, EF4_DUMMY_FIELD, 0, __VA_ARGS__)
+#define EF4_POPULATE_OWORD_2(oword, ...) \
+ EF4_POPULATE_OWORD_3(oword, EF4_DUMMY_FIELD, 0, __VA_ARGS__)
+#define EF4_POPULATE_OWORD_1(oword, ...) \
+ EF4_POPULATE_OWORD_2(oword, EF4_DUMMY_FIELD, 0, __VA_ARGS__)
+#define EF4_ZERO_OWORD(oword) \
+ EF4_POPULATE_OWORD_1(oword, EF4_DUMMY_FIELD, 0)
+#define EF4_SET_OWORD(oword) \
+ EF4_POPULATE_OWORD_4(oword, \
+ EF4_DWORD_0, 0xffffffff, \
+ EF4_DWORD_1, 0xffffffff, \
+ EF4_DWORD_2, 0xffffffff, \
+ EF4_DWORD_3, 0xffffffff)
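+
+/* Usage sketch (FRF_A and FRF_B are hypothetical fields): each
+ * fixed-arity variant pads the remaining slots with EF4_DUMMY_FIELD,
+ * so
+ *
+ *   EF4_POPULATE_OWORD_2(reg, FRF_A, 1, FRF_B, 0x7);
+ *
+ * zeroes "reg" apart from the two named fields, which take the given
+ * values.
+ */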
+
+/* Populate a quadword field with various numbers of arguments */
+#define EF4_POPULATE_QWORD_10 EF4_POPULATE_QWORD
+#define EF4_POPULATE_QWORD_9(qword, ...) \
+ EF4_POPULATE_QWORD_10(qword, EF4_DUMMY_FIELD, 0, __VA_ARGS__)
+#define EF4_POPULATE_QWORD_8(qword, ...) \
+ EF4_POPULATE_QWORD_9(qword, EF4_DUMMY_FIELD, 0, __VA_ARGS__)
+#define EF4_POPULATE_QWORD_7(qword, ...) \
+ EF4_POPULATE_QWORD_8(qword, EF4_DUMMY_FIELD, 0, __VA_ARGS__)
+#define EF4_POPULATE_QWORD_6(qword, ...) \
+ EF4_POPULATE_QWORD_7(qword, EF4_DUMMY_FIELD, 0, __VA_ARGS__)
+#define EF4_POPULATE_QWORD_5(qword, ...) \
+ EF4_POPULATE_QWORD_6(qword, EF4_DUMMY_FIELD, 0, __VA_ARGS__)
+#define EF4_POPULATE_QWORD_4(qword, ...) \
+ EF4_POPULATE_QWORD_5(qword, EF4_DUMMY_FIELD, 0, __VA_ARGS__)
+#define EF4_POPULATE_QWORD_3(qword, ...) \
+ EF4_POPULATE_QWORD_4(qword, EF4_DUMMY_FIELD, 0, __VA_ARGS__)
+#define EF4_POPULATE_QWORD_2(qword, ...) \
+ EF4_POPULATE_QWORD_3(qword, EF4_DUMMY_FIELD, 0, __VA_ARGS__)
+#define EF4_POPULATE_QWORD_1(qword, ...) \
+ EF4_POPULATE_QWORD_2(qword, EF4_DUMMY_FIELD, 0, __VA_ARGS__)
+#define EF4_ZERO_QWORD(qword) \
+ EF4_POPULATE_QWORD_1(qword, EF4_DUMMY_FIELD, 0)
+#define EF4_SET_QWORD(qword) \
+ EF4_POPULATE_QWORD_2(qword, \
+ EF4_DWORD_0, 0xffffffff, \
+ EF4_DWORD_1, 0xffffffff)
+
+/* Populate a dword field with various numbers of arguments */
+#define EF4_POPULATE_DWORD_10 EF4_POPULATE_DWORD
+#define EF4_POPULATE_DWORD_9(dword, ...) \
+ EF4_POPULATE_DWORD_10(dword, EF4_DUMMY_FIELD, 0, __VA_ARGS__)
+#define EF4_POPULATE_DWORD_8(dword, ...) \
+ EF4_POPULATE_DWORD_9(dword, EF4_DUMMY_FIELD, 0, __VA_ARGS__)
+#define EF4_POPULATE_DWORD_7(dword, ...) \
+ EF4_POPULATE_DWORD_8(dword, EF4_DUMMY_FIELD, 0, __VA_ARGS__)
+#define EF4_POPULATE_DWORD_6(dword, ...) \
+ EF4_POPULATE_DWORD_7(dword, EF4_DUMMY_FIELD, 0, __VA_ARGS__)
+#define EF4_POPULATE_DWORD_5(dword, ...) \
+ EF4_POPULATE_DWORD_6(dword, EF4_DUMMY_FIELD, 0, __VA_ARGS__)
+#define EF4_POPULATE_DWORD_4(dword, ...) \
+ EF4_POPULATE_DWORD_5(dword, EF4_DUMMY_FIELD, 0, __VA_ARGS__)
+#define EF4_POPULATE_DWORD_3(dword, ...) \
+ EF4_POPULATE_DWORD_4(dword, EF4_DUMMY_FIELD, 0, __VA_ARGS__)
+#define EF4_POPULATE_DWORD_2(dword, ...) \
+ EF4_POPULATE_DWORD_3(dword, EF4_DUMMY_FIELD, 0, __VA_ARGS__)
+#define EF4_POPULATE_DWORD_1(dword, ...) \
+ EF4_POPULATE_DWORD_2(dword, EF4_DUMMY_FIELD, 0, __VA_ARGS__)
+#define EF4_ZERO_DWORD(dword) \
+ EF4_POPULATE_DWORD_1(dword, EF4_DUMMY_FIELD, 0)
+#define EF4_SET_DWORD(dword) \
+ EF4_POPULATE_DWORD_1(dword, EF4_DWORD_0, 0xffffffff)
+
+/*
+ * Modify a named field within an already-populated structure. Used
+ * for read-modify-write operations.
+ */
+#define EF4_INVERT_OWORD(oword) do { \
+ (oword).u64[0] = ~((oword).u64[0]); \
+ (oword).u64[1] = ~((oword).u64[1]); \
+ } while (0)
+
+#define EF4_AND_OWORD(oword, from, mask) \
+ do { \
+ (oword).u64[0] = (from).u64[0] & (mask).u64[0]; \
+ (oword).u64[1] = (from).u64[1] & (mask).u64[1]; \
+ } while (0)
+
+#define EF4_OR_OWORD(oword, from, mask) \
+ do { \
+ (oword).u64[0] = (from).u64[0] | (mask).u64[0]; \
+ (oword).u64[1] = (from).u64[1] | (mask).u64[1]; \
+ } while (0)
+
+#define EF4_INSERT64(min, max, low, high, value) \
+ cpu_to_le64(EF4_INSERT_NATIVE(min, max, low, high, value))
+
+#define EF4_INSERT32(min, max, low, high, value) \
+ cpu_to_le32(EF4_INSERT_NATIVE(min, max, low, high, value))
+
+#define EF4_INPLACE_MASK64(min, max, low, high) \
+ EF4_INSERT64(min, max, low, high, EF4_MASK64((high) + 1 - (low)))
+
+#define EF4_INPLACE_MASK32(min, max, low, high) \
+ EF4_INSERT32(min, max, low, high, EF4_MASK32((high) + 1 - (low)))
+
+#define EF4_SET_OWORD64(oword, low, high, value) do { \
+ (oword).u64[0] = (((oword).u64[0] \
+ & ~EF4_INPLACE_MASK64(0, 63, low, high)) \
+ | EF4_INSERT64(0, 63, low, high, value)); \
+ (oword).u64[1] = (((oword).u64[1] \
+ & ~EF4_INPLACE_MASK64(64, 127, low, high)) \
+ | EF4_INSERT64(64, 127, low, high, value)); \
+ } while (0)
+
+#define EF4_SET_QWORD64(qword, low, high, value) do { \
+ (qword).u64[0] = (((qword).u64[0] \
+ & ~EF4_INPLACE_MASK64(0, 63, low, high)) \
+ | EF4_INSERT64(0, 63, low, high, value)); \
+ } while (0)
+
+#define EF4_SET_OWORD32(oword, low, high, value) do { \
+ (oword).u32[0] = (((oword).u32[0] \
+ & ~EF4_INPLACE_MASK32(0, 31, low, high)) \
+ | EF4_INSERT32(0, 31, low, high, value)); \
+ (oword).u32[1] = (((oword).u32[1] \
+ & ~EF4_INPLACE_MASK32(32, 63, low, high)) \
+ | EF4_INSERT32(32, 63, low, high, value)); \
+ (oword).u32[2] = (((oword).u32[2] \
+ & ~EF4_INPLACE_MASK32(64, 95, low, high)) \
+ | EF4_INSERT32(64, 95, low, high, value)); \
+ (oword).u32[3] = (((oword).u32[3] \
+ & ~EF4_INPLACE_MASK32(96, 127, low, high)) \
+ | EF4_INSERT32(96, 127, low, high, value)); \
+ } while (0)
+
+#define EF4_SET_QWORD32(qword, low, high, value) do { \
+ (qword).u32[0] = (((qword).u32[0] \
+ & ~EF4_INPLACE_MASK32(0, 31, low, high)) \
+ | EF4_INSERT32(0, 31, low, high, value)); \
+ (qword).u32[1] = (((qword).u32[1] \
+ & ~EF4_INPLACE_MASK32(32, 63, low, high)) \
+ | EF4_INSERT32(32, 63, low, high, value)); \
+ } while (0)
+
+#define EF4_SET_DWORD32(dword, low, high, value) do { \
+ (dword).u32[0] = (((dword).u32[0] \
+ & ~EF4_INPLACE_MASK32(0, 31, low, high)) \
+ | EF4_INSERT32(0, 31, low, high, value)); \
+ } while (0)
+
+#define EF4_SET_OWORD_FIELD64(oword, field, value) \
+ EF4_SET_OWORD64(oword, EF4_LOW_BIT(field), \
+ EF4_HIGH_BIT(field), value)
+
+#define EF4_SET_QWORD_FIELD64(qword, field, value) \
+ EF4_SET_QWORD64(qword, EF4_LOW_BIT(field), \
+ EF4_HIGH_BIT(field), value)
+
+#define EF4_SET_OWORD_FIELD32(oword, field, value) \
+ EF4_SET_OWORD32(oword, EF4_LOW_BIT(field), \
+ EF4_HIGH_BIT(field), value)
+
+#define EF4_SET_QWORD_FIELD32(qword, field, value) \
+ EF4_SET_QWORD32(qword, EF4_LOW_BIT(field), \
+ EF4_HIGH_BIT(field), value)
+
+#define EF4_SET_DWORD_FIELD(dword, field, value) \
+ EF4_SET_DWORD32(dword, EF4_LOW_BIT(field), \
+ EF4_HIGH_BIT(field), value)
+
+#if BITS_PER_LONG == 64
+#define EF4_SET_OWORD_FIELD EF4_SET_OWORD_FIELD64
+#define EF4_SET_QWORD_FIELD EF4_SET_QWORD_FIELD64
+#else
+#define EF4_SET_OWORD_FIELD EF4_SET_OWORD_FIELD32
+#define EF4_SET_QWORD_FIELD EF4_SET_QWORD_FIELD32
+#endif
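+
+/* Read-modify-write sketch using the ef4_reado()/ef4_writeo() MMIO
+ * helpers from this driver's io.h (FRF_EXAMPLE and REG_ADDR are
+ * hypothetical):
+ *
+ *   ef4_oword_t reg;
+ *
+ *   ef4_reado(efx, &reg, REG_ADDR);
+ *   EF4_SET_OWORD_FIELD(reg, FRF_EXAMPLE, 1);
+ *   ef4_writeo(efx, &reg, REG_ADDR);
+ *
+ * Only the named field changes; all other bits are preserved.
+ */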
+
+/* Used to avoid compiler warnings about shift range exceeding width
+ * of the data types when dma_addr_t is only 32 bits wide.
+ */
+#define DMA_ADDR_T_WIDTH (8 * sizeof(dma_addr_t))
+#define EF4_DMA_TYPE_WIDTH(width) \
+ (((width) < DMA_ADDR_T_WIDTH) ? (width) : DMA_ADDR_T_WIDTH)
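+
+/* For example, with a 32-bit dma_addr_t, EF4_DMA_TYPE_WIDTH(46)
+ * evaluates to 32, clamping field widths to what the type can
+ * represent.
+ */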
+
+/* Static initialiser */
+#define EF4_OWORD32(a, b, c, d) \
+ { .u32 = { cpu_to_le32(a), cpu_to_le32(b), \
+ cpu_to_le32(c), cpu_to_le32(d) } }
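+
+/* For example (illustrative), a constant all-ones 128-bit mask:
+ *
+ *   static const ef4_oword_t ones_mask =
+ *           EF4_OWORD32(0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff);
+ */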
+
+#endif /* EF4_BITFIELD_H */
diff --git a/drivers/net/ethernet/sfc/falcon/efx.c b/drivers/net/ethernet/sfc/falcon/efx.c
new file mode 100644
index 000000000000..5c5cb3c4c12e
--- /dev/null
+++ b/drivers/net/ethernet/sfc/falcon/efx.c
@@ -0,0 +1,3350 @@
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2005-2006 Fen Systems Ltd.
+ * Copyright 2005-2013 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/delay.h>
+#include <linux/notifier.h>
+#include <linux/ip.h>
+#include <linux/tcp.h>
+#include <linux/in.h>
+#include <linux/ethtool.h>
+#include <linux/topology.h>
+#include <linux/gfp.h>
+#include <linux/aer.h>
+#include <linux/interrupt.h>
+#include "net_driver.h"
+#include "efx.h"
+#include "nic.h"
+#include "selftest.h"
+
+#include "workarounds.h"
+
+/**************************************************************************
+ *
+ * Type name strings
+ *
+ **************************************************************************
+ */
+
+/* Loopback mode names (see LOOPBACK_MODE()) */
+const unsigned int ef4_loopback_mode_max = LOOPBACK_MAX;
+const char *const ef4_loopback_mode_names[] = {
+ [LOOPBACK_NONE] = "NONE",
+ [LOOPBACK_DATA] = "DATAPATH",
+ [LOOPBACK_GMAC] = "GMAC",
+ [LOOPBACK_XGMII] = "XGMII",
+ [LOOPBACK_XGXS] = "XGXS",
+ [LOOPBACK_XAUI] = "XAUI",
+ [LOOPBACK_GMII] = "GMII",
+ [LOOPBACK_SGMII] = "SGMII",
+ [LOOPBACK_XGBR] = "XGBR",
+ [LOOPBACK_XFI] = "XFI",
+ [LOOPBACK_XAUI_FAR] = "XAUI_FAR",
+ [LOOPBACK_GMII_FAR] = "GMII_FAR",
+ [LOOPBACK_SGMII_FAR] = "SGMII_FAR",
+ [LOOPBACK_XFI_FAR] = "XFI_FAR",
+ [LOOPBACK_GPHY] = "GPHY",
+ [LOOPBACK_PHYXS] = "PHYXS",
+ [LOOPBACK_PCS] = "PCS",
+ [LOOPBACK_PMAPMD] = "PMA/PMD",
+ [LOOPBACK_XPORT] = "XPORT",
+ [LOOPBACK_XGMII_WS] = "XGMII_WS",
+ [LOOPBACK_XAUI_WS] = "XAUI_WS",
+ [LOOPBACK_XAUI_WS_FAR] = "XAUI_WS_FAR",
+ [LOOPBACK_XAUI_WS_NEAR] = "XAUI_WS_NEAR",
+ [LOOPBACK_GMII_WS] = "GMII_WS",
+ [LOOPBACK_XFI_WS] = "XFI_WS",
+ [LOOPBACK_XFI_WS_FAR] = "XFI_WS_FAR",
+ [LOOPBACK_PHYXS_WS] = "PHYXS_WS",
+};
+
+const unsigned int ef4_reset_type_max = RESET_TYPE_MAX;
+const char *const ef4_reset_type_names[] = {
+ [RESET_TYPE_INVISIBLE] = "INVISIBLE",
+ [RESET_TYPE_ALL] = "ALL",
+ [RESET_TYPE_RECOVER_OR_ALL] = "RECOVER_OR_ALL",
+ [RESET_TYPE_WORLD] = "WORLD",
+ [RESET_TYPE_RECOVER_OR_DISABLE] = "RECOVER_OR_DISABLE",
+ [RESET_TYPE_DATAPATH] = "DATAPATH",
+ [RESET_TYPE_DISABLE] = "DISABLE",
+ [RESET_TYPE_TX_WATCHDOG] = "TX_WATCHDOG",
+ [RESET_TYPE_INT_ERROR] = "INT_ERROR",
+ [RESET_TYPE_RX_RECOVERY] = "RX_RECOVERY",
+ [RESET_TYPE_DMA_ERROR] = "DMA_ERROR",
+ [RESET_TYPE_TX_SKIP] = "TX_SKIP",
+};
+
+/* Reset workqueue. If any NIC has a hardware failure then a reset will be
+ * queued onto this work queue. This is not a per-nic work queue, because
+ * ef4_reset_work() acquires the rtnl lock, so resets are naturally serialised.
+ */
+static struct workqueue_struct *reset_workqueue;
+
+/* How often and how many times to poll for a reset while waiting for a
+ * BIST that another function started to complete.
+ */
+#define BIST_WAIT_DELAY_MS 100
+#define BIST_WAIT_DELAY_COUNT 100
+
+/**************************************************************************
+ *
+ * Configurable values
+ *
+ *************************************************************************/
+
+/*
+ * Use separate channels for TX and RX events
+ *
+ * Set this to 1 to use separate channels for TX and RX. It allows us
+ * to control interrupt affinity separately for TX and RX.
+ *
+ * This is only used in MSI-X interrupt mode
+ */
+bool ef4_separate_tx_channels;
+module_param(ef4_separate_tx_channels, bool, 0444);
+MODULE_PARM_DESC(ef4_separate_tx_channels,
+ "Use separate channels for TX and RX");
+
+/* This is the weight assigned to each of the (per-channel) virtual
+ * NAPI devices.
+ */
+static int napi_weight = 64;
+
+/* This is the time (in jiffies) between invocations of the hardware
+ * monitor.
+ * On Falcon-based NICs, this will:
+ * - Check the on-board hardware monitor;
+ * - Poll the link state and reconfigure the hardware as necessary.
+ * On Siena-based NICs on Power systems with EEH support, this will give
+ * EEH a chance to start.
+ */
+static unsigned int ef4_monitor_interval = 1 * HZ;
+
+/* Initial interrupt moderation settings. They can be modified after
+ * module load with ethtool.
+ *
+ * The default for RX should strike a balance between increasing the
+ * round-trip latency and reducing overhead.
+ */
+static unsigned int rx_irq_mod_usec = 60;
+
+/* Initial interrupt moderation settings. They can be modified after
+ * module load with ethtool.
+ *
+ * This default is chosen to ensure that a 10G link does not go idle
+ * while a TX queue is stopped after it has become full. A queue is
+ * restarted when it drops below half full. The time this takes (assuming
+ * worst case 3 descriptors per packet, 1024 descriptors, and roughly
+ * 1.2 usec to transmit a full-sized packet at 10G) is
+ * 512 / 3 * 1.2 = 205 usec.
+ */
+static unsigned int tx_irq_mod_usec = 150;
+
+/* This is the first interrupt mode to try out of:
+ * 0 => MSI-X
+ * 1 => MSI
+ * 2 => legacy
+ */
+static unsigned int interrupt_mode;
+
+/* This is the requested number of CPUs to use for Receive-Side Scaling (RSS),
+ * i.e. the number of CPUs among which we may distribute simultaneous
+ * interrupt handling.
+ *
+ * Cards without MSI-X will only target one CPU via legacy or MSI interrupt.
+ * The default (0) means to assign an interrupt to each core.
+ */
+static unsigned int rss_cpus;
+module_param(rss_cpus, uint, 0444);
+MODULE_PARM_DESC(rss_cpus, "Number of CPUs to use for Receive-Side Scaling");
+
+static bool phy_flash_cfg;
+module_param(phy_flash_cfg, bool, 0644);
+MODULE_PARM_DESC(phy_flash_cfg, "Set PHYs into reflash mode initially");
+
+static unsigned irq_adapt_low_thresh = 8000;
+module_param(irq_adapt_low_thresh, uint, 0644);
+MODULE_PARM_DESC(irq_adapt_low_thresh,
+ "Threshold score for reducing IRQ moderation");
+
+static unsigned irq_adapt_high_thresh = 16000;
+module_param(irq_adapt_high_thresh, uint, 0644);
+MODULE_PARM_DESC(irq_adapt_high_thresh,
+ "Threshold score for increasing IRQ moderation");
+
+static unsigned debug = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
+ NETIF_MSG_LINK | NETIF_MSG_IFDOWN |
+ NETIF_MSG_IFUP | NETIF_MSG_RX_ERR |
+ NETIF_MSG_TX_ERR | NETIF_MSG_HW);
+module_param(debug, uint, 0);
+MODULE_PARM_DESC(debug, "Bitmapped debugging message enable value");
+
+/**************************************************************************
+ *
+ * Utility functions and prototypes
+ *
+ *************************************************************************/
+
+static int ef4_soft_enable_interrupts(struct ef4_nic *efx);
+static void ef4_soft_disable_interrupts(struct ef4_nic *efx);
+static void ef4_remove_channel(struct ef4_channel *channel);
+static void ef4_remove_channels(struct ef4_nic *efx);
+static const struct ef4_channel_type ef4_default_channel_type;
+static void ef4_remove_port(struct ef4_nic *efx);
+static void ef4_init_napi_channel(struct ef4_channel *channel);
+static void ef4_fini_napi(struct ef4_nic *efx);
+static void ef4_fini_napi_channel(struct ef4_channel *channel);
+static void ef4_fini_struct(struct ef4_nic *efx);
+static void ef4_start_all(struct ef4_nic *efx);
+static void ef4_stop_all(struct ef4_nic *efx);
+
+#define EF4_ASSERT_RESET_SERIALISED(efx) \
+ do { \
+ if ((efx->state == STATE_READY) || \
+ (efx->state == STATE_RECOVERY) || \
+ (efx->state == STATE_DISABLED)) \
+ ASSERT_RTNL(); \
+ } while (0)
+
+static int ef4_check_disabled(struct ef4_nic *efx)
+{
+ if (efx->state == STATE_DISABLED || efx->state == STATE_RECOVERY) {
+ netif_err(efx, drv, efx->net_dev,
+ "device is disabled due to earlier errors\n");
+ return -EIO;
+ }
+ return 0;
+}
+
+/**************************************************************************
+ *
+ * Event queue processing
+ *
+ *************************************************************************/
+
+/* Process channel's event queue
+ *
+ * This function is responsible for processing the event queue of a
+ * single channel. The caller must guarantee that this function will
+ * never be concurrently called more than once on the same channel,
+ * though different channels may be being processed concurrently.
+ */
+static int ef4_process_channel(struct ef4_channel *channel, int budget)
+{
+ struct ef4_tx_queue *tx_queue;
+ int spent;
+
+ if (unlikely(!channel->enabled))
+ return 0;
+
+ ef4_for_each_channel_tx_queue(tx_queue, channel) {
+ tx_queue->pkts_compl = 0;
+ tx_queue->bytes_compl = 0;
+ }
+
+ spent = ef4_nic_process_eventq(channel, budget);
+ if (spent && ef4_channel_has_rx_queue(channel)) {
+ struct ef4_rx_queue *rx_queue =
+ ef4_channel_get_rx_queue(channel);
+
+ ef4_rx_flush_packet(channel);
+ ef4_fast_push_rx_descriptors(rx_queue, true);
+ }
+
+ /* Update BQL */
+ ef4_for_each_channel_tx_queue(tx_queue, channel) {
+ if (tx_queue->bytes_compl) {
+ netdev_tx_completed_queue(tx_queue->core_txq,
+ tx_queue->pkts_compl, tx_queue->bytes_compl);
+ }
+ }
+
+ return spent;
+}
+
+/* NAPI poll handler
+ *
+ * NAPI guarantees serialisation of polls of the same device, which
+ * provides the guarantee required by ef4_process_channel().
+ */
+static void ef4_update_irq_mod(struct ef4_nic *efx, struct ef4_channel *channel)
+{
+ int step = efx->irq_mod_step_us;
+
+ if (channel->irq_mod_score < irq_adapt_low_thresh) {
+ if (channel->irq_moderation_us > step) {
+ channel->irq_moderation_us -= step;
+ efx->type->push_irq_moderation(channel);
+ }
+ } else if (channel->irq_mod_score > irq_adapt_high_thresh) {
+ if (channel->irq_moderation_us <
+ efx->irq_rx_moderation_us) {
+ channel->irq_moderation_us += step;
+ efx->type->push_irq_moderation(channel);
+ }
+ }
+
+ channel->irq_count = 0;
+ channel->irq_mod_score = 0;
+}
+
+static int ef4_poll(struct napi_struct *napi, int budget)
+{
+ struct ef4_channel *channel =
+ container_of(napi, struct ef4_channel, napi_str);
+ struct ef4_nic *efx = channel->efx;
+ int spent;
+
+ if (!ef4_channel_lock_napi(channel))
+ return budget;
+
+ netif_vdbg(efx, intr, efx->net_dev,
+ "channel %d NAPI poll executing on CPU %d\n",
+ channel->channel, raw_smp_processor_id());
+
+ spent = ef4_process_channel(channel, budget);
+
+ if (spent < budget) {
+ if (ef4_channel_has_rx_queue(channel) &&
+ efx->irq_rx_adaptive &&
+ unlikely(++channel->irq_count == 1000)) {
+ ef4_update_irq_mod(efx, channel);
+ }
+
+ ef4_filter_rfs_expire(channel);
+
+ /* There is no race here; although napi_disable() will
+ * only wait for napi_complete(), this isn't a problem
+ * since ef4_nic_eventq_read_ack() will have no effect if
+ * interrupts have already been disabled.
+ */
+ napi_complete(napi);
+ ef4_nic_eventq_read_ack(channel);
+ }
+
+ ef4_channel_unlock_napi(channel);
+ return spent;
+}
+
+/* Create event queue
+ * Event queue memory allocations are done only once. If the channel
+ * is reset, the memory buffer will be reused; this guards against
+ * errors during channel reset and also simplifies interrupt handling.
+ */
+static int ef4_probe_eventq(struct ef4_channel *channel)
+{
+ struct ef4_nic *efx = channel->efx;
+ unsigned long entries;
+
+ netif_dbg(efx, probe, efx->net_dev,
+ "chan %d create event queue\n", channel->channel);
+
+ /* Build an event queue with room for one event per tx and rx buffer,
+ * plus some extra for link state events and MCDI completions. */
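+ /* e.g. 512 RX + 512 TX entries: roundup_pow_of_two(1152) = 2048 */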
+ entries = roundup_pow_of_two(efx->rxq_entries + efx->txq_entries + 128);
+ EF4_BUG_ON_PARANOID(entries > EF4_MAX_EVQ_SIZE);
+ channel->eventq_mask = max(entries, EF4_MIN_EVQ_SIZE) - 1;
+
+ return ef4_nic_probe_eventq(channel);
+}
+
+/* Prepare channel's event queue */
+static int ef4_init_eventq(struct ef4_channel *channel)
+{
+ struct ef4_nic *efx = channel->efx;
+ int rc;
+
+ EF4_WARN_ON_PARANOID(channel->eventq_init);
+
+ netif_dbg(efx, drv, efx->net_dev,
+ "chan %d init event queue\n", channel->channel);
+
+ rc = ef4_nic_init_eventq(channel);
+ if (rc == 0) {
+ efx->type->push_irq_moderation(channel);
+ channel->eventq_read_ptr = 0;
+ channel->eventq_init = true;
+ }
+ return rc;
+}
+
+/* Enable event queue processing and NAPI */
+void ef4_start_eventq(struct ef4_channel *channel)
+{
+ netif_dbg(channel->efx, ifup, channel->efx->net_dev,
+ "chan %d start event queue\n", channel->channel);
+
+ /* Make sure the NAPI handler sees the enabled flag set */
+ channel->enabled = true;
+ smp_wmb();
+
+ ef4_channel_enable(channel);
+ napi_enable(&channel->napi_str);
+ ef4_nic_eventq_read_ack(channel);
+}
+
+/* Disable event queue processing and NAPI */
+void ef4_stop_eventq(struct ef4_channel *channel)
+{
+ if (!channel->enabled)
+ return;
+
+ napi_disable(&channel->napi_str);
+ while (!ef4_channel_disable(channel))
+ usleep_range(1000, 20000);
+ channel->enabled = false;
+}
+
+static void ef4_fini_eventq(struct ef4_channel *channel)
+{
+ if (!channel->eventq_init)
+ return;
+
+ netif_dbg(channel->efx, drv, channel->efx->net_dev,
+ "chan %d fini event queue\n", channel->channel);
+
+ ef4_nic_fini_eventq(channel);
+ channel->eventq_init = false;
+}
+
+static void ef4_remove_eventq(struct ef4_channel *channel)
+{
+ netif_dbg(channel->efx, drv, channel->efx->net_dev,
+ "chan %d remove event queue\n", channel->channel);
+
+ ef4_nic_remove_eventq(channel);
+}
+
+/**************************************************************************
+ *
+ * Channel handling
+ *
+ *************************************************************************/
+
+/* Allocate and initialise a channel structure. */
+static struct ef4_channel *
+ef4_alloc_channel(struct ef4_nic *efx, int i, struct ef4_channel *old_channel)
+{
+ struct ef4_channel *channel;
+ struct ef4_rx_queue *rx_queue;
+ struct ef4_tx_queue *tx_queue;
+ int j;
+
+ channel = kzalloc(sizeof(*channel), GFP_KERNEL);
+ if (!channel)
+ return NULL;
+
+ channel->efx = efx;
+ channel->channel = i;
+ channel->type = &ef4_default_channel_type;
+
+ for (j = 0; j < EF4_TXQ_TYPES; j++) {
+ tx_queue = &channel->tx_queue[j];
+ tx_queue->efx = efx;
+ tx_queue->queue = i * EF4_TXQ_TYPES + j;
+ tx_queue->channel = channel;
+ }
+
+ rx_queue = &channel->rx_queue;
+ rx_queue->efx = efx;
+ setup_timer(&rx_queue->slow_fill, ef4_rx_slow_fill,
+ (unsigned long)rx_queue);
+
+ return channel;
+}
+
+/* Allocate and initialise a channel structure, copying parameters
+ * (but not resources) from an old channel structure.
+ */
+static struct ef4_channel *
+ef4_copy_channel(const struct ef4_channel *old_channel)
+{
+ struct ef4_channel *channel;
+ struct ef4_rx_queue *rx_queue;
+ struct ef4_tx_queue *tx_queue;
+ int j;
+
+ channel = kmalloc(sizeof(*channel), GFP_KERNEL);
+ if (!channel)
+ return NULL;
+
+ *channel = *old_channel;
+
+ channel->napi_dev = NULL;
+ INIT_HLIST_NODE(&channel->napi_str.napi_hash_node);
+ channel->napi_str.napi_id = 0;
+ channel->napi_str.state = 0;
+ memset(&channel->eventq, 0, sizeof(channel->eventq));
+
+ for (j = 0; j < EF4_TXQ_TYPES; j++) {
+ tx_queue = &channel->tx_queue[j];
+ if (tx_queue->channel)
+ tx_queue->channel = channel;
+ tx_queue->buffer = NULL;
+ memset(&tx_queue->txd, 0, sizeof(tx_queue->txd));
+ }
+
+ rx_queue = &channel->rx_queue;
+ rx_queue->buffer = NULL;
+ memset(&rx_queue->rxd, 0, sizeof(rx_queue->rxd));
+ setup_timer(&rx_queue->slow_fill, ef4_rx_slow_fill,
+ (unsigned long)rx_queue);
+
+ return channel;
+}
+
+static int ef4_probe_channel(struct ef4_channel *channel)
+{
+ struct ef4_tx_queue *tx_queue;
+ struct ef4_rx_queue *rx_queue;
+ int rc;
+
+ netif_dbg(channel->efx, probe, channel->efx->net_dev,
+ "creating channel %d\n", channel->channel);
+
+ rc = channel->type->pre_probe(channel);
+ if (rc)
+ goto fail;
+
+ rc = ef4_probe_eventq(channel);
+ if (rc)
+ goto fail;
+
+ ef4_for_each_channel_tx_queue(tx_queue, channel) {
+ rc = ef4_probe_tx_queue(tx_queue);
+ if (rc)
+ goto fail;
+ }
+
+ ef4_for_each_channel_rx_queue(rx_queue, channel) {
+ rc = ef4_probe_rx_queue(rx_queue);
+ if (rc)
+ goto fail;
+ }
+
+ return 0;
+
+fail:
+ ef4_remove_channel(channel);
+ return rc;
+}
+
+static void
+ef4_get_channel_name(struct ef4_channel *channel, char *buf, size_t len)
+{
+ struct ef4_nic *efx = channel->efx;
+ const char *type;
+ int number;
+
+ number = channel->channel;
+ if (efx->tx_channel_offset == 0) {
+ type = "";
+ } else if (channel->channel < efx->tx_channel_offset) {
+ type = "-rx";
+ } else {
+ type = "-tx";
+ number -= efx->tx_channel_offset;
+ }
+ snprintf(buf, len, "%s%s-%d", efx->name, type, number);
+}
+
+static void ef4_set_channel_names(struct ef4_nic *efx)
+{
+ struct ef4_channel *channel;
+
+ ef4_for_each_channel(channel, efx)
+ channel->type->get_name(channel,
+ efx->msi_context[channel->channel].name,
+ sizeof(efx->msi_context[0].name));
+}
+
+static int ef4_probe_channels(struct ef4_nic *efx)
+{
+ struct ef4_channel *channel;
+ int rc;
+
+ /* Restart special buffer allocation */
+ efx->next_buffer_table = 0;
+
+ /* Probe channels in reverse, so that any 'extra' channels
+ * use the start of the buffer table. This allows the traffic
+ * channels to be resized without moving them or wasting the
+ * entries before them.
+ */
+ ef4_for_each_channel_rev(channel, efx) {
+ rc = ef4_probe_channel(channel);
+ if (rc) {
+ netif_err(efx, probe, efx->net_dev,
+ "failed to create channel %d\n",
+ channel->channel);
+ goto fail;
+ }
+ }
+ ef4_set_channel_names(efx);
+
+ return 0;
+
+fail:
+ ef4_remove_channels(efx);
+ return rc;
+}
+
+/* Channels are shut down and reinitialised whilst the NIC is running
+ * to propagate configuration changes (MTU, checksum offload), or
+ * to clear hardware error conditions.
+ */
+static void ef4_start_datapath(struct ef4_nic *efx)
+{
+ netdev_features_t old_features = efx->net_dev->features;
+ bool old_rx_scatter = efx->rx_scatter;
+ struct ef4_tx_queue *tx_queue;
+ struct ef4_rx_queue *rx_queue;
+ struct ef4_channel *channel;
+ size_t rx_buf_len;
+
+ /* Calculate the rx buffer allocation parameters required to
+ * support the current MTU, including padding for header
+ * alignment and overruns.
+ */
+ efx->rx_dma_len = (efx->rx_prefix_size +
+ EF4_MAX_FRAME_LEN(efx->net_dev->mtu) +
+ efx->type->rx_buffer_padding);
+ rx_buf_len = (sizeof(struct ef4_rx_page_state) +
+ efx->rx_ip_align + efx->rx_dma_len);
+ if (rx_buf_len <= PAGE_SIZE) {
+ efx->rx_scatter = efx->type->always_rx_scatter;
+ efx->rx_buffer_order = 0;
+ } else if (efx->type->can_rx_scatter) {
+ BUILD_BUG_ON(EF4_RX_USR_BUF_SIZE % L1_CACHE_BYTES);
+ BUILD_BUG_ON(sizeof(struct ef4_rx_page_state) +
+ 2 * ALIGN(NET_IP_ALIGN + EF4_RX_USR_BUF_SIZE,
+ EF4_RX_BUF_ALIGNMENT) >
+ PAGE_SIZE);
+ efx->rx_scatter = true;
+ efx->rx_dma_len = EF4_RX_USR_BUF_SIZE;
+ efx->rx_buffer_order = 0;
+ } else {
+ efx->rx_scatter = false;
+ efx->rx_buffer_order = get_order(rx_buf_len);
+ }
+
+ ef4_rx_config_page_split(efx);
+ if (efx->rx_buffer_order)
+ netif_dbg(efx, drv, efx->net_dev,
+ "RX buf len=%u; page order=%u batch=%u\n",
+ efx->rx_dma_len, efx->rx_buffer_order,
+ efx->rx_pages_per_batch);
+ else
+ netif_dbg(efx, drv, efx->net_dev,
+ "RX buf len=%u step=%u bpp=%u; page batch=%u\n",
+ efx->rx_dma_len, efx->rx_page_buf_step,
+ efx->rx_bufs_per_page, efx->rx_pages_per_batch);
+
+ /* Restore previously fixed features in hw_features and remove
+ * features which are fixed now
+ */
+ efx->net_dev->hw_features |= efx->net_dev->features;
+ efx->net_dev->hw_features &= ~efx->fixed_features;
+ efx->net_dev->features |= efx->fixed_features;
+ if (efx->net_dev->features != old_features)
+ netdev_features_change(efx->net_dev);
+
+ /* RX filters may also have scatter-enabled flags */
+ if (efx->rx_scatter != old_rx_scatter)
+ efx->type->filter_update_rx_scatter(efx);
+
+ /* We must keep at least one descriptor in a TX ring empty.
+ * We could avoid this when the queue size does not exactly
+ * match the hardware ring size, but it's not that important.
+ * Therefore we stop the queue when one more skb might fill
+ * the ring completely. We wake it when half way back to
+ * empty.
+ */
+ efx->txq_stop_thresh = efx->txq_entries - ef4_tx_max_skb_descs(efx);
+ efx->txq_wake_thresh = efx->txq_stop_thresh / 2;
+
+ /* Initialise the channels */
+ ef4_for_each_channel(channel, efx) {
+ ef4_for_each_channel_tx_queue(tx_queue, channel) {
+ ef4_init_tx_queue(tx_queue);
+ atomic_inc(&efx->active_queues);
+ }
+
+ ef4_for_each_channel_rx_queue(rx_queue, channel) {
+ ef4_init_rx_queue(rx_queue);
+ atomic_inc(&efx->active_queues);
+ ef4_stop_eventq(channel);
+ ef4_fast_push_rx_descriptors(rx_queue, false);
+ ef4_start_eventq(channel);
+ }
+
+ WARN_ON(channel->rx_pkt_n_frags);
+ }
+
+ if (netif_device_present(efx->net_dev))
+ netif_tx_wake_all_queues(efx->net_dev);
+}
+
+static void ef4_stop_datapath(struct ef4_nic *efx)
+{
+ struct ef4_channel *channel;
+ struct ef4_tx_queue *tx_queue;
+ struct ef4_rx_queue *rx_queue;
+ int rc;
+
+ EF4_ASSERT_RESET_SERIALISED(efx);
+ BUG_ON(efx->port_enabled);
+
+ /* Stop RX refill */
+ ef4_for_each_channel(channel, efx) {
+ ef4_for_each_channel_rx_queue(rx_queue, channel)
+ rx_queue->refill_enabled = false;
+ }
+
+ ef4_for_each_channel(channel, efx) {
+ /* RX packet processing is pipelined, so wait for the
+ * NAPI handler to complete. At least event queue 0
+ * might be kept active by non-data events, so don't
+ * use napi_synchronize() but actually disable NAPI
+ * temporarily.
+ */
+ if (ef4_channel_has_rx_queue(channel)) {
+ ef4_stop_eventq(channel);
+ ef4_start_eventq(channel);
+ }
+ }
+
+ rc = efx->type->fini_dmaq(efx);
+ if (rc && EF4_WORKAROUND_7803(efx)) {
+ /* Schedule a reset to recover from the flush failure. The
+ * descriptor caches reference memory we're about to free,
+ * but falcon_reconfigure_mac_wrapper() won't reconnect
+ * the MACs because of the pending reset.
+ */
+ netif_err(efx, drv, efx->net_dev,
+ "Resetting to recover from flush failure\n");
+ ef4_schedule_reset(efx, RESET_TYPE_ALL);
+ } else if (rc) {
+ netif_err(efx, drv, efx->net_dev, "failed to flush queues\n");
+ } else {
+ netif_dbg(efx, drv, efx->net_dev,
+ "successfully flushed all queues\n");
+ }
+
+ ef4_for_each_channel(channel, efx) {
+ ef4_for_each_channel_rx_queue(rx_queue, channel)
+ ef4_fini_rx_queue(rx_queue);
+ ef4_for_each_possible_channel_tx_queue(tx_queue, channel)
+ ef4_fini_tx_queue(tx_queue);
+ }
+}
+
+static void ef4_remove_channel(struct ef4_channel *channel)
+{
+ struct ef4_tx_queue *tx_queue;
+ struct ef4_rx_queue *rx_queue;
+
+ netif_dbg(channel->efx, drv, channel->efx->net_dev,
+ "destroy chan %d\n", channel->channel);
+
+ ef4_for_each_channel_rx_queue(rx_queue, channel)
+ ef4_remove_rx_queue(rx_queue);
+ ef4_for_each_possible_channel_tx_queue(tx_queue, channel)
+ ef4_remove_tx_queue(tx_queue);
+ ef4_remove_eventq(channel);
+ channel->type->post_remove(channel);
+}
+
+static void ef4_remove_channels(struct ef4_nic *efx)
+{
+ struct ef4_channel *channel;
+
+ ef4_for_each_channel(channel, efx)
+ ef4_remove_channel(channel);
+}
+
+int
+ef4_realloc_channels(struct ef4_nic *efx, u32 rxq_entries, u32 txq_entries)
+{
+ struct ef4_channel *other_channel[EF4_MAX_CHANNELS], *channel;
+ u32 old_rxq_entries, old_txq_entries;
+ unsigned i, next_buffer_table = 0;
+ int rc, rc2;
+
+ rc = ef4_check_disabled(efx);
+ if (rc)
+ return rc;
+
+ /* Not all channels should be reallocated. We must avoid
+ * reallocating their buffer table entries.
+ */
+ ef4_for_each_channel(channel, efx) {
+ struct ef4_rx_queue *rx_queue;
+ struct ef4_tx_queue *tx_queue;
+
+ if (channel->type->copy)
+ continue;
+ next_buffer_table = max(next_buffer_table,
+ channel->eventq.index +
+ channel->eventq.entries);
+ ef4_for_each_channel_rx_queue(rx_queue, channel)
+ next_buffer_table = max(next_buffer_table,
+ rx_queue->rxd.index +
+ rx_queue->rxd.entries);
+ ef4_for_each_channel_tx_queue(tx_queue, channel)
+ next_buffer_table = max(next_buffer_table,
+ tx_queue->txd.index +
+ tx_queue->txd.entries);
+ }
+
+ ef4_device_detach_sync(efx);
+ ef4_stop_all(efx);
+ ef4_soft_disable_interrupts(efx);
+
+ /* Clone channels (where possible) */
+ memset(other_channel, 0, sizeof(other_channel));
+ for (i = 0; i < efx->n_channels; i++) {
+ channel = efx->channel[i];
+ if (channel->type->copy)
+ channel = channel->type->copy(channel);
+ if (!channel) {
+ rc = -ENOMEM;
+ goto out;
+ }
+ other_channel[i] = channel;
+ }
+
+ /* Swap entry counts and channel pointers */
+ old_rxq_entries = efx->rxq_entries;
+ old_txq_entries = efx->txq_entries;
+ efx->rxq_entries = rxq_entries;
+ efx->txq_entries = txq_entries;
+ for (i = 0; i < efx->n_channels; i++) {
+ channel = efx->channel[i];
+ efx->channel[i] = other_channel[i];
+ other_channel[i] = channel;
+ }
+
+ /* Restart buffer table allocation */
+ efx->next_buffer_table = next_buffer_table;
+
+ for (i = 0; i < efx->n_channels; i++) {
+ channel = efx->channel[i];
+ if (!channel->type->copy)
+ continue;
+ rc = ef4_probe_channel(channel);
+ if (rc)
+ goto rollback;
+ ef4_init_napi_channel(efx->channel[i]);
+ }
+
+out:
+ /* Destroy unused channel structures */
+ for (i = 0; i < efx->n_channels; i++) {
+ channel = other_channel[i];
+ if (channel && channel->type->copy) {
+ ef4_fini_napi_channel(channel);
+ ef4_remove_channel(channel);
+ kfree(channel);
+ }
+ }
+
+ rc2 = ef4_soft_enable_interrupts(efx);
+ if (rc2) {
+ rc = rc ? rc : rc2;
+ netif_err(efx, drv, efx->net_dev,
+ "unable to restart interrupts on channel reallocation\n");
+ ef4_schedule_reset(efx, RESET_TYPE_DISABLE);
+ } else {
+ ef4_start_all(efx);
+ netif_device_attach(efx->net_dev);
+ }
+ return rc;
+
+rollback:
+ /* Swap back */
+ efx->rxq_entries = old_rxq_entries;
+ efx->txq_entries = old_txq_entries;
+ for (i = 0; i < efx->n_channels; i++) {
+ channel = efx->channel[i];
+ efx->channel[i] = other_channel[i];
+ other_channel[i] = channel;
+ }
+ goto out;
+}
+
+void ef4_schedule_slow_fill(struct ef4_rx_queue *rx_queue)
+{
+ mod_timer(&rx_queue->slow_fill, jiffies + msecs_to_jiffies(100));
+}
+
+static const struct ef4_channel_type ef4_default_channel_type = {
+ .pre_probe = ef4_channel_dummy_op_int,
+ .post_remove = ef4_channel_dummy_op_void,
+ .get_name = ef4_get_channel_name,
+ .copy = ef4_copy_channel,
+ .keep_eventq = false,
+};
+
+int ef4_channel_dummy_op_int(struct ef4_channel *channel)
+{
+ return 0;
+}
+
+void ef4_channel_dummy_op_void(struct ef4_channel *channel)
+{
+}
+
+/**************************************************************************
+ *
+ * Port handling
+ *
+ **************************************************************************/
+
+/* This ensures that the kernel is kept informed (via
+ * netif_carrier_on/off) of the link status, and also keeps the port's
+ * TX queue stopped while the link is down.
+ */
+void ef4_link_status_changed(struct ef4_nic *efx)
+{
+ struct ef4_link_state *link_state = &efx->link_state;
+
+ /* SFC Bug 5356: A net_dev notifier is registered, so we must ensure
+ * that no events are triggered between unregister_netdev() and the
+ * driver unloading. A more general condition is that NETDEV_CHANGE
+ * can only be generated between NETDEV_UP and NETDEV_DOWN */
+ if (!netif_running(efx->net_dev))
+ return;
+
+ if (link_state->up != netif_carrier_ok(efx->net_dev)) {
+ efx->n_link_state_changes++;
+
+ if (link_state->up)
+ netif_carrier_on(efx->net_dev);
+ else
+ netif_carrier_off(efx->net_dev);
+ }
+
+ /* Status message for kernel log */
+ if (link_state->up)
+ netif_info(efx, link, efx->net_dev,
+ "link up at %uMbps %s-duplex (MTU %d)\n",
+ link_state->speed, link_state->fd ? "full" : "half",
+ efx->net_dev->mtu);
+ else
+ netif_info(efx, link, efx->net_dev, "link down\n");
+}
+
+void ef4_link_set_advertising(struct ef4_nic *efx, u32 advertising)
+{
+ efx->link_advertising = advertising;
+ if (advertising) {
+ if (advertising & ADVERTISED_Pause)
+ efx->wanted_fc |= (EF4_FC_TX | EF4_FC_RX);
+ else
+ efx->wanted_fc &= ~(EF4_FC_TX | EF4_FC_RX);
+ if (advertising & ADVERTISED_Asym_Pause)
+ efx->wanted_fc ^= EF4_FC_TX;
+ }
+}
+
+void ef4_link_set_wanted_fc(struct ef4_nic *efx, u8 wanted_fc)
+{
+ efx->wanted_fc = wanted_fc;
+ if (efx->link_advertising) {
+ if (wanted_fc & EF4_FC_RX)
+ efx->link_advertising |= (ADVERTISED_Pause |
+ ADVERTISED_Asym_Pause);
+ else
+ efx->link_advertising &= ~(ADVERTISED_Pause |
+ ADVERTISED_Asym_Pause);
+ if (wanted_fc & EF4_FC_TX)
+ efx->link_advertising ^= ADVERTISED_Asym_Pause;
+ }
+}
+
+static void ef4_fini_port(struct ef4_nic *efx);
+
+/* We assume that efx->type->reconfigure_mac will always try to sync RX
+ * filters and therefore needs to read-lock the filter table against freeing
+ */
+void ef4_mac_reconfigure(struct ef4_nic *efx)
+{
+ down_read(&efx->filter_sem);
+ efx->type->reconfigure_mac(efx);
+ up_read(&efx->filter_sem);
+}
+
+/* Push loopback/power/transmit disable settings to the PHY, and reconfigure
+ * the MAC appropriately. All other PHY configuration changes are pushed
+ * through phy_op->set_settings(), and pushed asynchronously to the MAC
+ * through ef4_monitor().
+ *
+ * Callers must hold the mac_lock
+ */
+int __ef4_reconfigure_port(struct ef4_nic *efx)
+{
+ enum ef4_phy_mode phy_mode;
+ int rc;
+
+ WARN_ON(!mutex_is_locked(&efx->mac_lock));
+
+ /* Disable PHY transmit in mac level loopbacks */
+ phy_mode = efx->phy_mode;
+ if (LOOPBACK_INTERNAL(efx))
+ efx->phy_mode |= PHY_MODE_TX_DISABLED;
+ else
+ efx->phy_mode &= ~PHY_MODE_TX_DISABLED;
+
+ rc = efx->type->reconfigure_port(efx);
+
+ if (rc)
+ efx->phy_mode = phy_mode;
+
+ return rc;
+}
+
+/* Reinitialise the MAC to pick up new PHY settings, even if the port is
+ * disabled. */
+int ef4_reconfigure_port(struct ef4_nic *efx)
+{
+ int rc;
+
+ EF4_ASSERT_RESET_SERIALISED(efx);
+
+ mutex_lock(&efx->mac_lock);
+ rc = __ef4_reconfigure_port(efx);
+ mutex_unlock(&efx->mac_lock);
+
+ return rc;
+}
+
+/* Asynchronous work item for changing MAC promiscuity and multicast
+ * hash. Avoid a drain/rx_ingress enable by reconfiguring the current
+ * MAC directly. */
+static void ef4_mac_work(struct work_struct *data)
+{
+ struct ef4_nic *efx = container_of(data, struct ef4_nic, mac_work);
+
+ mutex_lock(&efx->mac_lock);
+ if (efx->port_enabled)
+ ef4_mac_reconfigure(efx);
+ mutex_unlock(&efx->mac_lock);
+}
+
+static int ef4_probe_port(struct ef4_nic *efx)
+{
+ int rc;
+
+ netif_dbg(efx, probe, efx->net_dev, "create port\n");
+
+ if (phy_flash_cfg)
+ efx->phy_mode = PHY_MODE_SPECIAL;
+
+ /* Connect up MAC/PHY operations table */
+ rc = efx->type->probe_port(efx);
+ if (rc)
+ return rc;
+
+ /* Initialise MAC address to permanent address */
+ ether_addr_copy(efx->net_dev->dev_addr, efx->net_dev->perm_addr);
+
+ return 0;
+}
+
+static int ef4_init_port(struct ef4_nic *efx)
+{
+ int rc;
+
+ netif_dbg(efx, drv, efx->net_dev, "init port\n");
+
+ mutex_lock(&efx->mac_lock);
+
+ rc = efx->phy_op->init(efx);
+ if (rc)
+ goto fail1;
+
+ efx->port_initialized = true;
+
+ /* Reconfigure the MAC before creating dma queues (required for
+ * Falcon/A1 where RX_INGR_EN/TX_DRAIN_EN isn't supported) */
+ ef4_mac_reconfigure(efx);
+
+ /* Ensure the PHY advertises the correct flow control settings */
+ rc = efx->phy_op->reconfigure(efx);
+ if (rc && rc != -EPERM)
+ goto fail2;
+
+ mutex_unlock(&efx->mac_lock);
+ return 0;
+
+fail2:
+ efx->phy_op->fini(efx);
+fail1:
+ mutex_unlock(&efx->mac_lock);
+ return rc;
+}
+
+static void ef4_start_port(struct ef4_nic *efx)
+{
+ netif_dbg(efx, ifup, efx->net_dev, "start port\n");
+ BUG_ON(efx->port_enabled);
+
+ mutex_lock(&efx->mac_lock);
+ efx->port_enabled = true;
+
+ /* Ensure MAC ingress/egress is enabled */
+ ef4_mac_reconfigure(efx);
+
+ mutex_unlock(&efx->mac_lock);
+}
+
+/* Cancel work for MAC reconfiguration, periodic hardware monitoring
+ * and the async self-test, wait for them to finish and prevent them
+ * being scheduled again. This doesn't cover online resets, which
+ * should only be cancelled when removing the device.
+ */
+static void ef4_stop_port(struct ef4_nic *efx)
+{
+ netif_dbg(efx, ifdown, efx->net_dev, "stop port\n");
+
+ EF4_ASSERT_RESET_SERIALISED(efx);
+
+ mutex_lock(&efx->mac_lock);
+ efx->port_enabled = false;
+ mutex_unlock(&efx->mac_lock);
+
+ /* Serialise against ef4_set_multicast_list() */
+ netif_addr_lock_bh(efx->net_dev);
+ netif_addr_unlock_bh(efx->net_dev);
+
+ cancel_delayed_work_sync(&efx->monitor_work);
+ ef4_selftest_async_cancel(efx);
+ cancel_work_sync(&efx->mac_work);
+}
+
+static void ef4_fini_port(struct ef4_nic *efx)
+{
+ netif_dbg(efx, drv, efx->net_dev, "shut down port\n");
+
+ if (!efx->port_initialized)
+ return;
+
+ efx->phy_op->fini(efx);
+ efx->port_initialized = false;
+
+ efx->link_state.up = false;
+ ef4_link_status_changed(efx);
+}
+
+static void ef4_remove_port(struct ef4_nic *efx)
+{
+ netif_dbg(efx, drv, efx->net_dev, "destroying port\n");
+
+ efx->type->remove_port(efx);
+}
+
+/**************************************************************************
+ *
+ * NIC handling
+ *
+ **************************************************************************/
+
+static LIST_HEAD(ef4_primary_list);
+static LIST_HEAD(ef4_unassociated_list);
+
+static bool ef4_same_controller(struct ef4_nic *left, struct ef4_nic *right)
+{
+ return left->type == right->type &&
+ left->vpd_sn && right->vpd_sn &&
+ !strcmp(left->vpd_sn, right->vpd_sn);
+}
+
+static void ef4_associate(struct ef4_nic *efx)
+{
+ struct ef4_nic *other, *next;
+
+ if (efx->primary == efx) {
+ /* Adding primary function; look for secondaries */
+
+ netif_dbg(efx, probe, efx->net_dev, "adding to primary list\n");
+ list_add_tail(&efx->node, &ef4_primary_list);
+
+ list_for_each_entry_safe(other, next, &ef4_unassociated_list,
+ node) {
+ if (ef4_same_controller(efx, other)) {
+ list_del(&other->node);
+ netif_dbg(other, probe, other->net_dev,
+ "moving to secondary list of %s %s\n",
+ pci_name(efx->pci_dev),
+ efx->net_dev->name);
+ list_add_tail(&other->node,
+ &efx->secondary_list);
+ other->primary = efx;
+ }
+ }
+ } else {
+ /* Adding secondary function; look for primary */
+
+ list_for_each_entry(other, &ef4_primary_list, node) {
+ if (ef4_same_controller(efx, other)) {
+ netif_dbg(efx, probe, efx->net_dev,
+ "adding to secondary list of %s %s\n",
+ pci_name(other->pci_dev),
+ other->net_dev->name);
+ list_add_tail(&efx->node,
+ &other->secondary_list);
+ efx->primary = other;
+ return;
+ }
+ }
+
+ netif_dbg(efx, probe, efx->net_dev,
+ "adding to unassociated list\n");
+ list_add_tail(&efx->node, &ef4_unassociated_list);
+ }
+}
+
+static void ef4_dissociate(struct ef4_nic *efx)
+{
+ struct ef4_nic *other, *next;
+
+ list_del(&efx->node);
+ efx->primary = NULL;
+
+ list_for_each_entry_safe(other, next, &efx->secondary_list, node) {
+ list_del(&other->node);
+ netif_dbg(other, probe, other->net_dev,
+ "moving to unassociated list\n");
+ list_add_tail(&other->node, &ef4_unassociated_list);
+ other->primary = NULL;
+ }
+}
+
+/* This configures the PCI device to enable I/O and DMA. */
+static int ef4_init_io(struct ef4_nic *efx)
+{
+ struct pci_dev *pci_dev = efx->pci_dev;
+ dma_addr_t dma_mask = efx->type->max_dma_mask;
+ unsigned int mem_map_size = efx->type->mem_map_size(efx);
+ int rc, bar;
+
+ netif_dbg(efx, probe, efx->net_dev, "initialising I/O\n");
+
+ bar = efx->type->mem_bar;
+
+ rc = pci_enable_device(pci_dev);
+ if (rc) {
+ netif_err(efx, probe, efx->net_dev,
+ "failed to enable PCI device\n");
+ goto fail1;
+ }
+
+ pci_set_master(pci_dev);
+
+ /* Set the PCI DMA mask. Try all possibilities from our
+ * genuine mask down to 32 bits, because some architectures
+ * (e.g. x86_64 with iommu_sac_force set) will allow 40 bit
+ * masks even though they reject 46 bit masks.
+ */
+ while (dma_mask > 0x7fffffffUL) {
+ rc = dma_set_mask_and_coherent(&pci_dev->dev, dma_mask);
+ if (rc == 0)
+ break;
+ dma_mask >>= 1;
+ }
+ if (rc) {
+ netif_err(efx, probe, efx->net_dev,
+ "could not find a suitable DMA mask\n");
+ goto fail2;
+ }
+ netif_dbg(efx, probe, efx->net_dev,
+ "using DMA mask %llx\n", (unsigned long long) dma_mask);
+
+ efx->membase_phys = pci_resource_start(efx->pci_dev, bar);
+ rc = pci_request_region(pci_dev, bar, "sfc");
+ if (rc) {
+ netif_err(efx, probe, efx->net_dev,
+ "request for memory BAR failed\n");
+ rc = -EIO;
+ goto fail3;
+ }
+ efx->membase = ioremap_nocache(efx->membase_phys, mem_map_size);
+ if (!efx->membase) {
+ netif_err(efx, probe, efx->net_dev,
+ "could not map memory BAR at %llx+%x\n",
+ (unsigned long long)efx->membase_phys, mem_map_size);
+ rc = -ENOMEM;
+ goto fail4;
+ }
+ netif_dbg(efx, probe, efx->net_dev,
+ "memory BAR at %llx+%x (virtual %p)\n",
+ (unsigned long long)efx->membase_phys, mem_map_size,
+ efx->membase);
+
+ return 0;
+
+ fail4:
+ pci_release_region(efx->pci_dev, bar);
+ fail3:
+ efx->membase_phys = 0;
+ fail2:
+ pci_disable_device(efx->pci_dev);
+ fail1:
+ return rc;
+}
+
+static void ef4_fini_io(struct ef4_nic *efx)
+{
+ int bar;
+
+ netif_dbg(efx, drv, efx->net_dev, "shutting down I/O\n");
+
+ if (efx->membase) {
+ iounmap(efx->membase);
+ efx->membase = NULL;
+ }
+
+ if (efx->membase_phys) {
+ bar = efx->type->mem_bar;
+ pci_release_region(efx->pci_dev, bar);
+ efx->membase_phys = 0;
+ }
+
+ /* Don't disable bus-mastering if VFs are assigned */
+ if (!pci_vfs_assigned(efx->pci_dev))
+ pci_disable_device(efx->pci_dev);
+}
+
+void ef4_set_default_rx_indir_table(struct ef4_nic *efx)
+{
+ size_t i;
+
+ for (i = 0; i < ARRAY_SIZE(efx->rx_indir_table); i++)
+ efx->rx_indir_table[i] =
+ ethtool_rxfh_indir_default(i, efx->rss_spread);
+}
+
+static unsigned int ef4_wanted_parallelism(struct ef4_nic *efx)
+{
+ cpumask_var_t thread_mask;
+ unsigned int count;
+ int cpu;
+
+ if (rss_cpus) {
+ count = rss_cpus;
+ } else {
+ if (unlikely(!zalloc_cpumask_var(&thread_mask, GFP_KERNEL))) {
+ netif_warn(efx, probe, efx->net_dev,
+ "RSS disabled due to allocation failure\n");
+ return 1;
+ }
+
+ count = 0;
+ for_each_online_cpu(cpu) {
+ if (!cpumask_test_cpu(cpu, thread_mask)) {
+ ++count;
+ cpumask_or(thread_mask, thread_mask,
+ topology_sibling_cpumask(cpu));
+ }
+ }
+
+ free_cpumask_var(thread_mask);
+ }
+
+ return count;
+}
+
+/* Probe the number and type of interrupts we are able to obtain, and
+ * the resulting numbers of channels and RX queues.
+ */
+static int ef4_probe_interrupts(struct ef4_nic *efx)
+{
+ unsigned int extra_channels = 0;
+ unsigned int i, j;
+ int rc;
+
+ for (i = 0; i < EF4_MAX_EXTRA_CHANNELS; i++)
+ if (efx->extra_channel_type[i])
+ ++extra_channels;
+
+ if (efx->interrupt_mode == EF4_INT_MODE_MSIX) {
+ struct msix_entry xentries[EF4_MAX_CHANNELS];
+ unsigned int n_channels;
+
+ n_channels = ef4_wanted_parallelism(efx);
+ if (ef4_separate_tx_channels)
+ n_channels *= 2;
+ n_channels += extra_channels;
+ n_channels = min(n_channels, efx->max_channels);
+
+ for (i = 0; i < n_channels; i++)
+ xentries[i].entry = i;
+ rc = pci_enable_msix_range(efx->pci_dev,
+ xentries, 1, n_channels);
+ if (rc < 0) {
+ /* Fall back to single channel MSI */
+ efx->interrupt_mode = EF4_INT_MODE_MSI;
+ netif_err(efx, drv, efx->net_dev,
+ "could not enable MSI-X\n");
+ } else if (rc < n_channels) {
+ netif_err(efx, drv, efx->net_dev,
+ "WARNING: Insufficient MSI-X vectors"
+ " available (%d < %u).\n", rc, n_channels);
+ netif_err(efx, drv, efx->net_dev,
+ "WARNING: Performance may be reduced.\n");
+ n_channels = rc;
+ }
+
+ if (rc > 0) {
+ efx->n_channels = n_channels;
+ if (n_channels > extra_channels)
+ n_channels -= extra_channels;
+ if (ef4_separate_tx_channels) {
+ efx->n_tx_channels = min(max(n_channels / 2,
+ 1U),
+ efx->max_tx_channels);
+ efx->n_rx_channels = max(n_channels -
+ efx->n_tx_channels,
+ 1U);
+ } else {
+ efx->n_tx_channels = min(n_channels,
+ efx->max_tx_channels);
+ efx->n_rx_channels = n_channels;
+ }
+ for (i = 0; i < efx->n_channels; i++)
+ ef4_get_channel(efx, i)->irq =
+ xentries[i].vector;
+ }
+ }
+
+ /* Try single interrupt MSI */
+ if (efx->interrupt_mode == EF4_INT_MODE_MSI) {
+ efx->n_channels = 1;
+ efx->n_rx_channels = 1;
+ efx->n_tx_channels = 1;
+ rc = pci_enable_msi(efx->pci_dev);
+ if (rc == 0) {
+ ef4_get_channel(efx, 0)->irq = efx->pci_dev->irq;
+ } else {
+ netif_err(efx, drv, efx->net_dev,
+ "could not enable MSI\n");
+ efx->interrupt_mode = EF4_INT_MODE_LEGACY;
+ }
+ }
+
+ /* Assume legacy interrupts */
+ if (efx->interrupt_mode == EF4_INT_MODE_LEGACY) {
+ efx->n_channels = 1 + (ef4_separate_tx_channels ? 1 : 0);
+ efx->n_rx_channels = 1;
+ efx->n_tx_channels = 1;
+ efx->legacy_irq = efx->pci_dev->irq;
+ }
+
+ /* Assign extra channels if possible */
+ j = efx->n_channels;
+ for (i = 0; i < EF4_MAX_EXTRA_CHANNELS; i++) {
+ if (!efx->extra_channel_type[i])
+ continue;
+ if (efx->interrupt_mode != EF4_INT_MODE_MSIX ||
+ efx->n_channels <= extra_channels) {
+ efx->extra_channel_type[i]->handle_no_channel(efx);
+ } else {
+ --j;
+ ef4_get_channel(efx, j)->type =
+ efx->extra_channel_type[i];
+ }
+ }
+
+ efx->rss_spread = efx->n_rx_channels;
+
+ return 0;
+}
+
+static int ef4_soft_enable_interrupts(struct ef4_nic *efx)
+{
+ struct ef4_channel *channel, *end_channel;
+ int rc;
+
+ BUG_ON(efx->state == STATE_DISABLED);
+
+ efx->irq_soft_enabled = true;
+ smp_wmb();
+
+ ef4_for_each_channel(channel, efx) {
+ if (!channel->type->keep_eventq) {
+ rc = ef4_init_eventq(channel);
+ if (rc)
+ goto fail;
+ }
+ ef4_start_eventq(channel);
+ }
+
+ return 0;
+fail:
+ end_channel = channel;
+ ef4_for_each_channel(channel, efx) {
+ if (channel == end_channel)
+ break;
+ ef4_stop_eventq(channel);
+ if (!channel->type->keep_eventq)
+ ef4_fini_eventq(channel);
+ }
+
+ return rc;
+}
+
+static void ef4_soft_disable_interrupts(struct ef4_nic *efx)
+{
+ struct ef4_channel *channel;
+
+ if (efx->state == STATE_DISABLED)
+ return;
+
+ efx->irq_soft_enabled = false;
+ smp_wmb();
+
+ if (efx->legacy_irq)
+ synchronize_irq(efx->legacy_irq);
+
+ ef4_for_each_channel(channel, efx) {
+ if (channel->irq)
+ synchronize_irq(channel->irq);
+
+ ef4_stop_eventq(channel);
+ if (!channel->type->keep_eventq)
+ ef4_fini_eventq(channel);
+ }
+}
+
+static int ef4_enable_interrupts(struct ef4_nic *efx)
+{
+ struct ef4_channel *channel, *end_channel;
+ int rc;
+
+ BUG_ON(efx->state == STATE_DISABLED);
+
+ if (efx->eeh_disabled_legacy_irq) {
+ enable_irq(efx->legacy_irq);
+ efx->eeh_disabled_legacy_irq = false;
+ }
+
+ efx->type->irq_enable_master(efx);
+
+ ef4_for_each_channel(channel, efx) {
+ if (channel->type->keep_eventq) {
+ rc = ef4_init_eventq(channel);
+ if (rc)
+ goto fail;
+ }
+ }
+
+ rc = ef4_soft_enable_interrupts(efx);
+ if (rc)
+ goto fail;
+
+ return 0;
+
+fail:
+ end_channel = channel;
+ ef4_for_each_channel(channel, efx) {
+ if (channel == end_channel)
+ break;
+ if (channel->type->keep_eventq)
+ ef4_fini_eventq(channel);
+ }
+
+ efx->type->irq_disable_non_ev(efx);
+
+ return rc;
+}
+
+static void ef4_disable_interrupts(struct ef4_nic *efx)
+{
+ struct ef4_channel *channel;
+
+ ef4_soft_disable_interrupts(efx);
+
+ ef4_for_each_channel(channel, efx) {
+ if (channel->type->keep_eventq)
+ ef4_fini_eventq(channel);
+ }
+
+ efx->type->irq_disable_non_ev(efx);
+}
+
+static void ef4_remove_interrupts(struct ef4_nic *efx)
+{
+ struct ef4_channel *channel;
+
+ /* Remove MSI/MSI-X interrupts */
+ ef4_for_each_channel(channel, efx)
+ channel->irq = 0;
+ pci_disable_msi(efx->pci_dev);
+ pci_disable_msix(efx->pci_dev);
+
+ /* Remove legacy interrupt */
+ efx->legacy_irq = 0;
+}
+
+static void ef4_set_channels(struct ef4_nic *efx)
+{
+ struct ef4_channel *channel;
+ struct ef4_tx_queue *tx_queue;
+
+ efx->tx_channel_offset =
+ ef4_separate_tx_channels ?
+ efx->n_channels - efx->n_tx_channels : 0;
+
+ /* We need to mark which channels really have RX and TX
+ * queues, and adjust the TX queue numbers if we have separate
+ * RX-only and TX-only channels.
+ */
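+ /* Worked example (assumed values): with n_channels = 4,
+ * n_tx_channels = 2 and ef4_separate_tx_channels set,
+ * tx_channel_offset = 2, so channels 0-1 carry the RX queues and
+ * channels 2-3 carry the TX queues, whose queue numbers are
+ * shifted down by 2 * EF4_TXQ_TYPES.
+ */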
+ ef4_for_each_channel(channel, efx) {
+ if (channel->channel < efx->n_rx_channels)
+ channel->rx_queue.core_index = channel->channel;
+ else
+ channel->rx_queue.core_index = -1;
+
+ ef4_for_each_channel_tx_queue(tx_queue, channel)
+ tx_queue->queue -= (efx->tx_channel_offset *
+ EF4_TXQ_TYPES);
+ }
+}
+
+static int ef4_probe_nic(struct ef4_nic *efx)
+{
+ int rc;
+
+ netif_dbg(efx, probe, efx->net_dev, "creating NIC\n");
+
+ /* Carry out hardware-type specific initialisation */
+ rc = efx->type->probe(efx);
+ if (rc)
+ return rc;
+
+ do {
+ if (!efx->max_channels || !efx->max_tx_channels) {
+ netif_err(efx, drv, efx->net_dev,
+ "Insufficient resources to allocate any channels\n");
+ rc = -ENOSPC;
+ goto fail1;
+ }
+
+ /* Determine the number of channels and queues by trying
+ * to hook in MSI-X interrupts.
+ */
+ rc = ef4_probe_interrupts(efx);
+ if (rc)
+ goto fail1;
+
+ ef4_set_channels(efx);
+
+ /* dimension_resources can fail with EAGAIN */
+ rc = efx->type->dimension_resources(efx);
+ if (rc != 0 && rc != -EAGAIN)
+ goto fail2;
+
+ if (rc == -EAGAIN)
+ /* try again with new max_channels */
+ ef4_remove_interrupts(efx);
+
+ } while (rc == -EAGAIN);
+
+ if (efx->n_channels > 1)
+ netdev_rss_key_fill(&efx->rx_hash_key,
+ sizeof(efx->rx_hash_key));
+ ef4_set_default_rx_indir_table(efx);
+
+ netif_set_real_num_tx_queues(efx->net_dev, efx->n_tx_channels);
+ netif_set_real_num_rx_queues(efx->net_dev, efx->n_rx_channels);
+
+ /* Initialise the interrupt moderation settings */
+ efx->irq_mod_step_us = DIV_ROUND_UP(efx->timer_quantum_ns, 1000);
+ ef4_init_irq_moderation(efx, tx_irq_mod_usec, rx_irq_mod_usec, true,
+ true);
+
+ return 0;
+
+fail2:
+ ef4_remove_interrupts(efx);
+fail1:
+ efx->type->remove(efx);
+ return rc;
+}
+
+static void ef4_remove_nic(struct ef4_nic *efx)
+{
+ netif_dbg(efx, drv, efx->net_dev, "destroying NIC\n");
+
+ ef4_remove_interrupts(efx);
+ efx->type->remove(efx);
+}
+
+static int ef4_probe_filters(struct ef4_nic *efx)
+{
+ int rc;
+
+ spin_lock_init(&efx->filter_lock);
+ init_rwsem(&efx->filter_sem);
+ mutex_lock(&efx->mac_lock);
+ down_write(&efx->filter_sem);
+ rc = efx->type->filter_table_probe(efx);
+ if (rc)
+ goto out_unlock;
+
+#ifdef CONFIG_RFS_ACCEL
+ if (efx->type->offload_features & NETIF_F_NTUPLE) {
+ struct ef4_channel *channel;
+ int i, success = 1;
+
+ ef4_for_each_channel(channel, efx) {
+ channel->rps_flow_id =
+ kcalloc(efx->type->max_rx_ip_filters,
+ sizeof(*channel->rps_flow_id),
+ GFP_KERNEL);
+ if (!channel->rps_flow_id)
+ success = 0;
+ else
+ for (i = 0;
+ i < efx->type->max_rx_ip_filters;
+ ++i)
+ channel->rps_flow_id[i] =
+ RPS_FLOW_ID_INVALID;
+ }
+
+ if (!success) {
+ ef4_for_each_channel(channel, efx)
+ kfree(channel->rps_flow_id);
+ efx->type->filter_table_remove(efx);
+ rc = -ENOMEM;
+ goto out_unlock;
+ }
+
+ efx->rps_expire_index = efx->rps_expire_channel = 0;
+ }
+#endif
+out_unlock:
+ up_write(&efx->filter_sem);
+ mutex_unlock(&efx->mac_lock);
+ return rc;
+}
+
+static void ef4_remove_filters(struct ef4_nic *efx)
+{
+#ifdef CONFIG_RFS_ACCEL
+ struct ef4_channel *channel;
+
+ ef4_for_each_channel(channel, efx)
+ kfree(channel->rps_flow_id);
+#endif
+ down_write(&efx->filter_sem);
+ efx->type->filter_table_remove(efx);
+ up_write(&efx->filter_sem);
+}
+
+static void ef4_restore_filters(struct ef4_nic *efx)
+{
+ down_read(&efx->filter_sem);
+ efx->type->filter_table_restore(efx);
+ up_read(&efx->filter_sem);
+}
+
+/**************************************************************************
+ *
+ * NIC startup/shutdown
+ *
+ *************************************************************************/
+
+static int ef4_probe_all(struct ef4_nic *efx)
+{
+ int rc;
+
+ rc = ef4_probe_nic(efx);
+ if (rc) {
+ netif_err(efx, probe, efx->net_dev, "failed to create NIC\n");
+ goto fail1;
+ }
+
+ rc = ef4_probe_port(efx);
+ if (rc) {
+ netif_err(efx, probe, efx->net_dev, "failed to create port\n");
+ goto fail2;
+ }
+
+ BUILD_BUG_ON(EF4_DEFAULT_DMAQ_SIZE < EF4_RXQ_MIN_ENT);
+ if (WARN_ON(EF4_DEFAULT_DMAQ_SIZE < EF4_TXQ_MIN_ENT(efx))) {
+ rc = -EINVAL;
+ goto fail3;
+ }
+ efx->rxq_entries = efx->txq_entries = EF4_DEFAULT_DMAQ_SIZE;
+
+ rc = ef4_probe_filters(efx);
+ if (rc) {
+ netif_err(efx, probe, efx->net_dev,
+ "failed to create filter tables\n");
+ goto fail4;
+ }
+
+ rc = ef4_probe_channels(efx);
+ if (rc)
+ goto fail5;
+
+ return 0;
+
+ fail5:
+ ef4_remove_filters(efx);
+ fail4:
+ fail3:
+ ef4_remove_port(efx);
+ fail2:
+ ef4_remove_nic(efx);
+ fail1:
+ return rc;
+}
+
+/* If the interface is supposed to be running but is not, start
+ * the hardware and software data path, regular activity for the port
+ * (MAC statistics, link polling, etc.) and schedule the port to be
+ * reconfigured. Interrupts must already be enabled. This function
+ * is safe to call multiple times, so long as the NIC is not disabled.
+ * Requires the RTNL lock.
+ */
+static void ef4_start_all(struct ef4_nic *efx)
+{
+ EF4_ASSERT_RESET_SERIALISED(efx);
+ BUG_ON(efx->state == STATE_DISABLED);
+
+ /* Check that it is appropriate to restart the interface. All
+ * of these flags are safe to read under just the rtnl lock */
+ if (efx->port_enabled || !netif_running(efx->net_dev) ||
+ efx->reset_pending)
+ return;
+
+ ef4_start_port(efx);
+ ef4_start_datapath(efx);
+
+ /* Start the hardware monitor if there is one */
+ if (efx->type->monitor != NULL)
+ queue_delayed_work(efx->workqueue, &efx->monitor_work,
+ ef4_monitor_interval);
+
+ efx->type->start_stats(efx);
+ efx->type->pull_stats(efx);
+ spin_lock_bh(&efx->stats_lock);
+ efx->type->update_stats(efx, NULL, NULL);
+ spin_unlock_bh(&efx->stats_lock);
+}
+
+/* Quiesce the hardware and software data path, and regular activity
+ * for the port without bringing the link down. Safe to call multiple
+ * times with the NIC in almost any state, but interrupts should be
+ * enabled. Requires the RTNL lock.
+ */
+static void ef4_stop_all(struct ef4_nic *efx)
+{
+ EF4_ASSERT_RESET_SERIALISED(efx);
+
+ /* port_enabled can be read safely under the rtnl lock */
+ if (!efx->port_enabled)
+ return;
+
+ /* update stats before we go down so we can accurately count
+ * rx_nodesc_drops
+ */
+ efx->type->pull_stats(efx);
+ spin_lock_bh(&efx->stats_lock);
+ efx->type->update_stats(efx, NULL, NULL);
+ spin_unlock_bh(&efx->stats_lock);
+ efx->type->stop_stats(efx);
+ ef4_stop_port(efx);
+
+ /* Stop the kernel transmit interface. This is only valid if
+ * the device is stopped or detached; otherwise the watchdog
+ * may fire immediately.
+ */
+ WARN_ON(netif_running(efx->net_dev) &&
+ netif_device_present(efx->net_dev));
+ netif_tx_disable(efx->net_dev);
+
+ ef4_stop_datapath(efx);
+}
+
+static void ef4_remove_all(struct ef4_nic *efx)
+{
+ ef4_remove_channels(efx);
+ ef4_remove_filters(efx);
+ ef4_remove_port(efx);
+ ef4_remove_nic(efx);
+}
+
+/**************************************************************************
+ *
+ * Interrupt moderation
+ *
+ **************************************************************************/
+unsigned int ef4_usecs_to_ticks(struct ef4_nic *efx, unsigned int usecs)
+{
+ if (usecs == 0)
+ return 0;
+ if (usecs * 1000 < efx->timer_quantum_ns)
+ return 1; /* never round down to 0 */
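+ /* e.g. assuming Falcon's quantum of 4968 ns: 10 us maps to
+ * 10000 / 4968 = 2 ticks (rounded down), while 1 us is caught
+ * above and rounds up to 1 tick.
+ */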
+ return usecs * 1000 / efx->timer_quantum_ns;
+}
+
+unsigned int ef4_ticks_to_usecs(struct ef4_nic *efx, unsigned int ticks)
+{
+ /* We must round up when converting ticks to microseconds
+ * because we round down when converting the other way.
+ */
+ return DIV_ROUND_UP(ticks * efx->timer_quantum_ns, 1000);
+}
+
+/* Set interrupt moderation parameters */
+int ef4_init_irq_moderation(struct ef4_nic *efx, unsigned int tx_usecs,
+ unsigned int rx_usecs, bool rx_adaptive,
+ bool rx_may_override_tx)
+{
+ struct ef4_channel *channel;
+ unsigned int timer_max_us;
+
+ EF4_ASSERT_RESET_SERIALISED(efx);
+
+ timer_max_us = efx->timer_max_ns / 1000;
+
+ if (tx_usecs > timer_max_us || rx_usecs > timer_max_us)
+ return -EINVAL;
+
+ if (tx_usecs != rx_usecs && efx->tx_channel_offset == 0 &&
+ !rx_may_override_tx) {
+ netif_err(efx, drv, efx->net_dev, "Channels are shared. "
+ "RX and TX IRQ moderation must be equal\n");
+ return -EINVAL;
+ }
+
+ efx->irq_rx_adaptive = rx_adaptive;
+ efx->irq_rx_moderation_us = rx_usecs;
+ ef4_for_each_channel(channel, efx) {
+ if (ef4_channel_has_rx_queue(channel))
+ channel->irq_moderation_us = rx_usecs;
+ else if (ef4_channel_has_tx_queues(channel))
+ channel->irq_moderation_us = tx_usecs;
+ }
+
+ return 0;
+}
+
+void ef4_get_irq_moderation(struct ef4_nic *efx, unsigned int *tx_usecs,
+ unsigned int *rx_usecs, bool *rx_adaptive)
+{
+ *rx_adaptive = efx->irq_rx_adaptive;
+ *rx_usecs = efx->irq_rx_moderation_us;
+
+ /* If channels are shared between RX and TX, so is IRQ
+ * moderation. Otherwise, IRQ moderation is the same for all
+ * TX channels and is not adaptive.
+ */
+ if (efx->tx_channel_offset == 0) {
+ *tx_usecs = *rx_usecs;
+ } else {
+ struct ef4_channel *tx_channel;
+
+ tx_channel = efx->channel[efx->tx_channel_offset];
+ *tx_usecs = tx_channel->irq_moderation_us;
+ }
+}
+
+/**************************************************************************
+ *
+ * Hardware monitor
+ *
+ **************************************************************************/
+
+/* Run periodically off the general workqueue */
+static void ef4_monitor(struct work_struct *data)
+{
+ struct ef4_nic *efx = container_of(data, struct ef4_nic,
+ monitor_work.work);
+
+ netif_vdbg(efx, timer, efx->net_dev,
+ "hardware monitor executing on CPU %d\n",
+ raw_smp_processor_id());
+ BUG_ON(efx->type->monitor == NULL);
+
+ /* If the mac_lock is already held then it is likely a port
+ * reconfiguration is already in place, which will likely do
+ * most of the work of monitor() anyway. */
+ if (mutex_trylock(&efx->mac_lock)) {
+ if (efx->port_enabled)
+ efx->type->monitor(efx);
+ mutex_unlock(&efx->mac_lock);
+ }
+
+ queue_delayed_work(efx->workqueue, &efx->monitor_work,
+ ef4_monitor_interval);
+}
+
+/**************************************************************************
+ *
+ * ioctls
+ *
+ *************************************************************************/
+
+/* Net device ioctl
+ * Context: process, rtnl_lock() held.
+ */
+static int ef4_ioctl(struct net_device *net_dev, struct ifreq *ifr, int cmd)
+{
+ struct ef4_nic *efx = netdev_priv(net_dev);
+ struct mii_ioctl_data *data = if_mii(ifr);
+
+ /* Convert phy_id from older PRTAD/DEVAD format */
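+ /* In the old encoding, bit 10 (0x0400) marked a clause-45 address;
+ * the XOR below clears that marker and sets the standard
+ * MDIO_PHY_ID_C45 flag in one step.
+ */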
+ if ((cmd == SIOCGMIIREG || cmd == SIOCSMIIREG) &&
+ (data->phy_id & 0xfc00) == 0x0400)
+ data->phy_id ^= MDIO_PHY_ID_C45 | 0x0400;
+
+ return mdio_mii_ioctl(&efx->mdio, data, cmd);
+}
+
+/**************************************************************************
+ *
+ * NAPI interface
+ *
+ **************************************************************************/
+
+static void ef4_init_napi_channel(struct ef4_channel *channel)
+{
+ struct ef4_nic *efx = channel->efx;
+
+ channel->napi_dev = efx->net_dev;
+ netif_napi_add(channel->napi_dev, &channel->napi_str,
+ ef4_poll, napi_weight);
+ ef4_channel_busy_poll_init(channel);
+}
+
+static void ef4_init_napi(struct ef4_nic *efx)
+{
+ struct ef4_channel *channel;
+
+ ef4_for_each_channel(channel, efx)
+ ef4_init_napi_channel(channel);
+}
+
+static void ef4_fini_napi_channel(struct ef4_channel *channel)
+{
+ if (channel->napi_dev)
+ netif_napi_del(&channel->napi_str);
+
+ channel->napi_dev = NULL;
+}
+
+static void ef4_fini_napi(struct ef4_nic *efx)
+{
+ struct ef4_channel *channel;
+
+ ef4_for_each_channel(channel, efx)
+ ef4_fini_napi_channel(channel);
+}
+
+/**************************************************************************
+ *
+ * Kernel netpoll interface
+ *
+ *************************************************************************/
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+
+/* Although in the common case interrupts will be disabled, this is not
+ * guaranteed. However, all our work happens inside the NAPI callback,
+ * so no locking is required.
+ */
+static void ef4_netpoll(struct net_device *net_dev)
+{
+ struct ef4_nic *efx = netdev_priv(net_dev);
+ struct ef4_channel *channel;
+
+ ef4_for_each_channel(channel, efx)
+ ef4_schedule_channel(channel);
+}
+
+#endif
+
+#ifdef CONFIG_NET_RX_BUSY_POLL
+static int ef4_busy_poll(struct napi_struct *napi)
+{
+ struct ef4_channel *channel =
+ container_of(napi, struct ef4_channel, napi_str);
+ struct ef4_nic *efx = channel->efx;
+ int budget = 4;
+ int old_rx_packets, rx_packets;
+
+ if (!netif_running(efx->net_dev))
+ return LL_FLUSH_FAILED;
+
+ if (!ef4_channel_try_lock_poll(channel))
+ return LL_FLUSH_BUSY;
+
+ old_rx_packets = channel->rx_queue.rx_packets;
+ ef4_process_channel(channel, budget);
+
+ rx_packets = channel->rx_queue.rx_packets - old_rx_packets;
+
+ /* There is no race condition with NAPI here.
+ * NAPI will automatically be rescheduled if it yielded during busy
+ * polling, because it was not able to take the lock and thus returned
+ * the full budget.
+ */
+ ef4_channel_unlock_poll(channel);
+
+ return rx_packets;
+}
+#endif
+
+/**************************************************************************
+ *
+ * Kernel net device interface
+ *
+ *************************************************************************/
+
+/* Context: process, rtnl_lock() held. */
+int ef4_net_open(struct net_device *net_dev)
+{
+ struct ef4_nic *efx = netdev_priv(net_dev);
+ int rc;
+
+ netif_dbg(efx, ifup, efx->net_dev, "opening device on CPU %d\n",
+ raw_smp_processor_id());
+
+ rc = ef4_check_disabled(efx);
+ if (rc)
+ return rc;
+ if (efx->phy_mode & PHY_MODE_SPECIAL)
+ return -EBUSY;
+
+ /* Notify the kernel of the link state polled during driver load,
+ * before the monitor starts running */
+ ef4_link_status_changed(efx);
+
+ ef4_start_all(efx);
+ ef4_selftest_async_start(efx);
+ return 0;
+}
+
+/* Context: process, rtnl_lock() held.
+ * Note that the kernel will ignore our return code; this method
+ * should really be a void.
+ */
+int ef4_net_stop(struct net_device *net_dev)
+{
+ struct ef4_nic *efx = netdev_priv(net_dev);
+
+ netif_dbg(efx, ifdown, efx->net_dev, "closing on CPU %d\n",
+ raw_smp_processor_id());
+
+ /* Stop the device and flush all the channels */
+ ef4_stop_all(efx);
+
+ return 0;
+}
+
+/* Context: process, dev_base_lock or RTNL held, non-blocking. */
+static struct rtnl_link_stats64 *ef4_net_stats(struct net_device *net_dev,
+ struct rtnl_link_stats64 *stats)
+{
+ struct ef4_nic *efx = netdev_priv(net_dev);
+
+ spin_lock_bh(&efx->stats_lock);
+ efx->type->update_stats(efx, NULL, stats);
+ spin_unlock_bh(&efx->stats_lock);
+
+ return stats;
+}
+
+/* Context: netif_tx_lock held, BHs disabled. */
+static void ef4_watchdog(struct net_device *net_dev)
+{
+ struct ef4_nic *efx = netdev_priv(net_dev);
+
+ netif_err(efx, tx_err, efx->net_dev,
+ "TX stuck with port_enabled=%d: resetting channels\n",
+ efx->port_enabled);
+
+ ef4_schedule_reset(efx, RESET_TYPE_TX_WATCHDOG);
+}
+
+/* Context: process, rtnl_lock() held. */
+static int ef4_change_mtu(struct net_device *net_dev, int new_mtu)
+{
+ struct ef4_nic *efx = netdev_priv(net_dev);
+ int rc;
+
+ rc = ef4_check_disabled(efx);
+ if (rc)
+ return rc;
+
+ netif_dbg(efx, drv, efx->net_dev, "changing MTU to %d\n", new_mtu);
+
+ ef4_device_detach_sync(efx);
+ ef4_stop_all(efx);
+
+ mutex_lock(&efx->mac_lock);
+ net_dev->mtu = new_mtu;
+ ef4_mac_reconfigure(efx);
+ mutex_unlock(&efx->mac_lock);
+
+ ef4_start_all(efx);
+ netif_device_attach(efx->net_dev);
+ return 0;
+}
+
+static int ef4_set_mac_address(struct net_device *net_dev, void *data)
+{
+ struct ef4_nic *efx = netdev_priv(net_dev);
+ struct sockaddr *addr = data;
+ u8 *new_addr = addr->sa_data;
+ u8 old_addr[6];
+ int rc;
+
+ if (!is_valid_ether_addr(new_addr)) {
+ netif_err(efx, drv, efx->net_dev,
+ "invalid ethernet MAC address requested: %pM\n",
+ new_addr);
+ return -EADDRNOTAVAIL;
+ }
+
+ /* save old address */
+ ether_addr_copy(old_addr, net_dev->dev_addr);
+ ether_addr_copy(net_dev->dev_addr, new_addr);
+ if (efx->type->set_mac_address) {
+ rc = efx->type->set_mac_address(efx);
+ if (rc) {
+ ether_addr_copy(net_dev->dev_addr, old_addr);
+ return rc;
+ }
+ }
+
+ /* Reconfigure the MAC */
+ mutex_lock(&efx->mac_lock);
+ ef4_mac_reconfigure(efx);
+ mutex_unlock(&efx->mac_lock);
+
+ return 0;
+}
+
+/* Context: netif_addr_lock held, BHs disabled. */
+static void ef4_set_rx_mode(struct net_device *net_dev)
+{
+ struct ef4_nic *efx = netdev_priv(net_dev);
+
+ if (efx->port_enabled)
+ queue_work(efx->workqueue, &efx->mac_work);
+ /* Otherwise ef4_start_port() will do this */
+}
+
+static int ef4_set_features(struct net_device *net_dev, netdev_features_t data)
+{
+ struct ef4_nic *efx = netdev_priv(net_dev);
+ int rc;
+
+ /* If disabling RX n-tuple filtering, clear existing filters */
+ if (net_dev->features & ~data & NETIF_F_NTUPLE) {
+ rc = efx->type->filter_clear_rx(efx, EF4_FILTER_PRI_MANUAL);
+ if (rc)
+ return rc;
+ }
+
+ /* If the Rx VLAN filter setting changes, update filters via mac_reconfigure */
+ if ((net_dev->features ^ data) & NETIF_F_HW_VLAN_CTAG_FILTER) {
+ /* ef4_set_rx_mode() will schedule MAC work to update filters
+ * when the new features are finally set in net_dev.
+ */
+ ef4_set_rx_mode(net_dev);
+ }
+
+ return 0;
+}
+
+static const struct net_device_ops ef4_netdev_ops = {
+ .ndo_open = ef4_net_open,
+ .ndo_stop = ef4_net_stop,
+ .ndo_get_stats64 = ef4_net_stats,
+ .ndo_tx_timeout = ef4_watchdog,
+ .ndo_start_xmit = ef4_hard_start_xmit,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_do_ioctl = ef4_ioctl,
+ .ndo_change_mtu = ef4_change_mtu,
+ .ndo_set_mac_address = ef4_set_mac_address,
+ .ndo_set_rx_mode = ef4_set_rx_mode,
+ .ndo_set_features = ef4_set_features,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = ef4_netpoll,
+#endif
+ .ndo_setup_tc = ef4_setup_tc,
+#ifdef CONFIG_NET_RX_BUSY_POLL
+ .ndo_busy_poll = ef4_busy_poll,
+#endif
+#ifdef CONFIG_RFS_ACCEL
+ .ndo_rx_flow_steer = ef4_filter_rfs,
+#endif
+};
+
+static void ef4_update_name(struct ef4_nic *efx)
+{
+ strcpy(efx->name, efx->net_dev->name);
+ ef4_mtd_rename(efx);
+ ef4_set_channel_names(efx);
+}
+
+static int ef4_netdev_event(struct notifier_block *this,
+ unsigned long event, void *ptr)
+{
+ struct net_device *net_dev = netdev_notifier_info_to_dev(ptr);
+
+ if ((net_dev->netdev_ops == &ef4_netdev_ops) &&
+ event == NETDEV_CHANGENAME)
+ ef4_update_name(netdev_priv(net_dev));
+
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block ef4_netdev_notifier = {
+ .notifier_call = ef4_netdev_event,
+};
+
+static ssize_t
+show_phy_type(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct ef4_nic *efx = pci_get_drvdata(to_pci_dev(dev));
+ return sprintf(buf, "%d\n", efx->phy_type);
+}
+static DEVICE_ATTR(phy_type, 0444, show_phy_type, NULL);
+
+static int ef4_register_netdev(struct ef4_nic *efx)
+{
+ struct net_device *net_dev = efx->net_dev;
+ struct ef4_channel *channel;
+ int rc;
+
+ net_dev->watchdog_timeo = 5 * HZ;
+ net_dev->irq = efx->pci_dev->irq;
+ net_dev->netdev_ops = &ef4_netdev_ops;
+ net_dev->ethtool_ops = &ef4_ethtool_ops;
+ net_dev->gso_max_segs = EF4_TSO_MAX_SEGS;
+ net_dev->min_mtu = EF4_MIN_MTU;
+ net_dev->max_mtu = EF4_MAX_MTU;
+
+ rtnl_lock();
+
+ /* Enable resets to be scheduled and check whether any were
+ * already requested. If so, the NIC is probably hosed so we
+ * abort.
+ */
+ efx->state = STATE_READY;
+ smp_mb(); /* ensure we change state before checking reset_pending */
+ if (efx->reset_pending) {
+ netif_err(efx, probe, efx->net_dev,
+ "aborting probe due to scheduled reset\n");
+ rc = -EIO;
+ goto fail_locked;
+ }
+
+ rc = dev_alloc_name(net_dev, net_dev->name);
+ if (rc < 0)
+ goto fail_locked;
+ ef4_update_name(efx);
+
+ /* Always start with carrier off; PHY events will detect the link */
+ netif_carrier_off(net_dev);
+
+ rc = register_netdevice(net_dev);
+ if (rc)
+ goto fail_locked;
+
+ ef4_for_each_channel(channel, efx) {
+ struct ef4_tx_queue *tx_queue;
+ ef4_for_each_channel_tx_queue(tx_queue, channel)
+ ef4_init_tx_queue_core_txq(tx_queue);
+ }
+
+ ef4_associate(efx);
+
+ rtnl_unlock();
+
+ rc = device_create_file(&efx->pci_dev->dev, &dev_attr_phy_type);
+ if (rc) {
+ netif_err(efx, drv, efx->net_dev,
+ "failed to init net dev attributes\n");
+ goto fail_registered;
+ }
+ return 0;
+
+fail_registered:
+ rtnl_lock();
+ ef4_dissociate(efx);
+ unregister_netdevice(net_dev);
+fail_locked:
+ efx->state = STATE_UNINIT;
+ rtnl_unlock();
+ netif_err(efx, drv, efx->net_dev, "could not register net dev\n");
+ return rc;
+}
+
+static void ef4_unregister_netdev(struct ef4_nic *efx)
+{
+ if (!efx->net_dev)
+ return;
+
+ BUG_ON(netdev_priv(efx->net_dev) != efx);
+
+ if (ef4_dev_registered(efx)) {
+ strlcpy(efx->name, pci_name(efx->pci_dev), sizeof(efx->name));
+ device_remove_file(&efx->pci_dev->dev, &dev_attr_phy_type);
+ unregister_netdev(efx->net_dev);
+ }
+}
+
+/**************************************************************************
+ *
+ * Device reset and suspend
+ *
+ **************************************************************************/
+
+/* Tears down the entire software state and most of the hardware state
+ * before reset. */
+void ef4_reset_down(struct ef4_nic *efx, enum reset_type method)
+{
+ EF4_ASSERT_RESET_SERIALISED(efx);
+
+ ef4_stop_all(efx);
+ ef4_disable_interrupts(efx);
+
+ mutex_lock(&efx->mac_lock);
+ if (efx->port_initialized && method != RESET_TYPE_INVISIBLE &&
+ method != RESET_TYPE_DATAPATH)
+ efx->phy_op->fini(efx);
+ efx->type->fini(efx);
+}
+
+/* This function will always ensure that the locks acquired in
+ * ef4_reset_down() are released. A failure return code indicates
+ * that we were unable to reinitialise the hardware, and the
+ * driver should be disabled. If ok is false, then the rx and tx
+ * engines are not restarted, pending a RESET_DISABLE. */
+int ef4_reset_up(struct ef4_nic *efx, enum reset_type method, bool ok)
+{
+ int rc;
+
+ EF4_ASSERT_RESET_SERIALISED(efx);
+
+ /* Ensure that SRAM is initialised even if we're disabling the device */
+ rc = efx->type->init(efx);
+ if (rc) {
+ netif_err(efx, drv, efx->net_dev, "failed to initialise NIC\n");
+ goto fail;
+ }
+
+ if (!ok)
+ goto fail;
+
+ if (efx->port_initialized && method != RESET_TYPE_INVISIBLE &&
+ method != RESET_TYPE_DATAPATH) {
+ rc = efx->phy_op->init(efx);
+ if (rc)
+ goto fail;
+ rc = efx->phy_op->reconfigure(efx);
+ if (rc && rc != -EPERM)
+ netif_err(efx, drv, efx->net_dev,
+ "could not restore PHY settings\n");
+ }
+
+ rc = ef4_enable_interrupts(efx);
+ if (rc)
+ goto fail;
+
+ down_read(&efx->filter_sem);
+ ef4_restore_filters(efx);
+ up_read(&efx->filter_sem);
+
+ mutex_unlock(&efx->mac_lock);
+
+ ef4_start_all(efx);
+
+ return 0;
+
+fail:
+ efx->port_initialized = false;
+
+ mutex_unlock(&efx->mac_lock);
+
+ return rc;
+}
+
+/* Reset the NIC using the specified method. Note that the reset may
+ * fail, in which case the card will be left in an unusable state.
+ *
+ * Caller must hold the rtnl_lock.
+ */
+int ef4_reset(struct ef4_nic *efx, enum reset_type method)
+{
+ int rc, rc2;
+ bool disabled;
+
+ netif_info(efx, drv, efx->net_dev, "resetting (%s)\n",
+ RESET_TYPE(method));
+
+ ef4_device_detach_sync(efx);
+ ef4_reset_down(efx, method);
+
+ rc = efx->type->reset(efx, method);
+ if (rc) {
+ netif_err(efx, drv, efx->net_dev, "failed to reset hardware\n");
+ goto out;
+ }
+
+ /* Clear flags for the scopes we covered. We assume the NIC and
+ * driver are now quiescent so that there is no race here.
+ */
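+ /* -(1 << (method + 1)) is the two's-complement mask with bits
+ * method+1 and above set, so the &= clears every pending reset
+ * of equal or lesser scope in one operation.
+ */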
+ if (method < RESET_TYPE_MAX_METHOD)
+ efx->reset_pending &= -(1 << (method + 1));
+ else /* it doesn't fit into the well-ordered scope hierarchy */
+ __clear_bit(method, &efx->reset_pending);
+
+ /* Reinitialise bus-mastering, which may have been turned off before
+ * the reset was scheduled. This is still appropriate, even in the
+ * RESET_TYPE_DISABLE since this driver generally assumes the hardware
+ * can respond to requests. */
+ pci_set_master(efx->pci_dev);
+
+out:
+ /* Leave device stopped if necessary */
+ disabled = rc ||
+ method == RESET_TYPE_DISABLE ||
+ method == RESET_TYPE_RECOVER_OR_DISABLE;
+ rc2 = ef4_reset_up(efx, method, !disabled);
+ if (rc2) {
+ disabled = true;
+ if (!rc)
+ rc = rc2;
+ }
+
+ if (disabled) {
+ dev_close(efx->net_dev);
+ netif_err(efx, drv, efx->net_dev, "has been disabled\n");
+ efx->state = STATE_DISABLED;
+ } else {
+ netif_dbg(efx, drv, efx->net_dev, "reset complete\n");
+ netif_device_attach(efx->net_dev);
+ }
+ return rc;
+}
+
+/* Try recovery mechanisms.
+ * For now only EEH is supported.
+ * Returns 0 if the recovery mechanisms are unsuccessful.
+ * Returns a non-zero value otherwise.
+ */
+int ef4_try_recovery(struct ef4_nic *efx)
+{
+#ifdef CONFIG_EEH
+ /* A PCI error can occur and not be seen by EEH because nothing
+ * happens on the PCI bus. In this case the driver may fail and
+ * schedule a 'recover or reset', leading to this recovery handler.
+ * Manually call the eeh failure check function.
+ */
+ struct eeh_dev *eehdev = pci_dev_to_eeh_dev(efx->pci_dev);
+ if (eeh_dev_check_failure(eehdev)) {
+ /* The EEH mechanisms will handle the error and reset the
+ * device if necessary.
+ */
+ return 1;
+ }
+#endif
+ return 0;
+}
+
+/* The worker thread exists so that code that cannot sleep can
+ * schedule a reset for later.
+ */
+static void ef4_reset_work(struct work_struct *data)
+{
+ struct ef4_nic *efx = container_of(data, struct ef4_nic, reset_work);
+ unsigned long pending;
+ enum reset_type method;
+
+ pending = ACCESS_ONCE(efx->reset_pending);
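+ /* fls() is 1-based, so method is the highest-numbered pending
+ * reset - the widest scope, given the ordering that ef4_reset()
+ * also relies on. If pending is 0, method is -1, matches no type
+ * below, and the !pending check returns early.
+ */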
+ method = fls(pending) - 1;
+
+ if ((method == RESET_TYPE_RECOVER_OR_DISABLE ||
+ method == RESET_TYPE_RECOVER_OR_ALL) &&
+ ef4_try_recovery(efx))
+ return;
+
+ if (!pending)
+ return;
+
+ rtnl_lock();
+
+ /* We checked the state in ef4_schedule_reset() but it may
+ * have changed by now. Now that we have the RTNL lock,
+ * it cannot change again.
+ */
+ if (efx->state == STATE_READY)
+ (void)ef4_reset(efx, method);
+
+ rtnl_unlock();
+}
+
+void ef4_schedule_reset(struct ef4_nic *efx, enum reset_type type)
+{
+ enum reset_type method;
+
+ if (efx->state == STATE_RECOVERY) {
+ netif_dbg(efx, drv, efx->net_dev,
+ "recovering: skip scheduling %s reset\n",
+ RESET_TYPE(type));
+ return;
+ }
+
+ switch (type) {
+ case RESET_TYPE_INVISIBLE:
+ case RESET_TYPE_ALL:
+ case RESET_TYPE_RECOVER_OR_ALL:
+ case RESET_TYPE_WORLD:
+ case RESET_TYPE_DISABLE:
+ case RESET_TYPE_RECOVER_OR_DISABLE:
+ case RESET_TYPE_DATAPATH:
+ method = type;
+ netif_dbg(efx, drv, efx->net_dev, "scheduling %s reset\n",
+ RESET_TYPE(method));
+ break;
+ default:
+ method = efx->type->map_reset_reason(type);
+ netif_dbg(efx, drv, efx->net_dev,
+ "scheduling %s reset for %s\n",
+ RESET_TYPE(method), RESET_TYPE(type));
+ break;
+ }
+
+ set_bit(method, &efx->reset_pending);
+ smp_mb(); /* ensure we change reset_pending before checking state */
+
+ /* If we're not READY then just leave the flags set as the cue
+ * to abort probing or reschedule the reset later.
+ */
+ if (ACCESS_ONCE(efx->state) != STATE_READY)
+ return;
+
+ queue_work(reset_workqueue, &efx->reset_work);
+}
+
+/**************************************************************************
+ *
+ * List of NICs we support
+ *
+ **************************************************************************/
+
+/* PCI device ID table */
+static const struct pci_device_id ef4_pci_table[] = {
+ {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE,
+ PCI_DEVICE_ID_SOLARFLARE_SFC4000A_0),
+ .driver_data = (unsigned long) &falcon_a1_nic_type},
+ {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE,
+ PCI_DEVICE_ID_SOLARFLARE_SFC4000B),
+ .driver_data = (unsigned long) &falcon_b0_nic_type},
+ {0} /* end of list */
+};
+
+/**************************************************************************
+ *
+ * Dummy PHY/MAC operations
+ *
+ * Can be used for some unimplemented operations
+ * Needed so all function pointers are valid and do not have to be tested
+ * before use
+ *
+ **************************************************************************/
+int ef4_port_dummy_op_int(struct ef4_nic *efx)
+{
+ return 0;
+}
+void ef4_port_dummy_op_void(struct ef4_nic *efx) {}
+
+static bool ef4_port_dummy_op_poll(struct ef4_nic *efx)
+{
+ return false;
+}
+
+static const struct ef4_phy_operations ef4_dummy_phy_operations = {
+ .init = ef4_port_dummy_op_int,
+ .reconfigure = ef4_port_dummy_op_int,
+ .poll = ef4_port_dummy_op_poll,
+ .fini = ef4_port_dummy_op_void,
+};
+
+/**************************************************************************
+ *
+ * Data housekeeping
+ *
+ **************************************************************************/
+
+/* Fill in the invariants in a struct ef4_nic (including all
+ * sub-structures); the structure itself arrives zeroed from the
+ * net_device allocation.
+ */
+static int ef4_init_struct(struct ef4_nic *efx,
+ struct pci_dev *pci_dev, struct net_device *net_dev)
+{
+ int i;
+
+ /* Initialise common structures */
+ INIT_LIST_HEAD(&efx->node);
+ INIT_LIST_HEAD(&efx->secondary_list);
+ spin_lock_init(&efx->biu_lock);
+#ifdef CONFIG_SFC_FALCON_MTD
+ INIT_LIST_HEAD(&efx->mtd_list);
+#endif
+ INIT_WORK(&efx->reset_work, ef4_reset_work);
+ INIT_DELAYED_WORK(&efx->monitor_work, ef4_monitor);
+ INIT_DELAYED_WORK(&efx->selftest_work, ef4_selftest_async_work);
+ efx->pci_dev = pci_dev;
+ efx->msg_enable = debug;
+ efx->state = STATE_UNINIT;
+ strlcpy(efx->name, pci_name(pci_dev), sizeof(efx->name));
+
+ efx->net_dev = net_dev;
+ efx->rx_prefix_size = efx->type->rx_prefix_size;
+ efx->rx_ip_align =
+ NET_IP_ALIGN ? (efx->rx_prefix_size + NET_IP_ALIGN) % 4 : 0;
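+ /* Worked example (assumed values): with NET_IP_ALIGN == 2 and a
+ * 16-byte RX prefix, rx_ip_align = (16 + 2) % 4 = 2.
+ */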
+ efx->rx_packet_hash_offset =
+ efx->type->rx_hash_offset - efx->type->rx_prefix_size;
+ efx->rx_packet_ts_offset =
+ efx->type->rx_ts_offset - efx->type->rx_prefix_size;
+ spin_lock_init(&efx->stats_lock);
+ mutex_init(&efx->mac_lock);
+ efx->phy_op = &ef4_dummy_phy_operations;
+ efx->mdio.dev = net_dev;
+ INIT_WORK(&efx->mac_work, ef4_mac_work);
+ init_waitqueue_head(&efx->flush_wq);
+
+ for (i = 0; i < EF4_MAX_CHANNELS; i++) {
+ efx->channel[i] = ef4_alloc_channel(efx, i, NULL);
+ if (!efx->channel[i])
+ goto fail;
+ efx->msi_context[i].efx = efx;
+ efx->msi_context[i].index = i;
+ }
+
+ /* Higher numbered interrupt modes are less capable! */
+ efx->interrupt_mode = max(efx->type->max_interrupt_mode,
+ interrupt_mode);
+
+ /* Would be good to use the net_dev name, but we're too early */
+ snprintf(efx->workqueue_name, sizeof(efx->workqueue_name), "sfc%s",
+ pci_name(pci_dev));
+ efx->workqueue = create_singlethread_workqueue(efx->workqueue_name);
+ if (!efx->workqueue)
+ goto fail;
+
+ return 0;
+
+fail:
+ ef4_fini_struct(efx);
+ return -ENOMEM;
+}
+
+static void ef4_fini_struct(struct ef4_nic *efx)
+{
+ int i;
+
+ for (i = 0; i < EF4_MAX_CHANNELS; i++)
+ kfree(efx->channel[i]);
+
+ kfree(efx->vpd_sn);
+
+ if (efx->workqueue) {
+ destroy_workqueue(efx->workqueue);
+ efx->workqueue = NULL;
+ }
+}
+
+void ef4_update_sw_stats(struct ef4_nic *efx, u64 *stats)
+{
+ u64 n_rx_nodesc_trunc = 0;
+ struct ef4_channel *channel;
+
+ ef4_for_each_channel(channel, efx)
+ n_rx_nodesc_trunc += channel->n_rx_nodesc_trunc;
+ stats[GENERIC_STAT_rx_nodesc_trunc] = n_rx_nodesc_trunc;
+ stats[GENERIC_STAT_rx_noskb_drops] = atomic_read(&efx->n_rx_noskb_drops);
+}
+
+/**************************************************************************
+ *
+ * PCI interface
+ *
+ **************************************************************************/
+
+/* Main body of final NIC shutdown code
+ * This is called only at module unload (or hotplug removal).
+ */
+static void ef4_pci_remove_main(struct ef4_nic *efx)
+{
+ /* Flush reset_work. It can no longer be scheduled since we
+ * are not READY.
+ */
+ BUG_ON(efx->state == STATE_READY);
+ cancel_work_sync(&efx->reset_work);
+
+ ef4_disable_interrupts(efx);
+ ef4_nic_fini_interrupt(efx);
+ ef4_fini_port(efx);
+ efx->type->fini(efx);
+ ef4_fini_napi(efx);
+ ef4_remove_all(efx);
+}
+
+/* Final NIC shutdown
+ * This is called only at module unload (or hotplug removal). A PF can call
+ * this on its VFs to ensure they are unbound first.
+ */
+static void ef4_pci_remove(struct pci_dev *pci_dev)
+{
+ struct ef4_nic *efx;
+
+ efx = pci_get_drvdata(pci_dev);
+ if (!efx)
+ return;
+
+ /* Mark the NIC as fini, then stop the interface */
+ rtnl_lock();
+ ef4_dissociate(efx);
+ dev_close(efx->net_dev);
+ ef4_disable_interrupts(efx);
+ efx->state = STATE_UNINIT;
+ rtnl_unlock();
+
+ ef4_unregister_netdev(efx);
+
+ ef4_mtd_remove(efx);
+
+ ef4_pci_remove_main(efx);
+
+ ef4_fini_io(efx);
+ netif_dbg(efx, drv, efx->net_dev, "shutdown successful\n");
+
+ ef4_fini_struct(efx);
+ free_netdev(efx->net_dev);
+
+ pci_disable_pcie_error_reporting(pci_dev);
+}
+
+/* NIC VPD information
+ * Called during probe to display the part number of the
+ * installed NIC. VPD is potentially very large but this should
+ * always appear within the first 512 bytes.
+ */
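+/* Sketch of the layout parsed below, per the PCI VPD format: a large
+ * resource tag (PCI_VPD_LRDT_RO_DATA) opens the read-only section and
+ * carries its length; inside it, two-character keywords such as "PN"
+ * and "SN" are each followed by a one-byte field length and the field
+ * data itself.
+ */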
+#define SFC_VPD_LEN 512
+static void ef4_probe_vpd_strings(struct ef4_nic *efx)
+{
+ struct pci_dev *dev = efx->pci_dev;
+ char vpd_data[SFC_VPD_LEN];
+ ssize_t vpd_size;
+ int ro_start, ro_size, i, j;
+
+ /* Get the vpd data from the device */
+ vpd_size = pci_read_vpd(dev, 0, sizeof(vpd_data), vpd_data);
+ if (vpd_size <= 0) {
+ netif_err(efx, drv, efx->net_dev, "Unable to read VPD\n");
+ return;
+ }
+
+ /* Get the Read only section */
+ ro_start = pci_vpd_find_tag(vpd_data, 0, vpd_size, PCI_VPD_LRDT_RO_DATA);
+ if (ro_start < 0) {
+ netif_err(efx, drv, efx->net_dev, "VPD Read-only not found\n");
+ return;
+ }
+
+ ro_size = pci_vpd_lrdt_size(&vpd_data[ro_start]);
+ j = ro_size;
+ i = ro_start + PCI_VPD_LRDT_TAG_SIZE;
+ if (i + j > vpd_size)
+ j = vpd_size - i;
+
+ /* Get the Part number */
+ i = pci_vpd_find_info_keyword(vpd_data, i, j, "PN");
+ if (i < 0) {
+ netif_err(efx, drv, efx->net_dev, "Part number not found\n");
+ return;
+ }
+
+ j = pci_vpd_info_field_size(&vpd_data[i]);
+ i += PCI_VPD_INFO_FLD_HDR_SIZE;
+ if (i + j > vpd_size) {
+ netif_err(efx, drv, efx->net_dev, "Incomplete part number\n");
+ return;
+ }
+
+ netif_info(efx, drv, efx->net_dev,
+ "Part Number : %.*s\n", j, &vpd_data[i]);
+
+ i = ro_start + PCI_VPD_LRDT_TAG_SIZE;
+ j = ro_size;
+ i = pci_vpd_find_info_keyword(vpd_data, i, j, "SN");
+ if (i < 0) {
+ netif_err(efx, drv, efx->net_dev, "Serial number not found\n");
+ return;
+ }
+
+ j = pci_vpd_info_field_size(&vpd_data[i]);
+ i += PCI_VPD_INFO_FLD_HDR_SIZE;
+ if (i + j > vpd_size) {
+ netif_err(efx, drv, efx->net_dev, "Incomplete serial number\n");
+ return;
+ }
+
+ efx->vpd_sn = kmalloc(j + 1, GFP_KERNEL);
+ if (!efx->vpd_sn)
+ return;
+
+ snprintf(efx->vpd_sn, j + 1, "%s", &vpd_data[i]);
+}
+
+
+/* Main body of NIC initialisation
+ * This is called at module load (or hotplug insertion, theoretically).
+ */
+static int ef4_pci_probe_main(struct ef4_nic *efx)
+{
+ int rc;
+
+ /* Do start-of-day initialisation */
+ rc = ef4_probe_all(efx);
+ if (rc)
+ goto fail1;
+
+ ef4_init_napi(efx);
+
+ rc = efx->type->init(efx);
+ if (rc) {
+ netif_err(efx, probe, efx->net_dev,
+ "failed to initialise NIC\n");
+ goto fail3;
+ }
+
+ rc = ef4_init_port(efx);
+ if (rc) {
+ netif_err(efx, probe, efx->net_dev,
+ "failed to initialise port\n");
+ goto fail4;
+ }
+
+ rc = ef4_nic_init_interrupt(efx);
+ if (rc)
+ goto fail5;
+ rc = ef4_enable_interrupts(efx);
+ if (rc)
+ goto fail6;
+
+ return 0;
+
+ fail6:
+ ef4_nic_fini_interrupt(efx);
+ fail5:
+ ef4_fini_port(efx);
+ fail4:
+ efx->type->fini(efx);
+ fail3:
+ ef4_fini_napi(efx);
+ ef4_remove_all(efx);
+ fail1:
+ return rc;
+}
+
+/* NIC initialisation
+ *
+ * This is called at module load (or hotplug insertion,
+ * theoretically). It sets up PCI mappings, resets the NIC,
+ * sets up and registers the network devices with the kernel and hooks
+ * the interrupt service routine. It does not prepare the device for
+ * transmission; this is left to the first time one of the network
+ * interfaces is brought up (i.e. ef4_net_open).
+ */
+static int ef4_pci_probe(struct pci_dev *pci_dev,
+ const struct pci_device_id *entry)
+{
+ struct net_device *net_dev;
+ struct ef4_nic *efx;
+ int rc;
+
+ /* Allocate and initialise a struct net_device and struct ef4_nic */
+ net_dev = alloc_etherdev_mqs(sizeof(*efx), EF4_MAX_CORE_TX_QUEUES,
+ EF4_MAX_RX_QUEUES);
+ if (!net_dev)
+ return -ENOMEM;
+ efx = netdev_priv(net_dev);
+ efx->type = (const struct ef4_nic_type *) entry->driver_data;
+ efx->fixed_features |= NETIF_F_HIGHDMA;
+
+ pci_set_drvdata(pci_dev, efx);
+ SET_NETDEV_DEV(net_dev, &pci_dev->dev);
+ rc = ef4_init_struct(efx, pci_dev, net_dev);
+ if (rc)
+ goto fail1;
+
+ netif_info(efx, probe, efx->net_dev,
+ "Solarflare NIC detected\n");
+
+ ef4_probe_vpd_strings(efx);
+
+ /* Set up basic I/O (BAR mappings etc) */
+ rc = ef4_init_io(efx);
+ if (rc)
+ goto fail2;
+
+ rc = ef4_pci_probe_main(efx);
+ if (rc)
+ goto fail3;
+
+ net_dev->features |= (efx->type->offload_features | NETIF_F_SG |
+ NETIF_F_RXCSUM);
+ /* Mask for features that also apply to VLAN devices */
+ net_dev->vlan_features |= (NETIF_F_HW_CSUM | NETIF_F_SG |
+ NETIF_F_HIGHDMA | NETIF_F_RXCSUM);
+
+ net_dev->hw_features = net_dev->features & ~efx->fixed_features;
+
+ /* Disable VLAN filtering by default. It may be enforced if
+ * the feature is fixed (i.e. VLAN filters are required to
+ * receive VLAN tagged packets due to vPort restrictions).
+ */
+ net_dev->features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
+ net_dev->features |= efx->fixed_features;
+
+ rc = ef4_register_netdev(efx);
+ if (rc)
+ goto fail4;
+
+ netif_dbg(efx, probe, efx->net_dev, "initialisation successful\n");
+
+ /* Try to create MTDs, but allow this to fail */
+ rtnl_lock();
+ rc = ef4_mtd_probe(efx);
+ rtnl_unlock();
+ if (rc && rc != -EPERM)
+ netif_warn(efx, probe, efx->net_dev,
+ "failed to create MTDs (%d)\n", rc);
+
+ rc = pci_enable_pcie_error_reporting(pci_dev);
+ if (rc && rc != -EINVAL)
+ netif_notice(efx, probe, efx->net_dev,
+ "PCIE error reporting unavailable (%d).\n",
+ rc);
+
+ return 0;
+
+ fail4:
+ ef4_pci_remove_main(efx);
+ fail3:
+ ef4_fini_io(efx);
+ fail2:
+ ef4_fini_struct(efx);
+ fail1:
+ WARN_ON(rc > 0);
+ netif_dbg(efx, drv, efx->net_dev, "initialisation failed. rc=%d\n", rc);
+ free_netdev(net_dev);
+ return rc;
+}
+
+static int ef4_pm_freeze(struct device *dev)
+{
+ struct ef4_nic *efx = pci_get_drvdata(to_pci_dev(dev));
+
+ rtnl_lock();
+
+ if (efx->state != STATE_DISABLED) {
+ efx->state = STATE_UNINIT;
+
+ ef4_device_detach_sync(efx);
+
+ ef4_stop_all(efx);
+ ef4_disable_interrupts(efx);
+ }
+
+ rtnl_unlock();
+
+ return 0;
+}
+
+static int ef4_pm_thaw(struct device *dev)
+{
+ int rc;
+ struct ef4_nic *efx = pci_get_drvdata(to_pci_dev(dev));
+
+ rtnl_lock();
+
+ if (efx->state != STATE_DISABLED) {
+ rc = ef4_enable_interrupts(efx);
+ if (rc)
+ goto fail;
+
+ mutex_lock(&efx->mac_lock);
+ efx->phy_op->reconfigure(efx);
+ mutex_unlock(&efx->mac_lock);
+
+ ef4_start_all(efx);
+
+ netif_device_attach(efx->net_dev);
+
+ efx->state = STATE_READY;
+
+ efx->type->resume_wol(efx);
+ }
+
+ rtnl_unlock();
+
+ /* Reschedule any quenched resets scheduled during ef4_pm_freeze() */
+ queue_work(reset_workqueue, &efx->reset_work);
+
+ return 0;
+
+fail:
+ rtnl_unlock();
+
+ return rc;
+}
+
+static int ef4_pm_poweroff(struct device *dev)
+{
+ struct pci_dev *pci_dev = to_pci_dev(dev);
+ struct ef4_nic *efx = pci_get_drvdata(pci_dev);
+
+ efx->type->fini(efx);
+
+ efx->reset_pending = 0;
+
+ pci_save_state(pci_dev);
+ return pci_set_power_state(pci_dev, PCI_D3hot);
+}
+
+/* Used for both resume and restore */
+static int ef4_pm_resume(struct device *dev)
+{
+ struct pci_dev *pci_dev = to_pci_dev(dev);
+ struct ef4_nic *efx = pci_get_drvdata(pci_dev);
+ int rc;
+
+ rc = pci_set_power_state(pci_dev, PCI_D0);
+ if (rc)
+ return rc;
+ pci_restore_state(pci_dev);
+ rc = pci_enable_device(pci_dev);
+ if (rc)
+ return rc;
+ pci_set_master(efx->pci_dev);
+ rc = efx->type->reset(efx, RESET_TYPE_ALL);
+ if (rc)
+ return rc;
+ rc = efx->type->init(efx);
+ if (rc)
+ return rc;
+ rc = ef4_pm_thaw(dev);
+ return rc;
+}
+
+static int ef4_pm_suspend(struct device *dev)
+{
+ int rc;
+
+ ef4_pm_freeze(dev);
+ rc = ef4_pm_poweroff(dev);
+ if (rc)
+ ef4_pm_resume(dev);
+ return rc;
+}
+
+static const struct dev_pm_ops ef4_pm_ops = {
+ .suspend = ef4_pm_suspend,
+ .resume = ef4_pm_resume,
+ .freeze = ef4_pm_freeze,
+ .thaw = ef4_pm_thaw,
+ .poweroff = ef4_pm_poweroff,
+ .restore = ef4_pm_resume,
+};
+
+/* A PCI error affecting this device was detected.
+ * At this point MMIO and DMA may be disabled.
+ * Stop the software path and request a slot reset.
+ */
+static pci_ers_result_t ef4_io_error_detected(struct pci_dev *pdev,
+ enum pci_channel_state state)
+{
+ pci_ers_result_t status = PCI_ERS_RESULT_RECOVERED;
+ struct ef4_nic *efx = pci_get_drvdata(pdev);
+
+ if (state == pci_channel_io_perm_failure)
+ return PCI_ERS_RESULT_DISCONNECT;
+
+ rtnl_lock();
+
+ if (efx->state != STATE_DISABLED) {
+ efx->state = STATE_RECOVERY;
+ efx->reset_pending = 0;
+
+ ef4_device_detach_sync(efx);
+
+ ef4_stop_all(efx);
+ ef4_disable_interrupts(efx);
+
+ status = PCI_ERS_RESULT_NEED_RESET;
+ } else {
+ /* If the interface is disabled we don't want to do anything
+ * with it.
+ */
+ status = PCI_ERS_RESULT_RECOVERED;
+ }
+
+ rtnl_unlock();
+
+ pci_disable_device(pdev);
+
+ return status;
+}
+
+/* Fake a successful reset, which will be performed later in ef4_io_resume. */
+static pci_ers_result_t ef4_io_slot_reset(struct pci_dev *pdev)
+{
+ struct ef4_nic *efx = pci_get_drvdata(pdev);
+ pci_ers_result_t status = PCI_ERS_RESULT_RECOVERED;
+ int rc;
+
+ if (pci_enable_device(pdev)) {
+ netif_err(efx, hw, efx->net_dev,
+ "Cannot re-enable PCI device after reset.\n");
+ status = PCI_ERS_RESULT_DISCONNECT;
+ }
+
+ rc = pci_cleanup_aer_uncorrect_error_status(pdev);
+ if (rc) {
+ netif_err(efx, hw, efx->net_dev,
+ "pci_cleanup_aer_uncorrect_error_status failed (%d)\n", rc);
+ /* Non-fatal error. Continue. */
+ }
+
+ return status;
+}
+
+/* Perform the actual reset and resume I/O operations. */
+static void ef4_io_resume(struct pci_dev *pdev)
+{
+ struct ef4_nic *efx = pci_get_drvdata(pdev);
+ int rc;
+
+ rtnl_lock();
+
+ if (efx->state == STATE_DISABLED)
+ goto out;
+
+ rc = ef4_reset(efx, RESET_TYPE_ALL);
+ if (rc) {
+ netif_err(efx, hw, efx->net_dev,
+ "ef4_reset failed after PCI error (%d)\n", rc);
+ } else {
+ efx->state = STATE_READY;
+ netif_dbg(efx, hw, efx->net_dev,
+ "Done resetting and resuming IO after PCI error.\n");
+ }
+
+out:
+ rtnl_unlock();
+}
+
+/* For simplicity and reliability, we always require a slot reset and try to
+ * reset the hardware when a pci error affecting the device is detected.
+ * We leave both the link_reset and mmio_enabled callback unimplemented:
+ * with our request for slot reset the mmio_enabled callback will never be
+ * called, and the link_reset callback is not used by AER or EEH mechanisms.
+ */
+static const struct pci_error_handlers ef4_err_handlers = {
+ .error_detected = ef4_io_error_detected,
+ .slot_reset = ef4_io_slot_reset,
+ .resume = ef4_io_resume,
+};
+
+static struct pci_driver ef4_pci_driver = {
+ .name = KBUILD_MODNAME,
+ .id_table = ef4_pci_table,
+ .probe = ef4_pci_probe,
+ .remove = ef4_pci_remove,
+ .driver.pm = &ef4_pm_ops,
+ .err_handler = &ef4_err_handlers,
+};
+
+/**************************************************************************
+ *
+ * Kernel module interface
+ *
+ *************************************************************************/
+
+module_param(interrupt_mode, uint, 0444);
+MODULE_PARM_DESC(interrupt_mode,
+ "Interrupt mode (0=>MSIX 1=>MSI 2=>legacy)");
+
+static int __init ef4_init_module(void)
+{
+ int rc;
+
+ printk(KERN_INFO "Solarflare Falcon driver v" EF4_DRIVER_VERSION "\n");
+
+ rc = register_netdevice_notifier(&ef4_netdev_notifier);
+ if (rc)
+ goto err_notifier;
+
+ reset_workqueue = create_singlethread_workqueue("sfc_reset");
+ if (!reset_workqueue) {
+ rc = -ENOMEM;
+ goto err_reset;
+ }
+
+ rc = pci_register_driver(&ef4_pci_driver);
+ if (rc < 0)
+ goto err_pci;
+
+ return 0;
+
+ err_pci:
+ destroy_workqueue(reset_workqueue);
+ err_reset:
+ unregister_netdevice_notifier(&ef4_netdev_notifier);
+ err_notifier:
+ return rc;
+}
+
+static void __exit ef4_exit_module(void)
+{
+ printk(KERN_INFO "Solarflare Falcon driver unloading\n");
+
+ pci_unregister_driver(&ef4_pci_driver);
+ destroy_workqueue(reset_workqueue);
+ unregister_netdevice_notifier(&ef4_netdev_notifier);
+}
+
+module_init(ef4_init_module);
+module_exit(ef4_exit_module);
+
+MODULE_AUTHOR("Solarflare Communications and "
+ "Michael Brown <mbrown@fensystems.co.uk>");
+MODULE_DESCRIPTION("Solarflare Falcon network driver");
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(pci, ef4_pci_table);
diff --git a/drivers/net/ethernet/sfc/falcon/efx.h b/drivers/net/ethernet/sfc/falcon/efx.h
new file mode 100644
index 000000000000..c89456fa148c
--- /dev/null
+++ b/drivers/net/ethernet/sfc/falcon/efx.h
@@ -0,0 +1,277 @@
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2005-2006 Fen Systems Ltd.
+ * Copyright 2006-2013 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#ifndef EF4_EFX_H
+#define EF4_EFX_H
+
+#include "net_driver.h"
+#include "filter.h"
+
+/* All controllers use BAR 0 for I/O space and BAR 2(&3) for memory */
+/* All VFs use BAR 0/1 for memory */
+#define EF4_MEM_BAR 2
+#define EF4_MEM_VF_BAR 0
+
+int ef4_net_open(struct net_device *net_dev);
+int ef4_net_stop(struct net_device *net_dev);
+
+/* TX */
+int ef4_probe_tx_queue(struct ef4_tx_queue *tx_queue);
+void ef4_remove_tx_queue(struct ef4_tx_queue *tx_queue);
+void ef4_init_tx_queue(struct ef4_tx_queue *tx_queue);
+void ef4_init_tx_queue_core_txq(struct ef4_tx_queue *tx_queue);
+void ef4_fini_tx_queue(struct ef4_tx_queue *tx_queue);
+netdev_tx_t ef4_hard_start_xmit(struct sk_buff *skb,
+ struct net_device *net_dev);
+netdev_tx_t ef4_enqueue_skb(struct ef4_tx_queue *tx_queue, struct sk_buff *skb);
+void ef4_xmit_done(struct ef4_tx_queue *tx_queue, unsigned int index);
+int ef4_setup_tc(struct net_device *net_dev, u32 handle, __be16 proto,
+ struct tc_to_netdev *tc);
+unsigned int ef4_tx_max_skb_descs(struct ef4_nic *efx);
+extern bool ef4_separate_tx_channels;
+
+/* RX */
+void ef4_set_default_rx_indir_table(struct ef4_nic *efx);
+void ef4_rx_config_page_split(struct ef4_nic *efx);
+int ef4_probe_rx_queue(struct ef4_rx_queue *rx_queue);
+void ef4_remove_rx_queue(struct ef4_rx_queue *rx_queue);
+void ef4_init_rx_queue(struct ef4_rx_queue *rx_queue);
+void ef4_fini_rx_queue(struct ef4_rx_queue *rx_queue);
+void ef4_fast_push_rx_descriptors(struct ef4_rx_queue *rx_queue, bool atomic);
+void ef4_rx_slow_fill(unsigned long context);
+void __ef4_rx_packet(struct ef4_channel *channel);
+void ef4_rx_packet(struct ef4_rx_queue *rx_queue, unsigned int index,
+ unsigned int n_frags, unsigned int len, u16 flags);
+static inline void ef4_rx_flush_packet(struct ef4_channel *channel)
+{
+ if (channel->rx_pkt_n_frags)
+ __ef4_rx_packet(channel);
+}
+void ef4_schedule_slow_fill(struct ef4_rx_queue *rx_queue);
+
+#define EF4_MAX_DMAQ_SIZE 4096UL
+#define EF4_DEFAULT_DMAQ_SIZE 1024UL
+#define EF4_MIN_DMAQ_SIZE 512UL
+
+#define EF4_MAX_EVQ_SIZE 16384UL
+#define EF4_MIN_EVQ_SIZE 512UL
+
+/* Maximum number of TCP segments we support for soft-TSO */
+#define EF4_TSO_MAX_SEGS 100
+
+/* The smallest [rt]xq_entries that the driver supports. RX minimum
+ * is a bit arbitrary. For TX, we must have space for at least 2
+ * TSO skbs.
+ */
+#define EF4_RXQ_MIN_ENT 128U
+#define EF4_TXQ_MIN_ENT(efx) (2 * ef4_tx_max_skb_descs(efx))
+
+static inline bool ef4_rss_enabled(struct ef4_nic *efx)
+{
+ return efx->rss_spread > 1;
+}
+
+/* Filters */
+
+void ef4_mac_reconfigure(struct ef4_nic *efx);
+
+/**
+ * ef4_filter_insert_filter - add or replace a filter
+ * @efx: NIC in which to insert the filter
+ * @spec: Specification for the filter
+ * @replace_equal: Flag for whether the specified filter may replace an
+ * existing filter with equal priority
+ *
+ * On success, return the filter ID.
+ * On failure, return a negative error code.
+ *
+ * If existing filters have equal match values to the new filter spec,
+ * then the new filter might replace them or the function might fail,
+ * as follows.
+ *
+ * 1. If the existing filters have lower priority, or @replace_equal
+ * is set and they have equal priority, replace them.
+ *
+ * 2. If the existing filters have higher priority, return -%EPERM.
+ *
+ * 3. If !ef4_filter_is_mc_recipient(@spec), or the NIC does not
+ * support delivery to multiple recipients, return -%EEXIST.
+ *
+ * This implies that filters for multiple multicast recipients must
+ * all be inserted with the same priority and @replace_equal = %false.
+ */
+static inline s32 ef4_filter_insert_filter(struct ef4_nic *efx,
+ struct ef4_filter_spec *spec,
+ bool replace_equal)
+{
+ return efx->type->filter_insert(efx, spec, replace_equal);
+}
+
+/**
+ * ef4_filter_remove_id_safe - remove a filter by ID, carefully
+ * @efx: NIC from which to remove the filter
+ * @priority: Priority of filter, as passed to @ef4_filter_insert_filter
+ * @filter_id: ID of filter, as returned by @ef4_filter_insert_filter
+ *
+ * This function will range-check @filter_id, so it is safe to call
+ * with a value passed from userland.
+ */
+static inline int ef4_filter_remove_id_safe(struct ef4_nic *efx,
+ enum ef4_filter_priority priority,
+ u32 filter_id)
+{
+ return efx->type->filter_remove_safe(efx, priority, filter_id);
+}
+
+/**
+ * ef4_filter_get_filter_safe - retrieve a filter by ID, carefully
+ * @efx: NIC from which to remove the filter
+ * @priority: Priority of filter, as passed to @ef4_filter_insert_filter
+ * @filter_id: ID of filter, as returned by @ef4_filter_insert_filter
+ * @spec: Buffer in which to store filter specification
+ *
+ * This function will range-check @filter_id, so it is safe to call
+ * with a value passed from userland.
+ */
+static inline int
+ef4_filter_get_filter_safe(struct ef4_nic *efx,
+ enum ef4_filter_priority priority,
+ u32 filter_id, struct ef4_filter_spec *spec)
+{
+ return efx->type->filter_get_safe(efx, priority, filter_id, spec);
+}
+
+static inline u32 ef4_filter_count_rx_used(struct ef4_nic *efx,
+ enum ef4_filter_priority priority)
+{
+ return efx->type->filter_count_rx_used(efx, priority);
+}
+static inline u32 ef4_filter_get_rx_id_limit(struct ef4_nic *efx)
+{
+ return efx->type->filter_get_rx_id_limit(efx);
+}
+static inline s32 ef4_filter_get_rx_ids(struct ef4_nic *efx,
+ enum ef4_filter_priority priority,
+ u32 *buf, u32 size)
+{
+ return efx->type->filter_get_rx_ids(efx, priority, buf, size);
+}
+#ifdef CONFIG_RFS_ACCEL
+int ef4_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
+ u16 rxq_index, u32 flow_id);
+bool __ef4_filter_rfs_expire(struct ef4_nic *efx, unsigned quota);
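+/* Pace expiry work against insertion: once roughly 60 filters have
+ * been added on this channel, scan up to a quota of 100 filters for
+ * expiry; when the scan succeeds, the counter is decremented by 60 so
+ * the next scan waits for another batch of insertions.
+ */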
+static inline void ef4_filter_rfs_expire(struct ef4_channel *channel)
+{
+ if (channel->rfs_filters_added >= 60 &&
+ __ef4_filter_rfs_expire(channel->efx, 100))
+ channel->rfs_filters_added -= 60;
+}
+#define ef4_filter_rfs_enabled() 1
+#else
+static inline void ef4_filter_rfs_expire(struct ef4_channel *channel) {}
+#define ef4_filter_rfs_enabled() 0
+#endif
+bool ef4_filter_is_mc_recipient(const struct ef4_filter_spec *spec);
+
+/* Channels */
+int ef4_channel_dummy_op_int(struct ef4_channel *channel);
+void ef4_channel_dummy_op_void(struct ef4_channel *channel);
+int ef4_realloc_channels(struct ef4_nic *efx, u32 rxq_entries, u32 txq_entries);
+
+/* Ports */
+int ef4_reconfigure_port(struct ef4_nic *efx);
+int __ef4_reconfigure_port(struct ef4_nic *efx);
+
+/* Ethtool support */
+extern const struct ethtool_ops ef4_ethtool_ops;
+
+/* Reset handling */
+int ef4_reset(struct ef4_nic *efx, enum reset_type method);
+void ef4_reset_down(struct ef4_nic *efx, enum reset_type method);
+int ef4_reset_up(struct ef4_nic *efx, enum reset_type method, bool ok);
+int ef4_try_recovery(struct ef4_nic *efx);
+
+/* Global */
+void ef4_schedule_reset(struct ef4_nic *efx, enum reset_type type);
+unsigned int ef4_usecs_to_ticks(struct ef4_nic *efx, unsigned int usecs);
+unsigned int ef4_ticks_to_usecs(struct ef4_nic *efx, unsigned int ticks);
+int ef4_init_irq_moderation(struct ef4_nic *efx, unsigned int tx_usecs,
+ unsigned int rx_usecs, bool rx_adaptive,
+ bool rx_may_override_tx);
+void ef4_get_irq_moderation(struct ef4_nic *efx, unsigned int *tx_usecs,
+ unsigned int *rx_usecs, bool *rx_adaptive);
+void ef4_stop_eventq(struct ef4_channel *channel);
+void ef4_start_eventq(struct ef4_channel *channel);
+
+/* Dummy PHY ops for PHY drivers */
+int ef4_port_dummy_op_int(struct ef4_nic *efx);
+void ef4_port_dummy_op_void(struct ef4_nic *efx);
+
+/* Update the generic software stats in the passed stats array */
+void ef4_update_sw_stats(struct ef4_nic *efx, u64 *stats);
+
+/* MTD */
+#ifdef CONFIG_SFC_FALCON_MTD
+int ef4_mtd_add(struct ef4_nic *efx, struct ef4_mtd_partition *parts,
+ size_t n_parts, size_t sizeof_part);
+static inline int ef4_mtd_probe(struct ef4_nic *efx)
+{
+ return efx->type->mtd_probe(efx);
+}
+void ef4_mtd_rename(struct ef4_nic *efx);
+void ef4_mtd_remove(struct ef4_nic *efx);
+#else
+static inline int ef4_mtd_probe(struct ef4_nic *efx) { return 0; }
+static inline void ef4_mtd_rename(struct ef4_nic *efx) {}
+static inline void ef4_mtd_remove(struct ef4_nic *efx) {}
+#endif
+
+static inline void ef4_schedule_channel(struct ef4_channel *channel)
+{
+ netif_vdbg(channel->efx, intr, channel->efx->net_dev,
+ "channel %d scheduling NAPI poll on CPU%d\n",
+ channel->channel, raw_smp_processor_id());
+
+ napi_schedule(&channel->napi_str);
+}
+
+static inline void ef4_schedule_channel_irq(struct ef4_channel *channel)
+{
+ channel->event_test_cpu = raw_smp_processor_id();
+ ef4_schedule_channel(channel);
+}
+
+void ef4_link_status_changed(struct ef4_nic *efx);
+void ef4_link_set_advertising(struct ef4_nic *efx, u32);
+void ef4_link_set_wanted_fc(struct ef4_nic *efx, u8);
+
+static inline void ef4_device_detach_sync(struct ef4_nic *efx)
+{
+ struct net_device *dev = efx->net_dev;
+
+ /* Lock/freeze all TX queues so that we can be sure the
+ * TX scheduler is stopped when we're done and before
+ * netif_device_present() becomes false.
+ */
+ netif_tx_lock_bh(dev);
+ netif_device_detach(dev);
+ netif_tx_unlock_bh(dev);
+}
+
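+/* Best-effort assertion that @sem is held for writing: if a read lock
+ * can be taken, no writer currently holds the semaphore, so the
+ * caller's claim is false and we WARN.
+ */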
+static inline bool ef4_rwsem_assert_write_locked(struct rw_semaphore *sem)
+{
+ if (WARN_ON(down_read_trylock(sem))) {
+ up_read(sem);
+ return false;
+ }
+ return true;
+}
+
+#endif /* EF4_EFX_H */
diff --git a/drivers/net/ethernet/sfc/falcon/enum.h b/drivers/net/ethernet/sfc/falcon/enum.h
new file mode 100644
index 000000000000..30a1136fc909
--- /dev/null
+++ b/drivers/net/ethernet/sfc/falcon/enum.h
@@ -0,0 +1,171 @@
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2007-2013 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#ifndef EF4_ENUM_H
+#define EF4_ENUM_H
+
+/**
+ * enum ef4_loopback_mode - loopback modes
+ * @LOOPBACK_NONE: no loopback
+ * @LOOPBACK_DATA: data path loopback
+ * @LOOPBACK_GMAC: loopback within GMAC
+ * @LOOPBACK_XGMII: loopback after XMAC
+ * @LOOPBACK_XGXS: loopback within BPX after XGXS
+ * @LOOPBACK_XAUI: loopback within BPX before XAUI serdes
+ * @LOOPBACK_GMII: loopback within BPX after GMAC
+ * @LOOPBACK_SGMII: loopback within BPX within SGMII
+ * @LOOPBACK_XGBR: loopback within BPX within XGBR
+ * @LOOPBACK_XFI: loopback within BPX before XFI serdes
+ * @LOOPBACK_XAUI_FAR: loopback within BPX after XAUI serdes
+ * @LOOPBACK_GMII_FAR: loopback within BPX before SGMII
+ * @LOOPBACK_SGMII_FAR: loopback within BPX after SGMII
+ * @LOOPBACK_XFI_FAR: loopback after XFI serdes
+ * @LOOPBACK_GPHY: loopback within 1G PHY at unspecified level
+ * @LOOPBACK_PHYXS: loopback within 10G PHY at PHYXS level
+ * @LOOPBACK_PCS: loopback within 10G PHY at PCS level
+ * @LOOPBACK_PMAPMD: loopback within 10G PHY at PMAPMD level
+ * @LOOPBACK_XPORT: cross port loopback
+ * @LOOPBACK_XGMII_WS: wireside loopback excluding XMAC
+ * @LOOPBACK_XAUI_WS: wireside loopback within BPX within XAUI serdes
+ * @LOOPBACK_XAUI_WS_FAR: wireside loopback within BPX including XAUI serdes
+ * @LOOPBACK_XAUI_WS_NEAR: wireside loopback within BPX excluding XAUI serdes
+ * @LOOPBACK_GMII_WS: wireside loopback excluding GMAC
+ * @LOOPBACK_XFI_WS: wireside loopback excluding XFI serdes
+ * @LOOPBACK_XFI_WS_FAR: wireside loopback including XFI serdes
+ * @LOOPBACK_PHYXS_WS: wireside loopback within 10G PHY at PHYXS level
+ */
+/* Please keep this enum up-to-date w.r.t. the following two #defines */
+enum ef4_loopback_mode {
+ LOOPBACK_NONE = 0,
+ LOOPBACK_DATA = 1,
+ LOOPBACK_GMAC = 2,
+ LOOPBACK_XGMII = 3,
+ LOOPBACK_XGXS = 4,
+ LOOPBACK_XAUI = 5,
+ LOOPBACK_GMII = 6,
+ LOOPBACK_SGMII = 7,
+ LOOPBACK_XGBR = 8,
+ LOOPBACK_XFI = 9,
+ LOOPBACK_XAUI_FAR = 10,
+ LOOPBACK_GMII_FAR = 11,
+ LOOPBACK_SGMII_FAR = 12,
+ LOOPBACK_XFI_FAR = 13,
+ LOOPBACK_GPHY = 14,
+ LOOPBACK_PHYXS = 15,
+ LOOPBACK_PCS = 16,
+ LOOPBACK_PMAPMD = 17,
+ LOOPBACK_XPORT = 18,
+ LOOPBACK_XGMII_WS = 19,
+ LOOPBACK_XAUI_WS = 20,
+ LOOPBACK_XAUI_WS_FAR = 21,
+ LOOPBACK_XAUI_WS_NEAR = 22,
+ LOOPBACK_GMII_WS = 23,
+ LOOPBACK_XFI_WS = 24,
+ LOOPBACK_XFI_WS_FAR = 25,
+ LOOPBACK_PHYXS_WS = 26,
+ LOOPBACK_MAX
+};
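+/* Loopback self-tests only exercise modes up to and including this one;
+ * see ef4_ethtool_fill_self_tests() in ethtool.c.
+ */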
+#define LOOPBACK_TEST_MAX LOOPBACK_PMAPMD
+
+/* These loopbacks occur within the controller */
+#define LOOPBACKS_INTERNAL ((1 << LOOPBACK_DATA) | \
+ (1 << LOOPBACK_GMAC) | \
+ (1 << LOOPBACK_XGMII) | \
+ (1 << LOOPBACK_XGXS) | \
+ (1 << LOOPBACK_XAUI) | \
+ (1 << LOOPBACK_GMII) | \
+ (1 << LOOPBACK_SGMII) | \
+ (1 << LOOPBACK_XGBR) | \
+ (1 << LOOPBACK_XFI) | \
+ (1 << LOOPBACK_XAUI_FAR) | \
+ (1 << LOOPBACK_GMII_FAR) | \
+ (1 << LOOPBACK_SGMII_FAR) | \
+ (1 << LOOPBACK_XFI_FAR) | \
+ (1 << LOOPBACK_XGMII_WS) | \
+ (1 << LOOPBACK_XAUI_WS) | \
+ (1 << LOOPBACK_XAUI_WS_FAR) | \
+ (1 << LOOPBACK_XAUI_WS_NEAR) | \
+ (1 << LOOPBACK_GMII_WS) | \
+ (1 << LOOPBACK_XFI_WS) | \
+ (1 << LOOPBACK_XFI_WS_FAR))
+
+#define LOOPBACKS_WS ((1 << LOOPBACK_XGMII_WS) | \
+ (1 << LOOPBACK_XAUI_WS) | \
+ (1 << LOOPBACK_XAUI_WS_FAR) | \
+ (1 << LOOPBACK_XAUI_WS_NEAR) | \
+ (1 << LOOPBACK_GMII_WS) | \
+ (1 << LOOPBACK_XFI_WS) | \
+ (1 << LOOPBACK_XFI_WS_FAR) | \
+ (1 << LOOPBACK_PHYXS_WS))
+
+#define LOOPBACKS_EXTERNAL(_efx) \
+ ((_efx)->loopback_modes & ~LOOPBACKS_INTERNAL & \
+ ~(1 << LOOPBACK_NONE))
+
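+/* Bitmask of the NIC's current loopback mode, for testing against the
+ * LOOPBACKS_* sets above.
+ */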
+#define LOOPBACK_MASK(_efx) \
+ (1 << (_efx)->loopback_mode)
+
+#define LOOPBACK_INTERNAL(_efx) \
+ (!!(LOOPBACKS_INTERNAL & LOOPBACK_MASK(_efx)))
+
+#define LOOPBACK_EXTERNAL(_efx) \
+ (!!(LOOPBACK_MASK(_efx) & LOOPBACKS_EXTERNAL(_efx)))
+
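+/* _from and _to are NIC states; these test whether the loopback mode
+ * changed with respect to, or moved out of, the given set of modes.
+ */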
+#define LOOPBACK_CHANGED(_from, _to, _mask) \
+ (!!((LOOPBACK_MASK(_from) ^ LOOPBACK_MASK(_to)) & (_mask)))
+
+#define LOOPBACK_OUT_OF(_from, _to, _mask) \
+ ((LOOPBACK_MASK(_from) & (_mask)) && !(LOOPBACK_MASK(_to) & (_mask)))
+
+/*****************************************************************************/
+
+/**
+ * enum reset_type - reset types
+ *
+ * %RESET_TYPE_INVISIBLE, %RESET_TYPE_ALL, %RESET_TYPE_WORLD and
+ * %RESET_TYPE_DISABLE specify the method/scope of the reset. The
+ * other values specify reasons, for which ef4_schedule_reset() will
+ * choose a method.
+ *
+ * Reset methods are numbered in order of increasing scope.
+ *
+ * @RESET_TYPE_INVISIBLE: Reset datapath and MAC
+ * @RESET_TYPE_RECOVER_OR_ALL: Try to recover. Apply RESET_TYPE_ALL
+ * if unsuccessful.
+ * @RESET_TYPE_ALL: Reset datapath, MAC and PHY
+ * @RESET_TYPE_WORLD: Reset as much as possible
+ * @RESET_TYPE_RECOVER_OR_DISABLE: Try to recover. Apply RESET_TYPE_DISABLE if
+ * unsuccessful.
+ * @RESET_TYPE_DATAPATH: Reset datapath only.
+ * @RESET_TYPE_DISABLE: Reset datapath, MAC and PHY; leave NIC disabled
+ * @RESET_TYPE_TX_WATCHDOG: reset due to TX watchdog
+ * @RESET_TYPE_INT_ERROR: reset due to internal error
+ * @RESET_TYPE_RX_RECOVERY: reset to recover from RX datapath errors
+ * @RESET_TYPE_DMA_ERROR: DMA error
+ * @RESET_TYPE_TX_SKIP: hardware completed empty tx descriptors
+ */
+enum reset_type {
+ RESET_TYPE_INVISIBLE,
+ RESET_TYPE_RECOVER_OR_ALL,
+ RESET_TYPE_ALL,
+ RESET_TYPE_WORLD,
+ RESET_TYPE_RECOVER_OR_DISABLE,
+ RESET_TYPE_DATAPATH,
+ RESET_TYPE_DISABLE,
+ RESET_TYPE_MAX_METHOD,
+ RESET_TYPE_TX_WATCHDOG,
+ RESET_TYPE_INT_ERROR,
+ RESET_TYPE_RX_RECOVERY,
+ RESET_TYPE_DMA_ERROR,
+ RESET_TYPE_TX_SKIP,
+ RESET_TYPE_MAX,
+};
+
+#endif /* EF4_ENUM_H */
diff --git a/drivers/net/ethernet/sfc/falcon/ethtool.c b/drivers/net/ethernet/sfc/falcon/ethtool.c
new file mode 100644
index 000000000000..8e1929b01a32
--- /dev/null
+++ b/drivers/net/ethernet/sfc/falcon/ethtool.c
@@ -0,0 +1,1343 @@
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2005-2006 Fen Systems Ltd.
+ * Copyright 2006-2013 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#include <linux/netdevice.h>
+#include <linux/ethtool.h>
+#include <linux/rtnetlink.h>
+#include <linux/in.h>
+#include "net_driver.h"
+#include "workarounds.h"
+#include "selftest.h"
+#include "efx.h"
+#include "filter.h"
+#include "nic.h"
+
+struct ef4_sw_stat_desc {
+ const char *name;
+ enum {
+ EF4_ETHTOOL_STAT_SOURCE_nic,
+ EF4_ETHTOOL_STAT_SOURCE_channel,
+ EF4_ETHTOOL_STAT_SOURCE_tx_queue
+ } source;
+ unsigned offset;
+ u64 (*get_stat)(void *field); /* Reader function */
+};
+
+/* Initialiser for a struct ef4_sw_stat_desc with type-checking: the
+ * pointer comparison will not compile unless @field really has type
+ * @field_type, and both branches of the ternary yield the same offset.
+ */
+#define EF4_ETHTOOL_STAT(stat_name, source_name, field, field_type, \
+ get_stat_function) { \
+ .name = #stat_name, \
+ .source = EF4_ETHTOOL_STAT_SOURCE_##source_name, \
+ .offset = ((((field_type *) 0) == \
+ &((struct ef4_##source_name *)0)->field) ? \
+ offsetof(struct ef4_##source_name, field) : \
+ offsetof(struct ef4_##source_name, field)), \
+ .get_stat = get_stat_function, \
+}
+
+static u64 ef4_get_uint_stat(void *field)
+{
+ return *(unsigned int *)field;
+}
+
+static u64 ef4_get_atomic_stat(void *field)
+{
+ return atomic_read((atomic_t *) field);
+}
+
+#define EF4_ETHTOOL_ATOMIC_NIC_ERROR_STAT(field) \
+ EF4_ETHTOOL_STAT(field, nic, field, \
+ atomic_t, ef4_get_atomic_stat)
+
+#define EF4_ETHTOOL_UINT_CHANNEL_STAT(field) \
+ EF4_ETHTOOL_STAT(field, channel, n_##field, \
+ unsigned int, ef4_get_uint_stat)
+
+#define EF4_ETHTOOL_UINT_TXQ_STAT(field) \
+ EF4_ETHTOOL_STAT(tx_##field, tx_queue, field, \
+ unsigned int, ef4_get_uint_stat)
+
+static const struct ef4_sw_stat_desc ef4_sw_stat_desc[] = {
+ EF4_ETHTOOL_UINT_TXQ_STAT(merge_events),
+ EF4_ETHTOOL_UINT_TXQ_STAT(pushes),
+ EF4_ETHTOOL_UINT_TXQ_STAT(cb_packets),
+ EF4_ETHTOOL_ATOMIC_NIC_ERROR_STAT(rx_reset),
+ EF4_ETHTOOL_UINT_CHANNEL_STAT(rx_tobe_disc),
+ EF4_ETHTOOL_UINT_CHANNEL_STAT(rx_ip_hdr_chksum_err),
+ EF4_ETHTOOL_UINT_CHANNEL_STAT(rx_tcp_udp_chksum_err),
+ EF4_ETHTOOL_UINT_CHANNEL_STAT(rx_mcast_mismatch),
+ EF4_ETHTOOL_UINT_CHANNEL_STAT(rx_frm_trunc),
+ EF4_ETHTOOL_UINT_CHANNEL_STAT(rx_merge_events),
+ EF4_ETHTOOL_UINT_CHANNEL_STAT(rx_merge_packets),
+};
+
+#define EF4_ETHTOOL_SW_STAT_COUNT ARRAY_SIZE(ef4_sw_stat_desc)
+
+#define EF4_ETHTOOL_EEPROM_MAGIC 0xEFAB
+
+/**************************************************************************
+ *
+ * Ethtool operations
+ *
+ **************************************************************************
+ */
+
+/* Identify device by flashing LEDs */
+static int ef4_ethtool_phys_id(struct net_device *net_dev,
+ enum ethtool_phys_id_state state)
+{
+ struct ef4_nic *efx = netdev_priv(net_dev);
+ enum ef4_led_mode mode = EF4_LED_DEFAULT;
+
+ switch (state) {
+ case ETHTOOL_ID_ON:
+ mode = EF4_LED_ON;
+ break;
+ case ETHTOOL_ID_OFF:
+ mode = EF4_LED_OFF;
+ break;
+ case ETHTOOL_ID_INACTIVE:
+ mode = EF4_LED_DEFAULT;
+ break;
+ case ETHTOOL_ID_ACTIVE:
+ return 1; /* cycle on/off once per second */
+ }
+
+ efx->type->set_id_led(efx, mode);
+ return 0;
+}
+
+/* This must be called with rtnl_lock held. */
+static int ef4_ethtool_get_settings(struct net_device *net_dev,
+ struct ethtool_cmd *ecmd)
+{
+ struct ef4_nic *efx = netdev_priv(net_dev);
+ struct ef4_link_state *link_state = &efx->link_state;
+
+ mutex_lock(&efx->mac_lock);
+ efx->phy_op->get_settings(efx, ecmd);
+ mutex_unlock(&efx->mac_lock);
+
+ /* Both MACs support pause frames (bidirectional and respond-only) */
+ ecmd->supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;
+
+ if (LOOPBACK_INTERNAL(efx)) {
+ ethtool_cmd_speed_set(ecmd, link_state->speed);
+ ecmd->duplex = link_state->fd ? DUPLEX_FULL : DUPLEX_HALF;
+ }
+
+ return 0;
+}
+
+/* This must be called with rtnl_lock held. */
+static int ef4_ethtool_set_settings(struct net_device *net_dev,
+ struct ethtool_cmd *ecmd)
+{
+ struct ef4_nic *efx = netdev_priv(net_dev);
+ int rc;
+
+ /* GMAC does not support 1000Mbps HD */
+ if ((ethtool_cmd_speed(ecmd) == SPEED_1000) &&
+ (ecmd->duplex != DUPLEX_FULL)) {
+ netif_dbg(efx, drv, efx->net_dev,
+ "rejecting unsupported 1000Mbps HD setting\n");
+ return -EINVAL;
+ }
+
+ mutex_lock(&efx->mac_lock);
+ rc = efx->phy_op->set_settings(efx, ecmd);
+ mutex_unlock(&efx->mac_lock);
+ return rc;
+}
+
+static void ef4_ethtool_get_drvinfo(struct net_device *net_dev,
+ struct ethtool_drvinfo *info)
+{
+ struct ef4_nic *efx = netdev_priv(net_dev);
+
+ strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
+ strlcpy(info->version, EF4_DRIVER_VERSION, sizeof(info->version));
+ strlcpy(info->bus_info, pci_name(efx->pci_dev), sizeof(info->bus_info));
+}
+
+static int ef4_ethtool_get_regs_len(struct net_device *net_dev)
+{
+ return ef4_nic_get_regs_len(netdev_priv(net_dev));
+}
+
+static void ef4_ethtool_get_regs(struct net_device *net_dev,
+ struct ethtool_regs *regs, void *buf)
+{
+ struct ef4_nic *efx = netdev_priv(net_dev);
+
+ regs->version = efx->type->revision;
+ ef4_nic_get_regs(efx, buf);
+}
+
+static u32 ef4_ethtool_get_msglevel(struct net_device *net_dev)
+{
+ struct ef4_nic *efx = netdev_priv(net_dev);
+ return efx->msg_enable;
+}
+
+static void ef4_ethtool_set_msglevel(struct net_device *net_dev, u32 msg_enable)
+{
+ struct ef4_nic *efx = netdev_priv(net_dev);
+ efx->msg_enable = msg_enable;
+}
+
+/**
+ * ef4_fill_test - fill in an individual self-test entry
+ * @test_index: Index of the test
+ * @strings: Ethtool strings, or %NULL
+ * @data: Ethtool test results, or %NULL
+ * @test: Pointer to test result (used only if data != %NULL)
+ * @unit_format: Unit name format (e.g. "chan\%d")
+ * @unit_id: Unit id (e.g. 0 for "chan0")
+ * @test_format: Test name format (e.g. "loopback.\%s.tx_sent")
+ * @test_id: Test id (e.g. "PHYXS" for "loopback.PHYXS.tx_sent")
+ *
+ * Fill in an individual self-test entry.
+ */
+static void ef4_fill_test(unsigned int test_index, u8 *strings, u64 *data,
+ int *test, const char *unit_format, int unit_id,
+ const char *test_format, const char *test_id)
+{
+ char unit_str[ETH_GSTRING_LEN], test_str[ETH_GSTRING_LEN];
+
+ /* Fill data value, if applicable */
+ if (data)
+ data[test_index] = *test;
+
+ /* Fill string, if applicable */
+ if (strings) {
+ if (strchr(unit_format, '%'))
+ snprintf(unit_str, sizeof(unit_str),
+ unit_format, unit_id);
+ else
+ strcpy(unit_str, unit_format);
+ snprintf(test_str, sizeof(test_str), test_format, test_id);
+ snprintf(strings + test_index * ETH_GSTRING_LEN,
+ ETH_GSTRING_LEN,
+ "%-6s %-24s", unit_str, test_str);
+ }
+}
+
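+/* Helpers expanding to the (format, id) argument pairs that
+ * ef4_fill_test() takes for unit and test names.
+ */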
+#define EF4_CHANNEL_NAME(_channel) "chan%d", _channel->channel
+#define EF4_TX_QUEUE_NAME(_tx_queue) "txq%d", _tx_queue->queue
+#define EF4_RX_QUEUE_NAME(_rx_queue) "rxq%d", _rx_queue->queue
+#define EF4_LOOPBACK_NAME(_mode, _counter) \
+ "loopback.%s." _counter, STRING_TABLE_LOOKUP(_mode, ef4_loopback_mode)
+
+/**
+ * ef4_fill_loopback_test - fill in a block of loopback self-test entries
+ * @efx: Efx NIC
+ * @lb_tests: Efx loopback self-test results structure
+ * @mode: Loopback test mode
+ * @test_index: Starting index of the test
+ * @strings: Ethtool strings, or %NULL
+ * @data: Ethtool test results, or %NULL
+ *
+ * Fill in a block of loopback self-test entries. Return new test
+ * index.
+ */
+static int ef4_fill_loopback_test(struct ef4_nic *efx,
+ struct ef4_loopback_self_tests *lb_tests,
+ enum ef4_loopback_mode mode,
+ unsigned int test_index,
+ u8 *strings, u64 *data)
+{
+ struct ef4_channel *channel =
+ ef4_get_channel(efx, efx->tx_channel_offset);
+ struct ef4_tx_queue *tx_queue;
+
+ ef4_for_each_channel_tx_queue(tx_queue, channel) {
+ ef4_fill_test(test_index++, strings, data,
+ &lb_tests->tx_sent[tx_queue->queue],
+ EF4_TX_QUEUE_NAME(tx_queue),
+ EF4_LOOPBACK_NAME(mode, "tx_sent"));
+ ef4_fill_test(test_index++, strings, data,
+ &lb_tests->tx_done[tx_queue->queue],
+ EF4_TX_QUEUE_NAME(tx_queue),
+ EF4_LOOPBACK_NAME(mode, "tx_done"));
+ }
+ ef4_fill_test(test_index++, strings, data,
+ &lb_tests->rx_good,
+ "rx", 0,
+ EF4_LOOPBACK_NAME(mode, "rx_good"));
+ ef4_fill_test(test_index++, strings, data,
+ &lb_tests->rx_bad,
+ "rx", 0,
+ EF4_LOOPBACK_NAME(mode, "rx_bad"));
+
+ return test_index;
+}
+
+/**
+ * ef4_ethtool_fill_self_tests - get self-test details
+ * @efx: Efx NIC
+ * @tests: Efx self-test results structure, or %NULL
+ * @strings: Ethtool strings, or %NULL
+ * @data: Ethtool test results, or %NULL
+ *
+ * Get the number of self-test strings, the strings, and/or the results.
+ * Return number of strings (== number of test results).
+ *
+ * The reason for merging these three functions is to make sure that
+ * they can never be inconsistent.
+ */
+static int ef4_ethtool_fill_self_tests(struct ef4_nic *efx,
+ struct ef4_self_tests *tests,
+ u8 *strings, u64 *data)
+{
+ struct ef4_channel *channel;
+ unsigned int n = 0, i;
+ enum ef4_loopback_mode mode;
+
+ ef4_fill_test(n++, strings, data, &tests->phy_alive,
+ "phy", 0, "alive", NULL);
+ ef4_fill_test(n++, strings, data, &tests->nvram,
+ "core", 0, "nvram", NULL);
+ ef4_fill_test(n++, strings, data, &tests->interrupt,
+ "core", 0, "interrupt", NULL);
+
+ /* Event queues */
+ ef4_for_each_channel(channel, efx) {
+ ef4_fill_test(n++, strings, data,
+ &tests->eventq_dma[channel->channel],
+ EF4_CHANNEL_NAME(channel),
+ "eventq.dma", NULL);
+ ef4_fill_test(n++, strings, data,
+ &tests->eventq_int[channel->channel],
+ EF4_CHANNEL_NAME(channel),
+ "eventq.int", NULL);
+ }
+
+ ef4_fill_test(n++, strings, data, &tests->memory,
+ "core", 0, "memory", NULL);
+ ef4_fill_test(n++, strings, data, &tests->registers,
+ "core", 0, "registers", NULL);
+
+ if (efx->phy_op->run_tests != NULL) {
+ EF4_BUG_ON_PARANOID(efx->phy_op->test_name == NULL);
+
+ for (i = 0; true; ++i) {
+ const char *name;
+
+ EF4_BUG_ON_PARANOID(i >= EF4_MAX_PHY_TESTS);
+ name = efx->phy_op->test_name(efx, i);
+ if (name == NULL)
+ break;
+
+ ef4_fill_test(n++, strings, data, &tests->phy_ext[i],
+ "phy", 0, name, NULL);
+ }
+ }
+
+ /* Loopback tests */
+ for (mode = LOOPBACK_NONE; mode <= LOOPBACK_TEST_MAX; mode++) {
+ if (!(efx->loopback_modes & (1 << mode)))
+ continue;
+ n = ef4_fill_loopback_test(efx,
+ &tests->loopback[mode], mode, n,
+ strings, data);
+ }
+
+ return n;
+}
+
+static size_t ef4_describe_per_queue_stats(struct ef4_nic *efx, u8 *strings)
+{
+ size_t n_stats = 0;
+ struct ef4_channel *channel;
+
+ ef4_for_each_channel(channel, efx) {
+ if (ef4_channel_has_tx_queues(channel)) {
+ n_stats++;
+ if (strings != NULL) {
+ snprintf(strings, ETH_GSTRING_LEN,
+ "tx-%u.tx_packets",
+ channel->tx_queue[0].queue /
+ EF4_TXQ_TYPES);
+
+ strings += ETH_GSTRING_LEN;
+ }
+ }
+ }
+ ef4_for_each_channel(channel, efx) {
+ if (ef4_channel_has_rx_queue(channel)) {
+ n_stats++;
+ if (strings != NULL) {
+ snprintf(strings, ETH_GSTRING_LEN,
+ "rx-%d.rx_packets", channel->channel);
+ strings += ETH_GSTRING_LEN;
+ }
+ }
+ }
+ return n_stats;
+}
+
+static int ef4_ethtool_get_sset_count(struct net_device *net_dev,
+ int string_set)
+{
+ struct ef4_nic *efx = netdev_priv(net_dev);
+
+ switch (string_set) {
+ case ETH_SS_STATS:
+ return efx->type->describe_stats(efx, NULL) +
+ EF4_ETHTOOL_SW_STAT_COUNT +
+ ef4_describe_per_queue_stats(efx, NULL);
+ case ETH_SS_TEST:
+ return ef4_ethtool_fill_self_tests(efx, NULL, NULL, NULL);
+ default:
+ return -EINVAL;
+ }
+}
+
+static void ef4_ethtool_get_strings(struct net_device *net_dev,
+ u32 string_set, u8 *strings)
+{
+ struct ef4_nic *efx = netdev_priv(net_dev);
+ int i;
+
+ switch (string_set) {
+ case ETH_SS_STATS:
+ strings += (efx->type->describe_stats(efx, strings) *
+ ETH_GSTRING_LEN);
+ for (i = 0; i < EF4_ETHTOOL_SW_STAT_COUNT; i++)
+ strlcpy(strings + i * ETH_GSTRING_LEN,
+ ef4_sw_stat_desc[i].name, ETH_GSTRING_LEN);
+ strings += EF4_ETHTOOL_SW_STAT_COUNT * ETH_GSTRING_LEN;
+ strings += (ef4_describe_per_queue_stats(efx, strings) *
+ ETH_GSTRING_LEN);
+ break;
+ case ETH_SS_TEST:
+ ef4_ethtool_fill_self_tests(efx, NULL, strings, NULL);
+ break;
+ default:
+ /* No other string sets */
+ break;
+ }
+}
+
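+/* Fill the stats buffer in the same order that ef4_ethtool_get_strings()
+ * describes it: hardware stats, then software stats, then per-queue
+ * TX/RX packet counts.
+ */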
+static void ef4_ethtool_get_stats(struct net_device *net_dev,
+ struct ethtool_stats *stats,
+ u64 *data)
+{
+ struct ef4_nic *efx = netdev_priv(net_dev);
+ const struct ef4_sw_stat_desc *stat;
+ struct ef4_channel *channel;
+ struct ef4_tx_queue *tx_queue;
+ struct ef4_rx_queue *rx_queue;
+ int i;
+
+ spin_lock_bh(&efx->stats_lock);
+
+ /* Get NIC statistics */
+ data += efx->type->update_stats(efx, data, NULL);
+
+ /* Get software statistics */
+ for (i = 0; i < EF4_ETHTOOL_SW_STAT_COUNT; i++) {
+ stat = &ef4_sw_stat_desc[i];
+ switch (stat->source) {
+ case EF4_ETHTOOL_STAT_SOURCE_nic:
+ data[i] = stat->get_stat((void *)efx + stat->offset);
+ break;
+ case EF4_ETHTOOL_STAT_SOURCE_channel:
+ data[i] = 0;
+ ef4_for_each_channel(channel, efx)
+ data[i] += stat->get_stat((void *)channel +
+ stat->offset);
+ break;
+ case EF4_ETHTOOL_STAT_SOURCE_tx_queue:
+ data[i] = 0;
+ ef4_for_each_channel(channel, efx) {
+ ef4_for_each_channel_tx_queue(tx_queue, channel)
+ data[i] +=
+ stat->get_stat((void *)tx_queue
+ + stat->offset);
+ }
+ break;
+ }
+ }
+ data += EF4_ETHTOOL_SW_STAT_COUNT;
+
+ spin_unlock_bh(&efx->stats_lock);
+
+ ef4_for_each_channel(channel, efx) {
+ if (ef4_channel_has_tx_queues(channel)) {
+ *data = 0;
+ ef4_for_each_channel_tx_queue(tx_queue, channel) {
+ *data += tx_queue->tx_packets;
+ }
+ data++;
+ }
+ }
+ ef4_for_each_channel(channel, efx) {
+ if (ef4_channel_has_rx_queue(channel)) {
+ *data = 0;
+ ef4_for_each_channel_rx_queue(rx_queue, channel) {
+ *data += rx_queue->rx_packets;
+ }
+ data++;
+ }
+ }
+}
+
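+/* Run the requested self-tests, temporarily bringing the interface up
+ * if it is not already running (RX buffers and interrupts are needed).
+ */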
+static void ef4_ethtool_self_test(struct net_device *net_dev,
+ struct ethtool_test *test, u64 *data)
+{
+ struct ef4_nic *efx = netdev_priv(net_dev);
+ struct ef4_self_tests *ef4_tests;
+ bool already_up;
+ int rc = -ENOMEM;
+
+ ef4_tests = kzalloc(sizeof(*ef4_tests), GFP_KERNEL);
+ if (!ef4_tests)
+ goto fail;
+
+ if (efx->state != STATE_READY) {
+ rc = -EBUSY;
+ goto out;
+ }
+
+ netif_info(efx, drv, efx->net_dev, "starting %sline testing\n",
+ (test->flags & ETH_TEST_FL_OFFLINE) ? "off" : "on");
+
+ /* We need rx buffers and interrupts. */
+ already_up = (efx->net_dev->flags & IFF_UP);
+ if (!already_up) {
+ rc = dev_open(efx->net_dev);
+ if (rc) {
+ netif_err(efx, drv, efx->net_dev,
+ "failed opening device.\n");
+ goto out;
+ }
+ }
+
+ rc = ef4_selftest(efx, ef4_tests, test->flags);
+
+ if (!already_up)
+ dev_close(efx->net_dev);
+
+ netif_info(efx, drv, efx->net_dev, "%s %sline self-tests\n",
+ rc == 0 ? "passed" : "failed",
+ (test->flags & ETH_TEST_FL_OFFLINE) ? "off" : "on");
+
+out:
+ ef4_ethtool_fill_self_tests(efx, ef4_tests, NULL, data);
+ kfree(ef4_tests);
+fail:
+ if (rc)
+ test->flags |= ETH_TEST_FL_FAILED;
+}
+
+/* Restart autonegotiation */
+static int ef4_ethtool_nway_reset(struct net_device *net_dev)
+{
+ struct ef4_nic *efx = netdev_priv(net_dev);
+
+ return mdio45_nway_restart(&efx->mdio);
+}
+
+/*
+ * Each channel has a single IRQ and moderation timer, started by any
+ * completion (or other event). Unless the module parameter
+ * separate_tx_channels is set, IRQs and moderation are therefore
+ * shared between RX and TX completions. In this case, when RX IRQ
+ * moderation is explicitly changed then TX IRQ moderation is
+ * automatically changed too, but otherwise we fail if the two values
+ * are requested to be different.
+ *
+ * The hardware does not support a limit on the number of completions
+ * before an IRQ, so we do not use the max_frames fields. We should
+ * report and require that max_frames == (usecs != 0), but this would
+ * invalidate existing user documentation.
+ *
+ * The hardware does not have distinct settings for interrupt
+ * moderation while the previous IRQ is being handled, so we should
+ * not use the 'irq' fields. However, an earlier developer
+ * misunderstood the meaning of the 'irq' fields and the driver did
+ * not support the standard fields. To avoid invalidating existing
+ * user documentation, we report and accept changes through either the
+ * standard or 'irq' fields. If both are changed at the same time, we
+ * prefer the standard field.
+ *
+ * We implement adaptive IRQ moderation, but use a different algorithm
+ * from that assumed in the definition of struct ethtool_coalesce.
+ * Therefore we do not use any of the adaptive moderation parameters
+ * in it.
+ */
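+/* Sketch of the resulting behaviour with shared channels: a request
+ * such as "ethtool -C ethX rx-usecs 60" also moves TX moderation to
+ * 60us, while explicitly requesting different RX and TX values fails.
+ */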
+
+static int ef4_ethtool_get_coalesce(struct net_device *net_dev,
+ struct ethtool_coalesce *coalesce)
+{
+ struct ef4_nic *efx = netdev_priv(net_dev);
+ unsigned int tx_usecs, rx_usecs;
+ bool rx_adaptive;
+
+ ef4_get_irq_moderation(efx, &tx_usecs, &rx_usecs, &rx_adaptive);
+
+ coalesce->tx_coalesce_usecs = tx_usecs;
+ coalesce->tx_coalesce_usecs_irq = tx_usecs;
+ coalesce->rx_coalesce_usecs = rx_usecs;
+ coalesce->rx_coalesce_usecs_irq = rx_usecs;
+ coalesce->use_adaptive_rx_coalesce = rx_adaptive;
+
+ return 0;
+}
+
+static int ef4_ethtool_set_coalesce(struct net_device *net_dev,
+ struct ethtool_coalesce *coalesce)
+{
+ struct ef4_nic *efx = netdev_priv(net_dev);
+ struct ef4_channel *channel;
+ unsigned int tx_usecs, rx_usecs;
+ bool adaptive, rx_may_override_tx;
+ int rc;
+
+ if (coalesce->use_adaptive_tx_coalesce)
+ return -EINVAL;
+
+ ef4_get_irq_moderation(efx, &tx_usecs, &rx_usecs, &adaptive);
+
+ if (coalesce->rx_coalesce_usecs != rx_usecs)
+ rx_usecs = coalesce->rx_coalesce_usecs;
+ else
+ rx_usecs = coalesce->rx_coalesce_usecs_irq;
+
+ adaptive = coalesce->use_adaptive_rx_coalesce;
+
+ /* If channels are shared, TX IRQ moderation can be quietly
+ * overridden unless it is changed from its old value.
+ */
+ rx_may_override_tx = (coalesce->tx_coalesce_usecs == tx_usecs &&
+ coalesce->tx_coalesce_usecs_irq == tx_usecs);
+ if (coalesce->tx_coalesce_usecs != tx_usecs)
+ tx_usecs = coalesce->tx_coalesce_usecs;
+ else
+ tx_usecs = coalesce->tx_coalesce_usecs_irq;
+
+ rc = ef4_init_irq_moderation(efx, tx_usecs, rx_usecs, adaptive,
+ rx_may_override_tx);
+ if (rc != 0)
+ return rc;
+
+ ef4_for_each_channel(channel, efx)
+ efx->type->push_irq_moderation(channel);
+
+ return 0;
+}
+
+static void ef4_ethtool_get_ringparam(struct net_device *net_dev,
+ struct ethtool_ringparam *ring)
+{
+ struct ef4_nic *efx = netdev_priv(net_dev);
+
+ ring->rx_max_pending = EF4_MAX_DMAQ_SIZE;
+ ring->tx_max_pending = EF4_MAX_DMAQ_SIZE;
+ ring->rx_pending = efx->rxq_entries;
+ ring->tx_pending = efx->txq_entries;
+}
+
+static int ef4_ethtool_set_ringparam(struct net_device *net_dev,
+ struct ethtool_ringparam *ring)
+{
+ struct ef4_nic *efx = netdev_priv(net_dev);
+ u32 txq_entries;
+
+ if (ring->rx_mini_pending || ring->rx_jumbo_pending ||
+ ring->rx_pending > EF4_MAX_DMAQ_SIZE ||
+ ring->tx_pending > EF4_MAX_DMAQ_SIZE)
+ return -EINVAL;
+
+ if (ring->rx_pending < EF4_RXQ_MIN_ENT) {
+ netif_err(efx, drv, efx->net_dev,
+ "RX queues cannot be smaller than %u\n",
+ EF4_RXQ_MIN_ENT);
+ return -EINVAL;
+ }
+
+ txq_entries = max(ring->tx_pending, EF4_TXQ_MIN_ENT(efx));
+ if (txq_entries != ring->tx_pending)
+ netif_warn(efx, drv, efx->net_dev,
+ "increasing TX queue size to minimum of %u\n",
+ txq_entries);
+
+ return ef4_realloc_channels(efx, ring->rx_pending, txq_entries);
+}
+
+static int ef4_ethtool_set_pauseparam(struct net_device *net_dev,
+ struct ethtool_pauseparam *pause)
+{
+ struct ef4_nic *efx = netdev_priv(net_dev);
+ u8 wanted_fc, old_fc;
+ u32 old_adv;
+ int rc = 0;
+
+ mutex_lock(&efx->mac_lock);
+
+ wanted_fc = ((pause->rx_pause ? EF4_FC_RX : 0) |
+ (pause->tx_pause ? EF4_FC_TX : 0) |
+ (pause->autoneg ? EF4_FC_AUTO : 0));
+
+ if ((wanted_fc & EF4_FC_TX) && !(wanted_fc & EF4_FC_RX)) {
+ netif_dbg(efx, drv, efx->net_dev,
+ "Flow control unsupported: tx ON rx OFF\n");
+ rc = -EINVAL;
+ goto out;
+ }
+
+ if ((wanted_fc & EF4_FC_AUTO) && !efx->link_advertising) {
+ netif_dbg(efx, drv, efx->net_dev,
+ "Autonegotiation is disabled\n");
+ rc = -EINVAL;
+ goto out;
+ }
+
+ /* Hook for Falcon bug 11482 workaround */
+ if (efx->type->prepare_enable_fc_tx &&
+ (wanted_fc & EF4_FC_TX) && !(efx->wanted_fc & EF4_FC_TX))
+ efx->type->prepare_enable_fc_tx(efx);
+
+ old_adv = efx->link_advertising;
+ old_fc = efx->wanted_fc;
+ ef4_link_set_wanted_fc(efx, wanted_fc);
+ if (efx->link_advertising != old_adv ||
+ (efx->wanted_fc ^ old_fc) & EF4_FC_AUTO) {
+ rc = efx->phy_op->reconfigure(efx);
+ if (rc) {
+ netif_err(efx, drv, efx->net_dev,
+ "Unable to advertise requested flow "
+ "control setting\n");
+ goto out;
+ }
+ }
+
+ /* Reconfigure the MAC. The PHY *may* generate a link state change event
+ * if the user just changed the advertised capabilities, but there's no
+ * harm doing this twice */
+ ef4_mac_reconfigure(efx);
+
+out:
+ mutex_unlock(&efx->mac_lock);
+
+ return rc;
+}
+
+static void ef4_ethtool_get_pauseparam(struct net_device *net_dev,
+ struct ethtool_pauseparam *pause)
+{
+ struct ef4_nic *efx = netdev_priv(net_dev);
+
+ pause->rx_pause = !!(efx->wanted_fc & EF4_FC_RX);
+ pause->tx_pause = !!(efx->wanted_fc & EF4_FC_TX);
+ pause->autoneg = !!(efx->wanted_fc & EF4_FC_AUTO);
+}
+
+static void ef4_ethtool_get_wol(struct net_device *net_dev,
+ struct ethtool_wolinfo *wol)
+{
+ struct ef4_nic *efx = netdev_priv(net_dev);
+ efx->type->get_wol(efx, wol);
+}
+
+static int ef4_ethtool_set_wol(struct net_device *net_dev,
+ struct ethtool_wolinfo *wol)
+{
+ struct ef4_nic *efx = netdev_priv(net_dev);
+ return efx->type->set_wol(efx, wol->wolopts);
+}
+
+static int ef4_ethtool_reset(struct net_device *net_dev, u32 *flags)
+{
+ struct ef4_nic *efx = netdev_priv(net_dev);
+ int rc;
+
+ rc = efx->type->map_reset_flags(flags);
+ if (rc < 0)
+ return rc;
+
+ return ef4_reset(efx, rc);
+}
+
+/* MAC address mask including only I/G bit */
+static const u8 mac_addr_ig_mask[ETH_ALEN] __aligned(2) = {0x01, 0, 0, 0, 0, 0};
+
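+/* Filters match fields exactly, so each ethtool mask must be either
+ * empty or one of these full masks.
+ */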
+#define IP4_ADDR_FULL_MASK ((__force __be32)~0)
+#define IP_PROTO_FULL_MASK 0xFF
+#define PORT_FULL_MASK ((__force __be16)~0)
+#define ETHER_TYPE_FULL_MASK ((__force __be16)~0)
+
+static inline void ip6_fill_mask(__be32 *mask)
+{
+ mask[0] = mask[1] = mask[2] = mask[3] = ~(__be32)0;
+}
+
+static int ef4_ethtool_get_class_rule(struct ef4_nic *efx,
+ struct ethtool_rx_flow_spec *rule)
+{
+ struct ethtool_tcpip4_spec *ip_entry = &rule->h_u.tcp_ip4_spec;
+ struct ethtool_tcpip4_spec *ip_mask = &rule->m_u.tcp_ip4_spec;
+ struct ethtool_usrip4_spec *uip_entry = &rule->h_u.usr_ip4_spec;
+ struct ethtool_usrip4_spec *uip_mask = &rule->m_u.usr_ip4_spec;
+ struct ethtool_tcpip6_spec *ip6_entry = &rule->h_u.tcp_ip6_spec;
+ struct ethtool_tcpip6_spec *ip6_mask = &rule->m_u.tcp_ip6_spec;
+ struct ethtool_usrip6_spec *uip6_entry = &rule->h_u.usr_ip6_spec;
+ struct ethtool_usrip6_spec *uip6_mask = &rule->m_u.usr_ip6_spec;
+ struct ethhdr *mac_entry = &rule->h_u.ether_spec;
+ struct ethhdr *mac_mask = &rule->m_u.ether_spec;
+ struct ef4_filter_spec spec;
+ int rc;
+
+ rc = ef4_filter_get_filter_safe(efx, EF4_FILTER_PRI_MANUAL,
+ rule->location, &spec);
+ if (rc)
+ return rc;
+
+ if (spec.dmaq_id == EF4_FILTER_RX_DMAQ_ID_DROP)
+ rule->ring_cookie = RX_CLS_FLOW_DISC;
+ else
+ rule->ring_cookie = spec.dmaq_id;
+
+ if ((spec.match_flags & EF4_FILTER_MATCH_ETHER_TYPE) &&
+ spec.ether_type == htons(ETH_P_IP) &&
+ (spec.match_flags & EF4_FILTER_MATCH_IP_PROTO) &&
+ (spec.ip_proto == IPPROTO_TCP || spec.ip_proto == IPPROTO_UDP) &&
+ !(spec.match_flags &
+ ~(EF4_FILTER_MATCH_ETHER_TYPE | EF4_FILTER_MATCH_OUTER_VID |
+ EF4_FILTER_MATCH_LOC_HOST | EF4_FILTER_MATCH_REM_HOST |
+ EF4_FILTER_MATCH_IP_PROTO |
+ EF4_FILTER_MATCH_LOC_PORT | EF4_FILTER_MATCH_REM_PORT))) {
+ rule->flow_type = ((spec.ip_proto == IPPROTO_TCP) ?
+ TCP_V4_FLOW : UDP_V4_FLOW);
+ if (spec.match_flags & EF4_FILTER_MATCH_LOC_HOST) {
+ ip_entry->ip4dst = spec.loc_host[0];
+ ip_mask->ip4dst = IP4_ADDR_FULL_MASK;
+ }
+ if (spec.match_flags & EF4_FILTER_MATCH_REM_HOST) {
+ ip_entry->ip4src = spec.rem_host[0];
+ ip_mask->ip4src = IP4_ADDR_FULL_MASK;
+ }
+ if (spec.match_flags & EF4_FILTER_MATCH_LOC_PORT) {
+ ip_entry->pdst = spec.loc_port;
+ ip_mask->pdst = PORT_FULL_MASK;
+ }
+ if (spec.match_flags & EF4_FILTER_MATCH_REM_PORT) {
+ ip_entry->psrc = spec.rem_port;
+ ip_mask->psrc = PORT_FULL_MASK;
+ }
+ } else if ((spec.match_flags & EF4_FILTER_MATCH_ETHER_TYPE) &&
+ spec.ether_type == htons(ETH_P_IPV6) &&
+ (spec.match_flags & EF4_FILTER_MATCH_IP_PROTO) &&
+ (spec.ip_proto == IPPROTO_TCP || spec.ip_proto == IPPROTO_UDP) &&
+ !(spec.match_flags &
+ ~(EF4_FILTER_MATCH_ETHER_TYPE | EF4_FILTER_MATCH_OUTER_VID |
+ EF4_FILTER_MATCH_LOC_HOST | EF4_FILTER_MATCH_REM_HOST |
+ EF4_FILTER_MATCH_IP_PROTO |
+ EF4_FILTER_MATCH_LOC_PORT | EF4_FILTER_MATCH_REM_PORT))) {
+ rule->flow_type = ((spec.ip_proto == IPPROTO_TCP) ?
+ TCP_V6_FLOW : UDP_V6_FLOW);
+ if (spec.match_flags & EF4_FILTER_MATCH_LOC_HOST) {
+ memcpy(ip6_entry->ip6dst, spec.loc_host,
+ sizeof(ip6_entry->ip6dst));
+ ip6_fill_mask(ip6_mask->ip6dst);
+ }
+ if (spec.match_flags & EF4_FILTER_MATCH_REM_HOST) {
+ memcpy(ip6_entry->ip6src, spec.rem_host,
+ sizeof(ip6_entry->ip6src));
+ ip6_fill_mask(ip6_mask->ip6src);
+ }
+ if (spec.match_flags & EF4_FILTER_MATCH_LOC_PORT) {
+ ip6_entry->pdst = spec.loc_port;
+ ip6_mask->pdst = PORT_FULL_MASK;
+ }
+ if (spec.match_flags & EF4_FILTER_MATCH_REM_PORT) {
+ ip6_entry->psrc = spec.rem_port;
+ ip6_mask->psrc = PORT_FULL_MASK;
+ }
+ } else if (!(spec.match_flags &
+ ~(EF4_FILTER_MATCH_LOC_MAC | EF4_FILTER_MATCH_LOC_MAC_IG |
+ EF4_FILTER_MATCH_REM_MAC | EF4_FILTER_MATCH_ETHER_TYPE |
+ EF4_FILTER_MATCH_OUTER_VID))) {
+ rule->flow_type = ETHER_FLOW;
+ if (spec.match_flags &
+ (EF4_FILTER_MATCH_LOC_MAC | EF4_FILTER_MATCH_LOC_MAC_IG)) {
+ ether_addr_copy(mac_entry->h_dest, spec.loc_mac);
+ if (spec.match_flags & EF4_FILTER_MATCH_LOC_MAC)
+ eth_broadcast_addr(mac_mask->h_dest);
+ else
+ ether_addr_copy(mac_mask->h_dest,
+ mac_addr_ig_mask);
+ }
+ if (spec.match_flags & EF4_FILTER_MATCH_REM_MAC) {
+ ether_addr_copy(mac_entry->h_source, spec.rem_mac);
+ eth_broadcast_addr(mac_mask->h_source);
+ }
+ if (spec.match_flags & EF4_FILTER_MATCH_ETHER_TYPE) {
+ mac_entry->h_proto = spec.ether_type;
+ mac_mask->h_proto = ETHER_TYPE_FULL_MASK;
+ }
+ } else if (spec.match_flags & EF4_FILTER_MATCH_ETHER_TYPE &&
+ spec.ether_type == htons(ETH_P_IP) &&
+ !(spec.match_flags &
+ ~(EF4_FILTER_MATCH_ETHER_TYPE | EF4_FILTER_MATCH_OUTER_VID |
+ EF4_FILTER_MATCH_LOC_HOST | EF4_FILTER_MATCH_REM_HOST |
+ EF4_FILTER_MATCH_IP_PROTO))) {
+ rule->flow_type = IPV4_USER_FLOW;
+ uip_entry->ip_ver = ETH_RX_NFC_IP4;
+ if (spec.match_flags & EF4_FILTER_MATCH_IP_PROTO) {
+ uip_mask->proto = IP_PROTO_FULL_MASK;
+ uip_entry->proto = spec.ip_proto;
+ }
+ if (spec.match_flags & EF4_FILTER_MATCH_LOC_HOST) {
+ uip_entry->ip4dst = spec.loc_host[0];
+ uip_mask->ip4dst = IP4_ADDR_FULL_MASK;
+ }
+ if (spec.match_flags & EF4_FILTER_MATCH_REM_HOST) {
+ uip_entry->ip4src = spec.rem_host[0];
+ uip_mask->ip4src = IP4_ADDR_FULL_MASK;
+ }
+ } else if (spec.match_flags & EF4_FILTER_MATCH_ETHER_TYPE &&
+ spec.ether_type == htons(ETH_P_IPV6) &&
+ !(spec.match_flags &
+ ~(EF4_FILTER_MATCH_ETHER_TYPE | EF4_FILTER_MATCH_OUTER_VID |
+ EF4_FILTER_MATCH_LOC_HOST | EF4_FILTER_MATCH_REM_HOST |
+ EF4_FILTER_MATCH_IP_PROTO))) {
+ rule->flow_type = IPV6_USER_FLOW;
+ if (spec.match_flags & EF4_FILTER_MATCH_IP_PROTO) {
+ uip6_mask->l4_proto = IP_PROTO_FULL_MASK;
+ uip6_entry->l4_proto = spec.ip_proto;
+ }
+ if (spec.match_flags & EF4_FILTER_MATCH_LOC_HOST) {
+ memcpy(uip6_entry->ip6dst, spec.loc_host,
+ sizeof(uip6_entry->ip6dst));
+ ip6_fill_mask(uip6_mask->ip6dst);
+ }
+ if (spec.match_flags & EF4_FILTER_MATCH_REM_HOST) {
+ memcpy(uip6_entry->ip6src, spec.rem_host,
+ sizeof(uip6_entry->ip6src));
+ ip6_fill_mask(uip6_mask->ip6src);
+ }
+ } else {
+ /* The above should handle all filters that we insert */
+ WARN_ON(1);
+ return -EINVAL;
+ }
+
+ if (spec.match_flags & EF4_FILTER_MATCH_OUTER_VID) {
+ rule->flow_type |= FLOW_EXT;
+ rule->h_ext.vlan_tci = spec.outer_vid;
+ rule->m_ext.vlan_tci = htons(0xfff);
+ }
+
+ return rc;
+}
+
+static int
+ef4_ethtool_get_rxnfc(struct net_device *net_dev,
+ struct ethtool_rxnfc *info, u32 *rule_locs)
+{
+ struct ef4_nic *efx = netdev_priv(net_dev);
+
+ switch (info->cmd) {
+ case ETHTOOL_GRXRINGS:
+ info->data = efx->n_rx_channels;
+ return 0;
+
+ case ETHTOOL_GRXFH: {
+ unsigned min_revision = 0;
+
+ info->data = 0;
+ switch (info->flow_type) {
+ case TCP_V4_FLOW:
+ info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
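+ /* Fall through */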
+ case UDP_V4_FLOW:
+ case SCTP_V4_FLOW:
+ case AH_ESP_V4_FLOW:
+ case IPV4_FLOW:
+ info->data |= RXH_IP_SRC | RXH_IP_DST;
+ min_revision = EF4_REV_FALCON_B0;
+ break;
+ default:
+ break;
+ }
+ if (ef4_nic_rev(efx) < min_revision)
+ info->data = 0;
+ return 0;
+ }
+
+ case ETHTOOL_GRXCLSRLCNT:
+ info->data = ef4_filter_get_rx_id_limit(efx);
+ if (info->data == 0)
+ return -EOPNOTSUPP;
+ info->data |= RX_CLS_LOC_SPECIAL;
+ info->rule_cnt =
+ ef4_filter_count_rx_used(efx, EF4_FILTER_PRI_MANUAL);
+ return 0;
+
+ case ETHTOOL_GRXCLSRULE:
+ if (ef4_filter_get_rx_id_limit(efx) == 0)
+ return -EOPNOTSUPP;
+ return ef4_ethtool_get_class_rule(efx, &info->fs);
+
+ case ETHTOOL_GRXCLSRLALL: {
+ s32 rc;
+ info->data = ef4_filter_get_rx_id_limit(efx);
+ if (info->data == 0)
+ return -EOPNOTSUPP;
+ rc = ef4_filter_get_rx_ids(efx, EF4_FILTER_PRI_MANUAL,
+ rule_locs, info->rule_cnt);
+ if (rc < 0)
+ return rc;
+ info->rule_cnt = rc;
+ return 0;
+ }
+
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
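+/* An IPv6 mask is "full" when all 128 bits are set: AND the four words
+ * together and check that the complement is zero.
+ */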
+static inline bool ip6_mask_is_full(__be32 mask[4])
+{
+ return !~(mask[0] & mask[1] & mask[2] & mask[3]);
+}
+
+static inline bool ip6_mask_is_empty(__be32 mask[4])
+{
+ return !(mask[0] | mask[1] | mask[2] | mask[3]);
+}
+
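+/* Translate an ethtool flow spec into an ef4_filter_spec and insert it,
+ * letting the driver pick the location. Only all-or-nothing field masks
+ * are accepted; anything else is -EINVAL.
+ */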
+static int ef4_ethtool_set_class_rule(struct ef4_nic *efx,
+ struct ethtool_rx_flow_spec *rule)
+{
+ struct ethtool_tcpip4_spec *ip_entry = &rule->h_u.tcp_ip4_spec;
+ struct ethtool_tcpip4_spec *ip_mask = &rule->m_u.tcp_ip4_spec;
+ struct ethtool_usrip4_spec *uip_entry = &rule->h_u.usr_ip4_spec;
+ struct ethtool_usrip4_spec *uip_mask = &rule->m_u.usr_ip4_spec;
+ struct ethtool_tcpip6_spec *ip6_entry = &rule->h_u.tcp_ip6_spec;
+ struct ethtool_tcpip6_spec *ip6_mask = &rule->m_u.tcp_ip6_spec;
+ struct ethtool_usrip6_spec *uip6_entry = &rule->h_u.usr_ip6_spec;
+ struct ethtool_usrip6_spec *uip6_mask = &rule->m_u.usr_ip6_spec;
+ struct ethhdr *mac_entry = &rule->h_u.ether_spec;
+ struct ethhdr *mac_mask = &rule->m_u.ether_spec;
+ struct ef4_filter_spec spec;
+ int rc;
+
+ /* Check that user wants us to choose the location */
+ if (rule->location != RX_CLS_LOC_ANY)
+ return -EINVAL;
+
+ /* Range-check ring_cookie */
+ if (rule->ring_cookie >= efx->n_rx_channels &&
+ rule->ring_cookie != RX_CLS_FLOW_DISC)
+ return -EINVAL;
+
+ /* Check for unsupported extensions */
+ if ((rule->flow_type & FLOW_EXT) &&
+ (rule->m_ext.vlan_etype || rule->m_ext.data[0] ||
+ rule->m_ext.data[1]))
+ return -EINVAL;
+
+ ef4_filter_init_rx(&spec, EF4_FILTER_PRI_MANUAL,
+ efx->rx_scatter ? EF4_FILTER_FLAG_RX_SCATTER : 0,
+ (rule->ring_cookie == RX_CLS_FLOW_DISC) ?
+ EF4_FILTER_RX_DMAQ_ID_DROP : rule->ring_cookie);
+
+ switch (rule->flow_type & ~FLOW_EXT) {
+ case TCP_V4_FLOW:
+ case UDP_V4_FLOW:
+ spec.match_flags = (EF4_FILTER_MATCH_ETHER_TYPE |
+ EF4_FILTER_MATCH_IP_PROTO);
+ spec.ether_type = htons(ETH_P_IP);
+ spec.ip_proto = ((rule->flow_type & ~FLOW_EXT) == TCP_V4_FLOW ?
+ IPPROTO_TCP : IPPROTO_UDP);
+ if (ip_mask->ip4dst) {
+ if (ip_mask->ip4dst != IP4_ADDR_FULL_MASK)
+ return -EINVAL;
+ spec.match_flags |= EF4_FILTER_MATCH_LOC_HOST;
+ spec.loc_host[0] = ip_entry->ip4dst;
+ }
+ if (ip_mask->ip4src) {
+ if (ip_mask->ip4src != IP4_ADDR_FULL_MASK)
+ return -EINVAL;
+ spec.match_flags |= EF4_FILTER_MATCH_REM_HOST;
+ spec.rem_host[0] = ip_entry->ip4src;
+ }
+ if (ip_mask->pdst) {
+ if (ip_mask->pdst != PORT_FULL_MASK)
+ return -EINVAL;
+ spec.match_flags |= EF4_FILTER_MATCH_LOC_PORT;
+ spec.loc_port = ip_entry->pdst;
+ }
+ if (ip_mask->psrc) {
+ if (ip_mask->psrc != PORT_FULL_MASK)
+ return -EINVAL;
+ spec.match_flags |= EF4_FILTER_MATCH_REM_PORT;
+ spec.rem_port = ip_entry->psrc;
+ }
+ if (ip_mask->tos)
+ return -EINVAL;
+ break;
+
+ case TCP_V6_FLOW:
+ case UDP_V6_FLOW:
+ spec.match_flags = (EF4_FILTER_MATCH_ETHER_TYPE |
+ EF4_FILTER_MATCH_IP_PROTO);
+ spec.ether_type = htons(ETH_P_IPV6);
+ spec.ip_proto = ((rule->flow_type & ~FLOW_EXT) == TCP_V6_FLOW ?
+ IPPROTO_TCP : IPPROTO_UDP);
+ if (!ip6_mask_is_empty(ip6_mask->ip6dst)) {
+ if (!ip6_mask_is_full(ip6_mask->ip6dst))
+ return -EINVAL;
+ spec.match_flags |= EF4_FILTER_MATCH_LOC_HOST;
+ memcpy(spec.loc_host, ip6_entry->ip6dst, sizeof(spec.loc_host));
+ }
+ if (!ip6_mask_is_empty(ip6_mask->ip6src)) {
+ if (!ip6_mask_is_full(ip6_mask->ip6src))
+ return -EINVAL;
+ spec.match_flags |= EF4_FILTER_MATCH_REM_HOST;
+ memcpy(spec.rem_host, ip6_entry->ip6src, sizeof(spec.rem_host));
+ }
+ if (ip6_mask->pdst) {
+ if (ip6_mask->pdst != PORT_FULL_MASK)
+ return -EINVAL;
+ spec.match_flags |= EF4_FILTER_MATCH_LOC_PORT;
+ spec.loc_port = ip6_entry->pdst;
+ }
+ if (ip6_mask->psrc) {
+ if (ip6_mask->psrc != PORT_FULL_MASK)
+ return -EINVAL;
+ spec.match_flags |= EF4_FILTER_MATCH_REM_PORT;
+ spec.rem_port = ip6_entry->psrc;
+ }
+ if (ip6_mask->tclass)
+ return -EINVAL;
+ break;
+
+ case IPV4_USER_FLOW:
+ if (uip_mask->l4_4_bytes || uip_mask->tos || uip_mask->ip_ver ||
+ uip_entry->ip_ver != ETH_RX_NFC_IP4)
+ return -EINVAL;
+ spec.match_flags = EF4_FILTER_MATCH_ETHER_TYPE;
+ spec.ether_type = htons(ETH_P_IP);
+ if (uip_mask->ip4dst) {
+ if (uip_mask->ip4dst != IP4_ADDR_FULL_MASK)
+ return -EINVAL;
+ spec.match_flags |= EF4_FILTER_MATCH_LOC_HOST;
+ spec.loc_host[0] = uip_entry->ip4dst;
+ }
+ if (uip_mask->ip4src) {
+ if (uip_mask->ip4src != IP4_ADDR_FULL_MASK)
+ return -EINVAL;
+ spec.match_flags |= EF4_FILTER_MATCH_REM_HOST;
+ spec.rem_host[0] = uip_entry->ip4src;
+ }
+ if (uip_mask->proto) {
+ if (uip_mask->proto != IP_PROTO_FULL_MASK)
+ return -EINVAL;
+ spec.match_flags |= EF4_FILTER_MATCH_IP_PROTO;
+ spec.ip_proto = uip_entry->proto;
+ }
+ break;
+
+ case IPV6_USER_FLOW:
+ if (uip6_mask->l4_4_bytes || uip6_mask->tclass)
+ return -EINVAL;
+ spec.match_flags = EF4_FILTER_MATCH_ETHER_TYPE;
+ spec.ether_type = htons(ETH_P_IPV6);
+ if (!ip6_mask_is_empty(uip6_mask->ip6dst)) {
+ if (!ip6_mask_is_full(uip6_mask->ip6dst))
+ return -EINVAL;
+ spec.match_flags |= EF4_FILTER_MATCH_LOC_HOST;
+ memcpy(spec.loc_host, uip6_entry->ip6dst, sizeof(spec.loc_host));
+ }
+ if (!ip6_mask_is_empty(uip6_mask->ip6src)) {
+ if (!ip6_mask_is_full(uip6_mask->ip6src))
+ return -EINVAL;
+ spec.match_flags |= EF4_FILTER_MATCH_REM_HOST;
+ memcpy(spec.rem_host, uip6_entry->ip6src, sizeof(spec.rem_host));
+ }
+ if (uip6_mask->l4_proto) {
+ if (uip6_mask->l4_proto != IP_PROTO_FULL_MASK)
+ return -EINVAL;
+ spec.match_flags |= EF4_FILTER_MATCH_IP_PROTO;
+ spec.ip_proto = uip6_entry->l4_proto;
+ }
+ break;
+
+ case ETHER_FLOW:
+ if (!is_zero_ether_addr(mac_mask->h_dest)) {
+ if (ether_addr_equal(mac_mask->h_dest,
+ mac_addr_ig_mask))
+ spec.match_flags |= EF4_FILTER_MATCH_LOC_MAC_IG;
+ else if (is_broadcast_ether_addr(mac_mask->h_dest))
+ spec.match_flags |= EF4_FILTER_MATCH_LOC_MAC;
+ else
+ return -EINVAL;
+ ether_addr_copy(spec.loc_mac, mac_entry->h_dest);
+ }
+ if (!is_zero_ether_addr(mac_mask->h_source)) {
+ if (!is_broadcast_ether_addr(mac_mask->h_source))
+ return -EINVAL;
+ spec.match_flags |= EF4_FILTER_MATCH_REM_MAC;
+ ether_addr_copy(spec.rem_mac, mac_entry->h_source);
+ }
+ if (mac_mask->h_proto) {
+ if (mac_mask->h_proto != ETHER_TYPE_FULL_MASK)
+ return -EINVAL;
+ spec.match_flags |= EF4_FILTER_MATCH_ETHER_TYPE;
+ spec.ether_type = mac_entry->h_proto;
+ }
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ if ((rule->flow_type & FLOW_EXT) && rule->m_ext.vlan_tci) {
+ if (rule->m_ext.vlan_tci != htons(0xfff))
+ return -EINVAL;
+ spec.match_flags |= EF4_FILTER_MATCH_OUTER_VID;
+ spec.outer_vid = rule->h_ext.vlan_tci;
+ }
+
+ rc = ef4_filter_insert_filter(efx, &spec, true);
+ if (rc < 0)
+ return rc;
+
+ rule->location = rc;
+ return 0;
+}
+
+static int ef4_ethtool_set_rxnfc(struct net_device *net_dev,
+ struct ethtool_rxnfc *info)
+{
+ struct ef4_nic *efx = netdev_priv(net_dev);
+
+ if (ef4_filter_get_rx_id_limit(efx) == 0)
+ return -EOPNOTSUPP;
+
+ switch (info->cmd) {
+ case ETHTOOL_SRXCLSRLINS:
+ return ef4_ethtool_set_class_rule(efx, &info->fs);
+
+ case ETHTOOL_SRXCLSRLDEL:
+ return ef4_filter_remove_id_safe(efx, EF4_FILTER_PRI_MANUAL,
+ info->fs.location);
+
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
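+/* RSS needs Falcon B0 and more than one RX channel; advertise an
+ * indirection table size of 0 when it is unavailable.
+ */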
+static u32 ef4_ethtool_get_rxfh_indir_size(struct net_device *net_dev)
+{
+ struct ef4_nic *efx = netdev_priv(net_dev);
+
+ return ((ef4_nic_rev(efx) < EF4_REV_FALCON_B0 ||
+ efx->n_rx_channels == 1) ?
+ 0 : ARRAY_SIZE(efx->rx_indir_table));
+}
+
+static int ef4_ethtool_get_rxfh(struct net_device *net_dev, u32 *indir, u8 *key,
+ u8 *hfunc)
+{
+ struct ef4_nic *efx = netdev_priv(net_dev);
+
+ if (hfunc)
+ *hfunc = ETH_RSS_HASH_TOP;
+ if (indir)
+ memcpy(indir, efx->rx_indir_table, sizeof(efx->rx_indir_table));
+ return 0;
+}
+
+static int ef4_ethtool_set_rxfh(struct net_device *net_dev, const u32 *indir,
+ const u8 *key, const u8 hfunc)
+{
+ struct ef4_nic *efx = netdev_priv(net_dev);
+
+ /* We do not allow change in unsupported parameters */
+ if (key ||
+ (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP))
+ return -EOPNOTSUPP;
+ if (!indir)
+ return 0;
+
+ return efx->type->rx_push_rss_config(efx, true, indir);
+}
+
+static int ef4_ethtool_get_module_eeprom(struct net_device *net_dev,
+ struct ethtool_eeprom *ee,
+ u8 *data)
+{
+ struct ef4_nic *efx = netdev_priv(net_dev);
+ int ret;
+
+ if (!efx->phy_op || !efx->phy_op->get_module_eeprom)
+ return -EOPNOTSUPP;
+
+ mutex_lock(&efx->mac_lock);
+ ret = efx->phy_op->get_module_eeprom(efx, ee, data);
+ mutex_unlock(&efx->mac_lock);
+
+ return ret;
+}
+
+static int ef4_ethtool_get_module_info(struct net_device *net_dev,
+ struct ethtool_modinfo *modinfo)
+{
+ struct ef4_nic *efx = netdev_priv(net_dev);
+ int ret;
+
+ if (!efx->phy_op || !efx->phy_op->get_module_info)
+ return -EOPNOTSUPP;
+
+ mutex_lock(&efx->mac_lock);
+ ret = efx->phy_op->get_module_info(efx, modinfo);
+ mutex_unlock(&efx->mac_lock);
+
+ return ret;
+}
+
+const struct ethtool_ops ef4_ethtool_ops = {
+ .get_settings = ef4_ethtool_get_settings,
+ .set_settings = ef4_ethtool_set_settings,
+ .get_drvinfo = ef4_ethtool_get_drvinfo,
+ .get_regs_len = ef4_ethtool_get_regs_len,
+ .get_regs = ef4_ethtool_get_regs,
+ .get_msglevel = ef4_ethtool_get_msglevel,
+ .set_msglevel = ef4_ethtool_set_msglevel,
+ .nway_reset = ef4_ethtool_nway_reset,
+ .get_link = ethtool_op_get_link,
+ .get_coalesce = ef4_ethtool_get_coalesce,
+ .set_coalesce = ef4_ethtool_set_coalesce,
+ .get_ringparam = ef4_ethtool_get_ringparam,
+ .set_ringparam = ef4_ethtool_set_ringparam,
+ .get_pauseparam = ef4_ethtool_get_pauseparam,
+ .set_pauseparam = ef4_ethtool_set_pauseparam,
+ .get_sset_count = ef4_ethtool_get_sset_count,
+ .self_test = ef4_ethtool_self_test,
+ .get_strings = ef4_ethtool_get_strings,
+ .set_phys_id = ef4_ethtool_phys_id,
+ .get_ethtool_stats = ef4_ethtool_get_stats,
+ .get_wol = ef4_ethtool_get_wol,
+ .set_wol = ef4_ethtool_set_wol,
+ .reset = ef4_ethtool_reset,
+ .get_rxnfc = ef4_ethtool_get_rxnfc,
+ .set_rxnfc = ef4_ethtool_set_rxnfc,
+ .get_rxfh_indir_size = ef4_ethtool_get_rxfh_indir_size,
+ .get_rxfh = ef4_ethtool_get_rxfh,
+ .set_rxfh = ef4_ethtool_set_rxfh,
+ .get_module_info = ef4_ethtool_get_module_info,
+ .get_module_eeprom = ef4_ethtool_get_module_eeprom,
+};
diff --git a/drivers/net/ethernet/sfc/falcon.c b/drivers/net/ethernet/sfc/falcon/falcon.c
index 1a7092602aec..c6ff0cc5ef18 100644
--- a/drivers/net/ethernet/sfc/falcon.c
+++ b/drivers/net/ethernet/sfc/falcon/falcon.c
@@ -145,7 +145,7 @@
#define GENERIC_SW_STAT(ext_name) \
[GENERIC_STAT_ ## ext_name] = { #ext_name, 0, 0 }
-static const struct efx_hw_stat_desc falcon_stat_desc[FALCON_STAT_COUNT] = {
+static const struct ef4_hw_stat_desc falcon_stat_desc[FALCON_STAT_COUNT] = {
FALCON_DMA_STAT(tx_bytes, XgTxOctets),
FALCON_DMA_STAT(tx_packets, XgTxPkts),
FALCON_DMA_STAT(tx_pause, XgTxPausePkts),
@@ -273,34 +273,34 @@ struct falcon_nvconfig_board_v3 {
#define SPI_DEV_TYPE_BLOCK_SIZE_LBN 24
#define SPI_DEV_TYPE_BLOCK_SIZE_WIDTH 5
#define SPI_DEV_TYPE_FIELD(type, field) \
- (((type) >> EFX_LOW_BIT(field)) & EFX_MASK32(EFX_WIDTH(field)))
+ (((type) >> EF4_LOW_BIT(field)) & EF4_MASK32(EF4_WIDTH(field)))
#define FALCON_NVCONFIG_OFFSET 0x300
#define FALCON_NVCONFIG_BOARD_MAGIC_NUM 0xFA1C
struct falcon_nvconfig {
- efx_oword_t ee_vpd_cfg_reg; /* 0x300 */
+ ef4_oword_t ee_vpd_cfg_reg; /* 0x300 */
u8 mac_address[2][8]; /* 0x310 */
- efx_oword_t pcie_sd_ctl0123_reg; /* 0x320 */
- efx_oword_t pcie_sd_ctl45_reg; /* 0x330 */
- efx_oword_t pcie_pcs_ctl_stat_reg; /* 0x340 */
- efx_oword_t hw_init_reg; /* 0x350 */
- efx_oword_t nic_stat_reg; /* 0x360 */
- efx_oword_t glb_ctl_reg; /* 0x370 */
- efx_oword_t srm_cfg_reg; /* 0x380 */
- efx_oword_t spare_reg; /* 0x390 */
+ ef4_oword_t pcie_sd_ctl0123_reg; /* 0x320 */
+ ef4_oword_t pcie_sd_ctl45_reg; /* 0x330 */
+ ef4_oword_t pcie_pcs_ctl_stat_reg; /* 0x340 */
+ ef4_oword_t hw_init_reg; /* 0x350 */
+ ef4_oword_t nic_stat_reg; /* 0x360 */
+ ef4_oword_t glb_ctl_reg; /* 0x370 */
+ ef4_oword_t srm_cfg_reg; /* 0x380 */
+ ef4_oword_t spare_reg; /* 0x390 */
__le16 board_magic_num; /* 0x3A0 */
__le16 board_struct_ver;
__le16 board_checksum;
struct falcon_nvconfig_board_v2 board_v2;
- efx_oword_t ee_base_page_reg; /* 0x3B0 */
+ ef4_oword_t ee_base_page_reg; /* 0x3B0 */
struct falcon_nvconfig_board_v3 board_v3; /* 0x3C0 */
} __packed;
/*************************************************************************/
-static int falcon_reset_hw(struct efx_nic *efx, enum reset_type method);
-static void falcon_reconfigure_mac_wrapper(struct efx_nic *efx);
+static int falcon_reset_hw(struct ef4_nic *efx, enum reset_type method);
+static void falcon_reconfigure_mac_wrapper(struct ef4_nic *efx);
static const unsigned int
/* "Large" EEPROM device: Atmel AT25640 or similar
@@ -326,40 +326,40 @@ default_flash_type = ((17 << SPI_DEV_TYPE_SIZE_LBN)
*/
static void falcon_setsda(void *data, int state)
{
- struct efx_nic *efx = (struct efx_nic *)data;
- efx_oword_t reg;
+ struct ef4_nic *efx = (struct ef4_nic *)data;
+ ef4_oword_t reg;
- efx_reado(efx, &reg, FR_AB_GPIO_CTL);
- EFX_SET_OWORD_FIELD(reg, FRF_AB_GPIO3_OEN, !state);
- efx_writeo(efx, &reg, FR_AB_GPIO_CTL);
+ ef4_reado(efx, &reg, FR_AB_GPIO_CTL);
+ EF4_SET_OWORD_FIELD(reg, FRF_AB_GPIO3_OEN, !state);
+ ef4_writeo(efx, &reg, FR_AB_GPIO_CTL);
}
static void falcon_setscl(void *data, int state)
{
- struct efx_nic *efx = (struct efx_nic *)data;
- efx_oword_t reg;
+ struct ef4_nic *efx = (struct ef4_nic *)data;
+ ef4_oword_t reg;
- efx_reado(efx, &reg, FR_AB_GPIO_CTL);
- EFX_SET_OWORD_FIELD(reg, FRF_AB_GPIO0_OEN, !state);
- efx_writeo(efx, &reg, FR_AB_GPIO_CTL);
+ ef4_reado(efx, &reg, FR_AB_GPIO_CTL);
+ EF4_SET_OWORD_FIELD(reg, FRF_AB_GPIO0_OEN, !state);
+ ef4_writeo(efx, &reg, FR_AB_GPIO_CTL);
}
static int falcon_getsda(void *data)
{
- struct efx_nic *efx = (struct efx_nic *)data;
- efx_oword_t reg;
+ struct ef4_nic *efx = (struct ef4_nic *)data;
+ ef4_oword_t reg;
- efx_reado(efx, &reg, FR_AB_GPIO_CTL);
- return EFX_OWORD_FIELD(reg, FRF_AB_GPIO3_IN);
+ ef4_reado(efx, &reg, FR_AB_GPIO_CTL);
+ return EF4_OWORD_FIELD(reg, FRF_AB_GPIO3_IN);
}
static int falcon_getscl(void *data)
{
- struct efx_nic *efx = (struct efx_nic *)data;
- efx_oword_t reg;
+ struct ef4_nic *efx = (struct ef4_nic *)data;
+ ef4_oword_t reg;
- efx_reado(efx, &reg, FR_AB_GPIO_CTL);
- return EFX_OWORD_FIELD(reg, FRF_AB_GPIO0_IN);
+ ef4_reado(efx, &reg, FR_AB_GPIO_CTL);
+ return EF4_OWORD_FIELD(reg, FRF_AB_GPIO0_IN);
}
static const struct i2c_algo_bit_data falcon_i2c_bit_operations = {
@@ -372,35 +372,35 @@ static const struct i2c_algo_bit_data falcon_i2c_bit_operations = {
.timeout = DIV_ROUND_UP(HZ, 20),
};
-static void falcon_push_irq_moderation(struct efx_channel *channel)
+static void falcon_push_irq_moderation(struct ef4_channel *channel)
{
- efx_dword_t timer_cmd;
- struct efx_nic *efx = channel->efx;
+ ef4_dword_t timer_cmd;
+ struct ef4_nic *efx = channel->efx;
/* Set timer register */
if (channel->irq_moderation_us) {
unsigned int ticks;
- ticks = efx_usecs_to_ticks(efx, channel->irq_moderation_us);
- EFX_POPULATE_DWORD_2(timer_cmd,
+ ticks = ef4_usecs_to_ticks(efx, channel->irq_moderation_us);
+ EF4_POPULATE_DWORD_2(timer_cmd,
FRF_AB_TC_TIMER_MODE,
FFE_BB_TIMER_MODE_INT_HLDOFF,
FRF_AB_TC_TIMER_VAL,
ticks - 1);
} else {
- EFX_POPULATE_DWORD_2(timer_cmd,
+ EF4_POPULATE_DWORD_2(timer_cmd,
FRF_AB_TC_TIMER_MODE,
FFE_BB_TIMER_MODE_DIS,
FRF_AB_TC_TIMER_VAL, 0);
}
BUILD_BUG_ON(FR_AA_TIMER_COMMAND_KER != FR_BZ_TIMER_COMMAND_P0);
- efx_writed_page_locked(efx, &timer_cmd, FR_BZ_TIMER_COMMAND_P0,
+ ef4_writed_page_locked(efx, &timer_cmd, FR_BZ_TIMER_COMMAND_P0,
channel->channel);
}
-static void falcon_deconfigure_mac_wrapper(struct efx_nic *efx);
+static void falcon_deconfigure_mac_wrapper(struct ef4_nic *efx);
-static void falcon_prepare_flush(struct efx_nic *efx)
+static void falcon_prepare_flush(struct ef4_nic *efx)
{
falcon_deconfigure_mac_wrapper(efx);
@@ -420,26 +420,26 @@ static void falcon_prepare_flush(struct efx_nic *efx)
*
* NB most hardware supports MSI interrupts
*/
-static inline void falcon_irq_ack_a1(struct efx_nic *efx)
+static inline void falcon_irq_ack_a1(struct ef4_nic *efx)
{
- efx_dword_t reg;
+ ef4_dword_t reg;
- EFX_POPULATE_DWORD_1(reg, FRF_AA_INT_ACK_KER_FIELD, 0xb7eb7e);
- efx_writed(efx, &reg, FR_AA_INT_ACK_KER);
- efx_readd(efx, &reg, FR_AA_WORK_AROUND_BROKEN_PCI_READS);
+ EF4_POPULATE_DWORD_1(reg, FRF_AA_INT_ACK_KER_FIELD, 0xb7eb7e);
+ ef4_writed(efx, &reg, FR_AA_INT_ACK_KER);
+ ef4_readd(efx, &reg, FR_AA_WORK_AROUND_BROKEN_PCI_READS);
}
static irqreturn_t falcon_legacy_interrupt_a1(int irq, void *dev_id)
{
- struct efx_nic *efx = dev_id;
- efx_oword_t *int_ker = efx->irq_status.addr;
+ struct ef4_nic *efx = dev_id;
+ ef4_oword_t *int_ker = efx->irq_status.addr;
int syserr;
int queues;
/* Check to see if this is our interrupt. If it isn't, we
* exit without having touched the hardware.
*/
- if (unlikely(EFX_OWORD_IS_ZERO(*int_ker))) {
+ if (unlikely(EF4_OWORD_IS_ZERO(*int_ker))) {
netif_vdbg(efx, intr, efx->net_dev,
"IRQ %d on CPU %d not for me\n", irq,
raw_smp_processor_id());
@@ -447,30 +447,30 @@ static irqreturn_t falcon_legacy_interrupt_a1(int irq, void *dev_id)
}
efx->last_irq_cpu = raw_smp_processor_id();
netif_vdbg(efx, intr, efx->net_dev,
- "IRQ %d on CPU %d status " EFX_OWORD_FMT "\n",
- irq, raw_smp_processor_id(), EFX_OWORD_VAL(*int_ker));
+ "IRQ %d on CPU %d status " EF4_OWORD_FMT "\n",
+ irq, raw_smp_processor_id(), EF4_OWORD_VAL(*int_ker));
if (!likely(ACCESS_ONCE(efx->irq_soft_enabled)))
return IRQ_HANDLED;
/* Check to see if we have a serious error condition */
- syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT);
+ syserr = EF4_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT);
if (unlikely(syserr))
- return efx_farch_fatal_interrupt(efx);
+ return ef4_farch_fatal_interrupt(efx);
/* Determine interrupting queues, clear interrupt status
* register and acknowledge the device interrupt.
*/
- BUILD_BUG_ON(FSF_AZ_NET_IVEC_INT_Q_WIDTH > EFX_MAX_CHANNELS);
- queues = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_INT_Q);
- EFX_ZERO_OWORD(*int_ker);
+ BUILD_BUG_ON(FSF_AZ_NET_IVEC_INT_Q_WIDTH > EF4_MAX_CHANNELS);
+ queues = EF4_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_INT_Q);
+ EF4_ZERO_OWORD(*int_ker);
wmb(); /* Ensure the vector is cleared before interrupt ack */
falcon_irq_ack_a1(efx);
if (queues & 1)
- efx_schedule_channel_irq(efx_get_channel(efx, 0));
+ ef4_schedule_channel_irq(ef4_get_channel(efx, 0));
if (queues & 2)
- efx_schedule_channel_irq(efx_get_channel(efx, 1));
+ ef4_schedule_channel_irq(ef4_get_channel(efx, 1));
return IRQ_HANDLED;
}
@@ -480,7 +480,7 @@ static irqreturn_t falcon_legacy_interrupt_a1(int irq, void *dev_id)
*
**************************************************************************
*/
-static int dummy_rx_push_rss_config(struct efx_nic *efx, bool user,
+static int dummy_rx_push_rss_config(struct ef4_nic *efx, bool user,
const u32 *rx_indir_table)
{
(void) efx;
@@ -489,19 +489,19 @@ static int dummy_rx_push_rss_config(struct efx_nic *efx, bool user,
return -ENOSYS;
}
-static int falcon_b0_rx_push_rss_config(struct efx_nic *efx, bool user,
+static int falcon_b0_rx_push_rss_config(struct ef4_nic *efx, bool user,
const u32 *rx_indir_table)
{
- efx_oword_t temp;
+ ef4_oword_t temp;
(void) user;
/* Set hash key for IPv4 */
memcpy(&temp, efx->rx_hash_key, sizeof(temp));
- efx_writeo(efx, &temp, FR_BZ_RX_RSS_TKEY);
+ ef4_writeo(efx, &temp, FR_BZ_RX_RSS_TKEY);
memcpy(efx->rx_indir_table, rx_indir_table,
sizeof(efx->rx_indir_table));
- efx_farch_rx_push_indir_table(efx);
+ ef4_farch_rx_push_indir_table(efx);
return 0;
}
@@ -512,17 +512,17 @@ static int falcon_b0_rx_push_rss_config(struct efx_nic *efx, bool user,
**************************************************************************
*/
-#define FALCON_SPI_MAX_LEN sizeof(efx_oword_t)
+#define FALCON_SPI_MAX_LEN sizeof(ef4_oword_t)
-static int falcon_spi_poll(struct efx_nic *efx)
+static int falcon_spi_poll(struct ef4_nic *efx)
{
- efx_oword_t reg;
- efx_reado(efx, &reg, FR_AB_EE_SPI_HCMD);
- return EFX_OWORD_FIELD(reg, FRF_AB_EE_SPI_HCMD_CMD_EN) ? -EBUSY : 0;
+ ef4_oword_t reg;
+ ef4_reado(efx, &reg, FR_AB_EE_SPI_HCMD);
+ return EF4_OWORD_FIELD(reg, FRF_AB_EE_SPI_HCMD_CMD_EN) ? -EBUSY : 0;
}
/* Wait for SPI command completion */
-static int falcon_spi_wait(struct efx_nic *efx)
+static int falcon_spi_wait(struct ef4_nic *efx)
{
/* Most commands will finish quickly, so we start polling at
* very short intervals. Sometimes the command may have to
@@ -550,13 +550,13 @@ static int falcon_spi_wait(struct efx_nic *efx)
}
static int
-falcon_spi_cmd(struct efx_nic *efx, const struct falcon_spi_device *spi,
+falcon_spi_cmd(struct ef4_nic *efx, const struct falcon_spi_device *spi,
unsigned int command, int address,
const void *in, void *out, size_t len)
{
bool addressed = (address >= 0);
bool reading = (out != NULL);
- efx_oword_t reg;
+ ef4_oword_t reg;
int rc;
/* Input validation */
@@ -570,18 +570,18 @@ falcon_spi_cmd(struct efx_nic *efx, const struct falcon_spi_device *spi,
/* Program address register, if we have an address */
if (addressed) {
- EFX_POPULATE_OWORD_1(reg, FRF_AB_EE_SPI_HADR_ADR, address);
- efx_writeo(efx, &reg, FR_AB_EE_SPI_HADR);
+ EF4_POPULATE_OWORD_1(reg, FRF_AB_EE_SPI_HADR_ADR, address);
+ ef4_writeo(efx, &reg, FR_AB_EE_SPI_HADR);
}
/* Program data register, if we have data */
if (in != NULL) {
memcpy(&reg, in, len);
- efx_writeo(efx, &reg, FR_AB_EE_SPI_HDATA);
+ ef4_writeo(efx, &reg, FR_AB_EE_SPI_HDATA);
}
/* Issue read/write command */
- EFX_POPULATE_OWORD_7(reg,
+ EF4_POPULATE_OWORD_7(reg,
FRF_AB_EE_SPI_HCMD_CMD_EN, 1,
FRF_AB_EE_SPI_HCMD_SF_SEL, spi->device_id,
FRF_AB_EE_SPI_HCMD_DABCNT, len,
@@ -590,7 +590,7 @@ falcon_spi_cmd(struct efx_nic *efx, const struct falcon_spi_device *spi,
FRF_AB_EE_SPI_HCMD_ADBCNT,
(addressed ? spi->addr_len : 0),
FRF_AB_EE_SPI_HCMD_ENC, command);
- efx_writeo(efx, &reg, FR_AB_EE_SPI_HCMD);
+ ef4_writeo(efx, &reg, FR_AB_EE_SPI_HCMD);
/* Wait for read/write to complete */
rc = falcon_spi_wait(efx);
@@ -599,7 +599,7 @@ falcon_spi_cmd(struct efx_nic *efx, const struct falcon_spi_device *spi,
/* Read data */
if (out != NULL) {
- efx_reado(efx, &reg, FR_AB_EE_SPI_HDATA);
+ ef4_reado(efx, &reg, FR_AB_EE_SPI_HDATA);
memcpy(out, &reg, len);
}
@@ -614,7 +614,7 @@ falcon_spi_munge_command(const struct falcon_spi_device *spi,
}
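
A hypothetical caller, to show falcon_spi_cmd()'s conventions: address < 0 skips the address phase, at most one of in/out is non-NULL, and len is capped at FALCON_SPI_MAX_LEN (16 bytes). SPI_RDSR and SPI_STATUS_NRDY are assumed from the driver's spi.h:

	/* Sketch: read one status byte, no address phase. */
	static int sketch_spi_read_status(struct ef4_nic *efx,
					  const struct falcon_spi_device *spi,
					  u8 *status)
	{
		int rc = falcon_spi_cmd(efx, spi, SPI_RDSR, -1, NULL,
					status, sizeof(*status));

		if (rc == 0 && (*status & SPI_STATUS_NRDY))
			rc = -EBUSY;	/* prior write/erase still running */
		return rc;
	}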
static int
-falcon_spi_read(struct efx_nic *efx, const struct falcon_spi_device *spi,
+falcon_spi_read(struct ef4_nic *efx, const struct falcon_spi_device *spi,
loff_t start, size_t len, size_t *retlen, u8 *buffer)
{
size_t block_len, pos = 0;
@@ -644,10 +644,10 @@ falcon_spi_read(struct efx_nic *efx, const struct falcon_spi_device *spi,
return rc;
}
-#ifdef CONFIG_SFC_MTD
+#ifdef CONFIG_SFC_FALCON_MTD
struct falcon_mtd_partition {
- struct efx_mtd_partition common;
+ struct ef4_mtd_partition common;
const struct falcon_spi_device *spi;
size_t offset;
};
@@ -664,7 +664,7 @@ falcon_spi_write_limit(const struct falcon_spi_device *spi, size_t start)
/* Wait up to 10 ms for buffered write completion */
static int
-falcon_spi_wait_write(struct efx_nic *efx, const struct falcon_spi_device *spi)
+falcon_spi_wait_write(struct ef4_nic *efx, const struct falcon_spi_device *spi)
{
unsigned long timeout = jiffies + 1 + DIV_ROUND_UP(HZ, 100);
u8 status;
@@ -689,7 +689,7 @@ falcon_spi_wait_write(struct efx_nic *efx, const struct falcon_spi_device *spi)
}
static int
-falcon_spi_write(struct efx_nic *efx, const struct falcon_spi_device *spi,
+falcon_spi_write(struct ef4_nic *efx, const struct falcon_spi_device *spi,
loff_t start, size_t len, size_t *retlen, const u8 *buffer)
{
u8 verify_buffer[FALCON_SPI_MAX_LEN];
@@ -741,7 +741,7 @@ static int
falcon_spi_slow_wait(struct falcon_mtd_partition *part, bool uninterruptible)
{
const struct falcon_spi_device *spi = part->spi;
- struct efx_nic *efx = part->common.mtd.priv;
+ struct ef4_nic *efx = part->common.mtd.priv;
u8 status;
int rc, i;
@@ -765,7 +765,7 @@ falcon_spi_slow_wait(struct falcon_mtd_partition *part, bool uninterruptible)
}
static int
-falcon_spi_unlock(struct efx_nic *efx, const struct falcon_spi_device *spi)
+falcon_spi_unlock(struct ef4_nic *efx, const struct falcon_spi_device *spi)
{
const u8 unlock_mask = (SPI_STATUS_BP2 | SPI_STATUS_BP1 |
SPI_STATUS_BP0);
@@ -805,7 +805,7 @@ static int
falcon_spi_erase(struct falcon_mtd_partition *part, loff_t start, size_t len)
{
const struct falcon_spi_device *spi = part->spi;
- struct efx_nic *efx = part->common.mtd.priv;
+ struct ef4_nic *efx = part->common.mtd.priv;
unsigned pos, block_len;
u8 empty[FALCON_SPI_VERIFY_BUF_LEN];
u8 buffer[FALCON_SPI_VERIFY_BUF_LEN];
@@ -849,9 +849,9 @@ falcon_spi_erase(struct falcon_mtd_partition *part, loff_t start, size_t len)
return rc;
}
-static void falcon_mtd_rename(struct efx_mtd_partition *part)
+static void falcon_mtd_rename(struct ef4_mtd_partition *part)
{
- struct efx_nic *efx = part->mtd.priv;
+ struct ef4_nic *efx = part->mtd.priv;
snprintf(part->name, sizeof(part->name), "%s %s",
efx->name, part->type_name);
@@ -861,7 +861,7 @@ static int falcon_mtd_read(struct mtd_info *mtd, loff_t start,
size_t len, size_t *retlen, u8 *buffer)
{
struct falcon_mtd_partition *part = to_falcon_mtd_partition(mtd);
- struct efx_nic *efx = mtd->priv;
+ struct ef4_nic *efx = mtd->priv;
struct falcon_nic_data *nic_data = efx->nic_data;
int rc;
@@ -877,7 +877,7 @@ static int falcon_mtd_read(struct mtd_info *mtd, loff_t start,
static int falcon_mtd_erase(struct mtd_info *mtd, loff_t start, size_t len)
{
struct falcon_mtd_partition *part = to_falcon_mtd_partition(mtd);
- struct efx_nic *efx = mtd->priv;
+ struct ef4_nic *efx = mtd->priv;
struct falcon_nic_data *nic_data = efx->nic_data;
int rc;
@@ -893,7 +893,7 @@ static int falcon_mtd_write(struct mtd_info *mtd, loff_t start,
size_t len, size_t *retlen, const u8 *buffer)
{
struct falcon_mtd_partition *part = to_falcon_mtd_partition(mtd);
- struct efx_nic *efx = mtd->priv;
+ struct ef4_nic *efx = mtd->priv;
struct falcon_nic_data *nic_data = efx->nic_data;
int rc;
@@ -909,7 +909,7 @@ static int falcon_mtd_write(struct mtd_info *mtd, loff_t start,
static int falcon_mtd_sync(struct mtd_info *mtd)
{
struct falcon_mtd_partition *part = to_falcon_mtd_partition(mtd);
- struct efx_nic *efx = mtd->priv;
+ struct ef4_nic *efx = mtd->priv;
struct falcon_nic_data *nic_data = efx->nic_data;
int rc;
@@ -919,7 +919,7 @@ static int falcon_mtd_sync(struct mtd_info *mtd)
return rc;
}
-static int falcon_mtd_probe(struct efx_nic *efx)
+static int falcon_mtd_probe(struct ef4_nic *efx)
{
struct falcon_nic_data *nic_data = efx->nic_data;
struct falcon_mtd_partition *parts;
@@ -963,13 +963,13 @@ static int falcon_mtd_probe(struct efx_nic *efx)
n_parts++;
}
- rc = efx_mtd_add(efx, &parts[0].common, n_parts, sizeof(*parts));
+ rc = ef4_mtd_add(efx, &parts[0].common, n_parts, sizeof(*parts));
if (rc)
kfree(parts);
return rc;
}
-#endif /* CONFIG_SFC_MTD */
+#endif /* CONFIG_SFC_FALCON_MTD */
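
to_falcon_mtd_partition(), used by the MTD wrappers above, is defined outside this hunk; since struct falcon_mtd_partition embeds its common part (and thus the mtd_info) first, it is presumably the usual container_of() downcast:

	/* Sketch of the assumed definition, not part of this patch */
	static inline struct falcon_mtd_partition *
	to_falcon_mtd_partition(struct mtd_info *mtd)
	{
		return container_of(mtd, struct falcon_mtd_partition,
				    common.mtd);
	}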
/**************************************************************************
*
@@ -979,27 +979,27 @@ static int falcon_mtd_probe(struct efx_nic *efx)
*/
/* Configure the XAUI driver that is an output from Falcon */
-static void falcon_setup_xaui(struct efx_nic *efx)
+static void falcon_setup_xaui(struct ef4_nic *efx)
{
- efx_oword_t sdctl, txdrv;
+ ef4_oword_t sdctl, txdrv;
/* Move the XAUI into low power, unless there is no PHY, in
* which case the XAUI will have to drive a cable. */
if (efx->phy_type == PHY_TYPE_NONE)
return;
- efx_reado(efx, &sdctl, FR_AB_XX_SD_CTL);
- EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_HIDRVD, FFE_AB_XX_SD_CTL_DRV_DEF);
- EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_LODRVD, FFE_AB_XX_SD_CTL_DRV_DEF);
- EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_HIDRVC, FFE_AB_XX_SD_CTL_DRV_DEF);
- EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_LODRVC, FFE_AB_XX_SD_CTL_DRV_DEF);
- EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_HIDRVB, FFE_AB_XX_SD_CTL_DRV_DEF);
- EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_LODRVB, FFE_AB_XX_SD_CTL_DRV_DEF);
- EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_HIDRVA, FFE_AB_XX_SD_CTL_DRV_DEF);
- EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_LODRVA, FFE_AB_XX_SD_CTL_DRV_DEF);
- efx_writeo(efx, &sdctl, FR_AB_XX_SD_CTL);
-
- EFX_POPULATE_OWORD_8(txdrv,
+ ef4_reado(efx, &sdctl, FR_AB_XX_SD_CTL);
+ EF4_SET_OWORD_FIELD(sdctl, FRF_AB_XX_HIDRVD, FFE_AB_XX_SD_CTL_DRV_DEF);
+ EF4_SET_OWORD_FIELD(sdctl, FRF_AB_XX_LODRVD, FFE_AB_XX_SD_CTL_DRV_DEF);
+ EF4_SET_OWORD_FIELD(sdctl, FRF_AB_XX_HIDRVC, FFE_AB_XX_SD_CTL_DRV_DEF);
+ EF4_SET_OWORD_FIELD(sdctl, FRF_AB_XX_LODRVC, FFE_AB_XX_SD_CTL_DRV_DEF);
+ EF4_SET_OWORD_FIELD(sdctl, FRF_AB_XX_HIDRVB, FFE_AB_XX_SD_CTL_DRV_DEF);
+ EF4_SET_OWORD_FIELD(sdctl, FRF_AB_XX_LODRVB, FFE_AB_XX_SD_CTL_DRV_DEF);
+ EF4_SET_OWORD_FIELD(sdctl, FRF_AB_XX_HIDRVA, FFE_AB_XX_SD_CTL_DRV_DEF);
+ EF4_SET_OWORD_FIELD(sdctl, FRF_AB_XX_LODRVA, FFE_AB_XX_SD_CTL_DRV_DEF);
+ ef4_writeo(efx, &sdctl, FR_AB_XX_SD_CTL);
+
+ EF4_POPULATE_OWORD_8(txdrv,
FRF_AB_XX_DEQD, FFE_AB_XX_TXDRV_DEQ_DEF,
FRF_AB_XX_DEQC, FFE_AB_XX_TXDRV_DEQ_DEF,
FRF_AB_XX_DEQB, FFE_AB_XX_TXDRV_DEQ_DEF,
@@ -1008,27 +1008,27 @@ static void falcon_setup_xaui(struct efx_nic *efx)
FRF_AB_XX_DTXC, FFE_AB_XX_TXDRV_DTX_DEF,
FRF_AB_XX_DTXB, FFE_AB_XX_TXDRV_DTX_DEF,
FRF_AB_XX_DTXA, FFE_AB_XX_TXDRV_DTX_DEF);
- efx_writeo(efx, &txdrv, FR_AB_XX_TXDRV_CTL);
+ ef4_writeo(efx, &txdrv, FR_AB_XX_TXDRV_CTL);
}
-int falcon_reset_xaui(struct efx_nic *efx)
+int falcon_reset_xaui(struct ef4_nic *efx)
{
struct falcon_nic_data *nic_data = efx->nic_data;
- efx_oword_t reg;
+ ef4_oword_t reg;
int count;
/* Don't fetch MAC statistics over an XMAC reset */
WARN_ON(nic_data->stats_disable_count == 0);
/* Start reset sequence */
- EFX_POPULATE_OWORD_1(reg, FRF_AB_XX_RST_XX_EN, 1);
- efx_writeo(efx, &reg, FR_AB_XX_PWR_RST);
+ EF4_POPULATE_OWORD_1(reg, FRF_AB_XX_RST_XX_EN, 1);
+ ef4_writeo(efx, &reg, FR_AB_XX_PWR_RST);
/* Wait up to 10 ms for completion, then reinitialise */
for (count = 0; count < 1000; count++) {
- efx_reado(efx, &reg, FR_AB_XX_PWR_RST);
- if (EFX_OWORD_FIELD(reg, FRF_AB_XX_RST_XX_EN) == 0 &&
- EFX_OWORD_FIELD(reg, FRF_AB_XX_SD_RST_ACT) == 0) {
+ ef4_reado(efx, &reg, FR_AB_XX_PWR_RST);
+ if (EF4_OWORD_FIELD(reg, FRF_AB_XX_RST_XX_EN) == 0 &&
+ EF4_OWORD_FIELD(reg, FRF_AB_XX_SD_RST_ACT) == 0) {
falcon_setup_xaui(efx);
return 0;
}
@@ -1039,12 +1039,12 @@ int falcon_reset_xaui(struct efx_nic *efx)
return -ETIMEDOUT;
}
-static void falcon_ack_status_intr(struct efx_nic *efx)
+static void falcon_ack_status_intr(struct ef4_nic *efx)
{
struct falcon_nic_data *nic_data = efx->nic_data;
- efx_oword_t reg;
+ ef4_oword_t reg;
- if ((efx_nic_rev(efx) != EFX_REV_FALCON_B0) || LOOPBACK_INTERNAL(efx))
+ if ((ef4_nic_rev(efx) != EF4_REV_FALCON_B0) || LOOPBACK_INTERNAL(efx))
return;
/* We expect xgmii faults if the wireside link is down */
@@ -1056,33 +1056,33 @@ static void falcon_ack_status_intr(struct efx_nic *efx)
if (nic_data->xmac_poll_required)
return;
- efx_reado(efx, &reg, FR_AB_XM_MGT_INT_MSK);
+ ef4_reado(efx, &reg, FR_AB_XM_MGT_INT_MSK);
}
-static bool falcon_xgxs_link_ok(struct efx_nic *efx)
+static bool falcon_xgxs_link_ok(struct ef4_nic *efx)
{
- efx_oword_t reg;
+ ef4_oword_t reg;
bool align_done, link_ok = false;
int sync_status;
/* Read link status */
- efx_reado(efx, &reg, FR_AB_XX_CORE_STAT);
+ ef4_reado(efx, &reg, FR_AB_XX_CORE_STAT);
- align_done = EFX_OWORD_FIELD(reg, FRF_AB_XX_ALIGN_DONE);
- sync_status = EFX_OWORD_FIELD(reg, FRF_AB_XX_SYNC_STAT);
+ align_done = EF4_OWORD_FIELD(reg, FRF_AB_XX_ALIGN_DONE);
+ sync_status = EF4_OWORD_FIELD(reg, FRF_AB_XX_SYNC_STAT);
if (align_done && (sync_status == FFE_AB_XX_STAT_ALL_LANES))
link_ok = true;
/* Clear link status ready for next read */
- EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_COMMA_DET, FFE_AB_XX_STAT_ALL_LANES);
- EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_CHAR_ERR, FFE_AB_XX_STAT_ALL_LANES);
- EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_DISPERR, FFE_AB_XX_STAT_ALL_LANES);
- efx_writeo(efx, &reg, FR_AB_XX_CORE_STAT);
+ EF4_SET_OWORD_FIELD(reg, FRF_AB_XX_COMMA_DET, FFE_AB_XX_STAT_ALL_LANES);
+ EF4_SET_OWORD_FIELD(reg, FRF_AB_XX_CHAR_ERR, FFE_AB_XX_STAT_ALL_LANES);
+ EF4_SET_OWORD_FIELD(reg, FRF_AB_XX_DISPERR, FFE_AB_XX_STAT_ALL_LANES);
+ ef4_writeo(efx, &reg, FR_AB_XX_CORE_STAT);
return link_ok;
}
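
One note on the latched status handling above, under the assumption that XAUI's four lanes map to one bit each:

	/* FFE_AB_XX_STAT_ALL_LANES is presumably the 4-bit mask 0xf (one bit
	 * per XAUI lane): the link counts as up only when alignment is done
	 * and every lane reports sync, and writing the same all-lanes value
	 * back to COMMA_DET/CHAR_ERR/DISPERR clears the latched per-lane
	 * error bits ready for the next poll. */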
-static bool falcon_xmac_link_ok(struct efx_nic *efx)
+static bool falcon_xmac_link_ok(struct ef4_nic *efx)
{
/*
* Check MAC's XGXS link status except when using XGMII loopback
@@ -1094,66 +1094,66 @@ static bool falcon_xmac_link_ok(struct efx_nic *efx)
falcon_xgxs_link_ok(efx)) &&
(!(efx->mdio.mmds & (1 << MDIO_MMD_PHYXS)) ||
LOOPBACK_INTERNAL(efx) ||
- efx_mdio_phyxgxs_lane_sync(efx));
+ ef4_mdio_phyxgxs_lane_sync(efx));
}
-static void falcon_reconfigure_xmac_core(struct efx_nic *efx)
+static void falcon_reconfigure_xmac_core(struct ef4_nic *efx)
{
unsigned int max_frame_len;
- efx_oword_t reg;
- bool rx_fc = !!(efx->link_state.fc & EFX_FC_RX);
- bool tx_fc = !!(efx->link_state.fc & EFX_FC_TX);
+ ef4_oword_t reg;
+ bool rx_fc = !!(efx->link_state.fc & EF4_FC_RX);
+ bool tx_fc = !!(efx->link_state.fc & EF4_FC_TX);
/* Configure MAC - cut-thru mode is hard wired on */
- EFX_POPULATE_OWORD_3(reg,
+ EF4_POPULATE_OWORD_3(reg,
FRF_AB_XM_RX_JUMBO_MODE, 1,
FRF_AB_XM_TX_STAT_EN, 1,
FRF_AB_XM_RX_STAT_EN, 1);
- efx_writeo(efx, &reg, FR_AB_XM_GLB_CFG);
+ ef4_writeo(efx, &reg, FR_AB_XM_GLB_CFG);
/* Configure TX */
- EFX_POPULATE_OWORD_6(reg,
+ EF4_POPULATE_OWORD_6(reg,
FRF_AB_XM_TXEN, 1,
FRF_AB_XM_TX_PRMBL, 1,
FRF_AB_XM_AUTO_PAD, 1,
FRF_AB_XM_TXCRC, 1,
FRF_AB_XM_FCNTL, tx_fc,
FRF_AB_XM_IPG, 0x3);
- efx_writeo(efx, &reg, FR_AB_XM_TX_CFG);
+ ef4_writeo(efx, &reg, FR_AB_XM_TX_CFG);
/* Configure RX */
- EFX_POPULATE_OWORD_5(reg,
+ EF4_POPULATE_OWORD_5(reg,
FRF_AB_XM_RXEN, 1,
FRF_AB_XM_AUTO_DEPAD, 0,
FRF_AB_XM_ACPT_ALL_MCAST, 1,
FRF_AB_XM_ACPT_ALL_UCAST, !efx->unicast_filter,
FRF_AB_XM_PASS_CRC_ERR, 1);
- efx_writeo(efx, &reg, FR_AB_XM_RX_CFG);
+ ef4_writeo(efx, &reg, FR_AB_XM_RX_CFG);
/* Set frame length */
- max_frame_len = EFX_MAX_FRAME_LEN(efx->net_dev->mtu);
- EFX_POPULATE_OWORD_1(reg, FRF_AB_XM_MAX_RX_FRM_SIZE, max_frame_len);
- efx_writeo(efx, &reg, FR_AB_XM_RX_PARAM);
- EFX_POPULATE_OWORD_2(reg,
+ max_frame_len = EF4_MAX_FRAME_LEN(efx->net_dev->mtu);
+ EF4_POPULATE_OWORD_1(reg, FRF_AB_XM_MAX_RX_FRM_SIZE, max_frame_len);
+ ef4_writeo(efx, &reg, FR_AB_XM_RX_PARAM);
+ EF4_POPULATE_OWORD_2(reg,
FRF_AB_XM_MAX_TX_FRM_SIZE, max_frame_len,
FRF_AB_XM_TX_JUMBO_MODE, 1);
- efx_writeo(efx, &reg, FR_AB_XM_TX_PARAM);
+ ef4_writeo(efx, &reg, FR_AB_XM_TX_PARAM);
- EFX_POPULATE_OWORD_2(reg,
+ EF4_POPULATE_OWORD_2(reg,
FRF_AB_XM_PAUSE_TIME, 0xfffe, /* MAX PAUSE TIME */
FRF_AB_XM_DIS_FCNTL, !rx_fc);
- efx_writeo(efx, &reg, FR_AB_XM_FC);
+ ef4_writeo(efx, &reg, FR_AB_XM_FC);
/* Set MAC address */
memcpy(&reg, &efx->net_dev->dev_addr[0], 4);
- efx_writeo(efx, &reg, FR_AB_XM_ADR_LO);
+ ef4_writeo(efx, &reg, FR_AB_XM_ADR_LO);
memcpy(&reg, &efx->net_dev->dev_addr[4], 2);
- efx_writeo(efx, &reg, FR_AB_XM_ADR_HI);
+ ef4_writeo(efx, &reg, FR_AB_XM_ADR_HI);
}
-static void falcon_reconfigure_xgxs_core(struct efx_nic *efx)
+static void falcon_reconfigure_xgxs_core(struct ef4_nic *efx)
{
- efx_oword_t reg;
+ ef4_oword_t reg;
bool xgxs_loopback = (efx->loopback_mode == LOOPBACK_XGXS);
bool xaui_loopback = (efx->loopback_mode == LOOPBACK_XAUI);
bool xgmii_loopback = (efx->loopback_mode == LOOPBACK_XGMII);
@@ -1161,12 +1161,12 @@ static void falcon_reconfigure_xgxs_core(struct efx_nic *efx)
/* XGXS block is flaky and will need to be reset if moving
* into or out of XGMII, XGXS or XAUI loopbacks. */

- efx_reado(efx, &reg, FR_AB_XX_CORE_STAT);
- old_xgxs_loopback = EFX_OWORD_FIELD(reg, FRF_AB_XX_XGXS_LB_EN);
- old_xgmii_loopback = EFX_OWORD_FIELD(reg, FRF_AB_XX_XGMII_LB_EN);
+ ef4_reado(efx, &reg, FR_AB_XX_CORE_STAT);
+ old_xgxs_loopback = EF4_OWORD_FIELD(reg, FRF_AB_XX_XGXS_LB_EN);
+ old_xgmii_loopback = EF4_OWORD_FIELD(reg, FRF_AB_XX_XGMII_LB_EN);
- efx_reado(efx, &reg, FR_AB_XX_SD_CTL);
- old_xaui_loopback = EFX_OWORD_FIELD(reg, FRF_AB_XX_LPBKA);
+ ef4_reado(efx, &reg, FR_AB_XX_SD_CTL);
+ old_xaui_loopback = EF4_OWORD_FIELD(reg, FRF_AB_XX_LPBKA);
/* The PHY driver may have turned XAUI off */
if ((xgxs_loopback != old_xgxs_loopback) ||
@@ -1174,30 +1174,30 @@ static void falcon_reconfigure_xgxs_core(struct efx_nic *efx)
(xgmii_loopback != old_xgmii_loopback))
falcon_reset_xaui(efx);
- efx_reado(efx, &reg, FR_AB_XX_CORE_STAT);
- EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_FORCE_SIG,
+ ef4_reado(efx, &reg, FR_AB_XX_CORE_STAT);
+ EF4_SET_OWORD_FIELD(reg, FRF_AB_XX_FORCE_SIG,
(xgxs_loopback || xaui_loopback) ?
FFE_AB_XX_FORCE_SIG_ALL_LANES : 0);
- EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_XGXS_LB_EN, xgxs_loopback);
- EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_XGMII_LB_EN, xgmii_loopback);
- efx_writeo(efx, &reg, FR_AB_XX_CORE_STAT);
+ EF4_SET_OWORD_FIELD(reg, FRF_AB_XX_XGXS_LB_EN, xgxs_loopback);
+ EF4_SET_OWORD_FIELD(reg, FRF_AB_XX_XGMII_LB_EN, xgmii_loopback);
+ ef4_writeo(efx, &reg, FR_AB_XX_CORE_STAT);
- efx_reado(efx, &reg, FR_AB_XX_SD_CTL);
- EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_LPBKD, xaui_loopback);
- EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_LPBKC, xaui_loopback);
- EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_LPBKB, xaui_loopback);
- EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_LPBKA, xaui_loopback);
- efx_writeo(efx, &reg, FR_AB_XX_SD_CTL);
+ ef4_reado(efx, &reg, FR_AB_XX_SD_CTL);
+ EF4_SET_OWORD_FIELD(reg, FRF_AB_XX_LPBKD, xaui_loopback);
+ EF4_SET_OWORD_FIELD(reg, FRF_AB_XX_LPBKC, xaui_loopback);
+ EF4_SET_OWORD_FIELD(reg, FRF_AB_XX_LPBKB, xaui_loopback);
+ EF4_SET_OWORD_FIELD(reg, FRF_AB_XX_LPBKA, xaui_loopback);
+ ef4_writeo(efx, &reg, FR_AB_XX_SD_CTL);
}
/* Try to bring up the Falcon side of the Falcon-Phy XAUI link */
-static bool falcon_xmac_link_ok_retry(struct efx_nic *efx, int tries)
+static bool falcon_xmac_link_ok_retry(struct ef4_nic *efx, int tries)
{
bool mac_up = falcon_xmac_link_ok(efx);
if (LOOPBACK_MASK(efx) & LOOPBACKS_EXTERNAL(efx) & LOOPBACKS_WS ||
- efx_phy_mode_disabled(efx->phy_mode))
+ ef4_phy_mode_disabled(efx->phy_mode))
/* XAUI link is expected to be down */
return mac_up;
@@ -1217,16 +1217,16 @@ static bool falcon_xmac_link_ok_retry(struct efx_nic *efx, int tries)
return mac_up;
}
-static bool falcon_xmac_check_fault(struct efx_nic *efx)
+static bool falcon_xmac_check_fault(struct ef4_nic *efx)
{
return !falcon_xmac_link_ok_retry(efx, 5);
}
-static int falcon_reconfigure_xmac(struct efx_nic *efx)
+static int falcon_reconfigure_xmac(struct ef4_nic *efx)
{
struct falcon_nic_data *nic_data = efx->nic_data;
- efx_farch_filter_sync_rx_mode(efx);
+ ef4_farch_filter_sync_rx_mode(efx);
falcon_reconfigure_xgxs_core(efx);
falcon_reconfigure_xmac_core(efx);
@@ -1239,7 +1239,7 @@ static int falcon_reconfigure_xmac(struct efx_nic *efx)
return 0;
}
-static void falcon_poll_xmac(struct efx_nic *efx)
+static void falcon_poll_xmac(struct ef4_nic *efx)
{
struct falcon_nic_data *nic_data = efx->nic_data;
@@ -1258,32 +1258,32 @@ static void falcon_poll_xmac(struct efx_nic *efx)
**************************************************************************
*/
-static void falcon_push_multicast_hash(struct efx_nic *efx)
+static void falcon_push_multicast_hash(struct ef4_nic *efx)
{
- union efx_multicast_hash *mc_hash = &efx->multicast_hash;
+ union ef4_multicast_hash *mc_hash = &efx->multicast_hash;
WARN_ON(!mutex_is_locked(&efx->mac_lock));
- efx_writeo(efx, &mc_hash->oword[0], FR_AB_MAC_MC_HASH_REG0);
- efx_writeo(efx, &mc_hash->oword[1], FR_AB_MAC_MC_HASH_REG1);
+ ef4_writeo(efx, &mc_hash->oword[0], FR_AB_MAC_MC_HASH_REG0);
+ ef4_writeo(efx, &mc_hash->oword[1], FR_AB_MAC_MC_HASH_REG1);
}
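
The hash itself is presumably filled per multicast address in ef4_set_rx_mode() (efx.c in this naming); a sketch of the assumed computation, one bit per address in a 256-bit table:

	/* Sketch, assuming the table is 256 bits (2 owords) indexed by the
	 * low 8 bits of the little-endian CRC of the MAC address. */
	static void sketch_mc_hash_set(union ef4_multicast_hash *mc_hash,
				       const u8 *addr)
	{
		u32 crc = ether_crc_le(ETH_ALEN, addr);

		__set_bit_le(crc & 0xff, mc_hash);
	}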
-static void falcon_reset_macs(struct efx_nic *efx)
+static void falcon_reset_macs(struct ef4_nic *efx)
{
struct falcon_nic_data *nic_data = efx->nic_data;
- efx_oword_t reg, mac_ctrl;
+ ef4_oword_t reg, mac_ctrl;
int count;
- if (efx_nic_rev(efx) < EFX_REV_FALCON_B0) {
+ if (ef4_nic_rev(efx) < EF4_REV_FALCON_B0) {
/* It's not safe to use GLB_CTL_REG to reset the
* macs, so instead use the internal MAC resets
*/
- EFX_POPULATE_OWORD_1(reg, FRF_AB_XM_CORE_RST, 1);
- efx_writeo(efx, &reg, FR_AB_XM_GLB_CFG);
+ EF4_POPULATE_OWORD_1(reg, FRF_AB_XM_CORE_RST, 1);
+ ef4_writeo(efx, &reg, FR_AB_XM_GLB_CFG);
for (count = 0; count < 10000; count++) {
- efx_reado(efx, &reg, FR_AB_XM_GLB_CFG);
- if (EFX_OWORD_FIELD(reg, FRF_AB_XM_CORE_RST) ==
+ ef4_reado(efx, &reg, FR_AB_XM_GLB_CFG);
+ if (EF4_OWORD_FIELD(reg, FRF_AB_XM_CORE_RST) ==
0)
return;
udelay(10);
@@ -1296,22 +1296,22 @@ static void falcon_reset_macs(struct efx_nic *efx)
/* MAC stats will fail whilst the TX FIFO is draining */
WARN_ON(nic_data->stats_disable_count == 0);
- efx_reado(efx, &mac_ctrl, FR_AB_MAC_CTRL);
- EFX_SET_OWORD_FIELD(mac_ctrl, FRF_BB_TXFIFO_DRAIN_EN, 1);
- efx_writeo(efx, &mac_ctrl, FR_AB_MAC_CTRL);
+ ef4_reado(efx, &mac_ctrl, FR_AB_MAC_CTRL);
+ EF4_SET_OWORD_FIELD(mac_ctrl, FRF_BB_TXFIFO_DRAIN_EN, 1);
+ ef4_writeo(efx, &mac_ctrl, FR_AB_MAC_CTRL);
- efx_reado(efx, &reg, FR_AB_GLB_CTL);
- EFX_SET_OWORD_FIELD(reg, FRF_AB_RST_XGTX, 1);
- EFX_SET_OWORD_FIELD(reg, FRF_AB_RST_XGRX, 1);
- EFX_SET_OWORD_FIELD(reg, FRF_AB_RST_EM, 1);
- efx_writeo(efx, &reg, FR_AB_GLB_CTL);
+ ef4_reado(efx, &reg, FR_AB_GLB_CTL);
+ EF4_SET_OWORD_FIELD(reg, FRF_AB_RST_XGTX, 1);
+ EF4_SET_OWORD_FIELD(reg, FRF_AB_RST_XGRX, 1);
+ EF4_SET_OWORD_FIELD(reg, FRF_AB_RST_EM, 1);
+ ef4_writeo(efx, &reg, FR_AB_GLB_CTL);
count = 0;
while (1) {
- efx_reado(efx, &reg, FR_AB_GLB_CTL);
- if (!EFX_OWORD_FIELD(reg, FRF_AB_RST_XGTX) &&
- !EFX_OWORD_FIELD(reg, FRF_AB_RST_XGRX) &&
- !EFX_OWORD_FIELD(reg, FRF_AB_RST_EM)) {
+ ef4_reado(efx, &reg, FR_AB_GLB_CTL);
+ if (!EF4_OWORD_FIELD(reg, FRF_AB_RST_XGTX) &&
+ !EF4_OWORD_FIELD(reg, FRF_AB_RST_XGRX) &&
+ !EF4_OWORD_FIELD(reg, FRF_AB_RST_EM)) {
netif_dbg(efx, hw, efx->net_dev,
"Completed MAC reset after %d loops\n",
count);
@@ -1327,47 +1327,47 @@ static void falcon_reset_macs(struct efx_nic *efx)
/* Ensure the correct MAC is selected before statistics
* are re-enabled by the caller */
- efx_writeo(efx, &mac_ctrl, FR_AB_MAC_CTRL);
+ ef4_writeo(efx, &mac_ctrl, FR_AB_MAC_CTRL);
falcon_setup_xaui(efx);
}
-static void falcon_drain_tx_fifo(struct efx_nic *efx)
+static void falcon_drain_tx_fifo(struct ef4_nic *efx)
{
- efx_oword_t reg;
+ ef4_oword_t reg;
- if ((efx_nic_rev(efx) < EFX_REV_FALCON_B0) ||
+ if ((ef4_nic_rev(efx) < EF4_REV_FALCON_B0) ||
(efx->loopback_mode != LOOPBACK_NONE))
return;
- efx_reado(efx, &reg, FR_AB_MAC_CTRL);
+ ef4_reado(efx, &reg, FR_AB_MAC_CTRL);
/* There is no point in draining more than once */
- if (EFX_OWORD_FIELD(reg, FRF_BB_TXFIFO_DRAIN_EN))
+ if (EF4_OWORD_FIELD(reg, FRF_BB_TXFIFO_DRAIN_EN))
return;
falcon_reset_macs(efx);
}
-static void falcon_deconfigure_mac_wrapper(struct efx_nic *efx)
+static void falcon_deconfigure_mac_wrapper(struct ef4_nic *efx)
{
- efx_oword_t reg;
+ ef4_oword_t reg;
- if (efx_nic_rev(efx) < EFX_REV_FALCON_B0)
+ if (ef4_nic_rev(efx) < EF4_REV_FALCON_B0)
return;
/* Isolate the MAC -> RX */
- efx_reado(efx, &reg, FR_AZ_RX_CFG);
- EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_INGR_EN, 0);
- efx_writeo(efx, &reg, FR_AZ_RX_CFG);
+ ef4_reado(efx, &reg, FR_AZ_RX_CFG);
+ EF4_SET_OWORD_FIELD(reg, FRF_BZ_RX_INGR_EN, 0);
+ ef4_writeo(efx, &reg, FR_AZ_RX_CFG);
/* Isolate TX -> MAC */
falcon_drain_tx_fifo(efx);
}
-static void falcon_reconfigure_mac_wrapper(struct efx_nic *efx)
+static void falcon_reconfigure_mac_wrapper(struct ef4_nic *efx)
{
- struct efx_link_state *link_state = &efx->link_state;
- efx_oword_t reg;
+ struct ef4_link_state *link_state = &efx->link_state;
+ ef4_oword_t reg;
int link_speed, isolate;
isolate = !!ACCESS_ONCE(efx->reset_pending);
@@ -1383,7 +1383,7 @@ static void falcon_reconfigure_mac_wrapper(struct efx_nic *efx)
* as advertised. Disable to ensure packets are not
* indefinitely held and TX queue can be flushed at any point
* while the link is down. */
- EFX_POPULATE_OWORD_5(reg,
+ EF4_POPULATE_OWORD_5(reg,
FRF_AB_MAC_XOFF_VAL, 0xffff /* max pause time */,
FRF_AB_MAC_BCAD_ACPT, 1,
FRF_AB_MAC_UC_PROM, !efx->unicast_filter,
@@ -1391,30 +1391,30 @@ static void falcon_reconfigure_mac_wrapper(struct efx_nic *efx)
FRF_AB_MAC_SPEED, link_speed);
/* On B0, MAC backpressure can be disabled and packets get
* discarded. */
- if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
- EFX_SET_OWORD_FIELD(reg, FRF_BB_TXFIFO_DRAIN_EN,
+ if (ef4_nic_rev(efx) >= EF4_REV_FALCON_B0) {
+ EF4_SET_OWORD_FIELD(reg, FRF_BB_TXFIFO_DRAIN_EN,
!link_state->up || isolate);
}
- efx_writeo(efx, &reg, FR_AB_MAC_CTRL);
+ ef4_writeo(efx, &reg, FR_AB_MAC_CTRL);
/* Restore the multicast hash registers. */
falcon_push_multicast_hash(efx);
- efx_reado(efx, &reg, FR_AZ_RX_CFG);
+ ef4_reado(efx, &reg, FR_AZ_RX_CFG);
/* Enable XOFF signal from RX FIFO (we enabled it during NIC
* initialisation but it may read back as 0) */
- EFX_SET_OWORD_FIELD(reg, FRF_AZ_RX_XOFF_MAC_EN, 1);
+ EF4_SET_OWORD_FIELD(reg, FRF_AZ_RX_XOFF_MAC_EN, 1);
/* Unisolate the MAC -> RX */
- if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0)
- EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_INGR_EN, !isolate);
- efx_writeo(efx, &reg, FR_AZ_RX_CFG);
+ if (ef4_nic_rev(efx) >= EF4_REV_FALCON_B0)
+ EF4_SET_OWORD_FIELD(reg, FRF_BZ_RX_INGR_EN, !isolate);
+ ef4_writeo(efx, &reg, FR_AZ_RX_CFG);
}
-static void falcon_stats_request(struct efx_nic *efx)
+static void falcon_stats_request(struct ef4_nic *efx)
{
struct falcon_nic_data *nic_data = efx->nic_data;
- efx_oword_t reg;
+ ef4_oword_t reg;
WARN_ON(nic_data->stats_pending);
WARN_ON(nic_data->stats_disable_count);
@@ -1424,16 +1424,16 @@ static void falcon_stats_request(struct efx_nic *efx)
wmb(); /* ensure done flag is clear */
/* Initiate DMA transfer of stats */
- EFX_POPULATE_OWORD_2(reg,
+ EF4_POPULATE_OWORD_2(reg,
FRF_AB_MAC_STAT_DMA_CMD, 1,
FRF_AB_MAC_STAT_DMA_ADR,
efx->stats_buffer.dma_addr);
- efx_writeo(efx, &reg, FR_AB_MAC_STAT_DMA);
+ ef4_writeo(efx, &reg, FR_AB_MAC_STAT_DMA);
mod_timer(&nic_data->stats_timer, round_jiffies_up(jiffies + HZ / 2));
}
-static void falcon_stats_complete(struct efx_nic *efx)
+static void falcon_stats_complete(struct ef4_nic *efx)
{
struct falcon_nic_data *nic_data = efx->nic_data;
@@ -1443,7 +1443,7 @@ static void falcon_stats_complete(struct efx_nic *efx)
nic_data->stats_pending = false;
if (FALCON_XMAC_STATS_DMA_FLAG(efx)) {
rmb(); /* read the done flag before the stats */
- efx_nic_update_stats(falcon_stat_desc, FALCON_STAT_COUNT,
+ ef4_nic_update_stats(falcon_stat_desc, FALCON_STAT_COUNT,
falcon_stat_mask, nic_data->stats,
efx->stats_buffer.addr, true);
} else {
@@ -1454,7 +1454,7 @@ static void falcon_stats_complete(struct efx_nic *efx)
static void falcon_stats_timer_func(unsigned long context)
{
- struct efx_nic *efx = (struct efx_nic *)context;
+ struct ef4_nic *efx = (struct ef4_nic *)context;
struct falcon_nic_data *nic_data = efx->nic_data;
spin_lock(&efx->stats_lock);
@@ -1466,9 +1466,9 @@ static void falcon_stats_timer_func(unsigned long context)
spin_unlock(&efx->stats_lock);
}
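
The wmb()/rmb() pair above is the whole synchronisation protocol between driver and stats DMA; a condensed sketch of the assumed ordering, where 'done' stands in for whatever marker FALCON_XMAC_STATS_DMA_FLAG() actually reads from the end of the stats buffer:

	/* Sketch of the assumed marker protocol. */
	static void sketch_stats_handshake(volatile u32 *done)
	{
		*done = 0;
		wmb();		/* clear must be visible before the DMA kick */
		/* ... write FR_AB_MAC_STAT_DMA to start the transfer ... */

		if (*done) {
			rmb();	/* read the marker before the stats */
			/* ... copy stats out of efx->stats_buffer ... */
		}
	}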
-static bool falcon_loopback_link_poll(struct efx_nic *efx)
+static bool falcon_loopback_link_poll(struct ef4_nic *efx)
{
- struct efx_link_state old_state = efx->link_state;
+ struct ef4_link_state old_state = efx->link_state;
WARN_ON(!mutex_is_locked(&efx->mac_lock));
WARN_ON(!LOOPBACK_INTERNAL(efx));
@@ -1478,14 +1478,14 @@ static bool falcon_loopback_link_poll(struct efx_nic *efx)
efx->link_state.up = true;
efx->link_state.speed = 10000;
- return !efx_link_state_equal(&efx->link_state, &old_state);
+ return !ef4_link_state_equal(&efx->link_state, &old_state);
}
-static int falcon_reconfigure_port(struct efx_nic *efx)
+static int falcon_reconfigure_port(struct ef4_nic *efx)
{
int rc;
- WARN_ON(efx_nic_rev(efx) > EFX_REV_FALCON_B0);
+ WARN_ON(ef4_nic_rev(efx) > EF4_REV_FALCON_B0);
/* Poll the PHY link state *before* reconfiguring it. This means we
* will pick up the correct speed (in loopback) to select the correct
@@ -1508,7 +1508,7 @@ static int falcon_reconfigure_port(struct efx_nic *efx)
falcon_start_nic_stats(efx);
/* Synchronise efx->link_state with the kernel */
- efx_link_status_changed(efx);
+ ef4_link_status_changed(efx);
return 0;
}
@@ -1520,13 +1520,13 @@ static int falcon_reconfigure_port(struct efx_nic *efx)
* flow control on this end.
*/
-static void falcon_a1_prepare_enable_fc_tx(struct efx_nic *efx)
+static void falcon_a1_prepare_enable_fc_tx(struct ef4_nic *efx)
{
/* Schedule a reset to recover */
- efx_schedule_reset(efx, RESET_TYPE_INVISIBLE);
+ ef4_schedule_reset(efx, RESET_TYPE_INVISIBLE);
}
-static void falcon_b0_prepare_enable_fc_tx(struct efx_nic *efx)
+static void falcon_b0_prepare_enable_fc_tx(struct ef4_nic *efx)
{
/* Recover by resetting the EM block */
falcon_stop_nic_stats(efx);
@@ -1543,21 +1543,21 @@ static void falcon_b0_prepare_enable_fc_tx(struct efx_nic *efx)
*/
/* Wait for GMII access to complete */
-static int falcon_gmii_wait(struct efx_nic *efx)
+static int falcon_gmii_wait(struct ef4_nic *efx)
{
- efx_oword_t md_stat;
+ ef4_oword_t md_stat;
int count;
/* wait up to 50ms - taken max from datasheet */
for (count = 0; count < 5000; count++) {
- efx_reado(efx, &md_stat, FR_AB_MD_STAT);
- if (EFX_OWORD_FIELD(md_stat, FRF_AB_MD_BSY) == 0) {
- if (EFX_OWORD_FIELD(md_stat, FRF_AB_MD_LNFL) != 0 ||
- EFX_OWORD_FIELD(md_stat, FRF_AB_MD_BSERR) != 0) {
+ ef4_reado(efx, &md_stat, FR_AB_MD_STAT);
+ if (EF4_OWORD_FIELD(md_stat, FRF_AB_MD_BSY) == 0) {
+ if (EF4_OWORD_FIELD(md_stat, FRF_AB_MD_LNFL) != 0 ||
+ EF4_OWORD_FIELD(md_stat, FRF_AB_MD_BSERR) != 0) {
netif_err(efx, hw, efx->net_dev,
"error from GMII access "
- EFX_OWORD_FMT"\n",
- EFX_OWORD_VAL(md_stat));
+ EF4_OWORD_FMT"\n",
+ EF4_OWORD_VAL(md_stat));
return -EIO;
}
return 0;
@@ -1572,9 +1572,9 @@ static int falcon_gmii_wait(struct efx_nic *efx)
static int falcon_mdio_write(struct net_device *net_dev,
int prtad, int devad, u16 addr, u16 value)
{
- struct efx_nic *efx = netdev_priv(net_dev);
+ struct ef4_nic *efx = netdev_priv(net_dev);
struct falcon_nic_data *nic_data = efx->nic_data;
- efx_oword_t reg;
+ ef4_oword_t reg;
int rc;
netif_vdbg(efx, hw, efx->net_dev,
@@ -1589,30 +1589,30 @@ static int falcon_mdio_write(struct net_device *net_dev,
goto out;
/* Write the address/ID register */
- EFX_POPULATE_OWORD_1(reg, FRF_AB_MD_PHY_ADR, addr);
- efx_writeo(efx, &reg, FR_AB_MD_PHY_ADR);
+ EF4_POPULATE_OWORD_1(reg, FRF_AB_MD_PHY_ADR, addr);
+ ef4_writeo(efx, &reg, FR_AB_MD_PHY_ADR);
- EFX_POPULATE_OWORD_2(reg, FRF_AB_MD_PRT_ADR, prtad,
+ EF4_POPULATE_OWORD_2(reg, FRF_AB_MD_PRT_ADR, prtad,
FRF_AB_MD_DEV_ADR, devad);
- efx_writeo(efx, &reg, FR_AB_MD_ID);
+ ef4_writeo(efx, &reg, FR_AB_MD_ID);
/* Write data */
- EFX_POPULATE_OWORD_1(reg, FRF_AB_MD_TXD, value);
- efx_writeo(efx, &reg, FR_AB_MD_TXD);
+ EF4_POPULATE_OWORD_1(reg, FRF_AB_MD_TXD, value);
+ ef4_writeo(efx, &reg, FR_AB_MD_TXD);
- EFX_POPULATE_OWORD_2(reg,
+ EF4_POPULATE_OWORD_2(reg,
FRF_AB_MD_WRC, 1,
FRF_AB_MD_GC, 0);
- efx_writeo(efx, &reg, FR_AB_MD_CS);
+ ef4_writeo(efx, &reg, FR_AB_MD_CS);
/* Wait for data to be written */
rc = falcon_gmii_wait(efx);
if (rc) {
/* Abort the write operation */
- EFX_POPULATE_OWORD_2(reg,
+ EF4_POPULATE_OWORD_2(reg,
FRF_AB_MD_WRC, 0,
FRF_AB_MD_GC, 1);
- efx_writeo(efx, &reg, FR_AB_MD_CS);
+ ef4_writeo(efx, &reg, FR_AB_MD_CS);
udelay(10);
}
@@ -1625,9 +1625,9 @@ out:
static int falcon_mdio_read(struct net_device *net_dev,
int prtad, int devad, u16 addr)
{
- struct efx_nic *efx = netdev_priv(net_dev);
+ struct ef4_nic *efx = netdev_priv(net_dev);
struct falcon_nic_data *nic_data = efx->nic_data;
- efx_oword_t reg;
+ ef4_oword_t reg;
int rc;
mutex_lock(&nic_data->mdio_lock);
@@ -1637,31 +1637,31 @@ static int falcon_mdio_read(struct net_device *net_dev,
if (rc)
goto out;
- EFX_POPULATE_OWORD_1(reg, FRF_AB_MD_PHY_ADR, addr);
- efx_writeo(efx, &reg, FR_AB_MD_PHY_ADR);
+ EF4_POPULATE_OWORD_1(reg, FRF_AB_MD_PHY_ADR, addr);
+ ef4_writeo(efx, &reg, FR_AB_MD_PHY_ADR);
- EFX_POPULATE_OWORD_2(reg, FRF_AB_MD_PRT_ADR, prtad,
+ EF4_POPULATE_OWORD_2(reg, FRF_AB_MD_PRT_ADR, prtad,
FRF_AB_MD_DEV_ADR, devad);
- efx_writeo(efx, &reg, FR_AB_MD_ID);
+ ef4_writeo(efx, &reg, FR_AB_MD_ID);
/* Request data to be read */
- EFX_POPULATE_OWORD_2(reg, FRF_AB_MD_RDC, 1, FRF_AB_MD_GC, 0);
- efx_writeo(efx, &reg, FR_AB_MD_CS);
+ EF4_POPULATE_OWORD_2(reg, FRF_AB_MD_RDC, 1, FRF_AB_MD_GC, 0);
+ ef4_writeo(efx, &reg, FR_AB_MD_CS);
/* Wait for data to become available */
rc = falcon_gmii_wait(efx);
if (rc == 0) {
- efx_reado(efx, &reg, FR_AB_MD_RXD);
- rc = EFX_OWORD_FIELD(reg, FRF_AB_MD_RXD);
+ ef4_reado(efx, &reg, FR_AB_MD_RXD);
+ rc = EF4_OWORD_FIELD(reg, FRF_AB_MD_RXD);
netif_vdbg(efx, hw, efx->net_dev,
"read from MDIO %d register %d.%d, got %04x\n",
prtad, devad, addr, rc);
} else {
/* Abort the read operation */
- EFX_POPULATE_OWORD_2(reg,
+ EF4_POPULATE_OWORD_2(reg,
FRF_AB_MD_RIC, 0,
FRF_AB_MD_GC, 1);
- efx_writeo(efx, &reg, FR_AB_MD_CS);
+ ef4_writeo(efx, &reg, FR_AB_MD_CS);
netif_dbg(efx, hw, efx->net_dev,
"read from MDIO %d register %d.%d, got error %d\n",
@@ -1674,7 +1674,7 @@ out:
}
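
These two functions plug into the kernel's generic clause-45 MDIO glue; they are presumably wired up in falcon_probe_port() below, roughly as:

	/* Assumed hookup, mirroring the usual struct mdio_if_info usage */
	efx->mdio.mdio_read = falcon_mdio_read;
	efx->mdio.mdio_write = falcon_mdio_write;
	/* generic helpers such as ef4_mdio_phyxgxs_lane_sync() then issue
	 * their accesses through these callbacks */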
/* This call is responsible for hooking in the MAC and PHY operations */
-static int falcon_probe_port(struct efx_nic *efx)
+static int falcon_probe_port(struct ef4_nic *efx)
{
struct falcon_nic_data *nic_data = efx->nic_data;
int rc;
@@ -1709,15 +1709,15 @@ static int falcon_probe_port(struct efx_nic *efx)
efx->link_state.fd = true;
/* Hardware flow ctrl. FalconA RX FIFO too small for pause generation */
- if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0)
- efx->wanted_fc = EFX_FC_RX | EFX_FC_TX;
+ if (ef4_nic_rev(efx) >= EF4_REV_FALCON_B0)
+ efx->wanted_fc = EF4_FC_RX | EF4_FC_TX;
else
- efx->wanted_fc = EFX_FC_RX;
+ efx->wanted_fc = EF4_FC_RX;
if (efx->mdio.mmds & MDIO_DEVS_AN)
- efx->wanted_fc |= EFX_FC_AUTO;
+ efx->wanted_fc |= EF4_FC_AUTO;
/* Allocate buffer for stats */
- rc = efx_nic_alloc_buffer(efx, &efx->stats_buffer,
+ rc = ef4_nic_alloc_buffer(efx, &efx->stats_buffer,
FALCON_MAC_STATS_SIZE, GFP_KERNEL);
if (rc)
return rc;
@@ -1730,40 +1730,40 @@ static int falcon_probe_port(struct efx_nic *efx)
return 0;
}
-static void falcon_remove_port(struct efx_nic *efx)
+static void falcon_remove_port(struct ef4_nic *efx)
{
efx->phy_op->remove(efx);
- efx_nic_free_buffer(efx, &efx->stats_buffer);
+ ef4_nic_free_buffer(efx, &efx->stats_buffer);
}
/* Global events are basically PHY events */
static bool
-falcon_handle_global_event(struct efx_channel *channel, efx_qword_t *event)
+falcon_handle_global_event(struct ef4_channel *channel, ef4_qword_t *event)
{
- struct efx_nic *efx = channel->efx;
+ struct ef4_nic *efx = channel->efx;
struct falcon_nic_data *nic_data = efx->nic_data;
- if (EFX_QWORD_FIELD(*event, FSF_AB_GLB_EV_G_PHY0_INTR) ||
- EFX_QWORD_FIELD(*event, FSF_AB_GLB_EV_XG_PHY0_INTR) ||
- EFX_QWORD_FIELD(*event, FSF_AB_GLB_EV_XFP_PHY0_INTR))
+ if (EF4_QWORD_FIELD(*event, FSF_AB_GLB_EV_G_PHY0_INTR) ||
+ EF4_QWORD_FIELD(*event, FSF_AB_GLB_EV_XG_PHY0_INTR) ||
+ EF4_QWORD_FIELD(*event, FSF_AB_GLB_EV_XFP_PHY0_INTR))
/* Ignored */
return true;
- if ((efx_nic_rev(efx) == EFX_REV_FALCON_B0) &&
- EFX_QWORD_FIELD(*event, FSF_BB_GLB_EV_XG_MGT_INTR)) {
+ if ((ef4_nic_rev(efx) == EF4_REV_FALCON_B0) &&
+ EF4_QWORD_FIELD(*event, FSF_BB_GLB_EV_XG_MGT_INTR)) {
nic_data->xmac_poll_required = true;
return true;
}
- if (efx_nic_rev(efx) <= EFX_REV_FALCON_A1 ?
- EFX_QWORD_FIELD(*event, FSF_AA_GLB_EV_RX_RECOVERY) :
- EFX_QWORD_FIELD(*event, FSF_BB_GLB_EV_RX_RECOVERY)) {
+ if (ef4_nic_rev(efx) <= EF4_REV_FALCON_A1 ?
+ EF4_QWORD_FIELD(*event, FSF_AA_GLB_EV_RX_RECOVERY) :
+ EF4_QWORD_FIELD(*event, FSF_BB_GLB_EV_RX_RECOVERY)) {
netif_err(efx, rx_err, efx->net_dev,
"channel %d seen global RX_RESET event. Resetting.\n",
channel->channel);
atomic_inc(&efx->rx_reset);
- efx_schedule_reset(efx, EFX_WORKAROUND_6555(efx) ?
+ ef4_schedule_reset(efx, EF4_WORKAROUND_6555(efx) ?
RESET_TYPE_RX_RECOVERY : RESET_TYPE_DISABLE);
return true;
}
@@ -1778,7 +1778,7 @@ falcon_handle_global_event(struct efx_channel *channel, efx_qword_t *event)
**************************************************************************/
static int
-falcon_read_nvram(struct efx_nic *efx, struct falcon_nvconfig *nvconfig_out)
+falcon_read_nvram(struct ef4_nic *efx, struct falcon_nvconfig *nvconfig_out)
{
struct falcon_nic_data *nic_data = efx->nic_data;
struct falcon_nvconfig *nvconfig;
@@ -1849,52 +1849,52 @@ falcon_read_nvram(struct efx_nic *efx, struct falcon_nvconfig *nvconfig_out)
return rc;
}
-static int falcon_test_nvram(struct efx_nic *efx)
+static int falcon_test_nvram(struct ef4_nic *efx)
{
return falcon_read_nvram(efx, NULL);
}
-static const struct efx_farch_register_test falcon_b0_register_tests[] = {
+static const struct ef4_farch_register_test falcon_b0_register_tests[] = {
{ FR_AZ_ADR_REGION,
- EFX_OWORD32(0x0003FFFF, 0x0003FFFF, 0x0003FFFF, 0x0003FFFF) },
+ EF4_OWORD32(0x0003FFFF, 0x0003FFFF, 0x0003FFFF, 0x0003FFFF) },
{ FR_AZ_RX_CFG,
- EFX_OWORD32(0xFFFFFFFE, 0x00017FFF, 0x00000000, 0x00000000) },
+ EF4_OWORD32(0xFFFFFFFE, 0x00017FFF, 0x00000000, 0x00000000) },
{ FR_AZ_TX_CFG,
- EFX_OWORD32(0x7FFF0037, 0x00000000, 0x00000000, 0x00000000) },
+ EF4_OWORD32(0x7FFF0037, 0x00000000, 0x00000000, 0x00000000) },
{ FR_AZ_TX_RESERVED,
- EFX_OWORD32(0xFFFEFE80, 0x1FFFFFFF, 0x020000FE, 0x007FFFFF) },
+ EF4_OWORD32(0xFFFEFE80, 0x1FFFFFFF, 0x020000FE, 0x007FFFFF) },
{ FR_AB_MAC_CTRL,
- EFX_OWORD32(0xFFFF0000, 0x00000000, 0x00000000, 0x00000000) },
+ EF4_OWORD32(0xFFFF0000, 0x00000000, 0x00000000, 0x00000000) },
{ FR_AZ_SRM_TX_DC_CFG,
- EFX_OWORD32(0x001FFFFF, 0x00000000, 0x00000000, 0x00000000) },
+ EF4_OWORD32(0x001FFFFF, 0x00000000, 0x00000000, 0x00000000) },
{ FR_AZ_RX_DC_CFG,
- EFX_OWORD32(0x0000000F, 0x00000000, 0x00000000, 0x00000000) },
+ EF4_OWORD32(0x0000000F, 0x00000000, 0x00000000, 0x00000000) },
{ FR_AZ_RX_DC_PF_WM,
- EFX_OWORD32(0x000003FF, 0x00000000, 0x00000000, 0x00000000) },
+ EF4_OWORD32(0x000003FF, 0x00000000, 0x00000000, 0x00000000) },
{ FR_BZ_DP_CTRL,
- EFX_OWORD32(0x00000FFF, 0x00000000, 0x00000000, 0x00000000) },
+ EF4_OWORD32(0x00000FFF, 0x00000000, 0x00000000, 0x00000000) },
{ FR_AB_GM_CFG2,
- EFX_OWORD32(0x00007337, 0x00000000, 0x00000000, 0x00000000) },
+ EF4_OWORD32(0x00007337, 0x00000000, 0x00000000, 0x00000000) },
{ FR_AB_GMF_CFG0,
- EFX_OWORD32(0x00001F1F, 0x00000000, 0x00000000, 0x00000000) },
+ EF4_OWORD32(0x00001F1F, 0x00000000, 0x00000000, 0x00000000) },
{ FR_AB_XM_GLB_CFG,
- EFX_OWORD32(0x00000C68, 0x00000000, 0x00000000, 0x00000000) },
+ EF4_OWORD32(0x00000C68, 0x00000000, 0x00000000, 0x00000000) },
{ FR_AB_XM_TX_CFG,
- EFX_OWORD32(0x00080164, 0x00000000, 0x00000000, 0x00000000) },
+ EF4_OWORD32(0x00080164, 0x00000000, 0x00000000, 0x00000000) },
{ FR_AB_XM_RX_CFG,
- EFX_OWORD32(0x07100A0C, 0x00000000, 0x00000000, 0x00000000) },
+ EF4_OWORD32(0x07100A0C, 0x00000000, 0x00000000, 0x00000000) },
{ FR_AB_XM_RX_PARAM,
- EFX_OWORD32(0x00001FF8, 0x00000000, 0x00000000, 0x00000000) },
+ EF4_OWORD32(0x00001FF8, 0x00000000, 0x00000000, 0x00000000) },
{ FR_AB_XM_FC,
- EFX_OWORD32(0xFFFF0001, 0x00000000, 0x00000000, 0x00000000) },
+ EF4_OWORD32(0xFFFF0001, 0x00000000, 0x00000000, 0x00000000) },
{ FR_AB_XM_ADR_LO,
- EFX_OWORD32(0xFFFFFFFF, 0x00000000, 0x00000000, 0x00000000) },
+ EF4_OWORD32(0xFFFFFFFF, 0x00000000, 0x00000000, 0x00000000) },
{ FR_AB_XX_SD_CTL,
- EFX_OWORD32(0x0003FF0F, 0x00000000, 0x00000000, 0x00000000) },
+ EF4_OWORD32(0x0003FF0F, 0x00000000, 0x00000000, 0x00000000) },
};
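
Each entry pairs a register with a mask of the bits the self-test is allowed to flip; ef4_farch_test_registers() presumably saves the register, writes patterns restricted to the mask, checks the read-back, and restores. A reduced sketch (the .address/.mask field names are assumed from the initializer shape above):

	/* Sketch: one pattern only; the real test walks several. */
	static bool sketch_test_one(struct ef4_nic *efx,
				    const struct ef4_farch_register_test *t)
	{
		ef4_oword_t original, wrote, read;
		int i;

		ef4_reado(efx, &original, t->address);
		memcpy(&wrote, &t->mask, sizeof(wrote));/* all writable bits set */
		ef4_writeo(efx, &wrote, t->address);
		ef4_reado(efx, &read, t->address);
		ef4_writeo(efx, &original, t->address);	/* always restore */

		for (i = 0; i < 4; i++)			/* compare under the mask */
			if ((read.u32[i] ^ wrote.u32[i]) & t->mask.u32[i])
				return false;
		return true;
	}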
static int
-falcon_b0_test_chip(struct efx_nic *efx, struct efx_self_tests *tests)
+falcon_b0_test_chip(struct ef4_nic *efx, struct ef4_self_tests *tests)
{
enum reset_type reset_method = RESET_TYPE_INVISIBLE;
int rc, rc2;
@@ -1908,18 +1908,18 @@ falcon_b0_test_chip(struct efx_nic *efx, struct efx_self_tests *tests)
else
efx->loopback_mode = __ffs(efx->loopback_modes);
}
- __efx_reconfigure_port(efx);
+ __ef4_reconfigure_port(efx);
mutex_unlock(&efx->mac_lock);
- efx_reset_down(efx, reset_method);
+ ef4_reset_down(efx, reset_method);
tests->registers =
- efx_farch_test_registers(efx, falcon_b0_register_tests,
+ ef4_farch_test_registers(efx, falcon_b0_register_tests,
ARRAY_SIZE(falcon_b0_register_tests))
? -1 : 1;
rc = falcon_reset_hw(efx, reset_method);
- rc2 = efx_reset_up(efx, reset_method, rc == 0);
+ rc2 = ef4_reset_up(efx, reset_method, rc == 0);
return rc ? rc : rc2;
}
@@ -1974,10 +1974,10 @@ static int falcon_map_reset_flags(u32 *flags)
/* Resets NIC to known state. This routine must be called in process
* context and is allowed to sleep. */
-static int __falcon_reset_hw(struct efx_nic *efx, enum reset_type method)
+static int __falcon_reset_hw(struct ef4_nic *efx, enum reset_type method)
{
struct falcon_nic_data *nic_data = efx->nic_data;
- efx_oword_t glb_ctl_reg_ker;
+ ef4_oword_t glb_ctl_reg_ker;
int rc;
netif_dbg(efx, hw, efx->net_dev, "performing %s hardware reset\n",
@@ -1992,7 +1992,7 @@ static int __falcon_reset_hw(struct efx_nic *efx, enum reset_type method)
"function prior to hardware reset\n");
goto fail1;
}
- if (efx_nic_is_dual_func(efx)) {
+ if (ef4_nic_is_dual_func(efx)) {
rc = pci_save_state(nic_data->pci_dev2);
if (rc) {
netif_err(efx, drv, efx->net_dev,
@@ -2003,12 +2003,12 @@ static int __falcon_reset_hw(struct efx_nic *efx, enum reset_type method)
}
}
- EFX_POPULATE_OWORD_2(glb_ctl_reg_ker,
+ EF4_POPULATE_OWORD_2(glb_ctl_reg_ker,
FRF_AB_EXT_PHY_RST_DUR,
FFE_AB_EXT_PHY_RST_DUR_10240US,
FRF_AB_SWRST, 1);
} else {
- EFX_POPULATE_OWORD_7(glb_ctl_reg_ker,
+ EF4_POPULATE_OWORD_7(glb_ctl_reg_ker,
/* exclude PHY from "invisible" reset */
FRF_AB_EXT_PHY_RST_CTL,
method == RESET_TYPE_INVISIBLE,
@@ -2021,14 +2021,14 @@ static int __falcon_reset_hw(struct efx_nic *efx, enum reset_type method)
FFE_AB_EXT_PHY_RST_DUR_10240US,
FRF_AB_SWRST, 1);
}
- efx_writeo(efx, &glb_ctl_reg_ker, FR_AB_GLB_CTL);
+ ef4_writeo(efx, &glb_ctl_reg_ker, FR_AB_GLB_CTL);
netif_dbg(efx, hw, efx->net_dev, "waiting for hardware reset\n");
schedule_timeout_uninterruptible(HZ / 20);
/* Restore PCI configuration if needed */
if (method == RESET_TYPE_WORLD) {
- if (efx_nic_is_dual_func(efx))
+ if (ef4_nic_is_dual_func(efx))
pci_restore_state(nic_data->pci_dev2);
pci_restore_state(efx->pci_dev);
netif_dbg(efx, drv, efx->net_dev,
@@ -2036,8 +2036,8 @@ static int __falcon_reset_hw(struct efx_nic *efx, enum reset_type method)
}
/* Assert that reset complete */
- efx_reado(efx, &glb_ctl_reg_ker, FR_AB_GLB_CTL);
- if (EFX_OWORD_FIELD(glb_ctl_reg_ker, FRF_AB_SWRST) != 0) {
+ ef4_reado(efx, &glb_ctl_reg_ker, FR_AB_GLB_CTL);
+ if (EF4_OWORD_FIELD(glb_ctl_reg_ker, FRF_AB_SWRST) != 0) {
rc = -ETIMEDOUT;
netif_err(efx, hw, efx->net_dev,
"timed out waiting for hardware reset\n");
@@ -2055,7 +2055,7 @@ fail3:
return rc;
}
-static int falcon_reset_hw(struct efx_nic *efx, enum reset_type method)
+static int falcon_reset_hw(struct ef4_nic *efx, enum reset_type method)
{
struct falcon_nic_data *nic_data = efx->nic_data;
int rc;
@@ -2067,7 +2067,7 @@ static int falcon_reset_hw(struct efx_nic *efx, enum reset_type method)
return rc;
}
-static void falcon_monitor(struct efx_nic *efx)
+static void falcon_monitor(struct ef4_nic *efx)
{
bool link_changed;
int rc;
@@ -2080,7 +2080,7 @@ static void falcon_monitor(struct efx_nic *efx)
"Board sensor %s; shutting down PHY\n",
(rc == -ERANGE) ? "reported fault" : "failed");
efx->phy_mode |= PHY_MODE_LOW_POWER;
- rc = __efx_reconfigure_port(efx);
+ rc = __ef4_reconfigure_port(efx);
WARN_ON(rc);
}
@@ -2099,7 +2099,7 @@ static void falcon_monitor(struct efx_nic *efx)
falcon_start_nic_stats(efx);
- efx_link_status_changed(efx);
+ ef4_link_status_changed(efx);
}
falcon_poll_xmac(efx);
@@ -2108,22 +2108,22 @@ static void falcon_monitor(struct efx_nic *efx)
/* Zeroes out the SRAM contents. This routine must be called in
* process context and is allowed to sleep.
*/
-static int falcon_reset_sram(struct efx_nic *efx)
+static int falcon_reset_sram(struct ef4_nic *efx)
{
- efx_oword_t srm_cfg_reg_ker, gpio_cfg_reg_ker;
+ ef4_oword_t srm_cfg_reg_ker, gpio_cfg_reg_ker;
int count;
/* Set the SRAM wake/sleep GPIO appropriately. */
- efx_reado(efx, &gpio_cfg_reg_ker, FR_AB_GPIO_CTL);
- EFX_SET_OWORD_FIELD(gpio_cfg_reg_ker, FRF_AB_GPIO1_OEN, 1);
- EFX_SET_OWORD_FIELD(gpio_cfg_reg_ker, FRF_AB_GPIO1_OUT, 1);
- efx_writeo(efx, &gpio_cfg_reg_ker, FR_AB_GPIO_CTL);
+ ef4_reado(efx, &gpio_cfg_reg_ker, FR_AB_GPIO_CTL);
+ EF4_SET_OWORD_FIELD(gpio_cfg_reg_ker, FRF_AB_GPIO1_OEN, 1);
+ EF4_SET_OWORD_FIELD(gpio_cfg_reg_ker, FRF_AB_GPIO1_OUT, 1);
+ ef4_writeo(efx, &gpio_cfg_reg_ker, FR_AB_GPIO_CTL);
/* Initiate SRAM reset */
- EFX_POPULATE_OWORD_2(srm_cfg_reg_ker,
+ EF4_POPULATE_OWORD_2(srm_cfg_reg_ker,
FRF_AZ_SRM_INIT_EN, 1,
FRF_AZ_SRM_NB_SZ, 0);
- efx_writeo(efx, &srm_cfg_reg_ker, FR_AZ_SRM_CFG);
+ ef4_writeo(efx, &srm_cfg_reg_ker, FR_AZ_SRM_CFG);
/* Wait for SRAM reset to complete */
count = 0;
@@ -2135,8 +2135,8 @@ static int falcon_reset_sram(struct efx_nic *efx)
schedule_timeout_uninterruptible(HZ / 50);
/* Check for reset complete */
- efx_reado(efx, &srm_cfg_reg_ker, FR_AZ_SRM_CFG);
- if (!EFX_OWORD_FIELD(srm_cfg_reg_ker, FRF_AZ_SRM_INIT_EN)) {
+ ef4_reado(efx, &srm_cfg_reg_ker, FR_AZ_SRM_CFG);
+ if (!EF4_OWORD_FIELD(srm_cfg_reg_ker, FRF_AZ_SRM_INIT_EN)) {
netif_dbg(efx, hw, efx->net_dev,
"SRAM reset complete\n");
@@ -2148,7 +2148,7 @@ static int falcon_reset_sram(struct efx_nic *efx)
return -ETIMEDOUT;
}
-static void falcon_spi_device_init(struct efx_nic *efx,
+static void falcon_spi_device_init(struct ef4_nic *efx,
struct falcon_spi_device *spi_device,
unsigned int device_id, u32 device_type)
{
@@ -2174,7 +2174,7 @@ static void falcon_spi_device_init(struct efx_nic *efx,
}
/* Extract non-volatile configuration */
-static int falcon_probe_nvconfig(struct efx_nic *efx)
+static int falcon_probe_nvconfig(struct ef4_nic *efx)
{
struct falcon_nic_data *nic_data = efx->nic_data;
struct falcon_nvconfig *nvconfig;
@@ -2215,7 +2215,7 @@ out:
return rc;
}
-static int falcon_dimension_resources(struct efx_nic *efx)
+static int falcon_dimension_resources(struct ef4_nic *efx)
{
efx->rx_dc_base = 0x20000;
efx->tx_dc_base = 0x26000;
@@ -2223,18 +2223,18 @@ static int falcon_dimension_resources(struct efx_nic *efx)
}
/* Probe all SPI devices on the NIC */
-static void falcon_probe_spi_devices(struct efx_nic *efx)
+static void falcon_probe_spi_devices(struct ef4_nic *efx)
{
struct falcon_nic_data *nic_data = efx->nic_data;
- efx_oword_t nic_stat, gpio_ctl, ee_vpd_cfg;
+ ef4_oword_t nic_stat, gpio_ctl, ee_vpd_cfg;
int boot_dev;
- efx_reado(efx, &gpio_ctl, FR_AB_GPIO_CTL);
- efx_reado(efx, &nic_stat, FR_AB_NIC_STAT);
- efx_reado(efx, &ee_vpd_cfg, FR_AB_EE_VPD_CFG0);
+ ef4_reado(efx, &gpio_ctl, FR_AB_GPIO_CTL);
+ ef4_reado(efx, &nic_stat, FR_AB_NIC_STAT);
+ ef4_reado(efx, &ee_vpd_cfg, FR_AB_EE_VPD_CFG0);
- if (EFX_OWORD_FIELD(gpio_ctl, FRF_AB_GPIO3_PWRUP_VALUE)) {
- boot_dev = (EFX_OWORD_FIELD(nic_stat, FRF_AB_SF_PRST) ?
+ if (EF4_OWORD_FIELD(gpio_ctl, FRF_AB_GPIO3_PWRUP_VALUE)) {
+ boot_dev = (EF4_OWORD_FIELD(nic_stat, FRF_AB_SF_PRST) ?
FFE_AB_SPI_DEVICE_FLASH : FFE_AB_SPI_DEVICE_EEPROM);
netif_dbg(efx, probe, efx->net_dev, "Booted from %s\n",
boot_dev == FFE_AB_SPI_DEVICE_FLASH ?
@@ -2246,12 +2246,12 @@ static void falcon_probe_spi_devices(struct efx_nic *efx)
netif_dbg(efx, probe, efx->net_dev,
"Booted from internal ASIC settings;"
" setting SPI config\n");
- EFX_POPULATE_OWORD_3(ee_vpd_cfg, FRF_AB_EE_VPD_EN, 0,
+ EF4_POPULATE_OWORD_3(ee_vpd_cfg, FRF_AB_EE_VPD_EN, 0,
/* 125 MHz / 7 ~= 20 MHz */
FRF_AB_EE_SF_CLOCK_DIV, 7,
/* 125 MHz / 63 ~= 2 MHz */
FRF_AB_EE_EE_CLOCK_DIV, 63);
- efx_writeo(efx, &ee_vpd_cfg, FR_AB_EE_VPD_CFG0);
+ ef4_writeo(efx, &ee_vpd_cfg, FR_AB_EE_VPD_CFG0);
}
mutex_init(&nic_data->spi_lock);
@@ -2266,12 +2266,12 @@ static void falcon_probe_spi_devices(struct efx_nic *efx)
large_eeprom_type);
}
-static unsigned int falcon_a1_mem_map_size(struct efx_nic *efx)
+static unsigned int falcon_a1_mem_map_size(struct ef4_nic *efx)
{
return 0x20000;
}
-static unsigned int falcon_b0_mem_map_size(struct efx_nic *efx)
+static unsigned int falcon_b0_mem_map_size(struct ef4_nic *efx)
{
/* Map everything up to and including the RSS indirection table.
* The PCI core takes care of mapping the MSI-X tables.
@@ -2280,7 +2280,7 @@ static unsigned int falcon_b0_mem_map_size(struct efx_nic *efx)
FR_BZ_RX_INDIRECTION_TBL_STEP * FR_BZ_RX_INDIRECTION_TBL_ROWS;
}
-static int falcon_probe_nic(struct efx_nic *efx)
+static int falcon_probe_nic(struct ef4_nic *efx)
{
struct falcon_nic_data *nic_data;
struct falcon_board *board;
@@ -2296,14 +2296,14 @@ static int falcon_probe_nic(struct efx_nic *efx)
rc = -ENODEV;
- if (efx_farch_fpga_ver(efx) != 0) {
+ if (ef4_farch_fpga_ver(efx) != 0) {
netif_err(efx, probe, efx->net_dev,
"Falcon FPGA not supported\n");
goto fail1;
}
- if (efx_nic_rev(efx) <= EFX_REV_FALCON_A1) {
- efx_oword_t nic_stat;
+ if (ef4_nic_rev(efx) <= EF4_REV_FALCON_A1) {
+ ef4_oword_t nic_stat;
struct pci_dev *dev;
u8 pci_rev = efx->pci_dev->revision;
@@ -2312,13 +2312,13 @@ static int falcon_probe_nic(struct efx_nic *efx)
"Falcon rev A0 not supported\n");
goto fail1;
}
- efx_reado(efx, &nic_stat, FR_AB_NIC_STAT);
- if (EFX_OWORD_FIELD(nic_stat, FRF_AB_STRAP_10G) == 0) {
+ ef4_reado(efx, &nic_stat, FR_AB_NIC_STAT);
+ if (EF4_OWORD_FIELD(nic_stat, FRF_AB_STRAP_10G) == 0) {
netif_err(efx, probe, efx->net_dev,
"Falcon rev A1 1G not supported\n");
goto fail1;
}
- if (EFX_OWORD_FIELD(nic_stat, FRF_AA_STRAP_PCIE) == 0) {
+ if (EF4_OWORD_FIELD(nic_stat, FRF_AA_STRAP_PCIE) == 0) {
netif_err(efx, probe, efx->net_dev,
"Falcon rev A1 PCI-X not supported\n");
goto fail1;
@@ -2350,7 +2350,7 @@ static int falcon_probe_nic(struct efx_nic *efx)
}
/* Allocate memory for INT_KER */
- rc = efx_nic_alloc_buffer(efx, &efx->irq_status, sizeof(efx_oword_t),
+ rc = ef4_nic_alloc_buffer(efx, &efx->irq_status, sizeof(ef4_oword_t),
GFP_KERNEL);
if (rc)
goto fail4;
@@ -2372,8 +2372,8 @@ static int falcon_probe_nic(struct efx_nic *efx)
goto fail5;
}
- efx->max_channels = (efx_nic_rev(efx) <= EFX_REV_FALCON_A1 ? 4 :
- EFX_MAX_CHANNELS);
+ efx->max_channels = (ef4_nic_rev(efx) <= EF4_REV_FALCON_A1 ? 4 :
+ EF4_MAX_CHANNELS);
efx->max_tx_channels = efx->max_channels;
efx->timer_quantum_ns = 4968; /* 621 cycles */
efx->timer_max_ns = efx->type->timer_period_max *
@@ -2409,7 +2409,7 @@ static int falcon_probe_nic(struct efx_nic *efx)
i2c_del_adapter(&board->i2c_adap);
memset(&board->i2c_adap, 0, sizeof(board->i2c_adap));
fail5:
- efx_nic_free_buffer(efx, &efx->irq_status);
+ ef4_nic_free_buffer(efx, &efx->irq_status);
fail4:
fail3:
if (nic_data->pci_dev2) {
@@ -2422,66 +2422,66 @@ static int falcon_probe_nic(struct efx_nic *efx)
return rc;
}
-static void falcon_init_rx_cfg(struct efx_nic *efx)
+static void falcon_init_rx_cfg(struct ef4_nic *efx)
{
/* RX control FIFO thresholds (32 entries) */
const unsigned ctrl_xon_thr = 20;
const unsigned ctrl_xoff_thr = 25;
- efx_oword_t reg;
+ ef4_oword_t reg;
- efx_reado(efx, &reg, FR_AZ_RX_CFG);
- if (efx_nic_rev(efx) <= EFX_REV_FALCON_A1) {
+ ef4_reado(efx, &reg, FR_AZ_RX_CFG);
+ if (ef4_nic_rev(efx) <= EF4_REV_FALCON_A1) {
/* Data FIFO size is 5.5K. The RX DMA engine only
* supports scattering for user-mode queues, but will
* split DMA writes at intervals of RX_USR_BUF_SIZE
* (32-byte units) even for kernel-mode queues. We
* set it so large that this never happens.
*/
- EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_DESC_PUSH_EN, 0);
- EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_USR_BUF_SIZE,
+ EF4_SET_OWORD_FIELD(reg, FRF_AA_RX_DESC_PUSH_EN, 0);
+ EF4_SET_OWORD_FIELD(reg, FRF_AA_RX_USR_BUF_SIZE,
(3 * 4096) >> 5);
- EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_XON_MAC_TH, 512 >> 8);
- EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_XOFF_MAC_TH, 2048 >> 8);
- EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_XON_TX_TH, ctrl_xon_thr);
- EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_XOFF_TX_TH, ctrl_xoff_thr);
+ EF4_SET_OWORD_FIELD(reg, FRF_AA_RX_XON_MAC_TH, 512 >> 8);
+ EF4_SET_OWORD_FIELD(reg, FRF_AA_RX_XOFF_MAC_TH, 2048 >> 8);
+ EF4_SET_OWORD_FIELD(reg, FRF_AA_RX_XON_TX_TH, ctrl_xon_thr);
+ EF4_SET_OWORD_FIELD(reg, FRF_AA_RX_XOFF_TX_TH, ctrl_xoff_thr);
} else {
/* Data FIFO size is 80K; register fields moved */
- EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_DESC_PUSH_EN, 0);
- EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_USR_BUF_SIZE,
- EFX_RX_USR_BUF_SIZE >> 5);
+ EF4_SET_OWORD_FIELD(reg, FRF_BZ_RX_DESC_PUSH_EN, 0);
+ EF4_SET_OWORD_FIELD(reg, FRF_BZ_RX_USR_BUF_SIZE,
+ EF4_RX_USR_BUF_SIZE >> 5);
/* Send XON and XOFF at ~3 * max MTU away from empty/full */
- EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_XON_MAC_TH, 27648 >> 8);
- EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_XOFF_MAC_TH, 54272 >> 8);
- EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_XON_TX_TH, ctrl_xon_thr);
- EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_XOFF_TX_TH, ctrl_xoff_thr);
- EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_INGR_EN, 1);
+ EF4_SET_OWORD_FIELD(reg, FRF_BZ_RX_XON_MAC_TH, 27648 >> 8);
+ EF4_SET_OWORD_FIELD(reg, FRF_BZ_RX_XOFF_MAC_TH, 54272 >> 8);
+ EF4_SET_OWORD_FIELD(reg, FRF_BZ_RX_XON_TX_TH, ctrl_xon_thr);
+ EF4_SET_OWORD_FIELD(reg, FRF_BZ_RX_XOFF_TX_TH, ctrl_xoff_thr);
+ EF4_SET_OWORD_FIELD(reg, FRF_BZ_RX_INGR_EN, 1);
/* Enable hash insertion. This is broken for the
* 'Falcon' hash so also select Toeplitz TCP/IPv4 and
* IPv4 hashes. */
- EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_HASH_INSRT_HDR, 1);
- EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_HASH_ALG, 1);
- EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_IP_HASH, 1);
+ EF4_SET_OWORD_FIELD(reg, FRF_BZ_RX_HASH_INSRT_HDR, 1);
+ EF4_SET_OWORD_FIELD(reg, FRF_BZ_RX_HASH_ALG, 1);
+ EF4_SET_OWORD_FIELD(reg, FRF_BZ_RX_IP_HASH, 1);
}
/* Always enable XOFF signal from RX FIFO. We enable
* or disable transmission of pause frames at the MAC. */
- EFX_SET_OWORD_FIELD(reg, FRF_AZ_RX_XOFF_MAC_EN, 1);
- efx_writeo(efx, &reg, FR_AZ_RX_CFG);
+ EF4_SET_OWORD_FIELD(reg, FRF_AZ_RX_XOFF_MAC_EN, 1);
+ ef4_writeo(efx, &reg, FR_AZ_RX_CFG);
}
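
For reference, the B0 thresholds above decode as follows, assuming the usual 9216-byte jumbo frame (the registers store the byte counts shifted right by 8):

	XON_MAC_TH  = 27648 = 3 * 9216           (resume ~3 jumbo frames above empty)
	XOFF_MAC_TH = 54272 = 81920 - 3 * 9216   (pause ~3 jumbo frames below full; data FIFO is 80 KiB)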
/* This call performs hardware-specific global initialisation, such as
* defining the descriptor cache sizes and number of RSS channels.
* It does not set up any buffers, descriptor rings or event queues.
*/
-static int falcon_init_nic(struct efx_nic *efx)
+static int falcon_init_nic(struct ef4_nic *efx)
{
- efx_oword_t temp;
+ ef4_oword_t temp;
int rc;
/* Use on-chip SRAM */
- efx_reado(efx, &temp, FR_AB_NIC_STAT);
- EFX_SET_OWORD_FIELD(temp, FRF_AB_ONCHIP_SRAM, 1);
- efx_writeo(efx, &temp, FR_AB_NIC_STAT);
+ ef4_reado(efx, &temp, FR_AB_NIC_STAT);
+ EF4_SET_OWORD_FIELD(temp, FRF_AB_ONCHIP_SRAM, 1);
+ ef4_writeo(efx, &temp, FR_AB_NIC_STAT);
rc = falcon_reset_sram(efx);
if (rc)
@@ -2490,55 +2490,55 @@ static int falcon_init_nic(struct efx_nic *efx)
/* Clear the parity enables on the TX data fifos as
* they produce false parity errors because of timing issues
*/
- if (EFX_WORKAROUND_5129(efx)) {
- efx_reado(efx, &temp, FR_AZ_CSR_SPARE);
- EFX_SET_OWORD_FIELD(temp, FRF_AB_MEM_PERR_EN_TX_DATA, 0);
- efx_writeo(efx, &temp, FR_AZ_CSR_SPARE);
+ if (EF4_WORKAROUND_5129(efx)) {
+ ef4_reado(efx, &temp, FR_AZ_CSR_SPARE);
+ EF4_SET_OWORD_FIELD(temp, FRF_AB_MEM_PERR_EN_TX_DATA, 0);
+ ef4_writeo(efx, &temp, FR_AZ_CSR_SPARE);
}
- if (EFX_WORKAROUND_7244(efx)) {
- efx_reado(efx, &temp, FR_BZ_RX_FILTER_CTL);
- EFX_SET_OWORD_FIELD(temp, FRF_BZ_UDP_FULL_SRCH_LIMIT, 8);
- EFX_SET_OWORD_FIELD(temp, FRF_BZ_UDP_WILD_SRCH_LIMIT, 8);
- EFX_SET_OWORD_FIELD(temp, FRF_BZ_TCP_FULL_SRCH_LIMIT, 8);
- EFX_SET_OWORD_FIELD(temp, FRF_BZ_TCP_WILD_SRCH_LIMIT, 8);
- efx_writeo(efx, &temp, FR_BZ_RX_FILTER_CTL);
+ if (EF4_WORKAROUND_7244(efx)) {
+ ef4_reado(efx, &temp, FR_BZ_RX_FILTER_CTL);
+ EF4_SET_OWORD_FIELD(temp, FRF_BZ_UDP_FULL_SRCH_LIMIT, 8);
+ EF4_SET_OWORD_FIELD(temp, FRF_BZ_UDP_WILD_SRCH_LIMIT, 8);
+ EF4_SET_OWORD_FIELD(temp, FRF_BZ_TCP_FULL_SRCH_LIMIT, 8);
+ EF4_SET_OWORD_FIELD(temp, FRF_BZ_TCP_WILD_SRCH_LIMIT, 8);
+ ef4_writeo(efx, &temp, FR_BZ_RX_FILTER_CTL);
}
/* XXX This is documented only for Falcon A0/A1 */
/* Set up RX. The "wait for descriptor" mode is broken and must
* be disabled. RXDP recovery shouldn't be needed, but is.
*/
- efx_reado(efx, &temp, FR_AA_RX_SELF_RST);
- EFX_SET_OWORD_FIELD(temp, FRF_AA_RX_NODESC_WAIT_DIS, 1);
- EFX_SET_OWORD_FIELD(temp, FRF_AA_RX_SELF_RST_EN, 1);
- if (EFX_WORKAROUND_5583(efx))
- EFX_SET_OWORD_FIELD(temp, FRF_AA_RX_ISCSI_DIS, 1);
- efx_writeo(efx, &temp, FR_AA_RX_SELF_RST);
+ ef4_reado(efx, &temp, FR_AA_RX_SELF_RST);
+ EF4_SET_OWORD_FIELD(temp, FRF_AA_RX_NODESC_WAIT_DIS, 1);
+ EF4_SET_OWORD_FIELD(temp, FRF_AA_RX_SELF_RST_EN, 1);
+ if (EF4_WORKAROUND_5583(efx))
+ EF4_SET_OWORD_FIELD(temp, FRF_AA_RX_ISCSI_DIS, 1);
+ ef4_writeo(efx, &temp, FR_AA_RX_SELF_RST);
/* Do not enable TX_NO_EOP_DISC_EN, since it limits packets to 16
* descriptors (which is bad).
*/
- efx_reado(efx, &temp, FR_AZ_TX_CFG);
- EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_NO_EOP_DISC_EN, 0);
- efx_writeo(efx, &temp, FR_AZ_TX_CFG);
+ ef4_reado(efx, &temp, FR_AZ_TX_CFG);
+ EF4_SET_OWORD_FIELD(temp, FRF_AZ_TX_NO_EOP_DISC_EN, 0);
+ ef4_writeo(efx, &temp, FR_AZ_TX_CFG);
falcon_init_rx_cfg(efx);
- if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
+ if (ef4_nic_rev(efx) >= EF4_REV_FALCON_B0) {
falcon_b0_rx_push_rss_config(efx, false, efx->rx_indir_table);
/* Set destination of both TX and RX Flush events */
- EFX_POPULATE_OWORD_1(temp, FRF_BZ_FLS_EVQ_ID, 0);
- efx_writeo(efx, &temp, FR_BZ_DP_CTRL);
+ EF4_POPULATE_OWORD_1(temp, FRF_BZ_FLS_EVQ_ID, 0);
+ ef4_writeo(efx, &temp, FR_BZ_DP_CTRL);
}
- efx_farch_init_common(efx);
+ ef4_farch_init_common(efx);
return 0;
}
-static void falcon_remove_nic(struct efx_nic *efx)
+static void falcon_remove_nic(struct ef4_nic *efx)
{
struct falcon_nic_data *nic_data = efx->nic_data;
struct falcon_board *board = falcon_board(efx);
@@ -2549,7 +2549,7 @@ static void falcon_remove_nic(struct efx_nic *efx)
i2c_del_adapter(&board->i2c_adap);
memset(&board->i2c_adap, 0, sizeof(board->i2c_adap));
- efx_nic_free_buffer(efx, &efx->irq_status);
+ ef4_nic_free_buffer(efx, &efx->irq_status);
__falcon_reset_hw(efx, RESET_TYPE_ALL);
@@ -2564,40 +2564,40 @@ static void falcon_remove_nic(struct efx_nic *efx)
efx->nic_data = NULL;
}
-static size_t falcon_describe_nic_stats(struct efx_nic *efx, u8 *names)
+static size_t falcon_describe_nic_stats(struct ef4_nic *efx, u8 *names)
{
- return efx_nic_describe_stats(falcon_stat_desc, FALCON_STAT_COUNT,
+ return ef4_nic_describe_stats(falcon_stat_desc, FALCON_STAT_COUNT,
falcon_stat_mask, names);
}
-static size_t falcon_update_nic_stats(struct efx_nic *efx, u64 *full_stats,
+static size_t falcon_update_nic_stats(struct ef4_nic *efx, u64 *full_stats,
struct rtnl_link_stats64 *core_stats)
{
struct falcon_nic_data *nic_data = efx->nic_data;
u64 *stats = nic_data->stats;
- efx_oword_t cnt;
+ ef4_oword_t cnt;
if (!nic_data->stats_disable_count) {
- efx_reado(efx, &cnt, FR_AZ_RX_NODESC_DROP);
+ ef4_reado(efx, &cnt, FR_AZ_RX_NODESC_DROP);
stats[FALCON_STAT_rx_nodesc_drop_cnt] +=
- EFX_OWORD_FIELD(cnt, FRF_AB_RX_NODESC_DROP_CNT);
+ EF4_OWORD_FIELD(cnt, FRF_AB_RX_NODESC_DROP_CNT);
if (nic_data->stats_pending &&
FALCON_XMAC_STATS_DMA_FLAG(efx)) {
nic_data->stats_pending = false;
rmb(); /* read the done flag before the stats */
- efx_nic_update_stats(
+ ef4_nic_update_stats(
falcon_stat_desc, FALCON_STAT_COUNT,
falcon_stat_mask,
stats, efx->stats_buffer.addr, true);
}
/* Update derived statistic */
- efx_update_diff_stat(&stats[FALCON_STAT_rx_bad_bytes],
+ ef4_update_diff_stat(&stats[FALCON_STAT_rx_bad_bytes],
stats[FALCON_STAT_rx_bytes] -
stats[FALCON_STAT_rx_good_bytes] -
stats[FALCON_STAT_rx_control] * 64);
- efx_update_sw_stats(efx, stats);
+ ef4_update_sw_stats(efx, stats);
}
if (full_stats)
@@ -2628,7 +2628,7 @@ static size_t falcon_update_nic_stats(struct efx_nic *efx, u64 *full_stats,
return FALCON_STAT_COUNT;
}
-void falcon_start_nic_stats(struct efx_nic *efx)
+void falcon_start_nic_stats(struct ef4_nic *efx)
{
struct falcon_nic_data *nic_data = efx->nic_data;
@@ -2641,12 +2641,12 @@ void falcon_start_nic_stats(struct efx_nic *efx)
/* We don't actually pull stats on falcon. Wait 10ms so that
 * they arrive when we call this just after start_stats.
 */
-static void falcon_pull_nic_stats(struct efx_nic *efx)
+static void falcon_pull_nic_stats(struct ef4_nic *efx)
{
msleep(10);
}
-void falcon_stop_nic_stats(struct efx_nic *efx)
+void falcon_stop_nic_stats(struct ef4_nic *efx)
{
struct falcon_nic_data *nic_data = efx->nic_data;
int i;
@@ -2672,7 +2672,7 @@ void falcon_stop_nic_stats(struct efx_nic *efx)
spin_unlock_bh(&efx->stats_lock);
}
-static void falcon_set_id_led(struct efx_nic *efx, enum efx_led_mode mode)
+static void falcon_set_id_led(struct ef4_nic *efx, enum ef4_led_mode mode)
{
falcon_board(efx)->type->set_id_led(efx, mode);
}
@@ -2684,14 +2684,14 @@ static void falcon_set_id_led(struct efx_nic *efx, enum efx_led_mode mode)
**************************************************************************
*/
-static void falcon_get_wol(struct efx_nic *efx, struct ethtool_wolinfo *wol)
+static void falcon_get_wol(struct ef4_nic *efx, struct ethtool_wolinfo *wol)
{
wol->supported = 0;
wol->wolopts = 0;
memset(&wol->sopass, 0, sizeof(wol->sopass));
}
-static int falcon_set_wol(struct efx_nic *efx, u32 type)
+static int falcon_set_wol(struct ef4_nic *efx, u32 type)
{
if (type != 0)
return -EINVAL;
@@ -2705,9 +2705,8 @@ static int falcon_set_wol(struct efx_nic *efx, u32 type)
**************************************************************************
*/
-const struct efx_nic_type falcon_a1_nic_type = {
- .is_vf = false,
- .mem_bar = EFX_MEM_BAR,
+const struct ef4_nic_type falcon_a1_nic_type = {
+ .mem_bar = EF4_MEM_BAR,
.mem_map_size = falcon_a1_mem_map_size,
.probe = falcon_probe_nic,
.remove = falcon_remove_nic,
@@ -2721,11 +2720,11 @@ const struct efx_nic_type falcon_a1_nic_type = {
.probe_port = falcon_probe_port,
.remove_port = falcon_remove_port,
.handle_global_event = falcon_handle_global_event,
- .fini_dmaq = efx_farch_fini_dmaq,
+ .fini_dmaq = ef4_farch_fini_dmaq,
.prepare_flush = falcon_prepare_flush,
- .finish_flush = efx_port_dummy_op_void,
- .prepare_flr = efx_port_dummy_op_void,
- .finish_flr = efx_farch_finish_flr,
+ .finish_flush = ef4_port_dummy_op_void,
+ .prepare_flr = ef4_port_dummy_op_void,
+ .finish_flr = ef4_farch_finish_flr,
.describe_stats = falcon_describe_nic_stats,
.update_stats = falcon_update_nic_stats,
.start_stats = falcon_start_nic_stats,
@@ -2739,47 +2738,48 @@ const struct efx_nic_type falcon_a1_nic_type = {
.check_mac_fault = falcon_xmac_check_fault,
.get_wol = falcon_get_wol,
.set_wol = falcon_set_wol,
- .resume_wol = efx_port_dummy_op_void,
+ .resume_wol = ef4_port_dummy_op_void,
.test_nvram = falcon_test_nvram,
- .irq_enable_master = efx_farch_irq_enable_master,
- .irq_test_generate = efx_farch_irq_test_generate,
- .irq_disable_non_ev = efx_farch_irq_disable_master,
- .irq_handle_msi = efx_farch_msi_interrupt,
+ .irq_enable_master = ef4_farch_irq_enable_master,
+ .irq_test_generate = ef4_farch_irq_test_generate,
+ .irq_disable_non_ev = ef4_farch_irq_disable_master,
+ .irq_handle_msi = ef4_farch_msi_interrupt,
.irq_handle_legacy = falcon_legacy_interrupt_a1,
- .tx_probe = efx_farch_tx_probe,
- .tx_init = efx_farch_tx_init,
- .tx_remove = efx_farch_tx_remove,
- .tx_write = efx_farch_tx_write,
+ .tx_probe = ef4_farch_tx_probe,
+ .tx_init = ef4_farch_tx_init,
+ .tx_remove = ef4_farch_tx_remove,
+ .tx_write = ef4_farch_tx_write,
+ .tx_limit_len = ef4_farch_tx_limit_len,
.rx_push_rss_config = dummy_rx_push_rss_config,
- .rx_probe = efx_farch_rx_probe,
- .rx_init = efx_farch_rx_init,
- .rx_remove = efx_farch_rx_remove,
- .rx_write = efx_farch_rx_write,
- .rx_defer_refill = efx_farch_rx_defer_refill,
- .ev_probe = efx_farch_ev_probe,
- .ev_init = efx_farch_ev_init,
- .ev_fini = efx_farch_ev_fini,
- .ev_remove = efx_farch_ev_remove,
- .ev_process = efx_farch_ev_process,
- .ev_read_ack = efx_farch_ev_read_ack,
- .ev_test_generate = efx_farch_ev_test_generate,
+ .rx_probe = ef4_farch_rx_probe,
+ .rx_init = ef4_farch_rx_init,
+ .rx_remove = ef4_farch_rx_remove,
+ .rx_write = ef4_farch_rx_write,
+ .rx_defer_refill = ef4_farch_rx_defer_refill,
+ .ev_probe = ef4_farch_ev_probe,
+ .ev_init = ef4_farch_ev_init,
+ .ev_fini = ef4_farch_ev_fini,
+ .ev_remove = ef4_farch_ev_remove,
+ .ev_process = ef4_farch_ev_process,
+ .ev_read_ack = ef4_farch_ev_read_ack,
+ .ev_test_generate = ef4_farch_ev_test_generate,
/* We don't expose the filter table on Falcon A1 as it is not
* mapped into function 0, but these implementations still
* work with a degenerate case of all tables set to size 0.
*/
- .filter_table_probe = efx_farch_filter_table_probe,
- .filter_table_restore = efx_farch_filter_table_restore,
- .filter_table_remove = efx_farch_filter_table_remove,
- .filter_insert = efx_farch_filter_insert,
- .filter_remove_safe = efx_farch_filter_remove_safe,
- .filter_get_safe = efx_farch_filter_get_safe,
- .filter_clear_rx = efx_farch_filter_clear_rx,
- .filter_count_rx_used = efx_farch_filter_count_rx_used,
- .filter_get_rx_id_limit = efx_farch_filter_get_rx_id_limit,
- .filter_get_rx_ids = efx_farch_filter_get_rx_ids,
-
-#ifdef CONFIG_SFC_MTD
+ .filter_table_probe = ef4_farch_filter_table_probe,
+ .filter_table_restore = ef4_farch_filter_table_restore,
+ .filter_table_remove = ef4_farch_filter_table_remove,
+ .filter_insert = ef4_farch_filter_insert,
+ .filter_remove_safe = ef4_farch_filter_remove_safe,
+ .filter_get_safe = ef4_farch_filter_get_safe,
+ .filter_clear_rx = ef4_farch_filter_clear_rx,
+ .filter_count_rx_used = ef4_farch_filter_count_rx_used,
+ .filter_get_rx_id_limit = ef4_farch_filter_get_rx_id_limit,
+ .filter_get_rx_ids = ef4_farch_filter_get_rx_ids,
+
+#ifdef CONFIG_SFC_FALCON_MTD
.mtd_probe = falcon_mtd_probe,
.mtd_rename = falcon_mtd_rename,
.mtd_read = falcon_mtd_read,
@@ -2788,7 +2788,7 @@ const struct efx_nic_type falcon_a1_nic_type = {
.mtd_sync = falcon_mtd_sync,
#endif
- .revision = EFX_REV_FALCON_A1,
+ .revision = EF4_REV_FALCON_A1,
.txd_ptr_tbl_base = FR_AA_TX_DESC_PTR_TBL_KER,
.rxd_ptr_tbl_base = FR_AA_RX_DESC_PTR_TBL_KER,
.buf_tbl_base = FR_AA_BUF_FULL_TBL_KER,
@@ -2797,21 +2797,19 @@ const struct efx_nic_type falcon_a1_nic_type = {
.max_dma_mask = DMA_BIT_MASK(FSF_AZ_TX_KER_BUF_ADDR_WIDTH),
.rx_buffer_padding = 0x24,
.can_rx_scatter = false,
- .max_interrupt_mode = EFX_INT_MODE_MSI,
+ .max_interrupt_mode = EF4_INT_MODE_MSI,
.timer_period_max = 1 << FRF_AB_TC_TIMER_VAL_WIDTH,
.offload_features = NETIF_F_IP_CSUM,
- .mcdi_max_ver = -1,
};
-const struct efx_nic_type falcon_b0_nic_type = {
- .is_vf = false,
- .mem_bar = EFX_MEM_BAR,
+const struct ef4_nic_type falcon_b0_nic_type = {
+ .mem_bar = EF4_MEM_BAR,
.mem_map_size = falcon_b0_mem_map_size,
.probe = falcon_probe_nic,
.remove = falcon_remove_nic,
.init = falcon_init_nic,
.dimension_resources = falcon_dimension_resources,
- .fini = efx_port_dummy_op_void,
+ .fini = ef4_port_dummy_op_void,
.monitor = falcon_monitor,
.map_reset_reason = falcon_map_reset_reason,
.map_reset_flags = falcon_map_reset_flags,
@@ -2819,11 +2817,11 @@ const struct efx_nic_type falcon_b0_nic_type = {
.probe_port = falcon_probe_port,
.remove_port = falcon_remove_port,
.handle_global_event = falcon_handle_global_event,
- .fini_dmaq = efx_farch_fini_dmaq,
+ .fini_dmaq = ef4_farch_fini_dmaq,
.prepare_flush = falcon_prepare_flush,
- .finish_flush = efx_port_dummy_op_void,
- .prepare_flr = efx_port_dummy_op_void,
- .finish_flr = efx_farch_finish_flr,
+ .finish_flush = ef4_port_dummy_op_void,
+ .prepare_flr = ef4_port_dummy_op_void,
+ .finish_flr = ef4_farch_finish_flr,
.describe_stats = falcon_describe_nic_stats,
.update_stats = falcon_update_nic_stats,
.start_stats = falcon_start_nic_stats,
@@ -2837,47 +2835,48 @@ const struct efx_nic_type falcon_b0_nic_type = {
.check_mac_fault = falcon_xmac_check_fault,
.get_wol = falcon_get_wol,
.set_wol = falcon_set_wol,
- .resume_wol = efx_port_dummy_op_void,
+ .resume_wol = ef4_port_dummy_op_void,
.test_chip = falcon_b0_test_chip,
.test_nvram = falcon_test_nvram,
- .irq_enable_master = efx_farch_irq_enable_master,
- .irq_test_generate = efx_farch_irq_test_generate,
- .irq_disable_non_ev = efx_farch_irq_disable_master,
- .irq_handle_msi = efx_farch_msi_interrupt,
- .irq_handle_legacy = efx_farch_legacy_interrupt,
- .tx_probe = efx_farch_tx_probe,
- .tx_init = efx_farch_tx_init,
- .tx_remove = efx_farch_tx_remove,
- .tx_write = efx_farch_tx_write,
+ .irq_enable_master = ef4_farch_irq_enable_master,
+ .irq_test_generate = ef4_farch_irq_test_generate,
+ .irq_disable_non_ev = ef4_farch_irq_disable_master,
+ .irq_handle_msi = ef4_farch_msi_interrupt,
+ .irq_handle_legacy = ef4_farch_legacy_interrupt,
+ .tx_probe = ef4_farch_tx_probe,
+ .tx_init = ef4_farch_tx_init,
+ .tx_remove = ef4_farch_tx_remove,
+ .tx_write = ef4_farch_tx_write,
+ .tx_limit_len = ef4_farch_tx_limit_len,
.rx_push_rss_config = falcon_b0_rx_push_rss_config,
- .rx_probe = efx_farch_rx_probe,
- .rx_init = efx_farch_rx_init,
- .rx_remove = efx_farch_rx_remove,
- .rx_write = efx_farch_rx_write,
- .rx_defer_refill = efx_farch_rx_defer_refill,
- .ev_probe = efx_farch_ev_probe,
- .ev_init = efx_farch_ev_init,
- .ev_fini = efx_farch_ev_fini,
- .ev_remove = efx_farch_ev_remove,
- .ev_process = efx_farch_ev_process,
- .ev_read_ack = efx_farch_ev_read_ack,
- .ev_test_generate = efx_farch_ev_test_generate,
- .filter_table_probe = efx_farch_filter_table_probe,
- .filter_table_restore = efx_farch_filter_table_restore,
- .filter_table_remove = efx_farch_filter_table_remove,
- .filter_update_rx_scatter = efx_farch_filter_update_rx_scatter,
- .filter_insert = efx_farch_filter_insert,
- .filter_remove_safe = efx_farch_filter_remove_safe,
- .filter_get_safe = efx_farch_filter_get_safe,
- .filter_clear_rx = efx_farch_filter_clear_rx,
- .filter_count_rx_used = efx_farch_filter_count_rx_used,
- .filter_get_rx_id_limit = efx_farch_filter_get_rx_id_limit,
- .filter_get_rx_ids = efx_farch_filter_get_rx_ids,
+ .rx_probe = ef4_farch_rx_probe,
+ .rx_init = ef4_farch_rx_init,
+ .rx_remove = ef4_farch_rx_remove,
+ .rx_write = ef4_farch_rx_write,
+ .rx_defer_refill = ef4_farch_rx_defer_refill,
+ .ev_probe = ef4_farch_ev_probe,
+ .ev_init = ef4_farch_ev_init,
+ .ev_fini = ef4_farch_ev_fini,
+ .ev_remove = ef4_farch_ev_remove,
+ .ev_process = ef4_farch_ev_process,
+ .ev_read_ack = ef4_farch_ev_read_ack,
+ .ev_test_generate = ef4_farch_ev_test_generate,
+ .filter_table_probe = ef4_farch_filter_table_probe,
+ .filter_table_restore = ef4_farch_filter_table_restore,
+ .filter_table_remove = ef4_farch_filter_table_remove,
+ .filter_update_rx_scatter = ef4_farch_filter_update_rx_scatter,
+ .filter_insert = ef4_farch_filter_insert,
+ .filter_remove_safe = ef4_farch_filter_remove_safe,
+ .filter_get_safe = ef4_farch_filter_get_safe,
+ .filter_clear_rx = ef4_farch_filter_clear_rx,
+ .filter_count_rx_used = ef4_farch_filter_count_rx_used,
+ .filter_get_rx_id_limit = ef4_farch_filter_get_rx_id_limit,
+ .filter_get_rx_ids = ef4_farch_filter_get_rx_ids,
#ifdef CONFIG_RFS_ACCEL
- .filter_rfs_insert = efx_farch_filter_rfs_insert,
- .filter_rfs_expire_one = efx_farch_filter_rfs_expire_one,
+ .filter_rfs_insert = ef4_farch_filter_rfs_insert,
+ .filter_rfs_expire_one = ef4_farch_filter_rfs_expire_one,
#endif
-#ifdef CONFIG_SFC_MTD
+#ifdef CONFIG_SFC_FALCON_MTD
.mtd_probe = falcon_mtd_probe,
.mtd_rename = falcon_mtd_rename,
.mtd_read = falcon_mtd_read,
@@ -2886,7 +2885,7 @@ const struct efx_nic_type falcon_b0_nic_type = {
.mtd_sync = falcon_mtd_sync,
#endif
- .revision = EFX_REV_FALCON_B0,
+ .revision = EF4_REV_FALCON_B0,
.txd_ptr_tbl_base = FR_BZ_TX_DESC_PTR_TBL,
.rxd_ptr_tbl_base = FR_BZ_RX_DESC_PTR_TBL,
.buf_tbl_base = FR_BZ_BUF_FULL_TBL,
@@ -2897,9 +2896,8 @@ const struct efx_nic_type falcon_b0_nic_type = {
.rx_hash_offset = FS_BZ_RX_PREFIX_HASH_OFST,
.rx_buffer_padding = 0,
.can_rx_scatter = true,
- .max_interrupt_mode = EFX_INT_MODE_MSIX,
+ .max_interrupt_mode = EF4_INT_MODE_MSIX,
.timer_period_max = 1 << FRF_AB_TC_TIMER_VAL_WIDTH,
.offload_features = NETIF_F_IP_CSUM | NETIF_F_RXHASH | NETIF_F_NTUPLE,
- .mcdi_max_ver = -1,
.max_rx_ip_filters = FR_BZ_RX_FILTER_TBL0_ROWS,
};
diff --git a/drivers/net/ethernet/sfc/falcon_boards.c b/drivers/net/ethernet/sfc/falcon/falcon_boards.c
index f6883b2b5da3..dec83a217093 100644
--- a/drivers/net/ethernet/sfc/falcon_boards.c
+++ b/drivers/net/ethernet/sfc/falcon/falcon_boards.c
@@ -66,7 +66,7 @@
#if IS_ENABLED(CONFIG_SENSORS_LM87)
-static int efx_poke_lm87(struct i2c_client *client, const u8 *reg_values)
+static int ef4_poke_lm87(struct i2c_client *client, const u8 *reg_values)
{
while (*reg_values) {
u8 reg = *reg_values++;
@@ -87,7 +87,7 @@ static const u8 falcon_lm87_common_regs[] = {
0
};
-static int efx_init_lm87(struct efx_nic *efx, const struct i2c_board_info *info,
+static int ef4_init_lm87(struct ef4_nic *efx, const struct i2c_board_info *info,
const u8 *reg_values)
{
struct falcon_board *board = falcon_board(efx);
@@ -101,10 +101,10 @@ static int efx_init_lm87(struct efx_nic *efx, const struct i2c_board_info *info,
i2c_smbus_read_byte_data(client, LM87_REG_ALARMS1);
i2c_smbus_read_byte_data(client, LM87_REG_ALARMS2);
- rc = efx_poke_lm87(client, reg_values);
+ rc = ef4_poke_lm87(client, reg_values);
if (rc)
goto err;
- rc = efx_poke_lm87(client, falcon_lm87_common_regs);
+ rc = ef4_poke_lm87(client, falcon_lm87_common_regs);
if (rc)
goto err;
@@ -116,12 +116,12 @@ err:
return rc;
}
-static void efx_fini_lm87(struct efx_nic *efx)
+static void ef4_fini_lm87(struct ef4_nic *efx)
{
i2c_unregister_device(falcon_board(efx)->hwmon_client);
}
-static int efx_check_lm87(struct efx_nic *efx, unsigned mask)
+static int ef4_check_lm87(struct ef4_nic *efx, unsigned mask)
{
struct i2c_client *client = falcon_board(efx)->hwmon_client;
bool temp_crit, elec_fault, is_failure;
@@ -129,7 +129,7 @@ static int efx_check_lm87(struct efx_nic *efx, unsigned mask)
s32 reg;
/* If link is up then do not monitor temperature */
- if (EFX_WORKAROUND_7884(efx) && efx->link_state.up)
+ if (EF4_WORKAROUND_7884(efx) && efx->link_state.up)
return 0;
reg = i2c_smbus_read_byte_data(client, LM87_REG_ALARMS1);
@@ -179,15 +179,15 @@ static int efx_check_lm87(struct efx_nic *efx, unsigned mask)
#else /* !CONFIG_SENSORS_LM87 */
static inline int
-efx_init_lm87(struct efx_nic *efx, const struct i2c_board_info *info,
+ef4_init_lm87(struct ef4_nic *efx, const struct i2c_board_info *info,
const u8 *reg_values)
{
return 0;
}
-static inline void efx_fini_lm87(struct efx_nic *efx)
+static inline void ef4_fini_lm87(struct ef4_nic *efx)
{
}
-static inline int efx_check_lm87(struct efx_nic *efx, unsigned mask)
+static inline int ef4_check_lm87(struct ef4_nic *efx, unsigned mask)
{
return 0;
}
@@ -255,7 +255,7 @@ static inline int efx_check_lm87(struct efx_nic *efx, unsigned mask)
#define MAX664X_REG_RSL 0x02
#define MAX664X_REG_WLHO 0x0B
-static void sfe4001_poweroff(struct efx_nic *efx)
+static void sfe4001_poweroff(struct ef4_nic *efx)
{
struct i2c_client *ioexp_client = falcon_board(efx)->ioexp_client;
struct i2c_client *hwmon_client = falcon_board(efx)->hwmon_client;
@@ -269,7 +269,7 @@ static void sfe4001_poweroff(struct efx_nic *efx)
i2c_smbus_read_byte_data(hwmon_client, MAX664X_REG_RSL);
}
-static int sfe4001_poweron(struct efx_nic *efx)
+static int sfe4001_poweron(struct ef4_nic *efx)
{
struct i2c_client *ioexp_client = falcon_board(efx)->ioexp_client;
struct i2c_client *hwmon_client = falcon_board(efx)->hwmon_client;
@@ -360,7 +360,7 @@ fail_on:
static ssize_t show_phy_flash_cfg(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));
+ struct ef4_nic *efx = pci_get_drvdata(to_pci_dev(dev));
return sprintf(buf, "%d\n", !!(efx->phy_mode & PHY_MODE_SPECIAL));
}
@@ -368,8 +368,8 @@ static ssize_t set_phy_flash_cfg(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
- struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));
- enum efx_phy_mode old_mode, new_mode;
+ struct ef4_nic *efx = pci_get_drvdata(to_pci_dev(dev));
+ enum ef4_phy_mode old_mode, new_mode;
int err;
rtnl_lock();
@@ -390,7 +390,7 @@ static ssize_t set_phy_flash_cfg(struct device *dev,
falcon_stop_nic_stats(efx);
err = sfe4001_poweron(efx);
if (!err)
- err = efx_reconfigure_port(efx);
+ err = ef4_reconfigure_port(efx);
if (!(new_mode & PHY_MODE_SPECIAL))
falcon_start_nic_stats(efx);
}
@@ -401,7 +401,7 @@ static ssize_t set_phy_flash_cfg(struct device *dev,
static DEVICE_ATTR(phy_flash_cfg, 0644, show_phy_flash_cfg, set_phy_flash_cfg);
-static void sfe4001_fini(struct efx_nic *efx)
+static void sfe4001_fini(struct ef4_nic *efx)
{
struct falcon_board *board = falcon_board(efx);
@@ -413,13 +413,13 @@ static void sfe4001_fini(struct efx_nic *efx)
i2c_unregister_device(board->hwmon_client);
}
-static int sfe4001_check_hw(struct efx_nic *efx)
+static int sfe4001_check_hw(struct ef4_nic *efx)
{
struct falcon_nic_data *nic_data = efx->nic_data;
s32 status;
/* If XAUI link is up then do not monitor */
- if (EFX_WORKAROUND_7884(efx) && !nic_data->xmac_poll_required)
+ if (EF4_WORKAROUND_7884(efx) && !nic_data->xmac_poll_required)
return 0;
/* Check the powered status of the PHY. Lack of power implies that
@@ -450,7 +450,7 @@ static const struct i2c_board_info sfe4001_hwmon_info = {
* be turned on before the PHY can be used.
* Context: Process context, rtnl lock held
*/
-static int sfe4001_init(struct efx_nic *efx)
+static int sfe4001_init(struct ef4_nic *efx)
{
struct falcon_board *board = falcon_board(efx);
int rc;
@@ -537,7 +537,7 @@ static const struct i2c_board_info sfe4002_hwmon_info = {
#define SFE4002_RX_LED (0) /* Green */
#define SFE4002_TX_LED (1) /* Amber */
-static void sfe4002_init_phy(struct efx_nic *efx)
+static void sfe4002_init_phy(struct ef4_nic *efx)
{
/* Set the TX and RX LEDs to reflect status and activity, and the
* fault LED off */
@@ -548,14 +548,14 @@ static void sfe4002_init_phy(struct efx_nic *efx)
falcon_qt202x_set_led(efx, SFE4002_FAULT_LED, QUAKE_LED_OFF);
}
-static void sfe4002_set_id_led(struct efx_nic *efx, enum efx_led_mode mode)
+static void sfe4002_set_id_led(struct ef4_nic *efx, enum ef4_led_mode mode)
{
falcon_qt202x_set_led(
efx, SFE4002_FAULT_LED,
- (mode == EFX_LED_ON) ? QUAKE_LED_ON : QUAKE_LED_OFF);
+ (mode == EF4_LED_ON) ? QUAKE_LED_ON : QUAKE_LED_OFF);
}
-static int sfe4002_check_hw(struct efx_nic *efx)
+static int sfe4002_check_hw(struct ef4_nic *efx)
{
struct falcon_board *board = falcon_board(efx);
@@ -565,12 +565,12 @@ static int sfe4002_check_hw(struct efx_nic *efx)
(board->major == 0 && board->minor == 0) ?
~LM87_ALARM_TEMP_EXT1 : ~0;
- return efx_check_lm87(efx, alarm_mask);
+ return ef4_check_lm87(efx, alarm_mask);
}
-static int sfe4002_init(struct efx_nic *efx)
+static int sfe4002_init(struct ef4_nic *efx)
{
- return efx_init_lm87(efx, &sfe4002_hwmon_info, sfe4002_lm87_regs);
+ return ef4_init_lm87(efx, &sfe4002_hwmon_info, sfe4002_lm87_regs);
}
/*****************************************************************************
@@ -599,7 +599,7 @@ static const struct i2c_board_info sfn4112f_hwmon_info = {
#define SFN4112F_ACT_LED 0
#define SFN4112F_LINK_LED 1
-static void sfn4112f_init_phy(struct efx_nic *efx)
+static void sfn4112f_init_phy(struct ef4_nic *efx)
{
falcon_qt202x_set_led(efx, SFN4112F_ACT_LED,
QUAKE_LED_RXLINK | QUAKE_LED_LINK_ACT);
@@ -607,15 +607,15 @@ static void sfn4112f_init_phy(struct efx_nic *efx)
QUAKE_LED_RXLINK | QUAKE_LED_LINK_STAT);
}
-static void sfn4112f_set_id_led(struct efx_nic *efx, enum efx_led_mode mode)
+static void sfn4112f_set_id_led(struct ef4_nic *efx, enum ef4_led_mode mode)
{
int reg;
switch (mode) {
- case EFX_LED_OFF:
+ case EF4_LED_OFF:
reg = QUAKE_LED_OFF;
break;
- case EFX_LED_ON:
+ case EF4_LED_ON:
reg = QUAKE_LED_ON;
break;
default:
@@ -626,15 +626,15 @@ static void sfn4112f_set_id_led(struct efx_nic *efx, enum efx_led_mode mode)
falcon_qt202x_set_led(efx, SFN4112F_LINK_LED, reg);
}
-static int sfn4112f_check_hw(struct efx_nic *efx)
+static int sfn4112f_check_hw(struct ef4_nic *efx)
{
/* Mask out unused sensors */
- return efx_check_lm87(efx, ~0x48);
+ return ef4_check_lm87(efx, ~0x48);
}
-static int sfn4112f_init(struct efx_nic *efx)
+static int sfn4112f_init(struct ef4_nic *efx)
{
- return efx_init_lm87(efx, &sfn4112f_hwmon_info, sfn4112f_lm87_regs);
+ return ef4_init_lm87(efx, &sfn4112f_hwmon_info, sfn4112f_lm87_regs);
}
/*****************************************************************************
@@ -663,7 +663,7 @@ static const struct i2c_board_info sfe4003_hwmon_info = {
#define SFE4003_LED_ON 1
#define SFE4003_LED_OFF 0
-static void sfe4003_set_id_led(struct efx_nic *efx, enum efx_led_mode mode)
+static void sfe4003_set_id_led(struct ef4_nic *efx, enum ef4_led_mode mode)
{
struct falcon_board *board = falcon_board(efx);
@@ -673,10 +673,10 @@ static void sfe4003_set_id_led(struct efx_nic *efx, enum efx_led_mode mode)
falcon_txc_set_gpio_val(
efx, SFE4003_RED_LED_GPIO,
- (mode == EFX_LED_ON) ? SFE4003_LED_ON : SFE4003_LED_OFF);
+ (mode == EF4_LED_ON) ? SFE4003_LED_ON : SFE4003_LED_OFF);
}
-static void sfe4003_init_phy(struct efx_nic *efx)
+static void sfe4003_init_phy(struct ef4_nic *efx)
{
struct falcon_board *board = falcon_board(efx);
@@ -688,7 +688,7 @@ static void sfe4003_init_phy(struct efx_nic *efx)
falcon_txc_set_gpio_val(efx, SFE4003_RED_LED_GPIO, SFE4003_LED_OFF);
}
-static int sfe4003_check_hw(struct efx_nic *efx)
+static int sfe4003_check_hw(struct ef4_nic *efx)
{
struct falcon_board *board = falcon_board(efx);
@@ -698,19 +698,19 @@ static int sfe4003_check_hw(struct efx_nic *efx)
(board->major == 0 && board->minor <= 2) ?
~LM87_ALARM_TEMP_EXT1 : ~0;
- return efx_check_lm87(efx, alarm_mask);
+ return ef4_check_lm87(efx, alarm_mask);
}
-static int sfe4003_init(struct efx_nic *efx)
+static int sfe4003_init(struct ef4_nic *efx)
{
- return efx_init_lm87(efx, &sfe4003_hwmon_info, sfe4003_lm87_regs);
+ return ef4_init_lm87(efx, &sfe4003_hwmon_info, sfe4003_lm87_regs);
}
static const struct falcon_board_type board_types[] = {
{
.id = FALCON_BOARD_SFE4001,
.init = sfe4001_init,
- .init_phy = efx_port_dummy_op_void,
+ .init_phy = ef4_port_dummy_op_void,
.fini = sfe4001_fini,
.set_id_led = tenxpress_set_id_led,
.monitor = sfe4001_check_hw,
@@ -719,7 +719,7 @@ static const struct falcon_board_type board_types[] = {
.id = FALCON_BOARD_SFE4002,
.init = sfe4002_init,
.init_phy = sfe4002_init_phy,
- .fini = efx_fini_lm87,
+ .fini = ef4_fini_lm87,
.set_id_led = sfe4002_set_id_led,
.monitor = sfe4002_check_hw,
},
@@ -727,7 +727,7 @@ static const struct falcon_board_type board_types[] = {
.id = FALCON_BOARD_SFE4003,
.init = sfe4003_init,
.init_phy = sfe4003_init_phy,
- .fini = efx_fini_lm87,
+ .fini = ef4_fini_lm87,
.set_id_led = sfe4003_set_id_led,
.monitor = sfe4003_check_hw,
},
@@ -735,13 +735,13 @@ static const struct falcon_board_type board_types[] = {
.id = FALCON_BOARD_SFN4112F,
.init = sfn4112f_init,
.init_phy = sfn4112f_init_phy,
- .fini = efx_fini_lm87,
+ .fini = ef4_fini_lm87,
.set_id_led = sfn4112f_set_id_led,
.monitor = sfn4112f_check_hw,
},
};
-int falcon_probe_board(struct efx_nic *efx, u16 revision_info)
+int falcon_probe_board(struct ef4_nic *efx, u16 revision_info)
{
struct falcon_board *board = falcon_board(efx);
u8 type_id = FALCON_BOARD_TYPE(revision_info);
diff --git a/drivers/net/ethernet/sfc/falcon/farch.c b/drivers/net/ethernet/sfc/falcon/farch.c
new file mode 100644
index 000000000000..05916c710d8c
--- /dev/null
+++ b/drivers/net/ethernet/sfc/falcon/farch.c
@@ -0,0 +1,2892 @@
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2005-2006 Fen Systems Ltd.
+ * Copyright 2006-2013 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#include <linux/bitops.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/module.h>
+#include <linux/seq_file.h>
+#include <linux/crc32.h>
+#include "net_driver.h"
+#include "bitfield.h"
+#include "efx.h"
+#include "nic.h"
+#include "farch_regs.h"
+#include "io.h"
+#include "workarounds.h"
+
+/* Falcon-architecture (SFC4000) support */
+
+/**************************************************************************
+ *
+ * Configurable values
+ *
+ **************************************************************************
+ */
+
+/* This is set to 16 for a good reason. In summary, if larger than
+ * 16, the descriptor cache holds more than a default socket
+ * buffer's worth of packets (for UDP we can only have at most one
+ * socket buffer's worth outstanding). This combined with the fact
+ * that we only get 1 TX event per descriptor cache means the NIC
+ * goes idle.
+ */
+#define TX_DC_ENTRIES 16
+#define TX_DC_ENTRIES_ORDER 1
+
+#define RX_DC_ENTRIES 64
+#define RX_DC_ENTRIES_ORDER 3
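+
+/* Illustrative sanity check (editor's sketch, assuming the _ORDER
+ * values encode the entry counts as 8 << order, which both pairs
+ * above satisfy):
+ *
+ *	BUILD_BUG_ON(TX_DC_ENTRIES != (8 << TX_DC_ENTRIES_ORDER));
+ *	BUILD_BUG_ON(RX_DC_ENTRIES != (8 << RX_DC_ENTRIES_ORDER));
+ */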
+
+/* If EF4_MAX_INT_ERRORS internal errors occur within
+ * EF4_INT_ERROR_EXPIRE seconds, we consider the NIC broken and
+ * disable it.
+ */
+#define EF4_INT_ERROR_EXPIRE 3600
+#define EF4_MAX_INT_ERRORS 5
+
+/* Depth of RX flush request fifo */
+#define EF4_RX_FLUSH_COUNT 4
+
+/* Driver generated events */
+#define _EF4_CHANNEL_MAGIC_TEST 0x000101
+#define _EF4_CHANNEL_MAGIC_FILL 0x000102
+#define _EF4_CHANNEL_MAGIC_RX_DRAIN 0x000103
+#define _EF4_CHANNEL_MAGIC_TX_DRAIN 0x000104
+
+#define _EF4_CHANNEL_MAGIC(_code, _data) ((_code) << 8 | (_data))
+#define _EF4_CHANNEL_MAGIC_CODE(_magic) ((_magic) >> 8)
+
+#define EF4_CHANNEL_MAGIC_TEST(_channel) \
+ _EF4_CHANNEL_MAGIC(_EF4_CHANNEL_MAGIC_TEST, (_channel)->channel)
+#define EF4_CHANNEL_MAGIC_FILL(_rx_queue) \
+ _EF4_CHANNEL_MAGIC(_EF4_CHANNEL_MAGIC_FILL, \
+ ef4_rx_queue_index(_rx_queue))
+#define EF4_CHANNEL_MAGIC_RX_DRAIN(_rx_queue) \
+ _EF4_CHANNEL_MAGIC(_EF4_CHANNEL_MAGIC_RX_DRAIN, \
+ ef4_rx_queue_index(_rx_queue))
+#define EF4_CHANNEL_MAGIC_TX_DRAIN(_tx_queue) \
+ _EF4_CHANNEL_MAGIC(_EF4_CHANNEL_MAGIC_TX_DRAIN, \
+ (_tx_queue)->queue)
+
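+/* Worked example (illustrative): the helpers above pack an event code
+ * into bits 8 and up and per-queue data into the low 8 bits, so for a
+ * TEST event on channel 3:
+ *
+ *	magic = _EF4_CHANNEL_MAGIC(_EF4_CHANNEL_MAGIC_TEST, 3)
+ *	      = (0x000101 << 8) | 3 = 0x010103
+ *	_EF4_CHANNEL_MAGIC_CODE(magic) == 0x000101
+ *	magic & 0xff == 3
+ */
+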
+static void ef4_farch_magic_event(struct ef4_channel *channel, u32 magic);
+
+/**************************************************************************
+ *
+ * Hardware access
+ *
+ **************************************************************************/
+
+static inline void ef4_write_buf_tbl(struct ef4_nic *efx, ef4_qword_t *value,
+ unsigned int index)
+{
+ ef4_sram_writeq(efx, efx->membase + efx->type->buf_tbl_base,
+ value, index);
+}
+
+static bool ef4_masked_compare_oword(const ef4_oword_t *a, const ef4_oword_t *b,
+ const ef4_oword_t *mask)
+{
+ return ((a->u64[0] ^ b->u64[0]) & mask->u64[0]) ||
+ ((a->u64[1] ^ b->u64[1]) & mask->u64[1]);
+}
+
+int ef4_farch_test_registers(struct ef4_nic *efx,
+ const struct ef4_farch_register_test *regs,
+ size_t n_regs)
+{
+ unsigned address = 0;
+ int i, j;
+ ef4_oword_t mask, imask, original, reg, buf;
+
+ for (i = 0; i < n_regs; ++i) {
+ address = regs[i].address;
+ mask = imask = regs[i].mask;
+ EF4_INVERT_OWORD(imask);
+
+ ef4_reado(efx, &original, address);
+
+ /* bit sweep on and off */
+ for (j = 0; j < 128; j++) {
+ if (!EF4_EXTRACT_OWORD32(mask, j, j))
+ continue;
+
+ /* Test this testable bit can be set in isolation */
+ EF4_AND_OWORD(reg, original, mask);
+ EF4_SET_OWORD32(reg, j, j, 1);
+
+ ef4_writeo(efx, &reg, address);
+ ef4_reado(efx, &buf, address);
+
+ if (ef4_masked_compare_oword(&reg, &buf, &mask))
+ goto fail;
+
+ /* Test this testable bit can be cleared in isolation */
+ EF4_OR_OWORD(reg, original, mask);
+ EF4_SET_OWORD32(reg, j, j, 0);
+
+ ef4_writeo(efx, &reg, address);
+ ef4_reado(efx, &buf, address);
+
+ if (ef4_masked_compare_oword(&reg, &buf, &mask))
+ goto fail;
+ }
+
+ ef4_writeo(efx, &original, address);
+ }
+
+ return 0;
+
+fail:
+ netif_err(efx, hw, efx->net_dev,
+ "wrote "EF4_OWORD_FMT" read "EF4_OWORD_FMT
+ " at address 0x%x mask "EF4_OWORD_FMT"\n", EF4_OWORD_VAL(reg),
+ EF4_OWORD_VAL(buf), address, EF4_OWORD_VAL(mask));
+ return -EIO;
+}
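+
+/* Usage sketch (illustrative; the register and mask values here are
+ * assumed, not taken from the patch). Callers pass a table of
+ * {address, mask} pairs naming the bits that are safe to toggle:
+ *
+ *	static const struct ef4_farch_register_test regs[] = {
+ *		{ FR_AZ_RX_CFG, EF4_OWORD32(0x0003ffff, 0, 0, 0) },
+ *	};
+ *	rc = ef4_farch_test_registers(efx, regs, ARRAY_SIZE(regs));
+ */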
+
+/**************************************************************************
+ *
+ * Special buffer handling
+ * Special buffers are used for event queues and the TX and RX
+ * descriptor rings.
+ *
+ *************************************************************************/
+
+/*
+ * Initialise a special buffer
+ *
+ * This will define a buffer (previously allocated via
+ * ef4_alloc_special_buffer()) in the buffer table, allowing
+ * it to be used for event queues, descriptor rings etc.
+ */
+static void
+ef4_init_special_buffer(struct ef4_nic *efx, struct ef4_special_buffer *buffer)
+{
+ ef4_qword_t buf_desc;
+ unsigned int index;
+ dma_addr_t dma_addr;
+ int i;
+
+ EF4_BUG_ON_PARANOID(!buffer->buf.addr);
+
+ /* Write buffer descriptors to NIC */
+ for (i = 0; i < buffer->entries; i++) {
+ index = buffer->index + i;
+ dma_addr = buffer->buf.dma_addr + (i * EF4_BUF_SIZE);
+ netif_dbg(efx, probe, efx->net_dev,
+ "mapping special buffer %d at %llx\n",
+ index, (unsigned long long)dma_addr);
+ EF4_POPULATE_QWORD_3(buf_desc,
+ FRF_AZ_BUF_ADR_REGION, 0,
+ FRF_AZ_BUF_ADR_FBUF, dma_addr >> 12,
+ FRF_AZ_BUF_OWNER_ID_FBUF, 0);
+ ef4_write_buf_tbl(efx, &buf_desc, index);
+ }
+}
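+
+/* Editor's note (illustrative): FRF_AZ_BUF_ADR_FBUF takes the DMA
+ * address in 4KB-page units, hence the >> 12 above; e.g. a page at
+ * dma_addr 0x12345000 is entered as 0x12345, and the NIC rebuilds the
+ * full address from the buffer ID when it fetches descriptors.
+ */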
+
+/* Unmaps a buffer and clears the buffer table entries */
+static void
+ef4_fini_special_buffer(struct ef4_nic *efx, struct ef4_special_buffer *buffer)
+{
+ ef4_oword_t buf_tbl_upd;
+ unsigned int start = buffer->index;
+ unsigned int end = (buffer->index + buffer->entries - 1);
+
+ if (!buffer->entries)
+ return;
+
+ netif_dbg(efx, hw, efx->net_dev, "unmapping special buffers %d-%d\n",
+ buffer->index, buffer->index + buffer->entries - 1);
+
+ EF4_POPULATE_OWORD_4(buf_tbl_upd,
+ FRF_AZ_BUF_UPD_CMD, 0,
+ FRF_AZ_BUF_CLR_CMD, 1,
+ FRF_AZ_BUF_CLR_END_ID, end,
+ FRF_AZ_BUF_CLR_START_ID, start);
+ ef4_writeo(efx, &buf_tbl_upd, FR_AZ_BUF_TBL_UPD);
+}
+
+/*
+ * Allocate a new special buffer
+ *
+ * This allocates memory for a new buffer, clears it and allocates a
+ * new buffer ID range. It does not write into the buffer table.
+ *
+ * This call will allocate 4KB buffers, since 8KB buffers can't be
+ * used for event queues and descriptor rings.
+ */
+static int ef4_alloc_special_buffer(struct ef4_nic *efx,
+ struct ef4_special_buffer *buffer,
+ unsigned int len)
+{
+ len = ALIGN(len, EF4_BUF_SIZE);
+
+ if (ef4_nic_alloc_buffer(efx, &buffer->buf, len, GFP_KERNEL))
+ return -ENOMEM;
+ buffer->entries = len / EF4_BUF_SIZE;
+ BUG_ON(buffer->buf.dma_addr & (EF4_BUF_SIZE - 1));
+
+ /* Select new buffer ID */
+ buffer->index = efx->next_buffer_table;
+ efx->next_buffer_table += buffer->entries;
+
+ netif_dbg(efx, probe, efx->net_dev,
+ "allocating special buffers %d-%d at %llx+%x "
+ "(virt %p phys %llx)\n", buffer->index,
+ buffer->index + buffer->entries - 1,
+ (u64)buffer->buf.dma_addr, len,
+ buffer->buf.addr, (u64)virt_to_phys(buffer->buf.addr));
+
+ return 0;
+}
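+
+/* Worked example (illustrative): with 4KB (EF4_BUF_SIZE) pages, a
+ * 512-entry TX or RX ring of 8-byte descriptors needs
+ * len = 512 * sizeof(ef4_qword_t) = 4096 bytes, i.e. one buffer table
+ * entry, while a 1024-entry ring needs 8192 bytes and two entries.
+ */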
+
+static void
+ef4_free_special_buffer(struct ef4_nic *efx, struct ef4_special_buffer *buffer)
+{
+ if (!buffer->buf.addr)
+ return;
+
+ netif_dbg(efx, hw, efx->net_dev,
+ "deallocating special buffers %d-%d at %llx+%x "
+ "(virt %p phys %llx)\n", buffer->index,
+ buffer->index + buffer->entries - 1,
+ (u64)buffer->buf.dma_addr, buffer->buf.len,
+ buffer->buf.addr, (u64)virt_to_phys(buffer->buf.addr));
+
+ ef4_nic_free_buffer(efx, &buffer->buf);
+ buffer->entries = 0;
+}
+
+/**************************************************************************
+ *
+ * TX path
+ *
+ **************************************************************************/
+
+/* This writes to the TX_DESC_WPTR; write pointer for TX descriptor ring */
+static inline void ef4_farch_notify_tx_desc(struct ef4_tx_queue *tx_queue)
+{
+ unsigned write_ptr;
+ ef4_dword_t reg;
+
+ write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
+ EF4_POPULATE_DWORD_1(reg, FRF_AZ_TX_DESC_WPTR_DWORD, write_ptr);
+ ef4_writed_page(tx_queue->efx, &reg,
+ FR_AZ_TX_DESC_UPD_DWORD_P0, tx_queue->queue);
+}
+
+/* Write pointer and first descriptor for TX descriptor ring */
+static inline void ef4_farch_push_tx_desc(struct ef4_tx_queue *tx_queue,
+ const ef4_qword_t *txd)
+{
+ unsigned write_ptr;
+ ef4_oword_t reg;
+
+ BUILD_BUG_ON(FRF_AZ_TX_DESC_LBN != 0);
+ BUILD_BUG_ON(FR_AA_TX_DESC_UPD_KER != FR_BZ_TX_DESC_UPD_P0);
+
+ write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
+ EF4_POPULATE_OWORD_2(reg, FRF_AZ_TX_DESC_PUSH_CMD, true,
+ FRF_AZ_TX_DESC_WPTR, write_ptr);
+ reg.qword[0] = *txd;
+ ef4_writeo_page(tx_queue->efx, &reg,
+ FR_BZ_TX_DESC_UPD_P0, tx_queue->queue);
+}
+
+
+/* For each entry inserted into the software descriptor ring, create a
+ * descriptor in the hardware TX descriptor ring (in host memory), and
+ * write a doorbell.
+ */
+void ef4_farch_tx_write(struct ef4_tx_queue *tx_queue)
+{
+ struct ef4_tx_buffer *buffer;
+ ef4_qword_t *txd;
+ unsigned write_ptr;
+ unsigned old_write_count = tx_queue->write_count;
+
+ tx_queue->xmit_more_available = false;
+ if (unlikely(tx_queue->write_count == tx_queue->insert_count))
+ return;
+
+ do {
+ write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
+ buffer = &tx_queue->buffer[write_ptr];
+ txd = ef4_tx_desc(tx_queue, write_ptr);
+ ++tx_queue->write_count;
+
+ EF4_BUG_ON_PARANOID(buffer->flags & EF4_TX_BUF_OPTION);
+
+ /* Create TX descriptor ring entry */
+ BUILD_BUG_ON(EF4_TX_BUF_CONT != 1);
+ EF4_POPULATE_QWORD_4(*txd,
+ FSF_AZ_TX_KER_CONT,
+ buffer->flags & EF4_TX_BUF_CONT,
+ FSF_AZ_TX_KER_BYTE_COUNT, buffer->len,
+ FSF_AZ_TX_KER_BUF_REGION, 0,
+ FSF_AZ_TX_KER_BUF_ADDR, buffer->dma_addr);
+ } while (tx_queue->write_count != tx_queue->insert_count);
+
+ wmb(); /* Ensure descriptors are written before they are fetched */
+
+ if (ef4_nic_may_push_tx_desc(tx_queue, old_write_count)) {
+ txd = ef4_tx_desc(tx_queue,
+ old_write_count & tx_queue->ptr_mask);
+ ef4_farch_push_tx_desc(tx_queue, txd);
+ ++tx_queue->pushes;
+ } else {
+ ef4_farch_notify_tx_desc(tx_queue);
+ }
+}
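+
+/* Editor's note (assumption based on the branch above): the push path
+ * is expected to be taken only when a single descriptor was added to
+ * an otherwise empty queue (the condition ef4_nic_may_push_tx_desc()
+ * presumably checks); every other case just rings the doorbell via
+ * ef4_farch_notify_tx_desc().
+ */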
+
+unsigned int ef4_farch_tx_limit_len(struct ef4_tx_queue *tx_queue,
+ dma_addr_t dma_addr, unsigned int len)
+{
+ /* Don't cross 4K boundaries with descriptors. */
+ unsigned int limit = (~dma_addr & (EF4_PAGE_SIZE - 1)) + 1;
+
+ len = min(limit, len);
+
+ if (EF4_WORKAROUND_5391(tx_queue->efx) && (dma_addr & 0xf))
+ len = min_t(unsigned int, len, 512 - (dma_addr & 0xf));
+
+ return len;
+}
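+
+/* Worked example (illustrative): for dma_addr = 0x1000f80 and
+ * len = 0x200, the boundary limit is (~0xf80 & 0xfff) + 1 = 0x80, so
+ * the mapping is split 128 bytes before the 4K boundary; since
+ * dma_addr & 0xf == 0 here, the workaround-5391 clamp does not apply.
+ */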
+
+
+/* Allocate hardware resources for a TX queue */
+int ef4_farch_tx_probe(struct ef4_tx_queue *tx_queue)
+{
+ struct ef4_nic *efx = tx_queue->efx;
+ unsigned entries;
+
+ entries = tx_queue->ptr_mask + 1;
+ return ef4_alloc_special_buffer(efx, &tx_queue->txd,
+ entries * sizeof(ef4_qword_t));
+}
+
+void ef4_farch_tx_init(struct ef4_tx_queue *tx_queue)
+{
+ struct ef4_nic *efx = tx_queue->efx;
+ ef4_oword_t reg;
+
+ /* Pin TX descriptor ring */
+ ef4_init_special_buffer(efx, &tx_queue->txd);
+
+ /* Push TX descriptor ring to card */
+ EF4_POPULATE_OWORD_10(reg,
+ FRF_AZ_TX_DESCQ_EN, 1,
+ FRF_AZ_TX_ISCSI_DDIG_EN, 0,
+ FRF_AZ_TX_ISCSI_HDIG_EN, 0,
+ FRF_AZ_TX_DESCQ_BUF_BASE_ID, tx_queue->txd.index,
+ FRF_AZ_TX_DESCQ_EVQ_ID,
+ tx_queue->channel->channel,
+ FRF_AZ_TX_DESCQ_OWNER_ID, 0,
+ FRF_AZ_TX_DESCQ_LABEL, tx_queue->queue,
+ FRF_AZ_TX_DESCQ_SIZE,
+ __ffs(tx_queue->txd.entries),
+ FRF_AZ_TX_DESCQ_TYPE, 0,
+ FRF_BZ_TX_NON_IP_DROP_DIS, 1);
+
+ if (ef4_nic_rev(efx) >= EF4_REV_FALCON_B0) {
+ int csum = tx_queue->queue & EF4_TXQ_TYPE_OFFLOAD;
+ EF4_SET_OWORD_FIELD(reg, FRF_BZ_TX_IP_CHKSM_DIS, !csum);
+ EF4_SET_OWORD_FIELD(reg, FRF_BZ_TX_TCP_CHKSM_DIS,
+ !csum);
+ }
+
+ ef4_writeo_table(efx, &reg, efx->type->txd_ptr_tbl_base,
+ tx_queue->queue);
+
+ if (ef4_nic_rev(efx) < EF4_REV_FALCON_B0) {
+ /* Only 128 bits in this register */
+ BUILD_BUG_ON(EF4_MAX_TX_QUEUES > 128);
+
+ ef4_reado(efx, &reg, FR_AA_TX_CHKSM_CFG);
+ if (tx_queue->queue & EF4_TXQ_TYPE_OFFLOAD)
+ __clear_bit_le(tx_queue->queue, &reg);
+ else
+ __set_bit_le(tx_queue->queue, &reg);
+ ef4_writeo(efx, &reg, FR_AA_TX_CHKSM_CFG);
+ }
+
+ if (ef4_nic_rev(efx) >= EF4_REV_FALCON_B0) {
+ EF4_POPULATE_OWORD_1(reg,
+ FRF_BZ_TX_PACE,
+ (tx_queue->queue & EF4_TXQ_TYPE_HIGHPRI) ?
+ FFE_BZ_TX_PACE_OFF :
+ FFE_BZ_TX_PACE_RESERVED);
+ ef4_writeo_table(efx, &reg, FR_BZ_TX_PACE_TBL,
+ tx_queue->queue);
+ }
+}
+
+static void ef4_farch_flush_tx_queue(struct ef4_tx_queue *tx_queue)
+{
+ struct ef4_nic *efx = tx_queue->efx;
+ ef4_oword_t tx_flush_descq;
+
+ WARN_ON(atomic_read(&tx_queue->flush_outstanding));
+ atomic_set(&tx_queue->flush_outstanding, 1);
+
+ EF4_POPULATE_OWORD_2(tx_flush_descq,
+ FRF_AZ_TX_FLUSH_DESCQ_CMD, 1,
+ FRF_AZ_TX_FLUSH_DESCQ, tx_queue->queue);
+ ef4_writeo(efx, &tx_flush_descq, FR_AZ_TX_FLUSH_DESCQ);
+}
+
+void ef4_farch_tx_fini(struct ef4_tx_queue *tx_queue)
+{
+ struct ef4_nic *efx = tx_queue->efx;
+ ef4_oword_t tx_desc_ptr;
+
+ /* Remove TX descriptor ring from card */
+ EF4_ZERO_OWORD(tx_desc_ptr);
+ ef4_writeo_table(efx, &tx_desc_ptr, efx->type->txd_ptr_tbl_base,
+ tx_queue->queue);
+
+ /* Unpin TX descriptor ring */
+ ef4_fini_special_buffer(efx, &tx_queue->txd);
+}
+
+/* Free buffers backing TX queue */
+void ef4_farch_tx_remove(struct ef4_tx_queue *tx_queue)
+{
+ ef4_free_special_buffer(tx_queue->efx, &tx_queue->txd);
+}
+
+/**************************************************************************
+ *
+ * RX path
+ *
+ **************************************************************************/
+
+/* This creates an entry in the RX descriptor queue */
+static inline void
+ef4_farch_build_rx_desc(struct ef4_rx_queue *rx_queue, unsigned index)
+{
+ struct ef4_rx_buffer *rx_buf;
+ ef4_qword_t *rxd;
+
+ rxd = ef4_rx_desc(rx_queue, index);
+ rx_buf = ef4_rx_buffer(rx_queue, index);
+ EF4_POPULATE_QWORD_3(*rxd,
+ FSF_AZ_RX_KER_BUF_SIZE,
+ rx_buf->len -
+ rx_queue->efx->type->rx_buffer_padding,
+ FSF_AZ_RX_KER_BUF_REGION, 0,
+ FSF_AZ_RX_KER_BUF_ADDR, rx_buf->dma_addr);
+}
+
+/* This writes to the RX_DESC_WPTR register for the specified receive
+ * descriptor ring.
+ */
+void ef4_farch_rx_write(struct ef4_rx_queue *rx_queue)
+{
+ struct ef4_nic *efx = rx_queue->efx;
+ ef4_dword_t reg;
+ unsigned write_ptr;
+
+ while (rx_queue->notified_count != rx_queue->added_count) {
+ ef4_farch_build_rx_desc(
+ rx_queue,
+ rx_queue->notified_count & rx_queue->ptr_mask);
+ ++rx_queue->notified_count;
+ }
+
+ wmb();
+ write_ptr = rx_queue->added_count & rx_queue->ptr_mask;
+ EF4_POPULATE_DWORD_1(reg, FRF_AZ_RX_DESC_WPTR_DWORD, write_ptr);
+ ef4_writed_page(efx, &reg, FR_AZ_RX_DESC_UPD_DWORD_P0,
+ ef4_rx_queue_index(rx_queue));
+}
+
+int ef4_farch_rx_probe(struct ef4_rx_queue *rx_queue)
+{
+ struct ef4_nic *efx = rx_queue->efx;
+ unsigned entries;
+
+ entries = rx_queue->ptr_mask + 1;
+ return ef4_alloc_special_buffer(efx, &rx_queue->rxd,
+ entries * sizeof(ef4_qword_t));
+}
+
+void ef4_farch_rx_init(struct ef4_rx_queue *rx_queue)
+{
+ ef4_oword_t rx_desc_ptr;
+ struct ef4_nic *efx = rx_queue->efx;
+ bool is_b0 = ef4_nic_rev(efx) >= EF4_REV_FALCON_B0;
+ bool iscsi_digest_en = is_b0;
+ bool jumbo_en;
+
+ /* For kernel-mode queues in Falcon A1, the JUMBO flag enables
+ * DMA to continue after a PCIe page boundary (and scattering
+ * is not possible). In Falcon B0 and Siena, it enables
+ * scatter.
+ */
+ jumbo_en = !is_b0 || efx->rx_scatter;
+
+ netif_dbg(efx, hw, efx->net_dev,
+ "RX queue %d ring in special buffers %d-%d\n",
+ ef4_rx_queue_index(rx_queue), rx_queue->rxd.index,
+ rx_queue->rxd.index + rx_queue->rxd.entries - 1);
+
+ rx_queue->scatter_n = 0;
+
+ /* Pin RX descriptor ring */
+ ef4_init_special_buffer(efx, &rx_queue->rxd);
+
+ /* Push RX descriptor ring to card */
+ EF4_POPULATE_OWORD_10(rx_desc_ptr,
+ FRF_AZ_RX_ISCSI_DDIG_EN, iscsi_digest_en,
+ FRF_AZ_RX_ISCSI_HDIG_EN, iscsi_digest_en,
+ FRF_AZ_RX_DESCQ_BUF_BASE_ID, rx_queue->rxd.index,
+ FRF_AZ_RX_DESCQ_EVQ_ID,
+ ef4_rx_queue_channel(rx_queue)->channel,
+ FRF_AZ_RX_DESCQ_OWNER_ID, 0,
+ FRF_AZ_RX_DESCQ_LABEL,
+ ef4_rx_queue_index(rx_queue),
+ FRF_AZ_RX_DESCQ_SIZE,
+ __ffs(rx_queue->rxd.entries),
+ FRF_AZ_RX_DESCQ_TYPE, 0 /* kernel queue */ ,
+ FRF_AZ_RX_DESCQ_JUMBO, jumbo_en,
+ FRF_AZ_RX_DESCQ_EN, 1);
+ ef4_writeo_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base,
+ ef4_rx_queue_index(rx_queue));
+}
+
+static void ef4_farch_flush_rx_queue(struct ef4_rx_queue *rx_queue)
+{
+ struct ef4_nic *efx = rx_queue->efx;
+ ef4_oword_t rx_flush_descq;
+
+ EF4_POPULATE_OWORD_2(rx_flush_descq,
+ FRF_AZ_RX_FLUSH_DESCQ_CMD, 1,
+ FRF_AZ_RX_FLUSH_DESCQ,
+ ef4_rx_queue_index(rx_queue));
+ ef4_writeo(efx, &rx_flush_descq, FR_AZ_RX_FLUSH_DESCQ);
+}
+
+void ef4_farch_rx_fini(struct ef4_rx_queue *rx_queue)
+{
+ ef4_oword_t rx_desc_ptr;
+ struct ef4_nic *efx = rx_queue->efx;
+
+ /* Remove RX descriptor ring from card */
+ EF4_ZERO_OWORD(rx_desc_ptr);
+ ef4_writeo_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base,
+ ef4_rx_queue_index(rx_queue));
+
+ /* Unpin RX descriptor ring */
+ ef4_fini_special_buffer(efx, &rx_queue->rxd);
+}
+
+/* Free buffers backing RX queue */
+void ef4_farch_rx_remove(struct ef4_rx_queue *rx_queue)
+{
+ ef4_free_special_buffer(rx_queue->efx, &rx_queue->rxd);
+}
+
+/**************************************************************************
+ *
+ * Flush handling
+ *
+ **************************************************************************/
+
+/* ef4_farch_do_flush() must be woken up when all flushes are completed,
+ * or more RX flushes can be kicked off.
+ */
+static bool ef4_farch_flush_wake(struct ef4_nic *efx)
+{
+	/* Ensure that all updates are visible to ef4_farch_do_flush() */
+ smp_mb();
+
+ return (atomic_read(&efx->active_queues) == 0 ||
+ (atomic_read(&efx->rxq_flush_outstanding) < EF4_RX_FLUSH_COUNT
+ && atomic_read(&efx->rxq_flush_pending) > 0));
+}
+
+static bool ef4_check_tx_flush_complete(struct ef4_nic *efx)
+{
+ bool i = true;
+ ef4_oword_t txd_ptr_tbl;
+ struct ef4_channel *channel;
+ struct ef4_tx_queue *tx_queue;
+
+ ef4_for_each_channel(channel, efx) {
+ ef4_for_each_channel_tx_queue(tx_queue, channel) {
+ ef4_reado_table(efx, &txd_ptr_tbl,
+ FR_BZ_TX_DESC_PTR_TBL, tx_queue->queue);
+ if (EF4_OWORD_FIELD(txd_ptr_tbl,
+ FRF_AZ_TX_DESCQ_FLUSH) ||
+ EF4_OWORD_FIELD(txd_ptr_tbl,
+ FRF_AZ_TX_DESCQ_EN)) {
+ netif_dbg(efx, hw, efx->net_dev,
+ "flush did not complete on TXQ %d\n",
+ tx_queue->queue);
+ i = false;
+ } else if (atomic_cmpxchg(&tx_queue->flush_outstanding,
+ 1, 0)) {
+ /* The flush is complete, but we didn't
+ * receive a flush completion event
+ */
+ netif_dbg(efx, hw, efx->net_dev,
+ "flush complete on TXQ %d, so drain "
+ "the queue\n", tx_queue->queue);
+ /* Don't need to increment active_queues as it
+ * has already been incremented for the queues
+ * which did not drain
+ */
+ ef4_farch_magic_event(channel,
+ EF4_CHANNEL_MAGIC_TX_DRAIN(
+ tx_queue));
+ }
+ }
+ }
+
+ return i;
+}
+
+/* Flush all the transmit queues, and continue flushing receive queues until
+ * they're all flushed. Wait for the DRAIN events to be received so that there
+ * are no more RX and TX events left on any channel. */
+static int ef4_farch_do_flush(struct ef4_nic *efx)
+{
+ unsigned timeout = msecs_to_jiffies(5000); /* 5s for all flushes and drains */
+ struct ef4_channel *channel;
+ struct ef4_rx_queue *rx_queue;
+ struct ef4_tx_queue *tx_queue;
+ int rc = 0;
+
+ ef4_for_each_channel(channel, efx) {
+ ef4_for_each_channel_tx_queue(tx_queue, channel) {
+ ef4_farch_flush_tx_queue(tx_queue);
+ }
+ ef4_for_each_channel_rx_queue(rx_queue, channel) {
+ rx_queue->flush_pending = true;
+ atomic_inc(&efx->rxq_flush_pending);
+ }
+ }
+
+ while (timeout && atomic_read(&efx->active_queues) > 0) {
+ /* The hardware supports four concurrent rx flushes, each of
+ * which may need to be retried if there is an outstanding
+ * descriptor fetch
+ */
+ ef4_for_each_channel(channel, efx) {
+ ef4_for_each_channel_rx_queue(rx_queue, channel) {
+ if (atomic_read(&efx->rxq_flush_outstanding) >=
+ EF4_RX_FLUSH_COUNT)
+ break;
+
+ if (rx_queue->flush_pending) {
+ rx_queue->flush_pending = false;
+ atomic_dec(&efx->rxq_flush_pending);
+ atomic_inc(&efx->rxq_flush_outstanding);
+ ef4_farch_flush_rx_queue(rx_queue);
+ }
+ }
+ }
+
+ timeout = wait_event_timeout(efx->flush_wq,
+ ef4_farch_flush_wake(efx),
+ timeout);
+ }
+
+ if (atomic_read(&efx->active_queues) &&
+ !ef4_check_tx_flush_complete(efx)) {
+ netif_err(efx, hw, efx->net_dev, "failed to flush %d queues "
+ "(rx %d+%d)\n", atomic_read(&efx->active_queues),
+ atomic_read(&efx->rxq_flush_outstanding),
+ atomic_read(&efx->rxq_flush_pending));
+ rc = -ETIMEDOUT;
+
+ atomic_set(&efx->active_queues, 0);
+ atomic_set(&efx->rxq_flush_pending, 0);
+ atomic_set(&efx->rxq_flush_outstanding, 0);
+ }
+
+ return rc;
+}
+
+int ef4_farch_fini_dmaq(struct ef4_nic *efx)
+{
+ struct ef4_channel *channel;
+ struct ef4_tx_queue *tx_queue;
+ struct ef4_rx_queue *rx_queue;
+ int rc = 0;
+
+ /* Do not attempt to write to the NIC during EEH recovery */
+ if (efx->state != STATE_RECOVERY) {
+ /* Only perform flush if DMA is enabled */
+ if (efx->pci_dev->is_busmaster) {
+ efx->type->prepare_flush(efx);
+ rc = ef4_farch_do_flush(efx);
+ efx->type->finish_flush(efx);
+ }
+
+ ef4_for_each_channel(channel, efx) {
+ ef4_for_each_channel_rx_queue(rx_queue, channel)
+ ef4_farch_rx_fini(rx_queue);
+ ef4_for_each_channel_tx_queue(tx_queue, channel)
+ ef4_farch_tx_fini(tx_queue);
+ }
+ }
+
+ return rc;
+}
+
+/* Reset queue and flush accounting after FLR
+ *
+ * One possible cause of FLR recovery is that DMA may be failing (e.g. if bus
+ * mastering was disabled), in which case we don't receive (RXQ) flush
+ * completion events. This means that efx->rxq_flush_outstanding remained at 4
+ * after the FLR; also, efx->active_queues was non-zero (as no flush completion
+ * events were received, and we didn't go through ef4_check_tx_flush_complete()).
+ * If we don't fix this up, on the next call to ef4_realloc_channels() we won't
+ * flush any RX queues because efx->rxq_flush_outstanding is at the limit of 4
+ * for batched flush requests; and the efx->active_queues gets messed up because
+ * we keep incrementing for the newly initialised queues, but it never went to
+ * zero previously. Then we get a timeout every time we try to restart the
+ * queues, as it doesn't go back to zero when we should be flushing the queues.
+ */
+void ef4_farch_finish_flr(struct ef4_nic *efx)
+{
+ atomic_set(&efx->rxq_flush_pending, 0);
+ atomic_set(&efx->rxq_flush_outstanding, 0);
+ atomic_set(&efx->active_queues, 0);
+}
+
+
+/**************************************************************************
+ *
+ * Event queue processing
+ * Event queues are processed by per-channel tasklets.
+ *
+ **************************************************************************/
+
+/* Update a channel's event queue's read pointer (RPTR) register
+ *
+ * This writes the EVQ_RPTR_REG register for the specified channel's
+ * event queue.
+ */
+void ef4_farch_ev_read_ack(struct ef4_channel *channel)
+{
+ ef4_dword_t reg;
+ struct ef4_nic *efx = channel->efx;
+
+ EF4_POPULATE_DWORD_1(reg, FRF_AZ_EVQ_RPTR,
+ channel->eventq_read_ptr & channel->eventq_mask);
+
+ /* For Falcon A1, EVQ_RPTR_KER is documented as having a step size
+ * of 4 bytes, but it is really 16 bytes just like later revisions.
+ */
+ ef4_writed(efx, &reg,
+ efx->type->evq_rptr_tbl_base +
+ FR_BZ_EVQ_RPTR_STEP * channel->channel);
+}
+
+/* Use HW to insert a SW defined event */
+void ef4_farch_generate_event(struct ef4_nic *efx, unsigned int evq,
+ ef4_qword_t *event)
+{
+ ef4_oword_t drv_ev_reg;
+
+ BUILD_BUG_ON(FRF_AZ_DRV_EV_DATA_LBN != 0 ||
+ FRF_AZ_DRV_EV_DATA_WIDTH != 64);
+ drv_ev_reg.u32[0] = event->u32[0];
+ drv_ev_reg.u32[1] = event->u32[1];
+ drv_ev_reg.u32[2] = 0;
+ drv_ev_reg.u32[3] = 0;
+ EF4_SET_OWORD_FIELD(drv_ev_reg, FRF_AZ_DRV_EV_QID, evq);
+ ef4_writeo(efx, &drv_ev_reg, FR_AZ_DRV_EV);
+}
+
+static void ef4_farch_magic_event(struct ef4_channel *channel, u32 magic)
+{
+ ef4_qword_t event;
+
+ EF4_POPULATE_QWORD_2(event, FSF_AZ_EV_CODE,
+ FSE_AZ_EV_CODE_DRV_GEN_EV,
+ FSF_AZ_DRV_GEN_EV_MAGIC, magic);
+ ef4_farch_generate_event(channel->efx, channel->channel, &event);
+}
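+
+/* Usage sketch (assumed, not from the patch): the event self-test is
+ * expected to exercise this path with something like
+ *
+ *	ef4_farch_magic_event(channel, EF4_CHANNEL_MAGIC_TEST(channel));
+ *
+ * and then poll for channel->event_test_cpu being set by the
+ * generated-event handler further down.
+ */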
+
+/* Handle a transmit completion event
+ *
+ * The NIC batches TX completion events; the message we receive is of
+ * the form "complete all TX events up to this index".
+ */
+static int
+ef4_farch_handle_tx_event(struct ef4_channel *channel, ef4_qword_t *event)
+{
+ unsigned int tx_ev_desc_ptr;
+ unsigned int tx_ev_q_label;
+ struct ef4_tx_queue *tx_queue;
+ struct ef4_nic *efx = channel->efx;
+ int tx_packets = 0;
+
+ if (unlikely(ACCESS_ONCE(efx->reset_pending)))
+ return 0;
+
+ if (likely(EF4_QWORD_FIELD(*event, FSF_AZ_TX_EV_COMP))) {
+ /* Transmit completion */
+ tx_ev_desc_ptr = EF4_QWORD_FIELD(*event, FSF_AZ_TX_EV_DESC_PTR);
+ tx_ev_q_label = EF4_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL);
+ tx_queue = ef4_channel_get_tx_queue(
+ channel, tx_ev_q_label % EF4_TXQ_TYPES);
+ tx_packets = ((tx_ev_desc_ptr - tx_queue->read_count) &
+ tx_queue->ptr_mask);
+ ef4_xmit_done(tx_queue, tx_ev_desc_ptr);
+ } else if (EF4_QWORD_FIELD(*event, FSF_AZ_TX_EV_WQ_FF_FULL)) {
+ /* Rewrite the FIFO write pointer */
+ tx_ev_q_label = EF4_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL);
+ tx_queue = ef4_channel_get_tx_queue(
+ channel, tx_ev_q_label % EF4_TXQ_TYPES);
+
+ netif_tx_lock(efx->net_dev);
+ ef4_farch_notify_tx_desc(tx_queue);
+ netif_tx_unlock(efx->net_dev);
+ } else if (EF4_QWORD_FIELD(*event, FSF_AZ_TX_EV_PKT_ERR)) {
+ ef4_schedule_reset(efx, RESET_TYPE_DMA_ERROR);
+ } else {
+ netif_err(efx, tx_err, efx->net_dev,
+ "channel %d unexpected TX event "
+ EF4_QWORD_FMT"\n", channel->channel,
+ EF4_QWORD_VAL(*event));
+ }
+
+ return tx_packets;
+}
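+
+/* Worked example (illustrative): with read_count = 10,
+ * ptr_mask = 1023 and tx_ev_desc_ptr = 14, the event completes
+ * tx_packets = (14 - 10) & 1023 = 4 descriptors; the mask also covers
+ * wraparound, e.g. (2 - 1022) & 1023 = 4.
+ */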
+
+/* Detect errors included in the rx_evt_pkt_ok bit. */
+static u16 ef4_farch_handle_rx_not_ok(struct ef4_rx_queue *rx_queue,
+ const ef4_qword_t *event)
+{
+ struct ef4_channel *channel = ef4_rx_queue_channel(rx_queue);
+ struct ef4_nic *efx = rx_queue->efx;
+ bool rx_ev_buf_owner_id_err, rx_ev_ip_hdr_chksum_err;
+ bool rx_ev_tcp_udp_chksum_err, rx_ev_eth_crc_err;
+ bool rx_ev_frm_trunc, rx_ev_drib_nib, rx_ev_tobe_disc;
+ bool rx_ev_other_err, rx_ev_pause_frm;
+ bool rx_ev_hdr_type, rx_ev_mcast_pkt;
+ unsigned rx_ev_pkt_type;
+
+ rx_ev_hdr_type = EF4_QWORD_FIELD(*event, FSF_AZ_RX_EV_HDR_TYPE);
+ rx_ev_mcast_pkt = EF4_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_PKT);
+ rx_ev_tobe_disc = EF4_QWORD_FIELD(*event, FSF_AZ_RX_EV_TOBE_DISC);
+ rx_ev_pkt_type = EF4_QWORD_FIELD(*event, FSF_AZ_RX_EV_PKT_TYPE);
+ rx_ev_buf_owner_id_err = EF4_QWORD_FIELD(*event,
+ FSF_AZ_RX_EV_BUF_OWNER_ID_ERR);
+ rx_ev_ip_hdr_chksum_err = EF4_QWORD_FIELD(*event,
+ FSF_AZ_RX_EV_IP_HDR_CHKSUM_ERR);
+ rx_ev_tcp_udp_chksum_err = EF4_QWORD_FIELD(*event,
+ FSF_AZ_RX_EV_TCP_UDP_CHKSUM_ERR);
+ rx_ev_eth_crc_err = EF4_QWORD_FIELD(*event, FSF_AZ_RX_EV_ETH_CRC_ERR);
+ rx_ev_frm_trunc = EF4_QWORD_FIELD(*event, FSF_AZ_RX_EV_FRM_TRUNC);
+ rx_ev_drib_nib = ((ef4_nic_rev(efx) >= EF4_REV_FALCON_B0) ?
+ 0 : EF4_QWORD_FIELD(*event, FSF_AA_RX_EV_DRIB_NIB));
+ rx_ev_pause_frm = EF4_QWORD_FIELD(*event, FSF_AZ_RX_EV_PAUSE_FRM_ERR);
+
+ /* Every error apart from tobe_disc and pause_frm */
+ rx_ev_other_err = (rx_ev_drib_nib | rx_ev_tcp_udp_chksum_err |
+ rx_ev_buf_owner_id_err | rx_ev_eth_crc_err |
+ rx_ev_frm_trunc | rx_ev_ip_hdr_chksum_err);
+
+ /* Count errors that are not in MAC stats. Ignore expected
+ * checksum errors during self-test. */
+ if (rx_ev_frm_trunc)
+ ++channel->n_rx_frm_trunc;
+ else if (rx_ev_tobe_disc)
+ ++channel->n_rx_tobe_disc;
+ else if (!efx->loopback_selftest) {
+ if (rx_ev_ip_hdr_chksum_err)
+ ++channel->n_rx_ip_hdr_chksum_err;
+ else if (rx_ev_tcp_udp_chksum_err)
+ ++channel->n_rx_tcp_udp_chksum_err;
+ }
+
+ /* TOBE_DISC is expected on unicast mismatches; don't print out an
+ * error message. FRM_TRUNC indicates RXDP dropped the packet due
+ * to a FIFO overflow.
+ */
+#ifdef DEBUG
+ if (rx_ev_other_err && net_ratelimit()) {
+ netif_dbg(efx, rx_err, efx->net_dev,
+ " RX queue %d unexpected RX event "
+ EF4_QWORD_FMT "%s%s%s%s%s%s%s%s\n",
+ ef4_rx_queue_index(rx_queue), EF4_QWORD_VAL(*event),
+ rx_ev_buf_owner_id_err ? " [OWNER_ID_ERR]" : "",
+ rx_ev_ip_hdr_chksum_err ?
+ " [IP_HDR_CHKSUM_ERR]" : "",
+ rx_ev_tcp_udp_chksum_err ?
+ " [TCP_UDP_CHKSUM_ERR]" : "",
+ rx_ev_eth_crc_err ? " [ETH_CRC_ERR]" : "",
+ rx_ev_frm_trunc ? " [FRM_TRUNC]" : "",
+ rx_ev_drib_nib ? " [DRIB_NIB]" : "",
+ rx_ev_tobe_disc ? " [TOBE_DISC]" : "",
+ rx_ev_pause_frm ? " [PAUSE]" : "");
+ }
+#endif
+
+ /* The frame must be discarded if any of these are true. */
+ return (rx_ev_eth_crc_err | rx_ev_frm_trunc | rx_ev_drib_nib |
+ rx_ev_tobe_disc | rx_ev_pause_frm) ?
+ EF4_RX_PKT_DISCARD : 0;
+}
+
+/* Handle receive events that are not in-order. Return true if this
+ * can be handled as a partial packet discard, false if it's more
+ * serious.
+ */
+static bool
+ef4_farch_handle_rx_bad_index(struct ef4_rx_queue *rx_queue, unsigned index)
+{
+ struct ef4_channel *channel = ef4_rx_queue_channel(rx_queue);
+ struct ef4_nic *efx = rx_queue->efx;
+ unsigned expected, dropped;
+
+ if (rx_queue->scatter_n &&
+ index == ((rx_queue->removed_count + rx_queue->scatter_n - 1) &
+ rx_queue->ptr_mask)) {
+ ++channel->n_rx_nodesc_trunc;
+ return true;
+ }
+
+ expected = rx_queue->removed_count & rx_queue->ptr_mask;
+ dropped = (index - expected) & rx_queue->ptr_mask;
+ netif_info(efx, rx_err, efx->net_dev,
+ "dropped %d events (index=%d expected=%d)\n",
+ dropped, index, expected);
+
+ ef4_schedule_reset(efx, EF4_WORKAROUND_5676(efx) ?
+ RESET_TYPE_RX_RECOVERY : RESET_TYPE_DISABLE);
+ return false;
+}
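+
+/* Worked example (illustrative): with expected =
+ * removed_count & ptr_mask = 1020 and an event arriving at index 4 on
+ * a 1024-entry ring, dropped = (4 - 1020) & 1023 = 8, i.e. eight
+ * events were lost, and a recovery reset is scheduled above.
+ */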
+
+/* Handle a packet received event
+ *
+ * The NIC gives a "discard" flag if it's a unicast packet with the
+ * wrong destination address
+ * Also "is multicast" and "matches multicast filter" flags can be used to
+ * discard non-matching multicast packets.
+ */
+static void
+ef4_farch_handle_rx_event(struct ef4_channel *channel, const ef4_qword_t *event)
+{
+ unsigned int rx_ev_desc_ptr, rx_ev_byte_cnt;
+ unsigned int rx_ev_hdr_type, rx_ev_mcast_pkt;
+ unsigned expected_ptr;
+ bool rx_ev_pkt_ok, rx_ev_sop, rx_ev_cont;
+ u16 flags;
+ struct ef4_rx_queue *rx_queue;
+ struct ef4_nic *efx = channel->efx;
+
+ if (unlikely(ACCESS_ONCE(efx->reset_pending)))
+ return;
+
+ rx_ev_cont = EF4_QWORD_FIELD(*event, FSF_AZ_RX_EV_JUMBO_CONT);
+ rx_ev_sop = EF4_QWORD_FIELD(*event, FSF_AZ_RX_EV_SOP);
+ WARN_ON(EF4_QWORD_FIELD(*event, FSF_AZ_RX_EV_Q_LABEL) !=
+ channel->channel);
+
+ rx_queue = ef4_channel_get_rx_queue(channel);
+
+ rx_ev_desc_ptr = EF4_QWORD_FIELD(*event, FSF_AZ_RX_EV_DESC_PTR);
+ expected_ptr = ((rx_queue->removed_count + rx_queue->scatter_n) &
+ rx_queue->ptr_mask);
+
+ /* Check for partial drops and other errors */
+ if (unlikely(rx_ev_desc_ptr != expected_ptr) ||
+ unlikely(rx_ev_sop != (rx_queue->scatter_n == 0))) {
+ if (rx_ev_desc_ptr != expected_ptr &&
+ !ef4_farch_handle_rx_bad_index(rx_queue, rx_ev_desc_ptr))
+ return;
+
+ /* Discard all pending fragments */
+ if (rx_queue->scatter_n) {
+ ef4_rx_packet(
+ rx_queue,
+ rx_queue->removed_count & rx_queue->ptr_mask,
+ rx_queue->scatter_n, 0, EF4_RX_PKT_DISCARD);
+ rx_queue->removed_count += rx_queue->scatter_n;
+ rx_queue->scatter_n = 0;
+ }
+
+ /* Return if there is no new fragment */
+ if (rx_ev_desc_ptr != expected_ptr)
+ return;
+
+ /* Discard new fragment if not SOP */
+ if (!rx_ev_sop) {
+ ef4_rx_packet(
+ rx_queue,
+ rx_queue->removed_count & rx_queue->ptr_mask,
+ 1, 0, EF4_RX_PKT_DISCARD);
+ ++rx_queue->removed_count;
+ return;
+ }
+ }
+
+ ++rx_queue->scatter_n;
+ if (rx_ev_cont)
+ return;
+
+ rx_ev_byte_cnt = EF4_QWORD_FIELD(*event, FSF_AZ_RX_EV_BYTE_CNT);
+ rx_ev_pkt_ok = EF4_QWORD_FIELD(*event, FSF_AZ_RX_EV_PKT_OK);
+ rx_ev_hdr_type = EF4_QWORD_FIELD(*event, FSF_AZ_RX_EV_HDR_TYPE);
+
+ if (likely(rx_ev_pkt_ok)) {
+ /* If packet is marked as OK then we can rely on the
+ * hardware checksum and classification.
+ */
+ flags = 0;
+ switch (rx_ev_hdr_type) {
+ case FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_TCP:
+ flags |= EF4_RX_PKT_TCP;
+ /* fall through */
+ case FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_UDP:
+ flags |= EF4_RX_PKT_CSUMMED;
+ /* fall through */
+ case FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_OTHER:
+ case FSE_AZ_RX_EV_HDR_TYPE_OTHER:
+ break;
+ }
+ } else {
+ flags = ef4_farch_handle_rx_not_ok(rx_queue, event);
+ }
+
+ /* Detect multicast packets that didn't match the filter */
+ rx_ev_mcast_pkt = EF4_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_PKT);
+ if (rx_ev_mcast_pkt) {
+ unsigned int rx_ev_mcast_hash_match =
+ EF4_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_HASH_MATCH);
+
+ if (unlikely(!rx_ev_mcast_hash_match)) {
+ ++channel->n_rx_mcast_mismatch;
+ flags |= EF4_RX_PKT_DISCARD;
+ }
+ }
+
+ channel->irq_mod_score += 2;
+
+ /* Handle received packet */
+ ef4_rx_packet(rx_queue,
+ rx_queue->removed_count & rx_queue->ptr_mask,
+ rx_queue->scatter_n, rx_ev_byte_cnt, flags);
+ rx_queue->removed_count += rx_queue->scatter_n;
+ rx_queue->scatter_n = 0;
+}
+
+/* If this flush done event corresponds to a &struct ef4_tx_queue, then
+ * send an %EF4_CHANNEL_MAGIC_TX_DRAIN event to drain the event queue
+ * of all transmit completions.
+ */
+static void
+ef4_farch_handle_tx_flush_done(struct ef4_nic *efx, ef4_qword_t *event)
+{
+ struct ef4_tx_queue *tx_queue;
+ int qid;
+
+ qid = EF4_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBDATA);
+ if (qid < EF4_TXQ_TYPES * efx->n_tx_channels) {
+ tx_queue = ef4_get_tx_queue(efx, qid / EF4_TXQ_TYPES,
+ qid % EF4_TXQ_TYPES);
+ if (atomic_cmpxchg(&tx_queue->flush_outstanding, 1, 0)) {
+ ef4_farch_magic_event(tx_queue->channel,
+ EF4_CHANNEL_MAGIC_TX_DRAIN(tx_queue));
+ }
+ }
+}
+
+/* If this flush done event corresponds to a &struct ef4_rx_queue: If the flush
+ * was successful then send an %EF4_CHANNEL_MAGIC_RX_DRAIN, otherwise add
+ * the RX queue back to the mask of RX queues in need of flushing.
+ */
+static void
+ef4_farch_handle_rx_flush_done(struct ef4_nic *efx, ef4_qword_t *event)
+{
+ struct ef4_channel *channel;
+ struct ef4_rx_queue *rx_queue;
+ int qid;
+ bool failed;
+
+ qid = EF4_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_RX_DESCQ_ID);
+ failed = EF4_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_RX_FLUSH_FAIL);
+ if (qid >= efx->n_channels)
+ return;
+ channel = ef4_get_channel(efx, qid);
+ if (!ef4_channel_has_rx_queue(channel))
+ return;
+ rx_queue = ef4_channel_get_rx_queue(channel);
+
+ if (failed) {
+ netif_info(efx, hw, efx->net_dev,
+ "RXQ %d flush retry\n", qid);
+ rx_queue->flush_pending = true;
+ atomic_inc(&efx->rxq_flush_pending);
+ } else {
+ ef4_farch_magic_event(ef4_rx_queue_channel(rx_queue),
+ EF4_CHANNEL_MAGIC_RX_DRAIN(rx_queue));
+ }
+ atomic_dec(&efx->rxq_flush_outstanding);
+ if (ef4_farch_flush_wake(efx))
+ wake_up(&efx->flush_wq);
+}
+
+static void
+ef4_farch_handle_drain_event(struct ef4_channel *channel)
+{
+ struct ef4_nic *efx = channel->efx;
+
+ WARN_ON(atomic_read(&efx->active_queues) == 0);
+ atomic_dec(&efx->active_queues);
+ if (ef4_farch_flush_wake(efx))
+ wake_up(&efx->flush_wq);
+}
+
+static void ef4_farch_handle_generated_event(struct ef4_channel *channel,
+ ef4_qword_t *event)
+{
+ struct ef4_nic *efx = channel->efx;
+ struct ef4_rx_queue *rx_queue =
+ ef4_channel_has_rx_queue(channel) ?
+ ef4_channel_get_rx_queue(channel) : NULL;
+ unsigned magic, code;
+
+ magic = EF4_QWORD_FIELD(*event, FSF_AZ_DRV_GEN_EV_MAGIC);
+ code = _EF4_CHANNEL_MAGIC_CODE(magic);
+
+ if (magic == EF4_CHANNEL_MAGIC_TEST(channel)) {
+ channel->event_test_cpu = raw_smp_processor_id();
+ } else if (rx_queue && magic == EF4_CHANNEL_MAGIC_FILL(rx_queue)) {
+		/* The queue must be empty, so we won't receive any rx
+		 * events, and hence ef4_process_channel() won't refill
+		 * the queue. Refill it here */
+ ef4_fast_push_rx_descriptors(rx_queue, true);
+ } else if (rx_queue && magic == EF4_CHANNEL_MAGIC_RX_DRAIN(rx_queue)) {
+ ef4_farch_handle_drain_event(channel);
+ } else if (code == _EF4_CHANNEL_MAGIC_TX_DRAIN) {
+ ef4_farch_handle_drain_event(channel);
+ } else {
+ netif_dbg(efx, hw, efx->net_dev, "channel %d received "
+ "generated event "EF4_QWORD_FMT"\n",
+ channel->channel, EF4_QWORD_VAL(*event));
+ }
+}
+
+static void
+ef4_farch_handle_driver_event(struct ef4_channel *channel, ef4_qword_t *event)
+{
+ struct ef4_nic *efx = channel->efx;
+ unsigned int ev_sub_code;
+ unsigned int ev_sub_data;
+
+ ev_sub_code = EF4_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBCODE);
+ ev_sub_data = EF4_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBDATA);
+
+ switch (ev_sub_code) {
+ case FSE_AZ_TX_DESCQ_FLS_DONE_EV:
+ netif_vdbg(efx, hw, efx->net_dev, "channel %d TXQ %d flushed\n",
+ channel->channel, ev_sub_data);
+ ef4_farch_handle_tx_flush_done(efx, event);
+ break;
+ case FSE_AZ_RX_DESCQ_FLS_DONE_EV:
+ netif_vdbg(efx, hw, efx->net_dev, "channel %d RXQ %d flushed\n",
+ channel->channel, ev_sub_data);
+ ef4_farch_handle_rx_flush_done(efx, event);
+ break;
+ case FSE_AZ_EVQ_INIT_DONE_EV:
+ netif_dbg(efx, hw, efx->net_dev,
+ "channel %d EVQ %d initialised\n",
+ channel->channel, ev_sub_data);
+ break;
+ case FSE_AZ_SRM_UPD_DONE_EV:
+ netif_vdbg(efx, hw, efx->net_dev,
+ "channel %d SRAM update done\n", channel->channel);
+ break;
+ case FSE_AZ_WAKE_UP_EV:
+ netif_vdbg(efx, hw, efx->net_dev,
+ "channel %d RXQ %d wakeup event\n",
+ channel->channel, ev_sub_data);
+ break;
+ case FSE_AZ_TIMER_EV:
+ netif_vdbg(efx, hw, efx->net_dev,
+ "channel %d RX queue %d timer expired\n",
+ channel->channel, ev_sub_data);
+ break;
+ case FSE_AA_RX_RECOVER_EV:
+ netif_err(efx, rx_err, efx->net_dev,
+ "channel %d seen DRIVER RX_RESET event. "
+ "Resetting.\n", channel->channel);
+ atomic_inc(&efx->rx_reset);
+ ef4_schedule_reset(efx,
+ EF4_WORKAROUND_6555(efx) ?
+ RESET_TYPE_RX_RECOVERY :
+ RESET_TYPE_DISABLE);
+ break;
+ case FSE_BZ_RX_DSC_ERROR_EV:
+ netif_err(efx, rx_err, efx->net_dev,
+ "RX DMA Q %d reports descriptor fetch error."
+ " RX Q %d is disabled.\n", ev_sub_data,
+ ev_sub_data);
+ ef4_schedule_reset(efx, RESET_TYPE_DMA_ERROR);
+ break;
+ case FSE_BZ_TX_DSC_ERROR_EV:
+ netif_err(efx, tx_err, efx->net_dev,
+ "TX DMA Q %d reports descriptor fetch error."
+ " TX Q %d is disabled.\n", ev_sub_data,
+ ev_sub_data);
+ ef4_schedule_reset(efx, RESET_TYPE_DMA_ERROR);
+ break;
+ default:
+ netif_vdbg(efx, hw, efx->net_dev,
+ "channel %d unknown driver event code %d "
+ "data %04x\n", channel->channel, ev_sub_code,
+ ev_sub_data);
+ break;
+ }
+}
+
+int ef4_farch_ev_process(struct ef4_channel *channel, int budget)
+{
+ struct ef4_nic *efx = channel->efx;
+ unsigned int read_ptr;
+ ef4_qword_t event, *p_event;
+ int ev_code;
+ int tx_packets = 0;
+ int spent = 0;
+
+ if (budget <= 0)
+ return spent;
+
+ read_ptr = channel->eventq_read_ptr;
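+	/* read_ptr is free-running and wraps naturally; ef4_event() is
+	 * expected to mask it with the event queue mask when indexing
+	 * the ring.
+	 */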
+
+ for (;;) {
+ p_event = ef4_event(channel, read_ptr);
+ event = *p_event;
+
+ if (!ef4_event_present(&event))
+ /* End of events */
+ break;
+
+ netif_vdbg(channel->efx, intr, channel->efx->net_dev,
+ "channel %d event is "EF4_QWORD_FMT"\n",
+ channel->channel, EF4_QWORD_VAL(event));
+
+ /* Clear this event by marking it all ones */
+ EF4_SET_QWORD(*p_event);
+
+ ++read_ptr;
+
+ ev_code = EF4_QWORD_FIELD(event, FSF_AZ_EV_CODE);
+
+ switch (ev_code) {
+ case FSE_AZ_EV_CODE_RX_EV:
+ ef4_farch_handle_rx_event(channel, &event);
+ if (++spent == budget)
+ goto out;
+ break;
+ case FSE_AZ_EV_CODE_TX_EV:
+ tx_packets += ef4_farch_handle_tx_event(channel,
+ &event);
+ if (tx_packets > efx->txq_entries) {
+ spent = budget;
+ goto out;
+ }
+ break;
+ case FSE_AZ_EV_CODE_DRV_GEN_EV:
+ ef4_farch_handle_generated_event(channel, &event);
+ break;
+ case FSE_AZ_EV_CODE_DRIVER_EV:
+ ef4_farch_handle_driver_event(channel, &event);
+ break;
+ case FSE_AZ_EV_CODE_GLOBAL_EV:
+ if (efx->type->handle_global_event &&
+ efx->type->handle_global_event(channel, &event))
+ break;
+ /* else fall through */
+ default:
+ netif_err(channel->efx, hw, channel->efx->net_dev,
+ "channel %d unknown event type %d (data "
+ EF4_QWORD_FMT ")\n", channel->channel,
+ ev_code, EF4_QWORD_VAL(event));
+ }
+ }
+
+out:
+ channel->eventq_read_ptr = read_ptr;
+ return spent;
+}
+
+/* Allocate buffer table entries for event queue */
+int ef4_farch_ev_probe(struct ef4_channel *channel)
+{
+ struct ef4_nic *efx = channel->efx;
+ unsigned entries;
+
+ entries = channel->eventq_mask + 1;
+ return ef4_alloc_special_buffer(efx, &channel->eventq,
+ entries * sizeof(ef4_qword_t));
+}
+
+int ef4_farch_ev_init(struct ef4_channel *channel)
+{
+ ef4_oword_t reg;
+ struct ef4_nic *efx = channel->efx;
+
+ netif_dbg(efx, hw, efx->net_dev,
+ "channel %d event queue in special buffers %d-%d\n",
+ channel->channel, channel->eventq.index,
+ channel->eventq.index + channel->eventq.entries - 1);
+
+ /* Pin event queue buffer */
+ ef4_init_special_buffer(efx, &channel->eventq);
+
+ /* Fill event queue with all ones (i.e. empty events) */
+ memset(channel->eventq.buf.addr, 0xff, channel->eventq.buf.len);
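+	/* ef4_farch_ev_process() treats an all-ones event as "not
+	 * present", and overwrites each handled event with all ones to
+	 * mark it as consumed.
+	 */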
+
+ /* Push event queue to card */
+ EF4_POPULATE_OWORD_3(reg,
+ FRF_AZ_EVQ_EN, 1,
+ FRF_AZ_EVQ_SIZE, __ffs(channel->eventq.entries),
+ FRF_AZ_EVQ_BUF_BASE_ID, channel->eventq.index);
+ ef4_writeo_table(efx, &reg, efx->type->evq_ptr_tbl_base,
+ channel->channel);
+
+ return 0;
+}
+
+void ef4_farch_ev_fini(struct ef4_channel *channel)
+{
+ ef4_oword_t reg;
+ struct ef4_nic *efx = channel->efx;
+
+ /* Remove event queue from card */
+ EF4_ZERO_OWORD(reg);
+ ef4_writeo_table(efx, &reg, efx->type->evq_ptr_tbl_base,
+ channel->channel);
+
+ /* Unpin event queue */
+ ef4_fini_special_buffer(efx, &channel->eventq);
+}
+
+/* Free buffers backing event queue */
+void ef4_farch_ev_remove(struct ef4_channel *channel)
+{
+ ef4_free_special_buffer(channel->efx, &channel->eventq);
+}
+
+
+void ef4_farch_ev_test_generate(struct ef4_channel *channel)
+{
+ ef4_farch_magic_event(channel, EF4_CHANNEL_MAGIC_TEST(channel));
+}
+
+void ef4_farch_rx_defer_refill(struct ef4_rx_queue *rx_queue)
+{
+ ef4_farch_magic_event(ef4_rx_queue_channel(rx_queue),
+ EF4_CHANNEL_MAGIC_FILL(rx_queue));
+}
+
+/**************************************************************************
+ *
+ * Hardware interrupts
+ * The hardware interrupt handler does very little work; all the event
+ * queue processing is carried out by per-channel tasklets.
+ *
+ **************************************************************************/
+
+/* Enable/disable/generate interrupts */
+static inline void ef4_farch_interrupts(struct ef4_nic *efx,
+ bool enabled, bool force)
+{
+ ef4_oword_t int_en_reg_ker;
+
+ EF4_POPULATE_OWORD_3(int_en_reg_ker,
+ FRF_AZ_KER_INT_LEVE_SEL, efx->irq_level,
+ FRF_AZ_KER_INT_KER, force,
+ FRF_AZ_DRV_INT_EN_KER, enabled);
+ ef4_writeo(efx, &int_en_reg_ker, FR_AZ_INT_EN_KER);
+}
+
+void ef4_farch_irq_enable_master(struct ef4_nic *efx)
+{
+ EF4_ZERO_OWORD(*((ef4_oword_t *) efx->irq_status.addr));
+ wmb(); /* Ensure interrupt vector is clear before interrupts enabled */
+
+ ef4_farch_interrupts(efx, true, false);
+}
+
+void ef4_farch_irq_disable_master(struct ef4_nic *efx)
+{
+ /* Disable interrupts */
+ ef4_farch_interrupts(efx, false, false);
+}
+
+/* Generate a test interrupt
+ * Interrupts must already have been enabled, otherwise nasty things
+ * may happen.
+ */
+int ef4_farch_irq_test_generate(struct ef4_nic *efx)
+{
+ ef4_farch_interrupts(efx, true, true);
+ return 0;
+}
+
+/* Process a fatal interrupt
+ * Disable bus mastering ASAP and schedule a reset
+ */
+irqreturn_t ef4_farch_fatal_interrupt(struct ef4_nic *efx)
+{
+ struct falcon_nic_data *nic_data = efx->nic_data;
+ ef4_oword_t *int_ker = efx->irq_status.addr;
+ ef4_oword_t fatal_intr;
+ int error, mem_perr;
+
+ ef4_reado(efx, &fatal_intr, FR_AZ_FATAL_INTR_KER);
+ error = EF4_OWORD_FIELD(fatal_intr, FRF_AZ_FATAL_INTR);
+
+ netif_err(efx, hw, efx->net_dev, "SYSTEM ERROR "EF4_OWORD_FMT" status "
+ EF4_OWORD_FMT ": %s\n", EF4_OWORD_VAL(*int_ker),
+ EF4_OWORD_VAL(fatal_intr),
+ error ? "disabling bus mastering" : "no recognised error");
+
+	/* If this is a memory parity error, dump which blocks are offending */
+ mem_perr = (EF4_OWORD_FIELD(fatal_intr, FRF_AZ_MEM_PERR_INT_KER) ||
+ EF4_OWORD_FIELD(fatal_intr, FRF_AZ_SRM_PERR_INT_KER));
+ if (mem_perr) {
+ ef4_oword_t reg;
+ ef4_reado(efx, &reg, FR_AZ_MEM_STAT);
+ netif_err(efx, hw, efx->net_dev,
+ "SYSTEM ERROR: memory parity error "EF4_OWORD_FMT"\n",
+ EF4_OWORD_VAL(reg));
+ }
+
+ /* Disable both devices */
+ pci_clear_master(efx->pci_dev);
+ if (ef4_nic_is_dual_func(efx))
+ pci_clear_master(nic_data->pci_dev2);
+ ef4_farch_irq_disable_master(efx);
+
+ /* Count errors and reset or disable the NIC accordingly */
+ if (efx->int_error_count == 0 ||
+ time_after(jiffies, efx->int_error_expire)) {
+ efx->int_error_count = 0;
+ efx->int_error_expire =
+ jiffies + EF4_INT_ERROR_EXPIRE * HZ;
+ }
+ if (++efx->int_error_count < EF4_MAX_INT_ERRORS) {
+ netif_err(efx, hw, efx->net_dev,
+ "SYSTEM ERROR - reset scheduled\n");
+ ef4_schedule_reset(efx, RESET_TYPE_INT_ERROR);
+ } else {
+ netif_err(efx, hw, efx->net_dev,
+			  "SYSTEM ERROR - max number of errors seen. "
+ "NIC will be disabled\n");
+ ef4_schedule_reset(efx, RESET_TYPE_DISABLE);
+ }
+
+ return IRQ_HANDLED;
+}
+
+/* Handle a legacy interrupt
+ * Acknowledge the interrupt and schedule event queue processing.
+ */
+irqreturn_t ef4_farch_legacy_interrupt(int irq, void *dev_id)
+{
+ struct ef4_nic *efx = dev_id;
+ bool soft_enabled = ACCESS_ONCE(efx->irq_soft_enabled);
+ ef4_oword_t *int_ker = efx->irq_status.addr;
+ irqreturn_t result = IRQ_NONE;
+ struct ef4_channel *channel;
+ ef4_dword_t reg;
+ u32 queues;
+ int syserr;
+
+ /* Read the ISR which also ACKs the interrupts */
+ ef4_readd(efx, &reg, FR_BZ_INT_ISR0);
+ queues = EF4_EXTRACT_DWORD(reg, 0, 31);
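+	/* One ISR bit per event queue, plus bit efx->irq_level for
+	 * non-event-queue (e.g. fatal) interrupt sources.
+	 */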
+
+ /* Legacy interrupts are disabled too late by the EEH kernel
+ * code. Disable them earlier.
+ * If an EEH error occurred, the read will have returned all ones.
+ */
+ if (EF4_DWORD_IS_ALL_ONES(reg) && ef4_try_recovery(efx) &&
+ !efx->eeh_disabled_legacy_irq) {
+ disable_irq_nosync(efx->legacy_irq);
+ efx->eeh_disabled_legacy_irq = true;
+ }
+
+ /* Handle non-event-queue sources */
+ if (queues & (1U << efx->irq_level) && soft_enabled) {
+ syserr = EF4_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT);
+ if (unlikely(syserr))
+ return ef4_farch_fatal_interrupt(efx);
+ efx->last_irq_cpu = raw_smp_processor_id();
+ }
+
+ if (queues != 0) {
+ efx->irq_zero_count = 0;
+
+ /* Schedule processing of any interrupting queues */
+ if (likely(soft_enabled)) {
+ ef4_for_each_channel(channel, efx) {
+ if (queues & 1)
+ ef4_schedule_channel_irq(channel);
+ queues >>= 1;
+ }
+ }
+ result = IRQ_HANDLED;
+
+ } else {
+ ef4_qword_t *event;
+
+ /* Legacy ISR read can return zero once (SF bug 15783) */
+
+ /* We can't return IRQ_HANDLED more than once on seeing ISR=0
+ * because this might be a shared interrupt. */
+ if (efx->irq_zero_count++ == 0)
+ result = IRQ_HANDLED;
+
+ /* Ensure we schedule or rearm all event queues */
+ if (likely(soft_enabled)) {
+ ef4_for_each_channel(channel, efx) {
+ event = ef4_event(channel,
+ channel->eventq_read_ptr);
+ if (ef4_event_present(event))
+ ef4_schedule_channel_irq(channel);
+ else
+ ef4_farch_ev_read_ack(channel);
+ }
+ }
+ }
+
+ if (result == IRQ_HANDLED)
+ netif_vdbg(efx, intr, efx->net_dev,
+ "IRQ %d on CPU %d status " EF4_DWORD_FMT "\n",
+ irq, raw_smp_processor_id(), EF4_DWORD_VAL(reg));
+
+ return result;
+}
+
+/* Handle an MSI interrupt
+ *
+ * Handle an MSI hardware interrupt. This routine schedules event
+ * queue processing. No interrupt acknowledgement cycle is necessary.
+ * Also, we never need to check that the interrupt is for us, since
+ * MSI interrupts cannot be shared.
+ */
+irqreturn_t ef4_farch_msi_interrupt(int irq, void *dev_id)
+{
+ struct ef4_msi_context *context = dev_id;
+ struct ef4_nic *efx = context->efx;
+ ef4_oword_t *int_ker = efx->irq_status.addr;
+ int syserr;
+
+ netif_vdbg(efx, intr, efx->net_dev,
+ "IRQ %d on CPU %d status " EF4_OWORD_FMT "\n",
+ irq, raw_smp_processor_id(), EF4_OWORD_VAL(*int_ker));
+
+ if (!likely(ACCESS_ONCE(efx->irq_soft_enabled)))
+ return IRQ_HANDLED;
+
+ /* Handle non-event-queue sources */
+ if (context->index == efx->irq_level) {
+ syserr = EF4_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT);
+ if (unlikely(syserr))
+ return ef4_farch_fatal_interrupt(efx);
+ efx->last_irq_cpu = raw_smp_processor_id();
+ }
+
+ /* Schedule processing of the channel */
+ ef4_schedule_channel_irq(efx->channel[context->index]);
+
+ return IRQ_HANDLED;
+}
+
+/* Set up the RSS indirection table.
+ * This maps from the hash value of the packet to an RX queue.
+ */
+void ef4_farch_rx_push_indir_table(struct ef4_nic *efx)
+{
+ size_t i = 0;
+ ef4_dword_t dword;
+
+ BUG_ON(ef4_nic_rev(efx) < EF4_REV_FALCON_B0);
+
+ BUILD_BUG_ON(ARRAY_SIZE(efx->rx_indir_table) !=
+ FR_BZ_RX_INDIRECTION_TBL_ROWS);
+
+ for (i = 0; i < FR_BZ_RX_INDIRECTION_TBL_ROWS; i++) {
+ EF4_POPULATE_DWORD_1(dword, FRF_BZ_IT_QUEUE,
+ efx->rx_indir_table[i]);
+ ef4_writed(efx, &dword,
+ FR_BZ_RX_INDIRECTION_TBL +
+ FR_BZ_RX_INDIRECTION_TBL_STEP * i);
+ }
+}
+
+/* Looks at available SRAM resources and works out how many queues we
+ * can support, and where things like descriptor caches should live.
+ *
+ * SRAM is split up as follows:
+ * 0                    buftbl entries for channels
+ * efx->vf_buftbl_base  buftbl entries for SR-IOV
+ * efx->rx_dc_base      RX descriptor caches
+ * efx->tx_dc_base      TX descriptor caches
+ */
+void ef4_farch_dimension_resources(struct ef4_nic *efx, unsigned sram_lim_qw)
+{
+ unsigned vi_count, buftbl_min;
+
+ /* Account for the buffer table entries backing the datapath channels
+ * and the descriptor caches for those channels.
+ */
+ buftbl_min = ((efx->n_rx_channels * EF4_MAX_DMAQ_SIZE +
+ efx->n_tx_channels * EF4_TXQ_TYPES * EF4_MAX_DMAQ_SIZE +
+ efx->n_channels * EF4_MAX_EVQ_SIZE)
+ * sizeof(ef4_qword_t) / EF4_BUF_SIZE);
+ vi_count = max(efx->n_channels, efx->n_tx_channels * EF4_TXQ_TYPES);
+
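+	/* Carve the descriptor caches from the top of SRAM downwards:
+	 * TX caches sit directly below sram_lim_qw, RX caches below
+	 * those.
+	 */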
+ efx->tx_dc_base = sram_lim_qw - vi_count * TX_DC_ENTRIES;
+ efx->rx_dc_base = efx->tx_dc_base - vi_count * RX_DC_ENTRIES;
+}
+
+u32 ef4_farch_fpga_ver(struct ef4_nic *efx)
+{
+ ef4_oword_t altera_build;
+ ef4_reado(efx, &altera_build, FR_AZ_ALTERA_BUILD);
+ return EF4_OWORD_FIELD(altera_build, FRF_AZ_ALTERA_BUILD_VER);
+}
+
+void ef4_farch_init_common(struct ef4_nic *efx)
+{
+ ef4_oword_t temp;
+
+ /* Set positions of descriptor caches in SRAM. */
+ EF4_POPULATE_OWORD_1(temp, FRF_AZ_SRM_TX_DC_BASE_ADR, efx->tx_dc_base);
+ ef4_writeo(efx, &temp, FR_AZ_SRM_TX_DC_CFG);
+ EF4_POPULATE_OWORD_1(temp, FRF_AZ_SRM_RX_DC_BASE_ADR, efx->rx_dc_base);
+ ef4_writeo(efx, &temp, FR_AZ_SRM_RX_DC_CFG);
+
+ /* Set TX descriptor cache size. */
+ BUILD_BUG_ON(TX_DC_ENTRIES != (8 << TX_DC_ENTRIES_ORDER));
+ EF4_POPULATE_OWORD_1(temp, FRF_AZ_TX_DC_SIZE, TX_DC_ENTRIES_ORDER);
+ ef4_writeo(efx, &temp, FR_AZ_TX_DC_CFG);
+
+ /* Set RX descriptor cache size. Set low watermark to size-8, as
+ * this allows most efficient prefetching.
+ */
+ BUILD_BUG_ON(RX_DC_ENTRIES != (8 << RX_DC_ENTRIES_ORDER));
+ EF4_POPULATE_OWORD_1(temp, FRF_AZ_RX_DC_SIZE, RX_DC_ENTRIES_ORDER);
+ ef4_writeo(efx, &temp, FR_AZ_RX_DC_CFG);
+ EF4_POPULATE_OWORD_1(temp, FRF_AZ_RX_DC_PF_LWM, RX_DC_ENTRIES - 8);
+ ef4_writeo(efx, &temp, FR_AZ_RX_DC_PF_WM);
+
+ /* Program INT_KER address */
+ EF4_POPULATE_OWORD_2(temp,
+ FRF_AZ_NORM_INT_VEC_DIS_KER,
+ EF4_INT_MODE_USE_MSI(efx),
+ FRF_AZ_INT_ADR_KER, efx->irq_status.dma_addr);
+ ef4_writeo(efx, &temp, FR_AZ_INT_ADR_KER);
+
+ /* Use a valid MSI-X vector */
+ efx->irq_level = 0;
+
+ /* Enable all the genuinely fatal interrupts. (They are still
+ * masked by the overall interrupt mask, controlled by
+ * falcon_interrupts()).
+ *
+ * Note: All other fatal interrupts are enabled
+ */
+ EF4_POPULATE_OWORD_3(temp,
+ FRF_AZ_ILL_ADR_INT_KER_EN, 1,
+ FRF_AZ_RBUF_OWN_INT_KER_EN, 1,
+ FRF_AZ_TBUF_OWN_INT_KER_EN, 1);
+ EF4_INVERT_OWORD(temp);
+ ef4_writeo(efx, &temp, FR_AZ_FATAL_INTR_KER);
+
+ /* Disable the ugly timer-based TX DMA backoff and allow TX DMA to be
+ * controlled by the RX FIFO fill level. Set arbitration to one pkt/Q.
+ */
+ ef4_reado(efx, &temp, FR_AZ_TX_RESERVED);
+ EF4_SET_OWORD_FIELD(temp, FRF_AZ_TX_RX_SPACER, 0xfe);
+ EF4_SET_OWORD_FIELD(temp, FRF_AZ_TX_RX_SPACER_EN, 1);
+ EF4_SET_OWORD_FIELD(temp, FRF_AZ_TX_ONE_PKT_PER_Q, 1);
+ EF4_SET_OWORD_FIELD(temp, FRF_AZ_TX_PUSH_EN, 1);
+ EF4_SET_OWORD_FIELD(temp, FRF_AZ_TX_DIS_NON_IP_EV, 1);
+ /* Enable SW_EV to inherit in char driver - assume harmless here */
+ EF4_SET_OWORD_FIELD(temp, FRF_AZ_TX_SOFT_EVT_EN, 1);
+ /* Prefetch threshold 2 => fetch when descriptor cache half empty */
+ EF4_SET_OWORD_FIELD(temp, FRF_AZ_TX_PREF_THRESHOLD, 2);
+ /* Disable hardware watchdog which can misfire */
+ EF4_SET_OWORD_FIELD(temp, FRF_AZ_TX_PREF_WD_TMR, 0x3fffff);
+ /* Squash TX of packets of 16 bytes or less */
+ if (ef4_nic_rev(efx) >= EF4_REV_FALCON_B0)
+ EF4_SET_OWORD_FIELD(temp, FRF_BZ_TX_FLUSH_MIN_LEN_EN, 1);
+ ef4_writeo(efx, &temp, FR_AZ_TX_RESERVED);
+
+ if (ef4_nic_rev(efx) >= EF4_REV_FALCON_B0) {
+ EF4_POPULATE_OWORD_4(temp,
+ /* Default values */
+ FRF_BZ_TX_PACE_SB_NOT_AF, 0x15,
+ FRF_BZ_TX_PACE_SB_AF, 0xb,
+ FRF_BZ_TX_PACE_FB_BASE, 0,
+ /* Allow large pace values in the
+ * fast bin. */
+ FRF_BZ_TX_PACE_BIN_TH,
+ FFE_BZ_TX_PACE_RESERVED);
+ ef4_writeo(efx, &temp, FR_BZ_TX_PACE);
+ }
+}
+
+/**************************************************************************
+ *
+ * Filter tables
+ *
+ **************************************************************************
+ */
+
+/* "Fudge factors" - difference between programmed value and actual depth.
+ * Due to the pipelined implementation we need to program H/W with a value
+ * that is larger than the hop limit we want.
+ */
+#define EF4_FARCH_FILTER_CTL_SRCH_FUDGE_WILD 3
+#define EF4_FARCH_FILTER_CTL_SRCH_FUDGE_FULL 1
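+/* For example, to let the hardware probe 5 entries for a wildcard match,
+ * the driver programs a search limit of 5 + 3 = 8 (see
+ * ef4_farch_filter_push_rx_config()).
+ */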
+
+/* Hard maximum search limit. Hardware will time-out beyond 200-something.
+ * We also need to avoid infinite loops in ef4_farch_filter_search() when the
+ * table is full.
+ */
+#define EF4_FARCH_FILTER_CTL_SRCH_MAX 200
+
+/* Don't try very hard to find space for performance hints, as this is
+ * counter-productive. */
+#define EF4_FARCH_FILTER_CTL_SRCH_HINT_MAX 5
+
+enum ef4_farch_filter_type {
+ EF4_FARCH_FILTER_TCP_FULL = 0,
+ EF4_FARCH_FILTER_TCP_WILD,
+ EF4_FARCH_FILTER_UDP_FULL,
+ EF4_FARCH_FILTER_UDP_WILD,
+ EF4_FARCH_FILTER_MAC_FULL = 4,
+ EF4_FARCH_FILTER_MAC_WILD,
+ EF4_FARCH_FILTER_UC_DEF = 8,
+ EF4_FARCH_FILTER_MC_DEF,
+ EF4_FARCH_FILTER_TYPE_COUNT, /* number of specific types */
+};
+
+enum ef4_farch_filter_table_id {
+ EF4_FARCH_FILTER_TABLE_RX_IP = 0,
+ EF4_FARCH_FILTER_TABLE_RX_MAC,
+ EF4_FARCH_FILTER_TABLE_RX_DEF,
+ EF4_FARCH_FILTER_TABLE_TX_MAC,
+ EF4_FARCH_FILTER_TABLE_COUNT,
+};
+
+enum ef4_farch_filter_index {
+ EF4_FARCH_FILTER_INDEX_UC_DEF,
+ EF4_FARCH_FILTER_INDEX_MC_DEF,
+ EF4_FARCH_FILTER_SIZE_RX_DEF,
+};
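+/* EF4_FARCH_FILTER_SIZE_RX_DEF (== 2) doubles as the number of entries in
+ * the RX_DEF table: the unicast and multicast default filters at indices
+ * 0 and 1.
+ */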
+
+struct ef4_farch_filter_spec {
+ u8 type:4;
+ u8 priority:4;
+ u8 flags;
+ u16 dmaq_id;
+ u32 data[3];
+};
+
+struct ef4_farch_filter_table {
+ enum ef4_farch_filter_table_id id;
+ u32 offset; /* address of table relative to BAR */
+ unsigned size; /* number of entries */
+ unsigned step; /* step between entries */
+ unsigned used; /* number currently used */
+ unsigned long *used_bitmap;
+ struct ef4_farch_filter_spec *spec;
+ unsigned search_limit[EF4_FARCH_FILTER_TYPE_COUNT];
+};
+
+struct ef4_farch_filter_state {
+ struct ef4_farch_filter_table table[EF4_FARCH_FILTER_TABLE_COUNT];
+};
+
+static void
+ef4_farch_filter_table_clear_entry(struct ef4_nic *efx,
+ struct ef4_farch_filter_table *table,
+ unsigned int filter_idx);
+
+/* The filter hash function is LFSR polynomial x^16 + x^3 + 1 of a 32-bit
+ * key derived from the n-tuple. The initial LFSR state is 0xffff. */
+static u16 ef4_farch_filter_hash(u32 key)
+{
+ u16 tmp;
+
+ /* First 16 rounds */
+ tmp = 0x1fff ^ key >> 16;
+ tmp = tmp ^ tmp >> 3 ^ tmp >> 6;
+ tmp = tmp ^ tmp >> 9;
+ /* Last 16 rounds */
+ tmp = tmp ^ tmp << 13 ^ key;
+ tmp = tmp ^ tmp >> 3 ^ tmp >> 6;
+ return tmp ^ tmp >> 9;
+}
+
+/* To allow for hash collisions, filter search continues at these
+ * increments from the first possible entry selected by the hash. */
+static u16 ef4_farch_filter_increment(u32 key)
+{
+ return key * 2 - 1;
+}
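+/* The increment is always odd and the table size is a power of two, so
+ * the probe sequence visits every table entry before repeating.
+ */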
+
+static enum ef4_farch_filter_table_id
+ef4_farch_filter_spec_table_id(const struct ef4_farch_filter_spec *spec)
+{
+ BUILD_BUG_ON(EF4_FARCH_FILTER_TABLE_RX_IP !=
+ (EF4_FARCH_FILTER_TCP_FULL >> 2));
+ BUILD_BUG_ON(EF4_FARCH_FILTER_TABLE_RX_IP !=
+ (EF4_FARCH_FILTER_TCP_WILD >> 2));
+ BUILD_BUG_ON(EF4_FARCH_FILTER_TABLE_RX_IP !=
+ (EF4_FARCH_FILTER_UDP_FULL >> 2));
+ BUILD_BUG_ON(EF4_FARCH_FILTER_TABLE_RX_IP !=
+ (EF4_FARCH_FILTER_UDP_WILD >> 2));
+ BUILD_BUG_ON(EF4_FARCH_FILTER_TABLE_RX_MAC !=
+ (EF4_FARCH_FILTER_MAC_FULL >> 2));
+ BUILD_BUG_ON(EF4_FARCH_FILTER_TABLE_RX_MAC !=
+ (EF4_FARCH_FILTER_MAC_WILD >> 2));
+ BUILD_BUG_ON(EF4_FARCH_FILTER_TABLE_TX_MAC !=
+ EF4_FARCH_FILTER_TABLE_RX_MAC + 2);
+ return (spec->type >> 2) + ((spec->flags & EF4_FILTER_FLAG_TX) ? 2 : 0);
+}
+
+static void ef4_farch_filter_push_rx_config(struct ef4_nic *efx)
+{
+ struct ef4_farch_filter_state *state = efx->filter_state;
+ struct ef4_farch_filter_table *table;
+ ef4_oword_t filter_ctl;
+
+ ef4_reado(efx, &filter_ctl, FR_BZ_RX_FILTER_CTL);
+
+ table = &state->table[EF4_FARCH_FILTER_TABLE_RX_IP];
+ EF4_SET_OWORD_FIELD(filter_ctl, FRF_BZ_TCP_FULL_SRCH_LIMIT,
+ table->search_limit[EF4_FARCH_FILTER_TCP_FULL] +
+ EF4_FARCH_FILTER_CTL_SRCH_FUDGE_FULL);
+ EF4_SET_OWORD_FIELD(filter_ctl, FRF_BZ_TCP_WILD_SRCH_LIMIT,
+ table->search_limit[EF4_FARCH_FILTER_TCP_WILD] +
+ EF4_FARCH_FILTER_CTL_SRCH_FUDGE_WILD);
+ EF4_SET_OWORD_FIELD(filter_ctl, FRF_BZ_UDP_FULL_SRCH_LIMIT,
+ table->search_limit[EF4_FARCH_FILTER_UDP_FULL] +
+ EF4_FARCH_FILTER_CTL_SRCH_FUDGE_FULL);
+ EF4_SET_OWORD_FIELD(filter_ctl, FRF_BZ_UDP_WILD_SRCH_LIMIT,
+ table->search_limit[EF4_FARCH_FILTER_UDP_WILD] +
+ EF4_FARCH_FILTER_CTL_SRCH_FUDGE_WILD);
+
+ table = &state->table[EF4_FARCH_FILTER_TABLE_RX_MAC];
+ if (table->size) {
+ EF4_SET_OWORD_FIELD(
+ filter_ctl, FRF_CZ_ETHERNET_FULL_SEARCH_LIMIT,
+ table->search_limit[EF4_FARCH_FILTER_MAC_FULL] +
+ EF4_FARCH_FILTER_CTL_SRCH_FUDGE_FULL);
+ EF4_SET_OWORD_FIELD(
+ filter_ctl, FRF_CZ_ETHERNET_WILDCARD_SEARCH_LIMIT,
+ table->search_limit[EF4_FARCH_FILTER_MAC_WILD] +
+ EF4_FARCH_FILTER_CTL_SRCH_FUDGE_WILD);
+ }
+
+ table = &state->table[EF4_FARCH_FILTER_TABLE_RX_DEF];
+ if (table->size) {
+ EF4_SET_OWORD_FIELD(
+ filter_ctl, FRF_CZ_UNICAST_NOMATCH_Q_ID,
+ table->spec[EF4_FARCH_FILTER_INDEX_UC_DEF].dmaq_id);
+ EF4_SET_OWORD_FIELD(
+ filter_ctl, FRF_CZ_UNICAST_NOMATCH_RSS_ENABLED,
+ !!(table->spec[EF4_FARCH_FILTER_INDEX_UC_DEF].flags &
+ EF4_FILTER_FLAG_RX_RSS));
+ EF4_SET_OWORD_FIELD(
+ filter_ctl, FRF_CZ_MULTICAST_NOMATCH_Q_ID,
+ table->spec[EF4_FARCH_FILTER_INDEX_MC_DEF].dmaq_id);
+ EF4_SET_OWORD_FIELD(
+ filter_ctl, FRF_CZ_MULTICAST_NOMATCH_RSS_ENABLED,
+ !!(table->spec[EF4_FARCH_FILTER_INDEX_MC_DEF].flags &
+ EF4_FILTER_FLAG_RX_RSS));
+
+ /* There is a single bit to enable RX scatter for all
+ * unmatched packets. Only set it if scatter is
+ * enabled in both filter specs.
+ */
+ EF4_SET_OWORD_FIELD(
+ filter_ctl, FRF_BZ_SCATTER_ENBL_NO_MATCH_Q,
+ !!(table->spec[EF4_FARCH_FILTER_INDEX_UC_DEF].flags &
+ table->spec[EF4_FARCH_FILTER_INDEX_MC_DEF].flags &
+ EF4_FILTER_FLAG_RX_SCATTER));
+ } else if (ef4_nic_rev(efx) >= EF4_REV_FALCON_B0) {
+ /* We don't expose 'default' filters because unmatched
+ * packets always go to the queue number found in the
+ * RSS table. But we still need to set the RX scatter
+ * bit here.
+ */
+ EF4_SET_OWORD_FIELD(
+ filter_ctl, FRF_BZ_SCATTER_ENBL_NO_MATCH_Q,
+ efx->rx_scatter);
+ }
+
+ ef4_writeo(efx, &filter_ctl, FR_BZ_RX_FILTER_CTL);
+}
+
+static void ef4_farch_filter_push_tx_limits(struct ef4_nic *efx)
+{
+ struct ef4_farch_filter_state *state = efx->filter_state;
+ struct ef4_farch_filter_table *table;
+ ef4_oword_t tx_cfg;
+
+ ef4_reado(efx, &tx_cfg, FR_AZ_TX_CFG);
+
+ table = &state->table[EF4_FARCH_FILTER_TABLE_TX_MAC];
+ if (table->size) {
+ EF4_SET_OWORD_FIELD(
+ tx_cfg, FRF_CZ_TX_ETH_FILTER_FULL_SEARCH_RANGE,
+ table->search_limit[EF4_FARCH_FILTER_MAC_FULL] +
+ EF4_FARCH_FILTER_CTL_SRCH_FUDGE_FULL);
+ EF4_SET_OWORD_FIELD(
+ tx_cfg, FRF_CZ_TX_ETH_FILTER_WILD_SEARCH_RANGE,
+ table->search_limit[EF4_FARCH_FILTER_MAC_WILD] +
+ EF4_FARCH_FILTER_CTL_SRCH_FUDGE_WILD);
+ }
+
+ ef4_writeo(efx, &tx_cfg, FR_AZ_TX_CFG);
+}
+
+static int
+ef4_farch_filter_from_gen_spec(struct ef4_farch_filter_spec *spec,
+ const struct ef4_filter_spec *gen_spec)
+{
+ bool is_full = false;
+
+ if ((gen_spec->flags & EF4_FILTER_FLAG_RX_RSS) &&
+ gen_spec->rss_context != EF4_FILTER_RSS_CONTEXT_DEFAULT)
+ return -EINVAL;
+
+ spec->priority = gen_spec->priority;
+ spec->flags = gen_spec->flags;
+ spec->dmaq_id = gen_spec->dmaq_id;
+
+ switch (gen_spec->match_flags) {
+ case (EF4_FILTER_MATCH_ETHER_TYPE | EF4_FILTER_MATCH_IP_PROTO |
+ EF4_FILTER_MATCH_LOC_HOST | EF4_FILTER_MATCH_LOC_PORT |
+ EF4_FILTER_MATCH_REM_HOST | EF4_FILTER_MATCH_REM_PORT):
+ is_full = true;
+ /* fall through */
+ case (EF4_FILTER_MATCH_ETHER_TYPE | EF4_FILTER_MATCH_IP_PROTO |
+ EF4_FILTER_MATCH_LOC_HOST | EF4_FILTER_MATCH_LOC_PORT): {
+ __be32 rhost, host1, host2;
+ __be16 rport, port1, port2;
+
+ EF4_BUG_ON_PARANOID(!(gen_spec->flags & EF4_FILTER_FLAG_RX));
+
+ if (gen_spec->ether_type != htons(ETH_P_IP))
+ return -EPROTONOSUPPORT;
+ if (gen_spec->loc_port == 0 ||
+ (is_full && gen_spec->rem_port == 0))
+ return -EADDRNOTAVAIL;
+ switch (gen_spec->ip_proto) {
+ case IPPROTO_TCP:
+ spec->type = (is_full ? EF4_FARCH_FILTER_TCP_FULL :
+ EF4_FARCH_FILTER_TCP_WILD);
+ break;
+ case IPPROTO_UDP:
+ spec->type = (is_full ? EF4_FARCH_FILTER_UDP_FULL :
+ EF4_FARCH_FILTER_UDP_WILD);
+ break;
+ default:
+ return -EPROTONOSUPPORT;
+ }
+
+ /* Filter is constructed in terms of source and destination,
+ * with the odd wrinkle that the ports are swapped in a UDP
+ * wildcard filter. We need to convert from local and remote
+ * (= zero for wildcard) addresses.
+ */
+ rhost = is_full ? gen_spec->rem_host[0] : 0;
+ rport = is_full ? gen_spec->rem_port : 0;
+ host1 = rhost;
+ host2 = gen_spec->loc_host[0];
+ if (!is_full && gen_spec->ip_proto == IPPROTO_UDP) {
+ port1 = gen_spec->loc_port;
+ port2 = rport;
+ } else {
+ port1 = rport;
+ port2 = gen_spec->loc_port;
+ }
+ spec->data[0] = ntohl(host1) << 16 | ntohs(port1);
+ spec->data[1] = ntohs(port2) << 16 | ntohl(host1) >> 16;
+ spec->data[2] = ntohl(host2);
+
+ break;
+ }
+
+ case EF4_FILTER_MATCH_LOC_MAC | EF4_FILTER_MATCH_OUTER_VID:
+ is_full = true;
+ /* fall through */
+ case EF4_FILTER_MATCH_LOC_MAC:
+ spec->type = (is_full ? EF4_FARCH_FILTER_MAC_FULL :
+ EF4_FARCH_FILTER_MAC_WILD);
+ spec->data[0] = is_full ? ntohs(gen_spec->outer_vid) : 0;
+ spec->data[1] = (gen_spec->loc_mac[2] << 24 |
+ gen_spec->loc_mac[3] << 16 |
+ gen_spec->loc_mac[4] << 8 |
+ gen_spec->loc_mac[5]);
+ spec->data[2] = (gen_spec->loc_mac[0] << 8 |
+ gen_spec->loc_mac[1]);
+ break;
+
+ case EF4_FILTER_MATCH_LOC_MAC_IG:
+ spec->type = (is_multicast_ether_addr(gen_spec->loc_mac) ?
+ EF4_FARCH_FILTER_MC_DEF :
+ EF4_FARCH_FILTER_UC_DEF);
+ memset(spec->data, 0, sizeof(spec->data)); /* ensure equality */
+ break;
+
+ default:
+ return -EPROTONOSUPPORT;
+ }
+
+ return 0;
+}
+
+static void
+ef4_farch_filter_to_gen_spec(struct ef4_filter_spec *gen_spec,
+ const struct ef4_farch_filter_spec *spec)
+{
+ bool is_full = false;
+
+ /* *gen_spec should be completely initialised, to be consistent
+ * with ef4_filter_init_{rx,tx}() and in case we want to copy
+ * it back to userland.
+ */
+ memset(gen_spec, 0, sizeof(*gen_spec));
+
+ gen_spec->priority = spec->priority;
+ gen_spec->flags = spec->flags;
+ gen_spec->dmaq_id = spec->dmaq_id;
+
+ switch (spec->type) {
+ case EF4_FARCH_FILTER_TCP_FULL:
+ case EF4_FARCH_FILTER_UDP_FULL:
+ is_full = true;
+ /* fall through */
+ case EF4_FARCH_FILTER_TCP_WILD:
+ case EF4_FARCH_FILTER_UDP_WILD: {
+ __be32 host1, host2;
+ __be16 port1, port2;
+
+ gen_spec->match_flags =
+ EF4_FILTER_MATCH_ETHER_TYPE |
+ EF4_FILTER_MATCH_IP_PROTO |
+ EF4_FILTER_MATCH_LOC_HOST | EF4_FILTER_MATCH_LOC_PORT;
+ if (is_full)
+ gen_spec->match_flags |= (EF4_FILTER_MATCH_REM_HOST |
+ EF4_FILTER_MATCH_REM_PORT);
+ gen_spec->ether_type = htons(ETH_P_IP);
+ gen_spec->ip_proto =
+ (spec->type == EF4_FARCH_FILTER_TCP_FULL ||
+ spec->type == EF4_FARCH_FILTER_TCP_WILD) ?
+ IPPROTO_TCP : IPPROTO_UDP;
+
+ host1 = htonl(spec->data[0] >> 16 | spec->data[1] << 16);
+ port1 = htons(spec->data[0]);
+ host2 = htonl(spec->data[2]);
+ port2 = htons(spec->data[1] >> 16);
+ if (spec->flags & EF4_FILTER_FLAG_TX) {
+ gen_spec->loc_host[0] = host1;
+ gen_spec->rem_host[0] = host2;
+ } else {
+ gen_spec->loc_host[0] = host2;
+ gen_spec->rem_host[0] = host1;
+ }
+ if (!!(gen_spec->flags & EF4_FILTER_FLAG_TX) ^
+ (!is_full && gen_spec->ip_proto == IPPROTO_UDP)) {
+ gen_spec->loc_port = port1;
+ gen_spec->rem_port = port2;
+ } else {
+ gen_spec->loc_port = port2;
+ gen_spec->rem_port = port1;
+ }
+
+ break;
+ }
+
+ case EF4_FARCH_FILTER_MAC_FULL:
+ is_full = true;
+ /* fall through */
+ case EF4_FARCH_FILTER_MAC_WILD:
+ gen_spec->match_flags = EF4_FILTER_MATCH_LOC_MAC;
+ if (is_full)
+ gen_spec->match_flags |= EF4_FILTER_MATCH_OUTER_VID;
+ gen_spec->loc_mac[0] = spec->data[2] >> 8;
+ gen_spec->loc_mac[1] = spec->data[2];
+ gen_spec->loc_mac[2] = spec->data[1] >> 24;
+ gen_spec->loc_mac[3] = spec->data[1] >> 16;
+ gen_spec->loc_mac[4] = spec->data[1] >> 8;
+ gen_spec->loc_mac[5] = spec->data[1];
+ gen_spec->outer_vid = htons(spec->data[0]);
+ break;
+
+ case EF4_FARCH_FILTER_UC_DEF:
+ case EF4_FARCH_FILTER_MC_DEF:
+ gen_spec->match_flags = EF4_FILTER_MATCH_LOC_MAC_IG;
+ gen_spec->loc_mac[0] = spec->type == EF4_FARCH_FILTER_MC_DEF;
+ break;
+
+ default:
+ WARN_ON(1);
+ break;
+ }
+}
+
+static void
+ef4_farch_filter_init_rx_auto(struct ef4_nic *efx,
+ struct ef4_farch_filter_spec *spec)
+{
+	/* If there's only one channel then disable RSS for non-VF
+ * traffic, thereby allowing VFs to use RSS when the PF can't.
+ */
+ spec->priority = EF4_FILTER_PRI_AUTO;
+ spec->flags = (EF4_FILTER_FLAG_RX |
+ (ef4_rss_enabled(efx) ? EF4_FILTER_FLAG_RX_RSS : 0) |
+ (efx->rx_scatter ? EF4_FILTER_FLAG_RX_SCATTER : 0));
+ spec->dmaq_id = 0;
+}
+
+/* Build a filter entry and return its n-tuple key. The key is used by
+ * ef4_farch_filter_insert() to choose hash table slots.
+ */
+static u32 ef4_farch_filter_build(ef4_oword_t *filter,
+ struct ef4_farch_filter_spec *spec)
+{
+ u32 data3;
+
+ switch (ef4_farch_filter_spec_table_id(spec)) {
+ case EF4_FARCH_FILTER_TABLE_RX_IP: {
+ bool is_udp = (spec->type == EF4_FARCH_FILTER_UDP_FULL ||
+ spec->type == EF4_FARCH_FILTER_UDP_WILD);
+ EF4_POPULATE_OWORD_7(
+ *filter,
+ FRF_BZ_RSS_EN,
+ !!(spec->flags & EF4_FILTER_FLAG_RX_RSS),
+ FRF_BZ_SCATTER_EN,
+ !!(spec->flags & EF4_FILTER_FLAG_RX_SCATTER),
+ FRF_BZ_TCP_UDP, is_udp,
+ FRF_BZ_RXQ_ID, spec->dmaq_id,
+ EF4_DWORD_2, spec->data[2],
+ EF4_DWORD_1, spec->data[1],
+ EF4_DWORD_0, spec->data[0]);
+ data3 = is_udp;
+ break;
+ }
+
+ case EF4_FARCH_FILTER_TABLE_RX_MAC: {
+ bool is_wild = spec->type == EF4_FARCH_FILTER_MAC_WILD;
+ EF4_POPULATE_OWORD_7(
+ *filter,
+ FRF_CZ_RMFT_RSS_EN,
+ !!(spec->flags & EF4_FILTER_FLAG_RX_RSS),
+ FRF_CZ_RMFT_SCATTER_EN,
+ !!(spec->flags & EF4_FILTER_FLAG_RX_SCATTER),
+ FRF_CZ_RMFT_RXQ_ID, spec->dmaq_id,
+ FRF_CZ_RMFT_WILDCARD_MATCH, is_wild,
+ FRF_CZ_RMFT_DEST_MAC_HI, spec->data[2],
+ FRF_CZ_RMFT_DEST_MAC_LO, spec->data[1],
+ FRF_CZ_RMFT_VLAN_ID, spec->data[0]);
+ data3 = is_wild;
+ break;
+ }
+
+ case EF4_FARCH_FILTER_TABLE_TX_MAC: {
+ bool is_wild = spec->type == EF4_FARCH_FILTER_MAC_WILD;
+ EF4_POPULATE_OWORD_5(*filter,
+ FRF_CZ_TMFT_TXQ_ID, spec->dmaq_id,
+ FRF_CZ_TMFT_WILDCARD_MATCH, is_wild,
+ FRF_CZ_TMFT_SRC_MAC_HI, spec->data[2],
+ FRF_CZ_TMFT_SRC_MAC_LO, spec->data[1],
+ FRF_CZ_TMFT_VLAN_ID, spec->data[0]);
+ data3 = is_wild | spec->dmaq_id << 1;
+ break;
+ }
+
+ default:
+ BUG();
+ }
+
+ return spec->data[0] ^ spec->data[1] ^ spec->data[2] ^ data3;
+}
+
+static bool ef4_farch_filter_equal(const struct ef4_farch_filter_spec *left,
+ const struct ef4_farch_filter_spec *right)
+{
+ if (left->type != right->type ||
+ memcmp(left->data, right->data, sizeof(left->data)))
+ return false;
+
+ if (left->flags & EF4_FILTER_FLAG_TX &&
+ left->dmaq_id != right->dmaq_id)
+ return false;
+
+ return true;
+}
+
+/*
+ * Construct/deconstruct external filter IDs. At least the RX filter
+ * IDs must be ordered by matching priority, for RX NFC semantics.
+ *
+ * Deconstruction needs to be robust against invalid IDs so that
+ * ef4_filter_remove_id_safe() and ef4_filter_get_filter_safe() can
+ * accept user-provided IDs.
+ */
+
+#define EF4_FARCH_FILTER_MATCH_PRI_COUNT 5
+
+static const u8 ef4_farch_filter_type_match_pri[EF4_FARCH_FILTER_TYPE_COUNT] = {
+ [EF4_FARCH_FILTER_TCP_FULL] = 0,
+ [EF4_FARCH_FILTER_UDP_FULL] = 0,
+ [EF4_FARCH_FILTER_TCP_WILD] = 1,
+ [EF4_FARCH_FILTER_UDP_WILD] = 1,
+ [EF4_FARCH_FILTER_MAC_FULL] = 2,
+ [EF4_FARCH_FILTER_MAC_WILD] = 3,
+ [EF4_FARCH_FILTER_UC_DEF] = 4,
+ [EF4_FARCH_FILTER_MC_DEF] = 4,
+};
+
+static const enum ef4_farch_filter_table_id ef4_farch_filter_range_table[] = {
+ EF4_FARCH_FILTER_TABLE_RX_IP, /* RX match pri 0 */
+ EF4_FARCH_FILTER_TABLE_RX_IP,
+ EF4_FARCH_FILTER_TABLE_RX_MAC,
+ EF4_FARCH_FILTER_TABLE_RX_MAC,
+ EF4_FARCH_FILTER_TABLE_RX_DEF, /* RX match pri 4 */
+ EF4_FARCH_FILTER_TABLE_TX_MAC, /* TX match pri 0 */
+ EF4_FARCH_FILTER_TABLE_TX_MAC, /* TX match pri 1 */
+};
+
+#define EF4_FARCH_FILTER_INDEX_WIDTH 13
+#define EF4_FARCH_FILTER_INDEX_MASK ((1 << EF4_FARCH_FILTER_INDEX_WIDTH) - 1)
+
+static inline u32
+ef4_farch_filter_make_id(const struct ef4_farch_filter_spec *spec,
+ unsigned int index)
+{
+ unsigned int range;
+
+ range = ef4_farch_filter_type_match_pri[spec->type];
+ if (!(spec->flags & EF4_FILTER_FLAG_RX))
+ range += EF4_FARCH_FILTER_MATCH_PRI_COUNT;
+
+ return range << EF4_FARCH_FILTER_INDEX_WIDTH | index;
+}
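+/* For example, an RX TCP wildcard filter (match priority 1) at table
+ * index 5 gets ID (1 << 13) | 5 = 0x2005; TX ranges follow the RX ranges,
+ * starting at EF4_FARCH_FILTER_MATCH_PRI_COUNT.
+ */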
+
+static inline enum ef4_farch_filter_table_id
+ef4_farch_filter_id_table_id(u32 id)
+{
+ unsigned int range = id >> EF4_FARCH_FILTER_INDEX_WIDTH;
+
+ if (range < ARRAY_SIZE(ef4_farch_filter_range_table))
+ return ef4_farch_filter_range_table[range];
+ else
+ return EF4_FARCH_FILTER_TABLE_COUNT; /* invalid */
+}
+
+static inline unsigned int ef4_farch_filter_id_index(u32 id)
+{
+ return id & EF4_FARCH_FILTER_INDEX_MASK;
+}
+
+u32 ef4_farch_filter_get_rx_id_limit(struct ef4_nic *efx)
+{
+ struct ef4_farch_filter_state *state = efx->filter_state;
+ unsigned int range = EF4_FARCH_FILTER_MATCH_PRI_COUNT - 1;
+ enum ef4_farch_filter_table_id table_id;
+
+ do {
+ table_id = ef4_farch_filter_range_table[range];
+ if (state->table[table_id].size != 0)
+ return range << EF4_FARCH_FILTER_INDEX_WIDTH |
+ state->table[table_id].size;
+ } while (range--);
+
+ return 0;
+}
+
+s32 ef4_farch_filter_insert(struct ef4_nic *efx,
+ struct ef4_filter_spec *gen_spec,
+ bool replace_equal)
+{
+ struct ef4_farch_filter_state *state = efx->filter_state;
+ struct ef4_farch_filter_table *table;
+ struct ef4_farch_filter_spec spec;
+ ef4_oword_t filter;
+ int rep_index, ins_index;
+ unsigned int depth = 0;
+ int rc;
+
+ rc = ef4_farch_filter_from_gen_spec(&spec, gen_spec);
+ if (rc)
+ return rc;
+
+ table = &state->table[ef4_farch_filter_spec_table_id(&spec)];
+ if (table->size == 0)
+ return -EINVAL;
+
+ netif_vdbg(efx, hw, efx->net_dev,
+ "%s: type %d search_limit=%d", __func__, spec.type,
+ table->search_limit[spec.type]);
+
+ if (table->id == EF4_FARCH_FILTER_TABLE_RX_DEF) {
+ /* One filter spec per type */
+ BUILD_BUG_ON(EF4_FARCH_FILTER_INDEX_UC_DEF != 0);
+ BUILD_BUG_ON(EF4_FARCH_FILTER_INDEX_MC_DEF !=
+ EF4_FARCH_FILTER_MC_DEF - EF4_FARCH_FILTER_UC_DEF);
+ rep_index = spec.type - EF4_FARCH_FILTER_UC_DEF;
+ ins_index = rep_index;
+
+ spin_lock_bh(&efx->filter_lock);
+ } else {
+ /* Search concurrently for
+ * (1) a filter to be replaced (rep_index): any filter
+ * with the same match values, up to the current
+ * search depth for this type, and
+ * (2) the insertion point (ins_index): (1) or any
+ * free slot before it or up to the maximum search
+ * depth for this priority
+ * We fail if we cannot find (2).
+ *
+ * We can stop once either
+ * (a) we find (1), in which case we have definitely
+ * found (2) as well; or
+ * (b) we have searched exhaustively for (1), and have
+ * either found (2) or searched exhaustively for it
+ */
+ u32 key = ef4_farch_filter_build(&filter, &spec);
+ unsigned int hash = ef4_farch_filter_hash(key);
+ unsigned int incr = ef4_farch_filter_increment(key);
+ unsigned int max_rep_depth = table->search_limit[spec.type];
+ unsigned int max_ins_depth =
+ spec.priority <= EF4_FILTER_PRI_HINT ?
+ EF4_FARCH_FILTER_CTL_SRCH_HINT_MAX :
+ EF4_FARCH_FILTER_CTL_SRCH_MAX;
+ unsigned int i = hash & (table->size - 1);
+
+ ins_index = -1;
+ depth = 1;
+
+ spin_lock_bh(&efx->filter_lock);
+
+ for (;;) {
+ if (!test_bit(i, table->used_bitmap)) {
+ if (ins_index < 0)
+ ins_index = i;
+ } else if (ef4_farch_filter_equal(&spec,
+ &table->spec[i])) {
+ /* Case (a) */
+ if (ins_index < 0)
+ ins_index = i;
+ rep_index = i;
+ break;
+ }
+
+ if (depth >= max_rep_depth &&
+ (ins_index >= 0 || depth >= max_ins_depth)) {
+ /* Case (b) */
+ if (ins_index < 0) {
+ rc = -EBUSY;
+ goto out;
+ }
+ rep_index = -1;
+ break;
+ }
+
+ i = (i + incr) & (table->size - 1);
+ ++depth;
+ }
+ }
+
+ /* If we found a filter to be replaced, check whether we
+ * should do so
+ */
+ if (rep_index >= 0) {
+ struct ef4_farch_filter_spec *saved_spec =
+ &table->spec[rep_index];
+
+ if (spec.priority == saved_spec->priority && !replace_equal) {
+ rc = -EEXIST;
+ goto out;
+ }
+ if (spec.priority < saved_spec->priority) {
+ rc = -EPERM;
+ goto out;
+ }
+ if (saved_spec->priority == EF4_FILTER_PRI_AUTO ||
+ saved_spec->flags & EF4_FILTER_FLAG_RX_OVER_AUTO)
+ spec.flags |= EF4_FILTER_FLAG_RX_OVER_AUTO;
+ }
+
+ /* Insert the filter */
+ if (ins_index != rep_index) {
+ __set_bit(ins_index, table->used_bitmap);
+ ++table->used;
+ }
+ table->spec[ins_index] = spec;
+
+ if (table->id == EF4_FARCH_FILTER_TABLE_RX_DEF) {
+ ef4_farch_filter_push_rx_config(efx);
+ } else {
+ if (table->search_limit[spec.type] < depth) {
+ table->search_limit[spec.type] = depth;
+ if (spec.flags & EF4_FILTER_FLAG_TX)
+ ef4_farch_filter_push_tx_limits(efx);
+ else
+ ef4_farch_filter_push_rx_config(efx);
+ }
+
+ ef4_writeo(efx, &filter,
+ table->offset + table->step * ins_index);
+
+ /* If we were able to replace a filter by inserting
+ * at a lower depth, clear the replaced filter
+ */
+ if (ins_index != rep_index && rep_index >= 0)
+ ef4_farch_filter_table_clear_entry(efx, table,
+ rep_index);
+ }
+
+ netif_vdbg(efx, hw, efx->net_dev,
+ "%s: filter type %d index %d rxq %u set",
+ __func__, spec.type, ins_index, spec.dmaq_id);
+ rc = ef4_farch_filter_make_id(&spec, ins_index);
+
+out:
+ spin_unlock_bh(&efx->filter_lock);
+ return rc;
+}
+
+static void
+ef4_farch_filter_table_clear_entry(struct ef4_nic *efx,
+ struct ef4_farch_filter_table *table,
+ unsigned int filter_idx)
+{
+ static ef4_oword_t filter;
+
+ EF4_WARN_ON_PARANOID(!test_bit(filter_idx, table->used_bitmap));
+ BUG_ON(table->offset == 0); /* can't clear MAC default filters */
+
+ __clear_bit(filter_idx, table->used_bitmap);
+ --table->used;
+ memset(&table->spec[filter_idx], 0, sizeof(table->spec[0]));
+
+ ef4_writeo(efx, &filter, table->offset + table->step * filter_idx);
+
+ /* If this filter required a greater search depth than
+ * any other, the search limit for its type can now be
+ * decreased. However, it is hard to determine that
+ * unless the table has become completely empty - in
+ * which case, all its search limits can be set to 0.
+ */
+ if (unlikely(table->used == 0)) {
+ memset(table->search_limit, 0, sizeof(table->search_limit));
+ if (table->id == EF4_FARCH_FILTER_TABLE_TX_MAC)
+ ef4_farch_filter_push_tx_limits(efx);
+ else
+ ef4_farch_filter_push_rx_config(efx);
+ }
+}
+
+static int ef4_farch_filter_remove(struct ef4_nic *efx,
+ struct ef4_farch_filter_table *table,
+ unsigned int filter_idx,
+ enum ef4_filter_priority priority)
+{
+ struct ef4_farch_filter_spec *spec = &table->spec[filter_idx];
+
+ if (!test_bit(filter_idx, table->used_bitmap) ||
+ spec->priority != priority)
+ return -ENOENT;
+
+ if (spec->flags & EF4_FILTER_FLAG_RX_OVER_AUTO) {
+ ef4_farch_filter_init_rx_auto(efx, spec);
+ ef4_farch_filter_push_rx_config(efx);
+ } else {
+ ef4_farch_filter_table_clear_entry(efx, table, filter_idx);
+ }
+
+ return 0;
+}
+
+int ef4_farch_filter_remove_safe(struct ef4_nic *efx,
+ enum ef4_filter_priority priority,
+ u32 filter_id)
+{
+ struct ef4_farch_filter_state *state = efx->filter_state;
+ enum ef4_farch_filter_table_id table_id;
+ struct ef4_farch_filter_table *table;
+ unsigned int filter_idx;
+ struct ef4_farch_filter_spec *spec;
+ int rc;
+
+ table_id = ef4_farch_filter_id_table_id(filter_id);
+ if ((unsigned int)table_id >= EF4_FARCH_FILTER_TABLE_COUNT)
+ return -ENOENT;
+ table = &state->table[table_id];
+
+ filter_idx = ef4_farch_filter_id_index(filter_id);
+ if (filter_idx >= table->size)
+ return -ENOENT;
+ spec = &table->spec[filter_idx];
+
+ spin_lock_bh(&efx->filter_lock);
+ rc = ef4_farch_filter_remove(efx, table, filter_idx, priority);
+ spin_unlock_bh(&efx->filter_lock);
+
+ return rc;
+}
+
+int ef4_farch_filter_get_safe(struct ef4_nic *efx,
+ enum ef4_filter_priority priority,
+ u32 filter_id, struct ef4_filter_spec *spec_buf)
+{
+ struct ef4_farch_filter_state *state = efx->filter_state;
+ enum ef4_farch_filter_table_id table_id;
+ struct ef4_farch_filter_table *table;
+ struct ef4_farch_filter_spec *spec;
+ unsigned int filter_idx;
+ int rc;
+
+ table_id = ef4_farch_filter_id_table_id(filter_id);
+ if ((unsigned int)table_id >= EF4_FARCH_FILTER_TABLE_COUNT)
+ return -ENOENT;
+ table = &state->table[table_id];
+
+ filter_idx = ef4_farch_filter_id_index(filter_id);
+ if (filter_idx >= table->size)
+ return -ENOENT;
+ spec = &table->spec[filter_idx];
+
+ spin_lock_bh(&efx->filter_lock);
+
+ if (test_bit(filter_idx, table->used_bitmap) &&
+ spec->priority == priority) {
+ ef4_farch_filter_to_gen_spec(spec_buf, spec);
+ rc = 0;
+ } else {
+ rc = -ENOENT;
+ }
+
+ spin_unlock_bh(&efx->filter_lock);
+
+ return rc;
+}
+
+static void
+ef4_farch_filter_table_clear(struct ef4_nic *efx,
+ enum ef4_farch_filter_table_id table_id,
+ enum ef4_filter_priority priority)
+{
+ struct ef4_farch_filter_state *state = efx->filter_state;
+ struct ef4_farch_filter_table *table = &state->table[table_id];
+ unsigned int filter_idx;
+
+ spin_lock_bh(&efx->filter_lock);
+ for (filter_idx = 0; filter_idx < table->size; ++filter_idx) {
+ if (table->spec[filter_idx].priority != EF4_FILTER_PRI_AUTO)
+ ef4_farch_filter_remove(efx, table,
+ filter_idx, priority);
+ }
+ spin_unlock_bh(&efx->filter_lock);
+}
+
+int ef4_farch_filter_clear_rx(struct ef4_nic *efx,
+ enum ef4_filter_priority priority)
+{
+ ef4_farch_filter_table_clear(efx, EF4_FARCH_FILTER_TABLE_RX_IP,
+ priority);
+ ef4_farch_filter_table_clear(efx, EF4_FARCH_FILTER_TABLE_RX_MAC,
+ priority);
+ ef4_farch_filter_table_clear(efx, EF4_FARCH_FILTER_TABLE_RX_DEF,
+ priority);
+ return 0;
+}
+
+u32 ef4_farch_filter_count_rx_used(struct ef4_nic *efx,
+ enum ef4_filter_priority priority)
+{
+ struct ef4_farch_filter_state *state = efx->filter_state;
+ enum ef4_farch_filter_table_id table_id;
+ struct ef4_farch_filter_table *table;
+ unsigned int filter_idx;
+ u32 count = 0;
+
+ spin_lock_bh(&efx->filter_lock);
+
+ for (table_id = EF4_FARCH_FILTER_TABLE_RX_IP;
+ table_id <= EF4_FARCH_FILTER_TABLE_RX_DEF;
+ table_id++) {
+ table = &state->table[table_id];
+ for (filter_idx = 0; filter_idx < table->size; filter_idx++) {
+ if (test_bit(filter_idx, table->used_bitmap) &&
+ table->spec[filter_idx].priority == priority)
+ ++count;
+ }
+ }
+
+ spin_unlock_bh(&efx->filter_lock);
+
+ return count;
+}
+
+s32 ef4_farch_filter_get_rx_ids(struct ef4_nic *efx,
+ enum ef4_filter_priority priority,
+ u32 *buf, u32 size)
+{
+ struct ef4_farch_filter_state *state = efx->filter_state;
+ enum ef4_farch_filter_table_id table_id;
+ struct ef4_farch_filter_table *table;
+ unsigned int filter_idx;
+ s32 count = 0;
+
+ spin_lock_bh(&efx->filter_lock);
+
+ for (table_id = EF4_FARCH_FILTER_TABLE_RX_IP;
+ table_id <= EF4_FARCH_FILTER_TABLE_RX_DEF;
+ table_id++) {
+ table = &state->table[table_id];
+ for (filter_idx = 0; filter_idx < table->size; filter_idx++) {
+ if (test_bit(filter_idx, table->used_bitmap) &&
+ table->spec[filter_idx].priority == priority) {
+ if (count == size) {
+ count = -EMSGSIZE;
+ goto out;
+ }
+ buf[count++] = ef4_farch_filter_make_id(
+ &table->spec[filter_idx], filter_idx);
+ }
+ }
+ }
+out:
+ spin_unlock_bh(&efx->filter_lock);
+
+ return count;
+}
+
+/* Restore filter state after reset */
+void ef4_farch_filter_table_restore(struct ef4_nic *efx)
+{
+ struct ef4_farch_filter_state *state = efx->filter_state;
+ enum ef4_farch_filter_table_id table_id;
+ struct ef4_farch_filter_table *table;
+ ef4_oword_t filter;
+ unsigned int filter_idx;
+
+ spin_lock_bh(&efx->filter_lock);
+
+ for (table_id = 0; table_id < EF4_FARCH_FILTER_TABLE_COUNT; table_id++) {
+ table = &state->table[table_id];
+
+ /* Check whether this is a regular register table */
+ if (table->step == 0)
+ continue;
+
+ for (filter_idx = 0; filter_idx < table->size; filter_idx++) {
+ if (!test_bit(filter_idx, table->used_bitmap))
+ continue;
+ ef4_farch_filter_build(&filter, &table->spec[filter_idx]);
+ ef4_writeo(efx, &filter,
+ table->offset + table->step * filter_idx);
+ }
+ }
+
+ ef4_farch_filter_push_rx_config(efx);
+ ef4_farch_filter_push_tx_limits(efx);
+
+ spin_unlock_bh(&efx->filter_lock);
+}
+
+void ef4_farch_filter_table_remove(struct ef4_nic *efx)
+{
+ struct ef4_farch_filter_state *state = efx->filter_state;
+ enum ef4_farch_filter_table_id table_id;
+
+ for (table_id = 0; table_id < EF4_FARCH_FILTER_TABLE_COUNT; table_id++) {
+ kfree(state->table[table_id].used_bitmap);
+ vfree(state->table[table_id].spec);
+ }
+ kfree(state);
+}
+
+int ef4_farch_filter_table_probe(struct ef4_nic *efx)
+{
+ struct ef4_farch_filter_state *state;
+ struct ef4_farch_filter_table *table;
+ unsigned table_id;
+
+ state = kzalloc(sizeof(struct ef4_farch_filter_state), GFP_KERNEL);
+ if (!state)
+ return -ENOMEM;
+ efx->filter_state = state;
+
+ if (ef4_nic_rev(efx) >= EF4_REV_FALCON_B0) {
+ table = &state->table[EF4_FARCH_FILTER_TABLE_RX_IP];
+ table->id = EF4_FARCH_FILTER_TABLE_RX_IP;
+ table->offset = FR_BZ_RX_FILTER_TBL0;
+ table->size = FR_BZ_RX_FILTER_TBL0_ROWS;
+ table->step = FR_BZ_RX_FILTER_TBL0_STEP;
+ }
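+	/* Only the RX_IP table is given a size here; the RX_MAC, RX_DEF
+	 * and TX_MAC tables keep size 0 and are skipped by the loops
+	 * below.
+	 */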
+
+ for (table_id = 0; table_id < EF4_FARCH_FILTER_TABLE_COUNT; table_id++) {
+ table = &state->table[table_id];
+ if (table->size == 0)
+ continue;
+ table->used_bitmap = kcalloc(BITS_TO_LONGS(table->size),
+ sizeof(unsigned long),
+ GFP_KERNEL);
+ if (!table->used_bitmap)
+ goto fail;
+ table->spec = vzalloc(table->size * sizeof(*table->spec));
+ if (!table->spec)
+ goto fail;
+ }
+
+ table = &state->table[EF4_FARCH_FILTER_TABLE_RX_DEF];
+ if (table->size) {
+ /* RX default filters must always exist */
+ struct ef4_farch_filter_spec *spec;
+ unsigned i;
+
+ for (i = 0; i < EF4_FARCH_FILTER_SIZE_RX_DEF; i++) {
+ spec = &table->spec[i];
+ spec->type = EF4_FARCH_FILTER_UC_DEF + i;
+ ef4_farch_filter_init_rx_auto(efx, spec);
+ __set_bit(i, table->used_bitmap);
+ }
+ }
+
+ ef4_farch_filter_push_rx_config(efx);
+
+ return 0;
+
+fail:
+ ef4_farch_filter_table_remove(efx);
+ return -ENOMEM;
+}
+
+/* Update scatter enable flags for filters pointing to our own RX queues */
+void ef4_farch_filter_update_rx_scatter(struct ef4_nic *efx)
+{
+ struct ef4_farch_filter_state *state = efx->filter_state;
+ enum ef4_farch_filter_table_id table_id;
+ struct ef4_farch_filter_table *table;
+ ef4_oword_t filter;
+ unsigned int filter_idx;
+
+ spin_lock_bh(&efx->filter_lock);
+
+ for (table_id = EF4_FARCH_FILTER_TABLE_RX_IP;
+ table_id <= EF4_FARCH_FILTER_TABLE_RX_DEF;
+ table_id++) {
+ table = &state->table[table_id];
+
+ for (filter_idx = 0; filter_idx < table->size; filter_idx++) {
+ if (!test_bit(filter_idx, table->used_bitmap) ||
+ table->spec[filter_idx].dmaq_id >=
+ efx->n_rx_channels)
+ continue;
+
+ if (efx->rx_scatter)
+ table->spec[filter_idx].flags |=
+ EF4_FILTER_FLAG_RX_SCATTER;
+ else
+ table->spec[filter_idx].flags &=
+ ~EF4_FILTER_FLAG_RX_SCATTER;
+
+ if (table_id == EF4_FARCH_FILTER_TABLE_RX_DEF)
+ /* Pushed by ef4_farch_filter_push_rx_config() */
+ continue;
+
+ ef4_farch_filter_build(&filter, &table->spec[filter_idx]);
+ ef4_writeo(efx, &filter,
+ table->offset + table->step * filter_idx);
+ }
+ }
+
+ ef4_farch_filter_push_rx_config(efx);
+
+ spin_unlock_bh(&efx->filter_lock);
+}
+
+#ifdef CONFIG_RFS_ACCEL
+
+s32 ef4_farch_filter_rfs_insert(struct ef4_nic *efx,
+ struct ef4_filter_spec *gen_spec)
+{
+ return ef4_farch_filter_insert(efx, gen_spec, true);
+}
+
+bool ef4_farch_filter_rfs_expire_one(struct ef4_nic *efx, u32 flow_id,
+ unsigned int index)
+{
+ struct ef4_farch_filter_state *state = efx->filter_state;
+ struct ef4_farch_filter_table *table =
+ &state->table[EF4_FARCH_FILTER_TABLE_RX_IP];
+
+ if (test_bit(index, table->used_bitmap) &&
+ table->spec[index].priority == EF4_FILTER_PRI_HINT &&
+ rps_may_expire_flow(efx->net_dev, table->spec[index].dmaq_id,
+ flow_id, index)) {
+ ef4_farch_filter_table_clear_entry(efx, table, index);
+ return true;
+ }
+
+ return false;
+}
+
+#endif /* CONFIG_RFS_ACCEL */
+
+void ef4_farch_filter_sync_rx_mode(struct ef4_nic *efx)
+{
+ struct net_device *net_dev = efx->net_dev;
+ struct netdev_hw_addr *ha;
+ union ef4_multicast_hash *mc_hash = &efx->multicast_hash;
+ u32 crc;
+ int bit;
+
+ if (!ef4_dev_registered(efx))
+ return;
+
+ netif_addr_lock_bh(net_dev);
+
+ efx->unicast_filter = !(net_dev->flags & IFF_PROMISC);
+
+ /* Build multicast hash table */
+ if (net_dev->flags & (IFF_PROMISC | IFF_ALLMULTI)) {
+ memset(mc_hash, 0xff, sizeof(*mc_hash));
+ } else {
+ memset(mc_hash, 0x00, sizeof(*mc_hash));
+ netdev_for_each_mc_addr(ha, net_dev) {
+ crc = ether_crc_le(ETH_ALEN, ha->addr);
+ bit = crc & (EF4_MCAST_HASH_ENTRIES - 1);
+ __set_bit_le(bit, mc_hash);
+ }
+
+ /* Broadcast packets go through the multicast hash filter.
+ * ether_crc_le() of the broadcast address is 0xbe2612ff
+ * so we always add bit 0xff to the mask.
+ */
+ __set_bit_le(0xff, mc_hash);
+ }
+
+ netif_addr_unlock_bh(net_dev);
+}
diff --git a/drivers/net/ethernet/sfc/falcon/farch_regs.h b/drivers/net/ethernet/sfc/falcon/farch_regs.h
new file mode 100644
index 000000000000..8095f273d574
--- /dev/null
+++ b/drivers/net/ethernet/sfc/falcon/farch_regs.h
@@ -0,0 +1,2932 @@
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2005-2006 Fen Systems Ltd.
+ * Copyright 2006-2012 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#ifndef EF4_FARCH_REGS_H
+#define EF4_FARCH_REGS_H
+
+/*
+ * Falcon hardware architecture definitions have a name prefix following
+ * the format:
+ *
+ * F<type>_<min-rev><max-rev>_
+ *
+ * The following <type> strings are used:
+ *
+ *               MMIO register  MC register  Host memory structure
+ * -------------------------------------------------------------
+ * Address       R              MCR
+ * Bitfield      RF             MCRF         SF
+ * Enumerator    FE             MCFE         SE
+ *
+ * <min-rev> is the first revision to which the definition applies:
+ *
+ * A: Falcon A1 (SFC4000AB)
+ * B: Falcon B0 (SFC4000BA)
+ * C: Siena A0 (SFL9021AA)
+ *
+ * If the definition has been changed or removed in later revisions
+ * then <max-rev> is the last revision to which the definition applies;
+ * otherwise it is "Z".
+ */
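+/* For example, FRF_AZ_ADR_REGION0_LBN below names a register bitfield
+ * (RF) valid from Falcon A1 (A) onwards (Z); the _LBN and _WIDTH
+ * suffixes give a field's least-significant bit number and width.
+ */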
+
+/**************************************************************************
+ *
+ * Falcon/Siena registers and descriptors
+ *
+ **************************************************************************
+ */
+
+/* ADR_REGION_REG: Address region register */
+#define FR_AZ_ADR_REGION 0x00000000
+#define FRF_AZ_ADR_REGION3_LBN 96
+#define FRF_AZ_ADR_REGION3_WIDTH 18
+#define FRF_AZ_ADR_REGION2_LBN 64
+#define FRF_AZ_ADR_REGION2_WIDTH 18
+#define FRF_AZ_ADR_REGION1_LBN 32
+#define FRF_AZ_ADR_REGION1_WIDTH 18
+#define FRF_AZ_ADR_REGION0_LBN 0
+#define FRF_AZ_ADR_REGION0_WIDTH 18
+
+/* INT_EN_REG_KER: Kernel driver Interrupt enable register */
+#define FR_AZ_INT_EN_KER 0x00000010
+#define FRF_AZ_KER_INT_LEVE_SEL_LBN 8
+#define FRF_AZ_KER_INT_LEVE_SEL_WIDTH 6
+#define FRF_AZ_KER_INT_CHAR_LBN 4
+#define FRF_AZ_KER_INT_CHAR_WIDTH 1
+#define FRF_AZ_KER_INT_KER_LBN 3
+#define FRF_AZ_KER_INT_KER_WIDTH 1
+#define FRF_AZ_DRV_INT_EN_KER_LBN 0
+#define FRF_AZ_DRV_INT_EN_KER_WIDTH 1
+
+/* INT_EN_REG_CHAR: Char Driver interrupt enable register */
+#define FR_BZ_INT_EN_CHAR 0x00000020
+#define FRF_BZ_CHAR_INT_LEVE_SEL_LBN 8
+#define FRF_BZ_CHAR_INT_LEVE_SEL_WIDTH 6
+#define FRF_BZ_CHAR_INT_CHAR_LBN 4
+#define FRF_BZ_CHAR_INT_CHAR_WIDTH 1
+#define FRF_BZ_CHAR_INT_KER_LBN 3
+#define FRF_BZ_CHAR_INT_KER_WIDTH 1
+#define FRF_BZ_DRV_INT_EN_CHAR_LBN 0
+#define FRF_BZ_DRV_INT_EN_CHAR_WIDTH 1
+
+/* INT_ADR_REG_KER: Interrupt host address for Kernel driver */
+#define FR_AZ_INT_ADR_KER 0x00000030
+#define FRF_AZ_NORM_INT_VEC_DIS_KER_LBN 64
+#define FRF_AZ_NORM_INT_VEC_DIS_KER_WIDTH 1
+#define FRF_AZ_INT_ADR_KER_LBN 0
+#define FRF_AZ_INT_ADR_KER_WIDTH 64
+
+/* INT_ADR_REG_CHAR: Interrupt host address for Char driver */
+#define FR_BZ_INT_ADR_CHAR 0x00000040
+#define FRF_BZ_NORM_INT_VEC_DIS_CHAR_LBN 64
+#define FRF_BZ_NORM_INT_VEC_DIS_CHAR_WIDTH 1
+#define FRF_BZ_INT_ADR_CHAR_LBN 0
+#define FRF_BZ_INT_ADR_CHAR_WIDTH 64
+
+/* INT_ACK_KER: Kernel interrupt acknowledge register */
+#define FR_AA_INT_ACK_KER 0x00000050
+#define FRF_AA_INT_ACK_KER_FIELD_LBN 0
+#define FRF_AA_INT_ACK_KER_FIELD_WIDTH 32
+
+/* INT_ISR0_REG: Function 0 Interrupt Acknowledge Status register */
+#define FR_BZ_INT_ISR0 0x00000090
+#define FRF_BZ_INT_ISR_REG_LBN 0
+#define FRF_BZ_INT_ISR_REG_WIDTH 64
+
+/* HW_INIT_REG: Hardware initialization register */
+#define FR_AZ_HW_INIT 0x000000c0
+#define FRF_BB_BDMRD_CPLF_FULL_LBN 124
+#define FRF_BB_BDMRD_CPLF_FULL_WIDTH 1
+#define FRF_BB_PCIE_CPL_TIMEOUT_CTRL_LBN 121
+#define FRF_BB_PCIE_CPL_TIMEOUT_CTRL_WIDTH 3
+#define FRF_CZ_TX_MRG_TAGS_LBN 120
+#define FRF_CZ_TX_MRG_TAGS_WIDTH 1
+#define FRF_AB_TRGT_MASK_ALL_LBN 100
+#define FRF_AB_TRGT_MASK_ALL_WIDTH 1
+#define FRF_AZ_DOORBELL_DROP_LBN 92
+#define FRF_AZ_DOORBELL_DROP_WIDTH 8
+#define FRF_AB_TX_RREQ_MASK_EN_LBN 76
+#define FRF_AB_TX_RREQ_MASK_EN_WIDTH 1
+#define FRF_AB_PE_EIDLE_DIS_LBN 75
+#define FRF_AB_PE_EIDLE_DIS_WIDTH 1
+#define FRF_AA_FC_BLOCKING_EN_LBN 45
+#define FRF_AA_FC_BLOCKING_EN_WIDTH 1
+#define FRF_BZ_B2B_REQ_EN_LBN 45
+#define FRF_BZ_B2B_REQ_EN_WIDTH 1
+#define FRF_AA_B2B_REQ_EN_LBN 44
+#define FRF_AA_B2B_REQ_EN_WIDTH 1
+#define FRF_BB_FC_BLOCKING_EN_LBN 44
+#define FRF_BB_FC_BLOCKING_EN_WIDTH 1
+#define FRF_AZ_POST_WR_MASK_LBN 40
+#define FRF_AZ_POST_WR_MASK_WIDTH 4
+#define FRF_AZ_TLP_TC_LBN 34
+#define FRF_AZ_TLP_TC_WIDTH 3
+#define FRF_AZ_TLP_ATTR_LBN 32
+#define FRF_AZ_TLP_ATTR_WIDTH 2
+#define FRF_AB_INTB_VEC_LBN 24
+#define FRF_AB_INTB_VEC_WIDTH 5
+#define FRF_AB_INTA_VEC_LBN 16
+#define FRF_AB_INTA_VEC_WIDTH 5
+#define FRF_AZ_WD_TIMER_LBN 8
+#define FRF_AZ_WD_TIMER_WIDTH 8
+#define FRF_AZ_US_DISABLE_LBN 5
+#define FRF_AZ_US_DISABLE_WIDTH 1
+#define FRF_AZ_TLP_EP_LBN 4
+#define FRF_AZ_TLP_EP_WIDTH 1
+#define FRF_AZ_ATTR_SEL_LBN 3
+#define FRF_AZ_ATTR_SEL_WIDTH 1
+#define FRF_AZ_TD_SEL_LBN 1
+#define FRF_AZ_TD_SEL_WIDTH 1
+#define FRF_AZ_TLP_TD_LBN 0
+#define FRF_AZ_TLP_TD_WIDTH 1
+
+/* EE_SPI_HCMD_REG: SPI host command register */
+#define FR_AB_EE_SPI_HCMD 0x00000100
+#define FRF_AB_EE_SPI_HCMD_CMD_EN_LBN 31
+#define FRF_AB_EE_SPI_HCMD_CMD_EN_WIDTH 1
+#define FRF_AB_EE_WR_TIMER_ACTIVE_LBN 28
+#define FRF_AB_EE_WR_TIMER_ACTIVE_WIDTH 1
+#define FRF_AB_EE_SPI_HCMD_SF_SEL_LBN 24
+#define FRF_AB_EE_SPI_HCMD_SF_SEL_WIDTH 1
+#define FRF_AB_EE_SPI_HCMD_DABCNT_LBN 16
+#define FRF_AB_EE_SPI_HCMD_DABCNT_WIDTH 5
+#define FRF_AB_EE_SPI_HCMD_READ_LBN 15
+#define FRF_AB_EE_SPI_HCMD_READ_WIDTH 1
+#define FRF_AB_EE_SPI_HCMD_DUBCNT_LBN 12
+#define FRF_AB_EE_SPI_HCMD_DUBCNT_WIDTH 2
+#define FRF_AB_EE_SPI_HCMD_ADBCNT_LBN 8
+#define FRF_AB_EE_SPI_HCMD_ADBCNT_WIDTH 2
+#define FRF_AB_EE_SPI_HCMD_ENC_LBN 0
+#define FRF_AB_EE_SPI_HCMD_ENC_WIDTH 8
+
+/* USR_EV_CFG: User Level Event Configuration register */
+#define FR_CZ_USR_EV_CFG 0x00000100
+#define FRF_CZ_USREV_DIS_LBN 16
+#define FRF_CZ_USREV_DIS_WIDTH 1
+#define FRF_CZ_DFLT_EVQ_LBN 0
+#define FRF_CZ_DFLT_EVQ_WIDTH 10
+
+/* EE_SPI_HADR_REG: SPI host address register */
+#define FR_AB_EE_SPI_HADR 0x00000110
+#define FRF_AB_EE_SPI_HADR_DUBYTE_LBN 24
+#define FRF_AB_EE_SPI_HADR_DUBYTE_WIDTH 8
+#define FRF_AB_EE_SPI_HADR_ADR_LBN 0
+#define FRF_AB_EE_SPI_HADR_ADR_WIDTH 24
+
+/* EE_SPI_HDATA_REG: SPI host data register */
+#define FR_AB_EE_SPI_HDATA 0x00000120
+#define FRF_AB_EE_SPI_HDATA3_LBN 96
+#define FRF_AB_EE_SPI_HDATA3_WIDTH 32
+#define FRF_AB_EE_SPI_HDATA2_LBN 64
+#define FRF_AB_EE_SPI_HDATA2_WIDTH 32
+#define FRF_AB_EE_SPI_HDATA1_LBN 32
+#define FRF_AB_EE_SPI_HDATA1_WIDTH 32
+#define FRF_AB_EE_SPI_HDATA0_LBN 0
+#define FRF_AB_EE_SPI_HDATA0_WIDTH 32
+
+/* EE_BASE_PAGE_REG: Expansion ROM base mirror register */
+#define FR_AB_EE_BASE_PAGE 0x00000130
+#define FRF_AB_EE_EXPROM_MASK_LBN 16
+#define FRF_AB_EE_EXPROM_MASK_WIDTH 13
+#define FRF_AB_EE_EXP_ROM_WINDOW_BASE_LBN 0
+#define FRF_AB_EE_EXP_ROM_WINDOW_BASE_WIDTH 13
+
+/* EE_VPD_CFG0_REG: SPI/VPD configuration register 0 */
+#define FR_AB_EE_VPD_CFG0 0x00000140
+#define FRF_AB_EE_SF_FASTRD_EN_LBN 127
+#define FRF_AB_EE_SF_FASTRD_EN_WIDTH 1
+#define FRF_AB_EE_SF_CLOCK_DIV_LBN 120
+#define FRF_AB_EE_SF_CLOCK_DIV_WIDTH 7
+#define FRF_AB_EE_VPD_WIP_POLL_LBN 119
+#define FRF_AB_EE_VPD_WIP_POLL_WIDTH 1
+#define FRF_AB_EE_EE_CLOCK_DIV_LBN 112
+#define FRF_AB_EE_EE_CLOCK_DIV_WIDTH 7
+#define FRF_AB_EE_EE_WR_TMR_VALUE_LBN 96
+#define FRF_AB_EE_EE_WR_TMR_VALUE_WIDTH 16
+#define FRF_AB_EE_VPDW_LENGTH_LBN 80
+#define FRF_AB_EE_VPDW_LENGTH_WIDTH 15
+#define FRF_AB_EE_VPDW_BASE_LBN 64
+#define FRF_AB_EE_VPDW_BASE_WIDTH 15
+#define FRF_AB_EE_VPD_WR_CMD_EN_LBN 56
+#define FRF_AB_EE_VPD_WR_CMD_EN_WIDTH 8
+#define FRF_AB_EE_VPD_BASE_LBN 32
+#define FRF_AB_EE_VPD_BASE_WIDTH 24
+#define FRF_AB_EE_VPD_LENGTH_LBN 16
+#define FRF_AB_EE_VPD_LENGTH_WIDTH 15
+#define FRF_AB_EE_VPD_AD_SIZE_LBN 8
+#define FRF_AB_EE_VPD_AD_SIZE_WIDTH 5
+#define FRF_AB_EE_VPD_ACCESS_ON_LBN 5
+#define FRF_AB_EE_VPD_ACCESS_ON_WIDTH 1
+#define FRF_AB_EE_VPD_ACCESS_BLOCK_LBN 4
+#define FRF_AB_EE_VPD_ACCESS_BLOCK_WIDTH 1
+#define FRF_AB_EE_VPD_DEV_SF_SEL_LBN 2
+#define FRF_AB_EE_VPD_DEV_SF_SEL_WIDTH 1
+#define FRF_AB_EE_VPD_EN_AD9_MODE_LBN 1
+#define FRF_AB_EE_VPD_EN_AD9_MODE_WIDTH 1
+#define FRF_AB_EE_VPD_EN_LBN 0
+#define FRF_AB_EE_VPD_EN_WIDTH 1
+
+/* EE_VPD_SW_CNTL_REG: VPD access SW control register */
+#define FR_AB_EE_VPD_SW_CNTL 0x00000150
+#define FRF_AB_EE_VPD_CYCLE_PENDING_LBN 31
+#define FRF_AB_EE_VPD_CYCLE_PENDING_WIDTH 1
+#define FRF_AB_EE_VPD_CYC_WRITE_LBN 28
+#define FRF_AB_EE_VPD_CYC_WRITE_WIDTH 1
+#define FRF_AB_EE_VPD_CYC_ADR_LBN 0
+#define FRF_AB_EE_VPD_CYC_ADR_WIDTH 15
+
+/* EE_VPD_SW_DATA_REG: VPD access SW data register */
+#define FR_AB_EE_VPD_SW_DATA 0x00000160
+#define FRF_AB_EE_VPD_CYC_DAT_LBN 0
+#define FRF_AB_EE_VPD_CYC_DAT_WIDTH 32
+
+/* PBMX_DBG_IADDR_REG: Capture Module address register */
+#define FR_CZ_PBMX_DBG_IADDR 0x000001f0
+#define FRF_CZ_PBMX_DBG_IADDR_LBN 0
+#define FRF_CZ_PBMX_DBG_IADDR_WIDTH 32
+
+/* PCIE_CORE_INDIRECT_REG: Indirect Access to PCIE Core registers */
+#define FR_BB_PCIE_CORE_INDIRECT 0x000001f0
+#define FRF_BB_PCIE_CORE_TARGET_DATA_LBN 32
+#define FRF_BB_PCIE_CORE_TARGET_DATA_WIDTH 32
+#define FRF_BB_PCIE_CORE_INDIRECT_ACCESS_DIR_LBN 15
+#define FRF_BB_PCIE_CORE_INDIRECT_ACCESS_DIR_WIDTH 1
+#define FRF_BB_PCIE_CORE_TARGET_REG_ADRS_LBN 0
+#define FRF_BB_PCIE_CORE_TARGET_REG_ADRS_WIDTH 12
+
+/* PBMX_DBG_IDATA_REG: Capture Module data register */
+#define FR_CZ_PBMX_DBG_IDATA 0x000001f8
+#define FRF_CZ_PBMX_DBG_IDATA_LBN 0
+#define FRF_CZ_PBMX_DBG_IDATA_WIDTH 64
+
+/* NIC_STAT_REG: NIC status register */
+#define FR_AB_NIC_STAT 0x00000200
+#define FRF_BB_AER_DIS_LBN 34
+#define FRF_BB_AER_DIS_WIDTH 1
+#define FRF_BB_EE_STRAP_EN_LBN 31
+#define FRF_BB_EE_STRAP_EN_WIDTH 1
+#define FRF_BB_EE_STRAP_LBN 24
+#define FRF_BB_EE_STRAP_WIDTH 4
+#define FRF_BB_REVISION_ID_LBN 17
+#define FRF_BB_REVISION_ID_WIDTH 7
+#define FRF_AB_ONCHIP_SRAM_LBN 16
+#define FRF_AB_ONCHIP_SRAM_WIDTH 1
+#define FRF_AB_SF_PRST_LBN 9
+#define FRF_AB_SF_PRST_WIDTH 1
+#define FRF_AB_EE_PRST_LBN 8
+#define FRF_AB_EE_PRST_WIDTH 1
+#define FRF_AB_ATE_MODE_LBN 3
+#define FRF_AB_ATE_MODE_WIDTH 1
+#define FRF_AB_STRAP_PINS_LBN 0
+#define FRF_AB_STRAP_PINS_WIDTH 3
+
+/* GPIO_CTL_REG: GPIO control register */
+#define FR_AB_GPIO_CTL 0x00000210
+#define FRF_AB_GPIO_OUT3_LBN 112
+#define FRF_AB_GPIO_OUT3_WIDTH 16
+#define FRF_AB_GPIO_IN3_LBN 104
+#define FRF_AB_GPIO_IN3_WIDTH 8
+#define FRF_AB_GPIO_PWRUP_VALUE3_LBN 96
+#define FRF_AB_GPIO_PWRUP_VALUE3_WIDTH 8
+#define FRF_AB_GPIO_OUT2_LBN 80
+#define FRF_AB_GPIO_OUT2_WIDTH 16
+#define FRF_AB_GPIO_IN2_LBN 72
+#define FRF_AB_GPIO_IN2_WIDTH 8
+#define FRF_AB_GPIO_PWRUP_VALUE2_LBN 64
+#define FRF_AB_GPIO_PWRUP_VALUE2_WIDTH 8
+#define FRF_AB_GPIO15_OEN_LBN 63
+#define FRF_AB_GPIO15_OEN_WIDTH 1
+#define FRF_AB_GPIO14_OEN_LBN 62
+#define FRF_AB_GPIO14_OEN_WIDTH 1
+#define FRF_AB_GPIO13_OEN_LBN 61
+#define FRF_AB_GPIO13_OEN_WIDTH 1
+#define FRF_AB_GPIO12_OEN_LBN 60
+#define FRF_AB_GPIO12_OEN_WIDTH 1
+#define FRF_AB_GPIO11_OEN_LBN 59
+#define FRF_AB_GPIO11_OEN_WIDTH 1
+#define FRF_AB_GPIO10_OEN_LBN 58
+#define FRF_AB_GPIO10_OEN_WIDTH 1
+#define FRF_AB_GPIO9_OEN_LBN 57
+#define FRF_AB_GPIO9_OEN_WIDTH 1
+#define FRF_AB_GPIO8_OEN_LBN 56
+#define FRF_AB_GPIO8_OEN_WIDTH 1
+#define FRF_AB_GPIO15_OUT_LBN 55
+#define FRF_AB_GPIO15_OUT_WIDTH 1
+#define FRF_AB_GPIO14_OUT_LBN 54
+#define FRF_AB_GPIO14_OUT_WIDTH 1
+#define FRF_AB_GPIO13_OUT_LBN 53
+#define FRF_AB_GPIO13_OUT_WIDTH 1
+#define FRF_AB_GPIO12_OUT_LBN 52
+#define FRF_AB_GPIO12_OUT_WIDTH 1
+#define FRF_AB_GPIO11_OUT_LBN 51
+#define FRF_AB_GPIO11_OUT_WIDTH 1
+#define FRF_AB_GPIO10_OUT_LBN 50
+#define FRF_AB_GPIO10_OUT_WIDTH 1
+#define FRF_AB_GPIO9_OUT_LBN 49
+#define FRF_AB_GPIO9_OUT_WIDTH 1
+#define FRF_AB_GPIO8_OUT_LBN 48
+#define FRF_AB_GPIO8_OUT_WIDTH 1
+#define FRF_AB_GPIO15_IN_LBN 47
+#define FRF_AB_GPIO15_IN_WIDTH 1
+#define FRF_AB_GPIO14_IN_LBN 46
+#define FRF_AB_GPIO14_IN_WIDTH 1
+#define FRF_AB_GPIO13_IN_LBN 45
+#define FRF_AB_GPIO13_IN_WIDTH 1
+#define FRF_AB_GPIO12_IN_LBN 44
+#define FRF_AB_GPIO12_IN_WIDTH 1
+#define FRF_AB_GPIO11_IN_LBN 43
+#define FRF_AB_GPIO11_IN_WIDTH 1
+#define FRF_AB_GPIO10_IN_LBN 42
+#define FRF_AB_GPIO10_IN_WIDTH 1
+#define FRF_AB_GPIO9_IN_LBN 41
+#define FRF_AB_GPIO9_IN_WIDTH 1
+#define FRF_AB_GPIO8_IN_LBN 40
+#define FRF_AB_GPIO8_IN_WIDTH 1
+#define FRF_AB_GPIO15_PWRUP_VALUE_LBN 39
+#define FRF_AB_GPIO15_PWRUP_VALUE_WIDTH 1
+#define FRF_AB_GPIO14_PWRUP_VALUE_LBN 38
+#define FRF_AB_GPIO14_PWRUP_VALUE_WIDTH 1
+#define FRF_AB_GPIO13_PWRUP_VALUE_LBN 37
+#define FRF_AB_GPIO13_PWRUP_VALUE_WIDTH 1
+#define FRF_AB_GPIO12_PWRUP_VALUE_LBN 36
+#define FRF_AB_GPIO12_PWRUP_VALUE_WIDTH 1
+#define FRF_AB_GPIO11_PWRUP_VALUE_LBN 35
+#define FRF_AB_GPIO11_PWRUP_VALUE_WIDTH 1
+#define FRF_AB_GPIO10_PWRUP_VALUE_LBN 34
+#define FRF_AB_GPIO10_PWRUP_VALUE_WIDTH 1
+#define FRF_AB_GPIO9_PWRUP_VALUE_LBN 33
+#define FRF_AB_GPIO9_PWRUP_VALUE_WIDTH 1
+#define FRF_AB_GPIO8_PWRUP_VALUE_LBN 32
+#define FRF_AB_GPIO8_PWRUP_VALUE_WIDTH 1
+#define FRF_AB_CLK156_OUT_EN_LBN 31
+#define FRF_AB_CLK156_OUT_EN_WIDTH 1
+#define FRF_AB_USE_NIC_CLK_LBN 30
+#define FRF_AB_USE_NIC_CLK_WIDTH 1
+#define FRF_AB_GPIO5_OEN_LBN 29
+#define FRF_AB_GPIO5_OEN_WIDTH 1
+#define FRF_AB_GPIO4_OEN_LBN 28
+#define FRF_AB_GPIO4_OEN_WIDTH 1
+#define FRF_AB_GPIO3_OEN_LBN 27
+#define FRF_AB_GPIO3_OEN_WIDTH 1
+#define FRF_AB_GPIO2_OEN_LBN 26
+#define FRF_AB_GPIO2_OEN_WIDTH 1
+#define FRF_AB_GPIO1_OEN_LBN 25
+#define FRF_AB_GPIO1_OEN_WIDTH 1
+#define FRF_AB_GPIO0_OEN_LBN 24
+#define FRF_AB_GPIO0_OEN_WIDTH 1
+#define FRF_AB_GPIO7_OUT_LBN 23
+#define FRF_AB_GPIO7_OUT_WIDTH 1
+#define FRF_AB_GPIO6_OUT_LBN 22
+#define FRF_AB_GPIO6_OUT_WIDTH 1
+#define FRF_AB_GPIO5_OUT_LBN 21
+#define FRF_AB_GPIO5_OUT_WIDTH 1
+#define FRF_AB_GPIO4_OUT_LBN 20
+#define FRF_AB_GPIO4_OUT_WIDTH 1
+#define FRF_AB_GPIO3_OUT_LBN 19
+#define FRF_AB_GPIO3_OUT_WIDTH 1
+#define FRF_AB_GPIO2_OUT_LBN 18
+#define FRF_AB_GPIO2_OUT_WIDTH 1
+#define FRF_AB_GPIO1_OUT_LBN 17
+#define FRF_AB_GPIO1_OUT_WIDTH 1
+#define FRF_AB_GPIO0_OUT_LBN 16
+#define FRF_AB_GPIO0_OUT_WIDTH 1
+#define FRF_AB_GPIO7_IN_LBN 15
+#define FRF_AB_GPIO7_IN_WIDTH 1
+#define FRF_AB_GPIO6_IN_LBN 14
+#define FRF_AB_GPIO6_IN_WIDTH 1
+#define FRF_AB_GPIO5_IN_LBN 13
+#define FRF_AB_GPIO5_IN_WIDTH 1
+#define FRF_AB_GPIO4_IN_LBN 12
+#define FRF_AB_GPIO4_IN_WIDTH 1
+#define FRF_AB_GPIO3_IN_LBN 11
+#define FRF_AB_GPIO3_IN_WIDTH 1
+#define FRF_AB_GPIO2_IN_LBN 10
+#define FRF_AB_GPIO2_IN_WIDTH 1
+#define FRF_AB_GPIO1_IN_LBN 9
+#define FRF_AB_GPIO1_IN_WIDTH 1
+#define FRF_AB_GPIO0_IN_LBN 8
+#define FRF_AB_GPIO0_IN_WIDTH 1
+#define FRF_AB_GPIO7_PWRUP_VALUE_LBN 7
+#define FRF_AB_GPIO7_PWRUP_VALUE_WIDTH 1
+#define FRF_AB_GPIO6_PWRUP_VALUE_LBN 6
+#define FRF_AB_GPIO6_PWRUP_VALUE_WIDTH 1
+#define FRF_AB_GPIO5_PWRUP_VALUE_LBN 5
+#define FRF_AB_GPIO5_PWRUP_VALUE_WIDTH 1
+#define FRF_AB_GPIO4_PWRUP_VALUE_LBN 4
+#define FRF_AB_GPIO4_PWRUP_VALUE_WIDTH 1
+#define FRF_AB_GPIO3_PWRUP_VALUE_LBN 3
+#define FRF_AB_GPIO3_PWRUP_VALUE_WIDTH 1
+#define FRF_AB_GPIO2_PWRUP_VALUE_LBN 2
+#define FRF_AB_GPIO2_PWRUP_VALUE_WIDTH 1
+#define FRF_AB_GPIO1_PWRUP_VALUE_LBN 1
+#define FRF_AB_GPIO1_PWRUP_VALUE_WIDTH 1
+#define FRF_AB_GPIO0_PWRUP_VALUE_LBN 0
+#define FRF_AB_GPIO0_PWRUP_VALUE_WIDTH 1
+
+/* GLB_CTL_REG: Global control register */
+#define FR_AB_GLB_CTL 0x00000220
+#define FRF_AB_EXT_PHY_RST_CTL_LBN 63
+#define FRF_AB_EXT_PHY_RST_CTL_WIDTH 1
+#define FRF_AB_XAUI_SD_RST_CTL_LBN 62
+#define FRF_AB_XAUI_SD_RST_CTL_WIDTH 1
+#define FRF_AB_PCIE_SD_RST_CTL_LBN 61
+#define FRF_AB_PCIE_SD_RST_CTL_WIDTH 1
+#define FRF_AA_PCIX_RST_CTL_LBN 60
+#define FRF_AA_PCIX_RST_CTL_WIDTH 1
+#define FRF_BB_BIU_RST_CTL_LBN 60
+#define FRF_BB_BIU_RST_CTL_WIDTH 1
+#define FRF_AB_PCIE_STKY_RST_CTL_LBN 59
+#define FRF_AB_PCIE_STKY_RST_CTL_WIDTH 1
+#define FRF_AB_PCIE_NSTKY_RST_CTL_LBN 58
+#define FRF_AB_PCIE_NSTKY_RST_CTL_WIDTH 1
+#define FRF_AB_PCIE_CORE_RST_CTL_LBN 57
+#define FRF_AB_PCIE_CORE_RST_CTL_WIDTH 1
+#define FRF_AB_XGRX_RST_CTL_LBN 56
+#define FRF_AB_XGRX_RST_CTL_WIDTH 1
+#define FRF_AB_XGTX_RST_CTL_LBN 55
+#define FRF_AB_XGTX_RST_CTL_WIDTH 1
+#define FRF_AB_EM_RST_CTL_LBN 54
+#define FRF_AB_EM_RST_CTL_WIDTH 1
+#define FRF_AB_EV_RST_CTL_LBN 53
+#define FRF_AB_EV_RST_CTL_WIDTH 1
+#define FRF_AB_SR_RST_CTL_LBN 52
+#define FRF_AB_SR_RST_CTL_WIDTH 1
+#define FRF_AB_RX_RST_CTL_LBN 51
+#define FRF_AB_RX_RST_CTL_WIDTH 1
+#define FRF_AB_TX_RST_CTL_LBN 50
+#define FRF_AB_TX_RST_CTL_WIDTH 1
+#define FRF_AB_EE_RST_CTL_LBN 49
+#define FRF_AB_EE_RST_CTL_WIDTH 1
+#define FRF_AB_CS_RST_CTL_LBN 48
+#define FRF_AB_CS_RST_CTL_WIDTH 1
+#define FRF_AB_HOT_RST_CTL_LBN 40
+#define FRF_AB_HOT_RST_CTL_WIDTH 2
+#define FRF_AB_RST_EXT_PHY_LBN 31
+#define FRF_AB_RST_EXT_PHY_WIDTH 1
+#define FRF_AB_RST_XAUI_SD_LBN 30
+#define FRF_AB_RST_XAUI_SD_WIDTH 1
+#define FRF_AB_RST_PCIE_SD_LBN 29
+#define FRF_AB_RST_PCIE_SD_WIDTH 1
+#define FRF_AA_RST_PCIX_LBN 28
+#define FRF_AA_RST_PCIX_WIDTH 1
+#define FRF_BB_RST_BIU_LBN 28
+#define FRF_BB_RST_BIU_WIDTH 1
+#define FRF_AB_RST_PCIE_STKY_LBN 27
+#define FRF_AB_RST_PCIE_STKY_WIDTH 1
+#define FRF_AB_RST_PCIE_NSTKY_LBN 26
+#define FRF_AB_RST_PCIE_NSTKY_WIDTH 1
+#define FRF_AB_RST_PCIE_CORE_LBN 25
+#define FRF_AB_RST_PCIE_CORE_WIDTH 1
+#define FRF_AB_RST_XGRX_LBN 24
+#define FRF_AB_RST_XGRX_WIDTH 1
+#define FRF_AB_RST_XGTX_LBN 23
+#define FRF_AB_RST_XGTX_WIDTH 1
+#define FRF_AB_RST_EM_LBN 22
+#define FRF_AB_RST_EM_WIDTH 1
+#define FRF_AB_RST_EV_LBN 21
+#define FRF_AB_RST_EV_WIDTH 1
+#define FRF_AB_RST_SR_LBN 20
+#define FRF_AB_RST_SR_WIDTH 1
+#define FRF_AB_RST_RX_LBN 19
+#define FRF_AB_RST_RX_WIDTH 1
+#define FRF_AB_RST_TX_LBN 18
+#define FRF_AB_RST_TX_WIDTH 1
+#define FRF_AB_RST_SF_LBN 17
+#define FRF_AB_RST_SF_WIDTH 1
+#define FRF_AB_RST_CS_LBN 16
+#define FRF_AB_RST_CS_WIDTH 1
+#define FRF_AB_INT_RST_DUR_LBN 4
+#define FRF_AB_INT_RST_DUR_WIDTH 3
+#define FRF_AB_EXT_PHY_RST_DUR_LBN 1
+#define FRF_AB_EXT_PHY_RST_DUR_WIDTH 3
+#define FFE_AB_EXT_PHY_RST_DUR_10240US 7
+#define FFE_AB_EXT_PHY_RST_DUR_5120US 6
+#define FFE_AB_EXT_PHY_RST_DUR_2560US 5
+#define FFE_AB_EXT_PHY_RST_DUR_1280US 4
+#define FFE_AB_EXT_PHY_RST_DUR_640US 3
+#define FFE_AB_EXT_PHY_RST_DUR_320US 2
+#define FFE_AB_EXT_PHY_RST_DUR_160US 1
+#define FFE_AB_EXT_PHY_RST_DUR_80US 0
+#define FRF_AB_SWRST_LBN 0
+#define FRF_AB_SWRST_WIDTH 1
+
+/* FATAL_INTR_REG_KER: Fatal interrupt register for Kernel */
+#define FR_AZ_FATAL_INTR_KER 0x00000230
+#define FRF_CZ_SRAM_PERR_INT_P_KER_EN_LBN 44
+#define FRF_CZ_SRAM_PERR_INT_P_KER_EN_WIDTH 1
+#define FRF_AB_PCI_BUSERR_INT_KER_EN_LBN 43
+#define FRF_AB_PCI_BUSERR_INT_KER_EN_WIDTH 1
+#define FRF_CZ_MBU_PERR_INT_KER_EN_LBN 43
+#define FRF_CZ_MBU_PERR_INT_KER_EN_WIDTH 1
+#define FRF_AZ_SRAM_OOB_INT_KER_EN_LBN 42
+#define FRF_AZ_SRAM_OOB_INT_KER_EN_WIDTH 1
+#define FRF_AZ_BUFID_OOB_INT_KER_EN_LBN 41
+#define FRF_AZ_BUFID_OOB_INT_KER_EN_WIDTH 1
+#define FRF_AZ_MEM_PERR_INT_KER_EN_LBN 40
+#define FRF_AZ_MEM_PERR_INT_KER_EN_WIDTH 1
+#define FRF_AZ_RBUF_OWN_INT_KER_EN_LBN 39
+#define FRF_AZ_RBUF_OWN_INT_KER_EN_WIDTH 1
+#define FRF_AZ_TBUF_OWN_INT_KER_EN_LBN 38
+#define FRF_AZ_TBUF_OWN_INT_KER_EN_WIDTH 1
+#define FRF_AZ_RDESCQ_OWN_INT_KER_EN_LBN 37
+#define FRF_AZ_RDESCQ_OWN_INT_KER_EN_WIDTH 1
+#define FRF_AZ_TDESCQ_OWN_INT_KER_EN_LBN 36
+#define FRF_AZ_TDESCQ_OWN_INT_KER_EN_WIDTH 1
+#define FRF_AZ_EVQ_OWN_INT_KER_EN_LBN 35
+#define FRF_AZ_EVQ_OWN_INT_KER_EN_WIDTH 1
+#define FRF_AZ_EVF_OFLO_INT_KER_EN_LBN 34
+#define FRF_AZ_EVF_OFLO_INT_KER_EN_WIDTH 1
+#define FRF_AZ_ILL_ADR_INT_KER_EN_LBN 33
+#define FRF_AZ_ILL_ADR_INT_KER_EN_WIDTH 1
+#define FRF_AZ_SRM_PERR_INT_KER_EN_LBN 32
+#define FRF_AZ_SRM_PERR_INT_KER_EN_WIDTH 1
+#define FRF_CZ_SRAM_PERR_INT_P_KER_LBN 12
+#define FRF_CZ_SRAM_PERR_INT_P_KER_WIDTH 1
+#define FRF_AB_PCI_BUSERR_INT_KER_LBN 11
+#define FRF_AB_PCI_BUSERR_INT_KER_WIDTH 1
+#define FRF_CZ_MBU_PERR_INT_KER_LBN 11
+#define FRF_CZ_MBU_PERR_INT_KER_WIDTH 1
+#define FRF_AZ_SRAM_OOB_INT_KER_LBN 10
+#define FRF_AZ_SRAM_OOB_INT_KER_WIDTH 1
+#define FRF_AZ_BUFID_DC_OOB_INT_KER_LBN 9
+#define FRF_AZ_BUFID_DC_OOB_INT_KER_WIDTH 1
+#define FRF_AZ_MEM_PERR_INT_KER_LBN 8
+#define FRF_AZ_MEM_PERR_INT_KER_WIDTH 1
+#define FRF_AZ_RBUF_OWN_INT_KER_LBN 7
+#define FRF_AZ_RBUF_OWN_INT_KER_WIDTH 1
+#define FRF_AZ_TBUF_OWN_INT_KER_LBN 6
+#define FRF_AZ_TBUF_OWN_INT_KER_WIDTH 1
+#define FRF_AZ_RDESCQ_OWN_INT_KER_LBN 5
+#define FRF_AZ_RDESCQ_OWN_INT_KER_WIDTH 1
+#define FRF_AZ_TDESCQ_OWN_INT_KER_LBN 4
+#define FRF_AZ_TDESCQ_OWN_INT_KER_WIDTH 1
+#define FRF_AZ_EVQ_OWN_INT_KER_LBN 3
+#define FRF_AZ_EVQ_OWN_INT_KER_WIDTH 1
+#define FRF_AZ_EVF_OFLO_INT_KER_LBN 2
+#define FRF_AZ_EVF_OFLO_INT_KER_WIDTH 1
+#define FRF_AZ_ILL_ADR_INT_KER_LBN 1
+#define FRF_AZ_ILL_ADR_INT_KER_WIDTH 1
+#define FRF_AZ_SRM_PERR_INT_KER_LBN 0
+#define FRF_AZ_SRM_PERR_INT_KER_WIDTH 1
+
+/* FATAL_INTR_REG_CHAR: Fatal interrupt register for Char */
+#define FR_BZ_FATAL_INTR_CHAR 0x00000240
+#define FRF_CZ_SRAM_PERR_INT_P_CHAR_EN_LBN 44
+#define FRF_CZ_SRAM_PERR_INT_P_CHAR_EN_WIDTH 1
+#define FRF_BB_PCI_BUSERR_INT_CHAR_EN_LBN 43
+#define FRF_BB_PCI_BUSERR_INT_CHAR_EN_WIDTH 1
+#define FRF_CZ_MBU_PERR_INT_CHAR_EN_LBN 43
+#define FRF_CZ_MBU_PERR_INT_CHAR_EN_WIDTH 1
+#define FRF_BZ_SRAM_OOB_INT_CHAR_EN_LBN 42
+#define FRF_BZ_SRAM_OOB_INT_CHAR_EN_WIDTH 1
+#define FRF_BZ_BUFID_OOB_INT_CHAR_EN_LBN 41
+#define FRF_BZ_BUFID_OOB_INT_CHAR_EN_WIDTH 1
+#define FRF_BZ_MEM_PERR_INT_CHAR_EN_LBN 40
+#define FRF_BZ_MEM_PERR_INT_CHAR_EN_WIDTH 1
+#define FRF_BZ_RBUF_OWN_INT_CHAR_EN_LBN 39
+#define FRF_BZ_RBUF_OWN_INT_CHAR_EN_WIDTH 1
+#define FRF_BZ_TBUF_OWN_INT_CHAR_EN_LBN 38
+#define FRF_BZ_TBUF_OWN_INT_CHAR_EN_WIDTH 1
+#define FRF_BZ_RDESCQ_OWN_INT_CHAR_EN_LBN 37
+#define FRF_BZ_RDESCQ_OWN_INT_CHAR_EN_WIDTH 1
+#define FRF_BZ_TDESCQ_OWN_INT_CHAR_EN_LBN 36
+#define FRF_BZ_TDESCQ_OWN_INT_CHAR_EN_WIDTH 1
+#define FRF_BZ_EVQ_OWN_INT_CHAR_EN_LBN 35
+#define FRF_BZ_EVQ_OWN_INT_CHAR_EN_WIDTH 1
+#define FRF_BZ_EVF_OFLO_INT_CHAR_EN_LBN 34
+#define FRF_BZ_EVF_OFLO_INT_CHAR_EN_WIDTH 1
+#define FRF_BZ_ILL_ADR_INT_CHAR_EN_LBN 33
+#define FRF_BZ_ILL_ADR_INT_CHAR_EN_WIDTH 1
+#define FRF_BZ_SRM_PERR_INT_CHAR_EN_LBN 32
+#define FRF_BZ_SRM_PERR_INT_CHAR_EN_WIDTH 1
+#define FRF_CZ_SRAM_PERR_INT_P_CHAR_LBN 12
+#define FRF_CZ_SRAM_PERR_INT_P_CHAR_WIDTH 1
+#define FRF_BB_PCI_BUSERR_INT_CHAR_LBN 11
+#define FRF_BB_PCI_BUSERR_INT_CHAR_WIDTH 1
+#define FRF_CZ_MBU_PERR_INT_CHAR_LBN 11
+#define FRF_CZ_MBU_PERR_INT_CHAR_WIDTH 1
+#define FRF_BZ_SRAM_OOB_INT_CHAR_LBN 10
+#define FRF_BZ_SRAM_OOB_INT_CHAR_WIDTH 1
+#define FRF_BZ_BUFID_DC_OOB_INT_CHAR_LBN 9
+#define FRF_BZ_BUFID_DC_OOB_INT_CHAR_WIDTH 1
+#define FRF_BZ_MEM_PERR_INT_CHAR_LBN 8
+#define FRF_BZ_MEM_PERR_INT_CHAR_WIDTH 1
+#define FRF_BZ_RBUF_OWN_INT_CHAR_LBN 7
+#define FRF_BZ_RBUF_OWN_INT_CHAR_WIDTH 1
+#define FRF_BZ_TBUF_OWN_INT_CHAR_LBN 6
+#define FRF_BZ_TBUF_OWN_INT_CHAR_WIDTH 1
+#define FRF_BZ_RDESCQ_OWN_INT_CHAR_LBN 5
+#define FRF_BZ_RDESCQ_OWN_INT_CHAR_WIDTH 1
+#define FRF_BZ_TDESCQ_OWN_INT_CHAR_LBN 4
+#define FRF_BZ_TDESCQ_OWN_INT_CHAR_WIDTH 1
+#define FRF_BZ_EVQ_OWN_INT_CHAR_LBN 3
+#define FRF_BZ_EVQ_OWN_INT_CHAR_WIDTH 1
+#define FRF_BZ_EVF_OFLO_INT_CHAR_LBN 2
+#define FRF_BZ_EVF_OFLO_INT_CHAR_WIDTH 1
+#define FRF_BZ_ILL_ADR_INT_CHAR_LBN 1
+#define FRF_BZ_ILL_ADR_INT_CHAR_WIDTH 1
+#define FRF_BZ_SRM_PERR_INT_CHAR_LBN 0
+#define FRF_BZ_SRM_PERR_INT_CHAR_WIDTH 1
+
+/* DP_CTRL_REG: Datapath control register */
+#define FR_BZ_DP_CTRL 0x00000250
+#define FRF_BZ_FLS_EVQ_ID_LBN 0
+#define FRF_BZ_FLS_EVQ_ID_WIDTH 12
+
+/* MEM_STAT_REG: Memory status register */
+#define FR_AZ_MEM_STAT 0x00000260
+#define FRF_AB_MEM_PERR_VEC_LBN 53
+#define FRF_AB_MEM_PERR_VEC_WIDTH 38
+#define FRF_AB_MBIST_CORR_LBN 38
+#define FRF_AB_MBIST_CORR_WIDTH 15
+#define FRF_AB_MBIST_ERR_LBN 0
+#define FRF_AB_MBIST_ERR_WIDTH 38
+#define FRF_CZ_MEM_PERR_VEC_LBN 0
+#define FRF_CZ_MEM_PERR_VEC_WIDTH 35
+
+/* CS_DEBUG_REG: Debug register */
+#define FR_AZ_CS_DEBUG 0x00000270
+#define FRF_AB_GLB_DEBUG2_SEL_LBN 50
+#define FRF_AB_GLB_DEBUG2_SEL_WIDTH 3
+#define FRF_AB_DEBUG_BLK_SEL2_LBN 47
+#define FRF_AB_DEBUG_BLK_SEL2_WIDTH 3
+#define FRF_AB_DEBUG_BLK_SEL1_LBN 44
+#define FRF_AB_DEBUG_BLK_SEL1_WIDTH 3
+#define FRF_AB_DEBUG_BLK_SEL0_LBN 41
+#define FRF_AB_DEBUG_BLK_SEL0_WIDTH 3
+#define FRF_CZ_CS_PORT_NUM_LBN 40
+#define FRF_CZ_CS_PORT_NUM_WIDTH 2
+#define FRF_AB_MISC_DEBUG_ADDR_LBN 36
+#define FRF_AB_MISC_DEBUG_ADDR_WIDTH 5
+#define FRF_AB_SERDES_DEBUG_ADDR_LBN 31
+#define FRF_AB_SERDES_DEBUG_ADDR_WIDTH 5
+#define FRF_CZ_CS_PORT_FPE_LBN 1
+#define FRF_CZ_CS_PORT_FPE_WIDTH 35
+#define FRF_AB_EM_DEBUG_ADDR_LBN 26
+#define FRF_AB_EM_DEBUG_ADDR_WIDTH 5
+#define FRF_AB_SR_DEBUG_ADDR_LBN 21
+#define FRF_AB_SR_DEBUG_ADDR_WIDTH 5
+#define FRF_AB_EV_DEBUG_ADDR_LBN 16
+#define FRF_AB_EV_DEBUG_ADDR_WIDTH 5
+#define FRF_AB_RX_DEBUG_ADDR_LBN 11
+#define FRF_AB_RX_DEBUG_ADDR_WIDTH 5
+#define FRF_AB_TX_DEBUG_ADDR_LBN 6
+#define FRF_AB_TX_DEBUG_ADDR_WIDTH 5
+#define FRF_AB_CS_BIU_DEBUG_ADDR_LBN 1
+#define FRF_AB_CS_BIU_DEBUG_ADDR_WIDTH 5
+#define FRF_AZ_CS_DEBUG_EN_LBN 0
+#define FRF_AZ_CS_DEBUG_EN_WIDTH 1
+
+/* DRIVER_REG: Driver scratch register [0-7] */
+#define FR_AZ_DRIVER 0x00000280
+#define FR_AZ_DRIVER_STEP 16
+#define FR_AZ_DRIVER_ROWS 8
+#define FRF_AZ_DRIVER_DW0_LBN 0
+#define FRF_AZ_DRIVER_DW0_WIDTH 32
+
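
Definitions carrying _STEP and _ROWS suffixes, as for FR_AZ_DRIVER above, describe register arrays: _ROWS entries spaced _STEP bytes apart from the base address. A minimal sketch of the addressing rule (hypothetical helper, not part of this patch):

    #include <stdint.h>

    /* Hypothetical helper: MMIO offset of entry 'index' within a
     * register array described by base/_STEP/_ROWS definitions. */
    static inline uint32_t reg_array_offset(uint32_t base, uint32_t step,
                                            uint32_t index)
    {
            return base + index * step;
    }

    /* e.g. driver scratch register 3 sits at
     * reg_array_offset(FR_AZ_DRIVER, FR_AZ_DRIVER_STEP, 3) == 0x2b0. */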
+/* ALTERA_BUILD_REG: Altera build register */
+#define FR_AZ_ALTERA_BUILD 0x00000300
+#define FRF_AZ_ALTERA_BUILD_VER_LBN 0
+#define FRF_AZ_ALTERA_BUILD_VER_WIDTH 32
+
+/* CSR_SPARE_REG: Spare register */
+#define FR_AZ_CSR_SPARE 0x00000310
+#define FRF_AB_MEM_PERR_EN_LBN 64
+#define FRF_AB_MEM_PERR_EN_WIDTH 38
+#define FRF_CZ_MEM_PERR_EN_LBN 64
+#define FRF_CZ_MEM_PERR_EN_WIDTH 35
+#define FRF_AB_MEM_PERR_EN_TX_DATA_LBN 72
+#define FRF_AB_MEM_PERR_EN_TX_DATA_WIDTH 2
+#define FRF_AZ_CSR_SPARE_BITS_LBN 0
+#define FRF_AZ_CSR_SPARE_BITS_WIDTH 32
+
+/* PCIE_SD_CTL0123_REG: PCIE SerDes control register 0 to 3 */
+#define FR_AB_PCIE_SD_CTL0123 0x00000320
+#define FRF_AB_PCIE_TESTSIG_H_LBN 96
+#define FRF_AB_PCIE_TESTSIG_H_WIDTH 19
+#define FRF_AB_PCIE_TESTSIG_L_LBN 64
+#define FRF_AB_PCIE_TESTSIG_L_WIDTH 19
+#define FRF_AB_PCIE_OFFSET_LBN 56
+#define FRF_AB_PCIE_OFFSET_WIDTH 8
+#define FRF_AB_PCIE_OFFSETEN_H_LBN 55
+#define FRF_AB_PCIE_OFFSETEN_H_WIDTH 1
+#define FRF_AB_PCIE_OFFSETEN_L_LBN 54
+#define FRF_AB_PCIE_OFFSETEN_L_WIDTH 1
+#define FRF_AB_PCIE_HIVMODE_H_LBN 53
+#define FRF_AB_PCIE_HIVMODE_H_WIDTH 1
+#define FRF_AB_PCIE_HIVMODE_L_LBN 52
+#define FRF_AB_PCIE_HIVMODE_L_WIDTH 1
+#define FRF_AB_PCIE_PARRESET_H_LBN 51
+#define FRF_AB_PCIE_PARRESET_H_WIDTH 1
+#define FRF_AB_PCIE_PARRESET_L_LBN 50
+#define FRF_AB_PCIE_PARRESET_L_WIDTH 1
+#define FRF_AB_PCIE_LPBKWDRV_H_LBN 49
+#define FRF_AB_PCIE_LPBKWDRV_H_WIDTH 1
+#define FRF_AB_PCIE_LPBKWDRV_L_LBN 48
+#define FRF_AB_PCIE_LPBKWDRV_L_WIDTH 1
+#define FRF_AB_PCIE_LPBK_LBN 40
+#define FRF_AB_PCIE_LPBK_WIDTH 8
+#define FRF_AB_PCIE_PARLPBK_LBN 32
+#define FRF_AB_PCIE_PARLPBK_WIDTH 8
+#define FRF_AB_PCIE_RXTERMADJ_H_LBN 30
+#define FRF_AB_PCIE_RXTERMADJ_H_WIDTH 2
+#define FRF_AB_PCIE_RXTERMADJ_L_LBN 28
+#define FRF_AB_PCIE_RXTERMADJ_L_WIDTH 2
+#define FFE_AB_PCIE_RXTERMADJ_MIN15PCNT 3
+#define FFE_AB_PCIE_RXTERMADJ_PL10PCNT 2
+#define FFE_AB_PCIE_RXTERMADJ_MIN17PCNT 1
+#define FFE_AB_PCIE_RXTERMADJ_NOMNL 0
+#define FRF_AB_PCIE_TXTERMADJ_H_LBN 26
+#define FRF_AB_PCIE_TXTERMADJ_H_WIDTH 2
+#define FRF_AB_PCIE_TXTERMADJ_L_LBN 24
+#define FRF_AB_PCIE_TXTERMADJ_L_WIDTH 2
+#define FFE_AB_PCIE_TXTERMADJ_MIN15PCNT 3
+#define FFE_AB_PCIE_TXTERMADJ_PL10PCNT 2
+#define FFE_AB_PCIE_TXTERMADJ_MIN17PCNT 1
+#define FFE_AB_PCIE_TXTERMADJ_NOMNL 0
+#define FRF_AB_PCIE_RXEQCTL_H_LBN 18
+#define FRF_AB_PCIE_RXEQCTL_H_WIDTH 2
+#define FRF_AB_PCIE_RXEQCTL_L_LBN 16
+#define FRF_AB_PCIE_RXEQCTL_L_WIDTH 2
+#define FFE_AB_PCIE_RXEQCTL_OFF_ALT 3
+#define FFE_AB_PCIE_RXEQCTL_OFF 2
+#define FFE_AB_PCIE_RXEQCTL_MIN 1
+#define FFE_AB_PCIE_RXEQCTL_MAX 0
+#define FRF_AB_PCIE_HIDRV_LBN 8
+#define FRF_AB_PCIE_HIDRV_WIDTH 8
+#define FRF_AB_PCIE_LODRV_LBN 0
+#define FRF_AB_PCIE_LODRV_WIDTH 8
+
+/* PCIE_SD_CTL45_REG: PCIE SerDes control register 4 and 5 */
+#define FR_AB_PCIE_SD_CTL45 0x00000330
+#define FRF_AB_PCIE_DTX7_LBN 60
+#define FRF_AB_PCIE_DTX7_WIDTH 4
+#define FRF_AB_PCIE_DTX6_LBN 56
+#define FRF_AB_PCIE_DTX6_WIDTH 4
+#define FRF_AB_PCIE_DTX5_LBN 52
+#define FRF_AB_PCIE_DTX5_WIDTH 4
+#define FRF_AB_PCIE_DTX4_LBN 48
+#define FRF_AB_PCIE_DTX4_WIDTH 4
+#define FRF_AB_PCIE_DTX3_LBN 44
+#define FRF_AB_PCIE_DTX3_WIDTH 4
+#define FRF_AB_PCIE_DTX2_LBN 40
+#define FRF_AB_PCIE_DTX2_WIDTH 4
+#define FRF_AB_PCIE_DTX1_LBN 36
+#define FRF_AB_PCIE_DTX1_WIDTH 4
+#define FRF_AB_PCIE_DTX0_LBN 32
+#define FRF_AB_PCIE_DTX0_WIDTH 4
+#define FRF_AB_PCIE_DEQ7_LBN 28
+#define FRF_AB_PCIE_DEQ7_WIDTH 4
+#define FRF_AB_PCIE_DEQ6_LBN 24
+#define FRF_AB_PCIE_DEQ6_WIDTH 4
+#define FRF_AB_PCIE_DEQ5_LBN 20
+#define FRF_AB_PCIE_DEQ5_WIDTH 4
+#define FRF_AB_PCIE_DEQ4_LBN 16
+#define FRF_AB_PCIE_DEQ4_WIDTH 4
+#define FRF_AB_PCIE_DEQ3_LBN 12
+#define FRF_AB_PCIE_DEQ3_WIDTH 4
+#define FRF_AB_PCIE_DEQ2_LBN 8
+#define FRF_AB_PCIE_DEQ2_WIDTH 4
+#define FRF_AB_PCIE_DEQ1_LBN 4
+#define FRF_AB_PCIE_DEQ1_WIDTH 4
+#define FRF_AB_PCIE_DEQ0_LBN 0
+#define FRF_AB_PCIE_DEQ0_WIDTH 4
+
+/* PCIE_PCS_CTL_STAT_REG: PCIE PCS control and status register */
+#define FR_AB_PCIE_PCS_CTL_STAT 0x00000340
+#define FRF_AB_PCIE_PRBSERRCOUNT0_H_LBN 52
+#define FRF_AB_PCIE_PRBSERRCOUNT0_H_WIDTH 4
+#define FRF_AB_PCIE_PRBSERRCOUNT0_L_LBN 48
+#define FRF_AB_PCIE_PRBSERRCOUNT0_L_WIDTH 4
+#define FRF_AB_PCIE_PRBSERR_LBN 40
+#define FRF_AB_PCIE_PRBSERR_WIDTH 8
+#define FRF_AB_PCIE_PRBSERRH0_LBN 32
+#define FRF_AB_PCIE_PRBSERRH0_WIDTH 8
+#define FRF_AB_PCIE_FASTINIT_H_LBN 15
+#define FRF_AB_PCIE_FASTINIT_H_WIDTH 1
+#define FRF_AB_PCIE_FASTINIT_L_LBN 14
+#define FRF_AB_PCIE_FASTINIT_L_WIDTH 1
+#define FRF_AB_PCIE_CTCDISABLE_H_LBN 13
+#define FRF_AB_PCIE_CTCDISABLE_H_WIDTH 1
+#define FRF_AB_PCIE_CTCDISABLE_L_LBN 12
+#define FRF_AB_PCIE_CTCDISABLE_L_WIDTH 1
+#define FRF_AB_PCIE_PRBSSYNC_H_LBN 11
+#define FRF_AB_PCIE_PRBSSYNC_H_WIDTH 1
+#define FRF_AB_PCIE_PRBSSYNC_L_LBN 10
+#define FRF_AB_PCIE_PRBSSYNC_L_WIDTH 1
+#define FRF_AB_PCIE_PRBSERRACK_H_LBN 9
+#define FRF_AB_PCIE_PRBSERRACK_H_WIDTH 1
+#define FRF_AB_PCIE_PRBSERRACK_L_LBN 8
+#define FRF_AB_PCIE_PRBSERRACK_L_WIDTH 1
+#define FRF_AB_PCIE_PRBSSEL_LBN 0
+#define FRF_AB_PCIE_PRBSSEL_WIDTH 8
+
+/* DEBUG_DATA_OUT_REG: Live Debug and Debug 2 out ports */
+#define FR_BB_DEBUG_DATA_OUT 0x00000350
+#define FRF_BB_DEBUG2_PORT_LBN 25
+#define FRF_BB_DEBUG2_PORT_WIDTH 15
+#define FRF_BB_DEBUG1_PORT_LBN 0
+#define FRF_BB_DEBUG1_PORT_WIDTH 25
+
+/* EVQ_RPTR_REGP0: Event queue read pointer register */
+#define FR_BZ_EVQ_RPTR_P0 0x00000400
+#define FR_BZ_EVQ_RPTR_P0_STEP 8192
+#define FR_BZ_EVQ_RPTR_P0_ROWS 1024
+/* EVQ_RPTR_REG_KER: Event queue read pointer register */
+#define FR_AA_EVQ_RPTR_KER 0x00011b00
+#define FR_AA_EVQ_RPTR_KER_STEP 4
+#define FR_AA_EVQ_RPTR_KER_ROWS 4
+/* EVQ_RPTR_REG: Event queue read pointer register */
+#define FR_BZ_EVQ_RPTR 0x00fa0000
+#define FR_BZ_EVQ_RPTR_STEP 16
+#define FR_BB_EVQ_RPTR_ROWS 4096
+#define FR_CZ_EVQ_RPTR_ROWS 1024
+/* EVQ_RPTR_REGP123: Event queue read pointer register */
+#define FR_BB_EVQ_RPTR_P123 0x01000400
+#define FR_BB_EVQ_RPTR_P123_STEP 8192
+#define FR_BB_EVQ_RPTR_P123_ROWS 3072
+#define FRF_AZ_EVQ_RPTR_VLD_LBN 15
+#define FRF_AZ_EVQ_RPTR_VLD_WIDTH 1
+#define FRF_AZ_EVQ_RPTR_LBN 0
+#define FRF_AZ_EVQ_RPTR_WIDTH 15
+
+/* TIMER_COMMAND_REGP0: Timer Command Registers */
+#define FR_BZ_TIMER_COMMAND_P0 0x00000420
+#define FR_BZ_TIMER_COMMAND_P0_STEP 8192
+#define FR_BZ_TIMER_COMMAND_P0_ROWS 1024
+/* TIMER_COMMAND_REG_KER: Timer Command Registers */
+#define FR_AA_TIMER_COMMAND_KER 0x00000420
+#define FR_AA_TIMER_COMMAND_KER_STEP 8192
+#define FR_AA_TIMER_COMMAND_KER_ROWS 4
+/* TIMER_COMMAND_REGP123: Timer Command Registers */
+#define FR_BB_TIMER_COMMAND_P123 0x01000420
+#define FR_BB_TIMER_COMMAND_P123_STEP 8192
+#define FR_BB_TIMER_COMMAND_P123_ROWS 3072
+#define FRF_CZ_TC_TIMER_MODE_LBN 14
+#define FRF_CZ_TC_TIMER_MODE_WIDTH 2
+#define FRF_AB_TC_TIMER_MODE_LBN 12
+#define FRF_AB_TC_TIMER_MODE_WIDTH 2
+#define FRF_CZ_TC_TIMER_VAL_LBN 0
+#define FRF_CZ_TC_TIMER_VAL_WIDTH 14
+#define FRF_AB_TC_TIMER_VAL_LBN 0
+#define FRF_AB_TC_TIMER_VAL_WIDTH 12
+
+/* DRV_EV_REG: Driver generated event register */
+#define FR_AZ_DRV_EV 0x00000440
+#define FRF_AZ_DRV_EV_QID_LBN 64
+#define FRF_AZ_DRV_EV_QID_WIDTH 12
+#define FRF_AZ_DRV_EV_DATA_LBN 0
+#define FRF_AZ_DRV_EV_DATA_WIDTH 64
+
+/* EVQ_CTL_REG: Event queue control register */
+#define FR_AZ_EVQ_CTL 0x00000450
+#define FRF_CZ_RX_EVQ_WAKEUP_MASK_LBN 15
+#define FRF_CZ_RX_EVQ_WAKEUP_MASK_WIDTH 10
+#define FRF_BB_RX_EVQ_WAKEUP_MASK_LBN 15
+#define FRF_BB_RX_EVQ_WAKEUP_MASK_WIDTH 6
+#define FRF_AZ_EVQ_OWNERR_CTL_LBN 14
+#define FRF_AZ_EVQ_OWNERR_CTL_WIDTH 1
+#define FRF_AZ_EVQ_FIFO_AF_TH_LBN 7
+#define FRF_AZ_EVQ_FIFO_AF_TH_WIDTH 7
+#define FRF_AZ_EVQ_FIFO_NOTAF_TH_LBN 0
+#define FRF_AZ_EVQ_FIFO_NOTAF_TH_WIDTH 7
+
+/* EVQ_CNT1_REG: Event counter 1 register */
+#define FR_AZ_EVQ_CNT1 0x00000460
+#define FRF_AZ_EVQ_CNT_PRE_FIFO_LBN 120
+#define FRF_AZ_EVQ_CNT_PRE_FIFO_WIDTH 7
+#define FRF_AZ_EVQ_CNT_TOBIU_LBN 100
+#define FRF_AZ_EVQ_CNT_TOBIU_WIDTH 20
+#define FRF_AZ_EVQ_TX_REQ_CNT_LBN 80
+#define FRF_AZ_EVQ_TX_REQ_CNT_WIDTH 20
+#define FRF_AZ_EVQ_RX_REQ_CNT_LBN 60
+#define FRF_AZ_EVQ_RX_REQ_CNT_WIDTH 20
+#define FRF_AZ_EVQ_EM_REQ_CNT_LBN 40
+#define FRF_AZ_EVQ_EM_REQ_CNT_WIDTH 20
+#define FRF_AZ_EVQ_CSR_REQ_CNT_LBN 20
+#define FRF_AZ_EVQ_CSR_REQ_CNT_WIDTH 20
+#define FRF_AZ_EVQ_ERR_REQ_CNT_LBN 0
+#define FRF_AZ_EVQ_ERR_REQ_CNT_WIDTH 20
+
+/* EVQ_CNT2_REG: Event counter 2 register */
+#define FR_AZ_EVQ_CNT2 0x00000470
+#define FRF_AZ_EVQ_UPD_REQ_CNT_LBN 104
+#define FRF_AZ_EVQ_UPD_REQ_CNT_WIDTH 20
+#define FRF_AZ_EVQ_CLR_REQ_CNT_LBN 84
+#define FRF_AZ_EVQ_CLR_REQ_CNT_WIDTH 20
+#define FRF_AZ_EVQ_RDY_CNT_LBN 80
+#define FRF_AZ_EVQ_RDY_CNT_WIDTH 4
+#define FRF_AZ_EVQ_WU_REQ_CNT_LBN 60
+#define FRF_AZ_EVQ_WU_REQ_CNT_WIDTH 20
+#define FRF_AZ_EVQ_WET_REQ_CNT_LBN 40
+#define FRF_AZ_EVQ_WET_REQ_CNT_WIDTH 20
+#define FRF_AZ_EVQ_INIT_REQ_CNT_LBN 20
+#define FRF_AZ_EVQ_INIT_REQ_CNT_WIDTH 20
+#define FRF_AZ_EVQ_TM_REQ_CNT_LBN 0
+#define FRF_AZ_EVQ_TM_REQ_CNT_WIDTH 20
+
+/* USR_EV_REG: Event mailbox register */
+#define FR_CZ_USR_EV 0x00000540
+#define FR_CZ_USR_EV_STEP 8192
+#define FR_CZ_USR_EV_ROWS 1024
+#define FRF_CZ_USR_EV_DATA_LBN 0
+#define FRF_CZ_USR_EV_DATA_WIDTH 32
+
+/* BUF_TBL_CFG_REG: Buffer table configuration register */
+#define FR_AZ_BUF_TBL_CFG 0x00000600
+#define FRF_AZ_BUF_TBL_MODE_LBN 3
+#define FRF_AZ_BUF_TBL_MODE_WIDTH 1
+
+/* SRM_RX_DC_CFG_REG: SRAM receive descriptor cache configuration register */
+#define FR_AZ_SRM_RX_DC_CFG 0x00000610
+#define FRF_AZ_SRM_CLK_TMP_EN_LBN 21
+#define FRF_AZ_SRM_CLK_TMP_EN_WIDTH 1
+#define FRF_AZ_SRM_RX_DC_BASE_ADR_LBN 0
+#define FRF_AZ_SRM_RX_DC_BASE_ADR_WIDTH 21
+
+/* SRM_TX_DC_CFG_REG: SRAM transmit descriptor cache configuration register */
+#define FR_AZ_SRM_TX_DC_CFG 0x00000620
+#define FRF_AZ_SRM_TX_DC_BASE_ADR_LBN 0
+#define FRF_AZ_SRM_TX_DC_BASE_ADR_WIDTH 21
+
+/* SRM_CFG_REG: SRAM configuration register */
+#define FR_AZ_SRM_CFG 0x00000630
+#define FRF_AZ_SRM_OOB_ADR_INTEN_LBN 5
+#define FRF_AZ_SRM_OOB_ADR_INTEN_WIDTH 1
+#define FRF_AZ_SRM_OOB_BUF_INTEN_LBN 4
+#define FRF_AZ_SRM_OOB_BUF_INTEN_WIDTH 1
+#define FRF_AZ_SRM_INIT_EN_LBN 3
+#define FRF_AZ_SRM_INIT_EN_WIDTH 1
+#define FRF_AZ_SRM_NUM_BANK_LBN 2
+#define FRF_AZ_SRM_NUM_BANK_WIDTH 1
+#define FRF_AZ_SRM_BANK_SIZE_LBN 0
+#define FRF_AZ_SRM_BANK_SIZE_WIDTH 2
+
+/* BUF_TBL_UPD_REG: Buffer table update register */
+#define FR_AZ_BUF_TBL_UPD 0x00000650
+#define FRF_AZ_BUF_UPD_CMD_LBN 63
+#define FRF_AZ_BUF_UPD_CMD_WIDTH 1
+#define FRF_AZ_BUF_CLR_CMD_LBN 62
+#define FRF_AZ_BUF_CLR_CMD_WIDTH 1
+#define FRF_AZ_BUF_CLR_END_ID_LBN 32
+#define FRF_AZ_BUF_CLR_END_ID_WIDTH 20
+#define FRF_AZ_BUF_CLR_START_ID_LBN 0
+#define FRF_AZ_BUF_CLR_START_ID_WIDTH 20
+
+/* SRM_UPD_EVQ_REG: SRAM update event queue register */
+#define FR_AZ_SRM_UPD_EVQ 0x00000660
+#define FRF_AZ_SRM_UPD_EVQ_ID_LBN 0
+#define FRF_AZ_SRM_UPD_EVQ_ID_WIDTH 12
+
+/* SRAM_PARITY_REG: SRAM parity register. */
+#define FR_AZ_SRAM_PARITY 0x00000670
+#define FRF_CZ_BYPASS_ECC_LBN 3
+#define FRF_CZ_BYPASS_ECC_WIDTH 1
+#define FRF_CZ_SEC_INT_LBN 2
+#define FRF_CZ_SEC_INT_WIDTH 1
+#define FRF_CZ_FORCE_SRAM_DOUBLE_ERR_LBN 1
+#define FRF_CZ_FORCE_SRAM_DOUBLE_ERR_WIDTH 1
+#define FRF_AB_FORCE_SRAM_PERR_LBN 0
+#define FRF_AB_FORCE_SRAM_PERR_WIDTH 1
+#define FRF_CZ_FORCE_SRAM_SINGLE_ERR_LBN 0
+#define FRF_CZ_FORCE_SRAM_SINGLE_ERR_WIDTH 1
+
+/* RX_CFG_REG: Receive configuration register */
+#define FR_AZ_RX_CFG 0x00000800
+#define FRF_CZ_RX_MIN_KBUF_SIZE_LBN 72
+#define FRF_CZ_RX_MIN_KBUF_SIZE_WIDTH 14
+#define FRF_CZ_RX_HDR_SPLIT_EN_LBN 71
+#define FRF_CZ_RX_HDR_SPLIT_EN_WIDTH 1
+#define FRF_CZ_RX_HDR_SPLIT_PLD_BUF_SIZE_LBN 62
+#define FRF_CZ_RX_HDR_SPLIT_PLD_BUF_SIZE_WIDTH 9
+#define FRF_CZ_RX_HDR_SPLIT_HDR_BUF_SIZE_LBN 53
+#define FRF_CZ_RX_HDR_SPLIT_HDR_BUF_SIZE_WIDTH 9
+#define FRF_CZ_RX_PRE_RFF_IPG_LBN 49
+#define FRF_CZ_RX_PRE_RFF_IPG_WIDTH 4
+#define FRF_BZ_RX_TCP_SUP_LBN 48
+#define FRF_BZ_RX_TCP_SUP_WIDTH 1
+#define FRF_BZ_RX_INGR_EN_LBN 47
+#define FRF_BZ_RX_INGR_EN_WIDTH 1
+#define FRF_BZ_RX_IP_HASH_LBN 46
+#define FRF_BZ_RX_IP_HASH_WIDTH 1
+#define FRF_BZ_RX_HASH_ALG_LBN 45
+#define FRF_BZ_RX_HASH_ALG_WIDTH 1
+#define FRF_BZ_RX_HASH_INSRT_HDR_LBN 44
+#define FRF_BZ_RX_HASH_INSRT_HDR_WIDTH 1
+#define FRF_BZ_RX_DESC_PUSH_EN_LBN 43
+#define FRF_BZ_RX_DESC_PUSH_EN_WIDTH 1
+#define FRF_BZ_RX_RDW_PATCH_EN_LBN 42
+#define FRF_BZ_RX_RDW_PATCH_EN_WIDTH 1
+#define FRF_BB_RX_PCI_BURST_SIZE_LBN 39
+#define FRF_BB_RX_PCI_BURST_SIZE_WIDTH 3
+#define FRF_BZ_RX_OWNERR_CTL_LBN 38
+#define FRF_BZ_RX_OWNERR_CTL_WIDTH 1
+#define FRF_BZ_RX_XON_TX_TH_LBN 33
+#define FRF_BZ_RX_XON_TX_TH_WIDTH 5
+#define FRF_AA_RX_DESC_PUSH_EN_LBN 35
+#define FRF_AA_RX_DESC_PUSH_EN_WIDTH 1
+#define FRF_AA_RX_RDW_PATCH_EN_LBN 34
+#define FRF_AA_RX_RDW_PATCH_EN_WIDTH 1
+#define FRF_AA_RX_PCI_BURST_SIZE_LBN 31
+#define FRF_AA_RX_PCI_BURST_SIZE_WIDTH 3
+#define FRF_BZ_RX_XOFF_TX_TH_LBN 28
+#define FRF_BZ_RX_XOFF_TX_TH_WIDTH 5
+#define FRF_AA_RX_OWNERR_CTL_LBN 30
+#define FRF_AA_RX_OWNERR_CTL_WIDTH 1
+#define FRF_AA_RX_XON_TX_TH_LBN 25
+#define FRF_AA_RX_XON_TX_TH_WIDTH 5
+#define FRF_BZ_RX_USR_BUF_SIZE_LBN 19
+#define FRF_BZ_RX_USR_BUF_SIZE_WIDTH 9
+#define FRF_AA_RX_XOFF_TX_TH_LBN 20
+#define FRF_AA_RX_XOFF_TX_TH_WIDTH 5
+#define FRF_AA_RX_USR_BUF_SIZE_LBN 11
+#define FRF_AA_RX_USR_BUF_SIZE_WIDTH 9
+#define FRF_BZ_RX_XON_MAC_TH_LBN 10
+#define FRF_BZ_RX_XON_MAC_TH_WIDTH 9
+#define FRF_AA_RX_XON_MAC_TH_LBN 6
+#define FRF_AA_RX_XON_MAC_TH_WIDTH 5
+#define FRF_BZ_RX_XOFF_MAC_TH_LBN 1
+#define FRF_BZ_RX_XOFF_MAC_TH_WIDTH 9
+#define FRF_AA_RX_XOFF_MAC_TH_LBN 1
+#define FRF_AA_RX_XOFF_MAC_TH_WIDTH 5
+#define FRF_AZ_RX_XOFF_MAC_EN_LBN 0
+#define FRF_AZ_RX_XOFF_MAC_EN_WIDTH 1
+
+/* RX_FILTER_CTL_REG: Receive filter control registers */
+#define FR_BZ_RX_FILTER_CTL 0x00000810
+#define FRF_CZ_ETHERNET_WILDCARD_SEARCH_LIMIT_LBN 94
+#define FRF_CZ_ETHERNET_WILDCARD_SEARCH_LIMIT_WIDTH 8
+#define FRF_CZ_ETHERNET_FULL_SEARCH_LIMIT_LBN 86
+#define FRF_CZ_ETHERNET_FULL_SEARCH_LIMIT_WIDTH 8
+#define FRF_CZ_RX_FILTER_ALL_VLAN_ETHERTYPES_LBN 85
+#define FRF_CZ_RX_FILTER_ALL_VLAN_ETHERTYPES_WIDTH 1
+#define FRF_CZ_RX_VLAN_MATCH_ETHERTYPE_LBN 69
+#define FRF_CZ_RX_VLAN_MATCH_ETHERTYPE_WIDTH 16
+#define FRF_CZ_MULTICAST_NOMATCH_Q_ID_LBN 57
+#define FRF_CZ_MULTICAST_NOMATCH_Q_ID_WIDTH 12
+#define FRF_CZ_MULTICAST_NOMATCH_RSS_ENABLED_LBN 56
+#define FRF_CZ_MULTICAST_NOMATCH_RSS_ENABLED_WIDTH 1
+#define FRF_CZ_MULTICAST_NOMATCH_IP_OVERRIDE_LBN 55
+#define FRF_CZ_MULTICAST_NOMATCH_IP_OVERRIDE_WIDTH 1
+#define FRF_CZ_UNICAST_NOMATCH_Q_ID_LBN 43
+#define FRF_CZ_UNICAST_NOMATCH_Q_ID_WIDTH 12
+#define FRF_CZ_UNICAST_NOMATCH_RSS_ENABLED_LBN 42
+#define FRF_CZ_UNICAST_NOMATCH_RSS_ENABLED_WIDTH 1
+#define FRF_CZ_UNICAST_NOMATCH_IP_OVERRIDE_LBN 41
+#define FRF_CZ_UNICAST_NOMATCH_IP_OVERRIDE_WIDTH 1
+#define FRF_BZ_SCATTER_ENBL_NO_MATCH_Q_LBN 40
+#define FRF_BZ_SCATTER_ENBL_NO_MATCH_Q_WIDTH 1
+#define FRF_BZ_UDP_FULL_SRCH_LIMIT_LBN 32
+#define FRF_BZ_UDP_FULL_SRCH_LIMIT_WIDTH 8
+#define FRF_BZ_NUM_KER_LBN 24
+#define FRF_BZ_NUM_KER_WIDTH 2
+#define FRF_BZ_UDP_WILD_SRCH_LIMIT_LBN 16
+#define FRF_BZ_UDP_WILD_SRCH_LIMIT_WIDTH 8
+#define FRF_BZ_TCP_WILD_SRCH_LIMIT_LBN 8
+#define FRF_BZ_TCP_WILD_SRCH_LIMIT_WIDTH 8
+#define FRF_BZ_TCP_FULL_SRCH_LIMIT_LBN 0
+#define FRF_BZ_TCP_FULL_SRCH_LIMIT_WIDTH 8
+
+/* RX_FLUSH_DESCQ_REG: Receive flush descriptor queue register */
+#define FR_AZ_RX_FLUSH_DESCQ 0x00000820
+#define FRF_AZ_RX_FLUSH_DESCQ_CMD_LBN 24
+#define FRF_AZ_RX_FLUSH_DESCQ_CMD_WIDTH 1
+#define FRF_AZ_RX_FLUSH_DESCQ_LBN 0
+#define FRF_AZ_RX_FLUSH_DESCQ_WIDTH 12
+
+/* RX_DESC_UPD_REGP0: Receive descriptor update register. */
+#define FR_BZ_RX_DESC_UPD_P0 0x00000830
+#define FR_BZ_RX_DESC_UPD_P0_STEP 8192
+#define FR_BZ_RX_DESC_UPD_P0_ROWS 1024
+/* RX_DESC_UPD_REG_KER: Receive descriptor update register. */
+#define FR_AA_RX_DESC_UPD_KER 0x00000830
+#define FR_AA_RX_DESC_UPD_KER_STEP 8192
+#define FR_AA_RX_DESC_UPD_KER_ROWS 4
+/* RX_DESC_UPD_REGP123: Receive descriptor update register. */
+#define FR_BB_RX_DESC_UPD_P123 0x01000830
+#define FR_BB_RX_DESC_UPD_P123_STEP 8192
+#define FR_BB_RX_DESC_UPD_P123_ROWS 3072
+#define FRF_AZ_RX_DESC_WPTR_LBN 96
+#define FRF_AZ_RX_DESC_WPTR_WIDTH 12
+#define FRF_AZ_RX_DESC_PUSH_CMD_LBN 95
+#define FRF_AZ_RX_DESC_PUSH_CMD_WIDTH 1
+#define FRF_AZ_RX_DESC_LBN 0
+#define FRF_AZ_RX_DESC_WIDTH 64
+
+/* RX_DC_CFG_REG: Receive descriptor cache configuration register */
+#define FR_AZ_RX_DC_CFG 0x00000840
+#define FRF_AB_RX_MAX_PF_LBN 2
+#define FRF_AB_RX_MAX_PF_WIDTH 2
+#define FRF_AZ_RX_DC_SIZE_LBN 0
+#define FRF_AZ_RX_DC_SIZE_WIDTH 2
+#define FFE_AZ_RX_DC_SIZE_64 3
+#define FFE_AZ_RX_DC_SIZE_32 2
+#define FFE_AZ_RX_DC_SIZE_16 1
+#define FFE_AZ_RX_DC_SIZE_8 0
+
+/* RX_DC_PF_WM_REG: Receive descriptor cache pre-fetch watermark register */
+#define FR_AZ_RX_DC_PF_WM 0x00000850
+#define FRF_AZ_RX_DC_PF_HWM_LBN 6
+#define FRF_AZ_RX_DC_PF_HWM_WIDTH 6
+#define FRF_AZ_RX_DC_PF_LWM_LBN 0
+#define FRF_AZ_RX_DC_PF_LWM_WIDTH 6
+
+/* RX_RSS_TKEY_REG: RSS Toeplitz hash key */
+#define FR_BZ_RX_RSS_TKEY 0x00000860
+#define FRF_BZ_RX_RSS_TKEY_HI_LBN 64
+#define FRF_BZ_RX_RSS_TKEY_HI_WIDTH 64
+#define FRF_BZ_RX_RSS_TKEY_LO_LBN 0
+#define FRF_BZ_RX_RSS_TKEY_LO_WIDTH 64
+
+/* RX_NODESC_DROP_REG: Receive dropped packet counter register */
+#define FR_AZ_RX_NODESC_DROP 0x00000880
+#define FRF_CZ_RX_NODESC_DROP_CNT_LBN 0
+#define FRF_CZ_RX_NODESC_DROP_CNT_WIDTH 32
+#define FRF_AB_RX_NODESC_DROP_CNT_LBN 0
+#define FRF_AB_RX_NODESC_DROP_CNT_WIDTH 16
+
+/* RX_SELF_RST_REG: Receive self reset register */
+#define FR_AA_RX_SELF_RST 0x00000890
+#define FRF_AA_RX_ISCSI_DIS_LBN 17
+#define FRF_AA_RX_ISCSI_DIS_WIDTH 1
+#define FRF_AA_RX_SW_RST_REG_LBN 16
+#define FRF_AA_RX_SW_RST_REG_WIDTH 1
+#define FRF_AA_RX_NODESC_WAIT_DIS_LBN 9
+#define FRF_AA_RX_NODESC_WAIT_DIS_WIDTH 1
+#define FRF_AA_RX_SELF_RST_EN_LBN 8
+#define FRF_AA_RX_SELF_RST_EN_WIDTH 1
+#define FRF_AA_RX_MAX_PF_LAT_LBN 4
+#define FRF_AA_RX_MAX_PF_LAT_WIDTH 4
+#define FRF_AA_RX_MAX_LU_LAT_LBN 0
+#define FRF_AA_RX_MAX_LU_LAT_WIDTH 4
+
+/* RX_DEBUG_REG: undocumented register */
+#define FR_AZ_RX_DEBUG 0x000008a0
+#define FRF_AZ_RX_DEBUG_LBN 0
+#define FRF_AZ_RX_DEBUG_WIDTH 64
+
+/* RX_PUSH_DROP_REG: Receive descriptor push dropped counter register */
+#define FR_AZ_RX_PUSH_DROP 0x000008b0
+#define FRF_AZ_RX_PUSH_DROP_CNT_LBN 0
+#define FRF_AZ_RX_PUSH_DROP_CNT_WIDTH 32
+
+/* RX_RSS_IPV6_REG1: IPv6 RSS Toeplitz hash key low bytes */
+#define FR_CZ_RX_RSS_IPV6_REG1 0x000008d0
+#define FRF_CZ_RX_RSS_IPV6_TKEY_LO_LBN 0
+#define FRF_CZ_RX_RSS_IPV6_TKEY_LO_WIDTH 128
+
+/* RX_RSS_IPV6_REG2: IPv6 RSS Toeplitz hash key middle bytes */
+#define FR_CZ_RX_RSS_IPV6_REG2 0x000008e0
+#define FRF_CZ_RX_RSS_IPV6_TKEY_MID_LBN 0
+#define FRF_CZ_RX_RSS_IPV6_TKEY_MID_WIDTH 128
+
+/* RX_RSS_IPV6_REG3: IPv6 RSS Toeplitz hash key upper bytes and IPv6 RSS settings */
+#define FR_CZ_RX_RSS_IPV6_REG3 0x000008f0
+#define FRF_CZ_RX_RSS_IPV6_THASH_ENABLE_LBN 66
+#define FRF_CZ_RX_RSS_IPV6_THASH_ENABLE_WIDTH 1
+#define FRF_CZ_RX_RSS_IPV6_IP_THASH_ENABLE_LBN 65
+#define FRF_CZ_RX_RSS_IPV6_IP_THASH_ENABLE_WIDTH 1
+#define FRF_CZ_RX_RSS_IPV6_TCP_SUPPRESS_LBN 64
+#define FRF_CZ_RX_RSS_IPV6_TCP_SUPPRESS_WIDTH 1
+#define FRF_CZ_RX_RSS_IPV6_TKEY_HI_LBN 0
+#define FRF_CZ_RX_RSS_IPV6_TKEY_HI_WIDTH 64
+
+/* TX_FLUSH_DESCQ_REG: Transmit flush descriptor queue register */
+#define FR_AZ_TX_FLUSH_DESCQ 0x00000a00
+#define FRF_AZ_TX_FLUSH_DESCQ_CMD_LBN 12
+#define FRF_AZ_TX_FLUSH_DESCQ_CMD_WIDTH 1
+#define FRF_AZ_TX_FLUSH_DESCQ_LBN 0
+#define FRF_AZ_TX_FLUSH_DESCQ_WIDTH 12
+
+/* TX_DESC_UPD_REGP0: Transmit descriptor update register. */
+#define FR_BZ_TX_DESC_UPD_P0 0x00000a10
+#define FR_BZ_TX_DESC_UPD_P0_STEP 8192
+#define FR_BZ_TX_DESC_UPD_P0_ROWS 1024
+/* TX_DESC_UPD_REG_KER: Transmit descriptor update register. */
+#define FR_AA_TX_DESC_UPD_KER 0x00000a10
+#define FR_AA_TX_DESC_UPD_KER_STEP 8192
+#define FR_AA_TX_DESC_UPD_KER_ROWS 8
+/* TX_DESC_UPD_REGP123: Transmit descriptor update register. */
+#define FR_BB_TX_DESC_UPD_P123 0x01000a10
+#define FR_BB_TX_DESC_UPD_P123_STEP 8192
+#define FR_BB_TX_DESC_UPD_P123_ROWS 3072
+#define FRF_AZ_TX_DESC_WPTR_LBN 96
+#define FRF_AZ_TX_DESC_WPTR_WIDTH 12
+#define FRF_AZ_TX_DESC_PUSH_CMD_LBN 95
+#define FRF_AZ_TX_DESC_PUSH_CMD_WIDTH 1
+#define FRF_AZ_TX_DESC_LBN 0
+#define FRF_AZ_TX_DESC_WIDTH 95
+
+/* TX_DC_CFG_REG: Transmit descriptor cache configuration register */
+#define FR_AZ_TX_DC_CFG 0x00000a20
+#define FRF_AZ_TX_DC_SIZE_LBN 0
+#define FRF_AZ_TX_DC_SIZE_WIDTH 2
+#define FFE_AZ_TX_DC_SIZE_32 2
+#define FFE_AZ_TX_DC_SIZE_16 1
+#define FFE_AZ_TX_DC_SIZE_8 0
+
+/* TX_CHKSM_CFG_REG: Transmit checksum configuration register */
+#define FR_AA_TX_CHKSM_CFG 0x00000a30
+#define FRF_AA_TX_Q_CHKSM_DIS_96_127_LBN 96
+#define FRF_AA_TX_Q_CHKSM_DIS_96_127_WIDTH 32
+#define FRF_AA_TX_Q_CHKSM_DIS_64_95_LBN 64
+#define FRF_AA_TX_Q_CHKSM_DIS_64_95_WIDTH 32
+#define FRF_AA_TX_Q_CHKSM_DIS_32_63_LBN 32
+#define FRF_AA_TX_Q_CHKSM_DIS_32_63_WIDTH 32
+#define FRF_AA_TX_Q_CHKSM_DIS_0_31_LBN 0
+#define FRF_AA_TX_Q_CHKSM_DIS_0_31_WIDTH 32
+
+/* TX_CFG_REG: Transmit configuration register */
+#define FR_AZ_TX_CFG 0x00000a50
+#define FRF_CZ_TX_CONT_LOOKUP_THRESH_RANGE_LBN 114
+#define FRF_CZ_TX_CONT_LOOKUP_THRESH_RANGE_WIDTH 8
+#define FRF_CZ_TX_FILTER_TEST_MODE_BIT_LBN 113
+#define FRF_CZ_TX_FILTER_TEST_MODE_BIT_WIDTH 1
+#define FRF_CZ_TX_ETH_FILTER_WILD_SEARCH_RANGE_LBN 105
+#define FRF_CZ_TX_ETH_FILTER_WILD_SEARCH_RANGE_WIDTH 8
+#define FRF_CZ_TX_ETH_FILTER_FULL_SEARCH_RANGE_LBN 97
+#define FRF_CZ_TX_ETH_FILTER_FULL_SEARCH_RANGE_WIDTH 8
+#define FRF_CZ_TX_UDPIP_FILTER_WILD_SEARCH_RANGE_LBN 89
+#define FRF_CZ_TX_UDPIP_FILTER_WILD_SEARCH_RANGE_WIDTH 8
+#define FRF_CZ_TX_UDPIP_FILTER_FULL_SEARCH_RANGE_LBN 81
+#define FRF_CZ_TX_UDPIP_FILTER_FULL_SEARCH_RANGE_WIDTH 8
+#define FRF_CZ_TX_TCPIP_FILTER_WILD_SEARCH_RANGE_LBN 73
+#define FRF_CZ_TX_TCPIP_FILTER_WILD_SEARCH_RANGE_WIDTH 8
+#define FRF_CZ_TX_TCPIP_FILTER_FULL_SEARCH_RANGE_LBN 65
+#define FRF_CZ_TX_TCPIP_FILTER_FULL_SEARCH_RANGE_WIDTH 8
+#define FRF_CZ_TX_FILTER_ALL_VLAN_ETHERTYPES_BIT_LBN 64
+#define FRF_CZ_TX_FILTER_ALL_VLAN_ETHERTYPES_BIT_WIDTH 1
+#define FRF_CZ_TX_VLAN_MATCH_ETHERTYPE_RANGE_LBN 48
+#define FRF_CZ_TX_VLAN_MATCH_ETHERTYPE_RANGE_WIDTH 16
+#define FRF_CZ_TX_FILTER_EN_BIT_LBN 47
+#define FRF_CZ_TX_FILTER_EN_BIT_WIDTH 1
+#define FRF_AZ_TX_IP_ID_P0_OFS_LBN 16
+#define FRF_AZ_TX_IP_ID_P0_OFS_WIDTH 15
+#define FRF_AZ_TX_NO_EOP_DISC_EN_LBN 5
+#define FRF_AZ_TX_NO_EOP_DISC_EN_WIDTH 1
+#define FRF_AZ_TX_P1_PRI_EN_LBN 4
+#define FRF_AZ_TX_P1_PRI_EN_WIDTH 1
+#define FRF_AZ_TX_OWNERR_CTL_LBN 2
+#define FRF_AZ_TX_OWNERR_CTL_WIDTH 1
+#define FRF_AA_TX_NON_IP_DROP_DIS_LBN 1
+#define FRF_AA_TX_NON_IP_DROP_DIS_WIDTH 1
+#define FRF_AZ_TX_IP_ID_REP_EN_LBN 0
+#define FRF_AZ_TX_IP_ID_REP_EN_WIDTH 1
+
+/* TX_PUSH_DROP_REG: Transmit push dropped register */
+#define FR_AZ_TX_PUSH_DROP 0x00000a60
+#define FRF_AZ_TX_PUSH_DROP_CNT_LBN 0
+#define FRF_AZ_TX_PUSH_DROP_CNT_WIDTH 32
+
+/* TX_RESERVED_REG: Transmit reserved register */
+#define FR_AZ_TX_RESERVED 0x00000a80
+#define FRF_AZ_TX_EVT_CNT_LBN 121
+#define FRF_AZ_TX_EVT_CNT_WIDTH 7
+#define FRF_AZ_TX_PREF_AGE_CNT_LBN 119
+#define FRF_AZ_TX_PREF_AGE_CNT_WIDTH 2
+#define FRF_AZ_TX_RD_COMP_TMR_LBN 96
+#define FRF_AZ_TX_RD_COMP_TMR_WIDTH 23
+#define FRF_AZ_TX_PUSH_EN_LBN 89
+#define FRF_AZ_TX_PUSH_EN_WIDTH 1
+#define FRF_AZ_TX_PUSH_CHK_DIS_LBN 88
+#define FRF_AZ_TX_PUSH_CHK_DIS_WIDTH 1
+#define FRF_AZ_TX_D_FF_FULL_P0_LBN 85
+#define FRF_AZ_TX_D_FF_FULL_P0_WIDTH 1
+#define FRF_AZ_TX_DMAR_ST_P0_LBN 81
+#define FRF_AZ_TX_DMAR_ST_P0_WIDTH 1
+#define FRF_AZ_TX_DMAQ_ST_LBN 78
+#define FRF_AZ_TX_DMAQ_ST_WIDTH 1
+#define FRF_AZ_TX_RX_SPACER_LBN 64
+#define FRF_AZ_TX_RX_SPACER_WIDTH 8
+#define FRF_AZ_TX_DROP_ABORT_EN_LBN 60
+#define FRF_AZ_TX_DROP_ABORT_EN_WIDTH 1
+#define FRF_AZ_TX_SOFT_EVT_EN_LBN 59
+#define FRF_AZ_TX_SOFT_EVT_EN_WIDTH 1
+#define FRF_AZ_TX_PS_EVT_DIS_LBN 58
+#define FRF_AZ_TX_PS_EVT_DIS_WIDTH 1
+#define FRF_AZ_TX_RX_SPACER_EN_LBN 57
+#define FRF_AZ_TX_RX_SPACER_EN_WIDTH 1
+#define FRF_AZ_TX_XP_TIMER_LBN 52
+#define FRF_AZ_TX_XP_TIMER_WIDTH 5
+#define FRF_AZ_TX_PREF_SPACER_LBN 44
+#define FRF_AZ_TX_PREF_SPACER_WIDTH 8
+#define FRF_AZ_TX_PREF_WD_TMR_LBN 22
+#define FRF_AZ_TX_PREF_WD_TMR_WIDTH 22
+#define FRF_AZ_TX_ONLY1TAG_LBN 21
+#define FRF_AZ_TX_ONLY1TAG_WIDTH 1
+#define FRF_AZ_TX_PREF_THRESHOLD_LBN 19
+#define FRF_AZ_TX_PREF_THRESHOLD_WIDTH 2
+#define FRF_AZ_TX_ONE_PKT_PER_Q_LBN 18
+#define FRF_AZ_TX_ONE_PKT_PER_Q_WIDTH 1
+#define FRF_AZ_TX_DIS_NON_IP_EV_LBN 17
+#define FRF_AZ_TX_DIS_NON_IP_EV_WIDTH 1
+#define FRF_AA_TX_DMA_FF_THR_LBN 16
+#define FRF_AA_TX_DMA_FF_THR_WIDTH 1
+#define FRF_AZ_TX_DMA_SPACER_LBN 8
+#define FRF_AZ_TX_DMA_SPACER_WIDTH 8
+#define FRF_AA_TX_TCP_DIS_LBN 7
+#define FRF_AA_TX_TCP_DIS_WIDTH 1
+#define FRF_BZ_TX_FLUSH_MIN_LEN_EN_LBN 7
+#define FRF_BZ_TX_FLUSH_MIN_LEN_EN_WIDTH 1
+#define FRF_AA_TX_IP_DIS_LBN 6
+#define FRF_AA_TX_IP_DIS_WIDTH 1
+#define FRF_AZ_TX_MAX_CPL_LBN 2
+#define FRF_AZ_TX_MAX_CPL_WIDTH 2
+#define FFE_AZ_TX_MAX_CPL_16 3
+#define FFE_AZ_TX_MAX_CPL_8 2
+#define FFE_AZ_TX_MAX_CPL_4 1
+#define FFE_AZ_TX_MAX_CPL_NOLIMIT 0
+#define FRF_AZ_TX_MAX_PREF_LBN 0
+#define FRF_AZ_TX_MAX_PREF_WIDTH 2
+#define FFE_AZ_TX_MAX_PREF_32 3
+#define FFE_AZ_TX_MAX_PREF_16 2
+#define FFE_AZ_TX_MAX_PREF_8 1
+#define FFE_AZ_TX_MAX_PREF_OFF 0
+
+/* TX_PACE_REG: Transmit pace control register */
+#define FR_BZ_TX_PACE 0x00000a90
+#define FRF_BZ_TX_PACE_SB_NOT_AF_LBN 19
+#define FRF_BZ_TX_PACE_SB_NOT_AF_WIDTH 10
+#define FRF_BZ_TX_PACE_SB_AF_LBN 9
+#define FRF_BZ_TX_PACE_SB_AF_WIDTH 10
+#define FRF_BZ_TX_PACE_FB_BASE_LBN 5
+#define FRF_BZ_TX_PACE_FB_BASE_WIDTH 4
+#define FRF_BZ_TX_PACE_BIN_TH_LBN 0
+#define FRF_BZ_TX_PACE_BIN_TH_WIDTH 5
+
+/* TX_PACE_DROP_QID_REG: PACE Drop QID Counter */
+#define FR_BZ_TX_PACE_DROP_QID 0x00000aa0
+#define FRF_BZ_TX_PACE_QID_DRP_CNT_LBN 0
+#define FRF_BZ_TX_PACE_QID_DRP_CNT_WIDTH 16
+
+/* TX_VLAN_REG: Transmit VLAN tag register */
+#define FR_BB_TX_VLAN 0x00000ae0
+#define FRF_BB_TX_VLAN_EN_LBN 127
+#define FRF_BB_TX_VLAN_EN_WIDTH 1
+#define FRF_BB_TX_VLAN7_PORT1_EN_LBN 125
+#define FRF_BB_TX_VLAN7_PORT1_EN_WIDTH 1
+#define FRF_BB_TX_VLAN7_PORT0_EN_LBN 124
+#define FRF_BB_TX_VLAN7_PORT0_EN_WIDTH 1
+#define FRF_BB_TX_VLAN7_LBN 112
+#define FRF_BB_TX_VLAN7_WIDTH 12
+#define FRF_BB_TX_VLAN6_PORT1_EN_LBN 109
+#define FRF_BB_TX_VLAN6_PORT1_EN_WIDTH 1
+#define FRF_BB_TX_VLAN6_PORT0_EN_LBN 108
+#define FRF_BB_TX_VLAN6_PORT0_EN_WIDTH 1
+#define FRF_BB_TX_VLAN6_LBN 96
+#define FRF_BB_TX_VLAN6_WIDTH 12
+#define FRF_BB_TX_VLAN5_PORT1_EN_LBN 93
+#define FRF_BB_TX_VLAN5_PORT1_EN_WIDTH 1
+#define FRF_BB_TX_VLAN5_PORT0_EN_LBN 92
+#define FRF_BB_TX_VLAN5_PORT0_EN_WIDTH 1
+#define FRF_BB_TX_VLAN5_LBN 80
+#define FRF_BB_TX_VLAN5_WIDTH 12
+#define FRF_BB_TX_VLAN4_PORT1_EN_LBN 77
+#define FRF_BB_TX_VLAN4_PORT1_EN_WIDTH 1
+#define FRF_BB_TX_VLAN4_PORT0_EN_LBN 76
+#define FRF_BB_TX_VLAN4_PORT0_EN_WIDTH 1
+#define FRF_BB_TX_VLAN4_LBN 64
+#define FRF_BB_TX_VLAN4_WIDTH 12
+#define FRF_BB_TX_VLAN3_PORT1_EN_LBN 61
+#define FRF_BB_TX_VLAN3_PORT1_EN_WIDTH 1
+#define FRF_BB_TX_VLAN3_PORT0_EN_LBN 60
+#define FRF_BB_TX_VLAN3_PORT0_EN_WIDTH 1
+#define FRF_BB_TX_VLAN3_LBN 48
+#define FRF_BB_TX_VLAN3_WIDTH 12
+#define FRF_BB_TX_VLAN2_PORT1_EN_LBN 45
+#define FRF_BB_TX_VLAN2_PORT1_EN_WIDTH 1
+#define FRF_BB_TX_VLAN2_PORT0_EN_LBN 44
+#define FRF_BB_TX_VLAN2_PORT0_EN_WIDTH 1
+#define FRF_BB_TX_VLAN2_LBN 32
+#define FRF_BB_TX_VLAN2_WIDTH 12
+#define FRF_BB_TX_VLAN1_PORT1_EN_LBN 29
+#define FRF_BB_TX_VLAN1_PORT1_EN_WIDTH 1
+#define FRF_BB_TX_VLAN1_PORT0_EN_LBN 28
+#define FRF_BB_TX_VLAN1_PORT0_EN_WIDTH 1
+#define FRF_BB_TX_VLAN1_LBN 16
+#define FRF_BB_TX_VLAN1_WIDTH 12
+#define FRF_BB_TX_VLAN0_PORT1_EN_LBN 13
+#define FRF_BB_TX_VLAN0_PORT1_EN_WIDTH 1
+#define FRF_BB_TX_VLAN0_PORT0_EN_LBN 12
+#define FRF_BB_TX_VLAN0_PORT0_EN_WIDTH 1
+#define FRF_BB_TX_VLAN0_LBN 0
+#define FRF_BB_TX_VLAN0_WIDTH 12
+
+/* TX_IPFIL_PORTEN_REG: Transmit filter control register */
+#define FR_BZ_TX_IPFIL_PORTEN 0x00000af0
+#define FRF_BZ_TX_MADR0_FIL_EN_LBN 64
+#define FRF_BZ_TX_MADR0_FIL_EN_WIDTH 1
+#define FRF_BB_TX_IPFIL31_PORT_EN_LBN 62
+#define FRF_BB_TX_IPFIL31_PORT_EN_WIDTH 1
+#define FRF_BB_TX_IPFIL30_PORT_EN_LBN 60
+#define FRF_BB_TX_IPFIL30_PORT_EN_WIDTH 1
+#define FRF_BB_TX_IPFIL29_PORT_EN_LBN 58
+#define FRF_BB_TX_IPFIL29_PORT_EN_WIDTH 1
+#define FRF_BB_TX_IPFIL28_PORT_EN_LBN 56
+#define FRF_BB_TX_IPFIL28_PORT_EN_WIDTH 1
+#define FRF_BB_TX_IPFIL27_PORT_EN_LBN 54
+#define FRF_BB_TX_IPFIL27_PORT_EN_WIDTH 1
+#define FRF_BB_TX_IPFIL26_PORT_EN_LBN 52
+#define FRF_BB_TX_IPFIL26_PORT_EN_WIDTH 1
+#define FRF_BB_TX_IPFIL25_PORT_EN_LBN 50
+#define FRF_BB_TX_IPFIL25_PORT_EN_WIDTH 1
+#define FRF_BB_TX_IPFIL24_PORT_EN_LBN 48
+#define FRF_BB_TX_IPFIL24_PORT_EN_WIDTH 1
+#define FRF_BB_TX_IPFIL23_PORT_EN_LBN 46
+#define FRF_BB_TX_IPFIL23_PORT_EN_WIDTH 1
+#define FRF_BB_TX_IPFIL22_PORT_EN_LBN 44
+#define FRF_BB_TX_IPFIL22_PORT_EN_WIDTH 1
+#define FRF_BB_TX_IPFIL21_PORT_EN_LBN 42
+#define FRF_BB_TX_IPFIL21_PORT_EN_WIDTH 1
+#define FRF_BB_TX_IPFIL20_PORT_EN_LBN 40
+#define FRF_BB_TX_IPFIL20_PORT_EN_WIDTH 1
+#define FRF_BB_TX_IPFIL19_PORT_EN_LBN 38
+#define FRF_BB_TX_IPFIL19_PORT_EN_WIDTH 1
+#define FRF_BB_TX_IPFIL18_PORT_EN_LBN 36
+#define FRF_BB_TX_IPFIL18_PORT_EN_WIDTH 1
+#define FRF_BB_TX_IPFIL17_PORT_EN_LBN 34
+#define FRF_BB_TX_IPFIL17_PORT_EN_WIDTH 1
+#define FRF_BB_TX_IPFIL16_PORT_EN_LBN 32
+#define FRF_BB_TX_IPFIL16_PORT_EN_WIDTH 1
+#define FRF_BB_TX_IPFIL15_PORT_EN_LBN 30
+#define FRF_BB_TX_IPFIL15_PORT_EN_WIDTH 1
+#define FRF_BB_TX_IPFIL14_PORT_EN_LBN 28
+#define FRF_BB_TX_IPFIL14_PORT_EN_WIDTH 1
+#define FRF_BB_TX_IPFIL13_PORT_EN_LBN 26
+#define FRF_BB_TX_IPFIL13_PORT_EN_WIDTH 1
+#define FRF_BB_TX_IPFIL12_PORT_EN_LBN 24
+#define FRF_BB_TX_IPFIL12_PORT_EN_WIDTH 1
+#define FRF_BB_TX_IPFIL11_PORT_EN_LBN 22
+#define FRF_BB_TX_IPFIL11_PORT_EN_WIDTH 1
+#define FRF_BB_TX_IPFIL10_PORT_EN_LBN 20
+#define FRF_BB_TX_IPFIL10_PORT_EN_WIDTH 1
+#define FRF_BB_TX_IPFIL9_PORT_EN_LBN 18
+#define FRF_BB_TX_IPFIL9_PORT_EN_WIDTH 1
+#define FRF_BB_TX_IPFIL8_PORT_EN_LBN 16
+#define FRF_BB_TX_IPFIL8_PORT_EN_WIDTH 1
+#define FRF_BB_TX_IPFIL7_PORT_EN_LBN 14
+#define FRF_BB_TX_IPFIL7_PORT_EN_WIDTH 1
+#define FRF_BB_TX_IPFIL6_PORT_EN_LBN 12
+#define FRF_BB_TX_IPFIL6_PORT_EN_WIDTH 1
+#define FRF_BB_TX_IPFIL5_PORT_EN_LBN 10
+#define FRF_BB_TX_IPFIL5_PORT_EN_WIDTH 1
+#define FRF_BB_TX_IPFIL4_PORT_EN_LBN 8
+#define FRF_BB_TX_IPFIL4_PORT_EN_WIDTH 1
+#define FRF_BB_TX_IPFIL3_PORT_EN_LBN 6
+#define FRF_BB_TX_IPFIL3_PORT_EN_WIDTH 1
+#define FRF_BB_TX_IPFIL2_PORT_EN_LBN 4
+#define FRF_BB_TX_IPFIL2_PORT_EN_WIDTH 1
+#define FRF_BB_TX_IPFIL1_PORT_EN_LBN 2
+#define FRF_BB_TX_IPFIL1_PORT_EN_WIDTH 1
+#define FRF_BB_TX_IPFIL0_PORT_EN_LBN 0
+#define FRF_BB_TX_IPFIL0_PORT_EN_WIDTH 1
+
+/* TX_IPFIL_TBL: Transmit IP source address filter table */
+#define FR_BB_TX_IPFIL_TBL 0x00000b00
+#define FR_BB_TX_IPFIL_TBL_STEP 16
+#define FR_BB_TX_IPFIL_TBL_ROWS 16
+#define FRF_BB_TX_IPFIL_MASK_1_LBN 96
+#define FRF_BB_TX_IPFIL_MASK_1_WIDTH 32
+#define FRF_BB_TX_IP_SRC_ADR_1_LBN 64
+#define FRF_BB_TX_IP_SRC_ADR_1_WIDTH 32
+#define FRF_BB_TX_IPFIL_MASK_0_LBN 32
+#define FRF_BB_TX_IPFIL_MASK_0_WIDTH 32
+#define FRF_BB_TX_IP_SRC_ADR_0_LBN 0
+#define FRF_BB_TX_IP_SRC_ADR_0_WIDTH 32
+
+/* MD_TXD_REG: PHY management transmit data register */
+#define FR_AB_MD_TXD 0x00000c00
+#define FRF_AB_MD_TXD_LBN 0
+#define FRF_AB_MD_TXD_WIDTH 16
+
+/* MD_RXD_REG: PHY management receive data register */
+#define FR_AB_MD_RXD 0x00000c10
+#define FRF_AB_MD_RXD_LBN 0
+#define FRF_AB_MD_RXD_WIDTH 16
+
+/* MD_CS_REG: PHY management configuration & status register */
+#define FR_AB_MD_CS 0x00000c20
+#define FRF_AB_MD_RD_EN_CMD_LBN 15
+#define FRF_AB_MD_RD_EN_CMD_WIDTH 1
+#define FRF_AB_MD_WR_EN_CMD_LBN 14
+#define FRF_AB_MD_WR_EN_CMD_WIDTH 1
+#define FRF_AB_MD_ADDR_CMD_LBN 13
+#define FRF_AB_MD_ADDR_CMD_WIDTH 1
+#define FRF_AB_MD_PT_LBN 7
+#define FRF_AB_MD_PT_WIDTH 3
+#define FRF_AB_MD_PL_LBN 6
+#define FRF_AB_MD_PL_WIDTH 1
+#define FRF_AB_MD_INT_CLR_LBN 5
+#define FRF_AB_MD_INT_CLR_WIDTH 1
+#define FRF_AB_MD_GC_LBN 4
+#define FRF_AB_MD_GC_WIDTH 1
+#define FRF_AB_MD_PRSP_LBN 3
+#define FRF_AB_MD_PRSP_WIDTH 1
+#define FRF_AB_MD_RIC_LBN 2
+#define FRF_AB_MD_RIC_WIDTH 1
+#define FRF_AB_MD_RDC_LBN 1
+#define FRF_AB_MD_RDC_WIDTH 1
+#define FRF_AB_MD_WRC_LBN 0
+#define FRF_AB_MD_WRC_WIDTH 1
+
+/* MD_PHY_ADR_REG: PHY management PHY address register */
+#define FR_AB_MD_PHY_ADR 0x00000c30
+#define FRF_AB_MD_PHY_ADR_LBN 0
+#define FRF_AB_MD_PHY_ADR_WIDTH 16
+
+/* MD_ID_REG: PHY management ID register */
+#define FR_AB_MD_ID 0x00000c40
+#define FRF_AB_MD_PRT_ADR_LBN 11
+#define FRF_AB_MD_PRT_ADR_WIDTH 5
+#define FRF_AB_MD_DEV_ADR_LBN 6
+#define FRF_AB_MD_DEV_ADR_WIDTH 5
+
+/* MD_STAT_REG: PHY management status & mask register */
+#define FR_AB_MD_STAT 0x00000c50
+#define FRF_AB_MD_PINT_LBN 4
+#define FRF_AB_MD_PINT_WIDTH 1
+#define FRF_AB_MD_DONE_LBN 3
+#define FRF_AB_MD_DONE_WIDTH 1
+#define FRF_AB_MD_BSERR_LBN 2
+#define FRF_AB_MD_BSERR_WIDTH 1
+#define FRF_AB_MD_LNFL_LBN 1
+#define FRF_AB_MD_LNFL_WIDTH 1
+#define FRF_AB_MD_BSY_LBN 0
+#define FRF_AB_MD_BSY_WIDTH 1
+
+/* MAC_STAT_DMA_REG: Port MAC statistical counter DMA register */
+#define FR_AB_MAC_STAT_DMA 0x00000c60
+#define FRF_AB_MAC_STAT_DMA_CMD_LBN 48
+#define FRF_AB_MAC_STAT_DMA_CMD_WIDTH 1
+#define FRF_AB_MAC_STAT_DMA_ADR_LBN 0
+#define FRF_AB_MAC_STAT_DMA_ADR_WIDTH 48
+
+/* MAC_CTRL_REG: Port MAC control register */
+#define FR_AB_MAC_CTRL 0x00000c80
+#define FRF_AB_MAC_XOFF_VAL_LBN 16
+#define FRF_AB_MAC_XOFF_VAL_WIDTH 16
+#define FRF_BB_TXFIFO_DRAIN_EN_LBN 7
+#define FRF_BB_TXFIFO_DRAIN_EN_WIDTH 1
+#define FRF_AB_MAC_XG_DISTXCRC_LBN 5
+#define FRF_AB_MAC_XG_DISTXCRC_WIDTH 1
+#define FRF_AB_MAC_BCAD_ACPT_LBN 4
+#define FRF_AB_MAC_BCAD_ACPT_WIDTH 1
+#define FRF_AB_MAC_UC_PROM_LBN 3
+#define FRF_AB_MAC_UC_PROM_WIDTH 1
+#define FRF_AB_MAC_LINK_STATUS_LBN 2
+#define FRF_AB_MAC_LINK_STATUS_WIDTH 1
+#define FRF_AB_MAC_SPEED_LBN 0
+#define FRF_AB_MAC_SPEED_WIDTH 2
+#define FFE_AB_MAC_SPEED_10G 3
+#define FFE_AB_MAC_SPEED_1G 2
+#define FFE_AB_MAC_SPEED_100M 1
+#define FFE_AB_MAC_SPEED_10M 0
+
+/* GEN_MODE_REG: General Purpose mode register (external interrupt mask) */
+#define FR_BB_GEN_MODE 0x00000c90
+#define FRF_BB_XFP_PHY_INT_POL_SEL_LBN 3
+#define FRF_BB_XFP_PHY_INT_POL_SEL_WIDTH 1
+#define FRF_BB_XG_PHY_INT_POL_SEL_LBN 2
+#define FRF_BB_XG_PHY_INT_POL_SEL_WIDTH 1
+#define FRF_BB_XFP_PHY_INT_MASK_LBN 1
+#define FRF_BB_XFP_PHY_INT_MASK_WIDTH 1
+#define FRF_BB_XG_PHY_INT_MASK_LBN 0
+#define FRF_BB_XG_PHY_INT_MASK_WIDTH 1
+
+/* MAC_MC_HASH_REG0: Multicast address hash table */
+#define FR_AB_MAC_MC_HASH_REG0 0x00000ca0
+#define FRF_AB_MAC_MCAST_HASH0_LBN 0
+#define FRF_AB_MAC_MCAST_HASH0_WIDTH 128
+
+/* MAC_MC_HASH_REG1: Multicast address hash table */
+#define FR_AB_MAC_MC_HASH_REG1 0x00000cb0
+#define FRF_AB_MAC_MCAST_HASH1_LBN 0
+#define FRF_AB_MAC_MCAST_HASH1_WIDTH 128
+
+/* GM_CFG1_REG: GMAC configuration register 1 */
+#define FR_AB_GM_CFG1 0x00000e00
+#define FRF_AB_GM_SW_RST_LBN 31
+#define FRF_AB_GM_SW_RST_WIDTH 1
+#define FRF_AB_GM_SIM_RST_LBN 30
+#define FRF_AB_GM_SIM_RST_WIDTH 1
+#define FRF_AB_GM_RST_RX_MAC_CTL_LBN 19
+#define FRF_AB_GM_RST_RX_MAC_CTL_WIDTH 1
+#define FRF_AB_GM_RST_TX_MAC_CTL_LBN 18
+#define FRF_AB_GM_RST_TX_MAC_CTL_WIDTH 1
+#define FRF_AB_GM_RST_RX_FUNC_LBN 17
+#define FRF_AB_GM_RST_RX_FUNC_WIDTH 1
+#define FRF_AB_GM_RST_TX_FUNC_LBN 16
+#define FRF_AB_GM_RST_TX_FUNC_WIDTH 1
+#define FRF_AB_GM_LOOP_LBN 8
+#define FRF_AB_GM_LOOP_WIDTH 1
+#define FRF_AB_GM_RX_FC_EN_LBN 5
+#define FRF_AB_GM_RX_FC_EN_WIDTH 1
+#define FRF_AB_GM_TX_FC_EN_LBN 4
+#define FRF_AB_GM_TX_FC_EN_WIDTH 1
+#define FRF_AB_GM_SYNC_RXEN_LBN 3
+#define FRF_AB_GM_SYNC_RXEN_WIDTH 1
+#define FRF_AB_GM_RX_EN_LBN 2
+#define FRF_AB_GM_RX_EN_WIDTH 1
+#define FRF_AB_GM_SYNC_TXEN_LBN 1
+#define FRF_AB_GM_SYNC_TXEN_WIDTH 1
+#define FRF_AB_GM_TX_EN_LBN 0
+#define FRF_AB_GM_TX_EN_WIDTH 1
+
+/* GM_CFG2_REG: GMAC configuration register 2 */
+#define FR_AB_GM_CFG2 0x00000e10
+#define FRF_AB_GM_PAMBL_LEN_LBN 12
+#define FRF_AB_GM_PAMBL_LEN_WIDTH 4
+#define FRF_AB_GM_IF_MODE_LBN 8
+#define FRF_AB_GM_IF_MODE_WIDTH 2
+#define FFE_AB_IF_MODE_BYTE_MODE 2
+#define FFE_AB_IF_MODE_NIBBLE_MODE 1
+#define FRF_AB_GM_HUGE_FRM_EN_LBN 5
+#define FRF_AB_GM_HUGE_FRM_EN_WIDTH 1
+#define FRF_AB_GM_LEN_CHK_LBN 4
+#define FRF_AB_GM_LEN_CHK_WIDTH 1
+#define FRF_AB_GM_PAD_CRC_EN_LBN 2
+#define FRF_AB_GM_PAD_CRC_EN_WIDTH 1
+#define FRF_AB_GM_CRC_EN_LBN 1
+#define FRF_AB_GM_CRC_EN_WIDTH 1
+#define FRF_AB_GM_FD_LBN 0
+#define FRF_AB_GM_FD_WIDTH 1
+
+/* GM_IPG_REG: GMAC IPG register */
+#define FR_AB_GM_IPG 0x00000e20
+#define FRF_AB_GM_NONB2B_IPG1_LBN 24
+#define FRF_AB_GM_NONB2B_IPG1_WIDTH 7
+#define FRF_AB_GM_NONB2B_IPG2_LBN 16
+#define FRF_AB_GM_NONB2B_IPG2_WIDTH 7
+#define FRF_AB_GM_MIN_IPG_ENF_LBN 8
+#define FRF_AB_GM_MIN_IPG_ENF_WIDTH 8
+#define FRF_AB_GM_B2B_IPG_LBN 0
+#define FRF_AB_GM_B2B_IPG_WIDTH 7
+
+/* GM_HD_REG: GMAC half duplex register */
+#define FR_AB_GM_HD 0x00000e30
+#define FRF_AB_GM_ALT_BOFF_VAL_LBN 20
+#define FRF_AB_GM_ALT_BOFF_VAL_WIDTH 4
+#define FRF_AB_GM_ALT_BOFF_EN_LBN 19
+#define FRF_AB_GM_ALT_BOFF_EN_WIDTH 1
+#define FRF_AB_GM_BP_NO_BOFF_LBN 18
+#define FRF_AB_GM_BP_NO_BOFF_WIDTH 1
+#define FRF_AB_GM_DIS_BOFF_LBN 17
+#define FRF_AB_GM_DIS_BOFF_WIDTH 1
+#define FRF_AB_GM_EXDEF_TX_EN_LBN 16
+#define FRF_AB_GM_EXDEF_TX_EN_WIDTH 1
+#define FRF_AB_GM_RTRY_LIMIT_LBN 12
+#define FRF_AB_GM_RTRY_LIMIT_WIDTH 4
+#define FRF_AB_GM_COL_WIN_LBN 0
+#define FRF_AB_GM_COL_WIN_WIDTH 10
+
+/* GM_MAX_FLEN_REG: GMAC maximum frame length register */
+#define FR_AB_GM_MAX_FLEN 0x00000e40
+#define FRF_AB_GM_MAX_FLEN_LBN 0
+#define FRF_AB_GM_MAX_FLEN_WIDTH 16
+
+/* GM_TEST_REG: GMAC test register */
+#define FR_AB_GM_TEST 0x00000e70
+#define FRF_AB_GM_MAX_BOFF_LBN 3
+#define FRF_AB_GM_MAX_BOFF_WIDTH 1
+#define FRF_AB_GM_REG_TX_FLOW_EN_LBN 2
+#define FRF_AB_GM_REG_TX_FLOW_EN_WIDTH 1
+#define FRF_AB_GM_TEST_PAUSE_LBN 1
+#define FRF_AB_GM_TEST_PAUSE_WIDTH 1
+#define FRF_AB_GM_SHORT_SLOT_LBN 0
+#define FRF_AB_GM_SHORT_SLOT_WIDTH 1
+
+/* GM_ADR1_REG: GMAC station address register 1 */
+#define FR_AB_GM_ADR1 0x00000f00
+#define FRF_AB_GM_ADR_B0_LBN 24
+#define FRF_AB_GM_ADR_B0_WIDTH 8
+#define FRF_AB_GM_ADR_B1_LBN 16
+#define FRF_AB_GM_ADR_B1_WIDTH 8
+#define FRF_AB_GM_ADR_B2_LBN 8
+#define FRF_AB_GM_ADR_B2_WIDTH 8
+#define FRF_AB_GM_ADR_B3_LBN 0
+#define FRF_AB_GM_ADR_B3_WIDTH 8
+
+/* GM_ADR2_REG: GMAC station address register 2 */
+#define FR_AB_GM_ADR2 0x00000f10
+#define FRF_AB_GM_ADR_B4_LBN 24
+#define FRF_AB_GM_ADR_B4_WIDTH 8
+#define FRF_AB_GM_ADR_B5_LBN 16
+#define FRF_AB_GM_ADR_B5_WIDTH 8
+
+/* GMF_CFG0_REG: GMAC FIFO configuration register 0 */
+#define FR_AB_GMF_CFG0 0x00000f20
+#define FRF_AB_GMF_FTFENRPLY_LBN 20
+#define FRF_AB_GMF_FTFENRPLY_WIDTH 1
+#define FRF_AB_GMF_STFENRPLY_LBN 19
+#define FRF_AB_GMF_STFENRPLY_WIDTH 1
+#define FRF_AB_GMF_FRFENRPLY_LBN 18
+#define FRF_AB_GMF_FRFENRPLY_WIDTH 1
+#define FRF_AB_GMF_SRFENRPLY_LBN 17
+#define FRF_AB_GMF_SRFENRPLY_WIDTH 1
+#define FRF_AB_GMF_WTMENRPLY_LBN 16
+#define FRF_AB_GMF_WTMENRPLY_WIDTH 1
+#define FRF_AB_GMF_FTFENREQ_LBN 12
+#define FRF_AB_GMF_FTFENREQ_WIDTH 1
+#define FRF_AB_GMF_STFENREQ_LBN 11
+#define FRF_AB_GMF_STFENREQ_WIDTH 1
+#define FRF_AB_GMF_FRFENREQ_LBN 10
+#define FRF_AB_GMF_FRFENREQ_WIDTH 1
+#define FRF_AB_GMF_SRFENREQ_LBN 9
+#define FRF_AB_GMF_SRFENREQ_WIDTH 1
+#define FRF_AB_GMF_WTMENREQ_LBN 8
+#define FRF_AB_GMF_WTMENREQ_WIDTH 1
+#define FRF_AB_GMF_HSTRSTFT_LBN 4
+#define FRF_AB_GMF_HSTRSTFT_WIDTH 1
+#define FRF_AB_GMF_HSTRSTST_LBN 3
+#define FRF_AB_GMF_HSTRSTST_WIDTH 1
+#define FRF_AB_GMF_HSTRSTFR_LBN 2
+#define FRF_AB_GMF_HSTRSTFR_WIDTH 1
+#define FRF_AB_GMF_HSTRSTSR_LBN 1
+#define FRF_AB_GMF_HSTRSTSR_WIDTH 1
+#define FRF_AB_GMF_HSTRSTWT_LBN 0
+#define FRF_AB_GMF_HSTRSTWT_WIDTH 1
+
+/* GMF_CFG1_REG: GMAC FIFO configuration register 1 */
+#define FR_AB_GMF_CFG1 0x00000f30
+#define FRF_AB_GMF_CFGFRTH_LBN 16
+#define FRF_AB_GMF_CFGFRTH_WIDTH 5
+#define FRF_AB_GMF_CFGXOFFRTX_LBN 0
+#define FRF_AB_GMF_CFGXOFFRTX_WIDTH 16
+
+/* GMF_CFG2_REG: GMAC FIFO configuration register 2 */
+#define FR_AB_GMF_CFG2 0x00000f40
+#define FRF_AB_GMF_CFGHWM_LBN 16
+#define FRF_AB_GMF_CFGHWM_WIDTH 6
+#define FRF_AB_GMF_CFGLWM_LBN 0
+#define FRF_AB_GMF_CFGLWM_WIDTH 6
+
+/* GMF_CFG3_REG: GMAC FIFO configuration register 3 */
+#define FR_AB_GMF_CFG3 0x00000f50
+#define FRF_AB_GMF_CFGHWMFT_LBN 16
+#define FRF_AB_GMF_CFGHWMFT_WIDTH 6
+#define FRF_AB_GMF_CFGFTTH_LBN 0
+#define FRF_AB_GMF_CFGFTTH_WIDTH 6
+
+/* GMF_CFG4_REG: GMAC FIFO configuration register 4 */
+#define FR_AB_GMF_CFG4 0x00000f60
+#define FRF_AB_GMF_HSTFLTRFRM_LBN 0
+#define FRF_AB_GMF_HSTFLTRFRM_WIDTH 18
+
+/* GMF_CFG5_REG: GMAC FIFO configuration register 5 */
+#define FR_AB_GMF_CFG5 0x00000f70
+#define FRF_AB_GMF_CFGHDPLX_LBN 22
+#define FRF_AB_GMF_CFGHDPLX_WIDTH 1
+#define FRF_AB_GMF_SRFULL_LBN 21
+#define FRF_AB_GMF_SRFULL_WIDTH 1
+#define FRF_AB_GMF_HSTSRFULLCLR_LBN 20
+#define FRF_AB_GMF_HSTSRFULLCLR_WIDTH 1
+#define FRF_AB_GMF_CFGBYTMODE_LBN 19
+#define FRF_AB_GMF_CFGBYTMODE_WIDTH 1
+#define FRF_AB_GMF_HSTDRPLT64_LBN 18
+#define FRF_AB_GMF_HSTDRPLT64_WIDTH 1
+#define FRF_AB_GMF_HSTFLTRFRMDC_LBN 0
+#define FRF_AB_GMF_HSTFLTRFRMDC_WIDTH 18
+
+/* TX_SRC_MAC_TBL: Transmit MAC source address filter table */
+#define FR_BB_TX_SRC_MAC_TBL 0x00001000
+#define FR_BB_TX_SRC_MAC_TBL_STEP 16
+#define FR_BB_TX_SRC_MAC_TBL_ROWS 16
+#define FRF_BB_TX_SRC_MAC_ADR_1_LBN 64
+#define FRF_BB_TX_SRC_MAC_ADR_1_WIDTH 48
+#define FRF_BB_TX_SRC_MAC_ADR_0_LBN 0
+#define FRF_BB_TX_SRC_MAC_ADR_0_WIDTH 48
+
+/* TX_SRC_MAC_CTL_REG: Transmit MAC source address filter control */
+#define FR_BB_TX_SRC_MAC_CTL 0x00001100
+#define FRF_BB_TX_SRC_DROP_CTR_LBN 16
+#define FRF_BB_TX_SRC_DROP_CTR_WIDTH 16
+#define FRF_BB_TX_SRC_FLTR_EN_LBN 15
+#define FRF_BB_TX_SRC_FLTR_EN_WIDTH 1
+#define FRF_BB_TX_DROP_CTR_CLR_LBN 12
+#define FRF_BB_TX_DROP_CTR_CLR_WIDTH 1
+#define FRF_BB_TX_MAC_QID_SEL_LBN 0
+#define FRF_BB_TX_MAC_QID_SEL_WIDTH 3
+
+/* XM_ADR_LO_REG: XGMAC address register low */
+#define FR_AB_XM_ADR_LO 0x00001200
+#define FRF_AB_XM_ADR_LO_LBN 0
+#define FRF_AB_XM_ADR_LO_WIDTH 32
+
+/* XM_ADR_HI_REG: XGMAC address register high */
+#define FR_AB_XM_ADR_HI 0x00001210
+#define FRF_AB_XM_ADR_HI_LBN 0
+#define FRF_AB_XM_ADR_HI_WIDTH 16
+
+/* XM_GLB_CFG_REG: XGMAC global configuration */
+#define FR_AB_XM_GLB_CFG 0x00001220
+#define FRF_AB_XM_RMTFLT_GEN_LBN 17
+#define FRF_AB_XM_RMTFLT_GEN_WIDTH 1
+#define FRF_AB_XM_DEBUG_MODE_LBN 16
+#define FRF_AB_XM_DEBUG_MODE_WIDTH 1
+#define FRF_AB_XM_RX_STAT_EN_LBN 11
+#define FRF_AB_XM_RX_STAT_EN_WIDTH 1
+#define FRF_AB_XM_TX_STAT_EN_LBN 10
+#define FRF_AB_XM_TX_STAT_EN_WIDTH 1
+#define FRF_AB_XM_RX_JUMBO_MODE_LBN 6
+#define FRF_AB_XM_RX_JUMBO_MODE_WIDTH 1
+#define FRF_AB_XM_WAN_MODE_LBN 5
+#define FRF_AB_XM_WAN_MODE_WIDTH 1
+#define FRF_AB_XM_INTCLR_MODE_LBN 3
+#define FRF_AB_XM_INTCLR_MODE_WIDTH 1
+#define FRF_AB_XM_CORE_RST_LBN 0
+#define FRF_AB_XM_CORE_RST_WIDTH 1
+
+/* XM_TX_CFG_REG: XGMAC transmit configuration */
+#define FR_AB_XM_TX_CFG 0x00001230
+#define FRF_AB_XM_TX_PROG_LBN 24
+#define FRF_AB_XM_TX_PROG_WIDTH 1
+#define FRF_AB_XM_IPG_LBN 16
+#define FRF_AB_XM_IPG_WIDTH 4
+#define FRF_AB_XM_FCNTL_LBN 10
+#define FRF_AB_XM_FCNTL_WIDTH 1
+#define FRF_AB_XM_TXCRC_LBN 8
+#define FRF_AB_XM_TXCRC_WIDTH 1
+#define FRF_AB_XM_EDRC_LBN 6
+#define FRF_AB_XM_EDRC_WIDTH 1
+#define FRF_AB_XM_AUTO_PAD_LBN 5
+#define FRF_AB_XM_AUTO_PAD_WIDTH 1
+#define FRF_AB_XM_TX_PRMBL_LBN 2
+#define FRF_AB_XM_TX_PRMBL_WIDTH 1
+#define FRF_AB_XM_TXEN_LBN 1
+#define FRF_AB_XM_TXEN_WIDTH 1
+#define FRF_AB_XM_TX_RST_LBN 0
+#define FRF_AB_XM_TX_RST_WIDTH 1
+
+/* XM_RX_CFG_REG: XGMAC receive configuration */
+#define FR_AB_XM_RX_CFG 0x00001240
+#define FRF_AB_XM_PASS_LENERR_LBN 26
+#define FRF_AB_XM_PASS_LENERR_WIDTH 1
+#define FRF_AB_XM_PASS_CRC_ERR_LBN 25
+#define FRF_AB_XM_PASS_CRC_ERR_WIDTH 1
+#define FRF_AB_XM_PASS_PRMBLE_ERR_LBN 24
+#define FRF_AB_XM_PASS_PRMBLE_ERR_WIDTH 1
+#define FRF_AB_XM_REJ_BCAST_LBN 20
+#define FRF_AB_XM_REJ_BCAST_WIDTH 1
+#define FRF_AB_XM_ACPT_ALL_MCAST_LBN 11
+#define FRF_AB_XM_ACPT_ALL_MCAST_WIDTH 1
+#define FRF_AB_XM_ACPT_ALL_UCAST_LBN 9
+#define FRF_AB_XM_ACPT_ALL_UCAST_WIDTH 1
+#define FRF_AB_XM_AUTO_DEPAD_LBN 8
+#define FRF_AB_XM_AUTO_DEPAD_WIDTH 1
+#define FRF_AB_XM_RXCRC_LBN 3
+#define FRF_AB_XM_RXCRC_WIDTH 1
+#define FRF_AB_XM_RX_PRMBL_LBN 2
+#define FRF_AB_XM_RX_PRMBL_WIDTH 1
+#define FRF_AB_XM_RXEN_LBN 1
+#define FRF_AB_XM_RXEN_WIDTH 1
+#define FRF_AB_XM_RX_RST_LBN 0
+#define FRF_AB_XM_RX_RST_WIDTH 1
+
+/* XM_MGT_INT_MASK: XGMAC management interrupt mask */
+#define FR_AB_XM_MGT_INT_MASK 0x00001250
+#define FRF_AB_XM_MSK_STA_INTR_LBN 16
+#define FRF_AB_XM_MSK_STA_INTR_WIDTH 1
+#define FRF_AB_XM_MSK_STAT_CNTR_HF_LBN 9
+#define FRF_AB_XM_MSK_STAT_CNTR_HF_WIDTH 1
+#define FRF_AB_XM_MSK_STAT_CNTR_OF_LBN 8
+#define FRF_AB_XM_MSK_STAT_CNTR_OF_WIDTH 1
+#define FRF_AB_XM_MSK_PRMBLE_ERR_LBN 2
+#define FRF_AB_XM_MSK_PRMBLE_ERR_WIDTH 1
+#define FRF_AB_XM_MSK_RMTFLT_LBN 1
+#define FRF_AB_XM_MSK_RMTFLT_WIDTH 1
+#define FRF_AB_XM_MSK_LCLFLT_LBN 0
+#define FRF_AB_XM_MSK_LCLFLT_WIDTH 1
+
+/* XM_FC_REG: XGMAC flow control register */
+#define FR_AB_XM_FC 0x00001270
+#define FRF_AB_XM_PAUSE_TIME_LBN 16
+#define FRF_AB_XM_PAUSE_TIME_WIDTH 16
+#define FRF_AB_XM_RX_MAC_STAT_LBN 11
+#define FRF_AB_XM_RX_MAC_STAT_WIDTH 1
+#define FRF_AB_XM_TX_MAC_STAT_LBN 10
+#define FRF_AB_XM_TX_MAC_STAT_WIDTH 1
+#define FRF_AB_XM_MCNTL_PASS_LBN 8
+#define FRF_AB_XM_MCNTL_PASS_WIDTH 2
+#define FRF_AB_XM_REJ_CNTL_UCAST_LBN 6
+#define FRF_AB_XM_REJ_CNTL_UCAST_WIDTH 1
+#define FRF_AB_XM_REJ_CNTL_MCAST_LBN 5
+#define FRF_AB_XM_REJ_CNTL_MCAST_WIDTH 1
+#define FRF_AB_XM_ZPAUSE_LBN 2
+#define FRF_AB_XM_ZPAUSE_WIDTH 1
+#define FRF_AB_XM_XMIT_PAUSE_LBN 1
+#define FRF_AB_XM_XMIT_PAUSE_WIDTH 1
+#define FRF_AB_XM_DIS_FCNTL_LBN 0
+#define FRF_AB_XM_DIS_FCNTL_WIDTH 1
+
+/* XM_PAUSE_TIME_REG: XGMAC pause time register */
+#define FR_AB_XM_PAUSE_TIME 0x00001290
+#define FRF_AB_XM_TX_PAUSE_CNT_LBN 16
+#define FRF_AB_XM_TX_PAUSE_CNT_WIDTH 16
+#define FRF_AB_XM_RX_PAUSE_CNT_LBN 0
+#define FRF_AB_XM_RX_PAUSE_CNT_WIDTH 16
+
+/* XM_TX_PARAM_REG: XGMAC transmit parameter register */
+#define FR_AB_XM_TX_PARAM 0x000012d0
+#define FRF_AB_XM_TX_JUMBO_MODE_LBN 31
+#define FRF_AB_XM_TX_JUMBO_MODE_WIDTH 1
+#define FRF_AB_XM_MAX_TX_FRM_SIZE_HI_LBN 19
+#define FRF_AB_XM_MAX_TX_FRM_SIZE_HI_WIDTH 11
+#define FRF_AB_XM_MAX_TX_FRM_SIZE_LO_LBN 16
+#define FRF_AB_XM_MAX_TX_FRM_SIZE_LO_WIDTH 3
+#define FRF_AB_XM_PAD_CHAR_LBN 0
+#define FRF_AB_XM_PAD_CHAR_WIDTH 8
+
+/* XM_RX_PARAM_REG: XGMAC receive parameter register */
+#define FR_AB_XM_RX_PARAM 0x000012e0
+#define FRF_AB_XM_MAX_RX_FRM_SIZE_HI_LBN 3
+#define FRF_AB_XM_MAX_RX_FRM_SIZE_HI_WIDTH 11
+#define FRF_AB_XM_MAX_RX_FRM_SIZE_LO_LBN 0
+#define FRF_AB_XM_MAX_RX_FRM_SIZE_LO_WIDTH 3
+
+/* XM_MGT_INT_MSK_REG: XGMAC management interrupt mask register */
+#define FR_AB_XM_MGT_INT_MSK 0x000012f0
+#define FRF_AB_XM_STAT_CNTR_OF_LBN 9
+#define FRF_AB_XM_STAT_CNTR_OF_WIDTH 1
+#define FRF_AB_XM_STAT_CNTR_HF_LBN 8
+#define FRF_AB_XM_STAT_CNTR_HF_WIDTH 1
+#define FRF_AB_XM_PRMBLE_ERR_LBN 2
+#define FRF_AB_XM_PRMBLE_ERR_WIDTH 1
+#define FRF_AB_XM_RMTFLT_LBN 1
+#define FRF_AB_XM_RMTFLT_WIDTH 1
+#define FRF_AB_XM_LCLFLT_LBN 0
+#define FRF_AB_XM_LCLFLT_WIDTH 1
+
+/* XX_PWR_RST_REG: XGXS/XAUI powerdown/reset register */
+#define FR_AB_XX_PWR_RST 0x00001300
+#define FRF_AB_XX_PWRDND_SIG_LBN 31
+#define FRF_AB_XX_PWRDND_SIG_WIDTH 1
+#define FRF_AB_XX_PWRDNC_SIG_LBN 30
+#define FRF_AB_XX_PWRDNC_SIG_WIDTH 1
+#define FRF_AB_XX_PWRDNB_SIG_LBN 29
+#define FRF_AB_XX_PWRDNB_SIG_WIDTH 1
+#define FRF_AB_XX_PWRDNA_SIG_LBN 28
+#define FRF_AB_XX_PWRDNA_SIG_WIDTH 1
+#define FRF_AB_XX_SIM_MODE_LBN 27
+#define FRF_AB_XX_SIM_MODE_WIDTH 1
+#define FRF_AB_XX_RSTPLLCD_SIG_LBN 25
+#define FRF_AB_XX_RSTPLLCD_SIG_WIDTH 1
+#define FRF_AB_XX_RSTPLLAB_SIG_LBN 24
+#define FRF_AB_XX_RSTPLLAB_SIG_WIDTH 1
+#define FRF_AB_XX_RESETD_SIG_LBN 23
+#define FRF_AB_XX_RESETD_SIG_WIDTH 1
+#define FRF_AB_XX_RESETC_SIG_LBN 22
+#define FRF_AB_XX_RESETC_SIG_WIDTH 1
+#define FRF_AB_XX_RESETB_SIG_LBN 21
+#define FRF_AB_XX_RESETB_SIG_WIDTH 1
+#define FRF_AB_XX_RESETA_SIG_LBN 20
+#define FRF_AB_XX_RESETA_SIG_WIDTH 1
+#define FRF_AB_XX_RSTXGXSRX_SIG_LBN 18
+#define FRF_AB_XX_RSTXGXSRX_SIG_WIDTH 1
+#define FRF_AB_XX_RSTXGXSTX_SIG_LBN 17
+#define FRF_AB_XX_RSTXGXSTX_SIG_WIDTH 1
+#define FRF_AB_XX_SD_RST_ACT_LBN 16
+#define FRF_AB_XX_SD_RST_ACT_WIDTH 1
+#define FRF_AB_XX_PWRDND_EN_LBN 15
+#define FRF_AB_XX_PWRDND_EN_WIDTH 1
+#define FRF_AB_XX_PWRDNC_EN_LBN 14
+#define FRF_AB_XX_PWRDNC_EN_WIDTH 1
+#define FRF_AB_XX_PWRDNB_EN_LBN 13
+#define FRF_AB_XX_PWRDNB_EN_WIDTH 1
+#define FRF_AB_XX_PWRDNA_EN_LBN 12
+#define FRF_AB_XX_PWRDNA_EN_WIDTH 1
+#define FRF_AB_XX_RSTPLLCD_EN_LBN 9
+#define FRF_AB_XX_RSTPLLCD_EN_WIDTH 1
+#define FRF_AB_XX_RSTPLLAB_EN_LBN 8
+#define FRF_AB_XX_RSTPLLAB_EN_WIDTH 1
+#define FRF_AB_XX_RESETD_EN_LBN 7
+#define FRF_AB_XX_RESETD_EN_WIDTH 1
+#define FRF_AB_XX_RESETC_EN_LBN 6
+#define FRF_AB_XX_RESETC_EN_WIDTH 1
+#define FRF_AB_XX_RESETB_EN_LBN 5
+#define FRF_AB_XX_RESETB_EN_WIDTH 1
+#define FRF_AB_XX_RESETA_EN_LBN 4
+#define FRF_AB_XX_RESETA_EN_WIDTH 1
+#define FRF_AB_XX_RSTXGXSRX_EN_LBN 2
+#define FRF_AB_XX_RSTXGXSRX_EN_WIDTH 1
+#define FRF_AB_XX_RSTXGXSTX_EN_LBN 1
+#define FRF_AB_XX_RSTXGXSTX_EN_WIDTH 1
+#define FRF_AB_XX_RST_XX_EN_LBN 0
+#define FRF_AB_XX_RST_XX_EN_WIDTH 1
+
+/* XX_SD_CTL_REG: XGXS/XAUI powerdown/reset control register */
+#define FR_AB_XX_SD_CTL 0x00001310
+#define FRF_AB_XX_TERMADJ1_LBN 17
+#define FRF_AB_XX_TERMADJ1_WIDTH 1
+#define FRF_AB_XX_TERMADJ0_LBN 16
+#define FRF_AB_XX_TERMADJ0_WIDTH 1
+#define FRF_AB_XX_HIDRVD_LBN 15
+#define FRF_AB_XX_HIDRVD_WIDTH 1
+#define FRF_AB_XX_LODRVD_LBN 14
+#define FRF_AB_XX_LODRVD_WIDTH 1
+#define FRF_AB_XX_HIDRVC_LBN 13
+#define FRF_AB_XX_HIDRVC_WIDTH 1
+#define FRF_AB_XX_LODRVC_LBN 12
+#define FRF_AB_XX_LODRVC_WIDTH 1
+#define FRF_AB_XX_HIDRVB_LBN 11
+#define FRF_AB_XX_HIDRVB_WIDTH 1
+#define FRF_AB_XX_LODRVB_LBN 10
+#define FRF_AB_XX_LODRVB_WIDTH 1
+#define FRF_AB_XX_HIDRVA_LBN 9
+#define FRF_AB_XX_HIDRVA_WIDTH 1
+#define FRF_AB_XX_LODRVA_LBN 8
+#define FRF_AB_XX_LODRVA_WIDTH 1
+#define FRF_AB_XX_LPBKD_LBN 3
+#define FRF_AB_XX_LPBKD_WIDTH 1
+#define FRF_AB_XX_LPBKC_LBN 2
+#define FRF_AB_XX_LPBKC_WIDTH 1
+#define FRF_AB_XX_LPBKB_LBN 1
+#define FRF_AB_XX_LPBKB_WIDTH 1
+#define FRF_AB_XX_LPBKA_LBN 0
+#define FRF_AB_XX_LPBKA_WIDTH 1
+
+/* XX_TXDRV_CTL_REG: XAUI SerDes transmit drive control register */
+#define FR_AB_XX_TXDRV_CTL 0x00001320
+#define FRF_AB_XX_DEQD_LBN 28
+#define FRF_AB_XX_DEQD_WIDTH 4
+#define FRF_AB_XX_DEQC_LBN 24
+#define FRF_AB_XX_DEQC_WIDTH 4
+#define FRF_AB_XX_DEQB_LBN 20
+#define FRF_AB_XX_DEQB_WIDTH 4
+#define FRF_AB_XX_DEQA_LBN 16
+#define FRF_AB_XX_DEQA_WIDTH 4
+#define FRF_AB_XX_DTXD_LBN 12
+#define FRF_AB_XX_DTXD_WIDTH 4
+#define FRF_AB_XX_DTXC_LBN 8
+#define FRF_AB_XX_DTXC_WIDTH 4
+#define FRF_AB_XX_DTXB_LBN 4
+#define FRF_AB_XX_DTXB_WIDTH 4
+#define FRF_AB_XX_DTXA_LBN 0
+#define FRF_AB_XX_DTXA_WIDTH 4
+
+/* XX_PRBS_CTL_REG: XAUI PRBS control register */
+#define FR_AB_XX_PRBS_CTL 0x00001330
+#define FRF_AB_XX_CH3_RX_PRBS_SEL_LBN 30
+#define FRF_AB_XX_CH3_RX_PRBS_SEL_WIDTH 2
+#define FRF_AB_XX_CH3_RX_PRBS_INV_LBN 29
+#define FRF_AB_XX_CH3_RX_PRBS_INV_WIDTH 1
+#define FRF_AB_XX_CH3_RX_PRBS_CHKEN_LBN 28
+#define FRF_AB_XX_CH3_RX_PRBS_CHKEN_WIDTH 1
+#define FRF_AB_XX_CH2_RX_PRBS_SEL_LBN 26
+#define FRF_AB_XX_CH2_RX_PRBS_SEL_WIDTH 2
+#define FRF_AB_XX_CH2_RX_PRBS_INV_LBN 25
+#define FRF_AB_XX_CH2_RX_PRBS_INV_WIDTH 1
+#define FRF_AB_XX_CH2_RX_PRBS_CHKEN_LBN 24
+#define FRF_AB_XX_CH2_RX_PRBS_CHKEN_WIDTH 1
+#define FRF_AB_XX_CH1_RX_PRBS_SEL_LBN 22
+#define FRF_AB_XX_CH1_RX_PRBS_SEL_WIDTH 2
+#define FRF_AB_XX_CH1_RX_PRBS_INV_LBN 21
+#define FRF_AB_XX_CH1_RX_PRBS_INV_WIDTH 1
+#define FRF_AB_XX_CH1_RX_PRBS_CHKEN_LBN 20
+#define FRF_AB_XX_CH1_RX_PRBS_CHKEN_WIDTH 1
+#define FRF_AB_XX_CH0_RX_PRBS_SEL_LBN 18
+#define FRF_AB_XX_CH0_RX_PRBS_SEL_WIDTH 2
+#define FRF_AB_XX_CH0_RX_PRBS_INV_LBN 17
+#define FRF_AB_XX_CH0_RX_PRBS_INV_WIDTH 1
+#define FRF_AB_XX_CH0_RX_PRBS_CHKEN_LBN 16
+#define FRF_AB_XX_CH0_RX_PRBS_CHKEN_WIDTH 1
+#define FRF_AB_XX_CH3_TX_PRBS_SEL_LBN 14
+#define FRF_AB_XX_CH3_TX_PRBS_SEL_WIDTH 2
+#define FRF_AB_XX_CH3_TX_PRBS_INV_LBN 13
+#define FRF_AB_XX_CH3_TX_PRBS_INV_WIDTH 1
+#define FRF_AB_XX_CH3_TX_PRBS_CHKEN_LBN 12
+#define FRF_AB_XX_CH3_TX_PRBS_CHKEN_WIDTH 1
+#define FRF_AB_XX_CH2_TX_PRBS_SEL_LBN 10
+#define FRF_AB_XX_CH2_TX_PRBS_SEL_WIDTH 2
+#define FRF_AB_XX_CH2_TX_PRBS_INV_LBN 9
+#define FRF_AB_XX_CH2_TX_PRBS_INV_WIDTH 1
+#define FRF_AB_XX_CH2_TX_PRBS_CHKEN_LBN 8
+#define FRF_AB_XX_CH2_TX_PRBS_CHKEN_WIDTH 1
+#define FRF_AB_XX_CH1_TX_PRBS_SEL_LBN 6
+#define FRF_AB_XX_CH1_TX_PRBS_SEL_WIDTH 2
+#define FRF_AB_XX_CH1_TX_PRBS_INV_LBN 5
+#define FRF_AB_XX_CH1_TX_PRBS_INV_WIDTH 1
+#define FRF_AB_XX_CH1_TX_PRBS_CHKEN_LBN 4
+#define FRF_AB_XX_CH1_TX_PRBS_CHKEN_WIDTH 1
+#define FRF_AB_XX_CH0_TX_PRBS_SEL_LBN 2
+#define FRF_AB_XX_CH0_TX_PRBS_SEL_WIDTH 2
+#define FRF_AB_XX_CH0_TX_PRBS_INV_LBN 1
+#define FRF_AB_XX_CH0_TX_PRBS_INV_WIDTH 1
+#define FRF_AB_XX_CH0_TX_PRBS_CHKEN_LBN 0
+#define FRF_AB_XX_CH0_TX_PRBS_CHKEN_WIDTH 1
+
+/* XX_PRBS_CHK_REG: XAUI PRBS checker control and status register */
+#define FR_AB_XX_PRBS_CHK 0x00001340
+#define FRF_AB_XX_REV_LB_EN_LBN 16
+#define FRF_AB_XX_REV_LB_EN_WIDTH 1
+#define FRF_AB_XX_CH3_DEG_DET_LBN 15
+#define FRF_AB_XX_CH3_DEG_DET_WIDTH 1
+#define FRF_AB_XX_CH3_LFSR_LOCK_IND_LBN 14
+#define FRF_AB_XX_CH3_LFSR_LOCK_IND_WIDTH 1
+#define FRF_AB_XX_CH3_PRBS_FRUN_LBN 13
+#define FRF_AB_XX_CH3_PRBS_FRUN_WIDTH 1
+#define FRF_AB_XX_CH3_ERR_CHK_LBN 12
+#define FRF_AB_XX_CH3_ERR_CHK_WIDTH 1
+#define FRF_AB_XX_CH2_DEG_DET_LBN 11
+#define FRF_AB_XX_CH2_DEG_DET_WIDTH 1
+#define FRF_AB_XX_CH2_LFSR_LOCK_IND_LBN 10
+#define FRF_AB_XX_CH2_LFSR_LOCK_IND_WIDTH 1
+#define FRF_AB_XX_CH2_PRBS_FRUN_LBN 9
+#define FRF_AB_XX_CH2_PRBS_FRUN_WIDTH 1
+#define FRF_AB_XX_CH2_ERR_CHK_LBN 8
+#define FRF_AB_XX_CH2_ERR_CHK_WIDTH 1
+#define FRF_AB_XX_CH1_DEG_DET_LBN 7
+#define FRF_AB_XX_CH1_DEG_DET_WIDTH 1
+#define FRF_AB_XX_CH1_LFSR_LOCK_IND_LBN 6
+#define FRF_AB_XX_CH1_LFSR_LOCK_IND_WIDTH 1
+#define FRF_AB_XX_CH1_PRBS_FRUN_LBN 5
+#define FRF_AB_XX_CH1_PRBS_FRUN_WIDTH 1
+#define FRF_AB_XX_CH1_ERR_CHK_LBN 4
+#define FRF_AB_XX_CH1_ERR_CHK_WIDTH 1
+#define FRF_AB_XX_CH0_DEG_DET_LBN 3
+#define FRF_AB_XX_CH0_DEG_DET_WIDTH 1
+#define FRF_AB_XX_CH0_LFSR_LOCK_IND_LBN 2
+#define FRF_AB_XX_CH0_LFSR_LOCK_IND_WIDTH 1
+#define FRF_AB_XX_CH0_PRBS_FRUN_LBN 1
+#define FRF_AB_XX_CH0_PRBS_FRUN_WIDTH 1
+#define FRF_AB_XX_CH0_ERR_CHK_LBN 0
+#define FRF_AB_XX_CH0_ERR_CHK_WIDTH 1
+
+/* XX_PRBS_ERR_REG: XAUI PRBS error count register */
+#define FR_AB_XX_PRBS_ERR 0x00001350
+#define FRF_AB_XX_CH3_PRBS_ERR_CNT_LBN 24
+#define FRF_AB_XX_CH3_PRBS_ERR_CNT_WIDTH 8
+#define FRF_AB_XX_CH2_PRBS_ERR_CNT_LBN 16
+#define FRF_AB_XX_CH2_PRBS_ERR_CNT_WIDTH 8
+#define FRF_AB_XX_CH1_PRBS_ERR_CNT_LBN 8
+#define FRF_AB_XX_CH1_PRBS_ERR_CNT_WIDTH 8
+#define FRF_AB_XX_CH0_PRBS_ERR_CNT_LBN 0
+#define FRF_AB_XX_CH0_PRBS_ERR_CNT_WIDTH 8
+
+/* XX_CORE_STAT_REG: XAUI XGXS core status register */
+#define FR_AB_XX_CORE_STAT 0x00001360
+#define FRF_AB_XX_FORCE_SIG3_LBN 31
+#define FRF_AB_XX_FORCE_SIG3_WIDTH 1
+#define FRF_AB_XX_FORCE_SIG3_VAL_LBN 30
+#define FRF_AB_XX_FORCE_SIG3_VAL_WIDTH 1
+#define FRF_AB_XX_FORCE_SIG2_LBN 29
+#define FRF_AB_XX_FORCE_SIG2_WIDTH 1
+#define FRF_AB_XX_FORCE_SIG2_VAL_LBN 28
+#define FRF_AB_XX_FORCE_SIG2_VAL_WIDTH 1
+#define FRF_AB_XX_FORCE_SIG1_LBN 27
+#define FRF_AB_XX_FORCE_SIG1_WIDTH 1
+#define FRF_AB_XX_FORCE_SIG1_VAL_LBN 26
+#define FRF_AB_XX_FORCE_SIG1_VAL_WIDTH 1
+#define FRF_AB_XX_FORCE_SIG0_LBN 25
+#define FRF_AB_XX_FORCE_SIG0_WIDTH 1
+#define FRF_AB_XX_FORCE_SIG0_VAL_LBN 24
+#define FRF_AB_XX_FORCE_SIG0_VAL_WIDTH 1
+#define FRF_AB_XX_XGXS_LB_EN_LBN 23
+#define FRF_AB_XX_XGXS_LB_EN_WIDTH 1
+#define FRF_AB_XX_XGMII_LB_EN_LBN 22
+#define FRF_AB_XX_XGMII_LB_EN_WIDTH 1
+#define FRF_AB_XX_MATCH_FAULT_LBN 21
+#define FRF_AB_XX_MATCH_FAULT_WIDTH 1
+#define FRF_AB_XX_ALIGN_DONE_LBN 20
+#define FRF_AB_XX_ALIGN_DONE_WIDTH 1
+#define FRF_AB_XX_SYNC_STAT3_LBN 19
+#define FRF_AB_XX_SYNC_STAT3_WIDTH 1
+#define FRF_AB_XX_SYNC_STAT2_LBN 18
+#define FRF_AB_XX_SYNC_STAT2_WIDTH 1
+#define FRF_AB_XX_SYNC_STAT1_LBN 17
+#define FRF_AB_XX_SYNC_STAT1_WIDTH 1
+#define FRF_AB_XX_SYNC_STAT0_LBN 16
+#define FRF_AB_XX_SYNC_STAT0_WIDTH 1
+#define FRF_AB_XX_COMMA_DET_CH3_LBN 15
+#define FRF_AB_XX_COMMA_DET_CH3_WIDTH 1
+#define FRF_AB_XX_COMMA_DET_CH2_LBN 14
+#define FRF_AB_XX_COMMA_DET_CH2_WIDTH 1
+#define FRF_AB_XX_COMMA_DET_CH1_LBN 13
+#define FRF_AB_XX_COMMA_DET_CH1_WIDTH 1
+#define FRF_AB_XX_COMMA_DET_CH0_LBN 12
+#define FRF_AB_XX_COMMA_DET_CH0_WIDTH 1
+#define FRF_AB_XX_CGRP_ALIGN_CH3_LBN 11
+#define FRF_AB_XX_CGRP_ALIGN_CH3_WIDTH 1
+#define FRF_AB_XX_CGRP_ALIGN_CH2_LBN 10
+#define FRF_AB_XX_CGRP_ALIGN_CH2_WIDTH 1
+#define FRF_AB_XX_CGRP_ALIGN_CH1_LBN 9
+#define FRF_AB_XX_CGRP_ALIGN_CH1_WIDTH 1
+#define FRF_AB_XX_CGRP_ALIGN_CH0_LBN 8
+#define FRF_AB_XX_CGRP_ALIGN_CH0_WIDTH 1
+#define FRF_AB_XX_CHAR_ERR_CH3_LBN 7
+#define FRF_AB_XX_CHAR_ERR_CH3_WIDTH 1
+#define FRF_AB_XX_CHAR_ERR_CH2_LBN 6
+#define FRF_AB_XX_CHAR_ERR_CH2_WIDTH 1
+#define FRF_AB_XX_CHAR_ERR_CH1_LBN 5
+#define FRF_AB_XX_CHAR_ERR_CH1_WIDTH 1
+#define FRF_AB_XX_CHAR_ERR_CH0_LBN 4
+#define FRF_AB_XX_CHAR_ERR_CH0_WIDTH 1
+#define FRF_AB_XX_DISPERR_CH3_LBN 3
+#define FRF_AB_XX_DISPERR_CH3_WIDTH 1
+#define FRF_AB_XX_DISPERR_CH2_LBN 2
+#define FRF_AB_XX_DISPERR_CH2_WIDTH 1
+#define FRF_AB_XX_DISPERR_CH1_LBN 1
+#define FRF_AB_XX_DISPERR_CH1_WIDTH 1
+#define FRF_AB_XX_DISPERR_CH0_LBN 0
+#define FRF_AB_XX_DISPERR_CH0_WIDTH 1
+
+/* RX_DESC_PTR_TBL_KER: Receive descriptor pointer table */
+#define FR_AA_RX_DESC_PTR_TBL_KER 0x00011800
+#define FR_AA_RX_DESC_PTR_TBL_KER_STEP 16
+#define FR_AA_RX_DESC_PTR_TBL_KER_ROWS 4
+/* RX_DESC_PTR_TBL: Receive descriptor pointer table */
+#define FR_BZ_RX_DESC_PTR_TBL 0x00f40000
+#define FR_BZ_RX_DESC_PTR_TBL_STEP 16
+#define FR_BB_RX_DESC_PTR_TBL_ROWS 4096
+#define FR_CZ_RX_DESC_PTR_TBL_ROWS 1024
+#define FRF_CZ_RX_HDR_SPLIT_LBN 90
+#define FRF_CZ_RX_HDR_SPLIT_WIDTH 1
+#define FRF_AA_RX_RESET_LBN 89
+#define FRF_AA_RX_RESET_WIDTH 1
+#define FRF_AZ_RX_ISCSI_DDIG_EN_LBN 88
+#define FRF_AZ_RX_ISCSI_DDIG_EN_WIDTH 1
+#define FRF_AZ_RX_ISCSI_HDIG_EN_LBN 87
+#define FRF_AZ_RX_ISCSI_HDIG_EN_WIDTH 1
+#define FRF_AZ_RX_DESC_PREF_ACT_LBN 86
+#define FRF_AZ_RX_DESC_PREF_ACT_WIDTH 1
+#define FRF_AZ_RX_DC_HW_RPTR_LBN 80
+#define FRF_AZ_RX_DC_HW_RPTR_WIDTH 6
+#define FRF_AZ_RX_DESCQ_HW_RPTR_LBN 68
+#define FRF_AZ_RX_DESCQ_HW_RPTR_WIDTH 12
+#define FRF_AZ_RX_DESCQ_SW_WPTR_LBN 56
+#define FRF_AZ_RX_DESCQ_SW_WPTR_WIDTH 12
+#define FRF_AZ_RX_DESCQ_BUF_BASE_ID_LBN 36
+#define FRF_AZ_RX_DESCQ_BUF_BASE_ID_WIDTH 20
+#define FRF_AZ_RX_DESCQ_EVQ_ID_LBN 24
+#define FRF_AZ_RX_DESCQ_EVQ_ID_WIDTH 12
+#define FRF_AZ_RX_DESCQ_OWNER_ID_LBN 10
+#define FRF_AZ_RX_DESCQ_OWNER_ID_WIDTH 14
+#define FRF_AZ_RX_DESCQ_LABEL_LBN 5
+#define FRF_AZ_RX_DESCQ_LABEL_WIDTH 5
+#define FRF_AZ_RX_DESCQ_SIZE_LBN 3
+#define FRF_AZ_RX_DESCQ_SIZE_WIDTH 2
+#define FFE_AZ_RX_DESCQ_SIZE_4K 3
+#define FFE_AZ_RX_DESCQ_SIZE_2K 2
+#define FFE_AZ_RX_DESCQ_SIZE_1K 1
+#define FFE_AZ_RX_DESCQ_SIZE_512 0
+#define FRF_AZ_RX_DESCQ_TYPE_LBN 2
+#define FRF_AZ_RX_DESCQ_TYPE_WIDTH 1
+#define FRF_AZ_RX_DESCQ_JUMBO_LBN 1
+#define FRF_AZ_RX_DESCQ_JUMBO_WIDTH 1
+#define FRF_AZ_RX_DESCQ_EN_LBN 0
+#define FRF_AZ_RX_DESCQ_EN_WIDTH 1
+
+/* TX_DESC_PTR_TBL_KER: Transmit descriptor pointer table */
+#define FR_AA_TX_DESC_PTR_TBL_KER 0x00011900
+#define FR_AA_TX_DESC_PTR_TBL_KER_STEP 16
+#define FR_AA_TX_DESC_PTR_TBL_KER_ROWS 8
+/* TX_DESC_PTR_TBL: Transmit descriptor pointer table */
+#define FR_BZ_TX_DESC_PTR_TBL 0x00f50000
+#define FR_BZ_TX_DESC_PTR_TBL_STEP 16
+#define FR_BB_TX_DESC_PTR_TBL_ROWS 4096
+#define FR_CZ_TX_DESC_PTR_TBL_ROWS 1024
+#define FRF_CZ_TX_DPT_Q_MASK_WIDTH_LBN 94
+#define FRF_CZ_TX_DPT_Q_MASK_WIDTH_WIDTH 2
+#define FRF_CZ_TX_DPT_ETH_FILT_EN_LBN 93
+#define FRF_CZ_TX_DPT_ETH_FILT_EN_WIDTH 1
+#define FRF_CZ_TX_DPT_IP_FILT_EN_LBN 92
+#define FRF_CZ_TX_DPT_IP_FILT_EN_WIDTH 1
+#define FRF_BZ_TX_NON_IP_DROP_DIS_LBN 91
+#define FRF_BZ_TX_NON_IP_DROP_DIS_WIDTH 1
+#define FRF_BZ_TX_IP_CHKSM_DIS_LBN 90
+#define FRF_BZ_TX_IP_CHKSM_DIS_WIDTH 1
+#define FRF_BZ_TX_TCP_CHKSM_DIS_LBN 89
+#define FRF_BZ_TX_TCP_CHKSM_DIS_WIDTH 1
+#define FRF_AZ_TX_DESCQ_EN_LBN 88
+#define FRF_AZ_TX_DESCQ_EN_WIDTH 1
+#define FRF_AZ_TX_ISCSI_DDIG_EN_LBN 87
+#define FRF_AZ_TX_ISCSI_DDIG_EN_WIDTH 1
+#define FRF_AZ_TX_ISCSI_HDIG_EN_LBN 86
+#define FRF_AZ_TX_ISCSI_HDIG_EN_WIDTH 1
+#define FRF_AZ_TX_DC_HW_RPTR_LBN 80
+#define FRF_AZ_TX_DC_HW_RPTR_WIDTH 6
+#define FRF_AZ_TX_DESCQ_HW_RPTR_LBN 68
+#define FRF_AZ_TX_DESCQ_HW_RPTR_WIDTH 12
+#define FRF_AZ_TX_DESCQ_SW_WPTR_LBN 56
+#define FRF_AZ_TX_DESCQ_SW_WPTR_WIDTH 12
+#define FRF_AZ_TX_DESCQ_BUF_BASE_ID_LBN 36
+#define FRF_AZ_TX_DESCQ_BUF_BASE_ID_WIDTH 20
+#define FRF_AZ_TX_DESCQ_EVQ_ID_LBN 24
+#define FRF_AZ_TX_DESCQ_EVQ_ID_WIDTH 12
+#define FRF_AZ_TX_DESCQ_OWNER_ID_LBN 10
+#define FRF_AZ_TX_DESCQ_OWNER_ID_WIDTH 14
+#define FRF_AZ_TX_DESCQ_LABEL_LBN 5
+#define FRF_AZ_TX_DESCQ_LABEL_WIDTH 5
+#define FRF_AZ_TX_DESCQ_SIZE_LBN 3
+#define FRF_AZ_TX_DESCQ_SIZE_WIDTH 2
+#define FFE_AZ_TX_DESCQ_SIZE_4K 3
+#define FFE_AZ_TX_DESCQ_SIZE_2K 2
+#define FFE_AZ_TX_DESCQ_SIZE_1K 1
+#define FFE_AZ_TX_DESCQ_SIZE_512 0
+#define FRF_AZ_TX_DESCQ_TYPE_LBN 1
+#define FRF_AZ_TX_DESCQ_TYPE_WIDTH 2
+#define FRF_AZ_TX_DESCQ_FLUSH_LBN 0
+#define FRF_AZ_TX_DESCQ_FLUSH_WIDTH 1
+
+/* EVQ_PTR_TBL_KER: Event queue pointer table */
+#define FR_AA_EVQ_PTR_TBL_KER 0x00011a00
+#define FR_AA_EVQ_PTR_TBL_KER_STEP 16
+#define FR_AA_EVQ_PTR_TBL_KER_ROWS 4
+/* EVQ_PTR_TBL: Event queue pointer table */
+#define FR_BZ_EVQ_PTR_TBL 0x00f60000
+#define FR_BZ_EVQ_PTR_TBL_STEP 16
+#define FR_CZ_EVQ_PTR_TBL_ROWS 1024
+#define FR_BB_EVQ_PTR_TBL_ROWS 4096
+#define FRF_BZ_EVQ_RPTR_IGN_LBN 40
+#define FRF_BZ_EVQ_RPTR_IGN_WIDTH 1
+#define FRF_AB_EVQ_WKUP_OR_INT_EN_LBN 39
+#define FRF_AB_EVQ_WKUP_OR_INT_EN_WIDTH 1
+#define FRF_CZ_EVQ_DOS_PROTECT_EN_LBN 39
+#define FRF_CZ_EVQ_DOS_PROTECT_EN_WIDTH 1
+#define FRF_AZ_EVQ_NXT_WPTR_LBN 24
+#define FRF_AZ_EVQ_NXT_WPTR_WIDTH 15
+#define FRF_AZ_EVQ_EN_LBN 23
+#define FRF_AZ_EVQ_EN_WIDTH 1
+#define FRF_AZ_EVQ_SIZE_LBN 20
+#define FRF_AZ_EVQ_SIZE_WIDTH 3
+#define FFE_AZ_EVQ_SIZE_32K 6
+#define FFE_AZ_EVQ_SIZE_16K 5
+#define FFE_AZ_EVQ_SIZE_8K 4
+#define FFE_AZ_EVQ_SIZE_4K 3
+#define FFE_AZ_EVQ_SIZE_2K 2
+#define FFE_AZ_EVQ_SIZE_1K 1
+#define FFE_AZ_EVQ_SIZE_512 0
+#define FRF_AZ_EVQ_BUF_BASE_ID_LBN 0
+#define FRF_AZ_EVQ_BUF_BASE_ID_WIDTH 20
+
+/* BUF_HALF_TBL_KER: Buffer table in half buffer table mode, for direct access by the driver */
+#define FR_AA_BUF_HALF_TBL_KER 0x00018000
+#define FR_AA_BUF_HALF_TBL_KER_STEP 8
+#define FR_AA_BUF_HALF_TBL_KER_ROWS 4096
+/* BUF_HALF_TBL: Buffer table in half buffer table mode, for direct access by the driver */
+#define FR_BZ_BUF_HALF_TBL 0x00800000
+#define FR_BZ_BUF_HALF_TBL_STEP 8
+#define FR_CZ_BUF_HALF_TBL_ROWS 147456
+#define FR_BB_BUF_HALF_TBL_ROWS 524288
+#define FRF_AZ_BUF_ADR_HBUF_ODD_LBN 44
+#define FRF_AZ_BUF_ADR_HBUF_ODD_WIDTH 20
+#define FRF_AZ_BUF_OWNER_ID_HBUF_ODD_LBN 32
+#define FRF_AZ_BUF_OWNER_ID_HBUF_ODD_WIDTH 12
+#define FRF_AZ_BUF_ADR_HBUF_EVEN_LBN 12
+#define FRF_AZ_BUF_ADR_HBUF_EVEN_WIDTH 20
+#define FRF_AZ_BUF_OWNER_ID_HBUF_EVEN_LBN 0
+#define FRF_AZ_BUF_OWNER_ID_HBUF_EVEN_WIDTH 12
+
+/* BUF_FULL_TBL_KER: Buffer table in full buffer table mode, for direct access by the driver */
+#define FR_AA_BUF_FULL_TBL_KER 0x00018000
+#define FR_AA_BUF_FULL_TBL_KER_STEP 8
+#define FR_AA_BUF_FULL_TBL_KER_ROWS 4096
+/* BUF_FULL_TBL: Buffer table in full buffer table mode, for direct access by the driver */
+#define FR_BZ_BUF_FULL_TBL 0x00800000
+#define FR_BZ_BUF_FULL_TBL_STEP 8
+#define FR_CZ_BUF_FULL_TBL_ROWS 147456
+#define FR_BB_BUF_FULL_TBL_ROWS 917504
+#define FRF_AZ_BUF_FULL_UNUSED_LBN 51
+#define FRF_AZ_BUF_FULL_UNUSED_WIDTH 13
+#define FRF_AZ_IP_DAT_BUF_SIZE_LBN 50
+#define FRF_AZ_IP_DAT_BUF_SIZE_WIDTH 1
+#define FRF_AZ_BUF_ADR_REGION_LBN 48
+#define FRF_AZ_BUF_ADR_REGION_WIDTH 2
+#define FFE_AZ_BUF_ADR_REGN3 3
+#define FFE_AZ_BUF_ADR_REGN2 2
+#define FFE_AZ_BUF_ADR_REGN1 1
+#define FFE_AZ_BUF_ADR_REGN0 0
+#define FRF_AZ_BUF_ADR_FBUF_LBN 14
+#define FRF_AZ_BUF_ADR_FBUF_WIDTH 34
+#define FRF_AZ_BUF_OWNER_ID_FBUF_LBN 0
+#define FRF_AZ_BUF_OWNER_ID_FBUF_WIDTH 14
+
+/* RX_FILTER_TBL0: TCP/IPv4 Receive filter table */
+#define FR_BZ_RX_FILTER_TBL0 0x00f00000
+#define FR_BZ_RX_FILTER_TBL0_STEP 32
+#define FR_BZ_RX_FILTER_TBL0_ROWS 8192
+/* RX_FILTER_TBL1: TCP/IPv4 Receive filter table */
+#define FR_BB_RX_FILTER_TBL1 0x00f00010
+#define FR_BB_RX_FILTER_TBL1_STEP 32
+#define FR_BB_RX_FILTER_TBL1_ROWS 8192
+#define FRF_BZ_RSS_EN_LBN 110
+#define FRF_BZ_RSS_EN_WIDTH 1
+#define FRF_BZ_SCATTER_EN_LBN 109
+#define FRF_BZ_SCATTER_EN_WIDTH 1
+#define FRF_BZ_TCP_UDP_LBN 108
+#define FRF_BZ_TCP_UDP_WIDTH 1
+#define FRF_BZ_RXQ_ID_LBN 96
+#define FRF_BZ_RXQ_ID_WIDTH 12
+#define FRF_BZ_DEST_IP_LBN 64
+#define FRF_BZ_DEST_IP_WIDTH 32
+#define FRF_BZ_DEST_PORT_TCP_LBN 48
+#define FRF_BZ_DEST_PORT_TCP_WIDTH 16
+#define FRF_BZ_SRC_IP_LBN 16
+#define FRF_BZ_SRC_IP_WIDTH 32
+#define FRF_BZ_SRC_TCP_DEST_UDP_LBN 0
+#define FRF_BZ_SRC_TCP_DEST_UDP_WIDTH 16
+
+/* RX_MAC_FILTER_TBL0: Receive Ethernet filter table */
+#define FR_CZ_RX_MAC_FILTER_TBL0 0x00f00010
+#define FR_CZ_RX_MAC_FILTER_TBL0_STEP 32
+#define FR_CZ_RX_MAC_FILTER_TBL0_ROWS 512
+#define FRF_CZ_RMFT_RSS_EN_LBN 75
+#define FRF_CZ_RMFT_RSS_EN_WIDTH 1
+#define FRF_CZ_RMFT_SCATTER_EN_LBN 74
+#define FRF_CZ_RMFT_SCATTER_EN_WIDTH 1
+#define FRF_CZ_RMFT_IP_OVERRIDE_LBN 73
+#define FRF_CZ_RMFT_IP_OVERRIDE_WIDTH 1
+#define FRF_CZ_RMFT_RXQ_ID_LBN 61
+#define FRF_CZ_RMFT_RXQ_ID_WIDTH 12
+#define FRF_CZ_RMFT_WILDCARD_MATCH_LBN 60
+#define FRF_CZ_RMFT_WILDCARD_MATCH_WIDTH 1
+#define FRF_CZ_RMFT_DEST_MAC_LBN 12
+#define FRF_CZ_RMFT_DEST_MAC_WIDTH 48
+#define FRF_CZ_RMFT_VLAN_ID_LBN 0
+#define FRF_CZ_RMFT_VLAN_ID_WIDTH 12
+
+/* TIMER_TBL: Timer table */
+#define FR_BZ_TIMER_TBL 0x00f70000
+#define FR_BZ_TIMER_TBL_STEP 16
+#define FR_CZ_TIMER_TBL_ROWS 1024
+#define FR_BB_TIMER_TBL_ROWS 4096
+#define FRF_CZ_TIMER_Q_EN_LBN 33
+#define FRF_CZ_TIMER_Q_EN_WIDTH 1
+#define FRF_CZ_INT_ARMD_LBN 32
+#define FRF_CZ_INT_ARMD_WIDTH 1
+#define FRF_CZ_INT_PEND_LBN 31
+#define FRF_CZ_INT_PEND_WIDTH 1
+#define FRF_CZ_HOST_NOTIFY_MODE_LBN 30
+#define FRF_CZ_HOST_NOTIFY_MODE_WIDTH 1
+#define FRF_CZ_RELOAD_TIMER_VAL_LBN 16
+#define FRF_CZ_RELOAD_TIMER_VAL_WIDTH 14
+#define FRF_CZ_TIMER_MODE_LBN 14
+#define FRF_CZ_TIMER_MODE_WIDTH 2
+#define FFE_CZ_TIMER_MODE_INT_HLDOFF 3
+#define FFE_CZ_TIMER_MODE_TRIG_START 2
+#define FFE_CZ_TIMER_MODE_IMMED_START 1
+#define FFE_CZ_TIMER_MODE_DIS 0
+#define FRF_BB_TIMER_MODE_LBN 12
+#define FRF_BB_TIMER_MODE_WIDTH 2
+#define FFE_BB_TIMER_MODE_INT_HLDOFF 2
+#define FFE_BB_TIMER_MODE_TRIG_START 2
+#define FFE_BB_TIMER_MODE_IMMED_START 1
+#define FFE_BB_TIMER_MODE_DIS 0
+#define FRF_CZ_TIMER_VAL_LBN 0
+#define FRF_CZ_TIMER_VAL_WIDTH 14
+#define FRF_BB_TIMER_VAL_LBN 0
+#define FRF_BB_TIMER_VAL_WIDTH 12
+
+/* TX_PACE_TBL: Transmit pacing table */
+#define FR_BZ_TX_PACE_TBL 0x00f80000
+#define FR_BZ_TX_PACE_TBL_STEP 16
+#define FR_CZ_TX_PACE_TBL_ROWS 1024
+#define FR_BB_TX_PACE_TBL_ROWS 4096
+#define FRF_BZ_TX_PACE_LBN 0
+#define FRF_BZ_TX_PACE_WIDTH 5
+
+/* RX_INDIRECTION_TBL: RX Indirection Table */
+#define FR_BZ_RX_INDIRECTION_TBL 0x00fb0000
+#define FR_BZ_RX_INDIRECTION_TBL_STEP 16
+#define FR_BZ_RX_INDIRECTION_TBL_ROWS 128
+#define FRF_BZ_IT_QUEUE_LBN 0
+#define FRF_BZ_IT_QUEUE_WIDTH 6
+
+/* TX_FILTER_TBL0: TCP/IPv4 Transmit filter table */
+#define FR_CZ_TX_FILTER_TBL0 0x00fc0000
+#define FR_CZ_TX_FILTER_TBL0_STEP 16
+#define FR_CZ_TX_FILTER_TBL0_ROWS 8192
+#define FRF_CZ_TIFT_TCP_UDP_LBN 108
+#define FRF_CZ_TIFT_TCP_UDP_WIDTH 1
+#define FRF_CZ_TIFT_TXQ_ID_LBN 96
+#define FRF_CZ_TIFT_TXQ_ID_WIDTH 12
+#define FRF_CZ_TIFT_DEST_IP_LBN 64
+#define FRF_CZ_TIFT_DEST_IP_WIDTH 32
+#define FRF_CZ_TIFT_DEST_PORT_TCP_LBN 48
+#define FRF_CZ_TIFT_DEST_PORT_TCP_WIDTH 16
+#define FRF_CZ_TIFT_SRC_IP_LBN 16
+#define FRF_CZ_TIFT_SRC_IP_WIDTH 32
+#define FRF_CZ_TIFT_SRC_TCP_DEST_UDP_LBN 0
+#define FRF_CZ_TIFT_SRC_TCP_DEST_UDP_WIDTH 16
+
+/* TX_MAC_FILTER_TBL0: Transmit Ethernet filter table */
+#define FR_CZ_TX_MAC_FILTER_TBL0 0x00fe0000
+#define FR_CZ_TX_MAC_FILTER_TBL0_STEP 16
+#define FR_CZ_TX_MAC_FILTER_TBL0_ROWS 512
+#define FRF_CZ_TMFT_TXQ_ID_LBN 61
+#define FRF_CZ_TMFT_TXQ_ID_WIDTH 12
+#define FRF_CZ_TMFT_WILDCARD_MATCH_LBN 60
+#define FRF_CZ_TMFT_WILDCARD_MATCH_WIDTH 1
+#define FRF_CZ_TMFT_SRC_MAC_LBN 12
+#define FRF_CZ_TMFT_SRC_MAC_WIDTH 48
+#define FRF_CZ_TMFT_VLAN_ID_LBN 0
+#define FRF_CZ_TMFT_VLAN_ID_WIDTH 12
+
+/* MC_TREG_SMEM: MC Shared Memory */
+#define FR_CZ_MC_TREG_SMEM 0x00ff0000
+#define FR_CZ_MC_TREG_SMEM_STEP 4
+#define FR_CZ_MC_TREG_SMEM_ROWS 512
+#define FRF_CZ_MC_TREG_SMEM_ROW_LBN 0
+#define FRF_CZ_MC_TREG_SMEM_ROW_WIDTH 32
+
+/* MSIX_VECTOR_TABLE: MSIX Vector Table */
+#define FR_BB_MSIX_VECTOR_TABLE 0x00ff0000
+#define FR_BZ_MSIX_VECTOR_TABLE_STEP 16
+#define FR_BB_MSIX_VECTOR_TABLE_ROWS 64
+/* MSIX_VECTOR_TABLE: MSIX Vector Table */
+#define FR_CZ_MSIX_VECTOR_TABLE 0x00000000
+/* FR_BZ_MSIX_VECTOR_TABLE_STEP 16 */
+#define FR_CZ_MSIX_VECTOR_TABLE_ROWS 1024
+#define FRF_BZ_MSIX_VECTOR_RESERVED_LBN 97
+#define FRF_BZ_MSIX_VECTOR_RESERVED_WIDTH 31
+#define FRF_BZ_MSIX_VECTOR_MASK_LBN 96
+#define FRF_BZ_MSIX_VECTOR_MASK_WIDTH 1
+#define FRF_BZ_MSIX_MESSAGE_DATA_LBN 64
+#define FRF_BZ_MSIX_MESSAGE_DATA_WIDTH 32
+#define FRF_BZ_MSIX_MESSAGE_ADDRESS_HI_LBN 32
+#define FRF_BZ_MSIX_MESSAGE_ADDRESS_HI_WIDTH 32
+#define FRF_BZ_MSIX_MESSAGE_ADDRESS_LO_LBN 0
+#define FRF_BZ_MSIX_MESSAGE_ADDRESS_LO_WIDTH 32
+
+/* MSIX_PBA_TABLE: MSIX Pending Bit Array */
+#define FR_BB_MSIX_PBA_TABLE 0x00ff2000
+#define FR_BZ_MSIX_PBA_TABLE_STEP 4
+#define FR_BB_MSIX_PBA_TABLE_ROWS 2
+/* MSIX_PBA_TABLE: MSIX Pending Bit Array */
+#define FR_CZ_MSIX_PBA_TABLE 0x00008000
+/* FR_BZ_MSIX_PBA_TABLE_STEP 4 */
+#define FR_CZ_MSIX_PBA_TABLE_ROWS 32
+#define FRF_BZ_MSIX_PBA_PEND_DWORD_LBN 0
+#define FRF_BZ_MSIX_PBA_PEND_DWORD_WIDTH 32
+
+/* SRM_DBG_REG: SRAM debug access */
+#define FR_BZ_SRM_DBG 0x03000000
+#define FR_BZ_SRM_DBG_STEP 8
+#define FR_CZ_SRM_DBG_ROWS 262144
+#define FR_BB_SRM_DBG_ROWS 2097152
+#define FRF_BZ_SRM_DBG_LBN 0
+#define FRF_BZ_SRM_DBG_WIDTH 64
+
+/* TB_MSIX_PBA_TABLE: MSIX Pending Bit Array */
+#define FR_CZ_TB_MSIX_PBA_TABLE 0x00008000
+#define FR_CZ_TB_MSIX_PBA_TABLE_STEP 4
+#define FR_CZ_TB_MSIX_PBA_TABLE_ROWS 1024
+#define FRF_CZ_TB_MSIX_PBA_PEND_DWORD_LBN 0
+#define FRF_CZ_TB_MSIX_PBA_PEND_DWORD_WIDTH 32
+
+/* DRIVER_EV */
+#define FSF_AZ_DRIVER_EV_SUBCODE_LBN 56
+#define FSF_AZ_DRIVER_EV_SUBCODE_WIDTH 4
+#define FSE_BZ_TX_DSC_ERROR_EV 15
+#define FSE_BZ_RX_DSC_ERROR_EV 14
+#define FSE_AA_RX_RECOVER_EV 11
+#define FSE_AZ_TIMER_EV 10
+#define FSE_AZ_TX_PKT_NON_TCP_UDP 9
+#define FSE_AZ_WAKE_UP_EV 6
+#define FSE_AZ_SRM_UPD_DONE_EV 5
+#define FSE_AB_EVQ_NOT_EN_EV 3
+#define FSE_AZ_EVQ_INIT_DONE_EV 2
+#define FSE_AZ_RX_DESCQ_FLS_DONE_EV 1
+#define FSE_AZ_TX_DESCQ_FLS_DONE_EV 0
+#define FSF_AZ_DRIVER_EV_SUBDATA_LBN 0
+#define FSF_AZ_DRIVER_EV_SUBDATA_WIDTH 14
+
+/* EVENT_ENTRY */
+#define FSF_AZ_EV_CODE_LBN 60
+#define FSF_AZ_EV_CODE_WIDTH 4
+#define FSE_CZ_EV_CODE_MCDI_EV 12
+#define FSE_CZ_EV_CODE_USER_EV 8
+#define FSE_AZ_EV_CODE_DRV_GEN_EV 7
+#define FSE_AZ_EV_CODE_GLOBAL_EV 6
+#define FSE_AZ_EV_CODE_DRIVER_EV 5
+#define FSE_AZ_EV_CODE_TX_EV 2
+#define FSE_AZ_EV_CODE_RX_EV 0
+#define FSF_AZ_EV_DATA_LBN 0
+#define FSF_AZ_EV_DATA_WIDTH 60
+
+/* GLOBAL_EV */
+#define FSF_BB_GLB_EV_RX_RECOVERY_LBN 12
+#define FSF_BB_GLB_EV_RX_RECOVERY_WIDTH 1
+#define FSF_AA_GLB_EV_RX_RECOVERY_LBN 11
+#define FSF_AA_GLB_EV_RX_RECOVERY_WIDTH 1
+#define FSF_BB_GLB_EV_XG_MGT_INTR_LBN 11
+#define FSF_BB_GLB_EV_XG_MGT_INTR_WIDTH 1
+#define FSF_AB_GLB_EV_XFP_PHY0_INTR_LBN 10
+#define FSF_AB_GLB_EV_XFP_PHY0_INTR_WIDTH 1
+#define FSF_AB_GLB_EV_XG_PHY0_INTR_LBN 9
+#define FSF_AB_GLB_EV_XG_PHY0_INTR_WIDTH 1
+#define FSF_AB_GLB_EV_G_PHY0_INTR_LBN 7
+#define FSF_AB_GLB_EV_G_PHY0_INTR_WIDTH 1
+
+/* LEGACY_INT_VEC */
+#define FSF_AZ_NET_IVEC_FATAL_INT_LBN 64
+#define FSF_AZ_NET_IVEC_FATAL_INT_WIDTH 1
+#define FSF_AZ_NET_IVEC_INT_Q_LBN 40
+#define FSF_AZ_NET_IVEC_INT_Q_WIDTH 4
+#define FSF_AZ_NET_IVEC_INT_FLAG_LBN 32
+#define FSF_AZ_NET_IVEC_INT_FLAG_WIDTH 1
+#define FSF_AZ_NET_IVEC_EVQ_FIFO_HF_LBN 1
+#define FSF_AZ_NET_IVEC_EVQ_FIFO_HF_WIDTH 1
+#define FSF_AZ_NET_IVEC_EVQ_FIFO_AF_LBN 0
+#define FSF_AZ_NET_IVEC_EVQ_FIFO_AF_WIDTH 1
+
+/* MC_XGMAC_FLTR_RULE_DEF */
+#define FSF_CZ_MC_XFRC_MODE_LBN 416
+#define FSF_CZ_MC_XFRC_MODE_WIDTH 1
+#define FSE_CZ_MC_XFRC_MODE_LAYERED 1
+#define FSE_CZ_MC_XFRC_MODE_SIMPLE 0
+#define FSF_CZ_MC_XFRC_HASH_LBN 384
+#define FSF_CZ_MC_XFRC_HASH_WIDTH 32
+#define FSF_CZ_MC_XFRC_LAYER4_BYTE_MASK_LBN 256
+#define FSF_CZ_MC_XFRC_LAYER4_BYTE_MASK_WIDTH 128
+#define FSF_CZ_MC_XFRC_LAYER3_BYTE_MASK_LBN 128
+#define FSF_CZ_MC_XFRC_LAYER3_BYTE_MASK_WIDTH 128
+#define FSF_CZ_MC_XFRC_LAYER2_OR_SIMPLE_BYTE_MASK_LBN 0
+#define FSF_CZ_MC_XFRC_LAYER2_OR_SIMPLE_BYTE_MASK_WIDTH 128
+
+/* RX_EV */
+#define FSF_CZ_RX_EV_PKT_NOT_PARSED_LBN 58
+#define FSF_CZ_RX_EV_PKT_NOT_PARSED_WIDTH 1
+#define FSF_CZ_RX_EV_IPV6_PKT_LBN 57
+#define FSF_CZ_RX_EV_IPV6_PKT_WIDTH 1
+#define FSF_AZ_RX_EV_PKT_OK_LBN 56
+#define FSF_AZ_RX_EV_PKT_OK_WIDTH 1
+#define FSF_AZ_RX_EV_PAUSE_FRM_ERR_LBN 55
+#define FSF_AZ_RX_EV_PAUSE_FRM_ERR_WIDTH 1
+#define FSF_AZ_RX_EV_BUF_OWNER_ID_ERR_LBN 54
+#define FSF_AZ_RX_EV_BUF_OWNER_ID_ERR_WIDTH 1
+#define FSF_AZ_RX_EV_IP_FRAG_ERR_LBN 53
+#define FSF_AZ_RX_EV_IP_FRAG_ERR_WIDTH 1
+#define FSF_AZ_RX_EV_IP_HDR_CHKSUM_ERR_LBN 52
+#define FSF_AZ_RX_EV_IP_HDR_CHKSUM_ERR_WIDTH 1
+#define FSF_AZ_RX_EV_TCP_UDP_CHKSUM_ERR_LBN 51
+#define FSF_AZ_RX_EV_TCP_UDP_CHKSUM_ERR_WIDTH 1
+#define FSF_AZ_RX_EV_ETH_CRC_ERR_LBN 50
+#define FSF_AZ_RX_EV_ETH_CRC_ERR_WIDTH 1
+#define FSF_AZ_RX_EV_FRM_TRUNC_LBN 49
+#define FSF_AZ_RX_EV_FRM_TRUNC_WIDTH 1
+#define FSF_AA_RX_EV_DRIB_NIB_LBN 49
+#define FSF_AA_RX_EV_DRIB_NIB_WIDTH 1
+#define FSF_AZ_RX_EV_TOBE_DISC_LBN 47
+#define FSF_AZ_RX_EV_TOBE_DISC_WIDTH 1
+#define FSF_AZ_RX_EV_PKT_TYPE_LBN 44
+#define FSF_AZ_RX_EV_PKT_TYPE_WIDTH 3
+#define FSE_AZ_RX_EV_PKT_TYPE_VLAN_JUMBO 5
+#define FSE_AZ_RX_EV_PKT_TYPE_VLAN_LLC 4
+#define FSE_AZ_RX_EV_PKT_TYPE_VLAN 3
+#define FSE_AZ_RX_EV_PKT_TYPE_JUMBO 2
+#define FSE_AZ_RX_EV_PKT_TYPE_LLC 1
+#define FSE_AZ_RX_EV_PKT_TYPE_ETH 0
+#define FSF_AZ_RX_EV_HDR_TYPE_LBN 42
+#define FSF_AZ_RX_EV_HDR_TYPE_WIDTH 2
+#define FSE_AZ_RX_EV_HDR_TYPE_OTHER 3
+#define FSE_AB_RX_EV_HDR_TYPE_IPV4_OTHER 2
+#define FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_OTHER 2
+#define FSE_AB_RX_EV_HDR_TYPE_IPV4_UDP 1
+#define FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_UDP 1
+#define FSE_AB_RX_EV_HDR_TYPE_IPV4_TCP 0
+#define FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_TCP 0
+#define FSF_AZ_RX_EV_DESC_Q_EMPTY_LBN 41
+#define FSF_AZ_RX_EV_DESC_Q_EMPTY_WIDTH 1
+#define FSF_AZ_RX_EV_MCAST_HASH_MATCH_LBN 40
+#define FSF_AZ_RX_EV_MCAST_HASH_MATCH_WIDTH 1
+#define FSF_AZ_RX_EV_MCAST_PKT_LBN 39
+#define FSF_AZ_RX_EV_MCAST_PKT_WIDTH 1
+#define FSF_AA_RX_EV_RECOVERY_FLAG_LBN 37
+#define FSF_AA_RX_EV_RECOVERY_FLAG_WIDTH 1
+#define FSF_AZ_RX_EV_Q_LABEL_LBN 32
+#define FSF_AZ_RX_EV_Q_LABEL_WIDTH 5
+#define FSF_AZ_RX_EV_JUMBO_CONT_LBN 31
+#define FSF_AZ_RX_EV_JUMBO_CONT_WIDTH 1
+#define FSF_AZ_RX_EV_PORT_LBN 30
+#define FSF_AZ_RX_EV_PORT_WIDTH 1
+#define FSF_AZ_RX_EV_BYTE_CNT_LBN 16
+#define FSF_AZ_RX_EV_BYTE_CNT_WIDTH 14
+#define FSF_AZ_RX_EV_SOP_LBN 15
+#define FSF_AZ_RX_EV_SOP_WIDTH 1
+#define FSF_AZ_RX_EV_ISCSI_PKT_OK_LBN 14
+#define FSF_AZ_RX_EV_ISCSI_PKT_OK_WIDTH 1
+#define FSF_AZ_RX_EV_ISCSI_DDIG_ERR_LBN 13
+#define FSF_AZ_RX_EV_ISCSI_DDIG_ERR_WIDTH 1
+#define FSF_AZ_RX_EV_ISCSI_HDIG_ERR_LBN 12
+#define FSF_AZ_RX_EV_ISCSI_HDIG_ERR_WIDTH 1
+#define FSF_AZ_RX_EV_DESC_PTR_LBN 0
+#define FSF_AZ_RX_EV_DESC_PTR_WIDTH 12
+
+/* RX_KER_DESC */
+#define FSF_AZ_RX_KER_BUF_SIZE_LBN 48
+#define FSF_AZ_RX_KER_BUF_SIZE_WIDTH 14
+#define FSF_AZ_RX_KER_BUF_REGION_LBN 46
+#define FSF_AZ_RX_KER_BUF_REGION_WIDTH 2
+#define FSF_AZ_RX_KER_BUF_ADDR_LBN 0
+#define FSF_AZ_RX_KER_BUF_ADDR_WIDTH 46
+
+/* RX_USER_DESC */
+#define FSF_AZ_RX_USER_2BYTE_OFFSET_LBN 20
+#define FSF_AZ_RX_USER_2BYTE_OFFSET_WIDTH 12
+#define FSF_AZ_RX_USER_BUF_ID_LBN 0
+#define FSF_AZ_RX_USER_BUF_ID_WIDTH 20
+
+/* TX_EV */
+#define FSF_AZ_TX_EV_PKT_ERR_LBN 38
+#define FSF_AZ_TX_EV_PKT_ERR_WIDTH 1
+#define FSF_AZ_TX_EV_PKT_TOO_BIG_LBN 37
+#define FSF_AZ_TX_EV_PKT_TOO_BIG_WIDTH 1
+#define FSF_AZ_TX_EV_Q_LABEL_LBN 32
+#define FSF_AZ_TX_EV_Q_LABEL_WIDTH 5
+#define FSF_AZ_TX_EV_PORT_LBN 16
+#define FSF_AZ_TX_EV_PORT_WIDTH 1
+#define FSF_AZ_TX_EV_WQ_FF_FULL_LBN 15
+#define FSF_AZ_TX_EV_WQ_FF_FULL_WIDTH 1
+#define FSF_AZ_TX_EV_BUF_OWNER_ID_ERR_LBN 14
+#define FSF_AZ_TX_EV_BUF_OWNER_ID_ERR_WIDTH 1
+#define FSF_AZ_TX_EV_COMP_LBN 12
+#define FSF_AZ_TX_EV_COMP_WIDTH 1
+#define FSF_AZ_TX_EV_DESC_PTR_LBN 0
+#define FSF_AZ_TX_EV_DESC_PTR_WIDTH 12
+
+/* TX_KER_DESC */
+#define FSF_AZ_TX_KER_CONT_LBN 62
+#define FSF_AZ_TX_KER_CONT_WIDTH 1
+#define FSF_AZ_TX_KER_BYTE_COUNT_LBN 48
+#define FSF_AZ_TX_KER_BYTE_COUNT_WIDTH 14
+#define FSF_AZ_TX_KER_BUF_REGION_LBN 46
+#define FSF_AZ_TX_KER_BUF_REGION_WIDTH 2
+#define FSF_AZ_TX_KER_BUF_ADDR_LBN 0
+#define FSF_AZ_TX_KER_BUF_ADDR_WIDTH 46
+
+/* TX_USER_DESC */
+#define FSF_AZ_TX_USER_SW_EV_EN_LBN 48
+#define FSF_AZ_TX_USER_SW_EV_EN_WIDTH 1
+#define FSF_AZ_TX_USER_CONT_LBN 46
+#define FSF_AZ_TX_USER_CONT_WIDTH 1
+#define FSF_AZ_TX_USER_BYTE_CNT_LBN 33
+#define FSF_AZ_TX_USER_BYTE_CNT_WIDTH 13
+#define FSF_AZ_TX_USER_BUF_ID_LBN 13
+#define FSF_AZ_TX_USER_BUF_ID_WIDTH 20
+#define FSF_AZ_TX_USER_BYTE_OFS_LBN 0
+#define FSF_AZ_TX_USER_BYTE_OFS_WIDTH 13
+
+/* USER_EV */
+#define FSF_CZ_USER_QID_LBN 32
+#define FSF_CZ_USER_QID_WIDTH 10
+#define FSF_CZ_USER_EV_REG_VALUE_LBN 0
+#define FSF_CZ_USER_EV_REG_VALUE_WIDTH 32
+
+/**************************************************************************
+ *
+ * Falcon B0 PCIe core indirect registers
+ *
+ **************************************************************************
+ */
+
+#define FPCR_BB_PCIE_DEVICE_CTRL_STAT 0x68
+
+#define FPCR_BB_PCIE_LINK_CTRL_STAT 0x70
+
+#define FPCR_BB_ACK_RPL_TIMER 0x700
+#define FPCRF_BB_ACK_TL_LBN 0
+#define FPCRF_BB_ACK_TL_WIDTH 16
+#define FPCRF_BB_RPL_TL_LBN 16
+#define FPCRF_BB_RPL_TL_WIDTH 16
+
+#define FPCR_BB_ACK_FREQ 0x70C
+#define FPCRF_BB_ACK_FREQ_LBN 0
+#define FPCRF_BB_ACK_FREQ_WIDTH 7
+
+/**************************************************************************
+ *
+ * Pseudo-registers and fields
+ *
+ **************************************************************************
+ */
+
+/* Interrupt acknowledge work-around register (A0/A1 only) */
+#define FR_AA_WORK_AROUND_BROKEN_PCI_READS 0x0070
+
+/* EE_SPI_HCMD_REG: SPI host command register */
+/* Values for the EE_SPI_HCMD_SF_SEL register field */
+#define FFE_AB_SPI_DEVICE_EEPROM 0
+#define FFE_AB_SPI_DEVICE_FLASH 1
+
+/* NIC_STAT_REG: NIC status register */
+#define FRF_AB_STRAP_10G_LBN 2
+#define FRF_AB_STRAP_10G_WIDTH 1
+#define FRF_AA_STRAP_PCIE_LBN 0
+#define FRF_AA_STRAP_PCIE_WIDTH 1
+
+/* FATAL_INTR_REG_KER: Fatal interrupt register for Kernel */
+#define FRF_AZ_FATAL_INTR_LBN 0
+#define FRF_AZ_FATAL_INTR_WIDTH 12
+
+/* SRM_CFG_REG: SRAM configuration register */
+/* We treat the number of SRAM banks and bank size as a single field */
+#define FRF_AZ_SRM_NB_SZ_LBN FRF_AZ_SRM_BANK_SIZE_LBN
+#define FRF_AZ_SRM_NB_SZ_WIDTH \
+ (FRF_AZ_SRM_BANK_SIZE_WIDTH + FRF_AZ_SRM_NUM_BANK_WIDTH)
+#define FFE_AB_SRM_NB1_SZ2M 0
+#define FFE_AB_SRM_NB1_SZ4M 1
+#define FFE_AB_SRM_NB1_SZ8M 2
+#define FFE_AB_SRM_NB_SZ_DEF 3
+#define FFE_AB_SRM_NB2_SZ4M 4
+#define FFE_AB_SRM_NB2_SZ8M 5
+#define FFE_AB_SRM_NB2_SZ16M 6
+#define FFE_AB_SRM_NB_SZ_RES 7
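+
+/*
+ * Usage sketch (illustrative; the EF4_OWORD_FIELD accessor is assumed
+ * from the driver's bitfield.h): the combined banks/size field is read
+ * back like any other register field:
+ *
+ *	nb_sz = EF4_OWORD_FIELD(srm_cfg_reg, FRF_AZ_SRM_NB_SZ);
+ */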
+
+/* RX_DESC_UPD_REGP0: Receive descriptor update register. */
+/* We write just the last dword of these registers */
+#define FR_AZ_RX_DESC_UPD_DWORD_P0 \
+ (BUILD_BUG_ON_ZERO(FR_AA_RX_DESC_UPD_KER != FR_BZ_RX_DESC_UPD_P0) + \
+ FR_BZ_RX_DESC_UPD_P0 + 3 * 4)
+#define FRF_AZ_RX_DESC_WPTR_DWORD_LBN (FRF_AZ_RX_DESC_WPTR_LBN - 3 * 32)
+#define FRF_AZ_RX_DESC_WPTR_DWORD_WIDTH FRF_AZ_RX_DESC_WPTR_WIDTH
+
+/* TX_DESC_UPD_REGP0: Transmit descriptor update register. */
+#define FR_AZ_TX_DESC_UPD_DWORD_P0 \
+ (BUILD_BUG_ON_ZERO(FR_AA_TX_DESC_UPD_KER != FR_BZ_TX_DESC_UPD_P0) + \
+ FR_BZ_TX_DESC_UPD_P0 + 3 * 4)
+#define FRF_AZ_TX_DESC_WPTR_DWORD_LBN (FRF_AZ_TX_DESC_WPTR_LBN - 3 * 32)
+#define FRF_AZ_TX_DESC_WPTR_DWORD_WIDTH FRF_AZ_TX_DESC_WPTR_WIDTH
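+
+/*
+ * Usage sketch (illustrative; ef4_dword_t, EF4_POPULATE_DWORD_1 and
+ * ef4_writed_page are assumed from the driver's bitfield.h and io.h):
+ * ring the TX doorbell by writing only the dword that holds the write
+ * pointer.  The RX case is analogous.
+ *
+ *	ef4_dword_t reg;
+ *
+ *	EF4_POPULATE_DWORD_1(reg, FRF_AZ_TX_DESC_WPTR_DWORD, write_ptr);
+ *	ef4_writed_page(efx, &reg, FR_AZ_TX_DESC_UPD_DWORD_P0, queue);
+ */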
+
+/* GMF_CFG4_REG: GMAC FIFO configuration register 4 */
+#define FRF_AB_GMF_HSTFLTRFRM_PAUSE_LBN 12
+#define FRF_AB_GMF_HSTFLTRFRM_PAUSE_WIDTH 1
+
+/* GMF_CFG5_REG: GMAC FIFO configuration register 5 */
+#define FRF_AB_GMF_HSTFLTRFRMDC_PAUSE_LBN 12
+#define FRF_AB_GMF_HSTFLTRFRMDC_PAUSE_WIDTH 1
+
+/* XM_TX_PARAM_REG: XGMAC transmit parameter register */
+#define FRF_AB_XM_MAX_TX_FRM_SIZE_LBN FRF_AB_XM_MAX_TX_FRM_SIZE_LO_LBN
+#define FRF_AB_XM_MAX_TX_FRM_SIZE_WIDTH (FRF_AB_XM_MAX_TX_FRM_SIZE_HI_WIDTH + \
+ FRF_AB_XM_MAX_TX_FRM_SIZE_LO_WIDTH)
+
+/* XM_RX_PARAM_REG: XGMAC receive parameter register */
+#define FRF_AB_XM_MAX_RX_FRM_SIZE_LBN FRF_AB_XM_MAX_RX_FRM_SIZE_LO_LBN
+#define FRF_AB_XM_MAX_RX_FRM_SIZE_WIDTH (FRF_AB_XM_MAX_RX_FRM_SIZE_HI_WIDTH + \
+ FRF_AB_XM_MAX_RX_FRM_SIZE_LO_WIDTH)
+
+/* XX_TXDRV_CTL_REG: XAUI SerDes transmit drive control register */
+/* Default values */
+#define FFE_AB_XX_TXDRV_DEQ_DEF 0xe /* deq=.6 */
+#define FFE_AB_XX_TXDRV_DTX_DEF 0x5 /* 1.25 */
+#define FFE_AB_XX_SD_CTL_DRV_DEF 0 /* 20mA */
+
+/* XX_CORE_STAT_REG: XAUI XGXS core status register */
+/* XGXS all-lanes status fields */
+#define FRF_AB_XX_SYNC_STAT_LBN FRF_AB_XX_SYNC_STAT0_LBN
+#define FRF_AB_XX_SYNC_STAT_WIDTH 4
+#define FRF_AB_XX_COMMA_DET_LBN FRF_AB_XX_COMMA_DET_CH0_LBN
+#define FRF_AB_XX_COMMA_DET_WIDTH 4
+#define FRF_AB_XX_CHAR_ERR_LBN FRF_AB_XX_CHAR_ERR_CH0_LBN
+#define FRF_AB_XX_CHAR_ERR_WIDTH 4
+#define FRF_AB_XX_DISPERR_LBN FRF_AB_XX_DISPERR_CH0_LBN
+#define FRF_AB_XX_DISPERR_WIDTH 4
+#define FFE_AB_XX_STAT_ALL_LANES 0xf
+#define FRF_AB_XX_FORCE_SIG_LBN FRF_AB_XX_FORCE_SIG0_VAL_LBN
+#define FRF_AB_XX_FORCE_SIG_WIDTH 8
+#define FFE_AB_XX_FORCE_SIG_ALL_LANES 0xff
+
+/* RX_MAC_FILTER_TBL0 */
+/* RMFT_DEST_MAC is wider than 32 bits */
+#define FRF_CZ_RMFT_DEST_MAC_LO_LBN FRF_CZ_RMFT_DEST_MAC_LBN
+#define FRF_CZ_RMFT_DEST_MAC_LO_WIDTH 32
+#define FRF_CZ_RMFT_DEST_MAC_HI_LBN (FRF_CZ_RMFT_DEST_MAC_LBN + 32)
+#define FRF_CZ_RMFT_DEST_MAC_HI_WIDTH (FRF_CZ_RMFT_DEST_MAC_WIDTH - 32)
+
+/* TX_MAC_FILTER_TBL0 */
+/* TMFT_SRC_MAC is wider than 32 bits */
+#define FRF_CZ_TMFT_SRC_MAC_LO_LBN FRF_CZ_TMFT_SRC_MAC_LBN
+#define FRF_CZ_TMFT_SRC_MAC_LO_WIDTH 32
+#define FRF_CZ_TMFT_SRC_MAC_HI_LBN (FRF_CZ_TMFT_SRC_MAC_LBN + 32)
+#define FRF_CZ_TMFT_SRC_MAC_HI_WIDTH (FRF_CZ_TMFT_SRC_MAC_WIDTH - 32)
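+
+/*
+ * Usage sketch (illustrative; EF4_POPULATE_OWORD_2 is assumed from the
+ * driver's bitfield.h): a 48-bit MAC address is packed as two
+ * sub-fields, low 32 bits then high 16 bits:
+ *
+ *	EF4_POPULATE_OWORD_2(filter,
+ *			     FRF_CZ_TMFT_SRC_MAC_LO, mac_lo32,
+ *			     FRF_CZ_TMFT_SRC_MAC_HI, mac_hi16);
+ */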
+
+/* TX_PACE_TBL */
+/* Values >20 are documented as reserved, but will result in a queue going
+ * into the fast bin with a pace value of zero. */
+#define FFE_BZ_TX_PACE_OFF 0
+#define FFE_BZ_TX_PACE_RESERVED 21
+
+/* DRIVER_EV */
+/* Sub-fields of an RX flush completion event */
+#define FSF_AZ_DRIVER_EV_RX_FLUSH_FAIL_LBN 12
+#define FSF_AZ_DRIVER_EV_RX_FLUSH_FAIL_WIDTH 1
+#define FSF_AZ_DRIVER_EV_RX_DESCQ_ID_LBN 0
+#define FSF_AZ_DRIVER_EV_RX_DESCQ_ID_WIDTH 12
+
+/* EVENT_ENTRY */
+/* Magic number field for event test */
+#define FSF_AZ_DRV_GEN_EV_MAGIC_LBN 0
+#define FSF_AZ_DRV_GEN_EV_MAGIC_WIDTH 32
+
+/* RX packet prefix */
+#define FS_BZ_RX_PREFIX_HASH_OFST 12
+#define FS_BZ_RX_PREFIX_SIZE 16
+
+#endif /* EF4_FARCH_REGS_H */
diff --git a/drivers/net/ethernet/sfc/falcon/filter.h b/drivers/net/ethernet/sfc/falcon/filter.h
new file mode 100644
index 000000000000..647f6b2725c5
--- /dev/null
+++ b/drivers/net/ethernet/sfc/falcon/filter.h
@@ -0,0 +1,272 @@
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2005-2013 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#ifndef EF4_FILTER_H
+#define EF4_FILTER_H
+
+#include <linux/types.h>
+#include <linux/if_ether.h>
+#include <asm/byteorder.h>
+
+/**
+ * enum ef4_filter_match_flags - Flags for hardware filter match type
+ * @EF4_FILTER_MATCH_REM_HOST: Match by remote IP host address
+ * @EF4_FILTER_MATCH_LOC_HOST: Match by local IP host address
+ * @EF4_FILTER_MATCH_REM_MAC: Match by remote MAC address
+ * @EF4_FILTER_MATCH_REM_PORT: Match by remote TCP/UDP port
+ * @EF4_FILTER_MATCH_LOC_MAC: Match by local MAC address
+ * @EF4_FILTER_MATCH_LOC_PORT: Match by local TCP/UDP port
+ * @EF4_FILTER_MATCH_ETHER_TYPE: Match by Ether-type
+ * @EF4_FILTER_MATCH_INNER_VID: Match by inner VLAN ID
+ * @EF4_FILTER_MATCH_OUTER_VID: Match by outer VLAN ID
+ * @EF4_FILTER_MATCH_IP_PROTO: Match by IP transport protocol
+ * @EF4_FILTER_MATCH_LOC_MAC_IG: Match by local MAC address I/G bit.
+ * Used for RX default unicast and multicast/broadcast filters.
+ *
+ * Only some combinations are supported, depending on NIC type:
+ *
+ * - Falcon supports RX filters matching by {TCP,UDP}/IPv4 4-tuple or
+ * local 2-tuple (only implemented for Falcon B0)
+ *
+ * - Siena supports RX and TX filters matching by {TCP,UDP}/IPv4 4-tuple
+ * or local 2-tuple, or local MAC with or without outer VID, and RX
+ * default filters
+ *
+ * - Huntington supports filter matching controlled by firmware, potentially
+ * using {TCP,UDP}/IPv{4,6} 4-tuple or local 2-tuple, local MAC or I/G bit,
+ * with or without outer and inner VID
+ */
+enum ef4_filter_match_flags {
+ EF4_FILTER_MATCH_REM_HOST = 0x0001,
+ EF4_FILTER_MATCH_LOC_HOST = 0x0002,
+ EF4_FILTER_MATCH_REM_MAC = 0x0004,
+ EF4_FILTER_MATCH_REM_PORT = 0x0008,
+ EF4_FILTER_MATCH_LOC_MAC = 0x0010,
+ EF4_FILTER_MATCH_LOC_PORT = 0x0020,
+ EF4_FILTER_MATCH_ETHER_TYPE = 0x0040,
+ EF4_FILTER_MATCH_INNER_VID = 0x0080,
+ EF4_FILTER_MATCH_OUTER_VID = 0x0100,
+ EF4_FILTER_MATCH_IP_PROTO = 0x0200,
+ EF4_FILTER_MATCH_LOC_MAC_IG = 0x0400,
+};
+
+/**
+ * enum ef4_filter_priority - priority of a hardware filter specification
+ * @EF4_FILTER_PRI_HINT: Performance hint
+ * @EF4_FILTER_PRI_AUTO: Automatic filter based on device address list
+ * or hardware requirements. This may only be used by the filter
+ * implementation for each NIC type.
+ * @EF4_FILTER_PRI_MANUAL: Manually configured filter
+ * @EF4_FILTER_PRI_REQUIRED: Required for correct behaviour (user-level
+ * networking and SR-IOV)
+ */
+enum ef4_filter_priority {
+ EF4_FILTER_PRI_HINT = 0,
+ EF4_FILTER_PRI_AUTO,
+ EF4_FILTER_PRI_MANUAL,
+ EF4_FILTER_PRI_REQUIRED,
+};
+
+/**
+ * enum ef4_filter_flags - flags for hardware filter specifications
+ * @EF4_FILTER_FLAG_RX_RSS: Use RSS to spread across multiple queues.
+ * By default, matching packets will be delivered only to the
+ * specified queue. If this flag is set, they will be delivered
+ * to a range of queues offset from the specified queue number
+ * according to the indirection table.
+ * @EF4_FILTER_FLAG_RX_SCATTER: Enable DMA scatter on the receiving
+ * queue.
+ * @EF4_FILTER_FLAG_RX_OVER_AUTO: Indicates a filter that is
+ * overriding an automatic filter (priority
+ * %EF4_FILTER_PRI_AUTO). This may only be set by the filter
+ * implementation for each NIC type. A removal request will restore
+ * the automatic filter in its place.
+ * @EF4_FILTER_FLAG_RX: Filter is for RX
+ * @EF4_FILTER_FLAG_TX: Filter is for TX
+ */
+enum ef4_filter_flags {
+ EF4_FILTER_FLAG_RX_RSS = 0x01,
+ EF4_FILTER_FLAG_RX_SCATTER = 0x02,
+ EF4_FILTER_FLAG_RX_OVER_AUTO = 0x04,
+ EF4_FILTER_FLAG_RX = 0x08,
+ EF4_FILTER_FLAG_TX = 0x10,
+};
+
+/**
+ * struct ef4_filter_spec - specification for a hardware filter
+ * @match_flags: Match type flags, from &enum ef4_filter_match_flags
+ * @priority: Priority of the filter, from &enum ef4_filter_priority
+ * @flags: Miscellaneous flags, from &enum ef4_filter_flags
+ * @rss_context: RSS context to use, if %EF4_FILTER_FLAG_RX_RSS is set
+ * @dmaq_id: Source/target queue index, or %EF4_FILTER_RX_DMAQ_ID_DROP for
+ * an RX drop filter
+ * @outer_vid: Outer VLAN ID to match, if %EF4_FILTER_MATCH_OUTER_VID is set
+ * @inner_vid: Inner VLAN ID to match, if %EF4_FILTER_MATCH_INNER_VID is set
+ * @loc_mac: Local MAC address to match, if %EF4_FILTER_MATCH_LOC_MAC or
+ * %EF4_FILTER_MATCH_LOC_MAC_IG is set
+ * @rem_mac: Remote MAC address to match, if %EF4_FILTER_MATCH_REM_MAC is set
+ * @ether_type: Ether-type to match, if %EF4_FILTER_MATCH_ETHER_TYPE is set
+ * @ip_proto: IP transport protocol to match, if %EF4_FILTER_MATCH_IP_PROTO
+ * is set
+ * @loc_host: Local IP host to match, if %EF4_FILTER_MATCH_LOC_HOST is set
+ * @rem_host: Remote IP host to match, if %EF4_FILTER_MATCH_REM_HOST is set
+ * @loc_port: Local TCP/UDP port to match, if %EF4_FILTER_MATCH_LOC_PORT is set
+ * @rem_port: Remote TCP/UDP port to match, if %EF4_FILTER_MATCH_REM_PORT is set
+ *
+ * The ef4_filter_init_rx() or ef4_filter_init_tx() function *must* be
+ * used to initialise the structure. The ef4_filter_set_*() functions
+ * may then be used to set @rss_context, @match_flags and related
+ * fields.
+ *
+ * The @priority field is used by software to determine whether a new
+ * filter may replace an old one. The hardware priority of a filter
+ * depends on which fields are matched.
+ */
+struct ef4_filter_spec {
+ u32 match_flags:12;
+ u32 priority:2;
+ u32 flags:6;
+ u32 dmaq_id:12;
+ u32 rss_context;
+ __be16 outer_vid __aligned(4); /* allow jhash2() of match values */
+ __be16 inner_vid;
+ u8 loc_mac[ETH_ALEN];
+ u8 rem_mac[ETH_ALEN];
+ __be16 ether_type;
+ u8 ip_proto;
+ __be32 loc_host[4];
+ __be32 rem_host[4];
+ __be16 loc_port;
+ __be16 rem_port;
+ /* total 64 bytes */
+};
+
+enum {
+ EF4_FILTER_RSS_CONTEXT_DEFAULT = 0xffffffff,
+ EF4_FILTER_RX_DMAQ_ID_DROP = 0xfff
+};
+
+static inline void ef4_filter_init_rx(struct ef4_filter_spec *spec,
+ enum ef4_filter_priority priority,
+ enum ef4_filter_flags flags,
+ unsigned rxq_id)
+{
+ memset(spec, 0, sizeof(*spec));
+ spec->priority = priority;
+ spec->flags = EF4_FILTER_FLAG_RX | flags;
+ spec->rss_context = EF4_FILTER_RSS_CONTEXT_DEFAULT;
+ spec->dmaq_id = rxq_id;
+}
+
+static inline void ef4_filter_init_tx(struct ef4_filter_spec *spec,
+ unsigned txq_id)
+{
+ memset(spec, 0, sizeof(*spec));
+ spec->priority = EF4_FILTER_PRI_REQUIRED;
+ spec->flags = EF4_FILTER_FLAG_TX;
+ spec->dmaq_id = txq_id;
+}
+
+/**
+ * ef4_filter_set_ipv4_local - specify IPv4 host, transport protocol and port
+ * @spec: Specification to initialise
+ * @proto: Transport layer protocol number
+ * @host: Local host address (network byte order)
+ * @port: Local port (network byte order)
+ */
+static inline int
+ef4_filter_set_ipv4_local(struct ef4_filter_spec *spec, u8 proto,
+ __be32 host, __be16 port)
+{
+ spec->match_flags |=
+ EF4_FILTER_MATCH_ETHER_TYPE | EF4_FILTER_MATCH_IP_PROTO |
+ EF4_FILTER_MATCH_LOC_HOST | EF4_FILTER_MATCH_LOC_PORT;
+ spec->ether_type = htons(ETH_P_IP);
+ spec->ip_proto = proto;
+ spec->loc_host[0] = host;
+ spec->loc_port = port;
+ return 0;
+}
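+
+/* Illustrative usage sketch, not part of this header: combining the
+ * helpers above to build an RX drop filter for local TCP port 80.
+ * ef4_filter_insert_filter() is assumed here to be the insertion entry
+ * point declared elsewhere in the driver.
+ *
+ *	struct ef4_filter_spec spec;
+ *
+ *	ef4_filter_init_rx(&spec, EF4_FILTER_PRI_REQUIRED, 0,
+ *			   EF4_FILTER_RX_DMAQ_ID_DROP);
+ *	ef4_filter_set_ipv4_local(&spec, IPPROTO_TCP,
+ *				  htonl(0xc0a80001), htons(80));
+ *
+ * where 0xc0a80001 is 192.168.0.1 in host byte order.
+ */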
+
+/**
+ * ef4_filter_set_ipv4_full - specify IPv4 hosts, transport protocol and ports
+ * @spec: Specification to initialise
+ * @proto: Transport layer protocol number
+ * @lhost: Local host address (network byte order)
+ * @lport: Local port (network byte order)
+ * @rhost: Remote host address (network byte order)
+ * @rport: Remote port (network byte order)
+ */
+static inline int
+ef4_filter_set_ipv4_full(struct ef4_filter_spec *spec, u8 proto,
+ __be32 lhost, __be16 lport,
+ __be32 rhost, __be16 rport)
+{
+ spec->match_flags |=
+ EF4_FILTER_MATCH_ETHER_TYPE | EF4_FILTER_MATCH_IP_PROTO |
+ EF4_FILTER_MATCH_LOC_HOST | EF4_FILTER_MATCH_LOC_PORT |
+ EF4_FILTER_MATCH_REM_HOST | EF4_FILTER_MATCH_REM_PORT;
+ spec->ether_type = htons(ETH_P_IP);
+ spec->ip_proto = proto;
+ spec->loc_host[0] = lhost;
+ spec->loc_port = lport;
+ spec->rem_host[0] = rhost;
+ spec->rem_port = rport;
+ return 0;
+}
+
+enum {
+ EF4_FILTER_VID_UNSPEC = 0xffff,
+};
+
+/**
+ * ef4_filter_set_eth_local - specify local Ethernet address and/or VID
+ * @spec: Specification to initialise
+ * @vid: Outer VLAN ID to match, or %EF4_FILTER_VID_UNSPEC
+ * @addr: Local Ethernet MAC address, or %NULL
+ */
+static inline int ef4_filter_set_eth_local(struct ef4_filter_spec *spec,
+ u16 vid, const u8 *addr)
+{
+ if (vid == EF4_FILTER_VID_UNSPEC && addr == NULL)
+ return -EINVAL;
+
+ if (vid != EF4_FILTER_VID_UNSPEC) {
+ spec->match_flags |= EF4_FILTER_MATCH_OUTER_VID;
+ spec->outer_vid = htons(vid);
+ }
+ if (addr != NULL) {
+ spec->match_flags |= EF4_FILTER_MATCH_LOC_MAC;
+ ether_addr_copy(spec->loc_mac, addr);
+ }
+ return 0;
+}
+
+/**
+ * ef4_filter_set_uc_def - specify matching otherwise-unmatched unicast
+ * @spec: Specification to initialise
+ */
+static inline int ef4_filter_set_uc_def(struct ef4_filter_spec *spec)
+{
+ spec->match_flags |= EF4_FILTER_MATCH_LOC_MAC_IG;
+ return 0;
+}
+
+/**
+ * ef4_filter_set_mc_def - specify matching otherwise-unmatched multicast
+ * @spec: Specification to initialise
+ */
+static inline int ef4_filter_set_mc_def(struct ef4_filter_spec *spec)
+{
+ spec->match_flags |= EF4_FILTER_MATCH_LOC_MAC_IG;
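+	/* Set the I/G bit: a destination MAC address whose first octet
+	 * has its least significant bit set is a group (multicast)
+	 * address.
+	 */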
+ spec->loc_mac[0] = 1;
+ return 0;
+}
+
+#endif /* EF4_FILTER_H */
diff --git a/drivers/net/ethernet/sfc/falcon/io.h b/drivers/net/ethernet/sfc/falcon/io.h
new file mode 100644
index 000000000000..7085ee1d5e2b
--- /dev/null
+++ b/drivers/net/ethernet/sfc/falcon/io.h
@@ -0,0 +1,290 @@
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2005-2006 Fen Systems Ltd.
+ * Copyright 2006-2013 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#ifndef EF4_IO_H
+#define EF4_IO_H
+
+#include <linux/io.h>
+#include <linux/spinlock.h>
+
+/**************************************************************************
+ *
+ * NIC register I/O
+ *
+ **************************************************************************
+ *
+ * Notes on locking strategy for the Falcon architecture:
+ *
+ * Many CSRs are very wide and cannot be read or written atomically.
+ * Writes from the host are buffered by the Bus Interface Unit (BIU)
+ * up to 128 bits. Whenever the host writes part of such a register,
+ * the BIU collects the written value and does not write to the
+ * underlying register until all 4 dwords have been written. A
+ * similar buffering scheme applies to host access to the NIC's 64-bit
+ * SRAM.
+ *
+ * Writes to different CSRs and 64-bit SRAM words must be serialised,
+ * since interleaved access can result in lost writes. We use
+ * ef4_nic::biu_lock for this.
+ *
+ * We also serialise reads from 128-bit CSRs and SRAM with the same
+ * spinlock. This may not be necessary, but it doesn't really matter
+ * as there are no such reads on the fast path.
+ *
+ * The DMA descriptor pointers (RX_DESC_UPD and TX_DESC_UPD) are
+ * 128-bit but are special-cased in the BIU to avoid the need for
+ * locking in the host:
+ *
+ * - They are write-only.
+ * - The semantics of writing to these registers are such that
+ * replacing the low 96 bits with zero does not affect functionality.
+ * - If the host writes to the last dword address of such a register
+ * (i.e. the high 32 bits) the underlying register will always be
+ * written. If the collector and the current write together do not
+ * provide values for all 128 bits of the register, the low 96 bits
+ * will be written as zero.
+ * - If the host writes to the address of any other part of such a
+ * register while the collector already holds values for some other
+ * register, the write is discarded and the collector maintains its
+ * current state.
+ *
+ * The EF10 architecture exposes very few registers to the host and
+ * most of them are only 32 bits wide. The only exceptions are the MC
+ * doorbell register pair, which has its own latching, and
+ * TX_DESC_UPD, which works in a similar way to the Falcon
+ * architecture.
+ */
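+
+/* Quick reference for the accessors below (a reading aid, not normative):
+ * 128-bit CSRs use ef4_writeo()/ef4_reado() and 64-bit SRAM words use
+ * ef4_sram_writeq()/ef4_sram_readq(), all serialised by biu_lock;
+ * 32-bit CSRs use ef4_writed()/ef4_readd(), which need no locking.
+ */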
+
+#if BITS_PER_LONG == 64
+#define EF4_USE_QWORD_IO 1
+#endif
+
+#ifdef EF4_USE_QWORD_IO
+static inline void _ef4_writeq(struct ef4_nic *efx, __le64 value,
+ unsigned int reg)
+{
+ __raw_writeq((__force u64)value, efx->membase + reg);
+}
+static inline __le64 _ef4_readq(struct ef4_nic *efx, unsigned int reg)
+{
+ return (__force __le64)__raw_readq(efx->membase + reg);
+}
+#endif
+
+static inline void _ef4_writed(struct ef4_nic *efx, __le32 value,
+ unsigned int reg)
+{
+ __raw_writel((__force u32)value, efx->membase + reg);
+}
+static inline __le32 _ef4_readd(struct ef4_nic *efx, unsigned int reg)
+{
+ return (__force __le32)__raw_readl(efx->membase + reg);
+}
+
+/* Write a normal 128-bit CSR, locking as appropriate. */
+static inline void ef4_writeo(struct ef4_nic *efx, const ef4_oword_t *value,
+ unsigned int reg)
+{
+ unsigned long flags __attribute__ ((unused));
+
+ netif_vdbg(efx, hw, efx->net_dev,
+ "writing register %x with " EF4_OWORD_FMT "\n", reg,
+ EF4_OWORD_VAL(*value));
+
+ spin_lock_irqsave(&efx->biu_lock, flags);
+#ifdef EF4_USE_QWORD_IO
+ _ef4_writeq(efx, value->u64[0], reg + 0);
+ _ef4_writeq(efx, value->u64[1], reg + 8);
+#else
+ _ef4_writed(efx, value->u32[0], reg + 0);
+ _ef4_writed(efx, value->u32[1], reg + 4);
+ _ef4_writed(efx, value->u32[2], reg + 8);
+ _ef4_writed(efx, value->u32[3], reg + 12);
+#endif
+ mmiowb();
+ spin_unlock_irqrestore(&efx->biu_lock, flags);
+}
+
+/* Write 64-bit SRAM through the supplied mapping, locking as appropriate. */
+static inline void ef4_sram_writeq(struct ef4_nic *efx, void __iomem *membase,
+ const ef4_qword_t *value, unsigned int index)
+{
+ unsigned int addr = index * sizeof(*value);
+ unsigned long flags __attribute__ ((unused));
+
+ netif_vdbg(efx, hw, efx->net_dev,
+ "writing SRAM address %x with " EF4_QWORD_FMT "\n",
+ addr, EF4_QWORD_VAL(*value));
+
+ spin_lock_irqsave(&efx->biu_lock, flags);
+#ifdef EF4_USE_QWORD_IO
+ __raw_writeq((__force u64)value->u64[0], membase + addr);
+#else
+ __raw_writel((__force u32)value->u32[0], membase + addr);
+ __raw_writel((__force u32)value->u32[1], membase + addr + 4);
+#endif
+ mmiowb();
+ spin_unlock_irqrestore(&efx->biu_lock, flags);
+}
+
+/* Write a 32-bit CSR or the last dword of a special 128-bit CSR */
+static inline void ef4_writed(struct ef4_nic *efx, const ef4_dword_t *value,
+ unsigned int reg)
+{
+ netif_vdbg(efx, hw, efx->net_dev,
+ "writing register %x with "EF4_DWORD_FMT"\n",
+ reg, EF4_DWORD_VAL(*value));
+
+ /* No lock required */
+ _ef4_writed(efx, value->u32[0], reg);
+}
+
+/* Read a 128-bit CSR, locking as appropriate. */
+static inline void ef4_reado(struct ef4_nic *efx, ef4_oword_t *value,
+ unsigned int reg)
+{
+ unsigned long flags __attribute__ ((unused));
+
+ spin_lock_irqsave(&efx->biu_lock, flags);
+ value->u32[0] = _ef4_readd(efx, reg + 0);
+ value->u32[1] = _ef4_readd(efx, reg + 4);
+ value->u32[2] = _ef4_readd(efx, reg + 8);
+ value->u32[3] = _ef4_readd(efx, reg + 12);
+ spin_unlock_irqrestore(&efx->biu_lock, flags);
+
+ netif_vdbg(efx, hw, efx->net_dev,
+ "read from register %x, got " EF4_OWORD_FMT "\n", reg,
+ EF4_OWORD_VAL(*value));
+}
+
+/* Read 64-bit SRAM through the supplied mapping, locking as appropriate. */
+static inline void ef4_sram_readq(struct ef4_nic *efx, void __iomem *membase,
+ ef4_qword_t *value, unsigned int index)
+{
+ unsigned int addr = index * sizeof(*value);
+ unsigned long flags __attribute__ ((unused));
+
+ spin_lock_irqsave(&efx->biu_lock, flags);
+#ifdef EF4_USE_QWORD_IO
+ value->u64[0] = (__force __le64)__raw_readq(membase + addr);
+#else
+ value->u32[0] = (__force __le32)__raw_readl(membase + addr);
+ value->u32[1] = (__force __le32)__raw_readl(membase + addr + 4);
+#endif
+ spin_unlock_irqrestore(&efx->biu_lock, flags);
+
+ netif_vdbg(efx, hw, efx->net_dev,
+ "read from SRAM address %x, got "EF4_QWORD_FMT"\n",
+ addr, EF4_QWORD_VAL(*value));
+}
+
+/* Read a 32-bit CSR or SRAM */
+static inline void ef4_readd(struct ef4_nic *efx, ef4_dword_t *value,
+ unsigned int reg)
+{
+ value->u32[0] = _ef4_readd(efx, reg);
+ netif_vdbg(efx, hw, efx->net_dev,
+ "read from register %x, got "EF4_DWORD_FMT"\n",
+ reg, EF4_DWORD_VAL(*value));
+}
+
+/* Write a 128-bit CSR forming part of a table */
+static inline void
+ef4_writeo_table(struct ef4_nic *efx, const ef4_oword_t *value,
+ unsigned int reg, unsigned int index)
+{
+ ef4_writeo(efx, value, reg + index * sizeof(ef4_oword_t));
+}
+
+/* Read a 128-bit CSR forming part of a table */
+static inline void ef4_reado_table(struct ef4_nic *efx, ef4_oword_t *value,
+ unsigned int reg, unsigned int index)
+{
+ ef4_reado(efx, value, reg + index * sizeof(ef4_oword_t));
+}
+
+/* Page size used as step between per-VI registers */
+#define EF4_VI_PAGE_SIZE 0x2000
+
+/* Calculate offset to page-mapped register */
+#define EF4_PAGED_REG(page, reg) \
+ ((page) * EF4_VI_PAGE_SIZE + (reg))
+
+/* Write the whole of RX_DESC_UPD or TX_DESC_UPD */
+static inline void _ef4_writeo_page(struct ef4_nic *efx, ef4_oword_t *value,
+ unsigned int reg, unsigned int page)
+{
+ reg = EF4_PAGED_REG(page, reg);
+
+ netif_vdbg(efx, hw, efx->net_dev,
+ "writing register %x with " EF4_OWORD_FMT "\n", reg,
+ EF4_OWORD_VAL(*value));
+
+#ifdef EF4_USE_QWORD_IO
+ _ef4_writeq(efx, value->u64[0], reg + 0);
+ _ef4_writeq(efx, value->u64[1], reg + 8);
+#else
+ _ef4_writed(efx, value->u32[0], reg + 0);
+ _ef4_writed(efx, value->u32[1], reg + 4);
+ _ef4_writed(efx, value->u32[2], reg + 8);
+ _ef4_writed(efx, value->u32[3], reg + 12);
+#endif
+}
+#define ef4_writeo_page(efx, value, reg, page) \
+ _ef4_writeo_page(efx, value, \
+ reg + \
+ BUILD_BUG_ON_ZERO((reg) != 0x830 && (reg) != 0xa10), \
+ page)
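+
+/* Illustrative sketch (register naming assumed): ringing the TX doorbell
+ * for a queue with one lock-free 128-bit write, relying on the BIU
+ * special-casing of TX_DESC_UPD (0xa10) described above. Populating
+ * reg with the descriptor write pointer is omitted.
+ *
+ *	ef4_oword_t reg;
+ *
+ *	ef4_writeo_page(efx, &reg, 0xa10, tx_queue->queue);
+ */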
+
+/* Write a page-mapped 32-bit CSR (EVQ_RPTR, EVQ_TMR (EF10), or the
+ * high bits of RX_DESC_UPD or TX_DESC_UPD)
+ */
+static inline void
+_ef4_writed_page(struct ef4_nic *efx, const ef4_dword_t *value,
+ unsigned int reg, unsigned int page)
+{
+ ef4_writed(efx, value, EF4_PAGED_REG(page, reg));
+}
+#define ef4_writed_page(efx, value, reg, page) \
+ _ef4_writed_page(efx, value, \
+ reg + \
+ BUILD_BUG_ON_ZERO((reg) != 0x400 && \
+ (reg) != 0x420 && \
+ (reg) != 0x830 && \
+ (reg) != 0x83c && \
+ (reg) != 0xa18 && \
+ (reg) != 0xa1c), \
+ page)
+
+/* Write TIMER_COMMAND. This is a page-mapped 32-bit CSR, but a bug
+ * in the BIU means that writes to TIMER_COMMAND[0] invalidate the
+ * collector register.
+ */
+static inline void _ef4_writed_page_locked(struct ef4_nic *efx,
+ const ef4_dword_t *value,
+ unsigned int reg,
+ unsigned int page)
+{
+ unsigned long flags __attribute__ ((unused));
+
+ if (page == 0) {
+ spin_lock_irqsave(&efx->biu_lock, flags);
+ ef4_writed(efx, value, EF4_PAGED_REG(page, reg));
+ spin_unlock_irqrestore(&efx->biu_lock, flags);
+ } else {
+ ef4_writed(efx, value, EF4_PAGED_REG(page, reg));
+ }
+}
+#define ef4_writed_page_locked(efx, value, reg, page) \
+ _ef4_writed_page_locked(efx, value, \
+ reg + BUILD_BUG_ON_ZERO((reg) != 0x420), \
+ page)
+
+#endif /* EF4_IO_H */
diff --git a/drivers/net/ethernet/sfc/mdio_10g.c b/drivers/net/ethernet/sfc/falcon/mdio_10g.c
index 8ff954c59efa..e7d7c09296aa 100644
--- a/drivers/net/ethernet/sfc/mdio_10g.c
+++ b/drivers/net/ethernet/sfc/falcon/mdio_10g.c
@@ -16,7 +16,7 @@
#include "mdio_10g.h"
#include "workarounds.h"
-unsigned efx_mdio_id_oui(u32 id)
+unsigned ef4_mdio_id_oui(u32 id)
{
unsigned oui = 0;
int i;
@@ -31,19 +31,19 @@ unsigned efx_mdio_id_oui(u32 id)
return oui;
}
-int efx_mdio_reset_mmd(struct efx_nic *port, int mmd,
+int ef4_mdio_reset_mmd(struct ef4_nic *port, int mmd,
int spins, int spintime)
{
u32 ctrl;
/* Catch callers passing values in the wrong units (or just silly) */
- EFX_BUG_ON_PARANOID(spins * spintime >= 5000);
+ EF4_BUG_ON_PARANOID(spins * spintime >= 5000);
- efx_mdio_write(port, mmd, MDIO_CTRL1, MDIO_CTRL1_RESET);
+ ef4_mdio_write(port, mmd, MDIO_CTRL1, MDIO_CTRL1_RESET);
/* Wait for the reset bit to clear. */
do {
msleep(spintime);
- ctrl = efx_mdio_read(port, mmd, MDIO_CTRL1);
+ ctrl = ef4_mdio_read(port, mmd, MDIO_CTRL1);
spins--;
} while (spins && (ctrl & MDIO_CTRL1_RESET));
@@ -51,13 +51,13 @@ int efx_mdio_reset_mmd(struct efx_nic *port, int mmd,
return spins ? spins : -ETIMEDOUT;
}
-static int efx_mdio_check_mmd(struct efx_nic *efx, int mmd)
+static int ef4_mdio_check_mmd(struct ef4_nic *efx, int mmd)
{
int status;
if (mmd != MDIO_MMD_AN) {
/* Read MMD STATUS2 to check it is responding. */
- status = efx_mdio_read(efx, mmd, MDIO_STAT2);
+ status = ef4_mdio_read(efx, mmd, MDIO_STAT2);
if ((status & MDIO_STAT2_DEVPRST) != MDIO_STAT2_DEVPRST_VAL) {
netif_err(efx, hw, efx->net_dev,
"PHY MMD %d not responding.\n", mmd);
@@ -72,7 +72,7 @@ static int efx_mdio_check_mmd(struct efx_nic *efx, int mmd)
#define MDIO45_RESET_TIME 1000 /* ms */
#define MDIO45_RESET_ITERS 100
-int efx_mdio_wait_reset_mmds(struct efx_nic *efx, unsigned int mmd_mask)
+int ef4_mdio_wait_reset_mmds(struct ef4_nic *efx, unsigned int mmd_mask)
{
const int spintime = MDIO45_RESET_TIME / MDIO45_RESET_ITERS;
int tries = MDIO45_RESET_ITERS;
@@ -86,7 +86,7 @@ int efx_mdio_wait_reset_mmds(struct efx_nic *efx, unsigned int mmd_mask)
in_reset = 0;
while (mask) {
if (mask & 1) {
- stat = efx_mdio_read(efx, mmd, MDIO_CTRL1);
+ stat = ef4_mdio_read(efx, mmd, MDIO_CTRL1);
if (stat < 0) {
netif_err(efx, hw, efx->net_dev,
"failed to read status of"
@@ -113,7 +113,7 @@ int efx_mdio_wait_reset_mmds(struct efx_nic *efx, unsigned int mmd_mask)
return rc;
}
-int efx_mdio_check_mmds(struct efx_nic *efx, unsigned int mmd_mask)
+int ef4_mdio_check_mmds(struct ef4_nic *efx, unsigned int mmd_mask)
{
int mmd = 0, probe_mmd, devs1, devs2;
u32 devices;
@@ -125,8 +125,8 @@ int efx_mdio_check_mmds(struct efx_nic *efx, unsigned int mmd_mask)
__ffs(mmd_mask);
/* Check all the expected MMDs are present */
- devs1 = efx_mdio_read(efx, probe_mmd, MDIO_DEVS1);
- devs2 = efx_mdio_read(efx, probe_mmd, MDIO_DEVS2);
+ devs1 = ef4_mdio_read(efx, probe_mmd, MDIO_DEVS1);
+ devs2 = ef4_mdio_read(efx, probe_mmd, MDIO_DEVS2);
if (devs1 < 0 || devs2 < 0) {
netif_err(efx, hw, efx->net_dev,
"failed to read devices present\n");
@@ -143,7 +143,7 @@ int efx_mdio_check_mmds(struct efx_nic *efx, unsigned int mmd_mask)
/* Check all required MMDs are responding and happy. */
while (mmd_mask) {
- if ((mmd_mask & 1) && efx_mdio_check_mmd(efx, mmd))
+ if ((mmd_mask & 1) && ef4_mdio_check_mmd(efx, mmd))
return -EIO;
mmd_mask = mmd_mask >> 1;
mmd++;
@@ -152,7 +152,7 @@ int efx_mdio_check_mmds(struct efx_nic *efx, unsigned int mmd_mask)
return 0;
}
-bool efx_mdio_links_ok(struct efx_nic *efx, unsigned int mmd_mask)
+bool ef4_mdio_links_ok(struct ef4_nic *efx, unsigned int mmd_mask)
{
/* If the port is in loopback, then we should only consider a subset
* of MMDs */
@@ -160,7 +160,7 @@ bool efx_mdio_links_ok(struct efx_nic *efx, unsigned int mmd_mask)
return true;
else if (LOOPBACK_MASK(efx) & LOOPBACKS_WS)
return false;
- else if (efx_phy_mode_disabled(efx->phy_mode))
+ else if (ef4_phy_mode_disabled(efx->phy_mode))
return false;
else if (efx->loopback_mode == LOOPBACK_PHYXS)
mmd_mask &= ~(MDIO_DEVS_PHYXS |
@@ -178,59 +178,59 @@ bool efx_mdio_links_ok(struct efx_nic *efx, unsigned int mmd_mask)
return mdio45_links_ok(&efx->mdio, mmd_mask);
}
-void efx_mdio_transmit_disable(struct efx_nic *efx)
+void ef4_mdio_transmit_disable(struct ef4_nic *efx)
{
- efx_mdio_set_flag(efx, MDIO_MMD_PMAPMD,
+ ef4_mdio_set_flag(efx, MDIO_MMD_PMAPMD,
MDIO_PMA_TXDIS, MDIO_PMD_TXDIS_GLOBAL,
efx->phy_mode & PHY_MODE_TX_DISABLED);
}
-void efx_mdio_phy_reconfigure(struct efx_nic *efx)
+void ef4_mdio_phy_reconfigure(struct ef4_nic *efx)
{
- efx_mdio_set_flag(efx, MDIO_MMD_PMAPMD,
+ ef4_mdio_set_flag(efx, MDIO_MMD_PMAPMD,
MDIO_CTRL1, MDIO_PMA_CTRL1_LOOPBACK,
efx->loopback_mode == LOOPBACK_PMAPMD);
- efx_mdio_set_flag(efx, MDIO_MMD_PCS,
+ ef4_mdio_set_flag(efx, MDIO_MMD_PCS,
MDIO_CTRL1, MDIO_PCS_CTRL1_LOOPBACK,
efx->loopback_mode == LOOPBACK_PCS);
- efx_mdio_set_flag(efx, MDIO_MMD_PHYXS,
+ ef4_mdio_set_flag(efx, MDIO_MMD_PHYXS,
MDIO_CTRL1, MDIO_PHYXS_CTRL1_LOOPBACK,
efx->loopback_mode == LOOPBACK_PHYXS_WS);
}
-static void efx_mdio_set_mmd_lpower(struct efx_nic *efx,
+static void ef4_mdio_set_mmd_lpower(struct ef4_nic *efx,
int lpower, int mmd)
{
- int stat = efx_mdio_read(efx, mmd, MDIO_STAT1);
+ int stat = ef4_mdio_read(efx, mmd, MDIO_STAT1);
netif_vdbg(efx, drv, efx->net_dev, "Setting low power mode for MMD %d to %d\n",
mmd, lpower);
if (stat & MDIO_STAT1_LPOWERABLE) {
- efx_mdio_set_flag(efx, mmd, MDIO_CTRL1,
+ ef4_mdio_set_flag(efx, mmd, MDIO_CTRL1,
MDIO_CTRL1_LPOWER, lpower);
}
}
-void efx_mdio_set_mmds_lpower(struct efx_nic *efx,
+void ef4_mdio_set_mmds_lpower(struct ef4_nic *efx,
int low_power, unsigned int mmd_mask)
{
int mmd = 0;
mmd_mask &= ~MDIO_DEVS_AN;
while (mmd_mask) {
if (mmd_mask & 1)
- efx_mdio_set_mmd_lpower(efx, low_power, mmd);
+ ef4_mdio_set_mmd_lpower(efx, low_power, mmd);
mmd_mask = (mmd_mask >> 1);
mmd++;
}
}
/**
- * efx_mdio_set_settings - Set (some of) the PHY settings over MDIO.
+ * ef4_mdio_set_settings - Set (some of) the PHY settings over MDIO.
* @efx: Efx NIC
* @ecmd: New settings
*/
-int efx_mdio_set_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd)
+int ef4_mdio_set_settings(struct ef4_nic *efx, struct ethtool_cmd *ecmd)
{
struct ethtool_cmd prev = { .cmd = ETHTOOL_GSET };
@@ -252,16 +252,16 @@ int efx_mdio_set_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd)
(ecmd->advertising | SUPPORTED_Autoneg) & ~prev.supported)
return -EINVAL;
- efx_link_set_advertising(efx, ecmd->advertising | ADVERTISED_Autoneg);
- efx_mdio_an_reconfigure(efx);
+ ef4_link_set_advertising(efx, ecmd->advertising | ADVERTISED_Autoneg);
+ ef4_mdio_an_reconfigure(efx);
return 0;
}
/**
- * efx_mdio_an_reconfigure - Push advertising flags and restart autonegotiation
+ * ef4_mdio_an_reconfigure - Push advertising flags and restart autonegotiation
* @efx: Efx NIC
*/
-void efx_mdio_an_reconfigure(struct efx_nic *efx)
+void ef4_mdio_an_reconfigure(struct ef4_nic *efx)
{
int reg;
@@ -273,32 +273,32 @@ void efx_mdio_an_reconfigure(struct efx_nic *efx)
reg |= ADVERTISE_PAUSE_CAP;
if (efx->link_advertising & ADVERTISED_Asym_Pause)
reg |= ADVERTISE_PAUSE_ASYM;
- efx_mdio_write(efx, MDIO_MMD_AN, MDIO_AN_ADVERTISE, reg);
+ ef4_mdio_write(efx, MDIO_MMD_AN, MDIO_AN_ADVERTISE, reg);
/* Set up the (extended) next page */
efx->phy_op->set_npage_adv(efx, efx->link_advertising);
/* Enable and restart AN */
- reg = efx_mdio_read(efx, MDIO_MMD_AN, MDIO_CTRL1);
+ reg = ef4_mdio_read(efx, MDIO_MMD_AN, MDIO_CTRL1);
reg |= MDIO_AN_CTRL1_ENABLE | MDIO_AN_CTRL1_RESTART | MDIO_AN_CTRL1_XNP;
- efx_mdio_write(efx, MDIO_MMD_AN, MDIO_CTRL1, reg);
+ ef4_mdio_write(efx, MDIO_MMD_AN, MDIO_CTRL1, reg);
}
-u8 efx_mdio_get_pause(struct efx_nic *efx)
+u8 ef4_mdio_get_pause(struct ef4_nic *efx)
{
- BUILD_BUG_ON(EFX_FC_AUTO & (EFX_FC_RX | EFX_FC_TX));
+ BUILD_BUG_ON(EF4_FC_AUTO & (EF4_FC_RX | EF4_FC_TX));
- if (!(efx->wanted_fc & EFX_FC_AUTO))
+ if (!(efx->wanted_fc & EF4_FC_AUTO))
return efx->wanted_fc;
WARN_ON(!(efx->mdio.mmds & MDIO_DEVS_AN));
return mii_resolve_flowctrl_fdx(
mii_advertise_flowctrl(efx->wanted_fc),
- efx_mdio_read(efx, MDIO_MMD_AN, MDIO_AN_LPA));
+ ef4_mdio_read(efx, MDIO_MMD_AN, MDIO_AN_LPA));
}
-int efx_mdio_test_alive(struct efx_nic *efx)
+int ef4_mdio_test_alive(struct ef4_nic *efx)
{
int rc;
int devad = __ffs(efx->mdio.mmds);
@@ -306,8 +306,8 @@ int efx_mdio_test_alive(struct efx_nic *efx)
mutex_lock(&efx->mac_lock);
- physid1 = efx_mdio_read(efx, devad, MDIO_DEVID1);
- physid2 = efx_mdio_read(efx, devad, MDIO_DEVID2);
+ physid1 = ef4_mdio_read(efx, devad, MDIO_DEVID1);
+ physid2 = ef4_mdio_read(efx, devad, MDIO_DEVID2);
if ((physid1 == 0x0000) || (physid1 == 0xffff) ||
(physid2 == 0x0000) || (physid2 == 0xffff)) {
@@ -315,7 +315,7 @@ int efx_mdio_test_alive(struct efx_nic *efx)
"no MDIO PHY present with ID %d\n", efx->mdio.prtad);
rc = -EINVAL;
} else {
- rc = efx_mdio_check_mmds(efx, efx->mdio.mmds);
+ rc = ef4_mdio_check_mmds(efx, efx->mdio.mmds);
}
mutex_unlock(&efx->mac_lock);
diff --git a/drivers/net/ethernet/sfc/mdio_10g.h b/drivers/net/ethernet/sfc/falcon/mdio_10g.h
index 4a2dc4c281b7..885cf7a834a6 100644
--- a/drivers/net/ethernet/sfc/mdio_10g.h
+++ b/drivers/net/ethernet/sfc/falcon/mdio_10g.h
@@ -7,8 +7,8 @@
* by the Free Software Foundation, incorporated herein by reference.
*/
-#ifndef EFX_MDIO_10G_H
-#define EFX_MDIO_10G_H
+#ifndef EF4_MDIO_10G_H
+#define EF4_MDIO_10G_H
#include <linux/mdio.h>
@@ -18,35 +18,35 @@
#include "efx.h"
-static inline unsigned efx_mdio_id_rev(u32 id) { return id & 0xf; }
-static inline unsigned efx_mdio_id_model(u32 id) { return (id >> 4) & 0x3f; }
-unsigned efx_mdio_id_oui(u32 id);
+static inline unsigned ef4_mdio_id_rev(u32 id) { return id & 0xf; }
+static inline unsigned ef4_mdio_id_model(u32 id) { return (id >> 4) & 0x3f; }
+unsigned ef4_mdio_id_oui(u32 id);
-static inline int efx_mdio_read(struct efx_nic *efx, int devad, int addr)
+static inline int ef4_mdio_read(struct ef4_nic *efx, int devad, int addr)
{
return efx->mdio.mdio_read(efx->net_dev, efx->mdio.prtad, devad, addr);
}
static inline void
-efx_mdio_write(struct efx_nic *efx, int devad, int addr, int value)
+ef4_mdio_write(struct ef4_nic *efx, int devad, int addr, int value)
{
efx->mdio.mdio_write(efx->net_dev, efx->mdio.prtad, devad, addr, value);
}
-static inline u32 efx_mdio_read_id(struct efx_nic *efx, int mmd)
+static inline u32 ef4_mdio_read_id(struct ef4_nic *efx, int mmd)
{
- u16 id_low = efx_mdio_read(efx, mmd, MDIO_DEVID2);
- u16 id_hi = efx_mdio_read(efx, mmd, MDIO_DEVID1);
+ u16 id_low = ef4_mdio_read(efx, mmd, MDIO_DEVID2);
+ u16 id_hi = ef4_mdio_read(efx, mmd, MDIO_DEVID1);
return (id_hi << 16) | (id_low);
}
-static inline bool efx_mdio_phyxgxs_lane_sync(struct efx_nic *efx)
+static inline bool ef4_mdio_phyxgxs_lane_sync(struct ef4_nic *efx)
{
int i, lane_status;
bool sync;
for (i = 0; i < 2; ++i)
- lane_status = efx_mdio_read(efx, MDIO_MMD_PHYXS,
+ lane_status = ef4_mdio_read(efx, MDIO_MMD_PHYXS,
MDIO_PHYXS_LNSTAT);
sync = !!(lane_status & MDIO_PHYXS_LNSTAT_ALIGN);
@@ -56,7 +56,7 @@ static inline bool efx_mdio_phyxgxs_lane_sync(struct efx_nic *efx)
return sync;
}
-const char *efx_mdio_mmd_name(int mmd);
+const char *ef4_mdio_mmd_name(int mmd);
/*
* Reset a specific MMD and wait for reset to clear.
@@ -64,47 +64,47 @@ const char *efx_mdio_mmd_name(int mmd);
*
* This function will sleep
*/
-int efx_mdio_reset_mmd(struct efx_nic *efx, int mmd, int spins, int spintime);
+int ef4_mdio_reset_mmd(struct ef4_nic *efx, int mmd, int spins, int spintime);
-/* As efx_mdio_check_mmd but for multiple MMDs */
-int efx_mdio_check_mmds(struct efx_nic *efx, unsigned int mmd_mask);
+/* As ef4_mdio_check_mmd but for multiple MMDs */
+int ef4_mdio_check_mmds(struct ef4_nic *efx, unsigned int mmd_mask);
/* Check the link status of specified mmds in bit mask */
-bool efx_mdio_links_ok(struct efx_nic *efx, unsigned int mmd_mask);
+bool ef4_mdio_links_ok(struct ef4_nic *efx, unsigned int mmd_mask);
/* Generic transmit disable support though PMAPMD */
-void efx_mdio_transmit_disable(struct efx_nic *efx);
+void ef4_mdio_transmit_disable(struct ef4_nic *efx);
/* Generic part of reconfigure: set/clear loopback bits */
-void efx_mdio_phy_reconfigure(struct efx_nic *efx);
+void ef4_mdio_phy_reconfigure(struct ef4_nic *efx);
/* Set the power state of the specified MMDs */
-void efx_mdio_set_mmds_lpower(struct efx_nic *efx, int low_power,
+void ef4_mdio_set_mmds_lpower(struct ef4_nic *efx, int low_power,
unsigned int mmd_mask);
/* Set (some of) the PHY settings over MDIO */
-int efx_mdio_set_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd);
+int ef4_mdio_set_settings(struct ef4_nic *efx, struct ethtool_cmd *ecmd);
/* Push advertising flags and restart autonegotiation */
-void efx_mdio_an_reconfigure(struct efx_nic *efx);
+void ef4_mdio_an_reconfigure(struct ef4_nic *efx);
/* Get pause parameters from AN if available (otherwise return
* requested pause parameters)
*/
-u8 efx_mdio_get_pause(struct efx_nic *efx);
+u8 ef4_mdio_get_pause(struct ef4_nic *efx);
/* Wait for specified MMDs to exit reset within a timeout */
-int efx_mdio_wait_reset_mmds(struct efx_nic *efx, unsigned int mmd_mask);
+int ef4_mdio_wait_reset_mmds(struct ef4_nic *efx, unsigned int mmd_mask);
/* Set or clear flag, debouncing */
static inline void
-efx_mdio_set_flag(struct efx_nic *efx, int devad, int addr,
+ef4_mdio_set_flag(struct ef4_nic *efx, int devad, int addr,
int mask, bool state)
{
mdio_set_flag(&efx->mdio, efx->mdio.prtad, devad, addr, mask, state);
}
/* Liveness self-test for MDIO PHYs */
-int efx_mdio_test_alive(struct efx_nic *efx);
+int ef4_mdio_test_alive(struct ef4_nic *efx);
-#endif /* EFX_MDIO_10G_H */
+#endif /* EF4_MDIO_10G_H */
diff --git a/drivers/net/ethernet/sfc/falcon/mtd.c b/drivers/net/ethernet/sfc/falcon/mtd.c
new file mode 100644
index 000000000000..cde593cb1052
--- /dev/null
+++ b/drivers/net/ethernet/sfc/falcon/mtd.c
@@ -0,0 +1,133 @@
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2005-2006 Fen Systems Ltd.
+ * Copyright 2006-2013 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#include <linux/module.h>
+#include <linux/mtd/mtd.h>
+#include <linux/slab.h>
+#include <linux/rtnetlink.h>
+
+#include "net_driver.h"
+#include "efx.h"
+
+#define to_ef4_mtd_partition(mtd) \
+ container_of(mtd, struct ef4_mtd_partition, mtd)
+
+/* MTD interface */
+
+static int ef4_mtd_erase(struct mtd_info *mtd, struct erase_info *erase)
+{
+ struct ef4_nic *efx = mtd->priv;
+ int rc;
+
+ rc = efx->type->mtd_erase(mtd, erase->addr, erase->len);
+ if (rc == 0) {
+ erase->state = MTD_ERASE_DONE;
+ } else {
+ erase->state = MTD_ERASE_FAILED;
+ erase->fail_addr = MTD_FAIL_ADDR_UNKNOWN;
+ }
+ mtd_erase_callback(erase);
+ return rc;
+}
+
+static void ef4_mtd_sync(struct mtd_info *mtd)
+{
+ struct ef4_mtd_partition *part = to_ef4_mtd_partition(mtd);
+ struct ef4_nic *efx = mtd->priv;
+ int rc;
+
+ rc = efx->type->mtd_sync(mtd);
+ if (rc)
+ pr_err("%s: %s sync failed (%d)\n",
+ part->name, part->dev_type_name, rc);
+}
+
+static void ef4_mtd_remove_partition(struct ef4_mtd_partition *part)
+{
+ int rc;
+
+ for (;;) {
+ rc = mtd_device_unregister(&part->mtd);
+ if (rc != -EBUSY)
+ break;
+ ssleep(1);
+ }
+ WARN_ON(rc);
+ list_del(&part->node);
+}
+
+int ef4_mtd_add(struct ef4_nic *efx, struct ef4_mtd_partition *parts,
+ size_t n_parts, size_t sizeof_part)
+{
+ struct ef4_mtd_partition *part;
+ size_t i;
+
+ for (i = 0; i < n_parts; i++) {
+ part = (struct ef4_mtd_partition *)((char *)parts +
+ i * sizeof_part);
+
+ part->mtd.writesize = 1;
+
+ part->mtd.owner = THIS_MODULE;
+ part->mtd.priv = efx;
+ part->mtd.name = part->name;
+ part->mtd._erase = ef4_mtd_erase;
+ part->mtd._read = efx->type->mtd_read;
+ part->mtd._write = efx->type->mtd_write;
+ part->mtd._sync = ef4_mtd_sync;
+
+ efx->type->mtd_rename(part);
+
+ if (mtd_device_register(&part->mtd, NULL, 0))
+ goto fail;
+
+ /* Add to list in order - ef4_mtd_remove() depends on this */
+ list_add_tail(&part->node, &efx->mtd_list);
+ }
+
+ return 0;
+
+fail:
+ while (i--) {
+ part = (struct ef4_mtd_partition *)((char *)parts +
+ i * sizeof_part);
+ ef4_mtd_remove_partition(part);
+ }
+ /* Failure is unlikely here, but probably means we're out of memory */
+ return -ENOMEM;
+}
+
+void ef4_mtd_remove(struct ef4_nic *efx)
+{
+ struct ef4_mtd_partition *parts, *part, *next;
+
+ WARN_ON(ef4_dev_registered(efx));
+
+ if (list_empty(&efx->mtd_list))
+ return;
+
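+	/* Partitions were added in array order (see ef4_mtd_add()), so the
+	 * first list entry is also the base of the allocation freed below.
+	 */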
+ parts = list_first_entry(&efx->mtd_list, struct ef4_mtd_partition,
+ node);
+
+ list_for_each_entry_safe(part, next, &efx->mtd_list, node)
+ ef4_mtd_remove_partition(part);
+
+ kfree(parts);
+}
+
+void ef4_mtd_rename(struct ef4_nic *efx)
+{
+ struct ef4_mtd_partition *part;
+
+ ASSERT_RTNL();
+
+ list_for_each_entry(part, &efx->mtd_list, node)
+ efx->type->mtd_rename(part);
+}
diff --git a/drivers/net/ethernet/sfc/falcon/net_driver.h b/drivers/net/ethernet/sfc/falcon/net_driver.h
new file mode 100644
index 000000000000..210b28f7d2a1
--- /dev/null
+++ b/drivers/net/ethernet/sfc/falcon/net_driver.h
@@ -0,0 +1,1464 @@
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2005-2006 Fen Systems Ltd.
+ * Copyright 2005-2013 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+/* Common definitions for all Efx net driver code */
+
+#ifndef EF4_NET_DRIVER_H
+#define EF4_NET_DRIVER_H
+
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/if_vlan.h>
+#include <linux/timer.h>
+#include <linux/mdio.h>
+#include <linux/list.h>
+#include <linux/pci.h>
+#include <linux/device.h>
+#include <linux/highmem.h>
+#include <linux/workqueue.h>
+#include <linux/mutex.h>
+#include <linux/rwsem.h>
+#include <linux/vmalloc.h>
+#include <linux/i2c.h>
+#include <linux/mtd/mtd.h>
+#include <net/busy_poll.h>
+
+#include "enum.h"
+#include "bitfield.h"
+#include "filter.h"
+
+/**************************************************************************
+ *
+ * Build definitions
+ *
+ **************************************************************************/
+
+#define EF4_DRIVER_VERSION "4.1"
+
+#ifdef DEBUG
+#define EF4_BUG_ON_PARANOID(x) BUG_ON(x)
+#define EF4_WARN_ON_PARANOID(x) WARN_ON(x)
+#else
+#define EF4_BUG_ON_PARANOID(x) do {} while (0)
+#define EF4_WARN_ON_PARANOID(x) do {} while (0)
+#endif
+
+/**************************************************************************
+ *
+ * Efx data structures
+ *
+ **************************************************************************/
+
+#define EF4_MAX_CHANNELS 32U
+#define EF4_MAX_RX_QUEUES EF4_MAX_CHANNELS
+#define EF4_EXTRA_CHANNEL_IOV 0
+#define EF4_EXTRA_CHANNEL_PTP 1
+#define EF4_MAX_EXTRA_CHANNELS 2U
+
+/* Checksum generation is a per-queue option in hardware, so each
+ * queue visible to the networking core is backed by two hardware TX
+ * queues. */
+#define EF4_MAX_TX_TC 2
+#define EF4_MAX_CORE_TX_QUEUES (EF4_MAX_TX_TC * EF4_MAX_CHANNELS)
+#define EF4_TXQ_TYPE_OFFLOAD 1 /* flag */
+#define EF4_TXQ_TYPE_HIGHPRI 2 /* flag */
+#define EF4_TXQ_TYPES 4
+#define EF4_MAX_TX_QUEUES (EF4_TXQ_TYPES * EF4_MAX_CHANNELS)
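+
+/* Illustrative mapping, an assumption drawn from the encoding above rather
+ * than a definition: a channel's hardware TX queue number is
+ *
+ *	queue = channel * EF4_TXQ_TYPES + type;
+ *
+ * where type combines EF4_TXQ_TYPE_OFFLOAD and EF4_TXQ_TYPE_HIGHPRI.
+ */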
+
+/* Maximum possible MTU the driver supports */
+#define EF4_MAX_MTU (9 * 1024)
+
+/* Minimum MTU, from RFC791 (IP) */
+#define EF4_MIN_MTU 68
+
+/* Size of an RX scatter buffer. Small enough to pack 2 into a 4K page,
+ * and should be a multiple of the cache line size.
+ */
+#define EF4_RX_USR_BUF_SIZE (2048 - 256)
+
+/* If possible, we should ensure cache line alignment at start and end
+ * of every buffer. Otherwise, we just need to ensure 4-byte
+ * alignment of the network header.
+ */
+#if NET_IP_ALIGN == 0
+#define EF4_RX_BUF_ALIGNMENT L1_CACHE_BYTES
+#else
+#define EF4_RX_BUF_ALIGNMENT 4
+#endif
+
+struct ef4_self_tests;
+
+/**
+ * struct ef4_buffer - A general-purpose DMA buffer
+ * @addr: host base address of the buffer
+ * @dma_addr: DMA base address of the buffer
+ * @len: Buffer length, in bytes
+ *
+ * The NIC uses these buffers for its interrupt status registers and
+ * MAC stats dumps.
+ */
+struct ef4_buffer {
+ void *addr;
+ dma_addr_t dma_addr;
+ unsigned int len;
+};
+
+/**
+ * struct ef4_special_buffer - DMA buffer entered into buffer table
+ * @buf: Standard &struct ef4_buffer
+ * @index: Buffer index within controller's buffer table
+ * @entries: Number of buffer table entries
+ *
+ * The NIC has a buffer table that maps buffers of size %EF4_BUF_SIZE.
+ * Event and descriptor rings are addressed via one or more buffer
+ * table entries (and so can be physically non-contiguous, although we
+ * currently do not take advantage of that). On Falcon and Siena we
+ * have to take care of allocating and initialising the entries
+ * ourselves. On later hardware this is managed by the firmware and
+ * @index and @entries are left as 0.
+ */
+struct ef4_special_buffer {
+ struct ef4_buffer buf;
+ unsigned int index;
+ unsigned int entries;
+};
+
+/**
+ * struct ef4_tx_buffer - buffer state for a TX descriptor
+ * @skb: When @flags & %EF4_TX_BUF_SKB, the associated socket buffer to be
+ * freed when descriptor completes
+ * @option: When @flags & %EF4_TX_BUF_OPTION, a NIC-specific option descriptor.
+ * @dma_addr: DMA address of the fragment.
+ * @flags: Flags for allocation and DMA mapping type
+ * @len: Length of this fragment.
+ * This field is zero when the queue slot is empty.
+ * @unmap_len: Length of this fragment to unmap
+ * @dma_offset: Offset of @dma_addr from the address of the backing DMA mapping.
+ * Only valid if @unmap_len != 0.
+ */
+struct ef4_tx_buffer {
+ const struct sk_buff *skb;
+ union {
+ ef4_qword_t option;
+ dma_addr_t dma_addr;
+ };
+ unsigned short flags;
+ unsigned short len;
+ unsigned short unmap_len;
+ unsigned short dma_offset;
+};
+#define EF4_TX_BUF_CONT 1 /* not last descriptor of packet */
+#define EF4_TX_BUF_SKB 2 /* buffer is last part of skb */
+#define EF4_TX_BUF_MAP_SINGLE 8 /* buffer was mapped with dma_map_single() */
+#define EF4_TX_BUF_OPTION 0x10 /* empty buffer for option descriptor */
+
+/**
+ * struct ef4_tx_queue - An Efx TX queue
+ *
+ * This is a ring buffer of TX fragments.
+ * Since the TX completion path always executes on the same
+ * CPU and the xmit path can operate on different CPUs,
+ * performance is increased by ensuring that the completion
+ * path and the xmit path operate on different cache lines.
+ * This is particularly important if the xmit path is always
+ * executing on one CPU which is different from the completion
+ * path. There is also a cache line for members which are
+ * read but not written on the fast path.
+ *
+ * @efx: The associated Efx NIC
+ * @queue: DMA queue number
+ * @channel: The associated channel
+ * @core_txq: The networking core TX queue structure
+ * @buffer: The software buffer ring
+ * @cb_page: Array of pages of copy buffers. Carved up according to
+ * %EF4_TX_CB_ORDER into %EF4_TX_CB_SIZE-sized chunks.
+ * @txd: The hardware descriptor ring
+ * @ptr_mask: The size of the ring minus 1.
+ * @initialised: Has hardware queue been initialised?
+ * @tx_min_size: Minimum transmit size for this queue. Depends on HW.
+ * @read_count: Current read pointer.
+ * This is the number of buffers that have been removed from both rings.
+ * @old_write_count: The value of @write_count when last checked.
+ * This is here for performance reasons. The xmit path will
+ * only get the up-to-date value of @write_count if this
+ * variable indicates that the queue is empty. This is to
+ * avoid cache-line ping-pong between the xmit path and the
+ * completion path.
+ * @merge_events: Number of TX merged completion events
+ * @insert_count: Current insert pointer
+ * This is the number of buffers that have been added to the
+ * software ring.
+ * @write_count: Current write pointer
+ * This is the number of buffers that have been added to the
+ * hardware ring.
+ * @old_read_count: The value of read_count when last checked.
+ * This is here for performance reasons. The xmit path will
+ * only get the up-to-date value of read_count if this
+ * variable indicates that the queue is full. This is to
+ * avoid cache-line ping-pong between the xmit path and the
+ * completion path.
+ * @pushes: Number of times the TX push feature has been used
+ * @xmit_more_available: Are any packets waiting to be pushed to the NIC
+ * @cb_packets: Number of times the TX copybreak feature has been used
+ * @empty_read_count: If the completion path has seen the queue as empty
+ * and the transmission path has not yet checked this, the value of
+ *	@read_count bitwise-ORed with %EF4_EMPTY_COUNT_VALID; otherwise 0.
+ */
+struct ef4_tx_queue {
+ /* Members which don't change on the fast path */
+ struct ef4_nic *efx ____cacheline_aligned_in_smp;
+ unsigned queue;
+ struct ef4_channel *channel;
+ struct netdev_queue *core_txq;
+ struct ef4_tx_buffer *buffer;
+ struct ef4_buffer *cb_page;
+ struct ef4_special_buffer txd;
+ unsigned int ptr_mask;
+ bool initialised;
+ unsigned int tx_min_size;
+
+ /* Function pointers used in the fast path. */
+ int (*handle_tso)(struct ef4_tx_queue*, struct sk_buff*, bool *);
+
+ /* Members used mainly on the completion path */
+ unsigned int read_count ____cacheline_aligned_in_smp;
+ unsigned int old_write_count;
+ unsigned int merge_events;
+ unsigned int bytes_compl;
+ unsigned int pkts_compl;
+
+ /* Members used only on the xmit path */
+ unsigned int insert_count ____cacheline_aligned_in_smp;
+ unsigned int write_count;
+ unsigned int old_read_count;
+ unsigned int pushes;
+ bool xmit_more_available;
+ unsigned int cb_packets;
+ /* Statistics to supplement MAC stats */
+ unsigned long tx_packets;
+
+ /* Members shared between paths and sometimes updated */
+ unsigned int empty_read_count ____cacheline_aligned_in_smp;
+#define EF4_EMPTY_COUNT_VALID 0x80000000
+ atomic_t flush_outstanding;
+};
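+
+/* Illustrative sketch, assumed from the @empty_read_count documentation
+ * above: the xmit path can learn that the completion path saw the queue
+ * empty without reading @read_count itself:
+ *
+ *	unsigned int erc = READ_ONCE(tx_queue->empty_read_count);
+ *
+ *	if (erc != 0 &&
+ *	    ((erc ^ tx_queue->write_count) & ~EF4_EMPTY_COUNT_VALID) == 0)
+ *		... the queue was empty at the current write pointer ...
+ */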
+
+#define EF4_TX_CB_ORDER 7
+#define EF4_TX_CB_SIZE	((1 << EF4_TX_CB_ORDER) - NET_IP_ALIGN)
+
+/**
+ * struct ef4_rx_buffer - An Efx RX data buffer
+ * @dma_addr: DMA base address of the buffer
+ * @page: The associated page buffer.
+ * Will be %NULL if the buffer slot is currently free.
+ * @page_offset: If pending: offset in @page of DMA base address.
+ * If completed: offset in @page of Ethernet header.
+ * @len: If pending: length for DMA descriptor.
+ * If completed: received length, excluding hash prefix.
+ * @flags: Flags for buffer and packet state. These are only set on the
+ * first buffer of a scattered packet.
+ */
+struct ef4_rx_buffer {
+ dma_addr_t dma_addr;
+ struct page *page;
+ u16 page_offset;
+ u16 len;
+ u16 flags;
+};
+#define EF4_RX_BUF_LAST_IN_PAGE 0x0001
+#define EF4_RX_PKT_CSUMMED 0x0002
+#define EF4_RX_PKT_DISCARD 0x0004
+#define EF4_RX_PKT_TCP 0x0040
+#define EF4_RX_PKT_PREFIX_LEN 0x0080 /* length is in prefix only */
+
+/**
+ * struct ef4_rx_page_state - Page-based rx buffer state
+ *
+ * Inserted at the start of every page allocated for receive buffers.
+ * Used to facilitate sharing dma mappings between recycled rx buffers
+ * and those passed up to the kernel.
+ *
+ * @dma_addr: The dma address of this page.
+ */
+struct ef4_rx_page_state {
+ dma_addr_t dma_addr;
+
+ unsigned int __pad[0] ____cacheline_aligned;
+};
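+
+/* Illustrative sketch (layout assumed, for orientation only): because this
+ * state sits at the start of every receive page, a buffer's DMA address is
+ * recovered from the page's mapping plus an offset that skips the header:
+ *
+ *	struct ef4_rx_page_state *state = page_address(page);
+ *	dma_addr_t buf_addr = state->dma_addr + offset;
+ */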
+
+/**
+ * struct ef4_rx_queue - An Efx RX queue
+ * @efx: The associated Efx NIC
+ * @core_index: Index of network core RX queue. Will be >= 0 iff this
+ * is associated with a real RX queue.
+ * @buffer: The software buffer ring
+ * @rxd: The hardware descriptor ring
+ * @ptr_mask: The size of the ring minus 1.
+ * @refill_enabled: Enable refill whenever fill level is low
+ * @flush_pending: Set when an RX flush is pending. Has the same lifetime as
+ * @rxq_flush_pending.
+ * @added_count: Number of buffers added to the receive queue.
+ * @notified_count: Number of buffers given to NIC (<= @added_count).
+ * @removed_count: Number of buffers removed from the receive queue.
+ * @scatter_n: Used by NIC specific receive code.
+ * @scatter_len: Used by NIC specific receive code.
+ * @page_ring: The ring to store DMA mapped pages for reuse.
+ * @page_add: Counter to calculate the write pointer for the recycle ring.
+ * @page_remove: Counter to calculate the read pointer for the recycle ring.
+ * @page_recycle_count: The number of pages that have been recycled.
+ * @page_recycle_failed: The number of pages that couldn't be recycled because
+ * the kernel still held a reference to them.
+ * @page_recycle_full: The number of pages that were released because the
+ * recycle ring was full.
+ * @page_ptr_mask: The number of pages in the RX recycle ring minus 1.
+ * @max_fill: RX descriptor maximum fill level (<= ring size)
+ * @fast_fill_trigger: RX descriptor fill level that will trigger a fast fill
+ * (<= @max_fill)
+ * @min_fill: RX descriptor minimum non-zero fill level.
+ * This records the minimum fill level observed when a ring
+ * refill was triggered.
+ * @recycle_count: RX buffer recycle counter.
+ * @slow_fill: Timer used to defer ef4_nic_generate_fill_event().
+ */
+struct ef4_rx_queue {
+ struct ef4_nic *efx;
+ int core_index;
+ struct ef4_rx_buffer *buffer;
+ struct ef4_special_buffer rxd;
+ unsigned int ptr_mask;
+ bool refill_enabled;
+ bool flush_pending;
+
+ unsigned int added_count;
+ unsigned int notified_count;
+ unsigned int removed_count;
+ unsigned int scatter_n;
+ unsigned int scatter_len;
+ struct page **page_ring;
+ unsigned int page_add;
+ unsigned int page_remove;
+ unsigned int page_recycle_count;
+ unsigned int page_recycle_failed;
+ unsigned int page_recycle_full;
+ unsigned int page_ptr_mask;
+ unsigned int max_fill;
+ unsigned int fast_fill_trigger;
+ unsigned int min_fill;
+ unsigned int min_overfill;
+ unsigned int recycle_count;
+ struct timer_list slow_fill;
+ unsigned int slow_fill_count;
+ /* Statistics to supplement MAC stats */
+ unsigned long rx_packets;
+};
+
+/**
+ * struct ef4_channel - An Efx channel
+ *
+ * A channel comprises an event queue, at least one TX queue, at least
+ * one RX queue, and an associated tasklet for processing the event
+ * queue.
+ *
+ * @efx: Associated Efx NIC
+ * @channel: Channel instance number
+ * @type: Channel type definition
+ * @eventq_init: Event queue initialised flag
+ * @enabled: Channel enabled indicator
+ * @irq: IRQ number (MSI and MSI-X only)
+ * @irq_moderation_us: IRQ moderation value (in microseconds)
+ * @napi_dev: Net device used with NAPI
+ * @napi_str: NAPI control structure
+ * @busy_poll_state: state for NAPI vs busy polling
+ * @eventq: Event queue buffer
+ * @eventq_mask: Event queue pointer mask
+ * @eventq_read_ptr: Event queue read pointer
+ * @event_test_cpu: Last CPU to handle interrupt or test event for this channel
+ * @irq_count: Number of IRQs since last adaptive moderation decision
+ * @irq_mod_score: IRQ moderation score
+ * @rps_flow_id: Flow IDs of filters allocated for accelerated RFS,
+ * indexed by filter ID
+ * @n_rx_tobe_disc: Count of RX_TOBE_DISC errors
+ * @n_rx_ip_hdr_chksum_err: Count of RX IP header checksum errors
+ * @n_rx_tcp_udp_chksum_err: Count of RX TCP and UDP checksum errors
+ * @n_rx_mcast_mismatch: Count of unmatched multicast frames
+ * @n_rx_frm_trunc: Count of RX_FRM_TRUNC errors
+ * @n_rx_overlength: Count of RX_OVERLENGTH errors
+ * @n_skbuff_leaks: Count of skbuffs leaked due to RX overrun
+ * @n_rx_nodesc_trunc: Number of RX packets truncated and then dropped due to
+ * lack of descriptors
+ * @n_rx_merge_events: Number of RX merged completion events
+ * @n_rx_merge_packets: Number of RX packets completed by merged events
+ * @rx_pkt_n_frags: Number of fragments in next packet to be delivered by
+ * __ef4_rx_packet(), or zero if there is none
+ * @rx_pkt_index: Ring index of first buffer for next packet to be delivered
+ * by __ef4_rx_packet(), if @rx_pkt_n_frags != 0
+ * @rx_queue: RX queue for this channel
+ * @tx_queue: TX queues for this channel
+ */
+struct ef4_channel {
+ struct ef4_nic *efx;
+ int channel;
+ const struct ef4_channel_type *type;
+ bool eventq_init;
+ bool enabled;
+ int irq;
+ unsigned int irq_moderation_us;
+ struct net_device *napi_dev;
+ struct napi_struct napi_str;
+#ifdef CONFIG_NET_RX_BUSY_POLL
+ unsigned long busy_poll_state;
+#endif
+ struct ef4_special_buffer eventq;
+ unsigned int eventq_mask;
+ unsigned int eventq_read_ptr;
+ int event_test_cpu;
+
+ unsigned int irq_count;
+ unsigned int irq_mod_score;
+#ifdef CONFIG_RFS_ACCEL
+ unsigned int rfs_filters_added;
+#define RPS_FLOW_ID_INVALID 0xFFFFFFFF
+ u32 *rps_flow_id;
+#endif
+
+ unsigned n_rx_tobe_disc;
+ unsigned n_rx_ip_hdr_chksum_err;
+ unsigned n_rx_tcp_udp_chksum_err;
+ unsigned n_rx_mcast_mismatch;
+ unsigned n_rx_frm_trunc;
+ unsigned n_rx_overlength;
+ unsigned n_skbuff_leaks;
+ unsigned int n_rx_nodesc_trunc;
+ unsigned int n_rx_merge_events;
+ unsigned int n_rx_merge_packets;
+
+ unsigned int rx_pkt_n_frags;
+ unsigned int rx_pkt_index;
+
+ struct ef4_rx_queue rx_queue;
+ struct ef4_tx_queue tx_queue[EF4_TXQ_TYPES];
+};
+
+#ifdef CONFIG_NET_RX_BUSY_POLL
+enum ef4_channel_busy_poll_state {
+ EF4_CHANNEL_STATE_IDLE = 0,
+ EF4_CHANNEL_STATE_NAPI = BIT(0),
+ EF4_CHANNEL_STATE_NAPI_REQ_BIT = 1,
+ EF4_CHANNEL_STATE_NAPI_REQ = BIT(1),
+ EF4_CHANNEL_STATE_POLL_BIT = 2,
+ EF4_CHANNEL_STATE_POLL = BIT(2),
+ EF4_CHANNEL_STATE_DISABLE_BIT = 3,
+};
+
+static inline void ef4_channel_busy_poll_init(struct ef4_channel *channel)
+{
+ WRITE_ONCE(channel->busy_poll_state, EF4_CHANNEL_STATE_IDLE);
+}
+
+/* Called from the device poll routine to get ownership of a channel. */
+static inline bool ef4_channel_lock_napi(struct ef4_channel *channel)
+{
+ unsigned long prev, old = READ_ONCE(channel->busy_poll_state);
+
+ while (1) {
+ switch (old) {
+ case EF4_CHANNEL_STATE_POLL:
+			/* Ensure ef4_channel_try_lock_poll() won't starve us */
+ set_bit(EF4_CHANNEL_STATE_NAPI_REQ_BIT,
+ &channel->busy_poll_state);
+ /* fallthrough */
+ case EF4_CHANNEL_STATE_POLL | EF4_CHANNEL_STATE_NAPI_REQ:
+ return false;
+ default:
+ break;
+ }
+ prev = cmpxchg(&channel->busy_poll_state, old,
+ EF4_CHANNEL_STATE_NAPI);
+ if (unlikely(prev != old)) {
+ /* This is likely to mean we've just entered polling
+ * state. Go back round to set the REQ bit.
+ */
+ old = prev;
+ continue;
+ }
+ return true;
+ }
+}
+
+static inline void ef4_channel_unlock_napi(struct ef4_channel *channel)
+{
+ /* Make sure write has completed from ef4_channel_lock_napi() */
+ smp_wmb();
+ WRITE_ONCE(channel->busy_poll_state, EF4_CHANNEL_STATE_IDLE);
+}
+
+/* Called from ef4_busy_poll(). */
+static inline bool ef4_channel_try_lock_poll(struct ef4_channel *channel)
+{
+ return cmpxchg(&channel->busy_poll_state, EF4_CHANNEL_STATE_IDLE,
+ EF4_CHANNEL_STATE_POLL) == EF4_CHANNEL_STATE_IDLE;
+}
+
+static inline void ef4_channel_unlock_poll(struct ef4_channel *channel)
+{
+ clear_bit_unlock(EF4_CHANNEL_STATE_POLL_BIT, &channel->busy_poll_state);
+}
+
+static inline bool ef4_channel_busy_polling(struct ef4_channel *channel)
+{
+ return test_bit(EF4_CHANNEL_STATE_POLL_BIT, &channel->busy_poll_state);
+}
+
+static inline void ef4_channel_enable(struct ef4_channel *channel)
+{
+ clear_bit_unlock(EF4_CHANNEL_STATE_DISABLE_BIT,
+ &channel->busy_poll_state);
+}
+
+/* Stop further polling or napi access.
+ * Returns false if the channel is currently busy polling.
+ */
+static inline bool ef4_channel_disable(struct ef4_channel *channel)
+{
+ set_bit(EF4_CHANNEL_STATE_DISABLE_BIT, &channel->busy_poll_state);
+ /* Implicit barrier in ef4_channel_busy_polling() */
+ return !ef4_channel_busy_polling(channel);
+}
+
+#else /* CONFIG_NET_RX_BUSY_POLL */
+
+static inline void ef4_channel_busy_poll_init(struct ef4_channel *channel)
+{
+}
+
+static inline bool ef4_channel_lock_napi(struct ef4_channel *channel)
+{
+ return true;
+}
+
+static inline void ef4_channel_unlock_napi(struct ef4_channel *channel)
+{
+}
+
+static inline bool ef4_channel_try_lock_poll(struct ef4_channel *channel)
+{
+ return false;
+}
+
+static inline void ef4_channel_unlock_poll(struct ef4_channel *channel)
+{
+}
+
+static inline bool ef4_channel_busy_polling(struct ef4_channel *channel)
+{
+ return false;
+}
+
+static inline void ef4_channel_enable(struct ef4_channel *channel)
+{
+}
+
+static inline bool ef4_channel_disable(struct ef4_channel *channel)
+{
+ return true;
+}
+#endif /* CONFIG_NET_RX_BUSY_POLL */
+
+/**
+ * struct ef4_msi_context - Context for each MSI
+ * @efx: The associated NIC
+ * @index: Index of the channel/IRQ
+ * @name: Name of the channel/IRQ
+ *
+ * Unlike &struct ef4_channel, this is never reallocated and is always
+ * safe for the IRQ handler to access.
+ */
+struct ef4_msi_context {
+ struct ef4_nic *efx;
+ unsigned int index;
+ char name[IFNAMSIZ + 6];
+};
+
+/**
+ * struct ef4_channel_type - distinguishes traffic and extra channels
+ * @handle_no_channel: Handle failure to allocate an extra channel
+ * @pre_probe: Set up extra state prior to initialisation
+ * @post_remove: Tear down extra state after finalisation, if allocated.
+ * May be called on channels that have not been probed.
+ * @get_name: Generate the channel's name (used for its IRQ handler)
+ * @copy: Copy the channel state prior to reallocation. May be %NULL if
+ * reallocation is not supported.
+ * @receive_skb: Handle an skb ready to be passed to netif_receive_skb()
+ * @keep_eventq: Flag for whether event queue should be kept initialised
+ * while the device is stopped
+ */
+struct ef4_channel_type {
+ void (*handle_no_channel)(struct ef4_nic *);
+ int (*pre_probe)(struct ef4_channel *);
+ void (*post_remove)(struct ef4_channel *);
+ void (*get_name)(struct ef4_channel *, char *buf, size_t len);
+ struct ef4_channel *(*copy)(const struct ef4_channel *);
+ bool (*receive_skb)(struct ef4_channel *, struct sk_buff *);
+ bool keep_eventq;
+};
+
+enum ef4_led_mode {
+ EF4_LED_OFF = 0,
+ EF4_LED_ON = 1,
+ EF4_LED_DEFAULT = 2
+};
+
+#define STRING_TABLE_LOOKUP(val, member) \
+	(((val) < member ## _max) ? member ## _names[val] : "(invalid)")
+
+extern const char *const ef4_loopback_mode_names[];
+extern const unsigned int ef4_loopback_mode_max;
+#define LOOPBACK_MODE(efx) \
+ STRING_TABLE_LOOKUP((efx)->loopback_mode, ef4_loopback_mode)
+
+extern const char *const ef4_reset_type_names[];
+extern const unsigned int ef4_reset_type_max;
+#define RESET_TYPE(type) \
+ STRING_TABLE_LOOKUP(type, ef4_reset_type)
+
+enum ef4_int_mode {
+ /* Be careful if altering to correct macro below */
+ EF4_INT_MODE_MSIX = 0,
+ EF4_INT_MODE_MSI = 1,
+ EF4_INT_MODE_LEGACY = 2,
+ EF4_INT_MODE_MAX /* Insert any new items before this */
+};
+#define EF4_INT_MODE_USE_MSI(x) (((x)->interrupt_mode) <= EF4_INT_MODE_MSI)
+
+enum nic_state {
+ STATE_UNINIT = 0, /* device being probed/removed or is frozen */
+ STATE_READY = 1, /* hardware ready and netdev registered */
+ STATE_DISABLED = 2, /* device disabled due to hardware errors */
+ STATE_RECOVERY = 3, /* device recovering from PCI error */
+};
+
+/* Forward declaration */
+struct ef4_nic;
+
+/* Pseudo bit-mask flow control field */
+#define EF4_FC_RX FLOW_CTRL_RX
+#define EF4_FC_TX FLOW_CTRL_TX
+#define EF4_FC_AUTO 4
+
+/**
+ * struct ef4_link_state - Current state of the link
+ * @up: Link is up
+ * @fd: Link is full-duplex
+ * @fc: Actual flow control flags
+ * @speed: Link speed (Mbps)
+ */
+struct ef4_link_state {
+ bool up;
+ bool fd;
+ u8 fc;
+ unsigned int speed;
+};
+
+static inline bool ef4_link_state_equal(const struct ef4_link_state *left,
+ const struct ef4_link_state *right)
+{
+ return left->up == right->up && left->fd == right->fd &&
+ left->fc == right->fc && left->speed == right->speed;
+}
+
+/**
+ * struct ef4_phy_operations - Efx PHY operations table
+ * @probe: Probe PHY and initialise efx->mdio.mode_support, efx->mdio.mmds,
+ * efx->loopback_modes.
+ * @init: Initialise PHY
+ * @fini: Shut down PHY
+ * @reconfigure: Reconfigure PHY (e.g. for new link parameters)
+ * @poll: Update @link_state and report whether it changed.
+ * Serialised by the mac_lock.
+ * @get_settings: Get ethtool settings. Serialised by the mac_lock.
+ * @set_settings: Set ethtool settings. Serialised by the mac_lock.
+ * @set_npage_adv: Set abilities advertised in (Extended) Next Page
+ * (only needed where AN bit is set in mmds)
+ * @test_alive: Test that PHY is 'alive' (online)
+ * @test_name: Get the name of a PHY-specific test/result
+ * @run_tests: Run tests and record results as appropriate (offline).
+ * Flags are the ethtool tests flags.
+ */
+struct ef4_phy_operations {
+ int (*probe) (struct ef4_nic *efx);
+ int (*init) (struct ef4_nic *efx);
+ void (*fini) (struct ef4_nic *efx);
+ void (*remove) (struct ef4_nic *efx);
+ int (*reconfigure) (struct ef4_nic *efx);
+ bool (*poll) (struct ef4_nic *efx);
+ void (*get_settings) (struct ef4_nic *efx,
+ struct ethtool_cmd *ecmd);
+ int (*set_settings) (struct ef4_nic *efx,
+ struct ethtool_cmd *ecmd);
+ void (*set_npage_adv) (struct ef4_nic *efx, u32);
+ int (*test_alive) (struct ef4_nic *efx);
+ const char *(*test_name) (struct ef4_nic *efx, unsigned int index);
+ int (*run_tests) (struct ef4_nic *efx, int *results, unsigned flags);
+ int (*get_module_eeprom) (struct ef4_nic *efx,
+ struct ethtool_eeprom *ee,
+ u8 *data);
+ int (*get_module_info) (struct ef4_nic *efx,
+ struct ethtool_modinfo *modinfo);
+};
+
+/**
+ * enum ef4_phy_mode - PHY operating mode flags
+ * @PHY_MODE_NORMAL: on and should pass traffic
+ * @PHY_MODE_TX_DISABLED: on with TX disabled
+ * @PHY_MODE_LOW_POWER: set to low power through MDIO
+ * @PHY_MODE_OFF: switched off through external control
+ * @PHY_MODE_SPECIAL: on but will not pass traffic
+ */
+enum ef4_phy_mode {
+ PHY_MODE_NORMAL = 0,
+ PHY_MODE_TX_DISABLED = 1,
+ PHY_MODE_LOW_POWER = 2,
+ PHY_MODE_OFF = 4,
+ PHY_MODE_SPECIAL = 8,
+};
+
+static inline bool ef4_phy_mode_disabled(enum ef4_phy_mode mode)
+{
+ return !!(mode & ~PHY_MODE_TX_DISABLED);
+}
+
+/**
+ * struct ef4_hw_stat_desc - Description of a hardware statistic
+ * @name: Name of the statistic as visible through ethtool, or %NULL if
+ * it should not be exposed
+ * @dma_width: Width in bits (0 for non-DMA statistics)
+ * @offset: Offset within stats (ignored for non-DMA statistics)
+ */
+struct ef4_hw_stat_desc {
+ const char *name;
+ u16 dma_width;
+ u16 offset;
+};
+
+/* Number of bits used in a multicast filter hash address */
+#define EF4_MCAST_HASH_BITS 8
+
+/* Number of (single-bit) entries in a multicast filter hash */
+#define EF4_MCAST_HASH_ENTRIES (1 << EF4_MCAST_HASH_BITS)
+
+/* An Efx multicast filter hash */
+union ef4_multicast_hash {
+ u8 byte[EF4_MCAST_HASH_ENTRIES / 8];
+ ef4_oword_t oword[EF4_MCAST_HASH_ENTRIES / sizeof(ef4_oword_t) / 8];
+};
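+
+/* Illustrative sketch (the hash reduction itself is NIC-specific and
+ * assumed here): setting the filter bit for an address whose hash has
+ * already been reduced to EF4_MCAST_HASH_BITS bits:
+ *
+ *	u32 bit = index & (EF4_MCAST_HASH_ENTRIES - 1);
+ *
+ *	hash->byte[bit / 8] |= 1 << (bit % 8);
+ */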
+
+/**
+ * struct ef4_nic - an Efx NIC
+ * @name: Device name (net device name or bus id before net device registered)
+ * @pci_dev: The PCI device
+ * @node: List node for maintaining primary/secondary function lists
+ * @primary: &struct ef4_nic instance for the primary function of this
+ * controller. May be the same structure, and may be %NULL if no
+ * primary function is bound. Serialised by rtnl_lock.
+ * @secondary_list: List of &struct ef4_nic instances for the secondary PCI
+ * functions of the controller, if this is for the primary function.
+ * Serialised by rtnl_lock.
+ * @type: Controller type attributes
+ * @legacy_irq: IRQ number
+ * @workqueue: Workqueue for port reconfigures and the HW monitor.
+ * Work items do not hold and must not acquire RTNL.
+ * @workqueue_name: Name of workqueue
+ * @reset_work: Scheduled reset workitem
+ * @membase_phys: Memory BAR value as physical address
+ * @membase: Memory BAR value
+ * @interrupt_mode: Interrupt mode
+ * @timer_quantum_ns: Interrupt timer quantum, in nanoseconds
+ * @timer_max_ns: Interrupt timer maximum value, in nanoseconds
+ * @irq_rx_adaptive: Adaptive IRQ moderation enabled for RX event queues
+ * @irq_mod_step_us: Step size for IRQ moderation for RX event queues
+ * @irq_rx_moderation_us: IRQ moderation time for RX event queues
+ * @msg_enable: Log message enable flags
+ * @state: Device state number (%STATE_*). Serialised by the rtnl_lock.
+ * @reset_pending: Bitmask for pending resets
+ * @tx_queue: TX DMA queues
+ * @rx_queue: RX DMA queues
+ * @channel: Channels
+ * @msi_context: Context for each MSI
+ * @extra_channel_type: Types of extra (non-traffic) channels that
+ * should be allocated for this NIC
+ * @rxq_entries: Size of receive queues requested by user.
+ * @txq_entries: Size of transmit queues requested by user.
+ * @txq_stop_thresh: TX queue fill level at or above which we stop it.
+ * @txq_wake_thresh: TX queue fill level at or below which we wake it.
+ * @tx_dc_base: Base qword address in SRAM of TX queue descriptor caches
+ * @rx_dc_base: Base qword address in SRAM of RX queue descriptor caches
+ * @sram_lim_qw: Qword address limit of SRAM
+ * @next_buffer_table: First available buffer table id
+ * @n_channels: Number of channels in use
+ * @n_rx_channels: Number of channels used for RX (= number of RX queues)
+ * @n_tx_channels: Number of channels used for TX
+ * @rx_ip_align: RX DMA address offset to have IP header aligned in
+ * accordance with NET_IP_ALIGN
+ * @rx_dma_len: Current maximum RX DMA length
+ * @rx_buffer_order: Order (log2) of number of pages for each RX buffer
+ * @rx_buffer_truesize: Amortised allocation size of an RX buffer,
+ * for use in sk_buff::truesize
+ * @rx_prefix_size: Size of RX prefix before packet data
+ * @rx_packet_hash_offset: Offset of RX flow hash from start of packet data
+ * (valid only if @rx_prefix_size != 0; always negative)
+ * @rx_packet_len_offset: Offset of RX packet length from start of packet data
+ * (valid only for NICs that set %EF4_RX_PKT_PREFIX_LEN; always negative)
+ * @rx_packet_ts_offset: Offset of timestamp from start of packet data
+ * (valid only if channel->sync_timestamps_enabled; always negative)
+ * @rx_hash_key: Toeplitz hash key for RSS
+ * @rx_indir_table: Indirection table for RSS
+ * @rx_scatter: Scatter mode enabled for receives
+ * @int_error_count: Number of internal errors seen recently
+ * @int_error_expire: Time at which error count will be expired
+ * @irq_soft_enabled: Are IRQs soft-enabled? If not, IRQ handler will
+ * acknowledge but do nothing else.
+ * @irq_status: Interrupt status buffer
+ * @irq_zero_count: Number of legacy IRQs seen with queue flags == 0
+ * @irq_level: IRQ level/index for IRQs not triggered by an event queue
+ * @selftest_work: Work item for asynchronous self-test
+ * @mtd_list: List of MTDs attached to the NIC
+ * @nic_data: Hardware dependent state
+ * @mac_lock: MAC access lock. Protects @port_enabled, @phy_mode,
+ * ef4_monitor() and ef4_reconfigure_port()
+ * @port_enabled: Port enabled indicator.
+ * Serialises ef4_stop_all(), ef4_start_all(), ef4_monitor() and
+ * ef4_mac_work() with kernel interfaces. Safe to read under any
+ * one of the rtnl_lock, mac_lock, or netif_tx_lock, but all three must
+ * be held to modify it.
+ * @port_initialized: Port initialized?
+ * @net_dev: Operating system network device. Consider holding the rtnl lock
+ * @fixed_features: Features which cannot be turned off
+ * @stats_buffer: DMA buffer for statistics
+ * @phy_type: PHY type
+ * @phy_op: PHY interface
+ * @phy_data: PHY private data (including PHY-specific stats)
+ * @mdio: PHY MDIO interface
+ * @phy_mode: PHY operating mode. Serialised by @mac_lock.
+ * @link_advertising: Autonegotiation advertising flags
+ * @link_state: Current state of the link
+ * @n_link_state_changes: Number of times the link has changed state
+ * @unicast_filter: Flag for Falcon-arch simple unicast filter.
+ * Protected by @mac_lock.
+ * @multicast_hash: Multicast hash table for Falcon-arch.
+ * Protected by @mac_lock.
+ * @wanted_fc: Wanted flow control flags
+ * @fc_disable: When non-zero, flow control is disabled. Typically used to
+ * ensure that network back pressure doesn't delay dma queue flushes.
+ * Serialised by the rtnl lock.
+ * @mac_work: Work item for changing MAC promiscuity and multicast hash
+ * @loopback_mode: Loopback status
+ * @loopback_modes: Supported loopback mode bitmask
+ * @loopback_selftest: Offline self-test private state
+ * @filter_sem: Filter table rw_semaphore, for freeing the table
+ * @filter_lock: Filter table lock, for mere content changes
+ * @filter_state: Architecture-dependent filter table state
+ * @rps_expire_channel: Next channel to check for expiry
+ * @rps_expire_index: Next index to check for expiry in
+ * @rps_expire_channel's @rps_flow_id
+ * @active_queues: Count of RX and TX queues that haven't been flushed and drained.
+ * @rxq_flush_pending: Count of number of receive queues that need to be flushed.
+ * Decremented when ef4_flush_rx_queue() is called.
+ * @rxq_flush_outstanding: Count of number of RX flushes started but not yet
+ * completed (either success or failure). Not used when MCDI is used to
+ * flush receive queues.
+ * @flush_wq: wait queue used by ef4_nic_flush_queues() to wait for flush completions.
+ * @vpd_sn: Serial number read from VPD
+ * @monitor_work: Hardware monitor workitem
+ * @biu_lock: BIU (bus interface unit) lock
+ * @last_irq_cpu: Last CPU to handle a possible test interrupt. This
+ * field is used by ef4_test_interrupts() to verify that an
+ * interrupt has occurred.
+ * @stats_lock: Statistics update lock. Must be held when calling
+ * ef4_nic_type::{update,start,stop}_stats.
+ * @n_rx_noskb_drops: Count of RX packets dropped due to failure to allocate an skb
+ *
+ * This is stored in the private area of the &struct net_device.
+ */
+struct ef4_nic {
+ /* The following fields should be written very rarely */
+
+ char name[IFNAMSIZ];
+ struct list_head node;
+ struct ef4_nic *primary;
+ struct list_head secondary_list;
+ struct pci_dev *pci_dev;
+ unsigned int port_num;
+ const struct ef4_nic_type *type;
+ int legacy_irq;
+ bool eeh_disabled_legacy_irq;
+ struct workqueue_struct *workqueue;
+ char workqueue_name[16];
+ struct work_struct reset_work;
+ resource_size_t membase_phys;
+ void __iomem *membase;
+
+ enum ef4_int_mode interrupt_mode;
+ unsigned int timer_quantum_ns;
+ unsigned int timer_max_ns;
+ bool irq_rx_adaptive;
+ unsigned int irq_mod_step_us;
+ unsigned int irq_rx_moderation_us;
+ u32 msg_enable;
+
+ enum nic_state state;
+ unsigned long reset_pending;
+
+ struct ef4_channel *channel[EF4_MAX_CHANNELS];
+ struct ef4_msi_context msi_context[EF4_MAX_CHANNELS];
+ const struct ef4_channel_type *
+ extra_channel_type[EF4_MAX_EXTRA_CHANNELS];
+
+ unsigned rxq_entries;
+ unsigned txq_entries;
+ unsigned int txq_stop_thresh;
+ unsigned int txq_wake_thresh;
+
+ unsigned tx_dc_base;
+ unsigned rx_dc_base;
+ unsigned sram_lim_qw;
+ unsigned next_buffer_table;
+
+ unsigned int max_channels;
+ unsigned int max_tx_channels;
+ unsigned n_channels;
+ unsigned n_rx_channels;
+ unsigned rss_spread;
+ unsigned tx_channel_offset;
+ unsigned n_tx_channels;
+ unsigned int rx_ip_align;
+ unsigned int rx_dma_len;
+ unsigned int rx_buffer_order;
+ unsigned int rx_buffer_truesize;
+ unsigned int rx_page_buf_step;
+ unsigned int rx_bufs_per_page;
+ unsigned int rx_pages_per_batch;
+ unsigned int rx_prefix_size;
+ int rx_packet_hash_offset;
+ int rx_packet_len_offset;
+ int rx_packet_ts_offset;
+ u8 rx_hash_key[40];
+ u32 rx_indir_table[128];
+ bool rx_scatter;
+
+ unsigned int_error_count;
+ unsigned long int_error_expire;
+
+ bool irq_soft_enabled;
+ struct ef4_buffer irq_status;
+ unsigned irq_zero_count;
+ unsigned irq_level;
+ struct delayed_work selftest_work;
+
+#ifdef CONFIG_SFC_FALCON_MTD
+ struct list_head mtd_list;
+#endif
+
+ void *nic_data;
+
+ struct mutex mac_lock;
+ struct work_struct mac_work;
+ bool port_enabled;
+
+ bool mc_bist_for_other_fn;
+ bool port_initialized;
+ struct net_device *net_dev;
+
+ netdev_features_t fixed_features;
+
+ struct ef4_buffer stats_buffer;
+ u64 rx_nodesc_drops_total;
+ u64 rx_nodesc_drops_while_down;
+ bool rx_nodesc_drops_prev_state;
+
+ unsigned int phy_type;
+ const struct ef4_phy_operations *phy_op;
+ void *phy_data;
+ struct mdio_if_info mdio;
+ enum ef4_phy_mode phy_mode;
+
+ u32 link_advertising;
+ struct ef4_link_state link_state;
+ unsigned int n_link_state_changes;
+
+ bool unicast_filter;
+ union ef4_multicast_hash multicast_hash;
+ u8 wanted_fc;
+ unsigned fc_disable;
+
+ atomic_t rx_reset;
+ enum ef4_loopback_mode loopback_mode;
+ u64 loopback_modes;
+
+ void *loopback_selftest;
+
+ struct rw_semaphore filter_sem;
+ spinlock_t filter_lock;
+ void *filter_state;
+#ifdef CONFIG_RFS_ACCEL
+ unsigned int rps_expire_channel;
+ unsigned int rps_expire_index;
+#endif
+
+ atomic_t active_queues;
+ atomic_t rxq_flush_pending;
+ atomic_t rxq_flush_outstanding;
+ wait_queue_head_t flush_wq;
+
+ char *vpd_sn;
+
+ /* The following fields may be written more often */
+
+ struct delayed_work monitor_work ____cacheline_aligned_in_smp;
+ spinlock_t biu_lock;
+ int last_irq_cpu;
+ spinlock_t stats_lock;
+ atomic_t n_rx_noskb_drops;
+};
+
+static inline int ef4_dev_registered(struct ef4_nic *efx)
+{
+ return efx->net_dev->reg_state == NETREG_REGISTERED;
+}
+
+static inline unsigned int ef4_port_num(struct ef4_nic *efx)
+{
+ return efx->port_num;
+}
+
+struct ef4_mtd_partition {
+ struct list_head node;
+ struct mtd_info mtd;
+ const char *dev_type_name;
+ const char *type_name;
+ char name[IFNAMSIZ + 20];
+};
+
+/**
+ * struct ef4_nic_type - Efx device type definition
+ * @mem_bar: Get the memory BAR
+ * @mem_map_size: Get memory BAR mapped size
+ * @probe: Probe the controller
+ * @remove: Free resources allocated by probe()
+ * @init: Initialise the controller
+ * @dimension_resources: Dimension controller resources (buffer table,
+ * and VIs once the available interrupt resources are clear)
+ * @fini: Shut down the controller
+ * @monitor: Periodic function for polling link state and hardware monitor
+ * @map_reset_reason: Map ethtool reset reason to a reset method
+ * @map_reset_flags: Map ethtool reset flags to a reset method, if possible
+ * @reset: Reset the controller hardware and possibly the PHY. This will
+ * be called while the controller is uninitialised.
+ * @probe_port: Probe the MAC and PHY
+ * @remove_port: Free resources allocated by probe_port()
+ * @handle_global_event: Handle a "global" event (may be %NULL)
+ * @fini_dmaq: Flush and finalise DMA queues (RX and TX queues)
+ * @prepare_flush: Prepare the hardware for flushing the DMA queues
+ * (for Falcon architecture)
+ * @finish_flush: Clean up after flushing the DMA queues (for Falcon
+ * architecture)
+ * @prepare_flr: Prepare for an FLR
+ * @finish_flr: Clean up after an FLR
+ * @describe_stats: Describe statistics for ethtool
+ * @update_stats: Update statistics not provided by event handling.
+ * Either argument may be %NULL.
+ * @start_stats: Start the regular fetching of statistics
+ * @pull_stats: Pull stats from the NIC and wait until they arrive.
+ * @stop_stats: Stop the regular fetching of statistics
+ * @set_id_led: Set state of identifying LED or revert to automatic function
+ * @push_irq_moderation: Apply interrupt moderation value
+ * @reconfigure_port: Push loopback/power/txdis changes to the MAC and PHY
+ * @prepare_enable_fc_tx: Prepare MAC to enable pause frame TX (may be %NULL)
+ * @reconfigure_mac: Push MAC address, MTU, flow control and filter settings
+ * to the hardware. Serialised by the mac_lock.
+ * @check_mac_fault: Check MAC fault state. True if fault present.
+ * @get_wol: Get WoL configuration from driver state
+ * @set_wol: Push WoL configuration to the NIC
+ * @resume_wol: Synchronise WoL state between driver and MC (e.g. after resume)
+ * @test_chip: Test registers. May use ef4_farch_test_registers(), and is
+ * expected to reset the NIC.
+ * @test_nvram: Test validity of NVRAM contents
+ * @irq_enable_master: Enable IRQs on the NIC. Each event queue must
+ * be separately enabled after this.
+ * @irq_test_generate: Generate a test IRQ
+ * @irq_disable_non_ev: Disable non-event IRQs on the NIC. Each event
+ * queue must be separately disabled before this.
+ * @irq_handle_msi: Handle MSI for a channel. The @dev_id argument is
+ * a pointer to the &struct ef4_msi_context for the channel.
+ * @irq_handle_legacy: Handle legacy interrupt. The @dev_id argument
+ * is a pointer to the &struct ef4_nic.
+ * @tx_probe: Allocate resources for TX queue
+ * @tx_init: Initialise TX queue on the NIC
+ * @tx_remove: Free resources for TX queue
+ * @tx_write: Write TX descriptors and doorbell
+ * @tx_limit_len: Maximum data length for a single TX descriptor at the
+ * given DMA address
+ * @rx_push_rss_config: Write RSS hash key and indirection table to the NIC
+ * @rx_probe: Allocate resources for RX queue
+ * @rx_init: Initialise RX queue on the NIC
+ * @rx_remove: Free resources for RX queue
+ * @rx_write: Write RX descriptors and doorbell
+ * @rx_defer_refill: Generate a refill reminder event
+ * @ev_probe: Allocate resources for event queue
+ * @ev_init: Initialise event queue on the NIC
+ * @ev_fini: Deinitialise event queue on the NIC
+ * @ev_remove: Free resources for event queue
+ * @ev_process: Process events for a queue, up to the given NAPI quota
+ * @ev_read_ack: Acknowledge read events on a queue, rearming its IRQ
+ * @ev_test_generate: Generate a test event
+ * @filter_table_probe: Probe filter capabilities and set up filter software state
+ * @filter_table_restore: Restore filters removed from hardware
+ * @filter_table_remove: Remove filters from hardware and tear down software state
+ * @filter_update_rx_scatter: Update filters after change to rx scatter setting
+ * @filter_insert: Add or replace a filter
+ * @filter_remove_safe: Remove a filter by ID, carefully
+ * @filter_get_safe: Retrieve a filter by ID, carefully
+ * @filter_clear_rx: Remove all RX filters whose priority is less than or
+ * equal to the given priority and is not %EF4_FILTER_PRI_AUTO
+ * @filter_count_rx_used: Get the number of filters in use at a given priority
+ * @filter_get_rx_id_limit: Get maximum value of a filter id, plus 1
+ * @filter_get_rx_ids: Get list of RX filters at a given priority
+ * @filter_rfs_insert: Add or replace a filter for RFS. This must be
+ * atomic. The hardware change may be asynchronous but should
+ * not be delayed for long. It may fail if this can't be done
+ * atomically.
+ * @filter_rfs_expire_one: Consider expiring a filter inserted for RFS.
+ * This must check whether the specified table entry is used by RFS
+ * and that rps_may_expire_flow() returns true for it.
+ * @mtd_probe: Probe and add MTD partitions associated with this net device,
+ * using ef4_mtd_add()
+ * @mtd_rename: Set an MTD partition name using the net device name
+ * @mtd_read: Read from an MTD partition
+ * @mtd_erase: Erase part of an MTD partition
+ * @mtd_write: Write to an MTD partition
+ * @mtd_sync: Wait for write-back to complete on MTD partition. This
+ * also notifies the driver that a writer has finished using this
+ * partition.
+ * @get_mac_address: Get the permanent MAC address of the device
+ * @set_mac_address: Set the MAC address of the device
+ * @revision: Hardware architecture revision
+ * @txd_ptr_tbl_base: TX descriptor ring base address
+ * @rxd_ptr_tbl_base: RX descriptor ring base address
+ * @buf_tbl_base: Buffer table base address
+ * @evq_ptr_tbl_base: Event queue pointer table base address
+ * @evq_rptr_tbl_base: Event queue read-pointer table base address
+ * @max_dma_mask: Maximum possible DMA mask
+ * @rx_prefix_size: Size of RX prefix before packet data
+ * @rx_hash_offset: Offset of RX flow hash within prefix
+ * @rx_ts_offset: Offset of timestamp within prefix
+ * @rx_buffer_padding: Size of padding at end of RX packet
+ * @can_rx_scatter: NIC is able to scatter packets to multiple buffers
+ * @always_rx_scatter: NIC will always scatter packets to multiple buffers
+ * @max_interrupt_mode: Highest capability interrupt mode supported
+ * from &enum ef4_int_mode.
+ * @timer_period_max: Maximum period of interrupt timer (in ticks)
+ * @offload_features: net_device feature flags for protocol offload
+ * features implemented in hardware
+ * @max_rx_ip_filters: Maximum number of RX IP filters supported
+ */
+struct ef4_nic_type {
+ unsigned int mem_bar;
+ unsigned int (*mem_map_size)(struct ef4_nic *efx);
+ int (*probe)(struct ef4_nic *efx);
+ void (*remove)(struct ef4_nic *efx);
+ int (*init)(struct ef4_nic *efx);
+ int (*dimension_resources)(struct ef4_nic *efx);
+ void (*fini)(struct ef4_nic *efx);
+ void (*monitor)(struct ef4_nic *efx);
+ enum reset_type (*map_reset_reason)(enum reset_type reason);
+ int (*map_reset_flags)(u32 *flags);
+ int (*reset)(struct ef4_nic *efx, enum reset_type method);
+ int (*probe_port)(struct ef4_nic *efx);
+ void (*remove_port)(struct ef4_nic *efx);
+ bool (*handle_global_event)(struct ef4_channel *channel, ef4_qword_t *);
+ int (*fini_dmaq)(struct ef4_nic *efx);
+ void (*prepare_flush)(struct ef4_nic *efx);
+ void (*finish_flush)(struct ef4_nic *efx);
+ void (*prepare_flr)(struct ef4_nic *efx);
+ void (*finish_flr)(struct ef4_nic *efx);
+ size_t (*describe_stats)(struct ef4_nic *efx, u8 *names);
+ size_t (*update_stats)(struct ef4_nic *efx, u64 *full_stats,
+ struct rtnl_link_stats64 *core_stats);
+ void (*start_stats)(struct ef4_nic *efx);
+ void (*pull_stats)(struct ef4_nic *efx);
+ void (*stop_stats)(struct ef4_nic *efx);
+ void (*set_id_led)(struct ef4_nic *efx, enum ef4_led_mode mode);
+ void (*push_irq_moderation)(struct ef4_channel *channel);
+ int (*reconfigure_port)(struct ef4_nic *efx);
+ void (*prepare_enable_fc_tx)(struct ef4_nic *efx);
+ int (*reconfigure_mac)(struct ef4_nic *efx);
+ bool (*check_mac_fault)(struct ef4_nic *efx);
+ void (*get_wol)(struct ef4_nic *efx, struct ethtool_wolinfo *wol);
+ int (*set_wol)(struct ef4_nic *efx, u32 type);
+ void (*resume_wol)(struct ef4_nic *efx);
+ int (*test_chip)(struct ef4_nic *efx, struct ef4_self_tests *tests);
+ int (*test_nvram)(struct ef4_nic *efx);
+ void (*irq_enable_master)(struct ef4_nic *efx);
+ int (*irq_test_generate)(struct ef4_nic *efx);
+ void (*irq_disable_non_ev)(struct ef4_nic *efx);
+ irqreturn_t (*irq_handle_msi)(int irq, void *dev_id);
+ irqreturn_t (*irq_handle_legacy)(int irq, void *dev_id);
+ int (*tx_probe)(struct ef4_tx_queue *tx_queue);
+ void (*tx_init)(struct ef4_tx_queue *tx_queue);
+ void (*tx_remove)(struct ef4_tx_queue *tx_queue);
+ void (*tx_write)(struct ef4_tx_queue *tx_queue);
+ unsigned int (*tx_limit_len)(struct ef4_tx_queue *tx_queue,
+ dma_addr_t dma_addr, unsigned int len);
+ int (*rx_push_rss_config)(struct ef4_nic *efx, bool user,
+ const u32 *rx_indir_table);
+ int (*rx_probe)(struct ef4_rx_queue *rx_queue);
+ void (*rx_init)(struct ef4_rx_queue *rx_queue);
+ void (*rx_remove)(struct ef4_rx_queue *rx_queue);
+ void (*rx_write)(struct ef4_rx_queue *rx_queue);
+ void (*rx_defer_refill)(struct ef4_rx_queue *rx_queue);
+ int (*ev_probe)(struct ef4_channel *channel);
+ int (*ev_init)(struct ef4_channel *channel);
+ void (*ev_fini)(struct ef4_channel *channel);
+ void (*ev_remove)(struct ef4_channel *channel);
+ int (*ev_process)(struct ef4_channel *channel, int quota);
+ void (*ev_read_ack)(struct ef4_channel *channel);
+ void (*ev_test_generate)(struct ef4_channel *channel);
+ int (*filter_table_probe)(struct ef4_nic *efx);
+ void (*filter_table_restore)(struct ef4_nic *efx);
+ void (*filter_table_remove)(struct ef4_nic *efx);
+ void (*filter_update_rx_scatter)(struct ef4_nic *efx);
+ s32 (*filter_insert)(struct ef4_nic *efx,
+ struct ef4_filter_spec *spec, bool replace);
+ int (*filter_remove_safe)(struct ef4_nic *efx,
+ enum ef4_filter_priority priority,
+ u32 filter_id);
+ int (*filter_get_safe)(struct ef4_nic *efx,
+ enum ef4_filter_priority priority,
+ u32 filter_id, struct ef4_filter_spec *);
+ int (*filter_clear_rx)(struct ef4_nic *efx,
+ enum ef4_filter_priority priority);
+ u32 (*filter_count_rx_used)(struct ef4_nic *efx,
+ enum ef4_filter_priority priority);
+ u32 (*filter_get_rx_id_limit)(struct ef4_nic *efx);
+ s32 (*filter_get_rx_ids)(struct ef4_nic *efx,
+ enum ef4_filter_priority priority,
+ u32 *buf, u32 size);
+#ifdef CONFIG_RFS_ACCEL
+ s32 (*filter_rfs_insert)(struct ef4_nic *efx,
+ struct ef4_filter_spec *spec);
+ bool (*filter_rfs_expire_one)(struct ef4_nic *efx, u32 flow_id,
+ unsigned int index);
+#endif
+#ifdef CONFIG_SFC_FALCON_MTD
+ int (*mtd_probe)(struct ef4_nic *efx);
+ void (*mtd_rename)(struct ef4_mtd_partition *part);
+ int (*mtd_read)(struct mtd_info *mtd, loff_t start, size_t len,
+ size_t *retlen, u8 *buffer);
+ int (*mtd_erase)(struct mtd_info *mtd, loff_t start, size_t len);
+ int (*mtd_write)(struct mtd_info *mtd, loff_t start, size_t len,
+ size_t *retlen, const u8 *buffer);
+ int (*mtd_sync)(struct mtd_info *mtd);
+#endif
+ int (*get_mac_address)(struct ef4_nic *efx, unsigned char *perm_addr);
+ int (*set_mac_address)(struct ef4_nic *efx);
+
+ int revision;
+ unsigned int txd_ptr_tbl_base;
+ unsigned int rxd_ptr_tbl_base;
+ unsigned int buf_tbl_base;
+ unsigned int evq_ptr_tbl_base;
+ unsigned int evq_rptr_tbl_base;
+ u64 max_dma_mask;
+ unsigned int rx_prefix_size;
+ unsigned int rx_hash_offset;
+ unsigned int rx_ts_offset;
+ unsigned int rx_buffer_padding;
+ bool can_rx_scatter;
+ bool always_rx_scatter;
+ unsigned int max_interrupt_mode;
+ unsigned int timer_period_max;
+ netdev_features_t offload_features;
+ unsigned int max_rx_ip_filters;
+};
+
+/**************************************************************************
+ *
+ * Prototypes and inline functions
+ *
+ *************************************************************************/
+
+static inline struct ef4_channel *
+ef4_get_channel(struct ef4_nic *efx, unsigned index)
+{
+ EF4_BUG_ON_PARANOID(index >= efx->n_channels);
+ return efx->channel[index];
+}
+
+/* Iterate over all used channels */
+#define ef4_for_each_channel(_channel, _efx) \
+ for (_channel = (_efx)->channel[0]; \
+ _channel; \
+ _channel = (_channel->channel + 1 < (_efx)->n_channels) ? \
+ (_efx)->channel[_channel->channel + 1] : NULL)
+
+/* Iterate over all used channels in reverse */
+#define ef4_for_each_channel_rev(_channel, _efx) \
+ for (_channel = (_efx)->channel[(_efx)->n_channels - 1]; \
+ _channel; \
+ _channel = _channel->channel ? \
+ (_efx)->channel[_channel->channel - 1] : NULL)
+
+static inline struct ef4_tx_queue *
+ef4_get_tx_queue(struct ef4_nic *efx, unsigned index, unsigned type)
+{
+ EF4_BUG_ON_PARANOID(index >= efx->n_tx_channels ||
+ type >= EF4_TXQ_TYPES);
+ return &efx->channel[efx->tx_channel_offset + index]->tx_queue[type];
+}
+
+static inline bool ef4_channel_has_tx_queues(struct ef4_channel *channel)
+{
+ return channel->channel - channel->efx->tx_channel_offset <
+ channel->efx->n_tx_channels;
+}
+
+static inline struct ef4_tx_queue *
+ef4_channel_get_tx_queue(struct ef4_channel *channel, unsigned type)
+{
+ EF4_BUG_ON_PARANOID(!ef4_channel_has_tx_queues(channel) ||
+ type >= EF4_TXQ_TYPES);
+ return &channel->tx_queue[type];
+}
+
+static inline bool ef4_tx_queue_used(struct ef4_tx_queue *tx_queue)
+{
+ return !(tx_queue->efx->net_dev->num_tc < 2 &&
+ tx_queue->queue & EF4_TXQ_TYPE_HIGHPRI);
+}
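+
+/* Editorial note: that is, a high-priority TX queue only counts as used
+ * once the net device has at least two traffic classes configured; the
+ * low-priority queues are always considered used.
+ */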
+
+/* Iterate over all TX queues belonging to a channel */
+#define ef4_for_each_channel_tx_queue(_tx_queue, _channel) \
+ if (!ef4_channel_has_tx_queues(_channel)) \
+ ; \
+ else \
+ for (_tx_queue = (_channel)->tx_queue; \
+ _tx_queue < (_channel)->tx_queue + EF4_TXQ_TYPES && \
+ ef4_tx_queue_used(_tx_queue); \
+ _tx_queue++)
+
+/* Iterate over all possible TX queues belonging to a channel */
+#define ef4_for_each_possible_channel_tx_queue(_tx_queue, _channel) \
+ if (!ef4_channel_has_tx_queues(_channel)) \
+ ; \
+ else \
+ for (_tx_queue = (_channel)->tx_queue; \
+ _tx_queue < (_channel)->tx_queue + EF4_TXQ_TYPES; \
+ _tx_queue++)
+
+static inline bool ef4_channel_has_rx_queue(struct ef4_channel *channel)
+{
+ return channel->rx_queue.core_index >= 0;
+}
+
+static inline struct ef4_rx_queue *
+ef4_channel_get_rx_queue(struct ef4_channel *channel)
+{
+ EF4_BUG_ON_PARANOID(!ef4_channel_has_rx_queue(channel));
+ return &channel->rx_queue;
+}
+
+/* Iterate over all RX queues belonging to a channel */
+#define ef4_for_each_channel_rx_queue(_rx_queue, _channel) \
+ if (!ef4_channel_has_rx_queue(_channel)) \
+ ; \
+ else \
+ for (_rx_queue = &(_channel)->rx_queue; \
+ _rx_queue; \
+ _rx_queue = NULL)
+
+static inline struct ef4_channel *
+ef4_rx_queue_channel(struct ef4_rx_queue *rx_queue)
+{
+ return container_of(rx_queue, struct ef4_channel, rx_queue);
+}
+
+static inline int ef4_rx_queue_index(struct ef4_rx_queue *rx_queue)
+{
+ return ef4_rx_queue_channel(rx_queue)->channel;
+}
+
+/* Returns a pointer to the specified receive buffer in the RX
+ * descriptor queue.
+ */
+static inline struct ef4_rx_buffer *ef4_rx_buffer(struct ef4_rx_queue *rx_queue,
+ unsigned int index)
+{
+ return &rx_queue->buffer[index];
+}
+
+/**
+ * EF4_MAX_FRAME_LEN - calculate maximum frame length
+ *
+ * This calculates the maximum frame length that will be used for a
+ * given MTU. The frame length will be equal to the MTU plus a
+ * constant amount of header space and padding. This is the quantity
+ * that the net driver will program into the MAC as the maximum frame
+ * length.
+ *
+ * The 10G MAC requires 8-byte alignment on the frame
+ * length, so we round up to the nearest 8.
+ *
+ * Re-clocking by the XGXS on RX can reduce an IPG to 32 bits (half an
+ * XGMII cycle). If the frame length reaches the maximum value in the
+ * same cycle, the XMAC can miss the IPG altogether. We work around
+ * this by adding a further 16 bytes.
+ */
+#define EF4_FRAME_PAD 16
+#define EF4_MAX_FRAME_LEN(mtu) \
+ (ALIGN(((mtu) + ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN + EF4_FRAME_PAD), 8))
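+
+/* Worked example (editorial, illustrative): for the standard MTU of 1500,
+ * 1500 + 14 (ETH_HLEN) + 4 (VLAN_HLEN) + 4 (ETH_FCS_LEN) + 16 (EF4_FRAME_PAD)
+ * = 1538, which ALIGN() rounds up to EF4_MAX_FRAME_LEN(1500) == 1544.
+ */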
+
+/* Get all supported features.
+ * If a feature is not fixed, it is present in hw_features.
+ * If a feature is fixed, it is not present in hw_features, but is
+ * always present in features.
+ */
+static inline netdev_features_t ef4_supported_features(const struct ef4_nic *efx)
+{
+ const struct net_device *net_dev = efx->net_dev;
+
+ return net_dev->features | net_dev->hw_features;
+}
+
+/* Get the current TX queue insert index. */
+static inline unsigned int
+ef4_tx_queue_get_insert_index(const struct ef4_tx_queue *tx_queue)
+{
+ return tx_queue->insert_count & tx_queue->ptr_mask;
+}
+
+/* Get a TX buffer. */
+static inline struct ef4_tx_buffer *
+__ef4_tx_queue_get_insert_buffer(const struct ef4_tx_queue *tx_queue)
+{
+ return &tx_queue->buffer[ef4_tx_queue_get_insert_index(tx_queue)];
+}
+
+/* Get a TX buffer, checking it's not currently in use. */
+static inline struct ef4_tx_buffer *
+ef4_tx_queue_get_insert_buffer(const struct ef4_tx_queue *tx_queue)
+{
+ struct ef4_tx_buffer *buffer =
+ __ef4_tx_queue_get_insert_buffer(tx_queue);
+
+ EF4_BUG_ON_PARANOID(buffer->len);
+ EF4_BUG_ON_PARANOID(buffer->flags);
+ EF4_BUG_ON_PARANOID(buffer->unmap_len);
+
+ return buffer;
+}
+
+#endif /* EF4_NET_DRIVER_H */
diff --git a/drivers/net/ethernet/sfc/falcon/nic.c b/drivers/net/ethernet/sfc/falcon/nic.c
new file mode 100644
index 000000000000..a8ecb33390da
--- /dev/null
+++ b/drivers/net/ethernet/sfc/falcon/nic.c
@@ -0,0 +1,527 @@
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2005-2006 Fen Systems Ltd.
+ * Copyright 2006-2013 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#include <linux/bitops.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/module.h>
+#include <linux/seq_file.h>
+#include <linux/cpu_rmap.h>
+#include "net_driver.h"
+#include "bitfield.h"
+#include "efx.h"
+#include "nic.h"
+#include "farch_regs.h"
+#include "io.h"
+#include "workarounds.h"
+
+/**************************************************************************
+ *
+ * Generic buffer handling
+ * These buffers are used for interrupt status, MAC stats, etc.
+ *
+ **************************************************************************/
+
+int ef4_nic_alloc_buffer(struct ef4_nic *efx, struct ef4_buffer *buffer,
+ unsigned int len, gfp_t gfp_flags)
+{
+ buffer->addr = dma_zalloc_coherent(&efx->pci_dev->dev, len,
+ &buffer->dma_addr, gfp_flags);
+ if (!buffer->addr)
+ return -ENOMEM;
+ buffer->len = len;
+ return 0;
+}
+
+void ef4_nic_free_buffer(struct ef4_nic *efx, struct ef4_buffer *buffer)
+{
+ if (buffer->addr) {
+ dma_free_coherent(&efx->pci_dev->dev, buffer->len,
+ buffer->addr, buffer->dma_addr);
+ buffer->addr = NULL;
+ }
+}
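+
+/* Typical usage (illustrative sketch, editorial; error handling elided):
+ * callers allocate a DMA-coherent buffer once, e.g. for the interrupt
+ * status word, and free it on teardown:
+ *
+ *	rc = ef4_nic_alloc_buffer(efx, &efx->irq_status,
+ *				  sizeof(ef4_oword_t), GFP_KERNEL);
+ *	if (rc)
+ *		return rc;
+ *	...
+ *	ef4_nic_free_buffer(efx, &efx->irq_status);
+ */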
+
+/* Check whether an event is present in the eventq at the current
+ * read pointer. Only useful for self-test.
+ */
+bool ef4_nic_event_present(struct ef4_channel *channel)
+{
+ return ef4_event_present(ef4_event(channel, channel->eventq_read_ptr));
+}
+
+void ef4_nic_event_test_start(struct ef4_channel *channel)
+{
+ channel->event_test_cpu = -1;
+ smp_wmb();
+ channel->efx->type->ev_test_generate(channel);
+}
+
+int ef4_nic_irq_test_start(struct ef4_nic *efx)
+{
+ efx->last_irq_cpu = -1;
+ smp_wmb();
+ return efx->type->irq_test_generate(efx);
+}
+
+/* Hook interrupt handler(s)
+ * Try MSI and then legacy interrupts.
+ */
+int ef4_nic_init_interrupt(struct ef4_nic *efx)
+{
+ struct ef4_channel *channel;
+ unsigned int n_irqs;
+ int rc;
+
+ if (!EF4_INT_MODE_USE_MSI(efx)) {
+ rc = request_irq(efx->legacy_irq,
+ efx->type->irq_handle_legacy, IRQF_SHARED,
+ efx->name, efx);
+ if (rc) {
+ netif_err(efx, drv, efx->net_dev,
+ "failed to hook legacy IRQ %d\n",
+ efx->pci_dev->irq);
+ goto fail1;
+ }
+ return 0;
+ }
+
+#ifdef CONFIG_RFS_ACCEL
+ if (efx->interrupt_mode == EF4_INT_MODE_MSIX) {
+ efx->net_dev->rx_cpu_rmap =
+ alloc_irq_cpu_rmap(efx->n_rx_channels);
+ if (!efx->net_dev->rx_cpu_rmap) {
+ rc = -ENOMEM;
+ goto fail1;
+ }
+ }
+#endif
+
+ /* Hook MSI or MSI-X interrupt */
+ n_irqs = 0;
+ ef4_for_each_channel(channel, efx) {
+ rc = request_irq(channel->irq, efx->type->irq_handle_msi,
+ IRQF_PROBE_SHARED, /* Not shared */
+ efx->msi_context[channel->channel].name,
+ &efx->msi_context[channel->channel]);
+ if (rc) {
+ netif_err(efx, drv, efx->net_dev,
+ "failed to hook IRQ %d\n", channel->irq);
+ goto fail2;
+ }
+ ++n_irqs;
+
+#ifdef CONFIG_RFS_ACCEL
+ if (efx->interrupt_mode == EF4_INT_MODE_MSIX &&
+ channel->channel < efx->n_rx_channels) {
+ rc = irq_cpu_rmap_add(efx->net_dev->rx_cpu_rmap,
+ channel->irq);
+ if (rc)
+ goto fail2;
+ }
+#endif
+ }
+
+ return 0;
+
+ fail2:
+#ifdef CONFIG_RFS_ACCEL
+ free_irq_cpu_rmap(efx->net_dev->rx_cpu_rmap);
+ efx->net_dev->rx_cpu_rmap = NULL;
+#endif
+ ef4_for_each_channel(channel, efx) {
+ if (n_irqs-- == 0)
+ break;
+ free_irq(channel->irq, &efx->msi_context[channel->channel]);
+ }
+ fail1:
+ return rc;
+}
+
+void ef4_nic_fini_interrupt(struct ef4_nic *efx)
+{
+ struct ef4_channel *channel;
+
+#ifdef CONFIG_RFS_ACCEL
+ free_irq_cpu_rmap(efx->net_dev->rx_cpu_rmap);
+ efx->net_dev->rx_cpu_rmap = NULL;
+#endif
+
+ if (EF4_INT_MODE_USE_MSI(efx)) {
+ /* Disable MSI/MSI-X interrupts */
+ ef4_for_each_channel(channel, efx)
+ free_irq(channel->irq,
+ &efx->msi_context[channel->channel]);
+ } else {
+ /* Disable legacy interrupt */
+ free_irq(efx->legacy_irq, efx);
+ }
+}
+
+/* Register dump */
+
+#define REGISTER_REVISION_FA 1
+#define REGISTER_REVISION_FB 2
+#define REGISTER_REVISION_FC 3
+#define REGISTER_REVISION_FZ 3 /* last Falcon arch revision */
+#define REGISTER_REVISION_ED 4
+#define REGISTER_REVISION_EZ 4 /* latest EF10 revision */
+
+struct ef4_nic_reg {
+ u32 offset:24;
+ u32 min_revision:3, max_revision:3;
+};
+
+#define REGISTER(name, arch, min_rev, max_rev) { \
+ arch ## R_ ## min_rev ## max_rev ## _ ## name, \
+ REGISTER_REVISION_ ## arch ## min_rev, \
+ REGISTER_REVISION_ ## arch ## max_rev \
+}
+#define REGISTER_AA(name) REGISTER(name, F, A, A)
+#define REGISTER_AB(name) REGISTER(name, F, A, B)
+#define REGISTER_AZ(name) REGISTER(name, F, A, Z)
+#define REGISTER_BB(name) REGISTER(name, F, B, B)
+#define REGISTER_BZ(name) REGISTER(name, F, B, Z)
+#define REGISTER_CZ(name) REGISTER(name, F, C, Z)
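+
+/* Expansion example (editorial): REGISTER_AB(GM_CFG1) token-pastes to
+ * { FR_AB_GM_CFG1, REGISTER_REVISION_FA, REGISTER_REVISION_FB },
+ * i.e. the register offset plus its [min, max] revision range (1..2).
+ */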
+
+static const struct ef4_nic_reg ef4_nic_regs[] = {
+ REGISTER_AZ(ADR_REGION),
+ REGISTER_AZ(INT_EN_KER),
+ REGISTER_BZ(INT_EN_CHAR),
+ REGISTER_AZ(INT_ADR_KER),
+ REGISTER_BZ(INT_ADR_CHAR),
+ /* INT_ACK_KER is WO */
+ /* INT_ISR0 is RC */
+ REGISTER_AZ(HW_INIT),
+ REGISTER_CZ(USR_EV_CFG),
+ REGISTER_AB(EE_SPI_HCMD),
+ REGISTER_AB(EE_SPI_HADR),
+ REGISTER_AB(EE_SPI_HDATA),
+ REGISTER_AB(EE_BASE_PAGE),
+ REGISTER_AB(EE_VPD_CFG0),
+ /* EE_VPD_SW_CNTL and EE_VPD_SW_DATA are not used */
+ /* PMBX_DBG_IADDR and PBMX_DBG_IDATA are indirect */
+ /* PCIE_CORE_INDIRECT is indirect */
+ REGISTER_AB(NIC_STAT),
+ REGISTER_AB(GPIO_CTL),
+ REGISTER_AB(GLB_CTL),
+ /* FATAL_INTR_KER and FATAL_INTR_CHAR are partly RC */
+ REGISTER_BZ(DP_CTRL),
+ REGISTER_AZ(MEM_STAT),
+ REGISTER_AZ(CS_DEBUG),
+ REGISTER_AZ(ALTERA_BUILD),
+ REGISTER_AZ(CSR_SPARE),
+ REGISTER_AB(PCIE_SD_CTL0123),
+ REGISTER_AB(PCIE_SD_CTL45),
+ REGISTER_AB(PCIE_PCS_CTL_STAT),
+ /* DEBUG_DATA_OUT is not used */
+ /* DRV_EV is WO */
+ REGISTER_AZ(EVQ_CTL),
+ REGISTER_AZ(EVQ_CNT1),
+ REGISTER_AZ(EVQ_CNT2),
+ REGISTER_AZ(BUF_TBL_CFG),
+ REGISTER_AZ(SRM_RX_DC_CFG),
+ REGISTER_AZ(SRM_TX_DC_CFG),
+ REGISTER_AZ(SRM_CFG),
+ /* BUF_TBL_UPD is WO */
+ REGISTER_AZ(SRM_UPD_EVQ),
+ REGISTER_AZ(SRAM_PARITY),
+ REGISTER_AZ(RX_CFG),
+ REGISTER_BZ(RX_FILTER_CTL),
+ /* RX_FLUSH_DESCQ is WO */
+ REGISTER_AZ(RX_DC_CFG),
+ REGISTER_AZ(RX_DC_PF_WM),
+ REGISTER_BZ(RX_RSS_TKEY),
+ /* RX_NODESC_DROP is RC */
+ REGISTER_AA(RX_SELF_RST),
+ /* RX_DEBUG, RX_PUSH_DROP are not used */
+ REGISTER_CZ(RX_RSS_IPV6_REG1),
+ REGISTER_CZ(RX_RSS_IPV6_REG2),
+ REGISTER_CZ(RX_RSS_IPV6_REG3),
+ /* TX_FLUSH_DESCQ is WO */
+ REGISTER_AZ(TX_DC_CFG),
+ REGISTER_AA(TX_CHKSM_CFG),
+ REGISTER_AZ(TX_CFG),
+ /* TX_PUSH_DROP is not used */
+ REGISTER_AZ(TX_RESERVED),
+ REGISTER_BZ(TX_PACE),
+ /* TX_PACE_DROP_QID is RC */
+ REGISTER_BB(TX_VLAN),
+ REGISTER_BZ(TX_IPFIL_PORTEN),
+ REGISTER_AB(MD_TXD),
+ REGISTER_AB(MD_RXD),
+ REGISTER_AB(MD_CS),
+ REGISTER_AB(MD_PHY_ADR),
+ REGISTER_AB(MD_ID),
+ /* MD_STAT is RC */
+ REGISTER_AB(MAC_STAT_DMA),
+ REGISTER_AB(MAC_CTRL),
+ REGISTER_BB(GEN_MODE),
+ REGISTER_AB(MAC_MC_HASH_REG0),
+ REGISTER_AB(MAC_MC_HASH_REG1),
+ REGISTER_AB(GM_CFG1),
+ REGISTER_AB(GM_CFG2),
+ /* GM_IPG and GM_HD are not used */
+ REGISTER_AB(GM_MAX_FLEN),
+ /* GM_TEST is not used */
+ REGISTER_AB(GM_ADR1),
+ REGISTER_AB(GM_ADR2),
+ REGISTER_AB(GMF_CFG0),
+ REGISTER_AB(GMF_CFG1),
+ REGISTER_AB(GMF_CFG2),
+ REGISTER_AB(GMF_CFG3),
+ REGISTER_AB(GMF_CFG4),
+ REGISTER_AB(GMF_CFG5),
+ REGISTER_BB(TX_SRC_MAC_CTL),
+ REGISTER_AB(XM_ADR_LO),
+ REGISTER_AB(XM_ADR_HI),
+ REGISTER_AB(XM_GLB_CFG),
+ REGISTER_AB(XM_TX_CFG),
+ REGISTER_AB(XM_RX_CFG),
+ REGISTER_AB(XM_MGT_INT_MASK),
+ REGISTER_AB(XM_FC),
+ REGISTER_AB(XM_PAUSE_TIME),
+ REGISTER_AB(XM_TX_PARAM),
+ REGISTER_AB(XM_RX_PARAM),
+ /* XM_MGT_INT_MSK (note no 'A') is RC */
+ REGISTER_AB(XX_PWR_RST),
+ REGISTER_AB(XX_SD_CTL),
+ REGISTER_AB(XX_TXDRV_CTL),
+ /* XX_PRBS_CTL, XX_PRBS_CHK and XX_PRBS_ERR are not used */
+ /* XX_CORE_STAT is partly RC */
+};
+
+struct ef4_nic_reg_table {
+ u32 offset:24;
+ u32 min_revision:3, max_revision:3;
+ u32 step:6, rows:21;
+};
+
+#define REGISTER_TABLE_DIMENSIONS(_, offset, arch, min_rev, max_rev, step, rows) { \
+ offset, \
+ REGISTER_REVISION_ ## arch ## min_rev, \
+ REGISTER_REVISION_ ## arch ## max_rev, \
+ step, rows \
+}
+#define REGISTER_TABLE(name, arch, min_rev, max_rev) \
+ REGISTER_TABLE_DIMENSIONS( \
+ name, arch ## R_ ## min_rev ## max_rev ## _ ## name, \
+ arch, min_rev, max_rev, \
+ arch ## R_ ## min_rev ## max_rev ## _ ## name ## _STEP, \
+ arch ## R_ ## min_rev ## max_rev ## _ ## name ## _ROWS)
+#define REGISTER_TABLE_AA(name) REGISTER_TABLE(name, F, A, A)
+#define REGISTER_TABLE_AZ(name) REGISTER_TABLE(name, F, A, Z)
+#define REGISTER_TABLE_BB(name) REGISTER_TABLE(name, F, B, B)
+#define REGISTER_TABLE_BZ(name) REGISTER_TABLE(name, F, B, Z)
+#define REGISTER_TABLE_BB_CZ(name) \
+ REGISTER_TABLE_DIMENSIONS(name, FR_BZ_ ## name, F, B, B, \
+ FR_BZ_ ## name ## _STEP, \
+ FR_BB_ ## name ## _ROWS), \
+ REGISTER_TABLE_DIMENSIONS(name, FR_BZ_ ## name, F, C, Z, \
+ FR_BZ_ ## name ## _STEP, \
+ FR_CZ_ ## name ## _ROWS)
+#define REGISTER_TABLE_CZ(name) REGISTER_TABLE(name, F, C, Z)
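+
+/* Expansion example (editorial): REGISTER_TABLE_BZ(RX_INDIRECTION_TBL)
+ * becomes REGISTER_TABLE_DIMENSIONS(RX_INDIRECTION_TBL,
+ * FR_BZ_RX_INDIRECTION_TBL, F, B, Z, FR_BZ_RX_INDIRECTION_TBL_STEP,
+ * FR_BZ_RX_INDIRECTION_TBL_ROWS), pulling the step and row count from
+ * the generated register definitions.
+ */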
+
+static const struct ef4_nic_reg_table ef4_nic_reg_tables[] = {
+ /* DRIVER is not used */
+ /* EVQ_RPTR, TIMER_COMMAND, USR_EV and {RX,TX}_DESC_UPD are WO */
+ REGISTER_TABLE_BB(TX_IPFIL_TBL),
+ REGISTER_TABLE_BB(TX_SRC_MAC_TBL),
+ REGISTER_TABLE_AA(RX_DESC_PTR_TBL_KER),
+ REGISTER_TABLE_BB_CZ(RX_DESC_PTR_TBL),
+ REGISTER_TABLE_AA(TX_DESC_PTR_TBL_KER),
+ REGISTER_TABLE_BB_CZ(TX_DESC_PTR_TBL),
+ REGISTER_TABLE_AA(EVQ_PTR_TBL_KER),
+ REGISTER_TABLE_BB_CZ(EVQ_PTR_TBL),
+ /* We can't reasonably read all of the buffer table (up to 8MB!).
+ * However this driver will only use a few entries. Reading
+ * 1K entries allows for some expansion of queue count and
+ * size before we need to change the version. */
+ REGISTER_TABLE_DIMENSIONS(BUF_FULL_TBL_KER, FR_AA_BUF_FULL_TBL_KER,
+ F, A, A, 8, 1024),
+ REGISTER_TABLE_DIMENSIONS(BUF_FULL_TBL, FR_BZ_BUF_FULL_TBL,
+ F, B, Z, 8, 1024),
+ REGISTER_TABLE_CZ(RX_MAC_FILTER_TBL0),
+ REGISTER_TABLE_BB_CZ(TIMER_TBL),
+ REGISTER_TABLE_BB_CZ(TX_PACE_TBL),
+ REGISTER_TABLE_BZ(RX_INDIRECTION_TBL),
+ /* TX_FILTER_TBL0 is huge and not used by this driver */
+ REGISTER_TABLE_CZ(TX_MAC_FILTER_TBL0),
+ REGISTER_TABLE_CZ(MC_TREG_SMEM),
+ /* MSIX_PBA_TABLE is not mapped */
+ /* SRM_DBG is not mapped (and is redundant with BUF_FLL_TBL) */
+ REGISTER_TABLE_BZ(RX_FILTER_TBL0),
+};
+
+size_t ef4_nic_get_regs_len(struct ef4_nic *efx)
+{
+ const struct ef4_nic_reg *reg;
+ const struct ef4_nic_reg_table *table;
+ size_t len = 0;
+
+ for (reg = ef4_nic_regs;
+ reg < ef4_nic_regs + ARRAY_SIZE(ef4_nic_regs);
+ reg++)
+ if (efx->type->revision >= reg->min_revision &&
+ efx->type->revision <= reg->max_revision)
+ len += sizeof(ef4_oword_t);
+
+ for (table = ef4_nic_reg_tables;
+ table < ef4_nic_reg_tables + ARRAY_SIZE(ef4_nic_reg_tables);
+ table++)
+ if (efx->type->revision >= table->min_revision &&
+ efx->type->revision <= table->max_revision)
+ len += table->rows * min_t(size_t, table->step, 16);
+
+ return len;
+}
+
+void ef4_nic_get_regs(struct ef4_nic *efx, void *buf)
+{
+ const struct ef4_nic_reg *reg;
+ const struct ef4_nic_reg_table *table;
+
+ for (reg = ef4_nic_regs;
+ reg < ef4_nic_regs + ARRAY_SIZE(ef4_nic_regs);
+ reg++) {
+ if (efx->type->revision >= reg->min_revision &&
+ efx->type->revision <= reg->max_revision) {
+ ef4_reado(efx, (ef4_oword_t *)buf, reg->offset);
+ buf += sizeof(ef4_oword_t);
+ }
+ }
+
+ for (table = ef4_nic_reg_tables;
+ table < ef4_nic_reg_tables + ARRAY_SIZE(ef4_nic_reg_tables);
+ table++) {
+ size_t size, i;
+
+ if (!(efx->type->revision >= table->min_revision &&
+ efx->type->revision <= table->max_revision))
+ continue;
+
+ size = min_t(size_t, table->step, 16);
+
+ for (i = 0; i < table->rows; i++) {
+ switch (table->step) {
+ case 4: /* 32-bit SRAM */
+ ef4_readd(efx, buf, table->offset + 4 * i);
+ break;
+ case 8: /* 64-bit SRAM */
+ ef4_sram_readq(efx,
+ efx->membase + table->offset,
+ buf, i);
+ break;
+ case 16: /* 128-bit-readable register */
+ ef4_reado_table(efx, buf, table->offset, i);
+ break;
+ case 32: /* 128-bit register, interleaved */
+ ef4_reado_table(efx, buf, table->offset, 2 * i);
+ break;
+ default:
+ WARN_ON(1);
+ return;
+ }
+ buf += size;
+ }
+ }
+}
+
+/**
+ * ef4_nic_describe_stats - Describe supported statistics for ethtool
+ * @desc: Array of &struct ef4_hw_stat_desc describing the statistics
+ * @count: Length of the @desc array
+ * @mask: Bitmask of which elements of @desc are enabled
+ * @names: Buffer to copy names to, or %NULL. The names are copied
+ * starting at intervals of %ETH_GSTRING_LEN bytes.
+ *
+ * Returns the number of visible statistics, i.e. the number of set
+ * bits in the first @count bits of @mask for which a name is defined.
+ */
+size_t ef4_nic_describe_stats(const struct ef4_hw_stat_desc *desc, size_t count,
+ const unsigned long *mask, u8 *names)
+{
+ size_t visible = 0;
+ size_t index;
+
+ for_each_set_bit(index, mask, count) {
+ if (desc[index].name) {
+ if (names) {
+ strlcpy(names, desc[index].name,
+ ETH_GSTRING_LEN);
+ names += ETH_GSTRING_LEN;
+ }
+ ++visible;
+ }
+ }
+
+ return visible;
+}
+
+/**
+ * ef4_nic_update_stats - Convert statistics DMA buffer to array of u64
+ * @desc: Array of &struct ef4_hw_stat_desc describing the DMA buffer
+ * layout. DMA widths of 0, 16, 32 and 64 are supported; where
+ * the width is specified as 0 the corresponding element of
+ * @stats is not updated.
+ * @count: Length of the @desc array
+ * @mask: Bitmask of which elements of @desc are enabled
+ * @stats: Buffer to update with the converted statistics. The length
+ * of this array must be at least @count.
+ * @dma_buf: DMA buffer containing hardware statistics
+ * @accumulate: If set, the converted values will be added rather than
+ * directly stored to the corresponding elements of @stats
+ */
+void ef4_nic_update_stats(const struct ef4_hw_stat_desc *desc, size_t count,
+ const unsigned long *mask,
+ u64 *stats, const void *dma_buf, bool accumulate)
+{
+ size_t index;
+
+ for_each_set_bit(index, mask, count) {
+ if (desc[index].dma_width) {
+ const void *addr = dma_buf + desc[index].offset;
+ u64 val;
+
+ switch (desc[index].dma_width) {
+ case 16:
+ val = le16_to_cpup((__le16 *)addr);
+ break;
+ case 32:
+ val = le32_to_cpup((__le32 *)addr);
+ break;
+ case 64:
+ val = le64_to_cpup((__le64 *)addr);
+ break;
+ default:
+ WARN_ON(1);
+ val = 0;
+ break;
+ }
+
+ if (accumulate)
+ stats[index] += val;
+ else
+ stats[index] = val;
+ }
+ }
+}
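+
+/* Illustrative ethtool flow (editorial sketch; variable names are
+ * hypothetical): a NIC type's describe_stats/update_stats hooks typically
+ * wrap the two helpers above:
+ *
+ *	n = ef4_nic_describe_stats(desc, count, mask, NULL);  (sset count)
+ *	ef4_nic_describe_stats(desc, count, mask, names);     (strings)
+ *	ef4_nic_update_stats(desc, count, mask, stats, dma_buf, false);
+ */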
+
+void ef4_nic_fix_nodesc_drop_stat(struct ef4_nic *efx, u64 *rx_nodesc_drops)
+{
+ /* if down, or this is the first update after coming up */
+ if (!(efx->net_dev->flags & IFF_UP) || !efx->rx_nodesc_drops_prev_state)
+ efx->rx_nodesc_drops_while_down +=
+ *rx_nodesc_drops - efx->rx_nodesc_drops_total;
+ efx->rx_nodesc_drops_total = *rx_nodesc_drops;
+ efx->rx_nodesc_drops_prev_state = !!(efx->net_dev->flags & IFF_UP);
+ *rx_nodesc_drops -= efx->rx_nodesc_drops_while_down;
+}
diff --git a/drivers/net/ethernet/sfc/falcon/nic.h b/drivers/net/ethernet/sfc/falcon/nic.h
new file mode 100644
index 000000000000..a4c4592f6023
--- /dev/null
+++ b/drivers/net/ethernet/sfc/falcon/nic.h
@@ -0,0 +1,513 @@
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2005-2006 Fen Systems Ltd.
+ * Copyright 2006-2013 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#ifndef EF4_NIC_H
+#define EF4_NIC_H
+
+#include <linux/net_tstamp.h>
+#include <linux/i2c-algo-bit.h>
+#include "net_driver.h"
+#include "efx.h"
+
+enum {
+ EF4_REV_FALCON_A0 = 0,
+ EF4_REV_FALCON_A1 = 1,
+ EF4_REV_FALCON_B0 = 2,
+};
+
+static inline int ef4_nic_rev(struct ef4_nic *efx)
+{
+ return efx->type->revision;
+}
+
+u32 ef4_farch_fpga_ver(struct ef4_nic *efx);
+
+/* NIC has two interlinked PCI functions for the same port. */
+static inline bool ef4_nic_is_dual_func(struct ef4_nic *efx)
+{
+ return ef4_nic_rev(efx) < EF4_REV_FALCON_B0;
+}
+
+/* Read the current event from the event queue */
+static inline ef4_qword_t *ef4_event(struct ef4_channel *channel,
+ unsigned int index)
+{
+ return ((ef4_qword_t *) (channel->eventq.buf.addr)) +
+ (index & channel->eventq_mask);
+}
+
+/* See if an event is present
+ *
+ * We check both the high and low dword of the event for all ones. We
+ * wrote all ones when we cleared the event, and no valid event can
+ * have all ones in either its high or low dwords. This approach is
+ * robust against reordering.
+ *
+ * Note that using a single 64-bit comparison is incorrect; even
+ * though the CPU read will be atomic, the DMA write may not be.
+ */
+static inline int ef4_event_present(ef4_qword_t *event)
+{
+ return !(EF4_DWORD_IS_ALL_ONES(event->dword[0]) |
+ EF4_DWORD_IS_ALL_ONES(event->dword[1]));
+}
+
+/* Returns a pointer to the specified transmit descriptor in the TX
+ * descriptor queue belonging to the specified channel.
+ */
+static inline ef4_qword_t *
+ef4_tx_desc(struct ef4_tx_queue *tx_queue, unsigned int index)
+{
+ return ((ef4_qword_t *) (tx_queue->txd.buf.addr)) + index;
+}
+
+/* Get partner of a TX queue, seen as part of the same net core queue */
+static inline struct ef4_tx_queue *ef4_tx_queue_partner(struct ef4_tx_queue *tx_queue)
+{
+ if (tx_queue->queue & EF4_TXQ_TYPE_OFFLOAD)
+ return tx_queue - EF4_TXQ_TYPE_OFFLOAD;
+ else
+ return tx_queue + EF4_TXQ_TYPE_OFFLOAD;
+}
+
+/* Report whether this TX queue would be empty for the given write_count.
+ * May return false negative.
+ */
+static inline bool __ef4_nic_tx_is_empty(struct ef4_tx_queue *tx_queue,
+ unsigned int write_count)
+{
+ unsigned int empty_read_count = ACCESS_ONCE(tx_queue->empty_read_count);
+
+ if (empty_read_count == 0)
+ return false;
+
+ return ((empty_read_count ^ write_count) & ~EF4_EMPTY_COUNT_VALID) == 0;
+}
+
+/* Decide whether to push a TX descriptor to the NIC vs merely writing
+ * the doorbell. This can reduce latency when we are adding a single
+ * descriptor to an empty queue, but is otherwise pointless. Further,
+ * Falcon and Siena have hardware bugs (SF bug 33851) that may be
+ * triggered if we don't check this.
+ * We use the write_count used for the last doorbell push, to get the
+ * NIC's view of the tx queue.
+ */
+static inline bool ef4_nic_may_push_tx_desc(struct ef4_tx_queue *tx_queue,
+ unsigned int write_count)
+{
+ bool was_empty = __ef4_nic_tx_is_empty(tx_queue, write_count);
+
+ tx_queue->empty_read_count = 0;
+ return was_empty && tx_queue->write_count - write_count == 1;
+}
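+
+/* Illustrative caller sketch (editorial, simplified from the farch TX
+ * write path):
+ *
+ *	unsigned old_write_count = tx_queue->write_count;
+ *	... fill descriptors and advance tx_queue->write_count ...
+ *	if (ef4_nic_may_push_tx_desc(tx_queue, old_write_count))
+ *		... write the first descriptor along with the doorbell ...
+ *	else
+ *		... ring the doorbell only ...
+ */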
+
+/* Returns a pointer to the specified descriptor in the RX descriptor queue */
+static inline ef4_qword_t *
+ef4_rx_desc(struct ef4_rx_queue *rx_queue, unsigned int index)
+{
+ return ((ef4_qword_t *) (rx_queue->rxd.buf.addr)) + index;
+}
+
+enum {
+ PHY_TYPE_NONE = 0,
+ PHY_TYPE_TXC43128 = 1,
+ PHY_TYPE_88E1111 = 2,
+ PHY_TYPE_SFX7101 = 3,
+ PHY_TYPE_QT2022C2 = 4,
+ PHY_TYPE_PM8358 = 6,
+ PHY_TYPE_SFT9001A = 8,
+ PHY_TYPE_QT2025C = 9,
+ PHY_TYPE_SFT9001B = 10,
+};
+
+#define FALCON_XMAC_LOOPBACKS \
+ ((1 << LOOPBACK_XGMII) | \
+ (1 << LOOPBACK_XGXS) | \
+ (1 << LOOPBACK_XAUI))
+
+/* Alignment of PCIe DMA boundaries (4KB) */
+#define EF4_PAGE_SIZE 4096
+/* Size and alignment of buffer table entries (same) */
+#define EF4_BUF_SIZE EF4_PAGE_SIZE
+
+/* NIC-generic software stats */
+enum {
+ GENERIC_STAT_rx_noskb_drops,
+ GENERIC_STAT_rx_nodesc_trunc,
+ GENERIC_STAT_COUNT
+};
+
+/**
+ * struct falcon_board_type - board operations and type information
+ * @id: Board type id, as found in NVRAM
+ * @init: Allocate resources and initialise peripheral hardware
+ * @init_phy: Do board-specific PHY initialisation
+ * @fini: Shut down hardware and free resources
+ * @set_id_led: Set state of identifying LED or revert to automatic function
+ * @monitor: Board-specific health check function
+ */
+struct falcon_board_type {
+ u8 id;
+ int (*init) (struct ef4_nic *nic);
+ void (*init_phy) (struct ef4_nic *efx);
+ void (*fini) (struct ef4_nic *nic);
+ void (*set_id_led) (struct ef4_nic *efx, enum ef4_led_mode mode);
+ int (*monitor) (struct ef4_nic *nic);
+};
+
+/**
+ * struct falcon_board - board information
+ * @type: Type of board
+ * @major: Major rev. ('A', 'B' ...)
+ * @minor: Minor rev. (0, 1, ...)
+ * @i2c_adap: I2C adapter for on-board peripherals
+ * @i2c_data: Data for bit-banging algorithm
+ * @hwmon_client: I2C client for hardware monitor
+ * @ioexp_client: I2C client for power/port control
+ */
+struct falcon_board {
+ const struct falcon_board_type *type;
+ int major;
+ int minor;
+ struct i2c_adapter i2c_adap;
+ struct i2c_algo_bit_data i2c_data;
+ struct i2c_client *hwmon_client, *ioexp_client;
+};
+
+/**
+ * struct falcon_spi_device - a Falcon SPI (Serial Peripheral Interface) device
+ * @device_id: Controller's id for the device
+ * @size: Size (in bytes)
+ * @addr_len: Number of address bytes in read/write commands
+ * @munge_address: Flag whether addresses should be munged.
+ * Some devices with 9-bit addresses (e.g. AT25040A EEPROM)
+ * use bit 3 of the command byte as address bit A8, rather
+ * than having a two-byte address. If this flag is set, then
+ * commands should be munged in this way.
+ * @erase_command: Erase command (or 0 if sector erase not needed).
+ * @erase_size: Erase sector size (in bytes)
+ * Erase commands affect sectors with this size and alignment.
+ * This must be a power of two.
+ * @block_size: Write block size (in bytes).
+ * Write commands are limited to blocks with this size and alignment.
+ */
+struct falcon_spi_device {
+ int device_id;
+ unsigned int size;
+ unsigned int addr_len;
+ unsigned int munge_address:1;
+ u8 erase_command;
+ unsigned int erase_size;
+ unsigned int block_size;
+};
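+
+/* Illustrative sketch (editorial; modelled on the driver's SPI command
+ * handling): with @munge_address set, address bit A8 is folded into bit 3
+ * of the command byte rather than sent as a second address byte:
+ *
+ *	command |= ((address >> 8) & spi->munge_address) << 3;
+ */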
+
+static inline bool falcon_spi_present(const struct falcon_spi_device *spi)
+{
+ return spi->size != 0;
+}
+
+enum {
+ FALCON_STAT_tx_bytes = GENERIC_STAT_COUNT,
+ FALCON_STAT_tx_packets,
+ FALCON_STAT_tx_pause,
+ FALCON_STAT_tx_control,
+ FALCON_STAT_tx_unicast,
+ FALCON_STAT_tx_multicast,
+ FALCON_STAT_tx_broadcast,
+ FALCON_STAT_tx_lt64,
+ FALCON_STAT_tx_64,
+ FALCON_STAT_tx_65_to_127,
+ FALCON_STAT_tx_128_to_255,
+ FALCON_STAT_tx_256_to_511,
+ FALCON_STAT_tx_512_to_1023,
+ FALCON_STAT_tx_1024_to_15xx,
+ FALCON_STAT_tx_15xx_to_jumbo,
+ FALCON_STAT_tx_gtjumbo,
+ FALCON_STAT_tx_non_tcpudp,
+ FALCON_STAT_tx_mac_src_error,
+ FALCON_STAT_tx_ip_src_error,
+ FALCON_STAT_rx_bytes,
+ FALCON_STAT_rx_good_bytes,
+ FALCON_STAT_rx_bad_bytes,
+ FALCON_STAT_rx_packets,
+ FALCON_STAT_rx_good,
+ FALCON_STAT_rx_bad,
+ FALCON_STAT_rx_pause,
+ FALCON_STAT_rx_control,
+ FALCON_STAT_rx_unicast,
+ FALCON_STAT_rx_multicast,
+ FALCON_STAT_rx_broadcast,
+ FALCON_STAT_rx_lt64,
+ FALCON_STAT_rx_64,
+ FALCON_STAT_rx_65_to_127,
+ FALCON_STAT_rx_128_to_255,
+ FALCON_STAT_rx_256_to_511,
+ FALCON_STAT_rx_512_to_1023,
+ FALCON_STAT_rx_1024_to_15xx,
+ FALCON_STAT_rx_15xx_to_jumbo,
+ FALCON_STAT_rx_gtjumbo,
+ FALCON_STAT_rx_bad_lt64,
+ FALCON_STAT_rx_bad_gtjumbo,
+ FALCON_STAT_rx_overflow,
+ FALCON_STAT_rx_symbol_error,
+ FALCON_STAT_rx_align_error,
+ FALCON_STAT_rx_length_error,
+ FALCON_STAT_rx_internal_error,
+ FALCON_STAT_rx_nodesc_drop_cnt,
+ FALCON_STAT_COUNT
+};
+
+/**
+ * struct falcon_nic_data - Falcon NIC state
+ * @pci_dev2: Secondary function of Falcon A
+ * @board: Board state and functions
+ * @stats: Hardware statistics
+ * @stats_disable_count: Nest count for disabling statistics fetches
+ * @stats_pending: Is there a pending DMA of MAC statistics?
+ * @stats_timer: A timer for regularly fetching MAC statistics.
+ * @spi_flash: SPI flash device
+ * @spi_eeprom: SPI EEPROM device
+ * @spi_lock: SPI bus lock
+ * @mdio_lock: MDIO bus lock
+ * @xmac_poll_required: XMAC link state needs polling
+ */
+struct falcon_nic_data {
+ struct pci_dev *pci_dev2;
+ struct falcon_board board;
+ u64 stats[FALCON_STAT_COUNT];
+ unsigned int stats_disable_count;
+ bool stats_pending;
+ struct timer_list stats_timer;
+ struct falcon_spi_device spi_flash;
+ struct falcon_spi_device spi_eeprom;
+ struct mutex spi_lock;
+ struct mutex mdio_lock;
+ bool xmac_poll_required;
+};
+
+static inline struct falcon_board *falcon_board(struct ef4_nic *efx)
+{
+ struct falcon_nic_data *data = efx->nic_data;
+ return &data->board;
+}
+
+struct ethtool_ts_info;
+
+extern const struct ef4_nic_type falcon_a1_nic_type;
+extern const struct ef4_nic_type falcon_b0_nic_type;
+
+/**************************************************************************
+ *
+ * Externs
+ *
+ **************************************************************************
+ */
+
+int falcon_probe_board(struct ef4_nic *efx, u16 revision_info);
+
+/* TX data path */
+static inline int ef4_nic_probe_tx(struct ef4_tx_queue *tx_queue)
+{
+ return tx_queue->efx->type->tx_probe(tx_queue);
+}
+static inline void ef4_nic_init_tx(struct ef4_tx_queue *tx_queue)
+{
+ tx_queue->efx->type->tx_init(tx_queue);
+}
+static inline void ef4_nic_remove_tx(struct ef4_tx_queue *tx_queue)
+{
+ tx_queue->efx->type->tx_remove(tx_queue);
+}
+static inline void ef4_nic_push_buffers(struct ef4_tx_queue *tx_queue)
+{
+ tx_queue->efx->type->tx_write(tx_queue);
+}
+
+/* RX data path */
+static inline int ef4_nic_probe_rx(struct ef4_rx_queue *rx_queue)
+{
+ return rx_queue->efx->type->rx_probe(rx_queue);
+}
+static inline void ef4_nic_init_rx(struct ef4_rx_queue *rx_queue)
+{
+ rx_queue->efx->type->rx_init(rx_queue);
+}
+static inline void ef4_nic_remove_rx(struct ef4_rx_queue *rx_queue)
+{
+ rx_queue->efx->type->rx_remove(rx_queue);
+}
+static inline void ef4_nic_notify_rx_desc(struct ef4_rx_queue *rx_queue)
+{
+ rx_queue->efx->type->rx_write(rx_queue);
+}
+static inline void ef4_nic_generate_fill_event(struct ef4_rx_queue *rx_queue)
+{
+ rx_queue->efx->type->rx_defer_refill(rx_queue);
+}
+
+/* Event data path */
+static inline int ef4_nic_probe_eventq(struct ef4_channel *channel)
+{
+ return channel->efx->type->ev_probe(channel);
+}
+static inline int ef4_nic_init_eventq(struct ef4_channel *channel)
+{
+ return channel->efx->type->ev_init(channel);
+}
+static inline void ef4_nic_fini_eventq(struct ef4_channel *channel)
+{
+ channel->efx->type->ev_fini(channel);
+}
+static inline void ef4_nic_remove_eventq(struct ef4_channel *channel)
+{
+ channel->efx->type->ev_remove(channel);
+}
+static inline int
+ef4_nic_process_eventq(struct ef4_channel *channel, int quota)
+{
+ return channel->efx->type->ev_process(channel, quota);
+}
+static inline void ef4_nic_eventq_read_ack(struct ef4_channel *channel)
+{
+ channel->efx->type->ev_read_ack(channel);
+}
+void ef4_nic_event_test_start(struct ef4_channel *channel);
+
+/* queue operations */
+int ef4_farch_tx_probe(struct ef4_tx_queue *tx_queue);
+void ef4_farch_tx_init(struct ef4_tx_queue *tx_queue);
+void ef4_farch_tx_fini(struct ef4_tx_queue *tx_queue);
+void ef4_farch_tx_remove(struct ef4_tx_queue *tx_queue);
+void ef4_farch_tx_write(struct ef4_tx_queue *tx_queue);
+unsigned int ef4_farch_tx_limit_len(struct ef4_tx_queue *tx_queue,
+ dma_addr_t dma_addr, unsigned int len);
+int ef4_farch_rx_probe(struct ef4_rx_queue *rx_queue);
+void ef4_farch_rx_init(struct ef4_rx_queue *rx_queue);
+void ef4_farch_rx_fini(struct ef4_rx_queue *rx_queue);
+void ef4_farch_rx_remove(struct ef4_rx_queue *rx_queue);
+void ef4_farch_rx_write(struct ef4_rx_queue *rx_queue);
+void ef4_farch_rx_defer_refill(struct ef4_rx_queue *rx_queue);
+int ef4_farch_ev_probe(struct ef4_channel *channel);
+int ef4_farch_ev_init(struct ef4_channel *channel);
+void ef4_farch_ev_fini(struct ef4_channel *channel);
+void ef4_farch_ev_remove(struct ef4_channel *channel);
+int ef4_farch_ev_process(struct ef4_channel *channel, int quota);
+void ef4_farch_ev_read_ack(struct ef4_channel *channel);
+void ef4_farch_ev_test_generate(struct ef4_channel *channel);
+
+/* filter operations */
+int ef4_farch_filter_table_probe(struct ef4_nic *efx);
+void ef4_farch_filter_table_restore(struct ef4_nic *efx);
+void ef4_farch_filter_table_remove(struct ef4_nic *efx);
+void ef4_farch_filter_update_rx_scatter(struct ef4_nic *efx);
+s32 ef4_farch_filter_insert(struct ef4_nic *efx, struct ef4_filter_spec *spec,
+ bool replace);
+int ef4_farch_filter_remove_safe(struct ef4_nic *efx,
+ enum ef4_filter_priority priority,
+ u32 filter_id);
+int ef4_farch_filter_get_safe(struct ef4_nic *efx,
+ enum ef4_filter_priority priority, u32 filter_id,
+ struct ef4_filter_spec *);
+int ef4_farch_filter_clear_rx(struct ef4_nic *efx,
+ enum ef4_filter_priority priority);
+u32 ef4_farch_filter_count_rx_used(struct ef4_nic *efx,
+ enum ef4_filter_priority priority);
+u32 ef4_farch_filter_get_rx_id_limit(struct ef4_nic *efx);
+s32 ef4_farch_filter_get_rx_ids(struct ef4_nic *efx,
+ enum ef4_filter_priority priority, u32 *buf,
+ u32 size);
+#ifdef CONFIG_RFS_ACCEL
+s32 ef4_farch_filter_rfs_insert(struct ef4_nic *efx,
+ struct ef4_filter_spec *spec);
+bool ef4_farch_filter_rfs_expire_one(struct ef4_nic *efx, u32 flow_id,
+ unsigned int index);
+#endif
+void ef4_farch_filter_sync_rx_mode(struct ef4_nic *efx);
+
+bool ef4_nic_event_present(struct ef4_channel *channel);
+
+/* Some statistics are computed as A - B where A and B each increase
+ * linearly with some hardware counter(s) and the counters are read
+ * asynchronously. If the counters contributing to B are always read
+ * after those contributing to A, the computed value may be lower than
+ * the true value by some variable amount, and may decrease between
+ * subsequent computations.
+ *
+ * We should never allow statistics to decrease or to exceed the true
+ * value. Since the computed value will never be greater than the
+ * true value, we can achieve this by only storing the computed value
+ * when it increases.
+ */
+static inline void ef4_update_diff_stat(u64 *stat, u64 diff)
+{
+ if ((s64)(diff - *stat) > 0)
+ *stat = diff;
+}
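+
+/* Editor's sketch (not part of this patch): a stats-refresh path with two
+ * hypothetical hardware counters might use this as
+ *
+ *	u64 diff = hw_tx_good - hw_tx_errors;
+ *	ef4_update_diff_stat(&stats[TX_GOOD_IDX], diff);
+ *
+ * so the reported value only ever moves forward; the (s64) cast keeps
+ * the comparison well-defined across 64-bit wrap.
+ */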
+
+/* Interrupts */
+int ef4_nic_init_interrupt(struct ef4_nic *efx);
+int ef4_nic_irq_test_start(struct ef4_nic *efx);
+void ef4_nic_fini_interrupt(struct ef4_nic *efx);
+void ef4_farch_irq_enable_master(struct ef4_nic *efx);
+int ef4_farch_irq_test_generate(struct ef4_nic *efx);
+void ef4_farch_irq_disable_master(struct ef4_nic *efx);
+irqreturn_t ef4_farch_msi_interrupt(int irq, void *dev_id);
+irqreturn_t ef4_farch_legacy_interrupt(int irq, void *dev_id);
+irqreturn_t ef4_farch_fatal_interrupt(struct ef4_nic *efx);
+
+static inline int ef4_nic_event_test_irq_cpu(struct ef4_channel *channel)
+{
+ return ACCESS_ONCE(channel->event_test_cpu);
+}
+static inline int ef4_nic_irq_test_irq_cpu(struct ef4_nic *efx)
+{
+ return ACCESS_ONCE(efx->last_irq_cpu);
+}
+
+/* Global Resources */
+int ef4_nic_flush_queues(struct ef4_nic *efx);
+int ef4_farch_fini_dmaq(struct ef4_nic *efx);
+void ef4_farch_finish_flr(struct ef4_nic *efx);
+void falcon_start_nic_stats(struct ef4_nic *efx);
+void falcon_stop_nic_stats(struct ef4_nic *efx);
+int falcon_reset_xaui(struct ef4_nic *efx);
+void ef4_farch_dimension_resources(struct ef4_nic *efx, unsigned sram_lim_qw);
+void ef4_farch_init_common(struct ef4_nic *efx);
+void ef4_farch_rx_push_indir_table(struct ef4_nic *efx);
+
+int ef4_nic_alloc_buffer(struct ef4_nic *efx, struct ef4_buffer *buffer,
+ unsigned int len, gfp_t gfp_flags);
+void ef4_nic_free_buffer(struct ef4_nic *efx, struct ef4_buffer *buffer);
+
+/* Tests */
+struct ef4_farch_register_test {
+ unsigned address;
+ ef4_oword_t mask;
+};
+int ef4_farch_test_registers(struct ef4_nic *efx,
+ const struct ef4_farch_register_test *regs,
+ size_t n_regs);
+
+size_t ef4_nic_get_regs_len(struct ef4_nic *efx);
+void ef4_nic_get_regs(struct ef4_nic *efx, void *buf);
+
+size_t ef4_nic_describe_stats(const struct ef4_hw_stat_desc *desc, size_t count,
+ const unsigned long *mask, u8 *names);
+void ef4_nic_update_stats(const struct ef4_hw_stat_desc *desc, size_t count,
+ const unsigned long *mask, u64 *stats,
+ const void *dma_buf, bool accumulate);
+void ef4_nic_fix_nodesc_drop_stat(struct ef4_nic *efx, u64 *stat);
+
+#define EF4_MAX_FLUSH_TIME 5000
+
+void ef4_farch_generate_event(struct ef4_nic *efx, unsigned int evq,
+ ef4_qword_t *event);
+
+#endif /* EF4_NIC_H */
diff --git a/drivers/net/ethernet/sfc/phy.h b/drivers/net/ethernet/sfc/falcon/phy.h
index 803bf445c08e..362141cee313 100644
--- a/drivers/net/ethernet/sfc/phy.h
+++ b/drivers/net/ethernet/sfc/falcon/phy.h
@@ -7,20 +7,20 @@
* by the Free Software Foundation, incorporated herein by reference.
*/
-#ifndef EFX_PHY_H
-#define EFX_PHY_H
+#ifndef EF4_PHY_H
+#define EF4_PHY_H
/****************************************************************************
* 10Xpress (SFX7101) PHY
*/
-extern const struct efx_phy_operations falcon_sfx7101_phy_ops;
+extern const struct ef4_phy_operations falcon_sfx7101_phy_ops;
-void tenxpress_set_id_led(struct efx_nic *efx, enum efx_led_mode mode);
+void tenxpress_set_id_led(struct ef4_nic *efx, enum ef4_led_mode mode);
/****************************************************************************
* AMCC/Quake QT202x PHYs
*/
-extern const struct efx_phy_operations falcon_qt202x_phy_ops;
+extern const struct ef4_phy_operations falcon_qt202x_phy_ops;
/* These PHYs provide various H/W control states for LEDs */
#define QUAKE_LED_LINK_INVAL (0)
@@ -34,17 +34,17 @@ extern const struct efx_phy_operations falcon_qt202x_phy_ops;
#define QUAKE_LED_TXLINK (0)
#define QUAKE_LED_RXLINK (8)
-void falcon_qt202x_set_led(struct efx_nic *p, int led, int state);
+void falcon_qt202x_set_led(struct ef4_nic *p, int led, int state);
/****************************************************************************
* Transwitch CX4 retimer
*/
-extern const struct efx_phy_operations falcon_txc_phy_ops;
+extern const struct ef4_phy_operations falcon_txc_phy_ops;
#define TXC_GPIO_DIR_INPUT 0
#define TXC_GPIO_DIR_OUTPUT 1
-void falcon_txc_set_gpio_dir(struct efx_nic *efx, int pin, int dir);
-void falcon_txc_set_gpio_val(struct efx_nic *efx, int pin, int val);
+void falcon_txc_set_gpio_dir(struct ef4_nic *efx, int pin, int dir);
+void falcon_txc_set_gpio_val(struct ef4_nic *efx, int pin, int val);
#endif
diff --git a/drivers/net/ethernet/sfc/qt202x_phy.c b/drivers/net/ethernet/sfc/falcon/qt202x_phy.c
index efa3612affca..d29331652548 100644
--- a/drivers/net/ethernet/sfc/qt202x_phy.c
+++ b/drivers/net/ethernet/sfc/falcon/qt202x_phy.c
@@ -50,14 +50,14 @@
#define PCS_VEND1_REG 0xc000
#define PCS_VEND1_LBTXD_LBN 5
-void falcon_qt202x_set_led(struct efx_nic *p, int led, int mode)
+void falcon_qt202x_set_led(struct ef4_nic *p, int led, int mode)
{
int addr = MDIO_QUAKE_LED0_REG + led;
- efx_mdio_write(p, MDIO_MMD_PMAPMD, addr, mode);
+ ef4_mdio_write(p, MDIO_MMD_PMAPMD, addr, mode);
}
struct qt202x_phy_data {
- enum efx_phy_mode phy_mode;
+ enum ef4_phy_mode phy_mode;
bool bug17190_in_bad_state;
unsigned long bug17190_timer;
u32 firmware_ver;
@@ -73,7 +73,7 @@ struct qt202x_phy_data {
#define BUG17190_INTERVAL (2 * HZ)
-static int qt2025c_wait_heartbeat(struct efx_nic *efx)
+static int qt2025c_wait_heartbeat(struct ef4_nic *efx)
{
unsigned long timeout = jiffies + QT2025C_MAX_HEARTB_TIME;
int reg, old_counter = 0;
@@ -81,7 +81,7 @@ static int qt2025c_wait_heartbeat(struct efx_nic *efx)
/* Wait for firmware heartbeat to start */
for (;;) {
int counter;
- reg = efx_mdio_read(efx, MDIO_MMD_PCS, PCS_FW_HEARTBEAT_REG);
+ reg = ef4_mdio_read(efx, MDIO_MMD_PCS, PCS_FW_HEARTBEAT_REG);
if (reg < 0)
return reg;
counter = ((reg >> PCS_FW_HEARTB_LBN) &
@@ -105,14 +105,14 @@ static int qt2025c_wait_heartbeat(struct efx_nic *efx)
return 0;
}
-static int qt2025c_wait_fw_status_good(struct efx_nic *efx)
+static int qt2025c_wait_fw_status_good(struct ef4_nic *efx)
{
unsigned long timeout = jiffies + QT2025C_MAX_FWSTART_TIME;
int reg;
/* Wait for firmware status to look good */
for (;;) {
- reg = efx_mdio_read(efx, MDIO_MMD_PCS, PCS_UC8051_STATUS_REG);
+ reg = ef4_mdio_read(efx, MDIO_MMD_PCS, PCS_UC8051_STATUS_REG);
if (reg < 0)
return reg;
if ((reg &
@@ -127,15 +127,15 @@ static int qt2025c_wait_fw_status_good(struct efx_nic *efx)
return 0;
}
-static void qt2025c_restart_firmware(struct efx_nic *efx)
+static void qt2025c_restart_firmware(struct ef4_nic *efx)
{
/* Restart microcontroller execution of firmware from RAM */
- efx_mdio_write(efx, 3, 0xe854, 0x00c0);
- efx_mdio_write(efx, 3, 0xe854, 0x0040);
+ ef4_mdio_write(efx, 3, 0xe854, 0x00c0);
+ ef4_mdio_write(efx, 3, 0xe854, 0x0040);
msleep(50);
}
-static int qt2025c_wait_reset(struct efx_nic *efx)
+static int qt2025c_wait_reset(struct ef4_nic *efx)
{
int rc;
@@ -160,14 +160,14 @@ static int qt2025c_wait_reset(struct efx_nic *efx)
return rc;
}
-static void qt2025c_firmware_id(struct efx_nic *efx)
+static void qt2025c_firmware_id(struct ef4_nic *efx)
{
struct qt202x_phy_data *phy_data = efx->phy_data;
u8 firmware_id[9];
size_t i;
for (i = 0; i < sizeof(firmware_id); i++)
- firmware_id[i] = efx_mdio_read(efx, MDIO_MMD_PCS,
+ firmware_id[i] = ef4_mdio_read(efx, MDIO_MMD_PCS,
PCS_FW_PRODUCT_CODE_1 + i);
netif_info(efx, probe, efx->net_dev,
"QT2025C firmware %xr%d v%d.%d.%d.%d [20%02d-%02d-%02d]\n",
@@ -180,7 +180,7 @@ static void qt2025c_firmware_id(struct efx_nic *efx)
(firmware_id[4] << 8) | firmware_id[5];
}
-static void qt2025c_bug17190_workaround(struct efx_nic *efx)
+static void qt2025c_bug17190_workaround(struct ef4_nic *efx)
{
struct qt202x_phy_data *phy_data = efx->phy_data;
@@ -191,7 +191,7 @@ static void qt2025c_bug17190_workaround(struct efx_nic *efx)
* recover it.
*/
if (efx->link_state.up ||
- !efx_mdio_links_ok(efx, MDIO_DEVS_PMAPMD | MDIO_DEVS_PHYXS)) {
+ !ef4_mdio_links_ok(efx, MDIO_DEVS_PMAPMD | MDIO_DEVS_PHYXS)) {
phy_data->bug17190_in_bad_state = false;
return;
}
@@ -204,16 +204,16 @@ static void qt2025c_bug17190_workaround(struct efx_nic *efx)
if (time_after_eq(jiffies, phy_data->bug17190_timer)) {
netif_dbg(efx, hw, efx->net_dev, "bashing QT2025C PMA/PMD\n");
- efx_mdio_set_flag(efx, MDIO_MMD_PMAPMD, MDIO_CTRL1,
+ ef4_mdio_set_flag(efx, MDIO_MMD_PMAPMD, MDIO_CTRL1,
MDIO_PMA_CTRL1_LOOPBACK, true);
msleep(100);
- efx_mdio_set_flag(efx, MDIO_MMD_PMAPMD, MDIO_CTRL1,
+ ef4_mdio_set_flag(efx, MDIO_MMD_PMAPMD, MDIO_CTRL1,
MDIO_PMA_CTRL1_LOOPBACK, false);
phy_data->bug17190_timer = jiffies + BUG17190_INTERVAL;
}
}
-static int qt2025c_select_phy_mode(struct efx_nic *efx)
+static int qt2025c_select_phy_mode(struct ef4_nic *efx)
{
struct qt202x_phy_data *phy_data = efx->phy_data;
struct falcon_board *board = falcon_board(efx);
@@ -233,7 +233,7 @@ static int qt2025c_select_phy_mode(struct efx_nic *efx)
phy_op_mode = (efx->loopback_mode == LOOPBACK_NONE) ? 0x0038 : 0x0020;
/* Only change mode if really necessary */
- reg = efx_mdio_read(efx, 1, 0xc319);
+ reg = ef4_mdio_read(efx, 1, 0xc319);
if ((reg & 0x0038) == phy_op_mode)
return 0;
netif_dbg(efx, hw, efx->net_dev, "Switching PHY to mode 0x%04x\n",
@@ -243,52 +243,52 @@ static int qt2025c_select_phy_mode(struct efx_nic *efx)
* EEPROM (including the differences between board revisions), except
* that the operating mode is changed, and the PHY is prevented from
* unnecessarily reloading the main firmware image again. */
- efx_mdio_write(efx, 1, 0xc300, 0x0000);
+ ef4_mdio_write(efx, 1, 0xc300, 0x0000);
/* (Note: this portion of the boot EEPROM sequence, which bit-bashes 9
* STOPs onto the firmware/module I2C bus to reset it, varies across
* board revisions, as the bus is connected to different GPIO/LED
* outputs on the PHY.) */
if (board->major == 0 && board->minor < 2) {
- efx_mdio_write(efx, 1, 0xc303, 0x4498);
+ ef4_mdio_write(efx, 1, 0xc303, 0x4498);
for (i = 0; i < 9; i++) {
- efx_mdio_write(efx, 1, 0xc303, 0x4488);
- efx_mdio_write(efx, 1, 0xc303, 0x4480);
- efx_mdio_write(efx, 1, 0xc303, 0x4490);
- efx_mdio_write(efx, 1, 0xc303, 0x4498);
+ ef4_mdio_write(efx, 1, 0xc303, 0x4488);
+ ef4_mdio_write(efx, 1, 0xc303, 0x4480);
+ ef4_mdio_write(efx, 1, 0xc303, 0x4490);
+ ef4_mdio_write(efx, 1, 0xc303, 0x4498);
}
} else {
- efx_mdio_write(efx, 1, 0xc303, 0x0920);
- efx_mdio_write(efx, 1, 0xd008, 0x0004);
+ ef4_mdio_write(efx, 1, 0xc303, 0x0920);
+ ef4_mdio_write(efx, 1, 0xd008, 0x0004);
for (i = 0; i < 9; i++) {
- efx_mdio_write(efx, 1, 0xc303, 0x0900);
- efx_mdio_write(efx, 1, 0xd008, 0x0005);
- efx_mdio_write(efx, 1, 0xc303, 0x0920);
- efx_mdio_write(efx, 1, 0xd008, 0x0004);
+ ef4_mdio_write(efx, 1, 0xc303, 0x0900);
+ ef4_mdio_write(efx, 1, 0xd008, 0x0005);
+ ef4_mdio_write(efx, 1, 0xc303, 0x0920);
+ ef4_mdio_write(efx, 1, 0xd008, 0x0004);
}
- efx_mdio_write(efx, 1, 0xc303, 0x4900);
+ ef4_mdio_write(efx, 1, 0xc303, 0x4900);
}
- efx_mdio_write(efx, 1, 0xc303, 0x4900);
- efx_mdio_write(efx, 1, 0xc302, 0x0004);
- efx_mdio_write(efx, 1, 0xc316, 0x0013);
- efx_mdio_write(efx, 1, 0xc318, 0x0054);
- efx_mdio_write(efx, 1, 0xc319, phy_op_mode);
- efx_mdio_write(efx, 1, 0xc31a, 0x0098);
- efx_mdio_write(efx, 3, 0x0026, 0x0e00);
- efx_mdio_write(efx, 3, 0x0027, 0x0013);
- efx_mdio_write(efx, 3, 0x0028, 0xa528);
- efx_mdio_write(efx, 1, 0xd006, 0x000a);
- efx_mdio_write(efx, 1, 0xd007, 0x0009);
- efx_mdio_write(efx, 1, 0xd008, 0x0004);
+ ef4_mdio_write(efx, 1, 0xc303, 0x4900);
+ ef4_mdio_write(efx, 1, 0xc302, 0x0004);
+ ef4_mdio_write(efx, 1, 0xc316, 0x0013);
+ ef4_mdio_write(efx, 1, 0xc318, 0x0054);
+ ef4_mdio_write(efx, 1, 0xc319, phy_op_mode);
+ ef4_mdio_write(efx, 1, 0xc31a, 0x0098);
+ ef4_mdio_write(efx, 3, 0x0026, 0x0e00);
+ ef4_mdio_write(efx, 3, 0x0027, 0x0013);
+ ef4_mdio_write(efx, 3, 0x0028, 0xa528);
+ ef4_mdio_write(efx, 1, 0xd006, 0x000a);
+ ef4_mdio_write(efx, 1, 0xd007, 0x0009);
+ ef4_mdio_write(efx, 1, 0xd008, 0x0004);
/* This additional write is not present in the boot EEPROM. It
* prevents the PHY's internal boot ROM doing another pointless (and
* slow) reload of the firmware image (the microcontroller's code
* memory is not affected by the microcontroller reset). */
- efx_mdio_write(efx, 1, 0xc317, 0x00ff);
+ ef4_mdio_write(efx, 1, 0xc317, 0x00ff);
/* PMA/PMD loopback sets RXIN to inverse polarity and the firmware
* restart doesn't reset it. We need to do that ourselves. */
- efx_mdio_set_flag(efx, 1, PMA_PMD_MODE_REG,
+ ef4_mdio_set_flag(efx, 1, PMA_PMD_MODE_REG,
1 << PMA_PMD_RXIN_SEL_LBN, false);
- efx_mdio_write(efx, 1, 0xc300, 0x0002);
+ ef4_mdio_write(efx, 1, 0xc300, 0x0002);
msleep(20);
/* Restart microcontroller execution of firmware from RAM */
@@ -306,7 +306,7 @@ static int qt2025c_select_phy_mode(struct efx_nic *efx)
return 0;
}
-static int qt202x_reset_phy(struct efx_nic *efx)
+static int qt202x_reset_phy(struct ef4_nic *efx)
{
int rc;
@@ -319,7 +319,7 @@ static int qt202x_reset_phy(struct efx_nic *efx)
} else {
/* Reset the PHYXS MMD. This is documented as doing
* a complete soft reset. */
- rc = efx_mdio_reset_mmd(efx, MDIO_MMD_PHYXS,
+ rc = ef4_mdio_reset_mmd(efx, MDIO_MMD_PHYXS,
QT2022C2_MAX_RESET_TIME /
QT2022C2_RESET_WAIT,
QT2022C2_RESET_WAIT);
@@ -339,7 +339,7 @@ static int qt202x_reset_phy(struct efx_nic *efx)
return rc;
}
-static int qt202x_phy_probe(struct efx_nic *efx)
+static int qt202x_phy_probe(struct ef4_nic *efx)
{
struct qt202x_phy_data *phy_data;
@@ -357,7 +357,7 @@ static int qt202x_phy_probe(struct efx_nic *efx)
return 0;
}
-static int qt202x_phy_init(struct efx_nic *efx)
+static int qt202x_phy_init(struct ef4_nic *efx)
{
u32 devid;
int rc;
@@ -368,11 +368,11 @@ static int qt202x_phy_init(struct efx_nic *efx)
return rc;
}
- devid = efx_mdio_read_id(efx, MDIO_MMD_PHYXS);
+ devid = ef4_mdio_read_id(efx, MDIO_MMD_PHYXS);
netif_info(efx, probe, efx->net_dev,
"PHY ID reg %x (OUI %06x model %02x revision %x)\n",
- devid, efx_mdio_id_oui(devid), efx_mdio_id_model(devid),
- efx_mdio_id_rev(devid));
+ devid, ef4_mdio_id_oui(devid), ef4_mdio_id_model(devid),
+ ef4_mdio_id_rev(devid));
if (efx->phy_type == PHY_TYPE_QT2025C)
qt2025c_firmware_id(efx);
@@ -380,12 +380,12 @@ static int qt202x_phy_init(struct efx_nic *efx)
return 0;
}
-static int qt202x_link_ok(struct efx_nic *efx)
+static int qt202x_link_ok(struct ef4_nic *efx)
{
- return efx_mdio_links_ok(efx, QT202X_REQUIRED_DEVS);
+ return ef4_mdio_links_ok(efx, QT202X_REQUIRED_DEVS);
}
-static bool qt202x_phy_poll(struct efx_nic *efx)
+static bool qt202x_phy_poll(struct ef4_nic *efx)
{
bool was_up = efx->link_state.up;
@@ -400,7 +400,7 @@ static bool qt202x_phy_poll(struct efx_nic *efx)
return efx->link_state.up != was_up;
}
-static int qt202x_phy_reconfigure(struct efx_nic *efx)
+static int qt202x_phy_reconfigure(struct ef4_nic *efx)
{
struct qt202x_phy_data *phy_data = efx->phy_data;
@@ -427,29 +427,29 @@ static int qt202x_phy_reconfigure(struct efx_nic *efx)
(phy_data->phy_mode & PHY_MODE_TX_DISABLED))
qt202x_reset_phy(efx);
- efx_mdio_transmit_disable(efx);
+ ef4_mdio_transmit_disable(efx);
}
- efx_mdio_phy_reconfigure(efx);
+ ef4_mdio_phy_reconfigure(efx);
phy_data->phy_mode = efx->phy_mode;
return 0;
}
-static void qt202x_phy_get_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd)
+static void qt202x_phy_get_settings(struct ef4_nic *efx, struct ethtool_cmd *ecmd)
{
mdio45_ethtool_gset(&efx->mdio, ecmd);
}
-static void qt202x_phy_remove(struct efx_nic *efx)
+static void qt202x_phy_remove(struct ef4_nic *efx)
{
/* Free the context block */
kfree(efx->phy_data);
efx->phy_data = NULL;
}
-static int qt202x_phy_get_module_info(struct efx_nic *efx,
+static int qt202x_phy_get_module_info(struct ef4_nic *efx,
struct ethtool_modinfo *modinfo)
{
modinfo->type = ETH_MODULE_SFF_8079;
@@ -457,10 +457,10 @@ static int qt202x_phy_get_module_info(struct efx_nic *efx,
return 0;
}
-static int qt202x_phy_get_module_eeprom(struct efx_nic *efx,
+static int qt202x_phy_get_module_eeprom(struct ef4_nic *efx,
struct ethtool_eeprom *ee, u8 *data)
{
- int mmd, reg_base, rc, i;
+ int mmd, reg_base, rc, i;
if (efx->phy_type == PHY_TYPE_QT2025C) {
mmd = MDIO_MMD_PCS;
@@ -471,7 +471,7 @@ static int qt202x_phy_get_module_eeprom(struct efx_nic *efx,
}
for (i = 0; i < ee->len; i++) {
- rc = efx_mdio_read(efx, mmd, reg_base + ee->offset + i);
+ rc = ef4_mdio_read(efx, mmd, reg_base + ee->offset + i);
if (rc < 0)
return rc;
data[i] = rc;
@@ -480,16 +480,16 @@ static int qt202x_phy_get_module_eeprom(struct efx_nic *efx,
return 0;
}
-const struct efx_phy_operations falcon_qt202x_phy_ops = {
+const struct ef4_phy_operations falcon_qt202x_phy_ops = {
.probe = qt202x_phy_probe,
.init = qt202x_phy_init,
.reconfigure = qt202x_phy_reconfigure,
.poll = qt202x_phy_poll,
- .fini = efx_port_dummy_op_void,
+ .fini = ef4_port_dummy_op_void,
.remove = qt202x_phy_remove,
.get_settings = qt202x_phy_get_settings,
- .set_settings = efx_mdio_set_settings,
- .test_alive = efx_mdio_test_alive,
+ .set_settings = ef4_mdio_set_settings,
+ .test_alive = ef4_mdio_test_alive,
.get_module_eeprom = qt202x_phy_get_module_eeprom,
.get_module_info = qt202x_phy_get_module_info,
};
diff --git a/drivers/net/ethernet/sfc/falcon/rx.c b/drivers/net/ethernet/sfc/falcon/rx.c
new file mode 100644
index 000000000000..250458cbdb4d
--- /dev/null
+++ b/drivers/net/ethernet/sfc/falcon/rx.c
@@ -0,0 +1,974 @@
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2005-2006 Fen Systems Ltd.
+ * Copyright 2005-2013 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#include <linux/socket.h>
+#include <linux/in.h>
+#include <linux/slab.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/tcp.h>
+#include <linux/udp.h>
+#include <linux/prefetch.h>
+#include <linux/moduleparam.h>
+#include <linux/iommu.h>
+#include <net/ip.h>
+#include <net/checksum.h>
+#include "net_driver.h"
+#include "efx.h"
+#include "filter.h"
+#include "nic.h"
+#include "selftest.h"
+#include "workarounds.h"
+
+/* Preferred number of descriptors to fill at once */
+#define EF4_RX_PREFERRED_BATCH 8U
+
+/* Number of RX buffers to recycle pages for. When creating the RX page recycle
+ * ring, this number is divided by the number of buffers per page to calculate
+ * the number of pages to store in the RX page recycle ring.
+ */
+#define EF4_RECYCLE_RING_SIZE_IOMMU 4096
+#define EF4_RECYCLE_RING_SIZE_NOIOMMU (2 * EF4_RX_PREFERRED_BATCH)
+
+/* Size of buffer allocated for skb header area. */
+#define EF4_SKB_HEADERS 128u
+
+/* This is the percentage fill level below which new RX descriptors
+ * will be added to the RX descriptor ring.
+ */
+static unsigned int rx_refill_threshold;
+
+/* Each packet can consume up to ceil(max_frame_len / buffer_size) buffers */
+#define EF4_RX_MAX_FRAGS DIV_ROUND_UP(EF4_MAX_FRAME_LEN(EF4_MAX_MTU), \
+ EF4_RX_USR_BUF_SIZE)
+
+/*
+ * RX maximum head room required.
+ *
+ * This must be at least 1 to prevent overflow, plus one packet-worth
+ * to allow pipelined receives.
+ */
+#define EF4_RXD_HEAD_ROOM (1 + EF4_RX_MAX_FRAGS)
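+
+/* Editor's note (assumed numbers, not from this patch): if
+ * EF4_MAX_FRAME_LEN(EF4_MAX_MTU) were 9234 bytes and EF4_RX_USR_BUF_SIZE
+ * 1792 bytes, EF4_RX_MAX_FRAGS would be DIV_ROUND_UP(9234, 1792) = 6,
+ * giving a head room of 1 + 6 = 7 descriptors.
+ */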
+
+static inline u8 *ef4_rx_buf_va(struct ef4_rx_buffer *buf)
+{
+ return page_address(buf->page) + buf->page_offset;
+}
+
+static inline u32 ef4_rx_buf_hash(struct ef4_nic *efx, const u8 *eh)
+{
+#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
+ return __le32_to_cpup((const __le32 *)(eh + efx->rx_packet_hash_offset));
+#else
+ const u8 *data = eh + efx->rx_packet_hash_offset;
+ return (u32)data[0] |
+ (u32)data[1] << 8 |
+ (u32)data[2] << 16 |
+ (u32)data[3] << 24;
+#endif
+}
+
+static inline struct ef4_rx_buffer *
+ef4_rx_buf_next(struct ef4_rx_queue *rx_queue, struct ef4_rx_buffer *rx_buf)
+{
+ if (unlikely(rx_buf == ef4_rx_buffer(rx_queue, rx_queue->ptr_mask)))
+ return ef4_rx_buffer(rx_queue, 0);
+ else
+ return rx_buf + 1;
+}
+
+static inline void ef4_sync_rx_buffer(struct ef4_nic *efx,
+ struct ef4_rx_buffer *rx_buf,
+ unsigned int len)
+{
+ dma_sync_single_for_cpu(&efx->pci_dev->dev, rx_buf->dma_addr, len,
+ DMA_FROM_DEVICE);
+}
+
+void ef4_rx_config_page_split(struct ef4_nic *efx)
+{
+ efx->rx_page_buf_step = ALIGN(efx->rx_dma_len + efx->rx_ip_align,
+ EF4_RX_BUF_ALIGNMENT);
+ efx->rx_bufs_per_page = efx->rx_buffer_order ? 1 :
+ ((PAGE_SIZE - sizeof(struct ef4_rx_page_state)) /
+ efx->rx_page_buf_step);
+ efx->rx_buffer_truesize = (PAGE_SIZE << efx->rx_buffer_order) /
+ efx->rx_bufs_per_page;
+ efx->rx_pages_per_batch = DIV_ROUND_UP(EF4_RX_PREFERRED_BATCH,
+ efx->rx_bufs_per_page);
+}
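+
+/* Editor's sketch (illustrative numbers, not from this patch): on a
+ * 4096-byte page with an assumed rx_page_buf_step of 1920 and an assumed
+ * 16-byte struct ef4_rx_page_state, rx_bufs_per_page would be
+ * (4096 - 16) / 1920 = 2, so each page splits into two receive buffers
+ * and truesize is charged at 4096 / 2 = 2048 bytes per buffer.
+ */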
+
+/* Check the RX page recycle ring for a page that can be reused. */
+static struct page *ef4_reuse_page(struct ef4_rx_queue *rx_queue)
+{
+ struct ef4_nic *efx = rx_queue->efx;
+ struct page *page;
+ struct ef4_rx_page_state *state;
+ unsigned index;
+
+ index = rx_queue->page_remove & rx_queue->page_ptr_mask;
+ page = rx_queue->page_ring[index];
+ if (page == NULL)
+ return NULL;
+
+ rx_queue->page_ring[index] = NULL;
+ /* page_remove cannot exceed page_add. */
+ if (rx_queue->page_remove != rx_queue->page_add)
+ ++rx_queue->page_remove;
+
+ /* If page_count is 1 then we hold the only reference to this page. */
+ if (page_count(page) == 1) {
+ ++rx_queue->page_recycle_count;
+ return page;
+ } else {
+ state = page_address(page);
+ dma_unmap_page(&efx->pci_dev->dev, state->dma_addr,
+ PAGE_SIZE << efx->rx_buffer_order,
+ DMA_FROM_DEVICE);
+ put_page(page);
+ ++rx_queue->page_recycle_failed;
+ }
+
+ return NULL;
+}
+
+/**
+ * ef4_init_rx_buffers - create EF4_RX_PREFERRED_BATCH page-based RX buffers
+ *
+ * @rx_queue: Efx RX queue
+ * @atomic: Perform atomic (GFP_ATOMIC) rather than blocking allocations
+ *
+ * This allocates a batch of pages, maps them for DMA, and populates
+ * struct ef4_rx_buffers for each one. Returns a negative error code or
+ * 0 on success. If a single page can be used for multiple buffers,
+ * then the page will either be inserted fully, or not at all.
+ */
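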
+static int ef4_init_rx_buffers(struct ef4_rx_queue *rx_queue, bool atomic)
+{
+ struct ef4_nic *efx = rx_queue->efx;
+ struct ef4_rx_buffer *rx_buf;
+ struct page *page;
+ unsigned int page_offset;
+ struct ef4_rx_page_state *state;
+ dma_addr_t dma_addr;
+ unsigned index, count;
+
+ count = 0;
+ do {
+ page = ef4_reuse_page(rx_queue);
+ if (page == NULL) {
+ page = alloc_pages(__GFP_COLD | __GFP_COMP |
+ (atomic ? GFP_ATOMIC : GFP_KERNEL),
+ efx->rx_buffer_order);
+ if (unlikely(page == NULL))
+ return -ENOMEM;
+ dma_addr =
+ dma_map_page(&efx->pci_dev->dev, page, 0,
+ PAGE_SIZE << efx->rx_buffer_order,
+ DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(&efx->pci_dev->dev,
+ dma_addr))) {
+ __free_pages(page, efx->rx_buffer_order);
+ return -EIO;
+ }
+ state = page_address(page);
+ state->dma_addr = dma_addr;
+ } else {
+ state = page_address(page);
+ dma_addr = state->dma_addr;
+ }
+
+ dma_addr += sizeof(struct ef4_rx_page_state);
+ page_offset = sizeof(struct ef4_rx_page_state);
+
+ do {
+ index = rx_queue->added_count & rx_queue->ptr_mask;
+ rx_buf = ef4_rx_buffer(rx_queue, index);
+ rx_buf->dma_addr = dma_addr + efx->rx_ip_align;
+ rx_buf->page = page;
+ rx_buf->page_offset = page_offset + efx->rx_ip_align;
+ rx_buf->len = efx->rx_dma_len;
+ rx_buf->flags = 0;
+ ++rx_queue->added_count;
+ get_page(page);
+ dma_addr += efx->rx_page_buf_step;
+ page_offset += efx->rx_page_buf_step;
+ } while (page_offset + efx->rx_page_buf_step <= PAGE_SIZE);
+
+ rx_buf->flags = EF4_RX_BUF_LAST_IN_PAGE;
+ } while (++count < efx->rx_pages_per_batch);
+
+ return 0;
+}
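+
+/* Editor's note (illustrative, not part of this patch): each page ends up
+ * laid out as
+ *
+ *	| struct ef4_rx_page_state | buf 0 | buf 1 | ... | unused tail |
+ *
+ * with every buffer offset by rx_ip_align inside its rx_page_buf_step
+ * slot, and EF4_RX_BUF_LAST_IN_PAGE set only on the final buffer so the
+ * page is unmapped/recycled exactly once.
+ */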
+
+/* Unmap a DMA-mapped page. This function is only called for the final RX
+ * buffer in a page.
+ */
+static void ef4_unmap_rx_buffer(struct ef4_nic *efx,
+ struct ef4_rx_buffer *rx_buf)
+{
+ struct page *page = rx_buf->page;
+
+ if (page) {
+ struct ef4_rx_page_state *state = page_address(page);
+ dma_unmap_page(&efx->pci_dev->dev,
+ state->dma_addr,
+ PAGE_SIZE << efx->rx_buffer_order,
+ DMA_FROM_DEVICE);
+ }
+}
+
+static void ef4_free_rx_buffers(struct ef4_rx_queue *rx_queue,
+ struct ef4_rx_buffer *rx_buf,
+ unsigned int num_bufs)
+{
+ do {
+ if (rx_buf->page) {
+ put_page(rx_buf->page);
+ rx_buf->page = NULL;
+ }
+ rx_buf = ef4_rx_buf_next(rx_queue, rx_buf);
+ } while (--num_bufs);
+}
+
+/* Attempt to recycle the page if there is an RX recycle ring; the page can
+ * only be added if this is the final RX buffer, to prevent pages being used in
+ * the descriptor ring and appearing in the recycle ring simultaneously.
+ */
+static void ef4_recycle_rx_page(struct ef4_channel *channel,
+ struct ef4_rx_buffer *rx_buf)
+{
+ struct page *page = rx_buf->page;
+ struct ef4_rx_queue *rx_queue = ef4_channel_get_rx_queue(channel);
+ struct ef4_nic *efx = rx_queue->efx;
+ unsigned index;
+
+ /* Only recycle the page after processing the final buffer. */
+ if (!(rx_buf->flags & EF4_RX_BUF_LAST_IN_PAGE))
+ return;
+
+ index = rx_queue->page_add & rx_queue->page_ptr_mask;
+ if (rx_queue->page_ring[index] == NULL) {
+ unsigned read_index = rx_queue->page_remove &
+ rx_queue->page_ptr_mask;
+
+ /* The next slot in the recycle ring is available, but
+ * increment page_remove if the read pointer currently
+ * points here.
+ */
+ if (read_index == index)
+ ++rx_queue->page_remove;
+ rx_queue->page_ring[index] = page;
+ ++rx_queue->page_add;
+ return;
+ }
+ ++rx_queue->page_recycle_full;
+ ef4_unmap_rx_buffer(efx, rx_buf);
+ put_page(rx_buf->page);
+}
+
+static void ef4_fini_rx_buffer(struct ef4_rx_queue *rx_queue,
+ struct ef4_rx_buffer *rx_buf)
+{
+ /* Release the page reference we hold for the buffer. */
+ if (rx_buf->page)
+ put_page(rx_buf->page);
+
+ /* If this is the last buffer in a page, unmap and free it. */
+ if (rx_buf->flags & EF4_RX_BUF_LAST_IN_PAGE) {
+ ef4_unmap_rx_buffer(rx_queue->efx, rx_buf);
+ ef4_free_rx_buffers(rx_queue, rx_buf, 1);
+ }
+ rx_buf->page = NULL;
+}
+
+/* Recycle the pages that are used by buffers that have just been received. */
+static void ef4_recycle_rx_pages(struct ef4_channel *channel,
+ struct ef4_rx_buffer *rx_buf,
+ unsigned int n_frags)
+{
+ struct ef4_rx_queue *rx_queue = ef4_channel_get_rx_queue(channel);
+
+ do {
+ ef4_recycle_rx_page(channel, rx_buf);
+ rx_buf = ef4_rx_buf_next(rx_queue, rx_buf);
+ } while (--n_frags);
+}
+
+static void ef4_discard_rx_packet(struct ef4_channel *channel,
+ struct ef4_rx_buffer *rx_buf,
+ unsigned int n_frags)
+{
+ struct ef4_rx_queue *rx_queue = ef4_channel_get_rx_queue(channel);
+
+ ef4_recycle_rx_pages(channel, rx_buf, n_frags);
+
+ ef4_free_rx_buffers(rx_queue, rx_buf, n_frags);
+}
+
+/**
+ * ef4_fast_push_rx_descriptors - push new RX descriptors quickly
+ * @rx_queue: RX descriptor queue
+ * @atomic: Perform atomic (GFP_ATOMIC) rather than blocking allocations
+ *
+ * This will aim to fill the RX descriptor queue up to
+ * @rx_queue->max_fill. If there is insufficient atomic
+ * memory to do so, a slow fill will be scheduled.
+ *
+ * The caller must provide serialisation (none is used here). In practice,
+ * this means this function must run from the NAPI handler, or be called
+ * when NAPI is disabled.
+ */
+void ef4_fast_push_rx_descriptors(struct ef4_rx_queue *rx_queue, bool atomic)
+{
+ struct ef4_nic *efx = rx_queue->efx;
+ unsigned int fill_level, batch_size;
+ int space, rc = 0;
+
+ if (!rx_queue->refill_enabled)
+ return;
+
+ /* Calculate current fill level, and exit if we don't need to fill */
+ fill_level = (rx_queue->added_count - rx_queue->removed_count);
+ EF4_BUG_ON_PARANOID(fill_level > rx_queue->efx->rxq_entries);
+ if (fill_level >= rx_queue->fast_fill_trigger)
+ goto out;
+
+ /* Record minimum fill level */
+ if (unlikely(fill_level < rx_queue->min_fill)) {
+ if (fill_level)
+ rx_queue->min_fill = fill_level;
+ }
+
+ batch_size = efx->rx_pages_per_batch * efx->rx_bufs_per_page;
+ space = rx_queue->max_fill - fill_level;
+ EF4_BUG_ON_PARANOID(space < batch_size);
+
+ netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev,
+ "RX queue %d fast-filling descriptor ring from"
+ " level %d to level %d\n",
+ ef4_rx_queue_index(rx_queue), fill_level,
+ rx_queue->max_fill);
+
+ do {
+ rc = ef4_init_rx_buffers(rx_queue, atomic);
+ if (unlikely(rc)) {
+ /* Ensure that we don't leave the rx queue empty */
+ if (rx_queue->added_count == rx_queue->removed_count)
+ ef4_schedule_slow_fill(rx_queue);
+ goto out;
+ }
+ } while ((space -= batch_size) >= batch_size);
+
+ netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev,
+ "RX queue %d fast-filled descriptor ring "
+ "to level %d\n", ef4_rx_queue_index(rx_queue),
+ rx_queue->added_count - rx_queue->removed_count);
+
+ out:
+ if (rx_queue->notified_count != rx_queue->added_count)
+ ef4_nic_notify_rx_desc(rx_queue);
+}
+
+void ef4_rx_slow_fill(unsigned long context)
+{
+ struct ef4_rx_queue *rx_queue = (struct ef4_rx_queue *)context;
+
+ /* Post an event to cause NAPI to run and refill the queue */
+ ef4_nic_generate_fill_event(rx_queue);
+ ++rx_queue->slow_fill_count;
+}
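+
+/* Editor's note: ef4_rx_slow_fill() is presumably installed as the
+ * rx_queue->slow_fill timer callback elsewhere in this patch, e.g.
+ *
+ *	setup_timer(&rx_queue->slow_fill, ef4_rx_slow_fill,
+ *		    (unsigned long)rx_queue);
+ *
+ * the exact call site is an assumption here.
+ */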
+
+static void ef4_rx_packet__check_len(struct ef4_rx_queue *rx_queue,
+ struct ef4_rx_buffer *rx_buf,
+ int len)
+{
+ struct ef4_nic *efx = rx_queue->efx;
+ unsigned max_len = rx_buf->len - efx->type->rx_buffer_padding;
+
+ if (likely(len <= max_len))
+ return;
+
+ /* The packet must be discarded, but this is only a fatal error
+ * if the caller indicated it was
+ */
+ rx_buf->flags |= EF4_RX_PKT_DISCARD;
+
+ if ((len > rx_buf->len) && EF4_WORKAROUND_8071(efx)) {
+ if (net_ratelimit())
+ netif_err(efx, rx_err, efx->net_dev,
+ " RX queue %d seriously overlength "
+ "RX event (0x%x > 0x%x+0x%x). Leaking\n",
+ ef4_rx_queue_index(rx_queue), len, max_len,
+ efx->type->rx_buffer_padding);
+ ef4_schedule_reset(efx, RESET_TYPE_RX_RECOVERY);
+ } else {
+ if (net_ratelimit())
+ netif_err(efx, rx_err, efx->net_dev,
+ " RX queue %d overlength RX event "
+ "(0x%x > 0x%x)\n",
+ ef4_rx_queue_index(rx_queue), len, max_len);
+ }
+
+ ef4_rx_queue_channel(rx_queue)->n_rx_overlength++;
+}
+
+/* Pass a received packet up through GRO. GRO can handle pages
+ * regardless of checksum state and skbs with a good checksum.
+ */
+static void
+ef4_rx_packet_gro(struct ef4_channel *channel, struct ef4_rx_buffer *rx_buf,
+ unsigned int n_frags, u8 *eh)
+{
+ struct napi_struct *napi = &channel->napi_str;
+ gro_result_t gro_result;
+ struct ef4_nic *efx = channel->efx;
+ struct sk_buff *skb;
+
+ skb = napi_get_frags(napi);
+ if (unlikely(!skb)) {
+ struct ef4_rx_queue *rx_queue;
+
+ rx_queue = ef4_channel_get_rx_queue(channel);
+ ef4_free_rx_buffers(rx_queue, rx_buf, n_frags);
+ return;
+ }
+
+ if (efx->net_dev->features & NETIF_F_RXHASH)
+ skb_set_hash(skb, ef4_rx_buf_hash(efx, eh),
+ PKT_HASH_TYPE_L3);
+ skb->ip_summed = ((rx_buf->flags & EF4_RX_PKT_CSUMMED) ?
+ CHECKSUM_UNNECESSARY : CHECKSUM_NONE);
+
+ for (;;) {
+ skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
+ rx_buf->page, rx_buf->page_offset,
+ rx_buf->len);
+ rx_buf->page = NULL;
+ skb->len += rx_buf->len;
+ if (skb_shinfo(skb)->nr_frags == n_frags)
+ break;
+
+ rx_buf = ef4_rx_buf_next(&channel->rx_queue, rx_buf);
+ }
+
+ skb->data_len = skb->len;
+ skb->truesize += n_frags * efx->rx_buffer_truesize;
+
+ skb_record_rx_queue(skb, channel->rx_queue.core_index);
+
+ gro_result = napi_gro_frags(napi);
+ if (gro_result != GRO_DROP)
+ channel->irq_mod_score += 2;
+}
+
+/* Allocate and construct an SKB around page fragments */
+static struct sk_buff *ef4_rx_mk_skb(struct ef4_channel *channel,
+ struct ef4_rx_buffer *rx_buf,
+ unsigned int n_frags,
+ u8 *eh, int hdr_len)
+{
+ struct ef4_nic *efx = channel->efx;
+ struct sk_buff *skb;
+
+ /* Allocate an SKB to store the headers */
+ skb = netdev_alloc_skb(efx->net_dev,
+ efx->rx_ip_align + efx->rx_prefix_size +
+ hdr_len);
+ if (unlikely(skb == NULL)) {
+ atomic_inc(&efx->n_rx_noskb_drops);
+ return NULL;
+ }
+
+ EF4_BUG_ON_PARANOID(rx_buf->len < hdr_len);
+
+ memcpy(skb->data + efx->rx_ip_align, eh - efx->rx_prefix_size,
+ efx->rx_prefix_size + hdr_len);
+ skb_reserve(skb, efx->rx_ip_align + efx->rx_prefix_size);
+ __skb_put(skb, hdr_len);
+
+ /* Append the remaining page(s) onto the frag list */
+ if (rx_buf->len > hdr_len) {
+ rx_buf->page_offset += hdr_len;
+ rx_buf->len -= hdr_len;
+
+ for (;;) {
+ skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
+ rx_buf->page, rx_buf->page_offset,
+ rx_buf->len);
+ rx_buf->page = NULL;
+ skb->len += rx_buf->len;
+ skb->data_len += rx_buf->len;
+ if (skb_shinfo(skb)->nr_frags == n_frags)
+ break;
+
+ rx_buf = ef4_rx_buf_next(&channel->rx_queue, rx_buf);
+ }
+ } else {
+ __free_pages(rx_buf->page, efx->rx_buffer_order);
+ rx_buf->page = NULL;
+ n_frags = 0;
+ }
+
+ skb->truesize += n_frags * efx->rx_buffer_truesize;
+
+ /* Move past the ethernet header */
+ skb->protocol = eth_type_trans(skb, efx->net_dev);
+
+ skb_mark_napi_id(skb, &channel->napi_str);
+
+ return skb;
+}
+
+void ef4_rx_packet(struct ef4_rx_queue *rx_queue, unsigned int index,
+ unsigned int n_frags, unsigned int len, u16 flags)
+{
+ struct ef4_nic *efx = rx_queue->efx;
+ struct ef4_channel *channel = ef4_rx_queue_channel(rx_queue);
+ struct ef4_rx_buffer *rx_buf;
+
+ rx_queue->rx_packets++;
+
+ rx_buf = ef4_rx_buffer(rx_queue, index);
+ rx_buf->flags |= flags;
+
+ /* Validate the number of fragments and completed length */
+ if (n_frags == 1) {
+ if (!(flags & EF4_RX_PKT_PREFIX_LEN))
+ ef4_rx_packet__check_len(rx_queue, rx_buf, len);
+ } else if (unlikely(n_frags > EF4_RX_MAX_FRAGS) ||
+ unlikely(len <= (n_frags - 1) * efx->rx_dma_len) ||
+ unlikely(len > n_frags * efx->rx_dma_len) ||
+ unlikely(!efx->rx_scatter)) {
+ /* If this isn't an explicit discard request, either
+ * the hardware or the driver is broken.
+ */
+ WARN_ON(!(len == 0 && rx_buf->flags & EF4_RX_PKT_DISCARD));
+ rx_buf->flags |= EF4_RX_PKT_DISCARD;
+ }
+
+ netif_vdbg(efx, rx_status, efx->net_dev,
+ "RX queue %d received ids %x-%x len %d %s%s\n",
+ ef4_rx_queue_index(rx_queue), index,
+ (index + n_frags - 1) & rx_queue->ptr_mask, len,
+ (rx_buf->flags & EF4_RX_PKT_CSUMMED) ? " [SUMMED]" : "",
+ (rx_buf->flags & EF4_RX_PKT_DISCARD) ? " [DISCARD]" : "");
+
+ /* Discard packet, if instructed to do so. Process the
+ * previous receive first.
+ */
+ if (unlikely(rx_buf->flags & EF4_RX_PKT_DISCARD)) {
+ ef4_rx_flush_packet(channel);
+ ef4_discard_rx_packet(channel, rx_buf, n_frags);
+ return;
+ }
+
+ if (n_frags == 1 && !(flags & EF4_RX_PKT_PREFIX_LEN))
+ rx_buf->len = len;
+
+ /* Release and/or sync the DMA mapping - assumes all RX buffers
+ * consumed in-order per RX queue.
+ */
+ ef4_sync_rx_buffer(efx, rx_buf, rx_buf->len);
+
+ /* Prefetch nice and early so data will (hopefully) be in cache by
+ * the time we look at it.
+ */
+ prefetch(ef4_rx_buf_va(rx_buf));
+
+ rx_buf->page_offset += efx->rx_prefix_size;
+ rx_buf->len -= efx->rx_prefix_size;
+
+ if (n_frags > 1) {
+ /* Release/sync DMA mapping for additional fragments.
+ * Fix length for last fragment.
+ */
+ unsigned int tail_frags = n_frags - 1;
+
+ for (;;) {
+ rx_buf = ef4_rx_buf_next(rx_queue, rx_buf);
+ if (--tail_frags == 0)
+ break;
+ ef4_sync_rx_buffer(efx, rx_buf, efx->rx_dma_len);
+ }
+ rx_buf->len = len - (n_frags - 1) * efx->rx_dma_len;
+ ef4_sync_rx_buffer(efx, rx_buf, rx_buf->len);
+ }
+
+ /* All fragments have been DMA-synced, so recycle pages. */
+ rx_buf = ef4_rx_buffer(rx_queue, index);
+ ef4_recycle_rx_pages(channel, rx_buf, n_frags);
+
+ /* Pipeline receives so that we give time for packet headers to be
+ * prefetched into cache.
+ */
+ ef4_rx_flush_packet(channel);
+ channel->rx_pkt_n_frags = n_frags;
+ channel->rx_pkt_index = index;
+}
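+
+/* Editor's sketch (not part of this patch): receive is split into two
+ * stages, so the event handler effectively runs
+ *
+ *	ef4_rx_packet(rx_queue, index, n_frags, len, flags);
+ *	...
+ *	ef4_rx_flush_packet(channel);	// invokes __ef4_rx_packet()
+ *
+ * giving the prefetch issued in stage one time to pull the packet
+ * headers into cache before stage two parses them.
+ */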
+
+static void ef4_rx_deliver(struct ef4_channel *channel, u8 *eh,
+ struct ef4_rx_buffer *rx_buf,
+ unsigned int n_frags)
+{
+ struct sk_buff *skb;
+ u16 hdr_len = min_t(u16, rx_buf->len, EF4_SKB_HEADERS);
+
+ skb = ef4_rx_mk_skb(channel, rx_buf, n_frags, eh, hdr_len);
+ if (unlikely(skb == NULL)) {
+ struct ef4_rx_queue *rx_queue;
+
+ rx_queue = ef4_channel_get_rx_queue(channel);
+ ef4_free_rx_buffers(rx_queue, rx_buf, n_frags);
+ return;
+ }
+ skb_record_rx_queue(skb, channel->rx_queue.core_index);
+
+ /* Set the SKB flags */
+ skb_checksum_none_assert(skb);
+ if (likely(rx_buf->flags & EF4_RX_PKT_CSUMMED))
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+ if (channel->type->receive_skb)
+ if (channel->type->receive_skb(channel, skb))
+ return;
+
+ /* Pass the packet up */
+ netif_receive_skb(skb);
+}
+
+/* Handle a received packet. Second half: Touches packet payload. */
+void __ef4_rx_packet(struct ef4_channel *channel)
+{
+ struct ef4_nic *efx = channel->efx;
+ struct ef4_rx_buffer *rx_buf =
+ ef4_rx_buffer(&channel->rx_queue, channel->rx_pkt_index);
+ u8 *eh = ef4_rx_buf_va(rx_buf);
+
+ /* Read length from the prefix if necessary. This already
+ * excludes the length of the prefix itself.
+ */
+ if (rx_buf->flags & EF4_RX_PKT_PREFIX_LEN)
+ rx_buf->len = le16_to_cpup((__le16 *)
+ (eh + efx->rx_packet_len_offset));
+
+ /* If we're in loopback test, then pass the packet directly to the
+ * loopback layer, and free the rx_buf here
+ */
+ if (unlikely(efx->loopback_selftest)) {
+ struct ef4_rx_queue *rx_queue;
+
+ ef4_loopback_rx_packet(efx, eh, rx_buf->len);
+ rx_queue = ef4_channel_get_rx_queue(channel);
+ ef4_free_rx_buffers(rx_queue, rx_buf,
+ channel->rx_pkt_n_frags);
+ goto out;
+ }
+
+ if (unlikely(!(efx->net_dev->features & NETIF_F_RXCSUM)))
+ rx_buf->flags &= ~EF4_RX_PKT_CSUMMED;
+
+ if ((rx_buf->flags & EF4_RX_PKT_TCP) && !channel->type->receive_skb &&
+ !ef4_channel_busy_polling(channel))
+ ef4_rx_packet_gro(channel, rx_buf, channel->rx_pkt_n_frags, eh);
+ else
+ ef4_rx_deliver(channel, eh, rx_buf, channel->rx_pkt_n_frags);
+out:
+ channel->rx_pkt_n_frags = 0;
+}
+
+int ef4_probe_rx_queue(struct ef4_rx_queue *rx_queue)
+{
+ struct ef4_nic *efx = rx_queue->efx;
+ unsigned int entries;
+ int rc;
+
+ /* Create the smallest power-of-two aligned ring */
+ entries = max(roundup_pow_of_two(efx->rxq_entries), EF4_MIN_DMAQ_SIZE);
+ EF4_BUG_ON_PARANOID(entries > EF4_MAX_DMAQ_SIZE);
+ rx_queue->ptr_mask = entries - 1;
+
+ netif_dbg(efx, probe, efx->net_dev,
+ "creating RX queue %d size %#x mask %#x\n",
+ ef4_rx_queue_index(rx_queue), efx->rxq_entries,
+ rx_queue->ptr_mask);
+
+ /* Allocate RX buffers */
+ rx_queue->buffer = kcalloc(entries, sizeof(*rx_queue->buffer),
+ GFP_KERNEL);
+ if (!rx_queue->buffer)
+ return -ENOMEM;
+
+ rc = ef4_nic_probe_rx(rx_queue);
+ if (rc) {
+ kfree(rx_queue->buffer);
+ rx_queue->buffer = NULL;
+ }
+
+ return rc;
+}
+
+static void ef4_init_rx_recycle_ring(struct ef4_nic *efx,
+ struct ef4_rx_queue *rx_queue)
+{
+ unsigned int bufs_in_recycle_ring, page_ring_size;
+
+ /* Set the RX recycle ring size */
+#ifdef CONFIG_PPC64
+ bufs_in_recycle_ring = EF4_RECYCLE_RING_SIZE_IOMMU;
+#else
+ if (iommu_present(&pci_bus_type))
+ bufs_in_recycle_ring = EF4_RECYCLE_RING_SIZE_IOMMU;
+ else
+ bufs_in_recycle_ring = EF4_RECYCLE_RING_SIZE_NOIOMMU;
+#endif /* CONFIG_PPC64 */
+
+ page_ring_size = roundup_pow_of_two(bufs_in_recycle_ring /
+ efx->rx_bufs_per_page);
+ rx_queue->page_ring = kcalloc(page_ring_size,
+ sizeof(*rx_queue->page_ring), GFP_KERNEL);
+ rx_queue->page_ptr_mask = page_ring_size - 1;
+}
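+
+/* Editor's note (illustrative arithmetic, not from this patch): with an
+ * IOMMU present and an assumed rx_bufs_per_page of 2, the ring would be
+ * roundup_pow_of_two(4096 / 2) = 2048 page slots; without an IOMMU it
+ * shrinks to roundup_pow_of_two(16 / 2) = 8 slots.
+ */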
+
+void ef4_init_rx_queue(struct ef4_rx_queue *rx_queue)
+{
+ struct ef4_nic *efx = rx_queue->efx;
+ unsigned int max_fill, trigger, max_trigger;
+
+ netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
+ "initialising RX queue %d\n", ef4_rx_queue_index(rx_queue));
+
+ /* Initialise ptr fields */
+ rx_queue->added_count = 0;
+ rx_queue->notified_count = 0;
+ rx_queue->removed_count = 0;
+ rx_queue->min_fill = -1U;
+ ef4_init_rx_recycle_ring(efx, rx_queue);
+
+ rx_queue->page_remove = 0;
+ rx_queue->page_add = rx_queue->page_ptr_mask + 1;
+ rx_queue->page_recycle_count = 0;
+ rx_queue->page_recycle_failed = 0;
+ rx_queue->page_recycle_full = 0;
+
+ /* Initialise limit fields */
+ max_fill = efx->rxq_entries - EF4_RXD_HEAD_ROOM;
+ max_trigger =
+ max_fill - efx->rx_pages_per_batch * efx->rx_bufs_per_page;
+ if (rx_refill_threshold != 0) {
+ trigger = max_fill * min(rx_refill_threshold, 100U) / 100U;
+ if (trigger > max_trigger)
+ trigger = max_trigger;
+ } else {
+ trigger = max_trigger;
+ }
+
+ rx_queue->max_fill = max_fill;
+ rx_queue->fast_fill_trigger = trigger;
+ rx_queue->refill_enabled = true;
+
+ /* Set up RX descriptor ring */
+ ef4_nic_init_rx(rx_queue);
+}
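+
+/* Editor's sketch (assumed numbers): with rxq_entries = 1024 and a head
+ * room of 7, max_fill = 1017; with an 8-buffer refill batch,
+ * max_trigger = 1009. A module parameter rx_refill_threshold = 90 would
+ * then give trigger = 1017 * 90 / 100 = 915, i.e. refilling starts once
+ * the ring drops below roughly 90% full.
+ */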
+
+void ef4_fini_rx_queue(struct ef4_rx_queue *rx_queue)
+{
+ int i;
+ struct ef4_nic *efx = rx_queue->efx;
+ struct ef4_rx_buffer *rx_buf;
+
+ netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
+ "shutting down RX queue %d\n", ef4_rx_queue_index(rx_queue));
+
+ del_timer_sync(&rx_queue->slow_fill);
+
+ /* Release RX buffers from the current read ptr to the write ptr */
+ if (rx_queue->buffer) {
+ for (i = rx_queue->removed_count; i < rx_queue->added_count;
+ i++) {
+ unsigned index = i & rx_queue->ptr_mask;
+ rx_buf = ef4_rx_buffer(rx_queue, index);
+ ef4_fini_rx_buffer(rx_queue, rx_buf);
+ }
+ }
+
+ /* Unmap and release the pages in the recycle ring. Remove the ring. */
+ for (i = 0; i <= rx_queue->page_ptr_mask; i++) {
+ struct page *page = rx_queue->page_ring[i];
+ struct ef4_rx_page_state *state;
+
+ if (page == NULL)
+ continue;
+
+ state = page_address(page);
+ dma_unmap_page(&efx->pci_dev->dev, state->dma_addr,
+ PAGE_SIZE << efx->rx_buffer_order,
+ DMA_FROM_DEVICE);
+ put_page(page);
+ }
+ kfree(rx_queue->page_ring);
+ rx_queue->page_ring = NULL;
+}
+
+void ef4_remove_rx_queue(struct ef4_rx_queue *rx_queue)
+{
+ netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
+ "destroying RX queue %d\n", ef4_rx_queue_index(rx_queue));
+
+ ef4_nic_remove_rx(rx_queue);
+
+ kfree(rx_queue->buffer);
+ rx_queue->buffer = NULL;
+}
+
+module_param(rx_refill_threshold, uint, 0444);
+MODULE_PARM_DESC(rx_refill_threshold,
+ "RX descriptor ring refill threshold (%)");
+
+#ifdef CONFIG_RFS_ACCEL
+
+int ef4_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
+ u16 rxq_index, u32 flow_id)
+{
+ struct ef4_nic *efx = netdev_priv(net_dev);
+ struct ef4_channel *channel;
+ struct ef4_filter_spec spec;
+ struct flow_keys fk;
+ int rc;
+
+ if (flow_id == RPS_FLOW_ID_INVALID)
+ return -EINVAL;
+
+ if (!skb_flow_dissect_flow_keys(skb, &fk, 0))
+ return -EPROTONOSUPPORT;
+
+ if (fk.basic.n_proto != htons(ETH_P_IP) && fk.basic.n_proto != htons(ETH_P_IPV6))
+ return -EPROTONOSUPPORT;
+ if (fk.control.flags & FLOW_DIS_IS_FRAGMENT)
+ return -EPROTONOSUPPORT;
+
+ ef4_filter_init_rx(&spec, EF4_FILTER_PRI_HINT,
+ efx->rx_scatter ? EF4_FILTER_FLAG_RX_SCATTER : 0,
+ rxq_index);
+ spec.match_flags =
+ EF4_FILTER_MATCH_ETHER_TYPE | EF4_FILTER_MATCH_IP_PROTO |
+ EF4_FILTER_MATCH_LOC_HOST | EF4_FILTER_MATCH_LOC_PORT |
+ EF4_FILTER_MATCH_REM_HOST | EF4_FILTER_MATCH_REM_PORT;
+ spec.ether_type = fk.basic.n_proto;
+ spec.ip_proto = fk.basic.ip_proto;
+
+ if (fk.basic.n_proto == htons(ETH_P_IP)) {
+ spec.rem_host[0] = fk.addrs.v4addrs.src;
+ spec.loc_host[0] = fk.addrs.v4addrs.dst;
+ } else {
+ memcpy(spec.rem_host, &fk.addrs.v6addrs.src, sizeof(struct in6_addr));
+ memcpy(spec.loc_host, &fk.addrs.v6addrs.dst, sizeof(struct in6_addr));
+ }
+
+ spec.rem_port = fk.ports.src;
+ spec.loc_port = fk.ports.dst;
+
+ rc = efx->type->filter_rfs_insert(efx, &spec);
+ if (rc < 0)
+ return rc;
+
+ /* Remember this so we can check whether to expire the filter later */
+ channel = ef4_get_channel(efx, rxq_index);
+ channel->rps_flow_id[rc] = flow_id;
+ ++channel->rfs_filters_added;
+
+ if (spec.ether_type == htons(ETH_P_IP))
+ netif_info(efx, rx_status, efx->net_dev,
+ "steering %s %pI4:%u:%pI4:%u to queue %u [flow %u filter %d]\n",
+ (spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP",
+ spec.rem_host, ntohs(spec.rem_port), spec.loc_host,
+ ntohs(spec.loc_port), rxq_index, flow_id, rc);
+ else
+ netif_info(efx, rx_status, efx->net_dev,
+ "steering %s [%pI6]:%u:[%pI6]:%u to queue %u [flow %u filter %d]\n",
+ (spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP",
+ spec.rem_host, ntohs(spec.rem_port), spec.loc_host,
+ ntohs(spec.loc_port), rxq_index, flow_id, rc);
+
+ return rc;
+}
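+
+/* Editor's note: ef4_filter_rfs() is the accelerated-RFS steering entry
+ * point; under CONFIG_RFS_ACCEL it is presumably wired up as the
+ * netdev's .ndo_rx_flow_steer callback elsewhere in this patch (an
+ * assumption based on standard aRFS plumbing).
+ */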
+
+bool __ef4_filter_rfs_expire(struct ef4_nic *efx, unsigned int quota)
+{
+ bool (*expire_one)(struct ef4_nic *efx, u32 flow_id, unsigned int index);
+ unsigned int channel_idx, index, size;
+ u32 flow_id;
+
+ if (!spin_trylock_bh(&efx->filter_lock))
+ return false;
+
+ expire_one = efx->type->filter_rfs_expire_one;
+ channel_idx = efx->rps_expire_channel;
+ index = efx->rps_expire_index;
+ size = efx->type->max_rx_ip_filters;
+ while (quota--) {
+ struct ef4_channel *channel = ef4_get_channel(efx, channel_idx);
+ flow_id = channel->rps_flow_id[index];
+
+ if (flow_id != RPS_FLOW_ID_INVALID &&
+ expire_one(efx, flow_id, index)) {
+ netif_info(efx, rx_status, efx->net_dev,
+ "expired filter %d [queue %u flow %u]\n",
+ index, channel_idx, flow_id);
+ channel->rps_flow_id[index] = RPS_FLOW_ID_INVALID;
+ }
+ if (++index == size) {
+ if (++channel_idx == efx->n_channels)
+ channel_idx = 0;
+ index = 0;
+ }
+ }
+ efx->rps_expire_channel = channel_idx;
+ efx->rps_expire_index = index;
+
+ spin_unlock_bh(&efx->filter_lock);
+ return true;
+}
+
+#endif /* CONFIG_RFS_ACCEL */
+
+/**
+ * ef4_filter_is_mc_recipient - test whether spec is a multicast recipient
+ * @spec: Specification to test
+ *
+ * Return: %true if the specification is a non-drop RX filter that
+ * matches a local MAC address I/G bit value of 1 or matches a local
+ * IPv4 or IPv6 address value in the respective multicast address
+ * range. Otherwise %false.
+ */
+bool ef4_filter_is_mc_recipient(const struct ef4_filter_spec *spec)
+{
+ if (!(spec->flags & EF4_FILTER_FLAG_RX) ||
+ spec->dmaq_id == EF4_FILTER_RX_DMAQ_ID_DROP)
+ return false;
+
+ if (spec->match_flags &
+ (EF4_FILTER_MATCH_LOC_MAC | EF4_FILTER_MATCH_LOC_MAC_IG) &&
+ is_multicast_ether_addr(spec->loc_mac))
+ return true;
+
+ if ((spec->match_flags &
+ (EF4_FILTER_MATCH_ETHER_TYPE | EF4_FILTER_MATCH_LOC_HOST)) ==
+ (EF4_FILTER_MATCH_ETHER_TYPE | EF4_FILTER_MATCH_LOC_HOST)) {
+ if (spec->ether_type == htons(ETH_P_IP) &&
+ ipv4_is_multicast(spec->loc_host[0]))
+ return true;
+ if (spec->ether_type == htons(ETH_P_IPV6) &&
+ ((const u8 *)spec->loc_host)[0] == 0xff)
+ return true;
+ }
+
+ return false;
+}
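+
+/* Editor's sketch (hypothetical values): a spec with
+ * match_flags = EF4_FILTER_MATCH_ETHER_TYPE | EF4_FILTER_MATCH_LOC_HOST,
+ * ether_type = htons(ETH_P_IP) and loc_host[0] = htonl(0xe0000001)
+ * (224.0.0.1) is reported as a multicast recipient, while the same spec
+ * with dmaq_id = EF4_FILTER_RX_DMAQ_ID_DROP is not.
+ */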
diff --git a/drivers/net/ethernet/sfc/falcon/selftest.c b/drivers/net/ethernet/sfc/falcon/selftest.c
new file mode 100644
index 000000000000..92bc34c91547
--- /dev/null
+++ b/drivers/net/ethernet/sfc/falcon/selftest.c
@@ -0,0 +1,808 @@
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2005-2006 Fen Systems Ltd.
+ * Copyright 2006-2012 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#include <linux/netdevice.h>
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/kernel_stat.h>
+#include <linux/pci.h>
+#include <linux/ethtool.h>
+#include <linux/ip.h>
+#include <linux/in.h>
+#include <linux/udp.h>
+#include <linux/rtnetlink.h>
+#include <linux/slab.h>
+#include "net_driver.h"
+#include "efx.h"
+#include "nic.h"
+#include "selftest.h"
+#include "workarounds.h"
+
+/* IRQ latency can be enormous because:
+ * - All IRQs may be disabled on a CPU for a *long* time by e.g. a
+ * slow serial console or an old IDE driver doing error recovery
+ * - The PREEMPT_RT patches mostly deal with this, but also allow a
+ * tasklet or normal task to be given higher priority than our IRQ
+ * threads
+ * Try to avoid blaming the hardware for this.
+ */
+#define IRQ_TIMEOUT HZ
+
+/*
+ * Loopback test packet structure
+ *
+ * The self-test should stress every RSS vector, and unfortunately
+ * Falcon only performs RSS on TCP/UDP packets.
+ */
+struct ef4_loopback_payload {
+ struct ethhdr header;
+ struct iphdr ip;
+ struct udphdr udp;
+ __be16 iteration;
+ char msg[64];
+} __packed;
+
+/* Loopback test source MAC address */
+static const u8 payload_source[ETH_ALEN] __aligned(2) = {
+ 0x00, 0x0f, 0x53, 0x1b, 0x1b, 0x1b,
+};
+
+static const char payload_msg[] =
+ "Hello world! This is an Efx loopback test in progress!";
+
+/* Interrupt mode names */
+static const unsigned int ef4_interrupt_mode_max = EF4_INT_MODE_MAX;
+static const char *const ef4_interrupt_mode_names[] = {
+ [EF4_INT_MODE_MSIX] = "MSI-X",
+ [EF4_INT_MODE_MSI] = "MSI",
+ [EF4_INT_MODE_LEGACY] = "legacy",
+};
+#define INT_MODE(efx) \
+ STRING_TABLE_LOOKUP(efx->interrupt_mode, ef4_interrupt_mode)
+
+/**
+ * ef4_loopback_state - persistent state during a loopback selftest
+ * @flush: Drop all packets in ef4_loopback_rx_packet
+ * @packet_count: Number of packets being used in this test
+ * @skbs: An array of skbs transmitted
+ * @offload_csum: Checksums are being offloaded
+ * @rx_good: RX good packet count
+ * @rx_bad: RX bad packet count
+ * @payload: Payload used in tests
+ */
+struct ef4_loopback_state {
+ bool flush;
+ int packet_count;
+ struct sk_buff **skbs;
+ bool offload_csum;
+ atomic_t rx_good;
+ atomic_t rx_bad;
+ struct ef4_loopback_payload payload;
+};
+
+/* How long to wait for all the packets to arrive (in ms) */
+#define LOOPBACK_TIMEOUT_MS 1000
+
+/**************************************************************************
+ *
+ * MII, NVRAM and register tests
+ *
+ **************************************************************************/
+
+static int ef4_test_phy_alive(struct ef4_nic *efx, struct ef4_self_tests *tests)
+{
+ int rc = 0;
+
+ if (efx->phy_op->test_alive) {
+ rc = efx->phy_op->test_alive(efx);
+ tests->phy_alive = rc ? -1 : 1;
+ }
+
+ return rc;
+}
+
+static int ef4_test_nvram(struct ef4_nic *efx, struct ef4_self_tests *tests)
+{
+ int rc = 0;
+
+ if (efx->type->test_nvram) {
+ rc = efx->type->test_nvram(efx);
+ if (rc == -EPERM)
+ rc = 0;
+ else
+ tests->nvram = rc ? -1 : 1;
+ }
+
+ return rc;
+}
+
+/**************************************************************************
+ *
+ * Interrupt and event queue testing
+ *
+ **************************************************************************/
+
+/* Test generation and receipt of interrupts */
+static int ef4_test_interrupts(struct ef4_nic *efx,
+ struct ef4_self_tests *tests)
+{
+ unsigned long timeout, wait;
+ int cpu;
+ int rc;
+
+ netif_dbg(efx, drv, efx->net_dev, "testing interrupts\n");
+ tests->interrupt = -1;
+
+ rc = ef4_nic_irq_test_start(efx);
+ if (rc == -ENOTSUPP) {
+ netif_dbg(efx, drv, efx->net_dev,
+ "direct interrupt testing not supported\n");
+ tests->interrupt = 0;
+ return 0;
+ }
+
+ timeout = jiffies + IRQ_TIMEOUT;
+ wait = 1;
+
+ /* Wait for arrival of test interrupt. */
+ netif_dbg(efx, drv, efx->net_dev, "waiting for test interrupt\n");
+ do {
+ schedule_timeout_uninterruptible(wait);
+ cpu = ef4_nic_irq_test_irq_cpu(efx);
+ if (cpu >= 0)
+ goto success;
+ wait *= 2;
+ } while (time_before(jiffies, timeout));
+
+ netif_err(efx, drv, efx->net_dev, "timed out waiting for interrupt\n");
+ return -ETIMEDOUT;
+
+ success:
+ netif_dbg(efx, drv, efx->net_dev, "%s test interrupt seen on CPU%d\n",
+ INT_MODE(efx), cpu);
+ tests->interrupt = 1;
+ return 0;
+}
+
+/* Test generation and receipt of interrupting events */
+static int ef4_test_eventq_irq(struct ef4_nic *efx,
+ struct ef4_self_tests *tests)
+{
+ struct ef4_channel *channel;
+ unsigned int read_ptr[EF4_MAX_CHANNELS];
+ unsigned long napi_ran = 0, dma_pend = 0, int_pend = 0;
+ unsigned long timeout, wait;
+
+ BUILD_BUG_ON(EF4_MAX_CHANNELS > BITS_PER_LONG);
+
+ ef4_for_each_channel(channel, efx) {
+ read_ptr[channel->channel] = channel->eventq_read_ptr;
+ set_bit(channel->channel, &dma_pend);
+ set_bit(channel->channel, &int_pend);
+ ef4_nic_event_test_start(channel);
+ }
+
+ timeout = jiffies + IRQ_TIMEOUT;
+ wait = 1;
+
+ /* Wait for arrival of interrupts. NAPI processing may or may
+ * not complete in time, but we can cope in any case.
+ */
+ do {
+ schedule_timeout_uninterruptible(wait);
+
+ ef4_for_each_channel(channel, efx) {
+ ef4_stop_eventq(channel);
+ if (channel->eventq_read_ptr !=
+ read_ptr[channel->channel]) {
+ set_bit(channel->channel, &napi_ran);
+ clear_bit(channel->channel, &dma_pend);
+ clear_bit(channel->channel, &int_pend);
+ } else {
+ if (ef4_nic_event_present(channel))
+ clear_bit(channel->channel, &dma_pend);
+ if (ef4_nic_event_test_irq_cpu(channel) >= 0)
+ clear_bit(channel->channel, &int_pend);
+ }
+ ef4_start_eventq(channel);
+ }
+
+ wait *= 2;
+ } while ((dma_pend || int_pend) && time_before(jiffies, timeout));
+
+ ef4_for_each_channel(channel, efx) {
+ bool dma_seen = !test_bit(channel->channel, &dma_pend);
+ bool int_seen = !test_bit(channel->channel, &int_pend);
+
+ tests->eventq_dma[channel->channel] = dma_seen ? 1 : -1;
+ tests->eventq_int[channel->channel] = int_seen ? 1 : -1;
+
+ if (dma_seen && int_seen) {
+ netif_dbg(efx, drv, efx->net_dev,
+ "channel %d event queue passed (with%s NAPI)\n",
+ channel->channel,
+ test_bit(channel->channel, &napi_ran) ?
+ "" : "out");
+ } else {
+ /* Report failure and whether either interrupt or DMA
+ * worked
+ */
+ netif_err(efx, drv, efx->net_dev,
+ "channel %d timed out waiting for event queue\n",
+ channel->channel);
+ if (int_seen)
+ netif_err(efx, drv, efx->net_dev,
+ "channel %d saw interrupt "
+ "during event queue test\n",
+ channel->channel);
+ if (dma_seen)
+ netif_err(efx, drv, efx->net_dev,
+ "channel %d event was generated, but "
+ "failed to trigger an interrupt\n",
+ channel->channel);
+ }
+ }
+
+ return (dma_pend || int_pend) ? -ETIMEDOUT : 0;
+}
+
+static int ef4_test_phy(struct ef4_nic *efx, struct ef4_self_tests *tests,
+ unsigned flags)
+{
+ int rc;
+
+ if (!efx->phy_op->run_tests)
+ return 0;
+
+ mutex_lock(&efx->mac_lock);
+ rc = efx->phy_op->run_tests(efx, tests->phy_ext, flags);
+ mutex_unlock(&efx->mac_lock);
+ if (rc == -EPERM)
+ rc = 0;
+ else
+ netif_info(efx, drv, efx->net_dev,
+ "%s phy selftest\n", rc ? "Failed" : "Passed");
+
+ return rc;
+}
+
+/**************************************************************************
+ *
+ * Loopback testing
+ * NB Only one loopback test can be executing concurrently.
+ *
+ **************************************************************************/
+
+/* Loopback test RX callback
+ * This is called for each received packet during loopback testing.
+ */
+void ef4_loopback_rx_packet(struct ef4_nic *efx,
+ const char *buf_ptr, int pkt_len)
+{
+ struct ef4_loopback_state *state = efx->loopback_selftest;
+ struct ef4_loopback_payload *received;
+ struct ef4_loopback_payload *payload;
+
+ BUG_ON(!buf_ptr);
+
+ /* If we are just flushing, then drop the packet */
+ if ((state == NULL) || state->flush)
+ return;
+
+ payload = &state->payload;
+
+ received = (struct ef4_loopback_payload *) buf_ptr;
+ received->ip.saddr = payload->ip.saddr;
+ if (state->offload_csum)
+ received->ip.check = payload->ip.check;
+
+ /* Check that header exists */
+ if (pkt_len < sizeof(received->header)) {
+ netif_err(efx, drv, efx->net_dev,
+ "saw runt RX packet (length %d) in %s loopback "
+ "test\n", pkt_len, LOOPBACK_MODE(efx));
+ goto err;
+ }
+
+ /* Check that the ethernet header exists */
+ if (memcmp(&received->header, &payload->header, ETH_HLEN) != 0) {
+ netif_err(efx, drv, efx->net_dev,
+ "saw non-loopback RX packet in %s loopback test\n",
+ LOOPBACK_MODE(efx));
+ goto err;
+ }
+
+ /* Check packet length */
+ if (pkt_len != sizeof(*payload)) {
+ netif_err(efx, drv, efx->net_dev,
+ "saw incorrect RX packet length %d (wanted %d) in "
+ "%s loopback test\n", pkt_len, (int)sizeof(*payload),
+ LOOPBACK_MODE(efx));
+ goto err;
+ }
+
+ /* Check that IP header matches */
+ if (memcmp(&received->ip, &payload->ip, sizeof(payload->ip)) != 0) {
+ netif_err(efx, drv, efx->net_dev,
+ "saw corrupted IP header in %s loopback test\n",
+ LOOPBACK_MODE(efx));
+ goto err;
+ }
+
+ /* Check that msg and padding matches */
+ if (memcmp(&received->msg, &payload->msg, sizeof(received->msg)) != 0) {
+ netif_err(efx, drv, efx->net_dev,
+ "saw corrupted RX packet in %s loopback test\n",
+ LOOPBACK_MODE(efx));
+ goto err;
+ }
+
+ /* Check that iteration matches */
+ if (received->iteration != payload->iteration) {
+ netif_err(efx, drv, efx->net_dev,
+ "saw RX packet from iteration %d (wanted %d) in "
+ "%s loopback test\n", ntohs(received->iteration),
+ ntohs(payload->iteration), LOOPBACK_MODE(efx));
+ goto err;
+ }
+
+ /* Increase correct RX count */
+ netif_vdbg(efx, drv, efx->net_dev,
+ "got loopback RX in %s loopback test\n", LOOPBACK_MODE(efx));
+
+ atomic_inc(&state->rx_good);
+ return;
+
+ err:
+#ifdef DEBUG
+ if (atomic_read(&state->rx_bad) == 0) {
+ netif_err(efx, drv, efx->net_dev, "received packet:\n");
+ print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 0x10, 1,
+ buf_ptr, pkt_len, 0);
+ netif_err(efx, drv, efx->net_dev, "expected packet:\n");
+ print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 0x10, 1,
+ &state->payload, sizeof(state->payload), 0);
+ }
+#endif
+ atomic_inc(&state->rx_bad);
+}
+
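/* The sequence of checks above implies a particular on-the-wire layout.
 * A minimal userspace sketch of that layout follows; the driver's real
 * struct ef4_loopback_payload is defined earlier in selftest.c and is
 * not shown in this hunk, so the field types here are assumptions based
 * on the checks (Ethernet header, IP header, UDP header, a big-endian
 * iteration counter, then a fixed message). */
#include <net/ethernet.h>
#include <netinet/ip.h>
#include <netinet/udp.h>
#include <stdint.h>

struct loopback_payload_sketch {
	struct ether_header header;	/* compared over ETH_HLEN */
	struct iphdr ip;		/* saddr/check patched before compare */
	struct udphdr udp;		/* checksum ignored by the test */
	uint16_t iteration;		/* network byte order, bumped per run */
	char msg[64];			/* message plus padding, compared whole */
} __attribute__((packed));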
+/* Initialise the ef4_loopback_state for a new iteration */
+static void ef4_iterate_state(struct ef4_nic *efx)
+{
+ struct ef4_loopback_state *state = efx->loopback_selftest;
+ struct net_device *net_dev = efx->net_dev;
+ struct ef4_loopback_payload *payload = &state->payload;
+
+ /* Initialise the layer II header */
+ ether_addr_copy((u8 *)&payload->header.h_dest, net_dev->dev_addr);
+ ether_addr_copy((u8 *)&payload->header.h_source, payload_source);
+ payload->header.h_proto = htons(ETH_P_IP);
+
+ /* saddr set later and used as incrementing count */
+ payload->ip.daddr = htonl(INADDR_LOOPBACK);
+ payload->ip.ihl = 5;
+ payload->ip.check = (__force __sum16) htons(0xdead);
+ payload->ip.tot_len = htons(sizeof(*payload) - sizeof(struct ethhdr));
+ payload->ip.version = IPVERSION;
+ payload->ip.protocol = IPPROTO_UDP;
+
+ /* Initialise udp header */
+ payload->udp.source = 0;
+ payload->udp.len = htons(sizeof(*payload) - sizeof(struct ethhdr) -
+ sizeof(struct iphdr));
+ payload->udp.check = 0; /* checksum ignored */
+
+ /* Fill out payload */
+ payload->iteration = htons(ntohs(payload->iteration) + 1);
+ memcpy(&payload->msg, payload_msg, sizeof(payload_msg));
+
+ /* Fill out remaining state members */
+ atomic_set(&state->rx_good, 0);
+ atomic_set(&state->rx_bad, 0);
+ smp_wmb();
+}
+
+static int ef4_begin_loopback(struct ef4_tx_queue *tx_queue)
+{
+ struct ef4_nic *efx = tx_queue->efx;
+ struct ef4_loopback_state *state = efx->loopback_selftest;
+ struct ef4_loopback_payload *payload;
+ struct sk_buff *skb;
+ int i;
+ netdev_tx_t rc;
+
+ /* Transmit N copies of the payload buffer */
+ for (i = 0; i < state->packet_count; i++) {
+ /* Allocate an skb, holding an extra reference for
+ * transmit completion counting */
+ skb = alloc_skb(sizeof(state->payload), GFP_KERNEL);
+ if (!skb)
+ return -ENOMEM;
+ state->skbs[i] = skb;
+ skb_get(skb);
+
+ /* Copy the payload in, incrementing the source address to
+ * exercise the RSS vectors */
+ payload = ((struct ef4_loopback_payload *)
+ skb_put(skb, sizeof(state->payload)));
+ memcpy(payload, &state->payload, sizeof(state->payload));
+ payload->ip.saddr = htonl(INADDR_LOOPBACK | (i << 2));
+
+ /* Ensure everything we've written is visible to the
+ * interrupt handler. */
+ smp_wmb();
+
+ netif_tx_lock_bh(efx->net_dev);
+ rc = ef4_enqueue_skb(tx_queue, skb);
+ netif_tx_unlock_bh(efx->net_dev);
+
+ if (rc != NETDEV_TX_OK) {
+ netif_err(efx, drv, efx->net_dev,
+ "TX queue %d could not transmit packet %d of "
+ "%d in %s loopback test\n", tx_queue->queue,
+ i + 1, state->packet_count,
+ LOOPBACK_MODE(efx));
+
+ /* Defer cleaning up the other skbs for the caller */
+ kfree_skb(skb);
+ return -EPIPE;
+ }
+ }
+
+ return 0;
+}
+
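/* Each transmitted copy above gets a distinct source address,
 * INADDR_LOOPBACK | (i << 2), so consecutive packets hash to different
 * RSS inputs. A runnable illustration of the addresses generated: */
#include <arpa/inet.h>
#include <stdio.h>

int main(void)
{
	for (int i = 0; i < 4; i++) {
		struct in_addr a = {
			.s_addr = htonl(INADDR_LOOPBACK | (i << 2))
		};
		printf("packet %d: saddr %s\n", i, inet_ntoa(a));
		/* prints 127.0.0.1, 127.0.0.5, 127.0.0.9, 127.0.0.13 */
	}
	return 0;
}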
+static int ef4_poll_loopback(struct ef4_nic *efx)
+{
+ struct ef4_loopback_state *state = efx->loopback_selftest;
+
+ return atomic_read(&state->rx_good) == state->packet_count;
+}
+
+static int ef4_end_loopback(struct ef4_tx_queue *tx_queue,
+ struct ef4_loopback_self_tests *lb_tests)
+{
+ struct ef4_nic *efx = tx_queue->efx;
+ struct ef4_loopback_state *state = efx->loopback_selftest;
+ struct sk_buff *skb;
+ int tx_done = 0, rx_good, rx_bad;
+ int i, rc = 0;
+
+ netif_tx_lock_bh(efx->net_dev);
+
+ /* Count the number of TX completions and decrement the refcnt. Any
+ * skbs not already completed will be freed when the queue is flushed */
+ for (i = 0; i < state->packet_count; i++) {
+ skb = state->skbs[i];
+ if (skb && !skb_shared(skb))
+ ++tx_done;
+ dev_kfree_skb(skb);
+ }
+
+ netif_tx_unlock_bh(efx->net_dev);
+
+ /* Check TX completion and received packet counts */
+ rx_good = atomic_read(&state->rx_good);
+ rx_bad = atomic_read(&state->rx_bad);
+ if (tx_done != state->packet_count) {
+ /* Don't free the skbs; they will be picked up on TX
+ * overflow or channel teardown.
+ */
+ netif_err(efx, drv, efx->net_dev,
+ "TX queue %d saw only %d out of an expected %d "
+ "TX completion events in %s loopback test\n",
+ tx_queue->queue, tx_done, state->packet_count,
+ LOOPBACK_MODE(efx));
+ rc = -ETIMEDOUT;
+ /* Fall through so we report the RX errors as well */
+ }
+
+ /* We may always be up to a flush away from our desired packet total */
+ if (rx_good != state->packet_count) {
+ netif_dbg(efx, drv, efx->net_dev,
+ "TX queue %d saw only %d out of an expected %d "
+ "received packets in %s loopback test\n",
+ tx_queue->queue, rx_good, state->packet_count,
+ LOOPBACK_MODE(efx));
+ rc = -ETIMEDOUT;
+ /* Fall through */
+ }
+
+ /* Update loopback test structure */
+ lb_tests->tx_sent[tx_queue->queue] += state->packet_count;
+ lb_tests->tx_done[tx_queue->queue] += tx_done;
+ lb_tests->rx_good += rx_good;
+ lb_tests->rx_bad += rx_bad;
+
+ return rc;
+}
+
+static int
+ef4_test_loopback(struct ef4_tx_queue *tx_queue,
+ struct ef4_loopback_self_tests *lb_tests)
+{
+ struct ef4_nic *efx = tx_queue->efx;
+ struct ef4_loopback_state *state = efx->loopback_selftest;
+ int i, begin_rc, end_rc;
+
+ for (i = 0; i < 3; i++) {
+ /* Determine how many packets to send */
+ state->packet_count = efx->txq_entries / 3;
+ state->packet_count = min(1 << (i << 2), state->packet_count);
+ state->skbs = kcalloc(state->packet_count,
+ sizeof(state->skbs[0]), GFP_KERNEL);
+ if (!state->skbs)
+ return -ENOMEM;
+ state->flush = false;
+
+ netif_dbg(efx, drv, efx->net_dev,
+ "TX queue %d testing %s loopback with %d packets\n",
+ tx_queue->queue, LOOPBACK_MODE(efx),
+ state->packet_count);
+
+ ef4_iterate_state(efx);
+ begin_rc = ef4_begin_loopback(tx_queue);
+
+ /* This will normally complete very quickly, but be
+ * prepared to wait much longer. */
+ msleep(1);
+ if (!ef4_poll_loopback(efx)) {
+ msleep(LOOPBACK_TIMEOUT_MS);
+ ef4_poll_loopback(efx);
+ }
+
+ end_rc = ef4_end_loopback(tx_queue, lb_tests);
+ kfree(state->skbs);
+
+ if (begin_rc || end_rc) {
+ /* Wait a while to ensure there are no packets
+ * floating around after a failure. */
+ schedule_timeout_uninterruptible(HZ / 10);
+ return begin_rc ? begin_rc : end_rc;
+ }
+ }
+
+ netif_dbg(efx, drv, efx->net_dev,
+ "TX queue %d passed %s loopback test with a burst length "
+ "of %d packets\n", tx_queue->queue, LOOPBACK_MODE(efx),
+ state->packet_count);
+
+ return 0;
+}
+
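/* The loop above ramps the burst length geometrically: 1 << (i << 2)
 * gives 1, 16 and 256 packets, each capped at a third of the TX ring.
 * A small model of the schedule, with txq_entries = 512 as an assumed
 * example ring size: */
#include <stdio.h>

int main(void)
{
	unsigned int txq_entries = 512;

	for (int i = 0; i < 3; i++) {
		unsigned int count = txq_entries / 3;

		if ((1u << (i << 2)) < count)
			count = 1u << (i << 2);
		printf("iteration %d: %u packets\n", i, count);
		/* prints 1, 16, then 170 (256 capped at 512/3) */
	}
	return 0;
}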
+/* Wait for link up. On Falcon, we would prefer to rely on ef4_monitor, but
+ * any contention on the mac lock (via e.g. ef4_mac_mcast_work) causes it
+ * to delay and retry. Therefore, it's safer to just poll directly. Wait
+ * for link up and any faults to dissipate. */
+static int ef4_wait_for_link(struct ef4_nic *efx)
+{
+ struct ef4_link_state *link_state = &efx->link_state;
+ int count, link_up_count = 0;
+ bool link_up;
+
+ for (count = 0; count < 40; count++) {
+ schedule_timeout_uninterruptible(HZ / 10);
+
+ if (efx->type->monitor != NULL) {
+ mutex_lock(&efx->mac_lock);
+ efx->type->monitor(efx);
+ mutex_unlock(&efx->mac_lock);
+ }
+
+ mutex_lock(&efx->mac_lock);
+ link_up = link_state->up;
+ if (link_up)
+ link_up = !efx->type->check_mac_fault(efx);
+ mutex_unlock(&efx->mac_lock);
+
+ if (link_up) {
+ if (++link_up_count == 2)
+ return 0;
+ } else {
+ link_up_count = 0;
+ }
+ }
+
+ return -ETIMEDOUT;
+}
+
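/* The polling policy above budgets 40 samples at 100 ms intervals
 * (4 s total) and declares success only after two consecutive link-up
 * reads, which filters out a link that is still bouncing. A userspace
 * model of that policy; link_sample() is a made-up stand-in for
 * reading efx->link_state: */
#include <stdbool.h>
#include <stdio.h>

static bool link_sample(int t) { return t >= 5; } /* up from t = 5 on */

int main(void)
{
	int up_count = 0;

	for (int t = 0; t < 40; t++) {
		if (link_sample(t)) {
			if (++up_count == 2) {
				printf("link stable after %d ms\n",
				       (t + 1) * 100);
				return 0;
			}
		} else {
			up_count = 0;	/* require consecutive successes */
		}
	}
	printf("timed out\n");
	return 1;
}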
+static int ef4_test_loopbacks(struct ef4_nic *efx, struct ef4_self_tests *tests,
+ unsigned int loopback_modes)
+{
+ enum ef4_loopback_mode mode;
+ struct ef4_loopback_state *state;
+ struct ef4_channel *channel =
+ ef4_get_channel(efx, efx->tx_channel_offset);
+ struct ef4_tx_queue *tx_queue;
+ int rc = 0;
+
+ /* Set the port loopback_selftest member. From this point on
+ * all received packets will be dropped. Mark the state as
+ * "flushing" so all in-flight packets are dropped. */
+ state = kzalloc(sizeof(*state), GFP_KERNEL);
+ if (state == NULL)
+ return -ENOMEM;
+ BUG_ON(efx->loopback_selftest);
+ state->flush = true;
+ efx->loopback_selftest = state;
+
+ /* Test all supported loopback modes */
+ for (mode = LOOPBACK_NONE; mode <= LOOPBACK_TEST_MAX; mode++) {
+ if (!(loopback_modes & (1 << mode)))
+ continue;
+
+ /* Move the port into the specified loopback mode. */
+ state->flush = true;
+ mutex_lock(&efx->mac_lock);
+ efx->loopback_mode = mode;
+ rc = __ef4_reconfigure_port(efx);
+ mutex_unlock(&efx->mac_lock);
+ if (rc) {
+ netif_err(efx, drv, efx->net_dev,
+ "unable to move into %s loopback\n",
+ LOOPBACK_MODE(efx));
+ goto out;
+ }
+
+ rc = ef4_wait_for_link(efx);
+ if (rc) {
+ netif_err(efx, drv, efx->net_dev,
+ "loopback %s never came up\n",
+ LOOPBACK_MODE(efx));
+ goto out;
+ }
+
+ /* Test all enabled types of TX queue */
+ ef4_for_each_channel_tx_queue(tx_queue, channel) {
+ state->offload_csum = (tx_queue->queue &
+ EF4_TXQ_TYPE_OFFLOAD);
+ rc = ef4_test_loopback(tx_queue,
+ &tests->loopback[mode]);
+ if (rc)
+ goto out;
+ }
+ }
+
+ out:
+ /* Remove the flush. The caller will remove the loopback setting */
+ state->flush = true;
+ efx->loopback_selftest = NULL;
+ wmb();
+ kfree(state);
+
+ if (rc == -EPERM)
+ rc = 0;
+
+ return rc;
+}
+
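/* The driver walks every bit of the supported-modes mask, configuring
 * the port for each loopback in turn. The shape of that selection
 * loop, with a made-up mode enum standing in for ef4_loopback_mode: */
#include <stdio.h>

enum { MODE_NONE, MODE_DATA, MODE_GMAC, MODE_XGMII, MODE_MAX };
static const char *const names[] = { "NONE", "DATA", "GMAC", "XGMII" };

int main(void)
{
	unsigned int supported = (1 << MODE_DATA) | (1 << MODE_XGMII);

	for (int mode = MODE_NONE; mode < MODE_MAX; mode++) {
		if (!(supported & (1 << mode)))
			continue;	/* mode not wired up on this board */
		printf("testing %s loopback\n", names[mode]);
	}
	return 0;
}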
+/**************************************************************************
+ *
+ * Entry point
+ *
+ *************************************************************************/
+
+int ef4_selftest(struct ef4_nic *efx, struct ef4_self_tests *tests,
+ unsigned flags)
+{
+ enum ef4_loopback_mode loopback_mode = efx->loopback_mode;
+ int phy_mode = efx->phy_mode;
+ int rc_test = 0, rc_reset, rc;
+
+ ef4_selftest_async_cancel(efx);
+
+ /* Online (i.e. non-disruptive) testing
+ * This checks interrupt generation, event delivery and PHY presence. */
+
+ rc = ef4_test_phy_alive(efx, tests);
+ if (rc && !rc_test)
+ rc_test = rc;
+
+ rc = ef4_test_nvram(efx, tests);
+ if (rc && !rc_test)
+ rc_test = rc;
+
+ rc = ef4_test_interrupts(efx, tests);
+ if (rc && !rc_test)
+ rc_test = rc;
+
+ rc = ef4_test_eventq_irq(efx, tests);
+ if (rc && !rc_test)
+ rc_test = rc;
+
+ if (rc_test)
+ return rc_test;
+
+ if (!(flags & ETH_TEST_FL_OFFLINE))
+ return ef4_test_phy(efx, tests, flags);
+
+ /* Offline (i.e. disruptive) testing
+ * This checks MAC and PHY loopback on the specified port. */
+
+ /* Detach the device so the kernel doesn't transmit during the
+ * loopback test and the watchdog timeout doesn't fire.
+ */
+ ef4_device_detach_sync(efx);
+
+ if (efx->type->test_chip) {
+ rc_reset = efx->type->test_chip(efx, tests);
+ if (rc_reset) {
+ netif_err(efx, hw, efx->net_dev,
+ "Unable to recover from chip test\n");
+ ef4_schedule_reset(efx, RESET_TYPE_DISABLE);
+ return rc_reset;
+ }
+
+ if ((tests->memory < 0 || tests->registers < 0) && !rc_test)
+ rc_test = -EIO;
+ }
+
+ /* Ensure that the PHY is powered and out of loopback
+ * for the BIST and loopback tests */
+ mutex_lock(&efx->mac_lock);
+ efx->phy_mode &= ~PHY_MODE_LOW_POWER;
+ efx->loopback_mode = LOOPBACK_NONE;
+ __ef4_reconfigure_port(efx);
+ mutex_unlock(&efx->mac_lock);
+
+ rc = ef4_test_phy(efx, tests, flags);
+ if (rc && !rc_test)
+ rc_test = rc;
+
+ rc = ef4_test_loopbacks(efx, tests, efx->loopback_modes);
+ if (rc && !rc_test)
+ rc_test = rc;
+
+ /* restore the PHY to the previous state */
+ mutex_lock(&efx->mac_lock);
+ efx->phy_mode = phy_mode;
+ efx->loopback_mode = loopback_mode;
+ __ef4_reconfigure_port(efx);
+ mutex_unlock(&efx->mac_lock);
+
+ netif_device_attach(efx->net_dev);
+
+ return rc_test;
+}
+
+void ef4_selftest_async_start(struct ef4_nic *efx)
+{
+ struct ef4_channel *channel;
+
+ ef4_for_each_channel(channel, efx)
+ ef4_nic_event_test_start(channel);
+ schedule_delayed_work(&efx->selftest_work, IRQ_TIMEOUT);
+}
+
+void ef4_selftest_async_cancel(struct ef4_nic *efx)
+{
+ cancel_delayed_work_sync(&efx->selftest_work);
+}
+
+void ef4_selftest_async_work(struct work_struct *data)
+{
+ struct ef4_nic *efx = container_of(data, struct ef4_nic,
+ selftest_work.work);
+ struct ef4_channel *channel;
+ int cpu;
+
+ ef4_for_each_channel(channel, efx) {
+ cpu = ef4_nic_event_test_irq_cpu(channel);
+ if (cpu < 0)
+ netif_err(efx, ifup, efx->net_dev,
+ "channel %d failed to trigger an interrupt\n",
+ channel->channel);
+ else
+ netif_dbg(efx, ifup, efx->net_dev,
+ "channel %d triggered interrupt on CPU %d\n",
+ channel->channel, cpu);
+ }
+}
diff --git a/drivers/net/ethernet/sfc/falcon/selftest.h b/drivers/net/ethernet/sfc/falcon/selftest.h
new file mode 100644
index 000000000000..be52a49c006a
--- /dev/null
+++ b/drivers/net/ethernet/sfc/falcon/selftest.h
@@ -0,0 +1,55 @@
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2005-2006 Fen Systems Ltd.
+ * Copyright 2006-2012 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#ifndef EF4_SELFTEST_H
+#define EF4_SELFTEST_H
+
+#include "net_driver.h"
+
+/*
+ * Self tests
+ */
+
+struct ef4_loopback_self_tests {
+ int tx_sent[EF4_TXQ_TYPES];
+ int tx_done[EF4_TXQ_TYPES];
+ int rx_good;
+ int rx_bad;
+};
+
+#define EF4_MAX_PHY_TESTS 20
+
+/* Efx self test results
+ * For fields which are not counters, 1 indicates success and -1
+ * indicates failure; 0 indicates test could not be run.
+ */
+struct ef4_self_tests {
+ /* online tests */
+ int phy_alive;
+ int nvram;
+ int interrupt;
+ int eventq_dma[EF4_MAX_CHANNELS];
+ int eventq_int[EF4_MAX_CHANNELS];
+ /* offline tests */
+ int memory;
+ int registers;
+ int phy_ext[EF4_MAX_PHY_TESTS];
+ struct ef4_loopback_self_tests loopback[LOOPBACK_TEST_MAX + 1];
+};
+
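/* A small helper sketch for the result convention documented above
 * (1 = passed, -1 = failed, 0 = could not be run); the helper name is
 * illustrative, not part of the driver: */
#include <stdio.h>

static const char *selftest_result_str(int result)
{
	return result > 0 ? "PASS" : result < 0 ? "FAIL" : "not run";
}

int main(void)
{
	int phy_alive = 1, nvram = 0, interrupt = -1;	/* example values */

	printf("phy_alive: %s\n", selftest_result_str(phy_alive));
	printf("nvram:     %s\n", selftest_result_str(nvram));
	printf("interrupt: %s\n", selftest_result_str(interrupt));
	return 0;
}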
+void ef4_loopback_rx_packet(struct ef4_nic *efx, const char *buf_ptr,
+ int pkt_len);
+int ef4_selftest(struct ef4_nic *efx, struct ef4_self_tests *tests,
+ unsigned flags);
+void ef4_selftest_async_start(struct ef4_nic *efx);
+void ef4_selftest_async_cancel(struct ef4_nic *efx);
+void ef4_selftest_async_work(struct work_struct *data);
+
+#endif /* EF4_SELFTEST_H */
diff --git a/drivers/net/ethernet/sfc/tenxpress.c b/drivers/net/ethernet/sfc/falcon/tenxpress.c
index 2c90e6b31575..acc548a1c4d6 100644
--- a/drivers/net/ethernet/sfc/tenxpress.c
+++ b/drivers/net/ethernet/sfc/falcon/tenxpress.c
@@ -143,27 +143,27 @@
#define LNPGA_PDOWN_WAIT (HZ / 5)
struct tenxpress_phy_data {
- enum efx_loopback_mode loopback_mode;
- enum efx_phy_mode phy_mode;
+ enum ef4_loopback_mode loopback_mode;
+ enum ef4_phy_mode phy_mode;
int bad_lp_tries;
};
-static int tenxpress_init(struct efx_nic *efx)
+static int tenxpress_init(struct ef4_nic *efx)
{
/* Enable 312.5 MHz clock */
- efx_mdio_write(efx, MDIO_MMD_PCS, PCS_TEST_SELECT_REG,
+ ef4_mdio_write(efx, MDIO_MMD_PCS, PCS_TEST_SELECT_REG,
1 << CLK312_EN_LBN);
/* Set the LEDs up as: Green = Link, Amber = Link/Act, Red = Off */
- efx_mdio_set_flag(efx, MDIO_MMD_PMAPMD, PMA_PMD_LED_CTRL_REG,
+ ef4_mdio_set_flag(efx, MDIO_MMD_PMAPMD, PMA_PMD_LED_CTRL_REG,
1 << PMA_PMA_LED_ACTIVITY_LBN, true);
- efx_mdio_write(efx, MDIO_MMD_PMAPMD, PMA_PMD_LED_OVERR_REG,
+ ef4_mdio_write(efx, MDIO_MMD_PMAPMD, PMA_PMD_LED_OVERR_REG,
SFX7101_PMA_PMD_LED_DEFAULT);
return 0;
}
-static int tenxpress_phy_probe(struct efx_nic *efx)
+static int tenxpress_phy_probe(struct ef4_nic *efx)
{
struct tenxpress_phy_data *phy_data;
@@ -185,18 +185,18 @@ static int tenxpress_phy_probe(struct efx_nic *efx)
return 0;
}
-static int tenxpress_phy_init(struct efx_nic *efx)
+static int tenxpress_phy_init(struct ef4_nic *efx)
{
int rc;
falcon_board(efx)->type->init_phy(efx);
if (!(efx->phy_mode & PHY_MODE_SPECIAL)) {
- rc = efx_mdio_wait_reset_mmds(efx, TENXPRESS_REQUIRED_DEVS);
+ rc = ef4_mdio_wait_reset_mmds(efx, TENXPRESS_REQUIRED_DEVS);
if (rc < 0)
return rc;
- rc = efx_mdio_check_mmds(efx, TENXPRESS_REQUIRED_DEVS);
+ rc = ef4_mdio_check_mmds(efx, TENXPRESS_REQUIRED_DEVS);
if (rc < 0)
return rc;
}
@@ -206,8 +206,8 @@ static int tenxpress_phy_init(struct efx_nic *efx)
return rc;
/* Reinitialise flow control settings */
- efx_link_set_wanted_fc(efx, efx->wanted_fc);
- efx_mdio_an_reconfigure(efx);
+ ef4_link_set_wanted_fc(efx, efx->wanted_fc);
+ ef4_mdio_an_reconfigure(efx);
schedule_timeout_uninterruptible(HZ / 5); /* 200ms */
@@ -220,7 +220,7 @@ static int tenxpress_phy_init(struct efx_nic *efx)
/* Perform a "special software reset" on the PHY. The caller is
* responsible for saving and restoring the PHY hardware registers
* properly, and masking/unmasking LASI */
-static int tenxpress_special_reset(struct efx_nic *efx)
+static int tenxpress_special_reset(struct ef4_nic *efx)
{
int rc, reg;
@@ -230,14 +230,14 @@ static int tenxpress_special_reset(struct efx_nic *efx)
falcon_stop_nic_stats(efx);
/* Initiate reset */
- reg = efx_mdio_read(efx, MDIO_MMD_PMAPMD, PMA_PMD_XCONTROL_REG);
+ reg = ef4_mdio_read(efx, MDIO_MMD_PMAPMD, PMA_PMD_XCONTROL_REG);
reg |= (1 << PMA_PMD_EXT_SSR_LBN);
- efx_mdio_write(efx, MDIO_MMD_PMAPMD, PMA_PMD_XCONTROL_REG, reg);
+ ef4_mdio_write(efx, MDIO_MMD_PMAPMD, PMA_PMD_XCONTROL_REG, reg);
mdelay(200);
/* Wait for the blocks to come out of reset */
- rc = efx_mdio_wait_reset_mmds(efx, TENXPRESS_REQUIRED_DEVS);
+ rc = ef4_mdio_wait_reset_mmds(efx, TENXPRESS_REQUIRED_DEVS);
if (rc < 0)
goto out;
@@ -253,7 +253,7 @@ out:
return rc;
}
-static void sfx7101_check_bad_lp(struct efx_nic *efx, bool link_ok)
+static void sfx7101_check_bad_lp(struct ef4_nic *efx, bool link_ok)
{
struct tenxpress_phy_data *pd = efx->phy_data;
bool bad_lp;
@@ -263,7 +263,7 @@ static void sfx7101_check_bad_lp(struct efx_nic *efx, bool link_ok)
bad_lp = false;
} else {
/* Check that AN has started but not completed. */
- reg = efx_mdio_read(efx, MDIO_MMD_AN, MDIO_STAT1);
+ reg = ef4_mdio_read(efx, MDIO_MMD_AN, MDIO_STAT1);
if (!(reg & MDIO_AN_STAT1_LPABLE))
return; /* LP status is unknown */
bad_lp = !(reg & MDIO_AN_STAT1_COMPLETE);
@@ -278,7 +278,7 @@ static void sfx7101_check_bad_lp(struct efx_nic *efx, bool link_ok)
/* Use the RX (red) LED as an error indicator once we've seen AN
* failure several times in a row, and also log a message. */
if (!bad_lp || pd->bad_lp_tries == MAX_BAD_LP_TRIES) {
- reg = efx_mdio_read(efx, MDIO_MMD_PMAPMD,
+ reg = ef4_mdio_read(efx, MDIO_MMD_PMAPMD,
PMA_PMD_LED_OVERR_REG);
reg &= ~(PMA_PMD_LED_MASK << PMA_PMD_LED_RX_LBN);
if (!bad_lp) {
@@ -291,35 +291,35 @@ static void sfx7101_check_bad_lp(struct efx_nic *efx, bool link_ok)
" supports 10GBASE-T ONLY, so no link can"
" be established\n");
}
- efx_mdio_write(efx, MDIO_MMD_PMAPMD,
+ ef4_mdio_write(efx, MDIO_MMD_PMAPMD,
PMA_PMD_LED_OVERR_REG, reg);
pd->bad_lp_tries = bad_lp;
}
}
-static bool sfx7101_link_ok(struct efx_nic *efx)
+static bool sfx7101_link_ok(struct ef4_nic *efx)
{
- return efx_mdio_links_ok(efx,
+ return ef4_mdio_links_ok(efx,
MDIO_DEVS_PMAPMD |
MDIO_DEVS_PCS |
MDIO_DEVS_PHYXS);
}
-static void tenxpress_ext_loopback(struct efx_nic *efx)
+static void tenxpress_ext_loopback(struct ef4_nic *efx)
{
- efx_mdio_set_flag(efx, MDIO_MMD_PHYXS, PHYXS_TEST1,
+ ef4_mdio_set_flag(efx, MDIO_MMD_PHYXS, PHYXS_TEST1,
1 << LOOPBACK_NEAR_LBN,
efx->loopback_mode == LOOPBACK_PHYXS);
}
-static void tenxpress_low_power(struct efx_nic *efx)
+static void tenxpress_low_power(struct ef4_nic *efx)
{
- efx_mdio_set_mmds_lpower(
+ ef4_mdio_set_mmds_lpower(
efx, !!(efx->phy_mode & PHY_MODE_LOW_POWER),
TENXPRESS_REQUIRED_DEVS);
}
-static int tenxpress_phy_reconfigure(struct efx_nic *efx)
+static int tenxpress_phy_reconfigure(struct ef4_nic *efx)
{
struct tenxpress_phy_data *phy_data = efx->phy_data;
bool phy_mode_change, loop_reset;
@@ -340,10 +340,10 @@ static int tenxpress_phy_reconfigure(struct efx_nic *efx)
}
tenxpress_low_power(efx);
- efx_mdio_transmit_disable(efx);
- efx_mdio_phy_reconfigure(efx);
+ ef4_mdio_transmit_disable(efx);
+ ef4_mdio_phy_reconfigure(efx);
tenxpress_ext_loopback(efx);
- efx_mdio_an_reconfigure(efx);
+ ef4_mdio_an_reconfigure(efx);
phy_data->loopback_mode = efx->loopback_mode;
phy_data->phy_mode = efx->phy_mode;
@@ -352,30 +352,30 @@ static int tenxpress_phy_reconfigure(struct efx_nic *efx)
}
static void
-tenxpress_get_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd);
+tenxpress_get_settings(struct ef4_nic *efx, struct ethtool_cmd *ecmd);
/* Poll for link state changes */
-static bool tenxpress_phy_poll(struct efx_nic *efx)
+static bool tenxpress_phy_poll(struct ef4_nic *efx)
{
- struct efx_link_state old_state = efx->link_state;
+ struct ef4_link_state old_state = efx->link_state;
efx->link_state.up = sfx7101_link_ok(efx);
efx->link_state.speed = 10000;
efx->link_state.fd = true;
- efx->link_state.fc = efx_mdio_get_pause(efx);
+ efx->link_state.fc = ef4_mdio_get_pause(efx);
sfx7101_check_bad_lp(efx, efx->link_state.up);
- return !efx_link_state_equal(&efx->link_state, &old_state);
+ return !ef4_link_state_equal(&efx->link_state, &old_state);
}
-static void sfx7101_phy_fini(struct efx_nic *efx)
+static void sfx7101_phy_fini(struct ef4_nic *efx)
{
int reg;
/* Power down the LNPGA */
reg = (1 << PMA_PMD_LNPGA_POWERDOWN_LBN);
- efx_mdio_write(efx, MDIO_MMD_PMAPMD, PMA_PMD_XCONTROL_REG, reg);
+ ef4_mdio_write(efx, MDIO_MMD_PMAPMD, PMA_PMD_XCONTROL_REG, reg);
/* Waiting here ensures that the board fini, which can turn
* off the power to the PHY, won't get run until the LNPGA
@@ -383,7 +383,7 @@ static void sfx7101_phy_fini(struct efx_nic *efx)
schedule_timeout_uninterruptible(LNPGA_PDOWN_WAIT); /* 200 ms */
}
-static void tenxpress_phy_remove(struct efx_nic *efx)
+static void tenxpress_phy_remove(struct ef4_nic *efx)
{
kfree(efx->phy_data);
efx->phy_data = NULL;
@@ -391,17 +391,17 @@ static void tenxpress_phy_remove(struct efx_nic *efx)
/* Override the RX, TX and link LEDs */
-void tenxpress_set_id_led(struct efx_nic *efx, enum efx_led_mode mode)
+void tenxpress_set_id_led(struct ef4_nic *efx, enum ef4_led_mode mode)
{
int reg;
switch (mode) {
- case EFX_LED_OFF:
+ case EF4_LED_OFF:
reg = (PMA_PMD_LED_OFF << PMA_PMD_LED_TX_LBN) |
(PMA_PMD_LED_OFF << PMA_PMD_LED_RX_LBN) |
(PMA_PMD_LED_OFF << PMA_PMD_LED_LINK_LBN);
break;
- case EFX_LED_ON:
+ case EF4_LED_ON:
reg = (PMA_PMD_LED_ON << PMA_PMD_LED_TX_LBN) |
(PMA_PMD_LED_ON << PMA_PMD_LED_RX_LBN) |
(PMA_PMD_LED_ON << PMA_PMD_LED_LINK_LBN);
@@ -411,14 +411,14 @@ void tenxpress_set_id_led(struct efx_nic *efx, enum efx_led_mode mode)
break;
}
- efx_mdio_write(efx, MDIO_MMD_PMAPMD, PMA_PMD_LED_OVERR_REG, reg);
+ ef4_mdio_write(efx, MDIO_MMD_PMAPMD, PMA_PMD_LED_OVERR_REG, reg);
}
static const char *const sfx7101_test_names[] = {
"bist"
};
-static const char *sfx7101_test_name(struct efx_nic *efx, unsigned int index)
+static const char *sfx7101_test_name(struct ef4_nic *efx, unsigned int index)
{
if (index < ARRAY_SIZE(sfx7101_test_names))
return sfx7101_test_names[index];
@@ -426,7 +426,7 @@ static const char *sfx7101_test_name(struct efx_nic *efx, unsigned int index)
}
static int
-sfx7101_run_tests(struct efx_nic *efx, int *results, unsigned flags)
+sfx7101_run_tests(struct ef4_nic *efx, int *results, unsigned flags)
{
int rc;
@@ -437,21 +437,21 @@ sfx7101_run_tests(struct efx_nic *efx, int *results, unsigned flags)
rc = tenxpress_special_reset(efx);
results[0] = rc ? -1 : 1;
- efx_mdio_an_reconfigure(efx);
+ ef4_mdio_an_reconfigure(efx);
return rc;
}
static void
-tenxpress_get_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd)
+tenxpress_get_settings(struct ef4_nic *efx, struct ethtool_cmd *ecmd)
{
u32 adv = 0, lpa = 0;
int reg;
- reg = efx_mdio_read(efx, MDIO_MMD_AN, MDIO_AN_10GBT_CTRL);
+ reg = ef4_mdio_read(efx, MDIO_MMD_AN, MDIO_AN_10GBT_CTRL);
if (reg & MDIO_AN_10GBT_CTRL_ADV10G)
adv |= ADVERTISED_10000baseT_Full;
- reg = efx_mdio_read(efx, MDIO_MMD_AN, MDIO_AN_10GBT_STAT);
+ reg = ef4_mdio_read(efx, MDIO_MMD_AN, MDIO_AN_10GBT_STAT);
if (reg & MDIO_AN_10GBT_STAT_LP10G)
lpa |= ADVERTISED_10000baseT_Full;
@@ -463,22 +463,22 @@ tenxpress_get_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd)
ethtool_cmd_speed_set(ecmd, SPEED_10000);
}
-static int tenxpress_set_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd)
+static int tenxpress_set_settings(struct ef4_nic *efx, struct ethtool_cmd *ecmd)
{
if (!ecmd->autoneg)
return -EINVAL;
- return efx_mdio_set_settings(efx, ecmd);
+ return ef4_mdio_set_settings(efx, ecmd);
}
-static void sfx7101_set_npage_adv(struct efx_nic *efx, u32 advertising)
+static void sfx7101_set_npage_adv(struct ef4_nic *efx, u32 advertising)
{
- efx_mdio_set_flag(efx, MDIO_MMD_AN, MDIO_AN_10GBT_CTRL,
+ ef4_mdio_set_flag(efx, MDIO_MMD_AN, MDIO_AN_10GBT_CTRL,
MDIO_AN_10GBT_CTRL_ADV10G,
advertising & ADVERTISED_10000baseT_Full);
}
-const struct efx_phy_operations falcon_sfx7101_phy_ops = {
+const struct ef4_phy_operations falcon_sfx7101_phy_ops = {
.probe = tenxpress_phy_probe,
.init = tenxpress_phy_init,
.reconfigure = tenxpress_phy_reconfigure,
@@ -488,7 +488,7 @@ const struct efx_phy_operations falcon_sfx7101_phy_ops = {
.get_settings = tenxpress_get_settings,
.set_settings = tenxpress_set_settings,
.set_npage_adv = sfx7101_set_npage_adv,
- .test_alive = efx_mdio_test_alive,
+ .test_alive = ef4_mdio_test_alive,
.test_name = sfx7101_test_name,
.run_tests = sfx7101_run_tests,
};
diff --git a/drivers/net/ethernet/sfc/falcon/tx.c b/drivers/net/ethernet/sfc/falcon/tx.c
new file mode 100644
index 000000000000..104fb15a73f2
--- /dev/null
+++ b/drivers/net/ethernet/sfc/falcon/tx.c
@@ -0,0 +1,649 @@
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2005-2006 Fen Systems Ltd.
+ * Copyright 2005-2013 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#include <linux/pci.h>
+#include <linux/tcp.h>
+#include <linux/ip.h>
+#include <linux/in.h>
+#include <linux/ipv6.h>
+#include <linux/slab.h>
+#include <net/ipv6.h>
+#include <linux/if_ether.h>
+#include <linux/highmem.h>
+#include <linux/cache.h>
+#include "net_driver.h"
+#include "efx.h"
+#include "io.h"
+#include "nic.h"
+#include "tx.h"
+#include "workarounds.h"
+
+static inline u8 *ef4_tx_get_copy_buffer(struct ef4_tx_queue *tx_queue,
+ struct ef4_tx_buffer *buffer)
+{
+ unsigned int index = ef4_tx_queue_get_insert_index(tx_queue);
+ struct ef4_buffer *page_buf =
+ &tx_queue->cb_page[index >> (PAGE_SHIFT - EF4_TX_CB_ORDER)];
+ unsigned int offset =
+ ((index << EF4_TX_CB_ORDER) + NET_IP_ALIGN) & (PAGE_SIZE - 1);
+
+ if (unlikely(!page_buf->addr) &&
+ ef4_nic_alloc_buffer(tx_queue->efx, page_buf, PAGE_SIZE,
+ GFP_ATOMIC))
+ return NULL;
+ buffer->dma_addr = page_buf->dma_addr + offset;
+ buffer->unmap_len = 0;
+ return (u8 *)page_buf->addr + offset;
+}
+
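/* Each descriptor slot above owns a (1 << EF4_TX_CB_ORDER)-byte cell
 * inside an array of DMA-coherent pages: the high bits of the insert
 * index select the page, the low bits the cell. A userspace model of
 * that arithmetic, with CB_ORDER = 7 and NET_IP_ALIGN = 2 as assumed
 * example values: */
#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1u << PAGE_SHIFT)
#define CB_ORDER	7	/* 128-byte copy cells (assumed) */
#define NET_IP_ALIGN	2

int main(void)
{
	for (unsigned int index = 30; index < 34; index++) {
		unsigned int page = index >> (PAGE_SHIFT - CB_ORDER);
		unsigned int offset = ((index << CB_ORDER) + NET_IP_ALIGN)
				      & (PAGE_SIZE - 1);
		printf("slot %u -> page %u offset %u\n", index, page, offset);
		/* slots 30..31 land in page 0; slot 32 wraps into page 1 */
	}
	return 0;
}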
+u8 *ef4_tx_get_copy_buffer_limited(struct ef4_tx_queue *tx_queue,
+ struct ef4_tx_buffer *buffer, size_t len)
+{
+ if (len > EF4_TX_CB_SIZE)
+ return NULL;
+ return ef4_tx_get_copy_buffer(tx_queue, buffer);
+}
+
+static void ef4_dequeue_buffer(struct ef4_tx_queue *tx_queue,
+ struct ef4_tx_buffer *buffer,
+ unsigned int *pkts_compl,
+ unsigned int *bytes_compl)
+{
+ if (buffer->unmap_len) {
+ struct device *dma_dev = &tx_queue->efx->pci_dev->dev;
+ dma_addr_t unmap_addr = buffer->dma_addr - buffer->dma_offset;
+ if (buffer->flags & EF4_TX_BUF_MAP_SINGLE)
+ dma_unmap_single(dma_dev, unmap_addr, buffer->unmap_len,
+ DMA_TO_DEVICE);
+ else
+ dma_unmap_page(dma_dev, unmap_addr, buffer->unmap_len,
+ DMA_TO_DEVICE);
+ buffer->unmap_len = 0;
+ }
+
+ if (buffer->flags & EF4_TX_BUF_SKB) {
+ (*pkts_compl)++;
+ (*bytes_compl) += buffer->skb->len;
+ dev_consume_skb_any((struct sk_buff *)buffer->skb);
+ netif_vdbg(tx_queue->efx, tx_done, tx_queue->efx->net_dev,
+ "TX queue %d transmission id %x complete\n",
+ tx_queue->queue, tx_queue->read_count);
+ }
+
+ buffer->len = 0;
+ buffer->flags = 0;
+}
+
+unsigned int ef4_tx_max_skb_descs(struct ef4_nic *efx)
+{
+ /* This is probably too much since we don't have any TSO support;
+ * it's a left-over from when we had Software TSO. But it's safer
+ * to leave it as-is than try to determine a new bound.
+ */
+ /* Header and payload descriptor for each output segment, plus
+ * one for every input fragment boundary within a segment
+ */
+ unsigned int max_descs = EF4_TSO_MAX_SEGS * 2 + MAX_SKB_FRAGS;
+
+ /* Possibly one more per segment for the alignment workaround,
+ * or for option descriptors
+ */
+ if (EF4_WORKAROUND_5391(efx))
+ max_descs += EF4_TSO_MAX_SEGS;
+
+ /* Possibly more for PCIe page boundaries within input fragments */
+ if (PAGE_SIZE > EF4_PAGE_SIZE)
+ max_descs += max_t(unsigned int, MAX_SKB_FRAGS,
+ DIV_ROUND_UP(GSO_MAX_SIZE, EF4_PAGE_SIZE));
+
+ return max_descs;
+}
+
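/* Plugging in plausible numbers shows why the bound is generous. All
 * constants below are assumed examples (the real ones live in
 * net_driver.h and the core stack): */
#include <stdio.h>

int main(void)
{
	unsigned int tso_max_segs = 100;	/* assumed EF4_TSO_MAX_SEGS */
	unsigned int max_skb_frags = 17;	/* typical MAX_SKB_FRAGS */
	unsigned int gso_max_size = 65536;
	unsigned int nic_page_size = 4096;	/* assumed EF4_PAGE_SIZE */

	/* header + payload descriptor per segment, plus one per input
	 * fragment boundary within a segment */
	unsigned int max_descs = tso_max_segs * 2 + max_skb_frags;

	/* one more per segment if the alignment workaround applies */
	max_descs += tso_max_segs;

	/* extra descriptors for NIC page boundaries within fragments,
	 * when the host page size exceeds the NIC page size */
	unsigned int pages = (gso_max_size + nic_page_size - 1)
			     / nic_page_size;	/* DIV_ROUND_UP */
	max_descs += pages > max_skb_frags ? pages : max_skb_frags;

	printf("worst-case descriptors per skb: %u\n", max_descs);
	return 0;
}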
+static void ef4_tx_maybe_stop_queue(struct ef4_tx_queue *txq1)
+{
+ /* We need to consider both queues that the net core sees as one */
+ struct ef4_tx_queue *txq2 = ef4_tx_queue_partner(txq1);
+ struct ef4_nic *efx = txq1->efx;
+ unsigned int fill_level;
+
+ fill_level = max(txq1->insert_count - txq1->old_read_count,
+ txq2->insert_count - txq2->old_read_count);
+ if (likely(fill_level < efx->txq_stop_thresh))
+ return;
+
+ /* We used the stale old_read_count above, which gives us a
+ * pessimistic estimate of the fill level (which may even
+ * validly be >= efx->txq_entries). Now try again using
+ * read_count (more likely to be a cache miss).
+ *
+ * If we read read_count and then conditionally stop the
+ * queue, it is possible for the completion path to race with
+ * us and complete all outstanding descriptors in the middle,
+ * after which there will be no more completions to wake it.
+ * Therefore we stop the queue first, then read read_count
+ * (with a memory barrier to ensure the ordering), then
+ * restart the queue if the fill level turns out to be low
+ * enough.
+ */
+ netif_tx_stop_queue(txq1->core_txq);
+ smp_mb();
+ txq1->old_read_count = ACCESS_ONCE(txq1->read_count);
+ txq2->old_read_count = ACCESS_ONCE(txq2->read_count);
+
+ fill_level = max(txq1->insert_count - txq1->old_read_count,
+ txq2->insert_count - txq2->old_read_count);
+ EF4_BUG_ON_PARANOID(fill_level >= efx->txq_entries);
+ if (likely(fill_level < efx->txq_stop_thresh)) {
+ smp_mb();
+ if (likely(!efx->loopback_selftest))
+ netif_tx_start_queue(txq1->core_txq);
+ }
+}
+
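/* The ordering argument above is the classic stop-then-recheck pattern.
 * A generic C11-atomics rendering of its shape (kernel smp_mb() and
 * netif_tx_*_queue() replaced by portable stand-ins; a sketch, not the
 * driver's code): */
#include <stdatomic.h>
#include <stdbool.h>

struct txq {
	unsigned int insert_count;	/* advanced by the transmit path */
	atomic_uint read_count;		/* advanced by the completion path */
	atomic_bool stopped;
	unsigned int stop_thresh;
};

static void maybe_stop(struct txq *q)
{
	unsigned int fill = q->insert_count -
		atomic_load_explicit(&q->read_count, memory_order_relaxed);

	if (fill < q->stop_thresh)
		return;

	atomic_store(&q->stopped, true);		/* stop first... */
	atomic_thread_fence(memory_order_seq_cst);	/* ...order it... */
	fill = q->insert_count - atomic_load(&q->read_count);
	if (fill < q->stop_thresh)			/* ...then recheck */
		atomic_store(&q->stopped, false);	/* drained: restart */
}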
+static int ef4_enqueue_skb_copy(struct ef4_tx_queue *tx_queue,
+ struct sk_buff *skb)
+{
+ unsigned int min_len = tx_queue->tx_min_size;
+ unsigned int copy_len = skb->len;
+ struct ef4_tx_buffer *buffer;
+ u8 *copy_buffer;
+ int rc;
+
+ EF4_BUG_ON_PARANOID(copy_len > EF4_TX_CB_SIZE);
+
+ buffer = ef4_tx_queue_get_insert_buffer(tx_queue);
+
+ copy_buffer = ef4_tx_get_copy_buffer(tx_queue, buffer);
+ if (unlikely(!copy_buffer))
+ return -ENOMEM;
+
+ rc = skb_copy_bits(skb, 0, copy_buffer, copy_len);
+ EF4_WARN_ON_PARANOID(rc);
+ if (unlikely(copy_len < min_len)) {
+ memset(copy_buffer + copy_len, 0, min_len - copy_len);
+ buffer->len = min_len;
+ } else {
+ buffer->len = copy_len;
+ }
+
+ buffer->skb = skb;
+ buffer->flags = EF4_TX_BUF_SKB;
+
+ ++tx_queue->insert_count;
+ return rc;
+}
+
+static struct ef4_tx_buffer *ef4_tx_map_chunk(struct ef4_tx_queue *tx_queue,
+ dma_addr_t dma_addr,
+ size_t len)
+{
+ const struct ef4_nic_type *nic_type = tx_queue->efx->type;
+ struct ef4_tx_buffer *buffer;
+ unsigned int dma_len;
+
+ /* Map the fragment taking account of NIC-dependent DMA limits. */
+ do {
+ buffer = ef4_tx_queue_get_insert_buffer(tx_queue);
+ dma_len = nic_type->tx_limit_len(tx_queue, dma_addr, len);
+
+ buffer->len = dma_len;
+ buffer->dma_addr = dma_addr;
+ buffer->flags = EF4_TX_BUF_CONT;
+ len -= dma_len;
+ dma_addr += dma_len;
+ ++tx_queue->insert_count;
+ } while (len);
+
+ return buffer;
+}
+
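/* The do/while above splits one DMA mapping into descriptors no longer
 * than the NIC's per-descriptor limit. A userspace model of the
 * chunking; the flat 16 KiB limit is an assumed stand-in for
 * nic_type->tx_limit_len(), which on real hardware can also depend on
 * the address: */
#include <stdint.h>
#include <stdio.h>

#define DMA_LIMIT 16384u	/* assumed example limit */

int main(void)
{
	uint64_t dma_addr = 0x100000;
	unsigned int len = 40000;

	do {
		unsigned int chunk = len < DMA_LIMIT ? len : DMA_LIMIT;

		printf("descriptor: addr %#llx len %u\n",
		       (unsigned long long)dma_addr, chunk);
		dma_addr += chunk;
		len -= chunk;
	} while (len);
	return 0;
}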
+/* Map all data from an SKB for DMA and create descriptors on the queue.
+ */
+static int ef4_tx_map_data(struct ef4_tx_queue *tx_queue, struct sk_buff *skb)
+{
+ struct ef4_nic *efx = tx_queue->efx;
+ struct device *dma_dev = &efx->pci_dev->dev;
+ unsigned int frag_index, nr_frags;
+ dma_addr_t dma_addr, unmap_addr;
+ unsigned short dma_flags;
+ size_t len, unmap_len;
+
+ nr_frags = skb_shinfo(skb)->nr_frags;
+ frag_index = 0;
+
+ /* Map header data. */
+ len = skb_headlen(skb);
+ dma_addr = dma_map_single(dma_dev, skb->data, len, DMA_TO_DEVICE);
+ dma_flags = EF4_TX_BUF_MAP_SINGLE;
+ unmap_len = len;
+ unmap_addr = dma_addr;
+
+ if (unlikely(dma_mapping_error(dma_dev, dma_addr)))
+ return -EIO;
+
+ /* Add descriptors for each fragment. */
+ do {
+ struct ef4_tx_buffer *buffer;
+ skb_frag_t *fragment;
+
+ buffer = ef4_tx_map_chunk(tx_queue, dma_addr, len);
+
+ /* The final descriptor for a fragment is responsible for
+ * unmapping the whole fragment.
+ */
+ buffer->flags = EF4_TX_BUF_CONT | dma_flags;
+ buffer->unmap_len = unmap_len;
+ buffer->dma_offset = buffer->dma_addr - unmap_addr;
+
+ if (frag_index >= nr_frags) {
+ /* Store SKB details with the final buffer for
+ * the completion.
+ */
+ buffer->skb = skb;
+ buffer->flags = EF4_TX_BUF_SKB | dma_flags;
+ return 0;
+ }
+
+ /* Move on to the next fragment. */
+ fragment = &skb_shinfo(skb)->frags[frag_index++];
+ len = skb_frag_size(fragment);
+ dma_addr = skb_frag_dma_map(dma_dev, fragment,
+ 0, len, DMA_TO_DEVICE);
+ dma_flags = 0;
+ unmap_len = len;
+ unmap_addr = dma_addr;
+
+ if (unlikely(dma_mapping_error(dma_dev, dma_addr)))
+ return -EIO;
+ } while (1);
+}
+
+/* Remove buffers put into a tx_queue. None of the buffers may have
+ * an skb attached.
+ */
+static void ef4_enqueue_unwind(struct ef4_tx_queue *tx_queue)
+{
+ struct ef4_tx_buffer *buffer;
+
+ /* Work backwards until we hit the original insert pointer value */
+ while (tx_queue->insert_count != tx_queue->write_count) {
+ --tx_queue->insert_count;
+ buffer = __ef4_tx_queue_get_insert_buffer(tx_queue);
+ ef4_dequeue_buffer(tx_queue, buffer, NULL, NULL);
+ }
+}
+
+/*
+ * Add a socket buffer to a TX queue
+ *
+ * This maps all fragments of a socket buffer for DMA and adds them to
+ * the TX queue. The queue's insert pointer will be incremented by
+ * the number of fragments in the socket buffer.
+ *
+ * If any DMA mapping fails, any mapped fragments will be unmapped
+ * and the queue's insert pointer restored to its original value.
+ *
+ * This function is split out from ef4_hard_start_xmit to allow the
+ * loopback test to direct packets via specific TX queues.
+ *
+ * Returns NETDEV_TX_OK.
+ * You must hold netif_tx_lock() to call this function.
+ */
+netdev_tx_t ef4_enqueue_skb(struct ef4_tx_queue *tx_queue, struct sk_buff *skb)
+{
+ bool data_mapped = false;
+ unsigned int skb_len;
+
+ skb_len = skb->len;
+ EF4_WARN_ON_PARANOID(skb_is_gso(skb));
+
+ if (skb_len < tx_queue->tx_min_size ||
+ (skb->data_len && skb_len <= EF4_TX_CB_SIZE)) {
+ /* Pad short packets or coalesce short fragmented packets. */
+ if (ef4_enqueue_skb_copy(tx_queue, skb))
+ goto err;
+ tx_queue->cb_packets++;
+ data_mapped = true;
+ }
+
+ /* Map for DMA and create descriptors if we haven't done so already. */
+ if (!data_mapped && (ef4_tx_map_data(tx_queue, skb)))
+ goto err;
+
+ /* Update BQL */
+ netdev_tx_sent_queue(tx_queue->core_txq, skb_len);
+
+ /* Pass off to hardware */
+ if (!skb->xmit_more || netif_xmit_stopped(tx_queue->core_txq)) {
+ struct ef4_tx_queue *txq2 = ef4_tx_queue_partner(tx_queue);
+
+ /* There could be packets left on the partner queue if those
+ * SKBs had skb->xmit_more set. If we do not push those they
+ * could be left for a long time and cause a netdev watchdog timeout.
+ */
+ if (txq2->xmit_more_available)
+ ef4_nic_push_buffers(txq2);
+
+ ef4_nic_push_buffers(tx_queue);
+ } else {
+ tx_queue->xmit_more_available = skb->xmit_more;
+ }
+
+ tx_queue->tx_packets++;
+
+ ef4_tx_maybe_stop_queue(tx_queue);
+
+ return NETDEV_TX_OK;
+
+err:
+ ef4_enqueue_unwind(tx_queue);
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+}
+
+/* Remove packets from the TX queue
+ *
+ * This removes packets from the TX queue, up to and including the
+ * specified index.
+ */
+static void ef4_dequeue_buffers(struct ef4_tx_queue *tx_queue,
+ unsigned int index,
+ unsigned int *pkts_compl,
+ unsigned int *bytes_compl)
+{
+ struct ef4_nic *efx = tx_queue->efx;
+ unsigned int stop_index, read_ptr;
+
+ stop_index = (index + 1) & tx_queue->ptr_mask;
+ read_ptr = tx_queue->read_count & tx_queue->ptr_mask;
+
+ while (read_ptr != stop_index) {
+ struct ef4_tx_buffer *buffer = &tx_queue->buffer[read_ptr];
+
+ if (!(buffer->flags & EF4_TX_BUF_OPTION) &&
+ unlikely(buffer->len == 0)) {
+ netif_err(efx, tx_err, efx->net_dev,
+ "TX queue %d spurious TX completion id %x\n",
+ tx_queue->queue, read_ptr);
+ ef4_schedule_reset(efx, RESET_TYPE_TX_SKIP);
+ return;
+ }
+
+ ef4_dequeue_buffer(tx_queue, buffer, pkts_compl, bytes_compl);
+
+ ++tx_queue->read_count;
+ read_ptr = tx_queue->read_count & tx_queue->ptr_mask;
+ }
+}
+
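/* The read pointer above never wraps explicitly: counters increment
 * forever and are masked into the power-of-two ring on use. A small
 * illustration with an assumed 8-entry ring, completing up to index 9
 * from read_count 6 (so the walk wraps from slot 7 to slot 0): */
#include <stdio.h>

int main(void)
{
	unsigned int ptr_mask = 8 - 1;		/* 8-entry ring */
	unsigned int read_count = 6;		/* next slot to complete */
	unsigned int stop = (9 + 1) & ptr_mask;	/* completions up to 9 */
	unsigned int ptr = read_count & ptr_mask;

	while (ptr != stop) {
		printf("complete slot %u\n", ptr);	/* 6, 7, 0, 1 */
		read_count++;
		ptr = read_count & ptr_mask;
	}
	return 0;
}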
+/* Initiate a packet transmission. We use one channel per CPU
+ * (sharing when we have more CPUs than channels). On Falcon, the TX
+ * completion events will be directed back to the CPU that transmitted
+ * the packet, which should be cache-efficient.
+ *
+ * Context: non-blocking.
+ * Note that returning anything other than NETDEV_TX_OK will cause the
+ * OS to free the skb.
+ */
+netdev_tx_t ef4_hard_start_xmit(struct sk_buff *skb,
+ struct net_device *net_dev)
+{
+ struct ef4_nic *efx = netdev_priv(net_dev);
+ struct ef4_tx_queue *tx_queue;
+ unsigned index, type;
+
+ EF4_WARN_ON_PARANOID(!netif_device_present(net_dev));
+
+ index = skb_get_queue_mapping(skb);
+ type = skb->ip_summed == CHECKSUM_PARTIAL ? EF4_TXQ_TYPE_OFFLOAD : 0;
+ if (index >= efx->n_tx_channels) {
+ index -= efx->n_tx_channels;
+ type |= EF4_TXQ_TYPE_HIGHPRI;
+ }
+ tx_queue = ef4_get_tx_queue(efx, index, type);
+
+ return ef4_enqueue_skb(tx_queue, skb);
+}
+
+void ef4_init_tx_queue_core_txq(struct ef4_tx_queue *tx_queue)
+{
+ struct ef4_nic *efx = tx_queue->efx;
+
+ /* Must be inverse of queue lookup in ef4_hard_start_xmit() */
+ tx_queue->core_txq =
+ netdev_get_tx_queue(efx->net_dev,
+ tx_queue->queue / EF4_TXQ_TYPES +
+ ((tx_queue->queue & EF4_TXQ_TYPE_HIGHPRI) ?
+ efx->n_tx_channels : 0));
+}
+
+int ef4_setup_tc(struct net_device *net_dev, u32 handle, __be16 proto,
+ struct tc_to_netdev *ntc)
+{
+ struct ef4_nic *efx = netdev_priv(net_dev);
+ struct ef4_channel *channel;
+ struct ef4_tx_queue *tx_queue;
+ unsigned tc, num_tc;
+ int rc;
+
+ if (ntc->type != TC_SETUP_MQPRIO)
+ return -EINVAL;
+
+ num_tc = ntc->tc;
+
+ if (ef4_nic_rev(efx) < EF4_REV_FALCON_B0 || num_tc > EF4_MAX_TX_TC)
+ return -EINVAL;
+
+ if (num_tc == net_dev->num_tc)
+ return 0;
+
+ for (tc = 0; tc < num_tc; tc++) {
+ net_dev->tc_to_txq[tc].offset = tc * efx->n_tx_channels;
+ net_dev->tc_to_txq[tc].count = efx->n_tx_channels;
+ }
+
+ if (num_tc > net_dev->num_tc) {
+ /* Initialise high-priority queues as necessary */
+ ef4_for_each_channel(channel, efx) {
+ ef4_for_each_possible_channel_tx_queue(tx_queue,
+ channel) {
+ if (!(tx_queue->queue & EF4_TXQ_TYPE_HIGHPRI))
+ continue;
+ if (!tx_queue->buffer) {
+ rc = ef4_probe_tx_queue(tx_queue);
+ if (rc)
+ return rc;
+ }
+ if (!tx_queue->initialised)
+ ef4_init_tx_queue(tx_queue);
+ ef4_init_tx_queue_core_txq(tx_queue);
+ }
+ }
+ } else {
+ /* Reduce number of classes before number of queues */
+ net_dev->num_tc = num_tc;
+ }
+
+ rc = netif_set_real_num_tx_queues(net_dev,
+ max_t(int, num_tc, 1) *
+ efx->n_tx_channels);
+ if (rc)
+ return rc;
+
+ /* Do not destroy high-priority queues when they become
+ * unused. We would have to flush them first, and it is
+ * fairly difficult to flush a subset of TX queues. Leave
+ * it to ef4_fini_channels().
+ */
+
+ net_dev->num_tc = num_tc;
+ return 0;
+}
+
+void ef4_xmit_done(struct ef4_tx_queue *tx_queue, unsigned int index)
+{
+ unsigned fill_level;
+ struct ef4_nic *efx = tx_queue->efx;
+ struct ef4_tx_queue *txq2;
+ unsigned int pkts_compl = 0, bytes_compl = 0;
+
+ EF4_BUG_ON_PARANOID(index > tx_queue->ptr_mask);
+
+ ef4_dequeue_buffers(tx_queue, index, &pkts_compl, &bytes_compl);
+ tx_queue->pkts_compl += pkts_compl;
+ tx_queue->bytes_compl += bytes_compl;
+
+ if (pkts_compl > 1)
+ ++tx_queue->merge_events;
+
+ /* See if we need to restart the netif queue. This memory
+ * barrier ensures that we write read_count (inside
+ * ef4_dequeue_buffers()) before reading the queue status.
+ */
+ smp_mb();
+ if (unlikely(netif_tx_queue_stopped(tx_queue->core_txq)) &&
+ likely(efx->port_enabled) &&
+ likely(netif_device_present(efx->net_dev))) {
+ txq2 = ef4_tx_queue_partner(tx_queue);
+ fill_level = max(tx_queue->insert_count - tx_queue->read_count,
+ txq2->insert_count - txq2->read_count);
+ if (fill_level <= efx->txq_wake_thresh)
+ netif_tx_wake_queue(tx_queue->core_txq);
+ }
+
+ /* Check whether the hardware queue is now empty */
+ if ((int)(tx_queue->read_count - tx_queue->old_write_count) >= 0) {
+ tx_queue->old_write_count = ACCESS_ONCE(tx_queue->write_count);
+ if (tx_queue->read_count == tx_queue->old_write_count) {
+ smp_mb();
+ tx_queue->empty_read_count =
+ tx_queue->read_count | EF4_EMPTY_COUNT_VALID;
+ }
+ }
+}
+
+static unsigned int ef4_tx_cb_page_count(struct ef4_tx_queue *tx_queue)
+{
+ return DIV_ROUND_UP(tx_queue->ptr_mask + 1, PAGE_SIZE >> EF4_TX_CB_ORDER);
+}
+
+int ef4_probe_tx_queue(struct ef4_tx_queue *tx_queue)
+{
+ struct ef4_nic *efx = tx_queue->efx;
+ unsigned int entries;
+ int rc;
+
+ /* Create the smallest power-of-two aligned ring */
+ entries = max(roundup_pow_of_two(efx->txq_entries), EF4_MIN_DMAQ_SIZE);
+ EF4_BUG_ON_PARANOID(entries > EF4_MAX_DMAQ_SIZE);
+ tx_queue->ptr_mask = entries - 1;
+
+ netif_dbg(efx, probe, efx->net_dev,
+ "creating TX queue %d size %#x mask %#x\n",
+ tx_queue->queue, efx->txq_entries, tx_queue->ptr_mask);
+
+ /* Allocate software ring */
+ tx_queue->buffer = kcalloc(entries, sizeof(*tx_queue->buffer),
+ GFP_KERNEL);
+ if (!tx_queue->buffer)
+ return -ENOMEM;
+
+ tx_queue->cb_page = kcalloc(ef4_tx_cb_page_count(tx_queue),
+ sizeof(tx_queue->cb_page[0]), GFP_KERNEL);
+ if (!tx_queue->cb_page) {
+ rc = -ENOMEM;
+ goto fail1;
+ }
+
+ /* Allocate hardware ring */
+ rc = ef4_nic_probe_tx(tx_queue);
+ if (rc)
+ goto fail2;
+
+ return 0;
+
+fail2:
+ kfree(tx_queue->cb_page);
+ tx_queue->cb_page = NULL;
+fail1:
+ kfree(tx_queue->buffer);
+ tx_queue->buffer = NULL;
+ return rc;
+}
+
+void ef4_init_tx_queue(struct ef4_tx_queue *tx_queue)
+{
+ struct ef4_nic *efx = tx_queue->efx;
+
+ netif_dbg(efx, drv, efx->net_dev,
+ "initialising TX queue %d\n", tx_queue->queue);
+
+ tx_queue->insert_count = 0;
+ tx_queue->write_count = 0;
+ tx_queue->old_write_count = 0;
+ tx_queue->read_count = 0;
+ tx_queue->old_read_count = 0;
+ tx_queue->empty_read_count = 0 | EF4_EMPTY_COUNT_VALID;
+ tx_queue->xmit_more_available = false;
+
+ /* Some older hardware requires TX writes larger than 32 bytes. */
+ tx_queue->tx_min_size = EF4_WORKAROUND_15592(efx) ? 33 : 0;
+
+ /* Set up TX descriptor ring */
+ ef4_nic_init_tx(tx_queue);
+
+ tx_queue->initialised = true;
+}
+
+void ef4_fini_tx_queue(struct ef4_tx_queue *tx_queue)
+{
+ struct ef4_tx_buffer *buffer;
+
+ netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
+ "shutting down TX queue %d\n", tx_queue->queue);
+
+ if (!tx_queue->buffer)
+ return;
+
+ /* Free any buffers left in the ring */
+ while (tx_queue->read_count != tx_queue->write_count) {
+ unsigned int pkts_compl = 0, bytes_compl = 0;
+ buffer = &tx_queue->buffer[tx_queue->read_count & tx_queue->ptr_mask];
+ ef4_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl);
+
+ ++tx_queue->read_count;
+ }
+ tx_queue->xmit_more_available = false;
+ netdev_tx_reset_queue(tx_queue->core_txq);
+}
+
+void ef4_remove_tx_queue(struct ef4_tx_queue *tx_queue)
+{
+ int i;
+
+ if (!tx_queue->buffer)
+ return;
+
+ netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
+ "destroying TX queue %d\n", tx_queue->queue);
+ ef4_nic_remove_tx(tx_queue);
+
+ if (tx_queue->cb_page) {
+ for (i = 0; i < ef4_tx_cb_page_count(tx_queue); i++)
+ ef4_nic_free_buffer(tx_queue->efx,
+ &tx_queue->cb_page[i]);
+ kfree(tx_queue->cb_page);
+ tx_queue->cb_page = NULL;
+ }
+
+ kfree(tx_queue->buffer);
+ tx_queue->buffer = NULL;
+}
diff --git a/drivers/net/ethernet/sfc/falcon/tx.h b/drivers/net/ethernet/sfc/falcon/tx.h
new file mode 100644
index 000000000000..a607eb0087a8
--- /dev/null
+++ b/drivers/net/ethernet/sfc/falcon/tx.h
@@ -0,0 +1,27 @@
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2005-2006 Fen Systems Ltd.
+ * Copyright 2006-2015 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#ifndef EF4_TX_H
+#define EF4_TX_H
+
+#include <linux/types.h>
+
+/* Driver internal tx-path related declarations. */
+
+unsigned int ef4_tx_limit_len(struct ef4_tx_queue *tx_queue,
+ dma_addr_t dma_addr, unsigned int len);
+
+u8 *ef4_tx_get_copy_buffer_limited(struct ef4_tx_queue *tx_queue,
+ struct ef4_tx_buffer *buffer, size_t len);
+
+int ef4_enqueue_skb_tso(struct ef4_tx_queue *tx_queue, struct sk_buff *skb,
+ bool *data_mapped);
+
+#endif /* EF4_TX_H */
diff --git a/drivers/net/ethernet/sfc/txc43128_phy.c b/drivers/net/ethernet/sfc/falcon/txc43128_phy.c
index 194f67d9f3bf..18421f5e880f 100644
--- a/drivers/net/ethernet/sfc/txc43128_phy.c
+++ b/drivers/net/ethernet/sfc/falcon/txc43128_phy.c
@@ -158,8 +158,8 @@
struct txc43128_data {
unsigned long bug10934_timer;
- enum efx_phy_mode phy_mode;
- enum efx_loopback_mode loopback_mode;
+ enum ef4_phy_mode phy_mode;
+ enum ef4_loopback_mode loopback_mode;
};
/* The PHY sometimes needs a reset to bring the link back up. So long as
@@ -168,32 +168,32 @@ struct txc43128_data {
#define BUG10934_RESET_INTERVAL (5 * HZ)
/* Perform a reset that doesn't clear configuration changes */
-static void txc_reset_logic(struct efx_nic *efx);
+static void txc_reset_logic(struct ef4_nic *efx);
/* Set the output value of a gpio */
-void falcon_txc_set_gpio_val(struct efx_nic *efx, int pin, int on)
+void falcon_txc_set_gpio_val(struct ef4_nic *efx, int pin, int on)
{
- efx_mdio_set_flag(efx, MDIO_MMD_PHYXS, TXC_GPIO_OUTPUT, 1 << pin, on);
+ ef4_mdio_set_flag(efx, MDIO_MMD_PHYXS, TXC_GPIO_OUTPUT, 1 << pin, on);
}
/* Set up the GPIO direction register */
-void falcon_txc_set_gpio_dir(struct efx_nic *efx, int pin, int dir)
+void falcon_txc_set_gpio_dir(struct ef4_nic *efx, int pin, int dir)
{
- efx_mdio_set_flag(efx, MDIO_MMD_PHYXS, TXC_GPIO_DIR, 1 << pin, dir);
+ ef4_mdio_set_flag(efx, MDIO_MMD_PHYXS, TXC_GPIO_DIR, 1 << pin, dir);
}
/* Reset the PMA/PMD MMD. The documentation is explicit that this does a
* global reset (it's less clear what reset of other MMDs does).*/
-static int txc_reset_phy(struct efx_nic *efx)
+static int txc_reset_phy(struct ef4_nic *efx)
{
- int rc = efx_mdio_reset_mmd(efx, MDIO_MMD_PMAPMD,
+ int rc = ef4_mdio_reset_mmd(efx, MDIO_MMD_PMAPMD,
TXC_MAX_RESET_TIME / TXC_RESET_WAIT,
TXC_RESET_WAIT);
if (rc < 0)
goto fail;
/* Check that all the MMDs we expect are present and responding. */
- rc = efx_mdio_check_mmds(efx, TXC_REQUIRED_DEVS);
+ rc = ef4_mdio_check_mmds(efx, TXC_REQUIRED_DEVS);
if (rc < 0)
goto fail;
@@ -205,28 +205,28 @@ fail:
}
/* Run a single BIST on one MMD */
-static int txc_bist_one(struct efx_nic *efx, int mmd, int test)
+static int txc_bist_one(struct ef4_nic *efx, int mmd, int test)
{
int ctrl, bctl;
int lane;
int rc = 0;
/* Set PMA to test into loopback using Mt Diablo reg as per app note */
- ctrl = efx_mdio_read(efx, MDIO_MMD_PCS, TXC_MTDIABLO_CTRL);
+ ctrl = ef4_mdio_read(efx, MDIO_MMD_PCS, TXC_MTDIABLO_CTRL);
ctrl |= (1 << TXC_MTDIABLO_CTRL_PMA_LOOP_LBN);
- efx_mdio_write(efx, MDIO_MMD_PCS, TXC_MTDIABLO_CTRL, ctrl);
+ ef4_mdio_write(efx, MDIO_MMD_PCS, TXC_MTDIABLO_CTRL, ctrl);
/* The BIST app. note lists these as 3 distinct steps. */
/* Set the BIST type */
bctl = (test << TXC_BIST_CTRL_TYPE_LBN);
- efx_mdio_write(efx, mmd, TXC_BIST_CTL, bctl);
+ ef4_mdio_write(efx, mmd, TXC_BIST_CTL, bctl);
/* Set the BSTEN bit in the BIST Control register to enable */
bctl |= (1 << TXC_BIST_CTRL_ENAB_LBN);
- efx_mdio_write(efx, mmd, TXC_BIST_CTL, bctl);
+ ef4_mdio_write(efx, mmd, TXC_BIST_CTL, bctl);
/* Set the BSTRT bit in the BIST Control register */
- efx_mdio_write(efx, mmd, TXC_BIST_CTL,
+ ef4_mdio_write(efx, mmd, TXC_BIST_CTL,
bctl | (1 << TXC_BIST_CTRL_STRT_LBN));
/* Wait. */
@@ -234,22 +234,22 @@ static int txc_bist_one(struct efx_nic *efx, int mmd, int test)
/* Set the BSTOP bit in the BIST Control register */
bctl |= (1 << TXC_BIST_CTRL_STOP_LBN);
- efx_mdio_write(efx, mmd, TXC_BIST_CTL, bctl);
+ ef4_mdio_write(efx, mmd, TXC_BIST_CTL, bctl);
/* The STOP bit should go off when things have stopped */
while (bctl & (1 << TXC_BIST_CTRL_STOP_LBN))
- bctl = efx_mdio_read(efx, mmd, TXC_BIST_CTL);
+ bctl = ef4_mdio_read(efx, mmd, TXC_BIST_CTL);
/* Check all the error counts are 0 and all the frame counts are
non-zero */
for (lane = 0; lane < 4; lane++) {
- int count = efx_mdio_read(efx, mmd, TXC_BIST_RX0ERRCNT + lane);
+ int count = ef4_mdio_read(efx, mmd, TXC_BIST_RX0ERRCNT + lane);
if (count != 0) {
netif_err(efx, hw, efx->net_dev, TXCNAME": BIST error. "
"Lane %d had %d errs\n", lane, count);
rc = -EIO;
}
- count = efx_mdio_read(efx, mmd, TXC_BIST_RX0FRMCNT + lane);
+ count = ef4_mdio_read(efx, mmd, TXC_BIST_RX0FRMCNT + lane);
if (count == 0) {
netif_err(efx, hw, efx->net_dev, TXCNAME": BIST error. "
"Lane %d got 0 frames\n", lane);
@@ -261,23 +261,23 @@ static int txc_bist_one(struct efx_nic *efx, int mmd, int test)
netif_info(efx, hw, efx->net_dev, TXCNAME": BIST pass\n");
/* Disable BIST */
- efx_mdio_write(efx, mmd, TXC_BIST_CTL, 0);
+ ef4_mdio_write(efx, mmd, TXC_BIST_CTL, 0);
/* Turn off loopback */
ctrl &= ~(1 << TXC_MTDIABLO_CTRL_PMA_LOOP_LBN);
- efx_mdio_write(efx, MDIO_MMD_PCS, TXC_MTDIABLO_CTRL, ctrl);
+ ef4_mdio_write(efx, MDIO_MMD_PCS, TXC_MTDIABLO_CTRL, ctrl);
return rc;
}
-static int txc_bist(struct efx_nic *efx)
+static int txc_bist(struct ef4_nic *efx)
{
return txc_bist_one(efx, MDIO_MMD_PCS, TXC_BIST_CTRL_TYPE_TSD);
}
/* Push the non-configurable defaults into the PHY. This must be
* done after every full reset */
-static void txc_apply_defaults(struct efx_nic *efx)
+static void txc_apply_defaults(struct ef4_nic *efx)
{
int mctrl;
@@ -287,33 +287,33 @@ static void txc_apply_defaults(struct efx_nic *efx)
* saves a picowatt or two */
/* Turn off preemphasis */
- efx_mdio_write(efx, MDIO_MMD_PHYXS, TXC_ALRGS_ATXPRE0, TXC_ATXPRE_NONE);
- efx_mdio_write(efx, MDIO_MMD_PHYXS, TXC_ALRGS_ATXPRE1, TXC_ATXPRE_NONE);
+ ef4_mdio_write(efx, MDIO_MMD_PHYXS, TXC_ALRGS_ATXPRE0, TXC_ATXPRE_NONE);
+ ef4_mdio_write(efx, MDIO_MMD_PHYXS, TXC_ALRGS_ATXPRE1, TXC_ATXPRE_NONE);
/* Turn down the amplitude */
- efx_mdio_write(efx, MDIO_MMD_PHYXS,
+ ef4_mdio_write(efx, MDIO_MMD_PHYXS,
TXC_ALRGS_ATXAMP0, TXC_ATXAMP_0820_BOTH);
- efx_mdio_write(efx, MDIO_MMD_PHYXS,
+ ef4_mdio_write(efx, MDIO_MMD_PHYXS,
TXC_ALRGS_ATXAMP1, TXC_ATXAMP_0820_BOTH);
/* Set the line side amplitude and preemphasis to the databook
* defaults as an erratum causes them to be 0 on at least some
* PHY rev.s */
- efx_mdio_write(efx, MDIO_MMD_PMAPMD,
+ ef4_mdio_write(efx, MDIO_MMD_PMAPMD,
TXC_ALRGS_ATXPRE0, TXC_ATXPRE_DEFAULT);
- efx_mdio_write(efx, MDIO_MMD_PMAPMD,
+ ef4_mdio_write(efx, MDIO_MMD_PMAPMD,
TXC_ALRGS_ATXPRE1, TXC_ATXPRE_DEFAULT);
- efx_mdio_write(efx, MDIO_MMD_PMAPMD,
+ ef4_mdio_write(efx, MDIO_MMD_PMAPMD,
TXC_ALRGS_ATXAMP0, TXC_ATXAMP_DEFAULT);
- efx_mdio_write(efx, MDIO_MMD_PMAPMD,
+ ef4_mdio_write(efx, MDIO_MMD_PMAPMD,
TXC_ALRGS_ATXAMP1, TXC_ATXAMP_DEFAULT);
/* Set up the LEDs */
- mctrl = efx_mdio_read(efx, MDIO_MMD_PHYXS, TXC_MRGS_CTL);
+ mctrl = ef4_mdio_read(efx, MDIO_MMD_PHYXS, TXC_MRGS_CTL);
/* Set the Green and Red LEDs to their default modes */
mctrl &= ~((1 << TXC_MCTL_TXLED_LBN) | (1 << TXC_MCTL_RXLED_LBN));
- efx_mdio_write(efx, MDIO_MMD_PHYXS, TXC_MRGS_CTL, mctrl);
+ ef4_mdio_write(efx, MDIO_MMD_PHYXS, TXC_MRGS_CTL, mctrl);
/* Databook recommends doing this after configuration changes */
txc_reset_logic(efx);
@@ -321,7 +321,7 @@ static void txc_apply_defaults(struct efx_nic *efx)
falcon_board(efx)->type->init_phy(efx);
}
-static int txc43128_phy_probe(struct efx_nic *efx)
+static int txc43128_phy_probe(struct ef4_nic *efx)
{
struct txc43128_data *phy_data;
@@ -341,7 +341,7 @@ static int txc43128_phy_probe(struct efx_nic *efx)
}
/* Initialisation entry point for this PHY driver */
-static int txc43128_phy_init(struct efx_nic *efx)
+static int txc43128_phy_init(struct ef4_nic *efx)
{
int rc;
@@ -359,28 +359,28 @@ static int txc43128_phy_init(struct efx_nic *efx)
}
/* Set the lane power down state in the global registers */
-static void txc_glrgs_lane_power(struct efx_nic *efx, int mmd)
+static void txc_glrgs_lane_power(struct ef4_nic *efx, int mmd)
{
int pd = (1 << TXC_GLCMD_L01PD_LBN) | (1 << TXC_GLCMD_L23PD_LBN);
- int ctl = efx_mdio_read(efx, mmd, TXC_GLRGS_GLCMD);
+ int ctl = ef4_mdio_read(efx, mmd, TXC_GLRGS_GLCMD);
if (!(efx->phy_mode & PHY_MODE_LOW_POWER))
ctl &= ~pd;
else
ctl |= pd;
- efx_mdio_write(efx, mmd, TXC_GLRGS_GLCMD, ctl);
+ ef4_mdio_write(efx, mmd, TXC_GLRGS_GLCMD, ctl);
}
/* Set the lane power down state in the analog control registers */
-static void txc_analog_lane_power(struct efx_nic *efx, int mmd)
+static void txc_analog_lane_power(struct ef4_nic *efx, int mmd)
{
int txpd = (1 << TXC_ATXCTL_TXPD3_LBN) | (1 << TXC_ATXCTL_TXPD2_LBN)
| (1 << TXC_ATXCTL_TXPD1_LBN) | (1 << TXC_ATXCTL_TXPD0_LBN);
int rxpd = (1 << TXC_ARXCTL_RXPD3_LBN) | (1 << TXC_ARXCTL_RXPD2_LBN)
| (1 << TXC_ARXCTL_RXPD1_LBN) | (1 << TXC_ARXCTL_RXPD0_LBN);
- int txctl = efx_mdio_read(efx, mmd, TXC_ALRGS_ATXCTL);
- int rxctl = efx_mdio_read(efx, mmd, TXC_ALRGS_ARXCTL);
+ int txctl = ef4_mdio_read(efx, mmd, TXC_ALRGS_ATXCTL);
+ int rxctl = ef4_mdio_read(efx, mmd, TXC_ALRGS_ARXCTL);
if (!(efx->phy_mode & PHY_MODE_LOW_POWER)) {
txctl &= ~txpd;
@@ -390,14 +390,14 @@ static void txc_analog_lane_power(struct efx_nic *efx, int mmd)
rxctl |= rxpd;
}
- efx_mdio_write(efx, mmd, TXC_ALRGS_ATXCTL, txctl);
- efx_mdio_write(efx, mmd, TXC_ALRGS_ARXCTL, rxctl);
+ ef4_mdio_write(efx, mmd, TXC_ALRGS_ATXCTL, txctl);
+ ef4_mdio_write(efx, mmd, TXC_ALRGS_ARXCTL, rxctl);
}
-static void txc_set_power(struct efx_nic *efx)
+static void txc_set_power(struct ef4_nic *efx)
{
/* According to the data book, all the MMDs can do low power */
- efx_mdio_set_mmds_lpower(efx,
+ ef4_mdio_set_mmds_lpower(efx,
!!(efx->phy_mode & PHY_MODE_LOW_POWER),
TXC_REQUIRED_DEVS);
@@ -411,15 +411,15 @@ static void txc_set_power(struct efx_nic *efx)
txc_analog_lane_power(efx, MDIO_MMD_PHYXS);
}
-static void txc_reset_logic_mmd(struct efx_nic *efx, int mmd)
+static void txc_reset_logic_mmd(struct ef4_nic *efx, int mmd)
{
- int val = efx_mdio_read(efx, mmd, TXC_GLRGS_GLCMD);
+ int val = ef4_mdio_read(efx, mmd, TXC_GLRGS_GLCMD);
int tries = 50;
val |= (1 << TXC_GLCMD_LMTSWRST_LBN);
- efx_mdio_write(efx, mmd, TXC_GLRGS_GLCMD, val);
+ ef4_mdio_write(efx, mmd, TXC_GLRGS_GLCMD, val);
while (--tries) {
- val = efx_mdio_read(efx, mmd, TXC_GLRGS_GLCMD);
+ val = ef4_mdio_read(efx, mmd, TXC_GLRGS_GLCMD);
if (!(val & (1 << TXC_GLCMD_LMTSWRST_LBN)))
break;
udelay(1);
@@ -431,7 +431,7 @@ static void txc_reset_logic_mmd(struct efx_nic *efx, int mmd)
/* Perform a logic reset. This preserves the configuration registers
* and is needed for some configuration changes to take effect */
-static void txc_reset_logic(struct efx_nic *efx)
+static void txc_reset_logic(struct ef4_nic *efx)
{
/* The data sheet claims we can do the logic reset on either the
* PCS or the PHYXS and the result is a reset of both host- and
@@ -439,15 +439,15 @@ static void txc_reset_logic(struct efx_nic *efx)
txc_reset_logic_mmd(efx, MDIO_MMD_PCS);
}
-static bool txc43128_phy_read_link(struct efx_nic *efx)
+static bool txc43128_phy_read_link(struct ef4_nic *efx)
{
- return efx_mdio_links_ok(efx, TXC_REQUIRED_DEVS);
+ return ef4_mdio_links_ok(efx, TXC_REQUIRED_DEVS);
}
-static int txc43128_phy_reconfigure(struct efx_nic *efx)
+static int txc43128_phy_reconfigure(struct ef4_nic *efx)
{
struct txc43128_data *phy_data = efx->phy_data;
- enum efx_phy_mode mode_change = efx->phy_mode ^ phy_data->phy_mode;
+ enum ef4_phy_mode mode_change = efx->phy_mode ^ phy_data->phy_mode;
bool loop_change = LOOPBACK_CHANGED(phy_data, efx, TXC_LOOPBACKS);
if (efx->phy_mode & mode_change & PHY_MODE_TX_DISABLED) {
@@ -457,8 +457,8 @@ static int txc43128_phy_reconfigure(struct efx_nic *efx)
mode_change &= ~PHY_MODE_TX_DISABLED;
}
- efx_mdio_transmit_disable(efx);
- efx_mdio_phy_reconfigure(efx);
+ ef4_mdio_transmit_disable(efx);
+ ef4_mdio_phy_reconfigure(efx);
if (mode_change & PHY_MODE_LOW_POWER)
txc_set_power(efx);
@@ -475,13 +475,13 @@ static int txc43128_phy_reconfigure(struct efx_nic *efx)
return 0;
}
-static void txc43128_phy_fini(struct efx_nic *efx)
+static void txc43128_phy_fini(struct ef4_nic *efx)
{
/* Disable link events */
- efx_mdio_write(efx, MDIO_MMD_PMAPMD, MDIO_PMA_LASI_CTRL, 0);
+ ef4_mdio_write(efx, MDIO_MMD_PMAPMD, MDIO_PMA_LASI_CTRL, 0);
}
-static void txc43128_phy_remove(struct efx_nic *efx)
+static void txc43128_phy_remove(struct ef4_nic *efx)
{
kfree(efx->phy_data);
efx->phy_data = NULL;
@@ -489,7 +489,7 @@ static void txc43128_phy_remove(struct efx_nic *efx)
/* Periodic callback: this exists mainly to poll link status as we
* don't use LASI interrupts */
-static bool txc43128_phy_poll(struct efx_nic *efx)
+static bool txc43128_phy_poll(struct ef4_nic *efx)
{
struct txc43128_data *data = efx->phy_data;
bool was_up = efx->link_state.up;
@@ -516,14 +516,14 @@ static const char *const txc43128_test_names[] = {
"bist"
};
-static const char *txc43128_test_name(struct efx_nic *efx, unsigned int index)
+static const char *txc43128_test_name(struct ef4_nic *efx, unsigned int index)
{
if (index < ARRAY_SIZE(txc43128_test_names))
return txc43128_test_names[index];
return NULL;
}
-static int txc43128_run_tests(struct efx_nic *efx, int *results, unsigned flags)
+static int txc43128_run_tests(struct ef4_nic *efx, int *results, unsigned flags)
{
int rc;
@@ -540,12 +540,12 @@ static int txc43128_run_tests(struct efx_nic *efx, int *results, unsigned flags)
return rc;
}
-static void txc43128_get_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd)
+static void txc43128_get_settings(struct ef4_nic *efx, struct ethtool_cmd *ecmd)
{
mdio45_ethtool_gset(&efx->mdio, ecmd);
}
-const struct efx_phy_operations falcon_txc_phy_ops = {
+const struct ef4_phy_operations falcon_txc_phy_ops = {
.probe = txc43128_phy_probe,
.init = txc43128_phy_init,
.reconfigure = txc43128_phy_reconfigure,
@@ -553,8 +553,8 @@ const struct efx_phy_operations falcon_txc_phy_ops = {
.fini = txc43128_phy_fini,
.remove = txc43128_phy_remove,
.get_settings = txc43128_get_settings,
- .set_settings = efx_mdio_set_settings,
- .test_alive = efx_mdio_test_alive,
+ .set_settings = ef4_mdio_set_settings,
+ .test_alive = ef4_mdio_test_alive,
.run_tests = txc43128_run_tests,
.test_name = txc43128_test_name,
};
diff --git a/drivers/net/ethernet/sfc/falcon/workarounds.h b/drivers/net/ethernet/sfc/falcon/workarounds.h
new file mode 100644
index 000000000000..6af800bc9633
--- /dev/null
+++ b/drivers/net/ethernet/sfc/falcon/workarounds.h
@@ -0,0 +1,44 @@
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2006-2013 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#ifndef EF4_WORKAROUNDS_H
+#define EF4_WORKAROUNDS_H
+
+/*
+ * Hardware workarounds.
+ * Bug numbers are from Solarflare's Bugzilla.
+ */
+
+#define EF4_WORKAROUND_FALCON_A(efx) (ef4_nic_rev(efx) <= EF4_REV_FALCON_A1)
+#define EF4_WORKAROUND_FALCON_AB(efx) (ef4_nic_rev(efx) <= EF4_REV_FALCON_B0)
+#define EF4_WORKAROUND_10G(efx) 1
+
+/* Bit-bashed I2C reads cause performance drop */
+#define EF4_WORKAROUND_7884 EF4_WORKAROUND_10G
+/* Truncated IPv4 packets can confuse the TX packet parser */
+#define EF4_WORKAROUND_15592 EF4_WORKAROUND_FALCON_AB
+
+/* Spurious parity errors in TSORT buffers */
+#define EF4_WORKAROUND_5129 EF4_WORKAROUND_FALCON_A
+/* Unaligned read request >512 bytes after aligning may break TSORT */
+#define EF4_WORKAROUND_5391 EF4_WORKAROUND_FALCON_A
+/* iSCSI parsing errors */
+#define EF4_WORKAROUND_5583 EF4_WORKAROUND_FALCON_A
+/* RX events go missing */
+#define EF4_WORKAROUND_5676 EF4_WORKAROUND_FALCON_A
+/* RX_RESET on A1 */
+#define EF4_WORKAROUND_6555 EF4_WORKAROUND_FALCON_A
+/* Increase filter depth to avoid RX_RESET */
+#define EF4_WORKAROUND_7244 EF4_WORKAROUND_FALCON_A
+/* Flushes may never complete */
+#define EF4_WORKAROUND_7803 EF4_WORKAROUND_FALCON_AB
+/* Leak overlength packets rather than free */
+#define EF4_WORKAROUND_8071 EF4_WORKAROUND_FALCON_A
+
+#endif /* EF4_WORKAROUNDS_H */
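
The EF4_WORKAROUND_* predicates above gate bug workarounds on silicon revision at run time. As a rough illustration only, here is a minimal userspace sketch in which struct ef4_nic, ef4_nic_rev() and the EF4_REV_* constants are mocked up (the real definitions live elsewhere in the driver), showing how an A1-only workaround and an A/B-wide one evaluate on each revision:

#include <stdio.h>

enum { EF4_REV_FALCON_A1 = 1, EF4_REV_FALCON_B0 = 2 };	/* mocked values */

struct ef4_nic { int rev; };	/* mocked; the real struct is much larger */

static int ef4_nic_rev(struct ef4_nic *efx) { return efx->rev; }

#define EF4_WORKAROUND_FALCON_A(efx)  (ef4_nic_rev(efx) <= EF4_REV_FALCON_A1)
#define EF4_WORKAROUND_FALCON_AB(efx) (ef4_nic_rev(efx) <= EF4_REV_FALCON_B0)

#define EF4_WORKAROUND_5676 EF4_WORKAROUND_FALCON_A	/* RX events go missing */
#define EF4_WORKAROUND_7803 EF4_WORKAROUND_FALCON_AB	/* flushes may never complete */

int main(void)
{
	struct ef4_nic a1 = { EF4_REV_FALCON_A1 };
	struct ef4_nic b0 = { EF4_REV_FALCON_B0 };

	/* A1 silicon needs both workarounds; B0 only the A/B-wide one. */
	printf("A1: 5676=%d 7803=%d\n",
	       EF4_WORKAROUND_5676(&a1), EF4_WORKAROUND_7803(&a1));
	printf("B0: 5676=%d 7803=%d\n",
	       EF4_WORKAROUND_5676(&b0), EF4_WORKAROUND_7803(&b0));
	return 0;
}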
diff --git a/drivers/net/ethernet/sfc/farch.c b/drivers/net/ethernet/sfc/farch.c
index 4762ec444cb8..e4ca2161af70 100644
--- a/drivers/net/ethernet/sfc/farch.c
+++ b/drivers/net/ethernet/sfc/farch.c
@@ -25,7 +25,7 @@
#include "io.h"
#include "workarounds.h"
-/* Falcon-architecture (SFC4000 and SFC9000-family) support */
+/* Falcon-architecture (SFC9000-family) support */
/**************************************************************************
*
@@ -177,7 +177,7 @@ efx_init_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer)
dma_addr_t dma_addr;
int i;
- EFX_BUG_ON_PARANOID(!buffer->buf.addr);
+ EFX_WARN_ON_PARANOID(!buffer->buf.addr);
/* Write buffer descriptors to NIC */
for (i = 0; i < buffer->entries; i++) {
@@ -332,7 +332,7 @@ void efx_farch_tx_write(struct efx_tx_queue *tx_queue)
txd = efx_tx_desc(tx_queue, write_ptr);
++tx_queue->write_count;
- EFX_BUG_ON_PARANOID(buffer->flags & EFX_TX_BUF_OPTION);
+ EFX_WARN_ON_ONCE_PARANOID(buffer->flags & EFX_TX_BUF_OPTION);
/* Create TX descriptor ring entry */
BUILD_BUG_ON(EFX_TX_BUF_CONT != 1);
@@ -356,6 +356,18 @@ void efx_farch_tx_write(struct efx_tx_queue *tx_queue)
}
}
+unsigned int efx_farch_tx_limit_len(struct efx_tx_queue *tx_queue,
+ dma_addr_t dma_addr, unsigned int len)
+{
+ /* Don't cross 4K boundaries with descriptors. */
+ unsigned int limit = (~dma_addr & (EFX_PAGE_SIZE - 1)) + 1;
+
+ len = min(limit, len);
+
+ return len;
+}
+
+
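The newly added efx_farch_tx_limit_len() above clamps a descriptor so that it never crosses a 4K boundary: (~dma_addr & (EFX_PAGE_SIZE - 1)) + 1 is exactly the number of bytes from dma_addr up to the next boundary. A minimal standalone sketch of that arithmetic, assuming EFX_PAGE_SIZE is 4096:

#include <stdio.h>
#include <stdint.h>

#define EFX_PAGE_SIZE 4096u	/* assumed value for illustration */

static unsigned int tx_limit_len(uint64_t dma_addr, unsigned int len)
{
	/* Bytes remaining before the next 4K boundary. */
	unsigned int limit = (unsigned int)(~dma_addr & (EFX_PAGE_SIZE - 1)) + 1;

	return len < limit ? len : limit;
}

int main(void)
{
	/* 0x1F00 is 256 bytes short of the 0x2000 boundary, so a 512-byte
	 * fragment must be split at 256 bytes. */
	printf("%u\n", tx_limit_len(0x1F00, 512));	/* prints 256 */
	/* An aligned address can take a full 4096 bytes. */
	printf("%u\n", tx_limit_len(0x2000, 8192));	/* prints 4096 */
	return 0;
}
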
/* Allocate hardware resources for a TX queue */
int efx_farch_tx_probe(struct efx_tx_queue *tx_queue)
{
@@ -369,6 +381,7 @@ int efx_farch_tx_probe(struct efx_tx_queue *tx_queue)
void efx_farch_tx_init(struct efx_tx_queue *tx_queue)
{
+ int csum = tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD;
struct efx_nic *efx = tx_queue->efx;
efx_oword_t reg;
@@ -390,37 +403,18 @@ void efx_farch_tx_init(struct efx_tx_queue *tx_queue)
FRF_AZ_TX_DESCQ_TYPE, 0,
FRF_BZ_TX_NON_IP_DROP_DIS, 1);
- if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
- int csum = tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD;
- EFX_SET_OWORD_FIELD(reg, FRF_BZ_TX_IP_CHKSM_DIS, !csum);
- EFX_SET_OWORD_FIELD(reg, FRF_BZ_TX_TCP_CHKSM_DIS,
- !csum);
- }
+ EFX_SET_OWORD_FIELD(reg, FRF_BZ_TX_IP_CHKSM_DIS, !csum);
+ EFX_SET_OWORD_FIELD(reg, FRF_BZ_TX_TCP_CHKSM_DIS, !csum);
efx_writeo_table(efx, &reg, efx->type->txd_ptr_tbl_base,
tx_queue->queue);
- if (efx_nic_rev(efx) < EFX_REV_FALCON_B0) {
- /* Only 128 bits in this register */
- BUILD_BUG_ON(EFX_MAX_TX_QUEUES > 128);
-
- efx_reado(efx, &reg, FR_AA_TX_CHKSM_CFG);
- if (tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD)
- __clear_bit_le(tx_queue->queue, &reg);
- else
- __set_bit_le(tx_queue->queue, &reg);
- efx_writeo(efx, &reg, FR_AA_TX_CHKSM_CFG);
- }
-
- if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
- EFX_POPULATE_OWORD_1(reg,
- FRF_BZ_TX_PACE,
- (tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI) ?
- FFE_BZ_TX_PACE_OFF :
- FFE_BZ_TX_PACE_RESERVED);
- efx_writeo_table(efx, &reg, FR_BZ_TX_PACE_TBL,
- tx_queue->queue);
- }
+ EFX_POPULATE_OWORD_1(reg,
+ FRF_BZ_TX_PACE,
+ (tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI) ?
+ FFE_BZ_TX_PACE_OFF :
+ FFE_BZ_TX_PACE_RESERVED);
+ efx_writeo_table(efx, &reg, FR_BZ_TX_PACE_TBL, tx_queue->queue);
}
static void efx_farch_flush_tx_queue(struct efx_tx_queue *tx_queue)
@@ -517,16 +511,10 @@ void efx_farch_rx_init(struct efx_rx_queue *rx_queue)
{
efx_oword_t rx_desc_ptr;
struct efx_nic *efx = rx_queue->efx;
- bool is_b0 = efx_nic_rev(efx) >= EFX_REV_FALCON_B0;
- bool iscsi_digest_en = is_b0;
bool jumbo_en;
- /* For kernel-mode queues in Falcon A1, the JUMBO flag enables
- * DMA to continue after a PCIe page boundary (and scattering
- * is not possible). In Falcon B0 and Siena, it enables
- * scatter.
- */
- jumbo_en = !is_b0 || efx->rx_scatter;
+ /* For kernel-mode queues in Siena, the JUMBO flag enables scatter. */
+ jumbo_en = efx->rx_scatter;
netif_dbg(efx, hw, efx->net_dev,
"RX queue %d ring in special buffers %d-%d\n",
@@ -540,8 +528,8 @@ void efx_farch_rx_init(struct efx_rx_queue *rx_queue)
/* Push RX descriptor ring to card */
EFX_POPULATE_OWORD_10(rx_desc_ptr,
- FRF_AZ_RX_ISCSI_DDIG_EN, iscsi_digest_en,
- FRF_AZ_RX_ISCSI_HDIG_EN, iscsi_digest_en,
+ FRF_AZ_RX_ISCSI_DDIG_EN, true,
+ FRF_AZ_RX_ISCSI_HDIG_EN, true,
FRF_AZ_RX_DESCQ_BUF_BASE_ID, rx_queue->rxd.index,
FRF_AZ_RX_DESCQ_EVQ_ID,
efx_rx_queue_channel(rx_queue)->channel,
@@ -880,7 +868,7 @@ static u16 efx_farch_handle_rx_not_ok(struct efx_rx_queue *rx_queue,
struct efx_nic *efx = rx_queue->efx;
bool rx_ev_buf_owner_id_err, rx_ev_ip_hdr_chksum_err;
bool rx_ev_tcp_udp_chksum_err, rx_ev_eth_crc_err;
- bool rx_ev_frm_trunc, rx_ev_drib_nib, rx_ev_tobe_disc;
+ bool rx_ev_frm_trunc, rx_ev_tobe_disc;
bool rx_ev_other_err, rx_ev_pause_frm;
bool rx_ev_hdr_type, rx_ev_mcast_pkt;
unsigned rx_ev_pkt_type;
@@ -897,12 +885,10 @@ static u16 efx_farch_handle_rx_not_ok(struct efx_rx_queue *rx_queue,
FSF_AZ_RX_EV_TCP_UDP_CHKSUM_ERR);
rx_ev_eth_crc_err = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_ETH_CRC_ERR);
rx_ev_frm_trunc = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_FRM_TRUNC);
- rx_ev_drib_nib = ((efx_nic_rev(efx) >= EFX_REV_FALCON_B0) ?
- 0 : EFX_QWORD_FIELD(*event, FSF_AA_RX_EV_DRIB_NIB));
rx_ev_pause_frm = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PAUSE_FRM_ERR);
/* Every error apart from tobe_disc and pause_frm */
- rx_ev_other_err = (rx_ev_drib_nib | rx_ev_tcp_udp_chksum_err |
+ rx_ev_other_err = (rx_ev_tcp_udp_chksum_err |
rx_ev_buf_owner_id_err | rx_ev_eth_crc_err |
rx_ev_frm_trunc | rx_ev_ip_hdr_chksum_err);
@@ -927,7 +913,7 @@ static u16 efx_farch_handle_rx_not_ok(struct efx_rx_queue *rx_queue,
if (rx_ev_other_err && net_ratelimit()) {
netif_dbg(efx, rx_err, efx->net_dev,
" RX queue %d unexpected RX event "
- EFX_QWORD_FMT "%s%s%s%s%s%s%s%s\n",
+ EFX_QWORD_FMT "%s%s%s%s%s%s%s\n",
efx_rx_queue_index(rx_queue), EFX_QWORD_VAL(*event),
rx_ev_buf_owner_id_err ? " [OWNER_ID_ERR]" : "",
rx_ev_ip_hdr_chksum_err ?
@@ -936,14 +922,13 @@ static u16 efx_farch_handle_rx_not_ok(struct efx_rx_queue *rx_queue,
" [TCP_UDP_CHKSUM_ERR]" : "",
rx_ev_eth_crc_err ? " [ETH_CRC_ERR]" : "",
rx_ev_frm_trunc ? " [FRM_TRUNC]" : "",
- rx_ev_drib_nib ? " [DRIB_NIB]" : "",
rx_ev_tobe_disc ? " [TOBE_DISC]" : "",
rx_ev_pause_frm ? " [PAUSE]" : "");
}
#endif
/* The frame must be discarded if any of these are true. */
- return (rx_ev_eth_crc_err | rx_ev_frm_trunc | rx_ev_drib_nib |
+ return (rx_ev_eth_crc_err | rx_ev_frm_trunc |
rx_ev_tobe_disc | rx_ev_pause_frm) ?
EFX_RX_PKT_DISCARD : 0;
}
@@ -972,8 +957,7 @@ efx_farch_handle_rx_bad_index(struct efx_rx_queue *rx_queue, unsigned index)
"dropped %d events (index=%d expected=%d)\n",
dropped, index, expected);
- efx_schedule_reset(efx, EFX_WORKAROUND_5676(efx) ?
- RESET_TYPE_RX_RECOVERY : RESET_TYPE_DISABLE);
+ efx_schedule_reset(efx, RESET_TYPE_DISABLE);
return false;
}
@@ -1239,10 +1223,7 @@ efx_farch_handle_driver_event(struct efx_channel *channel, efx_qword_t *event)
"channel %d seen DRIVER RX_RESET event. "
"Resetting.\n", channel->channel);
atomic_inc(&efx->rx_reset);
- efx_schedule_reset(efx,
- EFX_WORKAROUND_6555(efx) ?
- RESET_TYPE_RX_RECOVERY :
- RESET_TYPE_DISABLE);
+ efx_schedule_reset(efx, RESET_TYPE_DISABLE);
break;
case FSE_BZ_RX_DSC_ERROR_EV:
if (ev_sub_data < EFX_VI_BASE) {
@@ -1379,13 +1360,11 @@ int efx_farch_ev_init(struct efx_channel *channel)
channel->channel, channel->eventq.index,
channel->eventq.index + channel->eventq.entries - 1);
- if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0) {
- EFX_POPULATE_OWORD_3(reg,
- FRF_CZ_TIMER_Q_EN, 1,
- FRF_CZ_HOST_NOTIFY_MODE, 0,
- FRF_CZ_TIMER_MODE, FFE_CZ_TIMER_MODE_DIS);
- efx_writeo_table(efx, &reg, FR_BZ_TIMER_TBL, channel->channel);
- }
+ EFX_POPULATE_OWORD_3(reg,
+ FRF_CZ_TIMER_Q_EN, 1,
+ FRF_CZ_HOST_NOTIFY_MODE, 0,
+ FRF_CZ_TIMER_MODE, FFE_CZ_TIMER_MODE_DIS);
+ efx_writeo_table(efx, &reg, FR_BZ_TIMER_TBL, channel->channel);
/* Pin event queue buffer */
efx_init_special_buffer(efx, &channel->eventq);
@@ -1413,8 +1392,7 @@ void efx_farch_ev_fini(struct efx_channel *channel)
EFX_ZERO_OWORD(reg);
efx_writeo_table(efx, &reg, efx->type->evq_ptr_tbl_base,
channel->channel);
- if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0)
- efx_writeo_table(efx, &reg, FR_BZ_TIMER_TBL, channel->channel);
+ efx_writeo_table(efx, &reg, FR_BZ_TIMER_TBL, channel->channel);
/* Unpin event queue */
efx_fini_special_buffer(efx, &channel->eventq);
@@ -1488,7 +1466,6 @@ int efx_farch_irq_test_generate(struct efx_nic *efx)
*/
irqreturn_t efx_farch_fatal_interrupt(struct efx_nic *efx)
{
- struct falcon_nic_data *nic_data = efx->nic_data;
efx_oword_t *int_ker = efx->irq_status.addr;
efx_oword_t fatal_intr;
int error, mem_perr;
@@ -1514,8 +1491,6 @@ irqreturn_t efx_farch_fatal_interrupt(struct efx_nic *efx)
/* Disable both devices */
pci_clear_master(efx->pci_dev);
- if (efx_nic_is_dual_func(efx))
- pci_clear_master(nic_data->pci_dev2);
efx_farch_irq_disable_master(efx);
/* Count errors and reset or disable the NIC accordingly */
@@ -1662,8 +1637,6 @@ void efx_farch_rx_push_indir_table(struct efx_nic *efx)
size_t i = 0;
efx_dword_t dword;
- BUG_ON(efx_nic_rev(efx) < EFX_REV_FALCON_B0);
-
BUILD_BUG_ON(ARRAY_SIZE(efx->rx_indir_table) !=
FR_BZ_RX_INDIRECTION_TBL_ROWS);
@@ -1791,8 +1764,7 @@ void efx_farch_init_common(struct efx_nic *efx)
FRF_AZ_ILL_ADR_INT_KER_EN, 1,
FRF_AZ_RBUF_OWN_INT_KER_EN, 1,
FRF_AZ_TBUF_OWN_INT_KER_EN, 1);
- if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0)
- EFX_SET_OWORD_FIELD(temp, FRF_CZ_SRAM_PERR_INT_P_KER_EN, 1);
+ EFX_SET_OWORD_FIELD(temp, FRF_CZ_SRAM_PERR_INT_P_KER_EN, 1);
EFX_INVERT_OWORD(temp);
efx_writeo(efx, &temp, FR_AZ_FATAL_INTR_KER);
@@ -1812,22 +1784,18 @@ void efx_farch_init_common(struct efx_nic *efx)
/* Disable hardware watchdog which can misfire */
EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PREF_WD_TMR, 0x3fffff);
/* Squash TX of packets of 16 bytes or less */
- if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0)
- EFX_SET_OWORD_FIELD(temp, FRF_BZ_TX_FLUSH_MIN_LEN_EN, 1);
+ EFX_SET_OWORD_FIELD(temp, FRF_BZ_TX_FLUSH_MIN_LEN_EN, 1);
efx_writeo(efx, &temp, FR_AZ_TX_RESERVED);
- if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
- EFX_POPULATE_OWORD_4(temp,
- /* Default values */
- FRF_BZ_TX_PACE_SB_NOT_AF, 0x15,
- FRF_BZ_TX_PACE_SB_AF, 0xb,
- FRF_BZ_TX_PACE_FB_BASE, 0,
- /* Allow large pace values in the
- * fast bin. */
- FRF_BZ_TX_PACE_BIN_TH,
- FFE_BZ_TX_PACE_RESERVED);
- efx_writeo(efx, &temp, FR_BZ_TX_PACE);
- }
+ EFX_POPULATE_OWORD_4(temp,
+ /* Default values */
+ FRF_BZ_TX_PACE_SB_NOT_AF, 0x15,
+ FRF_BZ_TX_PACE_SB_AF, 0xb,
+ FRF_BZ_TX_PACE_FB_BASE, 0,
+ /* Allow large pace values in the fast bin. */
+ FRF_BZ_TX_PACE_BIN_TH,
+ FFE_BZ_TX_PACE_RESERVED);
+ efx_writeo(efx, &temp, FR_BZ_TX_PACE);
}
/**************************************************************************
@@ -2011,7 +1979,7 @@ static void efx_farch_filter_push_rx_config(struct efx_nic *efx)
!!(table->spec[EFX_FARCH_FILTER_INDEX_UC_DEF].flags &
table->spec[EFX_FARCH_FILTER_INDEX_MC_DEF].flags &
EFX_FILTER_FLAG_RX_SCATTER));
- } else if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
+ } else {
/* We don't expose 'default' filters because unmatched
* packets always go to the queue number found in the
* RSS table. But we still need to set the RX scatter
@@ -2073,7 +2041,7 @@ efx_farch_filter_from_gen_spec(struct efx_farch_filter_spec *spec,
__be32 rhost, host1, host2;
__be16 rport, port1, port2;
- EFX_BUG_ON_PARANOID(!(gen_spec->flags & EFX_FILTER_FLAG_RX));
+ EFX_WARN_ON_PARANOID(!(gen_spec->flags & EFX_FILTER_FLAG_RX));
if (gen_spec->ether_type != htons(ETH_P_IP))
return -EPROTONOSUPPORT;
@@ -2819,31 +2787,27 @@ int efx_farch_filter_table_probe(struct efx_nic *efx)
return -ENOMEM;
efx->filter_state = state;
- if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
- table = &state->table[EFX_FARCH_FILTER_TABLE_RX_IP];
- table->id = EFX_FARCH_FILTER_TABLE_RX_IP;
- table->offset = FR_BZ_RX_FILTER_TBL0;
- table->size = FR_BZ_RX_FILTER_TBL0_ROWS;
- table->step = FR_BZ_RX_FILTER_TBL0_STEP;
- }
+ table = &state->table[EFX_FARCH_FILTER_TABLE_RX_IP];
+ table->id = EFX_FARCH_FILTER_TABLE_RX_IP;
+ table->offset = FR_BZ_RX_FILTER_TBL0;
+ table->size = FR_BZ_RX_FILTER_TBL0_ROWS;
+ table->step = FR_BZ_RX_FILTER_TBL0_STEP;
- if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0) {
- table = &state->table[EFX_FARCH_FILTER_TABLE_RX_MAC];
- table->id = EFX_FARCH_FILTER_TABLE_RX_MAC;
- table->offset = FR_CZ_RX_MAC_FILTER_TBL0;
- table->size = FR_CZ_RX_MAC_FILTER_TBL0_ROWS;
- table->step = FR_CZ_RX_MAC_FILTER_TBL0_STEP;
-
- table = &state->table[EFX_FARCH_FILTER_TABLE_RX_DEF];
- table->id = EFX_FARCH_FILTER_TABLE_RX_DEF;
- table->size = EFX_FARCH_FILTER_SIZE_RX_DEF;
-
- table = &state->table[EFX_FARCH_FILTER_TABLE_TX_MAC];
- table->id = EFX_FARCH_FILTER_TABLE_TX_MAC;
- table->offset = FR_CZ_TX_MAC_FILTER_TBL0;
- table->size = FR_CZ_TX_MAC_FILTER_TBL0_ROWS;
- table->step = FR_CZ_TX_MAC_FILTER_TBL0_STEP;
- }
+ table = &state->table[EFX_FARCH_FILTER_TABLE_RX_MAC];
+ table->id = EFX_FARCH_FILTER_TABLE_RX_MAC;
+ table->offset = FR_CZ_RX_MAC_FILTER_TBL0;
+ table->size = FR_CZ_RX_MAC_FILTER_TBL0_ROWS;
+ table->step = FR_CZ_RX_MAC_FILTER_TBL0_STEP;
+
+ table = &state->table[EFX_FARCH_FILTER_TABLE_RX_DEF];
+ table->id = EFX_FARCH_FILTER_TABLE_RX_DEF;
+ table->size = EFX_FARCH_FILTER_SIZE_RX_DEF;
+
+ table = &state->table[EFX_FARCH_FILTER_TABLE_TX_MAC];
+ table->id = EFX_FARCH_FILTER_TABLE_TX_MAC;
+ table->offset = FR_CZ_TX_MAC_FILTER_TBL0;
+ table->size = FR_CZ_TX_MAC_FILTER_TBL0_ROWS;
+ table->step = FR_CZ_TX_MAC_FILTER_TBL0_STEP;
for (table_id = 0; table_id < EFX_FARCH_FILTER_TABLE_COUNT; table_id++) {
table = &state->table[table_id];
diff --git a/drivers/net/ethernet/sfc/mcdi.c b/drivers/net/ethernet/sfc/mcdi.c
index 241520943ada..995651341b94 100644
--- a/drivers/net/ethernet/sfc/mcdi.c
+++ b/drivers/net/ethernet/sfc/mcdi.c
@@ -15,7 +15,6 @@
#include "io.h"
#include "farch_regs.h"
#include "mcdi_pcol.h"
-#include "phy.h"
/**************************************************************************
*
diff --git a/drivers/net/ethernet/sfc/mcdi.h b/drivers/net/ethernet/sfc/mcdi.h
index c9aeb0701c9a..4472107ca8c1 100644
--- a/drivers/net/ethernet/sfc/mcdi.h
+++ b/drivers/net/ethernet/sfc/mcdi.h
@@ -129,14 +129,14 @@ struct efx_mcdi_data {
static inline struct efx_mcdi_iface *efx_mcdi(struct efx_nic *efx)
{
- EFX_BUG_ON_PARANOID(!efx->mcdi);
+ EFX_WARN_ON_PARANOID(!efx->mcdi);
return &efx->mcdi->iface;
}
#ifdef CONFIG_SFC_MCDI_MON
static inline struct efx_mcdi_mon *efx_mcdi_mon(struct efx_nic *efx)
{
- EFX_BUG_ON_PARANOID(!efx->mcdi);
+ EFX_WARN_ON_PARANOID(!efx->mcdi);
return &efx->mcdi->hwmon;
}
#endif
diff --git a/drivers/net/ethernet/sfc/mcdi_mon.c b/drivers/net/ethernet/sfc/mcdi_mon.c
index bc27d5b580f5..f97da05952c7 100644
--- a/drivers/net/ethernet/sfc/mcdi_mon.c
+++ b/drivers/net/ethernet/sfc/mcdi_mon.c
@@ -121,9 +121,9 @@ void efx_mcdi_sensor_event(struct efx_nic *efx, efx_qword_t *ev)
}
if (!name)
name = "No sensor name available";
- EFX_BUG_ON_PARANOID(state >= ARRAY_SIZE(sensor_status_names));
+ EFX_WARN_ON_PARANOID(state >= ARRAY_SIZE(sensor_status_names));
state_txt = sensor_status_names[state];
- EFX_BUG_ON_PARANOID(hwmon_type >= EFX_HWMON_TYPES_COUNT);
+ EFX_WARN_ON_PARANOID(hwmon_type >= EFX_HWMON_TYPES_COUNT);
unit = efx_hwmon_unit[hwmon_type];
if (!unit)
unit = "";
diff --git a/drivers/net/ethernet/sfc/mcdi_pcol.h b/drivers/net/ethernet/sfc/mcdi_pcol.h
index ccceafc15896..35cc3d4fa5f6 100644
--- a/drivers/net/ethernet/sfc/mcdi_pcol.h
+++ b/drivers/net/ethernet/sfc/mcdi_pcol.h
@@ -276,6 +276,9 @@
/* The clock whose frequency you've attempted to set
* doesn't exist on this NIC */
#define MC_CMD_ERR_NO_CLOCK 0x1015
+/* Returned by MC_CMD_TESTASSERT if the action that should
+ * have caused an assertion failed to do so. */
+#define MC_CMD_ERR_UNREACHABLE 0x1016
#define MC_CMD_ERR_CODE_OFST 0
@@ -933,6 +936,8 @@
#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_SKIP_BOOT_ICORE_SYNC_WIDTH 1
#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_FORCE_STANDALONE_LBN 5
#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_FORCE_STANDALONE_WIDTH 1
+#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_DISABLE_XIP_LBN 6
+#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_DISABLE_XIP_WIDTH 1
/* Destination address */
#define MC_CMD_COPYCODE_IN_DEST_ADDR_OFST 4
#define MC_CMD_COPYCODE_IN_NUMWORDS_OFST 8
@@ -1659,6 +1664,8 @@
#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_CAPABILITIES_OFST 8
#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_REPORT_SYNC_STATUS_LBN 0
#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_REPORT_SYNC_STATUS_WIDTH 1
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_RX_TSTAMP_OOB_LBN 1
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_RX_TSTAMP_OOB_WIDTH 1
#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_RESERVED0_OFST 12
#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_RESERVED1_OFST 16
#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_RESERVED2_OFST 20
@@ -2211,6 +2218,10 @@
#define MC_CMD_FW_HIGH_TX_RATE 0x3
/* enum: Reserved value */
#define MC_CMD_FW_PACKED_STREAM_HASH_MODE_1 0x4
+/* enum: Prefer to use firmware with additional "rules engine" filtering
+ * support
+ */
+#define MC_CMD_FW_RULES_ENGINE 0x5
/* enum: Only this option is allowed for non-admin functions */
#define MC_CMD_FW_DONT_CARE 0xffffffff
@@ -3654,12 +3665,27 @@
#define MC_CMD_0x38_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-/* MC_CMD_NVRAM_UPDATE_START_IN msgrequest */
+/* MC_CMD_NVRAM_UPDATE_START_IN msgrequest: Legacy NVRAM_UPDATE_START request.
+ * Use NVRAM_UPDATE_START_V2_IN in new code
+ */
#define MC_CMD_NVRAM_UPDATE_START_IN_LEN 4
#define MC_CMD_NVRAM_UPDATE_START_IN_TYPE_OFST 0
/* Enum values, see field(s): */
/* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */
+/* MC_CMD_NVRAM_UPDATE_START_V2_IN msgrequest: Extended NVRAM_UPDATE_START
+ * request with additional flags indicating version of command in use. See
+ * MC_CMD_NVRAM_UPDATE_FINISH_V2_OUT for details of extended functionality. To
+ * be used paired with NVRAM_UPDATE_FINISH_V2_IN.
+ */
+#define MC_CMD_NVRAM_UPDATE_START_V2_IN_LEN 8
+#define MC_CMD_NVRAM_UPDATE_START_V2_IN_TYPE_OFST 0
+/* Enum values, see field(s): */
+/* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */
+#define MC_CMD_NVRAM_UPDATE_START_V2_IN_FLAGS_OFST 4
+#define MC_CMD_NVRAM_UPDATE_START_V2_IN_FLAG_REPORT_VERIFY_RESULT_LBN 0
+#define MC_CMD_NVRAM_UPDATE_START_V2_IN_FLAG_REPORT_VERIFY_RESULT_WIDTH 1
+
/* MC_CMD_NVRAM_UPDATE_START_OUT msgresponse */
#define MC_CMD_NVRAM_UPDATE_START_OUT_LEN 0
@@ -3784,16 +3810,81 @@
#define MC_CMD_0x3c_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-/* MC_CMD_NVRAM_UPDATE_FINISH_IN msgrequest */
+/* MC_CMD_NVRAM_UPDATE_FINISH_IN msgrequest: Legacy NVRAM_UPDATE_FINISH
+ * request. Use NVRAM_UPDATE_FINISH_V2_IN in new code
+ */
#define MC_CMD_NVRAM_UPDATE_FINISH_IN_LEN 8
#define MC_CMD_NVRAM_UPDATE_FINISH_IN_TYPE_OFST 0
/* Enum values, see field(s): */
/* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */
#define MC_CMD_NVRAM_UPDATE_FINISH_IN_REBOOT_OFST 4
-/* MC_CMD_NVRAM_UPDATE_FINISH_OUT msgresponse */
+/* MC_CMD_NVRAM_UPDATE_FINISH_V2_IN msgrequest: Extended NVRAM_UPDATE_FINISH
+ * request with additional flags indicating version of NVRAM_UPDATE commands in
+ * use. See MC_CMD_NVRAM_UPDATE_FINISH_V2_OUT for details of extended
+ * functionality. To be used paired with NVRAM_UPDATE_START_V2_IN.
+ */
+#define MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_LEN 12
+#define MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_TYPE_OFST 0
+/* Enum values, see field(s): */
+/* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */
+#define MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_REBOOT_OFST 4
+#define MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_FLAGS_OFST 8
+#define MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_FLAG_REPORT_VERIFY_RESULT_LBN 0
+#define MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_FLAG_REPORT_VERIFY_RESULT_WIDTH 1
+
+/* MC_CMD_NVRAM_UPDATE_FINISH_OUT msgresponse: Legacy NVRAM_UPDATE_FINISH
+ * response. Use NVRAM_UPDATE_FINISH_V2_OUT in new code
+ */
#define MC_CMD_NVRAM_UPDATE_FINISH_OUT_LEN 0
+/* MC_CMD_NVRAM_UPDATE_FINISH_V2_OUT msgresponse:
+ *
+ * Extended NVRAM_UPDATE_FINISH response that communicates the result of secure
+ * firmware validation where applicable back to the host.
+ *
+ * Medford only: For signed firmware images, such as those for Medford, the MC
+ * firmware verifies the signature before marking the firmware image as valid.
+ * This process takes a few seconds to complete, so it is likely to take more
+ * than the MCDI timeout. Hence signature verification is initiated when
+ * MC_CMD_NVRAM_UPDATE_FINISH_V2_IN is received by the firmware; however, the
+ * MCDI command returns immediately with error code EAGAIN. Subsequent
+ * NVRAM_UPDATE_FINISH_V2_IN requests also return EAGAIN if the verification is
+ * in progress. Once the verification has completed, this response payload
+ * includes the results of the signature verification. Note that the nvram lock
+ * in firmware is only released after the verification has completed and the
+ * host has read back the result code from firmware.
+ */
+#define MC_CMD_NVRAM_UPDATE_FINISH_V2_OUT_LEN 4
+/* Result of nvram update completion processing */
+#define MC_CMD_NVRAM_UPDATE_FINISH_V2_OUT_RESULT_CODE_OFST 0
+/* enum: Verify succeeded without any errors. */
+#define MC_CMD_NVRAM_VERIFY_RC_SUCCESS 0x1
+/* enum: CMS format verification failed due to an internal error. */
+#define MC_CMD_NVRAM_VERIFY_RC_CMS_CHECK_FAILED 0x2
+/* enum: Invalid CMS format in image metadata. */
+#define MC_CMD_NVRAM_VERIFY_RC_INVALID_CMS_FORMAT 0x3
+/* enum: Message digest verification failed due to an internal error. */
+#define MC_CMD_NVRAM_VERIFY_RC_MESSAGE_DIGEST_CHECK_FAILED 0x4
+/* enum: Error in message digest calculated over the reflash-header, payload
+ * and reflash-trailer.
+ */
+#define MC_CMD_NVRAM_VERIFY_RC_BAD_MESSAGE_DIGEST 0x5
+/* enum: Signature verification failed due to an internal error. */
+#define MC_CMD_NVRAM_VERIFY_RC_SIGNATURE_CHECK_FAILED 0x6
+/* enum: There are no valid signatures in the image. */
+#define MC_CMD_NVRAM_VERIFY_RC_NO_VALID_SIGNATURES 0x7
+/* enum: Trusted approvers verification failed due to an internal error. */
+#define MC_CMD_NVRAM_VERIFY_RC_TRUSTED_APPROVERS_CHECK_FAILED 0x8
+/* enum: The Trusted approver's list is empty. */
+#define MC_CMD_NVRAM_VERIFY_RC_NO_TRUSTED_APPROVERS 0x9
+/* enum: Signature chain verification failed due to an internal error. */
+#define MC_CMD_NVRAM_VERIFY_RC_SIGNATURE_CHAIN_CHECK_FAILED 0xa
+/* enum: The signers of the signatures in the image are not listed in the
+ * Trusted approver's list.
+ */
+#define MC_CMD_NVRAM_VERIFY_RC_NO_SIGNATURE_MATCH 0xb
+
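The comment block above implies a host-side retry loop around NVRAM_UPDATE_FINISH_V2. A hedged sketch of that flow follows; do_update_finish_v2() is a hypothetical wrapper around the MCDI transport (returning a negative errno and, on success, the RESULT_CODE field of the response), not a real driver API:

#include <errno.h>
#include <stdio.h>
#include <unistd.h>

#define MC_CMD_NVRAM_VERIFY_RC_SUCCESS 0x1

/* Hypothetical: issues MC_CMD_NVRAM_UPDATE_FINISH_V2 for the given NVRAM
 * type and, on success, stores the RESULT_CODE field in *result. */
extern int do_update_finish_v2(unsigned int type, unsigned int *result);

int nvram_update_finish_and_verify(unsigned int type)
{
	unsigned int result = 0;
	int rc;

	/* Signature verification can outlive a single MCDI timeout, so the
	 * firmware answers EAGAIN until it has finished. */
	do {
		rc = do_update_finish_v2(type, &result);
		if (rc == -EAGAIN)
			sleep(1);
	} while (rc == -EAGAIN);

	if (rc)
		return rc;

	/* The nvram lock is only dropped once this result has been read. */
	if (result != MC_CMD_NVRAM_VERIFY_RC_SUCCESS) {
		fprintf(stderr, "nvram verify failed: rc=%#x\n", result);
		return -EIO;
	}
	return 0;
}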
/***********************************/
/* MC_CMD_REBOOT
@@ -4356,6 +4447,28 @@
/* MC_CMD_TESTASSERT_OUT msgresponse */
#define MC_CMD_TESTASSERT_OUT_LEN 0
+/* MC_CMD_TESTASSERT_V2_IN msgrequest */
+#define MC_CMD_TESTASSERT_V2_IN_LEN 4
+/* How to provoke the assertion */
+#define MC_CMD_TESTASSERT_V2_IN_TYPE_OFST 0
+/* enum: Assert using the FAIL_ASSERTION_WITH_USEFUL_VALUES macro. Unless
+ * you're testing firmware, this is what you want.
+ */
+#define MC_CMD_TESTASSERT_V2_IN_FAIL_ASSERTION_WITH_USEFUL_VALUES 0x0
+/* enum: Assert using assert(0); */
+#define MC_CMD_TESTASSERT_V2_IN_ASSERT_FALSE 0x1
+/* enum: Deliberately trigger a watchdog */
+#define MC_CMD_TESTASSERT_V2_IN_WATCHDOG 0x2
+/* enum: Deliberately trigger a trap by loading from an invalid address */
+#define MC_CMD_TESTASSERT_V2_IN_LOAD_TRAP 0x3
+/* enum: Deliberately trigger a trap by storing to an invalid address */
+#define MC_CMD_TESTASSERT_V2_IN_STORE_TRAP 0x4
+/* enum: Jump to an invalid address */
+#define MC_CMD_TESTASSERT_V2_IN_JUMP_TRAP 0x5
+
+/* MC_CMD_TESTASSERT_V2_OUT msgresponse */
+#define MC_CMD_TESTASSERT_V2_OUT_LEN 0
+
/***********************************/
/* MC_CMD_WORKAROUND
@@ -4421,6 +4534,7 @@
* (GET_PHY_CFG_OUT_MEDIA_TYPE); the valid 'page number' input values, and the
* output data, are interpreted on a per-type basis. For SFP+: PAGE=0 or 1
* returns a 128-byte block read from module I2C address 0xA0 offset 0 or 0x80.
+ * Anything else: currently undefined. Locks required: None. Return code: 0.
*/
#define MC_CMD_GET_PHY_MEDIA_INFO 0x4b
@@ -5362,12 +5476,14 @@
#define NVRAM_PARTITION_TYPE_EXPANSION_UEFI 0xd00
/* enum: Spare partition 0 */
#define NVRAM_PARTITION_TYPE_SPARE_0 0x1000
-/* enum: Spare partition 1 */
-#define NVRAM_PARTITION_TYPE_SPARE_1 0x1100
+/* enum: Used for XIP code of shmbooted images */
+#define NVRAM_PARTITION_TYPE_XIP_SCRATCH 0x1100
/* enum: Spare partition 2 */
#define NVRAM_PARTITION_TYPE_SPARE_2 0x1200
-/* enum: Spare partition 3 */
-#define NVRAM_PARTITION_TYPE_SPARE_3 0x1300
+/* enum: Manufacturing partition. Used during manufacture to pass information
+ * between XJTAG and Manftest.
+ */
+#define NVRAM_PARTITION_TYPE_MANUFACTURING 0x1300
/* enum: Spare partition 4 */
#define NVRAM_PARTITION_TYPE_SPARE_4 0x1400
/* enum: Spare partition 5 */
@@ -5402,6 +5518,14 @@
#define LICENSED_APP_ID_CAPTURE_SOLARSYSTEM 0x40
/* enum: Network Access Control */
#define LICENSED_APP_ID_NETWORK_ACCESS_CONTROL 0x80
+/* enum: TCP Direct */
+#define LICENSED_APP_ID_TCP_DIRECT 0x100
+/* enum: Low Latency */
+#define LICENSED_APP_ID_LOW_LATENCY 0x200
+/* enum: SolarCapture Tap */
+#define LICENSED_APP_ID_SOLARCAPTURE_TAP 0x400
+/* enum: Capture SolarSystem 40G */
+#define LICENSED_APP_ID_CAPTURE_SOLARSYSTEM_40G 0x800
#define LICENSED_APP_ID_ID_LBN 0
#define LICENSED_APP_ID_ID_WIDTH 32
@@ -5458,6 +5582,14 @@
#define LICENSED_V3_APPS_CAPTURE_SOLARSYSTEM_WIDTH 1
#define LICENSED_V3_APPS_NETWORK_ACCESS_CONTROL_LBN 7
#define LICENSED_V3_APPS_NETWORK_ACCESS_CONTROL_WIDTH 1
+#define LICENSED_V3_APPS_TCP_DIRECT_LBN 8
+#define LICENSED_V3_APPS_TCP_DIRECT_WIDTH 1
+#define LICENSED_V3_APPS_LOW_LATENCY_LBN 9
+#define LICENSED_V3_APPS_LOW_LATENCY_WIDTH 1
+#define LICENSED_V3_APPS_SOLARCAPTURE_TAP_LBN 10
+#define LICENSED_V3_APPS_SOLARCAPTURE_TAP_WIDTH 1
+#define LICENSED_V3_APPS_CAPTURE_SOLARSYSTEM_40G_LBN 11
+#define LICENSED_V3_APPS_CAPTURE_SOLARSYSTEM_40G_WIDTH 1
#define LICENSED_V3_APPS_MASK_LBN 0
#define LICENSED_V3_APPS_MASK_WIDTH 64
@@ -5988,6 +6120,8 @@
#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_INNER_TCP_CSUM_EN_WIDTH 1
#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_TSOV2_EN_LBN 12
#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_TSOV2_EN_WIDTH 1
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_CTPIO_LBN 13
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_CTPIO_WIDTH 1
/* Owner ID to use if in buffer mode (zero if physical) */
#define MC_CMD_INIT_TXQ_EXT_IN_OWNER_ID_OFST 20
/* The port ID associated with the v-adaptor which should contain this DMAQ. */
@@ -7728,6 +7862,8 @@
* tests (Medford development only)
*/
#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_LAYER2_PERF 0x7
+/* enum: Rules engine RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_RULES_ENGINE 0x8
/* enum: RX PD firmware for GUE parsing prototype (Medford development only) */
#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe
/* enum: RX PD firmware parsing but not filtering network overlay tunnel
@@ -7763,6 +7899,8 @@
* tests (Medford development only)
*/
#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_LAYER2_PERF 0x7
+/* enum: Rules engine TX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_RULES_ENGINE 0x8
/* enum: RX PD firmware for GUE parsing prototype (Medford development only) */
#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe
/* Hardware capabilities of NIC */
@@ -7913,6 +8051,8 @@
* tests (Medford development only)
*/
#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_LAYER2_PERF 0x7
+/* enum: Rules engine RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_RULES_ENGINE 0x8
/* enum: RX PD firmware for GUE parsing prototype (Medford development only) */
#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe
/* enum: RX PD firmware parsing but not filtering network overlay tunnel
@@ -7948,6 +8088,8 @@
* tests (Medford development only)
*/
#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_LAYER2_PERF 0x7
+/* enum: Rules engine TX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_RULES_ENGINE 0x8
/* enum: RX PD firmware for GUE parsing prototype (Medford development only) */
#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe
/* Hardware capabilities of NIC */
@@ -7980,6 +8122,8 @@
#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_SNIFF_WIDTH 1
#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_SNIFF_LBN 11
#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_SNIFF_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_LBN 12
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_WIDTH 1
/* Number of FATSOv2 contexts per datapath supported by this NIC. Not present
* on older firmware (check the length).
*/
@@ -8247,6 +8391,8 @@
#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_SNIFF_WIDTH 1
#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_SNIFF_LBN 11
#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_SNIFF_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_LBN 12
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_WIDTH 1
/* Number of FATSOv2 contexts per datapath supported by this NIC. Not present
* on older firmware (check the length).
*/
@@ -8304,7 +8450,7 @@
#define MC_CMD_GET_CAPABILITIES_V3_OUT_SIZE_PIO_BUFF_LEN 2
/* On chips later than Medford the amount of address space assigned to each VI
* is configurable. This is a global setting that the driver must query to
- * discover the VI to address mapping. Cut-through PIO (CTPIO) in not available
+ * discover the VI to address mapping. Cut-through PIO (CTPIO) is not available
* with 8k VI windows.
*/
#define MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_OFST 72
@@ -10283,6 +10429,8 @@
* more data is returned.
*/
#define MC_CMD_PCIE_TUNE_IN_POLL_EYE_PLOT 0x6
+/* enum: Enable the SERDES BIST and set it to generate a 200MHz square wave */
+#define MC_CMD_PCIE_TUNE_IN_BIST_SQUARE_WAVE 0x7
/* Align the arguments to 32 bits */
#define MC_CMD_PCIE_TUNE_IN_PCIE_TUNE_RSVD_OFST 1
#define MC_CMD_PCIE_TUNE_IN_PCIE_TUNE_RSVD_LEN 3
@@ -10468,6 +10616,12 @@
#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_OUT_SAMPLES_MINNUM 0
#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_OUT_SAMPLES_MAXNUM 126
+/* MC_CMD_PCIE_TUNE_BIST_SQUARE_WAVE_IN msgrequest */
+#define MC_CMD_PCIE_TUNE_BIST_SQUARE_WAVE_IN_LEN 0
+
+/* MC_CMD_PCIE_TUNE_BIST_SQUARE_WAVE_OUT msgresponse */
+#define MC_CMD_PCIE_TUNE_BIST_SQUARE_WAVE_OUT_LEN 0
+
/***********************************/
/* MC_CMD_LICENSING
@@ -10783,29 +10937,45 @@
#define MC_CMD_0xd4_PRIVILEGE_CTG SRIOV_CTG_GENERAL
/* MC_CMD_LICENSED_V3_VALIDATE_APP_IN msgrequest */
-#define MC_CMD_LICENSED_V3_VALIDATE_APP_IN_LEN 72
+#define MC_CMD_LICENSED_V3_VALIDATE_APP_IN_LEN 56
+/* challenge for validation (384 bits) */
+#define MC_CMD_LICENSED_V3_VALIDATE_APP_IN_CHALLENGE_OFST 0
+#define MC_CMD_LICENSED_V3_VALIDATE_APP_IN_CHALLENGE_LEN 48
/* application ID expressed as a single bit mask */
-#define MC_CMD_LICENSED_V3_VALIDATE_APP_IN_APP_ID_OFST 0
+#define MC_CMD_LICENSED_V3_VALIDATE_APP_IN_APP_ID_OFST 48
#define MC_CMD_LICENSED_V3_VALIDATE_APP_IN_APP_ID_LEN 8
-#define MC_CMD_LICENSED_V3_VALIDATE_APP_IN_APP_ID_LO_OFST 0
-#define MC_CMD_LICENSED_V3_VALIDATE_APP_IN_APP_ID_HI_OFST 4
-/* challenge for validation */
-#define MC_CMD_LICENSED_V3_VALIDATE_APP_IN_CHALLENGE_OFST 8
-#define MC_CMD_LICENSED_V3_VALIDATE_APP_IN_CHALLENGE_LEN 64
+#define MC_CMD_LICENSED_V3_VALIDATE_APP_IN_APP_ID_LO_OFST 48
+#define MC_CMD_LICENSED_V3_VALIDATE_APP_IN_APP_ID_HI_OFST 52
/* MC_CMD_LICENSED_V3_VALIDATE_APP_OUT msgresponse */
-#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_LEN 72
+#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_LEN 116
+/* validation response to challenge in the form of ECDSA signature consisting
+ * of two 384-bit integers, r and s, in big-endian order. The signature signs a
+ * SHA-384 digest of a message constructed from the concatenation of the input
+ * message and the remaining fields of this output message, e.g. challenge[48
+ * bytes] ... expiry_time[4 bytes] ...
+ */
+#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_RESPONSE_OFST 0
+#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_RESPONSE_LEN 96
/* application expiry time */
-#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_EXPIRY_TIME_OFST 0
+#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_EXPIRY_TIME_OFST 96
/* application expiry units */
-#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_EXPIRY_UNITS_OFST 4
+#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_EXPIRY_UNITS_OFST 100
/* enum: expiry units are accounting units */
#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_EXPIRY_UNIT_ACC 0x0
/* enum: expiry units are calendar days */
#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_EXPIRY_UNIT_DAYS 0x1
-/* validation response to challenge */
-#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_RESPONSE_OFST 8
-#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_RESPONSE_LEN 64
+/* base MAC address of the NIC stored in NVRAM (note that this is a constant
+ * value for a given NIC regardless of which function is calling; effectively
+ * is PF0 base MAC address)
+ */
+#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_BASE_MACADDR_OFST 104
+#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_BASE_MACADDR_LEN 6
+/* MAC address of v-adaptor associated with the client. If no such v-adaptor
+ * exists, then the field is filled with 0xFF.
+ */
+#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_VADAPTOR_MACADDR_OFST 110
+#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_VADAPTOR_MACADDR_LEN 6
/***********************************/
@@ -10835,6 +11005,70 @@
/***********************************/
+/* MC_CMD_LICENSING_V3_TEMPORARY
+ * Perform operations to support installation of a single temporary license in
+ * the adapter, in addition to those found in the licensing partition. See
+ * SF-116124-SW for an overview of how this could be used. The license is
+ * stored in MC persistent data and so will survive an MC reboot, but will be
+ * erased when the adapter is power cycled.
+ */
+#define MC_CMD_LICENSING_V3_TEMPORARY 0xd6
+
+#define MC_CMD_0xd6_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_LICENSING_V3_TEMPORARY_IN msgrequest */
+#define MC_CMD_LICENSING_V3_TEMPORARY_IN_LEN 4
+/* operation code */
+#define MC_CMD_LICENSING_V3_TEMPORARY_IN_OP_OFST 0
+/* enum: install a new license, overwriting any existing temporary license.
+ * This is an asynchronous operation owing to the time taken to validate an
+ * ECDSA license
+ */
+#define MC_CMD_LICENSING_V3_TEMPORARY_SET 0x0
+/* enum: clear the license immediately rather than waiting for the next power
+ * cycle
+ */
+#define MC_CMD_LICENSING_V3_TEMPORARY_CLEAR 0x1
+/* enum: get the status of the asynchronous MC_CMD_LICENSING_V3_TEMPORARY_SET
+ * operation
+ */
+#define MC_CMD_LICENSING_V3_TEMPORARY_STATUS 0x2
+
+/* MC_CMD_LICENSING_V3_TEMPORARY_IN_SET msgrequest */
+#define MC_CMD_LICENSING_V3_TEMPORARY_IN_SET_LEN 164
+#define MC_CMD_LICENSING_V3_TEMPORARY_IN_SET_OP_OFST 0
+/* ECDSA license and signature */
+#define MC_CMD_LICENSING_V3_TEMPORARY_IN_SET_LICENSE_OFST 4
+#define MC_CMD_LICENSING_V3_TEMPORARY_IN_SET_LICENSE_LEN 160
+
+/* MC_CMD_LICENSING_V3_TEMPORARY_IN_CLEAR msgrequest */
+#define MC_CMD_LICENSING_V3_TEMPORARY_IN_CLEAR_LEN 4
+#define MC_CMD_LICENSING_V3_TEMPORARY_IN_CLEAR_OP_OFST 0
+
+/* MC_CMD_LICENSING_V3_TEMPORARY_IN_STATUS msgrequest */
+#define MC_CMD_LICENSING_V3_TEMPORARY_IN_STATUS_LEN 4
+#define MC_CMD_LICENSING_V3_TEMPORARY_IN_STATUS_OP_OFST 0
+
+/* MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS msgresponse */
+#define MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_LEN 12
+/* status code */
+#define MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_STATUS_OFST 0
+/* enum: finished validating and installing license */
+#define MC_CMD_LICENSING_V3_TEMPORARY_STATUS_OK 0x0
+/* enum: license validation and installation in progress */
+#define MC_CMD_LICENSING_V3_TEMPORARY_STATUS_IN_PROGRESS 0x1
+/* enum: licensing error. More specific error messages are not provided to
+ * avoid exposing details of the licensing system to the client
+ */
+#define MC_CMD_LICENSING_V3_TEMPORARY_STATUS_ERROR 0x2
+/* bitmask of licensed features */
+#define MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_LICENSED_FEATURES_OFST 4
+#define MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_LICENSED_FEATURES_LEN 8
+#define MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_LICENSED_FEATURES_LO_OFST 4
+#define MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_LICENSED_FEATURES_HI_OFST 8
+
+
+/***********************************/
/* MC_CMD_SET_PORT_SNIFF_CONFIG
* Configure RX port sniffing for the physical port associated with the calling
* function. Only a privileged function may change the port sniffing
@@ -11705,6 +11939,66 @@
/* MC_CMD_RX_BALANCING_OUT msgresponse */
#define MC_CMD_RX_BALANCING_OUT_LEN 0
+
+/***********************************/
+/* MC_CMD_NVRAM_PRIVATE_APPEND
+ * Append a single TLV to the MC_USAGE_TLV partition. Returns MC_CMD_ERR_EEXIST
+ * if the tag is already present.
+ */
+#define MC_CMD_NVRAM_PRIVATE_APPEND 0x11c
+
+#define MC_CMD_0x11c_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_NVRAM_PRIVATE_APPEND_IN msgrequest */
+#define MC_CMD_NVRAM_PRIVATE_APPEND_IN_LENMIN 9
+#define MC_CMD_NVRAM_PRIVATE_APPEND_IN_LENMAX 252
+#define MC_CMD_NVRAM_PRIVATE_APPEND_IN_LEN(num) (8+1*(num))
+/* The tag to be appended */
+#define MC_CMD_NVRAM_PRIVATE_APPEND_IN_TAG_OFST 0
+/* The length of the data */
+#define MC_CMD_NVRAM_PRIVATE_APPEND_IN_LENGTH_OFST 4
+/* The data to be contained in the TLV structure */
+#define MC_CMD_NVRAM_PRIVATE_APPEND_IN_DATA_BUFFER_OFST 8
+#define MC_CMD_NVRAM_PRIVATE_APPEND_IN_DATA_BUFFER_LEN 1
+#define MC_CMD_NVRAM_PRIVATE_APPEND_IN_DATA_BUFFER_MINNUM 1
+#define MC_CMD_NVRAM_PRIVATE_APPEND_IN_DATA_BUFFER_MAXNUM 244
+
+/* MC_CMD_NVRAM_PRIVATE_APPEND_OUT msgresponse */
+#define MC_CMD_NVRAM_PRIVATE_APPEND_OUT_LEN 0
+
+
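The OFST/LEN constants above fully describe the append request's wire format: tag dword at byte 0, length dword at byte 4, payload from byte 8, at most 244 payload bytes. A minimal packing sketch, assuming the little-endian dword encoding used for other MCDI fields; handing the buffer to the MCDI transport is not shown:

#include <stdint.h>
#include <string.h>

#define APPEND_IN_TAG_OFST     0
#define APPEND_IN_LENGTH_OFST  4
#define APPEND_IN_DATA_OFST    8
#define APPEND_IN_DATA_MAXNUM  244

static void put_le32(uint8_t *p, uint32_t v)
{
	p[0] = (uint8_t)v;
	p[1] = (uint8_t)(v >> 8);
	p[2] = (uint8_t)(v >> 16);
	p[3] = (uint8_t)(v >> 24);
}

/* Returns the total request length, or 0 if the payload size is invalid. */
static size_t build_private_append(uint8_t *buf, uint32_t tag,
				   const void *data, uint32_t len)
{
	if (len < 1 || len > APPEND_IN_DATA_MAXNUM)
		return 0;
	put_le32(buf + APPEND_IN_TAG_OFST, tag);
	put_le32(buf + APPEND_IN_LENGTH_OFST, len);
	memcpy(buf + APPEND_IN_DATA_OFST, data, len);
	return APPEND_IN_DATA_OFST + len;	/* matches LEN(num) = 8 + num */
}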
+/***********************************/
+/* MC_CMD_XPM_VERIFY_CONTENTS
+ * Verify that the contents of the XPM memory is correct (Medford only). This
+ * is used during manufacture to check that the XPM memory has been programmed
+ * correctly at ATE.
+ */
+#define MC_CMD_XPM_VERIFY_CONTENTS 0x11b
+
+#define MC_CMD_0x11b_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_XPM_VERIFY_CONTENTS_IN msgrequest */
+#define MC_CMD_XPM_VERIFY_CONTENTS_IN_LEN 4
+/* Data type to be checked */
+#define MC_CMD_XPM_VERIFY_CONTENTS_IN_DATA_TYPE_OFST 0
+
+/* MC_CMD_XPM_VERIFY_CONTENTS_OUT msgresponse */
+#define MC_CMD_XPM_VERIFY_CONTENTS_OUT_LENMIN 12
+#define MC_CMD_XPM_VERIFY_CONTENTS_OUT_LENMAX 252
+#define MC_CMD_XPM_VERIFY_CONTENTS_OUT_LEN(num) (12+1*(num))
+/* Number of sectors found (test builds only) */
+#define MC_CMD_XPM_VERIFY_CONTENTS_OUT_NUM_SECTORS_OFST 0
+/* Number of bytes found (test builds only) */
+#define MC_CMD_XPM_VERIFY_CONTENTS_OUT_NUM_BYTES_OFST 4
+/* Length of signature */
+#define MC_CMD_XPM_VERIFY_CONTENTS_OUT_SIG_LENGTH_OFST 8
+/* Signature */
+#define MC_CMD_XPM_VERIFY_CONTENTS_OUT_SIGNATURE_OFST 12
+#define MC_CMD_XPM_VERIFY_CONTENTS_OUT_SIGNATURE_LEN 1
+#define MC_CMD_XPM_VERIFY_CONTENTS_OUT_SIGNATURE_MINNUM 0
+#define MC_CMD_XPM_VERIFY_CONTENTS_OUT_SIGNATURE_MAXNUM 240
+
+
/***********************************/
/* MC_CMD_SET_EVQ_TMR
* Update the timer load, timer reload and timer mode values for a given EVQ.
@@ -11798,4 +12092,151 @@
*/
#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_BUG35388_TMR_STEP_OFST 32
+
+/***********************************/
+/* MC_CMD_ALLOCATE_TX_VFIFO_CP
+ * When we use the TX_vFIFO_ULL mode, we can allocate common pools using the
+ * unused switch buffers.
+ */
+#define MC_CMD_ALLOCATE_TX_VFIFO_CP 0x11d
+
+#define MC_CMD_0x11d_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_ALLOCATE_TX_VFIFO_CP_IN msgrequest */
+#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_LEN 20
+/* Desired instance. Must be set to a specific instance, which is a function
+ * local queue index.
+ */
+#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_INSTANCE_OFST 0
+/* Will the common pool be used as TX_vFIFO_ULL (1) */
+#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_MODE_OFST 4
+#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_ENABLED 0x1 /* enum */
+/* enum: Using this interface without TX_vFIFO_ULL is not supported for now */
+#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_DISABLED 0x0
+/* Number of buffers to reserve for the common pool */
+#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_SIZE_OFST 8
+/* TX datapath to which the Common Pool is connected. */
+#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_INGRESS_OFST 12
+/* enum: Extracts information from function */
+#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_USE_FUNCTION_VALUE -0x1
+/* Network port or RX Engine to which the common pool connects. */
+#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_EGRESS_OFST 16
+/* enum: Extracts information from function */
+/* MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_USE_FUNCTION_VALUE -0x1 */
+#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_PORT0 0x0 /* enum */
+#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_PORT1 0x1 /* enum */
+#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_PORT2 0x2 /* enum */
+#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_PORT3 0x3 /* enum */
+/* enum: To enable Switch loopback with Rx engine 0 */
+#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_RX_ENGINE0 0x4
+/* enum: To enable Switch loopback with Rx engine 1 */
+#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_RX_ENGINE1 0x5
+
+/* MC_CMD_ALLOCATE_TX_VFIFO_CP_OUT msgresponse */
+#define MC_CMD_ALLOCATE_TX_VFIFO_CP_OUT_LEN 4
+/* ID of the common pool allocated */
+#define MC_CMD_ALLOCATE_TX_VFIFO_CP_OUT_CP_ID_OFST 0
+
+
+/***********************************/
+/* MC_CMD_ALLOCATE_TX_VFIFO_VFIFO
+ * When we use the TX_vFIFO_ULL mode, we can allocate vFIFOs using the
+ * previously allocated common pools.
+ */
+#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO 0x11e
+
+#define MC_CMD_0x11e_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN msgrequest */
+#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_LEN 20
+/* Previously allocated common pool with which the new vFIFO will be associated
+ */
+#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_CP_OFST 0
+/* Port or RX engine to associate with the vFIFO egress */
+#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_EGRESS_OFST 4
+/* enum: Extracts information from common pool */
+#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_USE_CP_VALUE -0x1
+#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_PORT0 0x0 /* enum */
+#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_PORT1 0x1 /* enum */
+#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_PORT2 0x2 /* enum */
+#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_PORT3 0x3 /* enum */
+/* enum: To enable Switch loopback with Rx engine 0 */
+#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_RX_ENGINE0 0x4
+/* enum: To enable Switch loopback with Rx engine 1 */
+#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_RX_ENGINE1 0x5
+/* Minimum number of buffers that the pool must have */
+#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_SIZE_OFST 8
+/* enum: Do not check the space available */
+#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_NO_MINIMUM 0x0
+/* Will the vFIFO be used as TX_vFIFO_ULL */
+#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_MODE_OFST 12
+/* Network priority of the vFIFO, if applicable */
+#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_PRIORITY_OFST 16
+/* enum: Search for the lowest unused priority */
+#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_LOWEST_AVAILABLE -0x1
+
+/* MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_OUT msgresponse */
+#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_OUT_LEN 8
+/* Short vFIFO ID */
+#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_OUT_VID_OFST 0
+/* Network priority of the vFIFO */
+#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_OUT_PRIORITY_OFST 4
+
+
+/***********************************/
+/* MC_CMD_TEARDOWN_TX_VFIFO_VF
+ * This interface clears the configuration of the given vFIFO and leaves it
+ * ready to be re-used.
+ */
+#define MC_CMD_TEARDOWN_TX_VFIFO_VF 0x11f
+
+#define MC_CMD_0x11f_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_TEARDOWN_TX_VFIFO_VF_IN msgrequest */
+#define MC_CMD_TEARDOWN_TX_VFIFO_VF_IN_LEN 4
+/* Short vFIFO ID */
+#define MC_CMD_TEARDOWN_TX_VFIFO_VF_IN_VFIFO_OFST 0
+
+/* MC_CMD_TEARDOWN_TX_VFIFO_VF_OUT msgresponse */
+#define MC_CMD_TEARDOWN_TX_VFIFO_VF_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_DEALLOCATE_TX_VFIFO_CP
+ * This interface clears the configuration of the given common pool and leaves
+ * it ready to be re-used.
+ */
+#define MC_CMD_DEALLOCATE_TX_VFIFO_CP 0x121
+
+#define MC_CMD_0x121_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_DEALLOCATE_TX_VFIFO_CP_IN msgrequest */
+#define MC_CMD_DEALLOCATE_TX_VFIFO_CP_IN_LEN 4
+/* Common pool ID given when pool allocated */
+#define MC_CMD_DEALLOCATE_TX_VFIFO_CP_IN_POOL_ID_OFST 0
+
+/* MC_CMD_DEALLOCATE_TX_VFIFO_CP_OUT msgresponse */
+#define MC_CMD_DEALLOCATE_TX_VFIFO_CP_OUT_LEN 0
+
+
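Taken together, the commands above suggest an allocate-pool, carve-vFIFO, teardown lifecycle for TX_vFIFO_ULL. A hedged sketch of the ordering, in which the mcdi_*() helpers are hypothetical wrappers over these MCDI requests and not driver API:

#include <stdio.h>

/* Hypothetical MCDI wrappers returning the allocated IDs. */
extern int mcdi_allocate_tx_vfifo_cp(unsigned int instance, unsigned int size,
				     unsigned int *cp_id);
extern int mcdi_allocate_tx_vfifo_vfifo(unsigned int cp_id, unsigned int *vid);
extern int mcdi_teardown_tx_vfifo_vf(unsigned int vid);
extern int mcdi_deallocate_tx_vfifo_cp(unsigned int cp_id);

int setup_ull_vfifo(unsigned int instance, unsigned int nbufs)
{
	unsigned int cp_id, vid;
	int rc;

	/* A common pool must exist before a vFIFO can be carved from it. */
	rc = mcdi_allocate_tx_vfifo_cp(instance, nbufs, &cp_id);
	if (rc)
		return rc;
	rc = mcdi_allocate_tx_vfifo_vfifo(cp_id, &vid);
	if (rc) {
		/* Unwind in reverse order on failure. */
		mcdi_deallocate_tx_vfifo_cp(cp_id);
		return rc;
	}
	printf("common pool %u, vFIFO %u\n", cp_id, vid);
	return 0;
}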
+/***********************************/
+/* MC_CMD_SWITCH_GET_UNASSIGNED_BUFFERS
+ * This interface allows the host to find out how many common pool buffers are
+ * not yet assigned.
+ */
+#define MC_CMD_SWITCH_GET_UNASSIGNED_BUFFERS 0x124
+
+#define MC_CMD_0x124_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_SWITCH_GET_UNASSIGNED_BUFFERS_IN msgrequest */
+#define MC_CMD_SWITCH_GET_UNASSIGNED_BUFFERS_IN_LEN 0
+
+/* MC_CMD_SWITCH_GET_UNASSIGNED_BUFFERS_OUT msgresponse */
+#define MC_CMD_SWITCH_GET_UNASSIGNED_BUFFERS_OUT_LEN 8
+/* Available buffers for the ENG to NET vFIFOs. */
+#define MC_CMD_SWITCH_GET_UNASSIGNED_BUFFERS_OUT_NET_OFST 0
+/* Available buffers for the ENG to ENG and NET to ENG vFIFOs. */
+#define MC_CMD_SWITCH_GET_UNASSIGNED_BUFFERS_OUT_ENG_OFST 4
+
+
#endif /* MCDI_PCOL_H */
diff --git a/drivers/net/ethernet/sfc/mcdi_port.c b/drivers/net/ethernet/sfc/mcdi_port.c
index 2a9228a6e4a0..c905971c5f3a 100644
--- a/drivers/net/ethernet/sfc/mcdi_port.c
+++ b/drivers/net/ethernet/sfc/mcdi_port.c
@@ -13,7 +13,6 @@
#include <linux/slab.h>
#include "efx.h"
-#include "phy.h"
#include "mcdi.h"
#include "mcdi_pcol.h"
#include "nic.h"
@@ -504,45 +503,59 @@ static void efx_mcdi_phy_remove(struct efx_nic *efx)
kfree(phy_data);
}
-static void efx_mcdi_phy_get_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd)
+static void efx_mcdi_phy_get_link_ksettings(struct efx_nic *efx,
+ struct ethtool_link_ksettings *cmd)
{
struct efx_mcdi_phy_data *phy_cfg = efx->phy_data;
MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_LINK_OUT_LEN);
int rc;
-
- ecmd->supported =
- mcdi_to_ethtool_cap(phy_cfg->media, phy_cfg->supported_cap);
- ecmd->advertising = efx->link_advertising;
- ethtool_cmd_speed_set(ecmd, efx->link_state.speed);
- ecmd->duplex = efx->link_state.fd;
- ecmd->port = mcdi_to_ethtool_media(phy_cfg->media);
- ecmd->phy_address = phy_cfg->port;
- ecmd->transceiver = XCVR_INTERNAL;
- ecmd->autoneg = !!(efx->link_advertising & ADVERTISED_Autoneg);
- ecmd->mdio_support = (efx->mdio.mode_support &
+ u32 supported, advertising, lp_advertising;
+
+ supported = mcdi_to_ethtool_cap(phy_cfg->media, phy_cfg->supported_cap);
+ advertising = efx->link_advertising;
+ cmd->base.speed = efx->link_state.speed;
+ cmd->base.duplex = efx->link_state.fd;
+ cmd->base.port = mcdi_to_ethtool_media(phy_cfg->media);
+ cmd->base.phy_address = phy_cfg->port;
+ cmd->base.autoneg = !!(efx->link_advertising & ADVERTISED_Autoneg);
+ cmd->base.mdio_support = (efx->mdio.mode_support &
(MDIO_SUPPORTS_C45 | MDIO_SUPPORTS_C22));
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
+ supported);
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
+ advertising);
+
BUILD_BUG_ON(MC_CMD_GET_LINK_IN_LEN != 0);
rc = efx_mcdi_rpc(efx, MC_CMD_GET_LINK, NULL, 0,
outbuf, sizeof(outbuf), NULL);
if (rc)
return;
- ecmd->lp_advertising =
+ lp_advertising =
mcdi_to_ethtool_cap(phy_cfg->media,
MCDI_DWORD(outbuf, GET_LINK_OUT_LP_CAP));
+
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.lp_advertising,
+ lp_advertising);
}
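
The ethtool_convert_legacy_u32_to_link_mode() calls above copy the legacy 32-bit ethtool masks into the wider link-mode bitmaps, bit for bit. A rough userspace re-implementation of that direction, for illustration only (the kernel helper operates on DECLARE_BITMAP storage; the 128-bit width here is an assumption):

#include <stdio.h>

#define LINK_MODE_BITS 128	/* assumed bitmap width */
#define LONGS(bits)    (((bits) + 63) / 64)

static void legacy_u32_to_link_mode(unsigned long long *dst, unsigned int legacy)
{
	for (int i = 0; i < LONGS(LINK_MODE_BITS); i++)
		dst[i] = 0;
	dst[0] = legacy;	/* the low 32 bits carry the legacy modes */
}

int main(void)
{
	unsigned long long modes[LONGS(LINK_MODE_BITS)];
	/* Bits 5 and 12 are 1000baseT_Full and 10000baseT_Full in the
	 * legacy layout. */
	unsigned int advertising = (1u << 5) | (1u << 12);

	legacy_u32_to_link_mode(modes, advertising);
	printf("link-mode word 0 = %#llx\n", modes[0]);
	return 0;
}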
-static int efx_mcdi_phy_set_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd)
+static int
+efx_mcdi_phy_set_link_ksettings(struct efx_nic *efx,
+ const struct ethtool_link_ksettings *cmd)
{
struct efx_mcdi_phy_data *phy_cfg = efx->phy_data;
u32 caps;
int rc;
+ u32 advertising;
+
+ ethtool_convert_link_mode_to_legacy_u32(&advertising,
+ cmd->link_modes.advertising);
- if (ecmd->autoneg) {
- caps = (ethtool_to_mcdi_cap(ecmd->advertising) |
+ if (cmd->base.autoneg) {
+ caps = (ethtool_to_mcdi_cap(advertising) |
1 << MC_CMD_PHY_CAP_AN_LBN);
- } else if (ecmd->duplex) {
- switch (ethtool_cmd_speed(ecmd)) {
+ } else if (cmd->base.duplex) {
+ switch (cmd->base.speed) {
case 10: caps = 1 << MC_CMD_PHY_CAP_10FDX_LBN; break;
case 100: caps = 1 << MC_CMD_PHY_CAP_100FDX_LBN; break;
case 1000: caps = 1 << MC_CMD_PHY_CAP_1000FDX_LBN; break;
@@ -551,7 +564,7 @@ static int efx_mcdi_phy_set_settings(struct efx_nic *efx, struct ethtool_cmd *ec
default: return -EINVAL;
}
} else {
- switch (ethtool_cmd_speed(ecmd)) {
+ switch (cmd->base.speed) {
case 10: caps = 1 << MC_CMD_PHY_CAP_10HDX_LBN; break;
case 100: caps = 1 << MC_CMD_PHY_CAP_100HDX_LBN; break;
case 1000: caps = 1 << MC_CMD_PHY_CAP_1000HDX_LBN; break;
@@ -564,9 +577,9 @@ static int efx_mcdi_phy_set_settings(struct efx_nic *efx, struct ethtool_cmd *ec
if (rc)
return rc;
- if (ecmd->autoneg) {
+ if (cmd->base.autoneg) {
efx_link_set_advertising(
- efx, ecmd->advertising | ADVERTISED_Autoneg);
+ efx, advertising | ADVERTISED_Autoneg);
phy_cfg->forced_cap = 0;
} else {
efx_link_set_advertising(efx, 0);
@@ -813,8 +826,8 @@ static const struct efx_phy_operations efx_mcdi_phy_ops = {
.poll = efx_mcdi_phy_poll,
.fini = efx_port_dummy_op_void,
.remove = efx_mcdi_phy_remove,
- .get_settings = efx_mcdi_phy_get_settings,
- .set_settings = efx_mcdi_phy_set_settings,
+ .get_link_ksettings = efx_mcdi_phy_get_link_ksettings,
+ .set_link_ksettings = efx_mcdi_phy_set_link_ksettings,
.test_alive = efx_mcdi_phy_test_alive,
.run_tests = efx_mcdi_phy_run_tests,
.test_name = efx_mcdi_phy_test_name,
@@ -841,7 +854,7 @@ void efx_mcdi_process_link_change(struct efx_nic *efx, efx_qword_t *ev)
u32 flags, fcntl, speed, lpa;
speed = EFX_QWORD_FIELD(*ev, MCDI_EVENT_LINKCHANGE_SPEED);
- EFX_BUG_ON_PARANOID(speed >= ARRAY_SIZE(efx_mcdi_event_link_speed));
+ EFX_WARN_ON_PARANOID(speed >= ARRAY_SIZE(efx_mcdi_event_link_speed));
speed = efx_mcdi_event_link_speed[speed];
flags = EFX_QWORD_FIELD(*ev, MCDI_EVENT_LINKCHANGE_LINK_FLAGS);
diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h
index 99d8c82124bb..1a635ced62d0 100644
--- a/drivers/net/ethernet/sfc/net_driver.h
+++ b/drivers/net/ethernet/sfc/net_driver.h
@@ -41,13 +41,13 @@
*
**************************************************************************/
-#define EFX_DRIVER_VERSION "4.0"
+#define EFX_DRIVER_VERSION "4.1"
#ifdef DEBUG
-#define EFX_BUG_ON_PARANOID(x) BUG_ON(x)
+#define EFX_WARN_ON_ONCE_PARANOID(x) WARN_ON_ONCE(x)
#define EFX_WARN_ON_PARANOID(x) WARN_ON(x)
#else
-#define EFX_BUG_ON_PARANOID(x) do {} while (0)
+#define EFX_WARN_ON_ONCE_PARANOID(x) do {} while (0)
#define EFX_WARN_ON_PARANOID(x) do {} while (0)
#endif
@@ -139,8 +139,6 @@ struct efx_special_buffer {
* struct efx_tx_buffer - buffer state for a TX descriptor
* @skb: When @flags & %EFX_TX_BUF_SKB, the associated socket buffer to be
* freed when descriptor completes
- * @heap_buf: When @flags & %EFX_TX_BUF_HEAP, the associated heap buffer to be
- * freed when descriptor completes.
* @option: When @flags & %EFX_TX_BUF_OPTION, a NIC-specific option descriptor.
* @dma_addr: DMA address of the fragment.
* @flags: Flags for allocation and DMA mapping type
@@ -151,10 +149,7 @@ struct efx_special_buffer {
* Only valid if @unmap_len != 0.
*/
struct efx_tx_buffer {
- union {
- const struct sk_buff *skb;
- void *heap_buf;
- };
+ const struct sk_buff *skb;
union {
efx_qword_t option;
dma_addr_t dma_addr;
@@ -166,7 +161,6 @@ struct efx_tx_buffer {
};
#define EFX_TX_BUF_CONT 1 /* not last descriptor of packet */
#define EFX_TX_BUF_SKB 2 /* buffer is last part of skb */
-#define EFX_TX_BUF_HEAP 4 /* buffer was allocated with kmalloc() */
#define EFX_TX_BUF_MAP_SINGLE 8 /* buffer was mapped with dma_map_single() */
#define EFX_TX_BUF_OPTION 0x10 /* empty buffer for option descriptor */
@@ -189,13 +183,16 @@ struct efx_tx_buffer {
* @channel: The associated channel
* @core_txq: The networking core TX queue structure
* @buffer: The software buffer ring
- * @tsoh_page: Array of pages of TSO header buffers
+ * @cb_page: Array of pages of copy buffers. Carved up according to
+ * %EFX_TX_CB_ORDER into %EFX_TX_CB_SIZE-sized chunks.
* @txd: The hardware descriptor ring
* @ptr_mask: The size of the ring minus 1.
* @piobuf: PIO buffer region for this TX queue (shared with its partner).
* Size of the region is efx_piobuf_size.
* @piobuf_offset: Buffer offset to be specified in PIO descriptors
* @initialised: Has hardware queue been initialised?
+ * @handle_tso: TSO xmit preparation handler. Sets up the TSO metadata and
+ * may also map tx data, depending on the nature of the TSO implementation.
* @read_count: Current read pointer.
* This is the number of buffers that have been removed from both rings.
* @old_write_count: The value of @write_count when last checked.
@@ -221,9 +218,11 @@ struct efx_tx_buffer {
* @tso_long_headers: Number of packets with headers too long for standard
* blocks
* @tso_packets: Number of packets via the TSO xmit path
+ * @tso_fallbacks: Number of times TSO fallback used
* @pushes: Number of times the TX push feature has been used
* @pio_packets: Number of times the TX PIO feature has been used
* @xmit_more_available: Are any packets waiting to be pushed to the NIC
+ * @cb_packets: Number of times the TX copybreak feature has been used
* @empty_read_count: If the completion path has seen the queue as empty
* and the transmission path has not yet checked this, the value of
* @read_count bitwise-added to %EFX_EMPTY_COUNT_VALID; otherwise 0.
@@ -236,13 +235,16 @@ struct efx_tx_queue {
struct efx_channel *channel;
struct netdev_queue *core_txq;
struct efx_tx_buffer *buffer;
- struct efx_buffer *tsoh_page;
+ struct efx_buffer *cb_page;
struct efx_special_buffer txd;
unsigned int ptr_mask;
void __iomem *piobuf;
unsigned int piobuf_offset;
bool initialised;
+ /* Function pointers used in the fast path. */
+ int (*handle_tso)(struct efx_tx_queue*, struct sk_buff*, bool *);
+
/* Members used mainly on the completion path */
unsigned int read_count ____cacheline_aligned_in_smp;
unsigned int old_write_count;
@@ -257,9 +259,11 @@ struct efx_tx_queue {
unsigned int tso_bursts;
unsigned int tso_long_headers;
unsigned int tso_packets;
+ unsigned int tso_fallbacks;
unsigned int pushes;
unsigned int pio_packets;
bool xmit_more_available;
+ unsigned int cb_packets;
/* Statistics to supplement MAC stats */
unsigned long tx_packets;
@@ -269,6 +273,9 @@ struct efx_tx_queue {
atomic_t flush_outstanding;
};
+#define EFX_TX_CB_ORDER 7
+#define EFX_TX_CB_SIZE ((1 << EFX_TX_CB_ORDER) - NET_IP_ALIGN)
+
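
To make the carving concrete: with 4 KiB pages, order 7 gives 128-byte strides, so 32 copy buffers per page, each giving up NET_IP_ALIGN bytes so the IP header lands aligned. A stand-alone sketch of the index arithmetic that efx_tx_get_copy_buffer() applies in tx.c further down (PAGE_SHIFT 12 and NET_IP_ALIGN 2 are assumptions matching common configs):

#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1u << PAGE_SHIFT)
#define NET_IP_ALIGN	2
#define EFX_TX_CB_ORDER	7
#define EFX_TX_CB_SIZE	((1 << EFX_TX_CB_ORDER) - NET_IP_ALIGN)

int main(void)
{
	/* Descriptor ring index -> (page, offset) of its copy buffer. */
	for (unsigned int index = 30; index <= 33; index++) {
		unsigned int page = index >> (PAGE_SHIFT - EFX_TX_CB_ORDER);
		unsigned int offset = ((index << EFX_TX_CB_ORDER) +
				       NET_IP_ALIGN) & (PAGE_SIZE - 1);

		printf("index %u -> cb_page[%u] + %u (max %d bytes)\n",
		       index, page, offset, EFX_TX_CB_SIZE);
	}
	return 0;
}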
/**
* struct efx_rx_buffer - An Efx RX data buffer
* @dma_addr: DMA base address of the buffer
@@ -713,8 +720,8 @@ static inline bool efx_link_state_equal(const struct efx_link_state *left,
* @reconfigure: Reconfigure PHY (e.g. for new link parameters)
* @poll: Update @link_state and report whether it changed.
* Serialised by the mac_lock.
- * @get_settings: Get ethtool settings. Serialised by the mac_lock.
- * @set_settings: Set ethtool settings. Serialised by the mac_lock.
+ * @get_link_ksettings: Get ethtool settings. Serialised by the mac_lock.
+ * @set_link_ksettings: Set ethtool settings. Serialised by the mac_lock.
* @set_npage_adv: Set abilities advertised in (Extended) Next Page
* (only needed where AN bit is set in mmds)
* @test_alive: Test that PHY is 'alive' (online)
@@ -729,10 +736,10 @@ struct efx_phy_operations {
void (*remove) (struct efx_nic *efx);
int (*reconfigure) (struct efx_nic *efx);
bool (*poll) (struct efx_nic *efx);
- void (*get_settings) (struct efx_nic *efx,
- struct ethtool_cmd *ecmd);
- int (*set_settings) (struct efx_nic *efx,
- struct ethtool_cmd *ecmd);
+ void (*get_link_ksettings)(struct efx_nic *efx,
+ struct ethtool_link_ksettings *cmd);
+ int (*set_link_ksettings)(struct efx_nic *efx,
+ const struct ethtool_link_ksettings *cmd);
void (*set_npage_adv) (struct efx_nic *efx, u32);
int (*test_alive) (struct efx_nic *efx);
const char *(*test_name) (struct efx_nic *efx, unsigned int index);
@@ -853,6 +860,7 @@ struct vfdi_status;
* @rx_hash_key: Toeplitz hash key for RSS
* @rx_indir_table: Indirection table for RSS
* @rx_scatter: Scatter mode enabled for receives
+ * @rx_hash_udp_4tuple: UDP 4-tuple hashing enabled
* @int_error_count: Number of internal errors seen recently
* @int_error_expire: Time at which error count will be expired
* @irq_soft_enabled: Are IRQs soft-enabled? If not, IRQ handler will
@@ -990,6 +998,7 @@ struct efx_nic {
u8 rx_hash_key[40];
u32 rx_indir_table[128];
bool rx_scatter;
+ bool rx_hash_udp_4tuple;
unsigned int_error_count;
unsigned long int_error_expire;
@@ -1210,6 +1219,8 @@ struct efx_mtd_partition {
* and tx_type will already have been validated but this operation
* must validate and update rx_filter.
* @set_mac_address: Set the MAC address of the device
+ * @tso_versions: Returns mask of firmware-assisted TSO versions supported.
+ * If %NULL, then device does not support any TSO version.
* @revision: Hardware architecture revision
* @txd_ptr_tbl_base: TX descriptor ring base address
* @rxd_ptr_tbl_base: RX descriptor ring base address
@@ -1286,6 +1297,8 @@ struct efx_nic_type {
void (*tx_init)(struct efx_tx_queue *tx_queue);
void (*tx_remove)(struct efx_tx_queue *tx_queue);
void (*tx_write)(struct efx_tx_queue *tx_queue);
+ unsigned int (*tx_limit_len)(struct efx_tx_queue *tx_queue,
+ dma_addr_t dma_addr, unsigned int len);
int (*rx_push_rss_config)(struct efx_nic *efx, bool user,
const u32 *rx_indir_table);
int (*rx_probe)(struct efx_rx_queue *rx_queue);
@@ -1364,6 +1377,7 @@ struct efx_nic_type {
void (*vswitching_remove)(struct efx_nic *efx);
int (*get_mac_address)(struct efx_nic *efx, unsigned char *perm_addr);
int (*set_mac_address)(struct efx_nic *efx);
+ u32 (*tso_versions)(struct efx_nic *efx);
int revision;
unsigned int txd_ptr_tbl_base;
@@ -1395,7 +1409,7 @@ struct efx_nic_type {
static inline struct efx_channel *
efx_get_channel(struct efx_nic *efx, unsigned index)
{
- EFX_BUG_ON_PARANOID(index >= efx->n_channels);
+ EFX_WARN_ON_ONCE_PARANOID(index >= efx->n_channels);
return efx->channel[index];
}
@@ -1416,8 +1430,8 @@ efx_get_channel(struct efx_nic *efx, unsigned index)
static inline struct efx_tx_queue *
efx_get_tx_queue(struct efx_nic *efx, unsigned index, unsigned type)
{
- EFX_BUG_ON_PARANOID(index >= efx->n_tx_channels ||
- type >= EFX_TXQ_TYPES);
+ EFX_WARN_ON_ONCE_PARANOID(index >= efx->n_tx_channels ||
+ type >= EFX_TXQ_TYPES);
return &efx->channel[efx->tx_channel_offset + index]->tx_queue[type];
}
@@ -1430,8 +1444,8 @@ static inline bool efx_channel_has_tx_queues(struct efx_channel *channel)
static inline struct efx_tx_queue *
efx_channel_get_tx_queue(struct efx_channel *channel, unsigned type)
{
- EFX_BUG_ON_PARANOID(!efx_channel_has_tx_queues(channel) ||
- type >= EFX_TXQ_TYPES);
+ EFX_WARN_ON_ONCE_PARANOID(!efx_channel_has_tx_queues(channel) ||
+ type >= EFX_TXQ_TYPES);
return &channel->tx_queue[type];
}
@@ -1468,7 +1482,7 @@ static inline bool efx_channel_has_rx_queue(struct efx_channel *channel)
static inline struct efx_rx_queue *
efx_channel_get_rx_queue(struct efx_channel *channel)
{
- EFX_BUG_ON_PARANOID(!efx_channel_has_rx_queue(channel));
+ EFX_WARN_ON_ONCE_PARANOID(!efx_channel_has_rx_queue(channel));
return &channel->rx_queue;
}
@@ -1543,4 +1557,32 @@ static inline netdev_features_t efx_supported_features(const struct efx_nic *efx
return net_dev->features | net_dev->hw_features;
}
+/* Get the current TX queue insert index. */
+static inline unsigned int
+efx_tx_queue_get_insert_index(const struct efx_tx_queue *tx_queue)
+{
+ return tx_queue->insert_count & tx_queue->ptr_mask;
+}
+
+/* Get a TX buffer. */
+static inline struct efx_tx_buffer *
+__efx_tx_queue_get_insert_buffer(const struct efx_tx_queue *tx_queue)
+{
+ return &tx_queue->buffer[efx_tx_queue_get_insert_index(tx_queue)];
+}
+
+/* Get a TX buffer, checking it's not currently in use. */
+static inline struct efx_tx_buffer *
+efx_tx_queue_get_insert_buffer(const struct efx_tx_queue *tx_queue)
+{
+ struct efx_tx_buffer *buffer =
+ __efx_tx_queue_get_insert_buffer(tx_queue);
+
+ EFX_WARN_ON_ONCE_PARANOID(buffer->len);
+ EFX_WARN_ON_ONCE_PARANOID(buffer->flags);
+ EFX_WARN_ON_ONCE_PARANOID(buffer->unmap_len);
+
+ return buffer;
+}
+
#endif /* EFX_NET_DRIVER_H */
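
These helpers depend on the descriptor ring being a power of two in size, so masking with @ptr_mask stands in for a modulo on the free-running counter; a minimal sketch of that wraparound:

#include <stdio.h>

struct ring {
	unsigned int insert_count;	/* free-running, never masked */
	unsigned int ptr_mask;		/* entries - 1, entries a power of 2 */
};

static unsigned int ring_get_insert_index(const struct ring *r)
{
	return r->insert_count & r->ptr_mask;
}

int main(void)
{
	struct ring r = { .insert_count = 1022, .ptr_mask = 1024 - 1 };
	int i;

	for (i = 0; i < 4; i++, r.insert_count++)
		printf("count %u -> slot %u\n",
		       r.insert_count, ring_get_insert_index(&r));
	return 0;
}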
diff --git a/drivers/net/ethernet/sfc/nic.h b/drivers/net/ethernet/sfc/nic.h
index 73bee7ea332a..223774635cba 100644
--- a/drivers/net/ethernet/sfc/nic.h
+++ b/drivers/net/ethernet/sfc/nic.h
@@ -18,11 +18,8 @@
#include "mcdi.h"
enum {
- EFX_REV_FALCON_A0 = 0,
- EFX_REV_FALCON_A1 = 1,
- EFX_REV_FALCON_B0 = 2,
- EFX_REV_SIENA_A0 = 3,
- EFX_REV_HUNT_A0 = 4,
+ EFX_REV_SIENA_A0 = 0,
+ EFX_REV_HUNT_A0 = 1,
};
static inline int efx_nic_rev(struct efx_nic *efx)
@@ -32,12 +29,6 @@ static inline int efx_nic_rev(struct efx_nic *efx)
u32 efx_farch_fpga_ver(struct efx_nic *efx);
-/* NIC has two interlinked PCI functions for the same port. */
-static inline bool efx_nic_is_dual_func(struct efx_nic *efx)
-{
- return efx_nic_rev(efx) < EFX_REV_FALCON_B0;
-}
-
/* Read the current event from the event queue */
static inline efx_qword_t *efx_event(struct efx_channel *channel,
unsigned int index)
@@ -144,11 +135,6 @@ enum {
PHY_TYPE_SFT9001B = 10,
};
-#define FALCON_XMAC_LOOPBACKS \
- ((1 << LOOPBACK_XGMII) | \
- (1 << LOOPBACK_XGXS) | \
- (1 << LOOPBACK_XAUI))
-
/* Alignment of PCIe DMA boundaries (4KB) */
#define EFX_PAGE_SIZE 4096
/* Size and alignment of buffer table entries (same) */
@@ -161,160 +147,6 @@ enum {
GENERIC_STAT_COUNT
};
-/**
- * struct falcon_board_type - board operations and type information
- * @id: Board type id, as found in NVRAM
- * @init: Allocate resources and initialise peripheral hardware
- * @init_phy: Do board-specific PHY initialisation
- * @fini: Shut down hardware and free resources
- * @set_id_led: Set state of identifying LED or revert to automatic function
- * @monitor: Board-specific health check function
- */
-struct falcon_board_type {
- u8 id;
- int (*init) (struct efx_nic *nic);
- void (*init_phy) (struct efx_nic *efx);
- void (*fini) (struct efx_nic *nic);
- void (*set_id_led) (struct efx_nic *efx, enum efx_led_mode mode);
- int (*monitor) (struct efx_nic *nic);
-};
-
-/**
- * struct falcon_board - board information
- * @type: Type of board
- * @major: Major rev. ('A', 'B' ...)
- * @minor: Minor rev. (0, 1, ...)
- * @i2c_adap: I2C adapter for on-board peripherals
- * @i2c_data: Data for bit-banging algorithm
- * @hwmon_client: I2C client for hardware monitor
- * @ioexp_client: I2C client for power/port control
- */
-struct falcon_board {
- const struct falcon_board_type *type;
- int major;
- int minor;
- struct i2c_adapter i2c_adap;
- struct i2c_algo_bit_data i2c_data;
- struct i2c_client *hwmon_client, *ioexp_client;
-};
-
-/**
- * struct falcon_spi_device - a Falcon SPI (Serial Peripheral Interface) device
- * @device_id: Controller's id for the device
- * @size: Size (in bytes)
- * @addr_len: Number of address bytes in read/write commands
- * @munge_address: Flag whether addresses should be munged.
- * Some devices with 9-bit addresses (e.g. AT25040A EEPROM)
- * use bit 3 of the command byte as address bit A8, rather
- * than having a two-byte address. If this flag is set, then
- * commands should be munged in this way.
- * @erase_command: Erase command (or 0 if sector erase not needed).
- * @erase_size: Erase sector size (in bytes)
- * Erase commands affect sectors with this size and alignment.
- * This must be a power of two.
- * @block_size: Write block size (in bytes).
- * Write commands are limited to blocks with this size and alignment.
- */
-struct falcon_spi_device {
- int device_id;
- unsigned int size;
- unsigned int addr_len;
- unsigned int munge_address:1;
- u8 erase_command;
- unsigned int erase_size;
- unsigned int block_size;
-};
-
-static inline bool falcon_spi_present(const struct falcon_spi_device *spi)
-{
- return spi->size != 0;
-}
-
-enum {
- FALCON_STAT_tx_bytes = GENERIC_STAT_COUNT,
- FALCON_STAT_tx_packets,
- FALCON_STAT_tx_pause,
- FALCON_STAT_tx_control,
- FALCON_STAT_tx_unicast,
- FALCON_STAT_tx_multicast,
- FALCON_STAT_tx_broadcast,
- FALCON_STAT_tx_lt64,
- FALCON_STAT_tx_64,
- FALCON_STAT_tx_65_to_127,
- FALCON_STAT_tx_128_to_255,
- FALCON_STAT_tx_256_to_511,
- FALCON_STAT_tx_512_to_1023,
- FALCON_STAT_tx_1024_to_15xx,
- FALCON_STAT_tx_15xx_to_jumbo,
- FALCON_STAT_tx_gtjumbo,
- FALCON_STAT_tx_non_tcpudp,
- FALCON_STAT_tx_mac_src_error,
- FALCON_STAT_tx_ip_src_error,
- FALCON_STAT_rx_bytes,
- FALCON_STAT_rx_good_bytes,
- FALCON_STAT_rx_bad_bytes,
- FALCON_STAT_rx_packets,
- FALCON_STAT_rx_good,
- FALCON_STAT_rx_bad,
- FALCON_STAT_rx_pause,
- FALCON_STAT_rx_control,
- FALCON_STAT_rx_unicast,
- FALCON_STAT_rx_multicast,
- FALCON_STAT_rx_broadcast,
- FALCON_STAT_rx_lt64,
- FALCON_STAT_rx_64,
- FALCON_STAT_rx_65_to_127,
- FALCON_STAT_rx_128_to_255,
- FALCON_STAT_rx_256_to_511,
- FALCON_STAT_rx_512_to_1023,
- FALCON_STAT_rx_1024_to_15xx,
- FALCON_STAT_rx_15xx_to_jumbo,
- FALCON_STAT_rx_gtjumbo,
- FALCON_STAT_rx_bad_lt64,
- FALCON_STAT_rx_bad_gtjumbo,
- FALCON_STAT_rx_overflow,
- FALCON_STAT_rx_symbol_error,
- FALCON_STAT_rx_align_error,
- FALCON_STAT_rx_length_error,
- FALCON_STAT_rx_internal_error,
- FALCON_STAT_rx_nodesc_drop_cnt,
- FALCON_STAT_COUNT
-};
-
-/**
- * struct falcon_nic_data - Falcon NIC state
- * @pci_dev2: Secondary function of Falcon A
- * @board: Board state and functions
- * @stats: Hardware statistics
- * @stats_disable_count: Nest count for disabling statistics fetches
- * @stats_pending: Is there a pending DMA of MAC statistics.
- * @stats_timer: A timer for regularly fetching MAC statistics.
- * @spi_flash: SPI flash device
- * @spi_eeprom: SPI EEPROM device
- * @spi_lock: SPI bus lock
- * @mdio_lock: MDIO bus lock
- * @xmac_poll_required: XMAC link state needs polling
- */
-struct falcon_nic_data {
- struct pci_dev *pci_dev2;
- struct falcon_board board;
- u64 stats[FALCON_STAT_COUNT];
- unsigned int stats_disable_count;
- bool stats_pending;
- struct timer_list stats_timer;
- struct falcon_spi_device spi_flash;
- struct falcon_spi_device spi_eeprom;
- struct mutex spi_lock;
- struct mutex mdio_lock;
- bool xmac_poll_required;
-};
-
-static inline struct falcon_board *falcon_board(struct efx_nic *efx)
-{
- struct falcon_nic_data *data = efx->nic_data;
- return &data->board;
-}
-
enum {
SIENA_STAT_tx_bytes = GENERIC_STAT_COUNT,
SIENA_STAT_tx_good_bytes,
@@ -681,6 +513,8 @@ void efx_farch_tx_init(struct efx_tx_queue *tx_queue);
void efx_farch_tx_fini(struct efx_tx_queue *tx_queue);
void efx_farch_tx_remove(struct efx_tx_queue *tx_queue);
void efx_farch_tx_write(struct efx_tx_queue *tx_queue);
+unsigned int efx_farch_tx_limit_len(struct efx_tx_queue *tx_queue,
+ dma_addr_t dma_addr, unsigned int len);
int efx_farch_rx_probe(struct efx_rx_queue *rx_queue);
void efx_farch_rx_init(struct efx_rx_queue *rx_queue);
void efx_farch_rx_fini(struct efx_rx_queue *rx_queue);
diff --git a/drivers/net/ethernet/sfc/ptp.c b/drivers/net/ethernet/sfc/ptp.c
index 77a5364f7a10..60cdb97f58e2 100644
--- a/drivers/net/ethernet/sfc/ptp.c
+++ b/drivers/net/ethernet/sfc/ptp.c
@@ -835,7 +835,7 @@ static int efx_ptp_synchronize(struct efx_nic *efx, unsigned int num_readings)
ACCESS_ONCE(*start) = 0;
rc = efx_mcdi_rpc_start(efx, MC_CMD_PTP, synch_buf,
MC_CMD_PTP_IN_SYNCHRONIZE_LEN);
- EFX_BUG_ON_PARANOID(rc);
+ EFX_WARN_ON_ONCE_PARANOID(rc);
/* Wait for start from MCDI (or timeout) */
timeout = jiffies + msecs_to_jiffies(MAX_SYNCHRONISE_WAIT_MS);
diff --git a/drivers/net/ethernet/sfc/rx.c b/drivers/net/ethernet/sfc/rx.c
index 02b0b5272c14..5f4ad4f3518f 100644
--- a/drivers/net/ethernet/sfc/rx.c
+++ b/drivers/net/ethernet/sfc/rx.c
@@ -335,7 +335,7 @@ void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue, bool atomic)
/* Calculate current fill level, and exit if we don't need to fill */
fill_level = (rx_queue->added_count - rx_queue->removed_count);
- EFX_BUG_ON_PARANOID(fill_level > rx_queue->efx->rxq_entries);
+ EFX_WARN_ON_ONCE_PARANOID(fill_level > rx_queue->efx->rxq_entries);
if (fill_level >= rx_queue->fast_fill_trigger)
goto out;
@@ -347,7 +347,7 @@ void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue, bool atomic)
batch_size = efx->rx_pages_per_batch * efx->rx_bufs_per_page;
space = rx_queue->max_fill - fill_level;
- EFX_BUG_ON_PARANOID(space < batch_size);
+ EFX_WARN_ON_ONCE_PARANOID(space < batch_size);
netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev,
"RX queue %d fast-filling descriptor ring from"
@@ -400,21 +400,10 @@ static void efx_rx_packet__check_len(struct efx_rx_queue *rx_queue,
*/
rx_buf->flags |= EFX_RX_PKT_DISCARD;
- if ((len > rx_buf->len) && EFX_WORKAROUND_8071(efx)) {
- if (net_ratelimit())
- netif_err(efx, rx_err, efx->net_dev,
- " RX queue %d seriously overlength "
- "RX event (0x%x > 0x%x+0x%x). Leaking\n",
- efx_rx_queue_index(rx_queue), len, max_len,
- efx->type->rx_buffer_padding);
- efx_schedule_reset(efx, RESET_TYPE_RX_RECOVERY);
- } else {
- if (net_ratelimit())
- netif_err(efx, rx_err, efx->net_dev,
- " RX queue %d overlength RX event "
- "(0x%x > 0x%x)\n",
- efx_rx_queue_index(rx_queue), len, max_len);
- }
+ if (net_ratelimit())
+ netif_err(efx, rx_err, efx->net_dev,
+ "RX queue %d overlength RX event (%#x > %#x)\n",
+ efx_rx_queue_index(rx_queue), len, max_len);
efx_rx_queue_channel(rx_queue)->n_rx_overlength++;
}
@@ -486,7 +475,7 @@ static struct sk_buff *efx_rx_mk_skb(struct efx_channel *channel,
return NULL;
}
- EFX_BUG_ON_PARANOID(rx_buf->len < hdr_len);
+ EFX_WARN_ON_ONCE_PARANOID(rx_buf->len < hdr_len);
memcpy(skb->data + efx->rx_ip_align, eh - efx->rx_prefix_size,
efx->rx_prefix_size + hdr_len);
@@ -693,7 +682,7 @@ int efx_probe_rx_queue(struct efx_rx_queue *rx_queue)
/* Create the smallest power-of-two aligned ring */
entries = max(roundup_pow_of_two(efx->rxq_entries), EFX_MIN_DMAQ_SIZE);
- EFX_BUG_ON_PARANOID(entries > EFX_MAX_DMAQ_SIZE);
+ EFX_WARN_ON_PARANOID(entries > EFX_MAX_DMAQ_SIZE);
rx_queue->ptr_mask = entries - 1;
netif_dbg(efx, probe, efx->net_dev,
diff --git a/drivers/net/ethernet/sfc/siena.c b/drivers/net/ethernet/sfc/siena.c
index 04ed1b4c7cd9..a3901bc96586 100644
--- a/drivers/net/ethernet/sfc/siena.c
+++ b/drivers/net/ethernet/sfc/siena.c
@@ -20,7 +20,6 @@
#include "nic.h"
#include "farch_regs.h"
#include "io.h"
-#include "phy.h"
#include "workarounds.h"
#include "mcdi.h"
#include "mcdi_pcol.h"
@@ -718,7 +717,7 @@ static void siena_mcdi_request(struct efx_nic *efx,
unsigned int i;
unsigned int inlen_dw = DIV_ROUND_UP(sdu_len, 4);
- EFX_BUG_ON_PARANOID(hdr_len != 4);
+ EFX_WARN_ON_PARANOID(hdr_len != 4);
efx_writed(efx, hdr, pdu);
@@ -977,6 +976,7 @@ const struct efx_nic_type siena_a0_nic_type = {
.tx_init = efx_farch_tx_init,
.tx_remove = efx_farch_tx_remove,
.tx_write = efx_farch_tx_write,
+ .tx_limit_len = efx_farch_tx_limit_len,
.rx_push_rss_config = siena_rx_push_rss_config,
.rx_probe = efx_farch_rx_probe,
.rx_init = efx_farch_rx_init,
diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c
index 233778911557..3c0151424d12 100644
--- a/drivers/net/ethernet/sfc/tx.c
+++ b/drivers/net/ethernet/sfc/tx.c
@@ -22,6 +22,7 @@
#include "efx.h"
#include "io.h"
#include "nic.h"
+#include "tx.h"
#include "workarounds.h"
#include "ef10_regs.h"
@@ -33,29 +34,30 @@ unsigned int efx_piobuf_size __read_mostly = EFX_PIOBUF_SIZE_DEF;
#endif /* EFX_USE_PIO */
-static inline unsigned int
-efx_tx_queue_get_insert_index(const struct efx_tx_queue *tx_queue)
+static inline u8 *efx_tx_get_copy_buffer(struct efx_tx_queue *tx_queue,
+ struct efx_tx_buffer *buffer)
{
- return tx_queue->insert_count & tx_queue->ptr_mask;
-}
+ unsigned int index = efx_tx_queue_get_insert_index(tx_queue);
+ struct efx_buffer *page_buf =
+ &tx_queue->cb_page[index >> (PAGE_SHIFT - EFX_TX_CB_ORDER)];
+ unsigned int offset =
+ ((index << EFX_TX_CB_ORDER) + NET_IP_ALIGN) & (PAGE_SIZE - 1);
-static inline struct efx_tx_buffer *
-__efx_tx_queue_get_insert_buffer(const struct efx_tx_queue *tx_queue)
-{
- return &tx_queue->buffer[efx_tx_queue_get_insert_index(tx_queue)];
+ if (unlikely(!page_buf->addr) &&
+ efx_nic_alloc_buffer(tx_queue->efx, page_buf, PAGE_SIZE,
+ GFP_ATOMIC))
+ return NULL;
+ buffer->dma_addr = page_buf->dma_addr + offset;
+ buffer->unmap_len = 0;
+ return (u8 *)page_buf->addr + offset;
}
-static inline struct efx_tx_buffer *
-efx_tx_queue_get_insert_buffer(const struct efx_tx_queue *tx_queue)
+u8 *efx_tx_get_copy_buffer_limited(struct efx_tx_queue *tx_queue,
+ struct efx_tx_buffer *buffer, size_t len)
{
- struct efx_tx_buffer *buffer =
- __efx_tx_queue_get_insert_buffer(tx_queue);
-
- EFX_BUG_ON_PARANOID(buffer->len);
- EFX_BUG_ON_PARANOID(buffer->flags);
- EFX_BUG_ON_PARANOID(buffer->unmap_len);
-
- return buffer;
+ if (len > EFX_TX_CB_SIZE)
+ return NULL;
+ return efx_tx_get_copy_buffer(tx_queue, buffer);
}
static void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,
@@ -82,35 +84,12 @@ static void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,
netif_vdbg(tx_queue->efx, tx_done, tx_queue->efx->net_dev,
"TX queue %d transmission id %x complete\n",
tx_queue->queue, tx_queue->read_count);
- } else if (buffer->flags & EFX_TX_BUF_HEAP) {
- kfree(buffer->heap_buf);
}
buffer->len = 0;
buffer->flags = 0;
}
-static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
- struct sk_buff *skb);
-
-static inline unsigned
-efx_max_tx_len(struct efx_nic *efx, dma_addr_t dma_addr)
-{
- /* Depending on the NIC revision, we can use descriptor
- * lengths up to 8K or 8K-1. However, since PCI Express
- * devices must split read requests at 4K boundaries, there is
- * little benefit from using descriptors that cross those
- * boundaries and we keep things simple by not doing so.
- */
- unsigned len = (~dma_addr & (EFX_PAGE_SIZE - 1)) + 1;
-
- /* Work around hardware bug for unaligned buffers. */
- if (EFX_WORKAROUND_5391(efx) && (dma_addr & 0xf))
- len = min_t(unsigned, len, 512 - (dma_addr & 0xf));
-
- return len;
-}
-
unsigned int efx_tx_max_skb_descs(struct efx_nic *efx)
{
/* Header and payload descriptor for each output segment, plus
@@ -118,10 +97,8 @@ unsigned int efx_tx_max_skb_descs(struct efx_nic *efx)
*/
unsigned int max_descs = EFX_TSO_MAX_SEGS * 2 + MAX_SKB_FRAGS;
- /* Possibly one more per segment for the alignment workaround,
- * or for option descriptors
- */
- if (EFX_WORKAROUND_5391(efx) || efx_nic_rev(efx) >= EFX_REV_HUNT_A0)
+ /* Possibly one more per segment for option descriptors */
+ if (efx_nic_rev(efx) >= EFX_REV_HUNT_A0)
max_descs += EFX_TSO_MAX_SEGS;
/* Possibly more for PCIe page boundaries within input fragments */
@@ -165,7 +142,7 @@ static void efx_tx_maybe_stop_queue(struct efx_tx_queue *txq1)
fill_level = max(txq1->insert_count - txq1->old_read_count,
txq2->insert_count - txq2->old_read_count);
- EFX_BUG_ON_PARANOID(fill_level >= efx->txq_entries);
+ EFX_WARN_ON_ONCE_PARANOID(fill_level >= efx->txq_entries);
if (likely(fill_level < efx->txq_stop_thresh)) {
smp_mb();
if (likely(!efx->loopback_selftest))
@@ -173,6 +150,33 @@ static void efx_tx_maybe_stop_queue(struct efx_tx_queue *txq1)
}
}
+static int efx_enqueue_skb_copy(struct efx_tx_queue *tx_queue,
+ struct sk_buff *skb)
+{
+ unsigned int copy_len = skb->len;
+ struct efx_tx_buffer *buffer;
+ u8 *copy_buffer;
+ int rc;
+
+ EFX_WARN_ON_ONCE_PARANOID(copy_len > EFX_TX_CB_SIZE);
+
+ buffer = efx_tx_queue_get_insert_buffer(tx_queue);
+
+ copy_buffer = efx_tx_get_copy_buffer(tx_queue, buffer);
+ if (unlikely(!copy_buffer))
+ return -ENOMEM;
+
+ rc = skb_copy_bits(skb, 0, copy_buffer, copy_len);
+ EFX_WARN_ON_PARANOID(rc);
+ buffer->len = copy_len;
+
+ buffer->skb = skb;
+ buffer->flags = EFX_TX_BUF_SKB;
+
+ ++tx_queue->insert_count;
+ return rc;
+}
+
#ifdef EFX_USE_PIO
struct efx_short_copy_buffer {
@@ -264,11 +268,11 @@ static void efx_skb_copy_bits_to_pio(struct efx_nic *efx, struct sk_buff *skb,
kunmap_atomic(vaddr);
}
- EFX_BUG_ON_PARANOID(skb_shinfo(skb)->frag_list);
+ EFX_WARN_ON_ONCE_PARANOID(skb_shinfo(skb)->frag_list);
}
-static struct efx_tx_buffer *
-efx_enqueue_skb_pio(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
+static int efx_enqueue_skb_pio(struct efx_tx_queue *tx_queue,
+ struct sk_buff *skb)
{
struct efx_tx_buffer *buffer =
efx_tx_queue_get_insert_buffer(tx_queue);
@@ -292,7 +296,7 @@ efx_enqueue_skb_pio(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
efx_flush_copy_buffer(tx_queue->efx, piobuf, &copy_buf);
} else {
/* Pad the write to the size of a cache line.
- * We can do this because we know the skb_shared_info sruct is
+ * We can do this because we know the skb_shared_info struct is
* after the source, and the destination buffer is big enough.
*/
BUILD_BUG_ON(L1_CACHE_BYTES >
@@ -301,6 +305,9 @@ efx_enqueue_skb_pio(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
ALIGN(skb->len, L1_CACHE_BYTES) >> 3);
}
+ buffer->skb = skb;
+ buffer->flags = EFX_TX_BUF_SKB | EFX_TX_BUF_OPTION;
+
EFX_POPULATE_QWORD_5(buffer->option,
ESF_DZ_TX_DESC_IS_OPT, 1,
ESF_DZ_TX_OPTION_TYPE, ESE_DZ_TX_OPTION_DESC_PIO,
@@ -308,127 +315,227 @@ efx_enqueue_skb_pio(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
ESF_DZ_TX_PIO_BYTE_CNT, skb->len,
ESF_DZ_TX_PIO_BUF_ADDR,
tx_queue->piobuf_offset);
- ++tx_queue->pio_packets;
++tx_queue->insert_count;
- return buffer;
+ return 0;
}
#endif /* EFX_USE_PIO */
-/*
- * Add a socket buffer to a TX queue
- *
- * This maps all fragments of a socket buffer for DMA and adds them to
- * the TX queue. The queue's insert pointer will be incremented by
- * the number of fragments in the socket buffer.
- *
- * If any DMA mapping fails, any mapped fragments will be unmapped,
- * the queue's insert pointer will be restored to its original value.
- *
- * This function is split out from efx_hard_start_xmit to allow the
- * loopback test to direct packets via specific TX queues.
- *
- * Returns NETDEV_TX_OK.
- * You must hold netif_tx_lock() to call this function.
+static struct efx_tx_buffer *efx_tx_map_chunk(struct efx_tx_queue *tx_queue,
+ dma_addr_t dma_addr,
+ size_t len)
+{
+ const struct efx_nic_type *nic_type = tx_queue->efx->type;
+ struct efx_tx_buffer *buffer;
+ unsigned int dma_len;
+
+ /* Map the fragment taking account of NIC-dependent DMA limits. */
+ do {
+ buffer = efx_tx_queue_get_insert_buffer(tx_queue);
+ dma_len = nic_type->tx_limit_len(tx_queue, dma_addr, len);
+
+ buffer->len = dma_len;
+ buffer->dma_addr = dma_addr;
+ buffer->flags = EFX_TX_BUF_CONT;
+ len -= dma_len;
+ dma_addr += dma_len;
+ ++tx_queue->insert_count;
+ } while (len);
+
+ return buffer;
+}
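
Judging from the removed efx_max_tx_len() above, a farch-style tx_limit_len hook amounts to clipping each descriptor at the next 4 KiB PCIe boundary. A hedged user-space sketch (the Falcon 0x5391 alignment workaround goes away with the hardware, so it is omitted; tx_limit_len_sketch is a made-up name):

#include <stdio.h>

#define EFX_PAGE_SIZE 4096u	/* PCIe read requests split at 4KB */

/* Assumed shape of efx_farch_tx_limit_len(), based on the removed
 * efx_max_tx_len(): never let one descriptor cross a 4KB boundary,
 * since there is little benefit from descriptors that do.
 */
static unsigned int tx_limit_len_sketch(unsigned long long dma_addr,
					unsigned int len)
{
	unsigned int to_boundary = (~dma_addr & (EFX_PAGE_SIZE - 1)) + 1;

	return len < to_boundary ? len : to_boundary;
}

int main(void)
{
	/* A 6000-byte fragment starting 100 bytes into a page needs
	 * two descriptors: 3996 bytes, then the remaining 2004.
	 */
	printf("%u\n", tx_limit_len_sketch(0x1064, 6000));	/* 3996 */
	printf("%u\n", tx_limit_len_sketch(0x2000, 2004));	/* 2004 */
	return 0;
}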
+
+/* Map all data from an SKB for DMA and create descriptors on the queue.
*/
-netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
+static int efx_tx_map_data(struct efx_tx_queue *tx_queue, struct sk_buff *skb,
+ unsigned int segment_count)
{
struct efx_nic *efx = tx_queue->efx;
struct device *dma_dev = &efx->pci_dev->dev;
- struct efx_tx_buffer *buffer;
- unsigned int old_insert_count = tx_queue->insert_count;
- skb_frag_t *fragment;
- unsigned int len, unmap_len = 0;
- dma_addr_t dma_addr, unmap_addr = 0;
- unsigned int dma_len;
+ unsigned int frag_index, nr_frags;
+ dma_addr_t dma_addr, unmap_addr;
unsigned short dma_flags;
- int i = 0;
+ size_t len, unmap_len;
- if (skb_shinfo(skb)->gso_size)
- return efx_enqueue_skb_tso(tx_queue, skb);
+ nr_frags = skb_shinfo(skb)->nr_frags;
+ frag_index = 0;
- /* Get size of the initial fragment */
+ /* Map header data. */
len = skb_headlen(skb);
+ dma_addr = dma_map_single(dma_dev, skb->data, len, DMA_TO_DEVICE);
+ dma_flags = EFX_TX_BUF_MAP_SINGLE;
+ unmap_len = len;
+ unmap_addr = dma_addr;
- /* Pad if necessary */
- if (EFX_WORKAROUND_15592(efx) && skb->len <= 32) {
- EFX_BUG_ON_PARANOID(skb->data_len);
- len = 32 + 1;
- if (skb_pad(skb, len - skb->len))
- return NETDEV_TX_OK;
- }
+ if (unlikely(dma_mapping_error(dma_dev, dma_addr)))
+ return -EIO;
- /* Consider using PIO for short packets */
-#ifdef EFX_USE_PIO
- if (skb->len <= efx_piobuf_size && !skb->xmit_more &&
- efx_nic_may_tx_pio(tx_queue)) {
- buffer = efx_enqueue_skb_pio(tx_queue, skb);
- dma_flags = EFX_TX_BUF_OPTION;
- goto finish_packet;
+ if (segment_count) {
+ /* For TSO we need to put the header in to a separate
+ * descriptor. Map this separately if necessary.
+ */
+ size_t header_len = skb_transport_header(skb) - skb->data +
+ (tcp_hdr(skb)->doff << 2u);
+
+ if (header_len != len) {
+ tx_queue->tso_long_headers++;
+ efx_tx_map_chunk(tx_queue, dma_addr, header_len);
+ len -= header_len;
+ dma_addr += header_len;
+ }
}
-#endif
- /* Map for DMA. Use dma_map_single rather than dma_map_page
- * since this is more efficient on machines with sparse
- * memory.
- */
- dma_flags = EFX_TX_BUF_MAP_SINGLE;
- dma_addr = dma_map_single(dma_dev, skb->data, len, PCI_DMA_TODEVICE);
+ /* Add descriptors for each fragment. */
+ do {
+ struct efx_tx_buffer *buffer;
+ skb_frag_t *fragment;
- /* Process all fragments */
- while (1) {
- if (unlikely(dma_mapping_error(dma_dev, dma_addr)))
- goto dma_err;
+ buffer = efx_tx_map_chunk(tx_queue, dma_addr, len);
+
+ /* The final descriptor for a fragment is responsible for
+ * unmapping the whole fragment.
+ */
+ buffer->flags = EFX_TX_BUF_CONT | dma_flags;
+ buffer->unmap_len = unmap_len;
+ buffer->dma_offset = buffer->dma_addr - unmap_addr;
+
+ if (frag_index >= nr_frags) {
+ /* Store SKB details with the final buffer for
+ * the completion.
+ */
+ buffer->skb = skb;
+ buffer->flags = EFX_TX_BUF_SKB | dma_flags;
+ return 0;
+ }
- /* Store fields for marking in the per-fragment final
- * descriptor */
+ /* Move on to the next fragment. */
+ fragment = &skb_shinfo(skb)->frags[frag_index++];
+ len = skb_frag_size(fragment);
+ dma_addr = skb_frag_dma_map(dma_dev, fragment,
+ 0, len, DMA_TO_DEVICE);
+ dma_flags = 0;
unmap_len = len;
unmap_addr = dma_addr;
- /* Add to TX queue, splitting across DMA boundaries */
- do {
- buffer = efx_tx_queue_get_insert_buffer(tx_queue);
+ if (unlikely(dma_mapping_error(dma_dev, dma_addr)))
+ return -EIO;
+ } while (1);
+}
+
+/* Remove buffers put into a tx_queue. None of the buffers must have
+ * an skb attached.
+ */
+static void efx_enqueue_unwind(struct efx_tx_queue *tx_queue)
+{
+ struct efx_tx_buffer *buffer;
+
+ /* Work backwards until we hit the original insert pointer value */
+ while (tx_queue->insert_count != tx_queue->write_count) {
+ --tx_queue->insert_count;
+ buffer = __efx_tx_queue_get_insert_buffer(tx_queue);
+ efx_dequeue_buffer(tx_queue, buffer, NULL, NULL);
+ }
+}
+
+/*
+ * Fallback to software TSO.
+ *
+ * This is used if we are unable to send a GSO packet through hardware TSO.
+ * This should only ever happen due to per-queue restrictions - unsupported
+ * packets should first be filtered by the feature flags.
+ *
+ * Returns 0 on success, error code otherwise.
+ */
+static int efx_tx_tso_fallback(struct efx_tx_queue *tx_queue,
+ struct sk_buff *skb)
+{
+ struct sk_buff *segments, *next;
- dma_len = efx_max_tx_len(efx, dma_addr);
- if (likely(dma_len >= len))
- dma_len = len;
+ segments = skb_gso_segment(skb, 0);
+ if (IS_ERR(segments))
+ return PTR_ERR(segments);
- /* Fill out per descriptor fields */
- buffer->len = dma_len;
- buffer->dma_addr = dma_addr;
- buffer->flags = EFX_TX_BUF_CONT;
- len -= dma_len;
- dma_addr += dma_len;
- ++tx_queue->insert_count;
- } while (len);
+ dev_kfree_skb_any(skb);
+ skb = segments;
- /* Transfer ownership of the unmapping to the final buffer */
- buffer->flags = EFX_TX_BUF_CONT | dma_flags;
- buffer->unmap_len = unmap_len;
- buffer->dma_offset = buffer->dma_addr - unmap_addr;
- unmap_len = 0;
+ while (skb) {
+ next = skb->next;
+ skb->next = NULL;
- /* Get address and size of next fragment */
- if (i >= skb_shinfo(skb)->nr_frags)
- break;
- fragment = &skb_shinfo(skb)->frags[i];
- len = skb_frag_size(fragment);
- i++;
- /* Map for DMA */
- dma_flags = 0;
- dma_addr = skb_frag_dma_map(dma_dev, fragment, 0, len,
- DMA_TO_DEVICE);
+ if (next)
+ skb->xmit_more = true;
+ efx_enqueue_skb(tx_queue, skb);
+ skb = next;
}
- /* Transfer ownership of the skb to the final buffer */
+ return 0;
+}
+
+/*
+ * Add a socket buffer to a TX queue
+ *
+ * This maps all fragments of a socket buffer for DMA and adds them to
+ * the TX queue. The queue's insert pointer will be incremented by
+ * the number of fragments in the socket buffer.
+ *
+ * If any DMA mapping fails, any mapped fragments will be unmapped,
+ * the queue's insert pointer will be restored to its original value.
+ *
+ * This function is split out from efx_hard_start_xmit to allow the
+ * loopback test to direct packets via specific TX queues.
+ *
+ * Returns NETDEV_TX_OK.
+ * You must hold netif_tx_lock() to call this function.
+ */
+netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
+{
+ bool data_mapped = false;
+ unsigned int segments;
+ unsigned int skb_len;
+ int rc;
+
+ skb_len = skb->len;
+ segments = skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 0;
+ if (segments == 1)
+ segments = 0; /* Don't use TSO for a single segment. */
+
+ /* Handle TSO first - it's *possible* (although unlikely) that we might
+ * be passed a packet to segment that's smaller than the copybreak/PIO
+ * size limit.
+ */
+ if (segments) {
+ EFX_WARN_ON_ONCE_PARANOID(!tx_queue->handle_tso);
+ rc = tx_queue->handle_tso(tx_queue, skb, &data_mapped);
+ if (rc == -EINVAL) {
+ rc = efx_tx_tso_fallback(tx_queue, skb);
+ tx_queue->tso_fallbacks++;
+ if (rc == 0)
+ return 0;
+ }
+ if (rc)
+ goto err;
#ifdef EFX_USE_PIO
-finish_packet:
+ } else if (skb_len <= efx_piobuf_size && !skb->xmit_more &&
+ efx_nic_may_tx_pio(tx_queue)) {
+ /* Use PIO for short packets with an empty queue. */
+ if (efx_enqueue_skb_pio(tx_queue, skb))
+ goto err;
+ tx_queue->pio_packets++;
+ data_mapped = true;
#endif
- buffer->skb = skb;
- buffer->flags = EFX_TX_BUF_SKB | dma_flags;
+ } else if (skb->data_len && skb_len <= EFX_TX_CB_SIZE) {
+ /* Pad short packets or coalesce short fragmented packets. */
+ if (efx_enqueue_skb_copy(tx_queue, skb))
+ goto err;
+ tx_queue->cb_packets++;
+ data_mapped = true;
+ }
- netdev_tx_sent_queue(tx_queue->core_txq, skb->len);
+ /* Map for DMA and create descriptors if we haven't done so already. */
+ if (!data_mapped && (efx_tx_map_data(tx_queue, skb, segments)))
+ goto err;
- efx_tx_maybe_stop_queue(tx_queue);
+ /* Update BQL */
+ netdev_tx_sent_queue(tx_queue->core_txq, skb_len);
/* Pass off to hardware */
if (!skb->xmit_more || netif_xmit_stopped(tx_queue->core_txq)) {
@@ -446,37 +553,22 @@ finish_packet:
tx_queue->xmit_more_available = skb->xmit_more;
}
- tx_queue->tx_packets++;
+ if (segments) {
+ tx_queue->tso_bursts++;
+ tx_queue->tso_packets += segments;
+ tx_queue->tx_packets += segments;
+ } else {
+ tx_queue->tx_packets++;
+ }
+
+ efx_tx_maybe_stop_queue(tx_queue);
return NETDEV_TX_OK;
- dma_err:
- netif_err(efx, tx_err, efx->net_dev,
- " TX queue %d could not map skb with %d bytes %d "
- "fragments for DMA\n", tx_queue->queue, skb->len,
- skb_shinfo(skb)->nr_frags + 1);
- /* Mark the packet as transmitted, and free the SKB ourselves */
+err:
+ efx_enqueue_unwind(tx_queue);
dev_kfree_skb_any(skb);
-
- /* Work backwards until we hit the original insert pointer value */
- while (tx_queue->insert_count != old_insert_count) {
- unsigned int pkts_compl = 0, bytes_compl = 0;
- --tx_queue->insert_count;
- buffer = __efx_tx_queue_get_insert_buffer(tx_queue);
- efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl);
- }
-
- /* Free the fragment we were mid-way through pushing */
- if (unmap_len) {
- if (dma_flags & EFX_TX_BUF_MAP_SINGLE)
- dma_unmap_single(dma_dev, unmap_addr, unmap_len,
- DMA_TO_DEVICE);
- else
- dma_unmap_page(dma_dev, unmap_addr, unmap_len,
- DMA_TO_DEVICE);
- }
-
return NETDEV_TX_OK;
}
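
Taken together, the rewritten efx_enqueue_skb() now picks exactly one preparation strategy per skb before the shared BQL/doorbell tail. A condensed, stubbed sketch of just that selection order; the thresholds are parameters here rather than the driver's actual fields, and pio_possible folds in the xmit_more and queue-empty checks:

#include <stdbool.h>
#include <stdio.h>

enum tx_path { PATH_TSO, PATH_PIO, PATH_COPY, PATH_MAP };

/* Hardware TSO first, then PIO for short packets on an idle queue,
 * then the copybreak buffer for short fragmented packets, and plain
 * DMA mapping for everything else.
 */
static enum tx_path pick_path(unsigned int segments, unsigned int skb_len,
			      bool fragmented, bool pio_possible,
			      unsigned int piobuf_size, unsigned int cb_size)
{
	if (segments)
		return PATH_TSO;
	if (skb_len <= piobuf_size && pio_possible)
		return PATH_PIO;
	if (fragmented && skb_len <= cb_size)
		return PATH_COPY;
	return PATH_MAP;
}

int main(void)
{
	/* A 100-byte fragmented packet with PIO unavailable: copybreak. */
	printf("%d\n", pick_path(0, 100, true, false, 256, 126));
	return 0;
}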
@@ -576,7 +668,7 @@ int efx_setup_tc(struct net_device *net_dev, u32 handle, __be16 proto,
num_tc = ntc->tc;
- if (efx_nic_rev(efx) < EFX_REV_FALCON_B0 || num_tc > EFX_MAX_TX_TC)
+ if (num_tc > EFX_MAX_TX_TC)
return -EINVAL;
if (num_tc == net_dev->num_tc)
@@ -632,7 +724,7 @@ void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
struct efx_tx_queue *txq2;
unsigned int pkts_compl = 0, bytes_compl = 0;
- EFX_BUG_ON_PARANOID(index > tx_queue->ptr_mask);
+ EFX_WARN_ON_ONCE_PARANOID(index > tx_queue->ptr_mask);
efx_dequeue_buffers(tx_queue, index, &pkts_compl, &bytes_compl);
tx_queue->pkts_compl += pkts_compl;
@@ -667,19 +759,9 @@ void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
}
}
-/* Size of page-based TSO header buffers. Larger blocks must be
- * allocated from the heap.
- */
-#define TSOH_STD_SIZE 128
-#define TSOH_PER_PAGE (PAGE_SIZE / TSOH_STD_SIZE)
-
-/* At most half the descriptors in the queue at any time will refer to
- * a TSO header buffer, since they must always be followed by a
- * payload descriptor referring to an skb.
- */
-static unsigned int efx_tsoh_page_count(struct efx_tx_queue *tx_queue)
+static unsigned int efx_tx_cb_page_count(struct efx_tx_queue *tx_queue)
{
- return DIV_ROUND_UP(tx_queue->ptr_mask + 1, 2 * TSOH_PER_PAGE);
+ return DIV_ROUND_UP(tx_queue->ptr_mask + 1, PAGE_SIZE >> EFX_TX_CB_ORDER);
}
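
For a default 1024-entry ring this comes to one copy-buffer page per 32 descriptors; a quick check of the arithmetic, with DIV_ROUND_UP defined as in the kernel:

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))
#define PAGE_SIZE		4096u
#define EFX_TX_CB_ORDER		7

int main(void)
{
	unsigned int entries = 1024;	/* ptr_mask + 1 */

	/* 4096 >> 7 = 32 copy buffers per page, so 32 pages here */
	printf("%u pages\n",
	       DIV_ROUND_UP(entries, PAGE_SIZE >> EFX_TX_CB_ORDER));
	return 0;
}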
int efx_probe_tx_queue(struct efx_tx_queue *tx_queue)
@@ -690,7 +772,7 @@ int efx_probe_tx_queue(struct efx_tx_queue *tx_queue)
/* Create the smallest power-of-two aligned ring */
entries = max(roundup_pow_of_two(efx->txq_entries), EFX_MIN_DMAQ_SIZE);
- EFX_BUG_ON_PARANOID(entries > EFX_MAX_DMAQ_SIZE);
+ EFX_WARN_ON_PARANOID(entries > EFX_MAX_DMAQ_SIZE);
tx_queue->ptr_mask = entries - 1;
netif_dbg(efx, probe, efx->net_dev,
@@ -703,14 +785,11 @@ int efx_probe_tx_queue(struct efx_tx_queue *tx_queue)
if (!tx_queue->buffer)
return -ENOMEM;
- if (tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD) {
- tx_queue->tsoh_page =
- kcalloc(efx_tsoh_page_count(tx_queue),
- sizeof(tx_queue->tsoh_page[0]), GFP_KERNEL);
- if (!tx_queue->tsoh_page) {
- rc = -ENOMEM;
- goto fail1;
- }
+ tx_queue->cb_page = kcalloc(efx_tx_cb_page_count(tx_queue),
+ sizeof(tx_queue->cb_page[0]), GFP_KERNEL);
+ if (!tx_queue->cb_page) {
+ rc = -ENOMEM;
+ goto fail1;
}
/* Allocate hardware ring */
@@ -721,8 +800,8 @@ int efx_probe_tx_queue(struct efx_tx_queue *tx_queue)
return 0;
fail2:
- kfree(tx_queue->tsoh_page);
- tx_queue->tsoh_page = NULL;
+ kfree(tx_queue->cb_page);
+ tx_queue->cb_page = NULL;
fail1:
kfree(tx_queue->buffer);
tx_queue->buffer = NULL;
@@ -731,7 +810,9 @@ fail1:
void efx_init_tx_queue(struct efx_tx_queue *tx_queue)
{
- netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
+ struct efx_nic *efx = tx_queue->efx;
+
+ netif_dbg(efx, drv, efx->net_dev,
"initialising TX queue %d\n", tx_queue->queue);
tx_queue->insert_count = 0;
@@ -742,6 +823,11 @@ void efx_init_tx_queue(struct efx_tx_queue *tx_queue)
tx_queue->empty_read_count = 0 | EFX_EMPTY_COUNT_VALID;
tx_queue->xmit_more_available = false;
+ /* Set up default function pointers. These may get replaced by
+ * efx_nic_init_tx() based off NIC/queue capabilities.
+ */
+ tx_queue->handle_tso = efx_enqueue_skb_tso;
+
/* Set up TX descriptor ring */
efx_nic_init_tx(tx_queue);
@@ -781,589 +867,14 @@ void efx_remove_tx_queue(struct efx_tx_queue *tx_queue)
"destroying TX queue %d\n", tx_queue->queue);
efx_nic_remove_tx(tx_queue);
- if (tx_queue->tsoh_page) {
- for (i = 0; i < efx_tsoh_page_count(tx_queue); i++)
+ if (tx_queue->cb_page) {
+ for (i = 0; i < efx_tx_cb_page_count(tx_queue); i++)
efx_nic_free_buffer(tx_queue->efx,
- &tx_queue->tsoh_page[i]);
- kfree(tx_queue->tsoh_page);
- tx_queue->tsoh_page = NULL;
+ &tx_queue->cb_page[i]);
+ kfree(tx_queue->cb_page);
+ tx_queue->cb_page = NULL;
}
kfree(tx_queue->buffer);
tx_queue->buffer = NULL;
}
-
-
-/* Efx TCP segmentation acceleration.
- *
- * Why? Because by doing it here in the driver we can go significantly
- * faster than the GSO.
- *
- * Requires TX checksum offload support.
- */
-
-#define PTR_DIFF(p1, p2) ((u8 *)(p1) - (u8 *)(p2))
-
-/**
- * struct tso_state - TSO state for an SKB
- * @out_len: Remaining length in current segment
- * @seqnum: Current sequence number
- * @ipv4_id: Current IPv4 ID, host endian
- * @packet_space: Remaining space in current packet
- * @dma_addr: DMA address of current position
- * @in_len: Remaining length in current SKB fragment
- * @unmap_len: Length of SKB fragment
- * @unmap_addr: DMA address of SKB fragment
- * @dma_flags: TX buffer flags for DMA mapping - %EFX_TX_BUF_MAP_SINGLE or 0
- * @protocol: Network protocol (after any VLAN header)
- * @ip_off: Offset of IP header
- * @tcp_off: Offset of TCP header
- * @header_len: Number of bytes of header
- * @ip_base_len: IPv4 tot_len or IPv6 payload_len, before TCP payload
- * @header_dma_addr: Header DMA address, when using option descriptors
- * @header_unmap_len: Header DMA mapped length, or 0 if not using option
- * descriptors
- *
- * The state used during segmentation. It is put into this data structure
- * just to make it easy to pass into inline functions.
- */
-struct tso_state {
- /* Output position */
- unsigned out_len;
- unsigned seqnum;
- u16 ipv4_id;
- unsigned packet_space;
-
- /* Input position */
- dma_addr_t dma_addr;
- unsigned in_len;
- unsigned unmap_len;
- dma_addr_t unmap_addr;
- unsigned short dma_flags;
-
- __be16 protocol;
- unsigned int ip_off;
- unsigned int tcp_off;
- unsigned header_len;
- unsigned int ip_base_len;
- dma_addr_t header_dma_addr;
- unsigned int header_unmap_len;
-};
-
-
-/*
- * Verify that our various assumptions about sk_buffs and the conditions
- * under which TSO will be attempted hold true. Return the protocol number.
- */
-static __be16 efx_tso_check_protocol(struct sk_buff *skb)
-{
- __be16 protocol = skb->protocol;
-
- EFX_BUG_ON_PARANOID(((struct ethhdr *)skb->data)->h_proto !=
- protocol);
- if (protocol == htons(ETH_P_8021Q)) {
- struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
- protocol = veh->h_vlan_encapsulated_proto;
- }
-
- if (protocol == htons(ETH_P_IP)) {
- EFX_BUG_ON_PARANOID(ip_hdr(skb)->protocol != IPPROTO_TCP);
- } else {
- EFX_BUG_ON_PARANOID(protocol != htons(ETH_P_IPV6));
- EFX_BUG_ON_PARANOID(ipv6_hdr(skb)->nexthdr != NEXTHDR_TCP);
- }
- EFX_BUG_ON_PARANOID((PTR_DIFF(tcp_hdr(skb), skb->data)
- + (tcp_hdr(skb)->doff << 2u)) >
- skb_headlen(skb));
-
- return protocol;
-}
-
-static u8 *efx_tsoh_get_buffer(struct efx_tx_queue *tx_queue,
- struct efx_tx_buffer *buffer, unsigned int len)
-{
- u8 *result;
-
- EFX_BUG_ON_PARANOID(buffer->len);
- EFX_BUG_ON_PARANOID(buffer->flags);
- EFX_BUG_ON_PARANOID(buffer->unmap_len);
-
- if (likely(len <= TSOH_STD_SIZE - NET_IP_ALIGN)) {
- unsigned index =
- (tx_queue->insert_count & tx_queue->ptr_mask) / 2;
- struct efx_buffer *page_buf =
- &tx_queue->tsoh_page[index / TSOH_PER_PAGE];
- unsigned offset =
- TSOH_STD_SIZE * (index % TSOH_PER_PAGE) + NET_IP_ALIGN;
-
- if (unlikely(!page_buf->addr) &&
- efx_nic_alloc_buffer(tx_queue->efx, page_buf, PAGE_SIZE,
- GFP_ATOMIC))
- return NULL;
-
- result = (u8 *)page_buf->addr + offset;
- buffer->dma_addr = page_buf->dma_addr + offset;
- buffer->flags = EFX_TX_BUF_CONT;
- } else {
- tx_queue->tso_long_headers++;
-
- buffer->heap_buf = kmalloc(NET_IP_ALIGN + len, GFP_ATOMIC);
- if (unlikely(!buffer->heap_buf))
- return NULL;
- result = (u8 *)buffer->heap_buf + NET_IP_ALIGN;
- buffer->flags = EFX_TX_BUF_CONT | EFX_TX_BUF_HEAP;
- }
-
- buffer->len = len;
-
- return result;
-}
-
-/**
- * efx_tx_queue_insert - push descriptors onto the TX queue
- * @tx_queue: Efx TX queue
- * @dma_addr: DMA address of fragment
- * @len: Length of fragment
- * @final_buffer: The final buffer inserted into the queue
- *
- * Push descriptors onto the TX queue.
- */
-static void efx_tx_queue_insert(struct efx_tx_queue *tx_queue,
- dma_addr_t dma_addr, unsigned len,
- struct efx_tx_buffer **final_buffer)
-{
- struct efx_tx_buffer *buffer;
- struct efx_nic *efx = tx_queue->efx;
- unsigned dma_len;
-
- EFX_BUG_ON_PARANOID(len <= 0);
-
- while (1) {
- buffer = efx_tx_queue_get_insert_buffer(tx_queue);
- ++tx_queue->insert_count;
-
- EFX_BUG_ON_PARANOID(tx_queue->insert_count -
- tx_queue->read_count >=
- efx->txq_entries);
-
- buffer->dma_addr = dma_addr;
-
- dma_len = efx_max_tx_len(efx, dma_addr);
-
- /* If there is enough space to send then do so */
- if (dma_len >= len)
- break;
-
- buffer->len = dma_len;
- buffer->flags = EFX_TX_BUF_CONT;
- dma_addr += dma_len;
- len -= dma_len;
- }
-
- EFX_BUG_ON_PARANOID(!len);
- buffer->len = len;
- *final_buffer = buffer;
-}
-
-
-/*
- * Put a TSO header into the TX queue.
- *
- * This is special-cased because we know that it is small enough to fit in
- * a single fragment, and we know it doesn't cross a page boundary. It
- * also allows us to not worry about end-of-packet etc.
- */
-static int efx_tso_put_header(struct efx_tx_queue *tx_queue,
- struct efx_tx_buffer *buffer, u8 *header)
-{
- if (unlikely(buffer->flags & EFX_TX_BUF_HEAP)) {
- buffer->dma_addr = dma_map_single(&tx_queue->efx->pci_dev->dev,
- header, buffer->len,
- DMA_TO_DEVICE);
- if (unlikely(dma_mapping_error(&tx_queue->efx->pci_dev->dev,
- buffer->dma_addr))) {
- kfree(buffer->heap_buf);
- buffer->len = 0;
- buffer->flags = 0;
- return -ENOMEM;
- }
- buffer->unmap_len = buffer->len;
- buffer->dma_offset = 0;
- buffer->flags |= EFX_TX_BUF_MAP_SINGLE;
- }
-
- ++tx_queue->insert_count;
- return 0;
-}
-
-
-/* Remove buffers put into a tx_queue. None of the buffers must have
- * an skb attached.
- */
-static void efx_enqueue_unwind(struct efx_tx_queue *tx_queue,
- unsigned int insert_count)
-{
- struct efx_tx_buffer *buffer;
-
- /* Work backwards until we hit the original insert pointer value */
- while (tx_queue->insert_count != insert_count) {
- --tx_queue->insert_count;
- buffer = __efx_tx_queue_get_insert_buffer(tx_queue);
- efx_dequeue_buffer(tx_queue, buffer, NULL, NULL);
- }
-}
-
-
-/* Parse the SKB header and initialise state. */
-static int tso_start(struct tso_state *st, struct efx_nic *efx,
- struct efx_tx_queue *tx_queue,
- const struct sk_buff *skb)
-{
- struct device *dma_dev = &efx->pci_dev->dev;
- unsigned int header_len, in_len;
- bool use_opt_desc = false;
- dma_addr_t dma_addr;
-
- if (tx_queue->tso_version == 1)
- use_opt_desc = true;
-
- st->ip_off = skb_network_header(skb) - skb->data;
- st->tcp_off = skb_transport_header(skb) - skb->data;
- header_len = st->tcp_off + (tcp_hdr(skb)->doff << 2u);
- in_len = skb_headlen(skb) - header_len;
- st->header_len = header_len;
- st->in_len = in_len;
- if (st->protocol == htons(ETH_P_IP)) {
- st->ip_base_len = st->header_len - st->ip_off;
- st->ipv4_id = ntohs(ip_hdr(skb)->id);
- } else {
- st->ip_base_len = st->header_len - st->tcp_off;
- st->ipv4_id = 0;
- }
- st->seqnum = ntohl(tcp_hdr(skb)->seq);
-
- EFX_BUG_ON_PARANOID(tcp_hdr(skb)->urg);
- EFX_BUG_ON_PARANOID(tcp_hdr(skb)->syn);
- EFX_BUG_ON_PARANOID(tcp_hdr(skb)->rst);
-
- st->out_len = skb->len - header_len;
-
- if (!use_opt_desc) {
- st->header_unmap_len = 0;
-
- if (likely(in_len == 0)) {
- st->dma_flags = 0;
- st->unmap_len = 0;
- return 0;
- }
-
- dma_addr = dma_map_single(dma_dev, skb->data + header_len,
- in_len, DMA_TO_DEVICE);
- st->dma_flags = EFX_TX_BUF_MAP_SINGLE;
- st->dma_addr = dma_addr;
- st->unmap_addr = dma_addr;
- st->unmap_len = in_len;
- } else {
- dma_addr = dma_map_single(dma_dev, skb->data,
- skb_headlen(skb), DMA_TO_DEVICE);
- st->header_dma_addr = dma_addr;
- st->header_unmap_len = skb_headlen(skb);
- st->dma_flags = 0;
- st->dma_addr = dma_addr + header_len;
- st->unmap_len = 0;
- }
-
- return unlikely(dma_mapping_error(dma_dev, dma_addr)) ? -ENOMEM : 0;
-}
-
-static int tso_get_fragment(struct tso_state *st, struct efx_nic *efx,
- skb_frag_t *frag)
-{
- st->unmap_addr = skb_frag_dma_map(&efx->pci_dev->dev, frag, 0,
- skb_frag_size(frag), DMA_TO_DEVICE);
- if (likely(!dma_mapping_error(&efx->pci_dev->dev, st->unmap_addr))) {
- st->dma_flags = 0;
- st->unmap_len = skb_frag_size(frag);
- st->in_len = skb_frag_size(frag);
- st->dma_addr = st->unmap_addr;
- return 0;
- }
- return -ENOMEM;
-}
-
-
-/**
- * tso_fill_packet_with_fragment - form descriptors for the current fragment
- * @tx_queue: Efx TX queue
- * @skb: Socket buffer
- * @st: TSO state
- *
- * Form descriptors for the current fragment, until we reach the end
- * of fragment or end-of-packet.
- */
-static void tso_fill_packet_with_fragment(struct efx_tx_queue *tx_queue,
- const struct sk_buff *skb,
- struct tso_state *st)
-{
- struct efx_tx_buffer *buffer;
- int n;
-
- if (st->in_len == 0)
- return;
- if (st->packet_space == 0)
- return;
-
- EFX_BUG_ON_PARANOID(st->in_len <= 0);
- EFX_BUG_ON_PARANOID(st->packet_space <= 0);
-
- n = min(st->in_len, st->packet_space);
-
- st->packet_space -= n;
- st->out_len -= n;
- st->in_len -= n;
-
- efx_tx_queue_insert(tx_queue, st->dma_addr, n, &buffer);
-
- if (st->out_len == 0) {
- /* Transfer ownership of the skb */
- buffer->skb = skb;
- buffer->flags = EFX_TX_BUF_SKB;
- } else if (st->packet_space != 0) {
- buffer->flags = EFX_TX_BUF_CONT;
- }
-
- if (st->in_len == 0) {
- /* Transfer ownership of the DMA mapping */
- buffer->unmap_len = st->unmap_len;
- buffer->dma_offset = buffer->unmap_len - buffer->len;
- buffer->flags |= st->dma_flags;
- st->unmap_len = 0;
- }
-
- st->dma_addr += n;
-}
-
-
-/**
- * tso_start_new_packet - generate a new header and prepare for the new packet
- * @tx_queue: Efx TX queue
- * @skb: Socket buffer
- * @st: TSO state
- *
- * Generate a new header and prepare for the new packet. Return 0 on
- * success, or -%ENOMEM if failed to alloc header.
- */
-static int tso_start_new_packet(struct efx_tx_queue *tx_queue,
- const struct sk_buff *skb,
- struct tso_state *st)
-{
- struct efx_tx_buffer *buffer =
- efx_tx_queue_get_insert_buffer(tx_queue);
- bool is_last = st->out_len <= skb_shinfo(skb)->gso_size;
- u8 tcp_flags_clear;
-
- if (!is_last) {
- st->packet_space = skb_shinfo(skb)->gso_size;
- tcp_flags_clear = 0x09; /* mask out FIN and PSH */
- } else {
- st->packet_space = st->out_len;
- tcp_flags_clear = 0x00;
- }
-
- if (!st->header_unmap_len) {
- /* Allocate and insert a DMA-mapped header buffer. */
- struct tcphdr *tsoh_th;
- unsigned ip_length;
- u8 *header;
- int rc;
-
- header = efx_tsoh_get_buffer(tx_queue, buffer, st->header_len);
- if (!header)
- return -ENOMEM;
-
- tsoh_th = (struct tcphdr *)(header + st->tcp_off);
-
- /* Copy and update the headers. */
- memcpy(header, skb->data, st->header_len);
-
- tsoh_th->seq = htonl(st->seqnum);
- ((u8 *)tsoh_th)[13] &= ~tcp_flags_clear;
-
- ip_length = st->ip_base_len + st->packet_space;
-
- if (st->protocol == htons(ETH_P_IP)) {
- struct iphdr *tsoh_iph =
- (struct iphdr *)(header + st->ip_off);
-
- tsoh_iph->tot_len = htons(ip_length);
- tsoh_iph->id = htons(st->ipv4_id);
- } else {
- struct ipv6hdr *tsoh_iph =
- (struct ipv6hdr *)(header + st->ip_off);
-
- tsoh_iph->payload_len = htons(ip_length);
- }
-
- rc = efx_tso_put_header(tx_queue, buffer, header);
- if (unlikely(rc))
- return rc;
- } else {
- /* Send the original headers with a TSO option descriptor
- * in front
- */
- u8 tcp_flags = ((u8 *)tcp_hdr(skb))[13] & ~tcp_flags_clear;
-
- buffer->flags = EFX_TX_BUF_OPTION;
- buffer->len = 0;
- buffer->unmap_len = 0;
- EFX_POPULATE_QWORD_5(buffer->option,
- ESF_DZ_TX_DESC_IS_OPT, 1,
- ESF_DZ_TX_OPTION_TYPE,
- ESE_DZ_TX_OPTION_DESC_TSO,
- ESF_DZ_TX_TSO_TCP_FLAGS, tcp_flags,
- ESF_DZ_TX_TSO_IP_ID, st->ipv4_id,
- ESF_DZ_TX_TSO_TCP_SEQNO, st->seqnum);
- ++tx_queue->insert_count;
-
- /* We mapped the headers in tso_start(). Unmap them
- * when the last segment is completed.
- */
- buffer = efx_tx_queue_get_insert_buffer(tx_queue);
- buffer->dma_addr = st->header_dma_addr;
- buffer->len = st->header_len;
- if (is_last) {
- buffer->flags = EFX_TX_BUF_CONT | EFX_TX_BUF_MAP_SINGLE;
- buffer->unmap_len = st->header_unmap_len;
- buffer->dma_offset = 0;
- /* Ensure we only unmap them once in case of a
- * later DMA mapping error and rollback
- */
- st->header_unmap_len = 0;
- } else {
- buffer->flags = EFX_TX_BUF_CONT;
- buffer->unmap_len = 0;
- }
- ++tx_queue->insert_count;
- }
-
- st->seqnum += skb_shinfo(skb)->gso_size;
-
- /* Linux leaves suitable gaps in the IP ID space for us to fill. */
- ++st->ipv4_id;
-
- ++tx_queue->tso_packets;
-
- ++tx_queue->tx_packets;
-
- return 0;
-}
-
-
-/**
- * efx_enqueue_skb_tso - segment and transmit a TSO socket buffer
- * @tx_queue: Efx TX queue
- * @skb: Socket buffer
- *
- * Context: You must hold netif_tx_lock() to call this function.
- *
- * Add socket buffer @skb to @tx_queue, doing TSO or return != 0 if
- * @skb was not enqueued. In all cases @skb is consumed. Return
- * %NETDEV_TX_OK.
- */
-static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
- struct sk_buff *skb)
-{
- struct efx_nic *efx = tx_queue->efx;
- unsigned int old_insert_count = tx_queue->insert_count;
- int frag_i, rc;
- struct tso_state state;
-
- /* Find the packet protocol and sanity-check it */
- state.protocol = efx_tso_check_protocol(skb);
-
- rc = tso_start(&state, efx, tx_queue, skb);
- if (rc)
- goto mem_err;
-
- if (likely(state.in_len == 0)) {
- /* Grab the first payload fragment. */
- EFX_BUG_ON_PARANOID(skb_shinfo(skb)->nr_frags < 1);
- frag_i = 0;
- rc = tso_get_fragment(&state, efx,
- skb_shinfo(skb)->frags + frag_i);
- if (rc)
- goto mem_err;
- } else {
- /* Payload starts in the header area. */
- frag_i = -1;
- }
-
- if (tso_start_new_packet(tx_queue, skb, &state) < 0)
- goto mem_err;
-
- while (1) {
- tso_fill_packet_with_fragment(tx_queue, skb, &state);
-
- /* Move onto the next fragment? */
- if (state.in_len == 0) {
- if (++frag_i >= skb_shinfo(skb)->nr_frags)
- /* End of payload reached. */
- break;
- rc = tso_get_fragment(&state, efx,
- skb_shinfo(skb)->frags + frag_i);
- if (rc)
- goto mem_err;
- }
-
- /* Start at new packet? */
- if (state.packet_space == 0 &&
- tso_start_new_packet(tx_queue, skb, &state) < 0)
- goto mem_err;
- }
-
- netdev_tx_sent_queue(tx_queue->core_txq, skb->len);
-
- efx_tx_maybe_stop_queue(tx_queue);
-
- /* Pass off to hardware */
- if (!skb->xmit_more || netif_xmit_stopped(tx_queue->core_txq)) {
- struct efx_tx_queue *txq2 = efx_tx_queue_partner(tx_queue);
-
- /* There could be packets left on the partner queue if those
- * SKBs had skb->xmit_more set. If we do not push those they
- * could be left for a long time and cause a netdev watchdog.
- */
- if (txq2->xmit_more_available)
- efx_nic_push_buffers(txq2);
-
- efx_nic_push_buffers(tx_queue);
- } else {
- tx_queue->xmit_more_available = skb->xmit_more;
- }
-
- tx_queue->tso_bursts++;
- return NETDEV_TX_OK;
-
- mem_err:
- netif_err(efx, tx_err, efx->net_dev,
- "Out of memory for TSO headers, or DMA mapping error\n");
- dev_kfree_skb_any(skb);
-
- /* Free the DMA mapping we were in the process of writing out */
- if (state.unmap_len) {
- if (state.dma_flags & EFX_TX_BUF_MAP_SINGLE)
- dma_unmap_single(&efx->pci_dev->dev, state.unmap_addr,
- state.unmap_len, DMA_TO_DEVICE);
- else
- dma_unmap_page(&efx->pci_dev->dev, state.unmap_addr,
- state.unmap_len, DMA_TO_DEVICE);
- }
-
- /* Free the header DMA mapping, if using option descriptors */
- if (state.header_unmap_len)
- dma_unmap_single(&efx->pci_dev->dev, state.header_dma_addr,
- state.header_unmap_len, DMA_TO_DEVICE);
-
- efx_enqueue_unwind(tx_queue, old_insert_count);
- return NETDEV_TX_OK;
-}
diff --git a/drivers/net/ethernet/sfc/tx.h b/drivers/net/ethernet/sfc/tx.h
new file mode 100644
index 000000000000..1cccc97ec676
--- /dev/null
+++ b/drivers/net/ethernet/sfc/tx.h
@@ -0,0 +1,27 @@
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2005-2006 Fen Systems Ltd.
+ * Copyright 2006-2015 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#ifndef EFX_TX_H
+#define EFX_TX_H
+
+#include <linux/types.h>
+
+/* Driver internal tx-path related declarations. */
+
+unsigned int efx_tx_limit_len(struct efx_tx_queue *tx_queue,
+ dma_addr_t dma_addr, unsigned int len);
+
+u8 *efx_tx_get_copy_buffer_limited(struct efx_tx_queue *tx_queue,
+ struct efx_tx_buffer *buffer, size_t len);
+
+int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue, struct sk_buff *skb,
+ bool *data_mapped);
+
+#endif /* EFX_TX_H */
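
The new header exists so tx.c can install the generic segmentation handler from tx_tso.c as the default @handle_tso hook (see efx_init_tx_queue() above), with NIC init code free to swap in a capability-specific one. A minimal sketch of that indirection; all names below are made up, and the data_mapped convention shown is an assumption:

#include <stdbool.h>
#include <stddef.h>

struct sk_buff;

/* efx_init_tx_queue() installs the generic handler; NIC setup may
 * later replace it based on queue capabilities.
 */
struct tx_queue_sketch {
	int (*handle_tso)(struct tx_queue_sketch *, struct sk_buff *,
			  bool *data_mapped);
};

static int handle_tso_default(struct tx_queue_sketch *q, struct sk_buff *skb,
			      bool *data_mapped)
{
	(void)q;
	(void)skb;
	/* Assumption: the legacy TSO path builds its own descriptors,
	 * so it reports the data as already mapped.
	 */
	*data_mapped = true;
	return 0;
}

int main(void)
{
	struct tx_queue_sketch q = { .handle_tso = handle_tso_default };
	bool mapped = false;

	return q.handle_tso(&q, NULL, &mapped) || !mapped;
}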
diff --git a/drivers/net/ethernet/sfc/tx_tso.c b/drivers/net/ethernet/sfc/tx_tso.c
new file mode 100644
index 000000000000..e0cbda9ae859
--- /dev/null
+++ b/drivers/net/ethernet/sfc/tx_tso.c
@@ -0,0 +1,451 @@
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2005-2006 Fen Systems Ltd.
+ * Copyright 2005-2015 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#include <linux/pci.h>
+#include <linux/tcp.h>
+#include <linux/ip.h>
+#include <linux/in.h>
+#include <linux/ipv6.h>
+#include <linux/slab.h>
+#include <net/ipv6.h>
+#include <linux/if_ether.h>
+#include <linux/highmem.h>
+#include <linux/moduleparam.h>
+#include <linux/cache.h>
+#include "net_driver.h"
+#include "efx.h"
+#include "io.h"
+#include "nic.h"
+#include "tx.h"
+#include "workarounds.h"
+#include "ef10_regs.h"
+
+/* Efx legacy TCP segmentation acceleration.
+ *
+ * Utilises firmware support to go faster than GSO (but not as fast as TSOv2).
+ *
+ * Requires TX checksum offload support.
+ */
+
+#define PTR_DIFF(p1, p2) ((u8 *)(p1) - (u8 *)(p2))
+
+/**
+ * struct tso_state - TSO state for an SKB
+ * @out_len: Remaining length in current segment
+ * @seqnum: Current sequence number
+ * @ipv4_id: Current IPv4 ID, host endian
+ * @packet_space: Remaining space in current packet
+ * @dma_addr: DMA address of current position
+ * @in_len: Remaining length in current SKB fragment
+ * @unmap_len: Length of SKB fragment
+ * @unmap_addr: DMA address of SKB fragment
+ * @protocol: Network protocol (after any VLAN header)
+ * @ip_off: Offset of IP header
+ * @tcp_off: Offset of TCP header
+ * @header_len: Number of bytes of header
+ * @ip_base_len: IPv4 tot_len or IPv6 payload_len, before TCP payload
+ * @header_dma_addr: Header DMA address
+ * @header_unmap_len: Header DMA mapped length
+ *
+ * The state used during segmentation. It is put into this data structure
+ * just to make it easy to pass into inline functions.
+ */
+struct tso_state {
+ /* Output position */
+ unsigned int out_len;
+ unsigned int seqnum;
+ u16 ipv4_id;
+ unsigned int packet_space;
+
+ /* Input position */
+ dma_addr_t dma_addr;
+ unsigned int in_len;
+ unsigned int unmap_len;
+ dma_addr_t unmap_addr;
+
+ __be16 protocol;
+ unsigned int ip_off;
+ unsigned int tcp_off;
+ unsigned int header_len;
+ unsigned int ip_base_len;
+ dma_addr_t header_dma_addr;
+ unsigned int header_unmap_len;
+};
+
+static inline void prefetch_ptr(struct efx_tx_queue *tx_queue)
+{
+ unsigned int insert_ptr = efx_tx_queue_get_insert_index(tx_queue);
+ char *ptr;
+
+ ptr = (char *) (tx_queue->buffer + insert_ptr);
+ prefetch(ptr);
+ prefetch(ptr + 0x80);
+
+ ptr = (char *) (((efx_qword_t *)tx_queue->txd.buf.addr) + insert_ptr);
+ prefetch(ptr);
+ prefetch(ptr + 0x80);
+}
+
+/**
+ * efx_tx_queue_insert - push descriptors onto the TX queue
+ * @tx_queue: Efx TX queue
+ * @dma_addr: DMA address of fragment
+ * @len: Length of fragment
+ * @final_buffer: The final buffer inserted into the queue
+ *
+ * Push descriptors onto the TX queue.
+ */
+static void efx_tx_queue_insert(struct efx_tx_queue *tx_queue,
+ dma_addr_t dma_addr, unsigned int len,
+ struct efx_tx_buffer **final_buffer)
+{
+ struct efx_tx_buffer *buffer;
+ unsigned int dma_len;
+
+ EFX_WARN_ON_ONCE_PARANOID(len <= 0);
+
+ while (1) {
+ buffer = efx_tx_queue_get_insert_buffer(tx_queue);
+ ++tx_queue->insert_count;
+
+ EFX_WARN_ON_ONCE_PARANOID(tx_queue->insert_count -
+ tx_queue->read_count >=
+ tx_queue->efx->txq_entries);
+
+ buffer->dma_addr = dma_addr;
+
+ dma_len = tx_queue->efx->type->tx_limit_len(tx_queue,
+ dma_addr, len);
+
+		/* If there's space for everything, this is our last buffer. */
+ if (dma_len >= len)
+ break;
+
+ buffer->len = dma_len;
+ buffer->flags = EFX_TX_BUF_CONT;
+ dma_addr += dma_len;
+ len -= dma_len;
+ }
+
+ EFX_WARN_ON_ONCE_PARANOID(!len);
+ buffer->len = len;
+ *final_buffer = buffer;
+}
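+
+/* Worked example (editor's illustration, not part of the original patch;
+ * the 4 KB limit is an assumption, the real limit comes from
+ * efx->type->tx_limit_len()): a 10240-byte fragment would be split as
+ *
+ *	desc 0: len 4096, flags EFX_TX_BUF_CONT
+ *	desc 1: len 4096, flags EFX_TX_BUF_CONT
+ *	desc 2: len 2048, flags left for the caller to set
+ *
+ * and *final_buffer points at desc 2.
+ */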
+
+/*
+ * Verify that our various assumptions about sk_buffs and the conditions
+ * under which TSO will be attempted hold true. Return the protocol number.
+ */
+static __be16 efx_tso_check_protocol(struct sk_buff *skb)
+{
+ __be16 protocol = skb->protocol;
+
+ EFX_WARN_ON_ONCE_PARANOID(((struct ethhdr *)skb->data)->h_proto !=
+ protocol);
+ if (protocol == htons(ETH_P_8021Q)) {
+ struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
+
+ protocol = veh->h_vlan_encapsulated_proto;
+ }
+
+ if (protocol == htons(ETH_P_IP)) {
+ EFX_WARN_ON_ONCE_PARANOID(ip_hdr(skb)->protocol != IPPROTO_TCP);
+ } else {
+ EFX_WARN_ON_ONCE_PARANOID(protocol != htons(ETH_P_IPV6));
+ EFX_WARN_ON_ONCE_PARANOID(ipv6_hdr(skb)->nexthdr != NEXTHDR_TCP);
+ }
+ EFX_WARN_ON_ONCE_PARANOID((PTR_DIFF(tcp_hdr(skb), skb->data) +
+ (tcp_hdr(skb)->doff << 2u)) >
+ skb_headlen(skb));
+
+ return protocol;
+}
+
+/* Parse the SKB header and initialise state. */
+static int tso_start(struct tso_state *st, struct efx_nic *efx,
+ struct efx_tx_queue *tx_queue,
+ const struct sk_buff *skb)
+{
+ struct device *dma_dev = &efx->pci_dev->dev;
+ unsigned int header_len, in_len;
+ dma_addr_t dma_addr;
+
+ st->ip_off = skb_network_header(skb) - skb->data;
+ st->tcp_off = skb_transport_header(skb) - skb->data;
+ header_len = st->tcp_off + (tcp_hdr(skb)->doff << 2u);
+ in_len = skb_headlen(skb) - header_len;
+ st->header_len = header_len;
+ st->in_len = in_len;
+ if (st->protocol == htons(ETH_P_IP)) {
+ st->ip_base_len = st->header_len - st->ip_off;
+ st->ipv4_id = ntohs(ip_hdr(skb)->id);
+ } else {
+ st->ip_base_len = st->header_len - st->tcp_off;
+ st->ipv4_id = 0;
+ }
+ st->seqnum = ntohl(tcp_hdr(skb)->seq);
+
+ EFX_WARN_ON_ONCE_PARANOID(tcp_hdr(skb)->urg);
+ EFX_WARN_ON_ONCE_PARANOID(tcp_hdr(skb)->syn);
+ EFX_WARN_ON_ONCE_PARANOID(tcp_hdr(skb)->rst);
+
+ st->out_len = skb->len - header_len;
+
+ dma_addr = dma_map_single(dma_dev, skb->data,
+ skb_headlen(skb), DMA_TO_DEVICE);
+ st->header_dma_addr = dma_addr;
+ st->header_unmap_len = skb_headlen(skb);
+ st->dma_addr = dma_addr + header_len;
+ st->unmap_len = 0;
+
+ return unlikely(dma_mapping_error(dma_dev, dma_addr)) ? -ENOMEM : 0;
+}
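+
+/* Editor's sketch (illustrative, not in the original patch): the linear
+ * area is mapped once and then shared; with header_len == 54:
+ *
+ *	st->header_dma_addr = dma_addr;      headers, resent per segment
+ *	st->dma_addr        = dma_addr + 54; start of any linear payload
+ *
+ * so a single dma_unmap_single() of skb_headlen() covers both.
+ */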
+
+static int tso_get_fragment(struct tso_state *st, struct efx_nic *efx,
+ skb_frag_t *frag)
+{
+ st->unmap_addr = skb_frag_dma_map(&efx->pci_dev->dev, frag, 0,
+ skb_frag_size(frag), DMA_TO_DEVICE);
+ if (likely(!dma_mapping_error(&efx->pci_dev->dev, st->unmap_addr))) {
+ st->unmap_len = skb_frag_size(frag);
+ st->in_len = skb_frag_size(frag);
+ st->dma_addr = st->unmap_addr;
+ return 0;
+ }
+ return -ENOMEM;
+}
+
+
+/**
+ * tso_fill_packet_with_fragment - form descriptors for the current fragment
+ * @tx_queue: Efx TX queue
+ * @skb: Socket buffer
+ * @st: TSO state
+ *
+ * Form descriptors for the current fragment, until we reach the end of
+ * the fragment or the end of the packet.
+ */
+static void tso_fill_packet_with_fragment(struct efx_tx_queue *tx_queue,
+ const struct sk_buff *skb,
+ struct tso_state *st)
+{
+ struct efx_tx_buffer *buffer;
+ int n;
+
+ if (st->in_len == 0)
+ return;
+ if (st->packet_space == 0)
+ return;
+
+ EFX_WARN_ON_ONCE_PARANOID(st->in_len <= 0);
+ EFX_WARN_ON_ONCE_PARANOID(st->packet_space <= 0);
+
+ n = min(st->in_len, st->packet_space);
+
+ st->packet_space -= n;
+ st->out_len -= n;
+ st->in_len -= n;
+
+ efx_tx_queue_insert(tx_queue, st->dma_addr, n, &buffer);
+
+ if (st->out_len == 0) {
+ /* Transfer ownership of the skb */
+ buffer->skb = skb;
+ buffer->flags = EFX_TX_BUF_SKB;
+ } else if (st->packet_space != 0) {
+ buffer->flags = EFX_TX_BUF_CONT;
+ }
+
+ if (st->in_len == 0) {
+ /* Transfer ownership of the DMA mapping */
+ buffer->unmap_len = st->unmap_len;
+ buffer->dma_offset = buffer->unmap_len - buffer->len;
+ st->unmap_len = 0;
+ }
+
+ st->dma_addr += n;
+}
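+
+/* Worked example (editor's addition; the 1448-byte MSS is an assumption):
+ * with in_len == 3000 and packet_space == 1448, one call emits
+ * descriptor(s) covering 1448 bytes and leaves in_len == 1552,
+ * packet_space == 0, so the main loop calls tso_start_new_packet()
+ * before filling again.
+ */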
+
+
+#define TCP_FLAGS_OFFSET 13
+
+/**
+ * tso_start_new_packet - generate a new header and prepare for the new packet
+ * @tx_queue: Efx TX queue
+ * @skb: Socket buffer
+ * @st: TSO state
+ *
+ * Generate a new header and prepare for the new packet. Return 0 on
+ * success, or -%ENOMEM if the header could not be allocated, or another
+ * negative error code.
+ */
+static int tso_start_new_packet(struct efx_tx_queue *tx_queue,
+ const struct sk_buff *skb,
+ struct tso_state *st)
+{
+ struct efx_tx_buffer *buffer =
+ efx_tx_queue_get_insert_buffer(tx_queue);
+ bool is_last = st->out_len <= skb_shinfo(skb)->gso_size;
+ u8 tcp_flags_mask, tcp_flags;
+
+ if (!is_last) {
+ st->packet_space = skb_shinfo(skb)->gso_size;
+		tcp_flags_mask = 0x09; /* mask out FIN (0x01) and PSH (0x08) */
+ } else {
+ st->packet_space = st->out_len;
+ tcp_flags_mask = 0x00;
+ }
+
+ if (WARN_ON(!st->header_unmap_len))
+ return -EINVAL;
+ /* Send the original headers with a TSO option descriptor
+ * in front
+ */
+ tcp_flags = ((u8 *)tcp_hdr(skb))[TCP_FLAGS_OFFSET] & ~tcp_flags_mask;
+
+ buffer->flags = EFX_TX_BUF_OPTION;
+ buffer->len = 0;
+ buffer->unmap_len = 0;
+ EFX_POPULATE_QWORD_5(buffer->option,
+ ESF_DZ_TX_DESC_IS_OPT, 1,
+ ESF_DZ_TX_OPTION_TYPE,
+ ESE_DZ_TX_OPTION_DESC_TSO,
+ ESF_DZ_TX_TSO_TCP_FLAGS, tcp_flags,
+ ESF_DZ_TX_TSO_IP_ID, st->ipv4_id,
+ ESF_DZ_TX_TSO_TCP_SEQNO, st->seqnum);
+ ++tx_queue->insert_count;
+
+ /* We mapped the headers in tso_start(). Unmap them
+ * when the last segment is completed.
+ */
+ buffer = efx_tx_queue_get_insert_buffer(tx_queue);
+ buffer->dma_addr = st->header_dma_addr;
+ buffer->len = st->header_len;
+ if (is_last) {
+ buffer->flags = EFX_TX_BUF_CONT | EFX_TX_BUF_MAP_SINGLE;
+ buffer->unmap_len = st->header_unmap_len;
+ buffer->dma_offset = 0;
+ /* Ensure we only unmap them once in case of a
+ * later DMA mapping error and rollback
+ */
+ st->header_unmap_len = 0;
+ } else {
+ buffer->flags = EFX_TX_BUF_CONT;
+ buffer->unmap_len = 0;
+ }
+ ++tx_queue->insert_count;
+
+ st->seqnum += skb_shinfo(skb)->gso_size;
+
+ /* Linux leaves suitable gaps in the IP ID space for us to fill. */
+ ++st->ipv4_id;
+
+ return 0;
+}
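+
+/* Editor's illustration (not part of the patch): each segment therefore
+ * begins with two descriptors ahead of its payload:
+ *
+ *	1. TSO option descriptor - seqnum, ipv4_id and TCP flags for the NIC
+ *	2. header descriptor     - header_dma_addr/header_len, resent as-is
+ *
+ * after which tso_fill_packet_with_fragment() appends the payload.
+ */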
+
+/**
+ * efx_enqueue_skb_tso - segment and transmit a TSO socket buffer
+ * @tx_queue: Efx TX queue
+ * @skb: Socket buffer
+ * @data_mapped: Did we map the data? Always set to true
+ *	by this function on success.
+ *
+ * Context: You must hold netif_tx_lock() to call this function.
+ *
+ * Add socket buffer @skb to @tx_queue, performing TSO, or return
+ * non-zero if @skb was not enqueued. @skb is consumed unless the
+ * return value is %EINVAL.
+ */
+int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
+ struct sk_buff *skb,
+ bool *data_mapped)
+{
+ struct efx_nic *efx = tx_queue->efx;
+ int frag_i, rc;
+ struct tso_state state;
+
+ if (tx_queue->tso_version != 1)
+ return -EINVAL;
+
+ prefetch(skb->data);
+
+ /* Find the packet protocol and sanity-check it */
+ state.protocol = efx_tso_check_protocol(skb);
+
+ EFX_WARN_ON_ONCE_PARANOID(tx_queue->write_count != tx_queue->insert_count);
+
+ rc = tso_start(&state, efx, tx_queue, skb);
+ if (rc)
+ goto fail;
+
+ if (likely(state.in_len == 0)) {
+ /* Grab the first payload fragment. */
+ EFX_WARN_ON_ONCE_PARANOID(skb_shinfo(skb)->nr_frags < 1);
+ frag_i = 0;
+ rc = tso_get_fragment(&state, efx,
+ skb_shinfo(skb)->frags + frag_i);
+ if (rc)
+ goto fail;
+ } else {
+ /* Payload starts in the header area. */
+ frag_i = -1;
+ }
+
+ rc = tso_start_new_packet(tx_queue, skb, &state);
+ if (rc)
+ goto fail;
+
+ prefetch_ptr(tx_queue);
+
+ while (1) {
+ tso_fill_packet_with_fragment(tx_queue, skb, &state);
+
+ /* Move onto the next fragment? */
+ if (state.in_len == 0) {
+ if (++frag_i >= skb_shinfo(skb)->nr_frags)
+ /* End of payload reached. */
+ break;
+ rc = tso_get_fragment(&state, efx,
+ skb_shinfo(skb)->frags + frag_i);
+ if (rc)
+ goto fail;
+ }
+
+ /* Start at new packet? */
+ if (state.packet_space == 0) {
+ rc = tso_start_new_packet(tx_queue, skb, &state);
+ if (rc)
+ goto fail;
+ }
+ }
+
+ *data_mapped = true;
+
+ return 0;
+
+fail:
+ if (rc == -ENOMEM)
+ netif_err(efx, tx_err, efx->net_dev,
+ "Out of memory for TSO headers, or DMA mapping error\n");
+ else
+ netif_err(efx, tx_err, efx->net_dev, "TSO failed, rc = %d\n", rc);
+
+ /* Free the DMA mapping we were in the process of writing out */
+ if (state.unmap_len) {
+ dma_unmap_page(&efx->pci_dev->dev, state.unmap_addr,
+ state.unmap_len, DMA_TO_DEVICE);
+ }
+
+ /* Free the header DMA mapping */
+ if (state.header_unmap_len)
+ dma_unmap_single(&efx->pci_dev->dev, state.header_dma_addr,
+ state.header_unmap_len, DMA_TO_DEVICE);
+
+ return rc;
+}
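+
+/* Usage sketch (editor's addition; the calling convention is inferred from
+ * the kerneldoc above and is not part of this patch):
+ *
+ *	bool data_mapped = false;
+ *	int rc = efx_enqueue_skb_tso(tx_queue, skb, &data_mapped);
+ *
+ * On -EINVAL (no TSOv1 support) @skb has not been consumed, so the caller
+ * may fall back to GSO; on other errors the caller presumably unwinds any
+ * descriptors already enqueued, guided by @data_mapped.
+ */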
diff --git a/drivers/net/ethernet/sfc/workarounds.h b/drivers/net/ethernet/sfc/workarounds.h
index 351cd14cb9f9..103f827a1623 100644
--- a/drivers/net/ethernet/sfc/workarounds.h
+++ b/drivers/net/ethernet/sfc/workarounds.h
@@ -15,35 +15,14 @@
* Bug numbers are from Solarflare's Bugzilla.
*/
-#define EFX_WORKAROUND_FALCON_A(efx) (efx_nic_rev(efx) <= EFX_REV_FALCON_A1)
-#define EFX_WORKAROUND_FALCON_AB(efx) (efx_nic_rev(efx) <= EFX_REV_FALCON_B0)
#define EFX_WORKAROUND_SIENA(efx) (efx_nic_rev(efx) == EFX_REV_SIENA_A0)
#define EFX_WORKAROUND_10G(efx) 1
/* Bit-bashed I2C reads cause performance drop */
#define EFX_WORKAROUND_7884 EFX_WORKAROUND_10G
-/* Truncated IPv4 packets can confuse the TX packet parser */
-#define EFX_WORKAROUND_15592 EFX_WORKAROUND_FALCON_AB
/* Legacy interrupt storm when interrupt fifo fills */
#define EFX_WORKAROUND_17213 EFX_WORKAROUND_SIENA
-/* Spurious parity errors in TSORT buffers */
-#define EFX_WORKAROUND_5129 EFX_WORKAROUND_FALCON_A
-/* Unaligned read request >512 bytes after aligning may break TSORT */
-#define EFX_WORKAROUND_5391 EFX_WORKAROUND_FALCON_A
-/* iSCSI parsing errors */
-#define EFX_WORKAROUND_5583 EFX_WORKAROUND_FALCON_A
-/* RX events go missing */
-#define EFX_WORKAROUND_5676 EFX_WORKAROUND_FALCON_A
-/* RX_RESET on A1 */
-#define EFX_WORKAROUND_6555 EFX_WORKAROUND_FALCON_A
-/* Increase filter depth to avoid RX_RESET */
-#define EFX_WORKAROUND_7244 EFX_WORKAROUND_FALCON_A
-/* Flushes may never complete */
-#define EFX_WORKAROUND_7803 EFX_WORKAROUND_FALCON_AB
-/* Leak overlength packets rather than free */
-#define EFX_WORKAROUND_8071 EFX_WORKAROUND_FALCON_A
-
/* Lockup when writing event block registers at gen2/gen3 */
#define EFX_EF10_WORKAROUND_35388(efx) \
(((struct efx_ef10_nic_data *)efx->nic_data)->workaround_35388)
diff --git a/drivers/net/ethernet/sgi/ioc3-eth.c b/drivers/net/ethernet/sgi/ioc3-eth.c
index 7a254da85dd7..42051ab98cf0 100644
--- a/drivers/net/ethernet/sgi/ioc3-eth.c
+++ b/drivers/net/ethernet/sgi/ioc3-eth.c
@@ -1225,7 +1225,6 @@ static const struct net_device_ops ioc3_netdev_ops = {
.ndo_do_ioctl = ioc3_ioctl,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = ioc3_set_mac_address,
- .ndo_change_mtu = eth_change_mtu,
};
static int ioc3_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
diff --git a/drivers/net/ethernet/sgi/meth.c b/drivers/net/ethernet/sgi/meth.c
index aaa80f13859b..69d2d30e5ef1 100644
--- a/drivers/net/ethernet/sgi/meth.c
+++ b/drivers/net/ethernet/sgi/meth.c
@@ -815,7 +815,6 @@ static const struct net_device_ops meth_netdev_ops = {
.ndo_start_xmit = meth_tx,
.ndo_do_ioctl = meth_ioctl,
.ndo_tx_timeout = meth_tx_timeout,
- .ndo_change_mtu = eth_change_mtu,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
.ndo_set_rx_mode = meth_set_rx_mode,
diff --git a/drivers/net/ethernet/silan/sc92031.c b/drivers/net/ethernet/silan/sc92031.c
index 7426f8b21252..6c2e2b311c16 100644
--- a/drivers/net/ethernet/silan/sc92031.c
+++ b/drivers/net/ethernet/silan/sc92031.c
@@ -1386,7 +1386,6 @@ static const struct net_device_ops sc92031_netdev_ops = {
.ndo_open = sc92031_open,
.ndo_stop = sc92031_stop,
.ndo_set_rx_mode = sc92031_set_multicast_list,
- .ndo_change_mtu = eth_change_mtu,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
.ndo_tx_timeout = sc92031_tx_timeout,
diff --git a/drivers/net/ethernet/sis/sis190.c b/drivers/net/ethernet/sis/sis190.c
index 27be6c869315..210e35d079dd 100644
--- a/drivers/net/ethernet/sis/sis190.c
+++ b/drivers/net/ethernet/sis/sis190.c
@@ -1833,7 +1833,6 @@ static const struct net_device_ops sis190_netdev_ops = {
.ndo_start_xmit = sis190_start_xmit,
.ndo_tx_timeout = sis190_tx_timeout,
.ndo_set_rx_mode = sis190_set_rx_mode,
- .ndo_change_mtu = eth_change_mtu,
.ndo_set_mac_address = sis190_mac_addr,
.ndo_validate_addr = eth_validate_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
diff --git a/drivers/net/ethernet/sis/sis900.c b/drivers/net/ethernet/sis/sis900.c
index 6f85276376e8..39fca6c0b68d 100644
--- a/drivers/net/ethernet/sis/sis900.c
+++ b/drivers/net/ethernet/sis/sis900.c
@@ -400,7 +400,6 @@ static const struct net_device_ops sis900_netdev_ops = {
.ndo_start_xmit = sis900_start_xmit,
.ndo_set_config = sis900_set_config,
.ndo_set_rx_mode = set_rx_mode,
- .ndo_change_mtu = eth_change_mtu,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
.ndo_do_ioctl = mii_ioctl,
diff --git a/drivers/net/ethernet/smsc/epic100.c b/drivers/net/ethernet/smsc/epic100.c
index 7186b89269ad..fe9760ffab51 100644
--- a/drivers/net/ethernet/smsc/epic100.c
+++ b/drivers/net/ethernet/smsc/epic100.c
@@ -313,7 +313,6 @@ static const struct net_device_ops epic_netdev_ops = {
.ndo_get_stats = epic_get_stats,
.ndo_set_rx_mode = set_rx_mode,
.ndo_do_ioctl = netdev_ioctl,
- .ndo_change_mtu = eth_change_mtu,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
};
diff --git a/drivers/net/ethernet/smsc/smc911x.c b/drivers/net/ethernet/smsc/smc911x.c
index cb49c9654f0a..4f19c6166182 100644
--- a/drivers/net/ethernet/smsc/smc911x.c
+++ b/drivers/net/ethernet/smsc/smc911x.c
@@ -1753,7 +1753,6 @@ static const struct net_device_ops smc911x_netdev_ops = {
.ndo_start_xmit = smc911x_hard_start_xmit,
.ndo_tx_timeout = smc911x_timeout,
.ndo_set_rx_mode = smc911x_set_multicast_list,
- .ndo_change_mtu = eth_change_mtu,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
diff --git a/drivers/net/ethernet/smsc/smc9194.c b/drivers/net/ethernet/smsc/smc9194.c
index d496888b85d3..c8d84679ede7 100644
--- a/drivers/net/ethernet/smsc/smc9194.c
+++ b/drivers/net/ethernet/smsc/smc9194.c
@@ -809,7 +809,6 @@ static const struct net_device_ops smc_netdev_ops = {
.ndo_start_xmit = smc_wait_to_send_packet,
.ndo_tx_timeout = smc_timeout,
.ndo_set_rx_mode = smc_set_multicast_list,
- .ndo_change_mtu = eth_change_mtu,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
};
diff --git a/drivers/net/ethernet/smsc/smc91c92_cs.c b/drivers/net/ethernet/smsc/smc91c92_cs.c
index db3c696d7002..f1c75e291e55 100644
--- a/drivers/net/ethernet/smsc/smc91c92_cs.c
+++ b/drivers/net/ethernet/smsc/smc91c92_cs.c
@@ -294,7 +294,6 @@ static const struct net_device_ops smc_netdev_ops = {
.ndo_set_config = s9k_config,
.ndo_set_rx_mode = set_rx_mode,
.ndo_do_ioctl = smc_ioctl,
- .ndo_change_mtu = eth_change_mtu,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
};
diff --git a/drivers/net/ethernet/smsc/smc91x.c b/drivers/net/ethernet/smsc/smc91x.c
index 73212590d04a..65077c77082a 100644
--- a/drivers/net/ethernet/smsc/smc91x.c
+++ b/drivers/net/ethernet/smsc/smc91x.c
@@ -602,7 +602,8 @@ static void smc_hardware_send_pkt(unsigned long data)
SMC_PUSH_DATA(lp, buf, len & ~1);
/* Send final ctl word with the last byte if there is one */
- SMC_outw(((len & 1) ? (0x2000 | buf[len-1]) : 0), ioaddr, DATA_REG(lp));
+ SMC_outw(lp, ((len & 1) ? (0x2000 | buf[len - 1]) : 0), ioaddr,
+ DATA_REG(lp));
/*
* If THROTTLE_TX_PKTS is set, we stop the queue here. This will
@@ -1762,7 +1763,6 @@ static const struct net_device_ops smc_netdev_ops = {
.ndo_start_xmit = smc_hard_start_xmit,
.ndo_tx_timeout = smc_timeout,
.ndo_set_rx_mode = smc_set_multicast_list,
- .ndo_change_mtu = eth_change_mtu,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
@@ -2326,6 +2326,8 @@ static int smc_drv_probe(struct platform_device *pdev)
if (!device_property_read_u32(&pdev->dev, "reg-shift",
&val))
lp->io_shift = val;
+ lp->cfg.pxa_u16_align4 =
+ device_property_read_bool(&pdev->dev, "pxa-u16-align4");
}
#endif
diff --git a/drivers/net/ethernet/smsc/smc91x.h b/drivers/net/ethernet/smsc/smc91x.h
index ea8465467469..08b17adf0a65 100644
--- a/drivers/net/ethernet/smsc/smc91x.h
+++ b/drivers/net/ethernet/smsc/smc91x.h
@@ -86,11 +86,11 @@
#define SMC_inl(a, r) readl((a) + (r))
#define SMC_outb(v, a, r) writeb(v, (a) + (r))
-#define SMC_outw(v, a, r) \
+#define SMC_outw(lp, v, a, r) \
do { \
unsigned int __v = v, __smc_r = r; \
if (SMC_16BIT(lp)) \
- __SMC_outw(__v, a, __smc_r); \
+ __SMC_outw(lp, __v, a, __smc_r); \
else if (SMC_8BIT(lp)) \
SMC_outw_b(__v, a, __smc_r); \
else \
@@ -107,10 +107,10 @@
#define SMC_IRQ_FLAGS (-1) /* from resource */
/* We actually can't write halfwords properly if not word aligned */
-static inline void __SMC_outw(u16 val, void __iomem *ioaddr, int reg)
+static inline void _SMC_outw_align4(u16 val, void __iomem *ioaddr, int reg,
+ bool use_align4_workaround)
{
- if ((machine_is_mainstone() || machine_is_stargate2() ||
- machine_is_pxa_idp()) && reg & 2) {
+ if (use_align4_workaround) {
unsigned int v = val << 16;
v |= readl(ioaddr + (reg & ~2)) & 0xffff;
writel(v, ioaddr + (reg & ~2));
@@ -119,6 +119,12 @@ static inline void __SMC_outw(u16 val, void __iomem *ioaddr, int reg)
}
}
+#define __SMC_outw(lp, v, a, r) \
+ _SMC_outw_align4((v), (a), (r), \
+ IS_BUILTIN(CONFIG_ARCH_PXA) && ((r) & 2) && \
+ (lp)->cfg.pxa_u16_align4)
+
+
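+/* Worked example (editor's note, values are illustrative): with the
+ * workaround active, a 16-bit write of 0xBEEF to reg 0x0e becomes
+ *
+ *	v = 0xBEEF << 16;
+ *	v |= readl(ioaddr + 0x0c) & 0xffff;
+ *	writel(v, ioaddr + 0x0c);
+ *
+ * i.e. a 32-bit read-modify-write of the aligned word below it.
+ */
+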
#elif defined(CONFIG_SH_SH4202_MICRODEV)
#define SMC_CAN_USE_8BIT 0
@@ -129,7 +135,7 @@ static inline void __SMC_outw(u16 val, void __iomem *ioaddr, int reg)
#define SMC_inw(a, r) inw((a) + (r) - 0xa0000000)
#define SMC_inl(a, r) inl((a) + (r) - 0xa0000000)
#define SMC_outb(v, a, r) outb(v, (a) + (r) - 0xa0000000)
-#define SMC_outw(v, a, r) outw(v, (a) + (r) - 0xa0000000)
+#define SMC_outw(lp, v, a, r) outw(v, (a) + (r) - 0xa0000000)
#define SMC_outl(v, a, r) outl(v, (a) + (r) - 0xa0000000)
#define SMC_insl(a, r, p, l) insl((a) + (r) - 0xa0000000, p, l)
#define SMC_outsl(a, r, p, l) outsl((a) + (r) - 0xa0000000, p, l)
@@ -147,7 +153,7 @@ static inline void __SMC_outw(u16 val, void __iomem *ioaddr, int reg)
#define SMC_inb(a, r) inb(((u32)a) + (r))
#define SMC_inw(a, r) inw(((u32)a) + (r))
#define SMC_outb(v, a, r) outb(v, ((u32)a) + (r))
-#define SMC_outw(v, a, r) outw(v, ((u32)a) + (r))
+#define SMC_outw(lp, v, a, r) outw(v, ((u32)a) + (r))
#define SMC_insw(a, r, p, l) insw(((u32)a) + (r), p, l)
#define SMC_outsw(a, r, p, l) outsw(((u32)a) + (r), p, l)
@@ -175,7 +181,7 @@ static inline void __SMC_outw(u16 val, void __iomem *ioaddr, int reg)
#define SMC_inw(a, r) readw((a) + (r))
#define SMC_inl(a, r) readl((a) + (r))
#define SMC_outb(v, a, r) writeb(v, (a) + (r))
-#define SMC_outw(v, a, r) writew(v, (a) + (r))
+#define SMC_outw(lp, v, a, r) writew(v, (a) + (r))
#define SMC_outl(v, a, r) writel(v, (a) + (r))
#define SMC_insw(a, r, p, l) readsw((a) + (r), p, l)
#define SMC_outsw(a, r, p, l) writesw((a) + (r), p, l)
@@ -207,7 +213,7 @@ static inline void mcf_outsw(void *a, unsigned char *p, int l)
}
#define SMC_inw(a, r) _swapw(readw((a) + (r)))
-#define SMC_outw(v, a, r) writew(_swapw(v), (a) + (r))
+#define SMC_outw(lp, v, a, r) writew(_swapw(v), (a) + (r))
#define SMC_insw(a, r, p, l) mcf_insw(a + r, p, l)
#define SMC_outsw(a, r, p, l) mcf_outsw(a + r, p, l)
@@ -241,7 +247,7 @@ static inline void mcf_outsw(void *a, unsigned char *p, int l)
#define SMC_inw(a, r) ioread16((a) + (r))
#define SMC_inl(a, r) ioread32((a) + (r))
#define SMC_outb(v, a, r) iowrite8(v, (a) + (r))
-#define SMC_outw(v, a, r) iowrite16(v, (a) + (r))
+#define SMC_outw(lp, v, a, r) iowrite16(v, (a) + (r))
#define SMC_outl(v, a, r) iowrite32(v, (a) + (r))
#define SMC_insw(a, r, p, l) ioread16_rep((a) + (r), p, l)
#define SMC_outsw(a, r, p, l) iowrite16_rep((a) + (r), p, l)
@@ -303,6 +309,8 @@ struct smc_local {
/* the low address lines on some platforms aren't connected... */
int io_shift;
+	/* on some platforms a u16 write must be 4-byte aligned */
+ bool half_word_align4;
struct smc91x_platdata cfg;
};
@@ -457,7 +465,7 @@ smc_pxa_dma_insw(void __iomem *ioaddr, struct smc_local *lp, int reg, int dma,
#if ! SMC_CAN_USE_16BIT
-#define SMC_outw(x, ioaddr, reg) SMC_outw_b(x, ioaddr, reg)
+#define SMC_outw(lp, x, ioaddr, reg) SMC_outw_b(x, ioaddr, reg)
#define SMC_inw(ioaddr, reg) SMC_inw_b(ioaddr, reg)
#define SMC_insw(a, r, p, l) BUG()
#define SMC_outsw(a, r, p, l) BUG()
@@ -909,7 +917,7 @@ static const char * chip_ids[ 16 ] = {
else if (SMC_8BIT(lp)) \
SMC_outb(x, ioaddr, PN_REG(lp)); \
else \
- SMC_outw(x, ioaddr, PN_REG(lp)); \
+ SMC_outw(lp, x, ioaddr, PN_REG(lp)); \
} while (0)
#define SMC_GET_AR(lp) \
@@ -937,7 +945,7 @@ static const char * chip_ids[ 16 ] = {
int __mask; \
local_irq_save(__flags); \
__mask = SMC_inw(ioaddr, INT_REG(lp)) & ~0xff; \
- SMC_outw(__mask | (x), ioaddr, INT_REG(lp)); \
+ SMC_outw(lp, __mask | (x), ioaddr, INT_REG(lp)); \
local_irq_restore(__flags); \
} \
} while (0)
@@ -951,7 +959,7 @@ static const char * chip_ids[ 16 ] = {
if (SMC_8BIT(lp)) \
SMC_outb(x, ioaddr, IM_REG(lp)); \
else \
- SMC_outw((x) << 8, ioaddr, INT_REG(lp)); \
+ SMC_outw(lp, (x) << 8, ioaddr, INT_REG(lp)); \
} while (0)
#define SMC_CURRENT_BANK(lp) SMC_inw(ioaddr, BANK_SELECT)
@@ -961,22 +969,22 @@ static const char * chip_ids[ 16 ] = {
if (SMC_MUST_ALIGN_WRITE(lp)) \
SMC_outl((x)<<16, ioaddr, 12<<SMC_IO_SHIFT); \
else \
- SMC_outw(x, ioaddr, BANK_SELECT); \
+ SMC_outw(lp, x, ioaddr, BANK_SELECT); \
} while (0)
#define SMC_GET_BASE(lp) SMC_inw(ioaddr, BASE_REG(lp))
-#define SMC_SET_BASE(lp, x) SMC_outw(x, ioaddr, BASE_REG(lp))
+#define SMC_SET_BASE(lp, x) SMC_outw(lp, x, ioaddr, BASE_REG(lp))
#define SMC_GET_CONFIG(lp) SMC_inw(ioaddr, CONFIG_REG(lp))
-#define SMC_SET_CONFIG(lp, x) SMC_outw(x, ioaddr, CONFIG_REG(lp))
+#define SMC_SET_CONFIG(lp, x) SMC_outw(lp, x, ioaddr, CONFIG_REG(lp))
#define SMC_GET_COUNTER(lp) SMC_inw(ioaddr, COUNTER_REG(lp))
#define SMC_GET_CTL(lp) SMC_inw(ioaddr, CTL_REG(lp))
-#define SMC_SET_CTL(lp, x) SMC_outw(x, ioaddr, CTL_REG(lp))
+#define SMC_SET_CTL(lp, x) SMC_outw(lp, x, ioaddr, CTL_REG(lp))
#define SMC_GET_MII(lp) SMC_inw(ioaddr, MII_REG(lp))
@@ -987,20 +995,20 @@ static const char * chip_ids[ 16 ] = {
if (SMC_MUST_ALIGN_WRITE(lp)) \
SMC_outl((x)<<16, ioaddr, SMC_REG(lp, 8, 1)); \
else \
- SMC_outw(x, ioaddr, GP_REG(lp)); \
+ SMC_outw(lp, x, ioaddr, GP_REG(lp)); \
} while (0)
-#define SMC_SET_MII(lp, x) SMC_outw(x, ioaddr, MII_REG(lp))
+#define SMC_SET_MII(lp, x) SMC_outw(lp, x, ioaddr, MII_REG(lp))
#define SMC_GET_MIR(lp) SMC_inw(ioaddr, MIR_REG(lp))
-#define SMC_SET_MIR(lp, x) SMC_outw(x, ioaddr, MIR_REG(lp))
+#define SMC_SET_MIR(lp, x) SMC_outw(lp, x, ioaddr, MIR_REG(lp))
#define SMC_GET_MMU_CMD(lp) SMC_inw(ioaddr, MMU_CMD_REG(lp))
-#define SMC_SET_MMU_CMD(lp, x) SMC_outw(x, ioaddr, MMU_CMD_REG(lp))
+#define SMC_SET_MMU_CMD(lp, x) SMC_outw(lp, x, ioaddr, MMU_CMD_REG(lp))
-#define SMC_GET_FIFO(lp) SMC_inw(ioaddr, FIFO_REG(lp))
+#define SMC_GET_FIFO(lp) SMC_inw(ioaddr, FIFO_REG(lp))
#define SMC_GET_PTR(lp) SMC_inw(ioaddr, PTR_REG(lp))
@@ -1009,14 +1017,14 @@ static const char * chip_ids[ 16 ] = {
if (SMC_MUST_ALIGN_WRITE(lp)) \
SMC_outl((x)<<16, ioaddr, SMC_REG(lp, 4, 2)); \
else \
- SMC_outw(x, ioaddr, PTR_REG(lp)); \
+ SMC_outw(lp, x, ioaddr, PTR_REG(lp)); \
} while (0)
#define SMC_GET_EPH_STATUS(lp) SMC_inw(ioaddr, EPH_STATUS_REG(lp))
#define SMC_GET_RCR(lp) SMC_inw(ioaddr, RCR_REG(lp))
-#define SMC_SET_RCR(lp, x) SMC_outw(x, ioaddr, RCR_REG(lp))
+#define SMC_SET_RCR(lp, x) SMC_outw(lp, x, ioaddr, RCR_REG(lp))
#define SMC_GET_REV(lp) SMC_inw(ioaddr, REV_REG(lp))
@@ -1027,12 +1035,12 @@ static const char * chip_ids[ 16 ] = {
if (SMC_MUST_ALIGN_WRITE(lp)) \
SMC_outl((x)<<16, ioaddr, SMC_REG(lp, 8, 0)); \
else \
- SMC_outw(x, ioaddr, RPC_REG(lp)); \
+ SMC_outw(lp, x, ioaddr, RPC_REG(lp)); \
} while (0)
#define SMC_GET_TCR(lp) SMC_inw(ioaddr, TCR_REG(lp))
-#define SMC_SET_TCR(lp, x) SMC_outw(x, ioaddr, TCR_REG(lp))
+#define SMC_SET_TCR(lp, x) SMC_outw(lp, x, ioaddr, TCR_REG(lp))
#ifndef SMC_GET_MAC_ADDR
#define SMC_GET_MAC_ADDR(lp, addr) \
@@ -1049,18 +1057,18 @@ static const char * chip_ids[ 16 ] = {
#define SMC_SET_MAC_ADDR(lp, addr) \
do { \
- SMC_outw(addr[0]|(addr[1] << 8), ioaddr, ADDR0_REG(lp)); \
- SMC_outw(addr[2]|(addr[3] << 8), ioaddr, ADDR1_REG(lp)); \
- SMC_outw(addr[4]|(addr[5] << 8), ioaddr, ADDR2_REG(lp)); \
+ SMC_outw(lp, addr[0] | (addr[1] << 8), ioaddr, ADDR0_REG(lp)); \
+ SMC_outw(lp, addr[2] | (addr[3] << 8), ioaddr, ADDR1_REG(lp)); \
+ SMC_outw(lp, addr[4] | (addr[5] << 8), ioaddr, ADDR2_REG(lp)); \
} while (0)
#define SMC_SET_MCAST(lp, x) \
do { \
const unsigned char *mt = (x); \
- SMC_outw(mt[0] | (mt[1] << 8), ioaddr, MCAST_REG1(lp)); \
- SMC_outw(mt[2] | (mt[3] << 8), ioaddr, MCAST_REG2(lp)); \
- SMC_outw(mt[4] | (mt[5] << 8), ioaddr, MCAST_REG3(lp)); \
- SMC_outw(mt[6] | (mt[7] << 8), ioaddr, MCAST_REG4(lp)); \
+ SMC_outw(lp, mt[0] | (mt[1] << 8), ioaddr, MCAST_REG1(lp)); \
+ SMC_outw(lp, mt[2] | (mt[3] << 8), ioaddr, MCAST_REG2(lp)); \
+ SMC_outw(lp, mt[4] | (mt[5] << 8), ioaddr, MCAST_REG3(lp)); \
+ SMC_outw(lp, mt[6] | (mt[7] << 8), ioaddr, MCAST_REG4(lp)); \
} while (0)
#define SMC_PUT_PKT_HDR(lp, status, length) \
@@ -1069,8 +1077,8 @@ static const char * chip_ids[ 16 ] = {
SMC_outl((status) | (length)<<16, ioaddr, \
DATA_REG(lp)); \
else { \
- SMC_outw(status, ioaddr, DATA_REG(lp)); \
- SMC_outw(length, ioaddr, DATA_REG(lp)); \
+ SMC_outw(lp, status, ioaddr, DATA_REG(lp)); \
+ SMC_outw(lp, length, ioaddr, DATA_REG(lp)); \
} \
} while (0)
diff --git a/drivers/net/ethernet/smsc/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c
index 8b0016a785c0..fa5ca0992be6 100644
--- a/drivers/net/ethernet/smsc/smsc911x.c
+++ b/drivers/net/ethernet/smsc/smsc911x.c
@@ -1963,11 +1963,6 @@ static void smsc911x_ethtool_getdrvinfo(struct net_device *dev,
sizeof(info->bus_info));
}
-static int smsc911x_ethtool_nwayreset(struct net_device *dev)
-{
- return phy_start_aneg(dev->phydev);
-}
-
static u32 smsc911x_ethtool_getmsglevel(struct net_device *dev)
{
struct smsc911x_data *pdata = netdev_priv(dev);
@@ -2139,7 +2134,7 @@ static int smsc911x_ethtool_set_eeprom(struct net_device *dev,
static const struct ethtool_ops smsc911x_ethtool_ops = {
.get_link = ethtool_op_get_link,
.get_drvinfo = smsc911x_ethtool_getdrvinfo,
- .nway_reset = smsc911x_ethtool_nwayreset,
+ .nway_reset = phy_ethtool_nway_reset,
.get_msglevel = smsc911x_ethtool_getmsglevel,
.set_msglevel = smsc911x_ethtool_setmsglevel,
.get_regs_len = smsc911x_ethtool_getregslen,
@@ -2159,7 +2154,6 @@ static const struct net_device_ops smsc911x_netdev_ops = {
.ndo_get_stats = smsc911x_get_stats,
.ndo_set_rx_mode = smsc911x_set_multicast_list,
.ndo_do_ioctl = smsc911x_do_ioctl,
- .ndo_change_mtu = eth_change_mtu,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = smsc911x_set_mac_address,
#ifdef CONFIG_NET_POLL_CONTROLLER
@@ -2591,6 +2585,9 @@ static int smsc911x_suspend(struct device *dev)
PMT_CTRL_PM_MODE_D1_ | PMT_CTRL_WOL_EN_ |
PMT_CTRL_ED_EN_ | PMT_CTRL_PME_EN_);
+ pm_runtime_disable(dev);
+ pm_runtime_set_suspended(dev);
+
return 0;
}
@@ -2600,6 +2597,9 @@ static int smsc911x_resume(struct device *dev)
struct smsc911x_data *pdata = netdev_priv(ndev);
unsigned int to = 100;
+ pm_runtime_enable(dev);
+ pm_runtime_resume(dev);
+
/* Note 3.11 from the datasheet:
* "When the LAN9220 is in a power saving state, a write of any
* data to the BYTE_TEST register will wake-up the device."
diff --git a/drivers/net/ethernet/smsc/smsc9420.c b/drivers/net/ethernet/smsc/smsc9420.c
index b7bfed4bc96b..3174aebb322f 100644
--- a/drivers/net/ethernet/smsc/smsc9420.c
+++ b/drivers/net/ethernet/smsc/smsc9420.c
@@ -254,14 +254,6 @@ static void smsc9420_ethtool_set_msglevel(struct net_device *netdev, u32 data)
pd->msg_enable = data;
}
-static int smsc9420_ethtool_nway_reset(struct net_device *netdev)
-{
- if (!netdev->phydev)
- return -ENODEV;
-
- return phy_start_aneg(netdev->phydev);
-}
-
static int smsc9420_ethtool_getregslen(struct net_device *dev)
{
/* all smsc9420 registers plus all phy registers */
@@ -417,7 +409,7 @@ static const struct ethtool_ops smsc9420_ethtool_ops = {
.get_drvinfo = smsc9420_ethtool_get_drvinfo,
.get_msglevel = smsc9420_ethtool_get_msglevel,
.set_msglevel = smsc9420_ethtool_set_msglevel,
- .nway_reset = smsc9420_ethtool_nway_reset,
+ .nway_reset = phy_ethtool_nway_reset,
.get_link = ethtool_op_get_link,
.get_eeprom_len = smsc9420_ethtool_get_eeprom_len,
.get_eeprom = smsc9420_ethtool_get_eeprom,
diff --git a/drivers/net/ethernet/stmicro/stmmac/Kconfig b/drivers/net/ethernet/stmicro/stmmac/Kconfig
index 4b78168a5f3c..ab66248a4b78 100644
--- a/drivers/net/ethernet/stmicro/stmmac/Kconfig
+++ b/drivers/net/ethernet/stmicro/stmmac/Kconfig
@@ -4,7 +4,7 @@ config STMMAC_ETH
select MII
select PHYLIB
select CRC32
- select PTP_1588_CLOCK
+ imply PTP_1588_CLOCK
select RESET_CONTROLLER
---help---
	  This is the driver for the Ethernet IPs built around a
@@ -69,6 +69,17 @@ config DWMAC_MESON
the stmmac device driver. This driver is used for Meson6,
Meson8, Meson8b and GXBB SoCs.
+config DWMAC_OXNAS
+ tristate "Oxford Semiconductor OXNAS dwmac support"
+ default ARCH_OXNAS
+ depends on OF && COMMON_CLK && (ARCH_OXNAS || COMPILE_TEST)
+ select MFD_SYSCON
+ help
+	  Support for the Ethernet controller on Oxford Semiconductor OXNAS SoCs.
+
+	  This selects the Oxford Semiconductor OXNAS SoC glue layer support for
+	  the stmmac device driver. This driver is used for OX820.
+
config DWMAC_ROCKCHIP
tristate "Rockchip dwmac support"
default ARCH_ROCKCHIP
diff --git a/drivers/net/ethernet/stmicro/stmmac/Makefile b/drivers/net/ethernet/stmicro/stmmac/Makefile
index 5d6ece5919b3..8f83a86ba13c 100644
--- a/drivers/net/ethernet/stmicro/stmmac/Makefile
+++ b/drivers/net/ethernet/stmicro/stmmac/Makefile
@@ -10,6 +10,7 @@ obj-$(CONFIG_STMMAC_PLATFORM) += stmmac-platform.o
obj-$(CONFIG_DWMAC_IPQ806X) += dwmac-ipq806x.o
obj-$(CONFIG_DWMAC_LPC18XX) += dwmac-lpc18xx.o
obj-$(CONFIG_DWMAC_MESON) += dwmac-meson.o dwmac-meson8b.o
+obj-$(CONFIG_DWMAC_OXNAS) += dwmac-oxnas.o
obj-$(CONFIG_DWMAC_ROCKCHIP) += dwmac-rk.o
obj-$(CONFIG_DWMAC_SOCFPGA) += dwmac-altr-socfpga.o
obj-$(CONFIG_DWMAC_STI) += dwmac-sti.o
diff --git a/drivers/net/ethernet/stmicro/stmmac/chain_mode.c b/drivers/net/ethernet/stmicro/stmmac/chain_mode.c
index b3e669af3005..026e8e9cb942 100644
--- a/drivers/net/ethernet/stmicro/stmmac/chain_mode.c
+++ b/drivers/net/ethernet/stmicro/stmmac/chain_mode.c
@@ -34,7 +34,7 @@ static int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
unsigned int entry = priv->cur_tx;
struct dma_desc *desc = priv->dma_tx + entry;
unsigned int nopaged_len = skb_headlen(skb);
- unsigned int bmax;
+ unsigned int bmax, des2;
unsigned int i = 1, len;
if (priv->plat->enh_desc)
@@ -44,11 +44,12 @@ static int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
len = nopaged_len - bmax;
- desc->des2 = dma_map_single(priv->device, skb->data,
- bmax, DMA_TO_DEVICE);
- if (dma_mapping_error(priv->device, desc->des2))
+ des2 = dma_map_single(priv->device, skb->data,
+ bmax, DMA_TO_DEVICE);
+ desc->des2 = cpu_to_le32(des2);
+ if (dma_mapping_error(priv->device, des2))
return -1;
- priv->tx_skbuff_dma[entry].buf = desc->des2;
+ priv->tx_skbuff_dma[entry].buf = des2;
priv->tx_skbuff_dma[entry].len = bmax;
/* do not close the descriptor and do not set own bit */
priv->hw->desc->prepare_tx_desc(desc, 1, bmax, csum, STMMAC_CHAIN_MODE,
@@ -60,12 +61,13 @@ static int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
desc = priv->dma_tx + entry;
if (len > bmax) {
- desc->des2 = dma_map_single(priv->device,
- (skb->data + bmax * i),
- bmax, DMA_TO_DEVICE);
- if (dma_mapping_error(priv->device, desc->des2))
+ des2 = dma_map_single(priv->device,
+ (skb->data + bmax * i),
+ bmax, DMA_TO_DEVICE);
+ desc->des2 = cpu_to_le32(des2);
+ if (dma_mapping_error(priv->device, des2))
return -1;
- priv->tx_skbuff_dma[entry].buf = desc->des2;
+ priv->tx_skbuff_dma[entry].buf = des2;
priv->tx_skbuff_dma[entry].len = bmax;
priv->hw->desc->prepare_tx_desc(desc, 0, bmax, csum,
STMMAC_CHAIN_MODE, 1,
@@ -73,12 +75,13 @@ static int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
len -= bmax;
i++;
} else {
- desc->des2 = dma_map_single(priv->device,
- (skb->data + bmax * i), len,
- DMA_TO_DEVICE);
- if (dma_mapping_error(priv->device, desc->des2))
+ des2 = dma_map_single(priv->device,
+ (skb->data + bmax * i), len,
+ DMA_TO_DEVICE);
+ desc->des2 = cpu_to_le32(des2);
+ if (dma_mapping_error(priv->device, des2))
return -1;
- priv->tx_skbuff_dma[entry].buf = desc->des2;
+ priv->tx_skbuff_dma[entry].buf = des2;
priv->tx_skbuff_dma[entry].len = len;
/* last descriptor can be set now */
priv->hw->desc->prepare_tx_desc(desc, 0, len, csum,
@@ -119,19 +122,19 @@ static void stmmac_init_dma_chain(void *des, dma_addr_t phy_addr,
struct dma_extended_desc *p = (struct dma_extended_desc *)des;
for (i = 0; i < (size - 1); i++) {
dma_phy += sizeof(struct dma_extended_desc);
- p->basic.des3 = (unsigned int)dma_phy;
+ p->basic.des3 = cpu_to_le32((unsigned int)dma_phy);
p++;
}
- p->basic.des3 = (unsigned int)phy_addr;
+ p->basic.des3 = cpu_to_le32((unsigned int)phy_addr);
} else {
struct dma_desc *p = (struct dma_desc *)des;
for (i = 0; i < (size - 1); i++) {
dma_phy += sizeof(struct dma_desc);
- p->des3 = (unsigned int)dma_phy;
+ p->des3 = cpu_to_le32((unsigned int)dma_phy);
p++;
}
- p->des3 = (unsigned int)phy_addr;
+ p->des3 = cpu_to_le32((unsigned int)phy_addr);
}
}
@@ -144,10 +147,10 @@ static void stmmac_refill_desc3(void *priv_ptr, struct dma_desc *p)
* 1588-2002 time stamping is enabled, hence reinitialize it
* to keep explicit chaining in the descriptor.
*/
- p->des3 = (unsigned int)(priv->dma_rx_phy +
- (((priv->dirty_rx) + 1) %
- DMA_RX_SIZE) *
- sizeof(struct dma_desc));
+ p->des3 = cpu_to_le32((unsigned int)(priv->dma_rx_phy +
+ (((priv->dirty_rx) + 1) %
+ DMA_RX_SIZE) *
+ sizeof(struct dma_desc)));
}
static void stmmac_clean_desc3(void *priv_ptr, struct dma_desc *p)
@@ -161,9 +164,9 @@ static void stmmac_clean_desc3(void *priv_ptr, struct dma_desc *p)
* 1588-2002 time stamping is enabled, hence reinitialize it
* to keep explicit chaining in the descriptor.
*/
- p->des3 = (unsigned int)((priv->dma_tx_phy +
- ((priv->dirty_tx + 1) % DMA_TX_SIZE))
- * sizeof(struct dma_desc));
+ p->des3 = cpu_to_le32((unsigned int)((priv->dma_tx_phy +
+ ((priv->dirty_tx + 1) % DMA_TX_SIZE))
+ * sizeof(struct dma_desc)));
}
const struct stmmac_mode_ops chain_mode_ops = {
diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h
index 6d2de4e01f6d..b13a144f72ad 100644
--- a/drivers/net/ethernet/stmicro/stmmac/common.h
+++ b/drivers/net/ethernet/stmicro/stmmac/common.h
@@ -44,6 +44,7 @@
#define DWMAC_CORE_4_00 0x40
#define STMMAC_CHAN0 0 /* Always supported and default for all chips */
+/* These need to be a power of two, and >= 4, since STMMAC_GET_ENTRY()
+ * below wraps ring indices by masking with (size - 1).
+ */
#define DMA_TX_SIZE 512
#define DMA_RX_SIZE 512
#define STMMAC_GET_ENTRY(x, size) ((x + 1) & (size - 1))
@@ -411,8 +412,8 @@ extern const struct stmmac_desc_ops ndesc_ops;
struct stmmac_dma_ops {
/* DMA core initialization */
int (*reset)(void __iomem *ioaddr);
- void (*init)(void __iomem *ioaddr, int pbl, int fb, int mb,
- int aal, u32 dma_tx, u32 dma_rx, int atds);
+ void (*init)(void __iomem *ioaddr, struct stmmac_dma_cfg *dma_cfg,
+ u32 dma_tx, u32 dma_rx, int atds);
/* Configure the AXI Bus Mode Register */
void (*axi)(void __iomem *ioaddr, struct stmmac_axi *axi);
/* Dump DMA registers */
@@ -506,6 +507,12 @@ struct mac_link {
struct mii_regs {
unsigned int addr; /* MII Address */
unsigned int data; /* MII Data */
+ unsigned int addr_shift; /* MII address shift */
+ unsigned int reg_shift; /* MII reg shift */
+ unsigned int addr_mask; /* MII address mask */
+ unsigned int reg_mask; /* MII reg mask */
+ unsigned int clk_csr_shift;
+ unsigned int clk_csr_mask;
};
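+
+/* Editor's sketch (the consumer is assumed, it is not shown in this diff):
+ * these fields let the MDIO code compose the MII address register
+ * generically, roughly:
+ *
+ *	value |= (phyaddr << mii->addr_shift) & mii->addr_mask;
+ *	value |= (phyreg << mii->reg_shift) & mii->reg_mask;
+ *	value |= (clk_csr << mii->clk_csr_shift) & mii->clk_csr_mask;
+ */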
/* Helpers to manage the descriptors for chain and ring modes */
diff --git a/drivers/net/ethernet/stmicro/stmmac/descs.h b/drivers/net/ethernet/stmicro/stmmac/descs.h
index e3c86d422109..faeeef75d7f1 100644
--- a/drivers/net/ethernet/stmicro/stmmac/descs.h
+++ b/drivers/net/ethernet/stmicro/stmmac/descs.h
@@ -87,7 +87,7 @@
#define TDES0_ERROR_SUMMARY BIT(15)
#define TDES0_IP_HEADER_ERROR BIT(16)
#define TDES0_TIME_STAMP_STATUS BIT(17)
-#define TDES0_OWN BIT(31)
+#define TDES0_OWN ((u32)BIT(31)) /* silence sparse */
/* TDES1 */
#define TDES1_BUFFER1_SIZE_MASK GENMASK(10, 0)
#define TDES1_BUFFER2_SIZE_MASK GENMASK(21, 11)
@@ -130,7 +130,7 @@
#define ETDES0_FIRST_SEGMENT BIT(28)
#define ETDES0_LAST_SEGMENT BIT(29)
#define ETDES0_INTERRUPT BIT(30)
-#define ETDES0_OWN BIT(31)
+#define ETDES0_OWN ((u32)BIT(31)) /* silence sparse */
/* TDES1 */
#define ETDES1_BUFFER1_SIZE_MASK GENMASK(12, 0)
#define ETDES1_BUFFER2_SIZE_MASK GENMASK(28, 16)
@@ -170,19 +170,19 @@
/* Basic descriptor structure for normal and alternate descriptors */
struct dma_desc {
- unsigned int des0;
- unsigned int des1;
- unsigned int des2;
- unsigned int des3;
+ __le32 des0;
+ __le32 des1;
+ __le32 des2;
+ __le32 des3;
};
/* Extended descriptor structure (e.g. >= databook 3.50a) */
struct dma_extended_desc {
struct dma_desc basic; /* Basic descriptors */
- unsigned int des4; /* Extended Status */
- unsigned int des5; /* Reserved */
- unsigned int des6; /* Tx/Rx Timestamp Low */
- unsigned int des7; /* Tx/Rx Timestamp High */
+ __le32 des4; /* Extended Status */
+ __le32 des5; /* Reserved */
+ __le32 des6; /* Tx/Rx Timestamp Low */
+ __le32 des7; /* Tx/Rx Timestamp High */
};
/* Transmit checksum insertion control */
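+
+/* Editor's note (illustrative, not part of the patch): with the fields now
+ * __le32, every CPU-side access must convert explicitly, as in the
+ * chain_mode.c hunks above, e.g.
+ *
+ *	p->des2 = cpu_to_le32(des2);
+ *	len = le32_to_cpu(p->des1) & TDES1_BUFFER1_SIZE_MASK;
+ *
+ * which keeps sparse happy and makes big-endian hosts behave.
+ */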
diff --git a/drivers/net/ethernet/stmicro/stmmac/descs_com.h b/drivers/net/ethernet/stmicro/stmmac/descs_com.h
index 7635a464ce41..1d181e205d6e 100644
--- a/drivers/net/ethernet/stmicro/stmmac/descs_com.h
+++ b/drivers/net/ethernet/stmicro/stmmac/descs_com.h
@@ -35,47 +35,50 @@
/* Enhanced descriptors */
static inline void ehn_desc_rx_set_on_ring(struct dma_desc *p, int end)
{
- p->des1 |= ((BUF_SIZE_8KiB - 1) << ERDES1_BUFFER2_SIZE_SHIFT)
- & ERDES1_BUFFER2_SIZE_MASK;
+ p->des1 |= cpu_to_le32(((BUF_SIZE_8KiB - 1)
+ << ERDES1_BUFFER2_SIZE_SHIFT)
+ & ERDES1_BUFFER2_SIZE_MASK);
if (end)
- p->des1 |= ERDES1_END_RING;
+ p->des1 |= cpu_to_le32(ERDES1_END_RING);
}
static inline void enh_desc_end_tx_desc_on_ring(struct dma_desc *p, int end)
{
if (end)
- p->des0 |= ETDES0_END_RING;
+ p->des0 |= cpu_to_le32(ETDES0_END_RING);
else
- p->des0 &= ~ETDES0_END_RING;
+ p->des0 &= cpu_to_le32(~ETDES0_END_RING);
}
static inline void enh_set_tx_desc_len_on_ring(struct dma_desc *p, int len)
{
if (unlikely(len > BUF_SIZE_4KiB)) {
- p->des1 |= (((len - BUF_SIZE_4KiB) << ETDES1_BUFFER2_SIZE_SHIFT)
+ p->des1 |= cpu_to_le32((((len - BUF_SIZE_4KiB)
+ << ETDES1_BUFFER2_SIZE_SHIFT)
& ETDES1_BUFFER2_SIZE_MASK) | (BUF_SIZE_4KiB
- & ETDES1_BUFFER1_SIZE_MASK);
+ & ETDES1_BUFFER1_SIZE_MASK));
} else
- p->des1 |= (len & ETDES1_BUFFER1_SIZE_MASK);
+ p->des1 |= cpu_to_le32((len & ETDES1_BUFFER1_SIZE_MASK));
}
/* Normal descriptors */
static inline void ndesc_rx_set_on_ring(struct dma_desc *p, int end)
{
- p->des1 |= ((BUF_SIZE_2KiB - 1) << RDES1_BUFFER2_SIZE_SHIFT)
- & RDES1_BUFFER2_SIZE_MASK;
+ p->des1 |= cpu_to_le32(((BUF_SIZE_2KiB - 1)
+ << RDES1_BUFFER2_SIZE_SHIFT)
+ & RDES1_BUFFER2_SIZE_MASK);
if (end)
- p->des1 |= RDES1_END_RING;
+ p->des1 |= cpu_to_le32(RDES1_END_RING);
}
static inline void ndesc_end_tx_desc_on_ring(struct dma_desc *p, int end)
{
if (end)
- p->des1 |= TDES1_END_RING;
+ p->des1 |= cpu_to_le32(TDES1_END_RING);
else
- p->des1 &= ~TDES1_END_RING;
+ p->des1 &= cpu_to_le32(~TDES1_END_RING);
}
static inline void norm_set_tx_desc_len_on_ring(struct dma_desc *p, int len)
@@ -83,10 +86,11 @@ static inline void norm_set_tx_desc_len_on_ring(struct dma_desc *p, int len)
if (unlikely(len > BUF_SIZE_2KiB)) {
unsigned int buffer1 = (BUF_SIZE_2KiB - 1)
& TDES1_BUFFER1_SIZE_MASK;
- p->des1 |= ((((len - buffer1) << TDES1_BUFFER2_SIZE_SHIFT)
- & TDES1_BUFFER2_SIZE_MASK) | buffer1);
+ p->des1 |= cpu_to_le32((((len - buffer1)
+ << TDES1_BUFFER2_SIZE_SHIFT)
+ & TDES1_BUFFER2_SIZE_MASK) | buffer1);
} else
- p->des1 |= (len & TDES1_BUFFER1_SIZE_MASK);
+ p->des1 |= cpu_to_le32((len & TDES1_BUFFER1_SIZE_MASK));
}
/* Specific functions used for Chain mode */
@@ -94,32 +98,32 @@ static inline void norm_set_tx_desc_len_on_ring(struct dma_desc *p, int len)
/* Enhanced descriptors */
static inline void ehn_desc_rx_set_on_chain(struct dma_desc *p)
{
- p->des1 |= ERDES1_SECOND_ADDRESS_CHAINED;
+ p->des1 |= cpu_to_le32(ERDES1_SECOND_ADDRESS_CHAINED);
}
static inline void enh_desc_end_tx_desc_on_chain(struct dma_desc *p)
{
- p->des0 |= ETDES0_SECOND_ADDRESS_CHAINED;
+ p->des0 |= cpu_to_le32(ETDES0_SECOND_ADDRESS_CHAINED);
}
static inline void enh_set_tx_desc_len_on_chain(struct dma_desc *p, int len)
{
- p->des1 |= (len & ETDES1_BUFFER1_SIZE_MASK);
+ p->des1 |= cpu_to_le32(len & ETDES1_BUFFER1_SIZE_MASK);
}
/* Normal descriptors */
static inline void ndesc_rx_set_on_chain(struct dma_desc *p, int end)
{
- p->des1 |= RDES1_SECOND_ADDRESS_CHAINED;
+ p->des1 |= cpu_to_le32(RDES1_SECOND_ADDRESS_CHAINED);
}
static inline void ndesc_tx_set_on_chain(struct dma_desc *p)
{
- p->des1 |= TDES1_SECOND_ADDRESS_CHAINED;
+ p->des1 |= cpu_to_le32(TDES1_SECOND_ADDRESS_CHAINED);
}
static inline void norm_set_tx_desc_len_on_chain(struct dma_desc *p, int len)
{
- p->des1 |= len & TDES1_BUFFER1_SIZE_MASK;
+ p->des1 |= cpu_to_le32(len & TDES1_BUFFER1_SIZE_MASK);
}
#endif /* __DESC_COM_H__ */
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-generic.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-generic.c
index e6e6c2fcc4b7..3304095c934c 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-generic.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-generic.c
@@ -71,9 +71,12 @@ err_remove_config_dt:
static const struct of_device_id dwmac_generic_match[] = {
{ .compatible = "st,spear600-gmac"},
+ { .compatible = "snps,dwmac-3.50a"},
{ .compatible = "snps,dwmac-3.610"},
{ .compatible = "snps,dwmac-3.70a"},
{ .compatible = "snps,dwmac-3.710"},
+ { .compatible = "snps,dwmac-4.00"},
+ { .compatible = "snps,dwmac-4.10a"},
{ .compatible = "snps,dwmac"},
{ }
};
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-oxnas.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-oxnas.c
new file mode 100644
index 000000000000..c35597586121
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-oxnas.c
@@ -0,0 +1,217 @@
+/*
+ * Oxford Semiconductor OXNAS DWMAC glue layer
+ *
+ * Copyright (C) 2016 Neil Armstrong <narmstrong@baylibre.com>
+ * Copyright (C) 2014 Daniel Golle <daniel@makrotopia.org>
+ * Copyright (C) 2013 Ma Haijun <mahaijuns@gmail.com>
+ * Copyright (C) 2012 John Crispin <blogic@openwrt.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/mfd/syscon.h>
+#include <linux/stmmac.h>
+
+#include "stmmac_platform.h"
+
+/* System Control regmap offsets */
+#define OXNAS_DWMAC_CTRL_REGOFFSET 0x78
+#define OXNAS_DWMAC_DELAY_REGOFFSET 0x100
+
+/* Control Register */
+#define DWMAC_CKEN_RX_IN 14
+#define DWMAC_CKEN_RXN_OUT 13
+#define DWMAC_CKEN_RX_OUT 12
+#define DWMAC_CKEN_TX_IN 10
+#define DWMAC_CKEN_TXN_OUT 9
+#define DWMAC_CKEN_TX_OUT 8
+#define DWMAC_RX_SOURCE 7
+#define DWMAC_TX_SOURCE 6
+#define DWMAC_LOW_TX_SOURCE 4
+#define DWMAC_AUTO_TX_SOURCE 3
+#define DWMAC_RGMII 2
+#define DWMAC_SIMPLE_MUX 1
+#define DWMAC_CKEN_GTX 0
+
+/* Delay register */
+#define DWMAC_TX_VARDELAY_SHIFT 0
+#define DWMAC_TXN_VARDELAY_SHIFT 8
+#define DWMAC_RX_VARDELAY_SHIFT 16
+#define DWMAC_RXN_VARDELAY_SHIFT 24
+#define DWMAC_TX_VARDELAY(d) ((d) << DWMAC_TX_VARDELAY_SHIFT)
+#define DWMAC_TXN_VARDELAY(d) ((d) << DWMAC_TXN_VARDELAY_SHIFT)
+#define DWMAC_RX_VARDELAY(d) ((d) << DWMAC_RX_VARDELAY_SHIFT)
+#define DWMAC_RXN_VARDELAY(d) ((d) << DWMAC_RXN_VARDELAY_SHIFT)
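+
+/* Editor's illustration (values from oxnas_dwmac_init() below): the four
+ * delay fields pack into one 32-bit register, so
+ *
+ *	DWMAC_TX_VARDELAY(4) | DWMAC_TXN_VARDELAY(2) |
+ *	DWMAC_RX_VARDELAY(10) | DWMAC_RXN_VARDELAY(8)
+ *
+ * == (4 << 0) | (2 << 8) | (10 << 16) | (8 << 24) == 0x080a0204.
+ */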
+
+struct oxnas_dwmac {
+ struct device *dev;
+ struct clk *clk;
+ struct regmap *regmap;
+};
+
+static int oxnas_dwmac_init(struct oxnas_dwmac *dwmac)
+{
+ unsigned int value;
+ int ret;
+
+ /* Reset HW here before changing the glue configuration */
+ ret = device_reset(dwmac->dev);
+ if (ret)
+ return ret;
+
+ ret = clk_prepare_enable(dwmac->clk);
+ if (ret)
+ return ret;
+
+ ret = regmap_read(dwmac->regmap, OXNAS_DWMAC_CTRL_REGOFFSET, &value);
+ if (ret < 0) {
+ clk_disable_unprepare(dwmac->clk);
+ return ret;
+ }
+
+ /* Enable GMII_GTXCLK to follow GMII_REFCLK, required for gigabit PHY */
+ value |= BIT(DWMAC_CKEN_GTX) |
+		 /* Use simple mux for 25/125 MHz clock switching */
+ BIT(DWMAC_SIMPLE_MUX) |
+ /* set auto switch tx clock source */
+ BIT(DWMAC_AUTO_TX_SOURCE) |
+ /* enable tx & rx vardelay */
+ BIT(DWMAC_CKEN_TX_OUT) |
+ BIT(DWMAC_CKEN_TXN_OUT) |
+ BIT(DWMAC_CKEN_TX_IN) |
+ BIT(DWMAC_CKEN_RX_OUT) |
+ BIT(DWMAC_CKEN_RXN_OUT) |
+ BIT(DWMAC_CKEN_RX_IN);
+ regmap_write(dwmac->regmap, OXNAS_DWMAC_CTRL_REGOFFSET, value);
+
+ /* set tx & rx vardelay */
+ value = DWMAC_TX_VARDELAY(4) |
+ DWMAC_TXN_VARDELAY(2) |
+ DWMAC_RX_VARDELAY(10) |
+ DWMAC_RXN_VARDELAY(8);
+ regmap_write(dwmac->regmap, OXNAS_DWMAC_DELAY_REGOFFSET, value);
+
+ return 0;
+}
+
+static int oxnas_dwmac_probe(struct platform_device *pdev)
+{
+ struct plat_stmmacenet_data *plat_dat;
+ struct stmmac_resources stmmac_res;
+ struct device_node *sysctrl;
+ struct oxnas_dwmac *dwmac;
+ int ret;
+
+ sysctrl = of_parse_phandle(pdev->dev.of_node, "oxsemi,sys-ctrl", 0);
+ if (!sysctrl) {
+ dev_err(&pdev->dev, "failed to get sys-ctrl node\n");
+ return -EINVAL;
+ }
+
+ ret = stmmac_get_platform_resources(pdev, &stmmac_res);
+ if (ret)
+ return ret;
+
+ plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac);
+ if (IS_ERR(plat_dat))
+ return PTR_ERR(plat_dat);
+
+ dwmac = devm_kzalloc(&pdev->dev, sizeof(*dwmac), GFP_KERNEL);
+ if (!dwmac)
+ return -ENOMEM;
+
+ dwmac->dev = &pdev->dev;
+ plat_dat->bsp_priv = dwmac;
+
+ dwmac->regmap = syscon_node_to_regmap(sysctrl);
+ if (IS_ERR(dwmac->regmap)) {
+ dev_err(&pdev->dev, "failed to have sysctrl regmap\n");
+ return PTR_ERR(dwmac->regmap);
+ }
+
+ dwmac->clk = devm_clk_get(&pdev->dev, "gmac");
+ if (IS_ERR(dwmac->clk))
+ return PTR_ERR(dwmac->clk);
+
+ ret = oxnas_dwmac_init(dwmac);
+ if (ret)
+ return ret;
+
+ ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
+ if (ret)
+ clk_disable_unprepare(dwmac->clk);
+
+ return ret;
+}
+
+static int oxnas_dwmac_remove(struct platform_device *pdev)
+{
+ struct oxnas_dwmac *dwmac = get_stmmac_bsp_priv(&pdev->dev);
+ int ret = stmmac_dvr_remove(&pdev->dev);
+
+ clk_disable_unprepare(dwmac->clk);
+
+ return ret;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int oxnas_dwmac_suspend(struct device *dev)
+{
+ struct oxnas_dwmac *dwmac = get_stmmac_bsp_priv(dev);
+ int ret;
+
+ ret = stmmac_suspend(dev);
+ clk_disable_unprepare(dwmac->clk);
+
+ return ret;
+}
+
+static int oxnas_dwmac_resume(struct device *dev)
+{
+ struct oxnas_dwmac *dwmac = get_stmmac_bsp_priv(dev);
+ int ret;
+
+ ret = oxnas_dwmac_init(dwmac);
+ if (ret)
+ return ret;
+
+ ret = stmmac_resume(dev);
+
+ return ret;
+}
+#endif /* CONFIG_PM_SLEEP */
+
+static SIMPLE_DEV_PM_OPS(oxnas_dwmac_pm_ops,
+ oxnas_dwmac_suspend, oxnas_dwmac_resume);
+
+static const struct of_device_id oxnas_dwmac_match[] = {
+ { .compatible = "oxsemi,ox820-dwmac" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, oxnas_dwmac_match);
+
+static struct platform_driver oxnas_dwmac_driver = {
+ .probe = oxnas_dwmac_probe,
+ .remove = oxnas_dwmac_remove,
+ .driver = {
+ .name = "oxnas-dwmac",
+ .pm = &oxnas_dwmac_pm_ops,
+ .of_match_table = oxnas_dwmac_match,
+ },
+};
+module_platform_driver(oxnas_dwmac_driver);
+
+MODULE_AUTHOR("Neil Armstrong <narmstrong@baylibre.com>");
+MODULE_DESCRIPTION("Oxford Semiconductor OXNAS DWMAC glue layer");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
index d80c88bd2bba..fa6e9704c077 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
@@ -864,6 +864,10 @@ static int rk_gmac_powerup(struct rk_priv_data *bsp_priv)
int ret;
struct device *dev = &bsp_priv->pdev->dev;
+ ret = gmac_clk_enable(bsp_priv, true);
+ if (ret)
+ return ret;
+
/*rmii or rgmii*/
if (bsp_priv->phy_iface == PHY_INTERFACE_MODE_RGMII) {
dev_info(dev, "init for RGMII\n");
@@ -880,10 +884,6 @@ static int rk_gmac_powerup(struct rk_priv_data *bsp_priv)
if (ret)
return ret;
- ret = gmac_clk_enable(bsp_priv, true);
- if (ret)
- return ret;
-
pm_runtime_enable(dev);
pm_runtime_get_sync(dev);
@@ -901,44 +901,6 @@ static void rk_gmac_powerdown(struct rk_priv_data *gmac)
gmac_clk_enable(gmac, false);
}
-static int rk_gmac_init(struct platform_device *pdev, void *priv)
-{
- struct rk_priv_data *bsp_priv = priv;
-
- return rk_gmac_powerup(bsp_priv);
-}
-
-static void rk_gmac_exit(struct platform_device *pdev, void *priv)
-{
- struct rk_priv_data *bsp_priv = priv;
-
- rk_gmac_powerdown(bsp_priv);
-}
-
-static void rk_gmac_suspend(struct platform_device *pdev, void *priv)
-{
- struct rk_priv_data *bsp_priv = priv;
-
- /* Keep the PHY up if we use Wake-on-Lan. */
- if (device_may_wakeup(&pdev->dev))
- return;
-
- rk_gmac_powerdown(bsp_priv);
- bsp_priv->suspended = true;
-}
-
-static void rk_gmac_resume(struct platform_device *pdev, void *priv)
-{
- struct rk_priv_data *bsp_priv = priv;
-
- /* The PHY was up for Wake-on-Lan. */
- if (!bsp_priv->suspended)
- return;
-
- rk_gmac_powerup(bsp_priv);
- bsp_priv->suspended = false;
-}
-
static void rk_fix_speed(void *priv, unsigned int speed)
{
struct rk_priv_data *bsp_priv = priv;
@@ -974,11 +936,7 @@ static int rk_gmac_probe(struct platform_device *pdev)
return PTR_ERR(plat_dat);
plat_dat->has_gmac = true;
- plat_dat->init = rk_gmac_init;
- plat_dat->exit = rk_gmac_exit;
plat_dat->fix_mac_speed = rk_fix_speed;
- plat_dat->suspend = rk_gmac_suspend;
- plat_dat->resume = rk_gmac_resume;
plat_dat->bsp_priv = rk_gmac_setup(pdev, data);
if (IS_ERR(plat_dat->bsp_priv)) {
@@ -986,24 +944,65 @@ static int rk_gmac_probe(struct platform_device *pdev)
goto err_remove_config_dt;
}
- ret = rk_gmac_init(pdev, plat_dat->bsp_priv);
+ ret = rk_gmac_powerup(plat_dat->bsp_priv);
if (ret)
goto err_remove_config_dt;
ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
if (ret)
- goto err_gmac_exit;
+ goto err_gmac_powerdown;
return 0;
-err_gmac_exit:
- rk_gmac_exit(pdev, plat_dat->bsp_priv);
+err_gmac_powerdown:
+ rk_gmac_powerdown(plat_dat->bsp_priv);
err_remove_config_dt:
stmmac_remove_config_dt(pdev, plat_dat);
return ret;
}
+static int rk_gmac_remove(struct platform_device *pdev)
+{
+ struct rk_priv_data *bsp_priv = get_stmmac_bsp_priv(&pdev->dev);
+ int ret = stmmac_dvr_remove(&pdev->dev);
+
+ rk_gmac_powerdown(bsp_priv);
+
+ return ret;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int rk_gmac_suspend(struct device *dev)
+{
+ struct rk_priv_data *bsp_priv = get_stmmac_bsp_priv(dev);
+ int ret = stmmac_suspend(dev);
+
+	/* Keep the PHY up if we use Wake-on-LAN. */
+ if (!device_may_wakeup(dev)) {
+ rk_gmac_powerdown(bsp_priv);
+ bsp_priv->suspended = true;
+ }
+
+ return ret;
+}
+
+static int rk_gmac_resume(struct device *dev)
+{
+ struct rk_priv_data *bsp_priv = get_stmmac_bsp_priv(dev);
+
+	/* The PHY was up for Wake-on-LAN. */
+ if (bsp_priv->suspended) {
+ rk_gmac_powerup(bsp_priv);
+ bsp_priv->suspended = false;
+ }
+
+ return stmmac_resume(dev);
+}
+#endif /* CONFIG_PM_SLEEP */
+
+static SIMPLE_DEV_PM_OPS(rk_gmac_pm_ops, rk_gmac_suspend, rk_gmac_resume);
+
static const struct of_device_id rk_gmac_dwmac_match[] = {
{ .compatible = "rockchip,rk3228-gmac", .data = &rk3228_ops },
{ .compatible = "rockchip,rk3288-gmac", .data = &rk3288_ops },
@@ -1016,10 +1015,10 @@ MODULE_DEVICE_TABLE(of, rk_gmac_dwmac_match);
static struct platform_driver rk_gmac_dwmac_driver = {
.probe = rk_gmac_probe,
- .remove = stmmac_pltfr_remove,
+ .remove = rk_gmac_remove,
.driver = {
.name = "rk_gmac-dwmac",
- .pm = &stmmac_pltfr_pm_ops,
+ .pm = &rk_gmac_pm_ops,
.of_match_table = rk_gmac_dwmac_match,
},
};
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
index 0c420e97de1e..1f997027ae51 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
@@ -380,8 +380,8 @@ static int socfpga_dwmac_resume(struct device *dev)
* control register 0, and can be modified by the phy driver
* framework.
*/
- if (priv->phydev)
- phy_resume(priv->phydev);
+ if (ndev->phydev)
+ phy_resume(ndev->phydev);
return stmmac_resume(dev);
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c
index 060b98c37a85..86e0e053804c 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c
@@ -126,8 +126,8 @@ struct sti_dwmac {
struct clk *clk; /* PHY clock */
u32 ctrl_reg; /* GMAC glue-logic control register */
int clk_sel_reg; /* GMAC ext clk selection register */
- struct device *dev;
struct regmap *regmap;
+ bool gmac_en;
u32 speed;
void (*fix_retime_src)(void *priv, unsigned int speed);
};
@@ -191,7 +191,7 @@ static void stih4xx_fix_retime_src(void *priv, u32 spd)
}
}
- if (src == TX_RETIME_SRC_CLKGEN && dwmac->clk && freq)
+ if (src == TX_RETIME_SRC_CLKGEN && freq)
clk_set_rate(dwmac->clk, freq);
regmap_update_bits(dwmac->regmap, reg, STIH4XX_RETIME_SRC_MASK,
@@ -222,26 +222,20 @@ static void stid127_fix_retime_src(void *priv, u32 spd)
freq = DWMAC_2_5MHZ;
}
- if (dwmac->clk && freq)
+ if (freq)
clk_set_rate(dwmac->clk, freq);
regmap_update_bits(dwmac->regmap, reg, STID127_RETIME_SRC_MASK, val);
}
-static int sti_dwmac_init(struct platform_device *pdev, void *priv)
+static int sti_dwmac_set_mode(struct sti_dwmac *dwmac)
{
- struct sti_dwmac *dwmac = priv;
struct regmap *regmap = dwmac->regmap;
int iface = dwmac->interface;
- struct device *dev = dwmac->dev;
- struct device_node *np = dev->of_node;
u32 reg = dwmac->ctrl_reg;
u32 val;
- if (dwmac->clk)
- clk_prepare_enable(dwmac->clk);
-
- if (of_property_read_bool(np, "st,gmac_en"))
+ if (dwmac->gmac_en)
regmap_update_bits(regmap, reg, EN_MASK, EN);
regmap_update_bits(regmap, reg, MII_PHY_SEL_MASK, phy_intf_sels[iface]);
@@ -249,18 +243,11 @@ static int sti_dwmac_init(struct platform_device *pdev, void *priv)
val = (iface == PHY_INTERFACE_MODE_REVMII) ? 0 : ENMII;
regmap_update_bits(regmap, reg, ENMII_MASK, val);
- dwmac->fix_retime_src(priv, dwmac->speed);
+ dwmac->fix_retime_src(dwmac, dwmac->speed);
return 0;
}
-static void sti_dwmac_exit(struct platform_device *pdev, void *priv)
-{
- struct sti_dwmac *dwmac = priv;
-
- if (dwmac->clk)
- clk_disable_unprepare(dwmac->clk);
-}
static int sti_dwmac_parse_data(struct sti_dwmac *dwmac,
struct platform_device *pdev)
{
@@ -270,9 +257,6 @@ static int sti_dwmac_parse_data(struct sti_dwmac *dwmac,
struct regmap *regmap;
int err;
- if (!np)
- return -EINVAL;
-
/* clk selection from extra syscfg register */
dwmac->clk_sel_reg = -ENXIO;
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "sti-clkconf");
@@ -289,9 +273,9 @@ static int sti_dwmac_parse_data(struct sti_dwmac *dwmac,
return err;
}
- dwmac->dev = dev;
dwmac->interface = of_get_phy_mode(np);
dwmac->regmap = regmap;
+ dwmac->gmac_en = of_property_read_bool(np, "st,gmac_en");
dwmac->ext_phyclk = of_property_read_bool(np, "st,ext-phyclk");
dwmac->tx_retime_src = TX_RETIME_SRC_NA;
dwmac->speed = SPEED_100;
@@ -359,28 +343,65 @@ static int sti_dwmac_probe(struct platform_device *pdev)
dwmac->fix_retime_src = data->fix_retime_src;
plat_dat->bsp_priv = dwmac;
- plat_dat->init = sti_dwmac_init;
- plat_dat->exit = sti_dwmac_exit;
plat_dat->fix_mac_speed = data->fix_retime_src;
- ret = sti_dwmac_init(pdev, plat_dat->bsp_priv);
+ ret = clk_prepare_enable(dwmac->clk);
if (ret)
goto err_remove_config_dt;
+ ret = sti_dwmac_set_mode(dwmac);
+ if (ret)
+ goto disable_clk;
+
ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
if (ret)
- goto err_dwmac_exit;
+ goto disable_clk;
return 0;
-err_dwmac_exit:
- sti_dwmac_exit(pdev, plat_dat->bsp_priv);
+disable_clk:
+ clk_disable_unprepare(dwmac->clk);
err_remove_config_dt:
stmmac_remove_config_dt(pdev, plat_dat);
return ret;
}
+static int sti_dwmac_remove(struct platform_device *pdev)
+{
+ struct sti_dwmac *dwmac = get_stmmac_bsp_priv(&pdev->dev);
+ int ret = stmmac_dvr_remove(&pdev->dev);
+
+ clk_disable_unprepare(dwmac->clk);
+
+ return ret;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int sti_dwmac_suspend(struct device *dev)
+{
+ struct sti_dwmac *dwmac = get_stmmac_bsp_priv(dev);
+ int ret = stmmac_suspend(dev);
+
+ clk_disable_unprepare(dwmac->clk);
+
+ return ret;
+}
+
+static int sti_dwmac_resume(struct device *dev)
+{
+ struct sti_dwmac *dwmac = get_stmmac_bsp_priv(dev);
+
+ clk_prepare_enable(dwmac->clk);
+ sti_dwmac_set_mode(dwmac);
+
+ return stmmac_resume(dev);
+}
+#endif /* CONFIG_PM_SLEEP */
+
+static SIMPLE_DEV_PM_OPS(sti_dwmac_pm_ops, sti_dwmac_suspend,
+ sti_dwmac_resume);
+
static const struct sti_dwmac_of_data stih4xx_dwmac_data = {
.fix_retime_src = stih4xx_fix_retime_src,
};
@@ -400,10 +421,10 @@ MODULE_DEVICE_TABLE(of, sti_dwmac_match);
static struct platform_driver sti_dwmac_driver = {
.probe = sti_dwmac_probe,
- .remove = stmmac_pltfr_remove,
+ .remove = sti_dwmac_remove,
.driver = {
.name = "sti-dwmac",
- .pm = &stmmac_pltfr_pm_ops,
+ .pm = &sti_dwmac_pm_ops,
.of_match_table = sti_dwmac_match,
},
};
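Note why the `if (dwmac->clk)` guards could simply be dropped above: the common clk API treats a NULL struct clk pointer as a no-op, so optional clocks need no explicit checks. A small sketch of that contract (enable_optional_clk is a hypothetical helper, not part of the driver):

    #include <linux/clk.h>

    static int enable_optional_clk(struct clk *clk, unsigned long rate)
    {
    	int ret = clk_prepare_enable(clk);	/* returns 0 when clk is NULL */

    	if (ret)
    		return ret;
    	if (rate)
    		clk_set_rate(clk, rate);	/* also a no-op for NULL */
    	return 0;
    }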
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h b/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h
index ff3e5ab39bd0..52b9407a8a39 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h
@@ -225,7 +225,7 @@ enum rx_tx_priority_ratio {
#define DMA_BUS_MODE_FB 0x00010000 /* Fixed burst */
#define DMA_BUS_MODE_MB 0x04000000 /* Mixed burst */
-#define DMA_BUS_MODE_RPBL_MASK 0x003e0000 /* Rx-Programmable Burst Len */
+#define DMA_BUS_MODE_RPBL_MASK 0x007e0000 /* Rx-Programmable Burst Len */
#define DMA_BUS_MODE_RPBL_SHIFT 17
#define DMA_BUS_MODE_USP 0x00800000
#define DMA_BUS_MODE_MAXPBL 0x01000000
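The one-liner above widens DMA_BUS_MODE_RPBL_MASK from bits 21:17 (0x003e0000) to the full RPBL field at bits 22:17 (0x007e0000); the old mask covered only five of the six RPBL bits. Expressed with GENMASK(), which makes the bit span explicit (a sketch, not the header's actual spelling):

    #include <linux/bitops.h>

    /* RPBL occupies bits 22:17 of DMA_BUS_MODE on dwmac1000. */
    #define DMA_BUS_MODE_RPBL_MASK	GENMASK(22, 17)	/* == 0x007e0000 */
    #define DMA_BUS_MODE_RPBL_SHIFT	17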
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
index 7df4ff158f3d..be3c91c7f211 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
@@ -534,6 +534,12 @@ struct mac_device_info *dwmac1000_setup(void __iomem *ioaddr, int mcbins,
mac->link.speed = GMAC_CONTROL_FES;
mac->mii.addr = GMAC_MII_ADDR;
mac->mii.data = GMAC_MII_DATA;
+ mac->mii.addr_shift = 11;
+ mac->mii.addr_mask = 0x0000F800;
+ mac->mii.reg_shift = 6;
+ mac->mii.reg_mask = 0x000007C0;
+ mac->mii.clk_csr_shift = 2;
+ mac->mii.clk_csr_mask = GENMASK(5, 2);
/* Get and dump the chip ID */
*synopsys_id = stmmac_get_synopsys_id(hwid);
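Storing the address/register/clock shifts and masks in mac->mii (here and in the dwmac100/dwmac4 hunks below) lets a single MDIO routine serve every core generation instead of hard-coding one register layout. A sketch of how the address word is then plausibly composed; mii_regs_sketch mirrors the fields added above, while the authoritative layout lives in the driver's common.h:

    #include <linux/types.h>

    struct mii_regs_sketch {
    	unsigned int addr_shift, addr_mask;
    	unsigned int reg_shift, reg_mask;
    	unsigned int clk_csr_shift, clk_csr_mask;
    };

    static u32 mii_addr_word(const struct mii_regs_sketch *mii,
    			 u32 phyaddr, u32 phyreg, u32 clk_csr)
    {
    	u32 v = 0;

    	v |= (phyaddr << mii->addr_shift) & mii->addr_mask;
    	v |= (phyreg << mii->reg_shift) & mii->reg_mask;
    	v |= (clk_csr << mii->clk_csr_shift) & mii->clk_csr_mask;
    	return v;
    }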
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c
index f35385266fbf..612d3aaac9a4 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c
@@ -84,37 +84,39 @@ static void dwmac1000_dma_axi(void __iomem *ioaddr, struct stmmac_axi *axi)
writel(value, ioaddr + DMA_AXI_BUS_MODE);
}
-static void dwmac1000_dma_init(void __iomem *ioaddr, int pbl, int fb, int mb,
- int aal, u32 dma_tx, u32 dma_rx, int atds)
+static void dwmac1000_dma_init(void __iomem *ioaddr,
+ struct stmmac_dma_cfg *dma_cfg,
+ u32 dma_tx, u32 dma_rx, int atds)
{
u32 value = readl(ioaddr + DMA_BUS_MODE);
+ int txpbl = dma_cfg->txpbl ?: dma_cfg->pbl;
+ int rxpbl = dma_cfg->rxpbl ?: dma_cfg->pbl;
/*
* Set the DMA PBL (Programmable Burst Length) mode.
*
 * Note: before stmmac core 3.50 this mode bit meant 4xPBL;
 * from 3.50 onward it acts as 8xPBL.
- *
- * This configuration doesn't take care about the Separate PBL
- * so only the bits: 13-8 are programmed with the PBL passed from the
- * platform.
*/
- value |= DMA_BUS_MODE_MAXPBL;
- value &= ~DMA_BUS_MODE_PBL_MASK;
- value |= (pbl << DMA_BUS_MODE_PBL_SHIFT);
+ if (dma_cfg->pblx8)
+ value |= DMA_BUS_MODE_MAXPBL;
+ value |= DMA_BUS_MODE_USP;
+ value &= ~(DMA_BUS_MODE_PBL_MASK | DMA_BUS_MODE_RPBL_MASK);
+ value |= (txpbl << DMA_BUS_MODE_PBL_SHIFT);
+ value |= (rxpbl << DMA_BUS_MODE_RPBL_SHIFT);
/* Set the Fixed burst mode */
- if (fb)
+ if (dma_cfg->fixed_burst)
value |= DMA_BUS_MODE_FB;
/* Mixed Burst has no effect when fb is set */
- if (mb)
+ if (dma_cfg->mixed_burst)
value |= DMA_BUS_MODE_MB;
if (atds)
value |= DMA_BUS_MODE_ATDS;
- if (aal)
+ if (dma_cfg->aal)
value |= DMA_BUS_MODE_AAL;
writel(value, ioaddr + DMA_BUS_MODE);
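The `dma_cfg->txpbl ?: dma_cfg->pbl` expressions above use the GNU C conditional with omitted middle operand: `a ?: b` yields a when a is non-zero, otherwise b, without evaluating a twice. Here it means a per-direction PBL of 0 falls back to the common value:

    /* 0 in the per-direction field means "no override". */
    static int effective_pbl(int per_direction, int common)
    {
    	return per_direction ?: common;
    	/* equivalent to: per_direction ? per_direction : common */
    }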
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c
index 6418b2e07619..9dd2987e284d 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c
@@ -192,6 +192,13 @@ struct mac_device_info *dwmac100_setup(void __iomem *ioaddr, int *synopsys_id)
mac->link.speed = 0;
mac->mii.addr = MAC_MII_ADDR;
mac->mii.data = MAC_MII_DATA;
+ mac->mii.addr_shift = 11;
+ mac->mii.addr_mask = 0x0000F800;
+ mac->mii.reg_shift = 6;
+ mac->mii.reg_mask = 0x000007C0;
+ mac->mii.clk_csr_shift = 2;
+ mac->mii.clk_csr_mask = GENMASK(5, 2);
+
/* Synopsys Id is not available on old chips */
*synopsys_id = 0;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c
index 61f54c99a7de..e5664da382f3 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c
@@ -32,11 +32,12 @@
#include "dwmac100.h"
#include "dwmac_dma.h"
-static void dwmac100_dma_init(void __iomem *ioaddr, int pbl, int fb, int mb,
- int aal, u32 dma_tx, u32 dma_rx, int atds)
+static void dwmac100_dma_init(void __iomem *ioaddr,
+ struct stmmac_dma_cfg *dma_cfg,
+ u32 dma_tx, u32 dma_rx, int atds)
{
/* Enable Application Access by writing to DMA CSR0 */
- writel(DMA_BUS_MODE_DEFAULT | (pbl << DMA_BUS_MODE_PBL_SHIFT),
+ writel(DMA_BUS_MODE_DEFAULT | (dma_cfg->pbl << DMA_BUS_MODE_PBL_SHIFT),
ioaddr + DMA_BUS_MODE);
/* Mask interrupts by writing to CSR7 */
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4.h b/drivers/net/ethernet/stmicro/stmmac/dwmac4.h
index 6f4f5ce25114..3e8d4fefa5e0 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4.h
@@ -155,8 +155,11 @@ enum power_event {
#define MTL_CHAN_RX_DEBUG(x) (MTL_CHANX_BASE_ADDR(x) + 0x38)
#define MTL_OP_MODE_RSF BIT(5)
+#define MTL_OP_MODE_TXQEN BIT(3)
#define MTL_OP_MODE_TSF BIT(1)
+#define MTL_OP_MODE_TQS_MASK GENMASK(24, 16)
+
#define MTL_OP_MODE_TTC_MASK 0x70
#define MTL_OP_MODE_TTC_SHIFT 4
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
index 51019b794be5..eaed7cb21867 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
@@ -430,6 +430,12 @@ struct mac_device_info *dwmac4_setup(void __iomem *ioaddr, int mcbins,
mac->link.speed = GMAC_CONFIG_FES;
mac->mii.addr = GMAC_MDIO_ADDR;
mac->mii.data = GMAC_MDIO_DATA;
+ mac->mii.addr_shift = 21;
+ mac->mii.addr_mask = GENMASK(25, 21);
+ mac->mii.reg_shift = 16;
+ mac->mii.reg_mask = GENMASK(20, 16);
+ mac->mii.clk_csr_shift = 8;
+ mac->mii.clk_csr_mask = GENMASK(11, 8);
/* Get and dump the chip ID */
*synopsys_id = stmmac_get_synopsys_id(hwid);
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
index a601f8d43b75..8816515e1bbb 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
@@ -23,7 +23,7 @@ static int dwmac4_wrback_get_tx_status(void *data, struct stmmac_extra_stats *x,
unsigned int tdes3;
int ret = tx_done;
- tdes3 = p->des3;
+ tdes3 = le32_to_cpu(p->des3);
/* Get tx owner first */
if (unlikely(tdes3 & TDES3_OWN))
@@ -77,9 +77,9 @@ static int dwmac4_wrback_get_rx_status(void *data, struct stmmac_extra_stats *x,
struct dma_desc *p)
{
struct net_device_stats *stats = (struct net_device_stats *)data;
- unsigned int rdes1 = p->des1;
- unsigned int rdes2 = p->des2;
- unsigned int rdes3 = p->des3;
+ unsigned int rdes1 = le32_to_cpu(p->des1);
+ unsigned int rdes2 = le32_to_cpu(p->des2);
+ unsigned int rdes3 = le32_to_cpu(p->des3);
int message_type;
int ret = good_frame;
@@ -176,47 +176,48 @@ static int dwmac4_wrback_get_rx_status(void *data, struct stmmac_extra_stats *x,
static int dwmac4_rd_get_tx_len(struct dma_desc *p)
{
- return (p->des2 & TDES2_BUFFER1_SIZE_MASK);
+ return (le32_to_cpu(p->des2) & TDES2_BUFFER1_SIZE_MASK);
}
static int dwmac4_get_tx_owner(struct dma_desc *p)
{
- return (p->des3 & TDES3_OWN) >> TDES3_OWN_SHIFT;
+ return (le32_to_cpu(p->des3) & TDES3_OWN) >> TDES3_OWN_SHIFT;
}
static void dwmac4_set_tx_owner(struct dma_desc *p)
{
- p->des3 |= TDES3_OWN;
+ p->des3 |= cpu_to_le32(TDES3_OWN);
}
static void dwmac4_set_rx_owner(struct dma_desc *p)
{
- p->des3 |= RDES3_OWN;
+ p->des3 |= cpu_to_le32(RDES3_OWN);
}
static int dwmac4_get_tx_ls(struct dma_desc *p)
{
- return (p->des3 & TDES3_LAST_DESCRIPTOR) >> TDES3_LAST_DESCRIPTOR_SHIFT;
+ return (le32_to_cpu(p->des3) & TDES3_LAST_DESCRIPTOR)
+ >> TDES3_LAST_DESCRIPTOR_SHIFT;
}
static int dwmac4_wrback_get_rx_frame_len(struct dma_desc *p, int rx_coe)
{
- return (p->des3 & RDES3_PACKET_SIZE_MASK);
+ return (le32_to_cpu(p->des3) & RDES3_PACKET_SIZE_MASK);
}
static void dwmac4_rd_enable_tx_timestamp(struct dma_desc *p)
{
- p->des2 |= TDES2_TIMESTAMP_ENABLE;
+ p->des2 |= cpu_to_le32(TDES2_TIMESTAMP_ENABLE);
}
static int dwmac4_wrback_get_tx_timestamp_status(struct dma_desc *p)
{
/* Context type from W/B descriptor must be zero */
- if (p->des3 & TDES3_CONTEXT_TYPE)
+ if (le32_to_cpu(p->des3) & TDES3_CONTEXT_TYPE)
return -EINVAL;
/* Tx Timestamp Status is 1, so des0 and des1 hold valid values */
- if (p->des3 & TDES3_TIMESTAMP_STATUS)
+ if (le32_to_cpu(p->des3) & TDES3_TIMESTAMP_STATUS)
return 0;
return 1;
@@ -227,9 +228,9 @@ static inline u64 dwmac4_get_timestamp(void *desc, u32 ats)
struct dma_desc *p = (struct dma_desc *)desc;
u64 ns;
- ns = p->des0;
+ ns = le32_to_cpu(p->des0);
/* convert high/sec time stamp value to nanosecond */
- ns += p->des1 * 1000000000ULL;
+ ns += le32_to_cpu(p->des1) * 1000000000ULL;
return ns;
}
@@ -264,7 +265,7 @@ static int dwmac4_wrback_get_rx_timestamp_status(void *desc, u32 ats)
/* Get the status from normal w/b descriptor */
if (likely(p->des3 & TDES3_RS1V)) {
- if (likely(p->des1 & RDES1_TIMESTAMP_AVAILABLE)) {
+ if (likely(le32_to_cpu(p->des1) & RDES1_TIMESTAMP_AVAILABLE)) {
int i = 0;
/* Check if timestamp is OK from context descriptor */
@@ -287,10 +288,10 @@ exit:
static void dwmac4_rd_init_rx_desc(struct dma_desc *p, int disable_rx_ic,
int mode, int end)
{
- p->des3 = RDES3_OWN | RDES3_BUFFER1_VALID_ADDR;
+ p->des3 = cpu_to_le32(RDES3_OWN | RDES3_BUFFER1_VALID_ADDR);
if (!disable_rx_ic)
- p->des3 |= RDES3_INT_ON_COMPLETION_EN;
+ p->des3 |= cpu_to_le32(RDES3_INT_ON_COMPLETION_EN);
}
static void dwmac4_rd_init_tx_desc(struct dma_desc *p, int mode, int end)
@@ -305,9 +306,9 @@ static void dwmac4_rd_prepare_tx_desc(struct dma_desc *p, int is_fs, int len,
bool csum_flag, int mode, bool tx_own,
bool ls)
{
- unsigned int tdes3 = p->des3;
+ unsigned int tdes3 = le32_to_cpu(p->des3);
- p->des2 |= (len & TDES2_BUFFER1_SIZE_MASK);
+ p->des2 |= cpu_to_le32(len & TDES2_BUFFER1_SIZE_MASK);
if (is_fs)
tdes3 |= TDES3_FIRST_DESCRIPTOR;
@@ -333,9 +334,9 @@ static void dwmac4_rd_prepare_tx_desc(struct dma_desc *p, int is_fs, int len,
* descriptors for the same frame has to be set before, to
* avoid race condition.
*/
- wmb();
+ dma_wmb();
- p->des3 = tdes3;
+ p->des3 = cpu_to_le32(tdes3);
}
static void dwmac4_rd_prepare_tso_tx_desc(struct dma_desc *p, int is_fs,
@@ -343,14 +344,14 @@ static void dwmac4_rd_prepare_tso_tx_desc(struct dma_desc *p, int is_fs,
bool ls, unsigned int tcphdrlen,
unsigned int tcppayloadlen)
{
- unsigned int tdes3 = p->des3;
+ unsigned int tdes3 = le32_to_cpu(p->des3);
if (len1)
- p->des2 |= (len1 & TDES2_BUFFER1_SIZE_MASK);
+ p->des2 |= cpu_to_le32((len1 & TDES2_BUFFER1_SIZE_MASK));
if (len2)
- p->des2 |= (len2 << TDES2_BUFFER2_SIZE_MASK_SHIFT)
- & TDES2_BUFFER2_SIZE_MASK;
+ p->des2 |= cpu_to_le32((len2 << TDES2_BUFFER2_SIZE_MASK_SHIFT)
+ & TDES2_BUFFER2_SIZE_MASK);
if (is_fs) {
tdes3 |= TDES3_FIRST_DESCRIPTOR |
@@ -376,9 +377,9 @@ static void dwmac4_rd_prepare_tso_tx_desc(struct dma_desc *p, int is_fs,
* descriptors for the same frame has to be set before, to
* avoid race condition.
*/
- wmb();
+ dma_wmb();
- p->des3 = tdes3;
+ p->des3 = cpu_to_le32(tdes3);
}
static void dwmac4_release_tx_desc(struct dma_desc *p, int mode)
@@ -389,7 +390,7 @@ static void dwmac4_release_tx_desc(struct dma_desc *p, int mode)
static void dwmac4_rd_set_tx_ic(struct dma_desc *p)
{
- p->des2 |= TDES2_INTERRUPT_ON_COMPLETION;
+ p->des2 |= cpu_to_le32(TDES2_INTERRUPT_ON_COMPLETION);
}
static void dwmac4_display_ring(void *head, unsigned int size, bool rx)
@@ -402,7 +403,8 @@ static void dwmac4_display_ring(void *head, unsigned int size, bool rx)
for (i = 0; i < size; i++) {
pr_info("%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
i, (unsigned int)virt_to_phys(p),
- p->des0, p->des1, p->des2, p->des3);
+ le32_to_cpu(p->des0), le32_to_cpu(p->des1),
+ le32_to_cpu(p->des2), le32_to_cpu(p->des3));
p++;
}
}
@@ -411,8 +413,8 @@ static void dwmac4_set_mss_ctxt(struct dma_desc *p, unsigned int mss)
{
p->des0 = 0;
p->des1 = 0;
- p->des2 = mss;
- p->des3 = TDES3_CONTEXT_TYPE | TDES3_CTXT_TCMSSV;
+ p->des2 = cpu_to_le32(mss);
+ p->des3 = cpu_to_le32(TDES3_CONTEXT_TYPE | TDES3_CTXT_TCMSSV);
}
const struct stmmac_desc_ops dwmac4_desc_ops = {
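Two independent fixes run through the descriptor hunks above. First, descriptor words live in little-endian DMA memory, so every access goes through le32_to_cpu()/cpu_to_le32(), which are no-ops on LE hosts and byte swaps on BE. Second, the barrier before handing OWN to the hardware is relaxed from wmb() to dma_wmb(), which only orders writes as seen by the DMA master and is cheaper on most architectures. The combined pattern, as a sketch with a hypothetical stand-in descriptor:

    #include <linux/types.h>	/* __le32, cpu_to_le32() */

    struct desc_sketch {
    	__le32 des0, des1, des2, des3;
    };

    static void give_to_hw(struct desc_sketch *p, u32 flags, u32 own_bit)
    {
    	u32 tdes3 = le32_to_cpu(p->des3);	/* read in CPU order */

    	tdes3 |= flags;

    	/* Every other word of the descriptor must be visible to the
    	 * device before OWN is transferred; dma_wmb() (from
    	 * <asm/barrier.h>) is sufficient for that.
    	 */
    	dma_wmb();
    	p->des3 = cpu_to_le32(tdes3 | own_bit);
    }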
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c
index 32bc2fc73cdc..8196ab5fc33c 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c
@@ -71,25 +71,29 @@ static void dwmac4_dma_axi(void __iomem *ioaddr, struct stmmac_axi *axi)
writel(value, ioaddr + DMA_SYS_BUS_MODE);
}
-static void dwmac4_dma_init_channel(void __iomem *ioaddr, int pbl,
+static void dwmac4_dma_init_channel(void __iomem *ioaddr,
+ struct stmmac_dma_cfg *dma_cfg,
u32 dma_tx_phy, u32 dma_rx_phy,
u32 channel)
{
u32 value;
+ int txpbl = dma_cfg->txpbl ?: dma_cfg->pbl;
+ int rxpbl = dma_cfg->rxpbl ?: dma_cfg->pbl;
/* Set the PBL for each channel. Currently the same configuration
* is applied to every channel.
*/
value = readl(ioaddr + DMA_CHAN_CONTROL(channel));
- value = value | DMA_BUS_MODE_PBL;
+ if (dma_cfg->pblx8)
+ value = value | DMA_BUS_MODE_PBL;
writel(value, ioaddr + DMA_CHAN_CONTROL(channel));
value = readl(ioaddr + DMA_CHAN_TX_CONTROL(channel));
- value = value | (pbl << DMA_BUS_MODE_PBL_SHIFT);
+ value = value | (txpbl << DMA_BUS_MODE_PBL_SHIFT);
writel(value, ioaddr + DMA_CHAN_TX_CONTROL(channel));
value = readl(ioaddr + DMA_CHAN_RX_CONTROL(channel));
- value = value | (pbl << DMA_BUS_MODE_RPBL_SHIFT);
+ value = value | (rxpbl << DMA_BUS_MODE_RPBL_SHIFT);
writel(value, ioaddr + DMA_CHAN_RX_CONTROL(channel));
/* Mask interrupts by writing to CSR7 */
@@ -99,27 +103,28 @@ static void dwmac4_dma_init_channel(void __iomem *ioaddr, int pbl,
writel(dma_rx_phy, ioaddr + DMA_CHAN_RX_BASE_ADDR(channel));
}
-static void dwmac4_dma_init(void __iomem *ioaddr, int pbl, int fb, int mb,
- int aal, u32 dma_tx, u32 dma_rx, int atds)
+static void dwmac4_dma_init(void __iomem *ioaddr,
+ struct stmmac_dma_cfg *dma_cfg,
+ u32 dma_tx, u32 dma_rx, int atds)
{
u32 value = readl(ioaddr + DMA_SYS_BUS_MODE);
int i;
/* Set the Fixed burst mode */
- if (fb)
+ if (dma_cfg->fixed_burst)
value |= DMA_SYS_BUS_FB;
/* Mixed Burst has no effect when fb is set */
- if (mb)
+ if (dma_cfg->mixed_burst)
value |= DMA_SYS_BUS_MB;
- if (aal)
+ if (dma_cfg->aal)
value |= DMA_SYS_BUS_AAL;
writel(value, ioaddr + DMA_SYS_BUS_MODE);
for (i = 0; i < DMA_CHANNEL_NB_MAX; i++)
- dwmac4_dma_init_channel(ioaddr, pbl, dma_tx, dma_rx, i);
+ dwmac4_dma_init_channel(ioaddr, dma_cfg, dma_tx, dma_rx, i);
}
static void _dwmac4_dump_dma_regs(void __iomem *ioaddr, u32 channel)
@@ -215,7 +220,17 @@ static void dwmac4_dma_chan_op_mode(void __iomem *ioaddr, int txmode,
else
mtl_tx_op |= MTL_OP_MODE_TTC_512;
}
-
+ /* For an IP with DWC_EQOS_NUM_TXQ == 1, the fields TXQEN and TQS are RO
+ * with reset values: TXQEN on, TQS == DWC_EQOS_TXFIFO_SIZE.
+ * For an IP with DWC_EQOS_NUM_TXQ > 1, the fields TXQEN and TQS are R/W
+ * with reset values: TXQEN off, TQS 256 bytes.
+ *
+ * Write the bits in both cases, since doing so has no effect when RO.
+ * For DWC_EQOS_NUM_TXQ > 1, the top bits in MTL_OP_MODE_TQS_MASK might
+ * be RO; however, writing the whole TQS field still results in a value
+ * equal to DWC_EQOS_TXFIFO_SIZE, just like for DWC_EQOS_NUM_TXQ == 1.
+ */
+ mtl_tx_op |= MTL_OP_MODE_TXQEN | MTL_OP_MODE_TQS_MASK;
writel(mtl_tx_op, ioaddr + MTL_CHAN_TX_OP_MODE(channel));
mtl_rx_op = readl(ioaddr + MTL_CHAN_RX_OP_MODE(channel));
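All the dma_init callbacks in this patch now take the platform's DMA configuration as one struct instead of a growing argument list. Reconstructed from the fields the hunks actually touch, the shape is roughly the following; see include/linux/stmmac.h for the real definition:

    struct stmmac_dma_cfg {	/* sketch from usage above */
    	int pbl;	/* common programmable burst length */
    	int txpbl;	/* TX override, 0 = use pbl */
    	int rxpbl;	/* RX override, 0 = use pbl */
    	bool pblx8;	/* set the 8xPBL (MAXPBL) multiplier bit */
    	int fixed_burst;
    	int mixed_burst;
    	bool aal;	/* address-aligned beats */
    };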
diff --git a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
index e75549327c34..f0d86321dfe2 100644
--- a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
+++ b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
@@ -30,7 +30,7 @@ static int enh_desc_get_tx_status(void *data, struct stmmac_extra_stats *x,
struct dma_desc *p, void __iomem *ioaddr)
{
struct net_device_stats *stats = (struct net_device_stats *)data;
- unsigned int tdes0 = p->des0;
+ unsigned int tdes0 = le32_to_cpu(p->des0);
int ret = tx_done;
/* Get tx owner first */
@@ -95,7 +95,7 @@ static int enh_desc_get_tx_status(void *data, struct stmmac_extra_stats *x,
static int enh_desc_get_tx_len(struct dma_desc *p)
{
- return (p->des1 & ETDES1_BUFFER1_SIZE_MASK);
+ return (le32_to_cpu(p->des1) & ETDES1_BUFFER1_SIZE_MASK);
}
static int enh_desc_coe_rdes0(int ipc_err, int type, int payload_err)
@@ -134,8 +134,8 @@ static int enh_desc_coe_rdes0(int ipc_err, int type, int payload_err)
static void enh_desc_get_ext_status(void *data, struct stmmac_extra_stats *x,
struct dma_extended_desc *p)
{
- unsigned int rdes0 = p->basic.des0;
- unsigned int rdes4 = p->des4;
+ unsigned int rdes0 = le32_to_cpu(p->basic.des0);
+ unsigned int rdes4 = le32_to_cpu(p->des4);
if (unlikely(rdes0 & ERDES0_RX_MAC_ADDR)) {
int message_type = (rdes4 & ERDES4_MSG_TYPE_MASK) >> 8;
@@ -199,7 +199,7 @@ static int enh_desc_get_rx_status(void *data, struct stmmac_extra_stats *x,
struct dma_desc *p)
{
struct net_device_stats *stats = (struct net_device_stats *)data;
- unsigned int rdes0 = p->des0;
+ unsigned int rdes0 = le32_to_cpu(p->des0);
int ret = good_frame;
if (unlikely(rdes0 & RDES0_OWN))
@@ -265,8 +265,8 @@ static int enh_desc_get_rx_status(void *data, struct stmmac_extra_stats *x,
static void enh_desc_init_rx_desc(struct dma_desc *p, int disable_rx_ic,
int mode, int end)
{
- p->des0 |= RDES0_OWN;
- p->des1 |= ((BUF_SIZE_8KiB - 1) & ERDES1_BUFFER1_SIZE_MASK);
+ p->des0 |= cpu_to_le32(RDES0_OWN);
+ p->des1 |= cpu_to_le32((BUF_SIZE_8KiB - 1) & ERDES1_BUFFER1_SIZE_MASK);
if (mode == STMMAC_CHAIN_MODE)
ehn_desc_rx_set_on_chain(p);
@@ -274,12 +274,12 @@ static void enh_desc_init_rx_desc(struct dma_desc *p, int disable_rx_ic,
ehn_desc_rx_set_on_ring(p, end);
if (disable_rx_ic)
- p->des1 |= ERDES1_DISABLE_IC;
+ p->des1 |= cpu_to_le32(ERDES1_DISABLE_IC);
}
static void enh_desc_init_tx_desc(struct dma_desc *p, int mode, int end)
{
- p->des0 &= ~ETDES0_OWN;
+ p->des0 &= cpu_to_le32(~ETDES0_OWN);
if (mode == STMMAC_CHAIN_MODE)
enh_desc_end_tx_desc_on_chain(p);
else
@@ -288,27 +288,27 @@ static void enh_desc_init_tx_desc(struct dma_desc *p, int mode, int end)
static int enh_desc_get_tx_owner(struct dma_desc *p)
{
- return (p->des0 & ETDES0_OWN) >> 31;
+ return (le32_to_cpu(p->des0) & ETDES0_OWN) >> 31;
}
static void enh_desc_set_tx_owner(struct dma_desc *p)
{
- p->des0 |= ETDES0_OWN;
+ p->des0 |= cpu_to_le32(ETDES0_OWN);
}
static void enh_desc_set_rx_owner(struct dma_desc *p)
{
- p->des0 |= RDES0_OWN;
+ p->des0 |= cpu_to_le32(RDES0_OWN);
}
static int enh_desc_get_tx_ls(struct dma_desc *p)
{
- return (p->des0 & ETDES0_LAST_SEGMENT) >> 29;
+ return (le32_to_cpu(p->des0) & ETDES0_LAST_SEGMENT) >> 29;
}
static void enh_desc_release_tx_desc(struct dma_desc *p, int mode)
{
- int ter = (p->des0 & ETDES0_END_RING) >> 21;
+ int ter = (le32_to_cpu(p->des0) & ETDES0_END_RING) >> 21;
memset(p, 0, offsetof(struct dma_desc, des2));
if (mode == STMMAC_CHAIN_MODE)
@@ -321,7 +321,7 @@ static void enh_desc_prepare_tx_desc(struct dma_desc *p, int is_fs, int len,
bool csum_flag, int mode, bool tx_own,
bool ls)
{
- unsigned int tdes0 = p->des0;
+ unsigned int tdes0 = le32_to_cpu(p->des0);
if (mode == STMMAC_CHAIN_MODE)
enh_set_tx_desc_len_on_chain(p, len);
@@ -350,14 +350,14 @@ static void enh_desc_prepare_tx_desc(struct dma_desc *p, int is_fs, int len,
* descriptors for the same frame has to be set before, to
* avoid race condition.
*/
- wmb();
+ dma_wmb();
- p->des0 = tdes0;
+ p->des0 = cpu_to_le32(tdes0);
}
static void enh_desc_set_tx_ic(struct dma_desc *p)
{
- p->des0 |= ETDES0_INTERRUPT;
+ p->des0 |= cpu_to_le32(ETDES0_INTERRUPT);
}
static int enh_desc_get_rx_frame_len(struct dma_desc *p, int rx_coe_type)
@@ -372,18 +372,18 @@ static int enh_desc_get_rx_frame_len(struct dma_desc *p, int rx_coe_type)
if (rx_coe_type == STMMAC_RX_COE_TYPE1)
csum = 2;
- return (((p->des0 & RDES0_FRAME_LEN_MASK) >> RDES0_FRAME_LEN_SHIFT) -
- csum);
+ return (((le32_to_cpu(p->des0) & RDES0_FRAME_LEN_MASK)
+ >> RDES0_FRAME_LEN_SHIFT) - csum);
}
static void enh_desc_enable_tx_timestamp(struct dma_desc *p)
{
- p->des0 |= ETDES0_TIME_STAMP_ENABLE;
+ p->des0 |= cpu_to_le32(ETDES0_TIME_STAMP_ENABLE);
}
static int enh_desc_get_tx_timestamp_status(struct dma_desc *p)
{
- return (p->des0 & ETDES0_TIME_STAMP_STATUS) >> 17;
+ return (le32_to_cpu(p->des0) & ETDES0_TIME_STAMP_STATUS) >> 17;
}
static u64 enh_desc_get_timestamp(void *desc, u32 ats)
@@ -392,13 +392,13 @@ static u64 enh_desc_get_timestamp(void *desc, u32 ats)
if (ats) {
struct dma_extended_desc *p = (struct dma_extended_desc *)desc;
- ns = p->des6;
+ ns = le32_to_cpu(p->des6);
/* convert high/sec time stamp value to nanosecond */
- ns += p->des7 * 1000000000ULL;
+ ns += le32_to_cpu(p->des7) * 1000000000ULL;
} else {
struct dma_desc *p = (struct dma_desc *)desc;
- ns = p->des2;
- ns += p->des3 * 1000000000ULL;
+ ns = le32_to_cpu(p->des2);
+ ns += le32_to_cpu(p->des3) * 1000000000ULL;
}
return ns;
@@ -408,10 +408,11 @@ static int enh_desc_get_rx_timestamp_status(void *desc, u32 ats)
{
if (ats) {
struct dma_extended_desc *p = (struct dma_extended_desc *)desc;
- return (p->basic.des0 & RDES0_IPC_CSUM_ERROR) >> 7;
+ return (le32_to_cpu(p->basic.des0) & RDES0_IPC_CSUM_ERROR) >> 7;
} else {
struct dma_desc *p = (struct dma_desc *)desc;
- if ((p->des2 == 0xffffffff) && (p->des3 == 0xffffffff))
+ if ((le32_to_cpu(p->des2) == 0xffffffff) &&
+ (le32_to_cpu(p->des3) == 0xffffffff))
/* timestamp is corrupted, hence don't store it */
return 0;
else
diff --git a/drivers/net/ethernet/stmicro/stmmac/norm_desc.c b/drivers/net/ethernet/stmicro/stmmac/norm_desc.c
index 2beacd0d3043..fd78406e2e9a 100644
--- a/drivers/net/ethernet/stmicro/stmmac/norm_desc.c
+++ b/drivers/net/ethernet/stmicro/stmmac/norm_desc.c
@@ -30,8 +30,8 @@ static int ndesc_get_tx_status(void *data, struct stmmac_extra_stats *x,
struct dma_desc *p, void __iomem *ioaddr)
{
struct net_device_stats *stats = (struct net_device_stats *)data;
- unsigned int tdes0 = p->des0;
- unsigned int tdes1 = p->des1;
+ unsigned int tdes0 = le32_to_cpu(p->des0);
+ unsigned int tdes1 = le32_to_cpu(p->des1);
int ret = tx_done;
/* Get tx owner first */
@@ -77,7 +77,7 @@ static int ndesc_get_tx_status(void *data, struct stmmac_extra_stats *x,
static int ndesc_get_tx_len(struct dma_desc *p)
{
- return (p->des1 & RDES1_BUFFER1_SIZE_MASK);
+ return (le32_to_cpu(p->des1) & RDES1_BUFFER1_SIZE_MASK);
}
/* This function verifies if each incoming frame has some errors
@@ -88,7 +88,7 @@ static int ndesc_get_rx_status(void *data, struct stmmac_extra_stats *x,
struct dma_desc *p)
{
int ret = good_frame;
- unsigned int rdes0 = p->des0;
+ unsigned int rdes0 = le32_to_cpu(p->des0);
struct net_device_stats *stats = (struct net_device_stats *)data;
if (unlikely(rdes0 & RDES0_OWN))
@@ -141,8 +141,8 @@ static int ndesc_get_rx_status(void *data, struct stmmac_extra_stats *x,
static void ndesc_init_rx_desc(struct dma_desc *p, int disable_rx_ic, int mode,
int end)
{
- p->des0 |= RDES0_OWN;
- p->des1 |= (BUF_SIZE_2KiB - 1) & RDES1_BUFFER1_SIZE_MASK;
+ p->des0 |= cpu_to_le32(RDES0_OWN);
+ p->des1 |= cpu_to_le32((BUF_SIZE_2KiB - 1) & RDES1_BUFFER1_SIZE_MASK);
if (mode == STMMAC_CHAIN_MODE)
ndesc_rx_set_on_chain(p, end);
@@ -150,12 +150,12 @@ static void ndesc_init_rx_desc(struct dma_desc *p, int disable_rx_ic, int mode,
ndesc_rx_set_on_ring(p, end);
if (disable_rx_ic)
- p->des1 |= RDES1_DISABLE_IC;
+ p->des1 |= cpu_to_le32(RDES1_DISABLE_IC);
}
static void ndesc_init_tx_desc(struct dma_desc *p, int mode, int end)
{
- p->des0 &= ~TDES0_OWN;
+ p->des0 &= cpu_to_le32(~TDES0_OWN);
if (mode == STMMAC_CHAIN_MODE)
ndesc_tx_set_on_chain(p);
else
@@ -164,27 +164,27 @@ static void ndesc_init_tx_desc(struct dma_desc *p, int mode, int end)
static int ndesc_get_tx_owner(struct dma_desc *p)
{
- return (p->des0 & TDES0_OWN) >> 31;
+ return (le32_to_cpu(p->des0) & TDES0_OWN) >> 31;
}
static void ndesc_set_tx_owner(struct dma_desc *p)
{
- p->des0 |= TDES0_OWN;
+ p->des0 |= cpu_to_le32(TDES0_OWN);
}
static void ndesc_set_rx_owner(struct dma_desc *p)
{
- p->des0 |= RDES0_OWN;
+ p->des0 |= cpu_to_le32(RDES0_OWN);
}
static int ndesc_get_tx_ls(struct dma_desc *p)
{
- return (p->des1 & TDES1_LAST_SEGMENT) >> 30;
+ return (le32_to_cpu(p->des1) & TDES1_LAST_SEGMENT) >> 30;
}
static void ndesc_release_tx_desc(struct dma_desc *p, int mode)
{
- int ter = (p->des1 & TDES1_END_RING) >> 25;
+ int ter = (le32_to_cpu(p->des1) & TDES1_END_RING) >> 25;
memset(p, 0, offsetof(struct dma_desc, des2));
if (mode == STMMAC_CHAIN_MODE)
@@ -197,7 +197,7 @@ static void ndesc_prepare_tx_desc(struct dma_desc *p, int is_fs, int len,
bool csum_flag, int mode, bool tx_own,
bool ls)
{
- unsigned int tdes1 = p->des1;
+ unsigned int tdes1 = le32_to_cpu(p->des1);
if (is_fs)
tdes1 |= TDES1_FIRST_SEGMENT;
@@ -212,7 +212,7 @@ static void ndesc_prepare_tx_desc(struct dma_desc *p, int is_fs, int len,
if (ls)
tdes1 |= TDES1_LAST_SEGMENT;
- p->des1 = tdes1;
+ p->des1 = cpu_to_le32(tdes1);
if (mode == STMMAC_CHAIN_MODE)
norm_set_tx_desc_len_on_chain(p, len);
@@ -220,12 +220,12 @@ static void ndesc_prepare_tx_desc(struct dma_desc *p, int is_fs, int len,
norm_set_tx_desc_len_on_ring(p, len);
if (tx_own)
- p->des0 |= TDES0_OWN;
+ p->des0 |= cpu_to_le32(TDES0_OWN);
}
static void ndesc_set_tx_ic(struct dma_desc *p)
{
- p->des1 |= TDES1_INTERRUPT;
+ p->des1 |= cpu_to_le32(TDES1_INTERRUPT);
}
static int ndesc_get_rx_frame_len(struct dma_desc *p, int rx_coe_type)
@@ -241,19 +241,20 @@ static int ndesc_get_rx_frame_len(struct dma_desc *p, int rx_coe_type)
if (rx_coe_type == STMMAC_RX_COE_TYPE1)
csum = 2;
- return (((p->des0 & RDES0_FRAME_LEN_MASK) >> RDES0_FRAME_LEN_SHIFT) -
+ return (((le32_to_cpu(p->des0) & RDES0_FRAME_LEN_MASK)
+ >> RDES0_FRAME_LEN_SHIFT) -
csum);
}
static void ndesc_enable_tx_timestamp(struct dma_desc *p)
{
- p->des1 |= TDES1_TIME_STAMP_ENABLE;
+ p->des1 |= cpu_to_le32(TDES1_TIME_STAMP_ENABLE);
}
static int ndesc_get_tx_timestamp_status(struct dma_desc *p)
{
- return (p->des0 & TDES0_TIME_STAMP_STATUS) >> 17;
+ return (le32_to_cpu(p->des0) & TDES0_TIME_STAMP_STATUS) >> 17;
}
static u64 ndesc_get_timestamp(void *desc, u32 ats)
@@ -261,9 +262,9 @@ static u64 ndesc_get_timestamp(void *desc, u32 ats)
struct dma_desc *p = (struct dma_desc *)desc;
u64 ns;
- ns = p->des2;
+ ns = le32_to_cpu(p->des2);
/* convert high/sec time stamp value to nanosecond */
- ns += p->des3 * 1000000000ULL;
+ ns += le32_to_cpu(p->des3) * 1000000000ULL;
return ns;
}
@@ -272,7 +273,8 @@ static int ndesc_get_rx_timestamp_status(void *desc, u32 ats)
{
struct dma_desc *p = (struct dma_desc *)desc;
- if ((p->des2 == 0xffffffff) && (p->des3 == 0xffffffff))
+ if ((le32_to_cpu(p->des2) == 0xffffffff) &&
+ (le32_to_cpu(p->des3) == 0xffffffff))
/* timestamp is corrupted, hence don't store it */
return 0;
else
diff --git a/drivers/net/ethernet/stmicro/stmmac/ring_mode.c b/drivers/net/ethernet/stmicro/stmmac/ring_mode.c
index 7723b5d2499a..9983ce9bd90d 100644
--- a/drivers/net/ethernet/stmicro/stmmac/ring_mode.c
+++ b/drivers/net/ethernet/stmicro/stmmac/ring_mode.c
@@ -34,7 +34,7 @@ static int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
unsigned int entry = priv->cur_tx;
struct dma_desc *desc;
unsigned int nopaged_len = skb_headlen(skb);
- unsigned int bmax, len;
+ unsigned int bmax, len, des2;
if (priv->extend_desc)
desc = (struct dma_desc *)(priv->dma_etx + entry);
@@ -50,16 +50,17 @@ static int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
if (nopaged_len > BUF_SIZE_8KiB) {
- desc->des2 = dma_map_single(priv->device, skb->data,
- bmax, DMA_TO_DEVICE);
- if (dma_mapping_error(priv->device, desc->des2))
+ des2 = dma_map_single(priv->device, skb->data, bmax,
+ DMA_TO_DEVICE);
+ desc->des2 = cpu_to_le32(des2);
+ if (dma_mapping_error(priv->device, des2))
return -1;
- priv->tx_skbuff_dma[entry].buf = desc->des2;
+ priv->tx_skbuff_dma[entry].buf = des2;
priv->tx_skbuff_dma[entry].len = bmax;
priv->tx_skbuff_dma[entry].is_jumbo = true;
- desc->des3 = desc->des2 + BUF_SIZE_4KiB;
+ desc->des3 = cpu_to_le32(des2 + BUF_SIZE_4KiB);
priv->hw->desc->prepare_tx_desc(desc, 1, bmax, csum,
STMMAC_RING_MODE, 0, false);
priv->tx_skbuff[entry] = NULL;
@@ -70,26 +71,28 @@ static int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
else
desc = priv->dma_tx + entry;
- desc->des2 = dma_map_single(priv->device, skb->data + bmax,
- len, DMA_TO_DEVICE);
- if (dma_mapping_error(priv->device, desc->des2))
+ des2 = dma_map_single(priv->device, skb->data + bmax, len,
+ DMA_TO_DEVICE);
+ desc->des2 = cpu_to_le32(des2);
+ if (dma_mapping_error(priv->device, des2))
return -1;
- priv->tx_skbuff_dma[entry].buf = desc->des2;
+ priv->tx_skbuff_dma[entry].buf = des2;
priv->tx_skbuff_dma[entry].len = len;
priv->tx_skbuff_dma[entry].is_jumbo = true;
- desc->des3 = desc->des2 + BUF_SIZE_4KiB;
+ desc->des3 = cpu_to_le32(des2 + BUF_SIZE_4KiB);
priv->hw->desc->prepare_tx_desc(desc, 0, len, csum,
STMMAC_RING_MODE, 1, true);
} else {
- desc->des2 = dma_map_single(priv->device, skb->data,
- nopaged_len, DMA_TO_DEVICE);
- if (dma_mapping_error(priv->device, desc->des2))
+ des2 = dma_map_single(priv->device, skb->data,
+ nopaged_len, DMA_TO_DEVICE);
+ desc->des2 = cpu_to_le32(des2);
+ if (dma_mapping_error(priv->device, des2))
return -1;
- priv->tx_skbuff_dma[entry].buf = desc->des2;
+ priv->tx_skbuff_dma[entry].buf = des2;
priv->tx_skbuff_dma[entry].len = nopaged_len;
priv->tx_skbuff_dma[entry].is_jumbo = true;
- desc->des3 = desc->des2 + BUF_SIZE_4KiB;
+ desc->des3 = cpu_to_le32(des2 + BUF_SIZE_4KiB);
priv->hw->desc->prepare_tx_desc(desc, 1, nopaged_len, csum,
STMMAC_RING_MODE, 0, true);
}
@@ -115,13 +118,13 @@ static void stmmac_refill_desc3(void *priv_ptr, struct dma_desc *p)
/* Fill DES3 in case of RING mode */
if (priv->dma_buf_sz >= BUF_SIZE_8KiB)
- p->des3 = p->des2 + BUF_SIZE_8KiB;
+ p->des3 = cpu_to_le32(le32_to_cpu(p->des2) + BUF_SIZE_8KiB);
}
/* In ring mode we need to fill desc3 because it is used as a buffer */
static void stmmac_init_desc3(struct dma_desc *p)
{
- p->des3 = p->des2 + BUF_SIZE_8KiB;
+ p->des3 = cpu_to_le32(le32_to_cpu(p->des2) + BUF_SIZE_8KiB);
}
static void stmmac_clean_desc3(void *priv_ptr, struct dma_desc *p)
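The local des2 variable introduced above is not cosmetic: once desc->des2 holds a cpu_to_le32() value, dma_mapping_error() must be given the CPU-order dma_addr_t, not a byte-swapped readback of the descriptor. The resulting pattern, condensed from the hunks (on this 32-bit-descriptor hardware the address fits an unsigned int; dma_addr_t is used here for generality):

    dma_addr_t des2;

    des2 = dma_map_single(priv->device, skb->data, bmax, DMA_TO_DEVICE);
    desc->des2 = cpu_to_le32(des2);		/* descriptor word is __le32 */
    if (dma_mapping_error(priv->device, des2))	/* check the CPU-order value */
    	return -1;
    priv->tx_skbuff_dma[entry].buf = des2;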
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
index 4d2a759b8465..eab04aeeeb95 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
@@ -64,7 +64,6 @@ struct stmmac_priv {
dma_addr_t dma_tx_phy;
int tx_coalesce;
int hwts_tx_en;
- spinlock_t tx_lock;
bool tx_path_in_lpi_mode;
struct timer_list txtimer;
bool tso;
@@ -90,7 +89,6 @@ struct stmmac_priv {
struct mac_device_info *hw;
spinlock_t lock;
- struct phy_device *phydev ____cacheline_aligned_in_smp;
int oldlink;
int speed;
int oldduplex;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
index c5d0142adda2..699ee1d30426 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
@@ -263,7 +263,7 @@ static void stmmac_ethtool_getdrvinfo(struct net_device *dev,
{
struct stmmac_priv *priv = netdev_priv(dev);
- if (priv->plat->has_gmac)
+ if (priv->plat->has_gmac || priv->plat->has_gmac4)
strlcpy(info->driver, GMAC_ETHTOOL_NAME, sizeof(info->driver));
else
strlcpy(info->driver, MAC100_ETHTOOL_NAME,
@@ -272,25 +272,26 @@ static void stmmac_ethtool_getdrvinfo(struct net_device *dev,
strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
}
-static int stmmac_ethtool_getsettings(struct net_device *dev,
- struct ethtool_cmd *cmd)
+static int stmmac_ethtool_get_link_ksettings(struct net_device *dev,
+ struct ethtool_link_ksettings *cmd)
{
struct stmmac_priv *priv = netdev_priv(dev);
- struct phy_device *phy = priv->phydev;
+ struct phy_device *phy = dev->phydev;
int rc;
if (priv->hw->pcs & STMMAC_PCS_RGMII ||
priv->hw->pcs & STMMAC_PCS_SGMII) {
struct rgmii_adv adv;
+ u32 supported, advertising, lp_advertising;
if (!priv->xstats.pcs_link) {
- ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
- cmd->duplex = DUPLEX_UNKNOWN;
+ cmd->base.speed = SPEED_UNKNOWN;
+ cmd->base.duplex = DUPLEX_UNKNOWN;
return 0;
}
- cmd->duplex = priv->xstats.pcs_duplex;
+ cmd->base.duplex = priv->xstats.pcs_duplex;
- ethtool_cmd_speed_set(cmd, priv->xstats.pcs_speed);
+ cmd->base.speed = priv->xstats.pcs_speed;
/* Get and convert ADV/LP_ADV from the HW AN registers */
if (!priv->hw->mac->pcs_get_adv_lp)
@@ -300,45 +301,59 @@ static int stmmac_ethtool_getsettings(struct net_device *dev,
/* Encoding of PSE bits is defined in 802.3z, 37.2.1.4 */
+ ethtool_convert_link_mode_to_legacy_u32(
+ &supported, cmd->link_modes.supported);
+ ethtool_convert_link_mode_to_legacy_u32(
+ &advertising, cmd->link_modes.advertising);
+ ethtool_convert_link_mode_to_legacy_u32(
+ &lp_advertising, cmd->link_modes.lp_advertising);
+
if (adv.pause & STMMAC_PCS_PAUSE)
- cmd->advertising |= ADVERTISED_Pause;
+ advertising |= ADVERTISED_Pause;
if (adv.pause & STMMAC_PCS_ASYM_PAUSE)
- cmd->advertising |= ADVERTISED_Asym_Pause;
+ advertising |= ADVERTISED_Asym_Pause;
if (adv.lp_pause & STMMAC_PCS_PAUSE)
- cmd->lp_advertising |= ADVERTISED_Pause;
+ lp_advertising |= ADVERTISED_Pause;
if (adv.lp_pause & STMMAC_PCS_ASYM_PAUSE)
- cmd->lp_advertising |= ADVERTISED_Asym_Pause;
+ lp_advertising |= ADVERTISED_Asym_Pause;
/* Reg49[3] always set because ANE is always supported */
- cmd->autoneg = ADVERTISED_Autoneg;
- cmd->supported |= SUPPORTED_Autoneg;
- cmd->advertising |= ADVERTISED_Autoneg;
- cmd->lp_advertising |= ADVERTISED_Autoneg;
+ cmd->base.autoneg = ADVERTISED_Autoneg;
+ supported |= SUPPORTED_Autoneg;
+ advertising |= ADVERTISED_Autoneg;
+ lp_advertising |= ADVERTISED_Autoneg;
if (adv.duplex) {
- cmd->supported |= (SUPPORTED_1000baseT_Full |
- SUPPORTED_100baseT_Full |
- SUPPORTED_10baseT_Full);
- cmd->advertising |= (ADVERTISED_1000baseT_Full |
- ADVERTISED_100baseT_Full |
- ADVERTISED_10baseT_Full);
+ supported |= (SUPPORTED_1000baseT_Full |
+ SUPPORTED_100baseT_Full |
+ SUPPORTED_10baseT_Full);
+ advertising |= (ADVERTISED_1000baseT_Full |
+ ADVERTISED_100baseT_Full |
+ ADVERTISED_10baseT_Full);
} else {
- cmd->supported |= (SUPPORTED_1000baseT_Half |
- SUPPORTED_100baseT_Half |
- SUPPORTED_10baseT_Half);
- cmd->advertising |= (ADVERTISED_1000baseT_Half |
- ADVERTISED_100baseT_Half |
- ADVERTISED_10baseT_Half);
+ supported |= (SUPPORTED_1000baseT_Half |
+ SUPPORTED_100baseT_Half |
+ SUPPORTED_10baseT_Half);
+ advertising |= (ADVERTISED_1000baseT_Half |
+ ADVERTISED_100baseT_Half |
+ ADVERTISED_10baseT_Half);
}
if (adv.lp_duplex)
- cmd->lp_advertising |= (ADVERTISED_1000baseT_Full |
- ADVERTISED_100baseT_Full |
- ADVERTISED_10baseT_Full);
+ lp_advertising |= (ADVERTISED_1000baseT_Full |
+ ADVERTISED_100baseT_Full |
+ ADVERTISED_10baseT_Full);
else
- cmd->lp_advertising |= (ADVERTISED_1000baseT_Half |
- ADVERTISED_100baseT_Half |
- ADVERTISED_10baseT_Half);
- cmd->port = PORT_OTHER;
+ lp_advertising |= (ADVERTISED_1000baseT_Half |
+ ADVERTISED_100baseT_Half |
+ ADVERTISED_10baseT_Half);
+ cmd->base.port = PORT_OTHER;
+
+ ethtool_convert_legacy_u32_to_link_mode(
+ cmd->link_modes.supported, supported);
+ ethtool_convert_legacy_u32_to_link_mode(
+ cmd->link_modes.advertising, advertising);
+ ethtool_convert_legacy_u32_to_link_mode(
+ cmd->link_modes.lp_advertising, lp_advertising);
return 0;
}
@@ -353,16 +368,16 @@ static int stmmac_ethtool_getsettings(struct net_device *dev,
"link speed / duplex setting\n", dev->name);
return -EBUSY;
}
- cmd->transceiver = XCVR_INTERNAL;
- rc = phy_ethtool_gset(phy, cmd);
+ rc = phy_ethtool_ksettings_get(phy, cmd);
return rc;
}
-static int stmmac_ethtool_setsettings(struct net_device *dev,
- struct ethtool_cmd *cmd)
+static int
+stmmac_ethtool_set_link_ksettings(struct net_device *dev,
+ const struct ethtool_link_ksettings *cmd)
{
struct stmmac_priv *priv = netdev_priv(dev);
- struct phy_device *phy = priv->phydev;
+ struct phy_device *phy = dev->phydev;
int rc;
if (priv->hw->pcs & STMMAC_PCS_RGMII ||
@@ -370,7 +385,7 @@ static int stmmac_ethtool_setsettings(struct net_device *dev,
u32 mask = ADVERTISED_Autoneg | ADVERTISED_Pause;
/* Only support ANE */
- if (cmd->autoneg != AUTONEG_ENABLE)
+ if (cmd->base.autoneg != AUTONEG_ENABLE)
return -EINVAL;
mask &= (ADVERTISED_1000baseT_Half |
@@ -391,9 +406,7 @@ static int stmmac_ethtool_setsettings(struct net_device *dev,
return 0;
}
- spin_lock(&priv->lock);
- rc = phy_ethtool_sset(phy, cmd);
- spin_unlock(&priv->lock);
+ rc = phy_ethtool_ksettings_set(phy, cmd);
return rc;
}
@@ -433,7 +446,7 @@ static void stmmac_ethtool_gregs(struct net_device *dev,
memset(reg_space, 0x0, REG_SPACE_SIZE);
- if (!priv->plat->has_gmac) {
+ if (!(priv->plat->has_gmac || priv->plat->has_gmac4)) {
/* MAC registers */
for (i = 0; i < 12; i++)
reg_space[i] = readl(priv->ioaddr + (i * 4));
@@ -471,12 +484,12 @@ stmmac_get_pauseparam(struct net_device *netdev,
if (!adv_lp.pause)
return;
} else {
- if (!(priv->phydev->supported & SUPPORTED_Pause) ||
- !(priv->phydev->supported & SUPPORTED_Asym_Pause))
+ if (!(netdev->phydev->supported & SUPPORTED_Pause) ||
+ !(netdev->phydev->supported & SUPPORTED_Asym_Pause))
return;
}
- pause->autoneg = priv->phydev->autoneg;
+ pause->autoneg = netdev->phydev->autoneg;
if (priv->flow_ctrl & FLOW_RX)
pause->rx_pause = 1;
@@ -490,7 +503,7 @@ stmmac_set_pauseparam(struct net_device *netdev,
struct ethtool_pauseparam *pause)
{
struct stmmac_priv *priv = netdev_priv(netdev);
- struct phy_device *phy = priv->phydev;
+ struct phy_device *phy = netdev->phydev;
int new_pause = FLOW_OFF;
if (priv->hw->pcs && priv->hw->mac->pcs_get_adv_lp) {
@@ -550,7 +563,7 @@ static void stmmac_get_ethtool_stats(struct net_device *dev,
}
}
if (priv->eee_enabled) {
- int val = phy_get_eee_err(priv->phydev);
+ int val = phy_get_eee_err(dev->phydev);
if (val)
priv->xstats.phy_eee_wakeup_error_n = val;
}
@@ -669,7 +682,7 @@ static int stmmac_ethtool_op_get_eee(struct net_device *dev,
edata->eee_active = priv->eee_active;
edata->tx_lpi_timer = priv->tx_lpi_timer;
- return phy_ethtool_get_eee(priv->phydev, edata);
+ return phy_ethtool_get_eee(dev->phydev, edata);
}
static int stmmac_ethtool_op_set_eee(struct net_device *dev,
@@ -694,7 +707,7 @@ static int stmmac_ethtool_op_set_eee(struct net_device *dev,
priv->tx_lpi_timer = edata->tx_lpi_timer;
}
- return phy_ethtool_set_eee(priv->phydev, edata);
+ return phy_ethtool_set_eee(dev->phydev, edata);
}
static u32 stmmac_usec2riwt(u32 usec, struct stmmac_priv *priv)
@@ -853,13 +866,12 @@ static int stmmac_set_tunable(struct net_device *dev,
static const struct ethtool_ops stmmac_ethtool_ops = {
.begin = stmmac_check_if_running,
.get_drvinfo = stmmac_ethtool_getdrvinfo,
- .get_settings = stmmac_ethtool_getsettings,
- .set_settings = stmmac_ethtool_setsettings,
.get_msglevel = stmmac_ethtool_getmsglevel,
.set_msglevel = stmmac_ethtool_setmsglevel,
.get_regs = stmmac_ethtool_gregs,
.get_regs_len = stmmac_ethtool_get_regs_len,
.get_link = ethtool_op_get_link,
+ .nway_reset = phy_ethtool_nway_reset,
.get_pauseparam = stmmac_get_pauseparam,
.set_pauseparam = stmmac_set_pauseparam,
.get_ethtool_stats = stmmac_get_ethtool_stats,
@@ -874,6 +886,8 @@ static const struct ethtool_ops stmmac_ethtool_ops = {
.set_coalesce = stmmac_set_coalesce,
.get_tunable = stmmac_get_tunable,
.set_tunable = stmmac_set_tunable,
+ .get_link_ksettings = stmmac_ethtool_get_link_ksettings,
+ .set_link_ksettings = stmmac_ethtool_set_link_ksettings,
};
void stmmac_set_ethtool_ops(struct net_device *netdev)
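The get_settings/set_settings pair is replaced by the link_ksettings API, which keeps link modes in bitmaps wider than 32 bits; legacy SUPPORTED_*/ADVERTISED_* masks are bridged through the two convert helpers, exactly as the PCS branch above does. A condensed sketch of the read side (my_get_link_ksettings and the fixed speed/duplex values are illustrative only):

    #include <linux/ethtool.h>
    #include <linux/netdevice.h>

    static int my_get_link_ksettings(struct net_device *dev,
    				 struct ethtool_link_ksettings *cmd)
    {
    	u32 supported, advertising;

    	/* Bitmap -> legacy u32, tweak, and back again. */
    	ethtool_convert_link_mode_to_legacy_u32(&supported,
    						cmd->link_modes.supported);
    	ethtool_convert_link_mode_to_legacy_u32(&advertising,
    						cmd->link_modes.advertising);

    	supported |= SUPPORTED_Autoneg;
    	advertising |= ADVERTISED_Autoneg;
    	cmd->base.autoneg = AUTONEG_ENABLE;
    	cmd->base.speed = SPEED_1000;
    	cmd->base.duplex = DUPLEX_FULL;

    	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
    						supported);
    	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
    						advertising);
    	return 0;
    }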
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index caf069a465f2..bb40382e205d 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -105,8 +105,8 @@ module_param(eee_timer, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec");
#define STMMAC_LPI_T(x) (jiffies + msecs_to_jiffies(x))
-/* By default the driver will use the ring mode to manage tx and rx descriptors
- * but passing this value so user can force to use the chain instead of the ring
+/* By default the driver will use the ring mode to manage tx and rx descriptors,
+ * but the user can force use of the chain mode instead of the ring
*/
static unsigned int chain_mode;
module_param(chain_mode, int, S_IRUGO);
@@ -221,7 +221,8 @@ static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv)
*/
static inline void stmmac_hw_fix_mac_speed(struct stmmac_priv *priv)
{
- struct phy_device *phydev = priv->phydev;
+ struct net_device *ndev = priv->dev;
+ struct phy_device *phydev = ndev->phydev;
if (likely(priv->plat->fix_mac_speed))
priv->plat->fix_mac_speed(priv->plat->bsp_priv, phydev->speed);
@@ -279,6 +280,7 @@ static void stmmac_eee_ctrl_timer(unsigned long arg)
*/
bool stmmac_eee_init(struct stmmac_priv *priv)
{
+ struct net_device *ndev = priv->dev;
unsigned long flags;
bool ret = false;
@@ -295,7 +297,7 @@ bool stmmac_eee_init(struct stmmac_priv *priv)
int tx_lpi_timer = priv->tx_lpi_timer;
/* Check if the PHY supports EEE */
- if (phy_init_eee(priv->phydev, 1)) {
+ if (phy_init_eee(ndev->phydev, 1)) {
/* To manage at run-time if the EEE cannot be supported
* anymore (for example because the lp caps have been
* changed).
@@ -303,7 +305,7 @@ bool stmmac_eee_init(struct stmmac_priv *priv)
*/
spin_lock_irqsave(&priv->lock, flags);
if (priv->eee_active) {
- pr_debug("stmmac: disable EEE\n");
+ netdev_dbg(priv->dev, "disable EEE\n");
del_timer_sync(&priv->eee_ctrl_timer);
priv->hw->mac->set_eee_timer(priv->hw, 0,
tx_lpi_timer);
@@ -327,12 +329,12 @@ bool stmmac_eee_init(struct stmmac_priv *priv)
tx_lpi_timer);
}
/* Set HW EEE according to the speed */
- priv->hw->mac->set_eee_pls(priv->hw, priv->phydev->link);
+ priv->hw->mac->set_eee_pls(priv->hw, ndev->phydev->link);
ret = true;
spin_unlock_irqrestore(&priv->lock, flags);
- pr_debug("stmmac: Energy-Efficient Ethernet initialized\n");
+ netdev_dbg(priv->dev, "Energy-Efficient Ethernet initialized\n");
}
out:
return ret;
@@ -450,8 +452,8 @@ static int stmmac_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr)
sizeof(struct hwtstamp_config)))
return -EFAULT;
- pr_debug("%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n",
- __func__, config.flags, config.tx_type, config.rx_filter);
+ netdev_dbg(priv->dev, "%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n",
+ __func__, config.flags, config.tx_type, config.rx_filter);
/* reserved for future extensions */
if (config.flags)
@@ -697,7 +699,7 @@ static void stmmac_release_ptp(struct stmmac_priv *priv)
static void stmmac_adjust_link(struct net_device *dev)
{
struct stmmac_priv *priv = netdev_priv(dev);
- struct phy_device *phydev = priv->phydev;
+ struct phy_device *phydev = dev->phydev;
unsigned long flags;
int new_state = 0;
unsigned int fc = priv->flow_ctrl, pause_time = priv->pause;
@@ -750,9 +752,9 @@ static void stmmac_adjust_link(struct net_device *dev)
stmmac_hw_fix_mac_speed(priv);
break;
default:
- if (netif_msg_link(priv))
- pr_warn("%s: Speed (%d) not 10/100\n",
- dev->name, phydev->speed);
+ netif_warn(priv, link, priv->dev,
+ "Speed (%d) not 10/100\n",
+ phydev->speed);
break;
}
@@ -805,10 +807,10 @@ static void stmmac_check_pcs_mode(struct stmmac_priv *priv)
(interface == PHY_INTERFACE_MODE_RGMII_ID) ||
(interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
(interface == PHY_INTERFACE_MODE_RGMII_TXID)) {
- pr_debug("STMMAC: PCS RGMII support enable\n");
+ netdev_dbg(priv->dev, "PCS RGMII support enabled\n");
priv->hw->pcs = STMMAC_PCS_RGMII;
} else if (interface == PHY_INTERFACE_MODE_SGMII) {
- pr_debug("STMMAC: PCS SGMII support enable\n");
+ netdev_dbg(priv->dev, "PCS SGMII support enabled\n");
priv->hw->pcs = STMMAC_PCS_SGMII;
}
}
@@ -843,15 +845,15 @@ static int stmmac_init_phy(struct net_device *dev)
snprintf(phy_id_fmt, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, bus_id,
priv->plat->phy_addr);
- pr_debug("stmmac_init_phy: trying to attach to %s\n",
- phy_id_fmt);
+ netdev_dbg(priv->dev, "%s: trying to attach to %s\n", __func__,
+ phy_id_fmt);
phydev = phy_connect(dev, phy_id_fmt, &stmmac_adjust_link,
interface);
}
if (IS_ERR_OR_NULL(phydev)) {
- pr_err("%s: Could not attach to PHY\n", dev->name);
+ netdev_err(priv->dev, "Could not attach to PHY\n");
if (!phydev)
return -ENODEV;
@@ -884,10 +886,8 @@ static int stmmac_init_phy(struct net_device *dev)
if (phydev->is_pseudo_fixed_link)
phydev->irq = PHY_POLL;
- pr_debug("stmmac_init_phy: %s: attached to PHY (UID 0x%x)"
- " Link = %d\n", dev->name, phydev->phy_id, phydev->link);
-
- priv->phydev = phydev;
+ netdev_dbg(priv->dev, "%s: attached to PHY (UID 0x%x) Link = %d\n",
+ __func__, phydev->phy_id, phydev->link);
return 0;
}
@@ -973,7 +973,8 @@ static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
skb = __netdev_alloc_skb_ip_align(priv->dev, priv->dma_buf_sz, flags);
if (!skb) {
- pr_err("%s: Rx init fails; skb is NULL\n", __func__);
+ netdev_err(priv->dev,
+ "%s: Rx init fails; skb is NULL\n", __func__);
return -ENOMEM;
}
priv->rx_skbuff[i] = skb;
@@ -981,15 +982,15 @@ static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
priv->dma_buf_sz,
DMA_FROM_DEVICE);
if (dma_mapping_error(priv->device, priv->rx_skbuff_dma[i])) {
- pr_err("%s: DMA mapping error\n", __func__);
+ netdev_err(priv->dev, "%s: DMA mapping error\n", __func__);
dev_kfree_skb_any(skb);
return -EINVAL;
}
if (priv->synopsys_id >= DWMAC_CORE_4_00)
- p->des0 = priv->rx_skbuff_dma[i];
+ p->des0 = cpu_to_le32(priv->rx_skbuff_dma[i]);
else
- p->des2 = priv->rx_skbuff_dma[i];
+ p->des2 = cpu_to_le32(priv->rx_skbuff_dma[i]);
if ((priv->hw->mode->init_desc3) &&
(priv->dma_buf_sz == BUF_SIZE_16KiB))
@@ -1031,13 +1032,14 @@ static int init_dma_desc_rings(struct net_device *dev, gfp_t flags)
priv->dma_buf_sz = bfsize;
- if (netif_msg_probe(priv)) {
- pr_debug("(%s) dma_rx_phy=0x%08x dma_tx_phy=0x%08x\n", __func__,
- (u32) priv->dma_rx_phy, (u32) priv->dma_tx_phy);
+ netif_dbg(priv, probe, priv->dev,
+ "(%s) dma_rx_phy=0x%08x dma_tx_phy=0x%08x\n",
+ __func__, (u32)priv->dma_rx_phy, (u32)priv->dma_tx_phy);
+
+ /* RX INITIALIZATION */
+ netif_dbg(priv, probe, priv->dev,
+ "SKB addresses:\nskb\t\tskb data\tdma data\n");
- /* RX INITIALIZATION */
- pr_debug("\tSKB addresses:\nskb\t\tskb data\tdma data\n");
- }
for (i = 0; i < DMA_RX_SIZE; i++) {
struct dma_desc *p;
if (priv->extend_desc)
@@ -1049,10 +1051,9 @@ static int init_dma_desc_rings(struct net_device *dev, gfp_t flags)
if (ret)
goto err_init_rx_buffers;
- if (netif_msg_probe(priv))
- pr_debug("[%p]\t[%p]\t[%x]\n", priv->rx_skbuff[i],
- priv->rx_skbuff[i]->data,
- (unsigned int)priv->rx_skbuff_dma[i]);
+ netif_dbg(priv, probe, priv->dev, "[%p]\t[%p]\t[%x]\n",
+ priv->rx_skbuff[i], priv->rx_skbuff[i]->data,
+ (unsigned int)priv->rx_skbuff_dma[i]);
}
priv->cur_rx = 0;
priv->dirty_rx = (unsigned int)(i - DMA_RX_SIZE);
@@ -1307,7 +1308,7 @@ static void stmmac_tx_clean(struct stmmac_priv *priv)
unsigned int bytes_compl = 0, pkts_compl = 0;
unsigned int entry = priv->dirty_tx;
- spin_lock(&priv->tx_lock);
+ netif_tx_lock(priv->dev);
priv->xstats.tx_clean++;
@@ -1378,22 +1379,17 @@ static void stmmac_tx_clean(struct stmmac_priv *priv)
netdev_completed_queue(priv->dev, pkts_compl, bytes_compl);
if (unlikely(netif_queue_stopped(priv->dev) &&
- stmmac_tx_avail(priv) > STMMAC_TX_THRESH)) {
- netif_tx_lock(priv->dev);
- if (netif_queue_stopped(priv->dev) &&
- stmmac_tx_avail(priv) > STMMAC_TX_THRESH) {
- if (netif_msg_tx_done(priv))
- pr_debug("%s: restart transmit\n", __func__);
- netif_wake_queue(priv->dev);
- }
- netif_tx_unlock(priv->dev);
+ stmmac_tx_avail(priv) > STMMAC_TX_THRESH)) {
+ netif_dbg(priv, tx_done, priv->dev,
+ "%s: restart transmit\n", __func__);
+ netif_wake_queue(priv->dev);
}
if ((priv->eee_enabled) && (!priv->tx_path_in_lpi_mode)) {
stmmac_enable_eee_mode(priv);
mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer));
}
- spin_unlock(&priv->tx_lock);
+ netif_tx_unlock(priv->dev);
}
static inline void stmmac_enable_dma_irq(struct stmmac_priv *priv)
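With the private tx_lock gone, the cleanup path serializes against the transmit path through the stack's own queue lock, so restarting the queue no longer needs the nested lock/re-check dance deleted above. The shape, with tx_ring_has_room() as a hypothetical placeholder for the driver's availability check:

    static bool tx_ring_has_room(struct net_device *dev);	/* placeholder */

    static void my_tx_clean(struct net_device *dev)
    {
    	netif_tx_lock(dev);	/* same lock the core xmit path takes */

    	/* ... reclaim completed descriptors ... */

    	if (netif_queue_stopped(dev) && tx_ring_has_room(dev))
    		netif_wake_queue(dev);	/* queue state is stable here */

    	netif_tx_unlock(dev);
    }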
@@ -1497,7 +1493,7 @@ static void stmmac_mmc_setup(struct stmmac_priv *priv)
dwmac_mmc_ctrl(priv->mmcaddr, mode);
memset(&priv->mmc, 0, sizeof(struct stmmac_counters));
} else
- pr_info(" No MAC Management Counters available\n");
+ netdev_info(priv->dev, "No MAC Management Counters available\n");
}
/**
@@ -1510,18 +1506,18 @@ static void stmmac_mmc_setup(struct stmmac_priv *priv)
static void stmmac_selec_desc_mode(struct stmmac_priv *priv)
{
if (priv->plat->enh_desc) {
- pr_info(" Enhanced/Alternate descriptors\n");
+ dev_info(priv->device, "Enhanced/Alternate descriptors\n");
/* GMAC older than 3.50 has no extended descriptors */
if (priv->synopsys_id >= DWMAC_CORE_3_50) {
- pr_info("\tEnabled extended descriptors\n");
+ dev_info(priv->device, "Enabled extended descriptors\n");
priv->extend_desc = 1;
} else
- pr_warn("Extended descriptors not supported\n");
+ dev_warn(priv->device, "Extended descriptors not supported\n");
priv->hw->desc = &enh_desc_ops;
} else {
- pr_info(" Normal descriptors\n");
+ dev_info(priv->device, "Normal descriptors\n");
priv->hw->desc = &ndesc_ops;
}
}
@@ -1562,8 +1558,8 @@ static void stmmac_check_ether_addr(struct stmmac_priv *priv)
priv->dev->dev_addr, 0);
if (!is_valid_ether_addr(priv->dev->dev_addr))
eth_hw_addr_random(priv->dev);
- pr_info("%s: device MAC address %pM\n", priv->dev->name,
- priv->dev->dev_addr);
+ netdev_info(priv->dev, "device MAC address %pM\n",
+ priv->dev->dev_addr);
}
}
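The pr_* to netdev_*/netif_*/dev_* conversion running through this file buys two things: each message is automatically prefixed with the interface (or device) name, and the netif_* variants are additionally gated on the per-device message-level bitmap. Illustrative lines, with ndev/priv/rx_phy standing in for the driver's own variables:

    netdev_err(ndev, "DMA engine initialization failed\n");
    netdev_dbg(ndev, "%s: attached to PHY\n", __func__);

    /* netif_dbg(priv, type, dev, ...) also checks priv->msg_enable
     * against the NETIF_MSG_<TYPE> bit before printing:
     */
    netif_dbg(priv, probe, priv->dev, "dma_rx_phy=0x%08x\n", (u32)rx_phy);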
@@ -1577,16 +1573,12 @@ static void stmmac_check_ether_addr(struct stmmac_priv *priv)
*/
static int stmmac_init_dma_engine(struct stmmac_priv *priv)
{
- int pbl = DEFAULT_DMA_PBL, fixed_burst = 0, aal = 0;
- int mixed_burst = 0;
int atds = 0;
int ret = 0;
- if (priv->plat->dma_cfg) {
- pbl = priv->plat->dma_cfg->pbl;
- fixed_burst = priv->plat->dma_cfg->fixed_burst;
- mixed_burst = priv->plat->dma_cfg->mixed_burst;
- aal = priv->plat->dma_cfg->aal;
+ if (!priv->plat->dma_cfg || !priv->plat->dma_cfg->pbl) {
+ dev_err(priv->device, "Invalid DMA configuration\n");
+ return -EINVAL;
}
if (priv->extend_desc && (priv->mode == STMMAC_RING_MODE))
@@ -1598,8 +1590,8 @@ static int stmmac_init_dma_engine(struct stmmac_priv *priv)
return ret;
}
- priv->hw->dma->init(priv->ioaddr, pbl, fixed_burst, mixed_burst,
- aal, priv->dma_tx_phy, priv->dma_rx_phy, atds);
+ priv->hw->dma->init(priv->ioaddr, priv->plat->dma_cfg,
+ priv->dma_tx_phy, priv->dma_rx_phy, atds);
if (priv->synopsys_id >= DWMAC_CORE_4_00) {
priv->rx_tail_addr = priv->dma_rx_phy +
@@ -1671,7 +1663,8 @@ static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
/* DMA initialization and SW reset */
ret = stmmac_init_dma_engine(priv);
if (ret < 0) {
- pr_err("%s: DMA engine initialization failed\n", __func__);
+ netdev_err(priv->dev, "%s: DMA engine initialization failed\n",
+ __func__);
return ret;
}
@@ -1700,7 +1693,7 @@ static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
ret = priv->hw->mac->rx_ipc(priv->hw);
if (!ret) {
- pr_warn(" RX IPC Checksum Offload disabled\n");
+ netdev_warn(priv->dev, "RX IPC Checksum Offload disabled\n");
priv->plat->rx_coe = STMMAC_RX_COE_NONE;
priv->hw->rx_csum = 0;
}
@@ -1725,10 +1718,11 @@ static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
#ifdef CONFIG_DEBUG_FS
ret = stmmac_init_fs(dev);
if (ret < 0)
- pr_warn("%s: failed debugFS registration\n", __func__);
+ netdev_warn(priv->dev, "%s: failed debugFS registration\n",
+ __func__);
#endif
/* Start the ball rolling... */
- pr_debug("%s: DMA RX/TX processes started...\n", dev->name);
+ netdev_dbg(priv->dev, "DMA RX/TX processes started...\n");
priv->hw->dma->start_tx(priv->ioaddr);
priv->hw->dma->start_rx(priv->ioaddr);
@@ -1783,8 +1777,9 @@ static int stmmac_open(struct net_device *dev)
priv->hw->pcs != STMMAC_PCS_RTBI) {
ret = stmmac_init_phy(dev);
if (ret) {
- pr_err("%s: Cannot attach to PHY (error: %d)\n",
- __func__, ret);
+ netdev_err(priv->dev,
+ "%s: Cannot attach to PHY (error: %d)\n",
+ __func__, ret);
return ret;
}
}
@@ -1798,33 +1793,36 @@ static int stmmac_open(struct net_device *dev)
ret = alloc_dma_desc_resources(priv);
if (ret < 0) {
- pr_err("%s: DMA descriptors allocation failed\n", __func__);
+ netdev_err(priv->dev, "%s: DMA descriptors allocation failed\n",
+ __func__);
goto dma_desc_error;
}
ret = init_dma_desc_rings(dev, GFP_KERNEL);
if (ret < 0) {
- pr_err("%s: DMA descriptors initialization failed\n", __func__);
+ netdev_err(priv->dev, "%s: DMA descriptors initialization failed\n",
+ __func__);
goto init_error;
}
ret = stmmac_hw_setup(dev, true);
if (ret < 0) {
- pr_err("%s: Hw setup failed\n", __func__);
+ netdev_err(priv->dev, "%s: Hw setup failed\n", __func__);
goto init_error;
}
stmmac_init_tx_coalesce(priv);
- if (priv->phydev)
- phy_start(priv->phydev);
+ if (dev->phydev)
+ phy_start(dev->phydev);
/* Request the IRQ lines */
ret = request_irq(dev->irq, stmmac_interrupt,
IRQF_SHARED, dev->name, dev);
if (unlikely(ret < 0)) {
- pr_err("%s: ERROR: allocating the IRQ %d (error: %d)\n",
- __func__, dev->irq, ret);
+ netdev_err(priv->dev,
+ "%s: ERROR: allocating the IRQ %d (error: %d)\n",
+ __func__, dev->irq, ret);
goto init_error;
}
@@ -1833,8 +1831,9 @@ static int stmmac_open(struct net_device *dev)
ret = request_irq(priv->wol_irq, stmmac_interrupt,
IRQF_SHARED, dev->name, dev);
if (unlikely(ret < 0)) {
- pr_err("%s: ERROR: allocating the WoL IRQ %d (%d)\n",
- __func__, priv->wol_irq, ret);
+ netdev_err(priv->dev,
+ "%s: ERROR: allocating the WoL IRQ %d (%d)\n",
+ __func__, priv->wol_irq, ret);
goto wolirq_error;
}
}
@@ -1844,8 +1843,9 @@ static int stmmac_open(struct net_device *dev)
ret = request_irq(priv->lpi_irq, stmmac_interrupt, IRQF_SHARED,
dev->name, dev);
if (unlikely(ret < 0)) {
- pr_err("%s: ERROR: allocating the LPI IRQ %d (%d)\n",
- __func__, priv->lpi_irq, ret);
+ netdev_err(priv->dev,
+ "%s: ERROR: allocating the LPI IRQ %d (%d)\n",
+ __func__, priv->lpi_irq, ret);
goto lpiirq_error;
}
}
@@ -1864,8 +1864,8 @@ wolirq_error:
init_error:
free_dma_desc_resources(priv);
dma_desc_error:
- if (priv->phydev)
- phy_disconnect(priv->phydev);
+ if (dev->phydev)
+ phy_disconnect(dev->phydev);
return ret;
}
@@ -1884,10 +1884,9 @@ static int stmmac_release(struct net_device *dev)
del_timer_sync(&priv->eee_ctrl_timer);
/* Stop and disconnect the PHY */
- if (priv->phydev) {
- phy_stop(priv->phydev);
- phy_disconnect(priv->phydev);
- priv->phydev = NULL;
+ if (dev->phydev) {
+ phy_stop(dev->phydev);
+ phy_disconnect(dev->phydev);
}
netif_stop_queue(dev);
@@ -1947,7 +1946,7 @@ static void stmmac_tso_allocator(struct stmmac_priv *priv, unsigned int des,
priv->cur_tx = STMMAC_GET_ENTRY(priv->cur_tx, DMA_TX_SIZE);
desc = priv->dma_tx + priv->cur_tx;
- desc->des0 = des + (total_len - tmp_len);
+ desc->des0 = cpu_to_le32(des + (total_len - tmp_len));
buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ?
TSO_MAX_BUFF_SIZE : tmp_len;
@@ -1998,8 +1997,6 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
u8 proto_hdr_len;
int i;
- spin_lock(&priv->tx_lock);
-
/* Compute header lengths */
proto_hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
@@ -2009,9 +2006,10 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
if (!netif_queue_stopped(dev)) {
netif_stop_queue(dev);
/* This is a hard error, log it. */
- pr_err("%s: Tx Ring full when queue awake\n", __func__);
+ netdev_err(priv->dev,
+ "%s: Tx Ring full when queue awake\n",
+ __func__);
}
- spin_unlock(&priv->tx_lock);
return NETDEV_TX_BUSY;
}
@@ -2049,11 +2047,11 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
priv->tx_skbuff_dma[first_entry].len = skb_headlen(skb);
priv->tx_skbuff[first_entry] = skb;
- first->des0 = des;
+ first->des0 = cpu_to_le32(des);
/* Fill start of payload in buff2 of first descriptor */
if (pay_len)
- first->des1 = des + proto_hdr_len;
+ first->des1 = cpu_to_le32(des + proto_hdr_len);
/* If needed take extra descriptors to fill the remaining payload */
tmp_pay_len = pay_len - TSO_MAX_BUFF_SIZE;
@@ -2082,8 +2080,8 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
priv->cur_tx = STMMAC_GET_ENTRY(priv->cur_tx, DMA_TX_SIZE);
if (unlikely(stmmac_tx_avail(priv) <= (MAX_SKB_FRAGS + 1))) {
- if (netif_msg_hw(priv))
- pr_debug("%s: stop transmitted packets\n", __func__);
+ netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
+ __func__);
netif_stop_queue(dev);
}
@@ -2127,7 +2125,7 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
* descriptor and then barrier is needed to make sure that
* all is coherent before granting the DMA engine.
*/
- smp_wmb();
+ dma_wmb();
if (netif_msg_pktdata(priv)) {
pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n",
@@ -2146,11 +2144,9 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
priv->hw->dma->set_tx_tail_ptr(priv->ioaddr, priv->tx_tail_addr,
STMMAC_CHAN0);
- spin_unlock(&priv->tx_lock);
return NETDEV_TX_OK;
dma_map_err:
- spin_unlock(&priv->tx_lock);
dev_err(priv->device, "Tx dma map failed\n");
dev_kfree_skb(skb);
priv->dev->stats.tx_dropped++;
@@ -2182,14 +2178,13 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
return stmmac_tso_xmit(skb, dev);
}
- spin_lock(&priv->tx_lock);
-
if (unlikely(stmmac_tx_avail(priv) < nfrags + 1)) {
- spin_unlock(&priv->tx_lock);
if (!netif_queue_stopped(dev)) {
netif_stop_queue(dev);
/* This is a hard error, log it. */
- pr_err("%s: Tx Ring full when queue awake\n", __func__);
+ netdev_err(priv->dev,
+ "%s: Tx Ring full when queue awake\n",
+ __func__);
}
return NETDEV_TX_BUSY;
}
@@ -2242,13 +2237,11 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
priv->tx_skbuff[entry] = NULL;
- if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00)) {
- desc->des0 = des;
- priv->tx_skbuff_dma[entry].buf = desc->des0;
- } else {
- desc->des2 = des;
- priv->tx_skbuff_dma[entry].buf = desc->des2;
- }
+ priv->tx_skbuff_dma[entry].buf = des;
+ if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00))
+ desc->des0 = cpu_to_le32(des);
+ else
+ desc->des2 = cpu_to_le32(des);
priv->tx_skbuff_dma[entry].map_as_page = true;
priv->tx_skbuff_dma[entry].len = len;
@@ -2266,9 +2259,10 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
if (netif_msg_pktdata(priv)) {
void *tx_head;
- pr_debug("%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d",
- __func__, priv->cur_tx, priv->dirty_tx, first_entry,
- entry, first, nfrags);
+ netdev_dbg(priv->dev,
+ "%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d",
+ __func__, priv->cur_tx, priv->dirty_tx, first_entry,
+ entry, first, nfrags);
if (priv->extend_desc)
tx_head = (void *)priv->dma_etx;
@@ -2277,13 +2271,13 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
priv->hw->desc->display_ring(tx_head, DMA_TX_SIZE, false);
- pr_debug(">>> frame to be transmitted: ");
+ netdev_dbg(priv->dev, ">>> frame to be transmitted: ");
print_pkt(skb->data, skb->len);
}
if (unlikely(stmmac_tx_avail(priv) <= (MAX_SKB_FRAGS + 1))) {
- if (netif_msg_hw(priv))
- pr_debug("%s: stop transmitted packets\n", __func__);
+ netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
+ __func__);
netif_stop_queue(dev);
}
@@ -2319,13 +2313,11 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
if (dma_mapping_error(priv->device, des))
goto dma_map_err;
- if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00)) {
- first->des0 = des;
- priv->tx_skbuff_dma[first_entry].buf = first->des0;
- } else {
- first->des2 = des;
- priv->tx_skbuff_dma[first_entry].buf = first->des2;
- }
+ priv->tx_skbuff_dma[first_entry].buf = des;
+ if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00))
+ first->des0 = cpu_to_le32(des);
+ else
+ first->des2 = cpu_to_le32(des);
priv->tx_skbuff_dma[first_entry].len = nopaged_len;
priv->tx_skbuff_dma[first_entry].last_segment = last_segment;
@@ -2346,7 +2338,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
* descriptor and then barrier is needed to make sure that
* all is coherent before granting the DMA engine.
*/
- smp_wmb();
+ dma_wmb();
}
netdev_sent_queue(dev, skb->len);
@@ -2357,12 +2349,10 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
priv->hw->dma->set_tx_tail_ptr(priv->ioaddr, priv->tx_tail_addr,
STMMAC_CHAN0);
- spin_unlock(&priv->tx_lock);
return NETDEV_TX_OK;
dma_map_err:
- spin_unlock(&priv->tx_lock);
- dev_err(priv->device, "Tx dma map failed\n");
+ netdev_err(priv->dev, "Tx DMA map failed\n");
dev_kfree_skb(skb);
priv->dev->stats.tx_dropped++;
return NETDEV_TX_OK;
@@ -2433,16 +2423,16 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv)
DMA_FROM_DEVICE);
if (dma_mapping_error(priv->device,
priv->rx_skbuff_dma[entry])) {
- dev_err(priv->device, "Rx dma map failed\n");
+ netdev_err(priv->dev, "Rx DMA map failed\n");
dev_kfree_skb(skb);
break;
}
if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00)) {
- p->des0 = priv->rx_skbuff_dma[entry];
+ p->des0 = cpu_to_le32(priv->rx_skbuff_dma[entry]);
p->des1 = 0;
} else {
- p->des2 = priv->rx_skbuff_dma[entry];
+ p->des2 = cpu_to_le32(priv->rx_skbuff_dma[entry]);
}
if (priv->hw->mode->refill_desc3)
priv->hw->mode->refill_desc3(priv, p);
@@ -2450,17 +2440,17 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv)
if (priv->rx_zeroc_thresh > 0)
priv->rx_zeroc_thresh--;
- if (netif_msg_rx_status(priv))
- pr_debug("\trefill entry #%d\n", entry);
+ netif_dbg(priv, rx_status, priv->dev,
+ "refill entry #%d\n", entry);
}
- wmb();
+ dma_wmb();
if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00))
priv->hw->desc->init_rx_desc(p, priv->use_riwt, 0, 0);
else
priv->hw->desc->set_rx_owner(p);
- wmb();
+ dma_wmb();
entry = STMMAC_GET_ENTRY(entry, DMA_RX_SIZE);
}
@@ -2484,7 +2474,7 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit)
if (netif_msg_rx_status(priv)) {
void *rx_head;
- pr_info(">>>>>> %s: descriptor ring:\n", __func__);
+ netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
if (priv->extend_desc)
rx_head = (void *)priv->dma_erx;
else
@@ -2546,9 +2536,9 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit)
unsigned int des;
if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00))
- des = p->des0;
+ des = le32_to_cpu(p->des0);
else
- des = p->des2;
+ des = le32_to_cpu(p->des2);
frame_len = priv->hw->desc->get_rx_frame_len(p, coe);
@@ -2557,9 +2547,9 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit)
* ignored
*/
if (frame_len > priv->dma_buf_sz) {
- pr_err("%s: len %d larger than size (%d)\n",
- priv->dev->name, frame_len,
- priv->dma_buf_sz);
+ netdev_err(priv->dev,
+ "len %d larger than size (%d)\n",
+ frame_len, priv->dma_buf_sz);
priv->dev->stats.rx_length_errors++;
break;
}
@@ -2571,11 +2561,11 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit)
frame_len -= ETH_FCS_LEN;
if (netif_msg_rx_status(priv)) {
- pr_info("\tdesc: %p [entry %d] buff=0x%x\n",
- p, entry, des);
+ netdev_dbg(priv->dev, "\tdesc: %p [entry %d] buff=0x%x\n",
+ p, entry, des);
if (frame_len > ETH_FRAME_LEN)
- pr_debug("\tframe size %d, COE: %d\n",
- frame_len, status);
+ netdev_dbg(priv->dev, "frame size %d, COE: %d\n",
+ frame_len, status);
}
/* The zero-copy is always used for all the sizes
@@ -2612,8 +2602,9 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit)
} else {
skb = priv->rx_skbuff[entry];
if (unlikely(!skb)) {
- pr_err("%s: Inconsistent Rx chain\n",
- priv->dev->name);
+ netdev_err(priv->dev,
+ "%s: Inconsistent Rx chain\n",
+ priv->dev->name);
priv->dev->stats.rx_dropped++;
break;
}
@@ -2629,7 +2620,8 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit)
}
if (netif_msg_pktdata(priv)) {
- pr_debug("frame received (%dbytes)", frame_len);
+ netdev_dbg(priv->dev, "frame received (%dbytes)",
+ frame_len);
print_pkt(skb->data, frame_len);
}
@@ -2729,26 +2721,12 @@ static void stmmac_set_rx_mode(struct net_device *dev)
static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
{
struct stmmac_priv *priv = netdev_priv(dev);
- int max_mtu;
if (netif_running(dev)) {
- pr_err("%s: must be stopped to change its MTU\n", dev->name);
+ netdev_err(priv->dev, "must be stopped to change its MTU\n");
return -EBUSY;
}
- if ((priv->plat->enh_desc) || (priv->synopsys_id >= DWMAC_CORE_4_00))
- max_mtu = JUMBO_LEN;
- else
- max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN);
-
- if (priv->plat->maxmtu < max_mtu)
- max_mtu = priv->plat->maxmtu;
-
- if ((new_mtu < 46) || (new_mtu > max_mtu)) {
- pr_err("%s: invalid MTU, max MTU is: %d\n", dev->name, max_mtu);
- return -EINVAL;
- }
-
dev->mtu = new_mtu;
netdev_update_features(dev);
@@ -2824,7 +2802,7 @@ static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
pm_wakeup_event(priv->device, 0);
if (unlikely(!dev)) {
- pr_err("%s: invalid dev pointer\n", __func__);
+ netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
return IRQ_NONE;
}
@@ -2882,7 +2860,6 @@ static void stmmac_poll_controller(struct net_device *dev)
*/
static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
- struct stmmac_priv *priv = netdev_priv(dev);
int ret = -EOPNOTSUPP;
if (!netif_running(dev))
@@ -2892,9 +2869,9 @@ static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
case SIOCGMIIPHY:
case SIOCGMIIREG:
case SIOCSMIIREG:
- if (!priv->phydev)
+ if (!dev->phydev)
return -EINVAL;
- ret = phy_mii_ioctl(priv->phydev, rq, cmd);
+ ret = phy_mii_ioctl(dev->phydev, rq, cmd);
break;
case SIOCSHWTSTAMP:
ret = stmmac_hwtstamp_ioctl(dev, rq);
@@ -2922,14 +2899,17 @@ static void sysfs_display_ring(void *head, int size, int extend_desc,
x = *(u64 *) ep;
seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
i, (unsigned int)virt_to_phys(ep),
- ep->basic.des0, ep->basic.des1,
- ep->basic.des2, ep->basic.des3);
+ le32_to_cpu(ep->basic.des0),
+ le32_to_cpu(ep->basic.des1),
+ le32_to_cpu(ep->basic.des2),
+ le32_to_cpu(ep->basic.des3));
ep++;
} else {
x = *(u64 *) p;
seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
i, (unsigned int)virt_to_phys(ep),
- p->des0, p->des1, p->des2, p->des3);
+ le32_to_cpu(p->des0), le32_to_cpu(p->des1),
+ le32_to_cpu(p->des2), le32_to_cpu(p->des3));
p++;
}
seq_printf(seq, "\n");
@@ -2961,6 +2941,8 @@ static int stmmac_sysfs_ring_open(struct inode *inode, struct file *file)
return single_open(file, stmmac_sysfs_ring_read, inode->i_private);
}
+/* Debugfs files; they should appear in /sys/kernel/debug/stmmaceth/eth0 */
+
static const struct file_operations stmmac_rings_status_fops = {
.owner = THIS_MODULE,
.open = stmmac_sysfs_ring_open,
@@ -2983,11 +2965,11 @@ static int stmmac_sysfs_dma_cap_read(struct seq_file *seq, void *v)
seq_printf(seq, "\tDMA HW features\n");
seq_printf(seq, "==============================\n");
- seq_printf(seq, "\t10/100 Mbps %s\n",
+ seq_printf(seq, "\t10/100 Mbps: %s\n",
(priv->dma_cap.mbps_10_100) ? "Y" : "N");
- seq_printf(seq, "\t1000 Mbps %s\n",
+ seq_printf(seq, "\t1000 Mbps: %s\n",
(priv->dma_cap.mbps_1000) ? "Y" : "N");
- seq_printf(seq, "\tHalf duple %s\n",
+ seq_printf(seq, "\tHalf duplex: %s\n",
(priv->dma_cap.half_duplex) ? "Y" : "N");
seq_printf(seq, "\tHash Filter: %s\n",
(priv->dma_cap.hash_filter) ? "Y" : "N");
@@ -3005,9 +2987,9 @@ static int stmmac_sysfs_dma_cap_read(struct seq_file *seq, void *v)
(priv->dma_cap.rmon) ? "Y" : "N");
seq_printf(seq, "\tIEEE 1588-2002 Time Stamp: %s\n",
(priv->dma_cap.time_stamp) ? "Y" : "N");
- seq_printf(seq, "\tIEEE 1588-2008 Advanced Time Stamp:%s\n",
+ seq_printf(seq, "\tIEEE 1588-2008 Advanced Time Stamp: %s\n",
(priv->dma_cap.atime_stamp) ? "Y" : "N");
- seq_printf(seq, "\t802.3az - Energy-Efficient Ethernet (EEE) %s\n",
+ seq_printf(seq, "\t802.3az - Energy-Efficient Ethernet (EEE): %s\n",
(priv->dma_cap.eee) ? "Y" : "N");
seq_printf(seq, "\tAV features: %s\n", (priv->dma_cap.av) ? "Y" : "N");
seq_printf(seq, "\tChecksum Offload in TX: %s\n",
@@ -3054,8 +3036,7 @@ static int stmmac_init_fs(struct net_device *dev)
priv->dbgfs_dir = debugfs_create_dir(dev->name, stmmac_fs_dir);
if (!priv->dbgfs_dir || IS_ERR(priv->dbgfs_dir)) {
- pr_err("ERROR %s/%s, debugfs create directory failed\n",
- STMMAC_RESOURCE_NAME, dev->name);
+ netdev_err(priv->dev, "ERROR failed to create debugfs directory\n");
return -ENOMEM;
}
@@ -3067,7 +3048,7 @@ static int stmmac_init_fs(struct net_device *dev)
&stmmac_rings_status_fops);
if (!priv->dbgfs_rings_status || IS_ERR(priv->dbgfs_rings_status)) {
- pr_info("ERROR creating stmmac ring debugfs file\n");
+ netdev_err(priv->dev, "ERROR creating stmmac ring debugfs file\n");
debugfs_remove_recursive(priv->dbgfs_dir);
return -ENOMEM;
@@ -3079,7 +3060,7 @@ static int stmmac_init_fs(struct net_device *dev)
dev, &stmmac_dma_cap_fops);
if (!priv->dbgfs_dma_cap || IS_ERR(priv->dbgfs_dma_cap)) {
- pr_info("ERROR creating stmmac MMC debugfs file\n");
+ netdev_err(priv->dev, "ERROR creating stmmac MMC debugfs file\n");
debugfs_remove_recursive(priv->dbgfs_dir);
return -ENOMEM;
@@ -3151,11 +3132,11 @@ static int stmmac_hw_init(struct stmmac_priv *priv)
} else {
if (chain_mode) {
priv->hw->mode = &chain_mode_ops;
- pr_info(" Chain mode enabled\n");
+ dev_info(priv->device, "Chain mode enabled\n");
priv->mode = STMMAC_CHAIN_MODE;
} else {
priv->hw->mode = &ring_mode_ops;
- pr_info(" Ring mode enabled\n");
+ dev_info(priv->device, "Ring mode enabled\n");
priv->mode = STMMAC_RING_MODE;
}
}
@@ -3163,7 +3144,7 @@ static int stmmac_hw_init(struct stmmac_priv *priv)
/* Get the HW capability (new GMAC newer than 3.50a) */
priv->hw_cap_support = stmmac_get_hw_features(priv);
if (priv->hw_cap_support) {
- pr_info(" DMA HW capability register supported");
+ dev_info(priv->device, "DMA HW capability register supported\n");
/* We can override some gmac/dma configuration fields: e.g.
* enh_desc, tx_coe (e.g. that are passed through the
@@ -3188,8 +3169,9 @@ static int stmmac_hw_init(struct stmmac_priv *priv)
else if (priv->dma_cap.rx_coe_type1)
priv->plat->rx_coe = STMMAC_RX_COE_TYPE1;
- } else
- pr_info(" No HW DMA feature register supported");
+ } else {
+ dev_info(priv->device, "No HW DMA feature register supported\n");
+ }
/* To use alternate (extended), normal or GMAC4 descriptor structures */
if (priv->synopsys_id >= DWMAC_CORE_4_00)
@@ -3199,20 +3181,20 @@ static int stmmac_hw_init(struct stmmac_priv *priv)
if (priv->plat->rx_coe) {
priv->hw->rx_csum = priv->plat->rx_coe;
- pr_info(" RX Checksum Offload Engine supported\n");
+ dev_info(priv->device, "RX Checksum Offload Engine supported\n");
if (priv->synopsys_id < DWMAC_CORE_4_00)
- pr_info("\tCOE Type %d\n", priv->hw->rx_csum);
+ dev_info(priv->device, "COE Type %d\n", priv->hw->rx_csum);
}
if (priv->plat->tx_coe)
- pr_info(" TX Checksum insertion supported\n");
+ dev_info(priv->device, "TX Checksum insertion supported\n");
if (priv->plat->pmt) {
- pr_info(" Wake-Up On Lan supported\n");
+ dev_info(priv->device, "Wake-Up On Lan supported\n");
device_set_wakeup_capable(priv->device, 1);
}
if (priv->dma_cap.tsoen)
- pr_info(" TSO supported\n");
+ dev_info(priv->device, "TSO supported\n");
return 0;
}
@@ -3271,8 +3253,8 @@ int stmmac_dvr_probe(struct device *device,
priv->stmmac_clk = devm_clk_get(priv->device, STMMAC_RESOURCE_NAME);
if (IS_ERR(priv->stmmac_clk)) {
- dev_warn(priv->device, "%s: warning: cannot get CSR clock\n",
- __func__);
+ netdev_warn(priv->dev, "%s: warning: cannot get CSR clock\n",
+ __func__);
/* If we failed to obtain the stmmac_clk and a specific clk_csr value
* was NOT passed from the platform, the probe fails.
*/
@@ -3321,7 +3303,7 @@ int stmmac_dvr_probe(struct device *device,
if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
ndev->hw_features |= NETIF_F_TSO;
priv->tso = true;
- pr_info(" TSO feature enabled\n");
+ dev_info(priv->device, "TSO feature enabled\n");
}
ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
@@ -3331,6 +3313,15 @@ int stmmac_dvr_probe(struct device *device,
#endif
priv->msg_enable = netif_msg_init(debug, default_msg_level);
+ /* MTU range: 46 - hw-specific max */
+ ndev->min_mtu = ETH_ZLEN - ETH_HLEN;
+ if ((priv->plat->enh_desc) || (priv->synopsys_id >= DWMAC_CORE_4_00))
+ ndev->max_mtu = JUMBO_LEN;
+ else
+ ndev->max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN);
+ if (priv->plat->maxmtu < ndev->max_mtu)
+ ndev->max_mtu = priv->plat->maxmtu;
+
if (flow_ctrl)
priv->flow_ctrl = FLOW_AUTO; /* RX/TX pause on */
@@ -3341,17 +3332,17 @@ int stmmac_dvr_probe(struct device *device,
*/
if ((priv->synopsys_id >= DWMAC_CORE_3_50) && (!priv->plat->riwt_off)) {
priv->use_riwt = 1;
- pr_info(" Enable RX Mitigation via HW Watchdog Timer\n");
+ netdev_info(priv->dev, "Enable RX Mitigation via HW Watchdog Timer\n");
}
netif_napi_add(ndev, &priv->napi, stmmac_poll, 64);
spin_lock_init(&priv->lock);
- spin_lock_init(&priv->tx_lock);
ret = register_netdev(ndev);
if (ret) {
- pr_err("%s: ERROR %i registering the device\n", __func__, ret);
+ netdev_err(priv->dev, "%s: ERROR %i registering the device\n",
+ __func__, ret);
goto error_netdev_register;
}
@@ -3374,8 +3365,9 @@ int stmmac_dvr_probe(struct device *device,
/* MDIO bus Registration */
ret = stmmac_mdio_register(ndev);
if (ret < 0) {
- pr_debug("%s: MDIO bus (id: %d) registration failed",
- __func__, priv->plat->bus_id);
+ netdev_err(priv->dev,
+ "%s: MDIO bus (id: %d) registration failed",
+ __func__, priv->plat->bus_id);
goto error_mdio_register;
}
}
@@ -3408,7 +3400,7 @@ int stmmac_dvr_remove(struct device *dev)
struct net_device *ndev = dev_get_drvdata(dev);
struct stmmac_priv *priv = netdev_priv(ndev);
- pr_info("%s:\n\tremoving driver", __func__);
+ netdev_info(priv->dev, "%s: removing driver", __func__);
priv->hw->dma->stop_rx(priv->ioaddr);
priv->hw->dma->stop_tx(priv->ioaddr);
@@ -3446,8 +3438,8 @@ int stmmac_suspend(struct device *dev)
if (!ndev || !netif_running(ndev))
return 0;
- if (priv->phydev)
- phy_stop(priv->phydev);
+ if (ndev->phydev)
+ phy_stop(ndev->phydev);
spin_lock_irqsave(&priv->lock, flags);
@@ -3541,8 +3533,8 @@ int stmmac_resume(struct device *dev)
spin_unlock_irqrestore(&priv->lock, flags);
- if (priv->phydev)
- phy_start(priv->phydev);
+ if (ndev->phydev)
+ phy_start(ndev->phydev);
return 0;
}
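
The stmmac_main.c changes above bundle three things: netdev_*/dev_* logging with the device prefix, removal of the driver-private tx_lock (the xmit paths are already serialized per queue by the core's tx queue lock), and explicit endianness handling for the DMA descriptors. Descriptor words are device-owned little-endian, so every store goes through cpu_to_le32() and every load through le32_to_cpu(). A minimal sketch of that pattern with a simplified stand-in descriptor (the demo_* names are illustrative, not driver API):

#include <linux/types.h>
#include <asm/byteorder.h>

/* Stand-in for a hardware descriptor: fields the DMA engine touches
 * are __le32, so sparse flags any access missing a conversion.
 */
struct demo_desc {
	__le32 des0;		/* buffer address, little-endian on the wire */
};

static void demo_fill(struct demo_desc *p, dma_addr_t buf)
{
	p->des0 = cpu_to_le32(buf);	/* CPU -> device */
}

static u32 demo_read(const struct demo_desc *p)
{
	return le32_to_cpu(p->des0);	/* device -> CPU */
}

On little-endian CPUs both helpers compile to plain loads and stores; the conversion only costs anything on big-endian hosts.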
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
index ec295851812b..fda01f770eff 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
@@ -42,13 +42,6 @@
#define MII_GMAC4_WRITE (1 << MII_GMAC4_GOC_SHIFT)
#define MII_GMAC4_READ (3 << MII_GMAC4_GOC_SHIFT)
-#define MII_PHY_ADDR_GMAC4_SHIFT 21
-#define MII_PHY_ADDR_GMAC4_MASK GENMASK(25, 21)
-#define MII_PHY_REG_GMAC4_SHIFT 16
-#define MII_PHY_REG_GMAC4_MASK GENMASK(20, 16)
-#define MII_CSR_CLK_GMAC4_SHIFT 8
-#define MII_CSR_CLK_GMAC4_MASK GENMASK(11, 8)
-
static int stmmac_mdio_busy_wait(void __iomem *ioaddr, unsigned int mii_addr)
{
unsigned long curr;
@@ -68,8 +61,8 @@ static int stmmac_mdio_busy_wait(void __iomem *ioaddr, unsigned int mii_addr)
/**
* stmmac_mdio_read
* @bus: points to the mii_bus structure
- * @phyaddr: MII addr reg bits 15-11
- * @phyreg: MII addr reg bits 10-6
+ * @phyaddr: MII addr
+ * @phyreg: MII reg
* Description: it reads data from the MII register from within the phy device.
* For the 7111 GMAC, we must set the bit 0 in the MII address register while
* accessing the PHY registers.
@@ -83,14 +76,20 @@ static int stmmac_mdio_read(struct mii_bus *bus, int phyaddr, int phyreg)
unsigned int mii_data = priv->hw->mii.data;
int data;
- u16 regValue = (((phyaddr << 11) & (0x0000F800)) |
- ((phyreg << 6) & (0x000007C0)));
- regValue |= MII_BUSY | ((priv->clk_csr & 0xF) << 2);
+ u32 value = MII_BUSY;
+
+ value |= (phyaddr << priv->hw->mii.addr_shift)
+ & priv->hw->mii.addr_mask;
+ value |= (phyreg << priv->hw->mii.reg_shift) & priv->hw->mii.reg_mask;
+ value |= (priv->clk_csr << priv->hw->mii.clk_csr_shift)
+ & priv->hw->mii.clk_csr_mask;
+ if (priv->plat->has_gmac4)
+ value |= MII_GMAC4_READ;
if (stmmac_mdio_busy_wait(priv->ioaddr, mii_address))
return -EBUSY;
- writel(regValue, priv->ioaddr + mii_address);
+ writel(value, priv->ioaddr + mii_address);
if (stmmac_mdio_busy_wait(priv->ioaddr, mii_address))
return -EBUSY;
@@ -104,8 +103,8 @@ static int stmmac_mdio_read(struct mii_bus *bus, int phyaddr, int phyreg)
/**
* stmmac_mdio_write
* @bus: points to the mii_bus structure
- * @phyaddr: MII addr reg bits 15-11
- * @phyreg: MII addr reg bits 10-6
+ * @phyaddr: MII addr
+ * @phyreg: MII reg
* @phydata: phy data
* Description: it writes the data into the MII register from within the device.
*/
@@ -117,85 +116,16 @@ static int stmmac_mdio_write(struct mii_bus *bus, int phyaddr, int phyreg,
unsigned int mii_address = priv->hw->mii.addr;
unsigned int mii_data = priv->hw->mii.data;
- u16 value =
- (((phyaddr << 11) & (0x0000F800)) | ((phyreg << 6) & (0x000007C0)))
- | MII_WRITE;
-
- value |= MII_BUSY | ((priv->clk_csr & 0xF) << 2);
-
- /* Wait until any existing MII operation is complete */
- if (stmmac_mdio_busy_wait(priv->ioaddr, mii_address))
- return -EBUSY;
-
- /* Set the MII address register to write */
- writel(phydata, priv->ioaddr + mii_data);
- writel(value, priv->ioaddr + mii_address);
-
- /* Wait until any existing MII operation is complete */
- return stmmac_mdio_busy_wait(priv->ioaddr, mii_address);
-}
-
-/**
- * stmmac_mdio_read_gmac4
- * @bus: points to the mii_bus structure
- * @phyaddr: MII addr reg bits 25-21
- * @phyreg: MII addr reg bits 20-16
- * Description: it reads data from the MII register of GMAC4 from within
- * the phy device.
- */
-static int stmmac_mdio_read_gmac4(struct mii_bus *bus, int phyaddr, int phyreg)
-{
- struct net_device *ndev = bus->priv;
- struct stmmac_priv *priv = netdev_priv(ndev);
- unsigned int mii_address = priv->hw->mii.addr;
- unsigned int mii_data = priv->hw->mii.data;
- int data;
- u32 value = (((phyaddr << MII_PHY_ADDR_GMAC4_SHIFT) &
- (MII_PHY_ADDR_GMAC4_MASK)) |
- ((phyreg << MII_PHY_REG_GMAC4_SHIFT) &
- (MII_PHY_REG_GMAC4_MASK))) | MII_GMAC4_READ;
-
- value |= MII_BUSY | ((priv->clk_csr & MII_CSR_CLK_GMAC4_MASK)
- << MII_CSR_CLK_GMAC4_SHIFT);
-
- if (stmmac_mdio_busy_wait(priv->ioaddr, mii_address))
- return -EBUSY;
-
- writel(value, priv->ioaddr + mii_address);
-
- if (stmmac_mdio_busy_wait(priv->ioaddr, mii_address))
- return -EBUSY;
-
- /* Read the data from the MII data register */
- data = (int)readl(priv->ioaddr + mii_data);
-
- return data;
-}
+ u32 value = MII_WRITE | MII_BUSY;
-/**
- * stmmac_mdio_write_gmac4
- * @bus: points to the mii_bus structure
- * @phyaddr: MII addr reg bits 25-21
- * @phyreg: MII addr reg bits 20-16
- * @phydata: phy data
- * Description: it writes the data into the MII register of GMAC4 from within
- * the device.
- */
-static int stmmac_mdio_write_gmac4(struct mii_bus *bus, int phyaddr, int phyreg,
- u16 phydata)
-{
- struct net_device *ndev = bus->priv;
- struct stmmac_priv *priv = netdev_priv(ndev);
- unsigned int mii_address = priv->hw->mii.addr;
- unsigned int mii_data = priv->hw->mii.data;
-
- u32 value = (((phyaddr << MII_PHY_ADDR_GMAC4_SHIFT) &
- (MII_PHY_ADDR_GMAC4_MASK)) |
- ((phyreg << MII_PHY_REG_GMAC4_SHIFT) &
- (MII_PHY_REG_GMAC4_MASK))) | MII_GMAC4_WRITE;
+ value |= (phyaddr << priv->hw->mii.addr_shift)
+ & priv->hw->mii.addr_mask;
+ value |= (phyreg << priv->hw->mii.reg_shift) & priv->hw->mii.reg_mask;
- value |= MII_BUSY | ((priv->clk_csr & MII_CSR_CLK_GMAC4_MASK)
- << MII_CSR_CLK_GMAC4_SHIFT);
+ value |= (priv->clk_csr << priv->hw->mii.clk_csr_shift)
+ & priv->hw->mii.clk_csr_mask;
+ if (priv->plat->has_gmac4)
+ value |= MII_GMAC4_WRITE;
/* Wait until any existing MII operation is complete */
if (stmmac_mdio_busy_wait(priv->ioaddr, mii_address))
@@ -260,7 +190,7 @@ int stmmac_mdio_reset(struct mii_bus *bus)
#endif
if (data->phy_reset) {
- pr_debug("stmmac_mdio_reset: calling phy_reset\n");
+ netdev_dbg(ndev, "stmmac_mdio_reset: calling phy_reset\n");
data->phy_reset(priv->plat->bsp_priv);
}
@@ -305,13 +235,8 @@ int stmmac_mdio_register(struct net_device *ndev)
#endif
new_bus->name = "stmmac";
- if (priv->plat->has_gmac4) {
- new_bus->read = &stmmac_mdio_read_gmac4;
- new_bus->write = &stmmac_mdio_write_gmac4;
- } else {
- new_bus->read = &stmmac_mdio_read;
- new_bus->write = &stmmac_mdio_write;
- }
+ new_bus->read = &stmmac_mdio_read;
+ new_bus->write = &stmmac_mdio_write;
new_bus->reset = &stmmac_mdio_reset;
snprintf(new_bus->id, MII_BUS_ID_SIZE, "%s-%x",
@@ -325,7 +250,7 @@ int stmmac_mdio_register(struct net_device *ndev)
else
err = mdiobus_register(new_bus);
if (err != 0) {
- pr_err("%s: Cannot register as MDIO bus\n", new_bus->name);
+ netdev_err(ndev, "Cannot register the MDIO bus\n");
goto bus_register_fail;
}
@@ -372,16 +297,16 @@ int stmmac_mdio_register(struct net_device *ndev)
irq_str = irq_num;
break;
}
- pr_info("%s: PHY ID %08x at %d IRQ %s (%s)%s\n",
- ndev->name, phydev->phy_id, addr,
- irq_str, phydev_name(phydev),
- act ? " active" : "");
+ netdev_info(ndev, "PHY ID %08x at %d IRQ %s (%s)%s\n",
+ phydev->phy_id, addr,
+ irq_str, phydev_name(phydev),
+ act ? " active" : "");
found = 1;
}
}
if (!found && !mdio_node) {
- pr_warn("%s: No PHY found\n", ndev->name);
+ netdev_warn(ndev, "No PHY found\n");
mdiobus_unregister(new_bus);
mdiobus_free(new_bus);
return -ENODEV;
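
The two GMAC4-specific MDIO routines collapse into the generic ones because the address/register bit positions now come from per-core shift/mask fields in priv->hw->mii rather than hard-coded constants. A sketch of the composition step; the demo_* table is hypothetical, holding the same values stmmac loads for the classic GMAC (addr bits 15-11, reg bits 10-6) and GMAC4 (bits 25-21, 20-16) layouts:

#include <linux/bitops.h>	/* GENMASK() */
#include <linux/types.h>

struct demo_mii_layout {
	int addr_shift;
	u32 addr_mask;
	int reg_shift;
	u32 reg_mask;
};

static const struct demo_mii_layout demo_gmac = {
	.addr_shift = 11, .addr_mask = GENMASK(15, 11),
	.reg_shift  = 6,  .reg_mask  = GENMASK(10, 6),
};

static const struct demo_mii_layout demo_gmac4 = {
	.addr_shift = 21, .addr_mask = GENMASK(25, 21),
	.reg_shift  = 16, .reg_mask  = GENMASK(20, 16),
};

static u32 demo_mii_frame(const struct demo_mii_layout *mii,
			  int phyaddr, int phyreg)
{
	u32 value = 0;

	/* identical expression for both cores; only the table differs */
	value |= (phyaddr << mii->addr_shift) & mii->addr_mask;
	value |= (phyreg << mii->reg_shift) & mii->reg_mask;
	return value;
}

One read/write pair then serves every MAC generation, with the GMAC4 command bits OR'ed in only when has_gmac4 is set.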
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
index 56c8a2342c14..a2831773431a 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
@@ -81,6 +81,7 @@ static void stmmac_default_data(struct plat_stmmacenet_data *plat)
plat->mdio_bus_data->phy_mask = 0;
plat->dma_cfg->pbl = 32;
+ plat->dma_cfg->pblx8 = true;
/* TODO: AXI */
/* Set default value for multicast hash bins */
@@ -115,6 +116,7 @@ static int quark_default_data(struct plat_stmmacenet_data *plat,
plat->mdio_bus_data->phy_mask = 0;
plat->dma_cfg->pbl = 16;
+ plat->dma_cfg->pblx8 = true;
plat->dma_cfg->fixed_burst = 1;
/* AXI (TODO) */
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index ac3d39c69509..082cd48db6a7 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -292,6 +292,7 @@ stmmac_probe_config_dt(struct platform_device *pdev, const char **mac)
if (of_device_is_compatible(np, "snps,dwmac-4.00") ||
of_device_is_compatible(np, "snps,dwmac-4.10a")) {
plat->has_gmac4 = 1;
+ plat->has_gmac = 0;
plat->pmt = 1;
plat->tso_en = of_property_read_bool(np, "snps,tso");
}
@@ -303,21 +304,25 @@ stmmac_probe_config_dt(struct platform_device *pdev, const char **mac)
plat->force_sf_dma_mode = 1;
}
- if (of_find_property(np, "snps,pbl", NULL)) {
- dma_cfg = devm_kzalloc(&pdev->dev, sizeof(*dma_cfg),
- GFP_KERNEL);
- if (!dma_cfg) {
- stmmac_remove_config_dt(pdev, plat);
- return ERR_PTR(-ENOMEM);
- }
- plat->dma_cfg = dma_cfg;
- of_property_read_u32(np, "snps,pbl", &dma_cfg->pbl);
- dma_cfg->aal = of_property_read_bool(np, "snps,aal");
- dma_cfg->fixed_burst =
- of_property_read_bool(np, "snps,fixed-burst");
- dma_cfg->mixed_burst =
- of_property_read_bool(np, "snps,mixed-burst");
+ dma_cfg = devm_kzalloc(&pdev->dev, sizeof(*dma_cfg),
+ GFP_KERNEL);
+ if (!dma_cfg) {
+ stmmac_remove_config_dt(pdev, plat);
+ return ERR_PTR(-ENOMEM);
}
+ plat->dma_cfg = dma_cfg;
+
+ of_property_read_u32(np, "snps,pbl", &dma_cfg->pbl);
+ if (!dma_cfg->pbl)
+ dma_cfg->pbl = DEFAULT_DMA_PBL;
+ of_property_read_u32(np, "snps,txpbl", &dma_cfg->txpbl);
+ of_property_read_u32(np, "snps,rxpbl", &dma_cfg->rxpbl);
+ dma_cfg->pblx8 = !of_property_read_bool(np, "snps,no-pbl-x8");
+
+ dma_cfg->aal = of_property_read_bool(np, "snps,aal");
+ dma_cfg->fixed_burst = of_property_read_bool(np, "snps,fixed-burst");
+ dma_cfg->mixed_burst = of_property_read_bool(np, "snps,mixed-burst");
+
plat->force_thresh_dma_mode = of_property_read_bool(np, "snps,force_thresh_dma_mode");
if (plat->force_thresh_dma_mode) {
plat->force_sf_dma_mode = 0;
@@ -444,9 +449,7 @@ static int stmmac_pltfr_suspend(struct device *dev)
struct platform_device *pdev = to_platform_device(dev);
ret = stmmac_suspend(dev);
- if (priv->plat->suspend)
- priv->plat->suspend(pdev, priv->plat->bsp_priv);
- else if (priv->plat->exit)
+ if (priv->plat->exit)
priv->plat->exit(pdev, priv->plat->bsp_priv);
return ret;
@@ -465,9 +468,7 @@ static int stmmac_pltfr_resume(struct device *dev)
struct stmmac_priv *priv = netdev_priv(ndev);
struct platform_device *pdev = to_platform_device(dev);
- if (priv->plat->resume)
- priv->plat->resume(pdev, priv->plat->bsp_priv);
- else if (priv->plat->init)
+ if (priv->plat->init)
priv->plat->init(pdev, priv->plat->bsp_priv);
return stmmac_resume(dev);
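
stmmac_probe_config_dt() now allocates dma_cfg unconditionally instead of only when "snps,pbl" is present, so a missing property simply leaves the zeroed defaults in place and pbl falls back to DEFAULT_DMA_PBL. That works because of_property_read_u32() leaves its output argument untouched on failure. A minimal sketch of the idiom (DEMO_DEFAULT_PBL stands in for the driver's constant):

#include <linux/of.h>

#define DEMO_DEFAULT_PBL 8	/* stand-in for DEFAULT_DMA_PBL */

static u32 demo_read_pbl(struct device_node *np)
{
	u32 pbl = 0;

	/* on a missing property the call fails and pbl stays 0 ... */
	of_property_read_u32(np, "snps,pbl", &pbl);

	/* ... and zero selects the driver default */
	return pbl ? pbl : DEMO_DEFAULT_PBL;
}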
diff --git a/drivers/net/ethernet/sun/cassini.c b/drivers/net/ethernet/sun/cassini.c
index 062bce9acde6..e9e5ef241c6f 100644
--- a/drivers/net/ethernet/sun/cassini.c
+++ b/drivers/net/ethernet/sun/cassini.c
@@ -3863,9 +3863,6 @@ static int cas_change_mtu(struct net_device *dev, int new_mtu)
{
struct cas *cp = netdev_priv(dev);
- if (new_mtu < CAS_MIN_MTU || new_mtu > CAS_MAX_MTU)
- return -EINVAL;
-
dev->mtu = new_mtu;
if (!netif_running(dev) || !netif_device_present(dev))
return 0;
@@ -5115,6 +5112,10 @@ static int cas_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (pci_using_dac)
dev->features |= NETIF_F_HIGHDMA;
+ /* MTU range: 60 - varies or 9000 */
+ dev->min_mtu = CAS_MIN_MTU;
+ dev->max_mtu = CAS_MAX_MTU;
+
if (register_netdev(dev)) {
dev_err(&pdev->dev, "Cannot register net device, aborting\n");
goto err_out_free_consistent;
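
cassini is the first of several drivers below (ldmvsw, niu, sungem, sunvnet, tehuti, ...) converted to the new core MTU handling: the open-coded range check in ndo_change_mtu goes away, and the driver instead advertises its limits once at probe time. dev_set_mtu() in the core rejects out-of-range values before the driver callback ever runs. The pattern, with a hypothetical jumbo limit:

#include <linux/etherdevice.h>
#include <linux/netdevice.h>

#define DEMO_MAX_MTU 9000	/* hypothetical device limit */

static int demo_register(struct net_device *dev)
{
	/* the core validates new MTU values against this range, so
	 * ndo_change_mtu (if implemented at all) only sees valid ones
	 */
	dev->min_mtu = ETH_MIN_MTU;	/* 68 */
	dev->max_mtu = DEMO_MAX_MTU;

	return register_netdev(dev);
}

Drivers with no constraints beyond the defaults (sunbmac, sunhme, sunqe, cpmac below) simply drop their .ndo_change_mtu = eth_change_mtu hook, since the core now performs the same bounds check itself.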
diff --git a/drivers/net/ethernet/sun/ldmvsw.c b/drivers/net/ethernet/sun/ldmvsw.c
index 0ac449acaf5b..335b87660638 100644
--- a/drivers/net/ethernet/sun/ldmvsw.c
+++ b/drivers/net/ethernet/sun/ldmvsw.c
@@ -139,7 +139,6 @@ static const struct net_device_ops vsw_ops = {
.ndo_set_mac_address = sunvnet_set_mac_addr_common,
.ndo_validate_addr = eth_validate_addr,
.ndo_tx_timeout = sunvnet_tx_timeout_common,
- .ndo_change_mtu = sunvnet_change_mtu_common,
.ndo_start_xmit = vsw_start_xmit,
.ndo_select_queue = vsw_select_queue,
#ifdef CONFIG_NET_POLL_CONTROLLER
@@ -239,6 +238,10 @@ static struct net_device *vsw_alloc_netdev(u8 hwaddr[],
NETIF_F_HW_CSUM | NETIF_F_SG;
dev->features = dev->hw_features;
+ /* MTU range: 68 - 65535 */
+ dev->min_mtu = ETH_MIN_MTU;
+ dev->max_mtu = VNET_MAX_MTU;
+
SET_NETDEV_DEV(dev, &vdev->dev);
return dev;
diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c
index a2371aa14a49..f90d1af6d390 100644
--- a/drivers/net/ethernet/sun/niu.c
+++ b/drivers/net/ethernet/sun/niu.c
@@ -6754,9 +6754,6 @@ static int niu_change_mtu(struct net_device *dev, int new_mtu)
struct niu *np = netdev_priv(dev);
int err, orig_jumbo, new_jumbo;
- if (new_mtu < 68 || new_mtu > NIU_MAX_MTU)
- return -EINVAL;
-
orig_jumbo = (dev->mtu > ETH_DATA_LEN);
new_jumbo = (new_mtu > ETH_DATA_LEN);
@@ -9823,6 +9820,10 @@ static int niu_pci_init_one(struct pci_dev *pdev,
dev->irq = pdev->irq;
+ /* MTU range: 68 - 9216 */
+ dev->min_mtu = ETH_MIN_MTU;
+ dev->max_mtu = NIU_MAX_MTU;
+
niu_assign_netdev_ops(dev);
err = niu_get_invariants(np);
diff --git a/drivers/net/ethernet/sun/sunbmac.c b/drivers/net/ethernet/sun/sunbmac.c
index 02f452730d52..c4caf486cbef 100644
--- a/drivers/net/ethernet/sun/sunbmac.c
+++ b/drivers/net/ethernet/sun/sunbmac.c
@@ -1065,7 +1065,6 @@ static const struct net_device_ops bigmac_ops = {
.ndo_get_stats = bigmac_get_stats,
.ndo_set_rx_mode = bigmac_set_multicast,
.ndo_tx_timeout = bigmac_tx_timeout,
- .ndo_change_mtu = eth_change_mtu,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
};
diff --git a/drivers/net/ethernet/sun/sungem.c b/drivers/net/ethernet/sun/sungem.c
index d6ad0fbd054e..66ecf0fcc330 100644
--- a/drivers/net/ethernet/sun/sungem.c
+++ b/drivers/net/ethernet/sun/sungem.c
@@ -2476,9 +2476,9 @@ static void gem_set_multicast(struct net_device *dev)
}
/* Jumbo-grams don't seem to work :-( */
-#define GEM_MIN_MTU 68
+#define GEM_MIN_MTU ETH_MIN_MTU
#if 1
-#define GEM_MAX_MTU 1500
+#define GEM_MAX_MTU ETH_DATA_LEN
#else
#define GEM_MAX_MTU 9000
#endif
@@ -2487,9 +2487,6 @@ static int gem_change_mtu(struct net_device *dev, int new_mtu)
{
struct gem *gp = netdev_priv(dev);
- if (new_mtu < GEM_MIN_MTU || new_mtu > GEM_MAX_MTU)
- return -EINVAL;
-
dev->mtu = new_mtu;
/* We'll just catch it later when the device is up'd or resumed */
@@ -2977,6 +2974,10 @@ static int gem_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (pci_using_dac)
dev->features |= NETIF_F_HIGHDMA;
+ /* MTU range: 68 - 1500 (Jumbo mode is broken) */
+ dev->min_mtu = GEM_MIN_MTU;
+ dev->max_mtu = GEM_MAX_MTU;
+
/* Register with kernel */
if (register_netdev(dev)) {
pr_err("Cannot register net device, aborting\n");
diff --git a/drivers/net/ethernet/sun/sunhme.c b/drivers/net/ethernet/sun/sunhme.c
index cf4dcff051d5..ca96408058b0 100644
--- a/drivers/net/ethernet/sun/sunhme.c
+++ b/drivers/net/ethernet/sun/sunhme.c
@@ -2669,7 +2669,6 @@ static const struct net_device_ops hme_netdev_ops = {
.ndo_tx_timeout = happy_meal_tx_timeout,
.ndo_get_stats = happy_meal_get_stats,
.ndo_set_rx_mode = happy_meal_set_multicast,
- .ndo_change_mtu = eth_change_mtu,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
};
diff --git a/drivers/net/ethernet/sun/sunhme.h b/drivers/net/ethernet/sun/sunhme.h
index f4307654e4ae..4a8d5b18dfd5 100644
--- a/drivers/net/ethernet/sun/sunhme.h
+++ b/drivers/net/ethernet/sun/sunhme.h
@@ -302,7 +302,7 @@
* Always write the address first before setting the ownership
* bits to avoid races with the hardware scanning the ring.
*/
-typedef u32 __bitwise__ hme32;
+typedef u32 __bitwise hme32;
struct happy_meal_rxd {
hme32 rx_flags;
diff --git a/drivers/net/ethernet/sun/sunqe.c b/drivers/net/ethernet/sun/sunqe.c
index 9582948145c1..a6bcdcdd947e 100644
--- a/drivers/net/ethernet/sun/sunqe.c
+++ b/drivers/net/ethernet/sun/sunqe.c
@@ -824,7 +824,6 @@ static const struct net_device_ops qec_ops = {
.ndo_start_xmit = qe_start_xmit,
.ndo_set_rx_mode = qe_set_multicast,
.ndo_tx_timeout = qe_tx_timeout,
- .ndo_change_mtu = eth_change_mtu,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
};
diff --git a/drivers/net/ethernet/sun/sunvnet.c b/drivers/net/ethernet/sun/sunvnet.c
index a2f9b47de187..5356a7074796 100644
--- a/drivers/net/ethernet/sun/sunvnet.c
+++ b/drivers/net/ethernet/sun/sunvnet.c
@@ -159,7 +159,6 @@ static const struct net_device_ops vnet_ops = {
.ndo_set_mac_address = sunvnet_set_mac_addr_common,
.ndo_validate_addr = eth_validate_addr,
.ndo_tx_timeout = sunvnet_tx_timeout_common,
- .ndo_change_mtu = sunvnet_change_mtu_common,
.ndo_start_xmit = vnet_start_xmit,
.ndo_select_queue = vnet_select_queue,
#ifdef CONFIG_NET_POLL_CONTROLLER
@@ -202,6 +201,10 @@ static struct vnet *vnet_new(const u64 *local_mac,
NETIF_F_HW_CSUM | NETIF_F_SG;
dev->features = dev->hw_features;
+ /* MTU range: 68 - 65535 */
+ dev->min_mtu = ETH_MIN_MTU;
+ dev->max_mtu = VNET_MAX_MTU;
+
SET_NETDEV_DEV(dev, &vdev->dev);
err = register_netdev(dev);
diff --git a/drivers/net/ethernet/sun/sunvnet_common.c b/drivers/net/ethernet/sun/sunvnet_common.c
index 904a5a12a85d..8878b75d68b4 100644
--- a/drivers/net/ethernet/sun/sunvnet_common.c
+++ b/drivers/net/ethernet/sun/sunvnet_common.c
@@ -704,9 +704,8 @@ static int handle_mcast(struct vnet_port *port, void *msgbuf)
return 0;
}
-/* Got back a STOPPED LDC message on port. If the queue is stopped,
- * wake it up so that we'll send out another START message at the
- * next TX.
+/* If the queue is stopped, wake it up so that we'll
+ * send out another START message at the next TX.
*/
static void maybe_tx_wakeup(struct vnet_port *port)
{
@@ -734,6 +733,7 @@ EXPORT_SYMBOL_GPL(sunvnet_port_is_up_common);
static int vnet_event_napi(struct vnet_port *port, int budget)
{
+ struct net_device *dev = VNET_PORT_TO_NET_DEVICE(port);
struct vio_driver_state *vio = &port->vio;
int tx_wakeup, err;
int npkts = 0;
@@ -747,6 +747,16 @@ ldc_ctrl:
if (event == LDC_EVENT_RESET) {
vnet_port_reset(port);
vio_port_up(vio);
+
+ /* If the device is running but its tx queue was
+ * stopped (due to flow control), restart it.
+ * This is necessary since vnet_port_reset()
+ * clears the tx drings and thus we may never get
+ * back a VIO_TYPE_DATA ACK packet - which is
+ * the normal mechanism to restart the tx queue.
+ */
+ if (netif_running(dev))
+ maybe_tx_wakeup(port);
}
port->rx_event = 0;
return 0;
@@ -1583,16 +1593,6 @@ void sunvnet_set_rx_mode_common(struct net_device *dev, struct vnet *vp)
}
EXPORT_SYMBOL_GPL(sunvnet_set_rx_mode_common);
-int sunvnet_change_mtu_common(struct net_device *dev, int new_mtu)
-{
- if (new_mtu < 68 || new_mtu > 65535)
- return -EINVAL;
-
- dev->mtu = new_mtu;
- return 0;
-}
-EXPORT_SYMBOL_GPL(sunvnet_change_mtu_common);
-
int sunvnet_set_mac_addr_common(struct net_device *dev, void *p)
{
return -EINVAL;
diff --git a/drivers/net/ethernet/sun/sunvnet_common.h b/drivers/net/ethernet/sun/sunvnet_common.h
index bd36528af972..ce5c824128a3 100644
--- a/drivers/net/ethernet/sun/sunvnet_common.h
+++ b/drivers/net/ethernet/sun/sunvnet_common.h
@@ -15,6 +15,8 @@
#define VNET_MINTSO 2048 /* VIO protocol's minimum TSO len */
#define VNET_MAXTSO 65535 /* VIO protocol's maximum TSO len */
+#define VNET_MAX_MTU 65535
+
/* VNET packets are sent in buffers with the first 6 bytes skipped
* so that after the ethernet header the IPv4/IPv6 headers are aligned
* properly.
@@ -125,7 +127,6 @@ int sunvnet_close_common(struct net_device *dev);
void sunvnet_set_rx_mode_common(struct net_device *dev, struct vnet *vp);
int sunvnet_set_mac_addr_common(struct net_device *dev, void *p);
void sunvnet_tx_timeout_common(struct net_device *dev);
-int sunvnet_change_mtu_common(struct net_device *dev, int new_mtu);
int sunvnet_start_xmit_common(struct sk_buff *skb, struct net_device *dev,
struct vnet_port *(*vnet_tx_port)
(struct sk_buff *, struct net_device *));
diff --git a/drivers/net/ethernet/synopsys/dwc_eth_qos.c b/drivers/net/ethernet/synopsys/dwc_eth_qos.c
index 97d64bfed465..09f5a67da35e 100644
--- a/drivers/net/ethernet/synopsys/dwc_eth_qos.c
+++ b/drivers/net/ethernet/synopsys/dwc_eth_qos.c
@@ -2211,7 +2211,7 @@ static int dwceqos_start_xmit(struct sk_buff *skb, struct net_device *ndev)
tx_error:
dwceqos_tx_rollback(lp, &trans);
- dev_kfree_skb(skb);
+ dev_kfree_skb_any(skb);
return 0;
}
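
The dwc_eth_qos change swaps dev_kfree_skb() for dev_kfree_skb_any() on the xmit error path. ndo_start_xmit can be entered with hard IRQs disabled (netpoll being the usual case), where plain dev_kfree_skb() is not safe; the _any variant checks the context and defers the free through dev_kfree_skb_irq() when needed. The rule in miniature (demo_xmit_drop is a hypothetical drop path, not driver code):

#include <linux/netdevice.h>
#include <linux/skbuff.h>

static netdev_tx_t demo_xmit_drop(struct sk_buff *skb,
				  struct net_device *ndev)
{
	/* may run in any context: use the _any variant, which picks
	 * dev_kfree_skb_irq() or dev_kfree_skb() as appropriate
	 */
	dev_kfree_skb_any(skb);
	ndev->stats.tx_dropped++;
	return NETDEV_TX_OK;
}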
diff --git a/drivers/net/ethernet/tehuti/tehuti.c b/drivers/net/ethernet/tehuti/tehuti.c
index 7108c68f16d3..baa3e4a5731c 100644
--- a/drivers/net/ethernet/tehuti/tehuti.c
+++ b/drivers/net/ethernet/tehuti/tehuti.c
@@ -761,16 +761,6 @@ static int bdx_change_mtu(struct net_device *ndev, int new_mtu)
{
ENTER;
- if (new_mtu == ndev->mtu)
- RET(0);
-
- /* enforce minimum frame size */
- if (new_mtu < ETH_ZLEN) {
- netdev_err(ndev, "mtu %d is less then minimal %d\n",
- new_mtu, ETH_ZLEN);
- RET(-EINVAL);
- }
-
ndev->mtu = new_mtu;
if (netif_running(ndev)) {
bdx_close(ndev);
@@ -2057,6 +2047,10 @@ bdx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
#ifdef BDX_LLTX
ndev->features |= NETIF_F_LLTX;
#endif
+ /* MTU range: 60 - 16384 */
+ ndev->min_mtu = ETH_ZLEN;
+ ndev->max_mtu = BDX_MAX_MTU;
+
spin_lock_init(&priv->tx_lock);
/*bdx_hw_reset(priv); */
diff --git a/drivers/net/ethernet/tehuti/tehuti.h b/drivers/net/ethernet/tehuti/tehuti.h
index 709ebd6e28b4..8e7b4c9abf21 100644
--- a/drivers/net/ethernet/tehuti/tehuti.h
+++ b/drivers/net/ethernet/tehuti/tehuti.h
@@ -74,6 +74,9 @@
* ifconfig eth1 txqueuelen 3000 - to change it at runtime */
#define BDX_NDEV_TXQ_LEN 3000
+/* Max MTU for Jumbo Frame mode, per the tehutinetworks.net Features FAQ, is 16k */
+#define BDX_MAX_MTU (16 * 1024)
+
#define FIFO_SIZE 4096
#define FIFO_EXTRA_SPACE 1024
diff --git a/drivers/net/ethernet/ti/Kconfig b/drivers/net/ethernet/ti/Kconfig
index 9904d740d528..296c8efd0038 100644
--- a/drivers/net/ethernet/ti/Kconfig
+++ b/drivers/net/ethernet/ti/Kconfig
@@ -74,13 +74,14 @@ config TI_CPSW
will be called cpsw.
config TI_CPTS
- bool "TI Common Platform Time Sync (CPTS) Support"
- depends on TI_CPSW
- select PTP_1588_CLOCK
+ tristate "TI Common Platform Time Sync (CPTS) Support"
+ depends on TI_CPSW || TI_KEYSTONE_NETCP
+ imply PTP_1588_CLOCK
---help---
This driver supports the Common Platform Time Sync unit of
- the CPSW Ethernet Switch. The unit can time stamp PTP UDP/IPv4
- and Layer 2 packets, and the driver offers a PTP Hardware Clock.
+ the CPSW Ethernet Switch and Keystone 2 1g/10g Switch Subsystem.
+ The unit can time stamp PTP UDP/IPv4 and Layer 2 packets, and the
+ driver offers a PTP Hardware Clock.
config TI_KEYSTONE_NETCP
tristate "TI Keystone NETCP Core Support"
diff --git a/drivers/net/ethernet/ti/Makefile b/drivers/net/ethernet/ti/Makefile
index d420d9413e4a..1e7c10bf8713 100644
--- a/drivers/net/ethernet/ti/Makefile
+++ b/drivers/net/ethernet/ti/Makefile
@@ -12,8 +12,9 @@ obj-$(CONFIG_TI_DAVINCI_MDIO) += davinci_mdio.o
obj-$(CONFIG_TI_DAVINCI_CPDMA) += davinci_cpdma.o
obj-$(CONFIG_TI_CPSW_PHY_SEL) += cpsw-phy-sel.o
obj-$(CONFIG_TI_CPSW_ALE) += cpsw_ale.o
+obj-$(CONFIG_TI_CPTS) += cpts.o
obj-$(CONFIG_TI_CPSW) += ti_cpsw.o
-ti_cpsw-y := cpsw.o cpts.o
+ti_cpsw-y := cpsw.o
obj-$(CONFIG_TI_KEYSTONE_NETCP) += keystone_netcp.o
keystone_netcp-y := netcp_core.o
diff --git a/drivers/net/ethernet/ti/cpmac.c b/drivers/net/ethernet/ti/cpmac.c
index 28097be2ff28..77c88fcf2b86 100644
--- a/drivers/net/ethernet/ti/cpmac.c
+++ b/drivers/net/ethernet/ti/cpmac.c
@@ -1068,7 +1068,6 @@ static const struct net_device_ops cpmac_netdev_ops = {
.ndo_tx_timeout = cpmac_tx_timeout,
.ndo_set_rx_mode = cpmac_set_multicast_list,
.ndo_do_ioctl = cpmac_ioctl,
- .ndo_change_mtu = eth_change_mtu,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
};
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index b9087b828eff..b203143647e6 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -365,6 +365,11 @@ static inline void slave_write(struct cpsw_slave *slave, u32 val, u32 offset)
__raw_writel(val, slave->regs + offset);
}
+struct cpsw_vector {
+ struct cpdma_chan *ch;
+ int budget;
+};
+
struct cpsw_common {
struct device *dev;
struct cpsw_platform_data data;
@@ -380,8 +385,8 @@ struct cpsw_common {
int rx_packet_max;
struct cpsw_slave *slaves;
struct cpdma_ctlr *dma;
- struct cpdma_chan *txch[CPSW_MAX_QUEUES];
- struct cpdma_chan *rxch[CPSW_MAX_QUEUES];
+ struct cpsw_vector txv[CPSW_MAX_QUEUES];
+ struct cpsw_vector rxv[CPSW_MAX_QUEUES];
struct cpsw_ale *ale;
bool quirk_irq;
bool rx_irq_disabled;
@@ -389,6 +394,7 @@ struct cpsw_common {
u32 irqs_table[IRQ_NUM];
struct cpts *cpts;
int rx_ch_num, tx_ch_num;
+ int speed;
};
struct cpsw_priv {
@@ -741,13 +747,100 @@ requeue:
return;
}
- ch = cpsw->rxch[skb_get_queue_mapping(new_skb)];
+ ch = cpsw->rxv[skb_get_queue_mapping(new_skb)].ch;
ret = cpdma_chan_submit(ch, new_skb, new_skb->data,
skb_tailroom(new_skb), 0);
if (WARN_ON(ret < 0))
dev_kfree_skb_any(new_skb);
}
+static void cpsw_split_res(struct net_device *ndev)
+{
+ struct cpsw_priv *priv = netdev_priv(ndev);
+ u32 consumed_rate = 0, bigest_rate = 0;
+ struct cpsw_common *cpsw = priv->cpsw;
+ struct cpsw_vector *txv = cpsw->txv;
+ int i, ch_weight, rlim_ch_num = 0;
+ int budget, bigest_rate_ch = 0;
+ u32 ch_rate, max_rate;
+ int ch_budget = 0;
+
+ for (i = 0; i < cpsw->tx_ch_num; i++) {
+ ch_rate = cpdma_chan_get_rate(txv[i].ch);
+ if (!ch_rate)
+ continue;
+
+ rlim_ch_num++;
+ consumed_rate += ch_rate;
+ }
+
+ if (cpsw->tx_ch_num == rlim_ch_num) {
+ max_rate = consumed_rate;
+ } else if (!rlim_ch_num) {
+ ch_budget = CPSW_POLL_WEIGHT / cpsw->tx_ch_num;
+ bigest_rate = 0;
+ max_rate = consumed_rate;
+ } else {
+ max_rate = cpsw->speed * 1000;
+
+ /* if max_rate is less than expected due to reduced link speed,
+ * split proportionally according to the next potential max speed
+ */
+ if (max_rate < consumed_rate)
+ max_rate *= 10;
+
+ if (max_rate < consumed_rate)
+ max_rate *= 10;
+
+ ch_budget = (consumed_rate * CPSW_POLL_WEIGHT) / max_rate;
+ ch_budget = (CPSW_POLL_WEIGHT - ch_budget) /
+ (cpsw->tx_ch_num - rlim_ch_num);
+ bigest_rate = (max_rate - consumed_rate) /
+ (cpsw->tx_ch_num - rlim_ch_num);
+ }
+
+ /* split tx weight/budget */
+ budget = CPSW_POLL_WEIGHT;
+ for (i = 0; i < cpsw->tx_ch_num; i++) {
+ ch_rate = cpdma_chan_get_rate(txv[i].ch);
+ if (ch_rate) {
+ txv[i].budget = (ch_rate * CPSW_POLL_WEIGHT) / max_rate;
+ if (!txv[i].budget)
+ txv[i].budget++;
+ if (ch_rate > bigest_rate) {
+ bigest_rate_ch = i;
+ bigest_rate = ch_rate;
+ }
+
+ ch_weight = (ch_rate * 100) / max_rate;
+ if (!ch_weight)
+ ch_weight++;
+ cpdma_chan_set_weight(cpsw->txv[i].ch, ch_weight);
+ } else {
+ txv[i].budget = ch_budget;
+ if (!bigest_rate_ch)
+ bigest_rate_ch = i;
+ cpdma_chan_set_weight(cpsw->txv[i].ch, 0);
+ }
+
+ budget -= txv[i].budget;
+ }
+
+ if (budget)
+ txv[bigest_rate_ch].budget += budget;
+
+ /* split rx budget */
+ budget = CPSW_POLL_WEIGHT;
+ ch_budget = budget / cpsw->rx_ch_num;
+ for (i = 0; i < cpsw->rx_ch_num; i++) {
+ cpsw->rxv[i].budget = ch_budget;
+ budget -= ch_budget;
+ }
+
+ if (budget)
+ cpsw->rxv[0].budget += budget;
+}
+
static irqreturn_t cpsw_tx_interrupt(int irq, void *dev_id)
{
struct cpsw_common *cpsw = dev_id;
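
cpsw_split_res() above divides the one NAPI budget (CPSW_POLL_WEIGHT, 64 in this driver) across the tx channels: channels with a configured rate get a share proportional to rate/max_rate (floored at 1 so they are never starved), the unlimited channels split the remainder evenly, and any rounding leftover is handed to the biggest-rate channel. A standalone sketch of the proportional step, with made-up numbers in the comment (DEMO_POLL_WEIGHT is a stand-in):

#define DEMO_POLL_WEIGHT 64	/* stand-in for CPSW_POLL_WEIGHT */

/* Share for one rate-limited channel.  E.g. on a 1000 Mbps link
 * (max_rate = 1000000 kbps), a channel capped at 100000 kbps gets
 * 64 * 100000 / 1000000 = 6 of the 64-packet budget.
 */
static int demo_ch_budget(unsigned int ch_rate, unsigned int max_rate)
{
	int budget = (ch_rate * DEMO_POLL_WEIGHT) / max_rate;

	return budget ? budget : 1;	/* never starve a channel */
}

The reworked poll loops below then consume txv[i].budget / rxv[i].budget per channel instead of letting one busy channel eat the whole budget.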
@@ -783,24 +876,25 @@ static irqreturn_t cpsw_rx_interrupt(int irq, void *dev_id)
static int cpsw_tx_poll(struct napi_struct *napi_tx, int budget)
{
u32 ch_map;
- int num_tx, ch;
+ int num_tx, cur_budget, ch;
struct cpsw_common *cpsw = napi_to_cpsw(napi_tx);
+ struct cpsw_vector *txv;
/* process every unprocessed channel */
ch_map = cpdma_ctrl_txchs_state(cpsw->dma);
- for (ch = 0, num_tx = 0; num_tx < budget; ch_map >>= 1, ch++) {
- if (!ch_map) {
- ch_map = cpdma_ctrl_txchs_state(cpsw->dma);
- if (!ch_map)
- break;
-
- ch = 0;
- }
-
+ for (ch = 0, num_tx = 0; ch_map; ch_map >>= 1, ch++) {
if (!(ch_map & 0x01))
continue;
- num_tx += cpdma_chan_process(cpsw->txch[ch], budget - num_tx);
+ txv = &cpsw->txv[ch];
+ if (unlikely(txv->budget > budget - num_tx))
+ cur_budget = budget - num_tx;
+ else
+ cur_budget = txv->budget;
+
+ num_tx += cpdma_chan_process(txv->ch, cur_budget);
+ if (num_tx >= budget)
+ break;
}
if (num_tx < budget) {
@@ -818,24 +912,25 @@ static int cpsw_tx_poll(struct napi_struct *napi_tx, int budget)
static int cpsw_rx_poll(struct napi_struct *napi_rx, int budget)
{
u32 ch_map;
- int num_rx, ch;
+ int num_rx, cur_budget, ch;
struct cpsw_common *cpsw = napi_to_cpsw(napi_rx);
+ struct cpsw_vector *rxv;
/* process every unprocessed channel */
ch_map = cpdma_ctrl_rxchs_state(cpsw->dma);
- for (ch = 0, num_rx = 0; num_rx < budget; ch_map >>= 1, ch++) {
- if (!ch_map) {
- ch_map = cpdma_ctrl_rxchs_state(cpsw->dma);
- if (!ch_map)
- break;
-
- ch = 0;
- }
-
+ for (ch = 0, num_rx = 0; ch_map; ch_map >>= 1, ch++) {
if (!(ch_map & 0x01))
continue;
- num_rx += cpdma_chan_process(cpsw->rxch[ch], budget - num_rx);
+ rxv = &cpsw->rxv[ch];
+ if (unlikely(rxv->budget > budget - num_rx))
+ cur_budget = budget - num_rx;
+ else
+ cur_budget = rxv->budget;
+
+ num_rx += cpdma_chan_process(rxv->ch, cur_budget);
+ if (num_rx >= budget)
+ break;
}
if (num_rx < budget) {
@@ -926,14 +1021,56 @@ static void _cpsw_adjust_link(struct cpsw_slave *slave,
slave->mac_control = mac_control;
}
+static int cpsw_get_common_speed(struct cpsw_common *cpsw)
+{
+ int i, speed;
+
+ for (i = 0, speed = 0; i < cpsw->data.slaves; i++)
+ if (cpsw->slaves[i].phy && cpsw->slaves[i].phy->link)
+ speed += cpsw->slaves[i].phy->speed;
+
+ return speed;
+}
+
+static int cpsw_need_resplit(struct cpsw_common *cpsw)
+{
+ int i, rlim_ch_num;
+ int speed, ch_rate;
+
+ /* re-split resources only in case speed was changed */
+ speed = cpsw_get_common_speed(cpsw);
+ if (speed == cpsw->speed || !speed)
+ return 0;
+
+ cpsw->speed = speed;
+
+ for (i = 0, rlim_ch_num = 0; i < cpsw->tx_ch_num; i++) {
+ ch_rate = cpdma_chan_get_rate(cpsw->txv[i].ch);
+ if (!ch_rate)
+ break;
+
+ rlim_ch_num++;
+ }
+
+ /* cases not dependent on speed */
+ if (!rlim_ch_num || rlim_ch_num == cpsw->tx_ch_num)
+ return 0;
+
+ return 1;
+}
+
static void cpsw_adjust_link(struct net_device *ndev)
{
struct cpsw_priv *priv = netdev_priv(ndev);
+ struct cpsw_common *cpsw = priv->cpsw;
bool link = false;
for_each_slave(priv, _cpsw_adjust_link, priv, &link);
if (link) {
+ if (cpsw_need_resplit(cpsw))
+ cpsw_split_res(ndev);
+
netif_carrier_on(ndev);
if (netif_running(ndev))
netif_tx_wake_all_queues(ndev);
@@ -1075,7 +1212,7 @@ static void cpsw_get_ethtool_stats(struct net_device *ndev,
cpsw_gstrings_stats[l].stat_offset);
for (ch = 0; ch < cpsw->rx_ch_num; ch++) {
- cpdma_chan_get_stats(cpsw->rxch[ch], &ch_stats);
+ cpdma_chan_get_stats(cpsw->rxv[ch].ch, &ch_stats);
for (i = 0; i < CPSW_STATS_CH_LEN; i++, l++) {
p = (u8 *)&ch_stats +
cpsw_gstrings_ch_stats[i].stat_offset;
@@ -1084,7 +1221,7 @@ static void cpsw_get_ethtool_stats(struct net_device *ndev,
}
for (ch = 0; ch < cpsw->tx_ch_num; ch++) {
- cpdma_chan_get_stats(cpsw->txch[ch], &ch_stats);
+ cpdma_chan_get_stats(cpsw->txv[ch].ch, &ch_stats);
for (i = 0; i < CPSW_STATS_CH_LEN; i++, l++) {
p = (u8 *)&ch_stats +
cpsw_gstrings_ch_stats[i].stat_offset;
@@ -1281,7 +1418,7 @@ static int cpsw_fill_rx_channels(struct cpsw_priv *priv)
int ch, i, ret;
for (ch = 0; ch < cpsw->rx_ch_num; ch++) {
- ch_buf_num = cpdma_chan_get_rx_buf_num(cpsw->rxch[ch]);
+ ch_buf_num = cpdma_chan_get_rx_buf_num(cpsw->rxv[ch].ch);
for (i = 0; i < ch_buf_num; i++) {
skb = __netdev_alloc_skb_ip_align(priv->ndev,
cpsw->rx_packet_max,
@@ -1292,8 +1429,9 @@ static int cpsw_fill_rx_channels(struct cpsw_priv *priv)
}
skb_set_queue_mapping(skb, ch);
- ret = cpdma_chan_submit(cpsw->rxch[ch], skb, skb->data,
- skb_tailroom(skb), 0);
+ ret = cpdma_chan_submit(cpsw->rxv[ch].ch, skb,
+ skb->data, skb_tailroom(skb),
+ 0);
if (ret < 0) {
cpsw_err(priv, ifup,
"cannot submit skb to channel %d rx, error %d\n",
@@ -1376,10 +1514,6 @@ static int cpsw_ndo_open(struct net_device *ndev)
ALE_ALL_PORTS, ALE_ALL_PORTS, 0, 0);
if (!cpsw_common_res_usage_state(cpsw)) {
- /* setup tx dma to fixed prio and zero offset */
- cpdma_control_set(cpsw->dma, CPDMA_TX_PRIO_FIXED, 1);
- cpdma_control_set(cpsw->dma, CPDMA_RX_BUFFER_OFFSET, 0);
-
/* disable priority elevation */
__raw_writel(0, &cpsw->regs->ptype);
@@ -1406,9 +1540,7 @@ static int cpsw_ndo_open(struct net_device *ndev)
if (ret < 0)
goto err_cleanup;
- if (cpts_register(cpsw->dev, cpsw->cpts,
- cpsw->data.cpts_clock_mult,
- cpsw->data.cpts_clock_shift))
+ if (cpts_register(cpsw->cpts))
dev_err(priv->dev, "error registering cpts device\n");
}
@@ -1427,8 +1559,6 @@ static int cpsw_ndo_open(struct net_device *ndev)
if (cpsw->data.dual_emac)
cpsw->slaves[priv->emac_port].open_stat = true;
- netif_tx_start_all_queues(ndev);
-
return 0;
err_cleanup:
@@ -1457,6 +1587,10 @@ static int cpsw_ndo_stop(struct net_device *ndev)
cpsw_ale_stop(cpsw->ale);
}
for_each_slave(priv, cpsw_slave_stop, cpsw);
+
+ if (cpsw_need_resplit(cpsw))
+ cpsw_split_res(ndev);
+
pm_runtime_put_sync(cpsw->dev);
if (cpsw->data.dual_emac)
cpsw->slaves[priv->emac_port].open_stat = false;
@@ -1481,7 +1615,7 @@ static netdev_tx_t cpsw_ndo_start_xmit(struct sk_buff *skb,
}
if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
- cpsw->cpts->tx_enable)
+ cpts_is_tx_enabled(cpsw->cpts))
skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
skb_tx_timestamp(skb);
@@ -1490,7 +1624,7 @@ static netdev_tx_t cpsw_ndo_start_xmit(struct sk_buff *skb,
if (q_idx >= cpsw->tx_ch_num)
q_idx = q_idx % cpsw->tx_ch_num;
- txch = cpsw->txch[q_idx];
+ txch = cpsw->txv[q_idx].ch;
ret = cpsw_tx_packet_submit(priv, skb, txch);
if (unlikely(ret != 0)) {
cpsw_err(priv, tx_err, "desc submit failed\n");
@@ -1513,14 +1647,15 @@ fail:
return NETDEV_TX_BUSY;
}
-#ifdef CONFIG_TI_CPTS
+#if IS_ENABLED(CONFIG_TI_CPTS)
static void cpsw_hwtstamp_v1(struct cpsw_common *cpsw)
{
struct cpsw_slave *slave = &cpsw->slaves[cpsw->data.active_slave];
u32 ts_en, seq_id;
- if (!cpsw->cpts->tx_enable && !cpsw->cpts->rx_enable) {
+ if (!cpts_is_tx_enabled(cpsw->cpts) &&
+ !cpts_is_rx_enabled(cpsw->cpts)) {
slave_write(slave, 0, CPSW1_TS_CTL);
return;
}
@@ -1528,10 +1663,10 @@ static void cpsw_hwtstamp_v1(struct cpsw_common *cpsw)
seq_id = (30 << CPSW_V1_SEQ_ID_OFS_SHIFT) | ETH_P_1588;
ts_en = EVENT_MSG_BITS << CPSW_V1_MSG_TYPE_OFS;
- if (cpsw->cpts->tx_enable)
+ if (cpts_is_tx_enabled(cpsw->cpts))
ts_en |= CPSW_V1_TS_TX_EN;
- if (cpsw->cpts->rx_enable)
+ if (cpts_is_rx_enabled(cpsw->cpts))
ts_en |= CPSW_V1_TS_RX_EN;
slave_write(slave, ts_en, CPSW1_TS_CTL);
@@ -1544,30 +1679,27 @@ static void cpsw_hwtstamp_v2(struct cpsw_priv *priv)
struct cpsw_common *cpsw = priv->cpsw;
u32 ctrl, mtype;
- if (cpsw->data.dual_emac)
- slave = &cpsw->slaves[priv->emac_port];
- else
- slave = &cpsw->slaves[cpsw->data.active_slave];
+ slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)];
ctrl = slave_read(slave, CPSW2_CONTROL);
switch (cpsw->version) {
case CPSW_VERSION_2:
ctrl &= ~CTRL_V2_ALL_TS_MASK;
- if (cpsw->cpts->tx_enable)
+ if (cpts_is_tx_enabled(cpsw->cpts))
ctrl |= CTRL_V2_TX_TS_BITS;
- if (cpsw->cpts->rx_enable)
+ if (cpts_is_rx_enabled(cpsw->cpts))
ctrl |= CTRL_V2_RX_TS_BITS;
break;
case CPSW_VERSION_3:
default:
ctrl &= ~CTRL_V3_ALL_TS_MASK;
- if (cpsw->cpts->tx_enable)
+ if (cpts_is_tx_enabled(cpsw->cpts))
ctrl |= CTRL_V3_TX_TS_BITS;
- if (cpsw->cpts->rx_enable)
+ if (cpts_is_rx_enabled(cpsw->cpts))
ctrl |= CTRL_V3_RX_TS_BITS;
break;
}
@@ -1603,7 +1735,7 @@ static int cpsw_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
switch (cfg.rx_filter) {
case HWTSTAMP_FILTER_NONE:
- cpts->rx_enable = 0;
+ cpts_rx_enable(cpts, 0);
break;
case HWTSTAMP_FILTER_ALL:
case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
@@ -1619,14 +1751,14 @@ static int cpsw_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
case HWTSTAMP_FILTER_PTP_V2_EVENT:
case HWTSTAMP_FILTER_PTP_V2_SYNC:
case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
- cpts->rx_enable = 1;
+ cpts_rx_enable(cpts, 1);
cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
break;
default:
return -ERANGE;
}
- cpts->tx_enable = cfg.tx_type == HWTSTAMP_TX_ON;
+ cpts_tx_enable(cpts, cfg.tx_type == HWTSTAMP_TX_ON);
switch (cpsw->version) {
case CPSW_VERSION_1:
@@ -1655,13 +1787,23 @@ static int cpsw_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
return -EOPNOTSUPP;
cfg.flags = 0;
- cfg.tx_type = cpts->tx_enable ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
- cfg.rx_filter = (cpts->rx_enable ?
+ cfg.tx_type = cpts_is_tx_enabled(cpts) ?
+ HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
+ cfg.rx_filter = (cpts_is_rx_enabled(cpts) ?
HWTSTAMP_FILTER_PTP_V2_EVENT : HWTSTAMP_FILTER_NONE);
return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
}
+#else
+static int cpsw_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
+{
+ return -EOPNOTSUPP;
+}
+static int cpsw_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
+{
+ return -EOPNOTSUPP;
+}
#endif /* CONFIG_TI_CPTS */
static int cpsw_ndo_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
@@ -1674,12 +1816,10 @@ static int cpsw_ndo_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
return -EINVAL;
switch (cmd) {
-#ifdef CONFIG_TI_CPTS
case SIOCSHWTSTAMP:
return cpsw_hwtstamp_set(dev, req);
case SIOCGHWTSTAMP:
return cpsw_hwtstamp_get(dev, req);
-#endif
}
if (!cpsw->slaves[slave_no].phy)
@@ -1697,8 +1837,8 @@ static void cpsw_ndo_tx_timeout(struct net_device *ndev)
ndev->stats.tx_errors++;
cpsw_intr_disable(cpsw);
for (ch = 0; ch < cpsw->tx_ch_num; ch++) {
- cpdma_chan_stop(cpsw->txch[ch]);
- cpdma_chan_start(cpsw->txch[ch]);
+ cpdma_chan_stop(cpsw->txv[ch].ch);
+ cpdma_chan_start(cpsw->txv[ch].ch);
}
cpsw_intr_enable(cpsw);
@@ -1876,6 +2016,57 @@ static int cpsw_ndo_vlan_rx_kill_vid(struct net_device *ndev,
return ret;
}
+static int cpsw_ndo_set_tx_maxrate(struct net_device *ndev, int queue, u32 rate)
+{
+ struct cpsw_priv *priv = netdev_priv(ndev);
+ struct cpsw_common *cpsw = priv->cpsw;
+ struct cpsw_slave *slave;
+ u32 min_rate;
+ u32 ch_rate;
+ int i, ret;
+
+ ch_rate = netdev_get_tx_queue(ndev, queue)->tx_maxrate;
+ if (ch_rate == rate)
+ return 0;
+
+ ch_rate = rate * 1000;
+ min_rate = cpdma_chan_get_min_rate(cpsw->dma);
+ if (ch_rate && ch_rate < min_rate) {
+ dev_err(priv->dev, "The channel rate cannot be less than %dKbps",
+ min_rate);
+ return -EINVAL;
+ }
+
+ if (rate > cpsw->speed) {
+ dev_err(priv->dev, "The channel rate cannot be more than 2Gbps");
+ return -EINVAL;
+ }
+
+ ret = pm_runtime_get_sync(cpsw->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(cpsw->dev);
+ return ret;
+ }
+
+ ret = cpdma_chan_set_rate(cpsw->txv[queue].ch, ch_rate);
+ pm_runtime_put(cpsw->dev);
+
+ if (ret)
+ return ret;
+
+ /* update rates for the slaves' tx queues */
+ for (i = 0; i < cpsw->data.slaves; i++) {
+ slave = &cpsw->slaves[i];
+ if (!slave->ndev)
+ continue;
+
+ netdev_get_tx_queue(slave->ndev, queue)->tx_maxrate = rate;
+ }
+
+ cpsw_split_res(ndev);
+ return ret;
+}
+
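The new ndo_set_tx_maxrate hook is driven by the generic per-queue sysfs attribute, so no driver-specific tooling is involved. A minimal userspace sketch, assuming interface eth0 and queue index 1 (both illustrative); the written value is in Mbps, and 0 switches the shaper off:

    /* Sketch: set a per-queue tx rate limit through the standard sysfs
     * attribute that ndo_set_tx_maxrate() is wired to. "eth0" and queue 1
     * are assumptions for illustration.
     */
    #include <stdio.h>
    #include <stdlib.h>

    int main(void)
    {
        const char *path = "/sys/class/net/eth0/queues/tx-1/tx_maxrate";
        FILE *f = fopen(path, "w");

        if (!f) {
            perror("fopen");
            return EXIT_FAILURE;
        }
        /* rate is in Mbps; writing 0 turns the shaper off */
        fprintf(f, "%u\n", 100);
        fclose(f);
        return 0;
    }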
static const struct net_device_ops cpsw_netdev_ops = {
.ndo_open = cpsw_ndo_open,
.ndo_stop = cpsw_ndo_stop,
@@ -1883,9 +2074,9 @@ static const struct net_device_ops cpsw_netdev_ops = {
.ndo_set_mac_address = cpsw_ndo_set_mac_address,
.ndo_do_ioctl = cpsw_ndo_ioctl,
.ndo_validate_addr = eth_validate_addr,
- .ndo_change_mtu = eth_change_mtu,
.ndo_tx_timeout = cpsw_ndo_tx_timeout,
.ndo_set_rx_mode = cpsw_ndo_set_rx_mode,
+ .ndo_set_tx_maxrate = cpsw_ndo_set_tx_maxrate,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = cpsw_ndo_poll_controller,
#endif
@@ -1935,10 +2126,10 @@ static void cpsw_set_msglevel(struct net_device *ndev, u32 value)
priv->msg_enable = value;
}
+#if IS_ENABLED(CONFIG_TI_CPTS)
static int cpsw_get_ts_info(struct net_device *ndev,
struct ethtool_ts_info *info)
{
-#ifdef CONFIG_TI_CPTS
struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
info->so_timestamping =
@@ -1955,7 +2146,12 @@ static int cpsw_get_ts_info(struct net_device *ndev,
info->rx_filters =
(1 << HWTSTAMP_FILTER_NONE) |
(1 << HWTSTAMP_FILTER_PTP_V2_EVENT);
+ return 0;
+}
#else
+static int cpsw_get_ts_info(struct net_device *ndev,
+ struct ethtool_ts_info *info)
+{
info->so_timestamping =
SOF_TIMESTAMPING_TX_SOFTWARE |
SOF_TIMESTAMPING_RX_SOFTWARE |
@@ -1963,31 +2159,34 @@ static int cpsw_get_ts_info(struct net_device *ndev,
info->phc_index = -1;
info->tx_types = 0;
info->rx_filters = 0;
-#endif
return 0;
}
+#endif
-static int cpsw_get_settings(struct net_device *ndev,
- struct ethtool_cmd *ecmd)
+static int cpsw_get_link_ksettings(struct net_device *ndev,
+ struct ethtool_link_ksettings *ecmd)
{
struct cpsw_priv *priv = netdev_priv(ndev);
struct cpsw_common *cpsw = priv->cpsw;
int slave_no = cpsw_slave_index(cpsw, priv);
if (cpsw->slaves[slave_no].phy)
- return phy_ethtool_gset(cpsw->slaves[slave_no].phy, ecmd);
+ return phy_ethtool_ksettings_get(cpsw->slaves[slave_no].phy,
+ ecmd);
else
return -EOPNOTSUPP;
}
-static int cpsw_set_settings(struct net_device *ndev, struct ethtool_cmd *ecmd)
+static int cpsw_set_link_ksettings(struct net_device *ndev,
+ const struct ethtool_link_ksettings *ecmd)
{
struct cpsw_priv *priv = netdev_priv(ndev);
struct cpsw_common *cpsw = priv->cpsw;
int slave_no = cpsw_slave_index(cpsw, priv);
if (cpsw->slaves[slave_no].phy)
- return phy_ethtool_sset(cpsw->slaves[slave_no].phy, ecmd);
+ return phy_ethtool_ksettings_set(cpsw->slaves[slave_no].phy,
+ ecmd);
else
return -EOPNOTSUPP;
}
@@ -2102,28 +2301,31 @@ static int cpsw_update_channels_res(struct cpsw_priv *priv, int ch_num, int rx)
int (*poll)(struct napi_struct *, int);
struct cpsw_common *cpsw = priv->cpsw;
void (*handler)(void *, int, int);
- struct cpdma_chan **chan;
+ struct netdev_queue *queue;
+ struct cpsw_vector *vec;
int ret, *ch;
if (rx) {
ch = &cpsw->rx_ch_num;
- chan = cpsw->rxch;
+ vec = cpsw->rxv;
handler = cpsw_rx_handler;
poll = cpsw_rx_poll;
} else {
ch = &cpsw->tx_ch_num;
- chan = cpsw->txch;
+ vec = cpsw->txv;
handler = cpsw_tx_handler;
poll = cpsw_tx_poll;
}
while (*ch < ch_num) {
- chan[*ch] = cpdma_chan_create(cpsw->dma, *ch, handler, rx);
+ vec[*ch].ch = cpdma_chan_create(cpsw->dma, *ch, handler, rx);
+ queue = netdev_get_tx_queue(priv->ndev, *ch);
+ queue->tx_maxrate = 0;
- if (IS_ERR(chan[*ch]))
- return PTR_ERR(chan[*ch]);
+ if (IS_ERR(vec[*ch].ch))
+ return PTR_ERR(vec[*ch].ch);
- if (!chan[*ch])
+ if (!vec[*ch].ch)
return -EINVAL;
cpsw_info(priv, ifup, "created new %d %s channel\n", *ch,
@@ -2134,7 +2336,7 @@ static int cpsw_update_channels_res(struct cpsw_priv *priv, int ch_num, int rx)
while (*ch > ch_num) {
(*ch)--;
- ret = cpdma_chan_destroy(chan[*ch]);
+ ret = cpdma_chan_destroy(vec[*ch].ch);
if (ret)
return ret;
@@ -2221,6 +2423,8 @@ static int cpsw_set_channels(struct net_device *ndev,
if (ret)
goto err;
+ cpsw_split_res(ndev);
+
/* After this receive is started */
cpdma_ctlr_start(cpsw->dma);
cpsw_intr_enable(cpsw);
@@ -2239,14 +2443,48 @@ err:
return ret;
}
+static int cpsw_get_eee(struct net_device *ndev, struct ethtool_eee *edata)
+{
+ struct cpsw_priv *priv = netdev_priv(ndev);
+ struct cpsw_common *cpsw = priv->cpsw;
+ int slave_no = cpsw_slave_index(cpsw, priv);
+
+ if (cpsw->slaves[slave_no].phy)
+ return phy_ethtool_get_eee(cpsw->slaves[slave_no].phy, edata);
+ else
+ return -EOPNOTSUPP;
+}
+
+static int cpsw_set_eee(struct net_device *ndev, struct ethtool_eee *edata)
+{
+ struct cpsw_priv *priv = netdev_priv(ndev);
+ struct cpsw_common *cpsw = priv->cpsw;
+ int slave_no = cpsw_slave_index(cpsw, priv);
+
+ if (cpsw->slaves[slave_no].phy)
+ return phy_ethtool_set_eee(cpsw->slaves[slave_no].phy, edata);
+ else
+ return -EOPNOTSUPP;
+}
+
+static int cpsw_nway_reset(struct net_device *ndev)
+{
+ struct cpsw_priv *priv = netdev_priv(ndev);
+ struct cpsw_common *cpsw = priv->cpsw;
+ int slave_no = cpsw_slave_index(cpsw, priv);
+
+ if (cpsw->slaves[slave_no].phy)
+ return genphy_restart_aneg(cpsw->slaves[slave_no].phy);
+ else
+ return -EOPNOTSUPP;
+}
+
static const struct ethtool_ops cpsw_ethtool_ops = {
.get_drvinfo = cpsw_get_drvinfo,
.get_msglevel = cpsw_get_msglevel,
.set_msglevel = cpsw_set_msglevel,
.get_link = ethtool_op_get_link,
.get_ts_info = cpsw_get_ts_info,
- .get_settings = cpsw_get_settings,
- .set_settings = cpsw_set_settings,
.get_coalesce = cpsw_get_coalesce,
.set_coalesce = cpsw_set_coalesce,
.get_sset_count = cpsw_get_sset_count,
@@ -2262,6 +2500,11 @@ static const struct ethtool_ops cpsw_ethtool_ops = {
.complete = cpsw_ethtool_op_complete,
.get_channels = cpsw_get_channels,
.set_channels = cpsw_set_channels,
+ .get_link_ksettings = cpsw_get_link_ksettings,
+ .set_link_ksettings = cpsw_set_link_ksettings,
+ .get_eee = cpsw_get_eee,
+ .set_eee = cpsw_set_eee,
+ .nway_reset = cpsw_nway_reset,
};
static void cpsw_slave_init(struct cpsw_slave *slave, struct cpsw_common *cpsw,
@@ -2300,18 +2543,6 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data,
}
data->active_slave = prop;
- if (of_property_read_u32(node, "cpts_clock_mult", &prop)) {
- dev_err(&pdev->dev, "Missing cpts_clock_mult property in the DT.\n");
- return -EINVAL;
- }
- data->cpts_clock_mult = prop;
-
- if (of_property_read_u32(node, "cpts_clock_shift", &prop)) {
- dev_err(&pdev->dev, "Missing cpts_clock_shift property in the DT.\n");
- return -EINVAL;
- }
- data->cpts_clock_shift = prop;
-
data->slave_data = devm_kzalloc(&pdev->dev, data->slaves
* sizeof(struct cpsw_slave_data),
GFP_KERNEL);
@@ -2570,6 +2801,7 @@ static int cpsw_probe(struct platform_device *pdev)
struct cpdma_params dma_params;
struct cpsw_ale_params ale_params;
void __iomem *ss_regs;
+ void __iomem *cpts_regs;
struct resource *res, *ss_res;
const struct of_device_id *of_id;
struct gpio_descs *mode;
@@ -2597,12 +2829,6 @@ static int cpsw_probe(struct platform_device *pdev)
priv->dev = &ndev->dev;
priv->msg_enable = netif_msg_init(debug_level, CPSW_DEBUG);
cpsw->rx_packet_max = max(rx_packet_max, 128);
- cpsw->cpts = devm_kzalloc(&pdev->dev, sizeof(struct cpts), GFP_KERNEL);
- if (!cpsw->cpts) {
- dev_err(&pdev->dev, "error allocating cpts\n");
- ret = -ENOMEM;
- goto clean_ndev_ret;
- }
mode = devm_gpiod_get_array_optional(&pdev->dev, "mode", GPIOD_OUT_LOW);
if (IS_ERR(mode)) {
@@ -2690,7 +2916,7 @@ static int cpsw_probe(struct platform_device *pdev)
switch (cpsw->version) {
case CPSW_VERSION_1:
cpsw->host_port_regs = ss_regs + CPSW1_HOST_PORT_OFFSET;
- cpsw->cpts->reg = ss_regs + CPSW1_CPTS_OFFSET;
+ cpts_regs = ss_regs + CPSW1_CPTS_OFFSET;
cpsw->hw_stats = ss_regs + CPSW1_HW_STATS;
dma_params.dmaregs = ss_regs + CPSW1_CPDMA_OFFSET;
dma_params.txhdp = ss_regs + CPSW1_STATERAM_OFFSET;
@@ -2704,7 +2930,7 @@ static int cpsw_probe(struct platform_device *pdev)
case CPSW_VERSION_3:
case CPSW_VERSION_4:
cpsw->host_port_regs = ss_regs + CPSW2_HOST_PORT_OFFSET;
- cpsw->cpts->reg = ss_regs + CPSW2_CPTS_OFFSET;
+ cpts_regs = ss_regs + CPSW2_CPTS_OFFSET;
cpsw->hw_stats = ss_regs + CPSW2_HW_STATS;
dma_params.dmaregs = ss_regs + CPSW2_CPDMA_OFFSET;
dma_params.txhdp = ss_regs + CPSW2_STATERAM_OFFSET;
@@ -2742,6 +2968,7 @@ static int cpsw_probe(struct platform_device *pdev)
dma_params.desc_align = 16;
dma_params.has_ext_regs = true;
dma_params.desc_hw_addr = dma_params.desc_mem_phys;
+ dma_params.bus_freq_mhz = cpsw->bus_freq_mhz;
cpsw->dma = cpdma_ctlr_create(&dma_params);
if (!cpsw->dma) {
@@ -2750,9 +2977,9 @@ static int cpsw_probe(struct platform_device *pdev)
goto clean_dt_ret;
}
- cpsw->txch[0] = cpdma_chan_create(cpsw->dma, 0, cpsw_tx_handler, 0);
- cpsw->rxch[0] = cpdma_chan_create(cpsw->dma, 0, cpsw_rx_handler, 1);
- if (WARN_ON(!cpsw->rxch[0] || !cpsw->txch[0])) {
+ cpsw->txv[0].ch = cpdma_chan_create(cpsw->dma, 0, cpsw_tx_handler, 0);
+ cpsw->rxv[0].ch = cpdma_chan_create(cpsw->dma, 0, cpsw_rx_handler, 1);
+ if (WARN_ON(!cpsw->rxv[0].ch || !cpsw->txv[0].ch)) {
dev_err(priv->dev, "error initializing dma channels\n");
ret = -ENOMEM;
goto clean_dma_ret;
@@ -2770,6 +2997,12 @@ static int cpsw_probe(struct platform_device *pdev)
goto clean_dma_ret;
}
+ cpsw->cpts = cpts_create(cpsw->dev, cpts_regs, cpsw->dev->of_node);
+ if (IS_ERR(cpsw->cpts)) {
+ ret = PTR_ERR(cpsw->cpts);
+ goto clean_ale_ret;
+ }
+
ndev->irq = platform_get_irq(pdev, 1);
if (ndev->irq < 0) {
dev_err(priv->dev, "error getting irq resource\n");
@@ -2828,6 +3061,7 @@ static int cpsw_probe(struct platform_device *pdev)
ndev->ethtool_ops = &cpsw_ethtool_ops;
netif_napi_add(ndev, &cpsw->napi_rx, cpsw_rx_poll, CPSW_POLL_WEIGHT);
netif_tx_napi_add(ndev, &cpsw->napi_tx, cpsw_tx_poll, CPSW_POLL_WEIGHT);
+ cpsw_split_res(ndev);
/* register the network device */
SET_NETDEV_DEV(ndev, &pdev->dev);
@@ -2885,6 +3119,7 @@ static int cpsw_remove(struct platform_device *pdev)
unregister_netdev(cpsw->slaves[1].ndev);
unregister_netdev(ndev);
+ cpts_release(cpsw->cpts);
cpsw_ale_destroy(cpsw->ale);
cpdma_ctlr_destroy(cpsw->dma);
cpsw_remove_dt(pdev);
diff --git a/drivers/net/ethernet/ti/cpsw.h b/drivers/net/ethernet/ti/cpsw.h
index 16b54c6f32c2..6c3037aa2cd3 100644
--- a/drivers/net/ethernet/ti/cpsw.h
+++ b/drivers/net/ethernet/ti/cpsw.h
@@ -31,8 +31,6 @@ struct cpsw_platform_data {
u32 channels; /* number of cpdma channels (symmetric) */
u32 slaves; /* number of slave cpgmac ports */
u32 active_slave; /* time stamping, ethtool and SIOCGMIIPHY slave */
- u32 cpts_clock_mult; /* convert input clock ticks to nanoseconds */
- u32 cpts_clock_shift; /* convert input clock ticks to nanoseconds */
u32 ale_entries; /* ale table size */
u32 bd_ram_size; /*buffer descriptor ram size */
u32 mac_control; /* Mac control register */
diff --git a/drivers/net/ethernet/ti/cpts.c b/drivers/net/ethernet/ti/cpts.c
index 85a55b4ff8c0..0c0d48e5bea4 100644
--- a/drivers/net/ethernet/ti/cpts.c
+++ b/drivers/net/ethernet/ti/cpts.c
@@ -31,10 +31,8 @@
#include "cpts.h"
-#ifdef CONFIG_TI_CPTS
-
-#define cpts_read32(c, r) __raw_readl(&c->reg->r)
-#define cpts_write32(c, v, r) __raw_writel(v, &c->reg->r)
+#define cpts_read32(c, r) readl_relaxed(&c->reg->r)
+#define cpts_write32(c, v, r) writel_relaxed(v, &c->reg->r)
static int event_expired(struct cpts_event *event)
{
@@ -59,6 +57,26 @@ static int cpts_fifo_pop(struct cpts *cpts, u32 *high, u32 *low)
return -1;
}
+static int cpts_purge_events(struct cpts *cpts)
+{
+ struct list_head *this, *next;
+ struct cpts_event *event;
+ int removed = 0;
+
+ list_for_each_safe(this, next, &cpts->events) {
+ event = list_entry(this, struct cpts_event, list);
+ if (event_expired(event)) {
+ list_del_init(&event->list);
+ list_add(&event->list, &cpts->pool);
+ ++removed;
+ }
+ }
+
+ if (removed)
+ pr_debug("cpts: event pool cleaned up %d\n", removed);
+ return removed ? 0 : -1;
+}
+
/*
* Returns zero if matching event type was found.
*/
@@ -71,10 +89,12 @@ static int cpts_fifo_read(struct cpts *cpts, int match)
for (i = 0; i < CPTS_FIFO_DEPTH; i++) {
if (cpts_fifo_pop(cpts, &hi, &lo))
break;
- if (list_empty(&cpts->pool)) {
- pr_err("cpts: event pool is empty\n");
+
+ if (list_empty(&cpts->pool) && cpts_purge_events(cpts)) {
+ pr_err("cpts: event pool empty\n");
return -1;
}
+
event = list_first_entry(&cpts->pool, struct cpts_event, list);
event->tmo = jiffies + 2;
event->high = hi;
@@ -223,27 +243,9 @@ static void cpts_overflow_check(struct work_struct *work)
struct timespec64 ts;
struct cpts *cpts = container_of(work, struct cpts, overflow_work.work);
- cpts_write32(cpts, CPTS_EN, control);
- cpts_write32(cpts, TS_PEND_EN, int_enable);
cpts_ptp_gettime(&cpts->info, &ts);
pr_debug("cpts overflow check at %lld.%09lu\n", ts.tv_sec, ts.tv_nsec);
- schedule_delayed_work(&cpts->overflow_work, CPTS_OVERFLOW_PERIOD);
-}
-
-static void cpts_clk_init(struct device *dev, struct cpts *cpts)
-{
- cpts->refclk = devm_clk_get(dev, "cpts");
- if (IS_ERR(cpts->refclk)) {
- dev_err(dev, "Failed to get cpts refclk\n");
- cpts->refclk = NULL;
- return;
- }
- clk_prepare_enable(cpts->refclk);
-}
-
-static void cpts_clk_release(struct cpts *cpts)
-{
- clk_disable(cpts->refclk);
+ schedule_delayed_work(&cpts->overflow_work, cpts->ov_check_period);
}
static int cpts_match(struct sk_buff *skb, unsigned int ptp_class,
@@ -334,6 +336,7 @@ void cpts_rx_timestamp(struct cpts *cpts, struct sk_buff *skb)
memset(ssh, 0, sizeof(*ssh));
ssh->hwtstamp = ns_to_ktime(ns);
}
+EXPORT_SYMBOL_GPL(cpts_rx_timestamp);
void cpts_tx_timestamp(struct cpts *cpts, struct sk_buff *skb)
{
@@ -349,60 +352,170 @@ void cpts_tx_timestamp(struct cpts *cpts, struct sk_buff *skb)
ssh.hwtstamp = ns_to_ktime(ns);
skb_tstamp_tx(skb, &ssh);
}
+EXPORT_SYMBOL_GPL(cpts_tx_timestamp);
-#endif /*CONFIG_TI_CPTS*/
-
-int cpts_register(struct device *dev, struct cpts *cpts,
- u32 mult, u32 shift)
+int cpts_register(struct cpts *cpts)
{
-#ifdef CONFIG_TI_CPTS
int err, i;
- unsigned long flags;
-
- cpts->info = cpts_info;
- cpts->clock = ptp_clock_register(&cpts->info, dev);
- if (IS_ERR(cpts->clock)) {
- err = PTR_ERR(cpts->clock);
- cpts->clock = NULL;
- return err;
- }
- spin_lock_init(&cpts->lock);
-
- cpts->cc.read = cpts_systim_read;
- cpts->cc.mask = CLOCKSOURCE_MASK(32);
- cpts->cc_mult = mult;
- cpts->cc.mult = mult;
- cpts->cc.shift = shift;
INIT_LIST_HEAD(&cpts->events);
INIT_LIST_HEAD(&cpts->pool);
for (i = 0; i < CPTS_MAX_EVENTS; i++)
list_add(&cpts->pool_data[i].list, &cpts->pool);
- cpts_clk_init(dev, cpts);
+ clk_enable(cpts->refclk);
+
cpts_write32(cpts, CPTS_EN, control);
cpts_write32(cpts, TS_PEND_EN, int_enable);
- spin_lock_irqsave(&cpts->lock, flags);
timecounter_init(&cpts->tc, &cpts->cc, ktime_to_ns(ktime_get_real()));
- spin_unlock_irqrestore(&cpts->lock, flags);
-
- INIT_DELAYED_WORK(&cpts->overflow_work, cpts_overflow_check);
- schedule_delayed_work(&cpts->overflow_work, CPTS_OVERFLOW_PERIOD);
+ cpts->clock = ptp_clock_register(&cpts->info, cpts->dev);
+ if (IS_ERR(cpts->clock)) {
+ err = PTR_ERR(cpts->clock);
+ cpts->clock = NULL;
+ goto err_ptp;
+ }
cpts->phc_index = ptp_clock_index(cpts->clock);
-#endif
+
+ schedule_delayed_work(&cpts->overflow_work, cpts->ov_check_period);
return 0;
+
+err_ptp:
+ clk_disable(cpts->refclk);
+ return err;
}
+EXPORT_SYMBOL_GPL(cpts_register);
void cpts_unregister(struct cpts *cpts)
{
-#ifdef CONFIG_TI_CPTS
- if (cpts->clock) {
- ptp_clock_unregister(cpts->clock);
- cancel_delayed_work_sync(&cpts->overflow_work);
+ if (WARN_ON(!cpts->clock))
+ return;
+
+ cancel_delayed_work_sync(&cpts->overflow_work);
+
+ ptp_clock_unregister(cpts->clock);
+ cpts->clock = NULL;
+
+ cpts_write32(cpts, 0, int_enable);
+ cpts_write32(cpts, 0, control);
+
+ clk_disable(cpts->refclk);
+}
+EXPORT_SYMBOL_GPL(cpts_unregister);
+
+static void cpts_calc_mult_shift(struct cpts *cpts)
+{
+ u64 frac, maxsec, ns;
+ u32 freq;
+
+ freq = clk_get_rate(cpts->refclk);
+
+ /* Calc the maximum number of seconds the counter can run before
+ * wrapping around.
+ */
+ maxsec = cpts->cc.mask;
+ do_div(maxsec, freq);
+ /* limit the conversion range to 10 sec, as larger values produce
+ * too small mult factors and so reduce the conversion accuracy
+ */
+ if (maxsec > 10)
+ maxsec = 10;
+
+ /* Calc overflow check period (maxsec / 2) */
+ cpts->ov_check_period = (HZ * maxsec) / 2;
+ dev_info(cpts->dev, "cpts: overflow check period %lu (jiffies)\n",
+ cpts->ov_check_period);
+
+ if (cpts->cc.mult || cpts->cc.shift)
+ return;
+
+ clocks_calc_mult_shift(&cpts->cc.mult, &cpts->cc.shift,
+ freq, NSEC_PER_SEC, maxsec);
+
+ frac = 0;
+ ns = cyclecounter_cyc2ns(&cpts->cc, freq, cpts->cc.mask, &frac);
+
+ dev_info(cpts->dev,
+ "CPTS: ref_clk_freq:%u calc_mult:%u calc_shift:%u error:%lld nsec/sec\n",
+ freq, cpts->cc.mult, cpts->cc.shift, (ns - NSEC_PER_SEC));
+}
+
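cpts_calc_mult_shift() delegates the actual mult/shift selection to the kernel's clocks_calc_mult_shift(). A standalone sketch of that selection, restated from kernel/time/clocksource.c with an assumed 250 MHz refclk; the pair satisfies ns = (cycles * mult) >> shift while keeping a maxsec-long product inside 64 bits:

    #include <stdint.h>
    #include <stdio.h>

    static void calc_mult_shift(uint32_t *mult, uint32_t *shift,
                                uint32_t from, uint32_t to, uint32_t maxsec)
    {
        uint64_t tmp;
        uint32_t sft, sftacc = 32;

        /* shift budget so maxsec worth of input cycles cannot overflow */
        tmp = ((uint64_t)maxsec * from) >> 32;
        while (tmp) {
            tmp >>= 1;
            sftacc--;
        }
        /* largest shift (best precision) whose mult still fits the budget */
        for (sft = 32; sft > 0; sft--) {
            tmp = ((uint64_t)to << sft) + from / 2;
            tmp /= from;
            if ((tmp >> sftacc) == 0)
                break;
        }
        *mult = tmp;
        *shift = sft;
    }

    int main(void)
    {
        uint32_t mult, shift;

        calc_mult_shift(&mult, &shift, 250000000u, 1000000000u, 10);
        printf("mult=%u shift=%u\n", mult, shift);
        return 0;
    }

For 250 MHz this yields mult = 2147483648 and shift = 29, i.e. exactly 4 ns per cycle.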
+static int cpts_of_parse(struct cpts *cpts, struct device_node *node)
+{
+ int ret = -EINVAL;
+ u32 prop;
+
+ if (!of_property_read_u32(node, "cpts_clock_mult", &prop))
+ cpts->cc.mult = prop;
+
+ if (!of_property_read_u32(node, "cpts_clock_shift", &prop))
+ cpts->cc.shift = prop;
+
+ if ((cpts->cc.mult && !cpts->cc.shift) ||
+ (!cpts->cc.mult && cpts->cc.shift))
+ goto of_error;
+
+ return 0;
+
+of_error:
+ dev_err(cpts->dev, "CPTS: cpts_clock_mult and cpts_clock_shift must be given together\n");
+ return ret;
+}
+
+struct cpts *cpts_create(struct device *dev, void __iomem *regs,
+ struct device_node *node)
+{
+ struct cpts *cpts;
+ int ret;
+
+ cpts = devm_kzalloc(dev, sizeof(*cpts), GFP_KERNEL);
+ if (!cpts)
+ return ERR_PTR(-ENOMEM);
+
+ cpts->dev = dev;
+ cpts->reg = (struct cpsw_cpts __iomem *)regs;
+ spin_lock_init(&cpts->lock);
+ INIT_DELAYED_WORK(&cpts->overflow_work, cpts_overflow_check);
+
+ ret = cpts_of_parse(cpts, node);
+ if (ret)
+ return ERR_PTR(ret);
+
+ cpts->refclk = devm_clk_get(dev, "cpts");
+ if (IS_ERR(cpts->refclk)) {
+ dev_err(dev, "Failed to get cpts refclk\n");
+ return ERR_CAST(cpts->refclk);
}
- if (cpts->refclk)
- cpts_clk_release(cpts);
-#endif
+
+ clk_prepare(cpts->refclk);
+
+ cpts->cc.read = cpts_systim_read;
+ cpts->cc.mask = CLOCKSOURCE_MASK(32);
+ cpts->info = cpts_info;
+
+ cpts_calc_mult_shift(cpts);
+ /* save the original cc.mult value, as it can be modified
+ * by cpts_ptp_adjfreq().
+ */
+ cpts->cc_mult = cpts->cc.mult;
+
+ return cpts;
}
+EXPORT_SYMBOL_GPL(cpts_create);
+
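Taken together with the cpsw changes above, the reworked API splits the cpts lifetime in two: create/release bracket the device, register/unregister bracket the interface being up. A consumer-side sketch in in-tree style (not a standalone program; names are illustrative):

    #include <linux/err.h>
    #include "cpts.h"

    static int example_probe(struct device *dev, void __iomem *cpts_regs,
                             struct device_node *node, struct cpts **out)
    {
        struct cpts *cpts;

        cpts = cpts_create(dev, cpts_regs, node);
        if (IS_ERR(cpts))
            return PTR_ERR(cpts);   /* e.g. missing "cpts" refclk */
        *out = cpts;                /* NULL if CONFIG_TI_CPTS is off */
        return 0;
    }

    static int example_open(struct cpts *cpts)
    {
        /* enables the refclk, registers the PTP clock, arms the
         * overflow worker; the !CONFIG_TI_CPTS stub just returns 0
         */
        return cpts_register(cpts);
    }

    static void example_stop(struct cpts *cpts)
    {
        cpts_unregister(cpts);      /* stops hardware, disables refclk */
    }

    static void example_remove(struct cpts *cpts)
    {
        cpts_release(cpts);         /* unprepares the refclk */
    }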
+void cpts_release(struct cpts *cpts)
+{
+ if (!cpts)
+ return;
+
+ if (WARN_ON(!cpts->refclk))
+ return;
+
+ clk_unprepare(cpts->refclk);
+}
+EXPORT_SYMBOL_GPL(cpts_release);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("TI CPTS driver");
+MODULE_AUTHOR("Richard Cochran <richardcochran@gmail.com>");
diff --git a/drivers/net/ethernet/ti/cpts.h b/drivers/net/ethernet/ti/cpts.h
index 69a46b92c7d6..c96eca2b1b46 100644
--- a/drivers/net/ethernet/ti/cpts.h
+++ b/drivers/net/ethernet/ti/cpts.h
@@ -20,11 +20,14 @@
#ifndef _TI_CPTS_H_
#define _TI_CPTS_H_
+#if IS_ENABLED(CONFIG_TI_CPTS)
+
#include <linux/clk.h>
#include <linux/clkdev.h>
#include <linux/clocksource.h>
#include <linux/device.h>
#include <linux/list.h>
+#include <linux/of.h>
#include <linux/ptp_clock_kernel.h>
#include <linux/skbuff.h>
#include <linux/timecounter.h>
@@ -94,9 +97,6 @@ enum {
CPTS_EV_TX, /* Ethernet Transmit Event */
};
-/* This covers any input clock up to about 500 MHz. */
-#define CPTS_OVERFLOW_PERIOD (HZ * 8)
-
#define CPTS_FIFO_DEPTH 16
#define CPTS_MAX_EVENTS 32
@@ -108,10 +108,10 @@ struct cpts_event {
};
struct cpts {
+ struct device *dev;
struct cpsw_cpts __iomem *reg;
int tx_enable;
int rx_enable;
-#ifdef CONFIG_TI_CPTS
struct ptp_clock_info info;
struct ptp_clock *clock;
spinlock_t lock; /* protects time registers */
@@ -124,22 +124,86 @@ struct cpts {
struct list_head events;
struct list_head pool;
struct cpts_event pool_data[CPTS_MAX_EVENTS];
-#endif
+ unsigned long ov_check_period;
};
-#ifdef CONFIG_TI_CPTS
void cpts_rx_timestamp(struct cpts *cpts, struct sk_buff *skb);
void cpts_tx_timestamp(struct cpts *cpts, struct sk_buff *skb);
+int cpts_register(struct cpts *cpts);
+void cpts_unregister(struct cpts *cpts);
+struct cpts *cpts_create(struct device *dev, void __iomem *regs,
+ struct device_node *node);
+void cpts_release(struct cpts *cpts);
+
+static inline void cpts_rx_enable(struct cpts *cpts, int enable)
+{
+ cpts->rx_enable = enable;
+}
+
+static inline bool cpts_is_rx_enabled(struct cpts *cpts)
+{
+ return !!cpts->rx_enable;
+}
+
+static inline void cpts_tx_enable(struct cpts *cpts, int enable)
+{
+ cpts->tx_enable = enable;
+}
+
+static inline bool cpts_is_tx_enabled(struct cpts *cpts)
+{
+ return !!cpts->tx_enable;
+}
+
#else
+struct cpts;
+
static inline void cpts_rx_timestamp(struct cpts *cpts, struct sk_buff *skb)
{
}
static inline void cpts_tx_timestamp(struct cpts *cpts, struct sk_buff *skb)
{
}
+
+static inline
+struct cpts *cpts_create(struct device *dev, void __iomem *regs,
+ struct device_node *node)
+{
+ return NULL;
+}
+
+static inline void cpts_release(struct cpts *cpts)
+{
+}
+
+static inline int
+cpts_register(struct cpts *cpts)
+{
+ return 0;
+}
+
+static inline void cpts_unregister(struct cpts *cpts)
+{
+}
+
+static inline void cpts_rx_enable(struct cpts *cpts, int enable)
+{
+}
+
+static inline bool cpts_is_rx_enabled(struct cpts *cpts)
+{
+ return false;
+}
+
+static inline void cpts_tx_enable(struct cpts *cpts, int enable)
+{
+}
+
+static inline bool cpts_is_tx_enabled(struct cpts *cpts)
+{
+ return false;
+}
#endif
-int cpts_register(struct device *dev, struct cpts *cpts, u32 mult, u32 shift);
-void cpts_unregister(struct cpts *cpts);
#endif
diff --git a/drivers/net/ethernet/ti/davinci_cpdma.c b/drivers/net/ethernet/ti/davinci_cpdma.c
index c3f35f11a8fd..36518fc5c7cc 100644
--- a/drivers/net/ethernet/ti/davinci_cpdma.c
+++ b/drivers/net/ethernet/ti/davinci_cpdma.c
@@ -32,6 +32,7 @@
#define CPDMA_RXCONTROL 0x14
#define CPDMA_SOFTRESET 0x1c
#define CPDMA_RXTEARDOWN 0x18
+#define CPDMA_TX_PRI0_RATE 0x30
#define CPDMA_TXINTSTATRAW 0x80
#define CPDMA_TXINTSTATMASKED 0x84
#define CPDMA_TXINTMASKSET 0x88
@@ -68,6 +69,8 @@
#define CPDMA_TEARDOWN_VALUE 0xfffffffc
+#define CPDMA_MAX_RLIM_CNT 16384
+
struct cpdma_desc {
/* hardware fields */
u32 hw_next;
@@ -122,6 +125,33 @@ struct cpdma_chan {
struct cpdma_chan_stats stats;
/* offsets into dmaregs */
int int_set, int_clear, td;
+ int weight;
+ u32 rate_factor;
+ u32 rate;
+};
+
+struct cpdma_control_info {
+ u32 reg;
+ u32 shift, mask;
+ int access;
+#define ACCESS_RO BIT(0)
+#define ACCESS_WO BIT(1)
+#define ACCESS_RW (ACCESS_RO | ACCESS_WO)
+};
+
+static struct cpdma_control_info controls[] = {
+ [CPDMA_TX_RLIM] = {CPDMA_DMACONTROL, 8, 0xffff, ACCESS_RW},
+ [CPDMA_CMD_IDLE] = {CPDMA_DMACONTROL, 3, 1, ACCESS_WO},
+ [CPDMA_COPY_ERROR_FRAMES] = {CPDMA_DMACONTROL, 4, 1, ACCESS_RW},
+ [CPDMA_RX_OFF_LEN_UPDATE] = {CPDMA_DMACONTROL, 2, 1, ACCESS_RW},
+ [CPDMA_RX_OWNERSHIP_FLIP] = {CPDMA_DMACONTROL, 1, 1, ACCESS_RW},
+ [CPDMA_TX_PRIO_FIXED] = {CPDMA_DMACONTROL, 0, 1, ACCESS_RW},
+ [CPDMA_STAT_IDLE] = {CPDMA_DMASTATUS, 31, 1, ACCESS_RO},
+ [CPDMA_STAT_TX_ERR_CODE] = {CPDMA_DMASTATUS, 20, 0xf, ACCESS_RW},
+ [CPDMA_STAT_TX_ERR_CHAN] = {CPDMA_DMASTATUS, 16, 0x7, ACCESS_RW},
+ [CPDMA_STAT_RX_ERR_CODE] = {CPDMA_DMASTATUS, 12, 0xf, ACCESS_RW},
+ [CPDMA_STAT_RX_ERR_CHAN] = {CPDMA_DMASTATUS, 8, 0x7, ACCESS_RW},
+ [CPDMA_RX_BUFFER_OFFSET] = {CPDMA_RXBUFFOFS, 0, 0xffff, ACCESS_RW},
};
#define tx_chan_num(chan) (chan)
@@ -253,6 +283,211 @@ static void cpdma_desc_free(struct cpdma_desc_pool *pool,
gen_pool_free(pool->gen_pool, (unsigned long)desc, pool->desc_size);
}
+static int _cpdma_control_set(struct cpdma_ctlr *ctlr, int control, int value)
+{
+ struct cpdma_control_info *info = &controls[control];
+ u32 val;
+
+ if (!ctlr->params.has_ext_regs)
+ return -ENOTSUPP;
+
+ if (ctlr->state != CPDMA_STATE_ACTIVE)
+ return -EINVAL;
+
+ if (control < 0 || control >= ARRAY_SIZE(controls))
+ return -ENOENT;
+
+ if ((info->access & ACCESS_WO) != ACCESS_WO)
+ return -EPERM;
+
+ val = dma_reg_read(ctlr, info->reg);
+ val &= ~(info->mask << info->shift);
+ val |= (value & info->mask) << info->shift;
+ dma_reg_write(ctlr, info->reg, val);
+
+ return 0;
+}
+
+static int _cpdma_control_get(struct cpdma_ctlr *ctlr, int control)
+{
+ struct cpdma_control_info *info = &controls[control];
+ int ret;
+
+ if (!ctlr->params.has_ext_regs)
+ return -ENOTSUPP;
+
+ if (ctlr->state != CPDMA_STATE_ACTIVE)
+ return -EINVAL;
+
+ if (control < 0 || control >= ARRAY_SIZE(controls))
+ return -ENOENT;
+
+ if ((info->access & ACCESS_RO) != ACCESS_RO)
+ return -EPERM;
+
+ ret = (dma_reg_read(ctlr, info->reg) >> info->shift) & info->mask;
+ return ret;
+}
+
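The unlocked _cpdma_control_set()/_cpdma_control_get() helpers reduce every control to a (reg, shift, mask) triple and a read-modify-write. A standalone model of that update, using the CPDMA_TX_RLIM field layout from the table above:

    #include <stdint.h>
    #include <stdio.h>

    struct field { uint32_t shift, mask; };

    static uint32_t field_set(uint32_t reg, struct field f, uint32_t value)
    {
        reg &= ~(f.mask << f.shift);            /* clear the field */
        reg |= (value & f.mask) << f.shift;     /* write the new value */
        return reg;
    }

    int main(void)
    {
        /* CPDMA_TX_RLIM occupies bits [23:8] of DMACONTROL per the table */
        struct field tx_rlim = { .shift = 8, .mask = 0xffff };
        uint32_t dmacontrol = 0x1;  /* TX_PRIO_FIXED already set */

        dmacontrol = field_set(dmacontrol, tx_rlim, 0x0003);
        printf("DMACONTROL = 0x%08x\n", dmacontrol); /* 0x00000301 */
        return 0;
    }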
+/* cpdma_chan_set_chan_shaper - set shaper for a channel
+ * Has to be called under ctlr lock
+ */
+static int cpdma_chan_set_chan_shaper(struct cpdma_chan *chan)
+{
+ struct cpdma_ctlr *ctlr = chan->ctlr;
+ u32 rate_reg;
+ u32 rmask;
+ int ret;
+
+ if (!chan->rate)
+ return 0;
+
+ rate_reg = CPDMA_TX_PRI0_RATE + 4 * chan->chan_num;
+ dma_reg_write(ctlr, rate_reg, chan->rate_factor);
+
+ rmask = _cpdma_control_get(ctlr, CPDMA_TX_RLIM);
+ rmask |= chan->mask;
+
+ ret = _cpdma_control_set(ctlr, CPDMA_TX_RLIM, rmask);
+ return ret;
+}
+
+static int cpdma_chan_on(struct cpdma_chan *chan)
+{
+ struct cpdma_ctlr *ctlr = chan->ctlr;
+ struct cpdma_desc_pool *pool = ctlr->pool;
+ unsigned long flags;
+
+ spin_lock_irqsave(&chan->lock, flags);
+ if (chan->state != CPDMA_STATE_IDLE) {
+ spin_unlock_irqrestore(&chan->lock, flags);
+ return -EBUSY;
+ }
+ if (ctlr->state != CPDMA_STATE_ACTIVE) {
+ spin_unlock_irqrestore(&chan->lock, flags);
+ return -EINVAL;
+ }
+ dma_reg_write(ctlr, chan->int_set, chan->mask);
+ chan->state = CPDMA_STATE_ACTIVE;
+ if (chan->head) {
+ chan_write(chan, hdp, desc_phys(pool, chan->head));
+ if (chan->rxfree)
+ chan_write(chan, rxfree, chan->count);
+ }
+
+ spin_unlock_irqrestore(&chan->lock, flags);
+ return 0;
+}
+
+/* cpdma_chan_fit_rate - set the rate for a channel and check that the
+ * rate-limited channels stay contiguous, beginning from channel 0.
+ * rmask - returned mask of rate-limited channels
+ * prio_mode - returned flag: fixed-priority mode has to be enabled
+ * Returns 0 on success, a negative error code otherwise.
+ */
+static int cpdma_chan_fit_rate(struct cpdma_chan *ch, u32 rate,
+ u32 *rmask, int *prio_mode)
+{
+ struct cpdma_ctlr *ctlr = ch->ctlr;
+ struct cpdma_chan *chan;
+ u32 old_rate = ch->rate;
+ u32 new_rmask = 0;
+ int rlim = 1;
+ int i;
+
+ *prio_mode = 0;
+ for (i = tx_chan_num(0); i < tx_chan_num(CPDMA_MAX_CHANNELS); i++) {
+ chan = ctlr->channels[i];
+ if (!chan) {
+ rlim = 0;
+ continue;
+ }
+
+ if (chan == ch)
+ chan->rate = rate;
+
+ if (chan->rate) {
+ if (rlim) {
+ new_rmask |= chan->mask;
+ } else {
+ ch->rate = old_rate;
+ dev_err(ctlr->dev, "Prev channel of %dch is not rate limited\n",
+ chan->chan_num);
+ return -EINVAL;
+ }
+ } else {
+ *prio_mode = 1;
+ rlim = 0;
+ }
+ }
+
+ *rmask = new_rmask;
+ return 0;
+}
+
+static u32 cpdma_chan_set_factors(struct cpdma_ctlr *ctlr,
+ struct cpdma_chan *ch)
+{
+ u32 delta = UINT_MAX, prev_delta = UINT_MAX, best_delta = UINT_MAX;
+ u32 best_send_cnt = 0, best_idle_cnt = 0;
+ u32 new_rate, best_rate = 0, rate_reg;
+ u64 send_cnt, idle_cnt;
+ u32 min_send_cnt, freq;
+ u64 dividend, divisor;
+
+ if (!ch->rate) {
+ ch->rate_factor = 0;
+ goto set_factor;
+ }
+
+ freq = ctlr->params.bus_freq_mhz * 1000 * 32;
+ if (!freq) {
+ dev_err(ctlr->dev, "The bus frequency is not set\n");
+ return -EINVAL;
+ }
+
+ min_send_cnt = freq - ch->rate;
+ send_cnt = DIV_ROUND_UP(min_send_cnt, ch->rate);
+ while (send_cnt <= CPDMA_MAX_RLIM_CNT) {
+ dividend = ch->rate * send_cnt;
+ divisor = min_send_cnt;
+ idle_cnt = DIV_ROUND_CLOSEST_ULL(dividend, divisor);
+
+ dividend = freq * idle_cnt;
+ divisor = idle_cnt + send_cnt;
+ new_rate = DIV_ROUND_CLOSEST_ULL(dividend, divisor);
+
+ delta = new_rate >= ch->rate ? new_rate - ch->rate : delta;
+ if (delta < best_delta) {
+ best_delta = delta;
+ best_send_cnt = send_cnt;
+ best_idle_cnt = idle_cnt;
+ best_rate = new_rate;
+
+ if (!delta)
+ break;
+ }
+
+ if (prev_delta >= delta) {
+ prev_delta = delta;
+ send_cnt++;
+ continue;
+ }
+
+ idle_cnt++;
+ dividend = freq * idle_cnt;
+ send_cnt = DIV_ROUND_CLOSEST_ULL(dividend, ch->rate);
+ send_cnt -= idle_cnt;
+ prev_delta = UINT_MAX;
+ }
+
+ ch->rate = best_rate;
+ ch->rate_factor = best_send_cnt | (best_idle_cnt << 16);
+
+set_factor:
+ rate_reg = CPDMA_TX_PRI0_RATE + 4 * ch->chan_num;
+ dma_reg_write(ctlr, rate_reg, ch->rate_factor);
+ return 0;
+}
+
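A runnable restatement of the send/idle counter search above, useful for checking what rate_factor a given request produces. The 350 MHz bus and the 100 Mbps target are assumptions; freq mirrors the bus_freq_mhz * 1000 * 32 expression used by cpdma_chan_set_factors():

    #include <stdint.h>
    #include <stdio.h>

    #define MAX_RLIM_CNT    16384

    static uint64_t div_closest(uint64_t a, uint64_t b)
    {
        return (a + b / 2) / b;
    }

    int main(void)
    {
        uint32_t freq = 350u * 1000 * 32;   /* assumed 350 MHz bus */
        uint32_t rate = 100000;             /* requested rate, Kbps */
        uint32_t delta = UINT32_MAX, prev_delta = UINT32_MAX;
        uint32_t best_delta = UINT32_MAX;
        uint32_t best_send = 0, best_idle = 0, best_rate = 0, new_rate;
        uint32_t min_send_cnt = freq - rate;
        uint64_t send_cnt, idle_cnt;

        send_cnt = (min_send_cnt + rate - 1) / rate;  /* DIV_ROUND_UP */
        while (send_cnt <= MAX_RLIM_CNT) {
            idle_cnt = div_closest((uint64_t)rate * send_cnt, min_send_cnt);
            new_rate = div_closest((uint64_t)freq * idle_cnt,
                                   idle_cnt + send_cnt);

            delta = new_rate >= rate ? new_rate - rate : delta;
            if (delta < best_delta) {
                best_delta = delta;
                best_send = send_cnt;
                best_idle = idle_cnt;
                best_rate = new_rate;
                if (!delta)
                    break;
            }

            if (prev_delta >= delta) {
                prev_delta = delta;
                send_cnt++;
                continue;
            }

            /* past the optimum for this idle_cnt: bump it and re-seed */
            idle_cnt++;
            send_cnt = div_closest((uint64_t)freq * idle_cnt, rate) - idle_cnt;
            prev_delta = UINT32_MAX;
        }

        printf("rate=%u Kbps factor=0x%08x (send=%u idle=%u)\n",
               best_rate, best_send | (best_idle << 16), best_send, best_idle);
        return 0;
    }

For these inputs the loop converges immediately to send = 111, idle = 1, hitting the requested 100000 Kbps exactly.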
struct cpdma_ctlr *cpdma_ctlr_create(struct cpdma_params *params)
{
struct cpdma_ctlr *ctlr;
@@ -283,8 +518,9 @@ EXPORT_SYMBOL_GPL(cpdma_ctlr_create);
int cpdma_ctlr_start(struct cpdma_ctlr *ctlr)
{
+ struct cpdma_chan *chan;
unsigned long flags;
- int i;
+ int i, prio_mode;
spin_lock_irqsave(&ctlr->lock, flags);
if (ctlr->state != CPDMA_STATE_IDLE) {
@@ -320,10 +556,22 @@ int cpdma_ctlr_start(struct cpdma_ctlr *ctlr)
ctlr->state = CPDMA_STATE_ACTIVE;
+ prio_mode = 0;
for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) {
- if (ctlr->channels[i])
- cpdma_chan_start(ctlr->channels[i]);
+ chan = ctlr->channels[i];
+ if (chan) {
+ cpdma_chan_set_chan_shaper(chan);
+ cpdma_chan_on(chan);
+
+ /* keep fixed-priority mode off only if all tx channels are rate limited */
+ if (is_tx_chan(chan) && !chan->rate)
+ prio_mode = 1;
+ }
}
+
+ _cpdma_control_set(ctlr, CPDMA_TX_PRIO_FIXED, prio_mode);
+ _cpdma_control_set(ctlr, CPDMA_RX_BUFFER_OFFSET, 0);
+
spin_unlock_irqrestore(&ctlr->lock, flags);
return 0;
}
@@ -335,7 +583,7 @@ int cpdma_ctlr_stop(struct cpdma_ctlr *ctlr)
int i;
spin_lock_irqsave(&ctlr->lock, flags);
- if (ctlr->state == CPDMA_STATE_TEARDOWN) {
+ if (ctlr->state != CPDMA_STATE_ACTIVE) {
spin_unlock_irqrestore(&ctlr->lock, flags);
return -EINVAL;
}
@@ -422,30 +670,205 @@ u32 cpdma_ctrl_txchs_state(struct cpdma_ctlr *ctlr)
}
EXPORT_SYMBOL_GPL(cpdma_ctrl_txchs_state);
+static void cpdma_chan_set_descs(struct cpdma_ctlr *ctlr,
+ int rx, int desc_num,
+ int per_ch_desc)
+{
+ struct cpdma_chan *chan, *most_chan = NULL;
+ int desc_cnt = desc_num;
+ int most_dnum = 0;
+ int min, max, i;
+
+ if (!desc_num)
+ return;
+
+ if (rx) {
+ min = rx_chan_num(0);
+ max = rx_chan_num(CPDMA_MAX_CHANNELS);
+ } else {
+ min = tx_chan_num(0);
+ max = tx_chan_num(CPDMA_MAX_CHANNELS);
+ }
+
+ for (i = min; i < max; i++) {
+ chan = ctlr->channels[i];
+ if (!chan)
+ continue;
+
+ if (chan->weight)
+ chan->desc_num = (chan->weight * desc_num) / 100;
+ else
+ chan->desc_num = per_ch_desc;
+
+ desc_cnt -= chan->desc_num;
+
+ if (most_dnum < chan->desc_num) {
+ most_dnum = chan->desc_num;
+ most_chan = chan;
+ }
+ }
+ /* hand the rounding remainder to the most loaded channel */
+ most_chan->desc_num += desc_cnt;
+}
+
/**
* cpdma_chan_split_pool - Splits ctrl pool between all channels.
* Has to be called under ctlr lock
*/
-static void cpdma_chan_split_pool(struct cpdma_ctlr *ctlr)
+static int cpdma_chan_split_pool(struct cpdma_ctlr *ctlr)
{
+ int tx_per_ch_desc = 0, rx_per_ch_desc = 0;
struct cpdma_desc_pool *pool = ctlr->pool;
+ int free_rx_num = 0, free_tx_num = 0;
+ int rx_weight = 0, tx_weight = 0;
+ int tx_desc_num, rx_desc_num;
struct cpdma_chan *chan;
- int ch_desc_num;
- int i;
+ int i, tx_num = 0;
if (!ctlr->chan_num)
- return;
-
- /* calculate average size of pool slice */
- ch_desc_num = pool->num_desc / ctlr->chan_num;
+ return 0;
- /* split ctlr pool */
for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) {
chan = ctlr->channels[i];
- if (chan)
- chan->desc_num = ch_desc_num;
+ if (!chan)
+ continue;
+
+ if (is_rx_chan(chan)) {
+ if (!chan->weight)
+ free_rx_num++;
+ rx_weight += chan->weight;
+ } else {
+ if (!chan->weight)
+ free_tx_num++;
+ tx_weight += chan->weight;
+ tx_num++;
+ }
+ }
+
+ if (rx_weight > 100 || tx_weight > 100)
+ return -EINVAL;
+
+ tx_desc_num = (tx_num * pool->num_desc) / ctlr->chan_num;
+ rx_desc_num = pool->num_desc - tx_desc_num;
+
+ if (free_tx_num) {
+ tx_per_ch_desc = tx_desc_num - (tx_weight * tx_desc_num) / 100;
+ tx_per_ch_desc /= free_tx_num;
+ }
+ if (free_rx_num) {
+ rx_per_ch_desc = rx_desc_num - (rx_weight * rx_desc_num) / 100;
+ rx_per_ch_desc /= free_rx_num;
+ }
+
+ cpdma_chan_set_descs(ctlr, 0, tx_desc_num, tx_per_ch_desc);
+ cpdma_chan_set_descs(ctlr, 1, rx_desc_num, rx_per_ch_desc);
+
+ return 0;
+}
+
+/* cpdma_chan_set_weight - set the weight of a channel as a percentage.
+ * Tx and Rx channels have separate weight budgets of 100% each. The
+ * weight determines a channel's share of cpdma resources, notably the
+ * number of descriptors. The channel rate alone is not enough to derive
+ * a weight, as that would also require the maximum rate of the
+ * interface. If weight is 0, the channel uses whatever descriptors the
+ * weighted channels leave over.
+ */
+int cpdma_chan_set_weight(struct cpdma_chan *ch, int weight)
+{
+ struct cpdma_ctlr *ctlr = ch->ctlr;
+ unsigned long flags, ch_flags;
+ int ret;
+
+ spin_lock_irqsave(&ctlr->lock, flags);
+ spin_lock_irqsave(&ch->lock, ch_flags);
+ if (ch->weight == weight) {
+ spin_unlock_irqrestore(&ch->lock, ch_flags);
+ spin_unlock_irqrestore(&ctlr->lock, flags);
+ return 0;
}
+ ch->weight = weight;
+ spin_unlock_irqrestore(&ch->lock, ch_flags);
+
+ /* re-split pool using new channel weight */
+ ret = cpdma_chan_split_pool(ctlr);
+ spin_unlock_irqrestore(&ctlr->lock, flags);
+ return ret;
}
+EXPORT_SYMBOL_GPL(cpdma_chan_set_weight);
+
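The arithmetic behind cpdma_chan_split_pool()/cpdma_chan_set_descs() is easy to check in isolation. A standalone sketch for one direction's share of the pool (the 128-descriptor budget and the weights are made-up values): weighted channels take their percentage, unweighted ones split the rest, and the rounding remainder goes to the largest consumer:

    #include <stdio.h>

    struct ch { int weight, desc_num; };

    static void split(struct ch *chs, int n, int desc_num)
    {
        int weight = 0, free_num = 0, per_ch = 0, rest = desc_num;
        int most = 0, i;

        for (i = 0; i < n; i++) {
            weight += chs[i].weight;
            if (!chs[i].weight)
                free_num++;
        }
        if (free_num)   /* unweighted channels share what the weighted leave */
            per_ch = (desc_num - weight * desc_num / 100) / free_num;

        for (i = 0; i < n; i++) {
            chs[i].desc_num = chs[i].weight ?
                chs[i].weight * desc_num / 100 : per_ch;
            rest -= chs[i].desc_num;
            if (chs[i].desc_num > chs[most].desc_num)
                most = i;
        }
        chs[most].desc_num += rest;     /* rounding remainder */
    }

    int main(void)
    {
        struct ch tx[3] = { { .weight = 50 }, { .weight = 25 }, { 0 } };
        int i;

        split(tx, 3, 128);  /* 128 descriptors for the tx side */
        for (i = 0; i < 3; i++)
            printf("tx%d: %d descs\n", i, tx[i].desc_num);
        return 0;
    }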
+/* cpdma_chan_get_min_rate - get minimum allowed rate for channel
+ * Should be called before cpdma_chan_set_rate.
+ * Returns min rate in Kb/s
+ */
+u32 cpdma_chan_get_min_rate(struct cpdma_ctlr *ctlr)
+{
+ unsigned int dividend, divisor;
+
+ dividend = ctlr->params.bus_freq_mhz * 32 * 1000;
+ divisor = 1 + CPDMA_MAX_RLIM_CNT;
+
+ return DIV_ROUND_UP(dividend, divisor);
+}
+EXPORT_SYMBOL_GPL(cpdma_chan_get_min_rate);
+
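As a worked example of the floor this helper enforces: with an assumed 350 MHz bus, the slowest programmable rate is DIV_ROUND_UP(350 * 1000 * 32, 1 + 16384) = 684 Kbps:

    #include <stdio.h>

    int main(void)
    {
        unsigned int dividend = 350u * 1000 * 32;   /* bus_freq_mhz * 32000 */
        unsigned int divisor = 1 + 16384;           /* 1 + CPDMA_MAX_RLIM_CNT */

        printf("min rate = %u Kbps\n", (dividend + divisor - 1) / divisor);
        return 0;
    }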
+/* cpdma_chan_set_rate - limit the bandwidth of a transmit channel.
+ * The rate-limited channels have to be contiguous, beginning from the
+ * lowest channel number.
+ * ch - transmit channel the bandwidth is configured for
+ * rate - bandwidth in Kb/s; if 0, the shaper is switched off
+ */
+int cpdma_chan_set_rate(struct cpdma_chan *ch, u32 rate)
+{
+ struct cpdma_ctlr *ctlr = ch->ctlr;
+ unsigned long flags, ch_flags;
+ int ret, prio_mode;
+ u32 rmask;
+
+ if (!ch || !is_tx_chan(ch))
+ return -EINVAL;
+
+ if (ch->rate == rate)
+ return rate;
+
+ spin_lock_irqsave(&ctlr->lock, flags);
+ spin_lock_irqsave(&ch->lock, ch_flags);
+
+ ret = cpdma_chan_fit_rate(ch, rate, &rmask, &prio_mode);
+ if (ret)
+ goto err;
+
+ ret = cpdma_chan_set_factors(ctlr, ch);
+ if (ret)
+ goto err;
+
+ spin_unlock_irqrestore(&ch->lock, ch_flags);
+
+ /* on shapers */
+ _cpdma_control_set(ctlr, CPDMA_TX_RLIM, rmask);
+ _cpdma_control_set(ctlr, CPDMA_TX_PRIO_FIXED, prio_mode);
+ spin_unlock_irqrestore(&ctlr->lock, flags);
+ return ret;
+
+err:
+ spin_unlock_irqrestore(&ch->lock, ch_flags);
+ spin_unlock_irqrestore(&ctlr->lock, flags);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(cpdma_chan_set_rate);
+
+u32 cpdma_chan_get_rate(struct cpdma_chan *ch)
+{
+ unsigned long flags;
+ u32 rate;
+
+ spin_lock_irqsave(&ch->lock, flags);
+ rate = ch->rate;
+ spin_unlock_irqrestore(&ch->lock, flags);
+
+ return rate;
+}
+EXPORT_SYMBOL_GPL(cpdma_chan_get_rate);
struct cpdma_chan *cpdma_chan_create(struct cpdma_ctlr *ctlr, int chan_num,
cpdma_handler_fn handler, int rx_type)
@@ -474,7 +897,9 @@ struct cpdma_chan *cpdma_chan_create(struct cpdma_ctlr *ctlr, int chan_num,
chan->state = CPDMA_STATE_IDLE;
chan->chan_num = chan_num;
chan->handler = handler;
+ chan->rate = 0;
chan->desc_num = ctlr->pool->num_desc / 2;
+ chan->weight = 0;
if (is_rx_chan(chan)) {
chan->hdp = ctlr->params.rxhdp + offset;
@@ -533,7 +958,7 @@ int cpdma_chan_destroy(struct cpdma_chan *chan)
cpdma_chan_stop(chan);
ctlr->channels[chan->chan_num] = NULL;
ctlr->chan_num--;
-
+ devm_kfree(ctlr->dev, chan);
cpdma_chan_split_pool(ctlr);
spin_unlock_irqrestore(&ctlr->lock, flags);
@@ -768,28 +1193,20 @@ EXPORT_SYMBOL_GPL(cpdma_chan_process);
int cpdma_chan_start(struct cpdma_chan *chan)
{
- struct cpdma_ctlr *ctlr = chan->ctlr;
- struct cpdma_desc_pool *pool = ctlr->pool;
- unsigned long flags;
+ struct cpdma_ctlr *ctlr = chan->ctlr;
+ unsigned long flags;
+ int ret;
- spin_lock_irqsave(&chan->lock, flags);
- if (chan->state != CPDMA_STATE_IDLE) {
- spin_unlock_irqrestore(&chan->lock, flags);
- return -EBUSY;
- }
- if (ctlr->state != CPDMA_STATE_ACTIVE) {
- spin_unlock_irqrestore(&chan->lock, flags);
- return -EINVAL;
- }
- dma_reg_write(ctlr, chan->int_set, chan->mask);
- chan->state = CPDMA_STATE_ACTIVE;
- if (chan->head) {
- chan_write(chan, hdp, desc_phys(pool, chan->head));
- if (chan->rxfree)
- chan_write(chan, rxfree, chan->count);
- }
+ spin_lock_irqsave(&ctlr->lock, flags);
+ ret = cpdma_chan_set_chan_shaper(chan);
+ spin_unlock_irqrestore(&ctlr->lock, flags);
+ if (ret)
+ return ret;
+
+ ret = cpdma_chan_on(chan);
+ if (ret)
+ return ret;
- spin_unlock_irqrestore(&chan->lock, flags);
return 0;
}
EXPORT_SYMBOL_GPL(cpdma_chan_start);
@@ -874,93 +1291,27 @@ int cpdma_chan_int_ctrl(struct cpdma_chan *chan, bool enable)
return 0;
}
-struct cpdma_control_info {
- u32 reg;
- u32 shift, mask;
- int access;
-#define ACCESS_RO BIT(0)
-#define ACCESS_WO BIT(1)
-#define ACCESS_RW (ACCESS_RO | ACCESS_WO)
-};
-
-static struct cpdma_control_info controls[] = {
- [CPDMA_CMD_IDLE] = {CPDMA_DMACONTROL, 3, 1, ACCESS_WO},
- [CPDMA_COPY_ERROR_FRAMES] = {CPDMA_DMACONTROL, 4, 1, ACCESS_RW},
- [CPDMA_RX_OFF_LEN_UPDATE] = {CPDMA_DMACONTROL, 2, 1, ACCESS_RW},
- [CPDMA_RX_OWNERSHIP_FLIP] = {CPDMA_DMACONTROL, 1, 1, ACCESS_RW},
- [CPDMA_TX_PRIO_FIXED] = {CPDMA_DMACONTROL, 0, 1, ACCESS_RW},
- [CPDMA_STAT_IDLE] = {CPDMA_DMASTATUS, 31, 1, ACCESS_RO},
- [CPDMA_STAT_TX_ERR_CODE] = {CPDMA_DMASTATUS, 20, 0xf, ACCESS_RW},
- [CPDMA_STAT_TX_ERR_CHAN] = {CPDMA_DMASTATUS, 16, 0x7, ACCESS_RW},
- [CPDMA_STAT_RX_ERR_CODE] = {CPDMA_DMASTATUS, 12, 0xf, ACCESS_RW},
- [CPDMA_STAT_RX_ERR_CHAN] = {CPDMA_DMASTATUS, 8, 0x7, ACCESS_RW},
- [CPDMA_RX_BUFFER_OFFSET] = {CPDMA_RXBUFFOFS, 0, 0xffff, ACCESS_RW},
-};
-
int cpdma_control_get(struct cpdma_ctlr *ctlr, int control)
{
unsigned long flags;
- struct cpdma_control_info *info = &controls[control];
int ret;
spin_lock_irqsave(&ctlr->lock, flags);
-
- ret = -ENOTSUPP;
- if (!ctlr->params.has_ext_regs)
- goto unlock_ret;
-
- ret = -EINVAL;
- if (ctlr->state != CPDMA_STATE_ACTIVE)
- goto unlock_ret;
-
- ret = -ENOENT;
- if (control < 0 || control >= ARRAY_SIZE(controls))
- goto unlock_ret;
-
- ret = -EPERM;
- if ((info->access & ACCESS_RO) != ACCESS_RO)
- goto unlock_ret;
-
- ret = (dma_reg_read(ctlr, info->reg) >> info->shift) & info->mask;
-
-unlock_ret:
+ ret = _cpdma_control_get(ctlr, control);
spin_unlock_irqrestore(&ctlr->lock, flags);
+
return ret;
}
int cpdma_control_set(struct cpdma_ctlr *ctlr, int control, int value)
{
unsigned long flags;
- struct cpdma_control_info *info = &controls[control];
int ret;
- u32 val;
spin_lock_irqsave(&ctlr->lock, flags);
-
- ret = -ENOTSUPP;
- if (!ctlr->params.has_ext_regs)
- goto unlock_ret;
-
- ret = -EINVAL;
- if (ctlr->state != CPDMA_STATE_ACTIVE)
- goto unlock_ret;
-
- ret = -ENOENT;
- if (control < 0 || control >= ARRAY_SIZE(controls))
- goto unlock_ret;
-
- ret = -EPERM;
- if ((info->access & ACCESS_WO) != ACCESS_WO)
- goto unlock_ret;
-
- val = dma_reg_read(ctlr, info->reg);
- val &= ~(info->mask << info->shift);
- val |= (value & info->mask) << info->shift;
- dma_reg_write(ctlr, info->reg, val);
- ret = 0;
-
-unlock_ret:
+ ret = _cpdma_control_set(ctlr, control, value);
spin_unlock_irqrestore(&ctlr->lock, flags);
+
return ret;
}
EXPORT_SYMBOL_GPL(cpdma_control_set);
diff --git a/drivers/net/ethernet/ti/davinci_cpdma.h b/drivers/net/ethernet/ti/davinci_cpdma.h
index a07b22b12bc1..4a167db2abab 100644
--- a/drivers/net/ethernet/ti/davinci_cpdma.h
+++ b/drivers/net/ethernet/ti/davinci_cpdma.h
@@ -36,6 +36,7 @@ struct cpdma_params {
u32 desc_hw_addr;
int desc_mem_size;
int desc_align;
+ u32 bus_freq_mhz;
/*
* Some instances of embedded cpdma controllers have extra control and
@@ -90,8 +91,13 @@ int cpdma_chan_int_ctrl(struct cpdma_chan *chan, bool enable);
u32 cpdma_ctrl_rxchs_state(struct cpdma_ctlr *ctlr);
u32 cpdma_ctrl_txchs_state(struct cpdma_ctlr *ctlr);
bool cpdma_check_free_tx_desc(struct cpdma_chan *chan);
+int cpdma_chan_set_weight(struct cpdma_chan *ch, int weight);
+int cpdma_chan_set_rate(struct cpdma_chan *ch, u32 rate);
+u32 cpdma_chan_get_rate(struct cpdma_chan *ch);
+u32 cpdma_chan_get_min_rate(struct cpdma_ctlr *ctlr);
enum cpdma_control {
+ CPDMA_TX_RLIM, /* read-write */
CPDMA_CMD_IDLE, /* write-only */
CPDMA_COPY_ERROR_FRAMES, /* read-write */
CPDMA_RX_OFF_LEN_UPDATE, /* read-write */
diff --git a/drivers/net/ethernet/ti/netcp.h b/drivers/net/ethernet/ti/netcp.h
index 17a26a429b71..0f58c584ae09 100644
--- a/drivers/net/ethernet/ti/netcp.h
+++ b/drivers/net/ethernet/ti/netcp.h
@@ -121,7 +121,7 @@ struct netcp_packet {
bool rxtstamp_complete;
void *ts_context;
- int (*txtstamp_complete)(void *ctx, struct netcp_packet *pkt);
+ void (*txtstamp)(void *ctx, struct sk_buff *skb);
};
static inline u32 *netcp_push_psdata(struct netcp_packet *p_info,
diff --git a/drivers/net/ethernet/ti/netcp_core.c b/drivers/net/ethernet/ti/netcp_core.c
index 32516661f180..c243335ed649 100644
--- a/drivers/net/ethernet/ti/netcp_core.c
+++ b/drivers/net/ethernet/ti/netcp_core.c
@@ -100,6 +100,11 @@ struct netcp_intf_modpriv {
void *module_priv;
};
+struct netcp_tx_cb {
+ void *ts_context;
+ void (*txtstamp)(void *context, struct sk_buff *skb);
+};
+
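netcp_tx_cb rides in skb->cb, the 48-byte per-packet scratch area, so the tx-timestamp hook installed at submit time survives until completion with no extra allocation. A standalone model of the pattern (types and names are illustrative):

    #include <stdio.h>

    struct pkt {
        char cb[48];    /* mirrors the 48-byte skb->cb scratch space */
    };

    struct tx_cb {
        void *ctx;
        void (*txtstamp)(void *ctx, struct pkt *p);
    };

    static void stamp(void *ctx, struct pkt *p)
    {
        printf("tx complete, ctx=%s\n", (const char *)ctx);
    }

    int main(void)
    {
        struct pkt p;
        struct tx_cb *cb = (struct tx_cb *)p.cb;

        /* submit path: stash the completion hook in the packet itself */
        cb->ctx = "port1";
        cb->txtstamp = stamp;

        /* completion path: recover and invoke it */
        cb = (struct tx_cb *)p.cb;
        if (cb->txtstamp)
            cb->txtstamp(cb->ctx, &p);
        return 0;
    }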
static LIST_HEAD(netcp_devices);
static LIST_HEAD(netcp_modules);
static DEFINE_MUTEX(netcp_modules_lock);
@@ -544,6 +549,7 @@ int netcp_register_rxhook(struct netcp_intf *netcp_priv, int order,
return 0;
}
+EXPORT_SYMBOL_GPL(netcp_register_rxhook);
int netcp_unregister_rxhook(struct netcp_intf *netcp_priv, int order,
netcp_hook_rtn *hook_rtn, void *hook_data)
@@ -566,6 +572,7 @@ int netcp_unregister_rxhook(struct netcp_intf *netcp_priv, int order,
return -ENOENT;
}
+EXPORT_SYMBOL_GPL(netcp_unregister_rxhook);
static void netcp_frag_free(bool is_frag, void *ptr)
{
@@ -730,6 +737,7 @@ static int netcp_process_one_rx_packet(struct netcp_intf *netcp)
/* Call each of the RX hooks */
p_info.skb = skb;
+ skb->dev = netcp->ndev;
p_info.rxtstamp_complete = false;
list_for_each_entry(rx_hook, &netcp->rxhook_list_head, list) {
int ret;
@@ -987,6 +995,7 @@ static int netcp_process_tx_compl_packets(struct netcp_intf *netcp,
unsigned int budget)
{
struct knav_dma_desc *desc;
+ struct netcp_tx_cb *tx_cb;
struct sk_buff *skb;
unsigned int dma_sz;
dma_addr_t dma;
@@ -1014,6 +1023,10 @@ static int netcp_process_tx_compl_packets(struct netcp_intf *netcp,
continue;
}
+ tx_cb = (struct netcp_tx_cb *)skb->cb;
+ if (tx_cb->txtstamp)
+ tx_cb->txtstamp(tx_cb->ts_context, skb);
+
if (netif_subqueue_stopped(netcp->ndev, skb) &&
netif_running(netcp->ndev) &&
(knav_pool_count(netcp->tx_pool) >
@@ -1154,6 +1167,7 @@ static int netcp_tx_submit_skb(struct netcp_intf *netcp,
struct netcp_tx_pipe *tx_pipe = NULL;
struct netcp_hook_list *tx_hook;
struct netcp_packet p_info;
+ struct netcp_tx_cb *tx_cb;
unsigned int dma_sz;
dma_addr_t dma;
u32 tmp = 0;
@@ -1164,7 +1178,7 @@ static int netcp_tx_submit_skb(struct netcp_intf *netcp,
p_info.tx_pipe = NULL;
p_info.psdata_len = 0;
p_info.ts_context = NULL;
- p_info.txtstamp_complete = NULL;
+ p_info.txtstamp = NULL;
p_info.epib = desc->epib;
p_info.psdata = (u32 __force *)desc->psdata;
memset(p_info.epib, 0, KNAV_DMA_NUM_EPIB_WORDS * sizeof(__le32));
@@ -1189,6 +1203,10 @@ static int netcp_tx_submit_skb(struct netcp_intf *netcp,
goto out;
}
+ tx_cb = (struct netcp_tx_cb *)skb->cb;
+ tx_cb->ts_context = p_info.ts_context;
+ tx_cb->txtstamp = p_info.txtstamp;
+
/* update descriptor */
if (p_info.psdata_len) {
/* psdata points to both native-endian and device-endian data */
@@ -1568,7 +1586,7 @@ static int netcp_setup_navigator_resources(struct net_device *ndev)
/* open Tx completion queue */
snprintf(name, sizeof(name), "tx-compl-%s", ndev->name);
netcp->tx_compl_q = knav_queue_open(name, netcp->tx_compl_qid, 0);
- if (IS_ERR_OR_NULL(netcp->tx_compl_q)) {
+ if (IS_ERR(netcp->tx_compl_q)) {
ret = PTR_ERR(netcp->tx_compl_q);
goto fail;
}
@@ -1588,7 +1606,7 @@ static int netcp_setup_navigator_resources(struct net_device *ndev)
/* open Rx completion queue */
snprintf(name, sizeof(name), "rx-compl-%s", ndev->name);
netcp->rx_queue = knav_queue_open(name, netcp->rx_queue_id, 0);
- if (IS_ERR_OR_NULL(netcp->rx_queue)) {
+ if (IS_ERR(netcp->rx_queue)) {
ret = PTR_ERR(netcp->rx_queue);
goto fail;
}
@@ -1610,7 +1628,7 @@ static int netcp_setup_navigator_resources(struct net_device *ndev)
++i) {
snprintf(name, sizeof(name), "rx-fdq-%s-%d", ndev->name, i);
netcp->rx_fdq[i] = knav_queue_open(name, KNAV_QUEUE_GP, 0);
- if (IS_ERR_OR_NULL(netcp->rx_fdq[i])) {
+ if (IS_ERR(netcp->rx_fdq[i])) {
ret = PTR_ERR(netcp->rx_fdq[i]);
goto fail;
}
@@ -1766,21 +1784,6 @@ out:
return (ret == 0) ? 0 : err;
}
-static int netcp_ndo_change_mtu(struct net_device *ndev, int new_mtu)
-{
- struct netcp_intf *netcp = netdev_priv(ndev);
-
- /* MTU < 68 is an error for IPv4 traffic */
- if ((new_mtu < 68) ||
- (new_mtu > (NETCP_MAX_FRAME_SIZE - ETH_HLEN - ETH_FCS_LEN))) {
- dev_err(netcp->ndev_dev, "Invalid mtu size = %d\n", new_mtu);
- return -EINVAL;
- }
-
- ndev->mtu = new_mtu;
- return 0;
-}
-
static void netcp_ndo_tx_timeout(struct net_device *ndev)
{
struct netcp_intf *netcp = netdev_priv(ndev);
@@ -1886,7 +1889,6 @@ static const struct net_device_ops netcp_netdev_ops = {
.ndo_start_xmit = netcp_ndo_start_xmit,
.ndo_set_rx_mode = netcp_set_rx_mode,
.ndo_do_ioctl = netcp_ndo_ioctl,
- .ndo_change_mtu = netcp_ndo_change_mtu,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
.ndo_vlan_rx_add_vid = netcp_rx_add_vid,
@@ -1923,6 +1925,10 @@ static int netcp_create_interface(struct netcp_device *netcp_device,
ndev->hw_features = ndev->features;
ndev->vlan_features |= NETIF_F_SG;
+ /* MTU range: 68 - 9486 */
+ ndev->min_mtu = ETH_MIN_MTU;
+ ndev->max_mtu = NETCP_MAX_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN);
+
netcp = netdev_priv(ndev);
spin_lock_init(&netcp->lock);
INIT_LIST_HEAD(&netcp->module_head);
@@ -2070,7 +2076,6 @@ static void netcp_delete_interface(struct netcp_device *netcp_device,
if (module->release)
module->release(intf_modpriv->module_priv);
list_del(&intf_modpriv->intf_list);
- kfree(intf_modpriv);
}
WARN(!list_empty(&netcp->module_head), "%s interface module list is not empty!\n",
ndev->name);
@@ -2133,6 +2138,8 @@ static int netcp_probe(struct platform_device *pdev)
}
}
+ of_node_put(interfaces);
+
/* Add the device instance to the list */
list_add_tail(&netcp_device->device_list, &netcp_devices);
@@ -2145,6 +2152,8 @@ probe_quit_interface:
netcp_delete_interface(netcp_device, netcp_intf->ndev);
}
+ of_node_put(interfaces);
+
probe_quit:
pm_runtime_put_sync(&pdev->dev);
pm_runtime_disable(&pdev->dev);
@@ -2165,7 +2174,6 @@ static int netcp_remove(struct platform_device *pdev)
dev_dbg(&pdev->dev, "Removing module \"%s\"\n", module->name);
module->remove(netcp_device, inst_modpriv->module_priv);
list_del(&inst_modpriv->inst_list);
- kfree(inst_modpriv);
}
/* now that all modules are removed, clean up the interfaces */
diff --git a/drivers/net/ethernet/ti/netcp_ethss.c b/drivers/net/ethernet/ti/netcp_ethss.c
index d543298d6750..7d9e36f66735 100644
--- a/drivers/net/ethernet/ti/netcp_ethss.c
+++ b/drivers/net/ethernet/ti/netcp_ethss.c
@@ -23,10 +23,13 @@
#include <linux/of_mdio.h>
#include <linux/of_address.h>
#include <linux/if_vlan.h>
+#include <linux/ptp_classify.h>
+#include <linux/net_tstamp.h>
#include <linux/ethtool.h>
#include "cpsw_ale.h"
#include "netcp.h"
+#include "cpts.h"
#define NETCP_DRIVER_NAME "TI KeyStone Ethernet Driver"
#define NETCP_DRIVER_VERSION "v1.0"
@@ -51,6 +54,7 @@
#define GBE13_EMAC_OFFSET 0x100
#define GBE13_SLAVE_PORT2_OFFSET 0x200
#define GBE13_HW_STATS_OFFSET 0x300
+#define GBE13_CPTS_OFFSET 0x500
#define GBE13_ALE_OFFSET 0x600
#define GBE13_HOST_PORT_NUM 0
#define GBE13_NUM_ALE_ENTRIES 1024
@@ -74,6 +78,7 @@
#define GBENU_SLAVE_PORT_OFFSET 0x2000
#define GBENU_EMAC_OFFSET 0x2330
#define GBENU_HW_STATS_OFFSET 0x1a000
+#define GBENU_CPTS_OFFSET 0x1d000
#define GBENU_ALE_OFFSET 0x1e000
#define GBENU_HOST_PORT_NUM 0
#define GBENU_NUM_ALE_ENTRIES 1024
@@ -89,10 +94,12 @@
/* offset relative to base of XGBE_SS_REG_INDEX */
#define XGBE10_SGMII_MODULE_OFFSET 0x100
+#define IS_SS_ID_XGBE(d) ((d)->ss_version == XGBE_SS_VERSION_10)
/* offset relative to base of XGBE_SM_REG_INDEX */
#define XGBE10_HOST_PORT_OFFSET 0x34
#define XGBE10_SLAVE_PORT_OFFSET 0x64
#define XGBE10_EMAC_OFFSET 0x400
+#define XGBE10_CPTS_OFFSET 0x600
#define XGBE10_ALE_OFFSET 0x700
#define XGBE10_HW_STATS_OFFSET 0x800
#define XGBE10_HOST_PORT_NUM 0
@@ -155,6 +162,7 @@
#define GBE_TX_QUEUE 648
#define GBE_TXHOOK_ORDER 0
+#define GBE_RXHOOK_ORDER 0
#define GBE_DEFAULT_ALE_AGEOUT 30
#define SLAVE_LINK_IS_XGMII(s) ((s)->link_interface >= XGMII_LINK_MAC_PHY)
#define NETCP_LINK_STATE_INVALID -1
@@ -169,6 +177,56 @@
#define HOST_TX_PRI_MAP_DEFAULT 0x00000000
+#if IS_ENABLED(CONFIG_TI_CPTS)
+/* Px_TS_CTL register fields */
+#define TS_RX_ANX_F_EN BIT(0)
+#define TS_RX_VLAN_LT1_EN BIT(1)
+#define TS_RX_VLAN_LT2_EN BIT(2)
+#define TS_RX_ANX_D_EN BIT(3)
+#define TS_TX_ANX_F_EN BIT(4)
+#define TS_TX_VLAN_LT1_EN BIT(5)
+#define TS_TX_VLAN_LT2_EN BIT(6)
+#define TS_TX_ANX_D_EN BIT(7)
+#define TS_LT2_EN BIT(8)
+#define TS_RX_ANX_E_EN BIT(9)
+#define TS_TX_ANX_E_EN BIT(10)
+#define TS_MSG_TYPE_EN_SHIFT 16
+#define TS_MSG_TYPE_EN_MASK 0xffff
+
+/* Px_TS_SEQ_LTYPE register fields */
+#define TS_SEQ_ID_OFS_SHIFT 16
+#define TS_SEQ_ID_OFS_MASK 0x3f
+
+/* Px_TS_CTL_LTYPE2 register fields */
+#define TS_107 BIT(16)
+#define TS_129 BIT(17)
+#define TS_130 BIT(18)
+#define TS_131 BIT(19)
+#define TS_132 BIT(20)
+#define TS_319 BIT(21)
+#define TS_320 BIT(22)
+#define TS_TTL_NONZERO BIT(23)
+#define TS_UNI_EN BIT(24)
+#define TS_UNI_EN_SHIFT 24
+
+#define TS_TX_ANX_ALL_EN \
+ (TS_TX_ANX_D_EN | TS_TX_ANX_E_EN | TS_TX_ANX_F_EN)
+
+#define TS_RX_ANX_ALL_EN \
+ (TS_RX_ANX_D_EN | TS_RX_ANX_E_EN | TS_RX_ANX_F_EN)
+
+#define TS_CTL_DST_PORT TS_319
+#define TS_CTL_DST_PORT_SHIFT 21
+
+#define TS_CTL_MADDR_ALL \
+ (TS_107 | TS_129 | TS_130 | TS_131 | TS_132)
+
+#define TS_CTL_MADDR_SHIFT 16
+
+/* The PTP event messages - Sync, Delay_Req, Pdelay_Req, and Pdelay_Resp. */
+#define EVENT_MSG_BITS (BIT(0) | BIT(1) | BIT(2) | BIT(3))
+#endif /* CONFIG_TI_CPTS */
+
struct xgbe_ss_regs {
u32 id_ver;
u32 synce_count;
@@ -616,6 +674,13 @@ struct gbe_hw_stats {
#define GBE_MAX_HW_STAT_MODS 9
#define GBE_HW_STATS_REG_MAP_SZ 0x100
+struct ts_ctl {
+ int uni;
+ u8 dst_port_map;
+ u8 maddr_map;
+ u8 ts_mcast_type;
+};
+
struct gbe_slave {
void __iomem *port_regs;
void __iomem *emac_regs;
@@ -630,6 +695,7 @@ struct gbe_slave {
u32 mac_control;
u8 phy_port_t;
struct device_node *phy_node;
+ struct ts_ctl ts_ctl;
struct list_head slave_list;
};
@@ -655,6 +721,7 @@ struct gbe_priv {
void __iomem *switch_regs;
void __iomem *host_port_regs;
void __iomem *ale_reg;
+ void __iomem *cpts_reg;
void __iomem *sgmii_port_regs;
void __iomem *sgmii_port34_regs;
void __iomem *xgbe_serdes_regs;
@@ -678,6 +745,9 @@ struct gbe_priv {
int num_et_stats;
/* Lock for updating the hwstats */
spinlock_t hw_stats_lock;
+
+ int cpts_registered;
+ struct cpts *cpts;
};
struct gbe_intf {
@@ -1677,6 +1747,17 @@ static void keystone_set_msglevel(struct net_device *ndev, u32 value)
netcp->msg_enable = value;
}
+static struct gbe_intf *keystone_get_intf_data(struct netcp_intf *netcp)
+{
+ struct gbe_intf *gbe_intf;
+
+ gbe_intf = netcp_module_get_intf_data(&gbe_module, netcp);
+ if (!gbe_intf)
+ gbe_intf = netcp_module_get_intf_data(&xgbe_module, netcp);
+
+ return gbe_intf;
+}
+
static void keystone_get_stat_strings(struct net_device *ndev,
uint32_t stringset, uint8_t *data)
{
@@ -1685,7 +1766,7 @@ static void keystone_get_stat_strings(struct net_device *ndev,
struct gbe_priv *gbe_dev;
int i;
- gbe_intf = netcp_module_get_intf_data(&gbe_module, netcp);
+ gbe_intf = keystone_get_intf_data(netcp);
if (!gbe_intf)
return;
gbe_dev = gbe_intf->gbe_dev;
@@ -1709,7 +1790,7 @@ static int keystone_get_sset_count(struct net_device *ndev, int stringset)
struct gbe_intf *gbe_intf;
struct gbe_priv *gbe_dev;
- gbe_intf = netcp_module_get_intf_data(&gbe_module, netcp);
+ gbe_intf = keystone_get_intf_data(netcp);
if (!gbe_intf)
return -EINVAL;
gbe_dev = gbe_intf->gbe_dev;
@@ -1827,7 +1908,7 @@ static void keystone_get_ethtool_stats(struct net_device *ndev,
struct gbe_intf *gbe_intf;
struct gbe_priv *gbe_dev;
- gbe_intf = netcp_module_get_intf_data(&gbe_module, netcp);
+ gbe_intf = keystone_get_intf_data(netcp);
if (!gbe_intf)
return;
@@ -1840,8 +1921,8 @@ static void keystone_get_ethtool_stats(struct net_device *ndev,
spin_unlock_bh(&gbe_dev->hw_stats_lock);
}
-static int keystone_get_settings(struct net_device *ndev,
- struct ethtool_cmd *cmd)
+static int keystone_get_link_ksettings(struct net_device *ndev,
+ struct ethtool_link_ksettings *cmd)
{
struct netcp_intf *netcp = netdev_priv(ndev);
struct phy_device *phy = ndev->phydev;
@@ -1851,59 +1932,110 @@ static int keystone_get_settings(struct net_device *ndev,
if (!phy)
return -EINVAL;
- gbe_intf = netcp_module_get_intf_data(&gbe_module, netcp);
+ gbe_intf = keystone_get_intf_data(netcp);
if (!gbe_intf)
return -EINVAL;
if (!gbe_intf->slave)
return -EINVAL;
- ret = phy_ethtool_gset(phy, cmd);
+ ret = phy_ethtool_ksettings_get(phy, cmd);
if (!ret)
- cmd->port = gbe_intf->slave->phy_port_t;
+ cmd->base.port = gbe_intf->slave->phy_port_t;
return ret;
}
-static int keystone_set_settings(struct net_device *ndev,
- struct ethtool_cmd *cmd)
+static int keystone_set_link_ksettings(struct net_device *ndev,
+ const struct ethtool_link_ksettings *cmd)
{
struct netcp_intf *netcp = netdev_priv(ndev);
struct phy_device *phy = ndev->phydev;
struct gbe_intf *gbe_intf;
- u32 features = cmd->advertising & cmd->supported;
+ u8 port = cmd->base.port;
+ u32 advertising, supported;
+ u32 features;
+
+ ethtool_convert_link_mode_to_legacy_u32(&advertising,
+ cmd->link_modes.advertising);
+ ethtool_convert_link_mode_to_legacy_u32(&supported,
+ cmd->link_modes.supported);
+ features = advertising & supported;
if (!phy)
return -EINVAL;
- gbe_intf = netcp_module_get_intf_data(&gbe_module, netcp);
+ gbe_intf = keystone_get_intf_data(netcp);
if (!gbe_intf)
return -EINVAL;
if (!gbe_intf->slave)
return -EINVAL;
- if (cmd->port != gbe_intf->slave->phy_port_t) {
- if ((cmd->port == PORT_TP) && !(features & ADVERTISED_TP))
+ if (port != gbe_intf->slave->phy_port_t) {
+ if ((port == PORT_TP) && !(features & ADVERTISED_TP))
return -EINVAL;
- if ((cmd->port == PORT_AUI) && !(features & ADVERTISED_AUI))
+ if ((port == PORT_AUI) && !(features & ADVERTISED_AUI))
return -EINVAL;
- if ((cmd->port == PORT_BNC) && !(features & ADVERTISED_BNC))
+ if ((port == PORT_BNC) && !(features & ADVERTISED_BNC))
return -EINVAL;
- if ((cmd->port == PORT_MII) && !(features & ADVERTISED_MII))
+ if ((port == PORT_MII) && !(features & ADVERTISED_MII))
return -EINVAL;
- if ((cmd->port == PORT_FIBRE) && !(features & ADVERTISED_FIBRE))
+ if ((port == PORT_FIBRE) && !(features & ADVERTISED_FIBRE))
return -EINVAL;
}
- gbe_intf->slave->phy_port_t = cmd->port;
- return phy_ethtool_sset(phy, cmd);
+ gbe_intf->slave->phy_port_t = port;
+ return phy_ethtool_ksettings_set(phy, cmd);
}
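
The ksettings API stores link modes as arbitrarily long bitmaps, so the driver converts them back to the legacy 32-bit masks before testing the ADVERTISED_* port flags; modes above bit 31 simply cannot be represented and are dropped. An illustrative userspace re-implementation of that conversion (a sketch of the idea, not the kernel helper itself):

#include <stdint.h>

/* Copy the low 32 link-mode bits of a bitmap into a legacy u32 mask. */
static uint32_t link_modes_to_legacy(const unsigned long *modes, int nbits)
{
	uint32_t legacy = 0;
	int bpl = 8 * sizeof(unsigned long);

	for (int i = 0; i < nbits && i < 32; i++)
		if (modes[i / bpl] & (1ul << (i % bpl)))
			legacy |= 1u << i;
	return legacy;
}

The kernel's ethtool_convert_link_mode_to_legacy_u32() additionally reports whether any modes were lost in the truncation.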
+#if IS_ENABLED(CONFIG_TI_CPTS)
+static int keystone_get_ts_info(struct net_device *ndev,
+ struct ethtool_ts_info *info)
+{
+ struct netcp_intf *netcp = netdev_priv(ndev);
+ struct gbe_intf *gbe_intf;
+
+ gbe_intf = netcp_module_get_intf_data(&gbe_module, netcp);
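+ if (!gbe_intf)
+ gbe_intf = netcp_module_get_intf_data(&xgbe_module, netcp);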
+ if (!gbe_intf || !gbe_intf->gbe_dev->cpts)
+ return -EINVAL;
+
+ info->so_timestamping =
+ SOF_TIMESTAMPING_TX_HARDWARE |
+ SOF_TIMESTAMPING_TX_SOFTWARE |
+ SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_RX_SOFTWARE |
+ SOF_TIMESTAMPING_SOFTWARE |
+ SOF_TIMESTAMPING_RAW_HARDWARE;
+ info->phc_index = gbe_intf->gbe_dev->cpts->phc_index;
+ info->tx_types =
+ (1 << HWTSTAMP_TX_OFF) |
+ (1 << HWTSTAMP_TX_ON);
+ info->rx_filters =
+ (1 << HWTSTAMP_FILTER_NONE) |
+ (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_EVENT);
+ return 0;
+}
+#else
+static int keystone_get_ts_info(struct net_device *ndev,
+ struct ethtool_ts_info *info)
+{
+ info->so_timestamping =
+ SOF_TIMESTAMPING_TX_SOFTWARE |
+ SOF_TIMESTAMPING_RX_SOFTWARE |
+ SOF_TIMESTAMPING_SOFTWARE;
+ info->phc_index = -1;
+ info->tx_types = 0;
+ info->rx_filters = 0;
+ return 0;
+}
+#endif /* CONFIG_TI_CPTS */
+
static const struct ethtool_ops keystone_ethtool_ops = {
.get_drvinfo = keystone_get_drvinfo,
.get_link = ethtool_op_get_link,
@@ -1912,8 +2044,9 @@ static const struct ethtool_ops keystone_ethtool_ops = {
.get_strings = keystone_get_stat_strings,
.get_sset_count = keystone_get_sset_count,
.get_ethtool_stats = keystone_get_ethtool_stats,
- .get_settings = keystone_get_settings,
- .set_settings = keystone_set_settings,
+ .get_link_ksettings = keystone_get_link_ksettings,
+ .set_link_ksettings = keystone_set_link_ksettings,
+ .get_ts_info = keystone_get_ts_info,
};
#define mac_hi(mac) (((mac)[0] << 0) | ((mac)[1] << 8) | \
@@ -2190,7 +2323,7 @@ static void gbe_init_host_port(struct gbe_priv *priv)
int bypass_en = 1;
/* Host Tx Pri */
- if (IS_SS_ID_NU(priv))
+ if (IS_SS_ID_NU(priv) || IS_SS_ID_XGBE(priv))
writel(HOST_TX_PRI_MAP_DEFAULT,
GBE_REG_ADDR(priv, host_port_regs, tx_pri_map));
@@ -2357,16 +2490,279 @@ static int gbe_del_vid(void *intf_priv, int vid)
return 0;
}
+#if IS_ENABLED(CONFIG_TI_CPTS)
+#define HAS_PHY_TXTSTAMP(p) ((p)->drv && (p)->drv->txtstamp)
+#define HAS_PHY_RXTSTAMP(p) ((p)->drv && (p)->drv->rxtstamp)
+
+static void gbe_txtstamp(void *context, struct sk_buff *skb)
+{
+ struct gbe_intf *gbe_intf = context;
+ struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
+
+ cpts_tx_timestamp(gbe_dev->cpts, skb);
+}
+
+static bool gbe_need_txtstamp(struct gbe_intf *gbe_intf,
+ const struct netcp_packet *p_info)
+{
+ struct sk_buff *skb = p_info->skb;
+ unsigned int class = ptp_classify_raw(skb);
+
+ if (class == PTP_CLASS_NONE)
+ return false;
+
+ switch (class) {
+ case PTP_CLASS_V1_IPV4:
+ case PTP_CLASS_V1_IPV6:
+ case PTP_CLASS_V2_IPV4:
+ case PTP_CLASS_V2_IPV6:
+ case PTP_CLASS_V2_L2:
+ case (PTP_CLASS_V2_VLAN | PTP_CLASS_L2):
+ case (PTP_CLASS_V2_VLAN | PTP_CLASS_IPV4):
+ case (PTP_CLASS_V2_VLAN | PTP_CLASS_IPV6):
+ return true;
+ }
+
+ return false;
+}
+
+static int gbe_txtstamp_mark_pkt(struct gbe_intf *gbe_intf,
+ struct netcp_packet *p_info)
+{
+ struct phy_device *phydev = p_info->skb->dev->phydev;
+ struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
+
+ if (!(skb_shinfo(p_info->skb)->tx_flags & SKBTX_HW_TSTAMP) ||
+ !cpts_is_tx_enabled(gbe_dev->cpts))
+ return 0;
+
+ /* If the phy has a txtstamp api, assume the phy will handle the
+ * timestamping.  We mark the skb here because skb_tx_timestamp()
+ * runs only after all the tx hooks have been called.
+ */
+ if (phydev && HAS_PHY_TXTSTAMP(phydev)) {
+ skb_shinfo(p_info->skb)->tx_flags |= SKBTX_IN_PROGRESS;
+ return 0;
+ }
+
+ if (gbe_need_txtstamp(gbe_intf, p_info)) {
+ p_info->txtstamp = gbe_txtstamp;
+ p_info->ts_context = (void *)gbe_intf;
+ skb_shinfo(p_info->skb)->tx_flags |= SKBTX_IN_PROGRESS;
+ }
+
+ return 0;
+}
+
+static int gbe_rxtstamp(struct gbe_intf *gbe_intf, struct netcp_packet *p_info)
+{
+ struct phy_device *phydev = p_info->skb->dev->phydev;
+ struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
+
+ if (p_info->rxtstamp_complete)
+ return 0;
+
+ if (phydev && HAS_PHY_RXTSTAMP(phydev)) {
+ p_info->rxtstamp_complete = true;
+ return 0;
+ }
+
+ cpts_rx_timestamp(gbe_dev->cpts, p_info->skb);
+ p_info->rxtstamp_complete = true;
+
+ return 0;
+}
+
+static int gbe_hwtstamp_get(struct gbe_intf *gbe_intf, struct ifreq *ifr)
+{
+ struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
+ struct cpts *cpts = gbe_dev->cpts;
+ struct hwtstamp_config cfg;
+
+ if (!cpts)
+ return -EOPNOTSUPP;
+
+ cfg.flags = 0;
+ cfg.tx_type = cpts_is_tx_enabled(cpts) ?
+ HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
+ cfg.rx_filter = (cpts_is_rx_enabled(cpts) ?
+ cpts->rx_enable : HWTSTAMP_FILTER_NONE);
+
+ return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
+}
+
+static void gbe_hwtstamp(struct gbe_intf *gbe_intf)
+{
+ struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
+ struct gbe_slave *slave = gbe_intf->slave;
+ u32 ts_en, seq_id, ctl;
+
+ if (!cpts_is_rx_enabled(gbe_dev->cpts) &&
+ !cpts_is_tx_enabled(gbe_dev->cpts)) {
+ writel(0, GBE_REG_ADDR(slave, port_regs, ts_ctl));
+ return;
+ }
+
+ seq_id = (30 << TS_SEQ_ID_OFS_SHIFT) | ETH_P_1588;
+ ts_en = EVENT_MSG_BITS << TS_MSG_TYPE_EN_SHIFT;
+ ctl = ETH_P_1588 | TS_TTL_NONZERO |
+ (slave->ts_ctl.dst_port_map << TS_CTL_DST_PORT_SHIFT) |
+ (slave->ts_ctl.uni ? TS_UNI_EN :
+ slave->ts_ctl.maddr_map << TS_CTL_MADDR_SHIFT);
+
+ if (cpts_is_tx_enabled(gbe_dev->cpts))
+ ts_en |= (TS_TX_ANX_ALL_EN | TS_TX_VLAN_LT1_EN);
+
+ if (cpts_is_rx_enabled(gbe_dev->cpts))
+ ts_en |= (TS_RX_ANX_ALL_EN | TS_RX_VLAN_LT1_EN);
+
+ writel(ts_en, GBE_REG_ADDR(slave, port_regs, ts_ctl));
+ writel(seq_id, GBE_REG_ADDR(slave, port_regs, ts_seq_ltype));
+ writel(ctl, GBE_REG_ADDR(slave, port_regs, ts_ctl_ltype2));
+}
+
+static int gbe_hwtstamp_set(struct gbe_intf *gbe_intf, struct ifreq *ifr)
+{
+ struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
+ struct cpts *cpts = gbe_dev->cpts;
+ struct hwtstamp_config cfg;
+
+ if (!cpts)
+ return -EOPNOTSUPP;
+
+ if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
+ return -EFAULT;
+
+ /* reserved for future extensions */
+ if (cfg.flags)
+ return -EINVAL;
+
+ switch (cfg.tx_type) {
+ case HWTSTAMP_TX_OFF:
+ cpts_tx_enable(cpts, 0);
+ break;
+ case HWTSTAMP_TX_ON:
+ cpts_tx_enable(cpts, 1);
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ switch (cfg.rx_filter) {
+ case HWTSTAMP_FILTER_NONE:
+ cpts_rx_enable(cpts, 0);
+ break;
+ case HWTSTAMP_FILTER_ALL:
+ case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
+ cpts_rx_enable(cpts, HWTSTAMP_FILTER_PTP_V1_L4_EVENT);
+ cfg.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
+ break;
+ case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+ cpts_rx_enable(cpts, HWTSTAMP_FILTER_PTP_V2_EVENT);
+ cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ gbe_hwtstamp(gbe_intf);
+
+ return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
+}
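
The pair above implements the standard SIOCGHWTSTAMP/SIOCSHWTSTAMP interface, so userspace enables timestamping the usual way. A sketch, with "eth0" standing in for the actual netcp interface name:

#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/net_tstamp.h>
#include <linux/sockios.h>
#include <unistd.h>

int main(void)
{
	struct hwtstamp_config cfg = {
		.tx_type = HWTSTAMP_TX_ON,
		.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT,
	};
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);	/* example name */
	ifr.ifr_data = (void *)&cfg;
	if (ioctl(fd, SIOCSHWTSTAMP, &ifr) < 0)
		perror("SIOCSHWTSTAMP");
	else
		printf("rx_filter granted: %d\n", cfg.rx_filter);
	close(fd);
	return 0;
}

Note that the driver may upgrade the requested rx_filter (any V1 or V2 variant is widened to the corresponding _EVENT filter), which is why the config is copied back to the caller.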
+
+static void gbe_register_cpts(struct gbe_priv *gbe_dev)
+{
+ if (!gbe_dev->cpts)
+ return;
+
+ if (gbe_dev->cpts_registered > 0)
+ goto done;
+
+ if (cpts_register(gbe_dev->cpts)) {
+ dev_err(gbe_dev->dev, "error registering cpts device\n");
+ return;
+ }
+
+done:
+ ++gbe_dev->cpts_registered;
+}
+
+static void gbe_unregister_cpts(struct gbe_priv *gbe_dev)
+{
+ if (!gbe_dev->cpts || (gbe_dev->cpts_registered <= 0))
+ return;
+
+ if (--gbe_dev->cpts_registered)
+ return;
+
+ cpts_unregister(gbe_dev->cpts);
+}
+#else
+static inline int gbe_txtstamp_mark_pkt(struct gbe_intf *gbe_intf,
+ struct netcp_packet *p_info)
+{
+ return 0;
+}
+
+static inline int gbe_rxtstamp(struct gbe_intf *gbe_intf,
+ struct netcp_packet *p_info)
+{
+ return 0;
+}
+
+static inline void gbe_hwtstamp(struct gbe_intf *gbe_intf)
+{
+}
+
+static inline void gbe_register_cpts(struct gbe_priv *gbe_dev)
+{
+}
+
+static inline void gbe_unregister_cpts(struct gbe_priv *gbe_dev)
+{
+}
+
+static inline int gbe_hwtstamp_get(struct gbe_intf *gbe_intf, struct ifreq *req)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int gbe_hwtstamp_set(struct gbe_intf *gbe_intf, struct ifreq *req)
+{
+ return -EOPNOTSUPP;
+}
+#endif /* CONFIG_TI_CPTS */
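
This is the usual config-gated stub idiom: when CONFIG_TI_CPTS is off, callers such as gbe_ioctl() and gbe_open() still compile unconditionally against no-op inlines that the compiler discards. A minimal sketch of the pattern, with a hypothetical CONFIG_FOO feature:

struct foo;

#ifdef CONFIG_FOO
int foo_start(struct foo *f);
void foo_stop(struct foo *f);
#else
/* no-op stubs keep every call site free of #ifdefs */
static inline int foo_start(struct foo *f) { return 0; }
static inline void foo_stop(struct foo *f) { }
#endif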
+
static int gbe_ioctl(void *intf_priv, struct ifreq *req, int cmd)
{
struct gbe_intf *gbe_intf = intf_priv;
struct phy_device *phy = gbe_intf->slave->phy;
- int ret = -EOPNOTSUPP;
+
+ if (!phy || !phy->drv || !phy->drv->hwtstamp) {
+ switch (cmd) {
+ case SIOCGHWTSTAMP:
+ return gbe_hwtstamp_get(gbe_intf, req);
+ case SIOCSHWTSTAMP:
+ return gbe_hwtstamp_set(gbe_intf, req);
+ }
+ }
if (phy)
- ret = phy_mii_ioctl(phy, req, cmd);
+ return phy_mii_ioctl(phy, req, cmd);
- return ret;
+ return -EOPNOTSUPP;
}
static void netcp_ethss_timer(unsigned long arg)
@@ -2402,12 +2798,20 @@ static void netcp_ethss_timer(unsigned long arg)
add_timer(&gbe_dev->timer);
}
-static int gbe_tx_hook(int order, void *data, struct netcp_packet *p_info)
+static int gbe_txhook(int order, void *data, struct netcp_packet *p_info)
{
struct gbe_intf *gbe_intf = data;
p_info->tx_pipe = &gbe_intf->tx_pipe;
- return 0;
+
+ return gbe_txtstamp_mark_pkt(gbe_intf, p_info);
+}
+
+static int gbe_rxhook(int order, void *data, struct netcp_packet *p_info)
+{
+ struct gbe_intf *gbe_intf = data;
+
+ return gbe_rxtstamp(gbe_intf, p_info);
}
static int gbe_open(void *intf_priv, struct net_device *ndev)
@@ -2457,11 +2861,14 @@ static int gbe_open(void *intf_priv, struct net_device *ndev)
if (ret)
goto fail;
- netcp_register_txhook(netcp, GBE_TXHOOK_ORDER, gbe_tx_hook,
- gbe_intf);
+ netcp_register_txhook(netcp, GBE_TXHOOK_ORDER, gbe_txhook, gbe_intf);
+ netcp_register_rxhook(netcp, GBE_RXHOOK_ORDER, gbe_rxhook, gbe_intf);
slave->open = true;
netcp_ethss_update_link_state(gbe_dev, slave, ndev);
+
+ gbe_register_cpts(gbe_dev);
+
return 0;
fail:
@@ -2473,16 +2880,36 @@ static int gbe_close(void *intf_priv, struct net_device *ndev)
{
struct gbe_intf *gbe_intf = intf_priv;
struct netcp_intf *netcp = netdev_priv(ndev);
+ struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
+
+ gbe_unregister_cpts(gbe_dev);
gbe_slave_stop(gbe_intf);
- netcp_unregister_txhook(netcp, GBE_TXHOOK_ORDER, gbe_tx_hook,
- gbe_intf);
+
+ netcp_unregister_rxhook(netcp, GBE_RXHOOK_ORDER, gbe_rxhook, gbe_intf);
+ netcp_unregister_txhook(netcp, GBE_TXHOOK_ORDER, gbe_txhook, gbe_intf);
gbe_intf->slave->open = false;
atomic_set(&gbe_intf->slave->link_state, NETCP_LINK_STATE_INVALID);
return 0;
}
+#if IS_ENABLED(CONFIG_TI_CPTS)
+static void init_slave_ts_ctl(struct gbe_slave *slave)
+{
+ slave->ts_ctl.uni = 1;
+ slave->ts_ctl.dst_port_map =
+ (TS_CTL_DST_PORT >> TS_CTL_DST_PORT_SHIFT) & 0x3;
+ slave->ts_ctl.maddr_map =
+ (TS_CTL_MADDR_ALL >> TS_CTL_MADDR_SHIFT) & 0x1f;
+}
+
+#else
+static void init_slave_ts_ctl(struct gbe_slave *slave)
+{
+}
+#endif /* CONFIG_TI_CPTS */
+
static int init_slave(struct gbe_priv *gbe_dev, struct gbe_slave *slave,
struct device_node *node)
{
@@ -2597,6 +3024,8 @@ static int init_slave(struct gbe_priv *gbe_dev, struct gbe_slave *slave,
}
atomic_set(&slave->link_state, NETCP_LINK_STATE_INVALID);
+
+ init_slave_ts_ctl(slave);
return 0;
}
@@ -2787,6 +3216,7 @@ static int set_xgbe_ethss10_priv(struct gbe_priv *gbe_dev,
XGBE10_HW_STATS_OFFSET + (GBE_HW_STATS_REG_MAP_SZ * i);
gbe_dev->ale_reg = gbe_dev->switch_regs + XGBE10_ALE_OFFSET;
+ gbe_dev->cpts_reg = gbe_dev->switch_regs + XGBE10_CPTS_OFFSET;
gbe_dev->ale_ports = gbe_dev->max_num_ports;
gbe_dev->host_port = XGBE10_HOST_PORT_NUM;
gbe_dev->ale_entries = XGBE10_NUM_ALE_ENTRIES;
@@ -2909,6 +3339,7 @@ static int set_gbe_ethss14_priv(struct gbe_priv *gbe_dev,
(GBE_HW_STATS_REG_MAP_SZ * (i & 0x1));
}
+ gbe_dev->cpts_reg = gbe_dev->switch_regs + GBE13_CPTS_OFFSET;
gbe_dev->ale_reg = gbe_dev->switch_regs + GBE13_ALE_OFFSET;
gbe_dev->ale_ports = gbe_dev->max_num_ports;
gbe_dev->host_port = GBE13_HOST_PORT_NUM;
@@ -2998,6 +3429,7 @@ static int set_gbenu_ethss_priv(struct gbe_priv *gbe_dev,
gbe_dev->hw_stats_regs[i] = gbe_dev->switch_regs +
GBENU_HW_STATS_OFFSET + (GBENU_HW_STATS_REG_MAP_SZ * i);
+ gbe_dev->cpts_reg = gbe_dev->switch_regs + GBENU_CPTS_OFFSET;
gbe_dev->ale_reg = gbe_dev->switch_regs + GBENU_ALE_OFFSET;
gbe_dev->ale_ports = gbe_dev->max_num_ports;
gbe_dev->host_port = GBENU_HOST_PORT_NUM;
@@ -3179,6 +3611,12 @@ static int gbe_probe(struct netcp_device *netcp_device, struct device *dev,
dev_dbg(gbe_dev->dev, "Created a gbe ale engine\n");
}
+ gbe_dev->cpts = cpts_create(gbe_dev->dev, gbe_dev->cpts_reg, node);
+ if (IS_ENABLED(CONFIG_TI_CPTS) && IS_ERR(gbe_dev->cpts)) {
+ ret = PTR_ERR(gbe_dev->cpts);
+ goto free_sec_ports;
+ }
+
/* initialize host port */
gbe_init_host_port(gbe_dev);
@@ -3267,6 +3705,7 @@ static int gbe_remove(struct netcp_device *netcp_device, void *inst_priv)
struct gbe_priv *gbe_dev = inst_priv;
del_timer_sync(&gbe_dev->timer);
+ cpts_release(gbe_dev->cpts);
cpsw_ale_stop(gbe_dev->ale);
cpsw_ale_destroy(gbe_dev->ale);
netcp_txpipe_close(&gbe_dev->tx_pipe);
diff --git a/drivers/net/ethernet/ti/tlan.c b/drivers/net/ethernet/ti/tlan.c
index 6c7ec1ddd475..c8d53d8c83ee 100644
--- a/drivers/net/ethernet/ti/tlan.c
+++ b/drivers/net/ethernet/ti/tlan.c
@@ -772,7 +772,6 @@ static const struct net_device_ops tlan_netdev_ops = {
.ndo_get_stats = tlan_get_stats,
.ndo_set_rx_mode = tlan_set_multicast_list,
.ndo_do_ioctl = tlan_ioctl,
- .ndo_change_mtu = eth_change_mtu,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
diff --git a/drivers/net/ethernet/tile/Kconfig b/drivers/net/ethernet/tile/Kconfig
index f59a6c265331..bdfeaf3d4fce 100644
--- a/drivers/net/ethernet/tile/Kconfig
+++ b/drivers/net/ethernet/tile/Kconfig
@@ -9,7 +9,7 @@ config TILE_NET
select CRC32
select TILE_GXIO_MPIPE if TILEGX
select HIGH_RES_TIMERS if TILEGX
- select PTP_1588_CLOCK if TILEGX
+ imply PTP_1588_CLOCK if TILEGX
---help---
This is a standard Linux network device driver for the
on-chip Tilera Gigabit Ethernet and XAUI interfaces.
diff --git a/drivers/net/ethernet/tile/tilegx.c b/drivers/net/ethernet/tile/tilegx.c
index 11213a38c795..0aaf975bb347 100644
--- a/drivers/net/ethernet/tile/tilegx.c
+++ b/drivers/net/ethernet/tile/tilegx.c
@@ -59,6 +59,9 @@
/* Maximum number of packets to handle per "poll". */
#define TILE_NET_WEIGHT 64
+/* Maximum Jumbo Packet MTU */
+#define TILE_JUMBO_MAX_MTU 9000
+
/* Number of entries in each iqueue. */
#define IQUEUE_ENTRIES 512
@@ -2101,17 +2104,6 @@ static int tile_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
return -EOPNOTSUPP;
}
-/* Change the MTU. */
-static int tile_net_change_mtu(struct net_device *dev, int new_mtu)
-{
- if (new_mtu < 68)
- return -EINVAL;
- if (new_mtu > ((jumbo_num != 0) ? 9000 : 1500))
- return -EINVAL;
- dev->mtu = new_mtu;
- return 0;
-}
-
/* Change the Ethernet address of the NIC.
*
* The hypervisor driver does not support changing MAC address. However,
@@ -2154,7 +2146,6 @@ static const struct net_device_ops tile_net_ops = {
.ndo_start_xmit = tile_net_tx,
.ndo_select_queue = tile_net_select_queue,
.ndo_do_ioctl = tile_net_ioctl,
- .ndo_change_mtu = tile_net_change_mtu,
.ndo_tx_timeout = tile_net_tx_timeout,
.ndo_set_mac_address = tile_net_set_mac_address,
#ifdef CONFIG_NET_POLL_CONTROLLER
@@ -2174,7 +2165,11 @@ static void tile_net_setup(struct net_device *dev)
ether_setup(dev);
dev->netdev_ops = &tile_net_ops;
dev->watchdog_timeo = TILE_NET_TIMEOUT;
- dev->mtu = 1500;
+
+ /* MTU range: 68 - 1500 or 9000 */
+ dev->mtu = ETH_DATA_LEN;
+ dev->min_mtu = ETH_MIN_MTU;
+ dev->max_mtu = jumbo_num ? TILE_JUMBO_MAX_MTU : ETH_DATA_LEN;
features |= NETIF_F_HW_CSUM;
features |= NETIF_F_SG;
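
With min_mtu/max_mtu populated, the networking core's dev_set_mtu() enforces the range itself, which is why tile_net_change_mtu() could be deleted. From userspace nothing changes; a sketch using the classic ioctl path ("eth0" is an example name):

#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <unistd.h>

int main(void)
{
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
	ifr.ifr_mtu = 9000;	/* the core rejects this with EINVAL if max_mtu is lower */
	if (ioctl(fd, SIOCSIFMTU, &ifr) < 0)
		perror("SIOCSIFMTU");
	close(fd);
	return 0;
}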
diff --git a/drivers/net/ethernet/tile/tilepro.c b/drivers/net/ethernet/tile/tilepro.c
index 4ef605a90247..0a3b7dafa3ba 100644
--- a/drivers/net/ethernet/tile/tilepro.c
+++ b/drivers/net/ethernet/tile/tilepro.c
@@ -87,7 +87,7 @@
/* This should be 1500 if "jumbo" is not set in LIPP. */
/* This should be at most 10226 (10240 - 14) if "jumbo" is set in LIPP. */
/* ISSUE: This has not been thoroughly tested (except at 1500). */
-#define TILE_NET_MTU 1500
+#define TILE_NET_MTU ETH_DATA_LEN
/* HACK: Define this to verify incoming packets. */
/* #define TILE_NET_VERIFY_INGRESS */
@@ -2095,26 +2095,6 @@ static struct rtnl_link_stats64 *tile_net_get_stats64(struct net_device *dev,
}
-/*
- * Change the "mtu".
- *
- * The "change_mtu" method is usually not needed.
- * If you need it, it must be like this.
- */
-static int tile_net_change_mtu(struct net_device *dev, int new_mtu)
-{
- PDEBUG("tile_net_change_mtu()\n");
-
- /* Check ranges. */
- if ((new_mtu < 68) || (new_mtu > 1500))
- return -EINVAL;
-
- /* Accept the value. */
- dev->mtu = new_mtu;
-
- return 0;
-}
-
/*
* Change the Ethernet Address of the NIC.
@@ -2229,7 +2209,6 @@ static const struct net_device_ops tile_net_ops = {
.ndo_start_xmit = tile_net_tx,
.ndo_do_ioctl = tile_net_ioctl,
.ndo_get_stats64 = tile_net_get_stats64,
- .ndo_change_mtu = tile_net_change_mtu,
.ndo_tx_timeout = tile_net_tx_timeout,
.ndo_set_mac_address = tile_net_set_mac_address,
#ifdef CONFIG_NET_POLL_CONTROLLER
@@ -2252,7 +2231,11 @@ static void tile_net_setup(struct net_device *dev)
dev->netdev_ops = &tile_net_ops;
dev->watchdog_timeo = TILE_NET_TIMEOUT;
dev->tx_queue_len = TILE_NET_TX_QUEUE_LEN;
+
+ /* MTU range: 68 - 1500 */
dev->mtu = TILE_NET_MTU;
+ dev->min_mtu = ETH_MIN_MTU;
+ dev->max_mtu = TILE_NET_MTU;
features |= NETIF_F_HW_CSUM;
features |= NETIF_F_SG;
diff --git a/drivers/net/ethernet/toshiba/ps3_gelic_net.c b/drivers/net/ethernet/toshiba/ps3_gelic_net.c
index 272f2b1cb7ad..345316c749e7 100644
--- a/drivers/net/ethernet/toshiba/ps3_gelic_net.c
+++ b/drivers/net/ethernet/toshiba/ps3_gelic_net.c
@@ -1114,24 +1114,6 @@ static int gelic_net_poll(struct napi_struct *napi, int budget)
}
return packets_done;
}
-/**
- * gelic_net_change_mtu - changes the MTU of an interface
- * @netdev: interface device structure
- * @new_mtu: new MTU value
- *
- * returns 0 on success, <0 on failure
- */
-int gelic_net_change_mtu(struct net_device *netdev, int new_mtu)
-{
- /* no need to re-alloc skbs or so -- the max mtu is about 2.3k
- * and mtu is outbound only anyway */
- if ((new_mtu < GELIC_NET_MIN_MTU) ||
- (new_mtu > GELIC_NET_MAX_MTU)) {
- return -EINVAL;
- }
- netdev->mtu = new_mtu;
- return 0;
-}
/**
* gelic_card_interrupt - event handler for gelic_net
@@ -1446,7 +1428,6 @@ static const struct net_device_ops gelic_netdevice_ops = {
.ndo_stop = gelic_net_stop,
.ndo_start_xmit = gelic_net_xmit,
.ndo_set_rx_mode = gelic_net_set_multi,
- .ndo_change_mtu = gelic_net_change_mtu,
.ndo_tx_timeout = gelic_net_tx_timeout,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
@@ -1513,6 +1494,10 @@ int gelic_net_setup_netdev(struct net_device *netdev, struct gelic_card *card)
netdev->features |= NETIF_F_VLAN_CHALLENGED;
}
+ /* MTU range: 64 - 1518 */
+ netdev->min_mtu = GELIC_NET_MIN_MTU;
+ netdev->max_mtu = GELIC_NET_MAX_MTU;
+
status = register_netdev(netdev);
if (status) {
dev_err(ctodev(card), "%s:Couldn't register %s %d\n",
diff --git a/drivers/net/ethernet/toshiba/ps3_gelic_net.h b/drivers/net/ethernet/toshiba/ps3_gelic_net.h
index 8505196be9f5..003d0452d9cb 100644
--- a/drivers/net/ethernet/toshiba/ps3_gelic_net.h
+++ b/drivers/net/ethernet/toshiba/ps3_gelic_net.h
@@ -373,7 +373,6 @@ int gelic_net_stop(struct net_device *netdev);
int gelic_net_xmit(struct sk_buff *skb, struct net_device *netdev);
void gelic_net_set_multi(struct net_device *netdev);
void gelic_net_tx_timeout(struct net_device *netdev);
-int gelic_net_change_mtu(struct net_device *netdev, int new_mtu);
int gelic_net_setup_netdev(struct net_device *netdev, struct gelic_card *card);
/* shared ethtool ops */
diff --git a/drivers/net/ethernet/toshiba/ps3_gelic_wireless.c b/drivers/net/ethernet/toshiba/ps3_gelic_wireless.c
index 928c1dca2673..eed18f88bdff 100644
--- a/drivers/net/ethernet/toshiba/ps3_gelic_wireless.c
+++ b/drivers/net/ethernet/toshiba/ps3_gelic_wireless.c
@@ -2558,7 +2558,6 @@ static const struct net_device_ops gelic_wl_netdevice_ops = {
.ndo_stop = gelic_wl_stop,
.ndo_start_xmit = gelic_net_xmit,
.ndo_set_rx_mode = gelic_net_set_multi,
- .ndo_change_mtu = gelic_net_change_mtu,
.ndo_tx_timeout = gelic_net_tx_timeout,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
diff --git a/drivers/net/ethernet/toshiba/spider_net.c b/drivers/net/ethernet/toshiba/spider_net.c
index 36a6e8b54d94..cb341dfe65ad 100644
--- a/drivers/net/ethernet/toshiba/spider_net.c
+++ b/drivers/net/ethernet/toshiba/spider_net.c
@@ -1279,25 +1279,6 @@ static int spider_net_poll(struct napi_struct *napi, int budget)
}
/**
- * spider_net_change_mtu - changes the MTU of an interface
- * @netdev: interface device structure
- * @new_mtu: new MTU value
- *
- * returns 0 on success, <0 on failure
- */
-static int
-spider_net_change_mtu(struct net_device *netdev, int new_mtu)
-{
- /* no need to re-alloc skbs or so -- the max mtu is about 2.3k
- * and mtu is outbound only anyway */
- if ( (new_mtu < SPIDER_NET_MIN_MTU ) ||
- (new_mtu > SPIDER_NET_MAX_MTU) )
- return -EINVAL;
- netdev->mtu = new_mtu;
- return 0;
-}
-
-/**
* spider_net_set_mac - sets the MAC of an interface
* @netdev: interface device structure
* @ptr: pointer to new MAC address
@@ -2229,7 +2210,6 @@ static const struct net_device_ops spider_net_ops = {
.ndo_start_xmit = spider_net_xmit,
.ndo_set_rx_mode = spider_net_set_multi,
.ndo_set_mac_address = spider_net_set_mac,
- .ndo_change_mtu = spider_net_change_mtu,
.ndo_do_ioctl = spider_net_do_ioctl,
.ndo_tx_timeout = spider_net_tx_timeout,
.ndo_validate_addr = eth_validate_addr,
@@ -2299,6 +2279,10 @@ spider_net_setup_netdev(struct spider_net_card *card)
/* some time: NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
* NETIF_F_HW_VLAN_CTAG_FILTER */
+ /* MTU range: 64 - 2294 */
+ netdev->min_mtu = SPIDER_NET_MIN_MTU;
+ netdev->max_mtu = SPIDER_NET_MAX_MTU;
+
netdev->irq = card->pdev->irq;
card->num_rx_ints = 0;
card->ignore_rx_ramfull = 0;
diff --git a/drivers/net/ethernet/toshiba/tc35815.c b/drivers/net/ethernet/toshiba/tc35815.c
index 5b01b3fa9fec..3be61ed28741 100644
--- a/drivers/net/ethernet/toshiba/tc35815.c
+++ b/drivers/net/ethernet/toshiba/tc35815.c
@@ -747,7 +747,6 @@ static const struct net_device_ops tc35815_netdev_ops = {
.ndo_tx_timeout = tc35815_tx_timeout,
.ndo_do_ioctl = tc35815_ioctl,
.ndo_validate_addr = eth_validate_addr,
- .ndo_change_mtu = eth_change_mtu,
.ndo_set_mac_address = eth_mac_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = tc35815_poll_controller,
diff --git a/drivers/net/ethernet/tundra/tsi108_eth.c b/drivers/net/ethernet/tundra/tsi108_eth.c
index 8fd131207ee1..f153ad729ce5 100644
--- a/drivers/net/ethernet/tundra/tsi108_eth.c
+++ b/drivers/net/ethernet/tundra/tsi108_eth.c
@@ -1548,7 +1548,6 @@ static const struct net_device_ops tsi108_netdev_ops = {
.ndo_do_ioctl = tsi108_do_ioctl,
.ndo_set_mac_address = tsi108_set_mac,
.ndo_validate_addr = eth_validate_addr,
- .ndo_change_mtu = eth_change_mtu,
};
static int
diff --git a/drivers/net/ethernet/via/via-rhine.c b/drivers/net/ethernet/via/via-rhine.c
index 9d14731cdcb1..ba5c54249055 100644
--- a/drivers/net/ethernet/via/via-rhine.c
+++ b/drivers/net/ethernet/via/via-rhine.c
@@ -890,7 +890,6 @@ static const struct net_device_ops rhine_netdev_ops = {
.ndo_start_xmit = rhine_start_tx,
.ndo_get_stats64 = rhine_get_stats64,
.ndo_set_rx_mode = rhine_set_rx_mode,
- .ndo_change_mtu = eth_change_mtu,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
.ndo_do_ioctl = netdev_ioctl,
diff --git a/drivers/net/ethernet/via/via-velocity.c b/drivers/net/ethernet/via/via-velocity.c
index 908e72e18ef7..4716e60e2ccb 100644
--- a/drivers/net/ethernet/via/via-velocity.c
+++ b/drivers/net/ethernet/via/via-velocity.c
@@ -2284,13 +2284,6 @@ static int velocity_change_mtu(struct net_device *dev, int new_mtu)
struct velocity_info *vptr = netdev_priv(dev);
int ret = 0;
- if ((new_mtu < VELOCITY_MIN_MTU) || new_mtu > (VELOCITY_MAX_MTU)) {
- VELOCITY_PRT(MSG_LEVEL_ERR, KERN_NOTICE "%s: Invalid MTU.\n",
- vptr->netdev->name);
- ret = -EINVAL;
- goto out_0;
- }
-
if (!netif_running(dev)) {
dev->mtu = new_mtu;
goto out_0;
@@ -2864,6 +2857,10 @@ static int velocity_probe(struct device *dev, int irq,
NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_CTAG_RX |
NETIF_F_IP_CSUM;
+ /* MTU range: 64 - 9000 */
+ netdev->min_mtu = VELOCITY_MIN_MTU;
+ netdev->max_mtu = VELOCITY_MAX_MTU;
+
ret = register_netdev(netdev);
if (ret < 0)
goto err_iounmap;
diff --git a/drivers/net/ethernet/wiznet/w5100.c b/drivers/net/ethernet/wiznet/w5100.c
index d2349a1bc6ba..e1296ef2cf66 100644
--- a/drivers/net/ethernet/wiznet/w5100.c
+++ b/drivers/net/ethernet/wiznet/w5100.c
@@ -1045,7 +1045,6 @@ static const struct net_device_ops w5100_netdev_ops = {
.ndo_set_rx_mode = w5100_set_rx_mode,
.ndo_set_mac_address = w5100_set_macaddr,
.ndo_validate_addr = eth_validate_addr,
- .ndo_change_mtu = eth_change_mtu,
};
static int w5100_mmio_probe(struct platform_device *pdev)
diff --git a/drivers/net/ethernet/wiznet/w5300.c b/drivers/net/ethernet/wiznet/w5300.c
index ca31a57dbc86..724fabd38a23 100644
--- a/drivers/net/ethernet/wiznet/w5300.c
+++ b/drivers/net/ethernet/wiznet/w5300.c
@@ -536,7 +536,6 @@ static const struct net_device_ops w5300_netdev_ops = {
.ndo_set_rx_mode = w5300_set_rx_mode,
.ndo_set_mac_address = w5300_set_macaddr,
.ndo_validate_addr = eth_validate_addr,
- .ndo_change_mtu = eth_change_mtu,
};
static int w5300_hw_probe(struct platform_device *pdev)
diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c
index a9bd665fd122..d73da8afe08e 100644
--- a/drivers/net/ethernet/xilinx/ll_temac_main.c
+++ b/drivers/net/ethernet/xilinx/ll_temac_main.c
@@ -37,6 +37,7 @@
#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/of_mdio.h>
+#include <linux/of_net.h>
#include <linux/of_platform.h>
#include <linux/of_address.h>
#include <linux/skbuff.h>
@@ -332,7 +333,7 @@ static void temac_do_set_mac_address(struct net_device *ndev)
mutex_unlock(&lp->indirect_mutex);
}
-static int temac_init_mac_address(struct net_device *ndev, void *address)
+static int temac_init_mac_address(struct net_device *ndev, const void *address)
{
memcpy(ndev->dev_addr, address, ETH_ALEN);
if (!is_valid_ether_addr(ndev->dev_addr))
@@ -967,13 +968,8 @@ static const struct attribute_group temac_attr_group = {
};
/* ethtool support */
-static int temac_nway_reset(struct net_device *ndev)
-{
- return phy_start_aneg(ndev->phydev);
-}
-
static const struct ethtool_ops temac_ethtool_ops = {
- .nway_reset = temac_nway_reset,
+ .nway_reset = phy_ethtool_nway_reset,
.get_link = ethtool_op_get_link,
.get_ts_info = ethtool_op_get_ts_info,
.get_link_ksettings = phy_ethtool_get_link_ksettings,
@@ -987,7 +983,7 @@ static int temac_of_probe(struct platform_device *op)
struct net_device *ndev;
const void *addr;
__be32 *p;
- int size, rc = 0;
+ int rc = 0;
/* Init network device structure */
ndev = alloc_etherdev(sizeof(*lp));
@@ -1079,13 +1075,13 @@ static int temac_of_probe(struct platform_device *op)
/* Retrieve the MAC address */
- addr = of_get_property(op->dev.of_node, "local-mac-address", &size);
- if ((!addr) || (size != 6)) {
+ addr = of_get_mac_address(op->dev.of_node);
+ if (!addr) {
dev_err(&op->dev, "could not find MAC address\n");
rc = -ENODEV;
goto err_iounmap_2;
}
- temac_init_mac_address(ndev, (void *)addr);
+ temac_init_mac_address(ndev, addr);
rc = temac_mdio_setup(lp, op->dev.of_node);
if (rc)
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
index c688d68c39aa..b96e96919e31 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
@@ -25,6 +25,7 @@
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/of_mdio.h>
+#include <linux/of_net.h>
#include <linux/of_platform.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>
@@ -292,7 +293,8 @@ out:
* This function is called to initialize the MAC address of the Axi Ethernet
* core. It writes to the UAW0 and UAW1 registers of the core.
*/
-static void axienet_set_mac_address(struct net_device *ndev, void *address)
+static void axienet_set_mac_address(struct net_device *ndev,
+ const void *address)
{
struct axienet_local *lp = netdev_priv(ndev);
@@ -1034,9 +1036,6 @@ static int axienet_change_mtu(struct net_device *ndev, int new_mtu)
XAE_TRL_SIZE) > lp->rxmem)
return -EINVAL;
- if ((new_mtu > XAE_JUMBO_MTU) || (new_mtu < 64))
- return -EINVAL;
-
ndev->mtu = new_mtu;
return 0;
@@ -1459,7 +1458,7 @@ static int axienet_probe(struct platform_device *pdev)
struct device_node *np;
struct axienet_local *lp;
struct net_device *ndev;
- u8 mac_addr[6];
+ const void *mac_addr;
struct resource *ethres, dmares;
u32 value;
@@ -1475,6 +1474,10 @@ static int axienet_probe(struct platform_device *pdev)
ndev->netdev_ops = &axienet_netdev_ops;
ndev->ethtool_ops = &axienet_ethtool_ops;
+ /* MTU range: 64 - 9000 */
+ ndev->min_mtu = 64;
+ ndev->max_mtu = XAE_JUMBO_MTU;
+
lp = netdev_priv(ndev);
lp->ndev = ndev;
lp->dev = &pdev->dev;
@@ -1566,13 +1569,12 @@ static int axienet_probe(struct platform_device *pdev)
}
/* Retrieve the MAC address */
- ret = of_property_read_u8_array(pdev->dev.of_node,
- "local-mac-address", mac_addr, 6);
- if (ret) {
+ mac_addr = of_get_mac_address(pdev->dev.of_node);
+ if (!mac_addr) {
dev_err(&pdev->dev, "could not find MAC address\n");
goto free_netdev;
}
- axienet_set_mac_address(ndev, (void *)mac_addr);
+ axienet_set_mac_address(ndev, mac_addr);
lp->coalesce_count_rx = XAXIDMA_DFT_RX_THRESHOLD;
lp->coalesce_count_tx = XAXIDMA_DFT_TX_THRESHOLD;
diff --git a/drivers/net/ethernet/xircom/xirc2ps_cs.c b/drivers/net/ethernet/xircom/xirc2ps_cs.c
index ddced28e8247..3b08ec766076 100644
--- a/drivers/net/ethernet/xircom/xirc2ps_cs.c
+++ b/drivers/net/ethernet/xircom/xirc2ps_cs.c
@@ -466,7 +466,6 @@ static const struct net_device_ops netdev_ops = {
.ndo_set_config = do_config,
.ndo_do_ioctl = do_ioctl,
.ndo_set_rx_mode = set_multicast_list,
- .ndo_change_mtu = eth_change_mtu,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
};
diff --git a/drivers/net/ethernet/xscale/ixp4xx_eth.c b/drivers/net/ethernet/xscale/ixp4xx_eth.c
index fa32391720fe..aee55c03def0 100644
--- a/drivers/net/ethernet/xscale/ixp4xx_eth.c
+++ b/drivers/net/ethernet/xscale/ixp4xx_eth.c
@@ -1001,11 +1001,6 @@ static void ixp4xx_get_drvinfo(struct net_device *dev,
strlcpy(info->bus_info, "internal", sizeof(info->bus_info));
}
-static int ixp4xx_nway_reset(struct net_device *dev)
-{
- return phy_start_aneg(dev->phydev);
-}
-
int ixp46x_phc_index = -1;
EXPORT_SYMBOL_GPL(ixp46x_phc_index);
@@ -1037,7 +1032,7 @@ static int ixp4xx_get_ts_info(struct net_device *dev,
static const struct ethtool_ops ixp4xx_ethtool_ops = {
.get_drvinfo = ixp4xx_get_drvinfo,
- .nway_reset = ixp4xx_nway_reset,
+ .nway_reset = phy_ethtool_nway_reset,
.get_link = ethtool_op_get_link,
.get_ts_info = ixp4xx_get_ts_info,
.get_link_ksettings = phy_ethtool_get_link_ksettings,
@@ -1378,7 +1373,6 @@ static const struct net_device_ops ixp4xx_netdev_ops = {
.ndo_start_xmit = eth_xmit,
.ndo_set_rx_mode = eth_set_mcast_list,
.ndo_do_ioctl = eth_ioctl,
- .ndo_change_mtu = eth_change_mtu,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
};
diff --git a/drivers/net/fddi/skfp/hwmtm.c b/drivers/net/fddi/skfp/hwmtm.c
index e26398b5a7dc..d0a68bdd5f63 100644
--- a/drivers/net/fddi/skfp/hwmtm.c
+++ b/drivers/net/fddi/skfp/hwmtm.c
@@ -1483,7 +1483,7 @@ void mac_drv_clear_rx_queue(struct s_smc *smc)
r = queue->rx_curr_get ;
while (queue->rx_used) {
DRV_BUF_FLUSH(r,DDI_DMA_SYNC_FORCPU) ;
- DB_RX("switch OWN bit of RxD 0x%x ",r,0,5) ;
+ DB_RX("switch OWN bit of RxD 0x%p ",r,0,5) ;
r->rxd_rbctrl &= ~cpu_to_le32(BMU_OWN) ;
frag_count = 1 ;
DRV_BUF_FLUSH(r,DDI_DMA_SYNC_FORDEV) ;
@@ -1645,7 +1645,7 @@ void hwm_tx_frag(struct s_smc *smc, char far *virt, u_long phys, int len,
DB_TX("hwm_tx_frag: len = %d, frame_status = %x ",len,frame_status,2) ;
if (frame_status & LAN_TX) {
/* '*t' is already defined */
- DB_TX("LAN_TX: TxD = %x, virt = %x ",t,virt,3) ;
+ DB_TX("LAN_TX: TxD = %p, virt = %p ",t,virt,3) ;
t->txd_virt = virt ;
t->txd_txdscr = cpu_to_le32(smc->os.hwm.tx_descr) ;
t->txd_tbadr = cpu_to_le32(phys) ;
@@ -1819,7 +1819,7 @@ void smt_send_mbuf(struct s_smc *smc, SMbuf *mb, int fc)
__le32 tbctrl;
NDD_TRACE("THSB",mb,fc,0) ;
- DB_TX("smt_send_mbuf: mb = 0x%x, fc = 0x%x",mb,fc,4) ;
+ DB_TX("smt_send_mbuf: mb = 0x%p, fc = 0x%x",mb,fc,4) ;
mb->sm_off-- ; /* set to fc */
mb->sm_len++ ; /* + fc */
@@ -1960,7 +1960,7 @@ static void mac_drv_clear_txd(struct s_smc *smc)
do {
DRV_BUF_FLUSH(t1,DDI_DMA_SYNC_FORCPU) ;
- DB_TX("check OWN/EOF bit of TxD 0x%x",t1,0,5) ;
+ DB_TX("check OWN/EOF bit of TxD 0x%p",t1,0,5) ;
tbctrl = le32_to_cpu(CR_READ(t1->txd_tbctrl));
if (tbctrl & BMU_OWN || !queue->tx_used){
@@ -1988,7 +1988,7 @@ static void mac_drv_clear_txd(struct s_smc *smc)
}
else {
#ifndef PASS_1ST_TXD_2_TX_COMP
- DB_TX("mac_drv_tx_comp for TxD 0x%x",t2,0,4) ;
+ DB_TX("mac_drv_tx_comp for TxD 0x%p",t2,0,4) ;
mac_drv_tx_complete(smc,t2) ;
#else
DB_TX("mac_drv_tx_comp for TxD 0x%x",
@@ -2052,7 +2052,7 @@ void mac_drv_clear_tx_queue(struct s_smc *smc)
tx_used = queue->tx_used ;
while (tx_used) {
DRV_BUF_FLUSH(t,DDI_DMA_SYNC_FORCPU) ;
- DB_TX("switch OWN bit of TxD 0x%x ",t,0,5) ;
+ DB_TX("switch OWN bit of TxD 0x%p ",t,0,5) ;
t->txd_tbctrl &= ~cpu_to_le32(BMU_OWN) ;
DRV_BUF_FLUSH(t,DDI_DMA_SYNC_FORDEV) ;
t = t->txd_next ;
diff --git a/drivers/net/fddi/skfp/pmf.c b/drivers/net/fddi/skfp/pmf.c
index 441b4dc79450..52fa162a31e0 100644
--- a/drivers/net/fddi/skfp/pmf.c
+++ b/drivers/net/fddi/skfp/pmf.c
@@ -284,7 +284,7 @@ void smt_pmf_received_pack(struct s_smc *smc, SMbuf *mb, int local)
SMbuf *reply ;
sm = smtod(mb,struct smt_header *) ;
- DB_SMT("SMT: processing PMF frame at %x len %d\n",sm,mb->sm_len) ;
+ DB_SMT("SMT: processing PMF frame at %p len %d\n",sm,mb->sm_len) ;
#ifdef DEBUG
dump_smt(smc,sm,"PMF Received") ;
#endif
diff --git a/drivers/net/fddi/skfp/skfddi.c b/drivers/net/fddi/skfp/skfddi.c
index 51acc6d86e91..3a639180e4a0 100644
--- a/drivers/net/fddi/skfp/skfddi.c
+++ b/drivers/net/fddi/skfp/skfddi.c
@@ -166,7 +166,6 @@ static const struct net_device_ops skfp_netdev_ops = {
.ndo_stop = skfp_close,
.ndo_start_xmit = skfp_send_pkt,
.ndo_get_stats = skfp_ctl_get_stats,
- .ndo_change_mtu = fddi_change_mtu,
.ndo_set_rx_mode = skfp_ctl_set_multicast_list,
.ndo_set_mac_address = skfp_ctl_set_mac_address,
.ndo_do_ioctl = skfp_ioctl,
diff --git a/drivers/net/fddi/skfp/smt.c b/drivers/net/fddi/skfp/smt.c
index cd78b7cacc75..e80a08903fcf 100644
--- a/drivers/net/fddi/skfp/smt.c
+++ b/drivers/net/fddi/skfp/smt.c
@@ -504,7 +504,7 @@ void smt_received_pack(struct s_smc *smc, SMbuf *mb, int fs)
#endif
smt_swap_para(sm,(int) mb->sm_len,1) ;
- DB_SMT("SMT : received packet [%s] at 0x%x\n",
+ DB_SMT("SMT : received packet [%s] at 0x%p\n",
smt_type_name[m_fc(mb) & 0xf],sm) ;
DB_SMT("SMT : version %d, class %s\n",sm->smt_version,
smt_class_name[(sm->smt_class>LAST_CLASS)?0 : sm->smt_class]) ;
diff --git a/drivers/net/fjes/Makefile b/drivers/net/fjes/Makefile
index 523e3d7cf7aa..bc47b354c104 100644
--- a/drivers/net/fjes/Makefile
+++ b/drivers/net/fjes/Makefile
@@ -27,4 +27,4 @@
obj-$(CONFIG_FUJITSU_ES) += fjes.o
-fjes-objs := fjes_main.o fjes_hw.o fjes_ethtool.o
+fjes-objs := fjes_main.o fjes_hw.o fjes_ethtool.o fjes_trace.o fjes_debugfs.o
diff --git a/drivers/net/fjes/fjes.h b/drivers/net/fjes/fjes.h
index a592fe21c698..0372be3ad8e3 100644
--- a/drivers/net/fjes/fjes.h
+++ b/drivers/net/fjes/fjes.h
@@ -66,6 +66,10 @@ struct fjes_adapter {
bool interrupt_watch_enable;
struct fjes_hw hw;
+
+#ifdef CONFIG_DEBUG_FS
+ struct dentry *dbg_adapter;
+#endif
};
extern char fjes_driver_name[];
@@ -74,4 +78,16 @@ extern const u32 fjes_support_mtu[];
void fjes_set_ethtool_ops(struct net_device *);
+#ifdef CONFIG_DEBUG_FS
+void fjes_dbg_adapter_init(struct fjes_adapter *adapter);
+void fjes_dbg_adapter_exit(struct fjes_adapter *adapter);
+void fjes_dbg_init(void);
+void fjes_dbg_exit(void);
+#else
+static inline void fjes_dbg_adapter_init(struct fjes_adapter *adapter) {}
+static inline void fjes_dbg_adapter_exit(struct fjes_adapter *adapter) {}
+static inline void fjes_dbg_init(void) {}
+static inline void fjes_dbg_exit(void) {}
+#endif /* CONFIG_DEBUG_FS */
+
#endif /* FJES_H_ */
diff --git a/drivers/net/fjes/fjes_debugfs.c b/drivers/net/fjes/fjes_debugfs.c
new file mode 100644
index 000000000000..30052ebd52bf
--- /dev/null
+++ b/drivers/net/fjes/fjes_debugfs.c
@@ -0,0 +1,117 @@
+/*
+ * FUJITSU Extended Socket Network Device driver
+ * Copyright (c) 2015-2016 FUJITSU LIMITED
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ */
+
+/* debugfs support for fjes driver */
+
+#ifdef CONFIG_DEBUG_FS
+
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <linux/platform_device.h>
+
+#include "fjes.h"
+
+static struct dentry *fjes_debug_root;
+
+static const char * const ep_status_string[] = {
+ "unshared",
+ "shared",
+ "waiting",
+ "complete",
+};
+
+static int fjes_dbg_status_show(struct seq_file *m, void *v)
+{
+ struct fjes_adapter *adapter = m->private;
+ struct fjes_hw *hw = &adapter->hw;
+ int max_epid = hw->max_epid;
+ int my_epid = hw->my_epid;
+ int epidx;
+
+ seq_puts(m, "EPID\tSTATUS SAME_ZONE CONNECTED\n");
+ for (epidx = 0; epidx < max_epid; epidx++) {
+ if (epidx == my_epid) {
+ seq_printf(m, "ep%d\t%-16c %-16c %-16c\n",
+ epidx, '-', '-', '-');
+ } else {
+ seq_printf(m, "ep%d\t%-16s %-16c %-16c\n",
+ epidx,
+ ep_status_string[fjes_hw_get_partner_ep_status(hw, epidx)],
+ fjes_hw_epid_is_same_zone(hw, epidx) ? 'Y' : 'N',
+ fjes_hw_epid_is_shared(hw->hw_info.share, epidx) ? 'Y' : 'N');
+ }
+ }
+
+ return 0;
+}
+
+static int fjes_dbg_status_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, fjes_dbg_status_show, inode->i_private);
+}
+
+static const struct file_operations fjes_dbg_status_fops = {
+ .owner = THIS_MODULE,
+ .open = fjes_dbg_status_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+void fjes_dbg_adapter_init(struct fjes_adapter *adapter)
+{
+ const char *name = dev_name(&adapter->plat_dev->dev);
+ struct dentry *pfile;
+
+ adapter->dbg_adapter = debugfs_create_dir(name, fjes_debug_root);
+ if (!adapter->dbg_adapter) {
+ dev_err(&adapter->plat_dev->dev,
+ "debugfs entry for %s failed\n", name);
+ return;
+ }
+
+ pfile = debugfs_create_file("status", 0444, adapter->dbg_adapter,
+ adapter, &fjes_dbg_status_fops);
+ if (!pfile)
+ dev_err(&adapter->plat_dev->dev,
+ "debugfs status for %s failed\n", name);
+}
+
+void fjes_dbg_adapter_exit(struct fjes_adapter *adapter)
+{
+ debugfs_remove_recursive(adapter->dbg_adapter);
+ adapter->dbg_adapter = NULL;
+}
+
+void fjes_dbg_init(void)
+{
+ fjes_debug_root = debugfs_create_dir(fjes_driver_name, NULL);
+ if (!fjes_debug_root)
+ pr_info("init of debugfs failed\n");
+}
+
+void fjes_dbg_exit(void)
+{
+ debugfs_remove_recursive(fjes_debug_root);
+ fjes_debug_root = NULL;
+}
+
+#endif /* CONFIG_DEBUG_FS */
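
With debugfs mounted (typically at /sys/kernel/debug), the per-adapter status file reads like any regular file. A sketch, assuming a platform device named "fjes.0":

#include <stdio.h>

int main(void)
{
	/* path assumes debugfs is mounted and the device is "fjes.0" */
	FILE *f = fopen("/sys/kernel/debug/fjes/fjes.0/status", "r");
	char line[256];

	if (!f) {
		perror("fopen");
		return 1;
	}
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);
	fclose(f);
	return 0;
}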
diff --git a/drivers/net/fjes/fjes_ethtool.c b/drivers/net/fjes/fjes_ethtool.c
index 9c218e140c41..6575f880f1be 100644
--- a/drivers/net/fjes/fjes_ethtool.c
+++ b/drivers/net/fjes/fjes_ethtool.c
@@ -49,10 +49,18 @@ static const struct fjes_stats fjes_gstrings_stats[] = {
FJES_STAT("tx_dropped", stats64.tx_dropped),
};
+#define FJES_EP_STATS_LEN 14
+#define FJES_STATS_LEN \
+ (ARRAY_SIZE(fjes_gstrings_stats) + \
+ ((&((struct fjes_adapter *)netdev_priv(netdev))->hw)->max_epid - 1) * \
+ FJES_EP_STATS_LEN)
+
static void fjes_get_ethtool_stats(struct net_device *netdev,
struct ethtool_stats *stats, u64 *data)
{
struct fjes_adapter *adapter = netdev_priv(netdev);
+ struct fjes_hw *hw = &adapter->hw;
+ int epidx;
char *p;
int i;
@@ -61,11 +69,39 @@ static void fjes_get_ethtool_stats(struct net_device *netdev,
data[i] = (fjes_gstrings_stats[i].sizeof_stat == sizeof(u64))
? *(u64 *)p : *(u32 *)p;
}
+ for (epidx = 0; epidx < hw->max_epid; epidx++) {
+ if (epidx == hw->my_epid)
+ continue;
+ data[i++] = hw->ep_shm_info[epidx].ep_stats
+ .com_regist_buf_exec;
+ data[i++] = hw->ep_shm_info[epidx].ep_stats
+ .com_unregist_buf_exec;
+ data[i++] = hw->ep_shm_info[epidx].ep_stats.send_intr_rx;
+ data[i++] = hw->ep_shm_info[epidx].ep_stats.send_intr_unshare;
+ data[i++] = hw->ep_shm_info[epidx].ep_stats
+ .send_intr_zoneupdate;
+ data[i++] = hw->ep_shm_info[epidx].ep_stats.recv_intr_rx;
+ data[i++] = hw->ep_shm_info[epidx].ep_stats.recv_intr_unshare;
+ data[i++] = hw->ep_shm_info[epidx].ep_stats.recv_intr_stop;
+ data[i++] = hw->ep_shm_info[epidx].ep_stats
+ .recv_intr_zoneupdate;
+ data[i++] = hw->ep_shm_info[epidx].ep_stats.tx_buffer_full;
+ data[i++] = hw->ep_shm_info[epidx].ep_stats
+ .tx_dropped_not_shared;
+ data[i++] = hw->ep_shm_info[epidx].ep_stats
+ .tx_dropped_ver_mismatch;
+ data[i++] = hw->ep_shm_info[epidx].ep_stats
+ .tx_dropped_buf_size_mismatch;
+ data[i++] = hw->ep_shm_info[epidx].ep_stats
+ .tx_dropped_vlanid_mismatch;
+ }
}
static void fjes_get_strings(struct net_device *netdev,
u32 stringset, u8 *data)
{
+ struct fjes_adapter *adapter = netdev_priv(netdev);
+ struct fjes_hw *hw = &adapter->hw;
u8 *p = data;
int i;
@@ -76,6 +112,38 @@ static void fjes_get_strings(struct net_device *netdev,
ETH_GSTRING_LEN);
p += ETH_GSTRING_LEN;
}
+ for (i = 0; i < hw->max_epid; i++) {
+ if (i == hw->my_epid)
+ continue;
+ sprintf(p, "ep%u_com_regist_buf_exec", i);
+ p += ETH_GSTRING_LEN;
+ sprintf(p, "ep%u_com_unregist_buf_exec", i);
+ p += ETH_GSTRING_LEN;
+ sprintf(p, "ep%u_send_intr_rx", i);
+ p += ETH_GSTRING_LEN;
+ sprintf(p, "ep%u_send_intr_unshare", i);
+ p += ETH_GSTRING_LEN;
+ sprintf(p, "ep%u_send_intr_zoneupdate", i);
+ p += ETH_GSTRING_LEN;
+ sprintf(p, "ep%u_recv_intr_rx", i);
+ p += ETH_GSTRING_LEN;
+ sprintf(p, "ep%u_recv_intr_unshare", i);
+ p += ETH_GSTRING_LEN;
+ sprintf(p, "ep%u_recv_intr_stop", i);
+ p += ETH_GSTRING_LEN;
+ sprintf(p, "ep%u_recv_intr_zoneupdate", i);
+ p += ETH_GSTRING_LEN;
+ sprintf(p, "ep%u_tx_buffer_full", i);
+ p += ETH_GSTRING_LEN;
+ sprintf(p, "ep%u_tx_dropped_not_shared", i);
+ p += ETH_GSTRING_LEN;
+ sprintf(p, "ep%u_tx_dropped_ver_mismatch", i);
+ p += ETH_GSTRING_LEN;
+ sprintf(p, "ep%u_tx_dropped_buf_size_mismatch", i);
+ p += ETH_GSTRING_LEN;
+ sprintf(p, "ep%u_tx_dropped_vlanid_mismatch", i);
+ p += ETH_GSTRING_LEN;
+ }
break;
}
}
@@ -84,7 +152,7 @@ static int fjes_get_sset_count(struct net_device *netdev, int sset)
{
switch (sset) {
case ETH_SS_STATS:
- return ARRAY_SIZE(fjes_gstrings_stats);
+ return FJES_STATS_LEN;
default:
return -EOPNOTSUPP;
}
@@ -121,12 +189,123 @@ static int fjes_get_settings(struct net_device *netdev,
return 0;
}
+static int fjes_get_regs_len(struct net_device *netdev)
+{
+#define FJES_REGS_LEN 37
+ return FJES_REGS_LEN * sizeof(u32);
+}
+
+static void fjes_get_regs(struct net_device *netdev,
+ struct ethtool_regs *regs, void *p)
+{
+ struct fjes_adapter *adapter = netdev_priv(netdev);
+ struct fjes_hw *hw = &adapter->hw;
+ u32 *regs_buff = p;
+
+ memset(p, 0, FJES_REGS_LEN * sizeof(u32));
+
+ regs->version = 1;
+
+ /* Information registers */
+ regs_buff[0] = rd32(XSCT_OWNER_EPID);
+ regs_buff[1] = rd32(XSCT_MAX_EP);
+
+ /* Device Control registers */
+ regs_buff[4] = rd32(XSCT_DCTL);
+
+ /* Command Control registers */
+ regs_buff[8] = rd32(XSCT_CR);
+ regs_buff[9] = rd32(XSCT_CS);
+ regs_buff[10] = rd32(XSCT_SHSTSAL);
+ regs_buff[11] = rd32(XSCT_SHSTSAH);
+
+ regs_buff[13] = rd32(XSCT_REQBL);
+ regs_buff[14] = rd32(XSCT_REQBAL);
+ regs_buff[15] = rd32(XSCT_REQBAH);
+
+ regs_buff[17] = rd32(XSCT_RESPBL);
+ regs_buff[18] = rd32(XSCT_RESPBAL);
+ regs_buff[19] = rd32(XSCT_RESPBAH);
+
+ /* Interrupt Control registers */
+ regs_buff[32] = rd32(XSCT_IS);
+ regs_buff[33] = rd32(XSCT_IMS);
+ regs_buff[34] = rd32(XSCT_IMC);
+ regs_buff[35] = rd32(XSCT_IG);
+ regs_buff[36] = rd32(XSCT_ICTL);
+}
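
Userspace fetches this dump through the standard two-step ethtool sequence (ethtool -d does the same): query the dump length via ETHTOOL_GDRVINFO, then issue ETHTOOL_GREGS. A sketch, with "es0" as an example interface name:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>
#include <unistd.h>

int main(void)
{
	struct ethtool_drvinfo drvinfo = { .cmd = ETHTOOL_GDRVINFO };
	struct ethtool_regs *regs;
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "es0", IFNAMSIZ - 1);	/* example name */

	ifr.ifr_data = (void *)&drvinfo;
	if (ioctl(fd, SIOCETHTOOL, &ifr) < 0) {
		perror("ETHTOOL_GDRVINFO");
		return 1;
	}

	regs = calloc(1, sizeof(*regs) + drvinfo.regdump_len);
	if (!regs)
		return 1;
	regs->cmd = ETHTOOL_GREGS;
	regs->len = drvinfo.regdump_len;
	ifr.ifr_data = (void *)regs;
	if (ioctl(fd, SIOCETHTOOL, &ifr) < 0)
		perror("ETHTOOL_GREGS");
	else
		printf("dumped %u bytes, version %u\n", regs->len, regs->version);
	free(regs);
	close(fd);
	return 0;
}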
+
+static int fjes_set_dump(struct net_device *netdev, struct ethtool_dump *dump)
+{
+ struct fjes_adapter *adapter = netdev_priv(netdev);
+ struct fjes_hw *hw = &adapter->hw;
+ int ret = 0;
+
+ if (dump->flag) {
+ if (hw->debug_mode)
+ return -EPERM;
+
+ hw->debug_mode = dump->flag;
+
+ /* enable debug mode */
+ mutex_lock(&hw->hw_info.lock);
+ ret = fjes_hw_start_debug(hw);
+ mutex_unlock(&hw->hw_info.lock);
+
+ if (ret)
+ hw->debug_mode = 0;
+ } else {
+ if (!hw->debug_mode)
+ return -EPERM;
+
+ /* disable debug mode */
+ mutex_lock(&hw->hw_info.lock);
+ ret = fjes_hw_stop_debug(hw);
+ mutex_unlock(&hw->hw_info.lock);
+ }
+
+ return ret;
+}
+
+static int fjes_get_dump_flag(struct net_device *netdev,
+ struct ethtool_dump *dump)
+{
+ struct fjes_adapter *adapter = netdev_priv(netdev);
+ struct fjes_hw *hw = &adapter->hw;
+
+ dump->len = hw->hw_info.trace_size;
+ dump->version = 1;
+ dump->flag = hw->debug_mode;
+
+ return 0;
+}
+
+static int fjes_get_dump_data(struct net_device *netdev,
+ struct ethtool_dump *dump, void *buf)
+{
+ struct fjes_adapter *adapter = netdev_priv(netdev);
+ struct fjes_hw *hw = &adapter->hw;
+ int ret = 0;
+
+ if (hw->hw_info.trace)
+ memcpy(buf, hw->hw_info.trace, hw->hw_info.trace_size);
+ else
+ ret = -EPERM;
+
+ return ret;
+}
+
static const struct ethtool_ops fjes_ethtool_ops = {
.get_settings = fjes_get_settings,
.get_drvinfo = fjes_get_drvinfo,
.get_ethtool_stats = fjes_get_ethtool_stats,
.get_strings = fjes_get_strings,
.get_sset_count = fjes_get_sset_count,
+ .get_regs = fjes_get_regs,
+ .get_regs_len = fjes_get_regs_len,
+ .set_dump = fjes_set_dump,
+ .get_dump_flag = fjes_get_dump_flag,
+ .get_dump_data = fjes_get_dump_data,
};
void fjes_set_ethtool_ops(struct net_device *netdev)
diff --git a/drivers/net/fjes/fjes_hw.c b/drivers/net/fjes/fjes_hw.c
index 0dbafedc0a34..9c652c04375b 100644
--- a/drivers/net/fjes/fjes_hw.c
+++ b/drivers/net/fjes/fjes_hw.c
@@ -21,6 +21,7 @@
#include "fjes_hw.h"
#include "fjes.h"
+#include "fjes_trace.h"
static void fjes_hw_update_zone_task(struct work_struct *);
static void fjes_hw_epstop_task(struct work_struct *);
@@ -342,6 +343,9 @@ int fjes_hw_init(struct fjes_hw *hw)
ret = fjes_hw_setup(hw);
+ hw->hw_info.trace = vzalloc(FJES_DEBUG_BUFFER_SIZE);
+ hw->hw_info.trace_size = FJES_DEBUG_BUFFER_SIZE;
+
return ret;
}
@@ -350,6 +354,18 @@ void fjes_hw_exit(struct fjes_hw *hw)
int ret;
if (hw->base) {
+
+ if (hw->debug_mode) {
+ /* disable debug mode */
+ mutex_lock(&hw->hw_info.lock);
+ fjes_hw_stop_debug(hw);
+ mutex_unlock(&hw->hw_info.lock);
+ }
+ vfree(hw->hw_info.trace);
+ hw->hw_info.trace = NULL;
+ hw->hw_info.trace_size = 0;
+ hw->debug_mode = 0;
+
ret = fjes_hw_reset(hw);
if (ret)
pr_err("%s: reset error", __func__);
@@ -371,7 +387,7 @@ fjes_hw_issue_request_command(struct fjes_hw *hw,
enum fjes_dev_command_response_e ret = FJES_CMD_STATUS_UNKNOWN;
union REG_CR cr;
union REG_CS cs;
- int timeout;
+ int timeout = FJES_COMMAND_REQ_TIMEOUT * 1000;
cr.reg = 0;
cr.bits.req_start = 1;
@@ -408,6 +424,8 @@ fjes_hw_issue_request_command(struct fjes_hw *hw,
}
}
+ trace_fjes_hw_issue_request_command(&cr, &cs, timeout, ret);
+
return ret;
}
@@ -427,11 +445,13 @@ int fjes_hw_request_info(struct fjes_hw *hw)
res_buf->info.code = 0;
ret = fjes_hw_issue_request_command(hw, FJES_CMD_REQ_INFO);
+ trace_fjes_hw_request_info(hw, res_buf);
result = 0;
if (FJES_DEV_COMMAND_INFO_RES_LEN((*hw->hw_info.max_epid)) !=
res_buf->info.length) {
+ trace_fjes_hw_request_info_err("Invalid res_buf");
result = -ENOMSG;
} else if (ret == FJES_CMD_STATUS_NORMAL) {
switch (res_buf->info.code) {
@@ -448,6 +468,7 @@ int fjes_hw_request_info(struct fjes_hw *hw)
result = -EPERM;
break;
case FJES_CMD_STATUS_TIMEOUT:
+ trace_fjes_hw_request_info_err("Timeout");
result = -EBUSY;
break;
case FJES_CMD_STATUS_ERROR_PARAM:
@@ -512,6 +533,8 @@ int fjes_hw_register_buff_addr(struct fjes_hw *hw, int dest_epid,
res_buf->share_buffer.length = 0;
res_buf->share_buffer.code = 0;
+ trace_fjes_hw_register_buff_addr_req(req_buf, buf_pair);
+
ret = fjes_hw_issue_request_command(hw, FJES_CMD_REQ_SHARE_BUFFER);
timeout = FJES_COMMAND_REQ_BUFF_TIMEOUT * 1000;
@@ -532,16 +555,20 @@ int fjes_hw_register_buff_addr(struct fjes_hw *hw, int dest_epid,
result = 0;
+ trace_fjes_hw_register_buff_addr(res_buf, timeout);
+
if (res_buf->share_buffer.length !=
- FJES_DEV_COMMAND_SHARE_BUFFER_RES_LEN)
+ FJES_DEV_COMMAND_SHARE_BUFFER_RES_LEN) {
+ trace_fjes_hw_register_buff_addr_err("Invalid res_buf");
result = -ENOMSG;
- else if (ret == FJES_CMD_STATUS_NORMAL) {
+ } else if (ret == FJES_CMD_STATUS_NORMAL) {
switch (res_buf->share_buffer.code) {
case FJES_CMD_REQ_RES_CODE_NORMAL:
result = 0;
set_bit(dest_epid, &hw->hw_info.buffer_share_bit);
break;
case FJES_CMD_REQ_RES_CODE_BUSY:
+ trace_fjes_hw_register_buff_addr_err("Busy Timeout");
result = -EBUSY;
break;
default:
@@ -554,6 +581,7 @@ int fjes_hw_register_buff_addr(struct fjes_hw *hw, int dest_epid,
result = -EPERM;
break;
case FJES_CMD_STATUS_TIMEOUT:
+ trace_fjes_hw_register_buff_addr_err("Timeout");
result = -EBUSY;
break;
case FJES_CMD_STATUS_ERROR_PARAM:
@@ -595,6 +623,7 @@ int fjes_hw_unregister_buff_addr(struct fjes_hw *hw, int dest_epid)
res_buf->unshare_buffer.length = 0;
res_buf->unshare_buffer.code = 0;
+ trace_fjes_hw_unregister_buff_addr_req(req_buf);
ret = fjes_hw_issue_request_command(hw, FJES_CMD_REQ_UNSHARE_BUFFER);
timeout = FJES_COMMAND_REQ_BUFF_TIMEOUT * 1000;
@@ -616,8 +645,11 @@ int fjes_hw_unregister_buff_addr(struct fjes_hw *hw, int dest_epid)
result = 0;
+ trace_fjes_hw_unregister_buff_addr(res_buf, timeout);
+
if (res_buf->unshare_buffer.length !=
FJES_DEV_COMMAND_UNSHARE_BUFFER_RES_LEN) {
+ trace_fjes_hw_unregister_buff_addr_err("Invalid res_buf");
result = -ENOMSG;
} else if (ret == FJES_CMD_STATUS_NORMAL) {
switch (res_buf->unshare_buffer.code) {
@@ -626,6 +658,7 @@ int fjes_hw_unregister_buff_addr(struct fjes_hw *hw, int dest_epid)
clear_bit(dest_epid, &hw->hw_info.buffer_share_bit);
break;
case FJES_CMD_REQ_RES_CODE_BUSY:
+ trace_fjes_hw_unregister_buff_addr_err("Busy Timeout");
result = -EBUSY;
break;
default:
@@ -638,6 +671,7 @@ int fjes_hw_unregister_buff_addr(struct fjes_hw *hw, int dest_epid)
result = -EPERM;
break;
case FJES_CMD_STATUS_TIMEOUT:
+ trace_fjes_hw_unregister_buff_addr_err("Timeout");
result = -EBUSY;
break;
case FJES_CMD_STATUS_ERROR_PARAM:
@@ -752,6 +786,7 @@ void fjes_hw_raise_epstop(struct fjes_hw *hw)
case EP_PARTNER_SHARED:
fjes_hw_raise_interrupt(hw, epidx,
REG_ICTL_MASK_TXRX_STOP_REQ);
+ hw->ep_shm_info[epidx].ep_stats.send_intr_unshare += 1;
break;
default:
break;
@@ -1062,6 +1097,9 @@ static void fjes_hw_update_zone_task(struct work_struct *work)
break;
}
mutex_unlock(&hw->hw_info.lock);
+
+ hw->ep_shm_info[epidx].ep_stats
+ .com_regist_buf_exec += 1;
}
if (test_bit(epidx, &unshare_bit)) {
@@ -1085,6 +1123,9 @@ static void fjes_hw_update_zone_task(struct work_struct *work)
mutex_unlock(&hw->hw_info.lock);
+ hw->ep_shm_info[epidx].ep_stats
+ .com_unregist_buf_exec += 1;
+
if (ret == 0) {
spin_lock_irqsave(&hw->rx_status_lock, flags);
fjes_hw_setup_epbuf(
@@ -1099,6 +1140,8 @@ static void fjes_hw_update_zone_task(struct work_struct *work)
fjes_hw_raise_interrupt(hw, epidx,
REG_ICTL_MASK_TXRX_STOP_REQ);
+ hw->ep_shm_info[epidx].ep_stats.send_intr_unshare += 1;
+
set_bit(epidx, &hw->txrx_stop_req_bit);
spin_lock_irqsave(&hw->rx_status_lock, flags);
hw->ep_shm_info[epidx].tx.
@@ -1147,3 +1190,125 @@ static void fjes_hw_epstop_task(struct work_struct *work)
}
}
}
+
+int fjes_hw_start_debug(struct fjes_hw *hw)
+{
+ union fjes_device_command_req *req_buf = hw->hw_info.req_buf;
+ union fjes_device_command_res *res_buf = hw->hw_info.res_buf;
+ enum fjes_dev_command_response_e ret;
+ int page_count;
+ int result = 0;
+ void *addr;
+ int i;
+
+ if (!hw->hw_info.trace)
+ return -EPERM;
+ memset(hw->hw_info.trace, 0, FJES_DEBUG_BUFFER_SIZE);
+
+ memset(req_buf, 0, hw->hw_info.req_buf_size);
+ memset(res_buf, 0, hw->hw_info.res_buf_size);
+
+ req_buf->start_trace.length =
+ FJES_DEV_COMMAND_START_DBG_REQ_LEN(hw->hw_info.trace_size);
+ req_buf->start_trace.mode = hw->debug_mode;
+ req_buf->start_trace.buffer_len = hw->hw_info.trace_size;
+ page_count = hw->hw_info.trace_size / FJES_DEBUG_PAGE_SIZE;
+ for (i = 0; i < page_count; i++) {
+ addr = ((u8 *)hw->hw_info.trace) + i * FJES_DEBUG_PAGE_SIZE;
+ req_buf->start_trace.buffer[i] =
+ cpu_to_le64(page_to_phys(vmalloc_to_page(addr)) +
+ offset_in_page(addr));
+ }
+
+ res_buf->start_trace.length = 0;
+ res_buf->start_trace.code = 0;
+
+ trace_fjes_hw_start_debug_req(req_buf);
+ ret = fjes_hw_issue_request_command(hw, FJES_CMD_REQ_START_DEBUG);
+ trace_fjes_hw_start_debug(res_buf);
+
+ if (res_buf->start_trace.length !=
+ FJES_DEV_COMMAND_START_DBG_RES_LEN) {
+ result = -ENOMSG;
+ trace_fjes_hw_start_debug_err("Invalid res_buf");
+ } else if (ret == FJES_CMD_STATUS_NORMAL) {
+ switch (res_buf->start_trace.code) {
+ case FJES_CMD_REQ_RES_CODE_NORMAL:
+ result = 0;
+ break;
+ default:
+ result = -EPERM;
+ break;
+ }
+ } else {
+ switch (ret) {
+ case FJES_CMD_STATUS_UNKNOWN:
+ result = -EPERM;
+ break;
+ case FJES_CMD_STATUS_TIMEOUT:
+ trace_fjes_hw_start_debug_err("Busy Timeout");
+ result = -EBUSY;
+ break;
+ case FJES_CMD_STATUS_ERROR_PARAM:
+ case FJES_CMD_STATUS_ERROR_STATUS:
+ default:
+ result = -EPERM;
+ break;
+ }
+ }
+
+ return result;
+}
+
+int fjes_hw_stop_debug(struct fjes_hw *hw)
+{
+ union fjes_device_command_req *req_buf = hw->hw_info.req_buf;
+ union fjes_device_command_res *res_buf = hw->hw_info.res_buf;
+ enum fjes_dev_command_response_e ret;
+ int result = 0;
+
+ if (!hw->hw_info.trace)
+ return -EPERM;
+
+ memset(req_buf, 0, hw->hw_info.req_buf_size);
+ memset(res_buf, 0, hw->hw_info.res_buf_size);
+ req_buf->stop_trace.length = FJES_DEV_COMMAND_STOP_DBG_REQ_LEN;
+
+ res_buf->stop_trace.length = 0;
+ res_buf->stop_trace.code = 0;
+
+ ret = fjes_hw_issue_request_command(hw, FJES_CMD_REQ_STOP_DEBUG);
+ trace_fjes_hw_stop_debug(res_buf);
+
+ if (res_buf->stop_trace.length != FJES_DEV_COMMAND_STOP_DBG_RES_LEN) {
+ trace_fjes_hw_stop_debug_err("Invalid res_buf");
+ result = -ENOMSG;
+ } else if (ret == FJES_CMD_STATUS_NORMAL) {
+ switch (res_buf->stop_trace.code) {
+ case FJES_CMD_REQ_RES_CODE_NORMAL:
+ result = 0;
+ hw->debug_mode = 0;
+ break;
+ default:
+ result = -EPERM;
+ break;
+ }
+ } else {
+ switch (ret) {
+ case FJES_CMD_STATUS_UNKNOWN:
+ result = -EPERM;
+ break;
+ case FJES_CMD_STATUS_TIMEOUT:
+ result = -EBUSY;
+ trace_fjes_hw_stop_debug_err("Busy Timeout");
+ break;
+ case FJES_CMD_STATUS_ERROR_PARAM:
+ case FJES_CMD_STATUS_ERROR_STATUS:
+ default:
+ result = -EPERM;
+ break;
+ }
+ }
+
+ return result;
+}
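
[editor's note] fjes_hw_start_debug() hands the firmware one physical address per page because the trace buffer is vmalloc()'d and only virtually contiguous: each FJES_DEBUG_PAGE_SIZE chunk is resolved with vmalloc_to_page(), and the request grows by one 8-byte slot per page. A userspace-runnable sketch of the size arithmetic only (constants copied from the fjes_hw.h hunk below; the main() is illustrative):

    #include <stdio.h>

    /* constants copied from the fjes_hw.h hunk below */
    #define FJES_DEBUG_PAGE_SIZE    4096
    #define FJES_DEBUG_BUFFER_SIZE  (16 * FJES_DEBUG_PAGE_SIZE)

    /* 16-byte fixed part plus one 8-byte physical address per page */
    #define FJES_DEV_COMMAND_START_DBG_REQ_LEN(byte) \
            (16 + (8 * (byte) / FJES_DEBUG_PAGE_SIZE))

    int main(void)
    {
            int page_count = FJES_DEBUG_BUFFER_SIZE / FJES_DEBUG_PAGE_SIZE;

            /* 16 pages -> 16 + 8 * 16 = 144-byte request */
            printf("pages=%d req_len=%d\n", page_count,
                   FJES_DEV_COMMAND_START_DBG_REQ_LEN(FJES_DEBUG_BUFFER_SIZE));
            return 0;
    }
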
diff --git a/drivers/net/fjes/fjes_hw.h b/drivers/net/fjes/fjes_hw.h
index 1445ac99d6e3..3a6da0996a0e 100644
--- a/drivers/net/fjes/fjes_hw.h
+++ b/drivers/net/fjes/fjes_hw.h
@@ -33,6 +33,9 @@ struct fjes_hw;
#define EP_BUFFER_SUPPORT_VLAN_MAX 4
#define EP_BUFFER_INFO_SIZE 4096
+#define FJES_DEBUG_PAGE_SIZE 4096
+#define FJES_DEBUG_BUFFER_SIZE (16 * FJES_DEBUG_PAGE_SIZE)
+
#define FJES_DEVICE_RESET_TIMEOUT ((17 + 1) * 3 * 8) /* sec */
#define FJES_COMMAND_REQ_TIMEOUT ((5 + 1) * 3 * 8) /* sec */
#define FJES_COMMAND_REQ_BUFF_TIMEOUT (60 * 3) /* sec */
@@ -94,6 +97,12 @@ struct fjes_hw;
#define FJES_DEV_RES_BUF_SIZE(maxep) \
FJES_DEV_COMMAND_INFO_RES_LEN(maxep)
+#define FJES_DEV_COMMAND_START_DBG_REQ_LEN(byte) \
+ (16 + (8 * (byte) / FJES_DEBUG_PAGE_SIZE))
+#define FJES_DEV_COMMAND_START_DBG_RES_LEN (8)
+#define FJES_DEV_COMMAND_STOP_DBG_REQ_LEN (4)
+#define FJES_DEV_COMMAND_STOP_DBG_RES_LEN (8)
+
/* Frame & MTU */
struct esmem_frame {
__le32 frame_size;
@@ -173,6 +182,8 @@ enum fjes_dev_command_request_type {
FJES_CMD_REQ_INFO = 0x0001,
FJES_CMD_REQ_SHARE_BUFFER = 0x0002,
FJES_CMD_REQ_UNSHARE_BUFFER = 0x0004,
+ FJES_CMD_REQ_START_DEBUG = 0x0100,
+ FJES_CMD_REQ_STOP_DEBUG = 0x0200,
};
/* parameter for command control */
@@ -228,6 +239,24 @@ union ep_buffer_info {
};
+/* statistics of EP */
+struct fjes_drv_ep_stats {
+ u64 com_regist_buf_exec;
+ u64 com_unregist_buf_exec;
+ u64 send_intr_rx;
+ u64 send_intr_unshare;
+ u64 send_intr_zoneupdate;
+ u64 recv_intr_rx;
+ u64 recv_intr_unshare;
+ u64 recv_intr_stop;
+ u64 recv_intr_zoneupdate;
+ u64 tx_buffer_full;
+ u64 tx_dropped_not_shared;
+ u64 tx_dropped_ver_mismatch;
+ u64 tx_dropped_buf_size_mismatch;
+ u64 tx_dropped_vlanid_mismatch;
+};
+
/* buffer pair for Extended Partition */
struct ep_share_mem_info {
struct epbuf_handler {
@@ -238,6 +267,7 @@ struct ep_share_mem_info {
} tx, rx;
struct rtnl_link_stats64 net_stats;
+ struct fjes_drv_ep_stats ep_stats;
u16 tx_status_work;
@@ -302,6 +332,8 @@ struct fjes_hw {
struct fjes_hw_info hw_info;
spinlock_t rx_status_lock; /* spinlock for rx_status */
+
+ u32 debug_mode;
};
int fjes_hw_init(struct fjes_hw *);
@@ -334,4 +366,6 @@ void *fjes_hw_epbuf_rx_curpkt_get_addr(struct epbuf_handler *, size_t *);
void fjes_hw_epbuf_rx_curpkt_drop(struct epbuf_handler *);
int fjes_hw_epbuf_tx_pkt_send(struct epbuf_handler *, void *, size_t);
+int fjes_hw_start_debug(struct fjes_hw *);
+int fjes_hw_stop_debug(struct fjes_hw *);
#endif /* FJES_HW_H_ */
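
[editor's note] The new struct fjes_drv_ep_stats is a flat block of u64 counters, which lends itself to the name/offset tables ethtool stats code usually uses. The driver's real export of these counters is not in the hunks shown here; the following is a hypothetical, userspace-runnable dump helper only (the struct is copied from the header above, everything else is illustrative):

    #include <stdio.h>
    #include <stddef.h>
    #include <stdint.h>

    typedef uint64_t u64;

    /* copied from the fjes_hw.h hunk above */
    struct fjes_drv_ep_stats {
            u64 com_regist_buf_exec;
            u64 com_unregist_buf_exec;
            u64 send_intr_rx;
            u64 send_intr_unshare;
            u64 send_intr_zoneupdate;
            u64 recv_intr_rx;
            u64 recv_intr_unshare;
            u64 recv_intr_stop;
            u64 recv_intr_zoneupdate;
            u64 tx_buffer_full;
            u64 tx_dropped_not_shared;
            u64 tx_dropped_ver_mismatch;
            u64 tx_dropped_buf_size_mismatch;
            u64 tx_dropped_vlanid_mismatch;
    };

    /* name/offset pair, in the style of many ethtool stats tables */
    #define EP_STAT(m) { #m, offsetof(struct fjes_drv_ep_stats, m) }

    static const struct { const char *name; size_t off; } ep_stat_tbl[] = {
            EP_STAT(com_regist_buf_exec),
            EP_STAT(com_unregist_buf_exec),
            EP_STAT(send_intr_rx),
            EP_STAT(recv_intr_rx),
            EP_STAT(tx_buffer_full),
            EP_STAT(tx_dropped_not_shared),
            /* ...one entry per remaining counter... */
    };

    static void ep_stats_dump(const struct fjes_drv_ep_stats *s)
    {
            size_t i;

            for (i = 0; i < sizeof(ep_stat_tbl) / sizeof(ep_stat_tbl[0]); i++)
                    printf("%s: %llu\n", ep_stat_tbl[i].name,
                           (unsigned long long)
                           *(const u64 *)((const char *)s + ep_stat_tbl[i].off));
    }

    int main(void)
    {
            struct fjes_drv_ep_stats s = { .send_intr_rx = 42 };

            ep_stats_dump(&s);
            return 0;
    }
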
diff --git a/drivers/net/fjes/fjes_main.c b/drivers/net/fjes/fjes_main.c
index e46b1ebbbff4..b77e4ecf3cf2 100644
--- a/drivers/net/fjes/fjes_main.c
+++ b/drivers/net/fjes/fjes_main.c
@@ -27,9 +27,10 @@
#include <linux/interrupt.h>
#include "fjes.h"
+#include "fjes_trace.h"
#define MAJ 1
-#define MIN 1
+#define MIN 2
#define DRV_VERSION __stringify(MAJ) "." __stringify(MIN)
#define DRV_NAME "fjes"
char fjes_driver_name[] = DRV_NAME;
@@ -366,6 +367,8 @@ static int fjes_setup_resources(struct fjes_adapter *adapter)
FJES_ZONING_STATUS_ENABLE)) {
fjes_hw_raise_interrupt(hw, epidx,
REG_ICTL_MASK_INFO_UPDATE);
+ hw->ep_shm_info[epidx].ep_stats
+ .send_intr_zoneupdate += 1;
}
}
@@ -397,6 +400,9 @@ static int fjes_setup_resources(struct fjes_adapter *adapter)
adapter->force_reset = true;
return result;
}
+
+ hw->ep_shm_info[epidx].ep_stats
+ .com_regist_buf_exec += 1;
}
}
@@ -422,6 +428,8 @@ static void fjes_free_resources(struct fjes_adapter *adapter)
result = fjes_hw_unregister_buff_addr(hw, epidx);
mutex_unlock(&hw->hw_info.lock);
+ hw->ep_shm_info[epidx].ep_stats.com_unregist_buf_exec += 1;
+
if (result)
reset_flag = true;
@@ -567,6 +575,7 @@ static void fjes_raise_intr_rxdata_task(struct work_struct *work)
FJES_RX_POLL_WORK)) {
fjes_hw_raise_interrupt(hw, epid,
REG_ICTL_MASK_RX_DATA);
+ hw->ep_shm_info[epid].ep_stats.send_intr_rx += 1;
}
}
@@ -663,6 +672,9 @@ fjes_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
pstatus = fjes_hw_get_partner_ep_status(hw, dest_epid);
if (pstatus != EP_PARTNER_SHARED) {
+ if (!is_multi)
+ hw->ep_shm_info[dest_epid].ep_stats
+ .tx_dropped_not_shared += 1;
ret = NETDEV_TX_OK;
} else if (!fjes_hw_check_epbuf_version(
&adapter->hw.ep_shm_info[dest_epid].rx, 0)) {
@@ -670,6 +682,8 @@ fjes_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
adapter->stats64.tx_carrier_errors += 1;
hw->ep_shm_info[dest_epid].net_stats
.tx_carrier_errors += 1;
+ hw->ep_shm_info[dest_epid].ep_stats
+ .tx_dropped_ver_mismatch += 1;
ret = NETDEV_TX_OK;
} else if (!fjes_hw_check_mtu(
@@ -679,12 +693,16 @@ fjes_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
hw->ep_shm_info[dest_epid].net_stats.tx_dropped += 1;
adapter->stats64.tx_errors += 1;
hw->ep_shm_info[dest_epid].net_stats.tx_errors += 1;
+ hw->ep_shm_info[dest_epid].ep_stats
+ .tx_dropped_buf_size_mismatch += 1;
ret = NETDEV_TX_OK;
} else if (vlan &&
!fjes_hw_check_vlan_id(
&adapter->hw.ep_shm_info[dest_epid].rx,
vlan_id)) {
+ hw->ep_shm_info[dest_epid].ep_stats
+ .tx_dropped_vlanid_mismatch += 1;
ret = NETDEV_TX_OK;
} else {
if (len < VLAN_ETH_HLEN) {
@@ -718,6 +736,8 @@ fjes_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
ret = NETDEV_TX_OK;
} else {
netif_trans_update(netdev);
+ hw->ep_shm_info[dest_epid].ep_stats
+ .tx_buffer_full += 1;
netif_tx_stop_queue(cur_queue);
if (!work_pending(&adapter->tx_stall_task))
@@ -885,6 +905,7 @@ static void fjes_txrx_stop_req_irq(struct fjes_adapter *adapter,
unsigned long flags;
status = fjes_hw_get_partner_ep_status(hw, src_epid);
+ trace_fjes_txrx_stop_req_irq_pre(hw, src_epid, status);
switch (status) {
case EP_PARTNER_UNSHARE:
case EP_PARTNER_COMPLETE:
@@ -915,6 +936,7 @@ static void fjes_txrx_stop_req_irq(struct fjes_adapter *adapter,
}
break;
}
+ trace_fjes_txrx_stop_req_irq_post(hw, src_epid);
}
static void fjes_stop_req_irq(struct fjes_adapter *adapter, int src_epid)
@@ -926,6 +948,7 @@ static void fjes_stop_req_irq(struct fjes_adapter *adapter, int src_epid)
set_bit(src_epid, &hw->hw_info.buffer_unshare_reserve_bit);
status = fjes_hw_get_partner_ep_status(hw, src_epid);
+ trace_fjes_stop_req_irq_pre(hw, src_epid, status);
switch (status) {
case EP_PARTNER_WAITING:
spin_lock_irqsave(&hw->rx_status_lock, flags);
@@ -949,6 +972,7 @@ static void fjes_stop_req_irq(struct fjes_adapter *adapter, int src_epid)
queue_work(adapter->control_wq, &hw->epstop_task);
break;
}
+ trace_fjes_stop_req_irq_post(hw, src_epid);
}
static void fjes_update_zone_irq(struct fjes_adapter *adapter,
@@ -970,21 +994,33 @@ static irqreturn_t fjes_intr(int irq, void *data)
icr = fjes_hw_capture_interrupt_status(hw);
if (icr & REG_IS_MASK_IS_ASSERT) {
- if (icr & REG_ICTL_MASK_RX_DATA)
+ if (icr & REG_ICTL_MASK_RX_DATA) {
fjes_rx_irq(adapter, icr & REG_IS_MASK_EPID);
+ hw->ep_shm_info[icr & REG_IS_MASK_EPID].ep_stats
+ .recv_intr_rx += 1;
+ }
- if (icr & REG_ICTL_MASK_DEV_STOP_REQ)
+ if (icr & REG_ICTL_MASK_DEV_STOP_REQ) {
fjes_stop_req_irq(adapter, icr & REG_IS_MASK_EPID);
+ hw->ep_shm_info[icr & REG_IS_MASK_EPID].ep_stats
+ .recv_intr_stop += 1;
+ }
- if (icr & REG_ICTL_MASK_TXRX_STOP_REQ)
+ if (icr & REG_ICTL_MASK_TXRX_STOP_REQ) {
fjes_txrx_stop_req_irq(adapter, icr & REG_IS_MASK_EPID);
+ hw->ep_shm_info[icr & REG_IS_MASK_EPID].ep_stats
+ .recv_intr_unshare += 1;
+ }
if (icr & REG_ICTL_MASK_TXRX_STOP_DONE)
fjes_hw_set_irqmask(hw,
REG_ICTL_MASK_TXRX_STOP_DONE, true);
- if (icr & REG_ICTL_MASK_INFO_UPDATE)
+ if (icr & REG_ICTL_MASK_INFO_UPDATE) {
fjes_update_zone_irq(adapter, icr & REG_IS_MASK_EPID);
+ hw->ep_shm_info[icr & REG_IS_MASK_EPID].ep_stats
+ .recv_intr_zoneupdate += 1;
+ }
ret = IRQ_HANDLED;
} else {
@@ -1221,6 +1257,8 @@ static int fjes_probe(struct platform_device *plat_dev)
netif_carrier_off(netdev);
+ fjes_dbg_adapter_init(adapter);
+
return 0;
err_hw_exit:
@@ -1238,6 +1276,8 @@ static int fjes_remove(struct platform_device *plat_dev)
struct fjes_adapter *adapter = netdev_priv(netdev);
struct fjes_hw *hw = &adapter->hw;
+ fjes_dbg_adapter_exit(adapter);
+
cancel_delayed_work_sync(&adapter->interrupt_watch_task);
cancel_work_sync(&adapter->unshare_watch_task);
cancel_work_sync(&adapter->raise_intr_rxdata_task);
@@ -1276,6 +1316,8 @@ static void fjes_netdev_setup(struct net_device *netdev)
netdev->netdev_ops = &fjes_netdev_ops;
fjes_set_ethtool_ops(netdev);
netdev->mtu = fjes_support_mtu[3];
+ netdev->min_mtu = fjes_support_mtu[0];
+ netdev->max_mtu = fjes_support_mtu[3];
netdev->flags |= IFF_BROADCAST;
netdev->features |= NETIF_F_HW_CSUM | NETIF_F_HW_VLAN_CTAG_FILTER;
}
@@ -1364,6 +1406,8 @@ static void fjes_watch_unshare_task(struct work_struct *work)
break;
}
mutex_unlock(&hw->hw_info.lock);
+ hw->ep_shm_info[epidx].ep_stats
+ .com_unregist_buf_exec += 1;
spin_lock_irqsave(&hw->rx_status_lock, flags);
fjes_hw_setup_epbuf(&hw->ep_shm_info[epidx].tx,
@@ -1406,6 +1450,9 @@ static void fjes_watch_unshare_task(struct work_struct *work)
}
mutex_unlock(&hw->hw_info.lock);
+ hw->ep_shm_info[epidx].ep_stats
+ .com_unregist_buf_exec += 1;
+
spin_lock_irqsave(&hw->rx_status_lock, flags);
fjes_hw_setup_epbuf(
&hw->ep_shm_info[epidx].tx,
@@ -1437,9 +1484,13 @@ static int __init fjes_init_module(void)
pr_info("%s - version %s - %s\n",
fjes_driver_string, fjes_driver_version, fjes_copyright);
+ fjes_dbg_init();
+
result = platform_driver_register(&fjes_driver);
- if (result < 0)
+ if (result < 0) {
+ fjes_dbg_exit();
return result;
+ }
result = acpi_bus_register_driver(&fjes_acpi_driver);
if (result < 0)
@@ -1449,6 +1500,7 @@ static int __init fjes_init_module(void)
fail_acpi_driver:
platform_driver_unregister(&fjes_driver);
+ fjes_dbg_exit();
return result;
}
@@ -1459,6 +1511,7 @@ static void __exit fjes_exit_module(void)
{
acpi_bus_unregister_driver(&fjes_acpi_driver);
platform_driver_unregister(&fjes_driver);
+ fjes_dbg_exit();
}
module_exit(fjes_exit_module);
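
[editor's note] fjes_init_module() now has three things to unwind (the debugfs root set up by fjes_dbg_init(), the platform driver, the ACPI driver), and the added fjes_dbg_exit() calls keep every failure path releasing them in reverse acquisition order. A standalone sketch of that pattern, with stand-in functions for the real registration calls (all names here are illustrative):

    static int  platform_register(void)   { return 0;  }
    static void platform_unregister(void) { }
    static int  acpi_register(void)       { return -1; } /* force the unwind */
    static void dbg_init(void)            { }
    static void dbg_exit(void)            { }

    static int fjes_init_sketch(void)
    {
            int err;

            dbg_init();                     /* acquired first... */
            err = platform_register();
            if (err < 0)
                    goto fail_platform;
            err = acpi_register();
            if (err < 0)
                    goto fail_acpi;
            return 0;

    fail_acpi:
            platform_unregister();
    fail_platform:
            dbg_exit();                     /* ...released last */
            return err;
    }

    int main(void)
    {
            return fjes_init_sketch() ? 1 : 0;
    }
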
diff --git a/drivers/net/fjes/fjes_trace.c b/drivers/net/fjes/fjes_trace.c
new file mode 100644
index 000000000000..066fa765d5bb
--- /dev/null
+++ b/drivers/net/fjes/fjes_trace.c
@@ -0,0 +1,30 @@
+/*
+ * FUJITSU Extended Socket Network Device driver
+ * Copyright (c) 2015-2016 FUJITSU LIMITED
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ */
+
+#include <linux/module.h>
+
+#ifndef __CHECKER__
+#include "fjes_hw.h"
+
+#define CREATE_TRACE_POINTS
+#include "fjes_trace.h"
+
+#endif /* __CHECKER__ */
diff --git a/drivers/net/fjes/fjes_trace.h b/drivers/net/fjes/fjes_trace.h
new file mode 100644
index 000000000000..cca01a1b3d64
--- /dev/null
+++ b/drivers/net/fjes/fjes_trace.h
@@ -0,0 +1,380 @@
+/*
+ * FUJITSU Extended Socket Network Device driver
+ * Copyright (c) 2015-2016 FUJITSU LIMITED
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ */
+
+#if !defined(FJES_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
+#define FJES_TRACE_H_
+
+#include <linux/types.h>
+#include <linux/tracepoint.h>
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM fjes
+
+/* tracepoints for fjes_hw.c */
+
+TRACE_EVENT(fjes_hw_issue_request_command,
+ TP_PROTO(union REG_CR *cr, union REG_CS *cs, int timeout,
+ enum fjes_dev_command_response_e ret),
+ TP_ARGS(cr, cs, timeout, ret),
+ TP_STRUCT__entry(
+ __field(u16, cr_req)
+ __field(u8, cr_error)
+ __field(u16, cr_err_info)
+ __field(u8, cr_req_start)
+ __field(u16, cs_req)
+ __field(u8, cs_busy)
+ __field(u8, cs_complete)
+ __field(int, timeout)
+ __field(int, ret)
+ ),
+ TP_fast_assign(
+ __entry->cr_req = cr->bits.req_code;
+ __entry->cr_error = cr->bits.error;
+ __entry->cr_err_info = cr->bits.err_info;
+ __entry->cr_req_start = cr->bits.req_start;
+ __entry->cs_req = cs->bits.req_code;
+ __entry->cs_busy = cs->bits.busy;
+ __entry->cs_complete = cs->bits.complete;
+ __entry->timeout = timeout;
+ __entry->ret = ret;
+ ),
+ TP_printk("CR=[req=%04x, error=%u, err_info=%04x, req_start=%u], CS=[req=%04x, busy=%u, complete=%u], timeout=%d, ret=%d",
+ __entry->cr_req, __entry->cr_error, __entry->cr_err_info,
+ __entry->cr_req_start, __entry->cs_req, __entry->cs_busy,
+ __entry->cs_complete, __entry->timeout, __entry->ret)
+);
+
+TRACE_EVENT(fjes_hw_request_info,
+ TP_PROTO(struct fjes_hw *hw, union fjes_device_command_res *res_buf),
+ TP_ARGS(hw, res_buf),
+ TP_STRUCT__entry(
+ __field(int, length)
+ __field(int, code)
+ __dynamic_array(u8, zone, hw->max_epid)
+ __dynamic_array(u8, status, hw->max_epid)
+ ),
+ TP_fast_assign(
+ int x;
+
+ __entry->length = res_buf->info.length;
+ __entry->code = res_buf->info.code;
+ for (x = 0; x < hw->max_epid; x++) {
+ *((u8 *)__get_dynamic_array(zone) + x) =
+ res_buf->info.info[x].zone;
+ *((u8 *)__get_dynamic_array(status) + x) =
+ res_buf->info.info[x].es_status;
+ }
+ ),
+ TP_printk("res_buf=[length=%d, code=%d, es_zones=%s, es_status=%s]",
+ __entry->length, __entry->code,
+ __print_array(__get_dynamic_array(zone),
+ __get_dynamic_array_len(zone) / sizeof(u8),
+ sizeof(u8)),
+ __print_array(__get_dynamic_array(status),
+ __get_dynamic_array_len(status) / sizeof(u8),
+ sizeof(u8)))
+);
+
+TRACE_EVENT(fjes_hw_request_info_err,
+ TP_PROTO(char *err),
+ TP_ARGS(err),
+ TP_STRUCT__entry(
+ __string(err, err)
+ ),
+ TP_fast_assign(
+ __assign_str(err, err);
+ ),
+ TP_printk("%s", __get_str(err))
+);
+
+TRACE_EVENT(fjes_hw_register_buff_addr_req,
+ TP_PROTO(union fjes_device_command_req *req_buf,
+ struct ep_share_mem_info *buf_pair),
+ TP_ARGS(req_buf, buf_pair),
+ TP_STRUCT__entry(
+ __field(int, length)
+ __field(int, epid)
+ __field(u64, tx)
+ __field(size_t, tx_size)
+ __field(u64, rx)
+ __field(size_t, rx_size)
+ ),
+ TP_fast_assign(
+ void *tx, *rx;
+
+ tx = (void *)buf_pair->tx.buffer;
+ rx = (void *)buf_pair->rx.buffer;
+ __entry->length = req_buf->share_buffer.length;
+ __entry->epid = req_buf->share_buffer.epid;
+ __entry->tx_size = buf_pair->tx.size;
+ __entry->rx_size = buf_pair->rx.size;
+ __entry->tx = page_to_phys(vmalloc_to_page(tx)) +
+ offset_in_page(tx);
+ __entry->rx = page_to_phys(vmalloc_to_page(rx)) +
+ offset_in_page(rx);
+ ),
+ TP_printk("req_buf=[length=%d, epid=%d], TX=[phy=0x%016llx, size=%zu], RX=[phy=0x%016llx, size=%zu]",
+ __entry->length, __entry->epid, __entry->tx, __entry->tx_size,
+ __entry->rx, __entry->rx_size)
+);
+
+TRACE_EVENT(fjes_hw_register_buff_addr,
+ TP_PROTO(union fjes_device_command_res *res_buf, int timeout),
+ TP_ARGS(res_buf, timeout),
+ TP_STRUCT__entry(
+ __field(int, length)
+ __field(int, code)
+ __field(int, timeout)
+ ),
+ TP_fast_assign(
+ __entry->length = res_buf->share_buffer.length;
+ __entry->code = res_buf->share_buffer.code;
+ __entry->timeout = timeout;
+ ),
+ TP_printk("res_buf=[length=%d, code=%d], timeout=%d",
+ __entry->length, __entry->code, __entry->timeout)
+);
+
+TRACE_EVENT(fjes_hw_register_buff_addr_err,
+ TP_PROTO(char *err),
+ TP_ARGS(err),
+ TP_STRUCT__entry(
+ __string(err, err)
+ ),
+ TP_fast_assign(
+ __assign_str(err, err);
+ ),
+ TP_printk("%s", __get_str(err))
+);
+
+TRACE_EVENT(fjes_hw_unregister_buff_addr_req,
+ TP_PROTO(union fjes_device_command_req *req_buf),
+ TP_ARGS(req_buf),
+ TP_STRUCT__entry(
+ __field(int, length)
+ __field(int, epid)
+ ),
+ TP_fast_assign(
+ __entry->length = req_buf->unshare_buffer.length;
+ __entry->epid = req_buf->unshare_buffer.epid;
+ ),
+ TP_printk("req_buf=[length=%d, epid=%d]",
+ __entry->length, __entry->epid)
+);
+
+TRACE_EVENT(fjes_hw_unregister_buff_addr,
+ TP_PROTO(union fjes_device_command_res *res_buf, int timeout),
+ TP_ARGS(res_buf, timeout),
+ TP_STRUCT__entry(
+ __field(int, length)
+ __field(int, code)
+ __field(int, timeout)
+ ),
+ TP_fast_assign(
+ __entry->length = res_buf->unshare_buffer.length;
+ __entry->code = res_buf->unshare_buffer.code;
+ __entry->timeout = timeout;
+ ),
+ TP_printk("res_buf=[length=%d, code=%d], timeout=%d",
+ __entry->length, __entry->code, __entry->timeout)
+);
+
+TRACE_EVENT(fjes_hw_unregister_buff_addr_err,
+ TP_PROTO(char *err),
+ TP_ARGS(err),
+ TP_STRUCT__entry(
+ __string(err, err)
+ ),
+ TP_fast_assign(
+ __assign_str(err, err);
+ ),
+ TP_printk("%s", __get_str(err))
+);
+
+TRACE_EVENT(fjes_hw_start_debug_req,
+ TP_PROTO(union fjes_device_command_req *req_buf),
+ TP_ARGS(req_buf),
+ TP_STRUCT__entry(
+ __field(int, length)
+ __field(int, mode)
+ __field(phys_addr_t, buffer)
+ ),
+ TP_fast_assign(
+ __entry->length = req_buf->start_trace.length;
+ __entry->mode = req_buf->start_trace.mode;
+ __entry->buffer = req_buf->start_trace.buffer[0];
+ ),
+ TP_printk("req_buf=[length=%d, mode=%d, buffer=%pap]",
+ __entry->length, __entry->mode, &__entry->buffer)
+);
+
+TRACE_EVENT(fjes_hw_start_debug,
+ TP_PROTO(union fjes_device_command_res *res_buf),
+ TP_ARGS(res_buf),
+ TP_STRUCT__entry(
+ __field(int, length)
+ __field(int, code)
+ ),
+ TP_fast_assign(
+ __entry->length = res_buf->start_trace.length;
+ __entry->code = res_buf->start_trace.code;
+ ),
+ TP_printk("res_buf=[length=%d, code=%d]", __entry->length, __entry->code)
+);
+
+TRACE_EVENT(fjes_hw_start_debug_err,
+ TP_PROTO(char *err),
+ TP_ARGS(err),
+ TP_STRUCT__entry(
+ __string(err, err)
+ ),
+ TP_fast_assign(
+ __assign_str(err, err);
+ ),
+ TP_printk("%s", __get_str(err))
+);
+
+TRACE_EVENT(fjes_hw_stop_debug,
+ TP_PROTO(union fjes_device_command_res *res_buf),
+ TP_ARGS(res_buf),
+ TP_STRUCT__entry(
+ __field(int, length)
+ __field(int, code)
+ ),
+ TP_fast_assign(
+ __entry->length = res_buf->stop_trace.length;
+ __entry->code = res_buf->stop_trace.code;
+ ),
+ TP_printk("res_buf=[length=%d, code=%d]", __entry->length, __entry->code)
+);
+
+TRACE_EVENT(fjes_hw_stop_debug_err,
+ TP_PROTO(char *err),
+ TP_ARGS(err),
+ TP_STRUCT__entry(
+ __string(err, err)
+ ),
+ TP_fast_assign(
+ __assign_str(err, err);
+ ),
+ TP_printk("%s", __get_str(err))
+);
+
+/* tracepoints for fjes_main.c */
+
+TRACE_EVENT(fjes_txrx_stop_req_irq_pre,
+ TP_PROTO(struct fjes_hw *hw, int src_epid,
+ enum ep_partner_status status),
+ TP_ARGS(hw, src_epid, status),
+ TP_STRUCT__entry(
+ __field(int, src_epid)
+ __field(enum ep_partner_status, status)
+ __field(u8, ep_status)
+ __field(unsigned long, txrx_stop_req_bit)
+ __field(u16, rx_status)
+ ),
+ TP_fast_assign(
+ __entry->src_epid = src_epid;
+ __entry->status = status;
+ __entry->ep_status = hw->hw_info.share->ep_status[src_epid];
+ __entry->txrx_stop_req_bit = hw->txrx_stop_req_bit;
+ __entry->rx_status =
+ hw->ep_shm_info[src_epid].tx.info->v1i.rx_status;
+ ),
+ TP_printk("epid=%d, partner_status=%d, ep_status=%x, txrx_stop_req_bit=%016lx, tx.rx_status=%08x",
+ __entry->src_epid, __entry->status, __entry->ep_status,
+ __entry->txrx_stop_req_bit, __entry->rx_status)
+);
+
+TRACE_EVENT(fjes_txrx_stop_req_irq_post,
+ TP_PROTO(struct fjes_hw *hw, int src_epid),
+ TP_ARGS(hw, src_epid),
+ TP_STRUCT__entry(
+ __field(int, src_epid)
+ __field(u8, ep_status)
+ __field(unsigned long, txrx_stop_req_bit)
+ __field(u16, rx_status)
+ ),
+ TP_fast_assign(
+ __entry->src_epid = src_epid;
+ __entry->ep_status = hw->hw_info.share->ep_status[src_epid];
+ __entry->txrx_stop_req_bit = hw->txrx_stop_req_bit;
+ __entry->rx_status = hw->ep_shm_info[src_epid].tx.info->v1i.rx_status;
+ ),
+ TP_printk("epid=%d, ep_status=%x, txrx_stop_req_bit=%016lx, tx.rx_status=%08x",
+ __entry->src_epid, __entry->ep_status,
+ __entry->txrx_stop_req_bit, __entry->rx_status)
+);
+
+TRACE_EVENT(fjes_stop_req_irq_pre,
+ TP_PROTO(struct fjes_hw *hw, int src_epid,
+ enum ep_partner_status status),
+ TP_ARGS(hw, src_epid, status),
+ TP_STRUCT__entry(
+ __field(int, src_epid)
+ __field(enum ep_partner_status, status)
+ __field(u8, ep_status)
+ __field(unsigned long, txrx_stop_req_bit)
+ __field(u16, rx_status)
+ ),
+ TP_fast_assign(
+ __entry->src_epid = src_epid;
+ __entry->status = status;
+ __entry->ep_status = hw->hw_info.share->ep_status[src_epid];
+ __entry->txrx_stop_req_bit = hw->txrx_stop_req_bit;
+ __entry->rx_status =
+ hw->ep_shm_info[src_epid].tx.info->v1i.rx_status;
+ ),
+ TP_printk("epid=%d, partner_status=%d, ep_status=%x, txrx_stop_req_bit=%016lx, tx.rx_status=%08x",
+ __entry->src_epid, __entry->status, __entry->ep_status,
+ __entry->txrx_stop_req_bit, __entry->rx_status)
+);
+
+TRACE_EVENT(fjes_stop_req_irq_post,
+ TP_PROTO(struct fjes_hw *hw, int src_epid),
+ TP_ARGS(hw, src_epid),
+ TP_STRUCT__entry(
+ __field(int, src_epid)
+ __field(u8, ep_status)
+ __field(unsigned long, txrx_stop_req_bit)
+ __field(u16, rx_status)
+ ),
+ TP_fast_assign(
+ __entry->src_epid = src_epid;
+ __entry->ep_status = hw->hw_info.share->ep_status[src_epid];
+ __entry->txrx_stop_req_bit = hw->txrx_stop_req_bit;
+ __entry->rx_status =
+ hw->ep_shm_info[src_epid].tx.info->v1i.rx_status;
+ ),
+ TP_printk("epid=%d, ep_status=%x, txrx_stop_req_bit=%016lx, tx.rx_status=%08x",
+ __entry->src_epid, __entry->ep_status,
+ __entry->txrx_stop_req_bit, __entry->rx_status)
+);
+
+#endif /* FJES_TRACE_H_ */
+
+#undef TRACE_INCLUDE_PATH
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_PATH ../../../drivers/net/fjes
+#define TRACE_INCLUDE_FILE fjes_trace
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
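
[editor's note] Because every event above is declared under TRACE_SYSTEM fjes, ftrace groups them in a single events/fjes/ directory, so they can be toggled as a unit. A hedged userspace sketch; the tracefs mount point varies (newer kernels expose /sys/kernel/tracing, older ones /sys/kernel/debug/tracing):

    #include <stdio.h>

    /* enable or disable all fjes tracepoints at once; assumes tracefs is
     * mounted at /sys/kernel/tracing
     */
    static int fjes_trace_enable(int on)
    {
            FILE *f = fopen("/sys/kernel/tracing/events/fjes/enable", "w");

            if (!f)
                    return -1;
            fprintf(f, "%d", on ? 1 : 0);
            fclose(f);
            return 0;
    }

    int main(void)
    {
            return fjes_trace_enable(1) ? 1 : 0;
    }
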
diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
index 8b4822ad27cb..45301cb98bc1 100644
--- a/drivers/net/geneve.c
+++ b/drivers/net/geneve.c
@@ -43,43 +43,24 @@ struct geneve_net {
struct list_head sock_list;
};
-static int geneve_net_id;
-
-union geneve_addr {
- struct sockaddr_in sin;
- struct sockaddr_in6 sin6;
- struct sockaddr sa;
-};
-
-static union geneve_addr geneve_remote_unspec = { .sa.sa_family = AF_UNSPEC, };
+static unsigned int geneve_net_id;
/* Pseudo network device */
struct geneve_dev {
struct hlist_node hlist; /* vni hash table */
struct net *net; /* netns for packet i/o */
struct net_device *dev; /* netdev for geneve tunnel */
+ struct ip_tunnel_info info;
struct geneve_sock __rcu *sock4; /* IPv4 socket used for geneve tunnel */
#if IS_ENABLED(CONFIG_IPV6)
struct geneve_sock __rcu *sock6; /* IPv6 socket used for geneve tunnel */
#endif
- u8 vni[3]; /* virtual network ID for tunnel */
- u8 ttl; /* TTL override */
- u8 tos; /* TOS override */
- union geneve_addr remote; /* IP address for link partner */
struct list_head next; /* geneve's per namespace list */
- __be32 label; /* IPv6 flowlabel override */
- __be16 dst_port;
- bool collect_md;
struct gro_cells gro_cells;
- u32 flags;
- struct dst_cache dst_cache;
+ bool collect_md;
+ bool use_udp6_rx_checksums;
};
-/* Geneve device flags */
-#define GENEVE_F_UDP_ZERO_CSUM_TX BIT(0)
-#define GENEVE_F_UDP_ZERO_CSUM6_TX BIT(1)
-#define GENEVE_F_UDP_ZERO_CSUM6_RX BIT(2)
-
struct geneve_sock {
bool collect_md;
struct list_head list;
@@ -87,7 +68,6 @@ struct geneve_sock {
struct rcu_head rcu;
int refcnt;
struct hlist_head vni_list[VNI_HASH_SIZE];
- u32 flags;
};
static inline __u32 geneve_net_vni_hash(u8 vni[3])
@@ -109,6 +89,31 @@ static __be64 vni_to_tunnel_id(const __u8 *vni)
#endif
}
+/* Convert 64 bit tunnel ID to 24 bit VNI. */
+static void tunnel_id_to_vni(__be64 tun_id, __u8 *vni)
+{
+#ifdef __BIG_ENDIAN
+ vni[0] = (__force __u8)(tun_id >> 16);
+ vni[1] = (__force __u8)(tun_id >> 8);
+ vni[2] = (__force __u8)tun_id;
+#else
+ vni[0] = (__force __u8)((__force u64)tun_id >> 40);
+ vni[1] = (__force __u8)((__force u64)tun_id >> 48);
+ vni[2] = (__force __u8)((__force u64)tun_id >> 56);
+#endif
+}
+
+static bool eq_tun_id_and_vni(u8 *tun_id, u8 *vni)
+{
+#ifdef __BIG_ENDIAN
+ return (vni[0] == tun_id[2]) &&
+ (vni[1] == tun_id[1]) &&
+ (vni[2] == tun_id[0]);
+#else
+ return !memcmp(vni, &tun_id[5], 3);
+#endif
+}
+
static sa_family_t geneve_get_sk_family(struct geneve_sock *gs)
{
return gs->sock->sk->sk_family;
@@ -125,8 +130,8 @@ static struct geneve_dev *geneve_lookup(struct geneve_sock *gs,
hash = geneve_net_vni_hash(vni);
vni_list_head = &gs->vni_list[hash];
hlist_for_each_entry_rcu(geneve, vni_list_head, hlist) {
- if (!memcmp(vni, geneve->vni, sizeof(geneve->vni)) &&
- addr == geneve->remote.sin.sin_addr.s_addr)
+ if (eq_tun_id_and_vni((u8 *)&geneve->info.key.tun_id, vni) &&
+ addr == geneve->info.key.u.ipv4.dst)
return geneve;
}
return NULL;
@@ -144,8 +149,8 @@ static struct geneve_dev *geneve6_lookup(struct geneve_sock *gs,
hash = geneve_net_vni_hash(vni);
vni_list_head = &gs->vni_list[hash];
hlist_for_each_entry_rcu(geneve, vni_list_head, hlist) {
- if (!memcmp(vni, geneve->vni, sizeof(geneve->vni)) &&
- ipv6_addr_equal(&addr6, &geneve->remote.sin6.sin6_addr))
+ if (eq_tun_id_and_vni((u8 *)&geneve->info.key.tun_id, vni) &&
+ ipv6_addr_equal(&addr6, &geneve->info.key.u.ipv6.dst))
return geneve;
}
return NULL;
@@ -160,15 +165,12 @@ static inline struct genevehdr *geneve_hdr(const struct sk_buff *skb)
static struct geneve_dev *geneve_lookup_skb(struct geneve_sock *gs,
struct sk_buff *skb)
{
- u8 *vni;
- __be32 addr;
static u8 zero_vni[3];
-#if IS_ENABLED(CONFIG_IPV6)
- static struct in6_addr zero_addr6;
-#endif
+ u8 *vni;
if (geneve_get_sk_family(gs) == AF_INET) {
struct iphdr *iph;
+ __be32 addr;
iph = ip_hdr(skb); /* outer IP header... */
@@ -183,6 +185,7 @@ static struct geneve_dev *geneve_lookup_skb(struct geneve_sock *gs,
return geneve_lookup(gs, addr, vni);
#if IS_ENABLED(CONFIG_IPV6)
} else if (geneve_get_sk_family(gs) == AF_INET6) {
+ static struct in6_addr zero_addr6;
struct ipv6hdr *ip6h;
struct in6_addr addr6;
@@ -305,13 +308,12 @@ static int geneve_init(struct net_device *dev)
return err;
}
- err = dst_cache_init(&geneve->dst_cache, GFP_KERNEL);
+ err = dst_cache_init(&geneve->info.dst_cache, GFP_KERNEL);
if (err) {
free_percpu(dev->tstats);
gro_cells_destroy(&geneve->gro_cells);
return err;
}
-
return 0;
}
@@ -319,7 +321,7 @@ static void geneve_uninit(struct net_device *dev)
{
struct geneve_dev *geneve = netdev_priv(dev);
- dst_cache_destroy(&geneve->dst_cache);
+ dst_cache_destroy(&geneve->info.dst_cache);
gro_cells_destroy(&geneve->gro_cells);
free_percpu(dev->tstats);
}
@@ -368,7 +370,7 @@ drop:
}
static struct socket *geneve_create_sock(struct net *net, bool ipv6,
- __be16 port, u32 flags)
+ __be16 port, bool ipv6_rx_csum)
{
struct socket *sock;
struct udp_port_cfg udp_conf;
@@ -379,8 +381,7 @@ static struct socket *geneve_create_sock(struct net *net, bool ipv6,
if (ipv6) {
udp_conf.family = AF_INET6;
udp_conf.ipv6_v6only = 1;
- udp_conf.use_udp6_rx_checksums =
- !(flags & GENEVE_F_UDP_ZERO_CSUM6_RX);
+ udp_conf.use_udp6_rx_checksums = ipv6_rx_csum;
} else {
udp_conf.family = AF_INET;
udp_conf.local_ip.s_addr = htonl(INADDR_ANY);
@@ -491,7 +492,7 @@ static int geneve_gro_complete(struct sock *sk, struct sk_buff *skb,
/* Create new listen socket if needed */
static struct geneve_sock *geneve_socket_create(struct net *net, __be16 port,
- bool ipv6, u32 flags)
+ bool ipv6, bool ipv6_rx_csum)
{
struct geneve_net *gn = net_generic(net, geneve_net_id);
struct geneve_sock *gs;
@@ -503,7 +504,7 @@ static struct geneve_sock *geneve_socket_create(struct net *net, __be16 port,
if (!gs)
return ERR_PTR(-ENOMEM);
- sock = geneve_create_sock(net, ipv6, port, flags);
+ sock = geneve_create_sock(net, ipv6, port, ipv6_rx_csum);
if (IS_ERR(sock)) {
kfree(gs);
return ERR_CAST(sock);
@@ -579,21 +580,22 @@ static int geneve_sock_add(struct geneve_dev *geneve, bool ipv6)
struct net *net = geneve->net;
struct geneve_net *gn = net_generic(net, geneve_net_id);
struct geneve_sock *gs;
+ __u8 vni[3];
__u32 hash;
- gs = geneve_find_sock(gn, ipv6 ? AF_INET6 : AF_INET, geneve->dst_port);
+ gs = geneve_find_sock(gn, ipv6 ? AF_INET6 : AF_INET, geneve->info.key.tp_dst);
if (gs) {
gs->refcnt++;
goto out;
}
- gs = geneve_socket_create(net, geneve->dst_port, ipv6, geneve->flags);
+ gs = geneve_socket_create(net, geneve->info.key.tp_dst, ipv6,
+ geneve->use_udp6_rx_checksums);
if (IS_ERR(gs))
return PTR_ERR(gs);
out:
gs->collect_md = geneve->collect_md;
- gs->flags = geneve->flags;
#if IS_ENABLED(CONFIG_IPV6)
if (ipv6)
rcu_assign_pointer(geneve->sock6, gs);
@@ -601,7 +603,8 @@ out:
#endif
rcu_assign_pointer(geneve->sock4, gs);
- hash = geneve_net_vni_hash(geneve->vni);
+ tunnel_id_to_vni(geneve->info.key.tun_id, vni);
+ hash = geneve_net_vni_hash(vni);
hlist_add_head_rcu(&geneve->hlist, &gs->vni_list[hash]);
return 0;
}
@@ -609,7 +612,7 @@ out:
static int geneve_open(struct net_device *dev)
{
struct geneve_dev *geneve = netdev_priv(dev);
- bool ipv6 = geneve->remote.sa.sa_family == AF_INET6;
+ bool ipv6 = !!(geneve->info.mode & IP_TUNNEL_INFO_IPV6);
bool metadata = geneve->collect_md;
int ret = 0;
@@ -636,67 +639,34 @@ static int geneve_stop(struct net_device *dev)
}
static void geneve_build_header(struct genevehdr *geneveh,
- __be16 tun_flags, u8 vni[3],
- u8 options_len, u8 *options)
+ const struct ip_tunnel_info *info)
{
geneveh->ver = GENEVE_VER;
- geneveh->opt_len = options_len / 4;
- geneveh->oam = !!(tun_flags & TUNNEL_OAM);
- geneveh->critical = !!(tun_flags & TUNNEL_CRIT_OPT);
+ geneveh->opt_len = info->options_len / 4;
+ geneveh->oam = !!(info->key.tun_flags & TUNNEL_OAM);
+ geneveh->critical = !!(info->key.tun_flags & TUNNEL_CRIT_OPT);
geneveh->rsvd1 = 0;
- memcpy(geneveh->vni, vni, 3);
+ tunnel_id_to_vni(info->key.tun_id, geneveh->vni);
geneveh->proto_type = htons(ETH_P_TEB);
geneveh->rsvd2 = 0;
- memcpy(geneveh->options, options, options_len);
+ ip_tunnel_info_opts_get(geneveh->options, info);
}
-static int geneve_build_skb(struct rtable *rt, struct sk_buff *skb,
- __be16 tun_flags, u8 vni[3], u8 opt_len, u8 *opt,
- u32 flags, bool xnet)
+static int geneve_build_skb(struct dst_entry *dst, struct sk_buff *skb,
+ const struct ip_tunnel_info *info,
+ bool xnet, int ip_hdr_len)
{
+ bool udp_sum = !!(info->key.tun_flags & TUNNEL_CSUM);
struct genevehdr *gnvh;
int min_headroom;
int err;
- bool udp_sum = !(flags & GENEVE_F_UDP_ZERO_CSUM_TX);
-
- skb_scrub_packet(skb, xnet);
-
- min_headroom = LL_RESERVED_SPACE(rt->dst.dev) + rt->dst.header_len
- + GENEVE_BASE_HLEN + opt_len + sizeof(struct iphdr);
- err = skb_cow_head(skb, min_headroom);
- if (unlikely(err))
- goto free_rt;
-
- err = udp_tunnel_handle_offloads(skb, udp_sum);
- if (err)
- goto free_rt;
-
- gnvh = (struct genevehdr *)__skb_push(skb, sizeof(*gnvh) + opt_len);
- geneve_build_header(gnvh, tun_flags, vni, opt_len, opt);
-
- skb_set_inner_protocol(skb, htons(ETH_P_TEB));
- return 0;
-
-free_rt:
- ip_rt_put(rt);
- return err;
-}
-
-#if IS_ENABLED(CONFIG_IPV6)
-static int geneve6_build_skb(struct dst_entry *dst, struct sk_buff *skb,
- __be16 tun_flags, u8 vni[3], u8 opt_len, u8 *opt,
- u32 flags, bool xnet)
-{
- struct genevehdr *gnvh;
- int min_headroom;
- int err;
- bool udp_sum = !(flags & GENEVE_F_UDP_ZERO_CSUM6_TX);
+ skb_reset_mac_header(skb);
skb_scrub_packet(skb, xnet);
- min_headroom = LL_RESERVED_SPACE(dst->dev) + dst->header_len
- + GENEVE_BASE_HLEN + opt_len + sizeof(struct ipv6hdr);
+ min_headroom = LL_RESERVED_SPACE(dst->dev) + dst->header_len +
+ GENEVE_BASE_HLEN + info->options_len + ip_hdr_len;
err = skb_cow_head(skb, min_headroom);
if (unlikely(err))
goto free_dst;
@@ -705,9 +675,9 @@ static int geneve6_build_skb(struct dst_entry *dst, struct sk_buff *skb,
if (err)
goto free_dst;
- gnvh = (struct genevehdr *)__skb_push(skb, sizeof(*gnvh) + opt_len);
- geneve_build_header(gnvh, tun_flags, vni, opt_len, opt);
-
+ gnvh = (struct genevehdr *)__skb_push(skb, sizeof(*gnvh) +
+ info->options_len);
+ geneve_build_header(gnvh, info);
skb_set_inner_protocol(skb, htons(ETH_P_TEB));
return 0;
@@ -715,12 +685,11 @@ free_dst:
dst_release(dst);
return err;
}
-#endif
static struct rtable *geneve_get_v4_rt(struct sk_buff *skb,
struct net_device *dev,
struct flowi4 *fl4,
- struct ip_tunnel_info *info)
+ const struct ip_tunnel_info *info)
{
bool use_cache = ip_tunnel_dst_cache_usable(skb, info);
struct geneve_dev *geneve = netdev_priv(dev);
@@ -734,32 +703,22 @@ static struct rtable *geneve_get_v4_rt(struct sk_buff *skb,
memset(fl4, 0, sizeof(*fl4));
fl4->flowi4_mark = skb->mark;
fl4->flowi4_proto = IPPROTO_UDP;
+ fl4->daddr = info->key.u.ipv4.dst;
+ fl4->saddr = info->key.u.ipv4.src;
- if (info) {
- fl4->daddr = info->key.u.ipv4.dst;
- fl4->saddr = info->key.u.ipv4.src;
- fl4->flowi4_tos = RT_TOS(info->key.tos);
- dst_cache = &info->dst_cache;
- } else {
- tos = geneve->tos;
- if (tos == 1) {
- const struct iphdr *iip = ip_hdr(skb);
-
- tos = ip_tunnel_get_dsfield(iip, skb);
- use_cache = false;
- }
-
- fl4->flowi4_tos = RT_TOS(tos);
- fl4->daddr = geneve->remote.sin.sin_addr.s_addr;
- dst_cache = &geneve->dst_cache;
+ tos = info->key.tos;
+ if ((tos == 1) && !geneve->collect_md) {
+ tos = ip_tunnel_get_dsfield(ip_hdr(skb), skb);
+ use_cache = false;
}
+ fl4->flowi4_tos = RT_TOS(tos);
+ dst_cache = (struct dst_cache *)&info->dst_cache;
if (use_cache) {
rt = dst_cache_get_ip4(dst_cache, &fl4->saddr);
if (rt)
return rt;
}
-
rt = ip_route_output_key(geneve->net, fl4);
if (IS_ERR(rt)) {
netdev_dbg(dev, "no route to %pI4\n", &fl4->daddr);
@@ -779,7 +738,7 @@ static struct rtable *geneve_get_v4_rt(struct sk_buff *skb,
static struct dst_entry *geneve_get_v6_dst(struct sk_buff *skb,
struct net_device *dev,
struct flowi6 *fl6,
- struct ip_tunnel_info *info)
+ const struct ip_tunnel_info *info)
{
bool use_cache = ip_tunnel_dst_cache_usable(skb, info);
struct geneve_dev *geneve = netdev_priv(dev);
@@ -795,34 +754,22 @@ static struct dst_entry *geneve_get_v6_dst(struct sk_buff *skb,
memset(fl6, 0, sizeof(*fl6));
fl6->flowi6_mark = skb->mark;
fl6->flowi6_proto = IPPROTO_UDP;
-
- if (info) {
- fl6->daddr = info->key.u.ipv6.dst;
- fl6->saddr = info->key.u.ipv6.src;
- fl6->flowlabel = ip6_make_flowinfo(RT_TOS(info->key.tos),
- info->key.label);
- dst_cache = &info->dst_cache;
- } else {
- prio = geneve->tos;
- if (prio == 1) {
- const struct iphdr *iip = ip_hdr(skb);
-
- prio = ip_tunnel_get_dsfield(iip, skb);
- use_cache = false;
- }
-
- fl6->flowlabel = ip6_make_flowinfo(RT_TOS(prio),
- geneve->label);
- fl6->daddr = geneve->remote.sin6.sin6_addr;
- dst_cache = &geneve->dst_cache;
+ fl6->daddr = info->key.u.ipv6.dst;
+ fl6->saddr = info->key.u.ipv6.src;
+ prio = info->key.tos;
+ if ((prio == 1) && !geneve->collect_md) {
+ prio = ip_tunnel_get_dsfield(ip_hdr(skb), skb);
+ use_cache = false;
}
+ fl6->flowlabel = ip6_make_flowinfo(RT_TOS(prio),
+ info->key.label);
+ dst_cache = (struct dst_cache *)&info->dst_cache;
if (use_cache) {
dst = dst_cache_get_ip6(dst_cache, &fl6->saddr);
if (dst)
return dst;
}
-
if (ipv6_stub->ipv6_dst_lookup(geneve->net, gs6->sock->sk, &dst, fl6)) {
netdev_dbg(dev, "no route to %pI6\n", &fl6->daddr);
return ERR_PTR(-ENETUNREACH);
@@ -839,199 +786,81 @@ static struct dst_entry *geneve_get_v6_dst(struct sk_buff *skb,
}
#endif
-/* Convert 64 bit tunnel ID to 24 bit VNI. */
-static void tunnel_id_to_vni(__be64 tun_id, __u8 *vni)
+static int geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev,
+ struct geneve_dev *geneve,
+ const struct ip_tunnel_info *info)
{
-#ifdef __BIG_ENDIAN
- vni[0] = (__force __u8)(tun_id >> 16);
- vni[1] = (__force __u8)(tun_id >> 8);
- vni[2] = (__force __u8)tun_id;
-#else
- vni[0] = (__force __u8)((__force u64)tun_id >> 40);
- vni[1] = (__force __u8)((__force u64)tun_id >> 48);
- vni[2] = (__force __u8)((__force u64)tun_id >> 56);
-#endif
-}
-
-static netdev_tx_t geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev,
- struct ip_tunnel_info *info)
-{
- struct geneve_dev *geneve = netdev_priv(dev);
- struct geneve_sock *gs4;
- struct rtable *rt = NULL;
- int err = -EINVAL;
+ bool xnet = !net_eq(geneve->net, dev_net(geneve->dev));
+ struct geneve_sock *gs4 = rcu_dereference(geneve->sock4);
+ const struct ip_tunnel_key *key = &info->key;
+ struct rtable *rt;
struct flowi4 fl4;
__u8 tos, ttl;
__be16 sport;
__be16 df;
- bool xnet = !net_eq(geneve->net, dev_net(geneve->dev));
- u32 flags = geneve->flags;
-
- gs4 = rcu_dereference(geneve->sock4);
- if (!gs4)
- goto tx_error;
-
- if (geneve->collect_md) {
- if (unlikely(!info || !(info->mode & IP_TUNNEL_INFO_TX))) {
- netdev_dbg(dev, "no tunnel metadata\n");
- goto tx_error;
- }
- if (info && ip_tunnel_info_af(info) != AF_INET)
- goto tx_error;
- }
+ int err;
rt = geneve_get_v4_rt(skb, dev, &fl4, info);
- if (IS_ERR(rt)) {
- err = PTR_ERR(rt);
- goto tx_error;
- }
+ if (IS_ERR(rt))
+ return PTR_ERR(rt);
sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true);
- skb_reset_mac_header(skb);
-
- if (info) {
- const struct ip_tunnel_key *key = &info->key;
- u8 *opts = NULL;
- u8 vni[3];
-
- tunnel_id_to_vni(key->tun_id, vni);
- if (info->options_len)
- opts = ip_tunnel_info_opts(info);
-
- if (key->tun_flags & TUNNEL_CSUM)
- flags &= ~GENEVE_F_UDP_ZERO_CSUM_TX;
- else
- flags |= GENEVE_F_UDP_ZERO_CSUM_TX;
-
- err = geneve_build_skb(rt, skb, key->tun_flags, vni,
- info->options_len, opts, flags, xnet);
- if (unlikely(err))
- goto tx_error;
-
+ if (geneve->collect_md) {
tos = ip_tunnel_ecn_encap(key->tos, ip_hdr(skb), skb);
ttl = key->ttl;
- df = key->tun_flags & TUNNEL_DONT_FRAGMENT ? htons(IP_DF) : 0;
} else {
- err = geneve_build_skb(rt, skb, 0, geneve->vni,
- 0, NULL, flags, xnet);
- if (unlikely(err))
- goto tx_error;
-
tos = ip_tunnel_ecn_encap(fl4.flowi4_tos, ip_hdr(skb), skb);
- ttl = geneve->ttl;
- if (!ttl && IN_MULTICAST(ntohl(fl4.daddr)))
- ttl = 1;
- ttl = ttl ? : ip4_dst_hoplimit(&rt->dst);
- df = 0;
+ ttl = key->ttl ? : ip4_dst_hoplimit(&rt->dst);
}
- udp_tunnel_xmit_skb(rt, gs4->sock->sk, skb, fl4.saddr, fl4.daddr,
- tos, ttl, df, sport, geneve->dst_port,
- !net_eq(geneve->net, dev_net(geneve->dev)),
- !!(flags & GENEVE_F_UDP_ZERO_CSUM_TX));
-
- return NETDEV_TX_OK;
-
-tx_error:
- dev_kfree_skb(skb);
+ df = key->tun_flags & TUNNEL_DONT_FRAGMENT ? htons(IP_DF) : 0;
- if (err == -ELOOP)
- dev->stats.collisions++;
- else if (err == -ENETUNREACH)
- dev->stats.tx_carrier_errors++;
+ err = geneve_build_skb(&rt->dst, skb, info, xnet, sizeof(struct iphdr));
+ if (unlikely(err))
+ return err;
- dev->stats.tx_errors++;
- return NETDEV_TX_OK;
+ udp_tunnel_xmit_skb(rt, gs4->sock->sk, skb, fl4.saddr, fl4.daddr,
+ tos, ttl, df, sport, geneve->info.key.tp_dst,
+ !net_eq(geneve->net, dev_net(geneve->dev)),
+ !(info->key.tun_flags & TUNNEL_CSUM));
+ return 0;
}
#if IS_ENABLED(CONFIG_IPV6)
-static netdev_tx_t geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev,
- struct ip_tunnel_info *info)
+static int geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev,
+ struct geneve_dev *geneve,
+ const struct ip_tunnel_info *info)
{
- struct geneve_dev *geneve = netdev_priv(dev);
+ bool xnet = !net_eq(geneve->net, dev_net(geneve->dev));
+ struct geneve_sock *gs6 = rcu_dereference(geneve->sock6);
+ const struct ip_tunnel_key *key = &info->key;
struct dst_entry *dst = NULL;
- struct geneve_sock *gs6;
- int err = -EINVAL;
struct flowi6 fl6;
__u8 prio, ttl;
__be16 sport;
- __be32 label;
- bool xnet = !net_eq(geneve->net, dev_net(geneve->dev));
- u32 flags = geneve->flags;
-
- gs6 = rcu_dereference(geneve->sock6);
- if (!gs6)
- goto tx_error;
-
- if (geneve->collect_md) {
- if (unlikely(!info || !(info->mode & IP_TUNNEL_INFO_TX))) {
- netdev_dbg(dev, "no tunnel metadata\n");
- goto tx_error;
- }
- }
+ int err;
dst = geneve_get_v6_dst(skb, dev, &fl6, info);
- if (IS_ERR(dst)) {
- err = PTR_ERR(dst);
- goto tx_error;
- }
+ if (IS_ERR(dst))
+ return PTR_ERR(dst);
sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true);
- skb_reset_mac_header(skb);
-
- if (info) {
- const struct ip_tunnel_key *key = &info->key;
- u8 *opts = NULL;
- u8 vni[3];
-
- tunnel_id_to_vni(key->tun_id, vni);
- if (info->options_len)
- opts = ip_tunnel_info_opts(info);
-
- if (key->tun_flags & TUNNEL_CSUM)
- flags &= ~GENEVE_F_UDP_ZERO_CSUM6_TX;
- else
- flags |= GENEVE_F_UDP_ZERO_CSUM6_TX;
-
- err = geneve6_build_skb(dst, skb, key->tun_flags, vni,
- info->options_len, opts,
- flags, xnet);
- if (unlikely(err))
- goto tx_error;
-
+ if (geneve->collect_md) {
prio = ip_tunnel_ecn_encap(key->tos, ip_hdr(skb), skb);
ttl = key->ttl;
- label = info->key.label;
} else {
- err = geneve6_build_skb(dst, skb, 0, geneve->vni,
- 0, NULL, flags, xnet);
- if (unlikely(err))
- goto tx_error;
-
prio = ip_tunnel_ecn_encap(ip6_tclass(fl6.flowlabel),
ip_hdr(skb), skb);
- ttl = geneve->ttl;
- if (!ttl && ipv6_addr_is_multicast(&fl6.daddr))
- ttl = 1;
- ttl = ttl ? : ip6_dst_hoplimit(dst);
- label = geneve->label;
+ ttl = key->ttl ? : ip6_dst_hoplimit(dst);
}
+ err = geneve_build_skb(dst, skb, info, xnet, sizeof(struct ipv6hdr));
+ if (unlikely(err))
+ return err;
udp_tunnel6_xmit_skb(dst, gs6->sock->sk, skb, dev,
- &fl6.saddr, &fl6.daddr, prio, ttl, label,
- sport, geneve->dst_port,
- !!(flags & GENEVE_F_UDP_ZERO_CSUM6_TX));
- return NETDEV_TX_OK;
-
-tx_error:
- dev_kfree_skb(skb);
-
- if (err == -ELOOP)
- dev->stats.collisions++;
- else if (err == -ENETUNREACH)
- dev->stats.tx_carrier_errors++;
-
- dev->stats.tx_errors++;
- return NETDEV_TX_OK;
+ &fl6.saddr, &fl6.daddr, prio, ttl,
+ info->key.label, sport, geneve->info.key.tp_dst,
+ !(info->key.tun_flags & TUNNEL_CSUM));
+ return 0;
}
#endif
@@ -1039,63 +868,61 @@ static netdev_tx_t geneve_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct geneve_dev *geneve = netdev_priv(dev);
struct ip_tunnel_info *info = NULL;
+ int err;
- if (geneve->collect_md)
+ if (geneve->collect_md) {
info = skb_tunnel_info(skb);
+ if (unlikely(!info || !(info->mode & IP_TUNNEL_INFO_TX))) {
+ err = -EINVAL;
+ netdev_dbg(dev, "no tunnel metadata\n");
+ goto tx_error;
+ }
+ } else {
+ info = &geneve->info;
+ }
#if IS_ENABLED(CONFIG_IPV6)
- if ((info && ip_tunnel_info_af(info) == AF_INET6) ||
- (!info && geneve->remote.sa.sa_family == AF_INET6))
- return geneve6_xmit_skb(skb, dev, info);
-#endif
- return geneve_xmit_skb(skb, dev, info);
-}
-
-static int __geneve_change_mtu(struct net_device *dev, int new_mtu, bool strict)
-{
- struct geneve_dev *geneve = netdev_priv(dev);
- /* The max_mtu calculation does not take account of GENEVE
- * options, to avoid excluding potentially valid
- * configurations.
- */
- int max_mtu = IP_MAX_MTU - GENEVE_BASE_HLEN - dev->hard_header_len;
-
- if (geneve->remote.sa.sa_family == AF_INET6)
- max_mtu -= sizeof(struct ipv6hdr);
+ if (info->mode & IP_TUNNEL_INFO_IPV6)
+ err = geneve6_xmit_skb(skb, dev, geneve, info);
else
- max_mtu -= sizeof(struct iphdr);
-
- if (new_mtu < 68)
- return -EINVAL;
+#endif
+ err = geneve_xmit_skb(skb, dev, geneve, info);
- if (new_mtu > max_mtu) {
- if (strict)
- return -EINVAL;
+ if (likely(!err))
+ return NETDEV_TX_OK;
+tx_error:
+ dev_kfree_skb(skb);
- new_mtu = max_mtu;
- }
+ if (err == -ELOOP)
+ dev->stats.collisions++;
+ else if (err == -ENETUNREACH)
+ dev->stats.tx_carrier_errors++;
- dev->mtu = new_mtu;
- return 0;
+ dev->stats.tx_errors++;
+ return NETDEV_TX_OK;
}
static int geneve_change_mtu(struct net_device *dev, int new_mtu)
{
- return __geneve_change_mtu(dev, new_mtu, true);
+ /* Clamping is only needed for internal callers; on the
+ * ndo_change_mtu path, new_mtu is already guaranteed to lie
+ * between dev->min_mtu and dev->max_mtu.
+ */
+ if (new_mtu > dev->max_mtu)
+ new_mtu = dev->max_mtu;
+
+ dev->mtu = new_mtu;
+ return 0;
}
static int geneve_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
{
struct ip_tunnel_info *info = skb_tunnel_info(skb);
struct geneve_dev *geneve = netdev_priv(dev);
- struct rtable *rt;
- struct flowi4 fl4;
-#if IS_ENABLED(CONFIG_IPV6)
- struct dst_entry *dst;
- struct flowi6 fl6;
-#endif
if (ip_tunnel_info_af(info) == AF_INET) {
+ struct rtable *rt;
+ struct flowi4 fl4;
+
rt = geneve_get_v4_rt(skb, dev, &fl4, info);
if (IS_ERR(rt))
return PTR_ERR(rt);
@@ -1104,6 +931,9 @@ static int geneve_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
info->key.u.ipv4.src = fl4.saddr;
#if IS_ENABLED(CONFIG_IPV6)
} else if (ip_tunnel_info_af(info) == AF_INET6) {
+ struct dst_entry *dst;
+ struct flowi6 fl6;
+
dst = geneve_get_v6_dst(skb, dev, &fl6, info);
if (IS_ERR(dst))
return PTR_ERR(dst);
@@ -1117,7 +947,7 @@ static int geneve_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
info->key.tp_src = udp_flow_src_port(geneve->net, skb,
1, USHRT_MAX, true);
- info->key.tp_dst = geneve->dst_port;
+ info->key.tp_dst = geneve->info.key.tp_dst;
return 0;
}
@@ -1187,6 +1017,14 @@ static void geneve_setup(struct net_device *dev)
dev->hw_features |= NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_RXCSUM;
dev->hw_features |= NETIF_F_GSO_SOFTWARE;
+ /* MTU range: 68 - (IP_MAX_MTU - GENEVE_BASE_HLEN - hard_header_len) */
+ dev->min_mtu = ETH_MIN_MTU;
+ /* The max_mtu calculation does not take account of GENEVE
+ * options, to avoid excluding potentially valid
+ * configurations. This will be further reduced by IPvX hdr size.
+ */
+ dev->max_mtu = IP_MAX_MTU - GENEVE_BASE_HLEN - dev->hard_header_len;
+
netif_keep_dst(dev);
dev->priv_flags &= ~IFF_TX_SKB_SHARING;
dev->priv_flags |= IFF_LIVE_ADDR_CHANGE | IFF_NO_QUEUE;
@@ -1231,81 +1069,75 @@ static int geneve_validate(struct nlattr *tb[], struct nlattr *data[])
}
static struct geneve_dev *geneve_find_dev(struct geneve_net *gn,
- __be16 dst_port,
- union geneve_addr *remote,
- u8 vni[],
+ const struct ip_tunnel_info *info,
bool *tun_on_same_port,
bool *tun_collect_md)
{
- struct geneve_dev *geneve, *t;
+ struct geneve_dev *geneve, *t = NULL;
*tun_on_same_port = false;
*tun_collect_md = false;
- t = NULL;
list_for_each_entry(geneve, &gn->geneve_list, next) {
- if (geneve->dst_port == dst_port) {
+ if (info->key.tp_dst == geneve->info.key.tp_dst) {
*tun_collect_md = geneve->collect_md;
*tun_on_same_port = true;
}
- if (!memcmp(vni, geneve->vni, sizeof(geneve->vni)) &&
- !memcmp(remote, &geneve->remote, sizeof(geneve->remote)) &&
- dst_port == geneve->dst_port)
+ if (info->key.tun_id == geneve->info.key.tun_id &&
+ info->key.tp_dst == geneve->info.key.tp_dst &&
+ !memcmp(&info->key.u, &geneve->info.key.u, sizeof(info->key.u)))
t = geneve;
}
return t;
}
+static bool is_all_zero(const u8 *fp, size_t size)
+{
+ int i;
+
+ for (i = 0; i < size; i++)
+ if (fp[i])
+ return false;
+ return true;
+}
+
+static bool is_tnl_info_zero(const struct ip_tunnel_info *info)
+{
+ if (info->key.tun_id || info->key.tun_flags || info->key.tos ||
+ info->key.ttl || info->key.label || info->key.tp_src ||
+ !is_all_zero((const u8 *)&info->key.u, sizeof(info->key.u)))
+ return false;
+ else
+ return true;
+}
+
static int geneve_configure(struct net *net, struct net_device *dev,
- union geneve_addr *remote,
- __u32 vni, __u8 ttl, __u8 tos, __be32 label,
- __be16 dst_port, bool metadata, u32 flags)
+ const struct ip_tunnel_info *info,
+ bool metadata, bool ipv6_rx_csum)
{
struct geneve_net *gn = net_generic(net, geneve_net_id);
struct geneve_dev *t, *geneve = netdev_priv(dev);
bool tun_collect_md, tun_on_same_port;
int err, encap_len;
- if (!remote)
- return -EINVAL;
- if (metadata &&
- (remote->sa.sa_family != AF_UNSPEC || vni || tos || ttl || label))
+ if (metadata && !is_tnl_info_zero(info))
return -EINVAL;
geneve->net = net;
geneve->dev = dev;
- geneve->vni[0] = (vni & 0x00ff0000) >> 16;
- geneve->vni[1] = (vni & 0x0000ff00) >> 8;
- geneve->vni[2] = vni & 0x000000ff;
-
- if ((remote->sa.sa_family == AF_INET &&
- IN_MULTICAST(ntohl(remote->sin.sin_addr.s_addr))) ||
- (remote->sa.sa_family == AF_INET6 &&
- ipv6_addr_is_multicast(&remote->sin6.sin6_addr)))
- return -EINVAL;
- if (label && remote->sa.sa_family != AF_INET6)
- return -EINVAL;
-
- geneve->remote = *remote;
-
- geneve->ttl = ttl;
- geneve->tos = tos;
- geneve->label = label;
- geneve->dst_port = dst_port;
- geneve->collect_md = metadata;
- geneve->flags = flags;
-
- t = geneve_find_dev(gn, dst_port, remote, geneve->vni,
- &tun_on_same_port, &tun_collect_md);
+ t = geneve_find_dev(gn, info, &tun_on_same_port, &tun_collect_md);
if (t)
return -EBUSY;
/* make enough headroom for basic scenario */
encap_len = GENEVE_BASE_HLEN + ETH_HLEN;
- if (remote->sa.sa_family == AF_INET)
+ if (ip_tunnel_info_af(info) == AF_INET) {
encap_len += sizeof(struct iphdr);
- else
+ dev->max_mtu -= sizeof(struct iphdr);
+ } else {
encap_len += sizeof(struct ipv6hdr);
+ dev->max_mtu -= sizeof(struct ipv6hdr);
+ }
dev->needed_headroom = encap_len + ETH_HLEN;
if (metadata) {
@@ -1316,7 +1148,10 @@ static int geneve_configure(struct net *net, struct net_device *dev,
return -EPERM;
}
- dst_cache_reset(&geneve->dst_cache);
+ dst_cache_reset(&geneve->info.dst_cache);
+ geneve->info = *info;
+ geneve->collect_md = metadata;
+ geneve->use_udp6_rx_checksums = ipv6_rx_csum;
err = register_netdevice(dev);
if (err)
@@ -1326,74 +1161,99 @@ static int geneve_configure(struct net *net, struct net_device *dev,
return 0;
}
+static void init_tnl_info(struct ip_tunnel_info *info, __u16 dst_port)
+{
+ memset(info, 0, sizeof(*info));
+ info->key.tp_dst = htons(dst_port);
+}
+
static int geneve_newlink(struct net *net, struct net_device *dev,
struct nlattr *tb[], struct nlattr *data[])
{
- __be16 dst_port = htons(GENEVE_UDP_PORT);
- __u8 ttl = 0, tos = 0;
+ bool use_udp6_rx_checksums = false;
+ struct ip_tunnel_info info;
bool metadata = false;
- union geneve_addr remote = geneve_remote_unspec;
- __be32 label = 0;
- __u32 vni = 0;
- u32 flags = 0;
+
+ init_tnl_info(&info, GENEVE_UDP_PORT);
if (data[IFLA_GENEVE_REMOTE] && data[IFLA_GENEVE_REMOTE6])
return -EINVAL;
if (data[IFLA_GENEVE_REMOTE]) {
- remote.sa.sa_family = AF_INET;
- remote.sin.sin_addr.s_addr =
+ info.key.u.ipv4.dst =
nla_get_in_addr(data[IFLA_GENEVE_REMOTE]);
+
+ if (IN_MULTICAST(ntohl(info.key.u.ipv4.dst))) {
+ netdev_dbg(dev, "multicast remote is unsupported\n");
+ return -EINVAL;
+ }
}
if (data[IFLA_GENEVE_REMOTE6]) {
- if (!IS_ENABLED(CONFIG_IPV6))
- return -EPFNOSUPPORT;
-
- remote.sa.sa_family = AF_INET6;
- remote.sin6.sin6_addr =
+#if IS_ENABLED(CONFIG_IPV6)
+ info.mode = IP_TUNNEL_INFO_IPV6;
+ info.key.u.ipv6.dst =
nla_get_in6_addr(data[IFLA_GENEVE_REMOTE6]);
- if (ipv6_addr_type(&remote.sin6.sin6_addr) &
+ if (ipv6_addr_type(&info.key.u.ipv6.dst) &
IPV6_ADDR_LINKLOCAL) {
netdev_dbg(dev, "link-local remote is unsupported\n");
return -EINVAL;
}
+ if (ipv6_addr_is_multicast(&info.key.u.ipv6.dst)) {
+ netdev_dbg(dev, "multicast remote is unsupported\n");
+ return -EINVAL;
+ }
+ info.key.tun_flags |= TUNNEL_CSUM;
+ use_udp6_rx_checksums = true;
+#else
+ return -EPFNOSUPPORT;
+#endif
}
- if (data[IFLA_GENEVE_ID])
+ if (data[IFLA_GENEVE_ID]) {
+ __u32 vni;
+ __u8 tvni[3];
+
vni = nla_get_u32(data[IFLA_GENEVE_ID]);
+ tvni[0] = (vni & 0x00ff0000) >> 16;
+ tvni[1] = (vni & 0x0000ff00) >> 8;
+ tvni[2] = vni & 0x000000ff;
+ info.key.tun_id = vni_to_tunnel_id(tvni);
+ }
if (data[IFLA_GENEVE_TTL])
- ttl = nla_get_u8(data[IFLA_GENEVE_TTL]);
+ info.key.ttl = nla_get_u8(data[IFLA_GENEVE_TTL]);
if (data[IFLA_GENEVE_TOS])
- tos = nla_get_u8(data[IFLA_GENEVE_TOS]);
+ info.key.tos = nla_get_u8(data[IFLA_GENEVE_TOS]);
- if (data[IFLA_GENEVE_LABEL])
- label = nla_get_be32(data[IFLA_GENEVE_LABEL]) &
- IPV6_FLOWLABEL_MASK;
+ if (data[IFLA_GENEVE_LABEL]) {
+ info.key.label = nla_get_be32(data[IFLA_GENEVE_LABEL]) &
+ IPV6_FLOWLABEL_MASK;
+ if (info.key.label && (!(info.mode & IP_TUNNEL_INFO_IPV6)))
+ return -EINVAL;
+ }
if (data[IFLA_GENEVE_PORT])
- dst_port = nla_get_be16(data[IFLA_GENEVE_PORT]);
+ info.key.tp_dst = nla_get_be16(data[IFLA_GENEVE_PORT]);
if (data[IFLA_GENEVE_COLLECT_METADATA])
metadata = true;
if (data[IFLA_GENEVE_UDP_CSUM] &&
!nla_get_u8(data[IFLA_GENEVE_UDP_CSUM]))
- flags |= GENEVE_F_UDP_ZERO_CSUM_TX;
+ info.key.tun_flags |= TUNNEL_CSUM;
if (data[IFLA_GENEVE_UDP_ZERO_CSUM6_TX] &&
nla_get_u8(data[IFLA_GENEVE_UDP_ZERO_CSUM6_TX]))
- flags |= GENEVE_F_UDP_ZERO_CSUM6_TX;
+ info.key.tun_flags &= ~TUNNEL_CSUM;
if (data[IFLA_GENEVE_UDP_ZERO_CSUM6_RX] &&
nla_get_u8(data[IFLA_GENEVE_UDP_ZERO_CSUM6_RX]))
- flags |= GENEVE_F_UDP_ZERO_CSUM6_RX;
+ use_udp6_rx_checksums = false;
- return geneve_configure(net, dev, &remote, vni, ttl, tos, label,
- dst_port, metadata, flags);
+ return geneve_configure(net, dev, &info, metadata, use_udp6_rx_checksums);
}
static void geneve_dellink(struct net_device *dev, struct list_head *head)
@@ -1422,45 +1282,52 @@ static size_t geneve_get_size(const struct net_device *dev)
static int geneve_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
struct geneve_dev *geneve = netdev_priv(dev);
+ struct ip_tunnel_info *info = &geneve->info;
+ __u8 tmp_vni[3];
__u32 vni;
- vni = (geneve->vni[0] << 16) | (geneve->vni[1] << 8) | geneve->vni[2];
+ tunnel_id_to_vni(info->key.tun_id, tmp_vni);
+ vni = (tmp_vni[0] << 16) | (tmp_vni[1] << 8) | tmp_vni[2];
if (nla_put_u32(skb, IFLA_GENEVE_ID, vni))
goto nla_put_failure;
- if (geneve->remote.sa.sa_family == AF_INET) {
+ if (ip_tunnel_info_af(info) == AF_INET) {
if (nla_put_in_addr(skb, IFLA_GENEVE_REMOTE,
- geneve->remote.sin.sin_addr.s_addr))
+ info->key.u.ipv4.dst))
goto nla_put_failure;
+
+ if (nla_put_u8(skb, IFLA_GENEVE_UDP_CSUM,
+ !!(info->key.tun_flags & TUNNEL_CSUM)))
+ goto nla_put_failure;
+
#if IS_ENABLED(CONFIG_IPV6)
} else {
if (nla_put_in6_addr(skb, IFLA_GENEVE_REMOTE6,
- &geneve->remote.sin6.sin6_addr))
+ &info->key.u.ipv6.dst))
+ goto nla_put_failure;
+
+ if (nla_put_u8(skb, IFLA_GENEVE_UDP_ZERO_CSUM6_TX,
+ !(info->key.tun_flags & TUNNEL_CSUM)))
+ goto nla_put_failure;
+
+ if (nla_put_u8(skb, IFLA_GENEVE_UDP_ZERO_CSUM6_RX,
+ !geneve->use_udp6_rx_checksums))
goto nla_put_failure;
#endif
}
- if (nla_put_u8(skb, IFLA_GENEVE_TTL, geneve->ttl) ||
- nla_put_u8(skb, IFLA_GENEVE_TOS, geneve->tos) ||
- nla_put_be32(skb, IFLA_GENEVE_LABEL, geneve->label))
+ if (nla_put_u8(skb, IFLA_GENEVE_TTL, info->key.ttl) ||
+ nla_put_u8(skb, IFLA_GENEVE_TOS, info->key.tos) ||
+ nla_put_be32(skb, IFLA_GENEVE_LABEL, info->key.label))
goto nla_put_failure;
- if (nla_put_be16(skb, IFLA_GENEVE_PORT, geneve->dst_port))
+ if (nla_put_be16(skb, IFLA_GENEVE_PORT, info->key.tp_dst))
goto nla_put_failure;
if (geneve->collect_md) {
if (nla_put_flag(skb, IFLA_GENEVE_COLLECT_METADATA))
goto nla_put_failure;
}
-
- if (nla_put_u8(skb, IFLA_GENEVE_UDP_CSUM,
- !(geneve->flags & GENEVE_F_UDP_ZERO_CSUM_TX)) ||
- nla_put_u8(skb, IFLA_GENEVE_UDP_ZERO_CSUM6_TX,
- !!(geneve->flags & GENEVE_F_UDP_ZERO_CSUM6_TX)) ||
- nla_put_u8(skb, IFLA_GENEVE_UDP_ZERO_CSUM6_RX,
- !!(geneve->flags & GENEVE_F_UDP_ZERO_CSUM6_RX)))
- goto nla_put_failure;
-
return 0;
nla_put_failure:
@@ -1484,6 +1351,7 @@ struct net_device *geneve_dev_create_fb(struct net *net, const char *name,
u8 name_assign_type, u16 dst_port)
{
struct nlattr *tb[IFLA_MAX + 1];
+ struct ip_tunnel_info info;
struct net_device *dev;
LIST_HEAD(list_kill);
int err;
@@ -1494,9 +1362,8 @@ struct net_device *geneve_dev_create_fb(struct net *net, const char *name,
if (IS_ERR(dev))
return dev;
- err = geneve_configure(net, dev, &geneve_remote_unspec,
- 0, 0, 0, 0, htons(dst_port), true,
- GENEVE_F_UDP_ZERO_CSUM6_RX);
+ init_tnl_info(&info, dst_port);
+ err = geneve_configure(net, dev, &info, true, true);
if (err) {
free_netdev(dev);
return ERR_PTR(err);
@@ -1505,7 +1372,7 @@ struct net_device *geneve_dev_create_fb(struct net *net, const char *name,
/* openvswitch users expect packet sizes to be unrestricted,
* so set the largest MTU we can.
*/
- err = __geneve_change_mtu(dev, IP_MAX_MTU, false);
+ err = geneve_change_mtu(dev, IP_MAX_MTU);
if (err)
goto err;
@@ -1514,8 +1381,7 @@ struct net_device *geneve_dev_create_fb(struct net *net, const char *name,
goto err;
return dev;
-
- err:
+err:
geneve_dellink(dev, &list_kill);
unregister_netdevice_many(&list_kill);
return ERR_PTR(err);
@@ -1598,7 +1464,6 @@ static int __init geneve_init_module(void)
goto out3;
return 0;
-
out3:
unregister_netdevice_notifier(&geneve_notifier_block);
out2:
diff --git a/drivers/net/gtp.c b/drivers/net/gtp.c
index 97e0cbca0a08..8b6810bad54b 100644
--- a/drivers/net/gtp.c
+++ b/drivers/net/gtp.c
@@ -77,7 +77,7 @@ struct gtp_dev {
struct hlist_head *addr_hash;
};
-static int gtp_net_id __read_mostly;
+static unsigned int gtp_net_id __read_mostly;
struct gtp_net {
struct list_head gtp_dev_list;
@@ -158,9 +158,9 @@ static bool gtp_check_src_ms_ipv4(struct sk_buff *skb, struct pdp_ctx *pctx,
if (!pskb_may_pull(skb, hdrlen + sizeof(struct iphdr)))
return false;
- iph = (struct iphdr *)(skb->data + hdrlen + sizeof(struct iphdr));
+ iph = (struct iphdr *)(skb->data + hdrlen);
- return iph->saddr != pctx->ms_addr_ip4.s_addr;
+ return iph->saddr == pctx->ms_addr_ip4.s_addr;
}
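This hunk fixes two bugs at once: the inner IPv4 header begins immediately after the GTP header (offset hdrlen, not hdrlen plus another iphdr), and the predicate is supposed to answer "does the inner source belong to this PDP context", so the test must be equality. The pskb_may_pull() above is unchanged and correct, since hdrlen + sizeof(struct iphdr) is exactly the span up to the end of the inner header. A standalone sketch of the corrected check (userspace build, struct iphdr from <netinet/ip.h>):

#include <netinet/ip.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

/* Sketch: the inner IP header starts right after the GTP header. */
static bool inner_src_matches(const uint8_t *pkt, size_t gtp_hdrlen,
			      uint32_t ms_addr_be)
{
	const struct iphdr *iph = (const struct iphdr *)(pkt + gtp_hdrlen);

	return iph->saddr == ms_addr_be;
}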
/* Check if the inner IP source address in this packet is assigned to any
@@ -423,11 +423,11 @@ static inline void gtp1_push_header(struct sk_buff *skb, struct pdp_ctx *pctx)
/* Bits 8 7 6 5 4 3 2 1
* +--+--+--+--+--+--+--+--+
- * |version |PT| 1| E| S|PN|
+ * |version |PT| 0| E| S|PN|
* +--+--+--+--+--+--+--+--+
* 0 0 1 1 1 0 0 0
*/
- gtp1->flags = 0x38; /* v1, GTP-non-prime. */
+ gtp1->flags = 0x30; /* v1, GTP-non-prime. */
gtp1->type = GTP_TPDU;
gtp1->length = htons(payload_len);
gtp1->tid = htonl(pctx->u.v1.o_tei);
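The flags octet is laid out as version (3 bits), PT, one reserved bit, then E/S/PN; version 1 with PT=1 and the reserved bit clear is 0011 0000b = 0x30, so the old 0x38 wrongly set the reserved bit. The same value spelled out (macro names are illustrative; only the bit positions come from the header comment above):

#include <stdint.h>

#define GTP1_F_VERSION(v)	(((v) & 0x7) << 5)	/* bits 8..6 */
#define GTP1_F_PT		0x10	/* bit 5: 1 = GTP, 0 = GTP' */
/* bit 4 is reserved and must stay zero */
#define GTP1_F_E		0x04	/* extension header present */
#define GTP1_F_S		0x02	/* sequence number present */
#define GTP1_F_PN		0x01	/* N-PDU number present */

static const uint8_t gtp1_base_flags = GTP1_F_VERSION(1) | GTP1_F_PT; /* 0x30 */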
@@ -1094,14 +1094,7 @@ static int gtp_genl_del_pdp(struct sk_buff *skb, struct genl_info *info)
return 0;
}
-static struct genl_family gtp_genl_family = {
- .id = GENL_ID_GENERATE,
- .name = "gtp",
- .version = 0,
- .hdrsize = 0,
- .maxattr = GTPA_MAX,
- .netnsok = true,
-};
+static struct genl_family gtp_genl_family;
static int gtp_genl_fill_info(struct sk_buff *skb, u32 snd_portid, u32 snd_seq,
u32 type, struct pdp_ctx *pctx)
@@ -1297,6 +1290,17 @@ static const struct genl_ops gtp_genl_ops[] = {
},
};
+static struct genl_family gtp_genl_family __ro_after_init = {
+ .name = "gtp",
+ .version = 0,
+ .hdrsize = 0,
+ .maxattr = GTPA_MAX,
+ .netnsok = true,
+ .module = THIS_MODULE,
+ .ops = gtp_genl_ops,
+ .n_ops = ARRAY_SIZE(gtp_genl_ops),
+};
+
static int __net_init gtp_net_init(struct net *net)
{
struct gtp_net *gn = net_generic(net, gtp_net_id);
@@ -1336,7 +1340,7 @@ static int __init gtp_init(void)
if (err < 0)
goto error_out;
- err = genl_register_family_with_ops(&gtp_genl_family, gtp_genl_ops);
+ err = genl_register_family(&gtp_genl_family);
if (err < 0)
goto unreg_rtnl_link;
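This is the new-style genetlink registration: the family is defined statically (no GENL_ID_GENERATE), carries its ops table and owning module in .ops/.n_ops/.module, sits in __ro_after_init data, and is registered with genl_register_family() alone instead of genl_register_family_with_ops(). A minimal sketch of the pattern with a hypothetical "demo" family (everything except the genetlink API itself is illustrative):

#include <linux/module.h>
#include <net/genetlink.h>

static int demo_doit(struct sk_buff *skb, struct genl_info *info)
{
	return 0;	/* hypothetical command handler */
}

static const struct genl_ops demo_ops[] = {
	{ .cmd = 1, .doit = demo_doit, },
};

static struct genl_family demo_family __ro_after_init = {
	.name		= "demo",
	.version	= 1,
	.maxattr	= 0,
	.module		= THIS_MODULE,
	.ops		= demo_ops,
	.n_ops		= ARRAY_SIZE(demo_ops),
};

static int __init demo_init(void)
{
	return genl_register_family(&demo_family);
}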
diff --git a/drivers/net/hippi/rrunner.c b/drivers/net/hippi/rrunner.c
index 95c0b45a68fb..f5a9728b89f3 100644
--- a/drivers/net/hippi/rrunner.c
+++ b/drivers/net/hippi/rrunner.c
@@ -68,7 +68,6 @@ static const struct net_device_ops rr_netdev_ops = {
.ndo_stop = rr_close,
.ndo_do_ioctl = rr_ioctl,
.ndo_start_xmit = rr_start_xmit,
- .ndo_change_mtu = hippi_change_mtu,
.ndo_set_mac_address = hippi_mac_addr,
};
diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
index f4fbcb5aa24a..3958adade7eb 100644
--- a/drivers/net/hyperv/hyperv_net.h
+++ b/drivers/net/hyperv/hyperv_net.h
@@ -606,8 +606,8 @@ struct nvsp_message {
} __packed;
-#define NETVSC_MTU 65536
-#define NETVSC_MTU_MIN 68
+#define NETVSC_MTU 65535
+#define NETVSC_MTU_MIN ETH_MIN_MTU
#define NETVSC_RECEIVE_BUFFER_SIZE (1024*1024*16) /* 16MB */
#define NETVSC_RECEIVE_BUFFER_SIZE_LEGACY (1024*1024*15) /* 15MB */
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index 720b5fa9e625..5a1cc089acb7 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -410,8 +410,8 @@ static int netvsc_init_buf(struct hv_device *device)
net_device->send_section_cnt =
net_device->send_buf_size / net_device->send_section_size;
- dev_info(&device->device, "Send section size: %d, Section count:%d\n",
- net_device->send_section_size, net_device->send_section_cnt);
+ netdev_dbg(ndev, "Send section size: %d, Section count:%d\n",
+ net_device->send_section_size, net_device->send_section_cnt);
/* Setup state for managing the send buffer. */
net_device->map_words = DIV_ROUND_UP(net_device->send_section_cnt,
@@ -578,7 +578,7 @@ void netvsc_device_remove(struct hv_device *device)
* At this point, no one should be accessing net_device
* except in here
*/
- dev_notice(&device->device, "net device safe to remove\n");
+ netdev_dbg(ndev, "net device safe to remove\n");
/* Now, we can close the channel safely */
vmbus_close(device->channel);
@@ -888,6 +888,13 @@ int netvsc_send(struct hv_device *device,
if (!net_device)
return -ENODEV;
+ /* We may race with netvsc_connect_vsp()/netvsc_init_buf() and get
+ * here before the negotiation with the host is finished and
+ * send_section_map may not be allocated yet.
+ */
+ if (!net_device->send_section_map)
+ return -EAGAIN;
+
out_channel = net_device->chn_table[q_idx];
packet->send_buf_index = NETVSC_INVALID_INDEX;
@@ -1380,7 +1387,7 @@ int netvsc_device_add(struct hv_device *device, void *additional_info)
}
/* Channel is opened */
- pr_info("hv_netvsc channel opened successfully\n");
+ netdev_dbg(ndev, "hv_netvsc channel opened successfully\n");
/* If we're reopening the device we may have multiple queues, fill the
* chn_table with the default channel to use it before subchannels are
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index c9140c3aeb67..c9414c054852 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -875,19 +875,12 @@ static int netvsc_change_mtu(struct net_device *ndev, int mtu)
struct netvsc_device *nvdev = ndevctx->nvdev;
struct hv_device *hdev = ndevctx->device_ctx;
struct netvsc_device_info device_info;
- int limit = ETH_DATA_LEN;
u32 num_chn;
int ret = 0;
if (ndevctx->start_remove || !nvdev || nvdev->destroy)
return -ENODEV;
- if (nvdev->nvsp_version >= NVSP_PROTOCOL_VERSION_2)
- limit = NETVSC_MTU - ETH_HLEN;
-
- if (mtu < NETVSC_MTU_MIN || mtu > limit)
- return -EINVAL;
-
ret = netvsc_close(ndev);
if (ret)
goto out;
@@ -1406,6 +1399,13 @@ static int netvsc_probe(struct hv_device *dev,
netif_set_real_num_rx_queues(net, nvdev->num_chn);
netif_set_gso_max_size(net, NETVSC_GSO_MAX_SIZE);
+ /* MTU range: 68 - 1500 or 65521 */
+ net->min_mtu = NETVSC_MTU_MIN;
+ if (nvdev->nvsp_version >= NVSP_PROTOCOL_VERSION_2)
+ net->max_mtu = NETVSC_MTU - ETH_HLEN;
+ else
+ net->max_mtu = ETH_DATA_LEN;
+
ret = register_netdev(net);
if (ret != 0) {
pr_err("Unable to register netdev.\n");
diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c
index 9195d5da8485..8d90904e0e49 100644
--- a/drivers/net/hyperv/rndis_filter.c
+++ b/drivers/net/hyperv/rndis_filter.c
@@ -1059,9 +1059,9 @@ int rndis_filter_device_add(struct hv_device *dev,
device_info->link_state = rndis_device->link_state;
- dev_info(&dev->device, "Device MAC %pM link state %s\n",
- rndis_device->hw_mac_adr,
- device_info->link_state ? "down" : "up");
+ netdev_dbg(net, "Device MAC %pM link state %s\n",
+ rndis_device->hw_mac_adr,
+ device_info->link_state ? "down" : "up");
if (net_device->nvsp_version < NVSP_PROTOCOL_VERSION_5)
return 0;
diff --git a/drivers/net/ieee802154/adf7242.c b/drivers/net/ieee802154/adf7242.c
index f355df7cf84a..3e4c8b21403c 100644
--- a/drivers/net/ieee802154/adf7242.c
+++ b/drivers/net/ieee802154/adf7242.c
@@ -873,7 +873,7 @@ static int adf7242_rx(struct adf7242_local *lp)
return 0;
}
-static struct ieee802154_ops adf7242_ops = {
+static const struct ieee802154_ops adf7242_ops = {
.owner = THIS_MODULE,
.xmit_sync = adf7242_xmit,
.ed = adf7242_ed,
diff --git a/drivers/net/ieee802154/at86rf230.c b/drivers/net/ieee802154/at86rf230.c
index 9f10da60e02d..057025722e3d 100644
--- a/drivers/net/ieee802154/at86rf230.c
+++ b/drivers/net/ieee802154/at86rf230.c
@@ -990,7 +990,12 @@ at86rf23x_set_channel(struct at86rf230_local *lp, u8 page, u8 channel)
}
#define AT86RF2XX_MAX_ED_LEVELS 0xF
-static const s32 at86rf23x_ed_levels[AT86RF2XX_MAX_ED_LEVELS + 1] = {
+static const s32 at86rf233_ed_levels[AT86RF2XX_MAX_ED_LEVELS + 1] = {
+ -9400, -9200, -9000, -8800, -8600, -8400, -8200, -8000, -7800, -7600,
+ -7400, -7200, -7000, -6800, -6600, -6400,
+};
+
+static const s32 at86rf231_ed_levels[AT86RF2XX_MAX_ED_LEVELS + 1] = {
-9100, -8900, -8700, -8500, -8300, -8100, -7900, -7700, -7500, -7300,
-7100, -6900, -6700, -6500, -6300, -6100,
};
@@ -1343,7 +1348,7 @@ static struct at86rf2xx_chip_data at86rf233_data = {
.t_sleep_to_off = 1000,
.t_frame = 4096,
.t_p_ack = 545,
- .rssi_base_val = -91,
+ .rssi_base_val = -94,
.set_channel = at86rf23x_set_channel,
.set_txpower = at86rf23x_set_txpower,
};
@@ -1557,9 +1562,6 @@ at86rf230_detect_device(struct at86rf230_local *lp)
lp->hw->phy->supported.cca_opts = BIT(NL802154_CCA_OPT_ENERGY_CARRIER_AND) |
BIT(NL802154_CCA_OPT_ENERGY_CARRIER_OR);
- lp->hw->phy->supported.cca_ed_levels = at86rf23x_ed_levels;
- lp->hw->phy->supported.cca_ed_levels_size = ARRAY_SIZE(at86rf23x_ed_levels);
-
lp->hw->phy->cca.mode = NL802154_CCA_ENERGY;
switch (part) {
@@ -1575,6 +1577,8 @@ at86rf230_detect_device(struct at86rf230_local *lp)
lp->hw->phy->symbol_duration = 16;
lp->hw->phy->supported.tx_powers = at86rf231_powers;
lp->hw->phy->supported.tx_powers_size = ARRAY_SIZE(at86rf231_powers);
+ lp->hw->phy->supported.cca_ed_levels = at86rf231_ed_levels;
+ lp->hw->phy->supported.cca_ed_levels_size = ARRAY_SIZE(at86rf231_ed_levels);
break;
case 7:
chip = "at86rf212";
@@ -1598,6 +1602,8 @@ at86rf230_detect_device(struct at86rf230_local *lp)
lp->hw->phy->symbol_duration = 16;
lp->hw->phy->supported.tx_powers = at86rf233_powers;
lp->hw->phy->supported.tx_powers_size = ARRAY_SIZE(at86rf233_powers);
+ lp->hw->phy->supported.cca_ed_levels = at86rf233_ed_levels;
+ lp->hw->phy->supported.cca_ed_levels_size = ARRAY_SIZE(at86rf233_ed_levels);
break;
default:
chip = "unknown";
diff --git a/drivers/net/ieee802154/atusb.c b/drivers/net/ieee802154/atusb.c
index 1056ed142411..1253f864737a 100644
--- a/drivers/net/ieee802154/atusb.c
+++ b/drivers/net/ieee802154/atusb.c
@@ -58,6 +58,11 @@ struct atusb {
struct urb *tx_urb;
struct sk_buff *tx_skb;
uint8_t tx_ack_seq; /* current TX ACK sequence number */
+
+ /* Firmware variables */
+ unsigned char fw_ver_maj; /* Firmware major version number */
+ unsigned char fw_ver_min; /* Firmware minor version number */
+ unsigned char fw_hw_type; /* Firmware hardware type */
};
/* ----- USB commands without data ----------------------------------------- */
@@ -541,6 +546,21 @@ atusb_set_csma_params(struct ieee802154_hw *hw, u8 min_be, u8 max_be, u8 retries
}
static int
+atusb_set_frame_retries(struct ieee802154_hw *hw, s8 retries)
+{
+ struct atusb *atusb = hw->priv;
+ struct device *dev = &atusb->usb_dev->dev;
+
+ if (atusb->fw_ver_maj == 0 && atusb->fw_ver_min < 3) {
+ dev_info(dev, "Automatic frame retransmission is only available from "
+ "firmware version 0.3. Please update if you want this feature.");
+ return -EINVAL;
+ }
+
+ return atusb_write_subreg(atusb, SR_MAX_FRAME_RETRIES, retries);
+}
+
+static int
atusb_set_promiscuous_mode(struct ieee802154_hw *hw, const bool on)
{
struct atusb *atusb = hw->priv;
@@ -567,7 +587,7 @@ atusb_set_promiscuous_mode(struct ieee802154_hw *hw, const bool on)
return 0;
}
-static struct ieee802154_ops atusb_ops = {
+static const struct ieee802154_ops atusb_ops = {
.owner = THIS_MODULE,
.xmit_async = atusb_xmit,
.ed = atusb_ed,
@@ -579,6 +599,7 @@ static struct ieee802154_ops atusb_ops = {
.set_cca_mode = atusb_set_cca_mode,
.set_cca_ed_level = atusb_set_cca_ed_level,
.set_csma_params = atusb_set_csma_params,
+ .set_frame_retries = atusb_set_frame_retries,
.set_promiscuous_mode = atusb_set_promiscuous_mode,
};
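Frame retransmission needs firmware 0.3 or newer, so the new callback advertises the capability but refuses with -EINVAL on older firmware. The same gate generalized into a hypothetical helper (the driver open-codes the 0.3 comparison):

#include <stdbool.h>

static bool fw_at_least(unsigned char fw_maj, unsigned char fw_min,
			unsigned char maj, unsigned char min)
{
	return fw_maj > maj || (fw_maj == maj && fw_min >= min);
}

/* atusb_set_frame_retries() then allows the write iff
 * fw_at_least(atusb->fw_ver_maj, atusb->fw_ver_min, 0, 3). */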
@@ -594,14 +615,19 @@ static int atusb_get_and_show_revision(struct atusb *atusb)
ret = atusb_control_msg(atusb, usb_rcvctrlpipe(usb_dev, 0),
ATUSB_ID, ATUSB_REQ_FROM_DEV, 0, 0,
buffer, 3, 1000);
- if (ret >= 0)
+ if (ret >= 0) {
+ atusb->fw_ver_maj = buffer[0];
+ atusb->fw_ver_min = buffer[1];
+ atusb->fw_hw_type = buffer[2];
+
dev_info(&usb_dev->dev,
"Firmware: major: %u, minor: %u, hardware type: %u\n",
- buffer[0], buffer[1], buffer[2]);
- if (buffer[0] == 0 && buffer[1] < 2) {
+ atusb->fw_ver_maj, atusb->fw_ver_min, atusb->fw_hw_type);
+ }
+ if (atusb->fw_ver_maj == 0 && atusb->fw_ver_min < 2) {
dev_info(&usb_dev->dev,
- "Firmware version (%u.%u) is predates our first public release.",
- buffer[0], buffer[1]);
+ "Firmware version (%u.%u) predates our first public release.",
+ atusb->fw_ver_maj, atusb->fw_ver_min);
dev_info(&usb_dev->dev, "Please update to version 0.2 or newer");
}
@@ -669,6 +695,43 @@ fail:
return -ENODEV;
}
+static int atusb_set_extended_addr(struct atusb *atusb)
+{
+ struct usb_device *usb_dev = atusb->usb_dev;
+ unsigned char buffer[IEEE802154_EXTENDED_ADDR_LEN];
+ __le64 extended_addr;
+ u64 addr;
+ int ret;
+
+ /* Firmware versions before 0.3 do not support the EUI64_READ command.
+ * Just use a random address and be done */
+ if (atusb->fw_ver_maj == 0 && atusb->fw_ver_min < 3) {
+ ieee802154_random_extended_addr(&atusb->hw->phy->perm_extended_addr);
+ return 0;
+ }
+
+ /* Firmware is new enough so we fetch the address from EEPROM */
+ ret = atusb_control_msg(atusb, usb_rcvctrlpipe(usb_dev, 0),
+ ATUSB_EUI64_READ, ATUSB_REQ_FROM_DEV, 0, 0,
+ buffer, IEEE802154_EXTENDED_ADDR_LEN, 1000);
+ if (ret < 0)
+ dev_err(&usb_dev->dev, "failed to fetch extended address\n");
+
+ memcpy(&extended_addr, buffer, IEEE802154_EXTENDED_ADDR_LEN);
+ /* Check if read address is not empty and the unicast bit is set correctly */
+ if (!ieee802154_is_valid_extended_unicast_addr(extended_addr)) {
+ dev_info(&usb_dev->dev, "no permanent extended address found, random address set\n");
+ ieee802154_random_extended_addr(&atusb->hw->phy->perm_extended_addr);
+ } else {
+ atusb->hw->phy->perm_extended_addr = extended_addr;
+ addr = swab64((__force u64)atusb->hw->phy->perm_extended_addr);
+ dev_info(&usb_dev->dev, "Read permanent extended address %8phC from device\n",
+ &addr);
+ }
+
+ return ret;
+}
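The policy is: prefer a permanent EUI-64 from EEPROM, but only if it passes the unicast validity check; on any failure (or on pre-0.3 firmware, which lacks ATUSB_EUI64_READ) fall back to a random extended address. The decision, compacted into a hypothetical helper built from the same kernel primitives:

#include <linux/ieee802154.h>
#include <net/cfg802154.h>

/* Sketch: pick_extended_addr() is illustrative; the two helpers are real. */
static void pick_extended_addr(struct wpan_phy *phy, __le64 eeprom_addr)
{
	if (ieee802154_is_valid_extended_unicast_addr(eeprom_addr))
		phy->perm_extended_addr = eeprom_addr;
	else
		ieee802154_random_extended_addr(&phy->perm_extended_addr);
}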
+
/* ----- Setup ------------------------------------------------------------- */
static int atusb_probe(struct usb_interface *interface,
@@ -707,7 +770,8 @@ static int atusb_probe(struct usb_interface *interface,
hw->parent = &usb_dev->dev;
hw->flags = IEEE802154_HW_TX_OMIT_CKSUM | IEEE802154_HW_AFILT |
- IEEE802154_HW_PROMISCUOUS | IEEE802154_HW_CSMA_PARAMS;
+ IEEE802154_HW_PROMISCUOUS | IEEE802154_HW_CSMA_PARAMS |
+ IEEE802154_HW_FRAME_RETRIES;
hw->phy->flags = WPAN_PHY_FLAG_TXPOWER | WPAN_PHY_FLAG_CCA_ED_LEVEL |
WPAN_PHY_FLAG_CCA_MODE;
@@ -728,13 +792,14 @@ static int atusb_probe(struct usb_interface *interface,
hw->phy->supported.tx_powers = atusb_powers;
hw->phy->supported.tx_powers_size = ARRAY_SIZE(atusb_powers);
hw->phy->transmit_power = hw->phy->supported.tx_powers[0];
- ieee802154_random_extended_addr(&hw->phy->perm_extended_addr);
hw->phy->cca_ed_level = hw->phy->supported.cca_ed_levels[7];
atusb_command(atusb, ATUSB_RF_RESET, 0);
atusb_get_and_show_chip(atusb);
atusb_get_and_show_revision(atusb);
atusb_get_and_show_build(atusb);
+ atusb_set_extended_addr(atusb);
+
ret = atusb_get_and_clear_error(atusb);
if (ret) {
dev_err(&atusb->usb_dev->dev,
diff --git a/drivers/net/ieee802154/atusb.h b/drivers/net/ieee802154/atusb.h
index 0690edcad57b..b22bbaa77590 100644
--- a/drivers/net/ieee802154/atusb.h
+++ b/drivers/net/ieee802154/atusb.h
@@ -13,8 +13,8 @@
* Firmware: ben-wpan/atusb/fw/include/atusb/atusb.h
*/
-#ifndef _ATUSB_H
-#define _ATUSB_H
+#ifndef _ATUSB_H
+#define _ATUSB_H
#define ATUSB_VENDOR_ID 0x20b7 /* Qi Hardware*/
#define ATUSB_PRODUCT_ID 0x1540 /* 802.15.4, device 0 */
@@ -46,9 +46,12 @@ enum atusb_requests {
ATUSB_SPI_WRITE2_SYNC,
ATUSB_RX_MODE = 0x40, /* HardMAC group */
ATUSB_TX,
+ ATUSB_EUI64_WRITE = 0x50, /* Parameter in EEPROM grp */
+ ATUSB_EUI64_READ,
};
-/* Direction bRequest wValue wIndex wLength
+/*
+ * Direction bRequest wValue wIndex wLength
*
* ->host ATUSB_ID - - 3
* ->host ATUSB_BUILD - - #bytes
@@ -76,6 +79,8 @@ enum atusb_requests {
*
* host-> ATUSB_RX_MODE on - 0
* host-> ATUSB_TX flags ack_seq #bytes
+ * host-> ATUSB_EUI64_WRITE - - #bytes (8)
+ * ->host ATUSB_EUI64_READ - - #bytes (8)
*/
#define ATUSB_REQ_FROM_DEV (USB_TYPE_VENDOR | USB_DIR_IN)
diff --git a/drivers/net/ieee802154/fakelb.c b/drivers/net/ieee802154/fakelb.c
index ec387efb61d0..0d673f7682ee 100644
--- a/drivers/net/ieee802154/fakelb.c
+++ b/drivers/net/ieee802154/fakelb.c
@@ -218,7 +218,7 @@ static int fakelb_probe(struct platform_device *pdev)
goto err_slave;
}
- dev_info(&pdev->dev, "added ieee802154 hardware\n");
+ dev_info(&pdev->dev, "added %i fake ieee802154 hardware devices\n", numlbs);
return 0;
err_slave:
diff --git a/drivers/net/ipvlan/ipvlan.h b/drivers/net/ipvlan/ipvlan.h
index 7e0732f5ea07..031093e1c25f 100644
--- a/drivers/net/ipvlan/ipvlan.h
+++ b/drivers/net/ipvlan/ipvlan.h
@@ -73,7 +73,6 @@ struct ipvl_dev {
DECLARE_BITMAP(mac_filters, IPVLAN_MAC_FILTER_SIZE);
netdev_features_t sfeatures;
u32 msg_enable;
- u16 mtu_adj;
};
struct ipvl_addr {
@@ -98,7 +97,6 @@ struct ipvl_port {
struct work_struct wq;
struct sk_buff_head backlog;
int count;
- struct rcu_head rcu;
};
static inline struct ipvl_port *ipvlan_port_get_rcu(const struct net_device *d)
diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c
index dfbc4ef6d507..693ec5b66222 100644
--- a/drivers/net/ipvlan/ipvlan_main.c
+++ b/drivers/net/ipvlan/ipvlan_main.c
@@ -26,13 +26,13 @@ static struct nf_hook_ops ipvl_nfops[] __read_mostly = {
},
};
-static struct l3mdev_ops ipvl_l3mdev_ops __read_mostly = {
+static const struct l3mdev_ops ipvl_l3mdev_ops = {
.l3mdev_l3_rcv = ipvlan_l3_rcv,
};
static void ipvlan_adjust_mtu(struct ipvl_dev *ipvlan, struct net_device *dev)
{
- ipvlan->dev->mtu = dev->mtu - ipvlan->mtu_adj;
+ ipvlan->dev->mtu = dev->mtu;
}
static int ipvlan_register_nf_hook(void)
@@ -128,7 +128,7 @@ static int ipvlan_port_create(struct net_device *dev)
return 0;
err:
- kfree_rcu(port, rcu);
+ kfree(port);
return err;
}
@@ -145,7 +145,7 @@ static void ipvlan_port_destroy(struct net_device *dev)
netdev_rx_handler_unregister(dev);
cancel_work_sync(&port->wq);
__skb_queue_purge(&port->backlog);
- kfree_rcu(port, rcu);
+ kfree(port);
}
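kfree_rcu() was only needed while RCU readers could still reach the port; netdev_rx_handler_unregister() waits for RCU readers before returning, so once the work is canceled and the backlog purged nothing can touch the struct and a plain kfree() suffices. The ordering that makes this hold, annotated:

#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
/* struct ipvl_port comes from drivers/net/ipvlan/ipvlan.h */

/* Sketch of the teardown ordering assumed by the plain kfree(). */
static void sketch_port_destroy(struct net_device *dev, struct ipvl_port *port)
{
	netdev_rx_handler_unregister(dev);	/* synchronizes RCU readers */
	cancel_work_sync(&port->wq);		/* no deferred work remains */
	__skb_queue_purge(&port->backlog);	/* drop queued skbs */
	kfree(port);				/* nothing can reach it now */
}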
#define IPVLAN_FEATURES \
diff --git a/drivers/net/irda/w83977af_ir.c b/drivers/net/irda/w83977af_ir.c
index e8c3a8c32534..8d5b903d1d9d 100644
--- a/drivers/net/irda/w83977af_ir.c
+++ b/drivers/net/irda/w83977af_ir.c
@@ -1,5 +1,5 @@
/*********************************************************************
- *
+ *
* Filename: w83977af_ir.c
* Version: 1.0
* Description: FIR driver for the Winbond W83977AF Super I/O chip
@@ -8,37 +8,39 @@
* Created at: Wed Nov 4 11:46:16 1998
* Modified at: Fri Jan 28 12:10:59 2000
* Modified by: Dag Brattli <dagb@cs.uit.no>
- *
+ *
* Copyright (c) 1998-2000 Dag Brattli <dagb@cs.uit.no>
* Copyright (c) 1998-1999 Rebel.com
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation; either version 2 of
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
* the License, or (at your option) any later version.
- *
+ *
* Neither Paul VanderSpek nor Rebel.com admit liability nor provide
* warranty for any of this software. This material is provided "AS-IS"
* and at no charge.
- *
+ *
* If you find bugs in this file, it's very likely that the same bug
* will also be in pc87108.c since the implementations are quite
* similar.
*
* Notice that all functions that need to access the chip in _any_
- * way, must save BSR register on entry, and restore it on exit.
+ * way, must save BSR register on entry, and restore it on exit.
* It is _very_ important to follow this policy!
*
* __u8 bank;
- *
+ *
* bank = inb( iobase+BSR);
- *
+ *
* do_your_stuff_here();
*
* outb( bank, iobase+BSR);
*
********************************************************************/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
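Defining pr_fmt before the first include makes every pr_*() call in the file prefix its message automatically, which is why the call sites below can drop hand-rolled prefixes. Sketch (KBUILD_MODNAME expanding to "w83977af_ir" is an assumption based on the object name):

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/printk.h>

/* With dynamic debug enabled, this emits
 * "w83977af_ir: can't get iobase of 0x3e8". */
static void sketch_log(unsigned int iobase)
{
	pr_debug("can't get iobase of 0x%03x\n", iobase);
}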
@@ -63,7 +65,7 @@
#include "w83977af_ir.h"
#define CONFIG_USE_W977_PNP /* Currently needed */
-#define PIO_MAX_SPEED 115200
+#define PIO_MAX_SPEED 115200
static char *driver_name = "w83977af_ir";
static int qos_mtt_bits = 0x07; /* 1 ms or more */
@@ -83,14 +85,14 @@ static unsigned int efio = W977_EFIO_BASE;
static struct w83977af_ir *dev_self[] = { NULL, NULL, NULL, NULL};
/* Some prototypes */
-static int w83977af_open(int i, unsigned int iobase, unsigned int irq,
- unsigned int dma);
+static int w83977af_open(int i, unsigned int iobase, unsigned int irq,
+ unsigned int dma);
static int w83977af_close(struct w83977af_ir *self);
static int w83977af_probe(int iobase, int irq, int dma);
-static int w83977af_dma_receive(struct w83977af_ir *self);
+static int w83977af_dma_receive(struct w83977af_ir *self);
static int w83977af_dma_receive_complete(struct w83977af_ir *self);
static netdev_tx_t w83977af_hard_xmit(struct sk_buff *skb,
- struct net_device *dev);
+ struct net_device *dev);
static int w83977af_pio_write(int iobase, __u8 *buf, int len, int fifo_size);
static void w83977af_dma_write(struct w83977af_ir *self, int iobase);
static void w83977af_change_speed(struct w83977af_ir *self, __u32 speed);
@@ -108,9 +110,9 @@ static int w83977af_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd
*/
static int __init w83977af_init(void)
{
- int i;
+ int i;
- for (i=0; i < ARRAY_SIZE(dev_self) && io[i] < 2000; i++) {
+ for (i = 0; i < ARRAY_SIZE(dev_self) && io[i] < 2000; i++) {
if (w83977af_open(i, io[i], irq[i], dma[i]) == 0)
return 0;
}
@@ -127,7 +129,7 @@ static void __exit w83977af_cleanup(void)
{
int i;
- for (i=0; i < ARRAY_SIZE(dev_self); i++) {
+ for (i = 0; i < ARRAY_SIZE(dev_self); i++) {
if (dev_self[i])
w83977af_close(dev_self[i]);
}
@@ -150,13 +152,13 @@ static int w83977af_open(int i, unsigned int iobase, unsigned int irq,
unsigned int dma)
{
struct net_device *dev;
- struct w83977af_ir *self;
+ struct w83977af_ir *self;
int err;
/* Lock the port that we need */
if (!request_region(iobase, CHIP_IO_EXTENT, driver_name)) {
- pr_debug("%s(), can't get iobase of 0x%03x\n",
- __func__ , iobase);
+ pr_debug("%s: can't get iobase of 0x%03x\n",
+ __func__, iobase);
return -ENODEV;
}
@@ -168,46 +170,44 @@ static int w83977af_open(int i, unsigned int iobase, unsigned int irq,
* Allocate new instance of the driver
*/
dev = alloc_irdadev(sizeof(struct w83977af_ir));
- if (dev == NULL) {
- printk( KERN_ERR "IrDA: Can't allocate memory for "
- "IrDA control block!\n");
+ if (!dev) {
+ pr_err("IrDA: Can't allocate memory for IrDA control block!\n");
err = -ENOMEM;
goto err_out;
}
self = netdev_priv(dev);
spin_lock_init(&self->lock);
-
/* Initialize IO */
- self->io.fir_base = iobase;
- self->io.irq = irq;
- self->io.fir_ext = CHIP_IO_EXTENT;
- self->io.dma = dma;
- self->io.fifo_size = 32;
+ self->io.fir_base = iobase;
+ self->io.irq = irq;
+ self->io.fir_ext = CHIP_IO_EXTENT;
+ self->io.dma = dma;
+ self->io.fifo_size = 32;
/* Initialize QoS for this device */
irda_init_max_qos_capabilies(&self->qos);
-
+
/* The only value we must override is the baudrate */
/* FIXME: The HP HDLS-1100 does not support 1152000! */
- self->qos.baud_rate.bits = IR_9600|IR_19200|IR_38400|IR_57600|
- IR_115200|IR_576000|IR_1152000|(IR_4000000 << 8);
+ self->qos.baud_rate.bits = IR_9600 | IR_19200 | IR_38400 | IR_57600 |
+ IR_115200 | IR_576000 | IR_1152000 | (IR_4000000 << 8);
/* The HP HDLS-1100 needs 1 ms according to the specs */
self->qos.min_turn_time.bits = qos_mtt_bits;
irda_qos_bits_to_value(&self->qos);
-
+
/* Max DMA buffer size needed = (data_size + 6) * (window_size) + 6; */
- self->rx_buff.truesize = 14384;
+ self->rx_buff.truesize = 14384;
self->tx_buff.truesize = 4000;
-
+
/* Allocate memory if needed */
self->rx_buff.head =
dma_zalloc_coherent(NULL, self->rx_buff.truesize,
&self->rx_buff_dma, GFP_KERNEL);
- if (self->rx_buff.head == NULL) {
+ if (!self->rx_buff.head) {
err = -ENOMEM;
goto err_out1;
}
@@ -215,7 +215,7 @@ static int w83977af_open(int i, unsigned int iobase, unsigned int irq,
self->tx_buff.head =
dma_zalloc_coherent(NULL, self->tx_buff.truesize,
&self->tx_buff_dma, GFP_KERNEL);
- if (self->tx_buff.head == NULL) {
+ if (!self->tx_buff.head) {
err = -ENOMEM;
goto err_out2;
}
@@ -230,7 +230,7 @@ static int w83977af_open(int i, unsigned int iobase, unsigned int irq,
err = register_netdev(dev);
if (err) {
- net_err_ratelimited("%s(), register_netdevice() failed!\n",
+ net_err_ratelimited("%s:, register_netdevice() failed!\n",
__func__);
goto err_out3;
}
@@ -238,12 +238,12 @@ static int w83977af_open(int i, unsigned int iobase, unsigned int irq,
/* Need to store self somewhere */
dev_self[i] = self;
-
+
return 0;
err_out3:
dma_free_coherent(NULL, self->tx_buff.truesize,
self->tx_buff.head, self->tx_buff_dma);
-err_out2:
+err_out2:
dma_free_coherent(NULL, self->rx_buff.truesize,
self->rx_buff.head, self->rx_buff_dma);
err_out1:
@@ -263,7 +263,7 @@ static int w83977af_close(struct w83977af_ir *self)
{
int iobase;
- iobase = self->io.fir_base;
+ iobase = self->io.fir_base;
#ifdef CONFIG_USE_W977_PNP
/* enter PnP configuration mode */
@@ -281,14 +281,13 @@ static int w83977af_close(struct w83977af_ir *self)
unregister_netdev(self->netdev);
/* Release the PORT that this driver is using */
- pr_debug("%s(), Releasing Region %03x\n",
- __func__ , self->io.fir_base);
+ pr_debug("%s: Releasing Region %03x\n", __func__, self->io.fir_base);
release_region(self->io.fir_base, self->io.fir_ext);
if (self->tx_buff.head)
dma_free_coherent(NULL, self->tx_buff.truesize,
self->tx_buff.head, self->tx_buff_dma);
-
+
if (self->rx_buff.head)
dma_free_coherent(NULL, self->rx_buff.truesize,
self->rx_buff.head, self->rx_buff_dma);
@@ -300,106 +299,106 @@ static int w83977af_close(struct w83977af_ir *self)
static int w83977af_probe(int iobase, int irq, int dma)
{
- int version;
+ int version;
int i;
-
- for (i=0; i < 2; i++) {
+
+ for (i = 0; i < 2; i++) {
#ifdef CONFIG_USE_W977_PNP
- /* Enter PnP configuration mode */
+ /* Enter PnP configuration mode */
w977_efm_enter(efbase[i]);
-
- w977_select_device(W977_DEVICE_IR, efbase[i]);
-
- /* Configure PnP port, IRQ, and DMA channel */
- w977_write_reg(0x60, (iobase >> 8) & 0xff, efbase[i]);
- w977_write_reg(0x61, (iobase) & 0xff, efbase[i]);
-
- w977_write_reg(0x70, irq, efbase[i]);
+
+ w977_select_device(W977_DEVICE_IR, efbase[i]);
+
+ /* Configure PnP port, IRQ, and DMA channel */
+ w977_write_reg(0x60, (iobase >> 8) & 0xff, efbase[i]);
+ w977_write_reg(0x61, (iobase) & 0xff, efbase[i]);
+
+ w977_write_reg(0x70, irq, efbase[i]);
#ifdef CONFIG_ARCH_NETWINDER
/* Netwinder uses 1 higher than Linux */
- w977_write_reg(0x74, dma+1, efbase[i]);
+ w977_write_reg(0x74, dma + 1, efbase[i]);
#else
- w977_write_reg(0x74, dma, efbase[i]);
+ w977_write_reg(0x74, dma, efbase[i]);
#endif /* CONFIG_ARCH_NETWINDER */
- w977_write_reg(0x75, 0x04, efbase[i]); /* Disable Tx DMA */
-
- /* Set append hardware CRC, enable IR bank selection */
- w977_write_reg(0xf0, APEDCRC|ENBNKSEL, efbase[i]);
-
- /* Activate device */
- w977_write_reg(0x30, 0x01, efbase[i]);
-
- w977_efm_exit(efbase[i]);
+ w977_write_reg(0x75, 0x04, efbase[i]); /* Disable Tx DMA */
+
+ /* Set append hardware CRC, enable IR bank selection */
+ w977_write_reg(0xf0, APEDCRC | ENBNKSEL, efbase[i]);
+
+ /* Activate device */
+ w977_write_reg(0x30, 0x01, efbase[i]);
+
+ w977_efm_exit(efbase[i]);
#endif /* CONFIG_USE_W977_PNP */
- /* Disable Advanced mode */
- switch_bank(iobase, SET2);
- outb(iobase+2, 0x00);
-
- /* Turn on UART (global) interrupts */
- switch_bank(iobase, SET0);
- outb(HCR_EN_IRQ, iobase+HCR);
-
- /* Switch to advanced mode */
- switch_bank(iobase, SET2);
- outb(inb(iobase+ADCR1) | ADCR1_ADV_SL, iobase+ADCR1);
-
- /* Set default IR-mode */
- switch_bank(iobase, SET0);
- outb(HCR_SIR, iobase+HCR);
-
- /* Read the Advanced IR ID */
- switch_bank(iobase, SET3);
- version = inb(iobase+AUID);
-
- /* Should be 0x1? */
- if (0x10 == (version & 0xf0)) {
- efio = efbase[i];
-
- /* Set FIFO size to 32 */
- switch_bank(iobase, SET2);
- outb(ADCR2_RXFS32|ADCR2_TXFS32, iobase+ADCR2);
-
- /* Set FIFO threshold to TX17, RX16 */
- switch_bank(iobase, SET0);
- outb(UFR_RXTL|UFR_TXTL|UFR_TXF_RST|UFR_RXF_RST|
- UFR_EN_FIFO,iobase+UFR);
-
- /* Receiver frame length */
- switch_bank(iobase, SET4);
- outb(2048 & 0xff, iobase+6);
- outb((2048 >> 8) & 0x1f, iobase+7);
-
- /*
- * Init HP HSDL-1100 transceiver.
- *
- * Set IRX_MSL since we have 2 * receive paths IRRX,
- * and IRRXH. Clear IRSL0D since we want IRSL0 * to
- * be a input pin used for IRRXH
+ /* Disable Advanced mode */
+ switch_bank(iobase, SET2);
+ outb(iobase + 2, 0x00);
+
+ /* Turn on UART (global) interrupts */
+ switch_bank(iobase, SET0);
+ outb(HCR_EN_IRQ, iobase + HCR);
+
+ /* Switch to advanced mode */
+ switch_bank(iobase, SET2);
+ outb(inb(iobase + ADCR1) | ADCR1_ADV_SL, iobase + ADCR1);
+
+ /* Set default IR-mode */
+ switch_bank(iobase, SET0);
+ outb(HCR_SIR, iobase + HCR);
+
+ /* Read the Advanced IR ID */
+ switch_bank(iobase, SET3);
+ version = inb(iobase + AUID);
+
+ /* Should be 0x1? */
+ if (0x10 == (version & 0xf0)) {
+ efio = efbase[i];
+
+ /* Set FIFO size to 32 */
+ switch_bank(iobase, SET2);
+ outb(ADCR2_RXFS32 | ADCR2_TXFS32, iobase + ADCR2);
+
+ /* Set FIFO threshold to TX17, RX16 */
+ switch_bank(iobase, SET0);
+ outb(UFR_RXTL | UFR_TXTL | UFR_TXF_RST | UFR_RXF_RST |
+ UFR_EN_FIFO, iobase + UFR);
+
+ /* Receiver frame length */
+ switch_bank(iobase, SET4);
+ outb(2048 & 0xff, iobase + 6);
+ outb((2048 >> 8) & 0x1f, iobase + 7);
+
+ /*
+ * Init HP HSDL-1100 transceiver.
+ *
+ * Set IRX_MSL since we have 2 * receive paths IRRX,
+ * and IRRXH. Clear IRSL0D since we want IRSL0 * to
+ * be an input pin used for IRRXH
*
- * IRRX pin 37 connected to receiver
+ * IRRX pin 37 connected to receiver
* IRTX pin 38 connected to transmitter
- * FIRRX pin 39 connected to receiver (IRSL0)
+ * FIRRX pin 39 connected to receiver (IRSL0)
* CIRRX pin 40 connected to pin 37
*/
switch_bank(iobase, SET7);
- outb(0x40, iobase+7);
-
+ outb(0x40, iobase + 7);
+
net_info_ratelimited("W83977AF (IR) driver loaded. Version: 0x%02x\n",
version);
-
+
return 0;
} else {
/* Try next extended function register address */
- pr_debug("%s(), Wrong chip version", __func__);
+ pr_debug("%s: Wrong chip version\n", __func__);
}
- }
+ }
return -1;
}
static void w83977af_change_speed(struct w83977af_ir *self, __u32 speed)
{
int ir_mode = HCR_SIR;
- int iobase;
+ int iobase;
__u8 set;
iobase = self->io.fir_base;
@@ -408,66 +407,67 @@ static void w83977af_change_speed(struct w83977af_ir *self, __u32 speed)
self->io.speed = speed;
/* Save current bank */
- set = inb(iobase+SSR);
+ set = inb(iobase + SSR);
/* Disable interrupts */
switch_bank(iobase, SET0);
- outb(0, iobase+ICR);
+ outb(0, iobase + ICR);
/* Select Set 2 */
switch_bank(iobase, SET2);
- outb(0x00, iobase+ABHL);
+ outb(0x00, iobase + ABHL);
switch (speed) {
- case 9600: outb(0x0c, iobase+ABLL); break;
- case 19200: outb(0x06, iobase+ABLL); break;
- case 38400: outb(0x03, iobase+ABLL); break;
- case 57600: outb(0x02, iobase+ABLL); break;
- case 115200: outb(0x01, iobase+ABLL); break;
+ case 9600: outb(0x0c, iobase + ABLL); break;
+ case 19200: outb(0x06, iobase + ABLL); break;
+ case 38400: outb(0x03, iobase + ABLL); break;
+ case 57600: outb(0x02, iobase + ABLL); break;
+ case 115200: outb(0x01, iobase + ABLL); break;
case 576000:
ir_mode = HCR_MIR_576;
- pr_debug("%s(), handling baud of 576000\n", __func__);
+ pr_debug("%s: handling baud of 576000\n", __func__);
break;
case 1152000:
ir_mode = HCR_MIR_1152;
- pr_debug("%s(), handling baud of 1152000\n", __func__);
+ pr_debug("%s: handling baud of 1152000\n", __func__);
break;
case 4000000:
ir_mode = HCR_FIR;
- pr_debug("%s(), handling baud of 4000000\n", __func__);
+ pr_debug("%s: handling baud of 4000000\n", __func__);
break;
default:
ir_mode = HCR_FIR;
- pr_debug("%s(), unknown baud rate of %d\n", __func__ , speed);
+ pr_debug("%s: unknown baud rate of %d\n", __func__, speed);
break;
}
/* Set speed mode */
switch_bank(iobase, SET0);
- outb(ir_mode, iobase+HCR);
+ outb(ir_mode, iobase + HCR);
/* set FIFO size to 32 */
switch_bank(iobase, SET2);
- outb(ADCR2_RXFS32|ADCR2_TXFS32, iobase+ADCR2);
-
+ outb(ADCR2_RXFS32 | ADCR2_TXFS32, iobase + ADCR2);
+
/* set FIFO threshold to TX17, RX16 */
switch_bank(iobase, SET0);
- outb(0x00, iobase+UFR); /* Reset */
- outb(UFR_EN_FIFO, iobase+UFR); /* First we must enable FIFO */
- outb(0xa7, iobase+UFR);
+ outb(0x00, iobase + UFR); /* Reset */
+ outb(UFR_EN_FIFO, iobase + UFR); /* First we must enable FIFO */
+ outb(0xa7, iobase + UFR);
netif_wake_queue(self->netdev);
-
+
/* Enable some interrupts so we can receive frames */
switch_bank(iobase, SET0);
if (speed > PIO_MAX_SPEED) {
- outb(ICR_EFSFI, iobase+ICR);
+ outb(ICR_EFSFI, iobase + ICR);
w83977af_dma_receive(self);
- } else
- outb(ICR_ERBRI, iobase+ICR);
-
+ } else {
+ outb(ICR_ERBRI, iobase + ICR);
+ }
+
/* Restore SSR */
- outb(set, iobase+SSR);
+ outb(set, iobase + SSR);
}
/*
@@ -477,69 +477,68 @@ static void w83977af_change_speed(struct w83977af_ir *self, __u32 speed)
*
*/
static netdev_tx_t w83977af_hard_xmit(struct sk_buff *skb,
- struct net_device *dev)
+ struct net_device *dev)
{
struct w83977af_ir *self;
__s32 speed;
int iobase;
__u8 set;
int mtt;
-
+
self = netdev_priv(dev);
iobase = self->io.fir_base;
- pr_debug("%s(%ld), skb->len=%d\n", __func__ , jiffies,
- (int)skb->len);
-
+ pr_debug("%s: %ld, skb->len=%d\n", __func__, jiffies, (int)skb->len);
+
/* Lock transmit buffer */
netif_stop_queue(dev);
-
+
/* Check if we need to change the speed */
speed = irda_get_next_speed(skb);
if ((speed != self->io.speed) && (speed != -1)) {
/* Check for empty frame */
if (!skb->len) {
- w83977af_change_speed(self, speed);
+ w83977af_change_speed(self, speed);
dev_kfree_skb(skb);
return NETDEV_TX_OK;
- } else
- self->new_speed = speed;
+ }
+ self->new_speed = speed;
}
/* Save current set */
- set = inb(iobase+SSR);
-
+ set = inb(iobase + SSR);
+
/* Decide if we should use PIO or DMA transfer */
if (self->io.speed > PIO_MAX_SPEED) {
self->tx_buff.data = self->tx_buff.head;
skb_copy_from_linear_data(skb, self->tx_buff.data, skb->len);
self->tx_buff.len = skb->len;
-
+
mtt = irda_get_mtt(skb);
- pr_debug("%s(%ld), mtt=%d\n", __func__ , jiffies, mtt);
- if (mtt > 1000)
- mdelay(mtt/1000);
- else if (mtt)
- udelay(mtt);
+ pr_debug("%s: %ld, mtt=%d\n", __func__, jiffies, mtt);
+ if (mtt > 1000)
+ mdelay(mtt / 1000);
+ else if (mtt)
+ udelay(mtt);
- /* Enable DMA interrupt */
- switch_bank(iobase, SET0);
- outb(ICR_EDMAI, iobase+ICR);
- w83977af_dma_write(self, iobase);
+ /* Enable DMA interrupt */
+ switch_bank(iobase, SET0);
+ outb(ICR_EDMAI, iobase + ICR);
+ w83977af_dma_write(self, iobase);
} else {
self->tx_buff.data = self->tx_buff.head;
- self->tx_buff.len = async_wrap_skb(skb, self->tx_buff.data,
+ self->tx_buff.len = async_wrap_skb(skb, self->tx_buff.data,
self->tx_buff.truesize);
-
+
/* Add interrupt on tx low level (will fire immediately) */
switch_bank(iobase, SET0);
- outb(ICR_ETXTHI, iobase+ICR);
+ outb(ICR_ETXTHI, iobase + ICR);
}
dev_kfree_skb(skb);
/* Restore set register */
- outb(set, iobase+SSR);
+ outb(set, iobase + SSR);
return NETDEV_TX_OK;
}
@@ -553,64 +552,64 @@ static netdev_tx_t w83977af_hard_xmit(struct sk_buff *skb,
static void w83977af_dma_write(struct w83977af_ir *self, int iobase)
{
__u8 set;
- pr_debug("%s(), len=%d\n", __func__ , self->tx_buff.len);
+
+ pr_debug("%s: len=%d\n", __func__, self->tx_buff.len);
/* Save current set */
- set = inb(iobase+SSR);
+ set = inb(iobase + SSR);
/* Disable DMA */
switch_bank(iobase, SET0);
- outb(inb(iobase+HCR) & ~HCR_EN_DMA, iobase+HCR);
+ outb(inb(iobase + HCR) & ~HCR_EN_DMA, iobase + HCR);
- /* Choose transmit DMA channel */
+ /* Choose transmit DMA channel */
switch_bank(iobase, SET2);
- outb(ADCR1_D_CHSW|/*ADCR1_DMA_F|*/ADCR1_ADV_SL, iobase+ADCR1);
+ outb(ADCR1_D_CHSW | /*ADCR1_DMA_F|*/ADCR1_ADV_SL, iobase + ADCR1);
irda_setup_dma(self->io.dma, self->tx_buff_dma, self->tx_buff.len,
- DMA_MODE_WRITE);
+ DMA_MODE_WRITE);
self->io.direction = IO_XMIT;
-
+
/* Enable DMA */
- switch_bank(iobase, SET0);
- outb(inb(iobase+HCR) | HCR_EN_DMA | HCR_TX_WT, iobase+HCR);
+ switch_bank(iobase, SET0);
+ outb(inb(iobase + HCR) | HCR_EN_DMA | HCR_TX_WT, iobase + HCR);
/* Restore set register */
- outb(set, iobase+SSR);
+ outb(set, iobase + SSR);
}
/*
* Function w83977af_pio_write (iobase, buf, len, fifo_size)
*
- *
+ *
*
*/
static int w83977af_pio_write(int iobase, __u8 *buf, int len, int fifo_size)
{
int actual = 0;
__u8 set;
-
+
/* Save current bank */
- set = inb(iobase+SSR);
+ set = inb(iobase + SSR);
switch_bank(iobase, SET0);
- if (!(inb_p(iobase+USR) & USR_TSRE)) {
- pr_debug("%s(), warning, FIFO not empty yet!\n", __func__);
+ if (!(inb_p(iobase + USR) & USR_TSRE)) {
+ pr_debug("%s: warning, FIFO not empty yet!\n", __func__);
fifo_size -= 17;
- pr_debug("%s(), %d bytes left in tx fifo\n",
- __func__ , fifo_size);
+ pr_debug("%s: %d bytes left in tx fifo\n", __func__, fifo_size);
}
/* Fill FIFO with current frame */
while ((fifo_size-- > 0) && (actual < len)) {
/* Transmit next byte */
- outb(buf[actual++], iobase+TBR);
+ outb(buf[actual++], iobase + TBR);
}
-
- pr_debug("%s(), fifo_size %d ; %d sent of %d\n",
- __func__ , fifo_size, actual, len);
+
+ pr_debug("%s: fifo_size %d ; %d sent of %d\n",
+ __func__, fifo_size, actual, len);
/* Restore bank */
- outb(set, iobase+SSR);
+ outb(set, iobase + SSR);
return actual;
}
@@ -620,39 +619,39 @@ static int w83977af_pio_write(int iobase, __u8 *buf, int len, int fifo_size)
*
* The transfer of a frame in finished. So do the necessary things
*
- *
+ *
*/
static void w83977af_dma_xmit_complete(struct w83977af_ir *self)
{
int iobase;
__u8 set;
- pr_debug("%s(%ld)\n", __func__ , jiffies);
+ pr_debug("%s: %ld\n", __func__, jiffies);
- IRDA_ASSERT(self != NULL, return;);
+ IRDA_ASSERT(self, return;);
iobase = self->io.fir_base;
/* Save current set */
- set = inb(iobase+SSR);
+ set = inb(iobase + SSR);
/* Disable DMA */
switch_bank(iobase, SET0);
- outb(inb(iobase+HCR) & ~HCR_EN_DMA, iobase+HCR);
-
+ outb(inb(iobase + HCR) & ~HCR_EN_DMA, iobase + HCR);
+
/* Check for underrun! */
- if (inb(iobase+AUDR) & AUDR_UNDR) {
- pr_debug("%s(), Transmit underrun!\n", __func__);
-
+ if (inb(iobase + AUDR) & AUDR_UNDR) {
+ pr_debug("%s: Transmit underrun!\n", __func__);
+
self->netdev->stats.tx_errors++;
self->netdev->stats.tx_fifo_errors++;
/* Clear bit, by writing 1 to it */
- outb(AUDR_UNDR, iobase+AUDR);
- } else
+ outb(AUDR_UNDR, iobase + AUDR);
+ } else {
self->netdev->stats.tx_packets++;
+ }
-
if (self->new_speed) {
w83977af_change_speed(self, self->new_speed);
self->new_speed = 0;
@@ -661,9 +660,9 @@ static void w83977af_dma_xmit_complete(struct w83977af_ir *self)
/* Unlock tx_buff and request another frame */
/* Tell the network layer, that we want more frames */
netif_wake_queue(self->netdev);
-
+
/* Restore set */
- outb(set, iobase+SSR);
+ outb(set, iobase + SSR);
}
/*
@@ -681,23 +680,23 @@ static int w83977af_dma_receive(struct w83977af_ir *self)
unsigned long flags;
__u8 hcr;
#endif
- IRDA_ASSERT(self != NULL, return -1;);
+ IRDA_ASSERT(self, return -1;);
pr_debug("%s\n", __func__);
- iobase= self->io.fir_base;
+ iobase = self->io.fir_base;
/* Save current set */
- set = inb(iobase+SSR);
+ set = inb(iobase + SSR);
/* Disable DMA */
switch_bank(iobase, SET0);
- outb(inb(iobase+HCR) & ~HCR_EN_DMA, iobase+HCR);
+ outb(inb(iobase + HCR) & ~HCR_EN_DMA, iobase + HCR);
/* Choose DMA Rx, DMA Fairness, and Advanced mode */
switch_bank(iobase, SET2);
- outb((inb(iobase+ADCR1) & ~ADCR1_D_CHSW)/*|ADCR1_DMA_F*/|ADCR1_ADV_SL,
- iobase+ADCR1);
+ outb((inb(iobase + ADCR1) & ~ADCR1_D_CHSW)/*|ADCR1_DMA_F*/ | ADCR1_ADV_SL,
+ iobase + ADCR1);
self->io.direction = IO_RECV;
self->rx_buff.data = self->rx_buff.head;
@@ -714,27 +713,27 @@ static int w83977af_dma_receive(struct w83977af_ir *self)
irda_setup_dma(self->io.dma, self->rx_buff_dma, self->rx_buff.truesize,
DMA_MODE_READ);
#endif
- /*
- * Reset Rx FIFO. This will also flush the ST_FIFO, it's very
+ /*
+ * Reset Rx FIFO. This will also flush the ST_FIFO, it's very
* important that we don't reset the Tx FIFO since it might not
* be finished transmitting yet
*/
switch_bank(iobase, SET0);
- outb(UFR_RXTL|UFR_TXTL|UFR_RXF_RST|UFR_EN_FIFO, iobase+UFR);
+ outb(UFR_RXTL | UFR_TXTL | UFR_RXF_RST | UFR_EN_FIFO, iobase + UFR);
self->st_fifo.len = self->st_fifo.tail = self->st_fifo.head = 0;
-
+
/* Enable DMA */
switch_bank(iobase, SET0);
#ifdef CONFIG_ARCH_NETWINDER
- hcr = inb(iobase+HCR);
- outb(hcr | HCR_EN_DMA, iobase+HCR);
+ hcr = inb(iobase + HCR);
+ outb(hcr | HCR_EN_DMA, iobase + HCR);
enable_dma(self->io.dma);
spin_unlock_irqrestore(&self->lock, flags);
-#else
- outb(inb(iobase+HCR) | HCR_EN_DMA, iobase+HCR);
+#else
+ outb(inb(iobase + HCR) | HCR_EN_DMA, iobase + HCR);
#endif
/* Restore set */
- outb(set, iobase+SSR);
+ outb(set, iobase + SSR);
return 0;
}
@@ -761,22 +760,22 @@ static int w83977af_dma_receive_complete(struct w83977af_ir *self)
iobase = self->io.fir_base;
/* Save current set */
- set = inb(iobase+SSR);
-
+ set = inb(iobase + SSR);
+
iobase = self->io.fir_base;
/* Read status FIFO */
switch_bank(iobase, SET5);
- while ((status = inb(iobase+FS_FO)) & FS_FO_FSFDR) {
+ while ((status = inb(iobase + FS_FO)) & FS_FO_FSFDR) {
st_fifo->entries[st_fifo->tail].status = status;
-
- st_fifo->entries[st_fifo->tail].len = inb(iobase+RFLFL);
- st_fifo->entries[st_fifo->tail].len |= inb(iobase+RFLFH) << 8;
-
+
+ st_fifo->entries[st_fifo->tail].len = inb(iobase + RFLFL);
+ st_fifo->entries[st_fifo->tail].len |= inb(iobase + RFLFH) << 8;
+
st_fifo->tail++;
st_fifo->len++;
}
-
+
while (st_fifo->len) {
/* Get first entry */
status = st_fifo->entries[st_fifo->head].status;
@@ -792,53 +791,52 @@ static int w83977af_dma_receive_complete(struct w83977af_ir *self)
} else {
/* Skip frame */
self->netdev->stats.rx_errors++;
-
+
self->rx_buff.data += len;
-
+
if (status & FS_FO_MX_LEX)
self->netdev->stats.rx_length_errors++;
-
- if (status & FS_FO_PHY_ERR)
+
+ if (status & FS_FO_PHY_ERR)
self->netdev->stats.rx_frame_errors++;
-
- if (status & FS_FO_CRC_ERR)
+
+ if (status & FS_FO_CRC_ERR)
self->netdev->stats.rx_crc_errors++;
}
/* The errors below can be reported in both cases */
if (status & FS_FO_RX_OV)
self->netdev->stats.rx_fifo_errors++;
-
+
if (status & FS_FO_FSF_OV)
self->netdev->stats.rx_fifo_errors++;
-
+
} else {
/* Check if we have transferred all data to memory */
switch_bank(iobase, SET0);
- if (inb(iobase+USR) & USR_RDR) {
+ if (inb(iobase + USR) & USR_RDR)
udelay(80); /* Should be enough!? */
- }
-
- skb = dev_alloc_skb(len+1);
- if (skb == NULL) {
- printk(KERN_INFO
- "%s(), memory squeeze, dropping frame.\n", __func__);
+
+ skb = dev_alloc_skb(len + 1);
+ if (!skb) {
+ pr_info("%s: memory squeeze, dropping frame\n",
+ __func__);
/* Restore set register */
- outb(set, iobase+SSR);
+ outb(set, iobase + SSR);
return FALSE;
}
-
+
/* Align to 20 bytes */
- skb_reserve(skb, 1);
-
+ skb_reserve(skb, 1);
+
/* Copy frame without CRC */
if (self->io.speed < 4000000) {
- skb_put(skb, len-2);
+ skb_put(skb, len - 2);
skb_copy_to_linear_data(skb,
self->rx_buff.data,
len - 2);
} else {
- skb_put(skb, len-4);
+ skb_put(skb, len - 4);
skb_copy_to_linear_data(skb,
self->rx_buff.data,
len - 4);
@@ -847,7 +845,7 @@ static int w83977af_dma_receive_complete(struct w83977af_ir *self)
/* Move to next frame */
self->rx_buff.data += len;
self->netdev->stats.rx_packets++;
-
+
skb->dev = self->netdev;
skb_reset_mac_header(skb);
skb->protocol = htons(ETH_P_IRDA);
@@ -855,7 +853,7 @@ static int w83977af_dma_receive_complete(struct w83977af_ir *self)
}
}
/* Restore set register */
- outb(set, iobase+SSR);
+ outb(set, iobase + SSR);
return TRUE;
}
@@ -866,21 +864,21 @@ static int w83977af_dma_receive_complete(struct w83977af_ir *self)
* Receive all data in receiver FIFO
*
*/
-static void w83977af_pio_receive(struct w83977af_ir *self)
+static void w83977af_pio_receive(struct w83977af_ir *self)
{
__u8 byte = 0x00;
int iobase;
- IRDA_ASSERT(self != NULL, return;);
-
+ IRDA_ASSERT(self, return;);
+
iobase = self->io.fir_base;
-
+
/* Receive all characters in Rx FIFO */
do {
- byte = inb(iobase+RBR);
+ byte = inb(iobase + RBR);
async_unwrap_char(self->netdev, &self->netdev->stats, &self->rx_buff,
byte);
- } while (inb(iobase+USR) & USR_RDR); /* Data available */
+ } while (inb(iobase + USR) & USR_RDR); /* Data available */
}
/*
@@ -896,30 +894,30 @@ static __u8 w83977af_sir_interrupt(struct w83977af_ir *self, int isr)
__u8 set;
int iobase;
- pr_debug("%s(), isr=%#x\n", __func__ , isr);
-
+ pr_debug("%s: isr=%#x\n", __func__, isr);
+
iobase = self->io.fir_base;
/* Transmit FIFO low on data */
if (isr & ISR_TXTH_I) {
/* Write data left in transmit buffer */
- actual = w83977af_pio_write(self->io.fir_base,
- self->tx_buff.data,
- self->tx_buff.len,
+ actual = w83977af_pio_write(self->io.fir_base,
+ self->tx_buff.data,
+ self->tx_buff.len,
self->io.fifo_size);
self->tx_buff.data += actual;
self->tx_buff.len -= actual;
-
+
self->io.direction = IO_XMIT;
/* Check if finished */
if (self->tx_buff.len > 0) {
new_icr |= ICR_ETXTHI;
} else {
- set = inb(iobase+SSR);
+ set = inb(iobase + SSR);
switch_bank(iobase, SET0);
- outb(AUDR_SFEND, iobase+AUDR);
- outb(set, iobase+SSR);
+ outb(AUDR_SFEND, iobase + AUDR);
+ outb(set, iobase + SSR);
self->netdev->stats.tx_packets++;
@@ -929,10 +927,10 @@ static __u8 w83977af_sir_interrupt(struct w83977af_ir *self, int isr)
}
}
/* Check if transmission has completed */
- if (isr & ISR_TXEMP_I) {
+ if (isr & ISR_TXEMP_I) {
/* Check if we need to change the speed? */
if (self->new_speed) {
- pr_debug("%s(), Changing speed!\n", __func__);
+ pr_debug("%s: Changing speed!\n", __func__);
w83977af_change_speed(self, self->new_speed);
self->new_speed = 0;
}
@@ -965,12 +963,11 @@ static __u8 w83977af_fir_interrupt(struct w83977af_ir *self, int isr)
int iobase;
iobase = self->io.fir_base;
- set = inb(iobase+SSR);
-
+ set = inb(iobase + SSR);
+
/* End of frame detected in FIFO */
- if (isr & (ISR_FEND_I|ISR_FSF_I)) {
+ if (isr & (ISR_FEND_I | ISR_FSF_I)) {
if (w83977af_dma_receive_complete(self)) {
-
/* Wait for next status FIFO interrupt */
new_icr |= ICR_EFSFI;
} else {
@@ -978,11 +975,11 @@ static __u8 w83977af_fir_interrupt(struct w83977af_ir *self, int isr)
/* Set timer value, resolution 1 ms */
switch_bank(iobase, SET4);
- outb(0x01, iobase+TMRL); /* 1 ms */
- outb(0x00, iobase+TMRH);
+ outb(0x01, iobase + TMRL); /* 1 ms */
+ outb(0x00, iobase + TMRH);
/* Start timer */
- outb(IR_MSL_EN_TMR, iobase+IR_MSL);
+ outb(IR_MSL_EN_TMR, iobase + IR_MSL);
new_icr |= ICR_ETMRI;
}
@@ -991,11 +988,11 @@ static __u8 w83977af_fir_interrupt(struct w83977af_ir *self, int isr)
if (isr & ISR_TMR_I) {
/* Disable timer */
switch_bank(iobase, SET4);
- outb(0, iobase+IR_MSL);
+ outb(0, iobase + IR_MSL);
/* Clear timer event */
/* switch_bank(iobase, SET0); */
-/* outb(ASCR_CTE, iobase+ASCR); */
+/* outb(ASCR_CTE, iobase+ASCR); */
/* Check if this is a TX timer interrupt */
if (self->io.direction == IO_XMIT) {
@@ -1008,25 +1005,25 @@ static __u8 w83977af_fir_interrupt(struct w83977af_ir *self, int isr)
new_icr |= ICR_EFSFI;
}
- }
+ }
/* Finished with DMA */
if (isr & ISR_DMA_I) {
w83977af_dma_xmit_complete(self);
/* Check if there are more frames to be transmitted */
/* if (irda_device_txqueue_empty(self)) { */
-
- /* Prepare for receive
- *
+
+ /* Prepare for receive
+ *
* ** Netwinder Tx DMA likes that we do this anyway **
*/
w83977af_dma_receive(self);
new_icr = ICR_EFSFI;
- /* } */
+ /* } */
}
-
+
/* Restore set */
- outb(set, iobase+SSR);
+ outb(set, iobase + SSR);
return new_icr;
}
@@ -1049,24 +1046,24 @@ static irqreturn_t w83977af_interrupt(int irq, void *dev_id)
iobase = self->io.fir_base;
/* Save current bank */
- set = inb(iobase+SSR);
+ set = inb(iobase + SSR);
switch_bank(iobase, SET0);
-
- icr = inb(iobase+ICR);
- isr = inb(iobase+ISR) & icr; /* Mask out the interesting ones */
- outb(0, iobase+ICR); /* Disable interrupts */
-
+ icr = inb(iobase + ICR);
+ isr = inb(iobase + ISR) & icr; /* Mask out the interesting ones */
+
+ outb(0, iobase + ICR); /* Disable interrupts */
+
if (isr) {
/* Dispatch interrupt handler for the current speed */
- if (self->io.speed > PIO_MAX_SPEED )
+ if (self->io.speed > PIO_MAX_SPEED)
icr = w83977af_fir_interrupt(self, isr);
else
icr = w83977af_sir_interrupt(self, isr);
}
- outb(icr, iobase+ICR); /* Restore (new) interrupts */
- outb(set, iobase+SSR); /* Restore bank register */
+ outb(icr, iobase + ICR); /* Restore (new) interrupts */
+ outb(set, iobase + SSR); /* Restore bank register */
return IRQ_RETVAL(isr);
}
@@ -1082,22 +1079,23 @@ static int w83977af_is_receiving(struct w83977af_ir *self)
int iobase;
__u8 set;
- IRDA_ASSERT(self != NULL, return FALSE;);
+ IRDA_ASSERT(self, return FALSE;);
if (self->io.speed > 115200) {
iobase = self->io.fir_base;
/* Check if rx FIFO is not empty */
- set = inb(iobase+SSR);
+ set = inb(iobase + SSR);
switch_bank(iobase, SET2);
- if ((inb(iobase+RXFDTH) & 0x3f) != 0) {
+ if ((inb(iobase + RXFDTH) & 0x3f) != 0) {
/* We are receiving something */
status = TRUE;
}
- outb(set, iobase+SSR);
- } else
+ outb(set, iobase + SSR);
+ } else {
status = (self->rx_buff.state != OUTSIDE_FRAME);
-
+ }
+
return status;
}
@@ -1113,17 +1111,16 @@ static int w83977af_net_open(struct net_device *dev)
int iobase;
char hwname[32];
__u8 set;
-
-
- IRDA_ASSERT(dev != NULL, return -1;);
+
+ IRDA_ASSERT(dev, return -1;);
self = netdev_priv(dev);
-
- IRDA_ASSERT(self != NULL, return 0;);
-
+
+ IRDA_ASSERT(self, return 0;);
+
iobase = self->io.fir_base;
- if (request_irq(self->io.irq, w83977af_interrupt, 0, dev->name,
- (void *) dev)) {
+ if (request_irq(self->io.irq, w83977af_interrupt, 0, dev->name,
+ (void *)dev)) {
return -EAGAIN;
}
/*
@@ -1134,30 +1131,31 @@ static int w83977af_net_open(struct net_device *dev)
free_irq(self->io.irq, dev);
return -EAGAIN;
}
-
+
/* Save current set */
- set = inb(iobase+SSR);
+ set = inb(iobase + SSR);
- /* Enable some interrupts so we can receive frames again */
- switch_bank(iobase, SET0);
- if (self->io.speed > 115200) {
- outb(ICR_EFSFI, iobase+ICR);
- w83977af_dma_receive(self);
- } else
- outb(ICR_ERBRI, iobase+ICR);
+ /* Enable some interrupts so we can receive frames again */
+ switch_bank(iobase, SET0);
+ if (self->io.speed > 115200) {
+ outb(ICR_EFSFI, iobase + ICR);
+ w83977af_dma_receive(self);
+ } else {
+ outb(ICR_ERBRI, iobase + ICR);
+ }
/* Restore bank register */
- outb(set, iobase+SSR);
+ outb(set, iobase + SSR);
/* Ready to play! */
netif_start_queue(dev);
-
+
/* Give self a hardware name */
sprintf(hwname, "w83977af @ 0x%03x", self->io.fir_base);
- /*
+ /*
* Open new IrLAP layer instance, now that everything should be
- * initialized properly
+ * initialized properly
*/
self->irlap = irlap_open(dev, &self->qos, hwname);
@@ -1176,17 +1174,17 @@ static int w83977af_net_close(struct net_device *dev)
int iobase;
__u8 set;
- IRDA_ASSERT(dev != NULL, return -1;);
-
+ IRDA_ASSERT(dev, return -1;);
+
self = netdev_priv(dev);
-
- IRDA_ASSERT(self != NULL, return 0;);
-
+
+ IRDA_ASSERT(self, return 0;);
+
iobase = self->io.fir_base;
/* Stop device */
netif_stop_queue(dev);
-
+
/* Stop and remove instance of IrLAP */
if (self->irlap)
irlap_close(self->irlap);
@@ -1195,17 +1193,17 @@ static int w83977af_net_close(struct net_device *dev)
disable_dma(self->io.dma);
/* Save current set */
- set = inb(iobase+SSR);
-
+ set = inb(iobase + SSR);
+
/* Disable interrupts */
switch_bank(iobase, SET0);
- outb(0, iobase+ICR);
+ outb(0, iobase + ICR);
free_irq(self->io.irq, dev);
free_dma(self->io.dma);
/* Restore bank register */
- outb(set, iobase+SSR);
+ outb(set, iobase + SSR);
return 0;
}
@@ -1218,19 +1216,19 @@ static int w83977af_net_close(struct net_device *dev)
*/
static int w83977af_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
- struct if_irda_req *irq = (struct if_irda_req *) rq;
+ struct if_irda_req *irq = (struct if_irda_req *)rq;
struct w83977af_ir *self;
unsigned long flags;
int ret = 0;
- IRDA_ASSERT(dev != NULL, return -1;);
+ IRDA_ASSERT(dev, return -1;);
self = netdev_priv(dev);
- IRDA_ASSERT(self != NULL, return -1;);
+ IRDA_ASSERT(self, return -1;);
+
+ pr_debug("%s: %s, (cmd=0x%X)\n", __func__, dev->name, cmd);
- pr_debug("%s(), %s, (cmd=0x%X)\n", __func__ , dev->name, cmd);
-
spin_lock_irqsave(&self->lock, flags);
switch (cmd) {
@@ -1263,7 +1261,6 @@ MODULE_AUTHOR("Dag Brattli <dagb@cs.uit.no>");
MODULE_DESCRIPTION("Winbond W83977AF IrDA Device Driver");
MODULE_LICENSE("GPL");
-
module_param(qos_mtt_bits, int, 0);
MODULE_PARM_DESC(qos_mtt_bits, "Mimimum Turn Time");
module_param_array(io, int, NULL, 0);
@@ -1274,7 +1271,7 @@ MODULE_PARM_DESC(irq, "IRQ lines");
/*
* Function init_module (void)
*
- *
+ *
*
*/
module_init(w83977af_init);
@@ -1282,7 +1279,7 @@ module_init(w83977af_init);
/*
* Function cleanup_module (void)
*
- *
+ *
*
*/
module_exit(w83977af_cleanup);
diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
index d2e61e002926..f83cf6696820 100644
--- a/drivers/net/macsec.c
+++ b/drivers/net/macsec.c
@@ -1431,14 +1431,7 @@ static void clear_tx_sa(struct macsec_tx_sa *tx_sa)
macsec_txsa_put(tx_sa);
}
-static struct genl_family macsec_fam = {
- .id = GENL_ID_GENERATE,
- .name = MACSEC_GENL_NAME,
- .hdrsize = 0,
- .version = MACSEC_GENL_VERSION,
- .maxattr = MACSEC_ATTR_MAX,
- .netnsok = true,
-};
+static struct genl_family macsec_fam;
static struct net_device *get_dev_from_nl(struct net *net,
struct nlattr **attrs)
@@ -2665,6 +2658,17 @@ static const struct genl_ops macsec_genl_ops[] = {
},
};
+static struct genl_family macsec_fam __ro_after_init = {
+ .name = MACSEC_GENL_NAME,
+ .hdrsize = 0,
+ .version = MACSEC_GENL_VERSION,
+ .maxattr = MACSEC_ATTR_MAX,
+ .netnsok = true,
+ .module = THIS_MODULE,
+ .ops = macsec_genl_ops,
+ .n_ops = ARRAY_SIZE(macsec_genl_ops),
+};
+
static netdev_tx_t macsec_start_xmit(struct sk_buff *skb,
struct net_device *dev)
{
@@ -2980,6 +2984,8 @@ static void macsec_free_netdev(struct net_device *dev)
static void macsec_setup(struct net_device *dev)
{
ether_setup(dev);
+ dev->min_mtu = 0;
+ dev->max_mtu = ETH_MAX_MTU;
dev->priv_flags |= IFF_NO_QUEUE;
dev->netdev_ops = &macsec_netdev_ops;
dev->destructor = macsec_free_netdev;
@@ -3340,19 +3346,18 @@ static struct net *macsec_get_link_net(const struct net_device *dev)
static size_t macsec_get_size(const struct net_device *dev)
{
- return 0 +
- nla_total_size_64bit(8) + /* SCI */
- nla_total_size(1) + /* ICV_LEN */
- nla_total_size_64bit(8) + /* CIPHER_SUITE */
- nla_total_size(4) + /* WINDOW */
- nla_total_size(1) + /* ENCODING_SA */
- nla_total_size(1) + /* ENCRYPT */
- nla_total_size(1) + /* PROTECT */
- nla_total_size(1) + /* INC_SCI */
- nla_total_size(1) + /* ES */
- nla_total_size(1) + /* SCB */
- nla_total_size(1) + /* REPLAY_PROTECT */
- nla_total_size(1) + /* VALIDATION */
+ return nla_total_size_64bit(8) + /* IFLA_MACSEC_SCI */
+ nla_total_size(1) + /* IFLA_MACSEC_ICV_LEN */
+ nla_total_size_64bit(8) + /* IFLA_MACSEC_CIPHER_SUITE */
+ nla_total_size(4) + /* IFLA_MACSEC_WINDOW */
+ nla_total_size(1) + /* IFLA_MACSEC_ENCODING_SA */
+ nla_total_size(1) + /* IFLA_MACSEC_ENCRYPT */
+ nla_total_size(1) + /* IFLA_MACSEC_PROTECT */
+ nla_total_size(1) + /* IFLA_MACSEC_INC_SCI */
+ nla_total_size(1) + /* IFLA_MACSEC_ES */
+ nla_total_size(1) + /* IFLA_MACSEC_SCB */
+ nla_total_size(1) + /* IFLA_MACSEC_REPLAY_PROTECT */
+ nla_total_size(1) + /* IFLA_MACSEC_VALIDATION */
0;
}
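Each attribute accounted for here costs more than its payload: nla_total_size(payload) is NLA_ALIGN(NLA_HDRLEN + payload), i.e. a 4-byte attribute header plus padding to a 4-byte boundary, so each 1-byte flag reserves 8 bytes of message space. Sketch:

#include <net/netlink.h>

/* Sketch: NLA_HDRLEN (4) + 1 byte of payload, aligned to 4 -> 8. */
static int one_byte_attr_size(void)
{
	return nla_total_size(1);	/* == 8 */
}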
@@ -3470,7 +3475,7 @@ static int __init macsec_init(void)
if (err)
goto notifier;
- err = genl_register_family_with_ops(&macsec_fam, macsec_genl_ops);
+ err = genl_register_family(&macsec_fam);
if (err)
goto rtnl;
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index 26d6f0bbe14b..20b3fdf282c5 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -43,7 +43,6 @@ struct macvlan_port {
struct net_device *dev;
struct hlist_head vlan_hash[MACVLAN_HASH_SIZE];
struct list_head vlans;
- struct rcu_head rcu;
struct sk_buff_head bc_queue;
struct work_struct bc_work;
bool passthru;
@@ -179,20 +178,20 @@ static void macvlan_hash_change_addr(struct macvlan_dev *vlan,
macvlan_hash_add(vlan);
}
-static int macvlan_addr_busy(const struct macvlan_port *port,
- const unsigned char *addr)
+static bool macvlan_addr_busy(const struct macvlan_port *port,
+ const unsigned char *addr)
{
/* Test to see if the specified multicast address is
* currently in use by the underlying device or
* another macvlan.
*/
if (ether_addr_equal_64bits(port->dev->dev_addr, addr))
- return 1;
+ return true;
if (macvlan_hash_lookup(port, addr))
- return 1;
+ return true;
- return 0;
+ return false;
}
@@ -400,8 +399,7 @@ static void macvlan_forward_source(struct sk_buff *skb,
hlist_for_each_entry_rcu(entry, h, hlist) {
if (ether_addr_equal_64bits(entry->addr, addr))
- if (entry->vlan->dev->flags & IFF_UP)
- macvlan_forward_source_one(skb, entry->vlan);
+ macvlan_forward_source_one(skb, entry->vlan);
}
}
@@ -778,7 +776,7 @@ static int macvlan_change_mtu(struct net_device *dev, int new_mtu)
{
struct macvlan_dev *vlan = netdev_priv(dev);
- if (new_mtu < 68 || vlan->lowerdev->mtu < new_mtu)
+ if (vlan->lowerdev->mtu < new_mtu)
return -EINVAL;
dev->mtu = new_mtu;
return 0;
@@ -1086,6 +1084,8 @@ void macvlan_common_setup(struct net_device *dev)
{
ether_setup(dev);
+ dev->min_mtu = 0;
+ dev->max_mtu = ETH_MAX_MTU;
dev->priv_flags &= ~IFF_TX_SKB_SHARING;
netif_keep_dst(dev);
dev->priv_flags |= IFF_UNICAST_FLT;
@@ -1150,7 +1150,7 @@ static void macvlan_port_destroy(struct net_device *dev)
cancel_work_sync(&port->bc_work);
__skb_queue_purge(&port->bc_queue);
- kfree_rcu(port, rcu);
+ kfree(port);
}
static int macvlan_validate(struct nlattr *tb[], struct nlattr *data[])
@@ -1299,6 +1299,10 @@ int macvlan_common_newlink(struct net *src_net, struct net_device *dev,
else if (dev->mtu > lowerdev->mtu)
return -EINVAL;
+ /* MTU range: 68 - lowerdev->max_mtu */
+ dev->min_mtu = ETH_MIN_MTU;
+ dev->max_mtu = lowerdev->max_mtu;
+
if (!tb[IFLA_ADDRESS])
eth_hw_addr_random(dev);
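
These macvlan/macsec hunks (and the nlmon and ntb_netdev ones below) rely on the same core change: net devices now carry min_mtu/max_mtu, and the networking core range-checks a requested MTU before any ndo_change_mtu callback runs, so drivers can drop their own bounds checks. A simplified sketch of that core-side validation, hedged: the real logic, with its error reporting and corner cases, lives in net/core/dev.c (dev_set_mtu):

#include <linux/netdevice.h>

static int demo_set_mtu(struct net_device *dev, int new_mtu)
{
	if (new_mtu < dev->min_mtu)
		return -EINVAL;	/* rejected before the driver sees it */
	if (dev->max_mtu > 0 && new_mtu > dev->max_mtu)
		return -EINVAL;

	if (dev->netdev_ops->ndo_change_mtu)
		return dev->netdev_ops->ndo_change_mtu(dev, new_mtu);

	dev->mtu = new_mtu;
	return 0;
}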
diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
index 7869b0651576..5c26653eceb5 100644
--- a/drivers/net/macvtap.c
+++ b/drivers/net/macvtap.c
@@ -437,7 +437,7 @@ static int macvtap_get_minor(struct macvlan_dev *vlan)
if (retval >= 0) {
vlan->minor = retval;
} else if (retval == -ENOSPC) {
- printk(KERN_ERR "too many macvtap devices\n");
+ netdev_err(vlan->dev, "Too many macvtap devices\n");
retval = -EINVAL;
}
mutex_unlock(&minor_lock);
@@ -679,7 +679,6 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m,
int depth;
bool zerocopy = false;
size_t linear;
- ssize_t n;
if (q->flags & IFF_VNET_HDR) {
vnet_hdr_len = q->vnet_hdr_sz;
@@ -690,8 +689,7 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m,
len -= vnet_hdr_len;
err = -EFAULT;
- n = copy_from_iter(&vnet_hdr, sizeof(vnet_hdr), from);
- if (n != sizeof(vnet_hdr))
+ if (!copy_from_iter_full(&vnet_hdr, sizeof(vnet_hdr), from))
goto err;
iov_iter_advance(from, vnet_hdr_len - sizeof(vnet_hdr));
if ((vnet_hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) &&
@@ -826,9 +824,8 @@ static ssize_t macvtap_put_user(struct macvtap_queue *q,
if (iov_iter_count(iter) < vnet_hdr_len)
return -EINVAL;
- ret = virtio_net_hdr_from_skb(skb, &vnet_hdr,
- macvtap_is_little_endian(q));
- if (ret)
+ if (virtio_net_hdr_from_skb(skb, &vnet_hdr,
+ macvtap_is_little_endian(q)))
BUG();
if (copy_to_iter(&vnet_hdr, sizeof(vnet_hdr), iter) !=
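
The macvtap hunk above switches to copy_from_iter_full(), which is all-or-nothing and restores the iterator on a short copy, so the caller no longer needs a byte-count variable. A hedged sketch contrasting the two styles; "demo_hdr" and "demo_pull_header" are illustrative names only:

#include <linux/uio.h>

struct demo_hdr { u32 flags; u32 len; };

static int demo_pull_header(struct demo_hdr *hdr, struct iov_iter *from)
{
	/* Old style: copy_from_iter() returns the byte count and leaves
	 * the iterator advanced even on a short copy:
	 *
	 *	if (copy_from_iter(hdr, sizeof(*hdr), from) != sizeof(*hdr))
	 *		return -EFAULT;
	 *
	 * New style: copy_from_iter_full() succeeds or fails as a whole
	 * and reverts the iterator on failure, so no length bookkeeping
	 * is needed.
	 */
	if (!copy_from_iter_full(hdr, sizeof(*hdr), from))
		return -EFAULT;
	return 0;
}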
diff --git a/drivers/net/mii.c b/drivers/net/mii.c
index 993570b1e2ae..6d953c53eed6 100644
--- a/drivers/net/mii.c
+++ b/drivers/net/mii.c
@@ -135,6 +135,103 @@ int mii_ethtool_gset(struct mii_if_info *mii, struct ethtool_cmd *ecmd)
}
/**
+ * mii_ethtool_get_link_ksettings - get settings, reported in @cmd
+ * @mii: MII interface
+ * @cmd: requested ethtool_link_ksettings
+ *
+ * The @cmd parameter is expected to have been cleared before calling
+ * mii_ethtool_get_link_ksettings().
+ *
+ * Returns 0 for success, negative on error.
+ */
+int mii_ethtool_get_link_ksettings(struct mii_if_info *mii,
+ struct ethtool_link_ksettings *cmd)
+{
+ struct net_device *dev = mii->dev;
+ u16 bmcr, bmsr, ctrl1000 = 0, stat1000 = 0;
+ u32 nego, supported, advertising, lp_advertising;
+
+ supported = (SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full |
+ SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full |
+ SUPPORTED_Autoneg | SUPPORTED_TP | SUPPORTED_MII);
+ if (mii->supports_gmii)
+ supported |= SUPPORTED_1000baseT_Half |
+ SUPPORTED_1000baseT_Full;
+
+ /* only supports twisted-pair */
+ cmd->base.port = PORT_MII;
+
+ /* this isn't fully supported at higher layers */
+ cmd->base.phy_address = mii->phy_id;
+ cmd->base.mdio_support = ETH_MDIO_SUPPORTS_C22;
+
+ advertising = ADVERTISED_TP | ADVERTISED_MII;
+
+ bmcr = mii->mdio_read(dev, mii->phy_id, MII_BMCR);
+ bmsr = mii->mdio_read(dev, mii->phy_id, MII_BMSR);
+ if (mii->supports_gmii) {
+ ctrl1000 = mii->mdio_read(dev, mii->phy_id, MII_CTRL1000);
+ stat1000 = mii->mdio_read(dev, mii->phy_id, MII_STAT1000);
+ }
+ if (bmcr & BMCR_ANENABLE) {
+ advertising |= ADVERTISED_Autoneg;
+ cmd->base.autoneg = AUTONEG_ENABLE;
+
+ advertising |= mii_get_an(mii, MII_ADVERTISE);
+ if (mii->supports_gmii)
+ advertising |= mii_ctrl1000_to_ethtool_adv_t(ctrl1000);
+
+ if (bmsr & BMSR_ANEGCOMPLETE) {
+ lp_advertising = mii_get_an(mii, MII_LPA);
+ lp_advertising |=
+ mii_stat1000_to_ethtool_lpa_t(stat1000);
+ } else {
+ lp_advertising = 0;
+ }
+
+ nego = advertising & lp_advertising;
+
+ if (nego & (ADVERTISED_1000baseT_Full |
+ ADVERTISED_1000baseT_Half)) {
+ cmd->base.speed = SPEED_1000;
+ cmd->base.duplex = !!(nego & ADVERTISED_1000baseT_Full);
+ } else if (nego & (ADVERTISED_100baseT_Full |
+ ADVERTISED_100baseT_Half)) {
+ cmd->base.speed = SPEED_100;
+ cmd->base.duplex = !!(nego & ADVERTISED_100baseT_Full);
+ } else {
+ cmd->base.speed = SPEED_10;
+ cmd->base.duplex = !!(nego & ADVERTISED_10baseT_Full);
+ }
+ } else {
+ cmd->base.autoneg = AUTONEG_DISABLE;
+
+ cmd->base.speed = ((bmcr & BMCR_SPEED1000 &&
+ (bmcr & BMCR_SPEED100) == 0) ?
+ SPEED_1000 :
+ ((bmcr & BMCR_SPEED100) ?
+ SPEED_100 : SPEED_10));
+ cmd->base.duplex = (bmcr & BMCR_FULLDPLX) ?
+ DUPLEX_FULL : DUPLEX_HALF;
+
+ lp_advertising = 0;
+ }
+
+ mii->full_duplex = cmd->base.duplex;
+
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
+ supported);
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
+ advertising);
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.lp_advertising,
+ lp_advertising);
+
+ /* ignore maxtxpkt, maxrxpkt for now */
+
+ return 0;
+}
+
+/**
* mii_ethtool_sset - set settings that are specified in @ecmd
* @mii: MII interface
* @ecmd: requested ethtool_cmd
@@ -227,6 +324,104 @@ int mii_ethtool_sset(struct mii_if_info *mii, struct ethtool_cmd *ecmd)
}
/**
+ * mii_ethtool_set_link_ksettings - set settings that are specified in @cmd
+ * @mii: MII interface
+ * @cmd: requested ethtool_link_ksettings
+ *
+ * Returns 0 for success, negative on error.
+ */
+int mii_ethtool_set_link_ksettings(struct mii_if_info *mii,
+ const struct ethtool_link_ksettings *cmd)
+{
+ struct net_device *dev = mii->dev;
+ u32 speed = cmd->base.speed;
+
+ if (speed != SPEED_10 &&
+ speed != SPEED_100 &&
+ speed != SPEED_1000)
+ return -EINVAL;
+ if (cmd->base.duplex != DUPLEX_HALF && cmd->base.duplex != DUPLEX_FULL)
+ return -EINVAL;
+ if (cmd->base.port != PORT_MII)
+ return -EINVAL;
+ if (cmd->base.phy_address != mii->phy_id)
+ return -EINVAL;
+ if (cmd->base.autoneg != AUTONEG_DISABLE &&
+ cmd->base.autoneg != AUTONEG_ENABLE)
+ return -EINVAL;
+ if ((speed == SPEED_1000) && (!mii->supports_gmii))
+ return -EINVAL;
+
+ /* ignore supported, maxtxpkt, maxrxpkt */
+
+ if (cmd->base.autoneg == AUTONEG_ENABLE) {
+ u32 bmcr, advert, tmp;
+ u32 advert2 = 0, tmp2 = 0;
+ u32 advertising;
+
+ ethtool_convert_link_mode_to_legacy_u32(
+ &advertising, cmd->link_modes.advertising);
+
+ if ((advertising & (ADVERTISED_10baseT_Half |
+ ADVERTISED_10baseT_Full |
+ ADVERTISED_100baseT_Half |
+ ADVERTISED_100baseT_Full |
+ ADVERTISED_1000baseT_Half |
+ ADVERTISED_1000baseT_Full)) == 0)
+ return -EINVAL;
+
+ /* advertise only what has been requested */
+ advert = mii->mdio_read(dev, mii->phy_id, MII_ADVERTISE);
+ tmp = advert & ~(ADVERTISE_ALL | ADVERTISE_100BASE4);
+ if (mii->supports_gmii) {
+ advert2 = mii->mdio_read(dev, mii->phy_id,
+ MII_CTRL1000);
+ tmp2 = advert2 &
+ ~(ADVERTISE_1000HALF | ADVERTISE_1000FULL);
+ }
+ tmp |= ethtool_adv_to_mii_adv_t(advertising);
+
+ if (mii->supports_gmii)
+ tmp2 |= ethtool_adv_to_mii_ctrl1000_t(advertising);
+ if (advert != tmp) {
+ mii->mdio_write(dev, mii->phy_id, MII_ADVERTISE, tmp);
+ mii->advertising = tmp;
+ }
+ if ((mii->supports_gmii) && (advert2 != tmp2))
+ mii->mdio_write(dev, mii->phy_id, MII_CTRL1000, tmp2);
+
+ /* turn on autonegotiation, and force a renegotiate */
+ bmcr = mii->mdio_read(dev, mii->phy_id, MII_BMCR);
+ bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
+ mii->mdio_write(dev, mii->phy_id, MII_BMCR, bmcr);
+
+ mii->force_media = 0;
+ } else {
+ u32 bmcr, tmp;
+
+ /* turn off auto negotiation, set speed and duplexity */
+ bmcr = mii->mdio_read(dev, mii->phy_id, MII_BMCR);
+ tmp = bmcr & ~(BMCR_ANENABLE | BMCR_SPEED100 |
+ BMCR_SPEED1000 | BMCR_FULLDPLX);
+ if (speed == SPEED_1000)
+ tmp |= BMCR_SPEED1000;
+ else if (speed == SPEED_100)
+ tmp |= BMCR_SPEED100;
+ if (cmd->base.duplex == DUPLEX_FULL) {
+ tmp |= BMCR_FULLDPLX;
+ mii->full_duplex = 1;
+ } else {
+ mii->full_duplex = 0;
+ }
+ if (bmcr != tmp)
+ mii->mdio_write(dev, mii->phy_id, MII_BMCR, tmp);
+
+ mii->force_media = 1;
+ }
+ return 0;
+}
+
+/**
* mii_check_gmii_support - check if the MII supports Gb interfaces
* @mii: the MII interface
*/
@@ -466,7 +661,9 @@ MODULE_LICENSE("GPL");
EXPORT_SYMBOL(mii_link_ok);
EXPORT_SYMBOL(mii_nway_restart);
EXPORT_SYMBOL(mii_ethtool_gset);
+EXPORT_SYMBOL(mii_ethtool_get_link_ksettings);
EXPORT_SYMBOL(mii_ethtool_sset);
+EXPORT_SYMBOL(mii_ethtool_set_link_ksettings);
EXPORT_SYMBOL(mii_check_link);
EXPORT_SYMBOL(mii_check_media);
EXPORT_SYMBOL(mii_check_gmii_support);
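
A hedged usage sketch for the two helpers exported above, wired into a driver that embeds a struct mii_if_info; "demo_priv" and "demo_ethtool_ops" are illustrative names, not part of this patch:

#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/netdevice.h>

struct demo_priv {
	struct mii_if_info mii;
};

static int demo_get_link_ksettings(struct net_device *dev,
				   struct ethtool_link_ksettings *cmd)
{
	struct demo_priv *priv = netdev_priv(dev);

	/* the ethtool core hands the driver a cleared *cmd, which the
	 * kernel-doc for mii_ethtool_get_link_ksettings() relies on
	 */
	return mii_ethtool_get_link_ksettings(&priv->mii, cmd);
}

static int demo_set_link_ksettings(struct net_device *dev,
				   const struct ethtool_link_ksettings *cmd)
{
	struct demo_priv *priv = netdev_priv(dev);

	return mii_ethtool_set_link_ksettings(&priv->mii, cmd);
}

static const struct ethtool_ops demo_ethtool_ops = {
	.get_link_ksettings = demo_get_link_ksettings,
	.set_link_ksettings = demo_set_link_ksettings,
};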
diff --git a/drivers/net/nlmon.c b/drivers/net/nlmon.c
index 7b7c70e2341e..2de7faee9b19 100644
--- a/drivers/net/nlmon.c
+++ b/drivers/net/nlmon.c
@@ -27,24 +27,6 @@ static netdev_tx_t nlmon_xmit(struct sk_buff *skb, struct net_device *dev)
return NETDEV_TX_OK;
}
-static int nlmon_is_valid_mtu(int new_mtu)
-{
- /* Note that in netlink we do not really have an upper limit. On
- * default, we use NLMSG_GOODSIZE. Here at least we should make
- * sure that it's at least the header size.
- */
- return new_mtu >= (int) sizeof(struct nlmsghdr);
-}
-
-static int nlmon_change_mtu(struct net_device *dev, int new_mtu)
-{
- if (!nlmon_is_valid_mtu(new_mtu))
- return -EINVAL;
-
- dev->mtu = new_mtu;
- return 0;
-}
-
static int nlmon_dev_init(struct net_device *dev)
{
dev->lstats = netdev_alloc_pcpu_stats(struct pcpu_lstats);
@@ -124,7 +106,6 @@ static const struct net_device_ops nlmon_ops = {
.ndo_stop = nlmon_close,
.ndo_start_xmit = nlmon_xmit,
.ndo_get_stats64 = nlmon_get_stats64,
- .ndo_change_mtu = nlmon_change_mtu,
};
static void nlmon_setup(struct net_device *dev)
@@ -145,6 +126,7 @@ static void nlmon_setup(struct net_device *dev)
* expected in most cases.
*/
dev->mtu = NLMSG_GOODSIZE;
+ dev->min_mtu = sizeof(struct nlmsghdr);
}
static int nlmon_validate(struct nlattr *tb[], struct nlattr *data[])
diff --git a/drivers/net/ntb_netdev.c b/drivers/net/ntb_netdev.c
index a9acf7156855..36877ba65516 100644
--- a/drivers/net/ntb_netdev.c
+++ b/drivers/net/ntb_netdev.c
@@ -433,6 +433,9 @@ static int ntb_netdev_probe(struct device *client_dev)
ndev->netdev_ops = &ntb_netdev_ops;
ndev->ethtool_ops = &ntb_ethtool_ops;
+ ndev->min_mtu = 0;
+ ndev->max_mtu = ETH_MAX_MTU;
+
dev->qp = ntb_transport_create_queue(ndev, client_dev,
&ntb_netdev_handlers);
if (!dev->qp) {
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig
index 2651c8d8de2f..d361835b315d 100644
--- a/drivers/net/phy/Kconfig
+++ b/drivers/net/phy/Kconfig
@@ -15,6 +15,19 @@ if PHYLIB
config SWPHY
bool
+config LED_TRIGGER_PHY
+ bool "Support LED triggers for tracking link state"
+ depends on LEDS_TRIGGERS
+ ---help---
+ Adds support for a set of LED trigger events per-PHY. Link
+ state changes will trigger the events, for consumption by an
+ LED class driver. There is one trigger for each link speed
+ currently supported by the PHY, named:
+ <mii bus id>:<phy>:<speed>
+
+ where <speed> takes the form:
+ <Speed in megabits>Mbps or <Speed in gigabits>Gbps
+
comment "MDIO bus device drivers"
config MDIO_BCM_IPROC
@@ -204,7 +217,7 @@ config BROADCOM_PHY
select BCM_NET_PHYLIB
---help---
Currently supports the BCM5411, BCM5421, BCM5461, BCM54616S, BCM5464,
- BCM5481 and BCM5482 PHYs.
+ BCM5481, BCM54810 and BCM5482 PHYs.
config CICADA_PHY
tristate "Cicada PHYs"
@@ -264,6 +277,11 @@ config MARVELL_PHY
---help---
Currently has a driver for the 88E1011S
+config MESON_GXL_PHY
+ tristate "Amlogic Meson GXL Internal PHY"
+ ---help---
+ Currently has a driver for the Amlogic Meson GXL Internal PHY
+
config MICREL_PHY
tristate "Micrel PHYs"
---help---
@@ -277,7 +295,7 @@ config MICROCHIP_PHY
config MICROSEMI_PHY
tristate "Microsemi PHYs"
---help---
- Currently supports the VSC8531 and VSC8541 PHYs
+ Currently supports VSC8530, VSC8531, VSC8540 and VSC8541 PHYs
config NATIONAL_PHY
tristate "National Semiconductor PHYs"
diff --git a/drivers/net/phy/Makefile b/drivers/net/phy/Makefile
index e58667d111e7..356859ac7c18 100644
--- a/drivers/net/phy/Makefile
+++ b/drivers/net/phy/Makefile
@@ -2,6 +2,7 @@
libphy-y := phy.o phy_device.o mdio_bus.o mdio_device.o
libphy-$(CONFIG_SWPHY) += swphy.o
+libphy-$(CONFIG_LED_TRIGGER_PHY) += phy_led_triggers.o
obj-$(CONFIG_PHYLIB) += libphy.o
@@ -41,6 +42,7 @@ obj-$(CONFIG_INTEL_XWAY_PHY) += intel-xway.o
obj-$(CONFIG_LSI_ET1011C_PHY) += et1011c.o
obj-$(CONFIG_LXT_PHY) += lxt.o
obj-$(CONFIG_MARVELL_PHY) += marvell.o
+obj-$(CONFIG_MESON_GXL_PHY) += meson-gxl.o
obj-$(CONFIG_MICREL_KS8995MA) += spi_ks8995.o
obj-$(CONFIG_MICREL_PHY) += micrel.o
obj-$(CONFIG_MICROCHIP_PHY) += microchip.o
diff --git a/drivers/net/phy/aquantia.c b/drivers/net/phy/aquantia.c
index 09b0b0aa8d68..e8ae50e1255e 100644
--- a/drivers/net/phy/aquantia.c
+++ b/drivers/net/phy/aquantia.c
@@ -21,6 +21,8 @@
#define PHY_ID_AQ1202 0x03a1b445
#define PHY_ID_AQ2104 0x03a1b460
#define PHY_ID_AQR105 0x03a1b4a2
+#define PHY_ID_AQR106 0x03a1b4d0
+#define PHY_ID_AQR107 0x03a1b4e0
#define PHY_ID_AQR405 0x03a1b4b0
#define PHY_AQUANTIA_FEATURES (SUPPORTED_10000baseT_Full | \
@@ -154,6 +156,30 @@ static struct phy_driver aquantia_driver[] = {
.read_status = aquantia_read_status,
},
{
+ .phy_id = PHY_ID_AQR106,
+ .phy_id_mask = 0xfffffff0,
+ .name = "Aquantia AQR106",
+ .features = PHY_AQUANTIA_FEATURES,
+ .flags = PHY_HAS_INTERRUPT,
+ .aneg_done = aquantia_aneg_done,
+ .config_aneg = aquantia_config_aneg,
+ .config_intr = aquantia_config_intr,
+ .ack_interrupt = aquantia_ack_interrupt,
+ .read_status = aquantia_read_status,
+},
+{
+ .phy_id = PHY_ID_AQR107,
+ .phy_id_mask = 0xfffffff0,
+ .name = "Aquantia AQR107",
+ .features = PHY_AQUANTIA_FEATURES,
+ .flags = PHY_HAS_INTERRUPT,
+ .aneg_done = aquantia_aneg_done,
+ .config_aneg = aquantia_config_aneg,
+ .config_intr = aquantia_config_intr,
+ .ack_interrupt = aquantia_ack_interrupt,
+ .read_status = aquantia_read_status,
+},
+{
.phy_id = PHY_ID_AQR405,
.phy_id_mask = 0xfffffff0,
.name = "Aquantia AQR405",
@@ -173,6 +199,8 @@ static struct mdio_device_id __maybe_unused aquantia_tbl[] = {
{ PHY_ID_AQ1202, 0xfffffff0 },
{ PHY_ID_AQ2104, 0xfffffff0 },
{ PHY_ID_AQR105, 0xfffffff0 },
+ { PHY_ID_AQR106, 0xfffffff0 },
+ { PHY_ID_AQR107, 0xfffffff0 },
{ PHY_ID_AQR405, 0xfffffff0 },
{ }
};
diff --git a/drivers/net/phy/at803x.c b/drivers/net/phy/at803x.c
index a52b560e428b..c1e52b9dc58d 100644
--- a/drivers/net/phy/at803x.c
+++ b/drivers/net/phy/at803x.c
@@ -63,6 +63,7 @@
#define ATH8030_PHY_ID 0x004dd076
#define ATH8031_PHY_ID 0x004dd074
#define ATH8035_PHY_ID 0x004dd072
+#define AT803X_PHY_ID_MASK 0xffffffef
MODULE_DESCRIPTION("Atheros 803x PHY driver");
MODULE_AUTHOR("Matus Ujhelyi");
@@ -398,7 +399,7 @@ static struct phy_driver at803x_driver[] = {
/* ATHEROS 8035 */
.phy_id = ATH8035_PHY_ID,
.name = "Atheros 8035 ethernet",
- .phy_id_mask = 0xffffffef,
+ .phy_id_mask = AT803X_PHY_ID_MASK,
.probe = at803x_probe,
.config_init = at803x_config_init,
.set_wol = at803x_set_wol,
@@ -415,7 +416,7 @@ static struct phy_driver at803x_driver[] = {
/* ATHEROS 8030 */
.phy_id = ATH8030_PHY_ID,
.name = "Atheros 8030 ethernet",
- .phy_id_mask = 0xffffffef,
+ .phy_id_mask = AT803X_PHY_ID_MASK,
.probe = at803x_probe,
.config_init = at803x_config_init,
.link_change_notify = at803x_link_change_notify,
@@ -433,7 +434,7 @@ static struct phy_driver at803x_driver[] = {
/* ATHEROS 8031 */
.phy_id = ATH8031_PHY_ID,
.name = "Atheros 8031 ethernet",
- .phy_id_mask = 0xffffffef,
+ .phy_id_mask = AT803X_PHY_ID_MASK,
.probe = at803x_probe,
.config_init = at803x_config_init,
.set_wol = at803x_set_wol,
@@ -452,9 +453,9 @@ static struct phy_driver at803x_driver[] = {
module_phy_driver(at803x_driver);
static struct mdio_device_id __maybe_unused atheros_tbl[] = {
- { ATH8030_PHY_ID, 0xffffffef },
- { ATH8031_PHY_ID, 0xffffffef },
- { ATH8035_PHY_ID, 0xffffffef },
+ { ATH8030_PHY_ID, AT803X_PHY_ID_MASK },
+ { ATH8031_PHY_ID, AT803X_PHY_ID_MASK },
+ { ATH8035_PHY_ID, AT803X_PHY_ID_MASK },
{ }
};
diff --git a/drivers/net/phy/bcm-cygnus.c b/drivers/net/phy/bcm-cygnus.c
index 49bbc6826883..3fe8cc5c177e 100644
--- a/drivers/net/phy/bcm-cygnus.c
+++ b/drivers/net/phy/bcm-cygnus.c
@@ -104,7 +104,7 @@ static int bcm_cygnus_config_init(struct phy_device *phydev)
return rc;
/* Advertise EEE */
- rc = bcm_phy_enable_eee(phydev);
+ rc = bcm_phy_set_eee(phydev, true);
if (rc)
return rc;
@@ -134,8 +134,7 @@ static struct phy_driver bcm_cygnus_phy_driver[] = {
.phy_id = PHY_ID_BCM_CYGNUS,
.phy_id_mask = 0xfffffff0,
.name = "Broadcom Cygnus PHY",
- .features = PHY_GBIT_FEATURES |
- SUPPORTED_Pause | SUPPORTED_Asym_Pause,
+ .features = PHY_GBIT_FEATURES,
.config_init = bcm_cygnus_config_init,
.config_aneg = genphy_config_aneg,
.read_status = genphy_read_status,
diff --git a/drivers/net/phy/bcm-phy-lib.c b/drivers/net/phy/bcm-phy-lib.c
index df0416db0b88..ab9ad689617c 100644
--- a/drivers/net/phy/bcm-phy-lib.c
+++ b/drivers/net/phy/bcm-phy-lib.c
@@ -17,6 +17,7 @@
#include <linux/mdio.h>
#include <linux/module.h>
#include <linux/phy.h>
+#include <linux/ethtool.h>
#define MII_BCM_CHANNEL_WIDTH 0x2000
#define BCM_CL45VEN_EEE_ADV 0x3c
@@ -50,6 +51,23 @@ int bcm_phy_read_exp(struct phy_device *phydev, u16 reg)
}
EXPORT_SYMBOL_GPL(bcm_phy_read_exp);
+int bcm54xx_auxctl_read(struct phy_device *phydev, u16 regnum)
+{
+ /* The register number must be written to both the Shadow Register
+ * Select and the Shadow Read Register Selector fields
+ */
+ phy_write(phydev, MII_BCM54XX_AUX_CTL, regnum |
+ regnum << MII_BCM54XX_AUXCTL_SHDWSEL_READ_SHIFT);
+ return phy_read(phydev, MII_BCM54XX_AUX_CTL);
+}
+EXPORT_SYMBOL_GPL(bcm54xx_auxctl_read);
+
+int bcm54xx_auxctl_write(struct phy_device *phydev, u16 regnum, u16 val)
+{
+ return phy_write(phydev, MII_BCM54XX_AUX_CTL, regnum | val);
+}
+EXPORT_SYMBOL(bcm54xx_auxctl_write);
+
int bcm_phy_write_misc(struct phy_device *phydev,
u16 reg, u16 chl, u16 val)
{
@@ -178,7 +196,7 @@ int bcm_phy_enable_apd(struct phy_device *phydev, bool dll_pwr_down)
}
EXPORT_SYMBOL_GPL(bcm_phy_enable_apd);
-int bcm_phy_enable_eee(struct phy_device *phydev)
+int bcm_phy_set_eee(struct phy_device *phydev, bool enable)
{
int val;
@@ -188,7 +206,10 @@ int bcm_phy_enable_eee(struct phy_device *phydev)
if (val < 0)
return val;
- val |= LPI_FEATURE_EN | LPI_FEATURE_EN_DIG1000X;
+ if (enable)
+ val |= LPI_FEATURE_EN | LPI_FEATURE_EN_DIG1000X;
+ else
+ val &= ~(LPI_FEATURE_EN | LPI_FEATURE_EN_DIG1000X);
phy_write_mmd_indirect(phydev, BRCM_CL45VEN_EEE_CONTROL,
MDIO_MMD_AN, (u32)val);
@@ -199,14 +220,172 @@ int bcm_phy_enable_eee(struct phy_device *phydev)
if (val < 0)
return val;
- val |= (MDIO_AN_EEE_ADV_100TX | MDIO_AN_EEE_ADV_1000T);
+ if (enable)
+ val |= (MDIO_AN_EEE_ADV_100TX | MDIO_AN_EEE_ADV_1000T);
+ else
+ val &= ~(MDIO_AN_EEE_ADV_100TX | MDIO_AN_EEE_ADV_1000T);
phy_write_mmd_indirect(phydev, BCM_CL45VEN_EEE_ADV,
MDIO_MMD_AN, (u32)val);
return 0;
}
-EXPORT_SYMBOL_GPL(bcm_phy_enable_eee);
+EXPORT_SYMBOL_GPL(bcm_phy_set_eee);
+
+int bcm_phy_downshift_get(struct phy_device *phydev, u8 *count)
+{
+ int val;
+
+ val = bcm54xx_auxctl_read(phydev, MII_BCM54XX_AUXCTL_SHDWSEL_MISC);
+ if (val < 0)
+ return val;
+
+ /* Check if wirespeed is enabled or not */
+ if (!(val & MII_BCM54XX_AUXCTL_SHDWSEL_MISC_WIRESPEED_EN)) {
+ *count = DOWNSHIFT_DEV_DISABLE;
+ return 0;
+ }
+
+ val = bcm_phy_read_shadow(phydev, BCM54XX_SHD_SCR2);
+ if (val < 0)
+ return val;
+
+ /* Downgrade after one link attempt */
+ if (val & BCM54XX_SHD_SCR2_WSPD_RTRY_DIS) {
+ *count = 1;
+ } else {
+ /* Downgrade after configured retry count */
+ val >>= BCM54XX_SHD_SCR2_WSPD_RTRY_LMT_SHIFT;
+ val &= BCM54XX_SHD_SCR2_WSPD_RTRY_LMT_MASK;
+ *count = val + BCM54XX_SHD_SCR2_WSPD_RTRY_LMT_OFFSET;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(bcm_phy_downshift_get);
+
+int bcm_phy_downshift_set(struct phy_device *phydev, u8 count)
+{
+ int val = 0, ret = 0;
+
+ /* Range check the number given */
+ if (count - BCM54XX_SHD_SCR2_WSPD_RTRY_LMT_OFFSET >
+ BCM54XX_SHD_SCR2_WSPD_RTRY_LMT_MASK &&
+ count != DOWNSHIFT_DEV_DEFAULT_COUNT) {
+ return -ERANGE;
+ }
+
+ val = bcm54xx_auxctl_read(phydev, MII_BCM54XX_AUXCTL_SHDWSEL_MISC);
+ if (val < 0)
+ return val;
+
+ /* Set the write enable bit */
+ val |= MII_BCM54XX_AUXCTL_MISC_WREN;
+
+ if (count == DOWNSHIFT_DEV_DISABLE) {
+ val &= ~MII_BCM54XX_AUXCTL_SHDWSEL_MISC_WIRESPEED_EN;
+ return bcm54xx_auxctl_write(phydev,
+ MII_BCM54XX_AUXCTL_SHDWSEL_MISC,
+ val);
+ } else {
+ val |= MII_BCM54XX_AUXCTL_SHDWSEL_MISC_WIRESPEED_EN;
+ ret = bcm54xx_auxctl_write(phydev,
+ MII_BCM54XX_AUXCTL_SHDWSEL_MISC,
+ val);
+ if (ret < 0)
+ return ret;
+ }
+
+ val = bcm_phy_read_shadow(phydev, BCM54XX_SHD_SCR2);
+ val &= ~(BCM54XX_SHD_SCR2_WSPD_RTRY_LMT_MASK <<
+ BCM54XX_SHD_SCR2_WSPD_RTRY_LMT_SHIFT |
+ BCM54XX_SHD_SCR2_WSPD_RTRY_DIS);
+
+ switch (count) {
+ case 1:
+ val |= BCM54XX_SHD_SCR2_WSPD_RTRY_DIS;
+ break;
+ case DOWNSHIFT_DEV_DEFAULT_COUNT:
+ val |= 1 << BCM54XX_SHD_SCR2_WSPD_RTRY_LMT_SHIFT;
+ break;
+ default:
+ val |= (count - BCM54XX_SHD_SCR2_WSPD_RTRY_LMT_OFFSET) <<
+ BCM54XX_SHD_SCR2_WSPD_RTRY_LMT_SHIFT;
+ break;
+ }
+
+ return bcm_phy_write_shadow(phydev, BCM54XX_SHD_SCR2, val);
+}
+EXPORT_SYMBOL_GPL(bcm_phy_downshift_set);
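
As a worked example of the encoding above (hedged: the offset and field-width values are assumptions for illustration; the authoritative constants live in include/linux/brcmphy.h):

/* Illustration only: assume RTRY_LMT_OFFSET == 2 and a 3-bit RTRY_LMT
 * field. Then bcm_phy_downshift_set() encodes:
 *
 *   count == DOWNSHIFT_DEV_DISABLE        -> wirespeed enable bit cleared
 *   count == 1                            -> WSPD_RTRY_DIS bit set
 *   count == DOWNSHIFT_DEV_DEFAULT_COUNT  -> field = 1
 *   count == 5                            -> field = 5 - 2 = 3
 *
 * and bcm_phy_downshift_get() reverses it: a field value of 3 reads
 * back as 3 + 2 = 5 link attempts before downshifting.
 */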
+
+struct bcm_phy_hw_stat {
+ const char *string;
+ u8 reg;
+ u8 shift;
+ u8 bits;
+};
+
+/* Counters freeze at either 0xffff or 0xff, better than nothing */
+static const struct bcm_phy_hw_stat bcm_phy_hw_stats[] = {
+ { "phy_receive_errors", MII_BRCM_CORE_BASE12, 0, 16 },
+ { "phy_serdes_ber_errors", MII_BRCM_CORE_BASE13, 8, 8 },
+ { "phy_false_carrier_sense_errors", MII_BRCM_CORE_BASE13, 0, 8 },
+ { "phy_local_rcvr_nok", MII_BRCM_CORE_BASE14, 8, 8 },
+ { "phy_remote_rcv_nok", MII_BRCM_CORE_BASE14, 0, 8 },
+};
+
+int bcm_phy_get_sset_count(struct phy_device *phydev)
+{
+ return ARRAY_SIZE(bcm_phy_hw_stats);
+}
+EXPORT_SYMBOL_GPL(bcm_phy_get_sset_count);
+
+void bcm_phy_get_strings(struct phy_device *phydev, u8 *data)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(bcm_phy_hw_stats); i++)
+ memcpy(data + i * ETH_GSTRING_LEN,
+ bcm_phy_hw_stats[i].string, ETH_GSTRING_LEN);
+}
+EXPORT_SYMBOL_GPL(bcm_phy_get_strings);
+
+#ifndef UINT64_MAX
+#define UINT64_MAX (u64)(~((u64)0))
+#endif
+
+/* The caller must provide storage for the shadow counters that the
+ * library code accumulates into
+ */
+static u64 bcm_phy_get_stat(struct phy_device *phydev, u64 *shadow,
+ unsigned int i)
+{
+ struct bcm_phy_hw_stat stat = bcm_phy_hw_stats[i];
+ int val;
+ u64 ret;
+
+ val = phy_read(phydev, stat.reg);
+ if (val < 0) {
+ ret = UINT64_MAX;
+ } else {
+ val >>= stat.shift;
+ val = val & ((1 << stat.bits) - 1);
+ shadow[i] += val;
+ ret = shadow[i];
+ }
+
+ return ret;
+}
+
+void bcm_phy_get_stats(struct phy_device *phydev, u64 *shadow,
+ struct ethtool_stats *stats, u64 *data)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(bcm_phy_hw_stats); i++)
+ data[i] = bcm_phy_get_stat(phydev, shadow, i);
+}
+EXPORT_SYMBOL_GPL(bcm_phy_get_stats);
MODULE_DESCRIPTION("Broadcom PHY Library");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/phy/bcm-phy-lib.h b/drivers/net/phy/bcm-phy-lib.h
index b2091c88b44d..7c73808cbbde 100644
--- a/drivers/net/phy/bcm-phy-lib.h
+++ b/drivers/net/phy/bcm-phy-lib.h
@@ -19,6 +19,9 @@
int bcm_phy_write_exp(struct phy_device *phydev, u16 reg, u16 val);
int bcm_phy_read_exp(struct phy_device *phydev, u16 reg);
+int bcm54xx_auxctl_write(struct phy_device *phydev, u16 regnum, u16 val);
+int bcm54xx_auxctl_read(struct phy_device *phydev, u16 regnum);
+
int bcm_phy_write_misc(struct phy_device *phydev,
u16 reg, u16 chl, u16 value);
int bcm_phy_read_misc(struct phy_device *phydev,
@@ -33,5 +36,15 @@ int bcm_phy_config_intr(struct phy_device *phydev);
int bcm_phy_enable_apd(struct phy_device *phydev, bool dll_pwr_down);
-int bcm_phy_enable_eee(struct phy_device *phydev);
+int bcm_phy_set_eee(struct phy_device *phydev, bool enable);
+
+int bcm_phy_downshift_get(struct phy_device *phydev, u8 *count);
+
+int bcm_phy_downshift_set(struct phy_device *phydev, u8 count);
+
+int bcm_phy_get_sset_count(struct phy_device *phydev);
+void bcm_phy_get_strings(struct phy_device *phydev, u8 *data);
+void bcm_phy_get_stats(struct phy_device *phydev, u64 *shadow,
+ struct ethtool_stats *stats, u64 *data);
+
#endif /* _LINUX_BCM_PHY_LIB_H */
diff --git a/drivers/net/phy/bcm7xxx.c b/drivers/net/phy/bcm7xxx.c
index 9636da0b6efc..264b085d796b 100644
--- a/drivers/net/phy/bcm7xxx.c
+++ b/drivers/net/phy/bcm7xxx.c
@@ -45,6 +45,10 @@
#define AFE_VDAC_OTHERS_0 MISC_ADDR(0x39, 3)
#define AFE_HPF_TRIM_OTHERS MISC_ADDR(0x3a, 0)
+struct bcm7xxx_phy_priv {
+ u64 *stats;
+};
+
static void r_rc_cal_reset(struct phy_device *phydev)
{
/* Reset R_CAL/RC_CAL Engine */
@@ -167,6 +171,7 @@ static int bcm7xxx_28nm_config_init(struct phy_device *phydev)
{
u8 rev = PHY_BRCM_7XXX_REV(phydev->dev_flags);
u8 patch = PHY_BRCM_7XXX_PATCH(phydev->dev_flags);
+ u8 count;
int ret = 0;
pr_info_once("%s: %s PHY revision: 0x%02x, patch: %d\n",
@@ -199,7 +204,12 @@ static int bcm7xxx_28nm_config_init(struct phy_device *phydev)
if (ret)
return ret;
- ret = bcm_phy_enable_eee(phydev);
+ ret = bcm_phy_downshift_get(phydev, &count);
+ if (ret)
+ return ret;
+
+ /* Only enable EEE if Wirespeed/downshift is disabled */
+ ret = bcm_phy_set_eee(phydev, count == DOWNSHIFT_DEV_DISABLE);
if (ret)
return ret;
@@ -303,18 +313,91 @@ static int bcm7xxx_suspend(struct phy_device *phydev)
return 0;
}
+static int bcm7xxx_28nm_get_tunable(struct phy_device *phydev,
+ struct ethtool_tunable *tuna,
+ void *data)
+{
+ switch (tuna->id) {
+ case ETHTOOL_PHY_DOWNSHIFT:
+ return bcm_phy_downshift_get(phydev, (u8 *)data);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int bcm7xxx_28nm_set_tunable(struct phy_device *phydev,
+ struct ethtool_tunable *tuna,
+ const void *data)
+{
+ u8 count = *(u8 *)data;
+ int ret;
+
+ switch (tuna->id) {
+ case ETHTOOL_PHY_DOWNSHIFT:
+ ret = bcm_phy_downshift_set(phydev, count);
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ if (ret)
+ return ret;
+
+ /* Disable EEE advertisement since this prevents the PHY
+ * from successfully linking up; trigger an auto-negotiation
+ * restart to let the MAC decide what to do.
+ */
+ ret = bcm_phy_set_eee(phydev, count == DOWNSHIFT_DEV_DISABLE);
+ if (ret)
+ return ret;
+
+ return genphy_restart_aneg(phydev);
+}
+
+static void bcm7xxx_28nm_get_phy_stats(struct phy_device *phydev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct bcm7xxx_phy_priv *priv = phydev->priv;
+
+ bcm_phy_get_stats(phydev, priv->stats, stats, data);
+}
+
+static int bcm7xxx_28nm_probe(struct phy_device *phydev)
+{
+ struct bcm7xxx_phy_priv *priv;
+
+ priv = devm_kzalloc(&phydev->mdio.dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ phydev->priv = priv;
+
+ priv->stats = devm_kcalloc(&phydev->mdio.dev,
+ bcm_phy_get_sset_count(phydev), sizeof(u64),
+ GFP_KERNEL);
+ if (!priv->stats)
+ return -ENOMEM;
+
+ return 0;
+}
+
#define BCM7XXX_28NM_GPHY(_oui, _name) \
{ \
.phy_id = (_oui), \
.phy_id_mask = 0xfffffff0, \
.name = _name, \
- .features = PHY_GBIT_FEATURES | \
- SUPPORTED_Pause | SUPPORTED_Asym_Pause, \
+ .features = PHY_GBIT_FEATURES, \
.flags = PHY_IS_INTERNAL, \
.config_init = bcm7xxx_28nm_config_init, \
.config_aneg = genphy_config_aneg, \
.read_status = genphy_read_status, \
.resume = bcm7xxx_28nm_resume, \
+ .get_tunable = bcm7xxx_28nm_get_tunable, \
+ .set_tunable = bcm7xxx_28nm_set_tunable, \
+ .get_sset_count = bcm_phy_get_sset_count, \
+ .get_strings = bcm_phy_get_strings, \
+ .get_stats = bcm7xxx_28nm_get_phy_stats, \
+ .probe = bcm7xxx_28nm_probe, \
}
#define BCM7XXX_40NM_EPHY(_oui, _name) \
@@ -322,8 +405,7 @@ static int bcm7xxx_suspend(struct phy_device *phydev)
.phy_id = (_oui), \
.phy_id_mask = 0xfffffff0, \
.name = _name, \
- .features = PHY_BASIC_FEATURES | \
- SUPPORTED_Pause | SUPPORTED_Asym_Pause, \
+ .features = PHY_BASIC_FEATURES, \
.flags = PHY_IS_INTERNAL, \
.config_init = bcm7xxx_config_init, \
.config_aneg = genphy_config_aneg, \
diff --git a/drivers/net/phy/broadcom.c b/drivers/net/phy/broadcom.c
index 870327efccf7..4223e35490b0 100644
--- a/drivers/net/phy/broadcom.c
+++ b/drivers/net/phy/broadcom.c
@@ -18,7 +18,7 @@
#include <linux/module.h>
#include <linux/phy.h>
#include <linux/brcmphy.h>
-
+#include <linux/of.h>
#define BRCM_PHY_MODEL(phydev) \
((phydev)->drv->phy_id & (phydev)->drv->phy_id_mask)
@@ -30,9 +30,32 @@ MODULE_DESCRIPTION("Broadcom PHY driver");
MODULE_AUTHOR("Maciej W. Rozycki");
MODULE_LICENSE("GPL");
-static int bcm54xx_auxctl_write(struct phy_device *phydev, u16 regnum, u16 val)
+static int bcm54810_config(struct phy_device *phydev)
{
- return phy_write(phydev, MII_BCM54XX_AUX_CTL, regnum | val);
+ int rc, val;
+
+ val = bcm_phy_read_exp(phydev, BCM54810_EXP_BROADREACH_LRE_MISC_CTL);
+ val &= ~BCM54810_EXP_BROADREACH_LRE_MISC_CTL_EN;
+ rc = bcm_phy_write_exp(phydev, BCM54810_EXP_BROADREACH_LRE_MISC_CTL,
+ val);
+ if (rc < 0)
+ return rc;
+
+ val = bcm54xx_auxctl_read(phydev, MII_BCM54XX_AUXCTL_SHDWSEL_MISC);
+ val &= ~MII_BCM54XX_AUXCTL_SHDWSEL_MISC_RGMII_SKEW_EN;
+ val |= MII_BCM54XX_AUXCTL_MISC_WREN;
+ rc = bcm54xx_auxctl_write(phydev, MII_BCM54XX_AUXCTL_SHDWSEL_MISC,
+ val);
+ if (rc < 0)
+ return rc;
+
+ val = bcm_phy_read_shadow(phydev, BCM54810_SHD_CLK_CTL);
+ val &= ~BCM54810_SHD_CLK_CTL_GTXCLK_EN;
+ rc = bcm_phy_write_shadow(phydev, BCM54810_SHD_CLK_CTL, val);
+ if (rc < 0)
+ return rc;
+
+ return 0;
}
/* Needs SMDSP clock enabled via bcm54xx_phydsp_config() */
@@ -207,6 +230,12 @@ static int bcm54xx_config_init(struct phy_device *phydev)
(phydev->dev_flags & PHY_BRCM_AUTO_PWRDWN_ENABLE))
bcm54xx_adjust_rxrefclk(phydev);
+ if (BRCM_PHY_MODEL(phydev) == PHY_ID_BCM54810) {
+ err = bcm54810_config(phydev);
+ if (err)
+ return err;
+ }
+
bcm54xx_phydsp_config(phydev);
return 0;
@@ -304,6 +333,7 @@ static int bcm5482_read_status(struct phy_device *phydev)
static int bcm5481_config_aneg(struct phy_device *phydev)
{
+ struct device_node *np = phydev->mdio.dev.of_node;
int ret;
/* Aneg first. */
@@ -334,6 +364,49 @@ static int bcm5481_config_aneg(struct phy_device *phydev)
phy_write(phydev, 0x18, reg);
}
+ if (of_property_read_bool(np, "enet-phy-lane-swap")) {
+ /* Lane Swap - Undocumented register...magic! */
+ ret = bcm_phy_write_exp(phydev, MII_BCM54XX_EXP_SEL_ER + 0x9,
+ 0x11B);
+ if (ret < 0)
+ return ret;
+ }
+
+ return ret;
+}
+
+static int bcm54612e_config_aneg(struct phy_device *phydev)
+{
+ int ret;
+
+ /* First, auto-negotiate. */
+ ret = genphy_config_aneg(phydev);
+
+ /* Clear TX internal delay unless requested. */
+ if ((phydev->interface != PHY_INTERFACE_MODE_RGMII_ID) &&
+ (phydev->interface != PHY_INTERFACE_MODE_RGMII_TXID)) {
+ /* Disable TXD to GTXCLK clock delay (default set) */
+ /* Bit 9 is the only field in shadow register 00011 */
+ bcm_phy_write_shadow(phydev, 0x03, 0);
+ }
+
+ /* Clear RX internal delay unless requested. */
+ if ((phydev->interface != PHY_INTERFACE_MODE_RGMII_ID) &&
+ (phydev->interface != PHY_INTERFACE_MODE_RGMII_RXID)) {
+ u16 reg;
+
+ /* Errata: reads require filling in the write selector field */
+ bcm54xx_auxctl_write(phydev, MII_BCM54XX_AUXCTL_SHDWSEL_MISC,
+ MII_BCM54XX_AUXCTL_MISC_RDSEL_MISC);
+ reg = phy_read(phydev, MII_BCM54XX_AUX_CTL);
+ /* Disable RXD to RXC delay (default set) */
+ reg &= ~MII_BCM54XX_AUXCTL_MISC_RXD_RXC_SKEW;
+ /* Clear shadow selector field */
+ reg &= ~MII_BCM54XX_AUXCTL_SHDWSEL_MASK;
+ bcm54xx_auxctl_write(phydev, MII_BCM54XX_AUXCTL_SHDWSEL_MISC,
+ MII_BCM54XX_AUXCTL_MISC_WREN | reg);
+ }
+
return ret;
}
@@ -452,8 +525,7 @@ static struct phy_driver broadcom_drivers[] = {
.phy_id = PHY_ID_BCM5411,
.phy_id_mask = 0xfffffff0,
.name = "Broadcom BCM5411",
- .features = PHY_GBIT_FEATURES |
- SUPPORTED_Pause | SUPPORTED_Asym_Pause,
+ .features = PHY_GBIT_FEATURES,
.flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
.config_init = bcm54xx_config_init,
.config_aneg = genphy_config_aneg,
@@ -464,8 +536,7 @@ static struct phy_driver broadcom_drivers[] = {
.phy_id = PHY_ID_BCM5421,
.phy_id_mask = 0xfffffff0,
.name = "Broadcom BCM5421",
- .features = PHY_GBIT_FEATURES |
- SUPPORTED_Pause | SUPPORTED_Asym_Pause,
+ .features = PHY_GBIT_FEATURES,
.flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
.config_init = bcm54xx_config_init,
.config_aneg = genphy_config_aneg,
@@ -476,8 +547,7 @@ static struct phy_driver broadcom_drivers[] = {
.phy_id = PHY_ID_BCM5461,
.phy_id_mask = 0xfffffff0,
.name = "Broadcom BCM5461",
- .features = PHY_GBIT_FEATURES |
- SUPPORTED_Pause | SUPPORTED_Asym_Pause,
+ .features = PHY_GBIT_FEATURES,
.flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
.config_init = bcm54xx_config_init,
.config_aneg = genphy_config_aneg,
@@ -485,11 +555,21 @@ static struct phy_driver broadcom_drivers[] = {
.ack_interrupt = bcm_phy_ack_intr,
.config_intr = bcm_phy_config_intr,
}, {
+ .phy_id = PHY_ID_BCM54612E,
+ .phy_id_mask = 0xfffffff0,
+ .name = "Broadcom BCM54612E",
+ .features = PHY_GBIT_FEATURES,
+ .flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
+ .config_init = bcm54xx_config_init,
+ .config_aneg = bcm54612e_config_aneg,
+ .read_status = genphy_read_status,
+ .ack_interrupt = bcm_phy_ack_intr,
+ .config_intr = bcm_phy_config_intr,
+}, {
.phy_id = PHY_ID_BCM54616S,
.phy_id_mask = 0xfffffff0,
.name = "Broadcom BCM54616S",
- .features = PHY_GBIT_FEATURES |
- SUPPORTED_Pause | SUPPORTED_Asym_Pause,
+ .features = PHY_GBIT_FEATURES,
.flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
.config_init = bcm54xx_config_init,
.config_aneg = genphy_config_aneg,
@@ -500,8 +580,7 @@ static struct phy_driver broadcom_drivers[] = {
.phy_id = PHY_ID_BCM5464,
.phy_id_mask = 0xfffffff0,
.name = "Broadcom BCM5464",
- .features = PHY_GBIT_FEATURES |
- SUPPORTED_Pause | SUPPORTED_Asym_Pause,
+ .features = PHY_GBIT_FEATURES,
.flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
.config_init = bcm54xx_config_init,
.config_aneg = genphy_config_aneg,
@@ -512,8 +591,7 @@ static struct phy_driver broadcom_drivers[] = {
.phy_id = PHY_ID_BCM5481,
.phy_id_mask = 0xfffffff0,
.name = "Broadcom BCM5481",
- .features = PHY_GBIT_FEATURES |
- SUPPORTED_Pause | SUPPORTED_Asym_Pause,
+ .features = PHY_GBIT_FEATURES,
.flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
.config_init = bcm54xx_config_init,
.config_aneg = bcm5481_config_aneg,
@@ -521,11 +599,21 @@ static struct phy_driver broadcom_drivers[] = {
.ack_interrupt = bcm_phy_ack_intr,
.config_intr = bcm_phy_config_intr,
}, {
+ .phy_id = PHY_ID_BCM54810,
+ .phy_id_mask = 0xfffffff0,
+ .name = "Broadcom BCM54810",
+ .features = PHY_GBIT_FEATURES,
+ .flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
+ .config_init = bcm54xx_config_init,
+ .config_aneg = bcm5481_config_aneg,
+ .read_status = genphy_read_status,
+ .ack_interrupt = bcm_phy_ack_intr,
+ .config_intr = bcm_phy_config_intr,
+}, {
.phy_id = PHY_ID_BCM5482,
.phy_id_mask = 0xfffffff0,
.name = "Broadcom BCM5482",
- .features = PHY_GBIT_FEATURES |
- SUPPORTED_Pause | SUPPORTED_Asym_Pause,
+ .features = PHY_GBIT_FEATURES,
.flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
.config_init = bcm5482_config_init,
.config_aneg = genphy_config_aneg,
@@ -536,8 +624,7 @@ static struct phy_driver broadcom_drivers[] = {
.phy_id = PHY_ID_BCM50610,
.phy_id_mask = 0xfffffff0,
.name = "Broadcom BCM50610",
- .features = PHY_GBIT_FEATURES |
- SUPPORTED_Pause | SUPPORTED_Asym_Pause,
+ .features = PHY_GBIT_FEATURES,
.flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
.config_init = bcm54xx_config_init,
.config_aneg = genphy_config_aneg,
@@ -548,8 +635,7 @@ static struct phy_driver broadcom_drivers[] = {
.phy_id = PHY_ID_BCM50610M,
.phy_id_mask = 0xfffffff0,
.name = "Broadcom BCM50610M",
- .features = PHY_GBIT_FEATURES |
- SUPPORTED_Pause | SUPPORTED_Asym_Pause,
+ .features = PHY_GBIT_FEATURES,
.flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
.config_init = bcm54xx_config_init,
.config_aneg = genphy_config_aneg,
@@ -560,8 +646,7 @@ static struct phy_driver broadcom_drivers[] = {
.phy_id = PHY_ID_BCM57780,
.phy_id_mask = 0xfffffff0,
.name = "Broadcom BCM57780",
- .features = PHY_GBIT_FEATURES |
- SUPPORTED_Pause | SUPPORTED_Asym_Pause,
+ .features = PHY_GBIT_FEATURES,
.flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
.config_init = bcm54xx_config_init,
.config_aneg = genphy_config_aneg,
@@ -572,8 +657,7 @@ static struct phy_driver broadcom_drivers[] = {
.phy_id = PHY_ID_BCMAC131,
.phy_id_mask = 0xfffffff0,
.name = "Broadcom BCMAC131",
- .features = PHY_BASIC_FEATURES |
- SUPPORTED_Pause | SUPPORTED_Asym_Pause,
+ .features = PHY_BASIC_FEATURES,
.flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
.config_init = brcm_fet_config_init,
.config_aneg = genphy_config_aneg,
@@ -584,8 +668,7 @@ static struct phy_driver broadcom_drivers[] = {
.phy_id = PHY_ID_BCM5241,
.phy_id_mask = 0xfffffff0,
.name = "Broadcom BCM5241",
- .features = PHY_BASIC_FEATURES |
- SUPPORTED_Pause | SUPPORTED_Asym_Pause,
+ .features = PHY_BASIC_FEATURES,
.flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
.config_init = brcm_fet_config_init,
.config_aneg = genphy_config_aneg,
@@ -600,9 +683,11 @@ static struct mdio_device_id __maybe_unused broadcom_tbl[] = {
{ PHY_ID_BCM5411, 0xfffffff0 },
{ PHY_ID_BCM5421, 0xfffffff0 },
{ PHY_ID_BCM5461, 0xfffffff0 },
+ { PHY_ID_BCM54612E, 0xfffffff0 },
{ PHY_ID_BCM54616S, 0xfffffff0 },
{ PHY_ID_BCM5464, 0xfffffff0 },
{ PHY_ID_BCM5481, 0xfffffff0 },
+ { PHY_ID_BCM54810, 0xfffffff0 },
{ PHY_ID_BCM5482, 0xfffffff0 },
{ PHY_ID_BCM50610, 0xfffffff0 },
{ PHY_ID_BCM50610M, 0xfffffff0 },
diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c
index 7a240fce3a7e..e2460a57e4b1 100644
--- a/drivers/net/phy/dp83640.c
+++ b/drivers/net/phy/dp83640.c
@@ -375,7 +375,7 @@ static int periodic_output(struct dp83640_clock *clock,
/* ptp clock methods */
-static int ptp_dp83640_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
+static int ptp_dp83640_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
{
struct dp83640_clock *clock =
container_of(ptp, struct dp83640_clock, caps);
@@ -384,13 +384,13 @@ static int ptp_dp83640_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
int neg_adj = 0;
u16 hi, lo;
- if (ppb < 0) {
+ if (scaled_ppm < 0) {
neg_adj = 1;
- ppb = -ppb;
+ scaled_ppm = -scaled_ppm;
}
- rate = ppb;
- rate <<= 26;
- rate = div_u64(rate, 1953125);
+ rate = scaled_ppm;
+ rate <<= 13;
+ rate = div_u64(rate, 15625);
hi = (rate >> 16) & PTP_RATE_HI_MASK;
if (neg_adj)
@@ -1035,7 +1035,7 @@ static void dp83640_clock_init(struct dp83640_clock *clock, struct mii_bus *bus)
clock->caps.n_per_out = N_PER_OUT;
clock->caps.n_pins = DP83640_N_PINS;
clock->caps.pps = 0;
- clock->caps.adjfreq = ptp_dp83640_adjfreq;
+ clock->caps.adjfine = ptp_dp83640_adjfine;
clock->caps.adjtime = ptp_dp83640_adjtime;
clock->caps.gettime64 = ptp_dp83640_gettime;
clock->caps.settime64 = ptp_dp83640_settime;
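
The constant changes above follow directly from the two callbacks' units: adjfreq took parts-per-billion, while adjfine takes parts-per-million with a 16-bit binary fraction (scaled_ppm). The derivation, using only the factors visible in the hunk:

/* ppb = scaled_ppm * 1000 / 2^16, so
 *
 *   rate = ppb * 2^26 / 1953125
 *        = (scaled_ppm * 1000 / 2^16) * 2^26 / 1953125
 *        = scaled_ppm * 1000 * 2^10 / 1953125
 *        = scaled_ppm * 2^3 * 2^10 / 15625    (1953125 = 125 * 15625,
 *                                              1000 / 125 = 8 = 2^3)
 *        = scaled_ppm * 2^13 / 15625
 *
 * i.e. both versions program the same value into the PTP_RATE_HI/LO
 * registers for a given frequency offset.
 */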
diff --git a/drivers/net/phy/dp83867.c b/drivers/net/phy/dp83867.c
index 91177a4a32ad..1b639242f9e2 100644
--- a/drivers/net/phy/dp83867.c
+++ b/drivers/net/phy/dp83867.c
@@ -33,6 +33,7 @@
/* Extended Registers */
#define DP83867_RGMIICTL 0x0032
#define DP83867_RGMIIDCTL 0x0086
+#define DP83867_IO_MUX_CFG 0x0170
#define DP83867_SW_RESET BIT(15)
#define DP83867_SW_RESTART BIT(14)
@@ -62,10 +63,17 @@
/* RGMIIDCTL bits */
#define DP83867_RGMII_TX_CLK_DELAY_SHIFT 4
+/* IO_MUX_CFG bits */
+#define DP83867_IO_MUX_CFG_IO_IMPEDANCE_CTRL 0x1f
+
+#define DP83867_IO_MUX_CFG_IO_IMPEDANCE_MAX 0x0
+#define DP83867_IO_MUX_CFG_IO_IMPEDANCE_MIN 0x1f
+
struct dp83867_private {
int rx_id_delay;
int tx_id_delay;
int fifo_depth;
+ int io_impedance;
};
static int dp83867_ack_interrupt(struct phy_device *phydev)
@@ -111,6 +119,14 @@ static int dp83867_of_init(struct phy_device *phydev)
if (!of_node)
return -ENODEV;
+ dp83867->io_impedance = -EINVAL;
+
+ /* Optional configuration */
+ if (of_property_read_bool(of_node, "ti,max-output-impedance"))
+ dp83867->io_impedance = DP83867_IO_MUX_CFG_IO_IMPEDANCE_MAX;
+ else if (of_property_read_bool(of_node, "ti,min-output-impedance"))
+ dp83867->io_impedance = DP83867_IO_MUX_CFG_IO_IMPEDANCE_MIN;
+
ret = of_property_read_u32(of_node, "ti,rx-internal-delay",
&dp83867->rx_id_delay);
if (ret)
@@ -184,6 +200,18 @@ static int dp83867_config_init(struct phy_device *phydev)
phy_write_mmd_indirect(phydev, DP83867_RGMIIDCTL,
DP83867_DEVADDR, delay);
+
+ if (dp83867->io_impedance >= 0) {
+ val = phy_read_mmd_indirect(phydev, DP83867_IO_MUX_CFG,
+ DP83867_DEVADDR);
+
+ val &= ~DP83867_IO_MUX_CFG_IO_IMPEDANCE_CTRL;
+ val |= dp83867->io_impedance &
+ DP83867_IO_MUX_CFG_IO_IMPEDANCE_CTRL;
+
+ phy_write_mmd_indirect(phydev, DP83867_IO_MUX_CFG,
+ DP83867_DEVADDR, val);
+ }
}
return 0;
diff --git a/drivers/net/phy/icplus.c b/drivers/net/phy/icplus.c
index e5f251b91578..22b51f01a94a 100644
--- a/drivers/net/phy/icplus.c
+++ b/drivers/net/phy/icplus.c
@@ -225,8 +225,7 @@ static struct phy_driver icplus_driver[] = {
.phy_id = 0x02430d90,
.name = "ICPlus IP1001",
.phy_id_mask = 0x0ffffff0,
- .features = PHY_GBIT_FEATURES | SUPPORTED_Pause |
- SUPPORTED_Asym_Pause,
+ .features = PHY_GBIT_FEATURES,
.config_init = &ip1001_config_init,
.config_aneg = &genphy_config_aneg,
.read_status = &genphy_read_status,
@@ -236,8 +235,7 @@ static struct phy_driver icplus_driver[] = {
.phy_id = 0x02430c54,
.name = "ICPlus IP101A/G",
.phy_id_mask = 0x0ffffff0,
- .features = PHY_BASIC_FEATURES | SUPPORTED_Pause |
- SUPPORTED_Asym_Pause,
+ .features = PHY_BASIC_FEATURES,
.flags = PHY_HAS_INTERRUPT,
.ack_interrupt = ip101a_g_ack_interrupt,
.config_init = &ip101a_g_config_init,
diff --git a/drivers/net/phy/intel-xway.c b/drivers/net/phy/intel-xway.c
index c300ab5587b8..b1fd7bb0e4db 100644
--- a/drivers/net/phy/intel-xway.c
+++ b/drivers/net/phy/intel-xway.c
@@ -239,8 +239,7 @@ static struct phy_driver xway_gphy[] = {
.phy_id = PHY_ID_PHY11G_1_3,
.phy_id_mask = 0xffffffff,
.name = "Intel XWAY PHY11G (PEF 7071/PEF 7072) v1.3",
- .features = (PHY_GBIT_FEATURES | SUPPORTED_Pause |
- SUPPORTED_Asym_Pause),
+ .features = PHY_GBIT_FEATURES,
.flags = PHY_HAS_INTERRUPT,
.config_init = xway_gphy_config_init,
.config_aneg = xway_gphy14_config_aneg,
@@ -254,8 +253,7 @@ static struct phy_driver xway_gphy[] = {
.phy_id = PHY_ID_PHY22F_1_3,
.phy_id_mask = 0xffffffff,
.name = "Intel XWAY PHY22F (PEF 7061) v1.3",
- .features = (PHY_BASIC_FEATURES | SUPPORTED_Pause |
- SUPPORTED_Asym_Pause),
+ .features = PHY_BASIC_FEATURES,
.flags = PHY_HAS_INTERRUPT,
.config_init = xway_gphy_config_init,
.config_aneg = xway_gphy14_config_aneg,
@@ -269,8 +267,7 @@ static struct phy_driver xway_gphy[] = {
.phy_id = PHY_ID_PHY11G_1_4,
.phy_id_mask = 0xffffffff,
.name = "Intel XWAY PHY11G (PEF 7071/PEF 7072) v1.4",
- .features = (PHY_GBIT_FEATURES | SUPPORTED_Pause |
- SUPPORTED_Asym_Pause),
+ .features = PHY_GBIT_FEATURES,
.flags = PHY_HAS_INTERRUPT,
.config_init = xway_gphy_config_init,
.config_aneg = xway_gphy14_config_aneg,
@@ -284,8 +281,7 @@ static struct phy_driver xway_gphy[] = {
.phy_id = PHY_ID_PHY22F_1_4,
.phy_id_mask = 0xffffffff,
.name = "Intel XWAY PHY22F (PEF 7061) v1.4",
- .features = (PHY_BASIC_FEATURES | SUPPORTED_Pause |
- SUPPORTED_Asym_Pause),
+ .features = PHY_BASIC_FEATURES,
.flags = PHY_HAS_INTERRUPT,
.config_init = xway_gphy_config_init,
.config_aneg = xway_gphy14_config_aneg,
@@ -299,8 +295,7 @@ static struct phy_driver xway_gphy[] = {
.phy_id = PHY_ID_PHY11G_1_5,
.phy_id_mask = 0xffffffff,
.name = "Intel XWAY PHY11G (PEF 7071/PEF 7072) v1.5 / v1.6",
- .features = (PHY_GBIT_FEATURES | SUPPORTED_Pause |
- SUPPORTED_Asym_Pause),
+ .features = PHY_GBIT_FEATURES,
.flags = PHY_HAS_INTERRUPT,
.config_init = xway_gphy_config_init,
.config_aneg = genphy_config_aneg,
@@ -314,8 +309,7 @@ static struct phy_driver xway_gphy[] = {
.phy_id = PHY_ID_PHY22F_1_5,
.phy_id_mask = 0xffffffff,
.name = "Intel XWAY PHY22F (PEF 7061) v1.5 / v1.6",
- .features = (PHY_BASIC_FEATURES | SUPPORTED_Pause |
- SUPPORTED_Asym_Pause),
+ .features = PHY_BASIC_FEATURES,
.flags = PHY_HAS_INTERRUPT,
.config_init = xway_gphy_config_init,
.config_aneg = genphy_config_aneg,
@@ -329,8 +323,7 @@ static struct phy_driver xway_gphy[] = {
.phy_id = PHY_ID_PHY11G_VR9,
.phy_id_mask = 0xffffffff,
.name = "Intel XWAY PHY11G (xRX integrated)",
- .features = (PHY_GBIT_FEATURES | SUPPORTED_Pause |
- SUPPORTED_Asym_Pause),
+ .features = PHY_GBIT_FEATURES,
.flags = PHY_HAS_INTERRUPT,
.config_init = xway_gphy_config_init,
.config_aneg = genphy_config_aneg,
@@ -344,8 +337,7 @@ static struct phy_driver xway_gphy[] = {
.phy_id = PHY_ID_PHY22F_VR9,
.phy_id_mask = 0xffffffff,
.name = "Intel XWAY PHY22F (xRX integrated)",
- .features = (PHY_BASIC_FEATURES | SUPPORTED_Pause |
- SUPPORTED_Asym_Pause),
+ .features = PHY_BASIC_FEATURES,
.flags = PHY_HAS_INTERRUPT,
.config_init = xway_gphy_config_init,
.config_aneg = genphy_config_aneg,
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
index c2dcf02df202..e269262471a4 100644
--- a/drivers/net/phy/marvell.c
+++ b/drivers/net/phy/marvell.c
@@ -268,7 +268,7 @@ static int marvell_config_aneg(struct phy_device *phydev)
if (err < 0)
return err;
- err = marvell_set_polarity(phydev, phydev->mdix);
+ err = marvell_set_polarity(phydev, phydev->mdix_ctrl);
if (err < 0)
return err;
@@ -311,7 +311,7 @@ static int m88e1111_config_aneg(struct phy_device *phydev)
*/
err = phy_write(phydev, MII_BMCR, BMCR_RESET);
- err = marvell_set_polarity(phydev, phydev->mdix);
+ err = marvell_set_polarity(phydev, phydev->mdix_ctrl);
if (err < 0)
return err;
@@ -361,7 +361,7 @@ static int m88e1111_config_aneg(struct phy_device *phydev)
static int marvell_of_reg_init(struct phy_device *phydev)
{
const __be32 *paddr;
- int len, i, saved_page, current_page, page_changed, ret;
+ int len, i, saved_page, current_page, ret;
if (!phydev->mdio.dev.of_node)
return 0;
@@ -374,7 +374,6 @@ static int marvell_of_reg_init(struct phy_device *phydev)
saved_page = phy_read(phydev, MII_MARVELL_PHY_PAGE);
if (saved_page < 0)
return saved_page;
- page_changed = 0;
current_page = saved_page;
ret = 0;
@@ -388,7 +387,6 @@ static int marvell_of_reg_init(struct phy_device *phydev)
if (reg_page != current_page) {
current_page = reg_page;
- page_changed = 1;
ret = phy_write(phydev, MII_MARVELL_PHY_PAGE, reg_page);
if (ret < 0)
goto err;
@@ -411,7 +409,7 @@ static int marvell_of_reg_init(struct phy_device *phydev)
}
err:
- if (page_changed) {
+ if (current_page != saved_page) {
i = phy_write(phydev, MII_MARVELL_PHY_PAGE, saved_page);
if (ret == 0)
ret = i;
diff --git a/drivers/net/phy/mdio-mux-mmioreg.c b/drivers/net/phy/mdio-mux-mmioreg.c
index d0bed52c8d16..6a33646bdf05 100644
--- a/drivers/net/phy/mdio-mux-mmioreg.c
+++ b/drivers/net/phy/mdio-mux-mmioreg.c
@@ -21,7 +21,8 @@
struct mdio_mux_mmioreg_state {
void *mux_handle;
phys_addr_t phys;
- uint8_t mask;
+ unsigned int iosize;
+ unsigned int mask;
};
/*
@@ -47,17 +48,47 @@ static int mdio_mux_mmioreg_switch_fn(int current_child, int desired_child,
struct mdio_mux_mmioreg_state *s = data;
if (current_child ^ desired_child) {
- void __iomem *p = ioremap(s->phys, 1);
- uint8_t x, y;
-
+ void __iomem *p = ioremap(s->phys, s->iosize);
if (!p)
return -ENOMEM;
- x = ioread8(p);
- y = (x & ~s->mask) | desired_child;
- if (x != y) {
- iowrite8((x & ~s->mask) | desired_child, p);
- pr_debug("%s: %02x -> %02x\n", __func__, x, y);
+ switch (s->iosize) {
+ case sizeof(uint8_t): {
+ uint8_t x, y;
+
+ x = ioread8(p);
+ y = (x & ~s->mask) | desired_child;
+ if (x != y) {
+ iowrite8((x & ~s->mask) | desired_child, p);
+ pr_debug("%s: %02x -> %02x\n", __func__, x, y);
+ }
+
+ break;
+ }
+ case sizeof(uint16_t): {
+ uint16_t x, y;
+
+ x = ioread16(p);
+ y = (x & ~s->mask) | desired_child;
+ if (x != y) {
+ iowrite16((x & ~s->mask) | desired_child, p);
+ pr_debug("%s: %04x -> %04x\n", __func__, x, y);
+ }
+
+ break;
+ }
+ case sizeof(uint32_t): {
+ uint32_t x, y;
+
+ x = ioread32(p);
+ y = (x & ~s->mask) | desired_child;
+ if (x != y) {
+ iowrite32((x & ~s->mask) | desired_child, p);
+ pr_debug("%s: %08x -> %08x\n", __func__, x, y);
+ }
+
+ break;
+ }
}
iounmap(p);
@@ -88,8 +119,11 @@ static int mdio_mux_mmioreg_probe(struct platform_device *pdev)
}
s->phys = res.start;
- if (resource_size(&res) != sizeof(uint8_t)) {
- dev_err(&pdev->dev, "only 8-bit registers are supported\n");
+ s->iosize = resource_size(&res);
+ if (s->iosize != sizeof(uint8_t) &&
+ s->iosize != sizeof(uint16_t) &&
+ s->iosize != sizeof(uint32_t)) {
+ dev_err(&pdev->dev, "only 8/16/32-bit registers are supported\n");
return -EINVAL;
}
@@ -98,8 +132,8 @@ static int mdio_mux_mmioreg_probe(struct platform_device *pdev)
dev_err(&pdev->dev, "missing or invalid mux-mask property\n");
return -ENODEV;
}
- if (be32_to_cpup(iprop) > 255) {
- dev_err(&pdev->dev, "only 8-bit registers are supported\n");
+ if (be32_to_cpup(iprop) >= BIT(s->iosize * 8)) {
+ dev_err(&pdev->dev, "only 8/16/32-bit registers are supported\n");
return -EINVAL;
}
s->mask = be32_to_cpup(iprop);
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
index 09deef4bed09..653d076eafe5 100644
--- a/drivers/net/phy/mdio_bus.c
+++ b/drivers/net/phy/mdio_bus.c
@@ -38,6 +38,9 @@
#include <asm/irq.h>
+#define CREATE_TRACE_POINTS
+#include <trace/events/mdio.h>
+
int mdiobus_register_device(struct mdio_device *mdiodev)
{
if (mdiodev->bus->mdio_map[mdiodev->addr])
@@ -461,6 +464,8 @@ int mdiobus_read_nested(struct mii_bus *bus, int addr, u32 regnum)
retval = bus->read(bus, addr, regnum);
mutex_unlock(&bus->mdio_lock);
+ trace_mdio_access(bus, 1, addr, regnum, retval, retval);
+
return retval;
}
EXPORT_SYMBOL(mdiobus_read_nested);
@@ -485,6 +490,8 @@ int mdiobus_read(struct mii_bus *bus, int addr, u32 regnum)
retval = bus->read(bus, addr, regnum);
mutex_unlock(&bus->mdio_lock);
+ trace_mdio_access(bus, 1, addr, regnum, retval, retval);
+
return retval;
}
EXPORT_SYMBOL(mdiobus_read);
@@ -513,6 +520,8 @@ int mdiobus_write_nested(struct mii_bus *bus, int addr, u32 regnum, u16 val)
err = bus->write(bus, addr, regnum, val);
mutex_unlock(&bus->mdio_lock);
+ trace_mdio_access(bus, 0, addr, regnum, val, err);
+
return err;
}
EXPORT_SYMBOL(mdiobus_write_nested);
@@ -538,6 +547,8 @@ int mdiobus_write(struct mii_bus *bus, int addr, u32 regnum, u16 val)
err = bus->write(bus, addr, regnum, val);
mutex_unlock(&bus->mdio_lock);
+ trace_mdio_access(bus, 0, addr, regnum, val, err);
+
return err;
}
EXPORT_SYMBOL(mdiobus_write);
diff --git a/drivers/net/phy/mdio_device.c b/drivers/net/phy/mdio_device.c
index 9c88e6749b9a..43c8fd46504b 100644
--- a/drivers/net/phy/mdio_device.c
+++ b/drivers/net/phy/mdio_device.c
@@ -144,7 +144,7 @@ int mdio_driver_register(struct mdio_driver *drv)
struct mdio_driver_common *mdiodrv = &drv->mdiodrv;
int retval;
- pr_info("mdio_driver_register: %s\n", mdiodrv->driver.name);
+ pr_debug("mdio_driver_register: %s\n", mdiodrv->driver.name);
mdiodrv->driver.bus = &mdio_bus_type;
mdiodrv->driver.probe = mdio_probe;
diff --git a/drivers/net/phy/meson-gxl.c b/drivers/net/phy/meson-gxl.c
new file mode 100644
index 000000000000..1ea69b7585d9
--- /dev/null
+++ b/drivers/net/phy/meson-gxl.c
@@ -0,0 +1,81 @@
+/*
+ * Amlogic Meson GXL Internal PHY Driver
+ *
+ * Copyright (C) 2015 Amlogic, Inc. All rights reserved.
+ * Copyright (C) 2016 BayLibre, SAS. All rights reserved.
+ * Author: Neil Armstrong <narmstrong@baylibre.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mii.h>
+#include <linux/ethtool.h>
+#include <linux/phy.h>
+#include <linux/netdevice.h>
+
+static int meson_gxl_config_init(struct phy_device *phydev)
+{
+ /* Enable Analog and DSP register Bank access by toggling bit 10 of register 0x14 */
+ phy_write(phydev, 0x14, 0x0000);
+ phy_write(phydev, 0x14, 0x0400);
+ phy_write(phydev, 0x14, 0x0000);
+ phy_write(phydev, 0x14, 0x0400);
+
+ /* Write Analog register 23 */
+ phy_write(phydev, 0x17, 0x8E0D);
+ phy_write(phydev, 0x14, 0x4417);
+
+ /* Enable fractional PLL */
+ phy_write(phydev, 0x17, 0x0005);
+ phy_write(phydev, 0x14, 0x5C1B);
+
+ /* Program fraction FR_PLL_DIV1 */
+ phy_write(phydev, 0x17, 0x029A);
+ phy_write(phydev, 0x14, 0x5C1D);
+
+ /* Program fraction FR_PLL_DIV1 */
+ phy_write(phydev, 0x17, 0xAAAA);
+ phy_write(phydev, 0x14, 0x5C1C);
+
+ return 0;
+}
+
+static struct phy_driver meson_gxl_phy[] = {
+ {
+ .phy_id = 0x01814400,
+ .phy_id_mask = 0xfffffff0,
+ .name = "Meson GXL Internal PHY",
+ .features = PHY_BASIC_FEATURES,
+ .flags = PHY_IS_INTERNAL,
+ .config_init = meson_gxl_config_init,
+ .config_aneg = genphy_config_aneg,
+ .aneg_done = genphy_aneg_done,
+ .read_status = genphy_read_status,
+ .suspend = genphy_suspend,
+ .resume = genphy_resume,
+ },
+};
+
+static struct mdio_device_id __maybe_unused meson_gxl_tbl[] = {
+ { 0x01814400, 0xfffffff0 },
+ { }
+};
+
+module_phy_driver(meson_gxl_phy);
+
+MODULE_DEVICE_TABLE(mdio, meson_gxl_tbl);
+
+MODULE_DESCRIPTION("Amlogic Meson GXL Internal PHY driver");
+MODULE_AUTHOR("Baoqi wang");
+MODULE_AUTHOR("Neil Armstrong <narmstrong@baylibre.com>");
+MODULE_LICENSE("GPL");
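
A reading aid for the config_init sequence above (hedged: the helper name and the register roles are inferred from the write pattern in this file, not from a datasheet): each analog/DSP access pairs a data write to register 0x17 with a command write to register 0x14.

#include <linux/phy.h>

/* Hypothetical helper illustrating the banked-write pattern used by
 * meson_gxl_config_init(): stage data in 0x17, then issue the
 * command/address through 0x14.
 */
static void demo_banked_write(struct phy_device *phydev, u16 cmd, u16 data)
{
	phy_write(phydev, 0x17, data);	/* stage the value */
	phy_write(phydev, 0x14, cmd);	/* select bank register and commit */
}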
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index ea92d524d5a8..9a77289109b7 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -790,7 +790,7 @@ static struct phy_driver ksphy_driver[] = {
.phy_id = PHY_ID_KS8737,
.phy_id_mask = MICREL_PHY_ID_MASK,
.name = "Micrel KS8737",
- .features = (PHY_BASIC_FEATURES | SUPPORTED_Pause),
+ .features = PHY_BASIC_FEATURES,
.flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
.driver_data = &ks8737_type,
.config_init = kszphy_config_init,
@@ -807,8 +807,7 @@ static struct phy_driver ksphy_driver[] = {
.phy_id = PHY_ID_KSZ8021,
.phy_id_mask = 0x00ffffff,
.name = "Micrel KSZ8021 or KSZ8031",
- .features = (PHY_BASIC_FEATURES | SUPPORTED_Pause |
- SUPPORTED_Asym_Pause),
+ .features = PHY_BASIC_FEATURES,
.flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
.driver_data = &ksz8021_type,
.probe = kszphy_probe,
@@ -826,8 +825,7 @@ static struct phy_driver ksphy_driver[] = {
.phy_id = PHY_ID_KSZ8031,
.phy_id_mask = 0x00ffffff,
.name = "Micrel KSZ8031",
- .features = (PHY_BASIC_FEATURES | SUPPORTED_Pause |
- SUPPORTED_Asym_Pause),
+ .features = PHY_BASIC_FEATURES,
.flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
.driver_data = &ksz8021_type,
.probe = kszphy_probe,
@@ -845,8 +843,7 @@ static struct phy_driver ksphy_driver[] = {
.phy_id = PHY_ID_KSZ8041,
.phy_id_mask = MICREL_PHY_ID_MASK,
.name = "Micrel KSZ8041",
- .features = (PHY_BASIC_FEATURES | SUPPORTED_Pause
- | SUPPORTED_Asym_Pause),
+ .features = PHY_BASIC_FEATURES,
.flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
.driver_data = &ksz8041_type,
.probe = kszphy_probe,
@@ -864,8 +861,7 @@ static struct phy_driver ksphy_driver[] = {
.phy_id = PHY_ID_KSZ8041RNLI,
.phy_id_mask = MICREL_PHY_ID_MASK,
.name = "Micrel KSZ8041RNLI",
- .features = PHY_BASIC_FEATURES |
- SUPPORTED_Pause | SUPPORTED_Asym_Pause,
+ .features = PHY_BASIC_FEATURES,
.flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
.driver_data = &ksz8041_type,
.probe = kszphy_probe,
@@ -883,8 +879,7 @@ static struct phy_driver ksphy_driver[] = {
.phy_id = PHY_ID_KSZ8051,
.phy_id_mask = MICREL_PHY_ID_MASK,
.name = "Micrel KSZ8051",
- .features = (PHY_BASIC_FEATURES | SUPPORTED_Pause
- | SUPPORTED_Asym_Pause),
+ .features = PHY_BASIC_FEATURES,
.flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
.driver_data = &ksz8051_type,
.probe = kszphy_probe,
@@ -902,7 +897,7 @@ static struct phy_driver ksphy_driver[] = {
.phy_id = PHY_ID_KSZ8001,
.name = "Micrel KSZ8001 or KS8721",
.phy_id_mask = 0x00fffffc,
- .features = (PHY_BASIC_FEATURES | SUPPORTED_Pause),
+ .features = PHY_BASIC_FEATURES,
.flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
.driver_data = &ksz8041_type,
.probe = kszphy_probe,
@@ -920,7 +915,7 @@ static struct phy_driver ksphy_driver[] = {
.phy_id = PHY_ID_KSZ8081,
.name = "Micrel KSZ8081 or KSZ8091",
.phy_id_mask = MICREL_PHY_ID_MASK,
- .features = (PHY_BASIC_FEATURES | SUPPORTED_Pause),
+ .features = PHY_BASIC_FEATURES,
.flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
.driver_data = &ksz8081_type,
.probe = kszphy_probe,
@@ -938,7 +933,7 @@ static struct phy_driver ksphy_driver[] = {
.phy_id = PHY_ID_KSZ8061,
.name = "Micrel KSZ8061",
.phy_id_mask = MICREL_PHY_ID_MASK,
- .features = (PHY_BASIC_FEATURES | SUPPORTED_Pause),
+ .features = PHY_BASIC_FEATURES,
.flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
.config_init = kszphy_config_init,
.config_aneg = genphy_config_aneg,
@@ -954,7 +949,7 @@ static struct phy_driver ksphy_driver[] = {
.phy_id = PHY_ID_KSZ9021,
.phy_id_mask = 0x000ffffe,
.name = "Micrel KSZ9021 Gigabit PHY",
- .features = (PHY_GBIT_FEATURES | SUPPORTED_Pause),
+ .features = PHY_GBIT_FEATURES,
.flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
.driver_data = &ksz9021_type,
.config_init = ksz9021_config_init,
@@ -973,7 +968,7 @@ static struct phy_driver ksphy_driver[] = {
.phy_id = PHY_ID_KSZ9031,
.phy_id_mask = MICREL_PHY_ID_MASK,
.name = "Micrel KSZ9031 Gigabit PHY",
- .features = (PHY_GBIT_FEATURES | SUPPORTED_Pause),
+ .features = PHY_GBIT_FEATURES,
.flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
.driver_data = &ksz9021_type,
.config_init = ksz9031_config_init,
@@ -990,7 +985,6 @@ static struct phy_driver ksphy_driver[] = {
.phy_id = PHY_ID_KSZ8873MLL,
.phy_id_mask = MICREL_PHY_ID_MASK,
.name = "Micrel KSZ8873MLL Switch",
- .features = (SUPPORTED_Pause | SUPPORTED_Asym_Pause),
.flags = PHY_HAS_MAGICANEG,
.config_init = kszphy_config_init,
.config_aneg = ksz8873mll_config_aneg,
@@ -1004,7 +998,7 @@ static struct phy_driver ksphy_driver[] = {
.phy_id = PHY_ID_KSZ886X,
.phy_id_mask = MICREL_PHY_ID_MASK,
.name = "Micrel KSZ886X Switch",
- .features = (PHY_BASIC_FEATURES | SUPPORTED_Pause),
+ .features = PHY_BASIC_FEATURES,
.flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
.config_init = kszphy_config_init,
.config_aneg = genphy_config_aneg,
diff --git a/drivers/net/phy/microchip.c b/drivers/net/phy/microchip.c
index 7c00e508a101..324fbf6ad8ff 100644
--- a/drivers/net/phy/microchip.c
+++ b/drivers/net/phy/microchip.c
@@ -106,21 +106,54 @@ static int lan88xx_set_wol(struct phy_device *phydev,
return 0;
}
+static void lan88xx_set_mdix(struct phy_device *phydev)
+{
+ int buf;
+ int val;
+
+ switch (phydev->mdix_ctrl) {
+ case ETH_TP_MDI:
+ val = LAN88XX_EXT_MODE_CTRL_MDI_;
+ break;
+ case ETH_TP_MDI_X:
+ val = LAN88XX_EXT_MODE_CTRL_MDI_X_;
+ break;
+ case ETH_TP_MDI_AUTO:
+ val = LAN88XX_EXT_MODE_CTRL_AUTO_MDIX_;
+ break;
+ default:
+ return;
+ }
+
+ phy_write(phydev, LAN88XX_EXT_PAGE_ACCESS, LAN88XX_EXT_PAGE_SPACE_1);
+ buf = phy_read(phydev, LAN88XX_EXT_MODE_CTRL);
+ buf &= ~LAN88XX_EXT_MODE_CTRL_MDIX_MASK_;
+ buf |= val;
+ phy_write(phydev, LAN88XX_EXT_MODE_CTRL, buf);
+ phy_write(phydev, LAN88XX_EXT_PAGE_ACCESS, LAN88XX_EXT_PAGE_SPACE_0);
+}
+
+static int lan88xx_config_aneg(struct phy_device *phydev)
+{
+ lan88xx_set_mdix(phydev);
+
+ return genphy_config_aneg(phydev);
+}
+
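/*
 * Editor's sketch (illustrative, not part of the patch): the select-page /
 * read-modify-write / restore-page sequence in lan88xx_set_mdix() above
 * generalizes to a small helper; the name lan88xx_paged_rmw is hypothetical.
 */
static int lan88xx_paged_rmw(struct phy_device *phydev, int page, int reg,
			     u16 clear, u16 set)
{
	int buf;

	phy_write(phydev, LAN88XX_EXT_PAGE_ACCESS, page);
	buf = phy_read(phydev, reg);
	buf &= ~clear;		/* read-modify-write on the paged register */
	buf |= set;
	phy_write(phydev, reg, buf);
	/* always drop back to the standard page */
	return phy_write(phydev, LAN88XX_EXT_PAGE_ACCESS,
			 LAN88XX_EXT_PAGE_SPACE_0);
}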
static struct phy_driver microchip_phy_driver[] = {
{
.phy_id = 0x0007c130,
.phy_id_mask = 0xfffffff0,
.name = "Microchip LAN88xx",
- .features = (PHY_GBIT_FEATURES |
- SUPPORTED_Pause | SUPPORTED_Asym_Pause),
+ .features = PHY_GBIT_FEATURES,
.flags = PHY_HAS_INTERRUPT | PHY_HAS_MAGICANEG,
.probe = lan88xx_probe,
.remove = lan88xx_remove,
.config_init = genphy_config_init,
- .config_aneg = genphy_config_aneg,
+ .config_aneg = lan88xx_config_aneg,
.read_status = genphy_read_status,
.ack_interrupt = lan88xx_phy_ack_interrupt,
diff --git a/drivers/net/phy/mscc.c b/drivers/net/phy/mscc.c
index 77a6671d572e..e03ead81fffb 100644
--- a/drivers/net/phy/mscc.c
+++ b/drivers/net/phy/mscc.c
@@ -12,7 +12,6 @@
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/of.h>
-#include <dt-bindings/net/mscc-phy-vsc8531.h>
#include <linux/netdevice.h>
enum rgmii_rx_clock_delay {
@@ -28,6 +27,11 @@ enum rgmii_rx_clock_delay {
/* Microsemi VSC85xx PHY registers */
/* IEEE 802. Std Registers */
+#define MSCC_PHY_BYPASS_CONTROL 18
+#define DISABLE_HP_AUTO_MDIX_MASK 0x0080
+#define DISABLE_PAIR_SWAP_CORR_MASK 0x0020
+#define DISABLE_POLARITY_CORR_MASK 0x0010
+
#define MSCC_PHY_EXT_PHY_CNTL_1 23
#define MAC_IF_SELECTION_MASK 0x1800
#define MAC_IF_SELECTION_GMII 0
@@ -45,10 +49,25 @@ enum rgmii_rx_clock_delay {
#define EDGE_RATE_CNTL_POS 5
#define EDGE_RATE_CNTL_MASK 0x00E0
+#define MSCC_PHY_DEV_AUX_CNTL 28
+#define HP_AUTO_MDIX_X_OVER_IND_MASK 0x2000
+
#define MSCC_EXT_PAGE_ACCESS 31
#define MSCC_PHY_PAGE_STANDARD 0x0000 /* Standard registers */
+#define MSCC_PHY_PAGE_EXTENDED 0x0001 /* Extended registers */
#define MSCC_PHY_PAGE_EXTENDED_2 0x0002 /* Extended reg - page 2 */
+/* Extended Page 1 Registers */
+#define MSCC_PHY_EXT_MODE_CNTL 19
+#define FORCE_MDI_CROSSOVER_MASK 0x000C
+#define FORCE_MDI_CROSSOVER_MDIX 0x000C
+#define FORCE_MDI_CROSSOVER_MDI 0x0008
+
+#define MSCC_PHY_ACTIPHY_CNTL 20
+#define DOWNSHIFT_CNTL_MASK 0x001C
+#define DOWNSHIFT_EN 0x0010
+#define DOWNSHIFT_CNTL_POS 2
+
/* Extended Page 2 Registers */
#define MSCC_PHY_RGMII_CNTL 20
#define RGMII_RX_CLK_DELAY_MASK 0x0070
@@ -66,26 +85,36 @@ enum rgmii_rx_clock_delay {
#define SECURE_ON_PASSWD_LEN_4 0x4000
/* Microsemi PHY ID's */
+#define PHY_ID_VSC8530 0x00070560
#define PHY_ID_VSC8531 0x00070570
+#define PHY_ID_VSC8540 0x00070760
#define PHY_ID_VSC8541 0x00070770
-struct edge_rate_table {
- u16 vddmac;
- int slowdown[MSCC_SLOWDOWN_MAX];
-};
+#define MSCC_VDDMAC_1500 1500
+#define MSCC_VDDMAC_1800 1800
+#define MSCC_VDDMAC_2500 2500
+#define MSCC_VDDMAC_3300 3300
-struct edge_rate_table edge_table[MSCC_VDDMAC_MAX] = {
- {3300, { 0, -2, -4, -7, -10, -17, -29, -53} },
- {2500, { 0, -3, -6, -10, -14, -23, -37, -63} },
- {1800, { 0, -5, -9, -16, -23, -35, -52, -76} },
- {1500, { 0, -6, -14, -21, -29, -42, -58, -77} },
-};
+#define DOWNSHIFT_COUNT_MAX 5
struct vsc8531_private {
- u8 edge_slowdown;
+ int rate_magic;
+};
+
+#ifdef CONFIG_OF_MDIO
+struct vsc8531_edge_rate_table {
u16 vddmac;
+ u8 slowdown[8];
};
+static const struct vsc8531_edge_rate_table edge_table[] = {
+ {MSCC_VDDMAC_3300, { 0, 2, 4, 7, 10, 17, 29, 53} },
+ {MSCC_VDDMAC_2500, { 0, 3, 6, 10, 14, 23, 37, 63} },
+ {MSCC_VDDMAC_1800, { 0, 5, 9, 16, 23, 35, 52, 76} },
+ {MSCC_VDDMAC_1500, { 0, 6, 14, 21, 29, 42, 58, 77} },
+};
+#endif /* CONFIG_OF_MDIO */
+
static int vsc85xx_phy_page_set(struct phy_device *phydev, u8 page)
{
int rc;
@@ -94,6 +123,113 @@ static int vsc85xx_phy_page_set(struct phy_device *phydev, u8 page)
return rc;
}
+static int vsc85xx_mdix_get(struct phy_device *phydev, u8 *mdix)
+{
+ u16 reg_val;
+
+ reg_val = phy_read(phydev, MSCC_PHY_DEV_AUX_CNTL);
+ if (reg_val & HP_AUTO_MDIX_X_OVER_IND_MASK)
+ *mdix = ETH_TP_MDI_X;
+ else
+ *mdix = ETH_TP_MDI;
+
+ return 0;
+}
+
+static int vsc85xx_mdix_set(struct phy_device *phydev, u8 mdix)
+{
+ int rc;
+ u16 reg_val;
+
+ reg_val = phy_read(phydev, MSCC_PHY_BYPASS_CONTROL);
+ if ((mdix == ETH_TP_MDI) || (mdix == ETH_TP_MDI_X)) {
+ reg_val |= (DISABLE_PAIR_SWAP_CORR_MASK |
+ DISABLE_POLARITY_CORR_MASK |
+ DISABLE_HP_AUTO_MDIX_MASK);
+ } else {
+ reg_val &= ~(DISABLE_PAIR_SWAP_CORR_MASK |
+ DISABLE_POLARITY_CORR_MASK |
+ DISABLE_HP_AUTO_MDIX_MASK);
+ }
+ rc = phy_write(phydev, MSCC_PHY_BYPASS_CONTROL, reg_val);
+ if (rc != 0)
+ return rc;
+
+ rc = vsc85xx_phy_page_set(phydev, MSCC_PHY_PAGE_EXTENDED);
+ if (rc != 0)
+ return rc;
+
+ reg_val = phy_read(phydev, MSCC_PHY_EXT_MODE_CNTL);
+ reg_val &= ~(FORCE_MDI_CROSSOVER_MASK);
+ if (mdix == ETH_TP_MDI)
+ reg_val |= FORCE_MDI_CROSSOVER_MDI;
+ else if (mdix == ETH_TP_MDI_X)
+ reg_val |= FORCE_MDI_CROSSOVER_MDIX;
+ rc = phy_write(phydev, MSCC_PHY_EXT_MODE_CNTL, reg_val);
+ if (rc != 0)
+ return rc;
+
+ rc = vsc85xx_phy_page_set(phydev, MSCC_PHY_PAGE_STANDARD);
+ if (rc != 0)
+ return rc;
+
+ return genphy_restart_aneg(phydev);
+}
+
+static int vsc85xx_downshift_get(struct phy_device *phydev, u8 *count)
+{
+ int rc;
+ u16 reg_val;
+
+ rc = vsc85xx_phy_page_set(phydev, MSCC_PHY_PAGE_EXTENDED);
+ if (rc != 0)
+ goto out;
+
+ reg_val = phy_read(phydev, MSCC_PHY_ACTIPHY_CNTL);
+ reg_val &= DOWNSHIFT_CNTL_MASK;
+ if (!(reg_val & DOWNSHIFT_EN))
+ *count = DOWNSHIFT_DEV_DISABLE;
+ else
+ *count = ((reg_val & ~DOWNSHIFT_EN) >> DOWNSHIFT_CNTL_POS) + 2;
+ rc = vsc85xx_phy_page_set(phydev, MSCC_PHY_PAGE_STANDARD);
+
+out:
+ return rc;
+}
+
+static int vsc85xx_downshift_set(struct phy_device *phydev, u8 count)
+{
+ int rc;
+ u16 reg_val;
+
+ if (count == DOWNSHIFT_DEV_DEFAULT_COUNT) {
+ /* Default downshift count 3 (i.e. Bit3:2 = 0b01) */
+ count = ((1 << DOWNSHIFT_CNTL_POS) | DOWNSHIFT_EN);
+ } else if (count > DOWNSHIFT_COUNT_MAX || count == 1) {
+ phydev_err(phydev, "Downshift count should be 2,3,4 or 5\n");
+ return -ERANGE;
+ } else if (count) {
+ /* Downshift count is either 2,3,4 or 5 */
+ count = (((count - 2) << DOWNSHIFT_CNTL_POS) | DOWNSHIFT_EN);
+ }
+
+ rc = vsc85xx_phy_page_set(phydev, MSCC_PHY_PAGE_EXTENDED);
+ if (rc != 0)
+ goto out;
+
+ reg_val = phy_read(phydev, MSCC_PHY_ACTIPHY_CNTL);
+ reg_val &= ~(DOWNSHIFT_CNTL_MASK);
+ reg_val |= count;
+ rc = phy_write(phydev, MSCC_PHY_ACTIPHY_CNTL, reg_val);
+ if (rc != 0)
+ goto out;
+
+ rc = vsc85xx_phy_page_set(phydev, MSCC_PHY_PAGE_STANDARD);
+
+out:
+ return rc;
+}
+
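/*
 * Editor's note (worked example, illustrative): the encoding above packs the
 * downshift count into MSCC_PHY_ACTIPHY_CNTL bits 4:2 -- DOWNSHIFT_EN is bit
 * 4 and bits 3:2 hold (count - 2):
 *
 *	count 2 -> DOWNSHIFT_EN | (0 << DOWNSHIFT_CNTL_POS) = 0x10
 *	count 3 -> DOWNSHIFT_EN | (1 << DOWNSHIFT_CNTL_POS) = 0x14
 *	count 5 -> DOWNSHIFT_EN | (3 << DOWNSHIFT_CNTL_POS) = 0x1C
 *
 * A hypothetical helper capturing just the encoding:
 */
static inline u16 vsc85xx_downshift_encode(u8 count)
{
	/* caller must ensure 2 <= count <= 5 */
	return DOWNSHIFT_EN | (u16)((count - 2) << DOWNSHIFT_CNTL_POS);
}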
static int vsc85xx_wol_set(struct phy_device *phydev,
struct ethtool_wolinfo *wol)
{
@@ -205,29 +341,43 @@ out_unlock:
mutex_unlock(&phydev->lock);
}
-static u8 edge_rate_magic_get(u16 vddmac,
- int slowdown)
+#ifdef CONFIG_OF_MDIO
+static int vsc85xx_edge_rate_magic_get(struct phy_device *phydev)
{
- int rc = (MSCC_SLOWDOWN_MAX - 1);
- u8 vdd;
u8 sd;
+ u16 vdd;
+ int rc, i, j;
+ struct device *dev = &phydev->mdio.dev;
+ struct device_node *of_node = dev->of_node;
+ u8 sd_array_size = ARRAY_SIZE(edge_table[0].slowdown);
- for (vdd = 0; vdd < MSCC_VDDMAC_MAX; vdd++) {
- if (edge_table[vdd].vddmac == vddmac) {
- for (sd = 0; sd < MSCC_SLOWDOWN_MAX; sd++) {
- if (edge_table[vdd].slowdown[sd] <= slowdown) {
- rc = (MSCC_SLOWDOWN_MAX - sd - 1);
- break;
- }
- }
- }
- }
+ if (!of_node)
+ return -ENODEV;
- return rc;
+ rc = of_property_read_u16(of_node, "vsc8531,vddmac", &vdd);
+ if (rc != 0)
+ vdd = MSCC_VDDMAC_3300;
+
+ rc = of_property_read_u8(of_node, "vsc8531,edge-slowdown", &sd);
+ if (rc != 0)
+ sd = 0;
+
+ for (i = 0; i < ARRAY_SIZE(edge_table); i++)
+ if (edge_table[i].vddmac == vdd)
+ for (j = 0; j < sd_array_size; j++)
+ if (edge_table[i].slowdown[j] == sd)
+ return (sd_array_size - j - 1);
+
+ return -EINVAL;
}
+#else
+static int vsc85xx_edge_rate_magic_get(struct phy_device *phydev)
+{
+ return 0;
+}
+#endif /* CONFIG_OF_MDIO */
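/*
 * Editor's note (worked example, illustrative): if the DT properties above
 * read back as vddmac = 2500 and edge-slowdown = 6, the lookup matches
 * edge_table[1].slowdown[2] and returns sd_array_size - j - 1 = 8 - 2 - 1
 * = 5; a slowdown value not present in the matching row yields -EINVAL.
 */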
-static int vsc85xx_edge_rate_cntl_set(struct phy_device *phydev,
- u8 edge_rate)
+static int vsc85xx_edge_rate_cntl_set(struct phy_device *phydev, u8 edge_rate)
{
int rc;
u16 reg_val;
@@ -291,6 +441,7 @@ static int vsc85xx_default_config(struct phy_device *phydev)
int rc;
u16 reg_val;
+ phydev->mdix_ctrl = ETH_TP_MDI_AUTO;
mutex_lock(&phydev->lock);
rc = vsc85xx_phy_page_set(phydev, MSCC_PHY_PAGE_EXTENDED_2);
if (rc != 0)
@@ -308,45 +459,33 @@ out_unlock:
return rc;
}
-#ifdef CONFIG_OF_MDIO
-static int vsc8531_of_init(struct phy_device *phydev)
+static int vsc85xx_get_tunable(struct phy_device *phydev,
+ struct ethtool_tunable *tuna, void *data)
{
- int rc;
- struct vsc8531_private *vsc8531 = phydev->priv;
- struct device *dev = &phydev->mdio.dev;
- struct device_node *of_node = dev->of_node;
-
- if (!of_node)
- return -ENODEV;
-
- rc = of_property_read_u16(of_node, "vsc8531,vddmac",
- &vsc8531->vddmac);
- if (rc == -EINVAL)
- vsc8531->vddmac = MSCC_VDDMAC_3300;
- rc = of_property_read_u8(of_node, "vsc8531,edge-slowdown",
- &vsc8531->edge_slowdown);
- if (rc == -EINVAL)
- vsc8531->edge_slowdown = 0;
-
- rc = 0;
- return rc;
+ switch (tuna->id) {
+ case ETHTOOL_PHY_DOWNSHIFT:
+ return vsc85xx_downshift_get(phydev, (u8 *)data);
+ default:
+ return -EINVAL;
+ }
}
-#else
-static int vsc8531_of_init(struct phy_device *phydev)
+
+static int vsc85xx_set_tunable(struct phy_device *phydev,
+ struct ethtool_tunable *tuna,
+ const void *data)
{
- return 0;
+ switch (tuna->id) {
+ case ETHTOOL_PHY_DOWNSHIFT:
+ return vsc85xx_downshift_set(phydev, *(u8 *)data);
+ default:
+ return -EINVAL;
+ }
}
-#endif /* CONFIG_OF_MDIO */
static int vsc85xx_config_init(struct phy_device *phydev)
{
int rc;
struct vsc8531_private *vsc8531 = phydev->priv;
- u8 edge_rate;
-
- rc = vsc8531_of_init(phydev);
- if (rc)
- return rc;
rc = vsc85xx_default_config(phydev);
if (rc)
@@ -356,9 +495,7 @@ static int vsc85xx_config_init(struct phy_device *phydev)
if (rc)
return rc;
- edge_rate = edge_rate_magic_get(vsc8531->vddmac,
- -(int)vsc8531->edge_slowdown);
- rc = vsc85xx_edge_rate_cntl_set(phydev, edge_rate);
+ rc = vsc85xx_edge_rate_cntl_set(phydev, vsc8531->rate_magic);
if (rc)
return rc;
@@ -394,22 +531,72 @@ static int vsc85xx_config_intr(struct phy_device *phydev)
return rc;
}
+static int vsc85xx_config_aneg(struct phy_device *phydev)
+{
+ int rc;
+
+ rc = vsc85xx_mdix_set(phydev, phydev->mdix_ctrl);
+ if (rc < 0)
+ return rc;
+
+ return genphy_config_aneg(phydev);
+}
+
+static int vsc85xx_read_status(struct phy_device *phydev)
+{
+ int rc;
+
+ rc = vsc85xx_mdix_get(phydev, &phydev->mdix);
+ if (rc < 0)
+ return rc;
+
+ return genphy_read_status(phydev);
+}
+
static int vsc85xx_probe(struct phy_device *phydev)
{
+ int rate_magic;
struct vsc8531_private *vsc8531;
+ rate_magic = vsc85xx_edge_rate_magic_get(phydev);
+ if (rate_magic < 0)
+ return rate_magic;
+
vsc8531 = devm_kzalloc(&phydev->mdio.dev, sizeof(*vsc8531), GFP_KERNEL);
if (!vsc8531)
return -ENOMEM;
phydev->priv = vsc8531;
+ vsc8531->rate_magic = rate_magic;
+
return 0;
}
/* Microsemi VSC85xx PHYs */
static struct phy_driver vsc85xx_driver[] = {
{
+ .phy_id = PHY_ID_VSC8530,
+ .name = "Microsemi FE VSC8530",
+ .phy_id_mask = 0xfffffff0,
+ .features = PHY_BASIC_FEATURES,
+ .flags = PHY_HAS_INTERRUPT,
+ .soft_reset = &genphy_soft_reset,
+ .config_init = &vsc85xx_config_init,
+ .config_aneg = &vsc85xx_config_aneg,
+ .aneg_done = &genphy_aneg_done,
+ .read_status = &vsc85xx_read_status,
+ .ack_interrupt = &vsc85xx_ack_interrupt,
+ .config_intr = &vsc85xx_config_intr,
+ .suspend = &genphy_suspend,
+ .resume = &genphy_resume,
+ .probe = &vsc85xx_probe,
+ .set_wol = &vsc85xx_wol_set,
+ .get_wol = &vsc85xx_wol_get,
+ .get_tunable = &vsc85xx_get_tunable,
+ .set_tunable = &vsc85xx_set_tunable,
+},
+{
.phy_id = PHY_ID_VSC8531,
.name = "Microsemi VSC8531",
.phy_id_mask = 0xfffffff0,
@@ -417,16 +604,39 @@ static struct phy_driver vsc85xx_driver[] = {
.flags = PHY_HAS_INTERRUPT,
.soft_reset = &genphy_soft_reset,
.config_init = &vsc85xx_config_init,
- .config_aneg = &genphy_config_aneg,
+ .config_aneg = &vsc85xx_config_aneg,
.aneg_done = &genphy_aneg_done,
- .read_status = &genphy_read_status,
+ .read_status = &vsc85xx_read_status,
.ack_interrupt = &vsc85xx_ack_interrupt,
.config_intr = &vsc85xx_config_intr,
.suspend = &genphy_suspend,
.resume = &genphy_resume,
- .probe = &vsc85xx_probe,
- .set_wol = &vsc85xx_wol_set,
- .get_wol = &vsc85xx_wol_get,
+ .probe = &vsc85xx_probe,
+ .set_wol = &vsc85xx_wol_set,
+ .get_wol = &vsc85xx_wol_get,
+ .get_tunable = &vsc85xx_get_tunable,
+ .set_tunable = &vsc85xx_set_tunable,
+},
+{
+ .phy_id = PHY_ID_VSC8540,
+ .name = "Microsemi FE VSC8540 SyncE",
+ .phy_id_mask = 0xfffffff0,
+ .features = PHY_BASIC_FEATURES,
+ .flags = PHY_HAS_INTERRUPT,
+ .soft_reset = &genphy_soft_reset,
+ .config_init = &vsc85xx_config_init,
+ .config_aneg = &vsc85xx_config_aneg,
+ .aneg_done = &genphy_aneg_done,
+ .read_status = &vsc85xx_read_status,
+ .ack_interrupt = &vsc85xx_ack_interrupt,
+ .config_intr = &vsc85xx_config_intr,
+ .suspend = &genphy_suspend,
+ .resume = &genphy_resume,
+ .probe = &vsc85xx_probe,
+ .set_wol = &vsc85xx_wol_set,
+ .get_wol = &vsc85xx_wol_get,
+ .get_tunable = &vsc85xx_get_tunable,
+ .set_tunable = &vsc85xx_set_tunable,
},
{
.phy_id = PHY_ID_VSC8541,
@@ -436,16 +646,18 @@ static struct phy_driver vsc85xx_driver[] = {
.flags = PHY_HAS_INTERRUPT,
.soft_reset = &genphy_soft_reset,
.config_init = &vsc85xx_config_init,
- .config_aneg = &genphy_config_aneg,
+ .config_aneg = &vsc85xx_config_aneg,
.aneg_done = &genphy_aneg_done,
- .read_status = &genphy_read_status,
+ .read_status = &vsc85xx_read_status,
.ack_interrupt = &vsc85xx_ack_interrupt,
.config_intr = &vsc85xx_config_intr,
.suspend = &genphy_suspend,
.resume = &genphy_resume,
- .probe = &vsc85xx_probe,
- .set_wol = &vsc85xx_wol_set,
- .get_wol = &vsc85xx_wol_get,
+ .probe = &vsc85xx_probe,
+ .set_wol = &vsc85xx_wol_set,
+ .get_wol = &vsc85xx_wol_get,
+ .get_tunable = &vsc85xx_get_tunable,
+ .set_tunable = &vsc85xx_set_tunable,
}
};
@@ -453,7 +665,9 @@ static struct phy_driver vsc85xx_driver[] = {
module_phy_driver(vsc85xx_driver);
static struct mdio_device_id __maybe_unused vsc85xx_tbl[] = {
+ { PHY_ID_VSC8530, 0xfffffff0, },
{ PHY_ID_VSC8531, 0xfffffff0, },
+ { PHY_ID_VSC8540, 0xfffffff0, },
{ PHY_ID_VSC8541, 0xfffffff0, },
{ }
};
diff --git a/drivers/net/phy/national.c b/drivers/net/phy/national.c
index 2a1b490bc587..2addf1d3f619 100644
--- a/drivers/net/phy/national.c
+++ b/drivers/net/phy/national.c
@@ -133,7 +133,7 @@ static struct phy_driver dp83865_driver[] = { {
.phy_id = DP83865_PHY_ID,
.phy_id_mask = 0xfffffff0,
.name = "NatSemi DP83865",
- .features = PHY_GBIT_FEATURES | SUPPORTED_Pause | SUPPORTED_Asym_Pause,
+ .features = PHY_GBIT_FEATURES,
.flags = PHY_HAS_INTERRUPT,
.config_init = ns_config_init,
.config_aneg = genphy_config_aneg,
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index f424b867f73e..25f93a98863b 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -143,13 +143,14 @@ static int phy_config_interrupt(struct phy_device *phydev, u32 interrupts)
* Returns > 0 on success or < 0 on error. 0 means that auto-negotiation
* is still pending.
*/
-static inline int phy_aneg_done(struct phy_device *phydev)
+int phy_aneg_done(struct phy_device *phydev)
{
if (phydev->drv->aneg_done)
return phydev->drv->aneg_done(phydev);
return genphy_aneg_done(phydev);
}
+EXPORT_SYMBOL(phy_aneg_done);
/* A structure for mapping a particular speed and duplex
* combination to a particular SUPPORTED and ADVERTISED value
@@ -261,6 +262,41 @@ static inline unsigned int phy_find_valid(unsigned int idx, u32 features)
}
/**
+ * phy_supported_speeds - return all speeds currently supported by a phy device
+ * @phy: The phy device to return supported speeds of.
+ * @speeds: buffer to store supported speeds in.
+ * @size: size of speeds buffer.
+ *
+ * Description: Returns the number of supported speeds, and fills the speeds
+ * buffer with the supported speeds. If the speeds buffer is too small to
+ * contain all currently supported speeds, it returns as many speeds as fit.
+ */
+unsigned int phy_supported_speeds(struct phy_device *phy,
+ unsigned int *speeds,
+ unsigned int size)
+{
+ unsigned int count = 0;
+ unsigned int idx = 0;
+
+ while (idx < MAX_NUM_SETTINGS && count < size) {
+ idx = phy_find_valid(idx, phy->supported);
+
+ if (!(settings[idx].setting & phy->supported))
+ break;
+
+ /* Assumes settings are grouped by speed */
+ if ((count == 0) ||
+ (speeds[count - 1] != settings[idx].speed)) {
+ speeds[count] = settings[idx].speed;
+ count++;
+ }
+ idx++;
+ }
+
+ return count;
+}
+
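/*
 * Editor's sketch (illustrative usage, not part of the patch): callers size
 * a small buffer and walk the result, much as phy_led_triggers_register()
 * does later in this patch; example_dump_speeds is a hypothetical name.
 */
static void example_dump_speeds(struct phy_device *phy)
{
	unsigned int speeds[10];
	unsigned int i, n;

	n = phy_supported_speeds(phy, speeds, ARRAY_SIZE(speeds));
	for (i = 0; i < n; i++)
		dev_info(&phy->mdio.dev, "supports %u Mb/s\n", speeds[i]);
}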
+/**
* phy_check_valid - check if there is a valid PHY setting which matches
* speed, duplex, and feature mask
* @speed: speed to match
@@ -353,7 +389,7 @@ int phy_ethtool_sset(struct phy_device *phydev, struct ethtool_cmd *cmd)
phydev->duplex = cmd->duplex;
- phydev->mdix = cmd->eth_tp_mdix_ctrl;
+ phydev->mdix_ctrl = cmd->eth_tp_mdix_ctrl;
/* Restart the PHY */
phy_start_aneg(phydev);
@@ -407,7 +443,7 @@ int phy_ethtool_ksettings_set(struct phy_device *phydev,
phydev->duplex = duplex;
- phydev->mdix = cmd->base.eth_tp_mdix_ctrl;
+ phydev->mdix_ctrl = cmd->base.eth_tp_mdix_ctrl;
/* Restart the PHY */
phy_start_aneg(phydev);
@@ -433,7 +469,8 @@ int phy_ethtool_gset(struct phy_device *phydev, struct ethtool_cmd *cmd)
cmd->transceiver = phy_is_internal(phydev) ?
XCVR_INTERNAL : XCVR_EXTERNAL;
cmd->autoneg = phydev->autoneg;
- cmd->eth_tp_mdix_ctrl = phydev->mdix;
+ cmd->eth_tp_mdix_ctrl = phydev->mdix_ctrl;
+ cmd->eth_tp_mdix = phydev->mdix;
return 0;
}
@@ -460,7 +497,8 @@ int phy_ethtool_ksettings_get(struct phy_device *phydev,
cmd->base.phy_address = phydev->mdio.addr;
cmd->base.autoneg = phydev->autoneg;
- cmd->base.eth_tp_mdix_ctrl = phydev->mdix;
+ cmd->base.eth_tp_mdix_ctrl = phydev->mdix_ctrl;
+ cmd->base.eth_tp_mdix = phydev->mdix;
return 0;
}
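/*
 * Editor's note (illustrative): these hunks separate the user's request
 * (phydev->mdix_ctrl, bound to ethtool's eth_tp_mdix_ctrl) from the
 * resolved, operator-visible state (phydev->mdix, reported via eth_tp_mdix),
 * mirroring the two distinct fields in the ethtool ABI.
 */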
@@ -664,7 +702,7 @@ static void phy_error(struct phy_device *phydev)
* @phy_dat: phy_device pointer
*
* Description: When a PHY interrupt occurs, the handler disables
- * interrupts, and schedules a work task to clear the interrupt.
+ * interrupts, and uses phy_change to handle the interrupt.
*/
static irqreturn_t phy_interrupt(int irq, void *phy_dat)
{
@@ -673,15 +711,10 @@ static irqreturn_t phy_interrupt(int irq, void *phy_dat)
if (PHY_HALTED == phydev->state)
return IRQ_NONE; /* It can't be ours. */
- /* The MDIO bus is not allowed to be written in interrupt
- * context, so we need to disable the irq here. A work
- * queue will write the PHY to disable and clear the
- * interrupt, and then reenable the irq line.
- */
disable_irq_nosync(irq);
atomic_inc(&phydev->irq_disable);
- queue_work(system_power_efficient_wq, &phydev->phy_queue);
+ phy_change(phydev);
return IRQ_HANDLED;
}
@@ -739,10 +772,9 @@ phy_err:
int phy_start_interrupts(struct phy_device *phydev)
{
atomic_set(&phydev->irq_disable, 0);
- if (request_irq(phydev->irq, phy_interrupt,
- IRQF_SHARED,
- "phy_interrupt",
- phydev) < 0) {
+ if (request_threaded_irq(phydev->irq, NULL, phy_interrupt,
+ IRQF_ONESHOT | IRQF_SHARED,
+ phydev_name(phydev), phydev) < 0) {
pr_warn("%s: Can't get IRQ %d (PHY)\n",
phydev->mdio.bus->name, phydev->irq);
phydev->irq = PHY_POLL;
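/*
 * Editor's note (illustrative): with a NULL primary handler and
 * IRQF_ONESHOT, request_threaded_irq() runs phy_interrupt() in a kernel
 * thread, where sleeping MDIO accesses are allowed -- which is what lets
 * the handler call phy_change() directly instead of deferring to a work
 * queue as the removed comment above described.
 */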
@@ -766,12 +798,6 @@ int phy_stop_interrupts(struct phy_device *phydev)
free_irq(phydev->irq, phydev);
- /* Cannot call flush_scheduled_work() here as desired because
- * of rtnl_lock(), but we do not really care about what would
- * be done, except from enable_irq(), so cancel any work
- * possibly pending and take care of the matter below.
- */
- cancel_work_sync(&phydev->phy_queue);
/* If work indeed has been cancelled, disable_irq() will have
* been left unbalanced from phy_interrupt() and enable_irq()
* has to be called so that other devices on the line work.
@@ -784,14 +810,11 @@ int phy_stop_interrupts(struct phy_device *phydev)
EXPORT_SYMBOL(phy_stop_interrupts);
/**
- * phy_change - Scheduled by the phy_interrupt/timer to handle PHY changes
- * @work: work_struct that describes the work to be done
+ * phy_change - Called by phy_interrupt to handle PHY changes
+ * @phydev: phy_device struct that interrupted
*/
-void phy_change(struct work_struct *work)
+void phy_change(struct phy_device *phydev)
{
- struct phy_device *phydev =
- container_of(work, struct phy_device, phy_queue);
-
if (phy_interrupt_is_valid(phydev)) {
if (phydev->drv->did_interrupt &&
!phydev->drv->did_interrupt(phydev))
@@ -833,6 +856,18 @@ phy_err:
}
/**
+ * phy_change_work - Scheduled by phy_mac_interrupt to handle PHY changes
+ * @work: work_struct that describes the work to be done
+ */
+void phy_change_work(struct work_struct *work)
+{
+ struct phy_device *phydev =
+ container_of(work, struct phy_device, phy_queue);
+
+ phy_change(phydev);
+}
+
+/**
* phy_stop - Bring down the PHY link, and stop checking the status
* @phydev: target phy_device struct
*/
@@ -911,6 +946,12 @@ void phy_start(struct phy_device *phydev)
}
EXPORT_SYMBOL(phy_start);
+static void phy_adjust_link(struct phy_device *phydev)
+{
+ phydev->adjust_link(phydev->attached_dev);
+ phy_led_trigger_change_speed(phydev);
+}
+
/**
* phy_state_machine - Handle the state machine
* @work: work_struct that describes the work to be done
@@ -953,7 +994,7 @@ void phy_state_machine(struct work_struct *work)
if (!phydev->link) {
phydev->state = PHY_NOLINK;
netif_carrier_off(phydev->attached_dev);
- phydev->adjust_link(phydev->attached_dev);
+ phy_adjust_link(phydev);
break;
}
@@ -966,7 +1007,7 @@ void phy_state_machine(struct work_struct *work)
if (err > 0) {
phydev->state = PHY_RUNNING;
netif_carrier_on(phydev->attached_dev);
- phydev->adjust_link(phydev->attached_dev);
+ phy_adjust_link(phydev);
} else if (0 == phydev->link_timeout--)
needs_aneg = true;
@@ -993,7 +1034,7 @@ void phy_state_machine(struct work_struct *work)
}
phydev->state = PHY_RUNNING;
netif_carrier_on(phydev->attached_dev);
- phydev->adjust_link(phydev->attached_dev);
+ phy_adjust_link(phydev);
}
break;
case PHY_FORCING:
@@ -1009,7 +1050,7 @@ void phy_state_machine(struct work_struct *work)
needs_aneg = true;
}
- phydev->adjust_link(phydev->attached_dev);
+ phy_adjust_link(phydev);
break;
case PHY_RUNNING:
/* Only register a CHANGE if we are polling and link changed
@@ -1038,7 +1079,7 @@ void phy_state_machine(struct work_struct *work)
netif_carrier_off(phydev->attached_dev);
}
- phydev->adjust_link(phydev->attached_dev);
+ phy_adjust_link(phydev);
if (phy_interrupt_is_valid(phydev))
err = phy_config_interrupt(phydev,
@@ -1048,7 +1089,7 @@ void phy_state_machine(struct work_struct *work)
if (phydev->link) {
phydev->link = 0;
netif_carrier_off(phydev->attached_dev);
- phydev->adjust_link(phydev->attached_dev);
+ phy_adjust_link(phydev);
do_suspend = true;
}
break;
@@ -1072,7 +1113,7 @@ void phy_state_machine(struct work_struct *work)
} else {
phydev->state = PHY_NOLINK;
}
- phydev->adjust_link(phydev->attached_dev);
+ phy_adjust_link(phydev);
} else {
phydev->state = PHY_AN;
phydev->link_timeout = PHY_AN_TIMEOUT;
@@ -1088,7 +1129,7 @@ void phy_state_machine(struct work_struct *work)
} else {
phydev->state = PHY_NOLINK;
}
- phydev->adjust_link(phydev->attached_dev);
+ phy_adjust_link(phydev);
}
break;
}
@@ -1116,6 +1157,15 @@ void phy_state_machine(struct work_struct *work)
PHY_STATE_TIME * HZ);
}
+/**
+ * phy_mac_interrupt - MAC says the link has changed
+ * @phydev: phy_device struct with changed link
+ * @new_link: Link is Up/Down.
+ *
+ * Description: The MAC layer is able to indicate that there has been a change
+ * in the PHY link status. Set the new link status, and trigger the
+ * state machine, which runs from a work queue.
+ */
void phy_mac_interrupt(struct phy_device *phydev, int new_link)
{
phydev->link = new_link;
@@ -1348,6 +1398,9 @@ int phy_ethtool_set_eee(struct phy_device *phydev, struct ethtool_eee *data)
{
int val = ethtool_adv_to_mmd_eee_adv_t(data->advertised);
+ /* Mask prohibited EEE modes */
+ val &= ~phydev->eee_broken_modes;
+
phy_write_mmd_indirect(phydev, MDIO_AN_EEE_ADV, MDIO_MMD_AN, val);
return 0;
@@ -1393,3 +1446,14 @@ int phy_ethtool_set_link_ksettings(struct net_device *ndev,
return phy_ethtool_ksettings_set(phydev, cmd);
}
EXPORT_SYMBOL(phy_ethtool_set_link_ksettings);
+
+int phy_ethtool_nway_reset(struct net_device *ndev)
+{
+ struct phy_device *phydev = ndev->phydev;
+
+ if (!phydev)
+ return -ENODEV;
+
+ return genphy_restart_aneg(phydev);
+}
+EXPORT_SYMBOL(phy_ethtool_nway_reset);
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index c4ceb082e970..92b08383cafa 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -30,6 +30,7 @@
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/phy.h>
+#include <linux/phy_led_triggers.h>
#include <linux/mdio.h>
#include <linux/io.h>
#include <linux/uaccess.h>
@@ -234,6 +235,53 @@ int phy_register_fixup_for_id(const char *bus_id,
}
EXPORT_SYMBOL(phy_register_fixup_for_id);
+/**
+ * phy_unregister_fixup - remove a phy_fixup from the list
+ * @bus_id: A string that matches fixup->bus_id (or PHY_ANY_ID) in phy_fixup_list
+ * @phy_uid: A phy id that matches fixup->phy_uid (or PHY_ANY_UID) in phy_fixup_list
+ * @phy_uid_mask: Applied to phy_uid and fixup->phy_uid before comparison
+ */
+int phy_unregister_fixup(const char *bus_id, u32 phy_uid, u32 phy_uid_mask)
+{
+ struct list_head *pos, *n;
+ struct phy_fixup *fixup;
+ int ret;
+
+ ret = -ENODEV;
+
+ mutex_lock(&phy_fixup_lock);
+ list_for_each_safe(pos, n, &phy_fixup_list) {
+ fixup = list_entry(pos, struct phy_fixup, list);
+
+ if ((!strcmp(fixup->bus_id, bus_id)) &&
+ ((fixup->phy_uid & phy_uid_mask) ==
+ (phy_uid & phy_uid_mask))) {
+ list_del(&fixup->list);
+ kfree(fixup);
+ ret = 0;
+ break;
+ }
+ }
+ mutex_unlock(&phy_fixup_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL(phy_unregister_fixup);
+
+/* Unregisters a fixup of any PHY with the UID in phy_uid */
+int phy_unregister_fixup_for_uid(u32 phy_uid, u32 phy_uid_mask)
+{
+ return phy_unregister_fixup(PHY_ANY_ID, phy_uid, phy_uid_mask);
+}
+EXPORT_SYMBOL(phy_unregister_fixup_for_uid);
+
+/* Unregisters a fixup of the PHY with id string bus_id */
+int phy_unregister_fixup_for_id(const char *bus_id)
+{
+ return phy_unregister_fixup(bus_id, PHY_ANY_UID, 0xffffffff);
+}
+EXPORT_SYMBOL(phy_unregister_fixup_for_id);
+
/* Returns 1 if fixup matches phydev in bus_id and phy_uid.
* Fixups can be set to match any in one or more fields.
*/
@@ -347,7 +395,7 @@ struct phy_device *phy_device_create(struct mii_bus *bus, int addr, int phy_id,
mutex_init(&dev->lock);
INIT_DELAYED_WORK(&dev->state_queue, phy_state_machine);
- INIT_WORK(&dev->phy_queue, phy_change);
+ INIT_WORK(&dev->phy_queue, phy_change_work);
/* Request the appropriate module unconditionally; don't
* bother trying to do so only if it isn't already loaded,
@@ -919,13 +967,15 @@ int phy_attach_direct(struct net_device *dev, struct phy_device *phydev,
*/
err = phy_init_hw(phydev);
if (err)
- phy_detach(phydev);
- else
- phy_resume(phydev);
+ goto error;
+
+ phy_resume(phydev);
+ phy_led_triggers_register(phydev);
return err;
error:
+ phy_detach(phydev);
put_device(d);
if (ndev_owner != bus->owner)
module_put(bus->owner);
@@ -987,6 +1037,8 @@ void phy_detach(struct phy_device *phydev)
phydev->attached_dev = NULL;
phy_suspend(phydev);
+ phy_led_triggers_unregister(phydev);
+
/* If the device had no specific driver before (i.e. - it
* was using the generic driver), we unbind the device
* from the generic driver so that there's a chance a
@@ -1126,6 +1178,43 @@ static int genphy_config_advert(struct phy_device *phydev)
}
/**
+ * genphy_config_eee_advert - disable unwanted eee mode advertisement
+ * @phydev: target phy_device struct
+ *
+ * Description: Writes MDIO_AN_EEE_ADV after disabling unsupported energy
+ * efficient ethernet modes. Returns 0 if the PHY's advertisement hasn't
+ * changed, and 1 if it has changed.
+ */
+static int genphy_config_eee_advert(struct phy_device *phydev)
+{
+ int broken = phydev->eee_broken_modes;
+ int old_adv, adv;
+
+ /* Nothing to disable */
+ if (!broken)
+ return 0;
+
+ /* If the following call fails, we assume that EEE is not
+ * supported by the phy. If we read 0, EEE is not advertised.
+ * In either case, we don't need to continue.
+ */
+ adv = phy_read_mmd_indirect(phydev, MDIO_AN_EEE_ADV, MDIO_MMD_AN);
+ if (adv <= 0)
+ return 0;
+
+ old_adv = adv;
+ adv &= ~broken;
+
+ /* Advertising remains unchanged with the broken mask */
+ if (old_adv == adv)
+ return 0;
+
+ phy_write_mmd_indirect(phydev, MDIO_AN_EEE_ADV, MDIO_MMD_AN, adv);
+
+ return 1;
+}
+
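/*
 * Editor's note (worked example, illustrative): with eee_broken_modes =
 * MDIO_EEE_1000T and MDIO_AN_EEE_ADV reading MDIO_EEE_100TX |
 * MDIO_EEE_1000T, adv &= ~broken leaves MDIO_EEE_100TX, the register is
 * rewritten, and the function returns 1 so genphy_config_aneg() treats the
 * advertisement as changed and restarts autonegotiation.
 */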
+/**
* genphy_setup_forced - configures/forces speed/duplex from @phydev
* @phydev: target phy_device struct
*
@@ -1183,15 +1272,20 @@ EXPORT_SYMBOL(genphy_restart_aneg);
*/
int genphy_config_aneg(struct phy_device *phydev)
{
- int result;
+ int err, changed;
+
+ changed = genphy_config_eee_advert(phydev);
if (AUTONEG_ENABLE != phydev->autoneg)
return genphy_setup_forced(phydev);
- result = genphy_config_advert(phydev);
- if (result < 0) /* error */
- return result;
- if (result == 0) {
+ err = genphy_config_advert(phydev);
+ if (err < 0) /* error */
+ return err;
+
+ changed |= err;
+
+ if (changed == 0) {
/* Advertisement hasn't changed, but maybe aneg was never on to
* begin with? Or maybe phy was isolated?
*/
@@ -1201,16 +1295,16 @@ int genphy_config_aneg(struct phy_device *phydev)
return ctl;
if (!(ctl & BMCR_ANENABLE) || (ctl & BMCR_ISOLATE))
- result = 1; /* do restart aneg */
+ changed = 1; /* do restart aneg */
}
/* Only restart aneg if we are advertising something different
* than we were before.
*/
- if (result > 0)
- result = genphy_restart_aneg(phydev);
+ if (changed > 0)
+ return genphy_restart_aneg(phydev);
- return result;
+ return 0;
}
EXPORT_SYMBOL(genphy_config_aneg);
@@ -1568,6 +1662,33 @@ static void of_set_phy_supported(struct phy_device *phydev)
__set_phy_supported(phydev, max_speed);
}
+static void of_set_phy_eee_broken(struct phy_device *phydev)
+{
+ struct device_node *node = phydev->mdio.dev.of_node;
+ u32 broken = 0;
+
+ if (!IS_ENABLED(CONFIG_OF_MDIO))
+ return;
+
+ if (!node)
+ return;
+
+ if (of_property_read_bool(node, "eee-broken-100tx"))
+ broken |= MDIO_EEE_100TX;
+ if (of_property_read_bool(node, "eee-broken-1000t"))
+ broken |= MDIO_EEE_1000T;
+ if (of_property_read_bool(node, "eee-broken-10gt"))
+ broken |= MDIO_EEE_10GT;
+ if (of_property_read_bool(node, "eee-broken-1000kx"))
+ broken |= MDIO_EEE_1000KX;
+ if (of_property_read_bool(node, "eee-broken-10gkx4"))
+ broken |= MDIO_EEE_10GKX4;
+ if (of_property_read_bool(node, "eee-broken-10gkr"))
+ broken |= MDIO_EEE_10GKR;
+
+ phydev->eee_broken_modes = broken;
+}
+
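/*
 * Editor's sketch (illustrative DT fragment, assuming the property names
 * used above):
 *
 *	ethernet-phy@1 {
 *		reg = <1>;
 *		eee-broken-1000t;
 *	};
 *
 * sets phydev->eee_broken_modes = MDIO_EEE_1000T, which
 * genphy_config_eee_advert() then strips from MDIO_AN_EEE_ADV.
 */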
/**
* phy_probe - probe and init a PHY device
* @dev: device to probe and init
@@ -1605,6 +1726,30 @@ static int phy_probe(struct device *dev)
of_set_phy_supported(phydev);
phydev->advertising = phydev->supported;
+ /* Get the EEE modes we want to prohibit. We will ask
+ * the PHY to stop advertising these modes later on
+ */
+ of_set_phy_eee_broken(phydev);
+
+ /* The Pause Frame bits indicate that the PHY can support passing
+ * pause frames. During autonegotiation, the PHYs will determine if
+ * they should allow pause frames to pass. The MAC driver should then
+ * use that result to determine whether to enable flow control via
+ * pause frames.
+ *
+ * Normally, PHY drivers should not set the Pause bits, and instead
+ * allow phylib to do that. However, there may be some situations
+ * (e.g. hardware erratum) where the driver wants to set only one
+ * of these bits.
+ */
+ if (phydrv->features & (SUPPORTED_Pause | SUPPORTED_Asym_Pause)) {
+ phydev->supported &= ~(SUPPORTED_Pause | SUPPORTED_Asym_Pause);
+ phydev->supported |= phydrv->features &
+ (SUPPORTED_Pause | SUPPORTED_Asym_Pause);
+ } else {
+ phydev->supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;
+ }
+
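/*
 * Editor's note (illustrative): under the logic above, drivers that now
 * declare plain PHY_BASIC_FEATURES / PHY_GBIT_FEATURES (as converted
 * throughout this patch) get both Pause bits supplied by phylib; a
 * hypothetical erratum driver wanting symmetric pause only would declare
 *
 *	.features = PHY_BASIC_FEATURES | SUPPORTED_Pause,
 *
 * and phy_probe() would then leave SUPPORTED_Asym_Pause cleared.
 */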
/* Set the state to READY by default */
phydev->state = PHY_READY;
diff --git a/drivers/net/phy/phy_led_triggers.c b/drivers/net/phy/phy_led_triggers.c
new file mode 100644
index 000000000000..fa62bdf2f526
--- /dev/null
+++ b/drivers/net/phy/phy_led_triggers.c
@@ -0,0 +1,134 @@
+/* Copyright (C) 2016 National Instruments Corp.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include <linux/leds.h>
+#include <linux/phy.h>
+#include <linux/netdevice.h>
+
+static struct phy_led_trigger *phy_speed_to_led_trigger(struct phy_device *phy,
+ unsigned int speed)
+{
+ unsigned int i;
+
+ for (i = 0; i < phy->phy_num_led_triggers; i++) {
+ if (phy->phy_led_triggers[i].speed == speed)
+ return &phy->phy_led_triggers[i];
+ }
+ return NULL;
+}
+
+void phy_led_trigger_change_speed(struct phy_device *phy)
+{
+ struct phy_led_trigger *plt;
+
+ if (!phy->link)
+ goto out_change_speed;
+
+ if (phy->speed == 0)
+ return;
+
+ plt = phy_speed_to_led_trigger(phy, phy->speed);
+ if (!plt) {
+ netdev_alert(phy->attached_dev,
+ "No phy led trigger registered for speed(%d)\n",
+ phy->speed);
+ goto out_change_speed;
+ }
+
+ if (plt != phy->last_triggered) {
+ led_trigger_event(&phy->last_triggered->trigger, LED_OFF);
+ led_trigger_event(&plt->trigger, LED_FULL);
+ phy->last_triggered = plt;
+ }
+ return;
+
+out_change_speed:
+ if (phy->last_triggered) {
+ led_trigger_event(&phy->last_triggered->trigger,
+ LED_OFF);
+ phy->last_triggered = NULL;
+ }
+}
+EXPORT_SYMBOL_GPL(phy_led_trigger_change_speed);
+
+static int phy_led_trigger_register(struct phy_device *phy,
+ struct phy_led_trigger *plt,
+ unsigned int speed)
+{
+ char name_suffix[PHY_LED_TRIGGER_SPEED_SUFFIX_SIZE];
+
+ plt->speed = speed;
+
+ if (speed < SPEED_1000)
+ snprintf(name_suffix, sizeof(name_suffix), "%dMbps", speed);
+ else if (speed == SPEED_2500)
+ snprintf(name_suffix, sizeof(name_suffix), "2.5Gbps");
+ else
+ snprintf(name_suffix, sizeof(name_suffix), "%dGbps",
+ DIV_ROUND_CLOSEST(speed, 1000));
+
+ snprintf(plt->name, sizeof(plt->name), PHY_ID_FMT ":%s",
+ phy->mdio.bus->id, phy->mdio.addr, name_suffix);
+ plt->trigger.name = plt->name;
+
+ return led_trigger_register(&plt->trigger);
+}
+
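/*
 * Editor's note (illustrative): for a PHY at address 1 on a bus with id
 * "e000d000.mdio" (hypothetical), the PHY_ID_FMT-based name above yields
 * triggers such as "e000d000.mdio:01:100Mbps" and "e000d000.mdio:01:1Gbps",
 * selectable through /sys/class/leds/<led>/trigger.
 */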
+static void phy_led_trigger_unregister(struct phy_led_trigger *plt)
+{
+ led_trigger_unregister(&plt->trigger);
+}
+
+int phy_led_triggers_register(struct phy_device *phy)
+{
+ int i, err;
+ unsigned int speeds[50];
+
+ phy->phy_num_led_triggers = phy_supported_speeds(phy, speeds,
+ ARRAY_SIZE(speeds));
+ if (!phy->phy_num_led_triggers)
+ return 0;
+
+ phy->phy_led_triggers = devm_kzalloc(&phy->mdio.dev,
+ sizeof(struct phy_led_trigger) *
+ phy->phy_num_led_triggers,
+ GFP_KERNEL);
+ if (!phy->phy_led_triggers)
+ return -ENOMEM;
+
+ for (i = 0; i < phy->phy_num_led_triggers; i++) {
+ err = phy_led_trigger_register(phy, &phy->phy_led_triggers[i],
+ speeds[i]);
+ if (err)
+ goto out_unreg;
+ }
+
+ phy->last_triggered = NULL;
+ phy_led_trigger_change_speed(phy);
+
+ return 0;
+out_unreg:
+ while (i--)
+ phy_led_trigger_unregister(&phy->phy_led_triggers[i]);
+ devm_kfree(&phy->mdio.dev, phy->phy_led_triggers);
+ return err;
+}
+EXPORT_SYMBOL_GPL(phy_led_triggers_register);
+
+void phy_led_triggers_unregister(struct phy_device *phy)
+{
+ int i;
+
+ for (i = 0; i < phy->phy_num_led_triggers; i++)
+ phy_led_trigger_unregister(&phy->phy_led_triggers[i]);
+}
+EXPORT_SYMBOL_GPL(phy_led_triggers_unregister);
diff --git a/drivers/net/phy/smsc.c b/drivers/net/phy/smsc.c
index b62c4aaee40b..fb32eaf2255d 100644
--- a/drivers/net/phy/smsc.c
+++ b/drivers/net/phy/smsc.c
@@ -168,8 +168,7 @@ static struct phy_driver smsc_phy_driver[] = {
.phy_id_mask = 0xfffffff0,
.name = "SMSC LAN83C185",
- .features = (PHY_BASIC_FEATURES | SUPPORTED_Pause
- | SUPPORTED_Asym_Pause),
+ .features = PHY_BASIC_FEATURES,
.flags = PHY_HAS_INTERRUPT | PHY_HAS_MAGICANEG,
.probe = smsc_phy_probe,
@@ -191,8 +190,7 @@ static struct phy_driver smsc_phy_driver[] = {
.phy_id_mask = 0xfffffff0,
.name = "SMSC LAN8187",
- .features = (PHY_BASIC_FEATURES | SUPPORTED_Pause
- | SUPPORTED_Asym_Pause),
+ .features = PHY_BASIC_FEATURES,
.flags = PHY_HAS_INTERRUPT | PHY_HAS_MAGICANEG,
.probe = smsc_phy_probe,
@@ -214,8 +212,7 @@ static struct phy_driver smsc_phy_driver[] = {
.phy_id_mask = 0xfffffff0,
.name = "SMSC LAN8700",
- .features = (PHY_BASIC_FEATURES | SUPPORTED_Pause
- | SUPPORTED_Asym_Pause),
+ .features = PHY_BASIC_FEATURES,
.flags = PHY_HAS_INTERRUPT | PHY_HAS_MAGICANEG,
.probe = smsc_phy_probe,
@@ -237,8 +234,7 @@ static struct phy_driver smsc_phy_driver[] = {
.phy_id_mask = 0xfffffff0,
.name = "SMSC LAN911x Internal PHY",
- .features = (PHY_BASIC_FEATURES | SUPPORTED_Pause
- | SUPPORTED_Asym_Pause),
+ .features = PHY_BASIC_FEATURES,
.flags = PHY_HAS_INTERRUPT | PHY_HAS_MAGICANEG,
.probe = smsc_phy_probe,
@@ -259,8 +255,7 @@ static struct phy_driver smsc_phy_driver[] = {
.phy_id_mask = 0xfffffff0,
.name = "SMSC LAN8710/LAN8720",
- .features = (PHY_BASIC_FEATURES | SUPPORTED_Pause
- | SUPPORTED_Asym_Pause),
+ .features = PHY_BASIC_FEATURES,
.flags = PHY_HAS_INTERRUPT | PHY_HAS_MAGICANEG,
.probe = smsc_phy_probe,
@@ -282,8 +277,7 @@ static struct phy_driver smsc_phy_driver[] = {
.phy_id_mask = 0xfffffff0,
.name = "SMSC LAN8740",
- .features = (PHY_BASIC_FEATURES | SUPPORTED_Pause
- | SUPPORTED_Asym_Pause),
+ .features = PHY_BASIC_FEATURES,
.flags = PHY_HAS_INTERRUPT | PHY_HAS_MAGICANEG,
.probe = smsc_phy_probe,
diff --git a/drivers/net/phy/vitesse.c b/drivers/net/phy/vitesse.c
index 24b4a09468dd..f78ff0279648 100644
--- a/drivers/net/phy/vitesse.c
+++ b/drivers/net/phy/vitesse.c
@@ -69,6 +69,7 @@
#define PHY_ID_VSC8234 0x000fc620
#define PHY_ID_VSC8244 0x000fc6c0
#define PHY_ID_VSC8514 0x00070670
+#define PHY_ID_VSC8572 0x000704d0
#define PHY_ID_VSC8574 0x000704a0
#define PHY_ID_VSC8601 0x00070420
#define PHY_ID_VSC8662 0x00070660
@@ -166,6 +167,7 @@ static int vsc82xx_config_intr(struct phy_device *phydev)
(phydev->drv->phy_id == PHY_ID_VSC8234 ||
phydev->drv->phy_id == PHY_ID_VSC8244 ||
phydev->drv->phy_id == PHY_ID_VSC8514 ||
+ phydev->drv->phy_id == PHY_ID_VSC8572 ||
phydev->drv->phy_id == PHY_ID_VSC8574 ||
phydev->drv->phy_id == PHY_ID_VSC8601) ?
MII_VSC8244_IMASK_MASK :
@@ -291,6 +293,17 @@ static struct phy_driver vsc82xx_driver[] = {
.ack_interrupt = &vsc824x_ack_interrupt,
.config_intr = &vsc82xx_config_intr,
}, {
+ .phy_id = PHY_ID_VSC8572,
+ .name = "Vitesse VSC8572",
+ .phy_id_mask = 0x000ffff0,
+ .features = PHY_GBIT_FEATURES,
+ .flags = PHY_HAS_INTERRUPT,
+ .config_init = &vsc824x_config_init,
+ .config_aneg = &vsc82x4_config_aneg,
+ .read_status = &genphy_read_status,
+ .ack_interrupt = &vsc824x_ack_interrupt,
+ .config_intr = &vsc82xx_config_intr,
+}, {
.phy_id = PHY_ID_VSC8574,
.name = "Vitesse VSC8574",
.phy_id_mask = 0x000ffff0,
@@ -355,6 +368,7 @@ static struct mdio_device_id __maybe_unused vitesse_tbl[] = {
{ PHY_ID_VSC8234, 0x000ffff0 },
{ PHY_ID_VSC8244, 0x000fffc0 },
{ PHY_ID_VSC8514, 0x000ffff0 },
+ { PHY_ID_VSC8572, 0x000ffff0 },
{ PHY_ID_VSC8574, 0x000ffff0 },
{ PHY_ID_VSC8662, 0x000ffff0 },
{ PHY_ID_VSC8221, 0x000ffff0 },
diff --git a/drivers/net/plip/plip.c b/drivers/net/plip/plip.c
index 9c4b41a4df7d..3c55ea357f35 100644
--- a/drivers/net/plip/plip.c
+++ b/drivers/net/plip/plip.c
@@ -270,7 +270,6 @@ static const struct net_device_ops plip_netdev_ops = {
.ndo_stop = plip_close,
.ndo_start_xmit = plip_tx_packet,
.ndo_do_ioctl = plip_ioctl,
- .ndo_change_mtu = eth_change_mtu,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
};
diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
index 5489c0ec1d9a..3d3b1f4339ef 100644
--- a/drivers/net/ppp/ppp_generic.c
+++ b/drivers/net/ppp/ppp_generic.c
@@ -204,7 +204,7 @@ static atomic_t ppp_unit_count = ATOMIC_INIT(0);
static atomic_t channel_count = ATOMIC_INIT(0);
/* per-net private data for this module */
-static int ppp_net_id __read_mostly;
+static unsigned int ppp_net_id __read_mostly;
struct ppp_net {
/* units to ppp mapping */
struct idr units_idr;
diff --git a/drivers/net/ppp/pppoe.c b/drivers/net/ppp/pppoe.c
index 4ddae8118c85..f017c72bb7fd 100644
--- a/drivers/net/ppp/pppoe.c
+++ b/drivers/net/ppp/pppoe.c
@@ -95,7 +95,7 @@ static const struct proto_ops pppoe_ops;
static const struct ppp_channel_ops pppoe_chan_ops;
/* per-net private data for this module */
-static int pppoe_net_id __read_mostly;
+static unsigned int pppoe_net_id __read_mostly;
struct pppoe_net {
/*
* we could use _single_ hash table for all
diff --git a/drivers/net/rionet.c b/drivers/net/rionet.c
index a31f4610b493..300bb1479b3a 100644
--- a/drivers/net/rionet.c
+++ b/drivers/net/rionet.c
@@ -466,17 +466,6 @@ static void rionet_set_msglevel(struct net_device *ndev, u32 value)
rnet->msg_enable = value;
}
-static int rionet_change_mtu(struct net_device *ndev, int new_mtu)
-{
- if ((new_mtu < 68) || (new_mtu > RIONET_MAX_MTU)) {
- printk(KERN_ERR "%s: Invalid MTU size %d\n",
- ndev->name, new_mtu);
- return -EINVAL;
- }
- ndev->mtu = new_mtu;
- return 0;
-}
-
static const struct ethtool_ops rionet_ethtool_ops = {
.get_drvinfo = rionet_get_drvinfo,
.get_msglevel = rionet_get_msglevel,
@@ -488,7 +477,6 @@ static const struct net_device_ops rionet_netdev_ops = {
.ndo_open = rionet_open,
.ndo_stop = rionet_close,
.ndo_start_xmit = rionet_start_xmit,
- .ndo_change_mtu = rionet_change_mtu,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
};
@@ -525,6 +513,9 @@ static int rionet_setup_netdev(struct rio_mport *mport, struct net_device *ndev)
ndev->netdev_ops = &rionet_netdev_ops;
ndev->mtu = RIONET_MAX_MTU;
+ /* MTU range: 68 - 4082 */
+ ndev->min_mtu = ETH_MIN_MTU;
+ ndev->max_mtu = RIONET_MAX_MTU;
ndev->features = NETIF_F_LLTX;
SET_NETDEV_DEV(ndev, &mport->dev);
ndev->ethtool_ops = &rionet_ethtool_ops;
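/*
 * Editor's note (illustrative): this and the surrounding conversions follow
 * the same pattern -- drop the per-driver ndo_change_mtu range check and
 * publish the valid range instead:
 *
 *	ndev->min_mtu = ETH_MIN_MTU;
 *	ndev->max_mtu = RIONET_MAX_MTU;
 *
 * so the networking core can reject out-of-range values in dev_set_mtu()
 * before the driver is ever called.
 */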
diff --git a/drivers/net/sb1000.c b/drivers/net/sb1000.c
index aad0b59d41e3..8b8b53259783 100644
--- a/drivers/net/sb1000.c
+++ b/drivers/net/sb1000.c
@@ -141,7 +141,6 @@ static const struct net_device_ops sb1000_netdev_ops = {
.ndo_start_xmit = sb1000_start_xmit,
.ndo_do_ioctl = sb1000_dev_ioctl,
.ndo_stop = sb1000_close,
- .ndo_change_mtu = eth_change_mtu,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
};
diff --git a/drivers/net/slip/slip.c b/drivers/net/slip/slip.c
index 9ed6d1c1ee45..7e933d8ff811 100644
--- a/drivers/net/slip/slip.c
+++ b/drivers/net/slip/slip.c
@@ -561,12 +561,7 @@ static int sl_change_mtu(struct net_device *dev, int new_mtu)
{
struct slip *sl = netdev_priv(dev);
- if (new_mtu < 68 || new_mtu > 65534)
- return -EINVAL;
-
- if (new_mtu != dev->mtu)
- return sl_realloc_bufs(sl, new_mtu);
- return 0;
+ return sl_realloc_bufs(sl, new_mtu);
}
/* Netdevice get statistics request */
@@ -663,6 +658,10 @@ static void sl_setup(struct net_device *dev)
dev->addr_len = 0;
dev->tx_queue_len = 10;
+ /* MTU range: 68 - 65534 */
+ dev->min_mtu = 68;
+ dev->max_mtu = 65534;
+
/* New-style flags. */
dev->flags = IFF_NOARP|IFF_POINTOPOINT|IFF_MULTICAST;
}
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index a380649bf6b5..bdc58567d10e 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -2150,13 +2150,7 @@ static struct rtnl_link_ops team_link_ops __read_mostly = {
* Generic netlink custom interface
***********************************/
-static struct genl_family team_nl_family = {
- .id = GENL_ID_GENERATE,
- .name = TEAM_GENL_NAME,
- .version = TEAM_GENL_VERSION,
- .maxattr = TEAM_ATTR_MAX,
- .netnsok = true,
-};
+static struct genl_family team_nl_family;
static const struct nla_policy team_nl_policy[TEAM_ATTR_MAX + 1] = {
[TEAM_ATTR_UNSPEC] = { .type = NLA_UNSPEC, },
@@ -2746,6 +2740,18 @@ static const struct genl_multicast_group team_nl_mcgrps[] = {
{ .name = TEAM_GENL_CHANGE_EVENT_MC_GRP_NAME, },
};
+static struct genl_family team_nl_family __ro_after_init = {
+ .name = TEAM_GENL_NAME,
+ .version = TEAM_GENL_VERSION,
+ .maxattr = TEAM_ATTR_MAX,
+ .netnsok = true,
+ .module = THIS_MODULE,
+ .ops = team_nl_ops,
+ .n_ops = ARRAY_SIZE(team_nl_ops),
+ .mcgrps = team_nl_mcgrps,
+ .n_mcgrps = ARRAY_SIZE(team_nl_mcgrps),
+};
+
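/*
 * Editor's note (illustrative): with GENL_ID_GENERATE gone, the family id
 * is assigned at registration time; the ops, multicast groups, and owning
 * module become members of the struct, so a single
 * genl_register_family(&team_nl_family) call replaces
 * genl_register_family_with_ops_groups().
 */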
static int team_nl_send_multicast(struct sk_buff *skb,
struct team *team, u32 portid)
{
@@ -2767,10 +2773,9 @@ static int team_nl_send_event_port_get(struct team *team,
port);
}
-static int team_nl_init(void)
+static int __init team_nl_init(void)
{
- return genl_register_family_with_ops_groups(&team_nl_family, team_nl_ops,
- team_nl_mcgrps);
+ return genl_register_family(&team_nl_family);
}
static void team_nl_fini(void)
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index db6acecabeaa..57e88b814700 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -878,13 +878,6 @@ static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
sk_filter(tfile->socket.sk, skb))
goto drop;
- /* Limit the number of packets queued by dividing txq length with the
- * number of queues.
- */
- if (skb_queue_len(&tfile->socket.sk->sk_receive_queue) * numqueues
- >= dev->tx_queue_len)
- goto drop;
-
if (unlikely(skb_orphan_frags(skb, GFP_ATOMIC)))
goto drop;
@@ -925,18 +918,6 @@ static void tun_net_mclist(struct net_device *dev)
*/
}
-#define MIN_MTU 68
-#define MAX_MTU 65535
-
-static int
-tun_net_change_mtu(struct net_device *dev, int new_mtu)
-{
- if (new_mtu < MIN_MTU || new_mtu + dev->hard_header_len > MAX_MTU)
- return -EINVAL;
- dev->mtu = new_mtu;
- return 0;
-}
-
static netdev_features_t tun_net_fix_features(struct net_device *dev,
netdev_features_t features)
{
@@ -1014,7 +995,6 @@ static const struct net_device_ops tun_netdev_ops = {
.ndo_open = tun_net_open,
.ndo_stop = tun_net_close,
.ndo_start_xmit = tun_net_xmit,
- .ndo_change_mtu = tun_net_change_mtu,
.ndo_fix_features = tun_net_fix_features,
.ndo_select_queue = tun_select_queue,
#ifdef CONFIG_NET_POLL_CONTROLLER
@@ -1029,7 +1009,6 @@ static const struct net_device_ops tap_netdev_ops = {
.ndo_open = tun_net_open,
.ndo_stop = tun_net_close,
.ndo_start_xmit = tun_net_xmit,
- .ndo_change_mtu = tun_net_change_mtu,
.ndo_fix_features = tun_net_fix_features,
.ndo_set_rx_mode = tun_net_mclist,
.ndo_set_mac_address = eth_mac_addr,
@@ -1062,6 +1041,9 @@ static void tun_flow_uninit(struct tun_struct *tun)
tun_flow_flush(tun);
}
+#define MIN_MTU 68
+#define MAX_MTU 65535
+
/* Initialize net device. */
static void tun_net_init(struct net_device *dev)
{
@@ -1092,6 +1074,9 @@ static void tun_net_init(struct net_device *dev)
break;
}
+
+ dev->min_mtu = MIN_MTU;
+ dev->max_mtu = MAX_MTU - dev->hard_header_len;
}
/* Character device part */
@@ -1171,7 +1156,6 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
bool zerocopy = false;
int err;
u32 rxhash;
- ssize_t n;
if (!(tun->dev->flags & IFF_UP))
return -EIO;
@@ -1181,8 +1165,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
return -EINVAL;
len -= sizeof(pi);
- n = copy_from_iter(&pi, sizeof(pi), from);
- if (n != sizeof(pi))
+ if (!copy_from_iter_full(&pi, sizeof(pi), from))
return -EFAULT;
}
@@ -1191,8 +1174,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
return -EINVAL;
len -= tun->vnet_hdr_sz;
- n = copy_from_iter(&gso, sizeof(gso), from);
- if (n != sizeof(gso))
+ if (!copy_from_iter_full(&gso, sizeof(gso), from))
return -EFAULT;
if ((gso.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) &&
@@ -1255,8 +1237,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
return -EFAULT;
}
- err = virtio_net_hdr_to_skb(skb, &gso, tun_is_little_endian(tun));
- if (err) {
+ if (virtio_net_hdr_to_skb(skb, &gso, tun_is_little_endian(tun))) {
this_cpu_inc(tun->pcpu_stats->rx_frame_errors);
kfree_skb(skb);
return -EINVAL;
@@ -1302,7 +1283,13 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
skb_probe_transport_header(skb, 0);
rxhash = skb_get_hash(skb);
+#ifndef CONFIG_4KSTACKS
+ local_bh_disable();
+ netif_receive_skb(skb);
+ local_bh_enable();
+#else
netif_rx_ni(skb);
+#endif
stats = get_cpu_ptr(tun->pcpu_stats);
u64_stats_update_begin(&stats->syncp);
@@ -1367,15 +1354,13 @@ static ssize_t tun_put_user(struct tun_struct *tun,
}
if (vnet_hdr_sz) {
- struct virtio_net_hdr gso = { 0 }; /* no info leak */
- int ret;
+ struct virtio_net_hdr gso;
if (iov_iter_count(iter) < vnet_hdr_sz)
return -EINVAL;
- ret = virtio_net_hdr_from_skb(skb, &gso,
- tun_is_little_endian(tun));
- if (ret) {
+ if (virtio_net_hdr_from_skb(skb, &gso,
+ tun_is_little_endian(tun))) {
struct skb_shared_info *sinfo = skb_shinfo(skb);
pr_err("unexpected GSO type: "
"0x%x, gso_size %d, hdr_len %d\n",
@@ -1991,7 +1976,7 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
int le;
int ret;
- if (cmd == TUNSETIFF || cmd == TUNSETQUEUE || _IOC_TYPE(cmd) == 0x89) {
+ if (cmd == TUNSETIFF || cmd == TUNSETQUEUE || _IOC_TYPE(cmd) == SOCK_IOC_TYPE) {
if (copy_from_user(&ifr, argp, ifreq_len))
return -EFAULT;
} else {
@@ -2011,7 +1996,11 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
rtnl_lock();
tun = __tun_get(tfile);
- if (cmd == TUNSETIFF && !tun) {
+ if (cmd == TUNSETIFF) {
+ ret = -EEXIST;
+ if (tun)
+ goto unlock;
+
ifr.ifr_name[IFNAMSIZ-1] = '\0';
ret = tun_set_iff(sock_net(&tfile->sk), file, &ifr);
diff --git a/drivers/net/usb/Kconfig b/drivers/net/usb/Kconfig
index cdde59089f72..3dd490f53e48 100644
--- a/drivers/net/usb/Kconfig
+++ b/drivers/net/usb/Kconfig
@@ -114,6 +114,11 @@ config USB_LAN78XX
help
This option adds support for Microchip LAN78XX based USB 2
& USB 3 10/100/1000 Ethernet adapters.
+ LAN7800 : USB 3 to 10/100/1000 Ethernet adapter
+ LAN7850 : USB 2 to 10/100/1000 Ethernet adapter
+ LAN7801 : USB 3 to 10/100/1000 Ethernet adapter (MAC only)
+
+ A proper PHY driver is required for LAN7801.
To compile this driver as a module, choose M here: the
module will be called lan78xx.
diff --git a/drivers/net/usb/asix_devices.c b/drivers/net/usb/asix_devices.c
index dc7b6392e75a..6c646e228833 100644
--- a/drivers/net/usb/asix_devices.c
+++ b/drivers/net/usb/asix_devices.c
@@ -1026,9 +1026,6 @@ static int ax88178_change_mtu(struct net_device *net, int new_mtu)
netdev_dbg(dev->net, "ax88178_change_mtu() new_mtu=%d\n", new_mtu);
- if (new_mtu <= 0 || ll_mtu > 16384)
- return -EINVAL;
-
if ((ll_mtu % dev->maxpacket) == 0)
return -EDOM;
@@ -1081,6 +1078,7 @@ static int ax88178_bind(struct usbnet *dev, struct usb_interface *intf)
dev->net->netdev_ops = &ax88178_netdev_ops;
dev->net->ethtool_ops = &ax88178_ethtool_ops;
+ dev->net->max_mtu = 16384 - (dev->net->hard_header_len + 4);
/* Blink LEDS so users know driver saw dongle */
asix_sw_reset(dev, 0, 0);
diff --git a/drivers/net/usb/ax88172a.c b/drivers/net/usb/ax88172a.c
index 49a3bc107d05..6308386b09df 100644
--- a/drivers/net/usb/ax88172a.c
+++ b/drivers/net/usb/ax88172a.c
@@ -149,14 +149,6 @@ static const struct net_device_ops ax88172a_netdev_ops = {
.ndo_set_rx_mode = asix_set_multicast,
};
-static int ax88172a_nway_reset(struct net_device *net)
-{
- if (!net->phydev)
- return -ENODEV;
-
- return phy_start_aneg(net->phydev);
-}
-
static const struct ethtool_ops ax88172a_ethtool_ops = {
.get_drvinfo = asix_get_drvinfo,
.get_link = usbnet_get_link,
@@ -167,7 +159,7 @@ static const struct ethtool_ops ax88172a_ethtool_ops = {
.get_eeprom_len = asix_get_eeprom_len,
.get_eeprom = asix_get_eeprom,
.set_eeprom = asix_set_eeprom,
- .nway_reset = ax88172a_nway_reset,
+ .nway_reset = phy_ethtool_nway_reset,
.get_link_ksettings = phy_ethtool_get_link_ksettings,
.set_link_ksettings = phy_ethtool_set_link_ksettings,
};
diff --git a/drivers/net/usb/ax88179_178a.c b/drivers/net/usb/ax88179_178a.c
index 8a6675d92b98..a3a7db0702d8 100644
--- a/drivers/net/usb/ax88179_178a.c
+++ b/drivers/net/usb/ax88179_178a.c
@@ -907,9 +907,6 @@ static int ax88179_change_mtu(struct net_device *net, int new_mtu)
struct usbnet *dev = netdev_priv(net);
u16 tmp16;
- if (new_mtu <= 0 || new_mtu > 4088)
- return -EINVAL;
-
net->mtu = new_mtu;
dev->hard_mtu = net->mtu + net->hard_header_len;
@@ -1266,6 +1263,7 @@ static int ax88179_bind(struct usbnet *dev, struct usb_interface *intf)
dev->net->netdev_ops = &ax88179_netdev_ops;
dev->net->ethtool_ops = &ax88179_ethtool_ops;
dev->net->needed_headroom = 8;
+ dev->net->max_mtu = 4088;
/* Initialize MII structure */
dev->mii.dev = dev->net;
diff --git a/drivers/net/usb/catc.c b/drivers/net/usb/catc.c
index d9ca05d3ac8e..a1f2f6f1e614 100644
--- a/drivers/net/usb/catc.c
+++ b/drivers/net/usb/catc.c
@@ -761,7 +761,6 @@ static const struct net_device_ops catc_netdev_ops = {
.ndo_tx_timeout = catc_tx_timeout,
.ndo_set_rx_mode = catc_set_multicast_list,
- .ndo_change_mtu = eth_change_mtu,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
};
diff --git a/drivers/net/usb/cdc-phonet.c b/drivers/net/usb/cdc-phonet.c
index ff2270ead2e6..eb52de8205f0 100644
--- a/drivers/net/usb/cdc-phonet.c
+++ b/drivers/net/usb/cdc-phonet.c
@@ -276,21 +276,11 @@ static int usbpn_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
return -ENOIOCTLCMD;
}
-static int usbpn_set_mtu(struct net_device *dev, int new_mtu)
-{
- if ((new_mtu < PHONET_MIN_MTU) || (new_mtu > PHONET_MAX_MTU))
- return -EINVAL;
-
- dev->mtu = new_mtu;
- return 0;
-}
-
static const struct net_device_ops usbpn_ops = {
.ndo_open = usbpn_open,
.ndo_stop = usbpn_close,
.ndo_start_xmit = usbpn_xmit,
.ndo_do_ioctl = usbpn_ioctl,
- .ndo_change_mtu = usbpn_set_mtu,
};
static void usbpn_setup(struct net_device *dev)
@@ -301,6 +291,8 @@ static void usbpn_setup(struct net_device *dev)
dev->type = ARPHRD_PHONET;
dev->flags = IFF_POINTOPOINT | IFF_NOARP;
dev->mtu = PHONET_MAX_MTU;
+ dev->min_mtu = PHONET_MIN_MTU;
+ dev->max_mtu = PHONET_MAX_MTU;
dev->hard_header_len = 1;
dev->dev_addr[0] = PN_MEDIA_USB;
dev->addr_len = 1;
diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
index dd623f674487..fe7b2886cb6b 100644
--- a/drivers/net/usb/cdc_ether.c
+++ b/drivers/net/usb/cdc_ether.c
@@ -711,6 +711,20 @@ static const struct usb_device_id products[] = {
.driver_info = 0,
},
+/* ThinkPad USB-C Dock (based on Realtek RTL8153) */
+{
+ USB_DEVICE_AND_INTERFACE_INFO(LENOVO_VENDOR_ID, 0x3062, USB_CLASS_COMM,
+ USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
+ .driver_info = 0,
+},
+
+/* ThinkPad Thunderbolt 3 Dock (based on Realtek RTL8153) */
+{
+ USB_DEVICE_AND_INTERFACE_INFO(LENOVO_VENDOR_ID, 0x3069, USB_CLASS_COMM,
+ USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
+ .driver_info = 0,
+},
+
/* Lenovo Thinkpad USB 3.0 Ethernet Adapters (based on Realtek RTL8153) */
{
USB_DEVICE_AND_INTERFACE_INFO(LENOVO_VENDOR_ID, 0x7205, USB_CLASS_COMM,
@@ -718,6 +732,20 @@ static const struct usb_device_id products[] = {
.driver_info = 0,
},
+/* Lenovo USB C to Ethernet Adapter (based on Realtek RTL8153) */
+{
+ USB_DEVICE_AND_INTERFACE_INFO(LENOVO_VENDOR_ID, 0x720c, USB_CLASS_COMM,
+ USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
+ .driver_info = 0,
+},
+
+/* Lenovo USB-C Travel Hub (based on Realtek RTL8153) */
+{
+ USB_DEVICE_AND_INTERFACE_INFO(LENOVO_VENDOR_ID, 0x7214, USB_CLASS_COMM,
+ USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
+ .driver_info = 0,
+},
+
/* NVIDIA Tegra USB 3.0 Ethernet Adapters (based on Realtek RTL8153) */
{
USB_DEVICE_AND_INTERFACE_INFO(NVIDIA_VENDOR_ID, 0x09ff, USB_CLASS_COMM,
diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
index afbfc0f656f3..2d1a6f2e16ab 100644
--- a/drivers/net/usb/cdc_ncm.c
+++ b/drivers/net/usb/cdc_ncm.c
@@ -740,10 +740,6 @@ static void cdc_ncm_free(struct cdc_ncm_ctx *ctx)
int cdc_ncm_change_mtu(struct net_device *net, int new_mtu)
{
struct usbnet *dev = netdev_priv(net);
- int maxmtu = cdc_ncm_max_dgram_size(dev) - cdc_ncm_eth_hlen(dev);
-
- if (new_mtu <= 0 || new_mtu > maxmtu)
- return -EINVAL;
net->mtu = new_mtu;
cdc_ncm_set_dgram_size(dev, new_mtu + cdc_ncm_eth_hlen(dev));
@@ -913,6 +909,7 @@ int cdc_ncm_bind_common(struct usbnet *dev, struct usb_interface *intf, u8 data_
/* must handle MTU changes */
dev->net->netdev_ops = &cdc_ncm_netdev_ops;
+ dev->net->max_mtu = cdc_ncm_max_dgram_size(dev) - cdc_ncm_eth_hlen(dev);
return 0;
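Because the NCM maximum datagram size is negotiated per device, max_mtu cannot be a compile-time constant; it is computed once at bind time from the same expression the removed check in cdc_ncm_change_mtu() evaluated on every call. Worked example with assumed values:

    /* Worked example (assumed values, not from a specific device):
     *   cdc_ncm_max_dgram_size(dev) = 16384
     *   cdc_ncm_eth_hlen(dev)       = 14   (plain Ethernet framing)
     *   => dev->net->max_mtu        = 16384 - 14 = 16370
     */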
diff --git a/drivers/net/usb/kaweth.c b/drivers/net/usb/kaweth.c
index 66b34ddbe216..338aed5da14d 100644
--- a/drivers/net/usb/kaweth.c
+++ b/drivers/net/usb/kaweth.c
@@ -982,7 +982,6 @@ static const struct net_device_ops kaweth_netdev_ops = {
.ndo_tx_timeout = kaweth_tx_timeout,
.ndo_set_rx_mode = kaweth_set_rx_mode,
.ndo_get_stats = kaweth_netdev_stats,
- .ndo_change_mtu = eth_change_mtu,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
};
diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c
index f33460cec79f..08f8703e4d54 100644
--- a/drivers/net/usb/lan78xx.c
+++ b/drivers/net/usb/lan78xx.c
@@ -30,13 +30,17 @@
#include <linux/ipv6.h>
#include <linux/mdio.h>
#include <net/ip6_checksum.h>
+#include <linux/interrupt.h>
+#include <linux/irqdomain.h>
+#include <linux/irq.h>
+#include <linux/irqchip/chained_irq.h>
#include <linux/microchipphy.h>
#include "lan78xx.h"
#define DRIVER_AUTHOR "WOOJUNG HUH <woojung.huh@microchip.com>"
#define DRIVER_DESC "LAN78XX USB 3.0 Gigabit Ethernet Devices"
#define DRIVER_NAME "lan78xx"
-#define DRIVER_VERSION "1.0.4"
+#define DRIVER_VERSION "1.0.6"
#define TX_TIMEOUT_JIFFIES (5 * HZ)
#define THROTTLE_JIFFIES (HZ / 8)
@@ -63,6 +67,7 @@
#define LAN78XX_USB_VENDOR_ID (0x0424)
#define LAN7800_USB_PRODUCT_ID (0x7800)
#define LAN7850_USB_PRODUCT_ID (0x7850)
+#define LAN7801_USB_PRODUCT_ID (0x7801)
#define LAN78XX_EEPROM_MAGIC (0x78A5)
#define LAN78XX_OTP_MAGIC (0x78F3)
@@ -89,6 +94,38 @@
/* statistic update interval (mSec) */
#define STAT_UPDATE_TIMER (1 * 1000)
+/* interrupt bits reported by the interrupt endpoint */
+#define MAX_INT_EP (32)
+#define INT_EP_INTEP (31)
+#define INT_EP_OTP_WR_DONE (28)
+#define INT_EP_EEE_TX_LPI_START (26)
+#define INT_EP_EEE_TX_LPI_STOP (25)
+#define INT_EP_EEE_RX_LPI (24)
+#define INT_EP_MAC_RESET_TIMEOUT (23)
+#define INT_EP_RDFO (22)
+#define INT_EP_TXE (21)
+#define INT_EP_USB_STATUS (20)
+#define INT_EP_TX_DIS (19)
+#define INT_EP_RX_DIS (18)
+#define INT_EP_PHY (17)
+#define INT_EP_DP (16)
+#define INT_EP_MAC_ERR (15)
+#define INT_EP_TDFU (14)
+#define INT_EP_TDFO (13)
+#define INT_EP_UTX (12)
+#define INT_EP_GPIO_11 (11)
+#define INT_EP_GPIO_10 (10)
+#define INT_EP_GPIO_9 (9)
+#define INT_EP_GPIO_8 (8)
+#define INT_EP_GPIO_7 (7)
+#define INT_EP_GPIO_6 (6)
+#define INT_EP_GPIO_5 (5)
+#define INT_EP_GPIO_4 (4)
+#define INT_EP_GPIO_3 (3)
+#define INT_EP_GPIO_2 (2)
+#define INT_EP_GPIO_1 (1)
+#define INT_EP_GPIO_0 (0)
+
static const char lan78xx_gstrings[][ETH_GSTRING_LEN] = {
"RX FCS Errors",
"RX Alignment Errors",
@@ -296,6 +333,15 @@ struct statstage {
struct lan78xx_statstage64 curr_stat;
};
+struct irq_domain_data {
+ struct irq_domain *irqdomain;
+ unsigned int phyirq;
+ struct irq_chip *irqchip;
+ irq_flow_handler_t irq_handler;
+ u32 irqenable;
+ struct mutex irq_lock; /* for irq bus access */
+};
+
struct lan78xx_net {
struct net_device *net;
struct usb_device *udev;
@@ -345,14 +391,21 @@ struct lan78xx_net {
u32 chipid;
u32 chiprev;
struct mii_bus *mdiobus;
+ phy_interface_t interface;
int fc_autoneg;
u8 fc_request_control;
int delta;
struct statstage stats;
+
+ struct irq_domain_data domain_data;
};
+/* define external phy id */
+#define PHY_LAN8835 (0x0007C130)
+#define PHY_KSZ9031RNX (0x00221620)
+
/* use ethtool to change the level for any given device */
static int msg_level = -1;
module_param(msg_level, int, 0);
@@ -1092,15 +1145,10 @@ static int lan78xx_update_flowcontrol(struct lan78xx_net *dev, u8 duplex,
static int lan78xx_link_reset(struct lan78xx_net *dev)
{
struct phy_device *phydev = dev->net->phydev;
- struct ethtool_cmd ecmd = { .cmd = ETHTOOL_GSET };
+ struct ethtool_link_ksettings ecmd;
int ladv, radv, ret;
u32 buf;
- /* clear PHY interrupt status */
- ret = phy_read(phydev, LAN88XX_INT_STS);
- if (unlikely(ret < 0))
- return -EIO;
-
/* clear LAN78xx interrupt status */
ret = lan78xx_write_reg(dev, INT_STS, INT_STS_PHY_INT_);
if (unlikely(ret < 0))
@@ -1120,18 +1168,14 @@ static int lan78xx_link_reset(struct lan78xx_net *dev)
if (unlikely(ret < 0))
return -EIO;
- phy_mac_interrupt(phydev, 0);
-
del_timer(&dev->stat_monitor);
} else if (phydev->link && !dev->link_on) {
dev->link_on = true;
- phy_ethtool_gset(phydev, &ecmd);
-
- ret = phy_read(phydev, LAN88XX_INT_STS);
+ phy_ethtool_ksettings_get(phydev, &ecmd);
if (dev->udev->speed == USB_SPEED_SUPER) {
- if (ethtool_cmd_speed(&ecmd) == 1000) {
+ if (ecmd.base.speed == 1000) {
/* disable U2 */
ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
buf &= ~USB_CFG1_DEV_U2_INIT_EN_;
@@ -1159,10 +1203,10 @@ static int lan78xx_link_reset(struct lan78xx_net *dev)
netif_dbg(dev, link, dev->net,
"speed: %u duplex: %d anadv: 0x%04x anlpa: 0x%04x",
- ethtool_cmd_speed(&ecmd), ecmd.duplex, ladv, radv);
+ ecmd.base.speed, ecmd.base.duplex, ladv, radv);
- ret = lan78xx_update_flowcontrol(dev, ecmd.duplex, ladv, radv);
- phy_mac_interrupt(phydev, 1);
+ ret = lan78xx_update_flowcontrol(dev, ecmd.base.duplex, ladv,
+ radv);
if (!timer_pending(&dev->stat_monitor)) {
dev->delta = 1;
@@ -1201,7 +1245,10 @@ static void lan78xx_status(struct lan78xx_net *dev, struct urb *urb)
if (intdata & INT_ENP_PHY_INT) {
netif_dbg(dev, link, dev->net, "PHY INTR: 0x%08x\n", intdata);
- lan78xx_defer_kevent(dev, EVENT_LINK_RESET);
+ lan78xx_defer_kevent(dev, EVENT_LINK_RESET);
+
+ if (dev->domain_data.phyirq > 0)
+ generic_handle_irq(dev->domain_data.phyirq);
} else
netdev_warn(dev->net,
"unexpected interrupt: 0x%08x\n", intdata);
@@ -1406,11 +1453,6 @@ static u32 lan78xx_get_link(struct net_device *net)
return net->phydev->link;
}
-static int lan78xx_nway_reset(struct net_device *net)
-{
- return phy_start_aneg(net->phydev);
-}
-
static void lan78xx_get_drvinfo(struct net_device *net,
struct ethtool_drvinfo *info)
{
@@ -1435,88 +1477,26 @@ static void lan78xx_set_msglevel(struct net_device *net, u32 level)
dev->msg_enable = level;
}
-static int lan78xx_get_mdix_status(struct net_device *net)
-{
- struct phy_device *phydev = net->phydev;
- int buf;
-
- phy_write(phydev, LAN88XX_EXT_PAGE_ACCESS, LAN88XX_EXT_PAGE_SPACE_1);
- buf = phy_read(phydev, LAN88XX_EXT_MODE_CTRL);
- phy_write(phydev, LAN88XX_EXT_PAGE_ACCESS, LAN88XX_EXT_PAGE_SPACE_0);
-
- return buf;
-}
-
-static void lan78xx_set_mdix_status(struct net_device *net, __u8 mdix_ctrl)
-{
- struct lan78xx_net *dev = netdev_priv(net);
- struct phy_device *phydev = net->phydev;
- int buf;
-
- if (mdix_ctrl == ETH_TP_MDI) {
- phy_write(phydev, LAN88XX_EXT_PAGE_ACCESS,
- LAN88XX_EXT_PAGE_SPACE_1);
- buf = phy_read(phydev, LAN88XX_EXT_MODE_CTRL);
- buf &= ~LAN88XX_EXT_MODE_CTRL_MDIX_MASK_;
- phy_write(phydev, LAN88XX_EXT_MODE_CTRL,
- buf | LAN88XX_EXT_MODE_CTRL_MDI_);
- phy_write(phydev, LAN88XX_EXT_PAGE_ACCESS,
- LAN88XX_EXT_PAGE_SPACE_0);
- } else if (mdix_ctrl == ETH_TP_MDI_X) {
- phy_write(phydev, LAN88XX_EXT_PAGE_ACCESS,
- LAN88XX_EXT_PAGE_SPACE_1);
- buf = phy_read(phydev, LAN88XX_EXT_MODE_CTRL);
- buf &= ~LAN88XX_EXT_MODE_CTRL_MDIX_MASK_;
- phy_write(phydev, LAN88XX_EXT_MODE_CTRL,
- buf | LAN88XX_EXT_MODE_CTRL_MDI_X_);
- phy_write(phydev, LAN88XX_EXT_PAGE_ACCESS,
- LAN88XX_EXT_PAGE_SPACE_0);
- } else if (mdix_ctrl == ETH_TP_MDI_AUTO) {
- phy_write(phydev, LAN88XX_EXT_PAGE_ACCESS,
- LAN88XX_EXT_PAGE_SPACE_1);
- buf = phy_read(phydev, LAN88XX_EXT_MODE_CTRL);
- buf &= ~LAN88XX_EXT_MODE_CTRL_MDIX_MASK_;
- phy_write(phydev, LAN88XX_EXT_MODE_CTRL,
- buf | LAN88XX_EXT_MODE_CTRL_AUTO_MDIX_);
- phy_write(phydev, LAN88XX_EXT_PAGE_ACCESS,
- LAN88XX_EXT_PAGE_SPACE_0);
- }
- dev->mdix_ctrl = mdix_ctrl;
-}
-
-static int lan78xx_get_settings(struct net_device *net, struct ethtool_cmd *cmd)
+static int lan78xx_get_link_ksettings(struct net_device *net,
+ struct ethtool_link_ksettings *cmd)
{
struct lan78xx_net *dev = netdev_priv(net);
struct phy_device *phydev = net->phydev;
int ret;
- int buf;
ret = usb_autopm_get_interface(dev->intf);
if (ret < 0)
return ret;
- ret = phy_ethtool_gset(phydev, cmd);
-
- buf = lan78xx_get_mdix_status(net);
-
- buf &= LAN88XX_EXT_MODE_CTRL_MDIX_MASK_;
- if (buf == LAN88XX_EXT_MODE_CTRL_AUTO_MDIX_) {
- cmd->eth_tp_mdix = ETH_TP_MDI_AUTO;
- cmd->eth_tp_mdix_ctrl = ETH_TP_MDI_AUTO;
- } else if (buf == LAN88XX_EXT_MODE_CTRL_MDI_) {
- cmd->eth_tp_mdix = ETH_TP_MDI;
- cmd->eth_tp_mdix_ctrl = ETH_TP_MDI;
- } else if (buf == LAN88XX_EXT_MODE_CTRL_MDI_X_) {
- cmd->eth_tp_mdix = ETH_TP_MDI_X;
- cmd->eth_tp_mdix_ctrl = ETH_TP_MDI_X;
- }
+ ret = phy_ethtool_ksettings_get(phydev, cmd);
usb_autopm_put_interface(dev->intf);
return ret;
}
-static int lan78xx_set_settings(struct net_device *net, struct ethtool_cmd *cmd)
+static int lan78xx_set_link_ksettings(struct net_device *net,
+ const struct ethtool_link_ksettings *cmd)
{
struct lan78xx_net *dev = netdev_priv(net);
struct phy_device *phydev = net->phydev;
@@ -1527,14 +1507,10 @@ static int lan78xx_set_settings(struct net_device *net, struct ethtool_cmd *cmd)
if (ret < 0)
return ret;
- if (dev->mdix_ctrl != cmd->eth_tp_mdix_ctrl) {
- lan78xx_set_mdix_status(net, cmd->eth_tp_mdix_ctrl);
- }
-
/* change speed & duplex */
- ret = phy_ethtool_sset(phydev, cmd);
+ ret = phy_ethtool_ksettings_set(phydev, cmd);
- if (!cmd->autoneg) {
+ if (!cmd->base.autoneg) {
/* force link down */
temp = phy_read(phydev, MII_BMCR);
phy_write(phydev, MII_BMCR, temp | BMCR_LOOPBACK);
@@ -1552,9 +1528,9 @@ static void lan78xx_get_pause(struct net_device *net,
{
struct lan78xx_net *dev = netdev_priv(net);
struct phy_device *phydev = net->phydev;
- struct ethtool_cmd ecmd = { .cmd = ETHTOOL_GSET };
+ struct ethtool_link_ksettings ecmd;
- phy_ethtool_gset(phydev, &ecmd);
+ phy_ethtool_ksettings_get(phydev, &ecmd);
pause->autoneg = dev->fc_autoneg;
@@ -1570,12 +1546,12 @@ static int lan78xx_set_pause(struct net_device *net,
{
struct lan78xx_net *dev = netdev_priv(net);
struct phy_device *phydev = net->phydev;
- struct ethtool_cmd ecmd = { .cmd = ETHTOOL_GSET };
+ struct ethtool_link_ksettings ecmd;
int ret;
- phy_ethtool_gset(phydev, &ecmd);
+ phy_ethtool_ksettings_get(phydev, &ecmd);
- if (pause->autoneg && !ecmd.autoneg) {
+ if (pause->autoneg && !ecmd.base.autoneg) {
ret = -EINVAL;
goto exit;
}
@@ -1587,13 +1563,21 @@ static int lan78xx_set_pause(struct net_device *net,
if (pause->tx_pause)
dev->fc_request_control |= FLOW_CTRL_TX;
- if (ecmd.autoneg) {
+ if (ecmd.base.autoneg) {
u32 mii_adv;
+ u32 advertising;
- ecmd.advertising &= ~(ADVERTISED_Pause | ADVERTISED_Asym_Pause);
+ ethtool_convert_link_mode_to_legacy_u32(
+ &advertising, ecmd.link_modes.advertising);
+
+ advertising &= ~(ADVERTISED_Pause | ADVERTISED_Asym_Pause);
mii_adv = (u32)mii_advertise_flowctrl(dev->fc_request_control);
- ecmd.advertising |= mii_adv_to_ethtool_adv_t(mii_adv);
- phy_ethtool_sset(phydev, &ecmd);
+ advertising |= mii_adv_to_ethtool_adv_t(mii_adv);
+
+ ethtool_convert_legacy_u32_to_link_mode(
+ ecmd.link_modes.advertising, advertising);
+
+ phy_ethtool_ksettings_set(phydev, &ecmd);
}
dev->fc_autoneg = pause->autoneg;
@@ -1605,12 +1589,10 @@ exit:
static const struct ethtool_ops lan78xx_ethtool_ops = {
.get_link = lan78xx_get_link,
- .nway_reset = lan78xx_nway_reset,
+ .nway_reset = phy_ethtool_nway_reset,
.get_drvinfo = lan78xx_get_drvinfo,
.get_msglevel = lan78xx_get_msglevel,
.set_msglevel = lan78xx_set_msglevel,
- .get_settings = lan78xx_get_settings,
- .set_settings = lan78xx_set_settings,
.get_eeprom_len = lan78xx_ethtool_get_eeprom_len,
.get_eeprom = lan78xx_ethtool_get_eeprom,
.set_eeprom = lan78xx_ethtool_set_eeprom,
@@ -1623,6 +1605,8 @@ static const struct ethtool_ops lan78xx_ethtool_ops = {
.set_eee = lan78xx_set_eee,
.get_pauseparam = lan78xx_get_pause,
.set_pauseparam = lan78xx_set_pause,
+ .get_link_ksettings = lan78xx_get_link_ksettings,
+ .set_link_ksettings = lan78xx_set_link_ksettings,
};
static int lan78xx_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd)
@@ -1719,6 +1703,7 @@ static int lan78xx_mdiobus_read(struct mii_bus *bus, int phy_id, int idx)
done:
mutex_unlock(&dev->phy_mutex);
usb_autopm_put_interface(dev->intf);
+
return ret;
}
@@ -1781,6 +1766,10 @@ static int lan78xx_mdio_init(struct lan78xx_net *dev)
/* set to internal PHY id */
dev->mdiobus->phy_mask = ~(1 << 1);
break;
+ case ID_REV_CHIP_ID_7801_:
+ /* scan thru PHYAD[2..0] */
+ dev->mdiobus->phy_mask = ~(0xFF);
+ break;
}
ret = mdiobus_register(dev->mdiobus);
@@ -1834,6 +1823,168 @@ static void lan78xx_link_status_change(struct net_device *net)
}
}
+static int irq_map(struct irq_domain *d, unsigned int irq,
+ irq_hw_number_t hwirq)
+{
+ struct irq_domain_data *data = d->host_data;
+
+ irq_set_chip_data(irq, data);
+ irq_set_chip_and_handler(irq, data->irqchip, data->irq_handler);
+ irq_set_noprobe(irq);
+
+ return 0;
+}
+
+static void irq_unmap(struct irq_domain *d, unsigned int irq)
+{
+ irq_set_chip_and_handler(irq, NULL, NULL);
+ irq_set_chip_data(irq, NULL);
+}
+
+static const struct irq_domain_ops chip_domain_ops = {
+ .map = irq_map,
+ .unmap = irq_unmap,
+};
+
+static void lan78xx_irq_mask(struct irq_data *irqd)
+{
+ struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);
+
+ data->irqenable &= ~BIT(irqd_to_hwirq(irqd));
+}
+
+static void lan78xx_irq_unmask(struct irq_data *irqd)
+{
+ struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);
+
+ data->irqenable |= BIT(irqd_to_hwirq(irqd));
+}
+
+static void lan78xx_irq_bus_lock(struct irq_data *irqd)
+{
+ struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);
+
+ mutex_lock(&data->irq_lock);
+}
+
+static void lan78xx_irq_bus_sync_unlock(struct irq_data *irqd)
+{
+ struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);
+ struct lan78xx_net *dev =
+ container_of(data, struct lan78xx_net, domain_data);
+ u32 buf;
+ int ret;
+
+ /* call register access here because irq_bus_lock & irq_bus_sync_unlock
+ * are the only two callbacks executed in a non-atomic context.
+ */
+ ret = lan78xx_read_reg(dev, INT_EP_CTL, &buf);
+ if (buf != data->irqenable)
+ ret = lan78xx_write_reg(dev, INT_EP_CTL, data->irqenable);
+
+ mutex_unlock(&data->irq_lock);
+}
+
+static struct irq_chip lan78xx_irqchip = {
+ .name = "lan78xx-irqs",
+ .irq_mask = lan78xx_irq_mask,
+ .irq_unmask = lan78xx_irq_unmask,
+ .irq_bus_lock = lan78xx_irq_bus_lock,
+ .irq_bus_sync_unlock = lan78xx_irq_bus_sync_unlock,
+};
+
+static int lan78xx_setup_irq_domain(struct lan78xx_net *dev)
+{
+ struct device_node *of_node;
+ struct irq_domain *irqdomain;
+ unsigned int irqmap = 0;
+ u32 buf;
+ int ret = 0;
+
+ of_node = dev->udev->dev.parent->of_node;
+
+ mutex_init(&dev->domain_data.irq_lock);
+
+ lan78xx_read_reg(dev, INT_EP_CTL, &buf);
+ dev->domain_data.irqenable = buf;
+
+ dev->domain_data.irqchip = &lan78xx_irqchip;
+ dev->domain_data.irq_handler = handle_simple_irq;
+
+ irqdomain = irq_domain_add_simple(of_node, MAX_INT_EP, 0,
+ &chip_domain_ops, &dev->domain_data);
+ if (irqdomain) {
+ /* create mapping for PHY interrupt */
+ irqmap = irq_create_mapping(irqdomain, INT_EP_PHY);
+ if (!irqmap) {
+ irq_domain_remove(irqdomain);
+
+ irqdomain = NULL;
+ ret = -EINVAL;
+ }
+ } else {
+ ret = -EINVAL;
+ }
+
+ dev->domain_data.irqdomain = irqdomain;
+ dev->domain_data.phyirq = irqmap;
+
+ return ret;
+}
+
+static void lan78xx_remove_irq_domain(struct lan78xx_net *dev)
+{
+ if (dev->domain_data.phyirq > 0) {
+ irq_dispose_mapping(dev->domain_data.phyirq);
+
+ if (dev->domain_data.irqdomain)
+ irq_domain_remove(dev->domain_data.irqdomain);
+ }
+ dev->domain_data.phyirq = 0;
+ dev->domain_data.irqdomain = NULL;
+}
+
+static int lan8835_fixup(struct phy_device *phydev)
+{
+ int buf;
+ int ret;
+ struct lan78xx_net *dev = netdev_priv(phydev->attached_dev);
+
+ /* LED2/PME_N/IRQ_N/RGMII_ID pin to IRQ_N mode */
+ buf = phy_read_mmd_indirect(phydev, 0x8010, 3);
+ buf &= ~0x1800;
+ buf |= 0x0800;
+ phy_write_mmd_indirect(phydev, 0x8010, 3, buf);
+
+ /* RGMII MAC TXC Delay Enable */
+ ret = lan78xx_write_reg(dev, MAC_RGMII_ID,
+ MAC_RGMII_ID_TXC_DELAY_EN_);
+
+ /* RGMII TX DLL Tune Adjust */
+ ret = lan78xx_write_reg(dev, RGMII_TX_BYP_DLL, 0x3D00);
+
+ dev->interface = PHY_INTERFACE_MODE_RGMII_TXID;
+
+ return 1;
+}
+
+static int ksz9031rnx_fixup(struct phy_device *phydev)
+{
+ struct lan78xx_net *dev = netdev_priv(phydev->attached_dev);
+
+ /* Micrel KSZ9031RNX PHY configuration */
+ /* RGMII Control Signal Pad Skew */
+ phy_write_mmd_indirect(phydev, 4, 2, 0x0077);
+ /* RGMII RX Data Pad Skew */
+ phy_write_mmd_indirect(phydev, 5, 2, 0x7777);
+ /* RGMII RX Clock Pad Skew */
+ phy_write_mmd_indirect(phydev, 8, 2, 0x1FF);
+
+ dev->interface = PHY_INTERFACE_MODE_RGMII_RXID;
+
+ return 1;
+}
+
static int lan78xx_phy_init(struct lan78xx_net *dev)
{
int ret;
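The new irq_domain turns the status bits of the USB interrupt endpoint into ordinary Linux interrupts: lan78xx_status() forwards the PHY bit through generic_handle_irq(), and because USB register access can sleep, mask/unmask changes are only accumulated in irqenable and written back as a single INT_EP_CTL update from the irq_bus_sync_unlock callback. A consumer-side sketch, using a hypothetical handler for another status bit:

    /* Consumer sketch (my_gpio0_thread_fn is hypothetical): map another
     * interrupt-EP bit and request it as a threaded IRQ. The PHY bit
     * (INT_EP_PHY) is wired to phylib the same way via phydev->irq.
     */
    unsigned int virq = irq_create_mapping(dev->domain_data.irqdomain,
    				       INT_EP_GPIO_0);
    if (virq)
    	ret = request_threaded_irq(virq, NULL, my_gpio0_thread_fn,
    				   IRQF_ONESHOT, "lan78xx-gpio0", dev);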
@@ -1846,28 +1997,61 @@ static int lan78xx_phy_init(struct lan78xx_net *dev)
return -EIO;
}
- /* Enable PHY interrupts.
- * We handle our own interrupt
- */
- ret = phy_read(phydev, LAN88XX_INT_STS);
- ret = phy_write(phydev, LAN88XX_INT_MASK,
- LAN88XX_INT_MASK_MDINTPIN_EN_ |
- LAN88XX_INT_MASK_LINK_CHANGE_);
+ if ((dev->chipid == ID_REV_CHIP_ID_7800_) ||
+ (dev->chipid == ID_REV_CHIP_ID_7850_)) {
+ phydev->is_internal = true;
+ dev->interface = PHY_INTERFACE_MODE_GMII;
- phydev->irq = PHY_IGNORE_INTERRUPT;
+ } else if (dev->chipid == ID_REV_CHIP_ID_7801_) {
+ if (!phydev->drv) {
+ netdev_err(dev->net, "no PHY driver found\n");
+ return -EIO;
+ }
+
+ dev->interface = PHY_INTERFACE_MODE_RGMII;
+
+ /* external PHY fixup for KSZ9031RNX */
+ ret = phy_register_fixup_for_uid(PHY_KSZ9031RNX, 0xfffffff0,
+ ksz9031rnx_fixup);
+ if (ret < 0) {
+ netdev_err(dev->net, "fail to register fixup\n");
+ return ret;
+ }
+ /* external PHY fixup for LAN8835 */
+ ret = phy_register_fixup_for_uid(PHY_LAN8835, 0xfffffff0,
+ lan8835_fixup);
+ if (ret < 0) {
+ netdev_err(dev->net, "fail to register fixup\n");
+ return ret;
+ }
+ /* add more external PHY fixup here if needed */
+
+ phydev->is_internal = false;
+ } else {
+ netdev_err(dev->net, "unknown ID found\n");
+ ret = -EIO;
+ goto error;
+ }
+
+ /* if phyirq is not set, use polling mode in phylib */
+ if (dev->domain_data.phyirq > 0)
+ phydev->irq = dev->domain_data.phyirq;
+ else
+ phydev->irq = 0;
+ netdev_dbg(dev->net, "phydev->irq = %d\n", phydev->irq);
+
+ /* set to AUTOMDIX */
+ phydev->mdix = ETH_TP_MDI_AUTO;
ret = phy_connect_direct(dev->net, phydev,
lan78xx_link_status_change,
- PHY_INTERFACE_MODE_GMII);
+ dev->interface);
if (ret) {
netdev_err(dev->net, "can't attach PHY to %s\n",
dev->mdiobus->id);
return -EIO;
}
- /* set to AUTOMDIX */
- lan78xx_set_mdix_status(dev->net, ETH_TP_MDI_AUTO);
-
/* MAC doesn't support 1000T Half */
phydev->supported &= ~SUPPORTED_1000baseT_Half;
@@ -1886,6 +2070,12 @@ static int lan78xx_phy_init(struct lan78xx_net *dev)
netif_dbg(dev, ifup, dev->net, "phy initialised successfully");
return 0;
+
+error:
+ phy_unregister_fixup_for_uid(PHY_KSZ9031RNX, 0xfffffff0);
+ phy_unregister_fixup_for_uid(PHY_LAN8835, 0xfffffff0);
+
+ return ret;
}
static int lan78xx_set_rx_max_frame_length(struct lan78xx_net *dev, int size)
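phy_register_fixup_for_uid() registers a quirk that phylib runs against any PHY whose ID matches under the given mask, before a PHY driver binds; here it programs pin modes and RGMII delays on the two external PHYs the LAN7801 can be paired with. Registration sketch with a hypothetical fixup:

    /* Hypothetical fixup: runs for any PHY matching the KSZ9031RNX ID
     * family (PHY_KSZ9031RNX under mask 0xfffffff0) on any MDIO bus.
     */
    static int my_fixup(struct phy_device *phydev)
    {
    	/* program pad-skew/strap registers here */
    	return 0;
    }

    ret = phy_register_fixup_for_uid(PHY_KSZ9031RNX, 0xfffffff0, my_fixup);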
@@ -1970,11 +2160,6 @@ static int lan78xx_change_mtu(struct net_device *netdev, int new_mtu)
int old_rx_urb_size = dev->rx_urb_size;
int ret;
- if (new_mtu > MAX_SINGLE_PACKET_SIZE)
- return -EINVAL;
-
- if (new_mtu <= 0)
- return -EINVAL;
/* no second zero-length packet read wanted after mtu-sized packets */
if ((ll_mtu % dev->maxpacket) == 0)
return -EDOM;
@@ -2247,14 +2432,12 @@ static int lan78xx_reset(struct lan78xx_net *dev)
} while ((buf & PMT_CTL_PHY_RST_) || !(buf & PMT_CTL_READY_));
ret = lan78xx_read_reg(dev, MAC_CR, &buf);
+ /* LAN7801 only has RGMII mode */
+ if (dev->chipid == ID_REV_CHIP_ID_7801_)
+ buf &= ~MAC_CR_GMII_EN_;
buf |= MAC_CR_AUTO_DUPLEX_ | MAC_CR_AUTO_SPEED_;
ret = lan78xx_write_reg(dev, MAC_CR, buf);
- /* enable PHY interrupts */
- ret = lan78xx_read_reg(dev, INT_EP_CTL, &buf);
- buf |= INT_ENP_PHY_INT;
- ret = lan78xx_write_reg(dev, INT_EP_CTL, buf);
-
ret = lan78xx_read_reg(dev, MAC_TX, &buf);
buf |= MAC_TX_TXEN_;
ret = lan78xx_write_reg(dev, MAC_TX, buf);
@@ -2378,8 +2561,12 @@ static int lan78xx_stop(struct net_device *net)
if (timer_pending(&dev->stat_monitor))
del_timer_sync(&dev->stat_monitor);
+ phy_unregister_fixup_for_uid(PHY_KSZ9031RNX, 0xfffffff0);
+ phy_unregister_fixup_for_uid(PHY_LAN8835, 0xfffffff0);
+
phy_stop(net->phydev);
phy_disconnect(net->phydev);
+
net->phydev = NULL;
clear_bit(EVENT_DEV_OPEN, &dev->flags);
@@ -2663,6 +2850,14 @@ static int lan78xx_bind(struct lan78xx_net *dev, struct usb_interface *intf)
dev->net->hw_features = dev->net->features;
+ ret = lan78xx_setup_irq_domain(dev);
+ if (ret < 0) {
+ netdev_warn(dev->net,
+ "lan78xx_setup_irq_domain() failed : %d", ret);
+ kfree(pdata);
+ return ret;
+ }
+
/* Init all registers */
ret = lan78xx_reset(dev);
@@ -2679,6 +2874,8 @@ static void lan78xx_unbind(struct lan78xx_net *dev, struct usb_interface *intf)
{
struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
+ lan78xx_remove_irq_domain(dev);
+
lan78xx_remove_mdio(dev);
if (pdata) {
@@ -3378,6 +3575,9 @@ static int lan78xx_probe(struct usb_interface *intf,
if (netdev->mtu > (dev->hard_mtu - netdev->hard_header_len))
netdev->mtu = dev->hard_mtu - netdev->hard_header_len;
+ /* MTU range: 68 - 9000 */
+ netdev->max_mtu = MAX_SINGLE_PACKET_SIZE;
+
dev->ep_blkin = (intf->cur_altsetting)->endpoint + 0;
dev->ep_blkout = (intf->cur_altsetting)->endpoint + 1;
dev->ep_intr = (intf->cur_altsetting)->endpoint + 2;
@@ -3789,6 +3989,10 @@ static const struct usb_device_id products[] = {
/* LAN7850 USB Gigabit Ethernet Device */
USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7850_USB_PRODUCT_ID),
},
+ {
+ /* LAN7801 USB Gigabit Ethernet Device */
+ USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7801_USB_PRODUCT_ID),
+ },
{},
};
MODULE_DEVICE_TABLE(usb, products);
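The get/set_settings to get/set_link_ksettings conversion replaces ethtool_cmd's fixed u32 advertising mask with link-mode bitmaps; where legacy mask arithmetic is still the convenient form, as in lan78xx_set_pause() above, the bitmap is round-tripped through the convert helpers. Minimal sketch of that round-trip:

    /* Round-trip sketch: edit a link-mode bitmap via the legacy u32 mask */
    struct ethtool_link_ksettings ecmd;
    u32 adv;

    phy_ethtool_ksettings_get(phydev, &ecmd);
    ethtool_convert_link_mode_to_legacy_u32(&adv, ecmd.link_modes.advertising);
    adv &= ~(ADVERTISED_Pause | ADVERTISED_Asym_Pause);
    adv |= mii_adv_to_ethtool_adv_t(mii_advertise_flowctrl(FLOW_CTRL_TX));
    ethtool_convert_legacy_u32_to_link_mode(ecmd.link_modes.advertising, adv);
    phy_ethtool_ksettings_set(phydev, &ecmd);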
diff --git a/drivers/net/usb/lan78xx.h b/drivers/net/usb/lan78xx.h
index 40927906109a..25aa54611774 100644
--- a/drivers/net/usb/lan78xx.h
+++ b/drivers/net/usb/lan78xx.h
@@ -108,6 +108,7 @@
#define ID_REV_CHIP_REV_MASK_ (0x0000FFFF)
#define ID_REV_CHIP_ID_7800_ (0x7800)
#define ID_REV_CHIP_ID_7850_ (0x7850)
+#define ID_REV_CHIP_ID_7801_ (0x7801)
#define FPGA_REV (0x04)
#define FPGA_REV_MINOR_MASK_ (0x0000FF00)
@@ -550,6 +551,7 @@
#define LTM_INACTIVE1_TIMER10_ (0x0000FFFF)
#define MAC_CR (0x100)
+#define MAC_CR_GMII_EN_ (0x00080000)
#define MAC_CR_EEE_TX_CLK_STOP_EN_ (0x00040000)
#define MAC_CR_EEE_EN_ (0x00020000)
#define MAC_CR_EEE_TLAR_EN_ (0x00010000)
@@ -787,6 +789,18 @@
#define PHY_DEV_ID_MODEL_MASK_ (0x0FC00000)
#define PHY_DEV_ID_OUI_MASK_ (0x003FFFFF)
+#define RGMII_TX_BYP_DLL (0x708)
+#define RGMII_TX_BYP_DLL_TX_TUNE_ADJ_MASK_ (0x000FC00)
+#define RGMII_TX_BYP_DLL_TX_TUNE_SEL_MASK_ (0x00003F0)
+#define RGMII_TX_BYP_DLL_TX_DLL_RESET_ (0x0000002)
+#define RGMII_TX_BYP_DLL_TX_DLL_BYPASS_ (0x0000001)
+
+#define RGMII_RX_BYP_DLL (0x70C)
+#define RGMII_RX_BYP_DLL_RX_TUNE_ADJ_MASK_ (0x000FC00)
+#define RGMII_RX_BYP_DLL_RX_TUNE_SEL_MASK_ (0x00003F0)
+#define RGMII_RX_BYP_DLL_RX_DLL_RESET_ (0x0000002)
+#define RGMII_RX_BYP_DLL_RX_DLL_BYPASS_ (0x0000001)
+
#define OTP_BASE_ADDR (0x00001000)
#define OTP_ADDR_RANGE_ (0x1FF)
diff --git a/drivers/net/usb/pegasus.c b/drivers/net/usb/pegasus.c
index 1434e5dd5f9c..399f7ee57aea 100644
--- a/drivers/net/usb/pegasus.c
+++ b/drivers/net/usb/pegasus.c
@@ -1273,7 +1273,6 @@ static const struct net_device_ops pegasus_netdev_ops = {
.ndo_set_rx_mode = pegasus_set_multicast,
.ndo_get_stats = pegasus_netdev_stats,
.ndo_tx_timeout = pegasus_tx_timeout,
- .ndo_change_mtu = eth_change_mtu,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
};
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index efb84f092492..7dc61228c55b 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -4116,14 +4116,12 @@ static int rtl8152_change_mtu(struct net_device *dev, int new_mtu)
switch (tp->version) {
case RTL_VER_01:
case RTL_VER_02:
- return eth_change_mtu(dev, new_mtu);
+ dev->mtu = new_mtu;
+ return 0;
default:
break;
}
- if (new_mtu < 68 || new_mtu > RTL8153_MAX_MTU)
- return -EINVAL;
-
ret = usb_autopm_get_interface(tp->intf);
if (ret < 0)
return ret;
@@ -4313,6 +4311,18 @@ static int rtl8152_probe(struct usb_interface *intf,
netdev->ethtool_ops = &ops;
netif_set_gso_max_size(netdev, RTL_LIMITED_TSO_SIZE);
+ /* MTU range: 68 - 1500 or 9194 */
+ netdev->min_mtu = ETH_MIN_MTU;
+ switch (tp->version) {
+ case RTL_VER_01:
+ case RTL_VER_02:
+ netdev->max_mtu = ETH_DATA_LEN;
+ break;
+ default:
+ netdev->max_mtu = RTL8153_MAX_MTU;
+ break;
+ }
+
tp->mii.dev = netdev;
tp->mii.mdio_read = read_mii_word;
tp->mii.mdio_write = write_mii_word;
@@ -4413,8 +4423,12 @@ static struct usb_device_id rtl8152_table[] = {
{REALTEK_USB_DEVICE(VENDOR_ID_REALTEK, 0x8152)},
{REALTEK_USB_DEVICE(VENDOR_ID_REALTEK, 0x8153)},
{REALTEK_USB_DEVICE(VENDOR_ID_SAMSUNG, 0xa101)},
- {REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x7205)},
{REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x304f)},
+ {REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x3062)},
+ {REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x3069)},
+ {REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x7205)},
+ {REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x720c)},
+ {REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x7214)},
{REALTEK_USB_DEVICE(VENDOR_ID_NVIDIA, 0x09ff)},
{}
};
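The new Lenovo IDs are added in two drivers on purpose: cdc_ether gains matching entries whose .driver_info is 0, so usbnet_probe() declines the device, and r8152 gains REALTEK_USB_DEVICE() entries so the vendor driver claims it instead of the generic CDC binding. Pairing sketch for one hypothetical product ID:

    /* Pairing sketch (0x1234 is a made-up ID, not from this patch) */
    /* cdc_ether.c: match but decline, leaving the device to r8152 */
    {
    	USB_DEVICE_AND_INTERFACE_INFO(LENOVO_VENDOR_ID, 0x1234, USB_CLASS_COMM,
    			USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
    	.driver_info = 0,
    },
    /* r8152.c: claim the same VID:PID */
    {REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x1234)},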
diff --git a/drivers/net/usb/rtl8150.c b/drivers/net/usb/rtl8150.c
index 7c72bfac89d0..93a1bda1c1e5 100644
--- a/drivers/net/usb/rtl8150.c
+++ b/drivers/net/usb/rtl8150.c
@@ -847,7 +847,6 @@ static const struct net_device_ops rtl8150_netdev_ops = {
.ndo_set_rx_mode = rtl8150_set_multicast,
.ndo_set_mac_address = rtl8150_set_mac_address,
- .ndo_change_mtu = eth_change_mtu,
.ndo_validate_addr = eth_validate_addr,
};
diff --git a/drivers/net/usb/sierra_net.c b/drivers/net/usb/sierra_net.c
index a251588762ec..12071f1582df 100644
--- a/drivers/net/usb/sierra_net.c
+++ b/drivers/net/usb/sierra_net.c
@@ -165,7 +165,6 @@ struct lsi_umts {
/* Forward definitions */
static void sierra_sync_timer(unsigned long syncdata);
-static int sierra_net_change_mtu(struct net_device *net, int new_mtu);
/* Our own net device operations structure */
static const struct net_device_ops sierra_net_device_ops = {
@@ -173,7 +172,7 @@ static const struct net_device_ops sierra_net_device_ops = {
.ndo_stop = usbnet_stop,
.ndo_start_xmit = usbnet_start_xmit,
.ndo_tx_timeout = usbnet_tx_timeout,
- .ndo_change_mtu = sierra_net_change_mtu,
+ .ndo_change_mtu = usbnet_change_mtu,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
};
@@ -622,15 +621,6 @@ static const struct ethtool_ops sierra_net_ethtool_ops = {
.nway_reset = usbnet_nway_reset,
};
-/* MTU can not be more than 1500 bytes, enforce it. */
-static int sierra_net_change_mtu(struct net_device *net, int new_mtu)
-{
- if (new_mtu > SIERRA_NET_MAX_SUPPORTED_MTU)
- return -EINVAL;
-
- return usbnet_change_mtu(net, new_mtu);
-}
-
static int sierra_net_get_fw_attr(struct usbnet *dev, u16 *datap)
{
int result = 0;
@@ -720,6 +710,7 @@ static int sierra_net_bind(struct usbnet *dev, struct usb_interface *intf)
dev->net->hard_header_len += SIERRA_NET_HIP_EXT_HDR_LEN;
dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len;
+ dev->net->max_mtu = SIERRA_NET_MAX_SUPPORTED_MTU;
/* Set up the netdev */
dev->net->flags |= IFF_NOARP;
diff --git a/drivers/net/usb/smsc75xx.c b/drivers/net/usb/smsc75xx.c
index 9af9799935db..0b17b40d7a4f 100644
--- a/drivers/net/usb/smsc75xx.c
+++ b/drivers/net/usb/smsc75xx.c
@@ -925,9 +925,6 @@ static int smsc75xx_change_mtu(struct net_device *netdev, int new_mtu)
struct usbnet *dev = netdev_priv(netdev);
int ret;
- if (new_mtu > MAX_SINGLE_PACKET_SIZE)
- return -EINVAL;
-
ret = smsc75xx_set_rx_max_frame_length(dev, new_mtu + ETH_HLEN);
if (ret < 0) {
netdev_warn(dev->net, "Failed to set mac rx frame length\n");
@@ -1448,6 +1445,7 @@ static int smsc75xx_bind(struct usbnet *dev, struct usb_interface *intf)
dev->net->flags |= IFF_MULTICAST;
dev->net->hard_header_len += SMSC75XX_TX_OVERHEAD;
dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len;
+ dev->net->max_mtu = MAX_SINGLE_PACKET_SIZE;
return 0;
}
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index d5071e364d40..3de65ea6531a 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -384,8 +384,6 @@ int usbnet_change_mtu (struct net_device *net, int new_mtu)
int old_hard_mtu = dev->hard_mtu;
int old_rx_urb_size = dev->rx_urb_size;
- if (new_mtu <= 0)
- return -EINVAL;
// no second zero-length packet read wanted after mtu-sized packets
if ((ll_mtu % dev->maxpacket) == 0)
return -EDOM;
@@ -1669,6 +1667,8 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod)
* bind() should set rx_urb_size in that case.
*/
dev->hard_mtu = net->mtu + net->hard_header_len;
+ net->min_mtu = 0;
+ net->max_mtu = ETH_MAX_MTU;
net->netdev_ops = &usbnet_netdev_ops;
net->watchdog_timeo = TX_TIMEOUT_JIFFIES;
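usbnet deliberately opens the widest window (min_mtu = 0, max_mtu = ETH_MAX_MTU) because it is a library driver: subdrivers narrow the range from their bind() callbacks, as ax88178, cdc_ncm, sierra_net and smsc75xx do elsewhere in this patch, while usbnet_change_mtu() keeps only the endpoint-specific -EDOM restriction. Sketch of a subdriver narrowing the default, with hypothetical values:

    /* Hypothetical subdriver bind(): narrow usbnet's wide default range */
    static int bar_bind(struct usbnet *dev, struct usb_interface *intf)
    {
    	dev->net->min_mtu = ETH_MIN_MTU;	/* tighten the 0 default */
    	dev->net->max_mtu = 4088;		/* device-specific ceiling */
    	return 0;
    }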
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index fbc853e64531..0520952aa096 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -23,9 +23,6 @@
#define DRV_NAME "veth"
#define DRV_VERSION "1.0"
-#define MIN_MTU 68 /* Min L3 MTU */
-#define MAX_MTU 65535 /* Max L3 MTU (arbitrary) */
-
struct pcpu_vstats {
u64 packets;
u64 bytes;
@@ -216,17 +213,9 @@ static int veth_close(struct net_device *dev)
return 0;
}
-static int is_valid_veth_mtu(int new_mtu)
+static int is_valid_veth_mtu(int mtu)
{
- return new_mtu >= MIN_MTU && new_mtu <= MAX_MTU;
-}
-
-static int veth_change_mtu(struct net_device *dev, int new_mtu)
-{
- if (!is_valid_veth_mtu(new_mtu))
- return -EINVAL;
- dev->mtu = new_mtu;
- return 0;
+ return mtu >= ETH_MIN_MTU && mtu <= ETH_MAX_MTU;
}
static int veth_dev_init(struct net_device *dev)
@@ -300,7 +289,6 @@ static const struct net_device_ops veth_netdev_ops = {
.ndo_open = veth_open,
.ndo_stop = veth_close,
.ndo_start_xmit = veth_xmit,
- .ndo_change_mtu = veth_change_mtu,
.ndo_get_stats64 = veth_get_stats64,
.ndo_set_rx_mode = veth_set_multicast_list,
.ndo_set_mac_address = eth_mac_addr,
@@ -337,6 +325,7 @@ static void veth_setup(struct net_device *dev)
NETIF_F_HW_VLAN_CTAG_RX |
NETIF_F_HW_VLAN_STAG_RX);
dev->destructor = veth_dev_free;
+ dev->max_mtu = ETH_MAX_MTU;
dev->hw_features = VETH_FEATURES;
dev->hw_enc_features = VETH_FEATURES;
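veth drops its ndo_change_mtu but keeps is_valid_veth_mtu(), now phrased with the shared ETH_MIN_MTU/ETH_MAX_MTU constants, because the helper is still needed when an MTU arrives through the IFLA_MTU netlink attribute at link-creation time, before the net_device range check can apply. Roughly, in the rtnl validate path:

    /* Sketch of the remaining caller (rtnl newlink validation) */
    if (tb[IFLA_MTU] &&
        !is_valid_veth_mtu(nla_get_u32(tb[IFLA_MTU])))
    	return -EINVAL;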
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index cbf1c613c67a..5deeda61d6d3 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -22,6 +22,7 @@
#include <linux/module.h>
#include <linux/virtio.h>
#include <linux/virtio_net.h>
+#include <linux/bpf.h>
#include <linux/scatterlist.h>
#include <linux/if_vlan.h>
#include <linux/slab.h>
@@ -81,6 +82,8 @@ struct receive_queue {
struct napi_struct napi;
+ struct bpf_prog __rcu *xdp_prog;
+
/* Chain pages by the private ptr. */
struct page *pages;
@@ -111,6 +114,9 @@ struct virtnet_info {
/* # of queue pairs currently used by the driver */
u16 curr_queue_pairs;
+ /* # of XDP queue pairs currently used by the driver */
+ u16 xdp_queue_pairs;
+
/* I like... big packets and I cannot lie! */
bool big_packets;
@@ -324,14 +330,148 @@ static struct sk_buff *page_to_skb(struct virtnet_info *vi,
return skb;
}
-static struct sk_buff *receive_small(struct virtnet_info *vi, void *buf, unsigned int len)
+static void virtnet_xdp_xmit(struct virtnet_info *vi,
+ struct receive_queue *rq,
+ struct send_queue *sq,
+ struct xdp_buff *xdp,
+ void *data)
+{
+ struct virtio_net_hdr_mrg_rxbuf *hdr;
+ unsigned int num_sg, len;
+ void *xdp_sent;
+ int err;
+
+ /* Free up any pending old buffers before queueing new ones. */
+ while ((xdp_sent = virtqueue_get_buf(sq->vq, &len)) != NULL) {
+ if (vi->mergeable_rx_bufs) {
+ struct page *sent_page = virt_to_head_page(xdp_sent);
+
+ put_page(sent_page);
+ } else { /* small buffer */
+ struct sk_buff *skb = xdp_sent;
+
+ kfree_skb(skb);
+ }
+ }
+
+ if (vi->mergeable_rx_bufs) {
+ /* Zero header and leave csum up to XDP layers */
+ hdr = xdp->data;
+ memset(hdr, 0, vi->hdr_len);
+
+ num_sg = 1;
+ sg_init_one(sq->sg, xdp->data, xdp->data_end - xdp->data);
+ } else { /* small buffer */
+ struct sk_buff *skb = data;
+
+ /* Zero header and leave csum up to XDP layers */
+ hdr = skb_vnet_hdr(skb);
+ memset(hdr, 0, vi->hdr_len);
+
+ num_sg = 2;
+ sg_init_table(sq->sg, 2);
+ sg_set_buf(sq->sg, hdr, vi->hdr_len);
+ skb_to_sgvec(skb, sq->sg + 1, 0, skb->len);
+ }
+ err = virtqueue_add_outbuf(sq->vq, sq->sg, num_sg,
+ data, GFP_ATOMIC);
+ if (unlikely(err)) {
+ if (vi->mergeable_rx_bufs) {
+ struct page *page = virt_to_head_page(xdp->data);
+
+ put_page(page);
+ } else /* small buffer */
+ kfree_skb(data);
+ return; /* on error abort to avoid unnecessary kick */
+ }
+
+ virtqueue_kick(sq->vq);
+}
+
+static u32 do_xdp_prog(struct virtnet_info *vi,
+ struct receive_queue *rq,
+ struct bpf_prog *xdp_prog,
+ void *data, int len)
+{
+ int hdr_padded_len;
+ struct xdp_buff xdp;
+ void *buf;
+ unsigned int qp;
+ u32 act;
+
+ if (vi->mergeable_rx_bufs) {
+ hdr_padded_len = sizeof(struct virtio_net_hdr_mrg_rxbuf);
+ xdp.data = data + hdr_padded_len;
+ xdp.data_end = xdp.data + (len - vi->hdr_len);
+ buf = data;
+ } else { /* small buffers */
+ struct sk_buff *skb = data;
+
+ xdp.data = skb->data;
+ xdp.data_end = xdp.data + len;
+ buf = skb->data;
+ }
+
+ act = bpf_prog_run_xdp(xdp_prog, &xdp);
+ switch (act) {
+ case XDP_PASS:
+ return XDP_PASS;
+ case XDP_TX:
+ qp = vi->curr_queue_pairs -
+ vi->xdp_queue_pairs +
+ smp_processor_id();
+ xdp.data = buf;
+ virtnet_xdp_xmit(vi, rq, &vi->sq[qp], &xdp, data);
+ return XDP_TX;
+ default:
+ bpf_warn_invalid_xdp_action(act);
+ case XDP_ABORTED:
+ case XDP_DROP:
+ return XDP_DROP;
+ }
+}
+
+static struct sk_buff *receive_small(struct net_device *dev,
+ struct virtnet_info *vi,
+ struct receive_queue *rq,
+ void *buf, unsigned int len)
{
struct sk_buff * skb = buf;
+ struct bpf_prog *xdp_prog;
len -= vi->hdr_len;
skb_trim(skb, len);
+ rcu_read_lock();
+ xdp_prog = rcu_dereference(rq->xdp_prog);
+ if (xdp_prog) {
+ struct virtio_net_hdr_mrg_rxbuf *hdr = buf;
+ u32 act;
+
+ if (unlikely(hdr->hdr.gso_type || hdr->hdr.flags))
+ goto err_xdp;
+ act = do_xdp_prog(vi, rq, xdp_prog, skb, len);
+ switch (act) {
+ case XDP_PASS:
+ break;
+ case XDP_TX:
+ rcu_read_unlock();
+ goto xdp_xmit;
+ case XDP_DROP:
+ default:
+ goto err_xdp;
+ }
+ }
+ rcu_read_unlock();
+
return skb;
+
+err_xdp:
+ rcu_read_unlock();
+ dev->stats.rx_dropped++;
+ kfree_skb(skb);
+xdp_xmit:
+ return NULL;
}
static struct sk_buff *receive_big(struct net_device *dev,
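The receive paths above run a BPF program of type BPF_PROG_TYPE_XDP on each packet before an skb is built: XDP_PASS continues normal processing, XDP_TX retransmits out of the same device, and everything else drops. For reference, a minimal program of the kind these hooks execute, as it might be written for clang's BPF target (a sketch, not part of this patch):

    /* Minimal XDP program sketch: drop IPv4, pass the rest.
     * Build with: clang -O2 -target bpf -c prog.c -o prog.o
     */
    #include <linux/bpf.h>
    #include <linux/if_ether.h>
    #include <asm/byteorder.h>

    __attribute__((section("xdp"), used))
    int xdp_drop_ipv4(struct xdp_md *ctx)
    {
    	void *data     = (void *)(long)ctx->data;
    	void *data_end = (void *)(long)ctx->data_end;
    	struct ethhdr *eth = data;

    	if ((void *)(eth + 1) > data_end)	/* verifier bounds check */
    		return XDP_ABORTED;
    	if (eth->h_proto == __constant_htons(ETH_P_IP))
    		return XDP_DROP;
    	return XDP_PASS;
    }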
@@ -354,6 +494,67 @@ err:
return NULL;
}
+/* The conditions to enable XDP should preclude the underlying device from
+ * sending packets across multiple buffers (num_buf > 1). However, per the
+ * spec it does not appear to be illegal to do so, merely against convention.
+ * So, to avoid making the system unresponsive, the packets are pushed into a
+ * page and the XDP program is run on the linearized copy. This will be
+ * extremely slow and we push a warning to the user to fix this as soon as
+ * possible. Fixing this may
+ * require resolving the underlying hardware to determine why multiple buffers
+ * are being received or simply loading the XDP program in the ingress stack
+ * after the skb is built because there is no advantage to running it here
+ * anymore.
+ */
+static struct page *xdp_linearize_page(struct receive_queue *rq,
+ u16 *num_buf,
+ struct page *p,
+ int offset,
+ unsigned int *len)
+{
+ struct page *page = alloc_page(GFP_ATOMIC);
+ unsigned int page_off = 0;
+
+ if (!page)
+ return NULL;
+
+ memcpy(page_address(page) + page_off, page_address(p) + offset, *len);
+ page_off += *len;
+
+ while (--*num_buf) {
+ unsigned int buflen;
+ unsigned long ctx;
+ void *buf;
+ int off;
+
+ ctx = (unsigned long)virtqueue_get_buf(rq->vq, &buflen);
+ if (unlikely(!ctx))
+ goto err_buf;
+
+ buf = mergeable_ctx_to_buf_address(ctx);
+ p = virt_to_head_page(buf);
+ off = buf - page_address(p);
+
+ /* guard against a misconfigured or uncooperative backend that
+ * is sending packet larger than the MTU.
+ */
+ if ((page_off + buflen) > PAGE_SIZE) {
+ put_page(p);
+ goto err_buf;
+ }
+
+ memcpy(page_address(page) + page_off,
+ page_address(p) + off, buflen);
+ page_off += buflen;
+ put_page(p);
+ }
+
+ *len = page_off;
+ return page;
+err_buf:
+ __free_pages(page, 0);
+ return NULL;
+}
+
static struct sk_buff *receive_mergeable(struct net_device *dev,
struct virtnet_info *vi,
struct receive_queue *rq,
@@ -365,11 +566,71 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
u16 num_buf = virtio16_to_cpu(vi->vdev, hdr->num_buffers);
struct page *page = virt_to_head_page(buf);
int offset = buf - page_address(page);
- unsigned int truesize = max(len, mergeable_ctx_to_buf_truesize(ctx));
+ struct sk_buff *head_skb, *curr_skb;
+ struct bpf_prog *xdp_prog;
+ unsigned int truesize;
+
+ head_skb = NULL;
+
+ rcu_read_lock();
+ xdp_prog = rcu_dereference(rq->xdp_prog);
+ if (xdp_prog) {
+ struct page *xdp_page;
+ u32 act;
+
+ /* This happens when rx buffer size is underestimated */
+ if (unlikely(num_buf > 1)) {
+ /* linearize data for XDP */
+ xdp_page = xdp_linearize_page(rq, &num_buf,
+ page, offset, &len);
+ if (!xdp_page)
+ goto err_xdp;
+ offset = 0;
+ } else {
+ xdp_page = page;
+ }
+
+ /* Transient failure which in theory could occur if
+ * in-flight packets from before XDP was enabled reach
+ * the receive path after XDP is loaded. In practice I
+ * was not able to create this condition.
+ */
+ if (unlikely(hdr->hdr.gso_type))
+ goto err_xdp;
+
+ act = do_xdp_prog(vi, rq, xdp_prog,
+ page_address(xdp_page) + offset, len);
+ switch (act) {
+ case XDP_PASS:
+ /* We can only create skb based on xdp_page. */
+ if (unlikely(xdp_page != page)) {
+ rcu_read_unlock();
+ put_page(page);
+ head_skb = page_to_skb(vi, rq, xdp_page,
+ 0, len, PAGE_SIZE);
+ ewma_pkt_len_add(&rq->mrg_avg_pkt_len, len);
+ return head_skb;
+ }
+ break;
+ case XDP_TX:
+ ewma_pkt_len_add(&rq->mrg_avg_pkt_len, len);
+ if (unlikely(xdp_page != page))
+ goto err_xdp;
+ rcu_read_unlock();
+ goto xdp_xmit;
+ case XDP_DROP:
+ default:
+ if (unlikely(xdp_page != page))
+ __free_pages(xdp_page, 0);
+ ewma_pkt_len_add(&rq->mrg_avg_pkt_len, len);
+ goto err_xdp;
+ }
+ }
+ rcu_read_unlock();
- struct sk_buff *head_skb = page_to_skb(vi, rq, page, offset, len,
- truesize);
- struct sk_buff *curr_skb = head_skb;
+ truesize = max(len, mergeable_ctx_to_buf_truesize(ctx));
+ head_skb = page_to_skb(vi, rq, page, offset, len, truesize);
+ curr_skb = head_skb;
if (unlikely(!curr_skb))
goto err_skb;
@@ -423,6 +684,8 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
ewma_pkt_len_add(&rq->mrg_avg_pkt_len, head_skb->len);
return head_skb;
+err_xdp:
+ rcu_read_unlock();
err_skb:
put_page(page);
while (--num_buf) {
@@ -439,6 +702,7 @@ err_skb:
err_buf:
dev->stats.rx_dropped++;
dev_kfree_skb(head_skb);
+xdp_xmit:
return NULL;
}
@@ -470,7 +734,7 @@ static void receive_buf(struct virtnet_info *vi, struct receive_queue *rq,
else if (vi->big_packets)
skb = receive_big(dev, vi, rq, buf, len);
else
- skb = receive_small(vi, buf, len);
+ skb = receive_small(dev, vi, rq, buf, len);
if (unlikely(!skb))
return;
@@ -1337,6 +1601,13 @@ static int virtnet_set_channels(struct net_device *dev,
if (queue_pairs > vi->max_queue_pairs || queue_pairs == 0)
return -EINVAL;
+ /* For now we don't support modifying channels while XDP is loaded
+ * also when XDP is loaded all RX queues have XDP programs so we only
+ * need to check a single RX queue.
+ */
+ if (vi->rq[0].xdp_prog)
+ return -EINVAL;
+
get_online_cpus();
err = virtnet_set_queues(vi, queue_pairs);
if (!err) {
@@ -1428,17 +1699,95 @@ static const struct ethtool_ops virtnet_ethtool_ops = {
.set_settings = virtnet_set_settings,
};
-#define MIN_MTU 68
-#define MAX_MTU 65535
-
-static int virtnet_change_mtu(struct net_device *dev, int new_mtu)
+static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog)
{
- if (new_mtu < MIN_MTU || new_mtu > MAX_MTU)
+ unsigned long int max_sz = PAGE_SIZE - sizeof(struct padded_vnet_hdr);
+ struct virtnet_info *vi = netdev_priv(dev);
+ struct bpf_prog *old_prog;
+ u16 xdp_qp = 0, curr_qp;
+ int i, err;
+
+ if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO4) ||
+ virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO6) ||
+ virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_ECN) ||
+ virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_UFO)) {
+ netdev_warn(dev, "can't set XDP while host is implementing LRO, disable LRO first\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (vi->mergeable_rx_bufs && !vi->any_header_sg) {
+ netdev_warn(dev, "XDP expects header/data in single page, any_header_sg required\n");
return -EINVAL;
- dev->mtu = new_mtu;
+ }
+
+ if (dev->mtu > max_sz) {
+ netdev_warn(dev, "XDP requires MTU less than %lu\n", max_sz);
+ return -EINVAL;
+ }
+
+ curr_qp = vi->curr_queue_pairs - vi->xdp_queue_pairs;
+ if (prog)
+ xdp_qp = nr_cpu_ids;
+
+ /* XDP requires extra queues for XDP_TX */
+ if (curr_qp + xdp_qp > vi->max_queue_pairs) {
+ netdev_warn(dev, "request %i queues but max is %i\n",
+ curr_qp + xdp_qp, vi->max_queue_pairs);
+ return -ENOMEM;
+ }
+
+ err = virtnet_set_queues(vi, curr_qp + xdp_qp);
+ if (err) {
+ dev_warn(&dev->dev, "XDP Device queue allocation failure.\n");
+ return err;
+ }
+
+ if (prog) {
+ prog = bpf_prog_add(prog, vi->max_queue_pairs - 1);
+ if (IS_ERR(prog)) {
+ virtnet_set_queues(vi, curr_qp);
+ return PTR_ERR(prog);
+ }
+ }
+
+ vi->xdp_queue_pairs = xdp_qp;
+ netif_set_real_num_rx_queues(dev, curr_qp + xdp_qp);
+
+ for (i = 0; i < vi->max_queue_pairs; i++) {
+ old_prog = rtnl_dereference(vi->rq[i].xdp_prog);
+ rcu_assign_pointer(vi->rq[i].xdp_prog, prog);
+ if (old_prog)
+ bpf_prog_put(old_prog);
+ }
+
return 0;
}
+static bool virtnet_xdp_query(struct net_device *dev)
+{
+ struct virtnet_info *vi = netdev_priv(dev);
+ int i;
+
+ for (i = 0; i < vi->max_queue_pairs; i++) {
+ if (vi->rq[i].xdp_prog)
+ return true;
+ }
+ return false;
+}
+
+static int virtnet_xdp(struct net_device *dev, struct netdev_xdp *xdp)
+{
+ switch (xdp->command) {
+ case XDP_SETUP_PROG:
+ return virtnet_xdp_set(dev, xdp->prog);
+ case XDP_QUERY_PROG:
+ xdp->prog_attached = virtnet_xdp_query(dev);
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
static const struct net_device_ops virtnet_netdev = {
.ndo_open = virtnet_open,
.ndo_stop = virtnet_close,
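XDP_TX transmits from whichever CPU the receive NAPI poll runs on, without a transmit lock, so virtnet_xdp_set() reserves one dedicated TX queue per possible CPU (xdp_qp = nr_cpu_ids) on top of the pairs already in use, and do_xdp_prog() indexes them with curr_queue_pairs - xdp_queue_pairs + smp_processor_id(). Worked example with assumed numbers:

    /* Worked example (assumed values):
     *   max_queue_pairs  = 16  (device limit)
     *   curr_queue_pairs =  4  (before the program is loaded)
     *   nr_cpu_ids       =  8
     * The check 4 + 8 <= 16 passes, curr_queue_pairs becomes 12 and
     * xdp_queue_pairs 8; CPU 3 then transmits on sq[12 - 8 + 3] = sq[7].
     */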
@@ -1446,7 +1795,6 @@ static const struct net_device_ops virtnet_netdev = {
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = virtnet_set_mac_address,
.ndo_set_rx_mode = virtnet_set_rx_mode,
- .ndo_change_mtu = virtnet_change_mtu,
.ndo_get_stats64 = virtnet_stats,
.ndo_vlan_rx_add_vid = virtnet_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = virtnet_vlan_rx_kill_vid,
@@ -1456,6 +1804,7 @@ static const struct net_device_ops virtnet_netdev = {
#ifdef CONFIG_NET_RX_BUSY_POLL
.ndo_busy_poll = virtnet_busy_poll,
#endif
+ .ndo_xdp = virtnet_xdp,
};
static void virtnet_config_changed_work(struct work_struct *work)
@@ -1517,12 +1866,20 @@ static void virtnet_free_queues(struct virtnet_info *vi)
static void free_receive_bufs(struct virtnet_info *vi)
{
+ struct bpf_prog *old_prog;
int i;
+ rtnl_lock();
for (i = 0; i < vi->max_queue_pairs; i++) {
while (vi->rq[i].pages)
__free_pages(get_a_page(&vi->rq[i], GFP_KERNEL), 0);
+
+ old_prog = rtnl_dereference(vi->rq[i].xdp_prog);
+ RCU_INIT_POINTER(vi->rq[i].xdp_prog, NULL);
+ if (old_prog)
+ bpf_prog_put(old_prog);
}
+ rtnl_unlock();
}
static void free_receive_page_frags(struct virtnet_info *vi)
@@ -1533,6 +1890,16 @@ static void free_receive_page_frags(struct virtnet_info *vi)
put_page(vi->rq[i].alloc_frag.page);
}
+static bool is_xdp_queue(struct virtnet_info *vi, int q)
+{
+ if (q < (vi->curr_queue_pairs - vi->xdp_queue_pairs))
+ return false;
+ else if (q < vi->curr_queue_pairs)
+ return true;
+ else
+ return false;
+}
+
static void free_unused_bufs(struct virtnet_info *vi)
{
void *buf;
@@ -1540,8 +1907,12 @@ static void free_unused_bufs(struct virtnet_info *vi)
for (i = 0; i < vi->max_queue_pairs; i++) {
struct virtqueue *vq = vi->sq[i].vq;
- while ((buf = virtqueue_detach_unused_buf(vq)) != NULL)
- dev_kfree_skb(buf);
+ while ((buf = virtqueue_detach_unused_buf(vq)) != NULL) {
+ if (!is_xdp_queue(vi, i))
+ dev_kfree_skb(buf);
+ else
+ put_page(virt_to_head_page(buf));
+ }
}
for (i = 0; i < vi->max_queue_pairs; i++) {
@@ -1762,6 +2133,9 @@ static bool virtnet_validate_features(struct virtio_device *vdev)
return true;
}
+#define MIN_MTU ETH_MIN_MTU
+#define MAX_MTU ETH_MAX_MTU
+
static int virtnet_probe(struct virtio_device *vdev)
{
int i, err;
@@ -1835,6 +2209,10 @@ static int virtnet_probe(struct virtio_device *vdev)
dev->vlan_features = dev->features;
+ /* MTU range: 68 - 65535 */
+ dev->min_mtu = MIN_MTU;
+ dev->max_mtu = MAX_MTU;
+
/* Configuration may specify what MAC to use. Otherwise random. */
if (virtio_has_feature(vdev, VIRTIO_NET_F_MAC))
virtio_cread_bytes(vdev,
@@ -1889,15 +2267,22 @@ static int virtnet_probe(struct virtio_device *vdev)
mtu = virtio_cread16(vdev,
offsetof(struct virtio_net_config,
mtu));
- if (virtnet_change_mtu(dev, mtu))
+ if (mtu < dev->min_mtu) {
__virtio_clear_bit(vdev, VIRTIO_NET_F_MTU);
+ } else {
+ dev->mtu = mtu;
+ dev->max_mtu = mtu;
+ }
}
if (vi->any_header_sg)
dev->needed_headroom = vi->hdr_len;
- /* Use single tx/rx queue pair as default */
- vi->curr_queue_pairs = 1;
+ /* Enable multiqueue by default */
+ if (num_online_cpus() >= max_queue_pairs)
+ vi->curr_queue_pairs = max_queue_pairs;
+ else
+ vi->curr_queue_pairs = num_online_cpus();
vi->max_queue_pairs = max_queue_pairs;
/* Allocate/initialize the rx/tx queues, and invoke find_vqs */
@@ -1928,6 +2313,10 @@ static int virtnet_probe(struct virtio_device *vdev)
goto free_unregister_netdev;
}
+ rtnl_lock();
+ virtnet_set_queues(vi, vi->curr_queue_pairs);
+ rtnl_unlock();
+
/* Assume link up if device can't report link status,
otherwise get link status from config. */
if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_STATUS)) {
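Two probe-time behaviour changes land together here: a VIRTIO_NET_F_MTU value below min_mtu now clears the feature bit instead of going through the old change_mtu helper, otherwise both mtu and max_mtu are pinned to the device-supplied value; and the driver starts with one queue pair per online CPU (capped by the device) instead of a single pair. Worked example with assumed numbers:

    /* Worked example (assumed values):
     *   config mtu = 9000, min_mtu = 68 -> dev->mtu = dev->max_mtu = 9000
     *   config mtu = 40                 -> VIRTIO_NET_F_MTU cleared
     *   num_online_cpus() = 6, max_queue_pairs = 4 -> curr_queue_pairs = 4
     *   num_online_cpus() = 2, max_queue_pairs = 4 -> curr_queue_pairs = 2
     */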
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index ef83ae3b0a44..e34b1297c96a 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -2972,9 +2972,6 @@ vmxnet3_change_mtu(struct net_device *netdev, int new_mtu)
struct vmxnet3_adapter *adapter = netdev_priv(netdev);
int err = 0;
- if (new_mtu < VMXNET3_MIN_MTU || new_mtu > VMXNET3_MAX_MTU)
- return -EINVAL;
-
netdev->mtu = new_mtu;
/*
@@ -3431,6 +3428,10 @@ vmxnet3_probe_device(struct pci_dev *pdev,
vmxnet3_set_ethtool_ops(netdev);
netdev->watchdog_timeo = 5 * HZ;
+ /* MTU range: 60 - 9000 */
+ netdev->min_mtu = VMXNET3_MIN_MTU;
+ netdev->max_mtu = VMXNET3_MAX_MTU;
+
INIT_WORK(&adapter->work, vmxnet3_reset_work);
set_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state);
diff --git a/drivers/net/vmxnet3/vmxnet3_int.h b/drivers/net/vmxnet3/vmxnet3_int.h
index 7dc37a090549..59e077be8829 100644
--- a/drivers/net/vmxnet3/vmxnet3_int.h
+++ b/drivers/net/vmxnet3/vmxnet3_int.h
@@ -119,9 +119,8 @@ enum {
};
/*
- * PCI vendor and device IDs.
+ * Maximum devices supported.
*/
-#define PCI_DEVICE_ID_VMWARE_VMXNET3 0x07B0
#define MAX_ETHERNET_CARDS 10
#define MAX_PCI_PASSTHRU_DEVICE 6
diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c
index 820de6a9ddde..7532646c3b7b 100644
--- a/drivers/net/vrf.c
+++ b/drivers/net/vrf.c
@@ -272,11 +272,6 @@ static netdev_tx_t vrf_process_v4_outbound(struct sk_buff *skb,
if (IS_ERR(rt))
goto err;
- if (rt->rt_type != RTN_UNICAST && rt->rt_type != RTN_LOCAL) {
- ip_rt_put(rt);
- goto err;
- }
-
skb_dst_drop(skb);
/* if dst.dev is loopback or the VRF device again this is locally
@@ -371,6 +366,8 @@ static int vrf_finish_output6(struct net *net, struct sock *sk,
struct in6_addr *nexthop;
int ret;
+ nf_reset(skb);
+
skb->protocol = htons(ETH_P_IPV6);
skb->dev = dev;
@@ -552,6 +549,8 @@ static int vrf_finish_output(struct net *net, struct sock *sk, struct sk_buff *s
u32 nexthop;
int ret = -EINVAL;
+ nf_reset(skb);
+
/* Be paranoid, rather than too clever. */
if (unlikely(skb_headroom(skb) < hh_len && dev->header_ops)) {
struct sk_buff *skb2;
@@ -611,6 +610,10 @@ static struct sk_buff *vrf_ip_out(struct net_device *vrf_dev,
struct dst_entry *dst = NULL;
struct rtable *rth;
+ /* don't divert multicast */
+ if (ipv4_is_multicast(ip_hdr(skb)->daddr))
+ return skb;
+
rcu_read_lock();
rth = rcu_dereference(vrf->rth);
@@ -850,8 +853,6 @@ static struct sk_buff *vrf_rcv_nfhook(u8 pf, unsigned int hook,
{
struct net *net = dev_net(dev);
- nf_reset(skb);
-
if (NF_HOOK(pf, hook, net, NULL, skb, dev, NULL, vrf_rcv_finish) < 0)
skb = NULL; /* kfree_skb(skb) handled by nf code */
@@ -999,6 +1000,9 @@ static struct sk_buff *vrf_ip_rcv(struct net_device *vrf_dev,
skb->skb_iif = vrf_dev->ifindex;
IPCB(skb)->flags |= IPSKB_L3SLAVE;
+ if (ipv4_is_multicast(ip_hdr(skb)->daddr))
+ goto out;
+
/* loopback traffic; do not push through packet taps again.
* Reset pkt_type for upper layers to process skb
*/
@@ -1162,8 +1166,19 @@ static int vrf_add_fib_rules(const struct net_device *dev)
if (err < 0)
goto ipv6_err;
+#if IS_ENABLED(CONFIG_IP_MROUTE_MULTIPLE_TABLES)
+ err = vrf_fib_rule(dev, RTNL_FAMILY_IPMR, true);
+ if (err < 0)
+ goto ipmr_err;
+#endif
+
return 0;
+#if IS_ENABLED(CONFIG_IP_MROUTE_MULTIPLE_TABLES)
+ipmr_err:
+ vrf_fib_rule(dev, AF_INET6, false);
+#endif
+
ipv6_err:
vrf_fib_rule(dev, AF_INET, false);
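The IPMR rule is added last, and on failure the unwind removes the earlier rules in reverse order; the conditionally compiled label keeps the error path correct whether or not CONFIG_IP_MROUTE_MULTIPLE_TABLES is set. The shape of the pattern, reduced to a sketch with hypothetical helper names:

    /* Conditional-unwind sketch (hypothetical helpers) */
    err = add_rule_inet();
    if (err < 0)
    	goto out;
    err = add_rule_inet6();
    if (err < 0)
    	goto inet_err;
    #if IS_ENABLED(CONFIG_SOME_FEATURE)
    err = add_rule_ipmr();
    if (err < 0)
    	goto inet6_err;
    #endif
    return 0;
    #if IS_ENABLED(CONFIG_SOME_FEATURE)
    inet6_err:
    	del_rule_inet6();
    #endif
    inet_err:
    	del_rule_inet();
    out:
    	return err;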
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index 2ba01ca02c9c..bb70dd5723b5 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -52,7 +52,7 @@ static bool log_ecn_error = true;
module_param(log_ecn_error, bool, 0644);
MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");
-static int vxlan_net_id;
+static unsigned int vxlan_net_id;
static struct rtnl_link_ops vxlan_link_ops;
static const u8 all_zeros_mac[ETH_ALEN + 2];
@@ -1754,21 +1754,16 @@ static int vxlan_build_skb(struct sk_buff *skb, struct dst_entry *dst,
}
min_headroom = LL_RESERVED_SPACE(dst->dev) + dst->header_len
- + VXLAN_HLEN + iphdr_len
- + (skb_vlan_tag_present(skb) ? VLAN_HLEN : 0);
+ + VXLAN_HLEN + iphdr_len;
/* Need space for new headers (invalidates iph ptr) */
err = skb_cow_head(skb, min_headroom);
if (unlikely(err))
- goto out_free;
-
- skb = vlan_hwaccel_push_inside(skb);
- if (WARN_ON(!skb))
- return -ENOMEM;
+ return err;
err = iptunnel_handle_offloads(skb, type);
if (err)
- goto out_free;
+ return err;
vxh = (struct vxlanhdr *) __skb_push(skb, sizeof(*vxh));
vxh->vx_flags = VXLAN_HF_VNI;
@@ -1792,19 +1787,16 @@ static int vxlan_build_skb(struct sk_buff *skb, struct dst_entry *dst,
if (vxflags & VXLAN_F_GPE) {
err = vxlan_build_gpe_hdr(vxh, vxflags, skb->protocol);
if (err < 0)
- goto out_free;
+ return err;
inner_protocol = skb->protocol;
}
skb_set_inner_protocol(skb, inner_protocol);
return 0;
-
-out_free:
- kfree_skb(skb);
- return err;
}
-static struct rtable *vxlan_get_route(struct vxlan_dev *vxlan,
+static struct rtable *vxlan_get_route(struct vxlan_dev *vxlan, struct net_device *dev,
+ struct vxlan_sock *sock4,
struct sk_buff *skb, int oif, u8 tos,
__be32 daddr, __be32 *saddr,
struct dst_cache *dst_cache,
@@ -1814,6 +1806,9 @@ static struct rtable *vxlan_get_route(struct vxlan_dev *vxlan,
struct rtable *rt = NULL;
struct flowi4 fl4;
+ if (!sock4)
+ return ERR_PTR(-EIO);
+
if (tos && !info)
use_cache = false;
if (use_cache) {
@@ -1831,16 +1826,27 @@ static struct rtable *vxlan_get_route(struct vxlan_dev *vxlan,
fl4.saddr = *saddr;
rt = ip_route_output_key(vxlan->net, &fl4);
- if (!IS_ERR(rt)) {
+ if (likely(!IS_ERR(rt))) {
+ if (rt->dst.dev == dev) {
+ netdev_dbg(dev, "circular route to %pI4\n", &daddr);
+ ip_rt_put(rt);
+ return ERR_PTR(-ELOOP);
+ }
+
*saddr = fl4.saddr;
if (use_cache)
dst_cache_set_ip4(dst_cache, &rt->dst, fl4.saddr);
+ } else {
+ netdev_dbg(dev, "no route to %pI4\n", &daddr);
+ return ERR_PTR(-ENETUNREACH);
}
return rt;
}
#if IS_ENABLED(CONFIG_IPV6)
static struct dst_entry *vxlan6_get_route(struct vxlan_dev *vxlan,
+ struct net_device *dev,
+ struct vxlan_sock *sock6,
struct sk_buff *skb, int oif, u8 tos,
__be32 label,
const struct in6_addr *daddr,
@@ -1848,7 +1854,6 @@ static struct dst_entry *vxlan6_get_route(struct vxlan_dev *vxlan,
struct dst_cache *dst_cache,
const struct ip_tunnel_info *info)
{
- struct vxlan_sock *sock6 = rcu_dereference(vxlan->vn6_sock);
bool use_cache = ip_tunnel_dst_cache_usable(skb, info);
struct dst_entry *ndst;
struct flowi6 fl6;
@@ -1876,8 +1881,16 @@ static struct dst_entry *vxlan6_get_route(struct vxlan_dev *vxlan,
err = ipv6_stub->ipv6_dst_lookup(vxlan->net,
sock6->sock->sk,
&ndst, &fl6);
- if (err < 0)
- return ERR_PTR(err);
+ if (unlikely(err < 0)) {
+ netdev_dbg(dev, "no route to %pI6\n", daddr);
+ return ERR_PTR(-ENETUNREACH);
+ }
+
+ if (unlikely(ndst->dev == dev)) {
+ netdev_dbg(dev, "circular route to %pI6\n", daddr);
+ dst_release(ndst);
+ return ERR_PTR(-ELOOP);
+ }
*saddr = fl6.saddr;
if (use_cache)
@@ -1931,23 +1944,55 @@ static void vxlan_encap_bypass(struct sk_buff *skb, struct vxlan_dev *src_vxlan,
}
}
+static int encap_bypass_if_local(struct sk_buff *skb, struct net_device *dev,
+ struct vxlan_dev *vxlan, union vxlan_addr *daddr,
+ __be32 dst_port, __be32 vni, struct dst_entry *dst,
+ u32 rt_flags)
+{
+#if IS_ENABLED(CONFIG_IPV6)
+ /* IPv6 rt-flags are checked against RTF_LOCAL, but the value of
+ * RTF_LOCAL is equal to RTCF_LOCAL. So to keep code simple
+ * we can use RTCF_LOCAL which works for ipv4 and ipv6 route entry.
+ */
+ BUILD_BUG_ON(RTCF_LOCAL != RTF_LOCAL);
+#endif
+ /* Bypass encapsulation if the destination is local */
+ if (rt_flags & RTCF_LOCAL &&
+ !(rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))) {
+ struct vxlan_dev *dst_vxlan;
+
+ dst_release(dst);
+ dst_vxlan = vxlan_find_vni(vxlan->net, vni,
+ daddr->sa.sa_family, dst_port,
+ vxlan->flags);
+ if (!dst_vxlan) {
+ dev->stats.tx_errors++;
+ kfree_skb(skb);
+
+ return -ENOENT;
+ }
+ vxlan_encap_bypass(skb, vxlan, dst_vxlan);
+ return 1;
+ }
+
+ return 0;
+}
+
static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
struct vxlan_rdst *rdst, bool did_rsc)
{
struct dst_cache *dst_cache;
struct ip_tunnel_info *info;
struct vxlan_dev *vxlan = netdev_priv(dev);
- struct sock *sk;
- struct rtable *rt = NULL;
- const struct iphdr *old_iph;
+ const struct iphdr *old_iph = ip_hdr(skb);
union vxlan_addr *dst;
union vxlan_addr remote_ip, local_ip;
union vxlan_addr *src;
struct vxlan_metadata _md;
struct vxlan_metadata *md = &_md;
__be16 src_port = 0, dst_port;
+ struct dst_entry *ndst = NULL;
__be32 vni, label;
- __be16 df = 0;
__u8 tos, ttl;
int err;
u32 flags = vxlan->flags;
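encap_bypass_if_local() folds the previously duplicated "destination is local" short circuit into one helper with a three-way return: 0 means continue with encapsulation, 1 means the packet was handed to the local destination VXLAN device, and a negative errno means it was counted and dropped; in the nonzero cases the skb and the dst reference are already consumed. Caller sketch matching the IPv4 path below:

    /* Caller sketch: any nonzero return means the skb is no longer ours */
    err = encap_bypass_if_local(skb, dev, vxlan, dst, dst_port, vni,
    			    &rt->dst, rt->rt_flags);
    if (err)
    	return;		/* bypassed (1) or dropped (<0); dst released */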
@@ -1957,19 +2002,40 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
info = skb_tunnel_info(skb);
if (rdst) {
+ dst = &rdst->remote_ip;
+ if (vxlan_addr_any(dst)) {
+ if (did_rsc) {
+ /* short-circuited back to local bridge */
+ vxlan_encap_bypass(skb, vxlan, vxlan);
+ return;
+ }
+ goto drop;
+ }
+
dst_port = rdst->remote_port ? rdst->remote_port : vxlan->cfg.dst_port;
vni = rdst->remote_vni;
- dst = &rdst->remote_ip;
src = &vxlan->cfg.saddr;
dst_cache = &rdst->dst_cache;
+ md->gbp = skb->mark;
+ ttl = vxlan->cfg.ttl;
+ if (!ttl && vxlan_addr_multicast(dst))
+ ttl = 1;
+
+ tos = vxlan->cfg.tos;
+ if (tos == 1)
+ tos = ip_tunnel_get_dsfield(old_iph, skb);
+
+ if (dst->sa.sa_family == AF_INET)
+ udp_sum = !(flags & VXLAN_F_UDP_ZERO_CSUM_TX);
+ else
+ udp_sum = !(flags & VXLAN_F_UDP_ZERO_CSUM6_TX);
+ label = vxlan->cfg.label;
} else {
if (!info) {
WARN_ONCE(1, "%s: Missing encapsulation instructions\n",
dev->name);
goto drop;
}
- dst_port = info->key.tp_dst ? : vxlan->cfg.dst_port;
- vni = tunnel_id_to_key32(info->key.tun_id);
remote_ip.sa.sa_family = ip_tunnel_info_af(info);
if (remote_ip.sa.sa_family == AF_INET) {
remote_ip.sin.sin_addr.s_addr = info->key.u.ipv4.dst;
@@ -1979,182 +2045,111 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
local_ip.sin6.sin6_addr = info->key.u.ipv6.src;
}
dst = &remote_ip;
+ dst_port = info->key.tp_dst ? : vxlan->cfg.dst_port;
+ vni = tunnel_id_to_key32(info->key.tun_id);
src = &local_ip;
dst_cache = &info->dst_cache;
- }
-
- if (vxlan_addr_any(dst)) {
- if (did_rsc) {
- /* short-circuited back to local bridge */
- vxlan_encap_bypass(skb, vxlan, vxlan);
- return;
- }
- goto drop;
- }
-
- old_iph = ip_hdr(skb);
-
- ttl = vxlan->cfg.ttl;
- if (!ttl && vxlan_addr_multicast(dst))
- ttl = 1;
-
- tos = vxlan->cfg.tos;
- if (tos == 1)
- tos = ip_tunnel_get_dsfield(old_iph, skb);
-
- label = vxlan->cfg.label;
- src_port = udp_flow_src_port(dev_net(dev), skb, vxlan->cfg.port_min,
- vxlan->cfg.port_max, true);
-
- if (info) {
+ if (info->options_len)
+ md = ip_tunnel_info_opts(info);
ttl = info->key.ttl;
tos = info->key.tos;
label = info->key.label;
udp_sum = !!(info->key.tun_flags & TUNNEL_CSUM);
-
- if (info->options_len)
- md = ip_tunnel_info_opts(info);
- } else {
- md->gbp = skb->mark;
}
+ src_port = udp_flow_src_port(dev_net(dev), skb, vxlan->cfg.port_min,
+ vxlan->cfg.port_max, true);
if (dst->sa.sa_family == AF_INET) {
struct vxlan_sock *sock4 = rcu_dereference(vxlan->vn4_sock);
+ struct rtable *rt;
+ __be16 df = 0;
- if (!sock4)
- goto drop;
- sk = sock4->sock->sk;
-
- rt = vxlan_get_route(vxlan, skb,
+ rt = vxlan_get_route(vxlan, dev, sock4, skb,
rdst ? rdst->remote_ifindex : 0, tos,
dst->sin.sin_addr.s_addr,
&src->sin.sin_addr.s_addr,
dst_cache, info);
if (IS_ERR(rt)) {
- netdev_dbg(dev, "no route to %pI4\n",
- &dst->sin.sin_addr.s_addr);
- dev->stats.tx_carrier_errors++;
+ err = PTR_ERR(rt);
goto tx_error;
}
- if (rt->dst.dev == dev) {
- netdev_dbg(dev, "circular route to %pI4\n",
- &dst->sin.sin_addr.s_addr);
- dev->stats.collisions++;
- goto rt_tx_error;
- }
-
/* Bypass encapsulation if the destination is local */
- if (!info && rt->rt_flags & RTCF_LOCAL &&
- !(rt->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))) {
- struct vxlan_dev *dst_vxlan;
-
- ip_rt_put(rt);
- dst_vxlan = vxlan_find_vni(vxlan->net, vni,
- dst->sa.sa_family, dst_port,
- vxlan->flags);
- if (!dst_vxlan)
- goto tx_error;
- vxlan_encap_bypass(skb, vxlan, dst_vxlan);
- return;
- }
-
- if (!info)
- udp_sum = !(flags & VXLAN_F_UDP_ZERO_CSUM_TX);
- else if (info->key.tun_flags & TUNNEL_DONT_FRAGMENT)
+ if (!info) {
+ err = encap_bypass_if_local(skb, dev, vxlan, dst,
+ dst_port, vni, &rt->dst,
+ rt->rt_flags);
+ if (err)
+ return;
+ } else if (info->key.tun_flags & TUNNEL_DONT_FRAGMENT) {
df = htons(IP_DF);
+ }
+ ndst = &rt->dst;
tos = ip_tunnel_ecn_encap(tos, old_iph, skb);
ttl = ttl ? : ip4_dst_hoplimit(&rt->dst);
- err = vxlan_build_skb(skb, &rt->dst, sizeof(struct iphdr),
+ err = vxlan_build_skb(skb, ndst, sizeof(struct iphdr),
vni, md, flags, udp_sum);
if (err < 0)
- goto xmit_tx_error;
+ goto tx_error;
- udp_tunnel_xmit_skb(rt, sk, skb, src->sin.sin_addr.s_addr,
+ udp_tunnel_xmit_skb(rt, sock4->sock->sk, skb, src->sin.sin_addr.s_addr,
dst->sin.sin_addr.s_addr, tos, ttl, df,
src_port, dst_port, xnet, !udp_sum);
#if IS_ENABLED(CONFIG_IPV6)
} else {
struct vxlan_sock *sock6 = rcu_dereference(vxlan->vn6_sock);
- struct dst_entry *ndst;
- u32 rt6i_flags;
- if (!sock6)
- goto drop;
- sk = sock6->sock->sk;
-
- ndst = vxlan6_get_route(vxlan, skb,
+ ndst = vxlan6_get_route(vxlan, dev, sock6, skb,
rdst ? rdst->remote_ifindex : 0, tos,
label, &dst->sin6.sin6_addr,
&src->sin6.sin6_addr,
dst_cache, info);
if (IS_ERR(ndst)) {
- netdev_dbg(dev, "no route to %pI6\n",
- &dst->sin6.sin6_addr);
- dev->stats.tx_carrier_errors++;
+ err = PTR_ERR(ndst);
+ ndst = NULL;
goto tx_error;
}
- if (ndst->dev == dev) {
- netdev_dbg(dev, "circular route to %pI6\n",
- &dst->sin6.sin6_addr);
- dst_release(ndst);
- dev->stats.collisions++;
- goto tx_error;
- }
+ if (!info) {
+ u32 rt6i_flags = ((struct rt6_info *)ndst)->rt6i_flags;
- /* Bypass encapsulation if the destination is local */
- rt6i_flags = ((struct rt6_info *)ndst)->rt6i_flags;
- if (!info && rt6i_flags & RTF_LOCAL &&
- !(rt6i_flags & (RTCF_BROADCAST | RTCF_MULTICAST))) {
- struct vxlan_dev *dst_vxlan;
-
- dst_release(ndst);
- dst_vxlan = vxlan_find_vni(vxlan->net, vni,
- dst->sa.sa_family, dst_port,
- vxlan->flags);
- if (!dst_vxlan)
- goto tx_error;
- vxlan_encap_bypass(skb, vxlan, dst_vxlan);
- return;
+ err = encap_bypass_if_local(skb, dev, vxlan, dst,
+ dst_port, vni, ndst,
+ rt6i_flags);
+ if (err)
+ return;
}
- if (!info)
- udp_sum = !(flags & VXLAN_F_UDP_ZERO_CSUM6_TX);
-
tos = ip_tunnel_ecn_encap(tos, old_iph, skb);
ttl = ttl ? : ip6_dst_hoplimit(ndst);
skb_scrub_packet(skb, xnet);
err = vxlan_build_skb(skb, ndst, sizeof(struct ipv6hdr),
vni, md, flags, udp_sum);
- if (err < 0) {
- dst_release(ndst);
- dev->stats.tx_errors++;
- return;
- }
- udp_tunnel6_xmit_skb(ndst, sk, skb, dev,
+ if (err < 0)
+ goto tx_error;
+
+ udp_tunnel6_xmit_skb(ndst, sock6->sock->sk, skb, dev,
&src->sin6.sin6_addr,
&dst->sin6.sin6_addr, tos, ttl,
label, src_port, dst_port, !udp_sum);
#endif
}
-
return;
drop:
dev->stats.tx_dropped++;
- goto tx_free;
+ dev_kfree_skb(skb);
+ return;
-xmit_tx_error:
- /* skb is already freed. */
- skb = NULL;
-rt_tx_error:
- ip_rt_put(rt);
tx_error:
+ if (err == -ELOOP)
+ dev->stats.collisions++;
+ else if (err == -ENETUNREACH)
+ dev->stats.tx_carrier_errors++;
+ dst_release(ndst);
dev->stats.tx_errors++;
-tx_free:
- dev_kfree_skb(skb);
+ kfree_skb(skb);
}
/* Transmit local packets over Vxlan
@@ -2394,43 +2389,31 @@ static void vxlan_set_multicast_list(struct net_device *dev)
{
}
-static int __vxlan_change_mtu(struct net_device *dev,
- struct net_device *lowerdev,
- struct vxlan_rdst *dst, int new_mtu, bool strict)
+static int vxlan_change_mtu(struct net_device *dev, int new_mtu)
{
- int max_mtu = IP_MAX_MTU;
-
- if (lowerdev)
- max_mtu = lowerdev->mtu;
+ struct vxlan_dev *vxlan = netdev_priv(dev);
+ struct vxlan_rdst *dst = &vxlan->default_dst;
+ struct net_device *lowerdev = __dev_get_by_index(vxlan->net,
+ dst->remote_ifindex);
+ bool use_ipv6 = false;
if (dst->remote_ip.sa.sa_family == AF_INET6)
- max_mtu -= VXLAN6_HEADROOM;
- else
- max_mtu -= VXLAN_HEADROOM;
-
- if (new_mtu < 68)
- return -EINVAL;
+ use_ipv6 = true;
- if (new_mtu > max_mtu) {
- if (strict)
+ /* This check is different from the dev->max_mtu check, because it
+ * looks at the current lowerdev->mtu rather than the static dev->max_mtu.
+ */
+ if (lowerdev) {
+ int max_mtu = lowerdev->mtu -
+ (use_ipv6 ? VXLAN6_HEADROOM : VXLAN_HEADROOM);
+ if (new_mtu > max_mtu)
return -EINVAL;
-
- new_mtu = max_mtu;
}
dev->mtu = new_mtu;
return 0;
}
-static int vxlan_change_mtu(struct net_device *dev, int new_mtu)
-{
- struct vxlan_dev *vxlan = netdev_priv(dev);
- struct vxlan_rdst *dst = &vxlan->default_dst;
- struct net_device *lowerdev = __dev_get_by_index(vxlan->net,
- dst->remote_ifindex);
- return __vxlan_change_mtu(dev, lowerdev, dst, new_mtu, true);
-}
-
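
A worked example of the headroom arithmetic above (the exact VXLAN_HEADROOM/VXLAN6_HEADROOM constants live in the vxlan header; the totals below assume the usual outer Ethernet 14 + IPv4 20 / IPv6 40 + UDP 8 + VXLAN 8 header sizes):

/* lowerdev->mtu = 1500:
 *   IPv4 underlay: max_mtu = 1500 - (14 + 20 + 8 + 8) = 1450
 *   IPv6 underlay: max_mtu = 1500 - (14 + 40 + 8 + 8) = 1430
 * so vxlan_change_mtu() accepts new_mtu = 1450 over an IPv4 underlay
 * but rejects it with -EINVAL over IPv6.
 */
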
static int vxlan_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
{
struct vxlan_dev *vxlan = netdev_priv(dev);
@@ -2445,9 +2428,7 @@ static int vxlan_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
struct vxlan_sock *sock4 = rcu_dereference(vxlan->vn4_sock);
struct rtable *rt;
- if (!sock4)
- return -EINVAL;
- rt = vxlan_get_route(vxlan, skb, 0, info->key.tos,
+ rt = vxlan_get_route(vxlan, dev, sock4, skb, 0, info->key.tos,
info->key.u.ipv4.dst,
&info->key.u.ipv4.src, NULL, info);
if (IS_ERR(rt))
@@ -2455,9 +2436,10 @@ static int vxlan_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
ip_rt_put(rt);
} else {
#if IS_ENABLED(CONFIG_IPV6)
+ struct vxlan_sock *sock6 = rcu_dereference(vxlan->vn6_sock);
struct dst_entry *ndst;
- ndst = vxlan6_get_route(vxlan, skb, 0, info->key.tos,
+ ndst = vxlan6_get_route(vxlan, dev, sock6, skb, 0, info->key.tos,
info->key.label, &info->key.u.ipv6.dst,
&info->key.u.ipv6.src, NULL, info);
if (IS_ERR(ndst))
@@ -2545,10 +2527,8 @@ static void vxlan_setup(struct net_device *dev)
dev->features |= NETIF_F_GSO_SOFTWARE;
dev->vlan_features = dev->features;
- dev->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX;
dev->hw_features |= NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_RXCSUM;
dev->hw_features |= NETIF_F_GSO_SOFTWARE;
- dev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX;
netif_keep_dst(dev);
dev->priv_flags |= IFF_NO_QUEUE;
@@ -2821,6 +2801,10 @@ static int vxlan_dev_configure(struct net *src_net, struct net_device *dev,
vxlan_ether_setup(dev);
}
+ /* MTU range: 68 - 65535 */
+ dev->min_mtu = ETH_MIN_MTU;
+ dev->max_mtu = ETH_MAX_MTU;
+
vxlan->net = src_net;
dst->remote_vni = conf->vni;
@@ -2864,7 +2848,8 @@ static int vxlan_dev_configure(struct net *src_net, struct net_device *dev,
#endif
if (!conf->mtu)
- dev->mtu = lowerdev->mtu - (use_ipv6 ? VXLAN6_HEADROOM : VXLAN_HEADROOM);
+ dev->mtu = lowerdev->mtu -
+ (use_ipv6 ? VXLAN6_HEADROOM : VXLAN_HEADROOM);
needed_headroom = lowerdev->hard_header_len;
} else if (vxlan_addr_multicast(&dst->remote_ip)) {
@@ -2873,9 +2858,20 @@ static int vxlan_dev_configure(struct net *src_net, struct net_device *dev,
}
if (conf->mtu) {
- err = __vxlan_change_mtu(dev, lowerdev, dst, conf->mtu, false);
- if (err)
- return err;
+ int max_mtu = ETH_MAX_MTU;
+
+ if (lowerdev)
+ max_mtu = lowerdev->mtu;
+
+ max_mtu -= (use_ipv6 ? VXLAN6_HEADROOM : VXLAN_HEADROOM);
+
+ if (conf->mtu < dev->min_mtu || conf->mtu > dev->max_mtu)
+ return -EINVAL;
+
+ dev->mtu = conf->mtu;
+
+ if (conf->mtu > max_mtu)
+ dev->mtu = max_mtu;
}
if (use_ipv6 || conf->flags & VXLAN_F_COLLECT_METADATA)
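
Note the asymmetry between the two MTU paths in this patch: at configure time an out-of-range conf->mtu is rejected against the static dev->min_mtu/dev->max_mtu bounds, but a value that merely exceeds the lowerdev-derived maximum is silently clamped (dev->mtu = max_mtu), whereas the runtime vxlan_change_mtu() above returns -EINVAL in that case. For example, conf->mtu = 2000 on a 1500-byte IPv4 lowerdev is accepted and then clamped to 1450.
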
diff --git a/drivers/net/wan/c101.c b/drivers/net/wan/c101.c
index 09a50751763b..2371e078afbb 100644
--- a/drivers/net/wan/c101.c
+++ b/drivers/net/wan/c101.c
@@ -302,7 +302,6 @@ static void c101_destroy_card(card_t *card)
static const struct net_device_ops c101_ops = {
.ndo_open = c101_open,
.ndo_stop = c101_close,
- .ndo_change_mtu = hdlc_change_mtu,
.ndo_start_xmit = hdlc_start_xmit,
.ndo_do_ioctl = c101_ioctl,
};
diff --git a/drivers/net/wan/cosa.c b/drivers/net/wan/cosa.c
index b87fe0a01c69..087eb266601f 100644
--- a/drivers/net/wan/cosa.c
+++ b/drivers/net/wan/cosa.c
@@ -432,7 +432,6 @@ module_exit(cosa_exit);
static const struct net_device_ops cosa_ops = {
.ndo_open = cosa_net_open,
.ndo_stop = cosa_net_close,
- .ndo_change_mtu = hdlc_change_mtu,
.ndo_start_xmit = hdlc_start_xmit,
.ndo_do_ioctl = cosa_net_ioctl,
.ndo_tx_timeout = cosa_net_timeout,
diff --git a/drivers/net/wan/dscc4.c b/drivers/net/wan/dscc4.c
index 629225980463..7351e5440ed7 100644
--- a/drivers/net/wan/dscc4.c
+++ b/drivers/net/wan/dscc4.c
@@ -887,7 +887,6 @@ static inline int dscc4_set_quartz(struct dscc4_dev_priv *dpriv, int hz)
static const struct net_device_ops dscc4_ops = {
.ndo_open = dscc4_open,
.ndo_stop = dscc4_close,
- .ndo_change_mtu = hdlc_change_mtu,
.ndo_start_xmit = hdlc_start_xmit,
.ndo_do_ioctl = dscc4_ioctl,
.ndo_tx_timeout = dscc4_tx_timeout,
diff --git a/drivers/net/wan/farsync.c b/drivers/net/wan/farsync.c
index 3c9cbf908ec7..03696d35ee9c 100644
--- a/drivers/net/wan/farsync.c
+++ b/drivers/net/wan/farsync.c
@@ -2394,7 +2394,6 @@ fst_init_card(struct fst_card_info *card)
static const struct net_device_ops fst_ops = {
.ndo_open = fst_open,
.ndo_stop = fst_close,
- .ndo_change_mtu = hdlc_change_mtu,
.ndo_start_xmit = hdlc_start_xmit,
.ndo_do_ioctl = fst_ioctl,
.ndo_tx_timeout = fst_tx_timeout,
diff --git a/drivers/net/wan/fsl_ucc_hdlc.c b/drivers/net/wan/fsl_ucc_hdlc.c
index 65647533b401..e38ce4da3efb 100644
--- a/drivers/net/wan/fsl_ucc_hdlc.c
+++ b/drivers/net/wan/fsl_ucc_hdlc.c
@@ -992,7 +992,6 @@ static const struct dev_pm_ops uhdlc_pm_ops = {
static const struct net_device_ops uhdlc_ops = {
.ndo_open = uhdlc_open,
.ndo_stop = uhdlc_close,
- .ndo_change_mtu = hdlc_change_mtu,
.ndo_start_xmit = hdlc_start_xmit,
.ndo_do_ioctl = uhdlc_ioctl,
};
diff --git a/drivers/net/wan/hdlc.c b/drivers/net/wan/hdlc.c
index 9bd4aa8083ce..7221a53b8b14 100644
--- a/drivers/net/wan/hdlc.c
+++ b/drivers/net/wan/hdlc.c
@@ -46,14 +46,6 @@ static const char* version = "HDLC support module revision 1.22";
static struct hdlc_proto *first_proto;
-int hdlc_change_mtu(struct net_device *dev, int new_mtu)
-{
- if ((new_mtu < 68) || (new_mtu > HDLC_MAX_MTU))
- return -EINVAL;
- dev->mtu = new_mtu;
- return 0;
-}
-
static int hdlc_rcv(struct sk_buff *skb, struct net_device *dev,
struct packet_type *p, struct net_device *orig_dev)
{
@@ -237,6 +229,8 @@ static void hdlc_setup_dev(struct net_device *dev)
dev->flags = IFF_POINTOPOINT | IFF_NOARP;
dev->priv_flags = IFF_WAN_HDLC;
dev->mtu = HDLC_MAX_MTU;
+ dev->min_mtu = 68;
+ dev->max_mtu = HDLC_MAX_MTU;
dev->type = ARPHRD_RAWHDLC;
dev->hard_header_len = 16;
dev->addr_len = 0;
@@ -353,7 +347,6 @@ MODULE_AUTHOR("Krzysztof Halasa <khc@pm.waw.pl>");
MODULE_DESCRIPTION("HDLC support module");
MODULE_LICENSE("GPL v2");
-EXPORT_SYMBOL(hdlc_change_mtu);
EXPORT_SYMBOL(hdlc_start_xmit);
EXPORT_SYMBOL(hdlc_open);
EXPORT_SYMBOL(hdlc_close);
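
All of the WAN conversions in this series follow the same recipe: the per-driver .ndo_change_mtu validator is dropped and the legal range is expressed through dev->min_mtu/dev->max_mtu, which the networking core checks before any driver callback runs. A sketch of the core-side check (paraphrased, not the verbatim net/core/dev.c code):

/* Sketch of the range check that replaces hdlc_change_mtu() and friends */
static int dev_set_mtu_sketch(struct net_device *dev, int new_mtu)
{
	if (new_mtu < dev->min_mtu)
		return -EINVAL;
	if (dev->max_mtu > 0 && new_mtu > dev->max_mtu)
		return -EINVAL;

	/* only after this does the driver's ndo_change_mtu(), if any, run */
	dev->mtu = new_mtu;
	return 0;
}
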
diff --git a/drivers/net/wan/hdlc_fr.c b/drivers/net/wan/hdlc_fr.c
index b6e0cfb095d3..eb915281197e 100644
--- a/drivers/net/wan/hdlc_fr.c
+++ b/drivers/net/wan/hdlc_fr.c
@@ -1053,7 +1053,6 @@ static void pvc_setup(struct net_device *dev)
static const struct net_device_ops pvc_ops = {
.ndo_open = pvc_open,
.ndo_stop = pvc_close,
- .ndo_change_mtu = hdlc_change_mtu,
.ndo_start_xmit = pvc_xmit,
.ndo_do_ioctl = pvc_ioctl,
};
@@ -1096,6 +1095,8 @@ static int fr_add_pvc(struct net_device *frad, unsigned int dlci, int type)
}
dev->netdev_ops = &pvc_ops;
dev->mtu = HDLC_MAX_MTU;
+ dev->min_mtu = 68;
+ dev->max_mtu = HDLC_MAX_MTU;
dev->priv_flags |= IFF_NO_QUEUE;
dev->ml_priv = pvc;
diff --git a/drivers/net/wan/hostess_sv11.c b/drivers/net/wan/hostess_sv11.c
index 3d741663fd67..dd6bb3364ad2 100644
--- a/drivers/net/wan/hostess_sv11.c
+++ b/drivers/net/wan/hostess_sv11.c
@@ -180,7 +180,6 @@ static int hostess_attach(struct net_device *dev, unsigned short encoding,
static const struct net_device_ops hostess_ops = {
.ndo_open = hostess_open,
.ndo_stop = hostess_close,
- .ndo_change_mtu = hdlc_change_mtu,
.ndo_start_xmit = hdlc_start_xmit,
.ndo_do_ioctl = hostess_ioctl,
};
diff --git a/drivers/net/wan/ixp4xx_hss.c b/drivers/net/wan/ixp4xx_hss.c
index e7bbdb7af53a..6a505c26a3e7 100644
--- a/drivers/net/wan/ixp4xx_hss.c
+++ b/drivers/net/wan/ixp4xx_hss.c
@@ -1321,7 +1321,6 @@ static int hss_hdlc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
static const struct net_device_ops hss_hdlc_ops = {
.ndo_open = hss_hdlc_open,
.ndo_stop = hss_hdlc_close,
- .ndo_change_mtu = hdlc_change_mtu,
.ndo_start_xmit = hdlc_start_xmit,
.ndo_do_ioctl = hss_hdlc_ioctl,
};
diff --git a/drivers/net/wan/lmc/lmc_main.c b/drivers/net/wan/lmc/lmc_main.c
index 299140c04556..001b7796740d 100644
--- a/drivers/net/wan/lmc/lmc_main.c
+++ b/drivers/net/wan/lmc/lmc_main.c
@@ -808,7 +808,6 @@ static int lmc_attach(struct net_device *dev, unsigned short encoding,
static const struct net_device_ops lmc_ops = {
.ndo_open = lmc_open,
.ndo_stop = lmc_close,
- .ndo_change_mtu = hdlc_change_mtu,
.ndo_start_xmit = hdlc_start_xmit,
.ndo_do_ioctl = lmc_ioctl,
.ndo_tx_timeout = lmc_driver_timeout,
diff --git a/drivers/net/wan/lmc/lmc_media.c b/drivers/net/wan/lmc/lmc_media.c
index 5920c996fcdf..ff2e4a5654c7 100644
--- a/drivers/net/wan/lmc/lmc_media.c
+++ b/drivers/net/wan/lmc/lmc_media.c
@@ -95,62 +95,63 @@ static inline void write_av9110_bit (lmc_softc_t *, int);
static void write_av9110(lmc_softc_t *, u32, u32, u32, u32, u32);
lmc_media_t lmc_ds3_media = {
- lmc_ds3_init, /* special media init stuff */
- lmc_ds3_default, /* reset to default state */
- lmc_ds3_set_status, /* reset status to state provided */
- lmc_dummy_set_1, /* set clock source */
- lmc_dummy_set2_1, /* set line speed */
- lmc_ds3_set_100ft, /* set cable length */
- lmc_ds3_set_scram, /* set scrambler */
- lmc_ds3_get_link_status, /* get link status */
- lmc_dummy_set_1, /* set link status */
- lmc_ds3_set_crc_length, /* set CRC length */
- lmc_dummy_set_1, /* set T1 or E1 circuit type */
- lmc_ds3_watchdog
+ .init = lmc_ds3_init, /* special media init stuff */
+ .defaults = lmc_ds3_default, /* reset to default state */
+ .set_status = lmc_ds3_set_status, /* reset status to state provided */
+ .set_clock_source = lmc_dummy_set_1, /* set clock source */
+ .set_speed = lmc_dummy_set2_1, /* set line speed */
+ .set_cable_length = lmc_ds3_set_100ft, /* set cable length */
+ .set_scrambler = lmc_ds3_set_scram, /* set scrambler */
+ .get_link_status = lmc_ds3_get_link_status, /* get link status */
+ .set_link_status = lmc_dummy_set_1, /* set link status */
+ .set_crc_length = lmc_ds3_set_crc_length, /* set CRC length */
+ .set_circuit_type = lmc_dummy_set_1, /* set T1 or E1 circuit type */
+ .watchdog = lmc_ds3_watchdog
};
lmc_media_t lmc_hssi_media = {
- lmc_hssi_init, /* special media init stuff */
- lmc_hssi_default, /* reset to default state */
- lmc_hssi_set_status, /* reset status to state provided */
- lmc_hssi_set_clock, /* set clock source */
- lmc_dummy_set2_1, /* set line speed */
- lmc_dummy_set_1, /* set cable length */
- lmc_dummy_set_1, /* set scrambler */
- lmc_hssi_get_link_status, /* get link status */
- lmc_hssi_set_link_status, /* set link status */
- lmc_hssi_set_crc_length, /* set CRC length */
- lmc_dummy_set_1, /* set T1 or E1 circuit type */
- lmc_hssi_watchdog
+ .init = lmc_hssi_init, /* special media init stuff */
+ .defaults = lmc_hssi_default, /* reset to default state */
+ .set_status = lmc_hssi_set_status, /* reset status to state provided */
+ .set_clock_source = lmc_hssi_set_clock, /* set clock source */
+ .set_speed = lmc_dummy_set2_1, /* set line speed */
+ .set_cable_length = lmc_dummy_set_1, /* set cable length */
+ .set_scrambler = lmc_dummy_set_1, /* set scrambler */
+ .get_link_status = lmc_hssi_get_link_status, /* get link status */
+ .set_link_status = lmc_hssi_set_link_status, /* set link status */
+ .set_crc_length = lmc_hssi_set_crc_length, /* set CRC length */
+ .set_circuit_type = lmc_dummy_set_1, /* set T1 or E1 circuit type */
+ .watchdog = lmc_hssi_watchdog
};
-lmc_media_t lmc_ssi_media = { lmc_ssi_init, /* special media init stuff */
- lmc_ssi_default, /* reset to default state */
- lmc_ssi_set_status, /* reset status to state provided */
- lmc_ssi_set_clock, /* set clock source */
- lmc_ssi_set_speed, /* set line speed */
- lmc_dummy_set_1, /* set cable length */
- lmc_dummy_set_1, /* set scrambler */
- lmc_ssi_get_link_status, /* get link status */
- lmc_ssi_set_link_status, /* set link status */
- lmc_ssi_set_crc_length, /* set CRC length */
- lmc_dummy_set_1, /* set T1 or E1 circuit type */
- lmc_ssi_watchdog
+lmc_media_t lmc_ssi_media = {
+ .init = lmc_ssi_init, /* special media init stuff */
+ .defaults = lmc_ssi_default, /* reset to default state */
+ .set_status = lmc_ssi_set_status, /* reset status to state provided */
+ .set_clock_source = lmc_ssi_set_clock, /* set clock source */
+ .set_speed = lmc_ssi_set_speed, /* set line speed */
+ .set_cable_length = lmc_dummy_set_1, /* set cable length */
+ .set_scrambler = lmc_dummy_set_1, /* set scrambler */
+ .get_link_status = lmc_ssi_get_link_status, /* get link status */
+ .set_link_status = lmc_ssi_set_link_status, /* set link status */
+ .set_crc_length = lmc_ssi_set_crc_length, /* set CRC length */
+ .set_circuit_type = lmc_dummy_set_1, /* set T1 or E1 circuit type */
+ .watchdog = lmc_ssi_watchdog
};
lmc_media_t lmc_t1_media = {
- lmc_t1_init, /* special media init stuff */
- lmc_t1_default, /* reset to default state */
- lmc_t1_set_status, /* reset status to state provided */
- lmc_t1_set_clock, /* set clock source */
- lmc_dummy_set2_1, /* set line speed */
- lmc_dummy_set_1, /* set cable length */
- lmc_dummy_set_1, /* set scrambler */
- lmc_t1_get_link_status, /* get link status */
- lmc_dummy_set_1, /* set link status */
- lmc_t1_set_crc_length, /* set CRC length */
- lmc_t1_set_circuit_type, /* set T1 or E1 circuit type */
- lmc_t1_watchdog
+ .init = lmc_t1_init, /* special media init stuff */
+ .defaults = lmc_t1_default, /* reset to default state */
+ .set_status = lmc_t1_set_status, /* reset status to state provided */
+ .set_clock_source = lmc_t1_set_clock, /* set clock source */
+ .set_speed = lmc_dummy_set2_1, /* set line speed */
+ .set_cable_length = lmc_dummy_set_1, /* set cable length */
+ .set_scrambler = lmc_dummy_set_1, /* set scrambler */
+ .get_link_status = lmc_t1_get_link_status, /* get link status */
+ .set_link_status = lmc_dummy_set_1, /* set link status */
+ .set_crc_length = lmc_t1_set_crc_length, /* set CRC length */
+ .set_circuit_type = lmc_t1_set_circuit_type, /* set T1 or E1 circuit type */
+ .watchdog = lmc_t1_watchdog
};
static void
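
The lmc_media conversion above is purely mechanical: positional initializers become C99 designated initializers, which makes each slot self-documenting and immune to member reordering. A minimal illustration with a hypothetical ops struct:

/* Hypothetical ops table, illustrating the benefit of the conversion */
struct ops {
	void (*start)(void);
	void (*stop)(void);
};

static void my_start(void) { }
static void my_stop(void) { }

/* positional: silently mispairs members if the struct is ever reordered */
static struct ops a = { my_start, my_stop };

/* designated: order-independent and self-documenting */
static struct ops b = { .start = my_start, .stop = my_stop };
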
diff --git a/drivers/net/wan/n2.c b/drivers/net/wan/n2.c
index 315bf09d6a20..c8f4517db3a0 100644
--- a/drivers/net/wan/n2.c
+++ b/drivers/net/wan/n2.c
@@ -330,7 +330,6 @@ static void n2_destroy_card(card_t *card)
static const struct net_device_ops n2_ops = {
.ndo_open = n2_open,
.ndo_stop = n2_close,
- .ndo_change_mtu = hdlc_change_mtu,
.ndo_start_xmit = hdlc_start_xmit,
.ndo_do_ioctl = n2_ioctl,
};
diff --git a/drivers/net/wan/pc300too.c b/drivers/net/wan/pc300too.c
index db363856e0b5..e1dd1ec18d64 100644
--- a/drivers/net/wan/pc300too.c
+++ b/drivers/net/wan/pc300too.c
@@ -291,7 +291,6 @@ static void pc300_pci_remove_one(struct pci_dev *pdev)
static const struct net_device_ops pc300_ops = {
.ndo_open = pc300_open,
.ndo_stop = pc300_close,
- .ndo_change_mtu = hdlc_change_mtu,
.ndo_start_xmit = hdlc_start_xmit,
.ndo_do_ioctl = pc300_ioctl,
};
diff --git a/drivers/net/wan/pci200syn.c b/drivers/net/wan/pci200syn.c
index e8455621390e..4e437c599e9a 100644
--- a/drivers/net/wan/pci200syn.c
+++ b/drivers/net/wan/pci200syn.c
@@ -270,7 +270,6 @@ static void pci200_pci_remove_one(struct pci_dev *pdev)
static const struct net_device_ops pci200_ops = {
.ndo_open = pci200_open,
.ndo_stop = pci200_close,
- .ndo_change_mtu = hdlc_change_mtu,
.ndo_start_xmit = hdlc_start_xmit,
.ndo_do_ioctl = pci200_ioctl,
};
diff --git a/drivers/net/wan/sbni.c b/drivers/net/wan/sbni.c
index 3a421ca8a4d0..3f83be98d469 100644
--- a/drivers/net/wan/sbni.c
+++ b/drivers/net/wan/sbni.c
@@ -211,7 +211,6 @@ static const struct net_device_ops sbni_netdev_ops = {
.ndo_start_xmit = sbni_start_xmit,
.ndo_set_rx_mode = set_multicast_list,
.ndo_do_ioctl = sbni_ioctl,
- .ndo_change_mtu = eth_change_mtu,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
};
diff --git a/drivers/net/wan/sealevel.c b/drivers/net/wan/sealevel.c
index 27860b4f5908..fbb5aa2c4d8f 100644
--- a/drivers/net/wan/sealevel.c
+++ b/drivers/net/wan/sealevel.c
@@ -174,7 +174,6 @@ static int sealevel_attach(struct net_device *dev, unsigned short encoding,
static const struct net_device_ops sealevel_ops = {
.ndo_open = sealevel_open,
.ndo_stop = sealevel_close,
- .ndo_change_mtu = hdlc_change_mtu,
.ndo_start_xmit = hdlc_start_xmit,
.ndo_do_ioctl = sealevel_ioctl,
};
diff --git a/drivers/net/wan/wanxl.c b/drivers/net/wan/wanxl.c
index a20d688d2595..0c7317520ed3 100644
--- a/drivers/net/wan/wanxl.c
+++ b/drivers/net/wan/wanxl.c
@@ -551,7 +551,6 @@ static void wanxl_pci_remove_one(struct pci_dev *pdev)
static const struct net_device_ops wanxl_ops = {
.ndo_open = wanxl_open,
.ndo_stop = wanxl_close,
- .ndo_change_mtu = hdlc_change_mtu,
.ndo_start_xmit = hdlc_start_xmit,
.ndo_do_ioctl = wanxl_ioctl,
.ndo_get_stats = wanxl_get_stats,
diff --git a/drivers/net/wan/x25_asy.c b/drivers/net/wan/x25_asy.c
index 1bc5e93d2a34..878b05d06fc7 100644
--- a/drivers/net/wan/x25_asy.c
+++ b/drivers/net/wan/x25_asy.c
@@ -124,9 +124,6 @@ static int x25_asy_change_mtu(struct net_device *dev, int newmtu)
unsigned char *xbuff, *rbuff;
int len;
- if (newmtu > 65534)
- return -EINVAL;
-
len = 2 * newmtu;
xbuff = kmalloc(len + 4, GFP_ATOMIC);
rbuff = kmalloc(len + 4, GFP_ATOMIC);
@@ -751,6 +748,8 @@ static void x25_asy_setup(struct net_device *dev)
*/
dev->mtu = SL_MTU;
+ dev->min_mtu = 0;
+ dev->max_mtu = 65534;
dev->netdev_ops = &x25_asy_netdev_ops;
dev->watchdog_timeo = HZ*20;
dev->hard_header_len = 0;
diff --git a/drivers/net/wimax/i2400m/netdev.c b/drivers/net/wimax/i2400m/netdev.c
index bb74f4b9a02f..7f64e74d746b 100644
--- a/drivers/net/wimax/i2400m/netdev.c
+++ b/drivers/net/wimax/i2400m/netdev.c
@@ -395,25 +395,6 @@ drop:
static
-int i2400m_change_mtu(struct net_device *net_dev, int new_mtu)
-{
- int result;
- struct i2400m *i2400m = net_dev_to_i2400m(net_dev);
- struct device *dev = i2400m_dev(i2400m);
-
- if (new_mtu >= I2400M_MAX_MTU) {
- dev_err(dev, "Cannot change MTU to %d (max is %d)\n",
- new_mtu, I2400M_MAX_MTU);
- result = -EINVAL;
- } else {
- net_dev->mtu = new_mtu;
- result = 0;
- }
- return result;
-}
-
-
-static
void i2400m_tx_timeout(struct net_device *net_dev)
{
/*
@@ -590,7 +571,6 @@ static const struct net_device_ops i2400m_netdev_ops = {
.ndo_stop = i2400m_stop,
.ndo_start_xmit = i2400m_hard_start_xmit,
.ndo_tx_timeout = i2400m_tx_timeout,
- .ndo_change_mtu = i2400m_change_mtu,
};
static void i2400m_get_drvinfo(struct net_device *net_dev,
@@ -621,6 +601,8 @@ void i2400m_netdev_setup(struct net_device *net_dev)
d_fnstart(3, NULL, "(net_dev %p)\n", net_dev);
ether_setup(net_dev);
net_dev->mtu = I2400M_MAX_MTU;
+ net_dev->min_mtu = 0;
+ net_dev->max_mtu = I2400M_MAX_MTU;
net_dev->tx_queue_len = I2400M_TX_QLEN;
net_dev->features =
NETIF_F_VLAN_CHALLENGED
diff --git a/drivers/net/wireless/Kconfig b/drivers/net/wireless/Kconfig
index 8c8edaf1bba6..8f5a3f4a43f2 100644
--- a/drivers/net/wireless/Kconfig
+++ b/drivers/net/wireless/Kconfig
@@ -17,6 +17,19 @@ menuconfig WLAN
if WLAN
+config WIRELESS_WDS
+ bool "mac80211-based legacy WDS support" if EXPERT
+ help
+ This option enables the deprecated WDS support; the newer
+ mac80211-based 4-addr AP/client support supersedes it with
+ a much better feature set (HT, VHT, ...).
+
+ We plan to remove this option and code, so if you find
+ that you have to enable it, please let us know on the
+ linux-wireless@vger.kernel.org mailing list, so we can
+ help you migrate to 4-addr AP/client (or, if it's really
+ necessary, give up on our plan of removing it).
+
source "drivers/net/wireless/admtek/Kconfig"
source "drivers/net/wireless/ath/Kconfig"
source "drivers/net/wireless/atmel/Kconfig"
diff --git a/drivers/net/wireless/ath/Makefile b/drivers/net/wireless/ath/Makefile
index 89f8d5979402..4cdebc7659dd 100644
--- a/drivers/net/wireless/ath/Makefile
+++ b/drivers/net/wireless/ath/Makefile
@@ -19,6 +19,4 @@ ath-objs := main.o \
ath-$(CONFIG_ATH_DEBUG) += debug.o
ath-$(CONFIG_ATH_TRACEPOINTS) += trace.o
-ccflags-y += -D__CHECK_ENDIAN__
-
CFLAGS_trace.o := -I$(src)
diff --git a/drivers/net/wireless/ath/ath.h b/drivers/net/wireless/ath/ath.h
index da7a7c8dafb2..f3f2784f6ebd 100644
--- a/drivers/net/wireless/ath/ath.h
+++ b/drivers/net/wireless/ath/ath.h
@@ -327,4 +327,10 @@ static inline const char *ath_opmode_to_string(enum nl80211_iftype opmode)
}
#endif
+extern const char *ath_bus_type_strings[];
+static inline const char *ath_bus_type_to_string(enum ath_bus_type bustype)
+{
+ return ath_bus_type_strings[bustype];
+}
+
#endif /* ATH_H */
diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c
index 21ae8d663e67..749e381edd38 100644
--- a/drivers/net/wireless/ath/ath10k/core.c
+++ b/drivers/net/wireless/ath/ath10k/core.c
@@ -198,6 +198,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.name = "qca9984/qca9994 hw1.0",
.patch_load_addr = QCA9984_HW_1_0_PATCH_LOAD_ADDR,
.uart_pin = 7,
+ .cc_wraparound_type = ATH10K_HW_CC_WRAP_SHIFTED_EACH,
.otp_exe_param = 0x00000700,
.continuous_frag_desc = true,
.cck_rate_map_rev2 = true,
@@ -223,6 +224,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.name = "qca9888 hw2.0",
.patch_load_addr = QCA9888_HW_2_0_PATCH_LOAD_ADDR,
.uart_pin = 7,
+ .cc_wraparound_type = ATH10K_HW_CC_WRAP_SHIFTED_EACH,
.otp_exe_param = 0x00000700,
.continuous_frag_desc = true,
.channel_counters_freq_hz = 150000,
@@ -324,6 +326,7 @@ static const char *const ath10k_core_fw_feature_str[] = {
[ATH10K_FW_FEATURE_PEER_FLOW_CONTROL] = "peer-flow-ctrl",
[ATH10K_FW_FEATURE_BTCOEX_PARAM] = "btcoex-param",
[ATH10K_FW_FEATURE_SKIP_NULL_FUNC_WAR] = "skip-null-func-war",
+ [ATH10K_FW_FEATURE_ALLOWS_MESH_BCAST] = "allows-mesh-bcast",
};
static unsigned int ath10k_core_get_fw_feature_str(char *buf,
@@ -1534,7 +1537,7 @@ static void ath10k_core_restart(struct work_struct *work)
switch (ar->state) {
case ATH10K_STATE_ON:
ar->state = ATH10K_STATE_RESTARTING;
- ath10k_hif_stop(ar);
+ ath10k_halt(ar);
ath10k_scan_finish(ar);
ieee80211_restart_hw(ar->hw);
break;
@@ -1560,6 +1563,15 @@ static void ath10k_core_restart(struct work_struct *work)
mutex_unlock(&ar->conf_mutex);
}
+static void ath10k_core_set_coverage_class_work(struct work_struct *work)
+{
+ struct ath10k *ar = container_of(work, struct ath10k,
+ set_coverage_class_work);
+
+ if (ar->hw_params.hw_ops->set_coverage_class)
+ ar->hw_params.hw_ops->set_coverage_class(ar, -1);
+}
+
static int ath10k_core_init_firmware_features(struct ath10k *ar)
{
struct ath10k_fw_file *fw_file = &ar->normal_mode_fw.fw_file;
@@ -1846,7 +1858,7 @@ int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode,
goto err_wmi_detach;
}
- status = ath10k_htt_tx_alloc(&ar->htt);
+ status = ath10k_htt_tx_start(&ar->htt);
if (status) {
ath10k_err(ar, "failed to alloc htt tx: %d\n", status);
goto err_wmi_detach;
@@ -2041,7 +2053,7 @@ void ath10k_core_stop(struct ath10k *ar)
ath10k_wait_for_suspend(ar, WMI_PDEV_SUSPEND_AND_DISABLE_INTR);
ath10k_hif_stop(ar);
- ath10k_htt_tx_free(&ar->htt);
+ ath10k_htt_tx_stop(&ar->htt);
ath10k_htt_rx_free(&ar->htt);
ath10k_wmi_detach(ar);
}
@@ -2342,6 +2354,8 @@ struct ath10k *ath10k_core_create(size_t priv_size, struct device *dev,
INIT_WORK(&ar->register_work, ath10k_core_register_work);
INIT_WORK(&ar->restart_work, ath10k_core_restart);
+ INIT_WORK(&ar->set_coverage_class_work,
+ ath10k_core_set_coverage_class_work);
init_dummy_netdev(&ar->napi_dev);
@@ -2372,6 +2386,7 @@ void ath10k_core_destroy(struct ath10k *ar)
destroy_workqueue(ar->workqueue_aux);
ath10k_debug_destroy(ar);
+ ath10k_htt_tx_destroy(&ar->htt);
ath10k_wmi_free_host_mem(ar);
ath10k_mac_destroy(ar);
}
diff --git a/drivers/net/wireless/ath/ath10k/core.h b/drivers/net/wireless/ath/ath10k/core.h
index 521f1c55c19e..09ff8b8a6441 100644
--- a/drivers/net/wireless/ath/ath10k/core.h
+++ b/drivers/net/wireless/ath/ath10k/core.h
@@ -337,6 +337,7 @@ struct ath10k_sta {
u32 nss;
u32 smps;
u16 peer_id;
+ struct rate_info txrate;
struct work_struct update_wk;
@@ -557,13 +558,18 @@ enum ath10k_fw_features {
*/
ATH10K_FW_FEATURE_BTCOEX_PARAM = 14,
- /* Older firmware with HTT delivers incorrect tx status for null func
- * frames to driver, but this fixed in 10.2 and 10.4 firmware versions.
- * Also this workaround results in reporting of incorrect null func
- * status for 10.4. This flag is used to skip the workaround.
+ /* Unused flag, proven not to work; enable this if you want to
+ * experiment with sending NULL-func data frames in HTT TX.
*/
ATH10K_FW_FEATURE_SKIP_NULL_FUNC_WAR = 15,
+ /* Firmware allows other-BSS mesh broadcast/multicast frames without
+ * creating a monitor interface; the appropriate rx filters are
+ * programmed for the mesh vdev by the firmware itself. This feature
+ * flag is used to skip creating a monitor vdev when configuring a
+ * mesh node.
+ */
+ ATH10K_FW_FEATURE_ALLOWS_MESH_BCAST = 16,
+
/* keep last */
ATH10K_FW_FEATURE_COUNT,
};
@@ -695,6 +701,21 @@ struct ath10k_fw_components {
struct ath10k_fw_file fw_file;
};
+struct ath10k_per_peer_tx_stats {
+ u32 succ_bytes;
+ u32 retry_bytes;
+ u32 failed_bytes;
+ u8 ratecode;
+ u8 flags;
+ u16 peer_id;
+ u16 succ_pkts;
+ u16 retry_pkts;
+ u16 failed_pkts;
+ u16 duration;
+ u32 reserved1;
+ u32 reserved2;
+};
+
struct ath10k {
struct ath_common ath_common;
struct ieee80211_hw *hw;
@@ -714,6 +735,7 @@ struct ath10k {
u32 phy_capability;
u32 hw_min_tx_power;
u32 hw_max_tx_power;
+ u32 hw_eeprom_rd;
u32 ht_cap_info;
u32 vht_cap_info;
u32 num_rf_chains;
@@ -907,11 +929,25 @@ struct ath10k {
struct ath10k_thermal thermal;
struct ath10k_wow wow;
+ struct ath10k_per_peer_tx_stats peer_tx_stats;
/* NAPI */
struct net_device napi_dev;
struct napi_struct napi;
+ struct work_struct set_coverage_class_work;
+ /* protected by conf_mutex */
+ struct {
+ /* writing also protected by data_lock */
+ s16 coverage_class;
+
+ u32 reg_phyclk;
+ u32 reg_slottime_conf;
+ u32 reg_slottime_orig;
+ u32 reg_ack_cts_timeout_conf;
+ u32 reg_ack_cts_timeout_orig;
+ } fw_coverage;
+
/* must be last */
u8 drv_priv[0] __aligned(sizeof(void *));
};
diff --git a/drivers/net/wireless/ath/ath10k/debug.h b/drivers/net/wireless/ath/ath10k/debug.h
index c458fa96a6d4..335512b11ca2 100644
--- a/drivers/net/wireless/ath/ath10k/debug.h
+++ b/drivers/net/wireless/ath/ath10k/debug.h
@@ -94,7 +94,19 @@ int ath10k_debug_get_et_sset_count(struct ieee80211_hw *hw,
void ath10k_debug_get_et_stats(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ethtool_stats *stats, u64 *data);
+
+static inline u64 ath10k_debug_get_fw_dbglog_mask(struct ath10k *ar)
+{
+ return ar->debug.fw_dbglog_mask;
+}
+
+static inline u32 ath10k_debug_get_fw_dbglog_level(struct ath10k *ar)
+{
+ return ar->debug.fw_dbglog_level;
+}
+
#else
+
static inline int ath10k_debug_start(struct ath10k *ar)
{
return 0;
@@ -144,6 +156,16 @@ ath10k_debug_get_new_fw_crash_data(struct ath10k *ar)
return NULL;
}
+static inline u64 ath10k_debug_get_fw_dbglog_mask(struct ath10k *ar)
+{
+ return 0;
+}
+
+static inline u32 ath10k_debug_get_fw_dbglog_level(struct ath10k *ar)
+{
+ return 0;
+}
+
#define ATH10K_DFS_STAT_INC(ar, c) do { } while (0)
#define ath10k_debug_get_et_strings NULL
diff --git a/drivers/net/wireless/ath/ath10k/debugfs_sta.c b/drivers/net/wireless/ath/ath10k/debugfs_sta.c
index 9955fea0802a..fce6f8137d33 100644
--- a/drivers/net/wireless/ath/ath10k/debugfs_sta.c
+++ b/drivers/net/wireless/ath/ath10k/debugfs_sta.c
@@ -77,6 +77,19 @@ void ath10k_sta_statistics(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
sinfo->rx_duration = arsta->rx_duration;
sinfo->filled |= 1ULL << NL80211_STA_INFO_RX_DURATION;
+
+ if (!arsta->txrate.legacy && !arsta->txrate.nss)
+ return;
+
+ if (arsta->txrate.legacy) {
+ sinfo->txrate.legacy = arsta->txrate.legacy;
+ } else {
+ sinfo->txrate.mcs = arsta->txrate.mcs;
+ sinfo->txrate.nss = arsta->txrate.nss;
+ sinfo->txrate.bw = arsta->txrate.bw;
+ }
+ sinfo->txrate.flags = arsta->txrate.flags;
+ sinfo->filled |= 1ULL << NL80211_STA_INFO_TX_BITRATE;
}
static ssize_t ath10k_dbg_sta_read_aggr_mode(struct file *file,
diff --git a/drivers/net/wireless/ath/ath10k/htt.c b/drivers/net/wireless/ath/ath10k/htt.c
index 130cd9502021..cd160b16db1e 100644
--- a/drivers/net/wireless/ath/ath10k/htt.c
+++ b/drivers/net/wireless/ath/ath10k/htt.c
@@ -137,6 +137,8 @@ static const enum htt_t2h_msg_type htt_10_4_t2h_msg_types[] = {
HTT_T2H_MSG_TYPE_STATS_NOUPLOAD,
[HTT_10_4_T2H_MSG_TYPE_TX_MODE_SWITCH_IND] =
HTT_T2H_MSG_TYPE_TX_MODE_SWITCH_IND,
+ [HTT_10_4_T2H_MSG_TYPE_PEER_STATS] =
+ HTT_T2H_MSG_TYPE_PEER_STATS,
};
int ath10k_htt_connect(struct ath10k_htt *htt)
diff --git a/drivers/net/wireless/ath/ath10k/htt.h b/drivers/net/wireless/ath/ath10k/htt.h
index 0d2ed09f202b..44b25cf00553 100644
--- a/drivers/net/wireless/ath/ath10k/htt.h
+++ b/drivers/net/wireless/ath/ath10k/htt.h
@@ -419,6 +419,7 @@ enum htt_10_4_t2h_msg_type {
HTT_10_4_T2H_MSG_TYPE_STATS_NOUPLOAD = 0x18,
/* 0x19 to 0x2f are reserved */
HTT_10_4_T2H_MSG_TYPE_TX_MODE_SWITCH_IND = 0x30,
+ HTT_10_4_T2H_MSG_TYPE_PEER_STATS = 0x31,
/* keep this last */
HTT_10_4_T2H_NUM_MSGS
};
@@ -453,6 +454,7 @@ enum htt_t2h_msg_type {
HTT_T2H_MSG_TYPE_TX_FETCH_IND,
HTT_T2H_MSG_TYPE_TX_FETCH_CONFIRM,
HTT_T2H_MSG_TYPE_TX_MODE_SWITCH_IND,
+ HTT_T2H_MSG_TYPE_PEER_STATS,
/* keep this last */
HTT_T2H_NUM_MSGS
};
@@ -1470,6 +1472,28 @@ struct htt_channel_change {
__le32 phymode;
} __packed;
+struct htt_per_peer_tx_stats_ind {
+ __le32 succ_bytes;
+ __le32 retry_bytes;
+ __le32 failed_bytes;
+ u8 ratecode;
+ u8 flags;
+ __le16 peer_id;
+ __le16 succ_pkts;
+ __le16 retry_pkts;
+ __le16 failed_pkts;
+ __le16 tx_duration;
+ __le32 reserved1;
+ __le32 reserved2;
+} __packed;
+
+struct htt_peer_tx_stats {
+ u8 num_ppdu;
+ u8 ppdu_len;
+ u8 version;
+ u8 payload[0];
+} __packed;
+
union htt_rx_pn_t {
/* WEP: 24-bit PN */
u32 pn24;
@@ -1521,6 +1545,7 @@ struct htt_resp {
struct htt_tx_fetch_confirm tx_fetch_confirm;
struct htt_tx_mode_switch_ind tx_mode_switch_ind;
struct htt_channel_change chan_change;
+ struct htt_peer_tx_stats peer_tx_stats;
};
} __packed;
@@ -1692,6 +1717,8 @@ struct ath10k_htt {
enum htt_tx_mode_switch_mode mode;
enum htt_q_depth_type type;
} tx_q_state;
+
+ bool tx_mem_allocated;
};
#define RX_HTT_HDR_STATUS_LEN 64
@@ -1754,7 +1781,9 @@ int ath10k_htt_connect(struct ath10k_htt *htt);
int ath10k_htt_init(struct ath10k *ar);
int ath10k_htt_setup(struct ath10k_htt *htt);
-int ath10k_htt_tx_alloc(struct ath10k_htt *htt);
+int ath10k_htt_tx_start(struct ath10k_htt *htt);
+void ath10k_htt_tx_stop(struct ath10k_htt *htt);
+void ath10k_htt_tx_destroy(struct ath10k_htt *htt);
void ath10k_htt_tx_free(struct ath10k_htt *htt);
int ath10k_htt_rx_alloc(struct ath10k_htt *htt);
diff --git a/drivers/net/wireless/ath/ath10k/htt_rx.c b/drivers/net/wireless/ath/ath10k/htt_rx.c
index 0b4c1562420f..86d082cf4eef 100644
--- a/drivers/net/wireless/ath/ath10k/htt_rx.c
+++ b/drivers/net/wireless/ath/ath10k/htt_rx.c
@@ -1463,8 +1463,7 @@ static int ath10k_unchain_msdu(struct sk_buff_head *amsdu)
}
static void ath10k_htt_rx_h_unchain(struct ath10k *ar,
- struct sk_buff_head *amsdu,
- bool chained)
+ struct sk_buff_head *amsdu)
{
struct sk_buff *first;
struct htt_rx_desc *rxd;
@@ -1475,9 +1474,6 @@ static void ath10k_htt_rx_h_unchain(struct ath10k *ar,
decap = MS(__le32_to_cpu(rxd->msdu_start.common.info1),
RX_MSDU_START_INFO1_DECAP_FORMAT);
- if (!chained)
- return;
-
/* FIXME: Current unchaining logic can only handle simple case of raw
* msdu chaining. If decapping is other than raw the chaining may be
* more complex and this isn't handled by the current code. Don't even
@@ -1555,7 +1551,11 @@ static int ath10k_htt_rx_handle_amsdu(struct ath10k_htt *htt)
num_msdus = skb_queue_len(&amsdu);
ath10k_htt_rx_h_ppdu(ar, &amsdu, rx_status, 0xffff);
- ath10k_htt_rx_h_unchain(ar, &amsdu, ret > 0);
+
+ /* ret > 0 indicates chained msdus */
+ if (ret > 0)
+ ath10k_htt_rx_h_unchain(ar, &amsdu);
+
ath10k_htt_rx_h_filter(ar, &amsdu, rx_status);
ath10k_htt_rx_h_mpdu(ar, &amsdu, rx_status);
ath10k_htt_rx_h_deliver(ar, &amsdu, rx_status);
@@ -2194,6 +2194,128 @@ void ath10k_htt_htc_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
dev_kfree_skb_any(skb);
}
+static inline bool is_valid_legacy_rate(u8 rate)
+{
+ static const u8 legacy_rates[] = {1, 2, 5, 11, 6, 9, 12,
+ 18, 24, 36, 48, 54};
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(legacy_rates); i++) {
+ if (rate == legacy_rates[i])
+ return true;
+ }
+
+ return false;
+}
+
+static void
+ath10k_update_per_peer_tx_stats(struct ath10k *ar,
+ struct ieee80211_sta *sta,
+ struct ath10k_per_peer_tx_stats *peer_stats)
+{
+ struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
+ u8 rate = 0, sgi;
+ struct rate_info txrate;
+
+ lockdep_assert_held(&ar->data_lock);
+
+ txrate.flags = ATH10K_HW_PREAMBLE(peer_stats->ratecode);
+ txrate.bw = ATH10K_HW_BW(peer_stats->flags);
+ txrate.nss = ATH10K_HW_NSS(peer_stats->ratecode);
+ txrate.mcs = ATH10K_HW_MCS_RATE(peer_stats->ratecode);
+ sgi = ATH10K_HW_GI(peer_stats->flags);
+
+ if (((txrate.flags == WMI_RATE_PREAMBLE_HT) ||
+ (txrate.flags == WMI_RATE_PREAMBLE_VHT)) && txrate.mcs > 9) {
+ ath10k_warn(ar, "Invalid mcs %hhd peer stats", txrate.mcs);
+ return;
+ }
+
+ if (txrate.flags == WMI_RATE_PREAMBLE_CCK ||
+ txrate.flags == WMI_RATE_PREAMBLE_OFDM) {
+ rate = ATH10K_HW_LEGACY_RATE(peer_stats->ratecode);
+
+ if (!is_valid_legacy_rate(rate)) {
+ ath10k_warn(ar, "Invalid legacy rate %hhd peer stats",
+ rate);
+ return;
+ }
+
+ /* This is hacky; FW sends the CCK rate 5.5 Mbps as 6 */
+ rate *= 10;
+ if (rate == 60 && txrate.flags == WMI_RATE_PREAMBLE_CCK)
+ rate = rate - 5;
+ arsta->txrate.legacy = rate * 10;
+ } else if (txrate.flags == WMI_RATE_PREAMBLE_HT) {
+ arsta->txrate.flags = RATE_INFO_FLAGS_MCS;
+ arsta->txrate.mcs = txrate.mcs;
+ } else {
+ arsta->txrate.flags = RATE_INFO_FLAGS_VHT_MCS;
+ arsta->txrate.mcs = txrate.mcs;
+ }
+
+ if (sgi)
+ arsta->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;
+
+ arsta->txrate.nss = txrate.nss;
+ arsta->txrate.bw = txrate.bw + RATE_INFO_BW_20;
+}
+
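
The CCK quirk above is easiest to follow with concrete numbers, tracing the arithmetic exactly as written:

/* FW reports CCK 5.5 Mbps with a legacy-rate field of 6:
 *   rate = 6                        (ATH10K_HW_LEGACY_RATE())
 *   rate *= 10         -> 60
 *   CCK && rate == 60  -> rate = 55 (the "5.5 sent as 6" fixup)
 *   txrate.legacy      = 55 * 10 = 550
 * A plain OFDM 24 Mbps frame takes the straight path: 24 -> 240 -> 2400.
 */
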
+static void ath10k_htt_fetch_peer_stats(struct ath10k *ar,
+ struct sk_buff *skb)
+{
+ struct htt_resp *resp = (struct htt_resp *)skb->data;
+ struct ath10k_per_peer_tx_stats *p_tx_stats = &ar->peer_tx_stats;
+ struct htt_per_peer_tx_stats_ind *tx_stats;
+ struct ieee80211_sta *sta;
+ struct ath10k_peer *peer;
+ int peer_id, i;
+ u8 ppdu_len, num_ppdu;
+
+ num_ppdu = resp->peer_tx_stats.num_ppdu;
+ ppdu_len = resp->peer_tx_stats.ppdu_len * sizeof(__le32);
+
+ if (skb->len < sizeof(struct htt_resp_hdr) + num_ppdu * ppdu_len) {
+ ath10k_warn(ar, "Invalid peer stats buf length %d\n", skb->len);
+ return;
+ }
+
+ tx_stats = (struct htt_per_peer_tx_stats_ind *)
+ (resp->peer_tx_stats.payload);
+ peer_id = __le16_to_cpu(tx_stats->peer_id);
+
+ rcu_read_lock();
+ spin_lock_bh(&ar->data_lock);
+ peer = ath10k_peer_find_by_id(ar, peer_id);
+ if (!peer) {
+ ath10k_warn(ar, "Invalid peer id %d peer stats buffer\n",
+ peer_id);
+ goto out;
+ }
+
+ sta = peer->sta;
+ for (i = 0; i < num_ppdu; i++) {
+ tx_stats = (struct htt_per_peer_tx_stats_ind *)
+ (resp->peer_tx_stats.payload + i * ppdu_len);
+
+ p_tx_stats->succ_bytes = __le32_to_cpu(tx_stats->succ_bytes);
+ p_tx_stats->retry_bytes = __le32_to_cpu(tx_stats->retry_bytes);
+ p_tx_stats->failed_bytes =
+ __le32_to_cpu(tx_stats->failed_bytes);
+ p_tx_stats->ratecode = tx_stats->ratecode;
+ p_tx_stats->flags = tx_stats->flags;
+ p_tx_stats->succ_pkts = __le16_to_cpu(tx_stats->succ_pkts);
+ p_tx_stats->retry_pkts = __le16_to_cpu(tx_stats->retry_pkts);
+ p_tx_stats->failed_pkts = __le16_to_cpu(tx_stats->failed_pkts);
+
+ ath10k_update_per_peer_tx_stats(ar, sta, p_tx_stats);
+ }
+
+out:
+ spin_unlock_bh(&ar->data_lock);
+ rcu_read_unlock();
+}
+
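
The length check above sizes the flexible payload[] area before the loop touches it: with illustrative values num_ppdu = 2 and a ppdu_len field of 8, each record is 8 * sizeof(__le32) = 32 bytes, so the skb must carry at least sizeof(struct htt_resp_hdr) + 64 bytes before both records may be dereferenced.
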
bool ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
{
struct ath10k_htt *htt = &ar->htt;
@@ -2354,6 +2476,9 @@ bool ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
case HTT_T2H_MSG_TYPE_TX_MODE_SWITCH_IND:
ath10k_htt_rx_tx_mode_switch_ind(ar, skb);
break;
+ case HTT_T2H_MSG_TYPE_PEER_STATS:
+ ath10k_htt_fetch_peer_stats(ar, skb);
+ break;
case HTT_T2H_MSG_TYPE_EN_STATS:
default:
ath10k_warn(ar, "htt event (%d) not handled\n",
diff --git a/drivers/net/wireless/ath/ath10k/htt_tx.c b/drivers/net/wireless/ath/ath10k/htt_tx.c
index ae5b33fe5ba8..27e49db4287a 100644
--- a/drivers/net/wireless/ath/ath10k/htt_tx.c
+++ b/drivers/net/wireless/ath/ath10k/htt_tx.c
@@ -229,6 +229,32 @@ void ath10k_htt_tx_free_msdu_id(struct ath10k_htt *htt, u16 msdu_id)
idr_remove(&htt->pending_tx, msdu_id);
}
+static void ath10k_htt_tx_free_cont_txbuf(struct ath10k_htt *htt)
+{
+ struct ath10k *ar = htt->ar;
+ size_t size;
+
+ if (!htt->txbuf.vaddr)
+ return;
+
+ size = htt->max_num_pending_tx * sizeof(struct ath10k_htt_txbuf);
+ dma_free_coherent(ar->dev, size, htt->txbuf.vaddr, htt->txbuf.paddr);
+}
+
+static int ath10k_htt_tx_alloc_cont_txbuf(struct ath10k_htt *htt)
+{
+ struct ath10k *ar = htt->ar;
+ size_t size;
+
+ size = htt->max_num_pending_tx * sizeof(struct ath10k_htt_txbuf);
+ htt->txbuf.vaddr = dma_alloc_coherent(ar->dev, size, &htt->txbuf.paddr,
+ GFP_KERNEL);
+ if (!htt->txbuf.vaddr)
+ return -ENOMEM;
+
+ return 0;
+}
+
static void ath10k_htt_tx_free_cont_frag_desc(struct ath10k_htt *htt)
{
size_t size;
@@ -256,10 +282,8 @@ static int ath10k_htt_tx_alloc_cont_frag_desc(struct ath10k_htt *htt)
htt->frag_desc.vaddr = dma_alloc_coherent(ar->dev, size,
&htt->frag_desc.paddr,
GFP_KERNEL);
- if (!htt->frag_desc.vaddr) {
- ath10k_err(ar, "failed to alloc fragment desc memory\n");
+ if (!htt->frag_desc.vaddr)
return -ENOMEM;
- }
return 0;
}
@@ -310,25 +334,31 @@ static int ath10k_htt_tx_alloc_txq(struct ath10k_htt *htt)
return 0;
}
-int ath10k_htt_tx_alloc(struct ath10k_htt *htt)
+static void ath10k_htt_tx_free_txdone_fifo(struct ath10k_htt *htt)
{
- struct ath10k *ar = htt->ar;
- int ret, size;
+ WARN_ON(!kfifo_is_empty(&htt->txdone_fifo));
+ kfifo_free(&htt->txdone_fifo);
+}
- ath10k_dbg(ar, ATH10K_DBG_BOOT, "htt tx max num pending tx %d\n",
- htt->max_num_pending_tx);
+static int ath10k_htt_tx_alloc_txdone_fifo(struct ath10k_htt *htt)
+{
+ int ret;
+ size_t size;
- spin_lock_init(&htt->tx_lock);
- idr_init(&htt->pending_tx);
+ size = roundup_pow_of_two(htt->max_num_pending_tx);
+ ret = kfifo_alloc(&htt->txdone_fifo, size, GFP_KERNEL);
+ return ret;
+}
- size = htt->max_num_pending_tx * sizeof(struct ath10k_htt_txbuf);
- htt->txbuf.vaddr = dma_alloc_coherent(ar->dev, size,
- &htt->txbuf.paddr,
- GFP_KERNEL);
- if (!htt->txbuf.vaddr) {
- ath10k_err(ar, "failed to alloc tx buffer\n");
- ret = -ENOMEM;
- goto free_idr_pending_tx;
+static int ath10k_htt_tx_alloc_buf(struct ath10k_htt *htt)
+{
+ struct ath10k *ar = htt->ar;
+ int ret;
+
+ ret = ath10k_htt_tx_alloc_cont_txbuf(htt);
+ if (ret) {
+ ath10k_err(ar, "failed to alloc cont tx buffer: %d\n", ret);
+ return ret;
}
ret = ath10k_htt_tx_alloc_cont_frag_desc(htt);
@@ -343,8 +373,7 @@ int ath10k_htt_tx_alloc(struct ath10k_htt *htt)
goto free_frag_desc;
}
- size = roundup_pow_of_two(htt->max_num_pending_tx);
- ret = kfifo_alloc(&htt->txdone_fifo, size, GFP_KERNEL);
+ ret = ath10k_htt_tx_alloc_txdone_fifo(htt);
if (ret) {
ath10k_err(ar, "failed to alloc txdone fifo: %d\n", ret);
goto free_txq;
@@ -359,10 +388,32 @@ free_frag_desc:
ath10k_htt_tx_free_cont_frag_desc(htt);
free_txbuf:
- size = htt->max_num_pending_tx *
- sizeof(struct ath10k_htt_txbuf);
- dma_free_coherent(htt->ar->dev, size, htt->txbuf.vaddr,
- htt->txbuf.paddr);
+ ath10k_htt_tx_free_cont_txbuf(htt);
+
+ return ret;
+}
+
+int ath10k_htt_tx_start(struct ath10k_htt *htt)
+{
+ struct ath10k *ar = htt->ar;
+ int ret;
+
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "htt tx max num pending tx %d\n",
+ htt->max_num_pending_tx);
+
+ spin_lock_init(&htt->tx_lock);
+ idr_init(&htt->pending_tx);
+
+ if (htt->tx_mem_allocated)
+ return 0;
+
+ ret = ath10k_htt_tx_alloc_buf(htt);
+ if (ret)
+ goto free_idr_pending_tx;
+
+ htt->tx_mem_allocated = true;
+
+ return 0;
free_idr_pending_tx:
idr_destroy(&htt->pending_tx);
@@ -386,24 +437,28 @@ static int ath10k_htt_tx_clean_up_pending(int msdu_id, void *skb, void *ctx)
return 0;
}
-void ath10k_htt_tx_free(struct ath10k_htt *htt)
+void ath10k_htt_tx_destroy(struct ath10k_htt *htt)
{
- int size;
+ if (!htt->tx_mem_allocated)
+ return;
+
+ ath10k_htt_tx_free_cont_txbuf(htt);
+ ath10k_htt_tx_free_txq(htt);
+ ath10k_htt_tx_free_cont_frag_desc(htt);
+ ath10k_htt_tx_free_txdone_fifo(htt);
+ htt->tx_mem_allocated = false;
+}
+void ath10k_htt_tx_stop(struct ath10k_htt *htt)
+{
idr_for_each(&htt->pending_tx, ath10k_htt_tx_clean_up_pending, htt->ar);
idr_destroy(&htt->pending_tx);
+}
- if (htt->txbuf.vaddr) {
- size = htt->max_num_pending_tx *
- sizeof(struct ath10k_htt_txbuf);
- dma_free_coherent(htt->ar->dev, size, htt->txbuf.vaddr,
- htt->txbuf.paddr);
- }
-
- ath10k_htt_tx_free_txq(htt);
- ath10k_htt_tx_free_cont_frag_desc(htt);
- WARN_ON(!kfifo_is_empty(&htt->txdone_fifo));
- kfifo_free(&htt->txdone_fifo);
+void ath10k_htt_tx_free(struct ath10k_htt *htt)
+{
+ ath10k_htt_tx_stop(htt);
+ ath10k_htt_tx_destroy(htt);
}
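
The net effect of the split is a three-stage lifecycle; the ordering below is inferred from the corresponding core.c changes earlier in this diff:

/* ath10k_core_start()   -> ath10k_htt_tx_start()
 *         the first call allocates the DMA buffers and sets
 *         tx_mem_allocated; later calls (firmware restart) return
 *         early without reallocating
 * ath10k_core_stop()    -> ath10k_htt_tx_stop()
 *         flushes and destroys the pending-tx idr only
 * ath10k_core_destroy() -> ath10k_htt_tx_destroy()
 *         frees the DMA memory exactly once
 * ath10k_htt_tx_free() keeps the old all-at-once teardown for callers
 * that still want both steps together.
 */
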
void ath10k_htt_htc_tx_complete(struct ath10k *ar, struct sk_buff *skb)
diff --git a/drivers/net/wireless/ath/ath10k/hw.c b/drivers/net/wireless/ath/ath10k/hw.c
index 675e75d66db2..33fb26833cd0 100644
--- a/drivers/net/wireless/ath/ath10k/hw.c
+++ b/drivers/net/wireless/ath/ath10k/hw.c
@@ -17,11 +17,14 @@
#include <linux/types.h>
#include "core.h"
#include "hw.h"
+#include "hif.h"
+#include "wmi-ops.h"
const struct ath10k_hw_regs qca988x_regs = {
.rtc_soc_base_address = 0x00004000,
.rtc_wmac_base_address = 0x00005000,
.soc_core_base_address = 0x00009000,
+ .wlan_mac_base_address = 0x00020000,
.ce_wrapper_base_address = 0x00057000,
.ce0_base_address = 0x00057400,
.ce1_base_address = 0x00057800,
@@ -48,6 +51,7 @@ const struct ath10k_hw_regs qca6174_regs = {
.rtc_soc_base_address = 0x00000800,
.rtc_wmac_base_address = 0x00001000,
.soc_core_base_address = 0x0003a000,
+ .wlan_mac_base_address = 0x00020000,
.ce_wrapper_base_address = 0x00034000,
.ce0_base_address = 0x00034400,
.ce1_base_address = 0x00034800,
@@ -74,6 +78,7 @@ const struct ath10k_hw_regs qca99x0_regs = {
.rtc_soc_base_address = 0x00080000,
.rtc_wmac_base_address = 0x00000000,
.soc_core_base_address = 0x00082000,
+ .wlan_mac_base_address = 0x00030000,
.ce_wrapper_base_address = 0x0004d000,
.ce0_base_address = 0x0004a000,
.ce1_base_address = 0x0004a400,
@@ -109,6 +114,7 @@ const struct ath10k_hw_regs qca99x0_regs = {
const struct ath10k_hw_regs qca4019_regs = {
.rtc_soc_base_address = 0x00080000,
.soc_core_base_address = 0x00082000,
+ .wlan_mac_base_address = 0x00030000,
.ce_wrapper_base_address = 0x0004d000,
.ce0_base_address = 0x0004a000,
.ce1_base_address = 0x0004a400,
@@ -220,7 +226,143 @@ void ath10k_hw_fill_survey_time(struct ath10k *ar, struct survey_info *survey,
survey->time_busy = CCNT_TO_MSEC(ar, rcc);
}
+/* The firmware does not support setting the coverage class. Instead this
+ * function monitors and modifies the corresponding MAC registers.
+ */
+static void ath10k_hw_qca988x_set_coverage_class(struct ath10k *ar,
+ s16 value)
+{
+ u32 slottime_reg;
+ u32 slottime;
+ u32 timeout_reg;
+ u32 ack_timeout;
+ u32 cts_timeout;
+ u32 phyclk_reg;
+ u32 phyclk;
+ u64 fw_dbglog_mask;
+ u32 fw_dbglog_level;
+
+ mutex_lock(&ar->conf_mutex);
+
+ /* Only modify registers if the core is started. */
+ if ((ar->state != ATH10K_STATE_ON) &&
+ (ar->state != ATH10K_STATE_RESTARTED))
+ goto unlock;
+
+ /* Retrieve the current values of the two registers that need to be
+ * adjusted.
+ */
+ slottime_reg = ath10k_hif_read32(ar, WLAN_MAC_BASE_ADDRESS +
+ WAVE1_PCU_GBL_IFS_SLOT);
+ timeout_reg = ath10k_hif_read32(ar, WLAN_MAC_BASE_ADDRESS +
+ WAVE1_PCU_ACK_CTS_TIMEOUT);
+ phyclk_reg = ath10k_hif_read32(ar, WLAN_MAC_BASE_ADDRESS +
+ WAVE1_PHYCLK);
+ phyclk = MS(phyclk_reg, WAVE1_PHYCLK_USEC) + 1;
+
+ if (value < 0)
+ value = ar->fw_coverage.coverage_class;
+
+ /* Break out if the coverage class and registers have the expected
+ * value.
+ */
+ if (value == ar->fw_coverage.coverage_class &&
+ slottime_reg == ar->fw_coverage.reg_slottime_conf &&
+ timeout_reg == ar->fw_coverage.reg_ack_cts_timeout_conf &&
+ phyclk_reg == ar->fw_coverage.reg_phyclk)
+ goto unlock;
+
+ /* Store new initial register values from the firmware. */
+ if (slottime_reg != ar->fw_coverage.reg_slottime_conf)
+ ar->fw_coverage.reg_slottime_orig = slottime_reg;
+ if (timeout_reg != ar->fw_coverage.reg_ack_cts_timeout_conf)
+ ar->fw_coverage.reg_ack_cts_timeout_orig = timeout_reg;
+ ar->fw_coverage.reg_phyclk = phyclk_reg;
+
+ /* Calculate the new value based on the (original) firmware calculation. */
+ slottime_reg = ar->fw_coverage.reg_slottime_orig;
+ timeout_reg = ar->fw_coverage.reg_ack_cts_timeout_orig;
+
+ /* Do some sanity checks on the slottime register. */
+ if (slottime_reg % phyclk) {
+ ath10k_warn(ar,
+ "failed to set coverage class: expected integer microsecond value in register\n");
+
+ goto store_regs;
+ }
+
+ slottime = MS(slottime_reg, WAVE1_PCU_GBL_IFS_SLOT);
+ slottime = slottime / phyclk;
+ if (slottime != 9 && slottime != 20) {
+ ath10k_warn(ar,
+ "failed to set coverage class: expected slot time of 9 or 20us in HW register. It is %uus.\n",
+ slottime);
+
+ goto store_regs;
+ }
+
+ /* Recalculate the register values by adding the additional propagation
+ * delay (3us per coverage class).
+ */
+
+ slottime = MS(slottime_reg, WAVE1_PCU_GBL_IFS_SLOT);
+ slottime += value * 3 * phyclk;
+ slottime = min_t(u32, slottime, WAVE1_PCU_GBL_IFS_SLOT_MAX);
+ slottime = SM(slottime, WAVE1_PCU_GBL_IFS_SLOT);
+ slottime_reg = (slottime_reg & ~WAVE1_PCU_GBL_IFS_SLOT_MASK) | slottime;
+
+ /* Update ack timeout (lower halfword). */
+ ack_timeout = MS(timeout_reg, WAVE1_PCU_ACK_CTS_TIMEOUT_ACK);
+ ack_timeout += 3 * value * phyclk;
+ ack_timeout = min_t(u32, ack_timeout, WAVE1_PCU_ACK_CTS_TIMEOUT_MAX);
+ ack_timeout = SM(ack_timeout, WAVE1_PCU_ACK_CTS_TIMEOUT_ACK);
+
+ /* Update cts timeout (upper halfword). */
+ cts_timeout = MS(timeout_reg, WAVE1_PCU_ACK_CTS_TIMEOUT_CTS);
+ cts_timeout += 3 * value * phyclk;
+ cts_timeout = min_t(u32, cts_timeout, WAVE1_PCU_ACK_CTS_TIMEOUT_MAX);
+ cts_timeout = SM(cts_timeout, WAVE1_PCU_ACK_CTS_TIMEOUT_CTS);
+
+ timeout_reg = ack_timeout | cts_timeout;
+
+ ath10k_hif_write32(ar,
+ WLAN_MAC_BASE_ADDRESS + WAVE1_PCU_GBL_IFS_SLOT,
+ slottime_reg);
+ ath10k_hif_write32(ar,
+ WLAN_MAC_BASE_ADDRESS + WAVE1_PCU_ACK_CTS_TIMEOUT,
+ timeout_reg);
+
+ /* Ensure we have a debug level of WARN set for the case that the
+ * coverage class is larger than 0. This is important as we need to
+ * set the registers again if the firmware does an internal reset, and
+ * the raised log level ensures we are notified of that event.
+ */
+ fw_dbglog_mask = ath10k_debug_get_fw_dbglog_mask(ar);
+ fw_dbglog_level = ath10k_debug_get_fw_dbglog_level(ar);
+
+ if (value > 0) {
+ if (fw_dbglog_level > ATH10K_DBGLOG_LEVEL_WARN)
+ fw_dbglog_level = ATH10K_DBGLOG_LEVEL_WARN;
+ fw_dbglog_mask = ~0;
+ }
+
+ ath10k_wmi_dbglog_cfg(ar, fw_dbglog_mask, fw_dbglog_level);
+
+store_regs:
+ /* After an error we will not retry setting the coverage class. */
+ spin_lock_bh(&ar->data_lock);
+ ar->fw_coverage.coverage_class = value;
+ spin_unlock_bh(&ar->data_lock);
+
+ ar->fw_coverage.reg_slottime_conf = slottime_reg;
+ ar->fw_coverage.reg_ack_cts_timeout_conf = timeout_reg;
+
+unlock:
+ mutex_unlock(&ar->conf_mutex);
+}
+
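
With concrete numbers the register maths above reads as follows (phyclk is read back from WAVE1_PHYCLK as ticks per microsecond; 44 is only an assumed value for illustration):

/* coverage class = 2, phyclk = 44 ticks/us (assumed):
 *   extra propagation delay = 2 * 3 us  = 6 us
 *   slottime    += 2 * 3 * 44           = +264 ticks
 *   ack timeout += 264 ticks, cts timeout += 264 ticks
 * each result is then clamped against its *_MAX value before being
 * packed back into the register with SM().
 */
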
const struct ath10k_hw_ops qca988x_ops = {
+ .set_coverage_class = ath10k_hw_qca988x_set_coverage_class,
};
static int ath10k_qca99x0_rx_desc_get_l3_pad_bytes(struct htt_rx_desc *rxd)
diff --git a/drivers/net/wireless/ath/ath10k/hw.h b/drivers/net/wireless/ath/ath10k/hw.h
index 6038b7486f1d..883547f3347c 100644
--- a/drivers/net/wireless/ath/ath10k/hw.h
+++ b/drivers/net/wireless/ath/ath10k/hw.h
@@ -230,6 +230,7 @@ struct ath10k_hw_regs {
u32 rtc_soc_base_address;
u32 rtc_wmac_base_address;
u32 soc_core_base_address;
+ u32 wlan_mac_base_address;
u32 ce_wrapper_base_address;
u32 ce0_base_address;
u32 ce1_base_address;
@@ -418,6 +419,7 @@ struct htt_rx_desc;
/* Defines needed for Rx descriptor abstraction */
struct ath10k_hw_ops {
int (*rx_desc_get_l3_pad_bytes)(struct htt_rx_desc *rxd);
+ void (*set_coverage_class)(struct ath10k *ar, s16 value);
};
extern const struct ath10k_hw_ops qca988x_ops;
@@ -614,7 +616,7 @@ ath10k_rx_desc_get_l3_pad_bytes(struct ath10k_hw_params *hw,
#define WLAN_SI_BASE_ADDRESS 0x00010000
#define WLAN_GPIO_BASE_ADDRESS 0x00014000
#define WLAN_ANALOG_INTF_BASE_ADDRESS 0x0001c000
-#define WLAN_MAC_BASE_ADDRESS 0x00020000
+#define WLAN_MAC_BASE_ADDRESS ar->regs->wlan_mac_base_address
#define EFUSE_BASE_ADDRESS 0x00030000
#define FPGA_REG_BASE_ADDRESS 0x00039000
#define WLAN_UART2_BASE_ADDRESS 0x00054c00
@@ -814,4 +816,28 @@ ath10k_rx_desc_get_l3_pad_bytes(struct ath10k_hw_params *hw,
#define RTC_STATE_V_GET(x) (((x) & RTC_STATE_V_MASK) >> RTC_STATE_V_LSB)
+/* Register definitions for first generation ath10k cards. These cards include
+ * a MAC which has a register allocation similar to ath9k and at least some
+ * registers including the ones relevant for modifying the coverage class are
+ * identical to the ath9k definitions.
+ * These registers are usually managed by the ath10k firmware. However by
+ * overriding them it is possible to support coverage class modifications.
+ */
+#define WAVE1_PCU_ACK_CTS_TIMEOUT 0x8014
+#define WAVE1_PCU_ACK_CTS_TIMEOUT_MAX 0x00003FFF
+#define WAVE1_PCU_ACK_CTS_TIMEOUT_ACK_MASK 0x00003FFF
+#define WAVE1_PCU_ACK_CTS_TIMEOUT_ACK_LSB 0
+#define WAVE1_PCU_ACK_CTS_TIMEOUT_CTS_MASK 0x3FFF0000
+#define WAVE1_PCU_ACK_CTS_TIMEOUT_CTS_LSB 16
+
+#define WAVE1_PCU_GBL_IFS_SLOT 0x1070
+#define WAVE1_PCU_GBL_IFS_SLOT_MASK 0x0000FFFF
+#define WAVE1_PCU_GBL_IFS_SLOT_MAX 0x0000FFFF
+#define WAVE1_PCU_GBL_IFS_SLOT_LSB 0
+#define WAVE1_PCU_GBL_IFS_SLOT_RESV0 0xFFFF0000
+
+#define WAVE1_PHYCLK 0x801C
+#define WAVE1_PHYCLK_USEC_MASK 0x0000007F
+#define WAVE1_PHYCLK_USEC_LSB 0
+
#endif /* _HW_H_ */
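The field arithmetic in the coverage-class handler above leans on ath10k's SM()/MS() shift-mask helpers. As a rough standalone sketch (assuming the conventional (((v) << f##_LSB) & f##_MASK) style definitions, reproduced here rather than taken from this patch), this is how the ACK and CTS halves of WAVE1_PCU_ACK_CTS_TIMEOUT are unpacked and repacked:

#include <stdio.h>
#include <stdint.h>

#define WAVE1_PCU_ACK_CTS_TIMEOUT_ACK_MASK 0x00003FFF
#define WAVE1_PCU_ACK_CTS_TIMEOUT_ACK_LSB 0
#define WAVE1_PCU_ACK_CTS_TIMEOUT_CTS_MASK 0x3FFF0000
#define WAVE1_PCU_ACK_CTS_TIMEOUT_CTS_LSB 16

#define MS(v, f) (((v) & f##_MASK) >> f##_LSB) /* extract a field */
#define SM(v, f) (((v) << f##_LSB) & f##_MASK) /* pack a field */

int main(void)
{
	uint32_t reg = 0x00400020; /* cts = 64, ack = 32 */
	uint32_t ack = MS(reg, WAVE1_PCU_ACK_CTS_TIMEOUT_ACK);
	uint32_t cts = MS(reg, WAVE1_PCU_ACK_CTS_TIMEOUT_CTS);

	/* Rebuild the register the way the handler above does. */
	reg = SM(ack, WAVE1_PCU_ACK_CTS_TIMEOUT_ACK) |
	      SM(cts, WAVE1_PCU_ACK_CTS_TIMEOUT_CTS);
	printf("ack=%u cts=%u reg=0x%08x\n", ack, cts, reg);
	return 0;
}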
diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c
index 76297d69f1ed..aa545a1dbdc7 100644
--- a/drivers/net/wireless/ath/ath10k/mac.c
+++ b/drivers/net/wireless/ath/ath10k/mac.c
@@ -19,6 +19,7 @@
#include <net/mac80211.h>
#include <linux/etherdevice.h>
+#include <linux/acpi.h>
#include "hif.h"
#include "core.h"
@@ -1166,7 +1167,9 @@ static bool ath10k_mac_monitor_vdev_is_needed(struct ath10k *ar)
return false;
return ar->monitor ||
- ar->filter_flags & FIF_OTHER_BSS ||
+ (!test_bit(ATH10K_FW_FEATURE_ALLOWS_MESH_BCAST,
+ ar->running_fw->fw_file.fw_features) &&
+ (ar->filter_flags & FIF_OTHER_BSS)) ||
test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags);
}
@@ -3179,7 +3182,8 @@ static void ath10k_mac_vif_handle_tx_pause(struct ath10k_vif *arvif,
ath10k_mac_vif_tx_unlock(arvif, pause_id);
break;
default:
- ath10k_warn(ar, "received unknown tx pause action %d on vdev %i, ignoring\n",
+ ath10k_dbg(ar, ATH10K_DBG_BOOT,
+ "received unknown tx pause action %d on vdev %i, ignoring\n",
action, arvif->vdev_id);
break;
}
@@ -3255,8 +3259,6 @@ ath10k_mac_tx_h_get_txmode(struct ath10k *ar,
if (ar->htt.target_version_major < 3 &&
(ieee80211_is_nullfunc(fc) || ieee80211_is_qos_nullfunc(fc)) &&
!test_bit(ATH10K_FW_FEATURE_HAS_WMI_MGMT_TX,
- ar->running_fw->fw_file.fw_features) &&
- !test_bit(ATH10K_FW_FEATURE_SKIP_NULL_FUNC_WAR,
ar->running_fw->fw_file.fw_features))
return ATH10K_HW_TXRX_MGMT;
@@ -4449,7 +4451,6 @@ static int ath10k_start(struct ieee80211_hw *hw)
ar->state = ATH10K_STATE_ON;
break;
case ATH10K_STATE_RESTARTING:
- ath10k_halt(ar);
ar->state = ATH10K_STATE_RESTARTED;
break;
case ATH10K_STATE_ON:
@@ -4929,7 +4930,9 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
}
ar->free_vdev_map &= ~(1LL << arvif->vdev_id);
+ spin_lock_bh(&ar->data_lock);
list_add(&arvif->list, &ar->arvifs);
+ spin_unlock_bh(&ar->data_lock);
/* It makes no sense to have firmware do keepalives. mac80211 already
* takes care of this with idle connection polling.
@@ -5080,7 +5083,9 @@ err_peer_delete:
err_vdev_delete:
ath10k_wmi_vdev_delete(ar, arvif->vdev_id);
ar->free_vdev_map |= 1LL << arvif->vdev_id;
+ spin_lock_bh(&ar->data_lock);
list_del(&arvif->list);
+ spin_unlock_bh(&ar->data_lock);
err:
if (arvif->beacon_buf) {
@@ -5126,7 +5131,9 @@ static void ath10k_remove_interface(struct ieee80211_hw *hw,
arvif->vdev_id, ret);
ar->free_vdev_map |= 1LL << arvif->vdev_id;
+ spin_lock_bh(&ar->data_lock);
list_del(&arvif->list);
+ spin_unlock_bh(&ar->data_lock);
if (arvif->vdev_type == WMI_VDEV_TYPE_AP ||
arvif->vdev_type == WMI_VDEV_TYPE_IBSS) {
@@ -5410,6 +5417,20 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
mutex_unlock(&ar->conf_mutex);
}
+static void ath10k_mac_op_set_coverage_class(struct ieee80211_hw *hw, s16 value)
+{
+ struct ath10k *ar = hw->priv;
+
+ /* This function should never be called if setting the coverage class
+ * is not supported on this hardware.
+ */
+ if (!ar->hw_params.hw_ops->set_coverage_class) {
+ WARN_ON_ONCE(1);
+ return;
+ }
+ ar->hw_params.hw_ops->set_coverage_class(ar, value);
+}
+
static int ath10k_hw_scan(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_scan_request *hw_req)
@@ -6956,40 +6977,28 @@ static void ath10k_sta_rc_update(struct ieee80211_hw *hw,
ieee80211_queue_work(hw, &arsta->update_wk);
}
-static u64 ath10k_get_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
-{
- /*
- * FIXME: Return 0 for time being. Need to figure out whether FW
- * has the API to fetch 64-bit local TSF
- */
-
- return 0;
-}
-
-static void ath10k_set_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
- u64 tsf)
+static void ath10k_offset_tsf(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif, s64 tsf_offset)
{
struct ath10k *ar = hw->priv;
struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
- u32 tsf_offset, vdev_param = ar->wmi.vdev_param->set_tsf;
+ u32 offset, vdev_param;
int ret;
- /* Workaround:
- *
- * Given tsf argument is entire TSF value, but firmware accepts
- * only TSF offset to current TSF.
- *
- * get_tsf function is used to get offset value, however since
- * ath10k_get_tsf is not implemented properly, it will return 0 always.
- * Luckily all the caller functions to set_tsf, as of now, also rely on
- * get_tsf function to get entire tsf value such get_tsf() + tsf_delta,
- * final tsf offset value to firmware will be arithmetically correct.
- */
- tsf_offset = tsf - ath10k_get_tsf(hw, vif);
+ if (tsf_offset < 0) {
+ vdev_param = ar->wmi.vdev_param->dec_tsf;
+ offset = -tsf_offset;
+ } else {
+ vdev_param = ar->wmi.vdev_param->inc_tsf;
+ offset = tsf_offset;
+ }
+
ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id,
- vdev_param, tsf_offset);
+ vdev_param, offset);
+
if (ret && ret != -EOPNOTSUPP)
- ath10k_warn(ar, "failed to set tsf offset: %d\n", ret);
+ ath10k_warn(ar, "failed to set tsf offset %d cmd %d: %d\n",
+ offset, vdev_param, ret);
}
static int ath10k_ampdu_action(struct ieee80211_hw *hw,
@@ -7435,6 +7444,7 @@ static const struct ieee80211_ops ath10k_ops = {
.remove_interface = ath10k_remove_interface,
.configure_filter = ath10k_configure_filter,
.bss_info_changed = ath10k_bss_info_changed,
+ .set_coverage_class = ath10k_mac_op_set_coverage_class,
.hw_scan = ath10k_hw_scan,
.cancel_hw_scan = ath10k_cancel_hw_scan,
.set_key = ath10k_set_key,
@@ -7453,8 +7463,7 @@ static const struct ieee80211_ops ath10k_ops = {
.get_survey = ath10k_get_survey,
.set_bitrate_mask = ath10k_mac_op_set_bitrate_mask,
.sta_rc_update = ath10k_sta_rc_update,
- .get_tsf = ath10k_get_tsf,
- .set_tsf = ath10k_set_tsf,
+ .offset_tsf = ath10k_offset_tsf,
.ampdu_action = ath10k_ampdu_action,
.get_et_sset_count = ath10k_debug_get_et_sset_count,
.get_et_stats = ath10k_debug_get_et_stats,
@@ -7789,6 +7798,109 @@ struct ath10k_vif *ath10k_get_arvif(struct ath10k *ar, u32 vdev_id)
return arvif_iter.arvif;
}
+#define WRD_METHOD "WRDD"
+#define WRDD_WIFI (0x07)
+
+static u32 ath10k_mac_wrdd_get_mcc(struct ath10k *ar, union acpi_object *wrdd)
+{
+ union acpi_object *mcc_pkg;
+ union acpi_object *domain_type;
+ union acpi_object *mcc_value;
+ u32 i;
+
+ if (wrdd->type != ACPI_TYPE_PACKAGE ||
+ wrdd->package.count < 2 ||
+ wrdd->package.elements[0].type != ACPI_TYPE_INTEGER ||
+ wrdd->package.elements[0].integer.value != 0) {
+ ath10k_warn(ar, "ignoring malformed/unsupported wrdd structure\n");
+ return 0;
+ }
+
+ for (i = 1; i < wrdd->package.count; ++i) {
+ mcc_pkg = &wrdd->package.elements[i];
+
+ if (mcc_pkg->type != ACPI_TYPE_PACKAGE)
+ continue;
+ if (mcc_pkg->package.count < 2)
+ continue;
+ if (mcc_pkg->package.elements[0].type != ACPI_TYPE_INTEGER ||
+ mcc_pkg->package.elements[1].type != ACPI_TYPE_INTEGER)
+ continue;
+
+ domain_type = &mcc_pkg->package.elements[0];
+ if (domain_type->integer.value != WRDD_WIFI)
+ continue;
+
+ mcc_value = &mcc_pkg->package.elements[1];
+ return mcc_value->integer.value;
+ }
+ return 0;
+}
+
+static int ath10k_mac_get_wrdd_regulatory(struct ath10k *ar, u16 *rd)
+{
+ struct pci_dev __maybe_unused *pdev = to_pci_dev(ar->dev);
+ acpi_handle root_handle;
+ acpi_handle handle;
+ struct acpi_buffer wrdd = {ACPI_ALLOCATE_BUFFER, NULL};
+ acpi_status status;
+ u32 alpha2_code;
+ char alpha2[3];
+
+ root_handle = ACPI_HANDLE(&pdev->dev);
+ if (!root_handle)
+ return -EOPNOTSUPP;
+
+ status = acpi_get_handle(root_handle, (acpi_string)WRD_METHOD, &handle);
+ if (ACPI_FAILURE(status)) {
+ ath10k_dbg(ar, ATH10K_DBG_BOOT,
+ "failed to get wrd method %d\n", status);
+ return -EIO;
+ }
+
+ status = acpi_evaluate_object(handle, NULL, NULL, &wrdd);
+ if (ACPI_FAILURE(status)) {
+ ath10k_dbg(ar, ATH10K_DBG_BOOT,
+ "failed to call wrdc %d\n", status);
+ return -EIO;
+ }
+
+ alpha2_code = ath10k_mac_wrdd_get_mcc(ar, wrdd.pointer);
+ kfree(wrdd.pointer);
+ if (!alpha2_code)
+ return -EIO;
+
+ alpha2[0] = (alpha2_code >> 8) & 0xff;
+ alpha2[1] = (alpha2_code >> 0) & 0xff;
+ alpha2[2] = '\0';
+
+ ath10k_dbg(ar, ATH10K_DBG_BOOT,
+ "regulatory hint from WRDD (alpha2-code): %s\n", alpha2);
+
+ *rd = ath_regd_find_country_by_name(alpha2);
+ if (*rd == 0xffff)
+ return -EIO;
+
+ *rd |= COUNTRY_ERD_FLAG;
+ return 0;
+}
+
+static int ath10k_mac_init_rd(struct ath10k *ar)
+{
+ int ret;
+ u16 rd;
+
+ ret = ath10k_mac_get_wrdd_regulatory(ar, &rd);
+ if (ret) {
+ ath10k_dbg(ar, ATH10K_DBG_BOOT,
+ "fallback to eeprom programmed regulatory settings\n");
+ rd = ar->hw_eeprom_rd;
+ }
+
+ ar->ath_common.regulatory.current_rd = rd;
+ return 0;
+}
+
int ath10k_mac_register(struct ath10k *ar)
{
static const u32 cipher_suites[] = {
@@ -7881,6 +7993,8 @@ int ath10k_mac_register(struct ath10k *ar)
ieee80211_hw_set(ar->hw, WANT_MONITOR_VIF);
ieee80211_hw_set(ar->hw, CHANCTX_STA_CSA);
ieee80211_hw_set(ar->hw, QUEUE_CONTROL);
+ ieee80211_hw_set(ar->hw, SUPPORTS_TX_FRAG);
+ ieee80211_hw_set(ar->hw, REPORTS_LOW_ACK);
if (!test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags))
ieee80211_hw_set(ar->hw, SW_CRYPTO_CONTROL);
@@ -8012,6 +8126,16 @@ int ath10k_mac_register(struct ath10k *ar)
ar->running_fw->fw_file.fw_features))
ar->ops->wake_tx_queue = NULL;
+ ret = ath10k_mac_init_rd(ar);
+ if (ret) {
+ ath10k_err(ar, "failed to derive regdom: %d\n", ret);
+ goto err_dfs_detector_exit;
+ }
+
+ /* Disable set_coverage_class for chipsets that do not support it. */
+ if (!ar->hw_params.hw_ops->set_coverage_class)
+ ar->ops->set_coverage_class = NULL;
+
ret = ath_regd_init(&ar->ath_common.regulatory, ar->hw->wiphy,
ath10k_reg_notifier);
if (ret) {
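For reference, the WRDD handling above treats the ACPI integer as two ASCII letters packed big-endian into the low 16 bits. A minimal standalone sketch of the unpacking (the example value is invented for illustration):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t alpha2_code = ('D' << 8) | 'E'; /* illustrative value */
	char alpha2[3];

	alpha2[0] = (alpha2_code >> 8) & 0xff;
	alpha2[1] = (alpha2_code >> 0) & 0xff;
	alpha2[2] = '\0';
	printf("regulatory hint: %s\n", alpha2); /* prints "DE" */
	return 0;
}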
diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c
index 0457e315d336..b541a1c74488 100644
--- a/drivers/net/wireless/ath/ath10k/pci.c
+++ b/drivers/net/wireless/ath/ath10k/pci.c
@@ -2091,7 +2091,7 @@ int ath10k_pci_init_config(struct ath10k *ar)
ret = ath10k_pci_diag_read32(ar, ealloc_targ_addr, &ealloc_value);
if (ret != 0) {
- ath10k_err(ar, "Faile to get early alloc val: %d\n", ret);
+ ath10k_err(ar, "Failed to get early alloc val: %d\n", ret);
return ret;
}
diff --git a/drivers/net/wireless/ath/ath10k/spectral.c b/drivers/net/wireless/ath/ath10k/spectral.c
index 7d9b0da1b010..2ffc1fe4923b 100644
--- a/drivers/net/wireless/ath/ath10k/spectral.c
+++ b/drivers/net/wireless/ath/ath10k/spectral.c
@@ -338,7 +338,7 @@ static ssize_t write_file_spec_scan_ctl(struct file *file,
} else {
res = -EINVAL;
}
- } else if (strncmp("background", buf, 9) == 0) {
+ } else if (strncmp("background", buf, 10) == 0) {
res = ath10k_spectral_scan_config(ar, SPECTRAL_BACKGROUND);
} else if (strncmp("manual", buf, 6) == 0) {
res = ath10k_spectral_scan_config(ar, SPECTRAL_MANUAL);
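The spectral_scan_ctl fix above is easy to miss: strncmp() compares at most n bytes, so with n = 9 only the prefix "backgroun" was checked and longer, unrelated input matched as well. A quick standalone demonstration:

#include <stdio.h>
#include <string.h>

int main(void)
{
	const char *buf = "backgrounX"; /* bogus input */

	printf("n=9:  %d\n", strncmp("background", buf, 9));  /* 0: false match */
	printf("n=10: %d\n", strncmp("background", buf, 10)); /* non-zero */
	return 0;
}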
diff --git a/drivers/net/wireless/ath/ath10k/wmi-ops.h b/drivers/net/wireless/ath/ath10k/wmi-ops.h
index c9a8bb1186f2..c7956e181f80 100644
--- a/drivers/net/wireless/ath/ath10k/wmi-ops.h
+++ b/drivers/net/wireless/ath/ath10k/wmi-ops.h
@@ -660,6 +660,9 @@ ath10k_wmi_vdev_spectral_conf(struct ath10k *ar,
struct sk_buff *skb;
u32 cmd_id;
+ if (!ar->wmi.ops->gen_vdev_spectral_conf)
+ return -EOPNOTSUPP;
+
skb = ar->wmi.ops->gen_vdev_spectral_conf(ar, arg);
if (IS_ERR(skb))
return PTR_ERR(skb);
@@ -675,6 +678,9 @@ ath10k_wmi_vdev_spectral_enable(struct ath10k *ar, u32 vdev_id, u32 trigger,
struct sk_buff *skb;
u32 cmd_id;
+ if (!ar->wmi.ops->gen_vdev_spectral_enable)
+ return -EOPNOTSUPP;
+
skb = ar->wmi.ops->gen_vdev_spectral_enable(ar, vdev_id, trigger,
enable);
if (IS_ERR(skb))
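The -EOPNOTSUPP guards added above follow the usual ops-table convention of probing the function pointer before use, so WMI interfaces that lack a command fail gracefully instead of crashing. A simplified self-contained sketch of the pattern (struct names and the callback signature are invented for illustration):

#include <stdio.h>
#include <errno.h>
#include <stddef.h>

struct wmi_ops {
	int (*gen_spectral_conf)(int vdev_id); /* may be absent */
};

struct dev {
	const struct wmi_ops *ops;
};

static int wmi_vdev_spectral_conf(struct dev *d, int vdev_id)
{
	if (!d->ops->gen_spectral_conf)
		return -EOPNOTSUPP; /* firmware interface lacks the command */
	return d->ops->gen_spectral_conf(vdev_id);
}

int main(void)
{
	struct wmi_ops ops = { NULL };
	struct dev d = { &ops };

	printf("%d\n", wmi_vdev_spectral_conf(&d, 0)); /* -EOPNOTSUPP */
	return 0;
}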
diff --git a/drivers/net/wireless/ath/ath10k/wmi-tlv.c b/drivers/net/wireless/ath/ath10k/wmi-tlv.c
index e64f59300a7c..f304f6632c4f 100644
--- a/drivers/net/wireless/ath/ath10k/wmi-tlv.c
+++ b/drivers/net/wireless/ath/ath10k/wmi-tlv.c
@@ -1313,8 +1313,8 @@ ath10k_wmi_tlv_op_gen_pdev_set_rd(struct ath10k *ar,
cmd->regd = __cpu_to_le32(rd);
cmd->regd_2ghz = __cpu_to_le32(rd2g);
cmd->regd_5ghz = __cpu_to_le32(rd5g);
- cmd->conform_limit_2ghz = __cpu_to_le32(rd2g);
- cmd->conform_limit_5ghz = __cpu_to_le32(rd5g);
+ cmd->conform_limit_2ghz = __cpu_to_le32(ctl2g);
+ cmd->conform_limit_5ghz = __cpu_to_le32(ctl5g);
ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv pdev set rd\n");
return skb;
@@ -3136,6 +3136,76 @@ ath10k_wmi_tlv_op_gen_echo(struct ath10k *ar, u32 value)
return skb;
}
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_vdev_spectral_conf(struct ath10k *ar,
+ const struct wmi_vdev_spectral_conf_arg *arg)
+{
+ struct wmi_vdev_spectral_conf_cmd *cmd;
+ struct sk_buff *skb;
+ struct wmi_tlv *tlv;
+ void *ptr;
+ size_t len;
+
+ len = sizeof(*tlv) + sizeof(*cmd);
+ skb = ath10k_wmi_alloc_skb(ar, len);
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ ptr = (void *)skb->data;
+ tlv = ptr;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_VDEV_SPECTRAL_CONFIGURE_CMD);
+ tlv->len = __cpu_to_le16(sizeof(*cmd));
+ cmd = (void *)tlv->value;
+ cmd->vdev_id = __cpu_to_le32(arg->vdev_id);
+ cmd->scan_count = __cpu_to_le32(arg->scan_count);
+ cmd->scan_period = __cpu_to_le32(arg->scan_period);
+ cmd->scan_priority = __cpu_to_le32(arg->scan_priority);
+ cmd->scan_fft_size = __cpu_to_le32(arg->scan_fft_size);
+ cmd->scan_gc_ena = __cpu_to_le32(arg->scan_gc_ena);
+ cmd->scan_restart_ena = __cpu_to_le32(arg->scan_restart_ena);
+ cmd->scan_noise_floor_ref = __cpu_to_le32(arg->scan_noise_floor_ref);
+ cmd->scan_init_delay = __cpu_to_le32(arg->scan_init_delay);
+ cmd->scan_nb_tone_thr = __cpu_to_le32(arg->scan_nb_tone_thr);
+ cmd->scan_str_bin_thr = __cpu_to_le32(arg->scan_str_bin_thr);
+ cmd->scan_wb_rpt_mode = __cpu_to_le32(arg->scan_wb_rpt_mode);
+ cmd->scan_rssi_rpt_mode = __cpu_to_le32(arg->scan_rssi_rpt_mode);
+ cmd->scan_rssi_thr = __cpu_to_le32(arg->scan_rssi_thr);
+ cmd->scan_pwr_format = __cpu_to_le32(arg->scan_pwr_format);
+ cmd->scan_rpt_mode = __cpu_to_le32(arg->scan_rpt_mode);
+ cmd->scan_bin_scale = __cpu_to_le32(arg->scan_bin_scale);
+ cmd->scan_dbm_adj = __cpu_to_le32(arg->scan_dbm_adj);
+ cmd->scan_chn_mask = __cpu_to_le32(arg->scan_chn_mask);
+
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_vdev_spectral_enable(struct ath10k *ar, u32 vdev_id,
+ u32 trigger, u32 enable)
+{
+ struct wmi_vdev_spectral_enable_cmd *cmd;
+ struct sk_buff *skb;
+ struct wmi_tlv *tlv;
+ void *ptr;
+ size_t len;
+
+ len = sizeof(*tlv) + sizeof(*cmd);
+ skb = ath10k_wmi_alloc_skb(ar, len);
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ ptr = (void *)skb->data;
+ tlv = ptr;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_VDEV_SPECTRAL_ENABLE_CMD);
+ tlv->len = __cpu_to_le16(sizeof(*cmd));
+ cmd = (void *)tlv->value;
+ cmd->vdev_id = __cpu_to_le32(vdev_id);
+ cmd->trigger_cmd = __cpu_to_le32(trigger);
+ cmd->enable_cmd = __cpu_to_le32(enable);
+
+ return skb;
+}
+
/****************/
/* TLV mappings */
/****************/
@@ -3464,7 +3534,6 @@ static struct wmi_vdev_param_map wmi_tlv_vdev_param_map = {
.meru_vc = WMI_VDEV_PARAM_UNSUPPORTED,
.rx_decap_type = WMI_VDEV_PARAM_UNSUPPORTED,
.bw_nss_ratemask = WMI_VDEV_PARAM_UNSUPPORTED,
- .set_tsf = WMI_VDEV_PARAM_UNSUPPORTED,
};
static const struct wmi_ops wmi_tlv_ops = {
@@ -3542,6 +3611,8 @@ static const struct wmi_ops wmi_tlv_ops = {
.fw_stats_fill = ath10k_wmi_main_op_fw_stats_fill,
.get_vdev_subtype = ath10k_wmi_op_get_vdev_subtype,
.gen_echo = ath10k_wmi_tlv_op_gen_echo,
+ .gen_vdev_spectral_conf = ath10k_wmi_tlv_op_gen_vdev_spectral_conf,
+ .gen_vdev_spectral_enable = ath10k_wmi_tlv_op_gen_vdev_spectral_enable,
};
static const struct wmi_peer_flags_map wmi_tlv_peer_flags_map = {
diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c
index 54df425bb0fc..50d6ee6afe26 100644
--- a/drivers/net/wireless/ath/ath10k/wmi.c
+++ b/drivers/net/wireless/ath/ath10k/wmi.c
@@ -785,7 +785,6 @@ static struct wmi_vdev_param_map wmi_vdev_param_map = {
.meru_vc = WMI_VDEV_PARAM_UNSUPPORTED,
.rx_decap_type = WMI_VDEV_PARAM_UNSUPPORTED,
.bw_nss_ratemask = WMI_VDEV_PARAM_UNSUPPORTED,
- .set_tsf = WMI_VDEV_PARAM_UNSUPPORTED,
};
/* 10.X WMI VDEV param map */
@@ -861,7 +860,6 @@ static struct wmi_vdev_param_map wmi_10x_vdev_param_map = {
.meru_vc = WMI_VDEV_PARAM_UNSUPPORTED,
.rx_decap_type = WMI_VDEV_PARAM_UNSUPPORTED,
.bw_nss_ratemask = WMI_VDEV_PARAM_UNSUPPORTED,
- .set_tsf = WMI_VDEV_PARAM_UNSUPPORTED,
};
static struct wmi_vdev_param_map wmi_10_2_4_vdev_param_map = {
@@ -936,7 +934,6 @@ static struct wmi_vdev_param_map wmi_10_2_4_vdev_param_map = {
.meru_vc = WMI_VDEV_PARAM_UNSUPPORTED,
.rx_decap_type = WMI_VDEV_PARAM_UNSUPPORTED,
.bw_nss_ratemask = WMI_VDEV_PARAM_UNSUPPORTED,
- .set_tsf = WMI_10X_VDEV_PARAM_TSF_INCREMENT,
};
static struct wmi_vdev_param_map wmi_10_4_vdev_param_map = {
@@ -1012,7 +1009,8 @@ static struct wmi_vdev_param_map wmi_10_4_vdev_param_map = {
.meru_vc = WMI_10_4_VDEV_PARAM_MERU_VC,
.rx_decap_type = WMI_10_4_VDEV_PARAM_RX_DECAP_TYPE,
.bw_nss_ratemask = WMI_10_4_VDEV_PARAM_BW_NSS_RATEMASK,
- .set_tsf = WMI_10_4_VDEV_PARAM_TSF_INCREMENT,
+ .inc_tsf = WMI_10_4_VDEV_PARAM_TSF_INCREMENT,
+ .dec_tsf = WMI_10_4_VDEV_PARAM_TSF_DECREMENT,
};
static struct wmi_pdev_param_map wmi_pdev_param_map = {
@@ -4489,7 +4487,7 @@ static int ath10k_wmi_alloc_chunk(struct ath10k *ar, u32 req_id,
if (!num_units)
return -ENOMEM;
- paddr = dma_map_single(ar->dev, vaddr, pool_size, DMA_TO_DEVICE);
+ paddr = dma_map_single(ar->dev, vaddr, pool_size, DMA_BIDIRECTIONAL);
if (dma_mapping_error(ar->dev, paddr)) {
kfree(vaddr);
return -ENOMEM;
@@ -4676,7 +4674,7 @@ static void ath10k_wmi_event_service_ready_work(struct work_struct *work)
ar->fw_version_build = (__le32_to_cpu(arg.sw_ver1) & 0x0000ffff);
ar->phy_capability = __le32_to_cpu(arg.phy_capab);
ar->num_rf_chains = __le32_to_cpu(arg.num_rf_chains);
- ar->ath_common.regulatory.current_rd = __le32_to_cpu(arg.eeprom_rd);
+ ar->hw_eeprom_rd = __le32_to_cpu(arg.eeprom_rd);
ath10k_dbg_dump(ar, ATH10K_DBG_WMI, NULL, "wmi svc: ",
arg.service_map, arg.service_map_len);
@@ -4931,6 +4929,23 @@ exit:
return 0;
}
+static inline void ath10k_wmi_queue_set_coverage_class_work(struct ath10k *ar)
+{
+ if (ar->hw_params.hw_ops->set_coverage_class) {
+ spin_lock_bh(&ar->data_lock);
+
+ /* This call only ensures that the modified coverage class
+ * persists in case the firmware sets the registers back to
+ * their default value. So calling it is only necessary if the
+ * coverage class has a non-zero value.
+ */
+ if (ar->fw_coverage.coverage_class)
+ queue_work(ar->workqueue, &ar->set_coverage_class_work);
+
+ spin_unlock_bh(&ar->data_lock);
+ }
+}
+
static void ath10k_wmi_op_rx(struct ath10k *ar, struct sk_buff *skb)
{
struct wmi_cmd_hdr *cmd_hdr;
@@ -4951,6 +4966,7 @@ static void ath10k_wmi_op_rx(struct ath10k *ar, struct sk_buff *skb)
return;
case WMI_SCAN_EVENTID:
ath10k_wmi_event_scan(ar, skb);
+ ath10k_wmi_queue_set_coverage_class_work(ar);
break;
case WMI_CHAN_INFO_EVENTID:
ath10k_wmi_event_chan_info(ar, skb);
@@ -4960,15 +4976,18 @@ static void ath10k_wmi_op_rx(struct ath10k *ar, struct sk_buff *skb)
break;
case WMI_DEBUG_MESG_EVENTID:
ath10k_wmi_event_debug_mesg(ar, skb);
+ ath10k_wmi_queue_set_coverage_class_work(ar);
break;
case WMI_UPDATE_STATS_EVENTID:
ath10k_wmi_event_update_stats(ar, skb);
break;
case WMI_VDEV_START_RESP_EVENTID:
ath10k_wmi_event_vdev_start_resp(ar, skb);
+ ath10k_wmi_queue_set_coverage_class_work(ar);
break;
case WMI_VDEV_STOPPED_EVENTID:
ath10k_wmi_event_vdev_stopped(ar, skb);
+ ath10k_wmi_queue_set_coverage_class_work(ar);
break;
case WMI_PEER_STA_KICKOUT_EVENTID:
ath10k_wmi_event_peer_sta_kickout(ar, skb);
@@ -4984,12 +5003,14 @@ static void ath10k_wmi_op_rx(struct ath10k *ar, struct sk_buff *skb)
break;
case WMI_ROAM_EVENTID:
ath10k_wmi_event_roam(ar, skb);
+ ath10k_wmi_queue_set_coverage_class_work(ar);
break;
case WMI_PROFILE_MATCH:
ath10k_wmi_event_profile_match(ar, skb);
break;
case WMI_DEBUG_PRINT_EVENTID:
ath10k_wmi_event_debug_print(ar, skb);
+ ath10k_wmi_queue_set_coverage_class_work(ar);
break;
case WMI_PDEV_QVIT_EVENTID:
ath10k_wmi_event_pdev_qvit(ar, skb);
@@ -5038,6 +5059,7 @@ static void ath10k_wmi_op_rx(struct ath10k *ar, struct sk_buff *skb)
return;
case WMI_READY_EVENTID:
ath10k_wmi_event_ready(ar, skb);
+ ath10k_wmi_queue_set_coverage_class_work(ar);
break;
default:
ath10k_warn(ar, "Unknown eventid: %d\n", id);
@@ -5081,6 +5103,7 @@ static void ath10k_wmi_10_1_op_rx(struct ath10k *ar, struct sk_buff *skb)
return;
case WMI_10X_SCAN_EVENTID:
ath10k_wmi_event_scan(ar, skb);
+ ath10k_wmi_queue_set_coverage_class_work(ar);
break;
case WMI_10X_CHAN_INFO_EVENTID:
ath10k_wmi_event_chan_info(ar, skb);
@@ -5090,15 +5113,18 @@ static void ath10k_wmi_10_1_op_rx(struct ath10k *ar, struct sk_buff *skb)
break;
case WMI_10X_DEBUG_MESG_EVENTID:
ath10k_wmi_event_debug_mesg(ar, skb);
+ ath10k_wmi_queue_set_coverage_class_work(ar);
break;
case WMI_10X_UPDATE_STATS_EVENTID:
ath10k_wmi_event_update_stats(ar, skb);
break;
case WMI_10X_VDEV_START_RESP_EVENTID:
ath10k_wmi_event_vdev_start_resp(ar, skb);
+ ath10k_wmi_queue_set_coverage_class_work(ar);
break;
case WMI_10X_VDEV_STOPPED_EVENTID:
ath10k_wmi_event_vdev_stopped(ar, skb);
+ ath10k_wmi_queue_set_coverage_class_work(ar);
break;
case WMI_10X_PEER_STA_KICKOUT_EVENTID:
ath10k_wmi_event_peer_sta_kickout(ar, skb);
@@ -5114,12 +5140,14 @@ static void ath10k_wmi_10_1_op_rx(struct ath10k *ar, struct sk_buff *skb)
break;
case WMI_10X_ROAM_EVENTID:
ath10k_wmi_event_roam(ar, skb);
+ ath10k_wmi_queue_set_coverage_class_work(ar);
break;
case WMI_10X_PROFILE_MATCH:
ath10k_wmi_event_profile_match(ar, skb);
break;
case WMI_10X_DEBUG_PRINT_EVENTID:
ath10k_wmi_event_debug_print(ar, skb);
+ ath10k_wmi_queue_set_coverage_class_work(ar);
break;
case WMI_10X_PDEV_QVIT_EVENTID:
ath10k_wmi_event_pdev_qvit(ar, skb);
@@ -5159,6 +5187,7 @@ static void ath10k_wmi_10_1_op_rx(struct ath10k *ar, struct sk_buff *skb)
return;
case WMI_10X_READY_EVENTID:
ath10k_wmi_event_ready(ar, skb);
+ ath10k_wmi_queue_set_coverage_class_work(ar);
break;
case WMI_10X_PDEV_UTF_EVENTID:
/* ignore utf events */
@@ -5205,6 +5234,7 @@ static void ath10k_wmi_10_2_op_rx(struct ath10k *ar, struct sk_buff *skb)
return;
case WMI_10_2_SCAN_EVENTID:
ath10k_wmi_event_scan(ar, skb);
+ ath10k_wmi_queue_set_coverage_class_work(ar);
break;
case WMI_10_2_CHAN_INFO_EVENTID:
ath10k_wmi_event_chan_info(ar, skb);
@@ -5214,15 +5244,18 @@ static void ath10k_wmi_10_2_op_rx(struct ath10k *ar, struct sk_buff *skb)
break;
case WMI_10_2_DEBUG_MESG_EVENTID:
ath10k_wmi_event_debug_mesg(ar, skb);
+ ath10k_wmi_queue_set_coverage_class_work(ar);
break;
case WMI_10_2_UPDATE_STATS_EVENTID:
ath10k_wmi_event_update_stats(ar, skb);
break;
case WMI_10_2_VDEV_START_RESP_EVENTID:
ath10k_wmi_event_vdev_start_resp(ar, skb);
+ ath10k_wmi_queue_set_coverage_class_work(ar);
break;
case WMI_10_2_VDEV_STOPPED_EVENTID:
ath10k_wmi_event_vdev_stopped(ar, skb);
+ ath10k_wmi_queue_set_coverage_class_work(ar);
break;
case WMI_10_2_PEER_STA_KICKOUT_EVENTID:
ath10k_wmi_event_peer_sta_kickout(ar, skb);
@@ -5238,12 +5271,14 @@ static void ath10k_wmi_10_2_op_rx(struct ath10k *ar, struct sk_buff *skb)
break;
case WMI_10_2_ROAM_EVENTID:
ath10k_wmi_event_roam(ar, skb);
+ ath10k_wmi_queue_set_coverage_class_work(ar);
break;
case WMI_10_2_PROFILE_MATCH:
ath10k_wmi_event_profile_match(ar, skb);
break;
case WMI_10_2_DEBUG_PRINT_EVENTID:
ath10k_wmi_event_debug_print(ar, skb);
+ ath10k_wmi_queue_set_coverage_class_work(ar);
break;
case WMI_10_2_PDEV_QVIT_EVENTID:
ath10k_wmi_event_pdev_qvit(ar, skb);
@@ -5274,15 +5309,18 @@ static void ath10k_wmi_10_2_op_rx(struct ath10k *ar, struct sk_buff *skb)
break;
case WMI_10_2_VDEV_STANDBY_REQ_EVENTID:
ath10k_wmi_event_vdev_standby_req(ar, skb);
+ ath10k_wmi_queue_set_coverage_class_work(ar);
break;
case WMI_10_2_VDEV_RESUME_REQ_EVENTID:
ath10k_wmi_event_vdev_resume_req(ar, skb);
+ ath10k_wmi_queue_set_coverage_class_work(ar);
break;
case WMI_10_2_SERVICE_READY_EVENTID:
ath10k_wmi_event_service_ready(ar, skb);
return;
case WMI_10_2_READY_EVENTID:
ath10k_wmi_event_ready(ar, skb);
+ ath10k_wmi_queue_set_coverage_class_work(ar);
break;
case WMI_10_2_PDEV_TEMPERATURE_EVENTID:
ath10k_wmi_event_temperature(ar, skb);
@@ -5345,12 +5383,14 @@ static void ath10k_wmi_10_4_op_rx(struct ath10k *ar, struct sk_buff *skb)
break;
case WMI_10_4_DEBUG_MESG_EVENTID:
ath10k_wmi_event_debug_mesg(ar, skb);
+ ath10k_wmi_queue_set_coverage_class_work(ar);
break;
case WMI_10_4_SERVICE_READY_EVENTID:
ath10k_wmi_event_service_ready(ar, skb);
return;
case WMI_10_4_SCAN_EVENTID:
ath10k_wmi_event_scan(ar, skb);
+ ath10k_wmi_queue_set_coverage_class_work(ar);
break;
case WMI_10_4_CHAN_INFO_EVENTID:
ath10k_wmi_event_chan_info(ar, skb);
@@ -5360,12 +5400,14 @@ static void ath10k_wmi_10_4_op_rx(struct ath10k *ar, struct sk_buff *skb)
break;
case WMI_10_4_READY_EVENTID:
ath10k_wmi_event_ready(ar, skb);
+ ath10k_wmi_queue_set_coverage_class_work(ar);
break;
case WMI_10_4_PEER_STA_KICKOUT_EVENTID:
ath10k_wmi_event_peer_sta_kickout(ar, skb);
break;
case WMI_10_4_ROAM_EVENTID:
ath10k_wmi_event_roam(ar, skb);
+ ath10k_wmi_queue_set_coverage_class_work(ar);
break;
case WMI_10_4_HOST_SWBA_EVENTID:
ath10k_wmi_event_host_swba(ar, skb);
@@ -5375,12 +5417,15 @@ static void ath10k_wmi_10_4_op_rx(struct ath10k *ar, struct sk_buff *skb)
break;
case WMI_10_4_DEBUG_PRINT_EVENTID:
ath10k_wmi_event_debug_print(ar, skb);
+ ath10k_wmi_queue_set_coverage_class_work(ar);
break;
case WMI_10_4_VDEV_START_RESP_EVENTID:
ath10k_wmi_event_vdev_start_resp(ar, skb);
+ ath10k_wmi_queue_set_coverage_class_work(ar);
break;
case WMI_10_4_VDEV_STOPPED_EVENTID:
ath10k_wmi_event_vdev_stopped(ar, skb);
+ ath10k_wmi_queue_set_coverage_class_work(ar);
break;
case WMI_10_4_WOW_WAKEUP_HOST_EVENTID:
case WMI_10_4_PEER_RATECODE_LIST_EVENTID:
@@ -5397,6 +5442,9 @@ static void ath10k_wmi_10_4_op_rx(struct ath10k *ar, struct sk_buff *skb)
case WMI_10_4_PDEV_BSS_CHAN_INFO_EVENTID:
ath10k_wmi_event_pdev_bss_chan_info(ar, skb);
break;
+ case WMI_10_4_PDEV_TPC_CONFIG_EVENTID:
+ ath10k_wmi_event_pdev_tpc_config(ar, skb);
+ break;
default:
ath10k_warn(ar, "Unknown eventid: %d\n", id);
break;
@@ -6096,6 +6144,7 @@ void ath10k_wmi_start_scan_init(struct ath10k *ar,
| WMI_SCAN_EVENT_COMPLETED
| WMI_SCAN_EVENT_BSS_CHANNEL
| WMI_SCAN_EVENT_FOREIGN_CHANNEL
+ | WMI_SCAN_EVENT_FOREIGN_CHANNEL_EXIT
| WMI_SCAN_EVENT_DEQUEUED;
arg->scan_ctrl_flags |= WMI_SCAN_CHAN_STAT_EVENT;
arg->n_bssids = 1;
@@ -8153,6 +8202,7 @@ static const struct wmi_ops wmi_10_4_ops = {
.get_vdev_subtype = ath10k_wmi_10_4_op_get_vdev_subtype,
.gen_pdev_bss_chan_info_req = ath10k_wmi_10_2_op_gen_pdev_bss_chan_info,
.gen_echo = ath10k_wmi_op_gen_echo,
+ .gen_pdev_get_tpc_config = ath10k_wmi_10_2_4_op_gen_pdev_get_tpc_config,
};
int ath10k_wmi_attach(struct ath10k *ar)
@@ -8221,7 +8271,7 @@ void ath10k_wmi_free_host_mem(struct ath10k *ar)
dma_unmap_single(ar->dev,
ar->wmi.mem_chunks[i].paddr,
ar->wmi.mem_chunks[i].len,
- DMA_TO_DEVICE);
+ DMA_BIDIRECTIONAL);
kfree(ar->wmi.mem_chunks[i].vaddr);
}
diff --git a/drivers/net/wireless/ath/ath10k/wmi.h b/drivers/net/wireless/ath/ath10k/wmi.h
index 1b243c899bef..5d3dff95b2e5 100644
--- a/drivers/net/wireless/ath/ath10k/wmi.h
+++ b/drivers/net/wireless/ath/ath10k/wmi.h
@@ -4603,9 +4603,17 @@ enum wmi_rate_preamble {
#define ATH10K_HW_NSS(rate) (1 + (((rate) >> 4) & 0x3))
#define ATH10K_HW_PREAMBLE(rate) (((rate) >> 6) & 0x3)
-#define ATH10K_HW_RATECODE(rate, nss, preamble) \
+#define ATH10K_HW_MCS_RATE(rate) ((rate) & 0xf)
+#define ATH10K_HW_LEGACY_RATE(rate) ((rate) & 0x3f)
+#define ATH10K_HW_BW(flags) (((flags) >> 3) & 0x3)
+#define ATH10K_HW_GI(flags) (((flags) >> 5) & 0x1)
+#define ATH10K_HW_RATECODE(rate, nss, preamble) \
(((preamble) << 6) | ((nss) << 4) | (rate))
+#define VHT_MCS_NUM 10
+#define VHT_BW_NUM 4
+#define VHT_NSS_NUM 4
+
/* Value to disable fixed rate setting */
#define WMI_FIXED_RATE_NONE (0xff)
@@ -4676,7 +4684,8 @@ struct wmi_vdev_param_map {
u32 meru_vc;
u32 rx_decap_type;
u32 bw_nss_ratemask;
- u32 set_tsf;
+ u32 inc_tsf;
+ u32 dec_tsf;
};
#define WMI_VDEV_PARAM_UNSUPPORTED 0
@@ -5009,6 +5018,11 @@ enum wmi_10_4_vdev_param {
WMI_10_4_VDEV_PARAM_STA_KICKOUT,
WMI_10_4_VDEV_PARAM_CAPABILITIES,
WMI_10_4_VDEV_PARAM_TSF_INCREMENT,
+ WMI_10_4_VDEV_PARAM_RX_FILTER,
+ WMI_10_4_VDEV_PARAM_MGMT_TX_POWER,
+ WMI_10_4_VDEV_PARAM_ATF_SSID_SCHED_POLICY,
+ WMI_10_4_VDEV_PARAM_DISABLE_DYN_BW_RTS,
+ WMI_10_4_VDEV_PARAM_TSF_DECREMENT,
};
#define WMI_VDEV_PARAM_TXBF_SU_TX_BFEE BIT(0)
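The new rate macros above sit next to the existing ATH10K_HW_NSS()/ATH10K_HW_PREAMBLE() helpers. This standalone sketch (macros copied from the header context shown in the hunk) round-trips one rate code through encode and decode:

#include <stdio.h>

#define ATH10K_HW_MCS_RATE(rate) ((rate) & 0xf)
#define ATH10K_HW_NSS(rate) (1 + (((rate) >> 4) & 0x3))
#define ATH10K_HW_PREAMBLE(rate) (((rate) >> 6) & 0x3)
#define ATH10K_HW_RATECODE(rate, nss, preamble) \
	(((preamble) << 6) | ((nss) << 4) | (rate))

int main(void)
{
	/* MCS 7, two spatial streams (field stores nss - 1), preamble 3 */
	unsigned int rc = ATH10K_HW_RATECODE(7, 1, 3);

	printf("rc=0x%02x mcs=%u nss=%u preamble=%u\n", rc,
	       ATH10K_HW_MCS_RATE(rc), ATH10K_HW_NSS(rc),
	       ATH10K_HW_PREAMBLE(rc));
	return 0;
}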
diff --git a/drivers/net/wireless/ath/ath5k/debug.c b/drivers/net/wireless/ath/ath5k/debug.c
index 4f8d9ed04f5e..d068df520e7a 100644
--- a/drivers/net/wireless/ath/ath5k/debug.c
+++ b/drivers/net/wireless/ath/ath5k/debug.c
@@ -66,7 +66,6 @@
#include <linux/seq_file.h>
#include <linux/list.h>
-#include <linux/vmalloc.h>
#include "debug.h"
#include "ath5k.h"
#include "reg.h"
diff --git a/drivers/net/wireless/ath/ath6kl/sdio.c b/drivers/net/wireless/ath/ath6kl/sdio.c
index 76eb33679d4b..8ec66e74d06d 100644
--- a/drivers/net/wireless/ath/ath6kl/sdio.c
+++ b/drivers/net/wireless/ath/ath6kl/sdio.c
@@ -75,6 +75,8 @@ struct ath6kl_sdio {
#define CMD53_ARG_FIXED_ADDRESS 0
#define CMD53_ARG_INCR_ADDRESS 1
+static int ath6kl_sdio_config(struct ath6kl *ar);
+
static inline struct ath6kl_sdio *ath6kl_sdio_priv(struct ath6kl *ar)
{
return ar->hif_priv;
@@ -526,8 +528,15 @@ static int ath6kl_sdio_power_on(struct ath6kl *ar)
*/
msleep(10);
+ ret = ath6kl_sdio_config(ar);
+ if (ret) {
+ ath6kl_err("Failed to config sdio: %d\n", ret);
+ goto out;
+ }
+
ar_sdio->is_disabled = false;
+out:
return ret;
}
@@ -703,8 +712,10 @@ static void ath6kl_sdio_cleanup_scatter(struct ath6kl *ar)
* ath6kl_hif_rw_comp_handler() with status -ECANCELED so
* that the packet is properly freed?
*/
- if (s_req->busrequest)
+ if (s_req->busrequest) {
+ s_req->busrequest->scat_req = NULL;
ath6kl_sdio_free_bus_req(ar_sdio, s_req->busrequest);
+ }
kfree(s_req->virt_dma_buf);
kfree(s_req->sgentries);
kfree(s_req);
@@ -712,6 +723,8 @@ static void ath6kl_sdio_cleanup_scatter(struct ath6kl *ar)
spin_lock_bh(&ar_sdio->scat_lock);
}
spin_unlock_bh(&ar_sdio->scat_lock);
+
+ ar_sdio->scatter_enabled = false;
}
/* setup of HIF scatter resources */
diff --git a/drivers/net/wireless/ath/ath6kl/wmi.c b/drivers/net/wireless/ath/ath6kl/wmi.c
index 3fd1cc98fd2f..84a6d12c3f8a 100644
--- a/drivers/net/wireless/ath/ath6kl/wmi.c
+++ b/drivers/net/wireless/ath/ath6kl/wmi.c
@@ -421,10 +421,6 @@ int ath6kl_wmi_dot11_hdr_remove(struct wmi *wmi, struct sk_buff *skb)
switch ((le16_to_cpu(wh.frame_control)) &
(IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS)) {
- case 0:
- memcpy(eth_hdr.h_dest, wh.addr1, ETH_ALEN);
- memcpy(eth_hdr.h_source, wh.addr2, ETH_ALEN);
- break;
case IEEE80211_FCTL_TODS:
memcpy(eth_hdr.h_dest, wh.addr3, ETH_ALEN);
memcpy(eth_hdr.h_source, wh.addr2, ETH_ALEN);
@@ -435,6 +431,10 @@ int ath6kl_wmi_dot11_hdr_remove(struct wmi *wmi, struct sk_buff *skb)
break;
case IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS:
break;
+ default:
+ memcpy(eth_hdr.h_dest, wh.addr1, ETH_ALEN);
+ memcpy(eth_hdr.h_source, wh.addr2, ETH_ALEN);
+ break;
}
skb_pull(skb, sizeof(struct ath6kl_llc_snap_hdr));
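The reordering above turns the no-DS-bits case into the switch default rather than an explicit case 0; the behavior is unchanged. For context, a simplified standalone sketch of the 802.11 ToDS/FromDS address mapping such a switch implements (constants inlined; the FromDS branch follows the standard addr1/addr3 convention, which the elided part of this hunk is assumed to match):

#include <stdio.h>
#include <stddef.h>

#define FCTL_TODS 0x0100
#define FCTL_FROMDS 0x0200

/* Select dest/src the way the driver's switch does (4-addr skipped). */
static void map_addrs(unsigned int fc, const char *a1, const char *a2,
		      const char *a3, const char **dst, const char **src)
{
	switch (fc & (FCTL_TODS | FCTL_FROMDS)) {
	case FCTL_TODS:
		*dst = a3;
		*src = a2;
		break;
	case FCTL_FROMDS:
		*dst = a1;
		*src = a3;
		break;
	case FCTL_TODS | FCTL_FROMDS:
		*dst = NULL; /* 4-address frame, not converted */
		*src = NULL;
		break;
	default: /* neither bit set: the relocated default above */
		*dst = a1;
		*src = a2;
		break;
	}
}

int main(void)
{
	const char *dst, *src;

	map_addrs(0, "addr1", "addr2", "addr3", &dst, &src);
	printf("dst=%s src=%s\n", dst, src); /* dst=addr1 src=addr2 */
	return 0;
}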
diff --git a/drivers/net/wireless/ath/ath9k/ahb.c b/drivers/net/wireless/ath/ath9k/ahb.c
index bea6186f745a..2bd982c3a479 100644
--- a/drivers/net/wireless/ath/ath9k/ahb.c
+++ b/drivers/net/wireless/ath/ath9k/ahb.c
@@ -62,7 +62,7 @@ static bool ath_ahb_eeprom_read(struct ath_common *common, u32 off, u16 *data)
return false;
}
-static struct ath_bus_ops ath_ahb_bus_ops = {
+static const struct ath_bus_ops ath_ahb_bus_ops = {
.ath_bus_type = ATH_AHB,
.read_cachesize = ath_ahb_read_cachesize,
.eeprom_read = ath_ahb_eeprom_read,
diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h
index 26fc8ecfe8c4..378d3458fddb 100644
--- a/drivers/net/wireless/ath/ath9k/ath9k.h
+++ b/drivers/net/wireless/ath/ath9k/ath9k.h
@@ -91,7 +91,6 @@ int ath_descdma_setup(struct ath_softc *sc, struct ath_descdma *dd,
#define ATH_RXBUF 512
#define ATH_TXBUF 512
#define ATH_TXBUF_RESERVE 5
-#define ATH_MAX_QDEPTH (ATH_TXBUF / 4 - ATH_TXBUF_RESERVE)
#define ATH_TXMAXTRY 13
#define ATH_MAX_SW_RETRIES 30
@@ -145,7 +144,7 @@ int ath_descdma_setup(struct ath_softc *sc, struct ath_descdma *dd,
#define BAW_WITHIN(_start, _bawsz, _seqno) \
((((_seqno) - (_start)) & 4095) < (_bawsz))
-#define ATH_AN_2_TID(_an, _tidno) (&(_an)->tid[(_tidno)])
+#define ATH_AN_2_TID(_an, _tidno) ath_node_to_tid(_an, _tidno)
#define IS_HT_RATE(rate) (rate & 0x80)
#define IS_CCK_RATE(rate) ((rate >= 0x18) && (rate <= 0x1e))
@@ -164,7 +163,6 @@ struct ath_txq {
spinlock_t axq_lock;
u32 axq_depth;
u32 axq_ampdu_depth;
- bool stopped;
bool axq_tx_inprogress;
struct list_head txq_fifo[ATH_TXFIFO_DEPTH];
u8 txq_headidx;
@@ -232,7 +230,6 @@ struct ath_buf {
struct ath_atx_tid {
struct list_head list;
- struct sk_buff_head buf_q;
struct sk_buff_head retry_q;
struct ath_node *an;
struct ath_txq *txq;
@@ -247,13 +244,13 @@ struct ath_atx_tid {
s8 bar_index;
bool active;
bool clear_ps_filter;
+ bool has_queued;
};
struct ath_node {
struct ath_softc *sc;
struct ieee80211_sta *sta; /* station struct we're part of */
struct ieee80211_vif *vif; /* interface with which we're associated */
- struct ath_atx_tid tid[IEEE80211_NUM_TIDS];
u16 maxampdu;
u8 mpdudensity;
@@ -276,7 +273,6 @@ struct ath_tx_control {
struct ath_node *an;
struct ieee80211_sta *sta;
u8 paprd;
- bool force_channel;
};
@@ -293,7 +289,6 @@ struct ath_tx {
struct ath_descdma txdma;
struct ath_txq *txq_map[IEEE80211_NUM_ACS];
struct ath_txq *uapsdq;
- u32 txq_max_pending[IEEE80211_NUM_ACS];
u16 max_aggr_framelen[IEEE80211_NUM_ACS][4][32];
};
@@ -421,6 +416,22 @@ struct ath_offchannel {
int duration;
};
+static inline struct ath_atx_tid *
+ath_node_to_tid(struct ath_node *an, u8 tidno)
+{
+ struct ieee80211_sta *sta = an->sta;
+ struct ieee80211_vif *vif = an->vif;
+ struct ieee80211_txq *txq;
+
+ BUG_ON(!vif);
+ if (sta)
+ txq = sta->txq[tidno % ARRAY_SIZE(sta->txq)];
+ else
+ txq = vif->txq;
+
+ return (struct ath_atx_tid *) txq->drv_priv;
+}
+
#define case_rtn_string(val) case val: return #val
#define ath_for_each_chanctx(_sc, _ctx) \
@@ -575,7 +586,6 @@ void ath_tx_edma_tasklet(struct ath_softc *sc);
int ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta,
u16 tid, u16 *ssn);
void ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid);
-void ath_tx_aggr_resume(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid);
void ath_tx_aggr_wakeup(struct ath_softc *sc, struct ath_node *an);
void ath_tx_aggr_sleep(struct ieee80211_sta *sta, struct ath_softc *sc,
@@ -585,6 +595,7 @@ void ath9k_release_buffered_frames(struct ieee80211_hw *hw,
u16 tids, int nframes,
enum ieee80211_frame_release_type reason,
bool more_data);
+void ath9k_wake_tx_queue(struct ieee80211_hw *hw, struct ieee80211_txq *queue);
/********/
/* VIFs */
diff --git a/drivers/net/wireless/ath/ath9k/channel.c b/drivers/net/wireless/ath/ath9k/channel.c
index 57e26a640477..929dd70f48eb 100644
--- a/drivers/net/wireless/ath/ath9k/channel.c
+++ b/drivers/net/wireless/ath/ath9k/channel.c
@@ -1010,7 +1010,6 @@ static void ath_scan_send_probe(struct ath_softc *sc,
goto error;
txctl.txq = sc->tx.txq_map[IEEE80211_AC_VO];
- txctl.force_channel = true;
if (ath_tx_start(sc->hw, skb, &txctl))
goto error;
@@ -1133,7 +1132,6 @@ ath_chanctx_send_vif_ps_frame(struct ath_softc *sc, struct ath_vif *avp,
memset(&txctl, 0, sizeof(txctl));
txctl.txq = sc->tx.txq_map[IEEE80211_AC_VO];
txctl.sta = sta;
- txctl.force_channel = true;
if (ath_tx_start(sc->hw, skb, &txctl)) {
ieee80211_free_txskb(sc->hw, skb);
return false;
diff --git a/drivers/net/wireless/ath/ath9k/common-spectral.c b/drivers/net/wireless/ath/ath9k/common-spectral.c
index e2512d5bc0e1..eedf86b67cf5 100644
--- a/drivers/net/wireless/ath/ath9k/common-spectral.c
+++ b/drivers/net/wireless/ath/ath9k/common-spectral.c
@@ -528,6 +528,9 @@ int ath_cmn_process_fft(struct ath_spec_scan_priv *spec_priv, struct ieee80211_h
if (!(radar_info->pulse_bw_info & SPECTRAL_SCAN_BITMASK))
return 0;
+ if (!spec_priv->rfs_chan_spec_scan)
+ return 1;
+
/* Output buffers are full, no need to process anything
* since there is no space to put the result anyway
*/
@@ -1072,7 +1075,7 @@ static struct rchan_callbacks rfs_spec_scan_cb = {
void ath9k_cmn_spectral_deinit_debug(struct ath_spec_scan_priv *spec_priv)
{
- if (IS_ENABLED(CONFIG_ATH9K_DEBUGFS)) {
+ if (IS_ENABLED(CONFIG_ATH9K_DEBUGFS) && spec_priv->rfs_chan_spec_scan) {
relay_close(spec_priv->rfs_chan_spec_scan);
spec_priv->rfs_chan_spec_scan = NULL;
}
@@ -1086,6 +1089,9 @@ void ath9k_cmn_spectral_init_debug(struct ath_spec_scan_priv *spec_priv,
debugfs_phy,
1024, 256, &rfs_spec_scan_cb,
NULL);
+ if (!spec_priv->rfs_chan_spec_scan)
+ return;
+
debugfs_create_file("spectral_scan_ctl",
S_IRUSR | S_IWUSR,
debugfs_phy, spec_priv,
diff --git a/drivers/net/wireless/ath/ath9k/debug.c b/drivers/net/wireless/ath/ath9k/debug.c
index c56e40ff35e5..89a94dd5f2cb 100644
--- a/drivers/net/wireless/ath/ath9k/debug.c
+++ b/drivers/net/wireless/ath/ath9k/debug.c
@@ -600,7 +600,6 @@ static int read_file_xmit(struct seq_file *file, void *data)
PR("MPDUs XRetried: ", xretries);
PR("Aggregates: ", a_aggr);
PR("AMPDUs Queued HW:", a_queued_hw);
- PR("AMPDUs Queued SW:", a_queued_sw);
PR("AMPDUs Completed:", a_completed);
PR("AMPDUs Retried: ", a_retries);
PR("AMPDUs XRetried: ", a_xretries);
@@ -629,8 +628,7 @@ static void print_queue(struct ath_softc *sc, struct ath_txq *txq,
seq_printf(file, "%s: %d ", "qnum", txq->axq_qnum);
seq_printf(file, "%s: %2d ", "qdepth", txq->axq_depth);
seq_printf(file, "%s: %2d ", "ampdu-depth", txq->axq_ampdu_depth);
- seq_printf(file, "%s: %3d ", "pending", txq->pending_frames);
- seq_printf(file, "%s: %d\n", "stopped", txq->stopped);
+ seq_printf(file, "%s: %3d\n", "pending", txq->pending_frames);
ath_txq_unlock(sc, txq);
}
@@ -1208,7 +1206,6 @@ static const char ath9k_gstrings_stats[][ETH_GSTRING_LEN] = {
AMKSTR(d_tx_mpdu_xretries),
AMKSTR(d_tx_aggregates),
AMKSTR(d_tx_ampdus_queued_hw),
- AMKSTR(d_tx_ampdus_queued_sw),
AMKSTR(d_tx_ampdus_completed),
AMKSTR(d_tx_ampdu_retries),
AMKSTR(d_tx_ampdu_xretries),
@@ -1288,7 +1285,6 @@ void ath9k_get_et_stats(struct ieee80211_hw *hw,
AWDATA(xretries);
AWDATA(a_aggr);
AWDATA(a_queued_hw);
- AWDATA(a_queued_sw);
AWDATA(a_completed);
AWDATA(a_retries);
AWDATA(a_xretries);
@@ -1346,14 +1342,6 @@ int ath9k_init_debug(struct ath_hw *ah)
read_file_xmit);
debugfs_create_devm_seqfile(sc->dev, "queues", sc->debug.debugfs_phy,
read_file_queues);
- debugfs_create_u32("qlen_bk", S_IRUSR | S_IWUSR, sc->debug.debugfs_phy,
- &sc->tx.txq_max_pending[IEEE80211_AC_BK]);
- debugfs_create_u32("qlen_be", S_IRUSR | S_IWUSR, sc->debug.debugfs_phy,
- &sc->tx.txq_max_pending[IEEE80211_AC_BE]);
- debugfs_create_u32("qlen_vi", S_IRUSR | S_IWUSR, sc->debug.debugfs_phy,
- &sc->tx.txq_max_pending[IEEE80211_AC_VI]);
- debugfs_create_u32("qlen_vo", S_IRUSR | S_IWUSR, sc->debug.debugfs_phy,
- &sc->tx.txq_max_pending[IEEE80211_AC_VO]);
debugfs_create_devm_seqfile(sc->dev, "misc", sc->debug.debugfs_phy,
read_file_misc);
debugfs_create_devm_seqfile(sc->dev, "reset", sc->debug.debugfs_phy,
diff --git a/drivers/net/wireless/ath/ath9k/debug.h b/drivers/net/wireless/ath/ath9k/debug.h
index cd68c5f0e751..a078cdd3170d 100644
--- a/drivers/net/wireless/ath/ath9k/debug.h
+++ b/drivers/net/wireless/ath/ath9k/debug.h
@@ -147,7 +147,6 @@ struct ath_interrupt_stats {
* @completed: Total MPDUs (non-aggr) completed
* @a_aggr: Total no. of aggregates queued
* @a_queued_hw: Total AMPDUs queued to hardware
- * @a_queued_sw: Total AMPDUs queued to software queues
* @a_completed: Total AMPDUs completed
* @a_retries: No. of AMPDUs retried (SW)
* @a_xretries: No. of AMPDUs dropped due to xretries
@@ -174,7 +173,6 @@ struct ath_tx_stats {
u32 xretries;
u32 a_aggr;
u32 a_queued_hw;
- u32 a_queued_sw;
u32 a_completed;
u32 a_retries;
u32 a_xretries;
diff --git a/drivers/net/wireless/ath/ath9k/debug_sta.c b/drivers/net/wireless/ath/ath9k/debug_sta.c
index b66cfa91364f..2a3a3c4671bc 100644
--- a/drivers/net/wireless/ath/ath9k/debug_sta.c
+++ b/drivers/net/wireless/ath/ath9k/debug_sta.c
@@ -52,8 +52,8 @@ static ssize_t read_file_node_aggr(struct file *file, char __user *user_buf,
"TID", "SEQ_START", "SEQ_NEXT", "BAW_SIZE",
"BAW_HEAD", "BAW_TAIL", "BAR_IDX", "SCHED", "PAUSED");
- for (tidno = 0, tid = &an->tid[tidno];
- tidno < IEEE80211_NUM_TIDS; tidno++, tid++) {
+ for (tidno = 0; tidno < IEEE80211_NUM_TIDS; tidno++) {
+ tid = ath_node_to_tid(an, tidno);
txq = tid->txq;
ath_txq_lock(sc, txq);
if (tid->active) {
diff --git a/drivers/net/wireless/ath/ath9k/hif_usb.c b/drivers/net/wireless/ath/ath9k/hif_usb.c
index e1c338cb9cb5..de2d212f39ec 100644
--- a/drivers/net/wireless/ath/ath9k/hif_usb.c
+++ b/drivers/net/wireless/ath/ath9k/hif_usb.c
@@ -997,7 +997,8 @@ static int ath9k_hif_usb_download_fw(struct hif_device_usb *hif_dev)
err = usb_control_msg(hif_dev->udev,
usb_sndctrlpipe(hif_dev->udev, 0),
FIRMWARE_DOWNLOAD, 0x40 | USB_DIR_OUT,
- addr >> 8, 0, buf, transfer, HZ);
+ addr >> 8, 0, buf, transfer,
+ USB_MSG_TIMEOUT);
if (err < 0) {
kfree(buf);
return err;
@@ -1020,7 +1021,7 @@ static int ath9k_hif_usb_download_fw(struct hif_device_usb *hif_dev)
err = usb_control_msg(hif_dev->udev, usb_sndctrlpipe(hif_dev->udev, 0),
FIRMWARE_DOWNLOAD_COMP,
0x40 | USB_DIR_OUT,
- firm_offset >> 8, 0, NULL, 0, HZ);
+ firm_offset >> 8, 0, NULL, 0, USB_MSG_TIMEOUT);
if (err)
return -EIO;
@@ -1249,7 +1250,7 @@ static int send_eject_command(struct usb_interface *interface)
dev_info(&udev->dev, "Ejecting storage device...\n");
r = usb_bulk_msg(udev, usb_sndbulkpipe(udev, bulk_out_ep),
- cmd, 31, NULL, 2000);
+ cmd, 31, NULL, 2 * USB_MSG_TIMEOUT);
kfree(cmd);
if (r)
return r;
@@ -1314,7 +1315,7 @@ static void ath9k_hif_usb_reboot(struct usb_device *udev)
return;
ret = usb_interrupt_msg(udev, usb_sndintpipe(udev, USB_REG_OUT_PIPE),
- buf, 4, NULL, HZ);
+ buf, 4, NULL, USB_MSG_TIMEOUT);
if (ret)
dev_err(&udev->dev, "ath9k_htc: USB reboot failed\n");
diff --git a/drivers/net/wireless/ath/ath9k/hif_usb.h b/drivers/net/wireless/ath/ath9k/hif_usb.h
index 7c2ef7ecd98b..7846916aa01d 100644
--- a/drivers/net/wireless/ath/ath9k/hif_usb.h
+++ b/drivers/net/wireless/ath/ath9k/hif_usb.h
@@ -71,6 +71,8 @@ extern int htc_use_dev_fw;
#define USB_REG_IN_PIPE 3
#define USB_REG_OUT_PIPE 4
+#define USB_MSG_TIMEOUT 1000 /* (ms) */
+
#define HIF_USB_MAX_RXPIPES 2
#define HIF_USB_MAX_TXPIPES 4
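Background on the new constant: the timeout arguments of usb_control_msg(), usb_bulk_msg() and usb_interrupt_msg() are in milliseconds, while HZ is the kernel's ticks-per-second value, so passing HZ requested a configuration-dependent timeout (for instance only 100 ms on a HZ=100 build) instead of the intended one second. A small sketch of the difference:

/* Illustrative only: what the old and new calls requested. */
#define HZ 100			/* example: a HZ=100 kernel build */
#define USB_MSG_TIMEOUT 1000	/* ms, as defined above */

static const int old_timeout_ms = HZ;		   /* only 100 ms here */
static const int new_timeout_ms = USB_MSG_TIMEOUT; /* always 1000 ms */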
diff --git a/drivers/net/wireless/ath/ath9k/htc_hst.c b/drivers/net/wireless/ath/ath9k/htc_hst.c
index fd85f996c554..8e6dae23669b 100644
--- a/drivers/net/wireless/ath/ath9k/htc_hst.c
+++ b/drivers/net/wireless/ath/ath9k/htc_hst.c
@@ -244,8 +244,8 @@ int htc_connect_service(struct htc_target *target,
/* Find an available endpoint */
endpoint = get_next_avail_ep(target->endpoint);
if (!endpoint) {
- dev_err(target->dev, "Endpoint is not available for"
- "service %d\n", service_connreq->service_id);
+ dev_err(target->dev, "Endpoint is not available for service %d\n",
+ service_connreq->service_id);
return -EINVAL;
}
@@ -382,7 +382,7 @@ static void ath9k_htc_fw_panic_report(struct htc_target *htc_handle,
break;
}
default:
- dev_err(htc_handle->dev, "ath: uknown panic pattern!\n");
+ dev_err(htc_handle->dev, "ath: unknown panic pattern!\n");
break;
}
}
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
index 14b13f07cd1f..a35f78be8dec 100644
--- a/drivers/net/wireless/ath/ath9k/hw.c
+++ b/drivers/net/wireless/ath/ath9k/hw.c
@@ -2792,7 +2792,7 @@ u32 ath9k_hw_gpio_get(struct ath_hw *ah, u32 gpio)
WARN_ON(1);
}
- return val;
+ return !!val;
}
EXPORT_SYMBOL(ath9k_hw_gpio_get);
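The !!val change above makes ath9k_hw_gpio_get() return a strict 0/1 instead of whatever non-zero bit the register read produced. Trivial standalone illustration:

#include <stdio.h>

int main(void)
{
	unsigned int val = 0x8; /* GPIO bit read back above bit 0 */

	printf("%u %u\n", val, !!val); /* prints "8 1" */
	return 0;
}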
diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c
index cfa3fe82ade3..20794660d6ae 100644
--- a/drivers/net/wireless/ath/ath9k/init.c
+++ b/drivers/net/wireless/ath/ath9k/init.c
@@ -20,6 +20,8 @@
#include <linux/slab.h>
#include <linux/ath9k_platform.h>
#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_net.h>
#include <linux/relay.h>
#include <net/ieee80211_radiotap.h>
@@ -358,7 +360,6 @@ static int ath9k_init_queues(struct ath_softc *sc)
for (i = 0; i < IEEE80211_NUM_ACS; i++) {
sc->tx.txq_map[i] = ath_txq_setup(sc, ATH9K_TX_QUEUE_DATA, i);
sc->tx.txq_map[i]->mac80211_qnum = i;
- sc->tx.txq_max_pending[i] = ATH_MAX_QDEPTH;
}
return 0;
}
@@ -555,6 +556,42 @@ static int ath9k_init_platform(struct ath_softc *sc)
return 0;
}
+static int ath9k_of_init(struct ath_softc *sc)
+{
+ struct device_node *np = sc->dev->of_node;
+ struct ath_hw *ah = sc->sc_ah;
+ struct ath_common *common = ath9k_hw_common(ah);
+ enum ath_bus_type bus_type = common->bus_ops->ath_bus_type;
+ const char *mac;
+ char eeprom_name[100];
+ int ret;
+
+ if (!of_device_is_available(np))
+ return 0;
+
+ ath_dbg(common, CONFIG, "parsing configuration from OF node\n");
+
+ if (of_property_read_bool(np, "qca,no-eeprom")) {
+ /* ath9k-eeprom-<bus>-<id>.bin */
+ scnprintf(eeprom_name, sizeof(eeprom_name),
+ "ath9k-eeprom-%s-%s.bin",
+ ath_bus_type_to_string(bus_type), dev_name(ah->dev));
+
+ ret = ath9k_eeprom_request(sc, eeprom_name);
+ if (ret)
+ return ret;
+ }
+
+ mac = of_get_mac_address(np);
+ if (mac)
+ ether_addr_copy(common->macaddr, mac);
+
+ ah->ah_flags &= ~AH_USE_EEPROM;
+ ah->ah_flags |= AH_NO_EEP_SWAP;
+
+ return 0;
+}
+
static int ath9k_init_softc(u16 devid, struct ath_softc *sc,
const struct ath_bus_ops *bus_ops)
{
@@ -611,6 +648,10 @@ static int ath9k_init_softc(u16 devid, struct ath_softc *sc,
if (ret)
return ret;
+ ret = ath9k_of_init(sc);
+ if (ret)
+ return ret;
+
if (ath9k_led_active_high != -1)
ah->config.led_active_high = ath9k_led_active_high == 1;
@@ -734,9 +775,11 @@ static const struct ieee80211_iface_limit if_limits[] = {
BIT(NL80211_IFTYPE_P2P_GO) },
};
+#ifdef CONFIG_WIRELESS_WDS
static const struct ieee80211_iface_limit wds_limits[] = {
{ .max = 2048, .types = BIT(NL80211_IFTYPE_WDS) },
};
+#endif
#ifdef CONFIG_ATH9K_CHANNEL_CONTEXT
@@ -774,6 +817,7 @@ static const struct ieee80211_iface_combination if_comb[] = {
BIT(NL80211_CHAN_WIDTH_40),
#endif
},
+#ifdef CONFIG_WIRELESS_WDS
{
.limits = wds_limits,
.n_limits = ARRAY_SIZE(wds_limits),
@@ -781,6 +825,7 @@ static const struct ieee80211_iface_combination if_comb[] = {
.num_different_channels = 1,
.beacon_int_infra_match = true,
},
+#endif
};
#ifdef CONFIG_ATH9K_CHANNEL_CONTEXT
@@ -851,7 +896,9 @@ static void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
BIT(NL80211_IFTYPE_STATION) |
BIT(NL80211_IFTYPE_ADHOC) |
BIT(NL80211_IFTYPE_MESH_POINT) |
+#ifdef CONFIG_WIRELESS_WDS
BIT(NL80211_IFTYPE_WDS) |
+#endif
BIT(NL80211_IFTYPE_OCB);
if (ath9k_is_chanctx_enabled())
@@ -877,6 +924,7 @@ static void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
hw->max_rate_tries = 10;
hw->sta_data_size = sizeof(struct ath_node);
hw->vif_data_size = sizeof(struct ath_vif);
+ hw->txq_data_size = sizeof(struct ath_atx_tid);
hw->extra_tx_headroom = 4;
hw->wiphy->available_antennas_rx = BIT(ah->caps.max_rxchains) - 1;
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index e9f32b52fc8c..59e3bd0f4c20 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -1902,9 +1902,11 @@ static int ath9k_ampdu_action(struct ieee80211_hw *hw,
bool flush = false;
int ret = 0;
struct ieee80211_sta *sta = params->sta;
+ struct ath_node *an = (struct ath_node *)sta->drv_priv;
enum ieee80211_ampdu_mlme_action action = params->action;
u16 tid = params->tid;
u16 *ssn = &params->ssn;
+ struct ath_atx_tid *atid;
mutex_lock(&sc->mutex);
@@ -1937,9 +1939,9 @@ static int ath9k_ampdu_action(struct ieee80211_hw *hw,
ath9k_ps_restore(sc);
break;
case IEEE80211_AMPDU_TX_OPERATIONAL:
- ath9k_ps_wakeup(sc);
- ath_tx_aggr_resume(sc, sta, tid);
- ath9k_ps_restore(sc);
+ atid = ath_node_to_tid(an, tid);
+ atid->baw_size = IEEE80211_MIN_AMPDU_BUF <<
+ sta->ht_cap.ampdu_factor;
break;
default:
ath_err(ath9k_hw_common(sc->sc_ah), "Unknown AMPDU action\n");
@@ -2701,4 +2703,5 @@ struct ieee80211_ops ath9k_ops = {
.sw_scan_start = ath9k_sw_scan_start,
.sw_scan_complete = ath9k_sw_scan_complete,
.get_txpower = ath9k_get_txpower,
+ .wake_tx_queue = ath9k_wake_tx_queue,
};
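The IEEE80211_AMPDU_TX_OPERATIONAL branch above now derives the block-ack window directly from the peer's HT A-MPDU factor. Worked standalone example (assuming mac80211's IEEE80211_MIN_AMPDU_BUF value of 0x8):

#include <stdio.h>

#define IEEE80211_MIN_AMPDU_BUF 0x8

int main(void)
{
	unsigned int ampdu_factor = 3; /* largest exponent a peer advertises */

	printf("baw_size=%u\n", IEEE80211_MIN_AMPDU_BUF << ampdu_factor);
	/* 8 << 3 = 64, a full HT block-ack window */
	return 0;
}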
diff --git a/drivers/net/wireless/ath/ath9k/pci.c b/drivers/net/wireless/ath/ath9k/pci.c
index 0dd454acf22a..aff473dfa10d 100644
--- a/drivers/net/wireless/ath/ath9k/pci.c
+++ b/drivers/net/wireless/ath/ath9k/pci.c
@@ -26,7 +26,6 @@ static const struct pci_device_id ath_pci_id_table[] = {
{ PCI_VDEVICE(ATHEROS, 0x0023) }, /* PCI */
{ PCI_VDEVICE(ATHEROS, 0x0024) }, /* PCI-E */
{ PCI_VDEVICE(ATHEROS, 0x0027) }, /* PCI */
- { PCI_VDEVICE(ATHEROS, 0x0029) }, /* PCI */
#ifdef CONFIG_ATH9K_PCOEM
/* Mini PCI AR9220 MB92 cards: Compex WLM200NX, Wistron DNMA-92 */
@@ -37,7 +36,7 @@ static const struct pci_device_id ath_pci_id_table[] = {
.driver_data = ATH9K_PCI_LED_ACT_HI },
#endif
- { PCI_VDEVICE(ATHEROS, 0x002A) }, /* PCI-E */
+ { PCI_VDEVICE(ATHEROS, 0x0029) }, /* PCI */
#ifdef CONFIG_ATH9K_PCOEM
{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
@@ -85,7 +84,11 @@ static const struct pci_device_id ath_pci_id_table[] = {
0x10CF, /* Fujitsu */
0x1536),
.driver_data = ATH9K_PCI_D3_L1_WAR },
+#endif
+ { PCI_VDEVICE(ATHEROS, 0x002A) }, /* PCI-E */
+
+#ifdef CONFIG_ATH9K_PCOEM
/* AR9285 card for Asus */
{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
0x002B,
diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c
index 669734252664..fb4ba27d92b7 100644
--- a/drivers/net/wireless/ath/ath9k/recv.c
+++ b/drivers/net/wireless/ath/ath9k/recv.c
@@ -867,10 +867,21 @@ static int ath9k_rx_skb_preprocess(struct ath_softc *sc,
* can be dropped.
*/
if (rx_stats->rs_status & ATH9K_RXERR_PHY) {
- ath9k_dfs_process_phyerr(sc, hdr, rx_stats, rx_status->mactime);
- if (ath_cmn_process_fft(&sc->spec_priv, hdr, rx_stats, rx_status->mactime))
+ /*
+ * DFS and spectral are mutually exclusive
+ *
+ * Since some chips use PHYERR_RADAR as indication for both, we
+ * need to double check which feature is enabled to prevent
+ * feeding spectral or dfs-detector with wrong frames.
+ */
+ if (hw->conf.radar_enabled) {
+ ath9k_dfs_process_phyerr(sc, hdr, rx_stats,
+ rx_status->mactime);
+ } else if (sc->spec_priv.spectral_mode != SPECTRAL_DISABLED &&
+ ath_cmn_process_fft(&sc->spec_priv, hdr, rx_stats,
+ rx_status->mactime)) {
RX_STAT_INC(rx_spectral);
-
+ }
return -EINVAL;
}
diff --git a/drivers/net/wireless/ath/ath9k/rng.c b/drivers/net/wireless/ath/ath9k/rng.c
index d38e50f96db7..568b1c6c2b2b 100644
--- a/drivers/net/wireless/ath/ath9k/rng.c
+++ b/drivers/net/wireless/ath/ath9k/rng.c
@@ -22,7 +22,7 @@
#include "ar9003_phy.h"
#define ATH9K_RNG_BUF_SIZE 320
-#define ATH9K_RNG_ENTROPY(x) (((x) * 8 * 320) >> 10) /* quality: 320/1024 */
+#define ATH9K_RNG_ENTROPY(x) (((x) * 8 * 10) >> 5) /* quality: 10/32 */
static int ath9k_rng_data_read(struct ath_softc *sc, u32 *buf, u32 buf_size)
{
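A quick check of the entropy-credit rewrite above: 320/1024 and 10/32 are the same ratio, so the new macro only simplifies the constants; both credit 2.5 bits of entropy per byte read. Standalone verification:

#include <stdio.h>

#define OLD_ENTROPY(x) (((x) * 8 * 320) >> 10)
#define NEW_ENTROPY(x) (((x) * 8 * 10) >> 5)

int main(void)
{
	unsigned int x = 320; /* ATH9K_RNG_BUF_SIZE bytes */

	printf("old=%u new=%u\n", OLD_ENTROPY(x), NEW_ENTROPY(x));
	return 0; /* both print 800 */
}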
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
index 52bfbb988611..4e2f3ac266c3 100644
--- a/drivers/net/wireless/ath/ath9k/xmit.c
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
@@ -67,6 +67,8 @@ static struct ath_buf *ath_tx_setup_buffer(struct ath_softc *sc,
struct ath_txq *txq,
struct ath_atx_tid *tid,
struct sk_buff *skb);
+static int ath_tx_prepare(struct ieee80211_hw *hw, struct sk_buff *skb,
+ struct ath_tx_control *txctl);
enum {
MCS_HT20,
@@ -137,6 +139,26 @@ static void ath_tx_queue_tid(struct ath_softc *sc, struct ath_txq *txq,
list_add_tail(&tid->list, list);
}
+void ath9k_wake_tx_queue(struct ieee80211_hw *hw, struct ieee80211_txq *queue)
+{
+ struct ath_softc *sc = hw->priv;
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+ struct ath_atx_tid *tid = (struct ath_atx_tid *) queue->drv_priv;
+ struct ath_txq *txq = tid->txq;
+
+ ath_dbg(common, QUEUE, "Waking TX queue: %pM (%d)\n",
+ queue->sta ? queue->sta->addr : queue->vif->addr,
+ tid->tidno);
+
+ ath_txq_lock(sc, txq);
+
+ tid->has_queued = true;
+ ath_tx_queue_tid(sc, txq, tid);
+ ath_txq_schedule(sc, txq);
+
+ ath_txq_unlock(sc, txq);
+}
+
static struct ath_frame_info *get_frame_info(struct sk_buff *skb)
{
struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
@@ -164,7 +186,6 @@ static void ath_set_rates(struct ieee80211_vif *vif, struct ieee80211_sta *sta,
static void ath_txq_skb_done(struct ath_softc *sc, struct ath_txq *txq,
struct sk_buff *skb)
{
- struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct ath_frame_info *fi = get_frame_info(skb);
int q = fi->txq;
@@ -175,14 +196,6 @@ static void ath_txq_skb_done(struct ath_softc *sc, struct ath_txq *txq,
if (WARN_ON(--txq->pending_frames < 0))
txq->pending_frames = 0;
- if (txq->stopped &&
- txq->pending_frames < sc->tx.txq_max_pending[q]) {
- if (ath9k_is_chanctx_enabled())
- ieee80211_wake_queue(sc->hw, info->hw_queue);
- else
- ieee80211_wake_queue(sc->hw, q);
- txq->stopped = false;
- }
}
static struct ath_atx_tid *
@@ -192,9 +205,48 @@ ath_get_skb_tid(struct ath_softc *sc, struct ath_node *an, struct sk_buff *skb)
return ATH_AN_2_TID(an, tidno);
}
+static struct sk_buff *
+ath_tid_pull(struct ath_atx_tid *tid)
+{
+ struct ieee80211_txq *txq = container_of((void *)tid, struct ieee80211_txq, drv_priv);
+ struct ath_softc *sc = tid->an->sc;
+ struct ieee80211_hw *hw = sc->hw;
+ struct ath_tx_control txctl = {
+ .txq = tid->txq,
+ .sta = tid->an->sta,
+ };
+ struct sk_buff *skb;
+ struct ath_frame_info *fi;
+ int q;
+
+ if (!tid->has_queued)
+ return NULL;
+
+ skb = ieee80211_tx_dequeue(hw, txq);
+ if (!skb) {
+ tid->has_queued = false;
+ return NULL;
+ }
+
+ if (ath_tx_prepare(hw, skb, &txctl)) {
+ ieee80211_free_txskb(hw, skb);
+ return NULL;
+ }
+
+ q = skb_get_queue_mapping(skb);
+ if (tid->txq == sc->tx.txq_map[q]) {
+ fi = get_frame_info(skb);
+ fi->txq = q;
+ ++tid->txq->pending_frames;
+ }
+
+ return skb;
+}
+
static bool ath_tid_has_buffered(struct ath_atx_tid *tid)
{
- return !skb_queue_empty(&tid->buf_q) || !skb_queue_empty(&tid->retry_q);
+ return !skb_queue_empty(&tid->retry_q) || tid->has_queued;
}
static struct sk_buff *ath_tid_dequeue(struct ath_atx_tid *tid)
@@ -203,46 +255,11 @@ static struct sk_buff *ath_tid_dequeue(struct ath_atx_tid *tid)
skb = __skb_dequeue(&tid->retry_q);
if (!skb)
- skb = __skb_dequeue(&tid->buf_q);
+ skb = ath_tid_pull(tid);
return skb;
}
-/*
- * ath_tx_tid_change_state:
- * - clears a-mpdu flag of previous session
- * - force sequence number allocation to fix next BlockAck Window
- */
-static void
-ath_tx_tid_change_state(struct ath_softc *sc, struct ath_atx_tid *tid)
-{
- struct ath_txq *txq = tid->txq;
- struct ieee80211_tx_info *tx_info;
- struct sk_buff *skb, *tskb;
- struct ath_buf *bf;
- struct ath_frame_info *fi;
-
- skb_queue_walk_safe(&tid->buf_q, skb, tskb) {
- fi = get_frame_info(skb);
- bf = fi->bf;
-
- tx_info = IEEE80211_SKB_CB(skb);
- tx_info->flags &= ~IEEE80211_TX_CTL_AMPDU;
-
- if (bf)
- continue;
-
- bf = ath_tx_setup_buffer(sc, txq, tid, skb);
- if (!bf) {
- __skb_unlink(skb, &tid->buf_q);
- ath_txq_skb_done(sc, txq, skb);
- ieee80211_free_txskb(sc->hw, skb);
- continue;
- }
- }
-
-}
-
static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
{
struct ath_txq *txq = tid->txq;
@@ -883,20 +900,16 @@ static int ath_compute_num_delims(struct ath_softc *sc, struct ath_atx_tid *tid,
static struct ath_buf *
ath_tx_get_tid_subframe(struct ath_softc *sc, struct ath_txq *txq,
- struct ath_atx_tid *tid, struct sk_buff_head **q)
+ struct ath_atx_tid *tid)
{
struct ieee80211_tx_info *tx_info;
struct ath_frame_info *fi;
- struct sk_buff *skb;
+ struct sk_buff *skb, *first_skb = NULL;
struct ath_buf *bf;
u16 seqno;
while (1) {
- *q = &tid->retry_q;
- if (skb_queue_empty(*q))
- *q = &tid->buf_q;
-
- skb = skb_peek(*q);
+ skb = ath_tid_dequeue(tid);
if (!skb)
break;
@@ -908,7 +921,6 @@ ath_tx_get_tid_subframe(struct ath_softc *sc, struct ath_txq *txq,
bf->bf_state.stale = false;
if (!bf) {
- __skb_unlink(skb, *q);
ath_txq_skb_done(sc, txq, skb);
ieee80211_free_txskb(sc->hw, skb);
continue;
@@ -937,8 +949,20 @@ ath_tx_get_tid_subframe(struct ath_softc *sc, struct ath_txq *txq,
seqno = bf->bf_state.seqno;
/* do not step over block-ack window */
- if (!BAW_WITHIN(tid->seq_start, tid->baw_size, seqno))
+ if (!BAW_WITHIN(tid->seq_start, tid->baw_size, seqno)) {
+ __skb_queue_tail(&tid->retry_q, skb);
+
+ /* If there are other skbs in the retry q, they are
+ * probably within the BAW, so loop immediately to get
+ * one of them. Otherwise the queue can get stuck. */
+ if (!skb_queue_is_first(&tid->retry_q, skb) &&
+ !WARN_ON(skb == first_skb)) {
+ if (!first_skb) /* infinite loop prevention */
+ first_skb = skb;
+ continue;
+ }
break;
+ }
if (tid->bar_index > ATH_BA_INDEX(tid->seq_start, seqno)) {
struct ath_tx_status ts = {};
@@ -946,7 +970,6 @@ ath_tx_get_tid_subframe(struct ath_softc *sc, struct ath_txq *txq,
INIT_LIST_HEAD(&bf_head);
list_add(&bf->list, &bf_head);
- __skb_unlink(skb, *q);
ath_tx_update_baw(sc, tid, seqno);
ath_tx_complete_buf(sc, bf, txq, &bf_head, NULL, &ts, 0);
continue;
@@ -958,11 +981,10 @@ ath_tx_get_tid_subframe(struct ath_softc *sc, struct ath_txq *txq,
return NULL;
}
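
The block-ack window test above relies on modular sequence arithmetic. A standalone sketch, assuming the BAW_WITHIN() definition from ath9k.h (the 4095 mask matches the 12-bit 802.11 sequence space, so the subtraction wraps correctly):

	#include <stdio.h>

	#define IEEE80211_SEQ_MAX 4096
	#define BAW_WITHIN(start, bawsz, seqno) \
		((((seqno) - (start)) & (IEEE80211_SEQ_MAX - 1)) < (bawsz))

	int main(void)
	{
		int seq_start = 4090, baw_size = 64;

		printf("%d\n", BAW_WITHIN(seq_start, baw_size, 4093)); /* 1 */
		printf("%d\n", BAW_WITHIN(seq_start, baw_size, 50));   /* 1: wrapped */
		printf("%d\n", BAW_WITHIN(seq_start, baw_size, 700));  /* 0 */
		return 0;
	}
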
-static bool
+static int
ath_tx_form_aggr(struct ath_softc *sc, struct ath_txq *txq,
struct ath_atx_tid *tid, struct list_head *bf_q,
- struct ath_buf *bf_first, struct sk_buff_head *tid_q,
- int *aggr_len)
+ struct ath_buf *bf_first)
{
#define PADBYTES(_len) ((4 - ((_len) % 4)) % 4)
struct ath_buf *bf = bf_first, *bf_prev = NULL;
@@ -972,12 +994,13 @@ ath_tx_form_aggr(struct ath_softc *sc, struct ath_txq *txq,
struct ieee80211_tx_info *tx_info;
struct ath_frame_info *fi;
struct sk_buff *skb;
- bool closed = false;
+
bf = bf_first;
aggr_limit = ath_lookup_rate(sc, bf, tid);
- do {
+ while (bf) {
skb = bf->bf_mpdu;
fi = get_frame_info(skb);
@@ -986,12 +1009,12 @@ ath_tx_form_aggr(struct ath_softc *sc, struct ath_txq *txq,
if (nframes) {
if (aggr_limit < al + bpad + al_delta ||
ath_lookup_legacy(bf) || nframes >= h_baw)
- break;
+ goto stop;
tx_info = IEEE80211_SKB_CB(bf->bf_mpdu);
if ((tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) ||
!(tx_info->flags & IEEE80211_TX_CTL_AMPDU))
- break;
+ goto stop;
}
/* add padding for previous frame to aggregation length */
@@ -1013,20 +1036,18 @@ ath_tx_form_aggr(struct ath_softc *sc, struct ath_txq *txq,
ath_tx_addto_baw(sc, tid, bf);
bf->bf_state.ndelim = ndelim;
- __skb_unlink(skb, tid_q);
list_add_tail(&bf->list, bf_q);
if (bf_prev)
bf_prev->bf_next = bf;
bf_prev = bf;
- bf = ath_tx_get_tid_subframe(sc, txq, tid, &tid_q);
- if (!bf) {
- closed = true;
- break;
- }
- } while (ath_tid_has_buffered(tid));
-
+ bf = ath_tx_get_tid_subframe(sc, txq, tid);
+ }
+ goto finish;
+stop:
+ __skb_queue_tail(&tid->retry_q, bf->bf_mpdu);
+finish:
bf = bf_first;
bf->bf_lastbf = bf_prev;
@@ -1037,9 +1058,7 @@ ath_tx_form_aggr(struct ath_softc *sc, struct ath_txq *txq,
TX_STAT_INC(txq->axq_qnum, a_aggr);
}
- *aggr_len = al;
-
- return closed;
+ return al;
#undef PADBYTES
}
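
PADBYTES() above pads each subframe to a 4-byte boundary while accumulating the aggregate length. A standalone illustration of the same macro:

	#include <stdio.h>

	#define PADBYTES(len) ((4 - ((len) % 4)) % 4)

	int main(void)
	{
		int len;

		for (len = 60; len <= 64; len++)
			printf("len=%d pad=%d\n", len, PADBYTES(len));
		/* pads: 0, 3, 2, 1, 0 */
		return 0;
	}
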
@@ -1416,18 +1435,15 @@ static void ath_tx_fill_desc(struct ath_softc *sc, struct ath_buf *bf,
static void
ath_tx_form_burst(struct ath_softc *sc, struct ath_txq *txq,
struct ath_atx_tid *tid, struct list_head *bf_q,
- struct ath_buf *bf_first, struct sk_buff_head *tid_q)
+ struct ath_buf *bf_first)
{
struct ath_buf *bf = bf_first, *bf_prev = NULL;
- struct sk_buff *skb;
int nframes = 0;
do {
struct ieee80211_tx_info *tx_info;
- skb = bf->bf_mpdu;
nframes++;
- __skb_unlink(skb, tid_q);
list_add_tail(&bf->list, bf_q);
if (bf_prev)
bf_prev->bf_next = bf;
@@ -1436,13 +1452,15 @@ ath_tx_form_burst(struct ath_softc *sc, struct ath_txq *txq,
if (nframes >= 2)
break;
- bf = ath_tx_get_tid_subframe(sc, txq, tid, &tid_q);
+ bf = ath_tx_get_tid_subframe(sc, txq, tid);
if (!bf)
break;
tx_info = IEEE80211_SKB_CB(bf->bf_mpdu);
- if (tx_info->flags & IEEE80211_TX_CTL_AMPDU)
+ if (tx_info->flags & IEEE80211_TX_CTL_AMPDU) {
+ __skb_queue_tail(&tid->retry_q, bf->bf_mpdu);
break;
+ }
ath_set_rates(tid->an->vif, tid->an->sta, bf);
} while (1);
@@ -1453,34 +1471,33 @@ static bool ath_tx_sched_aggr(struct ath_softc *sc, struct ath_txq *txq,
{
struct ath_buf *bf;
struct ieee80211_tx_info *tx_info;
- struct sk_buff_head *tid_q;
struct list_head bf_q;
int aggr_len = 0;
- bool aggr, last = true;
+ bool aggr;
if (!ath_tid_has_buffered(tid))
return false;
INIT_LIST_HEAD(&bf_q);
- bf = ath_tx_get_tid_subframe(sc, txq, tid, &tid_q);
+ bf = ath_tx_get_tid_subframe(sc, txq, tid);
if (!bf)
return false;
tx_info = IEEE80211_SKB_CB(bf->bf_mpdu);
aggr = !!(tx_info->flags & IEEE80211_TX_CTL_AMPDU);
if ((aggr && txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH) ||
- (!aggr && txq->axq_depth >= ATH_NON_AGGR_MIN_QDEPTH)) {
+ (!aggr && txq->axq_depth >= ATH_NON_AGGR_MIN_QDEPTH)) {
+ __skb_queue_tail(&tid->retry_q, bf->bf_mpdu);
*stop = true;
return false;
}
ath_set_rates(tid->an->vif, tid->an->sta, bf);
if (aggr)
- last = ath_tx_form_aggr(sc, txq, tid, &bf_q, bf,
- tid_q, &aggr_len);
+ aggr_len = ath_tx_form_aggr(sc, txq, tid, &bf_q, bf);
else
- ath_tx_form_burst(sc, txq, tid, &bf_q, bf, tid_q);
+ ath_tx_form_burst(sc, txq, tid, &bf_q, bf);
if (list_empty(&bf_q))
return false;
@@ -1523,9 +1540,6 @@ int ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta,
an->mpdudensity = density;
}
- /* force sequence number allocation for pending frames */
- ath_tx_tid_change_state(sc, txtid);
-
txtid->active = true;
*ssn = txtid->seq_start = txtid->seq_next;
txtid->bar_index = -1;
@@ -1550,7 +1564,6 @@ void ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
ath_txq_lock(sc, txq);
txtid->active = false;
ath_tx_flush_tid(sc, txtid);
- ath_tx_tid_change_state(sc, txtid);
ath_txq_unlock_complete(sc, txq);
}
@@ -1560,14 +1573,12 @@ void ath_tx_aggr_sleep(struct ieee80211_sta *sta, struct ath_softc *sc,
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
struct ath_atx_tid *tid;
struct ath_txq *txq;
- bool buffered;
int tidno;
ath_dbg(common, XMIT, "%s called\n", __func__);
- for (tidno = 0, tid = &an->tid[tidno];
- tidno < IEEE80211_NUM_TIDS; tidno++, tid++) {
-
+ for (tidno = 0; tidno < IEEE80211_NUM_TIDS; tidno++) {
+ tid = ath_node_to_tid(an, tidno);
txq = tid->txq;
ath_txq_lock(sc, txq);
@@ -1577,13 +1588,12 @@ void ath_tx_aggr_sleep(struct ieee80211_sta *sta, struct ath_softc *sc,
continue;
}
- buffered = ath_tid_has_buffered(tid);
+ if (!skb_queue_empty(&tid->retry_q))
+ ieee80211_sta_set_buffered(sta, tid->tidno, true);
list_del_init(&tid->list);
ath_txq_unlock(sc, txq);
-
- ieee80211_sta_set_buffered(sta, tidno, buffered);
}
}
@@ -1596,49 +1606,20 @@ void ath_tx_aggr_wakeup(struct ath_softc *sc, struct ath_node *an)
ath_dbg(common, XMIT, "%s called\n", __func__);
- for (tidno = 0, tid = &an->tid[tidno];
- tidno < IEEE80211_NUM_TIDS; tidno++, tid++) {
-
+ for (tidno = 0; tidno < IEEE80211_NUM_TIDS; tidno++) {
+ tid = ath_node_to_tid(an, tidno);
txq = tid->txq;
ath_txq_lock(sc, txq);
tid->clear_ps_filter = true;
-
if (ath_tid_has_buffered(tid)) {
ath_tx_queue_tid(sc, txq, tid);
ath_txq_schedule(sc, txq);
}
-
ath_txq_unlock_complete(sc, txq);
}
}
-void ath_tx_aggr_resume(struct ath_softc *sc, struct ieee80211_sta *sta,
- u16 tidno)
-{
- struct ath_common *common = ath9k_hw_common(sc->sc_ah);
- struct ath_atx_tid *tid;
- struct ath_node *an;
- struct ath_txq *txq;
-
- ath_dbg(common, XMIT, "%s called\n", __func__);
-
- an = (struct ath_node *)sta->drv_priv;
- tid = ATH_AN_2_TID(an, tidno);
- txq = tid->txq;
-
- ath_txq_lock(sc, txq);
-
- tid->baw_size = IEEE80211_MIN_AMPDU_BUF << sta->ht_cap.ampdu_factor;
-
- if (ath_tid_has_buffered(tid)) {
- ath_tx_queue_tid(sc, txq, tid);
- ath_txq_schedule(sc, txq);
- }
-
- ath_txq_unlock_complete(sc, txq);
-}
-
void ath9k_release_buffered_frames(struct ieee80211_hw *hw,
struct ieee80211_sta *sta,
u16 tids, int nframes,
@@ -1651,7 +1632,6 @@ void ath9k_release_buffered_frames(struct ieee80211_hw *hw,
struct ieee80211_tx_info *info;
struct list_head bf_q;
struct ath_buf *bf_tail = NULL, *bf;
- struct sk_buff_head *tid_q;
int sent = 0;
int i;
@@ -1666,11 +1646,10 @@ void ath9k_release_buffered_frames(struct ieee80211_hw *hw,
ath_txq_lock(sc, tid->txq);
while (nframes > 0) {
- bf = ath_tx_get_tid_subframe(sc, sc->tx.uapsdq, tid, &tid_q);
+ bf = ath_tx_get_tid_subframe(sc, sc->tx.uapsdq, tid);
if (!bf)
break;
- __skb_unlink(bf->bf_mpdu, tid_q);
list_add_tail(&bf->list, &bf_q);
ath_set_rates(tid->an->vif, tid->an->sta, bf);
if (bf_isampdu(bf)) {
@@ -1685,7 +1664,7 @@ void ath9k_release_buffered_frames(struct ieee80211_hw *hw,
sent++;
TX_STAT_INC(txq->axq_qnum, a_queued_hw);
- if (an->sta && !ath_tid_has_buffered(tid))
+ if (an->sta && skb_queue_empty(&tid->retry_q))
ieee80211_sta_set_buffered(an->sta, i, false);
}
ath_txq_unlock_complete(sc, tid->txq);
@@ -1914,13 +1893,7 @@ bool ath_drain_all_txq(struct ath_softc *sc)
if (!ATH_TXQ_SETUP(sc, i))
continue;
- /*
- * The caller will resume queues with ieee80211_wake_queues.
- * Mark the queue as not stopped to prevent ath_tx_complete
- * from waking the queue too early.
- */
txq = &sc->tx.txq[i];
- txq->stopped = false;
ath_draintxq(sc, txq);
}
@@ -2319,16 +2292,14 @@ int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
struct ath_softc *sc = hw->priv;
struct ath_txq *txq = txctl->txq;
struct ath_atx_tid *tid = NULL;
+ struct ath_node *an = NULL;
struct ath_buf *bf;
- bool queue, skip_uapsd = false, ps_resp;
+ bool ps_resp;
int q, ret;
if (vif)
avp = (void *)vif->drv_priv;
- if (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN)
- txctl->force_channel = true;
-
ps_resp = !!(info->control.flags & IEEE80211_TX_CTRL_PS_RESPONSE);
ret = ath_tx_prepare(hw, skb, txctl);
@@ -2343,63 +2314,18 @@ int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
q = skb_get_queue_mapping(skb);
- ath_txq_lock(sc, txq);
- if (txq == sc->tx.txq_map[q]) {
- fi->txq = q;
- if (++txq->pending_frames > sc->tx.txq_max_pending[q] &&
- !txq->stopped) {
- if (ath9k_is_chanctx_enabled())
- ieee80211_stop_queue(sc->hw, info->hw_queue);
- else
- ieee80211_stop_queue(sc->hw, q);
- txq->stopped = true;
- }
- }
-
- queue = ieee80211_is_data_present(hdr->frame_control);
-
- /* If chanctx, queue all null frames while NOA could be there */
- if (ath9k_is_chanctx_enabled() &&
- ieee80211_is_nullfunc(hdr->frame_control) &&
- !txctl->force_channel)
- queue = true;
-
- /* Force queueing of all frames that belong to a virtual interface on
- * a different channel context, to ensure that they are sent on the
- * correct channel.
- */
- if (((avp && avp->chanctx != sc->cur_chan) ||
- sc->cur_chan->stopped) && !txctl->force_channel) {
- if (!txctl->an)
- txctl->an = &avp->mcast_node;
- queue = true;
- skip_uapsd = true;
- }
-
- if (txctl->an && queue)
- tid = ath_get_skb_tid(sc, txctl->an, skb);
-
- if (!skip_uapsd && ps_resp) {
- ath_txq_unlock(sc, txq);
+ if (ps_resp)
txq = sc->tx.uapsdq;
- ath_txq_lock(sc, txq);
- } else if (txctl->an && queue) {
- WARN_ON(tid->txq != txctl->txq);
-
- if (info->flags & IEEE80211_TX_CTL_CLEAR_PS_FILT)
- tid->clear_ps_filter = true;
- /*
- * Add this frame to software queue for scheduling later
- * for aggregation.
- */
- TX_STAT_INC(txq->axq_qnum, a_queued_sw);
- __skb_queue_tail(&tid->buf_q, skb);
- if (!txctl->an->sleeping)
- ath_tx_queue_tid(sc, txq, tid);
+ if (txctl->sta) {
+ an = (struct ath_node *) txctl->sta->drv_priv;
+ tid = ath_get_skb_tid(sc, an, skb);
+ }
- ath_txq_schedule(sc, txq);
- goto out;
+ ath_txq_lock(sc, txq);
+ if (txq == sc->tx.txq_map[q]) {
+ fi->txq = q;
+ ++txq->pending_frames;
}
bf = ath_tx_setup_buffer(sc, txq, tid, skb);
@@ -2787,7 +2713,7 @@ void ath_tx_edma_tasklet(struct ath_softc *sc)
fifo_list = &txq->txq_fifo[txq->txq_tailidx];
if (list_empty(fifo_list)) {
ath_txq_unlock(sc, txq);
- return;
+ break;
}
bf = list_first_entry(fifo_list, struct ath_buf, list);
@@ -2892,9 +2818,8 @@ void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an)
struct ath_atx_tid *tid;
int tidno, acno;
- for (tidno = 0, tid = &an->tid[tidno];
- tidno < IEEE80211_NUM_TIDS;
- tidno++, tid++) {
+ for (tidno = 0; tidno < IEEE80211_NUM_TIDS; tidno++) {
+ tid = ath_node_to_tid(an, tidno);
tid->an = an;
tid->tidno = tidno;
tid->seq_start = tid->seq_next = 0;
@@ -2902,11 +2827,14 @@ void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an)
tid->baw_head = tid->baw_tail = 0;
tid->active = false;
tid->clear_ps_filter = true;
- __skb_queue_head_init(&tid->buf_q);
+ tid->has_queued = false;
__skb_queue_head_init(&tid->retry_q);
INIT_LIST_HEAD(&tid->list);
acno = TID_TO_WME_AC(tidno);
tid->txq = sc->tx.txq_map[acno];
+
+ if (!an->sta)
+ break; /* just one multicast ath_atx_tid */
}
}
@@ -2916,9 +2844,8 @@ void ath_tx_node_cleanup(struct ath_softc *sc, struct ath_node *an)
struct ath_txq *txq;
int tidno;
- for (tidno = 0, tid = &an->tid[tidno];
- tidno < IEEE80211_NUM_TIDS; tidno++, tid++) {
-
+ for (tidno = 0; tidno < IEEE80211_NUM_TIDS; tidno++) {
+ tid = ath_node_to_tid(an, tidno);
txq = tid->txq;
ath_txq_lock(sc, txq);
@@ -2930,6 +2857,9 @@ void ath_tx_node_cleanup(struct ath_softc *sc, struct ath_node *an)
tid->active = false;
ath_txq_unlock(sc, txq);
+
+ if (!an->sta)
+ break; /* just one multicast ath_atx_tid */
}
}
diff --git a/drivers/net/wireless/ath/main.c b/drivers/net/wireless/ath/main.c
index 338d72337604..89f4b0513946 100644
--- a/drivers/net/wireless/ath/main.c
+++ b/drivers/net/wireless/ath/main.c
@@ -90,3 +90,10 @@ void ath_printk(const char *level, const struct ath_common* common,
va_end(args);
}
EXPORT_SYMBOL(ath_printk);
+
+const char *ath_bus_type_strings[] = {
+ [ATH_PCI] = "pci",
+ [ATH_AHB] = "ahb",
+ [ATH_USB] = "usb",
+};
+EXPORT_SYMBOL(ath_bus_type_strings);
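
The exported table uses designated array initializers keyed by the bus-type enum, so a bus type maps to its name by direct indexing. A hedged standalone version (the enum values are assumed small and dense, as in enum ath_bus_type):

	#include <stdio.h>

	enum bus_type { BUS_PCI, BUS_AHB, BUS_USB };

	static const char *bus_strings[] = {
		[BUS_PCI] = "pci",
		[BUS_AHB] = "ahb",
		[BUS_USB] = "usb",
	};

	int main(void)
	{
		printf("%s\n", bus_strings[BUS_AHB]);	/* "ahb" */
		return 0;
	}
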
diff --git a/drivers/net/wireless/ath/regd.c b/drivers/net/wireless/ath/regd.c
index f8506037736f..43afa83a9f0c 100644
--- a/drivers/net/wireless/ath/regd.c
+++ b/drivers/net/wireless/ath/regd.c
@@ -449,7 +449,7 @@ static void ath_reg_apply_world_flags(struct wiphy *wiphy,
}
}
-static u16 ath_regd_find_country_by_name(char *alpha2)
+u16 ath_regd_find_country_by_name(char *alpha2)
{
unsigned int i;
@@ -460,6 +460,7 @@ static u16 ath_regd_find_country_by_name(char *alpha2)
return -1;
}
+EXPORT_SYMBOL(ath_regd_find_country_by_name);
static int __ath_reg_dyn_country(struct wiphy *wiphy,
struct ath_regulatory *reg,
diff --git a/drivers/net/wireless/ath/regd.h b/drivers/net/wireless/ath/regd.h
index 565d3075f06e..5d80be213fac 100644
--- a/drivers/net/wireless/ath/regd.h
+++ b/drivers/net/wireless/ath/regd.h
@@ -251,6 +251,7 @@ enum CountryCode {
bool ath_is_world_regd(struct ath_regulatory *reg);
bool ath_is_49ghz_allowed(u16 redomain);
+u16 ath_regd_find_country_by_name(char *alpha2);
int ath_regd_init(struct ath_regulatory *reg, struct wiphy *wiphy,
void (*reg_notifier)(struct wiphy *wiphy,
struct regulatory_request *request));
diff --git a/drivers/net/wireless/ath/wil6210/Makefile b/drivers/net/wireless/ath/wil6210/Makefile
index 11b544b26c74..89bf2f9eca1d 100644
--- a/drivers/net/wireless/ath/wil6210/Makefile
+++ b/drivers/net/wireless/ath/wil6210/Makefile
@@ -22,5 +22,3 @@ wil6210-y += p2p.o
# for tracing framework to find trace.h
CFLAGS_trace.o := -I$(src)
-
-subdir-ccflags-y += -D__CHECK_ENDIAN__
diff --git a/drivers/net/wireless/ath/wil6210/cfg80211.c b/drivers/net/wireless/ath/wil6210/cfg80211.c
index d117240d9a73..6aa3ff4240a9 100644
--- a/drivers/net/wireless/ath/wil6210/cfg80211.c
+++ b/drivers/net/wireless/ath/wil6210/cfg80211.c
@@ -354,14 +354,6 @@ static int wil_cfg80211_scan(struct wiphy *wiphy,
wil_dbg_misc(wil, "%s(), wdev=0x%p iftype=%d\n",
__func__, wdev, wdev->iftype);
- mutex_lock(&wil->p2p_wdev_mutex);
- if (wil->scan_request) {
- wil_err(wil, "Already scanning\n");
- mutex_unlock(&wil->p2p_wdev_mutex);
- return -EAGAIN;
- }
- mutex_unlock(&wil->p2p_wdev_mutex);
-
/* check we are client side */
switch (wdev->iftype) {
case NL80211_IFTYPE_STATION:
@@ -378,12 +370,24 @@ static int wil_cfg80211_scan(struct wiphy *wiphy,
return -EBUSY;
}
+ mutex_lock(&wil->mutex);
+
+ mutex_lock(&wil->p2p_wdev_mutex);
+ if (wil->scan_request || wil->p2p.discovery_started) {
+ wil_err(wil, "Already scanning\n");
+ mutex_unlock(&wil->p2p_wdev_mutex);
+ rc = -EAGAIN;
+ goto out;
+ }
+ mutex_unlock(&wil->p2p_wdev_mutex);
+
/* social scan on P2P_DEVICE is handled as p2p search */
if (wdev->iftype == NL80211_IFTYPE_P2P_DEVICE &&
wil_p2p_is_social_scan(request)) {
if (!wil->p2p.p2p_dev_started) {
wil_err(wil, "P2P search requested on stopped P2P device\n");
- return -EIO;
+ rc = -EIO;
+ goto out;
}
wil->scan_request = request;
wil->radio_wdev = wdev;
@@ -392,7 +396,7 @@ static int wil_cfg80211_scan(struct wiphy *wiphy,
wil->radio_wdev = wil_to_wdev(wil);
wil->scan_request = NULL;
}
- return rc;
+ goto out;
}
(void)wil_p2p_stop_discovery(wil);
@@ -415,7 +419,7 @@ static int wil_cfg80211_scan(struct wiphy *wiphy,
if (rc) {
wil_err(wil, "set SSID for scan request failed: %d\n", rc);
- return rc;
+ goto out;
}
wil->scan_request = request;
@@ -448,7 +452,7 @@ static int wil_cfg80211_scan(struct wiphy *wiphy,
rc = wmi_set_ie(wil, WMI_FRAME_PROBE_REQ, request->ie_len, request->ie);
if (rc)
- goto out;
+ goto out_restore;
if (wil->discovery_mode && cmd.cmd.scan_type == WMI_ACTIVE_SCAN) {
cmd.cmd.discovery_mode = 1;
@@ -459,16 +463,45 @@ static int wil_cfg80211_scan(struct wiphy *wiphy,
rc = wmi_send(wil, WMI_START_SCAN_CMDID, &cmd, sizeof(cmd.cmd) +
cmd.cmd.num_channels * sizeof(cmd.cmd.channel_list[0]));
-out:
+out_restore:
if (rc) {
del_timer_sync(&wil->scan_timer);
wil->radio_wdev = wil_to_wdev(wil);
wil->scan_request = NULL;
}
-
+out:
+ mutex_unlock(&wil->mutex);
return rc;
}
+static void wil_cfg80211_abort_scan(struct wiphy *wiphy,
+ struct wireless_dev *wdev)
+{
+ struct wil6210_priv *wil = wiphy_to_wil(wiphy);
+
+ wil_dbg_misc(wil, "wdev=0x%p iftype=%d\n", wdev, wdev->iftype);
+
+ mutex_lock(&wil->mutex);
+ mutex_lock(&wil->p2p_wdev_mutex);
+
+ if (!wil->scan_request)
+ goto out;
+
+ if (wdev != wil->scan_request->wdev) {
+ wil_dbg_misc(wil, "abort scan was called on the wrong iface\n");
+ goto out;
+ }
+
+ if (wil->radio_wdev == wil->p2p_wdev)
+ wil_p2p_stop_radio_operations(wil);
+ else
+ wil_abort_scan(wil, true);
+
+out:
+ mutex_unlock(&wil->p2p_wdev_mutex);
+ mutex_unlock(&wil->mutex);
+}
+
static void wil_print_crypto(struct wil6210_priv *wil,
struct cfg80211_crypto_settings *c)
{
@@ -674,6 +707,26 @@ static int wil_cfg80211_disconnect(struct wiphy *wiphy,
return rc;
}
+static int wil_cfg80211_set_wiphy_params(struct wiphy *wiphy, u32 changed)
+{
+ struct wil6210_priv *wil = wiphy_to_wil(wiphy);
+ int rc;
+
+ /* these parameters are explicitly not supported */
+ if (changed & (WIPHY_PARAM_RETRY_LONG |
+ WIPHY_PARAM_FRAG_THRESHOLD |
+ WIPHY_PARAM_RTS_THRESHOLD))
+ return -ENOTSUPP;
+
+ if (changed & WIPHY_PARAM_RETRY_SHORT) {
+ rc = wmi_set_mgmt_retry(wil, wiphy->retry_short);
+ if (rc)
+ return rc;
+ }
+
+ return 0;
+}
+
int wil_cfg80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
struct cfg80211_mgmt_tx_params *params,
u64 *cookie)
@@ -940,16 +993,8 @@ static int wil_remain_on_channel(struct wiphy *wiphy,
wil_dbg_misc(wil, "%s() center_freq=%d, duration=%d iftype=%d\n",
__func__, chan->center_freq, duration, wdev->iftype);
- rc = wil_p2p_listen(wil, duration, chan, cookie);
- if (rc)
- return rc;
-
- wil->radio_wdev = wdev;
-
- cfg80211_ready_on_channel(wdev, *cookie, chan, duration,
- GFP_KERNEL);
-
- return 0;
+ rc = wil_p2p_listen(wil, wdev, duration, chan, cookie);
+ return rc;
}
static int wil_cancel_remain_on_channel(struct wiphy *wiphy,
@@ -1419,17 +1464,49 @@ static void wil_cfg80211_stop_p2p_device(struct wiphy *wiphy,
wil_dbg_misc(wil, "%s: entered\n", __func__);
mutex_lock(&wil->mutex);
+ mutex_lock(&wil->p2p_wdev_mutex);
wil_p2p_stop_radio_operations(wil);
p2p->p2p_dev_started = 0;
+ mutex_unlock(&wil->p2p_wdev_mutex);
mutex_unlock(&wil->mutex);
}
+static int wil_cfg80211_set_power_mgmt(struct wiphy *wiphy,
+ struct net_device *dev,
+ bool enabled, int timeout)
+{
+ struct wil6210_priv *wil = wiphy_to_wil(wiphy);
+ enum wmi_ps_profile_type ps_profile;
+ int rc;
+
+ if (!test_bit(WMI_FW_CAPABILITY_PS_CONFIG, wil->fw_capabilities)) {
+ wil_err(wil, "set_power_mgmt not supported\n");
+ return -EOPNOTSUPP;
+ }
+
+ wil_dbg_misc(wil, "enabled=%d, timeout=%d\n",
+ enabled, timeout);
+
+ if (enabled)
+ ps_profile = WMI_PS_PROFILE_TYPE_DEFAULT;
+ else
+ ps_profile = WMI_PS_PROFILE_TYPE_PS_DISABLED;
+
+ rc = wmi_ps_dev_profile_cfg(wil, ps_profile);
+ if (rc)
+ wil_err(wil, "wmi_ps_dev_profile_cfg failed (%d)\n", rc);
+
+ return rc;
+}
+
static struct cfg80211_ops wil_cfg80211_ops = {
.add_virtual_intf = wil_cfg80211_add_iface,
.del_virtual_intf = wil_cfg80211_del_iface,
.scan = wil_cfg80211_scan,
+ .abort_scan = wil_cfg80211_abort_scan,
.connect = wil_cfg80211_connect,
.disconnect = wil_cfg80211_disconnect,
+ .set_wiphy_params = wil_cfg80211_set_wiphy_params,
.change_virtual_intf = wil_cfg80211_change_iface,
.get_station = wil_cfg80211_get_station,
.dump_station = wil_cfg80211_dump_station,
@@ -1450,6 +1527,7 @@ static struct cfg80211_ops wil_cfg80211_ops = {
/* P2P device */
.start_p2p_device = wil_cfg80211_start_p2p_device,
.stop_p2p_device = wil_cfg80211_stop_p2p_device,
+ .set_power_mgmt = wil_cfg80211_set_power_mgmt,
};
static void wil_wiphy_init(struct wiphy *wiphy)
@@ -1466,7 +1544,8 @@ static void wil_wiphy_init(struct wiphy *wiphy)
BIT(NL80211_IFTYPE_MONITOR);
wiphy->flags |= WIPHY_FLAG_HAVE_AP_SME |
WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL |
- WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD;
+ WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD |
+ WIPHY_FLAG_PS_ON_BY_DEFAULT;
dev_dbg(wiphy_dev(wiphy), "%s : flags = 0x%08x\n",
__func__, wiphy->flags);
wiphy->probe_resp_offload =
diff --git a/drivers/net/wireless/ath/wil6210/main.c b/drivers/net/wireless/ath/wil6210/main.c
index e7130b54d1d8..e2e021bcaa03 100644
--- a/drivers/net/wireless/ath/wil6210/main.c
+++ b/drivers/net/wireless/ath/wil6210/main.c
@@ -24,6 +24,7 @@
#include "boot_loader.h"
#define WAIT_FOR_HALP_VOTE_MS 100
+#define WAIT_FOR_SCAN_ABORT_MS 1000
bool debug_fw; /* = false; */
module_param(debug_fw, bool, S_IRUGO);
@@ -213,7 +214,7 @@ __acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock)
memset(&sta->stats, 0, sizeof(sta->stats));
}
-static bool wil_ap_is_connected(struct wil6210_priv *wil)
+static bool wil_is_connected(struct wil6210_priv *wil)
{
int i;
@@ -267,7 +268,7 @@ static void _wil6210_disconnect(struct wil6210_priv *wil, const u8 *bssid,
case NL80211_IFTYPE_STATION:
case NL80211_IFTYPE_P2P_CLIENT:
wil_bcast_fini(wil);
- netif_tx_stop_all_queues(ndev);
+ wil_update_net_queues_bh(wil, NULL, true);
netif_carrier_off(ndev);
if (test_bit(wil_status_fwconnected, wil->status)) {
@@ -283,8 +284,12 @@ static void _wil6210_disconnect(struct wil6210_priv *wil, const u8 *bssid,
break;
case NL80211_IFTYPE_AP:
case NL80211_IFTYPE_P2P_GO:
- if (!wil_ap_is_connected(wil))
+ if (!wil_is_connected(wil)) {
+ wil_update_net_queues_bh(wil, NULL, true);
clear_bit(wil_status_fwconnected, wil->status);
+ } else {
+ wil_update_net_queues_bh(wil, NULL, false);
+ }
break;
default:
break;
@@ -384,18 +389,19 @@ static void wil_fw_error_worker(struct work_struct *work)
wil->last_fw_recovery = jiffies;
+ wil_info(wil, "fw error recovery requested (try %d)...\n",
+ wil->recovery_count);
+ if (!no_fw_recovery)
+ wil->recovery_state = fw_recovery_running;
+ if (wil_wait_for_recovery(wil) != 0)
+ return;
+
mutex_lock(&wil->mutex);
switch (wdev->iftype) {
case NL80211_IFTYPE_STATION:
case NL80211_IFTYPE_P2P_CLIENT:
case NL80211_IFTYPE_MONITOR:
- wil_info(wil, "fw error recovery requested (try %d)...\n",
- wil->recovery_count);
- if (!no_fw_recovery)
- wil->recovery_state = fw_recovery_running;
- if (0 != wil_wait_for_recovery(wil))
- break;
-
+ /* silent recovery, upper layers will see disconnect */
__wil_down(wil);
__wil_up(wil);
break;
@@ -512,10 +518,13 @@ int wil_priv_init(struct wil6210_priv *wil)
INIT_WORK(&wil->wmi_event_worker, wmi_event_worker);
INIT_WORK(&wil->fw_error_worker, wil_fw_error_worker);
INIT_WORK(&wil->probe_client_worker, wil_probe_client_worker);
+ INIT_WORK(&wil->p2p.delayed_listen_work, wil_p2p_delayed_listen_work);
INIT_LIST_HEAD(&wil->pending_wmi_ev);
INIT_LIST_HEAD(&wil->probe_client_pending);
spin_lock_init(&wil->wmi_ev_lock);
+ spin_lock_init(&wil->net_queue_lock);
+ wil->net_queue_stopped = 1;
init_waitqueue_head(&wil->wq);
wil->wmi_wq = create_singlethread_workqueue(WIL_NAME "_wmi");
@@ -571,6 +580,7 @@ void wil_priv_deinit(struct wil6210_priv *wil)
cancel_work_sync(&wil->disconnect_worker);
cancel_work_sync(&wil->fw_error_worker);
cancel_work_sync(&wil->p2p.discovery_expired_work);
+ cancel_work_sync(&wil->p2p.delayed_listen_work);
mutex_lock(&wil->mutex);
wil6210_disconnect(wil, NULL, WLAN_REASON_DEAUTH_LEAVING, false);
mutex_unlock(&wil->mutex);
@@ -685,6 +695,19 @@ static int wil_target_reset(struct wil6210_priv *wil)
return 0;
}
+static void wil_collect_fw_info(struct wil6210_priv *wil)
+{
+ struct wiphy *wiphy = wil_to_wiphy(wil);
+ u8 retry_short;
+ int rc;
+
+ rc = wmi_get_mgmt_retry(wil, &retry_short);
+ if (!rc) {
+ wiphy->retry_short = retry_short;
+ wil_dbg_misc(wil, "FW retry_short: %d\n", retry_short);
+ }
+}
+
void wil_mbox_ring_le2cpus(struct wil6210_mbox_ring *r)
{
le32_to_cpus(&r->base);
@@ -801,6 +824,34 @@ static int wil_wait_for_fw_ready(struct wil6210_priv *wil)
return 0;
}
+void wil_abort_scan(struct wil6210_priv *wil, bool sync)
+{
+ int rc;
+ struct cfg80211_scan_info info = {
+ .aborted = true,
+ };
+
+ lockdep_assert_held(&wil->p2p_wdev_mutex);
+
+ if (!wil->scan_request)
+ return;
+
+ wil_dbg_misc(wil, "Abort scan_request 0x%p\n", wil->scan_request);
+ del_timer_sync(&wil->scan_timer);
+ mutex_unlock(&wil->p2p_wdev_mutex);
+ rc = wmi_abort_scan(wil);
+ if (!rc && sync)
+ wait_event_interruptible_timeout(wil->wq, !wil->scan_request,
+ msecs_to_jiffies(
+ WAIT_FOR_SCAN_ABORT_MS));
+
+ mutex_lock(&wil->p2p_wdev_mutex);
+ if (wil->scan_request) {
+ cfg80211_scan_done(wil->scan_request, &info);
+ wil->scan_request = NULL;
+ }
+}
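
wil_abort_scan() drops p2p_wdev_mutex around the blocking firmware call and re-checks scan_request once the lock is re-taken, since the completion path may have run in between. A minimal pthread model of that unlock/call/relock-and-recheck pattern (names and the stand-in firmware call are illustrative):

	#include <pthread.h>
	#include <stdio.h>

	static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
	static int scan_active = 1;

	/* stand-in for wmi_abort_scan() plus waiting for completion */
	static void blocking_fw_abort(void)
	{
		scan_active = 0;	/* the completion path may clear this */
	}

	static void abort_scan(void)
	{
		pthread_mutex_lock(&lock);
		if (!scan_active) {
			pthread_mutex_unlock(&lock);
			return;
		}
		pthread_mutex_unlock(&lock);	/* don't block under the lock */
		blocking_fw_abort();
		pthread_mutex_lock(&lock);
		if (scan_active)		/* re-check after relocking */
			scan_active = 0;	/* cfg80211_scan_done() here */
		pthread_mutex_unlock(&lock);
	}

	int main(void)
	{
		abort_scan();
		printf("scan_active=%d\n", scan_active);
		return 0;
	}
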
+
/*
* We reset all the structures, and we reset the UMAC.
* After calling this routine, you're expected to reload
@@ -853,17 +904,7 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw)
mutex_unlock(&wil->wmi_mutex);
mutex_lock(&wil->p2p_wdev_mutex);
- if (wil->scan_request) {
- struct cfg80211_scan_info info = {
- .aborted = true,
- };
-
- wil_dbg_misc(wil, "Abort scan_request 0x%p\n",
- wil->scan_request);
- del_timer_sync(&wil->scan_timer);
- cfg80211_scan_done(wil->scan_request, &info);
- wil->scan_request = NULL;
- }
+ wil_abort_scan(wil, false);
mutex_unlock(&wil->p2p_wdev_mutex);
wil_mask_irq(wil);
@@ -940,6 +981,8 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw)
return rc;
}
+ wil_collect_fw_info(wil);
+
if (wil->platform_ops.notify) {
rc = wil->platform_ops.notify(wil->platform_handle,
WIL_PLATFORM_EVT_FW_RDY);
@@ -1056,20 +1099,9 @@ int __wil_down(struct wil6210_priv *wil)
}
wil_enable_irq(wil);
- wil_p2p_stop_radio_operations(wil);
-
mutex_lock(&wil->p2p_wdev_mutex);
- if (wil->scan_request) {
- struct cfg80211_scan_info info = {
- .aborted = true,
- };
-
- wil_dbg_misc(wil, "Abort scan_request 0x%p\n",
- wil->scan_request);
- del_timer_sync(&wil->scan_timer);
- cfg80211_scan_done(wil->scan_request, &info);
- wil->scan_request = NULL;
- }
+ wil_p2p_stop_radio_operations(wil);
+ wil_abort_scan(wil, false);
mutex_unlock(&wil->p2p_wdev_mutex);
wil_reset(wil, false);
diff --git a/drivers/net/wireless/ath/wil6210/netdev.c b/drivers/net/wireless/ath/wil6210/netdev.c
index 61de5e9f8ef0..6676001dcbca 100644
--- a/drivers/net/wireless/ath/wil6210/netdev.c
+++ b/drivers/net/wireless/ath/wil6210/netdev.c
@@ -41,21 +41,6 @@ static int wil_stop(struct net_device *ndev)
return wil_down(wil);
}
-static int wil_change_mtu(struct net_device *ndev, int new_mtu)
-{
- struct wil6210_priv *wil = ndev_to_wil(ndev);
-
- if (new_mtu < 68 || new_mtu > mtu_max) {
- wil_err(wil, "invalid MTU %d\n", new_mtu);
- return -EINVAL;
- }
-
- wil_dbg_misc(wil, "change MTU %d -> %d\n", ndev->mtu, new_mtu);
- ndev->mtu = new_mtu;
-
- return 0;
-}
-
static int wil_do_ioctl(struct net_device *ndev, struct ifreq *ifr, int cmd)
{
struct wil6210_priv *wil = ndev_to_wil(ndev);
@@ -69,7 +54,6 @@ static const struct net_device_ops wil_netdev_ops = {
.ndo_start_xmit = wil_start_xmit,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
- .ndo_change_mtu = wil_change_mtu,
.ndo_do_ioctl = wil_do_ioctl,
};
@@ -126,6 +110,7 @@ static int wil6210_netdev_poll_tx(struct napi_struct *napi, int budget)
static void wil_dev_setup(struct net_device *dev)
{
ether_setup(dev);
+ dev->max_mtu = mtu_max;
dev->tx_queue_len = WIL_TX_Q_LEN_DEFAULT;
}
@@ -229,7 +214,7 @@ int wil_if_add(struct wil6210_priv *wil)
netif_tx_napi_add(ndev, &wil->napi_tx, wil6210_netdev_poll_tx,
WIL6210_NAPI_BUDGET);
- netif_tx_stop_all_queues(ndev);
+ wil_update_net_queues_bh(wil, NULL, true);
rc = register_netdev(ndev);
if (rc < 0) {
diff --git a/drivers/net/wireless/ath/wil6210/p2p.c b/drivers/net/wireless/ath/wil6210/p2p.c
index 4087785d3090..fbae99525e01 100644
--- a/drivers/net/wireless/ath/wil6210/p2p.c
+++ b/drivers/net/wireless/ath/wil6210/p2p.c
@@ -22,6 +22,43 @@
#define P2P_SEARCH_DURATION_MS 500
#define P2P_DEFAULT_BI 100
+static int wil_p2p_start_listen(struct wil6210_priv *wil)
+{
+ struct wil_p2p_info *p2p = &wil->p2p;
+ u8 channel = p2p->listen_chan.hw_value;
+ int rc;
+
+ lockdep_assert_held(&wil->mutex);
+
+ rc = wmi_p2p_cfg(wil, channel, P2P_DEFAULT_BI);
+ if (rc) {
+ wil_err(wil, "wmi_p2p_cfg failed\n");
+ goto out;
+ }
+
+ rc = wmi_set_ssid(wil, strlen(P2P_WILDCARD_SSID), P2P_WILDCARD_SSID);
+ if (rc) {
+ wil_err(wil, "wmi_set_ssid failed\n");
+ goto out_stop;
+ }
+
+ rc = wmi_start_listen(wil);
+ if (rc) {
+ wil_err(wil, "wmi_start_listen failed\n");
+ goto out_stop;
+ }
+
+ INIT_WORK(&p2p->discovery_expired_work, wil_p2p_listen_expired);
+ mod_timer(&p2p->discovery_timer,
+ jiffies + msecs_to_jiffies(p2p->listen_duration));
+out_stop:
+ if (rc)
+ wmi_stop_discovery(wil);
+
+out:
+ return rc;
+}
+
bool wil_p2p_is_social_scan(struct cfg80211_scan_request *request)
{
return (request->n_channels == 1) &&
@@ -46,7 +83,7 @@ int wil_p2p_search(struct wil6210_priv *wil,
wil_dbg_misc(wil, "%s: channel %d\n",
__func__, P2P_DMG_SOCIAL_CHANNEL);
- mutex_lock(&wil->mutex);
+ lockdep_assert_held(&wil->mutex);
if (p2p->discovery_started) {
wil_err(wil, "%s: search failed. discovery already ongoing\n",
@@ -103,22 +140,19 @@ out_stop:
wmi_stop_discovery(wil);
out:
- mutex_unlock(&wil->mutex);
return rc;
}
-int wil_p2p_listen(struct wil6210_priv *wil, unsigned int duration,
- struct ieee80211_channel *chan, u64 *cookie)
+int wil_p2p_listen(struct wil6210_priv *wil, struct wireless_dev *wdev,
+ unsigned int duration, struct ieee80211_channel *chan,
+ u64 *cookie)
{
struct wil_p2p_info *p2p = &wil->p2p;
- u8 channel = P2P_DMG_SOCIAL_CHANNEL;
int rc;
if (!chan)
return -EINVAL;
- channel = chan->hw_value;
-
wil_dbg_misc(wil, "%s: duration %d\n", __func__, duration);
mutex_lock(&wil->mutex);
@@ -129,35 +163,30 @@ int wil_p2p_listen(struct wil6210_priv *wil, unsigned int duration,
goto out;
}
- rc = wmi_p2p_cfg(wil, channel, P2P_DEFAULT_BI);
- if (rc) {
- wil_err(wil, "%s: wmi_p2p_cfg failed\n", __func__);
- goto out;
- }
-
- rc = wmi_set_ssid(wil, strlen(P2P_WILDCARD_SSID), P2P_WILDCARD_SSID);
- if (rc) {
- wil_err(wil, "%s: wmi_set_ssid failed\n", __func__);
- goto out_stop;
- }
+ memcpy(&p2p->listen_chan, chan, sizeof(*chan));
+ *cookie = ++p2p->cookie;
+ p2p->listen_duration = duration;
- rc = wmi_start_listen(wil);
- if (rc) {
- wil_err(wil, "%s: wmi_start_listen failed\n", __func__);
- goto out_stop;
+ mutex_lock(&wil->p2p_wdev_mutex);
+ if (wil->scan_request) {
+ wil_dbg_misc(wil, "Delaying p2p listen until scan done\n");
+ p2p->pending_listen_wdev = wdev;
+ p2p->discovery_started = 1;
+ rc = 0;
+ mutex_unlock(&wil->p2p_wdev_mutex);
+ goto out;
}
+ mutex_unlock(&wil->p2p_wdev_mutex);
- memcpy(&p2p->listen_chan, chan, sizeof(*chan));
- *cookie = ++p2p->cookie;
+ rc = wil_p2p_start_listen(wil);
+ if (rc)
+ goto out;
p2p->discovery_started = 1;
- INIT_WORK(&p2p->discovery_expired_work, wil_p2p_listen_expired);
- mod_timer(&p2p->discovery_timer,
- jiffies + msecs_to_jiffies(duration));
+ wil->radio_wdev = wdev;
-out_stop:
- if (rc)
- wmi_stop_discovery(wil);
+ cfg80211_ready_on_channel(wdev, *cookie, chan, duration,
+ GFP_KERNEL);
out:
mutex_unlock(&wil->mutex);
@@ -170,9 +199,14 @@ u8 wil_p2p_stop_discovery(struct wil6210_priv *wil)
u8 started = p2p->discovery_started;
if (p2p->discovery_started) {
- del_timer_sync(&p2p->discovery_timer);
+ if (p2p->pending_listen_wdev) {
+ /* discovery not really started, only pending */
+ p2p->pending_listen_wdev = NULL;
+ } else {
+ del_timer_sync(&p2p->discovery_timer);
+ wmi_stop_discovery(wil);
+ }
p2p->discovery_started = 0;
- wmi_stop_discovery(wil);
}
return started;
@@ -257,13 +291,59 @@ void wil_p2p_search_expired(struct work_struct *work)
};
mutex_lock(&wil->p2p_wdev_mutex);
- cfg80211_scan_done(wil->scan_request, &info);
- wil->scan_request = NULL;
- wil->radio_wdev = wil->wdev;
+ if (wil->scan_request) {
+ cfg80211_scan_done(wil->scan_request, &info);
+ wil->scan_request = NULL;
+ wil->radio_wdev = wil->wdev;
+ }
mutex_unlock(&wil->p2p_wdev_mutex);
}
}
+void wil_p2p_delayed_listen_work(struct work_struct *work)
+{
+ struct wil_p2p_info *p2p = container_of(work,
+ struct wil_p2p_info, delayed_listen_work);
+ struct wil6210_priv *wil = container_of(p2p,
+ struct wil6210_priv, p2p);
+ int rc;
+
+ mutex_lock(&wil->mutex);
+
+ wil_dbg_misc(wil, "Checking delayed p2p listen\n");
+ if (!p2p->discovery_started || !p2p->pending_listen_wdev)
+ goto out;
+
+ mutex_lock(&wil->p2p_wdev_mutex);
+ if (wil->scan_request) {
+ /* another scan started, wait again... */
+ mutex_unlock(&wil->p2p_wdev_mutex);
+ goto out;
+ }
+ mutex_unlock(&wil->p2p_wdev_mutex);
+
+ rc = wil_p2p_start_listen(wil);
+
+ mutex_lock(&wil->p2p_wdev_mutex);
+ if (rc) {
+ cfg80211_remain_on_channel_expired(p2p->pending_listen_wdev,
+ p2p->cookie,
+ &p2p->listen_chan,
+ GFP_KERNEL);
+ wil->radio_wdev = wil->wdev;
+ } else {
+ cfg80211_ready_on_channel(p2p->pending_listen_wdev, p2p->cookie,
+ &p2p->listen_chan,
+ p2p->listen_duration, GFP_KERNEL);
+ wil->radio_wdev = p2p->pending_listen_wdev;
+ }
+ p2p->pending_listen_wdev = NULL;
+ mutex_unlock(&wil->p2p_wdev_mutex);
+
+out:
+ mutex_unlock(&wil->mutex);
+}
+
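
When a listen request arrives while a scan is active, the request is parked in pending_listen_wdev and replayed from delayed_listen_work after SCAN_COMPLETE. A tiny model of that defer/replay flow (names illustrative, not driver code):

	#include <stdbool.h>
	#include <stdio.h>

	static bool scan_active;
	static bool listen_pending;
	static bool listening;

	static void request_listen(void)
	{
		if (scan_active) {
			listen_pending = true;	/* park the request */
			return;
		}
		listening = true;
	}

	static void scan_complete(void)
	{
		scan_active = false;
		if (listen_pending) {		/* replay the parked request */
			listen_pending = false;
			listening = true;
		}
	}

	int main(void)
	{
		scan_active = true;
		request_listen();
		printf("listening=%d pending=%d\n", listening, listen_pending);
		scan_complete();
		printf("listening=%d pending=%d\n", listening, listen_pending);
		return 0;
	}
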
void wil_p2p_stop_radio_operations(struct wil6210_priv *wil)
{
struct wil_p2p_info *p2p = &wil->p2p;
@@ -272,8 +352,7 @@ void wil_p2p_stop_radio_operations(struct wil6210_priv *wil)
};
lockdep_assert_held(&wil->mutex);
-
- mutex_lock(&wil->p2p_wdev_mutex);
+ lockdep_assert_held(&wil->p2p_wdev_mutex);
if (wil->radio_wdev != wil->p2p_wdev)
goto out;
@@ -281,10 +360,8 @@ void wil_p2p_stop_radio_operations(struct wil6210_priv *wil)
if (!p2p->discovery_started) {
/* Regular scan on the p2p device */
if (wil->scan_request &&
- wil->scan_request->wdev == wil->p2p_wdev) {
- cfg80211_scan_done(wil->scan_request, &info);
- wil->scan_request = NULL;
- }
+ wil->scan_request->wdev == wil->p2p_wdev)
+ wil_abort_scan(wil, true);
goto out;
}
@@ -307,5 +384,4 @@ void wil_p2p_stop_radio_operations(struct wil6210_priv *wil)
out:
wil->radio_wdev = wil->wdev;
- mutex_unlock(&wil->p2p_wdev_mutex);
}
diff --git a/drivers/net/wireless/ath/wil6210/pmc.c b/drivers/net/wireless/ath/wil6210/pmc.c
index 5ca0307a3274..b9faae0278c9 100644
--- a/drivers/net/wireless/ath/wil6210/pmc.c
+++ b/drivers/net/wireless/ath/wil6210/pmc.c
@@ -54,6 +54,7 @@ void wil_pmc_alloc(struct wil6210_priv *wil,
struct pmc_ctx *pmc = &wil->pmc;
struct device *dev = wil_to_dev(wil);
struct wmi_pmc_cmd pmc_cmd = {0};
+ int last_cmd_err = -ENOMEM;
mutex_lock(&pmc->lock);
@@ -62,6 +63,29 @@ void wil_pmc_alloc(struct wil6210_priv *wil,
wil_err(wil, "%s: ERROR pmc is already allocated\n", __func__);
goto no_release_err;
}
+ if ((num_descriptors <= 0) || (descriptor_size <= 0)) {
+ wil_err(wil,
+ "Invalid params num_descriptors(%d), descriptor_size(%d)\n",
+ num_descriptors, descriptor_size);
+ last_cmd_err = -EINVAL;
+ goto no_release_err;
+ }
+
+ if (num_descriptors > (1 << WIL_RING_SIZE_ORDER_MAX)) {
+ wil_err(wil,
+ "num_descriptors(%d) exceeds max ring size %d\n",
+ num_descriptors, 1 << WIL_RING_SIZE_ORDER_MAX);
+ last_cmd_err = -EINVAL;
+ goto no_release_err;
+ }
+
+ if (num_descriptors > INT_MAX / descriptor_size) {
+ wil_err(wil,
+ "Overflow in num_descriptors(%d)*descriptor_size(%d)\n",
+ num_descriptors, descriptor_size);
+ last_cmd_err = -EINVAL;
+ goto no_release_err;
+ }
pmc->num_descriptors = num_descriptors;
pmc->descriptor_size = descriptor_size;
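
The new validation rejects descriptor counts whose byte total would overflow: num > INT_MAX / size is checked before any multiplication takes place. A standalone sketch of the same pattern:

	#include <limits.h>
	#include <stdio.h>

	static int checked_total(int num, int size, int *out)
	{
		if (num <= 0 || size <= 0)
			return -1;		/* -EINVAL in the driver */
		if (num > INT_MAX / size)
			return -1;		/* num * size would overflow */
		*out = num * size;
		return 0;
	}

	int main(void)
	{
		int total = 0;

		printf("%d\n", checked_total(1000, 2048, &total));	/* 0 */
		printf("%d\n", checked_total(INT_MAX / 2, 4, &total));	/* -1 */
		return 0;
	}
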
@@ -189,7 +213,7 @@ release_pmc_skb_list:
pmc->descriptors = NULL;
no_release_err:
- pmc->last_cmd_status = -ENOMEM;
+ pmc->last_cmd_status = last_cmd_err;
mutex_unlock(&pmc->lock);
}
@@ -295,7 +319,7 @@ ssize_t wil_pmc_read(struct file *filp, char __user *buf, size_t count,
size_t retval = 0;
unsigned long long idx;
loff_t offset;
- size_t pmc_size = pmc->descriptor_size * pmc->num_descriptors;
+ size_t pmc_size;
mutex_lock(&pmc->lock);
@@ -306,6 +330,8 @@ ssize_t wil_pmc_read(struct file *filp, char __user *buf, size_t count,
return -EPERM;
}
+ pmc_size = pmc->descriptor_size * pmc->num_descriptors;
+
wil_dbg_misc(wil,
"%s: size %u, pos %lld\n",
__func__, (unsigned)count, *f_pos);
@@ -345,7 +371,18 @@ loff_t wil_pmc_llseek(struct file *filp, loff_t off, int whence)
loff_t newpos;
struct wil6210_priv *wil = filp->private_data;
struct pmc_ctx *pmc = &wil->pmc;
- size_t pmc_size = pmc->descriptor_size * pmc->num_descriptors;
+ size_t pmc_size;
+
+ mutex_lock(&pmc->lock);
+
+ if (!wil_is_pmc_allocated(pmc)) {
+ wil_err(wil, "error, pmc is not allocated!\n");
+ pmc->last_cmd_status = -EPERM;
+ mutex_unlock(&pmc->lock);
+ return -EPERM;
+ }
+
+ pmc_size = pmc->descriptor_size * pmc->num_descriptors;
switch (whence) {
case 0: /* SEEK_SET */
@@ -361,15 +398,21 @@ loff_t wil_pmc_llseek(struct file *filp, loff_t off, int whence)
break;
default: /* can't happen */
- return -EINVAL;
+ newpos = -EINVAL;
+ goto out;
}
- if (newpos < 0)
- return -EINVAL;
+ if (newpos < 0) {
+ newpos = -EINVAL;
+ goto out;
+ }
if (newpos > pmc_size)
newpos = pmc_size;
filp->f_pos = newpos;
+out:
+ mutex_unlock(&pmc->lock);
+
return newpos;
}
diff --git a/drivers/net/wireless/ath/wil6210/txrx.c b/drivers/net/wireless/ath/wil6210/txrx.c
index 4c38520d4dd2..c1b4bb03e997 100644
--- a/drivers/net/wireless/ath/wil6210/txrx.c
+++ b/drivers/net/wireless/ath/wil6210/txrx.c
@@ -88,6 +88,18 @@ static inline int wil_vring_wmark_high(struct vring *vring)
return vring->size/4;
}
+/* returns true if the number of available descriptors is below wmark_low */
+static inline int wil_vring_avail_low(struct vring *vring)
+{
+ return wil_vring_avail_tx(vring) < wil_vring_wmark_low(vring);
+}
+
+/* returns true if the number of available descriptors is above wmark_high */
+static inline int wil_vring_avail_high(struct vring *vring)
+{
+ return wil_vring_avail_tx(vring) > wil_vring_wmark_high(vring);
+}
+
/* wil_val_in_range - check if value in [min,max) */
static inline bool wil_val_in_range(int val, int min, int max)
{
@@ -326,7 +338,7 @@ static void wil_rx_add_radiotap_header(struct wil6210_priv *wil,
if (skb_headroom(skb) < rtap_len &&
pskb_expand_head(skb, rtap_len, 0, GFP_ATOMIC)) {
- wil_err(wil, "Unable to expand headrom to %d\n", rtap_len);
+ wil_err(wil, "Unable to expand headroom to %d\n", rtap_len);
return;
}
@@ -1780,6 +1792,89 @@ static int wil_tx_vring(struct wil6210_priv *wil, struct vring *vring,
return rc;
}
+/**
+ * Check status of tx vrings and stop/wake net queues if needed
+ *
+ * This function does one of two checks:
+ * If check_stop is true, it checks whether the net queues need to be
+ * stopped; when the stop conditions are met, netif_tx_stop_all_queues()
+ * is called. If check_stop is false, it checks whether the net queues
+ * need to be woken; when the wake conditions are met,
+ * netif_tx_wake_all_queues() is called.
+ * vring is the vring currently being modified, either by adding
+ * descriptors (tx) or removing them (tx complete). It can be NULL when
+ * irrelevant (e.g. connect/disconnect events).
+ *
+ * The implementation stops the net queues if the modified vring has low
+ * descriptor availability, and wakes them only when no vring has low
+ * availability and the modified vring has high descriptor availability.
+ */
+static inline void __wil_update_net_queues(struct wil6210_priv *wil,
+ struct vring *vring,
+ bool check_stop)
+{
+ int i;
+
+ if (vring)
+ wil_dbg_txrx(wil, "vring %d, check_stop=%d, stopped=%d",
+ (int)(vring - wil->vring_tx), check_stop,
+ wil->net_queue_stopped);
+ else
+ wil_dbg_txrx(wil, "check_stop=%d, stopped=%d",
+ check_stop, wil->net_queue_stopped);
+
+ if (check_stop == wil->net_queue_stopped)
+ /* net queues already in desired state */
+ return;
+
+ if (check_stop) {
+ if (!vring || unlikely(wil_vring_avail_low(vring))) {
+ /* not enough room in the vring */
+ netif_tx_stop_all_queues(wil_to_ndev(wil));
+ wil->net_queue_stopped = true;
+ wil_dbg_txrx(wil, "netif_tx_stop called\n");
+ }
+ return;
+ }
+
+ /* check wake */
+ for (i = 0; i < WIL6210_MAX_TX_RINGS; i++) {
+ struct vring *cur_vring = &wil->vring_tx[i];
+ struct vring_tx_data *txdata = &wil->vring_tx_data[i];
+
+ if (!cur_vring->va || !txdata->enabled || cur_vring == vring)
+ continue;
+
+ if (wil_vring_avail_low(cur_vring)) {
+ wil_dbg_txrx(wil, "vring %d full, can't wake\n",
+ (int)(cur_vring - wil->vring_tx));
+ return;
+ }
+ }
+
+ if (!vring || wil_vring_avail_high(vring)) {
+ /* enough room in the vring */
+ wil_dbg_txrx(wil, "calling netif_tx_wake\n");
+ netif_tx_wake_all_queues(wil_to_ndev(wil));
+ wil->net_queue_stopped = false;
+ }
+}
+
+void wil_update_net_queues(struct wil6210_priv *wil, struct vring *vring,
+ bool check_stop)
+{
+ spin_lock(&wil->net_queue_lock);
+ __wil_update_net_queues(wil, vring, check_stop);
+ spin_unlock(&wil->net_queue_lock);
+}
+
+void wil_update_net_queues_bh(struct wil6210_priv *wil, struct vring *vring,
+ bool check_stop)
+{
+ spin_lock_bh(&wil->net_queue_lock);
+ __wil_update_net_queues(wil, vring, check_stop);
+ spin_unlock_bh(&wil->net_queue_lock);
+}
+
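
A userspace model of the stop/wake hysteresis above: queues stop when the modified ring drops below the low watermark and wake only once the ring is back above the high watermark. The size/8 and size/4 thresholds mirror wil_vring_wmark_low()/wil_vring_wmark_high() from this file; the rest of the scaffolding is illustrative.

	#include <stdbool.h>
	#include <stdio.h>

	struct ring { int size; int avail; };

	static bool avail_low(const struct ring *r)  { return r->avail < r->size / 8; }
	static bool avail_high(const struct ring *r) { return r->avail > r->size / 4; }

	static void update_queues(struct ring *r, bool *stopped, bool check_stop)
	{
		if (check_stop == *stopped)
			return;			/* already in the desired state */
		if (check_stop) {
			if (avail_low(r))
				*stopped = true;   /* netif_tx_stop_all_queues() */
			return;
		}
		if (avail_high(r))
			*stopped = false;	/* netif_tx_wake_all_queues() */
	}

	int main(void)
	{
		struct ring r = { 1024, 100 };
		bool stopped = false;

		update_queues(&r, &stopped, true);	/* 100 < 128: stop */
		printf("stopped=%d\n", stopped);
		r.avail = 200;				/* 200 <= 256: stay stopped */
		update_queues(&r, &stopped, false);
		printf("stopped=%d\n", stopped);
		r.avail = 300;				/* 300 > 256: wake */
		update_queues(&r, &stopped, false);
		printf("stopped=%d\n", stopped);
		return 0;
	}
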
netdev_tx_t wil_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
struct wil6210_priv *wil = ndev_to_wil(ndev);
@@ -1822,14 +1917,10 @@ netdev_tx_t wil_start_xmit(struct sk_buff *skb, struct net_device *ndev)
/* set up vring entry */
rc = wil_tx_vring(wil, vring, skb);
- /* do we still have enough room in the vring? */
- if (unlikely(wil_vring_avail_tx(vring) < wil_vring_wmark_low(vring))) {
- netif_tx_stop_all_queues(wil_to_ndev(wil));
- wil_dbg_txrx(wil, "netif_tx_stop : ring full\n");
- }
-
switch (rc) {
case 0:
+ /* shall we stop net queues? */
+ wil_update_net_queues_bh(wil, vring, true);
/* statistics will be updated on the tx_complete */
dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
@@ -1978,10 +2069,9 @@ int wil_tx_complete(struct wil6210_priv *wil, int ringid)
txdata->last_idle = get_cycles();
}
- if (wil_vring_avail_tx(vring) > wil_vring_wmark_high(vring)) {
- wil_dbg_txrx(wil, "netif_tx_wake : ring not full\n");
- netif_tx_wake_all_queues(wil_to_ndev(wil));
- }
+ /* shall we wake net queues? */
+ if (done)
+ wil_update_net_queues(wil, vring, false);
return done;
}
diff --git a/drivers/net/wireless/ath/wil6210/wil6210.h b/drivers/net/wireless/ath/wil6210/wil6210.h
index a949cd62bc4e..237e1666df2d 100644
--- a/drivers/net/wireless/ath/wil6210/wil6210.h
+++ b/drivers/net/wireless/ath/wil6210/wil6210.h
@@ -276,10 +276,11 @@ struct fw_map {
u32 to; /* linker address - to, exclusive */
u32 host; /* PCI/Host address - BAR0 + 0x880000 */
const char *name; /* for debugfs */
+ bool fw; /* true if FW mapping, false if UCODE mapping */
};
/* array size should be in sync with actual definition in the wmi.c */
-extern const struct fw_map fw_mapping[8];
+extern const struct fw_map fw_mapping[10];
/**
* mk_cidxtid - construct @cidxtid field
@@ -461,8 +462,11 @@ struct wil_p2p_info {
u8 discovery_started;
u8 p2p_dev_started;
u64 cookie;
+ struct wireless_dev *pending_listen_wdev;
+ unsigned int listen_duration;
struct timer_list discovery_timer; /* listen/search duration */
struct work_struct discovery_expired_work; /* listen/search expire */
+ struct work_struct delayed_listen_work; /* listen after scan done */
};
enum wil_sta_status {
@@ -624,6 +628,8 @@ struct wil6210_priv {
* - consumed in thread by wmi_event_worker
*/
spinlock_t wmi_ev_lock;
+ spinlock_t net_queue_lock; /* guarding stop/wake netif queue */
+ int net_queue_stopped; /* netif_tx_stop_all_queues invoked */
struct napi_struct napi_rx;
struct napi_struct napi_tx;
/* keep alive */
@@ -817,6 +823,10 @@ int wmi_delba_tx(struct wil6210_priv *wil, u8 ringid, u16 reason);
int wmi_delba_rx(struct wil6210_priv *wil, u8 cidxtid, u16 reason);
int wmi_addba_rx_resp(struct wil6210_priv *wil, u8 cid, u8 tid, u8 token,
u16 status, bool amsdu, u16 agg_wsize, u16 timeout);
+int wmi_ps_dev_profile_cfg(struct wil6210_priv *wil,
+ enum wmi_ps_profile_type ps_profile);
+int wmi_set_mgmt_retry(struct wil6210_priv *wil, u8 retry_short);
+int wmi_get_mgmt_retry(struct wil6210_priv *wil, u8 *retry_short);
int wil_addba_rx_request(struct wil6210_priv *wil, u8 cidxtid,
u8 dialog_token, __le16 ba_param_set,
__le16 ba_timeout, __le16 ba_seq_ctrl);
@@ -837,13 +847,15 @@ bool wil_p2p_is_social_scan(struct cfg80211_scan_request *request);
void wil_p2p_discovery_timer_fn(ulong x);
int wil_p2p_search(struct wil6210_priv *wil,
struct cfg80211_scan_request *request);
-int wil_p2p_listen(struct wil6210_priv *wil, unsigned int duration,
- struct ieee80211_channel *chan, u64 *cookie);
+int wil_p2p_listen(struct wil6210_priv *wil, struct wireless_dev *wdev,
+ unsigned int duration, struct ieee80211_channel *chan,
+ u64 *cookie);
u8 wil_p2p_stop_discovery(struct wil6210_priv *wil);
int wil_p2p_cancel_listen(struct wil6210_priv *wil, u64 cookie);
void wil_p2p_listen_expired(struct work_struct *work);
void wil_p2p_search_expired(struct work_struct *work);
void wil_p2p_stop_radio_operations(struct wil6210_priv *wil);
+void wil_p2p_delayed_listen_work(struct work_struct *work);
/* WMI for P2P */
int wmi_p2p_cfg(struct wil6210_priv *wil, int channel, int bi);
@@ -869,6 +881,9 @@ int wmi_pcp_start(struct wil6210_priv *wil, int bi, u8 wmi_nettype,
u8 chan, u8 hidden_ssid, u8 is_go);
int wmi_pcp_stop(struct wil6210_priv *wil);
int wmi_led_cfg(struct wil6210_priv *wil, bool enable);
+int wmi_abort_scan(struct wil6210_priv *wil);
+void wil_abort_scan(struct wil6210_priv *wil, bool sync);
+
void wil6210_disconnect(struct wil6210_priv *wil, const u8 *bssid,
u16 reason_code, bool from_event);
void wil_probe_client_flush(struct wil6210_priv *wil);
@@ -886,6 +901,10 @@ int wil_vring_init_bcast(struct wil6210_priv *wil, int id, int size);
int wil_bcast_init(struct wil6210_priv *wil);
void wil_bcast_fini(struct wil6210_priv *wil);
+void wil_update_net_queues(struct wil6210_priv *wil, struct vring *vring,
+ bool check_stop);
+void wil_update_net_queues_bh(struct wil6210_priv *wil, struct vring *vring,
+ bool check_stop);
netdev_tx_t wil_start_xmit(struct sk_buff *skb, struct net_device *ndev);
int wil_tx_complete(struct wil6210_priv *wil, int ringid);
void wil6210_unmask_irq_tx(struct wil6210_priv *wil);
diff --git a/drivers/net/wireless/ath/wil6210/wil_crash_dump.c b/drivers/net/wireless/ath/wil6210/wil_crash_dump.c
index b57d280946e0..d051eea47a54 100644
--- a/drivers/net/wireless/ath/wil6210/wil_crash_dump.c
+++ b/drivers/net/wireless/ath/wil6210/wil_crash_dump.c
@@ -36,6 +36,9 @@ static int wil_fw_get_crash_dump_bounds(struct wil6210_priv *wil,
for (i = 1; i < ARRAY_SIZE(fw_mapping); i++) {
map = &fw_mapping[i];
+ if (!map->fw)
+ continue;
+
if (map->host < host_min)
host_min = map->host;
@@ -73,6 +76,9 @@ int wil_fw_copy_crash_dump(struct wil6210_priv *wil, void *dest, u32 size)
for (i = 0; i < ARRAY_SIZE(fw_mapping); i++) {
map = &fw_mapping[i];
+ if (!map->fw)
+ continue;
+
data = (void * __force)wil->csr + HOSTADDR(map->host);
len = map->to - map->from;
offset = map->host - host_min;
diff --git a/drivers/net/wireless/ath/wil6210/wmi.c b/drivers/net/wireless/ath/wil6210/wmi.c
index fae4f1285d08..7585003bef67 100644
--- a/drivers/net/wireless/ath/wil6210/wmi.c
+++ b/drivers/net/wireless/ath/wil6210/wmi.c
@@ -84,19 +84,29 @@ MODULE_PARM_DESC(led_id,
* array size should be in sync with the declaration in the wil6210.h
*/
const struct fw_map fw_mapping[] = {
- {0x000000, 0x040000, 0x8c0000, "fw_code"}, /* FW code RAM 256k */
- {0x800000, 0x808000, 0x900000, "fw_data"}, /* FW data RAM 32k */
- {0x840000, 0x860000, 0x908000, "fw_peri"}, /* periph. data RAM 128k */
- {0x880000, 0x88a000, 0x880000, "rgf"}, /* various RGF 40k */
- {0x88a000, 0x88b000, 0x88a000, "AGC_tbl"}, /* AGC table 4k */
- {0x88b000, 0x88c000, 0x88b000, "rgf_ext"}, /* Pcie_ext_rgf 4k */
- {0x88c000, 0x88c200, 0x88c000, "mac_rgf_ext"}, /* mac_ext_rgf 512b */
- {0x8c0000, 0x949000, 0x8c0000, "upper"}, /* upper area 548k */
- /*
- * 920000..930000 ucode code RAM
- * 930000..932000 ucode data RAM
- * 932000..949000 back-door debug data
+ /* FW code RAM 256k */
+ {0x000000, 0x040000, 0x8c0000, "fw_code", true},
+ /* FW data RAM 32k */
+ {0x800000, 0x808000, 0x900000, "fw_data", true},
+ /* periph data 128k */
+ {0x840000, 0x860000, 0x908000, "fw_peri", true},
+ /* various RGF 40k */
+ {0x880000, 0x88a000, 0x880000, "rgf", true},
+ /* AGC table 4k */
+ {0x88a000, 0x88b000, 0x88a000, "AGC_tbl", true},
+ /* Pcie_ext_rgf 4k */
+ {0x88b000, 0x88c000, 0x88b000, "rgf_ext", true},
+ /* mac_ext_rgf 512b */
+ {0x88c000, 0x88c200, 0x88c000, "mac_rgf_ext", true},
+ /* upper area 548k */
+ {0x8c0000, 0x949000, 0x8c0000, "upper", true},
+ /* UCODE areas - accessible by debugfs blobs but not by
+ * wmi_addr_remap. UCODE areas MUST be added AFTER FW areas!
*/
+ /* ucode code RAM 128k */
+ {0x000000, 0x020000, 0x920000, "uc_code", false},
+ /* ucode data RAM 16k */
+ {0x800000, 0x804000, 0x940000, "uc_data", false},
};
struct blink_on_off_time led_blink_time[] = {
@@ -108,7 +118,7 @@ struct blink_on_off_time led_blink_time[] = {
u8 led_polarity = LED_POLARITY_LOW_ACTIVE;
/**
- * return AHB address for given firmware/ucode internal (linker) address
+ * return AHB address for given firmware internal (linker) address
* @x - internal address
* If the address has no valid AHB mapping, return 0
*/
@@ -117,7 +127,8 @@ static u32 wmi_addr_remap(u32 x)
uint i;
for (i = 0; i < ARRAY_SIZE(fw_mapping); i++) {
- if ((x >= fw_mapping[i].from) && (x < fw_mapping[i].to))
+ if (fw_mapping[i].fw &&
+ ((x >= fw_mapping[i].from) && (x < fw_mapping[i].to)))
return x + fw_mapping[i].host - fw_mapping[i].from;
}
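
With the fw flag in place, only FW entries participate in the linker-to-AHB translation, so the overlapping ucode ranges added to fw_mapping[] cannot shadow the FW code/data windows. A standalone sketch of the remap over an abbreviated table (values taken from the diff above):

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	struct map { uint32_t from, to, host; bool fw; };

	static const struct map maps[] = {
		{ 0x000000, 0x040000, 0x8c0000, true  },  /* fw_code */
		{ 0x000000, 0x020000, 0x920000, false },  /* uc_code: skipped */
	};

	static uint32_t addr_remap(uint32_t x)
	{
		unsigned int i;

		for (i = 0; i < sizeof(maps) / sizeof(maps[0]); i++)
			if (maps[i].fw && x >= maps[i].from && x < maps[i].to)
				return x + maps[i].host - maps[i].from;
		return 0;	/* no valid AHB mapping */
	}

	int main(void)
	{
		printf("0x%x\n", addr_remap(0x1000));	/* 0x8c1000 via fw_code */
		printf("0x%x\n", addr_remap(0x900000));	/* 0: unmapped */
		return 0;
	}
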
@@ -427,18 +438,24 @@ static void wmi_evt_scan_complete(struct wil6210_priv *wil, int id,
mutex_lock(&wil->p2p_wdev_mutex);
if (wil->scan_request) {
struct wmi_scan_complete_event *data = d;
+ int status = le32_to_cpu(data->status);
struct cfg80211_scan_info info = {
- .aborted = (data->status != WMI_SCAN_SUCCESS),
+ .aborted = ((status != WMI_SCAN_SUCCESS) &&
+ (status != WMI_SCAN_ABORT_REJECTED)),
};
- wil_dbg_wmi(wil, "SCAN_COMPLETE(0x%08x)\n", data->status);
+ wil_dbg_wmi(wil, "SCAN_COMPLETE(0x%08x)\n", status);
wil_dbg_misc(wil, "Complete scan_request 0x%p aborted %d\n",
wil->scan_request, info.aborted);
-
del_timer_sync(&wil->scan_timer);
cfg80211_scan_done(wil->scan_request, &info);
wil->radio_wdev = wil->wdev;
wil->scan_request = NULL;
+ wake_up_interruptible(&wil->wq);
+ if (wil->p2p.pending_listen_wdev) {
+ wil_dbg_misc(wil, "Scheduling delayed listen\n");
+ schedule_work(&wil->p2p.delayed_listen_work);
+ }
} else {
wil_err(wil, "SCAN_COMPLETE while not scanning\n");
}
@@ -548,7 +565,6 @@ static void wmi_evt_connect(struct wil6210_priv *wil, int id, void *d, int len)
if ((wdev->iftype == NL80211_IFTYPE_STATION) ||
(wdev->iftype == NL80211_IFTYPE_P2P_CLIENT)) {
if (rc) {
- netif_tx_stop_all_queues(ndev);
netif_carrier_off(ndev);
wil_err(wil,
"%s: cfg80211_connect_result with failure\n",
@@ -588,7 +604,7 @@ static void wmi_evt_connect(struct wil6210_priv *wil, int id, void *d, int len)
wil->sta[evt->cid].status = wil_sta_connected;
set_bit(wil_status_fwconnected, wil->status);
- netif_tx_wake_all_queues(ndev);
+ wil_update_net_queues_bh(wil, NULL, false);
out:
if (rc)
@@ -1564,6 +1580,112 @@ int wmi_addba_rx_resp(struct wil6210_priv *wil, u8 cid, u8 tid, u8 token,
return rc;
}
+int wmi_ps_dev_profile_cfg(struct wil6210_priv *wil,
+ enum wmi_ps_profile_type ps_profile)
+{
+ int rc;
+ struct wmi_ps_dev_profile_cfg_cmd cmd = {
+ .ps_profile = ps_profile,
+ };
+ struct {
+ struct wmi_cmd_hdr wmi;
+ struct wmi_ps_dev_profile_cfg_event evt;
+ } __packed reply;
+ u32 status;
+
+ wil_dbg_wmi(wil, "Setting ps dev profile %d\n", ps_profile);
+
+ reply.evt.status = cpu_to_le32(WMI_PS_CFG_CMD_STATUS_ERROR);
+
+ rc = wmi_call(wil, WMI_PS_DEV_PROFILE_CFG_CMDID, &cmd, sizeof(cmd),
+ WMI_PS_DEV_PROFILE_CFG_EVENTID, &reply, sizeof(reply),
+ 100);
+ if (rc)
+ return rc;
+
+ status = le32_to_cpu(reply.evt.status);
+
+ if (status != WMI_PS_CFG_CMD_STATUS_SUCCESS) {
+ wil_err(wil, "ps dev profile cfg failed with status %d\n",
+ status);
+ rc = -EINVAL;
+ }
+
+ return rc;
+}
+
+int wmi_set_mgmt_retry(struct wil6210_priv *wil, u8 retry_short)
+{
+ int rc;
+ struct wmi_set_mgmt_retry_limit_cmd cmd = {
+ .mgmt_retry_limit = retry_short,
+ };
+ struct {
+ struct wmi_cmd_hdr wmi;
+ struct wmi_set_mgmt_retry_limit_event evt;
+ } __packed reply;
+
+ wil_dbg_wmi(wil, "Setting mgmt retry short %d\n", retry_short);
+
+ if (!test_bit(WMI_FW_CAPABILITY_MGMT_RETRY_LIMIT, wil->fw_capabilities))
+ return -ENOTSUPP;
+
+ reply.evt.status = WMI_FW_STATUS_FAILURE;
+
+ rc = wmi_call(wil, WMI_SET_MGMT_RETRY_LIMIT_CMDID, &cmd, sizeof(cmd),
+ WMI_SET_MGMT_RETRY_LIMIT_EVENTID, &reply, sizeof(reply),
+ 100);
+ if (rc)
+ return rc;
+
+ if (reply.evt.status != WMI_FW_STATUS_SUCCESS) {
+ wil_err(wil, "set mgmt retry limit failed with status %d\n",
+ reply.evt.status);
+ rc = -EINVAL;
+ }
+
+ return rc;
+}
+
+int wmi_get_mgmt_retry(struct wil6210_priv *wil, u8 *retry_short)
+{
+ int rc;
+ struct {
+ struct wmi_cmd_hdr wmi;
+ struct wmi_get_mgmt_retry_limit_event evt;
+ } __packed reply;
+
+ wil_dbg_wmi(wil, "getting mgmt retry short\n");
+
+ if (!test_bit(WMI_FW_CAPABILITY_MGMT_RETRY_LIMIT, wil->fw_capabilities))
+ return -ENOTSUPP;
+
+ reply.evt.mgmt_retry_limit = 0;
+ rc = wmi_call(wil, WMI_GET_MGMT_RETRY_LIMIT_CMDID, NULL, 0,
+ WMI_GET_MGMT_RETRY_LIMIT_EVENTID, &reply, sizeof(reply),
+ 100);
+ if (rc)
+ return rc;
+
+ if (retry_short)
+ *retry_short = reply.evt.mgmt_retry_limit;
+
+ return 0;
+}
+
+int wmi_abort_scan(struct wil6210_priv *wil)
+{
+ int rc;
+
+ wil_dbg_wmi(wil, "sending WMI_ABORT_SCAN_CMDID\n");
+
+ rc = wmi_send(wil, WMI_ABORT_SCAN_CMDID, NULL, 0);
+ if (rc)
+ wil_err(wil, "Failed to abort scan (%d)\n", rc);
+
+ return rc;
+}
+
void wmi_event_flush(struct wil6210_priv *wil)
{
struct pending_wmi_event *evt, *t;
diff --git a/drivers/net/wireless/ath/wil6210/wmi.h b/drivers/net/wireless/ath/wil6210/wmi.h
index f430e8a80603..d93a4d490d24 100644
--- a/drivers/net/wireless/ath/wil6210/wmi.h
+++ b/drivers/net/wireless/ath/wil6210/wmi.h
@@ -35,6 +35,7 @@
#define WMI_MAC_LEN (6)
#define WMI_PROX_RANGE_NUM (3)
#define WMI_MAX_LOSS_DMG_BEACONS (20)
+#define MAX_NUM_OF_SECTORS (128)
/* Mailbox interface
* used for commands and events
@@ -51,8 +52,10 @@ enum wmi_mid {
* the host
*/
enum wmi_fw_capability {
- WMI_FW_CAPABILITY_FTM = 0,
- WMI_FW_CAPABILITY_PS_CONFIG = 1,
+ WMI_FW_CAPABILITY_FTM = 0,
+ WMI_FW_CAPABILITY_PS_CONFIG = 1,
+ WMI_FW_CAPABILITY_RF_SECTORS = 2,
+ WMI_FW_CAPABILITY_MGMT_RETRY_LIMIT = 3,
WMI_FW_CAPABILITY_MAX,
};
@@ -66,137 +69,149 @@ struct wmi_cmd_hdr {
/* List of Commands */
enum wmi_command_id {
- WMI_CONNECT_CMDID = 0x01,
- WMI_DISCONNECT_CMDID = 0x03,
- WMI_DISCONNECT_STA_CMDID = 0x04,
- WMI_START_SCAN_CMDID = 0x07,
- WMI_SET_BSS_FILTER_CMDID = 0x09,
- WMI_SET_PROBED_SSID_CMDID = 0x0A,
- WMI_SET_LISTEN_INT_CMDID = 0x0B,
- WMI_BCON_CTRL_CMDID = 0x0F,
- WMI_ADD_CIPHER_KEY_CMDID = 0x16,
- WMI_DELETE_CIPHER_KEY_CMDID = 0x17,
- WMI_PCP_CONF_CMDID = 0x18,
- WMI_SET_APPIE_CMDID = 0x3F,
- WMI_SET_WSC_STATUS_CMDID = 0x41,
- WMI_PXMT_RANGE_CFG_CMDID = 0x42,
- WMI_PXMT_SNR2_RANGE_CFG_CMDID = 0x43,
- WMI_MEM_READ_CMDID = 0x800,
- WMI_MEM_WR_CMDID = 0x801,
- WMI_ECHO_CMDID = 0x803,
- WMI_DEEP_ECHO_CMDID = 0x804,
- WMI_CONFIG_MAC_CMDID = 0x805,
- WMI_CONFIG_PHY_DEBUG_CMDID = 0x806,
- WMI_ADD_DEBUG_TX_PCKT_CMDID = 0x808,
- WMI_PHY_GET_STATISTICS_CMDID = 0x809,
- WMI_FS_TUNE_CMDID = 0x80A,
- WMI_CORR_MEASURE_CMDID = 0x80B,
- WMI_READ_RSSI_CMDID = 0x80C,
- WMI_TEMP_SENSE_CMDID = 0x80E,
- WMI_DC_CALIB_CMDID = 0x80F,
- WMI_SEND_TONE_CMDID = 0x810,
- WMI_IQ_TX_CALIB_CMDID = 0x811,
- WMI_IQ_RX_CALIB_CMDID = 0x812,
- WMI_SET_UCODE_IDLE_CMDID = 0x813,
- WMI_SET_WORK_MODE_CMDID = 0x815,
- WMI_LO_LEAKAGE_CALIB_CMDID = 0x816,
- WMI_MARLON_R_READ_CMDID = 0x818,
- WMI_MARLON_R_WRITE_CMDID = 0x819,
- WMI_MARLON_R_TXRX_SEL_CMDID = 0x81A,
- MAC_IO_STATIC_PARAMS_CMDID = 0x81B,
- MAC_IO_DYNAMIC_PARAMS_CMDID = 0x81C,
- WMI_SILENT_RSSI_CALIB_CMDID = 0x81D,
- WMI_RF_RX_TEST_CMDID = 0x81E,
- WMI_CFG_RX_CHAIN_CMDID = 0x820,
- WMI_VRING_CFG_CMDID = 0x821,
- WMI_BCAST_VRING_CFG_CMDID = 0x822,
- WMI_VRING_BA_EN_CMDID = 0x823,
- WMI_VRING_BA_DIS_CMDID = 0x824,
- WMI_RCP_ADDBA_RESP_CMDID = 0x825,
- WMI_RCP_DELBA_CMDID = 0x826,
- WMI_SET_SSID_CMDID = 0x827,
- WMI_GET_SSID_CMDID = 0x828,
- WMI_SET_PCP_CHANNEL_CMDID = 0x829,
- WMI_GET_PCP_CHANNEL_CMDID = 0x82A,
- WMI_SW_TX_REQ_CMDID = 0x82B,
- WMI_READ_MAC_RXQ_CMDID = 0x830,
- WMI_READ_MAC_TXQ_CMDID = 0x831,
- WMI_WRITE_MAC_RXQ_CMDID = 0x832,
- WMI_WRITE_MAC_TXQ_CMDID = 0x833,
- WMI_WRITE_MAC_XQ_FIELD_CMDID = 0x834,
- WMI_MLME_PUSH_CMDID = 0x835,
- WMI_BEAMFORMING_MGMT_CMDID = 0x836,
- WMI_BF_TXSS_MGMT_CMDID = 0x837,
- WMI_BF_SM_MGMT_CMDID = 0x838,
- WMI_BF_RXSS_MGMT_CMDID = 0x839,
- WMI_BF_TRIG_CMDID = 0x83A,
- WMI_LINK_MAINTAIN_CFG_WRITE_CMDID = 0x842,
- WMI_LINK_MAINTAIN_CFG_READ_CMDID = 0x843,
- WMI_SET_SECTORS_CMDID = 0x849,
- WMI_MAINTAIN_PAUSE_CMDID = 0x850,
- WMI_MAINTAIN_RESUME_CMDID = 0x851,
- WMI_RS_MGMT_CMDID = 0x852,
- WMI_RF_MGMT_CMDID = 0x853,
- WMI_THERMAL_THROTTLING_CTRL_CMDID = 0x854,
- WMI_THERMAL_THROTTLING_GET_STATUS_CMDID = 0x855,
- WMI_OTP_READ_CMDID = 0x856,
- WMI_OTP_WRITE_CMDID = 0x857,
- WMI_LED_CFG_CMDID = 0x858,
+ WMI_CONNECT_CMDID = 0x01,
+ WMI_DISCONNECT_CMDID = 0x03,
+ WMI_DISCONNECT_STA_CMDID = 0x04,
+ WMI_START_SCAN_CMDID = 0x07,
+ WMI_SET_BSS_FILTER_CMDID = 0x09,
+ WMI_SET_PROBED_SSID_CMDID = 0x0A,
+ WMI_SET_LISTEN_INT_CMDID = 0x0B,
+ WMI_BCON_CTRL_CMDID = 0x0F,
+ WMI_ADD_CIPHER_KEY_CMDID = 0x16,
+ WMI_DELETE_CIPHER_KEY_CMDID = 0x17,
+ WMI_PCP_CONF_CMDID = 0x18,
+ WMI_SET_APPIE_CMDID = 0x3F,
+ WMI_SET_WSC_STATUS_CMDID = 0x41,
+ WMI_PXMT_RANGE_CFG_CMDID = 0x42,
+ WMI_PXMT_SNR2_RANGE_CFG_CMDID = 0x43,
+ WMI_MEM_READ_CMDID = 0x800,
+ WMI_MEM_WR_CMDID = 0x801,
+ WMI_ECHO_CMDID = 0x803,
+ WMI_DEEP_ECHO_CMDID = 0x804,
+ WMI_CONFIG_MAC_CMDID = 0x805,
+ WMI_CONFIG_PHY_DEBUG_CMDID = 0x806,
+ WMI_ADD_DEBUG_TX_PCKT_CMDID = 0x808,
+ WMI_PHY_GET_STATISTICS_CMDID = 0x809,
+ WMI_FS_TUNE_CMDID = 0x80A,
+ WMI_CORR_MEASURE_CMDID = 0x80B,
+ WMI_READ_RSSI_CMDID = 0x80C,
+ WMI_TEMP_SENSE_CMDID = 0x80E,
+ WMI_DC_CALIB_CMDID = 0x80F,
+ WMI_SEND_TONE_CMDID = 0x810,
+ WMI_IQ_TX_CALIB_CMDID = 0x811,
+ WMI_IQ_RX_CALIB_CMDID = 0x812,
+ WMI_SET_UCODE_IDLE_CMDID = 0x813,
+ WMI_SET_WORK_MODE_CMDID = 0x815,
+ WMI_LO_LEAKAGE_CALIB_CMDID = 0x816,
+ WMI_MARLON_R_READ_CMDID = 0x818,
+ WMI_MARLON_R_WRITE_CMDID = 0x819,
+ WMI_MARLON_R_TXRX_SEL_CMDID = 0x81A,
+ MAC_IO_STATIC_PARAMS_CMDID = 0x81B,
+ MAC_IO_DYNAMIC_PARAMS_CMDID = 0x81C,
+ WMI_SILENT_RSSI_CALIB_CMDID = 0x81D,
+ WMI_RF_RX_TEST_CMDID = 0x81E,
+ WMI_CFG_RX_CHAIN_CMDID = 0x820,
+ WMI_VRING_CFG_CMDID = 0x821,
+ WMI_BCAST_VRING_CFG_CMDID = 0x822,
+ WMI_VRING_BA_EN_CMDID = 0x823,
+ WMI_VRING_BA_DIS_CMDID = 0x824,
+ WMI_RCP_ADDBA_RESP_CMDID = 0x825,
+ WMI_RCP_DELBA_CMDID = 0x826,
+ WMI_SET_SSID_CMDID = 0x827,
+ WMI_GET_SSID_CMDID = 0x828,
+ WMI_SET_PCP_CHANNEL_CMDID = 0x829,
+ WMI_GET_PCP_CHANNEL_CMDID = 0x82A,
+ WMI_SW_TX_REQ_CMDID = 0x82B,
+ WMI_READ_MAC_RXQ_CMDID = 0x830,
+ WMI_READ_MAC_TXQ_CMDID = 0x831,
+ WMI_WRITE_MAC_RXQ_CMDID = 0x832,
+ WMI_WRITE_MAC_TXQ_CMDID = 0x833,
+ WMI_WRITE_MAC_XQ_FIELD_CMDID = 0x834,
+ WMI_MLME_PUSH_CMDID = 0x835,
+ WMI_BEAMFORMING_MGMT_CMDID = 0x836,
+ WMI_BF_TXSS_MGMT_CMDID = 0x837,
+ WMI_BF_SM_MGMT_CMDID = 0x838,
+ WMI_BF_RXSS_MGMT_CMDID = 0x839,
+ WMI_BF_TRIG_CMDID = 0x83A,
+ WMI_LINK_MAINTAIN_CFG_WRITE_CMDID = 0x842,
+ WMI_LINK_MAINTAIN_CFG_READ_CMDID = 0x843,
+ WMI_SET_SECTORS_CMDID = 0x849,
+ WMI_MAINTAIN_PAUSE_CMDID = 0x850,
+ WMI_MAINTAIN_RESUME_CMDID = 0x851,
+ WMI_RS_MGMT_CMDID = 0x852,
+ WMI_RF_MGMT_CMDID = 0x853,
+ WMI_THERMAL_THROTTLING_CTRL_CMDID = 0x854,
+ WMI_THERMAL_THROTTLING_GET_STATUS_CMDID = 0x855,
+ WMI_OTP_READ_CMDID = 0x856,
+ WMI_OTP_WRITE_CMDID = 0x857,
+ WMI_LED_CFG_CMDID = 0x858,
/* Performance monitoring commands */
- WMI_BF_CTRL_CMDID = 0x862,
- WMI_NOTIFY_REQ_CMDID = 0x863,
- WMI_GET_STATUS_CMDID = 0x864,
- WMI_GET_RF_STATUS_CMDID = 0x866,
- WMI_GET_BASEBAND_TYPE_CMDID = 0x867,
- WMI_UNIT_TEST_CMDID = 0x900,
- WMI_HICCUP_CMDID = 0x901,
- WMI_FLASH_READ_CMDID = 0x902,
- WMI_FLASH_WRITE_CMDID = 0x903,
+ WMI_BF_CTRL_CMDID = 0x862,
+ WMI_NOTIFY_REQ_CMDID = 0x863,
+ WMI_GET_STATUS_CMDID = 0x864,
+ WMI_GET_RF_STATUS_CMDID = 0x866,
+ WMI_GET_BASEBAND_TYPE_CMDID = 0x867,
+ WMI_UNIT_TEST_CMDID = 0x900,
+ WMI_HICCUP_CMDID = 0x901,
+ WMI_FLASH_READ_CMDID = 0x902,
+ WMI_FLASH_WRITE_CMDID = 0x903,
/* Power management */
- WMI_TRAFFIC_DEFERRAL_CMDID = 0x904,
- WMI_TRAFFIC_RESUME_CMDID = 0x905,
+ WMI_TRAFFIC_DEFERRAL_CMDID = 0x904,
+ WMI_TRAFFIC_RESUME_CMDID = 0x905,
/* P2P */
- WMI_P2P_CFG_CMDID = 0x910,
- WMI_PORT_ALLOCATE_CMDID = 0x911,
- WMI_PORT_DELETE_CMDID = 0x912,
- WMI_POWER_MGMT_CFG_CMDID = 0x913,
- WMI_START_LISTEN_CMDID = 0x914,
- WMI_START_SEARCH_CMDID = 0x915,
- WMI_DISCOVERY_START_CMDID = 0x916,
- WMI_DISCOVERY_STOP_CMDID = 0x917,
- WMI_PCP_START_CMDID = 0x918,
- WMI_PCP_STOP_CMDID = 0x919,
- WMI_GET_PCP_FACTOR_CMDID = 0x91B,
+ WMI_P2P_CFG_CMDID = 0x910,
+ WMI_PORT_ALLOCATE_CMDID = 0x911,
+ WMI_PORT_DELETE_CMDID = 0x912,
+ WMI_POWER_MGMT_CFG_CMDID = 0x913,
+ WMI_START_LISTEN_CMDID = 0x914,
+ WMI_START_SEARCH_CMDID = 0x915,
+ WMI_DISCOVERY_START_CMDID = 0x916,
+ WMI_DISCOVERY_STOP_CMDID = 0x917,
+ WMI_PCP_START_CMDID = 0x918,
+ WMI_PCP_STOP_CMDID = 0x919,
+ WMI_GET_PCP_FACTOR_CMDID = 0x91B,
/* Power Save Configuration Commands */
- WMI_PS_DEV_PROFILE_CFG_CMDID = 0x91C,
+ WMI_PS_DEV_PROFILE_CFG_CMDID = 0x91C,
/* Not supported yet */
- WMI_PS_DEV_CFG_CMDID = 0x91D,
+ WMI_PS_DEV_CFG_CMDID = 0x91D,
/* Not supported yet */
- WMI_PS_DEV_CFG_READ_CMDID = 0x91E,
+ WMI_PS_DEV_CFG_READ_CMDID = 0x91E,
/* Per MAC Power Save Configuration commands
* Not supported yet
*/
- WMI_PS_MID_CFG_CMDID = 0x91F,
+ WMI_PS_MID_CFG_CMDID = 0x91F,
/* Not supported yet */
- WMI_PS_MID_CFG_READ_CMDID = 0x920,
- WMI_RS_CFG_CMDID = 0x921,
- WMI_GET_DETAILED_RS_RES_CMDID = 0x922,
- WMI_AOA_MEAS_CMDID = 0x923,
- WMI_TOF_SESSION_START_CMDID = 0x991,
- WMI_TOF_GET_CAPABILITIES_CMDID = 0x992,
- WMI_TOF_SET_LCR_CMDID = 0x993,
- WMI_TOF_SET_LCI_CMDID = 0x994,
- WMI_TOF_CHANNEL_INFO_CMDID = 0x995,
- WMI_SET_MAC_ADDRESS_CMDID = 0xF003,
- WMI_ABORT_SCAN_CMDID = 0xF007,
- WMI_SET_PROMISCUOUS_MODE_CMDID = 0xF041,
- WMI_GET_PMK_CMDID = 0xF048,
- WMI_SET_PASSPHRASE_CMDID = 0xF049,
- WMI_SEND_ASSOC_RES_CMDID = 0xF04A,
- WMI_SET_ASSOC_REQ_RELAY_CMDID = 0xF04B,
- WMI_MAC_ADDR_REQ_CMDID = 0xF04D,
- WMI_FW_VER_CMDID = 0xF04E,
- WMI_PMC_CMDID = 0xF04F,
+ WMI_PS_MID_CFG_READ_CMDID = 0x920,
+ WMI_RS_CFG_CMDID = 0x921,
+ WMI_GET_DETAILED_RS_RES_CMDID = 0x922,
+ WMI_AOA_MEAS_CMDID = 0x923,
+ WMI_SET_MGMT_RETRY_LIMIT_CMDID = 0x930,
+ WMI_GET_MGMT_RETRY_LIMIT_CMDID = 0x931,
+ WMI_TOF_SESSION_START_CMDID = 0x991,
+ WMI_TOF_GET_CAPABILITIES_CMDID = 0x992,
+ WMI_TOF_SET_LCR_CMDID = 0x993,
+ WMI_TOF_SET_LCI_CMDID = 0x994,
+ WMI_TOF_CHANNEL_INFO_CMDID = 0x995,
+ WMI_TOF_SET_TX_RX_OFFSET_CMDID = 0x997,
+ WMI_TOF_GET_TX_RX_OFFSET_CMDID = 0x998,
+ WMI_GET_RF_SECTOR_PARAMS_CMDID = 0x9A0,
+ WMI_SET_RF_SECTOR_PARAMS_CMDID = 0x9A1,
+ WMI_GET_SELECTED_RF_SECTOR_INDEX_CMDID = 0x9A2,
+ WMI_SET_SELECTED_RF_SECTOR_INDEX_CMDID = 0x9A3,
+ WMI_SET_RF_SECTOR_ON_CMDID = 0x9A4,
+ WMI_PRIO_TX_SECTORS_ORDER_CMDID = 0x9A5,
+ WMI_PRIO_TX_SECTORS_NUMBER_CMDID = 0x9A6,
+ WMI_PRIO_TX_SECTORS_SET_DEFAULT_CFG_CMDID = 0x9A7,
+ WMI_SET_MAC_ADDRESS_CMDID = 0xF003,
+ WMI_ABORT_SCAN_CMDID = 0xF007,
+ WMI_SET_PROMISCUOUS_MODE_CMDID = 0xF041,
+ WMI_GET_PMK_CMDID = 0xF048,
+ WMI_SET_PASSPHRASE_CMDID = 0xF049,
+ WMI_SEND_ASSOC_RES_CMDID = 0xF04A,
+ WMI_SET_ASSOC_REQ_RELAY_CMDID = 0xF04B,
+ WMI_MAC_ADDR_REQ_CMDID = 0xF04D,
+ WMI_FW_VER_CMDID = 0xF04E,
+ WMI_PMC_CMDID = 0xF04F,
};
/* WMI_CONNECT_CMDID */
@@ -879,6 +894,14 @@ struct wmi_aoa_meas_cmd {
__le32 meas_rf_mask;
} __packed;
+/* WMI_SET_MGMT_RETRY_LIMIT_CMDID */
+struct wmi_set_mgmt_retry_limit_cmd {
+ /* MAC retransmit limit for mgmt frames */
+ u8 mgmt_retry_limit;
+ /* alignment to 32b */
+ u8 reserved[3];
+} __packed;
+
enum wmi_tof_burst_duration {
WMI_TOF_BURST_DURATION_250_USEC = 2,
WMI_TOF_BURST_DURATION_500_USEC = 3,
@@ -942,6 +965,15 @@ struct wmi_tof_channel_info_cmd {
__le32 channel_info_report_request;
} __packed;
+/* WMI_TOF_SET_TX_RX_OFFSET_CMDID */
+struct wmi_tof_set_tx_rx_offset_cmd {
+ /* TX delay offset */
+ __le32 tx_offset;
+ /* RX delay offset */
+ __le32 rx_offset;
+ __le32 reserved[2];
+} __packed;
+
/* WMI Events
* List of Events (target to host)
*/
@@ -1035,12 +1067,24 @@ enum wmi_event_id {
WMI_RS_CFG_DONE_EVENTID = 0x1921,
WMI_GET_DETAILED_RS_RES_EVENTID = 0x1922,
WMI_AOA_MEAS_EVENTID = 0x1923,
+ WMI_SET_MGMT_RETRY_LIMIT_EVENTID = 0x1930,
+ WMI_GET_MGMT_RETRY_LIMIT_EVENTID = 0x1931,
WMI_TOF_SESSION_END_EVENTID = 0x1991,
WMI_TOF_GET_CAPABILITIES_EVENTID = 0x1992,
WMI_TOF_SET_LCR_EVENTID = 0x1993,
WMI_TOF_SET_LCI_EVENTID = 0x1994,
WMI_TOF_FTM_PER_DEST_RES_EVENTID = 0x1995,
WMI_TOF_CHANNEL_INFO_EVENTID = 0x1996,
+ WMI_TOF_SET_TX_RX_OFFSET_EVENTID = 0x1997,
+ WMI_TOF_GET_TX_RX_OFFSET_EVENTID = 0x1998,
+ WMI_GET_RF_SECTOR_PARAMS_DONE_EVENTID = 0x19A0,
+ WMI_SET_RF_SECTOR_PARAMS_DONE_EVENTID = 0x19A1,
+ WMI_GET_SELECTED_RF_SECTOR_INDEX_DONE_EVENTID = 0x19A2,
+ WMI_SET_SELECTED_RF_SECTOR_INDEX_DONE_EVENTID = 0x19A3,
+ WMI_SET_RF_SECTOR_ON_DONE_EVENTID = 0x19A4,
+ WMI_PRIO_TX_SECTORS_ORDER_EVENTID = 0x19A5,
+ WMI_PRIO_TX_SECTORS_NUMBER_EVENTID = 0x19A6,
+ WMI_PRIO_TX_SECTORS_SET_DEFAULT_CFG_EVENTID = 0x19A7,
WMI_SET_CHANNEL_EVENTID = 0x9000,
WMI_ASSOC_REQ_EVENTID = 0x9001,
WMI_EAPOL_RX_EVENTID = 0x9002,
@@ -1166,6 +1210,7 @@ enum baseband_type {
BASEBAND_SPARROW_M_B0 = 0x05,
BASEBAND_SPARROW_M_C0 = 0x06,
BASEBAND_SPARROW_M_D0 = 0x07,
+ BASEBAND_TALYN_M_A0 = 0x08,
};
/* WMI_GET_BASEBAND_TYPE_EVENTID */
@@ -2070,6 +2115,22 @@ struct wmi_aoa_meas_event {
u8 meas_data[WMI_AOA_MAX_DATA_SIZE];
} __packed;
+/* WMI_SET_MGMT_RETRY_LIMIT_EVENTID */
+struct wmi_set_mgmt_retry_limit_event {
+ /* enum wmi_fw_status */
+ u8 status;
+ /* alignment to 32b */
+ u8 reserved[3];
+} __packed;
+
+/* WMI_GET_MGMT_RETRY_LIMIT_EVENTID */
+struct wmi_get_mgmt_retry_limit_event {
+ /* MAC retransmit limit for mgmt frames */
+ u8 mgmt_retry_limit;
+ /* alignment to 32b */
+ u8 reserved[3];
+} __packed;
+
/* WMI_TOF_GET_CAPABILITIES_EVENTID */
struct wmi_tof_get_capabilities_event {
u8 ftm_capability;
@@ -2184,4 +2245,283 @@ struct wmi_tof_channel_info_event {
u8 report[0];
} __packed;
+/* WMI_TOF_SET_TX_RX_OFFSET_EVENTID */
+struct wmi_tof_set_tx_rx_offset_event {
+ /* enum wmi_fw_status */
+ u8 status;
+ u8 reserved[3];
+} __packed;
+
+/* WMI_TOF_GET_TX_RX_OFFSET_EVENTID */
+struct wmi_tof_get_tx_rx_offset_event {
+ /* enum wmi_fw_status */
+ u8 status;
+ u8 reserved1[3];
+ /* TX delay offset */
+ __le32 tx_offset;
+ /* RX delay offset */
+ __le32 rx_offset;
+ __le32 reserved2[2];
+} __packed;
+
+/* Result status codes for WMI commands */
+enum wmi_rf_sector_status {
+ WMI_RF_SECTOR_STATUS_SUCCESS = 0x00,
+ WMI_RF_SECTOR_STATUS_BAD_PARAMETERS_ERROR = 0x01,
+ WMI_RF_SECTOR_STATUS_BUSY_ERROR = 0x02,
+ WMI_RF_SECTOR_STATUS_NOT_SUPPORTED_ERROR = 0x03,
+};
+
+/* Types of the RF sector (TX,RX) */
+enum wmi_rf_sector_type {
+ WMI_RF_SECTOR_TYPE_RX = 0x00,
+ WMI_RF_SECTOR_TYPE_TX = 0x01,
+};
+
+/* Content of RF Sector (six 32-bits registers) */
+struct wmi_rf_sector_info {
+ /* Phase values for RF Chains[15-0] (2 bits per RF chain) */
+ __le32 psh_hi;
+ /* Phase values for RF Chains[31-16] (2 bits per RF chain) */
+ __le32 psh_lo;
+ /* ETYPE Bit0 for all RF chains[31-0] - bit0 of Edge amplifier gain
+ * index
+ */
+ __le32 etype0;
+ /* ETYPE Bit1 for all RF chains[31-0] - bit1 of Edge amplifier gain
+ * index
+ */
+ __le32 etype1;
+ /* ETYPE Bit2 for all RF chains[31-0] - bit2 of Edge amplifier gain
+ * index
+ */
+ __le32 etype2;
+ /* D-Type values (3 bits each) for 8 Distribution amplifiers + X16
+ * switch bits
+ */
+ __le32 dtype_swch_off;
+} __packed;
+
+#define WMI_INVALID_RF_SECTOR_INDEX (0xFFFF)
+#define WMI_MAX_RF_MODULES_NUM (8)
+
+/* WMI_GET_RF_SECTOR_PARAMS_CMD */
+struct wmi_get_rf_sector_params_cmd {
+ /* Sector number to be retrieved */
+ __le16 sector_idx;
+ /* enum wmi_rf_sector_type - type of requested RF sector */
+ u8 sector_type;
+ /* bitmask vector specifying destination RF modules */
+ u8 rf_modules_vec;
+} __packed;
+
+/* \WMI_GET_RF_SECTOR_PARAMS_DONE_EVENT */
+struct wmi_get_rf_sector_params_done_event {
+ /* result status of WMI_GET_RF_SECTOR_PARAMS_CMD (enum
+ * wmi_rf_sector_status)
+ */
+ u8 status;
+ /* align next field to U64 boundary */
+ u8 reserved[7];
+ /* TSF timestamp when RF sectors were retrieved */
+ __le64 tsf;
+ /* Content of RF sector retrieved from each RF module */
+ struct wmi_rf_sector_info sectors_info[WMI_MAX_RF_MODULES_NUM];
+} __packed;
+
+/* WMI_SET_RF_SECTOR_PARAMS_CMD */
+struct wmi_set_rf_sector_params_cmd {
+ /* Sector number to be written */
+ __le16 sector_idx;
+ /* enum wmi_rf_sector_type - type of requested RF sector */
+ u8 sector_type;
+ /* bitmask vector specifying destination RF modules */
+ u8 rf_modules_vec;
+ /* Content of RF sector to be written to each RF module */
+ struct wmi_rf_sector_info sectors_info[WMI_MAX_RF_MODULES_NUM];
+} __packed;
+
+/* \WMI_SET_RF_SECTOR_PARAMS_DONE_EVENT */
+struct wmi_set_rf_sector_params_done_event {
+ /* result status of WMI_SET_RF_SECTOR_PARAMS_CMD (enum
+ * wmi_rf_sector_status)
+ */
+ u8 status;
+} __packed;
+
+/* WMI_GET_SELECTED_RF_SECTOR_INDEX_CMD - Get RF sector index selected by
+ * TXSS/BRP for communication with specified CID
+ */
+struct wmi_get_selected_rf_sector_index_cmd {
+ /* Connection/Station ID in [0:7] range */
+ u8 cid;
+ /* type of requested RF sector (enum wmi_rf_sector_type) */
+ u8 sector_type;
+ /* align to U32 boundary */
+ u8 reserved[2];
+} __packed;
+
+/* \WMI_GET_SELECTED_RF_SECTOR_INDEX_DONE_EVENT - Returns retrieved RF sector
+ * index selected by TXSS/BRP for communication with specified CID
+ */
+struct wmi_get_selected_rf_sector_index_done_event {
+ /* Retrieved sector index selected in TXSS (for TX sector request) or
+ * BRP (for RX sector request)
+ */
+ __le16 sector_idx;
+ /* result status of WMI_GET_SELECTED_RF_SECTOR_INDEX_CMD (enum
+ * wmi_rf_sector_status)
+ */
+ u8 status;
+ /* align next field to U64 boundary */
+ u8 reserved[5];
+ /* TSF timestamp when result was retrieved */
+ __le64 tsf;
+} __packed;
+
+/* WMI_SET_SELECTED_RF_SECTOR_INDEX_CMD - Force RF sector index for
+ * communication with specified CID. Assumes that TXSS/BRP is disabled by
+ * other command
+ */
+struct wmi_set_selected_rf_sector_index_cmd {
+ /* Connection/Station ID in [0:7] range */
+ u8 cid;
+ /* type of requested RF sector (enum wmi_rf_sector_type) */
+ u8 sector_type;
+ /* Forced sector index */
+ __le16 sector_idx;
+} __packed;
+
+/* \WMI_SET_SELECTED_RF_SECTOR_INDEX_DONE_EVENT - Success/Fail status for
+ * WMI_SET_SELECTED_RF_SECTOR_INDEX_CMD
+ */
+struct wmi_set_selected_rf_sector_index_done_event {
+ /* result status of WMI_SET_SELECTED_RF_SECTOR_INDEX_CMD (enum
+ * wmi_rf_sector_status)
+ */
+ u8 status;
+ /* align to U32 boundary */
+ u8 reserved[3];
+} __packed;
+
+/* WMI_SET_RF_SECTOR_ON_CMD - Activates specified sector for specified rf
+ * modules
+ */
+struct wmi_set_rf_sector_on_cmd {
+ /* Sector index to be activated */
+ __le16 sector_idx;
+ /* type of requested RF sector (enum wmi_rf_sector_type) */
+ u8 sector_type;
+ /* bitmask vector specifying destination RF modules */
+ u8 rf_modules_vec;
+} __packed;
+
+/* \WMI_SET_RF_SECTOR_ON_DONE_EVENT - Success/Fail status for
+ * WMI_SET_RF_SECTOR_ON_CMD
+ */
+struct wmi_set_rf_sector_on_done_event {
+ /* result status of WMI_SET_RF_SECTOR_ON_CMD (enum
+ * wmi_rf_sector_status)
+ */
+ u8 status;
+ /* align to U32 boundary */
+ u8 reserved[3];
+} __packed;
+
+enum wmi_sector_sweep_type {
+ WMI_SECTOR_SWEEP_TYPE_TXSS = 0x00,
+ WMI_SECTOR_SWEEP_TYPE_BCON = 0x01,
+ WMI_SECTOR_SWEEP_TYPE_TXSS_AND_BCON = 0x02,
+ WMI_SECTOR_SWEEP_TYPE_NUM = 0x03,
+};
+
+/* WMI_PRIO_TX_SECTORS_ORDER_CMDID
+ *
+ * Set the order of TX sectors in TXSS and/or Beacon (AP).
+ *
+ * Returned event:
+ * - WMI_PRIO_TX_SECTORS_ORDER_EVENTID
+ */
+struct wmi_prio_tx_sectors_order_cmd {
+ /* tx sectors order to be applied, 0xFF for end of array */
+ u8 tx_sectors_priority_array[MAX_NUM_OF_SECTORS];
+ /* enum wmi_sector_sweep_type, TXSS and/or Beacon */
+ u8 sector_sweep_type;
+ /* needed only for TXSS configuration */
+ u8 cid;
+ /* alignment to 32b */
+ u8 reserved[2];
+} __packed;
+
+/* completion status codes */
+enum wmi_prio_tx_sectors_cmd_status {
+ WMI_PRIO_TX_SECT_CMD_STATUS_SUCCESS = 0x00,
+ WMI_PRIO_TX_SECT_CMD_STATUS_BAD_PARAM = 0x01,
+ /* other error */
+ WMI_PRIO_TX_SECT_CMD_STATUS_ERROR = 0x02,
+};
+
+/* WMI_PRIO_TX_SECTORS_ORDER_EVENTID */
+struct wmi_prio_tx_sectors_order_event {
+ /* enum wmi_prio_tx_sectors_cmd_status */
+ u8 status;
+ /* alignment to 32b */
+ u8 reserved[3];
+} __packed;
+
+struct wmi_prio_tx_sectors_num_cmd {
+ /* [0-128], 0 = No changes */
+ u8 beacon_number_of_sectors;
+ /* [0-128], 0 = No changes */
+ u8 txss_number_of_sectors;
+ /* [0-8] needed only for TXSS configuration */
+ u8 cid;
+} __packed;
+
+/* WMI_PRIO_TX_SECTORS_NUMBER_CMDID
+ *
+ * Set the number of active sectors in TXSS and/or Beacon.
+ *
+ * Returned event:
+ * - WMI_PRIO_TX_SECTORS_NUMBER_EVENTID
+ */
+struct wmi_prio_tx_sectors_number_cmd {
+ struct wmi_prio_tx_sectors_num_cmd active_sectors_num;
+ /* alignment to 32b */
+ u8 reserved;
+} __packed;
+
+/* WMI_PRIO_TX_SECTORS_NUMBER_EVENTID */
+struct wmi_prio_tx_sectors_number_event {
+ /* enum wmi_prio_tx_sectors_cmd_status */
+ u8 status;
+ /* alignment to 32b */
+ u8 reserved[3];
+} __packed;
+
+/* WMI_PRIO_TX_SECTORS_SET_DEFAULT_CFG_CMDID
+ *
+ * Set default sectors order and number (hard coded in board file)
+ * in TXSS and/or Beacon.
+ *
+ * Returned event:
+ * - WMI_PRIO_TX_SECTORS_SET_DEFAULT_CFG_EVENTID
+ */
+struct wmi_prio_tx_sectors_set_default_cfg_cmd {
+ /* enum wmi_sector_sweep_type, TXSS and/or Beacon */
+ u8 sector_sweep_type;
+ /* needed only for TXSS configuration */
+ u8 cid;
+ /* alignment to 32b */
+ u8 reserved[2];
+} __packed;
+
+/* WMI_PRIO_TX_SECTORS_SET_DEFAULT_CFG_EVENTID */
+struct wmi_prio_tx_sectors_set_default_cfg_event {
+ /* enum wmi_prio_tx_sectors_cmd_status */
+ u8 status;
+ /* alignment to 32b */
+ u8 reserved[3];
+} __packed;
+
#endif /* __WILOCITY_WMI_H__ */
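All of the wire structures added above are __packed with explicit little-endian fields, so their sizes are fixed by construction. As a sanity sketch (not part of the patch), compile-time checks with the kernel's BUILD_BUG_ON would look like:

static void wmi_check_wire_layouts(void)
{
	/* 1 byte limit + 3 bytes padding */
	BUILD_BUG_ON(sizeof(struct wmi_set_mgmt_retry_limit_cmd) != 4);
	/* two offsets + two reserved words */
	BUILD_BUG_ON(sizeof(struct wmi_tof_set_tx_rx_offset_cmd) != 16);
	/* six 32-bit registers per RF sector */
	BUILD_BUG_ON(sizeof(struct wmi_rf_sector_info) != 6 * sizeof(__le32));
	/* status + 7 pad + tsf, then one sector per RF module */
	BUILD_BUG_ON(sizeof(struct wmi_get_rf_sector_params_done_event) !=
		     16 + WMI_MAX_RF_MODULES_NUM *
		     sizeof(struct wmi_rf_sector_info));
}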
diff --git a/drivers/net/wireless/atmel/atmel.c b/drivers/net/wireless/atmel/atmel.c
index bf2e9a083c0c..eb92d5ab7a27 100644
--- a/drivers/net/wireless/atmel/atmel.c
+++ b/drivers/net/wireless/atmel/atmel.c
@@ -1295,14 +1295,6 @@ static struct iw_statistics *atmel_get_wireless_stats(struct net_device *dev)
return &priv->wstats;
}
-static int atmel_change_mtu(struct net_device *dev, int new_mtu)
-{
- if ((new_mtu < 68) || (new_mtu > 2312))
- return -EINVAL;
- dev->mtu = new_mtu;
- return 0;
-}
-
static int atmel_set_mac_address(struct net_device *dev, void *p)
{
struct sockaddr *addr = p;
@@ -1506,7 +1498,6 @@ static const struct file_operations atmel_proc_fops = {
static const struct net_device_ops atmel_netdev_ops = {
.ndo_open = atmel_open,
.ndo_stop = atmel_close,
- .ndo_change_mtu = atmel_change_mtu,
.ndo_set_mac_address = atmel_set_mac_address,
.ndo_start_xmit = start_tx,
.ndo_do_ioctl = atmel_ioctl,
@@ -1600,6 +1591,10 @@ struct net_device *init_atmel_card(unsigned short irq, unsigned long port,
dev->irq = irq;
dev->base_addr = port;
+ /* MTU range: 68 - 2312 */
+ dev->min_mtu = 68;
+ dev->max_mtu = MAX_WIRELESS_BODY - ETH_FCS_LEN;
+
SET_NETDEV_DEV(dev, sys_dev);
if ((rc = request_irq(dev->irq, service_interrupt, IRQF_SHARED, dev->name, dev))) {
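This hunk follows the tree-wide MTU-range conversion: instead of a private ndo_change_mtu that only range-checks, the driver publishes min_mtu/max_mtu and lets the core's dev_set_mtu() reject out-of-range values. The shape of the pattern, as a sketch with a placeholder name (my_setup):

static void my_setup(struct net_device *dev)
{
	/* The core rejects MTUs outside [min_mtu, max_mtu] in
	 * dev_set_mtu(), so no .ndo_change_mtu callback is needed
	 * for a pure range check.
	 */
	dev->min_mtu = 68;	/* minimum IPv4 MTU */
	dev->max_mtu = 2312;	/* 802.11 frame body limit here */
}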
diff --git a/drivers/net/wireless/broadcom/b43/main.c b/drivers/net/wireless/broadcom/b43/main.c
index 6e5d9095b195..52f3541ecbcf 100644
--- a/drivers/net/wireless/broadcom/b43/main.c
+++ b/drivers/net/wireless/broadcom/b43/main.c
@@ -5591,7 +5591,9 @@ static struct b43_wl *b43_wireless_init(struct b43_bus_dev *dev)
BIT(NL80211_IFTYPE_AP) |
BIT(NL80211_IFTYPE_MESH_POINT) |
BIT(NL80211_IFTYPE_STATION) |
+#ifdef CONFIG_WIRELESS_WDS
BIT(NL80211_IFTYPE_WDS) |
+#endif
BIT(NL80211_IFTYPE_ADHOC);
hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
diff --git a/drivers/net/wireless/broadcom/b43legacy/main.c b/drivers/net/wireless/broadcom/b43legacy/main.c
index 83770d2ea057..e97ab2b91663 100644
--- a/drivers/net/wireless/broadcom/b43legacy/main.c
+++ b/drivers/net/wireless/broadcom/b43legacy/main.c
@@ -3838,7 +3838,9 @@ static int b43legacy_wireless_init(struct ssb_device *dev)
hw->wiphy->interface_modes =
BIT(NL80211_IFTYPE_AP) |
BIT(NL80211_IFTYPE_STATION) |
+#ifdef CONFIG_WIRELESS_WDS
BIT(NL80211_IFTYPE_WDS) |
+#endif
BIT(NL80211_IFTYPE_ADHOC);
hw->queues = 1; /* FIXME: hardware has more queues */
hw->max_rates = 2;
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/Makefile b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/Makefile
index 9e4b505ca593..0383ba559edc 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/Makefile
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/Makefile
@@ -19,8 +19,6 @@ ccflags-y += \
-Idrivers/net/wireless/broadcom/brcm80211/brcmfmac \
-Idrivers/net/wireless/broadcom/brcm80211/include
-ccflags-y += -D__CHECK_ENDIAN__
-
obj-$(CONFIG_BRCMFMAC) += brcmfmac.o
brcmfmac-objs += \
cfg80211.o \
@@ -35,7 +33,8 @@ brcmfmac-objs += \
firmware.o \
feature.o \
btcoex.o \
- vendor.o
+ vendor.o \
+ pno.o
brcmfmac-$(CONFIG_BRCMFMAC_PROTO_BCDC) += \
bcdc.o
brcmfmac-$(CONFIG_BRCMFMAC_PROTO_MSGBUF) += \
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcdc.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcdc.c
index 038a960c5104..384b1873e7e3 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcdc.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcdc.c
@@ -326,6 +326,17 @@ brcmf_proto_bcdc_hdrpull(struct brcmf_pub *drvr, bool do_fws,
return 0;
}
+static int brcmf_proto_bcdc_tx_queue_data(struct brcmf_pub *drvr, int ifidx,
+ struct sk_buff *skb)
+{
+ struct brcmf_if *ifp = brcmf_get_ifp(drvr, ifidx);
+
+ if (!brcmf_fws_queue_skbs(drvr->fws))
+ return brcmf_proto_txdata(drvr, ifidx, 0, skb);
+
+ return brcmf_fws_process_skb(ifp, skb);
+}
+
static int
brcmf_proto_bcdc_txdata(struct brcmf_pub *drvr, int ifidx, u8 offset,
struct sk_buff *pktbuf)
@@ -375,6 +386,7 @@ int brcmf_proto_bcdc_attach(struct brcmf_pub *drvr)
drvr->proto->hdrpull = brcmf_proto_bcdc_hdrpull;
drvr->proto->query_dcmd = brcmf_proto_bcdc_query_dcmd;
drvr->proto->set_dcmd = brcmf_proto_bcdc_set_dcmd;
+ drvr->proto->tx_queue_data = brcmf_proto_bcdc_tx_queue_data;
drvr->proto->txdata = brcmf_proto_bcdc_txdata;
drvr->proto->configure_addr_mode = brcmf_proto_bcdc_configure_addr_mode;
drvr->proto->delete_peer = brcmf_proto_bcdc_delete_peer;
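The new tx_queue_data op moves the queue-or-send decision into the protocol layer: BCDC consults brcmf_fws_queue_skbs() and either hands the skb to fwsignal or transmits it directly. The core presumably reaches the op through a thin wrapper in proto.h (not part of this hunk); its assumed shape:

/* Assumed dispatch wrapper; the real inline lives in proto.h. */
static inline int brcmf_proto_tx_queue_data(struct brcmf_pub *drvr, int ifidx,
					    struct sk_buff *skb)
{
	return drvr->proto->tx_queue_data(drvr, ifidx, skb);
}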
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bus.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bus.h
index 2b246545647a..e21f7600122b 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bus.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bus.h
@@ -22,10 +22,12 @@
/* IDs of the 6 default common rings of msgbuf protocol */
#define BRCMF_H2D_MSGRING_CONTROL_SUBMIT 0
#define BRCMF_H2D_MSGRING_RXPOST_SUBMIT 1
+#define BRCMF_H2D_MSGRING_FLOWRING_IDSTART 2
#define BRCMF_D2H_MSGRING_CONTROL_COMPLETE 2
#define BRCMF_D2H_MSGRING_TX_COMPLETE 3
#define BRCMF_D2H_MSGRING_RX_COMPLETE 4
+
#define BRCMF_NROF_H2D_COMMON_MSGRINGS 2
#define BRCMF_NROF_D2H_COMMON_MSGRINGS 3
#define BRCMF_NROF_COMMON_MSGRINGS (BRCMF_NROF_H2D_COMMON_MSGRINGS + \
@@ -95,14 +97,18 @@ struct brcmf_bus_ops {
* @flowrings: commonrings which are dynamically created and destroyed for data.
* @rx_dataoffset: if set then all rx data has this offset.
* @max_rxbufpost: maximum number of buffers to post for rx.
- * @nrof_flowrings: number of flowrings.
+ * @max_flowrings: maximum number of tx flow rings supported.
+ * @max_submissionrings: maximum number of submission rings (h2d) supported.
+ * @max_completionrings: maximum number of completion rings (d2h) supported.
*/
struct brcmf_bus_msgbuf {
struct brcmf_commonring *commonrings[BRCMF_NROF_COMMON_MSGRINGS];
struct brcmf_commonring **flowrings;
u32 rx_dataoffset;
u32 max_rxbufpost;
- u32 nrof_flowrings;
+ u16 max_flowrings;
+ u16 max_submissionrings;
+ u16 max_completionrings;
};
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
index 78d9966a3957..7ffc4aba5bab 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
@@ -32,6 +32,7 @@
#include "fwil_types.h"
#include "p2p.h"
#include "btcoex.h"
+#include "pno.h"
#include "cfg80211.h"
#include "feature.h"
#include "fwil.h"
@@ -41,16 +42,6 @@
#include "common.h"
#define BRCMF_SCAN_IE_LEN_MAX 2048
-#define BRCMF_PNO_VERSION 2
-#define BRCMF_PNO_TIME 30
-#define BRCMF_PNO_REPEAT 4
-#define BRCMF_PNO_FREQ_EXPO_MAX 3
-#define BRCMF_PNO_MAX_PFN_COUNT 16
-#define BRCMF_PNO_ENABLE_ADAPTSCAN_BIT 6
-#define BRCMF_PNO_HIDDEN_BIT 2
-#define BRCMF_PNO_WPA_AUTH_ANY 0xFFFFFFFF
-#define BRCMF_PNO_SCAN_COMPLETE 1
-#define BRCMF_PNO_SCAN_INCOMPLETE 0
#define WPA_OUI "\x00\x50\xF2" /* WPA OUI */
#define WPA_OUI_TYPE 1
@@ -414,23 +405,24 @@ static int brcmf_vif_change_validate(struct brcmf_cfg80211_info *cfg,
struct brcmf_cfg80211_vif *vif,
enum nl80211_iftype new_type)
{
- int iftype_num[NUM_NL80211_IFTYPES];
struct brcmf_cfg80211_vif *pos;
bool check_combos = false;
int ret = 0;
+ struct iface_combination_params params = {
+ .num_different_channels = 1,
+ };
- memset(&iftype_num[0], 0, sizeof(iftype_num));
list_for_each_entry(pos, &cfg->vif_list, list)
if (pos == vif) {
- iftype_num[new_type]++;
+ params.iftype_num[new_type]++;
} else {
/* concurrent interfaces so need check combinations */
check_combos = true;
- iftype_num[pos->wdev.iftype]++;
+ params.iftype_num[pos->wdev.iftype]++;
}
if (check_combos)
- ret = cfg80211_check_combinations(cfg->wiphy, 1, 0, iftype_num);
+ ret = cfg80211_check_combinations(cfg->wiphy, &params);
return ret;
}
@@ -438,15 +430,16 @@ static int brcmf_vif_change_validate(struct brcmf_cfg80211_info *cfg,
static int brcmf_vif_add_validate(struct brcmf_cfg80211_info *cfg,
enum nl80211_iftype new_type)
{
- int iftype_num[NUM_NL80211_IFTYPES];
struct brcmf_cfg80211_vif *pos;
+ struct iface_combination_params params = {
+ .num_different_channels = 1,
+ };
- memset(&iftype_num[0], 0, sizeof(iftype_num));
list_for_each_entry(pos, &cfg->vif_list, list)
- iftype_num[pos->wdev.iftype]++;
+ params.iftype_num[pos->wdev.iftype]++;
- iftype_num[new_type]++;
- return cfg80211_check_combinations(cfg->wiphy, 1, 0, iftype_num);
+ params.iftype_num[new_type]++;
+ return cfg80211_check_combinations(cfg->wiphy, &params);
}
static void convert_key_from_CPU(struct brcmf_wsec_key *key,
@@ -766,12 +759,12 @@ s32 brcmf_notify_escan_complete(struct brcmf_cfg80211_info *cfg,
brcmf_scan_config_mpc(ifp, 1);
/*
- * e-scan can be initiated by scheduled scan
+ * e-scan can be initiated internally
* which takes precedence.
*/
- if (cfg->sched_escan) {
+ if (cfg->internal_escan) {
brcmf_dbg(SCAN, "scheduled scan completed\n");
- cfg->sched_escan = false;
+ cfg->internal_escan = false;
if (!aborted)
cfg80211_sched_scan_results(cfg_to_wiphy(cfg));
} else if (scan_request) {
@@ -1089,9 +1082,9 @@ exit:
}
static s32
-brcmf_do_escan(struct brcmf_cfg80211_info *cfg, struct wiphy *wiphy,
- struct brcmf_if *ifp, struct cfg80211_scan_request *request)
+brcmf_do_escan(struct brcmf_if *ifp, struct cfg80211_scan_request *request)
{
+ struct brcmf_cfg80211_info *cfg = ifp->drvr->config;
s32 err;
u32 passive_scan;
struct brcmf_scan_results *results;
@@ -1099,7 +1092,7 @@ brcmf_do_escan(struct brcmf_cfg80211_info *cfg, struct wiphy *wiphy,
brcmf_dbg(SCAN, "Enter\n");
escan->ifp = ifp;
- escan->wiphy = wiphy;
+ escan->wiphy = cfg->wiphy;
escan->escan_state = WL_ESCAN_STATE_SCANNING;
passive_scan = cfg->active_scan ? 0 : 1;
err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_PASSIVE_SCAN,
@@ -1179,7 +1172,7 @@ brcmf_cfg80211_escan(struct wiphy *wiphy, struct brcmf_cfg80211_vif *vif,
if (err)
goto scan_out;
- err = brcmf_do_escan(cfg, wiphy, vif->ifp, request);
+ err = brcmf_do_escan(vif->ifp, request);
if (err)
goto scan_out;
} else {
@@ -3022,7 +3015,7 @@ void brcmf_abort_scanning(struct brcmf_cfg80211_info *cfg)
struct escan_info *escan = &cfg->escan_info;
set_bit(BRCMF_SCAN_STATUS_ABORT, &cfg->scan_status);
- if (cfg->scan_request) {
+ if (cfg->internal_escan || cfg->scan_request) {
escan->escan_state = WL_ESCAN_STATE_IDLE;
brcmf_notify_escan_complete(cfg, escan->ifp, true, true);
}
@@ -3045,7 +3038,7 @@ static void brcmf_escan_timeout(unsigned long data)
struct brcmf_cfg80211_info *cfg =
(struct brcmf_cfg80211_info *)data;
- if (cfg->scan_request) {
+ if (cfg->internal_escan || cfg->scan_request) {
brcmf_err("timer expired\n");
schedule_work(&cfg->escan_timeout_work);
}
@@ -3128,7 +3121,7 @@ brcmf_cfg80211_escan_handler(struct brcmf_if *ifp,
if (brcmf_p2p_scan_finding_common_channel(cfg, bss_info_le))
goto exit;
- if (!cfg->scan_request) {
+ if (!cfg->internal_escan && !cfg->scan_request) {
brcmf_dbg(SCAN, "result without cfg80211 request\n");
goto exit;
}
@@ -3174,7 +3167,7 @@ brcmf_cfg80211_escan_handler(struct brcmf_if *ifp,
cfg->escan_info.escan_state = WL_ESCAN_STATE_IDLE;
if (brcmf_p2p_scan_finding_common_channel(cfg, NULL))
goto exit;
- if (cfg->scan_request) {
+ if (cfg->internal_escan || cfg->scan_request) {
brcmf_inform_bss(cfg);
aborted = status != BRCMF_E_STATUS_SUCCESS;
brcmf_notify_escan_complete(cfg, ifp, aborted, false);
@@ -3199,6 +3192,95 @@ static void brcmf_init_escan(struct brcmf_cfg80211_info *cfg)
brcmf_cfg80211_escan_timeout_worker);
}
+static struct cfg80211_scan_request *
+brcmf_alloc_internal_escan_request(struct wiphy *wiphy, u32 n_netinfo)
+{
+ struct cfg80211_scan_request *req;
+ size_t req_size;
+
+ req_size = sizeof(*req) +
+ n_netinfo * sizeof(req->channels[0]) +
+ n_netinfo * sizeof(*req->ssids);
+
+ req = kzalloc(req_size, GFP_KERNEL);
+ if (req) {
+ req->wiphy = wiphy;
+ req->ssids = (void *)(&req->channels[0]) +
+ n_netinfo * sizeof(req->channels[0]);
+ }
+ return req;
+}
+
+static int brcmf_internal_escan_add_info(struct cfg80211_scan_request *req,
+ u8 *ssid, u8 ssid_len, u8 channel)
+{
+ struct ieee80211_channel *chan;
+ enum nl80211_band band;
+ int freq;
+
+ if (channel <= CH_MAX_2G_CHANNEL)
+ band = NL80211_BAND_2GHZ;
+ else
+ band = NL80211_BAND_5GHZ;
+
+ freq = ieee80211_channel_to_frequency(channel, band);
+ if (!freq)
+ return -EINVAL;
+
+ chan = ieee80211_get_channel(req->wiphy, freq);
+ if (!chan)
+ return -EINVAL;
+
+ req->channels[req->n_channels++] = chan;
+ memcpy(req->ssids[req->n_ssids].ssid, ssid, ssid_len);
+ req->ssids[req->n_ssids++].ssid_len = ssid_len;
+
+ return 0;
+}
+
+static int brcmf_start_internal_escan(struct brcmf_if *ifp,
+ struct cfg80211_scan_request *request)
+{
+ struct brcmf_cfg80211_info *cfg = ifp->drvr->config;
+ int err;
+
+ if (test_bit(BRCMF_SCAN_STATUS_BUSY, &cfg->scan_status)) {
+ /* Abort any on-going scan */
+ brcmf_abort_scanning(cfg);
+ }
+
+ set_bit(BRCMF_SCAN_STATUS_BUSY, &cfg->scan_status);
+ cfg->escan_info.run = brcmf_run_escan;
+ err = brcmf_do_escan(ifp, request);
+ if (err) {
+ clear_bit(BRCMF_SCAN_STATUS_BUSY, &cfg->scan_status);
+ return err;
+ }
+ cfg->internal_escan = true;
+ return 0;
+}
+
+static struct brcmf_pno_net_info_le *
+brcmf_get_netinfo_array(struct brcmf_pno_scanresults_le *pfn_v1)
+{
+ struct brcmf_pno_scanresults_v2_le *pfn_v2;
+ struct brcmf_pno_net_info_le *netinfo;
+
+ switch (pfn_v1->version) {
+ default:
+ WARN_ON(1);
+ /* fall-thru */
+ case cpu_to_le32(1):
+ netinfo = (struct brcmf_pno_net_info_le *)(pfn_v1 + 1);
+ break;
+ case cpu_to_le32(2):
+ pfn_v2 = (struct brcmf_pno_scanresults_v2_le *)pfn_v1;
+ netinfo = (struct brcmf_pno_net_info_le *)(pfn_v2 + 1);
+ break;
+ }
+
+ return netinfo;
+}
+
/* PFN result doesn't have all the info required by the supplicant
* (e.g. IEs). Do a targeted escan so that sched scan results are reported
* via wl_inform_single_bss in the required format. Escan does require the
@@ -3212,12 +3294,8 @@ brcmf_notify_sched_scan_results(struct brcmf_if *ifp,
struct brcmf_cfg80211_info *cfg = ifp->drvr->config;
struct brcmf_pno_net_info_le *netinfo, *netinfo_start;
struct cfg80211_scan_request *request = NULL;
- struct cfg80211_ssid *ssid = NULL;
- struct ieee80211_channel *channel = NULL;
struct wiphy *wiphy = cfg_to_wiphy(cfg);
- int err = 0;
- int channel_req = 0;
- int band = 0;
+ int i, err = 0;
struct brcmf_pno_scanresults_le *pfn_result;
u32 result_count;
u32 status;
@@ -3243,254 +3321,86 @@ brcmf_notify_sched_scan_results(struct brcmf_if *ifp,
*/
WARN_ON(status != BRCMF_PNO_SCAN_COMPLETE);
brcmf_dbg(SCAN, "PFN NET FOUND event. count: %d\n", result_count);
- if (result_count > 0) {
- int i;
-
- request = kzalloc(sizeof(*request), GFP_KERNEL);
- ssid = kcalloc(result_count, sizeof(*ssid), GFP_KERNEL);
- channel = kcalloc(result_count, sizeof(*channel), GFP_KERNEL);
- if (!request || !ssid || !channel) {
- err = -ENOMEM;
- goto out_err;
- }
-
- request->wiphy = wiphy;
- data += sizeof(struct brcmf_pno_scanresults_le);
- netinfo_start = (struct brcmf_pno_net_info_le *)data;
-
- for (i = 0; i < result_count; i++) {
- netinfo = &netinfo_start[i];
- if (!netinfo) {
- brcmf_err("Invalid netinfo ptr. index: %d\n",
- i);
- err = -EINVAL;
- goto out_err;
- }
-
- brcmf_dbg(SCAN, "SSID:%s Channel:%d\n",
- netinfo->SSID, netinfo->channel);
- memcpy(ssid[i].ssid, netinfo->SSID, netinfo->SSID_len);
- ssid[i].ssid_len = netinfo->SSID_len;
- request->n_ssids++;
-
- channel_req = netinfo->channel;
- if (channel_req <= CH_MAX_2G_CHANNEL)
- band = NL80211_BAND_2GHZ;
- else
- band = NL80211_BAND_5GHZ;
- channel[i].center_freq =
- ieee80211_channel_to_frequency(channel_req,
- band);
- channel[i].band = band;
- channel[i].flags |= IEEE80211_CHAN_NO_HT40;
- request->channels[i] = &channel[i];
- request->n_channels++;
- }
-
- /* assign parsed ssid array */
- if (request->n_ssids)
- request->ssids = &ssid[0];
-
- if (test_bit(BRCMF_SCAN_STATUS_BUSY, &cfg->scan_status)) {
- /* Abort any on-going scan */
- brcmf_abort_scanning(cfg);
- }
-
- set_bit(BRCMF_SCAN_STATUS_BUSY, &cfg->scan_status);
- cfg->escan_info.run = brcmf_run_escan;
- err = brcmf_do_escan(cfg, wiphy, ifp, request);
- if (err) {
- clear_bit(BRCMF_SCAN_STATUS_BUSY, &cfg->scan_status);
- goto out_err;
- }
- cfg->sched_escan = true;
- cfg->scan_request = request;
- } else {
+ if (!result_count) {
brcmf_err("FALSE PNO Event. (pfn_count == 0)\n");
goto out_err;
}
-
- kfree(ssid);
- kfree(channel);
- kfree(request);
- return 0;
-
-out_err:
- kfree(ssid);
- kfree(channel);
- kfree(request);
- cfg80211_sched_scan_stopped(wiphy);
- return err;
-}
-
-static int brcmf_dev_pno_clean(struct net_device *ndev)
-{
- int ret;
-
- /* Disable pfn */
- ret = brcmf_fil_iovar_int_set(netdev_priv(ndev), "pfn", 0);
- if (ret == 0) {
- /* clear pfn */
- ret = brcmf_fil_iovar_data_set(netdev_priv(ndev), "pfnclear",
- NULL, 0);
- }
- if (ret < 0)
- brcmf_err("failed code %d\n", ret);
-
- return ret;
-}
-
-static int brcmf_dev_pno_config(struct brcmf_if *ifp,
- struct cfg80211_sched_scan_request *request)
-{
- struct brcmf_pno_param_le pfn_param;
- struct brcmf_pno_macaddr_le pfn_mac;
- s32 err;
- u8 *mac_mask;
- int i;
-
- memset(&pfn_param, 0, sizeof(pfn_param));
- pfn_param.version = cpu_to_le32(BRCMF_PNO_VERSION);
-
- /* set extra pno params */
- pfn_param.flags = cpu_to_le16(1 << BRCMF_PNO_ENABLE_ADAPTSCAN_BIT);
- pfn_param.repeat = BRCMF_PNO_REPEAT;
- pfn_param.exp = BRCMF_PNO_FREQ_EXPO_MAX;
-
- /* set up pno scan fr */
- pfn_param.scan_freq = cpu_to_le32(BRCMF_PNO_TIME);
-
- err = brcmf_fil_iovar_data_set(ifp, "pfn_set", &pfn_param,
- sizeof(pfn_param));
- if (err) {
- brcmf_err("pfn_set failed, err=%d\n", err);
- return err;
+ request = brcmf_alloc_internal_escan_request(wiphy,
+ result_count);
+ if (!request) {
+ err = -ENOMEM;
+ goto out_err;
}
- /* Find out if mac randomization should be turned on */
- if (!(request->flags & NL80211_SCAN_FLAG_RANDOM_ADDR))
- return 0;
+ data += sizeof(struct brcmf_pno_scanresults_le);
+ netinfo_start = brcmf_get_netinfo_array(pfn_result);
- pfn_mac.version = BRCMF_PFN_MACADDR_CFG_VER;
- pfn_mac.flags = BRCMF_PFN_MAC_OUI_ONLY | BRCMF_PFN_SET_MAC_UNASSOC;
+ for (i = 0; i < result_count; i++) {
+ netinfo = &netinfo_start[i];
+ if (!netinfo) {
+ brcmf_err("Invalid netinfo ptr. index: %d\n",
+ i);
+ err = -EINVAL;
+ goto out_err;
+ }
- memcpy(pfn_mac.mac, request->mac_addr, ETH_ALEN);
- mac_mask = request->mac_addr_mask;
- for (i = 0; i < ETH_ALEN; i++) {
- pfn_mac.mac[i] &= mac_mask[i];
- pfn_mac.mac[i] |= get_random_int() & ~(mac_mask[i]);
+ brcmf_dbg(SCAN, "SSID:%.32s Channel:%d\n",
+ netinfo->SSID, netinfo->channel);
+ err = brcmf_internal_escan_add_info(request,
+ netinfo->SSID,
+ netinfo->SSID_len,
+ netinfo->channel);
+ if (err)
+ goto out_err;
}
- /* Clear multi bit */
- pfn_mac.mac[0] &= 0xFE;
- /* Set locally administered */
- pfn_mac.mac[0] |= 0x02;
- err = brcmf_fil_iovar_data_set(ifp, "pfn_macaddr", &pfn_mac,
- sizeof(pfn_mac));
- if (err)
- brcmf_err("pfn_macaddr failed, err=%d\n", err);
+ err = brcmf_start_internal_escan(ifp, request);
+ if (!err)
+ goto free_req;
+out_err:
+ cfg80211_sched_scan_stopped(wiphy);
+free_req:
+ kfree(request);
return err;
}
static int
brcmf_cfg80211_sched_scan_start(struct wiphy *wiphy,
struct net_device *ndev,
- struct cfg80211_sched_scan_request *request)
+ struct cfg80211_sched_scan_request *req)
{
struct brcmf_if *ifp = netdev_priv(ndev);
struct brcmf_cfg80211_info *cfg = wiphy_priv(wiphy);
- struct brcmf_pno_net_param_le pfn;
- int i;
- int ret = 0;
brcmf_dbg(SCAN, "Enter n_match_sets:%d n_ssids:%d\n",
- request->n_match_sets, request->n_ssids);
- if (test_bit(BRCMF_SCAN_STATUS_BUSY, &cfg->scan_status)) {
- brcmf_err("Scanning already: status (%lu)\n", cfg->scan_status);
- return -EAGAIN;
- }
+ req->n_match_sets, req->n_ssids);
+
if (test_bit(BRCMF_SCAN_STATUS_SUPPRESS, &cfg->scan_status)) {
brcmf_err("Scanning suppressed: status (%lu)\n",
cfg->scan_status);
return -EAGAIN;
}
- if (!request->n_ssids || !request->n_match_sets) {
- brcmf_dbg(SCAN, "Invalid sched scan req!! n_ssids:%d\n",
- request->n_ssids);
+ if (req->n_match_sets <= 0) {
+ brcmf_dbg(SCAN, "invalid number of matchsets specified: %d\n",
+ req->n_match_sets);
return -EINVAL;
}
- if (request->n_ssids > 0) {
- for (i = 0; i < request->n_ssids; i++) {
- /* Active scan req for ssids */
- brcmf_dbg(SCAN, ">>> Active scan req for ssid (%s)\n",
- request->ssids[i].ssid);
-
- /* match_set ssids is a supert set of n_ssid list,
- * so we need not add these set separately.
- */
- }
- }
-
- if (request->n_match_sets > 0) {
- /* clean up everything */
- ret = brcmf_dev_pno_clean(ndev);
- if (ret < 0) {
- brcmf_err("failed error=%d\n", ret);
- return ret;
- }
-
- /* configure pno */
- if (brcmf_dev_pno_config(ifp, request))
- return -EINVAL;
-
- /* configure each match set */
- for (i = 0; i < request->n_match_sets; i++) {
- struct cfg80211_ssid *ssid;
- u32 ssid_len;
-
- ssid = &request->match_sets[i].ssid;
- ssid_len = ssid->ssid_len;
-
- if (!ssid_len) {
- brcmf_err("skip broadcast ssid\n");
- continue;
- }
- pfn.auth = cpu_to_le32(WLAN_AUTH_OPEN);
- pfn.wpa_auth = cpu_to_le32(BRCMF_PNO_WPA_AUTH_ANY);
- pfn.wsec = cpu_to_le32(0);
- pfn.infra = cpu_to_le32(1);
- pfn.flags = cpu_to_le32(1 << BRCMF_PNO_HIDDEN_BIT);
- pfn.ssid.SSID_len = cpu_to_le32(ssid_len);
- memcpy(pfn.ssid.SSID, ssid->ssid, ssid_len);
- ret = brcmf_fil_iovar_data_set(ifp, "pfn_add", &pfn,
- sizeof(pfn));
- brcmf_dbg(SCAN, ">>> PNO filter %s for ssid (%s)\n",
- ret == 0 ? "set" : "failed", ssid->ssid);
- }
- /* Enable the PNO */
- if (brcmf_fil_iovar_int_set(ifp, "pfn", 1) < 0) {
- brcmf_err("PNO enable failed!! ret=%d\n", ret);
- return -EINVAL;
- }
- } else {
- return -EINVAL;
- }
-
- return 0;
+ return brcmf_pno_start_sched_scan(ifp, req);
}
static int brcmf_cfg80211_sched_scan_stop(struct wiphy *wiphy,
struct net_device *ndev)
{
struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
+ struct brcmf_if *ifp = netdev_priv(ndev);
brcmf_dbg(SCAN, "enter\n");
- brcmf_dev_pno_clean(ndev);
- if (cfg->sched_escan)
- brcmf_notify_escan_complete(cfg, netdev_priv(ndev), true, true);
+ brcmf_pno_clean(ifp);
+ if (cfg->internal_escan)
+ brcmf_notify_escan_complete(cfg, ifp, true, true);
return 0;
}
@@ -4578,8 +4488,6 @@ brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev,
brcmf_configure_opensecurity(ifp);
}
- brcmf_config_ap_mgmt_ie(ifp->vif, &settings->beacon);
-
/* Parameters shared by all radio interfaces */
if (!mbss) {
if ((supports_11d) && (is_11d != ifp->vif->is_11d)) {
@@ -4708,6 +4616,7 @@ brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev,
WARN_ON(1);
}
+ brcmf_config_ap_mgmt_ie(ifp->vif, &settings->beacon);
set_bit(BRCMF_VIF_STATUS_AP_CREATED, &ifp->vif->sme_state);
brcmf_net_setcarrier(ifp, true);
@@ -4764,6 +4673,8 @@ static int brcmf_cfg80211_stop_ap(struct wiphy *wiphy, struct net_device *ndev)
err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_UP, 1);
if (err < 0)
brcmf_err("BRCMF_C_UP error %d\n", err);
+
+ brcmf_vif_clear_mgmt_ies(ifp->vif);
} else {
bss_enable.bsscfgidx = cpu_to_le32(ifp->bsscfgidx);
bss_enable.enable = cpu_to_le32(0);
@@ -5506,7 +5417,8 @@ brcmf_notify_connect_status_ap(struct brcmf_cfg80211_info *cfg,
u32 reason = e->reason;
struct station_info sinfo;
- brcmf_dbg(CONN, "event %d, reason %d\n", event, reason);
+ brcmf_dbg(CONN, "event %s (%u), reason %d\n",
+ brcmf_fweh_event_name(event), event, reason);
if (event == BRCMF_E_LINK && reason == BRCMF_E_REASON_LINK_BSSCFG_DIS &&
ndev != cfg_to_ndev(cfg)) {
brcmf_dbg(CONN, "AP mode link down\n");
@@ -6424,6 +6336,7 @@ static void brcmf_wiphy_pno_params(struct wiphy *wiphy)
wiphy->max_sched_scan_ssids = BRCMF_PNO_MAX_PFN_COUNT;
wiphy->max_match_sets = BRCMF_PNO_MAX_PFN_COUNT;
wiphy->max_sched_scan_ie_len = BRCMF_SCAN_IE_LEN_MAX;
+ wiphy->max_sched_scan_plan_interval = BRCMF_PNO_SCHED_SCAN_MAX_PERIOD;
wiphy->flags |= WIPHY_FLAG_SUPPORTS_SCHED_SCAN;
}
@@ -6955,7 +6868,7 @@ struct brcmf_cfg80211_info *brcmf_cfg80211_attach(struct brcmf_pub *drvr,
err = brcmf_p2p_attach(cfg, p2pdev_forced);
if (err) {
- brcmf_err("P2P initilisation failed (%d)\n", err);
+ brcmf_err("P2P initialisation failed (%d)\n", err);
goto wiphy_unreg_out;
}
err = brcmf_btcoex_attach(cfg);
@@ -6980,7 +6893,7 @@ struct brcmf_cfg80211_info *brcmf_cfg80211_attach(struct brcmf_pub *drvr,
err = brcmf_fweh_activate_events(ifp);
if (err) {
brcmf_err("FWEH activation failed (%d)\n", err);
- goto wiphy_unreg_out;
+ goto detach;
}
/* Fill in some of the advertised nl80211 supported features */
@@ -6995,6 +6908,9 @@ struct brcmf_cfg80211_info *brcmf_cfg80211_attach(struct brcmf_pub *drvr,
return cfg;
+detach:
+ brcmf_btcoex_detach(cfg);
+ brcmf_p2p_detach(&cfg->p2p);
wiphy_unreg_out:
wiphy_unregister(cfg->wiphy);
priv_out:
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h
index 8889832c17e0..0c9a7081fca9 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h
@@ -271,7 +271,7 @@ struct brcmf_cfg80211_wowl {
* @pub: common driver information.
* @channel: current channel.
* @active_scan: current scan mode.
- * @sched_escan: e-scan for scheduled scan support running.
+ * @internal_escan: indicates internally initiated e-scan is running.
* @ibss_starter: indicates this sta is ibss starter.
* @pwr_save: indicate whether dongle to support power save mode.
* @dongle_up: indicate whether dongle up or not.
@@ -303,7 +303,7 @@ struct brcmf_cfg80211_info {
struct brcmf_pub *pub;
u32 channel;
bool active_scan;
- bool sched_escan;
+ bool internal_escan;
bool ibss_starter;
bool pwr_save;
bool dongle_up;
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
index 5eaac13e2317..9e6f60a0ec3e 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
@@ -239,7 +239,13 @@ static netdev_tx_t brcmf_netdev_start_xmit(struct sk_buff *skb,
if (eh->h_proto == htons(ETH_P_PAE))
atomic_inc(&ifp->pend_8021x_cnt);
- ret = brcmf_fws_process_skb(ifp, skb);
+ /* determine the priority */
+ if ((skb->priority == 0) || (skb->priority > 7))
+ skb->priority = cfg80211_classify8021d(skb, NULL);
+
+ ret = brcmf_proto_tx_queue_data(drvr, ifp->ifidx, skb);
+ if (ret < 0)
+ brcmf_txfinalize(ifp, skb, false);
done:
if (ret) {
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c
index 79c081fd560f..c79306b57532 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c
@@ -69,7 +69,7 @@ static struct brcmf_fweh_event_name fweh_event_names[] = {
*
* @code: code to lookup.
*/
-static const char *brcmf_fweh_event_name(enum brcmf_fweh_event_code code)
+const char *brcmf_fweh_event_name(enum brcmf_fweh_event_code code)
{
int i;
for (i = 0; i < ARRAY_SIZE(fweh_event_names); i++) {
@@ -79,7 +79,7 @@ static const char *brcmf_fweh_event_name(enum brcmf_fweh_event_code code)
return "unknown";
}
#else
-static const char *brcmf_fweh_event_name(enum brcmf_fweh_event_code code)
+const char *brcmf_fweh_event_name(enum brcmf_fweh_event_code code)
{
return "nodebug";
}
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.h
index 26ff5a9648f3..5fba4b49f3b3 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.h
@@ -287,6 +287,8 @@ struct brcmf_fweh_info {
void *data);
};
+const char *brcmf_fweh_event_name(enum brcmf_fweh_event_code code);
+
void brcmf_fweh_attach(struct brcmf_pub *drvr);
void brcmf_fweh_detach(struct brcmf_pub *drvr);
int brcmf_fweh_register(struct brcmf_pub *drvr, enum brcmf_fweh_event_code code,
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h
index a4118c0ef6ca..9a1eb5ab6c4b 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h
@@ -131,6 +131,7 @@
#define BRCMF_TXBF_MU_BFR_CAP BIT(1)
#define BRCMF_MAXPMKID 16 /* max # PMKID cache entries */
+#define BRCMF_NUMCHANNELS 64
#define BRCMF_PFN_MACADDR_CFG_VER 1
#define BRCMF_PFN_MAC_OUI_ONLY BIT(0)
@@ -719,6 +720,21 @@ struct brcmf_pno_param_le {
};
/**
+ * struct brcmf_pno_config_le - PNO channel configuration.
+ *
+ * @reporttype: determines what is reported.
+ * @channel_num: number of channels specified in @channel_list.
+ * @channel_list: channels to use in PNO scan.
+ * @flags: reserved.
+ */
+struct brcmf_pno_config_le {
+ __le32 reporttype;
+ __le32 channel_num;
+ __le16 channel_list[BRCMF_NUMCHANNELS];
+ __le32 flags;
+};
+
+/**
* struct brcmf_pno_net_param_le - scan parameters per preferred network.
*
* @ssid: ssid name and its length.
@@ -769,6 +785,13 @@ struct brcmf_pno_scanresults_le {
__le32 count;
};
+struct brcmf_pno_scanresults_v2_le {
+ __le32 version;
+ __le32 status;
+ __le32 count;
+ __le32 scan_ch_bucket;
+};
+
/**
* struct brcmf_pno_macaddr_le - to configure PNO macaddr randomization.
*
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c
index a190f535efc9..5f1a5929cb30 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c
@@ -2100,16 +2100,6 @@ int brcmf_fws_process_skb(struct brcmf_if *ifp, struct sk_buff *skb)
int rc = 0;
brcmf_dbg(DATA, "tx proto=0x%X\n", ntohs(eh->h_proto));
- /* determine the priority */
- if ((skb->priority == 0) || (skb->priority > 7))
- skb->priority = cfg80211_classify8021d(skb, NULL);
-
- if (fws->avoid_queueing) {
- rc = brcmf_proto_txdata(drvr, ifp->ifidx, 0, skb);
- if (rc < 0)
- brcmf_txfinalize(ifp, skb, false);
- return rc;
- }
/* set control buffer information */
skcb->if_flags = 0;
@@ -2442,6 +2432,11 @@ void brcmf_fws_deinit(struct brcmf_pub *drvr)
kfree(fws);
}
+bool brcmf_fws_queue_skbs(struct brcmf_fws_info *fws)
+{
+ return !fws->avoid_queueing;
+}
+
bool brcmf_fws_fc_active(struct brcmf_fws_info *fws)
{
if (!fws->creditmap_received)
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.h
index ef0ad8597c8a..96df66073b2a 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.h
@@ -20,6 +20,7 @@
int brcmf_fws_init(struct brcmf_pub *drvr);
void brcmf_fws_deinit(struct brcmf_pub *drvr);
+bool brcmf_fws_queue_skbs(struct brcmf_fws_info *fws);
bool brcmf_fws_fc_active(struct brcmf_fws_info *fws);
void brcmf_fws_hdrpull(struct brcmf_if *ifp, s16 siglen, struct sk_buff *skb);
int brcmf_fws_process_skb(struct brcmf_if *ifp, struct sk_buff *skb);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c
index 2b9a2bc429d6..d2c834c3b2fc 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c
@@ -87,11 +87,6 @@ struct msgbuf_common_hdr {
__le32 request_id;
};
-struct msgbuf_buf_addr {
- __le32 low_addr;
- __le32 high_addr;
-};
-
struct msgbuf_ioctl_req_hdr {
struct msgbuf_common_hdr msg;
__le32 cmd;
@@ -227,7 +222,10 @@ struct brcmf_msgbuf {
struct brcmf_commonring **commonrings;
struct brcmf_commonring **flowrings;
dma_addr_t *flowring_dma_handle;
- u16 nrof_flowrings;
+
+ u16 max_flowrings;
+ u16 max_submissionrings;
+ u16 max_completionrings;
u16 rx_dataoffset;
u32 max_rxbufpost;
@@ -610,7 +608,7 @@ brcmf_msgbuf_flowring_create_worker(struct brcmf_msgbuf *msgbuf,
create->msg.request_id = 0;
create->tid = brcmf_flowring_tid(msgbuf->flow, flowid);
create->flow_ring_id = cpu_to_le16(flowid +
- BRCMF_NROF_H2D_COMMON_MSGRINGS);
+ BRCMF_H2D_MSGRING_FLOWRING_IDSTART);
memcpy(create->sa, work->sa, ETH_ALEN);
memcpy(create->da, work->da, ETH_ALEN);
address = (u64)msgbuf->flowring_dma_handle[flowid];
@@ -760,7 +758,7 @@ static void brcmf_msgbuf_txflow_worker(struct work_struct *worker)
u32 flowid;
msgbuf = container_of(worker, struct brcmf_msgbuf, txflow_work);
- for_each_set_bit(flowid, msgbuf->flow_map, msgbuf->nrof_flowrings) {
+ for_each_set_bit(flowid, msgbuf->flow_map, msgbuf->max_flowrings) {
clear_bit(flowid, msgbuf->flow_map);
brcmf_msgbuf_txflow(msgbuf, flowid);
}
@@ -782,8 +780,8 @@ static int brcmf_msgbuf_schedule_txdata(struct brcmf_msgbuf *msgbuf, u32 flowid,
}
-static int brcmf_msgbuf_txdata(struct brcmf_pub *drvr, int ifidx,
- u8 offset, struct sk_buff *skb)
+static int brcmf_msgbuf_tx_queue_data(struct brcmf_pub *drvr, int ifidx,
+ struct sk_buff *skb)
{
struct brcmf_msgbuf *msgbuf = (struct brcmf_msgbuf *)drvr->proto->pd;
struct brcmf_flowring *flow = msgbuf->flow;
@@ -866,7 +864,7 @@ brcmf_msgbuf_process_txstatus(struct brcmf_msgbuf *msgbuf, void *buf)
tx_status = (struct msgbuf_tx_status *)buf;
idx = le32_to_cpu(tx_status->msg.request_id);
flowid = le16_to_cpu(tx_status->compl_hdr.flow_ring_id);
- flowid -= BRCMF_NROF_H2D_COMMON_MSGRINGS;
+ flowid -= BRCMF_H2D_MSGRING_FLOWRING_IDSTART;
skb = brcmf_msgbuf_get_pktid(msgbuf->drvr->bus_if->dev,
msgbuf->tx_pktids, idx);
if (!skb)
@@ -1174,7 +1172,7 @@ brcmf_msgbuf_process_flow_ring_create_response(struct brcmf_msgbuf *msgbuf,
flowring_create_resp = (struct msgbuf_flowring_create_resp *)buf;
flowid = le16_to_cpu(flowring_create_resp->compl_hdr.flow_ring_id);
- flowid -= BRCMF_NROF_H2D_COMMON_MSGRINGS;
+ flowid -= BRCMF_H2D_MSGRING_FLOWRING_IDSTART;
status = le16_to_cpu(flowring_create_resp->compl_hdr.status);
if (status) {
@@ -1202,7 +1200,7 @@ brcmf_msgbuf_process_flow_ring_delete_response(struct brcmf_msgbuf *msgbuf,
flowring_delete_resp = (struct msgbuf_flowring_delete_resp *)buf;
flowid = le16_to_cpu(flowring_delete_resp->compl_hdr.flow_ring_id);
- flowid -= BRCMF_NROF_H2D_COMMON_MSGRINGS;
+ flowid -= BRCMF_H2D_MSGRING_FLOWRING_IDSTART;
status = le16_to_cpu(flowring_delete_resp->compl_hdr.status);
if (status) {
@@ -1307,7 +1305,7 @@ int brcmf_proto_msgbuf_rx_trigger(struct device *dev)
brcmf_msgbuf_process_rx(msgbuf, buf);
for_each_set_bit(flowid, msgbuf->txstatus_done_map,
- msgbuf->nrof_flowrings) {
+ msgbuf->max_flowrings) {
clear_bit(flowid, msgbuf->txstatus_done_map);
commonring = msgbuf->flowrings[flowid];
qlen = brcmf_flowring_qlen(msgbuf->flow, flowid);
@@ -1349,7 +1347,7 @@ void brcmf_msgbuf_delete_flowring(struct brcmf_pub *drvr, u16 flowid)
delete->msg.request_id = 0;
delete->flow_ring_id = cpu_to_le16(flowid +
- BRCMF_NROF_H2D_COMMON_MSGRINGS);
+ BRCMF_H2D_MSGRING_FLOWRING_IDSTART);
delete->reason = 0;
brcmf_dbg(MSGBUF, "Send Flow Delete Req flow ID %d, ifindex %d\n",
@@ -1427,10 +1425,10 @@ int brcmf_proto_msgbuf_attach(struct brcmf_pub *drvr)
if_msgbuf = drvr->bus_if->msgbuf;
- if (if_msgbuf->nrof_flowrings >= BRCMF_FLOWRING_HASHSIZE) {
+ if (if_msgbuf->max_flowrings >= BRCMF_FLOWRING_HASHSIZE) {
brcmf_err("driver not configured for this many flowrings %d\n",
- if_msgbuf->nrof_flowrings);
- if_msgbuf->nrof_flowrings = BRCMF_FLOWRING_HASHSIZE - 1;
+ if_msgbuf->max_flowrings);
+ if_msgbuf->max_flowrings = BRCMF_FLOWRING_HASHSIZE - 1;
}
msgbuf = kzalloc(sizeof(*msgbuf), GFP_KERNEL);
@@ -1443,7 +1441,7 @@ int brcmf_proto_msgbuf_attach(struct brcmf_pub *drvr)
goto fail;
}
INIT_WORK(&msgbuf->txflow_work, brcmf_msgbuf_txflow_worker);
- count = BITS_TO_LONGS(if_msgbuf->nrof_flowrings);
+ count = BITS_TO_LONGS(if_msgbuf->max_flowrings);
count = count * sizeof(unsigned long);
msgbuf->flow_map = kzalloc(count, GFP_KERNEL);
if (!msgbuf->flow_map)
@@ -1467,7 +1465,7 @@ int brcmf_proto_msgbuf_attach(struct brcmf_pub *drvr)
drvr->proto->hdrpull = brcmf_msgbuf_hdrpull;
drvr->proto->query_dcmd = brcmf_msgbuf_query_dcmd;
drvr->proto->set_dcmd = brcmf_msgbuf_set_dcmd;
- drvr->proto->txdata = brcmf_msgbuf_txdata;
+ drvr->proto->tx_queue_data = brcmf_msgbuf_tx_queue_data;
drvr->proto->configure_addr_mode = brcmf_msgbuf_configure_addr_mode;
drvr->proto->delete_peer = brcmf_msgbuf_delete_peer;
drvr->proto->add_tdls_peer = brcmf_msgbuf_add_tdls_peer;
@@ -1479,8 +1477,8 @@ int brcmf_proto_msgbuf_attach(struct brcmf_pub *drvr)
msgbuf->commonrings =
(struct brcmf_commonring **)if_msgbuf->commonrings;
msgbuf->flowrings = (struct brcmf_commonring **)if_msgbuf->flowrings;
- msgbuf->nrof_flowrings = if_msgbuf->nrof_flowrings;
- msgbuf->flowring_dma_handle = kzalloc(msgbuf->nrof_flowrings *
+ msgbuf->max_flowrings = if_msgbuf->max_flowrings;
+ msgbuf->flowring_dma_handle = kzalloc(msgbuf->max_flowrings *
sizeof(*msgbuf->flowring_dma_handle), GFP_KERNEL);
if (!msgbuf->flowring_dma_handle)
goto fail;
@@ -1501,7 +1499,7 @@ int brcmf_proto_msgbuf_attach(struct brcmf_pub *drvr)
goto fail;
msgbuf->flow = brcmf_flowring_attach(drvr->bus_if->dev,
- if_msgbuf->nrof_flowrings);
+ if_msgbuf->max_flowrings);
if (!msgbuf->flow)
goto fail;
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.h
index ee6906a3c3f6..f93ba6be1ef8 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.h
@@ -31,6 +31,10 @@
#define BRCMF_D2H_MSGRING_RX_COMPLETE_ITEMSIZE 32
#define BRCMF_H2D_TXFLOWRING_ITEMSIZE 48
+struct msgbuf_buf_addr {
+ __le32 low_addr;
+ __le32 high_addr;
+};
int brcmf_proto_msgbuf_rx_trigger(struct device *dev);
void brcmf_msgbuf_delete_flowring(struct brcmf_pub *drvr, u16 flowid);
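struct msgbuf_buf_addr moves into the header because pcie.c now embeds it in struct brcmf_pcie_dhi_ringinfo. Filling one in from a host DMA address follows the low/high split already used for ring addresses in msgbuf.c; a sketch (brcmf_set_buf_addr is a hypothetical helper, not in the patch):

static void brcmf_set_buf_addr(struct msgbuf_buf_addr *ba, dma_addr_t pa)
{
	u64 address = (u64)pa;

	/* dongle expects the 64-bit host address as two LE words */
	ba->low_addr = cpu_to_le32(address & 0xffffffff);
	ba->high_addr = cpu_to_le32(address >> 32);
}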
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
index 3deba90c7eb5..048027f2085b 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
@@ -135,7 +135,7 @@ static struct brcmf_firmware_mapping brcmf_pcie_fwnames[] = {
BRCMF_PCIE_MB_INT_D2H3_DB1)
#define BRCMF_PCIE_MIN_SHARED_VERSION 5
-#define BRCMF_PCIE_MAX_SHARED_VERSION 5
+#define BRCMF_PCIE_MAX_SHARED_VERSION 6
#define BRCMF_PCIE_SHARED_VERSION_MASK 0x00FF
#define BRCMF_PCIE_SHARED_DMA_INDEX 0x10000
#define BRCMF_PCIE_SHARED_DMA_2B_IDX 0x100000
@@ -166,17 +166,6 @@ static struct brcmf_firmware_mapping brcmf_pcie_fwnames[] = {
#define BRCMF_RING_MEM_SZ 16
#define BRCMF_RING_STATE_SZ 8
-#define BRCMF_SHARED_RING_H2D_W_IDX_PTR_OFFSET 4
-#define BRCMF_SHARED_RING_H2D_R_IDX_PTR_OFFSET 8
-#define BRCMF_SHARED_RING_D2H_W_IDX_PTR_OFFSET 12
-#define BRCMF_SHARED_RING_D2H_R_IDX_PTR_OFFSET 16
-#define BRCMF_SHARED_RING_H2D_WP_HADDR_OFFSET 20
-#define BRCMF_SHARED_RING_H2D_RP_HADDR_OFFSET 28
-#define BRCMF_SHARED_RING_D2H_WP_HADDR_OFFSET 36
-#define BRCMF_SHARED_RING_D2H_RP_HADDR_OFFSET 44
-#define BRCMF_SHARED_RING_TCM_MEMLOC_OFFSET 0
-#define BRCMF_SHARED_RING_MAX_SUB_QUEUES 52
-
#define BRCMF_DEF_MAX_RXBUFPOST 255
#define BRCMF_CONSOLE_BUFADDR_OFFSET 8
@@ -231,7 +220,9 @@ struct brcmf_pcie_shared_info {
struct brcmf_pcie_ringbuf *commonrings[BRCMF_NROF_COMMON_MSGRINGS];
struct brcmf_pcie_ringbuf *flowrings;
u16 max_rxbufpost;
- u32 nrof_flowrings;
+ u16 max_flowrings;
+ u16 max_submissionrings;
+ u16 max_completionrings;
u32 rx_dataoffset;
u32 htod_mb_data_addr;
u32 dtoh_mb_data_addr;
@@ -241,6 +232,7 @@ struct brcmf_pcie_shared_info {
dma_addr_t scratch_dmahandle;
void *ringupd;
dma_addr_t ringupd_dmahandle;
+ u8 version;
};
struct brcmf_pcie_core_info {
@@ -284,6 +276,36 @@ struct brcmf_pcie_ringbuf {
u8 id;
};
+/**
+ * struct brcmf_pcie_dhi_ringinfo - dongle/host interface shared ring info
+ *
+ * @ringmem: dongle memory pointer to ring memory location
+ * @h2d_w_idx_ptr: h2d ring write indices dongle memory pointers
+ * @h2d_r_idx_ptr: h2d ring read indices dongle memory pointers
+ * @d2h_w_idx_ptr: d2h ring write indices dongle memory pointers
+ * @d2h_r_idx_ptr: d2h ring read indices dongle memory pointers
+ * @h2d_w_idx_hostaddr: h2d ring write indices host memory pointers
+ * @h2d_r_idx_hostaddr: h2d ring read indices host memory pointers
+ * @d2h_w_idx_hostaddr: d2h ring write indices host memory pointers
+ * @d2h_r_idx_hostaddr: d2h ring read indices host memory pointers
+ * @max_flowrings: maximum number of tx flow rings supported.
+ * @max_submissionrings: maximum number of submission rings (h2d) supported.
+ * @max_completionrings: maximum number of completion rings (d2h) supported.
+ */
+struct brcmf_pcie_dhi_ringinfo {
+ __le32 ringmem;
+ __le32 h2d_w_idx_ptr;
+ __le32 h2d_r_idx_ptr;
+ __le32 d2h_w_idx_ptr;
+ __le32 d2h_r_idx_ptr;
+ struct msgbuf_buf_addr h2d_w_idx_hostaddr;
+ struct msgbuf_buf_addr h2d_r_idx_hostaddr;
+ struct msgbuf_buf_addr d2h_w_idx_hostaddr;
+ struct msgbuf_buf_addr d2h_r_idx_hostaddr;
+ __le16 max_flowrings;
+ __le16 max_submissionrings;
+ __le16 max_completionrings;
+};
static const u32 brcmf_ring_max_item[BRCMF_NROF_COMMON_MSGRINGS] = {
BRCMF_H2D_MSGRING_CONTROL_SUBMIT_MAX_ITEM,
@@ -1054,26 +1076,35 @@ static int brcmf_pcie_init_ringbuffers(struct brcmf_pciedev_info *devinfo)
{
struct brcmf_pcie_ringbuf *ring;
struct brcmf_pcie_ringbuf *rings;
- u32 ring_addr;
u32 d2h_w_idx_ptr;
u32 d2h_r_idx_ptr;
u32 h2d_w_idx_ptr;
u32 h2d_r_idx_ptr;
- u32 addr;
u32 ring_mem_ptr;
u32 i;
u64 address;
u32 bufsz;
- u16 max_sub_queues;
u8 idx_offset;
-
- ring_addr = devinfo->shared.ring_info_addr;
- brcmf_dbg(PCIE, "Base ring addr = 0x%08x\n", ring_addr);
- addr = ring_addr + BRCMF_SHARED_RING_MAX_SUB_QUEUES;
- max_sub_queues = brcmf_pcie_read_tcm16(devinfo, addr);
+ struct brcmf_pcie_dhi_ringinfo ringinfo;
+ u16 max_flowrings;
+ u16 max_submissionrings;
+ u16 max_completionrings;
+
+ memcpy_fromio(&ringinfo, devinfo->tcm + devinfo->shared.ring_info_addr,
+ sizeof(ringinfo));
+ if (devinfo->shared.version >= 6) {
+ max_submissionrings = le16_to_cpu(ringinfo.max_submissionrings);
+ max_flowrings = le16_to_cpu(ringinfo.max_flowrings);
+ max_completionrings = le16_to_cpu(ringinfo.max_completionrings);
+ } else {
+ max_submissionrings = le16_to_cpu(ringinfo.max_flowrings);
+ max_flowrings = max_submissionrings -
+ BRCMF_NROF_H2D_COMMON_MSGRINGS;
+ max_completionrings = BRCMF_NROF_D2H_COMMON_MSGRINGS;
+ }
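A quick worked example of the fallback branch above, assuming the common-ring counts brcmfmac uses elsewhere (BRCMF_NROF_H2D_COMMON_MSGRINGS = 2, BRCMF_NROF_D2H_COMMON_MSGRINGS = 3): a pre-v6 image that reports 40 rings in the legacy field gives max_submissionrings = 40, max_flowrings = 40 - 2 = 38 and max_completionrings = 3, whereas a v6 image reports all three limits explicitly.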
if (devinfo->dma_idx_sz != 0) {
- bufsz = (BRCMF_NROF_D2H_COMMON_MSGRINGS + max_sub_queues) *
+ bufsz = (max_submissionrings + max_completionrings) *
devinfo->dma_idx_sz * 2;
devinfo->idxbuf = dma_alloc_coherent(&devinfo->pdev->dev, bufsz,
&devinfo->idxbuf_dmahandle,
@@ -1083,14 +1114,10 @@ static int brcmf_pcie_init_ringbuffers(struct brcmf_pciedev_info *devinfo)
}
if (devinfo->dma_idx_sz == 0) {
- addr = ring_addr + BRCMF_SHARED_RING_D2H_W_IDX_PTR_OFFSET;
- d2h_w_idx_ptr = brcmf_pcie_read_tcm32(devinfo, addr);
- addr = ring_addr + BRCMF_SHARED_RING_D2H_R_IDX_PTR_OFFSET;
- d2h_r_idx_ptr = brcmf_pcie_read_tcm32(devinfo, addr);
- addr = ring_addr + BRCMF_SHARED_RING_H2D_W_IDX_PTR_OFFSET;
- h2d_w_idx_ptr = brcmf_pcie_read_tcm32(devinfo, addr);
- addr = ring_addr + BRCMF_SHARED_RING_H2D_R_IDX_PTR_OFFSET;
- h2d_r_idx_ptr = brcmf_pcie_read_tcm32(devinfo, addr);
+ d2h_w_idx_ptr = le32_to_cpu(ringinfo.d2h_w_idx_ptr);
+ d2h_r_idx_ptr = le32_to_cpu(ringinfo.d2h_r_idx_ptr);
+ h2d_w_idx_ptr = le32_to_cpu(ringinfo.h2d_w_idx_ptr);
+ h2d_r_idx_ptr = le32_to_cpu(ringinfo.h2d_r_idx_ptr);
idx_offset = sizeof(u32);
devinfo->write_ptr = brcmf_pcie_write_tcm16;
devinfo->read_ptr = brcmf_pcie_read_tcm16;
@@ -1103,34 +1130,42 @@ static int brcmf_pcie_init_ringbuffers(struct brcmf_pciedev_info *devinfo)
devinfo->read_ptr = brcmf_pcie_read_idx;
h2d_w_idx_ptr = 0;
- addr = ring_addr + BRCMF_SHARED_RING_H2D_WP_HADDR_OFFSET;
address = (u64)devinfo->idxbuf_dmahandle;
- brcmf_pcie_write_tcm32(devinfo, addr, address & 0xffffffff);
- brcmf_pcie_write_tcm32(devinfo, addr + 4, address >> 32);
-
- h2d_r_idx_ptr = h2d_w_idx_ptr + max_sub_queues * idx_offset;
- addr = ring_addr + BRCMF_SHARED_RING_H2D_RP_HADDR_OFFSET;
- address += max_sub_queues * idx_offset;
- brcmf_pcie_write_tcm32(devinfo, addr, address & 0xffffffff);
- brcmf_pcie_write_tcm32(devinfo, addr + 4, address >> 32);
-
- d2h_w_idx_ptr = h2d_r_idx_ptr + max_sub_queues * idx_offset;
- addr = ring_addr + BRCMF_SHARED_RING_D2H_WP_HADDR_OFFSET;
- address += max_sub_queues * idx_offset;
- brcmf_pcie_write_tcm32(devinfo, addr, address & 0xffffffff);
- brcmf_pcie_write_tcm32(devinfo, addr + 4, address >> 32);
+ ringinfo.h2d_w_idx_hostaddr.low_addr =
+ cpu_to_le32(address & 0xffffffff);
+ ringinfo.h2d_w_idx_hostaddr.high_addr =
+ cpu_to_le32(address >> 32);
+
+ h2d_r_idx_ptr = h2d_w_idx_ptr +
+ max_submissionrings * idx_offset;
+ address += max_submissionrings * idx_offset;
+ ringinfo.h2d_r_idx_hostaddr.low_addr =
+ cpu_to_le32(address & 0xffffffff);
+ ringinfo.h2d_r_idx_hostaddr.high_addr =
+ cpu_to_le32(address >> 32);
+
+ d2h_w_idx_ptr = h2d_r_idx_ptr +
+ max_submissionrings * idx_offset;
+ address += max_submissionrings * idx_offset;
+ ringinfo.d2h_w_idx_hostaddr.low_addr =
+ cpu_to_le32(address & 0xffffffff);
+ ringinfo.d2h_w_idx_hostaddr.high_addr =
+ cpu_to_le32(address >> 32);
d2h_r_idx_ptr = d2h_w_idx_ptr +
- BRCMF_NROF_D2H_COMMON_MSGRINGS * idx_offset;
- addr = ring_addr + BRCMF_SHARED_RING_D2H_RP_HADDR_OFFSET;
- address += BRCMF_NROF_D2H_COMMON_MSGRINGS * idx_offset;
- brcmf_pcie_write_tcm32(devinfo, addr, address & 0xffffffff);
- brcmf_pcie_write_tcm32(devinfo, addr + 4, address >> 32);
+ max_completionrings * idx_offset;
+ address += max_completionrings * idx_offset;
+ ringinfo.d2h_r_idx_hostaddr.low_addr =
+ cpu_to_le32(address & 0xffffffff);
+ ringinfo.d2h_r_idx_hostaddr.high_addr =
+ cpu_to_le32(address >> 32);
+
+ memcpy_toio(devinfo->tcm + devinfo->shared.ring_info_addr,
+ &ringinfo, sizeof(ringinfo));
brcmf_dbg(PCIE, "Using host memory indices\n");
}
- addr = ring_addr + BRCMF_SHARED_RING_TCM_MEMLOC_OFFSET;
- ring_mem_ptr = brcmf_pcie_read_tcm32(devinfo, addr);
+ ring_mem_ptr = le32_to_cpu(ringinfo.ringmem);
for (i = 0; i < BRCMF_NROF_H2D_COMMON_MSGRINGS; i++) {
ring = brcmf_pcie_alloc_dma_and_ring(devinfo, i, ring_mem_ptr);
@@ -1161,20 +1196,19 @@ static int brcmf_pcie_init_ringbuffers(struct brcmf_pciedev_info *devinfo)
ring_mem_ptr += BRCMF_RING_MEM_SZ;
}
- devinfo->shared.nrof_flowrings =
- max_sub_queues - BRCMF_NROF_H2D_COMMON_MSGRINGS;
- rings = kcalloc(devinfo->shared.nrof_flowrings, sizeof(*ring),
- GFP_KERNEL);
+ devinfo->shared.max_flowrings = max_flowrings;
+ devinfo->shared.max_submissionrings = max_submissionrings;
+ devinfo->shared.max_completionrings = max_completionrings;
+ rings = kcalloc(max_flowrings, sizeof(*ring), GFP_KERNEL);
if (!rings)
goto fail;
- brcmf_dbg(PCIE, "Nr of flowrings is %d\n",
- devinfo->shared.nrof_flowrings);
+ brcmf_dbg(PCIE, "Nr of flowrings is %d\n", max_flowrings);
- for (i = 0; i < devinfo->shared.nrof_flowrings; i++) {
+ for (i = 0; i < max_flowrings; i++) {
ring = &rings[i];
ring->devinfo = devinfo;
- ring->id = i + BRCMF_NROF_COMMON_MSGRINGS;
+ ring->id = i + BRCMF_H2D_MSGRING_FLOWRING_IDSTART;
brcmf_commonring_register_cb(&ring->commonring,
brcmf_pcie_ring_mb_ring_bell,
brcmf_pcie_ring_mb_update_rptr,
@@ -1357,17 +1391,16 @@ brcmf_pcie_init_share_ram_info(struct brcmf_pciedev_info *devinfo,
{
struct brcmf_pcie_shared_info *shared;
u32 addr;
- u32 version;
shared = &devinfo->shared;
shared->tcm_base_address = sharedram_addr;
shared->flags = brcmf_pcie_read_tcm32(devinfo, sharedram_addr);
- version = shared->flags & BRCMF_PCIE_SHARED_VERSION_MASK;
- brcmf_dbg(PCIE, "PCIe protocol version %d\n", version);
- if ((version > BRCMF_PCIE_MAX_SHARED_VERSION) ||
- (version < BRCMF_PCIE_MIN_SHARED_VERSION)) {
- brcmf_err("Unsupported PCIE version %d\n", version);
+ shared->version = (u8)(shared->flags & BRCMF_PCIE_SHARED_VERSION_MASK);
+ brcmf_dbg(PCIE, "PCIe protocol version %d\n", shared->version);
+ if ((shared->version > BRCMF_PCIE_MAX_SHARED_VERSION) ||
+ (shared->version < BRCMF_PCIE_MIN_SHARED_VERSION)) {
+ brcmf_err("Unsupported PCIE version %d\n", shared->version);
return -EINVAL;
}
@@ -1661,18 +1694,18 @@ static void brcmf_pcie_setup(struct device *dev, const struct firmware *fw,
bus->msgbuf->commonrings[i] =
&devinfo->shared.commonrings[i]->commonring;
- flowrings = kcalloc(devinfo->shared.nrof_flowrings, sizeof(*flowrings),
+ flowrings = kcalloc(devinfo->shared.max_flowrings, sizeof(*flowrings),
GFP_KERNEL);
if (!flowrings)
goto fail;
- for (i = 0; i < devinfo->shared.nrof_flowrings; i++)
+ for (i = 0; i < devinfo->shared.max_flowrings; i++)
flowrings[i] = &devinfo->shared.flowrings[i].commonring;
bus->msgbuf->flowrings = flowrings;
bus->msgbuf->rx_dataoffset = devinfo->shared.rx_dataoffset;
bus->msgbuf->max_rxbufpost = devinfo->shared.max_rxbufpost;
- bus->msgbuf->nrof_flowrings = devinfo->shared.nrof_flowrings;
+ bus->msgbuf->max_flowrings = devinfo->shared.max_flowrings;
init_waitqueue_head(&devinfo->mbdata_resp_wait);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pno.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pno.c
new file mode 100644
index 000000000000..9a25e79a46cf
--- /dev/null
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pno.c
@@ -0,0 +1,243 @@
+/*
+ * Copyright (c) 2016 Broadcom
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+#include <linux/netdevice.h>
+#include <net/cfg80211.h>
+
+#include "core.h"
+#include "debug.h"
+#include "fwil.h"
+#include "fwil_types.h"
+#include "cfg80211.h"
+#include "pno.h"
+
+#define BRCMF_PNO_VERSION 2
+#define BRCMF_PNO_REPEAT 4
+#define BRCMF_PNO_FREQ_EXPO_MAX 3
+#define BRCMF_PNO_IMMEDIATE_SCAN_BIT 3
+#define BRCMF_PNO_ENABLE_BD_SCAN_BIT 5
+#define BRCMF_PNO_ENABLE_ADAPTSCAN_BIT 6
+#define BRCMF_PNO_REPORT_SEPARATELY_BIT 11
+#define BRCMF_PNO_SCAN_INCOMPLETE 0
+#define BRCMF_PNO_WPA_AUTH_ANY 0xFFFFFFFF
+#define BRCMF_PNO_HIDDEN_BIT 2
+#define BRCMF_PNO_SCHED_SCAN_PERIOD 30
+
+static int brcmf_pno_channel_config(struct brcmf_if *ifp,
+ struct brcmf_pno_config_le *cfg)
+{
+ cfg->reporttype = 0;
+ cfg->flags = 0;
+
+ return brcmf_fil_iovar_data_set(ifp, "pfn_cfg", cfg, sizeof(*cfg));
+}
+
+static int brcmf_pno_config(struct brcmf_if *ifp, u32 scan_freq,
+ u32 mscan, u32 bestn)
+{
+ struct brcmf_pno_param_le pfn_param;
+ u16 flags;
+ u32 pfnmem;
+ s32 err;
+
+ memset(&pfn_param, 0, sizeof(pfn_param));
+ pfn_param.version = cpu_to_le32(BRCMF_PNO_VERSION);
+
+ /* set extra pno params */
+ flags = BIT(BRCMF_PNO_IMMEDIATE_SCAN_BIT) |
+ BIT(BRCMF_PNO_REPORT_SEPARATELY_BIT) |
+ BIT(BRCMF_PNO_ENABLE_ADAPTSCAN_BIT);
+ pfn_param.repeat = BRCMF_PNO_REPEAT;
+ pfn_param.exp = BRCMF_PNO_FREQ_EXPO_MAX;
+
+ /* set up pno scan frequency */
+ if (scan_freq < BRCMF_PNO_SCHED_SCAN_MIN_PERIOD) {
+ brcmf_dbg(SCAN, "scan period too small, using minimum\n");
+ scan_freq = BRCMF_PNO_SCHED_SCAN_MIN_PERIOD;
+ }
+ pfn_param.scan_freq = cpu_to_le32(scan_freq);
+
+ if (mscan) {
+ pfnmem = bestn;
+
+ /* set bestn in firmware */
+ err = brcmf_fil_iovar_int_set(ifp, "pfnmem", pfnmem);
+ if (err < 0) {
+ brcmf_err("failed to set pfnmem\n");
+ goto exit;
+ }
+ /* get max mscan which the firmware supports */
+ err = brcmf_fil_iovar_int_get(ifp, "pfnmem", &pfnmem);
+ if (err < 0) {
+ brcmf_err("failed to get pfnmem\n");
+ goto exit;
+ }
+ mscan = min_t(u32, mscan, pfnmem);
+ pfn_param.mscan = mscan;
+ pfn_param.bestn = bestn;
+ flags |= BIT(BRCMF_PNO_ENABLE_BD_SCAN_BIT);
+ brcmf_dbg(INFO, "mscan=%d, bestn=%d\n", mscan, bestn);
+ }
+
+ pfn_param.flags = cpu_to_le16(flags);
+ err = brcmf_fil_iovar_data_set(ifp, "pfn_set", &pfn_param,
+ sizeof(pfn_param));
+ if (err)
+ brcmf_err("pfn_set failed, err=%d\n", err);
+
+exit:
+ return err;
+}
+
+static int brcmf_pno_set_random(struct brcmf_if *ifp, u8 *mac_addr,
+ u8 *mac_mask)
+{
+ struct brcmf_pno_macaddr_le pfn_mac;
+ int err, i;
+
+ pfn_mac.version = BRCMF_PFN_MACADDR_CFG_VER;
+ pfn_mac.flags = BRCMF_PFN_MAC_OUI_ONLY | BRCMF_PFN_SET_MAC_UNASSOC;
+
+ memcpy(pfn_mac.mac, mac_addr, ETH_ALEN);
+ for (i = 0; i < ETH_ALEN; i++) {
+ pfn_mac.mac[i] &= mac_mask[i];
+ pfn_mac.mac[i] |= get_random_int() & ~(mac_mask[i]);
+ }
+ /* Clear multicast bit */
+ pfn_mac.mac[0] &= 0xFE;
+ /* Set locally administered */
+ pfn_mac.mac[0] |= 0x02;
+
+ err = brcmf_fil_iovar_data_set(ifp, "pfn_macaddr", &pfn_mac,
+ sizeof(pfn_mac));
+ if (err)
+ brcmf_err("pfn_macaddr failed, err=%d\n", err);
+
+ return err;
+}
+
+static int brcmf_pno_add_ssid(struct brcmf_if *ifp, struct cfg80211_ssid *ssid,
+ bool active)
+{
+ struct brcmf_pno_net_param_le pfn;
+
+ pfn.auth = cpu_to_le32(WLAN_AUTH_OPEN);
+ pfn.wpa_auth = cpu_to_le32(BRCMF_PNO_WPA_AUTH_ANY);
+ pfn.wsec = cpu_to_le32(0);
+ pfn.infra = cpu_to_le32(1);
+ pfn.flags = 0;
+ if (active)
+ pfn.flags = cpu_to_le32(1 << BRCMF_PNO_HIDDEN_BIT);
+ pfn.ssid.SSID_len = cpu_to_le32(ssid->ssid_len);
+ memcpy(pfn.ssid.SSID, ssid->ssid, ssid->ssid_len);
+ return brcmf_fil_iovar_data_set(ifp, "pfn_add", &pfn, sizeof(pfn));
+}
+
+static bool brcmf_is_ssid_active(struct cfg80211_ssid *ssid,
+ struct cfg80211_sched_scan_request *req)
+{
+ int i;
+
+ if (!ssid || !req->ssids || !req->n_ssids)
+ return false;
+
+ for (i = 0; i < req->n_ssids; i++) {
+ if (ssid->ssid_len == req->ssids[i].ssid_len) {
+ if (!memcmp(ssid->ssid, req->ssids[i].ssid,
+ ssid->ssid_len))
+ return true;
+ }
+ }
+ return false;
+}
+
+int brcmf_pno_clean(struct brcmf_if *ifp)
+{
+ int ret;
+
+ /* Disable pfn */
+ ret = brcmf_fil_iovar_int_set(ifp, "pfn", 0);
+ if (ret == 0) {
+ /* clear pfn */
+ ret = brcmf_fil_iovar_data_set(ifp, "pfnclear", NULL, 0);
+ }
+ if (ret < 0)
+ brcmf_err("failed code %d\n", ret);
+
+ return ret;
+}
+
+int brcmf_pno_start_sched_scan(struct brcmf_if *ifp,
+ struct cfg80211_sched_scan_request *req)
+{
+ struct brcmu_d11inf *d11inf;
+ struct brcmf_pno_config_le pno_cfg;
+ struct cfg80211_ssid *ssid;
+ u16 chan;
+ int i, ret;
+
+ /* clean up everything */
+ ret = brcmf_pno_clean(ifp);
+ if (ret < 0) {
+ brcmf_err("failed error=%d\n", ret);
+ return ret;
+ }
+
+ /* configure pno */
+ ret = brcmf_pno_config(ifp, req->scan_plans[0].interval, 0, 0);
+ if (ret < 0)
+ return ret;
+
+ /* configure random mac */
+ if (req->flags & NL80211_SCAN_FLAG_RANDOM_ADDR) {
+ ret = brcmf_pno_set_random(ifp, req->mac_addr,
+ req->mac_addr_mask);
+ if (ret < 0)
+ return ret;
+ }
+
+ /* configure channels to use */
+ d11inf = &ifp->drvr->config->d11inf;
+ for (i = 0; i < req->n_channels; i++) {
+ chan = req->channels[i]->hw_value;
+ pno_cfg.channel_list[i] = cpu_to_le16(chan);
+ }
+ if (req->n_channels) {
+ pno_cfg.channel_num = cpu_to_le32(req->n_channels);
+ brcmf_pno_channel_config(ifp, &pno_cfg);
+ }
+
+ /* configure each match set */
+ for (i = 0; i < req->n_match_sets; i++) {
+ ssid = &req->match_sets[i].ssid;
+ if (!ssid->ssid_len) {
+ brcmf_err("skip broadcast ssid\n");
+ continue;
+ }
+
+ ret = brcmf_pno_add_ssid(ifp, ssid,
+ brcmf_is_ssid_active(ssid, req));
+ if (ret < 0)
+ brcmf_dbg(SCAN, ">>> PNO filter failed for ssid (%s)\n",
+ ssid->ssid);
+ }
+ /* Enable the PNO */
+ ret = brcmf_fil_iovar_int_set(ifp, "pfn", 1);
+ if (ret < 0)
+ brcmf_err("PNO enable failed!! ret=%d\n", ret);
+
+ return ret;
+}
+
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pno.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pno.h
new file mode 100644
index 000000000000..bae55b2af78c
--- /dev/null
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pno.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2016 Broadcom
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+#ifndef _BRCMF_PNO_H
+#define _BRCMF_PNO_H
+
+#define BRCMF_PNO_SCAN_COMPLETE 1
+#define BRCMF_PNO_MAX_PFN_COUNT 16
+#define BRCMF_PNO_SCHED_SCAN_MIN_PERIOD 10
+#define BRCMF_PNO_SCHED_SCAN_MAX_PERIOD 508
+
+/**
+ * brcmf_pno_clean - disable and clear pno in firmware.
+ *
+ * @ifp: interface object used.
+ */
+int brcmf_pno_clean(struct brcmf_if *ifp);
+
+/**
+ * brcmf_pno_start_sched_scan - initiate scheduled scan on device.
+ *
+ * @ifp: interface object used.
+ * @req: configuration parameters for scheduled scan.
+ */
+int brcmf_pno_start_sched_scan(struct brcmf_if *ifp,
+ struct cfg80211_sched_scan_request *req);
+
+#endif /* _BRCMF_PNO_H */
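For context, a minimal sketch of how a cfg80211 sched_scan_start handler might drive this API (the wrapper is our illustration, not part of the patch):

static int brcmf_sched_scan_start_sketch(struct net_device *ndev,
					 struct cfg80211_sched_scan_request *req)
{
	struct brcmf_if *ifp = netdev_priv(ndev);

	/* the firmware only tracks a limited number of PFN entries */
	if (req->n_match_sets <= 0 ||
	    req->n_match_sets > BRCMF_PNO_MAX_PFN_COUNT)
		return -EINVAL;

	return brcmf_pno_start_sched_scan(ifp, req);
}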
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/proto.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/proto.c
index 26b68c367f57..d26ff219ef66 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/proto.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/proto.c
@@ -51,7 +51,7 @@ int brcmf_proto_attach(struct brcmf_pub *drvr)
drvr->bus_if->proto_type);
goto fail;
}
- if ((proto->txdata == NULL) || (proto->hdrpull == NULL) ||
+ if (!proto->tx_queue_data || (proto->hdrpull == NULL) ||
(proto->query_dcmd == NULL) || (proto->set_dcmd == NULL) ||
(proto->configure_addr_mode == NULL) ||
(proto->delete_peer == NULL) || (proto->add_tdls_peer == NULL)) {
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/proto.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/proto.h
index 57531f42190e..34b59feedeba 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/proto.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/proto.h
@@ -33,6 +33,8 @@ struct brcmf_proto {
void *buf, uint len);
int (*set_dcmd)(struct brcmf_pub *drvr, int ifidx, uint cmd, void *buf,
uint len);
+ int (*tx_queue_data)(struct brcmf_pub *drvr, int ifidx,
+ struct sk_buff *skb);
int (*txdata)(struct brcmf_pub *drvr, int ifidx, u8 offset,
struct sk_buff *skb);
void (*configure_addr_mode)(struct brcmf_pub *drvr, int ifidx,
@@ -74,6 +76,13 @@ static inline int brcmf_proto_set_dcmd(struct brcmf_pub *drvr, int ifidx,
{
return drvr->proto->set_dcmd(drvr, ifidx, cmd, buf, len);
}
+
+static inline int brcmf_proto_tx_queue_data(struct brcmf_pub *drvr, int ifidx,
+ struct sk_buff *skb)
+{
+ return drvr->proto->tx_queue_data(drvr, ifidx, skb);
+}
+
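Since proto.c now refuses to attach without tx_queue_data (see the check above), a protocol that only implements the header-prepending txdata path needs a thin adapter; a hedged sketch of what a BCDC-style one could look like (the name and zero offset are our assumptions):

static int brcmf_bcdc_tx_queue_data_sketch(struct brcmf_pub *drvr, int ifidx,
					   struct sk_buff *skb)
{
	/* no precomputed data offset: let txdata prepend the BCDC header */
	return brcmf_proto_txdata(drvr, ifidx, 0, skb);
}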
static inline int brcmf_proto_txdata(struct brcmf_pub *drvr, int ifidx,
u8 offset, struct sk_buff *skb)
{
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
index b892dac70f4b..dfb0658713d9 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
@@ -621,6 +621,7 @@ static struct brcmf_firmware_mapping brcmf_sdio_fwnames[] = {
BRCMF_FW_NVRAM_ENTRY(BRCM_CC_4330_CHIP_ID, 0xFFFFFFFF, 4330),
BRCMF_FW_NVRAM_ENTRY(BRCM_CC_4334_CHIP_ID, 0xFFFFFFFF, 4334),
BRCMF_FW_NVRAM_ENTRY(BRCM_CC_43340_CHIP_ID, 0xFFFFFFFF, 43340),
+ BRCMF_FW_NVRAM_ENTRY(BRCM_CC_43341_CHIP_ID, 0xFFFFFFFF, 43340),
BRCMF_FW_NVRAM_ENTRY(BRCM_CC_4335_CHIP_ID, 0xFFFFFFFF, 4335),
BRCMF_FW_NVRAM_ENTRY(BRCM_CC_43362_CHIP_ID, 0xFFFFFFFE, 43362),
BRCMF_FW_NVRAM_ENTRY(BRCM_CC_4339_CHIP_ID, 0xFFFFFFFF, 4339),
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/Makefile b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/Makefile
index 960e6b86bbcb..ed83f33aceb7 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/Makefile
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/Makefile
@@ -16,7 +16,6 @@
# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
ccflags-y := \
- -D__CHECK_ENDIAN__ \
-Idrivers/net/wireless/broadcom/brcm80211/brcmsmac \
-Idrivers/net/wireless/broadcom/brcm80211/brcmsmac/phy \
-Idrivers/net/wireless/broadcom/brcm80211/include
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_qmath.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_qmath.c
index faf1ebe76068..b9672da24a9d 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_qmath.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_qmath.c
@@ -179,7 +179,7 @@ s16 qm_norm32(s32 op)
return u16extraSignBits;
}
-/* This table is log2(1+(i/32)) where i=[0:1:31], in q.15 format */
+/* This table is log2(1+(i/32)) where i=[0:1:32], in q.15 format */
static const s16 log_table[] = {
0,
1455,
@@ -212,7 +212,8 @@ static const s16 log_table[] = {
29717,
30498,
31267,
- 32024
+ 32024,
+ 32768
};
#define LOG_TABLE_SIZE 32 /* log_table size */
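Why the extra entry matters: entries are log2(1 + i/32) in q.15, so the new final value is log2(1 + 32/32) = log2(2) = 1.0, which is 1 << 15 = 32768 in q.15. An interpolating lookup reads log_table[i + 1] and therefore needs a valid point at index 32 when i = 31. A minimal sketch of such a lookup (ours, not the driver's routine):

static s32 q15_log2_frac(u32 m)	/* m: mantissa in q.15, range [1.0, 2.0) */
{
	u32 frac = m - (1u << 15);	/* fractional part of the mantissa */
	u32 i = frac >> 10;		/* 32 segments -> table index 0..31 */
	u32 rem = frac & 0x3ff;		/* position within the segment */
	s32 lo = (u16)log_table[i];
	s32 hi = (u16)log_table[i + 1];	/* (u16) reads the 1.0 entry as 32768 */

	/* linear interpolation between the two surrounding table points */
	return lo + (((hi - lo) * (s32)rem) >> 10);
}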
diff --git a/drivers/net/wireless/broadcom/brcm80211/include/brcm_hw_ids.h b/drivers/net/wireless/broadcom/brcm80211/include/brcm_hw_ids.h
index d0407d9ad782..f1fb8a3c7a32 100644
--- a/drivers/net/wireless/broadcom/brcm80211/include/brcm_hw_ids.h
+++ b/drivers/net/wireless/broadcom/brcm80211/include/brcm_hw_ids.h
@@ -36,6 +36,7 @@
#define BRCM_CC_4330_CHIP_ID 0x4330
#define BRCM_CC_4334_CHIP_ID 0x4334
#define BRCM_CC_43340_CHIP_ID 43340
+#define BRCM_CC_43341_CHIP_ID 43341
#define BRCM_CC_43362_CHIP_ID 43362
#define BRCM_CC_4335_CHIP_ID 0x4335
#define BRCM_CC_4339_CHIP_ID 0x4339
diff --git a/drivers/net/wireless/cisco/airo.c b/drivers/net/wireless/cisco/airo.c
index 69b826d229c5..4b040451a9b8 100644
--- a/drivers/net/wireless/cisco/airo.c
+++ b/drivers/net/wireless/cisco/airo.c
@@ -2329,14 +2329,6 @@ static int airo_set_mac_address(struct net_device *dev, void *p)
return 0;
}
-static int airo_change_mtu(struct net_device *dev, int new_mtu)
-{
- if ((new_mtu < 68) || (new_mtu > 2400))
- return -EINVAL;
- dev->mtu = new_mtu;
- return 0;
-}
-
static LIST_HEAD(airo_devices);
static void add_airo_dev(struct airo_info *ai)
@@ -2656,7 +2648,6 @@ static const struct net_device_ops airo11_netdev_ops = {
.ndo_get_stats = airo_get_stats,
.ndo_set_mac_address = airo_set_mac_address,
.ndo_do_ioctl = airo_ioctl,
- .ndo_change_mtu = airo_change_mtu,
};
static void wifi_setup(struct net_device *dev)
@@ -2668,6 +2659,8 @@ static void wifi_setup(struct net_device *dev)
dev->type = ARPHRD_IEEE80211;
dev->hard_header_len = ETH_HLEN;
dev->mtu = AIRO_DEF_MTU;
+ dev->min_mtu = 68;
+ dev->max_mtu = MIC_MSGLEN_MAX;
dev->addr_len = ETH_ALEN;
dev->tx_queue_len = 100;
@@ -2754,7 +2747,6 @@ static const struct net_device_ops airo_netdev_ops = {
.ndo_set_rx_mode = airo_set_multicast_list,
.ndo_set_mac_address = airo_set_mac_address,
.ndo_do_ioctl = airo_ioctl,
- .ndo_change_mtu = airo_change_mtu,
.ndo_validate_addr = eth_validate_addr,
};
@@ -2766,7 +2758,6 @@ static const struct net_device_ops mpi_netdev_ops = {
.ndo_set_rx_mode = airo_set_multicast_list,
.ndo_set_mac_address = airo_set_mac_address,
.ndo_do_ioctl = airo_ioctl,
- .ndo_change_mtu = airo_change_mtu,
.ndo_validate_addr = eth_validate_addr,
};
@@ -2822,6 +2813,7 @@ static struct net_device *_init_airo_card( unsigned short irq, int port,
dev->irq = irq;
dev->base_addr = port;
dev->priv_flags &= ~IFF_TX_SKB_SHARING;
+ dev->max_mtu = MIC_MSGLEN_MAX;
SET_NETDEV_DEV(dev, dmdev);
diff --git a/drivers/net/wireless/intel/ipw2x00/ipw2100.c b/drivers/net/wireless/intel/ipw2x00/ipw2100.c
index bfa542c8d6f1..64176090b196 100644
--- a/drivers/net/wireless/intel/ipw2x00/ipw2100.c
+++ b/drivers/net/wireless/intel/ipw2x00/ipw2100.c
@@ -6035,7 +6035,6 @@ static const struct net_device_ops ipw2100_netdev_ops = {
.ndo_open = ipw2100_open,
.ndo_stop = ipw2100_close,
.ndo_start_xmit = libipw_xmit,
- .ndo_change_mtu = libipw_change_mtu,
.ndo_tx_timeout = ipw2100_tx_timeout,
.ndo_set_mac_address = ipw2100_set_address,
.ndo_validate_addr = eth_validate_addr,
@@ -6071,6 +6070,8 @@ static struct net_device *ipw2100_alloc_device(struct pci_dev *pci_dev,
dev->wireless_data = &priv->wireless_data;
dev->watchdog_timeo = 3 * HZ;
dev->irq = 0;
+ dev->min_mtu = 68;
+ dev->max_mtu = LIBIPW_DATA_LEN;
/* NOTE: We don't use the wireless_handlers hook
* in dev as the system will start throwing WX requests
diff --git a/drivers/net/wireless/intel/ipw2x00/ipw2200.c b/drivers/net/wireless/intel/ipw2x00/ipw2200.c
index bfd68612a535..ef9af8a29cad 100644
--- a/drivers/net/wireless/intel/ipw2x00/ipw2200.c
+++ b/drivers/net/wireless/intel/ipw2x00/ipw2200.c
@@ -11561,7 +11561,6 @@ static const struct net_device_ops ipw_prom_netdev_ops = {
.ndo_open = ipw_prom_open,
.ndo_stop = ipw_prom_stop,
.ndo_start_xmit = ipw_prom_hard_start_xmit,
- .ndo_change_mtu = libipw_change_mtu,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
};
@@ -11587,6 +11586,9 @@ static int ipw_prom_alloc(struct ipw_priv *priv)
priv->prom_net_dev->type = ARPHRD_IEEE80211_RADIOTAP;
priv->prom_net_dev->netdev_ops = &ipw_prom_netdev_ops;
+ priv->prom_net_dev->min_mtu = 68;
+ priv->prom_net_dev->max_mtu = LIBIPW_DATA_LEN;
+
priv->prom_priv->ieee->iw_mode = IW_MODE_MONITOR;
SET_NETDEV_DEV(priv->prom_net_dev, &priv->pci_dev->dev);
@@ -11619,7 +11621,6 @@ static const struct net_device_ops ipw_netdev_ops = {
.ndo_set_rx_mode = ipw_net_set_multicast_list,
.ndo_set_mac_address = ipw_net_set_mac_address,
.ndo_start_xmit = libipw_xmit,
- .ndo_change_mtu = libipw_change_mtu,
.ndo_validate_addr = eth_validate_addr,
};
@@ -11729,6 +11730,9 @@ static int ipw_pci_probe(struct pci_dev *pdev,
net_dev->wireless_handlers = &ipw_wx_handler_def;
net_dev->ethtool_ops = &ipw_ethtool_ops;
+ net_dev->min_mtu = 68;
+ net_dev->max_mtu = LIBIPW_DATA_LEN;
+
err = sysfs_create_group(&pdev->dev.kobj, &ipw_attribute_group);
if (err) {
IPW_ERROR("failed to create sysfs device attributes\n");
diff --git a/drivers/net/wireless/intel/ipw2x00/libipw.h b/drivers/net/wireless/intel/ipw2x00/libipw.h
index b0571618c2ed..b51355134e04 100644
--- a/drivers/net/wireless/intel/ipw2x00/libipw.h
+++ b/drivers/net/wireless/intel/ipw2x00/libipw.h
@@ -948,7 +948,6 @@ static inline int libipw_is_cck_rate(u8 rate)
/* libipw.c */
void free_libipw(struct net_device *dev, int monitor);
struct net_device *alloc_libipw(int sizeof_priv, int monitor);
-int libipw_change_mtu(struct net_device *dev, int new_mtu);
void libipw_networks_age(struct libipw_device *ieee, unsigned long age_secs);
diff --git a/drivers/net/wireless/intel/ipw2x00/libipw_module.c b/drivers/net/wireless/intel/ipw2x00/libipw_module.c
index 60f28740f6af..2332075565f2 100644
--- a/drivers/net/wireless/intel/ipw2x00/libipw_module.c
+++ b/drivers/net/wireless/intel/ipw2x00/libipw_module.c
@@ -118,15 +118,6 @@ static void libipw_networks_initialize(struct libipw_device *ieee)
&ieee->network_free_list);
}
-int libipw_change_mtu(struct net_device *dev, int new_mtu)
-{
- if ((new_mtu < 68) || (new_mtu > LIBIPW_DATA_LEN))
- return -EINVAL;
- dev->mtu = new_mtu;
- return 0;
-}
-EXPORT_SYMBOL(libipw_change_mtu);
-
struct net_device *alloc_libipw(int sizeof_priv, int monitor)
{
struct libipw_device *ieee;
diff --git a/drivers/net/wireless/intel/ipw2x00/libipw_rx.c b/drivers/net/wireless/intel/ipw2x00/libipw_rx.c
index cef7f7d79cd9..1c1ec7bb9302 100644
--- a/drivers/net/wireless/intel/ipw2x00/libipw_rx.c
+++ b/drivers/net/wireless/intel/ipw2x00/libipw_rx.c
@@ -507,7 +507,7 @@ int libipw_rx(struct libipw_device *ieee, struct sk_buff *skb,
memcpy(dst, hdr->addr3, ETH_ALEN);
memcpy(src, hdr->addr4, ETH_ALEN);
break;
- case 0:
+ default:
memcpy(dst, hdr->addr1, ETH_ALEN);
memcpy(src, hdr->addr2, ETH_ALEN);
break;
diff --git a/drivers/net/wireless/intel/iwlegacy/Makefile b/drivers/net/wireless/intel/iwlegacy/Makefile
index c985a01a0731..c826a6b985bb 100644
--- a/drivers/net/wireless/intel/iwlegacy/Makefile
+++ b/drivers/net/wireless/intel/iwlegacy/Makefile
@@ -13,5 +13,3 @@ iwl4965-$(CONFIG_IWLEGACY_DEBUGFS) += 4965-debug.o
obj-$(CONFIG_IWL3945) += iwl3945.o
iwl3945-objs := 3945-mac.o 3945.o 3945-rs.o
iwl3945-$(CONFIG_IWLEGACY_DEBUGFS) += 3945-debug.o
-
-ccflags-y += -D__CHECK_ENDIAN__
diff --git a/drivers/net/wireless/intel/iwlwifi/Makefile b/drivers/net/wireless/intel/iwlwifi/Makefile
index 6e7ed908de0c..92e611841200 100644
--- a/drivers/net/wireless/intel/iwlwifi/Makefile
+++ b/drivers/net/wireless/intel/iwlwifi/Makefile
@@ -15,7 +15,7 @@ iwlwifi-objs += $(iwlwifi-m)
iwlwifi-$(CONFIG_IWLWIFI_DEVICE_TRACING) += iwl-devtrace.o
-ccflags-y += -D__CHECK_ENDIAN__ -I$(src)
+ccflags-y += -I$(src)
obj-$(CONFIG_IWLDVM) += dvm/
obj-$(CONFIG_IWLMVM) += mvm/
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/Makefile b/drivers/net/wireless/intel/iwlwifi/dvm/Makefile
index 4d19685f31c3..b256a354953a 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/Makefile
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/Makefile
@@ -10,4 +10,4 @@ iwldvm-objs += rxon.o devices.o
iwldvm-$(CONFIG_IWLWIFI_LEDS) += led.o
iwldvm-$(CONFIG_IWLWIFI_DEBUGFS) += debugfs.o
-ccflags-y += -D__CHECK_ENDIAN__ -I$(src)/../
+ccflags-y += -I$(src)/../
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/calib.c b/drivers/net/wireless/intel/iwlwifi/dvm/calib.c
index e9cef9de9ed8..c96f9b1d948a 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/calib.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/calib.c
@@ -900,8 +900,7 @@ static void iwlagn_gain_computation(struct iwl_priv *priv,
/* bound gain by 2 bits value max, 3rd bit is sign */
data->delta_gain_code[i] =
- min(abs(delta_g),
- (s32) CHAIN_NOISE_MAX_DELTA_GAIN_CODE);
+ min(abs(delta_g), CHAIN_NOISE_MAX_DELTA_GAIN_CODE);
if (delta_g < 0)
/*
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-fw-file.h b/drivers/net/wireless/intel/iwlwifi/iwl-fw-file.h
index ceec5ca2b1ab..84813b550ef1 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-fw-file.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-fw-file.h
@@ -228,7 +228,7 @@ enum iwl_ucode_tlv_flag {
IWL_UCODE_TLV_FLAGS_BCAST_FILTERING = BIT(29),
};
-typedef unsigned int __bitwise__ iwl_ucode_tlv_api_t;
+typedef unsigned int __bitwise iwl_ucode_tlv_api_t;
/**
* enum iwl_ucode_tlv_api - ucode api
@@ -258,7 +258,7 @@ enum iwl_ucode_tlv_api {
#endif
};
-typedef unsigned int __bitwise__ iwl_ucode_tlv_capa_t;
+typedef unsigned int __bitwise iwl_ucode_tlv_capa_t;
/**
* enum iwl_ucode_tlv_capa - ucode capabilities
@@ -293,6 +293,7 @@ typedef unsigned int __bitwise__ iwl_ucode_tlv_capa_t;
* is supported.
* @IWL_UCODE_TLV_CAPA_BT_COEX_RRC: supports BT Coex RRC
* @IWL_UCODE_TLV_CAPA_GSCAN_SUPPORT: supports gscan
+ * @IWL_UCODE_TLV_CAPA_STA_PM_NOTIF: firmware will send STA PM notification
* @IWL_UCODE_TLV_CAPA_EXTENDED_DTS_MEASURE: extended DTS measurement
* @IWL_UCODE_TLV_CAPA_SHORT_PM_TIMEOUTS: supports short PM timeouts
* @IWL_UCODE_TLV_CAPA_BT_MPLUT_SUPPORT: supports bt-coex Multi-priority LUT
@@ -342,6 +343,7 @@ enum iwl_ucode_tlv_capa {
IWL_UCODE_TLV_CAPA_LAR_MULTI_MCC = (__force iwl_ucode_tlv_capa_t)29,
IWL_UCODE_TLV_CAPA_BT_COEX_RRC = (__force iwl_ucode_tlv_capa_t)30,
IWL_UCODE_TLV_CAPA_GSCAN_SUPPORT = (__force iwl_ucode_tlv_capa_t)31,
+ IWL_UCODE_TLV_CAPA_STA_PM_NOTIF = (__force iwl_ucode_tlv_capa_t)38,
IWL_UCODE_TLV_CAPA_EXTENDED_DTS_MEASURE = (__force iwl_ucode_tlv_capa_t)64,
IWL_UCODE_TLV_CAPA_SHORT_PM_TIMEOUTS = (__force iwl_ucode_tlv_capa_t)65,
IWL_UCODE_TLV_CAPA_BT_MPLUT_SUPPORT = (__force iwl_ucode_tlv_capa_t)67,
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/Makefile b/drivers/net/wireless/intel/iwlwifi/mvm/Makefile
index 2e06dfc1c477..83ac807e547d 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/Makefile
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/Makefile
@@ -9,4 +9,4 @@ iwlmvm-$(CONFIG_IWLWIFI_LEDS) += led.o
iwlmvm-y += tof.o fw-dbg.o
iwlmvm-$(CONFIG_PM) += d3.o
-ccflags-y += -D__CHECK_ENDIAN__ -I$(src)/../
+ccflags-y += -I$(src)/../
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-rx.h b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-rx.h
index acc5cd53e4ba..b530fa47d68a 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-rx.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-rx.h
@@ -474,4 +474,30 @@ struct iwl_mvm_internal_rxq_notif {
u8 data[];
} __packed;
+/**
+ * enum iwl_mvm_pm_event - type of station PM event
+ * @IWL_MVM_PM_EVENT_AWAKE: station woke up
+ * @IWL_MVM_PM_EVENT_ASLEEP: station went to sleep
+ * @IWL_MVM_PM_EVENT_UAPSD: station sent uAPSD trigger
+ * @IWL_MVM_PM_EVENT_PS_POLL: station sent PS-Poll
+ */
+enum iwl_mvm_pm_event {
+ IWL_MVM_PM_EVENT_AWAKE,
+ IWL_MVM_PM_EVENT_ASLEEP,
+ IWL_MVM_PM_EVENT_UAPSD,
+ IWL_MVM_PM_EVENT_PS_POLL,
+}; /* PEER_PM_NTFY_API_E_VER_1 */
+
+/**
+ * struct iwl_mvm_pm_state_notification - station PM state notification
+ * @sta_id: station ID of the station changing state
+ * @type: the new powersave state, see IWL_MVM_PM_EVENT_ above
+ */
+struct iwl_mvm_pm_state_notification {
+ u8 sta_id;
+ u8 type;
+ /* private: */
+ u16 reserved;
+} __packed; /* PEER_PM_NTFY_API_S_VER_1 */
+
#endif /* __fw_api_rx_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-sta.h b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-sta.h
index 6c8e3ca79323..3b5150e9975d 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-sta.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-sta.h
@@ -179,7 +179,7 @@ enum iwl_sta_key_flag {
* enum iwl_sta_modify_flag - indicate to the fw what flag are being changed
* @STA_MODIFY_QUEUE_REMOVAL: this command removes a queue
* @STA_MODIFY_TID_DISABLE_TX: this command modifies %tid_disable_tx
- * @STA_MODIFY_TX_RATE: unused
+ * @STA_MODIFY_UAPSD_ACS: this command modifies %uapsd_trigger_acs
* @STA_MODIFY_ADD_BA_TID: this command modifies %add_immediate_ba_tid
* @STA_MODIFY_REMOVE_BA_TID: this command modifies %remove_immediate_ba_tid
* @STA_MODIFY_SLEEPING_STA_TX_COUNT: this command modifies %sleep_tx_count
@@ -189,7 +189,7 @@ enum iwl_sta_key_flag {
enum iwl_sta_modify_flag {
STA_MODIFY_QUEUE_REMOVAL = BIT(0),
STA_MODIFY_TID_DISABLE_TX = BIT(1),
- STA_MODIFY_TX_RATE = BIT(2),
+ STA_MODIFY_UAPSD_ACS = BIT(2),
STA_MODIFY_ADD_BA_TID = BIT(3),
STA_MODIFY_REMOVE_BA_TID = BIT(4),
STA_MODIFY_SLEEPING_STA_TX_COUNT = BIT(5),
@@ -353,6 +353,8 @@ struct iwl_mvm_add_sta_cmd_v7 {
* @beamform_flags: beam forming controls
* @tfd_queue_msk: tfd queues used by this station
* @rx_ba_window: aggregation window size
+ * @scd_queue_bank: queue bank in use. Each bank contains 32 queues. 0 means
+ * that the queues used by this station are in the first 32.
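+ * @uapsd_trigger_acs: bitmap of ACs (BK/BE/VI/VO) for which U-APSD
+ *	triggering is enabled for this station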
*
* The device contains an internal table of per-station information, with info
* on security keys, aggregation parameters, and Tx rates for initial Tx
@@ -382,7 +384,8 @@ struct iwl_mvm_add_sta_cmd {
__le16 beamform_flags;
__le32 tfd_queue_msk;
__le16 rx_ba_window;
- __le16 reserved;
+ u8 scd_queue_bank;
+ u8 uapsd_trigger_acs;
} __packed; /* ADD_STA_CMD_API_S_VER_8 */
/**
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h
index 97633690f3d5..ae12badc0c2a 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h
@@ -332,6 +332,7 @@ enum iwl_data_path_subcmd_ids {
DQA_ENABLE_CMD = 0x0,
UPDATE_MU_GROUPS_CMD = 0x1,
TRIGGER_RX_QUEUES_NOTIF_CMD = 0x2,
+ STA_PM_NOTIF = 0xFD,
MU_GROUP_MGMT_NOTIF = 0xFE,
RX_QUEUES_NOTIFICATION = 0xFF,
};
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c
index d89d0a1fd34e..2e8e3e8e30a3 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c
@@ -70,49 +70,6 @@
#include "iwl-prph.h"
#include "iwl-csr.h"
-static ssize_t iwl_mvm_read_coredump(char *buffer, loff_t offset, size_t count,
- void *data, size_t datalen)
-{
- const struct iwl_mvm_dump_ptrs *dump_ptrs = data;
- ssize_t bytes_read;
- ssize_t bytes_read_trans;
-
- if (offset < dump_ptrs->op_mode_len) {
- bytes_read = min_t(ssize_t, count,
- dump_ptrs->op_mode_len - offset);
- memcpy(buffer, (u8 *)dump_ptrs->op_mode_ptr + offset,
- bytes_read);
- offset += bytes_read;
- count -= bytes_read;
-
- if (count == 0)
- return bytes_read;
- } else {
- bytes_read = 0;
- }
-
- if (!dump_ptrs->trans_ptr)
- return bytes_read;
-
- offset -= dump_ptrs->op_mode_len;
- bytes_read_trans = min_t(ssize_t, count,
- dump_ptrs->trans_ptr->len - offset);
- memcpy(buffer + bytes_read,
- (u8 *)dump_ptrs->trans_ptr->data + offset,
- bytes_read_trans);
-
- return bytes_read + bytes_read_trans;
-}
-
-static void iwl_mvm_free_coredump(void *data)
-{
- const struct iwl_mvm_dump_ptrs *fw_error_dump = data;
-
- vfree(fw_error_dump->op_mode_ptr);
- vfree(fw_error_dump->trans_ptr);
- kfree(fw_error_dump);
-}
-
#define RADIO_REG_MAX_READ 0x2ad
static void iwl_mvm_read_radio_reg(struct iwl_mvm *mvm,
struct iwl_fw_error_dump_data **dump_data)
@@ -491,6 +448,43 @@ static u32 iwl_dump_prph(struct iwl_trans *trans,
return prph_len;
}
+/*
+ * alloc_sgtable - allocate a scatterlist table of the given size,
+ * fill it with pages and return it
+ * @size: the size (in bytes) of the table
+ */
+static struct scatterlist *alloc_sgtable(int size)
+{
+ int alloc_size, nents, i;
+ struct page *new_page;
+ struct scatterlist *iter;
+ struct scatterlist *table;
+
+ nents = DIV_ROUND_UP(size, PAGE_SIZE);
+ table = kcalloc(nents, sizeof(*table), GFP_KERNEL);
+ if (!table)
+ return NULL;
+ sg_init_table(table, nents);
+ iter = table;
+ for_each_sg(table, iter, sg_nents(table), i) {
+ new_page = alloc_page(GFP_KERNEL);
+ if (!new_page) {
+ /* release all previously allocated pages in the table */
+ iter = table;
+ for_each_sg(table, iter, sg_nents(table), i) {
+ new_page = sg_page(iter);
+ if (new_page)
+ __free_page(new_page);
+ }
+ return NULL;
+ }
+ alloc_size = min_t(int, size, PAGE_SIZE);
+ size -= PAGE_SIZE;
+ sg_set_page(iter, new_page, alloc_size, 0);
+ }
+ return table;
+}
+
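A note on lifetime, as we read it (not spelled out in the patch): dev_coredumpsg(), called further down, takes ownership of the table and its pages and frees them once the coredump is read or expires, so no matching free helper is needed here; only the mid-allocation error path above unwinds its own pages.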
void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
{
struct iwl_fw_error_dump_file *dump_file;
@@ -499,6 +493,7 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
struct iwl_fw_error_dump_mem *dump_mem;
struct iwl_fw_error_dump_trigger_desc *dump_trig;
struct iwl_mvm_dump_ptrs *fw_error_dump;
+ struct scatterlist *sg_dump_data;
u32 sram_len, sram_ofs;
struct iwl_fw_dbg_mem_seg_tlv * const *fw_dbg_mem =
mvm->fw->dbg_mem_tlv;
@@ -815,8 +810,23 @@ dump_trans_data:
file_len += fw_error_dump->trans_ptr->len;
dump_file->file_len = cpu_to_le32(file_len);
- dev_coredumpm(mvm->trans->dev, THIS_MODULE, fw_error_dump, 0,
- GFP_KERNEL, iwl_mvm_read_coredump, iwl_mvm_free_coredump);
+ sg_dump_data = alloc_sgtable(file_len);
+ if (sg_dump_data) {
+ sg_pcopy_from_buffer(sg_dump_data,
+ sg_nents(sg_dump_data),
+ fw_error_dump->op_mode_ptr,
+ fw_error_dump->op_mode_len, 0);
+ sg_pcopy_from_buffer(sg_dump_data,
+ sg_nents(sg_dump_data),
+ fw_error_dump->trans_ptr->data,
+ fw_error_dump->trans_ptr->len,
+ fw_error_dump->op_mode_len);
+ dev_coredumpsg(mvm->trans->dev, sg_dump_data, file_len,
+ GFP_KERNEL);
+ }
+ vfree(fw_error_dump->op_mode_ptr);
+ vfree(fw_error_dump->trans_ptr);
+ kfree(fw_error_dump);
out:
iwl_mvm_free_fw_dump_desc(mvm);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
index 6b962d6b067a..4a0874e40731 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
@@ -499,23 +499,21 @@ int iwl_mvm_mac_ctxt_init(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
if (ret)
return ret;
+ /* If DQA is supported - queues will be enabled when needed */
+ if (iwl_mvm_is_dqa_supported(mvm))
+ return 0;
+
switch (vif->type) {
case NL80211_IFTYPE_P2P_DEVICE:
- if (!iwl_mvm_is_dqa_supported(mvm))
- iwl_mvm_enable_ac_txq(mvm, IWL_MVM_OFFCHANNEL_QUEUE,
- IWL_MVM_OFFCHANNEL_QUEUE,
- IWL_MVM_TX_FIFO_VO, 0,
- wdg_timeout);
+ iwl_mvm_enable_ac_txq(mvm, IWL_MVM_OFFCHANNEL_QUEUE,
+ IWL_MVM_OFFCHANNEL_QUEUE,
+ IWL_MVM_TX_FIFO_VO, 0, wdg_timeout);
break;
case NL80211_IFTYPE_AP:
iwl_mvm_enable_ac_txq(mvm, vif->cab_queue, vif->cab_queue,
IWL_MVM_TX_FIFO_MCAST, 0, wdg_timeout);
/* fall through */
default:
- /* If DQA is supported - queues will be enabled when needed */
- if (iwl_mvm_is_dqa_supported(mvm))
- break;
-
for (ac = 0; ac < IEEE80211_NUM_ACS; ac++)
iwl_mvm_enable_ac_txq(mvm, vif->hw_queue[ac],
vif->hw_queue[ac],
@@ -899,9 +897,11 @@ static int iwl_mvm_mac_ctxt_cmd_listener(struct iwl_mvm *mvm,
iwl_mvm_mac_ctxt_cmd_common(mvm, vif, &cmd, NULL, action);
- for (i = 0; i < IEEE80211_NUM_ACS; i++)
- if (vif->hw_queue[i] != IEEE80211_INVAL_HW_QUEUE)
- tfd_queue_msk |= BIT(vif->hw_queue[i]);
+ if (!iwl_mvm_is_dqa_supported(mvm)) {
+ for (i = 0; i < IEEE80211_NUM_ACS; i++)
+ if (vif->hw_queue[i] != IEEE80211_INVAL_HW_QUEUE)
+ tfd_queue_msk |= BIT(vif->hw_queue[i]);
+ }
cmd.filter_flags = cpu_to_le32(MAC_FILTER_IN_PROMISC |
MAC_FILTER_IN_CONTROL_AND_MGMT |
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
index 1db1dc13e988..45122dafe922 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
@@ -445,6 +445,8 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
ieee80211_hw_set(hw, NEEDS_UNIQUE_STA_ADDR);
if (iwl_mvm_has_new_rx_api(mvm))
ieee80211_hw_set(hw, SUPPORTS_REORDERING_BUFFER);
+ if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_STA_PM_NOTIF))
+ ieee80211_hw_set(hw, AP_LINK_PS);
if (mvm->trans->num_rx_queues > 1)
ieee80211_hw_set(hw, USES_RSS);
@@ -2097,6 +2099,22 @@ static int iwl_mvm_start_ap_ibss(struct ieee80211_hw *hw,
if (ret)
goto out_unbind;
+ /* enable the multicast queue, now that we have a station for it */
+ if (iwl_mvm_is_dqa_supported(mvm)) {
+ unsigned int wdg_timeout =
+ iwl_mvm_get_wd_timeout(mvm, vif, false, false);
+ struct iwl_trans_txq_scd_cfg cfg = {
+ .fifo = IWL_MVM_TX_FIFO_MCAST,
+ .sta_id = mvmvif->bcast_sta.sta_id,
+ .tid = IWL_MAX_TID_COUNT,
+ .aggregate = false,
+ .frame_limit = IWL_FRAME_LIMIT,
+ };
+
+ iwl_mvm_enable_txq(mvm, vif->cab_queue, vif->cab_queue, 0,
+ &cfg, wdg_timeout);
+ }
+
/* must be set before quota calculations */
mvmvif->ap_ibss_active = true;
@@ -2318,10 +2336,9 @@ iwl_mvm_mac_release_buffered_frames(struct ieee80211_hw *hw,
tids, more_data, true);
}
-static void iwl_mvm_mac_sta_notify(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- enum sta_notify_cmd cmd,
- struct ieee80211_sta *sta)
+static void __iwl_mvm_mac_sta_notify(struct ieee80211_hw *hw,
+ enum sta_notify_cmd cmd,
+ struct ieee80211_sta *sta)
{
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
@@ -2374,6 +2391,67 @@ static void iwl_mvm_mac_sta_notify(struct ieee80211_hw *hw,
spin_unlock_bh(&mvmsta->lock);
}
+static void iwl_mvm_mac_sta_notify(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ enum sta_notify_cmd cmd,
+ struct ieee80211_sta *sta)
+{
+ __iwl_mvm_mac_sta_notify(hw, cmd, sta);
+}
+
+void iwl_mvm_sta_pm_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
+{
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ struct iwl_mvm_pm_state_notification *notif = (void *)pkt->data;
+ struct ieee80211_sta *sta;
+ struct iwl_mvm_sta *mvmsta;
+ bool sleeping = (notif->type != IWL_MVM_PM_EVENT_AWAKE);
+
+ if (WARN_ON(notif->sta_id >= ARRAY_SIZE(mvm->fw_id_to_mac_id)))
+ return;
+
+ rcu_read_lock();
+ sta = mvm->fw_id_to_mac_id[notif->sta_id];
+ if (WARN_ON(IS_ERR_OR_NULL(sta))) {
+ rcu_read_unlock();
+ return;
+ }
+
+ mvmsta = iwl_mvm_sta_from_mac80211(sta);
+
+ if (!mvmsta->vif ||
+ mvmsta->vif->type != NL80211_IFTYPE_AP) {
+ rcu_read_unlock();
+ return;
+ }
+
+ if (mvmsta->sleeping != sleeping) {
+ mvmsta->sleeping = sleeping;
+ __iwl_mvm_mac_sta_notify(mvm->hw,
+ sleeping ? STA_NOTIFY_SLEEP : STA_NOTIFY_AWAKE,
+ sta);
+ ieee80211_sta_ps_transition(sta, sleeping);
+ }
+
+ if (sleeping) {
+ switch (notif->type) {
+ case IWL_MVM_PM_EVENT_AWAKE:
+ case IWL_MVM_PM_EVENT_ASLEEP:
+ break;
+ case IWL_MVM_PM_EVENT_UAPSD:
+ ieee80211_sta_uapsd_trigger(sta, IEEE80211_NUM_TIDS);
+ break;
+ case IWL_MVM_PM_EVENT_PS_POLL:
+ ieee80211_sta_pspoll(sta);
+ break;
+ default:
+ break;
+ }
+ }
+
+ rcu_read_unlock();
+}
+
static void iwl_mvm_sta_pre_rcu_remove(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta)
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
index c60703e0c246..4a9cb76b7611 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
@@ -1112,9 +1112,8 @@ static inline bool iwl_mvm_is_d0i3_supported(struct iwl_mvm *mvm)
static inline bool iwl_mvm_is_dqa_supported(struct iwl_mvm *mvm)
{
- /* Make sure DQA isn't allowed in driver until feature is complete */
- return false && fw_has_capa(&mvm->fw->ucode_capa,
- IWL_UCODE_TLV_CAPA_DQA_SUPPORT);
+ return fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_DQA_SUPPORT);
}
static inline bool iwl_mvm_enter_d0i3_on_suspend(struct iwl_mvm *mvm)
@@ -1419,6 +1418,7 @@ void iwl_mvm_rx_stored_beacon_notif(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb);
void iwl_mvm_mu_mimo_grp_notif(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb);
+void iwl_mvm_sta_pm_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb);
void iwl_mvm_window_status_notif(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb);
void iwl_mvm_mac_ctxt_recalc_tsf_id(struct iwl_mvm *mvm,
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
index 4d35deb628bc..f14aada390c5 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
@@ -306,6 +306,8 @@ static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = {
iwl_mvm_rx_stored_beacon_notif, RX_HANDLER_SYNC),
RX_HANDLER_GRP(DATA_PATH_GROUP, MU_GROUP_MGMT_NOTIF,
iwl_mvm_mu_mimo_grp_notif, RX_HANDLER_SYNC),
+ RX_HANDLER_GRP(DATA_PATH_GROUP, STA_PM_NOTIF,
+ iwl_mvm_sta_pm_notif, RX_HANDLER_SYNC),
};
#undef RX_HANDLER
#undef RX_HANDLER_GRP
@@ -452,6 +454,7 @@ static const struct iwl_hcmd_names iwl_mvm_phy_names[] = {
static const struct iwl_hcmd_names iwl_mvm_data_path_names[] = {
HCMD_NAME(UPDATE_MU_GROUPS_CMD),
HCMD_NAME(TRIGGER_RX_QUEUES_NOTIF_CMD),
+ HCMD_NAME(STA_PM_NOTIF),
HCMD_NAME(MU_GROUP_MGMT_NOTIF),
HCMD_NAME(RX_QUEUES_NOTIFICATION),
};
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
index fc771885e383..636c8b03e318 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
@@ -202,6 +202,20 @@ int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
cpu_to_le32(agg_size << STA_FLG_MAX_AGG_SIZE_SHIFT);
add_sta_cmd.station_flags |=
cpu_to_le32(mpdu_dens << STA_FLG_AGG_MPDU_DENS_SHIFT);
+ add_sta_cmd.assoc_id = cpu_to_le16(sta->aid);
+
+ if (sta->wme) {
+ add_sta_cmd.modify_mask |= STA_MODIFY_UAPSD_ACS;
+
+ if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BK)
+ add_sta_cmd.uapsd_trigger_acs |= BIT(AC_BK);
+ if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BE)
+ add_sta_cmd.uapsd_trigger_acs |= BIT(AC_BE);
+ if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VI)
+ add_sta_cmd.uapsd_trigger_acs |= BIT(AC_VI);
+ if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VO)
+ add_sta_cmd.uapsd_trigger_acs |= BIT(AC_VO);
+ }
status = ADD_STA_SUCCESS;
ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
@@ -875,12 +889,17 @@ static void iwl_mvm_change_queue_owner(struct iwl_mvm *mvm, int queue)
cmd.tx_fifo = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]];
ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd);
- if (ret)
+ if (ret) {
IWL_ERR(mvm, "Failed to update owner of TXQ %d (ret=%d)\n",
queue, ret);
- else
- IWL_DEBUG_TX_QUEUES(mvm, "Changed TXQ %d ownership to tid %d\n",
- queue, tid);
+ return;
+ }
+
+ spin_lock_bh(&mvm->queue_info_lock);
+ mvm->queue_info[queue].txq_tid = tid;
+ spin_unlock_bh(&mvm->queue_info_lock);
+ IWL_DEBUG_TX_QUEUES(mvm, "Changed TXQ %d ownership to tid %d\n",
+ queue, tid);
}
static void iwl_mvm_unshare_queue(struct iwl_mvm *mvm, int queue)
@@ -1010,6 +1029,7 @@ static void iwl_mvm_tx_deferred_stream(struct iwl_mvm *mvm,
local_bh_disable();
spin_lock(&mvmsta->lock);
skb_queue_splice_init(&tid_data->deferred_tx_frames, &deferred_tx);
+ mvmsta->deferred_traffic_tid_map &= ~BIT(tid);
spin_unlock(&mvmsta->lock);
while ((skb = __skb_dequeue(&deferred_tx)))
@@ -1489,12 +1509,15 @@ int iwl_mvm_rm_sta(struct iwl_mvm *mvm,
ret = iwl_mvm_drain_sta(mvm, mvm_sta, false);
/* If DQA is supported - the queues can be disabled now */
- if (iwl_mvm_is_dqa_supported(mvm)) {
+ if (iwl_mvm_is_dqa_supported(mvm))
+ iwl_mvm_disable_sta_queues(mvm, vif, mvm_sta);
+
+ /* If there is a TXQ still marked as reserved - free it */
+ if (iwl_mvm_is_dqa_supported(mvm) &&
+ mvm_sta->reserved_queue != IEEE80211_INVAL_HW_QUEUE) {
u8 reserved_txq = mvm_sta->reserved_queue;
enum iwl_mvm_queue_status *status;
- iwl_mvm_disable_sta_queues(mvm, vif, mvm_sta);
-
/*
* If no traffic has gone through the reserved TXQ - it
* is still marked as IWL_MVM_QUEUE_RESERVED, and
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
index e068d5355865..b45c7b9937c8 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
@@ -436,6 +436,7 @@ struct iwl_mvm_sta {
bool disable_tx;
bool tlc_amsdu;
+ bool sleeping;
u8 agg_tids;
u8 sleep_tx_count;
u8 avg_energy;
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
index ae95533e587d..b10e3633df1a 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
@@ -1598,6 +1598,29 @@ static void iwl_pcie_irq_set_affinity(struct iwl_trans *trans)
}
}
+static const char *queue_name(struct device *dev,
+ struct iwl_trans_pcie *trans_p, int i)
+{
+ if (trans_p->shared_vec_mask) {
+ int vec = trans_p->shared_vec_mask &
+ IWL_SHARED_IRQ_FIRST_RSS ? 1 : 0;
+
+ if (i == 0)
+ return DRV_NAME ": shared IRQ";
+
+ return devm_kasprintf(dev, GFP_KERNEL,
+ DRV_NAME ": queue %d", i + vec);
+ }
+ if (i == 0)
+ return DRV_NAME ": default queue";
+
+ if (i == trans_p->alloc_vecs - 1)
+ return DRV_NAME ": exception";
+
+ return devm_kasprintf(dev, GFP_KERNEL,
+ DRV_NAME ": queue %d", i);
+}
+
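As a concrete example (assuming DRV_NAME expands to "iwlwifi" and no vector is shared), a three-vector setup would request IRQs named:

	iwlwifi: default queue
	iwlwifi: queue 1
	iwlwifi: exception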
static int iwl_pcie_init_msix_handler(struct pci_dev *pdev,
struct iwl_trans_pcie *trans_pcie)
{
@@ -1606,6 +1629,10 @@ static int iwl_pcie_init_msix_handler(struct pci_dev *pdev,
for (i = 0; i < trans_pcie->alloc_vecs; i++) {
int ret;
struct msix_entry *msix_entry;
+ const char *qname = queue_name(&pdev->dev, trans_pcie, i);
+
+ if (!qname)
+ return -ENOMEM;
msix_entry = &trans_pcie->msix_entries[i];
ret = devm_request_threaded_irq(&pdev->dev,
@@ -1615,7 +1642,7 @@ static int iwl_pcie_init_msix_handler(struct pci_dev *pdev,
iwl_pcie_irq_msix_handler :
iwl_pcie_irq_rx_msix_handler,
IRQF_SHARED,
- DRV_NAME,
+ qname,
msix_entry);
if (ret) {
IWL_ERR(trans_pcie->trans,
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
index 5f840f16f40b..e44e5adc2b95 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
@@ -2196,7 +2196,7 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
memcpy(skb_put(csum_skb, tcp_hdrlen(skb)),
tcph, tcp_hdrlen(skb));
- skb_set_transport_header(csum_skb, 0);
+ skb_reset_transport_header(csum_skb);
csum_skb->csum_start =
(unsigned char *)tcp_hdr(csum_skb) -
csum_skb->head;
diff --git a/drivers/net/wireless/intersil/hostap/hostap_80211_rx.c b/drivers/net/wireless/intersil/hostap/hostap_80211_rx.c
index 599f30f22841..34dbddbf3f9b 100644
--- a/drivers/net/wireless/intersil/hostap/hostap_80211_rx.c
+++ b/drivers/net/wireless/intersil/hostap/hostap_80211_rx.c
@@ -855,7 +855,7 @@ void hostap_80211_rx(struct net_device *dev, struct sk_buff *skb,
memcpy(dst, hdr->addr3, ETH_ALEN);
memcpy(src, hdr->addr4, ETH_ALEN);
break;
- case 0:
+ default:
memcpy(dst, hdr->addr1, ETH_ALEN);
memcpy(src, hdr->addr2, ETH_ALEN);
break;
diff --git a/drivers/net/wireless/intersil/hostap/hostap_main.c b/drivers/net/wireless/intersil/hostap/hostap_main.c
index 80d4228ba754..1a16b8cb366e 100644
--- a/drivers/net/wireless/intersil/hostap/hostap_main.c
+++ b/drivers/net/wireless/intersil/hostap/hostap_main.c
@@ -765,16 +765,6 @@ static void hostap_set_multicast_list(struct net_device *dev)
}
-static int prism2_change_mtu(struct net_device *dev, int new_mtu)
-{
- if (new_mtu < PRISM2_MIN_MTU || new_mtu > PRISM2_MAX_MTU)
- return -EINVAL;
-
- dev->mtu = new_mtu;
- return 0;
-}
-
-
static void prism2_tx_timeout(struct net_device *dev)
{
struct hostap_interface *iface;
@@ -813,7 +803,6 @@ static const struct net_device_ops hostap_netdev_ops = {
.ndo_do_ioctl = hostap_ioctl,
.ndo_set_mac_address = prism2_set_mac_address,
.ndo_set_rx_mode = hostap_set_multicast_list,
- .ndo_change_mtu = prism2_change_mtu,
.ndo_tx_timeout = prism2_tx_timeout,
.ndo_validate_addr = eth_validate_addr,
};
@@ -826,7 +815,6 @@ static const struct net_device_ops hostap_mgmt_netdev_ops = {
.ndo_do_ioctl = hostap_ioctl,
.ndo_set_mac_address = prism2_set_mac_address,
.ndo_set_rx_mode = hostap_set_multicast_list,
- .ndo_change_mtu = prism2_change_mtu,
.ndo_tx_timeout = prism2_tx_timeout,
.ndo_validate_addr = eth_validate_addr,
};
@@ -839,7 +827,6 @@ static const struct net_device_ops hostap_master_ops = {
.ndo_do_ioctl = hostap_ioctl,
.ndo_set_mac_address = prism2_set_mac_address,
.ndo_set_rx_mode = hostap_set_multicast_list,
- .ndo_change_mtu = prism2_change_mtu,
.ndo_tx_timeout = prism2_tx_timeout,
.ndo_validate_addr = eth_validate_addr,
};
@@ -851,6 +838,8 @@ void hostap_setup_dev(struct net_device *dev, local_info_t *local,
iface = netdev_priv(dev);
ether_setup(dev);
+ dev->min_mtu = PRISM2_MIN_MTU;
+ dev->max_mtu = PRISM2_MAX_MTU;
dev->priv_flags &= ~IFF_TX_SKB_SHARING;
/* kernel callbacks */
diff --git a/drivers/net/wireless/intersil/orinoco/Makefile b/drivers/net/wireless/intersil/orinoco/Makefile
index bfdefb85abcd..b7ecef820f76 100644
--- a/drivers/net/wireless/intersil/orinoco/Makefile
+++ b/drivers/net/wireless/intersil/orinoco/Makefile
@@ -12,6 +12,3 @@ obj-$(CONFIG_TMD_HERMES) += orinoco_tmd.o
obj-$(CONFIG_NORTEL_HERMES) += orinoco_nortel.o
obj-$(CONFIG_PCMCIA_SPECTRUM) += spectrum_cs.o
obj-$(CONFIG_ORINOCO_USB) += orinoco_usb.o
-
-# Orinoco should be endian clean.
-ccflags-y += -D__CHECK_ENDIAN__
diff --git a/drivers/net/wireless/intersil/orinoco/main.c b/drivers/net/wireless/intersil/orinoco/main.c
index 7afe2004e930..9d96b7c928f7 100644
--- a/drivers/net/wireless/intersil/orinoco/main.c
+++ b/drivers/net/wireless/intersil/orinoco/main.c
@@ -322,9 +322,6 @@ int orinoco_change_mtu(struct net_device *dev, int new_mtu)
{
struct orinoco_private *priv = ndev_priv(dev);
- if ((new_mtu < ORINOCO_MIN_MTU) || (new_mtu > ORINOCO_MAX_MTU))
- return -EINVAL;
-
/* MTU + encapsulation + header length */
if ((new_mtu + ENCAPS_OVERHEAD + sizeof(struct ieee80211_hdr)) >
(priv->nicbuf_size - ETH_HLEN))
@@ -2288,6 +2285,9 @@ int orinoco_if_add(struct orinoco_private *priv,
dev->base_addr = base_addr;
dev->irq = irq;
+ dev->min_mtu = ORINOCO_MIN_MTU;
+ dev->max_mtu = ORINOCO_MAX_MTU;
+
SET_NETDEV_DEV(dev, priv->dev);
ret = register_netdev(dev);
if (ret)
diff --git a/drivers/net/wireless/intersil/p54/fwio.c b/drivers/net/wireless/intersil/p54/fwio.c
index 257a9eadd595..4ac6764f4897 100644
--- a/drivers/net/wireless/intersil/p54/fwio.c
+++ b/drivers/net/wireless/intersil/p54/fwio.c
@@ -488,7 +488,7 @@ int p54_scan(struct p54_common *priv, u16 mode, u16 dwell)
entry += sizeof(__le16);
chan->pa_points_per_curve = 8;
- memset(chan->curve_data, 0, sizeof(*chan->curve_data));
+ memset(chan->curve_data, 0, sizeof(chan->curve_data));
memcpy(chan->curve_data, entry,
sizeof(struct p54_pa_curve_data_sample) *
min((u8)8, curve_data->points_per_channel));
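
The one-character p54 fix above is easy to misread. chan->curve_data is an array member, so sizeof(*chan->curve_data) yields the size of a single element, while sizeof(chan->curve_data) yields the whole array, which is what the memset() needs to clear. A standalone illustration of the difference (hypothetical struct, not the real p54 layout):

#include <stdio.h>

struct sample { unsigned char data[8]; };

struct body {
	struct sample curve_data[8];	/* array member, as in the p54 case */
};

int main(void)
{
	struct body b;

	/* sizeof(*b.curve_data) == sizeof(struct sample): one element only */
	printf("%zu\n", sizeof(*b.curve_data));
	/* sizeof(b.curve_data): the full 8-element array */
	printf("%zu\n", sizeof(b.curve_data));
	return 0;
}
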
diff --git a/drivers/net/wireless/intersil/prism54/islpci_dev.c b/drivers/net/wireless/intersil/prism54/islpci_dev.c
index 84a42012aeae..325176d4d796 100644
--- a/drivers/net/wireless/intersil/prism54/islpci_dev.c
+++ b/drivers/net/wireless/intersil/prism54/islpci_dev.c
@@ -808,7 +808,6 @@ static const struct net_device_ops islpci_netdev_ops = {
.ndo_start_xmit = islpci_eth_transmit,
.ndo_tx_timeout = islpci_eth_tx_timeout,
.ndo_set_mac_address = prism54_set_mac_address,
- .ndo_change_mtu = eth_change_mtu,
.ndo_validate_addr = eth_validate_addr,
};
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index d3bad5779376..1620a5d2757d 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -250,7 +250,7 @@ static inline void hwsim_clear_chanctx_magic(struct ieee80211_chanctx_conf *c)
cp->magic = 0;
}
-static int hwsim_net_id;
+static unsigned int hwsim_net_id;
static int hwsim_netgroup;
@@ -587,15 +587,8 @@ struct hwsim_radiotap_ack_hdr {
__le16 rt_chbitmask;
} __packed;
-/* MAC80211_HWSIM netlinf family */
-static struct genl_family hwsim_genl_family = {
- .id = GENL_ID_GENERATE,
- .hdrsize = 0,
- .name = "MAC80211_HWSIM",
- .version = 1,
- .maxattr = HWSIM_ATTR_MAX,
- .netnsok = true,
-};
+/* MAC80211_HWSIM netlink family */
+static struct genl_family hwsim_genl_family;
enum hwsim_multicast_groups {
HWSIM_MCGRP_CONFIG,
@@ -2256,35 +2249,51 @@ static void mac80211_hwsim_get_et_stats(struct ieee80211_hw *hw,
WARN_ON(i != MAC80211_HWSIM_SSTATS_LEN);
}
+#define HWSIM_COMMON_OPS \
+ .tx = mac80211_hwsim_tx, \
+ .start = mac80211_hwsim_start, \
+ .stop = mac80211_hwsim_stop, \
+ .add_interface = mac80211_hwsim_add_interface, \
+ .change_interface = mac80211_hwsim_change_interface, \
+ .remove_interface = mac80211_hwsim_remove_interface, \
+ .config = mac80211_hwsim_config, \
+ .configure_filter = mac80211_hwsim_configure_filter, \
+ .bss_info_changed = mac80211_hwsim_bss_info_changed, \
+ .sta_add = mac80211_hwsim_sta_add, \
+ .sta_remove = mac80211_hwsim_sta_remove, \
+ .sta_notify = mac80211_hwsim_sta_notify, \
+ .set_tim = mac80211_hwsim_set_tim, \
+ .conf_tx = mac80211_hwsim_conf_tx, \
+ .get_survey = mac80211_hwsim_get_survey, \
+ CFG80211_TESTMODE_CMD(mac80211_hwsim_testmode_cmd) \
+ .ampdu_action = mac80211_hwsim_ampdu_action, \
+ .flush = mac80211_hwsim_flush, \
+ .get_tsf = mac80211_hwsim_get_tsf, \
+ .set_tsf = mac80211_hwsim_set_tsf, \
+ .get_et_sset_count = mac80211_hwsim_get_et_sset_count, \
+ .get_et_stats = mac80211_hwsim_get_et_stats, \
+ .get_et_strings = mac80211_hwsim_get_et_strings,
+
static const struct ieee80211_ops mac80211_hwsim_ops = {
- .tx = mac80211_hwsim_tx,
- .start = mac80211_hwsim_start,
- .stop = mac80211_hwsim_stop,
- .add_interface = mac80211_hwsim_add_interface,
- .change_interface = mac80211_hwsim_change_interface,
- .remove_interface = mac80211_hwsim_remove_interface,
- .config = mac80211_hwsim_config,
- .configure_filter = mac80211_hwsim_configure_filter,
- .bss_info_changed = mac80211_hwsim_bss_info_changed,
- .sta_add = mac80211_hwsim_sta_add,
- .sta_remove = mac80211_hwsim_sta_remove,
- .sta_notify = mac80211_hwsim_sta_notify,
- .set_tim = mac80211_hwsim_set_tim,
- .conf_tx = mac80211_hwsim_conf_tx,
- .get_survey = mac80211_hwsim_get_survey,
- CFG80211_TESTMODE_CMD(mac80211_hwsim_testmode_cmd)
- .ampdu_action = mac80211_hwsim_ampdu_action,
+ HWSIM_COMMON_OPS
.sw_scan_start = mac80211_hwsim_sw_scan,
.sw_scan_complete = mac80211_hwsim_sw_scan_complete,
- .flush = mac80211_hwsim_flush,
- .get_tsf = mac80211_hwsim_get_tsf,
- .set_tsf = mac80211_hwsim_set_tsf,
- .get_et_sset_count = mac80211_hwsim_get_et_sset_count,
- .get_et_stats = mac80211_hwsim_get_et_stats,
- .get_et_strings = mac80211_hwsim_get_et_strings,
};
-static struct ieee80211_ops mac80211_hwsim_mchan_ops;
+static const struct ieee80211_ops mac80211_hwsim_mchan_ops = {
+ HWSIM_COMMON_OPS
+ .hw_scan = mac80211_hwsim_hw_scan,
+ .cancel_hw_scan = mac80211_hwsim_cancel_hw_scan,
+ .sw_scan_start = NULL,
+ .sw_scan_complete = NULL,
+ .remain_on_channel = mac80211_hwsim_roc,
+ .cancel_remain_on_channel = mac80211_hwsim_croc,
+ .add_chanctx = mac80211_hwsim_add_chanctx,
+ .remove_chanctx = mac80211_hwsim_remove_chanctx,
+ .change_chanctx = mac80211_hwsim_change_chanctx,
+ .assign_vif_chanctx = mac80211_hwsim_assign_vif_chanctx,
+ .unassign_vif_chanctx = mac80211_hwsim_unassign_vif_chanctx,
+};
struct hwsim_new_radio_params {
unsigned int channels;
@@ -2791,7 +2800,6 @@ static void mac80211_hwsim_free(void)
static const struct net_device_ops hwsim_netdev_ops = {
.ndo_start_xmit = hwsim_mon_xmit,
- .ndo_change_mtu = eth_change_mtu,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
};
@@ -3236,6 +3244,18 @@ static const struct genl_ops hwsim_ops[] = {
},
};
+static struct genl_family hwsim_genl_family __ro_after_init = {
+ .name = "MAC80211_HWSIM",
+ .version = 1,
+ .maxattr = HWSIM_ATTR_MAX,
+ .netnsok = true,
+ .module = THIS_MODULE,
+ .ops = hwsim_ops,
+ .n_ops = ARRAY_SIZE(hwsim_ops),
+ .mcgrps = hwsim_mcgrps,
+ .n_mcgrps = ARRAY_SIZE(hwsim_mcgrps),
+};
+
static void destroy_radio(struct work_struct *work)
{
struct mac80211_hwsim_data *data =
@@ -3283,15 +3303,13 @@ static struct notifier_block hwsim_netlink_notifier = {
.notifier_call = mac80211_hwsim_netlink_notify,
};
-static int hwsim_init_netlink(void)
+static int __init hwsim_init_netlink(void)
{
int rc;
printk(KERN_INFO "mac80211_hwsim: initializing netlink\n");
- rc = genl_register_family_with_ops_groups(&hwsim_genl_family,
- hwsim_ops,
- hwsim_mcgrps);
+ rc = genl_register_family(&hwsim_genl_family);
if (rc)
goto failure;
@@ -3360,21 +3378,6 @@ static int __init init_mac80211_hwsim(void)
if (channels < 1)
return -EINVAL;
- mac80211_hwsim_mchan_ops = mac80211_hwsim_ops;
- mac80211_hwsim_mchan_ops.hw_scan = mac80211_hwsim_hw_scan;
- mac80211_hwsim_mchan_ops.cancel_hw_scan = mac80211_hwsim_cancel_hw_scan;
- mac80211_hwsim_mchan_ops.sw_scan_start = NULL;
- mac80211_hwsim_mchan_ops.sw_scan_complete = NULL;
- mac80211_hwsim_mchan_ops.remain_on_channel = mac80211_hwsim_roc;
- mac80211_hwsim_mchan_ops.cancel_remain_on_channel = mac80211_hwsim_croc;
- mac80211_hwsim_mchan_ops.add_chanctx = mac80211_hwsim_add_chanctx;
- mac80211_hwsim_mchan_ops.remove_chanctx = mac80211_hwsim_remove_chanctx;
- mac80211_hwsim_mchan_ops.change_chanctx = mac80211_hwsim_change_chanctx;
- mac80211_hwsim_mchan_ops.assign_vif_chanctx =
- mac80211_hwsim_assign_vif_chanctx;
- mac80211_hwsim_mchan_ops.unassign_vif_chanctx =
- mac80211_hwsim_unassign_vif_chanctx;
-
spin_lock_init(&hwsim_radio_lock);
err = register_pernet_device(&hwsim_net_ops);
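
The HWSIM_COMMON_OPS macro above exists so that both ops tables can be fully initialized at compile time and declared const, instead of copying and patching a writable struct during module init as the removed code did. A reduced sketch of the shared-initializer idiom with generic names; my_ops and the callbacks are hypothetical stand-ins for ieee80211_ops:

struct my_ops {
	int (*start)(void);
	int (*stop)(void);
	int (*hw_scan)(void);	/* only the multi-channel variant sets this */
};

static int my_start(void)   { return 0; }
static int my_stop(void)    { return 0; }
static int my_hw_scan(void) { return 0; }

#define MY_COMMON_OPS \
	.start = my_start, \
	.stop = my_stop,

static const struct my_ops my_default_ops = {
	MY_COMMON_OPS
};

static const struct my_ops my_mchan_ops = {
	MY_COMMON_OPS
	.hw_scan = my_hw_scan,	/* members the common set leaves unset */
};
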
diff --git a/drivers/net/wireless/marvell/libertas/main.c b/drivers/net/wireless/marvell/libertas/main.c
index 8541cbed786d..e3500203715c 100644
--- a/drivers/net/wireless/marvell/libertas/main.c
+++ b/drivers/net/wireless/marvell/libertas/main.c
@@ -945,7 +945,6 @@ static const struct net_device_ops lbs_netdev_ops = {
.ndo_start_xmit = lbs_hard_start_xmit,
.ndo_set_mac_address = lbs_set_mac_address,
.ndo_set_rx_mode = lbs_set_multicast_list,
- .ndo_change_mtu = eth_change_mtu,
.ndo_validate_addr = eth_validate_addr,
};
diff --git a/drivers/net/wireless/marvell/mwifiex/README b/drivers/net/wireless/marvell/mwifiex/README
index 24e649b1eb24..588fcbe38374 100644
--- a/drivers/net/wireless/marvell/mwifiex/README
+++ b/drivers/net/wireless/marvell/mwifiex/README
@@ -180,6 +180,29 @@ regrdwr
echo "1 0xa060 0x12" > regrdwr : Write the MAC register
echo "1 0xa794 0x80000000" > regrdwr
: Write 0x80000000 to MAC register
+
+memrw
+ This command is used to read/write the firmware memory.
+
+ Usage:
+ 1) For reading firmware memory location.
+ echo r <address> 0 > /sys/kernel/debug/mwifiex/mlan0/memrw
+ cat /sys/kernel/debug/mwifiex/mlan0/memrw
+ 2) For writing value to firmware memory location.
+ echo w <address> [value] > /sys/kernel/debug/mwifiex/mlan0/memrw
+
+ where the parameters are,
+ <address>: memory address
+ [value]: value to be written
+
+ Examples:
+ echo r 0x4cf70 0 > /sys/kernel/debug/mwifiex/mlan0/memrw
+ cat /sys/kernel/debug/mwifiex/mlan0/memrw
+ : Read memory address 0x4cf70
+ iwpriv mlan0 memrdwr -0x7fff6000 -0x40000000

+ echo w 0x8000a000 0xc0000000 > /sys/kernel/debug/mwifiex/mlan0/memrw
+ : Write 0xc0000000 to memory address 0x8000a000
+
rdeeprom
This command is used to read the EEPROM contents of the card.
diff --git a/drivers/net/wireless/marvell/mwifiex/cfg80211.c b/drivers/net/wireless/marvell/mwifiex/cfg80211.c
index 16241d21727b..145cc4b5103b 100644
--- a/drivers/net/wireless/marvell/mwifiex/cfg80211.c
+++ b/drivers/net/wireless/marvell/mwifiex/cfg80211.c
@@ -1203,6 +1203,12 @@ mwifiex_cfg80211_change_virtual_intf(struct wiphy *wiphy,
priv->adapter->curr_iface_comb.p2p_intf--;
priv->adapter->curr_iface_comb.sta_intf++;
dev->ieee80211_ptr->iftype = type;
+ if (mwifiex_deinit_priv_params(priv))
+ return -1;
+ if (mwifiex_init_new_priv_params(priv, dev, type))
+ return -1;
+ if (mwifiex_sta_init_cmd(priv, false, false))
+ return -1;
break;
case NL80211_IFTYPE_ADHOC:
if (mwifiex_cfg80211_deinit_p2p(priv))
@@ -2137,6 +2143,16 @@ mwifiex_cfg80211_assoc(struct mwifiex_private *priv, size_t ssid_len,
ret = mwifiex_set_encode(priv, NULL, NULL, 0, 0, NULL, 1);
if (mode == NL80211_IFTYPE_ADHOC) {
+ u16 enable = true;
+
+ /* set ibss coalescing_status */
+ ret = mwifiex_send_cmd(
+ priv,
+ HostCmd_CMD_802_11_IBSS_COALESCING_STATUS,
+ HostCmd_ACT_GEN_SET, 0, &enable, true);
+ if (ret)
+ return ret;
+
/* "privacy" is set only for ad-hoc mode */
if (privacy) {
/*
@@ -3017,6 +3033,8 @@ struct wireless_dev *mwifiex_add_virtual_intf(struct wiphy *wiphy,
priv->netdev = NULL;
memset(&priv->wdev, 0, sizeof(priv->wdev));
priv->wdev.iftype = NL80211_IFTYPE_UNSPECIFIED;
+ destroy_workqueue(priv->dfs_cac_workqueue);
+ priv->dfs_cac_workqueue = NULL;
return ERR_PTR(-ENOMEM);
}
@@ -3071,8 +3089,10 @@ int mwifiex_del_virtual_intf(struct wiphy *wiphy, struct wireless_dev *wdev)
mwifiex_stop_net_dev_queue(priv->netdev, adapter);
- skb_queue_walk_safe(&priv->bypass_txq, skb, tmp)
+ skb_queue_walk_safe(&priv->bypass_txq, skb, tmp) {
+ skb_unlink(skb, &priv->bypass_txq);
mwifiex_write_data_complete(priv->adapter, skb, 0, -1);
+ }
if (netif_carrier_ok(priv->netdev))
netif_carrier_off(priv->netdev);
@@ -3971,13 +3991,11 @@ static int mwifiex_tm_cmd(struct wiphy *wiphy, struct wireless_dev *wdev,
struct mwifiex_private *priv = mwifiex_netdev_get_priv(wdev->netdev);
struct mwifiex_ds_misc_cmd *hostcmd;
struct nlattr *tb[MWIFIEX_TM_ATTR_MAX + 1];
- struct mwifiex_adapter *adapter;
struct sk_buff *skb;
int err;
if (!priv)
return -EINVAL;
- adapter = priv->adapter;
err = nla_parse(tb, MWIFIEX_TM_ATTR_MAX, data, len,
mwifiex_tm_policy);
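
The bypass_txq fix above is about list ownership: completing an skb that is still linked into a queue leaves the queue referencing freed memory. skb_queue_walk_safe() caches the next pointer so the current node may be removed mid-walk, and skb_unlink() detaches it under the queue lock before it is handed off or freed. A hedged sketch of the idiom:

#include <linux/skbuff.h>

static void drain_queue(struct sk_buff_head *q)
{
	struct sk_buff *skb, *tmp;

	skb_queue_walk_safe(q, skb, tmp) {
		skb_unlink(skb, q);	/* remove from the list first */
		kfree_skb(skb);		/* then release (or complete) it */
	}
}
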
diff --git a/drivers/net/wireless/marvell/mwifiex/cmdevt.c b/drivers/net/wireless/marvell/mwifiex/cmdevt.c
index 53477280f39c..25a7475702f7 100644
--- a/drivers/net/wireless/marvell/mwifiex/cmdevt.c
+++ b/drivers/net/wireless/marvell/mwifiex/cmdevt.c
@@ -1118,13 +1118,14 @@ mwifiex_cancel_pending_ioctl(struct mwifiex_adapter *adapter)
void
mwifiex_check_ps_cond(struct mwifiex_adapter *adapter)
{
- if (!adapter->cmd_sent &&
+ if (!adapter->cmd_sent && !atomic_read(&adapter->tx_hw_pending) &&
!adapter->curr_cmd && !IS_CARD_RX_RCVD(adapter))
mwifiex_dnld_sleep_confirm_cmd(adapter);
else
mwifiex_dbg(adapter, CMD,
- "cmd: Delay Sleep Confirm (%s%s%s)\n",
+ "cmd: Delay Sleep Confirm (%s%s%s%s)\n",
(adapter->cmd_sent) ? "D" : "",
+ atomic_read(&adapter->tx_hw_pending) ? "T" : "",
(adapter->curr_cmd) ? "C" : "",
(IS_CARD_RX_RCVD(adapter)) ? "R" : "");
}
diff --git a/drivers/net/wireless/marvell/mwifiex/fw.h b/drivers/net/wireless/marvell/mwifiex/fw.h
index 4b1894b4757f..ea455948a68a 100644
--- a/drivers/net/wireless/marvell/mwifiex/fw.h
+++ b/drivers/net/wireless/marvell/mwifiex/fw.h
@@ -181,6 +181,7 @@ enum MWIFIEX_802_11_PRIVACY_FILTER {
#define TLV_TYPE_COALESCE_RULE (PROPRIETARY_TLV_BASE_ID + 154)
#define TLV_TYPE_KEY_PARAM_V2 (PROPRIETARY_TLV_BASE_ID + 156)
#define TLV_TYPE_REPEAT_COUNT (PROPRIETARY_TLV_BASE_ID + 176)
+#define TLV_TYPE_PS_PARAMS_IN_HS (PROPRIETARY_TLV_BASE_ID + 181)
#define TLV_TYPE_MULTI_CHAN_INFO (PROPRIETARY_TLV_BASE_ID + 183)
#define TLV_TYPE_MC_GROUP_INFO (PROPRIETARY_TLV_BASE_ID + 184)
#define TLV_TYPE_TDLS_IDLE_TIMEOUT (PROPRIETARY_TLV_BASE_ID + 194)
@@ -218,6 +219,7 @@ enum MWIFIEX_802_11_PRIVACY_FILTER {
#define ISSUPP_TDLS_ENABLED(FwCapInfo) (FwCapInfo & BIT(14))
#define ISSUPP_DRCS_ENABLED(FwCapInfo) (FwCapInfo & BIT(15))
#define ISSUPP_SDIO_SPA_ENABLED(FwCapInfo) (FwCapInfo & BIT(16))
+#define ISSUPP_ADHOC_ENABLED(FwCapInfo) (FwCapInfo & BIT(25))
#define MWIFIEX_DEF_HT_CAP (IEEE80211_HT_CAP_DSSSCCK40 | \
(1 << IEEE80211_HT_CAP_RX_STBC_SHIFT) | \
@@ -986,6 +988,15 @@ struct mwifiex_ps_param {
__le16 delay_to_ps;
};
+#define HS_DEF_WAKE_INTERVAL 100
+#define HS_DEF_INACTIVITY_TIMEOUT 50
+
+struct mwifiex_ps_param_in_hs {
+ struct mwifiex_ie_types_header header;
+ __le32 hs_wake_int;
+ __le32 hs_inact_timeout;
+};
+
#define BITMAP_AUTO_DS 0x01
#define BITMAP_STA_PS 0x10
diff --git a/drivers/net/wireless/marvell/mwifiex/init.c b/drivers/net/wireless/marvell/mwifiex/init.c
index 82839d9f079f..b36cb3fef358 100644
--- a/drivers/net/wireless/marvell/mwifiex/init.c
+++ b/drivers/net/wireless/marvell/mwifiex/init.c
@@ -270,6 +270,7 @@ static void mwifiex_init_adapter(struct mwifiex_adapter *adapter)
adapter->adhoc_11n_enabled = false;
mwifiex_wmm_init(adapter);
+ atomic_set(&adapter->tx_hw_pending, 0);
sleep_cfm_buf = (struct mwifiex_opt_sleep_confirm *)
adapter->sleep_cfm->data;
diff --git a/drivers/net/wireless/marvell/mwifiex/main.c b/drivers/net/wireless/marvell/mwifiex/main.c
index 2478ccd6f2d9..e5c3a8aa3929 100644
--- a/drivers/net/wireless/marvell/mwifiex/main.c
+++ b/drivers/net/wireless/marvell/mwifiex/main.c
@@ -308,6 +308,9 @@ process_start:
/* We have tried to wakeup the card already */
if (adapter->pm_wakeup_fw_try)
break;
+ if (adapter->ps_state == PS_STATE_PRE_SLEEP)
+ mwifiex_check_ps_cond(adapter);
+
if (adapter->ps_state != PS_STATE_AWAKE)
break;
if (adapter->tx_lock_flag) {
@@ -355,10 +358,8 @@ process_start:
/* Check if we need to confirm Sleep Request
received previously */
- if (adapter->ps_state == PS_STATE_PRE_SLEEP) {
- if (!adapter->cmd_sent && !adapter->curr_cmd)
- mwifiex_check_ps_cond(adapter);
- }
+ if (adapter->ps_state == PS_STATE_PRE_SLEEP)
+ mwifiex_check_ps_cond(adapter);
/*
 * The ps_state may have been changed during processing of
 * Sleep Request event.
 */
@@ -517,12 +518,11 @@ static void mwifiex_fw_dpc(const struct firmware *firmware, void *context)
{
int ret;
char fmt[64];
- struct mwifiex_private *priv;
struct mwifiex_adapter *adapter = context;
struct mwifiex_fw_image fw;
- struct semaphore *sem = adapter->card_sem;
bool init_failed = false;
struct wireless_dev *wdev;
+ struct completion *fw_done = adapter->fw_done;
if (!firmware) {
mwifiex_dbg(adapter, ERROR,
@@ -575,8 +575,6 @@ static void mwifiex_fw_dpc(const struct firmware *firmware, void *context)
goto err_init_fw;
}
- priv = adapter->priv[MWIFIEX_BSS_ROLE_STA];
-
if (!adapter->wiphy) {
if (mwifiex_register_cfg80211(adapter)) {
mwifiex_dbg(adapter, ERROR,
@@ -669,7 +667,8 @@ done:
}
if (init_failed)
mwifiex_free_adapter(adapter);
- up(sem);
+ /* Tell all current and future waiters we're finished */
+ complete_all(fw_done);
return;
}
@@ -1364,7 +1363,7 @@ static void mwifiex_main_work_queue(struct work_struct *work)
* code is extracted from mwifiex_remove_card()
*/
static int
-mwifiex_shutdown_sw(struct mwifiex_adapter *adapter, struct semaphore *sem)
+mwifiex_shutdown_sw(struct mwifiex_adapter *adapter)
{
struct mwifiex_private *priv;
int i;
@@ -1372,8 +1371,9 @@ mwifiex_shutdown_sw(struct mwifiex_adapter *adapter, struct semaphore *sem)
if (!adapter)
goto exit_return;
- if (down_interruptible(sem))
- goto exit_sem_err;
+ wait_for_completion(adapter->fw_done);
+ /* Caller should ensure we aren't suspending while this happens */
+ reinit_completion(adapter->fw_done);
priv = mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_ANY);
mwifiex_deauthenticate(priv, NULL);
@@ -1430,8 +1430,6 @@ mwifiex_shutdown_sw(struct mwifiex_adapter *adapter, struct semaphore *sem)
rtnl_unlock();
}
- up(sem);
-exit_sem_err:
mwifiex_dbg(adapter, INFO, "%s, successful\n", __func__);
exit_return:
return 0;
@@ -1441,21 +1439,18 @@ exit_return:
* code is extracted from mwifiex_add_card()
*/
static int
-mwifiex_reinit_sw(struct mwifiex_adapter *adapter, struct semaphore *sem,
+mwifiex_reinit_sw(struct mwifiex_adapter *adapter, struct completion *fw_done,
struct mwifiex_if_ops *if_ops, u8 iface_type)
{
char fw_name[32];
struct pcie_service_card *card = adapter->card;
- if (down_interruptible(sem))
- goto exit_sem_err;
-
mwifiex_init_lock_list(adapter);
if (adapter->if_ops.up_dev)
adapter->if_ops.up_dev(adapter);
adapter->iface_type = iface_type;
- adapter->card_sem = sem;
+ adapter->fw_done = fw_done;
adapter->hw_status = MWIFIEX_HW_STATUS_INITIALIZING;
adapter->surprise_removed = false;
@@ -1506,7 +1501,8 @@ mwifiex_reinit_sw(struct mwifiex_adapter *adapter, struct semaphore *sem,
}
strcpy(adapter->fw_name, fw_name);
mwifiex_dbg(adapter, INFO, "%s, successful\n", __func__);
- up(sem);
+
+ complete_all(adapter->fw_done);
return 0;
err_init_fw:
@@ -1526,8 +1522,7 @@ err_init_fw:
err_kmalloc:
mwifiex_terminate_workqueue(adapter);
adapter->surprise_removed = true;
- up(sem);
-exit_sem_err:
+ complete_all(adapter->fw_done);
mwifiex_dbg(adapter, INFO, "%s, error\n", __func__);
return -1;
@@ -1542,16 +1537,67 @@ void mwifiex_do_flr(struct mwifiex_adapter *adapter, bool prepare)
struct mwifiex_if_ops if_ops;
if (!prepare) {
- mwifiex_reinit_sw(adapter, adapter->card_sem, &if_ops,
+ mwifiex_reinit_sw(adapter, adapter->fw_done, &if_ops,
adapter->iface_type);
} else {
memcpy(&if_ops, &adapter->if_ops,
sizeof(struct mwifiex_if_ops));
- mwifiex_shutdown_sw(adapter, adapter->card_sem);
+ mwifiex_shutdown_sw(adapter);
}
}
EXPORT_SYMBOL_GPL(mwifiex_do_flr);
+static irqreturn_t mwifiex_irq_wakeup_handler(int irq, void *priv)
+{
+ struct mwifiex_adapter *adapter = priv;
+
+ if (adapter->irq_wakeup >= 0) {
+ dev_dbg(adapter->dev, "%s: wake by wifi", __func__);
+ adapter->wake_by_wifi = true;
+ disable_irq_nosync(irq);
+ }
+
+ /* Notify PM core we are wakeup source */
+ pm_wakeup_event(adapter->dev, 0);
+
+ return IRQ_HANDLED;
+}
+
+static void mwifiex_probe_of(struct mwifiex_adapter *adapter)
+{
+ int ret;
+ struct device *dev = adapter->dev;
+
+ if (!dev->of_node)
+ return;
+
+ adapter->dt_node = dev->of_node;
+ adapter->irq_wakeup = irq_of_parse_and_map(adapter->dt_node, 0);
+ if (!adapter->irq_wakeup) {
+ dev_info(dev, "fail to parse irq_wakeup from device tree\n");
+ return;
+ }
+
+ ret = devm_request_irq(dev, adapter->irq_wakeup,
+ mwifiex_irq_wakeup_handler, IRQF_TRIGGER_LOW,
+ "wifi_wake", adapter);
+ if (ret) {
+ dev_err(dev, "Failed to request irq_wakeup %d (%d)\n",
+ adapter->irq_wakeup, ret);
+ goto err_exit;
+ }
+
+ disable_irq(adapter->irq_wakeup);
+ if (device_init_wakeup(dev, true)) {
+ dev_err(dev, "fail to init wakeup for mwifiex\n");
+ goto err_exit;
+ }
+ return;
+
+err_exit:
+ adapter->irq_wakeup = 0;
+}
+
/*
* This function adds the card.
*
@@ -1566,21 +1612,22 @@ EXPORT_SYMBOL_GPL(mwifiex_do_flr);
* - Add logical interfaces
*/
int
-mwifiex_add_card(void *card, struct semaphore *sem,
- struct mwifiex_if_ops *if_ops, u8 iface_type)
+mwifiex_add_card(void *card, struct completion *fw_done,
+ struct mwifiex_if_ops *if_ops, u8 iface_type,
+ struct device *dev)
{
struct mwifiex_adapter *adapter;
- if (down_interruptible(sem))
- goto exit_sem_err;
-
if (mwifiex_register(card, if_ops, (void **)&adapter)) {
pr_err("%s: software init failed\n", __func__);
goto err_init_sw;
}
+ adapter->dev = dev;
+ mwifiex_probe_of(adapter);
+
adapter->iface_type = iface_type;
- adapter->card_sem = sem;
+ adapter->fw_done = fw_done;
adapter->hw_status = MWIFIEX_HW_STATUS_INITIALIZING;
adapter->surprise_removed = false;
@@ -1649,9 +1696,7 @@ err_kmalloc:
mwifiex_free_adapter(adapter);
err_init_sw:
- up(sem);
-exit_sem_err:
return -1;
}
EXPORT_SYMBOL_GPL(mwifiex_add_card);
@@ -1667,14 +1712,11 @@ EXPORT_SYMBOL_GPL(mwifiex_add_card);
* - Unregister the device
* - Free the adapter structure
*/
-int mwifiex_remove_card(struct mwifiex_adapter *adapter, struct semaphore *sem)
+int mwifiex_remove_card(struct mwifiex_adapter *adapter)
{
struct mwifiex_private *priv = NULL;
int i;
- if (down_trylock(sem))
- goto exit_sem_err;
-
if (!adapter)
goto exit_remove;
@@ -1744,8 +1786,6 @@ int mwifiex_remove_card(struct mwifiex_adapter *adapter, struct semaphore *sem)
mwifiex_free_adapter(adapter);
exit_remove:
- up(sem);
-exit_sem_err:
return 0;
}
EXPORT_SYMBOL_GPL(mwifiex_remove_card);
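
The semaphore-to-completion conversion running through main.c (and the pcie, sdio and usb hunks below) matches the primitive to what is actually synchronized: "firmware initialization finished" is a one-shot event with potentially several waiters, which is exactly what struct completion models. complete_all() releases current and future waiters, and reinit_completion() rearms the event for the reset path. A reduced sketch of the lifecycle, with hypothetical names:

#include <linux/completion.h>

struct mycard {
	struct completion fw_done;	/* signalled once FW init ends */
};

static void mycard_probe_init(struct mycard *card)
{
	init_completion(&card->fw_done);	/* starts out "not done" */
}

static void mycard_fw_loaded(struct mycard *card)
{
	complete_all(&card->fw_done);	/* wake current and future waiters */
}

static void mycard_suspend_or_remove(struct mycard *card)
{
	/* Blocks until firmware load finished; returns immediately if
	 * complete_all() has already run. */
	wait_for_completion(&card->fw_done);
}

static void mycard_reset(struct mycard *card)
{
	reinit_completion(&card->fw_done);	/* rearm for the next load */
}
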
diff --git a/drivers/net/wireless/marvell/mwifiex/main.h b/drivers/net/wireless/marvell/mwifiex/main.h
index 26df28f4bfb2..5c9bd944b6ea 100644
--- a/drivers/net/wireless/marvell/mwifiex/main.h
+++ b/drivers/net/wireless/marvell/mwifiex/main.h
@@ -20,6 +20,7 @@
#ifndef _MWIFIEX_MAIN_H_
#define _MWIFIEX_MAIN_H_
+#include <linux/completion.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
@@ -315,6 +316,7 @@ struct mwifiex_tid_tbl {
#define WMM_HIGHEST_PRIORITY 7
#define HIGH_PRIO_TID 7
#define LOW_PRIO_TID 0
+#define NO_PKT_PRIO_TID -1
#define MWIFIEX_WMM_DRV_DELAY_MAX 510
struct mwifiex_wmm_desc {
@@ -856,6 +858,7 @@ struct mwifiex_adapter {
atomic_t rx_pending;
atomic_t tx_pending;
atomic_t cmd_pending;
+ atomic_t tx_hw_pending;
struct workqueue_struct *workqueue;
struct work_struct main_work;
struct workqueue_struct *rx_workqueue;
@@ -983,7 +986,10 @@ struct mwifiex_adapter {
u32 usr_dot_11ac_mcs_support;
atomic_t pending_bridged_pkts;
- struct semaphore *card_sem;
+
+ /* For synchronizing FW initialization with device lifecycle. */
+ struct completion *fw_done;
+
bool ext_scan;
u8 fw_api_ver;
u8 key_api_major_ver, key_api_minor_ver;
@@ -1010,6 +1016,10 @@ struct mwifiex_adapter {
bool usb_mc_setup;
struct cfg80211_wowlan_nd_info *nd_info;
struct ieee80211_regdomain *regd;
+
+ /* Wake-on-WLAN (WoWLAN) */
+ int irq_wakeup;
+ bool wake_by_wifi;
};
void mwifiex_process_tx_queue(struct mwifiex_adapter *adapter);
@@ -1409,10 +1419,39 @@ static inline u8 mwifiex_is_tdls_link_setup(u8 status)
return false;
}
+/* Disable platform specific wakeup interrupt */
+static inline void mwifiex_disable_wake(struct mwifiex_adapter *adapter)
+{
+ if (adapter->irq_wakeup >= 0) {
+ disable_irq_wake(adapter->irq_wakeup);
+ disable_irq(adapter->irq_wakeup);
+ if (adapter->wake_by_wifi)
+ /* Undo our disable, since interrupt handler already
+ * did this.
+ */
+ enable_irq(adapter->irq_wakeup);
+
+ }
+}
+
+/* Enable platform specific wakeup interrupt */
+static inline void mwifiex_enable_wake(struct mwifiex_adapter *adapter)
+{
+ /* Enable platform specific wakeup interrupt */
+ if (adapter->irq_wakeup >= 0) {
+ adapter->wake_by_wifi = false;
+ enable_irq(adapter->irq_wakeup);
+ enable_irq_wake(adapter->irq_wakeup);
+ }
+}
+
int mwifiex_init_shutdown_fw(struct mwifiex_private *priv,
u32 func_init_shutdown);
-int mwifiex_add_card(void *, struct semaphore *, struct mwifiex_if_ops *, u8);
-int mwifiex_remove_card(struct mwifiex_adapter *, struct semaphore *);
+
+int mwifiex_add_card(void *card, struct completion *fw_done,
+ struct mwifiex_if_ops *if_ops, u8 iface_type,
+ struct device *dev);
+int mwifiex_remove_card(struct mwifiex_adapter *adapter);
void mwifiex_get_version(struct mwifiex_adapter *adapter, char *version,
int maxlen);
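
The mwifiex_enable_wake()/mwifiex_disable_wake() helpers above encode an IRQ depth-counting rule: every enable_irq() must be matched by exactly one disable_irq(), and the wake handler's disable_irq_nosync() counts too, hence the wake_by_wifi flag and the compensating enable_irq() on the disarm path. A reduced sketch of the pairing, with hypothetical names:

#include <linux/interrupt.h>

struct wake_ctx {
	int irq;
	bool fired;	/* set by the handler, which also disables the IRQ */
};

/* Arm before suspend; the IRQ is normally parked disabled. */
static void wake_arm(struct wake_ctx *w)
{
	w->fired = false;
	enable_irq(w->irq);
	enable_irq_wake(w->irq);
}

/* Disarm after resume: if the handler fired, it already did one
 * disable_irq_nosync(), so compensate to land back at the parked depth. */
static void wake_disarm(struct wake_ctx *w)
{
	disable_irq_wake(w->irq);
	disable_irq(w->irq);
	if (w->fired)
		enable_irq(w->irq);
}
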
diff --git a/drivers/net/wireless/marvell/mwifiex/pcie.c b/drivers/net/wireless/marvell/mwifiex/pcie.c
index 3c3c4f197da8..4db07da81d8d 100644
--- a/drivers/net/wireless/marvell/mwifiex/pcie.c
+++ b/drivers/net/wireless/marvell/mwifiex/pcie.c
@@ -35,7 +35,21 @@ static u8 user_rmmod;
static struct mwifiex_if_ops pcie_ops;
-static struct semaphore add_remove_card_sem;
+static const struct of_device_id mwifiex_pcie_of_match_table[] = {
+ { .compatible = "pci11ab,2b42" },
+ { .compatible = "pci1b4b,2b42" },
+ { }
+};
+
+static int mwifiex_pcie_probe_of(struct device *dev)
+{
+ if (!of_match_node(mwifiex_pcie_of_match_table, dev->of_node)) {
+ dev_err(dev, "required compatible string missing\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
static int
mwifiex_map_pci_memory(struct mwifiex_adapter *adapter, struct sk_buff *skb,
@@ -101,23 +115,31 @@ static int mwifiex_pcie_suspend(struct device *dev)
{
struct mwifiex_adapter *adapter;
struct pcie_service_card *card;
- int hs_actived;
struct pci_dev *pdev = to_pci_dev(dev);
- if (pdev) {
- card = pci_get_drvdata(pdev);
- if (!card || !card->adapter) {
- pr_err("Card or adapter structure is not valid\n");
- return 0;
- }
- } else {
- pr_err("PCIE device is not specified\n");
+ card = pci_get_drvdata(pdev);
+
+ /* Might still be loading firmware */
+ wait_for_completion(&card->fw_done);
+
+ adapter = card->adapter;
+ if (!adapter) {
+ dev_err(dev, "adapter is not valid\n");
return 0;
}
- adapter = card->adapter;
+ mwifiex_enable_wake(adapter);
+
+ /* Enable the Host Sleep */
+ if (!mwifiex_enable_hs(adapter)) {
+ mwifiex_dbg(adapter, ERROR,
+ "cmd: failed to suspend\n");
+ adapter->hs_enabling = false;
+ mwifiex_disable_wake(adapter);
+ return -EFAULT;
+ }
- hs_actived = mwifiex_enable_hs(adapter);
+ flush_workqueue(adapter->workqueue);
/* Indicate device suspended */
adapter->is_suspended = true;
@@ -140,14 +162,10 @@ static int mwifiex_pcie_resume(struct device *dev)
struct pcie_service_card *card;
struct pci_dev *pdev = to_pci_dev(dev);
- if (pdev) {
- card = pci_get_drvdata(pdev);
- if (!card || !card->adapter) {
- pr_err("Card or adapter structure is not valid\n");
- return 0;
- }
- } else {
- pr_err("PCIE device is not specified\n");
+ card = pci_get_drvdata(pdev);
+
+ if (!card->adapter) {
+ dev_err(dev, "adapter structure is not valid\n");
return 0;
}
@@ -163,6 +181,7 @@ static int mwifiex_pcie_resume(struct device *dev)
mwifiex_cancel_hs(mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_STA),
MWIFIEX_ASYNC_CMD);
+ mwifiex_disable_wake(adapter);
return 0;
}
@@ -178,14 +197,17 @@ static int mwifiex_pcie_probe(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
struct pcie_service_card *card;
+ int ret;
pr_debug("info: vendor=0x%4.04X device=0x%4.04X rev=%d\n",
pdev->vendor, pdev->device, pdev->revision);
- card = kzalloc(sizeof(struct pcie_service_card), GFP_KERNEL);
+ card = devm_kzalloc(&pdev->dev, sizeof(*card), GFP_KERNEL);
if (!card)
return -ENOMEM;
+ init_completion(&card->fw_done);
+
card->dev = pdev;
if (ent->driver_data) {
@@ -199,8 +221,15 @@ static int mwifiex_pcie_probe(struct pci_dev *pdev,
card->pcie.can_ext_scan = data->can_ext_scan;
}
- if (mwifiex_add_card(card, &add_remove_card_sem, &pcie_ops,
- MWIFIEX_PCIE)) {
+ /* device tree node parsing and platform specific configuration */
+ if (pdev->dev.of_node) {
+ ret = mwifiex_pcie_probe_of(&pdev->dev);
+ if (ret)
+ return ret;
+ }
+
+ if (mwifiex_add_card(card, &card->fw_done, &pcie_ops,
+ MWIFIEX_PCIE, &pdev->dev)) {
pr_err("%s failed\n", __func__);
return -1;
}
@@ -218,19 +247,14 @@ static void mwifiex_pcie_remove(struct pci_dev *pdev)
struct mwifiex_private *priv;
card = pci_get_drvdata(pdev);
- if (!card)
- return;
+
+ wait_for_completion(&card->fw_done);
adapter = card->adapter;
if (!adapter || !adapter->priv_num)
return;
if (user_rmmod && !adapter->mfg_mode) {
-#ifdef CONFIG_PM_SLEEP
- if (adapter->is_suspended)
- mwifiex_pcie_resume(&pdev->dev);
-#endif
-
mwifiex_deauthenticate_all(adapter);
priv = mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_ANY);
@@ -240,7 +264,7 @@ static void mwifiex_pcie_remove(struct pci_dev *pdev)
mwifiex_init_shutdown_fw(priv, MWIFIEX_FUNC_SHUTDOWN);
}
- mwifiex_remove_card(card->adapter, &add_remove_card_sem);
+ mwifiex_remove_card(adapter);
}
static void mwifiex_pcie_shutdown(struct pci_dev *pdev)
@@ -483,6 +507,7 @@ static int mwifiex_pcie_disable_host_int(struct mwifiex_adapter *adapter)
}
}
+ atomic_set(&adapter->tx_hw_pending, 0);
return 0;
}
@@ -682,6 +707,7 @@ static void mwifiex_cleanup_txq_ring(struct mwifiex_adapter *adapter)
card->tx_buf_list[i] = NULL;
}
+ atomic_set(&adapter->tx_hw_pending, 0);
return;
}
@@ -1119,6 +1145,7 @@ static int mwifiex_pcie_send_data_complete(struct mwifiex_adapter *adapter)
-1);
else
mwifiex_write_data_complete(adapter, skb, 0, 0);
+ atomic_dec(&adapter->tx_hw_pending);
}
card->tx_buf_list[wrdoneidx] = NULL;
@@ -1211,6 +1238,7 @@ mwifiex_pcie_send_data(struct mwifiex_adapter *adapter, struct sk_buff *skb,
wrindx = (card->txbd_wrptr & reg->tx_mask) >> reg->tx_start_ptr;
buf_pa = MWIFIEX_SKB_DMA_ADDR(skb);
card->tx_buf_list[wrindx] = skb;
+ atomic_inc(&adapter->tx_hw_pending);
if (reg->pfu_enabled) {
desc2 = card->txbd_ring[wrindx];
@@ -1288,6 +1316,7 @@ mwifiex_pcie_send_data(struct mwifiex_adapter *adapter, struct sk_buff *skb,
done_unmap:
mwifiex_unmap_pci_memory(adapter, skb, PCI_DMA_TODEVICE);
card->tx_buf_list[wrindx] = NULL;
+ atomic_dec(&adapter->tx_hw_pending);
if (reg->pfu_enabled)
memset(desc2, 0, sizeof(*desc2));
else
@@ -1669,9 +1698,6 @@ static int mwifiex_pcie_process_cmd_complete(struct mwifiex_adapter *adapter)
if (!adapter->curr_cmd) {
if (adapter->ps_state == PS_STATE_SLEEP_CFM) {
- mwifiex_process_sleep_confirm_resp(adapter, skb->data,
- skb->len);
- mwifiex_pcie_enable_host_int(adapter);
if (mwifiex_write_reg(adapter,
PCIE_CPU_INT_EVENT,
CPU_INTR_SLEEP_CFM_DONE)) {
@@ -1684,6 +1710,9 @@ static int mwifiex_pcie_process_cmd_complete(struct mwifiex_adapter *adapter)
while (reg->sleep_cookie && (count++ < 10) &&
mwifiex_pcie_ok_to_access_hw(adapter))
usleep_range(50, 60);
+ mwifiex_pcie_enable_host_int(adapter);
+ mwifiex_process_sleep_confirm_resp(adapter, skb->data,
+ skb->len);
} else {
mwifiex_dbg(adapter, ERROR,
"There is no command but got cmdrsp\n");
@@ -2022,7 +2051,7 @@ static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter,
}
/* Wait for the command done interrupt */
- do {
+ for (tries = 0; tries < MAX_POLL_TRIES; tries++) {
if (mwifiex_read_reg(adapter, PCIE_CPU_INT_STATUS,
&ireg_intr)) {
mwifiex_dbg(adapter, ERROR,
@@ -2034,8 +2063,18 @@ static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter,
ret = -1;
goto done;
}
- } while ((ireg_intr & CPU_INTR_DOOR_BELL) ==
- CPU_INTR_DOOR_BELL);
+ if (!(ireg_intr & CPU_INTR_DOOR_BELL))
+ break;
+ usleep_range(10, 20);
+ }
+ if (ireg_intr & CPU_INTR_DOOR_BELL) {
+ mwifiex_dbg(adapter, ERROR, "%s: Card failed to ACK download\n",
+ __func__);
+ mwifiex_unmap_pci_memory(adapter, skb,
+ PCI_DMA_TODEVICE);
+ ret = -1;
+ goto done;
+ }
mwifiex_unmap_pci_memory(adapter, skb, PCI_DMA_TODEVICE);
@@ -2210,7 +2249,8 @@ static irqreturn_t mwifiex_pcie_interrupt(int irq, void *context)
}
card = pci_get_drvdata(pdev);
- if (!card || !card->adapter) {
+
+ if (!card->adapter) {
pr_err("info: %s: card=%p adapter=%p\n", __func__, card,
card ? card->adapter : NULL);
goto exit;
@@ -2322,6 +2362,8 @@ static int mwifiex_process_pcie_int(struct mwifiex_adapter *adapter)
ret = mwifiex_pcie_process_cmd_complete(adapter);
if (ret)
return ret;
+ if (adapter->hs_activated)
+ return ret;
}
if (card->msi_enable) {
@@ -2806,7 +2848,6 @@ err_req_region0:
err_set_dma_mask:
pci_disable_device(pdev);
err_enable_dev:
- pci_set_drvdata(pdev, NULL);
return ret;
}
@@ -2840,9 +2881,7 @@ static void mwifiex_pcie_cleanup(struct mwifiex_adapter *adapter)
pci_disable_device(pdev);
pci_release_region(pdev, 2);
pci_release_region(pdev, 0);
- pci_set_drvdata(pdev, NULL);
}
- kfree(card);
}
static int mwifiex_pcie_request_irq(struct mwifiex_adapter *adapter)
@@ -2962,11 +3001,9 @@ static void mwifiex_pcie_get_fw_name(struct mwifiex_adapter *adapter)
static int mwifiex_register_dev(struct mwifiex_adapter *adapter)
{
struct pcie_service_card *card = adapter->card;
- struct pci_dev *pdev = card->dev;
/* save adapter pointer in card */
card->adapter = adapter;
- adapter->dev = &pdev->dev;
if (mwifiex_pcie_request_irq(adapter))
return -1;
@@ -2989,30 +3026,28 @@ static int mwifiex_register_dev(struct mwifiex_adapter *adapter)
static void mwifiex_unregister_dev(struct mwifiex_adapter *adapter)
{
struct pcie_service_card *card = adapter->card;
- struct pci_dev *pdev;
+ struct pci_dev *pdev = card->dev;
int i;
- if (card) {
- pdev = card->dev;
- if (card->msix_enable) {
- for (i = 0; i < MWIFIEX_NUM_MSIX_VECTORS; i++)
- synchronize_irq(card->msix_entries[i].vector);
+ if (card->msix_enable) {
+ for (i = 0; i < MWIFIEX_NUM_MSIX_VECTORS; i++)
+ synchronize_irq(card->msix_entries[i].vector);
- for (i = 0; i < MWIFIEX_NUM_MSIX_VECTORS; i++)
- free_irq(card->msix_entries[i].vector,
- &card->msix_ctx[i]);
+ for (i = 0; i < MWIFIEX_NUM_MSIX_VECTORS; i++)
+ free_irq(card->msix_entries[i].vector,
+ &card->msix_ctx[i]);
- card->msix_enable = 0;
- pci_disable_msix(pdev);
- } else {
- mwifiex_dbg(adapter, INFO,
- "%s(): calling free_irq()\n", __func__);
- free_irq(card->dev->irq, &card->share_irq_ctx);
+ card->msix_enable = 0;
+ pci_disable_msix(pdev);
+ } else {
+ mwifiex_dbg(adapter, INFO,
+ "%s(): calling free_irq()\n", __func__);
+ free_irq(card->dev->irq, &card->share_irq_ctx);
- if (card->msi_enable)
- pci_disable_msi(pdev);
- }
+ if (card->msi_enable)
+ pci_disable_msi(pdev);
}
+ card->adapter = NULL;
}
/* This function initializes the PCI-E host memory space, WCB rings, etc.
@@ -3095,18 +3130,14 @@ static void mwifiex_pcie_down_dev(struct mwifiex_adapter *adapter)
adapter->seq_num = 0;
adapter->tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_4K;
- if (card) {
- if (reg->sleep_cookie)
- mwifiex_pcie_delete_sleep_cookie_buf(adapter);
-
- mwifiex_pcie_delete_cmdrsp_buf(adapter);
- mwifiex_pcie_delete_evtbd_ring(adapter);
- mwifiex_pcie_delete_rxbd_ring(adapter);
- mwifiex_pcie_delete_txbd_ring(adapter);
- card->cmdrsp_buf = NULL;
- }
+ if (reg->sleep_cookie)
+ mwifiex_pcie_delete_sleep_cookie_buf(adapter);
- return;
+ mwifiex_pcie_delete_cmdrsp_buf(adapter);
+ mwifiex_pcie_delete_evtbd_ring(adapter);
+ mwifiex_pcie_delete_rxbd_ring(adapter);
+ mwifiex_pcie_delete_txbd_ring(adapter);
+ card->cmdrsp_buf = NULL;
}
static struct mwifiex_if_ops pcie_ops = {
@@ -3140,8 +3171,7 @@ static struct mwifiex_if_ops pcie_ops = {
/*
* This function initializes the PCIE driver module.
*
- * This initiates the semaphore and registers the device with
- * PCIE bus.
+ * This registers the device with PCIE bus.
*/
static int mwifiex_pcie_init_module(void)
{
@@ -3149,8 +3179,6 @@ static int mwifiex_pcie_init_module(void)
pr_debug("Marvell PCIe Driver\n");
- sema_init(&add_remove_card_sem, 1);
-
/* Clear the flag in case user removes the card. */
user_rmmod = 0;
@@ -3174,9 +3202,6 @@ static int mwifiex_pcie_init_module(void)
*/
static void mwifiex_pcie_cleanup_module(void)
{
- if (!down_interruptible(&add_remove_card_sem))
- up(&add_remove_card_sem);
-
/* Set the flag as user is removing this module. */
user_rmmod = 1;
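
One of the quieter but important changes in the pcie.c hunks is in mwifiex_prog_fw_w_helper(): the unbounded do/while poll on CPU_INTR_DOOR_BELL becomes a bounded loop, so a card that never ACKs the download fails the operation instead of spinning forever. The general shape of the idiom; read_reg() and DONE_BIT are hypothetical stand-ins for the real register accessor and status bit:

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/types.h>

#define MAX_POLL_TRIES	100
#define DONE_BIT	0x1

extern int read_reg(u32 *val);	/* stand-in for the bus accessor */

static int wait_for_done(void)
{
	u32 val = 0;
	int tries;

	for (tries = 0; tries < MAX_POLL_TRIES; tries++) {
		if (read_reg(&val))
			return -EIO;		/* bus error: give up */
		if (val & DONE_BIT)
			return 0;		/* hardware acknowledged */
		usleep_range(10, 20);		/* back off between reads */
	}
	return -ETIMEDOUT;	/* a wedged card no longer hangs the thread */
}
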
diff --git a/drivers/net/wireless/marvell/mwifiex/pcie.h b/drivers/net/wireless/marvell/mwifiex/pcie.h
index 46f99cae9399..ae3365d1c34e 100644
--- a/drivers/net/wireless/marvell/mwifiex/pcie.h
+++ b/drivers/net/wireless/marvell/mwifiex/pcie.h
@@ -22,6 +22,7 @@
#ifndef _MWIFIEX_PCIE_H
#define _MWIFIEX_PCIE_H
+#include <linux/completion.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
@@ -345,6 +346,7 @@ struct pcie_service_card {
struct pci_dev *dev;
struct mwifiex_adapter *adapter;
struct mwifiex_pcie_device pcie;
+ struct completion fw_done;
u8 txbd_flush;
u32 txbd_wrptr;
diff --git a/drivers/net/wireless/marvell/mwifiex/scan.c b/drivers/net/wireless/marvell/mwifiex/scan.c
index 97c9765b5bc6..181691684a08 100644
--- a/drivers/net/wireless/marvell/mwifiex/scan.c
+++ b/drivers/net/wireless/marvell/mwifiex/scan.c
@@ -827,7 +827,6 @@ mwifiex_config_scan(struct mwifiex_private *priv,
u32 num_probes;
u32 ssid_len;
u32 chan_idx;
- u32 chan_num;
u32 scan_type;
u16 scan_dur;
u8 channel;
@@ -1105,13 +1104,12 @@ mwifiex_config_scan(struct mwifiex_private *priv,
mwifiex_dbg(adapter, INFO,
"info: Scan: Scanning current channel only\n");
}
- chan_num = chan_idx;
} else {
mwifiex_dbg(adapter, INFO,
"info: Scan: Creating full region channel list\n");
- chan_num = mwifiex_scan_create_channel_list(priv, user_scan_in,
- scan_chan_list,
- *filtered_scan);
+ mwifiex_scan_create_channel_list(priv, user_scan_in,
+ scan_chan_list,
+ *filtered_scan);
}
}
@@ -1671,6 +1669,10 @@ static int mwifiex_save_hidden_ssid_channels(struct mwifiex_private *priv,
}
done:
+ /* beacon_ie buffer was allocated in function
+ * mwifiex_fill_new_bss_desc(). Free it now.
+ */
+ kfree(bss_desc->beacon_buf);
kfree(bss_desc);
return 0;
}
diff --git a/drivers/net/wireless/marvell/mwifiex/sdio.c b/drivers/net/wireless/marvell/mwifiex/sdio.c
index 8718950004f3..740d79cd91fa 100644
--- a/drivers/net/wireless/marvell/mwifiex/sdio.c
+++ b/drivers/net/wireless/marvell/mwifiex/sdio.c
@@ -49,8 +49,6 @@ static u8 user_rmmod;
static struct mwifiex_if_ops sdio_ops;
static unsigned long iface_work_flags;
-static struct semaphore add_remove_card_sem;
-
static struct memory_type_mapping generic_mem_type_map[] = {
{"DUMP", NULL, 0, 0xDD},
};
@@ -79,59 +77,18 @@ static const struct of_device_id mwifiex_sdio_of_match_table[] = {
{ }
};
-static irqreturn_t mwifiex_wake_irq_wifi(int irq, void *priv)
-{
- struct mwifiex_plt_wake_cfg *cfg = priv;
-
- if (cfg->irq_wifi >= 0) {
- pr_info("%s: wake by wifi", __func__);
- cfg->wake_by_wifi = true;
- disable_irq_nosync(irq);
- }
-
- return IRQ_HANDLED;
-}
-
/* This function parses the device tree node using the mmc subnode
 * devicetree API. The device node is saved in card->plt_of_node.
 * If the device tree node exists and includes an interrupts attribute,
 * this function will also request the platform specific wakeup interrupt.
*/
-static int mwifiex_sdio_probe_of(struct device *dev, struct sdio_mmc_card *card)
+static int mwifiex_sdio_probe_of(struct device *dev)
{
- struct mwifiex_plt_wake_cfg *cfg;
- int ret;
-
if (!of_match_node(mwifiex_sdio_of_match_table, dev->of_node)) {
dev_err(dev, "required compatible string missing\n");
return -EINVAL;
}
- card->plt_of_node = dev->of_node;
- card->plt_wake_cfg = devm_kzalloc(dev, sizeof(*card->plt_wake_cfg),
- GFP_KERNEL);
- cfg = card->plt_wake_cfg;
- if (cfg && card->plt_of_node) {
- cfg->irq_wifi = irq_of_parse_and_map(card->plt_of_node, 0);
- if (!cfg->irq_wifi) {
- dev_dbg(dev,
- "fail to parse irq_wifi from device tree\n");
- } else {
- ret = devm_request_irq(dev, cfg->irq_wifi,
- mwifiex_wake_irq_wifi,
- IRQF_TRIGGER_LOW,
- "wifi_wake", cfg);
- if (ret) {
- dev_dbg(dev,
- "Failed to request irq_wifi %d (%d)\n",
- cfg->irq_wifi, ret);
- card->plt_wake_cfg = NULL;
- return 0;
- }
- disable_irq(cfg->irq_wifi);
- }
- }
-
return 0;
}
@@ -152,10 +109,12 @@ mwifiex_sdio_probe(struct sdio_func *func, const struct sdio_device_id *id)
pr_debug("info: vendor=0x%4.04X device=0x%4.04X class=%d function=%d\n",
func->vendor, func->device, func->class, func->num);
- card = kzalloc(sizeof(struct sdio_mmc_card), GFP_KERNEL);
+ card = devm_kzalloc(&func->dev, sizeof(*card), GFP_KERNEL);
if (!card)
return -ENOMEM;
+ init_completion(&card->fw_done);
+
card->func = func;
card->device_id = id;
@@ -185,20 +144,18 @@ mwifiex_sdio_probe(struct sdio_func *func, const struct sdio_device_id *id)
if (ret) {
dev_err(&func->dev, "failed to enable function\n");
- goto err_free;
+ return ret;
}
/* device tree node parsing and platform specific configuration */
if (func->dev.of_node) {
- ret = mwifiex_sdio_probe_of(&func->dev, card);
- if (ret) {
- dev_err(&func->dev, "SDIO dt node parse failed\n");
+ ret = mwifiex_sdio_probe_of(&func->dev);
+ if (ret)
goto err_disable;
- }
}
- ret = mwifiex_add_card(card, &add_remove_card_sem, &sdio_ops,
- MWIFIEX_SDIO);
+ ret = mwifiex_add_card(card, &card->fw_done, &sdio_ops,
+ MWIFIEX_SDIO, &func->dev);
if (ret) {
dev_err(&func->dev, "add card failed\n");
goto err_disable;
@@ -210,8 +167,6 @@ err_disable:
sdio_claim_host(func);
sdio_disable_func(func);
sdio_release_host(func);
-err_free:
- kfree(card);
return ret;
}
@@ -231,17 +186,10 @@ static int mwifiex_sdio_resume(struct device *dev)
struct sdio_func *func = dev_to_sdio_func(dev);
struct sdio_mmc_card *card;
struct mwifiex_adapter *adapter;
- mmc_pm_flag_t pm_flag = 0;
- if (func) {
- pm_flag = sdio_get_host_pm_caps(func);
- card = sdio_get_drvdata(func);
- if (!card || !card->adapter) {
- pr_err("resume: invalid card or adapter\n");
- return 0;
- }
- } else {
- pr_err("resume: sdio_func is not specified\n");
+ card = sdio_get_drvdata(func);
+ if (!card || !card->adapter) {
+ dev_err(dev, "resume: invalid card or adapter\n");
return 0;
}
@@ -259,12 +207,7 @@ static int mwifiex_sdio_resume(struct device *dev)
mwifiex_cancel_hs(mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_STA),
MWIFIEX_SYNC_CMD);
- /* Disable platform specific wakeup interrupt */
- if (card->plt_wake_cfg && card->plt_wake_cfg->irq_wifi >= 0) {
- disable_irq_wake(card->plt_wake_cfg->irq_wifi);
- if (!card->plt_wake_cfg->wake_by_wifi)
- disable_irq(card->plt_wake_cfg->irq_wifi);
- }
+ mwifiex_disable_wake(adapter);
return 0;
}
@@ -285,6 +228,8 @@ mwifiex_sdio_remove(struct sdio_func *func)
if (!card)
return;
+ wait_for_completion(&card->fw_done);
+
adapter = card->adapter;
if (!adapter || !adapter->priv_num)
return;
@@ -292,9 +237,6 @@ mwifiex_sdio_remove(struct sdio_func *func)
mwifiex_dbg(adapter, INFO, "info: SDIO func num=%d\n", func->num);
if (user_rmmod && !adapter->mfg_mode) {
- if (adapter->is_suspended)
- mwifiex_sdio_resume(adapter->dev);
-
mwifiex_deauthenticate_all(adapter);
priv = mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_ANY);
@@ -302,7 +244,7 @@ mwifiex_sdio_remove(struct sdio_func *func)
mwifiex_init_shutdown_fw(priv, MWIFIEX_FUNC_SHUTDOWN);
}
- mwifiex_remove_card(card->adapter, &add_remove_card_sem);
+ mwifiex_remove_card(adapter);
}
/*
@@ -323,40 +265,38 @@ static int mwifiex_sdio_suspend(struct device *dev)
mmc_pm_flag_t pm_flag = 0;
int ret = 0;
- if (func) {
- pm_flag = sdio_get_host_pm_caps(func);
- pr_debug("cmd: %s: suspend: PM flag = 0x%x\n",
- sdio_func_id(func), pm_flag);
- if (!(pm_flag & MMC_PM_KEEP_POWER)) {
- pr_err("%s: cannot remain alive while host is"
- " suspended\n", sdio_func_id(func));
- return -ENOSYS;
- }
+ pm_flag = sdio_get_host_pm_caps(func);
+ pr_debug("cmd: %s: suspend: PM flag = 0x%x\n",
+ sdio_func_id(func), pm_flag);
+ if (!(pm_flag & MMC_PM_KEEP_POWER)) {
+ dev_err(dev, "%s: cannot remain alive while host is"
+ " suspended\n", sdio_func_id(func));
+ return -ENOSYS;
+ }
- card = sdio_get_drvdata(func);
- if (!card || !card->adapter) {
- pr_err("suspend: invalid card or adapter\n");
- return 0;
- }
- } else {
- pr_err("suspend: sdio_func is not specified\n");
+ card = sdio_get_drvdata(func);
+ if (!card) {
+ dev_err(dev, "suspend: invalid card\n");
return 0;
}
- adapter = card->adapter;
+ /* Might still be loading firmware */
+ wait_for_completion(&card->fw_done);
- /* Enable platform specific wakeup interrupt */
- if (card->plt_wake_cfg && card->plt_wake_cfg->irq_wifi >= 0) {
- card->plt_wake_cfg->wake_by_wifi = false;
- enable_irq(card->plt_wake_cfg->irq_wifi);
- enable_irq_wake(card->plt_wake_cfg->irq_wifi);
+ adapter = card->adapter;
+ if (!adapter) {
+ dev_err(dev, "adapter is not valid\n");
+ return 0;
}
+ mwifiex_enable_wake(adapter);
+
/* Enable the Host Sleep */
if (!mwifiex_enable_hs(adapter)) {
mwifiex_dbg(adapter, ERROR,
"cmd: failed to suspend\n");
adapter->hs_enabling = false;
+ mwifiex_disable_wake(adapter);
return -EFAULT;
}
@@ -1195,7 +1135,6 @@ static void mwifiex_deaggr_sdio_pkt(struct mwifiex_adapter *adapter,
{
u32 total_pkt_len, pkt_len;
struct sk_buff *skb_deaggr;
- u32 pkt_type;
u16 blk_size;
u8 blk_num;
u8 *data;
@@ -1216,8 +1155,6 @@ static void mwifiex_deaggr_sdio_pkt(struct mwifiex_adapter *adapter,
break;
}
pkt_len = le16_to_cpu(*(__le16 *)(data + SDIO_HEADER_OFFSET));
- pkt_type = le16_to_cpu(*(__le16 *)(data + SDIO_HEADER_OFFSET +
- 2));
if ((pkt_len + SDIO_HEADER_OFFSET) > blk_size) {
mwifiex_dbg(adapter, ERROR,
"%s: error in pkt_len,\t"
@@ -2066,6 +2003,7 @@ mwifiex_unregister_dev(struct mwifiex_adapter *adapter)
struct sdio_mmc_card *card = adapter->card;
if (adapter->card) {
+ card->adapter = NULL;
sdio_claim_host(card->func);
sdio_disable_func(card->func);
sdio_release_host(card->func);
@@ -2098,9 +2036,6 @@ static int mwifiex_register_dev(struct mwifiex_adapter *adapter)
return ret;
}
-
- adapter->dev = &func->dev;
-
strcpy(adapter->fw_name, card->firmware);
if (card->fw_dump_enh) {
adapter->mem_type_mapping_tbl = generic_mem_type_map;
@@ -2240,8 +2175,6 @@ static void mwifiex_cleanup_sdio(struct mwifiex_adapter *adapter)
kfree(card->mpa_rx.len_arr);
kfree(card->mpa_tx.buf);
kfree(card->mpa_rx.buf);
- sdio_set_drvdata(card->func, NULL);
- kfree(card);
}
/*
@@ -2291,6 +2224,14 @@ static void mwifiex_recreate_adapter(struct sdio_mmc_card *card)
mwifiex_sdio_remove(func);
+ /*
+ * Normally, we would let the driver core take care of releasing these.
+ * But we're not letting the driver core handle this one. See above
+ * TODO.
+ */
+ sdio_set_drvdata(func, NULL);
+ devm_kfree(&func->dev, card);
+
/* power cycle the adapter */
sdio_claim_host(func);
mmc_hw_reset(func->card->host);
@@ -2767,14 +2708,11 @@ static struct mwifiex_if_ops sdio_ops = {
/*
* This function initializes the SDIO driver.
*
- * This initiates the semaphore and registers the device with
- * SDIO bus.
+ * This registers the device with SDIO bus.
*/
static int
mwifiex_sdio_init_module(void)
{
- sema_init(&add_remove_card_sem, 1);
-
/* Clear the flag in case user removes the card. */
user_rmmod = 0;
@@ -2793,9 +2731,6 @@ mwifiex_sdio_init_module(void)
static void
mwifiex_sdio_cleanup_module(void)
{
- if (!down_interruptible(&add_remove_card_sem))
- up(&add_remove_card_sem);
-
/* Set the flag as user is removing this module. */
user_rmmod = 1;
cancel_work_sync(&sdio_work);
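
The kzalloc()-to-devm_kzalloc() conversions above tie the card structure's lifetime to the struct device, which is why the error and remove paths in pcie.c, sdio.c and usb.c lose their kfree() calls. The sdio recreate path is the one place that releases early by hand, via devm_kfree(). In outline, with hypothetical names:

#include <linux/device.h>
#include <linux/slab.h>

struct mycard { int id; };

static int mydrv_probe(struct device *dev)
{
	struct mycard *card;

	/* Freed automatically when dev is unbound: no kfree() on the
	 * error paths and none in remove(). */
	card = devm_kzalloc(dev, sizeof(*card), GFP_KERNEL);
	if (!card)
		return -ENOMEM;

	dev_set_drvdata(dev, card);
	return 0;
}

static void mydrv_recreate(struct device *dev, struct mycard *card)
{
	/* Early release for a manual re-probe cycle, mirroring the
	 * mwifiex_recreate_adapter() comment above. */
	dev_set_drvdata(dev, NULL);
	devm_kfree(dev, card);
}
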
diff --git a/drivers/net/wireless/marvell/mwifiex/sdio.h b/drivers/net/wireless/marvell/mwifiex/sdio.h
index db837f12c547..cdbf3a3ac7f9 100644
--- a/drivers/net/wireless/marvell/mwifiex/sdio.h
+++ b/drivers/net/wireless/marvell/mwifiex/sdio.h
@@ -21,6 +21,7 @@
#define _MWIFIEX_SDIO_H
+#include <linux/completion.h>
#include <linux/mmc/sdio.h>
#include <linux/mmc/sdio_ids.h>
#include <linux/mmc/sdio_func.h>
@@ -154,11 +155,6 @@
a->mpa_rx.start_port = 0; \
} while (0)
-struct mwifiex_plt_wake_cfg {
- int irq_wifi;
- bool wake_by_wifi;
-};
-
/* data structure for SDIO MPA TX */
struct mwifiex_sdio_mpa_tx {
/* multiport tx aggregation buffer pointer */
@@ -242,9 +238,8 @@ struct mwifiex_sdio_card_reg {
struct sdio_mmc_card {
struct sdio_func *func;
struct mwifiex_adapter *adapter;
- struct device_node *plt_of_node;
- struct mwifiex_plt_wake_cfg *plt_wake_cfg;
+ struct completion fw_done;
const char *firmware;
const struct mwifiex_sdio_card_reg *reg;
u8 max_ports;
diff --git a/drivers/net/wireless/marvell/mwifiex/sta_cmd.c b/drivers/net/wireless/marvell/mwifiex/sta_cmd.c
index 2a162c33d271..125e448712dd 100644
--- a/drivers/net/wireless/marvell/mwifiex/sta_cmd.c
+++ b/drivers/net/wireless/marvell/mwifiex/sta_cmd.c
@@ -368,7 +368,10 @@ mwifiex_cmd_802_11_hs_cfg(struct mwifiex_private *priv,
{
struct mwifiex_adapter *adapter = priv->adapter;
struct host_cmd_ds_802_11_hs_cfg_enh *hs_cfg = &cmd->params.opt_hs_cfg;
+ u8 *tlv = (u8 *)hs_cfg + sizeof(struct host_cmd_ds_802_11_hs_cfg_enh);
+ struct mwifiex_ps_param_in_hs *psparam_tlv = NULL;
bool hs_activate = false;
+ u16 size;
if (!hscfg_param)
/* New Activate command */
@@ -385,13 +388,14 @@ mwifiex_cmd_802_11_hs_cfg(struct mwifiex_private *priv,
memcpy(((u8 *) hs_cfg) +
sizeof(struct host_cmd_ds_802_11_hs_cfg_enh),
adapter->arp_filter, adapter->arp_filter_size);
- cmd->size = cpu_to_le16
- (adapter->arp_filter_size +
- sizeof(struct host_cmd_ds_802_11_hs_cfg_enh)
- + S_DS_GEN);
+ size = adapter->arp_filter_size +
+ sizeof(struct host_cmd_ds_802_11_hs_cfg_enh)
+ + S_DS_GEN;
+ tlv = (u8 *)hs_cfg
+ + sizeof(struct host_cmd_ds_802_11_hs_cfg_enh)
+ + adapter->arp_filter_size;
} else {
- cmd->size = cpu_to_le16(S_DS_GEN + sizeof(struct
- host_cmd_ds_802_11_hs_cfg_enh));
+ size = S_DS_GEN + sizeof(struct host_cmd_ds_802_11_hs_cfg_enh);
}
if (hs_activate) {
hs_cfg->action = cpu_to_le16(HS_ACTIVATE);
@@ -401,12 +405,25 @@ mwifiex_cmd_802_11_hs_cfg(struct mwifiex_private *priv,
hs_cfg->params.hs_config.conditions = hscfg_param->conditions;
hs_cfg->params.hs_config.gpio = hscfg_param->gpio;
hs_cfg->params.hs_config.gap = hscfg_param->gap;
+
+ size += sizeof(struct mwifiex_ps_param_in_hs);
+ psparam_tlv = (struct mwifiex_ps_param_in_hs *)tlv;
+ psparam_tlv->header.type =
+ cpu_to_le16(TLV_TYPE_PS_PARAMS_IN_HS);
+ psparam_tlv->header.len =
+ cpu_to_le16(sizeof(struct mwifiex_ps_param_in_hs)
+ - sizeof(struct mwifiex_ie_types_header));
+ psparam_tlv->hs_wake_int = cpu_to_le32(HS_DEF_WAKE_INTERVAL);
+ psparam_tlv->hs_inact_timeout =
+ cpu_to_le32(HS_DEF_INACTIVITY_TIMEOUT);
+
mwifiex_dbg(adapter, CMD,
"cmd: HS_CFG_CMD: condition:0x%x gpio:0x%x gap:0x%x\n",
hs_cfg->params.hs_config.conditions,
hs_cfg->params.hs_config.gpio,
hs_cfg->params.hs_config.gap);
}
+ cmd->size = cpu_to_le16(size);
return 0;
}
@@ -1729,7 +1746,6 @@ mwifiex_cmd_tdls_oper(struct mwifiex_private *priv,
{
struct host_cmd_ds_tdls_oper *tdls_oper = &cmd->params.tdls_oper;
struct mwifiex_ds_tdls_oper *oper = data_buf;
- struct mwifiex_sta_node *sta_ptr;
struct host_cmd_tlv_rates *tlv_rates;
struct mwifiex_ie_types_htcap *ht_capab;
struct mwifiex_ie_types_qos_info *wmm_qos_info;
@@ -1747,7 +1763,6 @@ mwifiex_cmd_tdls_oper(struct mwifiex_private *priv,
tdls_oper->reason = 0;
memcpy(tdls_oper->peer_mac, oper->peer_mac, ETH_ALEN);
- sta_ptr = mwifiex_get_sta_entry(priv, oper->peer_mac);
pos = (u8 *)tdls_oper + sizeof(struct host_cmd_ds_tdls_oper);
@@ -1885,6 +1900,24 @@ static int mwifiex_cmd_get_wakeup_reason(struct mwifiex_private *priv,
return 0;
}
+/* This function checks if the command is supported by firmware */
+static int mwifiex_is_cmd_supported(struct mwifiex_private *priv, u16 cmd_no)
+{
+ if (!ISSUPP_ADHOC_ENABLED(priv->adapter->fw_cap_info)) {
+ switch (cmd_no) {
+ case HostCmd_CMD_802_11_IBSS_COALESCING_STATUS:
+ case HostCmd_CMD_802_11_AD_HOC_START:
+ case HostCmd_CMD_802_11_AD_HOC_JOIN:
+ case HostCmd_CMD_802_11_AD_HOC_STOP:
+ return -EOPNOTSUPP;
+ default:
+ break;
+ }
+ }
+
+ return 0;
+}
+
/*
* This function prepares the commands before sending them to the firmware.
*
@@ -1898,6 +1931,13 @@ int mwifiex_sta_prepare_cmd(struct mwifiex_private *priv, uint16_t cmd_no,
struct host_cmd_ds_command *cmd_ptr = cmd_buf;
int ret = 0;
+ if (mwifiex_is_cmd_supported(priv, cmd_no)) {
+ mwifiex_dbg(priv->adapter, ERROR,
+ "0x%x command not supported by firmware\n",
+ cmd_no);
+ return -EOPNOTSUPP;
+ }
+
/* Prepare command */
switch (cmd_no) {
case HostCmd_CMD_GET_HW_SPEC:
@@ -2191,7 +2231,6 @@ int mwifiex_sta_init_cmd(struct mwifiex_private *priv, u8 first_sta, bool init)
{
struct mwifiex_adapter *adapter = priv->adapter;
int ret;
- u16 enable = true;
struct mwifiex_ds_11n_amsdu_aggr_ctrl amsdu_aggr_ctrl;
struct mwifiex_ds_auto_ds auto_ds;
enum state_11d_t state_11d;
@@ -2218,9 +2257,7 @@ int mwifiex_sta_init_cmd(struct mwifiex_private *priv, u8 first_sta, bool init)
* The cal-data can be read from device tree and/or
* a configuration file and downloaded to firmware.
*/
- if (priv->adapter->iface_type == MWIFIEX_SDIO &&
- adapter->dev->of_node) {
- adapter->dt_node = adapter->dev->of_node;
+ if (adapter->dt_node) {
if (of_property_read_u32(adapter->dt_node,
"marvell,wakeup-pin",
&data) == 0) {
@@ -2228,19 +2265,13 @@ int mwifiex_sta_init_cmd(struct mwifiex_private *priv, u8 first_sta, bool init)
adapter->hs_cfg.gpio = data;
}
- ret = mwifiex_dnld_dt_cfgdata(priv, adapter->dt_node,
- "marvell,caldata");
- if (ret)
- return -1;
+ mwifiex_dnld_dt_cfgdata(priv, adapter->dt_node,
+ "marvell,caldata");
}
- if (adapter->cal_data) {
- ret = mwifiex_send_cmd(priv, HostCmd_CMD_CFG_DATA,
- HostCmd_ACT_GEN_SET, 0, NULL,
- true);
- if (ret)
- return -1;
- }
+ if (adapter->cal_data)
+ mwifiex_send_cmd(priv, HostCmd_CMD_CFG_DATA,
+ HostCmd_ACT_GEN_SET, 0, NULL, true);
/* Read MAC address from HW */
ret = mwifiex_send_cmd(priv, HostCmd_CMD_GET_HW_SPEC,
@@ -2312,16 +2343,6 @@ int mwifiex_sta_init_cmd(struct mwifiex_private *priv, u8 first_sta, bool init)
if (ret)
return -1;
- if (priv->bss_type == MWIFIEX_BSS_TYPE_STA) {
- /* set ibss coalescing_status */
- ret = mwifiex_send_cmd(
- priv,
- HostCmd_CMD_802_11_IBSS_COALESCING_STATUS,
- HostCmd_ACT_GEN_SET, 0, &enable, true);
- if (ret)
- return -1;
- }
-
memset(&amsdu_aggr_ctrl, 0, sizeof(amsdu_aggr_ctrl));
amsdu_aggr_ctrl.enable = true;
/* Send request to firmware */
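
The hs_cfg rework above changes how the command length is computed: instead of writing cmd->size in each branch, the code tracks a running size, appends the new power-save TLV after the fixed body (and after any ARP filter payload), and converts to little-endian once at the end. A generic sketch of the append-a-TLV step, with the mwifiex types simplified and the TLV ID treated as an assumption:

#include <linux/kernel.h>
#include <linux/types.h>

struct tlv_header {
	__le16 type;
	__le16 len;
} __packed;

struct ps_param_tlv {
	struct tlv_header header;
	__le32 wake_int;
	__le32 inact_timeout;
} __packed;

/* Append one TLV at *pos and fold it into the running command size.
 * The TLV length covers the payload only, not the header, matching the
 * header.len computation in the hunk above. */
static u8 *append_ps_tlv(u8 *pos, u16 *size, u32 wake_int, u32 inact_timeout)
{
	struct ps_param_tlv *tlv = (struct ps_param_tlv *)pos;

	tlv->header.type = cpu_to_le16(0x0100 + 181);	/* assumed TLV ID */
	tlv->header.len = cpu_to_le16(sizeof(*tlv) - sizeof(tlv->header));
	tlv->wake_int = cpu_to_le32(wake_int);
	tlv->inact_timeout = cpu_to_le32(inact_timeout);

	*size += sizeof(*tlv);
	return pos + sizeof(*tlv);
}
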
diff --git a/drivers/net/wireless/marvell/mwifiex/uap_cmd.c b/drivers/net/wireless/marvell/mwifiex/uap_cmd.c
index a7e9f544f219..35d8636bdb91 100644
--- a/drivers/net/wireless/marvell/mwifiex/uap_cmd.c
+++ b/drivers/net/wireless/marvell/mwifiex/uap_cmd.c
@@ -404,7 +404,7 @@ mwifiex_set_wmm_params(struct mwifiex_private *priv,
struct cfg80211_ap_settings *params)
{
const u8 *vendor_ie;
- struct ieee_types_header *wmm_ie;
+ const u8 *wmm_ie;
u8 wmm_oui[] = {0x00, 0x50, 0xf2, 0x02};
vendor_ie = cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT,
@@ -412,9 +412,9 @@ mwifiex_set_wmm_params(struct mwifiex_private *priv,
params->beacon.tail,
params->beacon.tail_len);
if (vendor_ie) {
- wmm_ie = (struct ieee_types_header *)vendor_ie;
- memcpy(&bss_cfg->wmm_info, wmm_ie + 1,
- sizeof(bss_cfg->wmm_info));
+ wmm_ie = vendor_ie;
+ memcpy(&bss_cfg->wmm_info, wmm_ie +
+ sizeof(struct ieee_types_header), *(wmm_ie + 1));
priv->wmm_enabled = 1;
} else {
memset(&bss_cfg->wmm_info, 0, sizeof(bss_cfg->wmm_info));
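
The uap_cmd change above switches the WMM copy from a fixed sizeof(bss_cfg->wmm_info) to the element's own length byte, *(wmm_ie + 1), so short IEs no longer drag trailing beacon bytes into the config. When the length byte comes from a peer it is also worth clamping it to the destination size; a hedged sketch of that variant, not the mwifiex code:

#include <linux/kernel.h>
#include <linux/string.h>

/* ie[0] is the element ID, ie[1] the payload length, ie[2..] the
 * payload (IEEE 802.11 information element encoding). */
static void copy_ie_payload(void *dst, size_t dst_len, const u8 *ie)
{
	size_t len = min_t(size_t, ie[1], dst_len);

	memset(dst, 0, dst_len);	/* deterministic tail for short IEs */
	memcpy(dst, ie + 2, len);
}
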
diff --git a/drivers/net/wireless/marvell/mwifiex/usb.c b/drivers/net/wireless/marvell/mwifiex/usb.c
index 73eb0846db21..c563160b3b6b 100644
--- a/drivers/net/wireless/marvell/mwifiex/usb.c
+++ b/drivers/net/wireless/marvell/mwifiex/usb.c
@@ -24,7 +24,6 @@
static u8 user_rmmod;
static struct mwifiex_if_ops usb_ops;
-static struct semaphore add_remove_card_sem;
static struct usb_device_id mwifiex_usb_table[] = {
/* 8766 */
@@ -380,16 +379,17 @@ static int mwifiex_usb_probe(struct usb_interface *intf,
struct usb_endpoint_descriptor *epd;
int ret, i;
struct usb_card_rec *card;
- u16 id_vendor, id_product, bcd_device, bcd_usb;
+ u16 id_vendor, id_product, bcd_device;
- card = kzalloc(sizeof(struct usb_card_rec), GFP_KERNEL);
+ card = devm_kzalloc(&intf->dev, sizeof(*card), GFP_KERNEL);
if (!card)
return -ENOMEM;
+ init_completion(&card->fw_done);
+
id_vendor = le16_to_cpu(udev->descriptor.idVendor);
id_product = le16_to_cpu(udev->descriptor.idProduct);
bcd_device = le16_to_cpu(udev->descriptor.bcdDevice);
- bcd_usb = le16_to_cpu(udev->descriptor.bcdUSB);
pr_debug("info: VID/PID = %X/%X, Boot2 version = %X\n",
id_vendor, id_product, bcd_device);
@@ -475,12 +475,11 @@ static int mwifiex_usb_probe(struct usb_interface *intf,
usb_set_intfdata(intf, card);
- ret = mwifiex_add_card(card, &add_remove_card_sem, &usb_ops,
- MWIFIEX_USB);
+ ret = mwifiex_add_card(card, &card->fw_done, &usb_ops,
+ MWIFIEX_USB, &card->udev->dev);
if (ret) {
pr_err("%s: mwifiex_add_card failed: %d\n", __func__, ret);
usb_reset_device(udev);
- kfree(card);
return ret;
}
@@ -503,17 +502,27 @@ static int mwifiex_usb_suspend(struct usb_interface *intf, pm_message_t message)
struct usb_tx_data_port *port;
int i, j;
- if (!card || !card->adapter) {
- pr_err("%s: card or card->adapter is NULL\n", __func__);
+ /* Might still be loading firmware */
+ wait_for_completion(&card->fw_done);
+
+ adapter = card->adapter;
+ if (!adapter) {
+ dev_err(&intf->dev, "card is not valid\n");
return 0;
}
- adapter = card->adapter;
if (unlikely(adapter->is_suspended))
mwifiex_dbg(adapter, WARN,
"Device already suspended\n");
- mwifiex_enable_hs(adapter);
+ /* Enable the Host Sleep */
+ if (!mwifiex_enable_hs(adapter)) {
+ mwifiex_dbg(adapter, ERROR,
+ "cmd: failed to suspend\n");
+ adapter->hs_enabling = false;
+ return -EFAULT;
+ }
+
/* 'is_suspended' flag indicates device is suspended.
* It must be set here before the usb_kill_urb() calls. Reason
@@ -559,8 +568,9 @@ static int mwifiex_usb_resume(struct usb_interface *intf)
struct mwifiex_adapter *adapter;
int i;
- if (!card || !card->adapter) {
- pr_err("%s: card or card->adapter is NULL\n", __func__);
+ if (!card->adapter) {
+ dev_err(&intf->dev, "%s: card->adapter is NULL\n",
+ __func__);
return 0;
}
adapter = card->adapter;
@@ -602,21 +612,13 @@ static void mwifiex_usb_disconnect(struct usb_interface *intf)
struct usb_card_rec *card = usb_get_intfdata(intf);
struct mwifiex_adapter *adapter;
- if (!card || !card->adapter) {
- pr_err("%s: card or card->adapter is NULL\n", __func__);
- return;
- }
+ wait_for_completion(&card->fw_done);
adapter = card->adapter;
- if (!adapter->priv_num)
+ if (!adapter || !adapter->priv_num)
return;
if (user_rmmod && !adapter->mfg_mode) {
-#ifdef CONFIG_PM
- if (adapter->is_suspended)
- mwifiex_usb_resume(intf);
-#endif
-
mwifiex_deauthenticate_all(adapter);
mwifiex_init_shutdown_fw(mwifiex_get_priv(adapter,
@@ -628,13 +630,9 @@ static void mwifiex_usb_disconnect(struct usb_interface *intf)
mwifiex_dbg(adapter, FATAL,
"%s: removing card\n", __func__);
- mwifiex_remove_card(adapter, &add_remove_card_sem);
+ mwifiex_remove_card(adapter);
- usb_set_intfdata(intf, NULL);
usb_put_dev(interface_to_usbdev(intf));
- kfree(card);
-
- return;
}
static struct usb_driver mwifiex_usb_driver = {
@@ -932,7 +930,6 @@ static int mwifiex_register_dev(struct mwifiex_adapter *adapter)
struct usb_card_rec *card = (struct usb_card_rec *)adapter->card;
card->adapter = adapter;
- adapter->dev = &card->udev->dev;
switch (le16_to_cpu(card->udev->descriptor.idProduct)) {
case USB8997_PID_1:
@@ -1206,8 +1203,7 @@ static struct mwifiex_if_ops usb_ops = {
/* This function initializes the USB driver module.
*
- * This initiates the semaphore and registers the device with
- * USB bus.
+ * This registers the device with the USB bus.
*/
static int mwifiex_usb_init_module(void)
{
@@ -1215,8 +1211,6 @@ static int mwifiex_usb_init_module(void)
pr_debug("Marvell USB8797 Driver\n");
- sema_init(&add_remove_card_sem, 1);
-
ret = usb_register(&mwifiex_usb_driver);
if (ret)
pr_err("Driver register failed!\n");
@@ -1236,9 +1230,6 @@ static int mwifiex_usb_init_module(void)
*/
static void mwifiex_usb_cleanup_module(void)
{
- if (!down_interruptible(&add_remove_card_sem))
- up(&add_remove_card_sem);
-
/* set the flag as user is removing this module */
user_rmmod = 1;
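
Annotation: the usb.c changes above retire the module-wide add_remove_card_sem in favour of a per-card struct completion, so suspend and disconnect can block until asynchronous firmware loading has finished instead of serializing every add/remove path. A minimal sketch of that handshake, assuming a demo_* driver in which the firmware path signals completion once (the actual mwifiex signalling site lies outside these hunks):

    #include <linux/completion.h>

    struct demo_card {
            struct completion fw_done;
    };

    static void demo_probe(struct demo_card *card)
    {
            init_completion(&card->fw_done);   /* armed before async FW load */
    }

    static void demo_fw_load_finished(struct demo_card *card)
    {
            complete_all(&card->fw_done);      /* releases current and future waiters */
    }

    static void demo_suspend_or_disconnect(struct demo_card *card)
    {
            /* Might still be loading firmware */
            wait_for_completion(&card->fw_done);
    }
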
diff --git a/drivers/net/wireless/marvell/mwifiex/usb.h b/drivers/net/wireless/marvell/mwifiex/usb.h
index 30e8eb8c259d..e5f204ea018b 100644
--- a/drivers/net/wireless/marvell/mwifiex/usb.h
+++ b/drivers/net/wireless/marvell/mwifiex/usb.h
@@ -20,6 +20,7 @@
#ifndef _MWIFIEX_USB_H
#define _MWIFIEX_USB_H
+#include <linux/completion.h>
#include <linux/usb.h>
#define USB8XXX_VID 0x1286
@@ -75,6 +76,7 @@ struct usb_card_rec {
struct mwifiex_adapter *adapter;
struct usb_device *udev;
struct usb_interface *intf;
+ struct completion fw_done;
u8 rx_cmd_ep;
struct urb_context rx_cmd;
atomic_t rx_cmd_urb_pending;
diff --git a/drivers/net/wireless/marvell/mwifiex/wmm.c b/drivers/net/wireless/marvell/mwifiex/wmm.c
index 0eb246502e1d..28c2f6fae3e6 100644
--- a/drivers/net/wireless/marvell/mwifiex/wmm.c
+++ b/drivers/net/wireless/marvell/mwifiex/wmm.c
@@ -503,8 +503,10 @@ mwifiex_wmm_del_pkts_in_ralist_node(struct mwifiex_private *priv,
struct mwifiex_adapter *adapter = priv->adapter;
struct sk_buff *skb, *tmp;
- skb_queue_walk_safe(&ra_list->skb_head, skb, tmp)
+ skb_queue_walk_safe(&ra_list->skb_head, skb, tmp) {
+ skb_unlink(skb, &ra_list->skb_head);
mwifiex_write_data_complete(adapter, skb, 0, -1);
+ }
}
/*
@@ -600,11 +602,15 @@ mwifiex_clean_txrx(struct mwifiex_private *priv)
priv->adapter->if_ops.clean_pcie_ring(priv->adapter);
spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, flags);
- skb_queue_walk_safe(&priv->tdls_txq, skb, tmp)
+ skb_queue_walk_safe(&priv->tdls_txq, skb, tmp) {
+ skb_unlink(skb, &priv->tdls_txq);
mwifiex_write_data_complete(priv->adapter, skb, 0, -1);
+ }
- skb_queue_walk_safe(&priv->bypass_txq, skb, tmp)
+ skb_queue_walk_safe(&priv->bypass_txq, skb, tmp) {
+ skb_unlink(skb, &priv->bypass_txq);
mwifiex_write_data_complete(priv->adapter, skb, 0, -1);
+ }
atomic_set(&priv->adapter->bypass_tx_pending, 0);
idr_for_each(&priv->ack_status_frames, mwifiex_free_ack_frame, NULL);
@@ -1099,6 +1105,7 @@ mwifiex_wmm_get_highest_priolist_ptr(struct mwifiex_adapter *adapter,
&adapter->bss_prio_tbl[j].bss_prio_head,
list) {
+try_again:
priv_tmp = adapter->bss_prio_tbl[j].bss_prio_cur->priv;
if (((priv_tmp->bss_mode != NL80211_IFTYPE_ADHOC) &&
@@ -1134,8 +1141,18 @@ mwifiex_wmm_get_highest_priolist_ptr(struct mwifiex_adapter *adapter,
ra_list_spinlock,
flags_ra);
}
- }
+ if (atomic_read(&priv_tmp->wmm.tx_pkts_queued) != 0) {
+ atomic_set(&priv_tmp->wmm.highest_queued_prio,
+ HIGH_PRIO_TID);
+ /* Iterate over the current priv once more, since
+ * packets still remain in its data queue
+ */
+ goto try_again;
+ } else
+ atomic_set(&priv_tmp->wmm.highest_queued_prio,
+ NO_PKT_PRIO_TID);
+ }
}
return NULL;
@@ -1328,9 +1345,11 @@ mwifiex_send_processed_packet(struct mwifiex_private *priv,
skb = skb_dequeue(&ptr->skb_head);
if (adapter->data_sent || adapter->tx_lock_flag) {
+ ptr->total_pkt_count--;
spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
ra_list_flags);
skb_queue_tail(&adapter->tx_data_q, skb);
+ atomic_dec(&priv->wmm.tx_pkts_queued);
atomic_inc(&adapter->tx_queued);
return;
}
@@ -1388,6 +1407,10 @@ mwifiex_send_processed_packet(struct mwifiex_private *priv,
if (ret != -EBUSY) {
mwifiex_rotate_priolists(priv, ptr, ptr_index);
atomic_dec(&priv->wmm.tx_pkts_queued);
+ spin_lock_irqsave(&priv->wmm.ra_list_spinlock, ra_list_flags);
+ ptr->total_pkt_count--;
+ spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
+ ra_list_flags);
}
}
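
Annotation: the wmm.c hunks above make two related fixes: queued skbs are now detached with skb_unlink() before being completed, so the queues never keep pointers to buffers that have been handed back, and the per-RA-list and per-priv packet counters are adjusted on every path that removes a packet. A minimal sketch of the drain pattern, with dev_kfree_skb_any() standing in for mwifiex_write_data_complete():

    #include <linux/skbuff.h>

    static void demo_drain_queue(struct sk_buff_head *q)
    {
            struct sk_buff *skb, *tmp;

            skb_queue_walk_safe(q, skb, tmp) {
                    skb_unlink(skb, q);      /* detach before handing it back */
                    dev_kfree_skb_any(skb);
            }
    }
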
diff --git a/drivers/net/wireless/mediatek/mt7601u/Makefile b/drivers/net/wireless/mediatek/mt7601u/Makefile
index ea9ed8a5db4d..08fc802ead4b 100644
--- a/drivers/net/wireless/mediatek/mt7601u/Makefile
+++ b/drivers/net/wireless/mediatek/mt7601u/Makefile
@@ -1,5 +1,3 @@
-ccflags-y += -D__CHECK_ENDIAN__
-
obj-$(CONFIG_MT7601U) += mt7601u.o
mt7601u-objs = \
diff --git a/drivers/net/wireless/mediatek/mt7601u/init.c b/drivers/net/wireless/mediatek/mt7601u/init.c
index 44d46e25db80..a6e901766226 100644
--- a/drivers/net/wireless/mediatek/mt7601u/init.c
+++ b/drivers/net/wireless/mediatek/mt7601u/init.c
@@ -293,13 +293,13 @@ static void mt7601u_mac_stop_hw(struct mt7601u_dev *dev)
ok = 0;
i = 200;
while (i--) {
- if ((mt76_rr(dev, 0x0430) & 0x00ff0000) ||
- (mt76_rr(dev, 0x0a30) & 0xffffffff) ||
- (mt76_rr(dev, 0x0a34) & 0xffffffff))
- ok++;
- if (ok > 6)
- break;
-
+ if (!(mt76_rr(dev, MT_RXQ_STA) & 0x00ff0000) &&
+ !mt76_rr(dev, 0x0a30) &&
+ !mt76_rr(dev, 0x0a34)) {
+ if (ok++ > 5)
+ break;
+ continue;
+ }
msleep(1);
}
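
Annotation: the rewritten mt7601u stop loop above inverts the old logic: instead of counting non-idle samples, it only breaks once the RX/TX status registers have been sampled idle several times, and it names the 0x0430 register via the new MT_RXQ_STA define. A sketch of the same polling idiom, with demo_hw_idle() as a hypothetical register check:

    #include <linux/delay.h>

    static bool demo_hw_idle(void);            /* hypothetical register check */

    static bool demo_wait_for_idle(int tries)
    {
            int ok = 0;

            while (tries--) {
                    if (demo_hw_idle()) {
                            if (ok++ > 5)      /* several idle samples observed */
                                    return true;
                            continue;          /* re-sample immediately */
                    }
                    msleep(1);                 /* busy: back off, keep trying */
            }
            return false;
    }
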
diff --git a/drivers/net/wireless/mediatek/mt7601u/regs.h b/drivers/net/wireless/mediatek/mt7601u/regs.h
index 27a429d90cec..2a8837002f00 100644
--- a/drivers/net/wireless/mediatek/mt7601u/regs.h
+++ b/drivers/net/wireless/mediatek/mt7601u/regs.h
@@ -192,6 +192,9 @@
#define MT_BCN_OFFSET_BASE 0x041c
#define MT_BCN_OFFSET(_n) (MT_BCN_OFFSET_BASE + ((_n) << 2))
+#define MT_RXQ_STA 0x0430
+#define MT_TXQ_STA 0x0434
+
#define MT_RF_CSR_CFG 0x0500
#define MT_RF_CSR_CFG_DATA GENMASK(7, 0)
#define MT_RF_CSR_CFG_REG_ID GENMASK(13, 8)
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2400pci.c b/drivers/net/wireless/ralink/rt2x00/rt2400pci.c
index 155f343981fe..085c5b423bdf 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2400pci.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2400pci.c
@@ -1459,10 +1459,7 @@ static int rt2400pci_validate_eeprom(struct rt2x00_dev *rt2x00dev)
* Start validation of the data that has been read.
*/
mac = rt2x00_eeprom_addr(rt2x00dev, EEPROM_MAC_ADDR_0);
- if (!is_valid_ether_addr(mac)) {
- eth_random_addr(mac);
- rt2x00_eeprom_dbg(rt2x00dev, "MAC: %pM\n", mac);
- }
+ rt2x00lib_set_mac_address(rt2x00dev, mac);
rt2x00_eeprom_read(rt2x00dev, EEPROM_ANTENNA, &word);
if (word == 0xffff) {
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2500pci.c b/drivers/net/wireless/ralink/rt2x00/rt2500pci.c
index 2553cdd74066..9832fd50c793 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2500pci.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2500pci.c
@@ -1585,10 +1585,7 @@ static int rt2500pci_validate_eeprom(struct rt2x00_dev *rt2x00dev)
* Start validation of the data that has been read.
*/
mac = rt2x00_eeprom_addr(rt2x00dev, EEPROM_MAC_ADDR_0);
- if (!is_valid_ether_addr(mac)) {
- eth_random_addr(mac);
- rt2x00_eeprom_dbg(rt2x00dev, "MAC: %pM\n", mac);
- }
+ rt2x00lib_set_mac_address(rt2x00dev, mac);
rt2x00_eeprom_read(rt2x00dev, EEPROM_ANTENNA, &word);
if (word == 0xffff) {
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2500usb.c b/drivers/net/wireless/ralink/rt2x00/rt2500usb.c
index 2d64611de300..cd3ab5a9e98d 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2500usb.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2500usb.c
@@ -1349,10 +1349,7 @@ static int rt2500usb_validate_eeprom(struct rt2x00_dev *rt2x00dev)
* Start validation of the data that has been read.
*/
mac = rt2x00_eeprom_addr(rt2x00dev, EEPROM_MAC_ADDR_0);
- if (!is_valid_ether_addr(mac)) {
- eth_random_addr(mac);
- rt2x00_eeprom_dbg(rt2x00dev, "MAC: %pM\n", mac);
- }
+ rt2x00lib_set_mac_address(rt2x00dev, mac);
rt2x00_eeprom_read(rt2x00dev, EEPROM_ANTENNA, &word);
if (word == 0xffff) {
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c
index bf3f0a39908c..4fb79e05078f 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c
@@ -1621,7 +1621,7 @@ static void rt2800_config_ht_opmode(struct rt2x00_dev *rt2x00dev,
* => Protect all HT40 transmissions.
*/
mm20_mode = gf20_mode = 0;
- mm40_mode = gf40_mode = 2;
+ mm40_mode = gf40_mode = 1;
break;
case IEEE80211_HT_OP_MODE_PROTECTION_NONMEMBER:
@@ -1644,7 +1644,7 @@ static void rt2800_config_ht_opmode(struct rt2x00_dev *rt2x00dev,
* Legacy STAs are present
* => Protect all HT transmissions.
*/
- mm20_mode = mm40_mode = gf20_mode = gf40_mode = 2;
+ mm20_mode = mm40_mode = gf20_mode = gf40_mode = 1;
/*
* If erp protection is needed we have to protect HT
@@ -1660,7 +1660,7 @@ static void rt2800_config_ht_opmode(struct rt2x00_dev *rt2x00dev,
/* check for STAs not supporting greenfield mode */
if (any_sta_nongf)
- gf20_mode = gf40_mode = 2;
+ gf20_mode = gf40_mode = 1;
/* Update HT protection config */
rt2800_register_read(rt2x00dev, MM20_PROT_CFG, &reg);
@@ -1691,8 +1691,6 @@ void rt2800_config_erp(struct rt2x00_dev *rt2x00dev, struct rt2x00lib_erp *erp,
if (changed & BSS_CHANGED_ERP_PREAMBLE) {
rt2800_register_read(rt2x00dev, AUTO_RSP_CFG, &reg);
- rt2x00_set_field32(&reg, AUTO_RSP_CFG_BAC_ACK_POLICY,
- !!erp->short_preamble);
rt2x00_set_field32(&reg, AUTO_RSP_CFG_AR_PREAMBLE,
!!erp->short_preamble);
rt2800_register_write(rt2x00dev, AUTO_RSP_CFG, reg);
@@ -1707,7 +1705,7 @@ void rt2800_config_erp(struct rt2x00_dev *rt2x00dev, struct rt2x00lib_erp *erp,
if (changed & BSS_CHANGED_BASIC_RATES) {
rt2800_register_write(rt2x00dev, LEGACY_BASIC_RATE,
- erp->basic_rates);
+ 0xff0 | erp->basic_rates);
rt2800_register_write(rt2x00dev, HT_BASIC_RATE, 0x00008003);
}
@@ -4672,11 +4670,14 @@ static int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
0x00000000);
}
} else if (rt2x00_rt(rt2x00dev, RT5390) ||
- rt2x00_rt(rt2x00dev, RT5392) ||
- rt2x00_rt(rt2x00dev, RT5592)) {
+ rt2x00_rt(rt2x00dev, RT5392)) {
rt2800_register_write(rt2x00dev, TX_SW_CFG0, 0x00000404);
rt2800_register_write(rt2x00dev, TX_SW_CFG1, 0x00080606);
rt2800_register_write(rt2x00dev, TX_SW_CFG2, 0x00000000);
+ } else if (rt2x00_rt(rt2x00dev, RT5592)) {
+ rt2800_register_write(rt2x00dev, TX_SW_CFG0, 0x00000404);
+ rt2800_register_write(rt2x00dev, TX_SW_CFG1, 0x00000000);
+ rt2800_register_write(rt2x00dev, TX_SW_CFG2, 0x00000000);
} else {
rt2800_register_write(rt2x00dev, TX_SW_CFG0, 0x00000000);
rt2800_register_write(rt2x00dev, TX_SW_CFG1, 0x00080606);
@@ -4735,9 +4736,9 @@ static int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
rt2800_register_read(rt2x00dev, AUTO_RSP_CFG, &reg);
rt2x00_set_field32(&reg, AUTO_RSP_CFG_AUTORESPONDER, 1);
rt2x00_set_field32(&reg, AUTO_RSP_CFG_BAC_ACK_POLICY, 1);
- rt2x00_set_field32(&reg, AUTO_RSP_CFG_CTS_40_MMODE, 0);
+ rt2x00_set_field32(&reg, AUTO_RSP_CFG_CTS_40_MMODE, 1);
rt2x00_set_field32(&reg, AUTO_RSP_CFG_CTS_40_MREF, 0);
- rt2x00_set_field32(&reg, AUTO_RSP_CFG_AR_PREAMBLE, 1);
+ rt2x00_set_field32(&reg, AUTO_RSP_CFG_AR_PREAMBLE, 0);
rt2x00_set_field32(&reg, AUTO_RSP_CFG_DUAL_CTS_EN, 0);
rt2x00_set_field32(&reg, AUTO_RSP_CFG_ACK_CTS_PSM_BIT, 0);
rt2800_register_write(rt2x00dev, AUTO_RSP_CFG, reg);
@@ -4770,9 +4771,9 @@ static int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
rt2800_register_read(rt2x00dev, MM20_PROT_CFG, &reg);
rt2x00_set_field32(&reg, MM20_PROT_CFG_PROTECT_RATE, 0x4004);
- rt2x00_set_field32(&reg, MM20_PROT_CFG_PROTECT_CTRL, 0);
+ rt2x00_set_field32(&reg, MM20_PROT_CFG_PROTECT_CTRL, 1);
rt2x00_set_field32(&reg, MM20_PROT_CFG_PROTECT_NAV_SHORT, 1);
- rt2x00_set_field32(&reg, MM20_PROT_CFG_TX_OP_ALLOW_CCK, 1);
+ rt2x00_set_field32(&reg, MM20_PROT_CFG_TX_OP_ALLOW_CCK, 0);
rt2x00_set_field32(&reg, MM20_PROT_CFG_TX_OP_ALLOW_OFDM, 1);
rt2x00_set_field32(&reg, MM20_PROT_CFG_TX_OP_ALLOW_MM20, 1);
rt2x00_set_field32(&reg, MM20_PROT_CFG_TX_OP_ALLOW_MM40, 0);
@@ -4783,9 +4784,9 @@ static int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
rt2800_register_read(rt2x00dev, MM40_PROT_CFG, &reg);
rt2x00_set_field32(&reg, MM40_PROT_CFG_PROTECT_RATE, 0x4084);
- rt2x00_set_field32(&reg, MM40_PROT_CFG_PROTECT_CTRL, 0);
+ rt2x00_set_field32(&reg, MM40_PROT_CFG_PROTECT_CTRL, 1);
rt2x00_set_field32(&reg, MM40_PROT_CFG_PROTECT_NAV_SHORT, 1);
- rt2x00_set_field32(&reg, MM40_PROT_CFG_TX_OP_ALLOW_CCK, 1);
+ rt2x00_set_field32(&reg, MM40_PROT_CFG_TX_OP_ALLOW_CCK, 0);
rt2x00_set_field32(&reg, MM40_PROT_CFG_TX_OP_ALLOW_OFDM, 1);
rt2x00_set_field32(&reg, MM40_PROT_CFG_TX_OP_ALLOW_MM20, 1);
rt2x00_set_field32(&reg, MM40_PROT_CFG_TX_OP_ALLOW_MM40, 1);
@@ -4796,9 +4797,9 @@ static int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
rt2800_register_read(rt2x00dev, GF20_PROT_CFG, &reg);
rt2x00_set_field32(&reg, GF20_PROT_CFG_PROTECT_RATE, 0x4004);
- rt2x00_set_field32(&reg, GF20_PROT_CFG_PROTECT_CTRL, 0);
+ rt2x00_set_field32(&reg, GF20_PROT_CFG_PROTECT_CTRL, 1);
rt2x00_set_field32(&reg, GF20_PROT_CFG_PROTECT_NAV_SHORT, 1);
- rt2x00_set_field32(&reg, GF20_PROT_CFG_TX_OP_ALLOW_CCK, 1);
+ rt2x00_set_field32(&reg, GF20_PROT_CFG_TX_OP_ALLOW_CCK, 0);
rt2x00_set_field32(&reg, GF20_PROT_CFG_TX_OP_ALLOW_OFDM, 1);
rt2x00_set_field32(&reg, GF20_PROT_CFG_TX_OP_ALLOW_MM20, 1);
rt2x00_set_field32(&reg, GF20_PROT_CFG_TX_OP_ALLOW_MM40, 0);
@@ -4809,9 +4810,9 @@ static int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
rt2800_register_read(rt2x00dev, GF40_PROT_CFG, &reg);
rt2x00_set_field32(&reg, GF40_PROT_CFG_PROTECT_RATE, 0x4084);
- rt2x00_set_field32(&reg, GF40_PROT_CFG_PROTECT_CTRL, 0);
+ rt2x00_set_field32(&reg, GF40_PROT_CFG_PROTECT_CTRL, 1);
rt2x00_set_field32(&reg, GF40_PROT_CFG_PROTECT_NAV_SHORT, 1);
- rt2x00_set_field32(&reg, GF40_PROT_CFG_TX_OP_ALLOW_CCK, 1);
+ rt2x00_set_field32(&reg, GF40_PROT_CFG_TX_OP_ALLOW_CCK, 0);
rt2x00_set_field32(&reg, GF40_PROT_CFG_TX_OP_ALLOW_OFDM, 1);
rt2x00_set_field32(&reg, GF40_PROT_CFG_TX_OP_ALLOW_MM20, 1);
rt2x00_set_field32(&reg, GF40_PROT_CFG_TX_OP_ALLOW_MM40, 1);
@@ -6756,7 +6757,6 @@ int rt2800_enable_radio(struct rt2x00_dev *rt2x00dev)
rt2800_register_read(rt2x00dev, WPDMA_GLO_CFG, &reg);
rt2x00_set_field32(&reg, WPDMA_GLO_CFG_ENABLE_TX_DMA, 1);
rt2x00_set_field32(&reg, WPDMA_GLO_CFG_ENABLE_RX_DMA, 1);
- rt2x00_set_field32(&reg, WPDMA_GLO_CFG_WP_DMA_BURST_SIZE, 2);
rt2x00_set_field32(&reg, WPDMA_GLO_CFG_TX_WRITEBACK_DONE, 1);
rt2800_register_write(rt2x00dev, WPDMA_GLO_CFG, reg);
@@ -6919,10 +6919,7 @@ static int rt2800_validate_eeprom(struct rt2x00_dev *rt2x00dev)
* Start validation of the data that has been read.
*/
mac = rt2800_eeprom_addr(rt2x00dev, EEPROM_MAC_ADDR_0);
- if (!is_valid_ether_addr(mac)) {
- eth_random_addr(mac);
- rt2x00_eeprom_dbg(rt2x00dev, "MAC: %pM\n", mac);
- }
+ rt2x00lib_set_mac_address(rt2x00dev, mac);
rt2800_eeprom_read(rt2x00dev, EEPROM_NIC_CONF0, &word);
if (word == 0xffff) {
@@ -7464,7 +7461,7 @@ static int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
char *default_power1;
char *default_power2;
char *default_power3;
- unsigned int i;
+ unsigned int i, tx_chains, rx_chains;
u32 reg;
/*
@@ -7475,7 +7472,6 @@ static int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
/*
* Initialize all hw fields.
*/
- ieee80211_hw_set(rt2x00dev->hw, SUPPORTS_HT_CCK_RATES);
ieee80211_hw_set(rt2x00dev->hw, REPORTS_TX_ACK_STATUS);
ieee80211_hw_set(rt2x00dev->hw, AMPDU_AGGREGATION);
ieee80211_hw_set(rt2x00dev->hw, PS_NULLFUNC_STACK);
@@ -7589,21 +7585,24 @@ static int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
IEEE80211_HT_CAP_SGI_20 |
IEEE80211_HT_CAP_SGI_40;
- if (rt2x00dev->default_ant.tx_chain_num >= 2)
+ tx_chains = rt2x00dev->default_ant.tx_chain_num;
+ rx_chains = rt2x00dev->default_ant.rx_chain_num;
+
+ if (tx_chains >= 2)
spec->ht.cap |= IEEE80211_HT_CAP_TX_STBC;
- spec->ht.cap |= rt2x00dev->default_ant.rx_chain_num <<
- IEEE80211_HT_CAP_RX_STBC_SHIFT;
+ spec->ht.cap |= rx_chains << IEEE80211_HT_CAP_RX_STBC_SHIFT;
spec->ht.ampdu_factor = 3;
spec->ht.ampdu_density = 4;
- spec->ht.mcs.tx_params =
- IEEE80211_HT_MCS_TX_DEFINED |
- IEEE80211_HT_MCS_TX_RX_DIFF |
- ((rt2x00dev->default_ant.tx_chain_num - 1) <<
- IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT);
+ spec->ht.mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
+ if (tx_chains != rx_chains) {
+ spec->ht.mcs.tx_params |= IEEE80211_HT_MCS_TX_RX_DIFF;
+ spec->ht.mcs.tx_params |=
+ (tx_chains - 1) << IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT;
+ }
- switch (rt2x00dev->default_ant.rx_chain_num) {
+ switch (rx_chains) {
case 3:
spec->ht.mcs.rx_mask[2] = 0xff;
case 2:
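
Annotation: the probe_hw_mode hunk above only advertises IEEE80211_HT_MCS_TX_RX_DIFF (plus a TX stream count) when the TX and RX chain counts actually differ, and the rx_mask switch it ends on relies on intentional fallthrough, each chain contributing one block of eight MCS rates. A sketch of that mask construction under the same assumption:

    static void demo_fill_rx_mask(unsigned char rx_mask[3],
                                  unsigned int rx_chains)
    {
            switch (rx_chains) {
            case 3:
                    rx_mask[2] = 0xff;      /* MCS 16-23 */
                    /* fall through */
            case 2:
                    rx_mask[1] = 0xff;      /* MCS 8-15 */
                    /* fall through */
            default:
                    rx_mask[0] = 0xff;      /* MCS 0-7 */
            }
    }
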
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800usb.c b/drivers/net/wireless/ralink/rt2x00/rt2800usb.c
index 4b0bb6b4f6f1..9f61293f1a56 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2800usb.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2800usb.c
@@ -341,8 +341,6 @@ static int rt2800usb_init_registers(struct rt2x00_dev *rt2x00dev)
rt2x00_set_field32(&reg, MAC_SYS_CTRL_RESET_BBP, 1);
rt2x00usb_register_write(rt2x00dev, MAC_SYS_CTRL, reg);
- rt2x00usb_register_write(rt2x00dev, USB_DMA_CFG, 0x00000000);
-
rt2x00usb_vendor_request_sw(rt2x00dev, USB_DEVICE_MODE, 0,
USB_MODE_RESET, REGISTER_TIMEOUT);
@@ -353,12 +351,11 @@ static int rt2800usb_init_registers(struct rt2x00_dev *rt2x00dev)
static int rt2800usb_enable_radio(struct rt2x00_dev *rt2x00dev)
{
- u32 reg;
+ u32 reg = 0;
if (unlikely(rt2800_wait_wpdma_ready(rt2x00dev)))
return -EIO;
- rt2x00usb_register_read(rt2x00dev, USB_DMA_CFG, &reg);
rt2x00_set_field32(&reg, USB_DMA_CFG_PHY_CLEAR, 0);
rt2x00_set_field32(&reg, USB_DMA_CFG_RX_BULK_AGG_EN, 0);
rt2x00_set_field32(&reg, USB_DMA_CFG_RX_BULK_AGG_TIMEOUT, 128);
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00.h b/drivers/net/wireless/ralink/rt2x00/rt2x00.h
index f68d492129c6..aa3d4ceef4ad 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2x00.h
+++ b/drivers/net/wireless/ralink/rt2x00/rt2x00.h
@@ -1403,6 +1403,7 @@ static inline void rt2x00debug_dump_frame(struct rt2x00_dev *rt2x00dev,
*/
u32 rt2x00lib_get_bssidx(struct rt2x00_dev *rt2x00dev,
struct ieee80211_vif *vif);
+void rt2x00lib_set_mac_address(struct rt2x00_dev *rt2x00dev, u8 *eeprom_mac_addr);
/*
* Interrupt context handlers.
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c b/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c
index 4e0c5653054b..eb7b71443657 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c
@@ -26,6 +26,8 @@
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/log2.h>
+#include <linux/of.h>
+#include <linux/of_net.h>
#include "rt2x00.h"
#include "rt2x00lib.h"
@@ -931,6 +933,21 @@ static void rt2x00lib_rate(struct ieee80211_rate *entry,
entry->flags |= IEEE80211_RATE_SHORT_PREAMBLE;
}
+void rt2x00lib_set_mac_address(struct rt2x00_dev *rt2x00dev, u8 *eeprom_mac_addr)
+{
+ const char *mac_addr;
+
+ mac_addr = of_get_mac_address(rt2x00dev->dev->of_node);
+ if (mac_addr)
+ ether_addr_copy(eeprom_mac_addr, mac_addr);
+
+ if (!is_valid_ether_addr(eeprom_mac_addr)) {
+ eth_random_addr(eeprom_mac_addr);
+ rt2x00_eeprom_dbg(rt2x00dev, "MAC: %pM\n", eeprom_mac_addr);
+ }
+}
+EXPORT_SYMBOL_GPL(rt2x00lib_set_mac_address);
+
static int rt2x00lib_probe_hw_modes(struct rt2x00_dev *rt2x00dev,
struct hw_mode_spec *spec)
{
@@ -1362,11 +1379,13 @@ int rt2x00lib_probe_dev(struct rt2x00_dev *rt2x00dev)
if (rt2x00dev->bcn->limit > 0)
rt2x00dev->hw->wiphy->interface_modes |=
BIT(NL80211_IFTYPE_ADHOC) |
- BIT(NL80211_IFTYPE_AP) |
#ifdef CONFIG_MAC80211_MESH
BIT(NL80211_IFTYPE_MESH_POINT) |
#endif
- BIT(NL80211_IFTYPE_WDS);
+#ifdef CONFIG_WIRELESS_WDS
+ BIT(NL80211_IFTYPE_WDS) |
+#endif
+ BIT(NL80211_IFTYPE_AP);
rt2x00dev->hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
@@ -1422,7 +1441,7 @@ void rt2x00lib_remove_dev(struct rt2x00_dev *rt2x00dev)
cancel_work_sync(&rt2x00dev->intf_work);
cancel_delayed_work_sync(&rt2x00dev->autowakeup_work);
cancel_work_sync(&rt2x00dev->sleep_work);
-#ifdef CONFIG_RT2X00_LIB_USB
+#if IS_ENABLED(CONFIG_RT2X00_LIB_USB)
if (rt2x00_is_usb(rt2x00dev)) {
usb_kill_anchored_urbs(rt2x00dev->anchor);
hrtimer_cancel(&rt2x00dev->txstatus_timer);
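
Annotation: rt2x00lib_set_mac_address(), added above and exported for the chipset drivers, gives every rt2x00 device the same MAC selection order: a devicetree-provided address wins, otherwise valid EEPROM bytes are kept, and an invalid result falls back to a random address. A condensed sketch of that ordering (demo_* name illustrative, error handling elided):

    #include <linux/etherdevice.h>
    #include <linux/of.h>
    #include <linux/of_net.h>

    static void demo_pick_mac(struct device_node *np, u8 *eeprom_mac)
    {
            const char *dt_mac = of_get_mac_address(np);

            if (dt_mac)                             /* 1. devicetree override */
                    ether_addr_copy(eeprom_mac, dt_mac);
            if (!is_valid_ether_addr(eeprom_mac))   /* 2. keep valid EEPROM bytes */
                    eth_random_addr(eeprom_mac);    /* 3. random fallback */
    }
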
diff --git a/drivers/net/wireless/ralink/rt2x00/rt61pci.c b/drivers/net/wireless/ralink/rt2x00/rt61pci.c
index 03013eb2f642..5306a3b2622d 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt61pci.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt61pci.c
@@ -2413,10 +2413,7 @@ static int rt61pci_validate_eeprom(struct rt2x00_dev *rt2x00dev)
* Start validation of the data that has been read.
*/
mac = rt2x00_eeprom_addr(rt2x00dev, EEPROM_MAC_ADDR_0);
- if (!is_valid_ether_addr(mac)) {
- eth_random_addr(mac);
- rt2x00_eeprom_dbg(rt2x00dev, "MAC: %pM\n", mac);
- }
+ rt2x00lib_set_mac_address(rt2x00dev, mac);
rt2x00_eeprom_read(rt2x00dev, EEPROM_ANTENNA, &word);
if (word == 0xffff) {
diff --git a/drivers/net/wireless/ralink/rt2x00/rt73usb.c b/drivers/net/wireless/ralink/rt2x00/rt73usb.c
index c1397a6d3cee..1a29c4d205a5 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt73usb.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt73usb.c
@@ -1766,10 +1766,7 @@ static int rt73usb_validate_eeprom(struct rt2x00_dev *rt2x00dev)
* Start validation of the data that has been read.
*/
mac = rt2x00_eeprom_addr(rt2x00dev, EEPROM_MAC_ADDR_0);
- if (!is_valid_ether_addr(mac)) {
- eth_random_addr(mac);
- rt2x00_eeprom_dbg(rt2x00dev, "MAC: %pM\n", mac);
- }
+ rt2x00lib_set_mac_address(rt2x00dev, mac);
rt2x00_eeprom_read(rt2x00dev, EEPROM_ANTENNA, &word);
if (word == 0xffff) {
diff --git a/drivers/net/wireless/ray_cs.c b/drivers/net/wireless/ray_cs.c
index 0881ba8535f4..4fdc7223c894 100644
--- a/drivers/net/wireless/ray_cs.c
+++ b/drivers/net/wireless/ray_cs.c
@@ -272,7 +272,6 @@ static const struct net_device_ops ray_netdev_ops = {
.ndo_set_config = ray_dev_config,
.ndo_get_stats = ray_get_stats,
.ndo_set_rx_mode = set_multicast_list,
- .ndo_change_mtu = eth_change_mtu,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
};
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h
index 08d587a342d3..df551b2b56eb 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h
+++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h
@@ -1337,10 +1337,11 @@ struct rtl8xxxu_fileops {
u32 ramask, int sgi);
void (*report_connect) (struct rtl8xxxu_priv *priv,
u8 macid, bool connect);
- void (*fill_txdesc) (struct ieee80211_hdr *hdr,
- struct rtl8xxxu_txdesc32 *tx_desc, u32 rate,
- u16 rate_flag, bool sgi, bool short_preamble,
- bool ampdu_enable);
+ void (*fill_txdesc) (struct ieee80211_hw *hw, struct ieee80211_hdr *hdr,
+ struct ieee80211_tx_info *tx_info,
+ struct rtl8xxxu_txdesc32 *tx_desc, bool sgi,
+ bool short_preamble, bool ampdu_enable,
+ u32 rts_rate);
int writeN_block_size;
int rx_agg_buf_size;
char tx_desc_size;
@@ -1434,14 +1435,16 @@ int rtl8xxxu_parse_rxdesc24(struct rtl8xxxu_priv *priv, struct sk_buff *skb);
int rtl8xxxu_gen2_channel_to_group(int channel);
bool rtl8xxxu_gen2_simularity_compare(struct rtl8xxxu_priv *priv,
int result[][8], int c1, int c2);
-void rtl8xxxu_fill_txdesc_v1(struct ieee80211_hdr *hdr,
- struct rtl8xxxu_txdesc32 *tx_desc, u32 rate,
- u16 rate_flag, bool sgi, bool short_preamble,
- bool ampdu_enable);
-void rtl8xxxu_fill_txdesc_v2(struct ieee80211_hdr *hdr,
- struct rtl8xxxu_txdesc32 *tx_desc32, u32 rate,
- u16 rate_flag, bool sgi, bool short_preamble,
- bool ampdu_enable);
+void rtl8xxxu_fill_txdesc_v1(struct ieee80211_hw *hw, struct ieee80211_hdr *hdr,
+ struct ieee80211_tx_info *tx_info,
+ struct rtl8xxxu_txdesc32 *tx_desc, bool sgi,
+ bool short_preamble, bool ampdu_enable,
+ u32 rts_rate);
+void rtl8xxxu_fill_txdesc_v2(struct ieee80211_hw *hw, struct ieee80211_hdr *hdr,
+ struct ieee80211_tx_info *tx_info,
+ struct rtl8xxxu_txdesc32 *tx_desc32, bool sgi,
+ bool short_preamble, bool ampdu_enable,
+ u32 rts_rate);
extern struct rtl8xxxu_fileops rtl8192cu_fops;
extern struct rtl8xxxu_fileops rtl8192eu_fops;
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c
index a793fedc3654..a1178c5d6ad8 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c
+++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c
@@ -1556,7 +1556,7 @@ exit:
return ret;
}
-void rtl8192eu_power_off(struct rtl8xxxu_priv *priv)
+static void rtl8192eu_power_off(struct rtl8xxxu_priv *priv)
{
u8 val8;
u16 val16;
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
index a5e6ec2152bf..3a86675020a2 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
+++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
@@ -4372,6 +4372,13 @@ void rtl8xxxu_gen1_report_connect(struct rtl8xxxu_priv *priv,
void rtl8xxxu_gen2_report_connect(struct rtl8xxxu_priv *priv,
u8 macid, bool connect)
{
+#ifdef RTL8XXXU_GEN2_REPORT_CONNECT
+ /*
+ * Barry Day reports this causes issues with 8192eu and 8723bu
+ * devices reconnecting. The reason for this is unclear, but
+ * until it is better understood, leave the code in place but
+ * disabled, so it is not lost.
+ */
struct h2c_cmd h2c;
memset(&h2c, 0, sizeof(struct h2c_cmd));
@@ -4383,6 +4390,7 @@ void rtl8xxxu_gen2_report_connect(struct rtl8xxxu_priv *priv,
h2c.media_status_rpt.parm &= ~BIT(0);
rtl8xxxu_gen2_h2c_cmd(priv, &h2c, sizeof(h2c.media_status_rpt));
+#endif
}
void rtl8xxxu_gen1_init_aggregation(struct rtl8xxxu_priv *priv)
@@ -4759,13 +4767,28 @@ static void rtl8xxxu_dump_action(struct device *dev,
* This format is used on 8188cu/8192cu/8723au
*/
void
-rtl8xxxu_fill_txdesc_v1(struct ieee80211_hdr *hdr,
- struct rtl8xxxu_txdesc32 *tx_desc, u32 rate,
- u16 rate_flag, bool sgi, bool short_preamble,
- bool ampdu_enable)
+rtl8xxxu_fill_txdesc_v1(struct ieee80211_hw *hw, struct ieee80211_hdr *hdr,
+ struct ieee80211_tx_info *tx_info,
+ struct rtl8xxxu_txdesc32 *tx_desc, bool sgi,
+ bool short_preamble, bool ampdu_enable, u32 rts_rate)
{
+ struct ieee80211_rate *tx_rate = ieee80211_get_tx_rate(hw, tx_info);
+ struct rtl8xxxu_priv *priv = hw->priv;
+ struct device *dev = &priv->udev->dev;
+ u32 rate;
+ u16 rate_flags = tx_info->control.rates[0].flags;
u16 seq_number;
+ if (rate_flags & IEEE80211_TX_RC_MCS &&
+ !ieee80211_is_mgmt(hdr->frame_control))
+ rate = tx_info->control.rates[0].idx + DESC_RATE_MCS0;
+ else
+ rate = tx_rate->hw_value;
+
+ if (rtl8xxxu_debug & RTL8XXXU_DEBUG_TX)
+ dev_info(dev, "%s: TX rate: %d, pkt size %d\n",
+ __func__, rate, le16_to_cpu(tx_desc->pkt_size));
+
seq_number = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));
tx_desc->txdw5 = cpu_to_le32(rate);
@@ -4796,15 +4819,16 @@ rtl8xxxu_fill_txdesc_v1(struct ieee80211_hdr *hdr,
if (sgi)
tx_desc->txdw5 |= cpu_to_le32(TXDESC32_SHORT_GI);
- if (rate_flag & IEEE80211_TX_RC_USE_RTS_CTS) {
- /*
- * Use RTS rate 24M - does the mac80211 tell
- * us which to use?
- */
- tx_desc->txdw4 |= cpu_to_le32(DESC_RATE_24M <<
- TXDESC32_RTS_RATE_SHIFT);
+ /*
+ * rts_rate is zero if RTS/CTS or CTS to SELF are not enabled
+ */
+ tx_desc->txdw4 |= cpu_to_le32(rts_rate << TXDESC32_RTS_RATE_SHIFT);
+ if (rate_flags & IEEE80211_TX_RC_USE_RTS_CTS) {
tx_desc->txdw4 |= cpu_to_le32(TXDESC32_RTS_CTS_ENABLE);
tx_desc->txdw4 |= cpu_to_le32(TXDESC32_HW_RTS_ENABLE);
+ } else if (rate_flags & IEEE80211_TX_RC_USE_CTS_PROTECT) {
+ tx_desc->txdw4 |= cpu_to_le32(TXDESC32_CTS_SELF_ENABLE);
+ tx_desc->txdw4 |= cpu_to_le32(TXDESC32_HW_RTS_ENABLE);
}
}
@@ -4813,16 +4837,31 @@ rtl8xxxu_fill_txdesc_v1(struct ieee80211_hdr *hdr,
* This format is used on 8192eu/8723bu
*/
void
-rtl8xxxu_fill_txdesc_v2(struct ieee80211_hdr *hdr,
- struct rtl8xxxu_txdesc32 *tx_desc32, u32 rate,
- u16 rate_flag, bool sgi, bool short_preamble,
- bool ampdu_enable)
+rtl8xxxu_fill_txdesc_v2(struct ieee80211_hw *hw, struct ieee80211_hdr *hdr,
+ struct ieee80211_tx_info *tx_info,
+ struct rtl8xxxu_txdesc32 *tx_desc32, bool sgi,
+ bool short_preamble, bool ampdu_enable, u32 rts_rate)
{
+ struct ieee80211_rate *tx_rate = ieee80211_get_tx_rate(hw, tx_info);
+ struct rtl8xxxu_priv *priv = hw->priv;
+ struct device *dev = &priv->udev->dev;
struct rtl8xxxu_txdesc40 *tx_desc40;
+ u32 rate;
+ u16 rate_flags = tx_info->control.rates[0].flags;
u16 seq_number;
tx_desc40 = (struct rtl8xxxu_txdesc40 *)tx_desc32;
+ if (rate_flags & IEEE80211_TX_RC_MCS &&
+ !ieee80211_is_mgmt(hdr->frame_control))
+ rate = tx_info->control.rates[0].idx + DESC_RATE_MCS0;
+ else
+ rate = tx_rate->hw_value;
+
+ if (rtl8xxxu_debug & RTL8XXXU_DEBUG_TX)
+ dev_info(dev, "%s: TX rate: %d, pkt size %d\n",
+ __func__, rate, le16_to_cpu(tx_desc40->pkt_size));
+
seq_number = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));
tx_desc40->txdw4 = cpu_to_le32(rate);
@@ -4849,15 +4888,19 @@ rtl8xxxu_fill_txdesc_v2(struct ieee80211_hdr *hdr,
if (short_preamble)
tx_desc40->txdw5 |= cpu_to_le32(TXDESC40_SHORT_PREAMBLE);
- if (rate_flag & IEEE80211_TX_RC_USE_RTS_CTS) {
- /*
- * Use RTS rate 24M - does the mac80211 tell
- * us which to use?
- */
- tx_desc40->txdw4 |= cpu_to_le32(DESC_RATE_24M <<
- TXDESC40_RTS_RATE_SHIFT);
+ /*
+ * rts_rate is zero if RTS/CTS or CTS to SELF are not enabled
+ */
+ tx_desc40->txdw4 |= cpu_to_le32(rts_rate << TXDESC40_RTS_RATE_SHIFT);
+ if (rate_flags & IEEE80211_TX_RC_USE_RTS_CTS) {
tx_desc40->txdw3 |= cpu_to_le32(TXDESC40_RTS_CTS_ENABLE);
tx_desc40->txdw3 |= cpu_to_le32(TXDESC40_HW_RTS_ENABLE);
+ } else if (rate_flags & IEEE80211_TX_RC_USE_CTS_PROTECT) {
+ /*
+ * For some reason the vendor driver doesn't set
+ * TXDESC40_HW_RTS_ENABLE for CTS to SELF
+ */
+ tx_desc40->txdw3 |= cpu_to_le32(TXDESC40_CTS_SELF_ENABLE);
}
}
@@ -4867,14 +4910,13 @@ static void rtl8xxxu_tx(struct ieee80211_hw *hw,
{
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
- struct ieee80211_rate *tx_rate = ieee80211_get_tx_rate(hw, tx_info);
struct rtl8xxxu_priv *priv = hw->priv;
struct rtl8xxxu_txdesc32 *tx_desc;
struct rtl8xxxu_tx_urb *tx_urb;
struct ieee80211_sta *sta = NULL;
struct ieee80211_vif *vif = tx_info->control.vif;
struct device *dev = &priv->udev->dev;
- u32 queue, rate;
+ u32 queue, rts_rate;
u16 pktlen = skb->len;
u16 seq_number;
u16 rate_flag = tx_info->control.rates[0].flags;
@@ -4901,10 +4943,6 @@ static void rtl8xxxu_tx(struct ieee80211_hw *hw,
goto error;
}
- if (rtl8xxxu_debug & RTL8XXXU_DEBUG_TX)
- dev_info(dev, "%s: TX rate: %d (%d), pkt size %d\n",
- __func__, tx_rate->bitrate, tx_rate->hw_value, pktlen);
-
if (ieee80211_is_action(hdr->frame_control))
rtl8xxxu_dump_action(dev, hdr);
@@ -4958,12 +4996,6 @@ static void rtl8xxxu_tx(struct ieee80211_hw *hw,
}
}
- if (rate_flag & IEEE80211_TX_RC_MCS &&
- !ieee80211_is_mgmt(hdr->frame_control))
- rate = tx_info->control.rates[0].idx + DESC_RATE_MCS0;
- else
- rate = tx_rate->hw_value;
-
if (rate_flag & IEEE80211_TX_RC_SHORT_GI ||
(ieee80211_is_data_qos(hdr->frame_control) &&
sta && sta->ht_cap.cap &
@@ -4974,10 +5006,17 @@ static void rtl8xxxu_tx(struct ieee80211_hw *hw,
(sta && vif && vif->bss_conf.use_short_preamble))
short_preamble = true;
+ if (rate_flag & IEEE80211_TX_RC_USE_RTS_CTS)
+ rts_rate = ieee80211_get_rts_cts_rate(hw, tx_info)->hw_value;
+ else if (rate_flag & IEEE80211_TX_RC_USE_CTS_PROTECT)
+ rts_rate = ieee80211_get_rts_cts_rate(hw, tx_info)->hw_value;
+ else
+ rts_rate = 0;
+
seq_number = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));
- priv->fops->fill_txdesc(hdr, tx_desc, rate, rate_flag,
- sgi, short_preamble, ampdu_enable);
+ priv->fops->fill_txdesc(hw, hdr, tx_info, tx_desc, sgi, short_preamble,
+ ampdu_enable, rts_rate);
rtl8xxxu_calc_tx_desc_csum(tx_desc);
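
Annotation: the rtl8xxxu_tx hunk above stops hard-coding a 24 Mbps RTS rate and instead asks mac80211 for it, passing the result down to the per-generation fill_txdesc implementations; a rate of zero signals that neither RTS/CTS nor CTS-to-self protection is in use. Since both protection flags fetch the same value, the selection could be collapsed as in this sketch (assuming the same tx_info layout the driver uses):

    #include <net/mac80211.h>

    static u32 demo_rts_rate(struct ieee80211_hw *hw,
                             struct ieee80211_tx_info *tx_info)
    {
            u16 flags = tx_info->control.rates[0].flags;

            if (flags & (IEEE80211_TX_RC_USE_RTS_CTS |
                         IEEE80211_TX_RC_USE_CTS_PROTECT))
                    return ieee80211_get_rts_cts_rate(hw, tx_info)->hw_value;

            return 0;       /* no protection requested */
    }
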
diff --git a/drivers/net/wireless/realtek/rtlwifi/Makefile b/drivers/net/wireless/realtek/rtlwifi/Makefile
index ad6d3c52ec57..84c2e826fa1d 100644
--- a/drivers/net/wireless/realtek/rtlwifi/Makefile
+++ b/drivers/net/wireless/realtek/rtlwifi/Makefile
@@ -30,5 +30,3 @@ obj-$(CONFIG_RTLBTCOEXIST) += btcoexist/
obj-$(CONFIG_RTL8723_COMMON) += rtl8723com/
obj-$(CONFIG_RTL8821AE) += rtl8821ae/
obj-$(CONFIG_RTL8192EE) += rtl8192ee/
-
-ccflags-y += -D__CHECK_ENDIAN__
diff --git a/drivers/net/wireless/realtek/rtlwifi/base.c b/drivers/net/wireless/realtek/rtlwifi/base.c
index 264466f59c57..4ac928bf1f8e 100644
--- a/drivers/net/wireless/realtek/rtlwifi/base.c
+++ b/drivers/net/wireless/realtek/rtlwifi/base.c
@@ -1303,12 +1303,13 @@ EXPORT_SYMBOL_GPL(rtl_action_proc);
static void setup_arp_tx(struct rtl_priv *rtlpriv, struct rtl_ps_ctl *ppsc)
{
+ struct ieee80211_hw *hw = rtlpriv->hw;
+
rtlpriv->ra.is_special_data = true;
if (rtlpriv->cfg->ops->get_btc_status())
rtlpriv->btcoexist.btc_ops->btc_special_packet_notify(
rtlpriv, 1);
- rtlpriv->enter_ps = false;
- schedule_work(&rtlpriv->works.lps_change_work);
+ rtl_lps_leave(hw);
ppsc->last_delaylps_stamp_jiffies = jiffies;
}
@@ -1381,8 +1382,7 @@ u8 rtl_is_special_data(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx,
if (is_tx) {
rtlpriv->ra.is_special_data = true;
- rtlpriv->enter_ps = false;
- schedule_work(&rtlpriv->works.lps_change_work);
+ rtl_lps_leave(hw);
ppsc->last_delaylps_stamp_jiffies = jiffies;
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/Makefile b/drivers/net/wireless/realtek/rtlwifi/btcoexist/Makefile
index 47ceecfcb7dc..d1454d4f08a5 100644
--- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/Makefile
+++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/Makefile
@@ -3,5 +3,3 @@ btcoexist-objs := halbtc8723b2ant.o \
rtl_btc.o
obj-$(CONFIG_RTLBTCOEXIST) += btcoexist.o
-
-ccflags-y += -D__CHECK_ENDIAN__
diff --git a/drivers/net/wireless/realtek/rtlwifi/core.c b/drivers/net/wireless/realtek/rtlwifi/core.c
index 8e7f23c11680..ded1493fee9c 100644
--- a/drivers/net/wireless/realtek/rtlwifi/core.c
+++ b/drivers/net/wireless/realtek/rtlwifi/core.c
@@ -1150,10 +1150,8 @@ static void rtl_op_bss_info_changed(struct ieee80211_hw *hw,
} else {
mstatus = RT_MEDIA_DISCONNECT;
- if (mac->link_state == MAC80211_LINKED) {
- rtlpriv->enter_ps = false;
- schedule_work(&rtlpriv->works.lps_change_work);
- }
+ if (mac->link_state == MAC80211_LINKED)
+ rtl_lps_leave(hw);
if (ppsc->p2p_ps_info.p2p_ps_mode > P2P_PS_NONE)
rtl_p2p_ps_cmd(hw, P2P_PS_DISABLE);
mac->link_state = MAC80211_NOLINK;
@@ -1431,8 +1429,7 @@ static void rtl_op_sw_scan_start(struct ieee80211_hw *hw,
}
if (mac->link_state == MAC80211_LINKED) {
- rtlpriv->enter_ps = false;
- schedule_work(&rtlpriv->works.lps_change_work);
+ rtl_lps_leave(hw);
mac->link_state = MAC80211_LINKED_SCANNING;
} else {
rtl_ips_nic_on(hw);
@@ -1832,7 +1829,8 @@ bool rtl_cmd_send_packet(struct ieee80211_hw *hw, struct sk_buff *skb)
spin_lock_irqsave(&rtlpriv->locks.irq_th_lock, flags);
pskb = __skb_dequeue(&ring->queue);
- kfree_skb(pskb);
+ if (pskb)
+ dev_kfree_skb_irq(pskb);
/*this is wrong, fill_tx_cmddesc needs update*/
pdesc = &ring->desc[0];
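
Annotation: the rtl_cmd_send_packet hunk above guards against an empty ring and frees the dequeued skb with dev_kfree_skb_irq() rather than kfree_skb(), since the queue lock is held with interrupts disabled and plain kfree_skb() is not safe in that context; dev_kfree_skb_irq() defers the actual free to softirq time. A sketch of the rule of thumb:

    #include <linux/skbuff.h>
    #include <linux/spinlock.h>

    static void demo_free_under_irq_lock(struct sk_buff_head *q,
                                         spinlock_t *lock)
    {
            struct sk_buff *skb;
            unsigned long flags;

            spin_lock_irqsave(lock, flags);
            skb = __skb_dequeue(q);
            if (skb)
                    dev_kfree_skb_irq(skb); /* safe with IRQs off; free is deferred */
            spin_unlock_irqrestore(lock, flags);
    }
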
diff --git a/drivers/net/wireless/realtek/rtlwifi/pci.c b/drivers/net/wireless/realtek/rtlwifi/pci.c
index 0dfa9eac3926..8bfe020edd3a 100644
--- a/drivers/net/wireless/realtek/rtlwifi/pci.c
+++ b/drivers/net/wireless/realtek/rtlwifi/pci.c
@@ -11,10 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
* The full GNU General Public License is included in this distribution in the
* file called LICENSE.
*
@@ -663,11 +659,9 @@ tx_status_ok:
}
if (((rtlpriv->link_info.num_rx_inperiod +
- rtlpriv->link_info.num_tx_inperiod) > 8) ||
- (rtlpriv->link_info.num_rx_inperiod > 2)) {
- rtlpriv->enter_ps = false;
- schedule_work(&rtlpriv->works.lps_change_work);
- }
+ rtlpriv->link_info.num_tx_inperiod) > 8) ||
+ (rtlpriv->link_info.num_rx_inperiod > 2))
+ rtl_lps_leave(hw);
}
static int _rtl_pci_init_one_rxdesc(struct ieee80211_hw *hw,
@@ -918,10 +912,8 @@ new_trx_end:
}
if (((rtlpriv->link_info.num_rx_inperiod +
rtlpriv->link_info.num_tx_inperiod) > 8) ||
- (rtlpriv->link_info.num_rx_inperiod > 2)) {
- rtlpriv->enter_ps = false;
- schedule_work(&rtlpriv->works.lps_change_work);
- }
+ (rtlpriv->link_info.num_rx_inperiod > 2))
+ rtl_lps_leave(hw);
skb = new_skb;
no_new:
if (rtlpriv->use_new_trx_flow) {
diff --git a/drivers/net/wireless/realtek/rtlwifi/pci.h b/drivers/net/wireless/realtek/rtlwifi/pci.h
index b951ebac15ea..578b1d900bfb 100644
--- a/drivers/net/wireless/realtek/rtlwifi/pci.h
+++ b/drivers/net/wireless/realtek/rtlwifi/pci.h
@@ -11,10 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
* The full GNU General Public License is included in this distribution in the
* file called LICENSE.
*
diff --git a/drivers/net/wireless/realtek/rtlwifi/ps.c b/drivers/net/wireless/realtek/rtlwifi/ps.c
index 18d979affc18..d0ffc4d508cf 100644
--- a/drivers/net/wireless/realtek/rtlwifi/ps.c
+++ b/drivers/net/wireless/realtek/rtlwifi/ps.c
@@ -407,8 +407,8 @@ void rtl_lps_set_psmode(struct ieee80211_hw *hw, u8 rt_psmode)
}
}
-/*Enter the leisure power save mode.*/
-void rtl_lps_enter(struct ieee80211_hw *hw)
+/* Interrupt-safe routine to enter the leisure power save mode. */
+static void rtl_lps_enter_core(struct ieee80211_hw *hw)
{
struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
@@ -444,10 +444,9 @@ void rtl_lps_enter(struct ieee80211_hw *hw)
spin_unlock_irqrestore(&rtlpriv->locks.lps_lock, flag);
}
-EXPORT_SYMBOL(rtl_lps_enter);
-/*Leave the leisure power save mode.*/
-void rtl_lps_leave(struct ieee80211_hw *hw)
+/* Interrupt-safe routine to leave the leisure power save mode. */
+static void rtl_lps_leave_core(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
@@ -477,7 +476,6 @@ void rtl_lps_leave(struct ieee80211_hw *hw)
}
spin_unlock_irqrestore(&rtlpriv->locks.lps_lock, flag);
}
-EXPORT_SYMBOL(rtl_lps_leave);
/* For sw LPS*/
void rtl_swlps_beacon(struct ieee80211_hw *hw, void *data, unsigned int len)
@@ -670,12 +668,34 @@ void rtl_lps_change_work_callback(struct work_struct *work)
struct rtl_priv *rtlpriv = rtl_priv(hw);
if (rtlpriv->enter_ps)
- rtl_lps_enter(hw);
+ rtl_lps_enter_core(hw);
else
- rtl_lps_leave(hw);
+ rtl_lps_leave_core(hw);
}
EXPORT_SYMBOL_GPL(rtl_lps_change_work_callback);
+void rtl_lps_enter(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ if (!in_interrupt())
+ return rtl_lps_enter_core(hw);
+ rtlpriv->enter_ps = true;
+ schedule_work(&rtlpriv->works.lps_change_work);
+}
+EXPORT_SYMBOL_GPL(rtl_lps_enter);
+
+void rtl_lps_leave(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ if (!in_interrupt())
+ return rtl_lps_leave_core(hw);
+ rtlpriv->enter_ps = false;
+ schedule_work(&rtlpriv->works.lps_change_work);
+}
+EXPORT_SYMBOL_GPL(rtl_lps_leave);
+
void rtl_swlps_wq_callback(void *data)
{
struct rtl_works *rtlworks = container_of_dwork_rtl(data,
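
Annotation: the ps.c rework above splits each LPS entry/exit path in two: the former bodies become static _core routines, and the exported rtl_lps_enter()/rtl_lps_leave() wrappers call them directly from process context but defer to the existing lps_change_work when invoked from interrupt context, where the core routines must not run. A sketch of that dispatch pattern with demo_* names:

    #include <linux/interrupt.h>
    #include <linux/workqueue.h>

    static void demo_lps_core(void)
    {
            /* the heavyweight work: locks, firmware commands, ... */
    }

    static void demo_lps_work_fn(struct work_struct *work)
    {
            demo_lps_core();                /* now safely in process context */
    }

    static DECLARE_WORK(demo_lps_work, demo_lps_work_fn);

    static void demo_lps_enter(void)
    {
            if (!in_interrupt()) {
                    demo_lps_core();        /* run inline when we may sleep */
                    return;
            }
            schedule_work(&demo_lps_work);  /* defer from IRQ context */
    }
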
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/Makefile b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/Makefile
index 676e7de27f27..dae4f0f19cd3 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/Makefile
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/Makefile
@@ -11,5 +11,3 @@ rtl8188ee-objs := \
trx.o
obj-$(CONFIG_RTL8188EE) += rtl8188ee.o
-
-ccflags-y += -D__CHECK_ENDIAN__
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/dm.h b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/dm.h
index 071ccee69eae..0fd2bac14db6 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/dm.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/dm.h
@@ -11,10 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
* The full GNU General Public License is included in this distribution in the
* file called LICENSE.
*
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/fw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/fw.h
index 21bd4a5337ab..b884c30c7b37 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/fw.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/fw.h
@@ -11,10 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
* The full GNU General Public License is included in this distribution in the
* file called LICENSE.
*
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.h
index 1850fde881b5..d38dbca3c19e 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.h
@@ -11,10 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
* The full GNU General Public License is included in this distribution in the
* file called LICENSE.
*
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/led.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/led.c
index f05c2c674165..6ea7fd7bb527 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/led.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/led.c
@@ -11,10 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
* The full GNU General Public License is included in this distribution in the
* file called LICENSE.
*
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192c/Makefile b/drivers/net/wireless/realtek/rtlwifi/rtl8192c/Makefile
index aee42d7ae8a2..0546b7556259 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192c/Makefile
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192c/Makefile
@@ -5,5 +5,3 @@ rtl8192c-common-objs := \
phy_common.o
obj-$(CONFIG_RTL8192C_COMMON) += rtl8192c-common.o
-
-ccflags-y += -D__CHECK_ENDIAN__
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192c/dm_common.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192c/dm_common.c
index 316be5ff69ca..bdc132bef822 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192c/dm_common.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192c/dm_common.c
@@ -11,10 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
* The full GNU General Public License is included in this distribution in the
* file called LICENSE.
*
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192c/dm_common.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192c/dm_common.h
index 6a72d0c8afa0..441604ff5858 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192c/dm_common.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192c/dm_common.h
@@ -11,10 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
* The full GNU General Public License is included in this distribution in the
* file called LICENSE.
*
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192c/fw_common.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192c/fw_common.h
index 864806c19ca7..c5fa14bda387 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192c/fw_common.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192c/fw_common.h
@@ -11,10 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
* The full GNU General Public License is included in this distribution in the
* file called LICENSE.
*
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192c/main.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192c/main.c
index 918b1d129e77..889bd1301154 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192c/main.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192c/main.c
@@ -11,10 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
* The full GNU General Public License is included in this distribution in the
* file called LICENSE.
*
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192c/phy_common.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192c/phy_common.c
index 27e3d5f9ca34..94dd25cf1ca8 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192c/phy_common.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192c/phy_common.c
@@ -11,10 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
* The full GNU General Public License is included in this distribution in the
* file called LICENSE.
*
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192c/phy_common.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192c/phy_common.h
index 202412577bf0..d11261e05a2e 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192c/phy_common.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192c/phy_common.h
@@ -11,10 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
* The full GNU General Public License is included in this distribution in the
* file called LICENSE.
*
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/Makefile b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/Makefile
index c0cb0cfe7d37..577c7adbc322 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/Makefile
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/Makefile
@@ -9,5 +9,3 @@ rtl8192ce-objs := \
trx.o
obj-$(CONFIG_RTL8192CE) += rtl8192ce.o
-
-ccflags-y += -D__CHECK_ENDIAN__
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/def.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/def.h
index 690a7a1675e2..b90aaf128072 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/def.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/def.h
@@ -11,10 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
* The full GNU General Public License is included in this distribution in the
* file called LICENSE.
*
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/dm.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/dm.c
index 09898cf2e07a..2c8205e46be4 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/dm.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/dm.c
@@ -11,10 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
* The full GNU General Public License is included in this distribution in the
* file called LICENSE.
*
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/dm.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/dm.h
index 38ba707015f5..9761d0ca31b0 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/dm.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/dm.h
@@ -11,10 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
* The full GNU General Public License is included in this distribution in the
* file called LICENSE.
*
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/hw.c
index a47be73a0980..4483d40ecad1 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/hw.c
@@ -11,10 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
* The full GNU General Public License is included in this distribution in the
* file called LICENSE.
*
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/hw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/hw.h
index 98a086822aac..877f138a0cb9 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/hw.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/hw.h
@@ -11,10 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
* The full GNU General Public License is included in this distribution in the
* file called LICENSE.
*
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/led.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/led.c
index 24e483ba3fa4..833193b751f7 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/led.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/led.c
@@ -11,10 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
* The full GNU General Public License is included in this distribution in the
* file called LICENSE.
*
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/led.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/led.h
index c5761066d383..f6edb9cd9b67 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/led.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/led.h
@@ -11,10 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
* The full GNU General Public License is included in this distribution in the
* file called LICENSE.
*
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/phy.c
index 46d0d945f283..d1b6a8fe7b6a 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/phy.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/phy.c
@@ -11,10 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
* The full GNU General Public License is included in this distribution in the
* file called LICENSE.
*
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/phy.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/phy.h
index dadc02b5de0b..93f3bc0197b4 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/phy.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/phy.h
@@ -11,10 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
* The full GNU General Public License is included in this distribution in the
* file called LICENSE.
*
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/reg.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/reg.h
index dc8460c0b32f..1bb7ed35812d 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/reg.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/reg.h
@@ -11,10 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
* The full GNU General Public License is included in this distribution in the
* file called LICENSE.
*
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/rf.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/rf.c
index a9c406f33d0a..7cae6350437c 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/rf.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/rf.c
@@ -11,10 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
* The full GNU General Public License is included in this distribution in the
* file called LICENSE.
*
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/rf.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/rf.h
index ebd72cae10b6..22c5e6f51331 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/rf.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/rf.h
@@ -11,10 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
* The full GNU General Public License is included in this distribution in the
* file called LICENSE.
*
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c
index 8b6e37ce3f66..691ddef1ae28 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c
@@ -11,10 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
* The full GNU General Public License is included in this distribution in the
* file called LICENSE.
*
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.h
index d2367a5d0cf5..9a1c89cbbda1 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.h
@@ -11,10 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
* The full GNU General Public License is included in this distribution in the
* file called LICENSE.
*
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/table.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/table.c
index 752f943a84ae..98b06d48a2dd 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/table.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/table.c
@@ -11,10 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
* The full GNU General Public License is included in this distribution in the
* file called LICENSE.
*
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/table.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/table.h
index 8b79161f71be..51e4e07396a6 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/table.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/table.h
@@ -11,10 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
* The full GNU General Public License is included in this distribution in the
* file called LICENSE.
*
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.c
index 781af1b99eb5..2ab4a00246cc 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.c
@@ -11,10 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
* The full GNU General Public License is included in this distribution in the
* file called LICENSE.
*
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.h
index 607304586c03..66291fc341e7 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.h
@@ -11,10 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
* The full GNU General Public License is included in this distribution in the
* file called LICENSE.
*
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/Makefile b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/Makefile
index ad2de6b839ef..97437dadc287 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/Makefile
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/Makefile
@@ -10,5 +10,3 @@ rtl8192cu-objs := \
trx.o

obj-$(CONFIG_RTL8192CU) += rtl8192cu.o
-
-ccflags-y += -D__CHECK_ENDIAN__
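These ccflags-y removals, here and in the rtlwifi Makefiles that follow, appear to track the kernel build making sparse's endianness checking (-D__CHECK_ENDIAN__) the default, which left the per-directory flag redundant; __le16/__le32 annotations are still verified on an ordinary make C=1 or C=2 run.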
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/def.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/def.h
index 74a479ac323d..316fe9990b6d 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/def.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/def.h
@@ -11,10 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
* The full GNU General Public License is included in this distribution in the
* file called LICENSE.
*
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/dm.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/dm.c
index c16209a336ea..00fc0685317a 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/dm.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/dm.c
@@ -11,10 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
* The full GNU General Public License is included in this distribution in the
* file called LICENSE.
*
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/dm.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/dm.h
index fafa6bac2a3f..ce71433792e3 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/dm.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/dm.h
@@ -11,10 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
* The full GNU General Public License is included in this distribution in the
* file called LICENSE.
*
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c
index ae8f055483fa..5c7da0cfc684 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c
@@ -11,10 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
* The full GNU General Public License is included in this distribution in the
* file called LICENSE.
*
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.h
index 67588083e6cc..932f056f7ef8 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.h
@@ -11,10 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
* The full GNU General Public License is included in this distribution in the
* file called LICENSE.
*
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/led.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/led.c
index 8514ab652520..c6240813ff7b 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/led.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/led.c
@@ -11,10 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
* The full GNU General Public License is included in this distribution in the
* file called LICENSE.
*
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/led.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/led.h
index 0f372278b7af..551deb8afb6f 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/led.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/led.h
@@ -11,10 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
* The full GNU General Public License is included in this distribution in the
* file called LICENSE.
*
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/mac.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/mac.c
index 68ca734853c1..cf212f694db5 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/mac.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/mac.c
@@ -11,10 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
* The full GNU General Public License is included in this distribution in the
* file called LICENSE.
*
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/mac.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/mac.h
index 20a49ec8459b..8573b7e257d9 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/mac.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/mac.h
@@ -11,10 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
* The full GNU General Public License is included in this distribution in the
* file called LICENSE.
*
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/phy.c
index 4b2976465905..f35f435c094e 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/phy.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/phy.c
@@ -11,10 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
* The full GNU General Public License is included in this distribution in the
* file called LICENSE.
*
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/phy.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/phy.h
index 42b068660483..a422c4db1a41 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/phy.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/phy.h
@@ -11,10 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
* The full GNU General Public License is included in this distribution in the
* file called LICENSE.
*
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/reg.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/reg.h
index 8b81465c629b..8185886daa8e 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/reg.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/reg.h
@@ -11,10 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
* The full GNU General Public License is included in this distribution in the
* file called LICENSE.
*
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/rf.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/rf.c
index ec2ea56f7933..5e3183024aa0 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/rf.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/rf.c
@@ -11,10 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
* The full GNU General Public License is included in this distribution in the
* file called LICENSE.
*
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/rf.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/rf.h
index 6f987de5b441..07aec0b20cc9 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/rf.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/rf.h
@@ -11,10 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
* The full GNU General Public License is included in this distribution in the
* file called LICENSE.
*
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c
index f953320f0e23..b84e13ac6ead 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c
@@ -11,10 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
* The full GNU General Public License is included in this distribution in the
* file called LICENSE.
*
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.h
index a1310abd0d54..4ea2cb225580 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.h
@@ -11,10 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
* The full GNU General Public License is included in this distribution in the
* file called LICENSE.
*
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/table.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/table.c
index 7903c154de00..b3ac981d88c6 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/table.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/table.c
@@ -11,10 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
* The full GNU General Public License is included in this distribution in the
* file called LICENSE.
*
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/table.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/table.h
index 4b020e9e30b1..851bf53d246c 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/table.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/table.h
@@ -11,10 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
* The full GNU General Public License is included in this distribution in the
* file called LICENSE.
*
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.c
index 95880fe4106e..1ea878fa7901 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.c
@@ -11,10 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
* The full GNU General Public License is included in this distribution in the
* file called LICENSE.
*
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.h
index fd8051dcd98a..df88e39301c2 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.h
@@ -11,10 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
* The full GNU General Public License is included in this distribution in the
* file called LICENSE.
*
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/Makefile b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/Makefile
index e3213c8264b6..d0703f20d30c 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/Makefile
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/Makefile
@@ -10,5 +10,3 @@ rtl8192de-objs := \
trx.o

obj-$(CONFIG_RTL8192DE) += rtl8192de.o
-
-ccflags-y += -D__CHECK_ENDIAN__
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/def.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/def.h
index 0a443ed17cf4..cb7b9b727e3a 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/def.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/def.h
@@ -11,10 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
* The full GNU General Public License is included in this distribution in the
* file called LICENSE.
*
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/dm.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/dm.c
index 7c1db7e7572d..ac6d554b67c8 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/dm.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/dm.c
@@ -11,10 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
* The full GNU General Public License is included in this distribution in the
* file called LICENSE.
*
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/dm.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/dm.h
index f2d318ceeb28..5d346ec366ce 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/dm.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/dm.h
@@ -11,10 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
* The full GNU General Public License is included in this distribution in the
* file called LICENSE.
*
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/fw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/fw.c
index 8de29cc3ced0..17f6903c14bb 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/fw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/fw.c
@@ -11,10 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
* The full GNU General Public License is included in this distribution in the
* file called LICENSE.
*
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/fw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/fw.h
index 8a38daa316cb..6b435236a28e 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/fw.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/fw.h
@@ -11,10 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
* The full GNU General Public License is included in this distribution in the
* file called LICENSE.
*
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.c
index d91f8bbfe7a0..fcb14c5db172 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.c
@@ -11,10 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
* The full GNU General Public License is included in this distribution in the
* file called LICENSE.
*
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.h
index 1bc7b1a96d4a..24b03b9999be 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.h
@@ -11,10 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
* The full GNU General Public License is included in this distribution in the
* file called LICENSE.
*
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/led.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/led.c
index 811ba57eb9bb..c22b8a215c87 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/led.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/led.c
@@ -11,10 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
* The full GNU General Public License is included in this distribution in the
* file called LICENSE.
*
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/led.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/led.h
index a29df30c3025..9874519704d3 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/led.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/led.h
@@ -11,10 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
* The full GNU General Public License is included in this distribution in the
* file called LICENSE.
*
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c
index 2a1edfd21b96..424f54babd03 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c
@@ -11,10 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
* The full GNU General Public License is included in this distribution in the
* file called LICENSE.
*
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.h
index 8115bf4ac683..58b56b523dbe 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.h
@@ -11,10 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
* The full GNU General Public License is included in this distribution in the
* file called LICENSE.
*
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/reg.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/reg.h
index 315a298bab06..b354b95936e2 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/reg.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/reg.h
@@ -11,10 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
* The full GNU General Public License is included in this distribution in the
* file called LICENSE.
*
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/rf.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/rf.c
index 2f479d397644..9dc9e915513e 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/rf.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/rf.c
@@ -11,10 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
* The full GNU General Public License is included in this distribution in the
* file called LICENSE.
*
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/rf.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/rf.h
index 7303d12c266f..c650a8dcdb26 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/rf.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/rf.h
@@ -11,10 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
* The full GNU General Public License is included in this distribution in the
* file called LICENSE.
*
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.c
index 1ebfee18882f..2d65e4095292 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.c
@@ -11,10 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
* The full GNU General Public License is included in this distribution in the
* file called LICENSE.
*
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.h
index 0e6035b8fd86..fd7d036e9abc 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.h
@@ -11,10 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
* The full GNU General Public License is included in this distribution in the
* file called LICENSE.
*
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/table.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/table.c
index 8ea6f528dfa6..4badb183cf35 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/table.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/table.c
@@ -11,10 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
* The full GNU General Public License is included in this distribution in the
* file called LICENSE.
*
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/table.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/table.h
index 8b724a86117a..7fefc483ec28 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/table.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/table.h
@@ -11,10 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
* The full GNU General Public License is included in this distribution in the
* file called LICENSE.
*
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c
index e998e98d74cb..5fb37564957c 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c
@@ -11,10 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
* The full GNU General Public License is included in this distribution in the
* file called LICENSE.
*
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.h
index 194d99f8bacf..9bb6cc648590 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.h
@@ -11,10 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
* The full GNU General Public License is included in this distribution in the
* file called LICENSE.
*
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/Makefile b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/Makefile
index 0315eeda9b60..f254b9f64326 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/Makefile
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/Makefile
@@ -12,5 +12,3 @@ rtl8192ee-objs := \
obj-$(CONFIG_RTL8192EE) += rtl8192ee.o
-
-ccflags-y += -D__CHECK_ENDIAN__
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/Makefile b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/Makefile
index b7eb13819cbc..dfa9dbbe2cdf 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/Makefile
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/Makefile
@@ -11,5 +11,3 @@ rtl8192se-objs := \
obj-$(CONFIG_RTL8192SE) += rtl8192se.o
-ccflags-y += -D__CHECK_ENDIAN__
-
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/def.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/def.h
index 41466f957cdc..b5ba0554a0cd 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/def.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/def.h
@@ -11,10 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
* The full GNU General Public License is included in this distribution in the
* file called LICENSE.
*
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/dm.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/dm.c
index 9bae5a92e30f..2c073a77b194 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/dm.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/dm.c
@@ -11,10 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
* The full GNU General Public License is included in this distribution in the
* file called LICENSE.
*
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/dm.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/dm.h
index de6ac796c74d..3af07efed73a 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/dm.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/dm.h
@@ -11,10 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
* The full GNU General Public License is included in this distribution in the
* file called LICENSE.
*
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/fw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/fw.c
index 331b1584a1a2..32f9207b5cf5 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/fw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/fw.c
@@ -11,10 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
* The full GNU General Public License is included in this distribution in the
* file called LICENSE.
*
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/fw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/fw.h
index b1e44b86e8ed..5827aa32cef0 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/fw.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/fw.h
@@ -11,10 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
* The full GNU General Public License is included in this distribution in the
* file called LICENSE.
*
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/hw.c
index 52e4430edb54..26e06b2837c3 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/hw.c
@@ -11,10 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
* The full GNU General Public License is included in this distribution in the
* file called LICENSE.
*
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/hw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/hw.h
index 4cacee10f31e..86bce1be83ce 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/hw.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/hw.h
@@ -11,10 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
* The full GNU General Public License is included in this distribution in the
* file called LICENSE.
*
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/led.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/led.c
index 9849cb988186..870007801f6b 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/led.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/led.c
@@ -11,10 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
* The full GNU General Public License is included in this distribution in the
* file called LICENSE.
*
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/led.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/led.h
index 2182dbeb5f32..90e265d9ffc6 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/led.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/led.h
@@ -11,10 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
* The full GNU General Public License is included in this distribution in the
* file called LICENSE.
*
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/phy.c
index 4bb75581ab38..fcb9216af82d 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/phy.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/phy.c
@@ -11,10 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
* The full GNU General Public License is included in this distribution in the
* file called LICENSE.
*
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/phy.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/phy.h
index 8acf4765a7a6..7a3b6b623872 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/phy.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/phy.h
@@ -11,10 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
* The full GNU General Public License is included in this distribution in the
* file called LICENSE.
*
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/reg.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/reg.h
index e13043479b71..5d445c2afcf3 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/reg.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/reg.h
@@ -11,10 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
* The full GNU General Public License is included in this distribution in the
* file called LICENSE.
*
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/rf.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/rf.c
index 34e88a3f6abe..bd2fa7735866 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/rf.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/rf.c
@@ -11,10 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
* The full GNU General Public License is included in this distribution in the
* file called LICENSE.
*
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/rf.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/rf.h
index 8a29eb94ab17..e9ba283d05ad 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/rf.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/rf.h
@@ -11,10 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
* The full GNU General Public License is included in this distribution in the
* file called LICENSE.
*
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c
index 3e1eaeac4fdc..998cefbd7e89 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c
@@ -11,10 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
* The full GNU General Public License is included in this distribution in the
* file called LICENSE.
*
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.h
index 2eb88862ebe4..af449d6714e6 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.h
@@ -11,10 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
* The full GNU General Public License is included in this distribution in the
* file called LICENSE.
*
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/table.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/table.c
index f1a73f75127e..162578f05c85 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/table.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/table.c
@@ -11,10 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
* The full GNU General Public License is included in this distribution in the
* file called LICENSE.
*
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/table.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/table.h
index 2feb73b71a4f..aa3c7687d226 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/table.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/table.h
@@ -6,10 +6,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
* The full GNU General Public License is included in this distribution in the
* file called LICENSE.
*
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/trx.c
index d53bbf6bef81..9a5a11399221 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/trx.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/trx.c
@@ -11,10 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
* The full GNU General Public License is included in this distribution in the
* file called LICENSE.
*
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/trx.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/trx.h
index 5a13f17e3b41..728589138072 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/trx.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/trx.h
@@ -11,10 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
* The full GNU General Public License is included in this distribution in the
* file called LICENSE.
*
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/Makefile b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/Makefile
index 6220672a96f4..e7607d2cb2ef 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/Makefile
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/Makefile
@@ -14,5 +14,3 @@ rtl8723ae-objs := \
obj-$(CONFIG_RTL8723AE) += rtl8723ae.o
-
-ccflags-y += -D__CHECK_ENDIAN__
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/dm.h b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/dm.h
index 57111052e86b..a113780af08a 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/dm.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/dm.h
@@ -11,10 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
* The full GNU General Public License is included in this distribution in the
* file called LICENSE.
*
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/fw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/fw.c
index 1186755e55b8..e5505387260b 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/fw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/fw.c
@@ -134,7 +134,7 @@ static void _rtl8723e_fill_h2c_command(struct ieee80211_hw *hw, u8 element_id,
wait_h2c_limmit--;
if (wait_h2c_limmit == 0) {
RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD,
- "Wating too long for FW read clear HMEBox(%d)!\n",
+ "Waiting too long for FW read clear HMEBox(%d)!\n",
boxnum);
break;
}
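The hunk above only corrects the message text, but its context shows what the message guards: a bounded busy-wait on a firmware mailbox (HMEBox) that logs and bails out once a retry budget is exhausted. A minimal self-contained C sketch of that control flow; the helper names and retry count are illustrative assumptions, not the driver's actual API:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in for the hardware register poll; pretends the
 * box reads busy for the first three polls, then clears. */
static bool box_busy(int boxnum)
{
    static int remaining = 3;
    return remaining-- > 0;
}

/* Bounded busy-wait: instead of spinning forever on a wedged firmware,
 * give up and report after a fixed number of polls, mirroring the
 * wait_h2c_limmit counter in the context above. */
static int wait_box_clear(int boxnum)
{
    unsigned int retries = 100;

    while (box_busy(boxnum)) {
        if (--retries == 0) {
            fprintf(stderr,
                    "Waiting too long for FW read clear HMEBox(%d)!\n",
                    boxnum);
            return -1;  /* timed out */
        }
    }
    return 0;
}

int main(void)
{
    return wait_box_clear(0) ? 1 : 0;
}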
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/fw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/fw.h
index 9d1fe25db953..2e668fcfc5c2 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/fw.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/fw.h
@@ -11,10 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
* The full GNU General Public License is included in this distribution in the
* file called LICENSE.
*
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hal_bt_coexist.h b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hal_bt_coexist.h
index bcd64a22acc0..45719fdcb067 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hal_bt_coexist.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hal_bt_coexist.h
@@ -11,10 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
* The full GNU General Public License is included in this distribution in the
* file called LICENSE.
*
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/led.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/led.c
index c7be9342136c..77c10047cb20 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/led.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/led.c
@@ -11,10 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
* The full GNU General Public License is included in this distribution in the
* file called LICENSE.
*
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/Makefile b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/Makefile
index a77c34102792..a841cbd55d8e 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/Makefile
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/Makefile
@@ -12,5 +12,3 @@ rtl8723be-objs := \
obj-$(CONFIG_RTL8723BE) += rtl8723be.o
-
-ccflags-y += -D__CHECK_ENDIAN__
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723com/Makefile b/drivers/net/wireless/realtek/rtlwifi/rtl8723com/Makefile
index 345a68adcf38..73da75526e2a 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723com/Makefile
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723com/Makefile
@@ -5,5 +5,3 @@ rtl8723-common-objs := \
phy_common.o

obj-$(CONFIG_RTL8723_COMMON) += rtl8723-common.o
-
-ccflags-y += -D__CHECK_ENDIAN__
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/Makefile b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/Makefile
index f7a26f71197e..8ca406b95f02 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/Makefile
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/Makefile
@@ -12,5 +12,3 @@ rtl8821ae-objs := \
obj-$(CONFIG_RTL8821AE) += rtl8821ae.o
-
-ccflags-y += -D__CHECK_ENDIAN__
diff --git a/drivers/net/wireless/realtek/rtlwifi/usb.c b/drivers/net/wireless/realtek/rtlwifi/usb.c
index 32aa5c1d070a..0a508649903d 100644
--- a/drivers/net/wireless/realtek/rtlwifi/usb.c
+++ b/drivers/net/wireless/realtek/rtlwifi/usb.c
@@ -11,10 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
* The full GNU General Public License is included in this distribution in the
* file called LICENSE.
*
diff --git a/drivers/net/wireless/realtek/rtlwifi/usb.h b/drivers/net/wireless/realtek/rtlwifi/usb.h
index 685273ca9561..a6d43d2ecd36 100644
--- a/drivers/net/wireless/realtek/rtlwifi/usb.h
+++ b/drivers/net/wireless/realtek/rtlwifi/usb.h
@@ -11,10 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
* The full GNU General Public License is included in this distribution in the
* file called LICENSE.
*
diff --git a/drivers/net/wireless/rsi/rsi_91x_mac80211.c b/drivers/net/wireless/rsi/rsi_91x_mac80211.c
index dbb23899ddcb..dadaa73ab49d 100644
--- a/drivers/net/wireless/rsi/rsi_91x_mac80211.c
+++ b/drivers/net/wireless/rsi/rsi_91x_mac80211.c
@@ -194,6 +194,7 @@ static void rsi_register_rates_channels(struct rsi_hw *adapter, int band)
void rsi_mac80211_detach(struct rsi_hw *adapter)
{
struct ieee80211_hw *hw = adapter->hw;
+ enum nl80211_band band;

if (hw) {
ieee80211_stop_queues(hw);
@@ -201,7 +202,17 @@ void rsi_mac80211_detach(struct rsi_hw *adapter)
ieee80211_free_hw(hw);
}

+ for (band = 0; band < NUM_NL80211_BANDS; band++) {
+ struct ieee80211_supported_band *sband =
+ &adapter->sbands[band];
+
+ kfree(sband->channels);
+ }
+
+#ifdef CONFIG_RSI_DEBUGFS
rsi_remove_dbgfs(adapter);
+ kfree(adapter->dfsentry);
+#endif
}
EXPORT_SYMBOL_GPL(rsi_mac80211_detach);
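The detach hunk above balances allocations made earlier in the driver's lifetime: each band's channels array is freed, and the debugfs bookkeeping is released when CONFIG_RSI_DEBUGFS is set. A self-contained C sketch of that ownership pairing; the duplicate-a-template registration shown here is an assumption for illustration, not a quote of rsi_register_rates_channels():

#include <stdlib.h>
#include <string.h>

struct supported_band {
    void *channels;  /* per-band channel array, owned by the driver */
};

/* Assumed registration side: duplicate a static channel template so
 * each band owns a private, freeable copy. */
static int register_band(struct supported_band *sband,
                         const void *tmpl, size_t len)
{
    sband->channels = malloc(len);
    if (!sband->channels)
        return -1;
    memcpy(sband->channels, tmpl, len);
    return 0;
}

/* Detach side, mirroring the hunk: every successful register_band()
 * must be balanced by exactly one free, or the memory leaks. */
static void detach_band(struct supported_band *sband)
{
    free(sband->channels);
    sband->channels = NULL;
}

int main(void)
{
    static const int tmpl[14];  /* e.g. a 2.4 GHz channel template */
    struct supported_band sband = { 0 };

    if (register_band(&sband, tmpl, sizeof(tmpl)))
        return 1;
    detach_band(&sband);
    return 0;
}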
@@ -264,6 +275,8 @@ static int rsi_mac80211_start(struct ieee80211_hw *hw)
common->iface_down = false;
mutex_unlock(&common->mutex);

+ rsi_send_rx_filter_frame(common, 0);
+
return 0;
}
@@ -304,7 +317,9 @@ static int rsi_mac80211_add_interface(struct ieee80211_hw *hw,
if (!adapter->sc_nvifs) {
++adapter->sc_nvifs;
adapter->vifs[0] = vif;
- ret = rsi_set_vap_capabilities(common, STA_OPMODE);
+ ret = rsi_set_vap_capabilities(common,
+ STA_OPMODE,
+ VAP_ADD);
}
break;
default:
@@ -332,8 +347,10 @@ static void rsi_mac80211_remove_interface(struct ieee80211_hw *hw,
struct rsi_common *common = adapter->priv;
mutex_lock(&common->mutex);
- if (vif->type == NL80211_IFTYPE_STATION)
+ if (vif->type == NL80211_IFTYPE_STATION) {
adapter->sc_nvifs--;
+ rsi_set_vap_capabilities(common, STA_OPMODE, VAP_DELETE);
+ }
if (!memcmp(adapter->vifs[0], vif, sizeof(struct ieee80211_vif)))
adapter->vifs[0] = NULL;
@@ -373,7 +390,7 @@ static int rsi_channel_change(struct ieee80211_hw *hw)
status = rsi_band_check(common);
if (!status)
- status = rsi_set_channel(adapter->priv, channel);
+ status = rsi_set_channel(adapter->priv, curchan);
if (bss->assoc) {
if (common->hw_data_qs_blocked &&
@@ -394,6 +411,34 @@ static int rsi_channel_change(struct ieee80211_hw *hw)
}
/**
+ * rsi_config_power() - This function configures the transmit power of the device.
+ * @hw: Pointer to the ieee80211_hw structure.
+ *
+ * Return: 0 on success, negative error code on failure.
+ */
+static int rsi_config_power(struct ieee80211_hw *hw)
+{
+ struct rsi_hw *adapter = hw->priv;
+ struct rsi_common *common = adapter->priv;
+ struct ieee80211_conf *conf = &hw->conf;
+
+ if (adapter->sc_nvifs <= 0) {
+ rsi_dbg(ERR_ZONE, "%s: No virtual interface found\n", __func__);
+ return -EINVAL;
+ }
+
+ rsi_dbg(INFO_ZONE,
+ "%s: Set tx power: %d dBM\n", __func__, conf->power_level);
+
+ if (conf->power_level == common->tx_power)
+ return 0;
+
+ common->tx_power = conf->power_level;
+
+ return rsi_send_radio_params_update(common);
+}
+
+/**
* rsi_mac80211_config() - This function is a handler for configuration
* requests. The stack calls this function to
* change hardware configuration, e.g., channel.
@@ -414,6 +459,12 @@ static int rsi_mac80211_config(struct ieee80211_hw *hw,
if (changed & IEEE80211_CONF_CHANGE_CHANNEL)
status = rsi_channel_change(hw);
+ /* tx power */
+ if (changed & IEEE80211_CONF_CHANGE_POWER) {
+ rsi_dbg(INFO_ZONE, "%s: Configuring Power\n", __func__);
+ status = rsi_config_power(hw);
+ }
+
mutex_unlock(&common->mutex);
return status;
@@ -456,11 +507,19 @@ static void rsi_mac80211_bss_info_changed(struct ieee80211_hw *hw,
{
struct rsi_hw *adapter = hw->priv;
struct rsi_common *common = adapter->priv;
+ u16 rx_filter_word = 0;
mutex_lock(&common->mutex);
if (changed & BSS_CHANGED_ASSOC) {
rsi_dbg(INFO_ZONE, "%s: Changed Association status: %d\n",
__func__, bss_conf->assoc);
+ if (bss_conf->assoc) {
+ /* Send the RX filter frame */
+ rx_filter_word = (ALLOW_DATA_ASSOC_PEER |
+ ALLOW_CTRL_ASSOC_PEER |
+ ALLOW_MGMT_ASSOC_PEER);
+ rsi_send_rx_filter_frame(common, rx_filter_word);
+ }
rsi_inform_bss_status(common,
bss_conf->assoc,
bss_conf->bssid,
@@ -998,6 +1057,7 @@ static int rsi_mac80211_sta_remove(struct ieee80211_hw *hw,
struct rsi_common *common = adapter->priv;
mutex_lock(&common->mutex);
+
/* Resetting all the fields to default values */
common->bitrate_mask[NL80211_BAND_2GHZ] = 0;
common->bitrate_mask[NL80211_BAND_5GHZ] = 0;
@@ -1007,9 +1067,114 @@ static int rsi_mac80211_sta_remove(struct ieee80211_hw *hw,
common->vif_info[0].seq_start = 0;
common->secinfo.ptk_cipher = 0;
common->secinfo.gtk_cipher = 0;
+
+ rsi_send_rx_filter_frame(common, 0);
+
mutex_unlock(&common->mutex);
+
+ return 0;
+}
+/**
+ * rsi_mac80211_set_antenna() - This function is used to configure
+ * tx and rx antennas.
+ * @hw: Pointer to the ieee80211_hw structure.
+ * @tx_ant: Bitmap for tx antenna
+ * @rx_ant: Bitmap for rx antenna
+ *
+ * Return: 0 on success, negative error code on failure.
+ */
+static int rsi_mac80211_set_antenna(struct ieee80211_hw *hw,
+ u32 tx_ant, u32 rx_ant)
+{
+ struct rsi_hw *adapter = hw->priv;
+ struct rsi_common *common = adapter->priv;
+ u8 antenna = 0;
+
+ if (tx_ant > 1 || rx_ant > 1) {
+ rsi_dbg(ERR_ZONE,
+ "Invalid antenna selection (tx: %d, rx:%d)\n",
+ tx_ant, rx_ant);
+ rsi_dbg(ERR_ZONE,
+ "Use 0 for int_ant, 1 for ext_ant\n");
+ return -EINVAL;
+ }
+
+ rsi_dbg(INFO_ZONE, "%s: Antenna map Tx %x Rx %d\n",
+ __func__, tx_ant, rx_ant);
+
+ mutex_lock(&common->mutex);
+
+ antenna = tx_ant ? ANTENNA_SEL_UFL : ANTENNA_SEL_INT;
+ if (common->ant_in_use != antenna)
+ if (rsi_set_antenna(common, antenna))
+ goto fail_set_antenna;
+
+ rsi_dbg(INFO_ZONE, "(%s) Antenna path configured successfully\n",
+ tx_ant ? "UFL" : "INT");
+
+ common->ant_in_use = antenna;
+
+ mutex_unlock(&common->mutex);
+
return 0;
+
+fail_set_antenna:
+ rsi_dbg(ERR_ZONE, "%s: Failed.\n", __func__);
+ mutex_unlock(&common->mutex);
+ return -EINVAL;
+}
+
+/**
+ * rsi_mac80211_get_antenna() - This function is used to report the
+ * currently configured tx and rx antennas.
+ *
+ * @hw: Pointer to the ieee80211_hw structure.
+ * @tx_ant: Bitmap for tx antenna
+ * @rx_ant: Bitmap for rx antenna
+ *
+ * Return: 0 always; the selection is reported through @tx_ant and @rx_ant.
+ */
+static int rsi_mac80211_get_antenna(struct ieee80211_hw *hw,
+ u32 *tx_ant, u32 *rx_ant)
+{
+ struct rsi_hw *adapter = hw->priv;
+ struct rsi_common *common = adapter->priv;
+
+ mutex_lock(&common->mutex);
+
+ *tx_ant = (common->ant_in_use == ANTENNA_SEL_UFL) ? 1 : 0;
+ *rx_ant = 0;
+
+ mutex_unlock(&common->mutex);
+
+ return 0;
+}
+
+static void rsi_reg_notify(struct wiphy *wiphy,
+ struct regulatory_request *request)
+{
+ struct ieee80211_supported_band *sband;
+ struct ieee80211_channel *ch;
+ struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
+ struct rsi_hw *adapter = hw->priv;
+ int i;
+
+ sband = wiphy->bands[NL80211_BAND_5GHZ];
+
+ for (i = 0; i < sband->n_channels; i++) {
+ ch = &sband->channels[i];
+ if (ch->flags & IEEE80211_CHAN_DISABLED)
+ continue;
+
+ if (ch->flags & IEEE80211_CHAN_RADAR)
+ ch->flags |= IEEE80211_CHAN_NO_IR;
+ }
+
+ rsi_dbg(INFO_ZONE,
+ "country = %s dfs_region = %d\n",
+ request->alpha2, request->dfs_region);
+ adapter->dfs_region = request->dfs_region;
}
static struct ieee80211_ops mac80211_ops = {
@@ -1028,6 +1193,8 @@ static struct ieee80211_ops mac80211_ops = {
.ampdu_action = rsi_mac80211_ampdu_action,
.sta_add = rsi_mac80211_sta_add,
.sta_remove = rsi_mac80211_sta_remove,
+ .set_antenna = rsi_mac80211_set_antenna,
+ .get_antenna = rsi_mac80211_get_antenna,
};
/**
@@ -1092,6 +1259,8 @@ int rsi_mac80211_attach(struct rsi_common *common)
wiphy->bands[NL80211_BAND_5GHZ] =
&adapter->sbands[NL80211_BAND_5GHZ];
+ wiphy->reg_notifier = rsi_reg_notify;
+
status = ieee80211_register_hw(hw);
if (status)
return status;
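The rsi_mac80211_config() hunk follows the usual mac80211 contract: the stack ORs IEEE80211_CONF_CHANGE_* bits into `changed` and the driver acts on each set bit, reading the new values from hw->conf. A minimal sketch of that dispatch shape (the demo_* helpers are placeholders, not driver symbols):

    #include <net/mac80211.h>

    /* Placeholder helpers standing in for driver-specific calls. */
    static int demo_set_channel(struct ieee80211_hw *hw,
                                struct ieee80211_channel *chan) { return 0; }
    static int demo_set_tx_power(struct ieee80211_hw *hw, int dbm) { return 0; }

    /* Sketch: dispatch on the changed-flags bitmap mac80211 passes in. */
    static int demo_op_config(struct ieee80211_hw *hw, u32 changed)
    {
            struct ieee80211_conf *conf = &hw->conf;
            int ret = 0;

            if (changed & IEEE80211_CONF_CHANGE_CHANNEL)
                    ret = demo_set_channel(hw, conf->chandef.chan);

            if (!ret && (changed & IEEE80211_CONF_CHANGE_POWER))
                    ret = demo_set_tx_power(hw, conf->power_level);

            return ret;
    }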
diff --git a/drivers/net/wireless/rsi/rsi_91x_mgmt.c b/drivers/net/wireless/rsi/rsi_91x_mgmt.c
index 35c14cc3f0d2..fac87c06357b 100644
--- a/drivers/net/wireless/rsi/rsi_91x_mgmt.c
+++ b/drivers/net/wireless/rsi/rsi_91x_mgmt.c
@@ -617,7 +617,9 @@ static int rsi_program_bb_rf(struct rsi_common *common)
*
* Return: 0 on success, corresponding negative error code on failure.
*/
-int rsi_set_vap_capabilities(struct rsi_common *common, enum opmode mode)
+int rsi_set_vap_capabilities(struct rsi_common *common,
+ enum opmode mode,
+ u8 vap_status)
{
struct sk_buff *skb = NULL;
struct rsi_vap_caps *vap_caps;
@@ -642,6 +644,7 @@ int rsi_set_vap_capabilities(struct rsi_common *common, enum opmode mode)
FRAME_DESC_SZ) |
(RSI_WIFI_MGMT_Q << 12));
vap_caps->desc_word[1] = cpu_to_le16(VAP_CAPABILITIES);
+ vap_caps->desc_word[2] = cpu_to_le16(vap_status << 8);
vap_caps->desc_word[4] = cpu_to_le16(mode |
(common->channel_width << 8));
vap_caps->desc_word[7] = cpu_to_le16((vap_id << 8) |
@@ -910,7 +913,8 @@ int rsi_band_check(struct rsi_common *common)
*
* Return: 0 on success, corresponding error code on failure.
*/
-int rsi_set_channel(struct rsi_common *common, u16 channel)
+int rsi_set_channel(struct rsi_common *common,
+ struct ieee80211_channel *channel)
{
struct sk_buff *skb = NULL;
struct rsi_mac_frame *mgmt_frame;
@@ -925,24 +929,76 @@ int rsi_set_channel(struct rsi_common *common, u16 channel)
return -ENOMEM;
}
+ if (!channel) {
+ dev_kfree_skb(skb);
+ return 0;
+ }
memset(skb->data, 0, FRAME_DESC_SZ);
mgmt_frame = (struct rsi_mac_frame *)skb->data;
mgmt_frame->desc_word[0] = cpu_to_le16(RSI_WIFI_MGMT_Q << 12);
mgmt_frame->desc_word[1] = cpu_to_le16(SCAN_REQUEST);
- mgmt_frame->desc_word[4] = cpu_to_le16(channel);
+ mgmt_frame->desc_word[4] = cpu_to_le16(channel->hw_value);
+
+ mgmt_frame->desc_word[4] |=
+ cpu_to_le16(((char)(channel->max_antenna_gain)) << 8);
+ mgmt_frame->desc_word[5] =
+ cpu_to_le16((char)(channel->max_antenna_gain));
mgmt_frame->desc_word[7] = cpu_to_le16(PUT_BBP_RESET |
BBP_REG_WRITE |
(RSI_RF_TYPE << 4));
- mgmt_frame->desc_word[5] = cpu_to_le16(0x01);
- mgmt_frame->desc_word[6] = cpu_to_le16(0x12);
+ if (!(channel->flags & IEEE80211_CHAN_NO_IR) &&
+ !(channel->flags & IEEE80211_CHAN_RADAR)) {
+ if (common->tx_power < channel->max_power)
+ mgmt_frame->desc_word[6] = cpu_to_le16(common->tx_power);
+ else
+ mgmt_frame->desc_word[6] = cpu_to_le16(channel->max_power);
+ }
+ mgmt_frame->desc_word[7] = cpu_to_le16(common->priv->dfs_region);
if (common->channel_width == BW_40MHZ)
mgmt_frame->desc_word[5] |= cpu_to_le16(0x1 << 8);
- common->channel = channel;
+ common->channel = channel->hw_value;
+
+ skb_put(skb, FRAME_DESC_SZ);
+
+ return rsi_send_internal_mgmt_frame(common, skb);
+}
+
+/**
+ * rsi_send_radio_params_update() - This function sends the radio
+ * parameters update frame to the device.
+ * @common: Pointer to the driver private structure.
+ *
+ * Return: 0 on success, corresponding error code on failure.
+ */
+int rsi_send_radio_params_update(struct rsi_common *common)
+{
+ struct rsi_mac_frame *cmd_frame;
+ struct sk_buff *skb = NULL;
+
+ rsi_dbg(MGMT_TX_ZONE,
+ "%s: Sending Radio Params update frame\n", __func__);
+
+ skb = dev_alloc_skb(FRAME_DESC_SZ);
+ if (!skb) {
+ rsi_dbg(ERR_ZONE, "%s: Failed in allocation of skb\n",
+ __func__);
+ return -ENOMEM;
+ }
+
+ memset(skb->data, 0, FRAME_DESC_SZ);
+ cmd_frame = (struct rsi_mac_frame *)skb->data;
+
+ cmd_frame->desc_word[0] = cpu_to_le16(RSI_WIFI_MGMT_Q << 12);
+ cmd_frame->desc_word[1] = cpu_to_le16(RADIO_PARAMS_UPDATE);
+ cmd_frame->desc_word[3] = cpu_to_le16(BIT(0));
+
+ cmd_frame->desc_word[3] |= cpu_to_le16(common->tx_power << 8);
skb_put(skb, FRAME_DESC_SZ);
@@ -1240,6 +1296,72 @@ int rsi_send_block_unblock_frame(struct rsi_common *common, bool block_event)
}
+/**
+ * rsi_send_rx_filter_frame() - Sends a frame to filter the RX packets
+ *
+ * @common: Pointer to the driver private structure.
+ * @rx_filter_word: Filter flags for RX packets
+ *
+ * Return: 0 on success, negative error code on failure.
+ */
+int rsi_send_rx_filter_frame(struct rsi_common *common, u16 rx_filter_word)
+{
+ struct rsi_mac_frame *cmd_frame;
+ struct sk_buff *skb;
+
+ rsi_dbg(MGMT_TX_ZONE, "Sending RX filter frame\n");
+
+ skb = dev_alloc_skb(FRAME_DESC_SZ);
+ if (!skb) {
+ rsi_dbg(ERR_ZONE, "%s: Failed in allocation of skb\n",
+ __func__);
+ return -ENOMEM;
+ }
+
+ memset(skb->data, 0, FRAME_DESC_SZ);
+ cmd_frame = (struct rsi_mac_frame *)skb->data;
+
+ cmd_frame->desc_word[0] = cpu_to_le16(RSI_WIFI_MGMT_Q << 12);
+ cmd_frame->desc_word[1] = cpu_to_le16(SET_RX_FILTER);
+ cmd_frame->desc_word[4] = cpu_to_le16(rx_filter_word);
+
+ skb_put(skb, FRAME_DESC_SZ);
+
+ return rsi_send_internal_mgmt_frame(common, skb);
+}
+
+/**
+ * rsi_set_antenna() - This function sends the antenna configuration
+ * request to the device
+ *
+ * @common: Pointer to the driver private structure.
+ * @antenna: bitmap for tx antenna selection
+ *
+ * Return: 0 on success, negative error code on failure
+ */
+int rsi_set_antenna(struct rsi_common *common, u8 antenna)
+{
+ struct rsi_mac_frame *cmd_frame;
+ struct sk_buff *skb;
+
+ skb = dev_alloc_skb(FRAME_DESC_SZ);
+ if (!skb) {
+ rsi_dbg(ERR_ZONE, "%s: Failed in allocation of skb\n",
+ __func__);
+ return -ENOMEM;
+ }
+
+ memset(skb->data, 0, FRAME_DESC_SZ);
+ cmd_frame = (struct rsi_mac_frame *)skb->data;
+
+ cmd_frame->desc_word[1] = cpu_to_le16(ANT_SEL_FRAME);
+ cmd_frame->desc_word[3] = cpu_to_le16(antenna & 0x00ff);
+ cmd_frame->desc_word[0] = cpu_to_le16(RSI_WIFI_MGMT_Q << 12);
+
+ skb_put(skb, FRAME_DESC_SZ);
+
+ return rsi_send_internal_mgmt_frame(common, skb);
+}
/**
* rsi_handle_ta_confirm_type() - This function handles the confirm frames.
diff --git a/drivers/net/wireless/rsi/rsi_main.h b/drivers/net/wireless/rsi/rsi_main.h
index dcd095787166..1d5904bc2c74 100644
--- a/drivers/net/wireless/rsi/rsi_main.h
+++ b/drivers/net/wireless/rsi/rsi_main.h
@@ -204,6 +204,9 @@ struct rsi_common {
struct cqm_info cqm_info;
bool hw_data_qs_blocked;
+
+ int tx_power;
+ u8 ant_in_use;
};
struct rsi_hw {
@@ -220,6 +223,7 @@ struct rsi_hw {
struct rsi_debugfs *dfsentry;
u8 num_debugfs_entries;
#endif
+ u8 dfs_region;
void *rsi_dev;
int (*host_intf_read_pkt)(struct rsi_hw *adapter, u8 *pkt, u32 len);
int (*host_intf_write_pkt)(struct rsi_hw *adapter, u8 *pkt, u32 len);
diff --git a/drivers/net/wireless/rsi/rsi_mgmt.h b/drivers/net/wireless/rsi/rsi_mgmt.h
index 3741173fd3ac..dfbf7a50269b 100644
--- a/drivers/net/wireless/rsi/rsi_mgmt.h
+++ b/drivers/net/wireless/rsi/rsi_mgmt.h
@@ -140,11 +140,30 @@
#define RSI_SUPP_FILTERS (FIF_ALLMULTI | FIF_PROBE_REQ |\
FIF_BCN_PRBRESP_PROMISC)
+
+#define ANTENNA_SEL_INT 0x02 /* RF_OUT_2 / Integrated */
+#define ANTENNA_SEL_UFL 0x03 /* RF_OUT_1 / U.FL */
+
+/* Rx filter word definitions */
+#define PROMISCOUS_MODE BIT(0)
+#define ALLOW_DATA_ASSOC_PEER BIT(1)
+#define ALLOW_MGMT_ASSOC_PEER BIT(2)
+#define ALLOW_CTRL_ASSOC_PEER BIT(3)
+#define DISALLOW_BEACONS BIT(4)
+#define ALLOW_CONN_PEER_MGMT_WHILE_BUF_FULL BIT(5)
+#define DISALLOW_BROADCAST_DATA BIT(6)
+
enum opmode {
STA_OPMODE = 1,
AP_OPMODE = 2
};
+enum vap_status {
+ VAP_ADD = 1,
+ VAP_DELETE = 2,
+ VAP_UPDATE = 3
+};
+
extern struct ieee80211_rate rsi_rates[12];
extern const u16 rsi_mcsrates[8];
@@ -184,7 +203,9 @@ enum cmd_frame_type {
BG_SCAN_PARAMS,
BG_SCAN_PROBE_REQ,
CW_MODE_REQ,
- PER_CMD_PKT
+ PER_CMD_PKT,
+ ANT_SEL_FRAME = 0x20,
+ RADIO_PARAMS_UPDATE = 0x29
};
struct rsi_mac_frame {
@@ -287,12 +308,14 @@ static inline u8 rsi_get_channel(u8 *addr)
}
int rsi_mgmt_pkt_recv(struct rsi_common *common, u8 *msg);
-int rsi_set_vap_capabilities(struct rsi_common *common, enum opmode mode);
+int rsi_set_vap_capabilities(struct rsi_common *common, enum opmode mode,
+ u8 vap_status);
int rsi_send_aggregation_params_frame(struct rsi_common *common, u16 tid,
u16 ssn, u8 buf_size, u8 event);
int rsi_hal_load_key(struct rsi_common *common, u8 *data, u16 key_len,
u8 key_type, u8 key_id, u32 cipher);
-int rsi_set_channel(struct rsi_common *common, u16 chno);
+int rsi_set_channel(struct rsi_common *common,
+ struct ieee80211_channel *channel);
int rsi_send_block_unblock_frame(struct rsi_common *common, bool event);
void rsi_inform_bss_status(struct rsi_common *common, u8 status,
const u8 *bssid, u8 qos_enable, u16 aid);
@@ -306,4 +329,7 @@ void rsi_core_xmit(struct rsi_common *common, struct sk_buff *skb);
int rsi_send_mgmt_pkt(struct rsi_common *common, struct sk_buff *skb);
int rsi_send_data_pkt(struct rsi_common *common, struct sk_buff *skb);
int rsi_band_check(struct rsi_common *common);
+int rsi_send_rx_filter_frame(struct rsi_common *common, u16 rx_filter_word);
+int rsi_send_radio_params_update(struct rsi_common *common);
+int rsi_set_antenna(struct rsi_common *common, u8 antenna);
#endif
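The RX filter word introduced in rsi_mgmt.h is a plain bitmask; the mac80211 glue composes it from the ALLOW_*_ASSOC_PEER bits on association and sends 0 to clear the filter on start/disassociation. Composing and sending it in isolation (both symbols come from this patch):

    #include "rsi_mgmt.h"

    /* Sketch: restrict RX to frames from the associated peer. */
    static int demo_apply_assoc_filter(struct rsi_common *common)
    {
            u16 word = ALLOW_DATA_ASSOC_PEER |
                       ALLOW_CTRL_ASSOC_PEER |
                       ALLOW_MGMT_ASSOC_PEER;

            return rsi_send_rx_filter_frame(common, word);
    }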
diff --git a/drivers/net/wireless/st/cw1200/wsm.c b/drivers/net/wireless/st/cw1200/wsm.c
index 680d60eabc75..be4c22e0d902 100644
--- a/drivers/net/wireless/st/cw1200/wsm.c
+++ b/drivers/net/wireless/st/cw1200/wsm.c
@@ -379,7 +379,6 @@ static int wsm_multi_tx_confirm(struct cw1200_common *priv,
{
int ret;
int count;
- int i;
count = WSM_GET32(buf);
if (WARN_ON(count <= 0))
@@ -395,11 +394,10 @@ static int wsm_multi_tx_confirm(struct cw1200_common *priv,
}
cw1200_debug_txed_multi(priv, count);
- for (i = 0; i < count; ++i) {
+ do {
ret = wsm_tx_confirm(priv, buf, link_id);
- if (ret)
- return ret;
- }
+ } while (!ret && --count);
+
return ret;
underflow:
@@ -1807,16 +1805,18 @@ static int wsm_buf_reserve(struct wsm_buf *buf, size_t extra_size)
{
size_t pos = buf->data - buf->begin;
size_t size = pos + extra_size;
+ u8 *tmp;
size = round_up(size, FWLOAD_BLOCK_SIZE);
- buf->begin = krealloc(buf->begin, size, GFP_KERNEL | GFP_DMA);
- if (buf->begin) {
- buf->data = &buf->begin[pos];
- buf->end = &buf->begin[size];
- return 0;
- } else {
- buf->end = buf->data = buf->begin;
+ tmp = krealloc(buf->begin, size, GFP_KERNEL | GFP_DMA);
+ if (!tmp) {
+ wsm_buf_deinit(buf);
return -ENOMEM;
}
+
+ buf->begin = tmp;
+ buf->data = &buf->begin[pos];
+ buf->end = &buf->begin[size];
+ return 0;
}
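The wsm_buf_reserve() rewrite fixes the classic krealloc() pitfall: assigning the result straight to buf->begin loses the only reference to the old buffer when the reallocation fails. The safe idiom, as a standalone sketch:

    #include <linux/slab.h>
    #include <linux/types.h>

    /* Sketch: keep the old pointer until krealloc() is known to
     * have succeeded; on failure the caller still owns *bufp. */
    static int demo_grow(u8 **bufp, size_t *sizep, size_t new_size)
    {
            u8 *tmp = krealloc(*bufp, new_size, GFP_KERNEL);

            if (!tmp)
                    return -ENOMEM;

            *bufp = tmp;
            *sizep = new_size;
            return 0;
    }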
diff --git a/drivers/net/wireless/ti/wl1251/Makefile b/drivers/net/wireless/ti/wl1251/Makefile
index a5c6328b5f72..58b4f935a3f6 100644
--- a/drivers/net/wireless/ti/wl1251/Makefile
+++ b/drivers/net/wireless/ti/wl1251/Makefile
@@ -6,5 +6,3 @@ wl1251_sdio-objs += sdio.o
obj-$(CONFIG_WL1251) += wl1251.o
obj-$(CONFIG_WL1251_SPI) += wl1251_spi.o
obj-$(CONFIG_WL1251_SDIO) += wl1251_sdio.o
-
-ccflags-y += -D__CHECK_ENDIAN__
diff --git a/drivers/net/wireless/ti/wl18xx/event.c b/drivers/net/wireless/ti/wl18xx/event.c
index b36ce185c9f2..86fa0fc69084 100644
--- a/drivers/net/wireless/ti/wl18xx/event.c
+++ b/drivers/net/wireless/ti/wl18xx/event.c
@@ -218,5 +218,33 @@ int wl18xx_process_mailbox_events(struct wl1271 *wl)
if (vector & FW_LOGGER_INDICATION)
wlcore_event_fw_logger(wl);
+ if (vector & RX_BA_WIN_SIZE_CHANGE_EVENT_ID) {
+ struct wl12xx_vif *wlvif;
+ struct ieee80211_vif *vif;
+ struct ieee80211_sta *sta;
+ u8 link_id = mbox->rx_ba_link_id;
+ u8 win_size = mbox->rx_ba_win_size;
+ const u8 *addr;
+
+ wlvif = wl->links[link_id].wlvif;
+ vif = wl12xx_wlvif_to_vif(wlvif);
+
+ /* Update the RX aggregation window size and call the
+ * MAC routine to stop active RX aggregations for this link
+ */
+ if (wlvif->bss_type != BSS_TYPE_AP_BSS)
+ addr = vif->bss_conf.bssid;
+ else
+ addr = wl->links[link_id].addr;
+
+ sta = ieee80211_find_sta(vif, addr);
+ if (sta) {
+ sta->max_rx_aggregation_subframes = win_size;
+ ieee80211_stop_rx_ba_session(vif,
+ wl->links[link_id].ba_bitmap,
+ addr);
+ }
+ }
+
return 0;
}
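For reference, mac80211 documents ieee80211_find_sta() as callable only under the RCU read lock, with the returned pointer valid only inside that section; the hunk above relies on its caller's context for that. The lookup-and-teardown pattern with explicit RCU protection (tid_bitmap is an illustrative argument):

    #include <net/mac80211.h>

    /* Sketch: find a station and stop its active RX BA sessions. */
    static void demo_stop_rx_agg(struct ieee80211_vif *vif, const u8 *addr,
                                 u16 tid_bitmap)
    {
            struct ieee80211_sta *sta;

            rcu_read_lock();
            sta = ieee80211_find_sta(vif, addr);
            if (sta)
                    ieee80211_stop_rx_ba_session(vif, tid_bitmap, addr);
            rcu_read_unlock();
    }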
diff --git a/drivers/net/wireless/ti/wl18xx/event.h b/drivers/net/wireless/ti/wl18xx/event.h
index ce8ea9c04052..4af297fbb529 100644
--- a/drivers/net/wireless/ti/wl18xx/event.h
+++ b/drivers/net/wireless/ti/wl18xx/event.h
@@ -38,6 +38,7 @@ enum {
REMAIN_ON_CHANNEL_COMPLETE_EVENT_ID = BIT(18),
DFS_CHANNELS_CONFIG_COMPLETE_EVENT = BIT(19),
PERIODIC_SCAN_REPORT_EVENT_ID = BIT(20),
+ RX_BA_WIN_SIZE_CHANGE_EVENT_ID = BIT(21),
SMART_CONFIG_SYNC_EVENT_ID = BIT(22),
SMART_CONFIG_DECODE_EVENT_ID = BIT(23),
TIME_SYNC_EVENT_ID = BIT(24),
diff --git a/drivers/net/wireless/ti/wl18xx/main.c b/drivers/net/wireless/ti/wl18xx/main.c
index 06d6943b257c..5bdf7a03e3dd 100644
--- a/drivers/net/wireless/ti/wl18xx/main.c
+++ b/drivers/net/wireless/ti/wl18xx/main.c
@@ -1041,7 +1041,8 @@ static int wl18xx_boot(struct wl1271 *wl)
SMART_CONFIG_SYNC_EVENT_ID |
SMART_CONFIG_DECODE_EVENT_ID |
TIME_SYNC_EVENT_ID |
- FW_LOGGER_INDICATION;
+ FW_LOGGER_INDICATION |
+ RX_BA_WIN_SIZE_CHANGE_EVENT_ID;
wl->ap_event_mask = MAX_TX_FAILURE_EVENT_ID;
diff --git a/drivers/net/wireless/ti/wlcore/Makefile b/drivers/net/wireless/ti/wlcore/Makefile
index 0a69c1373643..e286713b3c18 100644
--- a/drivers/net/wireless/ti/wlcore/Makefile
+++ b/drivers/net/wireless/ti/wlcore/Makefile
@@ -8,5 +8,3 @@ wlcore-$(CONFIG_NL80211_TESTMODE) += testmode.o
obj-$(CONFIG_WLCORE) += wlcore.o
obj-$(CONFIG_WLCORE_SPI) += wlcore_spi.o
obj-$(CONFIG_WLCORE_SDIO) += wlcore_sdio.o
-
-ccflags-y += -D__CHECK_ENDIAN__
diff --git a/drivers/net/wireless/ti/wlcore/acx.c b/drivers/net/wireless/ti/wlcore/acx.c
index 26cc23f32241..a4859993db3c 100644
--- a/drivers/net/wireless/ti/wlcore/acx.c
+++ b/drivers/net/wireless/ti/wlcore/acx.c
@@ -1419,7 +1419,8 @@ out:
/* setup BA session receiver setting in the FW. */
int wl12xx_acx_set_ba_receiver_session(struct wl1271 *wl, u8 tid_index,
- u16 ssn, bool enable, u8 peer_hlid)
+ u16 ssn, bool enable, u8 peer_hlid,
+ u8 win_size)
{
struct wl1271_acx_ba_receiver_setup *acx;
int ret;
@@ -1435,7 +1436,7 @@ int wl12xx_acx_set_ba_receiver_session(struct wl1271 *wl, u8 tid_index,
acx->hlid = peer_hlid;
acx->tid = tid_index;
acx->enable = enable;
- acx->win_size = wl->conf.ht.rx_ba_win_size;
+ acx->win_size = win_size;
acx->ssn = ssn;
ret = wlcore_cmd_configure_failsafe(wl, ACX_BA_SESSION_RX_SETUP, acx,
diff --git a/drivers/net/wireless/ti/wlcore/acx.h b/drivers/net/wireless/ti/wlcore/acx.h
index 6321ed472891..f46d7fdf9a00 100644
--- a/drivers/net/wireless/ti/wlcore/acx.h
+++ b/drivers/net/wireless/ti/wlcore/acx.h
@@ -1113,7 +1113,8 @@ int wl1271_acx_set_ht_information(struct wl1271 *wl,
int wl12xx_acx_set_ba_initiator_policy(struct wl1271 *wl,
struct wl12xx_vif *wlvif);
int wl12xx_acx_set_ba_receiver_session(struct wl1271 *wl, u8 tid_index,
- u16 ssn, bool enable, u8 peer_hlid);
+ u16 ssn, bool enable, u8 peer_hlid,
+ u8 win_size);
int wl12xx_acx_tsf_info(struct wl1271 *wl, struct wl12xx_vif *wlvif,
u64 *mactime);
int wl1271_acx_ps_rx_streaming(struct wl1271 *wl, struct wl12xx_vif *wlvif,
diff --git a/drivers/net/wireless/ti/wlcore/main.c b/drivers/net/wireless/ti/wlcore/main.c
index 471521a0db7b..e536aa01b937 100644
--- a/drivers/net/wireless/ti/wlcore/main.c
+++ b/drivers/net/wireless/ti/wlcore/main.c
@@ -5285,7 +5285,9 @@ static int wl1271_op_ampdu_action(struct ieee80211_hw *hw,
}
ret = wl12xx_acx_set_ba_receiver_session(wl, tid, *ssn, true,
- hlid);
+ hlid,
+ params->buf_size);
+
if (!ret) {
*ba_bitmap |= BIT(tid);
wl->ba_rx_session_count++;
@@ -5306,7 +5308,7 @@ static int wl1271_op_ampdu_action(struct ieee80211_hw *hw,
}
ret = wl12xx_acx_set_ba_receiver_session(wl, tid, 0, false,
- hlid);
+ hlid, 0);
if (!ret) {
*ba_bitmap &= ~BIT(tid);
wl->ba_rx_session_count--;
@@ -6086,6 +6088,7 @@ static int wl1271_init_ieee80211(struct wl1271 *wl)
ieee80211_hw_set(wl->hw, SUPPORTS_DYNAMIC_PS);
ieee80211_hw_set(wl->hw, SIGNAL_DBM);
ieee80211_hw_set(wl->hw, SUPPORTS_PS);
+ ieee80211_hw_set(wl->hw, SUPPORTS_TX_FRAG);
wl->hw->wiphy->cipher_suites = cipher_suites;
wl->hw->wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites);
@@ -6120,6 +6123,8 @@ static int wl1271_init_ieee80211(struct wl1271 *wl)
WIPHY_FLAG_SUPPORTS_SCHED_SCAN |
WIPHY_FLAG_HAS_CHANNEL_SWITCH;
+ wl->hw->wiphy->features |= NL80211_FEATURE_AP_SCAN;
+
/* make sure all our channels fit in the scanned_ch bitmask */
BUILD_BUG_ON(ARRAY_SIZE(wl1271_channels) +
ARRAY_SIZE(wl1271_channels_5ghz) >
diff --git a/drivers/net/wireless/wl3501_cs.c b/drivers/net/wireless/wl3501_cs.c
index 932f3f81e8cf..d9d29ab88184 100644
--- a/drivers/net/wireless/wl3501_cs.c
+++ b/drivers/net/wireless/wl3501_cs.c
@@ -1853,7 +1853,6 @@ static const struct net_device_ops wl3501_netdev_ops = {
.ndo_stop = wl3501_close,
.ndo_start_xmit = wl3501_hard_start_xmit,
.ndo_tx_timeout = wl3501_tx_timeout,
- .ndo_change_mtu = eth_change_mtu,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
};
diff --git a/drivers/net/wireless/zydas/zd1201.c b/drivers/net/wireless/zydas/zd1201.c
index dea049b2556f..de7ff395977a 100644
--- a/drivers/net/wireless/zydas/zd1201.c
+++ b/drivers/net/wireless/zydas/zd1201.c
@@ -1724,7 +1724,6 @@ static const struct net_device_ops zd1201_netdev_ops = {
.ndo_tx_timeout = zd1201_tx_timeout,
.ndo_set_rx_mode = zd1201_set_multicast,
.ndo_set_mac_address = zd1201_set_mac_address,
- .ndo_change_mtu = eth_change_mtu,
.ndo_validate_addr = eth_validate_addr,
};
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index 74dc2bf71428..e30ffd29b7e9 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -302,7 +302,7 @@ static int xenvif_close(struct net_device *dev)
static int xenvif_change_mtu(struct net_device *dev, int mtu)
{
struct xenvif *vif = netdev_priv(dev);
- int max = vif->can_sg ? 65535 - VLAN_ETH_HLEN : ETH_DATA_LEN;
+ int max = vif->can_sg ? ETH_MAX_MTU - VLAN_ETH_HLEN : ETH_DATA_LEN;
if (mtu > max)
return -EINVAL;
@@ -471,6 +471,9 @@ struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,
dev->tx_queue_len = XENVIF_QUEUE_LENGTH;
+ dev->min_mtu = 0;
+ dev->max_mtu = ETH_MAX_MTU - VLAN_ETH_HLEN;
+
/*
* Initialise a dummy MAC address. We choose the numerically
* largest non-broadcast address to prevent the address getting
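Setting dev->min_mtu and dev->max_mtu lets the core range-check MTU changes before ->ndo_change_mtu is called, which is also why this series drops the eth_change_mtu hooks from wl3501 and zd1201 above. xen-netback keeps its own handler only because the upper bound depends on the runtime can_sg capability; a sketch of that residual check (ETH_MAX_MTU is assumed from the same series):

    #include <linux/netdevice.h>
    #include <linux/if_ether.h>
    #include <linux/if_vlan.h>

    /* Sketch: static bounds live in dev->min_mtu/max_mtu; only the
     * capability-dependent limit still needs a private check. */
    static int demo_change_mtu(struct net_device *dev, int mtu)
    {
            bool can_sg = true;     /* stand-in for a device capability */
            int max = can_sg ? ETH_MAX_MTU - VLAN_ETH_HLEN : ETH_DATA_LEN;

            if (mtu > max)
                    return -EINVAL;
            dev->mtu = mtu;
            return 0;
    }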
diff --git a/drivers/net/xen-netback/xenbus.c b/drivers/net/xen-netback/xenbus.c
index 8674e188b697..3124eaec9427 100644
--- a/drivers/net/xen-netback/xenbus.c
+++ b/drivers/net/xen-netback/xenbus.c
@@ -785,12 +785,9 @@ static void xen_mcast_ctrl_changed(struct xenbus_watch *watch,
struct xenvif *vif = container_of(watch, struct xenvif,
mcast_ctrl_watch);
struct xenbus_device *dev = xenvif_to_xenbus_device(vif);
- int val;
- if (xenbus_scanf(XBT_NIL, dev->otherend,
- "request-multicast-control", "%d", &val) < 0)
- val = 0;
- vif->multicast_control = !!val;
+ vif->multicast_control = !!xenbus_read_unsigned(dev->otherend,
+ "request-multicast-control", 0);
}
static int xen_register_mcast_ctrl_watch(struct xenbus_device *dev,
@@ -889,16 +886,16 @@ static int connect_ctrl_ring(struct backend_info *be)
unsigned int evtchn;
int err;
- err = xenbus_gather(XBT_NIL, dev->otherend,
- "ctrl-ring-ref", "%u", &val, NULL);
- if (err)
+ err = xenbus_scanf(XBT_NIL, dev->otherend,
+ "ctrl-ring-ref", "%u", &val);
+ if (err < 0)
goto done; /* The frontend does not have a control ring */
ring_ref = val;
- err = xenbus_gather(XBT_NIL, dev->otherend,
- "event-channel-ctrl", "%u", &val, NULL);
- if (err) {
+ err = xenbus_scanf(XBT_NIL, dev->otherend,
+ "event-channel-ctrl", "%u", &val);
+ if (err < 0) {
xenbus_dev_fatal(dev, err,
"reading %s/event-channel-ctrl",
dev->otherend);
@@ -934,14 +931,11 @@ static void connect(struct backend_info *be)
/* Check whether the frontend requested multiple queues
* and read the number requested.
*/
- err = xenbus_scanf(XBT_NIL, dev->otherend,
- "multi-queue-num-queues",
- "%u", &requested_num_queues);
- if (err < 0) {
- requested_num_queues = 1; /* Fall back to single queue */
- } else if (requested_num_queues > xenvif_max_queues) {
+ requested_num_queues = xenbus_read_unsigned(dev->otherend,
+ "multi-queue-num-queues", 1);
+ if (requested_num_queues > xenvif_max_queues) {
/* buggy or malicious guest */
- xenbus_dev_fatal(dev, err,
+ xenbus_dev_fatal(dev, -EINVAL,
"guest requested %u queues, exceeding the maximum of %u.",
requested_num_queues, xenvif_max_queues);
return;
@@ -1134,7 +1128,7 @@ static int read_xenbus_vif_flags(struct backend_info *be)
struct xenvif *vif = be->vif;
struct xenbus_device *dev = be->dev;
unsigned int rx_copy;
- int err, val;
+ int err;
err = xenbus_scanf(XBT_NIL, dev->otherend, "request-rx-copy", "%u",
&rx_copy);
@@ -1150,10 +1144,7 @@ static int read_xenbus_vif_flags(struct backend_info *be)
if (!rx_copy)
return -EOPNOTSUPP;
- if (xenbus_scanf(XBT_NIL, dev->otherend,
- "feature-rx-notify", "%d", &val) < 0)
- val = 0;
- if (!val) {
+ if (!xenbus_read_unsigned(dev->otherend, "feature-rx-notify", 0)) {
/* - Reduce drain timeout to poll more frequently for
* Rx requests.
* - Disable Rx stall detection.
@@ -1162,34 +1153,21 @@ static int read_xenbus_vif_flags(struct backend_info *be)
be->vif->stall_timeout = 0;
}
- if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-sg",
- "%d", &val) < 0)
- val = 0;
- vif->can_sg = !!val;
+ vif->can_sg = !!xenbus_read_unsigned(dev->otherend, "feature-sg", 0);
vif->gso_mask = 0;
- if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-gso-tcpv4",
- "%d", &val) < 0)
- val = 0;
- if (val)
+ if (xenbus_read_unsigned(dev->otherend, "feature-gso-tcpv4", 0))
vif->gso_mask |= GSO_BIT(TCPV4);
- if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-gso-tcpv6",
- "%d", &val) < 0)
- val = 0;
- if (val)
+ if (xenbus_read_unsigned(dev->otherend, "feature-gso-tcpv6", 0))
vif->gso_mask |= GSO_BIT(TCPV6);
- if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-no-csum-offload",
- "%d", &val) < 0)
- val = 0;
- vif->ip_csum = !val;
+ vif->ip_csum = !xenbus_read_unsigned(dev->otherend,
+ "feature-no-csum-offload", 0);
- if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-ipv6-csum-offload",
- "%d", &val) < 0)
- val = 0;
- vif->ipv6_csum = !!val;
+ vif->ipv6_csum = !!xenbus_read_unsigned(dev->otherend,
+ "feature-ipv6-csum-offload", 0);
return 0;
}
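xenbus_read_unsigned() collapses the recurring scanf-plus-fallback pattern: it returns the parsed unsigned value from the given xenstore node, or the supplied default when the node is absent or unreadable. Typical use, matching the conversions above:

    #include <xen/xenbus.h>

    /* Sketch: read an optional boolean feature node with a default. */
    static bool demo_feature_enabled(struct xenbus_device *dev,
                                     const char *node)
    {
            /* last argument is returned when the node cannot be read */
            return !!xenbus_read_unsigned(dev->otherend, node, 0);
    }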
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index bf2744e1e3db..a479cd99911d 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -1169,43 +1169,23 @@ static netdev_features_t xennet_fix_features(struct net_device *dev,
netdev_features_t features)
{
struct netfront_info *np = netdev_priv(dev);
- int val;
- if (features & NETIF_F_SG) {
- if (xenbus_scanf(XBT_NIL, np->xbdev->otherend, "feature-sg",
- "%d", &val) < 0)
- val = 0;
+ if (features & NETIF_F_SG &&
+ !xenbus_read_unsigned(np->xbdev->otherend, "feature-sg", 0))
+ features &= ~NETIF_F_SG;
- if (!val)
- features &= ~NETIF_F_SG;
- }
-
- if (features & NETIF_F_IPV6_CSUM) {
- if (xenbus_scanf(XBT_NIL, np->xbdev->otherend,
- "feature-ipv6-csum-offload", "%d", &val) < 0)
- val = 0;
-
- if (!val)
- features &= ~NETIF_F_IPV6_CSUM;
- }
-
- if (features & NETIF_F_TSO) {
- if (xenbus_scanf(XBT_NIL, np->xbdev->otherend,
- "feature-gso-tcpv4", "%d", &val) < 0)
- val = 0;
+ if (features & NETIF_F_IPV6_CSUM &&
+ !xenbus_read_unsigned(np->xbdev->otherend,
+ "feature-ipv6-csum-offload", 0))
+ features &= ~NETIF_F_IPV6_CSUM;
- if (!val)
- features &= ~NETIF_F_TSO;
- }
+ if (features & NETIF_F_TSO &&
+ !xenbus_read_unsigned(np->xbdev->otherend, "feature-gso-tcpv4", 0))
+ features &= ~NETIF_F_TSO;
- if (features & NETIF_F_TSO6) {
- if (xenbus_scanf(XBT_NIL, np->xbdev->otherend,
- "feature-gso-tcpv6", "%d", &val) < 0)
- val = 0;
-
- if (!val)
- features &= ~NETIF_F_TSO6;
- }
+ if (features & NETIF_F_TSO6 &&
+ !xenbus_read_unsigned(np->xbdev->otherend, "feature-gso-tcpv6", 0))
+ features &= ~NETIF_F_TSO6;
return features;
}
@@ -1329,6 +1309,8 @@ static struct net_device *xennet_create_dev(struct xenbus_device *dev)
netdev->features |= netdev->hw_features;
netdev->ethtool_ops = &xennet_ethtool_ops;
+ netdev->min_mtu = 0;
+ netdev->max_mtu = XEN_NETIF_MAX_TX_SIZE;
SET_NETDEV_DEV(netdev, &dev->dev);
np->netdev = netdev;
@@ -1821,18 +1803,13 @@ static int talk_to_netback(struct xenbus_device *dev,
info->netdev->irq = 0;
/* Check if backend supports multiple queues */
- err = xenbus_scanf(XBT_NIL, info->xbdev->otherend,
- "multi-queue-max-queues", "%u", &max_queues);
- if (err < 0)
- max_queues = 1;
+ max_queues = xenbus_read_unsigned(info->xbdev->otherend,
+ "multi-queue-max-queues", 1);
num_queues = min(max_queues, xennet_max_queues);
/* Check feature-split-event-channels */
- err = xenbus_scanf(XBT_NIL, info->xbdev->otherend,
- "feature-split-event-channels", "%u",
- &feature_split_evtchn);
- if (err < 0)
- feature_split_evtchn = 0;
+ feature_split_evtchn = xenbus_read_unsigned(info->xbdev->otherend,
+ "feature-split-event-channels", 0);
/* Read mac addr. */
err = xen_net_read_mac(dev, info->netdev->dev_addr);
@@ -1966,16 +1943,10 @@ static int xennet_connect(struct net_device *dev)
struct netfront_info *np = netdev_priv(dev);
unsigned int num_queues = 0;
int err;
- unsigned int feature_rx_copy;
unsigned int j = 0;
struct netfront_queue *queue = NULL;
- err = xenbus_scanf(XBT_NIL, np->xbdev->otherend,
- "feature-rx-copy", "%u", &feature_rx_copy);
- if (err != 1)
- feature_rx_copy = 0;
-
- if (!feature_rx_copy) {
+ if (!xenbus_read_unsigned(np->xbdev->otherend, "feature-rx-copy", 0)) {
dev_info(&dev->dev,
"backend does not support copying receive path\n");
return -ENODEV;
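xennet_fix_features() applies the same helper inside the ndo_fix_features hook, which by contract may only clear feature bits the backend cannot honour, never set new ones. The generic shape:

    #include <linux/netdevice.h>

    /* Sketch: mask off offloads the peer does not advertise. */
    static netdev_features_t demo_fix_features(struct net_device *dev,
                                               netdev_features_t features)
    {
            bool peer_sg = false;   /* stand-in for a negotiated capability */

            if ((features & NETIF_F_SG) && !peer_sg)
                    features &= ~NETIF_F_SG;

            return features;
    }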
diff --git a/drivers/nfc/mei_phy.c b/drivers/nfc/mei_phy.c
index 6f9563a96488..8a04c5e02999 100644
--- a/drivers/nfc/mei_phy.c
+++ b/drivers/nfc/mei_phy.c
@@ -297,35 +297,34 @@ static int mei_nfc_recv(struct nfc_mei_phy *phy, u8 *buf, size_t length)
}
-static void nfc_mei_event_cb(struct mei_cl_device *cldev, u32 events,
- void *context)
+static void nfc_mei_rx_cb(struct mei_cl_device *cldev)
{
- struct nfc_mei_phy *phy = context;
+ struct nfc_mei_phy *phy = mei_cldev_get_drvdata(cldev);
+ struct sk_buff *skb;
+ int reply_size;
- if (phy->hard_fault != 0)
+ if (!phy)
return;
- if (events & BIT(MEI_CL_EVENT_RX)) {
- struct sk_buff *skb;
- int reply_size;
+ if (phy->hard_fault != 0)
+ return;
- skb = alloc_skb(MEI_NFC_MAX_READ, GFP_KERNEL);
- if (!skb)
- return;
+ skb = alloc_skb(MEI_NFC_MAX_READ, GFP_KERNEL);
+ if (!skb)
+ return;
- reply_size = mei_nfc_recv(phy, skb->data, MEI_NFC_MAX_READ);
- if (reply_size < MEI_NFC_HEADER_SIZE) {
- kfree_skb(skb);
- return;
- }
+ reply_size = mei_nfc_recv(phy, skb->data, MEI_NFC_MAX_READ);
+ if (reply_size < MEI_NFC_HEADER_SIZE) {
+ kfree_skb(skb);
+ return;
+ }
- skb_put(skb, reply_size);
- skb_pull(skb, MEI_NFC_HEADER_SIZE);
+ skb_put(skb, reply_size);
+ skb_pull(skb, MEI_NFC_HEADER_SIZE);
- MEI_DUMP_SKB_IN("mei frame read", skb);
+ MEI_DUMP_SKB_IN("mei frame read", skb);
- nfc_hci_recv_frame(phy->hdev, skb);
- }
+ nfc_hci_recv_frame(phy->hdev, skb);
}
static int nfc_mei_phy_enable(void *phy_id)
@@ -356,8 +355,7 @@ static int nfc_mei_phy_enable(void *phy_id)
goto err;
}
- r = mei_cldev_register_event_cb(phy->cldev, BIT(MEI_CL_EVENT_RX),
- nfc_mei_event_cb, phy);
+ r = mei_cldev_register_rx_cb(phy->cldev, nfc_mei_rx_cb);
if (r) {
pr_err("Event cb registration failed %d\n", r);
goto err;
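The MEI client bus model changes here from one multiplexed event callback to a dedicated RX callback that receives only the client device; drivers recover their state with mei_cldev_get_drvdata(). A minimal sketch of the new shape (demo_* names are placeholders):

    #include <linux/mei_cl_bus.h>

    struct demo_phy {
            int hard_fault;
    };

    /* Sketch: per-device RX callback, state via driver data. */
    static void demo_rx_cb(struct mei_cl_device *cldev)
    {
            struct demo_phy *phy = mei_cldev_get_drvdata(cldev);

            if (!phy || phy->hard_fault)
                    return;
            /* ... receive and dispatch the frame ... */
    }

    static int demo_enable(struct mei_cl_device *cldev)
    {
            return mei_cldev_register_rx_cb(cldev, demo_rx_cb);
    }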
diff --git a/drivers/nfc/microread/mei.c b/drivers/nfc/microread/mei.c
index 3092501f26c4..eb5eddf1794e 100644
--- a/drivers/nfc/microread/mei.c
+++ b/drivers/nfc/microread/mei.c
@@ -82,28 +82,7 @@ static struct mei_cl_driver microread_driver = {
.remove = microread_mei_remove,
};
-static int microread_mei_init(void)
-{
- int r;
-
- pr_debug(DRIVER_DESC ": %s\n", __func__);
-
- r = mei_cldev_driver_register(&microread_driver);
- if (r) {
- pr_err(MICROREAD_DRIVER_NAME ": driver registration failed\n");
- return r;
- }
-
- return 0;
-}
-
-static void microread_mei_exit(void)
-{
- mei_cldev_driver_unregister(&microread_driver);
-}
-
-module_init(microread_mei_init);
-module_exit(microread_mei_exit);
+module_mei_cl_driver(microread_driver);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION(DRIVER_DESC);
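module_mei_cl_driver() removes the register/unregister boilerplate the same way module_pci_driver() does for PCI. Assuming it is built on the generic module_driver() wrapper, which is the usual pattern for such helpers, the expansion is roughly:

    #include <linux/device.h>

    /* Assumed expansion, mirroring module_pci_driver(): */
    #define demo_module_mei_cl_driver(__mei_cldrv)        \
            module_driver(__mei_cldrv,                    \
                          mei_cldev_driver_register,      \
                          mei_cldev_driver_unregister)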
diff --git a/drivers/nfc/pn544/mei.c b/drivers/nfc/pn544/mei.c
index 46d0eb24eef9..ad57a8ec00d6 100644
--- a/drivers/nfc/pn544/mei.c
+++ b/drivers/nfc/pn544/mei.c
@@ -82,28 +82,7 @@ static struct mei_cl_driver pn544_driver = {
.remove = pn544_mei_remove,
};
-static int pn544_mei_init(void)
-{
- int r;
-
- pr_debug(DRIVER_DESC ": %s\n", __func__);
-
- r = mei_cldev_driver_register(&pn544_driver);
- if (r) {
- pr_err(PN544_DRIVER_NAME ": driver registration failed\n");
- return r;
- }
-
- return 0;
-}
-
-static void pn544_mei_exit(void)
-{
- mei_cldev_driver_unregister(&pn544_driver);
-}
-
-module_init(pn544_mei_init);
-module_exit(pn544_mei_exit);
+module_mei_cl_driver(pn544_driver);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION(DRIVER_DESC);
diff --git a/drivers/ntb/hw/amd/ntb_hw_amd.c b/drivers/ntb/hw/amd/ntb_hw_amd.c
index 6ccba0d862df..019a158e1128 100644
--- a/drivers/ntb/hw/amd/ntb_hw_amd.c
+++ b/drivers/ntb/hw/amd/ntb_hw_amd.c
@@ -138,11 +138,11 @@ static int amd_ntb_mw_set_trans(struct ntb_dev *ntb, int idx,
base_addr = pci_resource_start(ndev->ntb.pdev, bar);
if (bar != 1) {
- xlat_reg = AMD_BAR23XLAT_OFFSET + ((bar - 2) << 3);
- limit_reg = AMD_BAR23LMT_OFFSET + ((bar - 2) << 3);
+ xlat_reg = AMD_BAR23XLAT_OFFSET + ((bar - 2) << 2);
+ limit_reg = AMD_BAR23LMT_OFFSET + ((bar - 2) << 2);
/* Set the limit if supported */
- limit = base_addr + size;
+ limit = size;
/* set and verify setting the translation address */
write64(addr, peer_mmio + xlat_reg);
@@ -164,14 +164,8 @@ static int amd_ntb_mw_set_trans(struct ntb_dev *ntb, int idx,
xlat_reg = AMD_BAR1XLAT_OFFSET;
limit_reg = AMD_BAR1LMT_OFFSET;
- /* split bar addr range must all be 32 bit */
- if (addr & (~0ull << 32))
- return -EINVAL;
- if ((addr + size) & (~0ull << 32))
- return -EINVAL;
-
/* Set the limit if supported */
- limit = base_addr + size;
+ limit = size;
/* set and verify setting the translation address */
write64(addr, peer_mmio + xlat_reg);
@@ -199,6 +193,11 @@ static int amd_link_is_up(struct amd_ntb_dev *ndev)
if (!ndev->peer_sta)
return NTB_LNK_STA_ACTIVE(ndev->cntl_sta);
+ if (ndev->peer_sta & AMD_LINK_UP_EVENT) {
+ ndev->peer_sta = 0;
+ return 1;
+ }
+
/* If peer_sta is reset or D0 event, the ISR has
* started a timer to check link status of hardware.
* So here just clear status bit. And if peer_sta is
@@ -207,7 +206,7 @@ static int amd_link_is_up(struct amd_ntb_dev *ndev)
*/
if (ndev->peer_sta & AMD_PEER_RESET_EVENT)
ndev->peer_sta &= ~AMD_PEER_RESET_EVENT;
- else if (ndev->peer_sta & AMD_PEER_D0_EVENT)
+ else if (ndev->peer_sta & (AMD_PEER_D0_EVENT | AMD_LINK_DOWN_EVENT))
ndev->peer_sta = 0;
return 0;
@@ -491,6 +490,8 @@ static void amd_handle_event(struct amd_ntb_dev *ndev, int vec)
break;
case AMD_PEER_D3_EVENT:
case AMD_PEER_PMETO_EVENT:
+ case AMD_LINK_UP_EVENT:
+ case AMD_LINK_DOWN_EVENT:
amd_ack_smu(ndev, status);
/* link down */
@@ -598,7 +599,7 @@ static int ndev_init_isr(struct amd_ntb_dev *ndev,
err_msix_request:
while (i-- > 0)
- free_irq(ndev->msix[i].vector, ndev);
+ free_irq(ndev->msix[i].vector, &ndev->vec[i]);
pci_disable_msix(pdev);
err_msix_enable:
kfree(ndev->msix);
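The error-path change above matters because free_irq() identifies the action to remove by its dev_id cookie: each MSI-X vector was requested with &ndev->vec[i] as dev_id, so it must be released with that same pointer (the same fix is repeated for the Intel driver below). In general:

    #include <linux/interrupt.h>

    struct demo_vec {
            int num;
    };

    /* Sketch: free_irq() must get the dev_id used at request time,
     * otherwise the matching handler is never found. */
    static void demo_teardown_vectors(unsigned int *irqs,
                                      struct demo_vec *vec, int n)
    {
            while (n-- > 0)
                    free_irq(irqs[n], &vec[n]);
    }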
diff --git a/drivers/ntb/hw/amd/ntb_hw_amd.h b/drivers/ntb/hw/amd/ntb_hw_amd.h
index 2eac3cd3e646..13d73ed94a52 100644
--- a/drivers/ntb/hw/amd/ntb_hw_amd.h
+++ b/drivers/ntb/hw/amd/ntb_hw_amd.h
@@ -148,9 +148,12 @@ enum {
AMD_PEER_D3_EVENT = BIT(2),
AMD_PEER_PMETO_EVENT = BIT(3),
AMD_PEER_D0_EVENT = BIT(4),
+ AMD_LINK_UP_EVENT = BIT(5),
+ AMD_LINK_DOWN_EVENT = BIT(6),
AMD_EVENT_INTMASK = (AMD_PEER_FLUSH_EVENT |
AMD_PEER_RESET_EVENT | AMD_PEER_D3_EVENT |
- AMD_PEER_PMETO_EVENT | AMD_PEER_D0_EVENT),
+ AMD_PEER_PMETO_EVENT | AMD_PEER_D0_EVENT |
+ AMD_LINK_UP_EVENT | AMD_LINK_DOWN_EVENT),
AMD_PMESTAT_OFFSET = 0x480,
AMD_PMSGTRIG_OFFSET = 0x490,
diff --git a/drivers/ntb/hw/intel/ntb_hw_intel.c b/drivers/ntb/hw/intel/ntb_hw_intel.c
index 7310a261c858..eca9688bf9d9 100644
--- a/drivers/ntb/hw/intel/ntb_hw_intel.c
+++ b/drivers/ntb/hw/intel/ntb_hw_intel.c
@@ -86,7 +86,12 @@ static const struct intel_ntb_xlat_reg xeon_pri_xlat;
static const struct intel_ntb_xlat_reg xeon_sec_xlat;
static struct intel_b2b_addr xeon_b2b_usd_addr;
static struct intel_b2b_addr xeon_b2b_dsd_addr;
+static const struct intel_ntb_reg skx_reg;
+static const struct intel_ntb_alt_reg skx_pri_reg;
+static const struct intel_ntb_alt_reg skx_b2b_reg;
+static const struct intel_ntb_xlat_reg skx_sec_xlat;
static const struct ntb_dev_ops intel_ntb_ops;
+static const struct ntb_dev_ops intel_ntb3_ops;
static const struct file_operations intel_ntb_debugfs_info;
static struct dentry *debugfs_dir;
@@ -145,6 +150,9 @@ module_param_named(xeon_b2b_dsd_bar5_addr32,
MODULE_PARM_DESC(xeon_b2b_dsd_bar5_addr32,
"XEON B2B DSD split-BAR 5 32-bit address");
+static inline enum ntb_topo xeon_ppd_topo(struct intel_ntb_dev *ndev, u8 ppd);
+static int xeon_init_isr(struct intel_ntb_dev *ndev);
+
#ifndef ioread64
#ifdef readq
#define ioread64 readq
@@ -206,6 +214,14 @@ static inline int pdev_is_xeon(struct pci_dev *pdev)
return 0;
}
+static inline int pdev_is_skx_xeon(struct pci_dev *pdev)
+{
+ if (pdev->device == PCI_DEVICE_ID_INTEL_NTB_B2B_SKX)
+ return 1;
+
+ return 0;
+}
+
static inline void ndev_reset_unsafe_flags(struct intel_ntb_dev *ndev)
{
ndev->unsafe_flags = 0;
@@ -390,6 +406,9 @@ static irqreturn_t ndev_interrupt(struct intel_ntb_dev *ndev, int vec)
vec_mask = ndev_vec_mask(ndev, vec);
+ if ((ndev->hwerr_flags & NTB_HWERR_MSIX_VECTOR32_BAD) && (vec == 31))
+ vec_mask |= ndev->db_link_mask;
+
dev_dbg(ndev_dev(ndev), "vec %d vec_mask %llx\n", vec, vec_mask);
ndev->last_ts = jiffies;
@@ -409,6 +428,9 @@ static irqreturn_t ndev_vec_isr(int irq, void *dev)
{
struct intel_ntb_vec *nvec = dev;
+ dev_dbg(ndev_dev(nvec->ndev), "irq: %d nvec->num: %d\n",
+ irq, nvec->num);
+
return ndev_interrupt(nvec->ndev, nvec->num);
}
@@ -465,14 +487,14 @@ static int ndev_init_isr(struct intel_ntb_dev *ndev,
goto err_msix_request;
}
- dev_dbg(ndev_dev(ndev), "Using msix interrupts\n");
+ dev_dbg(ndev_dev(ndev), "Using %d msix interrupts\n", msix_count);
ndev->db_vec_count = msix_count;
ndev->db_vec_shift = msix_shift;
return 0;
err_msix_request:
while (i-- > 0)
- free_irq(ndev->msix[i].vector, ndev);
+ free_irq(ndev->msix[i].vector, &ndev->vec[i]);
pci_disable_msix(pdev);
err_msix_enable:
kfree(ndev->msix);
@@ -547,8 +569,171 @@ static void ndev_deinit_isr(struct intel_ntb_dev *ndev)
}
}
-static ssize_t ndev_debugfs_read(struct file *filp, char __user *ubuf,
- size_t count, loff_t *offp)
+static ssize_t ndev_ntb3_debugfs_read(struct file *filp, char __user *ubuf,
+ size_t count, loff_t *offp)
+{
+ struct intel_ntb_dev *ndev;
+ void __iomem *mmio;
+ char *buf;
+ size_t buf_size;
+ ssize_t ret, off;
+ union { u64 v64; u32 v32; u16 v16; } u;
+
+ ndev = filp->private_data;
+ mmio = ndev->self_mmio;
+
+ buf_size = min(count, 0x800ul);
+
+ buf = kmalloc(buf_size, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ off = 0;
+
+ off += scnprintf(buf + off, buf_size - off,
+ "NTB Device Information:\n");
+
+ off += scnprintf(buf + off, buf_size - off,
+ "Connection Topology -\t%s\n",
+ ntb_topo_string(ndev->ntb.topo));
+
+ off += scnprintf(buf + off, buf_size - off,
+ "NTB CTL -\t\t%#06x\n", ndev->ntb_ctl);
+ off += scnprintf(buf + off, buf_size - off,
+ "LNK STA -\t\t%#06x\n", ndev->lnk_sta);
+
+ if (!ndev->reg->link_is_up(ndev))
+ off += scnprintf(buf + off, buf_size - off,
+ "Link Status -\t\tDown\n");
+ else {
+ off += scnprintf(buf + off, buf_size - off,
+ "Link Status -\t\tUp\n");
+ off += scnprintf(buf + off, buf_size - off,
+ "Link Speed -\t\tPCI-E Gen %u\n",
+ NTB_LNK_STA_SPEED(ndev->lnk_sta));
+ off += scnprintf(buf + off, buf_size - off,
+ "Link Width -\t\tx%u\n",
+ NTB_LNK_STA_WIDTH(ndev->lnk_sta));
+ }
+
+ off += scnprintf(buf + off, buf_size - off,
+ "Memory Window Count -\t%u\n", ndev->mw_count);
+ off += scnprintf(buf + off, buf_size - off,
+ "Scratchpad Count -\t%u\n", ndev->spad_count);
+ off += scnprintf(buf + off, buf_size - off,
+ "Doorbell Count -\t%u\n", ndev->db_count);
+ off += scnprintf(buf + off, buf_size - off,
+ "Doorbell Vector Count -\t%u\n", ndev->db_vec_count);
+ off += scnprintf(buf + off, buf_size - off,
+ "Doorbell Vector Shift -\t%u\n", ndev->db_vec_shift);
+
+ off += scnprintf(buf + off, buf_size - off,
+ "Doorbell Valid Mask -\t%#llx\n", ndev->db_valid_mask);
+ off += scnprintf(buf + off, buf_size - off,
+ "Doorbell Link Mask -\t%#llx\n", ndev->db_link_mask);
+ off += scnprintf(buf + off, buf_size - off,
+ "Doorbell Mask Cached -\t%#llx\n", ndev->db_mask);
+
+ u.v64 = ndev_db_read(ndev, mmio + ndev->self_reg->db_mask);
+ off += scnprintf(buf + off, buf_size - off,
+ "Doorbell Mask -\t\t%#llx\n", u.v64);
+
+ u.v64 = ndev_db_read(ndev, mmio + ndev->self_reg->db_bell);
+ off += scnprintf(buf + off, buf_size - off,
+ "Doorbell Bell -\t\t%#llx\n", u.v64);
+
+ off += scnprintf(buf + off, buf_size - off,
+ "\nNTB Incoming XLAT:\n");
+
+ u.v64 = ioread64(mmio + SKX_IMBAR1XBASE_OFFSET);
+ off += scnprintf(buf + off, buf_size - off,
+ "IMBAR1XBASE -\t\t%#018llx\n", u.v64);
+
+ u.v64 = ioread64(mmio + SKX_IMBAR2XBASE_OFFSET);
+ off += scnprintf(buf + off, buf_size - off,
+ "IMBAR2XBASE -\t\t%#018llx\n", u.v64);
+
+ u.v64 = ioread64(mmio + SKX_IMBAR1XLMT_OFFSET);
+ off += scnprintf(buf + off, buf_size - off,
+ "IMBAR1XLMT -\t\t\t%#018llx\n", u.v64);
+
+ u.v64 = ioread64(mmio + SKX_IMBAR2XLMT_OFFSET);
+ off += scnprintf(buf + off, buf_size - off,
+ "IMBAR2XLMT -\t\t\t%#018llx\n", u.v64);
+
+ if (ntb_topo_is_b2b(ndev->ntb.topo)) {
+ off += scnprintf(buf + off, buf_size - off,
+ "\nNTB Outgoing B2B XLAT:\n");
+
+ u.v64 = ioread64(mmio + SKX_EMBAR1XBASE_OFFSET);
+ off += scnprintf(buf + off, buf_size - off,
+ "EMBAR1XBASE -\t\t%#018llx\n", u.v64);
+
+ u.v64 = ioread64(mmio + SKX_EMBAR2XBASE_OFFSET);
+ off += scnprintf(buf + off, buf_size - off,
+ "EMBAR2XBASE -\t\t%#018llx\n", u.v64);
+
+ u.v64 = ioread64(mmio + SKX_EMBAR1XLMT_OFFSET);
+ off += scnprintf(buf + off, buf_size - off,
+ "EMBAR1XLMT -\t\t%#018llx\n", u.v64);
+
+ u.v64 = ioread64(mmio + SKX_EMBAR2XLMT_OFFSET);
+ off += scnprintf(buf + off, buf_size - off,
+ "EMBAR2XLMT -\t\t%#018llx\n", u.v64);
+
+ off += scnprintf(buf + off, buf_size - off,
+ "\nNTB Secondary BAR:\n");
+
+ u.v64 = ioread64(mmio + SKX_EMBAR0_OFFSET);
+ off += scnprintf(buf + off, buf_size - off,
+ "EMBAR0 -\t\t%#018llx\n", u.v64);
+
+ u.v64 = ioread64(mmio + SKX_EMBAR1_OFFSET);
+ off += scnprintf(buf + off, buf_size - off,
+ "EMBAR1 -\t\t%#018llx\n", u.v64);
+
+ u.v64 = ioread64(mmio + SKX_EMBAR2_OFFSET);
+ off += scnprintf(buf + off, buf_size - off,
+ "EMBAR2 -\t\t%#018llx\n", u.v64);
+ }
+
+ off += scnprintf(buf + off, buf_size - off,
+ "\nNTB Statistics:\n");
+
+ u.v16 = ioread16(mmio + SKX_USMEMMISS_OFFSET);
+ off += scnprintf(buf + off, buf_size - off,
+ "Upstream Memory Miss -\t%u\n", u.v16);
+
+ off += scnprintf(buf + off, buf_size - off,
+ "\nNTB Hardware Errors:\n");
+
+ if (!pci_read_config_word(ndev->ntb.pdev,
+ SKX_DEVSTS_OFFSET, &u.v16))
+ off += scnprintf(buf + off, buf_size - off,
+ "DEVSTS -\t\t%#06x\n", u.v16);
+
+ if (!pci_read_config_word(ndev->ntb.pdev,
+ SKX_LINK_STATUS_OFFSET, &u.v16))
+ off += scnprintf(buf + off, buf_size - off,
+ "LNKSTS -\t\t%#06x\n", u.v16);
+
+ if (!pci_read_config_dword(ndev->ntb.pdev,
+ SKX_UNCERRSTS_OFFSET, &u.v32))
+ off += scnprintf(buf + off, buf_size - off,
+ "UNCERRSTS -\t\t%#06x\n", u.v32);
+
+ if (!pci_read_config_dword(ndev->ntb.pdev,
+ SKX_CORERRSTS_OFFSET, &u.v32))
+ off += scnprintf(buf + off, buf_size - off,
+ "CORERRSTS -\t\t%#06x\n", u.v32);
+
+ ret = simple_read_from_buffer(ubuf, count, offp, buf, off);
+ kfree(buf);
+ return ret;
+}
+
+static ssize_t ndev_ntb_debugfs_read(struct file *filp, char __user *ubuf,
+ size_t count, loff_t *offp)
{
struct intel_ntb_dev *ndev;
struct pci_dev *pdev;
@@ -813,6 +998,20 @@ static ssize_t ndev_debugfs_read(struct file *filp, char __user *ubuf,
return ret;
}
+static ssize_t ndev_debugfs_read(struct file *filp, char __user *ubuf,
+ size_t count, loff_t *offp)
+{
+ struct intel_ntb_dev *ndev = filp->private_data;
+
+ if (pdev_is_xeon(ndev->ntb.pdev) ||
+ pdev_is_atom(ndev->ntb.pdev))
+ return ndev_ntb_debugfs_read(filp, ubuf, count, offp);
+ else if (pdev_is_skx_xeon(ndev->ntb.pdev))
+ return ndev_ntb3_debugfs_read(filp, ubuf, count, offp);
+
+ return -ENXIO;
+}
+
static void ndev_init_debugfs(struct intel_ntb_dev *ndev)
{
if (!debugfs_dir) {
@@ -1428,6 +1627,383 @@ static void atom_deinit_dev(struct intel_ntb_dev *ndev)
atom_deinit_isr(ndev);
}
+/* Skylake Xeon NTB */
+
+static u64 skx_db_ioread(void __iomem *mmio)
+{
+ return ioread64(mmio);
+}
+
+static void skx_db_iowrite(u64 bits, void __iomem *mmio)
+{
+ iowrite64(bits, mmio);
+}
+
+static int skx_init_isr(struct intel_ntb_dev *ndev)
+{
+ int i;
+
+ /*
+ * The MSIX vectors and the interrupt status bits are not lined up
+ * on Skylake: by default the link status bit is bit 32, yet it is
+ * mapped to MSIX vector 0, so a fixup is needed to line them up.
+ * The vector mapping at reset is 1-32,0; reprogram it to 0-32.
+ */
+
+ for (i = 0; i < SKX_DB_MSIX_VECTOR_COUNT; i++)
+ iowrite8(i, ndev->self_mmio + SKX_INTVEC_OFFSET + i);
+
+ /* move link status down one as workaround */
+ if (ndev->hwerr_flags & NTB_HWERR_MSIX_VECTOR32_BAD) {
+ iowrite8(SKX_DB_MSIX_VECTOR_COUNT - 2,
+ ndev->self_mmio + SKX_INTVEC_OFFSET +
+ (SKX_DB_MSIX_VECTOR_COUNT - 1));
+ }
+
+ return ndev_init_isr(ndev, SKX_DB_MSIX_VECTOR_COUNT,
+ SKX_DB_MSIX_VECTOR_COUNT,
+ SKX_DB_MSIX_VECTOR_SHIFT,
+ SKX_DB_TOTAL_SHIFT);
+}
+
+static int skx_setup_b2b_mw(struct intel_ntb_dev *ndev,
+ const struct intel_b2b_addr *addr,
+ const struct intel_b2b_addr *peer_addr)
+{
+ struct pci_dev *pdev;
+ void __iomem *mmio;
+ resource_size_t bar_size;
+ phys_addr_t bar_addr;
+ int b2b_bar;
+ u8 bar_sz;
+
+ pdev = ndev_pdev(ndev);
+ mmio = ndev->self_mmio;
+
+ if (ndev->b2b_idx == UINT_MAX) {
+ dev_dbg(ndev_dev(ndev), "not using b2b mw\n");
+ b2b_bar = 0;
+ ndev->b2b_off = 0;
+ } else {
+ b2b_bar = ndev_mw_to_bar(ndev, ndev->b2b_idx);
+ if (b2b_bar < 0)
+ return -EIO;
+
+ dev_dbg(ndev_dev(ndev), "using b2b mw bar %d\n", b2b_bar);
+
+ bar_size = pci_resource_len(ndev->ntb.pdev, b2b_bar);
+
+ dev_dbg(ndev_dev(ndev), "b2b bar size %#llx\n", bar_size);
+
+ if (b2b_mw_share && ((bar_size >> 1) >= XEON_B2B_MIN_SIZE)) {
+ dev_dbg(ndev_dev(ndev),
+ "b2b using first half of bar\n");
+ ndev->b2b_off = bar_size >> 1;
+ } else if (bar_size >= XEON_B2B_MIN_SIZE) {
+ dev_dbg(ndev_dev(ndev),
+ "b2b using whole bar\n");
+ ndev->b2b_off = 0;
+ --ndev->mw_count;
+ } else {
+ dev_dbg(ndev_dev(ndev),
+ "b2b bar size is too small\n");
+ return -EIO;
+ }
+ }
+
+ /*
+ * Reset the secondary bar sizes to match the primary bar sizes,
+ * except disable or halve the size of the b2b secondary bar.
+ */
+ pci_read_config_byte(pdev, SKX_IMBAR1SZ_OFFSET, &bar_sz);
+ dev_dbg(ndev_dev(ndev), "IMBAR1SZ %#x\n", bar_sz);
+ if (b2b_bar == 1) {
+ if (ndev->b2b_off)
+ bar_sz -= 1;
+ else
+ bar_sz = 0;
+ }
+
+ pci_write_config_byte(pdev, SKX_EMBAR1SZ_OFFSET, bar_sz);
+ pci_read_config_byte(pdev, SKX_EMBAR1SZ_OFFSET, &bar_sz);
+ dev_dbg(ndev_dev(ndev), "EMBAR1SZ %#x\n", bar_sz);
+
+ pci_read_config_byte(pdev, SKX_IMBAR2SZ_OFFSET, &bar_sz);
+ dev_dbg(ndev_dev(ndev), "IMBAR2SZ %#x\n", bar_sz);
+ if (b2b_bar == 2) {
+ if (ndev->b2b_off)
+ bar_sz -= 1;
+ else
+ bar_sz = 0;
+ }
+
+ pci_write_config_byte(pdev, SKX_EMBAR2SZ_OFFSET, bar_sz);
+ pci_read_config_byte(pdev, SKX_EMBAR2SZ_OFFSET, &bar_sz);
+ dev_dbg(ndev_dev(ndev), "EMBAR2SZ %#x\n", bar_sz);
+
+ /* SBAR01 hit by first part of the b2b bar */
+ if (b2b_bar == 0)
+ bar_addr = addr->bar0_addr;
+ else if (b2b_bar == 1)
+ bar_addr = addr->bar2_addr64;
+ else if (b2b_bar == 2)
+ bar_addr = addr->bar4_addr64;
+ else
+ return -EIO;
+
+ /* setup incoming bar limits == base addrs (zero length windows) */
+ bar_addr = addr->bar2_addr64 + (b2b_bar == 1 ? ndev->b2b_off : 0);
+ iowrite64(bar_addr, mmio + SKX_IMBAR1XLMT_OFFSET);
+ bar_addr = ioread64(mmio + SKX_IMBAR1XLMT_OFFSET);
+ dev_dbg(ndev_dev(ndev), "IMBAR1XLMT %#018llx\n", bar_addr);
+
+ bar_addr = addr->bar4_addr64 + (b2b_bar == 2 ? ndev->b2b_off : 0);
+ iowrite64(bar_addr, mmio + SKX_IMBAR2XLMT_OFFSET);
+ bar_addr = ioread64(mmio + SKX_IMBAR2XLMT_OFFSET);
+ dev_dbg(ndev_dev(ndev), "IMBAR2XLMT %#018llx\n", bar_addr);
+
+ /* zero incoming translation addrs */
+ iowrite64(0, mmio + SKX_IMBAR1XBASE_OFFSET);
+ iowrite64(0, mmio + SKX_IMBAR2XBASE_OFFSET);
+
+ ndev->peer_mmio = ndev->self_mmio;
+
+ return 0;
+}
+
+static int skx_init_ntb(struct intel_ntb_dev *ndev)
+{
+ int rc;
+
+ ndev->mw_count = XEON_MW_COUNT;
+ ndev->spad_count = SKX_SPAD_COUNT;
+ ndev->db_count = SKX_DB_COUNT;
+ ndev->db_link_mask = SKX_DB_LINK_BIT;
+
+ /* DB fixup: the vector-32 workaround also carries link events on bit 31 */
+ if (ndev->hwerr_flags & NTB_HWERR_MSIX_VECTOR32_BAD)
+ ndev->db_link_mask |= BIT_ULL(31);
+
+ switch (ndev->ntb.topo) {
+ case NTB_TOPO_B2B_USD:
+ case NTB_TOPO_B2B_DSD:
+ ndev->self_reg = &skx_pri_reg;
+ ndev->peer_reg = &skx_b2b_reg;
+ ndev->xlat_reg = &skx_sec_xlat;
+
+ if (ndev->ntb.topo == NTB_TOPO_B2B_USD) {
+ rc = skx_setup_b2b_mw(ndev,
+ &xeon_b2b_dsd_addr,
+ &xeon_b2b_usd_addr);
+ } else {
+ rc = skx_setup_b2b_mw(ndev,
+ &xeon_b2b_usd_addr,
+ &xeon_b2b_dsd_addr);
+ }
+
+ if (rc)
+ return rc;
+
+ /* Enable Bus Master and Memory Space on the secondary side */
+ iowrite16(PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER,
+ ndev->self_mmio + SKX_SPCICMD_OFFSET);
+
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ ndev->db_valid_mask = BIT_ULL(ndev->db_count) - 1;
+
+ ndev->reg->db_iowrite(ndev->db_valid_mask,
+ ndev->self_mmio +
+ ndev->self_reg->db_mask);
+
+ return 0;
+}
+
+static int skx_init_dev(struct intel_ntb_dev *ndev)
+{
+ struct pci_dev *pdev;
+ u8 ppd;
+ int rc;
+
+ pdev = ndev_pdev(ndev);
+
+ ndev->reg = &skx_reg;
+
+ rc = pci_read_config_byte(pdev, XEON_PPD_OFFSET, &ppd);
+ if (rc)
+ return -EIO;
+
+ ndev->ntb.topo = xeon_ppd_topo(ndev, ppd);
+ dev_dbg(ndev_dev(ndev), "ppd %#x topo %s\n", ppd,
+ ntb_topo_string(ndev->ntb.topo));
+ if (ndev->ntb.topo == NTB_TOPO_NONE)
+ return -EINVAL;
+
+ if (pdev_is_skx_xeon(pdev))
+ ndev->hwerr_flags |= NTB_HWERR_MSIX_VECTOR32_BAD;
+
+ rc = skx_init_ntb(ndev);
+ if (rc)
+ return rc;
+
+ return skx_init_isr(ndev);
+}
+
+static int intel_ntb3_link_enable(struct ntb_dev *ntb,
+ enum ntb_speed max_speed,
+ enum ntb_width max_width)
+{
+ struct intel_ntb_dev *ndev;
+ u32 ntb_ctl;
+
+ ndev = container_of(ntb, struct intel_ntb_dev, ntb);
+
+ dev_dbg(ndev_dev(ndev),
+ "Enabling link with max_speed %d max_width %d\n",
+ max_speed, max_width);
+
+ if (max_speed != NTB_SPEED_AUTO)
+ dev_dbg(ndev_dev(ndev), "ignoring max_speed %d\n", max_speed);
+ if (max_width != NTB_WIDTH_AUTO)
+ dev_dbg(ndev_dev(ndev), "ignoring max_width %d\n", max_width);
+
+ ntb_ctl = ioread32(ndev->self_mmio + ndev->reg->ntb_ctl);
+ ntb_ctl &= ~(NTB_CTL_DISABLE | NTB_CTL_CFG_LOCK);
+ ntb_ctl |= NTB_CTL_P2S_BAR2_SNOOP | NTB_CTL_S2P_BAR2_SNOOP;
+ ntb_ctl |= NTB_CTL_P2S_BAR4_SNOOP | NTB_CTL_S2P_BAR4_SNOOP;
+ iowrite32(ntb_ctl, ndev->self_mmio + ndev->reg->ntb_ctl);
+
+ return 0;
+}
+
+static int intel_ntb3_mw_set_trans(struct ntb_dev *ntb, int idx,
+ dma_addr_t addr, resource_size_t size)
+{
+ struct intel_ntb_dev *ndev = ntb_ndev(ntb);
+ unsigned long xlat_reg, limit_reg;
+ resource_size_t bar_size, mw_size;
+ void __iomem *mmio;
+ u64 base, limit, reg_val;
+ int bar;
+
+ if (idx >= ndev->b2b_idx && !ndev->b2b_off)
+ idx += 1;
+
+ bar = ndev_mw_to_bar(ndev, idx);
+ if (bar < 0)
+ return bar;
+
+ bar_size = pci_resource_len(ndev->ntb.pdev, bar);
+
+ if (idx == ndev->b2b_idx)
+ mw_size = bar_size - ndev->b2b_off;
+ else
+ mw_size = bar_size;
+
+ /* hardware requires that addr is aligned to bar size */
+ if (addr & (bar_size - 1))
+ return -EINVAL;
+
+ /* make sure the range fits in the usable mw size */
+ if (size > mw_size)
+ return -EINVAL;
+
+ mmio = ndev->self_mmio;
+ xlat_reg = ndev->xlat_reg->bar2_xlat + (idx * 0x10);
+ limit_reg = ndev->xlat_reg->bar2_limit + (idx * 0x10);
+ base = pci_resource_start(ndev->ntb.pdev, bar);
+
+ /* Set the limit if supported and the mapping is smaller than mw_size */
+ if (limit_reg && size != mw_size)
+ limit = base + size;
+ else
+ limit = base + mw_size;
+
+ /* set and verify setting the translation address */
+ iowrite64(addr, mmio + xlat_reg);
+ reg_val = ioread64(mmio + xlat_reg);
+ if (reg_val != addr) {
+ iowrite64(0, mmio + xlat_reg);
+ return -EIO;
+ }
+
+ dev_dbg(ndev_dev(ndev), "BAR %d IMBARXBASE: %#Lx\n", bar, reg_val);
+
+ /* set and verify setting the limit */
+ iowrite64(limit, mmio + limit_reg);
+ reg_val = ioread64(mmio + limit_reg);
+ if (reg_val != limit) {
+ iowrite64(base, mmio + limit_reg);
+ iowrite64(0, mmio + xlat_reg);
+ return -EIO;
+ }
+
+ dev_dbg(ndev_dev(ndev), "BAR %d IMBARXLMT: %#Lx\n", bar, reg_val);
+
+ /* setup the EP */
+ limit_reg = ndev->xlat_reg->bar2_limit + (idx * 0x10) + 0x4000;
+ base = ioread64(mmio + SKX_EMBAR1_OFFSET + (8 * idx));
+ base &= ~0xf;
+
+ if (limit_reg && size != mw_size)
+ limit = base + size;
+ else
+ limit = base + mw_size;
+
+ /* set and verify setting the limit */
+ iowrite64(limit, mmio + limit_reg);
+ reg_val = ioread64(mmio + limit_reg);
+ if (reg_val != limit) {
+ iowrite64(base, mmio + limit_reg);
+ iowrite64(0, mmio + xlat_reg);
+ return -EIO;
+ }
+
+ dev_dbg(ndev_dev(ndev), "BAR %d EMBARXLMT: %#Lx\n", bar, reg_val);
+
+ return 0;
+}
+
+static int intel_ntb3_peer_db_set(struct ntb_dev *ntb, u64 db_bits)
+{
+ struct intel_ntb_dev *ndev = ntb_ndev(ntb);
+ int bit;
+
+ if (db_bits & ~ndev->db_valid_mask)
+ return -EINVAL;
+
+ while (db_bits) {
+ bit = __ffs(db_bits);
+ iowrite32(1, ndev->peer_mmio +
+ ndev->peer_reg->db_bell + (bit * 4));
+ db_bits &= db_bits - 1;
+ }
+
+ return 0;
+}
+
+static u64 intel_ntb3_db_read(struct ntb_dev *ntb)
+{
+ struct intel_ntb_dev *ndev = ntb_ndev(ntb);
+
+ return ndev_db_read(ndev,
+ ndev->self_mmio +
+ ndev->self_reg->db_clear);
+}
+
+static int intel_ntb3_db_clear(struct ntb_dev *ntb, u64 db_bits)
+{
+ struct intel_ntb_dev *ndev = ntb_ndev(ntb);
+
+ return ndev_db_write(ndev, db_bits,
+ ndev->self_mmio +
+ ndev->self_reg->db_clear);
+}
+
/* XEON */
static u64 xeon_db_ioread(void __iomem *mmio)
@@ -2120,6 +2696,24 @@ static int intel_ntb_pci_probe(struct pci_dev *pdev,
if (rc)
goto err_init_dev;
+ } else if (pdev_is_skx_xeon(pdev)) {
+ ndev = kzalloc_node(sizeof(*ndev), GFP_KERNEL, node);
+ if (!ndev) {
+ rc = -ENOMEM;
+ goto err_ndev;
+ }
+
+ ndev_init_struct(ndev, pdev);
+ ndev->ntb.ops = &intel_ntb3_ops;
+
+ rc = intel_ntb_init_pci(ndev, pdev);
+ if (rc)
+ goto err_init_pci;
+
+ rc = skx_init_dev(ndev);
+ if (rc)
+ goto err_init_dev;
+
} else {
rc = -EINVAL;
goto err_ndev;
@@ -2143,7 +2737,7 @@ err_register:
ndev_deinit_debugfs(ndev);
if (pdev_is_atom(pdev))
atom_deinit_dev(ndev);
- else if (pdev_is_xeon(pdev))
+ else if (pdev_is_xeon(pdev) || pdev_is_skx_xeon(pdev))
xeon_deinit_dev(ndev);
err_init_dev:
intel_ntb_deinit_pci(ndev);
@@ -2161,7 +2755,7 @@ static void intel_ntb_pci_remove(struct pci_dev *pdev)
ndev_deinit_debugfs(ndev);
if (pdev_is_atom(pdev))
atom_deinit_dev(ndev);
- else if (pdev_is_xeon(pdev))
+ else if (pdev_is_xeon(pdev) || pdev_is_skx_xeon(pdev))
xeon_deinit_dev(ndev);
intel_ntb_deinit_pci(ndev);
kfree(ndev);
@@ -2257,6 +2851,36 @@ static struct intel_b2b_addr xeon_b2b_dsd_addr = {
.bar5_addr32 = XEON_B2B_BAR5_ADDR32,
};
+static const struct intel_ntb_reg skx_reg = {
+ .poll_link = xeon_poll_link,
+ .link_is_up = xeon_link_is_up,
+ .db_ioread = skx_db_ioread,
+ .db_iowrite = skx_db_iowrite,
+ .db_size = sizeof(u64),
+ .ntb_ctl = SKX_NTBCNTL_OFFSET,
+ .mw_bar = {2, 4},
+};
+
+static const struct intel_ntb_alt_reg skx_pri_reg = {
+ .db_bell = SKX_EM_DOORBELL_OFFSET,
+ .db_clear = SKX_IM_INT_STATUS_OFFSET,
+ .db_mask = SKX_IM_INT_DISABLE_OFFSET,
+ .spad = SKX_IM_SPAD_OFFSET,
+};
+
+static const struct intel_ntb_alt_reg skx_b2b_reg = {
+ .db_bell = SKX_IM_DOORBELL_OFFSET,
+ .db_clear = SKX_EM_INT_STATUS_OFFSET,
+ .db_mask = SKX_EM_INT_DISABLE_OFFSET,
+ .spad = SKX_B2B_SPAD_OFFSET,
+};
+
+static const struct intel_ntb_xlat_reg skx_sec_xlat = {
+/* .bar0_base = SKX_EMBAR0_OFFSET, */
+ .bar2_limit = SKX_IMBAR1XLMT_OFFSET,
+ .bar2_xlat = SKX_IMBAR1XBASE_OFFSET,
+};
+
/* operations for primary side of local ntb */
static const struct ntb_dev_ops intel_ntb_ops = {
.mw_count = intel_ntb_mw_count,
@@ -2284,6 +2908,31 @@ static const struct ntb_dev_ops intel_ntb_ops = {
.peer_spad_write = intel_ntb_peer_spad_write,
};
+static const struct ntb_dev_ops intel_ntb3_ops = {
+ .mw_count = intel_ntb_mw_count,
+ .mw_get_range = intel_ntb_mw_get_range,
+ .mw_set_trans = intel_ntb3_mw_set_trans,
+ .link_is_up = intel_ntb_link_is_up,
+ .link_enable = intel_ntb3_link_enable,
+ .link_disable = intel_ntb_link_disable,
+ .db_valid_mask = intel_ntb_db_valid_mask,
+ .db_vector_count = intel_ntb_db_vector_count,
+ .db_vector_mask = intel_ntb_db_vector_mask,
+ .db_read = intel_ntb3_db_read,
+ .db_clear = intel_ntb3_db_clear,
+ .db_set_mask = intel_ntb_db_set_mask,
+ .db_clear_mask = intel_ntb_db_clear_mask,
+ .peer_db_addr = intel_ntb_peer_db_addr,
+ .peer_db_set = intel_ntb3_peer_db_set,
+ .spad_is_unsafe = intel_ntb_spad_is_unsafe,
+ .spad_count = intel_ntb_spad_count,
+ .spad_read = intel_ntb_spad_read,
+ .spad_write = intel_ntb_spad_write,
+ .peer_spad_addr = intel_ntb_peer_spad_addr,
+ .peer_spad_read = intel_ntb_peer_spad_read,
+ .peer_spad_write = intel_ntb_peer_spad_write,
+};
+
static const struct file_operations intel_ntb_debugfs_info = {
.owner = THIS_MODULE,
.open = simple_open,
@@ -2307,6 +2956,7 @@ static const struct pci_device_id intel_ntb_pci_tbl[] = {
{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_SS_IVT)},
{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_SS_HSX)},
{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_SS_BDX)},
+ {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_SKX)},
{0}
};
MODULE_DEVICE_TABLE(pci, intel_ntb_pci_tbl);
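
pdev_is_skx_xeon() itself is outside this excerpt; given that 0x201C is the only Skylake ID added to the device table above, a plausible reconstruction (an assumption, not the patch's literal helper) is:

/* Hypothetical reconstruction: a single device ID identifies the
 * Skylake Xeon B2B NTB in this patch. */
static inline int pdev_is_skx_xeon(struct pci_dev *pdev)
{
	return pdev->device == PCI_DEVICE_ID_INTEL_NTB_B2B_SKX;
}
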
diff --git a/drivers/ntb/hw/intel/ntb_hw_intel.h b/drivers/ntb/hw/intel/ntb_hw_intel.h
index 3ec149cf6562..f2cf8a783f1e 100644
--- a/drivers/ntb/hw/intel/ntb_hw_intel.h
+++ b/drivers/ntb/hw/intel/ntb_hw_intel.h
@@ -70,6 +70,7 @@
#define PCI_DEVICE_ID_INTEL_NTB_B2B_BDX 0x6F0D
#define PCI_DEVICE_ID_INTEL_NTB_PS_BDX 0x6F0E
#define PCI_DEVICE_ID_INTEL_NTB_SS_BDX 0x6F0F
+#define PCI_DEVICE_ID_INTEL_NTB_B2B_SKX 0x201C
/* Intel Xeon hardware */
@@ -150,6 +151,51 @@
#define XEON_DB_TOTAL_SHIFT 16
#define XEON_SPAD_COUNT 16
+/* Intel Skylake Xeon hardware */
+#define SKX_IMBAR1SZ_OFFSET 0x00d0
+#define SKX_IMBAR2SZ_OFFSET 0x00d1
+#define SKX_EMBAR1SZ_OFFSET 0x00d2
+#define SKX_EMBAR2SZ_OFFSET 0x00d3
+#define SKX_DEVCTRL_OFFSET 0x0098
+#define SKX_DEVSTS_OFFSET 0x009a
+#define SKX_UNCERRSTS_OFFSET 0x014c
+#define SKX_CORERRSTS_OFFSET 0x0158
+#define SKX_LINK_STATUS_OFFSET 0x01a2
+
+#define SKX_NTBCNTL_OFFSET 0x0000
+#define SKX_IMBAR1XBASE_OFFSET 0x0010 /* SBAR2XLAT */
+#define SKX_IMBAR1XLMT_OFFSET 0x0018 /* SBAR2LMT */
+#define SKX_IMBAR2XBASE_OFFSET 0x0020 /* SBAR4XLAT */
+#define SKX_IMBAR2XLMT_OFFSET 0x0028 /* SBAR4LMT */
+#define SKX_IM_INT_STATUS_OFFSET 0x0040
+#define SKX_IM_INT_DISABLE_OFFSET 0x0048
+#define SKX_IM_SPAD_OFFSET 0x0080 /* SPAD */
+#define SKX_USMEMMISS_OFFSET 0x0070
+#define SKX_INTVEC_OFFSET 0x00d0
+#define SKX_IM_DOORBELL_OFFSET 0x0100 /* SDOORBELL0 */
+#define SKX_B2B_SPAD_OFFSET 0x0180 /* B2B SPAD */
+#define SKX_EMBAR0XBASE_OFFSET 0x4008 /* B2B_XLAT */
+#define SKX_EMBAR1XBASE_OFFSET 0x4010 /* PBAR2XLAT */
+#define SKX_EMBAR1XLMT_OFFSET 0x4018 /* PBAR2LMT */
+#define SKX_EMBAR2XBASE_OFFSET 0x4020 /* PBAR4XLAT */
+#define SKX_EMBAR2XLMT_OFFSET 0x4028 /* PBAR4LMT */
+#define SKX_EM_INT_STATUS_OFFSET 0x4040
+#define SKX_EM_INT_DISABLE_OFFSET 0x4048
+#define SKX_EM_SPAD_OFFSET 0x4080 /* remote SPAD */
+#define SKX_EM_DOORBELL_OFFSET 0x4100 /* PDOORBELL0 */
+#define SKX_SPCICMD_OFFSET 0x4504 /* SPCICMD */
+#define SKX_EMBAR0_OFFSET 0x4510 /* SBAR0BASE */
+#define SKX_EMBAR1_OFFSET 0x4518 /* SBAR23BASE */
+#define SKX_EMBAR2_OFFSET 0x4520 /* SBAR45BASE */
+
+#define SKX_DB_COUNT 32
+#define SKX_DB_LINK 32
+#define SKX_DB_LINK_BIT BIT_ULL(SKX_DB_LINK)
+#define SKX_DB_MSIX_VECTOR_COUNT 33
+#define SKX_DB_MSIX_VECTOR_SHIFT 1
+#define SKX_DB_TOTAL_SHIFT 33
+#define SKX_SPAD_COUNT 16
+
/* Intel Atom hardware */
#define ATOM_SBAR2XLAT_OFFSET 0x0008
@@ -240,6 +286,7 @@
#define NTB_HWERR_SDOORBELL_LOCKUP BIT_ULL(0)
#define NTB_HWERR_SB01BASE_LOCKUP BIT_ULL(1)
#define NTB_HWERR_B2BDOORBELL_BIT14 BIT_ULL(2)
+#define NTB_HWERR_MSIX_VECTOR32_BAD BIT_ULL(3)
/* flags to indicate unsafe api */
#define NTB_UNSAFE_DB BIT_ULL(0)
@@ -263,6 +310,7 @@ struct intel_ntb_reg {
struct intel_ntb_alt_reg {
unsigned long db_bell;
unsigned long db_mask;
+ unsigned long db_clear;
unsigned long spad;
};
diff --git a/drivers/ntb/ntb_transport.c b/drivers/ntb/ntb_transport.c
index 4eb8adb34508..f81aa4b18d9f 100644
--- a/drivers/ntb/ntb_transport.c
+++ b/drivers/ntb/ntb_transport.c
@@ -66,6 +66,7 @@
#define NTB_TRANSPORT_VER "4"
#define NTB_TRANSPORT_NAME "ntb_transport"
#define NTB_TRANSPORT_DESC "Software Queue-Pair Transport over NTB"
+#define NTB_TRANSPORT_MIN_SPADS (MW0_SZ_HIGH + 2)
MODULE_DESCRIPTION(NTB_TRANSPORT_DESC);
MODULE_VERSION(NTB_TRANSPORT_VER);
@@ -242,9 +243,6 @@ enum {
NUM_MWS,
MW0_SZ_HIGH,
MW0_SZ_LOW,
- MW1_SZ_HIGH,
- MW1_SZ_LOW,
- MAX_SPAD,
};
#define dev_client_dev(__dev) \
@@ -811,7 +809,7 @@ static void ntb_transport_link_cleanup(struct ntb_transport_ctx *nt)
{
struct ntb_transport_qp *qp;
u64 qp_bitmap_alloc;
- int i;
+ unsigned int i, count;
qp_bitmap_alloc = nt->qp_bitmap & ~nt->qp_bitmap_free;
@@ -831,7 +829,8 @@ static void ntb_transport_link_cleanup(struct ntb_transport_ctx *nt)
* goes down, blast them now to give them a sane value the next
* time they are accessed
*/
- for (i = 0; i < MAX_SPAD; i++)
+ count = ntb_spad_count(nt->ndev);
+ for (i = 0; i < count; i++)
ntb_spad_write(nt->ndev, i, 0);
}
@@ -960,7 +959,6 @@ static void ntb_qp_link_work(struct work_struct *work)
ntb_peer_spad_write(nt->ndev, QP_LINKS, val | BIT(qp->qp_num));
/* query remote spad for qp ready bits */
- ntb_peer_spad_read(nt->ndev, QP_LINKS);
dev_dbg_ratelimited(&pdev->dev, "Remote QP link status = %x\n", val);
/* See if the remote side is up */
@@ -1064,17 +1062,12 @@ static int ntb_transport_probe(struct ntb_client *self, struct ntb_dev *ndev)
{
struct ntb_transport_ctx *nt;
struct ntb_transport_mw *mw;
- unsigned int mw_count, qp_count;
+ unsigned int mw_count, qp_count, spad_count, max_mw_count_for_spads;
u64 qp_bitmap;
int node;
int rc, i;
mw_count = ntb_mw_count(ndev);
- if (ntb_spad_count(ndev) < (NUM_MWS + 1 + mw_count * 2)) {
- dev_err(&ndev->dev, "Not enough scratch pad registers for %s",
- NTB_TRANSPORT_NAME);
- return -EIO;
- }
if (ntb_db_is_unsafe(ndev))
dev_dbg(&ndev->dev,
@@ -1090,8 +1083,18 @@ static int ntb_transport_probe(struct ntb_client *self, struct ntb_dev *ndev)
return -ENOMEM;
nt->ndev = ndev;
+ spad_count = ntb_spad_count(ndev);
+
+ /* Limit the MWs based on the availability of scratchpads */
+
+ if (spad_count < NTB_TRANSPORT_MIN_SPADS) {
+ nt->mw_count = 0;
+ rc = -EINVAL;
+ goto err;
+ }
- nt->mw_count = mw_count;
+ max_mw_count_for_spads = (spad_count - MW0_SZ_HIGH) / 2;
+ nt->mw_count = min(mw_count, max_mw_count_for_spads);
nt->mw_vec = kzalloc_node(mw_count * sizeof(*nt->mw_vec),
GFP_KERNEL, node);
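
Each memory window advertises its size to the peer through a high/low pair of scratchpads starting at MW0_SZ_HIGH, so (spad_count - MW0_SZ_HIGH) / 2 is the number of windows the scratchpad space can describe. A worked sketch of the clamp, assuming MW0_SZ_HIGH is 4 (VERSION, QP_LINKS, NUM_QPS and NUM_MWS precede it in the full enum):

#include <stdio.h>

enum { MW0_SZ_HIGH = 4 }; /* assumed index of the first MW size spad */

static unsigned int usable_mws(unsigned int mw_count, unsigned int spad_count)
{
	unsigned int max_for_spads = (spad_count - MW0_SZ_HIGH) / 2;

	return mw_count < max_for_spads ? mw_count : max_for_spads;
}

int main(void)
{
	/* 16 scratchpads leave room for (16 - 4) / 2 = 6 windows */
	printf("%u\n", usable_mws(2, 16)); /* 2 */
	printf("%u\n", usable_mws(8, 16)); /* 6 */
	return 0;
}
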
diff --git a/drivers/nvdimm/Kconfig b/drivers/nvdimm/Kconfig
index 124c2432ac9c..59e750183b7f 100644
--- a/drivers/nvdimm/Kconfig
+++ b/drivers/nvdimm/Kconfig
@@ -28,7 +28,7 @@ config BLK_DEV_PMEM
non-standard OEM-specific E820 memory type (type-12, see
CONFIG_X86_PMEM_LEGACY), or it is manually specified by the
'memmap=nn[KMG]!ss[KMG]' kernel command line (see
- Documentation/kernel-parameters.txt). This driver converts
+ Documentation/admin-guide/kernel-parameters.rst). This driver converts
these persistent memory ranges into block devices that are
capable of DAX (direct-access) file system mappings. See
Documentation/nvdimm/nvdimm.txt for more details.
diff --git a/drivers/nvdimm/claim.c b/drivers/nvdimm/claim.c
index d5dc80c48b4c..b3323c0697f6 100644
--- a/drivers/nvdimm/claim.c
+++ b/drivers/nvdimm/claim.c
@@ -22,9 +22,8 @@ void __nd_detach_ndns(struct device *dev, struct nd_namespace_common **_ndns)
{
struct nd_namespace_common *ndns = *_ndns;
- dev_WARN_ONCE(dev, !mutex_is_locked(&ndns->dev.mutex)
- || ndns->claim != dev,
- "%s: invalid claim\n", __func__);
+ lockdep_assert_held(&ndns->dev.mutex);
+ dev_WARN_ONCE(dev, ndns->claim != dev, "%s: invalid claim\n", __func__);
ndns->claim = NULL;
*_ndns = NULL;
put_device(&ndns->dev);
@@ -49,9 +48,8 @@ bool __nd_attach_ndns(struct device *dev, struct nd_namespace_common *attach,
{
if (attach->claim)
return false;
- dev_WARN_ONCE(dev, !mutex_is_locked(&attach->dev.mutex)
- || *_ndns,
- "%s: invalid claim\n", __func__);
+ lockdep_assert_held(&attach->dev.mutex);
+ dev_WARN_ONCE(dev, *_ndns, "%s: invalid claim\n", __func__);
attach->claim = dev;
*_ndns = attach;
get_device(&attach->dev);
@@ -226,6 +224,12 @@ static int nsio_rw_bytes(struct nd_namespace_common *ndns,
resource_size_t offset, void *buf, size_t size, int rw)
{
struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
+ unsigned int sz_align = ALIGN(size + (offset & (512 - 1)), 512);
+ sector_t sector = offset >> 9;
+ int rc = 0;
+
+ if (unlikely(!size))
+ return 0;
if (unlikely(offset + size > nsio->size)) {
dev_WARN_ONCE(&ndns->dev, 1, "request out of range\n");
@@ -233,17 +237,31 @@ static int nsio_rw_bytes(struct nd_namespace_common *ndns,
}
if (rw == READ) {
- unsigned int sz_align = ALIGN(size + (offset & (512 - 1)), 512);
-
- if (unlikely(is_bad_pmem(&nsio->bb, offset / 512, sz_align)))
+ if (unlikely(is_bad_pmem(&nsio->bb, sector, sz_align)))
return -EIO;
return memcpy_from_pmem(buf, nsio->addr + offset, size);
- } else {
- memcpy_to_pmem(nsio->addr + offset, buf, size);
- nvdimm_flush(to_nd_region(ndns->dev.parent));
}
- return 0;
+ if (unlikely(is_bad_pmem(&nsio->bb, sector, sz_align))) {
+ if (IS_ALIGNED(offset, 512) && IS_ALIGNED(size, 512)) {
+ long cleared;
+
+ cleared = nvdimm_clear_poison(&ndns->dev, offset, size);
+ if (cleared < size)
+ rc = -EIO;
+ if (cleared > 0 && cleared / 512) {
+ cleared /= 512;
+ badblocks_clear(&nsio->bb, sector, cleared);
+ }
+ invalidate_pmem(nsio->addr + offset, size);
+ } else
+ rc = -EIO;
+ }
+
+ memcpy_to_pmem(nsio->addr + offset, buf, size);
+ nvdimm_flush(to_nd_region(ndns->dev.parent));
+
+ return rc;
}
int devm_nsio_enable(struct device *dev, struct nd_namespace_io *nsio)
@@ -253,7 +271,7 @@ int devm_nsio_enable(struct device *dev, struct nd_namespace_io *nsio)
nsio->size = resource_size(res);
if (!devm_request_mem_region(dev, res->start, resource_size(res),
- dev_name(dev))) {
+ dev_name(&ndns->dev))) {
dev_warn(dev, "could not reserve region %pR\n", res);
return -EBUSY;
}
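
The reworked write path above turns the byte offset into a 512-byte sector number (offset >> 9), rounds the span up to whole sectors before consulting the badblocks list, and only attempts to clear poison when both offset and size are sector-aligned. The arithmetic in isolation:

#include <stdio.h>

#define SECT 512ULL

int main(void)
{
	unsigned long long offset = 1536, size = 700;

	unsigned long long sector = offset >> 9;
	/* ALIGN(size + (offset & 511), 512): widen to whole sectors */
	unsigned long long sz_align =
		(size + (offset & (SECT - 1)) + SECT - 1) / SECT * SECT;
	int clearable = !(offset % SECT) && !(size % SECT);

	/* prints: sector=3 span=1024 clearable=0 */
	printf("sector=%llu span=%llu clearable=%d\n",
	       sector, sz_align, clearable);
	return 0;
}
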
diff --git a/drivers/nvdimm/core.c b/drivers/nvdimm/core.c
index 7ceba08774b6..9303cfeb8bee 100644
--- a/drivers/nvdimm/core.c
+++ b/drivers/nvdimm/core.c
@@ -317,35 +317,6 @@ ssize_t nd_sector_size_store(struct device *dev, const char *buf,
}
}
-void __nd_iostat_start(struct bio *bio, unsigned long *start)
-{
- struct gendisk *disk = bio->bi_bdev->bd_disk;
- const int rw = bio_data_dir(bio);
- int cpu = part_stat_lock();
-
- *start = jiffies;
- part_round_stats(cpu, &disk->part0);
- part_stat_inc(cpu, &disk->part0, ios[rw]);
- part_stat_add(cpu, &disk->part0, sectors[rw], bio_sectors(bio));
- part_inc_in_flight(&disk->part0, rw);
- part_stat_unlock();
-}
-EXPORT_SYMBOL(__nd_iostat_start);
-
-void nd_iostat_end(struct bio *bio, unsigned long start)
-{
- struct gendisk *disk = bio->bi_bdev->bd_disk;
- unsigned long duration = jiffies - start;
- const int rw = bio_data_dir(bio);
- int cpu = part_stat_lock();
-
- part_stat_add(cpu, &disk->part0, ticks[rw], duration);
- part_round_stats(cpu, &disk->part0);
- part_dec_in_flight(&disk->part0, rw);
- part_stat_unlock();
-}
-EXPORT_SYMBOL(nd_iostat_end);
-
static ssize_t commands_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
diff --git a/drivers/nvdimm/dimm.c b/drivers/nvdimm/dimm.c
index 619834e144d1..ee0b412827bf 100644
--- a/drivers/nvdimm/dimm.c
+++ b/drivers/nvdimm/dimm.c
@@ -64,6 +64,8 @@ static int nvdimm_probe(struct device *dev)
nd_label_copy(ndd, to_next_namespace_index(ndd),
to_current_namespace_index(ndd));
rc = nd_label_reserve_dpa(ndd);
+ if (ndd->ns_current >= 0)
+ nvdimm_set_aliasing(dev);
nvdimm_bus_unlock(dev);
if (rc)
diff --git a/drivers/nvdimm/dimm_devs.c b/drivers/nvdimm/dimm_devs.c
index d614493ad5ac..0eedc49e0d47 100644
--- a/drivers/nvdimm/dimm_devs.c
+++ b/drivers/nvdimm/dimm_devs.c
@@ -184,6 +184,13 @@ int nvdimm_set_config_data(struct nvdimm_drvdata *ndd, size_t offset,
return rc;
}
+void nvdimm_set_aliasing(struct device *dev)
+{
+ struct nvdimm *nvdimm = to_nvdimm(dev);
+
+ nvdimm->flags |= NDD_ALIASING;
+}
+
static void nvdimm_release(struct device *dev)
{
struct nvdimm *nvdimm = to_nvdimm(dev);
diff --git a/drivers/nvdimm/e820.c b/drivers/nvdimm/e820.c
index 11ea90120542..6f9a6ffd7cde 100644
--- a/drivers/nvdimm/e820.c
+++ b/drivers/nvdimm/e820.c
@@ -84,18 +84,8 @@ static struct platform_driver e820_pmem_driver = {
},
};
-static __init int e820_pmem_init(void)
-{
- return platform_driver_register(&e820_pmem_driver);
-}
-
-static __exit void e820_pmem_exit(void)
-{
- platform_driver_unregister(&e820_pmem_driver);
-}
+module_platform_driver(e820_pmem_driver);
MODULE_ALIAS("platform:e820_pmem*");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Intel Corporation");
-module_init(e820_pmem_init);
-module_exit(e820_pmem_exit);
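
module_platform_driver() generates exactly the boilerplate removed above; expanded by hand (a paraphrase of the module_driver() helper, not its literal output), the conversion is equivalent to:

/* Roughly what module_platform_driver(e820_pmem_driver) produces: */
static int __init e820_pmem_driver_init(void)
{
	return platform_driver_register(&e820_pmem_driver);
}
module_init(e820_pmem_driver_init);

static void __exit e820_pmem_driver_exit(void)
{
	platform_driver_unregister(&e820_pmem_driver);
}
module_exit(e820_pmem_driver_exit);
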
diff --git a/drivers/nvdimm/label.c b/drivers/nvdimm/label.c
index fac7cabe8f56..dd615345699f 100644
--- a/drivers/nvdimm/label.c
+++ b/drivers/nvdimm/label.c
@@ -938,7 +938,7 @@ int nd_pmem_namespace_label_update(struct nd_region *nd_region,
}
for_each_dpa_resource(ndd, res)
- if (strncmp(res->name, "pmem", 3) == 0)
+ if (strncmp(res->name, "pmem", 4) == 0)
count++;
WARN_ON_ONCE(!count);
diff --git a/drivers/nvdimm/namespace_devs.c b/drivers/nvdimm/namespace_devs.c
index abe5c6bc756c..6307088b375f 100644
--- a/drivers/nvdimm/namespace_devs.c
+++ b/drivers/nvdimm/namespace_devs.c
@@ -1132,7 +1132,7 @@ static ssize_t size_show(struct device *dev,
return sprintf(buf, "%llu\n", (unsigned long long)
nvdimm_namespace_capacity(to_ndns(dev)));
}
-static DEVICE_ATTR(size, S_IRUGO, size_show, size_store);
+static DEVICE_ATTR(size, 0444, size_show, size_store);
static u8 *namespace_to_uuid(struct device *dev)
{
@@ -1456,7 +1456,7 @@ static umode_t namespace_visible(struct kobject *kobj,
if (is_namespace_pmem(dev) || is_namespace_blk(dev)) {
if (a == &dev_attr_size.attr)
- return S_IWUSR | S_IRUGO;
+ return 0644;
if (is_namespace_pmem(dev) && a == &dev_attr_sector_size.attr)
return 0;
@@ -1653,7 +1653,7 @@ static int select_pmem_id(struct nd_region *nd_region, u8 *pmem_id)
u64 hw_start, hw_end, pmem_start, pmem_end;
struct nd_label_ent *label_ent;
- WARN_ON(!mutex_is_locked(&nd_mapping->lock));
+ lockdep_assert_held(&nd_mapping->lock);
list_for_each_entry(label_ent, &nd_mapping->labels, list) {
nd_label = label_ent->label;
if (!nd_label)
@@ -1997,7 +1997,7 @@ struct device *create_namespace_blk(struct nd_region *nd_region,
struct nd_mapping *nd_mapping = &nd_region->mapping[0];
struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
struct nd_namespace_blk *nsblk;
- char *name[NSLABEL_NAME_LEN];
+ char name[NSLABEL_NAME_LEN];
struct device *dev = NULL;
struct resource *res;
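
The one-character fix above matters more than it looks: char *name[NSLABEL_NAME_LEN] declares an array of 64 pointers rather than the intended 64-byte string buffer, so sizeof() on it no longer means the string capacity. A sketch of the difference, assuming the header's NSLABEL_NAME_LEN of 64:

#include <stdio.h>

#define NSLABEL_NAME_LEN 64 /* value assumed from nd.h */

int main(void)
{
	char *wrong[NSLABEL_NAME_LEN]; /* 64 pointers: 512 bytes on LP64 */
	char right[NSLABEL_NAME_LEN];  /* 64 bytes, as intended */

	printf("%zu vs %zu\n", sizeof(wrong), sizeof(right));
	return 0;
}
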
diff --git a/drivers/nvdimm/nd.h b/drivers/nvdimm/nd.h
index d3b2fca8deec..35dd75057e16 100644
--- a/drivers/nvdimm/nd.h
+++ b/drivers/nvdimm/nd.h
@@ -238,6 +238,7 @@ int nvdimm_set_config_data(struct nvdimm_drvdata *ndd, size_t offset,
void *buf, size_t len);
long nvdimm_clear_poison(struct device *dev, phys_addr_t phys,
unsigned int len);
+void nvdimm_set_aliasing(struct device *dev);
struct nd_btt *to_nd_btt(struct device *dev);
struct nd_gen_sb {
@@ -377,10 +378,17 @@ static inline bool nd_iostat_start(struct bio *bio, unsigned long *start)
if (!blk_queue_io_stat(disk->queue))
return false;
- __nd_iostat_start(bio, start);
+ *start = jiffies;
+ generic_start_io_acct(bio_data_dir(bio),
+ bio_sectors(bio), &disk->part0);
return true;
}
-void nd_iostat_end(struct bio *bio, unsigned long start);
+static inline void nd_iostat_end(struct bio *bio, unsigned long start)
+{
+ struct gendisk *disk = bio->bi_bdev->bd_disk;
+
+ generic_end_io_acct(bio_data_dir(bio), &disk->part0, start);
+}
static inline bool is_bad_pmem(struct badblocks *bb, sector_t sector,
unsigned int len)
{
diff --git a/drivers/nvdimm/pfn_devs.c b/drivers/nvdimm/pfn_devs.c
index cea8350fbc7e..a2ac9e641aa9 100644
--- a/drivers/nvdimm/pfn_devs.c
+++ b/drivers/nvdimm/pfn_devs.c
@@ -108,7 +108,7 @@ static ssize_t align_show(struct device *dev,
{
struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev);
- return sprintf(buf, "%lx\n", nd_pfn->align);
+ return sprintf(buf, "%ld\n", nd_pfn->align);
}
static ssize_t __align_store(struct nd_pfn *nd_pfn, const char *buf)
diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c
index 24618431a14b..7282d7495bf1 100644
--- a/drivers/nvdimm/pmem.c
+++ b/drivers/nvdimm/pmem.c
@@ -53,21 +53,24 @@ static int pmem_clear_poison(struct pmem_device *pmem, phys_addr_t offset,
struct device *dev = to_dev(pmem);
sector_t sector;
long cleared;
+ int rc = 0;
sector = (offset - pmem->data_offset) / 512;
- cleared = nvdimm_clear_poison(dev, pmem->phys_addr + offset, len);
+ cleared = nvdimm_clear_poison(dev, pmem->phys_addr + offset, len);
+ if (cleared < len)
+ rc = -EIO;
if (cleared > 0 && cleared / 512) {
- dev_dbg(dev, "%s: %#llx clear %ld sector%s\n",
- __func__, (unsigned long long) sector,
- cleared / 512, cleared / 512 > 1 ? "s" : "");
- badblocks_clear(&pmem->bb, sector, cleared / 512);
- } else {
- return -EIO;
+ cleared /= 512;
+ dev_dbg(dev, "%s: %#llx clear %ld sector%s\n", __func__,
+ (unsigned long long) sector, cleared,
+ cleared > 1 ? "s" : "");
+ badblocks_clear(&pmem->bb, sector, cleared);
}
invalidate_pmem(pmem->virt_addr + offset, len);
- return 0;
+
+ return rc;
}
static void write_pmem(void *pmem_addr, struct page *page,
@@ -270,7 +273,7 @@ static int pmem_attach_disk(struct device *dev,
dev_warn(dev, "unable to guarantee persistence of writes\n");
if (!devm_request_mem_region(dev, res->start, resource_size(res),
- dev_name(dev))) {
+ dev_name(&ndns->dev))) {
dev_warn(dev, "could not reserve region %pR\n", res);
return -EBUSY;
}
diff --git a/drivers/nvdimm/region_devs.c b/drivers/nvdimm/region_devs.c
index 6af5e629140c..7cd705f3247c 100644
--- a/drivers/nvdimm/region_devs.c
+++ b/drivers/nvdimm/region_devs.c
@@ -509,7 +509,7 @@ void nd_mapping_free_labels(struct nd_mapping *nd_mapping)
{
struct nd_label_ent *label_ent, *e;
- WARN_ON(!mutex_is_locked(&nd_mapping->lock));
+ lockdep_assert_held(&nd_mapping->lock);
list_for_each_entry_safe(label_ent, e, &nd_mapping->labels, list) {
list_del(&label_ent->list);
kfree(label_ent);
diff --git a/drivers/nvme/host/Kconfig b/drivers/nvme/host/Kconfig
index f7d37a62f874..90745a616df7 100644
--- a/drivers/nvme/host/Kconfig
+++ b/drivers/nvme/host/Kconfig
@@ -43,3 +43,20 @@ config NVME_RDMA
from https://github.com/linux-nvme/nvme-cli.
If unsure, say N.
+
+config NVME_FC
+ tristate "NVM Express over Fabrics FC host driver"
+ depends on BLOCK
+ depends on HAS_DMA
+ select NVME_CORE
+ select NVME_FABRICS
+ select SG_POOL
+ help
+ This provides support for the NVMe over Fabrics protocol using
+ the FC transport. This allows you to use remote block devices
+ exported using the NVMe protocol set.
+
+ To configure an NVMe over Fabrics controller, use the nvme-cli tool
+ from https://github.com/linux-nvme/nvme-cli.
+
+ If unsure, say N.
diff --git a/drivers/nvme/host/Makefile b/drivers/nvme/host/Makefile
index 47abcec23514..f1a7d945fbb6 100644
--- a/drivers/nvme/host/Makefile
+++ b/drivers/nvme/host/Makefile
@@ -2,6 +2,7 @@ obj-$(CONFIG_NVME_CORE) += nvme-core.o
obj-$(CONFIG_BLK_DEV_NVME) += nvme.o
obj-$(CONFIG_NVME_FABRICS) += nvme-fabrics.o
obj-$(CONFIG_NVME_RDMA) += nvme-rdma.o
+obj-$(CONFIG_NVME_FC) += nvme-fc.o
nvme-core-y := core.o
nvme-core-$(CONFIG_BLK_DEV_NVME_SCSI) += scsi.o
@@ -12,3 +13,5 @@ nvme-y += pci.o
nvme-fabrics-y += fabrics.o
nvme-rdma-y += rdma.o
+
+nvme-fc-y += fc.o
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 79e679d12f3b..b40cfb076f02 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -201,13 +201,7 @@ fail:
void nvme_requeue_req(struct request *req)
{
- unsigned long flags;
-
- blk_mq_requeue_request(req);
- spin_lock_irqsave(req->q->queue_lock, flags);
- if (!blk_queue_stopped(req->q))
- blk_mq_kick_requeue_list(req->q);
- spin_unlock_irqrestore(req->q->queue_lock, flags);
+ blk_mq_requeue_request(req, !blk_mq_queue_stopped(req->q));
}
EXPORT_SYMBOL_GPL(nvme_requeue_req);
@@ -227,8 +221,7 @@ struct request *nvme_alloc_request(struct request_queue *q,
req->cmd_type = REQ_TYPE_DRV_PRIV;
req->cmd_flags |= REQ_FAILFAST_DRIVER;
- req->cmd = (unsigned char *)cmd;
- req->cmd_len = sizeof(struct nvme_command);
+ nvme_req(req)->cmd = cmd;
return req;
}
@@ -246,8 +239,6 @@ static inline int nvme_setup_discard(struct nvme_ns *ns, struct request *req,
struct nvme_command *cmnd)
{
struct nvme_dsm_range *range;
- struct page *page;
- int offset;
unsigned int nr_bytes = blk_rq_bytes(req);
range = kmalloc(sizeof(*range), GFP_ATOMIC);
@@ -264,19 +255,12 @@ static inline int nvme_setup_discard(struct nvme_ns *ns, struct request *req,
cmnd->dsm.nr = 0;
cmnd->dsm.attributes = cpu_to_le32(NVME_DSMGMT_AD);
- req->completion_data = range;
- page = virt_to_page(range);
- offset = offset_in_page(range);
- blk_add_request_payload(req, page, offset, sizeof(*range));
-
- /*
- * we set __data_len back to the size of the area to be discarded
- * on disk. This allows us to report completion on the full amount
- * of blocks described by the request.
- */
- req->__data_len = nr_bytes;
+ req->special_vec.bv_page = virt_to_page(range);
+ req->special_vec.bv_offset = offset_in_page(range);
+ req->special_vec.bv_len = sizeof(*range);
+ req->rq_flags |= RQF_SPECIAL_PAYLOAD;
- return 0;
+ return BLK_MQ_RQ_QUEUE_OK;
}
static inline void nvme_setup_rw(struct nvme_ns *ns, struct request *req,
@@ -295,7 +279,6 @@ static inline void nvme_setup_rw(struct nvme_ns *ns, struct request *req,
memset(cmnd, 0, sizeof(*cmnd));
cmnd->rw.opcode = (rq_data_dir(req) ? nvme_cmd_write : nvme_cmd_read);
- cmnd->rw.command_id = req->tag;
cmnd->rw.nsid = cpu_to_le32(ns->ns_id);
cmnd->rw.slba = cpu_to_le64(nvme_block_nr(ns, blk_rq_pos(req)));
cmnd->rw.length = cpu_to_le16((blk_rq_bytes(req) >> ns->lba_shift) - 1);
@@ -324,10 +307,10 @@ static inline void nvme_setup_rw(struct nvme_ns *ns, struct request *req,
int nvme_setup_cmd(struct nvme_ns *ns, struct request *req,
struct nvme_command *cmd)
{
- int ret = 0;
+ int ret = BLK_MQ_RQ_QUEUE_OK;
if (req->cmd_type == REQ_TYPE_DRV_PRIV)
- memcpy(cmd, req->cmd, sizeof(*cmd));
+ memcpy(cmd, nvme_req(req)->cmd, sizeof(*cmd));
else if (req_op(req) == REQ_OP_FLUSH)
nvme_setup_flush(ns, cmd);
else if (req_op(req) == REQ_OP_DISCARD)
@@ -335,6 +318,8 @@ int nvme_setup_cmd(struct nvme_ns *ns, struct request *req,
else
nvme_setup_rw(ns, req, cmd);
+ cmd->common.command_id = req->tag;
+
return ret;
}
EXPORT_SYMBOL_GPL(nvme_setup_cmd);
@@ -344,7 +329,7 @@ EXPORT_SYMBOL_GPL(nvme_setup_cmd);
* if the result is positive, it's an NVM Express status code
*/
int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
- struct nvme_completion *cqe, void *buffer, unsigned bufflen,
+ union nvme_result *result, void *buffer, unsigned bufflen,
unsigned timeout, int qid, int at_head, int flags)
{
struct request *req;
@@ -355,7 +340,6 @@ int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
return PTR_ERR(req);
req->timeout = timeout ? timeout : ADMIN_TIMEOUT;
- req->special = cqe;
if (buffer && bufflen) {
ret = blk_rq_map_kern(q, req, buffer, bufflen, GFP_KERNEL);
@@ -364,6 +348,8 @@ int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
}
blk_execute_rq(req->q, NULL, req, at_head);
+ if (result)
+ *result = nvme_req(req)->result;
ret = req->errors;
out:
blk_mq_free_request(req);
@@ -385,7 +371,6 @@ int __nvme_submit_user_cmd(struct request_queue *q, struct nvme_command *cmd,
u32 *result, unsigned timeout)
{
bool write = nvme_is_write(cmd);
- struct nvme_completion cqe;
struct nvme_ns *ns = q->queuedata;
struct gendisk *disk = ns ? ns->disk : NULL;
struct request *req;
@@ -398,7 +383,6 @@ int __nvme_submit_user_cmd(struct request_queue *q, struct nvme_command *cmd,
return PTR_ERR(req);
req->timeout = timeout ? timeout : ADMIN_TIMEOUT;
- req->special = &cqe;
if (ubuffer && bufflen) {
ret = blk_rq_map_user(q, req, NULL, ubuffer, bufflen,
@@ -453,7 +437,7 @@ int __nvme_submit_user_cmd(struct request_queue *q, struct nvme_command *cmd,
blk_execute_rq(req->q, disk, req, 0);
ret = req->errors;
if (result)
- *result = le32_to_cpu(cqe.result);
+ *result = le32_to_cpu(nvme_req(req)->result.u32);
if (meta && !ret && !write) {
if (copy_to_user(meta_buffer, meta, meta_len))
ret = -EFAULT;
@@ -602,7 +586,7 @@ int nvme_get_features(struct nvme_ctrl *dev, unsigned fid, unsigned nsid,
void *buffer, size_t buflen, u32 *result)
{
struct nvme_command c;
- struct nvme_completion cqe;
+ union nvme_result res;
int ret;
memset(&c, 0, sizeof(c));
@@ -610,10 +594,10 @@ int nvme_get_features(struct nvme_ctrl *dev, unsigned fid, unsigned nsid,
c.features.nsid = cpu_to_le32(nsid);
c.features.fid = cpu_to_le32(fid);
- ret = __nvme_submit_sync_cmd(dev->admin_q, &c, &cqe, buffer, buflen, 0,
+ ret = __nvme_submit_sync_cmd(dev->admin_q, &c, &res, buffer, buflen, 0,
NVME_QID_ANY, 0, 0);
if (ret >= 0 && result)
- *result = le32_to_cpu(cqe.result);
+ *result = le32_to_cpu(res.u32);
return ret;
}
@@ -621,7 +605,7 @@ int nvme_set_features(struct nvme_ctrl *dev, unsigned fid, unsigned dword11,
void *buffer, size_t buflen, u32 *result)
{
struct nvme_command c;
- struct nvme_completion cqe;
+ union nvme_result res;
int ret;
memset(&c, 0, sizeof(c));
@@ -629,10 +613,10 @@ int nvme_set_features(struct nvme_ctrl *dev, unsigned fid, unsigned dword11,
c.features.fid = cpu_to_le32(fid);
c.features.dword11 = cpu_to_le32(dword11);
- ret = __nvme_submit_sync_cmd(dev->admin_q, &c, &cqe,
+ ret = __nvme_submit_sync_cmd(dev->admin_q, &c, &res,
buffer, buflen, 0, NVME_QID_ANY, 0, 0);
if (ret >= 0 && result)
- *result = le32_to_cpu(cqe.result);
+ *result = le32_to_cpu(res.u32);
return ret;
}
@@ -1683,27 +1667,24 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
if (nvme_revalidate_ns(ns, &id))
goto out_free_queue;
- if (nvme_nvm_ns_supported(ns, id)) {
- if (nvme_nvm_register(ns, disk_name, node,
- &nvme_ns_attr_group)) {
- dev_warn(ctrl->dev, "%s: LightNVM init failure\n",
- __func__);
- goto out_free_id;
- }
- } else {
- disk = alloc_disk_node(0, node);
- if (!disk)
- goto out_free_id;
+ if (nvme_nvm_ns_supported(ns, id) &&
+ nvme_nvm_register(ns, disk_name, node)) {
+ dev_warn(ctrl->dev, "%s: LightNVM init failure\n", __func__);
+ goto out_free_id;
+ }
- disk->fops = &nvme_fops;
- disk->private_data = ns;
- disk->queue = ns->queue;
- disk->flags = GENHD_FL_EXT_DEVT;
- memcpy(disk->disk_name, disk_name, DISK_NAME_LEN);
- ns->disk = disk;
+ disk = alloc_disk_node(0, node);
+ if (!disk)
+ goto out_free_id;
- __nvme_revalidate_disk(disk, id);
- }
+ disk->fops = &nvme_fops;
+ disk->private_data = ns;
+ disk->queue = ns->queue;
+ disk->flags = GENHD_FL_EXT_DEVT;
+ memcpy(disk->disk_name, disk_name, DISK_NAME_LEN);
+ ns->disk = disk;
+
+ __nvme_revalidate_disk(disk, id);
mutex_lock(&ctrl->namespaces_mutex);
list_add_tail(&ns->list, &ctrl->namespaces);
@@ -1713,14 +1694,14 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
kfree(id);
- if (ns->ndev)
- return;
-
device_add_disk(ctrl->device, ns->disk);
if (sysfs_create_group(&disk_to_dev(ns->disk)->kobj,
&nvme_ns_attr_group))
pr_warn("%s: failed to create sysfs group for identification\n",
ns->disk->disk_name);
+ if (ns->ndev && nvme_nvm_register_sysfs(ns))
+ pr_warn("%s: failed to register lightnvm sysfs group for identification\n",
+ ns->disk->disk_name);
return;
out_free_id:
kfree(id);
@@ -1742,6 +1723,8 @@ static void nvme_ns_remove(struct nvme_ns *ns)
blk_integrity_unregister(ns->disk);
sysfs_remove_group(&disk_to_dev(ns->disk)->kobj,
&nvme_ns_attr_group);
+ if (ns->ndev)
+ nvme_nvm_unregister_sysfs(ns);
del_gendisk(ns->disk);
blk_mq_abort_requeue_list(ns->queue);
blk_cleanup_queue(ns->queue);
@@ -1905,18 +1888,25 @@ static void nvme_async_event_work(struct work_struct *work)
spin_unlock_irq(&ctrl->lock);
}
-void nvme_complete_async_event(struct nvme_ctrl *ctrl,
- struct nvme_completion *cqe)
+void nvme_complete_async_event(struct nvme_ctrl *ctrl, __le16 status,
+ union nvme_result *res)
{
- u16 status = le16_to_cpu(cqe->status) >> 1;
- u32 result = le32_to_cpu(cqe->result);
+ u32 result = le32_to_cpu(res->u32);
+ bool done = true;
- if (status == NVME_SC_SUCCESS || status == NVME_SC_ABORT_REQ) {
+ switch (le16_to_cpu(status) >> 1) {
+ case NVME_SC_SUCCESS:
+ done = false;
+ /*FALLTHRU*/
+ case NVME_SC_ABORT_REQ:
++ctrl->event_limit;
schedule_work(&ctrl->async_event_work);
+ break;
+ default:
+ break;
}
- if (status != NVME_SC_SUCCESS)
+ if (done)
return;
switch (result & 0xff07) {
@@ -2078,14 +2068,8 @@ void nvme_stop_queues(struct nvme_ctrl *ctrl)
struct nvme_ns *ns;
mutex_lock(&ctrl->namespaces_mutex);
- list_for_each_entry(ns, &ctrl->namespaces, list) {
- spin_lock_irq(ns->queue->queue_lock);
- queue_flag_set(QUEUE_FLAG_STOPPED, ns->queue);
- spin_unlock_irq(ns->queue->queue_lock);
-
- blk_mq_cancel_requeue_work(ns->queue);
- blk_mq_stop_hw_queues(ns->queue);
- }
+ list_for_each_entry(ns, &ctrl->namespaces, list)
+ blk_mq_quiesce_queue(ns->queue);
mutex_unlock(&ctrl->namespaces_mutex);
}
EXPORT_SYMBOL_GPL(nvme_stop_queues);
@@ -2096,7 +2080,6 @@ void nvme_start_queues(struct nvme_ctrl *ctrl)
mutex_lock(&ctrl->namespaces_mutex);
list_for_each_entry(ns, &ctrl->namespaces, list) {
- queue_flag_clear_unlocked(QUEUE_FLAG_STOPPED, ns->queue);
blk_mq_start_stopped_hw_queues(ns->queue, true);
blk_mq_kick_requeue_list(ns->queue);
}
diff --git a/drivers/nvme/host/fabrics.c b/drivers/nvme/host/fabrics.c
index 5a3f008d3480..916d13608059 100644
--- a/drivers/nvme/host/fabrics.c
+++ b/drivers/nvme/host/fabrics.c
@@ -161,7 +161,7 @@ EXPORT_SYMBOL_GPL(nvmf_get_subsysnqn);
int nvmf_reg_read32(struct nvme_ctrl *ctrl, u32 off, u32 *val)
{
struct nvme_command cmd;
- struct nvme_completion cqe;
+ union nvme_result res;
int ret;
memset(&cmd, 0, sizeof(cmd));
@@ -169,11 +169,11 @@ int nvmf_reg_read32(struct nvme_ctrl *ctrl, u32 off, u32 *val)
cmd.prop_get.fctype = nvme_fabrics_type_property_get;
cmd.prop_get.offset = cpu_to_le32(off);
- ret = __nvme_submit_sync_cmd(ctrl->admin_q, &cmd, &cqe, NULL, 0, 0,
+ ret = __nvme_submit_sync_cmd(ctrl->admin_q, &cmd, &res, NULL, 0, 0,
NVME_QID_ANY, 0, 0);
if (ret >= 0)
- *val = le64_to_cpu(cqe.result64);
+ *val = le64_to_cpu(res.u64);
if (unlikely(ret != 0))
dev_err(ctrl->device,
"Property Get error: %d, offset %#x\n",
@@ -207,7 +207,7 @@ EXPORT_SYMBOL_GPL(nvmf_reg_read32);
int nvmf_reg_read64(struct nvme_ctrl *ctrl, u32 off, u64 *val)
{
struct nvme_command cmd;
- struct nvme_completion cqe;
+ union nvme_result res;
int ret;
memset(&cmd, 0, sizeof(cmd));
@@ -216,11 +216,11 @@ int nvmf_reg_read64(struct nvme_ctrl *ctrl, u32 off, u64 *val)
cmd.prop_get.attrib = 1;
cmd.prop_get.offset = cpu_to_le32(off);
- ret = __nvme_submit_sync_cmd(ctrl->admin_q, &cmd, &cqe, NULL, 0, 0,
+ ret = __nvme_submit_sync_cmd(ctrl->admin_q, &cmd, &res, NULL, 0, 0,
NVME_QID_ANY, 0, 0);
if (ret >= 0)
- *val = le64_to_cpu(cqe.result64);
+ *val = le64_to_cpu(res.u64);
if (unlikely(ret != 0))
dev_err(ctrl->device,
"Property Get error: %d, offset %#x\n",
@@ -368,7 +368,7 @@ static void nvmf_log_connect_error(struct nvme_ctrl *ctrl,
int nvmf_connect_admin_queue(struct nvme_ctrl *ctrl)
{
struct nvme_command cmd;
- struct nvme_completion cqe;
+ union nvme_result res;
struct nvmf_connect_data *data;
int ret;
@@ -400,16 +400,16 @@ int nvmf_connect_admin_queue(struct nvme_ctrl *ctrl)
strncpy(data->subsysnqn, ctrl->opts->subsysnqn, NVMF_NQN_SIZE);
strncpy(data->hostnqn, ctrl->opts->host->nqn, NVMF_NQN_SIZE);
- ret = __nvme_submit_sync_cmd(ctrl->admin_q, &cmd, &cqe,
+ ret = __nvme_submit_sync_cmd(ctrl->admin_q, &cmd, &res,
data, sizeof(*data), 0, NVME_QID_ANY, 1,
BLK_MQ_REQ_RESERVED | BLK_MQ_REQ_NOWAIT);
if (ret) {
- nvmf_log_connect_error(ctrl, ret, le32_to_cpu(cqe.result),
+ nvmf_log_connect_error(ctrl, ret, le32_to_cpu(res.u32),
&cmd, data);
goto out_free_data;
}
- ctrl->cntlid = le16_to_cpu(cqe.result16);
+ ctrl->cntlid = le16_to_cpu(res.u16);
out_free_data:
kfree(data);
@@ -441,7 +441,7 @@ int nvmf_connect_io_queue(struct nvme_ctrl *ctrl, u16 qid)
{
struct nvme_command cmd;
struct nvmf_connect_data *data;
- struct nvme_completion cqe;
+ union nvme_result res;
int ret;
memset(&cmd, 0, sizeof(cmd));
@@ -459,11 +459,11 @@ int nvmf_connect_io_queue(struct nvme_ctrl *ctrl, u16 qid)
strncpy(data->subsysnqn, ctrl->opts->subsysnqn, NVMF_NQN_SIZE);
strncpy(data->hostnqn, ctrl->opts->host->nqn, NVMF_NQN_SIZE);
- ret = __nvme_submit_sync_cmd(ctrl->connect_q, &cmd, &cqe,
+ ret = __nvme_submit_sync_cmd(ctrl->connect_q, &cmd, &res,
data, sizeof(*data), 0, qid, 1,
BLK_MQ_REQ_RESERVED | BLK_MQ_REQ_NOWAIT);
if (ret) {
- nvmf_log_connect_error(ctrl, ret, le32_to_cpu(cqe.result),
+ nvmf_log_connect_error(ctrl, ret, le32_to_cpu(res.u32),
&cmd, data);
}
kfree(data);
@@ -576,7 +576,7 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts,
nqnlen = strlen(opts->subsysnqn);
if (nqnlen >= NVMF_NQN_SIZE) {
pr_err("%s needs to be < %d bytes\n",
- opts->subsysnqn, NVMF_NQN_SIZE);
+ opts->subsysnqn, NVMF_NQN_SIZE);
ret = -EINVAL;
goto out;
}
@@ -666,10 +666,12 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts,
if (nqnlen >= NVMF_NQN_SIZE) {
pr_err("%s needs to be < %d bytes\n",
p, NVMF_NQN_SIZE);
+ kfree(p);
ret = -EINVAL;
goto out;
}
opts->host = nvmf_host_add(p);
+ kfree(p);
if (!opts->host) {
ret = -ENOMEM;
goto out;
@@ -825,8 +827,7 @@ nvmf_create_ctrl(struct device *dev, const char *buf, size_t count)
out_unlock:
mutex_unlock(&nvmf_transports_mutex);
out_free_opts:
- nvmf_host_put(opts->host);
- kfree(opts);
+ nvmf_free_options(opts);
return ERR_PTR(ret);
}
diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
new file mode 100644
index 000000000000..771e2e761872
--- /dev/null
+++ b/drivers/nvme/host/fc.c
@@ -0,0 +1,2586 @@
+/*
+ * Copyright (c) 2016 Avago Technologies. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful.
+ * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND WARRANTIES,
+ * INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, FITNESS FOR A
+ * PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE DISCLAIMED, EXCEPT TO
+ * THE EXTENT THAT SUCH DISCLAIMERS ARE HELD TO BE LEGALLY INVALID.
+ * See the GNU General Public License for more details, a copy of which
+ * can be found in the file COPYING included with this package
+ *
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/module.h>
+#include <linux/parser.h>
+#include <uapi/scsi/fc/fc_fs.h>
+#include <uapi/scsi/fc/fc_els.h>
+
+#include "nvme.h"
+#include "fabrics.h"
+#include <linux/nvme-fc-driver.h>
+#include <linux/nvme-fc.h>
+
+
+/* *************************** Data Structures/Defines ****************** */
+
+
+/*
+ * We handle AEN commands ourselves and don't even let the
+ * block layer know about them.
+ */
+#define NVME_FC_NR_AEN_COMMANDS 1
+#define NVME_FC_AQ_BLKMQ_DEPTH \
+ (NVMF_AQ_DEPTH - NVME_FC_NR_AEN_COMMANDS)
+#define AEN_CMDID_BASE (NVME_FC_AQ_BLKMQ_DEPTH + 1)
+
+enum nvme_fc_queue_flags {
+ NVME_FC_Q_CONNECTED = (1 << 0),
+};
+
+#define NVMEFC_QUEUE_DELAY 3 /* ms units */
+
+struct nvme_fc_queue {
+ struct nvme_fc_ctrl *ctrl;
+ struct device *dev;
+ struct blk_mq_hw_ctx *hctx;
+ void *lldd_handle;
+ int queue_size;
+ size_t cmnd_capsule_len;
+ u32 qnum;
+ u32 rqcnt;
+ u32 seqno;
+
+ u64 connection_id;
+ atomic_t csn;
+
+ unsigned long flags;
+} __aligned(sizeof(u64)); /* alignment for other things allocated with this struct */
+
+struct nvmefc_ls_req_op {
+ struct nvmefc_ls_req ls_req;
+
+ struct nvme_fc_ctrl *ctrl;
+ struct nvme_fc_queue *queue;
+ struct request *rq;
+
+ int ls_error;
+ struct completion ls_done;
+ struct list_head lsreq_list; /* ctrl->ls_req_list */
+ bool req_queued;
+};
+
+enum nvme_fcpop_state {
+ FCPOP_STATE_UNINIT = 0,
+ FCPOP_STATE_IDLE = 1,
+ FCPOP_STATE_ACTIVE = 2,
+ FCPOP_STATE_ABORTED = 3,
+};
+
+struct nvme_fc_fcp_op {
+ struct nvme_request nreq; /*
+ * nvme/host/core.c
+ * requires this to be
+ * the 1st element in the
+ * private structure
+ * associated with the
+ * request.
+ */
+ struct nvmefc_fcp_req fcp_req;
+
+ struct nvme_fc_ctrl *ctrl;
+ struct nvme_fc_queue *queue;
+ struct request *rq;
+
+ atomic_t state;
+ u32 rqno;
+ u32 nents;
+
+ struct nvme_fc_cmd_iu cmd_iu;
+ struct nvme_fc_ersp_iu rsp_iu;
+};
+
+struct nvme_fc_lport {
+ struct nvme_fc_local_port localport;
+
+ struct ida endp_cnt;
+ struct list_head port_list; /* nvme_fc_port_list */
+ struct list_head endp_list;
+ struct device *dev; /* physical device for dma */
+ struct nvme_fc_port_template *ops;
+ struct kref ref;
+} __aligned(sizeof(u64)); /* alignment for other things allocated with this struct */
+
+struct nvme_fc_rport {
+ struct nvme_fc_remote_port remoteport;
+
+ struct list_head endp_list; /* for lport->endp_list */
+ struct list_head ctrl_list;
+ spinlock_t lock;
+ struct kref ref;
+} __aligned(sizeof(u64)); /* alignment for other things allocated with this struct */
+
+enum nvme_fcctrl_state {
+ FCCTRL_INIT = 0,
+ FCCTRL_ACTIVE = 1,
+};
+
+struct nvme_fc_ctrl {
+ spinlock_t lock;
+ struct nvme_fc_queue *queues;
+ u32 queue_count;
+
+ struct device *dev;
+ struct nvme_fc_lport *lport;
+ struct nvme_fc_rport *rport;
+ u32 cnum;
+
+ u64 association_id;
+
+ u64 cap;
+
+ struct list_head ctrl_list; /* rport->ctrl_list */
+ struct list_head ls_req_list;
+
+ struct blk_mq_tag_set admin_tag_set;
+ struct blk_mq_tag_set tag_set;
+
+ struct work_struct delete_work;
+ struct kref ref;
+ int state;
+
+ struct nvme_fc_fcp_op aen_ops[NVME_FC_NR_AEN_COMMANDS];
+
+ struct nvme_ctrl ctrl;
+};
+
+static inline struct nvme_fc_ctrl *
+to_fc_ctrl(struct nvme_ctrl *ctrl)
+{
+ return container_of(ctrl, struct nvme_fc_ctrl, ctrl);
+}
+
+static inline struct nvme_fc_lport *
+localport_to_lport(struct nvme_fc_local_port *portptr)
+{
+ return container_of(portptr, struct nvme_fc_lport, localport);
+}
+
+static inline struct nvme_fc_rport *
+remoteport_to_rport(struct nvme_fc_remote_port *portptr)
+{
+ return container_of(portptr, struct nvme_fc_rport, remoteport);
+}
+
+static inline struct nvmefc_ls_req_op *
+ls_req_to_lsop(struct nvmefc_ls_req *lsreq)
+{
+ return container_of(lsreq, struct nvmefc_ls_req_op, ls_req);
+}
+
+static inline struct nvme_fc_fcp_op *
+fcp_req_to_fcp_op(struct nvmefc_fcp_req *fcpreq)
+{
+ return container_of(fcpreq, struct nvme_fc_fcp_op, fcp_req);
+}
+
+
+
+/* *************************** Globals **************************** */
+
+
+static DEFINE_SPINLOCK(nvme_fc_lock);
+
+static LIST_HEAD(nvme_fc_lport_list);
+static DEFINE_IDA(nvme_fc_local_port_cnt);
+static DEFINE_IDA(nvme_fc_ctrl_cnt);
+
+static struct workqueue_struct *nvme_fc_wq;
+
+
+
+/* *********************** FC-NVME Port Management ************************ */
+
+static int __nvme_fc_del_ctrl(struct nvme_fc_ctrl *);
+static void __nvme_fc_delete_hw_queue(struct nvme_fc_ctrl *,
+ struct nvme_fc_queue *, unsigned int);
+
+
+/**
+ * nvme_fc_register_localport - transport entry point called by an
+ * LLDD to register the existence of a NVME
+ * host FC port.
+ * @pinfo: pointer to information about the port to be registered
+ * @template: LLDD entrypoints and operational parameters for the port
+ * @dev: physical hardware device node port corresponds to. Will be
+ * used for DMA mappings
+ * @portptr: pointer to a local port pointer. Upon success, the routine
+ * will allocate a nvme_fc_local_port structure and place its
+ * address in the local port pointer. Upon failure, the local
+ * port pointer will be set to NULL.
+ *
+ * Returns:
+ * a completion status. Must be 0 upon success; a negative errno
+ * (ex: -ENXIO) upon failure.
+ */
+int
+nvme_fc_register_localport(struct nvme_fc_port_info *pinfo,
+ struct nvme_fc_port_template *template,
+ struct device *dev,
+ struct nvme_fc_local_port **portptr)
+{
+ struct nvme_fc_lport *newrec;
+ unsigned long flags;
+ int ret, idx;
+
+ if (!template->localport_delete || !template->remoteport_delete ||
+ !template->ls_req || !template->fcp_io ||
+ !template->ls_abort || !template->fcp_abort ||
+ !template->max_hw_queues || !template->max_sgl_segments ||
+ !template->max_dif_sgl_segments || !template->dma_boundary) {
+ ret = -EINVAL;
+ goto out_reghost_failed;
+ }
+
+ newrec = kmalloc((sizeof(*newrec) + template->local_priv_sz),
+ GFP_KERNEL);
+ if (!newrec) {
+ ret = -ENOMEM;
+ goto out_reghost_failed;
+ }
+
+ idx = ida_simple_get(&nvme_fc_local_port_cnt, 0, 0, GFP_KERNEL);
+ if (idx < 0) {
+ ret = -ENOSPC;
+ goto out_fail_kfree;
+ }
+
+ if (dev && !get_device(dev)) {
+ ret = -ENODEV;
+ goto out_ida_put;
+ }
+
+ INIT_LIST_HEAD(&newrec->port_list);
+ INIT_LIST_HEAD(&newrec->endp_list);
+ kref_init(&newrec->ref);
+ newrec->ops = template;
+ newrec->dev = dev;
+ ida_init(&newrec->endp_cnt);
+ newrec->localport.private = &newrec[1];
+ newrec->localport.node_name = pinfo->node_name;
+ newrec->localport.port_name = pinfo->port_name;
+ newrec->localport.port_role = pinfo->port_role;
+ newrec->localport.port_id = pinfo->port_id;
+ newrec->localport.port_state = FC_OBJSTATE_ONLINE;
+ newrec->localport.port_num = idx;
+
+ spin_lock_irqsave(&nvme_fc_lock, flags);
+ list_add_tail(&newrec->port_list, &nvme_fc_lport_list);
+ spin_unlock_irqrestore(&nvme_fc_lock, flags);
+
+ if (dev)
+ dma_set_seg_boundary(dev, template->dma_boundary);
+
+ *portptr = &newrec->localport;
+ return 0;
+
+out_ida_put:
+ ida_simple_remove(&nvme_fc_local_port_cnt, idx);
+out_fail_kfree:
+ kfree(newrec);
+out_reghost_failed:
+ *portptr = NULL;
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(nvme_fc_register_localport);
+
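
From an LLDD author's point of view, registration looks roughly like the sketch below; the template fields are the ones validated at the top of the function, while every lldd_* name is a placeholder:

/* Hedged usage sketch; all lldd_* symbols are hypothetical. */
static struct nvme_fc_port_template lldd_fc_template = {
	.localport_delete	= lldd_localport_delete,
	.remoteport_delete	= lldd_remoteport_delete,
	.ls_req			= lldd_ls_req,
	.fcp_io			= lldd_fcp_io,
	.ls_abort		= lldd_ls_abort,
	.fcp_abort		= lldd_fcp_abort,
	.max_hw_queues		= 4,
	.max_sgl_segments	= 64,
	.max_dif_sgl_segments	= 64,
	.dma_boundary		= 0xFFFFFFFF,
	.local_priv_sz		= sizeof(struct lldd_lport_priv),
};

static int lldd_nvme_register(struct device *dev, u64 wwnn, u64 wwpn)
{
	struct nvme_fc_port_info pinfo = {
		.node_name = wwnn,
		.port_name = wwpn,
		.port_role = FC_PORT_ROLE_NVME_INITIATOR,
	};
	struct nvme_fc_local_port *lport;

	return nvme_fc_register_localport(&pinfo, &lldd_fc_template,
					  dev, &lport);
}
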
+static void
+nvme_fc_free_lport(struct kref *ref)
+{
+ struct nvme_fc_lport *lport =
+ container_of(ref, struct nvme_fc_lport, ref);
+ unsigned long flags;
+
+ WARN_ON(lport->localport.port_state != FC_OBJSTATE_DELETED);
+ WARN_ON(!list_empty(&lport->endp_list));
+
+ /* remove from transport list */
+ spin_lock_irqsave(&nvme_fc_lock, flags);
+ list_del(&lport->port_list);
+ spin_unlock_irqrestore(&nvme_fc_lock, flags);
+
+ /* let the LLDD know we've finished tearing it down */
+ lport->ops->localport_delete(&lport->localport);
+
+ ida_simple_remove(&nvme_fc_local_port_cnt, lport->localport.port_num);
+ ida_destroy(&lport->endp_cnt);
+
+ put_device(lport->dev);
+
+ kfree(lport);
+}
+
+static void
+nvme_fc_lport_put(struct nvme_fc_lport *lport)
+{
+ kref_put(&lport->ref, nvme_fc_free_lport);
+}
+
+static int
+nvme_fc_lport_get(struct nvme_fc_lport *lport)
+{
+ return kref_get_unless_zero(&lport->ref);
+}
+
+/**
+ * nvme_fc_unregister_localport - transport entry point called by an
+ * LLDD to deregister/remove a previously
+ * registered NVME host FC port.
+ * @localport: pointer to the (registered) local port that is to be
+ * deregistered.
+ *
+ * Returns:
+ * a completion status. Must be 0 upon success; a negative errno
+ * (ex: -ENXIO) upon failure.
+ */
+int
+nvme_fc_unregister_localport(struct nvme_fc_local_port *portptr)
+{
+ struct nvme_fc_lport *lport = localport_to_lport(portptr);
+ unsigned long flags;
+
+ if (!portptr)
+ return -EINVAL;
+
+ spin_lock_irqsave(&nvme_fc_lock, flags);
+
+ if (portptr->port_state != FC_OBJSTATE_ONLINE) {
+ spin_unlock_irqrestore(&nvme_fc_lock, flags);
+ return -EINVAL;
+ }
+ portptr->port_state = FC_OBJSTATE_DELETED;
+
+ spin_unlock_irqrestore(&nvme_fc_lock, flags);
+
+ nvme_fc_lport_put(lport);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(nvme_fc_unregister_localport);
+
+/**
+ * nvme_fc_register_remoteport - transport entry point called by an
+ * LLDD to register the existence of a NVME
+ * subsystem FC port on its fabric.
+ * @localport: pointer to the (registered) local port that the remote
+ * subsystem port is connected to.
+ * @pinfo: pointer to information about the port to be registered
+ * @portptr: pointer to a remote port pointer. Upon success, the routine
+ * will allocate a nvme_fc_remote_port structure and place its
+ * address in the remote port pointer. Upon failure, the remote
+ * port pointer will be set to NULL.
+ *
+ * Returns:
+ * a completion status. Must be 0 upon success; a negative errno
+ * (ex: -ENXIO) upon failure.
+ */
+int
+nvme_fc_register_remoteport(struct nvme_fc_local_port *localport,
+ struct nvme_fc_port_info *pinfo,
+ struct nvme_fc_remote_port **portptr)
+{
+ struct nvme_fc_lport *lport = localport_to_lport(localport);
+ struct nvme_fc_rport *newrec;
+ unsigned long flags;
+ int ret, idx;
+
+ newrec = kmalloc((sizeof(*newrec) + lport->ops->remote_priv_sz),
+ GFP_KERNEL);
+ if (!newrec) {
+ ret = -ENOMEM;
+ goto out_reghost_failed;
+ }
+
+ if (!nvme_fc_lport_get(lport)) {
+ ret = -ESHUTDOWN;
+ goto out_kfree_rport;
+ }
+
+ idx = ida_simple_get(&lport->endp_cnt, 0, 0, GFP_KERNEL);
+ if (idx < 0) {
+ ret = -ENOSPC;
+ goto out_lport_put;
+ }
+
+ INIT_LIST_HEAD(&newrec->endp_list);
+ INIT_LIST_HEAD(&newrec->ctrl_list);
+ kref_init(&newrec->ref);
+ spin_lock_init(&newrec->lock);
+ newrec->remoteport.localport = &lport->localport;
+ newrec->remoteport.private = &newrec[1];
+ newrec->remoteport.port_role = pinfo->port_role;
+ newrec->remoteport.node_name = pinfo->node_name;
+ newrec->remoteport.port_name = pinfo->port_name;
+ newrec->remoteport.port_id = pinfo->port_id;
+ newrec->remoteport.port_state = FC_OBJSTATE_ONLINE;
+ newrec->remoteport.port_num = idx;
+
+ spin_lock_irqsave(&nvme_fc_lock, flags);
+ list_add_tail(&newrec->endp_list, &lport->endp_list);
+ spin_unlock_irqrestore(&nvme_fc_lock, flags);
+
+ *portptr = &newrec->remoteport;
+ return 0;
+
+out_lport_put:
+ nvme_fc_lport_put(lport);
+out_kfree_rport:
+ kfree(newrec);
+out_reghost_failed:
+ *portptr = NULL;
+ return ret;
+
+}
+EXPORT_SYMBOL_GPL(nvme_fc_register_remoteport);
+
+static void
+nvme_fc_free_rport(struct kref *ref)
+{
+ struct nvme_fc_rport *rport =
+ container_of(ref, struct nvme_fc_rport, ref);
+ struct nvme_fc_lport *lport =
+ localport_to_lport(rport->remoteport.localport);
+ unsigned long flags;
+
+ WARN_ON(rport->remoteport.port_state != FC_OBJSTATE_DELETED);
+ WARN_ON(!list_empty(&rport->ctrl_list));
+
+ /* remove from lport list */
+ spin_lock_irqsave(&nvme_fc_lock, flags);
+ list_del(&rport->endp_list);
+ spin_unlock_irqrestore(&nvme_fc_lock, flags);
+
+ /* let the LLDD know we've finished tearing it down */
+ lport->ops->remoteport_delete(&rport->remoteport);
+
+ ida_simple_remove(&lport->endp_cnt, rport->remoteport.port_num);
+
+ kfree(rport);
+
+ nvme_fc_lport_put(lport);
+}
+
+static void
+nvme_fc_rport_put(struct nvme_fc_rport *rport)
+{
+ kref_put(&rport->ref, nvme_fc_free_rport);
+}
+
+static int
+nvme_fc_rport_get(struct nvme_fc_rport *rport)
+{
+ return kref_get_unless_zero(&rport->ref);
+}
+
+/**
+ * nvme_fc_unregister_remoteport - transport entry point called by an
+ * LLDD to deregister/remove a previously
+ * registered NVME subsystem FC port.
+ * @remoteport: pointer to the (registered) remote port that is to be
+ * deregistered.
+ *
+ * Returns:
+ * a completion status. Must be 0 upon success; a negative errno
+ * (ex: -ENXIO) upon failure.
+ */
+int
+nvme_fc_unregister_remoteport(struct nvme_fc_remote_port *portptr)
+{
+ struct nvme_fc_rport *rport = remoteport_to_rport(portptr);
+ struct nvme_fc_ctrl *ctrl;
+ unsigned long flags;
+
+ if (!portptr)
+ return -EINVAL;
+
+ spin_lock_irqsave(&rport->lock, flags);
+
+ if (portptr->port_state != FC_OBJSTATE_ONLINE) {
+ spin_unlock_irqrestore(&rport->lock, flags);
+ return -EINVAL;
+ }
+ portptr->port_state = FC_OBJSTATE_DELETED;
+
+ /* tear down all associations to the remote port */
+ list_for_each_entry(ctrl, &rport->ctrl_list, ctrl_list)
+ __nvme_fc_del_ctrl(ctrl);
+
+ spin_unlock_irqrestore(&rport->lock, flags);
+
+ nvme_fc_rport_put(rport);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(nvme_fc_unregister_remoteport);
+
+
+/* *********************** FC-NVME DMA Handling **************************** */
+
+/*
+ * The fcloop device passes in a NULL device pointer. Real LLDDs will
+ * pass in a valid device pointer. If NULL is passed to the dma mapping
+ * routines, depending on the platform, it may or may not succeed, and
+ * may crash.
+ *
+ * As such:
+ * Wrap all the dma routines and check the dev pointer.
+ *
+ * For simple mappings (those that return just a dma address), we noop
+ * them, returning a dma address of 0.
+ *
+ * On more complex mappings (dma_map_sg), a pseudo routine fills
+ * in the scatter list, setting all dma addresses to 0.
+ */
+
+static inline dma_addr_t
+fc_dma_map_single(struct device *dev, void *ptr, size_t size,
+ enum dma_data_direction dir)
+{
+ return dev ? dma_map_single(dev, ptr, size, dir) : (dma_addr_t)0L;
+}
+
+static inline int
+fc_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
+{
+ return dev ? dma_mapping_error(dev, dma_addr) : 0;
+}
+
+static inline void
+fc_dma_unmap_single(struct device *dev, dma_addr_t addr, size_t size,
+ enum dma_data_direction dir)
+{
+ if (dev)
+ dma_unmap_single(dev, addr, size, dir);
+}
+
+static inline void
+fc_dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr, size_t size,
+ enum dma_data_direction dir)
+{
+ if (dev)
+ dma_sync_single_for_cpu(dev, addr, size, dir);
+}
+
+static inline void
+fc_dma_sync_single_for_device(struct device *dev, dma_addr_t addr, size_t size,
+ enum dma_data_direction dir)
+{
+ if (dev)
+ dma_sync_single_for_device(dev, addr, size, dir);
+}
+
+/* pseudo dma_map_sg call */
+static int
+fc_map_sg(struct scatterlist *sg, int nents)
+{
+ struct scatterlist *s;
+ int i;
+
+ WARN_ON(nents == 0 || sg[0].length == 0);
+
+ for_each_sg(sg, s, nents, i) {
+ s->dma_address = 0L;
+#ifdef CONFIG_NEED_SG_DMA_LENGTH
+ s->dma_length = s->length;
+#endif
+ }
+ return nents;
+}
+
+static inline int
+fc_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
+ enum dma_data_direction dir)
+{
+ return dev ? dma_map_sg(dev, sg, nents, dir) : fc_map_sg(sg, nents);
+}
+
+static inline void
+fc_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
+ enum dma_data_direction dir)
+{
+ if (dev)
+ dma_unmap_sg(dev, sg, nents, dir);
+}
+
+
+/* *********************** FC-NVME LS Handling **************************** */
+
+static void nvme_fc_ctrl_put(struct nvme_fc_ctrl *);
+static int nvme_fc_ctrl_get(struct nvme_fc_ctrl *);
+
+
+static void
+__nvme_fc_finish_ls_req(struct nvme_fc_ctrl *ctrl,
+ struct nvmefc_ls_req_op *lsop)
+{
+ struct nvmefc_ls_req *lsreq = &lsop->ls_req;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ctrl->lock, flags);
+
+ if (!lsop->req_queued) {
+ spin_unlock_irqrestore(&ctrl->lock, flags);
+ return;
+ }
+
+ list_del(&lsop->lsreq_list);
+
+ lsop->req_queued = false;
+
+ spin_unlock_irqrestore(&ctrl->lock, flags);
+
+ fc_dma_unmap_single(ctrl->dev, lsreq->rqstdma,
+ (lsreq->rqstlen + lsreq->rsplen),
+ DMA_BIDIRECTIONAL);
+
+ nvme_fc_ctrl_put(ctrl);
+}
+
+static int
+__nvme_fc_send_ls_req(struct nvme_fc_ctrl *ctrl,
+ struct nvmefc_ls_req_op *lsop,
+ void (*done)(struct nvmefc_ls_req *req, int status))
+{
+ struct nvmefc_ls_req *lsreq = &lsop->ls_req;
+ unsigned long flags;
+ int ret;
+
+ if (!nvme_fc_ctrl_get(ctrl))
+ return -ESHUTDOWN;
+
+ lsreq->done = done;
+ lsop->ctrl = ctrl;
+ lsop->req_queued = false;
+ INIT_LIST_HEAD(&lsop->lsreq_list);
+ init_completion(&lsop->ls_done);
+
+ lsreq->rqstdma = fc_dma_map_single(ctrl->dev, lsreq->rqstaddr,
+ lsreq->rqstlen + lsreq->rsplen,
+ DMA_BIDIRECTIONAL);
+ if (fc_dma_mapping_error(ctrl->dev, lsreq->rqstdma)) {
+ nvme_fc_ctrl_put(ctrl);
+ dev_err(ctrl->dev,
+ "els request command failed EFAULT.\n");
+ return -EFAULT;
+ }
+ lsreq->rspdma = lsreq->rqstdma + lsreq->rqstlen;
+
+ spin_lock_irqsave(&ctrl->lock, flags);
+
+ list_add_tail(&lsop->lsreq_list, &ctrl->ls_req_list);
+
+ lsop->req_queued = true;
+
+ spin_unlock_irqrestore(&ctrl->lock, flags);
+
+ ret = ctrl->lport->ops->ls_req(&ctrl->lport->localport,
+ &ctrl->rport->remoteport, lsreq);
+ if (ret)
+ lsop->ls_error = ret;
+
+ return ret;
+}
+
+static void
+nvme_fc_send_ls_req_done(struct nvmefc_ls_req *lsreq, int status)
+{
+ struct nvmefc_ls_req_op *lsop = ls_req_to_lsop(lsreq);
+
+ lsop->ls_error = status;
+ complete(&lsop->ls_done);
+}
+
+static int
+nvme_fc_send_ls_req(struct nvme_fc_ctrl *ctrl, struct nvmefc_ls_req_op *lsop)
+{
+ struct nvmefc_ls_req *lsreq = &lsop->ls_req;
+ struct fcnvme_ls_rjt *rjt = lsreq->rspaddr;
+ int ret;
+
+ ret = __nvme_fc_send_ls_req(ctrl, lsop, nvme_fc_send_ls_req_done);
+
+ if (!ret)
+ /*
+ * No timeout and not interruptible: the request structure
+ * must stay alive until the LLDD calls us back, so we wait
+ * unconditionally for that callback; the LLDD is responsible
+ * for any timeout action.
+ */
+ wait_for_completion(&lsop->ls_done);
+
+ __nvme_fc_finish_ls_req(ctrl, lsop);
+
+ if (ret) {
+ dev_err(ctrl->dev,
+ "ls request command failed (%d).\n", ret);
+ return ret;
+ }
+
+ /* ACC or RJT payload ? */
+ if (rjt->w0.ls_cmd == FCNVME_LS_RJT)
+ return -ENXIO;
+
+ return 0;
+}
+
+static void
+nvme_fc_send_ls_req_async(struct nvme_fc_ctrl *ctrl,
+ struct nvmefc_ls_req_op *lsop,
+ void (*done)(struct nvmefc_ls_req *req, int status))
+{
+ int ret;
+
+ ret = __nvme_fc_send_ls_req(ctrl, lsop, done);
+
+ /* don't wait for completion */
+
+ if (ret)
+ done(&lsop->ls_req, ret);
+}
+
+/* Validation Error indexes into the string table below */
+enum {
+ VERR_NO_ERROR = 0,
+ VERR_LSACC = 1,
+ VERR_LSDESC_RQST = 2,
+ VERR_LSDESC_RQST_LEN = 3,
+ VERR_ASSOC_ID = 4,
+ VERR_ASSOC_ID_LEN = 5,
+ VERR_CONN_ID = 6,
+ VERR_CONN_ID_LEN = 7,
+ VERR_CR_ASSOC = 8,
+ VERR_CR_ASSOC_ACC_LEN = 9,
+ VERR_CR_CONN = 10,
+ VERR_CR_CONN_ACC_LEN = 11,
+ VERR_DISCONN = 12,
+ VERR_DISCONN_ACC_LEN = 13,
+};
+
+static char *validation_errors[] = {
+ "OK",
+ "Not LS_ACC",
+ "Not LSDESC_RQST",
+ "Bad LSDESC_RQST Length",
+ "Not Association ID",
+ "Bad Association ID Length",
+ "Not Connection ID",
+ "Bad Connection ID Length",
+ "Not CR_ASSOC Rqst",
+ "Bad CR_ASSOC ACC Length",
+ "Not CR_CONN Rqst",
+ "Bad CR_CONN ACC Length",
+ "Not Disconnect Rqst",
+ "Bad Disconnect ACC Length",
+};
+
+static int
+nvme_fc_connect_admin_queue(struct nvme_fc_ctrl *ctrl,
+ struct nvme_fc_queue *queue, u16 qsize, u16 ersp_ratio)
+{
+ struct nvmefc_ls_req_op *lsop;
+ struct nvmefc_ls_req *lsreq;
+ struct fcnvme_ls_cr_assoc_rqst *assoc_rqst;
+ struct fcnvme_ls_cr_assoc_acc *assoc_acc;
+ int ret, fcret = 0;
+
+ lsop = kzalloc((sizeof(*lsop) +
+ ctrl->lport->ops->lsrqst_priv_sz +
+ sizeof(*assoc_rqst) + sizeof(*assoc_acc)), GFP_KERNEL);
+ if (!lsop) {
+ ret = -ENOMEM;
+ goto out_no_memory;
+ }
+ lsreq = &lsop->ls_req;
+
+ lsreq->private = (void *)&lsop[1];
+ assoc_rqst = (struct fcnvme_ls_cr_assoc_rqst *)
+ (lsreq->private + ctrl->lport->ops->lsrqst_priv_sz);
+ assoc_acc = (struct fcnvme_ls_cr_assoc_acc *)&assoc_rqst[1];
+
+ assoc_rqst->w0.ls_cmd = FCNVME_LS_CREATE_ASSOCIATION;
+ assoc_rqst->desc_list_len =
+ cpu_to_be32(sizeof(struct fcnvme_lsdesc_cr_assoc_cmd));
+
+ assoc_rqst->assoc_cmd.desc_tag =
+ cpu_to_be32(FCNVME_LSDESC_CREATE_ASSOC_CMD);
+ assoc_rqst->assoc_cmd.desc_len =
+ fcnvme_lsdesc_len(
+ sizeof(struct fcnvme_lsdesc_cr_assoc_cmd));
+
+ assoc_rqst->assoc_cmd.ersp_ratio = cpu_to_be16(ersp_ratio);
+ assoc_rqst->assoc_cmd.sqsize = cpu_to_be16(qsize);
+ /* Linux supports only Dynamic controllers */
+ assoc_rqst->assoc_cmd.cntlid = cpu_to_be16(0xffff);
+ memcpy(&assoc_rqst->assoc_cmd.hostid, &ctrl->ctrl.opts->host->id,
+ min_t(size_t, FCNVME_ASSOC_HOSTID_LEN, sizeof(uuid_be)));
+ strncpy(assoc_rqst->assoc_cmd.hostnqn, ctrl->ctrl.opts->host->nqn,
+ min(FCNVME_ASSOC_HOSTNQN_LEN, NVMF_NQN_SIZE));
+ strncpy(assoc_rqst->assoc_cmd.subnqn, ctrl->ctrl.opts->subsysnqn,
+ min(FCNVME_ASSOC_SUBNQN_LEN, NVMF_NQN_SIZE));
+
+ lsop->queue = queue;
+ lsreq->rqstaddr = assoc_rqst;
+ lsreq->rqstlen = sizeof(*assoc_rqst);
+ lsreq->rspaddr = assoc_acc;
+ lsreq->rsplen = sizeof(*assoc_acc);
+ lsreq->timeout = NVME_FC_CONNECT_TIMEOUT_SEC;
+
+ ret = nvme_fc_send_ls_req(ctrl, lsop);
+ if (ret)
+ goto out_free_buffer;
+
+ /* process connect LS completion */
+
+ /* validate the ACC response */
+ if (assoc_acc->hdr.w0.ls_cmd != FCNVME_LS_ACC)
+ fcret = VERR_LSACC;
+ if (assoc_acc->hdr.desc_list_len !=
+ fcnvme_lsdesc_len(
+ sizeof(struct fcnvme_ls_cr_assoc_acc)))
+ fcret = VERR_CR_ASSOC_ACC_LEN;
+ if (assoc_acc->hdr.rqst.desc_tag != cpu_to_be32(FCNVME_LSDESC_RQST))
+ fcret = VERR_LSDESC_RQST;
+ else if (assoc_acc->hdr.rqst.desc_len !=
+ fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_rqst)))
+ fcret = VERR_LSDESC_RQST_LEN;
+ else if (assoc_acc->hdr.rqst.w0.ls_cmd != FCNVME_LS_CREATE_ASSOCIATION)
+ fcret = VERR_CR_ASSOC;
+ else if (assoc_acc->associd.desc_tag !=
+ cpu_to_be32(FCNVME_LSDESC_ASSOC_ID))
+ fcret = VERR_ASSOC_ID;
+ else if (assoc_acc->associd.desc_len !=
+ fcnvme_lsdesc_len(
+ sizeof(struct fcnvme_lsdesc_assoc_id)))
+ fcret = VERR_ASSOC_ID_LEN;
+ else if (assoc_acc->connectid.desc_tag !=
+ cpu_to_be32(FCNVME_LSDESC_CONN_ID))
+ fcret = VERR_CONN_ID;
+ else if (assoc_acc->connectid.desc_len !=
+ fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_conn_id)))
+ fcret = VERR_CONN_ID_LEN;
+
+ if (fcret) {
+ ret = -EBADF;
+ dev_err(ctrl->dev,
+ "q %d connect failed: %s\n",
+ queue->qnum, validation_errors[fcret]);
+ } else {
+ ctrl->association_id =
+ be64_to_cpu(assoc_acc->associd.association_id);
+ queue->connection_id =
+ be64_to_cpu(assoc_acc->connectid.connection_id);
+ set_bit(NVME_FC_Q_CONNECTED, &queue->flags);
+ }
+
+out_free_buffer:
+ kfree(lsop);
+out_no_memory:
+ if (ret)
+ dev_err(ctrl->dev,
+ "queue %d connect admin queue failed (%d).\n",
+ queue->qnum, ret);
+ return ret;
+}
+
+static int
+nvme_fc_connect_queue(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue,
+ u16 qsize, u16 ersp_ratio)
+{
+ struct nvmefc_ls_req_op *lsop;
+ struct nvmefc_ls_req *lsreq;
+ struct fcnvme_ls_cr_conn_rqst *conn_rqst;
+ struct fcnvme_ls_cr_conn_acc *conn_acc;
+ int ret, fcret = 0;
+
+ lsop = kzalloc((sizeof(*lsop) +
+ ctrl->lport->ops->lsrqst_priv_sz +
+ sizeof(*conn_rqst) + sizeof(*conn_acc)), GFP_KERNEL);
+ if (!lsop) {
+ ret = -ENOMEM;
+ goto out_no_memory;
+ }
+ lsreq = &lsop->ls_req;
+
+ lsreq->private = (void *)&lsop[1];
+ conn_rqst = (struct fcnvme_ls_cr_conn_rqst *)
+ (lsreq->private + ctrl->lport->ops->lsrqst_priv_sz);
+ conn_acc = (struct fcnvme_ls_cr_conn_acc *)&conn_rqst[1];
+
+ conn_rqst->w0.ls_cmd = FCNVME_LS_CREATE_CONNECTION;
+ conn_rqst->desc_list_len = cpu_to_be32(
+ sizeof(struct fcnvme_lsdesc_assoc_id) +
+ sizeof(struct fcnvme_lsdesc_cr_conn_cmd));
+
+ conn_rqst->associd.desc_tag = cpu_to_be32(FCNVME_LSDESC_ASSOC_ID);
+ conn_rqst->associd.desc_len =
+ fcnvme_lsdesc_len(
+ sizeof(struct fcnvme_lsdesc_assoc_id));
+ conn_rqst->associd.association_id = cpu_to_be64(ctrl->association_id);
+ conn_rqst->connect_cmd.desc_tag =
+ cpu_to_be32(FCNVME_LSDESC_CREATE_CONN_CMD);
+ conn_rqst->connect_cmd.desc_len =
+ fcnvme_lsdesc_len(
+ sizeof(struct fcnvme_lsdesc_cr_conn_cmd));
+ conn_rqst->connect_cmd.ersp_ratio = cpu_to_be16(ersp_ratio);
+ conn_rqst->connect_cmd.qid = cpu_to_be16(queue->qnum);
+ conn_rqst->connect_cmd.sqsize = cpu_to_be16(qsize);
+
+ lsop->queue = queue;
+ lsreq->rqstaddr = conn_rqst;
+ lsreq->rqstlen = sizeof(*conn_rqst);
+ lsreq->rspaddr = conn_acc;
+ lsreq->rsplen = sizeof(*conn_acc);
+ lsreq->timeout = NVME_FC_CONNECT_TIMEOUT_SEC;
+
+ ret = nvme_fc_send_ls_req(ctrl, lsop);
+ if (ret)
+ goto out_free_buffer;
+
+ /* process connect LS completion */
+
+ /* validate the ACC response */
+ if (conn_acc->hdr.w0.ls_cmd != FCNVME_LS_ACC)
+ fcret = VERR_LSACC;
+ if (conn_acc->hdr.desc_list_len !=
+ fcnvme_lsdesc_len(sizeof(struct fcnvme_ls_cr_conn_acc)))
+ fcret = VERR_CR_CONN_ACC_LEN;
+ if (conn_acc->hdr.rqst.desc_tag != cpu_to_be32(FCNVME_LSDESC_RQST))
+ fcret = VERR_LSDESC_RQST;
+ else if (conn_acc->hdr.rqst.desc_len !=
+ fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_rqst)))
+ fcret = VERR_LSDESC_RQST_LEN;
+ else if (conn_acc->hdr.rqst.w0.ls_cmd != FCNVME_LS_CREATE_CONNECTION)
+ fcret = VERR_CR_CONN;
+ else if (conn_acc->connectid.desc_tag !=
+ cpu_to_be32(FCNVME_LSDESC_CONN_ID))
+ fcret = VERR_CONN_ID;
+ else if (conn_acc->connectid.desc_len !=
+ fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_conn_id)))
+ fcret = VERR_CONN_ID_LEN;
+
+ if (fcret) {
+ ret = -EBADF;
+ dev_err(ctrl->dev,
+ "q %d connect failed: %s\n",
+ queue->qnum, validation_errors[fcret]);
+ } else {
+ queue->connection_id =
+ be64_to_cpu(conn_acc->connectid.connection_id);
+ set_bit(NVME_FC_Q_CONNECTED, &queue->flags);
+ }
+
+out_free_buffer:
+ kfree(lsop);
+out_no_memory:
+ if (ret)
+ dev_err(ctrl->dev,
+ "queue %d connect command failed (%d).\n",
+ queue->qnum, ret);
+ return ret;
+}
+
+static void
+nvme_fc_disconnect_assoc_done(struct nvmefc_ls_req *lsreq, int status)
+{
+ struct nvmefc_ls_req_op *lsop = ls_req_to_lsop(lsreq);
+ struct nvme_fc_ctrl *ctrl = lsop->ctrl;
+
+ __nvme_fc_finish_ls_req(ctrl, lsop);
+
+ if (status)
+ dev_err(ctrl->dev,
+ "disconnect assoc ls request command failed (%d).\n",
+ status);
+
+	/* fc-nvme initiator doesn't care about success or failure of cmd */
+
+ kfree(lsop);
+}
+
+/*
+ * This routine sends a FC-NVME LS to disconnect (aka terminate)
+ * the FC-NVME Association. Terminating the association also
+ * terminates the FC-NVME connections (per queue, both admin and io
+ * queues) that are part of the association. I.e. things are torn
+ * down, and the related FC-NVME Association ID and Connection IDs
+ * become invalid.
+ *
+ * The behavior of the fc-nvme initiator is such that its
+ * understanding of the association and connections will implicitly
+ * be torn down. The action is implicit as it may be due to a loss of
+ * connectivity with the fc-nvme target, so a response may never
+ * arrive even if the LS was sent. As such, the action of this routine
+ * is to asynchronously send the LS, ignore any results of the LS, and
+ * continue on with terminating the association. If the fc-nvme target
+ * is present and receives the LS, it too can tear down.
+ */
+static void
+nvme_fc_xmt_disconnect_assoc(struct nvme_fc_ctrl *ctrl)
+{
+ struct fcnvme_ls_disconnect_rqst *discon_rqst;
+ struct fcnvme_ls_disconnect_acc *discon_acc;
+ struct nvmefc_ls_req_op *lsop;
+ struct nvmefc_ls_req *lsreq;
+
+ lsop = kzalloc((sizeof(*lsop) +
+ ctrl->lport->ops->lsrqst_priv_sz +
+ sizeof(*discon_rqst) + sizeof(*discon_acc)),
+ GFP_KERNEL);
+ if (!lsop)
+		/* couldn't send it... too bad */
+ return;
+
+ lsreq = &lsop->ls_req;
+
+ lsreq->private = (void *)&lsop[1];
+ discon_rqst = (struct fcnvme_ls_disconnect_rqst *)
+ (lsreq->private + ctrl->lport->ops->lsrqst_priv_sz);
+ discon_acc = (struct fcnvme_ls_disconnect_acc *)&discon_rqst[1];
+
+ discon_rqst->w0.ls_cmd = FCNVME_LS_DISCONNECT;
+ discon_rqst->desc_list_len = cpu_to_be32(
+ sizeof(struct fcnvme_lsdesc_assoc_id) +
+ sizeof(struct fcnvme_lsdesc_disconn_cmd));
+
+ discon_rqst->associd.desc_tag = cpu_to_be32(FCNVME_LSDESC_ASSOC_ID);
+ discon_rqst->associd.desc_len =
+ fcnvme_lsdesc_len(
+ sizeof(struct fcnvme_lsdesc_assoc_id));
+
+ discon_rqst->associd.association_id = cpu_to_be64(ctrl->association_id);
+
+ discon_rqst->discon_cmd.desc_tag = cpu_to_be32(
+ FCNVME_LSDESC_DISCONN_CMD);
+ discon_rqst->discon_cmd.desc_len =
+ fcnvme_lsdesc_len(
+ sizeof(struct fcnvme_lsdesc_disconn_cmd));
+ discon_rqst->discon_cmd.scope = FCNVME_DISCONN_ASSOCIATION;
+ discon_rqst->discon_cmd.id = cpu_to_be64(ctrl->association_id);
+
+ lsreq->rqstaddr = discon_rqst;
+ lsreq->rqstlen = sizeof(*discon_rqst);
+ lsreq->rspaddr = discon_acc;
+ lsreq->rsplen = sizeof(*discon_acc);
+ lsreq->timeout = NVME_FC_CONNECT_TIMEOUT_SEC;
+
+ nvme_fc_send_ls_req_async(ctrl, lsop, nvme_fc_disconnect_assoc_done);
+
+	/* the only meaningful part of terminating the association */
+ ctrl->association_id = 0;
+}
+
+
+/* *********************** NVME Ctrl Routines **************************** */
+
+
+static int
+nvme_fc_reinit_request(void *data, struct request *rq)
+{
+ struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
+ struct nvme_fc_cmd_iu *cmdiu = &op->cmd_iu;
+
+ memset(cmdiu, 0, sizeof(*cmdiu));
+ cmdiu->scsi_id = NVME_CMD_SCSI_ID;
+ cmdiu->fc_id = NVME_CMD_FC_ID;
+ cmdiu->iu_len = cpu_to_be16(sizeof(*cmdiu) / sizeof(u32));
+ memset(&op->rsp_iu, 0, sizeof(op->rsp_iu));
+
+ return 0;
+}
+
+static void
+__nvme_fc_exit_request(struct nvme_fc_ctrl *ctrl,
+ struct nvme_fc_fcp_op *op)
+{
+ fc_dma_unmap_single(ctrl->lport->dev, op->fcp_req.rspdma,
+ sizeof(op->rsp_iu), DMA_FROM_DEVICE);
+ fc_dma_unmap_single(ctrl->lport->dev, op->fcp_req.cmddma,
+ sizeof(op->cmd_iu), DMA_TO_DEVICE);
+
+ atomic_set(&op->state, FCPOP_STATE_UNINIT);
+}
+
+static void
+nvme_fc_exit_request(void *data, struct request *rq,
+ unsigned int hctx_idx, unsigned int rq_idx)
+{
+ struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
+
+ return __nvme_fc_exit_request(data, op);
+}
+
+static void
+nvme_fc_exit_aen_ops(struct nvme_fc_ctrl *ctrl)
+{
+ struct nvme_fc_fcp_op *aen_op = ctrl->aen_ops;
+ int i;
+
+ for (i = 0; i < NVME_FC_NR_AEN_COMMANDS; i++, aen_op++) {
+ if (atomic_read(&aen_op->state) == FCPOP_STATE_UNINIT)
+ continue;
+ __nvme_fc_exit_request(ctrl, aen_op);
+ nvme_fc_ctrl_put(ctrl);
+ }
+}
+
+void
+nvme_fc_fcpio_done(struct nvmefc_fcp_req *req)
+{
+ struct nvme_fc_fcp_op *op = fcp_req_to_fcp_op(req);
+ struct request *rq = op->rq;
+ struct nvmefc_fcp_req *freq = &op->fcp_req;
+ struct nvme_fc_ctrl *ctrl = op->ctrl;
+ struct nvme_fc_queue *queue = op->queue;
+ struct nvme_completion *cqe = &op->rsp_iu.cqe;
+ u16 status;
+
+ /*
+ * WARNING:
+ * The current linux implementation of a nvme controller
+ * allocates a single tag set for all io queues and sizes
+ * the io queues to fully hold all possible tags. Thus, the
+ * implementation does not reference or care about the sqhd
+ * value as it never needs to use the sqhd/sqtail pointers
+ * for submission pacing.
+ *
+ * This affects the FC-NVME implementation in two ways:
+ * 1) As the value doesn't matter, we don't need to waste
+ * cycles extracting it from ERSPs and stamping it in the
+ * cases where the transport fabricates CQEs on successful
+ * completions.
+ * 2) The FC-NVME implementation requires that delivery of
+ * ERSP completions are to go back to the nvme layer in order
+ * relative to the rsn, such that the sqhd value will always
+ * be "in order" for the nvme layer. As the nvme layer in
+ * linux doesn't care about sqhd, there's no need to return
+ * them in order.
+ *
+ * Additionally:
+ * As the core nvme layer in linux currently does not look at
+ * every field in the cqe - in cases where the FC transport must
+ * fabricate a CQE, the following fields will not be set as they
+ * are not referenced:
+ * cqe.sqid, cqe.sqhd, cqe.command_id
+ */
+
+ fc_dma_sync_single_for_cpu(ctrl->lport->dev, op->fcp_req.rspdma,
+ sizeof(op->rsp_iu), DMA_FROM_DEVICE);
+
+ if (atomic_read(&op->state) == FCPOP_STATE_ABORTED)
+ status = NVME_SC_ABORT_REQ | NVME_SC_DNR;
+ else
+ status = freq->status;
+
+ /*
+	 * For the linux implementation, if we have an unsuccessful
+	 * status, the blk-mq layer can typically be called with the
+ * non-zero status and the content of the cqe isn't important.
+ */
+ if (status)
+ goto done;
+
+ /*
+ * command completed successfully relative to the wire
+ * protocol. However, validate anything received and
+ * extract the status and result from the cqe (create it
+ * where necessary).
+ */
+
+ switch (freq->rcv_rsplen) {
+
+ case 0:
+ case NVME_FC_SIZEOF_ZEROS_RSP:
+ /*
+		 * No response payload, or 12 bytes of payload (which
+		 * should all be zeros), is treated as a successful
+		 * completion; the transport fabricates the CQE with
+		 * no payload.
+ */
+ if (freq->transferred_length !=
+ be32_to_cpu(op->cmd_iu.data_len)) {
+ status = -EIO;
+ goto done;
+ }
+ op->nreq.result.u64 = 0;
+ break;
+
+ case sizeof(struct nvme_fc_ersp_iu):
+ /*
+ * The ERSP IU contains a full completion with CQE.
+ * Validate ERSP IU and look at cqe.
+ */
+ if (unlikely(be16_to_cpu(op->rsp_iu.iu_len) !=
+ (freq->rcv_rsplen / 4) ||
+ be32_to_cpu(op->rsp_iu.xfrd_len) !=
+ freq->transferred_length ||
+ op->rqno != le16_to_cpu(cqe->command_id))) {
+ status = -EIO;
+ goto done;
+ }
+ op->nreq.result = cqe->result;
+ status = le16_to_cpu(cqe->status) >> 1;
+ break;
+
+ default:
+ status = -EIO;
+ goto done;
+ }
+
+done:
+ if (!queue->qnum && op->rqno >= AEN_CMDID_BASE) {
+ nvme_complete_async_event(&queue->ctrl->ctrl, status,
+ &op->nreq.result);
+ nvme_fc_ctrl_put(ctrl);
+ return;
+ }
+
+ blk_mq_complete_request(rq, status);
+}
+
+static int
+__nvme_fc_init_request(struct nvme_fc_ctrl *ctrl,
+ struct nvme_fc_queue *queue, struct nvme_fc_fcp_op *op,
+ struct request *rq, u32 rqno)
+{
+ struct nvme_fc_cmd_iu *cmdiu = &op->cmd_iu;
+ int ret = 0;
+
+ memset(op, 0, sizeof(*op));
+ op->fcp_req.cmdaddr = &op->cmd_iu;
+ op->fcp_req.cmdlen = sizeof(op->cmd_iu);
+ op->fcp_req.rspaddr = &op->rsp_iu;
+ op->fcp_req.rsplen = sizeof(op->rsp_iu);
+ op->fcp_req.done = nvme_fc_fcpio_done;
+ op->fcp_req.first_sgl = (struct scatterlist *)&op[1];
+ op->fcp_req.private = &op->fcp_req.first_sgl[SG_CHUNK_SIZE];
+ op->ctrl = ctrl;
+ op->queue = queue;
+ op->rq = rq;
+ op->rqno = rqno;
+
+ cmdiu->scsi_id = NVME_CMD_SCSI_ID;
+ cmdiu->fc_id = NVME_CMD_FC_ID;
+ cmdiu->iu_len = cpu_to_be16(sizeof(*cmdiu) / sizeof(u32));
+
+ op->fcp_req.cmddma = fc_dma_map_single(ctrl->lport->dev,
+ &op->cmd_iu, sizeof(op->cmd_iu), DMA_TO_DEVICE);
+ if (fc_dma_mapping_error(ctrl->lport->dev, op->fcp_req.cmddma)) {
+ dev_err(ctrl->dev,
+ "FCP Op failed - cmdiu dma mapping failed.\n");
+		ret = -EFAULT;
+ goto out_on_error;
+ }
+
+ op->fcp_req.rspdma = fc_dma_map_single(ctrl->lport->dev,
+ &op->rsp_iu, sizeof(op->rsp_iu),
+ DMA_FROM_DEVICE);
+ if (fc_dma_mapping_error(ctrl->lport->dev, op->fcp_req.rspdma)) {
+ dev_err(ctrl->dev,
+ "FCP Op failed - rspiu dma mapping failed.\n");
+		ret = -EFAULT;
+ }
+
+ atomic_set(&op->state, FCPOP_STATE_IDLE);
+out_on_error:
+ return ret;
+}
+
+static int
+nvme_fc_init_request(void *data, struct request *rq,
+ unsigned int hctx_idx, unsigned int rq_idx,
+ unsigned int numa_node)
+{
+ struct nvme_fc_ctrl *ctrl = data;
+ struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
+ struct nvme_fc_queue *queue = &ctrl->queues[hctx_idx+1];
+
+ return __nvme_fc_init_request(ctrl, queue, op, rq, queue->rqcnt++);
+}
+
+static int
+nvme_fc_init_admin_request(void *data, struct request *rq,
+ unsigned int hctx_idx, unsigned int rq_idx,
+ unsigned int numa_node)
+{
+ struct nvme_fc_ctrl *ctrl = data;
+ struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
+ struct nvme_fc_queue *queue = &ctrl->queues[0];
+
+ return __nvme_fc_init_request(ctrl, queue, op, rq, queue->rqcnt++);
+}
+
+static int
+nvme_fc_init_aen_ops(struct nvme_fc_ctrl *ctrl)
+{
+ struct nvme_fc_fcp_op *aen_op;
+ struct nvme_fc_cmd_iu *cmdiu;
+ struct nvme_command *sqe;
+ int i, ret;
+
+ aen_op = ctrl->aen_ops;
+ for (i = 0; i < NVME_FC_NR_AEN_COMMANDS; i++, aen_op++) {
+ cmdiu = &aen_op->cmd_iu;
+ sqe = &cmdiu->sqe;
+ ret = __nvme_fc_init_request(ctrl, &ctrl->queues[0],
+ aen_op, (struct request *)NULL,
+ (AEN_CMDID_BASE + i));
+ if (ret)
+ return ret;
+
+ memset(sqe, 0, sizeof(*sqe));
+ sqe->common.opcode = nvme_admin_async_event;
+ sqe->common.command_id = AEN_CMDID_BASE + i;
+ }
+ return 0;
+}
+
+
+static inline void
+__nvme_fc_init_hctx(struct blk_mq_hw_ctx *hctx, struct nvme_fc_ctrl *ctrl,
+ unsigned int qidx)
+{
+ struct nvme_fc_queue *queue = &ctrl->queues[qidx];
+
+ hctx->driver_data = queue;
+ queue->hctx = hctx;
+}
+
+static int
+nvme_fc_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
+ unsigned int hctx_idx)
+{
+ struct nvme_fc_ctrl *ctrl = data;
+
+ __nvme_fc_init_hctx(hctx, ctrl, hctx_idx + 1);
+
+ return 0;
+}
+
+static int
+nvme_fc_init_admin_hctx(struct blk_mq_hw_ctx *hctx, void *data,
+ unsigned int hctx_idx)
+{
+ struct nvme_fc_ctrl *ctrl = data;
+
+ __nvme_fc_init_hctx(hctx, ctrl, hctx_idx);
+
+ return 0;
+}
+
+static void
+nvme_fc_init_queue(struct nvme_fc_ctrl *ctrl, int idx, size_t queue_size)
+{
+ struct nvme_fc_queue *queue;
+
+ queue = &ctrl->queues[idx];
+ memset(queue, 0, sizeof(*queue));
+ queue->ctrl = ctrl;
+ queue->qnum = idx;
+ atomic_set(&queue->csn, 1);
+ queue->dev = ctrl->dev;
+
+ if (idx > 0)
+ queue->cmnd_capsule_len = ctrl->ctrl.ioccsz * 16;
+ else
+ queue->cmnd_capsule_len = sizeof(struct nvme_command);
+
+ queue->queue_size = queue_size;
+
+ /*
+ * Considered whether we should allocate buffers for all SQEs
+ * and CQEs and dma map them - mapping their respective entries
+ * into the request structures (kernel vm addr and dma address)
+ * thus the driver could use the buffers/mappings directly.
+ * It only makes sense if the LLDD would use them for its
+	 * messaging API. It's very unlikely most adapter APIs would use
+ * a native NVME sqe/cqe. More reasonable if FC-NVME IU payload
+ * structures were used instead.
+ */
+}
+
+/*
+ * This routine terminates a queue at the transport level.
+ * The transport has already ensured that all outstanding ios on
+ * the queue have been terminated.
+ * The transport will send a Disconnect LS request to terminate
+ * the queue's connection. Termination of the admin queue will also
+ * terminate the association at the target.
+ */
+static void
+nvme_fc_free_queue(struct nvme_fc_queue *queue)
+{
+ if (!test_and_clear_bit(NVME_FC_Q_CONNECTED, &queue->flags))
+ return;
+
+ /*
+ * Current implementation never disconnects a single queue.
+ * It always terminates a whole association. So there is never
+ * a disconnect(queue) LS sent to the target.
+ */
+
+ queue->connection_id = 0;
+ clear_bit(NVME_FC_Q_CONNECTED, &queue->flags);
+}
+
+static void
+__nvme_fc_delete_hw_queue(struct nvme_fc_ctrl *ctrl,
+ struct nvme_fc_queue *queue, unsigned int qidx)
+{
+ if (ctrl->lport->ops->delete_queue)
+ ctrl->lport->ops->delete_queue(&ctrl->lport->localport, qidx,
+ queue->lldd_handle);
+ queue->lldd_handle = NULL;
+}
+
+static void
+nvme_fc_destroy_admin_queue(struct nvme_fc_ctrl *ctrl)
+{
+ __nvme_fc_delete_hw_queue(ctrl, &ctrl->queues[0], 0);
+ blk_cleanup_queue(ctrl->ctrl.admin_q);
+ blk_mq_free_tag_set(&ctrl->admin_tag_set);
+ nvme_fc_free_queue(&ctrl->queues[0]);
+}
+
+static void
+nvme_fc_free_io_queues(struct nvme_fc_ctrl *ctrl)
+{
+ int i;
+
+ for (i = 1; i < ctrl->queue_count; i++)
+ nvme_fc_free_queue(&ctrl->queues[i]);
+}
+
+static int
+__nvme_fc_create_hw_queue(struct nvme_fc_ctrl *ctrl,
+ struct nvme_fc_queue *queue, unsigned int qidx, u16 qsize)
+{
+ int ret = 0;
+
+ queue->lldd_handle = NULL;
+ if (ctrl->lport->ops->create_queue)
+ ret = ctrl->lport->ops->create_queue(&ctrl->lport->localport,
+ qidx, qsize, &queue->lldd_handle);
+
+ return ret;
+}
+
+static void
+nvme_fc_delete_hw_io_queues(struct nvme_fc_ctrl *ctrl)
+{
+ struct nvme_fc_queue *queue = &ctrl->queues[ctrl->queue_count - 1];
+ int i;
+
+ for (i = ctrl->queue_count - 1; i >= 1; i--, queue--)
+ __nvme_fc_delete_hw_queue(ctrl, queue, i);
+}
+
+static int
+nvme_fc_create_hw_io_queues(struct nvme_fc_ctrl *ctrl, u16 qsize)
+{
+ struct nvme_fc_queue *queue = &ctrl->queues[1];
+ int i, j, ret;
+
+ for (i = 1; i < ctrl->queue_count; i++, queue++) {
+ ret = __nvme_fc_create_hw_queue(ctrl, queue, i, qsize);
+ if (ret) {
+			for (j = i-1; j >= 1; j--)
+ __nvme_fc_delete_hw_queue(ctrl,
+ &ctrl->queues[j], j);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int
+nvme_fc_connect_io_queues(struct nvme_fc_ctrl *ctrl, u16 qsize)
+{
+ int i, ret = 0;
+
+ for (i = 1; i < ctrl->queue_count; i++) {
+ ret = nvme_fc_connect_queue(ctrl, &ctrl->queues[i], qsize,
+ (qsize / 5));
+ if (ret)
+ break;
+ ret = nvmf_connect_io_queue(&ctrl->ctrl, i);
+ if (ret)
+ break;
+ }
+
+ return ret;
+}
+
+static void
+nvme_fc_init_io_queues(struct nvme_fc_ctrl *ctrl)
+{
+ int i;
+
+ for (i = 1; i < ctrl->queue_count; i++)
+ nvme_fc_init_queue(ctrl, i, ctrl->ctrl.sqsize);
+}
+
+static void
+nvme_fc_ctrl_free(struct kref *ref)
+{
+ struct nvme_fc_ctrl *ctrl =
+ container_of(ref, struct nvme_fc_ctrl, ref);
+ unsigned long flags;
+
+ if (ctrl->state != FCCTRL_INIT) {
+ /* remove from rport list */
+ spin_lock_irqsave(&ctrl->rport->lock, flags);
+ list_del(&ctrl->ctrl_list);
+ spin_unlock_irqrestore(&ctrl->rport->lock, flags);
+ }
+
+ put_device(ctrl->dev);
+ nvme_fc_rport_put(ctrl->rport);
+
+ kfree(ctrl->queues);
+ ida_simple_remove(&nvme_fc_ctrl_cnt, ctrl->cnum);
+ nvmf_free_options(ctrl->ctrl.opts);
+ kfree(ctrl);
+}
+
+static void
+nvme_fc_ctrl_put(struct nvme_fc_ctrl *ctrl)
+{
+ kref_put(&ctrl->ref, nvme_fc_ctrl_free);
+}
+
+static int
+nvme_fc_ctrl_get(struct nvme_fc_ctrl *ctrl)
+{
+ return kref_get_unless_zero(&ctrl->ref);
+}
+
+/*
+ * All accesses from nvme core layer done - can now free the
+ * controller. Called after last nvme_put_ctrl() call
+ */
+static void
+nvme_fc_free_nvme_ctrl(struct nvme_ctrl *nctrl)
+{
+ struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl);
+
+ WARN_ON(nctrl != &ctrl->ctrl);
+
+ /*
+ * Tear down the association, which will generate link
+ * traffic to terminate connections
+ */
+
+ if (ctrl->state != FCCTRL_INIT) {
+ /* send a Disconnect(association) LS to fc-nvme target */
+ nvme_fc_xmt_disconnect_assoc(ctrl);
+
+ if (ctrl->ctrl.tagset) {
+ blk_cleanup_queue(ctrl->ctrl.connect_q);
+ blk_mq_free_tag_set(&ctrl->tag_set);
+ nvme_fc_delete_hw_io_queues(ctrl);
+ nvme_fc_free_io_queues(ctrl);
+ }
+
+ nvme_fc_exit_aen_ops(ctrl);
+
+ nvme_fc_destroy_admin_queue(ctrl);
+ }
+
+ nvme_fc_ctrl_put(ctrl);
+}
+
+
+static int
+__nvme_fc_abort_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_fcp_op *op)
+{
+ int state;
+
+ state = atomic_xchg(&op->state, FCPOP_STATE_ABORTED);
+ if (state != FCPOP_STATE_ACTIVE) {
+ atomic_set(&op->state, state);
+ return -ECANCELED; /* fail */
+ }
+
+ ctrl->lport->ops->fcp_abort(&ctrl->lport->localport,
+ &ctrl->rport->remoteport,
+ op->queue->lldd_handle,
+ &op->fcp_req);
+
+ return 0;
+}
+
+enum blk_eh_timer_return
+nvme_fc_timeout(struct request *rq, bool reserved)
+{
+ struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
+ struct nvme_fc_ctrl *ctrl = op->ctrl;
+ int ret;
+
+ if (reserved)
+ return BLK_EH_RESET_TIMER;
+
+ ret = __nvme_fc_abort_op(ctrl, op);
+ if (ret)
+		/* io wasn't active to abort; consider it done */
+ return BLK_EH_HANDLED;
+
+ /*
+ * TODO: force a controller reset
+ * when that happens, queues will be torn down and outstanding
+ * ios will be terminated, and the above abort, on a single io
+ * will no longer be needed.
+ */
+
+ return BLK_EH_HANDLED;
+}
+
+static int
+nvme_fc_map_data(struct nvme_fc_ctrl *ctrl, struct request *rq,
+ struct nvme_fc_fcp_op *op)
+{
+ struct nvmefc_fcp_req *freq = &op->fcp_req;
+ u32 map_len = nvme_map_len(rq);
+ enum dma_data_direction dir;
+ int ret;
+
+ freq->sg_cnt = 0;
+
+ if (!map_len)
+ return 0;
+
+ freq->sg_table.sgl = freq->first_sgl;
+ ret = sg_alloc_table_chained(&freq->sg_table, rq->nr_phys_segments,
+ freq->sg_table.sgl);
+ if (ret)
+ return -ENOMEM;
+
+ op->nents = blk_rq_map_sg(rq->q, rq, freq->sg_table.sgl);
+ WARN_ON(op->nents > rq->nr_phys_segments);
+ dir = (rq_data_dir(rq) == WRITE) ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
+ freq->sg_cnt = fc_dma_map_sg(ctrl->lport->dev, freq->sg_table.sgl,
+ op->nents, dir);
+ if (unlikely(freq->sg_cnt <= 0)) {
+ sg_free_table_chained(&freq->sg_table, true);
+ freq->sg_cnt = 0;
+ return -EFAULT;
+ }
+
+ /*
+ * TODO: blk_integrity_rq(rq) for DIF
+ */
+ return 0;
+}
+
+static void
+nvme_fc_unmap_data(struct nvme_fc_ctrl *ctrl, struct request *rq,
+ struct nvme_fc_fcp_op *op)
+{
+ struct nvmefc_fcp_req *freq = &op->fcp_req;
+
+ if (!freq->sg_cnt)
+ return;
+
+ fc_dma_unmap_sg(ctrl->lport->dev, freq->sg_table.sgl, op->nents,
+ ((rq_data_dir(rq) == WRITE) ?
+ DMA_TO_DEVICE : DMA_FROM_DEVICE));
+
+ nvme_cleanup_cmd(rq);
+
+ sg_free_table_chained(&freq->sg_table, true);
+
+ freq->sg_cnt = 0;
+}
+
+/*
+ * In FC, the queue is a logical thing. At transport connect, the target
+ * creates its "queue" and returns a handle that the host presents
+ * back to the target whenever it posts to the corresponding SQ. When an
+ * SQE is sent on a SQ, FC effectively considers the SQE, or rather the
+ * command contained within the SQE, an io, and assigns a FC exchange
+ * to it. The SQE and the associated SQ handle are sent in the initial
+ * CMD IU sent on the exchange. All transfers relative to the io occur
+ * as part of the exchange. The CQE is the last thing for the io,
+ * which is transferred (explicitly or implicitly) with the RSP IU
+ * sent on the exchange. After the CQE is received, the FC exchange is
+ * terminated and the exchange may be used on a different io.
+ *
+ * The transport-to-LLDD API has the transport issue a new fcp io
+ * request to the LLDD. The LLDD then allocates a FC exchange
+ * resource and transfers the command. The LLDD will then process all
+ * steps to complete the io. Upon completion, the transport done routine
+ * is called.
+ *
+ * So - while the operation is outstanding to the LLDD, there is a link
+ * level FC exchange resource that is also outstanding. This must be
+ * considered in all cleanup operations.
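+ *
+ * Illustrative timeline for a single read on one exchange (a sketch,
+ * not normative; framing details are up to the LLDD and the fabric):
+ *
+ *   transport: ->fcp_io()        CMD IU (SQE + SQ handle) sent
+ *   target:                      data frames delivered on the exchange
+ *   target:                      RSP IU (ERSP with CQE, or zero-length)
+ *   lldd:      freq->done() -->  nvme_fc_fcpio_done()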
+ */
+static int
+nvme_fc_start_fcp_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue,
+ struct nvme_fc_fcp_op *op, u32 data_len,
+ enum nvmefc_fcp_datadir io_dir)
+{
+ struct nvme_fc_cmd_iu *cmdiu = &op->cmd_iu;
+ struct nvme_command *sqe = &cmdiu->sqe;
+ u32 csn;
+ int ret;
+
+ if (!nvme_fc_ctrl_get(ctrl))
+ return BLK_MQ_RQ_QUEUE_ERROR;
+
+ /* format the FC-NVME CMD IU and fcp_req */
+ cmdiu->connection_id = cpu_to_be64(queue->connection_id);
+ csn = atomic_inc_return(&queue->csn);
+ cmdiu->csn = cpu_to_be32(csn);
+ cmdiu->data_len = cpu_to_be32(data_len);
+ switch (io_dir) {
+ case NVMEFC_FCP_WRITE:
+ cmdiu->flags = FCNVME_CMD_FLAGS_WRITE;
+ break;
+ case NVMEFC_FCP_READ:
+ cmdiu->flags = FCNVME_CMD_FLAGS_READ;
+ break;
+ case NVMEFC_FCP_NODATA:
+ cmdiu->flags = 0;
+ break;
+ }
+ op->fcp_req.payload_length = data_len;
+ op->fcp_req.io_dir = io_dir;
+ op->fcp_req.transferred_length = 0;
+ op->fcp_req.rcv_rsplen = 0;
+ op->fcp_req.status = 0;
+ op->fcp_req.sqid = cpu_to_le16(queue->qnum);
+
+ /*
+ * validate per fabric rules, set fields mandated by fabric spec
+ * as well as those by FC-NVME spec.
+ */
+ WARN_ON_ONCE(sqe->common.metadata);
+ WARN_ON_ONCE(sqe->common.dptr.prp1);
+ WARN_ON_ONCE(sqe->common.dptr.prp2);
+ sqe->common.flags |= NVME_CMD_SGL_METABUF;
+
+ /*
+ * format SQE DPTR field per FC-NVME rules
+ * type=data block descr; subtype=offset;
+ * offset is currently 0.
+ */
+ sqe->rw.dptr.sgl.type = NVME_SGL_FMT_OFFSET;
+ sqe->rw.dptr.sgl.length = cpu_to_le32(data_len);
+ sqe->rw.dptr.sgl.addr = 0;
+
+ /* odd that we set the command_id - should come from nvme-fabrics */
+ WARN_ON_ONCE(sqe->common.command_id != cpu_to_le16(op->rqno));
+
+ if (op->rq) { /* skipped on aens */
+ ret = nvme_fc_map_data(ctrl, op->rq, op);
+ if (ret < 0) {
+ dev_err(queue->ctrl->ctrl.device,
+ "Failed to map data (%d)\n", ret);
+ nvme_cleanup_cmd(op->rq);
+ nvme_fc_ctrl_put(ctrl);
+ return (ret == -ENOMEM || ret == -EAGAIN) ?
+ BLK_MQ_RQ_QUEUE_BUSY : BLK_MQ_RQ_QUEUE_ERROR;
+ }
+ }
+
+ fc_dma_sync_single_for_device(ctrl->lport->dev, op->fcp_req.cmddma,
+ sizeof(op->cmd_iu), DMA_TO_DEVICE);
+
+ atomic_set(&op->state, FCPOP_STATE_ACTIVE);
+
+ if (op->rq)
+ blk_mq_start_request(op->rq);
+
+ ret = ctrl->lport->ops->fcp_io(&ctrl->lport->localport,
+ &ctrl->rport->remoteport,
+ queue->lldd_handle, &op->fcp_req);
+
+ if (ret) {
+ dev_err(ctrl->dev,
+ "Send nvme command failed - lldd returned %d.\n", ret);
+
+ if (op->rq) { /* normal request */
+ nvme_fc_unmap_data(ctrl, op->rq, op);
+ nvme_cleanup_cmd(op->rq);
+ }
+ /* else - aen. no cleanup needed */
+
+ nvme_fc_ctrl_put(ctrl);
+
+ if (ret != -EBUSY)
+ return BLK_MQ_RQ_QUEUE_ERROR;
+
+ if (op->rq) {
+ blk_mq_stop_hw_queues(op->rq->q);
+ blk_mq_delay_queue(queue->hctx, NVMEFC_QUEUE_DELAY);
+ }
+ return BLK_MQ_RQ_QUEUE_BUSY;
+ }
+
+ return BLK_MQ_RQ_QUEUE_OK;
+}
+
+static int
+nvme_fc_queue_rq(struct blk_mq_hw_ctx *hctx,
+ const struct blk_mq_queue_data *bd)
+{
+ struct nvme_ns *ns = hctx->queue->queuedata;
+ struct nvme_fc_queue *queue = hctx->driver_data;
+ struct nvme_fc_ctrl *ctrl = queue->ctrl;
+ struct request *rq = bd->rq;
+ struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
+ struct nvme_fc_cmd_iu *cmdiu = &op->cmd_iu;
+ struct nvme_command *sqe = &cmdiu->sqe;
+ enum nvmefc_fcp_datadir io_dir;
+ u32 data_len;
+ int ret;
+
+ ret = nvme_setup_cmd(ns, rq, sqe);
+ if (ret)
+ return ret;
+
+ data_len = nvme_map_len(rq);
+ if (data_len)
+ io_dir = ((rq_data_dir(rq) == WRITE) ?
+ NVMEFC_FCP_WRITE : NVMEFC_FCP_READ);
+ else
+ io_dir = NVMEFC_FCP_NODATA;
+
+ return nvme_fc_start_fcp_op(ctrl, queue, op, data_len, io_dir);
+}
+
+static struct blk_mq_tags *
+nvme_fc_tagset(struct nvme_fc_queue *queue)
+{
+ if (queue->qnum == 0)
+ return queue->ctrl->admin_tag_set.tags[queue->qnum];
+
+ return queue->ctrl->tag_set.tags[queue->qnum - 1];
+}
+
+static int
+nvme_fc_poll(struct blk_mq_hw_ctx *hctx, unsigned int tag)
+{
+ struct nvme_fc_queue *queue = hctx->driver_data;
+ struct nvme_fc_ctrl *ctrl = queue->ctrl;
+ struct request *req;
+ struct nvme_fc_fcp_op *op;
+
+ req = blk_mq_tag_to_rq(nvme_fc_tagset(queue), tag);
+ if (!req) {
+ dev_err(queue->ctrl->ctrl.device,
+ "tag 0x%x on QNum %#x not found\n",
+ tag, queue->qnum);
+ return 0;
+ }
+
+ op = blk_mq_rq_to_pdu(req);
+
+ if ((atomic_read(&op->state) == FCPOP_STATE_ACTIVE) &&
+ (ctrl->lport->ops->poll_queue))
+ ctrl->lport->ops->poll_queue(&ctrl->lport->localport,
+ queue->lldd_handle);
+
+	return atomic_read(&op->state) != FCPOP_STATE_ACTIVE;
+}
+
+static void
+nvme_fc_submit_async_event(struct nvme_ctrl *arg, int aer_idx)
+{
+ struct nvme_fc_ctrl *ctrl = to_fc_ctrl(arg);
+ struct nvme_fc_fcp_op *aen_op;
+ int ret;
+
+	if (aer_idx >= NVME_FC_NR_AEN_COMMANDS)
+ return;
+
+ aen_op = &ctrl->aen_ops[aer_idx];
+
+ ret = nvme_fc_start_fcp_op(ctrl, aen_op->queue, aen_op, 0,
+ NVMEFC_FCP_NODATA);
+ if (ret)
+ dev_err(ctrl->ctrl.device,
+ "failed async event work [%d]\n", aer_idx);
+}
+
+static void
+nvme_fc_complete_rq(struct request *rq)
+{
+ struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
+ struct nvme_fc_ctrl *ctrl = op->ctrl;
+ int error = 0, state;
+
+ state = atomic_xchg(&op->state, FCPOP_STATE_IDLE);
+
+ nvme_cleanup_cmd(rq);
+
+ nvme_fc_unmap_data(ctrl, rq, op);
+
+ if (unlikely(rq->errors)) {
+ if (nvme_req_needs_retry(rq, rq->errors)) {
+ nvme_requeue_req(rq);
+ return;
+ }
+
+ if (rq->cmd_type == REQ_TYPE_DRV_PRIV)
+ error = rq->errors;
+ else
+ error = nvme_error_status(rq->errors);
+ }
+
+ nvme_fc_ctrl_put(ctrl);
+
+ blk_mq_end_request(rq, error);
+}
+
+static struct blk_mq_ops nvme_fc_mq_ops = {
+ .queue_rq = nvme_fc_queue_rq,
+ .complete = nvme_fc_complete_rq,
+ .init_request = nvme_fc_init_request,
+ .exit_request = nvme_fc_exit_request,
+ .reinit_request = nvme_fc_reinit_request,
+ .init_hctx = nvme_fc_init_hctx,
+ .poll = nvme_fc_poll,
+ .timeout = nvme_fc_timeout,
+};
+
+static struct blk_mq_ops nvme_fc_admin_mq_ops = {
+ .queue_rq = nvme_fc_queue_rq,
+ .complete = nvme_fc_complete_rq,
+ .init_request = nvme_fc_init_admin_request,
+ .exit_request = nvme_fc_exit_request,
+ .reinit_request = nvme_fc_reinit_request,
+ .init_hctx = nvme_fc_init_admin_hctx,
+ .timeout = nvme_fc_timeout,
+};
+
+static int
+nvme_fc_configure_admin_queue(struct nvme_fc_ctrl *ctrl)
+{
+ u32 segs;
+ int error;
+
+ nvme_fc_init_queue(ctrl, 0, NVME_FC_AQ_BLKMQ_DEPTH);
+
+ error = nvme_fc_connect_admin_queue(ctrl, &ctrl->queues[0],
+ NVME_FC_AQ_BLKMQ_DEPTH,
+ (NVME_FC_AQ_BLKMQ_DEPTH / 4));
+ if (error)
+ return error;
+
+ memset(&ctrl->admin_tag_set, 0, sizeof(ctrl->admin_tag_set));
+ ctrl->admin_tag_set.ops = &nvme_fc_admin_mq_ops;
+ ctrl->admin_tag_set.queue_depth = NVME_FC_AQ_BLKMQ_DEPTH;
+ ctrl->admin_tag_set.reserved_tags = 2; /* fabric connect + Keep-Alive */
+ ctrl->admin_tag_set.numa_node = NUMA_NO_NODE;
+ ctrl->admin_tag_set.cmd_size = sizeof(struct nvme_fc_fcp_op) +
+ (SG_CHUNK_SIZE *
+ sizeof(struct scatterlist)) +
+ ctrl->lport->ops->fcprqst_priv_sz;
+ ctrl->admin_tag_set.driver_data = ctrl;
+ ctrl->admin_tag_set.nr_hw_queues = 1;
+ ctrl->admin_tag_set.timeout = ADMIN_TIMEOUT;
+
+ error = blk_mq_alloc_tag_set(&ctrl->admin_tag_set);
+ if (error)
+ goto out_free_queue;
+
+ ctrl->ctrl.admin_q = blk_mq_init_queue(&ctrl->admin_tag_set);
+ if (IS_ERR(ctrl->ctrl.admin_q)) {
+ error = PTR_ERR(ctrl->ctrl.admin_q);
+ goto out_free_tagset;
+ }
+
+ error = __nvme_fc_create_hw_queue(ctrl, &ctrl->queues[0], 0,
+ NVME_FC_AQ_BLKMQ_DEPTH);
+ if (error)
+ goto out_cleanup_queue;
+
+ error = nvmf_connect_admin_queue(&ctrl->ctrl);
+ if (error)
+ goto out_delete_hw_queue;
+
+ error = nvmf_reg_read64(&ctrl->ctrl, NVME_REG_CAP, &ctrl->cap);
+ if (error) {
+ dev_err(ctrl->ctrl.device,
+ "prop_get NVME_REG_CAP failed\n");
+ goto out_delete_hw_queue;
+ }
+
+ ctrl->ctrl.sqsize =
+ min_t(int, NVME_CAP_MQES(ctrl->cap) + 1, ctrl->ctrl.sqsize);
+
+ error = nvme_enable_ctrl(&ctrl->ctrl, ctrl->cap);
+ if (error)
+ goto out_delete_hw_queue;
+
+ segs = min_t(u32, NVME_FC_MAX_SEGMENTS,
+ ctrl->lport->ops->max_sgl_segments);
+ ctrl->ctrl.max_hw_sectors = (segs - 1) << (PAGE_SHIFT - 9);
+
+ error = nvme_init_identify(&ctrl->ctrl);
+ if (error)
+ goto out_delete_hw_queue;
+
+ nvme_start_keep_alive(&ctrl->ctrl);
+
+ return 0;
+
+out_delete_hw_queue:
+ __nvme_fc_delete_hw_queue(ctrl, &ctrl->queues[0], 0);
+out_cleanup_queue:
+ blk_cleanup_queue(ctrl->ctrl.admin_q);
+out_free_tagset:
+ blk_mq_free_tag_set(&ctrl->admin_tag_set);
+out_free_queue:
+ nvme_fc_free_queue(&ctrl->queues[0]);
+ return error;
+}
+
+/*
+ * This routine is used by the transport when it needs to find active
+ * io on a queue that is to be terminated. The transport uses
+ * blk_mq_tagset_busy_iter() to find the busy requests, which then
+ * invokes this routine on each of them to kill them one by one.
+ *
+ * As FC allocates FC exchange for each io, the transport must contact
+ * the LLDD to terminate the exchange, thus releasing the FC exchange.
+ * After terminating the exchange the LLDD will call the transport's
+ * normal io done path for the request, but it will have an aborted
+ * status. The done path will return the io request back to the block
+ * layer with an error status.
+ */
+static void
+nvme_fc_terminate_exchange(struct request *req, void *data, bool reserved)
+{
+ struct nvme_ctrl *nctrl = data;
+ struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl);
+ struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(req);
+	int status;
+
+ if (!blk_mq_request_started(req))
+ return;
+
+ /* this performs an ABTS-LS on the FC exchange for the io */
+ status = __nvme_fc_abort_op(ctrl, op);
+	/*
+	 * If __nvme_fc_abort_op failed, the io wasn't active to abort;
+	 * consider it done and assume the completion path is already
+	 * completing it in parallel.
+	 */
+	if (status)
+		return;
+}
+
+
+/*
+ * This routine stops operation of the controller. Admin and IO queues
+ * are stopped, outstanding ios on them terminated, and the nvme ctrl
+ * is shutdown.
+ */
+static void
+nvme_fc_shutdown_ctrl(struct nvme_fc_ctrl *ctrl)
+{
+ /*
+ * If io queues are present, stop them and terminate all outstanding
+ * ios on them. As FC allocates FC exchange for each io, the
+ * transport must contact the LLDD to terminate the exchange,
+	 * thus releasing the FC exchange. We use blk_mq_tagset_busy_iter()
+ * to tell us what io's are busy and invoke a transport routine
+ * to kill them with the LLDD. After terminating the exchange
+ * the LLDD will call the transport's normal io done path, but it
+ * will have an aborted status. The done path will return the
+ * io requests back to the block layer as part of normal completions
+ * (but with error status).
+ */
+ if (ctrl->queue_count > 1) {
+ nvme_stop_queues(&ctrl->ctrl);
+ blk_mq_tagset_busy_iter(&ctrl->tag_set,
+ nvme_fc_terminate_exchange, &ctrl->ctrl);
+ }
+
+ if (ctrl->ctrl.state == NVME_CTRL_LIVE)
+ nvme_shutdown_ctrl(&ctrl->ctrl);
+
+ /*
+ * now clean up the admin queue. Same thing as above.
+	 * use blk_mq_tagset_busy_iter() and the transport routine to
+ * terminate the exchanges.
+ */
+ blk_mq_stop_hw_queues(ctrl->ctrl.admin_q);
+ blk_mq_tagset_busy_iter(&ctrl->admin_tag_set,
+ nvme_fc_terminate_exchange, &ctrl->ctrl);
+}
+
+/*
+ * Called to teardown an association.
+ * May be called with association fully in place or partially in place.
+ */
+static void
+__nvme_fc_remove_ctrl(struct nvme_fc_ctrl *ctrl)
+{
+ nvme_stop_keep_alive(&ctrl->ctrl);
+
+ /* stop and terminate ios on admin and io queues */
+ nvme_fc_shutdown_ctrl(ctrl);
+
+ /*
+ * tear down the controller
+ * This will result in the last reference on the nvme ctrl to
+ * expire, calling the transport nvme_fc_free_nvme_ctrl() callback.
+	 * From there, the transport will tear down its logical queues and
+ * association.
+ */
+ nvme_uninit_ctrl(&ctrl->ctrl);
+
+ nvme_put_ctrl(&ctrl->ctrl);
+}
+
+static void
+nvme_fc_del_ctrl_work(struct work_struct *work)
+{
+ struct nvme_fc_ctrl *ctrl =
+ container_of(work, struct nvme_fc_ctrl, delete_work);
+
+ __nvme_fc_remove_ctrl(ctrl);
+}
+
+static int
+__nvme_fc_del_ctrl(struct nvme_fc_ctrl *ctrl)
+{
+ if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_DELETING))
+ return -EBUSY;
+
+ if (!queue_work(nvme_fc_wq, &ctrl->delete_work))
+ return -EBUSY;
+
+ return 0;
+}
+
+/*
+ * Request from nvme core layer to delete the controller
+ */
+static int
+nvme_fc_del_nvme_ctrl(struct nvme_ctrl *nctrl)
+{
+ struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl);
+ struct nvme_fc_rport *rport = ctrl->rport;
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&rport->lock, flags);
+ ret = __nvme_fc_del_ctrl(ctrl);
+ spin_unlock_irqrestore(&rport->lock, flags);
+ if (ret)
+ return ret;
+
+ flush_work(&ctrl->delete_work);
+
+ return 0;
+}
+
+static int
+nvme_fc_reset_nvme_ctrl(struct nvme_ctrl *nctrl)
+{
+ return -EIO;
+}
+
+static const struct nvme_ctrl_ops nvme_fc_ctrl_ops = {
+ .name = "fc",
+ .module = THIS_MODULE,
+ .is_fabrics = true,
+ .reg_read32 = nvmf_reg_read32,
+ .reg_read64 = nvmf_reg_read64,
+ .reg_write32 = nvmf_reg_write32,
+ .reset_ctrl = nvme_fc_reset_nvme_ctrl,
+ .free_ctrl = nvme_fc_free_nvme_ctrl,
+ .submit_async_event = nvme_fc_submit_async_event,
+ .delete_ctrl = nvme_fc_del_nvme_ctrl,
+ .get_subsysnqn = nvmf_get_subsysnqn,
+ .get_address = nvmf_get_address,
+};
+
+static int
+nvme_fc_create_io_queues(struct nvme_fc_ctrl *ctrl)
+{
+ struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
+ int ret;
+
+ ret = nvme_set_queue_count(&ctrl->ctrl, &opts->nr_io_queues);
+ if (ret) {
+ dev_info(ctrl->ctrl.device,
+ "set_queue_count failed: %d\n", ret);
+ return ret;
+ }
+
+ ctrl->queue_count = opts->nr_io_queues + 1;
+ if (!opts->nr_io_queues)
+ return 0;
+
+ dev_info(ctrl->ctrl.device, "creating %d I/O queues.\n",
+ opts->nr_io_queues);
+
+ nvme_fc_init_io_queues(ctrl);
+
+ memset(&ctrl->tag_set, 0, sizeof(ctrl->tag_set));
+ ctrl->tag_set.ops = &nvme_fc_mq_ops;
+ ctrl->tag_set.queue_depth = ctrl->ctrl.opts->queue_size;
+ ctrl->tag_set.reserved_tags = 1; /* fabric connect */
+ ctrl->tag_set.numa_node = NUMA_NO_NODE;
+ ctrl->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
+ ctrl->tag_set.cmd_size = sizeof(struct nvme_fc_fcp_op) +
+ (SG_CHUNK_SIZE *
+ sizeof(struct scatterlist)) +
+ ctrl->lport->ops->fcprqst_priv_sz;
+ ctrl->tag_set.driver_data = ctrl;
+ ctrl->tag_set.nr_hw_queues = ctrl->queue_count - 1;
+ ctrl->tag_set.timeout = NVME_IO_TIMEOUT;
+
+ ret = blk_mq_alloc_tag_set(&ctrl->tag_set);
+ if (ret)
+ return ret;
+
+ ctrl->ctrl.tagset = &ctrl->tag_set;
+
+ ctrl->ctrl.connect_q = blk_mq_init_queue(&ctrl->tag_set);
+ if (IS_ERR(ctrl->ctrl.connect_q)) {
+ ret = PTR_ERR(ctrl->ctrl.connect_q);
+ goto out_free_tag_set;
+ }
+
+ ret = nvme_fc_create_hw_io_queues(ctrl, ctrl->ctrl.opts->queue_size);
+ if (ret)
+ goto out_cleanup_blk_queue;
+
+ ret = nvme_fc_connect_io_queues(ctrl, ctrl->ctrl.opts->queue_size);
+ if (ret)
+ goto out_delete_hw_queues;
+
+ return 0;
+
+out_delete_hw_queues:
+ nvme_fc_delete_hw_io_queues(ctrl);
+out_cleanup_blk_queue:
+ nvme_stop_keep_alive(&ctrl->ctrl);
+ blk_cleanup_queue(ctrl->ctrl.connect_q);
+out_free_tag_set:
+ blk_mq_free_tag_set(&ctrl->tag_set);
+ nvme_fc_free_io_queues(ctrl);
+
+ /* force put free routine to ignore io queues */
+ ctrl->ctrl.tagset = NULL;
+
+ return ret;
+}
+
+
+static struct nvme_ctrl *
+__nvme_fc_create_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
+ struct nvme_fc_lport *lport, struct nvme_fc_rport *rport)
+{
+ struct nvme_fc_ctrl *ctrl;
+ unsigned long flags;
+ int ret, idx;
+ bool changed;
+
+ ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
+ if (!ctrl) {
+ ret = -ENOMEM;
+ goto out_fail;
+ }
+
+ idx = ida_simple_get(&nvme_fc_ctrl_cnt, 0, 0, GFP_KERNEL);
+ if (idx < 0) {
+ ret = -ENOSPC;
+ goto out_free_ctrl;
+ }
+
+ ctrl->ctrl.opts = opts;
+ INIT_LIST_HEAD(&ctrl->ctrl_list);
+ INIT_LIST_HEAD(&ctrl->ls_req_list);
+ ctrl->lport = lport;
+ ctrl->rport = rport;
+ ctrl->dev = lport->dev;
+ ctrl->state = FCCTRL_INIT;
+ ctrl->cnum = idx;
+
+ ret = nvme_init_ctrl(&ctrl->ctrl, dev, &nvme_fc_ctrl_ops, 0);
+ if (ret)
+ goto out_free_ida;
+
+ get_device(ctrl->dev);
+ kref_init(&ctrl->ref);
+
+ INIT_WORK(&ctrl->delete_work, nvme_fc_del_ctrl_work);
+ spin_lock_init(&ctrl->lock);
+
+ /* io queue count */
+ ctrl->queue_count = min_t(unsigned int,
+ opts->nr_io_queues,
+ lport->ops->max_hw_queues);
+ opts->nr_io_queues = ctrl->queue_count; /* so opts has valid value */
+ ctrl->queue_count++; /* +1 for admin queue */
+
+ ctrl->ctrl.sqsize = opts->queue_size - 1;
+ ctrl->ctrl.kato = opts->kato;
+
+ ret = -ENOMEM;
+ ctrl->queues = kcalloc(ctrl->queue_count, sizeof(struct nvme_fc_queue),
+ GFP_KERNEL);
+ if (!ctrl->queues)
+ goto out_uninit_ctrl;
+
+ ret = nvme_fc_configure_admin_queue(ctrl);
+ if (ret)
+ goto out_uninit_ctrl;
+
+ /* sanity checks */
+
+ /* FC-NVME supports 64-byte SQE only */
+ if (ctrl->ctrl.ioccsz != 4) {
+ dev_err(ctrl->ctrl.device, "ioccsz %d is not supported!\n",
+ ctrl->ctrl.ioccsz);
+ goto out_remove_admin_queue;
+ }
+ /* FC-NVME supports 16-byte CQE only */
+ if (ctrl->ctrl.iorcsz != 1) {
+ dev_err(ctrl->ctrl.device, "iorcsz %d is not supported!\n",
+ ctrl->ctrl.iorcsz);
+ goto out_remove_admin_queue;
+ }
+ /* FC-NVME does not have other data in the capsule */
+ if (ctrl->ctrl.icdoff) {
+ dev_err(ctrl->ctrl.device, "icdoff %d is not supported!\n",
+ ctrl->ctrl.icdoff);
+ goto out_remove_admin_queue;
+ }
+
+ /* FC-NVME supports normal SGL Data Block Descriptors */
+
+ if (opts->queue_size > ctrl->ctrl.maxcmd) {
+ /* warn if maxcmd is lower than queue_size */
+ dev_warn(ctrl->ctrl.device,
+ "queue_size %zu > ctrl maxcmd %u, reducing "
+ "to queue_size\n",
+ opts->queue_size, ctrl->ctrl.maxcmd);
+ opts->queue_size = ctrl->ctrl.maxcmd;
+ }
+
+ ret = nvme_fc_init_aen_ops(ctrl);
+ if (ret)
+ goto out_exit_aen_ops;
+
+ if (ctrl->queue_count > 1) {
+ ret = nvme_fc_create_io_queues(ctrl);
+ if (ret)
+ goto out_exit_aen_ops;
+ }
+
+ spin_lock_irqsave(&ctrl->lock, flags);
+ ctrl->state = FCCTRL_ACTIVE;
+ spin_unlock_irqrestore(&ctrl->lock, flags);
+
+ changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE);
+ WARN_ON_ONCE(!changed);
+
+ dev_info(ctrl->ctrl.device,
+ "NVME-FC{%d}: new ctrl: NQN \"%s\" (%p)\n",
+		ctrl->cnum, ctrl->ctrl.opts->subsysnqn, ctrl);
+
+ kref_get(&ctrl->ctrl.kref);
+
+ spin_lock_irqsave(&rport->lock, flags);
+ list_add_tail(&ctrl->ctrl_list, &rport->ctrl_list);
+ spin_unlock_irqrestore(&rport->lock, flags);
+
+ if (opts->nr_io_queues) {
+ nvme_queue_scan(&ctrl->ctrl);
+ nvme_queue_async_events(&ctrl->ctrl);
+ }
+
+ return &ctrl->ctrl;
+
+out_exit_aen_ops:
+ nvme_fc_exit_aen_ops(ctrl);
+out_remove_admin_queue:
+ /* send a Disconnect(association) LS to fc-nvme target */
+ nvme_fc_xmt_disconnect_assoc(ctrl);
+ nvme_stop_keep_alive(&ctrl->ctrl);
+ nvme_fc_destroy_admin_queue(ctrl);
+out_uninit_ctrl:
+ nvme_uninit_ctrl(&ctrl->ctrl);
+ nvme_put_ctrl(&ctrl->ctrl);
+ if (ret > 0)
+ ret = -EIO;
+ /* exit via here will follow ctlr ref point callbacks to free */
+ return ERR_PTR(ret);
+
+out_free_ida:
+ ida_simple_remove(&nvme_fc_ctrl_cnt, ctrl->cnum);
+out_free_ctrl:
+ kfree(ctrl);
+out_fail:
+ nvme_fc_rport_put(rport);
+ /* exit via here doesn't follow ctlr ref points */
+ return ERR_PTR(ret);
+}
+
+enum {
+ FCT_TRADDR_ERR = 0,
+ FCT_TRADDR_WWNN = 1 << 0,
+ FCT_TRADDR_WWPN = 1 << 1,
+};
+
+struct nvmet_fc_traddr {
+ u64 nn;
+ u64 pn;
+};
+
+static const match_table_t traddr_opt_tokens = {
+ { FCT_TRADDR_WWNN, "nn-%s" },
+ { FCT_TRADDR_WWPN, "pn-%s" },
+ { FCT_TRADDR_ERR, NULL }
+};
+
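+/*
+ * traddr/host_traddr strings take the form "nn-<wwnn>:pn-<wwpn>";
+ * e.g. (WWN values illustrative):
+ *
+ *   nn-0x200000109b1234f5:pn-0x100000109b1234f5
+ */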
+static int
+nvme_fc_parse_address(struct nvmet_fc_traddr *traddr, char *buf)
+{
+ substring_t args[MAX_OPT_ARGS];
+ char *options, *o, *p;
+ int token, ret = 0;
+ u64 token64;
+
+ options = o = kstrdup(buf, GFP_KERNEL);
+ if (!options)
+ return -ENOMEM;
+
+ while ((p = strsep(&o, ":\n")) != NULL) {
+ if (!*p)
+ continue;
+
+ token = match_token(p, traddr_opt_tokens, args);
+ switch (token) {
+ case FCT_TRADDR_WWNN:
+ if (match_u64(args, &token64)) {
+ ret = -EINVAL;
+ goto out;
+ }
+ traddr->nn = token64;
+ break;
+ case FCT_TRADDR_WWPN:
+ if (match_u64(args, &token64)) {
+ ret = -EINVAL;
+ goto out;
+ }
+ traddr->pn = token64;
+ break;
+ default:
+ pr_warn("unknown traddr token or missing value '%s'\n",
+ p);
+ ret = -EINVAL;
+ goto out;
+ }
+ }
+
+out:
+ kfree(options);
+ return ret;
+}
+
+static struct nvme_ctrl *
+nvme_fc_create_ctrl(struct device *dev, struct nvmf_ctrl_options *opts)
+{
+ struct nvme_fc_lport *lport;
+ struct nvme_fc_rport *rport;
+ struct nvmet_fc_traddr laddr = { 0L, 0L };
+ struct nvmet_fc_traddr raddr = { 0L, 0L };
+ unsigned long flags;
+ int ret;
+
+ ret = nvme_fc_parse_address(&raddr, opts->traddr);
+ if (ret || !raddr.nn || !raddr.pn)
+ return ERR_PTR(-EINVAL);
+
+ ret = nvme_fc_parse_address(&laddr, opts->host_traddr);
+ if (ret || !laddr.nn || !laddr.pn)
+ return ERR_PTR(-EINVAL);
+
+ /* find the host and remote ports to connect together */
+ spin_lock_irqsave(&nvme_fc_lock, flags);
+ list_for_each_entry(lport, &nvme_fc_lport_list, port_list) {
+ if (lport->localport.node_name != laddr.nn ||
+ lport->localport.port_name != laddr.pn)
+ continue;
+
+ list_for_each_entry(rport, &lport->endp_list, endp_list) {
+ if (rport->remoteport.node_name != raddr.nn ||
+ rport->remoteport.port_name != raddr.pn)
+ continue;
+
+			/* if we fail to get a reference, fall through; will error */
+ if (!nvme_fc_rport_get(rport))
+ break;
+
+ spin_unlock_irqrestore(&nvme_fc_lock, flags);
+
+ return __nvme_fc_create_ctrl(dev, opts, lport, rport);
+ }
+ }
+ spin_unlock_irqrestore(&nvme_fc_lock, flags);
+
+ return ERR_PTR(-ENOENT);
+}
+
+
+static struct nvmf_transport_ops nvme_fc_transport = {
+ .name = "fc",
+ .required_opts = NVMF_OPT_TRADDR | NVMF_OPT_HOST_TRADDR,
+ .allowed_opts = NVMF_OPT_RECONNECT_DELAY,
+ .create_ctrl = nvme_fc_create_ctrl,
+};
+
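+/*
+ * Example fabrics connect that would reach nvme_fc_create_ctrl()
+ * through this registration (a sketch; the NQN and WWN values are
+ * illustrative):
+ *
+ *   nvme connect -t fc -n nqn.2016-06.io.example:subsys1 \
+ *       -a nn-0x200000109b1234f5:pn-0x100000109b1234f5 \
+ *       -w nn-0x200000109b5678a1:pn-0x100000109b5678a1
+ */
+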
+static int __init nvme_fc_init_module(void)
+{
+ nvme_fc_wq = create_workqueue("nvme_fc_wq");
+ if (!nvme_fc_wq)
+ return -ENOMEM;
+
+ nvmf_register_transport(&nvme_fc_transport);
+ return 0;
+}
+
+static void __exit nvme_fc_exit_module(void)
+{
+ /* sanity check - all lports should be removed */
+ if (!list_empty(&nvme_fc_lport_list))
+ pr_warn("%s: localport list not empty\n", __func__);
+
+ nvmf_unregister_transport(&nvme_fc_transport);
+
+ destroy_workqueue(nvme_fc_wq);
+
+ ida_destroy(&nvme_fc_local_port_cnt);
+ ida_destroy(&nvme_fc_ctrl_cnt);
+}
+
+module_init(nvme_fc_init_module);
+module_exit(nvme_fc_exit_module);
+
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/nvme/host/lightnvm.c b/drivers/nvme/host/lightnvm.c
index 5daf2f4be0cd..588d4a34c083 100644
--- a/drivers/nvme/host/lightnvm.c
+++ b/drivers/nvme/host/lightnvm.c
@@ -146,14 +146,6 @@ struct nvme_nvm_command {
};
};
-struct nvme_nvm_completion {
- __le64 result; /* Used by LightNVM to return ppa completions */
- __le16 sq_head; /* how much of this queue may be reclaimed */
- __le16 sq_id; /* submission queue that generated this entry */
- __u16 command_id; /* of the command which completed */
- __le16 status; /* did the command fail, and if so, why? */
-};
-
#define NVME_NVM_LP_MLC_PAIRS 886
struct nvme_nvm_lp_mlc {
__le16 num_pairs;
@@ -360,6 +352,7 @@ static int nvme_nvm_get_l2p_tbl(struct nvm_dev *nvmdev, u64 slba, u32 nlb,
while (nlb) {
u32 cmd_nlb = min(nlb_pr_rq, nlb);
+ u64 elba = slba + cmd_nlb;
c.l2p.slba = cpu_to_le64(cmd_slba);
c.l2p.nlb = cpu_to_le32(cmd_nlb);
@@ -373,6 +366,14 @@ static int nvme_nvm_get_l2p_tbl(struct nvm_dev *nvmdev, u64 slba, u32 nlb,
goto out;
}
+ if (unlikely(elba > nvmdev->total_secs)) {
+ pr_err("nvm: L2P data from device is out of bounds!\n");
+			ret = -EINVAL;
+			goto out;
+ }
+
+ /* Transform physical address to target address space */
+ nvmdev->mt->part_to_tgt(nvmdev, entries, cmd_nlb);
+
if (update_l2p(cmd_slba, cmd_nlb, entries, priv)) {
ret = -EINTR;
goto out;
@@ -391,11 +392,12 @@ static int nvme_nvm_get_bb_tbl(struct nvm_dev *nvmdev, struct ppa_addr ppa,
u8 *blks)
{
struct request_queue *q = nvmdev->q;
+ struct nvm_geo *geo = &nvmdev->geo;
struct nvme_ns *ns = q->queuedata;
struct nvme_ctrl *ctrl = ns->ctrl;
struct nvme_nvm_command c = {};
struct nvme_nvm_bb_tbl *bb_tbl;
- int nr_blks = nvmdev->blks_per_lun * nvmdev->plane_mode;
+ int nr_blks = geo->blks_per_lun * geo->plane_mode;
int tblsz = sizeof(struct nvme_nvm_bb_tbl) + nr_blks;
int ret = 0;
@@ -436,7 +438,7 @@ static int nvme_nvm_get_bb_tbl(struct nvm_dev *nvmdev, struct ppa_addr ppa,
goto out;
}
- memcpy(blks, bb_tbl->blk, nvmdev->blks_per_lun * nvmdev->plane_mode);
+ memcpy(blks, bb_tbl->blk, geo->blks_per_lun * geo->plane_mode);
out:
kfree(bb_tbl);
return ret;
@@ -481,14 +483,11 @@ static inline void nvme_nvm_rqtocmd(struct request *rq, struct nvm_rq *rqd,
static void nvme_nvm_end_io(struct request *rq, int error)
{
struct nvm_rq *rqd = rq->end_io_data;
- struct nvme_nvm_completion *cqe = rq->special;
-
- if (cqe)
- rqd->ppa_status = le64_to_cpu(cqe->result);
+ rqd->ppa_status = nvme_req(rq)->result.u64;
nvm_end_io(rqd, error);
- kfree(rq->cmd);
+ kfree(nvme_req(rq)->cmd);
blk_mq_free_request(rq);
}
@@ -500,20 +499,18 @@ static int nvme_nvm_submit_io(struct nvm_dev *dev, struct nvm_rq *rqd)
struct bio *bio = rqd->bio;
struct nvme_nvm_command *cmd;
- rq = blk_mq_alloc_request(q, bio_data_dir(bio), 0);
- if (IS_ERR(rq))
+ cmd = kzalloc(sizeof(struct nvme_nvm_command), GFP_KERNEL);
+ if (!cmd)
return -ENOMEM;
- cmd = kzalloc(sizeof(struct nvme_nvm_command) +
- sizeof(struct nvme_nvm_completion), GFP_KERNEL);
- if (!cmd) {
- blk_mq_free_request(rq);
+ rq = nvme_alloc_request(q, (struct nvme_command *)cmd, 0, NVME_QID_ANY);
+ if (IS_ERR(rq)) {
+ kfree(cmd);
return -ENOMEM;
}
+ rq->cmd_flags &= ~REQ_FAILFAST_DRIVER;
- rq->cmd_type = REQ_TYPE_DRV_PRIV;
rq->ioprio = bio_prio(bio);
-
if (bio_has_data(bio))
rq->nr_phys_segments = bio_phys_segments(q, bio);
@@ -522,10 +519,6 @@ static int nvme_nvm_submit_io(struct nvm_dev *dev, struct nvm_rq *rqd)
nvme_nvm_rqtocmd(rq, rqd, ns, cmd);
- rq->cmd = (unsigned char *)cmd;
- rq->cmd_len = sizeof(struct nvme_nvm_command);
- rq->special = cmd + 1;
-
rq->end_io_data = rqd;
blk_execute_rq_nowait(q, NULL, rq, 0, nvme_nvm_end_io);
@@ -543,6 +536,7 @@ static int nvme_nvm_erase_block(struct nvm_dev *dev, struct nvm_rq *rqd)
c.erase.nsid = cpu_to_le32(ns->ns_id);
c.erase.spba = cpu_to_le64(rqd->ppa_addr.ppa);
c.erase.length = cpu_to_le16(rqd->nr_ppas - 1);
+ c.erase.control = cpu_to_le16(rqd->flags);
return nvme_submit_sync_cmd(q, (struct nvme_command *)&c, NULL, 0);
}
@@ -592,12 +586,10 @@ static struct nvm_dev_ops nvme_nvm_dev_ops = {
.max_phys_sect = 64,
};
-int nvme_nvm_register(struct nvme_ns *ns, char *disk_name, int node,
- const struct attribute_group *attrs)
+int nvme_nvm_register(struct nvme_ns *ns, char *disk_name, int node)
{
struct request_queue *q = ns->queue;
struct nvm_dev *dev;
- int ret;
dev = nvm_alloc_dev(node);
if (!dev)
@@ -606,18 +598,10 @@ int nvme_nvm_register(struct nvme_ns *ns, char *disk_name, int node,
dev->q = q;
memcpy(dev->name, disk_name, DISK_NAME_LEN);
dev->ops = &nvme_nvm_dev_ops;
- dev->parent_dev = ns->ctrl->device;
dev->private_data = ns;
ns->ndev = dev;
- ret = nvm_register(dev);
-
- ns->lba_shift = ilog2(dev->sec_size);
-
- if (sysfs_create_group(&dev->dev.kobj, attrs))
- pr_warn("%s: failed to create sysfs group for identification\n",
- disk_name);
- return ret;
+ return nvm_register(dev);
}
void nvme_nvm_unregister(struct nvme_ns *ns)
@@ -625,6 +609,167 @@ void nvme_nvm_unregister(struct nvme_ns *ns)
nvm_unregister(ns->ndev);
}
+static ssize_t nvm_dev_attr_show(struct device *dev,
+ struct device_attribute *dattr, char *page)
+{
+ struct nvme_ns *ns = nvme_get_ns_from_dev(dev);
+ struct nvm_dev *ndev = ns->ndev;
+ struct nvm_id *id;
+ struct nvm_id_group *grp;
+ struct attribute *attr;
+
+ if (!ndev)
+ return 0;
+
+ id = &ndev->identity;
+ grp = &id->groups[0];
+ attr = &dattr->attr;
+
+ if (strcmp(attr->name, "version") == 0) {
+ return scnprintf(page, PAGE_SIZE, "%u\n", id->ver_id);
+ } else if (strcmp(attr->name, "vendor_opcode") == 0) {
+ return scnprintf(page, PAGE_SIZE, "%u\n", id->vmnt);
+ } else if (strcmp(attr->name, "capabilities") == 0) {
+ return scnprintf(page, PAGE_SIZE, "%u\n", id->cap);
+ } else if (strcmp(attr->name, "device_mode") == 0) {
+ return scnprintf(page, PAGE_SIZE, "%u\n", id->dom);
+ } else if (strcmp(attr->name, "media_manager") == 0) {
+ if (!ndev->mt)
+ return scnprintf(page, PAGE_SIZE, "%s\n", "none");
+ return scnprintf(page, PAGE_SIZE, "%s\n", ndev->mt->name);
+ } else if (strcmp(attr->name, "ppa_format") == 0) {
+ return scnprintf(page, PAGE_SIZE,
+ "0x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x\n",
+ id->ppaf.ch_offset, id->ppaf.ch_len,
+ id->ppaf.lun_offset, id->ppaf.lun_len,
+ id->ppaf.pln_offset, id->ppaf.pln_len,
+ id->ppaf.blk_offset, id->ppaf.blk_len,
+ id->ppaf.pg_offset, id->ppaf.pg_len,
+ id->ppaf.sect_offset, id->ppaf.sect_len);
+ } else if (strcmp(attr->name, "media_type") == 0) { /* u8 */
+ return scnprintf(page, PAGE_SIZE, "%u\n", grp->mtype);
+ } else if (strcmp(attr->name, "flash_media_type") == 0) {
+ return scnprintf(page, PAGE_SIZE, "%u\n", grp->fmtype);
+ } else if (strcmp(attr->name, "num_channels") == 0) {
+ return scnprintf(page, PAGE_SIZE, "%u\n", grp->num_ch);
+ } else if (strcmp(attr->name, "num_luns") == 0) {
+ return scnprintf(page, PAGE_SIZE, "%u\n", grp->num_lun);
+ } else if (strcmp(attr->name, "num_planes") == 0) {
+ return scnprintf(page, PAGE_SIZE, "%u\n", grp->num_pln);
+ } else if (strcmp(attr->name, "num_blocks") == 0) { /* u16 */
+ return scnprintf(page, PAGE_SIZE, "%u\n", grp->num_blk);
+ } else if (strcmp(attr->name, "num_pages") == 0) {
+ return scnprintf(page, PAGE_SIZE, "%u\n", grp->num_pg);
+ } else if (strcmp(attr->name, "page_size") == 0) {
+ return scnprintf(page, PAGE_SIZE, "%u\n", grp->fpg_sz);
+ } else if (strcmp(attr->name, "hw_sector_size") == 0) {
+ return scnprintf(page, PAGE_SIZE, "%u\n", grp->csecs);
+ } else if (strcmp(attr->name, "oob_sector_size") == 0) {/* u32 */
+ return scnprintf(page, PAGE_SIZE, "%u\n", grp->sos);
+ } else if (strcmp(attr->name, "read_typ") == 0) {
+ return scnprintf(page, PAGE_SIZE, "%u\n", grp->trdt);
+ } else if (strcmp(attr->name, "read_max") == 0) {
+ return scnprintf(page, PAGE_SIZE, "%u\n", grp->trdm);
+ } else if (strcmp(attr->name, "prog_typ") == 0) {
+ return scnprintf(page, PAGE_SIZE, "%u\n", grp->tprt);
+ } else if (strcmp(attr->name, "prog_max") == 0) {
+ return scnprintf(page, PAGE_SIZE, "%u\n", grp->tprm);
+ } else if (strcmp(attr->name, "erase_typ") == 0) {
+ return scnprintf(page, PAGE_SIZE, "%u\n", grp->tbet);
+ } else if (strcmp(attr->name, "erase_max") == 0) {
+ return scnprintf(page, PAGE_SIZE, "%u\n", grp->tbem);
+ } else if (strcmp(attr->name, "multiplane_modes") == 0) {
+ return scnprintf(page, PAGE_SIZE, "0x%08x\n", grp->mpos);
+ } else if (strcmp(attr->name, "media_capabilities") == 0) {
+ return scnprintf(page, PAGE_SIZE, "0x%08x\n", grp->mccap);
+ } else if (strcmp(attr->name, "max_phys_secs") == 0) {
+ return scnprintf(page, PAGE_SIZE, "%u\n",
+ ndev->ops->max_phys_sect);
+ } else {
+ return scnprintf(page,
+ PAGE_SIZE,
+ "Unhandled attr(%s) in `nvm_dev_attr_show`\n",
+ attr->name);
+ }
+}
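
The show routine above funnels every attribute through one handler and dispatches on attr->name. A minimal userspace sketch of that pattern, with a hypothetical lookup table standing in for struct nvm_id (not the lightnvm interface itself):

#include <stdio.h>
#include <string.h>

struct attr_entry {
	const char *name;
	unsigned int value;
};

/* hypothetical identity fields; the real code reads struct nvm_id */
static const struct attr_entry id_table[] = {
	{ "version",      1 },
	{ "num_channels", 16 },
	{ "num_luns",     8 },
};

/* dispatch on the attribute name, as nvm_dev_attr_show() does */
static int attr_show(const char *name, char *page, size_t len)
{
	size_t i;

	for (i = 0; i < sizeof(id_table) / sizeof(id_table[0]); i++)
		if (strcmp(name, id_table[i].name) == 0)
			return snprintf(page, len, "%u\n", id_table[i].value);
	return snprintf(page, len, "Unhandled attr(%s)\n", name);
}

int main(void)
{
	char page[64];

	attr_show("num_luns", page, sizeof(page));
	fputs(page, stdout);		/* 8 */
	attr_show("bogus", page, sizeof(page));
	fputs(page, stdout);		/* Unhandled attr(bogus) */
	return 0;
}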
+
+#define NVM_DEV_ATTR_RO(_name) \
+ DEVICE_ATTR(_name, S_IRUGO, nvm_dev_attr_show, NULL)
+
+static NVM_DEV_ATTR_RO(version);
+static NVM_DEV_ATTR_RO(vendor_opcode);
+static NVM_DEV_ATTR_RO(capabilities);
+static NVM_DEV_ATTR_RO(device_mode);
+static NVM_DEV_ATTR_RO(ppa_format);
+static NVM_DEV_ATTR_RO(media_manager);
+
+static NVM_DEV_ATTR_RO(media_type);
+static NVM_DEV_ATTR_RO(flash_media_type);
+static NVM_DEV_ATTR_RO(num_channels);
+static NVM_DEV_ATTR_RO(num_luns);
+static NVM_DEV_ATTR_RO(num_planes);
+static NVM_DEV_ATTR_RO(num_blocks);
+static NVM_DEV_ATTR_RO(num_pages);
+static NVM_DEV_ATTR_RO(page_size);
+static NVM_DEV_ATTR_RO(hw_sector_size);
+static NVM_DEV_ATTR_RO(oob_sector_size);
+static NVM_DEV_ATTR_RO(read_typ);
+static NVM_DEV_ATTR_RO(read_max);
+static NVM_DEV_ATTR_RO(prog_typ);
+static NVM_DEV_ATTR_RO(prog_max);
+static NVM_DEV_ATTR_RO(erase_typ);
+static NVM_DEV_ATTR_RO(erase_max);
+static NVM_DEV_ATTR_RO(multiplane_modes);
+static NVM_DEV_ATTR_RO(media_capabilities);
+static NVM_DEV_ATTR_RO(max_phys_secs);
+
+static struct attribute *nvm_dev_attrs[] = {
+ &dev_attr_version.attr,
+ &dev_attr_vendor_opcode.attr,
+ &dev_attr_capabilities.attr,
+ &dev_attr_device_mode.attr,
+ &dev_attr_media_manager.attr,
+
+ &dev_attr_ppa_format.attr,
+ &dev_attr_media_type.attr,
+ &dev_attr_flash_media_type.attr,
+ &dev_attr_num_channels.attr,
+ &dev_attr_num_luns.attr,
+ &dev_attr_num_planes.attr,
+ &dev_attr_num_blocks.attr,
+ &dev_attr_num_pages.attr,
+ &dev_attr_page_size.attr,
+ &dev_attr_hw_sector_size.attr,
+ &dev_attr_oob_sector_size.attr,
+ &dev_attr_read_typ.attr,
+ &dev_attr_read_max.attr,
+ &dev_attr_prog_typ.attr,
+ &dev_attr_prog_max.attr,
+ &dev_attr_erase_typ.attr,
+ &dev_attr_erase_max.attr,
+ &dev_attr_multiplane_modes.attr,
+ &dev_attr_media_capabilities.attr,
+ &dev_attr_max_phys_secs.attr,
+ NULL,
+};
+
+static const struct attribute_group nvm_dev_attr_group = {
+ .name = "lightnvm",
+ .attrs = nvm_dev_attrs,
+};
+
+int nvme_nvm_register_sysfs(struct nvme_ns *ns)
+{
+ return sysfs_create_group(&disk_to_dev(ns->disk)->kobj,
+ &nvm_dev_attr_group);
+}
+
+void nvme_nvm_unregister_sysfs(struct nvme_ns *ns)
+{
+ sysfs_remove_group(&disk_to_dev(ns->disk)->kobj,
+ &nvm_dev_attr_group);
+}
+
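The pair above reduces to attaching and detaching one named attribute_group on the disk's kobject. A self-contained module sketch of just that mechanism, against its own throwaway kobject (all demo_* names are invented for illustration; only the kobject/sysfs calls are the real APIs):

#include <linux/module.h>
#include <linux/kobject.h>
#include <linux/sysfs.h>

static struct kobject *demo_kobj;

static ssize_t demo_show(struct kobject *kobj, struct kobj_attribute *attr,
			 char *buf)
{
	return sprintf(buf, "%d\n", 42);
}

static struct kobj_attribute demo_attribute = __ATTR_RO(demo);

static struct attribute *demo_attrs[] = {
	&demo_attribute.attr,
	NULL,
};

/* .name creates a subdirectory, as "lightnvm" does above */
static const struct attribute_group demo_group = {
	.name	= "demo_group",
	.attrs	= demo_attrs,
};

static int __init demo_init(void)
{
	int ret;

	demo_kobj = kobject_create_and_add("group_demo", kernel_kobj);
	if (!demo_kobj)
		return -ENOMEM;
	ret = sysfs_create_group(demo_kobj, &demo_group);
	if (ret)
		kobject_put(demo_kobj);
	return ret;
}

static void __exit demo_exit(void)
{
	sysfs_remove_group(demo_kobj, &demo_group);
	kobject_put(demo_kobj);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");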
/* move to shared place when used in multiple places. */
#define PCI_VENDOR_ID_CNEX 0x1d1d
#define PCI_DEVICE_ID_CNEX_WL 0x2807
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index d47f5a5d18c7..bd5321441d12 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -79,6 +79,20 @@ enum nvme_quirks {
NVME_QUIRK_DELAY_BEFORE_CHK_RDY = (1 << 3),
};
+/*
+ * Common request structure for NVMe passthrough. All drivers must have
+ * this structure as the first member of their request-private data.
+ */
+struct nvme_request {
+ struct nvme_command *cmd;
+ union nvme_result result;
+};
+
+static inline struct nvme_request *nvme_req(struct request *req)
+{
+ return blk_mq_rq_to_pdu(req);
+}
+
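nvme_req() can return the PDU pointer unchanged only because the contract above pins struct nvme_request to offset zero of every driver's PDU. A userspace sketch of that first-member idiom (the demo_* types are made-up stand-ins):

#include <stddef.h>
#include <stdio.h>

struct demo_nvme_request {
	int cmd;		/* stand-in for struct nvme_command * */
	int result;
};

/* per-driver PDU: the common struct MUST be the first member */
struct demo_pci_iod {
	struct demo_nvme_request req;
	int aborted;
};

static struct demo_nvme_request *demo_nvme_req(void *pdu)
{
	return pdu;	/* valid only because req sits at offset 0 */
}

int main(void)
{
	struct demo_pci_iod iod = { .req = { .cmd = 1 } };

	_Static_assert(offsetof(struct demo_pci_iod, req) == 0,
		       "common request struct must come first");
	demo_nvme_req(&iod)->result = 7;
	printf("%d\n", iod.req.result);		/* 7 */
	return 0;
}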
/* The below value is the specific amount of delay needed before checking
* readiness in case of the PCI_DEVICE(0x1c58, 0x0003), which needs the
* NVME_QUIRK_DELAY_BEFORE_CHK_RDY quirk enabled. The value (in ms) was
@@ -222,8 +236,10 @@ static inline unsigned nvme_map_len(struct request *rq)
static inline void nvme_cleanup_cmd(struct request *req)
{
- if (req_op(req) == REQ_OP_DISCARD)
- kfree(req->completion_data);
+ if (req->rq_flags & RQF_SPECIAL_PAYLOAD) {
+ kfree(page_address(req->special_vec.bv_page) +
+ req->special_vec.bv_offset);
+ }
}
static inline int nvme_error_status(u16 status)
@@ -261,8 +277,8 @@ void nvme_queue_scan(struct nvme_ctrl *ctrl);
void nvme_remove_namespaces(struct nvme_ctrl *ctrl);
#define NVME_NR_AERS 1
-void nvme_complete_async_event(struct nvme_ctrl *ctrl,
- struct nvme_completion *cqe);
+void nvme_complete_async_event(struct nvme_ctrl *ctrl, __le16 status,
+ union nvme_result *res);
void nvme_queue_async_events(struct nvme_ctrl *ctrl);
void nvme_stop_queues(struct nvme_ctrl *ctrl);
@@ -278,7 +294,7 @@ int nvme_setup_cmd(struct nvme_ns *ns, struct request *req,
int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
void *buf, unsigned bufflen);
int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
- struct nvme_completion *cqe, void *buffer, unsigned bufflen,
+ union nvme_result *result, void *buffer, unsigned bufflen,
unsigned timeout, int qid, int at_head, int flags);
int nvme_submit_user_cmd(struct request_queue *q, struct nvme_command *cmd,
void __user *ubuffer, unsigned bufflen, u32 *result,
@@ -307,36 +323,33 @@ int nvme_sg_get_version_num(int __user *ip);
#ifdef CONFIG_NVM
int nvme_nvm_ns_supported(struct nvme_ns *ns, struct nvme_id_ns *id);
-int nvme_nvm_register(struct nvme_ns *ns, char *disk_name, int node,
- const struct attribute_group *attrs);
+int nvme_nvm_register(struct nvme_ns *ns, char *disk_name, int node);
void nvme_nvm_unregister(struct nvme_ns *ns);
-
-static inline struct nvme_ns *nvme_get_ns_from_dev(struct device *dev)
-{
- if (dev->type->devnode)
- return dev_to_disk(dev)->private_data;
-
- return (container_of(dev, struct nvm_dev, dev))->private_data;
-}
+int nvme_nvm_register_sysfs(struct nvme_ns *ns);
+void nvme_nvm_unregister_sysfs(struct nvme_ns *ns);
#else
static inline int nvme_nvm_register(struct nvme_ns *ns, char *disk_name,
- int node,
- const struct attribute_group *attrs)
+ int node)
{
return 0;
}
static inline void nvme_nvm_unregister(struct nvme_ns *ns) {};
-
+static inline int nvme_nvm_register_sysfs(struct nvme_ns *ns)
+{
+ return 0;
+}
+static inline void nvme_nvm_unregister_sysfs(struct nvme_ns *ns) {}
static inline int nvme_nvm_ns_supported(struct nvme_ns *ns, struct nvme_id_ns *id)
{
return 0;
}
+#endif /* CONFIG_NVM */
+
static inline struct nvme_ns *nvme_get_ns_from_dev(struct device *dev)
{
return dev_to_disk(dev)->private_data;
}
-#endif /* CONFIG_NVM */
int __init nvme_core_init(void);
void nvme_core_exit(void);
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 5e52034ab010..3d21a154dce7 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -50,7 +50,7 @@
#define NVME_AQ_DEPTH 256
#define SQ_SIZE(depth) (depth * sizeof(struct nvme_command))
#define CQ_SIZE(depth) (depth * sizeof(struct nvme_completion))
-
+
/*
* We handle AEN commands ourselves and don't even let the
* block layer know about them.
@@ -141,6 +141,7 @@ struct nvme_queue {
* allocated to store the PRP list.
*/
struct nvme_iod {
+ struct nvme_request req;
struct nvme_queue *nvmeq;
int aborted;
int npages; /* In the PRP list. 0 means small pool in use */
@@ -302,14 +303,14 @@ static void __nvme_submit_cmd(struct nvme_queue *nvmeq,
static __le64 **iod_list(struct request *req)
{
struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
- return (__le64 **)(iod->sg + req->nr_phys_segments);
+ return (__le64 **)(iod->sg + blk_rq_nr_phys_segments(req));
}
static int nvme_init_iod(struct request *rq, unsigned size,
struct nvme_dev *dev)
{
struct nvme_iod *iod = blk_mq_rq_to_pdu(rq);
- int nseg = rq->nr_phys_segments;
+ int nseg = blk_rq_nr_phys_segments(rq);
if (nseg > NVME_INT_PAGES || size > NVME_INT_BYTES(dev)) {
iod->sg = kmalloc(nvme_iod_alloc_size(dev, size, nseg), GFP_ATOMIC);
@@ -324,11 +325,11 @@ static int nvme_init_iod(struct request *rq, unsigned size,
iod->nents = 0;
iod->length = size;
- if (!(rq->cmd_flags & REQ_DONTPREP)) {
+ if (!(rq->rq_flags & RQF_DONTPREP)) {
rq->retries = 0;
- rq->cmd_flags |= REQ_DONTPREP;
+ rq->rq_flags |= RQF_DONTPREP;
}
- return 0;
+ return BLK_MQ_RQ_QUEUE_OK;
}
static void nvme_free_iod(struct nvme_dev *dev, struct request *req)
@@ -339,8 +340,6 @@ static void nvme_free_iod(struct nvme_dev *dev, struct request *req)
__le64 **list = iod_list(req);
dma_addr_t prp_dma = iod->first_dma;
- nvme_cleanup_cmd(req);
-
if (iod->npages == 0)
dma_pool_free(dev->prp_small_pool, list[0], prp_dma);
for (i = 0; i < iod->npages; i++) {
@@ -510,7 +509,7 @@ static int nvme_map_data(struct nvme_dev *dev, struct request *req,
DMA_TO_DEVICE : DMA_FROM_DEVICE;
int ret = BLK_MQ_RQ_QUEUE_ERROR;
- sg_init_table(iod->sg, req->nr_phys_segments);
+ sg_init_table(iod->sg, blk_rq_nr_phys_segments(req));
iod->nents = blk_rq_map_sg(q, req, iod->sg);
if (!iod->nents)
goto out;
@@ -566,6 +565,7 @@ static void nvme_unmap_data(struct nvme_dev *dev, struct request *req)
}
}
+ nvme_cleanup_cmd(req);
nvme_free_iod(dev, req);
}
@@ -596,22 +596,21 @@ static int nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
}
}
- map_len = nvme_map_len(req);
- ret = nvme_init_iod(req, map_len, dev);
- if (ret)
+ ret = nvme_setup_cmd(ns, req, &cmnd);
+ if (ret != BLK_MQ_RQ_QUEUE_OK)
return ret;
- ret = nvme_setup_cmd(ns, req, &cmnd);
- if (ret)
- goto out;
+ map_len = nvme_map_len(req);
+ ret = nvme_init_iod(req, map_len, dev);
+ if (ret != BLK_MQ_RQ_QUEUE_OK)
+ goto out_free_cmd;
- if (req->nr_phys_segments)
+ if (blk_rq_nr_phys_segments(req))
ret = nvme_map_data(dev, req, map_len, &cmnd);
- if (ret)
- goto out;
+ if (ret != BLK_MQ_RQ_QUEUE_OK)
+ goto out_cleanup_iod;
- cmnd.common.command_id = req->tag;
blk_mq_start_request(req);
spin_lock_irq(&nvmeq->q_lock);
@@ -621,14 +620,16 @@ static int nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
else
ret = BLK_MQ_RQ_QUEUE_ERROR;
spin_unlock_irq(&nvmeq->q_lock);
- goto out;
+ goto out_cleanup_iod;
}
__nvme_submit_cmd(nvmeq, &cmnd);
nvme_process_cq(nvmeq);
spin_unlock_irq(&nvmeq->q_lock);
return BLK_MQ_RQ_QUEUE_OK;
-out:
+out_cleanup_iod:
nvme_free_iod(dev, req);
+out_free_cmd:
+ nvme_cleanup_cmd(req);
return ret;
}
@@ -703,13 +704,13 @@ static void __nvme_process_cq(struct nvme_queue *nvmeq, unsigned int *tag)
*/
if (unlikely(nvmeq->qid == 0 &&
cqe.command_id >= NVME_AQ_BLKMQ_DEPTH)) {
- nvme_complete_async_event(&nvmeq->dev->ctrl, &cqe);
+ nvme_complete_async_event(&nvmeq->dev->ctrl,
+ cqe.status, &cqe.result);
continue;
}
req = blk_mq_tag_to_rq(*nvmeq->tags, cqe.command_id);
- if (req->cmd_type == REQ_TYPE_DRV_PRIV && req->special)
- memcpy(req->special, &cqe, sizeof(cqe));
+ nvme_req(req)->result = cqe.result;
blk_mq_complete_request(req, le16_to_cpu(cqe.status) >> 1);
}
@@ -1281,6 +1282,24 @@ static bool nvme_should_reset(struct nvme_dev *dev, u32 csts)
return true;
}
+static void nvme_warn_reset(struct nvme_dev *dev, u32 csts)
+{
+ /* Read a config register to help see what died. */
+ u16 pci_status;
+ int result;
+
+ result = pci_read_config_word(to_pci_dev(dev->dev), PCI_STATUS,
+ &pci_status);
+ if (result == PCIBIOS_SUCCESSFUL)
+ dev_warn(dev->dev,
+ "controller is down; will reset: CSTS=0x%x, PCI_STATUS=0x%hx\n",
+ csts, pci_status);
+ else
+ dev_warn(dev->dev,
+ "controller is down; will reset: CSTS=0x%x, PCI_STATUS read failed (%d)\n",
+ csts, result);
+}
+
static void nvme_watchdog_timer(unsigned long data)
{
struct nvme_dev *dev = (struct nvme_dev *)data;
@@ -1289,9 +1308,7 @@ static void nvme_watchdog_timer(unsigned long data)
/* Skip controllers under certain specific conditions. */
if (nvme_should_reset(dev, csts)) {
if (!nvme_reset(dev))
- dev_warn(dev->dev,
- "Failed status: 0x%x, reset controller.\n",
- csts);
+ nvme_warn_reset(dev, csts);
return;
}
@@ -1332,7 +1349,7 @@ static ssize_t nvme_cmb_show(struct device *dev,
{
struct nvme_dev *ndev = to_nvme_dev(dev_get_drvdata(dev));
- return snprintf(buf, PAGE_SIZE, "cmbloc : x%08x\ncmbsz : x%08x\n",
+ return scnprintf(buf, PAGE_SIZE, "cmbloc : x%08x\ncmbsz : x%08x\n",
ndev->cmbloc, ndev->cmbsz);
}
static DEVICE_ATTR(cmb, S_IRUGO, nvme_cmb_show, NULL);
@@ -2085,9 +2102,6 @@ static const struct pci_error_handlers nvme_err_handler = {
.reset_notify = nvme_reset_notify,
};
-/* Move to pci_ids.h later */
-#define PCI_CLASS_STORAGE_EXPRESS 0x010802
-
static const struct pci_device_id nvme_id_table[] = {
{ PCI_VDEVICE(INTEL, 0x0953),
.driver_data = NVME_QUIRK_STRIPE_SIZE |
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index 3d25add36d91..f587af345889 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -28,7 +28,6 @@
#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>
-#include <rdma/ib_cm.h>
#include <linux/nvme-rdma.h>
#include "nvme.h"
@@ -43,6 +42,28 @@
#define NVME_RDMA_MAX_INLINE_SEGMENTS 1
+static const char *const nvme_rdma_cm_status_strs[] = {
+ [NVME_RDMA_CM_INVALID_LEN] = "invalid length",
+ [NVME_RDMA_CM_INVALID_RECFMT] = "invalid record format",
+ [NVME_RDMA_CM_INVALID_QID] = "invalid queue ID",
+ [NVME_RDMA_CM_INVALID_HSQSIZE] = "invalid host SQ size",
+ [NVME_RDMA_CM_INVALID_HRQSIZE] = "invalid host RQ size",
+ [NVME_RDMA_CM_NO_RSC] = "resource not found",
+ [NVME_RDMA_CM_INVALID_IRD] = "invalid IRD",
+ [NVME_RDMA_CM_INVALID_ORD] = "invalid ORD",
+};
+
+static const char *nvme_rdma_cm_msg(enum nvme_rdma_cm_status status)
+{
+ size_t index = status;
+
+ if (index < ARRAY_SIZE(nvme_rdma_cm_status_strs) &&
+ nvme_rdma_cm_status_strs[index])
+ return nvme_rdma_cm_status_strs[index];
+ else
+ return "unrecognized reason";
+}
+
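The lookup has to survive both out-of-range codes and the NULL holes that a sparse designated-initializer table leaves behind. The same defensive shape, standalone (status codes invented for the demo):

#include <stdio.h>

enum demo_status { DEMO_OK = 1, DEMO_BAD_LEN = 2, DEMO_NO_RSC = 5 };

static const char *const demo_strs[] = {
	[DEMO_OK]	= "ok",
	[DEMO_BAD_LEN]	= "invalid length",
	[DEMO_NO_RSC]	= "resource not found",
	/* indexes 0, 3 and 4 stay NULL */
};

#define ARRAY_SIZE(a)	(sizeof(a) / sizeof((a)[0]))

static const char *demo_msg(unsigned int status)
{
	if (status < ARRAY_SIZE(demo_strs) && demo_strs[status])
		return demo_strs[status];
	return "unrecognized reason";
}

int main(void)
{
	printf("%s\n", demo_msg(2));	/* invalid length */
	printf("%s\n", demo_msg(3));	/* unrecognized reason: NULL hole */
	printf("%s\n", demo_msg(99));	/* unrecognized reason: out of range */
	return 0;
}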
/*
* We handle AEN commands ourselves and don't even let the
* block layer know about them.
@@ -66,6 +87,7 @@ struct nvme_rdma_qe {
struct nvme_rdma_queue;
struct nvme_rdma_request {
+ struct nvme_request req;
struct ib_mr *mr;
struct nvme_rdma_qe sqe;
struct ib_sge sge[1 + NVME_RDMA_MAX_INLINE_SEGMENTS];
@@ -241,7 +263,9 @@ out_free_ring:
static void nvme_rdma_qp_event(struct ib_event *event, void *context)
{
- pr_debug("QP event %d\n", event->event);
+ pr_debug("QP event %s (%d)\n",
+ ib_event_msg(event->event), event->event);
}
static int nvme_rdma_wait_for_cm(struct nvme_rdma_queue *queue)
@@ -963,8 +987,7 @@ static int nvme_rdma_map_data(struct nvme_rdma_queue *queue,
struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
struct nvme_rdma_device *dev = queue->device;
struct ib_device *ibdev = dev->dev;
- int nents, count;
- int ret;
+ int count, ret;
req->num_sge = 1;
req->inline_data = false;
@@ -976,16 +999,14 @@ static int nvme_rdma_map_data(struct nvme_rdma_queue *queue,
return nvme_rdma_set_sg_null(c);
req->sg_table.sgl = req->first_sgl;
- ret = sg_alloc_table_chained(&req->sg_table, rq->nr_phys_segments,
- req->sg_table.sgl);
+ ret = sg_alloc_table_chained(&req->sg_table,
+ blk_rq_nr_phys_segments(rq), req->sg_table.sgl);
if (ret)
return -ENOMEM;
- nents = blk_rq_map_sg(rq->q, rq, req->sg_table.sgl);
- BUG_ON(nents > rq->nr_phys_segments);
- req->nents = nents;
+ req->nents = blk_rq_map_sg(rq->q, rq, req->sg_table.sgl);
- count = ib_dma_map_sg(ibdev, req->sg_table.sgl, nents,
+ count = ib_dma_map_sg(ibdev, req->sg_table.sgl, req->nents,
rq_data_dir(rq) == WRITE ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
if (unlikely(count <= 0)) {
sg_free_table_chained(&req->sg_table, true);
@@ -1130,13 +1151,10 @@ static void nvme_rdma_submit_async_event(struct nvme_ctrl *arg, int aer_idx)
static int nvme_rdma_process_nvme_rsp(struct nvme_rdma_queue *queue,
struct nvme_completion *cqe, struct ib_wc *wc, int tag)
{
- u16 status = le16_to_cpu(cqe->status);
struct request *rq;
struct nvme_rdma_request *req;
int ret = 0;
- status >>= 1;
-
rq = blk_mq_tag_to_rq(nvme_rdma_tagset(queue), cqe->command_id);
if (!rq) {
dev_err(queue->ctrl->ctrl.device,
@@ -1147,9 +1165,6 @@ static int nvme_rdma_process_nvme_rsp(struct nvme_rdma_queue *queue,
}
req = blk_mq_rq_to_pdu(rq);
- if (rq->cmd_type == REQ_TYPE_DRV_PRIV && rq->special)
- memcpy(rq->special, cqe, sizeof(*cqe));
-
if (rq->tag == tag)
ret = 1;
@@ -1157,8 +1172,8 @@ static int nvme_rdma_process_nvme_rsp(struct nvme_rdma_queue *queue,
wc->ex.invalidate_rkey == req->mr->rkey)
req->mr->need_inval = false;
- blk_mq_complete_request(rq, status);
-
+ req->req.result = cqe->result;
+ blk_mq_complete_request(rq, le16_to_cpu(cqe->status) >> 1);
return ret;
}
@@ -1186,7 +1201,8 @@ static int __nvme_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc, int tag)
*/
if (unlikely(nvme_rdma_queue_idx(queue) == 0 &&
cqe->command_id >= NVME_RDMA_AQ_BLKMQ_DEPTH))
- nvme_complete_async_event(&queue->ctrl->ctrl, cqe);
+ nvme_complete_async_event(&queue->ctrl->ctrl, cqe->status,
+ &cqe->result);
else
ret = nvme_rdma_process_nvme_rsp(queue, cqe, wc, tag);
ib_dma_sync_single_for_device(ibdev, qe->dma, len, DMA_FROM_DEVICE);
@@ -1220,16 +1236,24 @@ out_destroy_queue_ib:
static int nvme_rdma_conn_rejected(struct nvme_rdma_queue *queue,
struct rdma_cm_event *ev)
{
- if (ev->param.conn.private_data_len) {
- struct nvme_rdma_cm_rej *rej =
- (struct nvme_rdma_cm_rej *)ev->param.conn.private_data;
+ struct rdma_cm_id *cm_id = queue->cm_id;
+ int status = ev->status;
+ const char *rej_msg;
+ const struct nvme_rdma_cm_rej *rej_data;
+ u8 rej_data_len;
+
+ rej_msg = rdma_reject_msg(cm_id, status);
+ rej_data = rdma_consumer_reject_data(cm_id, ev, &rej_data_len);
+
+ if (rej_data && rej_data_len >= sizeof(u16)) {
+ u16 sts = le16_to_cpu(rej_data->sts);
dev_err(queue->ctrl->ctrl.device,
- "Connect rejected, status %d.", le16_to_cpu(rej->sts));
- /* XXX: Think of something clever to do here... */
+ "Connect rejected: status %d (%s) nvme status %d (%s).\n",
+ status, rej_msg, sts, nvme_rdma_cm_msg(sts));
} else {
dev_err(queue->ctrl->ctrl.device,
- "Connect rejected, no private data.\n");
+ "Connect rejected: status %d (%s).\n", status, rej_msg);
}
return -ECONNRESET;
@@ -1433,10 +1457,9 @@ static int nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx,
sizeof(struct nvme_command), DMA_TO_DEVICE);
ret = nvme_setup_cmd(ns, rq, c);
- if (ret)
+ if (ret != BLK_MQ_RQ_QUEUE_OK)
return ret;
- c->common.command_id = rq->tag;
blk_mq_start_request(rq);
map_len = nvme_map_len(rq);
@@ -1944,6 +1967,14 @@ static struct nvme_ctrl *nvme_rdma_create_ctrl(struct device *dev,
opts->queue_size = ctrl->ctrl.maxcmd;
}
+ if (opts->queue_size > ctrl->ctrl.sqsize + 1) {
+ /* warn if sqsize is lower than queue_size */
+ dev_warn(ctrl->ctrl.device,
+ "queue_size %zu > ctrl sqsize %u, clamping down\n",
+ opts->queue_size, ctrl->ctrl.sqsize + 1);
+ opts->queue_size = ctrl->ctrl.sqsize + 1;
+ }
+
if (opts->nr_io_queues) {
ret = nvme_rdma_create_io_queues(ctrl);
if (ret)
diff --git a/drivers/nvme/host/scsi.c b/drivers/nvme/host/scsi.c
index 3eaa4d27801e..b71e95044b43 100644
--- a/drivers/nvme/host/scsi.c
+++ b/drivers/nvme/host/scsi.c
@@ -1280,10 +1280,6 @@ static inline void nvme_trans_modesel_get_bd_len(u8 *parm_list, u8 cdb10,
static void nvme_trans_modesel_save_bd(struct nvme_ns *ns, u8 *parm_list,
u16 idx, u16 bd_len, u8 llbaa)
{
- u16 bd_num;
-
- bd_num = bd_len / ((llbaa == 0) ?
- SHORT_DESC_BLOCK : LONG_DESC_BLOCK);
/* Store block descriptor info if a FORMAT UNIT comes later */
/* TODO Saving 1st BD info; what to do if multiple BD received? */
if (llbaa == 0) {
@@ -1528,7 +1524,7 @@ static int nvme_trans_fmt_send_cmd(struct nvme_ns *ns, struct sg_io_hdr *hdr,
int nvme_sc;
struct nvme_id_ns *id_ns;
u8 i;
- u8 flbas, nlbaf;
+ u8 nlbaf;
u8 selected_lbaf = 0xFF;
u32 cdw10 = 0;
struct nvme_command c;
@@ -1539,7 +1535,6 @@ static int nvme_trans_fmt_send_cmd(struct nvme_ns *ns, struct sg_io_hdr *hdr,
if (res)
return res;
- flbas = (id_ns->flbas) & 0x0F;
nlbaf = id_ns->nlbaf;
for (i = 0; i < nlbaf; i++) {
@@ -2168,12 +2163,10 @@ static int nvme_trans_synchronize_cache(struct nvme_ns *ns,
static int nvme_trans_start_stop(struct nvme_ns *ns, struct sg_io_hdr *hdr,
u8 *cmd)
{
- u8 immed, pcmod, no_flush, start;
+ u8 immed, no_flush;
immed = cmd[1] & 0x01;
- pcmod = cmd[3] & 0x0f;
no_flush = cmd[4] & 0x04;
- start = cmd[4] & 0x01;
if (immed != 0) {
return nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
diff --git a/drivers/nvme/target/Kconfig b/drivers/nvme/target/Kconfig
index 3a5b9d0576cb..03e4ab65fe77 100644
--- a/drivers/nvme/target/Kconfig
+++ b/drivers/nvme/target/Kconfig
@@ -34,3 +34,27 @@ config NVME_TARGET_RDMA
devices over RDMA.
If unsure, say N.
+
+config NVME_TARGET_FC
+ tristate "NVMe over Fabrics FC target driver"
+ depends on NVME_TARGET
+ depends on HAS_DMA
+ help
+ This enables the NVMe FC target support, which allows exporting NVMe
+ devices over FC.
+
+ If unsure, say N.
+
+config NVME_TARGET_FCLOOP
+ tristate "NVMe over Fabrics FC Transport Loopback Test driver"
+ depends on NVME_TARGET
+ select NVME_CORE
+ select NVME_FABRICS
+ select SG_POOL
+ depends on NVME_FC
+ depends on NVME_TARGET_FC
+ help
+ This enables the NVMe FC loopback test support, which can be useful
+ to test NVMe-FC transport interfaces.
+
+ If unsure, say N.
diff --git a/drivers/nvme/target/Makefile b/drivers/nvme/target/Makefile
index b7a06232c9da..fecc14f535b2 100644
--- a/drivers/nvme/target/Makefile
+++ b/drivers/nvme/target/Makefile
@@ -2,8 +2,12 @@
obj-$(CONFIG_NVME_TARGET) += nvmet.o
obj-$(CONFIG_NVME_TARGET_LOOP) += nvme-loop.o
obj-$(CONFIG_NVME_TARGET_RDMA) += nvmet-rdma.o
+obj-$(CONFIG_NVME_TARGET_FC) += nvmet-fc.o
+obj-$(CONFIG_NVME_TARGET_FCLOOP) += nvme-fcloop.o
nvmet-y += core.o configfs.o admin-cmd.o io-cmd.o fabrics-cmd.o \
discovery.o
nvme-loop-y += loop.o
nvmet-rdma-y += rdma.o
+nvmet-fc-y += fc.o
+nvme-fcloop-y += fcloop.o
diff --git a/drivers/nvme/target/admin-cmd.c b/drivers/nvme/target/admin-cmd.c
index 6fe4c48a21e4..ec1ad2aa0a4c 100644
--- a/drivers/nvme/target/admin-cmd.c
+++ b/drivers/nvme/target/admin-cmd.c
@@ -237,7 +237,8 @@ static void nvmet_execute_identify_ctrl(struct nvmet_req *req)
id->maxcmd = cpu_to_le16(NVMET_MAX_CMD);
id->nn = cpu_to_le32(ctrl->subsys->max_nsid);
- id->oncs = cpu_to_le16(NVME_CTRL_ONCS_DSM);
+ id->oncs = cpu_to_le16(NVME_CTRL_ONCS_DSM |
+ NVME_CTRL_ONCS_WRITE_ZEROES);
/* XXX: don't report vwc if the underlying device is write through */
id->vwc = NVME_CTRL_VWC_PRESENT;
diff --git a/drivers/nvme/target/configfs.c b/drivers/nvme/target/configfs.c
index af5e2dc4a3d5..6f5074153dcd 100644
--- a/drivers/nvme/target/configfs.c
+++ b/drivers/nvme/target/configfs.c
@@ -37,6 +37,8 @@ static ssize_t nvmet_addr_adrfam_show(struct config_item *item,
return sprintf(page, "ipv6\n");
case NVMF_ADDR_FAMILY_IB:
return sprintf(page, "ib\n");
+ case NVMF_ADDR_FAMILY_FC:
+ return sprintf(page, "fc\n");
default:
return sprintf(page, "\n");
}
@@ -59,6 +61,8 @@ static ssize_t nvmet_addr_adrfam_store(struct config_item *item,
port->disc_addr.adrfam = NVMF_ADDR_FAMILY_IP6;
} else if (sysfs_streq(page, "ib")) {
port->disc_addr.adrfam = NVMF_ADDR_FAMILY_IB;
+ } else if (sysfs_streq(page, "fc")) {
+ port->disc_addr.adrfam = NVMF_ADDR_FAMILY_FC;
} else {
pr_err("Invalid value '%s' for adrfam\n", page);
return -EINVAL;
@@ -209,6 +213,8 @@ static ssize_t nvmet_addr_trtype_show(struct config_item *item,
return sprintf(page, "rdma\n");
case NVMF_TRTYPE_LOOP:
return sprintf(page, "loop\n");
+ case NVMF_TRTYPE_FC:
+ return sprintf(page, "fc\n");
default:
return sprintf(page, "\n");
}
@@ -229,6 +235,12 @@ static void nvmet_port_init_tsas_loop(struct nvmet_port *port)
memset(&port->disc_addr.tsas, 0, NVMF_TSAS_SIZE);
}
+static void nvmet_port_init_tsas_fc(struct nvmet_port *port)
+{
+ port->disc_addr.trtype = NVMF_TRTYPE_FC;
+ memset(&port->disc_addr.tsas, 0, NVMF_TSAS_SIZE);
+}
+
static ssize_t nvmet_addr_trtype_store(struct config_item *item,
const char *page, size_t count)
{
@@ -244,6 +256,8 @@ static ssize_t nvmet_addr_trtype_store(struct config_item *item,
nvmet_port_init_tsas_rdma(port);
} else if (sysfs_streq(page, "loop")) {
nvmet_port_init_tsas_loop(port);
+ } else if (sysfs_streq(page, "fc")) {
+ nvmet_port_init_tsas_fc(port);
} else {
pr_err("Invalid value '%s' for trtype\n", page);
return -EINVAL;
@@ -271,7 +285,7 @@ static ssize_t nvmet_ns_device_path_store(struct config_item *item,
mutex_lock(&subsys->lock);
ret = -EBUSY;
- if (nvmet_ns_enabled(ns))
+ if (ns->enabled)
goto out_unlock;
kfree(ns->device_path);
@@ -307,7 +321,7 @@ static ssize_t nvmet_ns_device_nguid_store(struct config_item *item,
int ret = 0;
mutex_lock(&subsys->lock);
- if (nvmet_ns_enabled(ns)) {
+ if (ns->enabled) {
ret = -EBUSY;
goto out_unlock;
}
@@ -339,7 +353,7 @@ CONFIGFS_ATTR(nvmet_ns_, device_nguid);
static ssize_t nvmet_ns_enable_show(struct config_item *item, char *page)
{
- return sprintf(page, "%d\n", nvmet_ns_enabled(to_nvmet_ns(item)));
+ return sprintf(page, "%d\n", to_nvmet_ns(item)->enabled);
}
static ssize_t nvmet_ns_enable_store(struct config_item *item,
@@ -466,7 +480,7 @@ out_free_link:
return ret;
}
-static int nvmet_port_subsys_drop_link(struct config_item *parent,
+static void nvmet_port_subsys_drop_link(struct config_item *parent,
struct config_item *target)
{
struct nvmet_port *port = to_nvmet_port(parent->ci_parent);
@@ -479,7 +493,7 @@ static int nvmet_port_subsys_drop_link(struct config_item *parent,
goto found;
}
up_write(&nvmet_config_sem);
- return -EINVAL;
+ return;
found:
list_del(&p->entry);
@@ -488,7 +502,6 @@ found:
nvmet_disable_port(port);
up_write(&nvmet_config_sem);
kfree(p);
- return 0;
}
static struct configfs_item_operations nvmet_port_subsys_item_ops = {
@@ -542,7 +555,7 @@ out_free_link:
return ret;
}
-static int nvmet_allowed_hosts_drop_link(struct config_item *parent,
+static void nvmet_allowed_hosts_drop_link(struct config_item *parent,
struct config_item *target)
{
struct nvmet_subsys *subsys = to_subsys(parent->ci_parent);
@@ -555,14 +568,13 @@ static int nvmet_allowed_hosts_drop_link(struct config_item *parent,
goto found;
}
up_write(&nvmet_config_sem);
- return -EINVAL;
+ return;
found:
list_del(&p->entry);
nvmet_genctr++;
up_write(&nvmet_config_sem);
kfree(p);
- return 0;
}
static struct configfs_item_operations nvmet_allowed_hosts_item_ops = {
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
index a21437a33adb..b1d66ed655c9 100644
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -264,7 +264,7 @@ int nvmet_ns_enable(struct nvmet_ns *ns)
int ret = 0;
mutex_lock(&subsys->lock);
- if (!list_empty(&ns->dev_link))
+ if (ns->enabled)
goto out_unlock;
ns->bdev = blkdev_get_by_path(ns->device_path, FMODE_READ | FMODE_WRITE,
@@ -309,6 +309,7 @@ int nvmet_ns_enable(struct nvmet_ns *ns)
list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
nvmet_add_async_event(ctrl, NVME_AER_TYPE_NOTICE, 0, 0);
+ ns->enabled = true;
ret = 0;
out_unlock:
mutex_unlock(&subsys->lock);
@@ -325,11 +326,11 @@ void nvmet_ns_disable(struct nvmet_ns *ns)
struct nvmet_ctrl *ctrl;
mutex_lock(&subsys->lock);
- if (list_empty(&ns->dev_link)) {
- mutex_unlock(&subsys->lock);
- return;
- }
- list_del_init(&ns->dev_link);
+ if (!ns->enabled)
+ goto out_unlock;
+
+ ns->enabled = false;
+ list_del_rcu(&ns->dev_link);
mutex_unlock(&subsys->lock);
/*
@@ -351,6 +352,7 @@ void nvmet_ns_disable(struct nvmet_ns *ns)
if (ns->bdev)
blkdev_put(ns->bdev, FMODE_WRITE|FMODE_READ);
+out_unlock:
mutex_unlock(&subsys->lock);
}
@@ -617,7 +619,7 @@ u16 nvmet_ctrl_find_get(const char *subsysnqn, const char *hostnqn, u16 cntlid,
if (!subsys) {
pr_warn("connect request for invalid subsystem %s!\n",
subsysnqn);
- req->rsp->result = IPO_IATTR_CONNECT_DATA(subsysnqn);
+ req->rsp->result.u32 = IPO_IATTR_CONNECT_DATA(subsysnqn);
return NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
}
@@ -638,7 +640,7 @@ u16 nvmet_ctrl_find_get(const char *subsysnqn, const char *hostnqn, u16 cntlid,
pr_warn("could not find controller %d for subsys %s / host %s\n",
cntlid, subsysnqn, hostnqn);
- req->rsp->result = IPO_IATTR_CONNECT_DATA(cntlid);
+ req->rsp->result.u32 = IPO_IATTR_CONNECT_DATA(cntlid);
status = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
out:
@@ -700,7 +702,7 @@ u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
if (!subsys) {
pr_warn("connect request for invalid subsystem %s!\n",
subsysnqn);
- req->rsp->result = IPO_IATTR_CONNECT_DATA(subsysnqn);
+ req->rsp->result.u32 = IPO_IATTR_CONNECT_DATA(subsysnqn);
goto out;
}
@@ -709,7 +711,7 @@ u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
if (!nvmet_host_allowed(req, subsys, hostnqn)) {
pr_info("connect by host %s for subsystem %s not allowed\n",
hostnqn, subsysnqn);
- req->rsp->result = IPO_IATTR_CONNECT_DATA(hostnqn);
+ req->rsp->result.u32 = IPO_IATTR_CONNECT_DATA(hostnqn);
up_read(&nvmet_config_sem);
goto out_put_subsystem;
}
diff --git a/drivers/nvme/target/fabrics-cmd.c b/drivers/nvme/target/fabrics-cmd.c
index 9a97ae67e656..f4088198cd0d 100644
--- a/drivers/nvme/target/fabrics-cmd.c
+++ b/drivers/nvme/target/fabrics-cmd.c
@@ -69,7 +69,7 @@ static void nvmet_execute_prop_get(struct nvmet_req *req)
}
}
- req->rsp->result64 = cpu_to_le64(val);
+ req->rsp->result.u64 = cpu_to_le64(val);
nvmet_req_complete(req, status);
}
@@ -125,7 +125,7 @@ static void nvmet_execute_admin_connect(struct nvmet_req *req)
d = kmap(sg_page(req->sg)) + req->sg->offset;
/* zero out initial completion result, assign values as needed */
- req->rsp->result = 0;
+ req->rsp->result.u32 = 0;
if (c->recfmt != 0) {
pr_warn("invalid connect version (%d).\n",
@@ -138,7 +138,7 @@ static void nvmet_execute_admin_connect(struct nvmet_req *req)
pr_warn("connect attempt for invalid controller ID %#x\n",
d->cntlid);
status = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
- req->rsp->result = IPO_IATTR_CONNECT_DATA(cntlid);
+ req->rsp->result.u32 = IPO_IATTR_CONNECT_DATA(cntlid);
goto out;
}
@@ -155,7 +155,7 @@ static void nvmet_execute_admin_connect(struct nvmet_req *req)
pr_info("creating controller %d for NQN %s.\n",
ctrl->cntlid, ctrl->hostnqn);
- req->rsp->result16 = cpu_to_le16(ctrl->cntlid);
+ req->rsp->result.u16 = cpu_to_le16(ctrl->cntlid);
out:
kunmap(sg_page(req->sg));
@@ -173,7 +173,7 @@ static void nvmet_execute_io_connect(struct nvmet_req *req)
d = kmap(sg_page(req->sg)) + req->sg->offset;
/* zero out initial completion result, assign values as needed */
- req->rsp->result = 0;
+ req->rsp->result.u32 = 0;
if (c->recfmt != 0) {
pr_warn("invalid connect version (%d).\n",
@@ -191,14 +191,14 @@ static void nvmet_execute_io_connect(struct nvmet_req *req)
if (unlikely(qid > ctrl->subsys->max_qid)) {
pr_warn("invalid queue id (%d)\n", qid);
status = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
- req->rsp->result = IPO_IATTR_CONNECT_SQE(qid);
+ req->rsp->result.u32 = IPO_IATTR_CONNECT_SQE(qid);
goto out_ctrl_put;
}
status = nvmet_install_queue(ctrl, req);
if (status) {
/* pass back cntlid that had the issue of installing queue */
- req->rsp->result16 = cpu_to_le16(ctrl->cntlid);
+ req->rsp->result.u16 = cpu_to_le16(ctrl->cntlid);
goto out_ctrl_put;
}
diff --git a/drivers/nvme/target/fc.c b/drivers/nvme/target/fc.c
new file mode 100644
index 000000000000..173e842f19c9
--- /dev/null
+++ b/drivers/nvme/target/fc.c
@@ -0,0 +1,2288 @@
+/*
+ * Copyright (c) 2016 Avago Technologies. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful.
+ * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND WARRANTIES,
+ * INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, FITNESS FOR A
+ * PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE DISCLAIMED, EXCEPT TO
+ * THE EXTENT THAT SUCH DISCLAIMERS ARE HELD TO BE LEGALLY INVALID.
+ * See the GNU General Public License for more details, a copy of which
+ * can be found in the file COPYING included with this package
+ *
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/blk-mq.h>
+#include <linux/parser.h>
+#include <linux/random.h>
+#include <uapi/scsi/fc/fc_fs.h>
+#include <uapi/scsi/fc/fc_els.h>
+
+#include "nvmet.h"
+#include <linux/nvme-fc-driver.h>
+#include <linux/nvme-fc.h>
+
+
+/* *************************** Data Structures/Defines ****************** */
+
+
+#define NVMET_LS_CTX_COUNT 4
+
+/* for this implementation, assume small single frame rqst/rsp */
+#define NVME_FC_MAX_LS_BUFFER_SIZE 2048
+
+struct nvmet_fc_tgtport;
+struct nvmet_fc_tgt_assoc;
+
+struct nvmet_fc_ls_iod {
+ struct nvmefc_tgt_ls_req *lsreq;
+ struct nvmefc_tgt_fcp_req *fcpreq; /* only if RS */
+
+ struct list_head ls_list; /* tgtport->ls_list */
+
+ struct nvmet_fc_tgtport *tgtport;
+ struct nvmet_fc_tgt_assoc *assoc;
+
+ u8 *rqstbuf;
+ u8 *rspbuf;
+ u16 rqstdatalen;
+ dma_addr_t rspdma;
+
+ struct scatterlist sg[2];
+
+ struct work_struct work;
+} __aligned(sizeof(unsigned long long));
+
+#define NVMET_FC_MAX_KB_PER_XFR 256
+
+enum nvmet_fcp_datadir {
+ NVMET_FCP_NODATA,
+ NVMET_FCP_WRITE,
+ NVMET_FCP_READ,
+ NVMET_FCP_ABORTED,
+};
+
+struct nvmet_fc_fcp_iod {
+ struct nvmefc_tgt_fcp_req *fcpreq;
+
+ struct nvme_fc_cmd_iu cmdiubuf;
+ struct nvme_fc_ersp_iu rspiubuf;
+ dma_addr_t rspdma;
+ struct scatterlist *data_sg;
+ struct scatterlist *next_sg;
+ int data_sg_cnt;
+ u32 next_sg_offset;
+ u32 total_length;
+ u32 offset;
+ enum nvmet_fcp_datadir io_dir;
+ bool active;
+ bool abort;
+ spinlock_t flock;
+
+ struct nvmet_req req;
+ struct work_struct work;
+
+ struct nvmet_fc_tgtport *tgtport;
+ struct nvmet_fc_tgt_queue *queue;
+
+ struct list_head fcp_list; /* tgtport->fcp_list */
+};
+
+struct nvmet_fc_tgtport {
+
+ struct nvmet_fc_target_port fc_target_port;
+
+ struct list_head tgt_list; /* nvmet_fc_target_list */
+ struct device *dev; /* dev for dma mapping */
+ struct nvmet_fc_target_template *ops;
+
+ struct nvmet_fc_ls_iod *iod;
+ spinlock_t lock;
+ struct list_head ls_list;
+ struct list_head ls_busylist;
+ struct list_head assoc_list;
+ struct ida assoc_cnt;
+ struct nvmet_port *port;
+ struct kref ref;
+};
+
+struct nvmet_fc_tgt_queue {
+ bool ninetypercent;
+ u16 qid;
+ u16 sqsize;
+ u16 ersp_ratio;
+ u16 sqhd;
+ int cpu;
+ atomic_t connected;
+ atomic_t sqtail;
+ atomic_t zrspcnt;
+ atomic_t rsn;
+ spinlock_t qlock;
+ struct nvmet_port *port;
+ struct nvmet_cq nvme_cq;
+ struct nvmet_sq nvme_sq;
+ struct nvmet_fc_tgt_assoc *assoc;
+ struct nvmet_fc_fcp_iod *fod; /* array of fcp_iods */
+ struct list_head fod_list;
+ struct workqueue_struct *work_q;
+ struct kref ref;
+} __aligned(sizeof(unsigned long long));
+
+struct nvmet_fc_tgt_assoc {
+ u64 association_id;
+ u32 a_id;
+ struct nvmet_fc_tgtport *tgtport;
+ struct list_head a_list;
+ struct nvmet_fc_tgt_queue *queues[NVMET_NR_QUEUES];
+ struct kref ref;
+};
+
+
+static inline int
+nvmet_fc_iodnum(struct nvmet_fc_ls_iod *iodptr)
+{
+ return (iodptr - iodptr->tgtport->iod);
+}
+
+static inline int
+nvmet_fc_fodnum(struct nvmet_fc_fcp_iod *fodptr)
+{
+ return (fodptr - fodptr->queue->fod);
+}
+
+
+/*
+ * Association and Connection IDs:
+ *
+ * Association ID will have random number in upper 6 bytes and zero
+ * in lower 2 bytes
+ *
+ * Connection IDs will be Association ID with QID or'd in lower 2 bytes
+ *
+ * note: Association ID = Connection ID for queue 0
+ */
+#define BYTES_FOR_QID sizeof(u16)
+#define BYTES_FOR_QID_SHIFT (BYTES_FOR_QID * 8)
+#define NVMET_FC_QUEUEID_MASK ((u64)((1 << BYTES_FOR_QID_SHIFT) - 1))
+
+static inline u64
+nvmet_fc_makeconnid(struct nvmet_fc_tgt_assoc *assoc, u16 qid)
+{
+ return (assoc->association_id | qid);
+}
+
+static inline u64
+nvmet_fc_getassociationid(u64 connectionid)
+{
+ return connectionid & ~NVMET_FC_QUEUEID_MASK;
+}
+
+static inline u16
+nvmet_fc_getqueueid(u64 connectionid)
+{
+ return (u16)(connectionid & NVMET_FC_QUEUEID_MASK);
+}
+
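A standalone round-trip of the ID arithmetic above, using the same shift and mask (the association ID value is arbitrary):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define BYTES_FOR_QID		sizeof(uint16_t)
#define BYTES_FOR_QID_SHIFT	(BYTES_FOR_QID * 8)
#define QUEUEID_MASK		((uint64_t)((1 << BYTES_FOR_QID_SHIFT) - 1))

int main(void)
{
	/* random upper 6 bytes, zero lower 2 bytes */
	uint64_t assoc_id = 0x123456789abcULL << BYTES_FOR_QID_SHIFT;
	uint16_t qid = 3;
	uint64_t connid = assoc_id | qid;	/* nvmet_fc_makeconnid() */

	assert((connid & ~QUEUEID_MASK) == assoc_id);	  /* getassociationid */
	assert((uint16_t)(connid & QUEUEID_MASK) == qid); /* getqueueid */
	assert((assoc_id | 0) == assoc_id);	/* assoc ID == conn ID for qid 0 */
	printf("connid = 0x%016llx\n", (unsigned long long)connid);
	return 0;
}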
+static inline struct nvmet_fc_tgtport *
+targetport_to_tgtport(struct nvmet_fc_target_port *targetport)
+{
+ return container_of(targetport, struct nvmet_fc_tgtport,
+ fc_target_port);
+}
+
+static inline struct nvmet_fc_fcp_iod *
+nvmet_req_to_fod(struct nvmet_req *nvme_req)
+{
+ return container_of(nvme_req, struct nvmet_fc_fcp_iod, req);
+}
+
+
+/* *************************** Globals **************************** */
+
+
+static DEFINE_SPINLOCK(nvmet_fc_tgtlock);
+
+static LIST_HEAD(nvmet_fc_target_list);
+static DEFINE_IDA(nvmet_fc_tgtport_cnt);
+
+
+static void nvmet_fc_handle_ls_rqst_work(struct work_struct *work);
+static void nvmet_fc_handle_fcp_rqst_work(struct work_struct *work);
+static void nvmet_fc_tgt_a_put(struct nvmet_fc_tgt_assoc *assoc);
+static int nvmet_fc_tgt_a_get(struct nvmet_fc_tgt_assoc *assoc);
+static void nvmet_fc_tgt_q_put(struct nvmet_fc_tgt_queue *queue);
+static int nvmet_fc_tgt_q_get(struct nvmet_fc_tgt_queue *queue);
+static void nvmet_fc_tgtport_put(struct nvmet_fc_tgtport *tgtport);
+static int nvmet_fc_tgtport_get(struct nvmet_fc_tgtport *tgtport);
+
+
+/* *********************** FC-NVME DMA Handling **************************** */
+
+/*
+ * The fcloop device passes in a NULL device pointer. Real LLDDs will
+ * pass in a valid device pointer. If NULL is passed to the dma mapping
+ * routines, depending on the platform, it may or may not succeed, and
+ * may crash.
+ *
+ * As such:
+ * Wrap all the dma routines and check the dev pointer.
+ *
+ * For simple mappings (returning just a dma address), we'll noop them,
+ * returning a dma address of 0.
+ *
+ * For more complex mappings (dma_map_sg), a pseudo routine fills
+ * in the scatter list, setting all dma addresses to 0.
+ */
+
+static inline dma_addr_t
+fc_dma_map_single(struct device *dev, void *ptr, size_t size,
+ enum dma_data_direction dir)
+{
+ return dev ? dma_map_single(dev, ptr, size, dir) : (dma_addr_t)0L;
+}
+
+static inline int
+fc_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
+{
+ return dev ? dma_mapping_error(dev, dma_addr) : 0;
+}
+
+static inline void
+fc_dma_unmap_single(struct device *dev, dma_addr_t addr, size_t size,
+ enum dma_data_direction dir)
+{
+ if (dev)
+ dma_unmap_single(dev, addr, size, dir);
+}
+
+static inline void
+fc_dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr, size_t size,
+ enum dma_data_direction dir)
+{
+ if (dev)
+ dma_sync_single_for_cpu(dev, addr, size, dir);
+}
+
+static inline void
+fc_dma_sync_single_for_device(struct device *dev, dma_addr_t addr, size_t size,
+ enum dma_data_direction dir)
+{
+ if (dev)
+ dma_sync_single_for_device(dev, addr, size, dir);
+}
+
+/* pseudo dma_map_sg call */
+static int
+fc_map_sg(struct scatterlist *sg, int nents)
+{
+ struct scatterlist *s;
+ int i;
+
+ WARN_ON(nents == 0 || sg[0].length == 0);
+
+ for_each_sg(sg, s, nents, i) {
+ s->dma_address = 0L;
+#ifdef CONFIG_NEED_SG_DMA_LENGTH
+ s->dma_length = s->length;
+#endif
+ }
+ return nents;
+}
+
+static inline int
+fc_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
+ enum dma_data_direction dir)
+{
+ return dev ? dma_map_sg(dev, sg, nents, dir) : fc_map_sg(sg, nents);
+}
+
+static inline void
+fc_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
+ enum dma_data_direction dir)
+{
+ if (dev)
+ dma_unmap_sg(dev, sg, nents, dir);
+}
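
Each wrapper forwards when a device is present and otherwise no-ops with a benign result, which is what lets fcloop hand in a NULL device. A compact userspace sketch of that guard, with a hypothetical backend in place of the real dma_* calls:

#include <stdio.h>

struct demo_device { int id; };

/* stand-in for dma_map_single(); never sees a NULL device */
static unsigned long backend_map(struct demo_device *dev, void *ptr)
{
	(void)dev;
	return (unsigned long)ptr;
}

static unsigned long demo_map_single(struct demo_device *dev, void *ptr)
{
	return dev ? backend_map(dev, ptr) : 0UL;	/* noop: address 0 */
}

int main(void)
{
	struct demo_device dev = { 1 };
	int buf;

	printf("NULL dev: %#lx\n", demo_map_single(NULL, &buf));	/* 0 */
	printf("real dev: %#lx\n", demo_map_single(&dev, &buf));
	return 0;
}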
+
+
+/* *********************** FC-NVME Port Management ************************ */
+
+
+static int
+nvmet_fc_alloc_ls_iodlist(struct nvmet_fc_tgtport *tgtport)
+{
+ struct nvmet_fc_ls_iod *iod;
+ int i;
+
+ iod = kcalloc(NVMET_LS_CTX_COUNT, sizeof(struct nvmet_fc_ls_iod),
+ GFP_KERNEL);
+ if (!iod)
+ return -ENOMEM;
+
+ tgtport->iod = iod;
+
+ for (i = 0; i < NVMET_LS_CTX_COUNT; iod++, i++) {
+ INIT_WORK(&iod->work, nvmet_fc_handle_ls_rqst_work);
+ iod->tgtport = tgtport;
+ list_add_tail(&iod->ls_list, &tgtport->ls_list);
+
+ iod->rqstbuf = kcalloc(2, NVME_FC_MAX_LS_BUFFER_SIZE,
+ GFP_KERNEL);
+ if (!iod->rqstbuf)
+ goto out_fail;
+
+ iod->rspbuf = iod->rqstbuf + NVME_FC_MAX_LS_BUFFER_SIZE;
+
+ iod->rspdma = fc_dma_map_single(tgtport->dev, iod->rspbuf,
+ NVME_FC_MAX_LS_BUFFER_SIZE,
+ DMA_TO_DEVICE);
+ if (fc_dma_mapping_error(tgtport->dev, iod->rspdma))
+ goto out_fail;
+ }
+
+ return 0;
+
+out_fail:
+ kfree(iod->rqstbuf);
+ list_del(&iod->ls_list);
+ for (iod--, i--; i >= 0; iod--, i--) {
+ fc_dma_unmap_single(tgtport->dev, iod->rspdma,
+ NVME_FC_MAX_LS_BUFFER_SIZE, DMA_TO_DEVICE);
+ kfree(iod->rqstbuf);
+ list_del(&iod->ls_list);
+ }
+
+ kfree(iod);
+
+ return -EFAULT;
+}
+
+static void
+nvmet_fc_free_ls_iodlist(struct nvmet_fc_tgtport *tgtport)
+{
+ struct nvmet_fc_ls_iod *iod = tgtport->iod;
+ int i;
+
+ for (i = 0; i < NVMET_LS_CTX_COUNT; iod++, i++) {
+ fc_dma_unmap_single(tgtport->dev,
+ iod->rspdma, NVME_FC_MAX_LS_BUFFER_SIZE,
+ DMA_TO_DEVICE);
+ kfree(iod->rqstbuf);
+ list_del(&iod->ls_list);
+ }
+ kfree(tgtport->iod);
+}
+
+static struct nvmet_fc_ls_iod *
+nvmet_fc_alloc_ls_iod(struct nvmet_fc_tgtport *tgtport)
+{
+ struct nvmet_fc_ls_iod *iod;
+ unsigned long flags;
+
+ spin_lock_irqsave(&tgtport->lock, flags);
+ iod = list_first_entry_or_null(&tgtport->ls_list,
+ struct nvmet_fc_ls_iod, ls_list);
+ if (iod)
+ list_move_tail(&iod->ls_list, &tgtport->ls_busylist);
+ spin_unlock_irqrestore(&tgtport->lock, flags);
+ return iod;
+}
+
+
+static void
+nvmet_fc_free_ls_iod(struct nvmet_fc_tgtport *tgtport,
+ struct nvmet_fc_ls_iod *iod)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&tgtport->lock, flags);
+ list_move(&iod->ls_list, &tgtport->ls_list);
+ spin_unlock_irqrestore(&tgtport->lock, flags);
+}
+
+static void
+nvmet_fc_prep_fcp_iodlist(struct nvmet_fc_tgtport *tgtport,
+ struct nvmet_fc_tgt_queue *queue)
+{
+ struct nvmet_fc_fcp_iod *fod = queue->fod;
+ int i;
+
+ for (i = 0; i < queue->sqsize; fod++, i++) {
+ INIT_WORK(&fod->work, nvmet_fc_handle_fcp_rqst_work);
+ fod->tgtport = tgtport;
+ fod->queue = queue;
+ fod->active = false;
+ list_add_tail(&fod->fcp_list, &queue->fod_list);
+ spin_lock_init(&fod->flock);
+
+ fod->rspdma = fc_dma_map_single(tgtport->dev, &fod->rspiubuf,
+ sizeof(fod->rspiubuf), DMA_TO_DEVICE);
+ if (fc_dma_mapping_error(tgtport->dev, fod->rspdma)) {
+ list_del(&fod->fcp_list);
+ for (fod--, i--; i >= 0; fod--, i--) {
+ fc_dma_unmap_single(tgtport->dev, fod->rspdma,
+ sizeof(fod->rspiubuf),
+ DMA_TO_DEVICE);
+ fod->rspdma = 0L;
+ list_del(&fod->fcp_list);
+ }
+
+ return;
+ }
+ }
+}
+
+static void
+nvmet_fc_destroy_fcp_iodlist(struct nvmet_fc_tgtport *tgtport,
+ struct nvmet_fc_tgt_queue *queue)
+{
+ struct nvmet_fc_fcp_iod *fod = queue->fod;
+ int i;
+
+ for (i = 0; i < queue->sqsize; fod++, i++) {
+ if (fod->rspdma)
+ fc_dma_unmap_single(tgtport->dev, fod->rspdma,
+ sizeof(fod->rspiubuf), DMA_TO_DEVICE);
+ }
+}
+
+static struct nvmet_fc_fcp_iod *
+nvmet_fc_alloc_fcp_iod(struct nvmet_fc_tgt_queue *queue)
+{
+ struct nvmet_fc_fcp_iod *fod;
+ unsigned long flags;
+
+ spin_lock_irqsave(&queue->qlock, flags);
+ fod = list_first_entry_or_null(&queue->fod_list,
+ struct nvmet_fc_fcp_iod, fcp_list);
+ if (fod) {
+ list_del(&fod->fcp_list);
+ fod->active = true;
+ fod->abort = false;
+ /*
+ * no queue reference is taken, as it was taken by the
+ * queue lookup just prior to the allocation. The iod
+ * will "inherit" that reference.
+ */
+ }
+ spin_unlock_irqrestore(&queue->qlock, flags);
+ return fod;
+}
+
+
+static void
+nvmet_fc_free_fcp_iod(struct nvmet_fc_tgt_queue *queue,
+ struct nvmet_fc_fcp_iod *fod)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&queue->qlock, flags);
+ list_add_tail(&fod->fcp_list, &fod->queue->fod_list);
+ fod->active = false;
+ spin_unlock_irqrestore(&queue->qlock, flags);
+
+ /*
+ * release the reference taken at queue lookup and fod allocation
+ */
+ nvmet_fc_tgt_q_put(queue);
+}
+
+static int
+nvmet_fc_queue_to_cpu(struct nvmet_fc_tgtport *tgtport, int qid)
+{
+ int cpu, idx, cnt;
+
+ if (!(tgtport->ops->target_features &
+ NVMET_FCTGTFEAT_NEEDS_CMD_CPUSCHED) ||
+ tgtport->ops->max_hw_queues == 1)
+ return WORK_CPU_UNBOUND;
+
+ /* Simple cpu selection based on qid modulo active cpu count */
+ idx = !qid ? 0 : (qid - 1) % num_active_cpus();
+
+ /* find the n'th active cpu */
+ for (cpu = 0, cnt = 0; ; ) {
+ if (cpu_active(cpu)) {
+ if (cnt == idx)
+ break;
+ cnt++;
+ }
+ cpu = (cpu + 1) % num_possible_cpus();
+ }
+
+ return cpu;
+}
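
A userspace rendering of the same selection: qid modulo the active count, then a walk to the idx-th active cpu (the active[] map is made up for the demo):

#include <stdbool.h>
#include <stdio.h>

#define NR_CPUS 8

/* stand-in for cpu_active(); cpus 1 and 5 are "offline" here */
static const bool active[NR_CPUS] = {
	true, false, true, true, true, false, true, true
};

static int num_active(void)
{
	int cpu, n = 0;

	for (cpu = 0; cpu < NR_CPUS; cpu++)
		n += active[cpu];
	return n;
}

static int queue_to_cpu(int qid)
{
	int idx = !qid ? 0 : (qid - 1) % num_active();
	int cpu, cnt;

	/* find the n'th active cpu, as the loop above does */
	for (cpu = 0, cnt = 0; ; cpu = (cpu + 1) % NR_CPUS) {
		if (active[cpu]) {
			if (cnt == idx)
				return cpu;
			cnt++;
		}
	}
}

int main(void)
{
	int qid;

	for (qid = 0; qid <= 7; qid++)
		printf("qid %d -> cpu %d\n", qid, queue_to_cpu(qid));
	return 0;
}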
+
+static struct nvmet_fc_tgt_queue *
+nvmet_fc_alloc_target_queue(struct nvmet_fc_tgt_assoc *assoc,
+ u16 qid, u16 sqsize)
+{
+ struct nvmet_fc_tgt_queue *queue;
+ unsigned long flags;
+ int ret;
+
+ if (qid >= NVMET_NR_QUEUES)
+ return NULL;
+
+ queue = kzalloc((sizeof(*queue) +
+ (sizeof(struct nvmet_fc_fcp_iod) * sqsize)),
+ GFP_KERNEL);
+ if (!queue)
+ return NULL;
+
+ if (!nvmet_fc_tgt_a_get(assoc))
+ goto out_free_queue;
+
+ queue->work_q = alloc_workqueue("ntfc%d.%d.%d", 0, 0,
+ assoc->tgtport->fc_target_port.port_num,
+ assoc->a_id, qid);
+ if (!queue->work_q)
+ goto out_a_put;
+
+ queue->fod = (struct nvmet_fc_fcp_iod *)&queue[1];
+ queue->qid = qid;
+ queue->sqsize = sqsize;
+ queue->assoc = assoc;
+ queue->port = assoc->tgtport->port;
+ queue->cpu = nvmet_fc_queue_to_cpu(assoc->tgtport, qid);
+ INIT_LIST_HEAD(&queue->fod_list);
+ atomic_set(&queue->connected, 0);
+ atomic_set(&queue->sqtail, 0);
+ atomic_set(&queue->rsn, 1);
+ atomic_set(&queue->zrspcnt, 0);
+ spin_lock_init(&queue->qlock);
+ kref_init(&queue->ref);
+
+ nvmet_fc_prep_fcp_iodlist(assoc->tgtport, queue);
+
+ ret = nvmet_sq_init(&queue->nvme_sq);
+ if (ret)
+ goto out_fail_iodlist;
+
+ WARN_ON(assoc->queues[qid]);
+ spin_lock_irqsave(&assoc->tgtport->lock, flags);
+ assoc->queues[qid] = queue;
+ spin_unlock_irqrestore(&assoc->tgtport->lock, flags);
+
+ return queue;
+
+out_fail_iodlist:
+ nvmet_fc_destroy_fcp_iodlist(assoc->tgtport, queue);
+ destroy_workqueue(queue->work_q);
+out_a_put:
+ nvmet_fc_tgt_a_put(assoc);
+out_free_queue:
+ kfree(queue);
+ return NULL;
+}
+
+
+static void
+nvmet_fc_tgt_queue_free(struct kref *ref)
+{
+ struct nvmet_fc_tgt_queue *queue =
+ container_of(ref, struct nvmet_fc_tgt_queue, ref);
+ unsigned long flags;
+
+ spin_lock_irqsave(&queue->assoc->tgtport->lock, flags);
+ queue->assoc->queues[queue->qid] = NULL;
+ spin_unlock_irqrestore(&queue->assoc->tgtport->lock, flags);
+
+ nvmet_fc_destroy_fcp_iodlist(queue->assoc->tgtport, queue);
+
+ nvmet_fc_tgt_a_put(queue->assoc);
+
+ destroy_workqueue(queue->work_q);
+
+ kfree(queue);
+}
+
+static void
+nvmet_fc_tgt_q_put(struct nvmet_fc_tgt_queue *queue)
+{
+ kref_put(&queue->ref, nvmet_fc_tgt_queue_free);
+}
+
+static int
+nvmet_fc_tgt_q_get(struct nvmet_fc_tgt_queue *queue)
+{
+ return kref_get_unless_zero(&queue->ref);
+}
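
Queue lookups depend on kref_get_unless_zero() failing once the count has hit zero, so a dying queue is never resurrected. A userspace approximation of those semantics with C11 atomics (kref proper adds memory-ordering and release-callback machinery):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct demo_obj {
	atomic_int ref;
};

/* like kref_get_unless_zero(): bump only a still-live count */
static bool demo_get(struct demo_obj *o)
{
	int v = atomic_load(&o->ref);

	while (v != 0)
		if (atomic_compare_exchange_weak(&o->ref, &v, v + 1))
			return true;
	return false;
}

static void demo_put(struct demo_obj *o)
{
	if (atomic_fetch_sub(&o->ref, 1) == 1)
		printf("last reference: run the release function\n");
}

int main(void)
{
	struct demo_obj o = { .ref = 1 };

	printf("get: %d\n", demo_get(&o));	/* 1; ref 1 -> 2 */
	demo_put(&o);
	demo_put(&o);				/* releases */
	printf("get after release: %d\n", demo_get(&o));	/* 0 */
	return 0;
}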
+
+
+static void
+nvmet_fc_abort_op(struct nvmet_fc_tgtport *tgtport,
+ struct nvmefc_tgt_fcp_req *fcpreq)
+{
+ int ret;
+
+ fcpreq->op = NVMET_FCOP_ABORT;
+ fcpreq->offset = 0;
+ fcpreq->timeout = 0;
+ fcpreq->transfer_length = 0;
+ fcpreq->transferred_length = 0;
+ fcpreq->fcp_error = 0;
+ fcpreq->sg_cnt = 0;
+
+ ret = tgtport->ops->fcp_op(&tgtport->fc_target_port, fcpreq);
+ if (ret)
+ /* should never reach here !! */
+ WARN_ON(1);
+}
+
+
+static void
+nvmet_fc_delete_target_queue(struct nvmet_fc_tgt_queue *queue)
+{
+ struct nvmet_fc_fcp_iod *fod = queue->fod;
+ unsigned long flags;
+ int i;
+ bool disconnect;
+
+ disconnect = atomic_xchg(&queue->connected, 0);
+
+ spin_lock_irqsave(&queue->qlock, flags);
+ /* abort outstanding io's */
+ for (i = 0; i < queue->sqsize; fod++, i++) {
+ if (fod->active) {
+ spin_lock(&fod->flock);
+ fod->abort = true;
+ spin_unlock(&fod->flock);
+ }
+ }
+ spin_unlock_irqrestore(&queue->qlock, flags);
+
+ flush_workqueue(queue->work_q);
+
+ if (disconnect)
+ nvmet_sq_destroy(&queue->nvme_sq);
+
+ nvmet_fc_tgt_q_put(queue);
+}
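
The atomic_xchg() on queue->connected guarantees that exactly one caller observes the 1 -> 0 transition, so nvmet_sq_destroy() cannot run twice even if teardown races. The same idiom in miniature:

#include <stdatomic.h>
#include <stdio.h>

static atomic_int connected = 1;

/* both a disconnect LS and port removal may end up here */
static void demo_delete_queue(const char *who)
{
	int disconnect = atomic_exchange(&connected, 0);

	if (disconnect)
		printf("%s: destroying the sq\n", who);	/* happens once */
	else
		printf("%s: already torn down\n", who);
}

int main(void)
{
	demo_delete_queue("disconnect handler");
	demo_delete_queue("port removal");
	return 0;
}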
+
+static struct nvmet_fc_tgt_queue *
+nvmet_fc_find_target_queue(struct nvmet_fc_tgtport *tgtport,
+ u64 connection_id)
+{
+ struct nvmet_fc_tgt_assoc *assoc;
+ struct nvmet_fc_tgt_queue *queue;
+ u64 association_id = nvmet_fc_getassociationid(connection_id);
+ u16 qid = nvmet_fc_getqueueid(connection_id);
+ unsigned long flags;
+
+ spin_lock_irqsave(&tgtport->lock, flags);
+ list_for_each_entry(assoc, &tgtport->assoc_list, a_list) {
+ if (association_id == assoc->association_id) {
+ queue = assoc->queues[qid];
+ if (queue &&
+ (!atomic_read(&queue->connected) ||
+ !nvmet_fc_tgt_q_get(queue)))
+ queue = NULL;
+ spin_unlock_irqrestore(&tgtport->lock, flags);
+ return queue;
+ }
+ }
+ spin_unlock_irqrestore(&tgtport->lock, flags);
+ return NULL;
+}
+
+static struct nvmet_fc_tgt_assoc *
+nvmet_fc_alloc_target_assoc(struct nvmet_fc_tgtport *tgtport)
+{
+ struct nvmet_fc_tgt_assoc *assoc, *tmpassoc;
+ unsigned long flags;
+ u64 ran;
+ int idx;
+ bool needrandom = true;
+
+ assoc = kzalloc(sizeof(*assoc), GFP_KERNEL);
+ if (!assoc)
+ return NULL;
+
+ idx = ida_simple_get(&tgtport->assoc_cnt, 0, 0, GFP_KERNEL);
+ if (idx < 0)
+ goto out_free_assoc;
+
+ if (!nvmet_fc_tgtport_get(tgtport))
+ goto out_ida_put;
+
+ assoc->tgtport = tgtport;
+ assoc->a_id = idx;
+ INIT_LIST_HEAD(&assoc->a_list);
+ kref_init(&assoc->ref);
+
+ while (needrandom) {
+ get_random_bytes(&ran, sizeof(ran) - BYTES_FOR_QID);
+ ran = ran << BYTES_FOR_QID_SHIFT;
+
+ spin_lock_irqsave(&tgtport->lock, flags);
+ needrandom = false;
+ list_for_each_entry(tmpassoc, &tgtport->assoc_list, a_list)
+ if (ran == tmpassoc->association_id) {
+ needrandom = true;
+ break;
+ }
+ if (!needrandom) {
+ assoc->association_id = ran;
+ list_add_tail(&assoc->a_list, &tgtport->assoc_list);
+ }
+ spin_unlock_irqrestore(&tgtport->lock, flags);
+ }
+
+ return assoc;
+
+out_ida_put:
+ ida_simple_remove(&tgtport->assoc_cnt, idx);
+out_free_assoc:
+ kfree(assoc);
+ return NULL;
+}
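
Association IDs are drawn at random and re-drawn under the lock until they collide with nothing already on the list. The same retry loop in userspace, shrunk to a 5-bit toy space so collisions actually occur:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define MAX_ASSOC 8

static uint8_t ids[MAX_ASSOC];
static int nr_ids;

static bool in_use(uint8_t id)
{
	int i;

	for (i = 0; i < nr_ids; i++)
		if (ids[i] == id)
			return true;
	return false;
}

static uint8_t alloc_id(void)
{
	uint8_t id;

	do {	/* re-draw until unique, as the needrandom loop does */
		id = (uint8_t)(rand() & 0x1f);	/* 32 values forces retries */
	} while (in_use(id));
	ids[nr_ids++] = id;
	return id;
}

int main(void)
{
	int i;

	for (i = 0; i < MAX_ASSOC; i++)
		printf("assoc %d -> id %u\n", i, alloc_id());
	return 0;
}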
+
+static void
+nvmet_fc_target_assoc_free(struct kref *ref)
+{
+ struct nvmet_fc_tgt_assoc *assoc =
+ container_of(ref, struct nvmet_fc_tgt_assoc, ref);
+ struct nvmet_fc_tgtport *tgtport = assoc->tgtport;
+ unsigned long flags;
+
+ spin_lock_irqsave(&tgtport->lock, flags);
+ list_del(&assoc->a_list);
+ spin_unlock_irqrestore(&tgtport->lock, flags);
+ ida_simple_remove(&tgtport->assoc_cnt, assoc->a_id);
+ kfree(assoc);
+ nvmet_fc_tgtport_put(tgtport);
+}
+
+static void
+nvmet_fc_tgt_a_put(struct nvmet_fc_tgt_assoc *assoc)
+{
+ kref_put(&assoc->ref, nvmet_fc_target_assoc_free);
+}
+
+static int
+nvmet_fc_tgt_a_get(struct nvmet_fc_tgt_assoc *assoc)
+{
+ return kref_get_unless_zero(&assoc->ref);
+}
+
+static void
+nvmet_fc_delete_target_assoc(struct nvmet_fc_tgt_assoc *assoc)
+{
+ struct nvmet_fc_tgtport *tgtport = assoc->tgtport;
+ struct nvmet_fc_tgt_queue *queue;
+ unsigned long flags;
+ int i;
+
+ spin_lock_irqsave(&tgtport->lock, flags);
+ for (i = NVMET_NR_QUEUES - 1; i >= 0; i--) {
+ queue = assoc->queues[i];
+ if (queue) {
+ if (!nvmet_fc_tgt_q_get(queue))
+ continue;
+ spin_unlock_irqrestore(&tgtport->lock, flags);
+ nvmet_fc_delete_target_queue(queue);
+ nvmet_fc_tgt_q_put(queue);
+ spin_lock_irqsave(&tgtport->lock, flags);
+ }
+ }
+ spin_unlock_irqrestore(&tgtport->lock, flags);
+
+ nvmet_fc_tgt_a_put(assoc);
+}
+
+static struct nvmet_fc_tgt_assoc *
+nvmet_fc_find_target_assoc(struct nvmet_fc_tgtport *tgtport,
+ u64 association_id)
+{
+ struct nvmet_fc_tgt_assoc *assoc;
+ struct nvmet_fc_tgt_assoc *ret = NULL;
+ unsigned long flags;
+
+ spin_lock_irqsave(&tgtport->lock, flags);
+ list_for_each_entry(assoc, &tgtport->assoc_list, a_list) {
+ if (association_id == assoc->association_id) {
+ ret = assoc;
+ nvmet_fc_tgt_a_get(assoc);
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&tgtport->lock, flags);
+
+ return ret;
+}
+
+
+/**
+ * nvmet_fc_register_targetport - transport entry point called by an
+ * LLDD to register the existence of a local
+ * NVME subsystem FC port.
+ * @pinfo: pointer to information about the port to be registered
+ * @template: LLDD entrypoints and operational parameters for the port
+ * @dev: physical hardware device node port corresponds to. Will be
+ * used for DMA mappings
+ * @portptr: pointer to a target port pointer. Upon success, the routine
+ * will allocate a nvmet_fc_target_port structure and place its
+ * address in the target port pointer. Upon failure, the target port
+ * pointer will be set to NULL.
+ *
+ * Returns:
+ * a completion status. Must be 0 upon success; a negative errno
+ * (ex: -ENXIO) upon failure.
+ */
+int
+nvmet_fc_register_targetport(struct nvmet_fc_port_info *pinfo,
+ struct nvmet_fc_target_template *template,
+ struct device *dev,
+ struct nvmet_fc_target_port **portptr)
+{
+ struct nvmet_fc_tgtport *newrec;
+ unsigned long flags;
+ int ret, idx;
+
+ if (!template->xmt_ls_rsp || !template->fcp_op ||
+ !template->targetport_delete ||
+ !template->max_hw_queues || !template->max_sgl_segments ||
+ !template->max_dif_sgl_segments || !template->dma_boundary) {
+ ret = -EINVAL;
+ goto out_regtgt_failed;
+ }
+
+ newrec = kzalloc((sizeof(*newrec) + template->target_priv_sz),
+ GFP_KERNEL);
+ if (!newrec) {
+ ret = -ENOMEM;
+ goto out_regtgt_failed;
+ }
+
+ idx = ida_simple_get(&nvmet_fc_tgtport_cnt, 0, 0, GFP_KERNEL);
+ if (idx < 0) {
+ ret = -ENOSPC;
+ goto out_fail_kfree;
+ }
+
+ if (!get_device(dev) && dev) {
+ ret = -ENODEV;
+ goto out_ida_put;
+ }
+
+ newrec->fc_target_port.node_name = pinfo->node_name;
+ newrec->fc_target_port.port_name = pinfo->port_name;
+ newrec->fc_target_port.private = &newrec[1];
+ newrec->fc_target_port.port_id = pinfo->port_id;
+ newrec->fc_target_port.port_num = idx;
+ INIT_LIST_HEAD(&newrec->tgt_list);
+ newrec->dev = dev;
+ newrec->ops = template;
+ spin_lock_init(&newrec->lock);
+ INIT_LIST_HEAD(&newrec->ls_list);
+ INIT_LIST_HEAD(&newrec->ls_busylist);
+ INIT_LIST_HEAD(&newrec->assoc_list);
+ kref_init(&newrec->ref);
+ ida_init(&newrec->assoc_cnt);
+
+ ret = nvmet_fc_alloc_ls_iodlist(newrec);
+ if (ret) {
+ ret = -ENOMEM;
+ goto out_free_newrec;
+ }
+
+ spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
+ list_add_tail(&newrec->tgt_list, &nvmet_fc_target_list);
+ spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
+
+ *portptr = &newrec->fc_target_port;
+ return 0;
+
+out_free_newrec:
+ put_device(dev);
+out_ida_put:
+ ida_simple_remove(&nvmet_fc_tgtport_cnt, idx);
+out_fail_kfree:
+ kfree(newrec);
+out_regtgt_failed:
+ *portptr = NULL;
+ return ret;
+}
+EXPORT_SYMBOL_GPL(nvmet_fc_register_targetport);
+
+
+static void
+nvmet_fc_free_tgtport(struct kref *ref)
+{
+ struct nvmet_fc_tgtport *tgtport =
+ container_of(ref, struct nvmet_fc_tgtport, ref);
+ struct device *dev = tgtport->dev;
+ unsigned long flags;
+
+ spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
+ list_del(&tgtport->tgt_list);
+ spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
+
+ nvmet_fc_free_ls_iodlist(tgtport);
+
+ /* let the LLDD know we've finished tearing it down */
+ tgtport->ops->targetport_delete(&tgtport->fc_target_port);
+
+ ida_simple_remove(&nvmet_fc_tgtport_cnt,
+ tgtport->fc_target_port.port_num);
+
+ ida_destroy(&tgtport->assoc_cnt);
+
+ kfree(tgtport);
+
+ put_device(dev);
+}
+
+static void
+nvmet_fc_tgtport_put(struct nvmet_fc_tgtport *tgtport)
+{
+ kref_put(&tgtport->ref, nvmet_fc_free_tgtport);
+}
+
+static int
+nvmet_fc_tgtport_get(struct nvmet_fc_tgtport *tgtport)
+{
+ return kref_get_unless_zero(&tgtport->ref);
+}
+
+static void
+__nvmet_fc_free_assocs(struct nvmet_fc_tgtport *tgtport)
+{
+ struct nvmet_fc_tgt_assoc *assoc, *next;
+ unsigned long flags;
+
+ spin_lock_irqsave(&tgtport->lock, flags);
+ list_for_each_entry_safe(assoc, next,
+ &tgtport->assoc_list, a_list) {
+ if (!nvmet_fc_tgt_a_get(assoc))
+ continue;
+ spin_unlock_irqrestore(&tgtport->lock, flags);
+ nvmet_fc_delete_target_assoc(assoc);
+ nvmet_fc_tgt_a_put(assoc);
+ spin_lock_irqsave(&tgtport->lock, flags);
+ }
+ spin_unlock_irqrestore(&tgtport->lock, flags);
+}
+
+/*
+ * nvmet layer has called to terminate an association
+ */
+static void
+nvmet_fc_delete_ctrl(struct nvmet_ctrl *ctrl)
+{
+ struct nvmet_fc_tgtport *tgtport, *next;
+ struct nvmet_fc_tgt_assoc *assoc;
+ struct nvmet_fc_tgt_queue *queue;
+ unsigned long flags;
+ bool found_ctrl = false;
+
+ /* this is a bit ugly, but don't want to make locks layered */
+ spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
+ list_for_each_entry_safe(tgtport, next, &nvmet_fc_target_list,
+ tgt_list) {
+ if (!nvmet_fc_tgtport_get(tgtport))
+ continue;
+ spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
+
+ spin_lock_irqsave(&tgtport->lock, flags);
+ list_for_each_entry(assoc, &tgtport->assoc_list, a_list) {
+ queue = assoc->queues[0];
+ if (queue && queue->nvme_sq.ctrl == ctrl) {
+ if (nvmet_fc_tgt_a_get(assoc))
+ found_ctrl = true;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&tgtport->lock, flags);
+
+ nvmet_fc_tgtport_put(tgtport);
+
+ if (found_ctrl) {
+ nvmet_fc_delete_target_assoc(assoc);
+ nvmet_fc_tgt_a_put(assoc);
+ return;
+ }
+
+ spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
+ }
+ spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
+}
+
+/**
+ * nvmet_fc_unregister_targetport - transport entry point called by an
+ * LLDD to deregister/remove a previously
+ * registered local NVME subsystem FC port.
+ * @target_port: pointer to the (registered) target port that is to be
+ * deregistered.
+ *
+ * Returns:
+ * a completion status. Must be 0 upon success; a negative errno
+ * (ex: -ENXIO) upon failure.
+ */
+int
+nvmet_fc_unregister_targetport(struct nvmet_fc_target_port *target_port)
+{
+ struct nvmet_fc_tgtport *tgtport = targetport_to_tgtport(target_port);
+
+ /* terminate any outstanding associations */
+ __nvmet_fc_free_assocs(tgtport);
+
+ nvmet_fc_tgtport_put(tgtport);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(nvmet_fc_unregister_targetport);
+
+
+/* *********************** FC-NVME LS Handling **************************** */
+
+
+static void
+nvmet_fc_format_rsp_hdr(void *buf, u8 ls_cmd, u32 desc_len, u8 rqst_ls_cmd)
+{
+ struct fcnvme_ls_acc_hdr *acc = buf;
+
+ acc->w0.ls_cmd = ls_cmd;
+ acc->desc_list_len = desc_len;
+ acc->rqst.desc_tag = cpu_to_be32(FCNVME_LSDESC_RQST);
+ acc->rqst.desc_len =
+ fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_rqst));
+ acc->rqst.w0.ls_cmd = rqst_ls_cmd;
+}
+
+static int
+nvmet_fc_format_rjt(void *buf, u16 buflen, u8 ls_cmd,
+ u8 reason, u8 explanation, u8 vendor)
+{
+ struct fcnvme_ls_rjt *rjt = buf;
+
+ nvmet_fc_format_rsp_hdr(buf, FCNVME_LSDESC_RQST,
+ fcnvme_lsdesc_len(sizeof(struct fcnvme_ls_rjt)),
+ ls_cmd);
+ rjt->rjt.desc_tag = cpu_to_be32(FCNVME_LSDESC_RJT);
+ rjt->rjt.desc_len = fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_rjt));
+ rjt->rjt.reason_code = reason;
+ rjt->rjt.reason_explanation = explanation;
+ rjt->rjt.vendor = vendor;
+
+ return sizeof(struct fcnvme_ls_rjt);
+}
+
+/* Validation Error indexes into the string table below */
+enum {
+ VERR_NO_ERROR = 0,
+ VERR_CR_ASSOC_LEN = 1,
+ VERR_CR_ASSOC_RQST_LEN = 2,
+ VERR_CR_ASSOC_CMD = 3,
+ VERR_CR_ASSOC_CMD_LEN = 4,
+ VERR_ERSP_RATIO = 5,
+ VERR_ASSOC_ALLOC_FAIL = 6,
+ VERR_QUEUE_ALLOC_FAIL = 7,
+ VERR_CR_CONN_LEN = 8,
+ VERR_CR_CONN_RQST_LEN = 9,
+ VERR_ASSOC_ID = 10,
+ VERR_ASSOC_ID_LEN = 11,
+ VERR_NO_ASSOC = 12,
+ VERR_CONN_ID = 13,
+ VERR_CONN_ID_LEN = 14,
+ VERR_NO_CONN = 15,
+ VERR_CR_CONN_CMD = 16,
+ VERR_CR_CONN_CMD_LEN = 17,
+ VERR_DISCONN_LEN = 18,
+ VERR_DISCONN_RQST_LEN = 19,
+ VERR_DISCONN_CMD = 20,
+ VERR_DISCONN_CMD_LEN = 21,
+ VERR_DISCONN_SCOPE = 22,
+ VERR_RS_LEN = 23,
+ VERR_RS_RQST_LEN = 24,
+ VERR_RS_CMD = 25,
+ VERR_RS_CMD_LEN = 26,
+ VERR_RS_RCTL = 27,
+ VERR_RS_RO = 28,
+};
+
+static char *validation_errors[] = {
+ "OK",
+ "Bad CR_ASSOC Length",
+ "Bad CR_ASSOC Rqst Length",
+ "Not CR_ASSOC Cmd",
+ "Bad CR_ASSOC Cmd Length",
+ "Bad Ersp Ratio",
+ "Association Allocation Failed",
+ "Queue Allocation Failed",
+ "Bad CR_CONN Length",
+ "Bad CR_CONN Rqst Length",
+ "Not Association ID",
+ "Bad Association ID Length",
+ "No Association",
+ "Not Connection ID",
+ "Bad Connection ID Length",
+ "No Connection",
+ "Not CR_CONN Cmd",
+ "Bad CR_CONN Cmd Length",
+ "Bad DISCONN Length",
+ "Bad DISCONN Rqst Length",
+ "Not DISCONN Cmd",
+ "Bad DISCONN Cmd Length",
+ "Bad Disconnect Scope",
+ "Bad RS Length",
+ "Bad RS Rqst Length",
+ "Not RS Cmd",
+ "Bad RS Cmd Length",
+ "Bad RS R_CTL",
+ "Bad RS Relative Offset",
+};
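+
+/* Note: validation_errors[] must be kept in sync, 1:1, with the VERR_ enum */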
+
+static void
+nvmet_fc_ls_create_association(struct nvmet_fc_tgtport *tgtport,
+ struct nvmet_fc_ls_iod *iod)
+{
+ struct fcnvme_ls_cr_assoc_rqst *rqst =
+ (struct fcnvme_ls_cr_assoc_rqst *)iod->rqstbuf;
+ struct fcnvme_ls_cr_assoc_acc *acc =
+ (struct fcnvme_ls_cr_assoc_acc *)iod->rspbuf;
+ struct nvmet_fc_tgt_queue *queue;
+ int ret = 0;
+
+ memset(acc, 0, sizeof(*acc));
+
+ if (iod->rqstdatalen < sizeof(struct fcnvme_ls_cr_assoc_rqst))
+ ret = VERR_CR_ASSOC_LEN;
+ else if (rqst->desc_list_len !=
+ fcnvme_lsdesc_len(
+ sizeof(struct fcnvme_ls_cr_assoc_rqst)))
+ ret = VERR_CR_ASSOC_RQST_LEN;
+ else if (rqst->assoc_cmd.desc_tag !=
+ cpu_to_be32(FCNVME_LSDESC_CREATE_ASSOC_CMD))
+ ret = VERR_CR_ASSOC_CMD;
+ else if (rqst->assoc_cmd.desc_len !=
+ fcnvme_lsdesc_len(
+ sizeof(struct fcnvme_lsdesc_cr_assoc_cmd)))
+ ret = VERR_CR_ASSOC_CMD_LEN;
+ else if (!rqst->assoc_cmd.ersp_ratio ||
+ (be16_to_cpu(rqst->assoc_cmd.ersp_ratio) >=
+ be16_to_cpu(rqst->assoc_cmd.sqsize)))
+ ret = VERR_ERSP_RATIO;
+
+ else {
+ /* new association w/ admin queue */
+ iod->assoc = nvmet_fc_alloc_target_assoc(tgtport);
+ if (!iod->assoc)
+ ret = VERR_ASSOC_ALLOC_FAIL;
+ else {
+ queue = nvmet_fc_alloc_target_queue(iod->assoc, 0,
+ be16_to_cpu(rqst->assoc_cmd.sqsize));
+ if (!queue)
+ ret = VERR_QUEUE_ALLOC_FAIL;
+ }
+ }
+
+ if (ret) {
+ dev_err(tgtport->dev,
+ "Create Association LS failed: %s\n",
+ validation_errors[ret]);
+ iod->lsreq->rsplen = nvmet_fc_format_rjt(acc,
+ NVME_FC_MAX_LS_BUFFER_SIZE, rqst->w0.ls_cmd,
+ ELS_RJT_LOGIC,
+ ELS_EXPL_NONE, 0);
+ return;
+ }
+
+ queue->ersp_ratio = be16_to_cpu(rqst->assoc_cmd.ersp_ratio);
+ atomic_set(&queue->connected, 1);
+ queue->sqhd = 0; /* best place to init value */
+
+ /* format a response */
+
+ iod->lsreq->rsplen = sizeof(*acc);
+
+ nvmet_fc_format_rsp_hdr(acc, FCNVME_LS_ACC,
+ fcnvme_lsdesc_len(
+ sizeof(struct fcnvme_ls_cr_assoc_acc)),
+ FCNVME_LS_CREATE_ASSOCIATION);
+ acc->associd.desc_tag = cpu_to_be32(FCNVME_LSDESC_ASSOC_ID);
+ acc->associd.desc_len =
+ fcnvme_lsdesc_len(
+ sizeof(struct fcnvme_lsdesc_assoc_id));
+ acc->associd.association_id =
+ cpu_to_be64(nvmet_fc_makeconnid(iod->assoc, 0));
+ acc->connectid.desc_tag = cpu_to_be32(FCNVME_LSDESC_CONN_ID);
+ acc->connectid.desc_len =
+ fcnvme_lsdesc_len(
+ sizeof(struct fcnvme_lsdesc_conn_id));
+ acc->connectid.connection_id = acc->associd.association_id;
+}
+
+static void
+nvmet_fc_ls_create_connection(struct nvmet_fc_tgtport *tgtport,
+ struct nvmet_fc_ls_iod *iod)
+{
+ struct fcnvme_ls_cr_conn_rqst *rqst =
+ (struct fcnvme_ls_cr_conn_rqst *)iod->rqstbuf;
+ struct fcnvme_ls_cr_conn_acc *acc =
+ (struct fcnvme_ls_cr_conn_acc *)iod->rspbuf;
+ struct nvmet_fc_tgt_queue *queue;
+ int ret = 0;
+
+ memset(acc, 0, sizeof(*acc));
+
+ if (iod->rqstdatalen < sizeof(struct fcnvme_ls_cr_conn_rqst))
+ ret = VERR_CR_CONN_LEN;
+ else if (rqst->desc_list_len !=
+ fcnvme_lsdesc_len(
+ sizeof(struct fcnvme_ls_cr_conn_rqst)))
+ ret = VERR_CR_CONN_RQST_LEN;
+ else if (rqst->associd.desc_tag != cpu_to_be32(FCNVME_LSDESC_ASSOC_ID))
+ ret = VERR_ASSOC_ID;
+ else if (rqst->associd.desc_len !=
+ fcnvme_lsdesc_len(
+ sizeof(struct fcnvme_lsdesc_assoc_id)))
+ ret = VERR_ASSOC_ID_LEN;
+ else if (rqst->connect_cmd.desc_tag !=
+ cpu_to_be32(FCNVME_LSDESC_CREATE_CONN_CMD))
+ ret = VERR_CR_CONN_CMD;
+ else if (rqst->connect_cmd.desc_len !=
+ fcnvme_lsdesc_len(
+ sizeof(struct fcnvme_lsdesc_cr_conn_cmd)))
+ ret = VERR_CR_CONN_CMD_LEN;
+ else if (!rqst->connect_cmd.ersp_ratio ||
+ (be16_to_cpu(rqst->connect_cmd.ersp_ratio) >=
+ be16_to_cpu(rqst->connect_cmd.sqsize)))
+ ret = VERR_ERSP_RATIO;
+
+ else {
+ /* new io queue */
+ iod->assoc = nvmet_fc_find_target_assoc(tgtport,
+ be64_to_cpu(rqst->associd.association_id));
+ if (!iod->assoc)
+ ret = VERR_NO_ASSOC;
+ else {
+ queue = nvmet_fc_alloc_target_queue(iod->assoc,
+ be16_to_cpu(rqst->connect_cmd.qid),
+ be16_to_cpu(rqst->connect_cmd.sqsize));
+ if (!queue)
+ ret = VERR_QUEUE_ALLOC_FAIL;
+
+ /* release get taken in nvmet_fc_find_target_assoc */
+ nvmet_fc_tgt_a_put(iod->assoc);
+ }
+ }
+
+ if (ret) {
+ dev_err(tgtport->dev,
+ "Create Connection LS failed: %s\n",
+ validation_errors[ret]);
+ iod->lsreq->rsplen = nvmet_fc_format_rjt(acc,
+ NVME_FC_MAX_LS_BUFFER_SIZE, rqst->w0.ls_cmd,
+ (ret == VERR_NO_ASSOC) ?
+ ELS_RJT_PROT : ELS_RJT_LOGIC,
+ ELS_EXPL_NONE, 0);
+ return;
+ }
+
+ queue->ersp_ratio = be16_to_cpu(rqst->connect_cmd.ersp_ratio);
+ atomic_set(&queue->connected, 1);
+ queue->sqhd = 0; /* best place to init value */
+
+ /* format a response */
+
+ iod->lsreq->rsplen = sizeof(*acc);
+
+ nvmet_fc_format_rsp_hdr(acc, FCNVME_LS_ACC,
+ fcnvme_lsdesc_len(sizeof(struct fcnvme_ls_cr_conn_acc)),
+ FCNVME_LS_CREATE_CONNECTION);
+ acc->connectid.desc_tag = cpu_to_be32(FCNVME_LSDESC_CONN_ID);
+ acc->connectid.desc_len =
+ fcnvme_lsdesc_len(
+ sizeof(struct fcnvme_lsdesc_conn_id));
+ acc->connectid.connection_id =
+ cpu_to_be64(nvmet_fc_makeconnid(iod->assoc,
+ be16_to_cpu(rqst->connect_cmd.qid)));
+}
+
+static void
+nvmet_fc_ls_disconnect(struct nvmet_fc_tgtport *tgtport,
+ struct nvmet_fc_ls_iod *iod)
+{
+ struct fcnvme_ls_disconnect_rqst *rqst =
+ (struct fcnvme_ls_disconnect_rqst *)iod->rqstbuf;
+ struct fcnvme_ls_disconnect_acc *acc =
+ (struct fcnvme_ls_disconnect_acc *)iod->rspbuf;
+ struct nvmet_fc_tgt_queue *queue;
+ struct nvmet_fc_tgt_assoc *assoc;
+ int ret = 0;
+ bool del_assoc = false;
+
+ memset(acc, 0, sizeof(*acc));
+
+ if (iod->rqstdatalen < sizeof(struct fcnvme_ls_disconnect_rqst))
+ ret = VERR_DISCONN_LEN;
+ else if (rqst->desc_list_len !=
+ fcnvme_lsdesc_len(
+ sizeof(struct fcnvme_ls_disconnect_rqst)))
+ ret = VERR_DISCONN_RQST_LEN;
+ else if (rqst->associd.desc_tag != cpu_to_be32(FCNVME_LSDESC_ASSOC_ID))
+ ret = VERR_ASSOC_ID;
+ else if (rqst->associd.desc_len !=
+ fcnvme_lsdesc_len(
+ sizeof(struct fcnvme_lsdesc_assoc_id)))
+ ret = VERR_ASSOC_ID_LEN;
+ else if (rqst->discon_cmd.desc_tag !=
+ cpu_to_be32(FCNVME_LSDESC_DISCONN_CMD))
+ ret = VERR_DISCONN_CMD;
+ else if (rqst->discon_cmd.desc_len !=
+ fcnvme_lsdesc_len(
+ sizeof(struct fcnvme_lsdesc_disconn_cmd)))
+ ret = VERR_DISCONN_CMD_LEN;
+ else if ((rqst->discon_cmd.scope != FCNVME_DISCONN_ASSOCIATION) &&
+ (rqst->discon_cmd.scope != FCNVME_DISCONN_CONNECTION))
+ ret = VERR_DISCONN_SCOPE;
+ else {
+ /* match an active association */
+ assoc = nvmet_fc_find_target_assoc(tgtport,
+ be64_to_cpu(rqst->associd.association_id));
+ iod->assoc = assoc;
+ if (!assoc)
+ ret = VERR_NO_ASSOC;
+ }
+
+ if (ret) {
+ dev_err(tgtport->dev,
+ "Disconnect LS failed: %s\n",
+ validation_errors[ret]);
+ iod->lsreq->rsplen = nvmet_fc_format_rjt(acc,
+ NVME_FC_MAX_LS_BUFFER_SIZE, rqst->w0.ls_cmd,
+			(ret == VERR_NO_ASSOC) ? ELS_RJT_PROT : ELS_RJT_LOGIC,
+ ELS_EXPL_NONE, 0);
+ return;
+ }
+
+ /* format a response */
+
+ iod->lsreq->rsplen = sizeof(*acc);
+
+ nvmet_fc_format_rsp_hdr(acc, FCNVME_LS_ACC,
+ fcnvme_lsdesc_len(
+ sizeof(struct fcnvme_ls_disconnect_acc)),
+ FCNVME_LS_DISCONNECT);
+
+
+ if (rqst->discon_cmd.scope == FCNVME_DISCONN_CONNECTION) {
+ queue = nvmet_fc_find_target_queue(tgtport,
+ be64_to_cpu(rqst->discon_cmd.id));
+ if (queue) {
+ int qid = queue->qid;
+
+ nvmet_fc_delete_target_queue(queue);
+
+ /* release the get taken by find_target_queue */
+ nvmet_fc_tgt_q_put(queue);
+
+			/* tear association down if admin queue terminated */
+ if (!qid)
+ del_assoc = true;
+ }
+ }
+
+ /* release get taken in nvmet_fc_find_target_assoc */
+ nvmet_fc_tgt_a_put(iod->assoc);
+
+ if (del_assoc)
+ nvmet_fc_delete_target_assoc(iod->assoc);
+}
+
+
+/* *********************** NVME Ctrl Routines **************************** */
+
+
+static void nvmet_fc_fcp_nvme_cmd_done(struct nvmet_req *nvme_req);
+
+static struct nvmet_fabrics_ops nvmet_fc_tgt_fcp_ops;
+
+static void
+nvmet_fc_xmt_ls_rsp_done(struct nvmefc_tgt_ls_req *lsreq)
+{
+ struct nvmet_fc_ls_iod *iod = lsreq->nvmet_fc_private;
+ struct nvmet_fc_tgtport *tgtport = iod->tgtport;
+
+ fc_dma_sync_single_for_cpu(tgtport->dev, iod->rspdma,
+ NVME_FC_MAX_LS_BUFFER_SIZE, DMA_TO_DEVICE);
+ nvmet_fc_free_ls_iod(tgtport, iod);
+ nvmet_fc_tgtport_put(tgtport);
+}
+
+static void
+nvmet_fc_xmt_ls_rsp(struct nvmet_fc_tgtport *tgtport,
+ struct nvmet_fc_ls_iod *iod)
+{
+ int ret;
+
+ fc_dma_sync_single_for_device(tgtport->dev, iod->rspdma,
+ NVME_FC_MAX_LS_BUFFER_SIZE, DMA_TO_DEVICE);
+
+ ret = tgtport->ops->xmt_ls_rsp(&tgtport->fc_target_port, iod->lsreq);
+ if (ret)
+ nvmet_fc_xmt_ls_rsp_done(iod->lsreq);
+}
+
+/*
+ * Actual processing routine for received FC-NVME LS Requests from the LLD
+ */
+static void
+nvmet_fc_handle_ls_rqst(struct nvmet_fc_tgtport *tgtport,
+ struct nvmet_fc_ls_iod *iod)
+{
+ struct fcnvme_ls_rqst_w0 *w0 =
+ (struct fcnvme_ls_rqst_w0 *)iod->rqstbuf;
+
+ iod->lsreq->nvmet_fc_private = iod;
+ iod->lsreq->rspbuf = iod->rspbuf;
+ iod->lsreq->rspdma = iod->rspdma;
+ iod->lsreq->done = nvmet_fc_xmt_ls_rsp_done;
+	/* Be preventive: handlers will later set a valid length */
+ iod->lsreq->rsplen = 0;
+
+ iod->assoc = NULL;
+
+ /*
+ * handlers:
+ * parse request input, execute the request, and format the
+ * LS response
+ */
+ switch (w0->ls_cmd) {
+ case FCNVME_LS_CREATE_ASSOCIATION:
+ /* Creates Association and initial Admin Queue/Connection */
+ nvmet_fc_ls_create_association(tgtport, iod);
+ break;
+ case FCNVME_LS_CREATE_CONNECTION:
+ /* Creates an IO Queue/Connection */
+ nvmet_fc_ls_create_connection(tgtport, iod);
+ break;
+ case FCNVME_LS_DISCONNECT:
+ /* Terminate a Queue/Connection or the Association */
+ nvmet_fc_ls_disconnect(tgtport, iod);
+ break;
+ default:
+ iod->lsreq->rsplen = nvmet_fc_format_rjt(iod->rspbuf,
+ NVME_FC_MAX_LS_BUFFER_SIZE, w0->ls_cmd,
+ ELS_RJT_INVAL, ELS_EXPL_NONE, 0);
+ }
+
+ nvmet_fc_xmt_ls_rsp(tgtport, iod);
+}
+
+/*
+ * Work-queue entry point: hand a received FC-NVME LS request to the
+ * actual processing routine.
+ */
+static void
+nvmet_fc_handle_ls_rqst_work(struct work_struct *work)
+{
+ struct nvmet_fc_ls_iod *iod =
+ container_of(work, struct nvmet_fc_ls_iod, work);
+ struct nvmet_fc_tgtport *tgtport = iod->tgtport;
+
+ nvmet_fc_handle_ls_rqst(tgtport, iod);
+}
+
+
+/**
+ * nvmet_fc_rcv_ls_req - transport entry point called by an LLDD
+ *			upon the reception of an NVME LS request.
+ *
+ * The nvmet-fc layer will copy payload to an internal structure for
+ * processing. As such, upon completion of the routine, the LLDD may
+ * immediately free/reuse the LS request buffer passed in the call.
+ *
+ * If this routine returns error, the LLDD should abort the exchange.
+ *
+ * @target_port: pointer to the (registered) target port the LS was
+ *               received on.
+ * @lsreq: pointer to a lsreq request structure to be used to reference
+ * the exchange corresponding to the LS.
+ * @lsreqbuf: pointer to the buffer containing the LS Request
+ * @lsreqbuf_len: length, in bytes, of the received LS request
+ */
+int
+nvmet_fc_rcv_ls_req(struct nvmet_fc_target_port *target_port,
+ struct nvmefc_tgt_ls_req *lsreq,
+ void *lsreqbuf, u32 lsreqbuf_len)
+{
+ struct nvmet_fc_tgtport *tgtport = targetport_to_tgtport(target_port);
+ struct nvmet_fc_ls_iod *iod;
+
+ if (lsreqbuf_len > NVME_FC_MAX_LS_BUFFER_SIZE)
+ return -E2BIG;
+
+ if (!nvmet_fc_tgtport_get(tgtport))
+ return -ESHUTDOWN;
+
+ iod = nvmet_fc_alloc_ls_iod(tgtport);
+ if (!iod) {
+ nvmet_fc_tgtport_put(tgtport);
+ return -ENOENT;
+ }
+
+ iod->lsreq = lsreq;
+ iod->fcpreq = NULL;
+ memcpy(iod->rqstbuf, lsreqbuf, lsreqbuf_len);
+ iod->rqstdatalen = lsreqbuf_len;
+
+ schedule_work(&iod->work);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(nvmet_fc_rcv_ls_req);
+
+
+/*
+ * **********************
+ * Start of FCP handling
+ * **********************
+ */
+
+static int
+nvmet_fc_alloc_tgt_pgs(struct nvmet_fc_fcp_iod *fod)
+{
+ struct scatterlist *sg;
+ struct page *page;
+ unsigned int nent;
+ u32 page_len, length;
+ int i = 0;
+
+ length = fod->total_length;
+ nent = DIV_ROUND_UP(length, PAGE_SIZE);
+ sg = kmalloc_array(nent, sizeof(struct scatterlist), GFP_KERNEL);
+ if (!sg)
+ goto out;
+
+ sg_init_table(sg, nent);
+
+ while (length) {
+ page_len = min_t(u32, length, PAGE_SIZE);
+
+ page = alloc_page(GFP_KERNEL);
+ if (!page)
+ goto out_free_pages;
+
+ sg_set_page(&sg[i], page, page_len, 0);
+ length -= page_len;
+ i++;
+ }
+
+ fod->data_sg = sg;
+ fod->data_sg_cnt = nent;
+ fod->data_sg_cnt = fc_dma_map_sg(fod->tgtport->dev, sg, nent,
+ ((fod->io_dir == NVMET_FCP_WRITE) ?
+ DMA_FROM_DEVICE : DMA_TO_DEVICE));
+ /* note: write from initiator perspective */
+
+ return 0;
+
+out_free_pages:
+ while (i > 0) {
+ i--;
+ __free_page(sg_page(&sg[i]));
+ }
+ kfree(sg);
+ fod->data_sg = NULL;
+ fod->data_sg_cnt = 0;
+out:
+ return NVME_SC_INTERNAL;
+}
+
+static void
+nvmet_fc_free_tgt_pgs(struct nvmet_fc_fcp_iod *fod)
+{
+ struct scatterlist *sg;
+ int count;
+
+ if (!fod->data_sg || !fod->data_sg_cnt)
+ return;
+
+ fc_dma_unmap_sg(fod->tgtport->dev, fod->data_sg, fod->data_sg_cnt,
+ ((fod->io_dir == NVMET_FCP_WRITE) ?
+ DMA_FROM_DEVICE : DMA_TO_DEVICE));
+ for_each_sg(fod->data_sg, sg, fod->data_sg_cnt, count)
+ __free_page(sg_page(sg));
+ kfree(fod->data_sg);
+}
+
+
+static bool
+queue_90percent_full(struct nvmet_fc_tgt_queue *q, u32 sqhd)
+{
+ u32 sqtail, used;
+
+ /* egad, this is ugly. And sqtail is just a best guess */
+ sqtail = atomic_read(&q->sqtail) % q->sqsize;
+
+ used = (sqtail < sqhd) ? (sqtail + q->sqsize - sqhd) : (sqtail - sqhd);
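+	/*
+	 * e.g. sqsize = 32, sqhd = 4, sqtail = 1: used = 1 + 32 - 4 = 29,
+	 * and 29 * 10 = 290 >= 31 * 9 = 279, so the queue is ~90% full.
+	 */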
+ return ((used * 10) >= (((u32)(q->sqsize - 1) * 9)));
+}
+
+/*
+ * Prep RSP payload.
+ * May be a NVMET_FCOP_RSP or NVMET_FCOP_READDATA_RSP op
+ */
+static void
+nvmet_fc_prep_fcp_rsp(struct nvmet_fc_tgtport *tgtport,
+ struct nvmet_fc_fcp_iod *fod)
+{
+ struct nvme_fc_ersp_iu *ersp = &fod->rspiubuf;
+ struct nvme_common_command *sqe = &fod->cmdiubuf.sqe.common;
+ struct nvme_completion *cqe = &ersp->cqe;
+ u32 *cqewd = (u32 *)cqe;
+ bool send_ersp = false;
+ u32 rsn, rspcnt, xfr_length;
+
+ if (fod->fcpreq->op == NVMET_FCOP_READDATA_RSP)
+ xfr_length = fod->total_length;
+ else
+ xfr_length = fod->offset;
+
+ /*
+ * check to see if we can send a 0's rsp.
+ * Note: to send a 0's response, the NVME-FC host transport will
+ * recreate the CQE. The host transport knows: sq id, SQHD (last
+ * seen in an ersp), and command_id. Thus it will create a
+ * zero-filled CQE with those known fields filled in. Transport
+ * must send an ersp for any condition where the cqe won't match
+ * this.
+ *
+ * Here are the FC-NVME mandated cases where we must send an ersp:
+ * every N responses, where N=ersp_ratio
+ * force fabric commands to send ersp's (not in FC-NVME but good
+ * practice)
+ * normal cmds: any time status is non-zero, or status is zero
+ * but words 0 or 1 are non-zero.
+ * the SQ is 90% or more full
+ * the cmd is a fused command
+ * transferred data length not equal to cmd iu length
+ */
+ rspcnt = atomic_inc_return(&fod->queue->zrspcnt);
+ if (!(rspcnt % fod->queue->ersp_ratio) ||
+ sqe->opcode == nvme_fabrics_command ||
+ xfr_length != fod->total_length ||
+ (le16_to_cpu(cqe->status) & 0xFFFE) || cqewd[0] || cqewd[1] ||
+ (sqe->flags & (NVME_CMD_FUSE_FIRST | NVME_CMD_FUSE_SECOND)) ||
+ queue_90percent_full(fod->queue, cqe->sq_head))
+ send_ersp = true;
+
+ /* re-set the fields */
+ fod->fcpreq->rspaddr = ersp;
+ fod->fcpreq->rspdma = fod->rspdma;
+
+ if (!send_ersp) {
+ memset(ersp, 0, NVME_FC_SIZEOF_ZEROS_RSP);
+ fod->fcpreq->rsplen = NVME_FC_SIZEOF_ZEROS_RSP;
+ } else {
+ ersp->iu_len = cpu_to_be16(sizeof(*ersp)/sizeof(u32));
+ rsn = atomic_inc_return(&fod->queue->rsn);
+ ersp->rsn = cpu_to_be32(rsn);
+ ersp->xfrd_len = cpu_to_be32(xfr_length);
+ fod->fcpreq->rsplen = sizeof(*ersp);
+ }
+
+ fc_dma_sync_single_for_device(tgtport->dev, fod->rspdma,
+ sizeof(fod->rspiubuf), DMA_TO_DEVICE);
+}
+
+static void nvmet_fc_xmt_fcp_op_done(struct nvmefc_tgt_fcp_req *fcpreq);
+
+static void
+nvmet_fc_xmt_fcp_rsp(struct nvmet_fc_tgtport *tgtport,
+ struct nvmet_fc_fcp_iod *fod)
+{
+ int ret;
+
+ fod->fcpreq->op = NVMET_FCOP_RSP;
+ fod->fcpreq->timeout = 0;
+
+ nvmet_fc_prep_fcp_rsp(tgtport, fod);
+
+ ret = tgtport->ops->fcp_op(&tgtport->fc_target_port, fod->fcpreq);
+ if (ret)
+ nvmet_fc_abort_op(tgtport, fod->fcpreq);
+}
+
+static void
+nvmet_fc_transfer_fcp_data(struct nvmet_fc_tgtport *tgtport,
+ struct nvmet_fc_fcp_iod *fod, u8 op)
+{
+ struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq;
+ struct scatterlist *sg, *datasg;
+ u32 tlen, sg_off;
+ int ret;
+
+ fcpreq->op = op;
+ fcpreq->offset = fod->offset;
+ fcpreq->timeout = NVME_FC_TGTOP_TIMEOUT_SEC;
+ tlen = min_t(u32, (NVMET_FC_MAX_KB_PER_XFR * 1024),
+ (fod->total_length - fod->offset));
+ tlen = min_t(u32, tlen, NVME_FC_MAX_SEGMENTS * PAGE_SIZE);
+ tlen = min_t(u32, tlen, fod->tgtport->ops->max_sgl_segments
+ * PAGE_SIZE);
+ fcpreq->transfer_length = tlen;
+ fcpreq->transferred_length = 0;
+ fcpreq->fcp_error = 0;
+ fcpreq->rsplen = 0;
+
+ fcpreq->sg_cnt = 0;
+
+ datasg = fod->next_sg;
+ sg_off = fod->next_sg_offset;
+
+ for (sg = fcpreq->sg ; tlen; sg++) {
+ *sg = *datasg;
+ if (sg_off) {
+ sg->offset += sg_off;
+ sg->length -= sg_off;
+ sg->dma_address += sg_off;
+ sg_off = 0;
+ }
+ if (tlen < sg->length) {
+ sg->length = tlen;
+ fod->next_sg = datasg;
+ fod->next_sg_offset += tlen;
+ } else if (tlen == sg->length) {
+ fod->next_sg_offset = 0;
+ fod->next_sg = sg_next(datasg);
+ } else {
+ fod->next_sg_offset = 0;
+ datasg = sg_next(datasg);
+ }
+ tlen -= sg->length;
+ fcpreq->sg_cnt++;
+ }
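+
+	/*
+	 * fcpreq->sg[] now holds a trimmed copy of the slice of the data
+	 * scatterlist covering this transfer, and fod->next_sg /
+	 * fod->next_sg_offset reference the first byte of the next chunk.
+	 */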
+
+ /*
+ * If the last READDATA request: check if LLDD supports
+ * combined xfr with response.
+ */
+ if ((op == NVMET_FCOP_READDATA) &&
+ ((fod->offset + fcpreq->transfer_length) == fod->total_length) &&
+ (tgtport->ops->target_features & NVMET_FCTGTFEAT_READDATA_RSP)) {
+ fcpreq->op = NVMET_FCOP_READDATA_RSP;
+ nvmet_fc_prep_fcp_rsp(tgtport, fod);
+ }
+
+ ret = tgtport->ops->fcp_op(&tgtport->fc_target_port, fod->fcpreq);
+ if (ret) {
+ /*
+		 * should be ok to set w/o lock as it's in the thread of
+ * execution (not an async timer routine) and doesn't
+ * contend with any clearing action
+ */
+ fod->abort = true;
+
+ if (op == NVMET_FCOP_WRITEDATA)
+ nvmet_req_complete(&fod->req,
+ NVME_SC_FC_TRANSPORT_ERROR);
+ else /* NVMET_FCOP_READDATA or NVMET_FCOP_READDATA_RSP */ {
+ fcpreq->fcp_error = ret;
+ fcpreq->transferred_length = 0;
+ nvmet_fc_xmt_fcp_op_done(fod->fcpreq);
+ }
+ }
+}
+
+static void
+nvmet_fc_xmt_fcp_op_done(struct nvmefc_tgt_fcp_req *fcpreq)
+{
+ struct nvmet_fc_fcp_iod *fod = fcpreq->nvmet_fc_private;
+ struct nvmet_fc_tgtport *tgtport = fod->tgtport;
+ unsigned long flags;
+ bool abort;
+
+ spin_lock_irqsave(&fod->flock, flags);
+ abort = fod->abort;
+ spin_unlock_irqrestore(&fod->flock, flags);
+
+ /* if in the middle of an io and we need to tear down */
+ if (abort && fcpreq->op != NVMET_FCOP_ABORT) {
+ /* data no longer needed */
+ nvmet_fc_free_tgt_pgs(fod);
+
+ if (fcpreq->fcp_error || abort)
+ nvmet_req_complete(&fod->req, fcpreq->fcp_error);
+
+ return;
+ }
+
+ switch (fcpreq->op) {
+
+ case NVMET_FCOP_WRITEDATA:
+ if (abort || fcpreq->fcp_error ||
+ fcpreq->transferred_length != fcpreq->transfer_length) {
+ nvmet_req_complete(&fod->req,
+ NVME_SC_FC_TRANSPORT_ERROR);
+ return;
+ }
+
+ fod->offset += fcpreq->transferred_length;
+ if (fod->offset != fod->total_length) {
+ /* transfer the next chunk */
+ nvmet_fc_transfer_fcp_data(tgtport, fod,
+ NVMET_FCOP_WRITEDATA);
+ return;
+ }
+
+ /* data transfer complete, resume with nvmet layer */
+
+ fod->req.execute(&fod->req);
+
+ break;
+
+ case NVMET_FCOP_READDATA:
+ case NVMET_FCOP_READDATA_RSP:
+ if (abort || fcpreq->fcp_error ||
+ fcpreq->transferred_length != fcpreq->transfer_length) {
+ /* data no longer needed */
+ nvmet_fc_free_tgt_pgs(fod);
+
+ nvmet_fc_abort_op(tgtport, fod->fcpreq);
+ return;
+ }
+
+ /* success */
+
+ if (fcpreq->op == NVMET_FCOP_READDATA_RSP) {
+ /* data no longer needed */
+ nvmet_fc_free_tgt_pgs(fod);
+ fc_dma_sync_single_for_cpu(tgtport->dev, fod->rspdma,
+ sizeof(fod->rspiubuf), DMA_TO_DEVICE);
+ nvmet_fc_free_fcp_iod(fod->queue, fod);
+ return;
+ }
+
+ fod->offset += fcpreq->transferred_length;
+ if (fod->offset != fod->total_length) {
+ /* transfer the next chunk */
+ nvmet_fc_transfer_fcp_data(tgtport, fod,
+ NVMET_FCOP_READDATA);
+ return;
+ }
+
+ /* data transfer complete, send response */
+
+ /* data no longer needed */
+ nvmet_fc_free_tgt_pgs(fod);
+
+ nvmet_fc_xmt_fcp_rsp(tgtport, fod);
+
+ break;
+
+ case NVMET_FCOP_RSP:
+ case NVMET_FCOP_ABORT:
+ fc_dma_sync_single_for_cpu(tgtport->dev, fod->rspdma,
+ sizeof(fod->rspiubuf), DMA_TO_DEVICE);
+ nvmet_fc_free_fcp_iod(fod->queue, fod);
+ break;
+
+ default:
+ nvmet_fc_free_tgt_pgs(fod);
+ nvmet_fc_abort_op(tgtport, fod->fcpreq);
+ break;
+ }
+}
+
+/*
+ * actual completion handler after execution by the nvmet layer
+ */
+static void
+__nvmet_fc_fcp_nvme_cmd_done(struct nvmet_fc_tgtport *tgtport,
+ struct nvmet_fc_fcp_iod *fod, int status)
+{
+ struct nvme_common_command *sqe = &fod->cmdiubuf.sqe.common;
+ struct nvme_completion *cqe = &fod->rspiubuf.cqe;
+ unsigned long flags;
+ bool abort;
+
+ spin_lock_irqsave(&fod->flock, flags);
+ abort = fod->abort;
+ spin_unlock_irqrestore(&fod->flock, flags);
+
+ /* if we have a CQE, snoop the last sq_head value */
+ if (!status)
+ fod->queue->sqhd = cqe->sq_head;
+
+ if (abort) {
+ /* data no longer needed */
+ nvmet_fc_free_tgt_pgs(fod);
+
+ nvmet_fc_abort_op(tgtport, fod->fcpreq);
+ return;
+ }
+
+ /* if an error handling the cmd post initial parsing */
+ if (status) {
+ /* fudge up a failed CQE status for our transport error */
+ memset(cqe, 0, sizeof(*cqe));
+ cqe->sq_head = fod->queue->sqhd; /* echo last cqe sqhd */
+ cqe->sq_id = cpu_to_le16(fod->queue->qid);
+ cqe->command_id = sqe->command_id;
+ cqe->status = cpu_to_le16(status);
+ } else {
+
+ /*
+ * try to push the data even if the SQE status is non-zero.
+ * There may be a status where data still was intended to
+ * be moved
+ */
+ if ((fod->io_dir == NVMET_FCP_READ) && (fod->data_sg_cnt)) {
+ /* push the data over before sending rsp */
+ nvmet_fc_transfer_fcp_data(tgtport, fod,
+ NVMET_FCOP_READDATA);
+ return;
+ }
+
+ /* writes & no data - fall thru */
+ }
+
+ /* data no longer needed */
+ nvmet_fc_free_tgt_pgs(fod);
+
+ nvmet_fc_xmt_fcp_rsp(tgtport, fod);
+}
+
+
+static void
+nvmet_fc_fcp_nvme_cmd_done(struct nvmet_req *nvme_req)
+{
+ struct nvmet_fc_fcp_iod *fod = nvmet_req_to_fod(nvme_req);
+ struct nvmet_fc_tgtport *tgtport = fod->tgtport;
+
+ __nvmet_fc_fcp_nvme_cmd_done(tgtport, fod, 0);
+}
+
+
+/*
+ * Actual processing routine for received FC-NVME FCP commands from the LLD
+ */
+void
+nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport *tgtport,
+ struct nvmet_fc_fcp_iod *fod)
+{
+ struct nvme_fc_cmd_iu *cmdiu = &fod->cmdiubuf;
+ int ret;
+
+ /*
+ * Fused commands are currently not supported in the linux
+ * implementation.
+ *
+	 * As such, the FC transport implementation neither inspects fused
+	 * commands nor holds delivery to the upper layer until both
+	 * commands of the pair have arrived, ordered by CSN.
+ */
+
+ fod->fcpreq->done = nvmet_fc_xmt_fcp_op_done;
+
+ fod->total_length = be32_to_cpu(cmdiu->data_len);
+ if (cmdiu->flags & FCNVME_CMD_FLAGS_WRITE) {
+ fod->io_dir = NVMET_FCP_WRITE;
+ if (!nvme_is_write(&cmdiu->sqe))
+ goto transport_error;
+ } else if (cmdiu->flags & FCNVME_CMD_FLAGS_READ) {
+ fod->io_dir = NVMET_FCP_READ;
+ if (nvme_is_write(&cmdiu->sqe))
+ goto transport_error;
+ } else {
+ fod->io_dir = NVMET_FCP_NODATA;
+ if (fod->total_length)
+ goto transport_error;
+ }
+
+ fod->req.cmd = &fod->cmdiubuf.sqe;
+ fod->req.rsp = &fod->rspiubuf.cqe;
+ fod->req.port = fod->queue->port;
+
+ /* ensure nvmet handlers will set cmd handler callback */
+ fod->req.execute = NULL;
+
+ /* clear any response payload */
+ memset(&fod->rspiubuf, 0, sizeof(fod->rspiubuf));
+
+ ret = nvmet_req_init(&fod->req,
+ &fod->queue->nvme_cq,
+ &fod->queue->nvme_sq,
+ &nvmet_fc_tgt_fcp_ops);
+ if (!ret) { /* bad SQE content */
+ nvmet_fc_abort_op(tgtport, fod->fcpreq);
+ return;
+ }
+
+ /* keep a running counter of tail position */
+ atomic_inc(&fod->queue->sqtail);
+
+ fod->data_sg = NULL;
+ fod->data_sg_cnt = 0;
+ if (fod->total_length) {
+ ret = nvmet_fc_alloc_tgt_pgs(fod);
+ if (ret) {
+ nvmet_req_complete(&fod->req, ret);
+ return;
+ }
+ }
+ fod->req.sg = fod->data_sg;
+ fod->req.sg_cnt = fod->data_sg_cnt;
+ fod->offset = 0;
+ fod->next_sg = fod->data_sg;
+ fod->next_sg_offset = 0;
+
+ if (fod->io_dir == NVMET_FCP_WRITE) {
+ /* pull the data over before invoking nvmet layer */
+ nvmet_fc_transfer_fcp_data(tgtport, fod, NVMET_FCOP_WRITEDATA);
+ return;
+ }
+
+ /*
+ * Reads or no data:
+ *
+ * can invoke the nvmet_layer now. If read data, cmd completion will
+ * push the data
+ */
+
+ fod->req.execute(&fod->req);
+
+ return;
+
+transport_error:
+ nvmet_fc_abort_op(tgtport, fod->fcpreq);
+}
+
+/*
+ * Work-queue entry point: hand a received FC-NVME FCP command to the
+ * actual processing routine.
+ */
+static void
+nvmet_fc_handle_fcp_rqst_work(struct work_struct *work)
+{
+ struct nvmet_fc_fcp_iod *fod =
+ container_of(work, struct nvmet_fc_fcp_iod, work);
+ struct nvmet_fc_tgtport *tgtport = fod->tgtport;
+
+ nvmet_fc_handle_fcp_rqst(tgtport, fod);
+}
+
+/**
+ * nvmet_fc_rcv_fcp_req - transport entry point called by an LLDD
+ *			upon the reception of an NVME FCP CMD IU.
+ *
+ * Pass a FC-NVME FCP CMD IU received from the FC link to the nvmet-fc
+ * layer for processing.
+ *
+ * The nvmet-fc layer will copy cmd payload to an internal structure for
+ * processing. As such, upon completion of the routine, the LLDD may
+ * immediately free/reuse the CMD IU buffer passed in the call.
+ *
+ * If this routine returns error, the LLDD should abort the exchange.
+ *
+ * @target_port: pointer to the (registered) target port the FCP CMD IU
+ *               was received on.
+ * @fcpreq: pointer to a fcpreq request structure to be used to reference
+ * the exchange corresponding to the FCP Exchange.
+ * @cmdiubuf: pointer to the buffer containing the FCP CMD IU
+ * @cmdiubuf_len: length, in bytes, of the received FCP CMD IU
+ */
+int
+nvmet_fc_rcv_fcp_req(struct nvmet_fc_target_port *target_port,
+ struct nvmefc_tgt_fcp_req *fcpreq,
+ void *cmdiubuf, u32 cmdiubuf_len)
+{
+ struct nvmet_fc_tgtport *tgtport = targetport_to_tgtport(target_port);
+ struct nvme_fc_cmd_iu *cmdiu = cmdiubuf;
+ struct nvmet_fc_tgt_queue *queue;
+ struct nvmet_fc_fcp_iod *fod;
+
+	/*
+	 * validate iu (note: iu_len is in 32-bit words), so the
+	 * connection id can be used to find the queue
+	 */
+ if ((cmdiubuf_len != sizeof(*cmdiu)) ||
+ (cmdiu->scsi_id != NVME_CMD_SCSI_ID) ||
+ (cmdiu->fc_id != NVME_CMD_FC_ID) ||
+ (be16_to_cpu(cmdiu->iu_len) != (sizeof(*cmdiu)/4)))
+ return -EIO;
+
+
+ queue = nvmet_fc_find_target_queue(tgtport,
+ be64_to_cpu(cmdiu->connection_id));
+ if (!queue)
+ return -ENOTCONN;
+
+ /*
+ * note: reference taken by find_target_queue
+ * After successful fod allocation, the fod will inherit the
+ * ownership of that reference and will remove the reference
+ * when the fod is freed.
+ */
+
+ fod = nvmet_fc_alloc_fcp_iod(queue);
+ if (!fod) {
+ /* release the queue lookup reference */
+ nvmet_fc_tgt_q_put(queue);
+ return -ENOENT;
+ }
+
+ fcpreq->nvmet_fc_private = fod;
+ fod->fcpreq = fcpreq;
+ /*
+	 * put all admin cmds on hw queue id 0. All io commands go to
+	 * their respective hw queue, selected by a modulo of the queue id.
+ */
+ fcpreq->hwqid = queue->qid ?
+ ((queue->qid - 1) % tgtport->ops->max_hw_queues) : 0;
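+	/* e.g. with 4 hw queues, io queue qid 5 maps to hwqid (5 - 1) % 4 = 0 */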
+ memcpy(&fod->cmdiubuf, cmdiubuf, cmdiubuf_len);
+
+ queue_work_on(queue->cpu, queue->work_q, &fod->work);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(nvmet_fc_rcv_fcp_req);
+
+enum {
+ FCT_TRADDR_ERR = 0,
+ FCT_TRADDR_WWNN = 1 << 0,
+ FCT_TRADDR_WWPN = 1 << 1,
+};
+
+struct nvmet_fc_traddr {
+ u64 nn;
+ u64 pn;
+};
+
+static const match_table_t traddr_opt_tokens = {
+ { FCT_TRADDR_WWNN, "nn-%s" },
+ { FCT_TRADDR_WWPN, "pn-%s" },
+ { FCT_TRADDR_ERR, NULL }
+};
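+
+/*
+ * e.g. a traddr string of the form (hypothetical values):
+ *	"nn-0x10000090fac7893f,pn-0x20000090fac7893f"
+ */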
+
+static int
+nvmet_fc_parse_traddr(struct nvmet_fc_traddr *traddr, char *buf)
+{
+ substring_t args[MAX_OPT_ARGS];
+ char *options, *o, *p;
+ int token, ret = 0;
+ u64 token64;
+
+ options = o = kstrdup(buf, GFP_KERNEL);
+ if (!options)
+ return -ENOMEM;
+
+ while ((p = strsep(&o, ",\n")) != NULL) {
+ if (!*p)
+ continue;
+
+ token = match_token(p, traddr_opt_tokens, args);
+ switch (token) {
+ case FCT_TRADDR_WWNN:
+ if (match_u64(args, &token64)) {
+ ret = -EINVAL;
+ goto out;
+ }
+ traddr->nn = token64;
+ break;
+ case FCT_TRADDR_WWPN:
+ if (match_u64(args, &token64)) {
+ ret = -EINVAL;
+ goto out;
+ }
+ traddr->pn = token64;
+ break;
+ default:
+ pr_warn("unknown traddr token or missing value '%s'\n",
+ p);
+ ret = -EINVAL;
+ goto out;
+ }
+ }
+
+out:
+ kfree(options);
+ return ret;
+}
+
+static int
+nvmet_fc_add_port(struct nvmet_port *port)
+{
+ struct nvmet_fc_tgtport *tgtport;
+ struct nvmet_fc_traddr traddr = { 0L, 0L };
+ unsigned long flags;
+ int ret;
+
+ /* validate the address info */
+ if ((port->disc_addr.trtype != NVMF_TRTYPE_FC) ||
+ (port->disc_addr.adrfam != NVMF_ADDR_FAMILY_FC))
+ return -EINVAL;
+
+ /* map the traddr address info to a target port */
+
+ ret = nvmet_fc_parse_traddr(&traddr, port->disc_addr.traddr);
+ if (ret)
+ return ret;
+
+ ret = -ENXIO;
+ spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
+ list_for_each_entry(tgtport, &nvmet_fc_target_list, tgt_list) {
+ if ((tgtport->fc_target_port.node_name == traddr.nn) &&
+ (tgtport->fc_target_port.port_name == traddr.pn)) {
+			/* an FC port can map to only one nvmet port id */
+ if (!tgtport->port) {
+ tgtport->port = port;
+ port->priv = tgtport;
+ ret = 0;
+ } else
+ ret = -EALREADY;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
+ return ret;
+}
+
+static void
+nvmet_fc_remove_port(struct nvmet_port *port)
+{
+ struct nvmet_fc_tgtport *tgtport = port->priv;
+ unsigned long flags;
+
+ spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
+ if (tgtport->port == port) {
+ nvmet_fc_tgtport_put(tgtport);
+ tgtport->port = NULL;
+ }
+ spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
+}
+
+static struct nvmet_fabrics_ops nvmet_fc_tgt_fcp_ops = {
+ .owner = THIS_MODULE,
+ .type = NVMF_TRTYPE_FC,
+ .msdbd = 1,
+ .add_port = nvmet_fc_add_port,
+ .remove_port = nvmet_fc_remove_port,
+ .queue_response = nvmet_fc_fcp_nvme_cmd_done,
+ .delete_ctrl = nvmet_fc_delete_ctrl,
+};
+
+static int __init nvmet_fc_init_module(void)
+{
+ return nvmet_register_transport(&nvmet_fc_tgt_fcp_ops);
+}
+
+static void __exit nvmet_fc_exit_module(void)
+{
+	/* sanity check - all targetports should be removed */
+ if (!list_empty(&nvmet_fc_target_list))
+ pr_warn("%s: targetport list not empty\n", __func__);
+
+ nvmet_unregister_transport(&nvmet_fc_tgt_fcp_ops);
+
+ ida_destroy(&nvmet_fc_tgtport_cnt);
+}
+
+module_init(nvmet_fc_init_module);
+module_exit(nvmet_fc_exit_module);
+
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/nvme/target/fcloop.c b/drivers/nvme/target/fcloop.c
new file mode 100644
index 000000000000..bcb8ebeb01c5
--- /dev/null
+++ b/drivers/nvme/target/fcloop.c
@@ -0,0 +1,1148 @@
+/*
+ * Copyright (c) 2016 Avago Technologies. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful.
+ * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND WARRANTIES,
+ * INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, FITNESS FOR A
+ * PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE DISCLAIMED, EXCEPT TO
+ * THE EXTENT THAT SUCH DISCLAIMERS ARE HELD TO BE LEGALLY INVALID.
+ * See the GNU General Public License for more details, a copy of which
+ * can be found in the file COPYING included with this package
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/module.h>
+#include <linux/parser.h>
+#include <uapi/scsi/fc/fc_fs.h>
+
+#include "../host/nvme.h"
+#include "../target/nvmet.h"
+#include <linux/nvme-fc-driver.h>
+#include <linux/nvme-fc.h>
+
+
+enum {
+ NVMF_OPT_ERR = 0,
+ NVMF_OPT_WWNN = 1 << 0,
+ NVMF_OPT_WWPN = 1 << 1,
+ NVMF_OPT_ROLES = 1 << 2,
+ NVMF_OPT_FCADDR = 1 << 3,
+ NVMF_OPT_LPWWNN = 1 << 4,
+ NVMF_OPT_LPWWPN = 1 << 5,
+};
+
+struct fcloop_ctrl_options {
+ int mask;
+ u64 wwnn;
+ u64 wwpn;
+ u32 roles;
+ u32 fcaddr;
+ u64 lpwwnn;
+ u64 lpwwpn;
+};
+
+static const match_table_t opt_tokens = {
+ { NVMF_OPT_WWNN, "wwnn=%s" },
+ { NVMF_OPT_WWPN, "wwpn=%s" },
+ { NVMF_OPT_ROLES, "roles=%d" },
+ { NVMF_OPT_FCADDR, "fcaddr=%x" },
+ { NVMF_OPT_LPWWNN, "lpwwnn=%s" },
+ { NVMF_OPT_LPWWPN, "lpwwpn=%s" },
+ { NVMF_OPT_ERR, NULL }
+};
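+
+/*
+ * e.g. an option string of the form (hypothetical values):
+ *	"wwnn=0x10000090fac7893f,wwpn=0x20000090fac7893f,roles=0x3,fcaddr=0x10100"
+ */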
+
+static int
+fcloop_parse_options(struct fcloop_ctrl_options *opts,
+ const char *buf)
+{
+ substring_t args[MAX_OPT_ARGS];
+ char *options, *o, *p;
+ int token, ret = 0;
+ u64 token64;
+
+ options = o = kstrdup(buf, GFP_KERNEL);
+ if (!options)
+ return -ENOMEM;
+
+ while ((p = strsep(&o, ",\n")) != NULL) {
+ if (!*p)
+ continue;
+
+ token = match_token(p, opt_tokens, args);
+ opts->mask |= token;
+ switch (token) {
+ case NVMF_OPT_WWNN:
+ if (match_u64(args, &token64)) {
+ ret = -EINVAL;
+ goto out_free_options;
+ }
+ opts->wwnn = token64;
+ break;
+ case NVMF_OPT_WWPN:
+ if (match_u64(args, &token64)) {
+ ret = -EINVAL;
+ goto out_free_options;
+ }
+ opts->wwpn = token64;
+ break;
+ case NVMF_OPT_ROLES:
+ if (match_int(args, &token)) {
+ ret = -EINVAL;
+ goto out_free_options;
+ }
+ opts->roles = token;
+ break;
+ case NVMF_OPT_FCADDR:
+ if (match_hex(args, &token)) {
+ ret = -EINVAL;
+ goto out_free_options;
+ }
+ opts->fcaddr = token;
+ break;
+ case NVMF_OPT_LPWWNN:
+ if (match_u64(args, &token64)) {
+ ret = -EINVAL;
+ goto out_free_options;
+ }
+ opts->lpwwnn = token64;
+ break;
+ case NVMF_OPT_LPWWPN:
+ if (match_u64(args, &token64)) {
+ ret = -EINVAL;
+ goto out_free_options;
+ }
+ opts->lpwwpn = token64;
+ break;
+ default:
+ pr_warn("unknown parameter or missing value '%s'\n", p);
+ ret = -EINVAL;
+ goto out_free_options;
+ }
+ }
+
+out_free_options:
+ kfree(options);
+ return ret;
+}
+
+
+static int
+fcloop_parse_nm_options(struct device *dev, u64 *nname, u64 *pname,
+ const char *buf)
+{
+ substring_t args[MAX_OPT_ARGS];
+ char *options, *o, *p;
+ int token, ret = 0;
+ u64 token64;
+
+ *nname = -1;
+ *pname = -1;
+
+ options = o = kstrdup(buf, GFP_KERNEL);
+ if (!options)
+ return -ENOMEM;
+
+ while ((p = strsep(&o, ",\n")) != NULL) {
+ if (!*p)
+ continue;
+
+ token = match_token(p, opt_tokens, args);
+ switch (token) {
+ case NVMF_OPT_WWNN:
+ if (match_u64(args, &token64)) {
+ ret = -EINVAL;
+ goto out_free_options;
+ }
+ *nname = token64;
+ break;
+ case NVMF_OPT_WWPN:
+ if (match_u64(args, &token64)) {
+ ret = -EINVAL;
+ goto out_free_options;
+ }
+ *pname = token64;
+ break;
+ default:
+ pr_warn("unknown parameter or missing value '%s'\n", p);
+ ret = -EINVAL;
+ goto out_free_options;
+ }
+ }
+
+out_free_options:
+ kfree(options);
+
+ if (!ret) {
+ if (*nname == -1)
+ return -EINVAL;
+ if (*pname == -1)
+ return -EINVAL;
+ }
+
+ return ret;
+}
+
+
+#define LPORT_OPTS (NVMF_OPT_WWNN | NVMF_OPT_WWPN)
+
+#define RPORT_OPTS (NVMF_OPT_WWNN | NVMF_OPT_WWPN | \
+ NVMF_OPT_LPWWNN | NVMF_OPT_LPWWPN)
+
+#define TGTPORT_OPTS (NVMF_OPT_WWNN | NVMF_OPT_WWPN)
+
+#define ALL_OPTS (NVMF_OPT_WWNN | NVMF_OPT_WWPN | NVMF_OPT_ROLES | \
+ NVMF_OPT_FCADDR | NVMF_OPT_LPWWNN | NVMF_OPT_LPWWPN)
+
+
+static DEFINE_SPINLOCK(fcloop_lock);
+static LIST_HEAD(fcloop_lports);
+static LIST_HEAD(fcloop_nports);
+
+struct fcloop_lport {
+ struct nvme_fc_local_port *localport;
+ struct list_head lport_list;
+ struct completion unreg_done;
+};
+
+struct fcloop_rport {
+ struct nvme_fc_remote_port *remoteport;
+ struct nvmet_fc_target_port *targetport;
+ struct fcloop_nport *nport;
+ struct fcloop_lport *lport;
+};
+
+struct fcloop_tport {
+ struct nvmet_fc_target_port *targetport;
+ struct nvme_fc_remote_port *remoteport;
+ struct fcloop_nport *nport;
+ struct fcloop_lport *lport;
+};
+
+struct fcloop_nport {
+ struct fcloop_rport *rport;
+ struct fcloop_tport *tport;
+ struct fcloop_lport *lport;
+ struct list_head nport_list;
+ struct kref ref;
+ struct completion rport_unreg_done;
+ struct completion tport_unreg_done;
+ u64 node_name;
+ u64 port_name;
+ u32 port_role;
+ u32 port_id;
+};
+
+struct fcloop_lsreq {
+ struct fcloop_tport *tport;
+ struct nvmefc_ls_req *lsreq;
+ struct work_struct work;
+ struct nvmefc_tgt_ls_req tgt_ls_req;
+ int status;
+};
+
+struct fcloop_fcpreq {
+ struct fcloop_tport *tport;
+ struct nvmefc_fcp_req *fcpreq;
+ u16 status;
+ struct work_struct work;
+ struct nvmefc_tgt_fcp_req tgt_fcp_req;
+};
+
+
+static inline struct fcloop_lsreq *
+tgt_ls_req_to_lsreq(struct nvmefc_tgt_ls_req *tgt_lsreq)
+{
+ return container_of(tgt_lsreq, struct fcloop_lsreq, tgt_ls_req);
+}
+
+static inline struct fcloop_fcpreq *
+tgt_fcp_req_to_fcpreq(struct nvmefc_tgt_fcp_req *tgt_fcpreq)
+{
+ return container_of(tgt_fcpreq, struct fcloop_fcpreq, tgt_fcp_req);
+}
+
+
+static int
+fcloop_create_queue(struct nvme_fc_local_port *localport,
+ unsigned int qidx, u16 qsize,
+ void **handle)
+{
+ *handle = localport;
+ return 0;
+}
+
+static void
+fcloop_delete_queue(struct nvme_fc_local_port *localport,
+ unsigned int idx, void *handle)
+{
+}
+
+
+/*
+ * Transmit of LS RSP done (e.g. buffers all set). Call back up the
+ * initiator's "done" flow.
+ */
+static void
+fcloop_tgt_lsrqst_done_work(struct work_struct *work)
+{
+ struct fcloop_lsreq *tls_req =
+ container_of(work, struct fcloop_lsreq, work);
+ struct fcloop_tport *tport = tls_req->tport;
+ struct nvmefc_ls_req *lsreq = tls_req->lsreq;
+
+ if (tport->remoteport)
+ lsreq->done(lsreq, tls_req->status);
+}
+
+static int
+fcloop_ls_req(struct nvme_fc_local_port *localport,
+ struct nvme_fc_remote_port *remoteport,
+ struct nvmefc_ls_req *lsreq)
+{
+ struct fcloop_lsreq *tls_req = lsreq->private;
+ struct fcloop_rport *rport = remoteport->private;
+ int ret = 0;
+
+ tls_req->lsreq = lsreq;
+ INIT_WORK(&tls_req->work, fcloop_tgt_lsrqst_done_work);
+
+ if (!rport->targetport) {
+ tls_req->status = -ECONNREFUSED;
+ schedule_work(&tls_req->work);
+ return ret;
+ }
+
+ tls_req->status = 0;
+ tls_req->tport = rport->targetport->private;
+ ret = nvmet_fc_rcv_ls_req(rport->targetport, &tls_req->tgt_ls_req,
+ lsreq->rqstaddr, lsreq->rqstlen);
+
+ return ret;
+}
+
+static int
+fcloop_xmt_ls_rsp(struct nvmet_fc_target_port *tport,
+ struct nvmefc_tgt_ls_req *tgt_lsreq)
+{
+ struct fcloop_lsreq *tls_req = tgt_ls_req_to_lsreq(tgt_lsreq);
+ struct nvmefc_ls_req *lsreq = tls_req->lsreq;
+
+ memcpy(lsreq->rspaddr, tgt_lsreq->rspbuf,
+ ((lsreq->rsplen < tgt_lsreq->rsplen) ?
+ lsreq->rsplen : tgt_lsreq->rsplen));
+ tgt_lsreq->done(tgt_lsreq);
+
+ schedule_work(&tls_req->work);
+
+ return 0;
+}
+
+/*
+ * FCP IO operation done. Call back up the initiator's "done" flow.
+ */
+static void
+fcloop_tgt_fcprqst_done_work(struct work_struct *work)
+{
+ struct fcloop_fcpreq *tfcp_req =
+ container_of(work, struct fcloop_fcpreq, work);
+ struct fcloop_tport *tport = tfcp_req->tport;
+ struct nvmefc_fcp_req *fcpreq = tfcp_req->fcpreq;
+
+ if (tport->remoteport) {
+ fcpreq->status = tfcp_req->status;
+ fcpreq->done(fcpreq);
+ }
+}
+
+
+static int
+fcloop_fcp_req(struct nvme_fc_local_port *localport,
+ struct nvme_fc_remote_port *remoteport,
+ void *hw_queue_handle,
+ struct nvmefc_fcp_req *fcpreq)
+{
+ struct fcloop_fcpreq *tfcp_req = fcpreq->private;
+ struct fcloop_rport *rport = remoteport->private;
+ int ret = 0;
+
+ INIT_WORK(&tfcp_req->work, fcloop_tgt_fcprqst_done_work);
+
+ if (!rport->targetport) {
+ tfcp_req->status = NVME_SC_FC_TRANSPORT_ERROR;
+ schedule_work(&tfcp_req->work);
+ return ret;
+ }
+
+ tfcp_req->fcpreq = fcpreq;
+ tfcp_req->tport = rport->targetport->private;
+
+ ret = nvmet_fc_rcv_fcp_req(rport->targetport, &tfcp_req->tgt_fcp_req,
+ fcpreq->cmdaddr, fcpreq->cmdlen);
+
+ return ret;
+}
+
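+/*
+ * Copy 'length' bytes between the target-side data scatterlist and the
+ * initiator-side io scatterlist, after skipping the first 'offset' bytes
+ * of the io list. Direction depends on op: WRITEDATA copies io -> data,
+ * the read ops copy data -> io.
+ */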
+static void
+fcloop_fcp_copy_data(u8 op, struct scatterlist *data_sg,
+ struct scatterlist *io_sg, u32 offset, u32 length)
+{
+ void *data_p, *io_p;
+ u32 data_len, io_len, tlen;
+
+ io_p = sg_virt(io_sg);
+ io_len = io_sg->length;
+
+ for ( ; offset; ) {
+ tlen = min_t(u32, offset, io_len);
+ offset -= tlen;
+ io_len -= tlen;
+ if (!io_len) {
+ io_sg = sg_next(io_sg);
+ io_p = sg_virt(io_sg);
+ io_len = io_sg->length;
+ } else
+ io_p += tlen;
+ }
+
+ data_p = sg_virt(data_sg);
+ data_len = data_sg->length;
+
+ for ( ; length; ) {
+ tlen = min_t(u32, io_len, data_len);
+ tlen = min_t(u32, tlen, length);
+
+ if (op == NVMET_FCOP_WRITEDATA)
+ memcpy(data_p, io_p, tlen);
+ else
+ memcpy(io_p, data_p, tlen);
+
+ length -= tlen;
+
+ io_len -= tlen;
+ if ((!io_len) && (length)) {
+ io_sg = sg_next(io_sg);
+ io_p = sg_virt(io_sg);
+ io_len = io_sg->length;
+ } else
+ io_p += tlen;
+
+ data_len -= tlen;
+ if ((!data_len) && (length)) {
+ data_sg = sg_next(data_sg);
+ data_p = sg_virt(data_sg);
+ data_len = data_sg->length;
+ } else
+ data_p += tlen;
+ }
+}
+
+static int
+fcloop_fcp_op(struct nvmet_fc_target_port *tgtport,
+ struct nvmefc_tgt_fcp_req *tgt_fcpreq)
+{
+ struct fcloop_fcpreq *tfcp_req = tgt_fcp_req_to_fcpreq(tgt_fcpreq);
+ struct nvmefc_fcp_req *fcpreq = tfcp_req->fcpreq;
+ u32 rsplen = 0, xfrlen = 0;
+ int fcp_err = 0;
+ u8 op = tgt_fcpreq->op;
+
+ switch (op) {
+ case NVMET_FCOP_WRITEDATA:
+ xfrlen = tgt_fcpreq->transfer_length;
+ fcloop_fcp_copy_data(op, tgt_fcpreq->sg, fcpreq->first_sgl,
+ tgt_fcpreq->offset, xfrlen);
+ fcpreq->transferred_length += xfrlen;
+ break;
+
+ case NVMET_FCOP_READDATA:
+ case NVMET_FCOP_READDATA_RSP:
+ xfrlen = tgt_fcpreq->transfer_length;
+ fcloop_fcp_copy_data(op, tgt_fcpreq->sg, fcpreq->first_sgl,
+ tgt_fcpreq->offset, xfrlen);
+ fcpreq->transferred_length += xfrlen;
+ if (op == NVMET_FCOP_READDATA)
+ break;
+
+ /* Fall-Thru to RSP handling */
+
+ case NVMET_FCOP_RSP:
+ rsplen = ((fcpreq->rsplen < tgt_fcpreq->rsplen) ?
+ fcpreq->rsplen : tgt_fcpreq->rsplen);
+ memcpy(fcpreq->rspaddr, tgt_fcpreq->rspaddr, rsplen);
+ if (rsplen < tgt_fcpreq->rsplen)
+ fcp_err = -E2BIG;
+ fcpreq->rcv_rsplen = rsplen;
+ fcpreq->status = 0;
+ tfcp_req->status = 0;
+ break;
+
+ case NVMET_FCOP_ABORT:
+ tfcp_req->status = NVME_SC_FC_TRANSPORT_ABORTED;
+ break;
+
+ default:
+ fcp_err = -EINVAL;
+ break;
+ }
+
+ tgt_fcpreq->transferred_length = xfrlen;
+ tgt_fcpreq->fcp_error = fcp_err;
+ tgt_fcpreq->done(tgt_fcpreq);
+
+ if ((!fcp_err) && (op == NVMET_FCOP_RSP ||
+ op == NVMET_FCOP_READDATA_RSP ||
+ op == NVMET_FCOP_ABORT))
+ schedule_work(&tfcp_req->work);
+
+ return 0;
+}
+
+static void
+fcloop_ls_abort(struct nvme_fc_local_port *localport,
+ struct nvme_fc_remote_port *remoteport,
+ struct nvmefc_ls_req *lsreq)
+{
+}
+
+static void
+fcloop_fcp_abort(struct nvme_fc_local_port *localport,
+ struct nvme_fc_remote_port *remoteport,
+ void *hw_queue_handle,
+ struct nvmefc_fcp_req *fcpreq)
+{
+}
+
+static void
+fcloop_localport_delete(struct nvme_fc_local_port *localport)
+{
+ struct fcloop_lport *lport = localport->private;
+
+ /* release any threads waiting for the unreg to complete */
+ complete(&lport->unreg_done);
+}
+
+static void
+fcloop_remoteport_delete(struct nvme_fc_remote_port *remoteport)
+{
+ struct fcloop_rport *rport = remoteport->private;
+
+ /* release any threads waiting for the unreg to complete */
+ complete(&rport->nport->rport_unreg_done);
+}
+
+static void
+fcloop_targetport_delete(struct nvmet_fc_target_port *targetport)
+{
+ struct fcloop_tport *tport = targetport->private;
+
+ /* release any threads waiting for the unreg to complete */
+ complete(&tport->nport->tport_unreg_done);
+}
+
+#define FCLOOP_HW_QUEUES 4
+#define FCLOOP_SGL_SEGS 256
+#define FCLOOP_DMABOUND_4G 0xFFFFFFFF
+
+struct nvme_fc_port_template fctemplate = {
+ .localport_delete = fcloop_localport_delete,
+ .remoteport_delete = fcloop_remoteport_delete,
+ .create_queue = fcloop_create_queue,
+ .delete_queue = fcloop_delete_queue,
+ .ls_req = fcloop_ls_req,
+ .fcp_io = fcloop_fcp_req,
+ .ls_abort = fcloop_ls_abort,
+ .fcp_abort = fcloop_fcp_abort,
+ .max_hw_queues = FCLOOP_HW_QUEUES,
+ .max_sgl_segments = FCLOOP_SGL_SEGS,
+ .max_dif_sgl_segments = FCLOOP_SGL_SEGS,
+ .dma_boundary = FCLOOP_DMABOUND_4G,
+ /* sizes of additional private data for data structures */
+ .local_priv_sz = sizeof(struct fcloop_lport),
+ .remote_priv_sz = sizeof(struct fcloop_rport),
+ .lsrqst_priv_sz = sizeof(struct fcloop_lsreq),
+ .fcprqst_priv_sz = sizeof(struct fcloop_fcpreq),
+};
+
+struct nvmet_fc_target_template tgttemplate = {
+ .targetport_delete = fcloop_targetport_delete,
+ .xmt_ls_rsp = fcloop_xmt_ls_rsp,
+ .fcp_op = fcloop_fcp_op,
+ .max_hw_queues = FCLOOP_HW_QUEUES,
+ .max_sgl_segments = FCLOOP_SGL_SEGS,
+ .max_dif_sgl_segments = FCLOOP_SGL_SEGS,
+ .dma_boundary = FCLOOP_DMABOUND_4G,
+ /* optional features */
+ .target_features = NVMET_FCTGTFEAT_READDATA_RSP |
+ NVMET_FCTGTFEAT_NEEDS_CMD_CPUSCHED,
+ /* sizes of additional private data for data structures */
+ .target_priv_sz = sizeof(struct fcloop_tport),
+};
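+
+/*
+ * fcloop registers both an initiator-side port template and a target-side
+ * template, so FC-NVME traffic created through the sysfs interfaces below
+ * is looped back in memory rather than sent on a wire.
+ */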
+
+static ssize_t
+fcloop_create_local_port(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct nvme_fc_port_info pinfo;
+ struct fcloop_ctrl_options *opts;
+ struct nvme_fc_local_port *localport;
+ struct fcloop_lport *lport;
+ int ret;
+
+ opts = kzalloc(sizeof(*opts), GFP_KERNEL);
+ if (!opts)
+ return -ENOMEM;
+
+ ret = fcloop_parse_options(opts, buf);
+ if (ret)
+ goto out_free_opts;
+
+ /* everything there ? */
+ if ((opts->mask & LPORT_OPTS) != LPORT_OPTS) {
+ ret = -EINVAL;
+ goto out_free_opts;
+ }
+
+ pinfo.node_name = opts->wwnn;
+ pinfo.port_name = opts->wwpn;
+ pinfo.port_role = opts->roles;
+ pinfo.port_id = opts->fcaddr;
+
+ ret = nvme_fc_register_localport(&pinfo, &fctemplate, NULL, &localport);
+ if (!ret) {
+ unsigned long flags;
+
+ /* success */
+ lport = localport->private;
+ lport->localport = localport;
+ INIT_LIST_HEAD(&lport->lport_list);
+
+ spin_lock_irqsave(&fcloop_lock, flags);
+ list_add_tail(&lport->lport_list, &fcloop_lports);
+ spin_unlock_irqrestore(&fcloop_lock, flags);
+
+ /* mark all of the input buffer consumed */
+ ret = count;
+ }
+
+out_free_opts:
+ kfree(opts);
+ return ret ? ret : count;
+}
+
+
+static void
+__unlink_local_port(struct fcloop_lport *lport)
+{
+ list_del(&lport->lport_list);
+}
+
+static int
+__wait_localport_unreg(struct fcloop_lport *lport)
+{
+ int ret;
+
+ init_completion(&lport->unreg_done);
+
+ ret = nvme_fc_unregister_localport(lport->localport);
+
+ wait_for_completion(&lport->unreg_done);
+
+ return ret;
+}
+
+
+static ssize_t
+fcloop_delete_local_port(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct fcloop_lport *tlport, *lport = NULL;
+ u64 nodename, portname;
+ unsigned long flags;
+ int ret;
+
+ ret = fcloop_parse_nm_options(dev, &nodename, &portname, buf);
+ if (ret)
+ return ret;
+
+ spin_lock_irqsave(&fcloop_lock, flags);
+
+ list_for_each_entry(tlport, &fcloop_lports, lport_list) {
+ if (tlport->localport->node_name == nodename &&
+ tlport->localport->port_name == portname) {
+ lport = tlport;
+ __unlink_local_port(lport);
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&fcloop_lock, flags);
+
+ if (!lport)
+ return -ENOENT;
+
+ ret = __wait_localport_unreg(lport);
+
+ return ret ? ret : count;
+}
+
+static void
+fcloop_nport_free(struct kref *ref)
+{
+ struct fcloop_nport *nport =
+ container_of(ref, struct fcloop_nport, ref);
+ unsigned long flags;
+
+ spin_lock_irqsave(&fcloop_lock, flags);
+ list_del(&nport->nport_list);
+ spin_unlock_irqrestore(&fcloop_lock, flags);
+
+ kfree(nport);
+}
+
+static void
+fcloop_nport_put(struct fcloop_nport *nport)
+{
+ kref_put(&nport->ref, fcloop_nport_free);
+}
+
+static int
+fcloop_nport_get(struct fcloop_nport *nport)
+{
+ return kref_get_unless_zero(&nport->ref);
+}
+
+static struct fcloop_nport *
+fcloop_alloc_nport(const char *buf, size_t count, bool remoteport)
+{
+ struct fcloop_nport *newnport, *nport = NULL;
+ struct fcloop_lport *tmplport, *lport = NULL;
+ struct fcloop_ctrl_options *opts;
+ unsigned long flags;
+ u32 opts_mask = (remoteport) ? RPORT_OPTS : TGTPORT_OPTS;
+ int ret;
+
+ opts = kzalloc(sizeof(*opts), GFP_KERNEL);
+ if (!opts)
+ return NULL;
+
+ ret = fcloop_parse_options(opts, buf);
+ if (ret)
+ goto out_free_opts;
+
+ /* everything there ? */
+ if ((opts->mask & opts_mask) != opts_mask) {
+ ret = -EINVAL;
+ goto out_free_opts;
+ }
+
+ newnport = kzalloc(sizeof(*newnport), GFP_KERNEL);
+ if (!newnport)
+ goto out_free_opts;
+
+ INIT_LIST_HEAD(&newnport->nport_list);
+ newnport->node_name = opts->wwnn;
+ newnport->port_name = opts->wwpn;
+ if (opts->mask & NVMF_OPT_ROLES)
+ newnport->port_role = opts->roles;
+ if (opts->mask & NVMF_OPT_FCADDR)
+ newnport->port_id = opts->fcaddr;
+ kref_init(&newnport->ref);
+
+ spin_lock_irqsave(&fcloop_lock, flags);
+
+ list_for_each_entry(tmplport, &fcloop_lports, lport_list) {
+ if (tmplport->localport->node_name == opts->wwnn &&
+ tmplport->localport->port_name == opts->wwpn)
+ goto out_invalid_opts;
+
+ if (tmplport->localport->node_name == opts->lpwwnn &&
+ tmplport->localport->port_name == opts->lpwwpn)
+ lport = tmplport;
+ }
+
+ if (remoteport) {
+ if (!lport)
+ goto out_invalid_opts;
+ newnport->lport = lport;
+ }
+
+ list_for_each_entry(nport, &fcloop_nports, nport_list) {
+ if (nport->node_name == opts->wwnn &&
+ nport->port_name == opts->wwpn) {
+ if ((remoteport && nport->rport) ||
+ (!remoteport && nport->tport)) {
+ nport = NULL;
+ goto out_invalid_opts;
+ }
+
+ fcloop_nport_get(nport);
+
+ spin_unlock_irqrestore(&fcloop_lock, flags);
+
+ if (remoteport)
+ nport->lport = lport;
+ if (opts->mask & NVMF_OPT_ROLES)
+ nport->port_role = opts->roles;
+ if (opts->mask & NVMF_OPT_FCADDR)
+ nport->port_id = opts->fcaddr;
+ goto out_free_newnport;
+ }
+ }
+
+ list_add_tail(&newnport->nport_list, &fcloop_nports);
+
+ spin_unlock_irqrestore(&fcloop_lock, flags);
+
+ kfree(opts);
+ return newnport;
+
+out_invalid_opts:
+ spin_unlock_irqrestore(&fcloop_lock, flags);
+out_free_newnport:
+ kfree(newnport);
+out_free_opts:
+ kfree(opts);
+ return nport;
+}
+
+static ssize_t
+fcloop_create_remote_port(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct nvme_fc_remote_port *remoteport;
+ struct fcloop_nport *nport;
+ struct fcloop_rport *rport;
+ struct nvme_fc_port_info pinfo;
+ int ret;
+
+ nport = fcloop_alloc_nport(buf, count, true);
+ if (!nport)
+ return -EIO;
+
+ pinfo.node_name = nport->node_name;
+ pinfo.port_name = nport->port_name;
+ pinfo.port_role = nport->port_role;
+ pinfo.port_id = nport->port_id;
+
+ ret = nvme_fc_register_remoteport(nport->lport->localport,
+ &pinfo, &remoteport);
+ if (ret || !remoteport) {
+ fcloop_nport_put(nport);
+ return ret;
+ }
+
+ /* success */
+ rport = remoteport->private;
+ rport->remoteport = remoteport;
+ rport->targetport = (nport->tport) ? nport->tport->targetport : NULL;
+ if (nport->tport) {
+ nport->tport->remoteport = remoteport;
+ nport->tport->lport = nport->lport;
+ }
+ rport->nport = nport;
+ rport->lport = nport->lport;
+ nport->rport = rport;
+
+ return ret ? ret : count;
+}
+
+
+static struct fcloop_rport *
+__unlink_remote_port(struct fcloop_nport *nport)
+{
+ struct fcloop_rport *rport = nport->rport;
+
+ if (rport && nport->tport)
+ nport->tport->remoteport = NULL;
+ nport->rport = NULL;
+
+ return rport;
+}
+
+static int
+__wait_remoteport_unreg(struct fcloop_nport *nport, struct fcloop_rport *rport)
+{
+ int ret;
+
+ if (!rport)
+ return -EALREADY;
+
+ init_completion(&nport->rport_unreg_done);
+
+ ret = nvme_fc_unregister_remoteport(rport->remoteport);
+ if (ret)
+ return ret;
+
+ wait_for_completion(&nport->rport_unreg_done);
+
+ fcloop_nport_put(nport);
+
+ return ret;
+}
+
+static ssize_t
+fcloop_delete_remote_port(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct fcloop_nport *nport = NULL, *tmpport;
+	struct fcloop_rport *rport = NULL;
+ u64 nodename, portname;
+ unsigned long flags;
+ int ret;
+
+ ret = fcloop_parse_nm_options(dev, &nodename, &portname, buf);
+ if (ret)
+ return ret;
+
+ spin_lock_irqsave(&fcloop_lock, flags);
+
+ list_for_each_entry(tmpport, &fcloop_nports, nport_list) {
+ if (tmpport->node_name == nodename &&
+ tmpport->port_name == portname && tmpport->rport) {
+ nport = tmpport;
+ rport = __unlink_remote_port(nport);
+ break;
+ }
+ }
+
+ spin_unlock_irqrestore(&fcloop_lock, flags);
+
+ if (!nport)
+ return -ENOENT;
+
+ ret = __wait_remoteport_unreg(nport, rport);
+
+ return ret ? ret : count;
+}
+
+static ssize_t
+fcloop_create_target_port(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct nvmet_fc_target_port *targetport;
+ struct fcloop_nport *nport;
+ struct fcloop_tport *tport;
+ struct nvmet_fc_port_info tinfo;
+ int ret;
+
+ nport = fcloop_alloc_nport(buf, count, false);
+ if (!nport)
+ return -EIO;
+
+ tinfo.node_name = nport->node_name;
+ tinfo.port_name = nport->port_name;
+ tinfo.port_id = nport->port_id;
+
+ ret = nvmet_fc_register_targetport(&tinfo, &tgttemplate, NULL,
+ &targetport);
+ if (ret) {
+ fcloop_nport_put(nport);
+ return ret;
+ }
+
+ /* success */
+ tport = targetport->private;
+ tport->targetport = targetport;
+ tport->remoteport = (nport->rport) ? nport->rport->remoteport : NULL;
+ if (nport->rport)
+ nport->rport->targetport = targetport;
+ tport->nport = nport;
+ tport->lport = nport->lport;
+ nport->tport = tport;
+
+ return ret ? ret : count;
+}
+
+
+static struct fcloop_tport *
+__unlink_target_port(struct fcloop_nport *nport)
+{
+ struct fcloop_tport *tport = nport->tport;
+
+ if (tport && nport->rport)
+ nport->rport->targetport = NULL;
+ nport->tport = NULL;
+
+ return tport;
+}
+
+static int
+__wait_targetport_unreg(struct fcloop_nport *nport, struct fcloop_tport *tport)
+{
+ int ret;
+
+ if (!tport)
+ return -EALREADY;
+
+ init_completion(&nport->tport_unreg_done);
+
+ ret = nvmet_fc_unregister_targetport(tport->targetport);
+ if (ret)
+ return ret;
+
+ wait_for_completion(&nport->tport_unreg_done);
+
+ fcloop_nport_put(nport);
+
+ return ret;
+}
+
+static ssize_t
+fcloop_delete_target_port(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct fcloop_nport *nport = NULL, *tmpport;
+ struct fcloop_tport *tport;
+ u64 nodename, portname;
+ unsigned long flags;
+ int ret;
+
+ ret = fcloop_parse_nm_options(dev, &nodename, &portname, buf);
+ if (ret)
+ return ret;
+
+ spin_lock_irqsave(&fcloop_lock, flags);
+
+ list_for_each_entry(tmpport, &fcloop_nports, nport_list) {
+ if (tmpport->node_name == nodename &&
+ tmpport->port_name == portname && tmpport->tport) {
+ nport = tmpport;
+ tport = __unlink_target_port(nport);
+ break;
+ }
+ }
+
+ spin_unlock_irqrestore(&fcloop_lock, flags);
+
+ if (!nport)
+ return -ENOENT;
+
+ ret = __wait_targetport_unreg(nport, tport);
+
+ return ret ? ret : count;
+}
+
+
+static DEVICE_ATTR(add_local_port, 0200, NULL, fcloop_create_local_port);
+static DEVICE_ATTR(del_local_port, 0200, NULL, fcloop_delete_local_port);
+static DEVICE_ATTR(add_remote_port, 0200, NULL, fcloop_create_remote_port);
+static DEVICE_ATTR(del_remote_port, 0200, NULL, fcloop_delete_remote_port);
+static DEVICE_ATTR(add_target_port, 0200, NULL, fcloop_create_target_port);
+static DEVICE_ATTR(del_target_port, 0200, NULL, fcloop_delete_target_port);
+
+static struct attribute *fcloop_dev_attrs[] = {
+ &dev_attr_add_local_port.attr,
+ &dev_attr_del_local_port.attr,
+ &dev_attr_add_remote_port.attr,
+ &dev_attr_del_remote_port.attr,
+ &dev_attr_add_target_port.attr,
+ &dev_attr_del_target_port.attr,
+ NULL
+};
+
+static struct attribute_group fcloop_dev_attrs_group = {
+ .attrs = fcloop_dev_attrs,
+};
+
+static const struct attribute_group *fcloop_dev_attr_groups[] = {
+	&fcloop_dev_attrs_group,
+ NULL,
+};
+
+static struct class *fcloop_class;
+static struct device *fcloop_device;
+
+
+static int __init fcloop_init(void)
+{
+ int ret;
+
+ fcloop_class = class_create(THIS_MODULE, "fcloop");
+ if (IS_ERR(fcloop_class)) {
+ pr_err("couldn't register class fcloop\n");
+ ret = PTR_ERR(fcloop_class);
+ return ret;
+ }
+
+ fcloop_device = device_create_with_groups(
+ fcloop_class, NULL, MKDEV(0, 0), NULL,
+ fcloop_dev_attr_groups, "ctl");
+ if (IS_ERR(fcloop_device)) {
+ pr_err("couldn't create ctl device!\n");
+ ret = PTR_ERR(fcloop_device);
+ goto out_destroy_class;
+ }
+
+ get_device(fcloop_device);
+
+ return 0;
+
+out_destroy_class:
+ class_destroy(fcloop_class);
+ return ret;
+}
+
+static void __exit fcloop_exit(void)
+{
+ struct fcloop_lport *lport;
+ struct fcloop_nport *nport;
+ struct fcloop_tport *tport;
+ struct fcloop_rport *rport;
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&fcloop_lock, flags);
+
+ for (;;) {
+ nport = list_first_entry_or_null(&fcloop_nports,
+ typeof(*nport), nport_list);
+ if (!nport)
+ break;
+
+ tport = __unlink_target_port(nport);
+ rport = __unlink_remote_port(nport);
+
+ spin_unlock_irqrestore(&fcloop_lock, flags);
+
+ ret = __wait_targetport_unreg(nport, tport);
+ if (ret)
+ pr_warn("%s: Failed deleting target port\n", __func__);
+
+ ret = __wait_remoteport_unreg(nport, rport);
+ if (ret)
+ pr_warn("%s: Failed deleting remote port\n", __func__);
+
+ spin_lock_irqsave(&fcloop_lock, flags);
+ }
+
+ for (;;) {
+ lport = list_first_entry_or_null(&fcloop_lports,
+ typeof(*lport), lport_list);
+ if (!lport)
+ break;
+
+ __unlink_local_port(lport);
+
+ spin_unlock_irqrestore(&fcloop_lock, flags);
+
+ ret = __wait_localport_unreg(lport);
+ if (ret)
+ pr_warn("%s: Failed deleting local port\n", __func__);
+
+ spin_lock_irqsave(&fcloop_lock, flags);
+ }
+
+ spin_unlock_irqrestore(&fcloop_lock, flags);
+
+ put_device(fcloop_device);
+
+ device_destroy(fcloop_class, MKDEV(0, 0));
+ class_destroy(fcloop_class);
+}
+
+module_init(fcloop_init);
+module_exit(fcloop_exit);
+
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/nvme/target/io-cmd.c b/drivers/nvme/target/io-cmd.c
index 4a96c2049b7b..4195115c7e54 100644
--- a/drivers/nvme/target/io-cmd.c
+++ b/drivers/nvme/target/io-cmd.c
@@ -37,9 +37,7 @@ static void nvmet_inline_bio_init(struct nvmet_req *req)
{
struct bio *bio = &req->inline_bio;
- bio_init(bio);
- bio->bi_max_vecs = NVMET_MAX_INLINE_BIOVEC;
- bio->bi_io_vec = req->inline_bvec;
+ bio_init(bio, req->inline_bvec, NVMET_MAX_INLINE_BIOVEC);
}
static void nvmet_execute_rw(struct nvmet_req *req)
@@ -58,7 +56,7 @@ static void nvmet_execute_rw(struct nvmet_req *req)
if (req->cmd->rw.opcode == nvme_cmd_write) {
op = REQ_OP_WRITE;
- op_flags = WRITE_ODIRECT;
+ op_flags = REQ_SYNC | REQ_IDLE;
if (req->cmd->rw.control & cpu_to_le16(NVME_RW_FUA))
op_flags |= REQ_FUA;
} else {
@@ -96,7 +94,7 @@ static void nvmet_execute_rw(struct nvmet_req *req)
cookie = submit_bio(bio);
- blk_poll(bdev_get_queue(req->ns->bdev), cookie);
+ blk_mq_poll(bdev_get_queue(req->ns->bdev), cookie);
}
static void nvmet_execute_flush(struct nvmet_req *req)
@@ -109,7 +107,7 @@ static void nvmet_execute_flush(struct nvmet_req *req)
bio->bi_bdev = req->ns->bdev;
bio->bi_private = req;
bio->bi_end_io = nvmet_bio_done;
- bio_set_op_attrs(bio, REQ_OP_WRITE, WRITE_FLUSH);
+ bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
submit_bio(bio);
}
@@ -172,6 +170,32 @@ static void nvmet_execute_dsm(struct nvmet_req *req)
}
}
+static void nvmet_execute_write_zeroes(struct nvmet_req *req)
+{
+ struct nvme_write_zeroes_cmd *write_zeroes = &req->cmd->write_zeroes;
+ struct bio *bio = NULL;
+ u16 status = NVME_SC_SUCCESS;
+ sector_t sector;
+ sector_t nr_sector;
+
+ sector = le64_to_cpu(write_zeroes->slba) <<
+ (req->ns->blksize_shift - 9);
+	nr_sector = ((sector_t)le16_to_cpu(write_zeroes->length) + 1) <<
+			(req->ns->blksize_shift - 9);
+
+ if (__blkdev_issue_zeroout(req->ns->bdev, sector, nr_sector,
+ GFP_KERNEL, &bio, true))
+ status = NVME_SC_INTERNAL | NVME_SC_DNR;
+
+ if (bio) {
+ bio->bi_private = req;
+ bio->bi_end_io = nvmet_bio_done;
+ submit_bio(bio);
+ } else {
+ nvmet_req_complete(req, status);
+ }
+}
+
int nvmet_parse_io_cmd(struct nvmet_req *req)
{
struct nvme_command *cmd = req->cmd;
@@ -209,6 +233,9 @@ int nvmet_parse_io_cmd(struct nvmet_req *req)
req->data_len = le32_to_cpu(cmd->dsm.nr + 1) *
sizeof(struct nvme_dsm_range);
return 0;
+ case nvme_cmd_write_zeroes:
+ req->execute = nvmet_execute_write_zeroes;
+ return 0;
default:
pr_err("nvmet: unhandled cmd %d\n", cmd->common.opcode);
return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
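
The Write Zeroes handler converts a 0's-based NVMe block range (the 16-bit length field encodes one block as 0) into 512-byte sectors by shifting with (blksize_shift - 9). A standalone arithmetic check of that conversion, with made-up input values:

#include <inttypes.h>
#include <stdio.h>

int main(void)
{
	/* Made-up inputs: 4096-byte blocks, start LBA 8, 0's-based count 3. */
	uint8_t blksize_shift = 12;
	uint64_t slba = 8;
	uint16_t length = 3;			/* 0 means one block, so 4 blocks */

	uint64_t sector = slba << (blksize_shift - 9);
	uint64_t nr_sector = ((uint64_t)length + 1) << (blksize_shift - 9);

	/* Prints: sector=64 nr_sector=32 */
	printf("sector=%" PRIu64 " nr_sector=%" PRIu64 "\n", sector, nr_sector);
	return 0;
}
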
diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c
index d5df77d686b2..9aaa70071ae5 100644
--- a/drivers/nvme/target/loop.c
+++ b/drivers/nvme/target/loop.c
@@ -36,6 +36,7 @@
(NVME_LOOP_AQ_DEPTH - NVME_LOOP_NR_AEN_COMMANDS)
struct nvme_loop_iod {
+ struct nvme_request nvme_req;
struct nvme_command cmd;
struct nvme_completion rsp;
struct nvmet_req req;
@@ -112,10 +113,10 @@ static void nvme_loop_complete_rq(struct request *req)
blk_mq_end_request(req, error);
}
-static void nvme_loop_queue_response(struct nvmet_req *nvme_req)
+static void nvme_loop_queue_response(struct nvmet_req *req)
{
struct nvme_loop_iod *iod =
- container_of(nvme_req, struct nvme_loop_iod, req);
+ container_of(req, struct nvme_loop_iod, req);
struct nvme_completion *cqe = &iod->rsp;
/*
@@ -126,13 +127,13 @@ static void nvme_loop_queue_response(struct nvmet_req *nvme_req)
*/
if (unlikely(nvme_loop_queue_idx(iod->queue) == 0 &&
cqe->command_id >= NVME_LOOP_AQ_BLKMQ_DEPTH)) {
- nvme_complete_async_event(&iod->queue->ctrl->ctrl, cqe);
+ nvme_complete_async_event(&iod->queue->ctrl->ctrl, cqe->status,
+ &cqe->result);
} else {
- struct request *req = blk_mq_rq_from_pdu(iod);
+ struct request *rq = blk_mq_rq_from_pdu(iod);
- if (req->cmd_type == REQ_TYPE_DRV_PRIV && req->special)
- memcpy(req->special, cqe, sizeof(*cqe));
- blk_mq_complete_request(req, le16_to_cpu(cqe->status) >> 1);
+ iod->nvme_req.result = cqe->result;
+ blk_mq_complete_request(rq, le16_to_cpu(cqe->status) >> 1);
}
}
@@ -168,7 +169,7 @@ static int nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx,
int ret;
ret = nvme_setup_cmd(ns, req, &iod->cmd);
- if (ret)
+ if (ret != BLK_MQ_RQ_QUEUE_OK)
return ret;
iod->cmd.common.flags |= NVME_CMD_SGL_METABUF;
@@ -178,26 +179,25 @@ static int nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx,
nvme_cleanup_cmd(req);
blk_mq_start_request(req);
nvme_loop_queue_response(&iod->req);
- return 0;
+ return BLK_MQ_RQ_QUEUE_OK;
}
if (blk_rq_bytes(req)) {
iod->sg_table.sgl = iod->first_sgl;
ret = sg_alloc_table_chained(&iod->sg_table,
- req->nr_phys_segments, iod->sg_table.sgl);
+ blk_rq_nr_phys_segments(req),
+ iod->sg_table.sgl);
if (ret)
return BLK_MQ_RQ_QUEUE_BUSY;
iod->req.sg = iod->sg_table.sgl;
iod->req.sg_cnt = blk_rq_map_sg(req->q, req, iod->sg_table.sgl);
- BUG_ON(iod->req.sg_cnt > req->nr_phys_segments);
}
- iod->cmd.common.command_id = req->tag;
blk_mq_start_request(req);
schedule_work(&iod->work);
- return 0;
+ return BLK_MQ_RQ_QUEUE_OK;
}
static void nvme_loop_submit_async_event(struct nvme_ctrl *arg, int aer_idx)
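
nvme_loop_queue_response() recovers the containing nvme_loop_iod from the embedded nvmet_req with container_of(). A self-contained userspace illustration of that pointer arithmetic, using simplified stand-in structs:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct demo_req {
	int opcode;
};

struct demo_iod {
	int tag;
	struct demo_req req;	/* embedded, like nvmet_req in nvme_loop_iod */
};

int main(void)
{
	struct demo_iod iod = { .tag = 7, .req = { .opcode = 1 } };
	struct demo_req *r = &iod.req;

	/* Walk back from the member to its container. */
	struct demo_iod *back = container_of(r, struct demo_iod, req);

	printf("tag=%d\n", back->tag);	/* prints tag=7 */
	return 0;
}
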
diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h
index 76b6eedccaf9..23d5eb1c944f 100644
--- a/drivers/nvme/target/nvmet.h
+++ b/drivers/nvme/target/nvmet.h
@@ -47,6 +47,7 @@ struct nvmet_ns {
loff_t size;
u8 nguid[16];
+ bool enabled;
struct nvmet_subsys *subsys;
const char *device_path;
@@ -61,11 +62,6 @@ static inline struct nvmet_ns *to_nvmet_ns(struct config_item *item)
return container_of(to_config_group(item), struct nvmet_ns, group);
}
-static inline bool nvmet_ns_enabled(struct nvmet_ns *ns)
-{
- return !list_empty_careful(&ns->dev_link);
-}
-
struct nvmet_cq {
u16 qid;
u16 size;
@@ -238,7 +234,7 @@ static inline void nvmet_set_status(struct nvmet_req *req, u16 status)
static inline void nvmet_set_result(struct nvmet_req *req, u32 result)
{
- req->rsp->result = cpu_to_le32(result);
+ req->rsp->result.u32 = cpu_to_le32(result);
}
/*
diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c
index 005ef5d17a19..8c3760a78ac0 100644
--- a/drivers/nvme/target/rdma.c
+++ b/drivers/nvme/target/rdma.c
@@ -1045,8 +1045,10 @@ nvmet_rdma_alloc_queue(struct nvmet_rdma_device *ndev,
}
ret = nvmet_sq_init(&queue->nvme_sq);
- if (ret)
+ if (ret) {
+ ret = NVME_RDMA_CM_NO_RSC;
goto out_free_queue;
+ }
ret = nvmet_rdma_parse_cm_connect_req(&event->param.conn, queue);
if (ret)
@@ -1116,6 +1118,7 @@ out_destroy_sq:
out_free_queue:
kfree(queue);
out_reject:
+ pr_debug("rejecting connect request with status code %d\n", ret);
nvmet_rdma_cm_reject(cm_id, ret);
return NULL;
}
@@ -1129,7 +1132,8 @@ static void nvmet_rdma_qp_event(struct ib_event *event, void *priv)
rdma_notify(queue->cm_id, event->event);
break;
default:
- pr_err("received unrecognized IB QP event %d\n", event->event);
+ pr_err("received IB QP event: %s (%d)\n",
+ ib_event_msg(event->event), event->event);
break;
}
}
@@ -1370,6 +1374,9 @@ static int nvmet_rdma_cm_handler(struct rdma_cm_id *cm_id,
ret = nvmet_rdma_device_removal(cm_id, queue);
break;
case RDMA_CM_EVENT_REJECTED:
+ pr_debug("Connection rejected: %s\n",
+ rdma_reject_msg(cm_id, event->status));
+ /* FALLTHROUGH */
case RDMA_CM_EVENT_UNREACHABLE:
case RDMA_CM_EVENT_CONNECT_ERROR:
nvmet_rdma_queue_connect_fail(cm_id, queue);
diff --git a/drivers/nvmem/Kconfig b/drivers/nvmem/Kconfig
index ba140eaee5c8..650f1b1797ad 100644
--- a/drivers/nvmem/Kconfig
+++ b/drivers/nvmem/Kconfig
@@ -35,6 +35,16 @@ config NVMEM_LPC18XX_EEPROM
To compile this driver as a module, choose M here: the module
will be called nvmem_lpc18xx_eeprom.
+config NVMEM_LPC18XX_OTP
+ tristate "NXP LPC18XX OTP Memory Support"
+ depends on ARCH_LPC18XX || COMPILE_TEST
+ depends on HAS_IOMEM
+ help
+ Say Y here to include support for NXP LPC18xx OTP memory found on
+ all LPC18xx and LPC43xx devices.
+ To compile this driver as a module, choose M here: the module
+ will be called nvmem_lpc18xx_otp.
+
config NVMEM_MXS_OCOTP
tristate "Freescale MXS On-Chip OTP Memory Support"
depends on ARCH_MXS || COMPILE_TEST
@@ -80,6 +90,18 @@ config ROCKCHIP_EFUSE
This driver can also be built as a module. If so, the module
will be called nvmem_rockchip_efuse.
+config NVMEM_BCM_OCOTP
+ tristate "Broadcom On-Chip OTP Controller support"
+ depends on ARCH_BCM_IPROC || COMPILE_TEST
+ depends on HAS_IOMEM
+ default ARCH_BCM_IPROC
+ help
+	  Say Y here to enable read/write access to the Broadcom OTP
+ controller.
+
+ This driver can also be built as a module. If so, the module
+ will be called nvmem-bcm-ocotp.
+
config NVMEM_SUNXI_SID
tristate "Allwinner SoCs SID support"
depends on ARCH_SUNXI
diff --git a/drivers/nvmem/Makefile b/drivers/nvmem/Makefile
index 8f942a0cdaec..86e45995fdad 100644
--- a/drivers/nvmem/Makefile
+++ b/drivers/nvmem/Makefile
@@ -6,10 +6,14 @@ obj-$(CONFIG_NVMEM) += nvmem_core.o
nvmem_core-y := core.o
# Devices
+obj-$(CONFIG_NVMEM_BCM_OCOTP) += nvmem-bcm-ocotp.o
+nvmem-bcm-ocotp-y := bcm-ocotp.o
obj-$(CONFIG_NVMEM_IMX_OCOTP) += nvmem-imx-ocotp.o
nvmem-imx-ocotp-y := imx-ocotp.o
obj-$(CONFIG_NVMEM_LPC18XX_EEPROM) += nvmem_lpc18xx_eeprom.o
nvmem_lpc18xx_eeprom-y := lpc18xx_eeprom.o
+obj-$(CONFIG_NVMEM_LPC18XX_OTP) += nvmem_lpc18xx_otp.o
+nvmem_lpc18xx_otp-y := lpc18xx_otp.o
obj-$(CONFIG_NVMEM_MXS_OCOTP) += nvmem-mxs-ocotp.o
nvmem-mxs-ocotp-y := mxs-ocotp.o
obj-$(CONFIG_MTK_EFUSE) += nvmem_mtk-efuse.o
diff --git a/drivers/nvmem/bcm-ocotp.c b/drivers/nvmem/bcm-ocotp.c
new file mode 100644
index 000000000000..646cadbf1f93
--- /dev/null
+++ b/drivers/nvmem/bcm-ocotp.c
@@ -0,0 +1,335 @@
+/*
+ * Copyright (C) 2016 Broadcom
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/nvmem-provider.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+
+/*
+ * # of tries for OTP Status. The time to execute a command varies. The slowest
+ * commands are writes which also vary based on the # of bits turned on. Writing
+ * 0xffffffff takes ~3800 us.
+ */
+#define OTPC_RETRIES 5000
+
+/* Sequence to enable OTP program */
+#define OTPC_PROG_EN_SEQ { 0xf, 0x4, 0x8, 0xd }
+
+/* OTPC Commands */
+#define OTPC_CMD_READ 0x0
+#define OTPC_CMD_OTP_PROG_ENABLE 0x2
+#define OTPC_CMD_OTP_PROG_DISABLE 0x3
+#define OTPC_CMD_PROGRAM 0xA
+
+/* OTPC Status Bits */
+#define OTPC_STAT_CMD_DONE BIT(1)
+#define OTPC_STAT_PROG_OK BIT(2)
+
+/* OTPC register definition */
+#define OTPC_MODE_REG_OFFSET 0x0
+#define OTPC_MODE_REG_OTPC_MODE 0
+#define OTPC_COMMAND_OFFSET 0x4
+#define OTPC_COMMAND_COMMAND_WIDTH 6
+#define OTPC_CMD_START_OFFSET 0x8
+#define OTPC_CMD_START_START 0
+#define OTPC_CPU_STATUS_OFFSET 0xc
+#define OTPC_CPUADDR_REG_OFFSET 0x28
+#define OTPC_CPUADDR_REG_OTPC_CPU_ADDRESS_WIDTH 16
+#define OTPC_CPU_WRITE_REG_OFFSET 0x2c
+
+#define OTPC_CMD_MASK (BIT(OTPC_COMMAND_COMMAND_WIDTH) - 1)
+#define OTPC_ADDR_MASK (BIT(OTPC_CPUADDR_REG_OTPC_CPU_ADDRESS_WIDTH) - 1)
+
+
+struct otpc_map {
+ /* in words. */
+ u32 otpc_row_size;
+ /* 128 bit row / 4 words support. */
+ u16 data_r_offset[4];
+ /* 128 bit row / 4 words support. */
+ u16 data_w_offset[4];
+};
+
+static struct otpc_map otp_map = {
+ .otpc_row_size = 1,
+ .data_r_offset = {0x10},
+ .data_w_offset = {0x2c},
+};
+
+static struct otpc_map otp_map_v2 = {
+ .otpc_row_size = 2,
+ .data_r_offset = {0x10, 0x5c},
+ .data_w_offset = {0x2c, 0x64},
+};
+
+struct otpc_priv {
+ struct device *dev;
+ void __iomem *base;
+ struct otpc_map *map;
+ struct nvmem_config *config;
+};
+
+static inline void set_command(void __iomem *base, u32 command)
+{
+ writel(command & OTPC_CMD_MASK, base + OTPC_COMMAND_OFFSET);
+}
+
+static inline void set_cpu_address(void __iomem *base, u32 addr)
+{
+ writel(addr & OTPC_ADDR_MASK, base + OTPC_CPUADDR_REG_OFFSET);
+}
+
+static inline void set_start_bit(void __iomem *base)
+{
+ writel(1 << OTPC_CMD_START_START, base + OTPC_CMD_START_OFFSET);
+}
+
+static inline void reset_start_bit(void __iomem *base)
+{
+ writel(0, base + OTPC_CMD_START_OFFSET);
+}
+
+static inline void write_cpu_data(void __iomem *base, u32 value)
+{
+ writel(value, base + OTPC_CPU_WRITE_REG_OFFSET);
+}
+
+static int poll_cpu_status(void __iomem *base, u32 value)
+{
+ u32 status;
+ u32 retries;
+
+ for (retries = 0; retries < OTPC_RETRIES; retries++) {
+ status = readl(base + OTPC_CPU_STATUS_OFFSET);
+ if (status & value)
+ break;
+ udelay(1);
+ }
+ if (retries == OTPC_RETRIES)
+ return -EAGAIN;
+
+ return 0;
+}
+
+static int enable_ocotp_program(void __iomem *base)
+{
+ static const u32 vals[] = OTPC_PROG_EN_SEQ;
+ int i;
+ int ret;
+
+ /* Write the magic sequence to enable programming */
+ set_command(base, OTPC_CMD_OTP_PROG_ENABLE);
+ for (i = 0; i < ARRAY_SIZE(vals); i++) {
+ write_cpu_data(base, vals[i]);
+ set_start_bit(base);
+ ret = poll_cpu_status(base, OTPC_STAT_CMD_DONE);
+ reset_start_bit(base);
+ if (ret)
+ return ret;
+ }
+
+ return poll_cpu_status(base, OTPC_STAT_PROG_OK);
+}
+
+static int disable_ocotp_program(void __iomem *base)
+{
+ int ret;
+
+ set_command(base, OTPC_CMD_OTP_PROG_DISABLE);
+ set_start_bit(base);
+ ret = poll_cpu_status(base, OTPC_STAT_PROG_OK);
+ reset_start_bit(base);
+
+ return ret;
+}
+
+static int bcm_otpc_read(void *context, unsigned int offset, void *val,
+ size_t bytes)
+{
+ struct otpc_priv *priv = context;
+ u32 *buf = val;
+ u32 bytes_read;
+ u32 address = offset / priv->config->word_size;
+ int i, ret;
+
+ for (bytes_read = 0; bytes_read < bytes;) {
+ set_command(priv->base, OTPC_CMD_READ);
+ set_cpu_address(priv->base, address++);
+ set_start_bit(priv->base);
+ ret = poll_cpu_status(priv->base, OTPC_STAT_CMD_DONE);
+ if (ret) {
+ dev_err(priv->dev, "otp read error: 0x%x", ret);
+ return -EIO;
+ }
+
+ for (i = 0; i < priv->map->otpc_row_size; i++) {
+ *buf++ = readl(priv->base +
+ priv->map->data_r_offset[i]);
+ bytes_read += sizeof(*buf);
+ }
+
+ reset_start_bit(priv->base);
+ }
+
+ return 0;
+}
+
+static int bcm_otpc_write(void *context, unsigned int offset, void *val,
+ size_t bytes)
+{
+ struct otpc_priv *priv = context;
+ u32 *buf = val;
+ u32 bytes_written;
+ u32 address = offset / priv->config->word_size;
+ int i, ret;
+
+ if (offset % priv->config->word_size)
+ return -EINVAL;
+
+ ret = enable_ocotp_program(priv->base);
+ if (ret)
+ return -EIO;
+
+ for (bytes_written = 0; bytes_written < bytes;) {
+ set_command(priv->base, OTPC_CMD_PROGRAM);
+ set_cpu_address(priv->base, address++);
+ for (i = 0; i < priv->map->otpc_row_size; i++) {
+			writel(*buf, priv->base + priv->map->data_w_offset[i]);
+ buf++;
+ bytes_written += sizeof(*buf);
+ }
+ set_start_bit(priv->base);
+ ret = poll_cpu_status(priv->base, OTPC_STAT_CMD_DONE);
+ reset_start_bit(priv->base);
+ if (ret) {
+ dev_err(priv->dev, "otp write error: 0x%x", ret);
+ return -EIO;
+ }
+ }
+
+ disable_ocotp_program(priv->base);
+
+ return 0;
+}
+
+static struct nvmem_config bcm_otpc_nvmem_config = {
+ .name = "bcm-ocotp",
+ .read_only = false,
+ .word_size = 4,
+ .stride = 4,
+ .owner = THIS_MODULE,
+ .reg_read = bcm_otpc_read,
+ .reg_write = bcm_otpc_write,
+};
+
+static const struct of_device_id bcm_otpc_dt_ids[] = {
+ { .compatible = "brcm,ocotp" },
+ { .compatible = "brcm,ocotp-v2" },
+ { },
+};
+MODULE_DEVICE_TABLE(of, bcm_otpc_dt_ids);
+
+static int bcm_otpc_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *dn = dev->of_node;
+ struct resource *res;
+ struct otpc_priv *priv;
+ struct nvmem_device *nvmem;
+ int err;
+ u32 num_words;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ if (of_device_is_compatible(dev->of_node, "brcm,ocotp"))
+ priv->map = &otp_map;
+ else if (of_device_is_compatible(dev->of_node, "brcm,ocotp-v2"))
+ priv->map = &otp_map_v2;
+ else {
+ dev_err(&pdev->dev,
+ "%s otpc config map not defined\n", __func__);
+ return -EINVAL;
+ }
+
+ /* Get OTP base address register. */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ priv->base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(priv->base)) {
+ dev_err(dev, "unable to map I/O memory\n");
+ return PTR_ERR(priv->base);
+ }
+
+ /* Enable CPU access to OTPC. */
+ writel(readl(priv->base + OTPC_MODE_REG_OFFSET) |
+ BIT(OTPC_MODE_REG_OTPC_MODE),
+ priv->base + OTPC_MODE_REG_OFFSET);
+ reset_start_bit(priv->base);
+
+ /* Read size of memory in words. */
+ err = of_property_read_u32(dn, "brcm,ocotp-size", &num_words);
+ if (err) {
+ dev_err(dev, "size parameter not specified\n");
+ return -EINVAL;
+ } else if (num_words == 0) {
+ dev_err(dev, "size must be > 0\n");
+ return -EINVAL;
+ }
+
+ bcm_otpc_nvmem_config.size = 4 * num_words;
+ bcm_otpc_nvmem_config.dev = dev;
+ bcm_otpc_nvmem_config.priv = priv;
+
+ if (of_device_is_compatible(dev->of_node, "brcm,ocotp-v2")) {
+ bcm_otpc_nvmem_config.word_size = 8;
+ bcm_otpc_nvmem_config.stride = 8;
+ }
+
+ priv->config = &bcm_otpc_nvmem_config;
+
+ nvmem = nvmem_register(&bcm_otpc_nvmem_config);
+ if (IS_ERR(nvmem)) {
+ dev_err(dev, "error registering nvmem config\n");
+ return PTR_ERR(nvmem);
+ }
+
+ platform_set_drvdata(pdev, nvmem);
+
+ return 0;
+}
+
+static int bcm_otpc_remove(struct platform_device *pdev)
+{
+ struct nvmem_device *nvmem = platform_get_drvdata(pdev);
+
+ return nvmem_unregister(nvmem);
+}
+
+static struct platform_driver bcm_otpc_driver = {
+ .probe = bcm_otpc_probe,
+ .remove = bcm_otpc_remove,
+ .driver = {
+ .name = "brcm-otpc",
+ .of_match_table = bcm_otpc_dt_ids,
+ },
+};
+module_platform_driver(bcm_otpc_driver);
+
+MODULE_DESCRIPTION("Broadcom OTPC driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/nvmem/lpc18xx_otp.c b/drivers/nvmem/lpc18xx_otp.c
new file mode 100644
index 000000000000..be8d07403ffc
--- /dev/null
+++ b/drivers/nvmem/lpc18xx_otp.c
@@ -0,0 +1,124 @@
+/*
+ * NXP LPC18xx/43xx OTP memory NVMEM driver
+ *
+ * Copyright (c) 2016 Joachim Eastwood <manabian@gmail.com>
+ *
+ * Based on the imx ocotp driver,
+ * Copyright (c) 2015 Pengutronix, Philipp Zabel <p.zabel@pengutronix.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.
+ *
+ * TODO: add support for writing OTP register via API in boot ROM.
+ */
+
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/nvmem-provider.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+/*
+ * LPC18xx OTP memory contains 4 banks with 4 32-bit words. Bank 0 starts
+ * at offset 0 from the base.
+ *
+ * Bank 0 contains the part ID for Flashless devices and is reserved for
+ * devices with Flash.
+ * Banks 1 and 2 are general purpose or AES key storage for secure devices.
+ * Bank 3 contains control data, USB ID and general purpose words.
+ */
+#define LPC18XX_OTP_NUM_BANKS 4
+#define LPC18XX_OTP_WORDS_PER_BANK 4
+#define LPC18XX_OTP_WORD_SIZE sizeof(u32)
+#define LPC18XX_OTP_SIZE (LPC18XX_OTP_NUM_BANKS * \
+ LPC18XX_OTP_WORDS_PER_BANK * \
+ LPC18XX_OTP_WORD_SIZE)
+
+struct lpc18xx_otp {
+ void __iomem *base;
+};
+
+static int lpc18xx_otp_read(void *context, unsigned int offset,
+ void *val, size_t bytes)
+{
+ struct lpc18xx_otp *otp = context;
+ unsigned int count = bytes >> 2;
+ u32 index = offset >> 2;
+ u32 *buf = val;
+ int i;
+
+	if (count > (LPC18XX_OTP_SIZE / LPC18XX_OTP_WORD_SIZE - index))
+		count = LPC18XX_OTP_SIZE / LPC18XX_OTP_WORD_SIZE - index;
+
+ for (i = index; i < (index + count); i++)
+ *buf++ = readl(otp->base + i * LPC18XX_OTP_WORD_SIZE);
+
+ return 0;
+}
+
+static struct nvmem_config lpc18xx_otp_nvmem_config = {
+ .name = "lpc18xx-otp",
+ .read_only = true,
+ .word_size = LPC18XX_OTP_WORD_SIZE,
+ .stride = LPC18XX_OTP_WORD_SIZE,
+ .owner = THIS_MODULE,
+ .reg_read = lpc18xx_otp_read,
+};
+
+static int lpc18xx_otp_probe(struct platform_device *pdev)
+{
+ struct nvmem_device *nvmem;
+ struct lpc18xx_otp *otp;
+ struct resource *res;
+
+ otp = devm_kzalloc(&pdev->dev, sizeof(*otp), GFP_KERNEL);
+ if (!otp)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ otp->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(otp->base))
+ return PTR_ERR(otp->base);
+
+ lpc18xx_otp_nvmem_config.size = LPC18XX_OTP_SIZE;
+ lpc18xx_otp_nvmem_config.dev = &pdev->dev;
+ lpc18xx_otp_nvmem_config.priv = otp;
+
+ nvmem = nvmem_register(&lpc18xx_otp_nvmem_config);
+ if (IS_ERR(nvmem))
+ return PTR_ERR(nvmem);
+
+ platform_set_drvdata(pdev, nvmem);
+
+ return 0;
+}
+
+static int lpc18xx_otp_remove(struct platform_device *pdev)
+{
+ struct nvmem_device *nvmem = platform_get_drvdata(pdev);
+
+ return nvmem_unregister(nvmem);
+}
+
+static const struct of_device_id lpc18xx_otp_dt_ids[] = {
+ { .compatible = "nxp,lpc1850-otp" },
+ { },
+};
+MODULE_DEVICE_TABLE(of, lpc18xx_otp_dt_ids);
+
+static struct platform_driver lpc18xx_otp_driver = {
+ .probe = lpc18xx_otp_probe,
+ .remove = lpc18xx_otp_remove,
+ .driver = {
+ .name = "lpc18xx_otp",
+ .of_match_table = lpc18xx_otp_dt_ids,
+ },
+};
+module_platform_driver(lpc18xx_otp_driver);
+
+MODULE_AUTHOR("Joachim Eastwoood <manabian@gmail.com>");
+MODULE_DESCRIPTION("NXP LPC18xx OTP NVMEM driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/of/base.c b/drivers/of/base.c
index a0bccb54a9bd..d4bea3c797d6 100644
--- a/drivers/of/base.c
+++ b/drivers/of/base.c
@@ -1534,9 +1534,12 @@ void of_print_phandle_args(const char *msg, const struct of_phandle_args *args)
{
int i;
printk("%s %s", msg, of_node_full_name(args->np));
- for (i = 0; i < args->args_count; i++)
- printk(i ? ",%08x" : ":%08x", args->args[i]);
- printk("\n");
+ for (i = 0; i < args->args_count; i++) {
+ const char delim = i ? ',' : ':';
+
+ pr_cont("%c%08x", delim, args->args[i]);
+ }
+ pr_cont("\n");
}
int of_phandle_iterator_init(struct of_phandle_iterator *it,
diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c
index c89d5d231a0e..c9b5cac03b36 100644
--- a/drivers/of/fdt.c
+++ b/drivers/of/fdt.c
@@ -1015,6 +1015,7 @@ int __init early_init_dt_scan_memory(unsigned long node, const char *uname,
const char *type = of_get_flat_dt_prop(node, "device_type", NULL);
const __be32 *reg, *endp;
int l;
+ bool hotpluggable;
/* We are scanning "memory" nodes only */
if (type == NULL) {
@@ -1034,6 +1035,7 @@ int __init early_init_dt_scan_memory(unsigned long node, const char *uname,
return 0;
endp = reg + (l / sizeof(__be32));
+ hotpluggable = of_get_flat_dt_prop(node, "hotpluggable", NULL);
pr_debug("memory scan node %s, reg size %d,\n", uname, l);
@@ -1049,6 +1051,13 @@ int __init early_init_dt_scan_memory(unsigned long node, const char *uname,
(unsigned long long)size);
early_init_dt_add_memory_arch(base, size);
+
+ if (!hotpluggable)
+ continue;
+
+ if (early_init_dt_mark_hotplug_memory_arch(base, size))
+ pr_warn("failed to mark hotplug range 0x%llx - 0x%llx\n",
+ base, base + size);
}
return 0;
@@ -1146,6 +1155,11 @@ void __init __weak early_init_dt_add_memory_arch(u64 base, u64 size)
memblock_add(base, size);
}
+int __init __weak early_init_dt_mark_hotplug_memory_arch(u64 base, u64 size)
+{
+ return memblock_mark_hotplug(base, size);
+}
+
int __init __weak early_init_dt_reserve_memory_arch(phys_addr_t base,
phys_addr_t size, bool nomap)
{
@@ -1168,6 +1182,11 @@ void __init __weak early_init_dt_add_memory_arch(u64 base, u64 size)
WARN_ON(1);
}
+int __init __weak early_init_dt_mark_hotplug_memory_arch(u64 base, u64 size)
+{
+ return -ENOSYS;
+}
+
int __init __weak early_init_dt_reserve_memory_arch(phys_addr_t base,
phys_addr_t size, bool nomap)
{
diff --git a/drivers/of/irq.c b/drivers/of/irq.c
index 393fea85eb4e..3fda9a32defb 100644
--- a/drivers/of/irq.c
+++ b/drivers/of/irq.c
@@ -697,3 +697,4 @@ void of_msi_configure(struct device *dev, struct device_node *np)
dev_set_msi_domain(dev,
of_msi_get_domain(dev, np, DOMAIN_BUS_PLATFORM_MSI));
}
+EXPORT_SYMBOL_GPL(of_msi_configure);
diff --git a/drivers/of/of_numa.c b/drivers/of/of_numa.c
index f63d4b0deff0..a53982a330ea 100644
--- a/drivers/of/of_numa.c
+++ b/drivers/of/of_numa.c
@@ -176,7 +176,12 @@ int of_node_to_nid(struct device_node *device)
np->name);
of_node_put(np);
- if (!r)
+ /*
+ * If numa=off passed on command line, or with a defective
+ * device tree, the nid may not be in the set of possible
+ * nodes. Check for this case and return NUMA_NO_NODE.
+ */
+ if (!r && nid < MAX_NUMNODES && node_possible(nid))
return nid;
return NUMA_NO_NODE;
diff --git a/drivers/of/of_pci.c b/drivers/of/of_pci.c
index b58be12ab277..0ee42c3e66a1 100644
--- a/drivers/of/of_pci.c
+++ b/drivers/of/of_pci.c
@@ -120,6 +120,27 @@ int of_get_pci_domain_nr(struct device_node *node)
EXPORT_SYMBOL_GPL(of_get_pci_domain_nr);
/**
+ * of_pci_get_max_link_speed - Find the limit on PCIe link speed by reading
+ * the "max-link-speed" property of the given device node.
+ *
+ * @node: device tree node with the max link speed information
+ *
+ * Returns the associated max link speed from DT, or a negative value if the
+ * required property is not found or is invalid.
+ */
+int of_pci_get_max_link_speed(struct device_node *node)
+{
+ u32 max_link_speed;
+
+ if (of_property_read_u32(node, "max-link-speed", &max_link_speed) ||
+ max_link_speed > 4)
+ return -EINVAL;
+
+ return max_link_speed;
+}
+EXPORT_SYMBOL_GPL(of_pci_get_max_link_speed);
+
+/**
* of_pci_check_probe_only - Setup probe only mode if linux,pci-probe-only
* is present and valid
*/
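
A host-controller probe would typically call the new helper once and fall back to a controller default when the property is absent or invalid. A hypothetical sketch follows; only of_pci_get_max_link_speed() comes from this patch, the function name and Gen2 fallback are illustrative.

#include <linux/of.h>
#include <linux/of_pci.h>

static int example_pcie_link_gen(struct device_node *np)
{
	int link_gen = of_pci_get_max_link_speed(np);

	if (link_gen < 0 || link_gen > 2)
		link_gen = 2;	/* cap a hypothetical Gen2 controller */

	return link_gen;
}
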
diff --git a/drivers/of/overlay.c b/drivers/of/overlay.c
index 318dbb51e7a2..0d4cda7050e0 100644
--- a/drivers/of/overlay.c
+++ b/drivers/of/overlay.c
@@ -58,6 +58,41 @@ struct of_overlay {
static int of_overlay_apply_one(struct of_overlay *ov,
struct device_node *target, const struct device_node *overlay);
+static BLOCKING_NOTIFIER_HEAD(of_overlay_chain);
+
+int of_overlay_notifier_register(struct notifier_block *nb)
+{
+ return blocking_notifier_chain_register(&of_overlay_chain, nb);
+}
+EXPORT_SYMBOL_GPL(of_overlay_notifier_register);
+
+int of_overlay_notifier_unregister(struct notifier_block *nb)
+{
+ return blocking_notifier_chain_unregister(&of_overlay_chain, nb);
+}
+EXPORT_SYMBOL_GPL(of_overlay_notifier_unregister);
+
+static int of_overlay_notify(struct of_overlay *ov,
+ enum of_overlay_notify_action action)
+{
+ struct of_overlay_notify_data nd;
+ int i, ret;
+
+ for (i = 0; i < ov->count; i++) {
+ struct of_overlay_info *ovinfo = &ov->ovinfo_tab[i];
+
+ nd.target = ovinfo->target;
+ nd.overlay = ovinfo->overlay;
+
+ ret = blocking_notifier_call_chain(&of_overlay_chain,
+ action, &nd);
+ if (ret)
+ return notifier_to_errno(ret);
+ }
+
+ return 0;
+}
+
static int of_overlay_apply_single_property(struct of_overlay *ov,
struct device_node *target, struct property *prop)
{
@@ -368,6 +403,13 @@ int of_overlay_create(struct device_node *tree)
goto err_free_idr;
}
+ err = of_overlay_notify(ov, OF_OVERLAY_PRE_APPLY);
+ if (err < 0) {
+ pr_err("%s: Pre-apply notifier failed (err=%d)\n",
+ __func__, err);
+ goto err_free_idr;
+ }
+
/* apply the overlay */
err = of_overlay_apply(ov);
if (err)
@@ -382,6 +424,8 @@ int of_overlay_create(struct device_node *tree)
/* add to the tail of the overlay list */
list_add_tail(&ov->node, &ov_list);
+ of_overlay_notify(ov, OF_OVERLAY_POST_APPLY);
+
mutex_unlock(&of_mutex);
return id;
@@ -498,9 +542,10 @@ int of_overlay_destroy(int id)
goto out;
}
-
+ of_overlay_notify(ov, OF_OVERLAY_PRE_REMOVE);
list_del(&ov->node);
__of_changeset_revert(&ov->cset);
+ of_overlay_notify(ov, OF_OVERLAY_POST_REMOVE);
of_free_overlay_info(ov);
idr_remove(&ov_idr, id);
of_changeset_destroy(&ov->cset);
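
A consumer of the new notifier chain registers a notifier_block and keys off the action codes introduced above; returning an error from a pre-apply callback vetoes the overlay. A hedged sketch of such a client, with an invented callback name:

#include <linux/kernel.h>
#include <linux/notifier.h>
#include <linux/of.h>

static int example_overlay_notify(struct notifier_block *nb,
				  unsigned long action, void *arg)
{
	struct of_overlay_notify_data *nd = arg;

	if (action == OF_OVERLAY_PRE_APPLY)
		pr_info("overlay for %s about to be applied\n",
			nd->target->full_name);

	return NOTIFY_OK;
}

static struct notifier_block example_overlay_nb = {
	.notifier_call = example_overlay_notify,
};

static int __init example_init(void)
{
	return of_overlay_notifier_register(&example_overlay_nb);
}
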
diff --git a/drivers/of/platform.c b/drivers/of/platform.c
index e4bf07d20f9b..b8064bc2b6eb 100644
--- a/drivers/of/platform.c
+++ b/drivers/of/platform.c
@@ -45,6 +45,9 @@ static int of_dev_node_match(struct device *dev, void *data)
* of_find_device_by_node - Find the platform_device associated with a node
* @np: Pointer to device tree node
*
+ * Takes a reference to the embedded struct device which needs to be dropped
+ * after use.
+ *
* Returns platform_device pointer, or NULL if not found
*/
struct platform_device *of_find_device_by_node(struct device_node *np)
@@ -558,9 +561,6 @@ static int of_platform_device_destroy(struct device *dev, void *data)
* of the given device (and, recurrently, their children) that have been
* created from their respective device tree nodes (and only those,
* leaving others - eg. manually created - unharmed).
- *
- * Returns 0 when all children devices have been removed or
- * -EBUSY when some children remained.
*/
void of_platform_depopulate(struct device *parent)
{
diff --git a/drivers/of/resolver.c b/drivers/of/resolver.c
index 46325d6394cf..8bf12e904fd2 100644
--- a/drivers/of/resolver.c
+++ b/drivers/of/resolver.c
@@ -28,20 +28,19 @@
* Find a node with the given full name by recursively following any of
* the child node links.
*/
-static struct device_node *__of_find_node_by_full_name(struct device_node *node,
+static struct device_node *find_node_by_full_name(struct device_node *node,
const char *full_name)
{
struct device_node *child, *found;
- if (node == NULL)
+ if (!node)
return NULL;
- /* check */
- if (of_node_cmp(node->full_name, full_name) == 0)
+ if (!of_node_cmp(node->full_name, full_name))
return of_node_get(node);
for_each_child_of_node(node, child) {
- found = __of_find_node_by_full_name(child, full_name);
+ found = find_node_by_full_name(child, full_name);
if (found != NULL) {
of_node_put(child);
return found;
@@ -51,16 +50,12 @@ static struct device_node *__of_find_node_by_full_name(struct device_node *node,
return NULL;
}
-/*
- * Find live tree's maximum phandle value.
- */
-static phandle of_get_tree_max_phandle(void)
+static phandle live_tree_max_phandle(void)
{
struct device_node *node;
phandle phandle;
unsigned long flags;
- /* now search recursively */
raw_spin_lock_irqsave(&devtree_lock, flags);
phandle = 0;
for_each_of_allnodes(node) {
@@ -73,131 +68,102 @@ static phandle of_get_tree_max_phandle(void)
return phandle;
}
-/*
- * Adjust a subtree's phandle values by a given delta.
- * Makes sure not to just adjust the device node's phandle value,
- * but modify the phandle properties values as well.
- */
-static void __of_adjust_tree_phandles(struct device_node *node,
+static void adjust_overlay_phandles(struct device_node *overlay,
int phandle_delta)
{
struct device_node *child;
struct property *prop;
phandle phandle;
- /* first adjust the node's phandle direct value */
- if (node->phandle != 0 && node->phandle != OF_PHANDLE_ILLEGAL)
- node->phandle += phandle_delta;
+ /* adjust node's phandle in node */
+ if (overlay->phandle != 0 && overlay->phandle != OF_PHANDLE_ILLEGAL)
+ overlay->phandle += phandle_delta;
- /* now adjust phandle & linux,phandle values */
- for_each_property_of_node(node, prop) {
+ /* copy adjusted phandle into *phandle properties */
+ for_each_property_of_node(overlay, prop) {
- /* only look for these two */
- if (of_prop_cmp(prop->name, "phandle") != 0 &&
- of_prop_cmp(prop->name, "linux,phandle") != 0)
+ if (of_prop_cmp(prop->name, "phandle") &&
+ of_prop_cmp(prop->name, "linux,phandle"))
continue;
- /* must be big enough */
if (prop->length < 4)
continue;
- /* read phandle value */
phandle = be32_to_cpup(prop->value);
- if (phandle == OF_PHANDLE_ILLEGAL) /* unresolved */
+ if (phandle == OF_PHANDLE_ILLEGAL)
continue;
- /* adjust */
- *(uint32_t *)prop->value = cpu_to_be32(node->phandle);
+ *(uint32_t *)prop->value = cpu_to_be32(overlay->phandle);
}
- /* now do the children recursively */
- for_each_child_of_node(node, child)
- __of_adjust_tree_phandles(child, phandle_delta);
+ for_each_child_of_node(overlay, child)
+ adjust_overlay_phandles(child, phandle_delta);
}
-static int __of_adjust_phandle_ref(struct device_node *node,
- struct property *rprop, int value)
+static int update_usages_of_a_phandle_reference(struct device_node *overlay,
+ struct property *prop_fixup, phandle phandle)
{
- phandle phandle;
struct device_node *refnode;
- struct property *sprop;
- char *propval, *propcur, *propend, *nodestr, *propstr, *s;
- int offset, propcurlen;
+ struct property *prop;
+ char *value, *cur, *end, *node_path, *prop_name, *s;
+ int offset, len;
int err = 0;
- /* make a copy */
- propval = kmalloc(rprop->length, GFP_KERNEL);
- if (!propval) {
- pr_err("%s: Could not copy value of '%s'\n",
- __func__, rprop->name);
+ value = kmalloc(prop_fixup->length, GFP_KERNEL);
+ if (!value)
return -ENOMEM;
- }
- memcpy(propval, rprop->value, rprop->length);
+ memcpy(value, prop_fixup->value, prop_fixup->length);
- propend = propval + rprop->length;
- for (propcur = propval; propcur < propend; propcur += propcurlen + 1) {
- propcurlen = strlen(propcur);
+ /* prop_fixup contains a list of tuples of path:property_name:offset */
+ end = value + prop_fixup->length;
+ for (cur = value; cur < end; cur += len + 1) {
+ len = strlen(cur);
- nodestr = propcur;
- s = strchr(propcur, ':');
+ node_path = cur;
+ s = strchr(cur, ':');
if (!s) {
- pr_err("%s: Illegal symbol entry '%s' (1)\n",
- __func__, propcur);
err = -EINVAL;
goto err_fail;
}
*s++ = '\0';
- propstr = s;
+ prop_name = s;
s = strchr(s, ':');
if (!s) {
- pr_err("%s: Illegal symbol entry '%s' (2)\n",
- __func__, (char *)rprop->value);
err = -EINVAL;
goto err_fail;
}
-
*s++ = '\0';
+
err = kstrtoint(s, 10, &offset);
- if (err != 0) {
- pr_err("%s: Could get offset '%s'\n",
- __func__, (char *)rprop->value);
+ if (err)
goto err_fail;
- }
- /* look into the resolve node for the full path */
- refnode = __of_find_node_by_full_name(node, nodestr);
- if (!refnode) {
- pr_warn("%s: Could not find refnode '%s'\n",
- __func__, (char *)rprop->value);
+ refnode = find_node_by_full_name(overlay, node_path);
+ if (!refnode)
continue;
- }
- /* now find the property */
- for_each_property_of_node(refnode, sprop) {
- if (of_prop_cmp(sprop->name, propstr) == 0)
+ for_each_property_of_node(refnode, prop) {
+ if (!of_prop_cmp(prop->name, prop_name))
break;
}
of_node_put(refnode);
- if (!sprop) {
- pr_err("%s: Could not find property '%s'\n",
- __func__, (char *)rprop->value);
+ if (!prop) {
err = -ENOENT;
goto err_fail;
}
- phandle = value;
- *(__be32 *)(sprop->value + offset) = cpu_to_be32(phandle);
+ *(__be32 *)(prop->value + offset) = cpu_to_be32(phandle);
}
err_fail:
- kfree(propval);
+ kfree(value);
return err;
}
/* compare nodes taking into account that 'name' strips out the @ part */
-static int __of_node_name_cmp(const struct device_node *dn1,
+static int node_name_cmp(const struct device_node *dn1,
const struct device_node *dn2)
{
const char *n1 = strrchr(dn1->full_name, '/') ? : "/";
@@ -208,85 +174,77 @@ static int __of_node_name_cmp(const struct device_node *dn1,
/*
* Adjust the local phandle references by the given phandle delta.
- * Assumes the existances of a __local_fixups__ node at the root.
- * Assumes that __of_verify_tree_phandle_references has been called.
- * Does not take any devtree locks so make sure you call this on a tree
- * which is at the detached state.
+ *
+ * Subtree @local_fixups, which is overlay node __local_fixups__,
+ * mirrors the fragment node structure at the root of the overlay.
+ *
+ * For each property in the fragments that contains a phandle reference,
+ * @local_fixups has a property of the same name that contains a list
+ * of offsets of the phandle reference(s) within the respective property
+ * value(s). The values at these offsets will be fixed up.
*/
-static int __of_adjust_tree_phandle_references(struct device_node *node,
- struct device_node *target, int phandle_delta)
+static int adjust_local_phandle_references(struct device_node *local_fixups,
+ struct device_node *overlay, int phandle_delta)
{
- struct device_node *child, *childtarget;
- struct property *rprop, *sprop;
+ struct device_node *child, *overlay_child;
+ struct property *prop_fix, *prop;
int err, i, count;
unsigned int off;
phandle phandle;
- if (node == NULL)
+ if (!local_fixups)
return 0;
- for_each_property_of_node(node, rprop) {
+ for_each_property_of_node(local_fixups, prop_fix) {
/* skip properties added automatically */
- if (of_prop_cmp(rprop->name, "name") == 0 ||
- of_prop_cmp(rprop->name, "phandle") == 0 ||
- of_prop_cmp(rprop->name, "linux,phandle") == 0)
+ if (!of_prop_cmp(prop_fix->name, "name") ||
+ !of_prop_cmp(prop_fix->name, "phandle") ||
+ !of_prop_cmp(prop_fix->name, "linux,phandle"))
continue;
- if ((rprop->length % 4) != 0 || rprop->length == 0) {
- pr_err("%s: Illegal property (size) '%s' @%s\n",
- __func__, rprop->name, node->full_name);
+ if ((prop_fix->length % 4) != 0 || prop_fix->length == 0)
return -EINVAL;
- }
- count = rprop->length / sizeof(__be32);
+ count = prop_fix->length / sizeof(__be32);
- /* now find the target property */
- for_each_property_of_node(target, sprop) {
- if (of_prop_cmp(sprop->name, rprop->name) == 0)
+ for_each_property_of_node(overlay, prop) {
+ if (!of_prop_cmp(prop->name, prop_fix->name))
break;
}
- if (sprop == NULL) {
- pr_err("%s: Could not find target property '%s' @%s\n",
- __func__, rprop->name, node->full_name);
+ if (!prop)
return -EINVAL;
- }
for (i = 0; i < count; i++) {
- off = be32_to_cpu(((__be32 *)rprop->value)[i]);
- /* make sure the offset doesn't overstep (even wrap) */
- if (off >= sprop->length ||
- (off + 4) > sprop->length) {
- pr_err("%s: Illegal property '%s' @%s\n",
- __func__, rprop->name,
- node->full_name);
+ off = be32_to_cpu(((__be32 *)prop_fix->value)[i]);
+ if ((off + 4) > prop->length)
return -EINVAL;
- }
-
- if (phandle_delta) {
- /* adjust */
- phandle = be32_to_cpu(*(__be32 *)(sprop->value + off));
- phandle += phandle_delta;
- *(__be32 *)(sprop->value + off) = cpu_to_be32(phandle);
- }
+
+ phandle = be32_to_cpu(*(__be32 *)(prop->value + off));
+ phandle += phandle_delta;
+ *(__be32 *)(prop->value + off) = cpu_to_be32(phandle);
}
}
- for_each_child_of_node(node, child) {
-
- for_each_child_of_node(target, childtarget)
- if (__of_node_name_cmp(child, childtarget) == 0)
+ /*
+ * These nested loops recurse down two subtrees in parallel, where the
+ * node names in the two subtrees match.
+ *
+ * The roots of the subtrees are the overlay's __local_fixups__ node
+ * and the overlay's root node.
+ */
+ for_each_child_of_node(local_fixups, child) {
+
+ for_each_child_of_node(overlay, overlay_child)
+ if (!node_name_cmp(child, overlay_child))
break;
- if (!childtarget) {
- pr_err("%s: Could not find target child '%s' @%s\n",
- __func__, child->name, node->full_name);
+ if (!overlay_child)
return -EINVAL;
- }
- err = __of_adjust_tree_phandle_references(child, childtarget,
+ err = adjust_local_phandle_references(child, overlay_child,
phandle_delta);
- if (err != 0)
+ if (err)
return err;
}
@@ -294,111 +252,103 @@ static int __of_adjust_tree_phandle_references(struct device_node *node,
}
/**
- * of_resolve - Resolve the given node against the live tree.
+ * of_resolve_phandles - Relocate and resolve overlay against live tree
*
- * @resolve: Node to resolve
+ * @overlay: Pointer to devicetree overlay to relocate and resolve
*
- * Perform dynamic Device Tree resolution against the live tree
- * to the given node to resolve. This depends on the live tree
- * having a __symbols__ node, and the resolve node the __fixups__ &
- * __local_fixups__ nodes (if needed).
- * The result of the operation is a resolve node that it's contents
- * are fit to be inserted or operate upon the live tree.
- * Returns 0 on success or a negative error value on error.
+ * Modify (relocate) values of local phandles in @overlay to a range that
+ * does not conflict with the live expanded devicetree. Update references
+ * to the local phandles in @overlay. Update (resolve) phandle references
+ * in @overlay that refer to the live expanded devicetree.
+ *
+ * Phandle values in the live tree are in the range of
+ * 1 .. live_tree_max_phandle(). The range of phandle values in the overlay
+ * also begin with at 1. Adjust the phandle values in the overlay to begin
+ * at live_tree_max_phandle() + 1. Update references to the phandles to
+ * the adjusted phandle values.
+ *
+ * The name of each property in the "__fixups__" node in the overlay matches
+ * the name of a symbol (a label) in the live tree. The value of each
+ * property in the "__fixups__" node is a list of the property values in the
+ * overlay that need to be updated to contain the phandle reference
+ * corresponding to that symbol in the live tree. Update the references in
+ * the overlay with the phandle values in the live tree.
+ *
+ * @overlay must be detached.
+ *
+ * Resolving and applying @overlay to the live expanded devicetree must be
+ * protected by a mechanism to ensure that multiple overlays are processed
+ * in a single threaded manner so that multiple overlays will not relocate
+ * phandles to overlapping ranges. The mechanism to enforce this is not
+ * yet implemented.
+ *
+ * Return: %0 on success or a negative error value on error.
*/
-int of_resolve_phandles(struct device_node *resolve)
+int of_resolve_phandles(struct device_node *overlay)
{
- struct device_node *child, *childroot, *refnode;
- struct device_node *root_sym, *resolve_sym, *resolve_fix;
- struct property *rprop;
+ struct device_node *child, *local_fixups, *refnode;
+ struct device_node *tree_symbols, *overlay_fixups;
+ struct property *prop;
const char *refpath;
phandle phandle, phandle_delta;
int err;
- if (!resolve)
- pr_err("%s: null node\n", __func__);
- if (resolve && !of_node_check_flag(resolve, OF_DETACHED))
- pr_err("%s: node %s not detached\n", __func__,
- resolve->full_name);
- /* the resolve node must exist, and be detached */
- if (!resolve || !of_node_check_flag(resolve, OF_DETACHED))
- return -EINVAL;
-
- /* first we need to adjust the phandles */
- phandle_delta = of_get_tree_max_phandle() + 1;
- __of_adjust_tree_phandles(resolve, phandle_delta);
-
- /* locate the local fixups */
- childroot = NULL;
- for_each_child_of_node(resolve, childroot)
- if (of_node_cmp(childroot->name, "__local_fixups__") == 0)
- break;
-
- if (childroot != NULL) {
- /* resolve root is guaranteed to be the '/' */
- err = __of_adjust_tree_phandle_references(childroot,
- resolve, 0);
- if (err != 0)
- return err;
+ tree_symbols = NULL;
- BUG_ON(__of_adjust_tree_phandle_references(childroot,
- resolve, phandle_delta));
+ if (!overlay) {
+ pr_err("null overlay\n");
+ err = -EINVAL;
+ goto out;
+ }
+ if (!of_node_check_flag(overlay, OF_DETACHED)) {
+ pr_err("overlay not detached\n");
+ err = -EINVAL;
+ goto out;
}
- root_sym = NULL;
- resolve_sym = NULL;
- resolve_fix = NULL;
-
- /* this may fail (if no fixups are required) */
- root_sym = of_find_node_by_path("/__symbols__");
+ phandle_delta = live_tree_max_phandle() + 1;
+ adjust_overlay_phandles(overlay, phandle_delta);
- /* locate the symbols & fixups nodes on resolve */
- for_each_child_of_node(resolve, child) {
+ for_each_child_of_node(overlay, local_fixups)
+ if (!of_node_cmp(local_fixups->name, "__local_fixups__"))
+ break;
- if (!resolve_sym &&
- of_node_cmp(child->name, "__symbols__") == 0)
- resolve_sym = child;
+ err = adjust_local_phandle_references(local_fixups, overlay, phandle_delta);
+ if (err)
+ goto out;
- if (!resolve_fix &&
- of_node_cmp(child->name, "__fixups__") == 0)
- resolve_fix = child;
+ overlay_fixups = NULL;
- /* both found, don't bother anymore */
- if (resolve_sym && resolve_fix)
- break;
+ for_each_child_of_node(overlay, child) {
+ if (!of_node_cmp(child->name, "__fixups__"))
+ overlay_fixups = child;
}
- /* we do allow for the case where no fixups are needed */
- if (!resolve_fix) {
- err = 0; /* no error */
+ if (!overlay_fixups) {
+ err = 0;
goto out;
}
- /* we need to fixup, but no root symbols... */
- if (!root_sym) {
- pr_err("%s: no symbols in root of device tree.\n", __func__);
+ tree_symbols = of_find_node_by_path("/__symbols__");
+ if (!tree_symbols) {
+ pr_err("no symbols in root of device tree.\n");
err = -EINVAL;
goto out;
}
- for_each_property_of_node(resolve_fix, rprop) {
+ for_each_property_of_node(overlay_fixups, prop) {
/* skip properties added automatically */
- if (of_prop_cmp(rprop->name, "name") == 0)
+ if (!of_prop_cmp(prop->name, "name"))
continue;
- err = of_property_read_string(root_sym,
- rprop->name, &refpath);
- if (err != 0) {
- pr_err("%s: Could not find symbol '%s'\n",
- __func__, rprop->name);
+ err = of_property_read_string(tree_symbols,
+ prop->name, &refpath);
+ if (err)
goto out;
- }
refnode = of_find_node_by_path(refpath);
if (!refnode) {
- pr_err("%s: Could not find node by path '%s'\n",
- __func__, refpath);
err = -ENOENT;
goto out;
}
@@ -406,17 +356,15 @@ int of_resolve_phandles(struct device_node *resolve)
phandle = refnode->phandle;
of_node_put(refnode);
- pr_debug("%s: %s phandle is 0x%08x\n",
- __func__, rprop->name, phandle);
-
- err = __of_adjust_phandle_ref(resolve, rprop, phandle);
+ err = update_usages_of_a_phandle_reference(overlay, prop, phandle);
if (err)
break;
}
out:
- /* NULL is handled by of_node_put as NOP */
- of_node_put(root_sym);
+ if (err)
+ pr_err("overlay phandle fixup failed: %d\n", err);
+ of_node_put(tree_symbols);
return err;
}
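
The relocation step boils down to adding live_tree_max_phandle() + 1 to every local phandle, so the overlay's phandle range sits entirely above the live tree's. A standalone illustration with made-up values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t live_max = 57;			/* live_tree_max_phandle() */
	uint32_t delta = live_max + 1;
	uint32_t overlay_phandles[] = { 1, 2, 3 };
	unsigned int i;

	/* After relocation the overlay uses 58..60, disjoint from 1..57. */
	for (i = 0; i < 3; i++)
		printf("overlay phandle %u -> %u\n",
		       overlay_phandles[i], overlay_phandles[i] + delta);
	return 0;
}
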
diff --git a/drivers/oprofile/buffer_sync.c b/drivers/oprofile/buffer_sync.c
index 82f7000a285d..642478d35e99 100644
--- a/drivers/oprofile/buffer_sync.c
+++ b/drivers/oprofile/buffer_sync.c
@@ -206,7 +206,7 @@ void sync_stop(void)
* because we cannot reach this code without at least one
* dcookie user still being registered (namely, the reader
* of the event buffer). */
-static inline unsigned long fast_get_dcookie(struct path *path)
+static inline unsigned long fast_get_dcookie(const struct path *path)
{
unsigned long cookie;
diff --git a/drivers/oprofile/nmi_timer_int.c b/drivers/oprofile/nmi_timer_int.c
index 9559829fb234..e65a576e4032 100644
--- a/drivers/oprofile/nmi_timer_int.c
+++ b/drivers/oprofile/nmi_timer_int.c
@@ -59,25 +59,16 @@ static void nmi_timer_stop_cpu(int cpu)
perf_event_disable(event);
}
-static int nmi_timer_cpu_notifier(struct notifier_block *b, unsigned long action,
- void *data)
+static int nmi_timer_cpu_online(unsigned int cpu)
{
- int cpu = (unsigned long)data;
- switch (action) {
- case CPU_DOWN_FAILED:
- case CPU_ONLINE:
- nmi_timer_start_cpu(cpu);
- break;
- case CPU_DOWN_PREPARE:
- nmi_timer_stop_cpu(cpu);
- break;
- }
- return NOTIFY_DONE;
+ nmi_timer_start_cpu(cpu);
+ return 0;
+}
+static int nmi_timer_cpu_predown(unsigned int cpu)
+{
+ nmi_timer_stop_cpu(cpu);
+ return 0;
}
-
-static struct notifier_block nmi_timer_cpu_nb = {
- .notifier_call = nmi_timer_cpu_notifier
-};
static int nmi_timer_start(void)
{
@@ -103,13 +94,14 @@ static void nmi_timer_stop(void)
put_online_cpus();
}
+static enum cpuhp_state hp_online;
+
static void nmi_timer_shutdown(void)
{
struct perf_event *event;
int cpu;
- cpu_notifier_register_begin();
- __unregister_cpu_notifier(&nmi_timer_cpu_nb);
+ cpuhp_remove_state(hp_online);
for_each_possible_cpu(cpu) {
event = per_cpu(nmi_timer_events, cpu);
if (!event)
@@ -118,13 +110,11 @@ static void nmi_timer_shutdown(void)
per_cpu(nmi_timer_events, cpu) = NULL;
perf_event_release_kernel(event);
}
-
- cpu_notifier_register_done();
}
static int nmi_timer_setup(void)
{
- int cpu, err;
+ int err;
u64 period;
/* clock cycles per tick: */
@@ -132,24 +122,14 @@ static int nmi_timer_setup(void)
do_div(period, HZ);
nmi_timer_attr.sample_period = period;
- cpu_notifier_register_begin();
- err = __register_cpu_notifier(&nmi_timer_cpu_nb);
- if (err)
- goto out;
-
- /* can't attach events to offline cpus: */
- for_each_online_cpu(cpu) {
- err = nmi_timer_start_cpu(cpu);
- if (err) {
- cpu_notifier_register_done();
- nmi_timer_shutdown();
- return err;
- }
+ err = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "oprofile/nmi:online",
+ nmi_timer_cpu_online, nmi_timer_cpu_predown);
+ if (err < 0) {
+ nmi_timer_shutdown();
+ return err;
}
-
-out:
- cpu_notifier_register_done();
- return err;
+ hp_online = err;
+ return 0;
}
int __init op_nmi_timer_init(struct oprofile_operations *ops)
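
The conversion above is an instance of the generic cpuhp pattern: request a dynamic hotplug state, record the returned state id, and remove it at teardown. A minimal sketch with placeholder callbacks (all "example" names are illustrative):

#include <linux/cpuhotplug.h>

static enum cpuhp_state example_hp_state;

static int example_cpu_online(unsigned int cpu)
{
	/* start per-CPU work here */
	return 0;
}

static int example_cpu_predown(unsigned int cpu)
{
	/* stop per-CPU work here */
	return 0;
}

static int example_setup(void)
{
	int ret;

	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "example:online",
				example_cpu_online, example_cpu_predown);
	if (ret < 0)
		return ret;

	example_hp_state = ret;	/* dynamic states hand back their id */
	return 0;
}

static void example_teardown(void)
{
	cpuhp_remove_state(example_hp_state);
}
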
diff --git a/drivers/pci/access.c b/drivers/pci/access.c
index d11cdbb8fba3..db239547fefd 100644
--- a/drivers/pci/access.c
+++ b/drivers/pci/access.c
@@ -142,10 +142,22 @@ int pci_generic_config_write32(struct pci_bus *bus, unsigned int devfn,
if (size == 4) {
writel(val, addr);
return PCIBIOS_SUCCESSFUL;
- } else {
- mask = ~(((1 << (size * 8)) - 1) << ((where & 0x3) * 8));
}
+ /*
+ * In general, hardware that supports only 32-bit writes on PCI is
+ * not spec-compliant. For example, software may perform a 16-bit
+ * write. If the hardware only supports 32-bit accesses, we must
+ * do a 32-bit read, merge in the 16 bits we intend to write,
+ * followed by a 32-bit write. If the 16 bits we *don't* intend to
+ * write happen to have any RW1C (write-one-to-clear) bits set, we
+ * just inadvertently cleared something we shouldn't have.
+ */
+ dev_warn_ratelimited(&bus->dev, "%d-byte config write to %04x:%02x:%02x.%d offset %#x may corrupt adjacent RW1C bits\n",
+ size, pci_domain_nr(bus), bus->number,
+ PCI_SLOT(devfn), PCI_FUNC(devfn), where);
+
+ mask = ~(((1 << (size * 8)) - 1) << ((where & 0x3) * 8));
tmp = readl(addr) & mask;
tmp |= val << ((where & 0x3) * 8);
writel(tmp, addr);
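
The mask arithmetic merges the new bytes into the dword read back from hardware. A standalone demonstration of a 16-bit write at offset 2 shows how a pending RW1C status bit in the untouched half is read as 1, written back as 1, and thereby cleared, which is exactly the hazard the new warning points at:

#include <inttypes.h>
#include <stdio.h>

int main(void)
{
	unsigned int where = 2, size = 2;	/* 16-bit write at offset 2 */
	uint32_t val = 0xabcd;			/* bytes we mean to write   */
	uint32_t readback = 0x00000001;		/* bit 0: RW1C, pending     */

	uint32_t mask = ~(((1u << (size * 8)) - 1) << ((where & 0x3) * 8));
	uint32_t tmp = (readback & mask) | (val << ((where & 0x3) * 8));

	/*
	 * tmp is 0xabcd0001: the merged dword write puts a 1 back into
	 * bit 0, which write-one-to-clear hardware takes as "clear the
	 * pending status".
	 */
	printf("mask=0x%08" PRIx32 " write=0x%08" PRIx32 "\n", mask, tmp);
	return 0;
}
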
diff --git a/drivers/pci/bus.c b/drivers/pci/bus.c
index c288e5a52575..bc56cf19afd3 100644
--- a/drivers/pci/bus.c
+++ b/drivers/pci/bus.c
@@ -320,7 +320,7 @@ void pci_bus_add_device(struct pci_dev *dev)
pci_fixup_device(pci_fixup_final, dev);
pci_create_sysfs_dev_files(dev);
pci_proc_attach_device(dev);
- pci_bridge_d3_device_changed(dev);
+ pci_bridge_d3_update(dev);
dev->match_driver = true;
retval = device_attach(&dev->dev);
diff --git a/drivers/pci/ecam.c b/drivers/pci/ecam.c
index 43ed08dd8b01..2fee61bb6559 100644
--- a/drivers/pci/ecam.c
+++ b/drivers/pci/ecam.c
@@ -162,3 +162,15 @@ struct pci_ecam_ops pci_generic_ecam_ops = {
.write = pci_generic_config_write,
}
};
+
+#if defined(CONFIG_ACPI) && defined(CONFIG_PCI_QUIRKS)
+/* ECAM ops for 32-bit access only (non-compliant) */
+struct pci_ecam_ops pci_32b_ops = {
+ .bus_shift = 20,
+ .pci_ops = {
+ .map_bus = pci_ecam_map_bus,
+ .read = pci_generic_config_read32,
+ .write = pci_generic_config_write32,
+ }
+};
+#endif
diff --git a/drivers/pci/host/Kconfig b/drivers/pci/host/Kconfig
index d7e7c0a827c3..898d2c48239c 100644
--- a/drivers/pci/host/Kconfig
+++ b/drivers/pci/host/Kconfig
@@ -69,7 +69,7 @@ config PCI_IMX6
config PCI_TEGRA
bool "NVIDIA Tegra PCIe controller"
- depends on ARCH_TEGRA && !ARM64
+ depends on ARCH_TEGRA
help
Say Y here if you want support for the PCIe host controller found
on NVIDIA Tegra SoCs.
@@ -133,8 +133,8 @@ config PCIE_XILINX
config PCI_XGENE
bool "X-Gene PCIe controller"
- depends on ARCH_XGENE
- depends on OF
+ depends on ARM64
+ depends on OF || (ACPI && PCI_QUIRKS)
select PCIEPORTBUS
help
Say Y here if you want internal PCI support on APM X-Gene SoC.
@@ -240,14 +240,16 @@ config PCIE_QCOM
config PCI_HOST_THUNDER_PEM
bool "Cavium Thunder PCIe controller to off-chip devices"
- depends on OF && ARM64
+ depends on ARM64
+ depends on OF || (ACPI && PCI_QUIRKS)
select PCI_HOST_COMMON
help
Say Y here if you want PCIe support for CN88XX Cavium Thunder SoCs.
config PCI_HOST_THUNDER_ECAM
bool "Cavium Thunder ECAM controller to on-chip devices on pass-1.x silicon"
- depends on OF && ARM64
+ depends on ARM64
+ depends on OF || (ACPI && PCI_QUIRKS)
select PCI_HOST_COMMON
help
Say Y here if you want ECAM support for CN88XX-Pass-1.x Cavium Thunder SoCs.
@@ -276,7 +278,7 @@ config PCIE_ARTPEC6
config PCIE_ROCKCHIP
bool "Rockchip PCIe controller"
- depends on ARCH_ROCKCHIP
+ depends on ARCH_ROCKCHIP || COMPILE_TEST
depends on OF
depends on PCI_MSI_IRQ_DOMAIN
select MFD_SYSCON
@@ -286,7 +288,7 @@ config PCIE_ROCKCHIP
4 slots.
config VMD
- depends on PCI_MSI && X86_64
+ depends on PCI_MSI && X86_64 && SRCU
tristate "Intel Volume Management Device Driver"
default N
---help---
diff --git a/drivers/pci/host/Makefile b/drivers/pci/host/Makefile
index 084cb4983645..bfe3179ae74c 100644
--- a/drivers/pci/host/Makefile
+++ b/drivers/pci/host/Makefile
@@ -15,7 +15,6 @@ obj-$(CONFIG_PCIE_SPEAR13XX) += pcie-spear13xx.o
obj-$(CONFIG_PCI_KEYSTONE) += pci-keystone-dw.o pci-keystone.o
obj-$(CONFIG_PCIE_XILINX) += pcie-xilinx.o
obj-$(CONFIG_PCIE_XILINX_NWL) += pcie-xilinx-nwl.o
-obj-$(CONFIG_PCI_XGENE) += pci-xgene.o
obj-$(CONFIG_PCI_XGENE_MSI) += pci-xgene-msi.o
obj-$(CONFIG_PCI_LAYERSCAPE) += pci-layerscape.o
obj-$(CONFIG_PCI_VERSATILE) += pci-versatile.o
@@ -25,11 +24,23 @@ obj-$(CONFIG_PCIE_IPROC_PLATFORM) += pcie-iproc-platform.o
obj-$(CONFIG_PCIE_IPROC_BCMA) += pcie-iproc-bcma.o
obj-$(CONFIG_PCIE_ALTERA) += pcie-altera.o
obj-$(CONFIG_PCIE_ALTERA_MSI) += pcie-altera-msi.o
-obj-$(CONFIG_PCI_HISI) += pcie-hisi.o
obj-$(CONFIG_PCIE_QCOM) += pcie-qcom.o
-obj-$(CONFIG_PCI_HOST_THUNDER_ECAM) += pci-thunder-ecam.o
-obj-$(CONFIG_PCI_HOST_THUNDER_PEM) += pci-thunder-pem.o
obj-$(CONFIG_PCIE_ARMADA_8K) += pcie-armada8k.o
obj-$(CONFIG_PCIE_ARTPEC6) += pcie-artpec6.o
obj-$(CONFIG_PCIE_ROCKCHIP) += pcie-rockchip.o
obj-$(CONFIG_VMD) += vmd.o
+
+# The following drivers are for devices that use the generic ACPI
+# pci_root.c driver but don't support standard ECAM config access.
+# They contain MCFG quirks to replace the generic ECAM accessors with
+# device-specific ones that are shared with the DT driver.
+
+# The ACPI driver is generic and should not require driver-specific
+# config options to be enabled, so we always build these drivers on
+# ARM64 and use internal ifdefs to only build the pieces we need
+# depending on whether ACPI, the DT driver, or both are enabled.
+
+obj-$(CONFIG_ARM64) += pcie-hisi.o
+obj-$(CONFIG_ARM64) += pci-thunder-ecam.o
+obj-$(CONFIG_ARM64) += pci-thunder-pem.o
+obj-$(CONFIG_ARM64) += pci-xgene.o
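
Each of the four always-built files uses the same guard layout; a minimal
sketch (labels illustrative, mirroring the pci-thunder-pem.c changes later
in this series):

#if defined(CONFIG_PCI_HOST_THUNDER_PEM) || \
    (defined(CONFIG_ACPI) && defined(CONFIG_PCI_QUIRKS))
/* shared config accessors, built whenever either interface needs them */

#if defined(CONFIG_ACPI) && defined(CONFIG_PCI_QUIRKS)
/* non-static pci_ecam_ops consumed by the MCFG quirk machinery */
#endif

#ifdef CONFIG_PCI_HOST_THUNDER_PEM
/* DT platform driver registration */
#endif

#endif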
diff --git a/drivers/pci/host/pci-hyperv.c b/drivers/pci/host/pci-hyperv.c
index 763ff8745828..3efcc7bdc5fb 100644
--- a/drivers/pci/host/pci-hyperv.c
+++ b/drivers/pci/host/pci-hyperv.c
@@ -378,6 +378,8 @@ struct hv_pcibus_device {
struct msi_domain_info msi_info;
struct msi_controller msi_chip;
struct irq_domain *irq_domain;
+ struct retarget_msi_interrupt retarget_msi_interrupt_params;
+ spinlock_t retarget_msi_interrupt_lock;
};
/*
@@ -755,7 +757,7 @@ static int hv_set_affinity(struct irq_data *data, const struct cpumask *dest,
return parent->chip->irq_set_affinity(parent, dest, force);
}
-void hv_irq_mask(struct irq_data *data)
+static void hv_irq_mask(struct irq_data *data)
{
pci_msi_mask_irq(data);
}
@@ -770,38 +772,44 @@ void hv_irq_mask(struct irq_data *data)
* is built out of this PCI bus's instance GUID and the function
* number of the device.
*/
-void hv_irq_unmask(struct irq_data *data)
+static void hv_irq_unmask(struct irq_data *data)
{
struct msi_desc *msi_desc = irq_data_get_msi_desc(data);
struct irq_cfg *cfg = irqd_cfg(data);
- struct retarget_msi_interrupt params;
+ struct retarget_msi_interrupt *params;
struct hv_pcibus_device *hbus;
struct cpumask *dest;
struct pci_bus *pbus;
struct pci_dev *pdev;
int cpu;
+ unsigned long flags;
dest = irq_data_get_affinity_mask(data);
pdev = msi_desc_to_pci_dev(msi_desc);
pbus = pdev->bus;
hbus = container_of(pbus->sysdata, struct hv_pcibus_device, sysdata);
- memset(&params, 0, sizeof(params));
- params.partition_id = HV_PARTITION_ID_SELF;
- params.source = 1; /* MSI(-X) */
- params.address = msi_desc->msg.address_lo;
- params.data = msi_desc->msg.data;
- params.device_id = (hbus->hdev->dev_instance.b[5] << 24) |
+ spin_lock_irqsave(&hbus->retarget_msi_interrupt_lock, flags);
+
+ params = &hbus->retarget_msi_interrupt_params;
+ memset(params, 0, sizeof(*params));
+ params->partition_id = HV_PARTITION_ID_SELF;
+ params->source = 1; /* MSI(-X) */
+ params->address = msi_desc->msg.address_lo;
+ params->data = msi_desc->msg.data;
+ params->device_id = (hbus->hdev->dev_instance.b[5] << 24) |
(hbus->hdev->dev_instance.b[4] << 16) |
(hbus->hdev->dev_instance.b[7] << 8) |
(hbus->hdev->dev_instance.b[6] & 0xf8) |
PCI_FUNC(pdev->devfn);
- params.vector = cfg->vector;
+ params->vector = cfg->vector;
for_each_cpu_and(cpu, dest, cpu_online_mask)
- params.vp_mask |= (1ULL << vmbus_cpu_number_to_vp_number(cpu));
+ params->vp_mask |= (1ULL << vmbus_cpu_number_to_vp_number(cpu));
- hv_do_hypercall(HVCALL_RETARGET_INTERRUPT, &params, NULL);
+ hv_do_hypercall(HVCALL_RETARGET_INTERRUPT, params, NULL);
+
+ spin_unlock_irqrestore(&hbus->retarget_msi_interrupt_lock, flags);
pci_msi_unmask_irq(data);
}
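
The retarget parameters move off the stack into hv_pcibus_device, giving the
hypercall a single persistent input buffer; the new spinlock serializes
concurrent unmask paths. A minimal sketch of the same pattern, with
hypothetical names:

struct bus_state {
	spinlock_t param_lock;			/* serializes buffer users */
	struct hypercall_params params;		/* shared input buffer */
};

static void issue_call(struct bus_state *bus)
{
	unsigned long flags;

	spin_lock_irqsave(&bus->param_lock, flags);
	memset(&bus->params, 0, sizeof(bus->params));
	/* ... fill in bus->params ... */
	do_hypercall(&bus->params);		/* hypothetical stand-in */
	spin_unlock_irqrestore(&bus->param_lock, flags);
}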
@@ -1271,9 +1279,9 @@ static struct hv_pci_dev *new_pcichild_device(struct hv_pcibus_device *hbus,
struct hv_pci_dev *hpdev;
struct pci_child_message *res_req;
struct q_res_req_compl comp_pkt;
- union {
- struct pci_packet init_packet;
- u8 buffer[0x100];
+ struct {
+ struct pci_packet init_packet;
+ u8 buffer[sizeof(struct pci_child_message)];
} pkt;
unsigned long flags;
int ret;
@@ -1582,6 +1590,10 @@ static void hv_eject_device_work(struct work_struct *work)
pci_dev_put(pdev);
}
+ spin_lock_irqsave(&hpdev->hbus->device_list_lock, flags);
+ list_del(&hpdev->list_entry);
+ spin_unlock_irqrestore(&hpdev->hbus->device_list_lock, flags);
+
memset(&ctxt, 0, sizeof(ctxt));
ejct_pkt = (struct pci_eject_response *)&ctxt.pkt.message;
ejct_pkt->message_type.type = PCI_EJECTION_COMPLETE;
@@ -1590,10 +1602,6 @@ static void hv_eject_device_work(struct work_struct *work)
sizeof(*ejct_pkt), (unsigned long)&ctxt.pkt,
VM_PKT_DATA_INBAND, 0);
- spin_lock_irqsave(&hpdev->hbus->device_list_lock, flags);
- list_del(&hpdev->list_entry);
- spin_unlock_irqrestore(&hpdev->hbus->device_list_lock, flags);
-
put_pcichild(hpdev, hv_pcidev_ref_childlist);
put_pcichild(hpdev, hv_pcidev_ref_pnp);
put_hvpcibus(hpdev->hbus);
@@ -2186,6 +2194,7 @@ static int hv_pci_probe(struct hv_device *hdev,
INIT_LIST_HEAD(&hbus->resources_for_children);
spin_lock_init(&hbus->config_lock);
spin_lock_init(&hbus->device_list_lock);
+ spin_lock_init(&hbus->retarget_msi_interrupt_lock);
sema_init(&hbus->enum_sem, 1);
init_completion(&hbus->remove_event);
@@ -2266,24 +2275,32 @@ free_bus:
return ret;
}
-/**
- * hv_pci_remove() - Remove routine for this VMBus channel
- * @hdev: VMBus's tracking struct for this root PCI bus
- *
- * Return: 0 on success, -errno on failure
- */
-static int hv_pci_remove(struct hv_device *hdev)
+static void hv_pci_bus_exit(struct hv_device *hdev)
{
- int ret;
- struct hv_pcibus_device *hbus;
- union {
+ struct hv_pcibus_device *hbus = hv_get_drvdata(hdev);
+ struct {
struct pci_packet teardown_packet;
- u8 buffer[0x100];
+ u8 buffer[sizeof(struct pci_message)];
} pkt;
struct pci_bus_relations relations;
struct hv_pci_compl comp_pkt;
+ int ret;
- hbus = hv_get_drvdata(hdev);
+ /*
+ * After the host sends the RESCIND_CHANNEL message, it doesn't
+ * access the per-channel ringbuffer any longer.
+ */
+ if (hdev->channel->rescind)
+ return;
+
+ /* Delete any children which might still exist. */
+ memset(&relations, 0, sizeof(relations));
+ hv_pci_devices_present(hbus, &relations);
+
+ ret = hv_send_resources_released(hdev);
+ if (ret)
+ dev_err(&hdev->device,
+ "Couldn't send resources released packet(s)\n");
memset(&pkt.teardown_packet, 0, sizeof(pkt.teardown_packet));
init_completion(&comp_pkt.host_event);
@@ -2298,7 +2315,19 @@ static int hv_pci_remove(struct hv_device *hdev)
VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
if (!ret)
wait_for_completion_timeout(&comp_pkt.host_event, 10 * HZ);
+}
+
+/**
+ * hv_pci_remove() - Remove routine for this VMBus channel
+ * @hdev: VMBus's tracking struct for this root PCI bus
+ *
+ * Return: 0 on success, -errno on failure
+ */
+static int hv_pci_remove(struct hv_device *hdev)
+{
+ struct hv_pcibus_device *hbus;
+ hbus = hv_get_drvdata(hdev);
if (hbus->state == hv_pcibus_installed) {
/* Remove the bus from PCI's point of view. */
pci_lock_rescan_remove();
@@ -2307,17 +2336,10 @@ static int hv_pci_remove(struct hv_device *hdev)
pci_unlock_rescan_remove();
}
- ret = hv_send_resources_released(hdev);
- if (ret)
- dev_err(&hdev->device,
- "Couldn't send resources released packet(s)\n");
+ hv_pci_bus_exit(hdev);
vmbus_close(hdev->channel);
- /* Delete any children which might still exist. */
- memset(&relations, 0, sizeof(relations));
- hv_pci_devices_present(hbus, &relations);
-
iounmap(hbus->cfg_addr);
hv_free_config_window(hbus);
pci_free_resource_list(&hbus->resources_for_children);
diff --git a/drivers/pci/host/pci-layerscape.c b/drivers/pci/host/pci-layerscape.c
index 653707996342..ea789138531b 100644
--- a/drivers/pci/host/pci-layerscape.c
+++ b/drivers/pci/host/pci-layerscape.c
@@ -35,12 +35,10 @@
#define PCIE_STRFMR1 0x71c /* Symbol Timer & Filter Mask Register1 */
#define PCIE_DBI_RO_WR_EN 0x8bc /* DBI Read-Only Write Enable Register */
-/* PEX LUT registers */
-#define PCIE_LUT_DBG 0x7FC /* PEX LUT Debug Register */
-
struct ls_pcie_drvdata {
u32 lut_offset;
u32 ltssm_shift;
+ u32 lut_dbg;
struct pcie_host_ops *ops;
};
@@ -134,7 +132,7 @@ static int ls_pcie_link_up(struct pcie_port *pp)
struct ls_pcie *pcie = to_ls_pcie(pp);
u32 state;
- state = (ioread32(pcie->lut + PCIE_LUT_DBG) >>
+ state = (ioread32(pcie->lut + pcie->drvdata->lut_dbg) >>
pcie->drvdata->ltssm_shift) &
LTSSM_STATE_MASK;
@@ -196,18 +194,28 @@ static struct ls_pcie_drvdata ls1021_drvdata = {
static struct ls_pcie_drvdata ls1043_drvdata = {
.lut_offset = 0x10000,
.ltssm_shift = 24,
+ .lut_dbg = 0x7fc,
+ .ops = &ls_pcie_host_ops,
+};
+
+static struct ls_pcie_drvdata ls1046_drvdata = {
+ .lut_offset = 0x80000,
+ .ltssm_shift = 24,
+ .lut_dbg = 0x407fc,
.ops = &ls_pcie_host_ops,
};
static struct ls_pcie_drvdata ls2080_drvdata = {
.lut_offset = 0x80000,
.ltssm_shift = 0,
+ .lut_dbg = 0x7fc,
.ops = &ls_pcie_host_ops,
};
static const struct of_device_id ls_pcie_of_match[] = {
{ .compatible = "fsl,ls1021a-pcie", .data = &ls1021_drvdata },
{ .compatible = "fsl,ls1043a-pcie", .data = &ls1043_drvdata },
+ { .compatible = "fsl,ls1046a-pcie", .data = &ls1046_drvdata },
{ .compatible = "fsl,ls2080a-pcie", .data = &ls2080_drvdata },
{ .compatible = "fsl,ls2085a-pcie", .data = &ls2080_drvdata },
{ },
@@ -252,10 +260,8 @@ static int __init ls_pcie_probe(struct platform_device *pdev)
dbi_base = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs");
pcie->pp.dbi_base = devm_ioremap_resource(dev, dbi_base);
- if (IS_ERR(pcie->pp.dbi_base)) {
- dev_err(dev, "missing *regs* space\n");
+ if (IS_ERR(pcie->pp.dbi_base))
return PTR_ERR(pcie->pp.dbi_base);
- }
pcie->lut = pcie->pp.dbi_base + pcie->drvdata->lut_offset;
diff --git a/drivers/pci/host/pci-rcar-gen2.c b/drivers/pci/host/pci-rcar-gen2.c
index 1eeefa4df64c..85348590848b 100644
--- a/drivers/pci/host/pci-rcar-gen2.c
+++ b/drivers/pci/host/pci-rcar-gen2.c
@@ -430,10 +430,10 @@ static int rcar_pci_probe(struct platform_device *pdev)
}
static struct of_device_id rcar_pci_of_match[] = {
- { .compatible = "renesas,pci-rcar-gen2", },
{ .compatible = "renesas,pci-r8a7790", },
{ .compatible = "renesas,pci-r8a7791", },
{ .compatible = "renesas,pci-r8a7794", },
+ { .compatible = "renesas,pci-rcar-gen2", },
{ },
};
diff --git a/drivers/pci/host/pci-tegra.c b/drivers/pci/host/pci-tegra.c
index 8dfccf733241..ed8a93f2bfb5 100644
--- a/drivers/pci/host/pci-tegra.c
+++ b/drivers/pci/host/pci-tegra.c
@@ -51,10 +51,6 @@
#include <soc/tegra/cpuidle.h>
#include <soc/tegra/pmc.h>
-#include <asm/mach/irq.h>
-#include <asm/mach/map.h>
-#include <asm/mach/pci.h>
-
#define INT_PCI_MSI_NR (8 * 32)
/* register definitions */
@@ -188,6 +184,9 @@
#define RP_VEND_XP 0x00000f00
#define RP_VEND_XP_DL_UP (1 << 30)
+#define RP_VEND_CTL2 0x00000fa8
+#define RP_VEND_CTL2_PCA_ENABLE (1 << 7)
+
#define RP_PRIV_MISC 0x00000fe0
#define RP_PRIV_MISC_PRSNT_MAP_EP_PRSNT (0xe << 0)
#define RP_PRIV_MISC_PRSNT_MAP_EP_ABSNT (0xf << 0)
@@ -252,6 +251,7 @@ struct tegra_pcie_soc {
bool has_intr_prsnt_sense;
bool has_cml_clk;
bool has_gen2;
+ bool force_pca_enable;
};
static inline struct tegra_msi *to_tegra_msi(struct msi_controller *chip)
@@ -322,11 +322,6 @@ struct tegra_pcie_bus {
unsigned int nr;
};
-static inline struct tegra_pcie *sys_to_pcie(struct pci_sys_data *sys)
-{
- return sys->private_data;
-}
-
static inline void afi_writel(struct tegra_pcie *pcie, u32 value,
unsigned long offset)
{
@@ -385,8 +380,7 @@ static struct tegra_pcie_bus *tegra_pcie_bus_alloc(struct tegra_pcie *pcie,
unsigned int busnr)
{
struct device *dev = pcie->dev;
- pgprot_t prot = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
- L_PTE_XN | L_PTE_MT_DEV_SHARED | L_PTE_SHARED);
+ pgprot_t prot = pgprot_device(PAGE_KERNEL);
phys_addr_t cs = pcie->cs->start;
struct tegra_pcie_bus *bus;
unsigned int i;
@@ -430,7 +424,8 @@ free:
static int tegra_pcie_add_bus(struct pci_bus *bus)
{
- struct tegra_pcie *pcie = sys_to_pcie(bus->sysdata);
+ struct pci_host_bridge *host = pci_find_host_bridge(bus);
+ struct tegra_pcie *pcie = pci_host_bridge_priv(host);
struct tegra_pcie_bus *b;
b = tegra_pcie_bus_alloc(pcie, bus->number);
@@ -444,7 +439,8 @@ static int tegra_pcie_add_bus(struct pci_bus *bus)
static void tegra_pcie_remove_bus(struct pci_bus *child)
{
- struct tegra_pcie *pcie = sys_to_pcie(child->sysdata);
+ struct pci_host_bridge *host = pci_find_host_bridge(child);
+ struct tegra_pcie *pcie = pci_host_bridge_priv(host);
struct tegra_pcie_bus *bus, *tmp;
list_for_each_entry_safe(bus, tmp, &pcie->buses, list) {
@@ -461,7 +457,8 @@ static void __iomem *tegra_pcie_map_bus(struct pci_bus *bus,
unsigned int devfn,
int where)
{
- struct tegra_pcie *pcie = sys_to_pcie(bus->sysdata);
+ struct pci_host_bridge *host = pci_find_host_bridge(bus);
+ struct tegra_pcie *pcie = pci_host_bridge_priv(host);
struct device *dev = pcie->dev;
void __iomem *addr = NULL;
@@ -558,6 +555,12 @@ static void tegra_pcie_port_enable(struct tegra_pcie_port *port)
afi_writel(port->pcie, value, ctrl);
tegra_pcie_port_reset(port);
+
+ if (soc->force_pca_enable) {
+ value = readl(port->base + RP_VEND_CTL2);
+ value |= RP_VEND_CTL2_PCA_ENABLE;
+ writel(value, port->base + RP_VEND_CTL2);
+ }
}
static void tegra_pcie_port_disable(struct tegra_pcie_port *port)
@@ -610,39 +613,31 @@ static void tegra_pcie_relax_enable(struct pci_dev *dev)
}
DECLARE_PCI_FIXUP_FINAL(PCI_ANY_ID, PCI_ANY_ID, tegra_pcie_relax_enable);
-static int tegra_pcie_setup(int nr, struct pci_sys_data *sys)
+static int tegra_pcie_request_resources(struct tegra_pcie *pcie)
{
- struct tegra_pcie *pcie = sys_to_pcie(sys);
+ struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie);
+ struct list_head *windows = &host->windows;
struct device *dev = pcie->dev;
int err;
- sys->mem_offset = pcie->offset.mem;
- sys->io_offset = pcie->offset.io;
+ pci_add_resource_offset(windows, &pcie->pio, pcie->offset.io);
+ pci_add_resource_offset(windows, &pcie->mem, pcie->offset.mem);
+ pci_add_resource_offset(windows, &pcie->prefetch, pcie->offset.mem);
+ pci_add_resource(windows, &pcie->busn);
- err = devm_request_resource(dev, &iomem_resource, &pcie->io);
+ err = devm_request_pci_bus_resources(dev, windows);
if (err < 0)
return err;
- err = pci_remap_iospace(&pcie->pio, pcie->io.start);
- if (!err)
- pci_add_resource_offset(&sys->resources, &pcie->pio,
- sys->io_offset);
+ pci_remap_iospace(&pcie->pio, pcie->io.start);
- pci_add_resource_offset(&sys->resources, &pcie->mem, sys->mem_offset);
- pci_add_resource_offset(&sys->resources, &pcie->prefetch,
- sys->mem_offset);
- pci_add_resource(&sys->resources, &pcie->busn);
-
- err = devm_request_pci_bus_resources(dev, &sys->resources);
- if (err < 0)
- return err;
-
- return 1;
+ return 0;
}
static int tegra_pcie_map_irq(const struct pci_dev *pdev, u8 slot, u8 pin)
{
- struct tegra_pcie *pcie = sys_to_pcie(pdev->bus->sysdata);
+ struct pci_host_bridge *host = pci_find_host_bridge(pdev->bus);
+ struct tegra_pcie *pcie = pci_host_bridge_priv(host);
int irq;
tegra_cpuidle_pcie_irqs_in_use();
@@ -1499,10 +1494,11 @@ static const struct irq_domain_ops msi_domain_ops = {
static int tegra_pcie_enable_msi(struct tegra_pcie *pcie)
{
- struct device *dev = pcie->dev;
- struct platform_device *pdev = to_platform_device(dev);
+ struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie);
+ struct platform_device *pdev = to_platform_device(pcie->dev);
const struct tegra_pcie_soc *soc = pcie->soc;
struct tegra_msi *msi = &pcie->msi;
+ struct device *dev = pcie->dev;
unsigned long base;
int err;
u32 reg;
@@ -1559,6 +1555,8 @@ static int tegra_pcie_enable_msi(struct tegra_pcie *pcie)
reg |= AFI_INTR_MASK_MSI_MASK;
afi_writel(pcie, reg, AFI_INTR_MASK);
+ host->msi = &msi->chip;
+
return 0;
err:
@@ -1609,7 +1607,8 @@ static int tegra_pcie_get_xbar_config(struct tegra_pcie *pcie, u32 lanes,
struct device *dev = pcie->dev;
struct device_node *np = dev->of_node;
- if (of_device_is_compatible(np, "nvidia,tegra124-pcie")) {
+ if (of_device_is_compatible(np, "nvidia,tegra124-pcie") ||
+ of_device_is_compatible(np, "nvidia,tegra210-pcie")) {
switch (lanes) {
case 0x0000104:
dev_info(dev, "4x1, 1x1 configuration\n");
@@ -1730,7 +1729,22 @@ static int tegra_pcie_get_regulators(struct tegra_pcie *pcie, u32 lane_mask)
struct device_node *np = dev->of_node;
unsigned int i = 0;
- if (of_device_is_compatible(np, "nvidia,tegra124-pcie")) {
+ if (of_device_is_compatible(np, "nvidia,tegra210-pcie")) {
+ pcie->num_supplies = 6;
+
+ pcie->supplies = devm_kcalloc(pcie->dev, pcie->num_supplies,
+ sizeof(*pcie->supplies),
+ GFP_KERNEL);
+ if (!pcie->supplies)
+ return -ENOMEM;
+
+ pcie->supplies[i++].supply = "avdd-pll-uerefe";
+ pcie->supplies[i++].supply = "hvddio-pex";
+ pcie->supplies[i++].supply = "dvddio-pex";
+ pcie->supplies[i++].supply = "dvdd-pex-pll";
+ pcie->supplies[i++].supply = "hvdd-pex-pll-e";
+ pcie->supplies[i++].supply = "vddio-pex-ctl";
+ } else if (of_device_is_compatible(np, "nvidia,tegra124-pcie")) {
pcie->num_supplies = 7;
pcie->supplies = devm_kcalloc(dev, pcie->num_supplies,
@@ -2021,11 +2035,10 @@ retry:
return false;
}
-static int tegra_pcie_enable(struct tegra_pcie *pcie)
+static void tegra_pcie_enable_ports(struct tegra_pcie *pcie)
{
struct device *dev = pcie->dev;
struct tegra_pcie_port *port, *tmp;
- struct hw_pci hw;
list_for_each_entry_safe(port, tmp, &pcie->ports, list) {
dev_info(dev, "probing port %u, using %u lanes\n",
@@ -2041,21 +2054,6 @@ static int tegra_pcie_enable(struct tegra_pcie *pcie)
tegra_pcie_port_disable(port);
tegra_pcie_port_free(port);
}
-
- memset(&hw, 0, sizeof(hw));
-
-#ifdef CONFIG_PCI_MSI
- hw.msi_ctrl = &pcie->msi.chip;
-#endif
-
- hw.nr_controllers = 1;
- hw.private_data = (void **)&pcie;
- hw.setup = tegra_pcie_setup;
- hw.map_irq = tegra_pcie_map_irq;
- hw.ops = &tegra_pcie_ops;
-
- pci_common_init_dev(dev, &hw);
- return 0;
}
static const struct tegra_pcie_soc tegra20_pcie = {
@@ -2069,6 +2067,7 @@ static const struct tegra_pcie_soc tegra20_pcie = {
.has_intr_prsnt_sense = false,
.has_cml_clk = false,
.has_gen2 = false,
+ .force_pca_enable = false,
};
static const struct tegra_pcie_soc tegra30_pcie = {
@@ -2083,6 +2082,7 @@ static const struct tegra_pcie_soc tegra30_pcie = {
.has_intr_prsnt_sense = true,
.has_cml_clk = true,
.has_gen2 = false,
+ .force_pca_enable = false,
};
static const struct tegra_pcie_soc tegra124_pcie = {
@@ -2096,9 +2096,25 @@ static const struct tegra_pcie_soc tegra124_pcie = {
.has_intr_prsnt_sense = true,
.has_cml_clk = true,
.has_gen2 = true,
+ .force_pca_enable = false,
+};
+
+static const struct tegra_pcie_soc tegra210_pcie = {
+ .num_ports = 2,
+ .msi_base_shift = 8,
+ .pads_pll_ctl = PADS_PLL_CTL_TEGRA30,
+ .tx_ref_sel = PADS_PLL_CTL_TXCLKREF_BUF_EN,
+ .pads_refclk_cfg0 = 0x90b890b8,
+ .has_pex_clkreq_en = true,
+ .has_pex_bias_ctrl = true,
+ .has_intr_prsnt_sense = true,
+ .has_cml_clk = true,
+ .has_gen2 = true,
+ .force_pca_enable = true,
};
static const struct of_device_id tegra_pcie_of_match[] = {
+ { .compatible = "nvidia,tegra210-pcie", .data = &tegra210_pcie },
{ .compatible = "nvidia,tegra124-pcie", .data = &tegra124_pcie },
{ .compatible = "nvidia,tegra30-pcie", .data = &tegra30_pcie },
{ .compatible = "nvidia,tegra20-pcie", .data = &tegra20_pcie },
@@ -2217,13 +2233,17 @@ remove:
static int tegra_pcie_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
+ struct pci_host_bridge *host;
struct tegra_pcie *pcie;
+ struct pci_bus *child;
int err;
- pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
- if (!pcie)
+ host = pci_alloc_host_bridge(sizeof(*pcie));
+ if (!host)
return -ENOMEM;
+ pcie = pci_host_bridge_priv(host);
+
pcie->soc = of_device_get_match_data(dev);
INIT_LIST_HEAD(&pcie->buses);
INIT_LIST_HEAD(&pcie->ports);
@@ -2243,6 +2263,10 @@ static int tegra_pcie_probe(struct platform_device *pdev)
if (err)
goto put_resources;
+ err = tegra_pcie_request_resources(pcie);
+ if (err)
+ goto put_resources;
+
/* setup the AFI address translations */
tegra_pcie_setup_translations(pcie);
@@ -2254,12 +2278,30 @@ static int tegra_pcie_probe(struct platform_device *pdev)
}
}
- err = tegra_pcie_enable(pcie);
+ tegra_pcie_enable_ports(pcie);
+
+ pci_add_flags(PCI_REASSIGN_ALL_RSRC | PCI_REASSIGN_ALL_BUS);
+ host->busnr = pcie->busn.start;
+ host->dev.parent = &pdev->dev;
+ host->ops = &tegra_pcie_ops;
+
+ err = pci_register_host_bridge(host);
if (err < 0) {
- dev_err(dev, "failed to enable PCIe ports: %d\n", err);
+ dev_err(dev, "failed to register host: %d\n", err);
goto disable_msi;
}
+ pci_scan_child_bus(host->bus);
+
+ pci_fixup_irqs(pci_common_swizzle, tegra_pcie_map_irq);
+ pci_bus_size_bridges(host->bus);
+ pci_bus_assign_resources(host->bus);
+
+ list_for_each_entry(child, &host->bus->children, node)
+ pcie_bus_configure_settings(child);
+
+ pci_bus_add_devices(host->bus);
+
if (IS_ENABLED(CONFIG_DEBUG_FS)) {
err = tegra_pcie_debugfs_init(pcie);
if (err < 0)
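
With pci_common_init_dev() gone, the probe open-codes the generic host
bridge bring-up. Condensed from the hunk above (error handling elided):

host->busnr = pcie->busn.start;
host->dev.parent = &pdev->dev;
host->ops = &tegra_pcie_ops;

pci_register_host_bridge(host);			/* creates the root bus */
pci_scan_child_bus(host->bus);			/* enumerate devices */
pci_fixup_irqs(pci_common_swizzle, tegra_pcie_map_irq);
pci_bus_size_bridges(host->bus);		/* size bridge windows */
pci_bus_assign_resources(host->bus);		/* assign BARs/windows */
pci_bus_add_devices(host->bus);			/* bind drivers */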
diff --git a/drivers/pci/host/pci-thunder-ecam.c b/drivers/pci/host/pci-thunder-ecam.c
index d50a3dc2d8db..3f54a43bbbea 100644
--- a/drivers/pci/host/pci-thunder-ecam.c
+++ b/drivers/pci/host/pci-thunder-ecam.c
@@ -14,6 +14,8 @@
#include <linux/pci-ecam.h>
#include <linux/platform_device.h>
+#if defined(CONFIG_PCI_HOST_THUNDER_ECAM) || (defined(CONFIG_ACPI) && defined(CONFIG_PCI_QUIRKS))
+
static void set_val(u32 v, int where, int size, u32 *val)
{
int shift = (where & 3) * 8;
@@ -346,7 +348,7 @@ static int thunder_ecam_config_write(struct pci_bus *bus, unsigned int devfn,
return pci_generic_config_write(bus, devfn, where, size, val);
}
-static struct pci_ecam_ops pci_thunder_ecam_ops = {
+struct pci_ecam_ops pci_thunder_ecam_ops = {
.bus_shift = 20,
.pci_ops = {
.map_bus = pci_ecam_map_bus,
@@ -355,6 +357,8 @@ static struct pci_ecam_ops pci_thunder_ecam_ops = {
}
};
+#ifdef CONFIG_PCI_HOST_THUNDER_ECAM
+
static const struct of_device_id thunder_ecam_of_match[] = {
{ .compatible = "cavium,pci-host-thunder-ecam" },
{ },
@@ -373,3 +377,6 @@ static struct platform_driver thunder_ecam_driver = {
.probe = thunder_ecam_probe,
};
builtin_platform_driver(thunder_ecam_driver);
+
+#endif
+#endif
diff --git a/drivers/pci/host/pci-thunder-pem.c b/drivers/pci/host/pci-thunder-pem.c
index 6abaf80ffb39..af722eb0ca75 100644
--- a/drivers/pci/host/pci-thunder-pem.c
+++ b/drivers/pci/host/pci-thunder-pem.c
@@ -18,8 +18,12 @@
#include <linux/init.h>
#include <linux/of_address.h>
#include <linux/of_pci.h>
+#include <linux/pci-acpi.h>
#include <linux/pci-ecam.h>
#include <linux/platform_device.h>
+#include "../pci.h"
+
+#if defined(CONFIG_PCI_HOST_THUNDER_PEM) || (defined(CONFIG_ACPI) && defined(CONFIG_PCI_QUIRKS))
#define PEM_CFG_WR 0x28
#define PEM_CFG_RD 0x30
@@ -284,35 +288,16 @@ static int thunder_pem_config_write(struct pci_bus *bus, unsigned int devfn,
return pci_generic_config_write(bus, devfn, where, size, val);
}
-static int thunder_pem_init(struct pci_config_window *cfg)
+static int thunder_pem_init(struct device *dev, struct pci_config_window *cfg,
+ struct resource *res_pem)
{
- struct device *dev = cfg->parent;
- resource_size_t bar4_start;
- struct resource *res_pem;
struct thunder_pem_pci *pem_pci;
- struct platform_device *pdev;
-
- /* Only OF support for now */
- if (!dev->of_node)
- return -EINVAL;
+ resource_size_t bar4_start;
pem_pci = devm_kzalloc(dev, sizeof(*pem_pci), GFP_KERNEL);
if (!pem_pci)
return -ENOMEM;
- pdev = to_platform_device(dev);
-
- /*
- * The second register range is the PEM bridge to the PCIe
- * bus. It has a different config access method than those
- * devices behind the bridge.
- */
- res_pem = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- if (!res_pem) {
- dev_err(dev, "missing \"reg[1]\"property\n");
- return -EINVAL;
- }
-
pem_pci->pem_reg_base = devm_ioremap(dev, res_pem->start, 0x10000);
if (!pem_pci->pem_reg_base)
return -ENOMEM;
@@ -332,9 +317,69 @@ static int thunder_pem_init(struct pci_config_window *cfg)
return 0;
}
+#if defined(CONFIG_ACPI) && defined(CONFIG_PCI_QUIRKS)
+
+static int thunder_pem_acpi_init(struct pci_config_window *cfg)
+{
+ struct device *dev = cfg->parent;
+ struct acpi_device *adev = to_acpi_device(dev);
+ struct acpi_pci_root *root = acpi_driver_data(adev);
+ struct resource *res_pem;
+ int ret;
+
+ res_pem = devm_kzalloc(&adev->dev, sizeof(*res_pem), GFP_KERNEL);
+ if (!res_pem)
+ return -ENOMEM;
+
+ ret = acpi_get_rc_resources(dev, "THRX0002", root->segment, res_pem);
+ if (ret) {
+ dev_err(dev, "can't get rc base address\n");
+ return ret;
+ }
+
+ return thunder_pem_init(dev, cfg, res_pem);
+}
+
+struct pci_ecam_ops thunder_pem_ecam_ops = {
+ .bus_shift = 24,
+ .init = thunder_pem_acpi_init,
+ .pci_ops = {
+ .map_bus = pci_ecam_map_bus,
+ .read = thunder_pem_config_read,
+ .write = thunder_pem_config_write,
+ }
+};
+
+#endif
+
+#ifdef CONFIG_PCI_HOST_THUNDER_PEM
+
+static int thunder_pem_platform_init(struct pci_config_window *cfg)
+{
+ struct device *dev = cfg->parent;
+ struct platform_device *pdev = to_platform_device(dev);
+ struct resource *res_pem;
+
+ if (!dev->of_node)
+ return -EINVAL;
+
+ /*
+ * The second register range is the PEM bridge to the PCIe
+ * bus. It has a different config access method than those
+ * devices behind the bridge.
+ */
+ res_pem = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ if (!res_pem) {
+ dev_err(dev, "missing \"reg[1]\"property\n");
+ return -EINVAL;
+ }
+
+ return thunder_pem_init(dev, cfg, res_pem);
+}
+
static struct pci_ecam_ops pci_thunder_pem_ops = {
.bus_shift = 24,
- .init = thunder_pem_init,
+ .init = thunder_pem_platform_init,
.pci_ops = {
.map_bus = pci_ecam_map_bus,
.read = thunder_pem_config_read,
@@ -360,3 +405,6 @@ static struct platform_driver thunder_pem_driver = {
.probe = thunder_pem_probe,
};
builtin_platform_driver(thunder_pem_driver);
+
+#endif
+#endif
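
thunder_pem_ecam_ops is deliberately non-static: on ACPI systems the MCFG
quirk table (in drivers/acpi/pci_mcfg.c, outside this excerpt) substitutes
it for the generic ECAM ops when the firmware's OEM ID and revision match.
The entry format below is assumed for illustration only:

/* hypothetical quirk entry; the real table lives in pci_mcfg.c */
{ "CAVIUM", "THUNDERX", 1, 4 /* segment */, MCFG_BUS_ANY,
  &thunder_pem_ecam_ops },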
diff --git a/drivers/pci/host/pci-xgene-msi.c b/drivers/pci/host/pci-xgene-msi.c
index a6456b578269..1f38d0836751 100644
--- a/drivers/pci/host/pci-xgene-msi.c
+++ b/drivers/pci/host/pci-xgene-msi.c
@@ -360,16 +360,16 @@ static void xgene_msi_isr(struct irq_desc *desc)
chained_irq_exit(chip, desc);
}
+static enum cpuhp_state pci_xgene_online;
+
static int xgene_msi_remove(struct platform_device *pdev)
{
- int virq, i;
struct xgene_msi *msi = platform_get_drvdata(pdev);
- for (i = 0; i < NR_HW_IRQS; i++) {
- virq = msi->msi_groups[i].gic_irq;
- if (virq != 0)
- irq_set_chained_handler_and_data(virq, NULL, NULL);
- }
+ if (pci_xgene_online)
+ cpuhp_remove_state(pci_xgene_online);
+ cpuhp_remove_state(CPUHP_PCI_XGENE_DEAD);
+
kfree(msi->msi_groups);
kfree(msi->bitmap);
@@ -427,7 +427,7 @@ static int xgene_msi_hwirq_alloc(unsigned int cpu)
return 0;
}
-static void xgene_msi_hwirq_free(unsigned int cpu)
+static int xgene_msi_hwirq_free(unsigned int cpu)
{
struct xgene_msi *msi = &xgene_msi_ctrl;
struct xgene_msi_group *msi_group;
@@ -441,33 +441,9 @@ static void xgene_msi_hwirq_free(unsigned int cpu)
irq_set_chained_handler_and_data(msi_group->gic_irq, NULL,
NULL);
}
+ return 0;
}
-static int xgene_msi_cpu_callback(struct notifier_block *nfb,
- unsigned long action, void *hcpu)
-{
- unsigned cpu = (unsigned long)hcpu;
-
- switch (action) {
- case CPU_ONLINE:
- case CPU_ONLINE_FROZEN:
- xgene_msi_hwirq_alloc(cpu);
- break;
- case CPU_DEAD:
- case CPU_DEAD_FROZEN:
- xgene_msi_hwirq_free(cpu);
- break;
- default:
- break;
- }
-
- return NOTIFY_OK;
-}
-
-static struct notifier_block xgene_msi_cpu_notifier = {
- .notifier_call = xgene_msi_cpu_callback,
-};
-
static const struct of_device_id xgene_msi_match_table[] = {
{.compatible = "apm,xgene1-msi"},
{},
@@ -478,7 +454,6 @@ static int xgene_msi_probe(struct platform_device *pdev)
struct resource *res;
int rc, irq_index;
struct xgene_msi *xgene_msi;
- unsigned int cpu;
int virt_msir;
u32 msi_val, msi_idx;
@@ -540,28 +515,22 @@ static int xgene_msi_probe(struct platform_device *pdev)
}
}
- cpu_notifier_register_begin();
-
- for_each_online_cpu(cpu)
- if (xgene_msi_hwirq_alloc(cpu)) {
- dev_err(&pdev->dev, "failed to register MSI handlers\n");
- cpu_notifier_register_done();
- goto error;
- }
-
- rc = __register_hotcpu_notifier(&xgene_msi_cpu_notifier);
- if (rc) {
- dev_err(&pdev->dev, "failed to add CPU MSI notifier\n");
- cpu_notifier_register_done();
- goto error;
- }
-
- cpu_notifier_register_done();
+ rc = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "pci/xgene:online",
+ xgene_msi_hwirq_alloc, NULL);
+ if (rc)
+ goto err_cpuhp;
+ pci_xgene_online = rc;
+ rc = cpuhp_setup_state(CPUHP_PCI_XGENE_DEAD, "pci/xgene:dead", NULL,
+ xgene_msi_hwirq_free);
+ if (rc)
+ goto err_cpuhp;
dev_info(&pdev->dev, "APM X-Gene PCIe MSI driver loaded\n");
return 0;
+err_cpuhp:
+ dev_err(&pdev->dev, "failed to add CPU MSI notifier\n");
error:
xgene_msi_remove(pdev);
return rc;
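
The open-coded CPU notifier gives way to the hotplug state machine: for
CPUHP_AP_ONLINE_DYN, cpuhp_setup_state() returns the dynamically allocated
state number (positive), which must be saved for later removal, while static
states such as CPUHP_PCI_XGENE_DEAD return 0 on success. A minimal sketch:

static enum cpuhp_state example_online;

static int example_register(void)
{
	int rc;

	rc = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "subsys/example:online",
			       example_cpu_up, NULL);	/* hypothetical callback */
	if (rc < 0)
		return rc;
	example_online = rc;	/* dynamic states return the state number */
	return 0;
}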
diff --git a/drivers/pci/host/pci-xgene.c b/drivers/pci/host/pci-xgene.c
index 1de23d74783f..7c3b54b9eb17 100644
--- a/drivers/pci/host/pci-xgene.c
+++ b/drivers/pci/host/pci-xgene.c
@@ -27,6 +27,8 @@
#include <linux/of_irq.h>
#include <linux/of_pci.h>
#include <linux/pci.h>
+#include <linux/pci-acpi.h>
+#include <linux/pci-ecam.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
@@ -64,7 +66,9 @@
/* PCIe IP version */
#define XGENE_PCIE_IP_VER_UNKN 0
#define XGENE_PCIE_IP_VER_1 1
+#define XGENE_PCIE_IP_VER_2 2
+#if defined(CONFIG_PCI_XGENE) || (defined(CONFIG_ACPI) && defined(CONFIG_PCI_QUIRKS))
struct xgene_pcie_port {
struct device_node *node;
struct device *dev;
@@ -91,13 +95,24 @@ static inline u32 pcie_bar_low_val(u32 addr, u32 flags)
return (addr & PCI_BASE_ADDRESS_MEM_MASK) | flags;
}
+static inline struct xgene_pcie_port *pcie_bus_to_port(struct pci_bus *bus)
+{
+ struct pci_config_window *cfg;
+
+ if (acpi_disabled)
+ return (struct xgene_pcie_port *)(bus->sysdata);
+
+ cfg = bus->sysdata;
+ return (struct xgene_pcie_port *)(cfg->priv);
+}
+
/*
* When address bits [17:16] are 2'b01, the configuration access will be
* treated as Type 1 and forwarded to the external PCIe device.
*/
static void __iomem *xgene_pcie_get_cfg_base(struct pci_bus *bus)
{
- struct xgene_pcie_port *port = bus->sysdata;
+ struct xgene_pcie_port *port = pcie_bus_to_port(bus);
if (bus->number >= (bus->primary + 1))
return port->cfg_base + AXI_EP_CFG_ACCESS;
@@ -111,7 +126,7 @@ static void __iomem *xgene_pcie_get_cfg_base(struct pci_bus *bus)
*/
static void xgene_pcie_set_rtdid_reg(struct pci_bus *bus, uint devfn)
{
- struct xgene_pcie_port *port = bus->sysdata;
+ struct xgene_pcie_port *port = pcie_bus_to_port(bus);
unsigned int b, d, f;
u32 rtdid_val = 0;
@@ -158,7 +173,7 @@ static void __iomem *xgene_pcie_map_bus(struct pci_bus *bus, unsigned int devfn,
static int xgene_pcie_config_read32(struct pci_bus *bus, unsigned int devfn,
int where, int size, u32 *val)
{
- struct xgene_pcie_port *port = bus->sysdata;
+ struct xgene_pcie_port *port = pcie_bus_to_port(bus);
if (pci_generic_config_read32(bus, devfn, where & ~0x3, 4, val) !=
PCIBIOS_SUCCESSFUL)
@@ -182,13 +197,103 @@ static int xgene_pcie_config_read32(struct pci_bus *bus, unsigned int devfn,
return PCIBIOS_SUCCESSFUL;
}
+#endif
-static struct pci_ops xgene_pcie_ops = {
- .map_bus = xgene_pcie_map_bus,
- .read = xgene_pcie_config_read32,
- .write = pci_generic_config_write32,
+#if defined(CONFIG_ACPI) && defined(CONFIG_PCI_QUIRKS)
+static int xgene_get_csr_resource(struct acpi_device *adev,
+ struct resource *res)
+{
+ struct device *dev = &adev->dev;
+ struct resource_entry *entry;
+ struct list_head list;
+ unsigned long flags;
+ int ret;
+
+ INIT_LIST_HEAD(&list);
+ flags = IORESOURCE_MEM;
+ ret = acpi_dev_get_resources(adev, &list,
+ acpi_dev_filter_resource_type_cb,
+ (void *) flags);
+ if (ret < 0) {
+ dev_err(dev, "failed to parse _CRS method, error code %d\n",
+ ret);
+ return ret;
+ }
+
+ if (ret == 0) {
+ dev_err(dev, "no IO and memory resources present in _CRS\n");
+ return -EINVAL;
+ }
+
+ entry = list_first_entry(&list, struct resource_entry, node);
+ *res = *entry->res;
+ acpi_dev_free_resource_list(&list);
+ return 0;
+}
+
+static int xgene_pcie_ecam_init(struct pci_config_window *cfg, u32 ipversion)
+{
+ struct device *dev = cfg->parent;
+ struct acpi_device *adev = to_acpi_device(dev);
+ struct xgene_pcie_port *port;
+ struct resource csr;
+ int ret;
+
+ port = devm_kzalloc(dev, sizeof(*port), GFP_KERNEL);
+ if (!port)
+ return -ENOMEM;
+
+ ret = xgene_get_csr_resource(adev, &csr);
+ if (ret) {
+ dev_err(dev, "can't get CSR resource\n");
+ return ret;
+ }
+ port->csr_base = devm_ioremap_resource(dev, &csr);
+ if (IS_ERR(port->csr_base)) {
+ return PTR_ERR(port->csr_base);
+ }
+
+ port->cfg_base = cfg->win;
+ port->version = ipversion;
+
+ cfg->priv = port;
+ return 0;
+}
+
+static int xgene_v1_pcie_ecam_init(struct pci_config_window *cfg)
+{
+ return xgene_pcie_ecam_init(cfg, XGENE_PCIE_IP_VER_1);
+}
+
+struct pci_ecam_ops xgene_v1_pcie_ecam_ops = {
+ .bus_shift = 16,
+ .init = xgene_v1_pcie_ecam_init,
+ .pci_ops = {
+ .map_bus = xgene_pcie_map_bus,
+ .read = xgene_pcie_config_read32,
+ .write = pci_generic_config_write,
+ }
+};
+
+static int xgene_v2_pcie_ecam_init(struct pci_config_window *cfg)
+{
+ return xgene_pcie_ecam_init(cfg, XGENE_PCIE_IP_VER_2);
+}
+
+struct pci_ecam_ops xgene_v2_pcie_ecam_ops = {
+ .bus_shift = 16,
+ .init = xgene_v2_pcie_ecam_init,
+ .pci_ops = {
+ .map_bus = xgene_pcie_map_bus,
+ .read = xgene_pcie_config_read32,
+ .write = pci_generic_config_write,
+ }
};
+#endif
+#if defined(CONFIG_PCI_XGENE)
static u64 xgene_pcie_set_ib_mask(struct xgene_pcie_port *port, u32 addr,
u32 flags, u64 size)
{
@@ -521,6 +626,12 @@ static int xgene_pcie_setup(struct xgene_pcie_port *port,
return 0;
}
+static struct pci_ops xgene_pcie_ops = {
+ .map_bus = xgene_pcie_map_bus,
+ .read = xgene_pcie_config_read32,
+ .write = pci_generic_config_write32,
+};
+
static int xgene_pcie_probe_bridge(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -591,3 +702,4 @@ static struct platform_driver xgene_pcie_driver = {
.probe = xgene_pcie_probe_bridge,
};
builtin_platform_driver(xgene_pcie_driver);
+#endif
diff --git a/drivers/pci/host/pcie-altera.c b/drivers/pci/host/pcie-altera.c
index b0ac4dfafa0b..0c1540225ca3 100644
--- a/drivers/pci/host/pcie-altera.c
+++ b/drivers/pci/host/pcie-altera.c
@@ -550,10 +550,8 @@ static int altera_pcie_parse_dt(struct altera_pcie *pcie)
cra = platform_get_resource_byname(pdev, IORESOURCE_MEM, "Cra");
pcie->cra_base = devm_ioremap_resource(dev, cra);
- if (IS_ERR(pcie->cra_base)) {
- dev_err(dev, "failed to map cra memory\n");
+ if (IS_ERR(pcie->cra_base))
return PTR_ERR(pcie->cra_base);
- }
/* setup IRQ */
pcie->irq = platform_get_irq(pdev, 0);
@@ -641,8 +639,4 @@ static struct platform_driver altera_pcie_driver = {
},
};
-static int altera_pcie_init(void)
-{
- return platform_driver_register(&altera_pcie_driver);
-}
-device_initcall(altera_pcie_init);
+builtin_platform_driver(altera_pcie_driver);
diff --git a/drivers/pci/host/pcie-hisi.c b/drivers/pci/host/pcie-hisi.c
index 56154c25980c..a301a7187b30 100644
--- a/drivers/pci/host/pcie-hisi.c
+++ b/drivers/pci/host/pcie-hisi.c
@@ -18,7 +18,106 @@
#include <linux/of_pci.h>
#include <linux/platform_device.h>
#include <linux/of_device.h>
+#include <linux/pci.h>
+#include <linux/pci-acpi.h>
+#include <linux/pci-ecam.h>
#include <linux/regmap.h>
+#include "../pci.h"
+
+#if defined(CONFIG_ACPI) && defined(CONFIG_PCI_QUIRKS)
+
+static int hisi_pcie_acpi_rd_conf(struct pci_bus *bus, u32 devfn, int where,
+ int size, u32 *val)
+{
+ struct pci_config_window *cfg = bus->sysdata;
+ int dev = PCI_SLOT(devfn);
+
+ if (bus->number == cfg->busr.start) {
+ /* access only one slot on each root port */
+ if (dev > 0)
+ return PCIBIOS_DEVICE_NOT_FOUND;
+ else
+ return pci_generic_config_read32(bus, devfn, where,
+ size, val);
+ }
+
+ return pci_generic_config_read(bus, devfn, where, size, val);
+}
+
+static int hisi_pcie_acpi_wr_conf(struct pci_bus *bus, u32 devfn,
+ int where, int size, u32 val)
+{
+ struct pci_config_window *cfg = bus->sysdata;
+ int dev = PCI_SLOT(devfn);
+
+ if (bus->number == cfg->busr.start) {
+ /* access only one slot on each root port */
+ if (dev > 0)
+ return PCIBIOS_DEVICE_NOT_FOUND;
+ else
+ return pci_generic_config_write32(bus, devfn, where,
+ size, val);
+ }
+
+ return pci_generic_config_write(bus, devfn, where, size, val);
+}
+
+static void __iomem *hisi_pcie_map_bus(struct pci_bus *bus, unsigned int devfn,
+ int where)
+{
+ struct pci_config_window *cfg = bus->sysdata;
+ void __iomem *reg_base = cfg->priv;
+
+ if (bus->number == cfg->busr.start)
+ return reg_base + where;
+ else
+ return pci_ecam_map_bus(bus, devfn, where);
+}
+
+static int hisi_pcie_init(struct pci_config_window *cfg)
+{
+ struct device *dev = cfg->parent;
+ struct acpi_device *adev = to_acpi_device(dev);
+ struct acpi_pci_root *root = acpi_driver_data(adev);
+ struct resource *res;
+ void __iomem *reg_base;
+ int ret;
+
+ /*
+ * Retrieve RC base and size from a HISI0081 device with _UID
+ * matching our segment.
+ */
+ res = devm_kzalloc(dev, sizeof(*res), GFP_KERNEL);
+ if (!res)
+ return -ENOMEM;
+
+ ret = acpi_get_rc_resources(dev, "HISI0081", root->segment, res);
+ if (ret) {
+ dev_err(dev, "can't get rc base address\n");
+ return ret;
+ }
+
+ reg_base = devm_ioremap(dev, res->start, resource_size(res));
+ if (!reg_base)
+ return -ENOMEM;
+
+ cfg->priv = reg_base;
+ return 0;
+}
+
+struct pci_ecam_ops hisi_pcie_ops = {
+ .bus_shift = 20,
+ .init = hisi_pcie_init,
+ .pci_ops = {
+ .map_bus = hisi_pcie_map_bus,
+ .read = hisi_pcie_acpi_rd_conf,
+ .write = hisi_pcie_acpi_wr_conf,
+ }
+};
+
+#endif
+
+#ifdef CONFIG_PCI_HISI
#include "pcie-designware.h"
@@ -185,17 +284,13 @@ static int hisi_pcie_probe(struct platform_device *pdev)
reg = platform_get_resource_byname(pdev, IORESOURCE_MEM, "rc_dbi");
pp->dbi_base = devm_ioremap_resource(dev, reg);
- if (IS_ERR(pp->dbi_base)) {
- dev_err(dev, "cannot get rc_dbi base\n");
+ if (IS_ERR(pp->dbi_base))
return PTR_ERR(pp->dbi_base);
- }
ret = hisi_add_pcie_port(hisi_pcie, pdev);
if (ret)
return ret;
- dev_warn(dev, "only 32-bit config accesses supported; smaller writes may corrupt adjacent RW1C fields\n");
-
return 0;
}
@@ -227,3 +322,5 @@ static struct platform_driver hisi_pcie_driver = {
},
};
builtin_platform_driver(hisi_pcie_driver);
+
+#endif
diff --git a/drivers/pci/host/pcie-iproc-bcma.c b/drivers/pci/host/pcie-iproc-bcma.c
index 8ce089043a27..bd4c9ec25edc 100644
--- a/drivers/pci/host/pcie-iproc-bcma.c
+++ b/drivers/pci/host/pcie-iproc-bcma.c
@@ -54,6 +54,7 @@ static int iproc_pcie_bcma_probe(struct bcma_device *bdev)
pcie->dev = dev;
+ pcie->type = IPROC_PCIE_PAXB_BCMA;
pcie->base = bdev->io_addr;
if (!pcie->base) {
dev_err(dev, "no controller registers\n");
diff --git a/drivers/pci/host/pcie-iproc-msi.c b/drivers/pci/host/pcie-iproc-msi.c
index 9a2973bdc78a..9fad7915f82a 100644
--- a/drivers/pci/host/pcie-iproc-msi.c
+++ b/drivers/pci/host/pcie-iproc-msi.c
@@ -563,6 +563,7 @@ int iproc_msi_init(struct iproc_pcie *pcie, struct device_node *node)
}
switch (pcie->type) {
+ case IPROC_PCIE_PAXB_BCMA:
case IPROC_PCIE_PAXB:
msi->reg_offsets = iproc_msi_reg_paxb;
msi->nr_eq_region = 1;
diff --git a/drivers/pci/host/pcie-iproc-platform.c b/drivers/pci/host/pcie-iproc-platform.c
index a3de087976b3..22d814a78a78 100644
--- a/drivers/pci/host/pcie-iproc-platform.c
+++ b/drivers/pci/host/pcie-iproc-platform.c
@@ -31,8 +31,14 @@ static const struct of_device_id iproc_pcie_of_match_table[] = {
.compatible = "brcm,iproc-pcie",
.data = (int *)IPROC_PCIE_PAXB,
}, {
+ .compatible = "brcm,iproc-pcie-paxb-v2",
+ .data = (int *)IPROC_PCIE_PAXB_V2,
+ }, {
.compatible = "brcm,iproc-pcie-paxc",
.data = (int *)IPROC_PCIE_PAXC,
+ }, {
+ .compatible = "brcm,iproc-pcie-paxc-v2",
+ .data = (int *)IPROC_PCIE_PAXC_V2,
},
{ /* sentinel */ }
};
@@ -84,19 +90,6 @@ static int iproc_pcie_pltfm_probe(struct platform_device *pdev)
return ret;
}
pcie->ob.axi_offset = val;
-
- ret = of_property_read_u32(np, "brcm,pcie-ob-window-size",
- &val);
- if (ret) {
- dev_err(dev,
- "missing brcm,pcie-ob-window-size property\n");
- return ret;
- }
- pcie->ob.window_size = (resource_size_t)val * SZ_1M;
-
- if (of_property_read_bool(np, "brcm,pcie-ob-oarr-size"))
- pcie->ob.set_oarr_size = true;
-
pcie->need_ob_cfg = true;
}
@@ -115,7 +108,14 @@ static int iproc_pcie_pltfm_probe(struct platform_device *pdev)
return ret;
}
- pcie->map_irq = of_irq_parse_and_map_pci;
+ /* PAXC doesn't support legacy IRQs, skip mapping */
+ switch (pcie->type) {
+ case IPROC_PCIE_PAXC:
+ case IPROC_PCIE_PAXC_V2:
+ break;
+ default:
+ pcie->map_irq = of_irq_parse_and_map_pci;
+ }
ret = iproc_pcie_setup(pcie, &res);
if (ret)
diff --git a/drivers/pci/host/pcie-iproc.c b/drivers/pci/host/pcie-iproc.c
index 0b999a9fb843..3ebc025499b9 100644
--- a/drivers/pci/host/pcie-iproc.c
+++ b/drivers/pci/host/pcie-iproc.c
@@ -21,6 +21,7 @@
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
+#include <linux/irqchip/arm-gic-v3.h>
#include <linux/platform_device.h>
#include <linux/of_address.h>
#include <linux/of_pci.h>
@@ -38,6 +39,12 @@
#define RC_PCIE_RST_OUTPUT BIT(RC_PCIE_RST_OUTPUT_SHIFT)
#define PAXC_RESET_MASK 0x7f
+#define GIC_V3_CFG_SHIFT 0
+#define GIC_V3_CFG BIT(GIC_V3_CFG_SHIFT)
+
+#define MSI_ENABLE_CFG_SHIFT 0
+#define MSI_ENABLE_CFG BIT(MSI_ENABLE_CFG_SHIFT)
+
#define CFG_IND_ADDR_MASK 0x00001ffc
#define CFG_ADDR_BUS_NUM_SHIFT 20
@@ -58,59 +65,319 @@
#define PCIE_DL_ACTIVE_SHIFT 2
#define PCIE_DL_ACTIVE BIT(PCIE_DL_ACTIVE_SHIFT)
+#define APB_ERR_EN_SHIFT 0
+#define APB_ERR_EN BIT(APB_ERR_EN_SHIFT)
+
+/* derive the enum index of the outbound/inbound mapping registers */
+#define MAP_REG(base_reg, index) ((base_reg) + (index) * 2)
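+
+/*
+ * Example: the OARR/OMAP (and IARR/IMAP) entries in the register enum
+ * below are interleaved, so MAP_REG(IPROC_PCIE_OARR0, 1) resolves to
+ * IPROC_PCIE_OARR1 and MAP_REG(IPROC_PCIE_OMAP0, 1) to IPROC_PCIE_OMAP1.
+ */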
+
+/*
+ * Maximum number of outbound mapping window sizes that can be supported by any
+ * OARR/OMAP mapping pair
+ */
+#define MAX_NUM_OB_WINDOW_SIZES 4
+
#define OARR_VALID_SHIFT 0
#define OARR_VALID BIT(OARR_VALID_SHIFT)
#define OARR_SIZE_CFG_SHIFT 1
-#define OARR_SIZE_CFG BIT(OARR_SIZE_CFG_SHIFT)
-#define PCI_EXP_CAP 0xac
+/*
+ * Maximum number of inbound mapping region sizes that can be supported by an
+ * IARR
+ */
+#define MAX_NUM_IB_REGION_SIZES 9
+
+#define IMAP_VALID_SHIFT 0
+#define IMAP_VALID BIT(IMAP_VALID_SHIFT)
-#define MAX_NUM_OB_WINDOWS 2
+#define PCI_EXP_CAP 0xac
#define IPROC_PCIE_REG_INVALID 0xffff
+/**
+ * iProc PCIe outbound mapping controller-specific parameters
+ *
+ * @window_sizes: list of supported outbound mapping window sizes in MB
+ * @nr_sizes: number of supported outbound mapping window sizes
+ */
+struct iproc_pcie_ob_map {
+ resource_size_t window_sizes[MAX_NUM_OB_WINDOW_SIZES];
+ unsigned int nr_sizes;
+};
+
+static const struct iproc_pcie_ob_map paxb_ob_map[] = {
+ {
+ /* OARR0/OMAP0 */
+ .window_sizes = { 128, 256 },
+ .nr_sizes = 2,
+ },
+ {
+ /* OARR1/OMAP1 */
+ .window_sizes = { 128, 256 },
+ .nr_sizes = 2,
+ },
+};
+
+static const struct iproc_pcie_ob_map paxb_v2_ob_map[] = {
+ {
+ /* OARR0/OMAP0 */
+ .window_sizes = { 128, 256 },
+ .nr_sizes = 2,
+ },
+ {
+ /* OARR1/OMAP1 */
+ .window_sizes = { 128, 256 },
+ .nr_sizes = 2,
+ },
+ {
+ /* OARR2/OMAP2 */
+ .window_sizes = { 128, 256, 512, 1024 },
+ .nr_sizes = 4,
+ },
+ {
+ /* OARR3/OMAP3 */
+ .window_sizes = { 128, 256, 512, 1024 },
+ .nr_sizes = 4,
+ },
+};
+
+/**
+ * iProc PCIe inbound mapping type
+ */
+enum iproc_pcie_ib_map_type {
+ /* for DDR memory */
+ IPROC_PCIE_IB_MAP_MEM = 0,
+
+ /* for device I/O memory */
+ IPROC_PCIE_IB_MAP_IO,
+
+ /* invalid or unused */
+ IPROC_PCIE_IB_MAP_INVALID
+};
+
+/**
+ * iProc PCIe inbound mapping controller-specific parameters
+ *
+ * @type: inbound mapping region type
+ * @size_unit: inbound mapping region size unit, could be SZ_1K, SZ_1M, or
+ * SZ_1G
+ * @region_sizes: list of supported inbound mapping region sizes in KB, MB, or
+ * GB, depending on the size unit
+ * @nr_sizes: number of supported inbound mapping region sizes
+ * @nr_windows: number of supported inbound mapping windows for the region
+ * @imap_addr_offset: register offset between the upper and lower 32-bit
+ * IMAP address registers
+ * @imap_window_offset: register offset between each IMAP window
+ */
+struct iproc_pcie_ib_map {
+ enum iproc_pcie_ib_map_type type;
+ unsigned int size_unit;
+ resource_size_t region_sizes[MAX_NUM_IB_REGION_SIZES];
+ unsigned int nr_sizes;
+ unsigned int nr_windows;
+ u16 imap_addr_offset;
+ u16 imap_window_offset;
+};
+
+static const struct iproc_pcie_ib_map paxb_v2_ib_map[] = {
+ {
+ /* IARR0/IMAP0 */
+ .type = IPROC_PCIE_IB_MAP_IO,
+ .size_unit = SZ_1K,
+ .region_sizes = { 32 },
+ .nr_sizes = 1,
+ .nr_windows = 8,
+ .imap_addr_offset = 0x40,
+ .imap_window_offset = 0x4,
+ },
+ {
+ /* IARR1/IMAP1 (currently unused) */
+ .type = IPROC_PCIE_IB_MAP_INVALID,
+ },
+ {
+ /* IARR2/IMAP2 */
+ .type = IPROC_PCIE_IB_MAP_MEM,
+ .size_unit = SZ_1M,
+ .region_sizes = { 64, 128, 256, 512, 1024, 2048, 4096, 8192,
+ 16384 },
+ .nr_sizes = 9,
+ .nr_windows = 1,
+ .imap_addr_offset = 0x4,
+ .imap_window_offset = 0x8,
+ },
+ {
+ /* IARR3/IMAP3 */
+ .type = IPROC_PCIE_IB_MAP_MEM,
+ .size_unit = SZ_1G,
+ .region_sizes = { 1, 2, 4, 8, 16, 32 },
+ .nr_sizes = 6,
+ .nr_windows = 8,
+ .imap_addr_offset = 0x4,
+ .imap_window_offset = 0x8,
+ },
+ {
+ /* IARR4/IMAP4 */
+ .type = IPROC_PCIE_IB_MAP_MEM,
+ .size_unit = SZ_1G,
+ .region_sizes = { 32, 64, 128, 256, 512 },
+ .nr_sizes = 5,
+ .nr_windows = 8,
+ .imap_addr_offset = 0x4,
+ .imap_window_offset = 0x8,
+ },
+};
+
+/*
+ * iProc PCIe host registers
+ */
enum iproc_pcie_reg {
+ /* clock/reset signal control */
IPROC_PCIE_CLK_CTRL = 0,
+
+ /*
+ * To allow MSI to be steered to an external MSI controller (e.g., ARM
+ * GICv3 ITS)
+ */
+ IPROC_PCIE_MSI_GIC_MODE,
+
+ /*
+ * IPROC_PCIE_MSI_BASE_ADDR and IPROC_PCIE_MSI_WINDOW_SIZE define the
+ * window where the MSI posted writes are written, for the writes to be
+ * interpreted as MSI writes.
+ */
+ IPROC_PCIE_MSI_BASE_ADDR,
+ IPROC_PCIE_MSI_WINDOW_SIZE,
+
+ /*
+ * To hold the address of the register where the MSI writes are
+ * programmed. When the ARM GICv3 ITS is used, this should be
+ * programmed with the address of the GITS_TRANSLATER register.
+ */
+ IPROC_PCIE_MSI_ADDR_LO,
+ IPROC_PCIE_MSI_ADDR_HI,
+
+ /* enable MSI */
+ IPROC_PCIE_MSI_EN_CFG,
+
+ /* allow access to root complex configuration space */
IPROC_PCIE_CFG_IND_ADDR,
IPROC_PCIE_CFG_IND_DATA,
+
+ /* allow access to device configuration space */
IPROC_PCIE_CFG_ADDR,
IPROC_PCIE_CFG_DATA,
+
+ /* enable INTx */
IPROC_PCIE_INTX_EN,
- IPROC_PCIE_OARR_LO,
- IPROC_PCIE_OARR_HI,
- IPROC_PCIE_OMAP_LO,
- IPROC_PCIE_OMAP_HI,
+
+ /* outbound address mapping */
+ IPROC_PCIE_OARR0,
+ IPROC_PCIE_OMAP0,
+ IPROC_PCIE_OARR1,
+ IPROC_PCIE_OMAP1,
+ IPROC_PCIE_OARR2,
+ IPROC_PCIE_OMAP2,
+ IPROC_PCIE_OARR3,
+ IPROC_PCIE_OMAP3,
+
+ /* inbound address mapping */
+ IPROC_PCIE_IARR0,
+ IPROC_PCIE_IMAP0,
+ IPROC_PCIE_IARR1,
+ IPROC_PCIE_IMAP1,
+ IPROC_PCIE_IARR2,
+ IPROC_PCIE_IMAP2,
+ IPROC_PCIE_IARR3,
+ IPROC_PCIE_IMAP3,
+ IPROC_PCIE_IARR4,
+ IPROC_PCIE_IMAP4,
+
+ /* link status */
IPROC_PCIE_LINK_STATUS,
+
+ /* enable APB error for unsupported requests */
+ IPROC_PCIE_APB_ERR_EN,
+
+ /* total number of core registers */
+ IPROC_PCIE_MAX_NUM_REG,
+};
+
+/* iProc PCIe PAXB BCMA registers */
+static const u16 iproc_pcie_reg_paxb_bcma[] = {
+ [IPROC_PCIE_CLK_CTRL] = 0x000,
+ [IPROC_PCIE_CFG_IND_ADDR] = 0x120,
+ [IPROC_PCIE_CFG_IND_DATA] = 0x124,
+ [IPROC_PCIE_CFG_ADDR] = 0x1f8,
+ [IPROC_PCIE_CFG_DATA] = 0x1fc,
+ [IPROC_PCIE_INTX_EN] = 0x330,
+ [IPROC_PCIE_LINK_STATUS] = 0xf0c,
};
/* iProc PCIe PAXB registers */
static const u16 iproc_pcie_reg_paxb[] = {
- [IPROC_PCIE_CLK_CTRL] = 0x000,
- [IPROC_PCIE_CFG_IND_ADDR] = 0x120,
- [IPROC_PCIE_CFG_IND_DATA] = 0x124,
- [IPROC_PCIE_CFG_ADDR] = 0x1f8,
- [IPROC_PCIE_CFG_DATA] = 0x1fc,
- [IPROC_PCIE_INTX_EN] = 0x330,
- [IPROC_PCIE_OARR_LO] = 0xd20,
- [IPROC_PCIE_OARR_HI] = 0xd24,
- [IPROC_PCIE_OMAP_LO] = 0xd40,
- [IPROC_PCIE_OMAP_HI] = 0xd44,
- [IPROC_PCIE_LINK_STATUS] = 0xf0c,
+ [IPROC_PCIE_CLK_CTRL] = 0x000,
+ [IPROC_PCIE_CFG_IND_ADDR] = 0x120,
+ [IPROC_PCIE_CFG_IND_DATA] = 0x124,
+ [IPROC_PCIE_CFG_ADDR] = 0x1f8,
+ [IPROC_PCIE_CFG_DATA] = 0x1fc,
+ [IPROC_PCIE_INTX_EN] = 0x330,
+ [IPROC_PCIE_OARR0] = 0xd20,
+ [IPROC_PCIE_OMAP0] = 0xd40,
+ [IPROC_PCIE_OARR1] = 0xd28,
+ [IPROC_PCIE_OMAP1] = 0xd48,
+ [IPROC_PCIE_LINK_STATUS] = 0xf0c,
+ [IPROC_PCIE_APB_ERR_EN] = 0xf40,
+};
+
+/* iProc PCIe PAXB v2 registers */
+static const u16 iproc_pcie_reg_paxb_v2[] = {
+ [IPROC_PCIE_CLK_CTRL] = 0x000,
+ [IPROC_PCIE_CFG_IND_ADDR] = 0x120,
+ [IPROC_PCIE_CFG_IND_DATA] = 0x124,
+ [IPROC_PCIE_CFG_ADDR] = 0x1f8,
+ [IPROC_PCIE_CFG_DATA] = 0x1fc,
+ [IPROC_PCIE_INTX_EN] = 0x330,
+ [IPROC_PCIE_OARR0] = 0xd20,
+ [IPROC_PCIE_OMAP0] = 0xd40,
+ [IPROC_PCIE_OARR1] = 0xd28,
+ [IPROC_PCIE_OMAP1] = 0xd48,
+ [IPROC_PCIE_OARR2] = 0xd60,
+ [IPROC_PCIE_OMAP2] = 0xd68,
+ [IPROC_PCIE_OARR3] = 0xdf0,
+ [IPROC_PCIE_OMAP3] = 0xdf8,
+ [IPROC_PCIE_IARR0] = 0xd00,
+ [IPROC_PCIE_IMAP0] = 0xc00,
+ [IPROC_PCIE_IARR2] = 0xd10,
+ [IPROC_PCIE_IMAP2] = 0xcc0,
+ [IPROC_PCIE_IARR3] = 0xe00,
+ [IPROC_PCIE_IMAP3] = 0xe08,
+ [IPROC_PCIE_IARR4] = 0xe68,
+ [IPROC_PCIE_IMAP4] = 0xe70,
+ [IPROC_PCIE_LINK_STATUS] = 0xf0c,
+ [IPROC_PCIE_APB_ERR_EN] = 0xf40,
};
/* iProc PCIe PAXC v1 registers */
static const u16 iproc_pcie_reg_paxc[] = {
- [IPROC_PCIE_CLK_CTRL] = 0x000,
- [IPROC_PCIE_CFG_IND_ADDR] = 0x1f0,
- [IPROC_PCIE_CFG_IND_DATA] = 0x1f4,
- [IPROC_PCIE_CFG_ADDR] = 0x1f8,
- [IPROC_PCIE_CFG_DATA] = 0x1fc,
- [IPROC_PCIE_INTX_EN] = IPROC_PCIE_REG_INVALID,
- [IPROC_PCIE_OARR_LO] = IPROC_PCIE_REG_INVALID,
- [IPROC_PCIE_OARR_HI] = IPROC_PCIE_REG_INVALID,
- [IPROC_PCIE_OMAP_LO] = IPROC_PCIE_REG_INVALID,
- [IPROC_PCIE_OMAP_HI] = IPROC_PCIE_REG_INVALID,
- [IPROC_PCIE_LINK_STATUS] = IPROC_PCIE_REG_INVALID,
+ [IPROC_PCIE_CLK_CTRL] = 0x000,
+ [IPROC_PCIE_CFG_IND_ADDR] = 0x1f0,
+ [IPROC_PCIE_CFG_IND_DATA] = 0x1f4,
+ [IPROC_PCIE_CFG_ADDR] = 0x1f8,
+ [IPROC_PCIE_CFG_DATA] = 0x1fc,
+};
+
+/* iProc PCIe PAXC v2 registers */
+static const u16 iproc_pcie_reg_paxc_v2[] = {
+ [IPROC_PCIE_MSI_GIC_MODE] = 0x050,
+ [IPROC_PCIE_MSI_BASE_ADDR] = 0x074,
+ [IPROC_PCIE_MSI_WINDOW_SIZE] = 0x078,
+ [IPROC_PCIE_MSI_ADDR_LO] = 0x07c,
+ [IPROC_PCIE_MSI_ADDR_HI] = 0x080,
+ [IPROC_PCIE_MSI_EN_CFG] = 0x09c,
+ [IPROC_PCIE_CFG_IND_ADDR] = 0x1f0,
+ [IPROC_PCIE_CFG_IND_DATA] = 0x1f4,
+ [IPROC_PCIE_CFG_ADDR] = 0x1f8,
+ [IPROC_PCIE_CFG_DATA] = 0x1fc,
};
static inline struct iproc_pcie *iproc_data(struct pci_bus *bus)
@@ -159,16 +426,26 @@ static inline void iproc_pcie_write_reg(struct iproc_pcie *pcie,
writel(val, pcie->base + offset);
}
-static inline void iproc_pcie_ob_write(struct iproc_pcie *pcie,
- enum iproc_pcie_reg reg,
- unsigned window, u32 val)
+/*
+ * APB error forwarding can be disabled while accessing the configuration
+ * registers of the endpoint device, to prevent unsupported requests
+ * (typically seen during enumeration with multi-function devices) from
+ * triggering a system exception.
+ */
+static inline void iproc_pcie_apb_err_disable(struct pci_bus *bus,
+ bool disable)
{
- u16 offset = iproc_pcie_reg_offset(pcie, reg);
-
- if (iproc_pcie_reg_is_invalid(offset))
- return;
+ struct iproc_pcie *pcie = iproc_data(bus);
+ u32 val;
- writel(val, pcie->base + offset + (window * 8));
+ if (bus->number && pcie->has_apb_err_disable) {
+ val = iproc_pcie_read_reg(pcie, IPROC_PCIE_APB_ERR_EN);
+ if (disable)
+ val &= ~APB_ERR_EN;
+ else
+ val |= APB_ERR_EN;
+ iproc_pcie_write_reg(pcie, IPROC_PCIE_APB_ERR_EN, val);
+ }
}
/**
@@ -204,7 +481,7 @@ static void __iomem *iproc_pcie_map_cfg_bus(struct pci_bus *bus,
* PAXC is connected to an internally emulated EP within the SoC. It
* allows only one device.
*/
- if (pcie->type == IPROC_PCIE_PAXC)
+ if (pcie->ep_is_internal)
if (slot > 0)
return NULL;
@@ -222,26 +499,47 @@ static void __iomem *iproc_pcie_map_cfg_bus(struct pci_bus *bus,
return (pcie->base + offset);
}
+static int iproc_pcie_config_read32(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 *val)
+{
+ int ret;
+
+ iproc_pcie_apb_err_disable(bus, true);
+ ret = pci_generic_config_read32(bus, devfn, where, size, val);
+ iproc_pcie_apb_err_disable(bus, false);
+
+ return ret;
+}
+
+static int iproc_pcie_config_write32(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 val)
+{
+ int ret;
+
+ iproc_pcie_apb_err_disable(bus, true);
+ ret = pci_generic_config_write32(bus, devfn, where, size, val);
+ iproc_pcie_apb_err_disable(bus, false);
+
+ return ret;
+}
+
static struct pci_ops iproc_pcie_ops = {
.map_bus = iproc_pcie_map_cfg_bus,
- .read = pci_generic_config_read32,
- .write = pci_generic_config_write32,
+ .read = iproc_pcie_config_read32,
+ .write = iproc_pcie_config_write32,
};
static void iproc_pcie_reset(struct iproc_pcie *pcie)
{
u32 val;
- if (pcie->type == IPROC_PCIE_PAXC) {
- val = iproc_pcie_read_reg(pcie, IPROC_PCIE_CLK_CTRL);
- val &= ~PAXC_RESET_MASK;
- iproc_pcie_write_reg(pcie, IPROC_PCIE_CLK_CTRL, val);
- udelay(100);
- val |= PAXC_RESET_MASK;
- iproc_pcie_write_reg(pcie, IPROC_PCIE_CLK_CTRL, val);
- udelay(100);
+ /*
+ * PAXC and the internal emulated endpoint device downstream should not
+ * be reset. If firmware has been loaded on the endpoint device at an
+ * earlier boot stage, reset here causes issues.
+ */
+ if (pcie->ep_is_internal)
return;
- }
/*
* Select perst_b signal as reset source. Put the device into reset,
@@ -270,7 +568,7 @@ static int iproc_pcie_check_link(struct iproc_pcie *pcie, struct pci_bus *bus)
* PAXC connects to emulated endpoint devices directly and does not
* have a Serdes. Therefore skip the link detection logic here.
*/
- if (pcie->type == IPROC_PCIE_PAXC)
+ if (pcie->ep_is_internal)
return 0;
val = iproc_pcie_read_reg(pcie, IPROC_PCIE_LINK_STATUS);
@@ -334,6 +632,58 @@ static void iproc_pcie_enable(struct iproc_pcie *pcie)
iproc_pcie_write_reg(pcie, IPROC_PCIE_INTX_EN, SYS_RC_INTX_MASK);
}
+static inline bool iproc_pcie_ob_is_valid(struct iproc_pcie *pcie,
+ int window_idx)
+{
+ u32 val;
+
+ val = iproc_pcie_read_reg(pcie, MAP_REG(IPROC_PCIE_OARR0, window_idx));
+
+ return !!(val & OARR_VALID);
+}
+
+static inline int iproc_pcie_ob_write(struct iproc_pcie *pcie, int window_idx,
+ int size_idx, u64 axi_addr, u64 pci_addr)
+{
+ struct device *dev = pcie->dev;
+ u16 oarr_offset, omap_offset;
+
+ /*
+ * Derive the OARR/OMAP offset from the first pair (OARR0/OMAP0) based
+ * on window index.
+ */
+ oarr_offset = iproc_pcie_reg_offset(pcie, MAP_REG(IPROC_PCIE_OARR0,
+ window_idx));
+ omap_offset = iproc_pcie_reg_offset(pcie, MAP_REG(IPROC_PCIE_OMAP0,
+ window_idx));
+ if (iproc_pcie_reg_is_invalid(oarr_offset) ||
+ iproc_pcie_reg_is_invalid(omap_offset))
+ return -EINVAL;
+
+ /*
+ * Program the OARR registers. The upper 32-bit OARR register is
+ * always right after the lower 32-bit OARR register.
+ */
+ writel(lower_32_bits(axi_addr) | (size_idx << OARR_SIZE_CFG_SHIFT) |
+ OARR_VALID, pcie->base + oarr_offset);
+ writel(upper_32_bits(axi_addr), pcie->base + oarr_offset + 4);
+
+ /* now program the OMAP registers */
+ writel(lower_32_bits(pci_addr), pcie->base + omap_offset);
+ writel(upper_32_bits(pci_addr), pcie->base + omap_offset + 4);
+
+ dev_info(dev, "ob window [%d]: offset 0x%x axi %pap pci %pap\n",
+ window_idx, oarr_offset, &axi_addr, &pci_addr);
+ dev_info(dev, "oarr lo 0x%x oarr hi 0x%x\n",
+ readl(pcie->base + oarr_offset),
+ readl(pcie->base + oarr_offset + 4));
+ dev_info(dev, "omap lo 0x%x omap hi 0x%x\n",
+ readl(pcie->base + omap_offset),
+ readl(pcie->base + omap_offset + 4));
+
+ return 0;
+}
+
/**
* Some iProc SoCs require the SW to configure the outbound address mapping
*
@@ -350,24 +700,7 @@ static int iproc_pcie_setup_ob(struct iproc_pcie *pcie, u64 axi_addr,
{
struct iproc_pcie_ob *ob = &pcie->ob;
struct device *dev = pcie->dev;
- unsigned i;
- u64 max_size = (u64)ob->window_size * MAX_NUM_OB_WINDOWS;
- u64 remainder;
-
- if (size > max_size) {
- dev_err(dev,
- "res size %pap exceeds max supported size 0x%llx\n",
- &size, max_size);
- return -EINVAL;
- }
-
- div64_u64_rem(size, ob->window_size, &remainder);
- if (remainder) {
- dev_err(dev,
- "res size %pap needs to be multiple of window size %pap\n",
- &size, &ob->window_size);
- return -EINVAL;
- }
+ int ret = -EINVAL, window_idx, size_idx;
if (axi_addr < ob->axi_offset) {
dev_err(dev, "axi address %pap less than offset %pap\n",
@@ -381,26 +714,70 @@ static int iproc_pcie_setup_ob(struct iproc_pcie *pcie, u64 axi_addr,
*/
axi_addr -= ob->axi_offset;
- for (i = 0; i < MAX_NUM_OB_WINDOWS; i++) {
- iproc_pcie_ob_write(pcie, IPROC_PCIE_OARR_LO, i,
- lower_32_bits(axi_addr) | OARR_VALID |
- (ob->set_oarr_size ? 1 : 0));
- iproc_pcie_ob_write(pcie, IPROC_PCIE_OARR_HI, i,
- upper_32_bits(axi_addr));
- iproc_pcie_ob_write(pcie, IPROC_PCIE_OMAP_LO, i,
- lower_32_bits(pci_addr));
- iproc_pcie_ob_write(pcie, IPROC_PCIE_OMAP_HI, i,
- upper_32_bits(pci_addr));
-
- size -= ob->window_size;
- if (size == 0)
+ /* iterate through all OARR/OMAP mapping windows */
+ for (window_idx = ob->nr_windows - 1; window_idx >= 0; window_idx--) {
+ const struct iproc_pcie_ob_map *ob_map =
+ &pcie->ob_map[window_idx];
+
+ /*
+ * If the current outbound window is already in use, move on to the
+ * next one.
+ */
+ if (iproc_pcie_ob_is_valid(pcie, window_idx))
+ continue;
+
+ /*
+ * Iterate through all supported window sizes within the
+ * OARR/OMAP pair to find a match. Go through the window sizes
+ * in descending order.
+ */
+ for (size_idx = ob_map->nr_sizes - 1; size_idx >= 0;
+ size_idx--) {
+ resource_size_t window_size =
+ ob_map->window_sizes[size_idx] * SZ_1M;
+
+ if (size < window_size)
+ continue;
+
+ if (!IS_ALIGNED(axi_addr, window_size) ||
+ !IS_ALIGNED(pci_addr, window_size)) {
+ dev_err(dev,
+ "axi %pap or pci %pap not aligned\n",
+ &axi_addr, &pci_addr);
+ return -EINVAL;
+ }
+
+ /*
+ * Match found! Program both OARR and OMAP and mark
+ * them as a valid entry.
+ */
+ ret = iproc_pcie_ob_write(pcie, window_idx, size_idx,
+ axi_addr, pci_addr);
+ if (ret)
+ goto err_ob;
+
+ size -= window_size;
+ if (size == 0)
+ return 0;
+
+ /*
+ * If we are here, we are done with the current window,
+ * but have not yet finished all mappings. Move on
+ * to the next window.
+ */
+ axi_addr += window_size;
+ pci_addr += window_size;
break;
-
- axi_addr += ob->window_size;
- pci_addr += ob->window_size;
+ }
}
- return 0;
+err_ob:
+ dev_err(dev, "unable to configure outbound mapping\n");
+ dev_err(dev,
+ "axi %pap, axi offset %pap, pci %pap, res size %pap\n",
+ &axi_addr, &ob->axi_offset, &pci_addr, &size);
+
+ return ret;
}
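
The double loop above allocates greedily: windows are walked from the highest index down, and within each OARR/OMAP pair the largest supported size is tried first until the requested size is consumed. A standalone sketch of the strategy, with hypothetical window sizes:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const uint64_t MB = 1ULL << 20;
	const uint64_t window_sizes[] = { 128 * MB, 256 * MB };  /* hypothetical */
	uint64_t size = 384 * MB;                 /* requested mapping */
	uint64_t axi = 0x40000000ULL, pci = 0x0;
	int w, s;

	for (w = 1; w >= 0 && size; w--) {        /* highest window first */
		for (s = 1; s >= 0; s--) {        /* largest size first */
			uint64_t ws = window_sizes[s];

			if (size < ws || axi % ws || pci % ws)
				continue;

			printf("window %d: axi 0x%llx pci 0x%llx %llu MB\n",
			       w, (unsigned long long)axi,
			       (unsigned long long)pci,
			       (unsigned long long)(ws / MB));
			size -= ws;
			axi += ws;
			pci += ws;
			break;
		}
	}
	return size ? 1 : 0;      /* non-zero if the request did not fit */
}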
static int iproc_pcie_map_ranges(struct iproc_pcie *pcie,
@@ -434,13 +811,323 @@ static int iproc_pcie_map_ranges(struct iproc_pcie *pcie,
return 0;
}
+static inline bool iproc_pcie_ib_is_in_use(struct iproc_pcie *pcie,
+ int region_idx)
+{
+ const struct iproc_pcie_ib_map *ib_map = &pcie->ib_map[region_idx];
+ u32 val;
+
+ val = iproc_pcie_read_reg(pcie, MAP_REG(IPROC_PCIE_IARR0, region_idx));
+
+ return !!(val & (BIT(ib_map->nr_sizes) - 1));
+}
+
+static inline bool iproc_pcie_ib_check_type(const struct iproc_pcie_ib_map *ib_map,
+ enum iproc_pcie_ib_map_type type)
+{
+ return !!(ib_map->type == type);
+}
+
+static int iproc_pcie_ib_write(struct iproc_pcie *pcie, int region_idx,
+ int size_idx, int nr_windows, u64 axi_addr,
+ u64 pci_addr, resource_size_t size)
+{
+ struct device *dev = pcie->dev;
+ const struct iproc_pcie_ib_map *ib_map = &pcie->ib_map[region_idx];
+ u16 iarr_offset, imap_offset;
+ u32 val;
+ int window_idx;
+
+ iarr_offset = iproc_pcie_reg_offset(pcie,
+ MAP_REG(IPROC_PCIE_IARR0, region_idx));
+ imap_offset = iproc_pcie_reg_offset(pcie,
+ MAP_REG(IPROC_PCIE_IMAP0, region_idx));
+ if (iproc_pcie_reg_is_invalid(iarr_offset) ||
+ iproc_pcie_reg_is_invalid(imap_offset))
+ return -EINVAL;
+
+ dev_info(dev, "ib region [%d]: offset 0x%x axi %pap pci %pap\n",
+ region_idx, iarr_offset, &axi_addr, &pci_addr);
+
+ /*
+ * Program the IARR registers. The upper 32-bit IARR register is
+ * always right after the lower 32-bit IARR register.
+ */
+ writel(lower_32_bits(pci_addr) | BIT(size_idx),
+ pcie->base + iarr_offset);
+ writel(upper_32_bits(pci_addr), pcie->base + iarr_offset + 4);
+
+ dev_info(dev, "iarr lo 0x%x iarr hi 0x%x\n",
+ readl(pcie->base + iarr_offset),
+ readl(pcie->base + iarr_offset + 4));
+
+ /*
+ * Now program the IMAP registers. Each IARR region may have one or
+ * more IMAP windows.
+ */
+ size >>= ilog2(nr_windows);
+ for (window_idx = 0; window_idx < nr_windows; window_idx++) {
+ val = readl(pcie->base + imap_offset);
+ val |= lower_32_bits(axi_addr) | IMAP_VALID;
+ writel(val, pcie->base + imap_offset);
+ writel(upper_32_bits(axi_addr),
+ pcie->base + imap_offset + ib_map->imap_addr_offset);
+
+ dev_info(dev, "imap window [%d] lo 0x%x hi 0x%x\n",
+ window_idx, readl(pcie->base + imap_offset),
+ readl(pcie->base + imap_offset +
+ ib_map->imap_addr_offset));
+
+ imap_offset += ib_map->imap_window_offset;
+ axi_addr += size;
+ }
+
+ return 0;
+}
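
The size >>= ilog2(nr_windows) step above splits the IARR region evenly across its IMAP windows, which only works because nr_windows is a power of two. A standalone sketch with hypothetical numbers:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t size = 8ULL << 30;      /* hypothetical 8 GB IARR region */
	unsigned int nr_windows = 8;     /* power of two */
	uint64_t axi = 0x80000000ULL;
	unsigned int w, shift = 0, n = nr_windows;

	while (n >>= 1)                  /* same result as ilog2(nr_windows) */
		shift++;
	size >>= shift;                  /* per-IMAP slice: 1 GB */

	for (w = 0; w < nr_windows; w++, axi += size)
		printf("imap %u -> axi 0x%llx\n", w,
		       (unsigned long long)axi);
	return 0;
}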
+
+static int iproc_pcie_setup_ib(struct iproc_pcie *pcie,
+ struct of_pci_range *range,
+ enum iproc_pcie_ib_map_type type)
+{
+ struct device *dev = pcie->dev;
+ struct iproc_pcie_ib *ib = &pcie->ib;
+ int ret;
+ unsigned int region_idx, size_idx;
+ u64 axi_addr = range->cpu_addr, pci_addr = range->pci_addr;
+ resource_size_t size = range->size;
+
+ /* iterate through all IARR mapping regions */
+ for (region_idx = 0; region_idx < ib->nr_regions; region_idx++) {
+ const struct iproc_pcie_ib_map *ib_map =
+ &pcie->ib_map[region_idx];
+
+ /*
+ * If the current inbound region is already in use or is not a
+ * compatible type, move on to the next one.
+ */
+ if (iproc_pcie_ib_is_in_use(pcie, region_idx) ||
+ !iproc_pcie_ib_check_type(ib_map, type))
+ continue;
+
+ /* iterate through all supported region sizes to find a match */
+ for (size_idx = 0; size_idx < ib_map->nr_sizes; size_idx++) {
+ resource_size_t region_size =
+ ib_map->region_sizes[size_idx] * ib_map->size_unit;
+
+ if (size != region_size)
+ continue;
+
+ if (!IS_ALIGNED(axi_addr, region_size) ||
+ !IS_ALIGNED(pci_addr, region_size)) {
+ dev_err(dev,
+ "axi %pap or pci %pap not aligned\n",
+ &axi_addr, &pci_addr);
+ return -EINVAL;
+ }
+
+ /* Match found! Program IARR and all IMAP windows. */
+ ret = iproc_pcie_ib_write(pcie, region_idx, size_idx,
+ ib_map->nr_windows, axi_addr,
+ pci_addr, size);
+ if (ret)
+ goto err_ib;
+ else
+ return 0;
+
+ }
+ }
+ ret = -EINVAL;
+
+err_ib:
+ dev_err(dev, "unable to configure inbound mapping\n");
+ dev_err(dev, "axi %pap, pci %pap, res size %pap\n",
+ &axi_addr, &pci_addr, &size);
+
+ return ret;
+}
+
+static int pci_dma_range_parser_init(struct of_pci_range_parser *parser,
+ struct device_node *node)
+{
+ const int na = 3, ns = 2;
+ int rlen;
+
+ parser->node = node;
+ parser->pna = of_n_addr_cells(node);
+ parser->np = parser->pna + na + ns;
+
+ parser->range = of_get_property(node, "dma-ranges", &rlen);
+ if (!parser->range)
+ return -ENOENT;
+
+ parser->end = parser->range + rlen / sizeof(__be32);
+ return 0;
+}
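
The cell arithmetic above mirrors the dma-ranges layout: each entry carries a 3-cell PCI address, a parent address of pna cells, and a 2-cell size. A trivial standalone check of the per-entry footprint, assuming a hypothetical parent cell count:

#include <stdio.h>

int main(void)
{
	const int na = 3, ns = 2;   /* PCI address cells, size cells */
	int pna = 2;                /* hypothetical parent #address-cells */
	int np = pna + na + ns;     /* cells per dma-ranges entry */
	int rlen = 2 * np * 4;      /* hypothetical property: two entries */

	printf("%d cells (%d bytes) per entry, %d entries\n",
	       np, np * 4, rlen / 4 / np);
	return 0;
}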
+
+static int iproc_pcie_map_dma_ranges(struct iproc_pcie *pcie)
+{
+ struct of_pci_range range;
+ struct of_pci_range_parser parser;
+ int ret;
+
+ /* Get the dma-ranges from DT */
+ ret = pci_dma_range_parser_init(&parser, pcie->dev->of_node);
+ if (ret)
+ return ret;
+
+ for_each_of_pci_range(&parser, &range) {
+ /* Each range entry corresponds to an inbound mapping region */
+ ret = iproc_pcie_setup_ib(pcie, &range, IPROC_PCIE_IB_MAP_MEM);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int iproce_pcie_get_msi(struct iproc_pcie *pcie,
+ struct device_node *msi_node,
+ u64 *msi_addr)
+{
+ struct device *dev = pcie->dev;
+ int ret;
+ struct resource res;
+
+ /*
+ * Check if 'msi-map' points to ARM GICv3 ITS, which is the only
+ * supported external MSI controller that requires steering.
+ */
+ if (!of_device_is_compatible(msi_node, "arm,gic-v3-its")) {
+ dev_err(dev, "unable to find compatible MSI controller\n");
+ return -ENODEV;
+ }
+
+ /* derive GITS_TRANSLATER address from GICv3 */
+ ret = of_address_to_resource(msi_node, 0, &res);
+ if (ret < 0) {
+ dev_err(dev, "unable to obtain MSI controller resources\n");
+ return ret;
+ }
+
+ *msi_addr = res.start + GITS_TRANSLATER;
+ return 0;
+}
+
+static int iproc_pcie_paxb_v2_msi_steer(struct iproc_pcie *pcie, u64 msi_addr)
+{
+ int ret;
+ struct of_pci_range range;
+
+ memset(&range, 0, sizeof(range));
+ range.size = SZ_32K;
+ range.pci_addr = range.cpu_addr = msi_addr & ~(range.size - 1);
+
+ ret = iproc_pcie_setup_ib(pcie, &range, IPROC_PCIE_IB_MAP_IO);
+ return ret;
+}
+
+static void iproc_pcie_paxc_v2_msi_steer(struct iproc_pcie *pcie, u64 msi_addr)
+{
+ u32 val;
+
+ /*
+ * Program bits [43:13] of the GITS_TRANSLATER address into
+ * bits [30:0] of the MSI base address register. In fact, in all
+ * iProc-based SoCs, all I/O register bases are well below the 32-bit
+ * boundary, so we can safely assume bits [43:32] are always zeros.
+ */
+ iproc_pcie_write_reg(pcie, IPROC_PCIE_MSI_BASE_ADDR,
+ (u32)(msi_addr >> 13));
+
+ /* use a default 8K window size */
+ iproc_pcie_write_reg(pcie, IPROC_PCIE_MSI_WINDOW_SIZE, 0);
+
+ /* steer MSI writes to the GICv3 ITS */
+ val = iproc_pcie_read_reg(pcie, IPROC_PCIE_MSI_GIC_MODE);
+ val |= GIC_V3_CFG;
+ iproc_pcie_write_reg(pcie, IPROC_PCIE_MSI_GIC_MODE, val);
+
+ /*
+ * Program bits [43:2] of the GITS_TRANSLATER address into the
+ * iProc MSI address registers.
+ */
+ msi_addr >>= 2;
+ iproc_pcie_write_reg(pcie, IPROC_PCIE_MSI_ADDR_HI,
+ upper_32_bits(msi_addr));
+ iproc_pcie_write_reg(pcie, IPROC_PCIE_MSI_ADDR_LO,
+ lower_32_bits(msi_addr));
+
+ /* enable MSI */
+ val = iproc_pcie_read_reg(pcie, IPROC_PCIE_MSI_EN_CFG);
+ val |= MSI_ENABLE_CFG;
+ iproc_pcie_write_reg(pcie, IPROC_PCIE_MSI_EN_CFG, val);
+}
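
A standalone sketch of the shift arithmetic in iproc_pcie_paxc_v2_msi_steer() above, using a hypothetical GITS_TRANSLATER address:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t msi_addr = 0x63c30000ULL + 0x10040;  /* hypothetical */

	uint32_t base = (uint32_t)(msi_addr >> 13);   /* bits [43:13] */
	uint64_t shifted = msi_addr >> 2;             /* bits [43:2] */

	printf("MSI_BASE_ADDR  = 0x%08x\n", base);
	printf("MSI_ADDR_HI/LO = 0x%08x / 0x%08x\n",
	       (uint32_t)(shifted >> 32), (uint32_t)shifted);
	return 0;
}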
+
+static int iproc_pcie_msi_steer(struct iproc_pcie *pcie,
+ struct device_node *msi_node)
+{
+ struct device *dev = pcie->dev;
+ int ret;
+ u64 msi_addr;
+
+ ret = iproce_pcie_get_msi(pcie, msi_node, &msi_addr);
+ if (ret < 0) {
+ dev_err(dev, "msi steering failed\n");
+ return ret;
+ }
+
+ switch (pcie->type) {
+ case IPROC_PCIE_PAXB_V2:
+ ret = iproc_pcie_paxb_v2_msi_steer(pcie, msi_addr);
+ if (ret)
+ return ret;
+ break;
+ case IPROC_PCIE_PAXC_V2:
+ iproc_pcie_paxc_v2_msi_steer(pcie, msi_addr);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static int iproc_pcie_msi_enable(struct iproc_pcie *pcie)
{
struct device_node *msi_node;
+ int ret;
+
+ /*
+ * Either the "msi-parent" or the "msi-map" phandle needs to exist
+ * for us to obtain the MSI node.
+ */
msi_node = of_parse_phandle(pcie->dev->of_node, "msi-parent", 0);
- if (!msi_node)
- return -ENODEV;
+ if (!msi_node) {
+ const __be32 *msi_map = NULL;
+ int len;
+ u32 phandle;
+
+ msi_map = of_get_property(pcie->dev->of_node, "msi-map", &len);
+ if (!msi_map)
+ return -ENODEV;
+
+ phandle = be32_to_cpup(msi_map + 1);
+ msi_node = of_find_node_by_phandle(phandle);
+ if (!msi_node)
+ return -ENODEV;
+ }
+
+ /*
+ * Certain revisions of the iProc PCIe controller require additional
+ * configuration to steer the MSI writes towards an external MSI
+ * controller.
+ */
+ if (pcie->need_msi_steer) {
+ ret = iproc_pcie_msi_steer(pcie, msi_node);
+ if (ret)
+ return ret;
+ }
/*
* If another MSI controller is being used, the call below should fail
@@ -454,6 +1141,65 @@ static void iproc_pcie_msi_disable(struct iproc_pcie *pcie)
iproc_msi_exit(pcie);
}
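
The msi-map fallback added to iproc_pcie_msi_enable() above reads the phandle one cell past the start of the property because each msi-map entry is laid out as <rid-base controller-phandle msi-base length>. A standalone sketch with hypothetical cell values, stored big-endian as in the device tree:

#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>   /* ntohl() stands in for be32_to_cpup() */

int main(void)
{
	uint32_t msi_map[] = {          /* one entry, big-endian cells */
		htonl(0x0000),          /* rid-base */
		htonl(0x004d),          /* phandle of the MSI controller */
		htonl(0x0000),          /* msi-base */
		htonl(0x10000),         /* length: the whole RID space */
	};
	uint32_t phandle = ntohl(msi_map[1]);

	printf("MSI controller phandle = 0x%x\n", phandle);
	return 0;
}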
+static int iproc_pcie_rev_init(struct iproc_pcie *pcie)
+{
+ struct device *dev = pcie->dev;
+ unsigned int reg_idx;
+ const u16 *regs;
+
+ switch (pcie->type) {
+ case IPROC_PCIE_PAXB_BCMA:
+ regs = iproc_pcie_reg_paxb_bcma;
+ break;
+ case IPROC_PCIE_PAXB:
+ regs = iproc_pcie_reg_paxb;
+ pcie->has_apb_err_disable = true;
+ if (pcie->need_ob_cfg) {
+ pcie->ob_map = paxb_ob_map;
+ pcie->ob.nr_windows = ARRAY_SIZE(paxb_ob_map);
+ }
+ break;
+ case IPROC_PCIE_PAXB_V2:
+ regs = iproc_pcie_reg_paxb_v2;
+ pcie->has_apb_err_disable = true;
+ if (pcie->need_ob_cfg) {
+ pcie->ob_map = paxb_v2_ob_map;
+ pcie->ob.nr_windows = ARRAY_SIZE(paxb_v2_ob_map);
+ }
+ pcie->ib.nr_regions = ARRAY_SIZE(paxb_v2_ib_map);
+ pcie->ib_map = paxb_v2_ib_map;
+ pcie->need_msi_steer = true;
+ break;
+ case IPROC_PCIE_PAXC:
+ regs = iproc_pcie_reg_paxc;
+ pcie->ep_is_internal = true;
+ break;
+ case IPROC_PCIE_PAXC_V2:
+ regs = iproc_pcie_reg_paxc_v2;
+ pcie->ep_is_internal = true;
+ pcie->need_msi_steer = true;
+ break;
+ default:
+ dev_err(dev, "incompatible iProc PCIe interface\n");
+ return -EINVAL;
+ }
+
+ pcie->reg_offsets = devm_kcalloc(dev, IPROC_PCIE_MAX_NUM_REG,
+ sizeof(*pcie->reg_offsets),
+ GFP_KERNEL);
+ if (!pcie->reg_offsets)
+ return -ENOMEM;
+
+ /* go through the register table and populate all valid registers */
+ pcie->reg_offsets[0] = (pcie->type == IPROC_PCIE_PAXC_V2) ?
+ IPROC_PCIE_REG_INVALID : regs[0];
+ for (reg_idx = 1; reg_idx < IPROC_PCIE_MAX_NUM_REG; reg_idx++)
+ pcie->reg_offsets[reg_idx] = regs[reg_idx] ?
+ regs[reg_idx] : IPROC_PCIE_REG_INVALID;
+
+ return 0;
+}
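
In the table walk above, a zero entry means the register is not implemented, which is presumably why index 0 gets the explicit PAXC_V2 override: offset 0 is itself a legal register address, so the ternary test cannot mark index 0 as absent. A standalone sketch of the population step with a hypothetical table:

#include <stdint.h>
#include <stdio.h>

#define REG_INVALID 0xffff
#define NUM_REGS    4

int main(void)
{
	/* hypothetical per-revision offset table; 0 = not implemented */
	const uint16_t regs[NUM_REGS] = { 0x0c00, 0x0000, 0x0d20, 0x0000 };
	uint16_t offsets[NUM_REGS];
	int i;

	for (i = 0; i < NUM_REGS; i++)
		offsets[i] = regs[i] ? regs[i] : REG_INVALID;

	for (i = 0; i < NUM_REGS; i++)
		printf("reg %d -> 0x%04x\n", i, offsets[i]);
	return 0;
}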
+
int iproc_pcie_setup(struct iproc_pcie *pcie, struct list_head *res)
{
struct device *dev;
@@ -462,6 +1208,13 @@ int iproc_pcie_setup(struct iproc_pcie *pcie, struct list_head *res)
struct pci_bus *bus;
dev = pcie->dev;
+
+ ret = iproc_pcie_rev_init(pcie);
+ if (ret) {
+ dev_err(dev, "unable to initialize controller parameters\n");
+ return ret;
+ }
+
ret = devm_request_pci_bus_resources(dev, res);
if (ret)
return ret;
@@ -478,19 +1231,6 @@ int iproc_pcie_setup(struct iproc_pcie *pcie, struct list_head *res)
goto err_exit_phy;
}
- switch (pcie->type) {
- case IPROC_PCIE_PAXB:
- pcie->reg_offsets = iproc_pcie_reg_paxb;
- break;
- case IPROC_PCIE_PAXC:
- pcie->reg_offsets = iproc_pcie_reg_paxc;
- break;
- default:
- dev_err(dev, "incompatible iProc PCIe interface\n");
- ret = -EINVAL;
- goto err_power_off_phy;
- }
-
iproc_pcie_reset(pcie);
if (pcie->need_ob_cfg) {
@@ -501,6 +1241,10 @@ int iproc_pcie_setup(struct iproc_pcie *pcie, struct list_head *res)
}
}
+ ret = iproc_pcie_map_dma_ranges(pcie);
+ if (ret && ret != -ENOENT)
+ goto err_power_off_phy;
+
#ifdef CONFIG_ARM
pcie->sysdata.private_data = pcie;
sysdata = &pcie->sysdata;
@@ -530,7 +1274,10 @@ int iproc_pcie_setup(struct iproc_pcie *pcie, struct list_head *res)
pci_scan_child_bus(bus);
pci_assign_unassigned_bus_resources(bus);
- pci_fixup_irqs(pci_common_swizzle, pcie->map_irq);
+
+ if (pcie->map_irq)
+ pci_fixup_irqs(pci_common_swizzle, pcie->map_irq);
+
pci_bus_add_devices(bus);
return 0;
diff --git a/drivers/pci/host/pcie-iproc.h b/drivers/pci/host/pcie-iproc.h
index e84d93c53c7b..04fed8e907f1 100644
--- a/drivers/pci/host/pcie-iproc.h
+++ b/drivers/pci/host/pcie-iproc.h
@@ -24,23 +24,34 @@
* endpoint devices.
*/
enum iproc_pcie_type {
- IPROC_PCIE_PAXB = 0,
+ IPROC_PCIE_PAXB_BCMA = 0,
+ IPROC_PCIE_PAXB,
+ IPROC_PCIE_PAXB_V2,
IPROC_PCIE_PAXC,
+ IPROC_PCIE_PAXC_V2,
};
/**
* iProc PCIe outbound mapping
- * @set_oarr_size: indicates the OARR size bit needs to be set
* @axi_offset: offset from the AXI address to the internal address used by
* the iProc PCIe core
- * @window_size: outbound window size
+ * @nr_windows: total number of supported outbound mapping windows
*/
struct iproc_pcie_ob {
- bool set_oarr_size;
resource_size_t axi_offset;
- resource_size_t window_size;
+ unsigned int nr_windows;
};
+/**
+ * iProc PCIe inbound mapping
+ * @nr_regions: total number of supported inbound mapping regions
+ */
+struct iproc_pcie_ib {
+ unsigned int nr_regions;
+};
+
+struct iproc_pcie_ob_map;
+struct iproc_pcie_ib_map;
struct iproc_msi;
/**
@@ -55,14 +66,25 @@ struct iproc_msi;
* @root_bus: pointer to root bus
* @phy: optional PHY device that controls the Serdes
* @map_irq: function callback to map interrupts
+ * @ep_is_internal: indicates an internal emulated endpoint device is connected
+ * @has_apb_err_disable: indicates the controller can be configured to prevent
+ * unsupported requests from being forwarded as an APB bus error
+ *
* @need_ob_cfg: indicates SW needs to configure the outbound mapping window
- * @ob: outbound mapping parameters
+ * @ob: outbound mapping related parameters
+ * @ob_map: outbound mapping related parameters specific to the controller
+ *
+ * @ib: inbound mapping related parameters
+ * @ib_map: inbound mapping region related parameters
+ *
+ * @need_msi_steer: indicates additional configuration of the iProc PCIe
+ * controller is required to steer MSI writes to an external interrupt controller
* @msi: MSI data
*/
struct iproc_pcie {
struct device *dev;
enum iproc_pcie_type type;
- const u16 *reg_offsets;
+ u16 *reg_offsets;
void __iomem *base;
phys_addr_t base_addr;
#ifdef CONFIG_ARM
@@ -71,8 +93,17 @@ struct iproc_pcie {
struct pci_bus *root_bus;
struct phy *phy;
int (*map_irq)(const struct pci_dev *, u8, u8);
+ bool ep_is_internal;
+ bool has_apb_err_disable;
+
bool need_ob_cfg;
struct iproc_pcie_ob ob;
+ const struct iproc_pcie_ob_map *ob_map;
+
+ struct iproc_pcie_ib ib;
+ const struct iproc_pcie_ib_map *ib_map;
+
+ bool need_msi_steer;
struct iproc_msi *msi;
};
diff --git a/drivers/pci/host/pcie-qcom.c b/drivers/pci/host/pcie-qcom.c
index 35936409b2d4..734ba0d4a5c8 100644
--- a/drivers/pci/host/pcie-qcom.c
+++ b/drivers/pci/host/pcie-qcom.c
@@ -36,11 +36,17 @@
#include "pcie-designware.h"
+#define PCIE20_PARF_SYS_CTRL 0x00
#define PCIE20_PARF_PHY_CTRL 0x40
#define PCIE20_PARF_PHY_REFCLK 0x4C
#define PCIE20_PARF_DBI_BASE_ADDR 0x168
-#define PCIE20_PARF_SLV_ADDR_SPACE_SIZE 0x16c
+#define PCIE20_PARF_SLV_ADDR_SPACE_SIZE 0x16C
+#define PCIE20_PARF_MHI_CLOCK_RESET_CTRL 0x174
#define PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT 0x178
+#define PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2 0x1A8
+#define PCIE20_PARF_LTSSM 0x1B0
+#define PCIE20_PARF_SID_OFFSET 0x234
+#define PCIE20_PARF_BDF_TRANSLATE_CFG 0x24C
#define PCIE20_ELBI_SYS_CTRL 0x04
#define PCIE20_ELBI_SYS_CTRL_LT_ENABLE BIT(0)
@@ -72,9 +78,18 @@ struct qcom_pcie_resources_v1 {
struct regulator *vdda;
};
+struct qcom_pcie_resources_v2 {
+ struct clk *aux_clk;
+ struct clk *master_clk;
+ struct clk *slave_clk;
+ struct clk *cfg_clk;
+ struct clk *pipe_clk;
+};
+
union qcom_pcie_resources {
struct qcom_pcie_resources_v0 v0;
struct qcom_pcie_resources_v1 v1;
+ struct qcom_pcie_resources_v2 v2;
};
struct qcom_pcie;
@@ -82,7 +97,9 @@ struct qcom_pcie;
struct qcom_pcie_ops {
int (*get_resources)(struct qcom_pcie *pcie);
int (*init)(struct qcom_pcie *pcie);
+ int (*post_init)(struct qcom_pcie *pcie);
void (*deinit)(struct qcom_pcie *pcie);
+ void (*ltssm_enable)(struct qcom_pcie *pcie);
};
struct qcom_pcie {
@@ -116,17 +133,35 @@ static irqreturn_t qcom_pcie_msi_irq_handler(int irq, void *arg)
return dw_handle_msi_irq(pp);
}
-static int qcom_pcie_establish_link(struct qcom_pcie *pcie)
+static void qcom_pcie_v0_v1_ltssm_enable(struct qcom_pcie *pcie)
{
u32 val;
- if (dw_pcie_link_up(&pcie->pp))
- return 0;
-
/* enable link training */
val = readl(pcie->elbi + PCIE20_ELBI_SYS_CTRL);
val |= PCIE20_ELBI_SYS_CTRL_LT_ENABLE;
writel(val, pcie->elbi + PCIE20_ELBI_SYS_CTRL);
+}
+
+static void qcom_pcie_v2_ltssm_enable(struct qcom_pcie *pcie)
+{
+ u32 val;
+
+ /* enable link training */
+ val = readl(pcie->parf + PCIE20_PARF_LTSSM);
+ val |= BIT(8);
+ writel(val, pcie->parf + PCIE20_PARF_LTSSM);
+}
+
+static int qcom_pcie_establish_link(struct qcom_pcie *pcie)
+{
+
+ if (dw_pcie_link_up(&pcie->pp))
+ return 0;
+
+ /* Enable Link Training state machine */
+ if (pcie->ops->ltssm_enable)
+ pcie->ops->ltssm_enable(pcie);
return dw_pcie_wait_for_link(&pcie->pp);
}
@@ -421,6 +456,113 @@ err_res:
return ret;
}
+static int qcom_pcie_get_resources_v2(struct qcom_pcie *pcie)
+{
+ struct qcom_pcie_resources_v2 *res = &pcie->res.v2;
+ struct device *dev = pcie->pp.dev;
+
+ res->aux_clk = devm_clk_get(dev, "aux");
+ if (IS_ERR(res->aux_clk))
+ return PTR_ERR(res->aux_clk);
+
+ res->cfg_clk = devm_clk_get(dev, "cfg");
+ if (IS_ERR(res->cfg_clk))
+ return PTR_ERR(res->cfg_clk);
+
+ res->master_clk = devm_clk_get(dev, "bus_master");
+ if (IS_ERR(res->master_clk))
+ return PTR_ERR(res->master_clk);
+
+ res->slave_clk = devm_clk_get(dev, "bus_slave");
+ if (IS_ERR(res->slave_clk))
+ return PTR_ERR(res->slave_clk);
+
+ res->pipe_clk = devm_clk_get(dev, "pipe");
+ if (IS_ERR(res->pipe_clk))
+ return PTR_ERR(res->pipe_clk);
+
+ return 0;
+}
+
+static int qcom_pcie_init_v2(struct qcom_pcie *pcie)
+{
+ struct qcom_pcie_resources_v2 *res = &pcie->res.v2;
+ struct device *dev = pcie->pp.dev;
+ u32 val;
+ int ret;
+
+ ret = clk_prepare_enable(res->aux_clk);
+ if (ret) {
+ dev_err(dev, "cannot prepare/enable aux clock\n");
+ return ret;
+ }
+
+ ret = clk_prepare_enable(res->cfg_clk);
+ if (ret) {
+ dev_err(dev, "cannot prepare/enable cfg clock\n");
+ goto err_cfg_clk;
+ }
+
+ ret = clk_prepare_enable(res->master_clk);
+ if (ret) {
+ dev_err(dev, "cannot prepare/enable master clock\n");
+ goto err_master_clk;
+ }
+
+ ret = clk_prepare_enable(res->slave_clk);
+ if (ret) {
+ dev_err(dev, "cannot prepare/enable slave clock\n");
+ goto err_slave_clk;
+ }
+
+ /* enable PCIe clocks and resets */
+ val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL);
+ val &= ~BIT(0);
+ writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL);
+
+ /* change DBI base address */
+ writel(0, pcie->parf + PCIE20_PARF_DBI_BASE_ADDR);
+
+ /* MAC PHY_POWERDOWN MUX DISABLE */
+ val = readl(pcie->parf + PCIE20_PARF_SYS_CTRL);
+ val &= ~BIT(29);
+ writel(val, pcie->parf + PCIE20_PARF_SYS_CTRL);
+
+ val = readl(pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL);
+ val |= BIT(4);
+ writel(val, pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL);
+
+ val = readl(pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2);
+ val |= BIT(31);
+ writel(val, pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2);
+
+ return 0;
+
+err_slave_clk:
+ clk_disable_unprepare(res->master_clk);
+err_master_clk:
+ clk_disable_unprepare(res->cfg_clk);
+err_cfg_clk:
+ clk_disable_unprepare(res->aux_clk);
+
+ return ret;
+}
+
+static int qcom_pcie_post_init_v2(struct qcom_pcie *pcie)
+{
+ struct qcom_pcie_resources_v2 *res = &pcie->res.v2;
+ struct device *dev = pcie->pp.dev;
+ int ret;
+
+ ret = clk_prepare_enable(res->pipe_clk);
+ if (ret) {
+ dev_err(dev, "cannot prepare/enable pipe clock\n");
+ return ret;
+ }
+
+ return 0;
+}
+
static int qcom_pcie_link_up(struct pcie_port *pp)
{
struct qcom_pcie *pcie = to_qcom_pcie(pp);
@@ -429,6 +571,17 @@ static int qcom_pcie_link_up(struct pcie_port *pp)
return !!(val & PCI_EXP_LNKSTA_DLLLA);
}
+static void qcom_pcie_deinit_v2(struct qcom_pcie *pcie)
+{
+ struct qcom_pcie_resources_v2 *res = &pcie->res.v2;
+
+ clk_disable_unprepare(res->pipe_clk);
+ clk_disable_unprepare(res->slave_clk);
+ clk_disable_unprepare(res->master_clk);
+ clk_disable_unprepare(res->cfg_clk);
+ clk_disable_unprepare(res->aux_clk);
+}
+
static void qcom_pcie_host_init(struct pcie_port *pp)
{
struct qcom_pcie *pcie = to_qcom_pcie(pp);
@@ -444,6 +597,9 @@ static void qcom_pcie_host_init(struct pcie_port *pp)
if (ret)
goto err_deinit;
+ if (pcie->ops->post_init)
+ pcie->ops->post_init(pcie);
+
dw_pcie_setup_rc(pp);
if (IS_ENABLED(CONFIG_PCI_MSI))
@@ -487,12 +643,22 @@ static const struct qcom_pcie_ops ops_v0 = {
.get_resources = qcom_pcie_get_resources_v0,
.init = qcom_pcie_init_v0,
.deinit = qcom_pcie_deinit_v0,
+ .ltssm_enable = qcom_pcie_v0_v1_ltssm_enable,
};
static const struct qcom_pcie_ops ops_v1 = {
.get_resources = qcom_pcie_get_resources_v1,
.init = qcom_pcie_init_v1,
.deinit = qcom_pcie_deinit_v1,
+ .ltssm_enable = qcom_pcie_v0_v1_ltssm_enable,
+};
+
+static const struct qcom_pcie_ops ops_v2 = {
+ .get_resources = qcom_pcie_get_resources_v2,
+ .init = qcom_pcie_init_v2,
+ .post_init = qcom_pcie_post_init_v2,
+ .deinit = qcom_pcie_deinit_v2,
+ .ltssm_enable = qcom_pcie_v2_ltssm_enable,
};
static int qcom_pcie_probe(struct platform_device *pdev)
@@ -572,6 +738,7 @@ static const struct of_device_id qcom_pcie_match[] = {
{ .compatible = "qcom,pcie-ipq8064", .data = &ops_v0 },
{ .compatible = "qcom,pcie-apq8064", .data = &ops_v0 },
{ .compatible = "qcom,pcie-apq8084", .data = &ops_v1 },
+ { .compatible = "qcom,pcie-msm8996", .data = &ops_v2 },
{ }
};
diff --git a/drivers/pci/host/pcie-rcar.c b/drivers/pci/host/pcie-rcar.c
index 62700d1896f4..aca85be101f8 100644
--- a/drivers/pci/host/pcie-rcar.c
+++ b/drivers/pci/host/pcie-rcar.c
@@ -1071,13 +1071,14 @@ static int rcar_pcie_parse_map_dma_ranges(struct rcar_pcie *pcie,
static const struct of_device_id rcar_pcie_of_match[] = {
{ .compatible = "renesas,pcie-r8a7779", .data = rcar_pcie_hw_init_h1 },
- { .compatible = "renesas,pcie-rcar-gen2",
- .data = rcar_pcie_hw_init_gen2 },
{ .compatible = "renesas,pcie-r8a7790",
.data = rcar_pcie_hw_init_gen2 },
{ .compatible = "renesas,pcie-r8a7791",
.data = rcar_pcie_hw_init_gen2 },
+ { .compatible = "renesas,pcie-rcar-gen2",
+ .data = rcar_pcie_hw_init_gen2 },
{ .compatible = "renesas,pcie-r8a7795", .data = rcar_pcie_hw_init },
+ { .compatible = "renesas,pcie-rcar-gen3", .data = rcar_pcie_hw_init },
{},
};
diff --git a/drivers/pci/host/pcie-rockchip.c b/drivers/pci/host/pcie-rockchip.c
index e04f69beb42d..f2dca7bb0b39 100644
--- a/drivers/pci/host/pcie-rockchip.c
+++ b/drivers/pci/host/pcie-rockchip.c
@@ -53,6 +53,7 @@
#define PCIE_CLIENT_ARI_ENABLE HIWORD_UPDATE_BIT(0x0008)
#define PCIE_CLIENT_CONF_LANE_NUM(x) HIWORD_UPDATE(0x0030, ENCODE_LANES(x))
#define PCIE_CLIENT_MODE_RC HIWORD_UPDATE_BIT(0x0040)
+#define PCIE_CLIENT_GEN_SEL_1 HIWORD_UPDATE(0x0080, 0)
#define PCIE_CLIENT_GEN_SEL_2 HIWORD_UPDATE_BIT(0x0080)
#define PCIE_CLIENT_BASIC_STATUS1 (PCIE_CLIENT_BASE + 0x48)
#define PCIE_CLIENT_LINK_STATUS_UP 0x00300000
@@ -135,13 +136,14 @@
#define PCIE_RC_CONFIG_VENDOR (PCIE_RC_CONFIG_BASE + 0x00)
#define PCIE_RC_CONFIG_RID_CCR (PCIE_RC_CONFIG_BASE + 0x08)
#define PCIE_RC_CONFIG_SCC_SHIFT 16
+#define PCIE_RC_CONFIG_DCR (PCIE_RC_CONFIG_BASE + 0xc4)
+#define PCIE_RC_CONFIG_DCR_CSPL_SHIFT 18
+#define PCIE_RC_CONFIG_DCR_CSPL_LIMIT 0xff
+#define PCIE_RC_CONFIG_DCR_CPLS_SHIFT 26
#define PCIE_RC_CONFIG_LCS (PCIE_RC_CONFIG_BASE + 0xd0)
-#define PCIE_RC_CONFIG_LCS_RETRAIN_LINK BIT(5)
-#define PCIE_RC_CONFIG_LCS_LBMIE BIT(10)
-#define PCIE_RC_CONFIG_LCS_LABIE BIT(11)
-#define PCIE_RC_CONFIG_LCS_LBMS BIT(30)
-#define PCIE_RC_CONFIG_LCS_LAMS BIT(31)
#define PCIE_RC_CONFIG_L1_SUBSTATE_CTRL2 (PCIE_RC_CONFIG_BASE + 0x90c)
+#define PCIE_RC_CONFIG_THP_CAP (PCIE_RC_CONFIG_BASE + 0x274)
+#define PCIE_RC_CONFIG_THP_CAP_NEXT_MASK GENMASK(31, 20)
#define PCIE_CORE_AXI_CONF_BASE 0xc00000
#define PCIE_CORE_OB_REGION_ADDR0 (PCIE_CORE_AXI_CONF_BASE + 0x0)
@@ -203,8 +205,14 @@ struct rockchip_pcie {
struct gpio_desc *ep_gpio;
u32 lanes;
u8 root_bus_nr;
+ int link_gen;
struct device *dev;
struct irq_domain *irq_domain;
+ u32 io_size;
+ int offset;
+ phys_addr_t io_bus_addr;
+ u32 mem_size;
+ phys_addr_t mem_bus_addr;
};
static u32 rockchip_pcie_read(struct rockchip_pcie *rockchip, u32 reg)
@@ -223,7 +231,7 @@ static void rockchip_pcie_enable_bw_int(struct rockchip_pcie *rockchip)
u32 status;
status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_LCS);
- status |= (PCIE_RC_CONFIG_LCS_LBMIE | PCIE_RC_CONFIG_LCS_LABIE);
+ status |= (PCI_EXP_LNKCTL_LBMIE | PCI_EXP_LNKCTL_LABIE);
rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_LCS);
}
@@ -232,7 +240,7 @@ static void rockchip_pcie_clr_bw_int(struct rockchip_pcie *rockchip)
u32 status;
status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_LCS);
- status |= (PCIE_RC_CONFIG_LCS_LBMS | PCIE_RC_CONFIG_LCS_LAMS);
+ status |= (PCI_EXP_LNKSTA_LBMS | PCI_EXP_LNKSTA_LABS) << 16;
rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_LCS);
}
@@ -398,6 +406,40 @@ static struct pci_ops rockchip_pcie_ops = {
.write = rockchip_pcie_wr_conf,
};
+static void rockchip_pcie_set_power_limit(struct rockchip_pcie *rockchip)
+{
+ u32 status, curr, scale, power;
+
+ if (IS_ERR(rockchip->vpcie3v3))
+ return;
+
+ /*
+ * Set the RC's captured slot power limit and scale if
+ * vpcie3v3 is available. The default values are both zero,
+ * which means the software should set these two according
+ * to the actual power supply.
+ */
+ curr = regulator_get_current_limit(rockchip->vpcie3v3);
+ if (curr > 0) {
+ scale = 3; /* 0.001x */
+ curr = curr / 1000; /* convert to mA */
+ power = (curr * 3300) / 1000; /* milliwatt */
+ while (power > PCIE_RC_CONFIG_DCR_CSPL_LIMIT) {
+ if (!scale) {
+ dev_warn(rockchip->dev, "invalid power supply\n");
+ return;
+ }
+ scale--;
+ power = power / 10;
+ }
+
+ status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_DCR);
+ status |= (power << PCIE_RC_CONFIG_DCR_CSPL_SHIFT) |
+ (scale << PCIE_RC_CONFIG_DCR_CPLS_SHIFT);
+ rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_DCR);
+ }
+}
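
A standalone walk-through of the captured slot power limit encoding above: the loop coarsens the scale (starting at 0.001x) until the value fits the 8-bit CSPL field. A hypothetical 3 A regulator at 3.3 V encodes as value 99 at scale 1, i.e. 9.9 W:

#include <stdio.h>

int main(void)
{
	int curr_ua = 3000000;               /* regulator limit in uA */
	int curr = curr_ua / 1000;           /* mA */
	int power = curr * 3300 / 1000;      /* mW: 9900 */
	int scale = 3;                       /* 0.001x */

	while (power > 0xff && scale > 0) {  /* fit the 8-bit CSPL field */
		scale--;
		power /= 10;
	}
	/* 9900 mW -> value 99 at scale 1 (0.1x) = 9.9 W */
	printf("CSPL value=%d scale=%d\n", power, scale);
	return 0;
}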
+
/**
* rockchip_pcie_init_port - Initialize hardware
* @rockchip: PCIe port information
@@ -429,26 +471,6 @@ static int rockchip_pcie_init_port(struct rockchip_pcie *rockchip)
return err;
}
- udelay(10);
-
- err = reset_control_deassert(rockchip->pm_rst);
- if (err) {
- dev_err(dev, "deassert pm_rst err %d\n", err);
- return err;
- }
-
- err = reset_control_deassert(rockchip->aclk_rst);
- if (err) {
- dev_err(dev, "deassert mgmt_sticky_rst err %d\n", err);
- return err;
- }
-
- err = reset_control_deassert(rockchip->pclk_rst);
- if (err) {
- dev_err(dev, "deassert mgmt_sticky_rst err %d\n", err);
- return err;
- }
-
err = phy_init(rockchip->phy);
if (err < 0) {
dev_err(dev, "fail to init phy, err %d\n", err);
@@ -479,14 +501,40 @@ static int rockchip_pcie_init_port(struct rockchip_pcie *rockchip)
return err;
}
+ udelay(10);
+
+ err = reset_control_deassert(rockchip->pm_rst);
+ if (err) {
+ dev_err(dev, "deassert pm_rst err %d\n", err);
+ return err;
+ }
+
+ err = reset_control_deassert(rockchip->aclk_rst);
+ if (err) {
+ dev_err(dev, "deassert aclk_rst err %d\n", err);
+ return err;
+ }
+
+ err = reset_control_deassert(rockchip->pclk_rst);
+ if (err) {
+ dev_err(dev, "deassert pclk_rst err %d\n", err);
+ return err;
+ }
+
+ if (rockchip->link_gen == 2)
+ rockchip_pcie_write(rockchip, PCIE_CLIENT_GEN_SEL_2,
+ PCIE_CLIENT_CONFIG);
+ else
+ rockchip_pcie_write(rockchip, PCIE_CLIENT_GEN_SEL_1,
+ PCIE_CLIENT_CONFIG);
+
rockchip_pcie_write(rockchip,
PCIE_CLIENT_CONF_ENABLE |
PCIE_CLIENT_LINK_TRAIN_ENABLE |
PCIE_CLIENT_ARI_ENABLE |
PCIE_CLIENT_CONF_LANE_NUM(rockchip->lanes) |
- PCIE_CLIENT_MODE_RC |
- PCIE_CLIENT_GEN_SEL_2,
- PCIE_CLIENT_CONFIG);
+ PCIE_CLIENT_MODE_RC,
+ PCIE_CLIENT_CONFIG);
err = phy_power_on(rockchip->phy);
if (err) {
@@ -522,21 +570,19 @@ static int rockchip_pcie_init_port(struct rockchip_pcie *rockchip)
return err;
}
- /*
- * We need to read/write PCIE_RC_CONFIG_L1_SUBSTATE_CTRL2 before
- * enabling ASPM. Otherwise L1PwrOnSc and L1PwrOnVal isn't
- * reliable and enabling ASPM doesn't work. This is a controller
- * bug we need to work around.
- */
- status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_L1_SUBSTATE_CTRL2);
- rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_L1_SUBSTATE_CTRL2);
-
/* Fix the transmitted FTS count desired to exit from L0s. */
status = rockchip_pcie_read(rockchip, PCIE_CORE_CTRL_PLC1);
- status = (status & PCIE_CORE_CTRL_PLC1_FTS_MASK) |
+ status = (status & ~PCIE_CORE_CTRL_PLC1_FTS_MASK) |
(PCIE_CORE_CTRL_PLC1_FTS_CNT << PCIE_CORE_CTRL_PLC1_FTS_SHIFT);
rockchip_pcie_write(rockchip, status, PCIE_CORE_CTRL_PLC1);
+ rockchip_pcie_set_power_limit(rockchip);
+
+ /* Set RC's clock architecture as common clock */
+ status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_LCS);
+ status |= PCI_EXP_LNKCTL_CCC;
+ rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_LCS);
+
/* Enable Gen1 training */
rockchip_pcie_write(rockchip, PCIE_CLIENT_LINK_TRAIN_ENABLE,
PCIE_CLIENT_CONFIG);
@@ -563,35 +609,37 @@ static int rockchip_pcie_init_port(struct rockchip_pcie *rockchip)
msleep(20);
}
- /*
- * Enable retrain for gen2. This should be configured only after
- * gen1 finished.
- */
- status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_LCS);
- status |= PCIE_RC_CONFIG_LCS_RETRAIN_LINK;
- rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_LCS);
+ if (rockchip->link_gen == 2) {
+ /*
+ * Enable retrain for gen2. This should be configured only after
+ * gen1 finished.
+ */
+ status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_LCS);
+ status |= PCI_EXP_LNKCTL_RL;
+ rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_LCS);
+
+ timeout = jiffies + msecs_to_jiffies(500);
+ for (;;) {
+ status = rockchip_pcie_read(rockchip, PCIE_CORE_CTRL);
+ if ((status & PCIE_CORE_PL_CONF_SPEED_MASK) ==
+ PCIE_CORE_PL_CONF_SPEED_5G) {
+ dev_dbg(dev, "PCIe link training gen2 pass!\n");
+ break;
+ }
- timeout = jiffies + msecs_to_jiffies(500);
- for (;;) {
- status = rockchip_pcie_read(rockchip, PCIE_CORE_CTRL);
- if ((status & PCIE_CORE_PL_CONF_SPEED_MASK) ==
- PCIE_CORE_PL_CONF_SPEED_5G) {
- dev_dbg(dev, "PCIe link training gen2 pass!\n");
- break;
- }
+ if (time_after(jiffies, timeout)) {
+ dev_dbg(dev, "PCIe link training gen2 timeout, fall back to gen1!\n");
+ break;
+ }
- if (time_after(jiffies, timeout)) {
- dev_dbg(dev, "PCIe link training gen2 timeout, fall back to gen1!\n");
- break;
+ msleep(20);
}
-
- msleep(20);
}
/* Check the final link width from negotiated lane counter from MGMT */
status = rockchip_pcie_read(rockchip, PCIE_CORE_CTRL);
- status = 0x1 << ((status & PCIE_CORE_PL_CONF_LANE_MASK) >>
- PCIE_CORE_PL_CONF_LANE_MASK);
+ status = 0x1 << ((status & PCIE_CORE_PL_CONF_LANE_MASK) >>
+ PCIE_CORE_PL_CONF_LANE_SHIFT);
dev_dbg(dev, "current link width is x%d\n", status);
rockchip_pcie_write(rockchip, ROCKCHIP_VENDOR_ID,
@@ -599,6 +647,12 @@ static int rockchip_pcie_init_port(struct rockchip_pcie *rockchip)
rockchip_pcie_write(rockchip,
PCI_CLASS_BRIDGE_PCI << PCIE_RC_CONFIG_SCC_SHIFT,
PCIE_RC_CONFIG_RID_CCR);
+
+ /* Clear THP cap's next cap pointer to remove L1 substate cap */
+ status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_THP_CAP);
+ status &= ~PCIE_RC_CONFIG_THP_CAP_NEXT_MASK;
+ rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_THP_CAP);
+
rockchip_pcie_write(rockchip, 0x0, PCIE_RC_BAR_CONF);
rockchip_pcie_write(rockchip,
@@ -794,6 +848,10 @@ static int rockchip_pcie_parse_dt(struct rockchip_pcie *rockchip)
rockchip->lanes = 1;
}
+ rockchip->link_gen = of_pci_get_max_link_speed(node);
+ if (rockchip->link_gen < 0 || rockchip->link_gen > 2)
+ rockchip->link_gen = 2;
+
rockchip->core_rst = devm_reset_control_get(dev, "core");
if (IS_ERR(rockchip->core_rst)) {
if (PTR_ERR(rockchip->core_rst) != -EPROBE_DEFER)
@@ -1087,6 +1145,50 @@ static int rockchip_pcie_prog_ib_atu(struct rockchip_pcie *rockchip,
return 0;
}
+static int rockchip_cfg_atu(struct rockchip_pcie *rockchip)
+{
+ struct device *dev = rockchip->dev;
+ int offset;
+ int err;
+ int reg_no;
+
+ for (reg_no = 0; reg_no < (rockchip->mem_size >> 20); reg_no++) {
+ err = rockchip_pcie_prog_ob_atu(rockchip, reg_no + 1,
+ AXI_WRAPPER_MEM_WRITE,
+ 20 - 1,
+ rockchip->mem_bus_addr +
+ (reg_no << 20),
+ 0);
+ if (err) {
+ dev_err(dev, "program RC mem outbound ATU failed\n");
+ return err;
+ }
+ }
+
+ err = rockchip_pcie_prog_ib_atu(rockchip, 2, 32 - 1, 0x0, 0);
+ if (err) {
+ dev_err(dev, "program RC mem inbound ATU failed\n");
+ return err;
+ }
+
+ offset = rockchip->mem_size >> 20;
+ for (reg_no = 0; reg_no < (rockchip->io_size >> 20); reg_no++) {
+ err = rockchip_pcie_prog_ob_atu(rockchip,
+ reg_no + 1 + offset,
+ AXI_WRAPPER_IO_WRITE,
+ 20 - 1,
+ rockchip->io_bus_addr +
+ (reg_no << 20),
+ 0);
+ if (err) {
+ dev_err(dev, "program RC io outbound ATU failed\n");
+ return err;
+ }
+ }
+
+ return 0;
+}
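
rockchip_cfg_atu() above carves both windows into 1 MB outbound regions (hence the "20 - 1" passed-bits argument), numbering the I/O regions after the memory ones. A standalone sketch of the numbering with hypothetical window sizes:

#include <stdio.h>

int main(void)
{
	unsigned int mem_size = 32 << 20;   /* hypothetical 32 MB MEM window */
	unsigned int io_size  = 2 << 20;    /* hypothetical 2 MB I/O window */
	int mem_regions = mem_size >> 20;   /* one ATU region per MB */
	int io_regions  = io_size >> 20;

	printf("MEM -> ATU regions 1..%d\n", mem_regions);
	printf("I/O -> ATU regions %d..%d\n",
	       mem_regions + 1, mem_regions + io_regions);
	return 0;
}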
+
static int rockchip_pcie_probe(struct platform_device *pdev)
{
struct rockchip_pcie *rockchip;
@@ -1096,13 +1198,7 @@ static int rockchip_pcie_probe(struct platform_device *pdev)
resource_size_t io_base;
struct resource *mem;
struct resource *io;
- phys_addr_t io_bus_addr = 0;
- u32 io_size;
- phys_addr_t mem_bus_addr = 0;
- u32 mem_size = 0;
- int reg_no;
int err;
- int offset;
LIST_HEAD(res);
@@ -1169,14 +1265,13 @@ static int rockchip_pcie_probe(struct platform_device *pdev)
goto err_vpcie;
/* Get the I/O and memory ranges from DT */
- io_size = 0;
resource_list_for_each_entry(win, &res) {
switch (resource_type(win->res)) {
case IORESOURCE_IO:
io = win->res;
io->name = "I/O";
- io_size = resource_size(io);
- io_bus_addr = io->start - win->offset;
+ rockchip->io_size = resource_size(io);
+ rockchip->io_bus_addr = io->start - win->offset;
err = pci_remap_iospace(io, io_base);
if (err) {
dev_warn(dev, "error %d: failed to map resource %pR\n",
@@ -1187,8 +1282,8 @@ static int rockchip_pcie_probe(struct platform_device *pdev)
case IORESOURCE_MEM:
mem = win->res;
mem->name = "MEM";
- mem_size = resource_size(mem);
- mem_bus_addr = mem->start - win->offset;
+ rockchip->mem_size = resource_size(mem);
+ rockchip->mem_bus_addr = mem->start - win->offset;
break;
case IORESOURCE_BUS:
rockchip->root_bus_nr = win->res->start;
@@ -1198,45 +1293,9 @@ static int rockchip_pcie_probe(struct platform_device *pdev)
}
}
- if (mem_size) {
- for (reg_no = 0; reg_no < (mem_size >> 20); reg_no++) {
- err = rockchip_pcie_prog_ob_atu(rockchip, reg_no + 1,
- AXI_WRAPPER_MEM_WRITE,
- 20 - 1,
- mem_bus_addr +
- (reg_no << 20),
- 0);
- if (err) {
- dev_err(dev, "program RC mem outbound ATU failed\n");
- goto err_vpcie;
- }
- }
- }
-
- err = rockchip_pcie_prog_ib_atu(rockchip, 2, 32 - 1, 0x0, 0);
- if (err) {
- dev_err(dev, "program RC mem inbound ATU failed\n");
+ err = rockchip_cfg_atu(rockchip);
+ if (err)
goto err_vpcie;
- }
-
- offset = mem_size >> 20;
-
- if (io_size) {
- for (reg_no = 0; reg_no < (io_size >> 20); reg_no++) {
- err = rockchip_pcie_prog_ob_atu(rockchip,
- reg_no + 1 + offset,
- AXI_WRAPPER_IO_WRITE,
- 20 - 1,
- io_bus_addr +
- (reg_no << 20),
- 0);
- if (err) {
- dev_err(dev, "program RC io outbound ATU failed\n");
- goto err_vpcie;
- }
- }
- }
-
bus = pci_scan_root_bus(&pdev->dev, 0, &rockchip_pcie_ops, rockchip, &res);
if (!bus) {
err = -ENOMEM;
@@ -1249,9 +1308,6 @@ static int rockchip_pcie_probe(struct platform_device *pdev)
pcie_bus_configure_settings(child);
pci_bus_add_devices(bus);
-
- dev_warn(dev, "only 32-bit config accesses supported; smaller writes may corrupt adjacent RW1C fields\n");
-
return err;
err_vpcie:
diff --git a/drivers/pci/host/pcie-spear13xx.c b/drivers/pci/host/pcie-spear13xx.c
index 3cf197ba7f37..dafe8b88d97d 100644
--- a/drivers/pci/host/pcie-spear13xx.c
+++ b/drivers/pci/host/pcie-spear13xx.c
@@ -296,8 +296,4 @@ static struct platform_driver spear13xx_pcie_driver = {
},
};
-static int __init spear13xx_pcie_init(void)
-{
- return platform_driver_register(&spear13xx_pcie_driver);
-}
-device_initcall(spear13xx_pcie_init);
+builtin_platform_driver(spear13xx_pcie_driver);
diff --git a/drivers/pci/host/vmd.c b/drivers/pci/host/vmd.c
index 37e29b580be3..18ef1a93c10a 100644
--- a/drivers/pci/host/vmd.c
+++ b/drivers/pci/host/vmd.c
@@ -19,6 +19,7 @@
#include <linux/module.h>
#include <linux/msi.h>
#include <linux/pci.h>
+#include <linux/srcu.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
@@ -39,7 +40,6 @@ static DEFINE_RAW_SPINLOCK(list_lock);
/**
* struct vmd_irq - private data to map driver IRQ to the VMD shared vector
* @node: list item for parent traversal.
- * @rcu: RCU callback item for freeing.
* @irq: back pointer to parent.
* @enabled: true if driver enabled IRQ
* @virq: the virtual IRQ value provided to the requesting driver.
@@ -49,7 +49,6 @@ static DEFINE_RAW_SPINLOCK(list_lock);
*/
struct vmd_irq {
struct list_head node;
- struct rcu_head rcu;
struct vmd_irq_list *irq;
bool enabled;
unsigned int virq;
@@ -58,11 +57,13 @@ struct vmd_irq {
/**
* struct vmd_irq_list - list of driver requested IRQs mapping to a VMD vector
* @irq_list: the list of irq's the VMD one demuxes to.
+ * @srcu: SRCU struct for local synchronization.
* @count: number of child IRQs assigned to this vector; used to track
* sharing.
*/
struct vmd_irq_list {
struct list_head irq_list;
+ struct srcu_struct srcu;
unsigned int count;
};
@@ -224,14 +225,14 @@ static void vmd_msi_free(struct irq_domain *domain,
struct vmd_irq *vmdirq = irq_get_chip_data(virq);
unsigned long flags;
- synchronize_rcu();
+ synchronize_srcu(&vmdirq->irq->srcu);
/* XXX: Potential optimization to rebalance */
raw_spin_lock_irqsave(&list_lock, flags);
vmdirq->irq->count--;
raw_spin_unlock_irqrestore(&list_lock, flags);
- kfree_rcu(vmdirq, rcu);
+ kfree(vmdirq);
}
static int vmd_msi_prepare(struct irq_domain *domain, struct device *dev,
@@ -646,11 +647,12 @@ static irqreturn_t vmd_irq(int irq, void *data)
{
struct vmd_irq_list *irqs = data;
struct vmd_irq *vmdirq;
+ int idx;
- rcu_read_lock();
+ idx = srcu_read_lock(&irqs->srcu);
list_for_each_entry_rcu(vmdirq, &irqs->irq_list, node)
generic_handle_irq(vmdirq->virq);
- rcu_read_unlock();
+ srcu_read_unlock(&irqs->srcu, idx);
return IRQ_HANDLED;
}
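
The conversion above trades global RCU for per-vector SRCU, so freeing a vmd_irq only waits for readers of that vector's srcu_struct rather than for all RCU readers system-wide. An illustrative kernel-style sketch of the pattern (not VMD code; init_srcu_struct() is assumed to have run, as in vmd_probe() below):

#include <linux/srcu.h>
#include <linux/slab.h>
#include <linux/list.h>

static struct srcu_struct demo_srcu;

static void demo_reader(struct list_head *head)
{
	int idx = srcu_read_lock(&demo_srcu);     /* cheap, per-domain */
	/* ... list_for_each_entry_rcu() walk over head ... */
	srcu_read_unlock(&demo_srcu, idx);
}

static void demo_free(void *obj)
{
	/* waits only for readers of demo_srcu, so the object can
	 * then be freed synchronously instead of via kfree_rcu() */
	synchronize_srcu(&demo_srcu);
	kfree(obj);
}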
@@ -696,6 +698,10 @@ static int vmd_probe(struct pci_dev *dev, const struct pci_device_id *id)
return -ENOMEM;
for (i = 0; i < vmd->msix_count; i++) {
+ err = init_srcu_struct(&vmd->irqs[i].srcu);
+ if (err)
+ return err;
+
INIT_LIST_HEAD(&vmd->irqs[i].irq_list);
err = devm_request_irq(&dev->dev, pci_irq_vector(dev, i),
vmd_irq, 0, "vmd", &vmd->irqs[i]);
@@ -714,12 +720,20 @@ static int vmd_probe(struct pci_dev *dev, const struct pci_device_id *id)
return 0;
}
+static void vmd_cleanup_srcu(struct vmd_dev *vmd)
+{
+ int i;
+
+ for (i = 0; i < vmd->msix_count; i++)
+ cleanup_srcu_struct(&vmd->irqs[i].srcu);
+}
+
static void vmd_remove(struct pci_dev *dev)
{
struct vmd_dev *vmd = pci_get_drvdata(dev);
vmd_detach_resources(vmd);
- pci_set_drvdata(dev, NULL);
+ vmd_cleanup_srcu(vmd);
sysfs_remove_link(&vmd->dev->dev.kobj, "domain");
pci_stop_root_bus(vmd->bus);
pci_remove_root_bus(vmd->bus);
@@ -727,7 +741,7 @@ static void vmd_remove(struct pci_dev *dev)
irq_domain_remove(vmd->irq_domain);
}
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
static int vmd_suspend(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c
index a46b585fae31..5ed2dcaa8e27 100644
--- a/drivers/pci/hotplug/acpiphp_glue.c
+++ b/drivers/pci/hotplug/acpiphp_glue.c
@@ -222,35 +222,6 @@ static void acpiphp_post_dock_fixup(struct acpi_device *adev)
acpiphp_let_context_go(context);
}
-/* Check whether the PCI device is managed by native PCIe hotplug driver */
-static bool device_is_managed_by_native_pciehp(struct pci_dev *pdev)
-{
- u32 reg32;
- acpi_handle tmp;
- struct acpi_pci_root *root;
-
- /* Check whether the PCIe port supports native PCIe hotplug */
- if (pcie_capability_read_dword(pdev, PCI_EXP_SLTCAP, &reg32))
- return false;
- if (!(reg32 & PCI_EXP_SLTCAP_HPC))
- return false;
-
- /*
- * Check whether native PCIe hotplug has been enabled for
- * this PCIe hierarchy.
- */
- tmp = acpi_find_root_bridge_handle(pdev);
- if (!tmp)
- return false;
- root = acpi_pci_find_root(tmp);
- if (!root)
- return false;
- if (!(root->osc_control_set & OSC_PCI_EXPRESS_NATIVE_HP_CONTROL))
- return false;
-
- return true;
-}
-
/**
* acpiphp_add_context - Add ACPIPHP context to an ACPI device object.
* @handle: ACPI handle of the object to add a context to.
@@ -334,7 +305,7 @@ static acpi_status acpiphp_add_context(acpi_handle handle, u32 lvl, void *data,
* expose slots to user space in those cases.
*/
if ((acpi_pci_check_ejectable(pbus, handle) || is_dock_device(adev))
- && !(pdev && device_is_managed_by_native_pciehp(pdev))) {
+ && !(pdev && pdev->is_hotplug_bridge && pciehp_is_native(pdev))) {
unsigned long long sun;
int retval;
diff --git a/drivers/pci/hotplug/cpqphp_core.c b/drivers/pci/hotplug/cpqphp_core.c
index 74f3a0695b43..ec009a7dba20 100644
--- a/drivers/pci/hotplug/cpqphp_core.c
+++ b/drivers/pci/hotplug/cpqphp_core.c
@@ -867,7 +867,8 @@ static int cpqhpc_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
*/
if ((pdev->revision <= 2) && (vendor_id != PCI_VENDOR_ID_INTEL)) {
err(msg_HPC_not_supported);
- return -ENODEV;
+ rc = -ENODEV;
+ goto err_disable_device;
}
/* TODO: This code can be made to support non-Compaq or Intel
diff --git a/drivers/pci/hotplug/pci_hotplug_core.c b/drivers/pci/hotplug/pci_hotplug_core.c
index fea0b8b33589..56013d0daf7f 100644
--- a/drivers/pci/hotplug/pci_hotplug_core.c
+++ b/drivers/pci/hotplug/pci_hotplug_core.c
@@ -23,6 +23,9 @@
*
* Send feedback to <kristen.c.accardi@intel.com>
*
+ * Authors:
+ * Greg Kroah-Hartman <greg@kroah.com>
+ * Scott Murray <scottm@somanetworks.com>
*/
#include <linux/module.h> /* try_module_get & module_put */
@@ -50,15 +53,9 @@
#define info(format, arg...) printk(KERN_INFO "%s: " format, MY_NAME, ## arg)
#define warn(format, arg...) printk(KERN_WARNING "%s: " format, MY_NAME, ## arg)
-
/* local variables */
static bool debug;
-#define DRIVER_VERSION "0.5"
-#define DRIVER_AUTHOR "Greg Kroah-Hartman <greg@kroah.com>, Scott Murray <scottm@somanetworks.com>"
-#define DRIVER_DESC "PCI Hot Plug PCI Core"
-
-
static LIST_HEAD(pci_hotplug_slot_list);
static DEFINE_MUTEX(pci_hp_mutex);
@@ -534,7 +531,6 @@ static int __init pci_hotplug_init(void)
return result;
}
- info(DRIVER_DESC " version: " DRIVER_VERSION "\n");
return result;
}
device_initcall(pci_hotplug_init);
diff --git a/drivers/pci/hotplug/pciehp_core.c b/drivers/pci/hotplug/pciehp_core.c
index 7d32fa33dcef..35d84845d5af 100644
--- a/drivers/pci/hotplug/pciehp_core.c
+++ b/drivers/pci/hotplug/pciehp_core.c
@@ -25,6 +25,10 @@
*
* Send feedback to <greg@kroah.com>, <kristen.c.accardi@intel.com>
*
+ * Authors:
+ * Dan Zink <dan.zink@compaq.com>
+ * Greg Kroah-Hartman <greg@kroah.com>
+ * Dely Sy <dely.l.sy@intel.com>
*/
#include <linux/moduleparam.h>
@@ -42,10 +46,6 @@ bool pciehp_poll_mode;
int pciehp_poll_time;
static bool pciehp_force;
-#define DRIVER_VERSION "0.4"
-#define DRIVER_AUTHOR "Dan Zink <dan.zink@compaq.com>, Greg Kroah-Hartman <greg@kroah.com>, Dely Sy <dely.l.sy@intel.com>"
-#define DRIVER_DESC "PCI Express Hot Plug Controller Driver"
-
/*
* not really modular, but the easiest way to keep compat with existing
* bootargs behaviour is to continue using module_param here.
@@ -333,7 +333,6 @@ static int __init pcied_init(void)
retval = pcie_port_service_register(&hpdriver_portdrv);
dbg("pcie_port_service_register = %d\n", retval);
- info(DRIVER_DESC " version: " DRIVER_VERSION "\n");
if (retval)
dbg("Failure to register service\n");
diff --git a/drivers/pci/hotplug/pciehp_ctrl.c b/drivers/pci/hotplug/pciehp_ctrl.c
index efe69e879455..10c9c0ba8ff2 100644
--- a/drivers/pci/hotplug/pciehp_ctrl.c
+++ b/drivers/pci/hotplug/pciehp_ctrl.c
@@ -31,6 +31,7 @@
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/slab.h>
+#include <linux/pm_runtime.h>
#include <linux/pci.h>
#include "../pci.h"
#include "pciehp.h"
@@ -98,6 +99,7 @@ static int board_added(struct slot *p_slot)
pciehp_green_led_blink(p_slot);
/* Check link training status */
+ pm_runtime_get_sync(&ctrl->pcie->port->dev);
retval = pciehp_check_link_status(ctrl);
if (retval) {
ctrl_err(ctrl, "Failed to check link status\n");
@@ -118,12 +120,14 @@ static int board_added(struct slot *p_slot)
if (retval != -EEXIST)
goto err_exit;
}
+ pm_runtime_put(&ctrl->pcie->port->dev);
pciehp_green_led_on(p_slot);
pciehp_set_attention_status(p_slot, 0);
return 0;
err_exit:
+ pm_runtime_put(&ctrl->pcie->port->dev);
set_slot_off(ctrl, p_slot);
return retval;
}
@@ -137,7 +141,9 @@ static int remove_board(struct slot *p_slot)
int retval;
struct controller *ctrl = p_slot->ctrl;
+ pm_runtime_get_sync(&ctrl->pcie->port->dev);
retval = pciehp_unconfigure_device(p_slot);
+ pm_runtime_put(&ctrl->pcie->port->dev);
if (retval)
return retval;
@@ -410,7 +416,7 @@ int pciehp_enable_slot(struct slot *p_slot)
if (getstatus) {
ctrl_info(ctrl, "Slot(%s): Already enabled\n",
slot_name(p_slot));
- return -EINVAL;
+ return 0;
}
}
diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c
index b57fc6d6e28a..026830a138ae 100644
--- a/drivers/pci/hotplug/pciehp_hpc.c
+++ b/drivers/pci/hotplug/pciehp_hpc.c
@@ -620,8 +620,18 @@ static irqreturn_t pciehp_isr(int irq, void *dev_id)
pciehp_queue_interrupt_event(slot, INT_BUTTON_PRESS);
}
- /* Check Presence Detect Changed */
- if (events & PCI_EXP_SLTSTA_PDC) {
+ /*
+ * Check Link Status Changed at higher precedence than Presence
+ * Detect Changed. The PDS value may be set to "card present" from
+ * out-of-band detection, which may be in conflict with a Link Down
+ * and cause the wrong event to queue.
+ */
+ if (events & PCI_EXP_SLTSTA_DLLSC) {
+ ctrl_info(ctrl, "Slot(%s): Link %s\n", slot_name(slot),
+ link ? "Up" : "Down");
+ pciehp_queue_interrupt_event(slot, link ? INT_LINK_UP :
+ INT_LINK_DOWN);
+ } else if (events & PCI_EXP_SLTSTA_PDC) {
present = !!(status & PCI_EXP_SLTSTA_PDS);
ctrl_info(ctrl, "Slot(%s): Card %spresent\n", slot_name(slot),
present ? "" : "not ");
@@ -636,13 +646,6 @@ static irqreturn_t pciehp_isr(int irq, void *dev_id)
pciehp_queue_interrupt_event(slot, INT_POWER_FAULT);
}
- if (events & PCI_EXP_SLTSTA_DLLSC) {
- ctrl_info(ctrl, "Slot(%s): Link %s\n", slot_name(slot),
- link ? "Up" : "Down");
- pciehp_queue_interrupt_event(slot, link ? INT_LINK_UP :
- INT_LINK_DOWN);
- }
-
return IRQ_HANDLED;
}
diff --git a/drivers/pci/hotplug/rpadlpar_core.c b/drivers/pci/hotplug/rpadlpar_core.c
index dc67f39779ec..c614ff7c3bc3 100644
--- a/drivers/pci/hotplug/rpadlpar_core.c
+++ b/drivers/pci/hotplug/rpadlpar_core.c
@@ -257,8 +257,13 @@ static int dlpar_add_phb(char *drc_name, struct device_node *dn)
static int dlpar_add_vio_slot(char *drc_name, struct device_node *dn)
{
- if (vio_find_node(dn))
+ struct vio_dev *vio_dev;
+
+ vio_dev = vio_find_node(dn);
+ if (vio_dev) {
+ put_device(&vio_dev->dev);
return -EINVAL;
+ }
if (!vio_register_device_node(dn)) {
printk(KERN_ERR
@@ -334,6 +339,9 @@ static int dlpar_remove_vio_slot(char *drc_name, struct device_node *dn)
return -EINVAL;
vio_unregister_device(vio_dev);
+
+ put_device(&vio_dev->dev);
+
return 0;
}
diff --git a/drivers/pci/hotplug/s390_pci_hpc.c b/drivers/pci/hotplug/s390_pci_hpc.c
index 50b8b7d54416..530d0e49f2ed 100644
--- a/drivers/pci/hotplug/s390_pci_hpc.c
+++ b/drivers/pci/hotplug/s390_pci_hpc.c
@@ -5,12 +5,13 @@
*
* Author(s):
* Jan Glauber <jang@linux.vnet.ibm.com>
+ *
+ * License: GPL
*/
#define KMSG_COMPONENT "zpci"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
-#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/pci.h>
@@ -21,10 +22,6 @@
#define SLOT_NAME_SIZE 10
static LIST_HEAD(s390_hotplug_slot_list);
-MODULE_AUTHOR("Jan Glauber <jang@linux.vnet.ibm.com");
-MODULE_DESCRIPTION("Hot Plug PCI Controller for System z");
-MODULE_LICENSE("GPL");
-
static int zpci_fn_configured(enum zpci_state state)
{
return state == ZPCI_FN_STATE_CONFIGURED ||
diff --git a/drivers/pci/iov.c b/drivers/pci/iov.c
index e30f05c8517f..47227820406d 100644
--- a/drivers/pci/iov.c
+++ b/drivers/pci/iov.c
@@ -306,13 +306,6 @@ static int sriov_enable(struct pci_dev *dev, int nr_virtfn)
return rc;
}
- pci_iov_set_numvfs(dev, nr_virtfn);
- iov->ctrl |= PCI_SRIOV_CTRL_VFE | PCI_SRIOV_CTRL_MSE;
- pci_cfg_access_lock(dev);
- pci_write_config_word(dev, iov->pos + PCI_SRIOV_CTRL, iov->ctrl);
- msleep(100);
- pci_cfg_access_unlock(dev);
-
iov->initial_VFs = initial;
if (nr_virtfn < initial)
initial = nr_virtfn;
@@ -323,6 +316,13 @@ static int sriov_enable(struct pci_dev *dev, int nr_virtfn)
goto err_pcibios;
}
+ pci_iov_set_numvfs(dev, nr_virtfn);
+ iov->ctrl |= PCI_SRIOV_CTRL_VFE | PCI_SRIOV_CTRL_MSE;
+ pci_cfg_access_lock(dev);
+ pci_write_config_word(dev, iov->pos + PCI_SRIOV_CTRL, iov->ctrl);
+ msleep(100);
+ pci_cfg_access_unlock(dev);
+
for (i = 0; i < initial; i++) {
rc = pci_iov_add_virtfn(dev, i, 0);
if (rc)
@@ -554,21 +554,61 @@ void pci_iov_release(struct pci_dev *dev)
}
/**
- * pci_iov_resource_bar - get position of the SR-IOV BAR
+ * pci_iov_update_resource - update a VF BAR
* @dev: the PCI device
* @resno: the resource number
*
- * Returns position of the BAR encapsulated in the SR-IOV capability.
+ * Update a VF BAR in the SR-IOV capability of a PF.
*/
-int pci_iov_resource_bar(struct pci_dev *dev, int resno)
+void pci_iov_update_resource(struct pci_dev *dev, int resno)
{
- if (resno < PCI_IOV_RESOURCES || resno > PCI_IOV_RESOURCE_END)
- return 0;
+ struct pci_sriov *iov = dev->is_physfn ? dev->sriov : NULL;
+ struct resource *res = dev->resource + resno;
+ int vf_bar = resno - PCI_IOV_RESOURCES;
+ struct pci_bus_region region;
+ u16 cmd;
+ u32 new;
+ int reg;
+
+ /*
+ * The generic pci_restore_bars() path calls this for all devices,
+ * including VFs and non-SR-IOV devices. If this is not a PF, we
+ * have nothing to do.
+ */
+ if (!iov)
+ return;
+
+ pci_read_config_word(dev, iov->pos + PCI_SRIOV_CTRL, &cmd);
+ if ((cmd & PCI_SRIOV_CTRL_VFE) && (cmd & PCI_SRIOV_CTRL_MSE)) {
+ dev_WARN(&dev->dev, "can't update enabled VF BAR%d %pR\n",
+ vf_bar, res);
+ return;
+ }
+
+ /*
+ * Ignore unimplemented BARs, unused resource slots for 64-bit
+ * BARs, and non-movable resources, e.g., those described via
+ * Enhanced Allocation.
+ */
+ if (!res->flags)
+ return;
+
+ if (res->flags & IORESOURCE_UNSET)
+ return;
+
+ if (res->flags & IORESOURCE_PCI_FIXED)
+ return;
- BUG_ON(!dev->is_physfn);
+ pcibios_resource_to_bus(dev->bus, &region, res);
+ new = region.start;
+ new |= res->flags & ~PCI_BASE_ADDRESS_MEM_MASK;
- return dev->sriov->pos + PCI_SRIOV_BAR +
- 4 * (resno - PCI_IOV_RESOURCES);
+ reg = iov->pos + PCI_SRIOV_BAR + 4 * vf_bar;
+ pci_write_config_dword(dev, reg, new);
+ if (res->flags & IORESOURCE_MEM_64) {
+ new = region.start >> 16 >> 16;
+ pci_write_config_dword(dev, reg + 4, new);
+ }
}
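
The ">> 16 >> 16" in the 64-bit branch above is the usual kernel idiom for extracting the upper dword when the bus address type may be only 32 bits wide, in which case a direct ">> 32" would be undefined behavior. A standalone demonstration:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t start32 = 0xd0000000u;      /* 32-bit bus address config */
	uint64_t start64 = 0x4d0000000ULL;   /* 64-bit bus address config */

	/* two 16-bit shifts are well-defined for both widths */
	printf("32-bit: upper dword = 0x%x\n", start32 >> 16 >> 16);
	printf("64-bit: upper dword = 0x%llx\n",
	       (unsigned long long)(start64 >> 16 >> 16));
	return 0;
}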
resource_size_t __weak pcibios_iov_resource_alignment(struct pci_dev *dev,
diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
index ad70507cfb56..50c5003295ca 100644
--- a/drivers/pci/msi.c
+++ b/drivers/pci/msi.c
@@ -551,14 +551,14 @@ error_attrs:
}
static struct msi_desc *
-msi_setup_entry(struct pci_dev *dev, int nvec, bool affinity)
+msi_setup_entry(struct pci_dev *dev, int nvec, const struct irq_affinity *affd)
{
struct cpumask *masks = NULL;
struct msi_desc *entry;
u16 control;
- if (affinity) {
- masks = irq_create_affinity_masks(dev->irq_affinity, nvec);
+ if (affd) {
+ masks = irq_create_affinity_masks(nvec, affd);
if (!masks)
pr_err("Unable to allocate affinity masks, ignoring\n");
}
@@ -618,7 +618,8 @@ static int msi_verify_entries(struct pci_dev *dev)
* an error, and a positive return value indicates the number of interrupts
* which could have been allocated.
*/
-static int msi_capability_init(struct pci_dev *dev, int nvec, bool affinity)
+static int msi_capability_init(struct pci_dev *dev, int nvec,
+ const struct irq_affinity *affd)
{
struct msi_desc *entry;
int ret;
@@ -626,7 +627,7 @@ static int msi_capability_init(struct pci_dev *dev, int nvec, bool affinity)
pci_msi_set_enable(dev, 0); /* Disable MSI during set up */
- entry = msi_setup_entry(dev, nvec, affinity);
+ entry = msi_setup_entry(dev, nvec, affd);
if (!entry)
return -ENOMEM;
@@ -690,14 +691,14 @@ static void __iomem *msix_map_region(struct pci_dev *dev, unsigned nr_entries)
static int msix_setup_entries(struct pci_dev *dev, void __iomem *base,
struct msix_entry *entries, int nvec,
- bool affinity)
+ const struct irq_affinity *affd)
{
struct cpumask *curmsk, *masks = NULL;
struct msi_desc *entry;
int ret, i;
- if (affinity) {
- masks = irq_create_affinity_masks(dev->irq_affinity, nvec);
+ if (affd) {
+ masks = irq_create_affinity_masks(nvec, affd);
if (!masks)
pr_err("Unable to allocate affinity masks, ignoring\n");
}
@@ -753,14 +754,14 @@ static void msix_program_entries(struct pci_dev *dev,
* @dev: pointer to the pci_dev data structure of MSI-X device function
* @entries: pointer to an array of struct msix_entry entries
* @nvec: number of @entries
- * @affinity: flag to indicate cpu irq affinity mask should be set
+ * @affd: Optional pointer to enable automatic affinity assignment
*
* Setup the MSI-X capability structure of device function with a
* single MSI-X irq. A return of zero indicates the successful setup of
* requested MSI-X entries with allocated irqs or non-zero for otherwise.
**/
static int msix_capability_init(struct pci_dev *dev, struct msix_entry *entries,
- int nvec, bool affinity)
+ int nvec, const struct irq_affinity *affd)
{
int ret;
u16 control;
@@ -775,7 +776,7 @@ static int msix_capability_init(struct pci_dev *dev, struct msix_entry *entries,
if (!base)
return -ENOMEM;
- ret = msix_setup_entries(dev, base, entries, nvec, affinity);
+ ret = msix_setup_entries(dev, base, entries, nvec, affd);
if (ret)
return ret;
@@ -956,7 +957,7 @@ int pci_msix_vec_count(struct pci_dev *dev)
EXPORT_SYMBOL(pci_msix_vec_count);
static int __pci_enable_msix(struct pci_dev *dev, struct msix_entry *entries,
- int nvec, bool affinity)
+ int nvec, const struct irq_affinity *affd)
{
int nr_entries;
int i, j;
@@ -988,7 +989,7 @@ static int __pci_enable_msix(struct pci_dev *dev, struct msix_entry *entries,
dev_info(&dev->dev, "can't enable MSI-X (MSI IRQ already assigned)\n");
return -EINVAL;
}
- return msix_capability_init(dev, entries, nvec, affinity);
+ return msix_capability_init(dev, entries, nvec, affd);
}
/**
@@ -1008,7 +1009,7 @@ static int __pci_enable_msix(struct pci_dev *dev, struct msix_entry *entries,
**/
int pci_enable_msix(struct pci_dev *dev, struct msix_entry *entries, int nvec)
{
- return __pci_enable_msix(dev, entries, nvec, false);
+ return __pci_enable_msix(dev, entries, nvec, NULL);
}
EXPORT_SYMBOL(pci_enable_msix);
@@ -1059,9 +1060,8 @@ int pci_msi_enabled(void)
EXPORT_SYMBOL(pci_msi_enabled);
static int __pci_enable_msi_range(struct pci_dev *dev, int minvec, int maxvec,
- unsigned int flags)
+ const struct irq_affinity *affd)
{
- bool affinity = flags & PCI_IRQ_AFFINITY;
int nvec;
int rc;
@@ -1090,14 +1090,13 @@ static int __pci_enable_msi_range(struct pci_dev *dev, int minvec, int maxvec,
nvec = maxvec;
for (;;) {
- if (affinity) {
- nvec = irq_calc_affinity_vectors(dev->irq_affinity,
- nvec);
+ if (affd) {
+ nvec = irq_calc_affinity_vectors(nvec, affd);
if (nvec < minvec)
return -ENOSPC;
}
- rc = msi_capability_init(dev, nvec, affinity);
+ rc = msi_capability_init(dev, nvec, affd);
if (rc == 0)
return nvec;
@@ -1124,29 +1123,27 @@ static int __pci_enable_msi_range(struct pci_dev *dev, int minvec, int maxvec,
**/
int pci_enable_msi_range(struct pci_dev *dev, int minvec, int maxvec)
{
- return __pci_enable_msi_range(dev, minvec, maxvec, 0);
+ return __pci_enable_msi_range(dev, minvec, maxvec, NULL);
}
EXPORT_SYMBOL(pci_enable_msi_range);
static int __pci_enable_msix_range(struct pci_dev *dev,
- struct msix_entry *entries, int minvec, int maxvec,
- unsigned int flags)
+ struct msix_entry *entries, int minvec,
+ int maxvec, const struct irq_affinity *affd)
{
- bool affinity = flags & PCI_IRQ_AFFINITY;
int rc, nvec = maxvec;
if (maxvec < minvec)
return -ERANGE;
for (;;) {
- if (affinity) {
- nvec = irq_calc_affinity_vectors(dev->irq_affinity,
- nvec);
+ if (affd) {
+ nvec = irq_calc_affinity_vectors(nvec, affd);
if (nvec < minvec)
return -ENOSPC;
}
- rc = __pci_enable_msix(dev, entries, nvec, affinity);
+ rc = __pci_enable_msix(dev, entries, nvec, affd);
if (rc == 0)
return nvec;
@@ -1177,16 +1174,17 @@ static int __pci_enable_msix_range(struct pci_dev *dev,
int pci_enable_msix_range(struct pci_dev *dev, struct msix_entry *entries,
int minvec, int maxvec)
{
- return __pci_enable_msix_range(dev, entries, minvec, maxvec, 0);
+ return __pci_enable_msix_range(dev, entries, minvec, maxvec, NULL);
}
EXPORT_SYMBOL(pci_enable_msix_range);
/**
- * pci_alloc_irq_vectors - allocate multiple IRQs for a device
+ * pci_alloc_irq_vectors_affinity - allocate multiple IRQs for a device
* @dev: PCI device to operate on
* @min_vecs: minimum number of vectors required (must be >= 1)
* @max_vecs: maximum (desired) number of vectors
* @flags: flags or quirks for the allocation
+ * @affd: optional description of the affinity requirements
*
* Allocate up to @max_vecs interrupt vectors for @dev, using MSI-X or MSI
* vectors if available, and fall back to a single legacy vector
@@ -1198,20 +1196,30 @@ EXPORT_SYMBOL(pci_enable_msix_range);
* To get the Linux IRQ number used for a vector that can be passed to
* request_irq() use the pci_irq_vector() helper.
*/
-int pci_alloc_irq_vectors(struct pci_dev *dev, unsigned int min_vecs,
- unsigned int max_vecs, unsigned int flags)
+int pci_alloc_irq_vectors_affinity(struct pci_dev *dev, unsigned int min_vecs,
+ unsigned int max_vecs, unsigned int flags,
+ const struct irq_affinity *affd)
{
+ static const struct irq_affinity msi_default_affd;
int vecs = -ENOSPC;
+ if (flags & PCI_IRQ_AFFINITY) {
+ if (!affd)
+ affd = &msi_default_affd;
+ } else {
+ if (WARN_ON(affd))
+ affd = NULL;
+ }
+
if (flags & PCI_IRQ_MSIX) {
vecs = __pci_enable_msix_range(dev, NULL, min_vecs, max_vecs,
- flags);
+ affd);
if (vecs > 0)
return vecs;
}
if (flags & PCI_IRQ_MSI) {
- vecs = __pci_enable_msi_range(dev, min_vecs, max_vecs, flags);
+ vecs = __pci_enable_msi_range(dev, min_vecs, max_vecs, affd);
if (vecs > 0)
return vecs;
}
@@ -1224,7 +1232,7 @@ int pci_alloc_irq_vectors(struct pci_dev *dev, unsigned int min_vecs,
return vecs;
}
-EXPORT_SYMBOL(pci_alloc_irq_vectors);
+EXPORT_SYMBOL(pci_alloc_irq_vectors_affinity);
/**
* pci_free_irq_vectors - free previously allocated IRQs for a device
@@ -1294,7 +1302,8 @@ const struct cpumask *pci_irq_get_affinity(struct pci_dev *dev, int nr)
} else if (dev->msi_enabled) {
struct msi_desc *entry = first_pci_msi_entry(dev);
- if (WARN_ON_ONCE(!entry || nr >= entry->nvec_used))
+ if (WARN_ON_ONCE(!entry || !entry->affinity ||
+ nr >= entry->nvec_used))
return NULL;
return &entry->affinity[nr];
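[Editor's sketch] The flags-based interface above is replaced by an explicit struct irq_affinity descriptor. A minimal driver-side sketch of the new entry point, for a device that wants one unmanaged admin vector; the helper name example_setup_irqs is made up, while pci_alloc_irq_vectors_affinity(), pci_irq_vector() and struct irq_affinity come from this series:

#include <linux/interrupt.h>
#include <linux/pci.h>

static int example_setup_irqs(struct pci_dev *pdev)
{
	/* Keep vector 0 out of the automatic spreading (admin queue). */
	static const struct irq_affinity affd = { .pre_vectors = 1 };
	int nvec;

	nvec = pci_alloc_irq_vectors_affinity(pdev, 2, 32,
			PCI_IRQ_MSIX | PCI_IRQ_AFFINITY, &affd);
	if (nvec < 0)
		return nvec;

	/* Linux IRQ number of vector 0, suitable for request_irq(). */
	return pci_irq_vector(pdev, 0);
}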
diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c
index d966d47c9e80..001860361434 100644
--- a/drivers/pci/pci-acpi.c
+++ b/drivers/pci/pci-acpi.c
@@ -29,6 +29,82 @@ const u8 pci_acpi_dsm_uuid[] = {
0x91, 0x17, 0xea, 0x4d, 0x19, 0xc3, 0x43, 0x4d
};
+#if defined(CONFIG_PCI_QUIRKS) && defined(CONFIG_ARM64)
+static int acpi_get_rc_addr(struct acpi_device *adev, struct resource *res)
+{
+ struct device *dev = &adev->dev;
+ struct resource_entry *entry;
+ struct list_head list;
+ unsigned long flags;
+ int ret;
+
+ INIT_LIST_HEAD(&list);
+ flags = IORESOURCE_MEM;
+ ret = acpi_dev_get_resources(adev, &list,
+ acpi_dev_filter_resource_type_cb,
+ (void *) flags);
+ if (ret < 0) {
+ dev_err(dev, "failed to parse _CRS method, error code %d\n",
+ ret);
+ return ret;
+ }
+
+ if (ret == 0) {
+ dev_err(dev, "no IO and memory resources present in _CRS\n");
+ return -EINVAL;
+ }
+
+ entry = list_first_entry(&list, struct resource_entry, node);
+ *res = *entry->res;
+ acpi_dev_free_resource_list(&list);
+ return 0;
+}
+
+static acpi_status acpi_match_rc(acpi_handle handle, u32 lvl, void *context,
+ void **retval)
+{
+ u16 *segment = context;
+ unsigned long long uid;
+ acpi_status status;
+
+ status = acpi_evaluate_integer(handle, "_UID", NULL, &uid);
+ if (ACPI_FAILURE(status) || uid != *segment)
+ return AE_CTRL_DEPTH;
+
+ *(acpi_handle *)retval = handle;
+ return AE_CTRL_TERMINATE;
+}
+
+int acpi_get_rc_resources(struct device *dev, const char *hid, u16 segment,
+ struct resource *res)
+{
+ struct acpi_device *adev;
+ acpi_status status;
+ acpi_handle handle;
+ int ret;
+
+ status = acpi_get_devices(hid, acpi_match_rc, &segment, &handle);
+ if (ACPI_FAILURE(status)) {
+ dev_err(dev, "can't find _HID %s device to locate resources\n",
+ hid);
+ return -ENODEV;
+ }
+
+ ret = acpi_bus_get_device(handle, &adev);
+ if (ret)
+ return ret;
+
+ ret = acpi_get_rc_addr(adev, res);
+ if (ret) {
+ dev_err(dev, "can't get resource from %s\n",
+ dev_name(&adev->dev));
+ return ret;
+ }
+
+ return 0;
+}
+#endif
+
phys_addr_t acpi_pci_root_get_mcfg_addr(acpi_handle handle)
{
acpi_status status = AE_NOT_EXIST;
@@ -294,6 +370,30 @@ int pci_get_hp_params(struct pci_dev *dev, struct hotplug_params *hpp)
EXPORT_SYMBOL_GPL(pci_get_hp_params);
/**
+ * pciehp_is_native - Check whether a hotplug port is handled by the OS
+ * @pdev: Hotplug port to check
+ *
+ * Walk up from @pdev to the host bridge, obtain its cached _OSC Control Field
+ * and return the value of the "PCI Express Native Hot Plug control" bit.
+ * On failure to obtain the _OSC Control Field return %false.
+ */
+bool pciehp_is_native(struct pci_dev *pdev)
+{
+ struct acpi_pci_root *root;
+ acpi_handle handle;
+
+ handle = acpi_find_root_bridge_handle(pdev);
+ if (!handle)
+ return false;
+
+ root = acpi_pci_find_root(handle);
+ if (!root)
+ return false;
+
+ return root->osc_control_set & OSC_PCI_EXPRESS_NATIVE_HP_CONTROL;
+}
+
+/**
* pci_acpi_wake_bus - Root bus wakeup notification fork function.
* @work: Work item to handle.
*/
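[Editor's sketch] acpi_get_rc_resources() gives ARM64 host controller quirks a way to locate a root complex by _HID, matching the PCI segment number against _UID and returning the first memory resource from _CRS. A hypothetical caller, with "EXMP0001" standing in for a vendor _HID:

static int example_map_rc(struct device *dev)
{
	struct resource res;
	int ret;

	/* "EXMP0001" is a placeholder _HID; segment 0 assumed. */
	ret = acpi_get_rc_resources(dev, "EXMP0001", 0, &res);
	if (ret)
		return ret;

	dev_info(dev, "root complex registers at %pR\n", &res);
	return 0;
}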
diff --git a/drivers/pci/pci-mid.c b/drivers/pci/pci-mid.c
index c7f3408e3148..1c4af7227bca 100644
--- a/drivers/pci/pci-mid.c
+++ b/drivers/pci/pci-mid.c
@@ -54,7 +54,7 @@ static bool mid_pci_need_resume(struct pci_dev *dev)
return false;
}
-static struct pci_platform_pm_ops mid_pci_platform_pm = {
+static const struct pci_platform_pm_ops mid_pci_platform_pm = {
.is_manageable = mid_pci_power_manageable,
.set_state = mid_pci_set_power_state,
.get_state = mid_pci_get_power_state,
diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
index bcd10c795284..066628776e1b 100644
--- a/drivers/pci/pci-sysfs.c
+++ b/drivers/pci/pci-sysfs.c
@@ -50,6 +50,7 @@ pci_config_attr(vendor, "0x%04x\n");
pci_config_attr(device, "0x%04x\n");
pci_config_attr(subsystem_vendor, "0x%04x\n");
pci_config_attr(subsystem_device, "0x%04x\n");
+pci_config_attr(revision, "0x%02x\n");
pci_config_attr(class, "0x%06x\n");
pci_config_attr(irq, "%u\n");
@@ -568,6 +569,7 @@ static struct attribute *pci_dev_attrs[] = {
&dev_attr_device.attr,
&dev_attr_subsystem_vendor.attr,
&dev_attr_subsystem_device.attr,
+ &dev_attr_revision.attr,
&dev_attr_class.attr,
&dev_attr_irq.attr,
&dev_attr_local_cpus.attr,
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index ba34907538f6..a881c0d3d2e8 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -564,10 +564,6 @@ static void pci_restore_bars(struct pci_dev *dev)
{
int i;
- /* Per SR-IOV spec 3.4.1.11, VF BARs are RO zero */
- if (dev->is_virtfn)
- return;
-
for (i = 0; i < PCI_BRIDGE_RESOURCES; i++)
pci_update_resource(dev, i);
}
@@ -2106,6 +2102,10 @@ bool pci_dev_run_wake(struct pci_dev *dev)
if (!dev->pme_support)
return false;
+ /* PME-capable in principle, but not from the intended sleep state */
+ if (!pci_pme_capable(dev, pci_target_state(dev)))
+ return false;
+
while (bus->parent) {
struct pci_dev *bridge = bus->self;
@@ -2226,7 +2226,7 @@ void pci_config_pm_runtime_put(struct pci_dev *pdev)
* This function checks if it is possible to move the bridge to D3.
* Currently we only allow D3 for recent enough PCIe ports.
*/
-static bool pci_bridge_d3_possible(struct pci_dev *bridge)
+bool pci_bridge_d3_possible(struct pci_dev *bridge)
{
unsigned int year;
@@ -2239,6 +2239,14 @@ static bool pci_bridge_d3_possible(struct pci_dev *bridge)
case PCI_EXP_TYPE_DOWNSTREAM:
if (pci_bridge_d3_disable)
return false;
+
+ /*
+ * Hotplug ports handled by firmware in System Management Mode
+ * may not be put into D3 by the OS (Thunderbolt on non-Macs).
+ */
+ if (bridge->is_hotplug_bridge && !pciehp_is_native(bridge))
+ return false;
+
if (pci_bridge_d3_force)
return true;
@@ -2259,32 +2267,36 @@ static bool pci_bridge_d3_possible(struct pci_dev *bridge)
static int pci_dev_check_d3cold(struct pci_dev *dev, void *data)
{
bool *d3cold_ok = data;
- bool no_d3cold;
- /*
- * The device needs to be allowed to go D3cold and if it is wake
- * capable to do so from D3cold.
- */
- no_d3cold = dev->no_d3cold || !dev->d3cold_allowed ||
- (device_may_wakeup(&dev->dev) && !pci_pme_capable(dev, PCI_D3cold)) ||
- !pci_power_manageable(dev);
+ if (/* The device needs to be allowed to go D3cold ... */
+ dev->no_d3cold || !dev->d3cold_allowed ||
- *d3cold_ok = !no_d3cold;
+ /* ... and if it is wakeup capable to do so from D3cold. */
+ (device_may_wakeup(&dev->dev) &&
+ !pci_pme_capable(dev, PCI_D3cold)) ||
- return no_d3cold;
+ /* If it is a bridge it must be allowed to go to D3. */
+ !pci_power_manageable(dev) ||
+
+ /* Hotplug interrupts cannot be delivered if the link is down. */
+ dev->is_hotplug_bridge)
+
+ *d3cold_ok = false;
+
+ return !*d3cold_ok;
}
/*
* pci_bridge_d3_update - Update bridge D3 capabilities
* @dev: PCI device which is changed
- * @remove: Is the device being removed
*
* Update upstream bridge PM capabilities accordingly depending on if the
* device PM configuration was changed or the device is being removed. The
* change is also propagated upstream.
*/
-static void pci_bridge_d3_update(struct pci_dev *dev, bool remove)
+void pci_bridge_d3_update(struct pci_dev *dev)
{
+ bool remove = !device_is_registered(&dev->dev);
struct pci_dev *bridge;
bool d3cold_ok = true;
@@ -2292,55 +2304,39 @@ static void pci_bridge_d3_update(struct pci_dev *dev, bool remove)
if (!bridge || !pci_bridge_d3_possible(bridge))
return;
- pci_dev_get(bridge);
/*
- * If the device is removed we do not care about its D3cold
- * capabilities.
+ * If D3 is currently allowed for the bridge, removing one of its
+ * children won't change that.
+ */
+ if (remove && bridge->bridge_d3)
+ return;
+
+ /*
+ * If D3 is currently allowed for the bridge and a child is added or
+ * changed, disallowance of D3 can only be caused by that child, so
+ * we only need to check that single device, not any of its siblings.
+ *
+ * If D3 is currently not allowed for the bridge, checking the device
+ * first may allow us to skip checking its siblings.
*/
if (!remove)
pci_dev_check_d3cold(dev, &d3cold_ok);
- if (d3cold_ok) {
- /*
- * We need to go through all children to find out if all of
- * them can still go to D3cold.
- */
+ /*
+ * If D3 is currently not allowed for the bridge, this may be caused
+ * either by the device being changed/removed or any of its siblings,
+ * so we need to go through all children to find out if one of them
+ * continues to block D3.
+ */
+ if (d3cold_ok && !bridge->bridge_d3)
pci_walk_bus(bridge->subordinate, pci_dev_check_d3cold,
&d3cold_ok);
- }
if (bridge->bridge_d3 != d3cold_ok) {
bridge->bridge_d3 = d3cold_ok;
/* Propagate change to upstream bridges */
- pci_bridge_d3_update(bridge, false);
+ pci_bridge_d3_update(bridge);
}
-
- pci_dev_put(bridge);
-}
-
-/**
- * pci_bridge_d3_device_changed - Update bridge D3 capabilities on change
- * @dev: PCI device that was changed
- *
- * If a device is added or its PM configuration, such as is it allowed to
- * enter D3cold, is changed this function updates upstream bridge PM
- * capabilities accordingly.
- */
-void pci_bridge_d3_device_changed(struct pci_dev *dev)
-{
- pci_bridge_d3_update(dev, false);
-}
-
-/**
- * pci_bridge_d3_device_removed - Update bridge D3 capabilities on remove
- * @dev: PCI device being removed
- *
- * Function updates upstream bridge PM capabilities based on other devices
- * still left on the bus.
- */
-void pci_bridge_d3_device_removed(struct pci_dev *dev)
-{
- pci_bridge_d3_update(dev, true);
}
/**
@@ -2355,7 +2351,7 @@ void pci_d3cold_enable(struct pci_dev *dev)
{
if (dev->no_d3cold) {
dev->no_d3cold = false;
- pci_bridge_d3_device_changed(dev);
+ pci_bridge_d3_update(dev);
}
}
EXPORT_SYMBOL_GPL(pci_d3cold_enable);
@@ -2372,7 +2368,7 @@ void pci_d3cold_disable(struct pci_dev *dev)
{
if (!dev->no_d3cold) {
dev->no_d3cold = true;
- pci_bridge_d3_device_changed(dev);
+ pci_bridge_d3_update(dev);
}
}
EXPORT_SYMBOL_GPL(pci_d3cold_disable);
@@ -4831,36 +4827,6 @@ int pci_select_bars(struct pci_dev *dev, unsigned long flags)
}
EXPORT_SYMBOL(pci_select_bars);
-/**
- * pci_resource_bar - get position of the BAR associated with a resource
- * @dev: the PCI device
- * @resno: the resource number
- * @type: the BAR type to be filled in
- *
- * Returns BAR position in config space, or 0 if the BAR is invalid.
- */
-int pci_resource_bar(struct pci_dev *dev, int resno, enum pci_bar_type *type)
-{
- int reg;
-
- if (resno < PCI_ROM_RESOURCE) {
- *type = pci_bar_unknown;
- return PCI_BASE_ADDRESS_0 + 4 * resno;
- } else if (resno == PCI_ROM_RESOURCE) {
- *type = pci_bar_mem32;
- return dev->rom_base_reg;
- } else if (resno < PCI_BRIDGE_RESOURCES) {
- /* device specific resource */
- *type = pci_bar_unknown;
- reg = pci_iov_resource_bar(dev, resno);
- if (reg)
- return reg;
- }
-
- dev_err(&dev->dev, "BAR %d: invalid resource\n", resno);
- return 0;
-}
-
/* Some architectures require additional programming to enable VGA */
static arch_set_vga_state_t arch_set_vga_state;
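[Editor's sketch] The D3cold bookkeeping above is driven from pci_d3cold_enable()/pci_d3cold_disable(), which now both funnel into the single pci_bridge_d3_update(). How a driver might opt its device out, e.g. because the device loses state it cannot restore (the probe callback name is illustrative):

static int example_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	/* Sets dev->no_d3cold and re-evaluates the upstream bridge. */
	pci_d3cold_disable(pdev);
	return 0;
}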
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index 451856210e18..cb17db242f30 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -1,9 +1,6 @@
#ifndef DRIVERS_PCI_H
#define DRIVERS_PCI_H
-#define PCI_CFG_SPACE_SIZE 256
-#define PCI_CFG_SPACE_EXP_SIZE 4096
-
#define PCI_FIND_CAP_TTL 48
extern const unsigned char pcie_link_speed[];
@@ -85,8 +82,8 @@ void pci_pm_init(struct pci_dev *dev);
void pci_ea_init(struct pci_dev *dev);
void pci_allocate_cap_save_buffers(struct pci_dev *dev);
void pci_free_cap_save_buffers(struct pci_dev *dev);
-void pci_bridge_d3_device_changed(struct pci_dev *dev);
-void pci_bridge_d3_device_removed(struct pci_dev *dev);
+bool pci_bridge_d3_possible(struct pci_dev *dev);
+void pci_bridge_d3_update(struct pci_dev *dev);
static inline void pci_wakeup_event(struct pci_dev *dev)
{
@@ -245,7 +242,6 @@ bool pci_bus_read_dev_vendor_id(struct pci_bus *bus, int devfn, u32 *pl,
int pci_setup_device(struct pci_dev *dev);
int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
struct resource *res, unsigned int reg);
-int pci_resource_bar(struct pci_dev *dev, int resno, enum pci_bar_type *type);
void pci_configure_ari(struct pci_dev *dev);
void __pci_bus_size_bridges(struct pci_bus *bus,
struct list_head *realloc_head);
@@ -289,7 +285,7 @@ static inline void pci_restore_ats_state(struct pci_dev *dev)
#ifdef CONFIG_PCI_IOV
int pci_iov_init(struct pci_dev *dev);
void pci_iov_release(struct pci_dev *dev);
-int pci_iov_resource_bar(struct pci_dev *dev, int resno);
+void pci_iov_update_resource(struct pci_dev *dev, int resno);
resource_size_t pci_sriov_resource_alignment(struct pci_dev *dev, int resno);
void pci_restore_iov_state(struct pci_dev *dev);
int pci_iov_bus_range(struct pci_bus *bus);
@@ -303,10 +299,6 @@ static inline void pci_iov_release(struct pci_dev *dev)
{
}
-static inline int pci_iov_resource_bar(struct pci_dev *dev, int resno)
-{
- return 0;
-}
static inline void pci_restore_iov_state(struct pci_dev *dev)
{
}
@@ -356,4 +348,9 @@ static inline int pci_dev_specific_reset(struct pci_dev *dev, int probe)
}
#endif
+#if defined(CONFIG_PCI_QUIRKS) && defined(CONFIG_ARM64)
+int acpi_get_rc_resources(struct device *dev, const char *hid, u16 segment,
+ struct resource *res);
+#endif
+
#endif /* DRIVERS_PCI_H */
diff --git a/drivers/pci/pcie/aer/aerdrv.c b/drivers/pci/pcie/aer/aerdrv.c
index 139150b2bdfd..dea186a9d6b6 100644
--- a/drivers/pci/pcie/aer/aerdrv.c
+++ b/drivers/pci/pcie/aer/aerdrv.c
@@ -30,13 +30,6 @@
#include "aerdrv.h"
#include "../../pci.h"
-/*
- * Version Information
- */
-#define DRIVER_VERSION "v1.0"
-#define DRIVER_AUTHOR "tom.l.nguyen@intel.com"
-#define DRIVER_DESC "Root Port Advanced Error Reporting Driver"
-
static int aer_probe(struct pcie_device *dev);
static void aer_remove(struct pcie_device *dev);
static pci_ers_result_t aer_error_detected(struct pci_dev *dev,
@@ -297,12 +290,12 @@ static int aer_probe(struct pcie_device *dev)
{
int status;
struct aer_rpc *rpc;
- struct device *device = &dev->device;
+ struct device *device = &dev->port->dev;
/* Alloc rpc data structure */
rpc = aer_alloc_rpc(dev);
if (!rpc) {
- dev_printk(KERN_DEBUG, device, "alloc rpc failed\n");
+ dev_printk(KERN_DEBUG, device, "alloc AER rpc failed\n");
aer_remove(dev);
return -ENOMEM;
}
@@ -310,7 +303,8 @@ static int aer_probe(struct pcie_device *dev)
/* Request IRQ ISR */
status = request_irq(dev->irq, aer_irq, IRQF_SHARED, "aerdrv", dev);
if (status) {
- dev_printk(KERN_DEBUG, device, "request IRQ failed\n");
+ dev_printk(KERN_DEBUG, device, "request AER IRQ %d failed\n",
+ dev->irq);
aer_remove(dev);
return status;
}
@@ -318,8 +312,8 @@ static int aer_probe(struct pcie_device *dev)
rpc->isr = 1;
aer_enable_rootport(rpc);
-
- return status;
+ dev_info(device, "AER enabled with IRQ %d\n", dev->irq);
+ return 0;
}
/**
diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
index 0ec649d961d7..17ac1dce3286 100644
--- a/drivers/pci/pcie/aspm.c
+++ b/drivers/pci/pcie/aspm.c
@@ -351,12 +351,26 @@ static void pcie_aspm_cap_init(struct pcie_link_state *link, int blacklist)
return;
}
+ /* Get upstream/downstream components' register state */
+ pcie_get_aspm_reg(parent, &upreg);
+ child = list_entry(linkbus->devices.next, struct pci_dev, bus_list);
+ pcie_get_aspm_reg(child, &dwreg);
+
+ /*
+ * If ASPM not supported, don't mess with the clocks and link,
+ * bail out now.
+ */
+ if (!(upreg.support & dwreg.support))
+ return;
+
/* Configure common clock before checking latencies */
pcie_aspm_configure_common_clock(link);
- /* Get upstream/downstream components' register state */
+ /*
+ * Re-read upstream/downstream components' register state
+ * after clock configuration
+ */
pcie_get_aspm_reg(parent, &upreg);
- child = list_entry(linkbus->devices.next, struct pci_dev, bus_list);
pcie_get_aspm_reg(child, &dwreg);
/*
@@ -886,8 +900,8 @@ static ssize_t clk_ctl_store(struct device *dev,
return n;
}
-static DEVICE_ATTR(link_state, 0644, link_state_show, link_state_store);
-static DEVICE_ATTR(clk_ctl, 0644, clk_ctl_show, clk_ctl_store);
+static DEVICE_ATTR_RW(link_state);
+static DEVICE_ATTR_RW(clk_ctl);
static char power_group[] = "power";
void pcie_aspm_create_sysfs_dev_files(struct pci_dev *pdev)
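[Editor's note] DEVICE_ATTR_RW(name) only works because the handlers already follow the name_show()/name_store() convention; it expands to a 0644 attribute wired to exactly those functions, which is why the open-coded DEVICE_ATTR() lines above could be dropped without touching the callbacks. Equivalent shape, with "foo" as a placeholder attribute:

static ssize_t foo_show(struct device *dev,
			struct device_attribute *attr, char *buf);
static ssize_t foo_store(struct device *dev, struct device_attribute *attr,
			 const char *buf, size_t count);
/* Same as DEVICE_ATTR(foo, 0644, foo_show, foo_store): */
static DEVICE_ATTR_RW(foo);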
diff --git a/drivers/pci/pcie/pme.c b/drivers/pci/pcie/pme.c
index 884bad5320f8..717529331dac 100644
--- a/drivers/pci/pcie/pme.c
+++ b/drivers/pci/pcie/pme.c
@@ -300,8 +300,6 @@ static irqreturn_t pcie_pme_irq(int irq, void *context)
*/
static int pcie_pme_set_native(struct pci_dev *dev, void *ign)
{
- dev_info(&dev->dev, "Signaling PME through PCIe PME interrupt\n");
-
device_set_run_wake(&dev->dev, true);
dev->pme_interrupt = true;
return 0;
@@ -319,23 +317,8 @@ static int pcie_pme_set_native(struct pci_dev *dev, void *ign)
static void pcie_pme_mark_devices(struct pci_dev *port)
{
pcie_pme_set_native(port, NULL);
- if (port->subordinate) {
+ if (port->subordinate)
pci_walk_bus(port->subordinate, pcie_pme_set_native, NULL);
- } else {
- struct pci_bus *bus = port->bus;
- struct pci_dev *dev;
-
- /* Check if this is a root port event collector. */
- if (pci_pcie_type(port) != PCI_EXP_TYPE_RC_EC || !bus)
- return;
-
- down_read(&pci_bus_sem);
- list_for_each_entry(dev, &bus->devices, bus_list)
- if (pci_is_pcie(dev)
- && pci_pcie_type(dev) == PCI_EXP_TYPE_RC_END)
- pcie_pme_set_native(dev, NULL);
- up_read(&pci_bus_sem);
- }
}
/**
@@ -364,12 +347,14 @@ static int pcie_pme_probe(struct pcie_device *srv)
ret = request_irq(srv->irq, pcie_pme_irq, IRQF_SHARED, "PCIe PME", srv);
if (ret) {
kfree(data);
- } else {
- pcie_pme_mark_devices(port);
- pcie_pme_interrupt_enable(port, true);
+ return ret;
}
- return ret;
+ dev_info(&port->dev, "Signaling PME with IRQ %d\n", srv->irq);
+
+ pcie_pme_mark_devices(port);
+ pcie_pme_interrupt_enable(port, true);
+ return 0;
}
static bool pcie_pme_check_wakeup(struct pci_bus *bus)
diff --git a/drivers/pci/pcie/portdrv_core.c b/drivers/pci/pcie/portdrv_core.c
index e9270b4026f3..9698289f105c 100644
--- a/drivers/pci/pcie/portdrv_core.c
+++ b/drivers/pci/pcie/portdrv_core.c
@@ -499,7 +499,6 @@ static int pcie_port_probe_service(struct device *dev)
if (status)
return status;
- dev_printk(KERN_DEBUG, dev, "service driver %s loaded\n", driver->name);
get_device(dev);
return 0;
}
@@ -524,8 +523,6 @@ static int pcie_port_remove_service(struct device *dev)
pciedev = to_pcie_device(dev);
driver = to_service_driver(dev->driver);
if (driver && driver->remove) {
- dev_printk(KERN_DEBUG, dev, "unloading service driver %s\n",
- driver->name);
driver->remove(pciedev);
put_device(dev);
}
diff --git a/drivers/pci/pcie/portdrv_pci.c b/drivers/pci/pcie/portdrv_pci.c
index 79327cc14e7d..8aa3f14bc87d 100644
--- a/drivers/pci/pcie/portdrv_pci.c
+++ b/drivers/pci/pcie/portdrv_pci.c
@@ -19,6 +19,7 @@
#include <linux/dmi.h>
#include <linux/pci-aspm.h>
+#include "../pci.h"
#include "portdrv.h"
#include "aer/aerdrv.h"
@@ -149,15 +150,7 @@ static int pcie_portdrv_probe(struct pci_dev *dev,
pci_save_state(dev);
- /*
- * Prevent runtime PM if the port is advertising support for PCIe
- * hotplug. Otherwise the BIOS hotplug SMI code might not be able
- * to enumerate devices behind this port properly (the port is
- * powered down preventing all config space accesses to the
- * subordinate devices). We can't be sure for native PCIe hotplug
- * either so prevent that as well.
- */
- if (!dev->is_hotplug_bridge) {
+ if (pci_bridge_d3_possible(dev)) {
/*
* Keep the port resumed 100ms to make sure things like
* config space accesses from userspace (lspci) will not
@@ -175,7 +168,7 @@ static int pcie_portdrv_probe(struct pci_dev *dev,
static void pcie_portdrv_remove(struct pci_dev *dev)
{
- if (!dev->is_hotplug_bridge) {
+ if (pci_bridge_d3_possible(dev)) {
pm_runtime_forbid(&dev->dev);
pm_runtime_get_noresume(&dev->dev);
pm_runtime_dont_use_autosuspend(&dev->dev);
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index 104c46d53121..e164b5c9f0f0 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -227,7 +227,8 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
mask64 = (u32)PCI_BASE_ADDRESS_MEM_MASK;
}
} else {
- res->flags |= (l & IORESOURCE_ROM_ENABLE);
+ if (l & PCI_ROM_ADDRESS_ENABLE)
+ res->flags |= IORESOURCE_ROM_ENABLE;
l64 = l & PCI_ROM_ADDRESS_MASK;
sz64 = sz & PCI_ROM_ADDRESS_MASK;
mask64 = (u32)PCI_ROM_ADDRESS_MASK;
@@ -521,18 +522,19 @@ static void pci_release_host_bridge_dev(struct device *dev)
kfree(bridge);
}
-static struct pci_host_bridge *pci_alloc_host_bridge(struct pci_bus *b)
+struct pci_host_bridge *pci_alloc_host_bridge(size_t priv)
{
struct pci_host_bridge *bridge;
- bridge = kzalloc(sizeof(*bridge), GFP_KERNEL);
+ bridge = kzalloc(sizeof(*bridge) + priv, GFP_KERNEL);
if (!bridge)
return NULL;
INIT_LIST_HEAD(&bridge->windows);
- bridge->bus = b;
+
return bridge;
}
+EXPORT_SYMBOL(pci_alloc_host_bridge);
static const unsigned char pcix_bus_speed[] = {
PCI_SPEED_UNKNOWN, /* 0 */
@@ -717,6 +719,123 @@ static void pci_set_bus_msi_domain(struct pci_bus *bus)
dev_set_msi_domain(&bus->dev, d);
}
+int pci_register_host_bridge(struct pci_host_bridge *bridge)
+{
+ struct device *parent = bridge->dev.parent;
+ struct resource_entry *window, *n;
+ struct pci_bus *bus, *b;
+ resource_size_t offset;
+ LIST_HEAD(resources);
+ struct resource *res;
+ char addr[64], *fmt;
+ const char *name;
+ int err;
+
+ bus = pci_alloc_bus(NULL);
+ if (!bus)
+ return -ENOMEM;
+
+ bridge->bus = bus;
+
+ /* temporarily move resources off the list */
+ list_splice_init(&bridge->windows, &resources);
+ bus->sysdata = bridge->sysdata;
+ bus->msi = bridge->msi;
+ bus->ops = bridge->ops;
+ bus->number = bus->busn_res.start = bridge->busnr;
+#ifdef CONFIG_PCI_DOMAINS_GENERIC
+ bus->domain_nr = pci_bus_find_domain_nr(bus, parent);
+#endif
+
+ b = pci_find_bus(pci_domain_nr(bus), bridge->busnr);
+ if (b) {
+ /* If we already got to this bus through a different bridge, ignore it */
+ dev_dbg(&b->dev, "bus already known\n");
+ err = -EEXIST;
+ goto free;
+ }
+
+ dev_set_name(&bridge->dev, "pci%04x:%02x", pci_domain_nr(bus),
+ bridge->busnr);
+
+ err = pcibios_root_bridge_prepare(bridge);
+ if (err)
+ goto free;
+
+ err = device_register(&bridge->dev);
+ if (err)
+ put_device(&bridge->dev);
+
+ bus->bridge = get_device(&bridge->dev);
+ device_enable_async_suspend(bus->bridge);
+ pci_set_bus_of_node(bus);
+ pci_set_bus_msi_domain(bus);
+
+ if (!parent)
+ set_dev_node(bus->bridge, pcibus_to_node(bus));
+
+ bus->dev.class = &pcibus_class;
+ bus->dev.parent = bus->bridge;
+
+ dev_set_name(&bus->dev, "%04x:%02x", pci_domain_nr(bus), bus->number);
+ name = dev_name(&bus->dev);
+
+ err = device_register(&bus->dev);
+ if (err)
+ goto unregister;
+
+ pcibios_add_bus(bus);
+
+ /* Create legacy_io and legacy_mem files for this bus */
+ pci_create_legacy_files(bus);
+
+ if (parent)
+ dev_info(parent, "PCI host bridge to bus %s\n", name);
+ else
+ pr_info("PCI host bridge to bus %s\n", name);
+
+ /* Add initial resources to the bus */
+ resource_list_for_each_entry_safe(window, n, &resources) {
+ list_move_tail(&window->node, &bridge->windows);
+ offset = window->offset;
+ res = window->res;
+
+ if (res->flags & IORESOURCE_BUS)
+ pci_bus_insert_busn_res(bus, bus->number, res->end);
+ else
+ pci_bus_add_resource(bus, res, 0);
+
+ if (offset) {
+ if (resource_type(res) == IORESOURCE_IO)
+ fmt = " (bus address [%#06llx-%#06llx])";
+ else
+ fmt = " (bus address [%#010llx-%#010llx])";
+
+ snprintf(addr, sizeof(addr), fmt,
+ (unsigned long long)(res->start - offset),
+ (unsigned long long)(res->end - offset));
+ } else
+ addr[0] = '\0';
+
+ dev_info(&bus->dev, "root bus resource %pR%s\n", res, addr);
+ }
+
+ down_write(&pci_bus_sem);
+ list_add_tail(&bus->node, &pci_root_buses);
+ up_write(&pci_bus_sem);
+
+ return 0;
+
+unregister:
+ put_device(&bridge->dev);
+ device_unregister(&bridge->dev);
+
+free:
+ kfree(bus);
+ return err;
+}
+EXPORT_SYMBOL(pci_register_host_bridge);
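[Editor's sketch] A host driver outline against the new pair of calls, assuming the caller already collected its bus/IO/memory windows into a resource list; "example_ops" is a placeholder struct pci_ops, and the kfree()-on-failure mirrors the converted in-tree caller below:

static int example_host_probe(struct device *dev, struct pci_ops *example_ops,
			      struct list_head *windows)
{
	struct pci_host_bridge *bridge;
	int err;

	/* Trailing driver-private data can be requested via the argument. */
	bridge = pci_alloc_host_bridge(0);
	if (!bridge)
		return -ENOMEM;

	bridge->dev.parent = dev;
	list_splice_init(windows, &bridge->windows);
	bridge->busnr = 0;
	bridge->ops = example_ops;

	err = pci_register_host_bridge(bridge);
	if (err)
		kfree(bridge);
	return err;
}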
+
static struct pci_bus *pci_alloc_child_bus(struct pci_bus *parent,
struct pci_dev *bridge, int busnr)
{
@@ -1764,8 +1883,7 @@ static void pci_dma_configure(struct pci_dev *dev)
if (attr == DEV_DMA_NOT_SUPPORTED)
dev_warn(&dev->dev, "DMA not supported.\n");
else
- arch_setup_dma_ops(&dev->dev, 0, 0, NULL,
- attr == DEV_DMA_COHERENT);
+ acpi_dma_configure(&dev->dev, attr);
}
pci_put_host_bridge_device(bridge);
@@ -2156,113 +2274,43 @@ void __weak pcibios_remove_bus(struct pci_bus *bus)
{
}
-struct pci_bus *pci_create_root_bus(struct device *parent, int bus,
- struct pci_ops *ops, void *sysdata, struct list_head *resources)
+static struct pci_bus *pci_create_root_bus_msi(struct device *parent,
+ int bus, struct pci_ops *ops, void *sysdata,
+ struct list_head *resources, struct msi_controller *msi)
{
int error;
struct pci_host_bridge *bridge;
- struct pci_bus *b, *b2;
- struct resource_entry *window, *n;
- struct resource *res;
- resource_size_t offset;
- char bus_addr[64];
- char *fmt;
-
- b = pci_alloc_bus(NULL);
- if (!b)
- return NULL;
- b->sysdata = sysdata;
- b->ops = ops;
- b->number = b->busn_res.start = bus;
-#ifdef CONFIG_PCI_DOMAINS_GENERIC
- b->domain_nr = pci_bus_find_domain_nr(b, parent);
-#endif
- b2 = pci_find_bus(pci_domain_nr(b), bus);
- if (b2) {
- /* If we already got to this bus through a different bridge, ignore it */
- dev_dbg(&b2->dev, "bus already known\n");
- goto err_out;
- }
-
- bridge = pci_alloc_host_bridge(b);
+ bridge = pci_alloc_host_bridge(0);
if (!bridge)
- goto err_out;
+ return NULL;
bridge->dev.parent = parent;
bridge->dev.release = pci_release_host_bridge_dev;
- dev_set_name(&bridge->dev, "pci%04x:%02x", pci_domain_nr(b), bus);
- error = pcibios_root_bridge_prepare(bridge);
- if (error) {
- kfree(bridge);
- goto err_out;
- }
-
- error = device_register(&bridge->dev);
- if (error) {
- put_device(&bridge->dev);
- goto err_out;
- }
- b->bridge = get_device(&bridge->dev);
- device_enable_async_suspend(b->bridge);
- pci_set_bus_of_node(b);
- pci_set_bus_msi_domain(b);
- if (!parent)
- set_dev_node(b->bridge, pcibus_to_node(b));
-
- b->dev.class = &pcibus_class;
- b->dev.parent = b->bridge;
- dev_set_name(&b->dev, "%04x:%02x", pci_domain_nr(b), bus);
- error = device_register(&b->dev);
- if (error)
- goto class_dev_reg_err;
+ list_splice_init(resources, &bridge->windows);
+ bridge->sysdata = sysdata;
+ bridge->busnr = bus;
+ bridge->ops = ops;
+ bridge->msi = msi;
- pcibios_add_bus(b);
-
- /* Create legacy_io and legacy_mem files for this bus */
- pci_create_legacy_files(b);
-
- if (parent)
- dev_info(parent, "PCI host bridge to bus %s\n", dev_name(&b->dev));
- else
- printk(KERN_INFO "PCI host bridge to bus %s\n", dev_name(&b->dev));
-
- /* Add initial resources to the bus */
- resource_list_for_each_entry_safe(window, n, resources) {
- list_move_tail(&window->node, &bridge->windows);
- res = window->res;
- offset = window->offset;
- if (res->flags & IORESOURCE_BUS)
- pci_bus_insert_busn_res(b, bus, res->end);
- else
- pci_bus_add_resource(b, res, 0);
- if (offset) {
- if (resource_type(res) == IORESOURCE_IO)
- fmt = " (bus address [%#06llx-%#06llx])";
- else
- fmt = " (bus address [%#010llx-%#010llx])";
- snprintf(bus_addr, sizeof(bus_addr), fmt,
- (unsigned long long) (res->start - offset),
- (unsigned long long) (res->end - offset));
- } else
- bus_addr[0] = '\0';
- dev_info(&b->dev, "root bus resource %pR%s\n", res, bus_addr);
- }
+ error = pci_register_host_bridge(bridge);
+ if (error < 0)
+ goto err_out;
- down_write(&pci_bus_sem);
- list_add_tail(&b->node, &pci_root_buses);
- up_write(&pci_bus_sem);
+ return bridge->bus;
- return b;
-
-class_dev_reg_err:
- put_device(&bridge->dev);
- device_unregister(&bridge->dev);
err_out:
- kfree(b);
+ kfree(bridge);
return NULL;
}
+
+struct pci_bus *pci_create_root_bus(struct device *parent, int bus,
+ struct pci_ops *ops, void *sysdata, struct list_head *resources)
+{
+ return pci_create_root_bus_msi(parent, bus, ops, sysdata, resources,
+ NULL);
+}
EXPORT_SYMBOL_GPL(pci_create_root_bus);
int pci_bus_insert_busn_res(struct pci_bus *b, int bus, int bus_max)
@@ -2343,12 +2391,10 @@ struct pci_bus *pci_scan_root_bus_msi(struct device *parent, int bus,
break;
}
- b = pci_create_root_bus(parent, bus, ops, sysdata, resources);
+ b = pci_create_root_bus_msi(parent, bus, ops, sysdata, resources, msi);
if (!b)
return NULL;
- b->msi = msi;
-
if (!found) {
dev_info(&b->dev,
"No busn resource found for root bus, will use [bus %02x-ff]\n",
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index c232729f5b1b..9236e40ac055 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -2156,7 +2156,7 @@ static void quirk_blacklist_vpd(struct pci_dev *dev)
{
if (dev->vpd) {
dev->vpd->len = 0;
- dev_warn(&dev->dev, FW_BUG "VPD access disabled\n");
+ dev_warn(&dev->dev, FW_BUG "disabling VPD access (can't determine size of non-standard VPD format)\n");
}
}
@@ -3137,8 +3137,9 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x22b5, quirk_remove_d3_delay);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x22b7, quirk_remove_d3_delay);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x2298, quirk_remove_d3_delay);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x229c, quirk_remove_d3_delay);
+
/*
- * Some devices may pass our check in pci_intx_mask_supported if
+ * Some devices may pass our check in pci_intx_mask_supported() if
* PCI_COMMAND_INTX_DISABLE works though they actually do not properly
* support this feature.
*/
@@ -3146,53 +3147,139 @@ static void quirk_broken_intx_masking(struct pci_dev *dev)
{
dev->broken_intx_masking = 1;
}
-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_CHELSIO, 0x0030,
- quirk_broken_intx_masking);
-DECLARE_PCI_FIXUP_HEADER(0x1814, 0x0601, /* Ralink RT2800 802.11n PCI */
- quirk_broken_intx_masking);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x0030,
+ quirk_broken_intx_masking);
+DECLARE_PCI_FIXUP_FINAL(0x1814, 0x0601, /* Ralink RT2800 802.11n PCI */
+ quirk_broken_intx_masking);
+
/*
* Realtek RTL8169 PCI Gigabit Ethernet Controller (rev 10)
* Subsystem: Realtek RTL8169/8110 Family PCI Gigabit Ethernet NIC
*
* RTL8110SC - Fails under PCI device assignment using DisINTx masking.
*/
-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_REALTEK, 0x8169,
- quirk_broken_intx_masking);
-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MELLANOX, PCI_ANY_ID,
- quirk_broken_intx_masking);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_REALTEK, 0x8169,
+ quirk_broken_intx_masking);
/*
* Intel i40e (XL710/X710) 10/20/40GbE NICs all have broken INTx masking,
* DisINTx can be set but the interrupt status bit is non-functional.
*/
-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1572,
- quirk_broken_intx_masking);
-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1574,
- quirk_broken_intx_masking);
-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1580,
- quirk_broken_intx_masking);
-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1581,
- quirk_broken_intx_masking);
-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1583,
- quirk_broken_intx_masking);
-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1584,
- quirk_broken_intx_masking);
-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1585,
- quirk_broken_intx_masking);
-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1586,
- quirk_broken_intx_masking);
-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1587,
- quirk_broken_intx_masking);
-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1588,
- quirk_broken_intx_masking);
-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1589,
- quirk_broken_intx_masking);
-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x37d0,
- quirk_broken_intx_masking);
-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x37d1,
- quirk_broken_intx_masking);
-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x37d2,
- quirk_broken_intx_masking);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1572,
+ quirk_broken_intx_masking);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1574,
+ quirk_broken_intx_masking);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1580,
+ quirk_broken_intx_masking);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1581,
+ quirk_broken_intx_masking);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1583,
+ quirk_broken_intx_masking);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1584,
+ quirk_broken_intx_masking);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1585,
+ quirk_broken_intx_masking);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1586,
+ quirk_broken_intx_masking);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1587,
+ quirk_broken_intx_masking);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1588,
+ quirk_broken_intx_masking);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1589,
+ quirk_broken_intx_masking);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x37d0,
+ quirk_broken_intx_masking);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x37d1,
+ quirk_broken_intx_masking);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x37d2,
+ quirk_broken_intx_masking);
+
+static u16 mellanox_broken_intx_devs[] = {
+ PCI_DEVICE_ID_MELLANOX_HERMON_SDR,
+ PCI_DEVICE_ID_MELLANOX_HERMON_DDR,
+ PCI_DEVICE_ID_MELLANOX_HERMON_QDR,
+ PCI_DEVICE_ID_MELLANOX_HERMON_DDR_GEN2,
+ PCI_DEVICE_ID_MELLANOX_HERMON_QDR_GEN2,
+ PCI_DEVICE_ID_MELLANOX_HERMON_EN,
+ PCI_DEVICE_ID_MELLANOX_HERMON_EN_GEN2,
+ PCI_DEVICE_ID_MELLANOX_CONNECTX_EN,
+ PCI_DEVICE_ID_MELLANOX_CONNECTX_EN_T_GEN2,
+ PCI_DEVICE_ID_MELLANOX_CONNECTX_EN_GEN2,
+ PCI_DEVICE_ID_MELLANOX_CONNECTX_EN_5_GEN2,
+ PCI_DEVICE_ID_MELLANOX_CONNECTX2,
+ PCI_DEVICE_ID_MELLANOX_CONNECTX3,
+ PCI_DEVICE_ID_MELLANOX_CONNECTX3_PRO,
+};
+
+#define CONNECTX_4_CURR_MAX_MINOR 99
+#define CONNECTX_4_INTX_SUPPORT_MINOR 14
+
+/*
+ * Check ConnectX-4/LX FW version to see if it supports legacy interrupts.
+ * If so, don't mark it as broken.
+ * FW minor > 99 means older FW version format and no INTx masking support.
+ * FW minor < 14 means new FW version format and no INTx masking support.
+ */
+static void mellanox_check_broken_intx_masking(struct pci_dev *pdev)
+{
+ __be32 __iomem *fw_ver;
+ u16 fw_major;
+ u16 fw_minor;
+ u16 fw_subminor;
+ u32 fw_maj_min;
+ u32 fw_sub_min;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(mellanox_broken_intx_devs); i++) {
+ if (pdev->device == mellanox_broken_intx_devs[i]) {
+ pdev->broken_intx_masking = 1;
+ return;
+ }
+ }
+
+ /* Getting here means Connect-IB cards and up. Connect-IB has no INTx
+ * support so shouldn't be checked further
+ */
+ if (pdev->device == PCI_DEVICE_ID_MELLANOX_CONNECTIB)
+ return;
+
+ if (pdev->device != PCI_DEVICE_ID_MELLANOX_CONNECTX4 &&
+ pdev->device != PCI_DEVICE_ID_MELLANOX_CONNECTX4_LX)
+ return;
+
+ /* For ConnectX-4 and ConnectX-4LX, need to check FW support */
+ if (pci_enable_device_mem(pdev)) {
+ dev_warn(&pdev->dev, "Can't enable device memory\n");
+ return;
+ }
+
+ fw_ver = ioremap(pci_resource_start(pdev, 0), 4);
+ if (!fw_ver) {
+ dev_warn(&pdev->dev, "Can't map ConnectX-4 initialization segment\n");
+ goto out;
+ }
+
+ /* Reading from resource space should be 32b aligned */
+ fw_maj_min = ioread32be(fw_ver);
+ fw_sub_min = ioread32be(fw_ver + 1);
+ fw_major = fw_maj_min & 0xffff;
+ fw_minor = fw_maj_min >> 16;
+ fw_subminor = fw_sub_min & 0xffff;
+ if (fw_minor > CONNECTX_4_CURR_MAX_MINOR ||
+ fw_minor < CONNECTX_4_INTX_SUPPORT_MINOR) {
+ dev_warn(&pdev->dev, "ConnectX-4: FW %u.%u.%u doesn't support INTx masking, disabling. Please upgrade FW to %d.14.1100 and up for INTx support\n",
+ fw_major, fw_minor, fw_subminor, pdev->device ==
+ PCI_DEVICE_ID_MELLANOX_CONNECTX4 ? 12 : 14);
+ pdev->broken_intx_masking = 1;
+ }
+
+ iounmap(fw_ver);
+
+out:
+ pci_disable_device(pdev);
+}
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MELLANOX, PCI_ANY_ID,
+ mellanox_check_broken_intx_masking);
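[Editor's sketch] The fixup above runs at FINAL stage, so BAR0 is already sized and mappable when the firmware version is read. The same hook shape in miniature (vendor/device IDs are placeholders):

static void example_quirk(struct pci_dev *pdev)
{
	pdev->broken_intx_masking = 1;
	dev_warn(&pdev->dev, "example: INTx masking assumed broken\n");
}
DECLARE_PCI_FIXUP_FINAL(0x1234, 0x5678, example_quirk);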
static void quirk_no_bus_reset(struct pci_dev *dev)
{
@@ -3255,6 +3342,25 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PORT_RIDGE,
quirk_thunderbolt_hotplug_msi);
+static void quirk_chelsio_extend_vpd(struct pci_dev *dev)
+{
+ pci_set_vpd_size(dev, 8192);
+}
+
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x20, quirk_chelsio_extend_vpd);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x21, quirk_chelsio_extend_vpd);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x22, quirk_chelsio_extend_vpd);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x23, quirk_chelsio_extend_vpd);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x24, quirk_chelsio_extend_vpd);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x25, quirk_chelsio_extend_vpd);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x26, quirk_chelsio_extend_vpd);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x30, quirk_chelsio_extend_vpd);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x31, quirk_chelsio_extend_vpd);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x32, quirk_chelsio_extend_vpd);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x35, quirk_chelsio_extend_vpd);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x36, quirk_chelsio_extend_vpd);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x37, quirk_chelsio_extend_vpd);
+
#ifdef CONFIG_ACPI
/*
* Apple: Shutdown Cactus Ridge Thunderbolt controller.
diff --git a/drivers/pci/remove.c b/drivers/pci/remove.c
index f9357e09e9b3..73a03d382590 100644
--- a/drivers/pci/remove.c
+++ b/drivers/pci/remove.c
@@ -40,7 +40,7 @@ static void pci_destroy_dev(struct pci_dev *dev)
list_del(&dev->bus_list);
up_write(&pci_bus_sem);
- pci_bridge_d3_device_removed(dev);
+ pci_bridge_d3_update(dev);
pci_free_resources(dev);
put_device(&dev->dev);
}
diff --git a/drivers/pci/rom.c b/drivers/pci/rom.c
index 06663d391b39..b6edb187d160 100644
--- a/drivers/pci/rom.c
+++ b/drivers/pci/rom.c
@@ -35,6 +35,11 @@ int pci_enable_rom(struct pci_dev *pdev)
if (res->flags & IORESOURCE_ROM_SHADOW)
return 0;
+ /*
+ * Ideally pci_update_resource() would update the ROM BAR address,
+ * and we would only set the enable bit here. But apparently some
+ * devices have buggy ROM BARs that read as zero when disabled.
+ */
pcibios_resource_to_bus(pdev->bus, &region, res);
pci_read_config_dword(pdev, pdev->rom_base_reg, &rom_addr);
rom_addr &= ~PCI_ROM_ADDRESS_MASK;
diff --git a/drivers/pci/setup-res.c b/drivers/pci/setup-res.c
index 9526e341988b..4bc589ee78d0 100644
--- a/drivers/pci/setup-res.c
+++ b/drivers/pci/setup-res.c
@@ -25,21 +25,18 @@
#include <linux/slab.h>
#include "pci.h"
-
-void pci_update_resource(struct pci_dev *dev, int resno)
+static void pci_std_update_resource(struct pci_dev *dev, int resno)
{
struct pci_bus_region region;
bool disable;
u16 cmd;
u32 new, check, mask;
int reg;
- enum pci_bar_type type;
struct resource *res = dev->resource + resno;
- if (dev->is_virtfn) {
- dev_warn(&dev->dev, "can't update VF BAR%d\n", resno);
+ /* Per SR-IOV spec 3.4.1.11, VF BARs are RO zero */
+ if (dev->is_virtfn)
return;
- }
/*
* Ignore resources for unimplemented BARs and unused resource slots
@@ -60,21 +57,34 @@ void pci_update_resource(struct pci_dev *dev, int resno)
return;
pcibios_resource_to_bus(dev->bus, &region, res);
+ new = region.start;
- new = region.start | (res->flags & PCI_REGION_FLAG_MASK);
- if (res->flags & IORESOURCE_IO)
+ if (res->flags & IORESOURCE_IO) {
mask = (u32)PCI_BASE_ADDRESS_IO_MASK;
- else
+ new |= res->flags & ~PCI_BASE_ADDRESS_IO_MASK;
+ } else if (resno == PCI_ROM_RESOURCE) {
+ mask = (u32)PCI_ROM_ADDRESS_MASK;
+ } else {
mask = (u32)PCI_BASE_ADDRESS_MEM_MASK;
+ new |= res->flags & ~PCI_BASE_ADDRESS_MEM_MASK;
+ }
- reg = pci_resource_bar(dev, resno, &type);
- if (!reg)
- return;
- if (type != pci_bar_unknown) {
+ if (resno < PCI_ROM_RESOURCE) {
+ reg = PCI_BASE_ADDRESS_0 + 4 * resno;
+ } else if (resno == PCI_ROM_RESOURCE) {
+
+ /*
+ * Apparently some Matrox devices have ROM BARs that read
+ * as zero when disabled, so don't update ROM BARs unless
+ * they're enabled. See https://lkml.org/lkml/2005/8/30/138.
+ */
if (!(res->flags & IORESOURCE_ROM_ENABLE))
return;
+
+ reg = dev->rom_base_reg;
new |= PCI_ROM_ADDRESS_ENABLE;
- }
+ } else
+ return;
/*
* We can't update a 64-bit BAR atomically, so when possible,
@@ -110,6 +120,16 @@ void pci_update_resource(struct pci_dev *dev, int resno)
pci_write_config_word(dev, PCI_COMMAND, cmd);
}
+void pci_update_resource(struct pci_dev *dev, int resno)
+{
+ if (resno <= PCI_ROM_RESOURCE)
+ pci_std_update_resource(dev, resno);
+#ifdef CONFIG_PCI_IOV
+ else if (resno >= PCI_IOV_RESOURCES && resno <= PCI_IOV_RESOURCE_END)
+ pci_iov_update_resource(dev, resno);
+#endif
+}
+
int pci_claim_resource(struct pci_dev *dev, int resource)
{
struct resource *res = &dev->resource[resource];
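[Editor's note] For reference, the offsets behind the dispatch above: standard BARs sit 4 bytes apart starting at 0x10, the ROM BAR register comes from dev->rom_base_reg, and VF BARs (PCI_IOV_RESOURCES..PCI_IOV_RESOURCE_END) are handled by pci_iov_update_resource(). Illustrative helper only:

/* Config-space offset of a standard BAR (resno 0-5). */
static int example_std_bar_offset(int resno)
{
	return PCI_BASE_ADDRESS_0 + 4 * resno;	/* 0x10, 0x14, ..., 0x24 */
}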
diff --git a/drivers/pci/xen-pcifront.c b/drivers/pci/xen-pcifront.c
index d6ff5e82377d..8fc2e9532575 100644
--- a/drivers/pci/xen-pcifront.c
+++ b/drivers/pci/xen-pcifront.c
@@ -1038,10 +1038,8 @@ static int pcifront_detach_devices(struct pcifront_device *pdev)
err = -ENOMEM;
goto out;
}
- err = xenbus_scanf(XBT_NIL, pdev->xdev->otherend, str, "%d",
- &state);
- if (err != 1)
- state = XenbusStateUnknown;
+ state = xenbus_read_unsigned(pdev->xdev->otherend, str,
+ XenbusStateUnknown);
if (state != XenbusStateClosing)
continue;
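[Editor's sketch] xenbus_read_unsigned() collapses the read-and-parse-with-fallback pattern into one call; the third argument is returned when the node is absent or unparsable. Shape of the helper as used above (function name is illustrative):

static enum xenbus_state example_read_state(struct xenbus_device *xdev,
					    const char *node)
{
	/* Returns XenbusStateUnknown if the node is absent/unparsable. */
	return xenbus_read_unsigned(xdev->otherend, node, XenbusStateUnknown);
}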
diff --git a/drivers/pcmcia/m32r_pcc.c b/drivers/pcmcia/m32r_pcc.c
index eb126b98ed8a..e50bbf826188 100644
--- a/drivers/pcmcia/m32r_pcc.c
+++ b/drivers/pcmcia/m32r_pcc.c
@@ -296,10 +296,11 @@ static int __init is_alive(u_short sock)
return 0;
}
-static void add_pcc_socket(ulong base, int irq, ulong mapaddr,
- unsigned int ioaddr)
+static int add_pcc_socket(ulong base, int irq, ulong mapaddr,
+ unsigned int ioaddr)
{
pcc_socket_t *t = &socket[pcc_sockets];
+ int err;
/* add sockets */
t->ioaddr = ioaddr;
@@ -328,11 +329,16 @@ static void add_pcc_socket(ulong base, int irq, ulong mapaddr,
t->socket.irq_mask = 0;
t->socket.pci_irq = 2 + pcc_sockets; /* XXX */
- request_irq(irq, pcc_interrupt, 0, "m32r-pcc", pcc_interrupt);
+ err = request_irq(irq, pcc_interrupt, 0, "m32r-pcc", pcc_interrupt);
+ if (err) {
+ if (t->base > 0)
+ release_region(t->base, 0x20);
+ return err;
+ }
pcc_sockets++;
- return;
+ return 0;
}
@@ -683,26 +689,29 @@ static int __init init_m32r_pcc(void)
return ret;
ret = platform_device_register(&pcc_device);
- if (ret){
- platform_driver_unregister(&pcc_driver);
- return ret;
- }
+ if (ret)
+ goto unreg_driv;
printk(KERN_INFO "m32r PCC probe:\n");
pcc_sockets = 0;
- add_pcc_socket(M32R_PCC0_BASE, PCC0_IRQ, M32R_PCC0_MAPBASE, 0x1000);
+ ret = add_pcc_socket(M32R_PCC0_BASE, PCC0_IRQ, M32R_PCC0_MAPBASE,
+ 0x1000);
+ if (ret)
+ goto unreg_dev;
#ifdef CONFIG_M32RPCC_SLOT2
- add_pcc_socket(M32R_PCC1_BASE, PCC1_IRQ, M32R_PCC1_MAPBASE, 0x2000);
+ ret = add_pcc_socket(M32R_PCC1_BASE, PCC1_IRQ, M32R_PCC1_MAPBASE,
+ 0x2000);
+ if (ret)
+ goto unreg_dev;
#endif
if (pcc_sockets == 0) {
printk("socket is not found.\n");
- platform_device_unregister(&pcc_device);
- platform_driver_unregister(&pcc_driver);
- return -ENODEV;
+ ret = -ENODEV;
+ goto unreg_dev;
}
/* Set up interrupt handler(s) */
@@ -728,6 +737,12 @@ static int __init init_m32r_pcc(void)
}
return 0;
+
+unreg_dev:
+ platform_device_unregister(&pcc_device);
+unreg_driv:
+ platform_driver_unregister(&pcc_driver);
+ return ret;
} /* init_m32r_pcc */
static void __exit exit_m32r_pcc(void)
diff --git a/drivers/phy/Kconfig b/drivers/phy/Kconfig
index fe00f9134d51..e8eb7f225a88 100644
--- a/drivers/phy/Kconfig
+++ b/drivers/phy/Kconfig
@@ -129,16 +129,6 @@ config PHY_MIPHY28LP
Enable this to support the miphy transceiver (for SATA/PCIE/USB3)
that is part of STMicroelectronics STiH407 SoC.
-config PHY_MIPHY365X
- tristate "STMicroelectronics MIPHY365X PHY driver for STiH41x series"
- depends on ARCH_STI
- depends on HAS_IOMEM
- depends on OF
- select GENERIC_PHY
- help
- Enable this to support the miphy transceiver (for SATA/PCIE)
- that is part of STMicroelectronics STiH41x SoC series.
-
config PHY_RCAR_GEN2
tristate "Renesas R-Car generation 2 USB PHY driver"
depends on ARCH_RENESAS
@@ -373,7 +363,9 @@ config PHY_ROCKCHIP_INNO_USB2
tristate "Rockchip INNO USB2PHY Driver"
depends on (ARCH_ROCKCHIP || COMPILE_TEST) && OF
depends on COMMON_CLK
+ depends on USB_SUPPORT
select GENERIC_PHY
+ select USB_COMMON
help
Support for Rockchip USB2.0 PHY with Innosilicon IP block.
@@ -438,14 +430,6 @@ config PHY_STIH407_USB
Enable this support to enable the picoPHY device used by USB2
and USB3 controllers on STMicroelectronics STiH407 SoC families.
-config PHY_STIH41X_USB
- tristate "STMicroelectronics USB2 PHY driver for STiH41x series"
- depends on ARCH_STI
- select GENERIC_PHY
- help
- Enable this to support the USB transceiver that is part of
- STMicroelectronics STiH41x SoC series.
-
config PHY_QCOM_UFS
tristate "Qualcomm UFS PHY driver"
depends on OF && ARCH_QCOM
@@ -489,4 +473,17 @@ config PHY_NS2_PCIE
help
Enable this to support the Broadcom Northstar2 PCIe PHY.
If unsure, say N.
+
+config PHY_MESON8B_USB2
+ tristate "Meson8b and GXBB USB2 PHY driver"
+ default ARCH_MESON
+ depends on OF && (ARCH_MESON || COMPILE_TEST)
+ depends on USB_SUPPORT
+ select USB_COMMON
+ select GENERIC_PHY
+ help
+ Enable this to support the Meson USB2 PHYs found in Meson8b
+ and GXBB SoCs.
+ If unsure, say N.
+
endmenu
diff --git a/drivers/phy/Makefile b/drivers/phy/Makefile
index a534cf5be07d..65eb2f436a41 100644
--- a/drivers/phy/Makefile
+++ b/drivers/phy/Makefile
@@ -18,7 +18,6 @@ obj-$(CONFIG_PHY_PXA_28NM_USB2) += phy-pxa-28nm-usb2.o
obj-$(CONFIG_PHY_PXA_28NM_HSIC) += phy-pxa-28nm-hsic.o
obj-$(CONFIG_PHY_MVEBU_SATA) += phy-mvebu-sata.o
obj-$(CONFIG_PHY_MIPHY28LP) += phy-miphy28lp.o
-obj-$(CONFIG_PHY_MIPHY365X) += phy-miphy365x.o
obj-$(CONFIG_PHY_RCAR_GEN2) += phy-rcar-gen2.o
obj-$(CONFIG_PHY_RCAR_GEN3_USB2) += phy-rcar-gen3-usb2.o
obj-$(CONFIG_OMAP_CONTROL_PHY) += phy-omap-control.o
@@ -50,7 +49,6 @@ obj-$(CONFIG_PHY_ST_SPEAR1310_MIPHY) += phy-spear1310-miphy.o
obj-$(CONFIG_PHY_ST_SPEAR1340_MIPHY) += phy-spear1340-miphy.o
obj-$(CONFIG_PHY_XGENE) += phy-xgene.o
obj-$(CONFIG_PHY_STIH407_USB) += phy-stih407-usb.o
-obj-$(CONFIG_PHY_STIH41X_USB) += phy-stih41x-usb.o
obj-$(CONFIG_PHY_QCOM_UFS) += phy-qcom-ufs.o
obj-$(CONFIG_PHY_QCOM_UFS) += phy-qcom-ufs-qmp-20nm.o
obj-$(CONFIG_PHY_QCOM_UFS) += phy-qcom-ufs-qmp-14nm.o
@@ -60,3 +58,4 @@ obj-$(CONFIG_PHY_PISTACHIO_USB) += phy-pistachio-usb.o
obj-$(CONFIG_PHY_CYGNUS_PCIE) += phy-bcm-cygnus-pcie.o
obj-$(CONFIG_ARCH_TEGRA) += tegra/
obj-$(CONFIG_PHY_NS2_PCIE) += phy-bcm-ns2-pcie.o
+obj-$(CONFIG_PHY_MESON8B_USB2) += phy-meson8b-usb2.o
diff --git a/drivers/phy/phy-berlin-sata.c b/drivers/phy/phy-berlin-sata.c
index f84a33a1bdd9..2c7a57f2d595 100644
--- a/drivers/phy/phy-berlin-sata.c
+++ b/drivers/phy/phy-berlin-sata.c
@@ -85,7 +85,6 @@ static int phy_berlin_sata_power_on(struct phy *phy)
struct phy_berlin_desc *desc = phy_get_drvdata(phy);
struct phy_berlin_priv *priv = dev_get_drvdata(phy->dev.parent);
void __iomem *ctrl_reg = priv->base + 0x60 + (desc->index * 0x80);
- int ret = 0;
u32 regval;
clk_prepare_enable(priv->clk);
@@ -130,7 +129,7 @@ static int phy_berlin_sata_power_on(struct phy *phy)
clk_disable_unprepare(priv->clk);
- return ret;
+ return 0;
}
static int phy_berlin_sata_power_off(struct phy *phy)
diff --git a/drivers/phy/phy-brcm-sata.c b/drivers/phy/phy-brcm-sata.c
index 8ffc44afdb75..ccbc3d994998 100644
--- a/drivers/phy/phy-brcm-sata.c
+++ b/drivers/phy/phy-brcm-sata.c
@@ -140,7 +140,7 @@ static inline void __iomem *brcm_sata_pcb_base(struct brcm_sata_port *port)
default:
dev_err(priv->dev, "invalid phy version\n");
break;
- };
+ }
return priv->phy_base + (port->portnum * size);
}
@@ -157,7 +157,7 @@ static inline void __iomem *brcm_sata_ctrl_base(struct brcm_sata_port *port)
default:
dev_err(priv->dev, "invalid phy version\n");
break;
- };
+ }
return priv->ctrl_base + (port->portnum * size);
}
@@ -365,7 +365,7 @@ static int brcm_sata_phy_init(struct phy *phy)
break;
default:
rc = -ENODEV;
- };
+ }
return rc;
}
diff --git a/drivers/phy/phy-da8xx-usb.c b/drivers/phy/phy-da8xx-usb.c
index c85fb0b59729..1b82bff6330f 100644
--- a/drivers/phy/phy-da8xx-usb.c
+++ b/drivers/phy/phy-da8xx-usb.c
@@ -23,6 +23,8 @@
#include <linux/platform_device.h>
#include <linux/regmap.h>
+#define PHY_INIT_BITS (CFGCHIP2_SESENDEN | CFGCHIP2_VBDTCTEN)
+
struct da8xx_usb_phy {
struct phy_provider *phy_provider;
struct phy *usb11_phy;
@@ -208,6 +210,9 @@ static int da8xx_usb_phy_probe(struct platform_device *pdev)
dev_warn(dev, "Failed to create usb20 phy lookup\n");
}
+ regmap_write_bits(d_phy->regmap, CFGCHIP(2),
+ PHY_INIT_BITS, PHY_INIT_BITS);
+
return 0;
}
diff --git a/drivers/phy/phy-exynos-mipi-video.c b/drivers/phy/phy-exynos-mipi-video.c
index 8b851f718123..6bee04cc4d53 100644
--- a/drivers/phy/phy-exynos-mipi-video.c
+++ b/drivers/phy/phy-exynos-mipi-video.c
@@ -229,19 +229,6 @@ struct exynos_mipi_video_phy {
spinlock_t slock;
};
-static inline int __is_running(const struct exynos_mipi_phy_desc *data,
- struct exynos_mipi_video_phy *state)
-{
- u32 val;
- int ret;
-
- ret = regmap_read(state->regmaps[data->resetn_map], data->resetn_reg, &val);
- if (ret)
- return 0;
-
- return val & data->resetn_val;
-}
-
static int __set_phy_state(const struct exynos_mipi_phy_desc *data,
struct exynos_mipi_video_phy *state, unsigned int on)
{
@@ -251,7 +238,7 @@ static int __set_phy_state(const struct exynos_mipi_phy_desc *data,
/* disable in PMU sysreg */
if (!on && data->coupled_phy_id >= 0 &&
- !__is_running(state->phys[data->coupled_phy_id].data, state)) {
+ state->phys[data->coupled_phy_id].phy->power_count == 0) {
regmap_read(state->regmaps[data->enable_map], data->enable_reg,
&val);
val &= ~data->enable_val;
diff --git a/drivers/phy/phy-exynos4210-usb2.c b/drivers/phy/phy-exynos4210-usb2.c
index f30bbb0fb3b2..1f50e1004828 100644
--- a/drivers/phy/phy-exynos4210-usb2.c
+++ b/drivers/phy/phy-exynos4210-usb2.c
@@ -141,7 +141,7 @@ static void exynos4210_isol(struct samsung_usb2_phy_instance *inst, bool on)
break;
default:
return;
- };
+ }
regmap_update_bits(drv->reg_pmu, offset, mask, on ? 0 : mask);
}
@@ -179,7 +179,7 @@ static void exynos4210_phy_pwr(struct samsung_usb2_phy_instance *inst, bool on)
rstbits = EXYNOS_4210_URSTCON_PHY1_P1P2 |
EXYNOS_4210_URSTCON_HOST_LINK_P2;
break;
- };
+ }
if (on) {
clk = readl(drv->reg_phy + EXYNOS_4210_UPHYCLK);
diff --git a/drivers/phy/phy-exynos4x12-usb2.c b/drivers/phy/phy-exynos4x12-usb2.c
index 765da90a536f..7f27a91acf87 100644
--- a/drivers/phy/phy-exynos4x12-usb2.c
+++ b/drivers/phy/phy-exynos4x12-usb2.c
@@ -187,7 +187,7 @@ static void exynos4x12_isol(struct samsung_usb2_phy_instance *inst, bool on)
break;
default:
return;
- };
+ }
regmap_update_bits(drv->reg_pmu, offset, mask, on ? 0 : mask);
}
@@ -237,7 +237,7 @@ static void exynos4x12_phy_pwr(struct samsung_usb2_phy_instance *inst, bool on)
rstbits = EXYNOS_4x12_URSTCON_HSIC1 |
EXYNOS_4x12_URSTCON_HOST_LINK_P1;
break;
- };
+ }
if (on) {
pwr = readl(drv->reg_phy + EXYNOS_4x12_UPHYPWR);
diff --git a/drivers/phy/phy-exynos5250-usb2.c b/drivers/phy/phy-exynos5250-usb2.c
index 2ed1735a076a..aad806272305 100644
--- a/drivers/phy/phy-exynos5250-usb2.c
+++ b/drivers/phy/phy-exynos5250-usb2.c
@@ -192,7 +192,7 @@ static void exynos5250_isol(struct samsung_usb2_phy_instance *inst, bool on)
break;
default:
return;
- };
+ }
regmap_update_bits(drv->reg_pmu, offset, mask, on ? 0 : mask);
}
diff --git a/drivers/phy/phy-meson8b-usb2.c b/drivers/phy/phy-meson8b-usb2.c
new file mode 100644
index 000000000000..33c9f4ba157d
--- /dev/null
+++ b/drivers/phy/phy-meson8b-usb2.c
@@ -0,0 +1,286 @@
+/*
+ * Meson8b and GXBB USB2 PHY driver
+ *
+ * Copyright (C) 2016 Martin Blumenstingl <martin.blumenstingl@googlemail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/reset.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/usb/of.h>
+
+#define REG_CONFIG 0x00
+ #define REG_CONFIG_CLK_EN BIT(0)
+ #define REG_CONFIG_CLK_SEL_MASK GENMASK(3, 1)
+ #define REG_CONFIG_CLK_DIV_MASK GENMASK(10, 4)
+ #define REG_CONFIG_CLK_32k_ALTSEL BIT(15)
+ #define REG_CONFIG_TEST_TRIG BIT(31)
+
+#define REG_CTRL 0x04
+ #define REG_CTRL_SOFT_PRST BIT(0)
+ #define REG_CTRL_SOFT_HRESET BIT(1)
+ #define REG_CTRL_SS_SCALEDOWN_MODE_MASK GENMASK(3, 2)
+ #define REG_CTRL_CLK_DET_RST BIT(4)
+ #define REG_CTRL_INTR_SEL BIT(5)
+ #define REG_CTRL_CLK_DETECTED BIT(8)
+ #define REG_CTRL_SOF_SENT_RCVD_TGL BIT(9)
+ #define REG_CTRL_SOF_TOGGLE_OUT BIT(10)
+ #define REG_CTRL_POWER_ON_RESET BIT(15)
+ #define REG_CTRL_SLEEPM BIT(16)
+ #define REG_CTRL_TX_BITSTUFF_ENN_H BIT(17)
+ #define REG_CTRL_TX_BITSTUFF_ENN BIT(18)
+ #define REG_CTRL_COMMON_ON BIT(19)
+ #define REG_CTRL_REF_CLK_SEL_MASK GENMASK(21, 20)
+ #define REG_CTRL_REF_CLK_SEL_SHIFT 20
+ #define REG_CTRL_FSEL_MASK GENMASK(24, 22)
+ #define REG_CTRL_FSEL_SHIFT 22
+ #define REG_CTRL_PORT_RESET BIT(25)
+ #define REG_CTRL_THREAD_ID_MASK GENMASK(31, 26)
+
+#define REG_ENDP_INTR 0x08
+
+/* bits [31:26], [24:21] and [15:3] seem to be read-only */
+#define REG_ADP_BC 0x0c
+ #define REG_ADP_BC_VBUS_VLD_EXT_SEL BIT(0)
+ #define REG_ADP_BC_VBUS_VLD_EXT BIT(1)
+ #define REG_ADP_BC_OTG_DISABLE BIT(2)
+ #define REG_ADP_BC_ID_PULLUP BIT(3)
+ #define REG_ADP_BC_DRV_VBUS BIT(4)
+ #define REG_ADP_BC_ADP_PRB_EN BIT(5)
+ #define REG_ADP_BC_ADP_DISCHARGE BIT(6)
+ #define REG_ADP_BC_ADP_CHARGE BIT(7)
+ #define REG_ADP_BC_SESS_END BIT(8)
+ #define REG_ADP_BC_DEVICE_SESS_VLD BIT(9)
+ #define REG_ADP_BC_B_VALID BIT(10)
+ #define REG_ADP_BC_A_VALID BIT(11)
+ #define REG_ADP_BC_ID_DIG BIT(12)
+ #define REG_ADP_BC_VBUS_VALID BIT(13)
+ #define REG_ADP_BC_ADP_PROBE BIT(14)
+ #define REG_ADP_BC_ADP_SENSE BIT(15)
+ #define REG_ADP_BC_ACA_ENABLE BIT(16)
+ #define REG_ADP_BC_DCD_ENABLE BIT(17)
+ #define REG_ADP_BC_VDAT_DET_EN_B BIT(18)
+ #define REG_ADP_BC_VDAT_SRC_EN_B BIT(19)
+ #define REG_ADP_BC_CHARGE_SEL BIT(20)
+ #define REG_ADP_BC_CHARGE_DETECT BIT(21)
+ #define REG_ADP_BC_ACA_PIN_RANGE_C BIT(22)
+ #define REG_ADP_BC_ACA_PIN_RANGE_B BIT(23)
+ #define REG_ADP_BC_ACA_PIN_RANGE_A BIT(24)
+ #define REG_ADP_BC_ACA_PIN_GND BIT(25)
+ #define REG_ADP_BC_ACA_PIN_FLOAT BIT(26)
+
+#define REG_DBG_UART 0x14
+
+#define REG_TEST 0x18
+ #define REG_TEST_DATA_IN_MASK GENMASK(3, 0)
+ #define REG_TEST_EN_MASK GENMASK(7, 4)
+ #define REG_TEST_ADDR_MASK GENMASK(11, 8)
+ #define REG_TEST_DATA_OUT_SEL BIT(12)
+ #define REG_TEST_CLK BIT(13)
+ #define REG_TEST_VA_TEST_EN_B_MASK GENMASK(15, 14)
+ #define REG_TEST_DATA_OUT_MASK GENMASK(19, 16)
+ #define REG_TEST_DISABLE_ID_PULLUP BIT(20)
+
+#define REG_TUNE 0x1c
+ #define REG_TUNE_TX_RES_TUNE_MASK GENMASK(1, 0)
+ #define REG_TUNE_TX_HSXV_TUNE_MASK GENMASK(3, 2)
+ #define REG_TUNE_TX_VREF_TUNE_MASK GENMASK(7, 4)
+ #define REG_TUNE_TX_RISE_TUNE_MASK GENMASK(9, 8)
+ #define REG_TUNE_TX_PREEMP_PULSE_TUNE BIT(10)
+ #define REG_TUNE_TX_PREEMP_AMP_TUNE_MASK GENMASK(12, 11)
+ #define REG_TUNE_TX_FSLS_TUNE_MASK GENMASK(16, 13)
+ #define REG_TUNE_SQRX_TUNE_MASK GENMASK(19, 17)
+ #define REG_TUNE_OTG_TUNE GENMASK(22, 20)
+ #define REG_TUNE_COMP_DIS_TUNE GENMASK(25, 23)
+ #define REG_TUNE_HOST_DM_PULLDOWN BIT(26)
+ #define REG_TUNE_HOST_DP_PULLDOWN BIT(27)
+
+#define RESET_COMPLETE_TIME 500
+#define ACA_ENABLE_COMPLETE_TIME 50
+
+struct phy_meson8b_usb2_priv {
+ void __iomem *regs;
+ enum usb_dr_mode dr_mode;
+ struct clk *clk_usb_general;
+ struct clk *clk_usb;
+ struct reset_control *reset;
+};
+
+static u32 phy_meson8b_usb2_read(struct phy_meson8b_usb2_priv *phy_priv,
+ u32 reg)
+{
+ return readl(phy_priv->regs + reg);
+}
+
+static void phy_meson8b_usb2_mask_bits(struct phy_meson8b_usb2_priv *phy_priv,
+ u32 reg, u32 mask, u32 value)
+{
+ u32 data;
+
+ data = phy_meson8b_usb2_read(phy_priv, reg);
+ data &= ~mask;
+ data |= (value & mask);
+
+ writel(data, phy_priv->regs + reg);
+}
+
+static int phy_meson8b_usb2_power_on(struct phy *phy)
+{
+ struct phy_meson8b_usb2_priv *priv = phy_get_drvdata(phy);
+ int ret;
+
+ if (!IS_ERR_OR_NULL(priv->reset)) {
+ ret = reset_control_reset(priv->reset);
+ if (ret) {
+ dev_err(&phy->dev, "Failed to trigger USB reset\n");
+ return ret;
+ }
+ }
+
+ ret = clk_prepare_enable(priv->clk_usb_general);
+ if (ret) {
+ dev_err(&phy->dev, "Failed to enable USB general clock\n");
+ return ret;
+ }
+
+ ret = clk_prepare_enable(priv->clk_usb);
+ if (ret) {
+ dev_err(&phy->dev, "Failed to enable USB DDR clock\n");
+ clk_disable_unprepare(priv->clk_usb_general);
+ return ret;
+ }
+
+ phy_meson8b_usb2_mask_bits(priv, REG_CONFIG, REG_CONFIG_CLK_32k_ALTSEL,
+ REG_CONFIG_CLK_32k_ALTSEL);
+
+ phy_meson8b_usb2_mask_bits(priv, REG_CTRL, REG_CTRL_REF_CLK_SEL_MASK,
+ 0x2 << REG_CTRL_REF_CLK_SEL_SHIFT);
+
+ phy_meson8b_usb2_mask_bits(priv, REG_CTRL, REG_CTRL_FSEL_MASK,
+ 0x5 << REG_CTRL_FSEL_SHIFT);
+
+ /* reset the PHY */
+ phy_meson8b_usb2_mask_bits(priv, REG_CTRL, REG_CTRL_POWER_ON_RESET,
+ REG_CTRL_POWER_ON_RESET);
+ udelay(RESET_COMPLETE_TIME);
+ phy_meson8b_usb2_mask_bits(priv, REG_CTRL, REG_CTRL_POWER_ON_RESET, 0);
+ udelay(RESET_COMPLETE_TIME);
+
+ phy_meson8b_usb2_mask_bits(priv, REG_CTRL, REG_CTRL_SOF_TOGGLE_OUT,
+ REG_CTRL_SOF_TOGGLE_OUT);
+
+ if (priv->dr_mode == USB_DR_MODE_HOST) {
+ phy_meson8b_usb2_mask_bits(priv, REG_ADP_BC,
+ REG_ADP_BC_ACA_ENABLE,
+ REG_ADP_BC_ACA_ENABLE);
+
+ udelay(ACA_ENABLE_COMPLETE_TIME);
+
+ if (phy_meson8b_usb2_read(priv, REG_ADP_BC) &
+ REG_ADP_BC_ACA_PIN_FLOAT) {
+ dev_warn(&phy->dev, "USB ID detect failed!\n");
+ clk_disable_unprepare(priv->clk_usb);
+ clk_disable_unprepare(priv->clk_usb_general);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static int phy_meson8b_usb2_power_off(struct phy *phy)
+{
+ struct phy_meson8b_usb2_priv *priv = phy_get_drvdata(phy);
+
+ clk_disable_unprepare(priv->clk_usb);
+ clk_disable_unprepare(priv->clk_usb_general);
+
+ return 0;
+}
+
+static const struct phy_ops phy_meson8b_usb2_ops = {
+ .power_on = phy_meson8b_usb2_power_on,
+ .power_off = phy_meson8b_usb2_power_off,
+ .owner = THIS_MODULE,
+};
+
+static int phy_meson8b_usb2_probe(struct platform_device *pdev)
+{
+ struct phy_meson8b_usb2_priv *priv;
+ struct resource *res;
+ struct phy *phy;
+ struct phy_provider *phy_provider;
+
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ priv->regs = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(priv->regs))
+ return PTR_ERR(priv->regs);
+
+ priv->clk_usb_general = devm_clk_get(&pdev->dev, "usb_general");
+ if (IS_ERR(priv->clk_usb_general))
+ return PTR_ERR(priv->clk_usb_general);
+
+ priv->clk_usb = devm_clk_get(&pdev->dev, "usb");
+ if (IS_ERR(priv->clk_usb))
+ return PTR_ERR(priv->clk_usb);
+
+ priv->reset = devm_reset_control_get_optional_shared(&pdev->dev, NULL);
+ if (PTR_ERR(priv->reset) == -EPROBE_DEFER)
+ return PTR_ERR(priv->reset);
+
+ priv->dr_mode = of_usb_get_dr_mode_by_phy(pdev->dev.of_node, -1);
+ if (priv->dr_mode == USB_DR_MODE_UNKNOWN) {
+ dev_err(&pdev->dev,
+ "missing dual role configuration of the controller\n");
+ return -EINVAL;
+ }
+
+ phy = devm_phy_create(&pdev->dev, NULL, &phy_meson8b_usb2_ops);
+ if (IS_ERR(phy)) {
+ dev_err(&pdev->dev, "failed to create PHY\n");
+ return PTR_ERR(phy);
+ }
+
+ phy_set_drvdata(phy, priv);
+
+ phy_provider =
+ devm_of_phy_provider_register(&pdev->dev, of_phy_simple_xlate);
+
+ return PTR_ERR_OR_ZERO(phy_provider);
+}
+
+static const struct of_device_id phy_meson8b_usb2_of_match[] = {
+ { .compatible = "amlogic,meson8b-usb2-phy", },
+ { .compatible = "amlogic,meson-gxbb-usb2-phy", },
+ { },
+};
+MODULE_DEVICE_TABLE(of, phy_meson8b_usb2_of_match);
+
+static struct platform_driver phy_meson8b_usb2_driver = {
+ .probe = phy_meson8b_usb2_probe,
+ .driver = {
+ .name = "phy-meson-usb2",
+ .of_match_table = phy_meson8b_usb2_of_match,
+ },
+};
+module_platform_driver(phy_meson8b_usb2_driver);
+
+MODULE_AUTHOR("Martin Blumenstingl <martin.blumenstingl@googlemail.com>");
+MODULE_DESCRIPTION("Meson8b and GXBB USB2 PHY driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/phy/phy-miphy365x.c b/drivers/phy/phy-miphy365x.c
deleted file mode 100644
index e661f3b36eaa..000000000000
--- a/drivers/phy/phy-miphy365x.c
+++ /dev/null
@@ -1,625 +0,0 @@
-/*
- * Copyright (C) 2014 STMicroelectronics – All Rights Reserved
- *
- * STMicroelectronics PHY driver MiPHY365 (for SoC STiH416).
- *
- * Authors: Alexandre Torgue <alexandre.torgue@st.com>
- * Lee Jones <lee.jones@linaro.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2, as
- * published by the Free Software Foundation.
- *
- */
-
-#include <linux/platform_device.h>
-#include <linux/io.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/of.h>
-#include <linux/of_platform.h>
-#include <linux/of_address.h>
-#include <linux/clk.h>
-#include <linux/phy/phy.h>
-#include <linux/delay.h>
-#include <linux/mfd/syscon.h>
-#include <linux/regmap.h>
-
-#include <dt-bindings/phy/phy.h>
-
-#define HFC_TIMEOUT 100
-
-#define SYSCFG_SELECT_SATA_MASK BIT(1)
-#define SYSCFG_SELECT_SATA_POS 1
-
-/* MiPHY365x register definitions */
-#define RESET_REG 0x00
-#define RST_PLL BIT(1)
-#define RST_PLL_CAL BIT(2)
-#define RST_RX BIT(4)
-#define RST_MACRO BIT(7)
-
-#define STATUS_REG 0x01
-#define IDLL_RDY BIT(0)
-#define PLL_RDY BIT(1)
-#define DES_BIT_LOCK BIT(2)
-#define DES_SYMBOL_LOCK BIT(3)
-
-#define CTRL_REG 0x02
-#define TERM_EN BIT(0)
-#define PCI_EN BIT(2)
-#define DES_BIT_LOCK_EN BIT(3)
-#define TX_POL BIT(5)
-
-#define INT_CTRL_REG 0x03
-
-#define BOUNDARY1_REG 0x10
-#define SPDSEL_SEL BIT(0)
-
-#define BOUNDARY3_REG 0x12
-#define TX_SPDSEL_GEN1_VAL 0
-#define TX_SPDSEL_GEN2_VAL 0x01
-#define TX_SPDSEL_GEN3_VAL 0x02
-#define RX_SPDSEL_GEN1_VAL 0
-#define RX_SPDSEL_GEN2_VAL (0x01 << 3)
-#define RX_SPDSEL_GEN3_VAL (0x02 << 3)
-
-#define PCIE_REG 0x16
-
-#define BUF_SEL_REG 0x20
-#define CONF_GEN_SEL_GEN3 0x02
-#define CONF_GEN_SEL_GEN2 0x01
-#define PD_VDDTFILTER BIT(4)
-
-#define TXBUF1_REG 0x21
-#define SWING_VAL 0x04
-#define SWING_VAL_GEN1 0x03
-#define PREEMPH_VAL (0x3 << 5)
-
-#define TXBUF2_REG 0x22
-#define TXSLEW_VAL 0x2
-#define TXSLEW_VAL_GEN1 0x4
-
-#define RXBUF_OFFSET_CTRL_REG 0x23
-
-#define RXBUF_REG 0x25
-#define SDTHRES_VAL 0x01
-#define EQ_ON3 (0x03 << 4)
-#define EQ_ON1 (0x01 << 4)
-
-#define COMP_CTRL1_REG 0x40
-#define START_COMSR BIT(0)
-#define START_COMZC BIT(1)
-#define COMSR_DONE BIT(2)
-#define COMZC_DONE BIT(3)
-#define COMP_AUTO_LOAD BIT(4)
-
-#define COMP_CTRL2_REG 0x41
-#define COMP_2MHZ_RAT_GEN1 0x1e
-#define COMP_2MHZ_RAT 0xf
-
-#define COMP_CTRL3_REG 0x42
-#define COMSR_COMP_REF 0x33
-
-#define COMP_IDLL_REG 0x47
-#define COMZC_IDLL 0x2a
-
-#define PLL_CTRL1_REG 0x50
-#define PLL_START_CAL BIT(0)
-#define BUF_EN BIT(2)
-#define SYNCHRO_TX BIT(3)
-#define SSC_EN BIT(6)
-#define CONFIG_PLL BIT(7)
-
-#define PLL_CTRL2_REG 0x51
-#define BYPASS_PLL_CAL BIT(1)
-
-#define PLL_RAT_REG 0x52
-
-#define PLL_SSC_STEP_MSB_REG 0x56
-#define PLL_SSC_STEP_MSB_VAL 0x03
-
-#define PLL_SSC_STEP_LSB_REG 0x57
-#define PLL_SSC_STEP_LSB_VAL 0x63
-
-#define PLL_SSC_PER_MSB_REG 0x58
-#define PLL_SSC_PER_MSB_VAL 0
-
-#define PLL_SSC_PER_LSB_REG 0x59
-#define PLL_SSC_PER_LSB_VAL 0xf1
-
-#define IDLL_TEST_REG 0x72
-#define START_CLK_HF BIT(6)
-
-#define DES_BITLOCK_REG 0x86
-#define BIT_LOCK_LEVEL 0x01
-#define BIT_LOCK_CNT_512 (0x03 << 5)
-
-struct miphy365x_phy {
- struct phy *phy;
- void __iomem *base;
- bool pcie_tx_pol_inv;
- bool sata_tx_pol_inv;
- u32 sata_gen;
- u32 ctrlreg;
- u8 type;
-};
-
-struct miphy365x_dev {
- struct device *dev;
- struct regmap *regmap;
- struct mutex miphy_mutex;
- struct miphy365x_phy **phys;
- int nphys;
-};
-
-/*
- * These values are represented in the device tree. They are considered ABI
- * and, although they can be extended, any existing values must not change.
- */
-enum miphy_sata_gen {
- SATA_GEN1 = 1,
- SATA_GEN2,
- SATA_GEN3
-};
-
-static u8 rx_tx_spd[] = {
- 0, /* GEN0 doesn't exist. */
- TX_SPDSEL_GEN1_VAL | RX_SPDSEL_GEN1_VAL,
- TX_SPDSEL_GEN2_VAL | RX_SPDSEL_GEN2_VAL,
- TX_SPDSEL_GEN3_VAL | RX_SPDSEL_GEN3_VAL
-};
-
-/*
- * This function selects the system configuration,
- * either two SATA, one SATA and one PCIe, or two PCIe lanes.
- */
-static int miphy365x_set_path(struct miphy365x_phy *miphy_phy,
- struct miphy365x_dev *miphy_dev)
-{
- bool sata = (miphy_phy->type == PHY_TYPE_SATA);
-
- return regmap_update_bits(miphy_dev->regmap,
- miphy_phy->ctrlreg,
- SYSCFG_SELECT_SATA_MASK,
- sata << SYSCFG_SELECT_SATA_POS);
-}
-
-static int miphy365x_init_pcie_port(struct miphy365x_phy *miphy_phy,
- struct miphy365x_dev *miphy_dev)
-{
- u8 val;
-
- if (miphy_phy->pcie_tx_pol_inv) {
- /* Invert Tx polarity and clear pci_txdetect_pol bit */
- val = TERM_EN | PCI_EN | DES_BIT_LOCK_EN | TX_POL;
- writeb_relaxed(val, miphy_phy->base + CTRL_REG);
- writeb_relaxed(0x00, miphy_phy->base + PCIE_REG);
- }
-
- return 0;
-}
-
-static inline int miphy365x_hfc_not_rdy(struct miphy365x_phy *miphy_phy,
- struct miphy365x_dev *miphy_dev)
-{
- unsigned long timeout = jiffies + msecs_to_jiffies(HFC_TIMEOUT);
- u8 mask = IDLL_RDY | PLL_RDY;
- u8 regval;
-
- do {
- regval = readb_relaxed(miphy_phy->base + STATUS_REG);
- if (!(regval & mask))
- return 0;
-
- usleep_range(2000, 2500);
- } while (time_before(jiffies, timeout));
-
- dev_err(miphy_dev->dev, "HFC ready timeout!\n");
- return -EBUSY;
-}
-
-static inline int miphy365x_rdy(struct miphy365x_phy *miphy_phy,
- struct miphy365x_dev *miphy_dev)
-{
- unsigned long timeout = jiffies + msecs_to_jiffies(HFC_TIMEOUT);
- u8 mask = IDLL_RDY | PLL_RDY;
- u8 regval;
-
- do {
- regval = readb_relaxed(miphy_phy->base + STATUS_REG);
- if ((regval & mask) == mask)
- return 0;
-
- usleep_range(2000, 2500);
- } while (time_before(jiffies, timeout));
-
- dev_err(miphy_dev->dev, "PHY not ready timeout!\n");
- return -EBUSY;
-}
-
-static inline void miphy365x_set_comp(struct miphy365x_phy *miphy_phy,
- struct miphy365x_dev *miphy_dev)
-{
- u8 val, mask;
-
- if (miphy_phy->sata_gen == SATA_GEN1)
- writeb_relaxed(COMP_2MHZ_RAT_GEN1,
- miphy_phy->base + COMP_CTRL2_REG);
- else
- writeb_relaxed(COMP_2MHZ_RAT,
- miphy_phy->base + COMP_CTRL2_REG);
-
- if (miphy_phy->sata_gen != SATA_GEN3) {
- writeb_relaxed(COMSR_COMP_REF,
- miphy_phy->base + COMP_CTRL3_REG);
- /*
- * Force VCO current to value defined by address 0x5A
- * and disable PCIe100Mref bit
- * Enable auto load compensation for pll_i_bias
- */
- writeb_relaxed(BYPASS_PLL_CAL, miphy_phy->base + PLL_CTRL2_REG);
- writeb_relaxed(COMZC_IDLL, miphy_phy->base + COMP_IDLL_REG);
- }
-
- /*
- * Force restart compensation and enable auto load
- * for Comzc_Tx, Comzc_Rx and Comsr on macro
- */
- val = START_COMSR | START_COMZC | COMP_AUTO_LOAD;
- writeb_relaxed(val, miphy_phy->base + COMP_CTRL1_REG);
-
- mask = COMSR_DONE | COMZC_DONE;
- while ((readb_relaxed(miphy_phy->base + COMP_CTRL1_REG) & mask) != mask)
- cpu_relax();
-}
-
-static inline void miphy365x_set_ssc(struct miphy365x_phy *miphy_phy,
- struct miphy365x_dev *miphy_dev)
-{
- u8 val;
-
- /*
- * SSC Settings. SSC will be enabled through Link
- * SSC Ampl. = 0.4%
- * SSC Freq = 31KHz
- */
- writeb_relaxed(PLL_SSC_STEP_MSB_VAL,
- miphy_phy->base + PLL_SSC_STEP_MSB_REG);
- writeb_relaxed(PLL_SSC_STEP_LSB_VAL,
- miphy_phy->base + PLL_SSC_STEP_LSB_REG);
- writeb_relaxed(PLL_SSC_PER_MSB_VAL,
- miphy_phy->base + PLL_SSC_PER_MSB_REG);
- writeb_relaxed(PLL_SSC_PER_LSB_VAL,
- miphy_phy->base + PLL_SSC_PER_LSB_REG);
-
- /* SSC Settings complete */
- if (miphy_phy->sata_gen == SATA_GEN1) {
- val = PLL_START_CAL | BUF_EN | SYNCHRO_TX | CONFIG_PLL;
- writeb_relaxed(val, miphy_phy->base + PLL_CTRL1_REG);
- } else {
- val = SSC_EN | PLL_START_CAL | BUF_EN | SYNCHRO_TX | CONFIG_PLL;
- writeb_relaxed(val, miphy_phy->base + PLL_CTRL1_REG);
- }
-}
-
-static int miphy365x_init_sata_port(struct miphy365x_phy *miphy_phy,
- struct miphy365x_dev *miphy_dev)
-{
- int ret;
- u8 val;
-
- /*
- * Force PHY macro reset, PLL calibration reset, PLL reset
- * and assert Deserializer Reset
- */
- val = RST_PLL | RST_PLL_CAL | RST_RX | RST_MACRO;
- writeb_relaxed(val, miphy_phy->base + RESET_REG);
-
- if (miphy_phy->sata_tx_pol_inv)
- writeb_relaxed(TX_POL, miphy_phy->base + CTRL_REG);
-
- /*
- * Force macro1 to use rx_lspd, tx_lspd
- * Force Rx_Clock on first I-DLL phase
- * Force Des in HP mode on macro, rx_lspd, tx_lspd for Gen2/3
- */
- writeb_relaxed(SPDSEL_SEL, miphy_phy->base + BOUNDARY1_REG);
- writeb_relaxed(START_CLK_HF, miphy_phy->base + IDLL_TEST_REG);
- val = rx_tx_spd[miphy_phy->sata_gen];
- writeb_relaxed(val, miphy_phy->base + BOUNDARY3_REG);
-
- /* Wait for HFC_READY = 0 */
- ret = miphy365x_hfc_not_rdy(miphy_phy, miphy_dev);
- if (ret)
- return ret;
-
- /* Compensation Recalibration */
- miphy365x_set_comp(miphy_phy, miphy_dev);
-
- switch (miphy_phy->sata_gen) {
- case SATA_GEN3:
- /*
- * TX Swing target 550-600mv peak to peak diff
- * Tx Slew target 90-110ps rising/falling time
- * Rx Eq ON3, Sigdet threshold SDTH1
- */
- val = PD_VDDTFILTER | CONF_GEN_SEL_GEN3;
- writeb_relaxed(val, miphy_phy->base + BUF_SEL_REG);
- val = SWING_VAL | PREEMPH_VAL;
- writeb_relaxed(val, miphy_phy->base + TXBUF1_REG);
- writeb_relaxed(TXSLEW_VAL, miphy_phy->base + TXBUF2_REG);
- writeb_relaxed(0x00, miphy_phy->base + RXBUF_OFFSET_CTRL_REG);
- val = SDTHRES_VAL | EQ_ON3;
- writeb_relaxed(val, miphy_phy->base + RXBUF_REG);
- break;
- case SATA_GEN2:
- /*
- * conf gen sel=0x1 to program Gen2 banked registers
- * VDDT filter ON
- * Tx Swing target 550-600mV peak-to-peak diff
- * Tx Slew target 90-110 ps rising/falling time
- * RX Equalization ON1, Sigdet threshold SDTH1
- */
- writeb_relaxed(CONF_GEN_SEL_GEN2,
- miphy_phy->base + BUF_SEL_REG);
- writeb_relaxed(SWING_VAL, miphy_phy->base + TXBUF1_REG);
- writeb_relaxed(TXSLEW_VAL, miphy_phy->base + TXBUF2_REG);
- val = SDTHRES_VAL | EQ_ON1;
- writeb_relaxed(val, miphy_phy->base + RXBUF_REG);
- break;
- case SATA_GEN1:
- /*
- * conf gen sel = 00b to program Gen1 banked registers
- * VDDT filter ON
- * Tx Swing target 500-550mV peak-to-peak diff
- * Tx Slew target 120-140 ps rising/falling time
- */
- writeb_relaxed(PD_VDDTFILTER, miphy_phy->base + BUF_SEL_REG);
- writeb_relaxed(SWING_VAL_GEN1, miphy_phy->base + TXBUF1_REG);
- writeb_relaxed(TXSLEW_VAL_GEN1, miphy_phy->base + TXBUF2_REG);
- break;
- default:
- break;
- }
-
- /* Force Macro1 in partial mode & release pll cal reset */
- writeb_relaxed(RST_RX, miphy_phy->base + RESET_REG);
- usleep_range(100, 150);
-
- miphy365x_set_ssc(miphy_phy, miphy_dev);
-
- /* Wait for phy_ready */
- ret = miphy365x_rdy(miphy_phy, miphy_dev);
- if (ret)
- return ret;
-
- /*
- * Enable macro1 to use rx_lspd & tx_lspd
- * Release Rx_Clock on first I-DLL phase on macro1
- * Assert deserializer reset
- * des_bit_lock_en is set
- * bit lock detection strength
- * Deassert deserializer reset
- */
- writeb_relaxed(0x00, miphy_phy->base + BOUNDARY1_REG);
- writeb_relaxed(0x00, miphy_phy->base + IDLL_TEST_REG);
- writeb_relaxed(RST_RX, miphy_phy->base + RESET_REG);
- val = miphy_phy->sata_tx_pol_inv ?
- (TX_POL | DES_BIT_LOCK_EN) : DES_BIT_LOCK_EN;
- writeb_relaxed(val, miphy_phy->base + CTRL_REG);
-
- val = BIT_LOCK_CNT_512 | BIT_LOCK_LEVEL;
- writeb_relaxed(val, miphy_phy->base + DES_BITLOCK_REG);
- writeb_relaxed(0x00, miphy_phy->base + RESET_REG);
-
- return 0;
-}
-
-static int miphy365x_init(struct phy *phy)
-{
- struct miphy365x_phy *miphy_phy = phy_get_drvdata(phy);
- struct miphy365x_dev *miphy_dev = dev_get_drvdata(phy->dev.parent);
- int ret = 0;
-
- mutex_lock(&miphy_dev->miphy_mutex);
-
- ret = miphy365x_set_path(miphy_phy, miphy_dev);
- if (ret) {
- mutex_unlock(&miphy_dev->miphy_mutex);
- return ret;
- }
-
- /* Initialise Miphy for PCIe or SATA */
- if (miphy_phy->type == PHY_TYPE_PCIE)
- ret = miphy365x_init_pcie_port(miphy_phy, miphy_dev);
- else
- ret = miphy365x_init_sata_port(miphy_phy, miphy_dev);
-
- mutex_unlock(&miphy_dev->miphy_mutex);
-
- return ret;
-}
-
-static int miphy365x_get_addr(struct device *dev,
- struct miphy365x_phy *miphy_phy, int index)
-{
- struct device_node *phynode = miphy_phy->phy->dev.of_node;
- const char *name;
- int type = miphy_phy->type;
- int ret;
-
- ret = of_property_read_string_index(phynode, "reg-names", index, &name);
- if (ret) {
- dev_err(dev, "no reg-names property not found\n");
- return ret;
- }
-
- if (!((!strncmp(name, "sata", 4) && type == PHY_TYPE_SATA) ||
- (!strncmp(name, "pcie", 4) && type == PHY_TYPE_PCIE)))
- return 0;
-
- miphy_phy->base = of_iomap(phynode, index);
- if (!miphy_phy->base) {
- dev_err(dev, "Failed to map %s\n", phynode->full_name);
- return -EINVAL;
- }
-
- return 0;
-}
-
-static struct phy *miphy365x_xlate(struct device *dev,
- struct of_phandle_args *args)
-{
- struct miphy365x_dev *miphy_dev = dev_get_drvdata(dev);
- struct miphy365x_phy *miphy_phy = NULL;
- struct device_node *phynode = args->np;
- int ret, index;
-
- if (args->args_count != 1) {
- dev_err(dev, "Invalid number of cells in 'phy' property\n");
- return ERR_PTR(-EINVAL);
- }
-
- for (index = 0; index < miphy_dev->nphys; index++)
- if (phynode == miphy_dev->phys[index]->phy->dev.of_node) {
- miphy_phy = miphy_dev->phys[index];
- break;
- }
-
- if (!miphy_phy) {
- dev_err(dev, "Failed to find appropriate phy\n");
- return ERR_PTR(-EINVAL);
- }
-
- miphy_phy->type = args->args[0];
-
- if (!(miphy_phy->type == PHY_TYPE_SATA ||
- miphy_phy->type == PHY_TYPE_PCIE)) {
- dev_err(dev, "Unsupported device type: %d\n", miphy_phy->type);
- return ERR_PTR(-EINVAL);
- }
-
- /* Each port handles SATA and PCIE - third entry is always sysconf. */
- for (index = 0; index < 3; index++) {
- ret = miphy365x_get_addr(dev, miphy_phy, index);
- if (ret < 0)
- return ERR_PTR(ret);
- }
-
- return miphy_phy->phy;
-}
-
-static const struct phy_ops miphy365x_ops = {
- .init = miphy365x_init,
- .owner = THIS_MODULE,
-};
-
-static int miphy365x_of_probe(struct device_node *phynode,
- struct miphy365x_phy *miphy_phy)
-{
- of_property_read_u32(phynode, "st,sata-gen", &miphy_phy->sata_gen);
- if (!miphy_phy->sata_gen)
- miphy_phy->sata_gen = SATA_GEN1;
-
- miphy_phy->pcie_tx_pol_inv =
- of_property_read_bool(phynode, "st,pcie-tx-pol-inv");
-
- miphy_phy->sata_tx_pol_inv =
- of_property_read_bool(phynode, "st,sata-tx-pol-inv");
-
- return 0;
-}
-
-static int miphy365x_probe(struct platform_device *pdev)
-{
- struct device_node *child, *np = pdev->dev.of_node;
- struct miphy365x_dev *miphy_dev;
- struct phy_provider *provider;
- struct phy *phy;
- int ret, port = 0;
-
- miphy_dev = devm_kzalloc(&pdev->dev, sizeof(*miphy_dev), GFP_KERNEL);
- if (!miphy_dev)
- return -ENOMEM;
-
- miphy_dev->nphys = of_get_child_count(np);
- miphy_dev->phys = devm_kcalloc(&pdev->dev, miphy_dev->nphys,
- sizeof(*miphy_dev->phys), GFP_KERNEL);
- if (!miphy_dev->phys)
- return -ENOMEM;
-
- miphy_dev->regmap = syscon_regmap_lookup_by_phandle(np, "st,syscfg");
- if (IS_ERR(miphy_dev->regmap)) {
- dev_err(&pdev->dev, "No syscfg phandle specified\n");
- return PTR_ERR(miphy_dev->regmap);
- }
-
- miphy_dev->dev = &pdev->dev;
-
- dev_set_drvdata(&pdev->dev, miphy_dev);
-
- mutex_init(&miphy_dev->miphy_mutex);
-
- for_each_child_of_node(np, child) {
- struct miphy365x_phy *miphy_phy;
-
- miphy_phy = devm_kzalloc(&pdev->dev, sizeof(*miphy_phy),
- GFP_KERNEL);
- if (!miphy_phy) {
- ret = -ENOMEM;
- goto put_child;
- }
-
- miphy_dev->phys[port] = miphy_phy;
-
- phy = devm_phy_create(&pdev->dev, child, &miphy365x_ops);
- if (IS_ERR(phy)) {
- dev_err(&pdev->dev, "failed to create PHY\n");
- ret = PTR_ERR(phy);
- goto put_child;
- }
-
- miphy_dev->phys[port]->phy = phy;
-
- ret = miphy365x_of_probe(child, miphy_phy);
- if (ret)
- goto put_child;
-
- phy_set_drvdata(phy, miphy_dev->phys[port]);
-
- port++;
- /* sysconfig offsets are indexed from 1 */
- ret = of_property_read_u32_index(np, "st,syscfg", port,
- &miphy_phy->ctrlreg);
- if (ret) {
- dev_err(&pdev->dev, "No sysconfig offset found\n");
- goto put_child;
- }
- }
-
- provider = devm_of_phy_provider_register(&pdev->dev, miphy365x_xlate);
- return PTR_ERR_OR_ZERO(provider);
-put_child:
- of_node_put(child);
- return ret;
-}
-
-static const struct of_device_id miphy365x_of_match[] = {
- { .compatible = "st,miphy365x-phy", },
- { },
-};
-MODULE_DEVICE_TABLE(of, miphy365x_of_match);
-
-static struct platform_driver miphy365x_driver = {
- .probe = miphy365x_probe,
- .driver = {
- .name = "miphy365x-phy",
- .of_match_table = miphy365x_of_match,
- }
-};
-module_platform_driver(miphy365x_driver);
-
-MODULE_AUTHOR("Alexandre Torgue <alexandre.torgue@st.com>");
-MODULE_DESCRIPTION("STMicroelectronics miphy365x driver");
-MODULE_LICENSE("GPL v2");
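
The deleted driver's miphy365x_rdy() and miphy365x_hfc_not_rdy() poll STATUS_REG against a jiffies deadline with time_before(). The same poll-until-ready-or-timeout shape, reduced to a self-contained POSIX C sketch with a stubbed register read (the 100 ms budget and all names are assumptions for illustration):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <time.h>

#define READY_MASK 0x03 /* stand-in for IDLL_RDY | PLL_RDY */

/* Illustrative stub: pretend the hardware becomes ready immediately. */
static uint8_t status_read(void) { return READY_MASK; }

/* Poll until all bits in READY_MASK are set or ~100 ms elapse,
 * mirroring the jiffies/time_before() loop in miphy365x_rdy(). */
static bool wait_phy_ready(void)
{
	struct timespec start, now;

	clock_gettime(CLOCK_MONOTONIC, &start);
	do {
		if ((status_read() & READY_MASK) == READY_MASK)
			return true;
		clock_gettime(CLOCK_MONOTONIC, &now);
	} while ((now.tv_sec - start.tv_sec) * 1000000000L +
		 (now.tv_nsec - start.tv_nsec) < 100000000L);

	return false; /* timeout: the caller reports -EBUSY */
}

int main(void)
{
	printf("phy %s\n", wait_phy_ready() ? "ready" : "timed out");
	return 0;
}
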
diff --git a/drivers/phy/phy-qcom-ufs-i.h b/drivers/phy/phy-qcom-ufs-i.h
index 2bd5ce43a724..d505d98cf5f8 100644
--- a/drivers/phy/phy-qcom-ufs-i.h
+++ b/drivers/phy/phy-qcom-ufs-i.h
@@ -141,11 +141,8 @@ struct ufs_qcom_phy_specific_ops {
struct ufs_qcom_phy *get_ufs_qcom_phy(struct phy *generic_phy);
int ufs_qcom_phy_power_on(struct phy *generic_phy);
int ufs_qcom_phy_power_off(struct phy *generic_phy);
-int ufs_qcom_phy_exit(struct phy *generic_phy);
-int ufs_qcom_phy_init_clks(struct phy *generic_phy,
- struct ufs_qcom_phy *phy_common);
-int ufs_qcom_phy_init_vregulators(struct phy *generic_phy,
- struct ufs_qcom_phy *phy_common);
+int ufs_qcom_phy_init_clks(struct ufs_qcom_phy *phy_common);
+int ufs_qcom_phy_init_vregulators(struct ufs_qcom_phy *phy_common);
int ufs_qcom_phy_remove(struct phy *generic_phy,
struct ufs_qcom_phy *ufs_qcom_phy);
struct phy *ufs_qcom_phy_generic_probe(struct platform_device *pdev,
diff --git a/drivers/phy/phy-qcom-ufs-qmp-14nm.c b/drivers/phy/phy-qcom-ufs-qmp-14nm.c
index 6ee51490f786..c71c84734916 100644
--- a/drivers/phy/phy-qcom-ufs-qmp-14nm.c
+++ b/drivers/phy/phy-qcom-ufs-qmp-14nm.c
@@ -44,30 +44,12 @@ void ufs_qcom_phy_qmp_14nm_advertise_quirks(struct ufs_qcom_phy *phy_common)
static int ufs_qcom_phy_qmp_14nm_init(struct phy *generic_phy)
{
- struct ufs_qcom_phy_qmp_14nm *phy = phy_get_drvdata(generic_phy);
- struct ufs_qcom_phy *phy_common = &phy->common_cfg;
- int err;
-
- err = ufs_qcom_phy_init_clks(generic_phy, phy_common);
- if (err) {
- dev_err(phy_common->dev, "%s: ufs_qcom_phy_init_clks() failed %d\n",
- __func__, err);
- goto out;
- }
-
- err = ufs_qcom_phy_init_vregulators(generic_phy, phy_common);
- if (err) {
- dev_err(phy_common->dev, "%s: ufs_qcom_phy_init_vregulators() failed %d\n",
- __func__, err);
- goto out;
- }
- phy_common->vdda_phy.max_uV = UFS_PHY_VDDA_PHY_UV;
- phy_common->vdda_phy.min_uV = UFS_PHY_VDDA_PHY_UV;
-
- ufs_qcom_phy_qmp_14nm_advertise_quirks(phy_common);
+ return 0;
+}
-out:
- return err;
+static int ufs_qcom_phy_qmp_14nm_exit(struct phy *generic_phy)
+{
+ return 0;
}
static
@@ -117,7 +99,7 @@ static int ufs_qcom_phy_qmp_14nm_is_pcs_ready(struct ufs_qcom_phy *phy_common)
static const struct phy_ops ufs_qcom_phy_qmp_14nm_phy_ops = {
.init = ufs_qcom_phy_qmp_14nm_init,
- .exit = ufs_qcom_phy_exit,
+ .exit = ufs_qcom_phy_qmp_14nm_exit,
.power_on = ufs_qcom_phy_power_on,
.power_off = ufs_qcom_phy_power_off,
.owner = THIS_MODULE,
@@ -136,6 +118,7 @@ static int ufs_qcom_phy_qmp_14nm_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct phy *generic_phy;
struct ufs_qcom_phy_qmp_14nm *phy;
+ struct ufs_qcom_phy *phy_common;
int err = 0;
phy = devm_kzalloc(dev, sizeof(*phy), GFP_KERNEL);
@@ -143,8 +126,9 @@ static int ufs_qcom_phy_qmp_14nm_probe(struct platform_device *pdev)
err = -ENOMEM;
goto out;
}
+ phy_common = &phy->common_cfg;
- generic_phy = ufs_qcom_phy_generic_probe(pdev, &phy->common_cfg,
+ generic_phy = ufs_qcom_phy_generic_probe(pdev, phy_common,
&ufs_qcom_phy_qmp_14nm_phy_ops, &phy_14nm_ops);
if (!generic_phy) {
@@ -154,39 +138,43 @@ static int ufs_qcom_phy_qmp_14nm_probe(struct platform_device *pdev)
goto out;
}
- phy_set_drvdata(generic_phy, phy);
+ err = ufs_qcom_phy_init_clks(phy_common);
+ if (err) {
+ dev_err(phy_common->dev,
+ "%s: ufs_qcom_phy_init_clks() failed %d\n",
+ __func__, err);
+ goto out;
+ }
- strlcpy(phy->common_cfg.name, UFS_PHY_NAME,
- sizeof(phy->common_cfg.name));
+ err = ufs_qcom_phy_init_vregulators(phy_common);
+ if (err) {
+ dev_err(phy_common->dev,
+ "%s: ufs_qcom_phy_init_vregulators() failed %d\n",
+ __func__, err);
+ goto out;
+ }
+ phy_common->vdda_phy.max_uV = UFS_PHY_VDDA_PHY_UV;
+ phy_common->vdda_phy.min_uV = UFS_PHY_VDDA_PHY_UV;
-out:
- return err;
-}
+ ufs_qcom_phy_qmp_14nm_advertise_quirks(phy_common);
-static int ufs_qcom_phy_qmp_14nm_remove(struct platform_device *pdev)
-{
- struct device *dev = &pdev->dev;
- struct phy *generic_phy = to_phy(dev);
- struct ufs_qcom_phy *ufs_qcom_phy = get_ufs_qcom_phy(generic_phy);
- int err = 0;
+ phy_set_drvdata(generic_phy, phy);
- err = ufs_qcom_phy_remove(generic_phy, ufs_qcom_phy);
- if (err)
- dev_err(dev, "%s: ufs_qcom_phy_remove failed = %d\n",
- __func__, err);
+ strlcpy(phy_common->name, UFS_PHY_NAME, sizeof(phy_common->name));
+out:
return err;
}
static const struct of_device_id ufs_qcom_phy_qmp_14nm_of_match[] = {
{.compatible = "qcom,ufs-phy-qmp-14nm"},
+ {.compatible = "qcom,msm8996-ufs-phy-qmp-14nm"},
{},
};
MODULE_DEVICE_TABLE(of, ufs_qcom_phy_qmp_14nm_of_match);
static struct platform_driver ufs_qcom_phy_qmp_14nm_driver = {
.probe = ufs_qcom_phy_qmp_14nm_probe,
- .remove = ufs_qcom_phy_qmp_14nm_remove,
.driver = {
.of_match_table = ufs_qcom_phy_qmp_14nm_of_match,
.name = "ufs_qcom_phy_qmp_14nm",
diff --git a/drivers/phy/phy-qcom-ufs-qmp-20nm.c b/drivers/phy/phy-qcom-ufs-qmp-20nm.c
index 770087ab05e2..1a26a64e06d3 100644
--- a/drivers/phy/phy-qcom-ufs-qmp-20nm.c
+++ b/drivers/phy/phy-qcom-ufs-qmp-20nm.c
@@ -63,28 +63,12 @@ void ufs_qcom_phy_qmp_20nm_advertise_quirks(struct ufs_qcom_phy *phy_common)
static int ufs_qcom_phy_qmp_20nm_init(struct phy *generic_phy)
{
- struct ufs_qcom_phy_qmp_20nm *phy = phy_get_drvdata(generic_phy);
- struct ufs_qcom_phy *phy_common = &phy->common_cfg;
- int err = 0;
-
- err = ufs_qcom_phy_init_clks(generic_phy, phy_common);
- if (err) {
- dev_err(phy_common->dev, "%s: ufs_qcom_phy_init_clks() failed %d\n",
- __func__, err);
- goto out;
- }
-
- err = ufs_qcom_phy_init_vregulators(generic_phy, phy_common);
- if (err) {
- dev_err(phy_common->dev, "%s: ufs_qcom_phy_init_vregulators() failed %d\n",
- __func__, err);
- goto out;
- }
-
- ufs_qcom_phy_qmp_20nm_advertise_quirks(phy_common);
+ return 0;
+}
-out:
- return err;
+static int ufs_qcom_phy_qmp_20nm_exit(struct phy *generic_phy)
+{
+ return 0;
}
static
@@ -173,7 +157,7 @@ static int ufs_qcom_phy_qmp_20nm_is_pcs_ready(struct ufs_qcom_phy *phy_common)
static const struct phy_ops ufs_qcom_phy_qmp_20nm_phy_ops = {
.init = ufs_qcom_phy_qmp_20nm_init,
- .exit = ufs_qcom_phy_exit,
+ .exit = ufs_qcom_phy_qmp_20nm_exit,
.power_on = ufs_qcom_phy_power_on,
.power_off = ufs_qcom_phy_power_off,
.owner = THIS_MODULE,
@@ -192,6 +176,7 @@ static int ufs_qcom_phy_qmp_20nm_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct phy *generic_phy;
struct ufs_qcom_phy_qmp_20nm *phy;
+ struct ufs_qcom_phy *phy_common;
int err = 0;
phy = devm_kzalloc(dev, sizeof(*phy), GFP_KERNEL);
@@ -199,8 +184,9 @@ static int ufs_qcom_phy_qmp_20nm_probe(struct platform_device *pdev)
err = -ENOMEM;
goto out;
}
+ phy_common = &phy->common_cfg;
- generic_phy = ufs_qcom_phy_generic_probe(pdev, &phy->common_cfg,
+ generic_phy = ufs_qcom_phy_generic_probe(pdev, phy_common,
&ufs_qcom_phy_qmp_20nm_phy_ops, &phy_20nm_ops);
if (!generic_phy) {
@@ -210,27 +196,27 @@ static int ufs_qcom_phy_qmp_20nm_probe(struct platform_device *pdev)
goto out;
}
- phy_set_drvdata(generic_phy, phy);
+ err = ufs_qcom_phy_init_clks(phy_common);
+ if (err) {
+ dev_err(phy_common->dev, "%s: ufs_qcom_phy_init_clks() failed %d\n",
+ __func__, err);
+ goto out;
+ }
- strlcpy(phy->common_cfg.name, UFS_PHY_NAME,
- sizeof(phy->common_cfg.name));
+ err = ufs_qcom_phy_init_vregulators(phy_common);
+ if (err) {
+ dev_err(phy_common->dev, "%s: ufs_qcom_phy_init_vregulators() failed %d\n",
+ __func__, err);
+ goto out;
+ }
-out:
- return err;
-}
+ ufs_qcom_phy_qmp_20nm_advertise_quirks(phy_common);
-static int ufs_qcom_phy_qmp_20nm_remove(struct platform_device *pdev)
-{
- struct device *dev = &pdev->dev;
- struct phy *generic_phy = to_phy(dev);
- struct ufs_qcom_phy *ufs_qcom_phy = get_ufs_qcom_phy(generic_phy);
- int err = 0;
+ phy_set_drvdata(generic_phy, phy);
- err = ufs_qcom_phy_remove(generic_phy, ufs_qcom_phy);
- if (err)
- dev_err(dev, "%s: ufs_qcom_phy_remove failed = %d\n",
- __func__, err);
+ strlcpy(phy_common->name, UFS_PHY_NAME, sizeof(phy_common->name));
+out:
return err;
}
@@ -242,7 +228,6 @@ MODULE_DEVICE_TABLE(of, ufs_qcom_phy_qmp_20nm_of_match);
static struct platform_driver ufs_qcom_phy_qmp_20nm_driver = {
.probe = ufs_qcom_phy_qmp_20nm_probe,
- .remove = ufs_qcom_phy_qmp_20nm_remove,
.driver = {
.of_match_table = ufs_qcom_phy_qmp_20nm_of_match,
.name = "ufs_qcom_phy_qmp_20nm",
diff --git a/drivers/phy/phy-qcom-ufs.c b/drivers/phy/phy-qcom-ufs.c
index 18a5b495ad65..c69568b8543d 100644
--- a/drivers/phy/phy-qcom-ufs.c
+++ b/drivers/phy/phy-qcom-ufs.c
@@ -22,13 +22,6 @@
#define VDDP_REF_CLK_MIN_UV 1200000
#define VDDP_REF_CLK_MAX_UV 1200000
-static int __ufs_qcom_phy_init_vreg(struct phy *, struct ufs_qcom_phy_vreg *,
- const char *, bool);
-static int ufs_qcom_phy_init_vreg(struct phy *, struct ufs_qcom_phy_vreg *,
- const char *);
-static int ufs_qcom_phy_base_init(struct platform_device *pdev,
- struct ufs_qcom_phy *phy_common);
-
int ufs_qcom_phy_calibrate(struct ufs_qcom_phy *ufs_qcom_phy,
struct ufs_qcom_phy_calibration *tbl_A,
int tbl_size_A,
@@ -75,45 +68,6 @@ out:
}
EXPORT_SYMBOL_GPL(ufs_qcom_phy_calibrate);
-struct phy *ufs_qcom_phy_generic_probe(struct platform_device *pdev,
- struct ufs_qcom_phy *common_cfg,
- const struct phy_ops *ufs_qcom_phy_gen_ops,
- struct ufs_qcom_phy_specific_ops *phy_spec_ops)
-{
- int err;
- struct device *dev = &pdev->dev;
- struct phy *generic_phy = NULL;
- struct phy_provider *phy_provider;
-
- err = ufs_qcom_phy_base_init(pdev, common_cfg);
- if (err) {
- dev_err(dev, "%s: phy base init failed %d\n", __func__, err);
- goto out;
- }
-
- phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
- if (IS_ERR(phy_provider)) {
- err = PTR_ERR(phy_provider);
- dev_err(dev, "%s: failed to register phy %d\n", __func__, err);
- goto out;
- }
-
- generic_phy = devm_phy_create(dev, NULL, ufs_qcom_phy_gen_ops);
- if (IS_ERR(generic_phy)) {
- err = PTR_ERR(generic_phy);
- dev_err(dev, "%s: failed to create phy %d\n", __func__, err);
- generic_phy = NULL;
- goto out;
- }
-
- common_cfg->phy_spec_ops = phy_spec_ops;
- common_cfg->dev = dev;
-
-out:
- return generic_phy;
-}
-EXPORT_SYMBOL_GPL(ufs_qcom_phy_generic_probe);
-
/*
* This assumes the embedded phy structure inside generic_phy is of type
* struct ufs_qcom_phy. In order to function properly it's crucial
@@ -154,13 +108,50 @@ int ufs_qcom_phy_base_init(struct platform_device *pdev,
return 0;
}
-static int __ufs_qcom_phy_clk_get(struct phy *phy,
+struct phy *ufs_qcom_phy_generic_probe(struct platform_device *pdev,
+ struct ufs_qcom_phy *common_cfg,
+ const struct phy_ops *ufs_qcom_phy_gen_ops,
+ struct ufs_qcom_phy_specific_ops *phy_spec_ops)
+{
+ int err;
+ struct device *dev = &pdev->dev;
+ struct phy *generic_phy = NULL;
+ struct phy_provider *phy_provider;
+
+ err = ufs_qcom_phy_base_init(pdev, common_cfg);
+ if (err) {
+ dev_err(dev, "%s: phy base init failed %d\n", __func__, err);
+ goto out;
+ }
+
+ phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
+ if (IS_ERR(phy_provider)) {
+ err = PTR_ERR(phy_provider);
+ dev_err(dev, "%s: failed to register phy %d\n", __func__, err);
+ goto out;
+ }
+
+ generic_phy = devm_phy_create(dev, NULL, ufs_qcom_phy_gen_ops);
+ if (IS_ERR(generic_phy)) {
+ err = PTR_ERR(generic_phy);
+ dev_err(dev, "%s: failed to create phy %d\n", __func__, err);
+ generic_phy = NULL;
+ goto out;
+ }
+
+ common_cfg->phy_spec_ops = phy_spec_ops;
+ common_cfg->dev = dev;
+
+out:
+ return generic_phy;
+}
+EXPORT_SYMBOL_GPL(ufs_qcom_phy_generic_probe);
+
+static int __ufs_qcom_phy_clk_get(struct device *dev,
const char *name, struct clk **clk_out, bool err_print)
{
struct clk *clk;
int err = 0;
- struct ufs_qcom_phy *ufs_qcom_phy = get_ufs_qcom_phy(phy);
- struct device *dev = ufs_qcom_phy->dev;
clk = devm_clk_get(dev, name);
if (IS_ERR(clk)) {
@@ -174,42 +165,44 @@ static int __ufs_qcom_phy_clk_get(struct phy *phy,
return err;
}
-static
-int ufs_qcom_phy_clk_get(struct phy *phy,
+static int ufs_qcom_phy_clk_get(struct device *dev,
const char *name, struct clk **clk_out)
{
- return __ufs_qcom_phy_clk_get(phy, name, clk_out, true);
+ return __ufs_qcom_phy_clk_get(dev, name, clk_out, true);
}
-int
-ufs_qcom_phy_init_clks(struct phy *generic_phy,
- struct ufs_qcom_phy *phy_common)
+int ufs_qcom_phy_init_clks(struct ufs_qcom_phy *phy_common)
{
int err;
- err = ufs_qcom_phy_clk_get(generic_phy, "tx_iface_clk",
+ if (of_device_is_compatible(phy_common->dev->of_node,
+ "qcom,msm8996-ufs-phy-qmp-14nm"))
+ goto skip_txrx_clk;
+
+ err = ufs_qcom_phy_clk_get(phy_common->dev, "tx_iface_clk",
&phy_common->tx_iface_clk);
if (err)
goto out;
- err = ufs_qcom_phy_clk_get(generic_phy, "rx_iface_clk",
+ err = ufs_qcom_phy_clk_get(phy_common->dev, "rx_iface_clk",
&phy_common->rx_iface_clk);
if (err)
goto out;
- err = ufs_qcom_phy_clk_get(generic_phy, "ref_clk_src",
+ err = ufs_qcom_phy_clk_get(phy_common->dev, "ref_clk_src",
&phy_common->ref_clk_src);
if (err)
goto out;
+skip_txrx_clk:
/*
* "ref_clk_parent" is optional hence don't abort init if it's not
* found.
*/
- __ufs_qcom_phy_clk_get(generic_phy, "ref_clk_parent",
+ __ufs_qcom_phy_clk_get(phy_common->dev, "ref_clk_parent",
&phy_common->ref_clk_parent, false);
- err = ufs_qcom_phy_clk_get(generic_phy, "ref_clk",
+ err = ufs_qcom_phy_clk_get(phy_common->dev, "ref_clk",
&phy_common->ref_clk);
out:
@@ -217,41 +210,14 @@ out:
}
EXPORT_SYMBOL_GPL(ufs_qcom_phy_init_clks);
-int
-ufs_qcom_phy_init_vregulators(struct phy *generic_phy,
- struct ufs_qcom_phy *phy_common)
-{
- int err;
-
- err = ufs_qcom_phy_init_vreg(generic_phy, &phy_common->vdda_pll,
- "vdda-pll");
- if (err)
- goto out;
-
- err = ufs_qcom_phy_init_vreg(generic_phy, &phy_common->vdda_phy,
- "vdda-phy");
-
- if (err)
- goto out;
-
- /* vddp-ref-clk-* properties are optional */
- __ufs_qcom_phy_init_vreg(generic_phy, &phy_common->vddp_ref_clk,
- "vddp-ref-clk", true);
-out:
- return err;
-}
-EXPORT_SYMBOL_GPL(ufs_qcom_phy_init_vregulators);
-
-static int __ufs_qcom_phy_init_vreg(struct phy *phy,
+static int __ufs_qcom_phy_init_vreg(struct device *dev,
struct ufs_qcom_phy_vreg *vreg, const char *name, bool optional)
{
int err = 0;
- struct ufs_qcom_phy *ufs_qcom_phy = get_ufs_qcom_phy(phy);
- struct device *dev = ufs_qcom_phy->dev;
char prop_name[MAX_PROP_NAME];
- vreg->name = kstrdup(name, GFP_KERNEL);
+ vreg->name = devm_kstrdup(dev, name, GFP_KERNEL);
if (!vreg->name) {
err = -ENOMEM;
goto out;
@@ -304,14 +270,36 @@ out:
return err;
}
-static int ufs_qcom_phy_init_vreg(struct phy *phy,
+static int ufs_qcom_phy_init_vreg(struct device *dev,
struct ufs_qcom_phy_vreg *vreg, const char *name)
{
- return __ufs_qcom_phy_init_vreg(phy, vreg, name, false);
+ return __ufs_qcom_phy_init_vreg(dev, vreg, name, false);
}
-static
-int ufs_qcom_phy_cfg_vreg(struct phy *phy,
+int ufs_qcom_phy_init_vregulators(struct ufs_qcom_phy *phy_common)
+{
+ int err;
+
+ err = ufs_qcom_phy_init_vreg(phy_common->dev, &phy_common->vdda_pll,
+ "vdda-pll");
+ if (err)
+ goto out;
+
+ err = ufs_qcom_phy_init_vreg(phy_common->dev, &phy_common->vdda_phy,
+ "vdda-phy");
+
+ if (err)
+ goto out;
+
+ /* vddp-ref-clk-* properties are optional */
+ __ufs_qcom_phy_init_vreg(phy_common->dev, &phy_common->vddp_ref_clk,
+ "vddp-ref-clk", true);
+out:
+ return err;
+}
+EXPORT_SYMBOL_GPL(ufs_qcom_phy_init_vregulators);
+
+static int ufs_qcom_phy_cfg_vreg(struct device *dev,
struct ufs_qcom_phy_vreg *vreg, bool on)
{
int ret = 0;
@@ -319,10 +307,6 @@ int ufs_qcom_phy_cfg_vreg(struct phy *phy,
const char *name = vreg->name;
int min_uV;
int uA_load;
- struct ufs_qcom_phy *ufs_qcom_phy = get_ufs_qcom_phy(phy);
- struct device *dev = ufs_qcom_phy->dev;
-
- BUG_ON(!vreg);
if (regulator_count_voltages(reg) > 0) {
min_uV = on ? vreg->min_uV : 0;
@@ -350,18 +334,15 @@ out:
return ret;
}
-static
-int ufs_qcom_phy_enable_vreg(struct phy *phy,
+static int ufs_qcom_phy_enable_vreg(struct device *dev,
struct ufs_qcom_phy_vreg *vreg)
{
- struct ufs_qcom_phy *ufs_qcom_phy = get_ufs_qcom_phy(phy);
- struct device *dev = ufs_qcom_phy->dev;
int ret = 0;
if (!vreg || vreg->enabled)
goto out;
- ret = ufs_qcom_phy_cfg_vreg(phy, vreg, true);
+ ret = ufs_qcom_phy_cfg_vreg(dev, vreg, true);
if (ret) {
dev_err(dev, "%s: ufs_qcom_phy_cfg_vreg() failed, err=%d\n",
__func__, ret);
@@ -380,10 +361,9 @@ out:
return ret;
}
-int ufs_qcom_phy_enable_ref_clk(struct phy *generic_phy)
+static int ufs_qcom_phy_enable_ref_clk(struct ufs_qcom_phy *phy)
{
int ret = 0;
- struct ufs_qcom_phy *phy = get_ufs_qcom_phy(generic_phy);
if (phy->is_ref_clk_enabled)
goto out;
@@ -430,14 +410,10 @@ out_disable_src:
out:
return ret;
}
-EXPORT_SYMBOL_GPL(ufs_qcom_phy_enable_ref_clk);
-static
-int ufs_qcom_phy_disable_vreg(struct phy *phy,
+static int ufs_qcom_phy_disable_vreg(struct device *dev,
struct ufs_qcom_phy_vreg *vreg)
{
- struct ufs_qcom_phy *ufs_qcom_phy = get_ufs_qcom_phy(phy);
- struct device *dev = ufs_qcom_phy->dev;
int ret = 0;
if (!vreg || !vreg->enabled || vreg->is_always_on)
@@ -447,7 +423,7 @@ int ufs_qcom_phy_disable_vreg(struct phy *phy,
if (!ret) {
/* ignore errors on applying disable config */
- ufs_qcom_phy_cfg_vreg(phy, vreg, false);
+ ufs_qcom_phy_cfg_vreg(dev, vreg, false);
vreg->enabled = false;
} else {
dev_err(dev, "%s: %s disable failed, err=%d\n",
@@ -457,10 +433,8 @@ out:
return ret;
}
-void ufs_qcom_phy_disable_ref_clk(struct phy *generic_phy)
+static void ufs_qcom_phy_disable_ref_clk(struct ufs_qcom_phy *phy)
{
- struct ufs_qcom_phy *phy = get_ufs_qcom_phy(generic_phy);
-
if (phy->is_ref_clk_enabled) {
clk_disable_unprepare(phy->ref_clk);
/*
@@ -473,7 +447,6 @@ void ufs_qcom_phy_disable_ref_clk(struct phy *generic_phy)
phy->is_ref_clk_enabled = false;
}
}
-EXPORT_SYMBOL_GPL(ufs_qcom_phy_disable_ref_clk);
#define UFS_REF_CLK_EN (1 << 5)
@@ -526,9 +499,8 @@ void ufs_qcom_phy_disable_dev_ref_clk(struct phy *generic_phy)
EXPORT_SYMBOL_GPL(ufs_qcom_phy_disable_dev_ref_clk);
/* Turn ON M-PHY RMMI interface clocks */
-int ufs_qcom_phy_enable_iface_clk(struct phy *generic_phy)
+static int ufs_qcom_phy_enable_iface_clk(struct ufs_qcom_phy *phy)
{
- struct ufs_qcom_phy *phy = get_ufs_qcom_phy(generic_phy);
int ret = 0;
if (phy->is_iface_clk_enabled)
@@ -552,20 +524,16 @@ int ufs_qcom_phy_enable_iface_clk(struct phy *generic_phy)
out:
return ret;
}
-EXPORT_SYMBOL_GPL(ufs_qcom_phy_enable_iface_clk);
/* Turn OFF M-PHY RMMI interface clocks */
-void ufs_qcom_phy_disable_iface_clk(struct phy *generic_phy)
+void ufs_qcom_phy_disable_iface_clk(struct ufs_qcom_phy *phy)
{
- struct ufs_qcom_phy *phy = get_ufs_qcom_phy(generic_phy);
-
if (phy->is_iface_clk_enabled) {
clk_disable_unprepare(phy->tx_iface_clk);
clk_disable_unprepare(phy->rx_iface_clk);
phy->is_iface_clk_enabled = false;
}
}
-EXPORT_SYMBOL_GPL(ufs_qcom_phy_disable_iface_clk);
int ufs_qcom_phy_start_serdes(struct phy *generic_phy)
{
@@ -634,29 +602,6 @@ int ufs_qcom_phy_calibrate_phy(struct phy *generic_phy, bool is_rate_B)
}
EXPORT_SYMBOL_GPL(ufs_qcom_phy_calibrate_phy);
-int ufs_qcom_phy_remove(struct phy *generic_phy,
- struct ufs_qcom_phy *ufs_qcom_phy)
-{
- phy_power_off(generic_phy);
-
- kfree(ufs_qcom_phy->vdda_pll.name);
- kfree(ufs_qcom_phy->vdda_phy.name);
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(ufs_qcom_phy_remove);
-
-int ufs_qcom_phy_exit(struct phy *generic_phy)
-{
- struct ufs_qcom_phy *ufs_qcom_phy = get_ufs_qcom_phy(generic_phy);
-
- if (ufs_qcom_phy->is_powered_on)
- phy_power_off(generic_phy);
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(ufs_qcom_phy_exit);
-
int ufs_qcom_phy_is_pcs_ready(struct phy *generic_phy)
{
struct ufs_qcom_phy *ufs_qcom_phy = get_ufs_qcom_phy(generic_phy);
@@ -678,7 +623,10 @@ int ufs_qcom_phy_power_on(struct phy *generic_phy)
struct device *dev = phy_common->dev;
int err;
- err = ufs_qcom_phy_enable_vreg(generic_phy, &phy_common->vdda_phy);
+ if (phy_common->is_powered_on)
+ return 0;
+
+ err = ufs_qcom_phy_enable_vreg(dev, &phy_common->vdda_phy);
if (err) {
dev_err(dev, "%s enable vdda_phy failed, err=%d\n",
__func__, err);
@@ -688,23 +636,30 @@ int ufs_qcom_phy_power_on(struct phy *generic_phy)
phy_common->phy_spec_ops->power_control(phy_common, true);
/* vdda_pll also enables ref clock LDOs so enable it first */
- err = ufs_qcom_phy_enable_vreg(generic_phy, &phy_common->vdda_pll);
+ err = ufs_qcom_phy_enable_vreg(dev, &phy_common->vdda_pll);
if (err) {
dev_err(dev, "%s enable vdda_pll failed, err=%d\n",
__func__, err);
goto out_disable_phy;
}
- err = ufs_qcom_phy_enable_ref_clk(generic_phy);
+ err = ufs_qcom_phy_enable_iface_clk(phy_common);
if (err) {
- dev_err(dev, "%s enable phy ref clock failed, err=%d\n",
+ dev_err(dev, "%s enable phy iface clock failed, err=%d\n",
__func__, err);
goto out_disable_pll;
}
+ err = ufs_qcom_phy_enable_ref_clk(phy_common);
+ if (err) {
+ dev_err(dev, "%s enable phy ref clock failed, err=%d\n",
+ __func__, err);
+ goto out_disable_iface_clk;
+ }
+
/* enable device PHY ref_clk pad rail */
if (phy_common->vddp_ref_clk.reg) {
- err = ufs_qcom_phy_enable_vreg(generic_phy,
+ err = ufs_qcom_phy_enable_vreg(dev,
&phy_common->vddp_ref_clk);
if (err) {
dev_err(dev, "%s enable vddp_ref_clk failed, err=%d\n",
@@ -717,11 +672,13 @@ int ufs_qcom_phy_power_on(struct phy *generic_phy)
goto out;
out_disable_ref_clk:
- ufs_qcom_phy_disable_ref_clk(generic_phy);
+ ufs_qcom_phy_disable_ref_clk(phy_common);
+out_disable_iface_clk:
+ ufs_qcom_phy_disable_iface_clk(phy_common);
out_disable_pll:
- ufs_qcom_phy_disable_vreg(generic_phy, &phy_common->vdda_pll);
+ ufs_qcom_phy_disable_vreg(dev, &phy_common->vdda_pll);
out_disable_phy:
- ufs_qcom_phy_disable_vreg(generic_phy, &phy_common->vdda_phy);
+ ufs_qcom_phy_disable_vreg(dev, &phy_common->vdda_phy);
out:
return err;
}
@@ -731,15 +688,19 @@ int ufs_qcom_phy_power_off(struct phy *generic_phy)
{
struct ufs_qcom_phy *phy_common = get_ufs_qcom_phy(generic_phy);
+ if (!phy_common->is_powered_on)
+ return 0;
+
phy_common->phy_spec_ops->power_control(phy_common, false);
if (phy_common->vddp_ref_clk.reg)
- ufs_qcom_phy_disable_vreg(generic_phy,
+ ufs_qcom_phy_disable_vreg(phy_common->dev,
&phy_common->vddp_ref_clk);
- ufs_qcom_phy_disable_ref_clk(generic_phy);
+ ufs_qcom_phy_disable_ref_clk(phy_common);
+ ufs_qcom_phy_disable_iface_clk(phy_common);
- ufs_qcom_phy_disable_vreg(generic_phy, &phy_common->vdda_pll);
- ufs_qcom_phy_disable_vreg(generic_phy, &phy_common->vdda_phy);
+ ufs_qcom_phy_disable_vreg(phy_common->dev, &phy_common->vdda_pll);
+ ufs_qcom_phy_disable_vreg(phy_common->dev, &phy_common->vdda_phy);
phy_common->is_powered_on = false;
return 0;
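
ufs_qcom_phy_power_on() gains an iface-clock step and threads it into the existing goto unwind ladder, so any failure releases everything acquired before it, in reverse order. A minimal compilable sketch of that ladder with stubbed enable/disable pairs (all names invented for illustration):

#include <stdio.h>

/* Stubs standing in for regulator/clock enable-disable pairs. */
static int  enable_a(void) { return 0; }
static void disable_a(void) { }
static int  enable_b(void) { return 0; }
static void disable_b(void) { }
static int  enable_c(void) { return -1; } /* simulate a failure */

static int power_on(void)
{
	int err;

	err = enable_a();
	if (err)
		goto out;
	err = enable_b();
	if (err)
		goto out_disable_a;
	err = enable_c();
	if (err)
		goto out_disable_b; /* unwind in reverse order */
	return 0;

out_disable_b:
	disable_b();
out_disable_a:
	disable_a();
out:
	return err;
}

int main(void)
{
	printf("power_on -> %d\n", power_on());
	return 0;
}
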
diff --git a/drivers/phy/phy-rcar-gen3-usb2.c b/drivers/phy/phy-rcar-gen3-usb2.c
index 3d97eadd247d..c63da1b955c1 100644
--- a/drivers/phy/phy-rcar-gen3-usb2.c
+++ b/drivers/phy/phy-rcar-gen3-usb2.c
@@ -70,6 +70,7 @@
#define USB2_LINECTRL1_DP_RPD BIT(18)
#define USB2_LINECTRL1_DMRPD_EN BIT(17)
#define USB2_LINECTRL1_DM_RPD BIT(16)
+#define USB2_LINECTRL1_OPMODE_NODRV BIT(6)
/* ADPCTRL */
#define USB2_ADPCTRL_OTGSESSVLD BIT(20)
@@ -161,6 +162,43 @@ static void rcar_gen3_init_for_peri(struct rcar_gen3_chan *ch)
schedule_work(&ch->work);
}
+static void rcar_gen3_init_for_b_host(struct rcar_gen3_chan *ch)
+{
+ void __iomem *usb2_base = ch->base;
+ u32 val;
+
+ val = readl(usb2_base + USB2_LINECTRL1);
+ writel(val | USB2_LINECTRL1_OPMODE_NODRV, usb2_base + USB2_LINECTRL1);
+
+ rcar_gen3_set_linectrl(ch, 1, 1);
+ rcar_gen3_set_host_mode(ch, 1);
+ rcar_gen3_enable_vbus_ctrl(ch, 0);
+
+ val = readl(usb2_base + USB2_LINECTRL1);
+ writel(val & ~USB2_LINECTRL1_OPMODE_NODRV, usb2_base + USB2_LINECTRL1);
+}
+
+static void rcar_gen3_init_for_a_peri(struct rcar_gen3_chan *ch)
+{
+ rcar_gen3_set_linectrl(ch, 0, 1);
+ rcar_gen3_set_host_mode(ch, 0);
+ rcar_gen3_enable_vbus_ctrl(ch, 1);
+}
+
+static void rcar_gen3_init_from_a_peri_to_a_host(struct rcar_gen3_chan *ch)
+{
+ void __iomem *usb2_base = ch->base;
+ u32 val;
+
+ val = readl(usb2_base + USB2_OBINTEN);
+ writel(val & ~USB2_OBINT_BITS, usb2_base + USB2_OBINTEN);
+
+ rcar_gen3_enable_vbus_ctrl(ch, 0);
+ rcar_gen3_init_for_host(ch);
+
+ writel(val | USB2_OBINT_BITS, usb2_base + USB2_OBINTEN);
+}
+
static bool rcar_gen3_check_id(struct rcar_gen3_chan *ch)
{
return !!(readl(ch->base + USB2_ADPCTRL) & USB2_ADPCTRL_IDDIG);
@@ -174,6 +212,65 @@ static void rcar_gen3_device_recognition(struct rcar_gen3_chan *ch)
rcar_gen3_init_for_peri(ch);
}
+static bool rcar_gen3_is_host(struct rcar_gen3_chan *ch)
+{
+ return !(readl(ch->base + USB2_COMMCTRL) & USB2_COMMCTRL_OTG_PERI);
+}
+
+static ssize_t role_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct rcar_gen3_chan *ch = dev_get_drvdata(dev);
+ bool is_b_device, is_host, new_mode_is_host;
+
+ if (!ch->has_otg || !ch->phy->init_count)
+ return -EIO;
+
+ /*
+	 * is_b_device: true means B-Device, false means A-Device.
+	 * {new_mode_}is_host: true means host mode, false means peripheral mode.
+ */
+ is_b_device = rcar_gen3_check_id(ch);
+ is_host = rcar_gen3_is_host(ch);
+ if (!strncmp(buf, "host", strlen("host")))
+ new_mode_is_host = true;
+ else if (!strncmp(buf, "peripheral", strlen("peripheral")))
+ new_mode_is_host = false;
+ else
+ return -EINVAL;
+
+	/* If the current and new modes are the same, return an error */
+ if (is_host == new_mode_is_host)
+ return -EINVAL;
+
+ if (new_mode_is_host) { /* And is_host must be false */
+ if (!is_b_device) /* A-Peripheral */
+ rcar_gen3_init_from_a_peri_to_a_host(ch);
+ else /* B-Peripheral */
+ rcar_gen3_init_for_b_host(ch);
+ } else { /* And is_host must be true */
+ if (!is_b_device) /* A-Host */
+ rcar_gen3_init_for_a_peri(ch);
+ else /* B-Host */
+ rcar_gen3_init_for_peri(ch);
+ }
+
+ return count;
+}
+
+static ssize_t role_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct rcar_gen3_chan *ch = dev_get_drvdata(dev);
+
+ if (!ch->has_otg || !ch->phy->init_count)
+ return -EIO;
+
+ return sprintf(buf, "%s\n", rcar_gen3_is_host(ch) ? "host" :
+ "peripheral");
+}
+static DEVICE_ATTR_RW(role);
+
static void rcar_gen3_init_otg(struct rcar_gen3_chan *ch)
{
void __iomem *usb2_base = ch->base;
@@ -351,21 +448,40 @@ static int rcar_gen3_phy_usb2_probe(struct platform_device *pdev)
channel->vbus = NULL;
}
+ platform_set_drvdata(pdev, channel);
phy_set_drvdata(channel->phy, channel);
provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
- if (IS_ERR(provider))
+ if (IS_ERR(provider)) {
dev_err(dev, "Failed to register PHY provider\n");
+ } else if (channel->has_otg) {
+ int ret;
+
+ ret = device_create_file(dev, &dev_attr_role);
+ if (ret < 0)
+ return ret;
+ }
return PTR_ERR_OR_ZERO(provider);
}
+static int rcar_gen3_phy_usb2_remove(struct platform_device *pdev)
+{
+ struct rcar_gen3_chan *channel = platform_get_drvdata(pdev);
+
+ if (channel->has_otg)
+ device_remove_file(&pdev->dev, &dev_attr_role);
+
+ return 0;
+}
+
static struct platform_driver rcar_gen3_phy_usb2_driver = {
.driver = {
.name = "phy_rcar_gen3_usb2",
.of_match_table = rcar_gen3_phy_usb2_match_table,
},
.probe = rcar_gen3_phy_usb2_probe,
+ .remove = rcar_gen3_phy_usb2_remove,
};
module_platform_driver(rcar_gen3_phy_usb2_driver);
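
role_store() accepts "host" or "peripheral" via a strncmp() prefix match, which conveniently tolerates the trailing newline that echo appends to sysfs writes, and rejects anything else (or a no-op mode change) with -EINVAL. The parsing step alone, as a self-contained C sketch with an illustrative errno stand-in:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define EINVAL_MODEL 22 /* illustrative stand-in for the kernel's EINVAL */

/* Mirror of the role_store() parse: prefix-match so that an
 * echo-appended '\n' ("host\n") is still accepted. */
static int parse_role(const char *buf, bool *new_mode_is_host)
{
	if (!strncmp(buf, "host", strlen("host")))
		*new_mode_is_host = true;
	else if (!strncmp(buf, "peripheral", strlen("peripheral")))
		*new_mode_is_host = false;
	else
		return -EINVAL_MODEL;
	return 0;
}

int main(void)
{
	bool is_host;

	if (!parse_role("host\n", &is_host))
		printf("new mode: %s\n", is_host ? "host" : "peripheral");
	return 0;
}
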
diff --git a/drivers/phy/phy-rockchip-emmc.c b/drivers/phy/phy-rockchip-emmc.c
index fd57345ffed2..f1b24f18e9b2 100644
--- a/drivers/phy/phy-rockchip-emmc.c
+++ b/drivers/phy/phy-rockchip-emmc.c
@@ -132,7 +132,7 @@ static int rockchip_emmc_phy_power(struct phy *phy, bool on_off)
default:
ideal_rate = 200000000;
break;
- };
+ }
diff = (rate > ideal_rate) ?
rate - ideal_rate : ideal_rate - rate;
diff --git a/drivers/phy/phy-rockchip-inno-usb2.c b/drivers/phy/phy-rockchip-inno-usb2.c
index ac203107b071..2f99ec95079c 100644
--- a/drivers/phy/phy-rockchip-inno-usb2.c
+++ b/drivers/phy/phy-rockchip-inno-usb2.c
@@ -17,6 +17,7 @@
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/delay.h>
+#include <linux/extcon.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/gpio/consumer.h>
@@ -30,11 +31,15 @@
#include <linux/of_platform.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
+#include <linux/power_supply.h>
#include <linux/regmap.h>
#include <linux/mfd/syscon.h>
+#include <linux/usb/of.h>
+#include <linux/usb/otg.h>
#define BIT_WRITEABLE_SHIFT 16
-#define SCHEDULE_DELAY (60 * HZ)
+#define SCHEDULE_DELAY (60 * HZ)
+#define OTG_SCHEDULE_DELAY (2 * HZ)
enum rockchip_usb2phy_port_id {
USB2PHY_PORT_OTG,
@@ -49,6 +54,37 @@ enum rockchip_usb2phy_host_state {
PHY_STATE_FS_LS_ONLINE = 4,
};
+/**
+ * Different states involved in USB charger detection.
+ * USB_CHG_STATE_UNDEFINED USB charger is not connected or detection
+ * process is not yet started.
+ * USB_CHG_STATE_WAIT_FOR_DCD Waiting for Data pins contact.
+ * USB_CHG_STATE_DCD_DONE Data pin contact is detected.
+ * USB_CHG_STATE_PRIMARY_DONE Primary detection is completed (Detects
+ * between SDP and DCP/CDP).
+ * USB_CHG_STATE_SECONDARY_DONE Secondary detection is completed (Detects
+ * between DCP and CDP).
+ * USB_CHG_STATE_DETECTED USB charger type is determined.
+ */
+enum usb_chg_state {
+ USB_CHG_STATE_UNDEFINED = 0,
+ USB_CHG_STATE_WAIT_FOR_DCD,
+ USB_CHG_STATE_DCD_DONE,
+ USB_CHG_STATE_PRIMARY_DONE,
+ USB_CHG_STATE_SECONDARY_DONE,
+ USB_CHG_STATE_DETECTED,
+};
+
+static const unsigned int rockchip_usb2phy_extcon_cable[] = {
+ EXTCON_USB,
+ EXTCON_USB_HOST,
+ EXTCON_CHG_USB_SDP,
+ EXTCON_CHG_USB_CDP,
+ EXTCON_CHG_USB_DCP,
+ EXTCON_CHG_USB_SLOW,
+ EXTCON_NONE,
+};
+
struct usb2phy_reg {
unsigned int offset;
unsigned int bitend;
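
The new usb_chg_state enum above encodes the usual BC1.2 detection sequence: wait for data-contact detection, then primary detection (SDP vs. DCP/CDP), then secondary detection (DCP vs. CDP). A compilable sketch of how such an enum typically drives a re-scheduled work item; the transitions summarize the enum's documentation, and the function is an illustration, not the driver's code:

#include <stdio.h>

enum chg_state {
	CHG_UNDEFINED,
	CHG_WAIT_FOR_DCD,   /* wait for data-pin contact */
	CHG_DCD_DONE,       /* run primary detection: SDP vs DCP/CDP */
	CHG_PRIMARY_DONE,   /* run secondary detection: DCP vs CDP */
	CHG_SECONDARY_DONE,
	CHG_DETECTED,
};

/* One pass of the detection work; the real driver re-schedules itself
 * until the charger type is determined. */
static enum chg_state chg_step(enum chg_state s)
{
	switch (s) {
	case CHG_UNDEFINED:     return CHG_WAIT_FOR_DCD;
	case CHG_WAIT_FOR_DCD:  return CHG_DCD_DONE;
	case CHG_DCD_DONE:      return CHG_PRIMARY_DONE;
	case CHG_PRIMARY_DONE:  return CHG_SECONDARY_DONE;
	case CHG_SECONDARY_DONE:
	default:                return CHG_DETECTED;
	}
}

int main(void)
{
	enum chg_state s = CHG_UNDEFINED;

	while (s != CHG_DETECTED) {
		s = chg_step(s);
		printf("state -> %d\n", s);
	}
	return 0;
}
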
@@ -58,19 +94,55 @@ struct usb2phy_reg {
};
/**
+ * struct rockchip_chg_det_reg: usb charger detect registers
+ * @cp_det: charging port detected successfully.
+ * @dcp_det: dedicated charging port detected successfully.
+ * @dp_det: data pin contact detected successfully.
+ * @idm_sink_en: open dm sink current.
+ * @idp_sink_en: open dp sink current.
+ * @idp_src_en: open dp source current.
+ * @rdm_pdwn_en: open dm pull down resistor.
+ * @vdm_src_en: open dm voltage source.
+ * @vdp_src_en: open dp voltage source.
+ * @opmode: utmi operational mode.
+ */
+struct rockchip_chg_det_reg {
+ struct usb2phy_reg cp_det;
+ struct usb2phy_reg dcp_det;
+ struct usb2phy_reg dp_det;
+ struct usb2phy_reg idm_sink_en;
+ struct usb2phy_reg idp_sink_en;
+ struct usb2phy_reg idp_src_en;
+ struct usb2phy_reg rdm_pdwn_en;
+ struct usb2phy_reg vdm_src_en;
+ struct usb2phy_reg vdp_src_en;
+ struct usb2phy_reg opmode;
+};
+
+/**
* struct rockchip_usb2phy_port_cfg: usb-phy port configuration.
* @phy_sus: phy suspend register.
+ * @bvalid_det_en: vbus valid rise detection enable register.
+ * @bvalid_det_st: vbus valid rise detection status register.
+ * @bvalid_det_clr: vbus valid rise detection clear register.
* @ls_det_en: linestate detection enable register.
* @ls_det_st: linestate detection state register.
* @ls_det_clr: linestate detection clear register.
+ * @utmi_avalid: utmi vbus avalid status register.
+ * @utmi_bvalid: utmi vbus bvalid status register.
* @utmi_ls: utmi linestate state register.
* @utmi_hstdet: utmi host disconnect register.
*/
struct rockchip_usb2phy_port_cfg {
struct usb2phy_reg phy_sus;
+ struct usb2phy_reg bvalid_det_en;
+ struct usb2phy_reg bvalid_det_st;
+ struct usb2phy_reg bvalid_det_clr;
struct usb2phy_reg ls_det_en;
struct usb2phy_reg ls_det_st;
struct usb2phy_reg ls_det_clr;
+ struct usb2phy_reg utmi_avalid;
+ struct usb2phy_reg utmi_bvalid;
struct usb2phy_reg utmi_ls;
struct usb2phy_reg utmi_hstdet;
};
@@ -80,31 +152,51 @@ struct rockchip_usb2phy_port_cfg {
* @reg: the address offset of grf for usb-phy config.
* @num_ports: specify how many ports that the phy has.
* @clkout_ctl: keep on/turn off output clk of phy.
+ * @chg_det: charger detection registers.
*/
struct rockchip_usb2phy_cfg {
unsigned int reg;
unsigned int num_ports;
struct usb2phy_reg clkout_ctl;
const struct rockchip_usb2phy_port_cfg port_cfgs[USB2PHY_NUM_PORTS];
+ const struct rockchip_chg_det_reg chg_det;
};
/**
* struct rockchip_usb2phy_port: usb-phy port data.
* @port_id: flag for otg port or host port.
* @suspended: phy suspended flag.
+ * @utmi_avalid: utmi avalid status usage flag.
+ * true - use avalid to get vbus status
+ *		      false - use bvalid to get vbus status
+ * @vbus_attached: otg device vbus status.
+ * @bvalid_irq: IRQ number assigned for vbus valid rise detection.
* @ls_irq: IRQ number assigned for linestate detection.
* @mutex: for register updating in sm_work.
- * @sm_work: OTG state machine work.
+ * @chg_work: charge detect work.
+ * @otg_sm_work: OTG state machine work.
+ * @sm_work: HOST state machine work.
* @phy_cfg: port register configuration, assigned by driver data.
+ * @event_nb: hold event notification callback.
+ * @state: define OTG enumeration states before device reset.
+ * @mode: the dr_mode of the controller.
*/
struct rockchip_usb2phy_port {
struct phy *phy;
unsigned int port_id;
bool suspended;
+ bool utmi_avalid;
+ bool vbus_attached;
+ int bvalid_irq;
int ls_irq;
struct mutex mutex;
+ struct delayed_work chg_work;
+ struct delayed_work otg_sm_work;
struct delayed_work sm_work;
const struct rockchip_usb2phy_port_cfg *port_cfg;
+ struct notifier_block event_nb;
+ enum usb_otg_state state;
+ enum usb_dr_mode mode;
};
/**
@@ -113,6 +205,11 @@ struct rockchip_usb2phy_port {
* @clk: clock struct of phy input clk.
* @clk480m: clock struct of phy output clk.
* @clk_hw: clock struct of phy output clk management.
+ * @chg_state: states involved in USB charger detection.
+ * @chg_type: USB charger types.
+ * @dcd_retries: The retry count used to track Data contact
+ * detection process.
+ * @edev: extcon device for notification registration
* @phy_cfg: phy register configuration, assigned by driver data.
* @ports: phy port instance.
*/
@@ -122,6 +219,10 @@ struct rockchip_usb2phy {
struct clk *clk;
struct clk *clk480m;
struct clk_hw clk480m_hw;
+ enum usb_chg_state chg_state;
+ enum power_supply_type chg_type;
+ u8 dcd_retries;
+ struct extcon_dev *edev;
const struct rockchip_usb2phy_cfg *phy_cfg;
struct rockchip_usb2phy_port ports[USB2PHY_NUM_PORTS];
};
@@ -153,7 +254,7 @@ static inline bool property_enabled(struct rockchip_usb2phy *rphy,
return tmp == reg->enable;
}
-static int rockchip_usb2phy_clk480m_enable(struct clk_hw *hw)
+static int rockchip_usb2phy_clk480m_prepare(struct clk_hw *hw)
{
struct rockchip_usb2phy *rphy =
container_of(hw, struct rockchip_usb2phy, clk480m_hw);
@@ -165,14 +266,14 @@ static int rockchip_usb2phy_clk480m_enable(struct clk_hw *hw)
if (ret)
return ret;
- /* waitting for the clk become stable */
- mdelay(1);
+ /* wait for the clk to become stable */
+ usleep_range(1200, 1300);
}
return 0;
}
-static void rockchip_usb2phy_clk480m_disable(struct clk_hw *hw)
+static void rockchip_usb2phy_clk480m_unprepare(struct clk_hw *hw)
{
struct rockchip_usb2phy *rphy =
container_of(hw, struct rockchip_usb2phy, clk480m_hw);
@@ -181,7 +282,7 @@ static void rockchip_usb2phy_clk480m_disable(struct clk_hw *hw)
property_enable(rphy, &rphy->phy_cfg->clkout_ctl, false);
}
-static int rockchip_usb2phy_clk480m_enabled(struct clk_hw *hw)
+static int rockchip_usb2phy_clk480m_prepared(struct clk_hw *hw)
{
struct rockchip_usb2phy *rphy =
container_of(hw, struct rockchip_usb2phy, clk480m_hw);
@@ -197,9 +298,9 @@ rockchip_usb2phy_clk480m_recalc_rate(struct clk_hw *hw,
}
static const struct clk_ops rockchip_usb2phy_clkout_ops = {
- .enable = rockchip_usb2phy_clk480m_enable,
- .disable = rockchip_usb2phy_clk480m_disable,
- .is_enabled = rockchip_usb2phy_clk480m_enabled,
+ .prepare = rockchip_usb2phy_clk480m_prepare,
+ .unprepare = rockchip_usb2phy_clk480m_unprepare,
+ .is_prepared = rockchip_usb2phy_clk480m_prepared,
.recalc_rate = rockchip_usb2phy_clk480m_recalc_rate,
};
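Moving from .enable/.disable to .prepare/.unprepare is what makes the usleep_range() above legal: prepare callbacks run in process context and may sleep, while enable callbacks can be invoked atomically. A consumer-side fragment (clock name hypothetical) under that assumption:

/* sketch: consumers reach the sleeping .prepare path via clk_prepare_enable() */
struct clk *clk480m = clk_get(dev, "usbphy_480m");	/* hypothetical name */
int ret;

if (IS_ERR(clk480m))
	return PTR_ERR(clk480m);

ret = clk_prepare_enable(clk480m);	/* may sleep: .prepare above runs here */
if (ret)
	return ret;
/* ... use the clock ... */
clk_disable_unprepare(clk480m);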
@@ -263,33 +364,84 @@ err_ret:
return ret;
}
-static int rockchip_usb2phy_init(struct phy *phy)
+static int rockchip_usb2phy_extcon_register(struct rockchip_usb2phy *rphy)
{
- struct rockchip_usb2phy_port *rport = phy_get_drvdata(phy);
- struct rockchip_usb2phy *rphy = dev_get_drvdata(phy->dev.parent);
int ret;
+ struct device_node *node = rphy->dev->of_node;
+ struct extcon_dev *edev;
+
+ if (of_property_read_bool(node, "extcon")) {
+ edev = extcon_get_edev_by_phandle(rphy->dev, 0);
+ if (IS_ERR(edev)) {
+ if (PTR_ERR(edev) != -EPROBE_DEFER)
+ dev_err(rphy->dev, "Invalid or missing extcon\n");
+ return PTR_ERR(edev);
+ }
+ } else {
+ /* Initialize extcon device */
+ edev = devm_extcon_dev_allocate(rphy->dev,
+ rockchip_usb2phy_extcon_cable);
- if (rport->port_id == USB2PHY_PORT_HOST) {
- /* clear linestate and enable linestate detect irq */
- mutex_lock(&rport->mutex);
+ if (IS_ERR(edev))
+ return -ENOMEM;
- ret = property_enable(rphy, &rport->port_cfg->ls_det_clr, true);
+ ret = devm_extcon_dev_register(rphy->dev, edev);
if (ret) {
- mutex_unlock(&rport->mutex);
+ dev_err(rphy->dev, "failed to register extcon device\n");
return ret;
}
+ }
- ret = property_enable(rphy, &rport->port_cfg->ls_det_en, true);
- if (ret) {
- mutex_unlock(&rport->mutex);
- return ret;
+ rphy->edev = edev;
+
+ return 0;
+}
+
+static int rockchip_usb2phy_init(struct phy *phy)
+{
+ struct rockchip_usb2phy_port *rport = phy_get_drvdata(phy);
+ struct rockchip_usb2phy *rphy = dev_get_drvdata(phy->dev.parent);
+ int ret = 0;
+
+ mutex_lock(&rport->mutex);
+
+ if (rport->port_id == USB2PHY_PORT_OTG) {
+ if (rport->mode != USB_DR_MODE_HOST) {
+ /* clear bvalid status and enable bvalid detect irq */
+ ret = property_enable(rphy,
+ &rport->port_cfg->bvalid_det_clr,
+ true);
+ if (ret)
+ goto out;
+
+ ret = property_enable(rphy,
+ &rport->port_cfg->bvalid_det_en,
+ true);
+ if (ret)
+ goto out;
+
+ schedule_delayed_work(&rport->otg_sm_work,
+ OTG_SCHEDULE_DELAY);
+ } else {
+ /* If OTG works in host only mode, do nothing. */
+ dev_dbg(&rport->phy->dev, "mode %d\n", rport->mode);
}
+ } else if (rport->port_id == USB2PHY_PORT_HOST) {
+ /* clear linestate and enable linestate detect irq */
+ ret = property_enable(rphy, &rport->port_cfg->ls_det_clr, true);
+ if (ret)
+ goto out;
+
+ ret = property_enable(rphy, &rport->port_cfg->ls_det_en, true);
+ if (ret)
+ goto out;
- mutex_unlock(&rport->mutex);
schedule_delayed_work(&rport->sm_work, SCHEDULE_DELAY);
}
- return 0;
+out:
+ mutex_unlock(&rport->mutex);
+ return ret;
}
static int rockchip_usb2phy_power_on(struct phy *phy)
@@ -340,7 +492,11 @@ static int rockchip_usb2phy_exit(struct phy *phy)
{
struct rockchip_usb2phy_port *rport = phy_get_drvdata(phy);
- if (rport->port_id == USB2PHY_PORT_HOST)
+ if (rport->port_id == USB2PHY_PORT_OTG &&
+ rport->mode != USB_DR_MODE_HOST) {
+ cancel_delayed_work_sync(&rport->otg_sm_work);
+ cancel_delayed_work_sync(&rport->chg_work);
+ } else if (rport->port_id == USB2PHY_PORT_HOST)
cancel_delayed_work_sync(&rport->sm_work);
return 0;
@@ -354,6 +510,249 @@ static const struct phy_ops rockchip_usb2phy_ops = {
.owner = THIS_MODULE,
};
+static void rockchip_usb2phy_otg_sm_work(struct work_struct *work)
+{
+ struct rockchip_usb2phy_port *rport =
+ container_of(work, struct rockchip_usb2phy_port,
+ otg_sm_work.work);
+ struct rockchip_usb2phy *rphy = dev_get_drvdata(rport->phy->dev.parent);
+ static unsigned int cable;
+ unsigned long delay;
+ bool vbus_attach, sch_work, notify_charger;
+
+ if (rport->utmi_avalid)
+ vbus_attach =
+ property_enabled(rphy, &rport->port_cfg->utmi_avalid);
+ else
+ vbus_attach =
+ property_enabled(rphy, &rport->port_cfg->utmi_bvalid);
+
+ sch_work = false;
+ notify_charger = false;
+ delay = OTG_SCHEDULE_DELAY;
+ dev_dbg(&rport->phy->dev, "%s otg sm work\n",
+ usb_otg_state_string(rport->state));
+
+ switch (rport->state) {
+ case OTG_STATE_UNDEFINED:
+ rport->state = OTG_STATE_B_IDLE;
+ if (!vbus_attach)
+ rockchip_usb2phy_power_off(rport->phy);
+ /* fall through */
+ case OTG_STATE_B_IDLE:
+ if (extcon_get_cable_state_(rphy->edev, EXTCON_USB_HOST) > 0) {
+ dev_dbg(&rport->phy->dev, "usb otg host connect\n");
+ rport->state = OTG_STATE_A_HOST;
+ rockchip_usb2phy_power_on(rport->phy);
+ return;
+ } else if (vbus_attach) {
+ dev_dbg(&rport->phy->dev, "vbus_attach\n");
+ switch (rphy->chg_state) {
+ case USB_CHG_STATE_UNDEFINED:
+ schedule_delayed_work(&rport->chg_work, 0);
+ return;
+ case USB_CHG_STATE_DETECTED:
+ switch (rphy->chg_type) {
+ case POWER_SUPPLY_TYPE_USB:
+ dev_dbg(&rport->phy->dev,
+ "sdp cable is connecetd\n");
+ rockchip_usb2phy_power_on(rport->phy);
+ rport->state = OTG_STATE_B_PERIPHERAL;
+ notify_charger = true;
+ sch_work = true;
+ cable = EXTCON_CHG_USB_SDP;
+ break;
+ case POWER_SUPPLY_TYPE_USB_DCP:
+ dev_dbg(&rport->phy->dev,
+ "dcp cable is connecetd\n");
+ rockchip_usb2phy_power_off(rport->phy);
+ notify_charger = true;
+ sch_work = true;
+ cable = EXTCON_CHG_USB_DCP;
+ break;
+ case POWER_SUPPLY_TYPE_USB_CDP:
+ dev_dbg(&rport->phy->dev,
+ "cdp cable is connecetd\n");
+ rockchip_usb2phy_power_on(rport->phy);
+ rport->state = OTG_STATE_B_PERIPHERAL;
+ notify_charger = true;
+ sch_work = true;
+ cable = EXTCON_CHG_USB_CDP;
+ break;
+ default:
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+ } else {
+ notify_charger = true;
+ rphy->chg_state = USB_CHG_STATE_UNDEFINED;
+ rphy->chg_type = POWER_SUPPLY_TYPE_UNKNOWN;
+ }
+
+ if (rport->vbus_attached != vbus_attach) {
+ rport->vbus_attached = vbus_attach;
+
+ if (notify_charger && rphy->edev)
+ extcon_set_cable_state_(rphy->edev,
+ cable, vbus_attach);
+ }
+ break;
+ case OTG_STATE_B_PERIPHERAL:
+ if (!vbus_attach) {
+ dev_dbg(&rport->phy->dev, "usb disconnect\n");
+ rphy->chg_state = USB_CHG_STATE_UNDEFINED;
+ rphy->chg_type = POWER_SUPPLY_TYPE_UNKNOWN;
+ rport->state = OTG_STATE_B_IDLE;
+ delay = 0;
+ rockchip_usb2phy_power_off(rport->phy);
+ }
+ sch_work = true;
+ break;
+ case OTG_STATE_A_HOST:
+ if (extcon_get_cable_state_(rphy->edev, EXTCON_USB_HOST) == 0) {
+ dev_dbg(&rport->phy->dev, "usb otg host disconnect\n");
+ rport->state = OTG_STATE_B_IDLE;
+ rockchip_usb2phy_power_off(rport->phy);
+ }
+ break;
+ default:
+ break;
+ }
+
+ if (sch_work)
+ schedule_delayed_work(&rport->otg_sm_work, delay);
+}
+
+static const char *chg_to_string(enum power_supply_type chg_type)
+{
+ switch (chg_type) {
+ case POWER_SUPPLY_TYPE_USB:
+ return "USB_SDP_CHARGER";
+ case POWER_SUPPLY_TYPE_USB_DCP:
+ return "USB_DCP_CHARGER";
+ case POWER_SUPPLY_TYPE_USB_CDP:
+ return "USB_CDP_CHARGER";
+ default:
+ return "INVALID_CHARGER";
+ }
+}
+
+static void rockchip_chg_enable_dcd(struct rockchip_usb2phy *rphy,
+ bool en)
+{
+ property_enable(rphy, &rphy->phy_cfg->chg_det.rdm_pdwn_en, en);
+ property_enable(rphy, &rphy->phy_cfg->chg_det.idp_src_en, en);
+}
+
+static void rockchip_chg_enable_primary_det(struct rockchip_usb2phy *rphy,
+ bool en)
+{
+ property_enable(rphy, &rphy->phy_cfg->chg_det.vdp_src_en, en);
+ property_enable(rphy, &rphy->phy_cfg->chg_det.idm_sink_en, en);
+}
+
+static void rockchip_chg_enable_secondary_det(struct rockchip_usb2phy *rphy,
+ bool en)
+{
+ property_enable(rphy, &rphy->phy_cfg->chg_det.vdm_src_en, en);
+ property_enable(rphy, &rphy->phy_cfg->chg_det.idp_sink_en, en);
+}
+
+#define CHG_DCD_POLL_TIME (100 * HZ / 1000)
+#define CHG_DCD_MAX_RETRIES 6
+#define CHG_PRIMARY_DET_TIME (40 * HZ / 1000)
+#define CHG_SECONDARY_DET_TIME (40 * HZ / 1000)
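These delays are milliseconds converted to jiffies by hand; with HZ=250, for example, (100 * HZ / 1000) evaluates to 25 jiffies, i.e. 100 ms. A near-equivalent spelling using the standard helper (msecs_to_jiffies() rounds up where the open-coded form truncates) would be:

/* near-equivalent, via <linux/jiffies.h> instead of open-coded HZ math */
#define CHG_DCD_POLL_TIME	msecs_to_jiffies(100)
#define CHG_PRIMARY_DET_TIME	msecs_to_jiffies(40)
#define CHG_SECONDARY_DET_TIME	msecs_to_jiffies(40)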
+static void rockchip_chg_detect_work(struct work_struct *work)
+{
+ struct rockchip_usb2phy_port *rport =
+ container_of(work, struct rockchip_usb2phy_port, chg_work.work);
+ struct rockchip_usb2phy *rphy = dev_get_drvdata(rport->phy->dev.parent);
+ bool is_dcd, tmout, vout;
+ unsigned long delay;
+
+ dev_dbg(&rport->phy->dev, "chg detection work state = %d\n",
+ rphy->chg_state);
+ switch (rphy->chg_state) {
+ case USB_CHG_STATE_UNDEFINED:
+ if (!rport->suspended)
+ rockchip_usb2phy_power_off(rport->phy);
+ /* put the controller in non-driving mode */
+ property_enable(rphy, &rphy->phy_cfg->chg_det.opmode, false);
+ /* Start DCD processing stage 1 */
+ rockchip_chg_enable_dcd(rphy, true);
+ rphy->chg_state = USB_CHG_STATE_WAIT_FOR_DCD;
+ rphy->dcd_retries = 0;
+ delay = CHG_DCD_POLL_TIME;
+ break;
+ case USB_CHG_STATE_WAIT_FOR_DCD:
+ /* get data contact detection status */
+ is_dcd = property_enabled(rphy, &rphy->phy_cfg->chg_det.dp_det);
+ tmout = ++rphy->dcd_retries == CHG_DCD_MAX_RETRIES;
+ /* stage 2 */
+ if (is_dcd || tmout) {
+ /* stage 4 */
+ /* Turn off DCD circuitry */
+ rockchip_chg_enable_dcd(rphy, false);
+ /* Voltage Source on DP, Probe on DM */
+ rockchip_chg_enable_primary_det(rphy, true);
+ delay = CHG_PRIMARY_DET_TIME;
+ rphy->chg_state = USB_CHG_STATE_DCD_DONE;
+ } else {
+ /* stage 3 */
+ delay = CHG_DCD_POLL_TIME;
+ }
+ break;
+ case USB_CHG_STATE_DCD_DONE:
+ vout = property_enabled(rphy, &rphy->phy_cfg->chg_det.cp_det);
+ rockchip_chg_enable_primary_det(rphy, false);
+ if (vout) {
+ /* Voltage Source on DM, Probe on DP */
+ rockchip_chg_enable_secondary_det(rphy, true);
+ delay = CHG_SECONDARY_DET_TIME;
+ rphy->chg_state = USB_CHG_STATE_PRIMARY_DONE;
+ } else {
+ if (rphy->dcd_retries == CHG_DCD_MAX_RETRIES) {
+ /* floating charger found */
+ rphy->chg_type = POWER_SUPPLY_TYPE_USB_DCP;
+ rphy->chg_state = USB_CHG_STATE_DETECTED;
+ delay = 0;
+ } else {
+ rphy->chg_type = POWER_SUPPLY_TYPE_USB;
+ rphy->chg_state = USB_CHG_STATE_DETECTED;
+ delay = 0;
+ }
+ }
+ break;
+ case USB_CHG_STATE_PRIMARY_DONE:
+ vout = property_enabled(rphy, &rphy->phy_cfg->chg_det.dcp_det);
+ /* Turn off voltage source */
+ rockchip_chg_enable_secondary_det(rphy, false);
+ if (vout)
+ rphy->chg_type = POWER_SUPPLY_TYPE_USB_DCP;
+ else
+ rphy->chg_type = POWER_SUPPLY_TYPE_USB_CDP;
+ /* fall through */
+ case USB_CHG_STATE_SECONDARY_DONE:
+ rphy->chg_state = USB_CHG_STATE_DETECTED;
+ delay = 0;
+ /* fall through */
+ case USB_CHG_STATE_DETECTED:
+ /* put the controller in normal mode */
+ property_enable(rphy, &rphy->phy_cfg->chg_det.opmode, true);
+ rockchip_usb2phy_otg_sm_work(&rport->otg_sm_work.work);
+ dev_info(&rport->phy->dev, "charger = %s\n",
+ chg_to_string(rphy->chg_type));
+ return;
+ default:
+ return;
+ }
+
+ schedule_delayed_work(&rport->chg_work, delay);
+}
+
/*
* This function manages the host-phy port state and suspends/resumes the
* phy port to save power.
@@ -485,6 +884,26 @@ static irqreturn_t rockchip_usb2phy_linestate_irq(int irq, void *data)
return IRQ_HANDLED;
}
+static irqreturn_t rockchip_usb2phy_bvalid_irq(int irq, void *data)
+{
+ struct rockchip_usb2phy_port *rport = data;
+ struct rockchip_usb2phy *rphy = dev_get_drvdata(rport->phy->dev.parent);
+
+ if (!property_enabled(rphy, &rport->port_cfg->bvalid_det_st))
+ return IRQ_NONE;
+
+ mutex_lock(&rport->mutex);
+
+ /* clear bvalid detect irq pending status */
+ property_enable(rphy, &rport->port_cfg->bvalid_det_clr, true);
+
+ mutex_unlock(&rport->mutex);
+
+ rockchip_usb2phy_otg_sm_work(&rport->otg_sm_work.work);
+
+ return IRQ_HANDLED;
+}
+
static int rockchip_usb2phy_host_port_init(struct rockchip_usb2phy *rphy,
struct rockchip_usb2phy_port *rport,
struct device_node *child_np)
@@ -509,13 +928,86 @@ static int rockchip_usb2phy_host_port_init(struct rockchip_usb2phy *rphy,
IRQF_ONESHOT,
"rockchip_usb2phy", rport);
if (ret) {
- dev_err(rphy->dev, "failed to request irq handle\n");
+ dev_err(rphy->dev, "failed to request linestate irq handle\n");
return ret;
}
return 0;
}
+static int rockchip_otg_event(struct notifier_block *nb,
+ unsigned long event, void *ptr)
+{
+ struct rockchip_usb2phy_port *rport =
+ container_of(nb, struct rockchip_usb2phy_port, event_nb);
+
+ schedule_delayed_work(&rport->otg_sm_work, OTG_SCHEDULE_DELAY);
+
+ return NOTIFY_DONE;
+}
+
+static int rockchip_usb2phy_otg_port_init(struct rockchip_usb2phy *rphy,
+ struct rockchip_usb2phy_port *rport,
+ struct device_node *child_np)
+{
+ int ret;
+
+ rport->port_id = USB2PHY_PORT_OTG;
+ rport->port_cfg = &rphy->phy_cfg->port_cfgs[USB2PHY_PORT_OTG];
+ rport->state = OTG_STATE_UNDEFINED;
+
+ /*
+ * Set the suspended flag to true without actually putting the phy
+ * into suspend mode; this lets power_on(), called by the usb
+ * controller driver during probe, enable the usb phy and its clock.
+ */
+ rport->suspended = true;
+ rport->vbus_attached = false;
+
+ mutex_init(&rport->mutex);
+
+ rport->mode = of_usb_get_dr_mode_by_phy(child_np, -1);
+ if (rport->mode == USB_DR_MODE_HOST) {
+ ret = 0;
+ goto out;
+ }
+
+ INIT_DELAYED_WORK(&rport->chg_work, rockchip_chg_detect_work);
+ INIT_DELAYED_WORK(&rport->otg_sm_work, rockchip_usb2phy_otg_sm_work);
+
+ rport->utmi_avalid =
+ of_property_read_bool(child_np, "rockchip,utmi-avalid");
+
+ rport->bvalid_irq = of_irq_get_byname(child_np, "otg-bvalid");
+ if (rport->bvalid_irq < 0) {
+ dev_err(rphy->dev, "no vbus valid irq provided\n");
+ ret = rport->bvalid_irq;
+ goto out;
+ }
+
+ ret = devm_request_threaded_irq(rphy->dev, rport->bvalid_irq, NULL,
+ rockchip_usb2phy_bvalid_irq,
+ IRQF_ONESHOT,
+ "rockchip_usb2phy_bvalid", rport);
+ if (ret) {
+ dev_err(rphy->dev, "failed to request otg-bvalid irq handle\n");
+ goto out;
+ }
+
+ if (!IS_ERR(rphy->edev)) {
+ rport->event_nb.notifier_call = rockchip_otg_event;
+
+ ret = extcon_register_notifier(rphy->edev, EXTCON_USB_HOST,
+ &rport->event_nb);
+ if (ret)
+ dev_err(rphy->dev, "register USB HOST notifier failed\n");
+ }
+
+out:
+ return ret;
+}
+
static int rockchip_usb2phy_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -553,8 +1045,14 @@ static int rockchip_usb2phy_probe(struct platform_device *pdev)
rphy->dev = dev;
phy_cfgs = match->data;
+ rphy->chg_state = USB_CHG_STATE_UNDEFINED;
+ rphy->chg_type = POWER_SUPPLY_TYPE_UNKNOWN;
platform_set_drvdata(pdev, rphy);
+ ret = rockchip_usb2phy_extcon_register(rphy);
+ if (ret)
+ return ret;
+
/* find out a proper config which can be matched with dt. */
index = 0;
while (phy_cfgs[index].reg) {
@@ -591,13 +1089,9 @@ static int rockchip_usb2phy_probe(struct platform_device *pdev)
struct rockchip_usb2phy_port *rport = &rphy->ports[index];
struct phy *phy;
- /*
- * This driver aim to support both otg-port and host-port,
- * but unfortunately, the otg part is not ready in current,
- * so this comments and below codes are interim, which should
- * be changed after otg-port is supplied soon.
- */
- if (of_node_cmp(child_np->name, "host-port"))
+ /* This driver aims to support both otg-port and host-port */
+ if (of_node_cmp(child_np->name, "host-port") &&
+ of_node_cmp(child_np->name, "otg-port"))
goto next_child;
phy = devm_phy_create(dev, child_np, &rockchip_usb2phy_ops);
@@ -610,9 +1104,18 @@ static int rockchip_usb2phy_probe(struct platform_device *pdev)
rport->phy = phy;
phy_set_drvdata(rport->phy, rport);
- ret = rockchip_usb2phy_host_port_init(rphy, rport, child_np);
- if (ret)
- goto put_child;
+ /* initialize otg/host port separately */
+ if (!of_node_cmp(child_np->name, "host-port")) {
+ ret = rockchip_usb2phy_host_port_init(rphy, rport,
+ child_np);
+ if (ret)
+ goto put_child;
+ } else {
+ ret = rockchip_usb2phy_otg_port_init(rphy, rport,
+ child_np);
+ if (ret)
+ goto put_child;
+ }
next_child:
/* to prevent out of boundary */
@@ -654,10 +1157,18 @@ static const struct rockchip_usb2phy_cfg rk3366_phy_cfgs[] = {
static const struct rockchip_usb2phy_cfg rk3399_phy_cfgs[] = {
{
- .reg = 0xe450,
+ .reg = 0xe450,
.num_ports = 2,
.clkout_ctl = { 0xe450, 4, 4, 1, 0 },
.port_cfgs = {
+ [USB2PHY_PORT_OTG] = {
+ .phy_sus = { 0xe454, 1, 0, 2, 1 },
+ .bvalid_det_en = { 0xe3c0, 3, 3, 0, 1 },
+ .bvalid_det_st = { 0xe3e0, 3, 3, 0, 1 },
+ .bvalid_det_clr = { 0xe3d0, 3, 3, 0, 1 },
+ .utmi_avalid = { 0xe2ac, 7, 7, 0, 1 },
+ .utmi_bvalid = { 0xe2ac, 12, 12, 0, 1 },
+ },
[USB2PHY_PORT_HOST] = {
.phy_sus = { 0xe458, 1, 0, 0x2, 0x1 },
.ls_det_en = { 0xe3c0, 6, 6, 0, 1 },
@@ -667,12 +1178,32 @@ static const struct rockchip_usb2phy_cfg rk3399_phy_cfgs[] = {
.utmi_hstdet = { 0xe2ac, 23, 23, 0, 1 }
}
},
+ .chg_det = {
+ .opmode = { 0xe454, 3, 0, 5, 1 },
+ .cp_det = { 0xe2ac, 2, 2, 0, 1 },
+ .dcp_det = { 0xe2ac, 1, 1, 0, 1 },
+ .dp_det = { 0xe2ac, 0, 0, 0, 1 },
+ .idm_sink_en = { 0xe450, 8, 8, 0, 1 },
+ .idp_sink_en = { 0xe450, 7, 7, 0, 1 },
+ .idp_src_en = { 0xe450, 9, 9, 0, 1 },
+ .rdm_pdwn_en = { 0xe450, 10, 10, 0, 1 },
+ .vdm_src_en = { 0xe450, 12, 12, 0, 1 },
+ .vdp_src_en = { 0xe450, 11, 11, 0, 1 },
+ },
},
{
- .reg = 0xe460,
+ .reg = 0xe460,
.num_ports = 2,
.clkout_ctl = { 0xe460, 4, 4, 1, 0 },
.port_cfgs = {
+ [USB2PHY_PORT_OTG] = {
+ .phy_sus = { 0xe464, 1, 0, 2, 1 },
+ .bvalid_det_en = { 0xe3c0, 8, 8, 0, 1 },
+ .bvalid_det_st = { 0xe3e0, 8, 8, 0, 1 },
+ .bvalid_det_clr = { 0xe3d0, 8, 8, 0, 1 },
+ .utmi_avalid = { 0xe2ac, 10, 10, 0, 1 },
+ .utmi_bvalid = { 0xe2ac, 16, 16, 0, 1 },
+ },
[USB2PHY_PORT_HOST] = {
.phy_sus = { 0xe468, 1, 0, 0x2, 0x1 },
.ls_det_en = { 0xe3c0, 11, 11, 0, 1 },
diff --git a/drivers/phy/phy-s5pv210-usb2.c b/drivers/phy/phy-s5pv210-usb2.c
index 004d320767e4..f6f72339bbc3 100644
--- a/drivers/phy/phy-s5pv210-usb2.c
+++ b/drivers/phy/phy-s5pv210-usb2.c
@@ -103,7 +103,7 @@ static void s5pv210_isol(struct samsung_usb2_phy_instance *inst, bool on)
break;
default:
return;
- };
+ }
regmap_update_bits(drv->reg_pmu, S5PV210_USB_ISOL_OFFSET,
mask, on ? 0 : mask);
@@ -127,7 +127,7 @@ static void s5pv210_phy_pwr(struct samsung_usb2_phy_instance *inst, bool on)
rstbits = S5PV210_URSTCON_PHY1_ALL |
S5PV210_URSTCON_HOST_LINK_ALL;
break;
- };
+ }
if (on) {
writel(drv->ref_reg_val, drv->reg_phy + S5PV210_UPHYCLK);
diff --git a/drivers/phy/phy-stih41x-usb.c b/drivers/phy/phy-stih41x-usb.c
deleted file mode 100644
index 0ac74639ad02..000000000000
--- a/drivers/phy/phy-stih41x-usb.c
+++ /dev/null
@@ -1,188 +0,0 @@
-/*
- * Copyright (C) 2014 STMicroelectronics
- *
- * STMicroelectronics PHY driver for STiH41x USB.
- *
- * Author: Maxime Coquelin <maxime.coquelin@st.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2, as
- * published by the Free Software Foundation.
- *
- */
-
-#include <linux/platform_device.h>
-#include <linux/io.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/of.h>
-#include <linux/of_platform.h>
-#include <linux/clk.h>
-#include <linux/phy/phy.h>
-#include <linux/regmap.h>
-#include <linux/mfd/syscon.h>
-
-#define SYSCFG332 0x80
-#define SYSCFG2520 0x820
-
-/**
- * struct stih41x_usb_cfg - SoC specific PHY register mapping
- * @syscfg: Offset in syscfg registers bank
- * @cfg_mask: Bits mask for PHY configuration
- * @cfg: Static configuration value for PHY
- * @oscok: Notify the PHY oscillator clock is ready
- * Setting this bit enable the PHY
- */
-struct stih41x_usb_cfg {
- u32 syscfg;
- u32 cfg_mask;
- u32 cfg;
- u32 oscok;
-};
-
-/**
- * struct stih41x_usb_phy - Private data for the PHY
- * @dev: device for this controller
- * @regmap: Syscfg registers bank in which PHY is configured
- * @cfg: SoC specific PHY register mapping
- * @clk: Oscillator used by the PHY
- */
-struct stih41x_usb_phy {
- struct device *dev;
- struct regmap *regmap;
- const struct stih41x_usb_cfg *cfg;
- struct clk *clk;
-};
-
-static struct stih41x_usb_cfg stih415_usb_phy_cfg = {
- .syscfg = SYSCFG332,
- .cfg_mask = 0x3f,
- .cfg = 0x38,
- .oscok = BIT(6),
-};
-
-static struct stih41x_usb_cfg stih416_usb_phy_cfg = {
- .syscfg = SYSCFG2520,
- .cfg_mask = 0x33f,
- .cfg = 0x238,
- .oscok = BIT(6),
-};
-
-static int stih41x_usb_phy_init(struct phy *phy)
-{
- struct stih41x_usb_phy *phy_dev = phy_get_drvdata(phy);
-
- return regmap_update_bits(phy_dev->regmap, phy_dev->cfg->syscfg,
- phy_dev->cfg->cfg_mask, phy_dev->cfg->cfg);
-}
-
-static int stih41x_usb_phy_power_on(struct phy *phy)
-{
- struct stih41x_usb_phy *phy_dev = phy_get_drvdata(phy);
- int ret;
-
- ret = clk_prepare_enable(phy_dev->clk);
- if (ret) {
- dev_err(phy_dev->dev, "Failed to enable osc_phy clock\n");
- return ret;
- }
-
- ret = regmap_update_bits(phy_dev->regmap, phy_dev->cfg->syscfg,
- phy_dev->cfg->oscok, phy_dev->cfg->oscok);
- if (ret)
- clk_disable_unprepare(phy_dev->clk);
-
- return ret;
-}
-
-static int stih41x_usb_phy_power_off(struct phy *phy)
-{
- struct stih41x_usb_phy *phy_dev = phy_get_drvdata(phy);
- int ret;
-
- ret = regmap_update_bits(phy_dev->regmap, phy_dev->cfg->syscfg,
- phy_dev->cfg->oscok, 0);
- if (ret) {
- dev_err(phy_dev->dev, "Failed to clear oscok bit\n");
- return ret;
- }
-
- clk_disable_unprepare(phy_dev->clk);
-
- return 0;
-}
-
-static const struct phy_ops stih41x_usb_phy_ops = {
- .init = stih41x_usb_phy_init,
- .power_on = stih41x_usb_phy_power_on,
- .power_off = stih41x_usb_phy_power_off,
- .owner = THIS_MODULE,
-};
-
-static const struct of_device_id stih41x_usb_phy_of_match[];
-
-static int stih41x_usb_phy_probe(struct platform_device *pdev)
-{
- struct device_node *np = pdev->dev.of_node;
- const struct of_device_id *match;
- struct stih41x_usb_phy *phy_dev;
- struct device *dev = &pdev->dev;
- struct phy_provider *phy_provider;
- struct phy *phy;
-
- phy_dev = devm_kzalloc(dev, sizeof(*phy_dev), GFP_KERNEL);
- if (!phy_dev)
- return -ENOMEM;
-
- match = of_match_device(stih41x_usb_phy_of_match, &pdev->dev);
- if (!match)
- return -ENODEV;
-
- phy_dev->cfg = match->data;
-
- phy_dev->regmap = syscon_regmap_lookup_by_phandle(np, "st,syscfg");
- if (IS_ERR(phy_dev->regmap)) {
- dev_err(dev, "No syscfg phandle specified\n");
- return PTR_ERR(phy_dev->regmap);
- }
-
- phy_dev->clk = devm_clk_get(dev, "osc_phy");
- if (IS_ERR(phy_dev->clk)) {
- dev_err(dev, "osc_phy clk not found\n");
- return PTR_ERR(phy_dev->clk);
- }
-
- phy = devm_phy_create(dev, NULL, &stih41x_usb_phy_ops);
-
- if (IS_ERR(phy)) {
- dev_err(dev, "failed to create phy\n");
- return PTR_ERR(phy);
- }
-
- phy_dev->dev = dev;
-
- phy_set_drvdata(phy, phy_dev);
-
- phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
- return PTR_ERR_OR_ZERO(phy_provider);
-}
-
-static const struct of_device_id stih41x_usb_phy_of_match[] = {
- { .compatible = "st,stih415-usb-phy", .data = &stih415_usb_phy_cfg },
- { .compatible = "st,stih416-usb-phy", .data = &stih416_usb_phy_cfg },
- { /* sentinel */ },
-};
-MODULE_DEVICE_TABLE(of, stih41x_usb_phy_of_match);
-
-static struct platform_driver stih41x_usb_phy_driver = {
- .probe = stih41x_usb_phy_probe,
- .driver = {
- .name = "stih41x-usb-phy",
- .of_match_table = stih41x_usb_phy_of_match,
- }
-};
-module_platform_driver(stih41x_usb_phy_driver);
-
-MODULE_AUTHOR("Maxime Coquelin <maxime.coquelin@st.com>");
-MODULE_DESCRIPTION("STMicroelectronics USB PHY driver for STiH41x series");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/phy/phy-sun4i-usb.c b/drivers/phy/phy-sun4i-usb.c
index fec34f5213c4..bf28a0fdd569 100644
--- a/drivers/phy/phy-sun4i-usb.c
+++ b/drivers/phy/phy-sun4i-usb.c
@@ -436,25 +436,31 @@ static int sun4i_usb_phy_set_mode(struct phy *_phy, enum phy_mode mode)
{
struct sun4i_usb_phy *phy = phy_get_drvdata(_phy);
struct sun4i_usb_phy_data *data = to_sun4i_usb_phy_data(phy);
+ int new_mode;
if (phy->index != 0)
return -EINVAL;
switch (mode) {
case PHY_MODE_USB_HOST:
- data->dr_mode = USB_DR_MODE_HOST;
+ new_mode = USB_DR_MODE_HOST;
break;
case PHY_MODE_USB_DEVICE:
- data->dr_mode = USB_DR_MODE_PERIPHERAL;
+ new_mode = USB_DR_MODE_PERIPHERAL;
break;
case PHY_MODE_USB_OTG:
- data->dr_mode = USB_DR_MODE_OTG;
+ new_mode = USB_DR_MODE_OTG;
break;
default:
return -EINVAL;
}
- dev_info(&_phy->dev, "Changing dr_mode to %d\n", (int)data->dr_mode);
+ if (new_mode != data->dr_mode) {
+ dev_info(&_phy->dev, "Changing dr_mode to %d\n", new_mode);
+ data->dr_mode = new_mode;
+ }
+
+ data->id_det = -1; /* Force reprocessing of id */
data->force_session_end = true;
queue_delayed_work(system_wq, &data->detect, 0);
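For context, a controller driver reaches this callback through the generic phy API; a minimal fragment:

/* sketch: switching the sun4i OTG phy into host mode from a controller */
ret = phy_set_mode(phy, PHY_MODE_USB_HOST);
if (ret)
	dev_err(dev, "failed to set phy mode: %d\n", ret);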
diff --git a/drivers/phy/phy-ti-pipe3.c b/drivers/phy/phy-ti-pipe3.c
index bf46844dc387..9c84d32c6f60 100644
--- a/drivers/phy/phy-ti-pipe3.c
+++ b/drivers/phy/phy-ti-pipe3.c
@@ -537,10 +537,7 @@ static int ti_pipe3_get_pll_base(struct ti_pipe3 *phy)
res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
"pll_ctrl");
phy->pll_ctrl_base = devm_ioremap_resource(dev, res);
- if (IS_ERR(phy->pll_ctrl_base))
- return PTR_ERR(phy->pll_ctrl_base);
-
- return 0;
+ return PTR_ERR_OR_ZERO(phy->pll_ctrl_base);
}
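These conversions rely on the <linux/err.h> helper, which folds the IS_ERR() check and the success return into a single expression; its definition is roughly:

/* roughly the <linux/err.h> definition the hunks here switch to */
static inline int __must_check PTR_ERR_OR_ZERO(__force const void *ptr)
{
	if (IS_ERR(ptr))
		return PTR_ERR(ptr);
	else
		return 0;
}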
static int ti_pipe3_probe(struct platform_device *pdev)
@@ -592,10 +589,7 @@ static int ti_pipe3_probe(struct platform_device *pdev)
ti_pipe3_power_off(generic_phy);
phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
- if (IS_ERR(phy_provider))
- return PTR_ERR(phy_provider);
-
- return 0;
+ return PTR_ERR_OR_ZERO(phy_provider);
}
static int ti_pipe3_remove(struct platform_device *pdev)
diff --git a/drivers/phy/phy-twl4030-usb.c b/drivers/phy/phy-twl4030-usb.c
index 547ca7b3f098..2990b3965460 100644
--- a/drivers/phy/phy-twl4030-usb.c
+++ b/drivers/phy/phy-twl4030-usb.c
@@ -317,6 +317,9 @@ static enum musb_vbus_id_status
linkstat = MUSB_VBUS_OFF;
}
+ kobject_uevent(&twl->dev->kobj, linkstat == MUSB_VBUS_VALID
+ ? KOBJ_ONLINE : KOBJ_OFFLINE);
+
dev_dbg(twl->dev, "HW_CONDITIONS 0x%02x/%d; link %d\n",
status, status, linkstat);
diff --git a/drivers/phy/tegra/xusb-tegra124.c b/drivers/phy/tegra/xusb-tegra124.c
index 119957249a51..c45cbedc6634 100644
--- a/drivers/phy/tegra/xusb-tegra124.c
+++ b/drivers/phy/tegra/xusb-tegra124.c
@@ -1483,7 +1483,6 @@ static int tegra124_usb3_port_enable(struct tegra_xusb_port *port)
struct tegra_xusb_padctl *padctl = port->padctl;
struct tegra_xusb_lane *lane = usb3->base.lane;
unsigned int index = port->index, offset;
- int ret = 0;
u32 value;
value = padctl_readl(padctl, XUSB_PADCTL_SS_PORT_MAP);
@@ -1612,7 +1611,7 @@ static int tegra124_usb3_port_enable(struct tegra_xusb_port *port)
value &= ~XUSB_PADCTL_ELPG_PROGRAM_SSPX_ELPG_CLAMP_EN(index);
padctl_writel(padctl, value, XUSB_PADCTL_ELPG_PROGRAM);
- return ret;
+ return 0;
}
static void tegra124_usb3_port_disable(struct tegra_xusb_port *port)
diff --git a/drivers/phy/tegra/xusb.c b/drivers/phy/tegra/xusb.c
index 873424ab0e32..3cbcb2537657 100644
--- a/drivers/phy/tegra/xusb.c
+++ b/drivers/phy/tegra/xusb.c
@@ -561,10 +561,7 @@ static int tegra_xusb_usb2_port_parse_dt(struct tegra_xusb_usb2_port *usb2)
usb2->internal = of_property_read_bool(np, "nvidia,internal");
usb2->supply = devm_regulator_get(&port->dev, "vbus");
- if (IS_ERR(usb2->supply))
- return PTR_ERR(usb2->supply);
-
- return 0;
+ return PTR_ERR_OR_ZERO(usb2->supply);
}
static int tegra_xusb_add_usb2_port(struct tegra_xusb_padctl *padctl,
@@ -731,10 +728,7 @@ static int tegra_xusb_usb3_port_parse_dt(struct tegra_xusb_usb3_port *usb3)
usb3->internal = of_property_read_bool(np, "nvidia,internal");
usb3->supply = devm_regulator_get(&port->dev, "vbus");
- if (IS_ERR(usb3->supply))
- return PTR_ERR(usb3->supply);
-
- return 0;
+ return PTR_ERR_OR_ZERO(usb3->supply);
}
static int tegra_xusb_add_usb3_port(struct tegra_xusb_padctl *padctl,
diff --git a/drivers/pinctrl/Kconfig b/drivers/pinctrl/Kconfig
index 0e75d94972ba..54044a8ecbd7 100644
--- a/drivers/pinctrl/Kconfig
+++ b/drivers/pinctrl/Kconfig
@@ -93,6 +93,15 @@ config PINCTRL_AMD
Requires ACPI/FDT device enumeration code to set up a platform
device.
+config PINCTRL_DA850_PUPD
+ tristate "TI DA850/OMAP-L138/AM18XX pullup/pulldown groups"
+ depends on OF && (ARCH_DAVINCI_DA850 || COMPILE_TEST)
+ select PINCONF
+ select GENERIC_PINCONF
+ help
+ Driver for TI DA850/OMAP-L138/AM18XX pinconf. Used to control
+ pullup/pulldown pin groups.
+
config PINCTRL_DIGICOLOR
bool
depends on OF && (ARCH_DIGICOLOR || COMPILE_TEST)
@@ -164,6 +173,21 @@ config PINCTRL_SIRF
select GENERIC_PINCONF
select GPIOLIB_IRQCHIP
+config PINCTRL_SX150X
+ bool "Semtech SX150x I2C GPIO expander pinctrl driver"
+ depends on GPIOLIB && I2C=y
+ select PINMUX
+ select PINCONF
+ select GENERIC_PINCONF
+ select GPIOLIB_IRQCHIP
+ select REGMAP
+ help
+ Say yes here to provide support for Semtech SX150x-series I2C
+ GPIO expanders as a pinctrl module.
+ Compatible models include:
+ - 8 bits: sx1508q, sx1502q
+ - 16 bits: sx1509q, sx1506q
+
config PINCTRL_PISTACHIO
def_bool y if MACH_PISTACHIO
depends on GPIOLIB
@@ -209,7 +233,7 @@ config PINCTRL_COH901
config PINCTRL_MAX77620
tristate "MAX77620/MAX20024 Pincontrol support"
- depends on MFD_MAX77620
+ depends on MFD_MAX77620 && OF
select PINMUX
select GENERIC_PINCONF
help
diff --git a/drivers/pinctrl/Makefile b/drivers/pinctrl/Makefile
index 11bad373dfe0..25d50a86981d 100644
--- a/drivers/pinctrl/Makefile
+++ b/drivers/pinctrl/Makefile
@@ -14,6 +14,7 @@ obj-$(CONFIG_PINCTRL_BF60x) += pinctrl-adi2-bf60x.o
obj-$(CONFIG_PINCTRL_AT91) += pinctrl-at91.o
obj-$(CONFIG_PINCTRL_AT91PIO4) += pinctrl-at91-pio4.o
obj-$(CONFIG_PINCTRL_AMD) += pinctrl-amd.o
+obj-$(CONFIG_PINCTRL_DA850_PUPD) += pinctrl-da850-pupd.o
obj-$(CONFIG_PINCTRL_DIGICOLOR) += pinctrl-digicolor.o
obj-$(CONFIG_PINCTRL_FALCON) += pinctrl-falcon.o
obj-$(CONFIG_PINCTRL_MAX77620) += pinctrl-max77620.o
@@ -25,6 +26,7 @@ obj-$(CONFIG_PINCTRL_PISTACHIO) += pinctrl-pistachio.o
obj-$(CONFIG_PINCTRL_ROCKCHIP) += pinctrl-rockchip.o
obj-$(CONFIG_PINCTRL_SINGLE) += pinctrl-single.o
obj-$(CONFIG_PINCTRL_SIRF) += sirf/
+obj-$(CONFIG_PINCTRL_SX150X) += pinctrl-sx150x.o
obj-$(CONFIG_ARCH_TEGRA) += tegra/
obj-$(CONFIG_PINCTRL_TZ1090) += pinctrl-tz1090.o
obj-$(CONFIG_PINCTRL_TZ1090_PDC) += pinctrl-tz1090-pdc.o
diff --git a/drivers/pinctrl/bcm/Kconfig b/drivers/pinctrl/bcm/Kconfig
index 63246770bd74..8968dd7aebed 100644
--- a/drivers/pinctrl/bcm/Kconfig
+++ b/drivers/pinctrl/bcm/Kconfig
@@ -20,6 +20,7 @@ config PINCTRL_BCM2835
bool
select PINMUX
select PINCONF
+ select GPIOLIB_IRQCHIP
config PINCTRL_IPROC_GPIO
bool "Broadcom iProc GPIO (with PINCONF) driver"
diff --git a/drivers/pinctrl/bcm/pinctrl-bcm2835.c b/drivers/pinctrl/bcm/pinctrl-bcm2835.c
index fa77165fab2c..85d009112864 100644
--- a/drivers/pinctrl/bcm/pinctrl-bcm2835.c
+++ b/drivers/pinctrl/bcm/pinctrl-bcm2835.c
@@ -24,11 +24,9 @@
#include <linux/device.h>
#include <linux/err.h>
#include <linux/gpio/driver.h>
-#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/irqdesc.h>
-#include <linux/irqdomain.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of.h>
@@ -47,6 +45,7 @@
#define MODULE_NAME "pinctrl-bcm2835"
#define BCM2835_NUM_GPIOS 54
#define BCM2835_NUM_BANKS 2
+#define BCM2835_NUM_IRQS 3
#define BCM2835_PIN_BITMAP_SZ \
DIV_ROUND_UP(BCM2835_NUM_GPIOS, sizeof(unsigned long) * 8)
@@ -76,41 +75,27 @@ enum bcm2835_pinconf_param {
BCM2835_PINCONF_PARAM_PULL,
};
-enum bcm2835_pinconf_pull {
- BCM2835_PINCONFIG_PULL_NONE,
- BCM2835_PINCONFIG_PULL_DOWN,
- BCM2835_PINCONFIG_PULL_UP,
-};
-
#define BCM2835_PINCONF_PACK(_param_, _arg_) ((_param_) << 16 | (_arg_))
#define BCM2835_PINCONF_UNPACK_PARAM(_conf_) ((_conf_) >> 16)
#define BCM2835_PINCONF_UNPACK_ARG(_conf_) ((_conf_) & 0xffff)
-struct bcm2835_gpio_irqdata {
- struct bcm2835_pinctrl *pc;
- int bank;
-};
-
struct bcm2835_pinctrl {
struct device *dev;
void __iomem *base;
- int irq[BCM2835_NUM_BANKS];
+ int irq[BCM2835_NUM_IRQS];
/* note: locking assumes each bank will have its own unsigned long */
unsigned long enabled_irq_map[BCM2835_NUM_BANKS];
unsigned int irq_type[BCM2835_NUM_GPIOS];
struct pinctrl_dev *pctl_dev;
- struct irq_domain *irq_domain;
struct gpio_chip gpio_chip;
struct pinctrl_gpio_range gpio_range;
- struct bcm2835_gpio_irqdata irq_data[BCM2835_NUM_BANKS];
+ int irq_group[BCM2835_NUM_IRQS];
spinlock_t irq_lock[BCM2835_NUM_BANKS];
};
-static struct lock_class_key gpio_lock_class;
-
/* pins are just named GPIO0..GPIO53 */
#define BCM2835_GPIO_PIN(a) PINCTRL_PIN(a, "gpio" #a)
static struct pinctrl_pin_desc bcm2835_gpio_pins[] = {
@@ -368,13 +353,6 @@ static int bcm2835_gpio_direction_output(struct gpio_chip *chip,
return pinctrl_gpio_direction_output(chip->base + offset);
}
-static int bcm2835_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
-{
- struct bcm2835_pinctrl *pc = gpiochip_get_data(chip);
-
- return irq_linear_revmap(pc->irq_domain, offset);
-}
-
static struct gpio_chip bcm2835_gpio_chip = {
.label = MODULE_NAME,
.owner = THIS_MODULE,
@@ -385,31 +363,67 @@ static struct gpio_chip bcm2835_gpio_chip = {
.get_direction = bcm2835_gpio_get_direction,
.get = bcm2835_gpio_get,
.set = bcm2835_gpio_set,
- .to_irq = bcm2835_gpio_to_irq,
.base = -1,
.ngpio = BCM2835_NUM_GPIOS,
.can_sleep = false,
};
-static irqreturn_t bcm2835_gpio_irq_handler(int irq, void *dev_id)
+static void bcm2835_gpio_irq_handle_bank(struct bcm2835_pinctrl *pc,
+ unsigned int bank, u32 mask)
{
- struct bcm2835_gpio_irqdata *irqdata = dev_id;
- struct bcm2835_pinctrl *pc = irqdata->pc;
- int bank = irqdata->bank;
unsigned long events;
unsigned offset;
unsigned gpio;
unsigned int type;
events = bcm2835_gpio_rd(pc, GPEDS0 + bank * 4);
+ events &= mask;
events &= pc->enabled_irq_map[bank];
for_each_set_bit(offset, &events, 32) {
gpio = (32 * bank) + offset;
+ /* FIXME: no clue why the code looks up the type here */
type = pc->irq_type[gpio];
- generic_handle_irq(irq_linear_revmap(pc->irq_domain, gpio));
+ generic_handle_irq(irq_linear_revmap(pc->gpio_chip.irqdomain,
+ gpio));
+ }
+}
+
+static void bcm2835_gpio_irq_handler(struct irq_desc *desc)
+{
+ struct gpio_chip *chip = irq_desc_get_handler_data(desc);
+ struct bcm2835_pinctrl *pc = gpiochip_get_data(chip);
+ struct irq_chip *host_chip = irq_desc_get_chip(desc);
+ int irq = irq_desc_get_irq(desc);
+ int group;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(pc->irq); i++) {
+ if (pc->irq[i] == irq) {
+ group = pc->irq_group[i];
+ break;
+ }
+ }
+ /* This should not happen, every IRQ has a bank */
+ if (i == ARRAY_SIZE(pc->irq))
+ BUG();
+
+ chained_irq_enter(host_chip, desc);
+
+ switch (group) {
+ case 0: /* IRQ0 covers GPIOs 0-27 */
+ bcm2835_gpio_irq_handle_bank(pc, 0, 0x0fffffff);
+ break;
+ case 1: /* IRQ1 covers GPIOs 28-45 */
+ bcm2835_gpio_irq_handle_bank(pc, 0, 0xf0000000);
+ bcm2835_gpio_irq_handle_bank(pc, 1, 0x00003fff);
+ break;
+ case 2: /* IRQ2 covers GPIOs 46-53 */
+ bcm2835_gpio_irq_handle_bank(pc, 1, 0x003fc000);
+ break;
}
- return events ? IRQ_HANDLED : IRQ_NONE;
+
+ chained_irq_exit(host_chip, desc);
}
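The three masks partition the 54 GPIOs between the two 32-bit banks: group 0 takes bank-0 bits 0-27 (GPIOs 0-27), group 1 takes bank-0 bits 28-31 plus bank-1 bits 0-13 (GPIOs 28-45), and group 2 takes bank-1 bits 14-21 (GPIOs 46-53). A quick standalone check:

/* sanity sketch (userspace): the group masks tile all 54 GPIOs */
#include <assert.h>

int main(void)
{
	unsigned int bank0 = 0x0fffffffu | 0xf0000000u;	/* groups 0+1 */
	unsigned int bank1 = 0x00003fffu | 0x003fc000u;	/* groups 1+2 */

	assert(bank0 == 0xffffffffu);	/* GPIOs 0-31 */
	assert(bank1 == 0x003fffffu);	/* GPIOs 32-53: 22 contiguous bits */
	return 0;
}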
static inline void __bcm2835_gpio_irq_config(struct bcm2835_pinctrl *pc,
@@ -455,7 +469,8 @@ static void bcm2835_gpio_irq_config(struct bcm2835_pinctrl *pc,
static void bcm2835_gpio_irq_enable(struct irq_data *data)
{
- struct bcm2835_pinctrl *pc = irq_data_get_irq_chip_data(data);
+ struct gpio_chip *chip = irq_data_get_irq_chip_data(data);
+ struct bcm2835_pinctrl *pc = gpiochip_get_data(chip);
unsigned gpio = irqd_to_hwirq(data);
unsigned offset = GPIO_REG_SHIFT(gpio);
unsigned bank = GPIO_REG_OFFSET(gpio);
@@ -469,7 +484,8 @@ static void bcm2835_gpio_irq_enable(struct irq_data *data)
static void bcm2835_gpio_irq_disable(struct irq_data *data)
{
- struct bcm2835_pinctrl *pc = irq_data_get_irq_chip_data(data);
+ struct gpio_chip *chip = irq_data_get_irq_chip_data(data);
+ struct bcm2835_pinctrl *pc = gpiochip_get_data(chip);
unsigned gpio = irqd_to_hwirq(data);
unsigned offset = GPIO_REG_SHIFT(gpio);
unsigned bank = GPIO_REG_OFFSET(gpio);
@@ -575,7 +591,8 @@ static int __bcm2835_gpio_irq_set_type_enabled(struct bcm2835_pinctrl *pc,
static int bcm2835_gpio_irq_set_type(struct irq_data *data, unsigned int type)
{
- struct bcm2835_pinctrl *pc = irq_data_get_irq_chip_data(data);
+ struct gpio_chip *chip = irq_data_get_irq_chip_data(data);
+ struct bcm2835_pinctrl *pc = gpiochip_get_data(chip);
unsigned gpio = irqd_to_hwirq(data);
unsigned offset = GPIO_REG_SHIFT(gpio);
unsigned bank = GPIO_REG_OFFSET(gpio);
@@ -601,7 +618,8 @@ static int bcm2835_gpio_irq_set_type(struct irq_data *data, unsigned int type)
static void bcm2835_gpio_irq_ack(struct irq_data *data)
{
- struct bcm2835_pinctrl *pc = irq_data_get_irq_chip_data(data);
+ struct gpio_chip *chip = irq_data_get_irq_chip_data(data);
+ struct bcm2835_pinctrl *pc = gpiochip_get_data(chip);
unsigned gpio = irqd_to_hwirq(data);
bcm2835_gpio_set_bit(pc, GPEDS0, gpio);
@@ -644,10 +662,11 @@ static void bcm2835_pctl_pin_dbg_show(struct pinctrl_dev *pctldev,
unsigned offset)
{
struct bcm2835_pinctrl *pc = pinctrl_dev_get_drvdata(pctldev);
+ struct gpio_chip *chip = &pc->gpio_chip;
enum bcm2835_fsel fsel = bcm2835_pinctrl_fsel_get(pc, offset);
const char *fname = bcm2835_functions[fsel];
int value = bcm2835_gpio_get_bit(pc, GPLEV0, offset);
- int irq = irq_find_mapping(pc->irq_domain, offset);
+ int irq = irq_find_mapping(chip->irqdomain, offset);
seq_printf(s, "function %s in %s; irq %d (%s)",
fname, value ? "hi" : "lo",
@@ -821,6 +840,16 @@ static const struct pinctrl_ops bcm2835_pctl_ops = {
.dt_free_map = bcm2835_pctl_dt_free_map,
};
+static int bcm2835_pmx_free(struct pinctrl_dev *pctldev,
+ unsigned offset)
+{
+ struct bcm2835_pinctrl *pc = pinctrl_dev_get_drvdata(pctldev);
+
+ /* disable by setting to GPIO_IN */
+ bcm2835_pinctrl_fsel_set(pc, offset, BCM2835_FSEL_GPIO_IN);
+ return 0;
+}
+
static int bcm2835_pmx_get_functions_count(struct pinctrl_dev *pctldev)
{
return BCM2835_FSEL_COUNT;
@@ -880,6 +909,7 @@ static int bcm2835_pmx_gpio_set_direction(struct pinctrl_dev *pctldev,
}
static const struct pinmux_ops bcm2835_pmx_ops = {
+ .free = bcm2835_pmx_free,
.get_functions_count = bcm2835_pmx_get_functions_count,
.get_function_name = bcm2835_pmx_get_function_name,
.get_function_groups = bcm2835_pmx_get_function_groups,
@@ -917,12 +947,14 @@ static int bcm2835_pinconf_set(struct pinctrl_dev *pctldev,
bcm2835_gpio_wr(pc, GPPUD, arg & 3);
/*
- * Docs say to wait 150 cycles, but not of what. We assume a
- * 1 MHz clock here, which is pretty slow...
+ * The BCM2835 datasheet says to wait 150 cycles, but not of which
+ * clock. The VideoCore firmware delays for roughly the same number
+ * of VPU cycles for this operation, and that clock runs at 250 MHz,
+ * so 150 cycles is about 0.6 us and udelay(1) is more than enough.
*/
- udelay(150);
+ udelay(1);
bcm2835_gpio_wr(pc, GPPUDCLK0 + (off * 4), BIT(bit));
- udelay(150);
+ udelay(1);
bcm2835_gpio_wr(pc, GPPUDCLK0 + (off * 4), 0);
} /* for each config */
@@ -980,26 +1012,9 @@ static int bcm2835_pinctrl_probe(struct platform_device *pdev)
pc->gpio_chip.parent = dev;
pc->gpio_chip.of_node = np;
- pc->irq_domain = irq_domain_add_linear(np, BCM2835_NUM_GPIOS,
- &irq_domain_simple_ops, NULL);
- if (!pc->irq_domain) {
- dev_err(dev, "could not create IRQ domain\n");
- return -ENOMEM;
- }
-
- for (i = 0; i < BCM2835_NUM_GPIOS; i++) {
- int irq = irq_create_mapping(pc->irq_domain, i);
- irq_set_lockdep_class(irq, &gpio_lock_class);
- irq_set_chip_and_handler(irq, &bcm2835_gpio_irq_chip,
- handle_level_irq);
- irq_set_chip_data(irq, pc);
- }
-
for (i = 0; i < BCM2835_NUM_BANKS; i++) {
unsigned long events;
unsigned offset;
- int len;
- char *name;
/* clear event detection flags */
bcm2835_gpio_wr(pc, GPREN0 + i * 4, 0);
@@ -1014,24 +1029,7 @@ static int bcm2835_pinctrl_probe(struct platform_device *pdev)
for_each_set_bit(offset, &events, 32)
bcm2835_gpio_wr(pc, GPEDS0 + i * 4, BIT(offset));
- pc->irq[i] = irq_of_parse_and_map(np, i);
- pc->irq_data[i].pc = pc;
- pc->irq_data[i].bank = i;
spin_lock_init(&pc->irq_lock[i]);
-
- len = strlen(dev_name(pc->dev)) + 16;
- name = devm_kzalloc(pc->dev, len, GFP_KERNEL);
- if (!name)
- return -ENOMEM;
- snprintf(name, len, "%s:bank%d", dev_name(pc->dev), i);
-
- err = devm_request_irq(dev, pc->irq[i],
- bcm2835_gpio_irq_handler, IRQF_SHARED,
- name, &pc->irq_data[i]);
- if (err) {
- dev_err(dev, "unable to request IRQ %d\n", pc->irq[i]);
- return err;
- }
}
err = gpiochip_add_data(&pc->gpio_chip, pc);
@@ -1040,6 +1038,29 @@ static int bcm2835_pinctrl_probe(struct platform_device *pdev)
return err;
}
+ err = gpiochip_irqchip_add(&pc->gpio_chip, &bcm2835_gpio_irq_chip,
+ 0, handle_level_irq, IRQ_TYPE_NONE);
+ if (err) {
+ dev_err(dev, "could not add irqchip\n");
+ return err;
+ }
+
+ for (i = 0; i < BCM2835_NUM_IRQS; i++) {
+ pc->irq[i] = irq_of_parse_and_map(np, i);
+ pc->irq_group[i] = i;
+ /*
+ * Use the same handler for all groups: this is necessary
+ * since we use one gpiochip to cover all lines - the
+ * irq handler then needs to figure out which group and
+ * bank that was firing the IRQ and look up the per-group
+ * and bank data.
+ */
+ gpiochip_set_chained_irqchip(&pc->gpio_chip,
+ &bcm2835_gpio_irq_chip,
+ pc->irq[i],
+ bcm2835_gpio_irq_handler);
+ }
+
pc->pctl_dev = devm_pinctrl_register(dev, &bcm2835_pinctrl_desc, pc);
if (IS_ERR(pc->pctl_dev)) {
gpiochip_remove(&pc->gpio_chip);
diff --git a/drivers/pinctrl/devicetree.c b/drivers/pinctrl/devicetree.c
index 54dad89fc9bf..260908480075 100644
--- a/drivers/pinctrl/devicetree.c
+++ b/drivers/pinctrl/devicetree.c
@@ -253,3 +253,147 @@ err:
pinctrl_dt_free_maps(p);
return ret;
}
+
+/*
+ * For pinctrl binding, typically #pinctrl-cells is for the pin controller
+ * device, so either parent or grandparent. See pinctrl-bindings.txt.
+ */
+static int pinctrl_find_cells_size(const struct device_node *np)
+{
+ const char *cells_name = "#pinctrl-cells";
+ int cells_size, error;
+
+ error = of_property_read_u32(np->parent, cells_name, &cells_size);
+ if (error) {
+ error = of_property_read_u32(np->parent->parent,
+ cells_name, &cells_size);
+ if (error)
+ return -ENOENT;
+ }
+
+ return cells_size;
+}
+
+/**
+ * pinctrl_get_list_and_count - Gets the list and its cell size and element count
+ * @np: pointer to device node with the property
+ * @list_name: property that contains the list
+ * @list: pointer for the list found
+ * @cells_size: pointer for the cell size found
+ * @nr_elements: pointer for the number of elements found
+ *
+ * Typically np is a single pinctrl entry containing the list.
+ */
+static int pinctrl_get_list_and_count(const struct device_node *np,
+ const char *list_name,
+ const __be32 **list,
+ int *cells_size,
+ int *nr_elements)
+{
+ int size;
+
+ *cells_size = 0;
+ *nr_elements = 0;
+
+ *list = of_get_property(np, list_name, &size);
+ if (!*list)
+ return -ENOENT;
+
+ *cells_size = pinctrl_find_cells_size(np);
+ if (*cells_size < 0)
+ return -ENOENT;
+
+ /* First element is always the index within the pinctrl device */
+ *nr_elements = (size / sizeof(**list)) / (*cells_size + 1);
+
+ return 0;
+}
+
+/**
+ * pinctrl_count_index_with_args - Count number of elements in a pinctrl entry
+ * @np: pointer to device node with the property
+ * @list_name: property that contains the list
+ *
+ * Counts the number of elements in a pinctrl array consisting of an index
+ * within the controller and a number of u32 entries specified for each
+ * entry. Note that device_node is always for the parent pin controller device.
+ */
+int pinctrl_count_index_with_args(const struct device_node *np,
+ const char *list_name)
+{
+ const __be32 *list;
+ int size, nr_cells, error;
+
+ error = pinctrl_get_list_and_count(np, list_name, &list,
+ &nr_cells, &size);
+ if (error)
+ return error;
+
+ return size;
+}
+EXPORT_SYMBOL_GPL(pinctrl_count_index_with_args);
+
+/**
+ * pinctrl_copy_args - Populates of_phandle_args based on index
+ * @np: pointer to device node with the property
+ * @list: pointer to a list with the elements
+ * @index: entry within the list of elements
+ * @nr_cells: number of cells in the list
+ * @nr_elem: number of elements for each entry in the list
+ * @out_args: returned values
+ *
+ * Populates the of_phandle_args based on the index in the list.
+ */
+static int pinctrl_copy_args(const struct device_node *np,
+ const __be32 *list,
+ int index, int nr_cells, int nr_elem,
+ struct of_phandle_args *out_args)
+{
+ int i;
+
+ memset(out_args, 0, sizeof(*out_args));
+ out_args->np = (struct device_node *)np;
+ out_args->args_count = nr_cells + 1;
+
+ if (index >= nr_elem)
+ return -EINVAL;
+
+ list += index * (nr_cells + 1);
+
+ for (i = 0; i < nr_cells + 1; i++)
+ out_args->args[i] = be32_to_cpup(list++);
+
+ return 0;
+}
+
+/**
+ * pinctrl_parse_index_with_args - Find a node pointed by index in a list
+ * @np: pointer to device node with the property
+ * @list_name: property that contains the list
+ * @index: index within the list
+ * @out_args: entries in the list pointed to by index
+ *
+ * Finds the selected element in a pinctrl array consisting of an index
+ * within the controller and a number of u32 entries specified for each
+ * entry. Note that device_node is always for the parent pin controller device.
+ */
+int pinctrl_parse_index_with_args(const struct device_node *np,
+ const char *list_name, int index,
+ struct of_phandle_args *out_args)
+{
+ const __be32 *list;
+ int nr_elem, nr_cells, error;
+
+ error = pinctrl_get_list_and_count(np, list_name, &list,
+ &nr_cells, &nr_elem);
+ if (error || !nr_cells)
+ return error;
+
+ error = pinctrl_copy_args(np, list, index, nr_cells, nr_elem,
+ out_args);
+ if (error)
+ return error;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(pinctrl_parse_index_with_args);
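A hedged usage sketch of the two new helpers; the property name and the interpretation of the payload cells are hypothetical and depend on the individual pin controller binding:

/* sketch: iterate a "<index> <payload-cells...>" style pinctrl property */
static int parse_my_pins(struct device_node *np)	/* hypothetical caller */
{
	struct of_phandle_args pinspec;
	int i, count;

	count = pinctrl_count_index_with_args(np, "mycontroller,pins");
	if (count < 0)
		return count;

	for (i = 0; i < count; i++) {
		if (pinctrl_parse_index_with_args(np, "mycontroller,pins",
						  i, &pinspec))
			break;
		/*
		 * pinspec.args[0] is the index within the pin controller;
		 * the remaining args carry the #pinctrl-cells payload.
		 */
	}
	return 0;
}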
diff --git a/drivers/pinctrl/devicetree.h b/drivers/pinctrl/devicetree.h
index 760bc4960f58..c2d1a5505850 100644
--- a/drivers/pinctrl/devicetree.h
+++ b/drivers/pinctrl/devicetree.h
@@ -16,11 +16,20 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
+struct of_phandle_args;
+
#ifdef CONFIG_OF
void pinctrl_dt_free_maps(struct pinctrl *p);
int pinctrl_dt_to_map(struct pinctrl *p);
+int pinctrl_count_index_with_args(const struct device_node *np,
+ const char *list_name);
+
+int pinctrl_parse_index_with_args(const struct device_node *np,
+ const char *list_name, int index,
+ struct of_phandle_args *out_args);
+
#else
static inline int pinctrl_dt_to_map(struct pinctrl *p)
@@ -32,4 +41,18 @@ static inline void pinctrl_dt_free_maps(struct pinctrl *p)
{
}
+static inline int pinctrl_count_index_with_args(const struct device_node *np,
+ const char *list_name)
+{
+ return -ENODEV;
+}
+
+static inline int
+pinctrl_parse_index_with_args(const struct device_node *np,
+ const char *list_name, int index,
+ struct of_phandle_args *out_args)
+{
+ return -ENODEV;
+}
+
#endif
diff --git a/drivers/pinctrl/freescale/pinctrl-imx.c b/drivers/pinctrl/freescale/pinctrl-imx.c
index 79c4e14a5a75..5ef7e875b50e 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx.c
@@ -778,10 +778,10 @@ int imx_pinctrl_probe(struct platform_device *pdev,
imx_pinctrl_desc->name = dev_name(&pdev->dev);
imx_pinctrl_desc->pins = info->pins;
imx_pinctrl_desc->npins = info->npins;
- imx_pinctrl_desc->pctlops = &imx_pctrl_ops,
- imx_pinctrl_desc->pmxops = &imx_pmx_ops,
- imx_pinctrl_desc->confops = &imx_pinconf_ops,
- imx_pinctrl_desc->owner = THIS_MODULE,
+ imx_pinctrl_desc->pctlops = &imx_pctrl_ops;
+ imx_pinctrl_desc->pmxops = &imx_pmx_ops;
+ imx_pinctrl_desc->confops = &imx_pinconf_ops;
+ imx_pinctrl_desc->owner = THIS_MODULE;
ret = imx_pinctrl_probe_dt(pdev, info);
if (ret) {
diff --git a/drivers/pinctrl/intel/pinctrl-baytrail.c b/drivers/pinctrl/intel/pinctrl-baytrail.c
index 71bbeb9321ba..37300634b7d2 100644
--- a/drivers/pinctrl/intel/pinctrl-baytrail.c
+++ b/drivers/pinctrl/intel/pinctrl-baytrail.c
@@ -1703,7 +1703,7 @@ static int byt_gpio_probe(struct byt_gpio *vg)
if (irq_rc && irq_rc->start) {
byt_gpio_irq_init_hw(vg);
ret = gpiochip_irqchip_add(gc, &byt_irqchip, 0,
- handle_simple_irq, IRQ_TYPE_NONE);
+ handle_bad_irq, IRQ_TYPE_NONE);
if (ret) {
dev_err(&vg->pdev->dev, "failed to add irqchip\n");
goto fail;
diff --git a/drivers/pinctrl/intel/pinctrl-cherryview.c b/drivers/pinctrl/intel/pinctrl-cherryview.c
index c43b1e9a06af..5e66860a5e67 100644
--- a/drivers/pinctrl/intel/pinctrl-cherryview.c
+++ b/drivers/pinctrl/intel/pinctrl-cherryview.c
@@ -762,7 +762,7 @@ static void chv_pin_dbg_show(struct pinctrl_dev *pctldev, struct seq_file *s,
seq_printf(s, "mode %d ", mode);
}
- seq_printf(s, "ctrl0 0x%08x ctrl1 0x%08x", ctrl0, ctrl1);
+ seq_printf(s, "0x%08x 0x%08x", ctrl0, ctrl1);
if (locked)
seq_puts(s, " [LOCKED]");
diff --git a/drivers/pinctrl/intel/pinctrl-intel.c b/drivers/pinctrl/intel/pinctrl-intel.c
index 01443762e570..1e139672f1af 100644
--- a/drivers/pinctrl/intel/pinctrl-intel.c
+++ b/drivers/pinctrl/intel/pinctrl-intel.c
@@ -911,7 +911,7 @@ static int intel_gpio_probe(struct intel_pinctrl *pctrl, int irq)
}
ret = gpiochip_irqchip_add(&pctrl->chip, &intel_gpio_irqchip, 0,
- handle_simple_irq, IRQ_TYPE_NONE);
+ handle_bad_irq, IRQ_TYPE_NONE);
if (ret) {
dev_err(pctrl->dev, "failed to add irqchip\n");
goto fail;
diff --git a/drivers/pinctrl/intel/pinctrl-merrifield.c b/drivers/pinctrl/intel/pinctrl-merrifield.c
index 7826c7f0cb7c..b21896126f76 100644
--- a/drivers/pinctrl/intel/pinctrl-merrifield.c
+++ b/drivers/pinctrl/intel/pinctrl-merrifield.c
@@ -814,10 +814,51 @@ static int mrfld_config_set(struct pinctrl_dev *pctldev, unsigned int pin,
return 0;
}
+static int mrfld_config_group_get(struct pinctrl_dev *pctldev,
+ unsigned int group, unsigned long *config)
+{
+ const unsigned int *pins;
+ unsigned int npins;
+ int ret;
+
+ ret = mrfld_get_group_pins(pctldev, group, &pins, &npins);
+ if (ret)
+ return ret;
+
+ ret = mrfld_config_get(pctldev, pins[0], config);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int mrfld_config_group_set(struct pinctrl_dev *pctldev,
+ unsigned int group, unsigned long *configs,
+ unsigned int num_configs)
+{
+ const unsigned int *pins;
+ unsigned int npins;
+ int i, ret;
+
+ ret = mrfld_get_group_pins(pctldev, group, &pins, &npins);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < npins; i++) {
+ ret = mrfld_config_set(pctldev, pins[i], configs, num_configs);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
static const struct pinconf_ops mrfld_pinconf_ops = {
.is_generic = true,
.pin_config_get = mrfld_config_get,
.pin_config_set = mrfld_config_set,
+ .pin_config_group_get = mrfld_config_group_get,
+ .pin_config_group_set = mrfld_config_group_set,
};
static const struct pinctrl_desc mrfld_pinctrl_desc = {
diff --git a/drivers/pinctrl/mediatek/pinctrl-mt6397.c b/drivers/pinctrl/mediatek/pinctrl-mt6397.c
index 6eccb85c02cd..afcede7e2222 100644
--- a/drivers/pinctrl/mediatek/pinctrl-mt6397.c
+++ b/drivers/pinctrl/mediatek/pinctrl-mt6397.c
@@ -64,8 +64,4 @@ static struct platform_driver mtk_pinctrl_driver = {
},
};
-static int __init mtk_pinctrl_init(void)
-{
- return platform_driver_register(&mtk_pinctrl_driver);
-}
-device_initcall(mtk_pinctrl_init);
+builtin_platform_driver(mtk_pinctrl_driver);
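The conversion drops the hand-rolled initcall; builtin_platform_driver() (from <linux/platform_device.h>) expands to roughly the code being deleted above, generating a device_initcall'd registration function:

/* roughly the macro the one-liner relies on */
#define builtin_platform_driver(__platform_driver) \
	builtin_driver(__platform_driver, platform_driver_register)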
diff --git a/drivers/pinctrl/mediatek/pinctrl-mtk-mt8173.h b/drivers/pinctrl/mediatek/pinctrl-mtk-mt8173.h
index 13e5b68bfe1b..9b018fdbeb51 100644
--- a/drivers/pinctrl/mediatek/pinctrl-mtk-mt8173.h
+++ b/drivers/pinctrl/mediatek/pinctrl-mtk-mt8173.h
@@ -201,7 +201,7 @@ static const struct mtk_desc_pin mtk_pins_mt8173[] = {
MTK_PIN(
PINCTRL_PIN(16, "IDDIG"),
NULL, "mt8173",
- MTK_EINT_FUNCTION(0, 16),
+ MTK_EINT_FUNCTION(1, 16),
MTK_FUNCTION(0, "GPIO16"),
MTK_FUNCTION(1, "IDDIG"),
MTK_FUNCTION(2, "CMFLASH"),
diff --git a/drivers/pinctrl/meson/Makefile b/drivers/pinctrl/meson/Makefile
index 24434f139947..27c5b5126008 100644
--- a/drivers/pinctrl/meson/Makefile
+++ b/drivers/pinctrl/meson/Makefile
@@ -1,2 +1,3 @@
-obj-y += pinctrl-meson8.o pinctrl-meson8b.o pinctrl-meson-gxbb.o
+obj-y += pinctrl-meson8.o pinctrl-meson8b.o
+obj-y += pinctrl-meson-gxbb.o pinctrl-meson-gxl.o
obj-y += pinctrl-meson.o
diff --git a/drivers/pinctrl/meson/pinctrl-meson-gxl.c b/drivers/pinctrl/meson/pinctrl-meson-gxl.c
new file mode 100644
index 000000000000..25694f7094c7
--- /dev/null
+++ b/drivers/pinctrl/meson/pinctrl-meson-gxl.c
@@ -0,0 +1,589 @@
+/*
+ * Pin controller and GPIO driver for Amlogic Meson GXL.
+ *
+ * Copyright (C) 2016 Endless Mobile, Inc.
+ * Author: Carlo Caione <carlo@endlessm.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <dt-bindings/gpio/meson-gxl-gpio.h>
+#include "pinctrl-meson.h"
+
+#define EE_OFF 10
+
+static const struct pinctrl_pin_desc meson_gxl_periphs_pins[] = {
+ MESON_PIN(GPIOZ_0, EE_OFF),
+ MESON_PIN(GPIOZ_1, EE_OFF),
+ MESON_PIN(GPIOZ_2, EE_OFF),
+ MESON_PIN(GPIOZ_3, EE_OFF),
+ MESON_PIN(GPIOZ_4, EE_OFF),
+ MESON_PIN(GPIOZ_5, EE_OFF),
+ MESON_PIN(GPIOZ_6, EE_OFF),
+ MESON_PIN(GPIOZ_7, EE_OFF),
+ MESON_PIN(GPIOZ_8, EE_OFF),
+ MESON_PIN(GPIOZ_9, EE_OFF),
+ MESON_PIN(GPIOZ_10, EE_OFF),
+ MESON_PIN(GPIOZ_11, EE_OFF),
+ MESON_PIN(GPIOZ_12, EE_OFF),
+ MESON_PIN(GPIOZ_13, EE_OFF),
+ MESON_PIN(GPIOZ_14, EE_OFF),
+ MESON_PIN(GPIOZ_15, EE_OFF),
+
+ MESON_PIN(GPIOH_0, EE_OFF),
+ MESON_PIN(GPIOH_1, EE_OFF),
+ MESON_PIN(GPIOH_2, EE_OFF),
+ MESON_PIN(GPIOH_3, EE_OFF),
+ MESON_PIN(GPIOH_4, EE_OFF),
+ MESON_PIN(GPIOH_5, EE_OFF),
+ MESON_PIN(GPIOH_6, EE_OFF),
+ MESON_PIN(GPIOH_7, EE_OFF),
+ MESON_PIN(GPIOH_8, EE_OFF),
+ MESON_PIN(GPIOH_9, EE_OFF),
+
+ MESON_PIN(BOOT_0, EE_OFF),
+ MESON_PIN(BOOT_1, EE_OFF),
+ MESON_PIN(BOOT_2, EE_OFF),
+ MESON_PIN(BOOT_3, EE_OFF),
+ MESON_PIN(BOOT_4, EE_OFF),
+ MESON_PIN(BOOT_5, EE_OFF),
+ MESON_PIN(BOOT_6, EE_OFF),
+ MESON_PIN(BOOT_7, EE_OFF),
+ MESON_PIN(BOOT_8, EE_OFF),
+ MESON_PIN(BOOT_9, EE_OFF),
+ MESON_PIN(BOOT_10, EE_OFF),
+ MESON_PIN(BOOT_11, EE_OFF),
+ MESON_PIN(BOOT_12, EE_OFF),
+ MESON_PIN(BOOT_13, EE_OFF),
+ MESON_PIN(BOOT_14, EE_OFF),
+ MESON_PIN(BOOT_15, EE_OFF),
+
+ MESON_PIN(CARD_0, EE_OFF),
+ MESON_PIN(CARD_1, EE_OFF),
+ MESON_PIN(CARD_2, EE_OFF),
+ MESON_PIN(CARD_3, EE_OFF),
+ MESON_PIN(CARD_4, EE_OFF),
+ MESON_PIN(CARD_5, EE_OFF),
+ MESON_PIN(CARD_6, EE_OFF),
+
+ MESON_PIN(GPIODV_0, EE_OFF),
+ MESON_PIN(GPIODV_1, EE_OFF),
+ MESON_PIN(GPIODV_2, EE_OFF),
+ MESON_PIN(GPIODV_3, EE_OFF),
+ MESON_PIN(GPIODV_4, EE_OFF),
+ MESON_PIN(GPIODV_5, EE_OFF),
+ MESON_PIN(GPIODV_6, EE_OFF),
+ MESON_PIN(GPIODV_7, EE_OFF),
+ MESON_PIN(GPIODV_8, EE_OFF),
+ MESON_PIN(GPIODV_9, EE_OFF),
+ MESON_PIN(GPIODV_10, EE_OFF),
+ MESON_PIN(GPIODV_11, EE_OFF),
+ MESON_PIN(GPIODV_12, EE_OFF),
+ MESON_PIN(GPIODV_13, EE_OFF),
+ MESON_PIN(GPIODV_14, EE_OFF),
+ MESON_PIN(GPIODV_15, EE_OFF),
+ MESON_PIN(GPIODV_16, EE_OFF),
+ MESON_PIN(GPIODV_17, EE_OFF),
+ MESON_PIN(GPIODV_19, EE_OFF),
+ MESON_PIN(GPIODV_20, EE_OFF),
+ MESON_PIN(GPIODV_21, EE_OFF),
+ MESON_PIN(GPIODV_22, EE_OFF),
+ MESON_PIN(GPIODV_23, EE_OFF),
+ MESON_PIN(GPIODV_24, EE_OFF),
+ MESON_PIN(GPIODV_25, EE_OFF),
+ MESON_PIN(GPIODV_26, EE_OFF),
+ MESON_PIN(GPIODV_27, EE_OFF),
+ MESON_PIN(GPIODV_28, EE_OFF),
+ MESON_PIN(GPIODV_29, EE_OFF),
+
+ MESON_PIN(GPIOX_0, EE_OFF),
+ MESON_PIN(GPIOX_1, EE_OFF),
+ MESON_PIN(GPIOX_2, EE_OFF),
+ MESON_PIN(GPIOX_3, EE_OFF),
+ MESON_PIN(GPIOX_4, EE_OFF),
+ MESON_PIN(GPIOX_5, EE_OFF),
+ MESON_PIN(GPIOX_6, EE_OFF),
+ MESON_PIN(GPIOX_7, EE_OFF),
+ MESON_PIN(GPIOX_8, EE_OFF),
+ MESON_PIN(GPIOX_9, EE_OFF),
+ MESON_PIN(GPIOX_10, EE_OFF),
+ MESON_PIN(GPIOX_11, EE_OFF),
+ MESON_PIN(GPIOX_12, EE_OFF),
+ MESON_PIN(GPIOX_13, EE_OFF),
+ MESON_PIN(GPIOX_14, EE_OFF),
+ MESON_PIN(GPIOX_15, EE_OFF),
+ MESON_PIN(GPIOX_16, EE_OFF),
+ MESON_PIN(GPIOX_17, EE_OFF),
+ MESON_PIN(GPIOX_18, EE_OFF),
+
+ MESON_PIN(GPIOCLK_0, EE_OFF),
+ MESON_PIN(GPIOCLK_1, EE_OFF),
+
+ MESON_PIN(GPIO_TEST_N, EE_OFF),
+};
+
+static const unsigned int emmc_nand_d07_pins[] = {
+ PIN(BOOT_0, EE_OFF), PIN(BOOT_1, EE_OFF), PIN(BOOT_2, EE_OFF),
+ PIN(BOOT_3, EE_OFF), PIN(BOOT_4, EE_OFF), PIN(BOOT_5, EE_OFF),
+ PIN(BOOT_6, EE_OFF), PIN(BOOT_7, EE_OFF),
+};
+static const unsigned int emmc_clk_pins[] = { PIN(BOOT_8, EE_OFF) };
+static const unsigned int emmc_cmd_pins[] = { PIN(BOOT_10, EE_OFF) };
+static const unsigned int emmc_ds_pins[] = { PIN(BOOT_15, EE_OFF) };
+
+static const unsigned int sdcard_d0_pins[] = { PIN(CARD_1, EE_OFF) };
+static const unsigned int sdcard_d1_pins[] = { PIN(CARD_0, EE_OFF) };
+static const unsigned int sdcard_d2_pins[] = { PIN(CARD_5, EE_OFF) };
+static const unsigned int sdcard_d3_pins[] = { PIN(CARD_4, EE_OFF) };
+static const unsigned int sdcard_cmd_pins[] = { PIN(CARD_3, EE_OFF) };
+static const unsigned int sdcard_clk_pins[] = { PIN(CARD_2, EE_OFF) };
+
+static const unsigned int sdio_d0_pins[] = { PIN(GPIOX_0, EE_OFF) };
+static const unsigned int sdio_d1_pins[] = { PIN(GPIOX_1, EE_OFF) };
+static const unsigned int sdio_d2_pins[] = { PIN(GPIOX_2, EE_OFF) };
+static const unsigned int sdio_d3_pins[] = { PIN(GPIOX_3, EE_OFF) };
+static const unsigned int sdio_cmd_pins[] = { PIN(GPIOX_4, EE_OFF) };
+static const unsigned int sdio_clk_pins[] = { PIN(GPIOX_5, EE_OFF) };
+static const unsigned int sdio_irq_pins[] = { PIN(GPIOX_7, EE_OFF) };
+
+static const unsigned int nand_ce0_pins[] = { PIN(BOOT_8, EE_OFF) };
+static const unsigned int nand_ce1_pins[] = { PIN(BOOT_9, EE_OFF) };
+static const unsigned int nand_rb0_pins[] = { PIN(BOOT_10, EE_OFF) };
+static const unsigned int nand_ale_pins[] = { PIN(BOOT_11, EE_OFF) };
+static const unsigned int nand_cle_pins[] = { PIN(BOOT_12, EE_OFF) };
+static const unsigned int nand_wen_clk_pins[] = { PIN(BOOT_13, EE_OFF) };
+static const unsigned int nand_ren_wr_pins[] = { PIN(BOOT_14, EE_OFF) };
+static const unsigned int nand_dqs_pins[] = { PIN(BOOT_15, EE_OFF) };
+
+static const unsigned int uart_tx_a_pins[] = { PIN(GPIOX_12, EE_OFF) };
+static const unsigned int uart_rx_a_pins[] = { PIN(GPIOX_13, EE_OFF) };
+static const unsigned int uart_cts_a_pins[] = { PIN(GPIOX_14, EE_OFF) };
+static const unsigned int uart_rts_a_pins[] = { PIN(GPIOX_15, EE_OFF) };
+
+static const unsigned int uart_tx_b_pins[] = { PIN(GPIODV_24, EE_OFF) };
+static const unsigned int uart_rx_b_pins[] = { PIN(GPIODV_25, EE_OFF) };
+
+static const unsigned int uart_tx_c_pins[] = { PIN(GPIOX_8, EE_OFF) };
+static const unsigned int uart_rx_c_pins[] = { PIN(GPIOX_9, EE_OFF) };
+
+static const unsigned int i2c_sck_a_pins[] = { PIN(GPIODV_25, EE_OFF) };
+static const unsigned int i2c_sda_a_pins[] = { PIN(GPIODV_24, EE_OFF) };
+
+static const unsigned int i2c_sck_b_pins[] = { PIN(GPIODV_27, EE_OFF) };
+static const unsigned int i2c_sda_b_pins[] = { PIN(GPIODV_26, EE_OFF) };
+
+static const unsigned int i2c_sck_c_pins[] = { PIN(GPIODV_29, EE_OFF) };
+static const unsigned int i2c_sda_c_pins[] = { PIN(GPIODV_28, EE_OFF) };
+
+static const unsigned int eth_mdio_pins[] = { PIN(GPIOZ_0, EE_OFF) };
+static const unsigned int eth_mdc_pins[] = { PIN(GPIOZ_1, EE_OFF) };
+static const unsigned int eth_clk_rx_clk_pins[] = { PIN(GPIOZ_2, EE_OFF) };
+static const unsigned int eth_rx_dv_pins[] = { PIN(GPIOZ_3, EE_OFF) };
+static const unsigned int eth_rxd0_pins[] = { PIN(GPIOZ_4, EE_OFF) };
+static const unsigned int eth_rxd1_pins[] = { PIN(GPIOZ_5, EE_OFF) };
+static const unsigned int eth_rxd2_pins[] = { PIN(GPIOZ_6, EE_OFF) };
+static const unsigned int eth_rxd3_pins[] = { PIN(GPIOZ_7, EE_OFF) };
+static const unsigned int eth_rgmii_tx_clk_pins[] = { PIN(GPIOZ_8, EE_OFF) };
+static const unsigned int eth_tx_en_pins[] = { PIN(GPIOZ_9, EE_OFF) };
+static const unsigned int eth_txd0_pins[] = { PIN(GPIOZ_10, EE_OFF) };
+static const unsigned int eth_txd1_pins[] = { PIN(GPIOZ_11, EE_OFF) };
+static const unsigned int eth_txd2_pins[] = { PIN(GPIOZ_12, EE_OFF) };
+static const unsigned int eth_txd3_pins[] = { PIN(GPIOZ_13, EE_OFF) };
+
+static const unsigned int pwm_e_pins[] = { PIN(GPIOX_16, EE_OFF) };
+
+static const struct pinctrl_pin_desc meson_gxl_aobus_pins[] = {
+ MESON_PIN(GPIOAO_0, 0),
+ MESON_PIN(GPIOAO_1, 0),
+ MESON_PIN(GPIOAO_2, 0),
+ MESON_PIN(GPIOAO_3, 0),
+ MESON_PIN(GPIOAO_4, 0),
+ MESON_PIN(GPIOAO_5, 0),
+ MESON_PIN(GPIOAO_6, 0),
+ MESON_PIN(GPIOAO_7, 0),
+ MESON_PIN(GPIOAO_8, 0),
+ MESON_PIN(GPIOAO_9, 0),
+};
+
+static const unsigned int uart_tx_ao_a_pins[] = { PIN(GPIOAO_0, 0) };
+static const unsigned int uart_rx_ao_a_pins[] = { PIN(GPIOAO_1, 0) };
+static const unsigned int uart_cts_ao_a_pins[] = { PIN(GPIOAO_2, 0) };
+static const unsigned int uart_rts_ao_a_pins[] = { PIN(GPIOAO_3, 0) };
+static const unsigned int uart_tx_ao_b_pins[] = { PIN(GPIOAO_0, 0) };
+static const unsigned int uart_rx_ao_b_pins[] = { PIN(GPIOAO_1, 0),
+ PIN(GPIOAO_5, 0) };
+static const unsigned int uart_cts_ao_b_pins[] = { PIN(GPIOAO_2, 0) };
+static const unsigned int uart_rts_ao_b_pins[] = { PIN(GPIOAO_3, 0) };
+
+static const unsigned int remote_input_ao_pins[] = { PIN(GPIOAO_7, 0) };
+
+static struct meson_pmx_group meson_gxl_periphs_groups[] = {
+ GPIO_GROUP(GPIOZ_0, EE_OFF),
+ GPIO_GROUP(GPIOZ_1, EE_OFF),
+ GPIO_GROUP(GPIOZ_2, EE_OFF),
+ GPIO_GROUP(GPIOZ_3, EE_OFF),
+ GPIO_GROUP(GPIOZ_4, EE_OFF),
+ GPIO_GROUP(GPIOZ_5, EE_OFF),
+ GPIO_GROUP(GPIOZ_6, EE_OFF),
+ GPIO_GROUP(GPIOZ_7, EE_OFF),
+ GPIO_GROUP(GPIOZ_8, EE_OFF),
+ GPIO_GROUP(GPIOZ_9, EE_OFF),
+ GPIO_GROUP(GPIOZ_10, EE_OFF),
+ GPIO_GROUP(GPIOZ_11, EE_OFF),
+ GPIO_GROUP(GPIOZ_12, EE_OFF),
+ GPIO_GROUP(GPIOZ_13, EE_OFF),
+ GPIO_GROUP(GPIOZ_14, EE_OFF),
+ GPIO_GROUP(GPIOZ_15, EE_OFF),
+
+ GPIO_GROUP(GPIOH_0, EE_OFF),
+ GPIO_GROUP(GPIOH_1, EE_OFF),
+ GPIO_GROUP(GPIOH_2, EE_OFF),
+ GPIO_GROUP(GPIOH_3, EE_OFF),
+ GPIO_GROUP(GPIOH_4, EE_OFF),
+ GPIO_GROUP(GPIOH_5, EE_OFF),
+ GPIO_GROUP(GPIOH_6, EE_OFF),
+ GPIO_GROUP(GPIOH_7, EE_OFF),
+ GPIO_GROUP(GPIOH_8, EE_OFF),
+ GPIO_GROUP(GPIOH_9, EE_OFF),
+
+ GPIO_GROUP(BOOT_0, EE_OFF),
+ GPIO_GROUP(BOOT_1, EE_OFF),
+ GPIO_GROUP(BOOT_2, EE_OFF),
+ GPIO_GROUP(BOOT_3, EE_OFF),
+ GPIO_GROUP(BOOT_4, EE_OFF),
+ GPIO_GROUP(BOOT_5, EE_OFF),
+ GPIO_GROUP(BOOT_6, EE_OFF),
+ GPIO_GROUP(BOOT_7, EE_OFF),
+ GPIO_GROUP(BOOT_8, EE_OFF),
+ GPIO_GROUP(BOOT_9, EE_OFF),
+ GPIO_GROUP(BOOT_10, EE_OFF),
+ GPIO_GROUP(BOOT_11, EE_OFF),
+ GPIO_GROUP(BOOT_12, EE_OFF),
+ GPIO_GROUP(BOOT_13, EE_OFF),
+ GPIO_GROUP(BOOT_14, EE_OFF),
+ GPIO_GROUP(BOOT_15, EE_OFF),
+
+ GPIO_GROUP(CARD_0, EE_OFF),
+ GPIO_GROUP(CARD_1, EE_OFF),
+ GPIO_GROUP(CARD_2, EE_OFF),
+ GPIO_GROUP(CARD_3, EE_OFF),
+ GPIO_GROUP(CARD_4, EE_OFF),
+ GPIO_GROUP(CARD_5, EE_OFF),
+ GPIO_GROUP(CARD_6, EE_OFF),
+
+ GPIO_GROUP(GPIODV_0, EE_OFF),
+ GPIO_GROUP(GPIODV_1, EE_OFF),
+ GPIO_GROUP(GPIODV_2, EE_OFF),
+ GPIO_GROUP(GPIODV_3, EE_OFF),
+ GPIO_GROUP(GPIODV_4, EE_OFF),
+ GPIO_GROUP(GPIODV_5, EE_OFF),
+ GPIO_GROUP(GPIODV_6, EE_OFF),
+ GPIO_GROUP(GPIODV_7, EE_OFF),
+ GPIO_GROUP(GPIODV_8, EE_OFF),
+ GPIO_GROUP(GPIODV_9, EE_OFF),
+ GPIO_GROUP(GPIODV_10, EE_OFF),
+ GPIO_GROUP(GPIODV_11, EE_OFF),
+ GPIO_GROUP(GPIODV_12, EE_OFF),
+ GPIO_GROUP(GPIODV_13, EE_OFF),
+ GPIO_GROUP(GPIODV_14, EE_OFF),
+ GPIO_GROUP(GPIODV_15, EE_OFF),
+ GPIO_GROUP(GPIODV_16, EE_OFF),
+ GPIO_GROUP(GPIODV_17, EE_OFF),
+ GPIO_GROUP(GPIODV_18, EE_OFF),
+ GPIO_GROUP(GPIODV_19, EE_OFF),
+ GPIO_GROUP(GPIODV_20, EE_OFF),
+ GPIO_GROUP(GPIODV_21, EE_OFF),
+ GPIO_GROUP(GPIODV_22, EE_OFF),
+ GPIO_GROUP(GPIODV_23, EE_OFF),
+ GPIO_GROUP(GPIODV_24, EE_OFF),
+ GPIO_GROUP(GPIODV_25, EE_OFF),
+ GPIO_GROUP(GPIODV_26, EE_OFF),
+ GPIO_GROUP(GPIODV_27, EE_OFF),
+ GPIO_GROUP(GPIODV_28, EE_OFF),
+ GPIO_GROUP(GPIODV_29, EE_OFF),
+
+ GPIO_GROUP(GPIOX_0, EE_OFF),
+ GPIO_GROUP(GPIOX_1, EE_OFF),
+ GPIO_GROUP(GPIOX_2, EE_OFF),
+ GPIO_GROUP(GPIOX_3, EE_OFF),
+ GPIO_GROUP(GPIOX_4, EE_OFF),
+ GPIO_GROUP(GPIOX_5, EE_OFF),
+ GPIO_GROUP(GPIOX_6, EE_OFF),
+ GPIO_GROUP(GPIOX_7, EE_OFF),
+ GPIO_GROUP(GPIOX_8, EE_OFF),
+ GPIO_GROUP(GPIOX_9, EE_OFF),
+ GPIO_GROUP(GPIOX_10, EE_OFF),
+ GPIO_GROUP(GPIOX_11, EE_OFF),
+ GPIO_GROUP(GPIOX_12, EE_OFF),
+ GPIO_GROUP(GPIOX_13, EE_OFF),
+ GPIO_GROUP(GPIOX_14, EE_OFF),
+ GPIO_GROUP(GPIOX_15, EE_OFF),
+ GPIO_GROUP(GPIOX_16, EE_OFF),
+ GPIO_GROUP(GPIOX_17, EE_OFF),
+ GPIO_GROUP(GPIOX_18, EE_OFF),
+
+ GPIO_GROUP(GPIOCLK_0, EE_OFF),
+ GPIO_GROUP(GPIOCLK_1, EE_OFF),
+
+ GPIO_GROUP(GPIO_TEST_N, EE_OFF),
+
+ /* Bank X */
+ GROUP(sdio_d0, 5, 31),
+ GROUP(sdio_d1, 5, 30),
+ GROUP(sdio_d2, 5, 29),
+ GROUP(sdio_d3, 5, 28),
+ GROUP(sdio_cmd, 5, 27),
+ GROUP(sdio_clk, 5, 26),
+ GROUP(sdio_irq, 5, 24),
+ GROUP(uart_tx_a, 5, 19),
+ GROUP(uart_rx_a, 5, 18),
+ GROUP(uart_cts_a, 5, 17),
+ GROUP(uart_rts_a, 5, 16),
+ GROUP(uart_tx_c, 5, 13),
+ GROUP(uart_rx_c, 5, 12),
+ GROUP(pwm_e, 5, 15),
+
+ /* Bank Z */
+ GROUP(eth_mdio, 4, 22),
+ GROUP(eth_mdc, 4, 23),
+ GROUP(eth_clk_rx_clk, 4, 21),
+ GROUP(eth_rx_dv, 4, 20),
+ GROUP(eth_rxd0, 4, 19),
+ GROUP(eth_rxd1, 4, 18),
+ GROUP(eth_rxd2, 4, 17),
+ GROUP(eth_rxd3, 4, 16),
+ GROUP(eth_rgmii_tx_clk, 4, 15),
+ GROUP(eth_tx_en, 4, 14),
+ GROUP(eth_txd0, 4, 13),
+ GROUP(eth_txd1, 4, 12),
+ GROUP(eth_txd2, 4, 11),
+ GROUP(eth_txd3, 4, 10),
+
+ /* Bank DV */
+ GROUP(uart_tx_b, 2, 16),
+ GROUP(uart_rx_b, 2, 15),
+ GROUP(i2c_sck_a, 1, 15),
+ GROUP(i2c_sda_a, 1, 14),
+ GROUP(i2c_sck_b, 1, 13),
+ GROUP(i2c_sda_b, 1, 12),
+ GROUP(i2c_sck_c, 1, 11),
+ GROUP(i2c_sda_c, 1, 10),
+
+ /* Bank BOOT */
+ GROUP(emmc_nand_d07, 7, 31),
+ GROUP(emmc_clk, 7, 30),
+ GROUP(emmc_cmd, 7, 29),
+ GROUP(emmc_ds, 7, 28),
+ GROUP(nand_ce0, 7, 7),
+ GROUP(nand_ce1, 7, 6),
+ GROUP(nand_rb0, 7, 5),
+ GROUP(nand_ale, 7, 4),
+ GROUP(nand_cle, 7, 3),
+ GROUP(nand_wen_clk, 7, 2),
+ GROUP(nand_ren_wr, 7, 1),
+ GROUP(nand_dqs, 7, 0),
+
+ /* Bank CARD */
+ GROUP(sdcard_d1, 6, 5),
+ GROUP(sdcard_d0, 6, 4),
+ GROUP(sdcard_d3, 6, 1),
+ GROUP(sdcard_d2, 6, 0),
+ GROUP(sdcard_cmd, 6, 2),
+ GROUP(sdcard_clk, 6, 3),
+};
+
+static struct meson_pmx_group meson_gxl_aobus_groups[] = {
+ GPIO_GROUP(GPIOAO_0, 0),
+ GPIO_GROUP(GPIOAO_1, 0),
+ GPIO_GROUP(GPIOAO_2, 0),
+ GPIO_GROUP(GPIOAO_3, 0),
+ GPIO_GROUP(GPIOAO_4, 0),
+ GPIO_GROUP(GPIOAO_5, 0),
+ GPIO_GROUP(GPIOAO_6, 0),
+ GPIO_GROUP(GPIOAO_7, 0),
+ GPIO_GROUP(GPIOAO_8, 0),
+ GPIO_GROUP(GPIOAO_9, 0),
+
+ /* bank AO */
+ GROUP(uart_tx_ao_b, 0, 26),
+ GROUP(uart_rx_ao_b, 0, 25),
+ GROUP(uart_tx_ao_a, 0, 12),
+ GROUP(uart_rx_ao_a, 0, 11),
+ GROUP(uart_cts_ao_a, 0, 10),
+ GROUP(uart_rts_ao_a, 0, 9),
+ GROUP(uart_cts_ao_b, 0, 8),
+ GROUP(uart_rts_ao_b, 0, 7),
+ GROUP(remote_input_ao, 0, 0),
+};
+
+static const char * const gpio_periphs_groups[] = {
+ "GPIOZ_0", "GPIOZ_1", "GPIOZ_2", "GPIOZ_3", "GPIOZ_4",
+ "GPIOZ_5", "GPIOZ_6", "GPIOZ_7", "GPIOZ_8", "GPIOZ_9",
+ "GPIOZ_10", "GPIOZ_11", "GPIOZ_12", "GPIOZ_13", "GPIOZ_14",
+ "GPIOZ_15",
+
+ "GPIOH_0", "GPIOH_1", "GPIOH_2", "GPIOH_3", "GPIOH_4",
+ "GPIOH_5", "GPIOH_6", "GPIOH_7", "GPIOH_8", "GPIOH_9",
+
+ "BOOT_0", "BOOT_1", "BOOT_2", "BOOT_3", "BOOT_4",
+ "BOOT_5", "BOOT_6", "BOOT_7", "BOOT_8", "BOOT_9",
+ "BOOT_10", "BOOT_11", "BOOT_12", "BOOT_13", "BOOT_14",
+ "BOOT_15",
+
+ "CARD_0", "CARD_1", "CARD_2", "CARD_3", "CARD_4",
+ "CARD_5", "CARD_6",
+
+ "GPIODV_0", "GPIODV_1", "GPIODV_2", "GPIODV_3", "GPIODV_4",
+ "GPIODV_5", "GPIODV_6", "GPIODV_7", "GPIODV_8", "GPIODV_9",
+ "GPIODV_10", "GPIODV_11", "GPIODV_12", "GPIODV_13", "GPIODV_14",
+ "GPIODV_15", "GPIODV_16", "GPIODV_17", "GPIODV_18", "GPIODV_19",
+ "GPIODV_20", "GPIODV_21", "GPIODV_22", "GPIODV_23", "GPIODV_24",
+ "GPIODV_25", "GPIODV_26", "GPIODV_27", "GPIODV_28", "GPIODV_29",
+
+ "GPIOX_0", "GPIOX_1", "GPIOX_2", "GPIOX_3", "GPIOX_4",
+ "GPIOX_5", "GPIOX_6", "GPIOX_7", "GPIOX_8", "GPIOX_9",
+ "GPIOX_10", "GPIOX_11", "GPIOX_12", "GPIOX_13", "GPIOX_14",
+ "GPIOX_15", "GPIOX_16", "GPIOX_17", "GPIOX_18",
+
+ "GPIO_TEST_N",
+};
+
+static const char * const emmc_groups[] = {
+ "emmc_nand_d07", "emmc_clk", "emmc_cmd", "emmc_ds",
+};
+
+static const char * const sdcard_groups[] = {
+ "sdcard_d0", "sdcard_d1", "sdcard_d2", "sdcard_d3",
+ "sdcard_cmd", "sdcard_clk",
+};
+
+static const char * const sdio_groups[] = {
+ "sdio_d0", "sdio_d1", "sdio_d2", "sdio_d3",
+ "sdio_cmd", "sdio_clk", "sdio_irq",
+};
+
+static const char * const nand_groups[] = {
+ "nand_ce0", "nand_ce1", "nand_rb0", "nand_ale", "nand_cle",
+ "nand_wen_clk", "nand_ren_wr", "nand_dqs",
+};
+
+static const char * const uart_a_groups[] = {
+ "uart_tx_a", "uart_rx_a", "uart_cts_a", "uart_rts_a",
+};
+
+static const char * const uart_b_groups[] = {
+ "uart_tx_b", "uart_rx_b",
+};
+
+static const char * const uart_c_groups[] = {
+ "uart_tx_c", "uart_rx_c",
+};
+
+static const char * const i2c_a_groups[] = {
+ "i2c_sck_a", "i2c_sda_a",
+};
+
+static const char * const i2c_b_groups[] = {
+ "i2c_sck_b", "i2c_sda_b",
+};
+
+static const char * const i2c_c_groups[] = {
+ "i2c_sck_c", "i2c_sda_c",
+};
+
+static const char * const eth_groups[] = {
+ "eth_mdio", "eth_mdc", "eth_clk_rx_clk", "eth_rx_dv",
+ "eth_rxd0", "eth_rxd1", "eth_rxd2", "eth_rxd3",
+ "eth_rgmii_tx_clk", "eth_tx_en",
+ "eth_txd0", "eth_txd1", "eth_txd2", "eth_txd3",
+};
+
+static const char * const pwm_e_groups[] = {
+ "pwm_e",
+};
+
+static const char * const gpio_aobus_groups[] = {
+ "GPIOAO_0", "GPIOAO_1", "GPIOAO_2", "GPIOAO_3", "GPIOAO_4",
+ "GPIOAO_5", "GPIOAO_6", "GPIOAO_7", "GPIOAO_8", "GPIOAO_9",
+};
+
+static const char * const uart_ao_groups[] = {
+ "uart_tx_ao_a", "uart_rx_ao_a", "uart_cts_ao_a", "uart_rts_ao_a",
+};
+
+static const char * const uart_ao_b_groups[] = {
+ "uart_tx_ao_b", "uart_rx_ao_b", "uart_cts_ao_b", "uart_rts_ao_b",
+};
+
+static const char * const remote_input_ao_groups[] = {
+ "remote_input_ao",
+};
+
+static struct meson_pmx_func meson_gxl_periphs_functions[] = {
+ FUNCTION(gpio_periphs),
+ FUNCTION(emmc),
+ FUNCTION(sdcard),
+ FUNCTION(sdio),
+ FUNCTION(nand),
+ FUNCTION(uart_a),
+ FUNCTION(uart_b),
+ FUNCTION(uart_c),
+ FUNCTION(i2c_a),
+ FUNCTION(i2c_b),
+ FUNCTION(i2c_c),
+ FUNCTION(eth),
+ FUNCTION(pwm_e),
+};
+
+static struct meson_pmx_func meson_gxl_aobus_functions[] = {
+ FUNCTION(gpio_aobus),
+ FUNCTION(uart_ao),
+ FUNCTION(uart_ao_b),
+ FUNCTION(remote_input_ao),
+};
+
+static struct meson_bank meson_gxl_periphs_banks[] = {
+ /* name first last pullen pull dir out in */
+ BANK("X", PIN(GPIOX_0, EE_OFF), PIN(GPIOX_18, EE_OFF), 4, 0, 4, 0, 12, 0, 13, 0, 14, 0),
+ BANK("DV", PIN(GPIODV_0, EE_OFF), PIN(GPIODV_29, EE_OFF), 0, 0, 0, 0, 0, 0, 1, 0, 2, 0),
+ BANK("H", PIN(GPIOH_0, EE_OFF), PIN(GPIOH_9, EE_OFF), 1, 20, 1, 20, 3, 20, 4, 20, 5, 20),
+ BANK("Z", PIN(GPIOZ_0, EE_OFF), PIN(GPIOZ_15, EE_OFF), 3, 0, 3, 0, 9, 0, 10, 0, 11, 0),
+ BANK("CARD", PIN(CARD_0, EE_OFF), PIN(CARD_6, EE_OFF), 2, 20, 2, 20, 6, 20, 7, 20, 8, 20),
+ BANK("BOOT", PIN(BOOT_0, EE_OFF), PIN(BOOT_15, EE_OFF), 2, 0, 2, 0, 6, 0, 7, 0, 8, 0),
+ BANK("CLK", PIN(GPIOCLK_0, EE_OFF), PIN(GPIOCLK_1, EE_OFF), 3, 28, 3, 28, 9, 28, 10, 28, 11, 28),
+};
+
+static struct meson_bank meson_gxl_aobus_banks[] = {
+ /* name first last pullen pull dir out in */
+ BANK("AO", PIN(GPIOAO_0, 0), PIN(GPIOAO_9, 0), 0, 0, 0, 16, 0, 0, 0, 16, 1, 0),
+};
+
+struct meson_pinctrl_data meson_gxl_periphs_pinctrl_data = {
+ .name = "periphs-banks",
+ .pin_base = 10,
+ .pins = meson_gxl_periphs_pins,
+ .groups = meson_gxl_periphs_groups,
+ .funcs = meson_gxl_periphs_functions,
+ .banks = meson_gxl_periphs_banks,
+ .num_pins = ARRAY_SIZE(meson_gxl_periphs_pins),
+ .num_groups = ARRAY_SIZE(meson_gxl_periphs_groups),
+ .num_funcs = ARRAY_SIZE(meson_gxl_periphs_functions),
+ .num_banks = ARRAY_SIZE(meson_gxl_periphs_banks),
+};
+
+struct meson_pinctrl_data meson_gxl_aobus_pinctrl_data = {
+ .name = "aobus-banks",
+ .pin_base = 0,
+ .pins = meson_gxl_aobus_pins,
+ .groups = meson_gxl_aobus_groups,
+ .funcs = meson_gxl_aobus_functions,
+ .banks = meson_gxl_aobus_banks,
+ .num_pins = ARRAY_SIZE(meson_gxl_aobus_pins),
+ .num_groups = ARRAY_SIZE(meson_gxl_aobus_groups),
+ .num_funcs = ARRAY_SIZE(meson_gxl_aobus_functions),
+ .num_banks = ARRAY_SIZE(meson_gxl_aobus_banks),
+};
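
For readers new to the meson tables above: each GROUP(name, reg, bit)
entry names a 32-bit mux register index and the enable bit for that
function, and each BANK() row packs five (reg, bit) pairs as per its
comment line. A minimal sketch of how a GROUP entry is typically
consumed (illustration only; this is an assumption about the shared
meson core, not code from this patch):

	/*
	 * GROUP(sdio_d0, 5, 31): setting bit 31 of the mux register at
	 * index 5 (byte offset 5 * 4 = 0x14) routes the group's pins
	 * to the sdio_d0 function.
	 */
	static void meson_sketch_enable_group(void __iomem *mux_base,
					      unsigned int reg,
					      unsigned int bit)
	{
		u32 val = readl(mux_base + reg * 4);

		val |= BIT(bit);
		writel(val, mux_base + reg * 4);
	}

Likewise, in BANK("X", ..., 4, 0, 4, 0, 12, 0, 13, 0, 14, 0) the
pull-enable and pull bits for GPIOX_0 start at bit 0 of registers 4
and 4, and the direction, output and input bits at bit 0 of registers
12, 13 and 14, one bit per pin within the bank (inferred from the
comment row, not checked against a datasheet).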
diff --git a/drivers/pinctrl/meson/pinctrl-meson.c b/drivers/pinctrl/meson/pinctrl-meson.c
index 57122eda155a..a579126832af 100644
--- a/drivers/pinctrl/meson/pinctrl-meson.c
+++ b/drivers/pinctrl/meson/pinctrl-meson.c
@@ -524,6 +524,14 @@ static const struct of_device_id meson_pinctrl_dt_match[] = {
.compatible = "amlogic,meson-gxbb-aobus-pinctrl",
.data = &meson_gxbb_aobus_pinctrl_data,
},
+ {
+ .compatible = "amlogic,meson-gxl-periphs-pinctrl",
+ .data = &meson_gxl_periphs_pinctrl_data,
+ },
+ {
+ .compatible = "amlogic,meson-gxl-aobus-pinctrl",
+ .data = &meson_gxl_aobus_pinctrl_data,
+ },
{ },
};
diff --git a/drivers/pinctrl/meson/pinctrl-meson.h b/drivers/pinctrl/meson/pinctrl-meson.h
index 98b5080650c1..1aa871d5431e 100644
--- a/drivers/pinctrl/meson/pinctrl-meson.h
+++ b/drivers/pinctrl/meson/pinctrl-meson.h
@@ -169,3 +169,5 @@ extern struct meson_pinctrl_data meson8b_cbus_pinctrl_data;
extern struct meson_pinctrl_data meson8b_aobus_pinctrl_data;
extern struct meson_pinctrl_data meson_gxbb_periphs_pinctrl_data;
extern struct meson_pinctrl_data meson_gxbb_aobus_pinctrl_data;
+extern struct meson_pinctrl_data meson_gxl_periphs_pinctrl_data;
+extern struct meson_pinctrl_data meson_gxl_aobus_pinctrl_data;
diff --git a/drivers/pinctrl/nomadik/pinctrl-nomadik-db8500.c b/drivers/pinctrl/nomadik/pinctrl-nomadik-db8500.c
index 8392083514fb..af4814479eb0 100644
--- a/drivers/pinctrl/nomadik/pinctrl-nomadik-db8500.c
+++ b/drivers/pinctrl/nomadik/pinctrl-nomadik-db8500.c
@@ -379,13 +379,24 @@ static const unsigned msp0txrx_a_1_pins[] = { DB8500_PIN_AC4, DB8500_PIN_AC3 };
static const unsigned msp0tfstck_a_1_pins[] = { DB8500_PIN_AF3, DB8500_PIN_AE3 };
static const unsigned msp0rfsrck_a_1_pins[] = { DB8500_PIN_AD3, DB8500_PIN_AD4 };
/* Basic pins of the MMC/SD card 0 interface */
-static const unsigned mc0_a_1_pins[] = { DB8500_PIN_AC2, DB8500_PIN_AC1,
- DB8500_PIN_AB4, DB8500_PIN_AA3, DB8500_PIN_AA4, DB8500_PIN_AB2,
- DB8500_PIN_Y4, DB8500_PIN_Y2, DB8500_PIN_AA2, DB8500_PIN_AA1 };
+static const unsigned mc0_a_1_pins[] = { DB8500_PIN_AC2, /* MC0_CMDDIR */
+ DB8500_PIN_AC1, /* MC0_DAT0DIR */
+ DB8500_PIN_AB4, /* MC0_DAT2DIR */
+ DB8500_PIN_AA3, /* MC0_FBCLK */
+ DB8500_PIN_AA4, /* MC0_CLK */
+ DB8500_PIN_AB2, /* MC0_CMD */
+ DB8500_PIN_Y4, /* MC0_DAT0 */
+ DB8500_PIN_Y2, /* MC0_DAT1 */
+ DB8500_PIN_AA2, /* MC0_DAT2 */
+ DB8500_PIN_AA1 /* MC0_DAT3 */
+};
/* Often only 4 bits are used, then these are not needed (only used for MMC) */
-static const unsigned mc0_dat47_a_1_pins[] = { DB8500_PIN_W2, DB8500_PIN_W3,
- DB8500_PIN_V3, DB8500_PIN_V2};
-static const unsigned mc0dat31dir_a_1_pins[] = { DB8500_PIN_AB3 };
+static const unsigned mc0_dat47_a_1_pins[] = { DB8500_PIN_W2, /* MC0_DAT4 */
+ DB8500_PIN_W3, /* MC0_DAT5 */
+ DB8500_PIN_V3, /* MC0_DAT6 */
+ DB8500_PIN_V2 /* MC0_DAT7 */
+};
+static const unsigned mc0dat31dir_a_1_pins[] = { DB8500_PIN_AB3 }; /* MC0_DAT31DIR */
/* MSP1 can only be on these pins, but TXD and RXD can be flipped */
static const unsigned msp1txrx_a_1_pins[] = { DB8500_PIN_AF2, DB8500_PIN_AG2 };
static const unsigned msp1_a_1_pins[] = { DB8500_PIN_AE1, DB8500_PIN_AE2 };
diff --git a/drivers/pinctrl/pinconf-generic.c b/drivers/pinctrl/pinconf-generic.c
index 5020ae534479..ce3335accb5b 100644
--- a/drivers/pinctrl/pinconf-generic.c
+++ b/drivers/pinctrl/pinconf-generic.c
@@ -381,7 +381,7 @@ int pinconf_generic_dt_node_to_map(struct pinctrl_dev *pctldev,
if (ret < 0)
goto exit;
- for_each_child_of_node(np_config, np) {
+ for_each_available_child_of_node(np_config, np) {
ret = pinconf_generic_dt_subnode_to_map(pctldev, np, map,
&reserved_maps, num_maps, type);
if (ret < 0)
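
The switch to for_each_available_child_of_node() above means config
subnodes carrying status = "disabled" no longer produce maps. A
minimal illustration of the difference between the two of.h iterators
(not from the patch):

	struct device_node *np;

	for_each_child_of_node(np_config, np) {
		/* visits every child node, disabled or not */
	}

	for_each_available_child_of_node(np_config, np) {
		/* additionally filters on of_device_is_available(), so
		 * children with status = "disabled" never reach here */
	}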
diff --git a/drivers/pinctrl/pinctrl-at91.c b/drivers/pinctrl/pinctrl-at91.c
index 9f0904185909..569bc28cb909 100644
--- a/drivers/pinctrl/pinctrl-at91.c
+++ b/drivers/pinctrl/pinctrl-at91.c
@@ -56,6 +56,9 @@ static int gpio_banks;
#define DRIVE_STRENGTH_SHIFT 5
#define DRIVE_STRENGTH_MASK 0x3
#define DRIVE_STRENGTH (DRIVE_STRENGTH_MASK << DRIVE_STRENGTH_SHIFT)
+#define OUTPUT (1 << 7)
+#define OUTPUT_VAL_SHIFT 8
+#define OUTPUT_VAL (0x1 << OUTPUT_VAL_SHIFT)
#define DEBOUNCE (1 << 16)
#define DEBOUNCE_VAL_SHIFT 17
#define DEBOUNCE_VAL (0x3fff << DEBOUNCE_VAL_SHIFT)
@@ -375,6 +378,19 @@ static void at91_mux_set_pullup(void __iomem *pio, unsigned mask, bool on)
writel_relaxed(mask, pio + (on ? PIO_PUER : PIO_PUDR));
}
+static bool at91_mux_get_output(void __iomem *pio, unsigned int pin, bool *val)
+{
+ *val = (readl_relaxed(pio + PIO_ODSR) >> pin) & 0x1;
+ return (readl_relaxed(pio + PIO_OSR) >> pin) & 0x1;
+}
+
+static void at91_mux_set_output(void __iomem *pio, unsigned int mask,
+ bool is_on, bool val)
+{
+ writel_relaxed(mask, pio + (val ? PIO_SODR : PIO_CODR));
+ writel_relaxed(mask, pio + (is_on ? PIO_OER : PIO_ODR));
+}
+
static unsigned at91_mux_get_multidrive(void __iomem *pio, unsigned pin)
{
return (readl_relaxed(pio + PIO_MDSR) >> pin) & 0x1;
@@ -848,6 +864,7 @@ static int at91_pinconf_get(struct pinctrl_dev *pctldev,
void __iomem *pio;
unsigned pin;
int div;
+ bool out;
*config = 0;
dev_dbg(info->dev, "%s:%d, pin_id=%d", __func__, __LINE__, pin_id);
@@ -875,6 +892,8 @@ static int at91_pinconf_get(struct pinctrl_dev *pctldev,
if (info->ops->get_drivestrength)
*config |= (info->ops->get_drivestrength(pio, pin)
<< DRIVE_STRENGTH_SHIFT);
+ if (at91_mux_get_output(pio, pin, &out))
+ *config |= OUTPUT | (out << OUTPUT_VAL_SHIFT);
return 0;
}
@@ -907,6 +926,8 @@ static int at91_pinconf_set(struct pinctrl_dev *pctldev,
if (config & PULL_UP && config & PULL_DOWN)
return -EINVAL;
+ at91_mux_set_output(pio, mask, config & OUTPUT,
+ (config & OUTPUT_VAL) >> OUTPUT_VAL_SHIFT);
at91_mux_set_pullup(pio, mask, config & PULL_UP);
at91_mux_set_multidrive(pio, mask, config & MULTI_DRIVE);
if (info->ops->set_deglitch)
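
The new OUTPUT/OUTPUT_VAL flags travel in the same packed config word
as the existing at91 options. A short sketch of encoding "output,
driven high" (illustration only, following the #defines added above;
pio and mask stand in for the caller's bank base and pin mask):

	/* bit 7 = output enable, bit 8 = output level */
	unsigned long config = OUTPUT | (1 << OUTPUT_VAL_SHIFT);

	/*
	 * at91_pinconf_set() decodes this back into
	 *   at91_mux_set_output(pio, mask, config & OUTPUT,
	 *                       (config & OUTPUT_VAL) >> OUTPUT_VAL_SHIFT);
	 * which writes the data register (PIO_SODR/PIO_CODR) before
	 * enabling the driver (PIO_OER), so the pin comes up at the
	 * requested level.
	 */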
diff --git a/drivers/pinctrl/pinctrl-da850-pupd.c b/drivers/pinctrl/pinctrl-da850-pupd.c
new file mode 100644
index 000000000000..b36a90a3f3e4
--- /dev/null
+++ b/drivers/pinctrl/pinctrl-da850-pupd.c
@@ -0,0 +1,210 @@
+/*
+ * Pinconf driver for TI DA850/OMAP-L138/AM18XX pullup/pulldown groups
+ *
+ * Copyright (C) 2016 David Lechner
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include <linux/bitops.h>
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/ioport.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/pinctrl/pinconf.h>
+#include <linux/pinctrl/pinconf-generic.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/platform_device.h>
+
+#define DA850_PUPD_ENA 0x00
+#define DA850_PUPD_SEL 0x04
+
+struct da850_pupd_data {
+ void __iomem *base;
+ struct pinctrl_desc desc;
+ struct pinctrl_dev *pinctrl;
+};
+
+static const char * const da850_pupd_group_names[] = {
+ "cp0", "cp1", "cp2", "cp3", "cp4", "cp5", "cp6", "cp7",
+ "cp8", "cp9", "cp10", "cp11", "cp12", "cp13", "cp14", "cp15",
+ "cp16", "cp17", "cp18", "cp19", "cp20", "cp21", "cp22", "cp23",
+ "cp24", "cp25", "cp26", "cp27", "cp28", "cp29", "cp30", "cp31",
+};
+
+static int da850_pupd_get_groups_count(struct pinctrl_dev *pctldev)
+{
+ return ARRAY_SIZE(da850_pupd_group_names);
+}
+
+static const char *da850_pupd_get_group_name(struct pinctrl_dev *pctldev,
+ unsigned int selector)
+{
+ return da850_pupd_group_names[selector];
+}
+
+static int da850_pupd_get_group_pins(struct pinctrl_dev *pctldev,
+ unsigned int selector,
+ const unsigned int **pins,
+ unsigned int *num_pins)
+{
+ *num_pins = 0;
+
+ return 0;
+}
+
+static const struct pinctrl_ops da850_pupd_pctlops = {
+ .get_groups_count = da850_pupd_get_groups_count,
+ .get_group_name = da850_pupd_get_group_name,
+ .get_group_pins = da850_pupd_get_group_pins,
+ .dt_node_to_map = pinconf_generic_dt_node_to_map_group,
+ .dt_free_map = pinconf_generic_dt_free_map,
+};
+
+static int da850_pupd_pin_config_group_get(struct pinctrl_dev *pctldev,
+ unsigned int selector,
+ unsigned long *config)
+{
+ struct da850_pupd_data *data = pinctrl_dev_get_drvdata(pctldev);
+ enum pin_config_param param = pinconf_to_config_param(*config);
+ u32 val;
+ u16 arg;
+
+ val = readl(data->base + DA850_PUPD_ENA);
+ arg = !!(~val & BIT(selector));
+
+ switch (param) {
+ case PIN_CONFIG_BIAS_DISABLE:
+ break;
+ case PIN_CONFIG_BIAS_PULL_UP:
+ case PIN_CONFIG_BIAS_PULL_DOWN:
+ if (arg) {
+ /* bias is disabled */
+ arg = 0;
+ break;
+ }
+ val = readl(data->base + DA850_PUPD_SEL);
+ if (param == PIN_CONFIG_BIAS_PULL_DOWN)
+ val = ~val;
+ arg = !!(val & BIT(selector));
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ *config = pinconf_to_config_packed(param, arg);
+
+ return 0;
+}
+
+static int da850_pupd_pin_config_group_set(struct pinctrl_dev *pctldev,
+ unsigned int selector,
+ unsigned long *configs,
+ unsigned int num_configs)
+{
+ struct da850_pupd_data *data = pinctrl_dev_get_drvdata(pctldev);
+ u32 ena, sel;
+ enum pin_config_param param;
+ u16 arg;
+ int i;
+
+ ena = readl(data->base + DA850_PUPD_ENA);
+ sel = readl(data->base + DA850_PUPD_SEL);
+
+ for (i = 0; i < num_configs; i++) {
+ param = pinconf_to_config_param(configs[i]);
+ arg = pinconf_to_config_argument(configs[i]);
+
+ switch (param) {
+ case PIN_CONFIG_BIAS_DISABLE:
+ ena &= ~BIT(selector);
+ break;
+ case PIN_CONFIG_BIAS_PULL_UP:
+ ena |= BIT(selector);
+ sel |= BIT(selector);
+ break;
+ case PIN_CONFIG_BIAS_PULL_DOWN:
+ ena |= BIT(selector);
+ sel &= ~BIT(selector);
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+
+ writel(sel, data->base + DA850_PUPD_SEL);
+ writel(ena, data->base + DA850_PUPD_ENA);
+
+ return 0;
+}
+
+static const struct pinconf_ops da850_pupd_confops = {
+ .is_generic = true,
+ .pin_config_group_get = da850_pupd_pin_config_group_get,
+ .pin_config_group_set = da850_pupd_pin_config_group_set,
+};
+
+static int da850_pupd_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct da850_pupd_data *data;
+ struct resource *res;
+
+ data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ data->base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(data->base)) {
+ dev_err(dev, "Could not map resource\n");
+ return PTR_ERR(data->base);
+ }
+
+ data->desc.name = dev_name(dev);
+ data->desc.pctlops = &da850_pupd_pctlops;
+ data->desc.confops = &da850_pupd_confops;
+ data->desc.owner = THIS_MODULE;
+
+ data->pinctrl = devm_pinctrl_register(dev, &data->desc, data);
+ if (IS_ERR(data->pinctrl)) {
+ dev_err(dev, "Failed to register pinctrl\n");
+ return PTR_ERR(data->pinctrl);
+ }
+
+ platform_set_drvdata(pdev, data);
+
+ return 0;
+}
+
+static int da850_pupd_remove(struct platform_device *pdev)
+{
+ return 0;
+}
+
+static const struct of_device_id da850_pupd_of_match[] = {
+ { .compatible = "ti,da850-pupd" },
+ { }
+};
+
+static struct platform_driver da850_pupd_driver = {
+ .driver = {
+ .name = "ti-da850-pupd",
+ .of_match_table = da850_pupd_of_match,
+ },
+ .probe = da850_pupd_probe,
+ .remove = da850_pupd_remove,
+};
+module_platform_driver(da850_pupd_driver);
+
+MODULE_AUTHOR("David Lechner <david@lechnology.com>");
+MODULE_DESCRIPTION("TI DA850/OMAP-L138/AM18XX pullup/pulldown configuration");
+MODULE_LICENSE("GPL");
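
For readers of the new driver, the two registers encode bias per
group as follows (worked example, illustration only; base stands for
the mapped region, data->base in the driver):

	/*
	 * Group "cp5" (selector 5):
	 *   bias-disable   -> ENA bit 5 = 0 (SEL bit 5 ignored)
	 *   bias-pull-up   -> ENA bit 5 = 1, SEL bit 5 = 1
	 *   bias-pull-down -> ENA bit 5 = 1, SEL bit 5 = 0
	 */
	u32 ena = readl(base + DA850_PUPD_ENA);
	u32 sel = readl(base + DA850_PUPD_SEL);

	ena |= BIT(5);		/* enable the internal pull for cp5 */
	sel &= ~BIT(5);		/* and make it a pull-down */
	writel(sel, base + DA850_PUPD_SEL);	/* SEL first, as the driver does */
	writel(ena, base + DA850_PUPD_ENA);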
diff --git a/drivers/pinctrl/pinctrl-oxnas.c b/drivers/pinctrl/pinctrl-oxnas.c
index 917a7d2535d7..494ec9a7573a 100644
--- a/drivers/pinctrl/pinctrl-oxnas.c
+++ b/drivers/pinctrl/pinctrl-oxnas.c
@@ -37,15 +37,24 @@
#define GPIO_BANK_START(bank) ((bank) * PINS_PER_BANK)
-/* Regmap Offsets */
-#define PINMUX_PRIMARY_SEL0 0x0c
-#define PINMUX_SECONDARY_SEL0 0x14
-#define PINMUX_TERTIARY_SEL0 0x8c
-#define PINMUX_PRIMARY_SEL1 0x10
-#define PINMUX_SECONDARY_SEL1 0x18
-#define PINMUX_TERTIARY_SEL1 0x90
-#define PINMUX_PULLUP_CTRL0 0xac
-#define PINMUX_PULLUP_CTRL1 0xb0
+/* OX810 Regmap Offsets */
+#define PINMUX_810_PRIMARY_SEL0 0x0c
+#define PINMUX_810_SECONDARY_SEL0 0x14
+#define PINMUX_810_TERTIARY_SEL0 0x8c
+#define PINMUX_810_PRIMARY_SEL1 0x10
+#define PINMUX_810_SECONDARY_SEL1 0x18
+#define PINMUX_810_TERTIARY_SEL1 0x90
+#define PINMUX_810_PULLUP_CTRL0 0xac
+#define PINMUX_810_PULLUP_CTRL1 0xb0
+
+/* OX820 Regmap Offsets */
+#define PINMUX_820_BANK_OFFSET 0x100000
+#define PINMUX_820_SECONDARY_SEL 0x14
+#define PINMUX_820_TERTIARY_SEL 0x8c
+#define PINMUX_820_QUATERNARY_SEL 0x94
+#define PINMUX_820_DEBUG_SEL 0x9c
+#define PINMUX_820_ALTERNATIVE_SEL 0xa4
+#define PINMUX_820_PULLUP_CTRL 0xac
/* GPIO Registers */
#define INPUT_VALUE 0x00
@@ -87,8 +96,6 @@ struct oxnas_pinctrl {
struct regmap *regmap;
struct device *dev;
struct pinctrl_dev *pctldev;
- const struct pinctrl_pin_desc *pins;
- unsigned int npins;
const struct oxnas_function *functions;
unsigned int nfunctions;
const struct oxnas_pin_group *groups;
@@ -97,7 +104,50 @@ struct oxnas_pinctrl {
unsigned int nbanks;
};
-static const struct pinctrl_pin_desc oxnas_pins[] = {
+struct oxnas_pinctrl_data {
+ struct pinctrl_desc *desc;
+ struct oxnas_pinctrl *pctl;
+};
+
+static const struct pinctrl_pin_desc oxnas_ox810se_pins[] = {
+ PINCTRL_PIN(0, "gpio0"),
+ PINCTRL_PIN(1, "gpio1"),
+ PINCTRL_PIN(2, "gpio2"),
+ PINCTRL_PIN(3, "gpio3"),
+ PINCTRL_PIN(4, "gpio4"),
+ PINCTRL_PIN(5, "gpio5"),
+ PINCTRL_PIN(6, "gpio6"),
+ PINCTRL_PIN(7, "gpio7"),
+ PINCTRL_PIN(8, "gpio8"),
+ PINCTRL_PIN(9, "gpio9"),
+ PINCTRL_PIN(10, "gpio10"),
+ PINCTRL_PIN(11, "gpio11"),
+ PINCTRL_PIN(12, "gpio12"),
+ PINCTRL_PIN(13, "gpio13"),
+ PINCTRL_PIN(14, "gpio14"),
+ PINCTRL_PIN(15, "gpio15"),
+ PINCTRL_PIN(16, "gpio16"),
+ PINCTRL_PIN(17, "gpio17"),
+ PINCTRL_PIN(18, "gpio18"),
+ PINCTRL_PIN(19, "gpio19"),
+ PINCTRL_PIN(20, "gpio20"),
+ PINCTRL_PIN(21, "gpio21"),
+ PINCTRL_PIN(22, "gpio22"),
+ PINCTRL_PIN(23, "gpio23"),
+ PINCTRL_PIN(24, "gpio24"),
+ PINCTRL_PIN(25, "gpio25"),
+ PINCTRL_PIN(26, "gpio26"),
+ PINCTRL_PIN(27, "gpio27"),
+ PINCTRL_PIN(28, "gpio28"),
+ PINCTRL_PIN(29, "gpio29"),
+ PINCTRL_PIN(30, "gpio30"),
+ PINCTRL_PIN(31, "gpio31"),
+ PINCTRL_PIN(32, "gpio32"),
+ PINCTRL_PIN(33, "gpio33"),
+ PINCTRL_PIN(34, "gpio34"),
+};
+
+static const struct pinctrl_pin_desc oxnas_ox820_pins[] = {
PINCTRL_PIN(0, "gpio0"),
PINCTRL_PIN(1, "gpio1"),
PINCTRL_PIN(2, "gpio2"),
@@ -133,9 +183,24 @@ static const struct pinctrl_pin_desc oxnas_pins[] = {
PINCTRL_PIN(32, "gpio32"),
PINCTRL_PIN(33, "gpio33"),
PINCTRL_PIN(34, "gpio34"),
+ PINCTRL_PIN(35, "gpio35"),
+ PINCTRL_PIN(36, "gpio36"),
+ PINCTRL_PIN(37, "gpio37"),
+ PINCTRL_PIN(38, "gpio38"),
+ PINCTRL_PIN(39, "gpio39"),
+ PINCTRL_PIN(40, "gpio40"),
+ PINCTRL_PIN(41, "gpio41"),
+ PINCTRL_PIN(42, "gpio42"),
+ PINCTRL_PIN(43, "gpio43"),
+ PINCTRL_PIN(44, "gpio44"),
+ PINCTRL_PIN(45, "gpio45"),
+ PINCTRL_PIN(46, "gpio46"),
+ PINCTRL_PIN(47, "gpio47"),
+ PINCTRL_PIN(48, "gpio48"),
+ PINCTRL_PIN(49, "gpio49"),
};
-static const char * const oxnas_fct0_group[] = {
+static const char * const oxnas_ox810se_fct0_group[] = {
"gpio0", "gpio1", "gpio2", "gpio3",
"gpio4", "gpio5", "gpio6", "gpio7",
"gpio8", "gpio9", "gpio10", "gpio11",
@@ -147,7 +212,7 @@ static const char * const oxnas_fct0_group[] = {
"gpio32", "gpio33", "gpio34"
};
-static const char * const oxnas_fct3_group[] = {
+static const char * const oxnas_ox810se_fct3_group[] = {
"gpio0", "gpio1", "gpio2", "gpio3",
"gpio4", "gpio5", "gpio6", "gpio7",
"gpio8", "gpio9",
@@ -158,6 +223,40 @@ static const char * const oxnas_fct3_group[] = {
"gpio34"
};
+static const char * const oxnas_ox820_fct0_group[] = {
+ "gpio0", "gpio1", "gpio2", "gpio3",
+ "gpio4", "gpio5", "gpio6", "gpio7",
+ "gpio8", "gpio9", "gpio10", "gpio11",
+ "gpio12", "gpio13", "gpio14", "gpio15",
+ "gpio16", "gpio17", "gpio18", "gpio19",
+ "gpio20", "gpio21", "gpio22", "gpio23",
+ "gpio24", "gpio25", "gpio26", "gpio27",
+ "gpio28", "gpio29", "gpio30", "gpio31",
+ "gpio32", "gpio33", "gpio34", "gpio35",
+ "gpio36", "gpio37", "gpio38", "gpio39",
+ "gpio40", "gpio41", "gpio42", "gpio43",
+ "gpio44", "gpio45", "gpio46", "gpio47",
+ "gpio48", "gpio49"
+};
+
+static const char * const oxnas_ox820_fct1_group[] = {
+ "gpio3", "gpio4",
+ "gpio12", "gpio13", "gpio14", "gpio15",
+ "gpio16", "gpio17", "gpio18", "gpio19",
+ "gpio20", "gpio21", "gpio22", "gpio23",
+ "gpio24"
+};
+
+static const char * const oxnas_ox820_fct4_group[] = {
+ "gpio5", "gpio6", "gpio7", "gpio8",
+ "gpio24", "gpio25", "gpio26", "gpio27",
+ "gpio40", "gpio41", "gpio42", "gpio43"
+};
+
+static const char * const oxnas_ox820_fct5_group[] = {
+ "gpio28", "gpio29", "gpio30", "gpio31"
+};
+
#define FUNCTION(_name, _gr) \
{ \
.name = #_name, \
@@ -165,9 +264,16 @@ static const char * const oxnas_fct3_group[] = {
.ngroups = ARRAY_SIZE(oxnas_##_gr##_group), \
}
-static const struct oxnas_function oxnas_functions[] = {
- FUNCTION(gpio, fct0),
- FUNCTION(fct3, fct3),
+static const struct oxnas_function oxnas_ox810se_functions[] = {
+ FUNCTION(gpio, ox810se_fct0),
+ FUNCTION(fct3, ox810se_fct3),
+};
+
+static const struct oxnas_function oxnas_ox820_functions[] = {
+ FUNCTION(gpio, ox820_fct0),
+ FUNCTION(fct1, ox820_fct1),
+ FUNCTION(fct4, ox820_fct4),
+ FUNCTION(fct5, ox820_fct5),
};
#define OXNAS_PINCTRL_GROUP(_pin, _name, ...) \
@@ -185,7 +291,7 @@ static const struct oxnas_function oxnas_functions[] = {
.fct = _fct, \
}
-static const struct oxnas_pin_group oxnas_groups[] = {
+static const struct oxnas_pin_group oxnas_ox810se_groups[] = {
OXNAS_PINCTRL_GROUP(0, gpio0,
OXNAS_PINCTRL_FUNCTION(gpio, 0),
OXNAS_PINCTRL_FUNCTION(fct3, 3)),
@@ -282,6 +388,140 @@ static const struct oxnas_pin_group oxnas_groups[] = {
OXNAS_PINCTRL_FUNCTION(fct3, 3)),
};
+static const struct oxnas_pin_group oxnas_ox820_groups[] = {
+ OXNAS_PINCTRL_GROUP(0, gpio0,
+ OXNAS_PINCTRL_FUNCTION(gpio, 0)),
+ OXNAS_PINCTRL_GROUP(1, gpio1,
+ OXNAS_PINCTRL_FUNCTION(gpio, 0)),
+ OXNAS_PINCTRL_GROUP(2, gpio2,
+ OXNAS_PINCTRL_FUNCTION(gpio, 0)),
+ OXNAS_PINCTRL_GROUP(3, gpio3,
+ OXNAS_PINCTRL_FUNCTION(gpio, 0),
+ OXNAS_PINCTRL_FUNCTION(fct1, 1)),
+ OXNAS_PINCTRL_GROUP(4, gpio4,
+ OXNAS_PINCTRL_FUNCTION(gpio, 0),
+ OXNAS_PINCTRL_FUNCTION(fct1, 1)),
+ OXNAS_PINCTRL_GROUP(5, gpio5,
+ OXNAS_PINCTRL_FUNCTION(gpio, 0),
+ OXNAS_PINCTRL_FUNCTION(fct4, 4)),
+ OXNAS_PINCTRL_GROUP(6, gpio6,
+ OXNAS_PINCTRL_FUNCTION(gpio, 0),
+ OXNAS_PINCTRL_FUNCTION(fct4, 4)),
+ OXNAS_PINCTRL_GROUP(7, gpio7,
+ OXNAS_PINCTRL_FUNCTION(gpio, 0),
+ OXNAS_PINCTRL_FUNCTION(fct4, 4)),
+ OXNAS_PINCTRL_GROUP(8, gpio8,
+ OXNAS_PINCTRL_FUNCTION(gpio, 0),
+ OXNAS_PINCTRL_FUNCTION(fct4, 4)),
+ OXNAS_PINCTRL_GROUP(9, gpio9,
+ OXNAS_PINCTRL_FUNCTION(gpio, 0)),
+ OXNAS_PINCTRL_GROUP(10, gpio10,
+ OXNAS_PINCTRL_FUNCTION(gpio, 0)),
+ OXNAS_PINCTRL_GROUP(11, gpio11,
+ OXNAS_PINCTRL_FUNCTION(gpio, 0)),
+ OXNAS_PINCTRL_GROUP(12, gpio12,
+ OXNAS_PINCTRL_FUNCTION(gpio, 0),
+ OXNAS_PINCTRL_FUNCTION(fct1, 1)),
+ OXNAS_PINCTRL_GROUP(13, gpio13,
+ OXNAS_PINCTRL_FUNCTION(gpio, 0),
+ OXNAS_PINCTRL_FUNCTION(fct1, 1)),
+ OXNAS_PINCTRL_GROUP(14, gpio14,
+ OXNAS_PINCTRL_FUNCTION(gpio, 0),
+ OXNAS_PINCTRL_FUNCTION(fct1, 1)),
+ OXNAS_PINCTRL_GROUP(15, gpio15,
+ OXNAS_PINCTRL_FUNCTION(gpio, 0),
+ OXNAS_PINCTRL_FUNCTION(fct1, 1)),
+ OXNAS_PINCTRL_GROUP(16, gpio16,
+ OXNAS_PINCTRL_FUNCTION(gpio, 0),
+ OXNAS_PINCTRL_FUNCTION(fct1, 1)),
+ OXNAS_PINCTRL_GROUP(17, gpio17,
+ OXNAS_PINCTRL_FUNCTION(gpio, 0),
+ OXNAS_PINCTRL_FUNCTION(fct1, 1)),
+ OXNAS_PINCTRL_GROUP(18, gpio18,
+ OXNAS_PINCTRL_FUNCTION(gpio, 0),
+ OXNAS_PINCTRL_FUNCTION(fct1, 1)),
+ OXNAS_PINCTRL_GROUP(19, gpio19,
+ OXNAS_PINCTRL_FUNCTION(gpio, 0),
+ OXNAS_PINCTRL_FUNCTION(fct1, 1)),
+ OXNAS_PINCTRL_GROUP(20, gpio20,
+ OXNAS_PINCTRL_FUNCTION(gpio, 0),
+ OXNAS_PINCTRL_FUNCTION(fct1, 1)),
+ OXNAS_PINCTRL_GROUP(21, gpio21,
+ OXNAS_PINCTRL_FUNCTION(gpio, 0),
+ OXNAS_PINCTRL_FUNCTION(fct1, 1)),
+ OXNAS_PINCTRL_GROUP(22, gpio22,
+ OXNAS_PINCTRL_FUNCTION(gpio, 0),
+ OXNAS_PINCTRL_FUNCTION(fct1, 1)),
+ OXNAS_PINCTRL_GROUP(23, gpio23,
+ OXNAS_PINCTRL_FUNCTION(gpio, 0),
+ OXNAS_PINCTRL_FUNCTION(fct1, 1)),
+ OXNAS_PINCTRL_GROUP(24, gpio24,
+ OXNAS_PINCTRL_FUNCTION(gpio, 0),
+ OXNAS_PINCTRL_FUNCTION(fct1, 1),
+ OXNAS_PINCTRL_FUNCTION(fct4, 5)),
+ OXNAS_PINCTRL_GROUP(25, gpio25,
+ OXNAS_PINCTRL_FUNCTION(gpio, 0),
+ OXNAS_PINCTRL_FUNCTION(fct4, 4)),
+ OXNAS_PINCTRL_GROUP(26, gpio26,
+ OXNAS_PINCTRL_FUNCTION(gpio, 0),
+ OXNAS_PINCTRL_FUNCTION(fct4, 4)),
+ OXNAS_PINCTRL_GROUP(27, gpio27,
+ OXNAS_PINCTRL_FUNCTION(gpio, 0),
+ OXNAS_PINCTRL_FUNCTION(fct4, 4)),
+ OXNAS_PINCTRL_GROUP(28, gpio28,
+ OXNAS_PINCTRL_FUNCTION(gpio, 0),
+ OXNAS_PINCTRL_FUNCTION(fct5, 5)),
+ OXNAS_PINCTRL_GROUP(29, gpio29,
+ OXNAS_PINCTRL_FUNCTION(gpio, 0),
+ OXNAS_PINCTRL_FUNCTION(fct5, 5)),
+ OXNAS_PINCTRL_GROUP(30, gpio30,
+ OXNAS_PINCTRL_FUNCTION(gpio, 0),
+ OXNAS_PINCTRL_FUNCTION(fct5, 5)),
+ OXNAS_PINCTRL_GROUP(31, gpio31,
+ OXNAS_PINCTRL_FUNCTION(gpio, 0),
+ OXNAS_PINCTRL_FUNCTION(fct5, 5)),
+ OXNAS_PINCTRL_GROUP(32, gpio32,
+ OXNAS_PINCTRL_FUNCTION(gpio, 0)),
+ OXNAS_PINCTRL_GROUP(33, gpio33,
+ OXNAS_PINCTRL_FUNCTION(gpio, 0)),
+ OXNAS_PINCTRL_GROUP(34, gpio34,
+ OXNAS_PINCTRL_FUNCTION(gpio, 0)),
+ OXNAS_PINCTRL_GROUP(35, gpio35,
+ OXNAS_PINCTRL_FUNCTION(gpio, 0)),
+ OXNAS_PINCTRL_GROUP(36, gpio36,
+ OXNAS_PINCTRL_FUNCTION(gpio, 0)),
+ OXNAS_PINCTRL_GROUP(37, gpio37,
+ OXNAS_PINCTRL_FUNCTION(gpio, 0)),
+ OXNAS_PINCTRL_GROUP(38, gpio38,
+ OXNAS_PINCTRL_FUNCTION(gpio, 0)),
+ OXNAS_PINCTRL_GROUP(39, gpio39,
+ OXNAS_PINCTRL_FUNCTION(gpio, 0)),
+ OXNAS_PINCTRL_GROUP(40, gpio40,
+ OXNAS_PINCTRL_FUNCTION(gpio, 0),
+ OXNAS_PINCTRL_FUNCTION(fct4, 4)),
+ OXNAS_PINCTRL_GROUP(41, gpio41,
+ OXNAS_PINCTRL_FUNCTION(gpio, 0),
+ OXNAS_PINCTRL_FUNCTION(fct4, 4)),
+ OXNAS_PINCTRL_GROUP(42, gpio42,
+ OXNAS_PINCTRL_FUNCTION(gpio, 0),
+ OXNAS_PINCTRL_FUNCTION(fct4, 4)),
+ OXNAS_PINCTRL_GROUP(43, gpio43,
+ OXNAS_PINCTRL_FUNCTION(gpio, 0),
+ OXNAS_PINCTRL_FUNCTION(fct4, 4)),
+ OXNAS_PINCTRL_GROUP(44, gpio44,
+ OXNAS_PINCTRL_FUNCTION(gpio, 0)),
+ OXNAS_PINCTRL_GROUP(45, gpio45,
+ OXNAS_PINCTRL_FUNCTION(gpio, 0)),
+ OXNAS_PINCTRL_GROUP(46, gpio46,
+ OXNAS_PINCTRL_FUNCTION(gpio, 0)),
+ OXNAS_PINCTRL_GROUP(47, gpio47,
+ OXNAS_PINCTRL_FUNCTION(gpio, 0)),
+ OXNAS_PINCTRL_GROUP(48, gpio48,
+ OXNAS_PINCTRL_FUNCTION(gpio, 0)),
+ OXNAS_PINCTRL_GROUP(49, gpio49,
+ OXNAS_PINCTRL_FUNCTION(gpio, 0)),
+};
+
static inline struct oxnas_gpio_bank *pctl_to_bank(struct oxnas_pinctrl *pctl,
unsigned int pin)
{
@@ -352,8 +592,8 @@ static int oxnas_pinmux_get_function_groups(struct pinctrl_dev *pctldev,
return 0;
}
-static int oxnas_pinmux_enable(struct pinctrl_dev *pctldev,
- unsigned int func, unsigned int group)
+static int oxnas_ox810se_pinmux_enable(struct pinctrl_dev *pctldev,
+ unsigned int func, unsigned int group)
{
struct oxnas_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev);
const struct oxnas_pin_group *pg = &pctl->groups[group];
@@ -371,22 +611,22 @@ static int oxnas_pinmux_enable(struct pinctrl_dev *pctldev,
regmap_write_bits(pctl->regmap,
(pg->bank ?
- PINMUX_PRIMARY_SEL1 :
- PINMUX_PRIMARY_SEL0),
+ PINMUX_810_PRIMARY_SEL1 :
+ PINMUX_810_PRIMARY_SEL0),
mask,
(functions->fct == 1 ?
mask : 0));
regmap_write_bits(pctl->regmap,
(pg->bank ?
- PINMUX_SECONDARY_SEL1 :
- PINMUX_SECONDARY_SEL0),
+ PINMUX_810_SECONDARY_SEL1 :
+ PINMUX_810_SECONDARY_SEL0),
mask,
(functions->fct == 2 ?
mask : 0));
regmap_write_bits(pctl->regmap,
(pg->bank ?
- PINMUX_TERTIARY_SEL1 :
- PINMUX_TERTIARY_SEL0),
+ PINMUX_810_TERTIARY_SEL1 :
+ PINMUX_810_TERTIARY_SEL0),
mask,
(functions->fct == 3 ?
mask : 0));
@@ -402,9 +642,64 @@ static int oxnas_pinmux_enable(struct pinctrl_dev *pctldev,
return -EINVAL;
}
-static int oxnas_gpio_request_enable(struct pinctrl_dev *pctldev,
- struct pinctrl_gpio_range *range,
- unsigned int offset)
+static int oxnas_ox820_pinmux_enable(struct pinctrl_dev *pctldev,
+ unsigned int func, unsigned int group)
+{
+ struct oxnas_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev);
+ const struct oxnas_pin_group *pg = &pctl->groups[group];
+ const struct oxnas_function *pf = &pctl->functions[func];
+ const char *fname = pf->name;
+ struct oxnas_desc_function *functions = pg->functions;
+ unsigned int offset = (pg->bank ? PINMUX_820_BANK_OFFSET : 0);
+ u32 mask = BIT(pg->pin);
+
+ while (functions->name) {
+ if (!strcmp(functions->name, fname)) {
+ dev_dbg(pctl->dev,
+ "setting function %s bank %d pin %d fct %d mask %x\n",
+ fname, pg->bank, pg->pin,
+ functions->fct, mask);
+
+ regmap_write_bits(pctl->regmap,
+ offset + PINMUX_820_SECONDARY_SEL,
+ mask,
+ (functions->fct == 1 ?
+ mask : 0));
+ regmap_write_bits(pctl->regmap,
+ offset + PINMUX_820_TERTIARY_SEL,
+ mask,
+ (functions->fct == 2 ?
+ mask : 0));
+ regmap_write_bits(pctl->regmap,
+ offset + PINMUX_820_QUATERNARY_SEL,
+ mask,
+ (functions->fct == 3 ?
+ mask : 0));
+ regmap_write_bits(pctl->regmap,
+ offset + PINMUX_820_DEBUG_SEL,
+ mask,
+ (functions->fct == 4 ?
+ mask : 0));
+ regmap_write_bits(pctl->regmap,
+ offset + PINMUX_820_ALTERNATIVE_SEL,
+ mask,
+ (functions->fct == 5 ?
+ mask : 0));
+
+ return 0;
+ }
+
+ functions++;
+ }
+
+ dev_err(pctl->dev, "cannot mux pin %u to function %u\n", group, func);
+
+ return -EINVAL;
+}
+
+static int oxnas_ox810se_gpio_request_enable(struct pinctrl_dev *pctldev,
+ struct pinctrl_gpio_range *range,
+ unsigned int offset)
{
struct oxnas_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev);
struct oxnas_gpio_bank *bank = gpiochip_get_data(range->gc);
@@ -415,18 +710,49 @@ static int oxnas_gpio_request_enable(struct pinctrl_dev *pctldev,
regmap_write_bits(pctl->regmap,
(bank->id ?
- PINMUX_PRIMARY_SEL1 :
- PINMUX_PRIMARY_SEL0),
+ PINMUX_810_PRIMARY_SEL1 :
+ PINMUX_810_PRIMARY_SEL0),
mask, 0);
regmap_write_bits(pctl->regmap,
(bank->id ?
- PINMUX_SECONDARY_SEL1 :
- PINMUX_SECONDARY_SEL0),
+ PINMUX_810_SECONDARY_SEL1 :
+ PINMUX_810_SECONDARY_SEL0),
mask, 0);
regmap_write_bits(pctl->regmap,
(bank->id ?
- PINMUX_TERTIARY_SEL1 :
- PINMUX_TERTIARY_SEL0),
+ PINMUX_810_TERTIARY_SEL1 :
+ PINMUX_810_TERTIARY_SEL0),
+ mask, 0);
+
+ return 0;
+}
+
+static int oxnas_ox820_gpio_request_enable(struct pinctrl_dev *pctldev,
+ struct pinctrl_gpio_range *range,
+ unsigned int offset)
+{
+ struct oxnas_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev);
+ struct oxnas_gpio_bank *bank = gpiochip_get_data(range->gc);
+ unsigned int bank_offset = (bank->id ? PINMUX_820_BANK_OFFSET : 0);
+ u32 mask = BIT(offset - bank->gpio_chip.base);
+
+ dev_dbg(pctl->dev, "requesting gpio %d in bank %d (id %d) with mask 0x%x\n",
+ offset, bank->gpio_chip.base, bank->id, mask);
+
+ regmap_write_bits(pctl->regmap,
+ bank_offset + PINMUX_820_SECONDARY_SEL,
+ mask, 0);
+ regmap_write_bits(pctl->regmap,
+ bank_offset + PINMUX_820_TERTIARY_SEL,
+ mask, 0);
+ regmap_write_bits(pctl->regmap,
+ bank_offset + PINMUX_820_QUATERNARY_SEL,
+ mask, 0);
+ regmap_write_bits(pctl->regmap,
+ bank_offset + PINMUX_820_DEBUG_SEL,
+ mask, 0);
+ regmap_write_bits(pctl->regmap,
+ bank_offset + PINMUX_820_ALTERNATIVE_SEL,
mask, 0);
return 0;
@@ -498,17 +824,26 @@ static int oxnas_gpio_set_direction(struct pinctrl_dev *pctldev,
return 0;
}
-static const struct pinmux_ops oxnas_pinmux_ops = {
+static const struct pinmux_ops oxnas_ox810se_pinmux_ops = {
.get_functions_count = oxnas_pinmux_get_functions_count,
.get_function_name = oxnas_pinmux_get_function_name,
.get_function_groups = oxnas_pinmux_get_function_groups,
- .set_mux = oxnas_pinmux_enable,
- .gpio_request_enable = oxnas_gpio_request_enable,
+ .set_mux = oxnas_ox810se_pinmux_enable,
+ .gpio_request_enable = oxnas_ox810se_gpio_request_enable,
.gpio_set_direction = oxnas_gpio_set_direction,
};
-static int oxnas_pinconf_get(struct pinctrl_dev *pctldev, unsigned int pin,
- unsigned long *config)
+static const struct pinmux_ops oxnas_ox820_pinmux_ops = {
+ .get_functions_count = oxnas_pinmux_get_functions_count,
+ .get_function_name = oxnas_pinmux_get_function_name,
+ .get_function_groups = oxnas_pinmux_get_function_groups,
+ .set_mux = oxnas_ox820_pinmux_enable,
+ .gpio_request_enable = oxnas_ox820_gpio_request_enable,
+ .gpio_set_direction = oxnas_gpio_set_direction,
+};
+
+static int oxnas_ox810se_pinconf_get(struct pinctrl_dev *pctldev,
+ unsigned int pin, unsigned long *config)
{
struct oxnas_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev);
struct oxnas_gpio_bank *bank = pctl_to_bank(pctl, pin);
@@ -521,8 +856,38 @@ static int oxnas_pinconf_get(struct pinctrl_dev *pctldev, unsigned int pin,
case PIN_CONFIG_BIAS_PULL_UP:
ret = regmap_read(pctl->regmap,
(bank->id ?
- PINMUX_PULLUP_CTRL1 :
- PINMUX_PULLUP_CTRL0),
+ PINMUX_810_PULLUP_CTRL1 :
+ PINMUX_810_PULLUP_CTRL0),
+ &arg);
+ if (ret)
+ return ret;
+
+ arg = !!(arg & mask);
+ break;
+ default:
+ return -ENOTSUPP;
+ }
+
+ *config = pinconf_to_config_packed(param, arg);
+
+ return 0;
+}
+
+static int oxnas_ox820_pinconf_get(struct pinctrl_dev *pctldev,
+ unsigned int pin, unsigned long *config)
+{
+ struct oxnas_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev);
+ struct oxnas_gpio_bank *bank = pctl_to_bank(pctl, pin);
+ unsigned int param = pinconf_to_config_param(*config);
+ unsigned int bank_offset = (bank->id ? PINMUX_820_BANK_OFFSET : 0);
+ u32 mask = BIT(pin - bank->gpio_chip.base);
+ int ret;
+ u32 arg;
+
+ switch (param) {
+ case PIN_CONFIG_BIAS_PULL_UP:
+ ret = regmap_read(pctl->regmap,
+ bank_offset + PINMUX_820_PULLUP_CTRL,
&arg);
if (ret)
return ret;
@@ -538,8 +903,9 @@ static int oxnas_pinconf_get(struct pinctrl_dev *pctldev, unsigned int pin,
return 0;
}
-static int oxnas_pinconf_set(struct pinctrl_dev *pctldev, unsigned int pin,
- unsigned long *configs, unsigned int num_configs)
+static int oxnas_ox810se_pinconf_set(struct pinctrl_dev *pctldev,
+ unsigned int pin, unsigned long *configs,
+ unsigned int num_configs)
{
struct oxnas_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev);
struct oxnas_gpio_bank *bank = pctl_to_bank(pctl, pin);
@@ -561,8 +927,8 @@ static int oxnas_pinconf_set(struct pinctrl_dev *pctldev, unsigned int pin,
dev_dbg(pctl->dev, " pullup\n");
regmap_write_bits(pctl->regmap,
(bank->id ?
- PINMUX_PULLUP_CTRL1 :
- PINMUX_PULLUP_CTRL0),
+ PINMUX_810_PULLUP_CTRL1 :
+ PINMUX_810_PULLUP_CTRL0),
mask, mask);
break;
default:
@@ -575,18 +941,53 @@ static int oxnas_pinconf_set(struct pinctrl_dev *pctldev, unsigned int pin,
return 0;
}
-static const struct pinconf_ops oxnas_pinconf_ops = {
- .pin_config_get = oxnas_pinconf_get,
- .pin_config_set = oxnas_pinconf_set,
+static int oxnas_ox820_pinconf_set(struct pinctrl_dev *pctldev,
+ unsigned int pin, unsigned long *configs,
+ unsigned int num_configs)
+{
+ struct oxnas_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev);
+ struct oxnas_gpio_bank *bank = pctl_to_bank(pctl, pin);
+ unsigned int bank_offset = (bank->id ? PINMUX_820_BANK_OFFSET : 0);
+ unsigned int param;
+ u32 arg;
+ unsigned int i;
+ u32 offset = pin - bank->gpio_chip.base;
+ u32 mask = BIT(offset);
+
+ dev_dbg(pctl->dev, "setting pin %d bank %d mask 0x%x\n",
+ pin, bank->gpio_chip.base, mask);
+
+ for (i = 0; i < num_configs; i++) {
+ param = pinconf_to_config_param(configs[i]);
+ arg = pinconf_to_config_argument(configs[i]);
+
+ switch (param) {
+ case PIN_CONFIG_BIAS_PULL_UP:
+ dev_dbg(pctl->dev, " pullup\n");
+ regmap_write_bits(pctl->regmap,
+ bank_offset + PINMUX_820_PULLUP_CTRL,
+ mask, mask);
+ break;
+ default:
+ dev_err(pctl->dev, "Property %u not supported\n",
+ param);
+ return -ENOTSUPP;
+ }
+ }
+
+ return 0;
+}
+
+static const struct pinconf_ops oxnas_ox810se_pinconf_ops = {
+ .pin_config_get = oxnas_ox810se_pinconf_get,
+ .pin_config_set = oxnas_ox810se_pinconf_set,
.is_generic = true,
};
-static struct pinctrl_desc oxnas_pinctrl_desc = {
- .name = "oxnas-pinctrl",
- .pctlops = &oxnas_pinctrl_ops,
- .pmxops = &oxnas_pinmux_ops,
- .confops = &oxnas_pinconf_ops,
- .owner = THIS_MODULE,
+static const struct pinconf_ops oxnas_ox820_pinconf_ops = {
+ .pin_config_get = oxnas_ox820_pinconf_get,
+ .pin_config_set = oxnas_ox820_pinconf_set,
+ .is_generic = true,
};
static void oxnas_gpio_irq_ack(struct irq_data *data)
@@ -699,10 +1100,78 @@ static struct oxnas_gpio_bank oxnas_gpio_banks[] = {
GPIO_BANK(1),
};
+static struct oxnas_pinctrl ox810se_pinctrl = {
+ .functions = oxnas_ox810se_functions,
+ .nfunctions = ARRAY_SIZE(oxnas_ox810se_functions),
+ .groups = oxnas_ox810se_groups,
+ .ngroups = ARRAY_SIZE(oxnas_ox810se_groups),
+ .gpio_banks = oxnas_gpio_banks,
+ .nbanks = ARRAY_SIZE(oxnas_gpio_banks),
+};
+
+static struct pinctrl_desc oxnas_ox810se_pinctrl_desc = {
+ .name = "oxnas-pinctrl",
+ .pins = oxnas_ox810se_pins,
+ .npins = ARRAY_SIZE(oxnas_ox810se_pins),
+ .pctlops = &oxnas_pinctrl_ops,
+ .pmxops = &oxnas_ox810se_pinmux_ops,
+ .confops = &oxnas_ox810se_pinconf_ops,
+ .owner = THIS_MODULE,
+};
+
+static struct oxnas_pinctrl ox820_pinctrl = {
+ .functions = oxnas_ox820_functions,
+ .nfunctions = ARRAY_SIZE(oxnas_ox820_functions),
+ .groups = oxnas_ox820_groups,
+ .ngroups = ARRAY_SIZE(oxnas_ox820_groups),
+ .gpio_banks = oxnas_gpio_banks,
+ .nbanks = ARRAY_SIZE(oxnas_gpio_banks),
+};
+
+static struct pinctrl_desc oxnas_ox820_pinctrl_desc = {
+ .name = "oxnas-pinctrl",
+ .pins = oxnas_ox820_pins,
+ .npins = ARRAY_SIZE(oxnas_ox820_pins),
+ .pctlops = &oxnas_pinctrl_ops,
+ .pmxops = &oxnas_ox820_pinmux_ops,
+ .confops = &oxnas_ox820_pinconf_ops,
+ .owner = THIS_MODULE,
+};
+
+static struct oxnas_pinctrl_data oxnas_ox810se_pinctrl_data = {
+ .desc = &oxnas_ox810se_pinctrl_desc,
+ .pctl = &ox810se_pinctrl,
+};
+
+static struct oxnas_pinctrl_data oxnas_ox820_pinctrl_data = {
+ .desc = &oxnas_ox820_pinctrl_desc,
+ .pctl = &ox820_pinctrl,
+};
+
+static const struct of_device_id oxnas_pinctrl_of_match[] = {
+ { .compatible = "oxsemi,ox810se-pinctrl",
+ .data = &oxnas_ox810se_pinctrl_data
+ },
+ { .compatible = "oxsemi,ox820-pinctrl",
+ .data = &oxnas_ox820_pinctrl_data,
+ },
+ { },
+};
+
static int oxnas_pinctrl_probe(struct platform_device *pdev)
{
+ const struct of_device_id *id;
+ const struct oxnas_pinctrl_data *data;
struct oxnas_pinctrl *pctl;
+ id = of_match_node(oxnas_pinctrl_of_match, pdev->dev.of_node);
+ if (!id)
+ return -ENODEV;
+
+ data = id->data;
+ if (!data || !data->pctl || !data->desc)
+ return -EINVAL;
+
pctl = devm_kzalloc(&pdev->dev, sizeof(*pctl), GFP_KERNEL);
if (!pctl)
return -ENOMEM;
@@ -716,20 +1185,14 @@ static int oxnas_pinctrl_probe(struct platform_device *pdev)
return -ENODEV;
}
- pctl->pins = oxnas_pins;
- pctl->npins = ARRAY_SIZE(oxnas_pins);
- pctl->functions = oxnas_functions;
- pctl->nfunctions = ARRAY_SIZE(oxnas_functions);
- pctl->groups = oxnas_groups;
- pctl->ngroups = ARRAY_SIZE(oxnas_groups);
- pctl->gpio_banks = oxnas_gpio_banks;
- pctl->nbanks = ARRAY_SIZE(oxnas_gpio_banks);
-
- oxnas_pinctrl_desc.pins = pctl->pins;
- oxnas_pinctrl_desc.npins = pctl->npins;
+ pctl->functions = data->pctl->functions;
+ pctl->nfunctions = data->pctl->nfunctions;
+ pctl->groups = data->pctl->groups;
+ pctl->ngroups = data->pctl->ngroups;
+ pctl->gpio_banks = data->pctl->gpio_banks;
+ pctl->nbanks = data->pctl->nbanks;
- pctl->pctldev = pinctrl_register(&oxnas_pinctrl_desc,
- &pdev->dev, pctl);
+ pctl->pctldev = pinctrl_register(data->desc, &pdev->dev, pctl);
if (IS_ERR(pctl->pctldev)) {
dev_err(&pdev->dev, "Failed to register pinctrl device\n");
return PTR_ERR(pctl->pctldev);
@@ -805,11 +1268,6 @@ static int oxnas_gpio_probe(struct platform_device *pdev)
return 0;
}
-static const struct of_device_id oxnas_pinctrl_of_match[] = {
- { .compatible = "oxsemi,ox810se-pinctrl", },
- { },
-};
-
static struct platform_driver oxnas_pinctrl_driver = {
.driver = {
.name = "oxnas-pinctrl",
@@ -821,6 +1279,7 @@ static struct platform_driver oxnas_pinctrl_driver = {
static const struct of_device_id oxnas_gpio_of_match[] = {
{ .compatible = "oxsemi,ox810se-gpio", },
+ { .compatible = "oxsemi,ox820-gpio", },
{ },
};
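
A worked example of the OX820 register math introduced above
(illustration only, derived from the #defines and group tables;
pctl is the driver state as in oxnas_ox820_pinmux_enable()):

	/*
	 * Mux OX820 gpio40 (bank 1, pin 8 within the bank) to "fct4",
	 * whose fct value is 4.  Bank 1 registers sit
	 * PINMUX_820_BANK_OFFSET (0x100000) above bank 0, and fct 4
	 * maps to the debug selector, so the bit is set at 0x10009c;
	 * the other selector registers are cleared the same way.
	 */
	unsigned int offset = PINMUX_820_BANK_OFFSET;
	u32 mask = BIT(8);

	regmap_write_bits(pctl->regmap,
			  offset + PINMUX_820_DEBUG_SEL, mask, mask);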
diff --git a/drivers/pinctrl/pinctrl-rockchip.c b/drivers/pinctrl/pinctrl-rockchip.c
index 49bf7dcb7ed8..08765f58253c 100644
--- a/drivers/pinctrl/pinctrl-rockchip.c
+++ b/drivers/pinctrl/pinctrl-rockchip.c
@@ -59,6 +59,7 @@
#define GPIO_LS_SYNC 0x60
enum rockchip_pinctrl_type {
+ RK1108,
RK2928,
RK3066B,
RK3188,
@@ -624,6 +625,65 @@ static int rockchip_set_mux(struct rockchip_pin_bank *bank, int pin, int mux)
return ret;
}
+#define RK1108_PULL_PMU_OFFSET 0x10
+#define RK1108_PULL_OFFSET 0x110
+#define RK1108_PULL_PINS_PER_REG 8
+#define RK1108_PULL_BITS_PER_PIN 2
+#define RK1108_PULL_BANK_STRIDE 16
+
+static void rk1108_calc_pull_reg_and_bit(struct rockchip_pin_bank *bank,
+ int pin_num, struct regmap **regmap,
+ int *reg, u8 *bit)
+{
+ struct rockchip_pinctrl *info = bank->drvdata;
+
+ /* The first bank is located in the PMU */
+ if (bank->bank_num == 0) {
+ *regmap = info->regmap_pmu;
+ *reg = RK1108_PULL_PMU_OFFSET;
+ } else {
+ *reg = RK1108_PULL_OFFSET;
+ *regmap = info->regmap_base;
+ /* correct the offset, as we're starting with the 2nd bank */
+ *reg -= 0x10;
+ *reg += bank->bank_num * RK1108_PULL_BANK_STRIDE;
+ }
+
+ *reg += ((pin_num / RK1108_PULL_PINS_PER_REG) * 4);
+ *bit = (pin_num % RK1108_PULL_PINS_PER_REG);
+ *bit *= RK1108_PULL_BITS_PER_PIN;
+}
+
+#define RK1108_DRV_PMU_OFFSET 0x20
+#define RK1108_DRV_GRF_OFFSET 0x210
+#define RK1108_DRV_BITS_PER_PIN 2
+#define RK1108_DRV_PINS_PER_REG 8
+#define RK1108_DRV_BANK_STRIDE 16
+
+static void rk1108_calc_drv_reg_and_bit(struct rockchip_pin_bank *bank,
+ int pin_num, struct regmap **regmap,
+ int *reg, u8 *bit)
+{
+ struct rockchip_pinctrl *info = bank->drvdata;
+
+ /* The first bank is located in the PMU */
+ if (bank->bank_num == 0) {
+ *regmap = info->regmap_pmu;
+ *reg = RK1108_DRV_PMU_OFFSET;
+ } else {
+ *regmap = info->regmap_base;
+ *reg = RK1108_DRV_GRF_OFFSET;
+
+ /* correct the offset, as we're starting with the 2nd bank */
+ *reg -= 0x10;
+ *reg += bank->bank_num * RK1108_DRV_BANK_STRIDE;
+ }
+
+ *reg += ((pin_num / RK1108_DRV_PINS_PER_REG) * 4);
+ *bit = pin_num % RK1108_DRV_PINS_PER_REG;
+ *bit *= RK1108_DRV_BITS_PER_PIN;
+}
+
#define RK2928_PULL_OFFSET 0x118
#define RK2928_PULL_PINS_PER_REG 16
#define RK2928_PULL_BANK_STRIDE 8
@@ -1123,6 +1183,7 @@ static int rockchip_get_pull(struct rockchip_pin_bank *bank, int pin_num)
return !(data & BIT(bit))
? PIN_CONFIG_BIAS_PULL_PIN_DEFAULT
: PIN_CONFIG_BIAS_DISABLE;
+ case RK1108:
case RK3188:
case RK3288:
case RK3368:
@@ -1169,6 +1230,7 @@ static int rockchip_set_pull(struct rockchip_pin_bank *bank,
spin_unlock_irqrestore(&bank->slock, flags);
break;
+ case RK1108:
case RK3188:
case RK3288:
case RK3368:
@@ -1358,6 +1420,7 @@ static bool rockchip_pinconf_pull_valid(struct rockchip_pin_ctrl *ctrl,
pull == PIN_CONFIG_BIAS_DISABLE);
case RK3066B:
return pull ? false : true;
+ case RK1108:
case RK3188:
case RK3288:
case RK3368:
@@ -2455,6 +2518,27 @@ static int rockchip_pinctrl_probe(struct platform_device *pdev)
return 0;
}
+static struct rockchip_pin_bank rk1108_pin_banks[] = {
+ PIN_BANK_IOMUX_FLAGS(0, 32, "gpio0", IOMUX_SOURCE_PMU,
+ IOMUX_SOURCE_PMU,
+ IOMUX_SOURCE_PMU,
+ IOMUX_SOURCE_PMU),
+ PIN_BANK_IOMUX_FLAGS(1, 32, "gpio1", 0, 0, 0, 0),
+ PIN_BANK_IOMUX_FLAGS(2, 32, "gpio2", 0, 0, 0, 0),
+ PIN_BANK_IOMUX_FLAGS(3, 32, "gpio3", 0, 0, 0, 0),
+};
+
+static struct rockchip_pin_ctrl rk1108_pin_ctrl = {
+ .pin_banks = rk1108_pin_banks,
+ .nr_banks = ARRAY_SIZE(rk1108_pin_banks),
+ .label = "RK1108-GPIO",
+ .type = RK1108,
+ .grf_mux_offset = 0x10,
+ .pmu_mux_offset = 0x0,
+ .pull_calc_reg = rk1108_calc_pull_reg_and_bit,
+ .drv_calc_reg = rk1108_calc_drv_reg_and_bit,
+};
+
static struct rockchip_pin_bank rk2928_pin_banks[] = {
PIN_BANK(0, 32, "gpio0"),
PIN_BANK(1, 32, "gpio1"),
@@ -2684,6 +2768,8 @@ static struct rockchip_pin_ctrl rk3399_pin_ctrl = {
};
static const struct of_device_id rockchip_pinctrl_dt_match[] = {
+ { .compatible = "rockchip,rk1108-pinctrl",
+ .data = (void *)&rk1108_pin_ctrl },
{ .compatible = "rockchip,rk2928-pinctrl",
.data = (void *)&rk2928_pin_ctrl },
{ .compatible = "rockchip,rk3036-pinctrl",
diff --git a/drivers/pinctrl/pinctrl-single.c b/drivers/pinctrl/pinctrl-single.c
index bfdf720db270..a5a0392ab817 100644
--- a/drivers/pinctrl/pinctrl-single.c
+++ b/drivers/pinctrl/pinctrl-single.c
@@ -31,12 +31,10 @@
#include <linux/platform_data/pinctrl-single.h>
#include "core.h"
+#include "devicetree.h"
#include "pinconf.h"
#define DRIVER_NAME "pinctrl-single"
-#define PCS_MUX_PINS_NAME "pinctrl-single,pins"
-#define PCS_MUX_BITS_NAME "pinctrl-single,bits"
-#define PCS_REG_NAME_LEN ((sizeof(unsigned long) * 2) + 3)
#define PCS_OFF_DISABLED ~0U
/**
@@ -142,20 +140,6 @@ struct pcs_data {
};
/**
- * struct pcs_name - register name for a pin
- * @name: name of the pinctrl register
- *
- * REVISIT: We may want to make names optional in the pinctrl
- * framework as some drivers may not care about pin names to
- * avoid kernel bloat. The pin names can be deciphered by user
- * space tools using debugfs based on the register address and
- * SoC packaging information.
- */
-struct pcs_name {
- char name[PCS_REG_NAME_LEN];
-};
-
-/**
* struct pcs_soc_data - SoC specific settings
* @flags: initial SoC specific PCS_FEAT_xxx values
* @irq: optional interrupt for the controller
@@ -177,8 +161,11 @@ struct pcs_soc_data {
* @base: virtual address of the controller
* @size: size of the ioremapped area
* @dev: device entry
+ * @np: device tree node
* @pctl: pin controller device
* @flags: mask of PCS_FEAT_xxx values
+ * @missing_nr_pinctrl_cells: for legacy binding, may go away
+ * @socdata: soc specific data
* @lock: spinlock for register access
* @mutex: mutex protecting the lists
* @width: bits per mux register
@@ -186,8 +173,8 @@ struct pcs_soc_data {
* @fshift: function register shift
* @foff: value to turn mux off
* @fmax: max number of functions in fmask
- * @bits_per_pin:number of bits per pin
- * @names: array of register names for pins
+ * @bits_per_mux: number of bits per mux
+ * @bits_per_pin: number of bits per pin
* @pins: physical pins on the SoC
* @pgtree: pingroup index radix tree
* @ftree: function index radix tree
@@ -208,11 +195,13 @@ struct pcs_device {
void __iomem *base;
unsigned size;
struct device *dev;
+ struct device_node *np;
struct pinctrl_dev *pctl;
unsigned flags;
#define PCS_QUIRK_SHARED_IRQ (1 << 2)
#define PCS_FEAT_IRQ (1 << 1)
#define PCS_FEAT_PINCONF (1 << 0)
+ struct property *missing_nr_pinctrl_cells;
struct pcs_soc_data socdata;
raw_spinlock_t lock;
struct mutex mutex;
@@ -223,7 +212,6 @@ struct pcs_device {
unsigned fmax;
bool bits_per_mux;
unsigned bits_per_pin;
- struct pcs_name *names;
struct pcs_data pins;
struct radix_tree_root pgtree;
struct radix_tree_root ftree;
@@ -354,13 +342,17 @@ static void pcs_pin_dbg_show(struct pinctrl_dev *pctldev,
{
struct pcs_device *pcs;
unsigned val, mux_bytes;
+ unsigned long offset;
+ size_t pa;
pcs = pinctrl_dev_get_drvdata(pctldev);
mux_bytes = pcs->width / BITS_PER_BYTE;
- val = pcs->read(pcs->base + pin * mux_bytes);
+ offset = pin * mux_bytes;
+ val = pcs->read(pcs->base + offset);
+ pa = pcs->res->start + offset;
- seq_printf(s, "%08x %s " , val, DRIVER_NAME);
+ seq_printf(s, "%zx %08x %s ", pa, val, DRIVER_NAME);
}
static void pcs_dt_free_map(struct pinctrl_dev *pctldev,
@@ -763,7 +755,6 @@ static int pcs_add_pin(struct pcs_device *pcs, unsigned offset,
{
struct pcs_soc_data *pcs_soc = &pcs->socdata;
struct pinctrl_pin_desc *pin;
- struct pcs_name *pn;
int i;
i = pcs->pins.cur;
@@ -786,10 +777,6 @@ static int pcs_add_pin(struct pcs_device *pcs, unsigned offset,
}
pin = &pcs->pins.pa[i];
- pn = &pcs->names[i];
- sprintf(pn->name, "%lx.%u",
- (unsigned long)pcs->res->start + offset, pin_pos);
- pin->name = pn->name;
pin->number = i;
pcs->pins.cur++;
@@ -827,12 +814,6 @@ static int pcs_allocate_pin_table(struct pcs_device *pcs)
if (!pcs->pins.pa)
return -ENOMEM;
- pcs->names = devm_kzalloc(pcs->dev,
- sizeof(struct pcs_name) * nr_pins,
- GFP_KERNEL);
- if (!pcs->names)
- return -ENOMEM;
-
pcs->desc.pins = pcs->pins.pa;
pcs->desc.npins = nr_pins;
@@ -1146,21 +1127,17 @@ static int pcs_parse_one_pinctrl_entry(struct pcs_device *pcs,
unsigned *num_maps,
const char **pgnames)
{
+ const char *name = "pinctrl-single,pins";
struct pcs_func_vals *vals;
- const __be32 *mux;
- int size, rows, *pins, index = 0, found = 0, res = -ENOMEM;
+ int rows, *pins, found = 0, res = -ENOMEM, i;
struct pcs_function *function;
- mux = of_get_property(np, PCS_MUX_PINS_NAME, &size);
- if ((!mux) || (size < sizeof(*mux) * 2)) {
- dev_err(pcs->dev, "bad data for mux %s\n",
- np->name);
+ rows = pinctrl_count_index_with_args(np, name);
+ if (rows <= 0) {
+ dev_err(pcs->dev, "Ivalid number of rows: %d\n", rows);
return -EINVAL;
}
- size /= sizeof(*mux); /* Number of elements in array */
- rows = size / 2;
-
vals = devm_kzalloc(pcs->dev, sizeof(*vals) * rows, GFP_KERNEL);
if (!vals)
return -ENOMEM;
@@ -1169,14 +1146,28 @@ static int pcs_parse_one_pinctrl_entry(struct pcs_device *pcs,
if (!pins)
goto free_vals;
- while (index < size) {
- unsigned offset, val;
+ for (i = 0; i < rows; i++) {
+ struct of_phandle_args pinctrl_spec;
+ unsigned int offset;
int pin;
- offset = be32_to_cpup(mux + index++);
- val = be32_to_cpup(mux + index++);
+ res = pinctrl_parse_index_with_args(np, name, i, &pinctrl_spec);
+ if (res)
+ return res;
+
+ if (pinctrl_spec.args_count < 2) {
+ dev_err(pcs->dev, "invalid args_count for spec: %i\n",
+ pinctrl_spec.args_count);
+ break;
+ }
+
+ /* Index plus one value cell */
+ offset = pinctrl_spec.args[0];
vals[found].reg = pcs->base + offset;
- vals[found].val = val;
+ vals[found].val = pinctrl_spec.args[1];
+
+ dev_dbg(pcs->dev, "%s index: 0x%x value: 0x%x\n",
+ pinctrl_spec.np->name, offset, pinctrl_spec.args[1]);
pin = pcs_get_pin_by_offset(pcs, offset);
if (pin < 0) {
@@ -1190,8 +1181,10 @@ static int pcs_parse_one_pinctrl_entry(struct pcs_device *pcs,
pgnames[0] = np->name;
function = pcs_add_function(pcs, np, np->name, vals, found, pgnames, 1);
- if (!function)
+ if (!function) {
+ res = -ENOMEM;
goto free_pins;
+ }
res = pcs_add_pingroup(pcs, np, np->name, pins, found);
if (res < 0)
@@ -1226,36 +1219,24 @@ free_vals:
return res;
}
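
The new parsing path above walks phandle-style rows instead of a raw property blob. As a minimal illustration of what one row carries, assuming a hypothetical device-tree entry pinctrl-single,pins = <0x170 0x107>; with #pinctrl-cells = <1>, args[0] is the register offset and args[1] the mux value:

    /* What one parsed "pinctrl-single,pins" row contains; the
     * example values 0x170/0x107 are hypothetical.
     */
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t args[2] = { 0x170, 0x107 };  /* of_phandle_args.args */
        uint32_t offset = args[0];            /* offset from pcs->base */
        uint32_t val = args[1];               /* value written to the mux */

        printf("base + 0x%x <- 0x%x\n", offset, val);
        return 0;
    }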
-#define PARAMS_FOR_BITS_PER_MUX 3
-
static int pcs_parse_bits_in_pinctrl_entry(struct pcs_device *pcs,
struct device_node *np,
struct pinctrl_map **map,
unsigned *num_maps,
const char **pgnames)
{
+ const char *name = "pinctrl-single,bits";
struct pcs_func_vals *vals;
- const __be32 *mux;
- int size, rows, *pins, index = 0, found = 0, res = -ENOMEM;
+ int rows, *pins, found = 0, res = -ENOMEM, i;
int npins_in_row;
struct pcs_function *function;
- mux = of_get_property(np, PCS_MUX_BITS_NAME, &size);
-
- if (!mux) {
- dev_err(pcs->dev, "no valid property for %s\n", np->name);
- return -EINVAL;
- }
-
- if (size < (sizeof(*mux) * PARAMS_FOR_BITS_PER_MUX)) {
- dev_err(pcs->dev, "bad data for %s\n", np->name);
+ rows = pinctrl_count_index_with_args(np, name);
+ if (rows <= 0) {
+ dev_err(pcs->dev, "Invalid number of rows: %d\n", rows);
return -EINVAL;
}
- /* Number of elements in array */
- size /= sizeof(*mux);
-
- rows = size / PARAMS_FOR_BITS_PER_MUX;
npins_in_row = pcs->width / pcs->bits_per_pin;
vals = devm_kzalloc(pcs->dev, sizeof(*vals) * rows * npins_in_row,
@@ -1268,15 +1249,30 @@ static int pcs_parse_bits_in_pinctrl_entry(struct pcs_device *pcs,
if (!pins)
goto free_vals;
- while (index < size) {
+ for (i = 0; i < rows; i++) {
+ struct of_phandle_args pinctrl_spec;
unsigned offset, val;
unsigned mask, bit_pos, val_pos, mask_pos, submask;
unsigned pin_num_from_lsb;
int pin;
- offset = be32_to_cpup(mux + index++);
- val = be32_to_cpup(mux + index++);
- mask = be32_to_cpup(mux + index++);
+ res = pinctrl_parse_index_with_args(np, name, i, &pinctrl_spec);
+ if (res)
+ return res;
+
+ if (pinctrl_spec.args_count < 3) {
+ dev_err(pcs->dev, "invalid args_count for spec: %i\n",
+ pinctrl_spec.args_count);
+ break;
+ }
+
+ /* Index plus two value cells */
+ offset = pinctrl_spec.args[0];
+ val = pinctrl_spec.args[1];
+ mask = pinctrl_spec.args[2];
+
+ dev_dbg(pcs->dev, "%s index: 0x%x value: 0x%x mask: 0x%x\n",
+ pinctrl_spec.np->name, offset, val, mask);
/* Parse pins in each row from LSB */
while (mask) {
@@ -1319,8 +1315,10 @@ static int pcs_parse_bits_in_pinctrl_entry(struct pcs_device *pcs,
pgnames[0] = np->name;
function = pcs_add_function(pcs, np, np->name, vals, found, pgnames, 1);
- if (!function)
+ if (!function) {
+ res = -ENOMEM;
goto free_pins;
+ }
res = pcs_add_pingroup(pcs, np, np->name, pins, found);
if (res < 0)
@@ -1494,17 +1492,12 @@ static void pcs_free_resources(struct pcs_device *pcs)
pinctrl_unregister(pcs->pctl);
pcs_free_funcs(pcs);
pcs_free_pingroups(pcs);
+#if IS_BUILTIN(CONFIG_PINCTRL_SINGLE)
+ if (pcs->missing_nr_pinctrl_cells)
+ of_remove_property(pcs->np, pcs->missing_nr_pinctrl_cells);
+#endif
}
-#define PCS_GET_PROP_U32(name, reg, err) \
- do { \
- ret = of_property_read_u32(np, name, reg); \
- if (ret) { \
- dev_err(pcs->dev, err); \
- return ret; \
- } \
- } while (0);
-
static const struct of_device_id pcs_of_match[];
static int pcs_add_gpio_func(struct device_node *node, struct pcs_device *pcs)
@@ -1820,6 +1813,55 @@ static int pinctrl_single_resume(struct platform_device *pdev)
}
#endif
+/**
+ * pcs_quirk_missing_pinctrl_cells - handle legacy binding
+ * @pcs: pinctrl driver instance
+ * @np: device tree node
+ * @cells: number of cells
+ *
+ * Handle legacy binding with no #pinctrl-cells. This should
+ * always be two for pinctrl-single,bit-per-mux and one for others.
+ * At some point we may want to consider removing this.
+ */
+static int pcs_quirk_missing_pinctrl_cells(struct pcs_device *pcs,
+ struct device_node *np,
+ int cells)
+{
+ struct property *p;
+ const char *name = "#pinctrl-cells";
+ int error;
+ u32 val;
+
+ error = of_property_read_u32(np, name, &val);
+ if (!error)
+ return 0;
+
+ dev_warn(pcs->dev, "please update dts to use %s = <%i>\n",
+ name, cells);
+
+ p = devm_kzalloc(pcs->dev, sizeof(*p), GFP_KERNEL);
+ if (!p)
+ return -ENOMEM;
+
+ p->length = sizeof(__be32);
+ p->value = devm_kzalloc(pcs->dev, sizeof(__be32), GFP_KERNEL);
+ if (!p->value)
+ return -ENOMEM;
+ *(__be32 *)p->value = cpu_to_be32(cells);
+
+ p->name = devm_kstrdup(pcs->dev, name, GFP_KERNEL);
+ if (!p->name)
+ return -ENOMEM;
+
+ pcs->missing_nr_pinctrl_cells = p;
+
+#if IS_BUILTIN(CONFIG_PINCTRL_SINGLE)
+ error = of_add_property(np, pcs->missing_nr_pinctrl_cells);
+#endif
+
+ return error;
+}
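
A stand-alone sketch of the value encoding used when the property is patched in: device-tree property payloads are big-endian, so the cell count is byte-swapped on little-endian hosts before being attached (htonl() stands in here for the kernel's cpu_to_be32()):

    #include <arpa/inet.h>  /* htonl(), a user-space stand-in for cpu_to_be32() */
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t cells = 2;            /* pinctrl-single,bit-per-mux case */
        uint32_t be = htonl(cells);    /* the 4 bytes stored in p->value */
        unsigned char *b = (unsigned char *)&be;

        /* prints 00 00 00 02 regardless of host endianness */
        printf("%02x %02x %02x %02x\n", b[0], b[1], b[2], b[3]);
        return 0;
    }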
+
static int pcs_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
@@ -1840,6 +1882,7 @@ static int pcs_probe(struct platform_device *pdev)
return -ENOMEM;
}
pcs->dev = &pdev->dev;
+ pcs->np = np;
raw_spin_lock_init(&pcs->lock);
mutex_init(&pcs->mutex);
INIT_LIST_HEAD(&pcs->pingroups);
@@ -1849,8 +1892,13 @@ static int pcs_probe(struct platform_device *pdev)
pcs->flags = soc->flags;
memcpy(&pcs->socdata, soc, sizeof(*soc));
- PCS_GET_PROP_U32("pinctrl-single,register-width", &pcs->width,
- "register width not specified\n");
+ ret = of_property_read_u32(np, "pinctrl-single,register-width",
+ &pcs->width);
+ if (ret) {
+ dev_err(pcs->dev, "register width not specified\n");
+
+ return ret;
+ }
ret = of_property_read_u32(np, "pinctrl-single,function-mask",
&pcs->fmask);
@@ -1871,6 +1919,13 @@ static int pcs_probe(struct platform_device *pdev)
pcs->bits_per_mux = of_property_read_bool(np,
"pinctrl-single,bit-per-mux");
+ ret = pcs_quirk_missing_pinctrl_cells(pcs, np,
+ pcs->bits_per_mux ? 2 : 1);
+ if (ret) {
+ dev_err(&pdev->dev, "unable to patch #pinctrl-cells\n");
+
+ return ret;
+ }
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res) {
diff --git a/drivers/pinctrl/pinctrl-st.c b/drivers/pinctrl/pinctrl-st.c
index b7bb37167969..676efcc032d2 100644
--- a/drivers/pinctrl/pinctrl-st.c
+++ b/drivers/pinctrl/pinctrl-st.c
@@ -1006,7 +1006,7 @@ static void st_pinconf_dbg_show(struct pinctrl_dev *pctldev,
function = st_pctl_get_pin_function(pc, offset);
if (function)
- snprintf(f, 10, "Alt Fn %d", function);
+ snprintf(f, 10, "Alt Fn %u", function);
else
snprintf(f, 5, "GPIO");
@@ -1181,7 +1181,7 @@ static int st_pctl_dt_parse_groups(struct device_node *np,
if (!strcmp(pp->name, "name"))
continue;
- if (pp && (pp->length/sizeof(__be32)) >= OF_GPIO_ARGS_MIN) {
+ if (pp->length / sizeof(__be32) >= OF_GPIO_ARGS_MIN) {
npins++;
} else {
pr_warn("Invalid st,pins in %s node\n", np->name);
diff --git a/drivers/pinctrl/pinctrl-sx150x.c b/drivers/pinctrl/pinctrl-sx150x.c
new file mode 100644
index 000000000000..29fb7403d24e
--- /dev/null
+++ b/drivers/pinctrl/pinctrl-sx150x.c
@@ -0,0 +1,1275 @@
+/*
+ * Copyright (c) 2016, BayLibre, SAS. All rights reserved.
+ * Author: Neil Armstrong <narmstrong@baylibre.com>
+ *
+ * Copyright (c) 2010, Code Aurora Forum. All rights reserved.
+ *
+ * Driver for Semtech SX150X I2C GPIO Expanders
+ * The handling of the 4-bit chips (SX1501/SX1504/SX1507) is untested.
+ *
+ * Author: Gregory Bean <gbean@codeaurora.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/regmap.h>
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/gpio/driver.h>
+#include <linux/pinctrl/pinconf.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/pinctrl/pinmux.h>
+#include <linux/pinctrl/pinconf-generic.h>
+
+#include "core.h"
+#include "pinconf.h"
+#include "pinctrl-utils.h"
+
+/* The chip models of sx150x */
+enum {
+ SX150X_123 = 0,
+ SX150X_456,
+ SX150X_789,
+};
+enum {
+ SX150X_789_REG_MISC_AUTOCLEAR_OFF = 1 << 0,
+ SX150X_MAX_REGISTER = 0xad,
+ SX150X_IRQ_TYPE_EDGE_RISING = 0x1,
+ SX150X_IRQ_TYPE_EDGE_FALLING = 0x2,
+ SX150X_789_RESET_KEY1 = 0x12,
+ SX150X_789_RESET_KEY2 = 0x34,
+};
+
+struct sx150x_123_pri {
+ u8 reg_pld_mode;
+ u8 reg_pld_table0;
+ u8 reg_pld_table1;
+ u8 reg_pld_table2;
+ u8 reg_pld_table3;
+ u8 reg_pld_table4;
+ u8 reg_advanced;
+};
+
+struct sx150x_456_pri {
+ u8 reg_pld_mode;
+ u8 reg_pld_table0;
+ u8 reg_pld_table1;
+ u8 reg_pld_table2;
+ u8 reg_pld_table3;
+ u8 reg_pld_table4;
+ u8 reg_advanced;
+};
+
+struct sx150x_789_pri {
+ u8 reg_drain;
+ u8 reg_polarity;
+ u8 reg_clock;
+ u8 reg_misc;
+ u8 reg_reset;
+ u8 ngpios;
+};
+
+struct sx150x_device_data {
+ u8 model;
+ u8 reg_pullup;
+ u8 reg_pulldn;
+ u8 reg_dir;
+ u8 reg_data;
+ u8 reg_irq_mask;
+ u8 reg_irq_src;
+ u8 reg_sense;
+ u8 ngpios;
+ union {
+ struct sx150x_123_pri x123;
+ struct sx150x_456_pri x456;
+ struct sx150x_789_pri x789;
+ } pri;
+ const struct pinctrl_pin_desc *pins;
+ unsigned int npins;
+};
+
+struct sx150x_pinctrl {
+ struct device *dev;
+ struct i2c_client *client;
+ struct pinctrl_dev *pctldev;
+ struct pinctrl_desc pinctrl_desc;
+ struct gpio_chip gpio;
+ struct irq_chip irq_chip;
+ struct regmap *regmap;
+ struct {
+ u32 sense;
+ u32 masked;
+ } irq;
+ struct mutex lock;
+ const struct sx150x_device_data *data;
+};
+
+static const struct pinctrl_pin_desc sx150x_4_pins[] = {
+ PINCTRL_PIN(0, "gpio0"),
+ PINCTRL_PIN(1, "gpio1"),
+ PINCTRL_PIN(2, "gpio2"),
+ PINCTRL_PIN(3, "gpio3"),
+ PINCTRL_PIN(4, "oscio"),
+};
+
+static const struct pinctrl_pin_desc sx150x_8_pins[] = {
+ PINCTRL_PIN(0, "gpio0"),
+ PINCTRL_PIN(1, "gpio1"),
+ PINCTRL_PIN(2, "gpio2"),
+ PINCTRL_PIN(3, "gpio3"),
+ PINCTRL_PIN(4, "gpio4"),
+ PINCTRL_PIN(5, "gpio5"),
+ PINCTRL_PIN(6, "gpio6"),
+ PINCTRL_PIN(7, "gpio7"),
+ PINCTRL_PIN(8, "oscio"),
+};
+
+static const struct pinctrl_pin_desc sx150x_16_pins[] = {
+ PINCTRL_PIN(0, "gpio0"),
+ PINCTRL_PIN(1, "gpio1"),
+ PINCTRL_PIN(2, "gpio2"),
+ PINCTRL_PIN(3, "gpio3"),
+ PINCTRL_PIN(4, "gpio4"),
+ PINCTRL_PIN(5, "gpio5"),
+ PINCTRL_PIN(6, "gpio6"),
+ PINCTRL_PIN(7, "gpio7"),
+ PINCTRL_PIN(8, "gpio8"),
+ PINCTRL_PIN(9, "gpio9"),
+ PINCTRL_PIN(10, "gpio10"),
+ PINCTRL_PIN(11, "gpio11"),
+ PINCTRL_PIN(12, "gpio12"),
+ PINCTRL_PIN(13, "gpio13"),
+ PINCTRL_PIN(14, "gpio14"),
+ PINCTRL_PIN(15, "gpio15"),
+ PINCTRL_PIN(16, "oscio"),
+};
+
+static const struct sx150x_device_data sx1501q_device_data = {
+ .model = SX150X_123,
+ .reg_pullup = 0x02,
+ .reg_pulldn = 0x03,
+ .reg_dir = 0x01,
+ .reg_data = 0x00,
+ .reg_irq_mask = 0x05,
+ .reg_irq_src = 0x08,
+ .reg_sense = 0x07,
+ .pri.x123 = {
+ .reg_pld_mode = 0x10,
+ .reg_pld_table0 = 0x11,
+ .reg_pld_table2 = 0x13,
+ .reg_advanced = 0xad,
+ },
+ .ngpios = 4,
+ .pins = sx150x_4_pins,
+ .npins = 4, /* oscio not available */
+};
+
+static const struct sx150x_device_data sx1502q_device_data = {
+ .model = SX150X_123,
+ .reg_pullup = 0x02,
+ .reg_pulldn = 0x03,
+ .reg_dir = 0x01,
+ .reg_data = 0x00,
+ .reg_irq_mask = 0x05,
+ .reg_irq_src = 0x08,
+ .reg_sense = 0x06,
+ .pri.x123 = {
+ .reg_pld_mode = 0x10,
+ .reg_pld_table0 = 0x11,
+ .reg_pld_table1 = 0x12,
+ .reg_pld_table2 = 0x13,
+ .reg_pld_table3 = 0x14,
+ .reg_pld_table4 = 0x15,
+ .reg_advanced = 0xad,
+ },
+ .ngpios = 8,
+ .pins = sx150x_8_pins,
+ .npins = 8, /* oscio not available */
+};
+
+static const struct sx150x_device_data sx1503q_device_data = {
+ .model = SX150X_123,
+ .reg_pullup = 0x04,
+ .reg_pulldn = 0x06,
+ .reg_dir = 0x02,
+ .reg_data = 0x00,
+ .reg_irq_mask = 0x08,
+ .reg_irq_src = 0x0e,
+ .reg_sense = 0x0a,
+ .pri.x123 = {
+ .reg_pld_mode = 0x20,
+ .reg_pld_table0 = 0x22,
+ .reg_pld_table1 = 0x24,
+ .reg_pld_table2 = 0x26,
+ .reg_pld_table3 = 0x28,
+ .reg_pld_table4 = 0x2a,
+ .reg_advanced = 0xad,
+ },
+ .ngpios = 16,
+ .pins = sx150x_16_pins,
+ .npins = 16, /* oscio not available */
+};
+
+static const struct sx150x_device_data sx1504q_device_data = {
+ .model = SX150X_456,
+ .reg_pullup = 0x02,
+ .reg_pulldn = 0x03,
+ .reg_dir = 0x01,
+ .reg_data = 0x00,
+ .reg_irq_mask = 0x05,
+ .reg_irq_src = 0x08,
+ .reg_sense = 0x07,
+ .pri.x456 = {
+ .reg_pld_mode = 0x10,
+ .reg_pld_table0 = 0x11,
+ .reg_pld_table2 = 0x13,
+ },
+ .ngpios = 4,
+ .pins = sx150x_4_pins,
+ .npins = 4, /* oscio not available */
+};
+
+static const struct sx150x_device_data sx1505q_device_data = {
+ .model = SX150X_456,
+ .reg_pullup = 0x02,
+ .reg_pulldn = 0x03,
+ .reg_dir = 0x01,
+ .reg_data = 0x00,
+ .reg_irq_mask = 0x05,
+ .reg_irq_src = 0x08,
+ .reg_sense = 0x06,
+ .pri.x456 = {
+ .reg_pld_mode = 0x10,
+ .reg_pld_table0 = 0x11,
+ .reg_pld_table1 = 0x12,
+ .reg_pld_table2 = 0x13,
+ .reg_pld_table3 = 0x14,
+ .reg_pld_table4 = 0x15,
+ },
+ .ngpios = 8,
+ .pins = sx150x_8_pins,
+ .npins = 8, /* oscio not available */
+};
+
+static const struct sx150x_device_data sx1506q_device_data = {
+ .model = SX150X_456,
+ .reg_pullup = 0x04,
+ .reg_pulldn = 0x06,
+ .reg_dir = 0x02,
+ .reg_data = 0x00,
+ .reg_irq_mask = 0x08,
+ .reg_irq_src = 0x0e,
+ .reg_sense = 0x0a,
+ .pri.x456 = {
+ .reg_pld_mode = 0x20,
+ .reg_pld_table0 = 0x22,
+ .reg_pld_table1 = 0x24,
+ .reg_pld_table2 = 0x26,
+ .reg_pld_table3 = 0x28,
+ .reg_pld_table4 = 0x2a,
+ .reg_advanced = 0xad,
+ },
+ .ngpios = 16,
+ .pins = sx150x_16_pins,
+ .npins = 16, /* oscio not available */
+};
+
+static const struct sx150x_device_data sx1507q_device_data = {
+ .model = SX150X_789,
+ .reg_pullup = 0x03,
+ .reg_pulldn = 0x04,
+ .reg_dir = 0x07,
+ .reg_data = 0x08,
+ .reg_irq_mask = 0x09,
+ .reg_irq_src = 0x0b,
+ .reg_sense = 0x0a,
+ .pri.x789 = {
+ .reg_drain = 0x05,
+ .reg_polarity = 0x06,
+ .reg_clock = 0x0d,
+ .reg_misc = 0x0e,
+ .reg_reset = 0x7d,
+ },
+ .ngpios = 4,
+ .pins = sx150x_4_pins,
+ .npins = ARRAY_SIZE(sx150x_4_pins),
+};
+
+static const struct sx150x_device_data sx1508q_device_data = {
+ .model = SX150X_789,
+ .reg_pullup = 0x03,
+ .reg_pulldn = 0x04,
+ .reg_dir = 0x07,
+ .reg_data = 0x08,
+ .reg_irq_mask = 0x09,
+ .reg_irq_src = 0x0c,
+ .reg_sense = 0x0a,
+ .pri.x789 = {
+ .reg_drain = 0x05,
+ .reg_polarity = 0x06,
+ .reg_clock = 0x0f,
+ .reg_misc = 0x10,
+ .reg_reset = 0x7d,
+ },
+ .ngpios = 8,
+ .pins = sx150x_8_pins,
+ .npins = ARRAY_SIZE(sx150x_8_pins),
+};
+
+static const struct sx150x_device_data sx1509q_device_data = {
+ .model = SX150X_789,
+ .reg_pullup = 0x06,
+ .reg_pulldn = 0x08,
+ .reg_dir = 0x0e,
+ .reg_data = 0x10,
+ .reg_irq_mask = 0x12,
+ .reg_irq_src = 0x18,
+ .reg_sense = 0x14,
+ .pri.x789 = {
+ .reg_drain = 0x0a,
+ .reg_polarity = 0x0c,
+ .reg_clock = 0x1e,
+ .reg_misc = 0x1f,
+ .reg_reset = 0x7d,
+ },
+ .ngpios = 16,
+ .pins = sx150x_16_pins,
+ .npins = ARRAY_SIZE(sx150x_16_pins),
+};
+
+static int sx150x_pinctrl_get_groups_count(struct pinctrl_dev *pctldev)
+{
+ return 0;
+}
+
+static const char *sx150x_pinctrl_get_group_name(struct pinctrl_dev *pctldev,
+ unsigned int group)
+{
+ return NULL;
+}
+
+static int sx150x_pinctrl_get_group_pins(struct pinctrl_dev *pctldev,
+ unsigned int group,
+ const unsigned int **pins,
+ unsigned int *num_pins)
+{
+ return -ENOTSUPP;
+}
+
+static const struct pinctrl_ops sx150x_pinctrl_ops = {
+ .get_groups_count = sx150x_pinctrl_get_groups_count,
+ .get_group_name = sx150x_pinctrl_get_group_name,
+ .get_group_pins = sx150x_pinctrl_get_group_pins,
+#ifdef CONFIG_OF
+ .dt_node_to_map = pinconf_generic_dt_node_to_map_pin,
+ .dt_free_map = pinctrl_utils_free_map,
+#endif
+};
+
+static bool sx150x_pin_is_oscio(struct sx150x_pinctrl *pctl, unsigned int pin)
+{
+ if (pin >= pctl->data->npins)
+ return false;
+
+ /* OSCIO pin is only present in 789 devices */
+ if (pctl->data->model != SX150X_789)
+ return false;
+
+ return !strcmp(pctl->data->pins[pin].name, "oscio");
+}
+
+static int sx150x_gpio_get_direction(struct gpio_chip *chip,
+ unsigned int offset)
+{
+ struct sx150x_pinctrl *pctl = gpiochip_get_data(chip);
+ unsigned int value;
+ int ret;
+
+ if (sx150x_pin_is_oscio(pctl, offset))
+ return false;
+
+ ret = regmap_read(pctl->regmap, pctl->data->reg_dir, &value);
+ if (ret < 0)
+ return ret;
+
+ return !!(value & BIT(offset));
+}
+
+static int sx150x_gpio_get(struct gpio_chip *chip, unsigned int offset)
+{
+ struct sx150x_pinctrl *pctl = gpiochip_get_data(chip);
+ unsigned int value;
+ int ret;
+
+ if (sx150x_pin_is_oscio(pctl, offset))
+ return -EINVAL;
+
+ ret = regmap_read(pctl->regmap, pctl->data->reg_data, &value);
+ if (ret < 0)
+ return ret;
+
+ return !!(value & BIT(offset));
+}
+
+static int sx150x_gpio_set_single_ended(struct gpio_chip *chip,
+ unsigned int offset,
+ enum single_ended_mode mode)
+{
+ struct sx150x_pinctrl *pctl = gpiochip_get_data(chip);
+ int ret;
+
+ switch (mode) {
+ case LINE_MODE_PUSH_PULL:
+ if (pctl->data->model != SX150X_789 ||
+ sx150x_pin_is_oscio(pctl, offset))
+ return 0;
+
+ ret = regmap_write_bits(pctl->regmap,
+ pctl->data->pri.x789.reg_drain,
+ BIT(offset), 0);
+ break;
+
+ case LINE_MODE_OPEN_DRAIN:
+ if (pctl->data->model != SX150X_789 ||
+ sx150x_pin_is_oscio(pctl, offset))
+ return -ENOTSUPP;
+
+ ret = regmap_write_bits(pctl->regmap,
+ pctl->data->pri.x789.reg_drain,
+ BIT(offset), BIT(offset));
+ break;
+ default:
+ ret = -ENOTSUPP;
+ break;
+ }
+
+ return ret;
+}
+
+static int __sx150x_gpio_set(struct sx150x_pinctrl *pctl, unsigned int offset,
+ int value)
+{
+ return regmap_write_bits(pctl->regmap, pctl->data->reg_data,
+ BIT(offset), value ? BIT(offset) : 0);
+}
+
+static int sx150x_gpio_oscio_set(struct sx150x_pinctrl *pctl,
+ int value)
+{
+ return regmap_write(pctl->regmap,
+ pctl->data->pri.x789.reg_clock,
+ (value ? 0x1f : 0x10));
+}
+
+static void sx150x_gpio_set(struct gpio_chip *chip, unsigned int offset,
+ int value)
+{
+ struct sx150x_pinctrl *pctl = gpiochip_get_data(chip);
+
+ if (sx150x_pin_is_oscio(pctl, offset))
+ sx150x_gpio_oscio_set(pctl, value);
+ else
+ __sx150x_gpio_set(pctl, offset, value);
+}
+
+static void sx150x_gpio_set_multiple(struct gpio_chip *chip,
+ unsigned long *mask,
+ unsigned long *bits)
+{
+ struct sx150x_pinctrl *pctl = gpiochip_get_data(chip);
+
+ regmap_write_bits(pctl->regmap, pctl->data->reg_data, *mask, *bits);
+}
+
+static int sx150x_gpio_direction_input(struct gpio_chip *chip,
+ unsigned int offset)
+{
+ struct sx150x_pinctrl *pctl = gpiochip_get_data(chip);
+
+ if (sx150x_pin_is_oscio(pctl, offset))
+ return -EINVAL;
+
+ return regmap_write_bits(pctl->regmap,
+ pctl->data->reg_dir,
+ BIT(offset), BIT(offset));
+}
+
+static int sx150x_gpio_direction_output(struct gpio_chip *chip,
+ unsigned int offset, int value)
+{
+ struct sx150x_pinctrl *pctl = gpiochip_get_data(chip);
+ int ret;
+
+ if (sx150x_pin_is_oscio(pctl, offset))
+ return sx150x_gpio_oscio_set(pctl, value);
+
+ ret = __sx150x_gpio_set(pctl, offset, value);
+ if (ret < 0)
+ return ret;
+
+ return regmap_write_bits(pctl->regmap,
+ pctl->data->reg_dir,
+ BIT(offset), 0);
+}
+
+static void sx150x_irq_mask(struct irq_data *d)
+{
+ struct sx150x_pinctrl *pctl =
+ gpiochip_get_data(irq_data_get_irq_chip_data(d));
+ unsigned int n = d->hwirq;
+
+ pctl->irq.masked |= BIT(n);
+}
+
+static void sx150x_irq_unmask(struct irq_data *d)
+{
+ struct sx150x_pinctrl *pctl =
+ gpiochip_get_data(irq_data_get_irq_chip_data(d));
+ unsigned int n = d->hwirq;
+
+ pctl->irq.masked &= ~BIT(n);
+}
+
+static void sx150x_irq_set_sense(struct sx150x_pinctrl *pctl,
+ unsigned int line, unsigned int sense)
+{
+ /*
+ * Every interrupt line is represented by two bits shifted
+ * proportionally to the line number
+ */
+ const unsigned int n = line * 2;
+ const unsigned int mask = ~((SX150X_IRQ_TYPE_EDGE_RISING |
+ SX150X_IRQ_TYPE_EDGE_FALLING) << n);
+
+ pctl->irq.sense &= mask;
+ pctl->irq.sense |= sense << n;
+}
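
Because each line owns two sense bits, the masking above clears exactly one two-bit field before installing the new sense. A quick stand-alone check, with the constants copied from this file:

    #include <stdio.h>

    #define SX150X_IRQ_TYPE_EDGE_RISING  0x1
    #define SX150X_IRQ_TYPE_EDGE_FALLING 0x2

    int main(void)
    {
        unsigned int sense = ~0u;   /* pretend every field was set */
        unsigned int line = 3;
        unsigned int n = line * 2;  /* two bits per line */
        unsigned int mask = ~((SX150X_IRQ_TYPE_EDGE_RISING |
                               SX150X_IRQ_TYPE_EDGE_FALLING) << n);

        sense &= mask;
        sense |= SX150X_IRQ_TYPE_EDGE_RISING << n;

        printf("sense = 0x%08x\n", sense);  /* 0xffffff7f: field 3 = 01 */
        return 0;
    }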
+
+static int sx150x_irq_set_type(struct irq_data *d, unsigned int flow_type)
+{
+ struct sx150x_pinctrl *pctl =
+ gpiochip_get_data(irq_data_get_irq_chip_data(d));
+ unsigned int n, val = 0;
+
+ if (flow_type & (IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_LEVEL_LOW))
+ return -EINVAL;
+
+ n = d->hwirq;
+
+ if (flow_type & IRQ_TYPE_EDGE_RISING)
+ val |= SX150X_IRQ_TYPE_EDGE_RISING;
+ if (flow_type & IRQ_TYPE_EDGE_FALLING)
+ val |= SX150X_IRQ_TYPE_EDGE_FALLING;
+
+ sx150x_irq_set_sense(pctl, n, val);
+ return 0;
+}
+
+static irqreturn_t sx150x_irq_thread_fn(int irq, void *dev_id)
+{
+ struct sx150x_pinctrl *pctl = (struct sx150x_pinctrl *)dev_id;
+ unsigned long n, status;
+ unsigned int val;
+ int err;
+
+ err = regmap_read(pctl->regmap, pctl->data->reg_irq_src, &val);
+ if (err < 0)
+ return IRQ_NONE;
+
+ err = regmap_write(pctl->regmap, pctl->data->reg_irq_src, val);
+ if (err < 0)
+ return IRQ_NONE;
+
+ status = val;
+ for_each_set_bit(n, &status, pctl->data->ngpios)
+ handle_nested_irq(irq_find_mapping(pctl->gpio.irqdomain, n));
+
+ return IRQ_HANDLED;
+}
+
+static void sx150x_irq_bus_lock(struct irq_data *d)
+{
+ struct sx150x_pinctrl *pctl =
+ gpiochip_get_data(irq_data_get_irq_chip_data(d));
+
+ mutex_lock(&pctl->lock);
+}
+
+static void sx150x_irq_bus_sync_unlock(struct irq_data *d)
+{
+ struct sx150x_pinctrl *pctl =
+ gpiochip_get_data(irq_data_get_irq_chip_data(d));
+
+ regmap_write(pctl->regmap, pctl->data->reg_irq_mask, pctl->irq.masked);
+ regmap_write(pctl->regmap, pctl->data->reg_sense, pctl->irq.sense);
+ mutex_unlock(&pctl->lock);
+}
+
+static int sx150x_pinconf_get(struct pinctrl_dev *pctldev, unsigned int pin,
+ unsigned long *config)
+{
+ struct sx150x_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev);
+ unsigned int param = pinconf_to_config_param(*config);
+ int ret;
+ u32 arg;
+ unsigned int data;
+
+ if (sx150x_pin_is_oscio(pctl, pin)) {
+ switch (param) {
+ case PIN_CONFIG_DRIVE_PUSH_PULL:
+ case PIN_CONFIG_OUTPUT:
+ ret = regmap_read(pctl->regmap,
+ pctl->data->pri.x789.reg_clock,
+ &data);
+ if (ret < 0)
+ return ret;
+
+ if (param == PIN_CONFIG_DRIVE_PUSH_PULL)
+ arg = (data & 0x1f) ? 1 : 0;
+ else {
+ if ((data & 0x1f) == 0x1f)
+ arg = 1;
+ else if ((data & 0x1f) == 0x10)
+ arg = 0;
+ else
+ return -EINVAL;
+ }
+
+ break;
+ default:
+ return -ENOTSUPP;
+ }
+
+ goto out;
+ }
+
+ switch (param) {
+ case PIN_CONFIG_BIAS_PULL_DOWN:
+ ret = regmap_read(pctl->regmap,
+ pctl->data->reg_pulldn,
+ &data);
+ data &= BIT(pin);
+
+ if (ret < 0)
+ return ret;
+
+ if (!data)
+ return -EINVAL;
+
+ arg = 1;
+ break;
+
+ case PIN_CONFIG_BIAS_PULL_UP:
+ ret = regmap_read(pctl->regmap,
+ pctl->data->reg_pullup,
+ &data);
+ data &= BIT(pin);
+
+ if (ret < 0)
+ return ret;
+
+ if (!data)
+ return -EINVAL;
+
+ arg = 1;
+ break;
+
+ case PIN_CONFIG_DRIVE_OPEN_DRAIN:
+ if (pctl->data->model != SX150X_789)
+ return -ENOTSUPP;
+
+ ret = regmap_read(pctl->regmap,
+ pctl->data->pri.x789.reg_drain,
+ &data);
+ data &= BIT(pin);
+
+ if (ret < 0)
+ return ret;
+
+ if (!data)
+ return -EINVAL;
+
+ arg = 1;
+ break;
+
+ case PIN_CONFIG_DRIVE_PUSH_PULL:
+ if (pctl->data->model != SX150X_789)
+ arg = 1;
+ else {
+ ret = regmap_read(pctl->regmap,
+ pctl->data->pri.x789.reg_drain,
+ &data);
+ data &= BIT(pin);
+
+ if (ret < 0)
+ return ret;
+
+ if (data)
+ return -EINVAL;
+
+ arg = 1;
+ }
+ break;
+
+ case PIN_CONFIG_OUTPUT:
+ ret = sx150x_gpio_get_direction(&pctl->gpio, pin);
+ if (ret < 0)
+ return ret;
+
+ if (ret)
+ return -EINVAL;
+
+ ret = sx150x_gpio_get(&pctl->gpio, pin);
+ if (ret < 0)
+ return ret;
+
+ arg = ret;
+ break;
+
+ default:
+ return -ENOTSUPP;
+ }
+
+out:
+ *config = pinconf_to_config_packed(param, arg);
+
+ return 0;
+}
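
The result is handed back through pinconf_to_config_packed(), which packs the parameter into the low 8 bits and the argument into the bits above it (mirroring PIN_CONF_PACKED() in pinconf-generic.h of this era). A stand-alone sketch of that round trip:

    #include <stdio.h>

    /* mirrors PIN_CONF_PACKED()/pinconf_to_config_*() from pinconf-generic.h */
    #define PACK(param, arg) (((unsigned long)(arg) << 8) | ((param) & 0xffUL))
    #define PARAM(conf)      ((conf) & 0xffUL)
    #define ARG(conf)        ((unsigned long)(conf) >> 8)

    int main(void)
    {
        unsigned long conf = PACK(9 /* hypothetical param id */, 1);

        printf("param %lu arg %lu\n", PARAM(conf), ARG(conf));
        return 0;
    }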
+
+static int sx150x_pinconf_set(struct pinctrl_dev *pctldev, unsigned int pin,
+ unsigned long *configs, unsigned int num_configs)
+{
+ struct sx150x_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev);
+ enum pin_config_param param;
+ u32 arg;
+ int i;
+ int ret;
+
+ for (i = 0; i < num_configs; i++) {
+ param = pinconf_to_config_param(configs[i]);
+ arg = pinconf_to_config_argument(configs[i]);
+
+ if (sx150x_pin_is_oscio(pctl, pin)) {
+ if (param == PIN_CONFIG_OUTPUT) {
+ ret = sx150x_gpio_direction_output(&pctl->gpio,
+ pin, arg);
+ if (ret < 0)
+ return ret;
+
+ continue;
+ } else
+ return -ENOTSUPP;
+ }
+
+ switch (param) {
+ case PIN_CONFIG_BIAS_PULL_PIN_DEFAULT:
+ case PIN_CONFIG_BIAS_DISABLE:
+ ret = regmap_write_bits(pctl->regmap,
+ pctl->data->reg_pulldn,
+ BIT(pin), 0);
+ if (ret < 0)
+ return ret;
+
+ ret = regmap_write_bits(pctl->regmap,
+ pctl->data->reg_pullup,
+ BIT(pin), 0);
+ if (ret < 0)
+ return ret;
+
+ break;
+
+ case PIN_CONFIG_BIAS_PULL_UP:
+ ret = regmap_write_bits(pctl->regmap,
+ pctl->data->reg_pullup,
+ BIT(pin), BIT(pin));
+ if (ret < 0)
+ return ret;
+
+ break;
+
+ case PIN_CONFIG_BIAS_PULL_DOWN:
+ ret = regmap_write_bits(pctl->regmap,
+ pctl->data->reg_pulldn,
+ BIT(pin), BIT(pin));
+ if (ret < 0)
+ return ret;
+
+ break;
+
+ case PIN_CONFIG_DRIVE_OPEN_DRAIN:
+ ret = sx150x_gpio_set_single_ended(&pctl->gpio,
+ pin, LINE_MODE_OPEN_DRAIN);
+ if (ret < 0)
+ return ret;
+
+ break;
+
+ case PIN_CONFIG_DRIVE_PUSH_PULL:
+ ret = sx150x_gpio_set_single_ended(&pctl->gpio,
+ pin, LINE_MODE_PUSH_PULL);
+ if (ret < 0)
+ return ret;
+
+ break;
+
+ case PIN_CONFIG_OUTPUT:
+ ret = sx150x_gpio_direction_output(&pctl->gpio,
+ pin, arg);
+ if (ret < 0)
+ return ret;
+
+ break;
+
+ default:
+ return -ENOTSUPP;
+ }
+ } /* for each config */
+
+ return 0;
+}
+
+static const struct pinconf_ops sx150x_pinconf_ops = {
+ .pin_config_get = sx150x_pinconf_get,
+ .pin_config_set = sx150x_pinconf_set,
+ .is_generic = true,
+};
+
+static const struct i2c_device_id sx150x_id[] = {
+ {"sx1501q", (kernel_ulong_t) &sx1501q_device_data },
+ {"sx1502q", (kernel_ulong_t) &sx1502q_device_data },
+ {"sx1503q", (kernel_ulong_t) &sx1503q_device_data },
+ {"sx1504q", (kernel_ulong_t) &sx1504q_device_data },
+ {"sx1505q", (kernel_ulong_t) &sx1505q_device_data },
+ {"sx1506q", (kernel_ulong_t) &sx1506q_device_data },
+ {"sx1507q", (kernel_ulong_t) &sx1507q_device_data },
+ {"sx1508q", (kernel_ulong_t) &sx1508q_device_data },
+ {"sx1509q", (kernel_ulong_t) &sx1509q_device_data },
+ {}
+};
+
+static const struct of_device_id sx150x_of_match[] = {
+ { .compatible = "semtech,sx1501q", .data = &sx1501q_device_data },
+ { .compatible = "semtech,sx1502q", .data = &sx1502q_device_data },
+ { .compatible = "semtech,sx1503q", .data = &sx1503q_device_data },
+ { .compatible = "semtech,sx1504q", .data = &sx1504q_device_data },
+ { .compatible = "semtech,sx1505q", .data = &sx1505q_device_data },
+ { .compatible = "semtech,sx1506q", .data = &sx1506q_device_data },
+ { .compatible = "semtech,sx1507q", .data = &sx1507q_device_data },
+ { .compatible = "semtech,sx1508q", .data = &sx1508q_device_data },
+ { .compatible = "semtech,sx1509q", .data = &sx1509q_device_data },
+ {},
+};
+
+static int sx150x_reset(struct sx150x_pinctrl *pctl)
+{
+ int err;
+
+ err = i2c_smbus_write_byte_data(pctl->client,
+ pctl->data->pri.x789.reg_reset,
+ SX150X_789_RESET_KEY1);
+ if (err < 0)
+ return err;
+
+ err = i2c_smbus_write_byte_data(pctl->client,
+ pctl->data->pri.x789.reg_reset,
+ SX150X_789_RESET_KEY2);
+ return err;
+}
+
+static int sx150x_init_misc(struct sx150x_pinctrl *pctl)
+{
+ u8 reg, value;
+
+ switch (pctl->data->model) {
+ case SX150X_789:
+ reg = pctl->data->pri.x789.reg_misc;
+ value = SX150X_789_REG_MISC_AUTOCLEAR_OFF;
+ break;
+ case SX150X_456:
+ reg = pctl->data->pri.x456.reg_advanced;
+ value = 0x00;
+
+ /*
+ * Only the SX1506 has RegAdvanced; the SX1504/5 are expected
+ * to leave this offset initialized to zero.
+ */
+ if (!reg)
+ return 0;
+ break;
+ case SX150X_123:
+ reg = pctl->data->pri.x123.reg_advanced;
+ value = 0x00;
+ break;
+ default:
+ WARN(1, "Unknown chip model %d\n", pctl->data->model);
+ return -EINVAL;
+ }
+
+ return regmap_write(pctl->regmap, reg, value);
+}
+
+static int sx150x_init_hw(struct sx150x_pinctrl *pctl)
+{
+ const u8 reg[] = {
+ [SX150X_789] = pctl->data->pri.x789.reg_polarity,
+ [SX150X_456] = pctl->data->pri.x456.reg_pld_mode,
+ [SX150X_123] = pctl->data->pri.x123.reg_pld_mode,
+ };
+ int err;
+
+ if (pctl->data->model == SX150X_789 &&
+ of_property_read_bool(pctl->dev->of_node, "semtech,probe-reset")) {
+ err = sx150x_reset(pctl);
+ if (err < 0)
+ return err;
+ }
+
+ err = sx150x_init_misc(pctl);
+ if (err < 0)
+ return err;
+
+ /* Set all pins to work in normal mode */
+ return regmap_write(pctl->regmap, reg[pctl->data->model], 0);
+}
+
+static int sx150x_regmap_reg_width(struct sx150x_pinctrl *pctl,
+ unsigned int reg)
+{
+ const struct sx150x_device_data *data = pctl->data;
+
+ if (reg == data->reg_sense) {
+ /*
+ * RegSense packs two bits of configuration per GPIO,
+ * so we'd need to read twice as many bits as there
+ * are GPIO in our chip
+ */
+ return 2 * data->ngpios;
+ } else if ((data->model == SX150X_789 &&
+ (reg == data->pri.x789.reg_misc ||
+ reg == data->pri.x789.reg_clock ||
+ reg == data->pri.x789.reg_reset))
+ ||
+ (data->model == SX150X_123 &&
+ reg == data->pri.x123.reg_advanced)
+ ||
+ (data->model == SX150X_456 &&
+ data->pri.x456.reg_advanced &&
+ reg == data->pri.x456.reg_advanced)) {
+ return 8;
+ } else {
+ return data->ngpios;
+ }
+}
+
+static unsigned int sx150x_maybe_swizzle(struct sx150x_pinctrl *pctl,
+ unsigned int reg, unsigned int val)
+{
+ unsigned int a, b;
+ const struct sx150x_device_data *data = pctl->data;
+
+ /*
+ * Whereas the SX1509 presents RegSense in a simple layout:
+ * reg [ f f e e d d c c ]
+ * reg + 1 [ b b a a 9 9 8 8 ]
+ * reg + 2 [ 7 7 6 6 5 5 4 4 ]
+ * reg + 3 [ 3 3 2 2 1 1 0 0 ]
+ *
+ * SX1503 and SX1506 deviate from that data layout, instead storing
+ * their contents as follows:
+ *
+ * reg [ f f e e d d c c ]
+ * reg + 1 [ 7 7 6 6 5 5 4 4 ]
+ * reg + 2 [ b b a a 9 9 8 8 ]
+ * reg + 3 [ 3 3 2 2 1 1 0 0 ]
+ *
+ * so, taking that into account, we swap the two
+ * inner bytes of a 4-byte result.
+ */
+
+ if (reg == data->reg_sense &&
+ data->ngpios == 16 &&
+ (data->model == SX150X_123 ||
+ data->model == SX150X_456)) {
+ a = val & 0x00ff0000;
+ b = val & 0x0000ff00;
+
+ val &= 0xff0000ff;
+ val |= b << 8;
+ val |= a >> 8;
+ }
+
+ return val;
+}
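
A stand-alone check of the inner-byte swap above: a 16-pin SX1503/SX1506 RegSense read of 0xffaa5500 (sample value) comes back as 0xff55aa00 in the layout the rest of the driver expects:

    #include <stdio.h>

    int main(void)
    {
        unsigned int val = 0xffaa5500;       /* sample chip-order readback */
        unsigned int a = val & 0x00ff0000;
        unsigned int b = val & 0x0000ff00;

        val &= 0xff0000ff;                   /* keep the outer bytes */
        val |= b << 8;                       /* move byte 1 up to byte 2 */
        val |= a >> 8;                       /* move byte 2 down to byte 1 */

        printf("0x%08x\n", val);             /* 0xff55aa00 */
        return 0;
    }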
+
+/*
+ * In order to mask the differences between 16 and 8 bit expander
+ * devices we set up a slightly fictitious regmap that pretends to be
+ * a set of 32-bit (to accommodate RegSenseLow/RegSenseHigh
+ * pair/quartet) registers and transparently reconstructs those
+ * registers via multiple I2C/SMBus reads.
+ *
+ * This way the rest of the driver code, interfacing with the chip via
+ * regmap API, can work assuming that each GPIO pin is represented by
+ * a group of bits at an offset proportional to GPIO number within a
+ * given register.
+ */
+static int sx150x_regmap_reg_read(void *context, unsigned int reg,
+ unsigned int *result)
+{
+ int ret, n;
+ struct sx150x_pinctrl *pctl = context;
+ struct i2c_client *i2c = pctl->client;
+ const int width = sx150x_regmap_reg_width(pctl, reg);
+ unsigned int idx, val;
+
+ /*
+ * There are four potential cases covered by this function:
+ *
+ * 1) 8-pin chip, single configuration bit register
+ *
+ * This is trivial: the code below just needs to read:
+ * reg [ 7 6 5 4 3 2 1 0 ]
+ *
+ * 2) 8-pin chip, double configuration bit register (RegSense)
+ *
+ * The read will be done as follows:
+ * reg [ 7 7 6 6 5 5 4 4 ]
+ * reg + 1 [ 3 3 2 2 1 1 0 0 ]
+ *
+ * 3) 16-pin chip, single configuration bit register
+ *
+ * The read will be done as follows:
+ * reg [ f e d c b a 9 8 ]
+ * reg + 1 [ 7 6 5 4 3 2 1 0 ]
+ *
+ * 4) 16-pin chip, double configuration bit register (RegSense)
+ *
+ * The read will be done as follows:
+ * reg [ f f e e d d c c ]
+ * reg + 1 [ b b a a 9 9 8 8 ]
+ * reg + 2 [ 7 7 6 6 5 5 4 4 ]
+ * reg + 3 [ 3 3 2 2 1 1 0 0 ]
+ */
+
+ for (n = width, val = 0, idx = reg; n > 0; n -= 8, idx++) {
+ val <<= 8;
+
+ ret = i2c_smbus_read_byte_data(i2c, idx);
+ if (ret < 0)
+ return ret;
+
+ val |= ret;
+ }
+
+ *result = sx150x_maybe_swizzle(pctl, reg, val);
+
+ return 0;
+}
+
+static int sx150x_regmap_reg_write(void *context, unsigned int reg,
+ unsigned int val)
+{
+ int ret, n;
+ struct sx150x_pinctrl *pctl = context;
+ struct i2c_client *i2c = pctl->client;
+ const int width = sx150x_regmap_reg_width(pctl, reg);
+
+ val = sx150x_maybe_swizzle(pctl, reg, val);
+
+ n = (width - 1) & ~7;
+ do {
+ const u8 byte = (val >> n) & 0xff;
+
+ ret = i2c_smbus_write_byte_data(i2c, reg, byte);
+ if (ret < 0)
+ return ret;
+
+ reg++;
+ n -= 8;
+ } while (n >= 0);
+
+ return 0;
+}
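
The write loop above emits bytes most-significant first; `n = (width - 1) & ~7` rounds the register width down to the shift of its top byte. A stand-alone sketch for a 32-bit register:

    #include <stdio.h>

    int main(void)
    {
        unsigned int val = 0x11223344;  /* sample 32-bit register value */
        int width = 32;
        int n = (width - 1) & ~7;       /* 24: shift of the top byte */

        do {
            printf("0x%02x ", (val >> n) & 0xff);  /* 11 22 33 44 */
            n -= 8;
        } while (n >= 0);
        printf("\n");
        return 0;
    }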
+
+static bool sx150x_reg_volatile(struct device *dev, unsigned int reg)
+{
+ struct sx150x_pinctrl *pctl = i2c_get_clientdata(to_i2c_client(dev));
+
+ return reg == pctl->data->reg_irq_src || reg == pctl->data->reg_data;
+}
+
+static const struct regmap_config sx150x_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 32,
+
+ .cache_type = REGCACHE_RBTREE,
+
+ .reg_read = sx150x_regmap_reg_read,
+ .reg_write = sx150x_regmap_reg_write,
+
+ .max_register = SX150X_MAX_REGISTER,
+ .volatile_reg = sx150x_reg_volatile,
+};
+
+static int sx150x_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ static const u32 i2c_funcs = I2C_FUNC_SMBUS_BYTE_DATA |
+ I2C_FUNC_SMBUS_WRITE_WORD_DATA;
+ struct device *dev = &client->dev;
+ struct sx150x_pinctrl *pctl;
+ int ret;
+
+ if (!i2c_check_functionality(client->adapter, i2c_funcs))
+ return -ENOSYS;
+
+ pctl = devm_kzalloc(dev, sizeof(*pctl), GFP_KERNEL);
+ if (!pctl)
+ return -ENOMEM;
+
+ i2c_set_clientdata(client, pctl);
+
+ pctl->dev = dev;
+ pctl->client = client;
+
+ if (dev->of_node)
+ pctl->data = of_device_get_match_data(dev);
+ else
+ pctl->data = (struct sx150x_device_data *)id->driver_data;
+
+ if (!pctl->data)
+ return -EINVAL;
+
+ pctl->regmap = devm_regmap_init(dev, NULL, pctl,
+ &sx150x_regmap_config);
+ if (IS_ERR(pctl->regmap)) {
+ ret = PTR_ERR(pctl->regmap);
+ dev_err(dev, "Failed to allocate register map: %d\n",
+ ret);
+ return ret;
+ }
+
+ mutex_init(&pctl->lock);
+
+ ret = sx150x_init_hw(pctl);
+ if (ret)
+ return ret;
+
+ /* Register GPIO controller */
+ pctl->gpio.label = devm_kstrdup(dev, client->name, GFP_KERNEL);
+ pctl->gpio.base = -1;
+ pctl->gpio.ngpio = pctl->data->npins;
+ pctl->gpio.get_direction = sx150x_gpio_get_direction;
+ pctl->gpio.direction_input = sx150x_gpio_direction_input;
+ pctl->gpio.direction_output = sx150x_gpio_direction_output;
+ pctl->gpio.get = sx150x_gpio_get;
+ pctl->gpio.set = sx150x_gpio_set;
+ pctl->gpio.set_single_ended = sx150x_gpio_set_single_ended;
+ pctl->gpio.parent = dev;
+#ifdef CONFIG_OF_GPIO
+ pctl->gpio.of_node = dev->of_node;
+#endif
+ pctl->gpio.can_sleep = true;
+ /*
+ * Setting multiple pins is only safe when all of them are
+ * handled by the same regmap register. The oscio pin (present
+ * on the SX150X_789 chips) lives in its own register, so it
+ * would require locking that is not in place at this time.
+ */
+ if (pctl->data->model != SX150X_789)
+ pctl->gpio.set_multiple = sx150x_gpio_set_multiple;
+
+ ret = devm_gpiochip_add_data(dev, &pctl->gpio, pctl);
+ if (ret)
+ return ret;
+
+ /* Add Interrupt support if an irq is specified */
+ if (client->irq > 0) {
+ pctl->irq_chip.name = devm_kstrdup(dev, client->name,
+ GFP_KERNEL);
+ pctl->irq_chip.irq_mask = sx150x_irq_mask;
+ pctl->irq_chip.irq_unmask = sx150x_irq_unmask;
+ pctl->irq_chip.irq_set_type = sx150x_irq_set_type;
+ pctl->irq_chip.irq_bus_lock = sx150x_irq_bus_lock;
+ pctl->irq_chip.irq_bus_sync_unlock = sx150x_irq_bus_sync_unlock;
+
+ pctl->irq.masked = ~0;
+ pctl->irq.sense = 0;
+
+ /*
+ * Because sx150x_irq_thread_fn invokes all of the
+ * nested interrupt handlers via handle_nested_irq,
+ * any "handler" passed to gpiochip_irqchip_add()
+ * below is going to be ignored, so the choice of the
+ * function does not matter that much.
+ *
+ * We set it to handle_bad_irq to avoid confusion,
+ * plus it will be instantly noticeable if it is ever
+ * called (should not happen)
+ */
+ ret = gpiochip_irqchip_add_nested(&pctl->gpio,
+ &pctl->irq_chip, 0,
+ handle_bad_irq, IRQ_TYPE_NONE);
+ if (ret) {
+ dev_err(dev, "could not connect irqchip to gpiochip\n");
+ return ret;
+ }
+
+ ret = devm_request_threaded_irq(dev, client->irq, NULL,
+ sx150x_irq_thread_fn,
+ IRQF_ONESHOT | IRQF_SHARED |
+ IRQF_TRIGGER_FALLING,
+ pctl->irq_chip.name, pctl);
+ if (ret < 0)
+ return ret;
+
+ gpiochip_set_nested_irqchip(&pctl->gpio,
+ &pctl->irq_chip,
+ client->irq);
+ }
+
+ /* Pinctrl_desc */
+ pctl->pinctrl_desc.name = "sx150x-pinctrl";
+ pctl->pinctrl_desc.pctlops = &sx150x_pinctrl_ops;
+ pctl->pinctrl_desc.confops = &sx150x_pinconf_ops;
+ pctl->pinctrl_desc.pins = pctl->data->pins;
+ pctl->pinctrl_desc.npins = pctl->data->npins;
+ pctl->pinctrl_desc.owner = THIS_MODULE;
+
+ pctl->pctldev = pinctrl_register(&pctl->pinctrl_desc, dev, pctl);
+ if (IS_ERR(pctl->pctldev)) {
+ dev_err(dev, "Failed to register pinctrl device\n");
+ return PTR_ERR(pctl->pctldev);
+ }
+
+ return 0;
+}
+
+static struct i2c_driver sx150x_driver = {
+ .driver = {
+ .name = "sx150x-pinctrl",
+ .of_match_table = of_match_ptr(sx150x_of_match),
+ },
+ .probe = sx150x_probe,
+ .id_table = sx150x_id,
+};
+
+static int __init sx150x_init(void)
+{
+ return i2c_add_driver(&sx150x_driver);
+}
+subsys_initcall(sx150x_init);
diff --git a/drivers/pinctrl/pinctrl-zynq.c b/drivers/pinctrl/pinctrl-zynq.c
index e0ecffcbe11f..b51a46dfdcc3 100644
--- a/drivers/pinctrl/pinctrl-zynq.c
+++ b/drivers/pinctrl/pinctrl-zynq.c
@@ -247,6 +247,8 @@ static const unsigned int smc0_nor_addr25_pins[] = {1};
static const unsigned int smc0_nand_pins[] = {0, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11,
12, 13, 14, 16, 17, 18, 19, 20,
21, 22, 23};
+static const unsigned int smc0_nand8_pins[] = {0, 2, 3, 4, 5, 6, 7,
+ 8, 9, 10, 11, 12, 13, 14};
/* Note: CAN MIO clock inputs are modeled in the clock framework */
static const unsigned int can0_0_pins[] = {10, 11};
static const unsigned int can0_1_pins[] = {14, 15};
@@ -445,6 +447,7 @@ static const struct zynq_pctrl_group zynq_pctrl_groups[] = {
DEFINE_ZYNQ_PINCTRL_GRP(smc0_nor_cs1),
DEFINE_ZYNQ_PINCTRL_GRP(smc0_nor_addr25),
DEFINE_ZYNQ_PINCTRL_GRP(smc0_nand),
+ DEFINE_ZYNQ_PINCTRL_GRP(smc0_nand8),
DEFINE_ZYNQ_PINCTRL_GRP(can0_0),
DEFINE_ZYNQ_PINCTRL_GRP(can0_1),
DEFINE_ZYNQ_PINCTRL_GRP(can0_2),
@@ -709,7 +712,8 @@ static const char * const sdio1_wp_groups[] = {"gpio0_0_grp",
static const char * const smc0_nor_groups[] = {"smc0_nor_grp"};
static const char * const smc0_nor_cs1_groups[] = {"smc0_nor_cs1_grp"};
static const char * const smc0_nor_addr25_groups[] = {"smc0_nor_addr25_grp"};
-static const char * const smc0_nand_groups[] = {"smc0_nand_grp"};
+static const char * const smc0_nand_groups[] = {"smc0_nand_grp",
+ "smc0_nand8_grp"};
static const char * const can0_groups[] = {"can0_0_grp", "can0_1_grp",
"can0_2_grp", "can0_3_grp", "can0_4_grp", "can0_5_grp",
"can0_6_grp", "can0_7_grp", "can0_8_grp", "can0_9_grp",
diff --git a/drivers/pinctrl/qcom/Kconfig b/drivers/pinctrl/qcom/Kconfig
index 93ef268d5ccd..3ebdc01f53c0 100644
--- a/drivers/pinctrl/qcom/Kconfig
+++ b/drivers/pinctrl/qcom/Kconfig
@@ -79,6 +79,15 @@ config PINCTRL_MSM8916
This is the pinctrl, pinmux, pinconf and gpiolib driver for the
Qualcomm TLMM block found on the Qualcomm 8916 platform.
+config PINCTRL_MSM8994
+ tristate "Qualcomm 8994 pin controller driver"
+ depends on GPIOLIB && OF
+ select PINCTRL_MSM
+ help
+ This is the pinctrl, pinmux, pinconf and gpiolib driver for the
+ Qualcomm TLMM block found in the Qualcomm 8994 platform. The
+ Qualcomm 8992 platform is also supported by this driver.
+
config PINCTRL_MSM8996
tristate "Qualcomm MSM8996 pin controller driver"
depends on GPIOLIB && OF
diff --git a/drivers/pinctrl/qcom/Makefile b/drivers/pinctrl/qcom/Makefile
index 8319e11cecb5..ab47764dbc5c 100644
--- a/drivers/pinctrl/qcom/Makefile
+++ b/drivers/pinctrl/qcom/Makefile
@@ -8,6 +8,7 @@ obj-$(CONFIG_PINCTRL_MSM8660) += pinctrl-msm8660.o
obj-$(CONFIG_PINCTRL_MSM8960) += pinctrl-msm8960.o
obj-$(CONFIG_PINCTRL_MSM8X74) += pinctrl-msm8x74.o
obj-$(CONFIG_PINCTRL_MSM8916) += pinctrl-msm8916.o
+obj-$(CONFIG_PINCTRL_MSM8994) += pinctrl-msm8994.o
obj-$(CONFIG_PINCTRL_MSM8996) += pinctrl-msm8996.o
obj-$(CONFIG_PINCTRL_QDF2XXX) += pinctrl-qdf2xxx.o
obj-$(CONFIG_PINCTRL_MDM9615) += pinctrl-mdm9615.o
diff --git a/drivers/pinctrl/qcom/pinctrl-msm8994.c b/drivers/pinctrl/qcom/pinctrl-msm8994.c
new file mode 100644
index 000000000000..8e16d9ae0c39
--- /dev/null
+++ b/drivers/pinctrl/qcom/pinctrl-msm8994.c
@@ -0,0 +1,1379 @@
+/*
+ * Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pinctrl/pinctrl.h>
+
+#include "pinctrl-msm.h"
+
+#define FUNCTION(fname) \
+ [MSM_MUX_##fname] = { \
+ .name = #fname, \
+ .groups = fname##_groups, \
+ .ngroups = ARRAY_SIZE(fname##_groups), \
+ }
+
+#define PINGROUP(id, f1, f2, f3, f4, f5, f6, f7, f8, f9, f10, f11) \
+ { \
+ .name = "gpio" #id, \
+ .pins = gpio##id##_pins, \
+ .npins = ARRAY_SIZE(gpio##id##_pins), \
+ .funcs = (int[]){ \
+ MSM_MUX_gpio, \
+ MSM_MUX_##f1, \
+ MSM_MUX_##f2, \
+ MSM_MUX_##f3, \
+ MSM_MUX_##f4, \
+ MSM_MUX_##f5, \
+ MSM_MUX_##f6, \
+ MSM_MUX_##f7, \
+ MSM_MUX_##f8, \
+ MSM_MUX_##f9, \
+ MSM_MUX_##f10, \
+ MSM_MUX_##f11 \
+ }, \
+ .nfuncs = 12, \
+ .ctl_reg = 0x1000 + 0x10 * id, \
+ .io_reg = 0x1004 + 0x10 * id, \
+ .intr_cfg_reg = 0x1008 + 0x10 * id, \
+ .intr_status_reg = 0x100c + 0x10 * id, \
+ .intr_target_reg = 0x1008 + 0x10 * id, \
+ .mux_bit = 2, \
+ .pull_bit = 0, \
+ .drv_bit = 6, \
+ .oe_bit = 9, \
+ .in_bit = 0, \
+ .out_bit = 1, \
+ .intr_enable_bit = 0, \
+ .intr_status_bit = 0, \
+ .intr_target_bit = 5, \
+ .intr_target_kpss_val = 4, \
+ .intr_raw_status_bit = 4, \
+ .intr_polarity_bit = 1, \
+ .intr_detection_bit = 2, \
+ .intr_detection_width = 2, \
+ }
+
+#define SDC_PINGROUP(pg_name, ctl, pull, drv) \
+ { \
+ .name = #pg_name, \
+ .pins = pg_name##_pins, \
+ .npins = ARRAY_SIZE(pg_name##_pins), \
+ .ctl_reg = ctl, \
+ .io_reg = 0, \
+ .intr_cfg_reg = 0, \
+ .intr_status_reg = 0, \
+ .intr_target_reg = 0, \
+ .mux_bit = -1, \
+ .pull_bit = pull, \
+ .drv_bit = drv, \
+ .oe_bit = -1, \
+ .in_bit = -1, \
+ .out_bit = -1, \
+ .intr_enable_bit = -1, \
+ .intr_status_bit = -1, \
+ .intr_target_bit = -1, \
+ .intr_target_kpss_val = -1, \
+ .intr_raw_status_bit = -1, \
+ .intr_polarity_bit = -1, \
+ .intr_detection_bit = -1, \
+ .intr_detection_width = -1, \
+ }
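
The TLMM register addresses produced by PINGROUP() above are a fixed 0x10 stride from 0x1000; for example, gpio5 resolves as in this stand-alone sketch:

    #include <stdio.h>

    int main(void)
    {
        int id = 5;  /* gpio5 */

        printf("ctl 0x%x io 0x%x intr_cfg 0x%x intr_status 0x%x\n",
               0x1000 + 0x10 * id,   /* 0x1050 */
               0x1004 + 0x10 * id,   /* 0x1054 */
               0x1008 + 0x10 * id,   /* 0x1058 */
               0x100c + 0x10 * id);  /* 0x105c */
        return 0;
    }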
+static const struct pinctrl_pin_desc msm8994_pins[] = {
+ PINCTRL_PIN(0, "GPIO_0"),
+ PINCTRL_PIN(1, "GPIO_1"),
+ PINCTRL_PIN(2, "GPIO_2"),
+ PINCTRL_PIN(3, "GPIO_3"),
+ PINCTRL_PIN(4, "GPIO_4"),
+ PINCTRL_PIN(5, "GPIO_5"),
+ PINCTRL_PIN(6, "GPIO_6"),
+ PINCTRL_PIN(7, "GPIO_7"),
+ PINCTRL_PIN(8, "GPIO_8"),
+ PINCTRL_PIN(9, "GPIO_9"),
+ PINCTRL_PIN(10, "GPIO_10"),
+ PINCTRL_PIN(11, "GPIO_11"),
+ PINCTRL_PIN(12, "GPIO_12"),
+ PINCTRL_PIN(13, "GPIO_13"),
+ PINCTRL_PIN(14, "GPIO_14"),
+ PINCTRL_PIN(15, "GPIO_15"),
+ PINCTRL_PIN(16, "GPIO_16"),
+ PINCTRL_PIN(17, "GPIO_17"),
+ PINCTRL_PIN(18, "GPIO_18"),
+ PINCTRL_PIN(19, "GPIO_19"),
+ PINCTRL_PIN(20, "GPIO_20"),
+ PINCTRL_PIN(21, "GPIO_21"),
+ PINCTRL_PIN(22, "GPIO_22"),
+ PINCTRL_PIN(23, "GPIO_23"),
+ PINCTRL_PIN(24, "GPIO_24"),
+ PINCTRL_PIN(25, "GPIO_25"),
+ PINCTRL_PIN(26, "GPIO_26"),
+ PINCTRL_PIN(27, "GPIO_27"),
+ PINCTRL_PIN(28, "GPIO_28"),
+ PINCTRL_PIN(29, "GPIO_29"),
+ PINCTRL_PIN(30, "GPIO_30"),
+ PINCTRL_PIN(31, "GPIO_31"),
+ PINCTRL_PIN(32, "GPIO_32"),
+ PINCTRL_PIN(33, "GPIO_33"),
+ PINCTRL_PIN(34, "GPIO_34"),
+ PINCTRL_PIN(35, "GPIO_35"),
+ PINCTRL_PIN(36, "GPIO_36"),
+ PINCTRL_PIN(37, "GPIO_37"),
+ PINCTRL_PIN(38, "GPIO_38"),
+ PINCTRL_PIN(39, "GPIO_39"),
+ PINCTRL_PIN(40, "GPIO_40"),
+ PINCTRL_PIN(41, "GPIO_41"),
+ PINCTRL_PIN(42, "GPIO_42"),
+ PINCTRL_PIN(43, "GPIO_43"),
+ PINCTRL_PIN(44, "GPIO_44"),
+ PINCTRL_PIN(45, "GPIO_45"),
+ PINCTRL_PIN(46, "GPIO_46"),
+ PINCTRL_PIN(47, "GPIO_47"),
+ PINCTRL_PIN(48, "GPIO_48"),
+ PINCTRL_PIN(49, "GPIO_49"),
+ PINCTRL_PIN(50, "GPIO_50"),
+ PINCTRL_PIN(51, "GPIO_51"),
+ PINCTRL_PIN(52, "GPIO_52"),
+ PINCTRL_PIN(53, "GPIO_53"),
+ PINCTRL_PIN(54, "GPIO_54"),
+ PINCTRL_PIN(55, "GPIO_55"),
+ PINCTRL_PIN(56, "GPIO_56"),
+ PINCTRL_PIN(57, "GPIO_57"),
+ PINCTRL_PIN(58, "GPIO_58"),
+ PINCTRL_PIN(59, "GPIO_59"),
+ PINCTRL_PIN(60, "GPIO_60"),
+ PINCTRL_PIN(61, "GPIO_61"),
+ PINCTRL_PIN(62, "GPIO_62"),
+ PINCTRL_PIN(63, "GPIO_63"),
+ PINCTRL_PIN(64, "GPIO_64"),
+ PINCTRL_PIN(65, "GPIO_65"),
+ PINCTRL_PIN(66, "GPIO_66"),
+ PINCTRL_PIN(67, "GPIO_67"),
+ PINCTRL_PIN(68, "GPIO_68"),
+ PINCTRL_PIN(69, "GPIO_69"),
+ PINCTRL_PIN(70, "GPIO_70"),
+ PINCTRL_PIN(71, "GPIO_71"),
+ PINCTRL_PIN(72, "GPIO_72"),
+ PINCTRL_PIN(73, "GPIO_73"),
+ PINCTRL_PIN(74, "GPIO_74"),
+ PINCTRL_PIN(75, "GPIO_75"),
+ PINCTRL_PIN(76, "GPIO_76"),
+ PINCTRL_PIN(77, "GPIO_77"),
+ PINCTRL_PIN(78, "GPIO_78"),
+ PINCTRL_PIN(79, "GPIO_79"),
+ PINCTRL_PIN(80, "GPIO_80"),
+ PINCTRL_PIN(81, "GPIO_81"),
+ PINCTRL_PIN(82, "GPIO_82"),
+ PINCTRL_PIN(83, "GPIO_83"),
+ PINCTRL_PIN(84, "GPIO_84"),
+ PINCTRL_PIN(85, "GPIO_85"),
+ PINCTRL_PIN(86, "GPIO_86"),
+ PINCTRL_PIN(87, "GPIO_87"),
+ PINCTRL_PIN(88, "GPIO_88"),
+ PINCTRL_PIN(89, "GPIO_89"),
+ PINCTRL_PIN(90, "GPIO_90"),
+ PINCTRL_PIN(91, "GPIO_91"),
+ PINCTRL_PIN(92, "GPIO_92"),
+ PINCTRL_PIN(93, "GPIO_93"),
+ PINCTRL_PIN(94, "GPIO_94"),
+ PINCTRL_PIN(95, "GPIO_95"),
+ PINCTRL_PIN(96, "GPIO_96"),
+ PINCTRL_PIN(97, "GPIO_97"),
+ PINCTRL_PIN(98, "GPIO_98"),
+ PINCTRL_PIN(99, "GPIO_99"),
+ PINCTRL_PIN(100, "GPIO_100"),
+ PINCTRL_PIN(101, "GPIO_101"),
+ PINCTRL_PIN(102, "GPIO_102"),
+ PINCTRL_PIN(103, "GPIO_103"),
+ PINCTRL_PIN(104, "GPIO_104"),
+ PINCTRL_PIN(105, "GPIO_105"),
+ PINCTRL_PIN(106, "GPIO_106"),
+ PINCTRL_PIN(107, "GPIO_107"),
+ PINCTRL_PIN(108, "GPIO_108"),
+ PINCTRL_PIN(109, "GPIO_109"),
+ PINCTRL_PIN(110, "GPIO_110"),
+ PINCTRL_PIN(111, "GPIO_111"),
+ PINCTRL_PIN(112, "GPIO_112"),
+ PINCTRL_PIN(113, "GPIO_113"),
+ PINCTRL_PIN(114, "GPIO_114"),
+ PINCTRL_PIN(115, "GPIO_115"),
+ PINCTRL_PIN(116, "GPIO_116"),
+ PINCTRL_PIN(117, "GPIO_117"),
+ PINCTRL_PIN(118, "GPIO_118"),
+ PINCTRL_PIN(119, "GPIO_119"),
+ PINCTRL_PIN(120, "GPIO_120"),
+ PINCTRL_PIN(121, "GPIO_121"),
+ PINCTRL_PIN(122, "GPIO_122"),
+ PINCTRL_PIN(123, "GPIO_123"),
+ PINCTRL_PIN(124, "GPIO_124"),
+ PINCTRL_PIN(125, "GPIO_125"),
+ PINCTRL_PIN(126, "GPIO_126"),
+ PINCTRL_PIN(127, "GPIO_127"),
+ PINCTRL_PIN(128, "GPIO_128"),
+ PINCTRL_PIN(129, "GPIO_129"),
+ PINCTRL_PIN(130, "GPIO_130"),
+ PINCTRL_PIN(131, "GPIO_131"),
+ PINCTRL_PIN(132, "GPIO_132"),
+ PINCTRL_PIN(133, "GPIO_133"),
+ PINCTRL_PIN(134, "GPIO_134"),
+ PINCTRL_PIN(135, "GPIO_135"),
+ PINCTRL_PIN(136, "GPIO_136"),
+ PINCTRL_PIN(137, "GPIO_137"),
+ PINCTRL_PIN(138, "GPIO_138"),
+ PINCTRL_PIN(139, "GPIO_139"),
+ PINCTRL_PIN(140, "GPIO_140"),
+ PINCTRL_PIN(141, "GPIO_141"),
+ PINCTRL_PIN(142, "GPIO_142"),
+ PINCTRL_PIN(143, "GPIO_143"),
+ PINCTRL_PIN(144, "GPIO_144"),
+ PINCTRL_PIN(145, "GPIO_145"),
+ PINCTRL_PIN(146, "SDC1_RCLK"),
+ PINCTRL_PIN(147, "SDC1_CLK"),
+ PINCTRL_PIN(148, "SDC1_CMD"),
+ PINCTRL_PIN(149, "SDC1_DATA"),
+ PINCTRL_PIN(150, "SDC2_CLK"),
+ PINCTRL_PIN(151, "SDC2_CMD"),
+ PINCTRL_PIN(152, "SDC2_DATA"),
+ PINCTRL_PIN(153, "SDC3_CLK"),
+ PINCTRL_PIN(154, "SDC3_CMD"),
+ PINCTRL_PIN(155, "SDC3_DATA"),
+};
+
+#define DECLARE_MSM_GPIO_PINS(pin) \
+ static const unsigned int gpio##pin##_pins[] = { pin }
+DECLARE_MSM_GPIO_PINS(0);
+DECLARE_MSM_GPIO_PINS(1);
+DECLARE_MSM_GPIO_PINS(2);
+DECLARE_MSM_GPIO_PINS(3);
+DECLARE_MSM_GPIO_PINS(4);
+DECLARE_MSM_GPIO_PINS(5);
+DECLARE_MSM_GPIO_PINS(6);
+DECLARE_MSM_GPIO_PINS(7);
+DECLARE_MSM_GPIO_PINS(8);
+DECLARE_MSM_GPIO_PINS(9);
+DECLARE_MSM_GPIO_PINS(10);
+DECLARE_MSM_GPIO_PINS(11);
+DECLARE_MSM_GPIO_PINS(12);
+DECLARE_MSM_GPIO_PINS(13);
+DECLARE_MSM_GPIO_PINS(14);
+DECLARE_MSM_GPIO_PINS(15);
+DECLARE_MSM_GPIO_PINS(16);
+DECLARE_MSM_GPIO_PINS(17);
+DECLARE_MSM_GPIO_PINS(18);
+DECLARE_MSM_GPIO_PINS(19);
+DECLARE_MSM_GPIO_PINS(20);
+DECLARE_MSM_GPIO_PINS(21);
+DECLARE_MSM_GPIO_PINS(22);
+DECLARE_MSM_GPIO_PINS(23);
+DECLARE_MSM_GPIO_PINS(24);
+DECLARE_MSM_GPIO_PINS(25);
+DECLARE_MSM_GPIO_PINS(26);
+DECLARE_MSM_GPIO_PINS(27);
+DECLARE_MSM_GPIO_PINS(28);
+DECLARE_MSM_GPIO_PINS(29);
+DECLARE_MSM_GPIO_PINS(30);
+DECLARE_MSM_GPIO_PINS(31);
+DECLARE_MSM_GPIO_PINS(32);
+DECLARE_MSM_GPIO_PINS(33);
+DECLARE_MSM_GPIO_PINS(34);
+DECLARE_MSM_GPIO_PINS(35);
+DECLARE_MSM_GPIO_PINS(36);
+DECLARE_MSM_GPIO_PINS(37);
+DECLARE_MSM_GPIO_PINS(38);
+DECLARE_MSM_GPIO_PINS(39);
+DECLARE_MSM_GPIO_PINS(40);
+DECLARE_MSM_GPIO_PINS(41);
+DECLARE_MSM_GPIO_PINS(42);
+DECLARE_MSM_GPIO_PINS(43);
+DECLARE_MSM_GPIO_PINS(44);
+DECLARE_MSM_GPIO_PINS(45);
+DECLARE_MSM_GPIO_PINS(46);
+DECLARE_MSM_GPIO_PINS(47);
+DECLARE_MSM_GPIO_PINS(48);
+DECLARE_MSM_GPIO_PINS(49);
+DECLARE_MSM_GPIO_PINS(50);
+DECLARE_MSM_GPIO_PINS(51);
+DECLARE_MSM_GPIO_PINS(52);
+DECLARE_MSM_GPIO_PINS(53);
+DECLARE_MSM_GPIO_PINS(54);
+DECLARE_MSM_GPIO_PINS(55);
+DECLARE_MSM_GPIO_PINS(56);
+DECLARE_MSM_GPIO_PINS(57);
+DECLARE_MSM_GPIO_PINS(58);
+DECLARE_MSM_GPIO_PINS(59);
+DECLARE_MSM_GPIO_PINS(60);
+DECLARE_MSM_GPIO_PINS(61);
+DECLARE_MSM_GPIO_PINS(62);
+DECLARE_MSM_GPIO_PINS(63);
+DECLARE_MSM_GPIO_PINS(64);
+DECLARE_MSM_GPIO_PINS(65);
+DECLARE_MSM_GPIO_PINS(66);
+DECLARE_MSM_GPIO_PINS(67);
+DECLARE_MSM_GPIO_PINS(68);
+DECLARE_MSM_GPIO_PINS(69);
+DECLARE_MSM_GPIO_PINS(70);
+DECLARE_MSM_GPIO_PINS(71);
+DECLARE_MSM_GPIO_PINS(72);
+DECLARE_MSM_GPIO_PINS(73);
+DECLARE_MSM_GPIO_PINS(74);
+DECLARE_MSM_GPIO_PINS(75);
+DECLARE_MSM_GPIO_PINS(76);
+DECLARE_MSM_GPIO_PINS(77);
+DECLARE_MSM_GPIO_PINS(78);
+DECLARE_MSM_GPIO_PINS(79);
+DECLARE_MSM_GPIO_PINS(80);
+DECLARE_MSM_GPIO_PINS(81);
+DECLARE_MSM_GPIO_PINS(82);
+DECLARE_MSM_GPIO_PINS(83);
+DECLARE_MSM_GPIO_PINS(84);
+DECLARE_MSM_GPIO_PINS(85);
+DECLARE_MSM_GPIO_PINS(86);
+DECLARE_MSM_GPIO_PINS(87);
+DECLARE_MSM_GPIO_PINS(88);
+DECLARE_MSM_GPIO_PINS(89);
+DECLARE_MSM_GPIO_PINS(90);
+DECLARE_MSM_GPIO_PINS(91);
+DECLARE_MSM_GPIO_PINS(92);
+DECLARE_MSM_GPIO_PINS(93);
+DECLARE_MSM_GPIO_PINS(94);
+DECLARE_MSM_GPIO_PINS(95);
+DECLARE_MSM_GPIO_PINS(96);
+DECLARE_MSM_GPIO_PINS(97);
+DECLARE_MSM_GPIO_PINS(98);
+DECLARE_MSM_GPIO_PINS(99);
+DECLARE_MSM_GPIO_PINS(100);
+DECLARE_MSM_GPIO_PINS(101);
+DECLARE_MSM_GPIO_PINS(102);
+DECLARE_MSM_GPIO_PINS(103);
+DECLARE_MSM_GPIO_PINS(104);
+DECLARE_MSM_GPIO_PINS(105);
+DECLARE_MSM_GPIO_PINS(106);
+DECLARE_MSM_GPIO_PINS(107);
+DECLARE_MSM_GPIO_PINS(108);
+DECLARE_MSM_GPIO_PINS(109);
+DECLARE_MSM_GPIO_PINS(110);
+DECLARE_MSM_GPIO_PINS(111);
+DECLARE_MSM_GPIO_PINS(112);
+DECLARE_MSM_GPIO_PINS(113);
+DECLARE_MSM_GPIO_PINS(114);
+DECLARE_MSM_GPIO_PINS(115);
+DECLARE_MSM_GPIO_PINS(116);
+DECLARE_MSM_GPIO_PINS(117);
+DECLARE_MSM_GPIO_PINS(118);
+DECLARE_MSM_GPIO_PINS(119);
+DECLARE_MSM_GPIO_PINS(120);
+DECLARE_MSM_GPIO_PINS(121);
+DECLARE_MSM_GPIO_PINS(122);
+DECLARE_MSM_GPIO_PINS(123);
+DECLARE_MSM_GPIO_PINS(124);
+DECLARE_MSM_GPIO_PINS(125);
+DECLARE_MSM_GPIO_PINS(126);
+DECLARE_MSM_GPIO_PINS(127);
+DECLARE_MSM_GPIO_PINS(128);
+DECLARE_MSM_GPIO_PINS(129);
+DECLARE_MSM_GPIO_PINS(130);
+DECLARE_MSM_GPIO_PINS(131);
+DECLARE_MSM_GPIO_PINS(132);
+DECLARE_MSM_GPIO_PINS(133);
+DECLARE_MSM_GPIO_PINS(134);
+DECLARE_MSM_GPIO_PINS(135);
+DECLARE_MSM_GPIO_PINS(136);
+DECLARE_MSM_GPIO_PINS(137);
+DECLARE_MSM_GPIO_PINS(138);
+DECLARE_MSM_GPIO_PINS(139);
+DECLARE_MSM_GPIO_PINS(140);
+DECLARE_MSM_GPIO_PINS(141);
+DECLARE_MSM_GPIO_PINS(142);
+DECLARE_MSM_GPIO_PINS(143);
+DECLARE_MSM_GPIO_PINS(144);
+DECLARE_MSM_GPIO_PINS(145);
+
+static const unsigned int sdc1_rclk_pins[] = { 146 };
+static const unsigned int sdc1_clk_pins[] = { 147 };
+static const unsigned int sdc1_cmd_pins[] = { 148 };
+static const unsigned int sdc1_data_pins[] = { 149 };
+static const unsigned int sdc2_clk_pins[] = { 150 };
+static const unsigned int sdc2_cmd_pins[] = { 151 };
+static const unsigned int sdc2_data_pins[] = { 152 };
+static const unsigned int sdc3_clk_pins[] = { 153 };
+static const unsigned int sdc3_cmd_pins[] = { 154 };
+static const unsigned int sdc3_data_pins[] = { 155 };
+
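+/* All selectable pad functions on MSM8992/MSM8994; MSM_MUX_NA pads out unused mux slots. */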
+enum msm8994_functions {
+ MSM_MUX_audio_ref_clk,
+ MSM_MUX_blsp_i2c1,
+ MSM_MUX_blsp_i2c2,
+ MSM_MUX_blsp_i2c3,
+ MSM_MUX_blsp_i2c4,
+ MSM_MUX_blsp_i2c5,
+ MSM_MUX_blsp_i2c6,
+ MSM_MUX_blsp_i2c7,
+ MSM_MUX_blsp_i2c8,
+ MSM_MUX_blsp_i2c9,
+ MSM_MUX_blsp_i2c10,
+ MSM_MUX_blsp_i2c11,
+ MSM_MUX_blsp_i2c12,
+ MSM_MUX_blsp_spi1,
+ MSM_MUX_blsp_spi1_cs1,
+ MSM_MUX_blsp_spi1_cs2,
+ MSM_MUX_blsp_spi1_cs3,
+ MSM_MUX_blsp_spi2,
+ MSM_MUX_blsp_spi2_cs1,
+ MSM_MUX_blsp_spi2_cs2,
+ MSM_MUX_blsp_spi2_cs3,
+ MSM_MUX_blsp_spi3,
+ MSM_MUX_blsp_spi4,
+ MSM_MUX_blsp_spi5,
+ MSM_MUX_blsp_spi6,
+ MSM_MUX_blsp_spi7,
+ MSM_MUX_blsp_spi8,
+ MSM_MUX_blsp_spi9,
+ MSM_MUX_blsp_spi10,
+ MSM_MUX_blsp_spi10_cs1,
+ MSM_MUX_blsp_spi10_cs2,
+ MSM_MUX_blsp_spi10_cs3,
+ MSM_MUX_blsp_spi11,
+ MSM_MUX_blsp_spi12,
+ MSM_MUX_blsp_uart1,
+ MSM_MUX_blsp_uart2,
+ MSM_MUX_blsp_uart3,
+ MSM_MUX_blsp_uart4,
+ MSM_MUX_blsp_uart5,
+ MSM_MUX_blsp_uart6,
+ MSM_MUX_blsp_uart7,
+ MSM_MUX_blsp_uart8,
+ MSM_MUX_blsp_uart9,
+ MSM_MUX_blsp_uart10,
+ MSM_MUX_blsp_uart11,
+ MSM_MUX_blsp_uart12,
+ MSM_MUX_blsp_uim1,
+ MSM_MUX_blsp_uim2,
+ MSM_MUX_blsp_uim3,
+ MSM_MUX_blsp_uim4,
+ MSM_MUX_blsp_uim5,
+ MSM_MUX_blsp_uim6,
+ MSM_MUX_blsp_uim7,
+ MSM_MUX_blsp_uim8,
+ MSM_MUX_blsp_uim9,
+ MSM_MUX_blsp_uim10,
+ MSM_MUX_blsp_uim11,
+ MSM_MUX_blsp_uim12,
+ MSM_MUX_blsp11_i2c_scl_b,
+ MSM_MUX_blsp11_i2c_sda_b,
+ MSM_MUX_blsp11_uart_rx_b,
+ MSM_MUX_blsp11_uart_tx_b,
+ MSM_MUX_cam_mclk0,
+ MSM_MUX_cam_mclk1,
+ MSM_MUX_cam_mclk2,
+ MSM_MUX_cam_mclk3,
+ MSM_MUX_cci_async_in0,
+ MSM_MUX_cci_async_in1,
+ MSM_MUX_cci_async_in2,
+ MSM_MUX_cci_i2c0,
+ MSM_MUX_cci_i2c1,
+ MSM_MUX_cci_timer0,
+ MSM_MUX_cci_timer1,
+ MSM_MUX_cci_timer2,
+ MSM_MUX_cci_timer3,
+ MSM_MUX_cci_timer4,
+ MSM_MUX_gcc_gp1_clk_a,
+ MSM_MUX_gcc_gp1_clk_b,
+ MSM_MUX_gcc_gp2_clk_a,
+ MSM_MUX_gcc_gp2_clk_b,
+ MSM_MUX_gcc_gp3_clk_a,
+ MSM_MUX_gcc_gp3_clk_b,
+ MSM_MUX_gp_mn,
+ MSM_MUX_gp_pdm0,
+ MSM_MUX_gp_pdm1,
+ MSM_MUX_gp_pdm2,
+ MSM_MUX_gp0_clk,
+ MSM_MUX_gp1_clk,
+ MSM_MUX_gps_tx,
+ MSM_MUX_gsm_tx,
+ MSM_MUX_hdmi_cec,
+ MSM_MUX_hdmi_ddc,
+ MSM_MUX_hdmi_hpd,
+ MSM_MUX_hdmi_rcv,
+ MSM_MUX_mdp_vsync,
+ MSM_MUX_mss_lte,
+ MSM_MUX_nav_pps,
+ MSM_MUX_nav_tsync,
+ MSM_MUX_qdss_cti_trig_in_a,
+ MSM_MUX_qdss_cti_trig_in_b,
+ MSM_MUX_qdss_cti_trig_in_c,
+ MSM_MUX_qdss_cti_trig_in_d,
+ MSM_MUX_qdss_cti_trig_out_a,
+ MSM_MUX_qdss_cti_trig_out_b,
+ MSM_MUX_qdss_cti_trig_out_c,
+ MSM_MUX_qdss_cti_trig_out_d,
+ MSM_MUX_qdss_traceclk_a,
+ MSM_MUX_qdss_traceclk_b,
+ MSM_MUX_qdss_tracectl_a,
+ MSM_MUX_qdss_tracectl_b,
+ MSM_MUX_qdss_tracedata_a,
+ MSM_MUX_qdss_tracedata_b,
+ MSM_MUX_qua_mi2s,
+ MSM_MUX_pci_e0,
+ MSM_MUX_pci_e1,
+ MSM_MUX_pri_mi2s,
+ MSM_MUX_sdc4,
+ MSM_MUX_sec_mi2s,
+ MSM_MUX_slimbus,
+ MSM_MUX_spkr_i2s,
+ MSM_MUX_ter_mi2s,
+ MSM_MUX_tsif1,
+ MSM_MUX_tsif2,
+ MSM_MUX_uim1,
+ MSM_MUX_uim2,
+ MSM_MUX_uim3,
+ MSM_MUX_uim4,
+ MSM_MUX_uim_batt_alarm,
+ MSM_MUX_gpio,
+ MSM_MUX_NA,
+};
+
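+/* Every one of the 146 TLMM pins can always fall back to the plain "gpio" function. */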
+static const char * const gpio_groups[] = {
+ "gpio0", "gpio1", "gpio2", "gpio3", "gpio4", "gpio5", "gpio6", "gpio7",
+ "gpio8", "gpio9", "gpio10", "gpio11", "gpio12", "gpio13", "gpio14",
+ "gpio15", "gpio16", "gpio17", "gpio18", "gpio19", "gpio20", "gpio21",
+ "gpio22", "gpio23", "gpio24", "gpio25", "gpio26", "gpio27", "gpio28",
+ "gpio29", "gpio30", "gpio31", "gpio32", "gpio33", "gpio34", "gpio35",
+ "gpio36", "gpio37", "gpio38", "gpio39", "gpio40", "gpio41", "gpio42",
+ "gpio43", "gpio44", "gpio45", "gpio46", "gpio47", "gpio48", "gpio49",
+ "gpio50", "gpio51", "gpio52", "gpio53", "gpio54", "gpio55", "gpio56",
+ "gpio57", "gpio58", "gpio59", "gpio60", "gpio61", "gpio62", "gpio63",
+ "gpio64", "gpio65", "gpio66", "gpio67", "gpio68", "gpio69", "gpio70",
+ "gpio71", "gpio72", "gpio73", "gpio74", "gpio75", "gpio76", "gpio77",
+ "gpio78", "gpio79", "gpio80", "gpio81", "gpio82", "gpio83", "gpio84",
+ "gpio85", "gpio86", "gpio87", "gpio88", "gpio89", "gpio90", "gpio91",
+ "gpio92", "gpio93", "gpio94", "gpio95", "gpio96", "gpio97", "gpio98",
+ "gpio99", "gpio100", "gpio101", "gpio102", "gpio103", "gpio104",
+ "gpio105", "gpio106", "gpio107", "gpio108", "gpio109", "gpio110",
+ "gpio111", "gpio112", "gpio113", "gpio114", "gpio115", "gpio116",
+ "gpio117", "gpio118", "gpio119", "gpio120", "gpio121", "gpio122",
+ "gpio123", "gpio124", "gpio125", "gpio126", "gpio127", "gpio128",
+ "gpio129", "gpio130", "gpio131", "gpio132", "gpio133", "gpio134",
+ "gpio135", "gpio136", "gpio137", "gpio138", "gpio139", "gpio140",
+ "gpio141", "gpio142", "gpio143", "gpio144", "gpio145",
+};
+
+static const char * const blsp_spi1_groups[] = {
+ "gpio0", "gpio1", "gpio2", "gpio3"
+};
+static const char * const blsp_uart1_groups[] = {
+ "gpio0", "gpio1", "gpio2", "gpio3"
+};
+static const char * const blsp_uim1_groups[] = {
+ "gpio0", "gpio1"
+};
+static const char * const hdmi_rcv_groups[] = {
+ "gpio0"
+};
+static const char * const blsp_i2c1_groups[] = {
+ "gpio2", "gpio3"
+};
+static const char * const blsp_spi2_groups[] = {
+ "gpio4", "gpio5", "gpio6", "gpio7"
+};
+static const char * const blsp_uart2_groups[] = {
+ "gpio4", "gpio5", "gpio6", "gpio7"
+};
+static const char * const blsp_uim2_groups[] = {
+ "gpio4", "gpio5"
+};
+static const char * const qdss_cti_trig_out_b_groups[] = {
+ "gpio4",
+};
+static const char * const qdss_cti_trig_in_b_groups[] = {
+ "gpio5",
+};
+static const char * const blsp_i2c2_groups[] = {
+ "gpio6", "gpio7"
+};
+static const char * const blsp_spi3_groups[] = {
+ "gpio8", "gpio9", "gpio10", "gpio11"
+};
+static const char * const blsp_uart3_groups[] = {
+ "gpio8", "gpio9", "gpio10", "gpio11"
+};
+static const char * const blsp_uim3_groups[] = {
+ "gpio8", "gpio9"
+};
+static const char * const blsp_spi1_cs1_groups[] = {
+ "gpio8"
+};
+static const char * const blsp_spi1_cs2_groups[] = {
+ "gpio9", "gpio11"
+};
+static const char * const mdp_vsync_groups[] = {
+ "gpio10", "gpio11", "gpio12"
+};
+static const char * const blsp_i2c3_groups[] = {
+ "gpio10", "gpio11"
+};
+static const char * const blsp_spi1_cs3_groups[] = {
+ "gpio10"
+};
+static const char * const qdss_tracedata_b_groups[] = {
+ "gpio13", "gpio14", "gpio15", "gpio16", "gpio17", "gpio18",
+ "gpio19", "gpio21", "gpio22", "gpio23", "gpio25", "gpio26",
+ "gpio57", "gpio58", "gpio92", "gpio93",
+};
+static const char * const cam_mclk0_groups[] = {
+ "gpio13"
+};
+static const char * const cam_mclk1_groups[] = {
+ "gpio14"
+};
+static const char * const cam_mclk2_groups[] = {
+ "gpio15"
+};
+static const char * const cam_mclk3_groups[] = {
+ "gpio16"
+};
+static const char * const cci_i2c0_groups[] = {
+ "gpio17", "gpio18"
+};
+static const char * const blsp_spi4_groups[] = {
+ "gpio17", "gpio18", "gpio19", "gpio20"
+};
+static const char * const blsp_uart4_groups[] = {
+ "gpio17", "gpio18", "gpio19", "gpio20"
+};
+static const char * const blsp_uim4_groups[] = {
+ "gpio17", "gpio18"
+};
+static const char * const cci_i2c1_groups[] = {
+ "gpio19", "gpio20"
+};
+static const char * const blsp_i2c4_groups[] = {
+ "gpio19", "gpio20"
+};
+static const char * const cci_timer0_groups[] = {
+ "gpio21"
+};
+static const char * const blsp_spi5_groups[] = {
+ "gpio21", "gpio22", "gpio23", "gpio24"
+};
+static const char * const blsp_uart5_groups[] = {
+ "gpio21", "gpio22", "gpio23", "gpio24"
+};
+static const char * const blsp_uim5_groups[] = {
+ "gpio21", "gpio22"
+};
+static const char * const cci_timer1_groups[] = {
+ "gpio22"
+};
+static const char * const cci_timer2_groups[] = {
+ "gpio23"
+};
+static const char * const blsp_i2c5_groups[] = {
+ "gpio23", "gpio24"
+};
+static const char * const cci_timer3_groups[] = {
+ "gpio24"
+};
+static const char * const cci_async_in1_groups[] = {
+ "gpio24"
+};
+static const char * const cci_timer4_groups[] = {
+ "gpio25"
+};
+static const char * const cci_async_in2_groups[] = {
+ "gpio25"
+};
+static const char * const blsp_spi6_groups[] = {
+ "gpio25", "gpio26", "gpio27", "gpio28"
+};
+static const char * const blsp_uart6_groups[] = {
+ "gpio25", "gpio26", "gpio27", "gpio28"
+};
+static const char * const blsp_uim6_groups[] = {
+ "gpio25", "gpio26"
+};
+static const char * const cci_async_in0_groups[] = {
+ "gpio26"
+};
+static const char * const gp0_clk_groups[] = {
+ "gpio26"
+};
+static const char * const gp1_clk_groups[] = {
+ "gpio27", "gpio57", "gpio78"
+};
+static const char * const blsp_i2c6_groups[] = {
+ "gpio27", "gpio28"
+};
+static const char * const qdss_tracectl_a_groups[] = {
+ "gpio27",
+};
+static const char * const qdss_traceclk_a_groups[] = {
+ "gpio28",
+};
+static const char * const gp_mn_groups[] = {
+ "gpio29"
+};
+static const char * const hdmi_cec_groups[] = {
+ "gpio31"
+};
+static const char * const hdmi_ddc_groups[] = {
+ "gpio32", "gpio33"
+};
+static const char * const hdmi_hpd_groups[] = {
+ "gpio34"
+};
+static const char * const uim3_groups[] = {
+ "gpio35", "gpio36", "gpio37", "gpio38"
+};
+static const char * const pci_e1_groups[] = {
+ "gpio35", "gpio36",
+};
+static const char * const blsp_spi7_groups[] = {
+ "gpio41", "gpio42", "gpio43", "gpio44"
+};
+static const char * const blsp_uart7_groups[] = {
+ "gpio41", "gpio42", "gpio43", "gpio44"
+};
+static const char * const blsp_uim7_groups[] = {
+ "gpio41", "gpio42"
+};
+static const char * const qdss_cti_trig_out_c_groups[] = {
+ "gpio41",
+};
+static const char * const qdss_cti_trig_in_c_groups[] = {
+ "gpio42",
+};
+static const char * const blsp_i2c7_groups[] = {
+ "gpio43", "gpio44"
+};
+static const char * const blsp_spi8_groups[] = {
+ "gpio45", "gpio46", "gpio47", "gpio48"
+};
+static const char * const blsp_uart8_groups[] = {
+ "gpio45", "gpio46", "gpio47", "gpio48"
+};
+static const char * const blsp_uim8_groups[] = {
+ "gpio45", "gpio46"
+};
+static const char * const blsp_i2c8_groups[] = {
+ "gpio47", "gpio48"
+};
+static const char * const blsp_spi10_cs1_groups[] = {
+ "gpio47", "gpio67"
+};
+static const char * const blsp_spi10_cs2_groups[] = {
+ "gpio48", "gpio68"
+};
+static const char * const uim2_groups[] = {
+ "gpio49", "gpio50", "gpio51", "gpio52"
+};
+static const char * const blsp_spi9_groups[] = {
+ "gpio49", "gpio50", "gpio51", "gpio52"
+};
+static const char * const blsp_uart9_groups[] = {
+ "gpio49", "gpio50", "gpio51", "gpio52"
+};
+static const char * const blsp_uim9_groups[] = {
+ "gpio49", "gpio50"
+};
+static const char * const blsp_i2c9_groups[] = {
+ "gpio51", "gpio52"
+};
+static const char * const pci_e0_groups[] = {
+ "gpio53", "gpio54",
+};
+static const char * const uim4_groups[] = {
+ "gpio53", "gpio54", "gpio55", "gpio56"
+};
+static const char * const blsp_spi10_groups[] = {
+ "gpio53", "gpio54", "gpio55", "gpio56"
+};
+static const char * const blsp_uart10_groups[] = {
+ "gpio53", "gpio54", "gpio55", "gpio56"
+};
+static const char * const blsp_uim10_groups[] = {
+ "gpio53", "gpio54"
+};
+static const char * const qdss_tracedata_a_groups[] = {
+ "gpio53", "gpio54", "gpio63", "gpio64", "gpio65",
+ "gpio66", "gpio67", "gpio74", "gpio75", "gpio76",
+ "gpio77", "gpio85", "gpio86", "gpio87", "gpio89",
+ "gpio90"
+};
+static const char * const gp_pdm0_groups[] = {
+ "gpio54", "gpio95"
+};
+static const char * const blsp_i2c10_groups[] = {
+ "gpio55", "gpio56"
+};
+static const char * const qdss_cti_trig_in_a_groups[] = {
+ "gpio55",
+};
+static const char * const qdss_cti_trig_out_a_groups[] = {
+ "gpio56",
+};
+static const char * const qua_mi2s_groups[] = {
+ "gpio57", "gpio58", "gpio59", "gpio60", "gpio61", "gpio62", "gpio63",
+};
+static const char * const gcc_gp1_clk_a_groups[] = {
+ "gpio57"
+};
+static const char * const gcc_gp2_clk_a_groups[] = {
+ "gpio58"
+};
+static const char * const gcc_gp3_clk_a_groups[] = {
+ "gpio59"
+};
+static const char * const blsp_spi2_cs1_groups[] = {
+ "gpio62"
+};
+static const char * const blsp_spi2_cs2_groups[] = {
+ "gpio63"
+};
+static const char * const gp_pdm2_groups[] = {
+ "gpio63", "gpio79"
+};
+static const char * const pri_mi2s_groups[] = {
+ "gpio64", "gpio65", "gpio66", "gpio67", "gpio68"
+};
+static const char * const blsp_spi2_cs3_groups[] = {
+ "gpio66"
+};
+static const char * const spkr_i2s_groups[] = {
+ "gpio69", "gpio70", "gpio71", "gpio72"
+};
+static const char * const audio_ref_clk_groups[] = {
+ "gpio69"
+};
+static const char * const slimbus_groups[] = {
+ "gpio70", "gpio71"
+};
+static const char * const ter_mi2s_groups[] = {
+ "gpio73", "gpio74", "gpio75", "gpio76", "gpio77"
+};
+static const char * const gp_pdm1_groups[] = {
+ "gpio74", "gpio86"
+};
+static const char * const sec_mi2s_groups[] = {
+ "gpio78", "gpio79", "gpio80", "gpio81", "gpio82"
+};
+static const char * const gcc_gp1_clk_b_groups[] = {
+ "gpio78"
+};
+static const char * const blsp_spi11_groups[] = {
+ "gpio81", "gpio82", "gpio83", "gpio84"
+};
+static const char * const blsp_uart11_groups[] = {
+ "gpio81", "gpio82", "gpio83", "gpio84"
+};
+static const char * const blsp_uim11_groups[] = {
+ "gpio81", "gpio82"
+};
+static const char * const gcc_gp2_clk_b_groups[] = {
+ "gpio81"
+};
+static const char * const gcc_gp3_clk_b_groups[] = {
+ "gpio82"
+};
+static const char * const blsp_i2c11_groups[] = {
+ "gpio83", "gpio84"
+};
+static const char * const blsp_uart12_groups[] = {
+ "gpio85", "gpio86", "gpio87", "gpio88"
+};
+static const char * const blsp_uim12_groups[] = {
+ "gpio85", "gpio86"
+};
+static const char * const blsp_i2c12_groups[] = {
+ "gpio87", "gpio88"
+};
+static const char * const blsp_spi12_groups[] = {
+ "gpio85", "gpio86", "gpio87", "gpio88"
+};
+static const char * const tsif1_groups[] = {
+ "gpio89", "gpio90", "gpio91", "gpio110", "gpio111"
+};
+static const char * const blsp_spi10_cs3_groups[] = {
+ "gpio90"
+};
+static const char * const sdc4_groups[] = {
+ "gpio91", "gpio92", "gpio93", "gpio94", "gpio95", "gpio96"
+};
+static const char * const qdss_traceclk_b_groups[] = {
+ "gpio91",
+};
+static const char * const tsif2_groups[] = {
+ "gpio92", "gpio93", "gpio94", "gpio95", "gpio96"
+};
+static const char * const qdss_tracectl_b_groups[] = {
+ "gpio94",
+};
+static const char * const qdss_cti_trig_out_d_groups[] = {
+ "gpio95",
+};
+static const char * const qdss_cti_trig_in_d_groups[] = {
+ "gpio96",
+};
+static const char * const uim1_groups[] = {
+ "gpio97", "gpio98", "gpio99", "gpio100"
+};
+static const char * const uim_batt_alarm_groups[] = {
+ "gpio101"
+};
+static const char * const blsp11_uart_tx_b_groups[] = {
+ "gpio111"
+};
+static const char * const blsp11_uart_rx_b_groups[] = {
+ "gpio112"
+};
+static const char * const blsp11_i2c_sda_b_groups[] = {
+ "gpio113"
+};
+static const char * const blsp11_i2c_scl_b_groups[] = {
+ "gpio114"
+};
+static const char * const gsm_tx_groups[] = {
+ "gpio126", "gpio131", "gpio132", "gpio133"
+};
+static const char * const nav_tsync_groups[] = {
+ "gpio127"
+};
+static const char * const nav_pps_groups[] = {
+ "gpio127"
+};
+static const char * const gps_tx_groups[] = {
+ "gpio130"
+};
+static const char * const mss_lte_groups[] = {
+ "gpio134", "gpio135"
+};
+
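+/* Function descriptors; the order must match enum msm8994_functions above. */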
+static const struct msm_function msm8994_functions[] = {
+ FUNCTION(audio_ref_clk),
+ FUNCTION(blsp_i2c1),
+ FUNCTION(blsp_i2c2),
+ FUNCTION(blsp_i2c3),
+ FUNCTION(blsp_i2c4),
+ FUNCTION(blsp_i2c5),
+ FUNCTION(blsp_i2c6),
+ FUNCTION(blsp_i2c7),
+ FUNCTION(blsp_i2c8),
+ FUNCTION(blsp_i2c9),
+ FUNCTION(blsp_i2c10),
+ FUNCTION(blsp_i2c11),
+ FUNCTION(blsp_i2c12),
+ FUNCTION(blsp_spi1),
+ FUNCTION(blsp_spi1_cs1),
+ FUNCTION(blsp_spi1_cs2),
+ FUNCTION(blsp_spi1_cs3),
+ FUNCTION(blsp_spi2),
+ FUNCTION(blsp_spi2_cs1),
+ FUNCTION(blsp_spi2_cs2),
+ FUNCTION(blsp_spi2_cs3),
+ FUNCTION(blsp_spi3),
+ FUNCTION(blsp_spi4),
+ FUNCTION(blsp_spi5),
+ FUNCTION(blsp_spi6),
+ FUNCTION(blsp_spi7),
+ FUNCTION(blsp_spi8),
+ FUNCTION(blsp_spi9),
+ FUNCTION(blsp_spi10),
+ FUNCTION(blsp_spi10_cs1),
+ FUNCTION(blsp_spi10_cs2),
+ FUNCTION(blsp_spi10_cs3),
+ FUNCTION(blsp_spi11),
+ FUNCTION(blsp_spi12),
+ FUNCTION(blsp_uart1),
+ FUNCTION(blsp_uart2),
+ FUNCTION(blsp_uart3),
+ FUNCTION(blsp_uart4),
+ FUNCTION(blsp_uart5),
+ FUNCTION(blsp_uart6),
+ FUNCTION(blsp_uart7),
+ FUNCTION(blsp_uart8),
+ FUNCTION(blsp_uart9),
+ FUNCTION(blsp_uart10),
+ FUNCTION(blsp_uart11),
+ FUNCTION(blsp_uart12),
+ FUNCTION(blsp_uim1),
+ FUNCTION(blsp_uim2),
+ FUNCTION(blsp_uim3),
+ FUNCTION(blsp_uim4),
+ FUNCTION(blsp_uim5),
+ FUNCTION(blsp_uim6),
+ FUNCTION(blsp_uim7),
+ FUNCTION(blsp_uim8),
+ FUNCTION(blsp_uim9),
+ FUNCTION(blsp_uim10),
+ FUNCTION(blsp_uim11),
+ FUNCTION(blsp_uim12),
+ FUNCTION(blsp11_i2c_scl_b),
+ FUNCTION(blsp11_i2c_sda_b),
+ FUNCTION(blsp11_uart_rx_b),
+ FUNCTION(blsp11_uart_tx_b),
+ FUNCTION(cam_mclk0),
+ FUNCTION(cam_mclk1),
+ FUNCTION(cam_mclk2),
+ FUNCTION(cam_mclk3),
+ FUNCTION(cci_async_in0),
+ FUNCTION(cci_async_in1),
+ FUNCTION(cci_async_in2),
+ FUNCTION(cci_i2c0),
+ FUNCTION(cci_i2c1),
+ FUNCTION(cci_timer0),
+ FUNCTION(cci_timer1),
+ FUNCTION(cci_timer2),
+ FUNCTION(cci_timer3),
+ FUNCTION(cci_timer4),
+ FUNCTION(gcc_gp1_clk_a),
+ FUNCTION(gcc_gp1_clk_b),
+ FUNCTION(gcc_gp2_clk_a),
+ FUNCTION(gcc_gp2_clk_b),
+ FUNCTION(gcc_gp3_clk_a),
+ FUNCTION(gcc_gp3_clk_b),
+ FUNCTION(gp_mn),
+ FUNCTION(gp_pdm0),
+ FUNCTION(gp_pdm1),
+ FUNCTION(gp_pdm2),
+ FUNCTION(gp0_clk),
+ FUNCTION(gp1_clk),
+ FUNCTION(gps_tx),
+ FUNCTION(gsm_tx),
+ FUNCTION(hdmi_cec),
+ FUNCTION(hdmi_ddc),
+ FUNCTION(hdmi_hpd),
+ FUNCTION(hdmi_rcv),
+ FUNCTION(mdp_vsync),
+ FUNCTION(mss_lte),
+ FUNCTION(nav_pps),
+ FUNCTION(nav_tsync),
+ FUNCTION(qdss_cti_trig_in_a),
+ FUNCTION(qdss_cti_trig_in_b),
+ FUNCTION(qdss_cti_trig_in_c),
+ FUNCTION(qdss_cti_trig_in_d),
+ FUNCTION(qdss_cti_trig_out_a),
+ FUNCTION(qdss_cti_trig_out_b),
+ FUNCTION(qdss_cti_trig_out_c),
+ FUNCTION(qdss_cti_trig_out_d),
+ FUNCTION(qdss_traceclk_a),
+ FUNCTION(qdss_traceclk_b),
+ FUNCTION(qdss_tracectl_a),
+ FUNCTION(qdss_tracectl_b),
+ FUNCTION(qdss_tracedata_a),
+ FUNCTION(qdss_tracedata_b),
+ FUNCTION(qua_mi2s),
+ FUNCTION(pci_e0),
+ FUNCTION(pci_e1),
+ FUNCTION(pri_mi2s),
+ FUNCTION(sdc4),
+ FUNCTION(sec_mi2s),
+ FUNCTION(slimbus),
+ FUNCTION(spkr_i2s),
+ FUNCTION(ter_mi2s),
+ FUNCTION(tsif1),
+ FUNCTION(tsif2),
+ FUNCTION(uim_batt_alarm),
+ FUNCTION(uim1),
+ FUNCTION(uim2),
+ FUNCTION(uim3),
+ FUNCTION(uim4),
+ FUNCTION(gpio),
+};
+
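+/* Per-pin mux map: each PINGROUP lists up to eleven alternate functions, padded with NA. */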
+static const struct msm_pingroup msm8994_groups[] = {
+ PINGROUP(0, blsp_spi1, blsp_uart1, blsp_uim1, hdmi_rcv, NA, NA, NA,
+ NA, NA, NA, NA),
+ PINGROUP(1, blsp_spi1, blsp_uart1, blsp_uim1, NA, NA, NA, NA, NA, NA,
+ NA, NA),
+ PINGROUP(2, blsp_spi1, blsp_uart1, blsp_i2c1, NA, NA, NA, NA, NA, NA,
+ NA, NA),
+ PINGROUP(3, blsp_spi1, blsp_uart1, blsp_i2c1, NA, NA, NA, NA, NA, NA,
+ NA, NA),
+ PINGROUP(4, blsp_spi2, blsp_uart2, blsp_uim2, NA, qdss_cti_trig_out_b,
+ NA, NA, NA, NA, NA, NA),
+ PINGROUP(5, blsp_spi2, blsp_uart2, blsp_uim2, NA, qdss_cti_trig_in_b,
+ NA, NA, NA, NA, NA, NA),
+ PINGROUP(6, blsp_spi2, blsp_uart2, blsp_i2c2, NA, NA, NA, NA, NA, NA,
+ NA, NA),
+ PINGROUP(7, blsp_spi2, blsp_uart2, blsp_i2c2, NA, NA, NA, NA, NA, NA,
+ NA, NA),
+ PINGROUP(8, blsp_spi3, blsp_uart3, blsp_uim3, blsp_spi1_cs1, NA, NA,
+ NA, NA, NA, NA, NA),
+ PINGROUP(9, blsp_spi3, blsp_uart3, blsp_uim3, blsp_spi1_cs2, NA, NA,
+ NA, NA, NA, NA, NA),
+ PINGROUP(10, mdp_vsync, blsp_spi3, blsp_uart3, blsp_i2c3,
+ blsp_spi1_cs3, NA, NA, NA, NA, NA, NA),
+ PINGROUP(11, mdp_vsync, blsp_spi3, blsp_uart3, blsp_i2c3,
+ blsp_spi1_cs2, NA, NA, NA, NA, NA, NA),
+ PINGROUP(12, mdp_vsync, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(13, cam_mclk0, NA, NA, qdss_tracedata_b, NA, NA, NA, NA, NA,
+ NA, NA),
+ PINGROUP(14, cam_mclk1, NA, NA, qdss_tracedata_b, NA, NA, NA, NA, NA,
+ NA, NA),
+ PINGROUP(15, cam_mclk2, NA, qdss_tracedata_b, NA, NA, NA, NA, NA, NA,
+ NA, NA),
+ PINGROUP(16, cam_mclk3, NA, qdss_tracedata_b, NA, NA, NA, NA, NA, NA,
+ NA, NA),
+ PINGROUP(17, cci_i2c0, blsp_spi4, blsp_uart4, blsp_uim4, NA,
+ qdss_tracedata_b, NA, NA, NA, NA, NA),
+ PINGROUP(18, cci_i2c0, blsp_spi4, blsp_uart4, blsp_uim4, NA,
+ qdss_tracedata_b, NA, NA, NA, NA, NA),
+ PINGROUP(19, cci_i2c1, blsp_spi4, blsp_uart4, blsp_i2c4, NA,
+ qdss_tracedata_b, NA, NA, NA, NA, NA),
+ PINGROUP(20, cci_i2c1, blsp_spi4, blsp_uart4, blsp_i2c4, NA, NA, NA,
+ NA, NA, NA, NA),
+ PINGROUP(21, cci_timer0, blsp_spi5, blsp_uart5, blsp_uim5, NA,
+ qdss_tracedata_b, NA, NA, NA, NA, NA),
+ PINGROUP(22, cci_timer1, blsp_spi5, blsp_uart5, blsp_uim5, NA,
+ qdss_tracedata_b, NA, NA, NA, NA, NA),
+ PINGROUP(23, cci_timer2, blsp_spi5, blsp_uart5, blsp_i2c5, NA, NA,
+ qdss_tracedata_b, NA, NA, NA, NA),
+ PINGROUP(24, cci_timer3, cci_async_in1, blsp_spi5, blsp_uart5,
+ blsp_i2c5, NA, NA, NA, NA, NA, NA),
+ PINGROUP(25, cci_timer4, cci_async_in2, blsp_spi6, blsp_uart6,
+ blsp_uim6, NA, NA, qdss_tracedata_b, NA, NA, NA),
+ PINGROUP(26, cci_async_in0, blsp_spi6, blsp_uart6, blsp_uim6, gp0_clk,
+ NA, qdss_tracedata_b, NA, NA, NA, NA),
+ PINGROUP(27, blsp_spi6, blsp_uart6, blsp_i2c6, gp1_clk,
+ qdss_tracectl_a, NA, NA, NA, NA, NA, NA),
+ PINGROUP(28, blsp_spi6, blsp_uart6, blsp_i2c6, qdss_traceclk_a, NA,
+ NA, NA, NA, NA, NA, NA),
+ PINGROUP(29, gp_mn, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(30, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(31, hdmi_cec, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(32, hdmi_ddc, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(33, hdmi_ddc, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(34, hdmi_hpd, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(35, uim3, pci_e1, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(36, uim3, pci_e1, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(37, uim3, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(38, uim3, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(39, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(40, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(41, blsp_spi7, blsp_uart7, blsp_uim7, qdss_cti_trig_out_c,
+ NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(42, blsp_spi7, blsp_uart7, blsp_uim7, qdss_cti_trig_in_c, NA,
+ NA, NA, NA, NA, NA, NA),
+ PINGROUP(43, blsp_spi7, blsp_uart7, blsp_i2c7, NA, NA, NA, NA, NA, NA,
+ NA, NA),
+ PINGROUP(44, blsp_spi7, blsp_uart7, blsp_i2c7, NA, NA, NA, NA, NA, NA,
+ NA, NA),
+ PINGROUP(45, blsp_spi8, blsp_uart8, blsp_uim8, NA, NA, NA, NA, NA, NA,
+ NA, NA),
+ PINGROUP(46, blsp_spi8, blsp_uart8, blsp_uim8, NA, NA, NA, NA, NA, NA,
+ NA, NA),
+ PINGROUP(47, blsp_spi8, blsp_uart8, blsp_i2c8, blsp_spi10_cs1, NA, NA,
+ NA, NA, NA, NA, NA),
+ PINGROUP(48, blsp_spi8, blsp_uart8, blsp_i2c8, blsp_spi10_cs2, NA, NA,
+ NA, NA, NA, NA, NA),
+ PINGROUP(49, uim2, blsp_spi9, blsp_uart9, blsp_uim9, NA, NA, NA, NA,
+ NA, NA, NA),
+ PINGROUP(50, uim2, blsp_spi9, blsp_uart9, blsp_uim9, NA, NA, NA, NA,
+ NA, NA, NA),
+ PINGROUP(51, uim2, blsp_spi9, blsp_uart9, blsp_i2c9, NA, NA, NA, NA,
+ NA, NA, NA),
+ PINGROUP(52, uim2, blsp_spi9, blsp_uart9, blsp_i2c9, NA, NA, NA, NA,
+ NA, NA, NA),
+ PINGROUP(53, uim4, pci_e0, blsp_spi10, blsp_uart10, blsp_uim10, NA,
+ NA, qdss_tracedata_a, NA, NA, NA),
+ PINGROUP(54, uim4, pci_e0, blsp_spi10, blsp_uart10, blsp_uim10,
+ gp_pdm0, NA, NA, qdss_tracedata_a, NA, NA),
+ PINGROUP(55, uim4, blsp_spi10, blsp_uart10, blsp_i2c10, NA, NA, NA,
+ qdss_cti_trig_in_a, NA, NA, NA),
+ PINGROUP(56, uim4, blsp_spi10, blsp_uart10, blsp_i2c10, NA, NA,
+ qdss_cti_trig_out_a, NA, NA, NA, NA),
+ PINGROUP(57, qua_mi2s, gcc_gp1_clk_a, NA, NA, qdss_tracedata_b, NA, NA,
+ NA, NA, NA, NA),
+ PINGROUP(58, qua_mi2s, gcc_gp2_clk_a, NA, NA, qdss_tracedata_b, NA, NA,
+ NA, NA, NA, NA),
+ PINGROUP(59, qua_mi2s, gcc_gp3_clk_a, NA, NA, NA, NA, NA, NA, NA, NA,
+ NA),
+ PINGROUP(60, qua_mi2s, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(61, qua_mi2s, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(62, qua_mi2s, blsp_spi2_cs1, NA, NA, NA, NA, NA, NA, NA, NA,
+ NA),
+ PINGROUP(63, qua_mi2s, blsp_spi2_cs2, gp_pdm2, NA, NA, NA, NA, NA,
+ qdss_tracedata_a, NA, NA),
+ PINGROUP(64, pri_mi2s, NA, NA, NA, qdss_tracedata_a, NA, NA, NA, NA,
+ NA, NA),
+ PINGROUP(65, pri_mi2s, NA, NA, NA, qdss_tracedata_a, NA, NA, NA, NA,
+ NA, NA),
+ PINGROUP(66, pri_mi2s, blsp_spi2_cs3, NA, NA, NA, qdss_tracedata_a,
+ NA, NA, NA, NA, NA),
+ PINGROUP(67, pri_mi2s, blsp_spi10_cs1, NA, NA, NA, qdss_tracedata_a,
+ NA, NA, NA, NA, NA),
+ PINGROUP(68, pri_mi2s, blsp_spi10_cs2, NA, NA, NA, NA, NA, NA, NA, NA,
+ NA),
+ PINGROUP(69, spkr_i2s, audio_ref_clk, NA, NA, NA, NA, NA, NA, NA, NA,
+ NA),
+ PINGROUP(70, slimbus, spkr_i2s, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(71, slimbus, spkr_i2s, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(72, spkr_i2s, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(73, ter_mi2s, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(74, ter_mi2s, gp_pdm1, NA, NA, NA, qdss_tracedata_a, NA, NA,
+ NA, NA, NA),
+ PINGROUP(75, ter_mi2s, NA, NA, qdss_tracedata_a, NA, NA, NA, NA, NA,
+ NA, NA),
+ PINGROUP(76, ter_mi2s, NA, NA, qdss_tracedata_a, NA, NA, NA, NA, NA,
+ NA, NA),
+ PINGROUP(77, ter_mi2s, NA, NA, qdss_tracedata_a, NA, NA, NA, NA, NA,
+ NA, NA),
+ PINGROUP(78, sec_mi2s, gcc_gp1_clk_b, NA, NA, NA, NA, NA, NA, NA, NA,
+ NA),
+ PINGROUP(79, sec_mi2s, gp_pdm2, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(80, sec_mi2s, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(81, sec_mi2s, blsp_spi11, blsp_uart11, blsp_uim11,
+ gcc_gp2_clk_b, NA, NA, NA, NA, NA, NA),
+ PINGROUP(82, sec_mi2s, blsp_spi11, blsp_uart11, blsp_uim11,
+ gcc_gp3_clk_b, NA, NA, NA, NA, NA, NA),
+ PINGROUP(83, blsp_spi11, blsp_uart11, blsp_i2c11, NA, NA, NA, NA, NA,
+ NA, NA, NA),
+ PINGROUP(84, blsp_spi11, blsp_uart11, blsp_i2c11, NA, NA, NA, NA, NA,
+ NA, NA, NA),
+ PINGROUP(85, blsp_spi12, blsp_uart12, blsp_uim12, NA, NA,
+ qdss_tracedata_a, NA, NA, NA, NA, NA),
+ PINGROUP(86, blsp_spi12, blsp_uart12, blsp_uim12, gp_pdm1, NA,
+ qdss_tracedata_a, NA, NA, NA, NA, NA),
+ PINGROUP(87, blsp_spi12, blsp_uart12, blsp_i2c12, NA,
+ qdss_tracedata_a, NA, NA, NA, NA, NA, NA),
+ PINGROUP(88, blsp_spi12, blsp_uart12, blsp_i2c12, NA, NA, NA, NA, NA,
+ NA, NA, NA),
+ PINGROUP(89, tsif1, NA, qdss_tracedata_a, NA, NA, NA, NA, NA, NA, NA,
+ NA),
+ PINGROUP(90, tsif1, blsp_spi10_cs3, qdss_tracedata_a, NA, NA, NA, NA,
+ NA, NA, NA, NA),
+ PINGROUP(91, tsif1, sdc4, NA, NA, NA, NA, qdss_traceclk_b, NA, NA, NA,
+ NA),
+ PINGROUP(92, tsif2, sdc4, NA, NA, qdss_tracedata_b, NA, NA, NA, NA,
+ NA, NA),
+ PINGROUP(93, tsif2, sdc4, NA, NA, NA, NA, qdss_tracedata_b, NA, NA,
+ NA, NA),
+ PINGROUP(94, tsif2, sdc4, NA, NA, NA, NA, qdss_tracectl_b, NA, NA, NA,
+ NA),
+ PINGROUP(95, tsif2, sdc4, gp_pdm0, NA, NA, NA, qdss_cti_trig_out_d,
+ NA, NA, NA, NA),
+ PINGROUP(96, tsif2, sdc4, qdss_cti_trig_in_d, NA, NA, NA, NA, NA, NA,
+ NA, NA),
+ PINGROUP(97, uim1, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(98, uim1, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(99, uim1, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(100, uim1, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(101, uim_batt_alarm, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(102, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(103, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(104, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(105, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(106, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(107, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(108, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(109, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(110, tsif1, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(111, tsif1, blsp11_uart_tx_b, NA, NA, NA, NA, NA, NA, NA, NA,
+ NA),
+ PINGROUP(112, blsp11_uart_rx_b, NA, NA, NA, NA, NA, NA, NA, NA, NA,
+ NA),
+ PINGROUP(113, blsp11_i2c_sda_b, NA, NA, NA, NA, NA, NA, NA, NA, NA,
+ NA),
+ PINGROUP(114, blsp11_i2c_scl_b, NA, NA, NA, NA, NA, NA, NA, NA, NA,
+ NA),
+ PINGROUP(115, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(116, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(117, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(118, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(119, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(120, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(121, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(122, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(123, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(124, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(125, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(126, NA, gsm_tx, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(127, NA, nav_tsync, nav_pps, NA, NA, NA, NA, NA, NA, NA,
+ NA),
+ PINGROUP(128, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(129, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(130, gps_tx, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(131, gsm_tx, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(132, gsm_tx, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(133, gsm_tx, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(134, mss_lte, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(135, mss_lte, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(136, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(137, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(138, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(139, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(140, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(141, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(142, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(143, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(144, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(145, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ SDC_PINGROUP(sdc1_rclk, 0x2044, 15, 0),
+ SDC_PINGROUP(sdc1_clk, 0x2044, 13, 6),
+ SDC_PINGROUP(sdc1_cmd, 0x2044, 11, 3),
+ SDC_PINGROUP(sdc1_data, 0x2044, 9, 0),
+ SDC_PINGROUP(sdc2_clk, 0x2048, 14, 6),
+ SDC_PINGROUP(sdc2_cmd, 0x2048, 11, 3),
+ SDC_PINGROUP(sdc2_data, 0x2048, 9, 0),
+ SDC_PINGROUP(sdc3_clk, 0x206c, 14, 6),
+ SDC_PINGROUP(sdc3_cmd, 0x206c, 11, 3),
+ SDC_PINGROUP(sdc3_data, 0x206c, 9, 0),
+};
+
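+/* Only the first 146 groups are GPIOs; the trailing SDC groups carry pad (pull/drive) control only. */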
+#define NUM_GPIO_PINGROUPS 146
+
+static const struct msm_pinctrl_soc_data msm8994_pinctrl = {
+ .pins = msm8994_pins,
+ .npins = ARRAY_SIZE(msm8994_pins),
+ .functions = msm8994_functions,
+ .nfunctions = ARRAY_SIZE(msm8994_functions),
+ .groups = msm8994_groups,
+ .ngroups = ARRAY_SIZE(msm8994_groups),
+ .ngpios = NUM_GPIO_PINGROUPS,
+};
+
+static int msm8994_pinctrl_probe(struct platform_device *pdev)
+{
+ return msm_pinctrl_probe(pdev, &msm8994_pinctrl);
+}
+
+static const struct of_device_id msm8994_pinctrl_of_match[] = {
+ { .compatible = "qcom,msm8992-pinctrl", },
+ { .compatible = "qcom,msm8994-pinctrl", },
+ { }
+};
+
+static struct platform_driver msm8994_pinctrl_driver = {
+ .driver = {
+ .name = "msm8994-pinctrl",
+ .of_match_table = msm8994_pinctrl_of_match,
+ },
+ .probe = msm8994_pinctrl_probe,
+ .remove = msm_pinctrl_remove,
+};
+
+static int __init msm8994_pinctrl_init(void)
+{
+ return platform_driver_register(&msm8994_pinctrl_driver);
+}
+arch_initcall(msm8994_pinctrl_init);
+
+static void __exit msm8994_pinctrl_exit(void)
+{
+ platform_driver_unregister(&msm8994_pinctrl_driver);
+}
+module_exit(msm8994_pinctrl_exit);
+
+MODULE_DESCRIPTION("Qualcomm MSM8994 pinctrl driver");
+MODULE_LICENSE("GPL v2");
+MODULE_DEVICE_TABLE(of, msm8994_pinctrl_of_match);
diff --git a/drivers/pinctrl/samsung/pinctrl-exynos.c b/drivers/pinctrl/samsung/pinctrl-exynos.c
index d32fa2b5ff82..12f7d1eb65bc 100644
--- a/drivers/pinctrl/samsung/pinctrl-exynos.c
+++ b/drivers/pinctrl/samsung/pinctrl-exynos.c
@@ -61,16 +61,15 @@ static void exynos_irq_mask(struct irq_data *irqd)
struct irq_chip *chip = irq_data_get_irq_chip(irqd);
struct exynos_irq_chip *our_chip = to_exynos_irq_chip(chip);
struct samsung_pin_bank *bank = irq_data_get_irq_chip_data(irqd);
- struct samsung_pinctrl_drv_data *d = bank->drvdata;
unsigned long reg_mask = our_chip->eint_mask + bank->eint_offset;
unsigned long mask;
unsigned long flags;
spin_lock_irqsave(&bank->slock, flags);
- mask = readl(d->virt_base + reg_mask);
+ mask = readl(bank->eint_base + reg_mask);
mask |= 1 << irqd->hwirq;
- writel(mask, d->virt_base + reg_mask);
+ writel(mask, bank->eint_base + reg_mask);
spin_unlock_irqrestore(&bank->slock, flags);
}
@@ -80,10 +79,9 @@ static void exynos_irq_ack(struct irq_data *irqd)
struct irq_chip *chip = irq_data_get_irq_chip(irqd);
struct exynos_irq_chip *our_chip = to_exynos_irq_chip(chip);
struct samsung_pin_bank *bank = irq_data_get_irq_chip_data(irqd);
- struct samsung_pinctrl_drv_data *d = bank->drvdata;
unsigned long reg_pend = our_chip->eint_pend + bank->eint_offset;
- writel(1 << irqd->hwirq, d->virt_base + reg_pend);
+ writel(1 << irqd->hwirq, bank->eint_base + reg_pend);
}
static void exynos_irq_unmask(struct irq_data *irqd)
@@ -91,7 +89,6 @@ static void exynos_irq_unmask(struct irq_data *irqd)
struct irq_chip *chip = irq_data_get_irq_chip(irqd);
struct exynos_irq_chip *our_chip = to_exynos_irq_chip(chip);
struct samsung_pin_bank *bank = irq_data_get_irq_chip_data(irqd);
- struct samsung_pinctrl_drv_data *d = bank->drvdata;
unsigned long reg_mask = our_chip->eint_mask + bank->eint_offset;
unsigned long mask;
unsigned long flags;
@@ -109,9 +106,9 @@ static void exynos_irq_unmask(struct irq_data *irqd)
spin_lock_irqsave(&bank->slock, flags);
- mask = readl(d->virt_base + reg_mask);
+ mask = readl(bank->eint_base + reg_mask);
mask &= ~(1 << irqd->hwirq);
- writel(mask, d->virt_base + reg_mask);
+ writel(mask, bank->eint_base + reg_mask);
spin_unlock_irqrestore(&bank->slock, flags);
}
@@ -121,7 +118,6 @@ static int exynos_irq_set_type(struct irq_data *irqd, unsigned int type)
struct irq_chip *chip = irq_data_get_irq_chip(irqd);
struct exynos_irq_chip *our_chip = to_exynos_irq_chip(chip);
struct samsung_pin_bank *bank = irq_data_get_irq_chip_data(irqd);
- struct samsung_pinctrl_drv_data *d = bank->drvdata;
unsigned int shift = EXYNOS_EINT_CON_LEN * irqd->hwirq;
unsigned int con, trig_type;
unsigned long reg_con = our_chip->eint_con + bank->eint_offset;
@@ -152,10 +148,10 @@ static int exynos_irq_set_type(struct irq_data *irqd, unsigned int type)
else
irq_set_handler_locked(irqd, handle_level_irq);
- con = readl(d->virt_base + reg_con);
+ con = readl(bank->eint_base + reg_con);
con &= ~(EXYNOS_EINT_CON_MASK << shift);
con |= trig_type << shift;
- writel(con, d->virt_base + reg_con);
+ writel(con, bank->eint_base + reg_con);
return 0;
}
@@ -166,7 +162,6 @@ static int exynos_irq_request_resources(struct irq_data *irqd)
struct exynos_irq_chip *our_chip = to_exynos_irq_chip(chip);
struct samsung_pin_bank *bank = irq_data_get_irq_chip_data(irqd);
const struct samsung_pin_bank_type *bank_type = bank->type;
- struct samsung_pinctrl_drv_data *d = bank->drvdata;
unsigned int shift = EXYNOS_EINT_CON_LEN * irqd->hwirq;
unsigned long reg_con = our_chip->eint_con + bank->eint_offset;
unsigned long flags;
@@ -188,10 +183,10 @@ static int exynos_irq_request_resources(struct irq_data *irqd)
spin_lock_irqsave(&bank->slock, flags);
- con = readl(d->virt_base + reg_con);
+ con = readl(bank->eint_base + reg_con);
con &= ~(mask << shift);
con |= EXYNOS_EINT_FUNC << shift;
- writel(con, d->virt_base + reg_con);
+ writel(con, bank->eint_base + reg_con);
spin_unlock_irqrestore(&bank->slock, flags);
@@ -206,7 +201,6 @@ static void exynos_irq_release_resources(struct irq_data *irqd)
struct exynos_irq_chip *our_chip = to_exynos_irq_chip(chip);
struct samsung_pin_bank *bank = irq_data_get_irq_chip_data(irqd);
const struct samsung_pin_bank_type *bank_type = bank->type;
- struct samsung_pinctrl_drv_data *d = bank->drvdata;
unsigned int shift = EXYNOS_EINT_CON_LEN * irqd->hwirq;
unsigned long reg_con = our_chip->eint_con + bank->eint_offset;
unsigned long flags;
@@ -221,10 +215,10 @@ static void exynos_irq_release_resources(struct irq_data *irqd)
spin_lock_irqsave(&bank->slock, flags);
- con = readl(d->virt_base + reg_con);
+ con = readl(bank->eint_base + reg_con);
con &= ~(mask << shift);
con |= FUNC_INPUT << shift;
- writel(con, d->virt_base + reg_con);
+ writel(con, bank->eint_base + reg_con);
spin_unlock_irqrestore(&bank->slock, flags);
@@ -274,7 +268,7 @@ static irqreturn_t exynos_eint_gpio_irq(int irq, void *data)
struct samsung_pin_bank *bank = d->pin_banks;
unsigned int svc, group, pin, virq;
- svc = readl(d->virt_base + EXYNOS_SVC_OFFSET);
+ svc = readl(bank->eint_base + EXYNOS_SVC_OFFSET);
group = EXYNOS_SVC_GROUP(svc);
pin = svc & EXYNOS_SVC_NUM_MASK;
@@ -452,7 +446,6 @@ static void exynos_irq_demux_eint16_31(struct irq_desc *desc)
{
struct irq_chip *chip = irq_desc_get_chip(desc);
struct exynos_muxed_weint_data *eintd = irq_desc_get_handler_data(desc);
- struct samsung_pinctrl_drv_data *d = eintd->banks[0]->drvdata;
unsigned long pend;
unsigned long mask;
int i;
@@ -461,9 +454,9 @@ static void exynos_irq_demux_eint16_31(struct irq_desc *desc)
for (i = 0; i < eintd->nr_banks; ++i) {
struct samsung_pin_bank *b = eintd->banks[i];
- pend = readl(d->virt_base + b->irq_chip->eint_pend
+ pend = readl(b->eint_base + b->irq_chip->eint_pend
+ b->eint_offset);
- mask = readl(d->virt_base + b->irq_chip->eint_mask
+ mask = readl(b->eint_base + b->irq_chip->eint_mask
+ b->eint_offset);
exynos_irq_demux_eint(pend & ~mask, b->irq_domain);
}
@@ -581,7 +574,7 @@ static void exynos_pinctrl_suspend_bank(
struct samsung_pin_bank *bank)
{
struct exynos_eint_gpio_save *save = bank->soc_priv;
- void __iomem *regs = drvdata->virt_base;
+ void __iomem *regs = bank->eint_base;
save->eint_con = readl(regs + EXYNOS_GPIO_ECON_OFFSET
+ bank->eint_offset);
@@ -610,7 +603,7 @@ static void exynos_pinctrl_resume_bank(
struct samsung_pin_bank *bank)
{
struct exynos_eint_gpio_save *save = bank->soc_priv;
- void __iomem *regs = drvdata->virt_base;
+ void __iomem *regs = bank->eint_base;
pr_debug("%s: con %#010x => %#010x\n", bank->name,
readl(regs + EXYNOS_GPIO_ECON_OFFSET
@@ -1346,6 +1339,11 @@ static const struct samsung_pin_bank_data exynos5433_pin_banks0[] = {
EXYNOS_PIN_BANK_EINTW(8, 0x020, "gpa1", 0x04),
EXYNOS_PIN_BANK_EINTW(8, 0x040, "gpa2", 0x08),
EXYNOS_PIN_BANK_EINTW(8, 0x060, "gpa3", 0x0c),
+ EXYNOS_PIN_BANK_EINTW_EXT(8, 0x020, "gpf1", 0x1004, 1),
+ EXYNOS_PIN_BANK_EINTW_EXT(4, 0x040, "gpf2", 0x1008, 1),
+ EXYNOS_PIN_BANK_EINTW_EXT(4, 0x060, "gpf3", 0x100c, 1),
+ EXYNOS_PIN_BANK_EINTW_EXT(8, 0x080, "gpf4", 0x1010, 1),
+ EXYNOS_PIN_BANK_EINTW_EXT(8, 0x0a0, "gpf5", 0x1014, 1),
};
/* pin banks of exynos5433 pin-controller - AUD */
@@ -1427,6 +1425,7 @@ const struct samsung_pin_ctrl exynos5433_pin_ctrl[] = {
.eint_wkup_init = exynos_eint_wkup_init,
.suspend = exynos_pinctrl_suspend,
.resume = exynos_pinctrl_resume,
+ .nr_ext_resources = 1,
}, {
/* pin-controller instance 1 data */
.pin_banks = exynos5433_pin_banks1,
diff --git a/drivers/pinctrl/samsung/pinctrl-exynos.h b/drivers/pinctrl/samsung/pinctrl-exynos.h
index 0f0f7cedb2dc..5821525a2c84 100644
--- a/drivers/pinctrl/samsung/pinctrl-exynos.h
+++ b/drivers/pinctrl/samsung/pinctrl-exynos.h
@@ -79,6 +79,17 @@
.name = id \
}
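+/* Like EXYNOS_PIN_BANK_EINTW, but the pin-control registers live in the extra MMIO resource selected by pctl_idx. */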
+#define EXYNOS_PIN_BANK_EINTW_EXT(pins, reg, id, offs, pctl_idx) \
+ { \
+ .type = &bank_type_alive, \
+ .pctl_offset = reg, \
+ .nr_pins = pins, \
+ .eint_type = EINT_TYPE_WKUP, \
+ .eint_offset = offs, \
+ .name = id, \
+ .pctl_res_idx = pctl_idx, \
+	}
+
/**
* struct exynos_weint_data: irq specific data for all the wakeup interrupts
* generated by the external wakeup interrupt controller.
diff --git a/drivers/pinctrl/samsung/pinctrl-s3c24xx.c b/drivers/pinctrl/samsung/pinctrl-s3c24xx.c
index 3d92f827da7a..b82a003546ae 100644
--- a/drivers/pinctrl/samsung/pinctrl-s3c24xx.c
+++ b/drivers/pinctrl/samsung/pinctrl-s3c24xx.c
@@ -151,7 +151,7 @@ static void s3c24xx_eint_set_function(struct samsung_pinctrl_drv_data *d,
u32 val;
/* Make sure that pin is configured as interrupt */
- reg = d->virt_base + bank->pctl_offset;
+ reg = bank->pctl_base + bank->pctl_offset;
shift = pin * bank_type->fld_width[PINCFG_TYPE_FUNC];
mask = (1 << bank_type->fld_width[PINCFG_TYPE_FUNC]) - 1;
@@ -184,7 +184,7 @@ static int s3c24xx_eint_type(struct irq_data *data, unsigned int type)
s3c24xx_eint_set_handler(data, type);
/* Set up interrupt trigger */
- reg = d->virt_base + EINT_REG(index);
+ reg = bank->eint_base + EINT_REG(index);
shift = EINT_OFFS(index);
val = readl(reg);
@@ -259,32 +259,29 @@ static void s3c2410_demux_eint0_3(struct irq_desc *desc)
static void s3c2412_eint0_3_ack(struct irq_data *data)
{
struct samsung_pin_bank *bank = irq_data_get_irq_chip_data(data);
- struct samsung_pinctrl_drv_data *d = bank->drvdata;
unsigned long bitval = 1UL << data->hwirq;
- writel(bitval, d->virt_base + EINTPEND_REG);
+ writel(bitval, bank->eint_base + EINTPEND_REG);
}
static void s3c2412_eint0_3_mask(struct irq_data *data)
{
struct samsung_pin_bank *bank = irq_data_get_irq_chip_data(data);
- struct samsung_pinctrl_drv_data *d = bank->drvdata;
unsigned long mask;
- mask = readl(d->virt_base + EINTMASK_REG);
+ mask = readl(bank->eint_base + EINTMASK_REG);
mask |= (1UL << data->hwirq);
- writel(mask, d->virt_base + EINTMASK_REG);
+ writel(mask, bank->eint_base + EINTMASK_REG);
}
static void s3c2412_eint0_3_unmask(struct irq_data *data)
{
struct samsung_pin_bank *bank = irq_data_get_irq_chip_data(data);
- struct samsung_pinctrl_drv_data *d = bank->drvdata;
unsigned long mask;
- mask = readl(d->virt_base + EINTMASK_REG);
+ mask = readl(bank->eint_base + EINTMASK_REG);
mask &= ~(1UL << data->hwirq);
- writel(mask, d->virt_base + EINTMASK_REG);
+ writel(mask, bank->eint_base + EINTMASK_REG);
}
static struct irq_chip s3c2412_eint0_3_chip = {
@@ -319,34 +316,31 @@ static void s3c2412_demux_eint0_3(struct irq_desc *desc)
static void s3c24xx_eint_ack(struct irq_data *data)
{
struct samsung_pin_bank *bank = irq_data_get_irq_chip_data(data);
- struct samsung_pinctrl_drv_data *d = bank->drvdata;
unsigned char index = bank->eint_offset + data->hwirq;
- writel(1UL << index, d->virt_base + EINTPEND_REG);
+ writel(1UL << index, bank->eint_base + EINTPEND_REG);
}
static void s3c24xx_eint_mask(struct irq_data *data)
{
struct samsung_pin_bank *bank = irq_data_get_irq_chip_data(data);
- struct samsung_pinctrl_drv_data *d = bank->drvdata;
unsigned char index = bank->eint_offset + data->hwirq;
unsigned long mask;
- mask = readl(d->virt_base + EINTMASK_REG);
+ mask = readl(bank->eint_base + EINTMASK_REG);
mask |= (1UL << index);
- writel(mask, d->virt_base + EINTMASK_REG);
+ writel(mask, bank->eint_base + EINTMASK_REG);
}
static void s3c24xx_eint_unmask(struct irq_data *data)
{
struct samsung_pin_bank *bank = irq_data_get_irq_chip_data(data);
- struct samsung_pinctrl_drv_data *d = bank->drvdata;
unsigned char index = bank->eint_offset + data->hwirq;
unsigned long mask;
- mask = readl(d->virt_base + EINTMASK_REG);
+ mask = readl(bank->eint_base + EINTMASK_REG);
mask &= ~(1UL << index);
- writel(mask, d->virt_base + EINTMASK_REG);
+ writel(mask, bank->eint_base + EINTMASK_REG);
}
static struct irq_chip s3c24xx_eint_chip = {
@@ -362,13 +356,14 @@ static inline void s3c24xx_demux_eint(struct irq_desc *desc,
{
struct s3c24xx_eint_data *data = irq_desc_get_handler_data(desc);
struct irq_chip *chip = irq_desc_get_chip(desc);
- struct samsung_pinctrl_drv_data *d = data->drvdata;
+ struct irq_data *irqd = irq_desc_get_irq_data(desc);
+ struct samsung_pin_bank *bank = irq_data_get_irq_chip_data(irqd);
unsigned int pend, mask;
chained_irq_enter(chip, desc);
- pend = readl(d->virt_base + EINTPEND_REG);
- mask = readl(d->virt_base + EINTMASK_REG);
+ pend = readl(bank->eint_base + EINTPEND_REG);
+ mask = readl(bank->eint_base + EINTMASK_REG);
pend &= ~mask;
pend &= range;
diff --git a/drivers/pinctrl/samsung/pinctrl-s3c64xx.c b/drivers/pinctrl/samsung/pinctrl-s3c64xx.c
index 43407ab248f5..4c632812ccff 100644
--- a/drivers/pinctrl/samsung/pinctrl-s3c64xx.c
+++ b/drivers/pinctrl/samsung/pinctrl-s3c64xx.c
@@ -280,7 +280,7 @@ static void s3c64xx_irq_set_function(struct samsung_pinctrl_drv_data *d,
u32 val;
/* Make sure that pin is configured as interrupt */
- reg = d->virt_base + bank->pctl_offset;
+ reg = bank->pctl_base + bank->pctl_offset;
shift = pin;
if (bank_type->fld_width[PINCFG_TYPE_FUNC] * shift >= 32) {
/* 4-bit bank type with 2 con regs */
@@ -308,9 +308,8 @@ static void s3c64xx_irq_set_function(struct samsung_pinctrl_drv_data *d,
static inline void s3c64xx_gpio_irq_set_mask(struct irq_data *irqd, bool mask)
{
struct samsung_pin_bank *bank = irq_data_get_irq_chip_data(irqd);
- struct samsung_pinctrl_drv_data *d = bank->drvdata;
unsigned char index = EINT_OFFS(bank->eint_offset) + irqd->hwirq;
- void __iomem *reg = d->virt_base + EINTMASK_REG(bank->eint_offset);
+ void __iomem *reg = bank->eint_base + EINTMASK_REG(bank->eint_offset);
u32 val;
val = readl(reg);
@@ -334,9 +333,8 @@ static void s3c64xx_gpio_irq_mask(struct irq_data *irqd)
static void s3c64xx_gpio_irq_ack(struct irq_data *irqd)
{
struct samsung_pin_bank *bank = irq_data_get_irq_chip_data(irqd);
- struct samsung_pinctrl_drv_data *d = bank->drvdata;
unsigned char index = EINT_OFFS(bank->eint_offset) + irqd->hwirq;
- void __iomem *reg = d->virt_base + EINTPEND_REG(bank->eint_offset);
+ void __iomem *reg = bank->eint_base + EINTPEND_REG(bank->eint_offset);
writel(1 << index, reg);
}
@@ -359,7 +357,7 @@ static int s3c64xx_gpio_irq_set_type(struct irq_data *irqd, unsigned int type)
s3c64xx_irq_set_handler(irqd, type);
/* Set up interrupt trigger */
- reg = d->virt_base + EINTCON_REG(bank->eint_offset);
+ reg = bank->eint_base + EINTCON_REG(bank->eint_offset);
shift = EINT_OFFS(bank->eint_offset) + irqd->hwirq;
shift = 4 * (shift / 4); /* 4 EINTs per trigger selector */
@@ -411,7 +409,8 @@ static void s3c64xx_eint_gpio_irq(struct irq_desc *desc)
{
struct irq_chip *chip = irq_desc_get_chip(desc);
struct s3c64xx_eint_gpio_data *data = irq_desc_get_handler_data(desc);
- struct samsung_pinctrl_drv_data *drvdata = data->drvdata;
+ struct irq_data *irqd = irq_desc_get_irq_data(desc);
+ struct samsung_pin_bank *bank = irq_data_get_irq_chip_data(irqd);
chained_irq_enter(chip, desc);
@@ -421,7 +420,7 @@ static void s3c64xx_eint_gpio_irq(struct irq_desc *desc)
unsigned int pin;
unsigned int virq;
- svc = readl(drvdata->virt_base + SERVICE_REG);
+ svc = readl(bank->eint_base + SERVICE_REG);
group = SVC_GROUP(svc);
pin = svc & SVC_NUM_MASK;
@@ -518,15 +517,15 @@ static inline void s3c64xx_eint0_irq_set_mask(struct irq_data *irqd, bool mask)
{
struct s3c64xx_eint0_domain_data *ddata =
irq_data_get_irq_chip_data(irqd);
- struct samsung_pinctrl_drv_data *d = ddata->bank->drvdata;
+ struct samsung_pin_bank *bank = ddata->bank;
u32 val;
- val = readl(d->virt_base + EINT0MASK_REG);
+ val = readl(bank->eint_base + EINT0MASK_REG);
if (mask)
val |= 1 << ddata->eints[irqd->hwirq];
else
val &= ~(1 << ddata->eints[irqd->hwirq]);
- writel(val, d->virt_base + EINT0MASK_REG);
+ writel(val, bank->eint_base + EINT0MASK_REG);
}
static void s3c64xx_eint0_irq_unmask(struct irq_data *irqd)
@@ -543,10 +542,10 @@ static void s3c64xx_eint0_irq_ack(struct irq_data *irqd)
{
struct s3c64xx_eint0_domain_data *ddata =
irq_data_get_irq_chip_data(irqd);
- struct samsung_pinctrl_drv_data *d = ddata->bank->drvdata;
+ struct samsung_pin_bank *bank = ddata->bank;
writel(1 << ddata->eints[irqd->hwirq],
- d->virt_base + EINT0PEND_REG);
+ bank->eint_base + EINT0PEND_REG);
}
static int s3c64xx_eint0_irq_set_type(struct irq_data *irqd, unsigned int type)
@@ -554,7 +553,7 @@ static int s3c64xx_eint0_irq_set_type(struct irq_data *irqd, unsigned int type)
struct s3c64xx_eint0_domain_data *ddata =
irq_data_get_irq_chip_data(irqd);
struct samsung_pin_bank *bank = ddata->bank;
- struct samsung_pinctrl_drv_data *d = bank->drvdata;
+ struct samsung_pinctrl_drv_data *d = ddata->bank->drvdata;
void __iomem *reg;
int trigger;
u8 shift;
@@ -569,7 +568,7 @@ static int s3c64xx_eint0_irq_set_type(struct irq_data *irqd, unsigned int type)
s3c64xx_irq_set_handler(irqd, type);
/* Set up interrupt trigger */
- reg = d->virt_base + EINT0CON0_REG;
+ reg = bank->eint_base + EINT0CON0_REG;
shift = ddata->eints[irqd->hwirq];
if (shift >= EINT_MAX_PER_REG) {
reg += 4;
@@ -601,14 +600,19 @@ static struct irq_chip s3c64xx_eint0_irq_chip = {
static inline void s3c64xx_irq_demux_eint(struct irq_desc *desc, u32 range)
{
struct irq_chip *chip = irq_desc_get_chip(desc);
+ struct irq_data *irqd = irq_desc_get_irq_data(desc);
+ struct s3c64xx_eint0_domain_data *ddata =
+ irq_data_get_irq_chip_data(irqd);
+ struct samsung_pin_bank *bank = ddata->bank;
+
struct s3c64xx_eint0_data *data = irq_desc_get_handler_data(desc);
- struct samsung_pinctrl_drv_data *drvdata = data->drvdata;
+
unsigned int pend, mask;
chained_irq_enter(chip, desc);
- pend = readl(drvdata->virt_base + EINT0PEND_REG);
- mask = readl(drvdata->virt_base + EINT0MASK_REG);
+ pend = readl(bank->eint_base + EINT0PEND_REG);
+ mask = readl(bank->eint_base + EINT0MASK_REG);
pend = pend & range & ~mask;
pend &= range;
diff --git a/drivers/pinctrl/samsung/pinctrl-samsung.c b/drivers/pinctrl/samsung/pinctrl-samsung.c
index 620727fabe64..41e62391c33c 100644
--- a/drivers/pinctrl/samsung/pinctrl-samsung.c
+++ b/drivers/pinctrl/samsung/pinctrl-samsung.c
@@ -33,6 +33,9 @@
#include "../core.h"
#include "pinctrl-samsung.h"
+/* Maximum number of memory resources a pin controller may use */
+#define SAMSUNG_PINCTRL_NUM_RESOURCES 2
+
/* list of all possible config options supported */
static struct pin_config {
const char *property;
@@ -345,7 +348,7 @@ static void pin_to_reg_bank(struct samsung_pinctrl_drv_data *drvdata,
((b->pin_base + b->nr_pins - 1) < pin))
b++;
- *reg = drvdata->virt_base + b->pctl_offset;
+ *reg = b->pctl_base + b->pctl_offset;
*offset = pin - b->pin_base;
if (bank)
*bank = b;
@@ -526,7 +529,7 @@ static void samsung_gpio_set_value(struct gpio_chip *gc,
void __iomem *reg;
u32 data;
- reg = bank->drvdata->virt_base + bank->pctl_offset;
+ reg = bank->pctl_base + bank->pctl_offset;
data = readl(reg + type->reg_offset[PINCFG_TYPE_DAT]);
data &= ~(1 << offset);
@@ -554,7 +557,7 @@ static int samsung_gpio_get(struct gpio_chip *gc, unsigned offset)
struct samsung_pin_bank *bank = gpiochip_get_data(gc);
const struct samsung_pin_bank_type *type = bank->type;
- reg = bank->drvdata->virt_base + bank->pctl_offset;
+ reg = bank->pctl_base + bank->pctl_offset;
data = readl(reg + type->reg_offset[PINCFG_TYPE_DAT]);
data >>= offset;
@@ -581,8 +584,8 @@ static int samsung_gpio_set_direction(struct gpio_chip *gc,
type = bank->type;
drvdata = bank->drvdata;
- reg = drvdata->virt_base + bank->pctl_offset +
- type->reg_offset[PINCFG_TYPE_FUNC];
+ reg = bank->pctl_base + bank->pctl_offset
+ + type->reg_offset[PINCFG_TYPE_FUNC];
mask = (1 << type->fld_width[PINCFG_TYPE_FUNC]) - 1;
shift = offset * type->fld_width[PINCFG_TYPE_FUNC];
@@ -979,6 +982,8 @@ samsung_pinctrl_get_soc_data(struct samsung_pinctrl_drv_data *d,
const struct samsung_pin_bank_data *bdata;
const struct samsung_pin_ctrl *ctrl;
struct samsung_pin_bank *bank;
+ struct resource *res;
+ void __iomem *virt_base[SAMSUNG_PINCTRL_NUM_RESOURCES];
int i;
id = of_alias_get_id(node, "pinctrl");
@@ -997,6 +1002,17 @@ samsung_pinctrl_get_soc_data(struct samsung_pinctrl_drv_data *d,
if (!d->pin_banks)
return ERR_PTR(-ENOMEM);
+ if (ctrl->nr_ext_resources + 1 > SAMSUNG_PINCTRL_NUM_RESOURCES)
+ return ERR_PTR(-EINVAL);
+
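+	/* Map the primary register block plus any extra per-bank blocks up front. */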
+ for (i = 0; i < ctrl->nr_ext_resources + 1; i++) {
+		res = platform_get_resource(pdev, IORESOURCE_MEM, i);
+		if (!res)
+			return ERR_PTR(-EINVAL);
+		virt_base[i] = devm_ioremap(&pdev->dev, res->start,
+				resource_size(res));
+		/* devm_ioremap() returns NULL on failure, never an ERR_PTR value */
+		if (!virt_base[i])
+			return ERR_PTR(-EIO);
+ }
+
bank = d->pin_banks;
bdata = ctrl->pin_banks;
for (i = 0; i < ctrl->nr_banks; ++i, ++bdata, ++bank) {
@@ -1013,6 +1029,9 @@ samsung_pinctrl_get_soc_data(struct samsung_pinctrl_drv_data *d,
bank->drvdata = d;
bank->pin_base = d->nr_pins;
d->nr_pins += bank->nr_pins;
+
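+		/* EINT registers always sit in the first resource; pin-control may use an extra one. */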
+ bank->eint_base = virt_base[0];
+ bank->pctl_base = virt_base[bdata->pctl_res_idx];
}
for_each_child_of_node(node, np) {
@@ -1052,11 +1071,6 @@ static int samsung_pinctrl_probe(struct platform_device *pdev)
}
drvdata->dev = dev;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- drvdata->virt_base = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(drvdata->virt_base))
- return PTR_ERR(drvdata->virt_base);
-
res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
if (res)
drvdata->irq = res->start;
@@ -1094,12 +1108,11 @@ static int samsung_pinctrl_probe(struct platform_device *pdev)
static void samsung_pinctrl_suspend_dev(
struct samsung_pinctrl_drv_data *drvdata)
{
- void __iomem *virt_base = drvdata->virt_base;
int i;
for (i = 0; i < drvdata->nr_banks; i++) {
struct samsung_pin_bank *bank = &drvdata->pin_banks[i];
- void __iomem *reg = virt_base + bank->pctl_offset;
+ void __iomem *reg = bank->pctl_base + bank->pctl_offset;
const u8 *offs = bank->type->reg_offset;
const u8 *widths = bank->type->fld_width;
enum pincfg_type type;
@@ -1140,7 +1153,6 @@ static void samsung_pinctrl_suspend_dev(
*/
static void samsung_pinctrl_resume_dev(struct samsung_pinctrl_drv_data *drvdata)
{
- void __iomem *virt_base = drvdata->virt_base;
int i;
if (drvdata->resume)
@@ -1148,7 +1160,7 @@ static void samsung_pinctrl_resume_dev(struct samsung_pinctrl_drv_data *drvdata)
for (i = 0; i < drvdata->nr_banks; i++) {
struct samsung_pin_bank *bank = &drvdata->pin_banks[i];
- void __iomem *reg = virt_base + bank->pctl_offset;
+ void __iomem *reg = bank->pctl_base + bank->pctl_offset;
const u8 *offs = bank->type->reg_offset;
const u8 *widths = bank->type->fld_width;
enum pincfg_type type;
diff --git a/drivers/pinctrl/samsung/pinctrl-samsung.h b/drivers/pinctrl/samsung/pinctrl-samsung.h
index cd31bfaf62cb..043cb6c11180 100644
--- a/drivers/pinctrl/samsung/pinctrl-samsung.h
+++ b/drivers/pinctrl/samsung/pinctrl-samsung.h
@@ -116,6 +116,7 @@ struct samsung_pin_bank_type {
 * struct samsung_pin_bank_data: represents a controller pin-bank (init data).
* @type: type of the bank (register offsets and bitfield widths)
* @pctl_offset: starting offset of the pin-bank registers.
+ * @pctl_res_idx: index of the memory resource providing this bank's register base.
* @nr_pins: number of pins included in this bank.
* @eint_func: function to set in CON register to configure pin as EINT.
* @eint_type: type of the external interrupt supported by the bank.
@@ -126,6 +127,7 @@ struct samsung_pin_bank_type {
struct samsung_pin_bank_data {
const struct samsung_pin_bank_type *type;
u32 pctl_offset;
+ u8 pctl_res_idx;
u8 nr_pins;
u8 eint_func;
enum eint_type eint_type;
@@ -137,8 +139,10 @@ struct samsung_pin_bank_data {
/**
 * struct samsung_pin_bank: represents a controller pin-bank.
* @type: type of the bank (register offsets and bitfield widths)
+ * @pctl_base: base address of the pin-bank registers.
* @pctl_offset: starting offset of the pin-bank registers.
* @nr_pins: number of pins included in this bank.
+ * @eint_base: base address of the pin-bank EINT registers.
* @eint_func: function to set in CON register to configure pin as EINT.
* @eint_type: type of the external interrupt supported by the bank.
* @eint_mask: bit mask of pins which support EINT function.
@@ -157,8 +161,10 @@ struct samsung_pin_bank_data {
*/
struct samsung_pin_bank {
const struct samsung_pin_bank_type *type;
+ void __iomem *pctl_base;
u32 pctl_offset;
u8 nr_pins;
+ void __iomem *eint_base;
u8 eint_func;
enum eint_type eint_type;
u32 eint_mask;
@@ -182,6 +188,7 @@ struct samsung_pin_bank {
 * struct samsung_pin_ctrl: represents a pin controller.
* @pin_banks: list of pin banks included in this controller.
* @nr_banks: number of pin banks.
+ * @nr_ext_resources: number of extra memory resources (register bases) used by the pin banks.
* @eint_gpio_init: platform specific callback to setup the external gpio
* interrupts for the controller.
* @eint_wkup_init: platform specific callback to setup the external wakeup
@@ -190,6 +197,7 @@ struct samsung_pin_bank {
struct samsung_pin_ctrl {
const struct samsung_pin_bank_data *pin_banks;
u32 nr_banks;
+ int nr_ext_resources;
int (*eint_gpio_init)(struct samsung_pinctrl_drv_data *);
int (*eint_wkup_init)(struct samsung_pinctrl_drv_data *);
@@ -200,7 +208,6 @@ struct samsung_pin_ctrl {
/**
* struct samsung_pinctrl_drv_data: wrapper for holding driver data together.
* @node: global list node
- * @virt_base: register base address of the controller.
* @dev: device instance representing the controller.
 * @irq: interrupt number used by the controller to notify gpio interrupts.
* @ctrl: pin controller instance managed by the driver.
@@ -215,7 +222,6 @@ struct samsung_pin_ctrl {
*/
struct samsung_pinctrl_drv_data {
struct list_head node;
- void __iomem *virt_base;
struct device *dev;
int irq;
diff --git a/drivers/pinctrl/sh-pfc/core.c b/drivers/pinctrl/sh-pfc/core.c
index f3a8897d4e8f..cf80ce1dd7ce 100644
--- a/drivers/pinctrl/sh-pfc/core.c
+++ b/drivers/pinctrl/sh-pfc/core.c
@@ -389,6 +389,21 @@ int sh_pfc_config_mux(struct sh_pfc *pfc, unsigned mark, int pinmux_type)
return 0;
}
+const struct sh_pfc_bias_info *
+sh_pfc_pin_to_bias_info(const struct sh_pfc_bias_info *info,
+ unsigned int num, unsigned int pin)
+{
+ unsigned int i;
+
+ for (i = 0; i < num; i++)
+ if (info[i].pin == pin)
+ return &info[i];
+
+ WARN_ONCE(1, "Pin %u is not in bias info list\n", pin);
+
+ return NULL;
+}
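+
+/*
+ * Illustrative caller pattern (a sketch only; the concrete callers are the
+ * per-SoC bias handlers converted below, e.g. r8a7778_pinmux_get_bias()):
+ *
+ *	info = sh_pfc_pin_to_bias_info(bias_info, ARRAY_SIZE(bias_info), pin);
+ *	if (!info)
+ *		return PIN_CONFIG_BIAS_DISABLE;
+ *	addr = pfc->windows->virt + info->reg;
+ */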
+
static int sh_pfc_init_ranges(struct sh_pfc *pfc)
{
struct sh_pfc_pin_range *range;
diff --git a/drivers/pinctrl/sh-pfc/core.h b/drivers/pinctrl/sh-pfc/core.h
index 0bbdea5849f4..6d598dd63720 100644
--- a/drivers/pinctrl/sh-pfc/core.h
+++ b/drivers/pinctrl/sh-pfc/core.h
@@ -33,4 +33,8 @@ void sh_pfc_write_reg(struct sh_pfc *pfc, u32 reg, unsigned int width,
int sh_pfc_get_pin_index(struct sh_pfc *pfc, unsigned int pin);
int sh_pfc_config_mux(struct sh_pfc *pfc, unsigned mark, int pinmux_type);
+const struct sh_pfc_bias_info *
+sh_pfc_pin_to_bias_info(const struct sh_pfc_bias_info *info,
+ unsigned int num, unsigned int pin);
+
#endif /* __SH_PFC_CORE_H__ */
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7778.c b/drivers/pinctrl/sh-pfc/pfc-r8a7778.c
index 18ef7042b3d1..c3af9ebee4af 100644
--- a/drivers/pinctrl/sh-pfc/pfc-r8a7778.c
+++ b/drivers/pinctrl/sh-pfc/pfc-r8a7778.c
@@ -24,6 +24,7 @@
#include <linux/kernel.h>
#include <linux/pinctrl/pinconf-generic.h>
+#include "core.h"
#include "sh_pfc.h"
#define PORT_GP_PUP_1(bank, pin, fn, sfx) \
@@ -2918,183 +2919,182 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
#define PUPR4 0x110
#define PUPR5 0x114
-static const struct {
- u16 reg : 11;
- u16 bit : 5;
-} pullups[] = {
- [RCAR_GP_PIN(0, 6)] = { PUPR0, 0 }, /* A0 */
- [RCAR_GP_PIN(0, 7)] = { PUPR0, 1 }, /* A1 */
- [RCAR_GP_PIN(0, 8)] = { PUPR0, 2 }, /* A2 */
- [RCAR_GP_PIN(0, 9)] = { PUPR0, 3 }, /* A3 */
- [RCAR_GP_PIN(0, 10)] = { PUPR0, 4 }, /* A4 */
- [RCAR_GP_PIN(0, 11)] = { PUPR0, 5 }, /* A5 */
- [RCAR_GP_PIN(0, 12)] = { PUPR0, 6 }, /* A6 */
- [RCAR_GP_PIN(0, 13)] = { PUPR0, 7 }, /* A7 */
- [RCAR_GP_PIN(0, 14)] = { PUPR0, 8 }, /* A8 */
- [RCAR_GP_PIN(0, 15)] = { PUPR0, 9 }, /* A9 */
- [RCAR_GP_PIN(0, 16)] = { PUPR0, 10 }, /* A10 */
- [RCAR_GP_PIN(0, 17)] = { PUPR0, 11 }, /* A11 */
- [RCAR_GP_PIN(0, 18)] = { PUPR0, 12 }, /* A12 */
- [RCAR_GP_PIN(0, 19)] = { PUPR0, 13 }, /* A13 */
- [RCAR_GP_PIN(0, 20)] = { PUPR0, 14 }, /* A14 */
- [RCAR_GP_PIN(0, 21)] = { PUPR0, 15 }, /* A15 */
- [RCAR_GP_PIN(0, 22)] = { PUPR0, 16 }, /* A16 */
- [RCAR_GP_PIN(0, 23)] = { PUPR0, 17 }, /* A17 */
- [RCAR_GP_PIN(0, 24)] = { PUPR0, 18 }, /* A18 */
- [RCAR_GP_PIN(0, 25)] = { PUPR0, 19 }, /* A19 */
- [RCAR_GP_PIN(0, 26)] = { PUPR0, 20 }, /* A20 */
- [RCAR_GP_PIN(0, 27)] = { PUPR0, 21 }, /* A21 */
- [RCAR_GP_PIN(0, 28)] = { PUPR0, 22 }, /* A22 */
- [RCAR_GP_PIN(0, 29)] = { PUPR0, 23 }, /* A23 */
- [RCAR_GP_PIN(0, 30)] = { PUPR0, 24 }, /* A24 */
- [RCAR_GP_PIN(0, 31)] = { PUPR0, 25 }, /* A25 */
- [RCAR_GP_PIN(1, 3)] = { PUPR0, 26 }, /* /EX_CS0 */
- [RCAR_GP_PIN(1, 4)] = { PUPR0, 27 }, /* /EX_CS1 */
- [RCAR_GP_PIN(1, 5)] = { PUPR0, 28 }, /* /EX_CS2 */
- [RCAR_GP_PIN(1, 6)] = { PUPR0, 29 }, /* /EX_CS3 */
- [RCAR_GP_PIN(1, 7)] = { PUPR0, 30 }, /* /EX_CS4 */
- [RCAR_GP_PIN(1, 8)] = { PUPR0, 31 }, /* /EX_CS5 */
-
- [RCAR_GP_PIN(0, 0)] = { PUPR1, 0 }, /* /PRESETOUT */
- [RCAR_GP_PIN(0, 5)] = { PUPR1, 1 }, /* /BS */
- [RCAR_GP_PIN(1, 0)] = { PUPR1, 2 }, /* RD//WR */
- [RCAR_GP_PIN(1, 1)] = { PUPR1, 3 }, /* /WE0 */
- [RCAR_GP_PIN(1, 2)] = { PUPR1, 4 }, /* /WE1 */
- [RCAR_GP_PIN(1, 11)] = { PUPR1, 5 }, /* EX_WAIT0 */
- [RCAR_GP_PIN(1, 9)] = { PUPR1, 6 }, /* DREQ0 */
- [RCAR_GP_PIN(1, 10)] = { PUPR1, 7 }, /* DACK0 */
- [RCAR_GP_PIN(1, 12)] = { PUPR1, 8 }, /* IRQ0 */
- [RCAR_GP_PIN(1, 13)] = { PUPR1, 9 }, /* IRQ1 */
-
- [RCAR_GP_PIN(1, 22)] = { PUPR2, 0 }, /* DU0_DR0 */
- [RCAR_GP_PIN(1, 23)] = { PUPR2, 1 }, /* DU0_DR1 */
- [RCAR_GP_PIN(1, 24)] = { PUPR2, 2 }, /* DU0_DR2 */
- [RCAR_GP_PIN(1, 25)] = { PUPR2, 3 }, /* DU0_DR3 */
- [RCAR_GP_PIN(1, 26)] = { PUPR2, 4 }, /* DU0_DR4 */
- [RCAR_GP_PIN(1, 27)] = { PUPR2, 5 }, /* DU0_DR5 */
- [RCAR_GP_PIN(1, 28)] = { PUPR2, 6 }, /* DU0_DR6 */
- [RCAR_GP_PIN(1, 29)] = { PUPR2, 7 }, /* DU0_DR7 */
- [RCAR_GP_PIN(1, 30)] = { PUPR2, 8 }, /* DU0_DG0 */
- [RCAR_GP_PIN(1, 31)] = { PUPR2, 9 }, /* DU0_DG1 */
- [RCAR_GP_PIN(2, 0)] = { PUPR2, 10 }, /* DU0_DG2 */
- [RCAR_GP_PIN(2, 1)] = { PUPR2, 11 }, /* DU0_DG3 */
- [RCAR_GP_PIN(2, 2)] = { PUPR2, 12 }, /* DU0_DG4 */
- [RCAR_GP_PIN(2, 3)] = { PUPR2, 13 }, /* DU0_DG5 */
- [RCAR_GP_PIN(2, 4)] = { PUPR2, 14 }, /* DU0_DG6 */
- [RCAR_GP_PIN(2, 5)] = { PUPR2, 15 }, /* DU0_DG7 */
- [RCAR_GP_PIN(2, 6)] = { PUPR2, 16 }, /* DU0_DB0 */
- [RCAR_GP_PIN(2, 7)] = { PUPR2, 17 }, /* DU0_DB1 */
- [RCAR_GP_PIN(2, 8)] = { PUPR2, 18 }, /* DU0_DB2 */
- [RCAR_GP_PIN(2, 9)] = { PUPR2, 19 }, /* DU0_DB3 */
- [RCAR_GP_PIN(2, 10)] = { PUPR2, 20 }, /* DU0_DB4 */
- [RCAR_GP_PIN(2, 11)] = { PUPR2, 21 }, /* DU0_DB5 */
- [RCAR_GP_PIN(2, 12)] = { PUPR2, 22 }, /* DU0_DB6 */
- [RCAR_GP_PIN(2, 13)] = { PUPR2, 23 }, /* DU0_DB7 */
- [RCAR_GP_PIN(2, 14)] = { PUPR2, 24 }, /* DU0_DOTCLKIN */
- [RCAR_GP_PIN(2, 15)] = { PUPR2, 25 }, /* DU0_DOTCLKOUT0 */
- [RCAR_GP_PIN(2, 17)] = { PUPR2, 26 }, /* DU0_HSYNC */
- [RCAR_GP_PIN(2, 18)] = { PUPR2, 27 }, /* DU0_VSYNC */
- [RCAR_GP_PIN(2, 19)] = { PUPR2, 28 }, /* DU0_EXODDF */
- [RCAR_GP_PIN(2, 20)] = { PUPR2, 29 }, /* DU0_DISP */
- [RCAR_GP_PIN(2, 21)] = { PUPR2, 30 }, /* DU0_CDE */
- [RCAR_GP_PIN(2, 16)] = { PUPR2, 31 }, /* DU0_DOTCLKOUT1 */
-
- [RCAR_GP_PIN(3, 24)] = { PUPR3, 0 }, /* VI0_CLK */
- [RCAR_GP_PIN(3, 25)] = { PUPR3, 1 }, /* VI0_CLKENB */
- [RCAR_GP_PIN(3, 26)] = { PUPR3, 2 }, /* VI0_FIELD */
- [RCAR_GP_PIN(3, 27)] = { PUPR3, 3 }, /* /VI0_HSYNC */
- [RCAR_GP_PIN(3, 28)] = { PUPR3, 4 }, /* /VI0_VSYNC */
- [RCAR_GP_PIN(3, 29)] = { PUPR3, 5 }, /* VI0_DATA0 */
- [RCAR_GP_PIN(3, 30)] = { PUPR3, 6 }, /* VI0_DATA1 */
- [RCAR_GP_PIN(3, 31)] = { PUPR3, 7 }, /* VI0_DATA2 */
- [RCAR_GP_PIN(4, 0)] = { PUPR3, 8 }, /* VI0_DATA3 */
- [RCAR_GP_PIN(4, 1)] = { PUPR3, 9 }, /* VI0_DATA4 */
- [RCAR_GP_PIN(4, 2)] = { PUPR3, 10 }, /* VI0_DATA5 */
- [RCAR_GP_PIN(4, 3)] = { PUPR3, 11 }, /* VI0_DATA6 */
- [RCAR_GP_PIN(4, 4)] = { PUPR3, 12 }, /* VI0_DATA7 */
- [RCAR_GP_PIN(4, 5)] = { PUPR3, 13 }, /* VI0_G2 */
- [RCAR_GP_PIN(4, 6)] = { PUPR3, 14 }, /* VI0_G3 */
- [RCAR_GP_PIN(4, 7)] = { PUPR3, 15 }, /* VI0_G4 */
- [RCAR_GP_PIN(4, 8)] = { PUPR3, 16 }, /* VI0_G5 */
- [RCAR_GP_PIN(4, 21)] = { PUPR3, 17 }, /* VI1_DATA12 */
- [RCAR_GP_PIN(4, 22)] = { PUPR3, 18 }, /* VI1_DATA13 */
- [RCAR_GP_PIN(4, 23)] = { PUPR3, 19 }, /* VI1_DATA14 */
- [RCAR_GP_PIN(4, 24)] = { PUPR3, 20 }, /* VI1_DATA15 */
- [RCAR_GP_PIN(4, 9)] = { PUPR3, 21 }, /* ETH_REF_CLK */
- [RCAR_GP_PIN(4, 10)] = { PUPR3, 22 }, /* ETH_TXD0 */
- [RCAR_GP_PIN(4, 11)] = { PUPR3, 23 }, /* ETH_TXD1 */
- [RCAR_GP_PIN(4, 12)] = { PUPR3, 24 }, /* ETH_CRS_DV */
- [RCAR_GP_PIN(4, 13)] = { PUPR3, 25 }, /* ETH_TX_EN */
- [RCAR_GP_PIN(4, 14)] = { PUPR3, 26 }, /* ETH_RX_ER */
- [RCAR_GP_PIN(4, 15)] = { PUPR3, 27 }, /* ETH_RXD0 */
- [RCAR_GP_PIN(4, 16)] = { PUPR3, 28 }, /* ETH_RXD1 */
- [RCAR_GP_PIN(4, 17)] = { PUPR3, 29 }, /* ETH_MDC */
- [RCAR_GP_PIN(4, 18)] = { PUPR3, 30 }, /* ETH_MDIO */
- [RCAR_GP_PIN(4, 19)] = { PUPR3, 31 }, /* ETH_LINK */
-
- [RCAR_GP_PIN(3, 6)] = { PUPR4, 0 }, /* SSI_SCK012 */
- [RCAR_GP_PIN(3, 7)] = { PUPR4, 1 }, /* SSI_WS012 */
- [RCAR_GP_PIN(3, 10)] = { PUPR4, 2 }, /* SSI_SDATA0 */
- [RCAR_GP_PIN(3, 9)] = { PUPR4, 3 }, /* SSI_SDATA1 */
- [RCAR_GP_PIN(3, 8)] = { PUPR4, 4 }, /* SSI_SDATA2 */
- [RCAR_GP_PIN(3, 2)] = { PUPR4, 5 }, /* SSI_SCK34 */
- [RCAR_GP_PIN(3, 3)] = { PUPR4, 6 }, /* SSI_WS34 */
- [RCAR_GP_PIN(3, 5)] = { PUPR4, 7 }, /* SSI_SDATA3 */
- [RCAR_GP_PIN(3, 4)] = { PUPR4, 8 }, /* SSI_SDATA4 */
- [RCAR_GP_PIN(2, 31)] = { PUPR4, 9 }, /* SSI_SCK5 */
- [RCAR_GP_PIN(3, 0)] = { PUPR4, 10 }, /* SSI_WS5 */
- [RCAR_GP_PIN(3, 1)] = { PUPR4, 11 }, /* SSI_SDATA5 */
- [RCAR_GP_PIN(2, 28)] = { PUPR4, 12 }, /* SSI_SCK6 */
- [RCAR_GP_PIN(2, 29)] = { PUPR4, 13 }, /* SSI_WS6 */
- [RCAR_GP_PIN(2, 30)] = { PUPR4, 14 }, /* SSI_SDATA6 */
- [RCAR_GP_PIN(2, 24)] = { PUPR4, 15 }, /* SSI_SCK78 */
- [RCAR_GP_PIN(2, 25)] = { PUPR4, 16 }, /* SSI_WS78 */
- [RCAR_GP_PIN(2, 27)] = { PUPR4, 17 }, /* SSI_SDATA7 */
- [RCAR_GP_PIN(2, 26)] = { PUPR4, 18 }, /* SSI_SDATA8 */
- [RCAR_GP_PIN(3, 23)] = { PUPR4, 19 }, /* TCLK0 */
- [RCAR_GP_PIN(3, 11)] = { PUPR4, 20 }, /* SD0_CLK */
- [RCAR_GP_PIN(3, 12)] = { PUPR4, 21 }, /* SD0_CMD */
- [RCAR_GP_PIN(3, 13)] = { PUPR4, 22 }, /* SD0_DAT0 */
- [RCAR_GP_PIN(3, 14)] = { PUPR4, 23 }, /* SD0_DAT1 */
- [RCAR_GP_PIN(3, 15)] = { PUPR4, 24 }, /* SD0_DAT2 */
- [RCAR_GP_PIN(3, 16)] = { PUPR4, 25 }, /* SD0_DAT3 */
- [RCAR_GP_PIN(3, 17)] = { PUPR4, 26 }, /* SD0_CD */
- [RCAR_GP_PIN(3, 18)] = { PUPR4, 27 }, /* SD0_WP */
- [RCAR_GP_PIN(2, 22)] = { PUPR4, 28 }, /* AUDIO_CLKA */
- [RCAR_GP_PIN(2, 23)] = { PUPR4, 29 }, /* AUDIO_CLKB */
- [RCAR_GP_PIN(1, 14)] = { PUPR4, 30 }, /* IRQ2 */
- [RCAR_GP_PIN(1, 15)] = { PUPR4, 31 }, /* IRQ3 */
-
- [RCAR_GP_PIN(0, 1)] = { PUPR5, 0 }, /* PENC0 */
- [RCAR_GP_PIN(0, 2)] = { PUPR5, 1 }, /* PENC1 */
- [RCAR_GP_PIN(0, 3)] = { PUPR5, 2 }, /* USB_OVC0 */
- [RCAR_GP_PIN(0, 4)] = { PUPR5, 3 }, /* USB_OVC1 */
- [RCAR_GP_PIN(1, 16)] = { PUPR5, 4 }, /* SCIF_CLK */
- [RCAR_GP_PIN(1, 17)] = { PUPR5, 5 }, /* TX0 */
- [RCAR_GP_PIN(1, 18)] = { PUPR5, 6 }, /* RX0 */
- [RCAR_GP_PIN(1, 19)] = { PUPR5, 7 }, /* SCK0 */
- [RCAR_GP_PIN(1, 20)] = { PUPR5, 8 }, /* /CTS0 */
- [RCAR_GP_PIN(1, 21)] = { PUPR5, 9 }, /* /RTS0 */
- [RCAR_GP_PIN(3, 19)] = { PUPR5, 10 }, /* HSPI_CLK0 */
- [RCAR_GP_PIN(3, 20)] = { PUPR5, 11 }, /* /HSPI_CS0 */
- [RCAR_GP_PIN(3, 21)] = { PUPR5, 12 }, /* HSPI_RX0 */
- [RCAR_GP_PIN(3, 22)] = { PUPR5, 13 }, /* HSPI_TX0 */
- [RCAR_GP_PIN(4, 20)] = { PUPR5, 14 }, /* ETH_MAGIC */
- [RCAR_GP_PIN(4, 25)] = { PUPR5, 15 }, /* AVS1 */
- [RCAR_GP_PIN(4, 26)] = { PUPR5, 16 }, /* AVS2 */
+static const struct sh_pfc_bias_info bias_info[] = {
+ { RCAR_GP_PIN(0, 6), PUPR0, 0 }, /* A0 */
+ { RCAR_GP_PIN(0, 7), PUPR0, 1 }, /* A1 */
+ { RCAR_GP_PIN(0, 8), PUPR0, 2 }, /* A2 */
+ { RCAR_GP_PIN(0, 9), PUPR0, 3 }, /* A3 */
+ { RCAR_GP_PIN(0, 10), PUPR0, 4 }, /* A4 */
+ { RCAR_GP_PIN(0, 11), PUPR0, 5 }, /* A5 */
+ { RCAR_GP_PIN(0, 12), PUPR0, 6 }, /* A6 */
+ { RCAR_GP_PIN(0, 13), PUPR0, 7 }, /* A7 */
+ { RCAR_GP_PIN(0, 14), PUPR0, 8 }, /* A8 */
+ { RCAR_GP_PIN(0, 15), PUPR0, 9 }, /* A9 */
+ { RCAR_GP_PIN(0, 16), PUPR0, 10 }, /* A10 */
+ { RCAR_GP_PIN(0, 17), PUPR0, 11 }, /* A11 */
+ { RCAR_GP_PIN(0, 18), PUPR0, 12 }, /* A12 */
+ { RCAR_GP_PIN(0, 19), PUPR0, 13 }, /* A13 */
+ { RCAR_GP_PIN(0, 20), PUPR0, 14 }, /* A14 */
+ { RCAR_GP_PIN(0, 21), PUPR0, 15 }, /* A15 */
+ { RCAR_GP_PIN(0, 22), PUPR0, 16 }, /* A16 */
+ { RCAR_GP_PIN(0, 23), PUPR0, 17 }, /* A17 */
+ { RCAR_GP_PIN(0, 24), PUPR0, 18 }, /* A18 */
+ { RCAR_GP_PIN(0, 25), PUPR0, 19 }, /* A19 */
+ { RCAR_GP_PIN(0, 26), PUPR0, 20 }, /* A20 */
+ { RCAR_GP_PIN(0, 27), PUPR0, 21 }, /* A21 */
+ { RCAR_GP_PIN(0, 28), PUPR0, 22 }, /* A22 */
+ { RCAR_GP_PIN(0, 29), PUPR0, 23 }, /* A23 */
+ { RCAR_GP_PIN(0, 30), PUPR0, 24 }, /* A24 */
+ { RCAR_GP_PIN(0, 31), PUPR0, 25 }, /* A25 */
+ { RCAR_GP_PIN(1, 3), PUPR0, 26 }, /* /EX_CS0 */
+ { RCAR_GP_PIN(1, 4), PUPR0, 27 }, /* /EX_CS1 */
+ { RCAR_GP_PIN(1, 5), PUPR0, 28 }, /* /EX_CS2 */
+ { RCAR_GP_PIN(1, 6), PUPR0, 29 }, /* /EX_CS3 */
+ { RCAR_GP_PIN(1, 7), PUPR0, 30 }, /* /EX_CS4 */
+ { RCAR_GP_PIN(1, 8), PUPR0, 31 }, /* /EX_CS5 */
+
+ { RCAR_GP_PIN(0, 0), PUPR1, 0 }, /* /PRESETOUT */
+ { RCAR_GP_PIN(0, 5), PUPR1, 1 }, /* /BS */
+ { RCAR_GP_PIN(1, 0), PUPR1, 2 }, /* RD//WR */
+ { RCAR_GP_PIN(1, 1), PUPR1, 3 }, /* /WE0 */
+ { RCAR_GP_PIN(1, 2), PUPR1, 4 }, /* /WE1 */
+ { RCAR_GP_PIN(1, 11), PUPR1, 5 }, /* EX_WAIT0 */
+ { RCAR_GP_PIN(1, 9), PUPR1, 6 }, /* DREQ0 */
+ { RCAR_GP_PIN(1, 10), PUPR1, 7 }, /* DACK0 */
+ { RCAR_GP_PIN(1, 12), PUPR1, 8 }, /* IRQ0 */
+ { RCAR_GP_PIN(1, 13), PUPR1, 9 }, /* IRQ1 */
+
+ { RCAR_GP_PIN(1, 22), PUPR2, 0 }, /* DU0_DR0 */
+ { RCAR_GP_PIN(1, 23), PUPR2, 1 }, /* DU0_DR1 */
+ { RCAR_GP_PIN(1, 24), PUPR2, 2 }, /* DU0_DR2 */
+ { RCAR_GP_PIN(1, 25), PUPR2, 3 }, /* DU0_DR3 */
+ { RCAR_GP_PIN(1, 26), PUPR2, 4 }, /* DU0_DR4 */
+ { RCAR_GP_PIN(1, 27), PUPR2, 5 }, /* DU0_DR5 */
+ { RCAR_GP_PIN(1, 28), PUPR2, 6 }, /* DU0_DR6 */
+ { RCAR_GP_PIN(1, 29), PUPR2, 7 }, /* DU0_DR7 */
+ { RCAR_GP_PIN(1, 30), PUPR2, 8 }, /* DU0_DG0 */
+ { RCAR_GP_PIN(1, 31), PUPR2, 9 }, /* DU0_DG1 */
+ { RCAR_GP_PIN(2, 0), PUPR2, 10 }, /* DU0_DG2 */
+ { RCAR_GP_PIN(2, 1), PUPR2, 11 }, /* DU0_DG3 */
+ { RCAR_GP_PIN(2, 2), PUPR2, 12 }, /* DU0_DG4 */
+ { RCAR_GP_PIN(2, 3), PUPR2, 13 }, /* DU0_DG5 */
+ { RCAR_GP_PIN(2, 4), PUPR2, 14 }, /* DU0_DG6 */
+ { RCAR_GP_PIN(2, 5), PUPR2, 15 }, /* DU0_DG7 */
+ { RCAR_GP_PIN(2, 6), PUPR2, 16 }, /* DU0_DB0 */
+ { RCAR_GP_PIN(2, 7), PUPR2, 17 }, /* DU0_DB1 */
+ { RCAR_GP_PIN(2, 8), PUPR2, 18 }, /* DU0_DB2 */
+ { RCAR_GP_PIN(2, 9), PUPR2, 19 }, /* DU0_DB3 */
+ { RCAR_GP_PIN(2, 10), PUPR2, 20 }, /* DU0_DB4 */
+ { RCAR_GP_PIN(2, 11), PUPR2, 21 }, /* DU0_DB5 */
+ { RCAR_GP_PIN(2, 12), PUPR2, 22 }, /* DU0_DB6 */
+ { RCAR_GP_PIN(2, 13), PUPR2, 23 }, /* DU0_DB7 */
+ { RCAR_GP_PIN(2, 14), PUPR2, 24 }, /* DU0_DOTCLKIN */
+ { RCAR_GP_PIN(2, 15), PUPR2, 25 }, /* DU0_DOTCLKOUT0 */
+ { RCAR_GP_PIN(2, 17), PUPR2, 26 }, /* DU0_HSYNC */
+ { RCAR_GP_PIN(2, 18), PUPR2, 27 }, /* DU0_VSYNC */
+ { RCAR_GP_PIN(2, 19), PUPR2, 28 }, /* DU0_EXODDF */
+ { RCAR_GP_PIN(2, 20), PUPR2, 29 }, /* DU0_DISP */
+ { RCAR_GP_PIN(2, 21), PUPR2, 30 }, /* DU0_CDE */
+ { RCAR_GP_PIN(2, 16), PUPR2, 31 }, /* DU0_DOTCLKOUT1 */
+
+ { RCAR_GP_PIN(3, 24), PUPR3, 0 }, /* VI0_CLK */
+ { RCAR_GP_PIN(3, 25), PUPR3, 1 }, /* VI0_CLKENB */
+ { RCAR_GP_PIN(3, 26), PUPR3, 2 }, /* VI0_FIELD */
+ { RCAR_GP_PIN(3, 27), PUPR3, 3 }, /* /VI0_HSYNC */
+ { RCAR_GP_PIN(3, 28), PUPR3, 4 }, /* /VI0_VSYNC */
+ { RCAR_GP_PIN(3, 29), PUPR3, 5 }, /* VI0_DATA0 */
+ { RCAR_GP_PIN(3, 30), PUPR3, 6 }, /* VI0_DATA1 */
+ { RCAR_GP_PIN(3, 31), PUPR3, 7 }, /* VI0_DATA2 */
+ { RCAR_GP_PIN(4, 0), PUPR3, 8 }, /* VI0_DATA3 */
+ { RCAR_GP_PIN(4, 1), PUPR3, 9 }, /* VI0_DATA4 */
+ { RCAR_GP_PIN(4, 2), PUPR3, 10 }, /* VI0_DATA5 */
+ { RCAR_GP_PIN(4, 3), PUPR3, 11 }, /* VI0_DATA6 */
+ { RCAR_GP_PIN(4, 4), PUPR3, 12 }, /* VI0_DATA7 */
+ { RCAR_GP_PIN(4, 5), PUPR3, 13 }, /* VI0_G2 */
+ { RCAR_GP_PIN(4, 6), PUPR3, 14 }, /* VI0_G3 */
+ { RCAR_GP_PIN(4, 7), PUPR3, 15 }, /* VI0_G4 */
+ { RCAR_GP_PIN(4, 8), PUPR3, 16 }, /* VI0_G5 */
+ { RCAR_GP_PIN(4, 21), PUPR3, 17 }, /* VI1_DATA12 */
+ { RCAR_GP_PIN(4, 22), PUPR3, 18 }, /* VI1_DATA13 */
+ { RCAR_GP_PIN(4, 23), PUPR3, 19 }, /* VI1_DATA14 */
+ { RCAR_GP_PIN(4, 24), PUPR3, 20 }, /* VI1_DATA15 */
+ { RCAR_GP_PIN(4, 9), PUPR3, 21 }, /* ETH_REF_CLK */
+ { RCAR_GP_PIN(4, 10), PUPR3, 22 }, /* ETH_TXD0 */
+ { RCAR_GP_PIN(4, 11), PUPR3, 23 }, /* ETH_TXD1 */
+ { RCAR_GP_PIN(4, 12), PUPR3, 24 }, /* ETH_CRS_DV */
+ { RCAR_GP_PIN(4, 13), PUPR3, 25 }, /* ETH_TX_EN */
+ { RCAR_GP_PIN(4, 14), PUPR3, 26 }, /* ETH_RX_ER */
+ { RCAR_GP_PIN(4, 15), PUPR3, 27 }, /* ETH_RXD0 */
+ { RCAR_GP_PIN(4, 16), PUPR3, 28 }, /* ETH_RXD1 */
+ { RCAR_GP_PIN(4, 17), PUPR3, 29 }, /* ETH_MDC */
+ { RCAR_GP_PIN(4, 18), PUPR3, 30 }, /* ETH_MDIO */
+ { RCAR_GP_PIN(4, 19), PUPR3, 31 }, /* ETH_LINK */
+
+ { RCAR_GP_PIN(3, 6), PUPR4, 0 }, /* SSI_SCK012 */
+ { RCAR_GP_PIN(3, 7), PUPR4, 1 }, /* SSI_WS012 */
+ { RCAR_GP_PIN(3, 10), PUPR4, 2 }, /* SSI_SDATA0 */
+ { RCAR_GP_PIN(3, 9), PUPR4, 3 }, /* SSI_SDATA1 */
+ { RCAR_GP_PIN(3, 8), PUPR4, 4 }, /* SSI_SDATA2 */
+ { RCAR_GP_PIN(3, 2), PUPR4, 5 }, /* SSI_SCK34 */
+ { RCAR_GP_PIN(3, 3), PUPR4, 6 }, /* SSI_WS34 */
+ { RCAR_GP_PIN(3, 5), PUPR4, 7 }, /* SSI_SDATA3 */
+ { RCAR_GP_PIN(3, 4), PUPR4, 8 }, /* SSI_SDATA4 */
+ { RCAR_GP_PIN(2, 31), PUPR4, 9 }, /* SSI_SCK5 */
+ { RCAR_GP_PIN(3, 0), PUPR4, 10 }, /* SSI_WS5 */
+ { RCAR_GP_PIN(3, 1), PUPR4, 11 }, /* SSI_SDATA5 */
+ { RCAR_GP_PIN(2, 28), PUPR4, 12 }, /* SSI_SCK6 */
+ { RCAR_GP_PIN(2, 29), PUPR4, 13 }, /* SSI_WS6 */
+ { RCAR_GP_PIN(2, 30), PUPR4, 14 }, /* SSI_SDATA6 */
+ { RCAR_GP_PIN(2, 24), PUPR4, 15 }, /* SSI_SCK78 */
+ { RCAR_GP_PIN(2, 25), PUPR4, 16 }, /* SSI_WS78 */
+ { RCAR_GP_PIN(2, 27), PUPR4, 17 }, /* SSI_SDATA7 */
+ { RCAR_GP_PIN(2, 26), PUPR4, 18 }, /* SSI_SDATA8 */
+ { RCAR_GP_PIN(3, 23), PUPR4, 19 }, /* TCLK0 */
+ { RCAR_GP_PIN(3, 11), PUPR4, 20 }, /* SD0_CLK */
+ { RCAR_GP_PIN(3, 12), PUPR4, 21 }, /* SD0_CMD */
+ { RCAR_GP_PIN(3, 13), PUPR4, 22 }, /* SD0_DAT0 */
+ { RCAR_GP_PIN(3, 14), PUPR4, 23 }, /* SD0_DAT1 */
+ { RCAR_GP_PIN(3, 15), PUPR4, 24 }, /* SD0_DAT2 */
+ { RCAR_GP_PIN(3, 16), PUPR4, 25 }, /* SD0_DAT3 */
+ { RCAR_GP_PIN(3, 17), PUPR4, 26 }, /* SD0_CD */
+ { RCAR_GP_PIN(3, 18), PUPR4, 27 }, /* SD0_WP */
+ { RCAR_GP_PIN(2, 22), PUPR4, 28 }, /* AUDIO_CLKA */
+ { RCAR_GP_PIN(2, 23), PUPR4, 29 }, /* AUDIO_CLKB */
+ { RCAR_GP_PIN(1, 14), PUPR4, 30 }, /* IRQ2 */
+ { RCAR_GP_PIN(1, 15), PUPR4, 31 }, /* IRQ3 */
+
+ { RCAR_GP_PIN(0, 1), PUPR5, 0 }, /* PENC0 */
+ { RCAR_GP_PIN(0, 2), PUPR5, 1 }, /* PENC1 */
+ { RCAR_GP_PIN(0, 3), PUPR5, 2 }, /* USB_OVC0 */
+ { RCAR_GP_PIN(0, 4), PUPR5, 3 }, /* USB_OVC1 */
+ { RCAR_GP_PIN(1, 16), PUPR5, 4 }, /* SCIF_CLK */
+ { RCAR_GP_PIN(1, 17), PUPR5, 5 }, /* TX0 */
+ { RCAR_GP_PIN(1, 18), PUPR5, 6 }, /* RX0 */
+ { RCAR_GP_PIN(1, 19), PUPR5, 7 }, /* SCK0 */
+ { RCAR_GP_PIN(1, 20), PUPR5, 8 }, /* /CTS0 */
+ { RCAR_GP_PIN(1, 21), PUPR5, 9 }, /* /RTS0 */
+ { RCAR_GP_PIN(3, 19), PUPR5, 10 }, /* HSPI_CLK0 */
+ { RCAR_GP_PIN(3, 20), PUPR5, 11 }, /* /HSPI_CS0 */
+ { RCAR_GP_PIN(3, 21), PUPR5, 12 }, /* HSPI_RX0 */
+ { RCAR_GP_PIN(3, 22), PUPR5, 13 }, /* HSPI_TX0 */
+ { RCAR_GP_PIN(4, 20), PUPR5, 14 }, /* ETH_MAGIC */
+ { RCAR_GP_PIN(4, 25), PUPR5, 15 }, /* AVS1 */
+ { RCAR_GP_PIN(4, 26), PUPR5, 16 }, /* AVS2 */
};
static unsigned int r8a7778_pinmux_get_bias(struct sh_pfc *pfc,
unsigned int pin)
{
+ const struct sh_pfc_bias_info *info;
void __iomem *addr;
- if (WARN_ON_ONCE(!pullups[pin].reg))
+ info = sh_pfc_pin_to_bias_info(bias_info, ARRAY_SIZE(bias_info), pin);
+ if (!info)
return PIN_CONFIG_BIAS_DISABLE;
- addr = pfc->windows->virt + pullups[pin].reg;
+ addr = pfc->windows->virt + info->reg;
- if (ioread32(addr) & BIT(pullups[pin].bit))
+ if (ioread32(addr) & BIT(info->bit))
return PIN_CONFIG_BIAS_PULL_UP;
else
return PIN_CONFIG_BIAS_DISABLE;
@@ -3103,15 +3103,17 @@ static unsigned int r8a7778_pinmux_get_bias(struct sh_pfc *pfc,
static void r8a7778_pinmux_set_bias(struct sh_pfc *pfc, unsigned int pin,
unsigned int bias)
{
+ const struct sh_pfc_bias_info *info;
void __iomem *addr;
u32 value;
u32 bit;
- if (WARN_ON_ONCE(!pullups[pin].reg))
+ info = sh_pfc_pin_to_bias_info(bias_info, ARRAY_SIZE(bias_info), pin);
+ if (!info)
return;
- addr = pfc->windows->virt + pullups[pin].reg;
- bit = BIT(pullups[pin].bit);
+ addr = pfc->windows->virt + info->reg;
+ bit = BIT(info->bit);
value = ioread32(addr) & ~bit;
if (bias == PIN_CONFIG_BIAS_PULL_UP)
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7795.c b/drivers/pinctrl/sh-pfc/pfc-r8a7795.c
index 2e8cc2adbed7..135ed5cbeb44 100644
--- a/drivers/pinctrl/sh-pfc/pfc-r8a7795.c
+++ b/drivers/pinctrl/sh-pfc/pfc-r8a7795.c
@@ -523,6 +523,22 @@ MOD_SEL0_2_1 MOD_SEL1_2 \
MOD_SEL1_1 \
MOD_SEL1_0 MOD_SEL2_0
+/*
+ * These pins cannot be muxed but have other configurable properties,
+ * such as drive-strength or pull-up/pull-down enable.
+ */
+#define PINMUX_STATIC \
+ FM(QSPI0_SPCLK) FM(QSPI0_SSL) FM(QSPI0_MOSI_IO0) FM(QSPI0_MISO_IO1) \
+ FM(QSPI0_IO2) FM(QSPI0_IO3) \
+ FM(QSPI1_SPCLK) FM(QSPI1_SSL) FM(QSPI1_MOSI_IO0) FM(QSPI1_MISO_IO1) \
+ FM(QSPI1_IO2) FM(QSPI1_IO3) \
+ FM(RPC_INT) FM(RPC_WP) FM(RPC_RESET) \
+ FM(AVB_TX_CTL) FM(AVB_TXC) FM(AVB_TD0) FM(AVB_TD1) FM(AVB_TD2) FM(AVB_TD3) \
+ FM(AVB_RX_CTL) FM(AVB_RXC) FM(AVB_RD0) FM(AVB_RD1) FM(AVB_RD2) FM(AVB_RD3) \
+ FM(AVB_TXCREFCLK) FM(AVB_MDIO) \
+ FM(CLKOUT) FM(PRESETOUT) \
+ FM(DU_DOTCLKIN0) FM(DU_DOTCLKIN1) FM(DU_DOTCLKIN2) FM(DU_DOTCLKIN3) \
+ FM(TMS) FM(TDO) FM(ASEBRK) FM(MLB_REF)
enum {
PINMUX_RESERVED = 0,
@@ -548,6 +564,7 @@ enum {
PINMUX_GPSR
PINMUX_IPSR
PINMUX_MOD_SELS
+ PINMUX_STATIC
PINMUX_MARK_END,
#undef F_
#undef FM
@@ -1412,10 +1429,78 @@ static const u16 pinmux_data[] = {
PINMUX_IPSR_MSEL(IP17_7_4, STP_ISSYNC_0_E, SEL_SSP1_0_4),
PINMUX_IPSR_MSEL(IP17_7_4, RIF2_D1_B, SEL_DRIF2_1),
PINMUX_IPSR_GPSR(IP17_7_4, TPU0TO3),
+
+/*
+ * Static pins cannot be muxed between different functions but
+ * still need a mark entry in the pinmux list. Add each static
+ * pin to the list without an associated function. The sh-pfc
+ * core will do the right thing and skip trying to mux the pin
+ * while still applying configuration to it.
+ */
+#define FM(x) PINMUX_DATA(x##_MARK, 0),
+ PINMUX_STATIC
+#undef FM
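+/*
+ * For instance, FM(CLKOUT) above expands, by plain token pasting from
+ * the FM() definition, to "PINMUX_DATA(CLKOUT_MARK, 0)," -- a mark
+ * entry with no associated mux function.
+ */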
};
+/*
+ * R8A7795 has 8 banks with 32 GPIOs in each => 256 GPIOs.
+ * Physical layout rows: A - AW, cols: 1 - 39.
+ */
+#define ROW_GROUP_A(r) ('Z' - 'A' + 1 + (r))
+#define PIN_NUMBER(r, c) (((r) - 'A') * 39 + (c) + 300)
+#define PIN_A_NUMBER(r, c) PIN_NUMBER(ROW_GROUP_A(r), c)
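+
+/*
+ * Worked examples (straight arithmetic from the macros above):
+ *	PIN_NUMBER('A', 8)   =  0 * 39 + 8 + 300 =  308
+ *	PIN_A_NUMBER('B', 4) = 27 * 39 + 4 + 300 = 1357
+ * Single-letter rows and the AA-AW row group thus map to disjoint
+ * pin-number ranges.
+ */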
+
static const struct sh_pfc_pin pinmux_pins[] = {
PINMUX_GPIO_GP_ALL(),
+
+ /*
+ * Pins not associated with a GPIO port.
+ *
+	 * The pin positions differ between r8a7795 packages; all the
+	 * pfc driver needs is a unique number for each pin. To this
+	 * end, use the pin layout from the R-Car H3SiP package to
+	 * calculate a unique number for each pin.
+ */
+ SH_PFC_PIN_NAMED_CFG('A', 8, AVB_TX_CTL, SH_PFC_PIN_CFG_DRIVE_STRENGTH),
+ SH_PFC_PIN_NAMED_CFG('A', 9, AVB_MDIO, SH_PFC_PIN_CFG_DRIVE_STRENGTH),
+ SH_PFC_PIN_NAMED_CFG('A', 12, AVB_TXCREFCLK, SH_PFC_PIN_CFG_DRIVE_STRENGTH),
+ SH_PFC_PIN_NAMED_CFG('A', 13, AVB_RD0, SH_PFC_PIN_CFG_DRIVE_STRENGTH),
+ SH_PFC_PIN_NAMED_CFG('A', 14, AVB_RD2, SH_PFC_PIN_CFG_DRIVE_STRENGTH),
+ SH_PFC_PIN_NAMED_CFG('A', 16, AVB_RX_CTL, SH_PFC_PIN_CFG_DRIVE_STRENGTH),
+ SH_PFC_PIN_NAMED_CFG('A', 17, AVB_TD2, SH_PFC_PIN_CFG_DRIVE_STRENGTH),
+ SH_PFC_PIN_NAMED_CFG('A', 18, AVB_TD0, SH_PFC_PIN_CFG_DRIVE_STRENGTH),
+ SH_PFC_PIN_NAMED_CFG('A', 19, AVB_TXC, SH_PFC_PIN_CFG_DRIVE_STRENGTH),
+ SH_PFC_PIN_NAMED_CFG('B', 13, AVB_RD1, SH_PFC_PIN_CFG_DRIVE_STRENGTH),
+ SH_PFC_PIN_NAMED_CFG('B', 14, AVB_RD3, SH_PFC_PIN_CFG_DRIVE_STRENGTH),
+ SH_PFC_PIN_NAMED_CFG('B', 17, AVB_TD3, SH_PFC_PIN_CFG_DRIVE_STRENGTH),
+ SH_PFC_PIN_NAMED_CFG('B', 18, AVB_TD1, SH_PFC_PIN_CFG_DRIVE_STRENGTH),
+ SH_PFC_PIN_NAMED_CFG('B', 19, AVB_RXC, SH_PFC_PIN_CFG_DRIVE_STRENGTH),
+ SH_PFC_PIN_NAMED_CFG('C', 1, PRESETOUT#, SH_PFC_PIN_CFG_DRIVE_STRENGTH),
+ SH_PFC_PIN_NAMED_CFG('F', 1, CLKOUT, SH_PFC_PIN_CFG_DRIVE_STRENGTH),
+ SH_PFC_PIN_NAMED_CFG('H', 37, MLB_REF, SH_PFC_PIN_CFG_DRIVE_STRENGTH),
+ SH_PFC_PIN_NAMED_CFG('V', 3, QSPI1_SPCLK, SH_PFC_PIN_CFG_DRIVE_STRENGTH),
+ SH_PFC_PIN_NAMED_CFG('V', 5, QSPI1_SSL, SH_PFC_PIN_CFG_DRIVE_STRENGTH),
+ SH_PFC_PIN_NAMED_CFG('V', 6, RPC_WP#, SH_PFC_PIN_CFG_DRIVE_STRENGTH),
+ SH_PFC_PIN_NAMED_CFG('V', 7, RPC_RESET#, SH_PFC_PIN_CFG_DRIVE_STRENGTH),
+ SH_PFC_PIN_NAMED_CFG('W', 3, QSPI0_SPCLK, SH_PFC_PIN_CFG_DRIVE_STRENGTH),
+ SH_PFC_PIN_NAMED_CFG('Y', 3, QSPI0_SSL, SH_PFC_PIN_CFG_DRIVE_STRENGTH),
+ SH_PFC_PIN_NAMED_CFG('Y', 6, QSPI0_IO2, SH_PFC_PIN_CFG_DRIVE_STRENGTH),
+ SH_PFC_PIN_NAMED_CFG('Y', 7, RPC_INT#, SH_PFC_PIN_CFG_DRIVE_STRENGTH),
+ SH_PFC_PIN_NAMED_CFG(ROW_GROUP_A('B'), 4, QSPI0_MISO_IO1, SH_PFC_PIN_CFG_DRIVE_STRENGTH),
+ SH_PFC_PIN_NAMED_CFG(ROW_GROUP_A('B'), 6, QSPI0_IO3, SH_PFC_PIN_CFG_DRIVE_STRENGTH),
+ SH_PFC_PIN_NAMED_CFG(ROW_GROUP_A('C'), 3, QSPI1_IO3, SH_PFC_PIN_CFG_DRIVE_STRENGTH),
+ SH_PFC_PIN_NAMED_CFG(ROW_GROUP_A('C'), 5, QSPI0_MOSI_IO0, SH_PFC_PIN_CFG_DRIVE_STRENGTH),
+ SH_PFC_PIN_NAMED_CFG(ROW_GROUP_A('C'), 7, QSPI1_MOSI_IO0, SH_PFC_PIN_CFG_DRIVE_STRENGTH),
+ SH_PFC_PIN_NAMED_CFG(ROW_GROUP_A('D'), 38, FSCLKST#, SH_PFC_PIN_CFG_DRIVE_STRENGTH),
+ SH_PFC_PIN_NAMED_CFG(ROW_GROUP_A('E'), 4, QSPI1_IO2, SH_PFC_PIN_CFG_DRIVE_STRENGTH),
+ SH_PFC_PIN_NAMED_CFG(ROW_GROUP_A('E'), 5, QSPI1_MISO_IO1, SH_PFC_PIN_CFG_DRIVE_STRENGTH),
+ SH_PFC_PIN_NAMED_CFG(ROW_GROUP_A('P'), 7, DU_DOTCLKIN0, SH_PFC_PIN_CFG_DRIVE_STRENGTH),
+ SH_PFC_PIN_NAMED_CFG(ROW_GROUP_A('P'), 8, DU_DOTCLKIN1, SH_PFC_PIN_CFG_DRIVE_STRENGTH),
+ SH_PFC_PIN_NAMED_CFG(ROW_GROUP_A('R'), 7, DU_DOTCLKIN2, SH_PFC_PIN_CFG_DRIVE_STRENGTH),
+ SH_PFC_PIN_NAMED_CFG(ROW_GROUP_A('R'), 8, DU_DOTCLKIN3, SH_PFC_PIN_CFG_DRIVE_STRENGTH),
+ SH_PFC_PIN_NAMED_CFG(ROW_GROUP_A('R'), 30, TMS, SH_PFC_PIN_CFG_DRIVE_STRENGTH),
+ SH_PFC_PIN_NAMED_CFG(ROW_GROUP_A('T'), 28, TDO, SH_PFC_PIN_CFG_DRIVE_STRENGTH),
+ SH_PFC_PIN_NAMED_CFG(ROW_GROUP_A('T'), 30, ASEBRK, SH_PFC_PIN_CFG_DRIVE_STRENGTH),
};
/* - AUDIO CLOCK ------------------------------------------------------------ */
@@ -1563,11 +1648,33 @@ static const unsigned int avb_phy_int_mux[] = {
AVB_PHY_INT_MARK,
};
static const unsigned int avb_mdc_pins[] = {
- /* AVB_MDC */
- RCAR_GP_PIN(2, 9),
+ /* AVB_MDC, AVB_MDIO */
+ RCAR_GP_PIN(2, 9), PIN_NUMBER('A', 9),
};
static const unsigned int avb_mdc_mux[] = {
- AVB_MDC_MARK,
+ AVB_MDC_MARK, AVB_MDIO_MARK,
+};
+static const unsigned int avb_mii_pins[] = {
+ /*
+ * AVB_TX_CTL, AVB_TXC, AVB_TD0,
+ * AVB_TD1, AVB_TD2, AVB_TD3,
+ * AVB_RX_CTL, AVB_RXC, AVB_RD0,
+ * AVB_RD1, AVB_RD2, AVB_RD3,
+ * AVB_TXCREFCLK
+ */
+ PIN_NUMBER('A', 8), PIN_NUMBER('A', 19), PIN_NUMBER('A', 18),
+ PIN_NUMBER('B', 18), PIN_NUMBER('A', 17), PIN_NUMBER('B', 17),
+ PIN_NUMBER('A', 16), PIN_NUMBER('B', 19), PIN_NUMBER('A', 13),
+ PIN_NUMBER('B', 13), PIN_NUMBER('A', 14), PIN_NUMBER('B', 14),
+ PIN_NUMBER('A', 12),
+};
+static const unsigned int avb_mii_mux[] = {
+ AVB_TX_CTL_MARK, AVB_TXC_MARK, AVB_TD0_MARK,
+ AVB_TD1_MARK, AVB_TD2_MARK, AVB_TD3_MARK,
+ AVB_RX_CTL_MARK, AVB_RXC_MARK, AVB_RD0_MARK,
+ AVB_RD1_MARK, AVB_RD2_MARK, AVB_RD3_MARK,
+ AVB_TXCREFCLK_MARK,
};
static const unsigned int avb_avtp_pps_pins[] = {
/* AVB_AVTP_PPS */
@@ -3613,6 +3720,55 @@ static const unsigned int usb2_mux[] = {
USB2_PWEN_MARK, USB2_OVC_MARK,
};
+/* - QSPI0 ------------------------------------------------------------------ */
+static const unsigned int qspi0_ctrl_pins[] = {
+ /* QSPI0_SPCLK, QSPI0_SSL */
+ PIN_NUMBER('W', 3), PIN_NUMBER('Y', 3),
+};
+static const unsigned int qspi0_ctrl_mux[] = {
+ QSPI0_SPCLK_MARK, QSPI0_SSL_MARK,
+};
+static const unsigned int qspi0_data2_pins[] = {
+ /* QSPI0_MOSI_IO0, QSPI0_MISO_IO1 */
+ PIN_A_NUMBER('C', 5), PIN_A_NUMBER('B', 4),
+};
+static const unsigned int qspi0_data2_mux[] = {
+ QSPI0_MOSI_IO0_MARK, QSPI0_MISO_IO1_MARK,
+};
+static const unsigned int qspi0_data4_pins[] = {
+ /* QSPI0_MOSI_IO0, QSPI0_MISO_IO1, QSPI0_IO2, QSPI0_IO3 */
+ PIN_A_NUMBER('C', 5), PIN_A_NUMBER('B', 4),
+ PIN_NUMBER('Y', 6), PIN_A_NUMBER('B', 6),
+};
+static const unsigned int qspi0_data4_mux[] = {
+ QSPI0_MOSI_IO0_MARK, QSPI0_MISO_IO1_MARK,
+ QSPI0_IO2_MARK, QSPI0_IO3_MARK,
+};
+/* - QSPI1 ------------------------------------------------------------------ */
+static const unsigned int qspi1_ctrl_pins[] = {
+ /* QSPI1_SPCLK, QSPI1_SSL */
+ PIN_NUMBER('V', 3), PIN_NUMBER('V', 5),
+};
+static const unsigned int qspi1_ctrl_mux[] = {
+ QSPI1_SPCLK_MARK, QSPI1_SSL_MARK,
+};
+static const unsigned int qspi1_data2_pins[] = {
+ /* QSPI1_MOSI_IO0, QSPI1_MISO_IO1 */
+ PIN_A_NUMBER('C', 7), PIN_A_NUMBER('E', 5),
+};
+static const unsigned int qspi1_data2_mux[] = {
+ QSPI1_MOSI_IO0_MARK, QSPI1_MISO_IO1_MARK,
+};
+static const unsigned int qspi1_data4_pins[] = {
+ /* QSPI1_MOSI_IO0, QSPI1_MISO_IO1, QSPI1_IO2, QSPI1_IO3 */
+ PIN_A_NUMBER('C', 7), PIN_A_NUMBER('E', 5),
+ PIN_A_NUMBER('E', 4), PIN_A_NUMBER('C', 3),
+};
+static const unsigned int qspi1_data4_mux[] = {
+ QSPI1_MOSI_IO0_MARK, QSPI1_MISO_IO1_MARK,
+ QSPI1_IO2_MARK, QSPI1_IO3_MARK,
+};
+
static const struct sh_pfc_pin_group pinmux_groups[] = {
SH_PFC_PIN_GROUP(audio_clk_a_a),
SH_PFC_PIN_GROUP(audio_clk_a_b),
@@ -3635,6 +3791,7 @@ static const struct sh_pfc_pin_group pinmux_groups[] = {
SH_PFC_PIN_GROUP(avb_magic),
SH_PFC_PIN_GROUP(avb_phy_int),
SH_PFC_PIN_GROUP(avb_mdc),
+ SH_PFC_PIN_GROUP(avb_mii),
SH_PFC_PIN_GROUP(avb_avtp_pps),
SH_PFC_PIN_GROUP(avb_avtp_match_a),
SH_PFC_PIN_GROUP(avb_avtp_capture_a),
@@ -3912,6 +4069,12 @@ static const struct sh_pfc_pin_group pinmux_groups[] = {
SH_PFC_PIN_GROUP(usb0),
SH_PFC_PIN_GROUP(usb1),
SH_PFC_PIN_GROUP(usb2),
+ SH_PFC_PIN_GROUP(qspi0_ctrl),
+ SH_PFC_PIN_GROUP(qspi0_data2),
+ SH_PFC_PIN_GROUP(qspi0_data4),
+ SH_PFC_PIN_GROUP(qspi1_ctrl),
+ SH_PFC_PIN_GROUP(qspi1_data2),
+ SH_PFC_PIN_GROUP(qspi1_data4),
};
static const char * const audio_clk_groups[] = {
@@ -3939,6 +4102,7 @@ static const char * const avb_groups[] = {
"avb_magic",
"avb_phy_int",
"avb_mdc",
+ "avb_mii",
"avb_avtp_pps",
"avb_avtp_match_a",
"avb_avtp_capture_a",
@@ -4356,6 +4520,18 @@ static const char * const usb2_groups[] = {
"usb2",
};
+static const char * const qspi0_groups[] = {
+ "qspi0_ctrl",
+ "qspi0_data2",
+ "qspi0_data4",
+};
+
+static const char * const qspi1_groups[] = {
+ "qspi1_ctrl",
+ "qspi1_data2",
+ "qspi1_data4",
+};
+
static const struct sh_pfc_function pinmux_functions[] = {
SH_PFC_FUNCTION(audio_clk),
SH_PFC_FUNCTION(avb),
@@ -4405,6 +4581,8 @@ static const struct sh_pfc_function pinmux_functions[] = {
SH_PFC_FUNCTION(usb0),
SH_PFC_FUNCTION(usb1),
SH_PFC_FUNCTION(usb2),
+ SH_PFC_FUNCTION(qspi0),
+ SH_PFC_FUNCTION(qspi1),
};
static const struct pinmux_cfg_reg pinmux_config_regs[] = {
@@ -4962,10 +5140,45 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
};
static const struct pinmux_drive_reg pinmux_drive_regs[] = {
+ { PINMUX_DRIVE_REG("DRVCTRL0", 0xe6060300) {
+ { PIN_NUMBER('W', 3), 28, 2 }, /* QSPI0_SPCLK */
+ { PIN_A_NUMBER('C', 5), 24, 2 }, /* QSPI0_MOSI_IO0 */
+ { PIN_A_NUMBER('B', 4), 20, 2 }, /* QSPI0_MISO_IO1 */
+ { PIN_NUMBER('Y', 6), 16, 2 }, /* QSPI0_IO2 */
+ { PIN_A_NUMBER('B', 6), 12, 2 }, /* QSPI0_IO3 */
+ { PIN_NUMBER('Y', 3), 8, 2 }, /* QSPI0_SSL */
+ { PIN_NUMBER('V', 3), 4, 2 }, /* QSPI1_SPCLK */
+ { PIN_A_NUMBER('C', 7), 0, 2 }, /* QSPI1_MOSI_IO0 */
+ } },
+ { PINMUX_DRIVE_REG("DRVCTRL1", 0xe6060304) {
+ { PIN_A_NUMBER('E', 5), 28, 2 }, /* QSPI1_MISO_IO1 */
+ { PIN_A_NUMBER('E', 4), 24, 2 }, /* QSPI1_IO2 */
+ { PIN_A_NUMBER('C', 3), 20, 2 }, /* QSPI1_IO3 */
+ { PIN_NUMBER('V', 5), 16, 2 }, /* QSPI1_SSL */
+ { PIN_NUMBER('Y', 7), 12, 2 }, /* RPC_INT# */
+ { PIN_NUMBER('V', 6), 8, 2 }, /* RPC_WP# */
+ { PIN_NUMBER('V', 7), 4, 2 }, /* RPC_RESET# */
+ { PIN_NUMBER('A', 16), 0, 3 }, /* AVB_RX_CTL */
+ } },
+ { PINMUX_DRIVE_REG("DRVCTRL2", 0xe6060308) {
+ { PIN_NUMBER('B', 19), 28, 3 }, /* AVB_RXC */
+ { PIN_NUMBER('A', 13), 24, 3 }, /* AVB_RD0 */
+ { PIN_NUMBER('B', 13), 20, 3 }, /* AVB_RD1 */
+ { PIN_NUMBER('A', 14), 16, 3 }, /* AVB_RD2 */
+ { PIN_NUMBER('B', 14), 12, 3 }, /* AVB_RD3 */
+ { PIN_NUMBER('A', 8), 8, 3 }, /* AVB_TX_CTL */
+ { PIN_NUMBER('A', 19), 4, 3 }, /* AVB_TXC */
+ { PIN_NUMBER('A', 18), 0, 3 }, /* AVB_TD0 */
+ } },
{ PINMUX_DRIVE_REG("DRVCTRL3", 0xe606030c) {
- { RCAR_GP_PIN(2, 9), 8, 3 }, /* AVB_MDC */
- { RCAR_GP_PIN(2, 10), 4, 3 }, /* AVB_MAGIC */
- { RCAR_GP_PIN(2, 11), 0, 3 }, /* AVB_PHY_INT */
+ { PIN_NUMBER('B', 18), 28, 3 }, /* AVB_TD1 */
+ { PIN_NUMBER('A', 17), 24, 3 }, /* AVB_TD2 */
+ { PIN_NUMBER('B', 17), 20, 3 }, /* AVB_TD3 */
+ { PIN_NUMBER('A', 12), 16, 3 }, /* AVB_TXCREFCLK */
+ { PIN_NUMBER('A', 9), 12, 3 }, /* AVB_MDIO */
+ { RCAR_GP_PIN(2, 9), 8, 3 }, /* AVB_MDC */
+ { RCAR_GP_PIN(2, 10), 4, 3 }, /* AVB_MAGIC */
+ { RCAR_GP_PIN(2, 11), 0, 3 }, /* AVB_PHY_INT */
} },
{ PINMUX_DRIVE_REG("DRVCTRL4", 0xe6060310) {
{ RCAR_GP_PIN(2, 12), 28, 3 }, /* AVB_LINK */
@@ -5008,6 +5221,7 @@ static const struct pinmux_drive_reg pinmux_drive_regs[] = {
{ RCAR_GP_PIN(1, 19), 0, 3 }, /* A19 */
} },
{ PINMUX_DRIVE_REG("DRVCTRL8", 0xe6060320) {
+ { PIN_NUMBER('F', 1), 28, 3 }, /* CLKOUT */
{ RCAR_GP_PIN(1, 20), 24, 3 }, /* CS0 */
{ RCAR_GP_PIN(1, 21), 20, 3 }, /* CS1_A26 */
{ RCAR_GP_PIN(1, 22), 16, 3 }, /* BS */
@@ -5018,6 +5232,7 @@ static const struct pinmux_drive_reg pinmux_drive_regs[] = {
} },
{ PINMUX_DRIVE_REG("DRVCTRL9", 0xe6060324) {
{ RCAR_GP_PIN(1, 27), 28, 3 }, /* EX_WAIT0 */
+ { PIN_NUMBER('C', 1), 24, 3 }, /* PRESETOUT# */
{ RCAR_GP_PIN(0, 0), 20, 3 }, /* D0 */
{ RCAR_GP_PIN(0, 1), 16, 3 }, /* D1 */
{ RCAR_GP_PIN(0, 2), 12, 3 }, /* D2 */
@@ -5036,20 +5251,30 @@ static const struct pinmux_drive_reg pinmux_drive_regs[] = {
{ RCAR_GP_PIN(0, 13), 0, 3 }, /* D13 */
} },
{ PINMUX_DRIVE_REG("DRVCTRL11", 0xe606032c) {
- { RCAR_GP_PIN(0, 14), 28, 3 }, /* D14 */
- { RCAR_GP_PIN(0, 15), 24, 3 }, /* D15 */
- { RCAR_GP_PIN(7, 0), 20, 3 }, /* AVS1 */
- { RCAR_GP_PIN(7, 1), 16, 3 }, /* AVS2 */
- { RCAR_GP_PIN(7, 2), 12, 3 }, /* HDMI0_CEC */
- { RCAR_GP_PIN(7, 3), 8, 3 }, /* HDMI1_CEC */
+ { RCAR_GP_PIN(0, 14), 28, 3 }, /* D14 */
+ { RCAR_GP_PIN(0, 15), 24, 3 }, /* D15 */
+ { RCAR_GP_PIN(7, 0), 20, 3 }, /* AVS1 */
+ { RCAR_GP_PIN(7, 1), 16, 3 }, /* AVS2 */
+ { RCAR_GP_PIN(7, 2), 12, 3 }, /* HDMI0_CEC */
+ { RCAR_GP_PIN(7, 3), 8, 3 }, /* HDMI1_CEC */
+ { PIN_A_NUMBER('P', 7), 4, 2 }, /* DU_DOTCLKIN0 */
+ { PIN_A_NUMBER('P', 8), 0, 2 }, /* DU_DOTCLKIN1 */
+ } },
+ { PINMUX_DRIVE_REG("DRVCTRL12", 0xe6060330) {
+ { PIN_A_NUMBER('R', 7), 28, 2 }, /* DU_DOTCLKIN2 */
+ { PIN_A_NUMBER('R', 8), 24, 2 }, /* DU_DOTCLKIN3 */
+ { PIN_A_NUMBER('D', 38), 20, 2 }, /* FSCLKST# */
+ { PIN_A_NUMBER('R', 30), 4, 2 }, /* TMS */
} },
{ PINMUX_DRIVE_REG("DRVCTRL13", 0xe6060334) {
- { RCAR_GP_PIN(3, 0), 20, 3 }, /* SD0_CLK */
- { RCAR_GP_PIN(3, 1), 16, 3 }, /* SD0_CMD */
- { RCAR_GP_PIN(3, 2), 12, 3 }, /* SD0_DAT0 */
- { RCAR_GP_PIN(3, 3), 8, 3 }, /* SD0_DAT1 */
- { RCAR_GP_PIN(3, 4), 4, 3 }, /* SD0_DAT2 */
- { RCAR_GP_PIN(3, 5), 0, 3 }, /* SD0_DAT3 */
+ { PIN_A_NUMBER('T', 28), 28, 2 }, /* TDO */
+ { PIN_A_NUMBER('T', 30), 24, 2 }, /* ASEBRK */
+ { RCAR_GP_PIN(3, 0), 20, 3 }, /* SD0_CLK */
+ { RCAR_GP_PIN(3, 1), 16, 3 }, /* SD0_CMD */
+ { RCAR_GP_PIN(3, 2), 12, 3 }, /* SD0_DAT0 */
+ { RCAR_GP_PIN(3, 3), 8, 3 }, /* SD0_DAT1 */
+ { RCAR_GP_PIN(3, 4), 4, 3 }, /* SD0_DAT2 */
+ { RCAR_GP_PIN(3, 5), 0, 3 }, /* SD0_DAT3 */
} },
{ PINMUX_DRIVE_REG("DRVCTRL14", 0xe6060338) {
{ RCAR_GP_PIN(3, 6), 28, 3 }, /* SD1_CLK */
@@ -5118,6 +5343,7 @@ static const struct pinmux_drive_reg pinmux_drive_regs[] = {
{ RCAR_GP_PIN(5, 23), 16, 3 }, /* MLB_CLK */
{ RCAR_GP_PIN(5, 24), 12, 3 }, /* MLB_SIG */
{ RCAR_GP_PIN(5, 25), 8, 3 }, /* MLB_DAT */
+ { PIN_NUMBER('H', 37), 4, 3 }, /* MLB_REF */
{ RCAR_GP_PIN(6, 0), 0, 3 }, /* SSI_SCK01239 */
} },
{ PINMUX_DRIVE_REG("DRVCTRL21", 0xe6060354) {
@@ -5188,206 +5414,206 @@ static int r8a7795_pin_to_pocctrl(struct sh_pfc *pfc, unsigned int pin, u32 *poc
#define PU5 0x14
#define PU6 0x18
-static const struct {
- u16 reg : 11;
- u16 bit : 5;
-} pullups[] = {
- [RCAR_GP_PIN(2, 11)] = { PU0, 31 }, /* AVB_PHY_INT */
- [RCAR_GP_PIN(2, 10)] = { PU0, 30 }, /* AVB_MAGIC */
- [RCAR_GP_PIN(2, 9)] = { PU0, 29 }, /* AVB_MDC */
-
- [RCAR_GP_PIN(1, 19)] = { PU1, 31 }, /* A19 */
- [RCAR_GP_PIN(1, 18)] = { PU1, 30 }, /* A18 */
- [RCAR_GP_PIN(1, 17)] = { PU1, 29 }, /* A17 */
- [RCAR_GP_PIN(1, 16)] = { PU1, 28 }, /* A16 */
- [RCAR_GP_PIN(1, 15)] = { PU1, 27 }, /* A15 */
- [RCAR_GP_PIN(1, 14)] = { PU1, 26 }, /* A14 */
- [RCAR_GP_PIN(1, 13)] = { PU1, 25 }, /* A13 */
- [RCAR_GP_PIN(1, 12)] = { PU1, 24 }, /* A12 */
- [RCAR_GP_PIN(1, 11)] = { PU1, 23 }, /* A11 */
- [RCAR_GP_PIN(1, 10)] = { PU1, 22 }, /* A10 */
- [RCAR_GP_PIN(1, 9)] = { PU1, 21 }, /* A9 */
- [RCAR_GP_PIN(1, 8)] = { PU1, 20 }, /* A8 */
- [RCAR_GP_PIN(1, 7)] = { PU1, 19 }, /* A7 */
- [RCAR_GP_PIN(1, 6)] = { PU1, 18 }, /* A6 */
- [RCAR_GP_PIN(1, 5)] = { PU1, 17 }, /* A5 */
- [RCAR_GP_PIN(1, 4)] = { PU1, 16 }, /* A4 */
- [RCAR_GP_PIN(1, 3)] = { PU1, 15 }, /* A3 */
- [RCAR_GP_PIN(1, 2)] = { PU1, 14 }, /* A2 */
- [RCAR_GP_PIN(1, 1)] = { PU1, 13 }, /* A1 */
- [RCAR_GP_PIN(1, 0)] = { PU1, 12 }, /* A0 */
- [RCAR_GP_PIN(2, 8)] = { PU1, 11 }, /* PWM2_A */
- [RCAR_GP_PIN(2, 7)] = { PU1, 10 }, /* PWM1_A */
- [RCAR_GP_PIN(2, 6)] = { PU1, 9 }, /* PWM0 */
- [RCAR_GP_PIN(2, 5)] = { PU1, 8 }, /* IRQ5 */
- [RCAR_GP_PIN(2, 4)] = { PU1, 7 }, /* IRQ4 */
- [RCAR_GP_PIN(2, 3)] = { PU1, 6 }, /* IRQ3 */
- [RCAR_GP_PIN(2, 2)] = { PU1, 5 }, /* IRQ2 */
- [RCAR_GP_PIN(2, 1)] = { PU1, 4 }, /* IRQ1 */
- [RCAR_GP_PIN(2, 0)] = { PU1, 3 }, /* IRQ0 */
- [RCAR_GP_PIN(2, 14)] = { PU1, 2 }, /* AVB_AVTP_CAPTURE_A */
- [RCAR_GP_PIN(2, 13)] = { PU1, 1 }, /* AVB_AVTP_MATCH_A */
- [RCAR_GP_PIN(2, 12)] = { PU1, 0 }, /* AVB_LINK */
-
- [RCAR_GP_PIN(7, 3)] = { PU2, 29 }, /* HDMI1_CEC */
- [RCAR_GP_PIN(7, 2)] = { PU2, 28 }, /* HDMI0_CEC */
- [RCAR_GP_PIN(7, 1)] = { PU2, 27 }, /* AVS2 */
- [RCAR_GP_PIN(7, 0)] = { PU2, 26 }, /* AVS1 */
- [RCAR_GP_PIN(0, 15)] = { PU2, 25 }, /* D15 */
- [RCAR_GP_PIN(0, 14)] = { PU2, 24 }, /* D14 */
- [RCAR_GP_PIN(0, 13)] = { PU2, 23 }, /* D13 */
- [RCAR_GP_PIN(0, 12)] = { PU2, 22 }, /* D12 */
- [RCAR_GP_PIN(0, 11)] = { PU2, 21 }, /* D11 */
- [RCAR_GP_PIN(0, 10)] = { PU2, 20 }, /* D10 */
- [RCAR_GP_PIN(0, 9)] = { PU2, 19 }, /* D9 */
- [RCAR_GP_PIN(0, 8)] = { PU2, 18 }, /* D8 */
- [RCAR_GP_PIN(0, 7)] = { PU2, 17 }, /* D7 */
- [RCAR_GP_PIN(0, 6)] = { PU2, 16 }, /* D6 */
- [RCAR_GP_PIN(0, 5)] = { PU2, 15 }, /* D5 */
- [RCAR_GP_PIN(0, 4)] = { PU2, 14 }, /* D4 */
- [RCAR_GP_PIN(0, 3)] = { PU2, 13 }, /* D3 */
- [RCAR_GP_PIN(0, 2)] = { PU2, 12 }, /* D2 */
- [RCAR_GP_PIN(0, 1)] = { PU2, 11 }, /* D1 */
- [RCAR_GP_PIN(0, 0)] = { PU2, 10 }, /* D0 */
- [RCAR_GP_PIN(1, 27)] = { PU2, 8 }, /* EX_WAIT0_A */
- [RCAR_GP_PIN(1, 26)] = { PU2, 7 }, /* WE1_N */
- [RCAR_GP_PIN(1, 25)] = { PU2, 6 }, /* WE0_N */
- [RCAR_GP_PIN(1, 24)] = { PU2, 5 }, /* RD_WR_N */
- [RCAR_GP_PIN(1, 23)] = { PU2, 4 }, /* RD_N */
- [RCAR_GP_PIN(1, 22)] = { PU2, 3 }, /* BS_N */
- [RCAR_GP_PIN(1, 21)] = { PU2, 2 }, /* CS1_N_A26 */
- [RCAR_GP_PIN(1, 20)] = { PU2, 1 }, /* CS0_N */
-
- [RCAR_GP_PIN(4, 9)] = { PU3, 31 }, /* SD3_DAT0 */
- [RCAR_GP_PIN(4, 8)] = { PU3, 30 }, /* SD3_CMD */
- [RCAR_GP_PIN(4, 7)] = { PU3, 29 }, /* SD3_CLK */
- [RCAR_GP_PIN(4, 6)] = { PU3, 28 }, /* SD2_DS */
- [RCAR_GP_PIN(4, 5)] = { PU3, 27 }, /* SD2_DAT3 */
- [RCAR_GP_PIN(4, 4)] = { PU3, 26 }, /* SD2_DAT2 */
- [RCAR_GP_PIN(4, 3)] = { PU3, 25 }, /* SD2_DAT1 */
- [RCAR_GP_PIN(4, 2)] = { PU3, 24 }, /* SD2_DAT0 */
- [RCAR_GP_PIN(4, 1)] = { PU3, 23 }, /* SD2_CMD */
- [RCAR_GP_PIN(4, 0)] = { PU3, 22 }, /* SD2_CLK */
- [RCAR_GP_PIN(3, 11)] = { PU3, 21 }, /* SD1_DAT3 */
- [RCAR_GP_PIN(3, 10)] = { PU3, 20 }, /* SD1_DAT2 */
- [RCAR_GP_PIN(3, 9)] = { PU3, 19 }, /* SD1_DAT1 */
- [RCAR_GP_PIN(3, 8)] = { PU3, 18 }, /* SD1_DAT0 */
- [RCAR_GP_PIN(3, 7)] = { PU3, 17 }, /* SD1_CMD */
- [RCAR_GP_PIN(3, 6)] = { PU3, 16 }, /* SD1_CLK */
- [RCAR_GP_PIN(3, 5)] = { PU3, 15 }, /* SD0_DAT3 */
- [RCAR_GP_PIN(3, 4)] = { PU3, 14 }, /* SD0_DAT2 */
- [RCAR_GP_PIN(3, 3)] = { PU3, 13 }, /* SD0_DAT1 */
- [RCAR_GP_PIN(3, 2)] = { PU3, 12 }, /* SD0_DAT0 */
- [RCAR_GP_PIN(3, 1)] = { PU3, 11 }, /* SD0_CMD */
- [RCAR_GP_PIN(3, 0)] = { PU3, 10 }, /* SD0_CLK */
-
- [RCAR_GP_PIN(5, 19)] = { PU4, 31 }, /* MSIOF0_SS1 */
- [RCAR_GP_PIN(5, 18)] = { PU4, 30 }, /* MSIOF0_SYNC */
- [RCAR_GP_PIN(5, 17)] = { PU4, 29 }, /* MSIOF0_SCK */
- [RCAR_GP_PIN(5, 16)] = { PU4, 28 }, /* HRTS0_N */
- [RCAR_GP_PIN(5, 15)] = { PU4, 27 }, /* HCTS0_N */
- [RCAR_GP_PIN(5, 14)] = { PU4, 26 }, /* HTX0 */
- [RCAR_GP_PIN(5, 13)] = { PU4, 25 }, /* HRX0 */
- [RCAR_GP_PIN(5, 12)] = { PU4, 24 }, /* HSCK0 */
- [RCAR_GP_PIN(5, 11)] = { PU4, 23 }, /* RX2_A */
- [RCAR_GP_PIN(5, 10)] = { PU4, 22 }, /* TX2_A */
- [RCAR_GP_PIN(5, 9)] = { PU4, 21 }, /* SCK2 */
- [RCAR_GP_PIN(5, 8)] = { PU4, 20 }, /* RTS1_N_TANS */
- [RCAR_GP_PIN(5, 7)] = { PU4, 19 }, /* CTS1_N */
- [RCAR_GP_PIN(5, 6)] = { PU4, 18 }, /* TX1_A */
- [RCAR_GP_PIN(5, 5)] = { PU4, 17 }, /* RX1_A */
- [RCAR_GP_PIN(5, 4)] = { PU4, 16 }, /* RTS0_N_TANS */
- [RCAR_GP_PIN(5, 3)] = { PU4, 15 }, /* CTS0_N */
- [RCAR_GP_PIN(5, 2)] = { PU4, 14 }, /* TX0 */
- [RCAR_GP_PIN(5, 1)] = { PU4, 13 }, /* RX0 */
- [RCAR_GP_PIN(5, 0)] = { PU4, 12 }, /* SCK0 */
- [RCAR_GP_PIN(3, 15)] = { PU4, 11 }, /* SD1_WP */
- [RCAR_GP_PIN(3, 14)] = { PU4, 10 }, /* SD1_CD */
- [RCAR_GP_PIN(3, 13)] = { PU4, 9 }, /* SD0_WP */
- [RCAR_GP_PIN(3, 12)] = { PU4, 8 }, /* SD0_CD */
- [RCAR_GP_PIN(4, 17)] = { PU4, 7 }, /* SD3_DS */
- [RCAR_GP_PIN(4, 16)] = { PU4, 6 }, /* SD3_DAT7 */
- [RCAR_GP_PIN(4, 15)] = { PU4, 5 }, /* SD3_DAT6 */
- [RCAR_GP_PIN(4, 14)] = { PU4, 4 }, /* SD3_DAT5 */
- [RCAR_GP_PIN(4, 13)] = { PU4, 3 }, /* SD3_DAT4 */
- [RCAR_GP_PIN(4, 12)] = { PU4, 2 }, /* SD3_DAT3 */
- [RCAR_GP_PIN(4, 11)] = { PU4, 1 }, /* SD3_DAT2 */
- [RCAR_GP_PIN(4, 10)] = { PU4, 0 }, /* SD3_DAT1 */
-
- [RCAR_GP_PIN(6, 24)] = { PU5, 31 }, /* USB0_PWEN */
- [RCAR_GP_PIN(6, 23)] = { PU5, 30 }, /* AUDIO_CLKB_B */
- [RCAR_GP_PIN(6, 22)] = { PU5, 29 }, /* AUDIO_CLKA_A */
- [RCAR_GP_PIN(6, 21)] = { PU5, 28 }, /* SSI_SDATA9_A */
- [RCAR_GP_PIN(6, 20)] = { PU5, 27 }, /* SSI_SDATA8 */
- [RCAR_GP_PIN(6, 19)] = { PU5, 26 }, /* SSI_SDATA7 */
- [RCAR_GP_PIN(6, 18)] = { PU5, 25 }, /* SSI_WS78 */
- [RCAR_GP_PIN(6, 17)] = { PU5, 24 }, /* SSI_SCK78 */
- [RCAR_GP_PIN(6, 16)] = { PU5, 23 }, /* SSI_SDATA6 */
- [RCAR_GP_PIN(6, 15)] = { PU5, 22 }, /* SSI_WS6 */
- [RCAR_GP_PIN(6, 14)] = { PU5, 21 }, /* SSI_SCK6 */
- [RCAR_GP_PIN(6, 13)] = { PU5, 20 }, /* SSI_SDATA5 */
- [RCAR_GP_PIN(6, 12)] = { PU5, 19 }, /* SSI_WS5 */
- [RCAR_GP_PIN(6, 11)] = { PU5, 18 }, /* SSI_SCK5 */
- [RCAR_GP_PIN(6, 10)] = { PU5, 17 }, /* SSI_SDATA4 */
- [RCAR_GP_PIN(6, 9)] = { PU5, 16 }, /* SSI_WS4 */
- [RCAR_GP_PIN(6, 8)] = { PU5, 15 }, /* SSI_SCK4 */
- [RCAR_GP_PIN(6, 7)] = { PU5, 14 }, /* SSI_SDATA3 */
- [RCAR_GP_PIN(6, 6)] = { PU5, 13 }, /* SSI_WS34 */
- [RCAR_GP_PIN(6, 5)] = { PU5, 12 }, /* SSI_SCK34 */
- [RCAR_GP_PIN(6, 4)] = { PU5, 11 }, /* SSI_SDATA2_A */
- [RCAR_GP_PIN(6, 3)] = { PU5, 10 }, /* SSI_SDATA1_A */
- [RCAR_GP_PIN(6, 2)] = { PU5, 9 }, /* SSI_SDATA0 */
- [RCAR_GP_PIN(6, 1)] = { PU5, 8 }, /* SSI_WS01239 */
- [RCAR_GP_PIN(6, 0)] = { PU5, 7 }, /* SSI_SCK01239 */
- [RCAR_GP_PIN(5, 25)] = { PU5, 5 }, /* MLB_DAT */
- [RCAR_GP_PIN(5, 24)] = { PU5, 4 }, /* MLB_SIG */
- [RCAR_GP_PIN(5, 23)] = { PU5, 3 }, /* MLB_CLK */
- [RCAR_GP_PIN(5, 22)] = { PU5, 2 }, /* MSIOF0_RXD */
- [RCAR_GP_PIN(5, 21)] = { PU5, 1 }, /* MSIOF0_SS2 */
- [RCAR_GP_PIN(5, 20)] = { PU5, 0 }, /* MSIOF0_TXD */
-
- [RCAR_GP_PIN(6, 31)] = { PU6, 6 }, /* USB31_OVC */
- [RCAR_GP_PIN(6, 30)] = { PU6, 5 }, /* USB31_PWEN */
- [RCAR_GP_PIN(6, 29)] = { PU6, 4 }, /* USB30_OVC */
- [RCAR_GP_PIN(6, 28)] = { PU6, 3 }, /* USB30_PWEN */
- [RCAR_GP_PIN(6, 27)] = { PU6, 2 }, /* USB1_OVC */
- [RCAR_GP_PIN(6, 26)] = { PU6, 1 }, /* USB1_PWEN */
- [RCAR_GP_PIN(6, 25)] = { PU6, 0 }, /* USB0_OVC */
+static const struct sh_pfc_bias_info bias_info[] = {
+ { RCAR_GP_PIN(2, 11), PU0, 31 }, /* AVB_PHY_INT */
+ { RCAR_GP_PIN(2, 10), PU0, 30 }, /* AVB_MAGIC */
+ { RCAR_GP_PIN(2, 9), PU0, 29 }, /* AVB_MDC */
+
+ { RCAR_GP_PIN(1, 19), PU1, 31 }, /* A19 */
+ { RCAR_GP_PIN(1, 18), PU1, 30 }, /* A18 */
+ { RCAR_GP_PIN(1, 17), PU1, 29 }, /* A17 */
+ { RCAR_GP_PIN(1, 16), PU1, 28 }, /* A16 */
+ { RCAR_GP_PIN(1, 15), PU1, 27 }, /* A15 */
+ { RCAR_GP_PIN(1, 14), PU1, 26 }, /* A14 */
+ { RCAR_GP_PIN(1, 13), PU1, 25 }, /* A13 */
+ { RCAR_GP_PIN(1, 12), PU1, 24 }, /* A12 */
+ { RCAR_GP_PIN(1, 11), PU1, 23 }, /* A11 */
+ { RCAR_GP_PIN(1, 10), PU1, 22 }, /* A10 */
+ { RCAR_GP_PIN(1, 9), PU1, 21 }, /* A9 */
+ { RCAR_GP_PIN(1, 8), PU1, 20 }, /* A8 */
+ { RCAR_GP_PIN(1, 7), PU1, 19 }, /* A7 */
+ { RCAR_GP_PIN(1, 6), PU1, 18 }, /* A6 */
+ { RCAR_GP_PIN(1, 5), PU1, 17 }, /* A5 */
+ { RCAR_GP_PIN(1, 4), PU1, 16 }, /* A4 */
+ { RCAR_GP_PIN(1, 3), PU1, 15 }, /* A3 */
+ { RCAR_GP_PIN(1, 2), PU1, 14 }, /* A2 */
+ { RCAR_GP_PIN(1, 1), PU1, 13 }, /* A1 */
+ { RCAR_GP_PIN(1, 0), PU1, 12 }, /* A0 */
+ { RCAR_GP_PIN(2, 8), PU1, 11 }, /* PWM2_A */
+ { RCAR_GP_PIN(2, 7), PU1, 10 }, /* PWM1_A */
+ { RCAR_GP_PIN(2, 6), PU1, 9 }, /* PWM0 */
+ { RCAR_GP_PIN(2, 5), PU1, 8 }, /* IRQ5 */
+ { RCAR_GP_PIN(2, 4), PU1, 7 }, /* IRQ4 */
+ { RCAR_GP_PIN(2, 3), PU1, 6 }, /* IRQ3 */
+ { RCAR_GP_PIN(2, 2), PU1, 5 }, /* IRQ2 */
+ { RCAR_GP_PIN(2, 1), PU1, 4 }, /* IRQ1 */
+ { RCAR_GP_PIN(2, 0), PU1, 3 }, /* IRQ0 */
+ { RCAR_GP_PIN(2, 14), PU1, 2 }, /* AVB_AVTP_CAPTURE_A */
+ { RCAR_GP_PIN(2, 13), PU1, 1 }, /* AVB_AVTP_MATCH_A */
+ { RCAR_GP_PIN(2, 12), PU1, 0 }, /* AVB_LINK */
+
+ { RCAR_GP_PIN(7, 3), PU2, 29 }, /* HDMI1_CEC */
+ { RCAR_GP_PIN(7, 2), PU2, 28 }, /* HDMI0_CEC */
+ { RCAR_GP_PIN(7, 1), PU2, 27 }, /* AVS2 */
+ { RCAR_GP_PIN(7, 0), PU2, 26 }, /* AVS1 */
+ { RCAR_GP_PIN(0, 15), PU2, 25 }, /* D15 */
+ { RCAR_GP_PIN(0, 14), PU2, 24 }, /* D14 */
+ { RCAR_GP_PIN(0, 13), PU2, 23 }, /* D13 */
+ { RCAR_GP_PIN(0, 12), PU2, 22 }, /* D12 */
+ { RCAR_GP_PIN(0, 11), PU2, 21 }, /* D11 */
+ { RCAR_GP_PIN(0, 10), PU2, 20 }, /* D10 */
+ { RCAR_GP_PIN(0, 9), PU2, 19 }, /* D9 */
+ { RCAR_GP_PIN(0, 8), PU2, 18 }, /* D8 */
+ { RCAR_GP_PIN(0, 7), PU2, 17 }, /* D7 */
+ { RCAR_GP_PIN(0, 6), PU2, 16 }, /* D6 */
+ { RCAR_GP_PIN(0, 5), PU2, 15 }, /* D5 */
+ { RCAR_GP_PIN(0, 4), PU2, 14 }, /* D4 */
+ { RCAR_GP_PIN(0, 3), PU2, 13 }, /* D3 */
+ { RCAR_GP_PIN(0, 2), PU2, 12 }, /* D2 */
+ { RCAR_GP_PIN(0, 1), PU2, 11 }, /* D1 */
+ { RCAR_GP_PIN(0, 0), PU2, 10 }, /* D0 */
+ { RCAR_GP_PIN(1, 27), PU2, 8 }, /* EX_WAIT0_A */
+ { RCAR_GP_PIN(1, 26), PU2, 7 }, /* WE1_N */
+ { RCAR_GP_PIN(1, 25), PU2, 6 }, /* WE0_N */
+ { RCAR_GP_PIN(1, 24), PU2, 5 }, /* RD_WR_N */
+ { RCAR_GP_PIN(1, 23), PU2, 4 }, /* RD_N */
+ { RCAR_GP_PIN(1, 22), PU2, 3 }, /* BS_N */
+ { RCAR_GP_PIN(1, 21), PU2, 2 }, /* CS1_N_A26 */
+ { RCAR_GP_PIN(1, 20), PU2, 1 }, /* CS0_N */
+
+ { RCAR_GP_PIN(4, 9), PU3, 31 }, /* SD3_DAT0 */
+ { RCAR_GP_PIN(4, 8), PU3, 30 }, /* SD3_CMD */
+ { RCAR_GP_PIN(4, 7), PU3, 29 }, /* SD3_CLK */
+ { RCAR_GP_PIN(4, 6), PU3, 28 }, /* SD2_DS */
+ { RCAR_GP_PIN(4, 5), PU3, 27 }, /* SD2_DAT3 */
+ { RCAR_GP_PIN(4, 4), PU3, 26 }, /* SD2_DAT2 */
+ { RCAR_GP_PIN(4, 3), PU3, 25 }, /* SD2_DAT1 */
+ { RCAR_GP_PIN(4, 2), PU3, 24 }, /* SD2_DAT0 */
+ { RCAR_GP_PIN(4, 1), PU3, 23 }, /* SD2_CMD */
+ { RCAR_GP_PIN(4, 0), PU3, 22 }, /* SD2_CLK */
+ { RCAR_GP_PIN(3, 11), PU3, 21 }, /* SD1_DAT3 */
+ { RCAR_GP_PIN(3, 10), PU3, 20 }, /* SD1_DAT2 */
+ { RCAR_GP_PIN(3, 9), PU3, 19 }, /* SD1_DAT1 */
+ { RCAR_GP_PIN(3, 8), PU3, 18 }, /* SD1_DAT0 */
+ { RCAR_GP_PIN(3, 7), PU3, 17 }, /* SD1_CMD */
+ { RCAR_GP_PIN(3, 6), PU3, 16 }, /* SD1_CLK */
+ { RCAR_GP_PIN(3, 5), PU3, 15 }, /* SD0_DAT3 */
+ { RCAR_GP_PIN(3, 4), PU3, 14 }, /* SD0_DAT2 */
+ { RCAR_GP_PIN(3, 3), PU3, 13 }, /* SD0_DAT1 */
+ { RCAR_GP_PIN(3, 2), PU3, 12 }, /* SD0_DAT0 */
+ { RCAR_GP_PIN(3, 1), PU3, 11 }, /* SD0_CMD */
+ { RCAR_GP_PIN(3, 0), PU3, 10 }, /* SD0_CLK */
+
+ { RCAR_GP_PIN(5, 19), PU4, 31 }, /* MSIOF0_SS1 */
+ { RCAR_GP_PIN(5, 18), PU4, 30 }, /* MSIOF0_SYNC */
+ { RCAR_GP_PIN(5, 17), PU4, 29 }, /* MSIOF0_SCK */
+ { RCAR_GP_PIN(5, 16), PU4, 28 }, /* HRTS0_N */
+ { RCAR_GP_PIN(5, 15), PU4, 27 }, /* HCTS0_N */
+ { RCAR_GP_PIN(5, 14), PU4, 26 }, /* HTX0 */
+ { RCAR_GP_PIN(5, 13), PU4, 25 }, /* HRX0 */
+ { RCAR_GP_PIN(5, 12), PU4, 24 }, /* HSCK0 */
+ { RCAR_GP_PIN(5, 11), PU4, 23 }, /* RX2_A */
+ { RCAR_GP_PIN(5, 10), PU4, 22 }, /* TX2_A */
+ { RCAR_GP_PIN(5, 9), PU4, 21 }, /* SCK2 */
+ { RCAR_GP_PIN(5, 8), PU4, 20 }, /* RTS1_N_TANS */
+ { RCAR_GP_PIN(5, 7), PU4, 19 }, /* CTS1_N */
+ { RCAR_GP_PIN(5, 6), PU4, 18 }, /* TX1_A */
+ { RCAR_GP_PIN(5, 5), PU4, 17 }, /* RX1_A */
+ { RCAR_GP_PIN(5, 4), PU4, 16 }, /* RTS0_N_TANS */
+ { RCAR_GP_PIN(5, 3), PU4, 15 }, /* CTS0_N */
+ { RCAR_GP_PIN(5, 2), PU4, 14 }, /* TX0 */
+ { RCAR_GP_PIN(5, 1), PU4, 13 }, /* RX0 */
+ { RCAR_GP_PIN(5, 0), PU4, 12 }, /* SCK0 */
+ { RCAR_GP_PIN(3, 15), PU4, 11 }, /* SD1_WP */
+ { RCAR_GP_PIN(3, 14), PU4, 10 }, /* SD1_CD */
+ { RCAR_GP_PIN(3, 13), PU4, 9 }, /* SD0_WP */
+ { RCAR_GP_PIN(3, 12), PU4, 8 }, /* SD0_CD */
+ { RCAR_GP_PIN(4, 17), PU4, 7 }, /* SD3_DS */
+ { RCAR_GP_PIN(4, 16), PU4, 6 }, /* SD3_DAT7 */
+ { RCAR_GP_PIN(4, 15), PU4, 5 }, /* SD3_DAT6 */
+ { RCAR_GP_PIN(4, 14), PU4, 4 }, /* SD3_DAT5 */
+ { RCAR_GP_PIN(4, 13), PU4, 3 }, /* SD3_DAT4 */
+ { RCAR_GP_PIN(4, 12), PU4, 2 }, /* SD3_DAT3 */
+ { RCAR_GP_PIN(4, 11), PU4, 1 }, /* SD3_DAT2 */
+ { RCAR_GP_PIN(4, 10), PU4, 0 }, /* SD3_DAT1 */
+
+ { RCAR_GP_PIN(6, 24), PU5, 31 }, /* USB0_PWEN */
+ { RCAR_GP_PIN(6, 23), PU5, 30 }, /* AUDIO_CLKB_B */
+ { RCAR_GP_PIN(6, 22), PU5, 29 }, /* AUDIO_CLKA_A */
+ { RCAR_GP_PIN(6, 21), PU5, 28 }, /* SSI_SDATA9_A */
+ { RCAR_GP_PIN(6, 20), PU5, 27 }, /* SSI_SDATA8 */
+ { RCAR_GP_PIN(6, 19), PU5, 26 }, /* SSI_SDATA7 */
+ { RCAR_GP_PIN(6, 18), PU5, 25 }, /* SSI_WS78 */
+ { RCAR_GP_PIN(6, 17), PU5, 24 }, /* SSI_SCK78 */
+ { RCAR_GP_PIN(6, 16), PU5, 23 }, /* SSI_SDATA6 */
+ { RCAR_GP_PIN(6, 15), PU5, 22 }, /* SSI_WS6 */
+ { RCAR_GP_PIN(6, 14), PU5, 21 }, /* SSI_SCK6 */
+ { RCAR_GP_PIN(6, 13), PU5, 20 }, /* SSI_SDATA5 */
+ { RCAR_GP_PIN(6, 12), PU5, 19 }, /* SSI_WS5 */
+ { RCAR_GP_PIN(6, 11), PU5, 18 }, /* SSI_SCK5 */
+ { RCAR_GP_PIN(6, 10), PU5, 17 }, /* SSI_SDATA4 */
+ { RCAR_GP_PIN(6, 9), PU5, 16 }, /* SSI_WS4 */
+ { RCAR_GP_PIN(6, 8), PU5, 15 }, /* SSI_SCK4 */
+ { RCAR_GP_PIN(6, 7), PU5, 14 }, /* SSI_SDATA3 */
+ { RCAR_GP_PIN(6, 6), PU5, 13 }, /* SSI_WS34 */
+ { RCAR_GP_PIN(6, 5), PU5, 12 }, /* SSI_SCK34 */
+ { RCAR_GP_PIN(6, 4), PU5, 11 }, /* SSI_SDATA2_A */
+ { RCAR_GP_PIN(6, 3), PU5, 10 }, /* SSI_SDATA1_A */
+ { RCAR_GP_PIN(6, 2), PU5, 9 }, /* SSI_SDATA0 */
+ { RCAR_GP_PIN(6, 1), PU5, 8 }, /* SSI_WS01239 */
+ { RCAR_GP_PIN(6, 0), PU5, 7 }, /* SSI_SCK01239 */
+ { RCAR_GP_PIN(5, 25), PU5, 5 }, /* MLB_DAT */
+ { RCAR_GP_PIN(5, 24), PU5, 4 }, /* MLB_SIG */
+ { RCAR_GP_PIN(5, 23), PU5, 3 }, /* MLB_CLK */
+ { RCAR_GP_PIN(5, 22), PU5, 2 }, /* MSIOF0_RXD */
+ { RCAR_GP_PIN(5, 21), PU5, 1 }, /* MSIOF0_SS2 */
+ { RCAR_GP_PIN(5, 20), PU5, 0 }, /* MSIOF0_TXD */
+
+ { RCAR_GP_PIN(6, 31), PU6, 6 }, /* USB31_OVC */
+ { RCAR_GP_PIN(6, 30), PU6, 5 }, /* USB31_PWEN */
+ { RCAR_GP_PIN(6, 29), PU6, 4 }, /* USB30_OVC */
+ { RCAR_GP_PIN(6, 28), PU6, 3 }, /* USB30_PWEN */
+ { RCAR_GP_PIN(6, 27), PU6, 2 }, /* USB1_OVC */
+ { RCAR_GP_PIN(6, 26), PU6, 1 }, /* USB1_PWEN */
+ { RCAR_GP_PIN(6, 25), PU6, 0 }, /* USB0_OVC */
};
static unsigned int r8a7795_pinmux_get_bias(struct sh_pfc *pfc,
unsigned int pin)
{
+ const struct sh_pfc_bias_info *info;
u32 reg;
u32 bit;
- if (WARN_ON_ONCE(!pullups[pin].reg))
+ info = sh_pfc_pin_to_bias_info(bias_info, ARRAY_SIZE(bias_info), pin);
+ if (!info)
return PIN_CONFIG_BIAS_DISABLE;
- reg = pullups[pin].reg;
- bit = BIT(pullups[pin].bit);
+ reg = info->reg;
+ bit = BIT(info->bit);
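+	/*
+	 * Register semantics, as encoded by the checks below:
+	 *	PUEN bit clear        -> bias disabled
+	 *	PUEN set, PUD bit set -> pull-up
+	 *	PUEN set, PUD clear   -> pull-down
+	 */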
- if (sh_pfc_read_reg(pfc, PUEN + reg, 32) & bit) {
- if (sh_pfc_read_reg(pfc, PUD + reg, 32) & bit)
- return PIN_CONFIG_BIAS_PULL_UP;
- else
- return PIN_CONFIG_BIAS_PULL_DOWN;
- } else
+ if (!(sh_pfc_read_reg(pfc, PUEN + reg, 32) & bit))
return PIN_CONFIG_BIAS_DISABLE;
+ else if (sh_pfc_read_reg(pfc, PUD + reg, 32) & bit)
+ return PIN_CONFIG_BIAS_PULL_UP;
+ else
+ return PIN_CONFIG_BIAS_PULL_DOWN;
}
static void r8a7795_pinmux_set_bias(struct sh_pfc *pfc, unsigned int pin,
unsigned int bias)
{
+ const struct sh_pfc_bias_info *info;
u32 enable, updown;
u32 reg;
u32 bit;
- if (WARN_ON_ONCE(!pullups[pin].reg))
+ info = sh_pfc_pin_to_bias_info(bias_info, ARRAY_SIZE(bias_info), pin);
+ if (!info)
return;
- reg = pullups[pin].reg;
- bit = BIT(pullups[pin].bit);
+ reg = info->reg;
+ bit = BIT(info->bit);
enable = sh_pfc_read_reg(pfc, PUEN + reg, 32) & ~bit;
if (bias != PIN_CONFIG_BIAS_DISABLE)
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7796.c b/drivers/pinctrl/sh-pfc/pfc-r8a7796.c
index dc9b671ccf2e..7e16545a2c3c 100644
--- a/drivers/pinctrl/sh-pfc/pfc-r8a7796.c
+++ b/drivers/pinctrl/sh-pfc/pfc-r8a7796.c
@@ -122,22 +122,22 @@
#define GPSR3_0 F_(SD0_CLK, IP7_19_16)
/* GPSR4 */
-#define GPSR4_17 F_(SD3_DS, IP11_11_8)
-#define GPSR4_16 F_(SD3_DAT7, IP10_7_4)
-#define GPSR4_15 F_(SD3_DAT6, IP10_3_0)
-#define GPSR4_14 F_(SD3_DAT5, IP9_31_28)
-#define GPSR4_13 F_(SD3_DAT4, IP9_27_24)
+#define GPSR4_17 F_(SD3_DS, IP11_7_4)
+#define GPSR4_16 F_(SD3_DAT7, IP11_3_0)
+#define GPSR4_15 F_(SD3_DAT6, IP10_31_28)
+#define GPSR4_14 F_(SD3_DAT5, IP10_27_24)
+#define GPSR4_13 F_(SD3_DAT4, IP10_23_20)
#define GPSR4_12 F_(SD3_DAT3, IP10_19_16)
#define GPSR4_11 F_(SD3_DAT2, IP10_15_12)
#define GPSR4_10 F_(SD3_DAT1, IP10_11_8)
#define GPSR4_9 F_(SD3_DAT0, IP10_7_4)
#define GPSR4_8 F_(SD3_CMD, IP10_3_0)
#define GPSR4_7 F_(SD3_CLK, IP9_31_28)
-#define GPSR4_6 F_(SD2_DS, IP9_23_20)
-#define GPSR4_5 F_(SD2_DAT3, IP9_19_16)
-#define GPSR4_4 F_(SD2_DAT2, IP9_15_12)
-#define GPSR4_3 F_(SD2_DAT1, IP9_11_8)
-#define GPSR4_2 F_(SD2_DAT0, IP9_7_4)
+#define GPSR4_6 F_(SD2_DS, IP9_27_24)
+#define GPSR4_5 F_(SD2_DAT3, IP9_23_20)
+#define GPSR4_4 F_(SD2_DAT2, IP9_19_16)
+#define GPSR4_3 F_(SD2_DAT1, IP9_15_12)
+#define GPSR4_2 F_(SD2_DAT0, IP9_11_8)
#define GPSR4_1 F_(SD2_CMD, IP9_7_4)
#define GPSR4_0 F_(SD2_CLK, IP9_3_0)
@@ -1490,6 +1490,418 @@ static const struct sh_pfc_pin pinmux_pins[] = {
PINMUX_GPIO_GP_ALL(),
};
+/* - EtherAVB --------------------------------------------------------------- */
+static const unsigned int avb_link_pins[] = {
+ /* AVB_LINK */
+ RCAR_GP_PIN(2, 12),
+};
+static const unsigned int avb_link_mux[] = {
+ AVB_LINK_MARK,
+};
+static const unsigned int avb_magic_pins[] = {
+	/* AVB_MAGIC */
+ RCAR_GP_PIN(2, 10),
+};
+static const unsigned int avb_magic_mux[] = {
+ AVB_MAGIC_MARK,
+};
+static const unsigned int avb_phy_int_pins[] = {
+ /* AVB_PHY_INT */
+ RCAR_GP_PIN(2, 11),
+};
+static const unsigned int avb_phy_int_mux[] = {
+ AVB_PHY_INT_MARK,
+};
+static const unsigned int avb_mdc_pins[] = {
+ /* AVB_MDC */
+ RCAR_GP_PIN(2, 9),
+};
+static const unsigned int avb_mdc_mux[] = {
+ AVB_MDC_MARK,
+};
+static const unsigned int avb_avtp_pps_pins[] = {
+ /* AVB_AVTP_PPS */
+ RCAR_GP_PIN(2, 6),
+};
+static const unsigned int avb_avtp_pps_mux[] = {
+ AVB_AVTP_PPS_MARK,
+};
+static const unsigned int avb_avtp_match_a_pins[] = {
+ /* AVB_AVTP_MATCH_A */
+ RCAR_GP_PIN(2, 13),
+};
+static const unsigned int avb_avtp_match_a_mux[] = {
+ AVB_AVTP_MATCH_A_MARK,
+};
+static const unsigned int avb_avtp_capture_a_pins[] = {
+ /* AVB_AVTP_CAPTURE_A */
+ RCAR_GP_PIN(2, 14),
+};
+static const unsigned int avb_avtp_capture_a_mux[] = {
+ AVB_AVTP_CAPTURE_A_MARK,
+};
+static const unsigned int avb_avtp_match_b_pins[] = {
+ /* AVB_AVTP_MATCH_B */
+ RCAR_GP_PIN(1, 8),
+};
+static const unsigned int avb_avtp_match_b_mux[] = {
+ AVB_AVTP_MATCH_B_MARK,
+};
+static const unsigned int avb_avtp_capture_b_pins[] = {
+ /* AVB_AVTP_CAPTURE_B */
+ RCAR_GP_PIN(1, 11),
+};
+static const unsigned int avb_avtp_capture_b_mux[] = {
+ AVB_AVTP_CAPTURE_B_MARK,
+};
+
+/* - DRIF0 --------------------------------------------------------------- */
+static const unsigned int drif0_ctrl_a_pins[] = {
+ /* CLK, SYNC */
+ RCAR_GP_PIN(6, 8), RCAR_GP_PIN(6, 9),
+};
+static const unsigned int drif0_ctrl_a_mux[] = {
+ RIF0_CLK_A_MARK, RIF0_SYNC_A_MARK,
+};
+static const unsigned int drif0_data0_a_pins[] = {
+ /* D0 */
+ RCAR_GP_PIN(6, 10),
+};
+static const unsigned int drif0_data0_a_mux[] = {
+ RIF0_D0_A_MARK,
+};
+static const unsigned int drif0_data1_a_pins[] = {
+ /* D1 */
+ RCAR_GP_PIN(6, 7),
+};
+static const unsigned int drif0_data1_a_mux[] = {
+ RIF0_D1_A_MARK,
+};
+static const unsigned int drif0_ctrl_b_pins[] = {
+ /* CLK, SYNC */
+ RCAR_GP_PIN(5, 0), RCAR_GP_PIN(5, 4),
+};
+static const unsigned int drif0_ctrl_b_mux[] = {
+ RIF0_CLK_B_MARK, RIF0_SYNC_B_MARK,
+};
+static const unsigned int drif0_data0_b_pins[] = {
+ /* D0 */
+ RCAR_GP_PIN(5, 1),
+};
+static const unsigned int drif0_data0_b_mux[] = {
+ RIF0_D0_B_MARK,
+};
+static const unsigned int drif0_data1_b_pins[] = {
+ /* D1 */
+ RCAR_GP_PIN(5, 2),
+};
+static const unsigned int drif0_data1_b_mux[] = {
+ RIF0_D1_B_MARK,
+};
+static const unsigned int drif0_ctrl_c_pins[] = {
+ /* CLK, SYNC */
+ RCAR_GP_PIN(5, 12), RCAR_GP_PIN(5, 15),
+};
+static const unsigned int drif0_ctrl_c_mux[] = {
+ RIF0_CLK_C_MARK, RIF0_SYNC_C_MARK,
+};
+static const unsigned int drif0_data0_c_pins[] = {
+ /* D0 */
+ RCAR_GP_PIN(5, 13),
+};
+static const unsigned int drif0_data0_c_mux[] = {
+ RIF0_D0_C_MARK,
+};
+static const unsigned int drif0_data1_c_pins[] = {
+ /* D1 */
+ RCAR_GP_PIN(5, 14),
+};
+static const unsigned int drif0_data1_c_mux[] = {
+ RIF0_D1_C_MARK,
+};
+/* - DRIF1 --------------------------------------------------------------- */
+static const unsigned int drif1_ctrl_a_pins[] = {
+ /* CLK, SYNC */
+ RCAR_GP_PIN(6, 17), RCAR_GP_PIN(6, 18),
+};
+static const unsigned int drif1_ctrl_a_mux[] = {
+ RIF1_CLK_A_MARK, RIF1_SYNC_A_MARK,
+};
+static const unsigned int drif1_data0_a_pins[] = {
+ /* D0 */
+ RCAR_GP_PIN(6, 19),
+};
+static const unsigned int drif1_data0_a_mux[] = {
+ RIF1_D0_A_MARK,
+};
+static const unsigned int drif1_data1_a_pins[] = {
+ /* D1 */
+ RCAR_GP_PIN(6, 20),
+};
+static const unsigned int drif1_data1_a_mux[] = {
+ RIF1_D1_A_MARK,
+};
+static const unsigned int drif1_ctrl_b_pins[] = {
+ /* CLK, SYNC */
+ RCAR_GP_PIN(5, 9), RCAR_GP_PIN(5, 3),
+};
+static const unsigned int drif1_ctrl_b_mux[] = {
+ RIF1_CLK_B_MARK, RIF1_SYNC_B_MARK,
+};
+static const unsigned int drif1_data0_b_pins[] = {
+ /* D0 */
+ RCAR_GP_PIN(5, 7),
+};
+static const unsigned int drif1_data0_b_mux[] = {
+ RIF1_D0_B_MARK,
+};
+static const unsigned int drif1_data1_b_pins[] = {
+ /* D1 */
+ RCAR_GP_PIN(5, 8),
+};
+static const unsigned int drif1_data1_b_mux[] = {
+ RIF1_D1_B_MARK,
+};
+static const unsigned int drif1_ctrl_c_pins[] = {
+ /* CLK, SYNC */
+ RCAR_GP_PIN(5, 5), RCAR_GP_PIN(5, 11),
+};
+static const unsigned int drif1_ctrl_c_mux[] = {
+ RIF1_CLK_C_MARK, RIF1_SYNC_C_MARK,
+};
+static const unsigned int drif1_data0_c_pins[] = {
+ /* D0 */
+ RCAR_GP_PIN(5, 6),
+};
+static const unsigned int drif1_data0_c_mux[] = {
+ RIF1_D0_C_MARK,
+};
+static const unsigned int drif1_data1_c_pins[] = {
+ /* D1 */
+ RCAR_GP_PIN(5, 10),
+};
+static const unsigned int drif1_data1_c_mux[] = {
+ RIF1_D1_C_MARK,
+};
+/* - DRIF2 --------------------------------------------------------------- */
+static const unsigned int drif2_ctrl_a_pins[] = {
+ /* CLK, SYNC */
+ RCAR_GP_PIN(6, 8), RCAR_GP_PIN(6, 9),
+};
+static const unsigned int drif2_ctrl_a_mux[] = {
+ RIF2_CLK_A_MARK, RIF2_SYNC_A_MARK,
+};
+static const unsigned int drif2_data0_a_pins[] = {
+ /* D0 */
+ RCAR_GP_PIN(6, 7),
+};
+static const unsigned int drif2_data0_a_mux[] = {
+ RIF2_D0_A_MARK,
+};
+static const unsigned int drif2_data1_a_pins[] = {
+ /* D1 */
+ RCAR_GP_PIN(6, 10),
+};
+static const unsigned int drif2_data1_a_mux[] = {
+ RIF2_D1_A_MARK,
+};
+static const unsigned int drif2_ctrl_b_pins[] = {
+ /* CLK, SYNC */
+ RCAR_GP_PIN(6, 26), RCAR_GP_PIN(6, 27),
+};
+static const unsigned int drif2_ctrl_b_mux[] = {
+ RIF2_CLK_B_MARK, RIF2_SYNC_B_MARK,
+};
+static const unsigned int drif2_data0_b_pins[] = {
+ /* D0 */
+ RCAR_GP_PIN(6, 30),
+};
+static const unsigned int drif2_data0_b_mux[] = {
+ RIF2_D0_B_MARK,
+};
+static const unsigned int drif2_data1_b_pins[] = {
+ /* D1 */
+ RCAR_GP_PIN(6, 31),
+};
+static const unsigned int drif2_data1_b_mux[] = {
+ RIF2_D1_B_MARK,
+};
+/* - DRIF3 --------------------------------------------------------------- */
+static const unsigned int drif3_ctrl_a_pins[] = {
+ /* CLK, SYNC */
+ RCAR_GP_PIN(6, 17), RCAR_GP_PIN(6, 18),
+};
+static const unsigned int drif3_ctrl_a_mux[] = {
+ RIF3_CLK_A_MARK, RIF3_SYNC_A_MARK,
+};
+static const unsigned int drif3_data0_a_pins[] = {
+ /* D0 */
+ RCAR_GP_PIN(6, 19),
+};
+static const unsigned int drif3_data0_a_mux[] = {
+ RIF3_D0_A_MARK,
+};
+static const unsigned int drif3_data1_a_pins[] = {
+ /* D1 */
+ RCAR_GP_PIN(6, 20),
+};
+static const unsigned int drif3_data1_a_mux[] = {
+ RIF3_D1_A_MARK,
+};
+static const unsigned int drif3_ctrl_b_pins[] = {
+ /* CLK, SYNC */
+ RCAR_GP_PIN(6, 24), RCAR_GP_PIN(6, 25),
+};
+static const unsigned int drif3_ctrl_b_mux[] = {
+ RIF3_CLK_B_MARK, RIF3_SYNC_B_MARK,
+};
+static const unsigned int drif3_data0_b_pins[] = {
+ /* D0 */
+ RCAR_GP_PIN(6, 28),
+};
+static const unsigned int drif3_data0_b_mux[] = {
+ RIF3_D0_B_MARK,
+};
+static const unsigned int drif3_data1_b_pins[] = {
+ /* D1 */
+ RCAR_GP_PIN(6, 29),
+};
+static const unsigned int drif3_data1_b_mux[] = {
+ RIF3_D1_B_MARK,
+};
+
+/* - DU --------------------------------------------------------------------- */
+static const unsigned int du_rgb666_pins[] = {
+ /* R[7:2], G[7:2], B[7:2] */
+ RCAR_GP_PIN(0, 15), RCAR_GP_PIN(0, 14), RCAR_GP_PIN(0, 13),
+ RCAR_GP_PIN(0, 12), RCAR_GP_PIN(0, 11), RCAR_GP_PIN(0, 10),
+ RCAR_GP_PIN(1, 15), RCAR_GP_PIN(1, 14), RCAR_GP_PIN(1, 13),
+ RCAR_GP_PIN(1, 12), RCAR_GP_PIN(1, 19), RCAR_GP_PIN(1, 18),
+ RCAR_GP_PIN(1, 7), RCAR_GP_PIN(1, 6), RCAR_GP_PIN(1, 5),
+ RCAR_GP_PIN(1, 4), RCAR_GP_PIN(1, 3), RCAR_GP_PIN(1, 2),
+};
+static const unsigned int du_rgb666_mux[] = {
+ DU_DR7_MARK, DU_DR6_MARK, DU_DR5_MARK, DU_DR4_MARK,
+ DU_DR3_MARK, DU_DR2_MARK,
+ DU_DG7_MARK, DU_DG6_MARK, DU_DG5_MARK, DU_DG4_MARK,
+ DU_DG3_MARK, DU_DG2_MARK,
+ DU_DB7_MARK, DU_DB6_MARK, DU_DB5_MARK, DU_DB4_MARK,
+ DU_DB3_MARK, DU_DB2_MARK,
+};
+static const unsigned int du_rgb888_pins[] = {
+ /* R[7:0], G[7:0], B[7:0] */
+ RCAR_GP_PIN(0, 15), RCAR_GP_PIN(0, 14), RCAR_GP_PIN(0, 13),
+ RCAR_GP_PIN(0, 12), RCAR_GP_PIN(0, 11), RCAR_GP_PIN(0, 10),
+ RCAR_GP_PIN(0, 9), RCAR_GP_PIN(0, 8),
+ RCAR_GP_PIN(1, 15), RCAR_GP_PIN(1, 14), RCAR_GP_PIN(1, 13),
+ RCAR_GP_PIN(1, 12), RCAR_GP_PIN(1, 19), RCAR_GP_PIN(1, 18),
+ RCAR_GP_PIN(1, 17), RCAR_GP_PIN(1, 16),
+ RCAR_GP_PIN(1, 7), RCAR_GP_PIN(1, 6), RCAR_GP_PIN(1, 5),
+ RCAR_GP_PIN(1, 4), RCAR_GP_PIN(1, 3), RCAR_GP_PIN(1, 2),
+ RCAR_GP_PIN(1, 1), RCAR_GP_PIN(1, 0),
+};
+static const unsigned int du_rgb888_mux[] = {
+ DU_DR7_MARK, DU_DR6_MARK, DU_DR5_MARK, DU_DR4_MARK,
+ DU_DR3_MARK, DU_DR2_MARK, DU_DR1_MARK, DU_DR0_MARK,
+ DU_DG7_MARK, DU_DG6_MARK, DU_DG5_MARK, DU_DG4_MARK,
+ DU_DG3_MARK, DU_DG2_MARK, DU_DG1_MARK, DU_DG0_MARK,
+ DU_DB7_MARK, DU_DB6_MARK, DU_DB5_MARK, DU_DB4_MARK,
+ DU_DB3_MARK, DU_DB2_MARK, DU_DB1_MARK, DU_DB0_MARK,
+};
+static const unsigned int du_clk_out_0_pins[] = {
+ /* CLKOUT */
+ RCAR_GP_PIN(1, 27),
+};
+static const unsigned int du_clk_out_0_mux[] = {
+ DU_DOTCLKOUT0_MARK
+};
+static const unsigned int du_clk_out_1_pins[] = {
+ /* CLKOUT */
+ RCAR_GP_PIN(2, 3),
+};
+static const unsigned int du_clk_out_1_mux[] = {
+ DU_DOTCLKOUT1_MARK
+};
+static const unsigned int du_sync_pins[] = {
+ /* EXVSYNC/VSYNC, EXHSYNC/HSYNC */
+ RCAR_GP_PIN(2, 5), RCAR_GP_PIN(2, 4),
+};
+static const unsigned int du_sync_mux[] = {
+ DU_EXVSYNC_DU_VSYNC_MARK, DU_EXHSYNC_DU_HSYNC_MARK
+};
+static const unsigned int du_oddf_pins[] = {
+ /* EXDISP/EXODDF/EXCDE */
+ RCAR_GP_PIN(2, 2),
+};
+static const unsigned int du_oddf_mux[] = {
+ DU_EXODDF_DU_ODDF_DISP_CDE_MARK,
+};
+static const unsigned int du_cde_pins[] = {
+ /* CDE */
+ RCAR_GP_PIN(2, 0),
+};
+static const unsigned int du_cde_mux[] = {
+ DU_CDE_MARK,
+};
+static const unsigned int du_disp_pins[] = {
+ /* DISP */
+ RCAR_GP_PIN(2, 1),
+};
+static const unsigned int du_disp_mux[] = {
+ DU_DISP_MARK,
+};
+
+/* - I2C -------------------------------------------------------------------- */
+static const unsigned int i2c1_a_pins[] = {
+ /* SDA, SCL */
+ RCAR_GP_PIN(5, 11), RCAR_GP_PIN(5, 10),
+};
+static const unsigned int i2c1_a_mux[] = {
+ SDA1_A_MARK, SCL1_A_MARK,
+};
+static const unsigned int i2c1_b_pins[] = {
+ /* SDA, SCL */
+ RCAR_GP_PIN(5, 24), RCAR_GP_PIN(5, 23),
+};
+static const unsigned int i2c1_b_mux[] = {
+ SDA1_B_MARK, SCL1_B_MARK,
+};
+static const unsigned int i2c2_a_pins[] = {
+ /* SDA, SCL */
+ RCAR_GP_PIN(5, 0), RCAR_GP_PIN(5, 4),
+};
+static const unsigned int i2c2_a_mux[] = {
+ SDA2_A_MARK, SCL2_A_MARK,
+};
+static const unsigned int i2c2_b_pins[] = {
+ /* SDA, SCL */
+ RCAR_GP_PIN(3, 13), RCAR_GP_PIN(3, 12),
+};
+static const unsigned int i2c2_b_mux[] = {
+ SDA2_B_MARK, SCL2_B_MARK,
+};
+static const unsigned int i2c6_a_pins[] = {
+ /* SDA, SCL */
+ RCAR_GP_PIN(1, 8), RCAR_GP_PIN(1, 11),
+};
+static const unsigned int i2c6_a_mux[] = {
+ SDA6_A_MARK, SCL6_A_MARK,
+};
+static const unsigned int i2c6_b_pins[] = {
+ /* SDA, SCL */
+ RCAR_GP_PIN(1, 26), RCAR_GP_PIN(1, 25),
+};
+static const unsigned int i2c6_b_mux[] = {
+ SDA6_B_MARK, SCL6_B_MARK,
+};
+static const unsigned int i2c6_c_pins[] = {
+ /* SDA, SCL */
+ RCAR_GP_PIN(0, 15), RCAR_GP_PIN(0, 14),
+};
+static const unsigned int i2c6_c_mux[] = {
+ SDA6_C_MARK, SCL6_C_MARK,
+};
+
/* - SCIF0 ------------------------------------------------------------------ */
static const unsigned int scif0_data_pins[] = {
/* RX, TX */
@@ -1912,6 +2324,60 @@ static const unsigned int sdhi3_ds_mux[] = {
};
static const struct sh_pfc_pin_group pinmux_groups[] = {
+ SH_PFC_PIN_GROUP(avb_link),
+ SH_PFC_PIN_GROUP(avb_magic),
+ SH_PFC_PIN_GROUP(avb_phy_int),
+ SH_PFC_PIN_GROUP(avb_mdc),
+ SH_PFC_PIN_GROUP(avb_avtp_pps),
+ SH_PFC_PIN_GROUP(avb_avtp_match_a),
+ SH_PFC_PIN_GROUP(avb_avtp_capture_a),
+ SH_PFC_PIN_GROUP(avb_avtp_match_b),
+ SH_PFC_PIN_GROUP(avb_avtp_capture_b),
+ SH_PFC_PIN_GROUP(drif0_ctrl_a),
+ SH_PFC_PIN_GROUP(drif0_data0_a),
+ SH_PFC_PIN_GROUP(drif0_data1_a),
+ SH_PFC_PIN_GROUP(drif0_ctrl_b),
+ SH_PFC_PIN_GROUP(drif0_data0_b),
+ SH_PFC_PIN_GROUP(drif0_data1_b),
+ SH_PFC_PIN_GROUP(drif0_ctrl_c),
+ SH_PFC_PIN_GROUP(drif0_data0_c),
+ SH_PFC_PIN_GROUP(drif0_data1_c),
+ SH_PFC_PIN_GROUP(drif1_ctrl_a),
+ SH_PFC_PIN_GROUP(drif1_data0_a),
+ SH_PFC_PIN_GROUP(drif1_data1_a),
+ SH_PFC_PIN_GROUP(drif1_ctrl_b),
+ SH_PFC_PIN_GROUP(drif1_data0_b),
+ SH_PFC_PIN_GROUP(drif1_data1_b),
+ SH_PFC_PIN_GROUP(drif1_ctrl_c),
+ SH_PFC_PIN_GROUP(drif1_data0_c),
+ SH_PFC_PIN_GROUP(drif1_data1_c),
+ SH_PFC_PIN_GROUP(drif2_ctrl_a),
+ SH_PFC_PIN_GROUP(drif2_data0_a),
+ SH_PFC_PIN_GROUP(drif2_data1_a),
+ SH_PFC_PIN_GROUP(drif2_ctrl_b),
+ SH_PFC_PIN_GROUP(drif2_data0_b),
+ SH_PFC_PIN_GROUP(drif2_data1_b),
+ SH_PFC_PIN_GROUP(drif3_ctrl_a),
+ SH_PFC_PIN_GROUP(drif3_data0_a),
+ SH_PFC_PIN_GROUP(drif3_data1_a),
+ SH_PFC_PIN_GROUP(drif3_ctrl_b),
+ SH_PFC_PIN_GROUP(drif3_data0_b),
+ SH_PFC_PIN_GROUP(drif3_data1_b),
+ SH_PFC_PIN_GROUP(du_rgb666),
+ SH_PFC_PIN_GROUP(du_rgb888),
+ SH_PFC_PIN_GROUP(du_clk_out_0),
+ SH_PFC_PIN_GROUP(du_clk_out_1),
+ SH_PFC_PIN_GROUP(du_sync),
+ SH_PFC_PIN_GROUP(du_oddf),
+ SH_PFC_PIN_GROUP(du_cde),
+ SH_PFC_PIN_GROUP(du_disp),
+ SH_PFC_PIN_GROUP(i2c1_a),
+ SH_PFC_PIN_GROUP(i2c1_b),
+ SH_PFC_PIN_GROUP(i2c2_a),
+ SH_PFC_PIN_GROUP(i2c2_b),
+ SH_PFC_PIN_GROUP(i2c6_a),
+ SH_PFC_PIN_GROUP(i2c6_b),
+ SH_PFC_PIN_GROUP(i2c6_c),
SH_PFC_PIN_GROUP(scif0_data),
SH_PFC_PIN_GROUP(scif0_clk),
SH_PFC_PIN_GROUP(scif0_ctrl),
@@ -1969,6 +2435,87 @@ static const struct sh_pfc_pin_group pinmux_groups[] = {
SH_PFC_PIN_GROUP(sdhi3_ds),
};
+static const char * const avb_groups[] = {
+ "avb_link",
+ "avb_magic",
+ "avb_phy_int",
+ "avb_mdc",
+ "avb_avtp_pps",
+ "avb_avtp_match_a",
+ "avb_avtp_capture_a",
+ "avb_avtp_match_b",
+ "avb_avtp_capture_b",
+};
+
+static const char * const drif0_groups[] = {
+ "drif0_ctrl_a",
+ "drif0_data0_a",
+ "drif0_data1_a",
+ "drif0_ctrl_b",
+ "drif0_data0_b",
+ "drif0_data1_b",
+ "drif0_ctrl_c",
+ "drif0_data0_c",
+ "drif0_data1_c",
+};
+
+static const char * const drif1_groups[] = {
+ "drif1_ctrl_a",
+ "drif1_data0_a",
+ "drif1_data1_a",
+ "drif1_ctrl_b",
+ "drif1_data0_b",
+ "drif1_data1_b",
+ "drif1_ctrl_c",
+ "drif1_data0_c",
+ "drif1_data1_c",
+};
+
+static const char * const drif2_groups[] = {
+ "drif2_ctrl_a",
+ "drif2_data0_a",
+ "drif2_data1_a",
+ "drif2_ctrl_b",
+ "drif2_data0_b",
+ "drif2_data1_b",
+};
+
+static const char * const drif3_groups[] = {
+ "drif3_ctrl_a",
+ "drif3_data0_a",
+ "drif3_data1_a",
+ "drif3_ctrl_b",
+ "drif3_data0_b",
+ "drif3_data1_b",
+};
+
+static const char * const du_groups[] = {
+ "du_rgb666",
+ "du_rgb888",
+ "du_clk_out_0",
+ "du_clk_out_1",
+ "du_sync",
+ "du_oddf",
+ "du_cde",
+ "du_disp",
+};
+
+static const char * const i2c1_groups[] = {
+ "i2c1_a",
+ "i2c1_b",
+};
+
+static const char * const i2c2_groups[] = {
+ "i2c2_a",
+ "i2c2_b",
+};
+
+static const char * const i2c6_groups[] = {
+ "i2c6_a",
+ "i2c6_b",
+ "i2c6_c",
+};
+
static const char * const scif0_groups[] = {
"scif0_data",
"scif0_clk",
@@ -2058,6 +2605,15 @@ static const char * const sdhi3_groups[] = {
};
static const struct sh_pfc_function pinmux_functions[] = {
+ SH_PFC_FUNCTION(avb),
+ SH_PFC_FUNCTION(drif0),
+ SH_PFC_FUNCTION(drif1),
+ SH_PFC_FUNCTION(drif2),
+ SH_PFC_FUNCTION(drif3),
+ SH_PFC_FUNCTION(du),
+ SH_PFC_FUNCTION(i2c1),
+ SH_PFC_FUNCTION(i2c2),
+ SH_PFC_FUNCTION(i2c6),
SH_PFC_FUNCTION(scif0),
SH_PFC_FUNCTION(scif1),
SH_PFC_FUNCTION(scif2),
diff --git a/drivers/pinctrl/sh-pfc/pinctrl.c b/drivers/pinctrl/sh-pfc/pinctrl.c
index c5772584594c..fcacfa73ef6e 100644
--- a/drivers/pinctrl/sh-pfc/pinctrl.c
+++ b/drivers/pinctrl/sh-pfc/pinctrl.c
@@ -570,7 +570,8 @@ static bool sh_pfc_pinconf_validate(struct sh_pfc *pfc, unsigned int _pin,
switch (param) {
case PIN_CONFIG_BIAS_DISABLE:
- return true;
+ return pin->configs &
+ (SH_PFC_PIN_CFG_PULL_UP | SH_PFC_PIN_CFG_PULL_DOWN);
case PIN_CONFIG_BIAS_PULL_UP:
return pin->configs & SH_PFC_PIN_CFG_PULL_UP;
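In short, bias-disable is now only reported as valid on pins that actually have pull resistors to disable. A sketch of the resulting behaviour, derived from the hunk above:

/*
 * pin->configs has PULL_UP and/or PULL_DOWN -> BIAS_DISABLE is valid
 * pin->configs has neither capability       -> BIAS_DISABLE is rejected
 */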
diff --git a/drivers/pinctrl/sh-pfc/sh_pfc.h b/drivers/pinctrl/sh-pfc/sh_pfc.h
index 2345421103db..e42cc7a8d10e 100644
--- a/drivers/pinctrl/sh-pfc/sh_pfc.h
+++ b/drivers/pinctrl/sh-pfc/sh_pfc.h
@@ -189,6 +189,12 @@ struct sh_pfc_window {
unsigned long size;
};
+struct sh_pfc_bias_info {
+ u16 pin;
+ u16 reg : 11;
+ u16 bit : 5;
+};
+
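The new descriptor packs a pin number, an 11-bit register offset and a 5-bit bit position into four bytes, so per-pin bias tables stay compact. A minimal sketch of how a SoC driver might search such a table; the entry values and the helper are illustrative, not taken from this patch:

static const struct sh_pfc_bias_info bias_info[] = {
	{ .pin = 16, .reg = 0x400, .bit = 3 },	/* hypothetical entry */
};

static const struct sh_pfc_bias_info *
sh_pfc_pin_to_bias_info(const struct sh_pfc_bias_info *info,
			unsigned int num, unsigned int pin)
{
	unsigned int i;

	for (i = 0; i < num; i++)
		if (info[i].pin == pin)
			return &info[i];

	return NULL;
}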
struct sh_pfc_pin_range;
struct sh_pfc {
@@ -540,6 +546,14 @@ extern const struct sh_pfc_soc_info shx3_pinmux_info;
.configs = SH_PFC_PIN_CFG_NO_GPIO, \
}
+/* SH_PFC_PIN_NAMED_CFG - Expand to a sh_pfc_pin entry with the given name */
+#define SH_PFC_PIN_NAMED_CFG(row, col, _name, cfgs) \
+ { \
+ .pin = PIN_NUMBER(row, col), \
+ .name = __stringify(PIN_##_name), \
+ .configs = SH_PFC_PIN_CFG_NO_GPIO | cfgs, \
+ }
+
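For reference, a hedged expansion of the macro above; the row, column, name and capability flag are made-up values, and PIN_NUMBER() is whatever the SoC file defines it to be:

SH_PFC_PIN_NAMED_CFG('A', 5, A5, SH_PFC_PIN_CFG_PULL_UP)

/* expands to: */
{
	.pin = PIN_NUMBER('A', 5),
	.name = "PIN_A5",	/* via __stringify(PIN_##_name) */
	.configs = SH_PFC_PIN_CFG_NO_GPIO | SH_PFC_PIN_CFG_PULL_UP,
}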
/* PINMUX_DATA_ALL - Expand to a list of PORT_name_DATA, PORT_name_FN0,
* PORT_name_OUT, PORT_name_IN marks
*/
diff --git a/drivers/pinctrl/stm32/pinctrl-stm32f429.c b/drivers/pinctrl/stm32/pinctrl-stm32f429.c
index e9b15dc0654b..990b867b9625 100644
--- a/drivers/pinctrl/stm32/pinctrl-stm32f429.c
+++ b/drivers/pinctrl/stm32/pinctrl-stm32f429.c
@@ -1584,8 +1584,4 @@ static struct platform_driver stm32f429_pinctrl_driver = {
},
};
-static int __init stm32f429_pinctrl_init(void)
-{
- return platform_driver_register(&stm32f429_pinctrl_driver);
-}
-device_initcall(stm32f429_pinctrl_init);
+builtin_platform_driver(stm32f429_pinctrl_driver);
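builtin_platform_driver() generates the same device_initcall() registration the open-coded function provided; roughly, paraphrasing include/linux/platform_device.h:

static int __init stm32f429_pinctrl_driver_init(void)
{
	return platform_driver_register(&stm32f429_pinctrl_driver);
}
device_initcall(stm32f429_pinctrl_driver_init);

The same substitution is applied to the sunxi and vt8500 drivers below, which also drop their MODULE_*() boilerplate since they can now only be built in.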
diff --git a/drivers/pinctrl/sunxi/pinctrl-gr8.c b/drivers/pinctrl/sunxi/pinctrl-gr8.c
index 2904d2b7378b..2f232c3a0579 100644
--- a/drivers/pinctrl/sunxi/pinctrl-gr8.c
+++ b/drivers/pinctrl/sunxi/pinctrl-gr8.c
@@ -12,7 +12,7 @@
* warranty of any kind, whether express or implied.
*/
-#include <linux/module.h>
+#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/of.h>
#include <linux/of_device.h>
@@ -525,7 +525,6 @@ static const struct of_device_id sun5i_gr8_pinctrl_match[] = {
{ .compatible = "nextthing,gr8-pinctrl", },
{}
};
-MODULE_DEVICE_TABLE(of, sun5i_gr8_pinctrl_match);
static struct platform_driver sun5i_gr8_pinctrl_driver = {
.probe = sun5i_gr8_pinctrl_probe,
@@ -534,8 +533,4 @@ static struct platform_driver sun5i_gr8_pinctrl_driver = {
.of_match_table = sun5i_gr8_pinctrl_match,
},
};
-module_platform_driver(sun5i_gr8_pinctrl_driver);
-
-MODULE_AUTHOR("Mylene Josserand <mylene.josserand@free-electrons.com");
-MODULE_DESCRIPTION("NextThing GR8 pinctrl driver");
-MODULE_LICENSE("GPL");
+builtin_platform_driver(sun5i_gr8_pinctrl_driver);
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun4i-a10.c b/drivers/pinctrl/sunxi/pinctrl-sun4i-a10.c
index 862a096c5dba..fb30b86a97ee 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sun4i-a10.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sun4i-a10.c
@@ -10,7 +10,7 @@
* warranty of any kind, whether express or implied.
*/
-#include <linux/module.h>
+#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/of.h>
#include <linux/of_device.h>
@@ -1035,7 +1035,6 @@ static const struct of_device_id sun4i_a10_pinctrl_match[] = {
{ .compatible = "allwinner,sun4i-a10-pinctrl", },
{}
};
-MODULE_DEVICE_TABLE(of, sun4i_a10_pinctrl_match);
static struct platform_driver sun4i_a10_pinctrl_driver = {
.probe = sun4i_a10_pinctrl_probe,
@@ -1044,8 +1043,4 @@ static struct platform_driver sun4i_a10_pinctrl_driver = {
.of_match_table = sun4i_a10_pinctrl_match,
},
};
-module_platform_driver(sun4i_a10_pinctrl_driver);
-
-MODULE_AUTHOR("Maxime Ripard <maxime.ripard@free-electrons.com");
-MODULE_DESCRIPTION("Allwinner A10 pinctrl driver");
-MODULE_LICENSE("GPL");
+builtin_platform_driver(sun4i_a10_pinctrl_driver);
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun5i-a10s.c b/drivers/pinctrl/sunxi/pinctrl-sun5i-a10s.c
index f9a3f8f446f7..a5b57fdff9e1 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sun5i-a10s.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sun5i-a10s.c
@@ -10,7 +10,7 @@
* warranty of any kind, whether express or implied.
*/
-#include <linux/module.h>
+#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/of.h>
#include <linux/of_device.h>
@@ -674,7 +674,6 @@ static const struct of_device_id sun5i_a10s_pinctrl_match[] = {
{ .compatible = "allwinner,sun5i-a10s-pinctrl", },
{}
};
-MODULE_DEVICE_TABLE(of, sun5i_a10s_pinctrl_match);
static struct platform_driver sun5i_a10s_pinctrl_driver = {
.probe = sun5i_a10s_pinctrl_probe,
@@ -683,8 +682,4 @@ static struct platform_driver sun5i_a10s_pinctrl_driver = {
.of_match_table = sun5i_a10s_pinctrl_match,
},
};
-module_platform_driver(sun5i_a10s_pinctrl_driver);
-
-MODULE_AUTHOR("Maxime Ripard <maxime.ripard@free-electrons.com");
-MODULE_DESCRIPTION("Allwinner A10s pinctrl driver");
-MODULE_LICENSE("GPL");
+builtin_platform_driver(sun5i_a10s_pinctrl_driver);
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun5i-a13.c b/drivers/pinctrl/sunxi/pinctrl-sun5i-a13.c
index 2bb07b38834f..8575f3f6d3dd 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sun5i-a13.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sun5i-a13.c
@@ -10,7 +10,7 @@
* warranty of any kind, whether express or implied.
*/
-#include <linux/module.h>
+#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/of.h>
#include <linux/of_device.h>
@@ -392,7 +392,6 @@ static const struct of_device_id sun5i_a13_pinctrl_match[] = {
{ .compatible = "allwinner,sun5i-a13-pinctrl", },
{}
};
-MODULE_DEVICE_TABLE(of, sun5i_a13_pinctrl_match);
static struct platform_driver sun5i_a13_pinctrl_driver = {
.probe = sun5i_a13_pinctrl_probe,
@@ -401,8 +400,4 @@ static struct platform_driver sun5i_a13_pinctrl_driver = {
.of_match_table = sun5i_a13_pinctrl_match,
},
};
-module_platform_driver(sun5i_a13_pinctrl_driver);
-
-MODULE_AUTHOR("Maxime Ripard <maxime.ripard@free-electrons.com");
-MODULE_DESCRIPTION("Allwinner A13 pinctrl driver");
-MODULE_LICENSE("GPL");
+builtin_platform_driver(sun5i_a13_pinctrl_driver);
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun6i-a31-r.c b/drivers/pinctrl/sunxi/pinctrl-sun6i-a31-r.c
index d4bc4f0e8be0..a22bd88a1f03 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sun6i-a31-r.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sun6i-a31-r.c
@@ -12,7 +12,7 @@
* warranty of any kind, whether express or implied.
*/
-#include <linux/module.h>
+#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/of.h>
#include <linux/of_device.h>
@@ -136,7 +136,6 @@ static const struct of_device_id sun6i_a31_r_pinctrl_match[] = {
{ .compatible = "allwinner,sun6i-a31-r-pinctrl", },
{}
};
-MODULE_DEVICE_TABLE(of, sun6i_a31_r_pinctrl_match);
static struct platform_driver sun6i_a31_r_pinctrl_driver = {
.probe = sun6i_a31_r_pinctrl_probe,
@@ -145,9 +144,4 @@ static struct platform_driver sun6i_a31_r_pinctrl_driver = {
.of_match_table = sun6i_a31_r_pinctrl_match,
},
};
-module_platform_driver(sun6i_a31_r_pinctrl_driver);
-
-MODULE_AUTHOR("Boris Brezillon <boris.brezillon@free-electrons.com");
-MODULE_AUTHOR("Maxime Ripard <maxime.ripard@free-electrons.com");
-MODULE_DESCRIPTION("Allwinner A31 R_PIO pinctrl driver");
-MODULE_LICENSE("GPL");
+builtin_platform_driver(sun6i_a31_r_pinctrl_driver);
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun6i-a31.c b/drivers/pinctrl/sunxi/pinctrl-sun6i-a31.c
index a70b52957e24..9e58926bef37 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sun6i-a31.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sun6i-a31.c
@@ -10,7 +10,7 @@
* warranty of any kind, whether express or implied.
*/
-#include <linux/module.h>
+#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/of.h>
#include <linux/of_device.h>
@@ -934,7 +934,6 @@ static const struct of_device_id sun6i_a31_pinctrl_match[] = {
{ .compatible = "allwinner,sun6i-a31-pinctrl", },
{}
};
-MODULE_DEVICE_TABLE(of, sun6i_a31_pinctrl_match);
static struct platform_driver sun6i_a31_pinctrl_driver = {
.probe = sun6i_a31_pinctrl_probe,
@@ -943,8 +942,4 @@ static struct platform_driver sun6i_a31_pinctrl_driver = {
.of_match_table = sun6i_a31_pinctrl_match,
},
};
-module_platform_driver(sun6i_a31_pinctrl_driver);
-
-MODULE_AUTHOR("Maxime Ripard <maxime.ripard@free-electrons.com");
-MODULE_DESCRIPTION("Allwinner A31 pinctrl driver");
-MODULE_LICENSE("GPL");
+builtin_platform_driver(sun6i_a31_pinctrl_driver);
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun6i-a31s.c b/drivers/pinctrl/sunxi/pinctrl-sun6i-a31s.c
index e570d5c93ecc..231a746a5356 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sun6i-a31s.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sun6i-a31s.c
@@ -11,7 +11,7 @@
* warranty of any kind, whether express or implied.
*/
-#include <linux/module.h>
+#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/of.h>
#include <linux/of_device.h>
@@ -798,7 +798,6 @@ static const struct of_device_id sun6i_a31s_pinctrl_match[] = {
{ .compatible = "allwinner,sun6i-a31s-pinctrl", },
{}
};
-MODULE_DEVICE_TABLE(of, sun6i_a31s_pinctrl_match);
static struct platform_driver sun6i_a31s_pinctrl_driver = {
.probe = sun6i_a31s_pinctrl_probe,
@@ -807,8 +806,4 @@ static struct platform_driver sun6i_a31s_pinctrl_driver = {
.of_match_table = sun6i_a31s_pinctrl_match,
},
};
-module_platform_driver(sun6i_a31s_pinctrl_driver);
-
-MODULE_AUTHOR("Hans de Goede <hdegoede@redhat.com>");
-MODULE_DESCRIPTION("Allwinner A31s pinctrl driver");
-MODULE_LICENSE("GPL");
+builtin_platform_driver(sun6i_a31s_pinctrl_driver);
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun7i-a20.c b/drivers/pinctrl/sunxi/pinctrl-sun7i-a20.c
index 435ad30f45db..b6f4c68ffb39 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sun7i-a20.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sun7i-a20.c
@@ -10,7 +10,7 @@
* warranty of any kind, whether express or implied.
*/
-#include <linux/module.h>
+#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/of.h>
#include <linux/of_device.h>
@@ -1045,7 +1045,6 @@ static const struct of_device_id sun7i_a20_pinctrl_match[] = {
{ .compatible = "allwinner,sun7i-a20-pinctrl", },
{}
};
-MODULE_DEVICE_TABLE(of, sun7i_a20_pinctrl_match);
static struct platform_driver sun7i_a20_pinctrl_driver = {
.probe = sun7i_a20_pinctrl_probe,
@@ -1054,8 +1053,4 @@ static struct platform_driver sun7i_a20_pinctrl_driver = {
.of_match_table = sun7i_a20_pinctrl_match,
},
};
-module_platform_driver(sun7i_a20_pinctrl_driver);
-
-MODULE_AUTHOR("Maxime Ripard <maxime.ripard@free-electrons.com");
-MODULE_DESCRIPTION("Allwinner A20 pinctrl driver");
-MODULE_LICENSE("GPL");
+builtin_platform_driver(sun7i_a20_pinctrl_driver);
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun8i-a23-r.c b/drivers/pinctrl/sunxi/pinctrl-sun8i-a23-r.c
index 056287635873..2292e05a397b 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sun8i-a23-r.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sun8i-a23-r.c
@@ -15,7 +15,7 @@
* warranty of any kind, whether express or implied.
*/
-#include <linux/module.h>
+#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/of.h>
#include <linux/of_device.h>
@@ -123,7 +123,6 @@ static const struct of_device_id sun8i_a23_r_pinctrl_match[] = {
{ .compatible = "allwinner,sun8i-a23-r-pinctrl", },
{}
};
-MODULE_DEVICE_TABLE(of, sun8i_a23_r_pinctrl_match);
static struct platform_driver sun8i_a23_r_pinctrl_driver = {
.probe = sun8i_a23_r_pinctrl_probe,
@@ -132,10 +131,4 @@ static struct platform_driver sun8i_a23_r_pinctrl_driver = {
.of_match_table = sun8i_a23_r_pinctrl_match,
},
};
-module_platform_driver(sun8i_a23_r_pinctrl_driver);
-
-MODULE_AUTHOR("Chen-Yu Tsai <wens@csie.org>");
-MODULE_AUTHOR("Boris Brezillon <boris.brezillon@free-electrons.com");
-MODULE_AUTHOR("Maxime Ripard <maxime.ripard@free-electrons.com");
-MODULE_DESCRIPTION("Allwinner A23 R_PIO pinctrl driver");
-MODULE_LICENSE("GPL");
+builtin_platform_driver(sun8i_a23_r_pinctrl_driver);
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun8i-a23.c b/drivers/pinctrl/sunxi/pinctrl-sun8i-a23.c
index f9d661e5c14a..721b6935baf3 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sun8i-a23.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sun8i-a23.c
@@ -14,7 +14,7 @@
* warranty of any kind, whether express or implied.
*/
-#include <linux/module.h>
+#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/of.h>
#include <linux/of_device.h>
@@ -575,7 +575,6 @@ static const struct of_device_id sun8i_a23_pinctrl_match[] = {
{ .compatible = "allwinner,sun8i-a23-pinctrl", },
{}
};
-MODULE_DEVICE_TABLE(of, sun8i_a23_pinctrl_match);
static struct platform_driver sun8i_a23_pinctrl_driver = {
.probe = sun8i_a23_pinctrl_probe,
@@ -584,9 +583,4 @@ static struct platform_driver sun8i_a23_pinctrl_driver = {
.of_match_table = sun8i_a23_pinctrl_match,
},
};
-module_platform_driver(sun8i_a23_pinctrl_driver);
-
-MODULE_AUTHOR("Chen-Yu Tsai <wens@csie.org>");
-MODULE_AUTHOR("Maxime Ripard <maxime.ripard@free-electrons.com");
-MODULE_DESCRIPTION("Allwinner A23 pinctrl driver");
-MODULE_LICENSE("GPL");
+builtin_platform_driver(sun8i_a23_pinctrl_driver);
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun8i-a33.c b/drivers/pinctrl/sunxi/pinctrl-sun8i-a33.c
index 3131cac2b76f..ef1e0bef4099 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sun8i-a33.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sun8i-a33.c
@@ -12,7 +12,7 @@
* warranty of any kind, whether express or implied.
*/
-#include <linux/module.h>
+#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/of.h>
#include <linux/of_device.h>
@@ -498,7 +498,6 @@ static const struct of_device_id sun8i_a33_pinctrl_match[] = {
{ .compatible = "allwinner,sun8i-a33-pinctrl", },
{}
};
-MODULE_DEVICE_TABLE(of, sun8i_a33_pinctrl_match);
static struct platform_driver sun8i_a33_pinctrl_driver = {
.probe = sun8i_a33_pinctrl_probe,
@@ -507,8 +506,4 @@ static struct platform_driver sun8i_a33_pinctrl_driver = {
.of_match_table = sun8i_a33_pinctrl_match,
},
};
-module_platform_driver(sun8i_a33_pinctrl_driver);
-
-MODULE_AUTHOR("Vishnu Patekar <vishnupatekar0510@gmail.com>");
-MODULE_DESCRIPTION("Allwinner a33 pinctrl driver");
-MODULE_LICENSE("GPL");
+builtin_platform_driver(sun8i_a33_pinctrl_driver);
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun8i-a83t.c b/drivers/pinctrl/sunxi/pinctrl-sun8i-a83t.c
index 90b973e15982..9aec1d2232dd 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sun8i-a83t.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sun8i-a83t.c
@@ -12,7 +12,7 @@
* warranty of any kind, whether express or implied.
*/
-#include <linux/module.h>
+#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/of.h>
#include <linux/of_device.h>
@@ -587,7 +587,6 @@ static const struct of_device_id sun8i_a83t_pinctrl_match[] = {
{ .compatible = "allwinner,sun8i-a83t-pinctrl", },
{}
};
-MODULE_DEVICE_TABLE(of, sun8i_a83t_pinctrl_match);
static struct platform_driver sun8i_a83t_pinctrl_driver = {
.probe = sun8i_a83t_pinctrl_probe,
@@ -596,8 +595,4 @@ static struct platform_driver sun8i_a83t_pinctrl_driver = {
.of_match_table = sun8i_a83t_pinctrl_match,
},
};
-module_platform_driver(sun8i_a83t_pinctrl_driver);
-
-MODULE_AUTHOR("Vishnu Patekar <vishnupatekar0510@gmail.com>");
-MODULE_DESCRIPTION("Allwinner a83t pinctrl driver");
-MODULE_LICENSE("GPL");
+builtin_platform_driver(sun8i_a83t_pinctrl_driver);
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun9i-a80.c b/drivers/pinctrl/sunxi/pinctrl-sun9i-a80.c
index 1b580ba76453..bc14e954d7a2 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sun9i-a80.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sun9i-a80.c
@@ -10,7 +10,7 @@
* warranty of any kind, whether express or implied.
*/
-#include <linux/module.h>
+#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/of.h>
#include <linux/of_device.h>
@@ -733,7 +733,6 @@ static const struct of_device_id sun9i_a80_pinctrl_match[] = {
{ .compatible = "allwinner,sun9i-a80-pinctrl", },
{}
};
-MODULE_DEVICE_TABLE(of, sun9i_a80_pinctrl_match);
static struct platform_driver sun9i_a80_pinctrl_driver = {
.probe = sun9i_a80_pinctrl_probe,
@@ -742,8 +741,4 @@ static struct platform_driver sun9i_a80_pinctrl_driver = {
.of_match_table = sun9i_a80_pinctrl_match,
},
};
-module_platform_driver(sun9i_a80_pinctrl_driver);
-
-MODULE_AUTHOR("Maxime Ripard <maxime.ripard@free-electrons.com>");
-MODULE_DESCRIPTION("Allwinner A80 pinctrl driver");
-MODULE_LICENSE("GPL");
+builtin_platform_driver(sun9i_a80_pinctrl_driver);
diff --git a/drivers/pinctrl/sunxi/pinctrl-sunxi.c b/drivers/pinctrl/sunxi/pinctrl-sunxi.c
index 0facbea5f465..0eb51e33cb1b 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sunxi.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sunxi.c
@@ -28,6 +28,8 @@
#include <linux/platform_device.h>
#include <linux/slab.h>
+#include <dt-bindings/pinctrl/sun4i-a10.h>
+
#include "../core.h"
#include "pinctrl-sunxi.h"
@@ -145,6 +147,171 @@ static int sunxi_pctrl_get_group_pins(struct pinctrl_dev *pctldev,
return 0;
}
+static bool sunxi_pctrl_has_bias_prop(struct device_node *node)
+{
+ return of_find_property(node, "bias-pull-up", NULL) ||
+ of_find_property(node, "bias-pull-down", NULL) ||
+ of_find_property(node, "bias-disable", NULL) ||
+ of_find_property(node, "allwinner,pull", NULL);
+}
+
+static bool sunxi_pctrl_has_drive_prop(struct device_node *node)
+{
+ return of_find_property(node, "drive-strength", NULL) ||
+ of_find_property(node, "allwinner,drive", NULL);
+}
+
+static int sunxi_pctrl_parse_bias_prop(struct device_node *node)
+{
+ u32 val;
+
+ /* Try the new style binding */
+ if (of_find_property(node, "bias-pull-up", NULL))
+ return PIN_CONFIG_BIAS_PULL_UP;
+
+ if (of_find_property(node, "bias-pull-down", NULL))
+ return PIN_CONFIG_BIAS_PULL_DOWN;
+
+ if (of_find_property(node, "bias-disable", NULL))
+ return PIN_CONFIG_BIAS_DISABLE;
+
+ /* And fall back to the old binding */
+ if (of_property_read_u32(node, "allwinner,pull", &val))
+ return -EINVAL;
+
+ switch (val) {
+ case SUN4I_PINCTRL_NO_PULL:
+ return PIN_CONFIG_BIAS_DISABLE;
+ case SUN4I_PINCTRL_PULL_UP:
+ return PIN_CONFIG_BIAS_PULL_UP;
+ case SUN4I_PINCTRL_PULL_DOWN:
+ return PIN_CONFIG_BIAS_PULL_DOWN;
+ }
+
+ return -EINVAL;
+}
+
+static int sunxi_pctrl_parse_drive_prop(struct device_node *node)
+{
+ u32 val;
+
+ /* Try the new style binding */
+ if (!of_property_read_u32(node, "drive-strength", &val)) {
+ /* We can't go below 10 mA ... */
+ if (val < 10)
+ return -EINVAL;
+
+ /* ... and only up to 40 mA ... */
+ if (val > 40)
+ val = 40;
+
+ /* by steps of 10 mA */
+ return rounddown(val, 10);
+ }
+
+ /* And then fall back to the old binding */
+ if (of_property_read_u32(node, "allwinner,drive", &val))
+ return -EINVAL;
+
+ return (val + 1) * 10;
+}
+
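Side by side, the two bindings therefore resolve as follows (a sketch derived from the code above):

/*
 * "drive-strength" = <10> -> 10 mA
 * "drive-strength" = <25> -> 20 mA	(rounded down to a step of 10)
 * "drive-strength" = <55> -> 40 mA	(clamped to the maximum)
 * "drive-strength" = <5>  -> -EINVAL	(below the hardware minimum)
 * "allwinner,drive" = <0> -> 10 mA, <3> -> 40 mA	((val + 1) * 10)
 */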
+static const char *sunxi_pctrl_parse_function_prop(struct device_node *node)
+{
+ const char *function;
+ int ret;
+
+ /* Try the generic binding */
+ ret = of_property_read_string(node, "function", &function);
+ if (!ret)
+ return function;
+
+ /* And fall back to our legacy one */
+ ret = of_property_read_string(node, "allwinner,function", &function);
+ if (!ret)
+ return function;
+
+ return NULL;
+}
+
+static const char *sunxi_pctrl_find_pins_prop(struct device_node *node,
+ int *npins)
+{
+ int count;
+
+ /* Try the generic binding */
+ count = of_property_count_strings(node, "pins");
+ if (count > 0) {
+ *npins = count;
+ return "pins";
+ }
+
+ /* And fall back to our legacy one */
+ count = of_property_count_strings(node, "allwinner,pins");
+ if (count > 0) {
+ *npins = count;
+ return "allwinner,pins";
+ }
+
+ return NULL;
+}
+
+static unsigned long *sunxi_pctrl_build_pin_config(struct device_node *node,
+ unsigned int *len)
+{
+ unsigned long *pinconfig;
+ unsigned int configlen = 0, idx = 0;
+ int ret;
+
+ if (sunxi_pctrl_has_drive_prop(node))
+ configlen++;
+ if (sunxi_pctrl_has_bias_prop(node))
+ configlen++;
+
+ /*
+ * If we don't have any configuration, bail out
+ */
+ if (!configlen)
+ return NULL;
+
+ pinconfig = kzalloc(configlen * sizeof(*pinconfig), GFP_KERNEL);
+ if (!pinconfig)
+ return ERR_PTR(-ENOMEM);
+
+ if (sunxi_pctrl_has_drive_prop(node)) {
+ int drive = sunxi_pctrl_parse_drive_prop(node);
+ if (drive < 0) {
+ ret = drive;
+ goto err_free;
+ }
+
+ pinconfig[idx++] = pinconf_to_config_packed(PIN_CONFIG_DRIVE_STRENGTH,
+ drive);
+ }
+
+ if (sunxi_pctrl_has_bias_prop(node)) {
+ int pull = sunxi_pctrl_parse_bias_prop(node);
+ int arg = 0;
+ if (pull < 0) {
+ ret = pull;
+ goto err_free;
+ }
+
+ if (pull != PIN_CONFIG_BIAS_DISABLE)
+ arg = 1; /* hardware uses weak pull resistors */
+
+ pinconfig[idx++] = pinconf_to_config_packed(pull, arg);
+ }
+
+ *len = configlen;
+ return pinconfig;
+
+err_free:
+ kfree(pinconfig);
+ return ERR_PTR(ret);
+}
+
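Both entries are packed with pinconf_to_config_packed(), which in this kernel generation stores the parameter in the low byte and the argument above it (PIN_CONF_PACKED() in include/linux/pinctrl/pinconf-generic.h); a sketch:

/*
 * config = (argument << 8) | (param & 0xff)
 *
 * e.g. a 30 mA drive strength packs as
 * (30 << 8) | PIN_CONFIG_DRIVE_STRENGTH
 */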
static int sunxi_pctrl_dt_node_to_map(struct pinctrl_dev *pctldev,
struct device_node *node,
struct pinctrl_map **map,
@@ -153,38 +320,48 @@ static int sunxi_pctrl_dt_node_to_map(struct pinctrl_dev *pctldev,
struct sunxi_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev);
unsigned long *pinconfig;
struct property *prop;
- const char *function;
+ const char *function, *pin_prop;
const char *group;
- int ret, nmaps, i = 0;
- u32 val;
+ int ret, npins, nmaps, configlen = 0, i = 0;
*map = NULL;
*num_maps = 0;
- ret = of_property_read_string(node, "allwinner,function", &function);
- if (ret) {
- dev_err(pctl->dev,
- "missing allwinner,function property in node %s\n",
+ function = sunxi_pctrl_parse_function_prop(node);
+ if (!function) {
+ dev_err(pctl->dev, "missing function property in node %s\n",
node->name);
return -EINVAL;
}
- nmaps = of_property_count_strings(node, "allwinner,pins") * 2;
- if (nmaps < 0) {
- dev_err(pctl->dev,
- "missing allwinner,pins property in node %s\n",
+ pin_prop = sunxi_pctrl_find_pins_prop(node, &npins);
+ if (!pin_prop) {
+ dev_err(pctl->dev, "missing pins property in node %s\n",
node->name);
return -EINVAL;
}
+ /*
+ * We have two maps for each pin: one for the function, one
+ * for the configuration (bias, strength, etc).
+ *
+ * We might be slightly overshooting, since we might not have
+ * any configuration.
+ */
+ nmaps = npins * 2;
*map = kmalloc(nmaps * sizeof(struct pinctrl_map), GFP_KERNEL);
if (!*map)
return -ENOMEM;
- of_property_for_each_string(node, "allwinner,pins", prop, group) {
+ pinconfig = sunxi_pctrl_build_pin_config(node, &configlen);
+ if (IS_ERR(pinconfig)) {
+ ret = PTR_ERR(pinconfig);
+ goto err_free_map;
+ }
+
+ of_property_for_each_string(node, pin_prop, prop, group) {
struct sunxi_pinctrl_group *grp =
sunxi_pinctrl_find_group_by_name(pctl, group);
- int j = 0, configlen = 0;
if (!grp) {
dev_err(pctl->dev, "unknown pin %s", group);
@@ -205,45 +382,31 @@ static int sunxi_pctrl_dt_node_to_map(struct pinctrl_dev *pctldev,
i++;
- (*map)[i].type = PIN_MAP_TYPE_CONFIGS_GROUP;
- (*map)[i].data.configs.group_or_pin = group;
-
- if (of_find_property(node, "allwinner,drive", NULL))
- configlen++;
- if (of_find_property(node, "allwinner,pull", NULL))
- configlen++;
-
- pinconfig = kzalloc(configlen * sizeof(*pinconfig), GFP_KERNEL);
- if (!pinconfig) {
- kfree(*map);
- return -ENOMEM;
+ if (pinconfig) {
+ (*map)[i].type = PIN_MAP_TYPE_CONFIGS_GROUP;
+ (*map)[i].data.configs.group_or_pin = group;
+ (*map)[i].data.configs.configs = pinconfig;
+ (*map)[i].data.configs.num_configs = configlen;
+ i++;
}
-
- if (!of_property_read_u32(node, "allwinner,drive", &val)) {
- u16 strength = (val + 1) * 10;
- pinconfig[j++] =
- pinconf_to_config_packed(PIN_CONFIG_DRIVE_STRENGTH,
- strength);
- }
-
- if (!of_property_read_u32(node, "allwinner,pull", &val)) {
- enum pin_config_param pull = PIN_CONFIG_END;
- if (val == 1)
- pull = PIN_CONFIG_BIAS_PULL_UP;
- else if (val == 2)
- pull = PIN_CONFIG_BIAS_PULL_DOWN;
- pinconfig[j++] = pinconf_to_config_packed(pull, 0);
- }
-
- (*map)[i].data.configs.configs = pinconfig;
- (*map)[i].data.configs.num_configs = configlen;
-
- i++;
}
- *num_maps = nmaps;
+ *num_maps = i;
+
+ /*
+ * We now have the number of maps we need; we can resize our
+ * map array.
+ */
+ *map = krealloc(*map, i * sizeof(struct pinctrl_map), GFP_KERNEL);
+ if (!*map)
+ return -ENOMEM;
return 0;
+
+err_free_map:
+ kfree(*map);
+ *map = NULL;
+ return ret;
}
static void sunxi_pctrl_dt_free_map(struct pinctrl_dev *pctldev,
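The map handling above is an over-allocate-then-trim pattern: two slots are reserved per pin (mux plus optional config), the real count is tracked in i, and a shrinking krealloc() discards the slack:

/*
 * nmaps = npins * 2;                            worst case
 * *map = kmalloc(nmaps * sizeof(**map), ...);
 * ... fill entries 0 .. i-1, with i <= nmaps ...
 * *map = krealloc(*map, i * sizeof(**map), ...);    trim to fit
 */

One caveat worth noting: if the shrinking krealloc() ever failed, the original array and the shared pinconfig buffer would leak; in practice a shrinking reallocation does not fail.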
@@ -252,9 +415,17 @@ static void sunxi_pctrl_dt_free_map(struct pinctrl_dev *pctldev,
{
int i;
- for (i = 0; i < num_maps; i++) {
- if (map[i].type == PIN_MAP_TYPE_CONFIGS_GROUP)
- kfree(map[i].data.configs.configs);
+ /* pin config is never in the first map */
+ for (i = 1; i < num_maps; i++) {
+ if (map[i].type != PIN_MAP_TYPE_CONFIGS_GROUP)
+ continue;
+
+ /*
+ * All the maps share the same pin config,
+ * free only the first one we find.
+ */
+ kfree(map[i].data.configs.configs);
+ break;
}
kfree(map);
@@ -268,15 +439,91 @@ static const struct pinctrl_ops sunxi_pctrl_ops = {
.get_group_pins = sunxi_pctrl_get_group_pins,
};
+static int sunxi_pconf_reg(unsigned pin, enum pin_config_param param,
+ u32 *offset, u32 *shift, u32 *mask)
+{
+ switch (param) {
+ case PIN_CONFIG_DRIVE_STRENGTH:
+ *offset = sunxi_dlevel_reg(pin);
+ *shift = sunxi_dlevel_offset(pin);
+ *mask = DLEVEL_PINS_MASK;
+ break;
+
+ case PIN_CONFIG_BIAS_PULL_UP:
+ case PIN_CONFIG_BIAS_PULL_DOWN:
+ case PIN_CONFIG_BIAS_DISABLE:
+ *offset = sunxi_pull_reg(pin);
+ *shift = sunxi_pull_offset(pin);
+ *mask = PULL_PINS_MASK;
+ break;
+
+ default:
+ return -ENOTSUPP;
+ }
+
+ return 0;
+}
+
+static int sunxi_pconf_get(struct pinctrl_dev *pctldev, unsigned pin,
+ unsigned long *config)
+{
+ struct sunxi_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev);
+ enum pin_config_param param = pinconf_to_config_param(*config);
+ u32 offset, shift, mask, val;
+ u16 arg;
+ int ret;
+
+ pin -= pctl->desc->pin_base;
+
+ ret = sunxi_pconf_reg(pin, param, &offset, &shift, &mask);
+ if (ret < 0)
+ return ret;
+
+ val = (readl(pctl->membase + offset) >> shift) & mask;
+
+ switch (pinconf_to_config_param(*config)) {
+ case PIN_CONFIG_DRIVE_STRENGTH:
+ arg = (val + 1) * 10;
+ break;
+
+ case PIN_CONFIG_BIAS_PULL_UP:
+ if (val != SUN4I_PINCTRL_PULL_UP)
+ return -EINVAL;
+ arg = 1; /* hardware is weak pull-up */
+ break;
+
+ case PIN_CONFIG_BIAS_PULL_DOWN:
+ if (val != SUN4I_PINCTRL_PULL_DOWN)
+ return -EINVAL;
+ arg = 1; /* hardware is weak pull-down */
+ break;
+
+ case PIN_CONFIG_BIAS_DISABLE:
+ if (val != SUN4I_PINCTRL_NO_PULL)
+ return -EINVAL;
+ arg = 0;
+ break;
+
+ default:
+ /* sunxi_pconf_reg should catch anything unsupported */
+ WARN_ON(1);
+ return -ENOTSUPP;
+ }
+
+ *config = pinconf_to_config_packed(param, arg);
+
+ return 0;
+}
+
static int sunxi_pconf_group_get(struct pinctrl_dev *pctldev,
unsigned group,
unsigned long *config)
{
struct sunxi_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev);
+ struct sunxi_pinctrl_group *g = &pctl->groups[group];
- *config = pctl->groups[group].config;
-
- return 0;
+ /* We only support 1 pin per group. Chain it to the pin callback */
+ return sunxi_pconf_get(pctldev, g->pin, config);
}
static int sunxi_pconf_group_set(struct pinctrl_dev *pctldev,
@@ -286,23 +533,27 @@ static int sunxi_pconf_group_set(struct pinctrl_dev *pctldev,
{
struct sunxi_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev);
struct sunxi_pinctrl_group *g = &pctl->groups[group];
- unsigned long flags;
unsigned pin = g->pin - pctl->desc->pin_base;
- u32 val, mask;
- u16 strength;
- u8 dlevel;
int i;
- spin_lock_irqsave(&pctl->lock, flags);
-
for (i = 0; i < num_configs; i++) {
- switch (pinconf_to_config_param(configs[i])) {
+ enum pin_config_param param;
+ unsigned long flags;
+ u32 offset, shift, mask, reg;
+ u16 arg, val;
+ int ret;
+
+ param = pinconf_to_config_param(configs[i]);
+ arg = pinconf_to_config_argument(configs[i]);
+
+ ret = sunxi_pconf_reg(pin, param, &offset, &shift, &mask);
+ if (ret < 0)
+ return ret;
+
+ switch (param) {
case PIN_CONFIG_DRIVE_STRENGTH:
- strength = pinconf_to_config_argument(configs[i]);
- if (strength > 40) {
- spin_unlock_irqrestore(&pctl->lock, flags);
+ if (arg < 10 || arg > 40)
return -EINVAL;
- }
/*
* We convert from mA to what the register expects:
* 0: 10mA
@@ -310,38 +561,40 @@ static int sunxi_pconf_group_set(struct pinctrl_dev *pctldev,
* 2: 30mA
* 3: 40mA
*/
- dlevel = strength / 10 - 1;
- val = readl(pctl->membase + sunxi_dlevel_reg(pin));
- mask = DLEVEL_PINS_MASK << sunxi_dlevel_offset(pin);
- writel((val & ~mask)
- | dlevel << sunxi_dlevel_offset(pin),
- pctl->membase + sunxi_dlevel_reg(pin));
+ val = arg / 10 - 1;
+ break;
+ case PIN_CONFIG_BIAS_DISABLE:
+ val = 0;
break;
case PIN_CONFIG_BIAS_PULL_UP:
- val = readl(pctl->membase + sunxi_pull_reg(pin));
- mask = PULL_PINS_MASK << sunxi_pull_offset(pin);
- writel((val & ~mask) | 1 << sunxi_pull_offset(pin),
- pctl->membase + sunxi_pull_reg(pin));
+ if (arg == 0)
+ return -EINVAL;
+ val = 1;
break;
case PIN_CONFIG_BIAS_PULL_DOWN:
- val = readl(pctl->membase + sunxi_pull_reg(pin));
- mask = PULL_PINS_MASK << sunxi_pull_offset(pin);
- writel((val & ~mask) | 2 << sunxi_pull_offset(pin),
- pctl->membase + sunxi_pull_reg(pin));
+ if (arg == 0)
+ return -EINVAL;
+ val = 2;
break;
default:
- break;
+ /* sunxi_pconf_reg should catch anything unsupported */
+ WARN_ON(1);
+ return -ENOTSUPP;
}
- /* cache the config value */
- g->config = configs[i];
- } /* for each config */
- spin_unlock_irqrestore(&pctl->lock, flags);
+ spin_lock_irqsave(&pctl->lock, flags);
+ reg = readl(pctl->membase + offset);
+ reg &= ~(mask << shift);
+ writel(reg | val << shift, pctl->membase + offset);
+ spin_unlock_irqrestore(&pctl->lock, flags);
+ } /* for each config */
return 0;
}
static const struct pinconf_ops sunxi_pconf_ops = {
+ .is_generic = true,
+ .pin_config_get = sunxi_pconf_get,
.pin_config_group_get = sunxi_pconf_group_get,
.pin_config_group_set = sunxi_pconf_group_set,
};
@@ -870,6 +1123,91 @@ static int sunxi_pinctrl_build_state(struct platform_device *pdev)
return 0;
}
+static int sunxi_pinctrl_get_debounce_div(struct clk *clk, int freq, int *diff)
+{
+ unsigned long clock = clk_get_rate(clk);
+ unsigned int best_diff, best_div;
+ int i;
+
+ best_diff = abs(freq - clock);
+ best_div = 0;
+
+ for (i = 1; i < 8; i++) {
+ int cur_diff = abs(freq - (clock >> i));
+
+ if (cur_diff < best_diff) {
+ best_diff = cur_diff;
+ best_div = i;
+ }
+ }
+
+ *diff = best_diff;
+ return best_div;
+}
+
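The search tries the undivided clock (div = 0) and then prescalers 2^1 through 2^7, keeping whichever divided rate lands closest to the requested debounce frequency. A worked example under assumed rates:

/*
 * losc = 32768 Hz, "input-debounce" entry of 100 us:
 *   debounce_freq = DIV_ROUND_CLOSEST(1000000, 100) = 10000 Hz
 *   candidates: 32768, 16384, 8192, 4096, ...
 *   8192 Hz (div = 2) is the closest, so the function returns 2
 *   and stores *diff = 1808.
 */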
+static int sunxi_pinctrl_setup_debounce(struct sunxi_pinctrl *pctl,
+ struct device_node *node)
+{
+ unsigned int hosc_diff, losc_diff;
+ unsigned int hosc_div, losc_div;
+ struct clk *hosc, *losc;
+ u8 div, src;
+ int i, ret;
+
+ /* Deal with old DTs that didn't have the oscillators */
+ if (of_count_phandle_with_args(node, "clocks", "#clock-cells") != 3)
+ return 0;
+
+ /* If we don't have any setup, bail out */
+ if (!of_find_property(node, "input-debounce", NULL))
+ return 0;
+
+ losc = devm_clk_get(pctl->dev, "losc");
+ if (IS_ERR(losc))
+ return PTR_ERR(losc);
+
+ hosc = devm_clk_get(pctl->dev, "hosc");
+ if (IS_ERR(hosc))
+ return PTR_ERR(hosc);
+
+ for (i = 0; i < pctl->desc->irq_banks; i++) {
+ unsigned long debounce_freq;
+ u32 debounce;
+
+ ret = of_property_read_u32_index(node, "input-debounce",
+ i, &debounce);
+ if (ret)
+ return ret;
+
+ if (!debounce)
+ continue;
+
+ debounce_freq = DIV_ROUND_CLOSEST(USEC_PER_SEC, debounce);
+ losc_div = sunxi_pinctrl_get_debounce_div(losc,
+ debounce_freq,
+ &losc_diff);
+
+ hosc_div = sunxi_pinctrl_get_debounce_div(hosc,
+ debounce_freq,
+ &hosc_diff);
+
+ if (hosc_diff < losc_diff) {
+ div = hosc_div;
+ src = 1;
+ } else {
+ div = losc_div;
+ src = 0;
+ }
+
+ writel(src | div << 4,
+ pctl->membase +
+ sunxi_irq_debounce_reg_from_bank(i,
+ pctl->desc->irq_bank_base));
+ }
+
+ return 0;
+}
+
int sunxi_pinctrl_init(struct platform_device *pdev,
const struct sunxi_pinctrl_desc *desc)
{
@@ -1032,6 +1370,8 @@ int sunxi_pinctrl_init(struct platform_device *pdev,
pctl);
}
+ sunxi_pinctrl_setup_debounce(pctl, node);
+
dev_info(&pdev->dev, "initialized sunXi PIO driver\n");
return 0;
diff --git a/drivers/pinctrl/sunxi/pinctrl-sunxi.h b/drivers/pinctrl/sunxi/pinctrl-sunxi.h
index 0afce1ab12d0..f78a44a03189 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sunxi.h
+++ b/drivers/pinctrl/sunxi/pinctrl-sunxi.h
@@ -69,6 +69,8 @@
#define IRQ_STATUS_IRQ_BITS 1
#define IRQ_STATUS_IRQ_MASK ((1 << IRQ_STATUS_IRQ_BITS) - 1)
+#define IRQ_DEBOUNCE_REG 0x218
+
#define IRQ_MEM_SIZE 0x20
#define IRQ_EDGE_RISING 0x00
@@ -109,7 +111,6 @@ struct sunxi_pinctrl_function {
struct sunxi_pinctrl_group {
const char *name;
- unsigned long config;
unsigned pin;
};
@@ -266,6 +267,11 @@ static inline u32 sunxi_irq_ctrl_offset(u16 irq)
return irq_num * IRQ_CTRL_IRQ_BITS;
}
+static inline u32 sunxi_irq_debounce_reg_from_bank(u8 bank, unsigned bank_base)
+{
+ return IRQ_DEBOUNCE_REG + (bank_base + bank) * IRQ_MEM_SIZE;
+}
+
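For example, with IRQ_DEBOUNCE_REG = 0x218 and IRQ_MEM_SIZE = 0x20 as defined above, bank 1 at bank_base 0 resolves to:

/* 0x218 + (0 + 1) * 0x20 = 0x238 */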
static inline u32 sunxi_irq_status_reg_from_bank(u8 bank, unsigned bank_base)
{
return IRQ_STATUS_REG + (bank_base + bank) * IRQ_MEM_SIZE;
diff --git a/drivers/pinctrl/vt8500/pinctrl-vt8500.c b/drivers/pinctrl/vt8500/pinctrl-vt8500.c
index ca946b3dbdb4..767f340d6b11 100644
--- a/drivers/pinctrl/vt8500/pinctrl-vt8500.c
+++ b/drivers/pinctrl/vt8500/pinctrl-vt8500.c
@@ -14,7 +14,7 @@
*/
#include <linux/io.h>
-#include <linux/module.h>
+#include <linux/init.h>
#include <linux/pinctrl/pinctrl.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
@@ -473,11 +473,6 @@ static int vt8500_pinctrl_probe(struct platform_device *pdev)
return wmt_pinctrl_probe(pdev, data);
}
-static int vt8500_pinctrl_remove(struct platform_device *pdev)
-{
- return wmt_pinctrl_remove(pdev);
-}
-
static const struct of_device_id wmt_pinctrl_of_match[] = {
{ .compatible = "via,vt8500-pinctrl" },
{ /* sentinel */ },
@@ -485,16 +480,10 @@ static const struct of_device_id wmt_pinctrl_of_match[] = {
static struct platform_driver wmt_pinctrl_driver = {
.probe = vt8500_pinctrl_probe,
- .remove = vt8500_pinctrl_remove,
.driver = {
.name = "pinctrl-vt8500",
.of_match_table = wmt_pinctrl_of_match,
+ .suppress_bind_attrs = true,
},
};
-
-module_platform_driver(wmt_pinctrl_driver);
-
-MODULE_AUTHOR("Tony Prisk <linux@prisktech.co.nz>");
-MODULE_DESCRIPTION("VIA VT8500 Pincontrol driver");
-MODULE_LICENSE("GPL v2");
-MODULE_DEVICE_TABLE(of, wmt_pinctrl_of_match);
+builtin_platform_driver(wmt_pinctrl_driver);
diff --git a/drivers/pinctrl/vt8500/pinctrl-wm8505.c b/drivers/pinctrl/vt8500/pinctrl-wm8505.c
index 626fc7ec0174..a56fdbd87e42 100644
--- a/drivers/pinctrl/vt8500/pinctrl-wm8505.c
+++ b/drivers/pinctrl/vt8500/pinctrl-wm8505.c
@@ -14,7 +14,7 @@
*/
#include <linux/io.h>
-#include <linux/module.h>
+#include <linux/init.h>
#include <linux/pinctrl/pinctrl.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
@@ -504,11 +504,6 @@ static int wm8505_pinctrl_probe(struct platform_device *pdev)
return wmt_pinctrl_probe(pdev, data);
}
-static int wm8505_pinctrl_remove(struct platform_device *pdev)
-{
- return wmt_pinctrl_remove(pdev);
-}
-
static const struct of_device_id wmt_pinctrl_of_match[] = {
{ .compatible = "wm,wm8505-pinctrl" },
{ /* sentinel */ },
@@ -516,16 +511,10 @@ static const struct of_device_id wmt_pinctrl_of_match[] = {
static struct platform_driver wmt_pinctrl_driver = {
.probe = wm8505_pinctrl_probe,
- .remove = wm8505_pinctrl_remove,
.driver = {
.name = "pinctrl-wm8505",
.of_match_table = wmt_pinctrl_of_match,
+ .suppress_bind_attrs = true,
},
};
-
-module_platform_driver(wmt_pinctrl_driver);
-
-MODULE_AUTHOR("Tony Prisk <linux@prisktech.co.nz>");
-MODULE_DESCRIPTION("Wondermedia WM8505 Pincontrol driver");
-MODULE_LICENSE("GPL v2");
-MODULE_DEVICE_TABLE(of, wmt_pinctrl_of_match);
+builtin_platform_driver(wmt_pinctrl_driver);
diff --git a/drivers/pinctrl/vt8500/pinctrl-wm8650.c b/drivers/pinctrl/vt8500/pinctrl-wm8650.c
index 8953aba8bfc2..270dd491f5a1 100644
--- a/drivers/pinctrl/vt8500/pinctrl-wm8650.c
+++ b/drivers/pinctrl/vt8500/pinctrl-wm8650.c
@@ -14,7 +14,7 @@
*/
#include <linux/io.h>
-#include <linux/module.h>
+#include <linux/init.h>
#include <linux/pinctrl/pinctrl.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
@@ -342,11 +342,6 @@ static int wm8650_pinctrl_probe(struct platform_device *pdev)
return wmt_pinctrl_probe(pdev, data);
}
-static int wm8650_pinctrl_remove(struct platform_device *pdev)
-{
- return wmt_pinctrl_remove(pdev);
-}
-
static const struct of_device_id wmt_pinctrl_of_match[] = {
{ .compatible = "wm,wm8650-pinctrl" },
{ /* sentinel */ },
@@ -354,16 +349,10 @@ static const struct of_device_id wmt_pinctrl_of_match[] = {
static struct platform_driver wmt_pinctrl_driver = {
.probe = wm8650_pinctrl_probe,
- .remove = wm8650_pinctrl_remove,
.driver = {
.name = "pinctrl-wm8650",
.of_match_table = wmt_pinctrl_of_match,
+ .suppress_bind_attrs = true,
},
};
-
-module_platform_driver(wmt_pinctrl_driver);
-
-MODULE_AUTHOR("Tony Prisk <linux@prisktech.co.nz>");
-MODULE_DESCRIPTION("Wondermedia WM8650 Pincontrol driver");
-MODULE_LICENSE("GPL v2");
-MODULE_DEVICE_TABLE(of, wmt_pinctrl_of_match);
+builtin_platform_driver(wmt_pinctrl_driver);
diff --git a/drivers/pinctrl/vt8500/pinctrl-wm8750.c b/drivers/pinctrl/vt8500/pinctrl-wm8750.c
index c79053d430db..74f7b3a18f3a 100644
--- a/drivers/pinctrl/vt8500/pinctrl-wm8750.c
+++ b/drivers/pinctrl/vt8500/pinctrl-wm8750.c
@@ -14,7 +14,7 @@
*/
#include <linux/io.h>
-#include <linux/module.h>
+#include <linux/init.h>
#include <linux/pinctrl/pinctrl.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
@@ -381,11 +381,6 @@ static int wm8750_pinctrl_probe(struct platform_device *pdev)
return wmt_pinctrl_probe(pdev, data);
}
-static int wm8750_pinctrl_remove(struct platform_device *pdev)
-{
- return wmt_pinctrl_remove(pdev);
-}
-
static const struct of_device_id wmt_pinctrl_of_match[] = {
{ .compatible = "wm,wm8750-pinctrl" },
{ /* sentinel */ },
@@ -393,16 +388,10 @@ static const struct of_device_id wmt_pinctrl_of_match[] = {
static struct platform_driver wmt_pinctrl_driver = {
.probe = wm8750_pinctrl_probe,
- .remove = wm8750_pinctrl_remove,
.driver = {
.name = "pinctrl-wm8750",
.of_match_table = wmt_pinctrl_of_match,
+ .suppress_bind_attrs = true,
},
};
-
-module_platform_driver(wmt_pinctrl_driver);
-
-MODULE_AUTHOR("Tony Prisk <linux@prisktech.co.nz>");
-MODULE_DESCRIPTION("Wondermedia WM8750 Pincontrol driver");
-MODULE_LICENSE("GPL v2");
-MODULE_DEVICE_TABLE(of, wmt_pinctrl_of_match);
+builtin_platform_driver(wmt_pinctrl_driver);
diff --git a/drivers/pinctrl/vt8500/pinctrl-wm8850.c b/drivers/pinctrl/vt8500/pinctrl-wm8850.c
index f232b163c735..45792aa7a06e 100644
--- a/drivers/pinctrl/vt8500/pinctrl-wm8850.c
+++ b/drivers/pinctrl/vt8500/pinctrl-wm8850.c
@@ -14,7 +14,7 @@
*/
#include <linux/io.h>
-#include <linux/module.h>
+#include <linux/init.h>
#include <linux/pinctrl/pinctrl.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
@@ -360,11 +360,6 @@ static int wm8850_pinctrl_probe(struct platform_device *pdev)
return wmt_pinctrl_probe(pdev, data);
}
-static int wm8850_pinctrl_remove(struct platform_device *pdev)
-{
- return wmt_pinctrl_remove(pdev);
-}
-
static const struct of_device_id wmt_pinctrl_of_match[] = {
{ .compatible = "wm,wm8850-pinctrl" },
{ /* sentinel */ },
@@ -372,16 +367,10 @@ static const struct of_device_id wmt_pinctrl_of_match[] = {
static struct platform_driver wmt_pinctrl_driver = {
.probe = wm8850_pinctrl_probe,
- .remove = wm8850_pinctrl_remove,
.driver = {
.name = "pinctrl-wm8850",
.of_match_table = wmt_pinctrl_of_match,
+ .suppress_bind_attrs = true,
},
};
-
-module_platform_driver(wmt_pinctrl_driver);
-
-MODULE_AUTHOR("Tony Prisk <linux@prisktech.co.nz>");
-MODULE_DESCRIPTION("Wondermedia WM8850 Pincontrol driver");
-MODULE_LICENSE("GPL v2");
-MODULE_DEVICE_TABLE(of, wmt_pinctrl_of_match);
+builtin_platform_driver(wmt_pinctrl_driver);
diff --git a/drivers/pinctrl/vt8500/pinctrl-wmt.c b/drivers/pinctrl/vt8500/pinctrl-wmt.c
index cbc638631678..270ca2a47a8c 100644
--- a/drivers/pinctrl/vt8500/pinctrl-wmt.c
+++ b/drivers/pinctrl/vt8500/pinctrl-wmt.c
@@ -18,7 +18,6 @@
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
-#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/pinctrl/consumer.h>
@@ -608,12 +607,3 @@ fail_range:
gpiochip_remove(&data->gpio_chip);
return err;
}
-
-int wmt_pinctrl_remove(struct platform_device *pdev)
-{
- struct wmt_pinctrl_data *data = platform_get_drvdata(pdev);
-
- gpiochip_remove(&data->gpio_chip);
-
- return 0;
-}
diff --git a/drivers/pinctrl/vt8500/pinctrl-wmt.h b/drivers/pinctrl/vt8500/pinctrl-wmt.h
index 41f5f2deb5d6..885613396fe7 100644
--- a/drivers/pinctrl/vt8500/pinctrl-wmt.h
+++ b/drivers/pinctrl/vt8500/pinctrl-wmt.h
@@ -76,4 +76,3 @@ struct wmt_pinctrl_data {
int wmt_pinctrl_probe(struct platform_device *pdev,
struct wmt_pinctrl_data *data);
-int wmt_pinctrl_remove(struct platform_device *pdev);
diff --git a/drivers/platform/chrome/cros_ec_dev.c b/drivers/platform/chrome/cros_ec_dev.c
index 8abd80dbcbed..47268ecedc4d 100644
--- a/drivers/platform/chrome/cros_ec_dev.c
+++ b/drivers/platform/chrome/cros_ec_dev.c
@@ -18,6 +18,7 @@
*/
#include <linux/fs.h>
+#include <linux/mfd/core.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
@@ -87,6 +88,41 @@ exit:
return ret;
}
+static int cros_ec_check_features(struct cros_ec_dev *ec, int feature)
+{
+ struct cros_ec_command *msg;
+ int ret;
+
+ if (ec->features[0] == -1U && ec->features[1] == -1U) {
+ /* features bitmap not read yet */
+
+ msg = kmalloc(sizeof(*msg) + sizeof(ec->features), GFP_KERNEL);
+ if (!msg)
+ return -ENOMEM;
+
+ msg->version = 0;
+ msg->command = EC_CMD_GET_FEATURES + ec->cmd_offset;
+ msg->insize = sizeof(ec->features);
+ msg->outsize = 0;
+
+ ret = cros_ec_cmd_xfer(ec->ec_dev, msg);
+ if (ret < 0 || msg->result != EC_RES_SUCCESS) {
+ dev_warn(ec->dev, "cannot get EC features: %d/%d\n",
+ ret, msg->result);
+ memset(ec->features, 0, sizeof(ec->features));
+ } else {
+ memcpy(ec->features, msg->data, sizeof(ec->features));
+ }
+
+ dev_dbg(ec->dev, "EC features %08x %08x\n",
+ ec->features[0], ec->features[1]);
+
+ kfree(msg);
+ }
+
+ return ec->features[feature / 32] & EC_FEATURE_MASK_0(feature);
+}
+
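The cached bitmap is two 32-bit words; for a feature bit N the test above evaluates features[N / 32] & BIT(N % 32), since EC_FEATURE_MASK_0() in the EC command headers reduces the bit index modulo 32 and thus works for either word. A usage sketch, mirroring the probe path further down:

if (cros_ec_check_features(ec, EC_FEATURE_MOTION_SENSE))
	cros_ec_sensors_register(ec);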
/* Device file ops */
static int ec_device_open(struct inode *inode, struct file *filp)
{
@@ -230,6 +266,123 @@ static void __remove(struct device *dev)
kfree(ec);
}
+static void cros_ec_sensors_register(struct cros_ec_dev *ec)
+{
+ /*
+ * Issue a command to get the number of sensors reported.
+ * Build an array of sensor drivers and register them all.
+ */
+ int ret, i, id, sensor_num;
+ struct mfd_cell *sensor_cells;
+ struct cros_ec_sensor_platform *sensor_platforms;
+ int sensor_type[MOTIONSENSE_TYPE_MAX];
+ struct ec_params_motion_sense *params;
+ struct ec_response_motion_sense *resp;
+ struct cros_ec_command *msg;
+
+ msg = kzalloc(sizeof(struct cros_ec_command) +
+ max(sizeof(*params), sizeof(*resp)), GFP_KERNEL);
+ if (msg == NULL)
+ return;
+
+ msg->version = 2;
+ msg->command = EC_CMD_MOTION_SENSE_CMD + ec->cmd_offset;
+ msg->outsize = sizeof(*params);
+ msg->insize = sizeof(*resp);
+
+ params = (struct ec_params_motion_sense *)msg->data;
+ params->cmd = MOTIONSENSE_CMD_DUMP;
+
+ ret = cros_ec_cmd_xfer(ec->ec_dev, msg);
+ if (ret < 0 || msg->result != EC_RES_SUCCESS) {
+ dev_warn(ec->dev, "cannot get EC sensor information: %d/%d\n",
+ ret, msg->result);
+ goto error;
+ }
+
+ resp = (struct ec_response_motion_sense *)msg->data;
+ sensor_num = resp->dump.sensor_count;
+ /* Allocate 2 extra sensors in case lid angle or FIFO are needed */
+ sensor_cells = kzalloc(sizeof(struct mfd_cell) * (sensor_num + 2),
+ GFP_KERNEL);
+ if (sensor_cells == NULL)
+ goto error;
+
+ sensor_platforms = kzalloc(sizeof(struct cros_ec_sensor_platform) *
+ (sensor_num + 1), GFP_KERNEL);
+ if (sensor_platforms == NULL)
+ goto error_platforms;
+
+ memset(sensor_type, 0, sizeof(sensor_type));
+ id = 0;
+ for (i = 0; i < sensor_num; i++) {
+ params->cmd = MOTIONSENSE_CMD_INFO;
+ params->info.sensor_num = i;
+ ret = cros_ec_cmd_xfer(ec->ec_dev, msg);
+ if (ret < 0 || msg->result != EC_RES_SUCCESS) {
+ dev_warn(ec->dev, "no info for EC sensor %d : %d/%d\n",
+ i, ret, msg->result);
+ continue;
+ }
+ switch (resp->info.type) {
+ case MOTIONSENSE_TYPE_ACCEL:
+ sensor_cells[id].name = "cros-ec-accel";
+ break;
+ case MOTIONSENSE_TYPE_GYRO:
+ sensor_cells[id].name = "cros-ec-gyro";
+ break;
+ case MOTIONSENSE_TYPE_MAG:
+ sensor_cells[id].name = "cros-ec-mag";
+ break;
+ case MOTIONSENSE_TYPE_PROX:
+ sensor_cells[id].name = "cros-ec-prox";
+ break;
+ case MOTIONSENSE_TYPE_LIGHT:
+ sensor_cells[id].name = "cros-ec-light";
+ break;
+ case MOTIONSENSE_TYPE_ACTIVITY:
+ sensor_cells[id].name = "cros-ec-activity";
+ break;
+ default:
+ dev_warn(ec->dev, "unknown type %d\n", resp->info.type);
+ continue;
+ }
+ sensor_platforms[id].sensor_num = i;
+ sensor_cells[id].id = sensor_type[resp->info.type];
+ sensor_cells[id].platform_data = &sensor_platforms[id];
+ sensor_cells[id].pdata_size =
+ sizeof(struct cros_ec_sensor_platform);
+
+ sensor_type[resp->info.type]++;
+ id++;
+ }
+ if (sensor_type[MOTIONSENSE_TYPE_ACCEL] >= 2) {
+ sensor_platforms[id].sensor_num = sensor_num;
+
+ sensor_cells[id].name = "cros-ec-angle";
+ sensor_cells[id].id = 0;
+ sensor_cells[id].platform_data = &sensor_platforms[id];
+ sensor_cells[id].pdata_size =
+ sizeof(struct cros_ec_sensor_platform);
+ id++;
+ }
+ if (cros_ec_check_features(ec, EC_FEATURE_MOTION_SENSE_FIFO)) {
+ sensor_cells[id].name = "cros-ec-ring";
+ id++;
+ }
+
+ ret = mfd_add_devices(ec->dev, 0, sensor_cells, id,
+ NULL, 0, NULL);
+ if (ret)
+ dev_err(ec->dev, "failed to add EC sensors\n");
+
+ kfree(sensor_platforms);
+error_platforms:
+ kfree(sensor_cells);
+error:
+ kfree(msg);
+}
+
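To make the resulting cell layout concrete, a hedged illustration (not part of the patch) for an EC that reports two accelerometers and one gyro and has FIFO support:

    /*
     * Illustrative result: five MFD cells are registered.
     *   cros-ec-accel, id 0, sensor_num 0
     *   cros-ec-accel, id 1, sensor_num 1
     *   cros-ec-gyro,  id 0, sensor_num 2
     *   cros-ec-angle, id 0, sensor_num 3 (== sensor_count, lid angle)
     *   cros-ec-ring            (FIFO consumer, no platform data)
     */
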
static int ec_device_probe(struct platform_device *pdev)
{
int retval = -ENOMEM;
@@ -245,6 +398,8 @@ static int ec_device_probe(struct platform_device *pdev)
ec->ec_dev = dev_get_drvdata(dev->parent);
ec->dev = dev;
ec->cmd_offset = ec_platform->cmd_offset;
+ ec->features[0] = -1U; /* Not cached yet */
+ ec->features[1] = -1U; /* Not cached yet */
device_initialize(&ec->class_dev);
cdev_init(&ec->cdev, &fops);
@@ -282,6 +437,10 @@ static int ec_device_probe(struct platform_device *pdev)
goto dev_reg_failed;
}
+	/* Check whether this EC is a sensor hub. */
+ if (cros_ec_check_features(ec, EC_FEATURE_MOTION_SENSE))
+ cros_ec_sensors_register(ec);
+
return 0;
dev_reg_failed:
diff --git a/drivers/platform/goldfish/goldfish_pipe.c b/drivers/platform/goldfish/goldfish_pipe.c
index 1aba2c74160e..2b21033f11f0 100644
--- a/drivers/platform/goldfish/goldfish_pipe.c
+++ b/drivers/platform/goldfish/goldfish_pipe.c
@@ -308,10 +308,8 @@ static ssize_t goldfish_pipe_read_write(struct file *filp, char __user *buffer,
* returns a small amount, then there's no need to pin that
* much memory to the process.
*/
- down_read(&current->mm->mmap_sem);
- ret = get_user_pages(address, 1, is_write ? 0 : FOLL_WRITE,
- &page, NULL);
- up_read(&current->mm->mmap_sem);
+ ret = get_user_pages_unlocked(address, 1, &page,
+ is_write ? 0 : FOLL_WRITE);
if (ret < 0)
break;
diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
index b8a21d7b25d4..5fe8be089b8b 100644
--- a/drivers/platform/x86/Kconfig
+++ b/drivers/platform/x86/Kconfig
@@ -363,6 +363,18 @@ config IDEAPAD_LAPTOP
	  This is a driver for Lenovo IdeaPad netbooks; it contains drivers
	  for the rfkill switch, hotkey, fan control and backlight control.
+config SURFACE3_WMI
+ tristate "Surface 3 WMI Driver"
+ depends on ACPI_WMI
+ depends on DMI
+ depends on INPUT
+ depends on SPI
+ ---help---
+ Say Y here if you have a Surface 3.
+
+ To compile this driver as a module, choose M here: the module will
+ be called surface3-wmi.
+
config THINKPAD_ACPI
tristate "ThinkPad ACPI Laptop Extras"
depends on ACPI
@@ -1005,12 +1017,27 @@ config INTEL_PMC_IPC
The PMC is an ARC processor which defines IPC commands for communication
with other entities in the CPU.
+config INTEL_BXTWC_PMIC_TMU
+ tristate "Intel BXT Whiskey Cove TMU Driver"
+ depends on REGMAP
+ depends on INTEL_SOC_PMIC && INTEL_PMC_IPC
+ ---help---
+ Select this driver to use Intel BXT Whiskey Cove PMIC TMU feature.
+ This driver enables the alarm wakeup functionality in the TMU unit
+ of Whiskey Cove PMIC.
+
config SURFACE_PRO3_BUTTON
tristate "Power/home/volume buttons driver for Microsoft Surface Pro 3/4 tablet"
depends on ACPI && INPUT
---help---
This driver handles the power/home/volume buttons on the Microsoft Surface Pro 3/4 tablet.
+config SURFACE_3_BUTTON
+ tristate "Power/home/volume buttons driver for Microsoft Surface 3 tablet"
+ depends on ACPI && KEYBOARD_GPIO
+ ---help---
+ This driver handles the power/home/volume buttons on the Microsoft Surface 3 tablet.
+
config INTEL_PUNIT_IPC
tristate "Intel P-Unit IPC Driver"
---help---
@@ -1027,4 +1054,26 @@ config INTEL_TELEMETRY
used to get various SoC events and parameters
directly via debugfs files. Various tools may use
this interface for SoC state monitoring.
+
+config MLX_PLATFORM
+ tristate "Mellanox Technologies platform support"
+ depends on X86_64
+ ---help---
+	  This option enables system support for the Mellanox Technologies
+	  platform. Mellanox systems provide data center networking solutions
+	  based on Virtual Protocol Interconnect (VPI) technology, enabling
+	  seamless connectivity to 56/100Gb/s InfiniBand or 10/40/56GbE
+	  connections.
+
+ If you have a Mellanox system, say Y or M here.
+
+config MLX_CPLD_PLATFORM
+ tristate "Mellanox platform hotplug driver support"
+ default n
+ select HWMON
+ select I2C
+ ---help---
+	  This driver handles hot-plug events for the power supplies, power
+	  cables and fans on a wide range of Mellanox InfiniBand and Ethernet
+	  systems.
+
endif # X86_PLATFORM_DEVICES
diff --git a/drivers/platform/x86/Makefile b/drivers/platform/x86/Makefile
index 2efa86d2a1a7..d4111f0f8a78 100644
--- a/drivers/platform/x86/Makefile
+++ b/drivers/platform/x86/Makefile
@@ -34,6 +34,7 @@ obj-$(CONFIG_PANASONIC_LAPTOP) += panasonic-laptop.o
obj-$(CONFIG_INTEL_MENLOW) += intel_menlow.o
obj-$(CONFIG_ACPI_WMI) += wmi.o
obj-$(CONFIG_MSI_WMI) += msi-wmi.o
+obj-$(CONFIG_SURFACE3_WMI) += surface3-wmi.o
obj-$(CONFIG_TOPSTAR_LAPTOP) += topstar-laptop.o
# toshiba_acpi must link after wmi to ensure that wmi devices are found
@@ -66,8 +67,12 @@ obj-$(CONFIG_PVPANIC) += pvpanic.o
obj-$(CONFIG_ALIENWARE_WMI) += alienware-wmi.o
obj-$(CONFIG_INTEL_PMC_IPC) += intel_pmc_ipc.o
obj-$(CONFIG_SURFACE_PRO3_BUTTON) += surfacepro3_button.o
+obj-$(CONFIG_SURFACE_3_BUTTON) += surface3_button.o
obj-$(CONFIG_INTEL_PUNIT_IPC) += intel_punit_ipc.o
+obj-$(CONFIG_INTEL_BXTWC_PMIC_TMU) += intel_bxtwc_tmu.o
obj-$(CONFIG_INTEL_TELEMETRY) += intel_telemetry_core.o \
intel_telemetry_pltdrv.o \
intel_telemetry_debugfs.o
obj-$(CONFIG_INTEL_PMC_CORE) += intel_pmc_core.o
+obj-$(CONFIG_MLX_PLATFORM) += mlx-platform.o
+obj-$(CONFIG_MLX_CPLD_PLATFORM) += mlxcpld-hotplug.o
diff --git a/drivers/platform/x86/acer-wmi.c b/drivers/platform/x86/acer-wmi.c
index 79d64ea00bfb..a66192f692e3 100644
--- a/drivers/platform/x86/acer-wmi.c
+++ b/drivers/platform/x86/acer-wmi.c
@@ -355,6 +355,32 @@ static const struct dmi_system_id acer_blacklist[] __initconst = {
{}
};
+static const struct dmi_system_id amw0_whitelist[] __initconst = {
+ {
+ .ident = "Acer",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ },
+ },
+ {
+ .ident = "Gateway",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Gateway"),
+ },
+ },
+ {
+ .ident = "Packard Bell",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Packard Bell"),
+ },
+ },
+ {}
+};
+
+/*
+ * This quirk table covers the Acer/Gateway/Packard Bell family of
+ * machines that are supported by the acer-wmi driver.
+ */
static const struct dmi_system_id acer_quirks[] __initconst = {
{
.callback = dmi_matched,
@@ -464,6 +490,17 @@ static const struct dmi_system_id acer_quirks[] __initconst = {
},
.driver_data = &quirk_acer_travelmate_2490,
},
+ {}
+};
+
+/*
+ * This quirk list covers non-Acer machines that expose AMW0_GUID1 but
+ * were supported by acer-wmi in the past. It is kept only for backward
+ * compatibility; please do not add new machines here. Such non-Acer
+ * machines should be handled by their own WMI drivers.
+ */
+static const struct dmi_system_id non_acer_quirks[] __initconst = {
{
.callback = dmi_matched,
.ident = "Fujitsu Siemens Amilo Li 1718",
@@ -598,6 +635,7 @@ static void __init find_quirks(void)
{
if (!force_series) {
dmi_check_system(acer_quirks);
+ dmi_check_system(non_acer_quirks);
} else if (force_series == 2490) {
quirks = &quirk_acer_travelmate_2490;
}
@@ -2108,6 +2146,24 @@ static int __init acer_wmi_init(void)
find_quirks();
/*
+	 * The AMW0_GUID1 WMI GUID is found not only on the Acer family but
+	 * also on machines from Lenovo, Fujitsu and Medion. In the past,
+	 * acer-wmi handled those non-Acer machines through its quirk list,
+	 * which meant the driver was loaded on any machine exposing
+	 * AMW0_GUID1. That behavior was wrong, because such machines should
+	 * be handled by their own WMI drivers, e.g. fujitsu-laptop or
+	 * ideapad-laptop. So require that a machine with AMW0_GUID1 is
+	 * either on the Acer/Gateway/Packard Bell whitelist or already on
+	 * the legacy quirk list above.
+ */
+ if (wmi_has_guid(AMW0_GUID1) &&
+ !dmi_check_system(amw0_whitelist) &&
+ quirks == &quirk_unknown) {
+ pr_err("Unsupported machine has AMW0_GUID1, unable to load\n");
+ return -ENODEV;
+ }
+
+ /*
* Detect which ACPI-WMI interface we're using.
*/
if (wmi_has_guid(AMW0_GUID1) && wmi_has_guid(WMID_GUID1))
diff --git a/drivers/platform/x86/asus-nb-wmi.c b/drivers/platform/x86/asus-nb-wmi.c
index 26e4cbc34db8..5be4783e40d4 100644
--- a/drivers/platform/x86/asus-nb-wmi.c
+++ b/drivers/platform/x86/asus-nb-wmi.c
@@ -116,8 +116,13 @@ static struct quirk_entry quirk_asus_ux303ub = {
.wmi_backlight_native = true,
};
+static struct quirk_entry quirk_asus_x550lb = {
+ .xusb2pr = 0x01D9,
+};
+
static int dmi_matched(const struct dmi_system_id *dmi)
{
+ pr_info("Identified laptop model '%s'\n", dmi->ident);
quirks = dmi->driver_data;
return 1;
}
@@ -175,6 +180,15 @@ static const struct dmi_system_id asus_quirks[] = {
},
{
.callback = dmi_matched,
+ .ident = "ASUSTeK COMPUTER INC. X45U",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "X45U"),
+ },
+ .driver_data = &quirk_asus_wapf4,
+ },
+ {
+ .callback = dmi_matched,
.ident = "ASUSTeK COMPUTER INC. X456UA",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
@@ -398,6 +412,15 @@ static const struct dmi_system_id asus_quirks[] = {
},
.driver_data = &quirk_asus_ux303ub,
},
+ {
+ .callback = dmi_matched,
+ .ident = "ASUSTeK COMPUTER INC. X550LB",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "X550LB"),
+ },
+ .driver_data = &quirk_asus_x550lb,
+ },
{},
};
diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c
index ce6ca31a2d09..43cb680adbb4 100644
--- a/drivers/platform/x86/asus-wmi.c
+++ b/drivers/platform/x86/asus-wmi.c
@@ -156,6 +156,9 @@ MODULE_LICENSE("GPL");
#define ASUS_FAN_CTRL_MANUAL 1
#define ASUS_FAN_CTRL_AUTO 2
+#define USB_INTEL_XUSB2PR 0xD0
+#define PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_XHCI 0x9c31
+
struct bios_args {
u32 arg0;
u32 arg1;
@@ -1080,6 +1083,29 @@ exit:
return result;
}
+static void asus_wmi_set_xusb2pr(struct asus_wmi *asus)
+{
+ struct pci_dev *xhci_pdev;
+ u32 orig_ports_available;
+ u32 ports_available = asus->driver->quirks->xusb2pr;
+
+ xhci_pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
+ PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_XHCI,
+ NULL);
+
+ if (!xhci_pdev)
+ return;
+
+ pci_read_config_dword(xhci_pdev, USB_INTEL_XUSB2PR,
+ &orig_ports_available);
+
+	pci_write_config_dword(xhci_pdev, USB_INTEL_XUSB2PR,
+				ports_available);
+
+	pci_dev_put(xhci_pdev);
+
+	pr_info("set USB_INTEL_XUSB2PR old: 0x%04x, new: 0x%04x\n",
+		orig_ports_available, ports_available);
+}
+
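For reference, each bit N of XUSB2PR routes USB2 port N to the xHCI controller (a clear bit leaves the port on EHCI). A hedged sketch, assuming that register layout, of decoding a quirk value such as the 0x01D9 used for the X550LB above:

    /* Illustrative only: list the USB2 ports a XUSB2PR value maps to xHCI. */
    static void example_decode_xusb2pr(u32 xusb2pr)
    {
    	unsigned int port;

    	for (port = 0; port < 16; port++)
    		if (xusb2pr & BIT(port))
    			pr_info("USB2 port %u -> xHCI\n", port);
    }
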
/*
* Hwmon device
*/
@@ -2087,6 +2113,9 @@ static int asus_wmi_add(struct platform_device *pdev)
if (asus->driver->quirks->wmi_backlight_native)
acpi_video_set_dmi_backlight_type(acpi_backlight_native);
+ if (asus->driver->quirks->xusb2pr)
+ asus_wmi_set_xusb2pr(asus);
+
if (acpi_video_get_backlight_type() == acpi_backlight_vendor) {
err = asus_wmi_backlight_init(asus);
if (err && err != -ENODEV)
diff --git a/drivers/platform/x86/asus-wmi.h b/drivers/platform/x86/asus-wmi.h
index 0e19014e9f54..fdff626c3b51 100644
--- a/drivers/platform/x86/asus-wmi.h
+++ b/drivers/platform/x86/asus-wmi.h
@@ -53,6 +53,7 @@ struct quirk_entry {
* and let the ACPI interrupt to send out the key event.
*/
int no_display_toggle;
+ u32 xusb2pr;
bool (*i8042_filter)(unsigned char data, unsigned char str,
struct serio *serio);
diff --git a/drivers/platform/x86/dell-laptop.c b/drivers/platform/x86/dell-laptop.c
index 2c2f02b2e08a..14392a01ab36 100644
--- a/drivers/platform/x86/dell-laptop.c
+++ b/drivers/platform/x86/dell-laptop.c
@@ -1904,38 +1904,40 @@ static enum led_brightness kbd_led_level_get(struct led_classdev *led_cdev)
return 0;
}
-static void kbd_led_level_set(struct led_classdev *led_cdev,
- enum led_brightness value)
+static int kbd_led_level_set(struct led_classdev *led_cdev,
+ enum led_brightness value)
{
struct kbd_state state;
struct kbd_state new_state;
u16 num;
+ int ret;
if (kbd_get_max_level()) {
- if (kbd_get_state(&state))
- return;
+ ret = kbd_get_state(&state);
+ if (ret)
+ return ret;
new_state = state;
- if (kbd_set_level(&new_state, value))
- return;
- kbd_set_state_safe(&new_state, &state);
- return;
+ ret = kbd_set_level(&new_state, value);
+ if (ret)
+ return ret;
+ return kbd_set_state_safe(&new_state, &state);
}
if (kbd_get_valid_token_counts()) {
for (num = kbd_token_bits; num != 0 && value > 0; --value)
num &= num - 1; /* clear the first bit set */
if (num == 0)
- return;
- kbd_set_token_bit(ffs(num) - 1);
- return;
+ return 0;
+ return kbd_set_token_bit(ffs(num) - 1);
}
pr_warn("Keyboard brightness level control not supported\n");
+ return -ENXIO;
}
static struct led_classdev kbd_led = {
.name = "dell::kbd_backlight",
- .brightness_set = kbd_led_level_set,
+ .brightness_set_blocking = kbd_led_level_set,
.brightness_get = kbd_led_level_get,
.groups = kbd_led_groups,
};
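The switch to brightness_set_blocking above matters because that callback may sleep and can now report errors; the LED core calls it from process context. A minimal hedged sketch of the pattern, with all names hypothetical:

    /* Illustrative only: a blocking set callback may sleep and returns -errno. */
    static int demo_led_set(struct led_classdev *cdev, enum led_brightness value)
    {
    	if (value > cdev->max_brightness)
    		return -EINVAL;
    	/* Slow firmware/SMBIOS traffic is fine here; sleeping is allowed. */
    	return 0;
    }

    static struct led_classdev demo_led = {
    	.name			  = "demo::backlight",
    	.brightness_set_blocking  = demo_led_set,
    	.max_brightness		  = 3,
    };
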
diff --git a/drivers/platform/x86/dell-wmi.c b/drivers/platform/x86/dell-wmi.c
index da2fe18162e1..75e637047d36 100644
--- a/drivers/platform/x86/dell-wmi.c
+++ b/drivers/platform/x86/dell-wmi.c
@@ -114,7 +114,7 @@ static const struct key_entry dell_wmi_keymap_type_0000[] __initconst = {
{ KE_IGNORE, 0xe00e, { KEY_RESERVED } },
/* Wifi Catcher */
- { KE_KEY, 0xe011, { KEY_PROG2 } },
+ { KE_KEY, 0xe011, { KEY_WLAN } },
/* Ambient light sensor toggle */
{ KE_IGNORE, 0xe013, { KEY_RESERVED } },
@@ -274,6 +274,16 @@ static const struct key_entry dell_wmi_keymap_type_0010[] __initconst = {
/* Stealth mode toggle */
{ KE_IGNORE, 0x155, { KEY_RESERVED } },
+
+ /* Rugged magnetic dock attach/detach events */
+ { KE_IGNORE, 0x156, { KEY_RESERVED } },
+ { KE_IGNORE, 0x157, { KEY_RESERVED } },
+
+ /* Rugged programmable (P1/P2/P3 keys) */
+ { KE_KEY, 0x850, { KEY_PROG1 } },
+ { KE_KEY, 0x851, { KEY_PROG2 } },
+ { KE_KEY, 0x852, { KEY_PROG3 } },
};
/*
diff --git a/drivers/platform/x86/ideapad-laptop.c b/drivers/platform/x86/ideapad-laptop.c
index a7614fc542b5..410741acb3c9 100644
--- a/drivers/platform/x86/ideapad-laptop.c
+++ b/drivers/platform/x86/ideapad-laptop.c
@@ -871,6 +871,13 @@ static const struct dmi_system_id no_hw_rfkill_list[] = {
},
},
{
+ .ident = "Lenovo ideapad Y700-15ACZ",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo ideapad Y700-15ACZ"),
+ },
+ },
+ {
.ident = "Lenovo ideapad Y700-15ISK",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
diff --git a/drivers/platform/x86/intel-hid.c b/drivers/platform/x86/intel-hid.c
index 12dbb5063376..cb3ab2b212b1 100644
--- a/drivers/platform/x86/intel-hid.c
+++ b/drivers/platform/x86/intel-hid.c
@@ -69,7 +69,7 @@ static int intel_hid_set_enable(struct device *device, int enable)
arg0.integer.value = enable;
status = acpi_evaluate_object(ACPI_HANDLE(device), "HDSM", &args, NULL);
- if (!ACPI_SUCCESS(status)) {
+ if (ACPI_FAILURE(status)) {
dev_warn(device, "failed to %sable hotkeys\n",
enable ? "en" : "dis");
return -EIO;
@@ -148,7 +148,7 @@ static void notify_handler(acpi_handle handle, u32 event, void *context)
}
status = acpi_evaluate_integer(handle, "HDEM", NULL, &ev_index);
- if (!ACPI_SUCCESS(status)) {
+ if (ACPI_FAILURE(status)) {
dev_warn(&device->dev, "failed to get event index\n");
return;
}
@@ -167,7 +167,7 @@ static int intel_hid_probe(struct platform_device *device)
int err;
status = acpi_evaluate_integer(handle, "HDMM", NULL, &mode);
- if (!ACPI_SUCCESS(status)) {
+ if (ACPI_FAILURE(status)) {
dev_warn(&device->dev, "failed to read mode\n");
return -ENODEV;
}
diff --git a/drivers/platform/x86/intel-smartconnect.c b/drivers/platform/x86/intel-smartconnect.c
index 04cf5dffdfd9..bbe4c06c769f 100644
--- a/drivers/platform/x86/intel-smartconnect.c
+++ b/drivers/platform/x86/intel-smartconnect.c
@@ -29,7 +29,7 @@ static int smartconnect_acpi_init(struct acpi_device *acpi)
acpi_status status;
status = acpi_evaluate_integer(acpi->handle, "GAOS", NULL, &value);
- if (!ACPI_SUCCESS(status))
+ if (ACPI_FAILURE(status))
return -EINVAL;
if (value & 0x1) {
diff --git a/drivers/platform/x86/intel-vbtn.c b/drivers/platform/x86/intel-vbtn.c
index 78080763df51..554e82ebe83c 100644
--- a/drivers/platform/x86/intel-vbtn.c
+++ b/drivers/platform/x86/intel-vbtn.c
@@ -49,34 +49,19 @@ static int intel_vbtn_input_setup(struct platform_device *device)
struct intel_vbtn_priv *priv = dev_get_drvdata(&device->dev);
int ret;
- priv->input_dev = input_allocate_device();
+ priv->input_dev = devm_input_allocate_device(&device->dev);
if (!priv->input_dev)
return -ENOMEM;
ret = sparse_keymap_setup(priv->input_dev, intel_vbtn_keymap, NULL);
if (ret)
- goto err_free_device;
+ return ret;
priv->input_dev->dev.parent = &device->dev;
priv->input_dev->name = "Intel Virtual Button driver";
priv->input_dev->id.bustype = BUS_HOST;
- ret = input_register_device(priv->input_dev);
- if (ret)
- goto err_free_device;
-
- return 0;
-
-err_free_device:
- input_free_device(priv->input_dev);
- return ret;
-}
-
-static void intel_vbtn_input_destroy(struct platform_device *device)
-{
- struct intel_vbtn_priv *priv = dev_get_drvdata(&device->dev);
-
- input_unregister_device(priv->input_dev);
+ return input_register_device(priv->input_dev);
}
static void notify_handler(acpi_handle handle, u32 event, void *context)
@@ -97,7 +82,7 @@ static int intel_vbtn_probe(struct platform_device *device)
int err;
status = acpi_evaluate_object(handle, "VBDL", NULL, NULL);
- if (!ACPI_SUCCESS(status)) {
+ if (ACPI_FAILURE(status)) {
dev_warn(&device->dev, "failed to read Intel Virtual Button driver\n");
return -ENODEV;
}
@@ -117,24 +102,16 @@ static int intel_vbtn_probe(struct platform_device *device)
ACPI_DEVICE_NOTIFY,
notify_handler,
device);
- if (ACPI_FAILURE(status)) {
- err = -EBUSY;
- goto err_remove_input;
- }
+ if (ACPI_FAILURE(status))
+ return -EBUSY;
return 0;
-
-err_remove_input:
- intel_vbtn_input_destroy(device);
-
- return err;
}
static int intel_vbtn_remove(struct platform_device *device)
{
acpi_handle handle = ACPI_HANDLE(&device->dev);
- intel_vbtn_input_destroy(device);
acpi_remove_notify_handler(handle, ACPI_DEVICE_NOTIFY, notify_handler);
/*
diff --git a/drivers/platform/x86/intel_bxtwc_tmu.c b/drivers/platform/x86/intel_bxtwc_tmu.c
new file mode 100644
index 000000000000..e202abd5b0df
--- /dev/null
+++ b/drivers/platform/x86/intel_bxtwc_tmu.c
@@ -0,0 +1,162 @@
+/*
+ * intel_bxtwc_tmu.c - Intel BXT Whiskey Cove PMIC TMU driver
+ *
+ * Copyright (C) 2016 Intel Corporation. All rights reserved.
+ *
+ * This driver adds TMU (Time Management Unit) support for the Intel BXT
+ * platform. It enables the alarm wake-up functionality in the TMU unit of
+ * the Whiskey Cove PMIC.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/mfd/intel_soc_pmic.h>
+
+#define BXTWC_TMUIRQ 0x4fb6
+#define BXTWC_MIRQLVL1 0x4e0e
+#define BXTWC_MTMUIRQ_REG 0x4fb7
+#define BXTWC_MIRQLVL1_MTMU BIT(1)
+#define BXTWC_TMU_WK_ALRM BIT(1)
+#define BXTWC_TMU_SYS_ALRM BIT(2)
+#define BXTWC_TMU_ALRM_MASK (BXTWC_TMU_WK_ALRM | BXTWC_TMU_SYS_ALRM)
+#define BXTWC_TMU_ALRM_IRQ (BXTWC_TMU_WK_ALRM | BXTWC_TMU_SYS_ALRM)
+
+struct wcove_tmu {
+ int irq;
+ struct device *dev;
+ struct regmap *regmap;
+};
+
+static irqreturn_t bxt_wcove_tmu_irq_handler(int irq, void *data)
+{
+ struct wcove_tmu *wctmu = data;
+ unsigned int tmu_irq;
+
+ /* Read TMU interrupt reg */
+ regmap_read(wctmu->regmap, BXTWC_TMUIRQ, &tmu_irq);
+ if (tmu_irq & BXTWC_TMU_ALRM_IRQ) {
+ /* clear TMU irq */
+ regmap_write(wctmu->regmap, BXTWC_TMUIRQ, tmu_irq);
+ return IRQ_HANDLED;
+ }
+ return IRQ_NONE;
+}
+
+static int bxt_wcove_tmu_probe(struct platform_device *pdev)
+{
+ struct intel_soc_pmic *pmic = dev_get_drvdata(pdev->dev.parent);
+ struct regmap_irq_chip_data *regmap_irq_chip;
+ struct wcove_tmu *wctmu;
+ int ret, virq, irq;
+
+ wctmu = devm_kzalloc(&pdev->dev, sizeof(*wctmu), GFP_KERNEL);
+ if (!wctmu)
+ return -ENOMEM;
+
+ wctmu->dev = &pdev->dev;
+ wctmu->regmap = pmic->regmap;
+
+ irq = platform_get_irq(pdev, 0);
+
+ if (irq < 0) {
+ dev_err(&pdev->dev, "invalid irq %d\n", irq);
+ return irq;
+ }
+
+ regmap_irq_chip = pmic->irq_chip_data_tmu;
+ virq = regmap_irq_get_virq(regmap_irq_chip, irq);
+ if (virq < 0) {
+ dev_err(&pdev->dev,
+ "failed to get virtual interrupt=%d\n", irq);
+ return virq;
+ }
+
+ ret = devm_request_threaded_irq(&pdev->dev, virq,
+ NULL, bxt_wcove_tmu_irq_handler,
+ IRQF_ONESHOT, "bxt_wcove_tmu", wctmu);
+ if (ret) {
+		dev_err(&pdev->dev, "request irq failed: %d, virq: %d\n",
+			ret, virq);
+ return ret;
+ }
+ wctmu->irq = virq;
+
+ /* Enable TMU interrupts */
+ regmap_update_bits(wctmu->regmap, BXTWC_MIRQLVL1,
+ BXTWC_MIRQLVL1_MTMU, 0);
+
+ /* Unmask TMU second level Wake & System alarm */
+ regmap_update_bits(wctmu->regmap, BXTWC_MTMUIRQ_REG,
+ BXTWC_TMU_ALRM_MASK, 0);
+
+ platform_set_drvdata(pdev, wctmu);
+ return 0;
+}
+
+static int bxt_wcove_tmu_remove(struct platform_device *pdev)
+{
+ struct wcove_tmu *wctmu = platform_get_drvdata(pdev);
+ unsigned int val;
+
+ /* Mask TMU interrupts */
+ regmap_read(wctmu->regmap, BXTWC_MIRQLVL1, &val);
+ regmap_write(wctmu->regmap, BXTWC_MIRQLVL1,
+ val | BXTWC_MIRQLVL1_MTMU);
+ regmap_read(wctmu->regmap, BXTWC_MTMUIRQ_REG, &val);
+ regmap_write(wctmu->regmap, BXTWC_MTMUIRQ_REG,
+ val | BXTWC_TMU_ALRM_MASK);
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int bxtwc_tmu_suspend(struct device *dev)
+{
+ struct wcove_tmu *wctmu = dev_get_drvdata(dev);
+
+ enable_irq_wake(wctmu->irq);
+ return 0;
+}
+
+static int bxtwc_tmu_resume(struct device *dev)
+{
+ struct wcove_tmu *wctmu = dev_get_drvdata(dev);
+
+ disable_irq_wake(wctmu->irq);
+ return 0;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(bxtwc_tmu_pm_ops, bxtwc_tmu_suspend, bxtwc_tmu_resume);
+
+static const struct platform_device_id bxt_wcove_tmu_id_table[] = {
+ { .name = "bxt_wcove_tmu" },
+ {},
+};
+MODULE_DEVICE_TABLE(platform, bxt_wcove_tmu_id_table);
+
+static struct platform_driver bxt_wcove_tmu_driver = {
+ .probe = bxt_wcove_tmu_probe,
+ .remove = bxt_wcove_tmu_remove,
+ .driver = {
+ .name = "bxt_wcove_tmu",
+ .pm = &bxtwc_tmu_pm_ops,
+ },
+ .id_table = bxt_wcove_tmu_id_table,
+};
+
+module_platform_driver(bxt_wcove_tmu_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Nilesh Bacchewar <nilesh.bacchewar@intel.com>");
+MODULE_DESCRIPTION("BXT Whiskey Cove TMU Driver");
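The suspend/resume hooks above follow the standard wake-IRQ pattern; a hedged sketch of the same idea in isolation (all names hypothetical):

    struct demo_chip { int irq; };

    /* Illustrative only: keep an IRQ armed as a wake source across suspend. */
    static int demo_suspend(struct device *dev)
    {
    	struct demo_chip *chip = dev_get_drvdata(dev);

    	enable_irq_wake(chip->irq);	/* the IRQ may now wake the system */
    	return 0;
    }

    static int demo_resume(struct device *dev)
    {
    	struct demo_chip *chip = dev_get_drvdata(dev);

    	disable_irq_wake(chip->irq);	/* balance the suspend-side call */
    	return 0;
    }
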
diff --git a/drivers/platform/x86/intel_mid_thermal.c b/drivers/platform/x86/intel_mid_thermal.c
index 9f713b832ba3..0df3c9d37509 100644
--- a/drivers/platform/x86/intel_mid_thermal.c
+++ b/drivers/platform/x86/intel_mid_thermal.c
@@ -415,6 +415,7 @@ static struct thermal_device_info *initialize_sensor(int index)
return td_info;
}
+#ifdef CONFIG_PM_SLEEP
/**
* mid_thermal_resume - resume routine
* @dev: device structure
@@ -442,6 +443,7 @@ static int mid_thermal_suspend(struct device *dev)
*/
return configure_adc(0);
}
+#endif
static SIMPLE_DEV_PM_OPS(mid_thermal_pm,
mid_thermal_suspend, mid_thermal_resume);
diff --git a/drivers/platform/x86/intel_pmc_core.c b/drivers/platform/x86/intel_pmc_core.c
index e8b1b836ca2d..b130b8c9b9d7 100644
--- a/drivers/platform/x86/intel_pmc_core.c
+++ b/drivers/platform/x86/intel_pmc_core.c
@@ -19,10 +19,12 @@
*/
#include <linux/debugfs.h>
+#include <linux/delay.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/pci.h>
+#include <linux/uaccess.h>
#include <asm/cpu_device_id.h>
#include <asm/intel-family.h>
@@ -32,16 +34,106 @@
static struct pmc_dev pmc;
+static const struct pmc_bit_map spt_pll_map[] = {
+ {"MIPI PLL", SPT_PMC_BIT_MPHY_CMN_LANE0},
+ {"GEN2 USB2PCIE2 PLL", SPT_PMC_BIT_MPHY_CMN_LANE1},
+ {"DMIPCIE3 PLL", SPT_PMC_BIT_MPHY_CMN_LANE2},
+ {"SATA PLL", SPT_PMC_BIT_MPHY_CMN_LANE3},
+ {},
+};
+
+static const struct pmc_bit_map spt_mphy_map[] = {
+ {"MPHY CORE LANE 0", SPT_PMC_BIT_MPHY_LANE0},
+ {"MPHY CORE LANE 1", SPT_PMC_BIT_MPHY_LANE1},
+ {"MPHY CORE LANE 2", SPT_PMC_BIT_MPHY_LANE2},
+ {"MPHY CORE LANE 3", SPT_PMC_BIT_MPHY_LANE3},
+ {"MPHY CORE LANE 4", SPT_PMC_BIT_MPHY_LANE4},
+ {"MPHY CORE LANE 5", SPT_PMC_BIT_MPHY_LANE5},
+ {"MPHY CORE LANE 6", SPT_PMC_BIT_MPHY_LANE6},
+ {"MPHY CORE LANE 7", SPT_PMC_BIT_MPHY_LANE7},
+ {"MPHY CORE LANE 8", SPT_PMC_BIT_MPHY_LANE8},
+ {"MPHY CORE LANE 9", SPT_PMC_BIT_MPHY_LANE9},
+ {"MPHY CORE LANE 10", SPT_PMC_BIT_MPHY_LANE10},
+ {"MPHY CORE LANE 11", SPT_PMC_BIT_MPHY_LANE11},
+ {"MPHY CORE LANE 12", SPT_PMC_BIT_MPHY_LANE12},
+ {"MPHY CORE LANE 13", SPT_PMC_BIT_MPHY_LANE13},
+ {"MPHY CORE LANE 14", SPT_PMC_BIT_MPHY_LANE14},
+ {"MPHY CORE LANE 15", SPT_PMC_BIT_MPHY_LANE15},
+ {},
+};
+
+static const struct pmc_bit_map spt_pfear_map[] = {
+ {"PMC", SPT_PMC_BIT_PMC},
+ {"OPI-DMI", SPT_PMC_BIT_OPI},
+ {"SPI / eSPI", SPT_PMC_BIT_SPI},
+ {"XHCI", SPT_PMC_BIT_XHCI},
+ {"SPA", SPT_PMC_BIT_SPA},
+ {"SPB", SPT_PMC_BIT_SPB},
+ {"SPC", SPT_PMC_BIT_SPC},
+ {"GBE", SPT_PMC_BIT_GBE},
+ {"SATA", SPT_PMC_BIT_SATA},
+ {"HDA-PGD0", SPT_PMC_BIT_HDA_PGD0},
+ {"HDA-PGD1", SPT_PMC_BIT_HDA_PGD1},
+ {"HDA-PGD2", SPT_PMC_BIT_HDA_PGD2},
+ {"HDA-PGD3", SPT_PMC_BIT_HDA_PGD3},
+ {"RSVD", SPT_PMC_BIT_RSVD_0B},
+ {"LPSS", SPT_PMC_BIT_LPSS},
+ {"LPC", SPT_PMC_BIT_LPC},
+ {"SMB", SPT_PMC_BIT_SMB},
+ {"ISH", SPT_PMC_BIT_ISH},
+ {"P2SB", SPT_PMC_BIT_P2SB},
+ {"DFX", SPT_PMC_BIT_DFX},
+ {"SCC", SPT_PMC_BIT_SCC},
+ {"RSVD", SPT_PMC_BIT_RSVD_0C},
+ {"FUSE", SPT_PMC_BIT_FUSE},
+ {"CAMERA", SPT_PMC_BIT_CAMREA},
+ {"RSVD", SPT_PMC_BIT_RSVD_0D},
+ {"USB3-OTG", SPT_PMC_BIT_USB3_OTG},
+ {"EXI", SPT_PMC_BIT_EXI},
+ {"CSE", SPT_PMC_BIT_CSE},
+ {"CSME_KVM", SPT_PMC_BIT_CSME_KVM},
+ {"CSME_PMT", SPT_PMC_BIT_CSME_PMT},
+ {"CSME_CLINK", SPT_PMC_BIT_CSME_CLINK},
+ {"CSME_PTIO", SPT_PMC_BIT_CSME_PTIO},
+ {"CSME_USBR", SPT_PMC_BIT_CSME_USBR},
+ {"CSME_SUSRAM", SPT_PMC_BIT_CSME_SUSRAM},
+ {"CSME_SMT", SPT_PMC_BIT_CSME_SMT},
+ {"RSVD", SPT_PMC_BIT_RSVD_1A},
+ {"CSME_SMS2", SPT_PMC_BIT_CSME_SMS2},
+ {"CSME_SMS1", SPT_PMC_BIT_CSME_SMS1},
+ {"CSME_RTC", SPT_PMC_BIT_CSME_RTC},
+ {"CSME_PSF", SPT_PMC_BIT_CSME_PSF},
+ {},
+};
+
+static const struct pmc_reg_map spt_reg_map = {
+ .pfear_sts = spt_pfear_map,
+ .mphy_sts = spt_mphy_map,
+ .pll_sts = spt_pll_map,
+};
+
static const struct pci_device_id pmc_pci_ids[] = {
- { PCI_VDEVICE(INTEL, SPT_PMC_PCI_DEVICE_ID), (kernel_ulong_t)NULL },
+ { PCI_VDEVICE(INTEL, SPT_PMC_PCI_DEVICE_ID),
+ (kernel_ulong_t)&spt_reg_map },
{ 0, },
};
+static inline u8 pmc_core_reg_read_byte(struct pmc_dev *pmcdev, int offset)
+{
+ return readb(pmcdev->regbase + offset);
+}
+
static inline u32 pmc_core_reg_read(struct pmc_dev *pmcdev, int reg_offset)
{
return readl(pmcdev->regbase + reg_offset);
}
+static inline void pmc_core_reg_write(struct pmc_dev *pmcdev,
+				      int reg_offset, u32 val)
+{
+ writel(val, pmcdev->regbase + reg_offset);
+}
+
static inline u32 pmc_core_adjust_slp_s0_step(u32 value)
{
return value * SPT_PMC_SLP_S0_RES_COUNTER_STEP;
@@ -90,6 +182,245 @@ static int pmc_core_dev_state_get(void *data, u64 *val)
DEFINE_DEBUGFS_ATTRIBUTE(pmc_core_dev_state, pmc_core_dev_state_get, NULL, "%llu\n");
+static int pmc_core_check_read_lock_bit(void)
+{
+ struct pmc_dev *pmcdev = &pmc;
+ u32 value;
+
+ value = pmc_core_reg_read(pmcdev, SPT_PMC_PM_CFG_OFFSET);
+ return test_bit(SPT_PMC_READ_DISABLE_BIT,
+ (unsigned long *)&value);
+}
+
+#if IS_ENABLED(CONFIG_DEBUG_FS)
+static void pmc_core_display_map(struct seq_file *s, int index,
+ u8 pf_reg, const struct pmc_bit_map *pf_map)
+{
+ seq_printf(s, "PCH IP: %-2d - %-32s\tState: %s\n",
+ index, pf_map[index].name,
+ pf_map[index].bit_mask & pf_reg ? "Off" : "On");
+}
+
+static int pmc_core_ppfear_sts_show(struct seq_file *s, void *unused)
+{
+ struct pmc_dev *pmcdev = s->private;
+ const struct pmc_bit_map *map = pmcdev->map->pfear_sts;
+ u8 pf_regs[NUM_ENTRIES];
+ int index, iter;
+
+ iter = SPT_PMC_XRAM_PPFEAR0A;
+
+ for (index = 0; index < NUM_ENTRIES; index++, iter++)
+ pf_regs[index] = pmc_core_reg_read_byte(pmcdev, iter);
+
+ for (index = 0; map[index].name; index++)
+ pmc_core_display_map(s, index, pf_regs[index / 8], map);
+
+ return 0;
+}
+
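The pf_regs[index / 8] indexing above works because each PPFEAR register is one byte wide and covers eight IP blocks in map order. A hedged restatement of the mapping:

    /*
     * Illustrative only: map entry 'index' is reported by PPFEAR byte
     * index / 8 at the bit encoded in its bit_mask (bit index % 8); e.g.
     * entry 13 ("RSVD", SPT_PMC_BIT_RSVD_0B) reads byte 1, bit 5.
     */
    static bool example_ip_power_gated(const u8 *pf_regs,
    				       const struct pmc_bit_map *map,
    				       int index)
    {
    	return pf_regs[index / 8] & map[index].bit_mask;
    }
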
+static int pmc_core_ppfear_sts_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, pmc_core_ppfear_sts_show, inode->i_private);
+}
+
+static const struct file_operations pmc_core_ppfear_ops = {
+ .open = pmc_core_ppfear_sts_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+/* Return the MTPMC mailbox link status; 0 means it is ready for a message */
+static int pmc_core_mtpmc_link_status(void)
+{
+ struct pmc_dev *pmcdev = &pmc;
+ u32 value;
+
+ value = pmc_core_reg_read(pmcdev, SPT_PMC_PM_STS_OFFSET);
+ return test_bit(SPT_PMC_MSG_FULL_STS_BIT,
+ (unsigned long *)&value);
+}
+
+static int pmc_core_send_msg(u32 *addr_xram)
+{
+ struct pmc_dev *pmcdev = &pmc;
+ u32 dest;
+ int timeout;
+
+ for (timeout = NUM_RETRIES; timeout > 0; timeout--) {
+ if (pmc_core_mtpmc_link_status() == 0)
+ break;
+ msleep(5);
+ }
+
+ if (timeout <= 0 && pmc_core_mtpmc_link_status())
+ return -EBUSY;
+
+ dest = (*addr_xram & MTPMC_MASK) | (1U << 1);
+ pmc_core_reg_write(pmcdev, SPT_PMC_MTPMC_OFFSET, dest);
+ return 0;
+}
+
+static int pmc_core_mphy_pg_sts_show(struct seq_file *s, void *unused)
+{
+ struct pmc_dev *pmcdev = s->private;
+ const struct pmc_bit_map *map = pmcdev->map->mphy_sts;
+ u32 mphy_core_reg_low, mphy_core_reg_high;
+ u32 val_low, val_high;
+ int index, err = 0;
+
+ if (pmcdev->pmc_xram_read_bit) {
+ seq_puts(s, "Access denied: please disable PMC_READ_DISABLE setting in BIOS.");
+ return 0;
+ }
+
+ mphy_core_reg_low = (SPT_PMC_MPHY_CORE_STS_0 << 16);
+ mphy_core_reg_high = (SPT_PMC_MPHY_CORE_STS_1 << 16);
+
+ mutex_lock(&pmcdev->lock);
+
+ if (pmc_core_send_msg(&mphy_core_reg_low) != 0) {
+ err = -EBUSY;
+ goto out_unlock;
+ }
+
+ msleep(10);
+ val_low = pmc_core_reg_read(pmcdev, SPT_PMC_MFPMC_OFFSET);
+
+ if (pmc_core_send_msg(&mphy_core_reg_high) != 0) {
+ err = -EBUSY;
+ goto out_unlock;
+ }
+
+ msleep(10);
+ val_high = pmc_core_reg_read(pmcdev, SPT_PMC_MFPMC_OFFSET);
+
+ for (index = 0; map[index].name && index < 8; index++) {
+ seq_printf(s, "%-32s\tState: %s\n",
+ map[index].name,
+ map[index].bit_mask & val_low ? "Not power gated" :
+ "Power gated");
+ }
+
+ for (index = 8; map[index].name; index++) {
+ seq_printf(s, "%-32s\tState: %s\n",
+ map[index].name,
+ map[index].bit_mask & val_high ? "Not power gated" :
+ "Power gated");
+ }
+
+out_unlock:
+ mutex_unlock(&pmcdev->lock);
+ return err;
+}
+
+static int pmc_core_mphy_pg_sts_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, pmc_core_mphy_pg_sts_show, inode->i_private);
+}
+
+static const struct file_operations pmc_core_mphy_pg_ops = {
+ .open = pmc_core_mphy_pg_sts_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int pmc_core_pll_show(struct seq_file *s, void *unused)
+{
+ struct pmc_dev *pmcdev = s->private;
+ const struct pmc_bit_map *map = pmcdev->map->pll_sts;
+ u32 mphy_common_reg, val;
+ int index, err = 0;
+
+ if (pmcdev->pmc_xram_read_bit) {
+ seq_puts(s, "Access denied: please disable PMC_READ_DISABLE setting in BIOS.");
+ return 0;
+ }
+
+ mphy_common_reg = (SPT_PMC_MPHY_COM_STS_0 << 16);
+ mutex_lock(&pmcdev->lock);
+
+ if (pmc_core_send_msg(&mphy_common_reg) != 0) {
+ err = -EBUSY;
+ goto out_unlock;
+ }
+
+ /* Observed PMC HW response latency for MTPMC-MFPMC is ~10 ms */
+ msleep(10);
+ val = pmc_core_reg_read(pmcdev, SPT_PMC_MFPMC_OFFSET);
+
+ for (index = 0; map[index].name ; index++) {
+ seq_printf(s, "%-32s\tState: %s\n",
+ map[index].name,
+ map[index].bit_mask & val ? "Active" : "Idle");
+ }
+
+out_unlock:
+ mutex_unlock(&pmcdev->lock);
+ return err;
+}
+
+static int pmc_core_pll_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, pmc_core_pll_show, inode->i_private);
+}
+
+static const struct file_operations pmc_core_pll_ops = {
+ .open = pmc_core_pll_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static ssize_t pmc_core_ltr_ignore_write(struct file *file,
+					 const char __user *userbuf,
+					 size_t count, loff_t *ppos)
+{
+ struct pmc_dev *pmcdev = &pmc;
+ u32 val, buf_size, fd;
+ int err = 0;
+
+ buf_size = count < 64 ? count : 64;
+ mutex_lock(&pmcdev->lock);
+
+ if (kstrtou32_from_user(userbuf, buf_size, 10, &val)) {
+ err = -EFAULT;
+ goto out_unlock;
+ }
+
+ if (val > NUM_IP_IGN_ALLOWED) {
+ err = -EINVAL;
+ goto out_unlock;
+ }
+
+ fd = pmc_core_reg_read(pmcdev, SPT_PMC_LTR_IGNORE_OFFSET);
+ fd |= (1U << val);
+ pmc_core_reg_write(pmcdev, SPT_PMC_LTR_IGNORE_OFFSET, fd);
+
+out_unlock:
+ mutex_unlock(&pmcdev->lock);
+ return err == 0 ? count : err;
+}
+
+static int pmc_core_ltr_ignore_show(struct seq_file *s, void *unused)
+{
+ return 0;
+}
+	  Select this driver to use the Intel BXT Whiskey Cove PMIC TMU
+	  feature. It enables the alarm wakeup functionality in the TMU unit
+	  of the Whiskey Cove PMIC.
+ return single_open(file, pmc_core_ltr_ignore_show, inode->i_private);
+}
+
+static const struct file_operations pmc_core_ltr_ignore_ops = {
+ .open = pmc_core_ltr_ignore_open,
+ .read = seq_read,
+ .write = pmc_core_ltr_ignore_write,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
static void pmc_core_dbgfs_unregister(struct pmc_dev *pmcdev)
{
debugfs_remove_recursive(pmcdev->dbgfs_dir);
@@ -106,20 +437,59 @@ static int pmc_core_dbgfs_register(struct pmc_dev *pmcdev)
pmcdev->dbgfs_dir = dir;
file = debugfs_create_file("slp_s0_residency_usec", S_IFREG | S_IRUGO,
dir, pmcdev, &pmc_core_dev_state);
+ if (!file)
+ goto err;
- if (!file) {
- pmc_core_dbgfs_unregister(pmcdev);
- return -ENODEV;
- }
+ file = debugfs_create_file("pch_ip_power_gating_status",
+ S_IFREG | S_IRUGO, dir, pmcdev,
+ &pmc_core_ppfear_ops);
+ if (!file)
+ goto err;
+
+ file = debugfs_create_file("mphy_core_lanes_power_gating_status",
+ S_IFREG | S_IRUGO, dir, pmcdev,
+ &pmc_core_mphy_pg_ops);
+ if (!file)
+ goto err;
+
+ file = debugfs_create_file("pll_status",
+ S_IFREG | S_IRUGO, dir, pmcdev,
+ &pmc_core_pll_ops);
+ if (!file)
+ goto err;
+
+ file = debugfs_create_file("ltr_ignore",
+ S_IFREG | S_IRUGO, dir, pmcdev,
+ &pmc_core_ltr_ignore_ops);
+
+ if (!file)
+ goto err;
return 0;
+err:
+ pmc_core_dbgfs_unregister(pmcdev);
+ return -ENODEV;
}
+#else
+static inline int pmc_core_dbgfs_register(struct pmc_dev *pmcdev)
+{
+ return 0;
+}
+
+static inline void pmc_core_dbgfs_unregister(struct pmc_dev *pmcdev)
+{
+}
+#endif /* CONFIG_DEBUG_FS */
static const struct x86_cpu_id intel_pmc_core_ids[] = {
{ X86_VENDOR_INTEL, 6, INTEL_FAM6_SKYLAKE_MOBILE, X86_FEATURE_MWAIT,
(kernel_ulong_t)NULL},
{ X86_VENDOR_INTEL, 6, INTEL_FAM6_SKYLAKE_DESKTOP, X86_FEATURE_MWAIT,
(kernel_ulong_t)NULL},
+ { X86_VENDOR_INTEL, 6, INTEL_FAM6_KABYLAKE_MOBILE, X86_FEATURE_MWAIT,
+ (kernel_ulong_t)NULL},
+ { X86_VENDOR_INTEL, 6, INTEL_FAM6_KABYLAKE_DESKTOP, X86_FEATURE_MWAIT,
+ (kernel_ulong_t)NULL},
{}
};
@@ -128,6 +498,7 @@ static int pmc_core_probe(struct pci_dev *dev, const struct pci_device_id *id)
struct device *ptr_dev = &dev->dev;
struct pmc_dev *pmcdev = &pmc;
const struct x86_cpu_id *cpu_id;
+ const struct pmc_reg_map *map = (struct pmc_reg_map *)id->driver_data;
int err;
cpu_id = x86_match_cpu(intel_pmc_core_ids);
@@ -149,6 +520,7 @@ static int pmc_core_probe(struct pci_dev *dev, const struct pci_device_id *id)
dev_dbg(&dev->dev, "PMC Core: failed to read PCI config space.\n");
return err;
}
+ pmcdev->base_addr &= PMC_BASE_ADDR_MASK;
dev_dbg(&dev->dev, "PMC Core: PWRMBASE is %#x\n", pmcdev->base_addr);
pmcdev->regbase = devm_ioremap_nocache(ptr_dev,
@@ -159,6 +531,10 @@ static int pmc_core_probe(struct pci_dev *dev, const struct pci_device_id *id)
return -ENOMEM;
}
+ mutex_init(&pmcdev->lock);
+ pmcdev->pmc_xram_read_bit = pmc_core_check_read_lock_bit();
+ pmcdev->map = map;
+
err = pmc_core_dbgfs_register(pmcdev);
if (err < 0)
dev_warn(&dev->dev, "PMC Core: debugfs register failed.\n");
diff --git a/drivers/platform/x86/intel_pmc_core.h b/drivers/platform/x86/intel_pmc_core.h
index e3f671f4d122..5a48e7728479 100644
--- a/drivers/platform/x86/intel_pmc_core.h
+++ b/drivers/platform/x86/intel_pmc_core.h
@@ -26,8 +26,111 @@
#define SPT_PMC_BASE_ADDR_OFFSET 0x48
#define SPT_PMC_SLP_S0_RES_COUNTER_OFFSET 0x13c
-#define SPT_PMC_MMIO_REG_LEN 0x100
+#define SPT_PMC_PM_CFG_OFFSET 0x18
+#define SPT_PMC_PM_STS_OFFSET 0x1c
+#define SPT_PMC_MTPMC_OFFSET 0x20
+#define SPT_PMC_MFPMC_OFFSET 0x38
+#define SPT_PMC_LTR_IGNORE_OFFSET 0x30C
+#define SPT_PMC_MPHY_CORE_STS_0 0x1143
+#define SPT_PMC_MPHY_CORE_STS_1 0x1142
+#define SPT_PMC_MPHY_COM_STS_0 0x1155
+#define SPT_PMC_MMIO_REG_LEN 0x1000
#define SPT_PMC_SLP_S0_RES_COUNTER_STEP 0x64
+#define PMC_BASE_ADDR_MASK ~(SPT_PMC_MMIO_REG_LEN - 1)
+#define MTPMC_MASK 0xffff0000
+#define NUM_ENTRIES 5
+#define SPT_PMC_READ_DISABLE_BIT 0x16
+#define SPT_PMC_MSG_FULL_STS_BIT 0x18
+#define NUM_RETRIES 100
+#define NUM_IP_IGN_ALLOWED 17
+
+/* Sunrise Point: PGD PFET Enable Ack Status Registers */
+enum ppfear_regs {
+ SPT_PMC_XRAM_PPFEAR0A = 0x590,
+ SPT_PMC_XRAM_PPFEAR0B,
+ SPT_PMC_XRAM_PPFEAR0C,
+ SPT_PMC_XRAM_PPFEAR0D,
+ SPT_PMC_XRAM_PPFEAR1A,
+};
+
+#define SPT_PMC_BIT_PMC BIT(0)
+#define SPT_PMC_BIT_OPI BIT(1)
+#define SPT_PMC_BIT_SPI BIT(2)
+#define SPT_PMC_BIT_XHCI BIT(3)
+#define SPT_PMC_BIT_SPA BIT(4)
+#define SPT_PMC_BIT_SPB BIT(5)
+#define SPT_PMC_BIT_SPC BIT(6)
+#define SPT_PMC_BIT_GBE BIT(7)
+
+#define SPT_PMC_BIT_SATA BIT(0)
+#define SPT_PMC_BIT_HDA_PGD0 BIT(1)
+#define SPT_PMC_BIT_HDA_PGD1 BIT(2)
+#define SPT_PMC_BIT_HDA_PGD2 BIT(3)
+#define SPT_PMC_BIT_HDA_PGD3 BIT(4)
+#define SPT_PMC_BIT_RSVD_0B BIT(5)
+#define SPT_PMC_BIT_LPSS BIT(6)
+#define SPT_PMC_BIT_LPC BIT(7)
+
+#define SPT_PMC_BIT_SMB BIT(0)
+#define SPT_PMC_BIT_ISH BIT(1)
+#define SPT_PMC_BIT_P2SB BIT(2)
+#define SPT_PMC_BIT_DFX BIT(3)
+#define SPT_PMC_BIT_SCC BIT(4)
+#define SPT_PMC_BIT_RSVD_0C BIT(5)
+#define SPT_PMC_BIT_FUSE BIT(6)
+#define SPT_PMC_BIT_CAMREA BIT(7)
+
+#define SPT_PMC_BIT_RSVD_0D BIT(0)
+#define SPT_PMC_BIT_USB3_OTG BIT(1)
+#define SPT_PMC_BIT_EXI BIT(2)
+#define SPT_PMC_BIT_CSE BIT(3)
+#define SPT_PMC_BIT_CSME_KVM BIT(4)
+#define SPT_PMC_BIT_CSME_PMT BIT(5)
+#define SPT_PMC_BIT_CSME_CLINK BIT(6)
+#define SPT_PMC_BIT_CSME_PTIO BIT(7)
+
+#define SPT_PMC_BIT_CSME_USBR BIT(0)
+#define SPT_PMC_BIT_CSME_SUSRAM BIT(1)
+#define SPT_PMC_BIT_CSME_SMT BIT(2)
+#define SPT_PMC_BIT_RSVD_1A BIT(3)
+#define SPT_PMC_BIT_CSME_SMS2 BIT(4)
+#define SPT_PMC_BIT_CSME_SMS1 BIT(5)
+#define SPT_PMC_BIT_CSME_RTC BIT(6)
+#define SPT_PMC_BIT_CSME_PSF BIT(7)
+
+#define SPT_PMC_BIT_MPHY_LANE0 BIT(0)
+#define SPT_PMC_BIT_MPHY_LANE1 BIT(1)
+#define SPT_PMC_BIT_MPHY_LANE2 BIT(2)
+#define SPT_PMC_BIT_MPHY_LANE3 BIT(3)
+#define SPT_PMC_BIT_MPHY_LANE4 BIT(4)
+#define SPT_PMC_BIT_MPHY_LANE5 BIT(5)
+#define SPT_PMC_BIT_MPHY_LANE6 BIT(6)
+#define SPT_PMC_BIT_MPHY_LANE7 BIT(7)
+
+#define SPT_PMC_BIT_MPHY_LANE8 BIT(0)
+#define SPT_PMC_BIT_MPHY_LANE9 BIT(1)
+#define SPT_PMC_BIT_MPHY_LANE10 BIT(2)
+#define SPT_PMC_BIT_MPHY_LANE11 BIT(3)
+#define SPT_PMC_BIT_MPHY_LANE12 BIT(4)
+#define SPT_PMC_BIT_MPHY_LANE13 BIT(5)
+#define SPT_PMC_BIT_MPHY_LANE14 BIT(6)
+#define SPT_PMC_BIT_MPHY_LANE15 BIT(7)
+
+#define SPT_PMC_BIT_MPHY_CMN_LANE0 BIT(0)
+#define SPT_PMC_BIT_MPHY_CMN_LANE1 BIT(1)
+#define SPT_PMC_BIT_MPHY_CMN_LANE2 BIT(2)
+#define SPT_PMC_BIT_MPHY_CMN_LANE3 BIT(3)
+
+struct pmc_bit_map {
+ const char *name;
+ u32 bit_mask;
+};
+
+struct pmc_reg_map {
+ const struct pmc_bit_map *pfear_sts;
+ const struct pmc_bit_map *mphy_sts;
+ const struct pmc_bit_map *pll_sts;
+};
/**
* struct pmc_dev - pmc device structure
@@ -43,8 +146,13 @@
struct pmc_dev {
u32 base_addr;
void __iomem *regbase;
+ const struct pmc_reg_map *map;
+#if IS_ENABLED(CONFIG_DEBUG_FS)
struct dentry *dbgfs_dir;
+#endif /* CONFIG_DEBUG_FS */
bool has_slp_s0_res;
+ int pmc_xram_read_bit;
+ struct mutex lock; /* generic mutex lock for PMC Core */
};
#endif /* PMC_CORE_H */
diff --git a/drivers/platform/x86/mlx-platform.c b/drivers/platform/x86/mlx-platform.c
new file mode 100644
index 000000000000..97b4c3a219c0
--- /dev/null
+++ b/drivers/platform/x86/mlx-platform.c
@@ -0,0 +1,361 @@
+/*
+ * Copyright (c) 2016 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2016 Vadim Pasternak <vadimp@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/device.h>
+#include <linux/dmi.h>
+#include <linux/i2c.h>
+#include <linux/i2c-mux.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/platform_data/i2c-mux-reg.h>
+#include <linux/platform_data/mlxcpld-hotplug.h>
+
+#define MLX_PLAT_DEVICE_NAME "mlxplat"
+
+/* LPC bus IO offsets */
+#define MLXPLAT_CPLD_LPC_I2C_BASE_ADRR 0x2000
+#define MLXPLAT_CPLD_LPC_REG_BASE_ADRR 0x2500
+#define MLXPLAT_CPLD_LPC_IO_RANGE 0x100
+#define MLXPLAT_CPLD_LPC_I2C_CH1_OFF 0xdb
+#define MLXPLAT_CPLD_LPC_I2C_CH2_OFF 0xda
+#define MLXPLAT_CPLD_LPC_PIO_OFFSET 0x10000UL
+#define MLXPLAT_CPLD_LPC_REG1 ((MLXPLAT_CPLD_LPC_REG_BASE_ADRR + \
+ MLXPLAT_CPLD_LPC_I2C_CH1_OFF) | \
+ MLXPLAT_CPLD_LPC_PIO_OFFSET)
+#define MLXPLAT_CPLD_LPC_REG2 ((MLXPLAT_CPLD_LPC_REG_BASE_ADRR + \
+ MLXPLAT_CPLD_LPC_I2C_CH2_OFF) | \
+ MLXPLAT_CPLD_LPC_PIO_OFFSET)
+
+/* Start channel numbers */
+#define MLXPLAT_CPLD_CH1 2
+#define MLXPLAT_CPLD_CH2 10
+
+/* Number of LPC attached MUX platform devices */
+#define MLXPLAT_CPLD_LPC_MUX_DEVS 2
+
+/* mlxplat_priv - platform private data
+ * @pdev_i2c - i2c controller platform device
+ * @pdev_mux - array of mux platform devices
+ * @pdev_hotplug - hotplug platform device
+ */
+struct mlxplat_priv {
+ struct platform_device *pdev_i2c;
+ struct platform_device *pdev_mux[MLXPLAT_CPLD_LPC_MUX_DEVS];
+ struct platform_device *pdev_hotplug;
+};
+
+/* Regions for LPC I2C controller and LPC base register space */
+static const struct resource mlxplat_lpc_resources[] = {
+ [0] = DEFINE_RES_NAMED(MLXPLAT_CPLD_LPC_I2C_BASE_ADRR,
+ MLXPLAT_CPLD_LPC_IO_RANGE,
+ "mlxplat_cpld_lpc_i2c_ctrl", IORESOURCE_IO),
+ [1] = DEFINE_RES_NAMED(MLXPLAT_CPLD_LPC_REG_BASE_ADRR,
+ MLXPLAT_CPLD_LPC_IO_RANGE,
+ "mlxplat_cpld_lpc_regs",
+ IORESOURCE_IO),
+};
+
+/* Platform default channels */
+static const int mlxplat_default_channels[][8] = {
+ {
+ MLXPLAT_CPLD_CH1, MLXPLAT_CPLD_CH1 + 1, MLXPLAT_CPLD_CH1 + 2,
+ MLXPLAT_CPLD_CH1 + 3, MLXPLAT_CPLD_CH1 + 4, MLXPLAT_CPLD_CH1 +
+ 5, MLXPLAT_CPLD_CH1 + 6, MLXPLAT_CPLD_CH1 + 7
+ },
+ {
+ MLXPLAT_CPLD_CH2, MLXPLAT_CPLD_CH2 + 1, MLXPLAT_CPLD_CH2 + 2,
+ MLXPLAT_CPLD_CH2 + 3, MLXPLAT_CPLD_CH2 + 4, MLXPLAT_CPLD_CH2 +
+ 5, MLXPLAT_CPLD_CH2 + 6, MLXPLAT_CPLD_CH2 + 7
+ },
+};
+
+/* Platform channels for MSN21xx system family */
+static const int mlxplat_msn21xx_channels[] = { 1, 2, 3, 4, 5, 6, 7, 8 };
+
+/* Platform mux data */
+static struct i2c_mux_reg_platform_data mlxplat_mux_data[] = {
+ {
+ .parent = 1,
+ .base_nr = MLXPLAT_CPLD_CH1,
+ .write_only = 1,
+ .reg = (void __iomem *)MLXPLAT_CPLD_LPC_REG1,
+ .reg_size = 1,
+ .idle_in_use = 1,
+ },
+ {
+ .parent = 1,
+ .base_nr = MLXPLAT_CPLD_CH2,
+ .write_only = 1,
+ .reg = (void __iomem *)MLXPLAT_CPLD_LPC_REG2,
+ .reg_size = 1,
+ .idle_in_use = 1,
+ },
+
+};
+
+/* Platform hotplug devices */
+static struct mlxcpld_hotplug_device mlxplat_mlxcpld_hotplug_psu[] = {
+ {
+ .brdinfo = { I2C_BOARD_INFO("24c02", 0x51) },
+ .bus = 10,
+ },
+ {
+ .brdinfo = { I2C_BOARD_INFO("24c02", 0x50) },
+ .bus = 10,
+ },
+};
+
+static struct mlxcpld_hotplug_device mlxplat_mlxcpld_hotplug_pwr[] = {
+ {
+ .brdinfo = { I2C_BOARD_INFO("dps460", 0x59) },
+ .bus = 10,
+ },
+ {
+ .brdinfo = { I2C_BOARD_INFO("dps460", 0x58) },
+ .bus = 10,
+ },
+};
+
+static struct mlxcpld_hotplug_device mlxplat_mlxcpld_hotplug_fan[] = {
+ {
+ .brdinfo = { I2C_BOARD_INFO("24c32", 0x50) },
+ .bus = 11,
+ },
+ {
+ .brdinfo = { I2C_BOARD_INFO("24c32", 0x50) },
+ .bus = 12,
+ },
+ {
+ .brdinfo = { I2C_BOARD_INFO("24c32", 0x50) },
+ .bus = 13,
+ },
+ {
+ .brdinfo = { I2C_BOARD_INFO("24c32", 0x50) },
+ .bus = 14,
+ },
+};
+
+/* Platform hotplug default data */
+static
+struct mlxcpld_hotplug_platform_data mlxplat_mlxcpld_hotplug_default_data = {
+ .top_aggr_offset = (MLXPLAT_CPLD_LPC_REG_BASE_ADRR | 0x3a),
+ .top_aggr_mask = 0x48,
+ .top_aggr_psu_mask = 0x08,
+ .psu_reg_offset = (MLXPLAT_CPLD_LPC_REG_BASE_ADRR | 0x58),
+ .psu_mask = 0x03,
+ .psu_count = ARRAY_SIZE(mlxplat_mlxcpld_hotplug_psu),
+ .psu = mlxplat_mlxcpld_hotplug_psu,
+ .top_aggr_pwr_mask = 0x08,
+ .pwr_reg_offset = (MLXPLAT_CPLD_LPC_REG_BASE_ADRR | 0x64),
+ .pwr_mask = 0x03,
+ .pwr_count = ARRAY_SIZE(mlxplat_mlxcpld_hotplug_pwr),
+ .pwr = mlxplat_mlxcpld_hotplug_pwr,
+ .top_aggr_fan_mask = 0x40,
+ .fan_reg_offset = (MLXPLAT_CPLD_LPC_REG_BASE_ADRR | 0x88),
+ .fan_mask = 0x0f,
+ .fan_count = ARRAY_SIZE(mlxplat_mlxcpld_hotplug_fan),
+ .fan = mlxplat_mlxcpld_hotplug_fan,
+};
+
+/* Platform hotplug MSN21xx system family data */
+static
+struct mlxcpld_hotplug_platform_data mlxplat_mlxcpld_hotplug_msn21xx_data = {
+ .top_aggr_offset = (MLXPLAT_CPLD_LPC_REG_BASE_ADRR | 0x3a),
+ .top_aggr_mask = 0x04,
+ .top_aggr_pwr_mask = 0x04,
+ .pwr_reg_offset = (MLXPLAT_CPLD_LPC_REG_BASE_ADRR | 0x64),
+ .pwr_mask = 0x03,
+ .pwr_count = ARRAY_SIZE(mlxplat_mlxcpld_hotplug_pwr),
+};
+
+static struct resource mlxplat_mlxcpld_hotplug_resources[] = {
+ [0] = DEFINE_RES_IRQ_NAMED(17, "mlxcpld-hotplug"),
+};
+
+static struct platform_device *mlxplat_dev;
+static struct mlxcpld_hotplug_platform_data *mlxplat_hotplug;
+
+static int __init mlxplat_dmi_default_matched(const struct dmi_system_id *dmi)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(mlxplat_mux_data); i++) {
+ mlxplat_mux_data[i].values = mlxplat_default_channels[i];
+ mlxplat_mux_data[i].n_values =
+ ARRAY_SIZE(mlxplat_default_channels[i]);
+ }
+ mlxplat_hotplug = &mlxplat_mlxcpld_hotplug_default_data;
+
+ return 1;
+};
+
+static int __init mlxplat_dmi_msn21xx_matched(const struct dmi_system_id *dmi)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(mlxplat_mux_data); i++) {
+ mlxplat_mux_data[i].values = mlxplat_msn21xx_channels;
+ mlxplat_mux_data[i].n_values =
+ ARRAY_SIZE(mlxplat_msn21xx_channels);
+ }
+ mlxplat_hotplug = &mlxplat_mlxcpld_hotplug_msn21xx_data;
+
+ return 1;
+};
+
+static struct dmi_system_id mlxplat_dmi_table[] __initdata = {
+ {
+ .callback = mlxplat_dmi_default_matched,
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "Mellanox Technologies"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "MSN24"),
+ },
+ },
+ {
+ .callback = mlxplat_dmi_default_matched,
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "Mellanox Technologies"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "MSN27"),
+ },
+ },
+ {
+ .callback = mlxplat_dmi_default_matched,
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "Mellanox Technologies"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "MSB"),
+ },
+ },
+ {
+ .callback = mlxplat_dmi_default_matched,
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "Mellanox Technologies"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "MSX"),
+ },
+ },
+ {
+ .callback = mlxplat_dmi_msn21xx_matched,
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "Mellanox Technologies"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "MSN21"),
+ },
+ },
+ { }
+};
+
+static int __init mlxplat_init(void)
+{
+ struct mlxplat_priv *priv;
+ int i, err;
+
+ if (!dmi_check_system(mlxplat_dmi_table))
+ return -ENODEV;
+
+ mlxplat_dev = platform_device_register_simple(MLX_PLAT_DEVICE_NAME, -1,
+ mlxplat_lpc_resources,
+ ARRAY_SIZE(mlxplat_lpc_resources));
+
+ if (IS_ERR(mlxplat_dev))
+ return PTR_ERR(mlxplat_dev);
+
+ priv = devm_kzalloc(&mlxplat_dev->dev, sizeof(struct mlxplat_priv),
+ GFP_KERNEL);
+ if (!priv) {
+ err = -ENOMEM;
+ goto fail_alloc;
+ }
+ platform_set_drvdata(mlxplat_dev, priv);
+
+ priv->pdev_i2c = platform_device_register_simple("i2c_mlxcpld", -1,
+ NULL, 0);
+ if (IS_ERR(priv->pdev_i2c)) {
+ err = PTR_ERR(priv->pdev_i2c);
+ goto fail_alloc;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(mlxplat_mux_data); i++) {
+ priv->pdev_mux[i] = platform_device_register_resndata(
+ &mlxplat_dev->dev,
+ "i2c-mux-reg", i, NULL,
+ 0, &mlxplat_mux_data[i],
+ sizeof(mlxplat_mux_data[i]));
+ if (IS_ERR(priv->pdev_mux[i])) {
+ err = PTR_ERR(priv->pdev_mux[i]);
+ goto fail_platform_mux_register;
+ }
+ }
+
+ priv->pdev_hotplug = platform_device_register_resndata(
+ &mlxplat_dev->dev, "mlxcpld-hotplug", -1,
+ mlxplat_mlxcpld_hotplug_resources,
+ ARRAY_SIZE(mlxplat_mlxcpld_hotplug_resources),
+ mlxplat_hotplug, sizeof(*mlxplat_hotplug));
+ if (IS_ERR(priv->pdev_hotplug)) {
+ err = PTR_ERR(priv->pdev_hotplug);
+ goto fail_platform_mux_register;
+ }
+
+ return 0;
+
+fail_platform_mux_register:
+	for (i--; i >= 0; i--)
+ platform_device_unregister(priv->pdev_mux[i]);
+ platform_device_unregister(priv->pdev_i2c);
+fail_alloc:
+ platform_device_unregister(mlxplat_dev);
+
+ return err;
+}
+module_init(mlxplat_init);
+
+static void __exit mlxplat_exit(void)
+{
+ struct mlxplat_priv *priv = platform_get_drvdata(mlxplat_dev);
+ int i;
+
+ platform_device_unregister(priv->pdev_hotplug);
+
+ for (i = ARRAY_SIZE(mlxplat_mux_data) - 1; i >= 0 ; i--)
+ platform_device_unregister(priv->pdev_mux[i]);
+
+ platform_device_unregister(priv->pdev_i2c);
+ platform_device_unregister(mlxplat_dev);
+}
+module_exit(mlxplat_exit);
+
+MODULE_AUTHOR("Vadim Pasternak (vadimp@mellanox.com)");
+MODULE_DESCRIPTION("Mellanox platform driver");
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_ALIAS("dmi:*:*Mellanox*:MSN24*:");
+MODULE_ALIAS("dmi:*:*Mellanox*:MSN27*:");
+MODULE_ALIAS("dmi:*:*Mellanox*:MSB*:");
+MODULE_ALIAS("dmi:*:*Mellanox*:MSX*:");
+MODULE_ALIAS("dmi:*:*Mellanox*:MSN21*:");
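One design note, hedged: mlxplat_exit() tears the devices down in the reverse of registration order (hotplug device, then muxes, then the i2c controller, then the platform device), the usual LIFO rule for dependent platform devices; the error path in mlxplat_init() follows the same discipline.
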
diff --git a/drivers/platform/x86/mlxcpld-hotplug.c b/drivers/platform/x86/mlxcpld-hotplug.c
new file mode 100644
index 000000000000..aff3686b3b37
--- /dev/null
+++ b/drivers/platform/x86/mlxcpld-hotplug.c
@@ -0,0 +1,515 @@
+/*
+ * drivers/platform/x86/mlxcpld-hotplug.c
+ * Copyright (c) 2016 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2016 Vadim Pasternak <vadimp@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/bitops.h>
+#include <linux/device.h>
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/platform_data/mlxcpld-hotplug.h>
+#include <linux/platform_device.h>
+#include <linux/spinlock.h>
+#include <linux/wait.h>
+#include <linux/workqueue.h>
+
+/* Offset of event and mask registers from status register */
+#define MLXCPLD_HOTPLUG_EVENT_OFF 1
+#define MLXCPLD_HOTPLUG_MASK_OFF 2
+#define MLXCPLD_HOTPLUG_AGGR_MASK_OFF 1
+
+#define MLXCPLD_HOTPLUG_ATTRS_NUM 8
+
+/**
+ * enum mlxcpld_hotplug_attr_type - sysfs attributes for hotplug events:
+ * @MLXCPLD_HOTPLUG_ATTR_TYPE_PSU: power supply unit attribute;
+ * @MLXCPLD_HOTPLUG_ATTR_TYPE_PWR: power cable attribute;
+ * @MLXCPLD_HOTPLUG_ATTR_TYPE_FAN: FAN drawer attribute;
+ */
+enum mlxcpld_hotplug_attr_type {
+ MLXCPLD_HOTPLUG_ATTR_TYPE_PSU,
+ MLXCPLD_HOTPLUG_ATTR_TYPE_PWR,
+ MLXCPLD_HOTPLUG_ATTR_TYPE_FAN,
+};
+
+/**
+ * struct mlxcpld_hotplug_priv_data - platform private data:
+ * @irq: platform interrupt number;
+ * @pdev: platform device;
+ * @plat: platform data;
+ * @hwmon: hwmon device;
+ * @mlxcpld_hotplug_attr: sysfs attributes array;
+ * @mlxcpld_hotplug_dev_attr: sysfs sensor device attribute array;
+ * @group: sysfs attribute group;
+ * @groups: list of sysfs attribute group for hwmon registration;
+ * @dwork: delayed work template;
+ * @lock: spin lock;
+ * @aggr_cache: last value of aggregation register status;
+ * @psu_cache: last value of PSU register status;
+ * @pwr_cache: last value of power register status;
+ * @fan_cache: last value of FAN register status;
+ */
+struct mlxcpld_hotplug_priv_data {
+ int irq;
+ struct platform_device *pdev;
+ struct mlxcpld_hotplug_platform_data *plat;
+ struct device *hwmon;
+ struct attribute *mlxcpld_hotplug_attr[MLXCPLD_HOTPLUG_ATTRS_NUM + 1];
+ struct sensor_device_attribute_2
+ mlxcpld_hotplug_dev_attr[MLXCPLD_HOTPLUG_ATTRS_NUM];
+ struct attribute_group group;
+ const struct attribute_group *groups[2];
+ struct delayed_work dwork;
+ spinlock_t lock;
+ u8 aggr_cache;
+ u8 psu_cache;
+ u8 pwr_cache;
+ u8 fan_cache;
+};
+
+static ssize_t mlxcpld_hotplug_attr_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct mlxcpld_hotplug_priv_data *priv = platform_get_drvdata(pdev);
+ int index = to_sensor_dev_attr_2(attr)->index;
+ int nr = to_sensor_dev_attr_2(attr)->nr;
+ u8 reg_val = 0;
+
+ switch (nr) {
+ case MLXCPLD_HOTPLUG_ATTR_TYPE_PSU:
+ /* Bit = 0 : PSU is present. */
+		reg_val = !(inb(priv->plat->psu_reg_offset) & BIT(index));
+ break;
+
+ case MLXCPLD_HOTPLUG_ATTR_TYPE_PWR:
+ /* Bit = 1 : power cable is attached. */
+ reg_val = !!(inb(priv->plat->pwr_reg_offset) & BIT(index %
+ priv->plat->pwr_count));
+ break;
+
+ case MLXCPLD_HOTPLUG_ATTR_TYPE_FAN:
+ /* Bit = 0 : FAN is present. */
+		reg_val = !(inb(priv->plat->fan_reg_offset) & BIT(index %
+			  priv->plat->fan_count));
+ break;
+ }
+
+ return sprintf(buf, "%u\n", reg_val);
+}
+
+#define PRIV_ATTR(i) priv->mlxcpld_hotplug_attr[i]
+#define PRIV_DEV_ATTR(i) priv->mlxcpld_hotplug_dev_attr[i]
+static int mlxcpld_hotplug_attr_init(struct mlxcpld_hotplug_priv_data *priv)
+{
+ int num_attrs = priv->plat->psu_count + priv->plat->pwr_count +
+ priv->plat->fan_count;
+ int i;
+
+ priv->group.attrs = devm_kzalloc(&priv->pdev->dev, num_attrs *
+ sizeof(struct attribute *),
+ GFP_KERNEL);
+ if (!priv->group.attrs)
+ return -ENOMEM;
+
+ for (i = 0; i < num_attrs; i++) {
+ PRIV_ATTR(i) = &PRIV_DEV_ATTR(i).dev_attr.attr;
+
+ if (i < priv->plat->psu_count) {
+ PRIV_ATTR(i)->name = devm_kasprintf(&priv->pdev->dev,
+ GFP_KERNEL, "psu%u", i + 1);
+ PRIV_DEV_ATTR(i).nr = MLXCPLD_HOTPLUG_ATTR_TYPE_PSU;
+ } else if (i < priv->plat->psu_count + priv->plat->pwr_count) {
+ PRIV_ATTR(i)->name = devm_kasprintf(&priv->pdev->dev,
+ GFP_KERNEL, "pwr%u", i %
+ priv->plat->pwr_count + 1);
+ PRIV_DEV_ATTR(i).nr = MLXCPLD_HOTPLUG_ATTR_TYPE_PWR;
+ } else {
+ PRIV_ATTR(i)->name = devm_kasprintf(&priv->pdev->dev,
+ GFP_KERNEL, "fan%u", i %
+ priv->plat->fan_count + 1);
+ PRIV_DEV_ATTR(i).nr = MLXCPLD_HOTPLUG_ATTR_TYPE_FAN;
+ }
+
+ if (!PRIV_ATTR(i)->name) {
+ dev_err(&priv->pdev->dev, "Memory allocation failed for sysfs attribute %d.\n",
+ i + 1);
+ return -ENOMEM;
+ }
+
+ PRIV_DEV_ATTR(i).dev_attr.attr.name = PRIV_ATTR(i)->name;
+ PRIV_DEV_ATTR(i).dev_attr.attr.mode = S_IRUGO;
+ PRIV_DEV_ATTR(i).dev_attr.show = mlxcpld_hotplug_attr_show;
+ PRIV_DEV_ATTR(i).index = i;
+ sysfs_attr_init(&PRIV_DEV_ATTR(i).dev_attr.attr);
+ }
+
+ priv->group.attrs = priv->mlxcpld_hotplug_attr;
+ priv->groups[0] = &priv->group;
+ priv->groups[1] = NULL;
+
+ return 0;
+}
+
+static int mlxcpld_hotplug_device_create(struct device *dev,
+ struct mlxcpld_hotplug_device *item)
+{
+ item->adapter = i2c_get_adapter(item->bus);
+ if (!item->adapter) {
+ dev_err(dev, "Failed to get adapter for bus %d\n",
+ item->bus);
+ return -EFAULT;
+ }
+
+ item->client = i2c_new_device(item->adapter, &item->brdinfo);
+ if (!item->client) {
+ dev_err(dev, "Failed to create client %s at bus %d at addr 0x%02x\n",
+ item->brdinfo.type, item->bus, item->brdinfo.addr);
+ i2c_put_adapter(item->adapter);
+ item->adapter = NULL;
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+static void mlxcpld_hotplug_device_destroy(struct mlxcpld_hotplug_device *item)
+{
+ if (item->client) {
+ i2c_unregister_device(item->client);
+ item->client = NULL;
+ }
+
+ if (item->adapter) {
+ i2c_put_adapter(item->adapter);
+ item->adapter = NULL;
+ }
+}
+
+static inline void
+mlxcpld_hotplug_work_helper(struct device *dev,
+ struct mlxcpld_hotplug_device *item, u8 is_inverse,
+ u16 offset, u8 mask, u8 *cache)
+{
+ u8 val, asserted;
+ int bit;
+
+ /* Mask event. */
+ outb(0, offset + MLXCPLD_HOTPLUG_MASK_OFF);
+ /* Read status. */
+ val = inb(offset) & mask;
+ asserted = *cache ^ val;
+ *cache = val;
+
+ /*
+ * Validate that the item associated with the received signal is
+ * valid. An invalid item should never happen, unless some piece
+ * of hardware is broken. In that case just log an error and
+ * return. The caller must continue to handle the signals from
+ * other devices, if any.
+ */
+ if (unlikely(!item)) {
+ dev_err(dev, "False signal is received: register at offset 0x%02x, mask 0x%02x.\n",
+ offset, mask);
+ return;
+ }
+
+ for_each_set_bit(bit, (unsigned long *)&asserted, 8) {
+ if (val & BIT(bit)) {
+ if (is_inverse)
+ mlxcpld_hotplug_device_destroy(item + bit);
+ else
+ mlxcpld_hotplug_device_create(dev, item + bit);
+ } else {
+ if (is_inverse)
+ mlxcpld_hotplug_device_create(dev, item + bit);
+ else
+ mlxcpld_hotplug_device_destroy(item + bit);
+ }
+ }
+
+ /* Acknowledge event. */
+ outb(0, offset + MLXCPLD_HOTPLUG_EVENT_OFF);
+ /* Unmask event. */
+ outb(mask, offset + MLXCPLD_HOTPLUG_MASK_OFF);
+}
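Editorial aside (not part of the patch): the helper detects which bits changed by XOR-ing the cached status against the freshly read value, then walks only the toggled bits; is_inverse flips the create/destroy decision for presence-style registers where bit = 0 means present. A minimal standalone C sketch of the same edge-detection idiom, with hypothetical register values:

#include <stdio.h>

int main(void)
{
	unsigned char cache = 0x05;	/* previously cached status: bits 0, 2 */
	unsigned char val = 0x06;	/* freshly read status: bits 1, 2 */
	unsigned char asserted = cache ^ val;	/* toggled bits: 0 and 1 */
	int bit;

	for (bit = 0; bit < 8; bit++) {
		if (!(asserted & (1u << bit)))
			continue;
		if (val & (1u << bit))
			printf("bit %d asserted -> create device\n", bit);
		else
			printf("bit %d deasserted -> destroy device\n", bit);
	}
	return 0;
}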
+
+/*
+ * mlxcpld_hotplug_work_handler - traverses the CPLD interrupt registers
+ * according to the hierarchy schema below:
+ *
+ * Aggregation registers (status/mask)
+ * PSU registers: *---*
+ * *-----------------* | |
+ * |status/event/mask|----->| * |
+ * *-----------------* | |
+ * Power registers: | |
+ * *-----------------* | |
+ * |status/event/mask|----->| * |---> CPU
+ * *-----------------* | |
+ * FAN registers: | |
+ * *-----------------* | |
+ * |status/event/mask|----->| * |
+ * *-----------------* | |
+ * *---*
+ * In case some system change is detected (FAN in/out, PSU in/out, power
+ * cable attached/detached), the relevant device is created or destroyed.
+ */
+static void mlxcpld_hotplug_work_handler(struct work_struct *work)
+{
+ struct mlxcpld_hotplug_priv_data *priv = container_of(work,
+ struct mlxcpld_hotplug_priv_data, dwork.work);
+ u8 val, aggr_asserted;
+ unsigned long flags;
+
+ /* Mask aggregation event. */
+ outb(0, priv->plat->top_aggr_offset + MLXCPLD_HOTPLUG_AGGR_MASK_OFF);
+ /* Read aggregation status. */
+ val = inb(priv->plat->top_aggr_offset) & priv->plat->top_aggr_mask;
+ aggr_asserted = priv->aggr_cache ^ val;
+ priv->aggr_cache = val;
+
+ /* Handle PSU configuration changes. */
+ if (aggr_asserted & priv->plat->top_aggr_psu_mask)
+ mlxcpld_hotplug_work_helper(&priv->pdev->dev, priv->plat->psu,
+ 1, priv->plat->psu_reg_offset,
+ priv->plat->psu_mask,
+ &priv->psu_cache);
+
+ /* Handle power cable configuration changes. */
+ if (aggr_asserted & priv->plat->top_aggr_pwr_mask)
+ mlxcpld_hotplug_work_helper(&priv->pdev->dev, priv->plat->pwr,
+ 0, priv->plat->pwr_reg_offset,
+ priv->plat->pwr_mask,
+ &priv->pwr_cache);
+
+ /* Handle FAN configuration changes. */
+ if (aggr_asserted & priv->plat->top_aggr_fan_mask)
+ mlxcpld_hotplug_work_helper(&priv->pdev->dev, priv->plat->fan,
+ 1, priv->plat->fan_reg_offset,
+ priv->plat->fan_mask,
+ &priv->fan_cache);
+
+ if (aggr_asserted) {
+ spin_lock_irqsave(&priv->lock, flags);
+
+ /*
+ * It is possible that new signals have been asserted while the
+ * interrupt was masked by mlxcpld_hotplug_work_handler. Such
+ * signals would otherwise be missed, so the delayed work is
+ * canceled and the work task is re-scheduled for immediate
+ * execution. This picks up any missed signals; otherwise the
+ * work handler merely confirms that no new signals arrived
+ * while events were masked.
+ */
+ cancel_delayed_work(&priv->dwork);
+ schedule_delayed_work(&priv->dwork, 0);
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ return;
+ }
+
+ /* Unmask aggregation event (no need acknowledge). */
+ outb(priv->plat->top_aggr_mask, priv->plat->top_aggr_offset +
+ MLXCPLD_HOTPLUG_AGGR_MASK_OFF);
+}
+
+static void mlxcpld_hotplug_set_irq(struct mlxcpld_hotplug_priv_data *priv)
+{
+ /* Clear psu presence event. */
+ outb(0, priv->plat->psu_reg_offset + MLXCPLD_HOTPLUG_EVENT_OFF);
+ /* Set psu initial status as mask and unmask psu event. */
+ priv->psu_cache = priv->plat->psu_mask;
+ outb(priv->plat->psu_mask, priv->plat->psu_reg_offset +
+ MLXCPLD_HOTPLUG_MASK_OFF);
+
+ /* Clear power cable event. */
+ outb(0, priv->plat->pwr_reg_offset + MLXCPLD_HOTPLUG_EVENT_OFF);
+ /* Keep power initial status as zero and unmask power event. */
+ outb(priv->plat->pwr_mask, priv->plat->pwr_reg_offset +
+ MLXCPLD_HOTPLUG_MASK_OFF);
+
+ /* Clear fan presence event. */
+ outb(0, priv->plat->fan_reg_offset + MLXCPLD_HOTPLUG_EVENT_OFF);
+ /* Set fan initial status as mask and unmask fan event. */
+ priv->fan_cache = priv->plat->fan_mask;
+ outb(priv->plat->fan_mask, priv->plat->fan_reg_offset +
+ MLXCPLD_HOTPLUG_MASK_OFF);
+
+ /* Keep aggregation initial status as zero and unmask events. */
+ outb(priv->plat->top_aggr_mask, priv->plat->top_aggr_offset +
+ MLXCPLD_HOTPLUG_AGGR_MASK_OFF);
+
+ /* Invoke the work handler to perform the initial hot plug device setup. */
+ mlxcpld_hotplug_work_handler(&priv->dwork.work);
+
+ enable_irq(priv->irq);
+}
+
+static void mlxcpld_hotplug_unset_irq(struct mlxcpld_hotplug_priv_data *priv)
+{
+ int i;
+
+ disable_irq(priv->irq);
+ cancel_delayed_work_sync(&priv->dwork);
+
+ /* Mask aggregation event. */
+ outb(0, priv->plat->top_aggr_offset + MLXCPLD_HOTPLUG_AGGR_MASK_OFF);
+
+ /* Mask psu presence event. */
+ outb(0, priv->plat->psu_reg_offset + MLXCPLD_HOTPLUG_MASK_OFF);
+ /* Clear psu presence event. */
+ outb(0, priv->plat->psu_reg_offset + MLXCPLD_HOTPLUG_EVENT_OFF);
+
+ /* Mask power cable event. */
+ outb(0, priv->plat->pwr_reg_offset + MLXCPLD_HOTPLUG_MASK_OFF);
+ /* Clear power cable event. */
+ outb(0, priv->plat->pwr_reg_offset + MLXCPLD_HOTPLUG_EVENT_OFF);
+
+ /* Mask fan presence event. */
+ outb(0, priv->plat->fan_reg_offset + MLXCPLD_HOTPLUG_MASK_OFF);
+ /* Clear fan presence event. */
+ outb(0, priv->plat->fan_reg_offset + MLXCPLD_HOTPLUG_EVENT_OFF);
+
+ /* Remove all the attached devices. */
+ for (i = 0; i < priv->plat->psu_count; i++)
+ mlxcpld_hotplug_device_destroy(priv->plat->psu + i);
+
+ for (i = 0; i < priv->plat->pwr_count; i++)
+ mlxcpld_hotplug_device_destroy(priv->plat->pwr + i);
+
+ for (i = 0; i < priv->plat->fan_count; i++)
+ mlxcpld_hotplug_device_destroy(priv->plat->fan + i);
+}
+
+static irqreturn_t mlxcpld_hotplug_irq_handler(int irq, void *dev)
+{
+ struct mlxcpld_hotplug_priv_data *priv =
+ (struct mlxcpld_hotplug_priv_data *)dev;
+
+ /* Schedule work task for immediate execution. */
+ schedule_delayed_work(&priv->dwork, 0);
+
+ return IRQ_HANDLED;
+}
+
+static int mlxcpld_hotplug_probe(struct platform_device *pdev)
+{
+ struct mlxcpld_hotplug_platform_data *pdata;
+ struct mlxcpld_hotplug_priv_data *priv;
+ int err;
+
+ pdata = dev_get_platdata(&pdev->dev);
+ if (!pdata) {
+ dev_err(&pdev->dev, "Failed to get platform data.\n");
+ return -EINVAL;
+ }
+
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->pdev = pdev;
+ priv->plat = pdata;
+
+ priv->irq = platform_get_irq(pdev, 0);
+ if (priv->irq < 0) {
+ dev_err(&pdev->dev, "Failed to get platform irq: %d\n",
+ priv->irq);
+ return priv->irq;
+ }
+
+ err = devm_request_irq(&pdev->dev, priv->irq,
+ mlxcpld_hotplug_irq_handler, 0, pdev->name,
+ priv);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to request irq: %d\n", err);
+ return err;
+ }
+ disable_irq(priv->irq);
+
+ INIT_DELAYED_WORK(&priv->dwork, mlxcpld_hotplug_work_handler);
+ spin_lock_init(&priv->lock);
+
+ err = mlxcpld_hotplug_attr_init(priv);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to allocate attributes: %d\n", err);
+ return err;
+ }
+
+ priv->hwmon = devm_hwmon_device_register_with_groups(&pdev->dev,
+ "mlxcpld_hotplug", priv, priv->groups);
+ if (IS_ERR(priv->hwmon)) {
+ dev_err(&pdev->dev, "Failed to register hwmon device %ld\n",
+ PTR_ERR(priv->hwmon));
+ return PTR_ERR(priv->hwmon);
+ }
+
+ platform_set_drvdata(pdev, priv);
+
+ /* Perform initial interrupts setup. */
+ mlxcpld_hotplug_set_irq(priv);
+
+ return 0;
+}
+
+static int mlxcpld_hotplug_remove(struct platform_device *pdev)
+{
+ struct mlxcpld_hotplug_priv_data *priv = platform_get_drvdata(pdev);
+
+ /* Clean interrupts setup. */
+ mlxcpld_hotplug_unset_irq(priv);
+
+ return 0;
+}
+
+static struct platform_driver mlxcpld_hotplug_driver = {
+ .driver = {
+ .name = "mlxcpld-hotplug",
+ },
+ .probe = mlxcpld_hotplug_probe,
+ .remove = mlxcpld_hotplug_remove,
+};
+
+module_platform_driver(mlxcpld_hotplug_driver);
+
+MODULE_AUTHOR("Vadim Pasternak <vadimp@mellanox.com>");
+MODULE_DESCRIPTION("Mellanox CPLD hotplug platform driver");
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_ALIAS("platform:mlxcpld-hotplug");
diff --git a/drivers/platform/x86/panasonic-laptop.c b/drivers/platform/x86/panasonic-laptop.c
index 3f870972247c..59b8eb626dcc 100644
--- a/drivers/platform/x86/panasonic-laptop.c
+++ b/drivers/platform/x86/panasonic-laptop.c
@@ -458,7 +458,7 @@ static void acpi_pcc_generate_keyinput(struct pcc_acpi *pcc)
rc = acpi_evaluate_integer(pcc->handle, METHOD_HKEY_QUERY,
NULL, &result);
- if (!ACPI_SUCCESS(rc)) {
+ if (ACPI_FAILURE(rc)) {
ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
"error getting hotkey status\n"));
return;
diff --git a/drivers/platform/x86/surface3-wmi.c b/drivers/platform/x86/surface3-wmi.c
new file mode 100644
index 000000000000..cbf4d83a7271
--- /dev/null
+++ b/drivers/platform/x86/surface3-wmi.c
@@ -0,0 +1,297 @@
+/*
+ * Driver for the LID cover switch of the Surface 3
+ *
+ * Copyright (c) 2016 Red Hat Inc.
+ */
+
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; version 2 of the License.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+
+#include <linux/acpi.h>
+#include <linux/dmi.h>
+#include <linux/input.h>
+#include <linux/mutex.h>
+#include <linux/platform_device.h>
+#include <linux/spi/spi.h>
+
+MODULE_AUTHOR("Benjamin Tissoires <benjamin.tissoires@redhat.com>");
+MODULE_DESCRIPTION("Surface 3 platform driver");
+MODULE_LICENSE("GPL");
+
+#define ACPI_BUTTON_HID_LID "PNP0C0D"
+#define SPI_CTL_OBJ_NAME "SPI"
+#define SPI_TS_OBJ_NAME "NTRG"
+
+#define SURFACE3_LID_GUID "F7CC25EC-D20B-404C-8903-0ED4359C18AE"
+
+MODULE_ALIAS("wmi:" SURFACE3_LID_GUID);
+
+static const struct dmi_system_id surface3_dmi_table[] = {
+#if defined(CONFIG_X86)
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Surface 3"),
+ },
+ },
+#endif
+ { }
+};
+
+struct surface3_wmi {
+ struct acpi_device *touchscreen_adev;
+ struct acpi_device *pnp0c0d_adev;
+ struct acpi_hotplug_context hp;
+ struct input_dev *input;
+};
+
+static struct platform_device *s3_wmi_pdev;
+
+static struct surface3_wmi s3_wmi;
+
+static DEFINE_MUTEX(s3_wmi_lock);
+
+static int s3_wmi_query_block(const char *guid, int instance, int *ret)
+{
+ struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
+ acpi_status status;
+ union acpi_object *obj;
+ int error = 0;
+
+ mutex_lock(&s3_wmi_lock);
+ status = wmi_query_block(guid, instance, &output);
+
+ obj = output.pointer;
+
+ if (!obj || obj->type != ACPI_TYPE_INTEGER) {
+ if (obj) {
+ pr_err("query block returned object type: %d - buffer length:%d\n",
+ obj->type,
+ obj->type == ACPI_TYPE_BUFFER ?
+ obj->buffer.length : 0);
+ }
+ error = -EINVAL;
+ goto out_free_unlock;
+ }
+ *ret = obj->integer.value;
+ out_free_unlock:
+ kfree(obj);
+ mutex_unlock(&s3_wmi_lock);
+ return error;
+}
+
+static inline int s3_wmi_query_lid(int *ret)
+{
+ return s3_wmi_query_block(SURFACE3_LID_GUID, 0, ret);
+}
+
+static int s3_wmi_send_lid_state(void)
+{
+ int ret, lid_sw;
+
+ ret = s3_wmi_query_lid(&lid_sw);
+ if (ret)
+ return ret;
+
+ input_report_switch(s3_wmi.input, SW_LID, lid_sw);
+ input_sync(s3_wmi.input);
+
+ return 0;
+}
+
+static int s3_wmi_hp_notify(struct acpi_device *adev, u32 value)
+{
+ return s3_wmi_send_lid_state();
+}
+
+static acpi_status s3_wmi_attach_spi_device(acpi_handle handle,
+ u32 level,
+ void *data,
+ void **return_value)
+{
+ struct acpi_device *adev, **ts_adev;
+
+ if (acpi_bus_get_device(handle, &adev))
+ return AE_OK;
+
+ ts_adev = data;
+
+ if (strncmp(acpi_device_bid(adev), SPI_TS_OBJ_NAME,
+ strlen(SPI_TS_OBJ_NAME)))
+ return AE_OK;
+
+ if (*ts_adev) {
+ pr_err("duplicate entry %s\n", SPI_TS_OBJ_NAME);
+ return AE_OK;
+ }
+
+ *ts_adev = adev;
+
+ return AE_OK;
+}
+
+static int s3_wmi_check_platform_device(struct device *dev, void *data)
+{
+ struct acpi_device *adev, *ts_adev = NULL;
+ acpi_handle handle;
+ acpi_status status;
+
+ /* ignore non ACPI devices */
+ handle = ACPI_HANDLE(dev);
+ if (!handle || acpi_bus_get_device(handle, &adev))
+ return 0;
+
+ /* check for LID ACPI switch */
+ if (!strcmp(ACPI_BUTTON_HID_LID, acpi_device_hid(adev))) {
+ s3_wmi.pnp0c0d_adev = adev;
+ return 0;
+ }
+
+ /* ignore non SPI controllers */
+ if (strncmp(acpi_device_bid(adev), SPI_CTL_OBJ_NAME,
+ strlen(SPI_CTL_OBJ_NAME)))
+ return 0;
+
+ status = acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, 1,
+ s3_wmi_attach_spi_device, NULL,
+ &ts_adev, NULL);
+ if (ACPI_FAILURE(status))
+ dev_warn(dev, "failed to enumerate SPI slaves\n");
+
+ if (!ts_adev)
+ return 0;
+
+ s3_wmi.touchscreen_adev = ts_adev;
+
+ return 0;
+}
+
+static int s3_wmi_create_and_register_input(struct platform_device *pdev)
+{
+ struct input_dev *input;
+ int error;
+
+ input = devm_input_allocate_device(&pdev->dev);
+ if (!input)
+ return -ENOMEM;
+
+ input->name = "Lid Switch";
+ input->phys = "button/input0";
+ input->id.bustype = BUS_HOST;
+ input->id.product = 0x0005;
+
+ input_set_capability(input, EV_SW, SW_LID);
+
+ /* The input device is devm-managed, so no explicit free on error. */
+ error = input_register_device(input);
+ if (error)
+ return error;
+
+ s3_wmi.input = input;
+
+ return 0;
+}
+
+static int __init s3_wmi_probe(struct platform_device *pdev)
+{
+ int error;
+
+ if (!dmi_check_system(surface3_dmi_table))
+ return -ENODEV;
+
+ memset(&s3_wmi, 0, sizeof(s3_wmi));
+
+ bus_for_each_dev(&platform_bus_type, NULL, NULL,
+ s3_wmi_check_platform_device);
+
+ if (!s3_wmi.touchscreen_adev)
+ return -ENODEV;
+
+ acpi_bus_trim(s3_wmi.pnp0c0d_adev);
+
+ error = s3_wmi_create_and_register_input(pdev);
+ if (error)
+ goto restore_acpi_lid;
+
+ acpi_initialize_hp_context(s3_wmi.touchscreen_adev, &s3_wmi.hp,
+ s3_wmi_hp_notify, NULL);
+
+ s3_wmi_send_lid_state();
+
+ return 0;
+
+ restore_acpi_lid:
+ acpi_bus_scan(s3_wmi.pnp0c0d_adev->handle);
+ return error;
+}
+
+static int s3_wmi_remove(struct platform_device *device)
+{
+ /* remove the hotplug context from the acpi device */
+ s3_wmi.touchscreen_adev->hp = NULL;
+
+ /* reinstall the actual PNP0C0D LID default handler */
+ acpi_bus_scan(s3_wmi.pnp0c0d_adev->handle);
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int s3_wmi_resume(struct device *dev)
+{
+ s3_wmi_send_lid_state();
+ return 0;
+}
+#endif
+static SIMPLE_DEV_PM_OPS(s3_wmi_pm, NULL, s3_wmi_resume);
+
+static struct platform_driver s3_wmi_driver = {
+ .driver = {
+ .name = "surface3-wmi",
+ .pm = &s3_wmi_pm,
+ },
+ .remove = s3_wmi_remove,
+};
+
+static int __init s3_wmi_init(void)
+{
+ int error;
+
+ s3_wmi_pdev = platform_device_alloc("surface3-wmi", -1);
+ if (!s3_wmi_pdev)
+ return -ENOMEM;
+
+ error = platform_device_add(s3_wmi_pdev);
+ if (error)
+ goto err_device_put;
+
+ error = platform_driver_probe(&s3_wmi_driver, s3_wmi_probe);
+ if (error)
+ goto err_device_del;
+
+ pr_info("Surface 3 WMI Extras loaded\n");
+ return 0;
+
+ err_device_del:
+ platform_device_del(s3_wmi_pdev);
+ err_device_put:
+ platform_device_put(s3_wmi_pdev);
+ return error;
+}
+
+static void __exit s3_wmi_exit(void)
+{
+ platform_device_unregister(s3_wmi_pdev);
+ platform_driver_unregister(&s3_wmi_driver);
+}
+
+module_init(s3_wmi_init);
+module_exit(s3_wmi_exit);
diff --git a/drivers/platform/x86/surface3_button.c b/drivers/platform/x86/surface3_button.c
new file mode 100644
index 000000000000..8bfd7f613d36
--- /dev/null
+++ b/drivers/platform/x86/surface3_button.c
@@ -0,0 +1,250 @@
+/*
+ * Support for the button array on the Surface tablets.
+ *
+ * (C) Copyright 2016 Red Hat, Inc
+ *
+ * Based on soc_button_array.c:
+ *
+ * {C} Copyright 2014 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+
+#include <linux/module.h>
+#include <linux/input.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/i2c.h>
+#include <linux/slab.h>
+#include <linux/acpi.h>
+#include <linux/gpio/consumer.h>
+#include <linux/gpio_keys.h>
+#include <linux/gpio.h>
+#include <linux/platform_device.h>
+
+#define SURFACE_BUTTON_OBJ_NAME "TEV2"
+#define MAX_NBUTTONS 4
+
+/*
+ * Some of the buttons like volume up/down are auto repeat, while others
+ * are not. To support both, we register two platform devices, and put
+ * buttons into them based on whether the key should be auto repeat.
+ */
+#define BUTTON_TYPES 2
+
+/*
+ * Support for the power, home and volume buttons is supposed to be
+ * covered by drivers/input/misc/soc_button_array.c, which is implemented
+ * according to the "Windows ACPI Design Guide for SoC Platforms".
+ * However, the Surface 3 does not obey that spec; instead it uses the
+ * device TEV2 (MSHW0028) to declare the GPIOs. The GPIOs also differ
+ * slightly in that the Home button is active high.
+ * Compared to surfacepro3_button.c, which also handles MSHW0028, the
+ * Surface 3 is a reduced platform and thus uses GPIOs, not ACPI events.
+ * We choose an I2C driver here because we need to access the resources
+ * declared under the device node, while surfacepro3_button.c only needs
+ * the ACPI companion node.
+ */
+static const struct acpi_device_id surface3_acpi_match[] = {
+ { "MSHW0028", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, surface3_acpi_match);
+
+struct surface3_button_info {
+ const char *name;
+ int acpi_index;
+ unsigned int event_type;
+ unsigned int event_code;
+ bool autorepeat;
+ bool wakeup;
+ bool active_low;
+};
+
+struct surface3_button_data {
+ struct platform_device *children[BUTTON_TYPES];
+};
+
+/*
+ * Get the Nth GPIO number from the ACPI object.
+ */
+static int surface3_button_lookup_gpio(struct device *dev, int acpi_index)
+{
+ struct gpio_desc *desc;
+ int gpio;
+
+ desc = gpiod_get_index(dev, NULL, acpi_index, GPIOD_ASIS);
+ if (IS_ERR(desc))
+ return PTR_ERR(desc);
+
+ gpio = desc_to_gpio(desc);
+
+ gpiod_put(desc);
+
+ return gpio;
+}
+
+static struct platform_device *
+surface3_button_device_create(struct i2c_client *client,
+ const struct surface3_button_info *button_info,
+ bool autorepeat)
+{
+ const struct surface3_button_info *info;
+ struct platform_device *pd;
+ struct gpio_keys_button *gpio_keys;
+ struct gpio_keys_platform_data *gpio_keys_pdata;
+ int n_buttons = 0;
+ int gpio;
+ int error;
+
+ gpio_keys_pdata = devm_kzalloc(&client->dev,
+ sizeof(*gpio_keys_pdata) +
+ sizeof(*gpio_keys) * MAX_NBUTTONS,
+ GFP_KERNEL);
+ if (!gpio_keys_pdata)
+ return ERR_PTR(-ENOMEM);
+
+ gpio_keys = (void *)(gpio_keys_pdata + 1);
+
+ for (info = button_info; info->name; info++) {
+ if (info->autorepeat != autorepeat)
+ continue;
+
+ gpio = surface3_button_lookup_gpio(&client->dev,
+ info->acpi_index);
+ if (!gpio_is_valid(gpio))
+ continue;
+
+ gpio_keys[n_buttons].type = info->event_type;
+ gpio_keys[n_buttons].code = info->event_code;
+ gpio_keys[n_buttons].gpio = gpio;
+ gpio_keys[n_buttons].active_low = info->active_low;
+ gpio_keys[n_buttons].desc = info->name;
+ gpio_keys[n_buttons].wakeup = info->wakeup;
+ n_buttons++;
+ }
+
+ if (n_buttons == 0) {
+ error = -ENODEV;
+ goto err_free_mem;
+ }
+
+ gpio_keys_pdata->buttons = gpio_keys;
+ gpio_keys_pdata->nbuttons = n_buttons;
+ gpio_keys_pdata->rep = autorepeat;
+
+ pd = platform_device_alloc("gpio-keys", PLATFORM_DEVID_AUTO);
+ if (!pd) {
+ error = -ENOMEM;
+ goto err_free_mem;
+ }
+
+ error = platform_device_add_data(pd, gpio_keys_pdata,
+ sizeof(*gpio_keys_pdata));
+ if (error)
+ goto err_free_pdev;
+
+ error = platform_device_add(pd);
+ if (error)
+ goto err_free_pdev;
+
+ return pd;
+
+err_free_pdev:
+ platform_device_put(pd);
+err_free_mem:
+ devm_kfree(&client->dev, gpio_keys_pdata);
+ return ERR_PTR(error);
+}
+
+static int surface3_button_remove(struct i2c_client *client)
+{
+ struct surface3_button_data *priv = i2c_get_clientdata(client);
+
+ int i;
+
+ for (i = 0; i < BUTTON_TYPES; i++)
+ if (priv->children[i])
+ platform_device_unregister(priv->children[i]);
+
+ return 0;
+}
+
+static struct surface3_button_info surface3_button_surface3[] = {
+ { "power", 0, EV_KEY, KEY_POWER, false, true, true },
+ { "home", 1, EV_KEY, KEY_LEFTMETA, false, true, false },
+ { "volume_up", 2, EV_KEY, KEY_VOLUMEUP, true, false, true },
+ { "volume_down", 3, EV_KEY, KEY_VOLUMEDOWN, true, false, true },
+ { }
+};
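Editorial aside (not part of the patch): the positional initializers above map onto struct surface3_button_info in declaration order — name, acpi_index, event_type, event_code, autorepeat, wakeup, active_low. The same first entry written with designated initializers, for readability:

static const struct surface3_button_info example_power_button = {
	.name		= "power",
	.acpi_index	= 0,
	.event_type	= EV_KEY,
	.event_code	= KEY_POWER,
	.autorepeat	= false,
	.wakeup		= true,
	.active_low	= true,
};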
+
+static int surface3_button_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct device *dev = &client->dev;
+ struct surface3_button_data *priv;
+ struct platform_device *pd;
+ int i;
+ int error;
+
+ if (strncmp(acpi_device_bid(ACPI_COMPANION(&client->dev)),
+ SURFACE_BUTTON_OBJ_NAME,
+ strlen(SURFACE_BUTTON_OBJ_NAME)))
+ return -ENODEV;
+
+ if (gpiod_count(dev, KBUILD_MODNAME) <= 0) {
+ dev_dbg(dev, "no GPIO attached, ignoring...\n");
+ return -ENODEV;
+ }
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ i2c_set_clientdata(client, priv);
+
+ for (i = 0; i < BUTTON_TYPES; i++) {
+ pd = surface3_button_device_create(client,
+ surface3_button_surface3,
+ i == 0);
+ if (IS_ERR(pd)) {
+ error = PTR_ERR(pd);
+ if (error != -ENODEV) {
+ surface3_button_remove(client);
+ return error;
+ }
+ continue;
+ }
+
+ priv->children[i] = pd;
+ }
+
+ if (!priv->children[0] && !priv->children[1])
+ return -ENODEV;
+
+ return 0;
+}
+
+static const struct i2c_device_id surface3_id[] = {
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, surface3_id);
+
+static struct i2c_driver surface3_driver = {
+ .probe = surface3_button_probe,
+ .remove = surface3_button_remove,
+ .id_table = surface3_id,
+ .driver = {
+ .name = "surface3",
+ .acpi_match_table = ACPI_PTR(surface3_acpi_match),
+ },
+};
+module_i2c_driver(surface3_driver);
+
+MODULE_AUTHOR("Benjamin Tissoires <benjamin.tissoires@gmail.com>");
+MODULE_DESCRIPTION("surface3 button array driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
index b65ce7519411..aa65a857a6b1 100644
--- a/drivers/platform/x86/thinkpad_acpi.c
+++ b/drivers/platform/x86/thinkpad_acpi.c
@@ -128,6 +128,7 @@ enum {
/* ACPI HIDs */
#define TPACPI_ACPI_IBM_HKEY_HID "IBM0068"
#define TPACPI_ACPI_LENOVO_HKEY_HID "LEN0068"
+#define TPACPI_ACPI_LENOVO_HKEY_V2_HID "LEN0268"
#define TPACPI_ACPI_EC_HID "PNP0C09"
/* Input IDs */
@@ -190,6 +191,9 @@ enum tpacpi_hkey_event_t {
TP_HKEY_EV_LID_OPEN = 0x5002, /* laptop lid opened */
TP_HKEY_EV_TABLET_TABLET = 0x5009, /* tablet swivel up */
TP_HKEY_EV_TABLET_NOTEBOOK = 0x500a, /* tablet swivel down */
+ TP_HKEY_EV_TABLET_CHANGED = 0x60c0, /* X1 Yoga (2016):
+ * enter/leave tablet mode
+ */
TP_HKEY_EV_PEN_INSERTED = 0x500b, /* tablet pen inserted */
TP_HKEY_EV_PEN_REMOVED = 0x500c, /* tablet pen removed */
TP_HKEY_EV_BRGHT_CHANGED = 0x5010, /* backlight control event */
@@ -302,7 +306,12 @@ static struct {
u32 hotkey:1;
u32 hotkey_mask:1;
u32 hotkey_wlsw:1;
- u32 hotkey_tablet:1;
+ enum {
+ TP_HOTKEY_TABLET_NONE = 0,
+ TP_HOTKEY_TABLET_USES_MHKG,
+ /* X1 Yoga 2016, seen on BIOS N1FET44W */
+ TP_HOTKEY_TABLET_USES_CMMD,
+ } hotkey_tablet;
u32 kbdlight:1;
u32 light:1;
u32 light_status:1;
@@ -2059,6 +2068,8 @@ static void hotkey_poll_setup(const bool may_warn);
/* HKEY.MHKG() return bits */
#define TP_HOTKEY_TABLET_MASK (1 << 3)
+/* ThinkPad X1 Yoga (2016) */
+#define TP_EC_CMMD_TABLET_MODE 0x6
static int hotkey_get_wlsw(void)
{
@@ -2083,10 +2094,23 @@ static int hotkey_get_tablet_mode(int *status)
{
int s;
- if (!acpi_evalf(hkey_handle, &s, "MHKG", "d"))
- return -EIO;
+ switch (tp_features.hotkey_tablet) {
+ case TP_HOTKEY_TABLET_USES_MHKG:
+ if (!acpi_evalf(hkey_handle, &s, "MHKG", "d"))
+ return -EIO;
+
+ *status = ((s & TP_HOTKEY_TABLET_MASK) != 0);
+ break;
+ case TP_HOTKEY_TABLET_USES_CMMD:
+ if (!acpi_evalf(ec_handle, &s, "CMMD", "d"))
+ return -EIO;
+
+ *status = (s == TP_EC_CMMD_TABLET_MODE);
+ break;
+ default:
+ break;
+ }
- *status = ((s & TP_HOTKEY_TABLET_MASK) != 0);
return 0;
}
@@ -3117,6 +3141,37 @@ static const struct tpacpi_quirk tpacpi_hotkey_qtable[] __initconst = {
typedef u16 tpacpi_keymap_entry_t;
typedef tpacpi_keymap_entry_t tpacpi_keymap_t[TPACPI_HOTKEY_MAP_LEN];
+static int hotkey_init_tablet_mode(void)
+{
+ int in_tablet_mode = 0, res;
+ char *type = NULL;
+
+ if (acpi_evalf(hkey_handle, &res, "MHKG", "qd")) {
+ /* For X41t, X60t, X61t Tablets... */
+ tp_features.hotkey_tablet = TP_HOTKEY_TABLET_USES_MHKG;
+ in_tablet_mode = !!(res & TP_HOTKEY_TABLET_MASK);
+ type = "MHKG";
+ } else if (acpi_evalf(ec_handle, &res, "CMMD", "qd")) {
+ /* For X1 Yoga (2016) */
+ tp_features.hotkey_tablet = TP_HOTKEY_TABLET_USES_CMMD;
+ in_tablet_mode = res == TP_EC_CMMD_TABLET_MODE;
+ type = "CMMD";
+ }
+
+ if (!tp_features.hotkey_tablet)
+ return 0;
+
+ pr_info("Tablet mode switch found (type: %s), currently in %s mode\n",
+ type, in_tablet_mode ? "tablet" : "laptop");
+
+ res = add_to_attr_set(hotkey_dev_attributes,
+ &dev_attr_hotkey_tablet_mode.attr);
+ if (res)
+ return -1;
+
+ return in_tablet_mode;
+}
+
static int __init hotkey_init(struct ibm_init_struct *iibm)
{
/* Requirements for changing the default keymaps:
@@ -3464,21 +3519,14 @@ static int __init hotkey_init(struct ibm_init_struct *iibm)
res = add_to_attr_set(hotkey_dev_attributes,
&dev_attr_hotkey_radio_sw.attr);
- /* For X41t, X60t, X61t Tablets... */
- if (!res && acpi_evalf(hkey_handle, &status, "MHKG", "qd")) {
- tp_features.hotkey_tablet = 1;
- tabletsw_state = !!(status & TP_HOTKEY_TABLET_MASK);
- pr_info("possible tablet mode switch found; "
- "ThinkPad in %s mode\n",
- (tabletsw_state) ? "tablet" : "laptop");
- res = add_to_attr_set(hotkey_dev_attributes,
- &dev_attr_hotkey_tablet_mode.attr);
- }
+ res = hotkey_init_tablet_mode();
+ if (res < 0)
+ goto err_exit;
- if (!res)
- res = register_attr_set_with_sysfs(
- hotkey_dev_attributes,
- &tpacpi_pdev->dev.kobj);
+ tabletsw_state = res;
+
+ res = register_attr_set_with_sysfs(hotkey_dev_attributes,
+ &tpacpi_pdev->dev.kobj);
if (res)
goto err_exit;
@@ -3899,6 +3947,12 @@ static bool hotkey_notify_6xxx(const u32 hkey,
*ignore_acpi_ev = true;
return true;
+ case TP_HKEY_EV_TABLET_CHANGED:
+ tpacpi_input_send_tabletsw();
+ hotkey_tablet_mode_notify_change();
+ *send_acpi_ev = false;
+ break;
+
default:
pr_warn("unknown possible thermal alarm or keyboard event received\n");
known = false;
@@ -4143,6 +4197,7 @@ errexit:
static const struct acpi_device_id ibm_htk_device_ids[] = {
{TPACPI_ACPI_IBM_HKEY_HID, 0},
{TPACPI_ACPI_LENOVO_HKEY_HID, 0},
+ {TPACPI_ACPI_LENOVO_HKEY_V2_HID, 0},
{"", 0},
};
@@ -7716,7 +7771,7 @@ static struct ibm_struct volume_driver_data = {
#define alsa_card NULL
-static void inline volume_alsa_notify_change(void)
+static inline void volume_alsa_notify_change(void)
{
}
@@ -9018,7 +9073,7 @@ static int mute_led_on_off(struct tp_led_table *t, bool state)
acpi_handle temp;
int output;
- if (!ACPI_SUCCESS(acpi_get_handle(hkey_handle, t->name, &temp))) {
+ if (ACPI_FAILURE(acpi_get_handle(hkey_handle, t->name, &temp))) {
pr_warn("Thinkpad ACPI has no %s interface.\n", t->name);
return -EIO;
}
diff --git a/drivers/power/avs/rockchip-io-domain.c b/drivers/power/avs/rockchip-io-domain.c
index 01b6d3f9b8fb..56bce1908be2 100644
--- a/drivers/power/avs/rockchip-io-domain.c
+++ b/drivers/power/avs/rockchip-io-domain.c
@@ -143,7 +143,7 @@ static int rockchip_iodomain_notify(struct notifier_block *nb,
if (ret && event == REGULATOR_EVENT_PRE_VOLTAGE_CHANGE)
return NOTIFY_BAD;
- dev_info(supply->iod->dev, "Setting to %d done\n", uV);
+ dev_dbg(supply->iod->dev, "Setting to %d done\n", uV);
return NOTIFY_OK;
}
diff --git a/drivers/power/reset/Kconfig b/drivers/power/reset/Kconfig
index c74c3f67b8da..abeb77217a21 100644
--- a/drivers/power/reset/Kconfig
+++ b/drivers/power/reset/Kconfig
@@ -104,6 +104,16 @@ config POWER_RESET_MSM
help
Power off and restart support for Qualcomm boards.
+config POWER_RESET_PIIX4_POWEROFF
+ tristate "Intel PIIX4 power-off driver"
+ depends on PCI
+ depends on MIPS || COMPILE_TEST
+ help
+ This driver supports powering off a system using the Intel PIIX4
+ southbridge, for example the MIPS Malta development board. The
+ southbridge SOff state is entered in response to a request to
+ power off the system.
+
config POWER_RESET_LTC2952
bool "LTC2952 PowerPath power-off driver"
depends on OF_GPIO
diff --git a/drivers/power/reset/Makefile b/drivers/power/reset/Makefile
index 1be307c7fc25..11dae3b56ff9 100644
--- a/drivers/power/reset/Makefile
+++ b/drivers/power/reset/Makefile
@@ -10,6 +10,7 @@ obj-$(CONFIG_POWER_RESET_GPIO_RESTART) += gpio-restart.o
obj-$(CONFIG_POWER_RESET_HISI) += hisi-reboot.o
obj-$(CONFIG_POWER_RESET_IMX) += imx-snvs-poweroff.o
obj-$(CONFIG_POWER_RESET_MSM) += msm-poweroff.o
+obj-$(CONFIG_POWER_RESET_PIIX4_POWEROFF) += piix4-poweroff.o
obj-$(CONFIG_POWER_RESET_LTC2952) += ltc2952-poweroff.o
obj-$(CONFIG_POWER_RESET_QNAP) += qnap-poweroff.o
obj-$(CONFIG_POWER_RESET_RESTART) += restart-poweroff.o
diff --git a/drivers/power/reset/at91-poweroff.c b/drivers/power/reset/at91-poweroff.c
index e9e24df35f26..a85dd4d233af 100644
--- a/drivers/power/reset/at91-poweroff.c
+++ b/drivers/power/reset/at91-poweroff.c
@@ -169,6 +169,7 @@ static const struct of_device_id at91_poweroff_of_match[] = {
{ .compatible = "atmel,at91sam9x5-shdwc", },
{ /*sentinel*/ }
};
+MODULE_DEVICE_TABLE(of, at91_poweroff_of_match);
static struct platform_driver at91_poweroff_driver = {
.remove = __exit_p(at91_poweroff_remove),
diff --git a/drivers/power/reset/at91-reset.c b/drivers/power/reset/at91-reset.c
index 1b5d450586d1..568580cf0655 100644
--- a/drivers/power/reset/at91-reset.c
+++ b/drivers/power/reset/at91-reset.c
@@ -175,6 +175,7 @@ static const struct of_device_id at91_reset_of_match[] = {
{ .compatible = "atmel,sama5d3-rstc", .data = sama5d3_restart },
{ /* sentinel */ }
};
+MODULE_DEVICE_TABLE(of, at91_reset_of_match);
static struct notifier_block at91_restart_nb = {
.priority = 192,
@@ -242,6 +243,7 @@ static const struct platform_device_id at91_reset_plat_match[] = {
{ "at91-sam9g45-reset", (unsigned long)at91sam9g45_restart },
{ /* sentinel */ }
};
+MODULE_DEVICE_TABLE(platform, at91_reset_plat_match);
static struct platform_driver at91_reset_driver = {
.remove = __exit_p(at91_reset_remove),
diff --git a/drivers/power/reset/piix4-poweroff.c b/drivers/power/reset/piix4-poweroff.c
new file mode 100644
index 000000000000..bacfc95783f0
--- /dev/null
+++ b/drivers/power/reset/piix4-poweroff.c
@@ -0,0 +1,113 @@
+/*
+ * Copyright (C) 2016 Imagination Technologies
+ * Author: Paul Burton <paul.burton@imgtec.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/pm.h>
+
+static struct pci_dev *pm_dev;
+static resource_size_t io_offset;
+
+enum piix4_pm_io_reg {
+ PIIX4_FUNC3IO_PMSTS = 0x00,
+#define PIIX4_FUNC3IO_PMSTS_PWRBTN_STS BIT(8)
+ PIIX4_FUNC3IO_PMCNTRL = 0x04,
+#define PIIX4_FUNC3IO_PMCNTRL_SUS_EN BIT(13)
+#define PIIX4_FUNC3IO_PMCNTRL_SUS_TYP_SOFF (0x0 << 10)
+};
+
+#define PIIX4_SUSPEND_MAGIC 0x00120002
+
+static const int piix4_pm_io_region = PCI_BRIDGE_RESOURCES;
+
+static void piix4_poweroff(void)
+{
+ int spec_devid;
+ u16 sts;
+
+ /* Ensure the power button status is clear */
+ while (1) {
+ sts = inw(io_offset + PIIX4_FUNC3IO_PMSTS);
+ if (!(sts & PIIX4_FUNC3IO_PMSTS_PWRBTN_STS))
+ break;
+ outw(sts, io_offset + PIIX4_FUNC3IO_PMSTS);
+ }
+
+ /* Enable entry to suspend */
+ outw(PIIX4_FUNC3IO_PMCNTRL_SUS_TYP_SOFF | PIIX4_FUNC3IO_PMCNTRL_SUS_EN,
+ io_offset + PIIX4_FUNC3IO_PMCNTRL);
+
+ /* If the special cycle occurs too soon this doesn't work... */
+ mdelay(10);
+
+ /*
+ * The PIIX4 will enter the suspend state only after seeing a special
+ * cycle with the correct magic data on the PCI bus. Generate that
+ * cycle now.
+ */
+ spec_devid = PCI_DEVID(0, PCI_DEVFN(0x1f, 0x7));
+ pci_bus_write_config_dword(pm_dev->bus, spec_devid, 0,
+ PIIX4_SUSPEND_MAGIC);
+
+ /* Give the system some time to power down, then error */
+ mdelay(1000);
+ pr_emerg("Unable to poweroff system\n");
+}
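Editorial aside (not part of the patch): the special cycle is addressed at device 0x1f, function 0x7 — the all-ones devfn which, per the PCI specification, a host bridge converts from a type-1 configuration write into a special cycle on the bus. A standalone sketch of the macro arithmetic, with the two macros mirrored from the kernel's <linux/pci.h>:

#include <stdio.h>

#define PCI_DEVFN(slot, func)	((((slot) & 0x1f) << 3) | ((func) & 0x07))
#define PCI_DEVID(bus, devfn)	((((unsigned)(bus)) << 8) | (devfn))

int main(void)
{
	/* devfn 0xff (all bits set) marks the write as a special cycle */
	printf("devfn = 0x%02x, devid = 0x%04x\n",
	       PCI_DEVFN(0x1f, 0x7), PCI_DEVID(0, PCI_DEVFN(0x1f, 0x7)));
	return 0;
}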
+
+static int piix4_poweroff_probe(struct pci_dev *dev,
+ const struct pci_device_id *id)
+{
+ int res;
+
+ if (pm_dev)
+ return -EINVAL;
+
+ /* Request access to the PIIX4 PM IO registers */
+ res = pci_request_region(dev, piix4_pm_io_region,
+ "PIIX4 PM IO registers");
+ if (res) {
+ dev_err(&dev->dev, "failed to request PM IO registers: %d\n",
+ res);
+ return res;
+ }
+
+ pm_dev = dev;
+ io_offset = pci_resource_start(dev, piix4_pm_io_region);
+ pm_power_off = piix4_poweroff;
+
+ return 0;
+}
+
+static void piix4_poweroff_remove(struct pci_dev *dev)
+{
+ if (pm_power_off == piix4_poweroff)
+ pm_power_off = NULL;
+
+ pci_release_region(dev, piix4_pm_io_region);
+ pm_dev = NULL;
+}
+
+static const struct pci_device_id piix4_poweroff_ids[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82371AB_3) },
+ { 0 },
+};
+
+static struct pci_driver piix4_poweroff_driver = {
+ .name = "piix4-poweroff",
+ .id_table = piix4_poweroff_ids,
+ .probe = piix4_poweroff_probe,
+ .remove = piix4_poweroff_remove,
+};
+
+module_pci_driver(piix4_poweroff_driver);
+MODULE_AUTHOR("Paul Burton <paul.burton@imgtec.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/power/reset/syscon-reboot-mode.c b/drivers/power/reset/syscon-reboot-mode.c
index 1ecb51d67149..c8c371b285b1 100644
--- a/drivers/power/reset/syscon-reboot-mode.c
+++ b/drivers/power/reset/syscon-reboot-mode.c
@@ -74,6 +74,7 @@ static const struct of_device_id syscon_reboot_mode_of_match[] = {
{ .compatible = "syscon-reboot-mode" },
{}
};
+MODULE_DEVICE_TABLE(of, syscon_reboot_mode_of_match);
static struct platform_driver syscon_reboot_mode_driver = {
.probe = syscon_reboot_mode_probe,
diff --git a/drivers/power/reset/zx-reboot.c b/drivers/power/reset/zx-reboot.c
index b0b1eb3a78c2..7549c7f74a3c 100644
--- a/drivers/power/reset/zx-reboot.c
+++ b/drivers/power/reset/zx-reboot.c
@@ -72,6 +72,7 @@ static const struct of_device_id zx_reboot_of_match[] = {
{ .compatible = "zte,sysctrl" },
{}
};
+MODULE_DEVICE_TABLE(of, zx_reboot_of_match);
static struct platform_driver zx_reboot_driver = {
.probe = zx_reboot_probe,
diff --git a/drivers/power/supply/ab8500_fg.c b/drivers/power/supply/ab8500_fg.c
index 2199f673118c..c569f82a0071 100644
--- a/drivers/power/supply/ab8500_fg.c
+++ b/drivers/power/supply/ab8500_fg.c
@@ -1900,7 +1900,7 @@ static void ab8500_fg_low_bat_work(struct work_struct *work)
* ab8500_fg_battok_calc - calculate the bit pattern corresponding
* to the target voltage.
* @di: pointer to the ab8500_fg structure
- * @target target voltage
+ * @target: target voltage
*
* Returns bit pattern closest to the target voltage
* valid return values are 0-14. (0-BATT_OK_MAX_NR_INCREMENTS)
@@ -2391,7 +2391,7 @@ static void ab8500_fg_external_power_changed(struct power_supply *psy)
}
/**
- * abab8500_fg_reinit_work() - work to reset the FG algorithm
+ * ab8500_fg_reinit_work() - work to reset the FG algorithm
* @work: pointer to the work_struct structure
*
* Used to reset the current battery capacity to be able to
@@ -2528,7 +2528,7 @@ static struct kobj_type ab8500_fg_ktype = {
};
/**
- * ab8500_chargalg_sysfs_exit() - de-init of sysfs entry
+ * ab8500_fg_sysfs_exit() - de-init of sysfs entry
* @di: pointer to the struct ab8500_chargalg
*
* This function removes the entry in sysfs.
@@ -2539,7 +2539,7 @@ static void ab8500_fg_sysfs_exit(struct ab8500_fg *di)
}
/**
- * ab8500_chargalg_sysfs_init() - init of sysfs entry
+ * ab8500_fg_sysfs_init() - init of sysfs entry
* @di: pointer to the struct ab8500_chargalg
*
* This function adds an entry in sysfs.
diff --git a/drivers/power/supply/axp288_fuel_gauge.c b/drivers/power/supply/axp288_fuel_gauge.c
index 5bdde692f724..539eb41504bb 100644
--- a/drivers/power/supply/axp288_fuel_gauge.c
+++ b/drivers/power/supply/axp288_fuel_gauge.c
@@ -1120,6 +1120,7 @@ static const struct platform_device_id axp288_fg_id_table[] = {
{ .name = DEV_NAME },
{},
};
+MODULE_DEVICE_TABLE(platform, axp288_fg_id_table);
static int axp288_fuel_gauge_remove(struct platform_device *pdev)
{
diff --git a/drivers/power/supply/bq24190_charger.c b/drivers/power/supply/bq24190_charger.c
index f5746b9f4e83..e9584330aeed 100644
--- a/drivers/power/supply/bq24190_charger.c
+++ b/drivers/power/supply/bq24190_charger.c
@@ -1141,7 +1141,7 @@ static int bq24190_battery_set_property(struct power_supply *psy,
dev_dbg(bdi->dev, "prop: %d\n", psp);
- pm_runtime_put_sync(bdi->dev);
+ pm_runtime_get_sync(bdi->dev);
switch (psp) {
case POWER_SUPPLY_PROP_ONLINE:
diff --git a/drivers/power/supply/bq27xxx_battery.c b/drivers/power/supply/bq27xxx_battery.c
index 3b0dbc689d72..08c36b8e04bd 100644
--- a/drivers/power/supply/bq27xxx_battery.c
+++ b/drivers/power/supply/bq27xxx_battery.c
@@ -164,6 +164,25 @@ static u8 bq27xxx_regs[][BQ27XXX_REG_MAX] = {
[BQ27XXX_REG_DCAP] = 0x3c,
[BQ27XXX_REG_AP] = INVALID_REG_ADDR,
},
+ [BQ27510] = {
+ [BQ27XXX_REG_CTRL] = 0x00,
+ [BQ27XXX_REG_TEMP] = 0x06,
+ [BQ27XXX_REG_INT_TEMP] = 0x28,
+ [BQ27XXX_REG_VOLT] = 0x08,
+ [BQ27XXX_REG_AI] = 0x14,
+ [BQ27XXX_REG_FLAGS] = 0x0a,
+ [BQ27XXX_REG_TTE] = 0x16,
+ [BQ27XXX_REG_TTF] = INVALID_REG_ADDR,
+ [BQ27XXX_REG_TTES] = 0x1a,
+ [BQ27XXX_REG_TTECP] = INVALID_REG_ADDR,
+ [BQ27XXX_REG_NAC] = 0x0c,
+ [BQ27XXX_REG_FCC] = 0x12,
+ [BQ27XXX_REG_CYCT] = 0x1e,
+ [BQ27XXX_REG_AE] = INVALID_REG_ADDR,
+ [BQ27XXX_REG_SOC] = 0x20,
+ [BQ27XXX_REG_DCAP] = 0x2e,
+ [BQ27XXX_REG_AP] = INVALID_REG_ADDR,
+ },
[BQ27530] = {
[BQ27XXX_REG_CTRL] = 0x00,
[BQ27XXX_REG_TEMP] = 0x06,
@@ -302,6 +321,24 @@ static enum power_supply_property bq27500_battery_props[] = {
POWER_SUPPLY_PROP_MANUFACTURER,
};
+static enum power_supply_property bq27510_battery_props[] = {
+ POWER_SUPPLY_PROP_STATUS,
+ POWER_SUPPLY_PROP_PRESENT,
+ POWER_SUPPLY_PROP_VOLTAGE_NOW,
+ POWER_SUPPLY_PROP_CURRENT_NOW,
+ POWER_SUPPLY_PROP_CAPACITY,
+ POWER_SUPPLY_PROP_CAPACITY_LEVEL,
+ POWER_SUPPLY_PROP_TEMP,
+ POWER_SUPPLY_PROP_TIME_TO_EMPTY_NOW,
+ POWER_SUPPLY_PROP_TECHNOLOGY,
+ POWER_SUPPLY_PROP_CHARGE_FULL,
+ POWER_SUPPLY_PROP_CHARGE_NOW,
+ POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN,
+ POWER_SUPPLY_PROP_CYCLE_COUNT,
+ POWER_SUPPLY_PROP_HEALTH,
+ POWER_SUPPLY_PROP_MANUFACTURER,
+};
+
static enum power_supply_property bq27530_battery_props[] = {
POWER_SUPPLY_PROP_STATUS,
POWER_SUPPLY_PROP_PRESENT,
@@ -385,6 +422,7 @@ static struct {
BQ27XXX_PROP(BQ27000, bq27000_battery_props),
BQ27XXX_PROP(BQ27010, bq27010_battery_props),
BQ27XXX_PROP(BQ27500, bq27500_battery_props),
+ BQ27XXX_PROP(BQ27510, bq27510_battery_props),
BQ27XXX_PROP(BQ27530, bq27530_battery_props),
BQ27XXX_PROP(BQ27541, bq27541_battery_props),
BQ27XXX_PROP(BQ27545, bq27545_battery_props),
@@ -397,10 +435,11 @@ static LIST_HEAD(bq27xxx_battery_devices);
static int poll_interval_param_set(const char *val, const struct kernel_param *kp)
{
struct bq27xxx_device_info *di;
+ unsigned int prev_val = *(unsigned int *) kp->arg;
int ret;
ret = param_set_uint(val, kp);
- if (ret < 0)
+ if (ret < 0 || prev_val == *(unsigned int *) kp->arg)
return ret;
mutex_lock(&bq27xxx_list_lock);
@@ -635,7 +674,8 @@ static int bq27xxx_battery_read_pwr_avg(struct bq27xxx_device_info *di)
*/
static bool bq27xxx_battery_overtemp(struct bq27xxx_device_info *di, u16 flags)
{
- if (di->chip == BQ27500 || di->chip == BQ27541 || di->chip == BQ27545)
+ if (di->chip == BQ27500 || di->chip == BQ27510 ||
+ di->chip == BQ27541 || di->chip == BQ27545)
return flags & (BQ27XXX_FLAG_OTC | BQ27XXX_FLAG_OTD);
if (di->chip == BQ27530 || di->chip == BQ27421)
return flags & BQ27XXX_FLAG_OT;
diff --git a/drivers/power/supply/bq27xxx_battery_i2c.c b/drivers/power/supply/bq27xxx_battery_i2c.c
index 85d4ea2a9c20..5c5c3a6f9923 100644
--- a/drivers/power/supply/bq27xxx_battery_i2c.c
+++ b/drivers/power/supply/bq27xxx_battery_i2c.c
@@ -149,8 +149,8 @@ static const struct i2c_device_id bq27xxx_i2c_id_table[] = {
{ "bq27200", BQ27000 },
{ "bq27210", BQ27010 },
{ "bq27500", BQ27500 },
- { "bq27510", BQ27500 },
- { "bq27520", BQ27500 },
+ { "bq27510", BQ27510 },
+ { "bq27520", BQ27510 },
{ "bq27530", BQ27530 },
{ "bq27531", BQ27530 },
{ "bq27541", BQ27541 },
diff --git a/drivers/power/supply/ipaq_micro_battery.c b/drivers/power/supply/ipaq_micro_battery.c
index 4af7b770f293..2fa6edd6e8b1 100644
--- a/drivers/power/supply/ipaq_micro_battery.c
+++ b/drivers/power/supply/ipaq_micro_battery.c
@@ -313,4 +313,4 @@ module_platform_driver(micro_batt_device_driver);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("driver for iPAQ Atmel micro battery");
-MODULE_ALIAS("platform:battery-ipaq-micro");
+MODULE_ALIAS("platform:ipaq-micro-battery");
diff --git a/drivers/power/supply/lp8788-charger.c b/drivers/power/supply/lp8788-charger.c
index 7321b727d484..509e2b341bd6 100644
--- a/drivers/power/supply/lp8788-charger.c
+++ b/drivers/power/supply/lp8788-charger.c
@@ -384,9 +384,6 @@ static int lp8788_update_charger_params(struct platform_device *pdev,
for (i = 0; i < pdata->num_chg_params; i++) {
param = pdata->chg_params + i;
- if (!param)
- continue;
-
if (lp8788_is_valid_charger_register(param->addr)) {
ret = lp8788_write_byte(lp, param->addr, param->val);
if (ret)
diff --git a/drivers/power/supply/max17040_battery.c b/drivers/power/supply/max17040_battery.c
index 8689c80202b5..e7c3649b31a0 100644
--- a/drivers/power/supply/max17040_battery.c
+++ b/drivers/power/supply/max17040_battery.c
@@ -21,18 +21,13 @@
#include <linux/max17040_battery.h>
#include <linux/slab.h>
-#define MAX17040_VCELL_MSB 0x02
-#define MAX17040_VCELL_LSB 0x03
-#define MAX17040_SOC_MSB 0x04
-#define MAX17040_SOC_LSB 0x05
-#define MAX17040_MODE_MSB 0x06
-#define MAX17040_MODE_LSB 0x07
-#define MAX17040_VER_MSB 0x08
-#define MAX17040_VER_LSB 0x09
-#define MAX17040_RCOMP_MSB 0x0C
-#define MAX17040_RCOMP_LSB 0x0D
-#define MAX17040_CMD_MSB 0xFE
-#define MAX17040_CMD_LSB 0xFF
+#define MAX17040_VCELL 0x02
+#define MAX17040_SOC 0x04
+#define MAX17040_MODE 0x06
+#define MAX17040_VER 0x08
+#define MAX17040_RCOMP 0x0C
+#define MAX17040_CMD 0xFE
+
#define MAX17040_DELAY 1000
#define MAX17040_BATTERY_FULL 95
@@ -78,11 +73,11 @@ static int max17040_get_property(struct power_supply *psy,
return 0;
}
-static int max17040_write_reg(struct i2c_client *client, int reg, u8 value)
+static int max17040_write_reg(struct i2c_client *client, int reg, u16 value)
{
int ret;
- ret = i2c_smbus_write_byte_data(client, reg, value);
+ ret = i2c_smbus_write_word_swapped(client, reg, value);
if (ret < 0)
dev_err(&client->dev, "%s: err %d\n", __func__, ret);
@@ -94,7 +89,7 @@ static int max17040_read_reg(struct i2c_client *client, int reg)
{
int ret;
- ret = i2c_smbus_read_byte_data(client, reg);
+ ret = i2c_smbus_read_word_swapped(client, reg);
if (ret < 0)
dev_err(&client->dev, "%s: err %d\n", __func__, ret);
@@ -104,43 +99,36 @@ static int max17040_read_reg(struct i2c_client *client, int reg)
static void max17040_reset(struct i2c_client *client)
{
- max17040_write_reg(client, MAX17040_CMD_MSB, 0x54);
- max17040_write_reg(client, MAX17040_CMD_LSB, 0x00);
+ max17040_write_reg(client, MAX17040_CMD, 0x0054);
}
static void max17040_get_vcell(struct i2c_client *client)
{
struct max17040_chip *chip = i2c_get_clientdata(client);
- u8 msb;
- u8 lsb;
+ u16 vcell;
- msb = max17040_read_reg(client, MAX17040_VCELL_MSB);
- lsb = max17040_read_reg(client, MAX17040_VCELL_LSB);
+ vcell = max17040_read_reg(client, MAX17040_VCELL);
- chip->vcell = (msb << 4) + (lsb >> 4);
+ chip->vcell = vcell;
}
static void max17040_get_soc(struct i2c_client *client)
{
struct max17040_chip *chip = i2c_get_clientdata(client);
- u8 msb;
- u8 lsb;
+ u16 soc;
- msb = max17040_read_reg(client, MAX17040_SOC_MSB);
- lsb = max17040_read_reg(client, MAX17040_SOC_LSB);
+ soc = max17040_read_reg(client, MAX17040_SOC);
- chip->soc = msb;
+ chip->soc = (soc >> 8);
}
static void max17040_get_version(struct i2c_client *client)
{
- u8 msb;
- u8 lsb;
+ u16 version;
- msb = max17040_read_reg(client, MAX17040_VER_MSB);
- lsb = max17040_read_reg(client, MAX17040_VER_LSB);
+ version = max17040_read_reg(client, MAX17040_VER);
- dev_info(&client->dev, "MAX17040 Fuel-Gauge Ver %d%d\n", msb, lsb);
+ dev_info(&client->dev, "MAX17040 Fuel-Gauge Ver 0x%x\n", version);
}
static void max17040_get_online(struct i2c_client *client)
diff --git a/drivers/power/supply/max8997_charger.c b/drivers/power/supply/max8997_charger.c
index 0b2eab571528..290ddc12b040 100644
--- a/drivers/power/supply/max8997_charger.c
+++ b/drivers/power/supply/max8997_charger.c
@@ -184,6 +184,7 @@ static const struct platform_device_id max8997_battery_id[] = {
{ "max8997-battery", 0 },
{ }
};
+MODULE_DEVICE_TABLE(platform, max8997_battery_id);
static struct platform_driver max8997_battery_driver = {
.driver = {
diff --git a/drivers/power/supply/power_supply_core.c b/drivers/power/supply/power_supply_core.c
index a74d8ca383a1..1e0960b646e8 100644
--- a/drivers/power/supply/power_supply_core.c
+++ b/drivers/power/supply/power_supply_core.c
@@ -413,7 +413,7 @@ static int power_supply_match_device_node(struct device *dev, const void *data)
/**
* power_supply_get_by_phandle() - Search for a power supply and returns its ref
* @np: Pointer to device node holding phandle property
- * @phandle_name: Name of property holding a power supply name
+ * @property: Name of property holding a power supply name
*
* If power supply was found, it increases reference count for the
* internal power supply's device. The user should power_supply_put()
@@ -458,7 +458,7 @@ static void devm_power_supply_put(struct device *dev, void *res)
* devm_power_supply_get_by_phandle() - Resource managed version of
* power_supply_get_by_phandle()
* @dev: Pointer to device holding phandle property
- * @phandle_name: Name of property holding a power supply phandle
+ * @property: Name of property holding a power supply phandle
*
* Return: On success returns a reference to a power supply with
* matching name equals to value under @property, NULL or ERR_PTR otherwise.
diff --git a/drivers/power/supply/wm8350_power.c b/drivers/power/supply/wm8350_power.c
index 5c5880664e09..a2740cf57ad3 100644
--- a/drivers/power/supply/wm8350_power.c
+++ b/drivers/power/supply/wm8350_power.c
@@ -182,7 +182,7 @@ static ssize_t charger_state_show(struct device *dev,
return sprintf(buf, "%s\n", charge);
}
-static DEVICE_ATTR(charger_state, 0444, charger_state_show, NULL);
+static DEVICE_ATTR_RO(charger_state);
static irqreturn_t wm8350_charger_handler(int irq, void *data)
{
diff --git a/drivers/power/supply/wm97xx_battery.c b/drivers/power/supply/wm97xx_battery.c
index 6285626d142a..e3edb31ac880 100644
--- a/drivers/power/supply/wm97xx_battery.c
+++ b/drivers/power/supply/wm97xx_battery.c
@@ -30,8 +30,7 @@ static enum power_supply_property *prop;
static unsigned long wm97xx_read_bat(struct power_supply *bat_ps)
{
- struct wm97xx_pdata *wmdata = bat_ps->dev.parent->platform_data;
- struct wm97xx_batt_pdata *pdata = wmdata->batt_pdata;
+ struct wm97xx_batt_pdata *pdata = power_supply_get_drvdata(bat_ps);
return wm97xx_read_aux_adc(dev_get_drvdata(bat_ps->dev.parent),
pdata->batt_aux) * pdata->batt_mult /
@@ -40,8 +39,7 @@ static unsigned long wm97xx_read_bat(struct power_supply *bat_ps)
static unsigned long wm97xx_read_temp(struct power_supply *bat_ps)
{
- struct wm97xx_pdata *wmdata = bat_ps->dev.parent->platform_data;
- struct wm97xx_batt_pdata *pdata = wmdata->batt_pdata;
+ struct wm97xx_batt_pdata *pdata = power_supply_get_drvdata(bat_ps);
return wm97xx_read_aux_adc(dev_get_drvdata(bat_ps->dev.parent),
pdata->temp_aux) * pdata->temp_mult /
@@ -52,8 +50,7 @@ static int wm97xx_bat_get_property(struct power_supply *bat_ps,
enum power_supply_property psp,
union power_supply_propval *val)
{
- struct wm97xx_pdata *wmdata = bat_ps->dev.parent->platform_data;
- struct wm97xx_batt_pdata *pdata = wmdata->batt_pdata;
+ struct wm97xx_batt_pdata *pdata = power_supply_get_drvdata(bat_ps);
switch (psp) {
case POWER_SUPPLY_PROP_STATUS:
@@ -103,8 +100,7 @@ static void wm97xx_bat_external_power_changed(struct power_supply *bat_ps)
static void wm97xx_bat_update(struct power_supply *bat_ps)
{
int old_status = bat_status;
- struct wm97xx_pdata *wmdata = bat_ps->dev.parent->platform_data;
- struct wm97xx_batt_pdata *pdata = wmdata->batt_pdata;
+ struct wm97xx_batt_pdata *pdata = power_supply_get_drvdata(bat_ps);
mutex_lock(&work_lock);
@@ -166,15 +162,15 @@ static int wm97xx_bat_probe(struct platform_device *dev)
int ret = 0;
int props = 1; /* POWER_SUPPLY_PROP_PRESENT */
int i = 0;
- struct wm97xx_pdata *wmdata = dev->dev.platform_data;
- struct wm97xx_batt_pdata *pdata;
+ struct wm97xx_batt_pdata *pdata = dev->dev.platform_data;
+ struct power_supply_config cfg = {};
- if (!wmdata) {
+ if (!pdata) {
dev_err(&dev->dev, "No platform data supplied\n");
return -EINVAL;
}
- pdata = wmdata->batt_pdata;
+ cfg.drv_data = pdata;
if (dev->id != -1)
return -EINVAL;
@@ -243,7 +239,7 @@ static int wm97xx_bat_probe(struct platform_device *dev)
bat_psy_desc.properties = prop;
bat_psy_desc.num_properties = props;
- bat_psy = power_supply_register(&dev->dev, &bat_psy_desc, NULL);
+ bat_psy = power_supply_register(&dev->dev, &bat_psy_desc, &cfg);
if (!IS_ERR(bat_psy)) {
schedule_work(&bat_work);
} else {
@@ -266,8 +262,7 @@ err:
static int wm97xx_bat_remove(struct platform_device *dev)
{
- struct wm97xx_pdata *wmdata = dev->dev.platform_data;
- struct wm97xx_batt_pdata *pdata = wmdata->batt_pdata;
+ struct wm97xx_batt_pdata *pdata = dev->dev.platform_data;
if (pdata && gpio_is_valid(pdata->charge_gpio)) {
free_irq(gpio_to_irq(pdata->charge_gpio), dev);
diff --git a/drivers/powercap/intel_rapl.c b/drivers/powercap/intel_rapl.c
index 243b233ff31b..9a25110c4a46 100644
--- a/drivers/powercap/intel_rapl.c
+++ b/drivers/powercap/intel_rapl.c
@@ -189,14 +189,13 @@ struct rapl_package {
unsigned int time_unit;
struct rapl_domain *domains; /* array of domains, sized at runtime */
struct powercap_zone *power_zone; /* keep track of parent zone */
- int nr_cpus; /* active cpus on the package, topology info is lost during
- * cpu hotplug. so we have to track ourselves.
- */
unsigned long power_limit_irq; /* keep track of package power limit
* notify interrupt enable status.
*/
struct list_head plist;
int lead_cpu; /* one active cpu per package for access */
+ /* Track active cpus */
+ struct cpumask cpumask;
};
struct rapl_defaults {
@@ -275,18 +274,6 @@ static struct rapl_package *find_package_by_id(int id)
return NULL;
}
-/* caller must hold cpu hotplug lock */
-static void rapl_cleanup_data(void)
-{
- struct rapl_package *p, *tmp;
-
- list_for_each_entry_safe(p, tmp, &rapl_packages, plist) {
- kfree(p->domains);
- list_del(&p->plist);
- kfree(p);
- }
-}
-
static int get_energy_counter(struct powercap_zone *power_zone, u64 *energy_raw)
{
struct rapl_domain *rd;
@@ -442,6 +429,7 @@ static int contraint_to_pl(struct rapl_domain *rd, int cid)
return i;
}
}
+ pr_err("Cannot find matching power limit for constraint %d\n", cid);
return -EINVAL;
}
@@ -457,6 +445,10 @@ static int set_power_limit(struct powercap_zone *power_zone, int cid,
get_online_cpus();
rd = power_zone_to_rapl_domain(power_zone);
id = contraint_to_pl(rd, cid);
+ if (id < 0) {
+ ret = id;
+ goto set_exit;
+ }
rp = rd->rp;
@@ -496,6 +488,11 @@ static int get_current_power_limit(struct powercap_zone *power_zone, int cid,
get_online_cpus();
rd = power_zone_to_rapl_domain(power_zone);
id = contraint_to_pl(rd, cid);
+ if (id < 0) {
+ ret = id;
+ goto get_exit;
+ }
+
switch (rd->rpl[id].prim_id) {
case PL1_ENABLE:
prim = POWER_LIMIT1;
@@ -512,6 +509,7 @@ static int get_current_power_limit(struct powercap_zone *power_zone, int cid,
else
*data = val;
+get_exit:
put_online_cpus();
return ret;
@@ -527,6 +525,10 @@ static int set_time_window(struct powercap_zone *power_zone, int cid,
get_online_cpus();
rd = power_zone_to_rapl_domain(power_zone);
id = contraint_to_pl(rd, cid);
+ if (id < 0) {
+ ret = id;
+ goto set_time_exit;
+ }
switch (rd->rpl[id].prim_id) {
case PL1_ENABLE:
@@ -538,6 +540,8 @@ static int set_time_window(struct powercap_zone *power_zone, int cid,
default:
ret = -EINVAL;
}
+
+set_time_exit:
put_online_cpus();
return ret;
}
@@ -552,6 +556,10 @@ static int get_time_window(struct powercap_zone *power_zone, int cid, u64 *data)
get_online_cpus();
rd = power_zone_to_rapl_domain(power_zone);
id = contraint_to_pl(rd, cid);
+ if (id < 0) {
+ ret = id;
+ goto get_time_exit;
+ }
switch (rd->rpl[id].prim_id) {
case PL1_ENABLE:
@@ -566,6 +574,8 @@ static int get_time_window(struct powercap_zone *power_zone, int cid, u64 *data)
}
if (!ret)
*data = val;
+
+get_time_exit:
put_online_cpus();
return ret;
@@ -707,7 +717,7 @@ static u64 rapl_unit_xlate(struct rapl_domain *rd, enum unit_type type,
case ENERGY_UNIT:
scale = ENERGY_UNIT_SCALE;
/* per domain unit takes precedence */
- if (rd && rd->domain_energy_unit)
+ if (rd->domain_energy_unit)
units = rd->domain_energy_unit;
else
units = rp->energy_unit;
@@ -976,10 +986,20 @@ static void package_power_limit_irq_save(struct rapl_package *rp)
smp_call_function_single(rp->lead_cpu, power_limit_irq_save_cpu, rp, 1);
}
-static void power_limit_irq_restore_cpu(void *info)
+/*
+ * Restore per package power limit interrupt enable state. Called from cpu
+ * hotplug code on package removal.
+ */
+static void package_power_limit_irq_restore(struct rapl_package *rp)
{
- u32 l, h = 0;
- struct rapl_package *rp = (struct rapl_package *)info;
+ u32 l, h;
+
+ if (!boot_cpu_has(X86_FEATURE_PTS) || !boot_cpu_has(X86_FEATURE_PLN))
+ return;
+
+ /* irq enable state not saved, nothing to restore */
+ if (!(rp->power_limit_irq & PACKAGE_PLN_INT_SAVED))
+ return;
rdmsr_safe(MSR_IA32_PACKAGE_THERM_INTERRUPT, &l, &h);
@@ -991,19 +1011,6 @@ static void power_limit_irq_restore_cpu(void *info)
wrmsr_safe(MSR_IA32_PACKAGE_THERM_INTERRUPT, l, h);
}
-/* restore per package power limit interrupt enable state */
-static void package_power_limit_irq_restore(struct rapl_package *rp)
-{
- if (!boot_cpu_has(X86_FEATURE_PTS) || !boot_cpu_has(X86_FEATURE_PLN))
- return;
-
- /* irq enable state not saved, nothing to restore */
- if (!(rp->power_limit_irq & PACKAGE_PLN_INT_SAVED))
- return;
-
- smp_call_function_single(rp->lead_cpu, power_limit_irq_restore_cpu, rp, 1);
-}
-
static void set_floor_freq_default(struct rapl_domain *rd, bool mode)
{
int nr_powerlimit = find_nr_power_limit(rd);
@@ -1160,84 +1167,49 @@ static const struct x86_cpu_id rapl_ids[] __initconst = {
RAPL_CPU(INTEL_FAM6_ATOM_DENVERTON, rapl_defaults_core),
RAPL_CPU(INTEL_FAM6_XEON_PHI_KNL, rapl_defaults_hsw_server),
+ RAPL_CPU(INTEL_FAM6_XEON_PHI_KNM, rapl_defaults_hsw_server),
{}
};
MODULE_DEVICE_TABLE(x86cpu, rapl_ids);
-/* read once for all raw primitive data for all packages, domains */
-static void rapl_update_domain_data(void)
+/* Read once for all raw primitive data for domains */
+static void rapl_update_domain_data(struct rapl_package *rp)
{
int dmn, prim;
u64 val;
- struct rapl_package *rp;
- list_for_each_entry(rp, &rapl_packages, plist) {
- for (dmn = 0; dmn < rp->nr_domains; dmn++) {
- pr_debug("update package %d domain %s data\n", rp->id,
- rp->domains[dmn].name);
- /* exclude non-raw primitives */
- for (prim = 0; prim < NR_RAW_PRIMITIVES; prim++)
- if (!rapl_read_data_raw(&rp->domains[dmn], prim,
- rpi[prim].unit,
- &val))
- rp->domains[dmn].rdd.primitives[prim] =
- val;
+ for (dmn = 0; dmn < rp->nr_domains; dmn++) {
+ pr_debug("update package %d domain %s data\n", rp->id,
+ rp->domains[dmn].name);
+ /* exclude non-raw primitives */
+ for (prim = 0; prim < NR_RAW_PRIMITIVES; prim++) {
+ if (!rapl_read_data_raw(&rp->domains[dmn], prim,
+ rpi[prim].unit, &val))
+ rp->domains[dmn].rdd.primitives[prim] = val;
}
}
}
-static int rapl_unregister_powercap(void)
+static void rapl_unregister_powercap(void)
{
- struct rapl_package *rp;
- struct rapl_domain *rd, *rd_package = NULL;
-
- /* unregister all active rapl packages from the powercap layer,
- * hotplug lock held
- */
- list_for_each_entry(rp, &rapl_packages, plist) {
- package_power_limit_irq_restore(rp);
-
- for (rd = rp->domains; rd < rp->domains + rp->nr_domains;
- rd++) {
- pr_debug("remove package, undo power limit on %d: %s\n",
- rp->id, rd->name);
- rapl_write_data_raw(rd, PL1_ENABLE, 0);
- rapl_write_data_raw(rd, PL1_CLAMP, 0);
- if (find_nr_power_limit(rd) > 1) {
- rapl_write_data_raw(rd, PL2_ENABLE, 0);
- rapl_write_data_raw(rd, PL2_CLAMP, 0);
- }
- if (rd->id == RAPL_DOMAIN_PACKAGE) {
- rd_package = rd;
- continue;
- }
- powercap_unregister_zone(control_type, &rd->power_zone);
- }
- /* do the package zone last */
- if (rd_package)
- powercap_unregister_zone(control_type,
- &rd_package->power_zone);
- }
-
if (platform_rapl_domain) {
powercap_unregister_zone(control_type,
&platform_rapl_domain->power_zone);
kfree(platform_rapl_domain);
}
-
powercap_unregister_control_type(control_type);
-
- return 0;
}
static int rapl_package_register_powercap(struct rapl_package *rp)
{
struct rapl_domain *rd;
- int ret = 0;
char dev_name[17]; /* max domain name = 7 + 1 + 8 for int + 1 for null*/
struct powercap_zone *power_zone = NULL;
- int nr_pl;
+	int nr_pl, ret;
+
+ /* Update the domain data of the new package */
+ rapl_update_domain_data(rp);
/* first we register package domain as the parent zone*/
for (rd = rp->domains; rd < rp->domains + rp->nr_domains; rd++) {
@@ -1257,8 +1229,7 @@ static int rapl_package_register_powercap(struct rapl_package *rp)
if (IS_ERR(power_zone)) {
pr_debug("failed to register package, %d\n",
rp->id);
- ret = PTR_ERR(power_zone);
- goto exit_package;
+ return PTR_ERR(power_zone);
}
/* track parent zone in per package/socket data */
rp->power_zone = power_zone;
@@ -1268,8 +1239,7 @@ static int rapl_package_register_powercap(struct rapl_package *rp)
}
if (!power_zone) {
pr_err("no package domain found, unknown topology!\n");
- ret = -ENODEV;
- goto exit_package;
+ return -ENODEV;
}
/* now register domains as children of the socket/package*/
for (rd = rp->domains; rd < rp->domains + rp->nr_domains; rd++) {
@@ -1290,11 +1260,11 @@ static int rapl_package_register_powercap(struct rapl_package *rp)
goto err_cleanup;
}
}
+ return 0;
-exit_package:
- return ret;
err_cleanup:
- /* clean up previously initialized domains within the package if we
+ /*
+ * Clean up previously initialized domains within the package if we
* failed after the first domain setup.
*/
while (--rd >= rp->domains) {
@@ -1305,7 +1275,7 @@ err_cleanup:
return ret;
}
-static int rapl_register_psys(void)
+static int __init rapl_register_psys(void)
{
struct rapl_domain *rd;
struct powercap_zone *power_zone;
@@ -1346,40 +1316,14 @@ static int rapl_register_psys(void)
return 0;
}
-static int rapl_register_powercap(void)
+static int __init rapl_register_powercap(void)
{
- struct rapl_domain *rd;
- struct rapl_package *rp;
- int ret = 0;
-
control_type = powercap_register_control_type(NULL, "intel-rapl", NULL);
if (IS_ERR(control_type)) {
pr_debug("failed to register powercap control_type.\n");
return PTR_ERR(control_type);
}
- /* read the initial data */
- rapl_update_domain_data();
- list_for_each_entry(rp, &rapl_packages, plist)
- if (rapl_package_register_powercap(rp))
- goto err_cleanup_package;
-
- /* Don't bail out if PSys is not supported */
- rapl_register_psys();
-
- return ret;
-
-err_cleanup_package:
- /* clean up previously initialized packages */
- list_for_each_entry_continue_reverse(rp, &rapl_packages, plist) {
- for (rd = rp->domains; rd < rp->domains + rp->nr_domains;
- rd++) {
- pr_debug("unregister zone/package %d, %s domain\n",
- rp->id, rd->name);
- powercap_unregister_zone(control_type, &rd->power_zone);
- }
- }
-
- return ret;
+ return 0;
}
static int rapl_check_domain(int cpu, int domain)
@@ -1452,9 +1396,8 @@ static void rapl_detect_powerlimit(struct rapl_domain *rd)
*/
static int rapl_detect_domains(struct rapl_package *rp, int cpu)
{
- int i;
- int ret = 0;
struct rapl_domain *rd;
+ int i;
for (i = 0; i < RAPL_DOMAIN_MAX; i++) {
/* use physical package id to read counters */
@@ -1466,84 +1409,20 @@ static int rapl_detect_domains(struct rapl_package *rp, int cpu)
rp->nr_domains = bitmap_weight(&rp->domain_map, RAPL_DOMAIN_MAX);
if (!rp->nr_domains) {
pr_debug("no valid rapl domains found in package %d\n", rp->id);
- ret = -ENODEV;
- goto done;
+ return -ENODEV;
}
pr_debug("found %d domains on package %d\n", rp->nr_domains, rp->id);
rp->domains = kcalloc(rp->nr_domains + 1, sizeof(struct rapl_domain),
GFP_KERNEL);
- if (!rp->domains) {
- ret = -ENOMEM;
- goto done;
- }
+ if (!rp->domains)
+ return -ENOMEM;
+
rapl_init_domains(rp);
for (rd = rp->domains; rd < rp->domains + rp->nr_domains; rd++)
rapl_detect_powerlimit(rd);
-
-
-done:
- return ret;
-}
-
-static bool is_package_new(int package)
-{
- struct rapl_package *rp;
-
- /* caller prevents cpu hotplug, there will be no new packages added
- * or deleted while traversing the package list, no need for locking.
- */
- list_for_each_entry(rp, &rapl_packages, plist)
- if (package == rp->id)
- return false;
-
- return true;
-}
-
-/* RAPL interface can be made of a two-level hierarchy: package level and domain
- * level. We first detect the number of packages then domains of each package.
- * We have to consider the possiblity of CPU online/offline due to hotplug and
- * other scenarios.
- */
-static int rapl_detect_topology(void)
-{
- int i;
- int phy_package_id;
- struct rapl_package *new_package, *rp;
-
- for_each_online_cpu(i) {
- phy_package_id = topology_physical_package_id(i);
- if (is_package_new(phy_package_id)) {
- new_package = kzalloc(sizeof(*rp), GFP_KERNEL);
- if (!new_package) {
- rapl_cleanup_data();
- return -ENOMEM;
- }
- /* add the new package to the list */
- new_package->id = phy_package_id;
- new_package->nr_cpus = 1;
- /* use the first active cpu of the package to access */
- new_package->lead_cpu = i;
- /* check if the package contains valid domains */
- if (rapl_detect_domains(new_package, i) ||
- rapl_defaults->check_unit(new_package, i)) {
- kfree(new_package->domains);
- kfree(new_package);
- /* free up the packages already initialized */
- rapl_cleanup_data();
- return -ENODEV;
- }
- INIT_LIST_HEAD(&new_package->plist);
- list_add(&new_package->plist, &rapl_packages);
- } else {
- rp = find_package_by_id(phy_package_id);
- if (rp)
- ++rp->nr_cpus;
- }
- }
-
return 0;
}
@@ -1552,12 +1431,21 @@ static void rapl_remove_package(struct rapl_package *rp)
{
struct rapl_domain *rd, *rd_package = NULL;
+ package_power_limit_irq_restore(rp);
+
for (rd = rp->domains; rd < rp->domains + rp->nr_domains; rd++) {
+ rapl_write_data_raw(rd, PL1_ENABLE, 0);
+ rapl_write_data_raw(rd, PL1_CLAMP, 0);
+ if (find_nr_power_limit(rd) > 1) {
+ rapl_write_data_raw(rd, PL2_ENABLE, 0);
+ rapl_write_data_raw(rd, PL2_CLAMP, 0);
+ }
if (rd->id == RAPL_DOMAIN_PACKAGE) {
rd_package = rd;
continue;
}
- pr_debug("remove package %d, %s domain\n", rp->id, rd->name);
+ pr_debug("remove package, undo power limit on %d: %s\n",
+ rp->id, rd->name);
powercap_unregister_zone(control_type, &rd->power_zone);
}
/* do parent zone last */
@@ -1567,20 +1455,17 @@ static void rapl_remove_package(struct rapl_package *rp)
}
/* called from CPU hotplug notifier, hotplug lock held */
-static int rapl_add_package(int cpu)
+static struct rapl_package *rapl_add_package(int cpu, int pkgid)
{
- int ret = 0;
- int phy_package_id;
struct rapl_package *rp;
+ int ret;
- phy_package_id = topology_physical_package_id(cpu);
rp = kzalloc(sizeof(struct rapl_package), GFP_KERNEL);
if (!rp)
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
/* add the new package to the list */
- rp->id = phy_package_id;
- rp->nr_cpus = 1;
+ rp->id = pkgid;
rp->lead_cpu = cpu;
/* check if the package contains valid domains */
@@ -1589,17 +1474,17 @@ static int rapl_add_package(int cpu)
ret = -ENODEV;
goto err_free_package;
}
- if (!rapl_package_register_powercap(rp)) {
+ ret = rapl_package_register_powercap(rp);
+ if (!ret) {
INIT_LIST_HEAD(&rp->plist);
list_add(&rp->plist, &rapl_packages);
- return ret;
+ return rp;
}
err_free_package:
kfree(rp->domains);
kfree(rp);
-
- return ret;
+ return ERR_PTR(ret);
}
/* Handles CPU hotplug on multi-socket systems.
@@ -1609,55 +1494,46 @@ err_free_package:
* associated domains. Cooling devices are handled accordingly at
* per-domain level.
*/
-static int rapl_cpu_callback(struct notifier_block *nfb,
- unsigned long action, void *hcpu)
+static int rapl_cpu_online(unsigned int cpu)
{
- unsigned long cpu = (unsigned long)hcpu;
- int phy_package_id;
+ int pkgid = topology_physical_package_id(cpu);
struct rapl_package *rp;
- int lead_cpu;
- phy_package_id = topology_physical_package_id(cpu);
- switch (action) {
- case CPU_ONLINE:
- case CPU_ONLINE_FROZEN:
- case CPU_DOWN_FAILED:
- case CPU_DOWN_FAILED_FROZEN:
- rp = find_package_by_id(phy_package_id);
- if (rp)
- ++rp->nr_cpus;
- else
- rapl_add_package(cpu);
- break;
- case CPU_DOWN_PREPARE:
- case CPU_DOWN_PREPARE_FROZEN:
- rp = find_package_by_id(phy_package_id);
- if (!rp)
- break;
- if (--rp->nr_cpus == 0)
- rapl_remove_package(rp);
- else if (cpu == rp->lead_cpu) {
- /* choose another active cpu in the package */
- lead_cpu = cpumask_any_but(topology_core_cpumask(cpu), cpu);
- if (lead_cpu < nr_cpu_ids)
- rp->lead_cpu = lead_cpu;
- else /* should never go here */
- pr_err("no active cpu available for package %d\n",
- phy_package_id);
- }
+ rp = find_package_by_id(pkgid);
+ if (!rp) {
+ rp = rapl_add_package(cpu, pkgid);
+ if (IS_ERR(rp))
+ return PTR_ERR(rp);
}
+ cpumask_set_cpu(cpu, &rp->cpumask);
+ return 0;
+}
+
+static int rapl_cpu_down_prep(unsigned int cpu)
+{
+ int pkgid = topology_physical_package_id(cpu);
+ struct rapl_package *rp;
+ int lead_cpu;
+
+ rp = find_package_by_id(pkgid);
+ if (!rp)
+ return 0;
- return NOTIFY_OK;
+ cpumask_clear_cpu(cpu, &rp->cpumask);
+ lead_cpu = cpumask_first(&rp->cpumask);
+ if (lead_cpu >= nr_cpu_ids)
+ rapl_remove_package(rp);
+ else if (rp->lead_cpu == cpu)
+ rp->lead_cpu = lead_cpu;
+ return 0;
}
-static struct notifier_block rapl_cpu_notifier = {
- .notifier_call = rapl_cpu_callback,
-};
+static enum cpuhp_state pcap_rapl_online;
static int __init rapl_init(void)
{
- int ret = 0;
const struct x86_cpu_id *id;
+ int ret;
id = x86_match_cpu(rapl_ids);
if (!id) {
@@ -1669,36 +1545,29 @@ static int __init rapl_init(void)
rapl_defaults = (struct rapl_defaults *)id->driver_data;
- cpu_notifier_register_begin();
-
- /* prevent CPU hotplug during detection */
- get_online_cpus();
- ret = rapl_detect_topology();
+ ret = rapl_register_powercap();
if (ret)
- goto done;
+ return ret;
- if (rapl_register_powercap()) {
- rapl_cleanup_data();
- ret = -ENODEV;
- goto done;
- }
- __register_hotcpu_notifier(&rapl_cpu_notifier);
-done:
- put_online_cpus();
- cpu_notifier_register_done();
+ ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "powercap/rapl:online",
+ rapl_cpu_online, rapl_cpu_down_prep);
+ if (ret < 0)
+ goto err_unreg;
+ pcap_rapl_online = ret;
+
+ /* Don't bail out if PSys is not supported */
+ rapl_register_psys();
+ return 0;
+err_unreg:
+ rapl_unregister_powercap();
return ret;
}
static void __exit rapl_exit(void)
{
- cpu_notifier_register_begin();
- get_online_cpus();
- __unregister_hotcpu_notifier(&rapl_cpu_notifier);
+ cpuhp_remove_state(pcap_rapl_online);
rapl_unregister_powercap();
- rapl_cleanup_data();
- put_online_cpus();
- cpu_notifier_register_done();
}
module_init(rapl_init);
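
The hunks above convert intel_rapl from the legacy CPU notifier to the cpuhp state machine. As a minimal sketch of that pattern, assuming a hypothetical module (the demo_* names are illustrative, not part of this patch):

#include <linux/cpuhotplug.h>
#include <linux/module.h>

static enum cpuhp_state demo_online_state;

/* Runs on every CPU that comes online, and once per online CPU at setup. */
static int demo_cpu_online(unsigned int cpu)
{
	pr_info("demo: cpu %u online\n", cpu);
	return 0;		/* a negative return rolls the setup back */
}

/* Runs before a CPU is taken down. */
static int demo_cpu_down_prep(unsigned int cpu)
{
	pr_info("demo: cpu %u going down\n", cpu);
	return 0;
}

static int __init demo_init(void)
{
	int ret;

	/*
	 * CPUHP_AP_ONLINE_DYN requests a dynamically allocated state slot;
	 * the positive return value is the slot id, kept for removal.
	 */
	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "demo:online",
				demo_cpu_online, demo_cpu_down_prep);
	if (ret < 0)
		return ret;
	demo_online_state = ret;
	return 0;
}

static void __exit demo_exit(void)
{
	/* Calls demo_cpu_down_prep() on each online CPU, frees the slot. */
	cpuhp_remove_state(demo_online_state);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");

This mirrors why rapl_init() stores the cpuhp_setup_state() return value in pcap_rapl_online, and why rapl_exit() needs only cpuhp_remove_state() plus the powercap teardown.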
diff --git a/drivers/ptp/Kconfig b/drivers/ptp/Kconfig
index ee3de3421f2d..bdce33291161 100644
--- a/drivers/ptp/Kconfig
+++ b/drivers/ptp/Kconfig
@@ -6,7 +6,7 @@ menu "PTP clock support"
config PTP_1588_CLOCK
tristate "PTP clock support"
- depends on NET
+ depends on NET && POSIX_TIMERS
select PPS
select NET_PTP_CLASSIFY
help
@@ -28,7 +28,7 @@ config PTP_1588_CLOCK
config PTP_1588_CLOCK_GIANFAR
tristate "Freescale eTSEC as PTP clock"
depends on GIANFAR
- select PTP_1588_CLOCK
+ depends on PTP_1588_CLOCK
default y
help
This driver adds support for using the eTSEC as a PTP
@@ -42,7 +42,7 @@ config PTP_1588_CLOCK_GIANFAR
config PTP_1588_CLOCK_IXP46X
tristate "Intel IXP46x as PTP clock"
depends on IXP4XX_ETH
- select PTP_1588_CLOCK
+ depends on PTP_1588_CLOCK
default y
help
This driver adds support for using the IXP46X as a PTP
@@ -60,7 +60,7 @@ config DP83640_PHY
tristate "Driver for the National Semiconductor DP83640 PHYTER"
depends on NETWORK_PHY_TIMESTAMPING
depends on PHYLIB
- select PTP_1588_CLOCK
+ depends on PTP_1588_CLOCK
---help---
Supports the DP83640 PHYTER with IEEE 1588 features.
@@ -76,7 +76,7 @@ config PTP_1588_CLOCK_PCH
tristate "Intel PCH EG20T as PTP clock"
depends on X86_32 || COMPILE_TEST
depends on HAS_IOMEM && NET
- select PTP_1588_CLOCK
+ imply PTP_1588_CLOCK
help
This driver adds support for using the PCH EG20T as a PTP
clock. The hardware supports time stamping of PTP packets
diff --git a/drivers/ptp/ptp_clock.c b/drivers/ptp/ptp_clock.c
index 86280b7e41f3..9c13381b6966 100644
--- a/drivers/ptp/ptp_clock.c
+++ b/drivers/ptp/ptp_clock.c
@@ -153,7 +153,10 @@ static int ptp_clock_adjtime(struct posix_clock *pc, struct timex *tx)
s32 ppb = scaled_ppm_to_ppb(tx->freq);
if (ppb > ops->max_adj || ppb < -ops->max_adj)
return -ERANGE;
- err = ops->adjfreq(ops, ppb);
+ if (ops->adjfine)
+ err = ops->adjfine(ops, tx->freq);
+ else
+ err = ops->adjfreq(ops, ppb);
ptp->dialed_frequency = tx->freq;
} else if (tx->modes == 0) {
tx->freq = ptp->dialed_frequency;
diff --git a/drivers/ptp/ptp_sysfs.c b/drivers/ptp/ptp_sysfs.c
index 302e626fe6b0..53d43954a974 100644
--- a/drivers/ptp/ptp_sysfs.c
+++ b/drivers/ptp/ptp_sysfs.c
@@ -28,7 +28,7 @@ static ssize_t clock_name_show(struct device *dev,
struct ptp_clock *ptp = dev_get_drvdata(dev);
return snprintf(page, PAGE_SIZE-1, "%s\n", ptp->info->name);
}
-static DEVICE_ATTR(clock_name, 0444, clock_name_show, NULL);
+static DEVICE_ATTR_RO(clock_name);
#define PTP_SHOW_INT(name, var) \
static ssize_t var##_show(struct device *dev, \
diff --git a/drivers/pwm/Kconfig b/drivers/pwm/Kconfig
index bf0128899c09..f92dd41b0395 100644
--- a/drivers/pwm/Kconfig
+++ b/drivers/pwm/Kconfig
@@ -175,6 +175,15 @@ config PWM_FSL_FTM
To compile this driver as a module, choose M here: the module
will be called pwm-fsl-ftm.
+config PWM_HIBVT
+ tristate "HiSilicon BVT PWM support"
+ depends on ARCH_HISI || COMPILE_TEST
+ help
+ Generic PWM framework driver for HiSilicon BVT SoCs.
+
+ To compile this driver as a module, choose M here: the module
+ will be called pwm-hibvt.
+
config PWM_IMG
tristate "Imagination Technologies PWM driver"
depends on HAS_IOMEM
diff --git a/drivers/pwm/Makefile b/drivers/pwm/Makefile
index 1194c54efcc2..a48bdb517792 100644
--- a/drivers/pwm/Makefile
+++ b/drivers/pwm/Makefile
@@ -15,6 +15,7 @@ obj-$(CONFIG_PWM_CRC) += pwm-crc.o
obj-$(CONFIG_PWM_CROS_EC) += pwm-cros-ec.o
obj-$(CONFIG_PWM_EP93XX) += pwm-ep93xx.o
obj-$(CONFIG_PWM_FSL_FTM) += pwm-fsl-ftm.o
+obj-$(CONFIG_PWM_HIBVT) += pwm-hibvt.o
obj-$(CONFIG_PWM_IMG) += pwm-img.o
obj-$(CONFIG_PWM_IMX) += pwm-imx.o
obj-$(CONFIG_PWM_JZ4740) += pwm-jz4740.o
diff --git a/drivers/pwm/pwm-hibvt.c b/drivers/pwm/pwm-hibvt.c
new file mode 100644
index 000000000000..d0e8f8542626
--- /dev/null
+++ b/drivers/pwm/pwm-hibvt.c
@@ -0,0 +1,271 @@
+/*
+ * PWM Controller Driver for HiSilicon BVT SoCs
+ *
+ * Copyright (c) 2016 HiSilicon Technologies Co., Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/bitops.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/pwm.h>
+#include <linux/reset.h>
+
+#define PWM_CFG0_ADDR(x) (((x) * 0x20) + 0x0)
+#define PWM_CFG1_ADDR(x) (((x) * 0x20) + 0x4)
+#define PWM_CFG2_ADDR(x) (((x) * 0x20) + 0x8)
+#define PWM_CTRL_ADDR(x) (((x) * 0x20) + 0xC)
+
+#define PWM_ENABLE_SHIFT 0
+#define PWM_ENABLE_MASK BIT(0)
+
+#define PWM_POLARITY_SHIFT 1
+#define PWM_POLARITY_MASK BIT(1)
+
+#define PWM_KEEP_SHIFT 2
+#define PWM_KEEP_MASK BIT(2)
+
+#define PWM_PERIOD_MASK GENMASK(31, 0)
+#define PWM_DUTY_MASK GENMASK(31, 0)
+
+struct hibvt_pwm_chip {
+ struct pwm_chip chip;
+ struct clk *clk;
+ void __iomem *base;
+ struct reset_control *rstc;
+};
+
+struct hibvt_pwm_soc {
+ u32 num_pwms;
+};
+
+static const struct hibvt_pwm_soc pwm_soc[2] = {
+ { .num_pwms = 4 },
+ { .num_pwms = 8 },
+};
+
+static inline struct hibvt_pwm_chip *to_hibvt_pwm_chip(struct pwm_chip *chip)
+{
+ return container_of(chip, struct hibvt_pwm_chip, chip);
+}
+
+static void hibvt_pwm_set_bits(void __iomem *base, u32 offset,
+ u32 mask, u32 data)
+{
+ void __iomem *address = base + offset;
+ u32 value;
+
+ value = readl(address);
+ value &= ~mask;
+ value |= (data & mask);
+ writel(value, address);
+}
+
+static void hibvt_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm)
+{
+ struct hibvt_pwm_chip *hi_pwm_chip = to_hibvt_pwm_chip(chip);
+
+ hibvt_pwm_set_bits(hi_pwm_chip->base, PWM_CTRL_ADDR(pwm->hwpwm),
+ PWM_ENABLE_MASK, 0x1);
+}
+
+static void hibvt_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm)
+{
+ struct hibvt_pwm_chip *hi_pwm_chip = to_hibvt_pwm_chip(chip);
+
+ hibvt_pwm_set_bits(hi_pwm_chip->base, PWM_CTRL_ADDR(pwm->hwpwm),
+ PWM_ENABLE_MASK, 0x0);
+}
+
+static void hibvt_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
+ int duty_cycle_ns, int period_ns)
+{
+ struct hibvt_pwm_chip *hi_pwm_chip = to_hibvt_pwm_chip(chip);
+ u32 freq, period, duty;
+
+ freq = div_u64(clk_get_rate(hi_pwm_chip->clk), 1000000);
+
+ period = div_u64(freq * period_ns, 1000);
+ duty = div_u64(period * duty_cycle_ns, period_ns);
+
+ hibvt_pwm_set_bits(hi_pwm_chip->base, PWM_CFG0_ADDR(pwm->hwpwm),
+ PWM_PERIOD_MASK, period);
+
+ hibvt_pwm_set_bits(hi_pwm_chip->base, PWM_CFG1_ADDR(pwm->hwpwm),
+ PWM_DUTY_MASK, duty);
+}
+
+static void hibvt_pwm_set_polarity(struct pwm_chip *chip,
+ struct pwm_device *pwm,
+ enum pwm_polarity polarity)
+{
+ struct hibvt_pwm_chip *hi_pwm_chip = to_hibvt_pwm_chip(chip);
+
+ if (polarity == PWM_POLARITY_INVERSED)
+ hibvt_pwm_set_bits(hi_pwm_chip->base, PWM_CTRL_ADDR(pwm->hwpwm),
+ PWM_POLARITY_MASK, (0x1 << PWM_POLARITY_SHIFT));
+ else
+ hibvt_pwm_set_bits(hi_pwm_chip->base, PWM_CTRL_ADDR(pwm->hwpwm),
+ PWM_POLARITY_MASK, (0x0 << PWM_POLARITY_SHIFT));
+}
+
+static void hibvt_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
+ struct pwm_state *state)
+{
+ struct hibvt_pwm_chip *hi_pwm_chip = to_hibvt_pwm_chip(chip);
+ void __iomem *base;
+ u32 freq, value;
+
+ freq = div_u64(clk_get_rate(hi_pwm_chip->clk), 1000000);
+ base = hi_pwm_chip->base;
+
+ value = readl(base + PWM_CFG0_ADDR(pwm->hwpwm));
+ state->period = div_u64(value * 1000, freq);
+
+ value = readl(base + PWM_CFG1_ADDR(pwm->hwpwm));
+ state->duty_cycle = div_u64(value * 1000, freq);
+
+ value = readl(base + PWM_CTRL_ADDR(pwm->hwpwm));
+ state->enabled = (PWM_ENABLE_MASK & value);
+}
+
+static int hibvt_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
+ struct pwm_state *state)
+{
+ if (state->polarity != pwm->state.polarity)
+ hibvt_pwm_set_polarity(chip, pwm, state->polarity);
+
+ if (state->period != pwm->state.period ||
+ state->duty_cycle != pwm->state.duty_cycle)
+ hibvt_pwm_config(chip, pwm, state->duty_cycle, state->period);
+
+ if (state->enabled != pwm->state.enabled) {
+ if (state->enabled)
+ hibvt_pwm_enable(chip, pwm);
+ else
+ hibvt_pwm_disable(chip, pwm);
+ }
+
+ return 0;
+}
+
+static struct pwm_ops hibvt_pwm_ops = {
+ .get_state = hibvt_pwm_get_state,
+ .apply = hibvt_pwm_apply,
+
+ .owner = THIS_MODULE,
+};
+
+static int hibvt_pwm_probe(struct platform_device *pdev)
+{
+ const struct hibvt_pwm_soc *soc =
+ of_device_get_match_data(&pdev->dev);
+ struct hibvt_pwm_chip *pwm_chip;
+ struct resource *res;
+ int ret;
+ int i;
+
+ pwm_chip = devm_kzalloc(&pdev->dev, sizeof(*pwm_chip), GFP_KERNEL);
+ if (pwm_chip == NULL)
+ return -ENOMEM;
+
+ pwm_chip->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(pwm_chip->clk)) {
+ dev_err(&pdev->dev, "getting clock failed with %ld\n",
+ PTR_ERR(pwm_chip->clk));
+ return PTR_ERR(pwm_chip->clk);
+ }
+
+ pwm_chip->chip.ops = &hibvt_pwm_ops;
+ pwm_chip->chip.dev = &pdev->dev;
+ pwm_chip->chip.base = -1;
+ pwm_chip->chip.npwm = soc->num_pwms;
+ pwm_chip->chip.of_xlate = of_pwm_xlate_with_flags;
+ pwm_chip->chip.of_pwm_n_cells = 3;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ pwm_chip->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(pwm_chip->base))
+ return PTR_ERR(pwm_chip->base);
+
+ ret = clk_prepare_enable(pwm_chip->clk);
+ if (ret < 0)
+ return ret;
+
+ pwm_chip->rstc = devm_reset_control_get(&pdev->dev, NULL);
+ if (IS_ERR(pwm_chip->rstc)) {
+ clk_disable_unprepare(pwm_chip->clk);
+ return PTR_ERR(pwm_chip->rstc);
+ }
+
+ reset_control_assert(pwm_chip->rstc);
+ msleep(30);
+ reset_control_deassert(pwm_chip->rstc);
+
+ ret = pwmchip_add(&pwm_chip->chip);
+ if (ret < 0) {
+ clk_disable_unprepare(pwm_chip->clk);
+ return ret;
+ }
+
+ for (i = 0; i < pwm_chip->chip.npwm; i++) {
+ hibvt_pwm_set_bits(pwm_chip->base, PWM_CTRL_ADDR(i),
+ PWM_KEEP_MASK, (0x1 << PWM_KEEP_SHIFT));
+ }
+
+ platform_set_drvdata(pdev, pwm_chip);
+
+ return 0;
+}
+
+static int hibvt_pwm_remove(struct platform_device *pdev)
+{
+ struct hibvt_pwm_chip *pwm_chip;
+
+ pwm_chip = platform_get_drvdata(pdev);
+
+ reset_control_assert(pwm_chip->rstc);
+ msleep(30);
+ reset_control_deassert(pwm_chip->rstc);
+
+ clk_disable_unprepare(pwm_chip->clk);
+
+ return pwmchip_remove(&pwm_chip->chip);
+}
+
+static const struct of_device_id hibvt_pwm_of_match[] = {
+ { .compatible = "hisilicon,hi3516cv300-pwm", .data = &pwm_soc[0] },
+ { .compatible = "hisilicon,hi3519v100-pwm", .data = &pwm_soc[1] },
+ { }
+};
+MODULE_DEVICE_TABLE(of, hibvt_pwm_of_match);
+
+static struct platform_driver hibvt_pwm_driver = {
+ .driver = {
+ .name = "hibvt-pwm",
+ .of_match_table = hibvt_pwm_of_match,
+ },
+ .probe = hibvt_pwm_probe,
+ .remove = hibvt_pwm_remove,
+};
+module_platform_driver(hibvt_pwm_driver);
+
+MODULE_AUTHOR("Jian Yuan");
+MODULE_DESCRIPTION("HiSilicon BVT SoCs PWM driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/pwm/pwm-meson.c b/drivers/pwm/pwm-meson.c
index 9d5bd7d5c610..045ef9fa6fe3 100644
--- a/drivers/pwm/pwm-meson.c
+++ b/drivers/pwm/pwm-meson.c
@@ -524,7 +524,6 @@ static struct platform_driver meson_pwm_driver = {
};
module_platform_driver(meson_pwm_driver);
-MODULE_ALIAS("platform:meson-pwm");
MODULE_DESCRIPTION("Amlogic Meson PWM Generator driver");
MODULE_AUTHOR("Neil Armstrong <narmstrong@baylibre.com>");
MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/regulator/Makefile b/drivers/regulator/Makefile
index 2142a5d3fc08..14294692beb9 100644
--- a/drivers/regulator/Makefile
+++ b/drivers/regulator/Makefile
@@ -104,7 +104,7 @@ obj-$(CONFIG_REGULATOR_TPS6586X) += tps6586x-regulator.o
obj-$(CONFIG_REGULATOR_TPS65910) += tps65910-regulator.o
obj-$(CONFIG_REGULATOR_TPS65912) += tps65912-regulator.o
obj-$(CONFIG_REGULATOR_TPS80031) += tps80031-regulator.o
-obj-$(CONFIG_REGULATOR_TWL4030) += twl-regulator.o
+obj-$(CONFIG_REGULATOR_TWL4030) += twl-regulator.o twl6030-regulator.o
obj-$(CONFIG_REGULATOR_VEXPRESS) += vexpress-regulator.o
obj-$(CONFIG_REGULATOR_WM831X) += wm831x-dcdc.o
obj-$(CONFIG_REGULATOR_WM831X) += wm831x-isink.o
diff --git a/drivers/regulator/arizona-ldo1.c b/drivers/regulator/arizona-ldo1.c
index f7c88ff90c43..302b57cb89c6 100644
--- a/drivers/regulator/arizona-ldo1.c
+++ b/drivers/regulator/arizona-ldo1.c
@@ -130,6 +130,7 @@ static const struct regulator_desc arizona_ldo1_hc = {
.uV_step = 50000,
.n_voltages = 8,
.enable_time = 1500,
+ .ramp_delay = 24000,
.owner = THIS_MODULE,
};
@@ -153,6 +154,7 @@ static const struct regulator_desc arizona_ldo1 = {
.uV_step = 25000,
.n_voltages = 13,
.enable_time = 500,
+ .ramp_delay = 24000,
.owner = THIS_MODULE,
};
diff --git a/drivers/regulator/axp20x-regulator.c b/drivers/regulator/axp20x-regulator.c
index 54382ef902c6..e6a512ebeae2 100644
--- a/drivers/regulator/axp20x-regulator.c
+++ b/drivers/regulator/axp20x-regulator.c
@@ -337,10 +337,18 @@ static const struct regulator_desc axp809_regulators[] = {
AXP22X_ELDO2_V_OUT, 0x1f, AXP22X_PWR_OUT_CTRL2, BIT(1)),
AXP_DESC(AXP809, ELDO3, "eldo3", "eldoin", 700, 3300, 100,
AXP22X_ELDO3_V_OUT, 0x1f, AXP22X_PWR_OUT_CTRL2, BIT(2)),
- AXP_DESC_IO(AXP809, LDO_IO0, "ldo_io0", "ips", 700, 3300, 100,
+ /*
+ * Note the datasheet only guarantees reliable operation up to
+ * 3.3V, this needs to be enforced via dts provided constraints
+ */
+ AXP_DESC_IO(AXP809, LDO_IO0, "ldo_io0", "ips", 700, 3800, 100,
AXP22X_LDO_IO0_V_OUT, 0x1f, AXP20X_GPIO0_CTRL, 0x07,
AXP22X_IO_ENABLED, AXP22X_IO_DISABLED),
- AXP_DESC_IO(AXP809, LDO_IO1, "ldo_io1", "ips", 700, 3300, 100,
+ /*
+ * Note the datasheet only guarantees reliable operation up to
+ * 3.3V, this needs to be enforced via dts provided constraints
+ */
+ AXP_DESC_IO(AXP809, LDO_IO1, "ldo_io1", "ips", 700, 3800, 100,
AXP22X_LDO_IO1_V_OUT, 0x1f, AXP20X_GPIO1_CTRL, 0x07,
AXP22X_IO_ENABLED, AXP22X_IO_DISABLED),
AXP_DESC_FIXED(AXP809, RTC_LDO, "rtc_ldo", "ips", 1800),
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index 5c1519b229e0..04baac9a165b 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -204,7 +204,7 @@ static struct device_node *of_get_regulator(struct device *dev, const char *supp
regnode = of_parse_phandle(dev->of_node, prop_name, 0);
if (!regnode) {
- dev_dbg(dev, "Looking up %s property in node %s failed",
+ dev_dbg(dev, "Looking up %s property in node %s failed\n",
prop_name, dev->of_node->full_name);
return NULL;
}
@@ -293,7 +293,8 @@ static int regulator_check_current_limit(struct regulator_dev *rdev,
}
/* operating mode constraint check */
-static int regulator_mode_constrain(struct regulator_dev *rdev, int *mode)
+static int regulator_mode_constrain(struct regulator_dev *rdev,
+ unsigned int *mode)
{
switch (*mode) {
case REGULATOR_MODE_FAST:
@@ -3359,6 +3360,39 @@ unsigned int regulator_get_mode(struct regulator *regulator)
}
EXPORT_SYMBOL_GPL(regulator_get_mode);
+static int _regulator_get_error_flags(struct regulator_dev *rdev,
+ unsigned int *flags)
+{
+ int ret;
+
+ mutex_lock(&rdev->mutex);
+
+ /* sanity check */
+ if (!rdev->desc->ops->get_error_flags) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ret = rdev->desc->ops->get_error_flags(rdev, flags);
+out:
+ mutex_unlock(&rdev->mutex);
+ return ret;
+}
+
+/**
+ * regulator_get_error_flags - get regulator error information
+ * @regulator: regulator source
+ * @flags: pointer to store error flags
+ *
+ * Get the current regulator error information.
+ */
+int regulator_get_error_flags(struct regulator *regulator,
+ unsigned int *flags)
+{
+ return _regulator_get_error_flags(regulator->rdev, flags);
+}
+EXPORT_SYMBOL_GPL(regulator_get_error_flags);
+
/**
* regulator_set_load - set regulator load
* @regulator: regulator source
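
regulator_get_error_flags() gives consumers a snapshot of a regulator's fault state. A hedged consumer sketch; the supply name is hypothetical and the REGULATOR_ERROR_* flag names are assumed from the companion consumer.h change in this series:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/regulator/consumer.h>

static int demo_check_supply(struct device *dev)
{
	struct regulator *reg;
	unsigned int flags;
	int ret;

	reg = devm_regulator_get(dev, "vdd-demo");	/* hypothetical supply */
	if (IS_ERR(reg))
		return PTR_ERR(reg);

	/* Returns -EINVAL when the driver has no get_error_flags op. */
	ret = regulator_get_error_flags(reg, &flags);
	if (ret)
		return ret;

	if (flags & REGULATOR_ERROR_UNDER_VOLTAGE)
		dev_warn(dev, "vdd-demo reports under-voltage\n");
	if (flags & REGULATOR_ERROR_OVER_CURRENT)
		dev_warn(dev, "vdd-demo reports over-current\n");

	return 0;
}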
diff --git a/drivers/regulator/fixed.c b/drivers/regulator/fixed.c
index 988a7472c2ab..a43b0e8a438d 100644
--- a/drivers/regulator/fixed.c
+++ b/drivers/regulator/fixed.c
@@ -30,6 +30,9 @@
#include <linux/of_gpio.h>
#include <linux/regulator/of_regulator.h>
#include <linux/regulator/machine.h>
+#include <linux/acpi.h>
+#include <linux/property.h>
+#include <linux/gpio/consumer.h>
struct fixed_voltage_data {
struct regulator_desc desc;
@@ -94,6 +97,44 @@ of_get_fixed_voltage_config(struct device *dev,
return config;
}
+/**
+ * acpi_get_fixed_voltage_config - extract fixed_voltage_config structure info
+ * @dev: device requesting for fixed_voltage_config
+ * @desc: regulator description
+ *
+ * Populates fixed_voltage_config structure by extracting data through ACPI
+ * interface. Returns a pointer to the populated structure, or an ERR_PTR()
+ * encoded error when allocation or GPIO lookup fails.
+ */
+static struct fixed_voltage_config *
+acpi_get_fixed_voltage_config(struct device *dev,
+ const struct regulator_desc *desc)
+{
+ struct fixed_voltage_config *config;
+ const char *supply_name;
+ struct gpio_desc *gpiod;
+ int ret;
+
+ config = devm_kzalloc(dev, sizeof(*config), GFP_KERNEL);
+ if (!config)
+ return ERR_PTR(-ENOMEM);
+
+ ret = device_property_read_string(dev, "supply-name", &supply_name);
+ if (!ret)
+ config->supply_name = supply_name;
+
+ gpiod = gpiod_get(dev, "gpio", GPIOD_ASIS);
+ if (IS_ERR(gpiod))
+ return ERR_PTR(-ENODEV);
+
+ config->gpio = desc_to_gpio(gpiod);
+ config->enable_high = device_property_read_bool(dev,
+ "enable-active-high");
+ gpiod_put(gpiod);
+
+ return config;
+}
+
static struct regulator_ops fixed_voltage_ops = {
};
@@ -114,6 +155,11 @@ static int reg_fixed_voltage_probe(struct platform_device *pdev)
&drvdata->desc);
if (IS_ERR(config))
return PTR_ERR(config);
+ } else if (ACPI_HANDLE(&pdev->dev)) {
+ config = acpi_get_fixed_voltage_config(&pdev->dev,
+ &drvdata->desc);
+ if (IS_ERR(config))
+ return PTR_ERR(config);
} else {
config = dev_get_platdata(&pdev->dev);
}
diff --git a/drivers/regulator/gpio-regulator.c b/drivers/regulator/gpio-regulator.c
index 83e89e5d4752..0fce06acfaec 100644
--- a/drivers/regulator/gpio-regulator.c
+++ b/drivers/regulator/gpio-regulator.c
@@ -162,8 +162,8 @@ of_get_gpio_regulator_config(struct device *dev, struct device_node *np,
of_property_read_u32(np, "startup-delay-us", &config->startup_delay);
config->enable_gpio = of_get_named_gpio(np, "enable-gpio", 0);
- if (config->enable_gpio == -EPROBE_DEFER)
- return ERR_PTR(-EPROBE_DEFER);
+ if (config->enable_gpio < 0 && config->enable_gpio != -ENOENT)
+ return ERR_PTR(config->enable_gpio);
/* Fetch GPIOs. - optional property*/
ret = of_gpio_count(np);
@@ -190,8 +190,11 @@ of_get_gpio_regulator_config(struct device *dev, struct device_node *np,
for (i = 0; i < config->nr_gpios; i++) {
gpio = of_get_named_gpio(np, "gpios", i);
- if (gpio < 0)
+ if (gpio < 0) {
+ if (gpio != -ENOENT)
+ return ERR_PTR(gpio);
break;
+ }
config->gpios[i].gpio = gpio;
if (proplen > 0) {
of_property_read_u32_index(np, "gpios-states",
diff --git a/drivers/regulator/helpers.c b/drivers/regulator/helpers.c
index bcf38fd5106a..379cdacc05d8 100644
--- a/drivers/regulator/helpers.c
+++ b/drivers/regulator/helpers.c
@@ -454,13 +454,17 @@ EXPORT_SYMBOL_GPL(regulator_set_bypass_regmap);
int regulator_get_bypass_regmap(struct regulator_dev *rdev, bool *enable)
{
unsigned int val;
+ unsigned int val_on = rdev->desc->bypass_val_on;
int ret;
ret = regmap_read(rdev->regmap, rdev->desc->bypass_reg, &val);
if (ret != 0)
return ret;
- *enable = (val & rdev->desc->bypass_mask) == rdev->desc->bypass_val_on;
+ if (!val_on)
+ val_on = rdev->desc->bypass_mask;
+
+ *enable = (val & rdev->desc->bypass_mask) == val_on;
return 0;
}
diff --git a/drivers/regulator/lp873x-regulator.c b/drivers/regulator/lp873x-regulator.c
index e504b9148226..70e3df653381 100644
--- a/drivers/regulator/lp873x-regulator.c
+++ b/drivers/regulator/lp873x-regulator.c
@@ -24,6 +24,7 @@
[_id] = { \
.desc = { \
.name = _name, \
+ .supply_name = _of "-in", \
.id = _id, \
.of_match = of_match_ptr(_of), \
.regulators_node = of_match_ptr("regulators"),\
diff --git a/drivers/regulator/max77620-regulator.c b/drivers/regulator/max77620-regulator.c
index a1b49a6d538f..d088a7c79e60 100644
--- a/drivers/regulator/max77620-regulator.c
+++ b/drivers/regulator/max77620-regulator.c
@@ -73,7 +73,6 @@ struct max77620_regulator_info {
};
struct max77620_regulator_pdata {
- struct regulator_init_data *reg_idata;
int active_fps_src;
int active_fps_pd_slot;
int active_fps_pu_slot;
@@ -81,6 +80,7 @@ struct max77620_regulator_pdata {
int suspend_fps_pd_slot;
int suspend_fps_pu_slot;
int current_mode;
+ int power_ok;
int ramp_rate_setting;
};
@@ -351,11 +351,48 @@ static int max77620_set_slew_rate(struct max77620_regulator *pmic, int id,
return 0;
}
+static int max77620_config_power_ok(struct max77620_regulator *pmic, int id)
+{
+ struct max77620_regulator_pdata *rpdata = &pmic->reg_pdata[id];
+ struct max77620_regulator_info *rinfo = pmic->rinfo[id];
+ struct max77620_chip *chip = dev_get_drvdata(pmic->dev->parent);
+ u8 val, mask;
+ int ret;
+
+ switch (chip->chip_id) {
+ case MAX20024:
+ if (rpdata->power_ok >= 0) {
+ if (rinfo->type == MAX77620_REGULATOR_TYPE_SD)
+ mask = MAX20024_SD_CFG1_MPOK_MASK;
+ else
+ mask = MAX20024_LDO_CFG2_MPOK_MASK;
+
+ val = rpdata->power_ok ? mask : 0;
+
+ ret = regmap_update_bits(pmic->rmap, rinfo->cfg_addr,
+ mask, val);
+ if (ret < 0) {
+ dev_err(pmic->dev, "Reg 0x%02x update failed %d\n",
+ rinfo->cfg_addr, ret);
+ return ret;
+ }
+ }
+ break;
+
+ default:
+ break;
+ }
+
+ return 0;
+}
+
static int max77620_init_pmic(struct max77620_regulator *pmic, int id)
{
struct max77620_regulator_pdata *rpdata = &pmic->reg_pdata[id];
int ret;
+ max77620_config_power_ok(pmic, id);
+
/* Update power mode */
ret = max77620_regulator_get_power_mode(pmic, id);
if (ret < 0)
@@ -595,6 +632,12 @@ static int max77620_of_parse_cb(struct device_node *np,
np, "maxim,suspend-fps-power-down-slot", &pval);
rpdata->suspend_fps_pd_slot = (!ret) ? pval : -1;
+ ret = of_property_read_u32(np, "maxim,power-ok-control", &pval);
+ if (!ret)
+ rpdata->power_ok = pval;
+ else
+ rpdata->power_ok = -1;
+
ret = of_property_read_u32(np, "maxim,ramp-rate-setting", &pval);
rpdata->ramp_rate_setting = (!ret) ? pval : 0;
@@ -807,6 +850,8 @@ static int max77620_regulator_resume(struct device *dev)
for (id = 0; id < MAX77620_NUM_REGS; id++) {
reg_pdata = &pmic->reg_pdata[id];
+ max77620_config_power_ok(pmic, id);
+
max77620_regulator_set_fps_slots(pmic, id, false);
if (reg_pdata->active_fps_src < 0)
continue;
diff --git a/drivers/regulator/rk808-regulator.c b/drivers/regulator/rk808-regulator.c
index 3314bf299a51..fb44d5215e30 100644
--- a/drivers/regulator/rk808-regulator.c
+++ b/drivers/regulator/rk808-regulator.c
@@ -120,7 +120,7 @@ static const struct regulator_linear_range rk808_ldo3_voltage_ranges[] = {
static int rk808_buck1_2_get_voltage_sel_regmap(struct regulator_dev *rdev)
{
struct rk808_regulator_data *pdata = rdev_get_drvdata(rdev);
- int id = rdev->desc->id - RK808_ID_DCDC1;
+ int id = rdev_get_id(rdev);
struct gpio_desc *gpio = pdata->dvs_gpio[id];
unsigned int val;
int ret;
@@ -193,7 +193,7 @@ static int rk808_buck1_2_set_voltage_sel(struct regulator_dev *rdev,
unsigned sel)
{
struct rk808_regulator_data *pdata = rdev_get_drvdata(rdev);
- int id = rdev->desc->id - RK808_ID_DCDC1;
+ int id = rdev_get_id(rdev);
struct gpio_desc *gpio = pdata->dvs_gpio[id];
unsigned int reg = rdev->desc->vsel_reg;
unsigned old_sel;
@@ -232,7 +232,7 @@ static int rk808_buck1_2_set_voltage_time_sel(struct regulator_dev *rdev,
unsigned int new_selector)
{
struct rk808_regulator_data *pdata = rdev_get_drvdata(rdev);
- int id = rdev->desc->id - RK808_ID_DCDC1;
+ int id = rdev_get_id(rdev);
struct gpio_desc *gpio = pdata->dvs_gpio[id];
/* if there is no dvs1/2 pin, we don't need wait extra time here. */
@@ -245,8 +245,7 @@ static int rk808_buck1_2_set_voltage_time_sel(struct regulator_dev *rdev,
static int rk808_set_ramp_delay(struct regulator_dev *rdev, int ramp_delay)
{
unsigned int ramp_value = RK808_RAMP_RATE_10MV_PER_US;
- unsigned int reg = rk808_buck_config_regs[rdev->desc->id -
- RK808_ID_DCDC1];
+ unsigned int reg = rk808_buck_config_regs[rdev_get_id(rdev)];
switch (ramp_delay) {
case 1 ... 2000:
diff --git a/drivers/regulator/stw481x-vmmc.c b/drivers/regulator/stw481x-vmmc.c
index 7d2ae3e9e942..342f5da79975 100644
--- a/drivers/regulator/stw481x-vmmc.c
+++ b/drivers/regulator/stw481x-vmmc.c
@@ -47,7 +47,8 @@ static struct regulator_desc vmmc_regulator = {
.volt_table = stw481x_vmmc_voltages,
.enable_time = 200, /* FIXME: look this up */
.enable_reg = STW_CONF1,
- .enable_mask = STW_CONF1_PDN_VMMC,
+ .enable_mask = STW_CONF1_PDN_VMMC | STW_CONF1_MMC_LS_STATUS,
+ .enable_val = STW_CONF1_PDN_VMMC,
.vsel_reg = STW_CONF1,
.vsel_mask = STW_CONF1_VMMC_MASK,
};
diff --git a/drivers/regulator/tps6507x-regulator.c b/drivers/regulator/tps6507x-regulator.c
index dad0bac09ecf..c179a3a221af 100644
--- a/drivers/regulator/tps6507x-regulator.c
+++ b/drivers/regulator/tps6507x-regulator.c
@@ -375,7 +375,7 @@ static struct tps6507x_board *tps6507x_parse_dt_reg_data(
struct device_node *np = pdev->dev.parent->of_node;
struct device_node *regulators;
struct of_regulator_match *matches;
- static struct regulator_init_data *reg_data;
+ struct regulator_init_data *reg_data;
int idx = 0, count, ret;
tps_board = devm_kzalloc(&pdev->dev, sizeof(*tps_board),
diff --git a/drivers/regulator/tps65086-regulator.c b/drivers/regulator/tps65086-regulator.c
index 33f389d583ef..ecb0371780af 100644
--- a/drivers/regulator/tps65086-regulator.c
+++ b/drivers/regulator/tps65086-regulator.c
@@ -71,7 +71,7 @@ struct tps65086_regulator {
unsigned int decay_mask;
};
-static const struct regulator_linear_range tps65086_buck126_10mv_ranges[] = {
+static const struct regulator_linear_range tps65086_10mv_ranges[] = {
REGULATOR_LINEAR_RANGE(0, 0x0, 0x0, 0),
REGULATOR_LINEAR_RANGE(410000, 0x1, 0x7F, 10000),
};
@@ -82,7 +82,7 @@ static const struct regulator_linear_range tps65086_buck126_25mv_ranges[] = {
REGULATOR_LINEAR_RANGE(1025000, 0x19, 0x7F, 25000),
};
-static const struct regulator_linear_range tps65086_buck345_ranges[] = {
+static const struct regulator_linear_range tps65086_buck345_25mv_ranges[] = {
REGULATOR_LINEAR_RANGE(0, 0x0, 0x0, 0),
REGULATOR_LINEAR_RANGE(425000, 0x1, 0x7F, 25000),
};
@@ -125,27 +125,27 @@ static int tps65086_of_parse_cb(struct device_node *dev,
static struct tps65086_regulator regulators[] = {
TPS65086_REGULATOR("BUCK1", "buck1", BUCK1, 0x80, TPS65086_BUCK1CTRL,
BUCK_VID_MASK, TPS65086_BUCK123CTRL, BIT(0),
- tps65086_buck126_10mv_ranges, TPS65086_BUCK1CTRL,
+ tps65086_10mv_ranges, TPS65086_BUCK1CTRL,
BIT(0)),
TPS65086_REGULATOR("BUCK2", "buck2", BUCK2, 0x80, TPS65086_BUCK2CTRL,
BUCK_VID_MASK, TPS65086_BUCK123CTRL, BIT(1),
- tps65086_buck126_10mv_ranges, TPS65086_BUCK2CTRL,
+ tps65086_10mv_ranges, TPS65086_BUCK2CTRL,
BIT(0)),
TPS65086_REGULATOR("BUCK3", "buck3", BUCK3, 0x80, TPS65086_BUCK3VID,
BUCK_VID_MASK, TPS65086_BUCK123CTRL, BIT(2),
- tps65086_buck345_ranges, TPS65086_BUCK3DECAY,
+ tps65086_10mv_ranges, TPS65086_BUCK3DECAY,
BIT(0)),
TPS65086_REGULATOR("BUCK4", "buck4", BUCK4, 0x80, TPS65086_BUCK4VID,
BUCK_VID_MASK, TPS65086_BUCK4CTRL, BIT(0),
- tps65086_buck345_ranges, TPS65086_BUCK4VID,
+ tps65086_10mv_ranges, TPS65086_BUCK4VID,
BIT(0)),
TPS65086_REGULATOR("BUCK5", "buck5", BUCK5, 0x80, TPS65086_BUCK5VID,
BUCK_VID_MASK, TPS65086_BUCK5CTRL, BIT(0),
- tps65086_buck345_ranges, TPS65086_BUCK5CTRL,
+ tps65086_10mv_ranges, TPS65086_BUCK5CTRL,
BIT(0)),
TPS65086_REGULATOR("BUCK6", "buck6", BUCK6, 0x80, TPS65086_BUCK6VID,
BUCK_VID_MASK, TPS65086_BUCK6CTRL, BIT(0),
- tps65086_buck126_10mv_ranges, TPS65086_BUCK6CTRL,
+ tps65086_10mv_ranges, TPS65086_BUCK6CTRL,
BIT(0)),
TPS65086_REGULATOR("LDOA1", "ldoa1", LDOA1, 0xF, TPS65086_LDOA1CTRL,
VDOA1_VID_MASK, TPS65086_LDOA1CTRL, BIT(0),
@@ -162,18 +162,6 @@ static struct tps65086_regulator regulators[] = {
TPS65086_SWITCH("VTT", "vtt", VTT, TPS65086_SWVTT_EN, BIT(4)),
};
-static inline bool has_25mv_mode(int id)
-{
- switch (id) {
- case BUCK1:
- case BUCK2:
- case BUCK6:
- return true;
- default:
- return false;
- }
-}
-
static int tps65086_of_parse_cb(struct device_node *dev,
const struct regulator_desc *desc,
struct regulator_config *config)
@@ -181,12 +169,27 @@ static int tps65086_of_parse_cb(struct device_node *dev,
int ret;
/* Check for 25mV step mode */
- if (has_25mv_mode(desc->id) &&
- of_property_read_bool(config->of_node, "ti,regulator-step-size-25mv")) {
- regulators[desc->id].desc.linear_ranges =
+ if (of_property_read_bool(config->of_node, "ti,regulator-step-size-25mv")) {
+ switch (desc->id) {
+ case BUCK1:
+ case BUCK2:
+ case BUCK6:
+ regulators[desc->id].desc.linear_ranges =
tps65086_buck126_25mv_ranges;
- regulators[desc->id].desc.n_linear_ranges =
+ regulators[desc->id].desc.n_linear_ranges =
ARRAY_SIZE(tps65086_buck126_25mv_ranges);
+ break;
+ case BUCK3:
+ case BUCK4:
+ case BUCK5:
+ regulators[desc->id].desc.linear_ranges =
+ tps65086_buck345_25mv_ranges;
+ regulators[desc->id].desc.n_linear_ranges =
+ ARRAY_SIZE(tps65086_buck345_25mv_ranges);
+ break;
+ default:
+ dev_warn(config->dev, "25mV step mode only valid for BUCK regulators\n");
+ }
}
/* Check for decay mode */
diff --git a/drivers/regulator/tps65218-regulator.c b/drivers/regulator/tps65218-regulator.c
index eb0f5b13841a..9aafbb03482d 100644
--- a/drivers/regulator/tps65218-regulator.c
+++ b/drivers/regulator/tps65218-regulator.c
@@ -22,6 +22,7 @@
#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/of_device.h>
+#include <linux/regmap.h>
#include <linux/regulator/of_regulator.h>
#include <linux/regulator/driver.h>
#include <linux/regulator/machine.h>
@@ -30,10 +31,11 @@
enum tps65218_regulators { DCDC1, DCDC2, DCDC3, DCDC4,
DCDC5, DCDC6, LDO1, LS3 };
-#define TPS65218_REGULATOR(_name, _id, _type, _ops, _n, _vr, _vm, _er, _em, \
- _cr, _cm, _lr, _nlr, _delay, _fuv, _sr, _sm) \
+#define TPS65218_REGULATOR(_name, _of, _id, _type, _ops, _n, _vr, _vm, _er, \
+ _em, _cr, _cm, _lr, _nlr, _delay, _fuv, _sr, _sm) \
{ \
.name = _name, \
+ .of_match = _of, \
.id = _id, \
.ops = &_ops, \
.n_voltages = _n, \
@@ -54,14 +56,6 @@ enum tps65218_regulators { DCDC1, DCDC2, DCDC3, DCDC4,
.bypass_mask = _sm, \
} \
-#define TPS65218_INFO(_id, _nm, _min, _max) \
- [_id] = { \
- .id = _id, \
- .name = _nm, \
- .min_uV = _min, \
- .max_uV = _max, \
- }
-
static const struct regulator_linear_range dcdc1_dcdc2_ranges[] = {
REGULATOR_LINEAR_RANGE(850000, 0x0, 0x32, 10000),
REGULATOR_LINEAR_RANGE(1375000, 0x33, 0x3f, 25000),
@@ -77,36 +71,6 @@ static const struct regulator_linear_range dcdc4_ranges[] = {
REGULATOR_LINEAR_RANGE(1600000, 0x10, 0x34, 50000),
};
-static struct tps_info tps65218_pmic_regs[] = {
- TPS65218_INFO(DCDC1, "DCDC1", 850000, 1675000),
- TPS65218_INFO(DCDC2, "DCDC2", 850000, 1675000),
- TPS65218_INFO(DCDC3, "DCDC3", 900000, 3400000),
- TPS65218_INFO(DCDC4, "DCDC4", 1175000, 3400000),
- TPS65218_INFO(DCDC5, "DCDC5", 1000000, 1000000),
- TPS65218_INFO(DCDC6, "DCDC6", 1800000, 1800000),
- TPS65218_INFO(LDO1, "LDO1", 900000, 3400000),
- TPS65218_INFO(LS3, "LS3", -1, -1),
-};
-
-#define TPS65218_OF_MATCH(comp, label) \
- { \
- .compatible = comp, \
- .data = &label, \
- }
-
-static const struct of_device_id tps65218_of_match[] = {
- TPS65218_OF_MATCH("ti,tps65218-dcdc1", tps65218_pmic_regs[DCDC1]),
- TPS65218_OF_MATCH("ti,tps65218-dcdc2", tps65218_pmic_regs[DCDC2]),
- TPS65218_OF_MATCH("ti,tps65218-dcdc3", tps65218_pmic_regs[DCDC3]),
- TPS65218_OF_MATCH("ti,tps65218-dcdc4", tps65218_pmic_regs[DCDC4]),
- TPS65218_OF_MATCH("ti,tps65218-dcdc5", tps65218_pmic_regs[DCDC5]),
- TPS65218_OF_MATCH("ti,tps65218-dcdc6", tps65218_pmic_regs[DCDC6]),
- TPS65218_OF_MATCH("ti,tps65218-ldo1", tps65218_pmic_regs[LDO1]),
- TPS65218_OF_MATCH("ti,tps65218-ls3", tps65218_pmic_regs[LS3]),
- { }
-};
-MODULE_DEVICE_TABLE(of, tps65218_of_match);
-
static int tps65218_pmic_set_voltage_sel(struct regulator_dev *dev,
unsigned selector)
{
@@ -188,7 +152,7 @@ static int tps65218_pmic_set_suspend_disable(struct regulator_dev *dev)
if (rid == TPS65218_DCDC_3 && tps->rev == TPS65218_REV_2_1)
return 0;
- if (!tps->info[rid]->strobe) {
+ if (!tps->strobes[rid]) {
 if (rid == TPS65218_DCDC_3)
- tps->info[rid]->strobe = 3;
+ tps->strobes[rid] = 3;
 else
@@ -197,8 +161,7 @@ static int tps65218_pmic_set_suspend_disable(struct regulator_dev *dev)
return tps65218_set_bits(tps, dev->desc->bypass_reg,
dev->desc->bypass_mask,
- tps->info[rid]->strobe,
- TPS65218_PROTECT_L1);
+ tps->strobes[rid], TPS65218_PROTECT_L1);
}
/* Operations permitted on DCDC1, DCDC2 */
@@ -272,7 +235,7 @@ static int tps65218_pmic_get_current_limit(struct regulator_dev *dev)
unsigned int index;
struct tps65218 *tps = rdev_get_drvdata(dev);
- retval = tps65218_reg_read(tps, dev->desc->csel_reg, &index);
+ retval = regmap_read(tps->regmap, dev->desc->csel_reg, &index);
if (retval < 0)
return retval;
@@ -300,104 +263,104 @@ static struct regulator_ops tps65218_dcdc56_pmic_ops = {
};
static const struct regulator_desc regulators[] = {
- TPS65218_REGULATOR("DCDC1", TPS65218_DCDC_1, REGULATOR_VOLTAGE,
- tps65218_dcdc12_ops, 64, TPS65218_REG_CONTROL_DCDC1,
+ TPS65218_REGULATOR("DCDC1", "regulator-dcdc1", TPS65218_DCDC_1,
+ REGULATOR_VOLTAGE, tps65218_dcdc12_ops, 64,
+ TPS65218_REG_CONTROL_DCDC1,
TPS65218_CONTROL_DCDC1_MASK, TPS65218_REG_ENABLE1,
TPS65218_ENABLE1_DC1_EN, 0, 0, dcdc1_dcdc2_ranges,
2, 4000, 0, TPS65218_REG_SEQ3,
TPS65218_SEQ3_DC1_SEQ_MASK),
- TPS65218_REGULATOR("DCDC2", TPS65218_DCDC_2, REGULATOR_VOLTAGE,
- tps65218_dcdc12_ops, 64, TPS65218_REG_CONTROL_DCDC2,
+ TPS65218_REGULATOR("DCDC2", "regulator-dcdc2", TPS65218_DCDC_2,
+ REGULATOR_VOLTAGE, tps65218_dcdc12_ops, 64,
+ TPS65218_REG_CONTROL_DCDC2,
TPS65218_CONTROL_DCDC2_MASK, TPS65218_REG_ENABLE1,
TPS65218_ENABLE1_DC2_EN, 0, 0, dcdc1_dcdc2_ranges,
2, 4000, 0, TPS65218_REG_SEQ3,
TPS65218_SEQ3_DC2_SEQ_MASK),
- TPS65218_REGULATOR("DCDC3", TPS65218_DCDC_3, REGULATOR_VOLTAGE,
- tps65218_ldo1_dcdc34_ops, 64,
+ TPS65218_REGULATOR("DCDC3", "regulator-dcdc3", TPS65218_DCDC_3,
+ REGULATOR_VOLTAGE, tps65218_ldo1_dcdc34_ops, 64,
TPS65218_REG_CONTROL_DCDC3,
TPS65218_CONTROL_DCDC3_MASK, TPS65218_REG_ENABLE1,
TPS65218_ENABLE1_DC3_EN, 0, 0, ldo1_dcdc3_ranges, 2,
0, 0, TPS65218_REG_SEQ4, TPS65218_SEQ4_DC3_SEQ_MASK),
- TPS65218_REGULATOR("DCDC4", TPS65218_DCDC_4, REGULATOR_VOLTAGE,
- tps65218_ldo1_dcdc34_ops, 53,
+ TPS65218_REGULATOR("DCDC4", "regulator-dcdc4", TPS65218_DCDC_4,
+ REGULATOR_VOLTAGE, tps65218_ldo1_dcdc34_ops, 53,
TPS65218_REG_CONTROL_DCDC4,
TPS65218_CONTROL_DCDC4_MASK, TPS65218_REG_ENABLE1,
TPS65218_ENABLE1_DC4_EN, 0, 0, dcdc4_ranges, 2,
0, 0, TPS65218_REG_SEQ4, TPS65218_SEQ4_DC4_SEQ_MASK),
- TPS65218_REGULATOR("DCDC5", TPS65218_DCDC_5, REGULATOR_VOLTAGE,
- tps65218_dcdc56_pmic_ops, 1, -1, -1,
- TPS65218_REG_ENABLE1, TPS65218_ENABLE1_DC5_EN, 0, 0,
- NULL, 0, 0, 1000000, TPS65218_REG_SEQ5,
+ TPS65218_REGULATOR("DCDC5", "regulator-dcdc5", TPS65218_DCDC_5,
+ REGULATOR_VOLTAGE, tps65218_dcdc56_pmic_ops, 1, -1,
+ -1, TPS65218_REG_ENABLE1, TPS65218_ENABLE1_DC5_EN, 0,
+ 0, NULL, 0, 0, 1000000, TPS65218_REG_SEQ5,
TPS65218_SEQ5_DC5_SEQ_MASK),
- TPS65218_REGULATOR("DCDC6", TPS65218_DCDC_6, REGULATOR_VOLTAGE,
- tps65218_dcdc56_pmic_ops, 1, -1, -1,
- TPS65218_REG_ENABLE1, TPS65218_ENABLE1_DC6_EN, 0, 0,
- NULL, 0, 0, 1800000, TPS65218_REG_SEQ5,
+ TPS65218_REGULATOR("DCDC6", "regulator-dcdc6", TPS65218_DCDC_6,
+ REGULATOR_VOLTAGE, tps65218_dcdc56_pmic_ops, 1, -1,
+ -1, TPS65218_REG_ENABLE1, TPS65218_ENABLE1_DC6_EN, 0,
+ 0, NULL, 0, 0, 1800000, TPS65218_REG_SEQ5,
TPS65218_SEQ5_DC6_SEQ_MASK),
- TPS65218_REGULATOR("LDO1", TPS65218_LDO_1, REGULATOR_VOLTAGE,
- tps65218_ldo1_dcdc34_ops, 64,
+ TPS65218_REGULATOR("LDO1", "regulator-ldo1", TPS65218_LDO_1,
+ REGULATOR_VOLTAGE, tps65218_ldo1_dcdc34_ops, 64,
TPS65218_REG_CONTROL_LDO1,
TPS65218_CONTROL_LDO1_MASK, TPS65218_REG_ENABLE2,
TPS65218_ENABLE2_LDO1_EN, 0, 0, ldo1_dcdc3_ranges,
2, 0, 0, TPS65218_REG_SEQ6,
TPS65218_SEQ6_LDO1_SEQ_MASK),
- TPS65218_REGULATOR("LS3", TPS65218_LS_3, REGULATOR_CURRENT,
- tps65218_ls3_ops, 0, 0, 0, TPS65218_REG_ENABLE2,
- TPS65218_ENABLE2_LS3_EN, TPS65218_REG_CONFIG2,
- TPS65218_CONFIG2_LS3ILIM_MASK, NULL, 0, 0, 0, 0, 0),
+ TPS65218_REGULATOR("LS3", "regulator-ls3", TPS65218_LS_3,
+ REGULATOR_CURRENT, tps65218_ls3_ops, 0, 0, 0,
+ TPS65218_REG_ENABLE2, TPS65218_ENABLE2_LS3_EN,
+ TPS65218_REG_CONFIG2, TPS65218_CONFIG2_LS3ILIM_MASK,
+ NULL, 0, 0, 0, 0, 0),
};
static int tps65218_regulator_probe(struct platform_device *pdev)
{
struct tps65218 *tps = dev_get_drvdata(pdev->dev.parent);
- struct regulator_init_data *init_data;
- const struct tps_info *template;
struct regulator_dev *rdev;
- const struct of_device_id *match;
struct regulator_config config = { };
- int id, ret;
+ int i, ret;
unsigned int val;
- match = of_match_device(tps65218_of_match, &pdev->dev);
- if (!match)
- return -ENODEV;
-
- template = match->data;
- id = template->id;
- init_data = of_get_regulator_init_data(&pdev->dev, pdev->dev.of_node,
- &regulators[id]);
-
- platform_set_drvdata(pdev, tps);
-
- tps->info[id] = &tps65218_pmic_regs[id];
config.dev = &pdev->dev;
- config.init_data = init_data;
+ config.dev->of_node = tps->dev->of_node;
config.driver_data = tps;
config.regmap = tps->regmap;
- config.of_node = pdev->dev.of_node;
- rdev = devm_regulator_register(&pdev->dev, &regulators[id], &config);
- if (IS_ERR(rdev)) {
- dev_err(tps->dev, "failed to register %s regulator\n",
- pdev->name);
- return PTR_ERR(rdev);
- }
+ /* Allocate memory for strobes */
+ tps->strobes = devm_kzalloc(&pdev->dev, sizeof(u8) *
+ TPS65218_NUM_REGULATOR, GFP_KERNEL);
+ if (!tps->strobes)
+ return -ENOMEM;
- ret = tps65218_reg_read(tps, regulators[id].bypass_reg, &val);
- if (ret)
- return ret;
+ for (i = 0; i < ARRAY_SIZE(regulators); i++) {
+ rdev = devm_regulator_register(&pdev->dev, &regulators[i],
+ &config);
+ if (IS_ERR(rdev)) {
+ dev_err(tps->dev, "failed to register %s regulator\n",
+ pdev->name);
+ return PTR_ERR(rdev);
+ }
- tps->info[id]->strobe = val & regulators[id].bypass_mask;
+ ret = regmap_read(tps->regmap, regulators[i].bypass_reg, &val);
+ if (ret)
+ return ret;
+
+ tps->strobes[i] = val & regulators[i].bypass_mask;
+ }
return 0;
}
+static const struct platform_device_id tps65218_regulator_id_table[] = {
+ { "tps65218-regulator", },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(platform, tps65218_regulator_id_table);
+
static struct platform_driver tps65218_regulator_driver = {
.driver = {
.name = "tps65218-pmic",
- .of_match_table = tps65218_of_match,
},
.probe = tps65218_regulator_probe,
+ .id_table = tps65218_regulator_id_table,
};
module_platform_driver(tps65218_regulator_driver);
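
With the OF compatibles removed, the regulator cell above is matched purely through the platform_device_id table, so the parent MFD driver must create a child device with that exact name. A hedged sketch of how such a parent could register the cell (the cell list and probe shape are hypothetical, not the actual tps65218 core driver):

#include <linux/i2c.h>
#include <linux/mfd/core.h>
#include <linux/platform_device.h>

static const struct mfd_cell demo_cells[] = {
	{ .name = "tps65218-regulator" },	/* matches the id table above */
};

static int demo_parent_probe(struct i2c_client *client,
			     const struct i2c_device_id *id)
{
	/* PLATFORM_DEVID_AUTO avoids name clashes with a second PMIC. */
	return devm_mfd_add_devices(&client->dev, PLATFORM_DEVID_AUTO,
				    demo_cells, ARRAY_SIZE(demo_cells),
				    NULL, 0, NULL);
}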
diff --git a/drivers/regulator/twl-regulator.c b/drivers/regulator/twl-regulator.c
index 210681d6b743..6c9ec84121bd 100644
--- a/drivers/regulator/twl-regulator.c
+++ b/drivers/regulator/twl-regulator.c
@@ -24,7 +24,7 @@
#include <linux/delay.h>
/*
- * The TWL4030/TW5030/TPS659x0/TWL6030 family chips include power management, a
+ * The TWL4030/TW5030/TPS659x0 family chips include power management, a
* USB OTG transceiver, an RTC, ADC, PWM, and lots more. Some versions
* include an audio codec, battery charger, and more voltage regulators.
* These chips are often used in OMAP-based systems.
@@ -48,25 +48,12 @@ struct twlreg_info {
/* State REMAP default configuration */
u8 remap;
- /* chip constraints on regulator behavior */
- u16 min_mV;
- u16 max_mV;
-
- u8 flags;
-
/* used by regulator core */
struct regulator_desc desc;
/* chip specific features */
unsigned long features;
- /*
- * optional override functions for voltage set/get
- * these are currently only used for SMPS regulators
- */
- int (*get_voltage)(void *data);
- int (*set_voltage)(void *data, int target_uV);
-
/* data passed from board for external get/set voltage */
void *data;
};
@@ -88,33 +75,6 @@ struct twlreg_info {
#define VREG_STATE 2
#define VREG_VOLTAGE 3
#define VREG_VOLTAGE_SMPS 4
-/* TWL6030 Misc register offsets */
-#define VREG_BC_ALL 1
-#define VREG_BC_REF 2
-#define VREG_BC_PROC 3
-#define VREG_BC_CLK_RST 4
-
-/* TWL6030 LDO register values for CFG_STATE */
-#define TWL6030_CFG_STATE_OFF 0x00
-#define TWL6030_CFG_STATE_ON 0x01
-#define TWL6030_CFG_STATE_OFF2 0x02
-#define TWL6030_CFG_STATE_SLEEP 0x03
-#define TWL6030_CFG_STATE_GRP_SHIFT 5
-#define TWL6030_CFG_STATE_APP_SHIFT 2
-#define TWL6030_CFG_STATE_APP_MASK (0x03 << TWL6030_CFG_STATE_APP_SHIFT)
-#define TWL6030_CFG_STATE_APP(v) (((v) & TWL6030_CFG_STATE_APP_MASK) >>\
- TWL6030_CFG_STATE_APP_SHIFT)
-
-/* Flags for SMPS Voltage reading */
-#define SMPS_OFFSET_EN BIT(0)
-#define SMPS_EXTENDED_EN BIT(1)
-
-/* twl6032 SMPS EPROM values */
-#define TWL6030_SMPS_OFFSET 0xB0
-#define TWL6030_SMPS_MULT 0xB3
-#define SMPS_MULTOFFSET_SMPS4 BIT(0)
-#define SMPS_MULTOFFSET_VIO BIT(1)
-#define SMPS_MULTOFFSET_SMPS3 BIT(6)
static inline int
twlreg_read(struct twlreg_info *info, unsigned slave_subgp, unsigned offset)
@@ -168,26 +128,6 @@ static int twl4030reg_is_enabled(struct regulator_dev *rdev)
return state & P1_GRP_4030;
}
-static int twl6030reg_is_enabled(struct regulator_dev *rdev)
-{
- struct twlreg_info *info = rdev_get_drvdata(rdev);
- int grp = 0, val;
-
- if (!(twl_class_is_6030() && (info->features & TWL6032_SUBCLASS))) {
- grp = twlreg_grp(rdev);
- if (grp < 0)
- return grp;
- grp &= P1_GRP_6030;
- } else {
- grp = 1;
- }
-
- val = twlreg_read(info, TWL_MODULE_PM_RECEIVER, VREG_STATE);
- val = TWL6030_CFG_STATE_APP(val);
-
- return grp && (val == TWL6030_CFG_STATE_ON);
-}
-
#define PB_I2C_BUSY BIT(0)
#define PB_I2C_BWEN BIT(1)
@@ -273,23 +213,6 @@ static int twl4030reg_enable(struct regulator_dev *rdev)
return ret;
}
-static int twl6030reg_enable(struct regulator_dev *rdev)
-{
- struct twlreg_info *info = rdev_get_drvdata(rdev);
- int grp = 0;
- int ret;
-
- if (!(twl_class_is_6030() && (info->features & TWL6032_SUBCLASS)))
- grp = twlreg_grp(rdev);
- if (grp < 0)
- return grp;
-
- ret = twlreg_write(info, TWL_MODULE_PM_RECEIVER, VREG_STATE,
- grp << TWL6030_CFG_STATE_GRP_SHIFT |
- TWL6030_CFG_STATE_ON);
- return ret;
-}
-
static int twl4030reg_disable(struct regulator_dev *rdev)
{
struct twlreg_info *info = rdev_get_drvdata(rdev);
@@ -307,23 +230,6 @@ static int twl4030reg_disable(struct regulator_dev *rdev)
return ret;
}
-static int twl6030reg_disable(struct regulator_dev *rdev)
-{
- struct twlreg_info *info = rdev_get_drvdata(rdev);
- int grp = 0;
- int ret;
-
- if (!(twl_class_is_6030() && (info->features & TWL6032_SUBCLASS)))
- grp = P1_GRP_6030 | P2_GRP_6030 | P3_GRP_6030;
-
- /* For 6030, set the off state for all grps enabled */
- ret = twlreg_write(info, TWL_MODULE_PM_RECEIVER, VREG_STATE,
- (grp) << TWL6030_CFG_STATE_GRP_SHIFT |
- TWL6030_CFG_STATE_OFF);
-
- return ret;
-}
-
static int twl4030reg_get_status(struct regulator_dev *rdev)
{
int state = twlreg_grp(rdev);
@@ -340,33 +246,6 @@ static int twl4030reg_get_status(struct regulator_dev *rdev)
: REGULATOR_STATUS_STANDBY;
}
-static int twl6030reg_get_status(struct regulator_dev *rdev)
-{
- struct twlreg_info *info = rdev_get_drvdata(rdev);
- int val;
-
- val = twlreg_grp(rdev);
- if (val < 0)
- return val;
-
- val = twlreg_read(info, TWL_MODULE_PM_RECEIVER, VREG_STATE);
-
- switch (TWL6030_CFG_STATE_APP(val)) {
- case TWL6030_CFG_STATE_ON:
- return REGULATOR_STATUS_NORMAL;
-
- case TWL6030_CFG_STATE_SLEEP:
- return REGULATOR_STATUS_STANDBY;
-
- case TWL6030_CFG_STATE_OFF:
- case TWL6030_CFG_STATE_OFF2:
- default:
- break;
- }
-
- return REGULATOR_STATUS_OFF;
-}
-
static int twl4030reg_set_mode(struct regulator_dev *rdev, unsigned mode)
{
struct twlreg_info *info = rdev_get_drvdata(rdev);
@@ -399,36 +278,6 @@ static inline unsigned int twl4030reg_map_mode(unsigned int mode)
}
}
-static int twl6030reg_set_mode(struct regulator_dev *rdev, unsigned mode)
-{
- struct twlreg_info *info = rdev_get_drvdata(rdev);
- int grp = 0;
- int val;
-
- if (!(twl_class_is_6030() && (info->features & TWL6032_SUBCLASS)))
- grp = twlreg_grp(rdev);
-
- if (grp < 0)
- return grp;
-
- /* Compose the state register settings */
- val = grp << TWL6030_CFG_STATE_GRP_SHIFT;
- /* We can only set the mode through state machine commands... */
- switch (mode) {
- case REGULATOR_MODE_NORMAL:
- val |= TWL6030_CFG_STATE_ON;
- break;
- case REGULATOR_MODE_STANDBY:
- val |= TWL6030_CFG_STATE_SLEEP;
- break;
-
- default:
- return -EINVAL;
- }
-
- return twlreg_write(info, TWL_MODULE_PM_RECEIVER, VREG_STATE, val);
-}
-
/*----------------------------------------------------------------------*/
/*
@@ -565,12 +414,7 @@ twl4030smps_set_voltage(struct regulator_dev *rdev, int min_uV, int max_uV,
struct twlreg_info *info = rdev_get_drvdata(rdev);
int vsel = DIV_ROUND_UP(min_uV - 600000, 12500);
- if (info->set_voltage) {
- return info->set_voltage(info->data, min_uV);
- } else {
- twlreg_write(info, TWL_MODULE_PM_RECEIVER,
- VREG_VOLTAGE_SMPS_4030, vsel);
- }
+ twlreg_write(info, TWL_MODULE_PM_RECEIVER, VREG_VOLTAGE_SMPS_4030, vsel);
return 0;
}
@@ -580,9 +424,6 @@ static int twl4030smps_get_voltage(struct regulator_dev *rdev)
struct twlreg_info *info = rdev_get_drvdata(rdev);
int vsel;
- if (info->get_voltage)
- return info->get_voltage(info->data);
-
vsel = twlreg_read(info, TWL_MODULE_PM_RECEIVER,
VREG_VOLTAGE_SMPS_4030);
@@ -594,85 +435,6 @@ static struct regulator_ops twl4030smps_ops = {
.get_voltage = twl4030smps_get_voltage,
};
-static int twl6030coresmps_set_voltage(struct regulator_dev *rdev, int min_uV,
- int max_uV, unsigned *selector)
-{
- struct twlreg_info *info = rdev_get_drvdata(rdev);
-
- if (info->set_voltage)
- return info->set_voltage(info->data, min_uV);
-
- return -ENODEV;
-}
-
-static int twl6030coresmps_get_voltage(struct regulator_dev *rdev)
-{
- struct twlreg_info *info = rdev_get_drvdata(rdev);
-
- if (info->get_voltage)
- return info->get_voltage(info->data);
-
- return -ENODEV;
-}
-
-static struct regulator_ops twl6030coresmps_ops = {
- .set_voltage = twl6030coresmps_set_voltage,
- .get_voltage = twl6030coresmps_get_voltage,
-};
-
-static int twl6030ldo_list_voltage(struct regulator_dev *rdev, unsigned sel)
-{
- struct twlreg_info *info = rdev_get_drvdata(rdev);
-
- switch (sel) {
- case 0:
- return 0;
- case 1 ... 24:
- /* Linear mapping from 00000001 to 00011000:
- * Absolute voltage value = 1.0 V + 0.1 V × (sel – 00000001)
- */
- return (info->min_mV + 100 * (sel - 1)) * 1000;
- case 25 ... 30:
- return -EINVAL;
- case 31:
- return 2750000;
- default:
- return -EINVAL;
- }
-}
-
-static int
-twl6030ldo_set_voltage_sel(struct regulator_dev *rdev, unsigned selector)
-{
- struct twlreg_info *info = rdev_get_drvdata(rdev);
-
- return twlreg_write(info, TWL_MODULE_PM_RECEIVER, VREG_VOLTAGE,
- selector);
-}
-
-static int twl6030ldo_get_voltage_sel(struct regulator_dev *rdev)
-{
- struct twlreg_info *info = rdev_get_drvdata(rdev);
- int vsel = twlreg_read(info, TWL_MODULE_PM_RECEIVER, VREG_VOLTAGE);
-
- return vsel;
-}
-
-static struct regulator_ops twl6030ldo_ops = {
- .list_voltage = twl6030ldo_list_voltage,
-
- .set_voltage_sel = twl6030ldo_set_voltage_sel,
- .get_voltage_sel = twl6030ldo_get_voltage_sel,
-
- .enable = twl6030reg_enable,
- .disable = twl6030reg_disable,
- .is_enabled = twl6030reg_is_enabled,
-
- .set_mode = twl6030reg_set_mode,
-
- .get_status = twl6030reg_get_status,
-};
-
/*----------------------------------------------------------------------*/
static struct regulator_ops twl4030fixed_ops = {
@@ -687,226 +449,8 @@ static struct regulator_ops twl4030fixed_ops = {
.get_status = twl4030reg_get_status,
};
-static struct regulator_ops twl6030fixed_ops = {
- .list_voltage = regulator_list_voltage_linear,
-
- .enable = twl6030reg_enable,
- .disable = twl6030reg_disable,
- .is_enabled = twl6030reg_is_enabled,
-
- .set_mode = twl6030reg_set_mode,
-
- .get_status = twl6030reg_get_status,
-};
-
-/*
- * SMPS status and control
- */
-
-static int twl6030smps_list_voltage(struct regulator_dev *rdev, unsigned index)
-{
- struct twlreg_info *info = rdev_get_drvdata(rdev);
-
- int voltage = 0;
-
- switch (info->flags) {
- case SMPS_OFFSET_EN:
- voltage = 100000;
- /* fall through */
- case 0:
- switch (index) {
- case 0:
- voltage = 0;
- break;
- case 58:
- voltage = 1350 * 1000;
- break;
- case 59:
- voltage = 1500 * 1000;
- break;
- case 60:
- voltage = 1800 * 1000;
- break;
- case 61:
- voltage = 1900 * 1000;
- break;
- case 62:
- voltage = 2100 * 1000;
- break;
- default:
- voltage += (600000 + (12500 * (index - 1)));
- }
- break;
- case SMPS_EXTENDED_EN:
- switch (index) {
- case 0:
- voltage = 0;
- break;
- case 58:
- voltage = 2084 * 1000;
- break;
- case 59:
- voltage = 2315 * 1000;
- break;
- case 60:
- voltage = 2778 * 1000;
- break;
- case 61:
- voltage = 2932 * 1000;
- break;
- case 62:
- voltage = 3241 * 1000;
- break;
- default:
- voltage = (1852000 + (38600 * (index - 1)));
- }
- break;
- case SMPS_OFFSET_EN | SMPS_EXTENDED_EN:
- switch (index) {
- case 0:
- voltage = 0;
- break;
- case 58:
- voltage = 4167 * 1000;
- break;
- case 59:
- voltage = 2315 * 1000;
- break;
- case 60:
- voltage = 2778 * 1000;
- break;
- case 61:
- voltage = 2932 * 1000;
- break;
- case 62:
- voltage = 3241 * 1000;
- break;
- default:
- voltage = (2161000 + (38600 * (index - 1)));
- }
- break;
- }
-
- return voltage;
-}
-
-static int twl6030smps_map_voltage(struct regulator_dev *rdev, int min_uV,
- int max_uV)
-{
- struct twlreg_info *info = rdev_get_drvdata(rdev);
- int vsel = 0;
-
- switch (info->flags) {
- case 0:
- if (min_uV == 0)
- vsel = 0;
- else if ((min_uV >= 600000) && (min_uV <= 1300000)) {
- vsel = DIV_ROUND_UP(min_uV - 600000, 12500);
- vsel++;
- }
- /* Values 1..57 for vsel are linear and can be calculated
- * values 58..62 are non linear.
- */
- else if ((min_uV > 1900000) && (min_uV <= 2100000))
- vsel = 62;
- else if ((min_uV > 1800000) && (min_uV <= 1900000))
- vsel = 61;
- else if ((min_uV > 1500000) && (min_uV <= 1800000))
- vsel = 60;
- else if ((min_uV > 1350000) && (min_uV <= 1500000))
- vsel = 59;
- else if ((min_uV > 1300000) && (min_uV <= 1350000))
- vsel = 58;
- else
- return -EINVAL;
- break;
- case SMPS_OFFSET_EN:
- if (min_uV == 0)
- vsel = 0;
- else if ((min_uV >= 700000) && (min_uV <= 1420000)) {
- vsel = DIV_ROUND_UP(min_uV - 700000, 12500);
- vsel++;
- }
- /* Values 1..57 for vsel are linear and can be calculated
- * values 58..62 are non linear.
- */
- else if ((min_uV > 1900000) && (min_uV <= 2100000))
- vsel = 62;
- else if ((min_uV > 1800000) && (min_uV <= 1900000))
- vsel = 61;
- else if ((min_uV > 1350000) && (min_uV <= 1800000))
- vsel = 60;
- else if ((min_uV > 1350000) && (min_uV <= 1500000))
- vsel = 59;
- else if ((min_uV > 1300000) && (min_uV <= 1350000))
- vsel = 58;
- else
- return -EINVAL;
- break;
- case SMPS_EXTENDED_EN:
- if (min_uV == 0) {
- vsel = 0;
- } else if ((min_uV >= 1852000) && (max_uV <= 4013600)) {
- vsel = DIV_ROUND_UP(min_uV - 1852000, 38600);
- vsel++;
- }
- break;
- case SMPS_OFFSET_EN|SMPS_EXTENDED_EN:
- if (min_uV == 0) {
- vsel = 0;
- } else if ((min_uV >= 2161000) && (min_uV <= 4321000)) {
- vsel = DIV_ROUND_UP(min_uV - 2161000, 38600);
- vsel++;
- }
- break;
- }
-
- return vsel;
-}
-
-static int twl6030smps_set_voltage_sel(struct regulator_dev *rdev,
- unsigned int selector)
-{
- struct twlreg_info *info = rdev_get_drvdata(rdev);
-
- return twlreg_write(info, TWL_MODULE_PM_RECEIVER, VREG_VOLTAGE_SMPS,
- selector);
-}
-
-static int twl6030smps_get_voltage_sel(struct regulator_dev *rdev)
-{
- struct twlreg_info *info = rdev_get_drvdata(rdev);
-
- return twlreg_read(info, TWL_MODULE_PM_RECEIVER, VREG_VOLTAGE_SMPS);
-}
-
-static struct regulator_ops twlsmps_ops = {
- .list_voltage = twl6030smps_list_voltage,
- .map_voltage = twl6030smps_map_voltage,
-
- .set_voltage_sel = twl6030smps_set_voltage_sel,
- .get_voltage_sel = twl6030smps_get_voltage_sel,
-
- .enable = twl6030reg_enable,
- .disable = twl6030reg_disable,
- .is_enabled = twl6030reg_is_enabled,
-
- .set_mode = twl6030reg_set_mode,
-
- .get_status = twl6030reg_get_status,
-};
-
/*----------------------------------------------------------------------*/
-#define TWL4030_FIXED_LDO(label, offset, mVolts, num, turnon_delay, \
- remap_conf) \
- TWL_FIXED_LDO(label, offset, mVolts, num, turnon_delay, \
- remap_conf, TWL4030, twl4030fixed_ops, \
- twl4030reg_map_mode)
-#define TWL6030_FIXED_LDO(label, offset, mVolts, turnon_delay) \
- TWL_FIXED_LDO(label, offset, mVolts, 0x0, turnon_delay, \
- 0x0, TWL6030, twl6030fixed_ops, NULL)
-
#define TWL4030_ADJUSTABLE_LDO(label, offset, num, turnon_delay, remap_conf) \
static const struct twlreg_info TWL4030_INFO_##label = { \
.base = offset, \
@@ -942,79 +486,22 @@ static const struct twlreg_info TWL4030_INFO_##label = { \
}, \
}
-#define TWL6030_ADJUSTABLE_SMPS(label) \
-static const struct twlreg_info TWL6030_INFO_##label = { \
- .desc = { \
- .name = #label, \
- .id = TWL6030_REG_##label, \
- .ops = &twl6030coresmps_ops, \
- .type = REGULATOR_VOLTAGE, \
- .owner = THIS_MODULE, \
- }, \
- }
-
-#define TWL6030_ADJUSTABLE_LDO(label, offset, min_mVolts, max_mVolts) \
-static const struct twlreg_info TWL6030_INFO_##label = { \
- .base = offset, \
- .min_mV = min_mVolts, \
- .max_mV = max_mVolts, \
- .desc = { \
- .name = #label, \
- .id = TWL6030_REG_##label, \
- .n_voltages = 32, \
- .ops = &twl6030ldo_ops, \
- .type = REGULATOR_VOLTAGE, \
- .owner = THIS_MODULE, \
- }, \
- }
-
-#define TWL6032_ADJUSTABLE_LDO(label, offset, min_mVolts, max_mVolts) \
-static const struct twlreg_info TWL6032_INFO_##label = { \
- .base = offset, \
- .min_mV = min_mVolts, \
- .max_mV = max_mVolts, \
- .desc = { \
- .name = #label, \
- .id = TWL6032_REG_##label, \
- .n_voltages = 32, \
- .ops = &twl6030ldo_ops, \
- .type = REGULATOR_VOLTAGE, \
- .owner = THIS_MODULE, \
- }, \
- }
-
-#define TWL_FIXED_LDO(label, offset, mVolts, num, turnon_delay, remap_conf, \
- family, operations, map_mode) \
+#define TWL4030_FIXED_LDO(label, offset, mVolts, num, turnon_delay, \
+ remap_conf) \
static const struct twlreg_info TWLFIXED_INFO_##label = { \
.base = offset, \
.id = num, \
- .min_mV = mVolts, \
.remap = remap_conf, \
.desc = { \
.name = #label, \
- .id = family##_REG_##label, \
+ .id = TWL4030##_REG_##label, \
.n_voltages = 1, \
- .ops = &operations, \
+ .ops = &twl4030fixed_ops, \
.type = REGULATOR_VOLTAGE, \
.owner = THIS_MODULE, \
.min_uV = mVolts * 1000, \
.enable_time = turnon_delay, \
- .of_map_mode = map_mode, \
- }, \
- }
-
-#define TWL6032_ADJUSTABLE_SMPS(label, offset) \
-static const struct twlreg_info TWLSMPS_INFO_##label = { \
- .base = offset, \
- .min_mV = 600, \
- .max_mV = 2100, \
- .desc = { \
- .name = #label, \
- .id = TWL6032_REG_##label, \
- .n_voltages = 63, \
- .ops = &twlsmps_ops, \
- .type = REGULATOR_VOLTAGE, \
- .owner = THIS_MODULE, \
+ .of_map_mode = twl4030reg_map_mode, \
}, \
}
@@ -1038,60 +525,11 @@ TWL4030_ADJUSTABLE_LDO(VIO, 0x4b, 14, 1000, 0x08);
TWL4030_ADJUSTABLE_SMPS(VDD1, 0x55, 15, 1000, 0x08);
TWL4030_ADJUSTABLE_SMPS(VDD2, 0x63, 16, 1000, 0x08);
/* VUSBCP is managed *only* by the USB subchip */
-/* 6030 REG with base as PMC Slave Misc : 0x0030 */
-/* Turnon-delay and remap configuration values for 6030 are not
- verified since the specification is not public */
-TWL6030_ADJUSTABLE_SMPS(VDD1);
-TWL6030_ADJUSTABLE_SMPS(VDD2);
-TWL6030_ADJUSTABLE_SMPS(VDD3);
-TWL6030_ADJUSTABLE_LDO(VAUX1_6030, 0x54, 1000, 3300);
-TWL6030_ADJUSTABLE_LDO(VAUX2_6030, 0x58, 1000, 3300);
-TWL6030_ADJUSTABLE_LDO(VAUX3_6030, 0x5c, 1000, 3300);
-TWL6030_ADJUSTABLE_LDO(VMMC, 0x68, 1000, 3300);
-TWL6030_ADJUSTABLE_LDO(VPP, 0x6c, 1000, 3300);
-TWL6030_ADJUSTABLE_LDO(VUSIM, 0x74, 1000, 3300);
-/* 6025 are renamed compared to 6030 versions */
-TWL6032_ADJUSTABLE_LDO(LDO2, 0x54, 1000, 3300);
-TWL6032_ADJUSTABLE_LDO(LDO4, 0x58, 1000, 3300);
-TWL6032_ADJUSTABLE_LDO(LDO3, 0x5c, 1000, 3300);
-TWL6032_ADJUSTABLE_LDO(LDO5, 0x68, 1000, 3300);
-TWL6032_ADJUSTABLE_LDO(LDO1, 0x6c, 1000, 3300);
-TWL6032_ADJUSTABLE_LDO(LDO7, 0x74, 1000, 3300);
-TWL6032_ADJUSTABLE_LDO(LDO6, 0x60, 1000, 3300);
-TWL6032_ADJUSTABLE_LDO(LDOLN, 0x64, 1000, 3300);
-TWL6032_ADJUSTABLE_LDO(LDOUSB, 0x70, 1000, 3300);
TWL4030_FIXED_LDO(VINTANA1, 0x3f, 1500, 11, 100, 0x08);
TWL4030_FIXED_LDO(VINTDIG, 0x47, 1500, 13, 100, 0x08);
TWL4030_FIXED_LDO(VUSB1V5, 0x71, 1500, 17, 100, 0x08);
TWL4030_FIXED_LDO(VUSB1V8, 0x74, 1800, 18, 100, 0x08);
TWL4030_FIXED_LDO(VUSB3V1, 0x77, 3100, 19, 150, 0x08);
-TWL6030_FIXED_LDO(VANA, 0x50, 2100, 0);
-TWL6030_FIXED_LDO(VCXIO, 0x60, 1800, 0);
-TWL6030_FIXED_LDO(VDAC, 0x64, 1800, 0);
-TWL6030_FIXED_LDO(VUSB, 0x70, 3300, 0);
-TWL6030_FIXED_LDO(V1V8, 0x16, 1800, 0);
-TWL6030_FIXED_LDO(V2V1, 0x1c, 2100, 0);
-TWL6032_ADJUSTABLE_SMPS(SMPS3, 0x34);
-TWL6032_ADJUSTABLE_SMPS(SMPS4, 0x10);
-TWL6032_ADJUSTABLE_SMPS(VIO, 0x16);
-
-static u8 twl_get_smps_offset(void)
-{
- u8 value;
-
- twl_i2c_read_u8(TWL_MODULE_PM_RECEIVER, &value,
- TWL6030_SMPS_OFFSET);
- return value;
-}
-
-static u8 twl_get_smps_mult(void)
-{
- u8 value;
-
- twl_i2c_read_u8(TWL_MODULE_PM_RECEIVER, &value,
- TWL6030_SMPS_MULT);
- return value;
-}
#define TWL_OF_MATCH(comp, family, label) \
{ \
@@ -1121,81 +559,37 @@ static const struct of_device_id twl_of_match[] = {
TWL4030_OF_MATCH("ti,twl4030-vio", VIO),
TWL4030_OF_MATCH("ti,twl4030-vdd1", VDD1),
TWL4030_OF_MATCH("ti,twl4030-vdd2", VDD2),
- TWL6030_OF_MATCH("ti,twl6030-vdd1", VDD1),
- TWL6030_OF_MATCH("ti,twl6030-vdd2", VDD2),
- TWL6030_OF_MATCH("ti,twl6030-vdd3", VDD3),
- TWL6030_OF_MATCH("ti,twl6030-vaux1", VAUX1_6030),
- TWL6030_OF_MATCH("ti,twl6030-vaux2", VAUX2_6030),
- TWL6030_OF_MATCH("ti,twl6030-vaux3", VAUX3_6030),
- TWL6030_OF_MATCH("ti,twl6030-vmmc", VMMC),
- TWL6030_OF_MATCH("ti,twl6030-vpp", VPP),
- TWL6030_OF_MATCH("ti,twl6030-vusim", VUSIM),
- TWL6032_OF_MATCH("ti,twl6032-ldo2", LDO2),
- TWL6032_OF_MATCH("ti,twl6032-ldo4", LDO4),
- TWL6032_OF_MATCH("ti,twl6032-ldo3", LDO3),
- TWL6032_OF_MATCH("ti,twl6032-ldo5", LDO5),
- TWL6032_OF_MATCH("ti,twl6032-ldo1", LDO1),
- TWL6032_OF_MATCH("ti,twl6032-ldo7", LDO7),
- TWL6032_OF_MATCH("ti,twl6032-ldo6", LDO6),
- TWL6032_OF_MATCH("ti,twl6032-ldoln", LDOLN),
- TWL6032_OF_MATCH("ti,twl6032-ldousb", LDOUSB),
TWLFIXED_OF_MATCH("ti,twl4030-vintana1", VINTANA1),
TWLFIXED_OF_MATCH("ti,twl4030-vintdig", VINTDIG),
TWLFIXED_OF_MATCH("ti,twl4030-vusb1v5", VUSB1V5),
TWLFIXED_OF_MATCH("ti,twl4030-vusb1v8", VUSB1V8),
TWLFIXED_OF_MATCH("ti,twl4030-vusb3v1", VUSB3V1),
- TWLFIXED_OF_MATCH("ti,twl6030-vana", VANA),
- TWLFIXED_OF_MATCH("ti,twl6030-vcxio", VCXIO),
- TWLFIXED_OF_MATCH("ti,twl6030-vdac", VDAC),
- TWLFIXED_OF_MATCH("ti,twl6030-vusb", VUSB),
- TWLFIXED_OF_MATCH("ti,twl6030-v1v8", V1V8),
- TWLFIXED_OF_MATCH("ti,twl6030-v2v1", V2V1),
- TWLSMPS_OF_MATCH("ti,twl6032-smps3", SMPS3),
- TWLSMPS_OF_MATCH("ti,twl6032-smps4", SMPS4),
- TWLSMPS_OF_MATCH("ti,twl6032-vio", VIO),
{},
};
MODULE_DEVICE_TABLE(of, twl_of_match);
static int twlreg_probe(struct platform_device *pdev)
{
- int i, id;
+ int id;
struct twlreg_info *info;
const struct twlreg_info *template;
struct regulator_init_data *initdata;
struct regulation_constraints *c;
struct regulator_dev *rdev;
- struct twl_regulator_driver_data *drvdata;
const struct of_device_id *match;
struct regulator_config config = { };
match = of_match_device(twl_of_match, &pdev->dev);
- if (match) {
- template = match->data;
- id = template->desc.id;
- initdata = of_get_regulator_init_data(&pdev->dev,
- pdev->dev.of_node,
- &template->desc);
- drvdata = NULL;
- } else {
- id = pdev->id;
- initdata = dev_get_platdata(&pdev->dev);
- for (i = 0, template = NULL; i < ARRAY_SIZE(twl_of_match); i++) {
- template = twl_of_match[i].data;
- if (template && template->desc.id == id)
- break;
- }
- if (i == ARRAY_SIZE(twl_of_match))
- return -ENODEV;
-
- drvdata = initdata->driver_data;
- if (!drvdata)
- return -EINVAL;
- }
+ if (!match)
+ return -ENODEV;
+ template = match->data;
if (!template)
return -ENODEV;
+ id = template->desc.id;
+ initdata = of_get_regulator_init_data(&pdev->dev, pdev->dev.of_node,
+ &template->desc);
if (!initdata)
return -EINVAL;
@@ -1203,14 +597,6 @@ static int twlreg_probe(struct platform_device *pdev)
if (!info)
return -ENOMEM;
- if (drvdata) {
- /* copy the driver data into regulator data */
- info->features = drvdata->features;
- info->data = drvdata->data;
- info->set_voltage = drvdata->set_voltage;
- info->get_voltage = drvdata->get_voltage;
- }
-
/* Constrain board-specific capabilities according to what
* this driver and the chip itself can actually do.
*/
@@ -1233,27 +619,6 @@ static int twlreg_probe(struct platform_device *pdev)
break;
}
- switch (id) {
- case TWL6032_REG_SMPS3:
- if (twl_get_smps_mult() & SMPS_MULTOFFSET_SMPS3)
- info->flags |= SMPS_EXTENDED_EN;
- if (twl_get_smps_offset() & SMPS_MULTOFFSET_SMPS3)
- info->flags |= SMPS_OFFSET_EN;
- break;
- case TWL6032_REG_SMPS4:
- if (twl_get_smps_mult() & SMPS_MULTOFFSET_SMPS4)
- info->flags |= SMPS_EXTENDED_EN;
- if (twl_get_smps_offset() & SMPS_MULTOFFSET_SMPS4)
- info->flags |= SMPS_OFFSET_EN;
- break;
- case TWL6032_REG_VIO:
- if (twl_get_smps_mult() & SMPS_MULTOFFSET_VIO)
- info->flags |= SMPS_EXTENDED_EN;
- if (twl_get_smps_offset() & SMPS_MULTOFFSET_VIO)
- info->flags |= SMPS_OFFSET_EN;
- break;
- }
-
config.dev = &pdev->dev;
config.init_data = initdata;
config.driver_data = info;
@@ -1267,9 +632,7 @@ static int twlreg_probe(struct platform_device *pdev)
}
platform_set_drvdata(pdev, rdev);
- if (twl_class_is_4030())
- twlreg_write(info, TWL_MODULE_PM_RECEIVER, VREG_REMAP,
- info->remap);
+ twlreg_write(info, TWL_MODULE_PM_RECEIVER, VREG_REMAP, info->remap);
/* NOTE: many regulators support short-circuit IRQs (presentable
* as REGULATOR_OVER_CURRENT notifications?) configured via:
@@ -1282,7 +645,7 @@ static int twlreg_probe(struct platform_device *pdev)
return 0;
}
-MODULE_ALIAS("platform:twl_reg");
+MODULE_ALIAS("platform:twl4030_reg");
static struct platform_driver twlreg_driver = {
.probe = twlreg_probe,
@@ -1290,7 +653,7 @@ static struct platform_driver twlreg_driver = {
* "twl_regulator.12" (and friends) to "twl_regulator.1".
*/
.driver = {
- .name = "twl_reg",
+ .name = "twl4030_reg",
.of_match_table = of_match_ptr(twl_of_match),
},
};
@@ -1307,5 +670,5 @@ static void __exit twlreg_exit(void)
}
module_exit(twlreg_exit)
-MODULE_DESCRIPTION("TWL regulator driver");
+MODULE_DESCRIPTION("TWL4030 regulator driver");
MODULE_LICENSE("GPL");
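
Aside: the reworked TWL4030_FIXED_LDO macro above hardcodes twl4030fixed_ops and TWL4030_REG_##label now that the 6030 variants are gone. A minimal, compilable sketch of the same stringify/token-pasting pattern, using stand-in struct types and the VUSB1V5 values from the table above (not the kernel's real twlreg_info):

#include <stdio.h>

/* Stand-ins only: trimmed-down versions of the descriptor structs, to
 * show how one macro invocation builds one named static descriptor. */
struct desc { const char *name; int min_uV; int enable_time; };
struct info { int base; int id; int remap; struct desc desc; };

#define FIXED_LDO(label, offset, mVolts, num, turnon_delay, remap_conf) \
static const struct info INFO_##label = {				\
	.base = offset,							\
	.id = num,							\
	.remap = remap_conf,						\
	.desc = {							\
		.name = #label,		/* stringified label */		\
		.min_uV = mVolts * 1000,				\
		.enable_time = turnon_delay,	/* in us */		\
	},								\
}

FIXED_LDO(VUSB1V5, 0x71, 1500, 17, 100, 0x08);	/* as in the driver table */

int main(void)
{
	printf("%s: base 0x%02x, %d uV, enable %d us\n",
	       INFO_VUSB1V5.desc.name, INFO_VUSB1V5.base,
	       INFO_VUSB1V5.desc.min_uV, INFO_VUSB1V5.desc.enable_time);
	return 0;
}
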
diff --git a/drivers/regulator/twl6030-regulator.c b/drivers/regulator/twl6030-regulator.c
new file mode 100644
index 000000000000..4864b9d742c0
--- /dev/null
+++ b/drivers/regulator/twl6030-regulator.c
@@ -0,0 +1,793 @@
+/*
+ * Split TWL6030 logic from twl-regulator.c:
+ * Copyright (C) 2008 David Brownell
+ *
+ * Copyright (C) 2016 Nicolae Rosia <nicolae.rosia@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/string.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+#include <linux/regulator/of_regulator.h>
+#include <linux/i2c/twl.h>
+#include <linux/delay.h>
+
+struct twlreg_info {
+ /* start of regulator's PM_RECEIVER control register bank */
+ u8 base;
+
+ /* twl resource ID, for resource control state machine */
+ u8 id;
+
+ /* chip constraints on regulator behavior */
+ u16 min_mV;
+
+ u8 flags;
+
+ /* used by regulator core */
+ struct regulator_desc desc;
+
+ /* chip specific features */
+ unsigned long features;
+
+ /* data passed from board for external get/set voltage */
+ void *data;
+};
+
+
+/* LDO control registers ... offset is from the base of its register bank.
+ * The first three registers of all power resource banks help hardware to
+ * manage the various resource groups.
+ */
+/* Common offset in TWL4030/6030 */
+#define VREG_GRP 0
+/* TWL6030 register offsets */
+#define VREG_TRANS 1
+#define VREG_STATE 2
+#define VREG_VOLTAGE 3
+#define VREG_VOLTAGE_SMPS 4
+/* TWL6030 Misc register offsets */
+#define VREG_BC_ALL 1
+#define VREG_BC_REF 2
+#define VREG_BC_PROC 3
+#define VREG_BC_CLK_RST 4
+
+/* TWL6030 LDO register values for CFG_STATE */
+#define TWL6030_CFG_STATE_OFF 0x00
+#define TWL6030_CFG_STATE_ON 0x01
+#define TWL6030_CFG_STATE_OFF2 0x02
+#define TWL6030_CFG_STATE_SLEEP 0x03
+#define TWL6030_CFG_STATE_GRP_SHIFT 5
+#define TWL6030_CFG_STATE_APP_SHIFT 2
+#define TWL6030_CFG_STATE_APP_MASK (0x03 << TWL6030_CFG_STATE_APP_SHIFT)
+#define TWL6030_CFG_STATE_APP(v) (((v) & TWL6030_CFG_STATE_APP_MASK) >>\
+ TWL6030_CFG_STATE_APP_SHIFT)
+
+/* Flags for SMPS Voltage reading */
+#define SMPS_OFFSET_EN BIT(0)
+#define SMPS_EXTENDED_EN BIT(1)
+
+/* twl6032 SMPS EPROM values */
+#define TWL6030_SMPS_OFFSET 0xB0
+#define TWL6030_SMPS_MULT 0xB3
+#define SMPS_MULTOFFSET_SMPS4 BIT(0)
+#define SMPS_MULTOFFSET_VIO BIT(1)
+#define SMPS_MULTOFFSET_SMPS3 BIT(6)
+
+static inline int
+twlreg_read(struct twlreg_info *info, unsigned slave_subgp, unsigned offset)
+{
+ u8 value;
+ int status;
+
+ status = twl_i2c_read_u8(slave_subgp,
+ &value, info->base + offset);
+ return (status < 0) ? status : value;
+}
+
+static inline int
+twlreg_write(struct twlreg_info *info, unsigned slave_subgp, unsigned offset,
+ u8 value)
+{
+ return twl_i2c_write_u8(slave_subgp,
+ value, info->base + offset);
+}
+
+/* generic power resource operations, which work on all regulators */
+static int twlreg_grp(struct regulator_dev *rdev)
+{
+ return twlreg_read(rdev_get_drvdata(rdev), TWL_MODULE_PM_RECEIVER,
+ VREG_GRP);
+}
+
+/*
+ * Enable/disable regulators by joining/leaving the P1 (processor) group.
+ * We assume nobody else is updating the DEV_GRP registers.
+ */
+/* definition for 6030 family */
+#define P3_GRP_6030 BIT(2) /* secondary processor, modem, etc */
+#define P2_GRP_6030 BIT(1) /* "peripherals" */
+#define P1_GRP_6030 BIT(0) /* CPU/Linux */
+
+static int twl6030reg_is_enabled(struct regulator_dev *rdev)
+{
+ struct twlreg_info *info = rdev_get_drvdata(rdev);
+ int grp = 0, val;
+
+ if (!(twl_class_is_6030() && (info->features & TWL6032_SUBCLASS))) {
+ grp = twlreg_grp(rdev);
+ if (grp < 0)
+ return grp;
+ grp &= P1_GRP_6030;
+ } else {
+ grp = 1;
+ }
+
+ val = twlreg_read(info, TWL_MODULE_PM_RECEIVER, VREG_STATE);
+ val = TWL6030_CFG_STATE_APP(val);
+
+ return grp && (val == TWL6030_CFG_STATE_ON);
+}
+
+#define PB_I2C_BUSY BIT(0)
+#define PB_I2C_BWEN BIT(1)
+
+
+static int twl6030reg_enable(struct regulator_dev *rdev)
+{
+ struct twlreg_info *info = rdev_get_drvdata(rdev);
+ int grp = 0;
+ int ret;
+
+ if (!(twl_class_is_6030() && (info->features & TWL6032_SUBCLASS)))
+ grp = twlreg_grp(rdev);
+ if (grp < 0)
+ return grp;
+
+ ret = twlreg_write(info, TWL_MODULE_PM_RECEIVER, VREG_STATE,
+ grp << TWL6030_CFG_STATE_GRP_SHIFT |
+ TWL6030_CFG_STATE_ON);
+ return ret;
+}
+
+static int twl6030reg_disable(struct regulator_dev *rdev)
+{
+ struct twlreg_info *info = rdev_get_drvdata(rdev);
+ int grp = 0;
+ int ret;
+
+ if (!(twl_class_is_6030() && (info->features & TWL6032_SUBCLASS)))
+ grp = P1_GRP_6030 | P2_GRP_6030 | P3_GRP_6030;
+
+	/* For 6030, set the OFF state for all enabled groups */
+ ret = twlreg_write(info, TWL_MODULE_PM_RECEIVER, VREG_STATE,
+ (grp) << TWL6030_CFG_STATE_GRP_SHIFT |
+ TWL6030_CFG_STATE_OFF);
+
+ return ret;
+}
+
+static int twl6030reg_get_status(struct regulator_dev *rdev)
+{
+ struct twlreg_info *info = rdev_get_drvdata(rdev);
+ int val;
+
+ val = twlreg_grp(rdev);
+ if (val < 0)
+ return val;
+
+ val = twlreg_read(info, TWL_MODULE_PM_RECEIVER, VREG_STATE);
+
+ switch (TWL6030_CFG_STATE_APP(val)) {
+ case TWL6030_CFG_STATE_ON:
+ return REGULATOR_STATUS_NORMAL;
+
+ case TWL6030_CFG_STATE_SLEEP:
+ return REGULATOR_STATUS_STANDBY;
+
+ case TWL6030_CFG_STATE_OFF:
+ case TWL6030_CFG_STATE_OFF2:
+ default:
+ break;
+ }
+
+ return REGULATOR_STATUS_OFF;
+}
+
+static int twl6030reg_set_mode(struct regulator_dev *rdev, unsigned mode)
+{
+ struct twlreg_info *info = rdev_get_drvdata(rdev);
+ int grp = 0;
+ int val;
+
+ if (!(twl_class_is_6030() && (info->features & TWL6032_SUBCLASS)))
+ grp = twlreg_grp(rdev);
+
+ if (grp < 0)
+ return grp;
+
+ /* Compose the state register settings */
+ val = grp << TWL6030_CFG_STATE_GRP_SHIFT;
+ /* We can only set the mode through state machine commands... */
+ switch (mode) {
+ case REGULATOR_MODE_NORMAL:
+ val |= TWL6030_CFG_STATE_ON;
+ break;
+ case REGULATOR_MODE_STANDBY:
+ val |= TWL6030_CFG_STATE_SLEEP;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ return twlreg_write(info, TWL_MODULE_PM_RECEIVER, VREG_STATE, val);
+}
+
+static int twl6030coresmps_set_voltage(struct regulator_dev *rdev, int min_uV,
+ int max_uV, unsigned *selector)
+{
+ return -ENODEV;
+}
+
+static int twl6030coresmps_get_voltage(struct regulator_dev *rdev)
+{
+ return -ENODEV;
+}
+
+static struct regulator_ops twl6030coresmps_ops = {
+ .set_voltage = twl6030coresmps_set_voltage,
+ .get_voltage = twl6030coresmps_get_voltage,
+};
+
+static int twl6030ldo_list_voltage(struct regulator_dev *rdev, unsigned sel)
+{
+ struct twlreg_info *info = rdev_get_drvdata(rdev);
+
+ switch (sel) {
+ case 0:
+ return 0;
+ case 1 ... 24:
+ /* Linear mapping from 00000001 to 00011000:
+ * Absolute voltage value = 1.0 V + 0.1 V × (sel – 00000001)
+ */
+ return (info->min_mV + 100 * (sel - 1)) * 1000;
+ case 25 ... 30:
+ return -EINVAL;
+ case 31:
+ return 2750000;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int
+twl6030ldo_set_voltage_sel(struct regulator_dev *rdev, unsigned selector)
+{
+ struct twlreg_info *info = rdev_get_drvdata(rdev);
+
+ return twlreg_write(info, TWL_MODULE_PM_RECEIVER, VREG_VOLTAGE,
+ selector);
+}
+
+static int twl6030ldo_get_voltage_sel(struct regulator_dev *rdev)
+{
+ struct twlreg_info *info = rdev_get_drvdata(rdev);
+ int vsel = twlreg_read(info, TWL_MODULE_PM_RECEIVER, VREG_VOLTAGE);
+
+ return vsel;
+}
+
+static struct regulator_ops twl6030ldo_ops = {
+ .list_voltage = twl6030ldo_list_voltage,
+
+ .set_voltage_sel = twl6030ldo_set_voltage_sel,
+ .get_voltage_sel = twl6030ldo_get_voltage_sel,
+
+ .enable = twl6030reg_enable,
+ .disable = twl6030reg_disable,
+ .is_enabled = twl6030reg_is_enabled,
+
+ .set_mode = twl6030reg_set_mode,
+
+ .get_status = twl6030reg_get_status,
+};
+
+static struct regulator_ops twl6030fixed_ops = {
+ .list_voltage = regulator_list_voltage_linear,
+
+ .enable = twl6030reg_enable,
+ .disable = twl6030reg_disable,
+ .is_enabled = twl6030reg_is_enabled,
+
+ .set_mode = twl6030reg_set_mode,
+
+ .get_status = twl6030reg_get_status,
+};
+
+/*
+ * SMPS status and control
+ */
+
+static int twl6030smps_list_voltage(struct regulator_dev *rdev, unsigned index)
+{
+ struct twlreg_info *info = rdev_get_drvdata(rdev);
+
+ int voltage = 0;
+
+ switch (info->flags) {
+ case SMPS_OFFSET_EN:
+ voltage = 100000;
+ /* fall through */
+ case 0:
+ switch (index) {
+ case 0:
+ voltage = 0;
+ break;
+ case 58:
+ voltage = 1350 * 1000;
+ break;
+ case 59:
+ voltage = 1500 * 1000;
+ break;
+ case 60:
+ voltage = 1800 * 1000;
+ break;
+ case 61:
+ voltage = 1900 * 1000;
+ break;
+ case 62:
+ voltage = 2100 * 1000;
+ break;
+ default:
+ voltage += (600000 + (12500 * (index - 1)));
+ }
+ break;
+ case SMPS_EXTENDED_EN:
+ switch (index) {
+ case 0:
+ voltage = 0;
+ break;
+ case 58:
+ voltage = 2084 * 1000;
+ break;
+ case 59:
+ voltage = 2315 * 1000;
+ break;
+ case 60:
+ voltage = 2778 * 1000;
+ break;
+ case 61:
+ voltage = 2932 * 1000;
+ break;
+ case 62:
+ voltage = 3241 * 1000;
+ break;
+ default:
+ voltage = (1852000 + (38600 * (index - 1)));
+ }
+ break;
+ case SMPS_OFFSET_EN | SMPS_EXTENDED_EN:
+ switch (index) {
+ case 0:
+ voltage = 0;
+ break;
+ case 58:
+ voltage = 4167 * 1000;
+ break;
+ case 59:
+ voltage = 2315 * 1000;
+ break;
+ case 60:
+ voltage = 2778 * 1000;
+ break;
+ case 61:
+ voltage = 2932 * 1000;
+ break;
+ case 62:
+ voltage = 3241 * 1000;
+ break;
+ default:
+ voltage = (2161000 + (38600 * (index - 1)));
+ }
+ break;
+ }
+
+ return voltage;
+}
+
+static int twl6030smps_map_voltage(struct regulator_dev *rdev, int min_uV,
+ int max_uV)
+{
+ struct twlreg_info *info = rdev_get_drvdata(rdev);
+ int vsel = 0;
+
+ switch (info->flags) {
+ case 0:
+ if (min_uV == 0)
+ vsel = 0;
+ else if ((min_uV >= 600000) && (min_uV <= 1300000)) {
+ vsel = DIV_ROUND_UP(min_uV - 600000, 12500);
+ vsel++;
+ }
+		/* Values 1..57 for vsel are linear and can be calculated;
+		 * values 58..62 are non-linear.
+		 */
+ else if ((min_uV > 1900000) && (min_uV <= 2100000))
+ vsel = 62;
+ else if ((min_uV > 1800000) && (min_uV <= 1900000))
+ vsel = 61;
+ else if ((min_uV > 1500000) && (min_uV <= 1800000))
+ vsel = 60;
+ else if ((min_uV > 1350000) && (min_uV <= 1500000))
+ vsel = 59;
+ else if ((min_uV > 1300000) && (min_uV <= 1350000))
+ vsel = 58;
+ else
+ return -EINVAL;
+ break;
+ case SMPS_OFFSET_EN:
+ if (min_uV == 0)
+ vsel = 0;
+ else if ((min_uV >= 700000) && (min_uV <= 1420000)) {
+ vsel = DIV_ROUND_UP(min_uV - 700000, 12500);
+ vsel++;
+ }
+		/* Values 1..57 for vsel are linear and can be calculated;
+		 * values 58..62 are non-linear.
+		 */
+ else if ((min_uV > 1900000) && (min_uV <= 2100000))
+ vsel = 62;
+ else if ((min_uV > 1800000) && (min_uV <= 1900000))
+ vsel = 61;
+		else if ((min_uV > 1500000) && (min_uV <= 1800000))
+ vsel = 60;
+ else if ((min_uV > 1350000) && (min_uV <= 1500000))
+ vsel = 59;
+ else if ((min_uV > 1300000) && (min_uV <= 1350000))
+ vsel = 58;
+ else
+ return -EINVAL;
+ break;
+ case SMPS_EXTENDED_EN:
+ if (min_uV == 0) {
+ vsel = 0;
+ } else if ((min_uV >= 1852000) && (max_uV <= 4013600)) {
+ vsel = DIV_ROUND_UP(min_uV - 1852000, 38600);
+ vsel++;
+ }
+ break;
+ case SMPS_OFFSET_EN|SMPS_EXTENDED_EN:
+ if (min_uV == 0) {
+ vsel = 0;
+ } else if ((min_uV >= 2161000) && (min_uV <= 4321000)) {
+ vsel = DIV_ROUND_UP(min_uV - 2161000, 38600);
+ vsel++;
+ }
+ break;
+ }
+
+ return vsel;
+}
+
+static int twl6030smps_set_voltage_sel(struct regulator_dev *rdev,
+ unsigned int selector)
+{
+ struct twlreg_info *info = rdev_get_drvdata(rdev);
+
+ return twlreg_write(info, TWL_MODULE_PM_RECEIVER, VREG_VOLTAGE_SMPS,
+ selector);
+}
+
+static int twl6030smps_get_voltage_sel(struct regulator_dev *rdev)
+{
+ struct twlreg_info *info = rdev_get_drvdata(rdev);
+
+ return twlreg_read(info, TWL_MODULE_PM_RECEIVER, VREG_VOLTAGE_SMPS);
+}
+
+static struct regulator_ops twlsmps_ops = {
+ .list_voltage = twl6030smps_list_voltage,
+ .map_voltage = twl6030smps_map_voltage,
+
+ .set_voltage_sel = twl6030smps_set_voltage_sel,
+ .get_voltage_sel = twl6030smps_get_voltage_sel,
+
+ .enable = twl6030reg_enable,
+ .disable = twl6030reg_disable,
+ .is_enabled = twl6030reg_is_enabled,
+
+ .set_mode = twl6030reg_set_mode,
+
+ .get_status = twl6030reg_get_status,
+};
+
+/*----------------------------------------------------------------------*/
+
+#define TWL6030_ADJUSTABLE_SMPS(label) \
+static const struct twlreg_info TWL6030_INFO_##label = { \
+ .desc = { \
+ .name = #label, \
+ .id = TWL6030_REG_##label, \
+ .ops = &twl6030coresmps_ops, \
+ .type = REGULATOR_VOLTAGE, \
+ .owner = THIS_MODULE, \
+ }, \
+ }
+
+#define TWL6030_ADJUSTABLE_LDO(label, offset, min_mVolts) \
+static const struct twlreg_info TWL6030_INFO_##label = { \
+ .base = offset, \
+ .min_mV = min_mVolts, \
+ .desc = { \
+ .name = #label, \
+ .id = TWL6030_REG_##label, \
+ .n_voltages = 32, \
+ .ops = &twl6030ldo_ops, \
+ .type = REGULATOR_VOLTAGE, \
+ .owner = THIS_MODULE, \
+ }, \
+ }
+
+#define TWL6032_ADJUSTABLE_LDO(label, offset, min_mVolts) \
+static const struct twlreg_info TWL6032_INFO_##label = { \
+ .base = offset, \
+ .min_mV = min_mVolts, \
+ .desc = { \
+ .name = #label, \
+ .id = TWL6032_REG_##label, \
+ .n_voltages = 32, \
+ .ops = &twl6030ldo_ops, \
+ .type = REGULATOR_VOLTAGE, \
+ .owner = THIS_MODULE, \
+ }, \
+ }
+
+#define TWL6030_FIXED_LDO(label, offset, mVolts, turnon_delay) \
+static const struct twlreg_info TWLFIXED_INFO_##label = { \
+ .base = offset, \
+ .id = 0, \
+ .min_mV = mVolts, \
+ .desc = { \
+ .name = #label, \
+ .id = TWL6030##_REG_##label, \
+ .n_voltages = 1, \
+ .ops = &twl6030fixed_ops, \
+ .type = REGULATOR_VOLTAGE, \
+ .owner = THIS_MODULE, \
+ .min_uV = mVolts * 1000, \
+ .enable_time = turnon_delay, \
+ .of_map_mode = NULL, \
+ }, \
+ }
+
+#define TWL6032_ADJUSTABLE_SMPS(label, offset) \
+static const struct twlreg_info TWLSMPS_INFO_##label = { \
+ .base = offset, \
+ .min_mV = 600, \
+ .desc = { \
+ .name = #label, \
+ .id = TWL6032_REG_##label, \
+ .n_voltages = 63, \
+ .ops = &twlsmps_ops, \
+ .type = REGULATOR_VOLTAGE, \
+ .owner = THIS_MODULE, \
+ }, \
+ }
+
+/* VUSBCP is managed *only* by the USB subchip */
+/* 6030 REG with base as PMC Slave Misc : 0x0030 */
+/* Turnon-delay and remap configuration values for 6030 are not
+ verified since the specification is not public */
+TWL6030_ADJUSTABLE_SMPS(VDD1);
+TWL6030_ADJUSTABLE_SMPS(VDD2);
+TWL6030_ADJUSTABLE_SMPS(VDD3);
+TWL6030_ADJUSTABLE_LDO(VAUX1_6030, 0x54, 1000);
+TWL6030_ADJUSTABLE_LDO(VAUX2_6030, 0x58, 1000);
+TWL6030_ADJUSTABLE_LDO(VAUX3_6030, 0x5c, 1000);
+TWL6030_ADJUSTABLE_LDO(VMMC, 0x68, 1000);
+TWL6030_ADJUSTABLE_LDO(VPP, 0x6c, 1000);
+TWL6030_ADJUSTABLE_LDO(VUSIM, 0x74, 1000);
+/* 6025 are renamed compared to 6030 versions */
+TWL6032_ADJUSTABLE_LDO(LDO2, 0x54, 1000);
+TWL6032_ADJUSTABLE_LDO(LDO4, 0x58, 1000);
+TWL6032_ADJUSTABLE_LDO(LDO3, 0x5c, 1000);
+TWL6032_ADJUSTABLE_LDO(LDO5, 0x68, 1000);
+TWL6032_ADJUSTABLE_LDO(LDO1, 0x6c, 1000);
+TWL6032_ADJUSTABLE_LDO(LDO7, 0x74, 1000);
+TWL6032_ADJUSTABLE_LDO(LDO6, 0x60, 1000);
+TWL6032_ADJUSTABLE_LDO(LDOLN, 0x64, 1000);
+TWL6032_ADJUSTABLE_LDO(LDOUSB, 0x70, 1000);
+TWL6030_FIXED_LDO(VANA, 0x50, 2100, 0);
+TWL6030_FIXED_LDO(VCXIO, 0x60, 1800, 0);
+TWL6030_FIXED_LDO(VDAC, 0x64, 1800, 0);
+TWL6030_FIXED_LDO(VUSB, 0x70, 3300, 0);
+TWL6030_FIXED_LDO(V1V8, 0x16, 1800, 0);
+TWL6030_FIXED_LDO(V2V1, 0x1c, 2100, 0);
+TWL6032_ADJUSTABLE_SMPS(SMPS3, 0x34);
+TWL6032_ADJUSTABLE_SMPS(SMPS4, 0x10);
+TWL6032_ADJUSTABLE_SMPS(VIO, 0x16);
+
+static u8 twl_get_smps_offset(void)
+{
+ u8 value;
+
+ twl_i2c_read_u8(TWL_MODULE_PM_RECEIVER, &value,
+ TWL6030_SMPS_OFFSET);
+ return value;
+}
+
+static u8 twl_get_smps_mult(void)
+{
+ u8 value;
+
+ twl_i2c_read_u8(TWL_MODULE_PM_RECEIVER, &value,
+ TWL6030_SMPS_MULT);
+ return value;
+}
+
+#define TWL_OF_MATCH(comp, family, label) \
+ { \
+ .compatible = comp, \
+ .data = &family##_INFO_##label, \
+ }
+
+#define TWL6030_OF_MATCH(comp, label) TWL_OF_MATCH(comp, TWL6030, label)
+#define TWL6032_OF_MATCH(comp, label) TWL_OF_MATCH(comp, TWL6032, label)
+#define TWLFIXED_OF_MATCH(comp, label) TWL_OF_MATCH(comp, TWLFIXED, label)
+#define TWLSMPS_OF_MATCH(comp, label) TWL_OF_MATCH(comp, TWLSMPS, label)
+
+static const struct of_device_id twl_of_match[] = {
+ TWL6030_OF_MATCH("ti,twl6030-vdd1", VDD1),
+ TWL6030_OF_MATCH("ti,twl6030-vdd2", VDD2),
+ TWL6030_OF_MATCH("ti,twl6030-vdd3", VDD3),
+ TWL6030_OF_MATCH("ti,twl6030-vaux1", VAUX1_6030),
+ TWL6030_OF_MATCH("ti,twl6030-vaux2", VAUX2_6030),
+ TWL6030_OF_MATCH("ti,twl6030-vaux3", VAUX3_6030),
+ TWL6030_OF_MATCH("ti,twl6030-vmmc", VMMC),
+ TWL6030_OF_MATCH("ti,twl6030-vpp", VPP),
+ TWL6030_OF_MATCH("ti,twl6030-vusim", VUSIM),
+ TWL6032_OF_MATCH("ti,twl6032-ldo2", LDO2),
+ TWL6032_OF_MATCH("ti,twl6032-ldo4", LDO4),
+ TWL6032_OF_MATCH("ti,twl6032-ldo3", LDO3),
+ TWL6032_OF_MATCH("ti,twl6032-ldo5", LDO5),
+ TWL6032_OF_MATCH("ti,twl6032-ldo1", LDO1),
+ TWL6032_OF_MATCH("ti,twl6032-ldo7", LDO7),
+ TWL6032_OF_MATCH("ti,twl6032-ldo6", LDO6),
+ TWL6032_OF_MATCH("ti,twl6032-ldoln", LDOLN),
+ TWL6032_OF_MATCH("ti,twl6032-ldousb", LDOUSB),
+ TWLFIXED_OF_MATCH("ti,twl6030-vana", VANA),
+ TWLFIXED_OF_MATCH("ti,twl6030-vcxio", VCXIO),
+ TWLFIXED_OF_MATCH("ti,twl6030-vdac", VDAC),
+ TWLFIXED_OF_MATCH("ti,twl6030-vusb", VUSB),
+ TWLFIXED_OF_MATCH("ti,twl6030-v1v8", V1V8),
+ TWLFIXED_OF_MATCH("ti,twl6030-v2v1", V2V1),
+ TWLSMPS_OF_MATCH("ti,twl6032-smps3", SMPS3),
+ TWLSMPS_OF_MATCH("ti,twl6032-smps4", SMPS4),
+ TWLSMPS_OF_MATCH("ti,twl6032-vio", VIO),
+ {},
+};
+MODULE_DEVICE_TABLE(of, twl_of_match);
+
+static int twlreg_probe(struct platform_device *pdev)
+{
+ int id;
+ struct twlreg_info *info;
+ const struct twlreg_info *template;
+ struct regulator_init_data *initdata;
+ struct regulation_constraints *c;
+ struct regulator_dev *rdev;
+ const struct of_device_id *match;
+ struct regulator_config config = { };
+
+ match = of_match_device(twl_of_match, &pdev->dev);
+ if (!match)
+ return -ENODEV;
+
+ template = match->data;
+ if (!template)
+ return -ENODEV;
+
+ id = template->desc.id;
+ initdata = of_get_regulator_init_data(&pdev->dev, pdev->dev.of_node,
+ &template->desc);
+ if (!initdata)
+ return -EINVAL;
+
+ info = devm_kmemdup(&pdev->dev, template, sizeof(*info), GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+
+ /* Constrain board-specific capabilities according to what
+ * this driver and the chip itself can actually do.
+ */
+ c = &initdata->constraints;
+ c->valid_modes_mask &= REGULATOR_MODE_NORMAL | REGULATOR_MODE_STANDBY;
+ c->valid_ops_mask &= REGULATOR_CHANGE_VOLTAGE
+ | REGULATOR_CHANGE_MODE
+ | REGULATOR_CHANGE_STATUS;
+
+ switch (id) {
+ case TWL6032_REG_SMPS3:
+ if (twl_get_smps_mult() & SMPS_MULTOFFSET_SMPS3)
+ info->flags |= SMPS_EXTENDED_EN;
+ if (twl_get_smps_offset() & SMPS_MULTOFFSET_SMPS3)
+ info->flags |= SMPS_OFFSET_EN;
+ break;
+ case TWL6032_REG_SMPS4:
+ if (twl_get_smps_mult() & SMPS_MULTOFFSET_SMPS4)
+ info->flags |= SMPS_EXTENDED_EN;
+ if (twl_get_smps_offset() & SMPS_MULTOFFSET_SMPS4)
+ info->flags |= SMPS_OFFSET_EN;
+ break;
+ case TWL6032_REG_VIO:
+ if (twl_get_smps_mult() & SMPS_MULTOFFSET_VIO)
+ info->flags |= SMPS_EXTENDED_EN;
+ if (twl_get_smps_offset() & SMPS_MULTOFFSET_VIO)
+ info->flags |= SMPS_OFFSET_EN;
+ break;
+ }
+
+ config.dev = &pdev->dev;
+ config.init_data = initdata;
+ config.driver_data = info;
+ config.of_node = pdev->dev.of_node;
+
+ rdev = devm_regulator_register(&pdev->dev, &info->desc, &config);
+ if (IS_ERR(rdev)) {
+ dev_err(&pdev->dev, "can't register %s, %ld\n",
+ info->desc.name, PTR_ERR(rdev));
+ return PTR_ERR(rdev);
+ }
+ platform_set_drvdata(pdev, rdev);
+
+ /* NOTE: many regulators support short-circuit IRQs (presentable
+ * as REGULATOR_OVER_CURRENT notifications?) configured via:
+ * - SC_CONFIG
+ * - SC_DETECT1 (vintana2, vmmc1/2, vaux1/2/3/4)
+ * - SC_DETECT2 (vusb, vdac, vio, vdd1/2, vpll2)
+ * - IT_CONFIG
+ */
+
+ return 0;
+}
+
+MODULE_ALIAS("platform:twl6030_reg");
+
+static struct platform_driver twlreg_driver = {
+ .probe = twlreg_probe,
+ /* NOTE: short name, to work around driver model truncation of
+ * "twl_regulator.12" (and friends) to "twl_regulator.1".
+ */
+ .driver = {
+ .name = "twl6030_reg",
+ .of_match_table = of_match_ptr(twl_of_match),
+ },
+};
+
+static int __init twlreg_init(void)
+{
+ return platform_driver_register(&twlreg_driver);
+}
+subsys_initcall(twlreg_init);
+
+static void __exit twlreg_exit(void)
+{
+ platform_driver_unregister(&twlreg_driver);
+}
+module_exit(twlreg_exit)
+
+MODULE_DESCRIPTION("TWL6030 regulator driver");
+MODULE_LICENSE("GPL");
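
Aside: in the flags == 0 SMPS case above, twl6030smps_list_voltage() and twl6030smps_map_voltage() are intended to be inverses over the linear leg (codes 1..57, 600 mV plus 12.5 mV per step; codes 58..62 are fixed points). A standalone round-trip check of just that leg, with the constants copied from the two functions (user-space sketch, not driver code):

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

/* Linear leg of the flags == 0 case: vsel 1..57 <-> 600000..1300000 uV */
static int list_voltage(int vsel)
{
	return 600000 + 12500 * (vsel - 1);
}

static int map_voltage(int min_uV)
{
	return DIV_ROUND_UP(min_uV - 600000, 12500) + 1;
}

int main(void)
{
	int vsel;

	for (vsel = 1; vsel <= 57; vsel++)
		if (map_voltage(list_voltage(vsel)) != vsel)
			printf("mismatch at vsel %d\n", vsel);
	printf("done\n");	/* prints only "done" if the legs agree */
	return 0;
}
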
diff --git a/drivers/remoteproc/Kconfig b/drivers/remoteproc/Kconfig
index f396bfef5d42..8f9cf0bc571c 100644
--- a/drivers/remoteproc/Kconfig
+++ b/drivers/remoteproc/Kconfig
@@ -1,20 +1,21 @@
menu "Remoteproc drivers"
-# REMOTEPROC gets selected by whoever wants it
config REMOTEPROC
- tristate
+ tristate "Support for Remote Processor subsystem"
depends on HAS_DMA
select CRC32
select FW_LOADER
select VIRTIO
select VIRTUALIZATION
+if REMOTEPROC
+
config OMAP_REMOTEPROC
tristate "OMAP remoteproc support"
depends on HAS_DMA
depends on ARCH_OMAP4 || SOC_OMAP5
depends on OMAP_IOMMU
- select REMOTEPROC
+ depends on REMOTEPROC
select MAILBOX
select OMAP2PLUS_MBOX
select RPMSG_VIRTIO
@@ -31,20 +32,10 @@ config OMAP_REMOTEPROC
It's safe to say n here if you're not interested in multimedia
offloading or just want a bare minimum kernel.
-config STE_MODEM_RPROC
- tristate "STE-Modem remoteproc support"
- depends on HAS_DMA
- select REMOTEPROC
- default n
- help
- Say y or m here to support STE-Modem shared memory driver.
- This can be either built-in or a loadable module.
- If unsure say N.
-
config WKUP_M3_RPROC
tristate "AMx3xx Wakeup M3 remoteproc support"
depends on SOC_AM33XX || SOC_AM43XX
- select REMOTEPROC
+ depends on REMOTEPROC
help
Say y here to support Wakeup M3 remote processor on TI AM33xx
and AM43xx family of SoCs.
@@ -57,8 +48,8 @@ config WKUP_M3_RPROC
config DA8XX_REMOTEPROC
tristate "DA8xx/OMAP-L13x remoteproc support"
depends on ARCH_DAVINCI_DA8XX
+ depends on REMOTEPROC
select CMA if MMU
- select REMOTEPROC
select RPMSG_VIRTIO
help
Say y here to support DA8xx/OMAP-L13x remote processors via the
@@ -77,6 +68,18 @@ config DA8XX_REMOTEPROC
It's safe to say n here if you're not interested in multimedia
offloading.
+config QCOM_ADSP_PIL
+ tristate "Qualcomm ADSP Peripheral Image Loader"
+ depends on OF && ARCH_QCOM
+ depends on REMOTEPROC
+ depends on QCOM_SMEM
+ select MFD_SYSCON
+ select QCOM_MDT_LOADER
+ select QCOM_SCM
+ help
+	  Say y here to support the TrustZone-based Peripheral Image Loader
+ for the Qualcomm ADSP remote processors.
+
config QCOM_MDT_LOADER
tristate
@@ -84,25 +87,22 @@ config QCOM_Q6V5_PIL
	tristate "Qualcomm Hexagon V5 Peripheral Image Loader"
depends on OF && ARCH_QCOM
depends on QCOM_SMEM
+ depends on REMOTEPROC
select MFD_SYSCON
select QCOM_MDT_LOADER
- select REMOTEPROC
+ select QCOM_SCM
help
	  Say y here to support the Qualcomm Peripheral Image Loader for the
Hexagon V5 based remote processors.
-config QCOM_WCNSS_IRIS
- tristate
- depends on OF && ARCH_QCOM
-
config QCOM_WCNSS_PIL
tristate "Qualcomm WCNSS Peripheral Image Loader"
depends on OF && ARCH_QCOM
+ depends on QCOM_SMD || (COMPILE_TEST && QCOM_SMD=n)
depends on QCOM_SMEM
+ depends on REMOTEPROC
select QCOM_MDT_LOADER
select QCOM_SCM
- select QCOM_WCNSS_IRIS
- select REMOTEPROC
help
Say y here to support the Peripheral Image Loader for the Qualcomm
Wireless Connectivity Subsystem.
@@ -110,10 +110,16 @@ config QCOM_WCNSS_PIL
config ST_REMOTEPROC
tristate "ST remoteproc support"
depends on ARCH_STI
- select REMOTEPROC
+ depends on REMOTEPROC
help
Say y here to support ST's adjunct processors via the remote
processor framework.
This can be either built-in or a loadable module.
+config ST_SLIM_REMOTEPROC
+ tristate
+ depends on REMOTEPROC
+
+endif # REMOTEPROC
+
endmenu
diff --git a/drivers/remoteproc/Makefile b/drivers/remoteproc/Makefile
index 6dfb62ed643f..0938ea3c41ba 100644
--- a/drivers/remoteproc/Makefile
+++ b/drivers/remoteproc/Makefile
@@ -5,14 +5,17 @@
obj-$(CONFIG_REMOTEPROC) += remoteproc.o
remoteproc-y := remoteproc_core.o
remoteproc-y += remoteproc_debugfs.o
+remoteproc-y += remoteproc_sysfs.o
remoteproc-y += remoteproc_virtio.o
remoteproc-y += remoteproc_elf_loader.o
obj-$(CONFIG_OMAP_REMOTEPROC) += omap_remoteproc.o
-obj-$(CONFIG_STE_MODEM_RPROC) += ste_modem_rproc.o
obj-$(CONFIG_WKUP_M3_RPROC) += wkup_m3_rproc.o
obj-$(CONFIG_DA8XX_REMOTEPROC) += da8xx_remoteproc.o
+obj-$(CONFIG_QCOM_ADSP_PIL) += qcom_adsp_pil.o
obj-$(CONFIG_QCOM_MDT_LOADER) += qcom_mdt_loader.o
obj-$(CONFIG_QCOM_Q6V5_PIL) += qcom_q6v5_pil.o
-obj-$(CONFIG_QCOM_WCNSS_IRIS) += qcom_wcnss_iris.o
-obj-$(CONFIG_QCOM_WCNSS_PIL) += qcom_wcnss.o
+obj-$(CONFIG_QCOM_WCNSS_PIL) += qcom_wcnss_pil.o
+qcom_wcnss_pil-y += qcom_wcnss.o
+qcom_wcnss_pil-y += qcom_wcnss_iris.o
obj-$(CONFIG_ST_REMOTEPROC) += st_remoteproc.o
+obj-$(CONFIG_ST_SLIM_REMOTEPROC) += st_slim_rproc.o
diff --git a/drivers/remoteproc/qcom_adsp_pil.c b/drivers/remoteproc/qcom_adsp_pil.c
new file mode 100644
index 000000000000..43a4ed2f346c
--- /dev/null
+++ b/drivers/remoteproc/qcom_adsp_pil.c
@@ -0,0 +1,428 @@
+/*
+ * Qualcomm ADSP Peripheral Image Loader for MSM8974 and MSM8996
+ *
+ * Copyright (C) 2016 Linaro Ltd
+ * Copyright (C) 2014 Sony Mobile Communications AB
+ * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk.h>
+#include <linux/firmware.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/qcom_scm.h>
+#include <linux/regulator/consumer.h>
+#include <linux/remoteproc.h>
+#include <linux/soc/qcom/smem.h>
+#include <linux/soc/qcom/smem_state.h>
+
+#include "qcom_mdt_loader.h"
+#include "remoteproc_internal.h"
+
+#define ADSP_CRASH_REASON_SMEM 423
+#define ADSP_FIRMWARE_NAME "adsp.mdt"
+#define ADSP_PAS_ID 1
+
+struct qcom_adsp {
+ struct device *dev;
+ struct rproc *rproc;
+
+ int wdog_irq;
+ int fatal_irq;
+ int ready_irq;
+ int handover_irq;
+ int stop_ack_irq;
+
+ struct qcom_smem_state *state;
+ unsigned stop_bit;
+
+ struct clk *xo;
+
+ struct regulator *cx_supply;
+
+ struct completion start_done;
+ struct completion stop_done;
+
+ phys_addr_t mem_phys;
+ phys_addr_t mem_reloc;
+ void *mem_region;
+ size_t mem_size;
+};
+
+static int adsp_load(struct rproc *rproc, const struct firmware *fw)
+{
+ struct qcom_adsp *adsp = (struct qcom_adsp *)rproc->priv;
+ phys_addr_t fw_addr;
+ size_t fw_size;
+ bool relocate;
+ int ret;
+
+ ret = qcom_scm_pas_init_image(ADSP_PAS_ID, fw->data, fw->size);
+ if (ret) {
+ dev_err(&rproc->dev, "invalid firmware metadata\n");
+ return ret;
+ }
+
+ ret = qcom_mdt_parse(fw, &fw_addr, &fw_size, &relocate);
+ if (ret) {
+ dev_err(&rproc->dev, "failed to parse mdt header\n");
+ return ret;
+ }
+
+ if (relocate) {
+ adsp->mem_reloc = fw_addr;
+
+ ret = qcom_scm_pas_mem_setup(ADSP_PAS_ID, adsp->mem_phys, fw_size);
+ if (ret) {
+ dev_err(&rproc->dev, "unable to setup memory for image\n");
+ return ret;
+ }
+ }
+
+ return qcom_mdt_load(rproc, fw, rproc->firmware);
+}
+
+static const struct rproc_fw_ops adsp_fw_ops = {
+ .find_rsc_table = qcom_mdt_find_rsc_table,
+ .load = adsp_load,
+};
+
+static int adsp_start(struct rproc *rproc)
+{
+ struct qcom_adsp *adsp = (struct qcom_adsp *)rproc->priv;
+ int ret;
+
+ ret = clk_prepare_enable(adsp->xo);
+ if (ret)
+ return ret;
+
+ ret = regulator_enable(adsp->cx_supply);
+ if (ret)
+ goto disable_clocks;
+
+ ret = qcom_scm_pas_auth_and_reset(ADSP_PAS_ID);
+ if (ret) {
+ dev_err(adsp->dev,
+ "failed to authenticate image and release reset\n");
+ goto disable_regulators;
+ }
+
+ ret = wait_for_completion_timeout(&adsp->start_done,
+ msecs_to_jiffies(5000));
+ if (!ret) {
+ dev_err(adsp->dev, "start timed out\n");
+ qcom_scm_pas_shutdown(ADSP_PAS_ID);
+ ret = -ETIMEDOUT;
+ goto disable_regulators;
+ }
+
+ ret = 0;
+
+disable_regulators:
+ regulator_disable(adsp->cx_supply);
+disable_clocks:
+ clk_disable_unprepare(adsp->xo);
+
+ return ret;
+}
+
+static int adsp_stop(struct rproc *rproc)
+{
+ struct qcom_adsp *adsp = (struct qcom_adsp *)rproc->priv;
+ int ret;
+
+ qcom_smem_state_update_bits(adsp->state,
+ BIT(adsp->stop_bit),
+ BIT(adsp->stop_bit));
+
+ ret = wait_for_completion_timeout(&adsp->stop_done,
+ msecs_to_jiffies(5000));
+ if (ret == 0)
+		dev_err(adsp->dev, "timed out waiting for stop-ack\n");
+
+ qcom_smem_state_update_bits(adsp->state,
+ BIT(adsp->stop_bit),
+ 0);
+
+ ret = qcom_scm_pas_shutdown(ADSP_PAS_ID);
+ if (ret)
+ dev_err(adsp->dev, "failed to shutdown: %d\n", ret);
+
+ return ret;
+}
+
+static void *adsp_da_to_va(struct rproc *rproc, u64 da, int len)
+{
+ struct qcom_adsp *adsp = (struct qcom_adsp *)rproc->priv;
+ int offset;
+
+ offset = da - adsp->mem_reloc;
+ if (offset < 0 || offset + len > adsp->mem_size)
+ return NULL;
+
+ return adsp->mem_region + offset;
+}
+
+static const struct rproc_ops adsp_ops = {
+ .start = adsp_start,
+ .stop = adsp_stop,
+ .da_to_va = adsp_da_to_va,
+};
+
+static irqreturn_t adsp_wdog_interrupt(int irq, void *dev)
+{
+ struct qcom_adsp *adsp = dev;
+
+ rproc_report_crash(adsp->rproc, RPROC_WATCHDOG);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t adsp_fatal_interrupt(int irq, void *dev)
+{
+ struct qcom_adsp *adsp = dev;
+ size_t len;
+ char *msg;
+
+ msg = qcom_smem_get(QCOM_SMEM_HOST_ANY, ADSP_CRASH_REASON_SMEM, &len);
+ if (!IS_ERR(msg) && len > 0 && msg[0])
+ dev_err(adsp->dev, "fatal error received: %s\n", msg);
+
+ rproc_report_crash(adsp->rproc, RPROC_FATAL_ERROR);
+
+ if (!IS_ERR(msg))
+ msg[0] = '\0';
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t adsp_ready_interrupt(int irq, void *dev)
+{
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t adsp_handover_interrupt(int irq, void *dev)
+{
+ struct qcom_adsp *adsp = dev;
+
+ complete(&adsp->start_done);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t adsp_stop_ack_interrupt(int irq, void *dev)
+{
+ struct qcom_adsp *adsp = dev;
+
+ complete(&adsp->stop_done);
+
+ return IRQ_HANDLED;
+}
+
+static int adsp_init_clock(struct qcom_adsp *adsp)
+{
+ int ret;
+
+ adsp->xo = devm_clk_get(adsp->dev, "xo");
+ if (IS_ERR(adsp->xo)) {
+ ret = PTR_ERR(adsp->xo);
+ if (ret != -EPROBE_DEFER)
+			dev_err(adsp->dev, "failed to get xo clock\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int adsp_init_regulator(struct qcom_adsp *adsp)
+{
+ adsp->cx_supply = devm_regulator_get(adsp->dev, "cx");
+ if (IS_ERR(adsp->cx_supply))
+ return PTR_ERR(adsp->cx_supply);
+
+ regulator_set_load(adsp->cx_supply, 100000);
+
+ return 0;
+}
+
+static int adsp_request_irq(struct qcom_adsp *adsp,
+ struct platform_device *pdev,
+ const char *name,
+ irq_handler_t thread_fn)
+{
+ int ret;
+
+ ret = platform_get_irq_byname(pdev, name);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "no %s IRQ defined\n", name);
+ return ret;
+ }
+
+ ret = devm_request_threaded_irq(&pdev->dev, ret,
+ NULL, thread_fn,
+ IRQF_ONESHOT,
+ "adsp", adsp);
+ if (ret)
+ dev_err(&pdev->dev, "request %s IRQ failed\n", name);
+
+ return ret;
+}
+
+static int adsp_alloc_memory_region(struct qcom_adsp *adsp)
+{
+ struct device_node *node;
+ struct resource r;
+ int ret;
+
+ node = of_parse_phandle(adsp->dev->of_node, "memory-region", 0);
+ if (!node) {
+ dev_err(adsp->dev, "no memory-region specified\n");
+ return -EINVAL;
+ }
+
+ ret = of_address_to_resource(node, 0, &r);
+ if (ret)
+ return ret;
+
+ adsp->mem_phys = adsp->mem_reloc = r.start;
+ adsp->mem_size = resource_size(&r);
+ adsp->mem_region = devm_ioremap_wc(adsp->dev, adsp->mem_phys, adsp->mem_size);
+ if (!adsp->mem_region) {
+ dev_err(adsp->dev, "unable to map memory region: %pa+%zx\n",
+ &r.start, adsp->mem_size);
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+static int adsp_probe(struct platform_device *pdev)
+{
+ struct qcom_adsp *adsp;
+ struct rproc *rproc;
+ int ret;
+
+ if (!qcom_scm_is_available())
+ return -EPROBE_DEFER;
+
+ if (!qcom_scm_pas_supported(ADSP_PAS_ID)) {
+ dev_err(&pdev->dev, "PAS is not available for ADSP\n");
+ return -ENXIO;
+ }
+
+ rproc = rproc_alloc(&pdev->dev, pdev->name, &adsp_ops,
+ ADSP_FIRMWARE_NAME, sizeof(*adsp));
+ if (!rproc) {
+ dev_err(&pdev->dev, "unable to allocate remoteproc\n");
+ return -ENOMEM;
+ }
+
+ rproc->fw_ops = &adsp_fw_ops;
+
+ adsp = (struct qcom_adsp *)rproc->priv;
+ adsp->dev = &pdev->dev;
+ adsp->rproc = rproc;
+ platform_set_drvdata(pdev, adsp);
+
+ init_completion(&adsp->start_done);
+ init_completion(&adsp->stop_done);
+
+ ret = adsp_alloc_memory_region(adsp);
+ if (ret)
+ goto free_rproc;
+
+ ret = adsp_init_clock(adsp);
+ if (ret)
+ goto free_rproc;
+
+ ret = adsp_init_regulator(adsp);
+ if (ret)
+ goto free_rproc;
+
+ ret = adsp_request_irq(adsp, pdev, "wdog", adsp_wdog_interrupt);
+ if (ret < 0)
+ goto free_rproc;
+ adsp->wdog_irq = ret;
+
+ ret = adsp_request_irq(adsp, pdev, "fatal", adsp_fatal_interrupt);
+ if (ret < 0)
+ goto free_rproc;
+ adsp->fatal_irq = ret;
+
+ ret = adsp_request_irq(adsp, pdev, "ready", adsp_ready_interrupt);
+ if (ret < 0)
+ goto free_rproc;
+ adsp->ready_irq = ret;
+
+ ret = adsp_request_irq(adsp, pdev, "handover", adsp_handover_interrupt);
+ if (ret < 0)
+ goto free_rproc;
+ adsp->handover_irq = ret;
+
+ ret = adsp_request_irq(adsp, pdev, "stop-ack", adsp_stop_ack_interrupt);
+ if (ret < 0)
+ goto free_rproc;
+ adsp->stop_ack_irq = ret;
+
+ adsp->state = qcom_smem_state_get(&pdev->dev, "stop",
+ &adsp->stop_bit);
+ if (IS_ERR(adsp->state)) {
+ ret = PTR_ERR(adsp->state);
+ goto free_rproc;
+ }
+
+ ret = rproc_add(rproc);
+ if (ret)
+ goto free_rproc;
+
+ return 0;
+
+free_rproc:
+ rproc_free(rproc);
+
+ return ret;
+}
+
+static int adsp_remove(struct platform_device *pdev)
+{
+ struct qcom_adsp *adsp = platform_get_drvdata(pdev);
+
+ qcom_smem_state_put(adsp->state);
+ rproc_del(adsp->rproc);
+ rproc_free(adsp->rproc);
+
+ return 0;
+}
+
+static const struct of_device_id adsp_of_match[] = {
+ { .compatible = "qcom,msm8974-adsp-pil" },
+ { .compatible = "qcom,msm8996-adsp-pil" },
+ { },
+};
+MODULE_DEVICE_TABLE(of, adsp_of_match);
+
+static struct platform_driver adsp_driver = {
+ .probe = adsp_probe,
+ .remove = adsp_remove,
+ .driver = {
+ .name = "qcom_adsp_pil",
+ .of_match_table = adsp_of_match,
+ },
+};
+
+module_platform_driver(adsp_driver);
+MODULE_DESCRIPTION("Qualcomm MSM8974/MSM8996 ADSP Peripheral Image Loader");
+MODULE_LICENSE("GPL v2");
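
Aside: adsp_da_to_va() above is the usual carveout translation: a device address is valid only if the whole [da, da + len) window lies inside the reserved region, measured from mem_reloc (which adsp_load() moves to the firmware load address when the image is relocatable). A standalone sketch of the same bounds check; the reloc address, region size, and mapping base are made-up values, and a 64-bit offset is used for clarity where the driver uses int:

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

static const uint64_t mem_reloc = 0x80000000;	/* hypothetical reloc addr */
static const size_t mem_size = 16 * 1024 * 1024;	/* 16 MiB carveout */
static const uintptr_t mem_base = 0x1000;	/* pretend CPU mapping base */

static void *da_to_va(uint64_t da, int len)
{
	int64_t offset = (int64_t)da - (int64_t)mem_reloc;

	if (offset < 0 || (uint64_t)(offset + len) > mem_size)
		return NULL;		/* [da, da + len) leaves the carveout */
	return (void *)(mem_base + (size_t)offset);
}

int main(void)
{
	printf("%p\n", da_to_va(0x80000100, 64));	/* inside -> non-NULL */
	printf("%p\n", da_to_va(0x7fffff00, 64));	/* below  -> NULL */
	printf("%p\n", da_to_va(0x80ffffff, 64));	/* crosses end -> NULL */
	return 0;
}
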
diff --git a/drivers/remoteproc/qcom_mdt_loader.c b/drivers/remoteproc/qcom_mdt_loader.c
index 114e8e4cef67..2ff18cd6c096 100644
--- a/drivers/remoteproc/qcom_mdt_loader.c
+++ b/drivers/remoteproc/qcom_mdt_loader.c
@@ -20,6 +20,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/remoteproc.h>
+#include <linux/sizes.h>
#include <linux/slab.h>
#include "remoteproc_internal.h"
diff --git a/drivers/remoteproc/qcom_q6v5_pil.c b/drivers/remoteproc/qcom_q6v5_pil.c
index 2e0caaaa766a..b08989b48df7 100644
--- a/drivers/remoteproc/qcom_q6v5_pil.c
+++ b/drivers/remoteproc/qcom_q6v5_pil.c
@@ -894,6 +894,7 @@ static const struct of_device_id q6v5_of_match[] = {
{ .compatible = "qcom,q6v5-pil", },
{ },
};
+MODULE_DEVICE_TABLE(of, q6v5_of_match);
static struct platform_driver q6v5_driver = {
.probe = q6v5_probe,
diff --git a/drivers/remoteproc/qcom_wcnss.c b/drivers/remoteproc/qcom_wcnss.c
index f5cedeaafba1..ebd61f5d18bb 100644
--- a/drivers/remoteproc/qcom_wcnss.c
+++ b/drivers/remoteproc/qcom_wcnss.c
@@ -30,6 +30,7 @@
#include <linux/remoteproc.h>
#include <linux/soc/qcom/smem.h>
#include <linux/soc/qcom/smem_state.h>
+#include <linux/rpmsg/qcom_smd.h>
#include "qcom_mdt_loader.h"
#include "remoteproc_internal.h"
@@ -94,6 +95,10 @@ struct qcom_wcnss {
phys_addr_t mem_reloc;
void *mem_region;
size_t mem_size;
+
+ struct device_node *smd_node;
+ struct qcom_smd_edge *smd_edge;
+ struct rproc_subdev smd_subdev;
};
static const struct wcnss_data riva_data = {
@@ -143,7 +148,6 @@ void qcom_wcnss_assign_iris(struct qcom_wcnss *wcnss,
mutex_unlock(&wcnss->iris_lock);
}
-EXPORT_SYMBOL_GPL(qcom_wcnss_assign_iris);
static int wcnss_load(struct rproc *rproc, const struct firmware *fw)
{
@@ -396,6 +400,23 @@ static irqreturn_t wcnss_stop_ack_interrupt(int irq, void *dev)
return IRQ_HANDLED;
}
+static int wcnss_smd_probe(struct rproc_subdev *subdev)
+{
+ struct qcom_wcnss *wcnss = container_of(subdev, struct qcom_wcnss, smd_subdev);
+
+ wcnss->smd_edge = qcom_smd_register_edge(wcnss->dev, wcnss->smd_node);
+
+ return IS_ERR(wcnss->smd_edge) ? PTR_ERR(wcnss->smd_edge) : 0;
+}
+
+static void wcnss_smd_remove(struct rproc_subdev *subdev)
+{
+ struct qcom_wcnss *wcnss = container_of(subdev, struct qcom_wcnss, smd_subdev);
+
+ qcom_smd_unregister_edge(wcnss->smd_edge);
+ wcnss->smd_edge = NULL;
+}
+
static int wcnss_init_regulators(struct qcom_wcnss *wcnss,
const struct wcnss_vreg_info *info,
int num_vregs)
@@ -578,6 +599,10 @@ static int wcnss_probe(struct platform_device *pdev)
}
}
+ wcnss->smd_node = of_get_child_by_name(pdev->dev.of_node, "smd-edge");
+ if (wcnss->smd_node)
+ rproc_add_subdev(rproc, &wcnss->smd_subdev, wcnss_smd_probe, wcnss_smd_remove);
+
ret = rproc_add(rproc);
if (ret)
goto free_rproc;
@@ -596,6 +621,7 @@ static int wcnss_remove(struct platform_device *pdev)
of_platform_depopulate(&pdev->dev);
+ of_node_put(wcnss->smd_node);
qcom_smem_state_put(wcnss->state);
rproc_del(wcnss->rproc);
rproc_free(wcnss->rproc);
@@ -609,6 +635,7 @@ static const struct of_device_id wcnss_of_match[] = {
{ .compatible = "qcom,pronto-v2-pil", &pronto_v2_data },
{ },
};
+MODULE_DEVICE_TABLE(of, wcnss_of_match);
static struct platform_driver wcnss_driver = {
.probe = wcnss_probe,
@@ -619,6 +646,28 @@ static struct platform_driver wcnss_driver = {
},
};
-module_platform_driver(wcnss_driver);
+static int __init wcnss_init(void)
+{
+ int ret;
+
+ ret = platform_driver_register(&wcnss_driver);
+ if (ret)
+ return ret;
+
+ ret = platform_driver_register(&qcom_iris_driver);
+ if (ret)
+ platform_driver_unregister(&wcnss_driver);
+
+ return ret;
+}
+module_init(wcnss_init);
+
+static void __exit wcnss_exit(void)
+{
+ platform_driver_unregister(&qcom_iris_driver);
+ platform_driver_unregister(&wcnss_driver);
+}
+module_exit(wcnss_exit);
+
MODULE_DESCRIPTION("Qualcomm Peripherial Image Loader for Wireless Subsystem");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/remoteproc/qcom_wcnss.h b/drivers/remoteproc/qcom_wcnss.h
index 9dc4a9fe41e1..25fb7f62a457 100644
--- a/drivers/remoteproc/qcom_wcnss.h
+++ b/drivers/remoteproc/qcom_wcnss.h
@@ -4,6 +4,8 @@
struct qcom_iris;
struct qcom_wcnss;
+extern struct platform_driver qcom_iris_driver;
+
struct wcnss_vreg_info {
const char * const name;
int min_voltage;
diff --git a/drivers/remoteproc/qcom_wcnss_iris.c b/drivers/remoteproc/qcom_wcnss_iris.c
index f0ca24a8dd0b..e842be58e8c7 100644
--- a/drivers/remoteproc/qcom_wcnss_iris.c
+++ b/drivers/remoteproc/qcom_wcnss_iris.c
@@ -94,14 +94,12 @@ disable_regulators:
return ret;
}
-EXPORT_SYMBOL_GPL(qcom_iris_enable);
void qcom_iris_disable(struct qcom_iris *iris)
{
clk_disable_unprepare(iris->xo_clk);
regulator_bulk_disable(iris->num_vregs, iris->vregs);
}
-EXPORT_SYMBOL_GPL(qcom_iris_disable);
static int qcom_iris_probe(struct platform_device *pdev)
{
@@ -173,8 +171,9 @@ static const struct of_device_id iris_of_match[] = {
{ .compatible = "qcom,wcn3680", .data = &wcn3680_data },
{}
};
+MODULE_DEVICE_TABLE(of, iris_of_match);
-static struct platform_driver wcnss_driver = {
+struct platform_driver qcom_iris_driver = {
.probe = qcom_iris_probe,
.remove = qcom_iris_remove,
.driver = {
@@ -182,7 +181,3 @@ static struct platform_driver wcnss_driver = {
.of_match_table = iris_of_match,
},
};
-
-module_platform_driver(wcnss_driver);
-MODULE_DESCRIPTION("Qualcomm Wireless Subsystem Iris driver");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/remoteproc/remoteproc_core.c b/drivers/remoteproc/remoteproc_core.c
index c6bfb3496684..9a507e77eced 100644
--- a/drivers/remoteproc/remoteproc_core.c
+++ b/drivers/remoteproc/remoteproc_core.c
@@ -236,6 +236,10 @@ int rproc_alloc_vring(struct rproc_vdev *rvdev, int i)
}
notifyid = ret;
+ /* Potentially bump max_notifyid */
+ if (notifyid > rproc->max_notifyid)
+ rproc->max_notifyid = notifyid;
+
dev_dbg(dev, "vring%d: va %p dma %pad size 0x%x idr %d\n",
i, va, &dma, size, notifyid);
@@ -296,6 +300,20 @@ void rproc_free_vring(struct rproc_vring *rvring)
rsc->vring[idx].notifyid = -1;
}
+static int rproc_vdev_do_probe(struct rproc_subdev *subdev)
+{
+ struct rproc_vdev *rvdev = container_of(subdev, struct rproc_vdev, subdev);
+
+ return rproc_add_virtio_dev(rvdev, rvdev->id);
+}
+
+static void rproc_vdev_do_remove(struct rproc_subdev *subdev)
+{
+ struct rproc_vdev *rvdev = container_of(subdev, struct rproc_vdev, subdev);
+
+ rproc_remove_virtio_dev(rvdev);
+}
+
/**
* rproc_handle_vdev() - handle a vdev fw resource
* @rproc: the remote processor
@@ -356,6 +374,9 @@ static int rproc_handle_vdev(struct rproc *rproc, struct fw_rsc_vdev *rsc,
if (!rvdev)
return -ENOMEM;
+ kref_init(&rvdev->refcount);
+
+ rvdev->id = rsc->id;
rvdev->rproc = rproc;
/* parse the vrings */
@@ -368,22 +389,51 @@ static int rproc_handle_vdev(struct rproc *rproc, struct fw_rsc_vdev *rsc,
/* remember the resource offset */
rvdev->rsc_offset = offset;
+ /* allocate the vring resources */
+ for (i = 0; i < rsc->num_of_vrings; i++) {
+ ret = rproc_alloc_vring(rvdev, i);
+ if (ret)
+ goto unwind_vring_allocations;
+ }
+
+ /* track the rvdevs list reference */
+ kref_get(&rvdev->refcount);
+
list_add_tail(&rvdev->node, &rproc->rvdevs);
- /* it is now safe to add the virtio device */
- ret = rproc_add_virtio_dev(rvdev, rsc->id);
- if (ret)
- goto remove_rvdev;
+ rproc_add_subdev(rproc, &rvdev->subdev,
+ rproc_vdev_do_probe, rproc_vdev_do_remove);
return 0;
-remove_rvdev:
- list_del(&rvdev->node);
+unwind_vring_allocations:
+ for (i--; i >= 0; i--)
+ rproc_free_vring(&rvdev->vring[i]);
free_rvdev:
kfree(rvdev);
return ret;
}
+void rproc_vdev_release(struct kref *ref)
+{
+ struct rproc_vdev *rvdev = container_of(ref, struct rproc_vdev, refcount);
+ struct rproc_vring *rvring;
+ struct rproc *rproc = rvdev->rproc;
+ int id;
+
+ for (id = 0; id < ARRAY_SIZE(rvdev->vring); id++) {
+ rvring = &rvdev->vring[id];
+ if (!rvring->va)
+ continue;
+
+ rproc_free_vring(rvring);
+ }
+
+ rproc_remove_subdev(rproc, &rvdev->subdev);
+ list_del(&rvdev->node);
+ kfree(rvdev);
+}
+
/**
* rproc_handle_trace() - handle a shared trace buffer resource
* @rproc: the remote processor
@@ -673,15 +723,6 @@ free_carv:
return ret;
}
-static int rproc_count_vrings(struct rproc *rproc, struct fw_rsc_vdev *rsc,
- int offset, int avail)
-{
- /* Summarize the number of notification IDs */
- rproc->max_notifyid += rsc->num_of_vrings;
-
- return 0;
-}
-
/*
* A lookup table for resource handlers. The indices are defined in
* enum fw_resource_type.
@@ -690,10 +731,6 @@ static rproc_handle_resource_t rproc_loading_handlers[RSC_LAST] = {
[RSC_CARVEOUT] = (rproc_handle_resource_t)rproc_handle_carveout,
[RSC_DEVMEM] = (rproc_handle_resource_t)rproc_handle_devmem,
[RSC_TRACE] = (rproc_handle_resource_t)rproc_handle_trace,
- [RSC_VDEV] = (rproc_handle_resource_t)rproc_count_vrings,
-};
-
-static rproc_handle_resource_t rproc_vdev_handler[RSC_LAST] = {
[RSC_VDEV] = (rproc_handle_resource_t)rproc_handle_vdev,
};
@@ -736,6 +773,34 @@ static int rproc_handle_resources(struct rproc *rproc, int len,
return ret;
}
+static int rproc_probe_subdevices(struct rproc *rproc)
+{
+ struct rproc_subdev *subdev;
+ int ret;
+
+ list_for_each_entry(subdev, &rproc->subdevs, node) {
+ ret = subdev->probe(subdev);
+ if (ret)
+ goto unroll_registration;
+ }
+
+ return 0;
+
+unroll_registration:
+ list_for_each_entry_continue_reverse(subdev, &rproc->subdevs, node)
+ subdev->remove(subdev);
+
+ return ret;
+}
+
+static void rproc_remove_subdevices(struct rproc *rproc)
+{
+ struct rproc_subdev *subdev;
+
+ list_for_each_entry(subdev, &rproc->subdevs, node)
+ subdev->remove(subdev);
+}
+
/**
* rproc_resource_cleanup() - clean up and free all acquired resources
* @rproc: rproc handle
@@ -782,7 +847,7 @@ static void rproc_resource_cleanup(struct rproc *rproc)
/* clean up remote vdev entries */
list_for_each_entry_safe(rvdev, rvtmp, &rproc->rvdevs, node)
- rproc_remove_virtio_dev(rvdev);
+ kref_put(&rvdev->refcount, rproc_vdev_release);
}
/*
@@ -824,25 +889,16 @@ static int rproc_fw_boot(struct rproc *rproc, const struct firmware *fw)
/*
* Create a copy of the resource table. When a virtio device starts
* and calls vring_new_virtqueue() the address of the allocated vring
- * will be stored in the cached_table. Before the device is started,
- * cached_table will be copied into device memory.
+ * will be stored in the table_ptr. Before the device is started,
+ * table_ptr will be copied into device memory.
*/
- rproc->cached_table = kmemdup(table, tablesz, GFP_KERNEL);
- if (!rproc->cached_table)
+ rproc->table_ptr = kmemdup(table, tablesz, GFP_KERNEL);
+ if (!rproc->table_ptr)
goto clean_up;
- rproc->table_ptr = rproc->cached_table;
-
/* reset max_notifyid */
rproc->max_notifyid = -1;
- /* look for virtio devices and register them */
- ret = rproc_handle_resources(rproc, tablesz, rproc_vdev_handler);
- if (ret) {
- dev_err(dev, "Failed to handle vdev resources: %d\n", ret);
- goto clean_up;
- }
-
/* handle fw resources which are required to boot rproc */
ret = rproc_handle_resources(rproc, tablesz, rproc_loading_handlers);
if (ret) {
@@ -858,18 +914,16 @@ static int rproc_fw_boot(struct rproc *rproc, const struct firmware *fw)
}
/*
- * The starting device has been given the rproc->cached_table as the
+ * The starting device has been given the rproc->table_ptr as the
* resource table. The address of the vring along with the other
- * allocated resources (carveouts etc) is stored in cached_table.
+ * allocated resources (carveouts etc) is stored in table_ptr.
* In order to pass this information to the remote device we must copy
* this information to device memory. We also update the table_ptr so
* that any subsequent changes will be applied to the loaded version.
*/
loaded_table = rproc_find_loaded_rsc_table(rproc, fw);
- if (loaded_table) {
- memcpy(loaded_table, rproc->cached_table, tablesz);
- rproc->table_ptr = loaded_table;
- }
+ if (loaded_table)
+ memcpy(loaded_table, rproc->table_ptr, tablesz);
/* power up the remote processor */
ret = rproc->ops->start(rproc);
@@ -878,17 +932,26 @@ static int rproc_fw_boot(struct rproc *rproc, const struct firmware *fw)
goto clean_up_resources;
}
+ /* probe any subdevices for the remote processor */
+ ret = rproc_probe_subdevices(rproc);
+ if (ret) {
+ dev_err(dev, "failed to probe subdevices for %s: %d\n",
+ rproc->name, ret);
+ goto stop_rproc;
+ }
+
rproc->state = RPROC_RUNNING;
dev_info(dev, "remote processor %s is now up\n", rproc->name);
return 0;
+stop_rproc:
+ rproc->ops->stop(rproc);
clean_up_resources:
rproc_resource_cleanup(rproc);
clean_up:
- kfree(rproc->cached_table);
- rproc->cached_table = NULL;
+ kfree(rproc->table_ptr);
rproc->table_ptr = NULL;
rproc_disable_iommu(rproc);
@@ -909,7 +972,7 @@ static void rproc_fw_config_virtio(const struct firmware *fw, void *context)
/* if rproc is marked always-on, request it to boot */
if (rproc->auto_boot)
- rproc_boot_nowait(rproc);
+ rproc_boot(rproc);
release_firmware(fw);
/* allow rproc_del() contexts, if any, to proceed */
@@ -1007,7 +1070,6 @@ static void rproc_crash_handler_work(struct work_struct *work)
/**
* __rproc_boot() - boot a remote processor
* @rproc: handle of a remote processor
- * @wait: wait for rproc registration completion
*
* Boot a remote processor (i.e. load its firmware, power it on, ...).
*
@@ -1016,7 +1078,7 @@ static void rproc_crash_handler_work(struct work_struct *work)
*
* Returns 0 on success, and an appropriate error value otherwise.
*/
-static int __rproc_boot(struct rproc *rproc, bool wait)
+static int __rproc_boot(struct rproc *rproc)
{
const struct firmware *firmware_p;
struct device *dev;
@@ -1050,10 +1112,6 @@ static int __rproc_boot(struct rproc *rproc, bool wait)
goto downref_rproc;
}
- /* if rproc virtio is not yet configured, wait */
- if (wait)
- wait_for_completion(&rproc->firmware_loading_complete);
-
ret = rproc_fw_boot(rproc, firmware_p);
release_firmware(firmware_p);
@@ -1072,22 +1130,11 @@ unlock_mutex:
*/
int rproc_boot(struct rproc *rproc)
{
- return __rproc_boot(rproc, true);
+ return __rproc_boot(rproc);
}
EXPORT_SYMBOL(rproc_boot);
/**
- * rproc_boot_nowait() - boot a remote processor
- * @rproc: handle of a remote processor
- *
- * Same as rproc_boot() but don't wait for rproc registration completion
- */
-int rproc_boot_nowait(struct rproc *rproc)
-{
- return __rproc_boot(rproc, false);
-}
-
-/**
* rproc_shutdown() - power off the remote processor
* @rproc: the remote processor
*
@@ -1121,6 +1168,9 @@ void rproc_shutdown(struct rproc *rproc)
if (!atomic_dec_and_test(&rproc->power))
goto out;
+ /* remove any subdevices for the remote processor */
+ rproc_remove_subdevices(rproc);
+
/* power off the remote processor */
ret = rproc->ops->stop(rproc);
if (ret) {
@@ -1135,8 +1185,7 @@ void rproc_shutdown(struct rproc *rproc)
rproc_disable_iommu(rproc);
/* Free the copy of the resource table */
- kfree(rproc->cached_table);
- rproc->cached_table = NULL;
+ kfree(rproc->table_ptr);
rproc->table_ptr = NULL;
/* if in crash state, unlock crash handler */
@@ -1233,9 +1282,6 @@ int rproc_add(struct rproc *rproc)
dev_info(dev, "%s is available\n", rproc->name);
- dev_info(dev, "Note: remoteproc is still under development and considered experimental.\n");
- dev_info(dev, "THE BINARY FORMAT IS NOT YET FINALIZED, and backward compatibility isn't yet guaranteed.\n");
-
/* create debugfs entries */
rproc_create_debug_dir(rproc);
ret = rproc_add_virtio_devices(rproc);
@@ -1273,6 +1319,7 @@ static void rproc_type_release(struct device *dev)
if (rproc->index >= 0)
ida_simple_remove(&rproc_dev_index, rproc->index);
+ kfree(rproc->firmware);
kfree(rproc);
}
@@ -1310,31 +1357,31 @@ struct rproc *rproc_alloc(struct device *dev, const char *name,
{
struct rproc *rproc;
char *p, *template = "rproc-%s-fw";
- int name_len = 0;
+ int name_len;
if (!dev || !name || !ops)
return NULL;
- if (!firmware)
+ if (!firmware) {
/*
- * Make room for default firmware name (minus %s plus '\0').
* If the caller didn't pass in a firmware name then
- * construct a default name. We're already glomming 'len'
- * bytes onto the end of the struct rproc allocation, so do
- * a few more for the default firmware name (but only if
- * the caller doesn't pass one).
+ * construct a default name.
*/
name_len = strlen(name) + strlen(template) - 2 + 1;
-
- rproc = kzalloc(sizeof(*rproc) + len + name_len, GFP_KERNEL);
- if (!rproc)
- return NULL;
-
- if (!firmware) {
- p = (char *)rproc + sizeof(struct rproc) + len;
+ p = kmalloc(name_len, GFP_KERNEL);
+ if (!p)
+ return NULL;
snprintf(p, name_len, template, name);
} else {
- p = (char *)firmware;
+ p = kstrdup(firmware, GFP_KERNEL);
+ if (!p)
+ return NULL;
+ }
+
+ rproc = kzalloc(sizeof(struct rproc) + len, GFP_KERNEL);
+ if (!rproc) {
+ kfree(p);
+ return NULL;
}
rproc->firmware = p;
@@ -1346,6 +1393,7 @@ struct rproc *rproc_alloc(struct device *dev, const char *name,
device_initialize(&rproc->dev);
rproc->dev.parent = dev;
rproc->dev.type = &rproc_type;
+ rproc->dev.class = &rproc_class;
/* Assign a unique device index and name */
rproc->index = ida_simple_get(&rproc_dev_index, 0, 0, GFP_KERNEL);
@@ -1370,6 +1418,7 @@ struct rproc *rproc_alloc(struct device *dev, const char *name,
INIT_LIST_HEAD(&rproc->mappings);
INIT_LIST_HEAD(&rproc->traces);
INIT_LIST_HEAD(&rproc->rvdevs);
+ INIT_LIST_HEAD(&rproc->subdevs);
INIT_WORK(&rproc->crash_handler, rproc_crash_handler_work);
init_completion(&rproc->crash_comp);
@@ -1428,8 +1477,6 @@ EXPORT_SYMBOL(rproc_put);
*/
int rproc_del(struct rproc *rproc)
{
- struct rproc_vdev *rvdev, *tmp;
-
if (!rproc)
return -EINVAL;
@@ -1441,10 +1488,6 @@ int rproc_del(struct rproc *rproc)
if (rproc->auto_boot)
rproc_shutdown(rproc);
- /* clean up remote vdev entries */
- list_for_each_entry_safe(rvdev, tmp, &rproc->rvdevs, node)
- rproc_remove_virtio_dev(rvdev);
-
/* the rproc is downref'ed as soon as it's removed from the klist */
mutex_lock(&rproc_list_mutex);
list_del(&rproc->node);
@@ -1457,6 +1500,36 @@ int rproc_del(struct rproc *rproc)
EXPORT_SYMBOL(rproc_del);
/**
+ * rproc_add_subdev() - add a subdevice to a remoteproc
+ * @rproc: rproc handle to add the subdevice to
+ * @subdev: subdev handle to register
+ * @probe: function to call when the rproc boots
+ * @remove: function to call when the rproc shuts down
+ */
+void rproc_add_subdev(struct rproc *rproc,
+ struct rproc_subdev *subdev,
+ int (*probe)(struct rproc_subdev *subdev),
+ void (*remove)(struct rproc_subdev *subdev))
+{
+ subdev->probe = probe;
+ subdev->remove = remove;
+
+ list_add_tail(&subdev->node, &rproc->subdevs);
+}
+EXPORT_SYMBOL(rproc_add_subdev);
+
+/**
+ * rproc_remove_subdev() - remove a subdevice from a remoteproc
+ * @rproc: rproc handle to remove the subdevice from
+ * @subdev: subdev handle, previously registered with rproc_add_subdev()
+ */
+void rproc_remove_subdev(struct rproc *rproc, struct rproc_subdev *subdev)
+{
+ list_del(&subdev->node);
+}
+EXPORT_SYMBOL(rproc_remove_subdev);
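The qcom_wcnss hunks earlier in this diff are the first user of this subdevice pair; condensed, a driver registers the subdevice between rproc_alloc() and rproc_add(), and the framework calls probe() once the remote processor has started and remove() before it is stopped. A sketch under those assumptions, with my_dev as an illustrative container:

#include <linux/device.h>
#include <linux/remoteproc.h>

struct my_dev {
	struct device *dev;
	struct rproc_subdev subdev;
};

static int my_subdev_probe(struct rproc_subdev *subdev)
{
	struct my_dev *my = container_of(subdev, struct my_dev, subdev);

	/* the rproc is running: bring up anything that depends on it */
	dev_dbg(my->dev, "remote processor is up\n");
	return 0;
}

static void my_subdev_remove(struct rproc_subdev *subdev)
{
	struct my_dev *my = container_of(subdev, struct my_dev, subdev);

	/* quiesce before the rproc is powered off */
	dev_dbg(my->dev, "remote processor going down\n");
}

	/* fragment: in the parent driver's probe, before rproc_add() */
	rproc_add_subdev(rproc, &my->subdev, my_subdev_probe, my_subdev_remove);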
+
+/**
* rproc_report_crash() - rproc crash reporter function
* @rproc: remote processor
* @type: crash type
@@ -1484,6 +1557,7 @@ EXPORT_SYMBOL(rproc_report_crash);
static int __init remoteproc_init(void)
{
+ rproc_init_sysfs();
rproc_init_debugfs();
return 0;
@@ -1495,6 +1569,7 @@ static void __exit remoteproc_exit(void)
ida_destroy(&rproc_dev_index);
rproc_exit_debugfs();
+ rproc_exit_sysfs();
}
module_exit(remoteproc_exit);
diff --git a/drivers/remoteproc/remoteproc_debugfs.c b/drivers/remoteproc/remoteproc_debugfs.c
index 374797206c79..1c122e230cec 100644
--- a/drivers/remoteproc/remoteproc_debugfs.c
+++ b/drivers/remoteproc/remoteproc_debugfs.c
@@ -59,75 +59,6 @@ static const struct file_operations trace_rproc_ops = {
.llseek = generic_file_llseek,
};
-/*
- * A state-to-string lookup table, for exposing a human readable state
- * via debugfs. Always keep in sync with enum rproc_state
- */
-static const char * const rproc_state_string[] = {
- "offline",
- "suspended",
- "running",
- "crashed",
- "invalid",
-};
-
-/* expose the state of the remote processor via debugfs */
-static ssize_t rproc_state_read(struct file *filp, char __user *userbuf,
- size_t count, loff_t *ppos)
-{
- struct rproc *rproc = filp->private_data;
- unsigned int state;
- char buf[30];
- int i;
-
- state = rproc->state > RPROC_LAST ? RPROC_LAST : rproc->state;
-
- i = scnprintf(buf, 30, "%.28s (%d)\n", rproc_state_string[state],
- rproc->state);
-
- return simple_read_from_buffer(userbuf, count, ppos, buf, i);
-}
-
-static ssize_t rproc_state_write(struct file *filp, const char __user *userbuf,
- size_t count, loff_t *ppos)
-{
- struct rproc *rproc = filp->private_data;
- char buf[10];
- int ret;
-
- if (count > sizeof(buf) || count <= 0)
- return -EINVAL;
-
- ret = copy_from_user(buf, userbuf, count);
- if (ret)
- return -EFAULT;
-
- if (buf[count - 1] == '\n')
- buf[count - 1] = '\0';
-
- if (!strncmp(buf, "start", count)) {
- ret = rproc_boot(rproc);
- if (ret) {
- dev_err(&rproc->dev, "Boot failed: %d\n", ret);
- return ret;
- }
- } else if (!strncmp(buf, "stop", count)) {
- rproc_shutdown(rproc);
- } else {
- dev_err(&rproc->dev, "Unrecognised option: %s\n", buf);
- return -EINVAL;
- }
-
- return count;
-}
-
-static const struct file_operations rproc_state_ops = {
- .read = rproc_state_read,
- .write = rproc_state_write,
- .open = simple_open,
- .llseek = generic_file_llseek,
-};
-
/* expose the name of the remote processor via debugfs */
static ssize_t rproc_name_read(struct file *filp, char __user *userbuf,
size_t count, loff_t *ppos)
@@ -265,8 +196,6 @@ void rproc_create_debug_dir(struct rproc *rproc)
debugfs_create_file("name", 0400, rproc->dbg_dir,
rproc, &rproc_name_ops);
- debugfs_create_file("state", 0400, rproc->dbg_dir,
- rproc, &rproc_state_ops);
debugfs_create_file("recovery", 0400, rproc->dbg_dir,
rproc, &rproc_recovery_ops);
}
diff --git a/drivers/remoteproc/remoteproc_internal.h b/drivers/remoteproc/remoteproc_internal.h
index 4cf93ca2816e..1e9e5b3f021c 100644
--- a/drivers/remoteproc/remoteproc_internal.h
+++ b/drivers/remoteproc/remoteproc_internal.h
@@ -49,6 +49,7 @@ struct rproc_fw_ops {
void rproc_release(struct kref *kref);
irqreturn_t rproc_vq_interrupt(struct rproc *rproc, int vq_id);
int rproc_boot_nowait(struct rproc *rproc);
+void rproc_vdev_release(struct kref *ref);
/* from remoteproc_virtio.c */
int rproc_add_virtio_dev(struct rproc_vdev *rvdev, int id);
@@ -63,6 +64,11 @@ void rproc_create_debug_dir(struct rproc *rproc);
void rproc_init_debugfs(void);
void rproc_exit_debugfs(void);
+/* from remoteproc_sysfs.c */
+extern struct class rproc_class;
+int rproc_init_sysfs(void);
+void rproc_exit_sysfs(void);
+
void rproc_free_vring(struct rproc_vring *rvring);
int rproc_alloc_vring(struct rproc_vdev *rvdev, int i);
diff --git a/drivers/remoteproc/remoteproc_sysfs.c b/drivers/remoteproc/remoteproc_sysfs.c
new file mode 100644
index 000000000000..bc5b0e00efb1
--- /dev/null
+++ b/drivers/remoteproc/remoteproc_sysfs.c
@@ -0,0 +1,151 @@
+/*
+ * Remote Processor Framework
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/remoteproc.h>
+
+#include "remoteproc_internal.h"
+
+#define to_rproc(d) container_of(d, struct rproc, dev)
+
+/* Expose the loaded / running firmware name via sysfs */
+static ssize_t firmware_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct rproc *rproc = to_rproc(dev);
+
+ return sprintf(buf, "%s\n", rproc->firmware);
+}
+
+/* Change firmware name via sysfs */
+static ssize_t firmware_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct rproc *rproc = to_rproc(dev);
+ char *p;
+ int err, len;
+
+ err = mutex_lock_interruptible(&rproc->lock);
+ if (err) {
+ dev_err(dev, "can't lock rproc %s: %d\n", rproc->name, err);
+ return -EINVAL;
+ }
+
+ if (rproc->state != RPROC_OFFLINE) {
+ dev_err(dev, "can't change firmware while running\n");
+ err = -EBUSY;
+ goto out;
+ }
+
+ len = strcspn(buf, "\n");
+
+ p = kstrndup(buf, len, GFP_KERNEL);
+ if (!p) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ kfree(rproc->firmware);
+ rproc->firmware = p;
+out:
+ mutex_unlock(&rproc->lock);
+
+ return err ? err : count;
+}
+static DEVICE_ATTR_RW(firmware);
+
+/*
+ * A state-to-string lookup table, for exposing a human readable state
+ * via sysfs. Always keep in sync with enum rproc_state
+ */
+static const char * const rproc_state_string[] = {
+ [RPROC_OFFLINE] = "offline",
+ [RPROC_SUSPENDED] = "suspended",
+ [RPROC_RUNNING] = "running",
+ [RPROC_CRASHED] = "crashed",
+ [RPROC_LAST] = "invalid",
+};
+
+/* Expose the state of the remote processor via sysfs */
+static ssize_t state_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct rproc *rproc = to_rproc(dev);
+ unsigned int state;
+
+ state = rproc->state > RPROC_LAST ? RPROC_LAST : rproc->state;
+ return sprintf(buf, "%s\n", rproc_state_string[state]);
+}
+
+/* Change remote processor state via sysfs */
+static ssize_t state_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct rproc *rproc = to_rproc(dev);
+ int ret = 0;
+
+ if (sysfs_streq(buf, "start")) {
+ if (rproc->state == RPROC_RUNNING)
+ return -EBUSY;
+
+ ret = rproc_boot(rproc);
+ if (ret)
+ dev_err(&rproc->dev, "Boot failed: %d\n", ret);
+ } else if (sysfs_streq(buf, "stop")) {
+ if (rproc->state != RPROC_RUNNING)
+ return -EINVAL;
+
+ rproc_shutdown(rproc);
+ } else {
+ dev_err(&rproc->dev, "Unrecognised option: %s\n", buf);
+ ret = -EINVAL;
+ }
+ return ret ? ret : count;
+}
+static DEVICE_ATTR_RW(state);
+
+static struct attribute *rproc_attrs[] = {
+ &dev_attr_firmware.attr,
+ &dev_attr_state.attr,
+ NULL
+};
+
+static const struct attribute_group rproc_devgroup = {
+ .attrs = rproc_attrs
+};
+
+static const struct attribute_group *rproc_devgroups[] = {
+ &rproc_devgroup,
+ NULL
+};
+
+struct class rproc_class = {
+ .name = "remoteproc",
+ .dev_groups = rproc_devgroups,
+};
+
+int __init rproc_init_sysfs(void)
+{
+ /* create remoteproc device class for sysfs */
+ int err = class_register(&rproc_class);
+
+ if (err)
+ pr_err("remoteproc: unable to register class\n");
+ return err;
+}
+
+void __exit rproc_exit_sysfs(void)
+{
+ class_unregister(&rproc_class);
+}
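With this class registered, each remote processor should appear under /sys/class/remoteproc/ (remoteproc0, remoteproc1, ..., assuming the existing "remoteproc%d" device naming carries over), so the controls that used to live in debugfs become plain sysfs writes: echo stop > /sys/class/remoteproc/remoteproc0/state, then echo some-other-fw.elf > /sys/class/remoteproc/remoteproc0/firmware (accepted only while the rproc is offline, per firmware_store() above), then echo start > /sys/class/remoteproc/remoteproc0/state. The firmware file name here is purely illustrative.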
diff --git a/drivers/remoteproc/remoteproc_virtio.c b/drivers/remoteproc/remoteproc_virtio.c
index 01870a16d6d2..364411fb7734 100644
--- a/drivers/remoteproc/remoteproc_virtio.c
+++ b/drivers/remoteproc/remoteproc_virtio.c
@@ -79,7 +79,7 @@ static struct virtqueue *rp_find_vq(struct virtio_device *vdev,
struct rproc_vring *rvring;
struct virtqueue *vq;
void *addr;
- int len, size, ret;
+ int len, size;
/* we're temporarily limited to two virtqueues per rvdev */
if (id >= ARRAY_SIZE(rvdev->vring))
@@ -88,10 +88,6 @@ static struct virtqueue *rp_find_vq(struct virtio_device *vdev,
if (!name)
return NULL;
- ret = rproc_alloc_vring(rvdev, id);
- if (ret)
- return ERR_PTR(ret);
-
rvring = &rvdev->vring[id];
addr = rvring->va;
len = rvring->len;
@@ -130,7 +126,6 @@ static void __rproc_virtio_del_vqs(struct virtio_device *vdev)
rvring = vq->priv;
rvring->vq = NULL;
vring_del_virtqueue(vq);
- rproc_free_vring(rvring);
}
}
@@ -282,14 +277,13 @@ static const struct virtio_config_ops rproc_virtio_config_ops = {
* Never call this function directly; it will be called by the driver
* core when needed.
*/
-static void rproc_vdev_release(struct device *dev)
+static void rproc_virtio_dev_release(struct device *dev)
{
struct virtio_device *vdev = dev_to_virtio(dev);
struct rproc_vdev *rvdev = vdev_to_rvdev(vdev);
struct rproc *rproc = vdev_to_rproc(vdev);
- list_del(&rvdev->node);
- kfree(rvdev);
+ kref_put(&rvdev->refcount, rproc_vdev_release);
put_device(&rproc->dev);
}
@@ -313,7 +307,7 @@ int rproc_add_virtio_dev(struct rproc_vdev *rvdev, int id)
vdev->id.device = id,
vdev->config = &rproc_virtio_config_ops,
vdev->dev.parent = dev;
- vdev->dev.release = rproc_vdev_release;
+ vdev->dev.release = rproc_virtio_dev_release;
/*
* We're indirectly making a non-temporary copy of the rproc pointer
@@ -325,6 +319,9 @@ int rproc_add_virtio_dev(struct rproc_vdev *rvdev, int id)
*/
get_device(&rproc->dev);
+ /* Reference the vdev and vring allocations */
+ kref_get(&rvdev->refcount);
+
ret = register_virtio_device(vdev);
if (ret) {
put_device(&rproc->dev);
diff --git a/drivers/remoteproc/st_remoteproc.c b/drivers/remoteproc/st_remoteproc.c
index ae8963fcc8c8..da4e152e9733 100644
--- a/drivers/remoteproc/st_remoteproc.c
+++ b/drivers/remoteproc/st_remoteproc.c
@@ -245,8 +245,10 @@ static int st_rproc_probe(struct platform_device *pdev)
goto free_rproc;
enabled = st_rproc_state(pdev);
- if (enabled < 0)
+ if (enabled < 0) {
+ ret = enabled;
goto free_rproc;
+ }
if (enabled) {
atomic_inc(&rproc->power);
diff --git a/drivers/remoteproc/st_slim_rproc.c b/drivers/remoteproc/st_slim_rproc.c
new file mode 100644
index 000000000000..507716c8721f
--- /dev/null
+++ b/drivers/remoteproc/st_slim_rproc.c
@@ -0,0 +1,364 @@
+/*
+ * SLIM core rproc driver
+ *
+ * Copyright (C) 2016 STMicroelectronics
+ *
+ * Author: Peter Griffin <peter.griffin@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/remoteproc.h>
+#include <linux/remoteproc/st_slim_rproc.h>
+#include "remoteproc_internal.h"
+
+/* SLIM core registers */
+#define SLIM_ID_OFST 0x0
+#define SLIM_VER_OFST 0x4
+
+#define SLIM_EN_OFST 0x8
+#define SLIM_EN_RUN BIT(0)
+
+#define SLIM_CLK_GATE_OFST 0xC
+#define SLIM_CLK_GATE_DIS BIT(0)
+#define SLIM_CLK_GATE_RESET BIT(2)
+
+#define SLIM_SLIM_PC_OFST 0x20
+
+/* DMEM registers */
+#define SLIM_REV_ID_OFST 0x0
+#define SLIM_REV_ID_MIN_MASK GENMASK(15, 8)
+#define SLIM_REV_ID_MIN(id) ((id & SLIM_REV_ID_MIN_MASK) >> 8)
+#define SLIM_REV_ID_MAJ_MASK GENMASK(23, 16)
+#define SLIM_REV_ID_MAJ(id) ((id & SLIM_REV_ID_MAJ_MASK) >> 16)
+
+/* peripherals registers */
+#define SLIM_STBUS_SYNC_OFST 0xF88
+#define SLIM_STBUS_SYNC_DIS BIT(0)
+
+#define SLIM_INT_SET_OFST 0xFD4
+#define SLIM_INT_CLR_OFST 0xFD8
+#define SLIM_INT_MASK_OFST 0xFDC
+
+#define SLIM_CMD_CLR_OFST 0xFC8
+#define SLIM_CMD_MASK_OFST 0xFCC
+
+static const char *mem_names[ST_SLIM_MEM_MAX] = {
+ [ST_SLIM_DMEM] = "dmem",
+ [ST_SLIM_IMEM] = "imem",
+};
+
+static int slim_clk_get(struct st_slim_rproc *slim_rproc, struct device *dev)
+{
+ int clk, err;
+
+ for (clk = 0; clk < ST_SLIM_MAX_CLK; clk++) {
+ slim_rproc->clks[clk] = of_clk_get(dev->of_node, clk);
+ if (IS_ERR(slim_rproc->clks[clk])) {
+ err = PTR_ERR(slim_rproc->clks[clk]);
+ if (err == -EPROBE_DEFER)
+ goto err_put_clks;
+ slim_rproc->clks[clk] = NULL;
+ break;
+ }
+ }
+
+ return 0;
+
+err_put_clks:
+ while (--clk >= 0)
+ clk_put(slim_rproc->clks[clk]);
+
+ return err;
+}
+
+static void slim_clk_disable(struct st_slim_rproc *slim_rproc)
+{
+ int clk;
+
+ for (clk = 0; clk < ST_SLIM_MAX_CLK && slim_rproc->clks[clk]; clk++)
+ clk_disable_unprepare(slim_rproc->clks[clk]);
+}
+
+static int slim_clk_enable(struct st_slim_rproc *slim_rproc)
+{
+ int clk, ret;
+
+ for (clk = 0; clk < ST_SLIM_MAX_CLK && slim_rproc->clks[clk]; clk++) {
+ ret = clk_prepare_enable(slim_rproc->clks[clk]);
+ if (ret)
+ goto err_disable_clks;
+ }
+
+ return 0;
+
+err_disable_clks:
+ while (--clk >= 0)
+ clk_disable_unprepare(slim_rproc->clks[clk]);
+
+ return ret;
+}
+
+/*
+ * Remoteproc slim specific device handlers
+ */
+static int slim_rproc_start(struct rproc *rproc)
+{
+ struct device *dev = &rproc->dev;
+ struct st_slim_rproc *slim_rproc = rproc->priv;
+ unsigned long hw_id, hw_ver, fw_rev;
+ u32 val;
+
+ /* disable CPU pipeline clock & reset CPU pipeline */
+ val = SLIM_CLK_GATE_DIS | SLIM_CLK_GATE_RESET;
+ writel(val, slim_rproc->slimcore + SLIM_CLK_GATE_OFST);
+
+ /* disable SLIM core STBus sync */
+ writel(SLIM_STBUS_SYNC_DIS, slim_rproc->peri + SLIM_STBUS_SYNC_OFST);
+
+ /* enable cpu pipeline clock */
+ writel(!SLIM_CLK_GATE_DIS,
+ slim_rproc->slimcore + SLIM_CLK_GATE_OFST);
+
+ /* clear int & cmd mailbox */
+ writel(~0U, slim_rproc->peri + SLIM_INT_CLR_OFST);
+ writel(~0U, slim_rproc->peri + SLIM_CMD_CLR_OFST);
+
+ /* enable all channels cmd & int */
+ writel(~0U, slim_rproc->peri + SLIM_INT_MASK_OFST);
+ writel(~0U, slim_rproc->peri + SLIM_CMD_MASK_OFST);
+
+ /* enable cpu */
+ writel(SLIM_EN_RUN, slim_rproc->slimcore + SLIM_EN_OFST);
+
+ hw_id = readl_relaxed(slim_rproc->slimcore + SLIM_ID_OFST);
+ hw_ver = readl_relaxed(slim_rproc->slimcore + SLIM_VER_OFST);
+
+ fw_rev = readl(slim_rproc->mem[ST_SLIM_DMEM].cpu_addr +
+ SLIM_REV_ID_OFST);
+
+ dev_info(dev, "fw rev:%ld.%ld on SLIM %ld.%ld\n",
+ SLIM_REV_ID_MAJ(fw_rev), SLIM_REV_ID_MIN(fw_rev),
+ hw_id, hw_ver);
+
+ return 0;
+}
+
+static int slim_rproc_stop(struct rproc *rproc)
+{
+ struct st_slim_rproc *slim_rproc = rproc->priv;
+ u32 val;
+
+ /* mask all (cmd & int) channels */
+ writel(0UL, slim_rproc->peri + SLIM_INT_MASK_OFST);
+ writel(0UL, slim_rproc->peri + SLIM_CMD_MASK_OFST);
+
+ /* disable cpu pipeline clock */
+ writel(SLIM_CLK_GATE_DIS, slim_rproc->slimcore + SLIM_CLK_GATE_OFST);
+
+ writel(!SLIM_EN_RUN, slim_rproc->slimcore + SLIM_EN_OFST);
+
+ val = readl(slim_rproc->slimcore + SLIM_EN_OFST);
+ if (val & SLIM_EN_RUN)
+ dev_warn(&rproc->dev, "Failed to disable SLIM");
+
+ dev_dbg(&rproc->dev, "slim stopped\n");
+
+ return 0;
+}
+
+static void *slim_rproc_da_to_va(struct rproc *rproc, u64 da, int len)
+{
+ struct st_slim_rproc *slim_rproc = rproc->priv;
+ void *va = NULL;
+ int i;
+
+ for (i = 0; i < ST_SLIM_MEM_MAX; i++) {
+ if (da != slim_rproc->mem[i].bus_addr)
+ continue;
+
+ if (len <= slim_rproc->mem[i].size) {
+ /* __force to make sparse happy with type conversion */
+ va = (__force void *)slim_rproc->mem[i].cpu_addr;
+ break;
+ }
+ }
+
+ dev_dbg(&rproc->dev, "da = 0x%llx len = 0x%x va = 0x%p\n", da, len, va);
+
+ return va;
+}
+
+static struct rproc_ops slim_rproc_ops = {
+ .start = slim_rproc_start,
+ .stop = slim_rproc_stop,
+ .da_to_va = slim_rproc_da_to_va,
+};
+
+/*
+ * Firmware handler operations: sanity, boot address, load ...
+ */
+
+static struct resource_table empty_rsc_tbl = {
+ .ver = 1,
+ .num = 0,
+};
+
+static struct resource_table *slim_rproc_find_rsc_table(struct rproc *rproc,
+ const struct firmware *fw,
+ int *tablesz)
+{
+ *tablesz = sizeof(empty_rsc_tbl);
+ return &empty_rsc_tbl;
+}
+
+static struct rproc_fw_ops slim_rproc_fw_ops = {
+ .find_rsc_table = slim_rproc_find_rsc_table,
+};
+
+/**
+ * st_slim_rproc_alloc() - allocate and initialise slim rproc
+ * @pdev: Pointer to the platform_device struct
+ * @fw_name: Name of firmware for rproc to use
+ *
+ * Function for allocating and initialising a slim rproc for use by
+ * device drivers whose IP is based around the SLIM core. It
+ * obtains and enables any clocks required by the SLIM core and also
+ * ioremaps the various IO.
+ *
+ * Returns st_slim_rproc pointer or ERR_PTR() on error.
+ */
+
+struct st_slim_rproc *st_slim_rproc_alloc(struct platform_device *pdev,
+ char *fw_name)
+{
+ struct device *dev = &pdev->dev;
+ struct st_slim_rproc *slim_rproc;
+ struct device_node *np = dev->of_node;
+ struct rproc *rproc;
+ struct resource *res;
+ int err, i;
+ const struct rproc_fw_ops *elf_ops;
+
+ if (!fw_name)
+ return ERR_PTR(-EINVAL);
+
+ if (!of_device_is_compatible(np, "st,slim-rproc"))
+ return ERR_PTR(-EINVAL);
+
+ rproc = rproc_alloc(dev, np->name, &slim_rproc_ops,
+ fw_name, sizeof(*slim_rproc));
+ if (!rproc)
+ return ERR_PTR(-ENOMEM);
+
+ rproc->has_iommu = false;
+
+ slim_rproc = rproc->priv;
+ slim_rproc->rproc = rproc;
+
+ elf_ops = rproc->fw_ops;
+ /* Use some generic elf ops */
+ slim_rproc_fw_ops.load = elf_ops->load;
+ slim_rproc_fw_ops.sanity_check = elf_ops->sanity_check;
+
+ rproc->fw_ops = &slim_rproc_fw_ops;
+
+ /* get imem and dmem */
+ for (i = 0; i < ARRAY_SIZE(mem_names); i++) {
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ mem_names[i]);
+
+ slim_rproc->mem[i].cpu_addr = devm_ioremap_resource(dev, res);
+ if (IS_ERR(slim_rproc->mem[i].cpu_addr)) {
+ dev_err(&pdev->dev, "devm_ioremap_resource failed\n");
+ err = PTR_ERR(slim_rproc->mem[i].cpu_addr);
+ goto err;
+ }
+ slim_rproc->mem[i].bus_addr = res->start;
+ slim_rproc->mem[i].size = resource_size(res);
+ }
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "slimcore");
+ slim_rproc->slimcore = devm_ioremap_resource(dev, res);
+ if (IS_ERR(slim_rproc->slimcore)) {
+ dev_err(&pdev->dev, "failed to ioremap slimcore IO\n");
+ err = PTR_ERR(slim_rproc->slimcore);
+ goto err;
+ }
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "peripherals");
+ slim_rproc->peri = devm_ioremap_resource(dev, res);
+ if (IS_ERR(slim_rproc->peri)) {
+ dev_err(&pdev->dev, "failed to ioremap peripherals IO\n");
+ err = PTR_ERR(slim_rproc->peri);
+ goto err;
+ }
+
+ err = slim_clk_get(slim_rproc, dev);
+ if (err)
+ goto err;
+
+ err = slim_clk_enable(slim_rproc);
+ if (err) {
+ dev_err(dev, "Failed to enable clocks\n");
+ goto err_clk_put;
+ }
+
+ /* Register as a remoteproc device */
+ err = rproc_add(rproc);
+ if (err) {
+ dev_err(dev, "registration of slim remoteproc failed\n");
+ goto err_clk_dis;
+ }
+
+ return slim_rproc;
+
+err_clk_dis:
+ slim_clk_disable(slim_rproc);
+err_clk_put:
+ for (i = 0; i < ST_SLIM_MAX_CLK && slim_rproc->clks[i]; i++)
+ clk_put(slim_rproc->clks[i]);
+err:
+ rproc_free(rproc);
+ return ERR_PTR(err);
+}
+EXPORT_SYMBOL(st_slim_rproc_alloc);
+
+/**
+ * st_slim_rproc_put() - put slim rproc resources
+ * @slim_rproc: Pointer to the st_slim_rproc struct
+ *
+ * Function for calling respective _put() functions on slim_rproc resources.
+ *
+ */
+void st_slim_rproc_put(struct st_slim_rproc *slim_rproc)
+{
+ int clk;
+
+ if (!slim_rproc)
+ return;
+
+ slim_clk_disable(slim_rproc);
+
+ for (clk = 0; clk < ST_SLIM_MAX_CLK && slim_rproc->clks[clk]; clk++)
+ clk_put(slim_rproc->clks[clk]);
+
+ rproc_del(slim_rproc->rproc);
+ rproc_free(slim_rproc->rproc);
+}
+EXPORT_SYMBOL(st_slim_rproc_put);
+
+MODULE_AUTHOR("Peter Griffin <peter.griffin@linaro.org>");
+MODULE_DESCRIPTION("STMicroelectronics SLIM core rproc driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/remoteproc/ste_modem_rproc.c b/drivers/remoteproc/ste_modem_rproc.c
deleted file mode 100644
index 03d69a9a3c5b..000000000000
--- a/drivers/remoteproc/ste_modem_rproc.c
+++ /dev/null
@@ -1,342 +0,0 @@
-/*
- * Copyright (C) ST-Ericsson AB 2012
- * Author: Sjur Brændeland <sjur.brandeland@stericsson.com>
- * License terms: GNU General Public License (GPL), version 2
- */
-
-#include <linux/module.h>
-#include <linux/dma-mapping.h>
-#include <linux/remoteproc.h>
-#include <linux/ste_modem_shm.h>
-#include "remoteproc_internal.h"
-
-#define SPROC_FW_SIZE (50 * 4096)
-#define SPROC_MAX_TOC_ENTRIES 32
-#define SPROC_MAX_NOTIFY_ID 14
-#define SPROC_RESOURCE_NAME "rsc-table"
-#define SPROC_MODEM_NAME "ste-modem"
-#define SPROC_MODEM_FIRMWARE SPROC_MODEM_NAME "-fw.bin"
-
-#define sproc_dbg(sproc, fmt, ...) \
- dev_dbg(&sproc->mdev->pdev.dev, fmt, ##__VA_ARGS__)
-#define sproc_err(sproc, fmt, ...) \
- dev_err(&sproc->mdev->pdev.dev, fmt, ##__VA_ARGS__)
-
-/* STE-modem control structure */
-struct sproc {
- struct rproc *rproc;
- struct ste_modem_device *mdev;
- int error;
- void *fw_addr;
- size_t fw_size;
- dma_addr_t fw_dma_addr;
-};
-
-/* STE-Modem firmware entry */
-struct ste_toc_entry {
- __le32 start;
- __le32 size;
- __le32 flags;
- __le32 entry_point;
- __le32 load_addr;
- char name[12];
-};
-
-/*
- * The Table Of Content is located at the start of the firmware image and
- * at offset zero in the shared memory region. The resource table typically
- * contains the initial boot image (boot strap) and other information elements
- * such as remoteproc resource table. Each entry is identified by a unique
- * name.
- */
-struct ste_toc {
- struct ste_toc_entry table[SPROC_MAX_TOC_ENTRIES];
-};
-
-/* Loads the firmware to shared memory. */
-static int sproc_load_segments(struct rproc *rproc, const struct firmware *fw)
-{
- struct sproc *sproc = rproc->priv;
-
- memcpy(sproc->fw_addr, fw->data, fw->size);
-
- return 0;
-}
-
-/* Find the entry for resource table in the Table of Content */
-static const struct ste_toc_entry *sproc_find_rsc_entry(const void *data)
-{
- int i;
- const struct ste_toc *toc = data;
-
- /* Search the table for the resource table */
- for (i = 0; i < SPROC_MAX_TOC_ENTRIES &&
- toc->table[i].start != 0xffffffff; i++) {
- if (!strncmp(toc->table[i].name, SPROC_RESOURCE_NAME,
- sizeof(toc->table[i].name)))
- return &toc->table[i];
- }
-
- return NULL;
-}
-
-/* Find the resource table inside the remote processor's firmware. */
-static struct resource_table *
-sproc_find_rsc_table(struct rproc *rproc, const struct firmware *fw,
- int *tablesz)
-{
- struct sproc *sproc = rproc->priv;
- struct resource_table *table;
- const struct ste_toc_entry *entry;
-
- if (!fw)
- return NULL;
-
- entry = sproc_find_rsc_entry(fw->data);
- if (!entry) {
- sproc_err(sproc, "resource table not found in fw\n");
- return NULL;
- }
-
- table = (void *)(fw->data + entry->start);
-
- /* sanity check size and offset of resource table */
- if (entry->start > SPROC_FW_SIZE ||
- entry->size > SPROC_FW_SIZE ||
- fw->size > SPROC_FW_SIZE ||
- entry->start + entry->size > fw->size ||
- sizeof(struct resource_table) > entry->size) {
- sproc_err(sproc, "bad size of fw or resource table\n");
- return NULL;
- }
-
- /* we don't support any version beyond the first */
- if (table->ver != 1) {
- sproc_err(sproc, "unsupported fw ver: %d\n", table->ver);
- return NULL;
- }
-
- /* make sure reserved bytes are zeroes */
- if (table->reserved[0] || table->reserved[1]) {
- sproc_err(sproc, "non zero reserved bytes\n");
- return NULL;
- }
-
- /* make sure the offsets array isn't truncated */
- if (table->num > SPROC_MAX_TOC_ENTRIES ||
- table->num * sizeof(table->offset[0]) +
- sizeof(struct resource_table) > entry->size) {
- sproc_err(sproc, "resource table incomplete\n");
- return NULL;
- }
-
- /* If the fw size has grown, release the previous fw allocation */
- if (SPROC_FW_SIZE < fw->size) {
- sproc_err(sproc, "Insufficient space for fw (%d < %zd)\n",
- SPROC_FW_SIZE, fw->size);
- return NULL;
- }
-
- sproc->fw_size = fw->size;
- *tablesz = entry->size;
-
- return table;
-}
-
-/* Find the resource table inside the remote processor's firmware. */
-static struct resource_table *
-sproc_find_loaded_rsc_table(struct rproc *rproc, const struct firmware *fw)
-{
- struct sproc *sproc = rproc->priv;
- const struct ste_toc_entry *entry;
-
- if (!fw || !sproc->fw_addr)
- return NULL;
-
- entry = sproc_find_rsc_entry(sproc->fw_addr);
- if (!entry) {
- sproc_err(sproc, "resource table not found in fw\n");
- return NULL;
- }
-
- return sproc->fw_addr + entry->start;
-}
-
-/* STE modem firmware handler operations */
-static const struct rproc_fw_ops sproc_fw_ops = {
- .load = sproc_load_segments,
- .find_rsc_table = sproc_find_rsc_table,
- .find_loaded_rsc_table = sproc_find_loaded_rsc_table,
-};
-
-/* Kick the modem with specified notification id */
-static void sproc_kick(struct rproc *rproc, int vqid)
-{
- struct sproc *sproc = rproc->priv;
-
- sproc_dbg(sproc, "kick vqid:%d\n", vqid);
-
- /*
- * We need different notification IDs for RX and TX so add
- * an offset on TX notification IDs.
- */
- sproc->mdev->ops.kick(sproc->mdev, vqid + SPROC_MAX_NOTIFY_ID);
-}
-
-/* Received a kick from a modem, kick the virtqueue */
-static void sproc_kick_callback(struct ste_modem_device *mdev, int vqid)
-{
- struct sproc *sproc = mdev->drv_data;
-
- if (rproc_vq_interrupt(sproc->rproc, vqid) == IRQ_NONE)
- sproc_dbg(sproc, "no message was found in vqid %d\n", vqid);
-}
-
-static struct ste_modem_dev_cb sproc_dev_cb = {
- .kick = sproc_kick_callback,
-};
-
-/* Start the STE modem */
-static int sproc_start(struct rproc *rproc)
-{
- struct sproc *sproc = rproc->priv;
- int i, err;
-
- sproc_dbg(sproc, "start ste-modem\n");
-
- /* Sanity test the max_notifyid */
- if (rproc->max_notifyid > SPROC_MAX_NOTIFY_ID) {
- sproc_err(sproc, "Notification IDs too high:%d\n",
- rproc->max_notifyid);
- return -EINVAL;
- }
-
- /* Subscribe to notifications */
- for (i = 0; i <= rproc->max_notifyid; i++) {
- err = sproc->mdev->ops.kick_subscribe(sproc->mdev, i);
- if (err) {
- sproc_err(sproc,
- "subscription of kicks failed:%d\n", err);
- return err;
- }
- }
-
- /* Request modem start-up*/
- return sproc->mdev->ops.power(sproc->mdev, true);
-}
-
-/* Stop the STE modem */
-static int sproc_stop(struct rproc *rproc)
-{
- struct sproc *sproc = rproc->priv;
-
- sproc_dbg(sproc, "stop ste-modem\n");
-
- return sproc->mdev->ops.power(sproc->mdev, false);
-}
-
-static struct rproc_ops sproc_ops = {
- .start = sproc_start,
- .stop = sproc_stop,
- .kick = sproc_kick,
-};
-
-/* STE modem device is unregistered */
-static int sproc_drv_remove(struct platform_device *pdev)
-{
- struct ste_modem_device *mdev =
- container_of(pdev, struct ste_modem_device, pdev);
- struct sproc *sproc = mdev->drv_data;
-
- sproc_dbg(sproc, "remove ste-modem\n");
-
- /* Reset device callback functions */
- sproc->mdev->ops.setup(sproc->mdev, NULL);
-
- /* Unregister as remoteproc device */
- rproc_del(sproc->rproc);
- dma_free_coherent(sproc->rproc->dev.parent, SPROC_FW_SIZE,
- sproc->fw_addr, sproc->fw_dma_addr);
- rproc_free(sproc->rproc);
-
- mdev->drv_data = NULL;
-
- return 0;
-}
-
-/* Handle probe of a modem device */
-static int sproc_probe(struct platform_device *pdev)
-{
- struct ste_modem_device *mdev =
- container_of(pdev, struct ste_modem_device, pdev);
- struct sproc *sproc;
- struct rproc *rproc;
- int err;
-
- dev_dbg(&mdev->pdev.dev, "probe ste-modem\n");
-
- if (!mdev->ops.setup || !mdev->ops.kick || !mdev->ops.kick_subscribe ||
- !mdev->ops.power) {
- dev_err(&mdev->pdev.dev, "invalid mdev ops\n");
- return -EINVAL;
- }
-
- rproc = rproc_alloc(&mdev->pdev.dev, mdev->pdev.name, &sproc_ops,
- SPROC_MODEM_FIRMWARE, sizeof(*sproc));
- if (!rproc)
- return -ENOMEM;
-
- sproc = rproc->priv;
- sproc->mdev = mdev;
- sproc->rproc = rproc;
- rproc->has_iommu = false;
- mdev->drv_data = sproc;
-
- /* Provide callback functions to modem device */
- sproc->mdev->ops.setup(sproc->mdev, &sproc_dev_cb);
-
- /* Set the STE-modem specific firmware handler */
- rproc->fw_ops = &sproc_fw_ops;
-
- /*
- * STE-modem requires the firmware to be located
- * at the start of the shared memory region. So we need to
- * reserve space for firmware at the start.
- */
- sproc->fw_addr = dma_alloc_coherent(rproc->dev.parent, SPROC_FW_SIZE,
- &sproc->fw_dma_addr,
- GFP_KERNEL);
- if (!sproc->fw_addr) {
- sproc_err(sproc, "Cannot allocate memory for fw\n");
- err = -ENOMEM;
- goto free_rproc;
- }
-
- /* Register as a remoteproc device */
- err = rproc_add(rproc);
- if (err)
- goto free_mem;
-
- return 0;
-
-free_mem:
- dma_free_coherent(rproc->dev.parent, SPROC_FW_SIZE,
- sproc->fw_addr, sproc->fw_dma_addr);
-free_rproc:
- /* Reset device data upon error */
- mdev->drv_data = NULL;
- rproc_free(rproc);
- return err;
-}
-
-static struct platform_driver sproc_driver = {
- .driver = {
- .name = SPROC_MODEM_NAME,
- },
- .probe = sproc_probe,
- .remove = sproc_drv_remove,
-};
-
-module_platform_driver(sproc_driver);
-MODULE_LICENSE("GPL v2");
-MODULE_DESCRIPTION("STE Modem driver using the Remote Processor Framework");
diff --git a/drivers/reset/Kconfig b/drivers/reset/Kconfig
index 06d9fa2f3bc0..172dc966a01f 100644
--- a/drivers/reset/Kconfig
+++ b/drivers/reset/Kconfig
@@ -94,5 +94,6 @@ config RESET_ZYNQ
source "drivers/reset/sti/Kconfig"
source "drivers/reset/hisilicon/Kconfig"
+source "drivers/reset/tegra/Kconfig"
endif
diff --git a/drivers/reset/Makefile b/drivers/reset/Makefile
index bbe7026617fc..13b346e03d84 100644
--- a/drivers/reset/Makefile
+++ b/drivers/reset/Makefile
@@ -1,6 +1,7 @@
obj-y += core.o
obj-y += hisilicon/
obj-$(CONFIG_ARCH_STI) += sti/
+obj-$(CONFIG_ARCH_TEGRA) += tegra/
obj-$(CONFIG_RESET_ATH79) += reset-ath79.o
obj-$(CONFIG_RESET_BERLIN) += reset-berlin.o
obj-$(CONFIG_RESET_LPC18XX) += reset-lpc18xx.o
diff --git a/drivers/reset/core.c b/drivers/reset/core.c
index b8ae1dbd4c17..10368ed8fd13 100644
--- a/drivers/reset/core.c
+++ b/drivers/reset/core.c
@@ -32,6 +32,9 @@ static LIST_HEAD(reset_controller_list);
* @refcnt: Number of gets of this reset_control
* @shared: Is this a shared (1), or an exclusive (0) reset_control?
* @deassert_count: Number of times this reset line has been deasserted
+ * @triggered_count: Number of times this reset line has been reset. Currently
+ * only used for shared resets, which means that the value
+ * will be either 0 or 1.
*/
struct reset_control {
struct reset_controller_dev *rcdev;
@@ -40,6 +43,7 @@ struct reset_control {
unsigned int refcnt;
int shared;
atomic_t deassert_count;
+ atomic_t triggered_count;
};
/**
@@ -134,18 +138,35 @@ EXPORT_SYMBOL_GPL(devm_reset_controller_register);
* reset_control_reset - reset the controlled device
* @rstc: reset controller
*
- * Calling this on a shared reset controller is an error.
+ * On a shared reset line the actual reset pulse is only triggered once for the
+ * lifetime of the reset_control instance: for all but the first caller this is
+ * a no-op.
+ * Consumers must not use reset_control_(de)assert on shared reset lines when
+ * reset_control_reset has been used.
*/
int reset_control_reset(struct reset_control *rstc)
{
- if (WARN_ON(IS_ERR_OR_NULL(rstc)) ||
- WARN_ON(rstc->shared))
+ int ret;
+
+ if (WARN_ON(IS_ERR_OR_NULL(rstc)))
return -EINVAL;
- if (rstc->rcdev->ops->reset)
- return rstc->rcdev->ops->reset(rstc->rcdev, rstc->id);
+ if (!rstc->rcdev->ops->reset)
+ return -ENOTSUPP;
- return -ENOTSUPP;
+ if (rstc->shared) {
+ if (WARN_ON(atomic_read(&rstc->deassert_count) != 0))
+ return -EINVAL;
+
+ if (atomic_inc_return(&rstc->triggered_count) != 1)
+ return 0;
+ }
+
+ ret = rstc->rcdev->ops->reset(rstc->rcdev, rstc->id);
+ if (rstc->shared && !ret)
+ atomic_dec(&rstc->triggered_count);
+
+ return ret;
}
EXPORT_SYMBOL_GPL(reset_control_reset);
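From the consumer side, the shared-reset contract documented above works out as follows; a minimal sketch, assuming the existing devm_reset_control_get_shared() consumer API, as a fragment running in some driver's probe (dev is illustrative):

	struct reset_control *rstc;
	int ret;

	rstc = devm_reset_control_get_shared(dev, NULL);
	if (IS_ERR(rstc))
		return PTR_ERR(rstc);

	/*
	 * The first consumer to call this triggers the actual pulse;
	 * every later caller sees triggered_count != 1 and returns 0.
	 * Mixing this with reset_control_assert()/deassert() on the
	 * same shared line is now rejected with -EINVAL.
	 */
	ret = reset_control_reset(rstc);
	if (ret)
		return ret;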
@@ -159,6 +180,8 @@ EXPORT_SYMBOL_GPL(reset_control_reset);
*
* For shared reset controls a driver cannot expect the hw's registers and
* internal state to be reset, but must be prepared for this to happen.
+ * Consumers must not use reset_control_reset on shared reset lines when
+ * reset_control_(de)assert has been used.
*/
int reset_control_assert(struct reset_control *rstc)
{
@@ -169,6 +192,9 @@ int reset_control_assert(struct reset_control *rstc)
return -ENOTSUPP;
if (rstc->shared) {
+ if (WARN_ON(atomic_read(&rstc->triggered_count) != 0))
+ return -EINVAL;
+
if (WARN_ON(atomic_read(&rstc->deassert_count) == 0))
return -EINVAL;
@@ -185,6 +211,8 @@ EXPORT_SYMBOL_GPL(reset_control_assert);
* @rstc: reset controller
*
* After calling this function, the reset is guaranteed to be deasserted.
+ * Consumers must not use reset_control_reset on shared reset lines when
+ * reset_control_(de)assert has been used.
*/
int reset_control_deassert(struct reset_control *rstc)
{
@@ -195,6 +223,9 @@ int reset_control_deassert(struct reset_control *rstc)
return -ENOTSUPP;
if (rstc->shared) {
+ if (WARN_ON(atomic_read(&rstc->triggered_count) != 0))
+ return -EINVAL;
+
if (atomic_inc_return(&rstc->deassert_count) != 1)
return 0;
}
diff --git a/drivers/reset/reset-berlin.c b/drivers/reset/reset-berlin.c
index 369f3917fd8e..371197bbd055 100644
--- a/drivers/reset/reset-berlin.c
+++ b/drivers/reset/reset-berlin.c
@@ -1,6 +1,8 @@
/*
* Copyright (C) 2014 Marvell Technology Group Ltd.
*
+ * Marvell Berlin reset driver
+ *
* Antoine Tenart <antoine.tenart@free-electrons.com>
* Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com>
*
@@ -12,7 +14,7 @@
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/mfd/syscon.h>
-#include <linux/module.h>
+#include <linux/init.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
@@ -91,7 +93,6 @@ static const struct of_device_id berlin_reset_dt_match[] = {
{ .compatible = "marvell,berlin2-reset" },
{ },
};
-MODULE_DEVICE_TABLE(of, berlin_reset_dt_match);
static struct platform_driver berlin_reset_driver = {
.probe = berlin2_reset_probe,
@@ -100,9 +101,4 @@ static struct platform_driver berlin_reset_driver = {
.of_match_table = berlin_reset_dt_match,
},
};
-module_platform_driver(berlin_reset_driver);
-
-MODULE_AUTHOR("Antoine Tenart <antoine.tenart@free-electrons.com>");
-MODULE_AUTHOR("Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com>");
-MODULE_DESCRIPTION("Marvell Berlin reset driver");
-MODULE_LICENSE("GPL");
+builtin_platform_driver(berlin_reset_driver);
diff --git a/drivers/reset/reset-lpc18xx.c b/drivers/reset/reset-lpc18xx.c
index 54cca0055171..a62ad52e262b 100644
--- a/drivers/reset/reset-lpc18xx.c
+++ b/drivers/reset/reset-lpc18xx.c
@@ -13,7 +13,7 @@
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/io.h>
-#include <linux/module.h>
+#include <linux/init.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/reboot.h>
@@ -218,39 +218,17 @@ dis_clk_reg:
return ret;
}
-static int lpc18xx_rgu_remove(struct platform_device *pdev)
-{
- struct lpc18xx_rgu_data *rc = platform_get_drvdata(pdev);
- int ret;
-
- ret = unregister_restart_handler(&rc->restart_nb);
- if (ret)
- dev_warn(&pdev->dev, "failed to unregister restart handler\n");
-
- reset_controller_unregister(&rc->rcdev);
-
- clk_disable_unprepare(rc->clk_delay);
- clk_disable_unprepare(rc->clk_reg);
-
- return 0;
-}
-
static const struct of_device_id lpc18xx_rgu_match[] = {
{ .compatible = "nxp,lpc1850-rgu" },
{ }
};
-MODULE_DEVICE_TABLE(of, lpc18xx_rgu_match);
static struct platform_driver lpc18xx_rgu_driver = {
.probe = lpc18xx_rgu_probe,
- .remove = lpc18xx_rgu_remove,
.driver = {
- .name = "lpc18xx-reset",
- .of_match_table = lpc18xx_rgu_match,
+ .name = "lpc18xx-reset",
+ .of_match_table = lpc18xx_rgu_match,
+ .suppress_bind_attrs = true,
},
};
-module_platform_driver(lpc18xx_rgu_driver);
-
-MODULE_AUTHOR("Joachim Eastwood <manabian@gmail.com>");
-MODULE_DESCRIPTION("Reset driver for LPC18xx/43xx RGU");
-MODULE_LICENSE("GPL v2");
+builtin_platform_driver(lpc18xx_rgu_driver);
diff --git a/drivers/reset/reset-oxnas.c b/drivers/reset/reset-oxnas.c
index 944980572f79..0d9036dea010 100644
--- a/drivers/reset/reset-oxnas.c
+++ b/drivers/reset/reset-oxnas.c
@@ -80,6 +80,7 @@ static const struct reset_control_ops oxnas_reset_ops = {
static const struct of_device_id oxnas_reset_dt_ids[] = {
{ .compatible = "oxsemi,ox810se-reset", },
+ { .compatible = "oxsemi,ox820-reset", },
{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, oxnas_reset_dt_ids);
diff --git a/drivers/reset/reset-socfpga.c b/drivers/reset/reset-socfpga.c
index 78ebf8424375..43e4a9f39b9b 100644
--- a/drivers/reset/reset-socfpga.c
+++ b/drivers/reset/reset-socfpga.c
@@ -1,4 +1,6 @@
/*
+ * Socfpga Reset Controller Driver
+ *
* Copyright 2014 Steffen Trumtrar <s.trumtrar@pengutronix.de>
*
* based on
@@ -16,7 +18,7 @@
#include <linux/err.h>
#include <linux/io.h>
-#include <linux/module.h>
+#include <linux/init.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/reset-controller.h>
@@ -148,8 +150,4 @@ static struct platform_driver socfpga_reset_driver = {
.of_match_table = socfpga_reset_dt_ids,
},
};
-module_platform_driver(socfpga_reset_driver);
-
-MODULE_AUTHOR("Steffen Trumtrar <s.trumtrar@pengutronix.de");
-MODULE_DESCRIPTION("Socfpga Reset Controller Driver");
-MODULE_LICENSE("GPL");
+builtin_platform_driver(socfpga_reset_driver);
diff --git a/drivers/reset/reset-sunxi.c b/drivers/reset/reset-sunxi.c
index 3080190b3f90..b44f6b5f87b6 100644
--- a/drivers/reset/reset-sunxi.c
+++ b/drivers/reset/reset-sunxi.c
@@ -13,7 +13,7 @@
#include <linux/err.h>
#include <linux/io.h>
-#include <linux/module.h>
+#include <linux/init.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
@@ -142,7 +142,6 @@ static const struct of_device_id sunxi_reset_dt_ids[] = {
{ .compatible = "allwinner,sun6i-a31-clock-reset", },
{ /* sentinel */ },
};
-MODULE_DEVICE_TABLE(of, sunxi_reset_dt_ids);
static int sunxi_reset_probe(struct platform_device *pdev)
{
@@ -175,8 +174,4 @@ static struct platform_driver sunxi_reset_driver = {
.of_match_table = sunxi_reset_dt_ids,
},
};
-module_platform_driver(sunxi_reset_driver);
-
-MODULE_AUTHOR("Maxime Ripard <maxime.ripard@free-electrons.com");
-MODULE_DESCRIPTION("Allwinner SoCs Reset Controller Driver");
-MODULE_LICENSE("GPL");
+builtin_platform_driver(sunxi_reset_driver);
diff --git a/drivers/reset/reset-zynq.c b/drivers/reset/reset-zynq.c
index 138f2f205662..87a4e355578f 100644
--- a/drivers/reset/reset-zynq.c
+++ b/drivers/reset/reset-zynq.c
@@ -3,6 +3,8 @@
*
* Xilinx Zynq Reset controller driver
*
+ * Author: Moritz Fischer <moritz.fischer@ettus.com>
+ *
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
@@ -15,7 +17,7 @@
#include <linux/err.h>
#include <linux/io.h>
-#include <linux/module.h>
+#include <linux/init.h>
#include <linux/mfd/syscon.h>
#include <linux/of.h>
#include <linux/platform_device.h>
@@ -137,8 +139,4 @@ static struct platform_driver zynq_reset_driver = {
.of_match_table = zynq_reset_dt_ids,
},
};
-module_platform_driver(zynq_reset_driver);
-
-MODULE_LICENSE("GPL v2");
-MODULE_AUTHOR("Moritz Fischer <moritz.fischer@ettus.com>");
-MODULE_DESCRIPTION("Zynq Reset Controller Driver");
+builtin_platform_driver(zynq_reset_driver);
diff --git a/drivers/reset/sti/Kconfig b/drivers/reset/sti/Kconfig
index 613178553612..71592b5bfd14 100644
--- a/drivers/reset/sti/Kconfig
+++ b/drivers/reset/sti/Kconfig
@@ -3,14 +3,6 @@ if ARCH_STI
config STI_RESET_SYSCFG
bool
-config STIH415_RESET
- bool
- select STI_RESET_SYSCFG
-
-config STIH416_RESET
- bool
- select STI_RESET_SYSCFG
-
config STIH407_RESET
bool
select STI_RESET_SYSCFG
diff --git a/drivers/reset/sti/Makefile b/drivers/reset/sti/Makefile
index dc85dfbe56a9..f9d82411f29e 100644
--- a/drivers/reset/sti/Makefile
+++ b/drivers/reset/sti/Makefile
@@ -1,5 +1,3 @@
obj-$(CONFIG_STI_RESET_SYSCFG) += reset-syscfg.o
-obj-$(CONFIG_STIH415_RESET) += reset-stih415.o
-obj-$(CONFIG_STIH416_RESET) += reset-stih416.o
obj-$(CONFIG_STIH407_RESET) += reset-stih407.o
diff --git a/drivers/reset/sti/reset-stih415.c b/drivers/reset/sti/reset-stih415.c
deleted file mode 100644
index 6f220cdbef46..000000000000
--- a/drivers/reset/sti/reset-stih415.c
+++ /dev/null
@@ -1,112 +0,0 @@
-/*
- * Copyright (C) 2013 STMicroelectronics (R&D) Limited
- * Author: Stephen Gallimore <stephen.gallimore@st.com>
- * Author: Srinivas Kandagatla <srinivas.kandagatla@st.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-#include <linux/module.h>
-#include <linux/of.h>
-#include <linux/of_platform.h>
-#include <linux/platform_device.h>
-
-#include <dt-bindings/reset/stih415-resets.h>
-
-#include "reset-syscfg.h"
-
-/*
- * STiH415 Peripheral powerdown definitions.
- */
-static const char stih415_front[] = "st,stih415-front-syscfg";
-static const char stih415_rear[] = "st,stih415-rear-syscfg";
-static const char stih415_sbc[] = "st,stih415-sbc-syscfg";
-static const char stih415_lpm[] = "st,stih415-lpm-syscfg";
-
-#define STIH415_PDN_FRONT(_bit) \
- _SYSCFG_RST_CH(stih415_front, SYSCFG_114, _bit, SYSSTAT_187, _bit)
-
-#define STIH415_PDN_REAR(_cntl, _stat) \
- _SYSCFG_RST_CH(stih415_rear, SYSCFG_336, _cntl, SYSSTAT_384, _stat)
-
-#define STIH415_SRST_REAR(_reg, _bit) \
- _SYSCFG_RST_CH_NO_ACK(stih415_rear, _reg, _bit)
-
-#define STIH415_SRST_SBC(_reg, _bit) \
- _SYSCFG_RST_CH_NO_ACK(stih415_sbc, _reg, _bit)
-
-#define STIH415_SRST_FRONT(_reg, _bit) \
- _SYSCFG_RST_CH_NO_ACK(stih415_front, _reg, _bit)
-
-#define STIH415_SRST_LPM(_reg, _bit) \
- _SYSCFG_RST_CH_NO_ACK(stih415_lpm, _reg, _bit)
-
-#define SYSCFG_114 0x38 /* Powerdown request EMI/NAND/Keyscan */
-#define SYSSTAT_187 0x15c /* Powerdown status EMI/NAND/Keyscan */
-
-#define SYSCFG_336 0x90 /* Powerdown request USB/SATA/PCIe */
-#define SYSSTAT_384 0x150 /* Powerdown status USB/SATA/PCIe */
-
-#define SYSCFG_376 0x130 /* Reset generator 0 control 0 */
-#define SYSCFG_166 0x108 /* Softreset Ethernet 0 */
-#define SYSCFG_31 0x7c /* Softreset Ethernet 1 */
-#define LPM_SYSCFG_1 0x4 /* Softreset IRB */
-
-static const struct syscfg_reset_channel_data stih415_powerdowns[] = {
- [STIH415_EMISS_POWERDOWN] = STIH415_PDN_FRONT(0),
- [STIH415_NAND_POWERDOWN] = STIH415_PDN_FRONT(1),
- [STIH415_KEYSCAN_POWERDOWN] = STIH415_PDN_FRONT(2),
- [STIH415_USB0_POWERDOWN] = STIH415_PDN_REAR(0, 0),
- [STIH415_USB1_POWERDOWN] = STIH415_PDN_REAR(1, 1),
- [STIH415_USB2_POWERDOWN] = STIH415_PDN_REAR(2, 2),
- [STIH415_SATA0_POWERDOWN] = STIH415_PDN_REAR(3, 3),
- [STIH415_SATA1_POWERDOWN] = STIH415_PDN_REAR(4, 4),
- [STIH415_PCIE_POWERDOWN] = STIH415_PDN_REAR(5, 8),
-};
-
-static const struct syscfg_reset_channel_data stih415_softresets[] = {
- [STIH415_ETH0_SOFTRESET] = STIH415_SRST_FRONT(SYSCFG_166, 0),
- [STIH415_ETH1_SOFTRESET] = STIH415_SRST_SBC(SYSCFG_31, 0),
- [STIH415_IRB_SOFTRESET] = STIH415_SRST_LPM(LPM_SYSCFG_1, 6),
- [STIH415_USB0_SOFTRESET] = STIH415_SRST_REAR(SYSCFG_376, 9),
- [STIH415_USB1_SOFTRESET] = STIH415_SRST_REAR(SYSCFG_376, 10),
- [STIH415_USB2_SOFTRESET] = STIH415_SRST_REAR(SYSCFG_376, 11),
- [STIH415_KEYSCAN_SOFTRESET] = STIH415_SRST_LPM(LPM_SYSCFG_1, 8),
-};
-
-static struct syscfg_reset_controller_data stih415_powerdown_controller = {
- .wait_for_ack = true,
- .nr_channels = ARRAY_SIZE(stih415_powerdowns),
- .channels = stih415_powerdowns,
-};
-
-static struct syscfg_reset_controller_data stih415_softreset_controller = {
- .wait_for_ack = false,
- .active_low = true,
- .nr_channels = ARRAY_SIZE(stih415_softresets),
- .channels = stih415_softresets,
-};
-
-static const struct of_device_id stih415_reset_match[] = {
- { .compatible = "st,stih415-powerdown",
- .data = &stih415_powerdown_controller, },
- { .compatible = "st,stih415-softreset",
- .data = &stih415_softreset_controller, },
- {},
-};
-
-static struct platform_driver stih415_reset_driver = {
- .probe = syscfg_reset_probe,
- .driver = {
- .name = "reset-stih415",
- .of_match_table = stih415_reset_match,
- },
-};
-
-static int __init stih415_reset_init(void)
-{
- return platform_driver_register(&stih415_reset_driver);
-}
-arch_initcall(stih415_reset_init);
diff --git a/drivers/reset/sti/reset-stih416.c b/drivers/reset/sti/reset-stih416.c
deleted file mode 100644
index c581d606ef0f..000000000000
--- a/drivers/reset/sti/reset-stih416.c
+++ /dev/null
@@ -1,143 +0,0 @@
-/*
- * Copyright (C) 2013 STMicroelectronics (R&D) Limited
- * Author: Stephen Gallimore <stephen.gallimore@st.com>
- * Author: Srinivas Kandagatla <srinivas.kandagatla@st.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-#include <linux/module.h>
-#include <linux/of.h>
-#include <linux/of_platform.h>
-#include <linux/platform_device.h>
-
-#include <dt-bindings/reset/stih416-resets.h>
-
-#include "reset-syscfg.h"
-
-/*
- * STiH416 Peripheral powerdown definitions.
- */
-static const char stih416_front[] = "st,stih416-front-syscfg";
-static const char stih416_rear[] = "st,stih416-rear-syscfg";
-static const char stih416_sbc[] = "st,stih416-sbc-syscfg";
-static const char stih416_lpm[] = "st,stih416-lpm-syscfg";
-static const char stih416_cpu[] = "st,stih416-cpu-syscfg";
-
-#define STIH416_PDN_FRONT(_bit) \
- _SYSCFG_RST_CH(stih416_front, SYSCFG_1500, _bit, SYSSTAT_1578, _bit)
-
-#define STIH416_PDN_REAR(_cntl, _stat) \
- _SYSCFG_RST_CH(stih416_rear, SYSCFG_2525, _cntl, SYSSTAT_2583, _stat)
-
-#define SYSCFG_1500 0x7d0 /* Powerdown request EMI/NAND/Keyscan */
-#define SYSSTAT_1578 0x908 /* Powerdown status EMI/NAND/Keyscan */
-
-#define SYSCFG_2525 0x834 /* Powerdown request USB/SATA/PCIe */
-#define SYSSTAT_2583 0x91c /* Powerdown status USB/SATA/PCIe */
-
-#define SYSCFG_2552 0x8A0 /* Reset Generator control 0 */
-#define SYSCFG_1539 0x86c /* Softreset Ethernet 0 */
-#define SYSCFG_510 0x7f8 /* Softreset Ethernet 1 */
-#define LPM_SYSCFG_1 0x4 /* Softreset IRB */
-#define SYSCFG_2553 0x8a4 /* Softreset SATA0/1, PCIE0/1 */
-#define SYSCFG_7563 0x8cc /* MPE softresets 0 */
-#define SYSCFG_7564 0x8d0 /* MPE softresets 1 */
-
-#define STIH416_SRST_CPU(_reg, _bit) \
- _SYSCFG_RST_CH_NO_ACK(stih416_cpu, _reg, _bit)
-
-#define STIH416_SRST_FRONT(_reg, _bit) \
- _SYSCFG_RST_CH_NO_ACK(stih416_front, _reg, _bit)
-
-#define STIH416_SRST_REAR(_reg, _bit) \
- _SYSCFG_RST_CH_NO_ACK(stih416_rear, _reg, _bit)
-
-#define STIH416_SRST_LPM(_reg, _bit) \
- _SYSCFG_RST_CH_NO_ACK(stih416_lpm, _reg, _bit)
-
-#define STIH416_SRST_SBC(_reg, _bit) \
- _SYSCFG_RST_CH_NO_ACK(stih416_sbc, _reg, _bit)
-
-static const struct syscfg_reset_channel_data stih416_powerdowns[] = {
- [STIH416_EMISS_POWERDOWN] = STIH416_PDN_FRONT(0),
- [STIH416_NAND_POWERDOWN] = STIH416_PDN_FRONT(1),
- [STIH416_KEYSCAN_POWERDOWN] = STIH416_PDN_FRONT(2),
- [STIH416_USB0_POWERDOWN] = STIH416_PDN_REAR(0, 0),
- [STIH416_USB1_POWERDOWN] = STIH416_PDN_REAR(1, 1),
- [STIH416_USB2_POWERDOWN] = STIH416_PDN_REAR(2, 2),
- [STIH416_USB3_POWERDOWN] = STIH416_PDN_REAR(6, 5),
- [STIH416_SATA0_POWERDOWN] = STIH416_PDN_REAR(3, 3),
- [STIH416_SATA1_POWERDOWN] = STIH416_PDN_REAR(4, 4),
- [STIH416_PCIE0_POWERDOWN] = STIH416_PDN_REAR(7, 9),
- [STIH416_PCIE1_POWERDOWN] = STIH416_PDN_REAR(5, 8),
-};
-
-static const struct syscfg_reset_channel_data stih416_softresets[] = {
- [STIH416_ETH0_SOFTRESET] = STIH416_SRST_FRONT(SYSCFG_1539, 0),
- [STIH416_ETH1_SOFTRESET] = STIH416_SRST_SBC(SYSCFG_510, 0),
- [STIH416_IRB_SOFTRESET] = STIH416_SRST_LPM(LPM_SYSCFG_1, 6),
- [STIH416_USB0_SOFTRESET] = STIH416_SRST_REAR(SYSCFG_2552, 9),
- [STIH416_USB1_SOFTRESET] = STIH416_SRST_REAR(SYSCFG_2552, 10),
- [STIH416_USB2_SOFTRESET] = STIH416_SRST_REAR(SYSCFG_2552, 11),
- [STIH416_USB3_SOFTRESET] = STIH416_SRST_REAR(SYSCFG_2552, 28),
- [STIH416_SATA0_SOFTRESET] = STIH416_SRST_REAR(SYSCFG_2553, 7),
- [STIH416_SATA1_SOFTRESET] = STIH416_SRST_REAR(SYSCFG_2553, 3),
- [STIH416_PCIE0_SOFTRESET] = STIH416_SRST_REAR(SYSCFG_2553, 15),
- [STIH416_PCIE1_SOFTRESET] = STIH416_SRST_REAR(SYSCFG_2553, 2),
- [STIH416_AUD_DAC_SOFTRESET] = STIH416_SRST_REAR(SYSCFG_2553, 14),
- [STIH416_HDTVOUT_SOFTRESET] = STIH416_SRST_REAR(SYSCFG_2552, 5),
- [STIH416_VTAC_M_RX_SOFTRESET] = STIH416_SRST_REAR(SYSCFG_2552, 25),
- [STIH416_VTAC_A_RX_SOFTRESET] = STIH416_SRST_REAR(SYSCFG_2552, 26),
- [STIH416_SYNC_HD_SOFTRESET] = STIH416_SRST_REAR(SYSCFG_2553, 5),
- [STIH416_SYNC_SD_SOFTRESET] = STIH416_SRST_REAR(SYSCFG_2553, 6),
- [STIH416_BLITTER_SOFTRESET] = STIH416_SRST_CPU(SYSCFG_7563, 10),
- [STIH416_GPU_SOFTRESET] = STIH416_SRST_CPU(SYSCFG_7563, 11),
- [STIH416_VTAC_M_TX_SOFTRESET] = STIH416_SRST_CPU(SYSCFG_7563, 18),
- [STIH416_VTAC_A_TX_SOFTRESET] = STIH416_SRST_CPU(SYSCFG_7563, 19),
- [STIH416_VTG_AUX_SOFTRESET] = STIH416_SRST_CPU(SYSCFG_7563, 21),
- [STIH416_JPEG_DEC_SOFTRESET] = STIH416_SRST_CPU(SYSCFG_7563, 23),
- [STIH416_HVA_SOFTRESET] = STIH416_SRST_CPU(SYSCFG_7564, 2),
- [STIH416_COMPO_M_SOFTRESET] = STIH416_SRST_CPU(SYSCFG_7564, 3),
- [STIH416_COMPO_A_SOFTRESET] = STIH416_SRST_CPU(SYSCFG_7564, 4),
- [STIH416_VP8_DEC_SOFTRESET] = STIH416_SRST_CPU(SYSCFG_7564, 10),
- [STIH416_VTG_MAIN_SOFTRESET] = STIH416_SRST_CPU(SYSCFG_7564, 16),
- [STIH416_KEYSCAN_SOFTRESET] = STIH416_SRST_LPM(LPM_SYSCFG_1, 8),
-};
-
-static struct syscfg_reset_controller_data stih416_powerdown_controller = {
- .wait_for_ack = true,
- .nr_channels = ARRAY_SIZE(stih416_powerdowns),
- .channels = stih416_powerdowns,
-};
-
-static struct syscfg_reset_controller_data stih416_softreset_controller = {
- .wait_for_ack = false,
- .active_low = true,
- .nr_channels = ARRAY_SIZE(stih416_softresets),
- .channels = stih416_softresets,
-};
-
-static const struct of_device_id stih416_reset_match[] = {
- { .compatible = "st,stih416-powerdown",
- .data = &stih416_powerdown_controller, },
- { .compatible = "st,stih416-softreset",
- .data = &stih416_softreset_controller, },
- {},
-};
-
-static struct platform_driver stih416_reset_driver = {
- .probe = syscfg_reset_probe,
- .driver = {
- .name = "reset-stih416",
- .of_match_table = stih416_reset_match,
- },
-};
-
-static int __init stih416_reset_init(void)
-{
- return platform_driver_register(&stih416_reset_driver);
-}
-arch_initcall(stih416_reset_init);
diff --git a/drivers/reset/tegra/Kconfig b/drivers/reset/tegra/Kconfig
new file mode 100644
index 000000000000..d2afa293df7d
--- /dev/null
+++ b/drivers/reset/tegra/Kconfig
@@ -0,0 +1,2 @@
+config RESET_TEGRA_BPMP
+ def_bool TEGRA_BPMP
diff --git a/drivers/reset/tegra/Makefile b/drivers/reset/tegra/Makefile
new file mode 100644
index 000000000000..775243ab7383
--- /dev/null
+++ b/drivers/reset/tegra/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_RESET_TEGRA_BPMP) += reset-bpmp.o
diff --git a/drivers/reset/tegra/reset-bpmp.c b/drivers/reset/tegra/reset-bpmp.c
new file mode 100644
index 000000000000..5daf2ee1a396
--- /dev/null
+++ b/drivers/reset/tegra/reset-bpmp.c
@@ -0,0 +1,71 @@
+/*
+ * Copyright (C) 2016 NVIDIA Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/reset-controller.h>
+
+#include <soc/tegra/bpmp.h>
+#include <soc/tegra/bpmp-abi.h>
+
+static struct tegra_bpmp *to_tegra_bpmp(struct reset_controller_dev *rstc)
+{
+ return container_of(rstc, struct tegra_bpmp, rstc);
+}
+
+static int tegra_bpmp_reset_common(struct reset_controller_dev *rstc,
+ enum mrq_reset_commands command,
+ unsigned int id)
+{
+ struct tegra_bpmp *bpmp = to_tegra_bpmp(rstc);
+ struct mrq_reset_request request;
+ struct tegra_bpmp_message msg;
+
+ memset(&request, 0, sizeof(request));
+ request.cmd = command;
+ request.reset_id = id;
+
+ memset(&msg, 0, sizeof(msg));
+ msg.mrq = MRQ_RESET;
+ msg.tx.data = &request;
+ msg.tx.size = sizeof(request);
+
+ return tegra_bpmp_transfer(bpmp, &msg);
+}
+
+static int tegra_bpmp_reset_module(struct reset_controller_dev *rstc,
+ unsigned long id)
+{
+ return tegra_bpmp_reset_common(rstc, CMD_RESET_MODULE, id);
+}
+
+static int tegra_bpmp_reset_assert(struct reset_controller_dev *rstc,
+ unsigned long id)
+{
+ return tegra_bpmp_reset_common(rstc, CMD_RESET_ASSERT, id);
+}
+
+static int tegra_bpmp_reset_deassert(struct reset_controller_dev *rstc,
+ unsigned long id)
+{
+ return tegra_bpmp_reset_common(rstc, CMD_RESET_DEASSERT, id);
+}
+
+static const struct reset_control_ops tegra_bpmp_reset_ops = {
+ .reset = tegra_bpmp_reset_module,
+ .assert = tegra_bpmp_reset_assert,
+ .deassert = tegra_bpmp_reset_deassert,
+};
+
+int tegra_bpmp_init_resets(struct tegra_bpmp *bpmp)
+{
+ bpmp->rstc.ops = &tegra_bpmp_reset_ops;
+ bpmp->rstc.owner = THIS_MODULE;
+ bpmp->rstc.of_node = bpmp->dev->of_node;
+ bpmp->rstc.nr_resets = bpmp->soc->num_resets;
+
+ return devm_reset_controller_register(bpmp->dev, &bpmp->rstc);
+}
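To show how the controller registered by tegra_bpmp_init_resets() is consumed, here is a minimal client sketch (illustrative only, not part of the patch; the "foo" device and its "resets" phandle are assumptions). The generic reset API routes these calls into tegra_bpmp_reset_common() as MRQ_RESET messages:

    #include <linux/delay.h>
    #include <linux/err.h>
    #include <linux/platform_device.h>
    #include <linux/reset.h>

    static int foo_probe(struct platform_device *pdev)
    {
            struct reset_control *rst;
            int err;

            /* resolves the "resets" phandle pointing at the BPMP node */
            rst = devm_reset_control_get(&pdev->dev, NULL);
            if (IS_ERR(rst))
                    return PTR_ERR(rst);

            err = reset_control_assert(rst);        /* -> CMD_RESET_ASSERT */
            if (err)
                    return err;
            usleep_range(10, 20);                   /* hold time is device specific */
            return reset_control_deassert(rst);     /* -> CMD_RESET_DEASSERT */
    }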
diff --git a/drivers/rpmsg/qcom_smd.c b/drivers/rpmsg/qcom_smd.c
index 06fef2b4c814..0fae48116a0d 100644
--- a/drivers/rpmsg/qcom_smd.c
+++ b/drivers/rpmsg/qcom_smd.c
@@ -25,6 +25,7 @@
#include <linux/soc/qcom/smem.h>
#include <linux/wait.h>
#include <linux/rpmsg.h>
+#include <linux/rpmsg/qcom_smd.h>
#include "rpmsg_internal.h"
@@ -739,7 +740,7 @@ static int __qcom_smd_send(struct qcom_smd_channel *channel, const void *data,
while (qcom_smd_get_tx_avail(channel) < tlen) {
if (!wait) {
- ret = -ENOMEM;
+ ret = -EAGAIN;
goto out;
}
@@ -820,20 +821,13 @@ qcom_smd_find_channel(struct qcom_smd_edge *edge, const char *name)
struct qcom_smd_channel *channel;
struct qcom_smd_channel *ret = NULL;
unsigned long flags;
- unsigned state;
spin_lock_irqsave(&edge->channels_lock, flags);
list_for_each_entry(channel, &edge->channels, list) {
- if (strcmp(channel->name, name))
- continue;
-
- state = GET_RX_CHANNEL_INFO(channel, state);
- if (state != SMD_CHANNEL_OPENING &&
- state != SMD_CHANNEL_OPENED)
- continue;
-
- ret = channel;
- break;
+ if (!strcmp(channel->name, name)) {
+ ret = channel;
+ break;
+ }
}
spin_unlock_irqrestore(&edge->channels_lock, flags);
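The -ENOMEM to -EAGAIN change above matters to callers: a full TX FIFO on a non-blocking send is a transient condition rather than an allocation failure. A hedged usage sketch (the endpoint ept is assumed to already exist):

    int ret = rpmsg_trysend(ept, data, len);
    if (ret == -EAGAIN) {
            /* FIFO full right now; fall back to the blocking variant,
             * which sleeps until the remote side drains space. */
            ret = rpmsg_send(ept, data, len);
    }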
diff --git a/drivers/rpmsg/rpmsg_core.c b/drivers/rpmsg/rpmsg_core.c
index b6ea9ffa7381..a79cb5a9e5f2 100644
--- a/drivers/rpmsg/rpmsg_core.c
+++ b/drivers/rpmsg/rpmsg_core.c
@@ -71,6 +71,9 @@ struct rpmsg_endpoint *rpmsg_create_ept(struct rpmsg_device *rpdev,
rpmsg_rx_cb_t cb, void *priv,
struct rpmsg_channel_info chinfo)
{
+ if (WARN_ON(!rpdev))
+ return ERR_PTR(-EINVAL);
+
return rpdev->ops->create_ept(rpdev, cb, priv, chinfo);
}
EXPORT_SYMBOL(rpmsg_create_ept);
@@ -80,11 +83,13 @@ EXPORT_SYMBOL(rpmsg_create_ept);
* @ept: endpoint to destroy
*
* Should be used by drivers to destroy an rpmsg endpoint previously
- * created with rpmsg_create_ept().
+ * created with rpmsg_create_ept(). As with other types of "free", NULL
+ * is a valid parameter.
*/
void rpmsg_destroy_ept(struct rpmsg_endpoint *ept)
{
- ept->ops->destroy_ept(ept);
+ if (ept)
+ ept->ops->destroy_ept(ept);
}
EXPORT_SYMBOL(rpmsg_destroy_ept);
@@ -108,6 +113,11 @@ EXPORT_SYMBOL(rpmsg_destroy_ept);
*/
int rpmsg_send(struct rpmsg_endpoint *ept, void *data, int len)
{
+ if (WARN_ON(!ept))
+ return -EINVAL;
+ if (!ept->ops->send)
+ return -ENXIO;
+
return ept->ops->send(ept, data, len);
}
EXPORT_SYMBOL(rpmsg_send);
@@ -132,6 +142,11 @@ EXPORT_SYMBOL(rpmsg_send);
*/
int rpmsg_sendto(struct rpmsg_endpoint *ept, void *data, int len, u32 dst)
{
+ if (WARN_ON(!ept))
+ return -EINVAL;
+ if (!ept->ops->sendto)
+ return -ENXIO;
+
return ept->ops->sendto(ept, data, len, dst);
}
EXPORT_SYMBOL(rpmsg_sendto);
@@ -159,6 +174,11 @@ EXPORT_SYMBOL(rpmsg_sendto);
int rpmsg_send_offchannel(struct rpmsg_endpoint *ept, u32 src, u32 dst,
void *data, int len)
{
+ if (WARN_ON(!ept))
+ return -EINVAL;
+ if (!ept->ops->send_offchannel)
+ return -ENXIO;
+
return ept->ops->send_offchannel(ept, src, dst, data, len);
}
EXPORT_SYMBOL(rpmsg_send_offchannel);
@@ -182,6 +202,11 @@ EXPORT_SYMBOL(rpmsg_send_offchannel);
*/
int rpmsg_trysend(struct rpmsg_endpoint *ept, void *data, int len)
{
+ if (WARN_ON(!ept))
+ return -EINVAL;
+ if (!ept->ops->trysend)
+ return -ENXIO;
+
return ept->ops->trysend(ept, data, len);
}
EXPORT_SYMBOL(rpmsg_trysend);
@@ -205,6 +230,11 @@ EXPORT_SYMBOL(rpmsg_trysend);
*/
int rpmsg_trysendto(struct rpmsg_endpoint *ept, void *data, int len, u32 dst)
{
+ if (WARN_ON(!ept))
+ return -EINVAL;
+ if (!ept->ops->trysendto)
+ return -ENXIO;
+
return ept->ops->trysendto(ept, data, len, dst);
}
EXPORT_SYMBOL(rpmsg_trysendto);
@@ -231,6 +261,11 @@ EXPORT_SYMBOL(rpmsg_trysendto);
int rpmsg_trysend_offchannel(struct rpmsg_endpoint *ept, u32 src, u32 dst,
void *data, int len)
{
+ if (WARN_ON(!ept))
+ return -EINVAL;
+ if (!ept->ops->trysend_offchannel)
+ return -ENXIO;
+
return ept->ops->trysend_offchannel(ept, src, dst, data, len);
}
EXPORT_SYMBOL(rpmsg_trysend_offchannel);
@@ -315,6 +350,9 @@ static int rpmsg_dev_match(struct device *dev, struct device_driver *drv)
const struct rpmsg_device_id *ids = rpdrv->id_table;
unsigned int i;
+ if (rpdev->driver_override)
+ return !strcmp(rpdev->driver_override, drv->name);
+
if (ids)
for (i = 0; ids[i].name[0]; i++)
if (rpmsg_id_match(rpdev, &ids[i]))
@@ -344,27 +382,30 @@ static int rpmsg_dev_probe(struct device *dev)
struct rpmsg_device *rpdev = to_rpmsg_device(dev);
struct rpmsg_driver *rpdrv = to_rpmsg_driver(rpdev->dev.driver);
struct rpmsg_channel_info chinfo = {};
- struct rpmsg_endpoint *ept;
+ struct rpmsg_endpoint *ept = NULL;
int err;
- strncpy(chinfo.name, rpdev->id.name, RPMSG_NAME_SIZE);
- chinfo.src = rpdev->src;
- chinfo.dst = RPMSG_ADDR_ANY;
+ if (rpdrv->callback) {
+ strncpy(chinfo.name, rpdev->id.name, RPMSG_NAME_SIZE);
+ chinfo.src = rpdev->src;
+ chinfo.dst = RPMSG_ADDR_ANY;
- ept = rpmsg_create_ept(rpdev, rpdrv->callback, NULL, chinfo);
- if (!ept) {
- dev_err(dev, "failed to create endpoint\n");
- err = -ENOMEM;
- goto out;
- }
+ ept = rpmsg_create_ept(rpdev, rpdrv->callback, NULL, chinfo);
+ if (!ept) {
+ dev_err(dev, "failed to create endpoint\n");
+ err = -ENOMEM;
+ goto out;
+ }
- rpdev->ept = ept;
- rpdev->src = ept->addr;
+ rpdev->ept = ept;
+ rpdev->src = ept->addr;
+ }
err = rpdrv->probe(rpdev);
if (err) {
dev_err(dev, "%s: failed: %d\n", __func__, err);
- rpmsg_destroy_ept(ept);
+ if (ept)
+ rpmsg_destroy_ept(ept);
goto out;
}
@@ -385,7 +426,8 @@ static int rpmsg_dev_remove(struct device *dev)
rpdrv->remove(rpdev);
- rpmsg_destroy_ept(rpdev->ept);
+ if (rpdev->ept)
+ rpmsg_destroy_ept(rpdev->ept);
return err;
}
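The probe rework above makes the default endpoint optional: a driver whose .callback is NULL is now bound without one, and may create endpoints itself later via rpmsg_create_ept(). A minimal illustrative driver (all names hypothetical; a real driver would also supply an .id_table so rpmsg_dev_match() can bind it):

    #include <linux/module.h>
    #include <linux/rpmsg.h>

    static int demo_probe(struct rpmsg_device *rpdev)
    {
            dev_info(&rpdev->dev, "bound without a default endpoint\n");
            /* rpmsg_create_ept(rpdev, ...) may be called here when needed */
            return 0;
    }

    static void demo_remove(struct rpmsg_device *rpdev)
    {
            /* endpoints created in probe would be destroyed here */
    }

    static struct rpmsg_driver demo_driver = {
            .drv = { .name = "rpmsg_demo" },
            .probe = demo_probe,
            .remove = demo_remove,
            /* .callback deliberately left NULL: no endpoint at probe time */
    };
    module_rpmsg_driver(demo_driver);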
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig
index e859d148aba9..c93c5a8fba32 100644
--- a/drivers/rtc/Kconfig
+++ b/drivers/rtc/Kconfig
@@ -303,7 +303,7 @@ config RTC_DRV_MAX6900
config RTC_DRV_MAX8907
tristate "Maxim MAX8907"
- depends on MFD_MAX8907
+ depends on MFD_MAX8907 || COMPILE_TEST
help
If you say yes here you will get support for the
RTC of Maxim MAX8907 PMIC.
@@ -343,7 +343,7 @@ config RTC_DRV_MAX8997
config RTC_DRV_MAX77686
tristate "Maxim MAX77686"
- depends on MFD_MAX77686 || MFD_MAX77620
+ depends on MFD_MAX77686 || MFD_MAX77620 || COMPILE_TEST
help
If you say yes here you will get support for the
RTC of Maxim MAX77686/MAX77620/MAX77802 PMIC.
@@ -481,6 +481,7 @@ config RTC_DRV_TWL92330
config RTC_DRV_TWL4030
tristate "TI TWL4030/TWL5030/TWL6030/TPS659x0"
depends on TWL4030_CORE
+ depends on OF
help
If you say yes here you get support for the RTC on the
TWL4030/TWL5030/TWL6030 family chips, used mostly with OMAP3 platforms.
@@ -602,7 +603,8 @@ config RTC_DRV_RV8803
config RTC_DRV_S5M
tristate "Samsung S2M/S5M series"
- depends on MFD_SEC_CORE
+ depends on MFD_SEC_CORE || COMPILE_TEST
+ select REGMAP_IRQ
help
If you say yes here you will get support for the
RTC of Samsung S2MPS14 and S5M PMIC series.
@@ -820,8 +822,8 @@ config RTC_DRV_RV3029_HWMON
comment "Platform RTC drivers"
-# this 'CMOS' RTC driver is arch dependent because <asm-generic/rtc.h>
-# requires <asm/mc146818rtc.h> defining CMOS_READ/CMOS_WRITE, and a
+# this 'CMOS' RTC driver is arch dependent because it requires
+# <asm/mc146818rtc.h> defining CMOS_READ/CMOS_WRITE, and a
# global rtc_lock ... it's not yet just another platform_device.
config RTC_DRV_CMOS
@@ -1549,14 +1551,11 @@ config RTC_DRV_MPC5121
will be called rtc-mpc5121.
config RTC_DRV_JZ4740
- tristate "Ingenic JZ4740 SoC"
- depends on MACH_JZ4740 || COMPILE_TEST
+ bool "Ingenic JZ4740 SoC"
+ depends on MACH_INGENIC || COMPILE_TEST
help
- If you say yes here you get support for the Ingenic JZ4740 SoC RTC
- controller.
-
- This driver can also be buillt as a module. If so, the module
- will be called rtc-jz4740.
+ If you say yes here you get support for the Ingenic JZ47xx SoCs RTC
+ controllers.
config RTC_DRV_LPC24XX
tristate "NXP RTC for LPC178x/18xx/408x/43xx"
@@ -1567,7 +1566,7 @@ config RTC_DRV_LPC24XX
NXP LPC178x/18xx/408x/43xx devices.
If you have one of the devices above enable this driver to use
- the hardware RTC. This driver can also be buillt as a module. If
+ the hardware RTC. This driver can also be built as a module. If
so, the module will be called rtc-lpc24xx.
config RTC_DRV_LPC32XX
@@ -1576,7 +1575,7 @@ config RTC_DRV_LPC32XX
help
This enables support for the NXP RTC in the LPC32XX
- This driver can also be buillt as a module. If so, the module
+ This driver can also be built as a module. If so, the module
will be called rtc-lpc32xx.
config RTC_DRV_PM8XXX
@@ -1706,6 +1705,17 @@ config RTC_DRV_PIC32
This driver can also be built as a module. If so, the module
will be called rtc-pic32
+config RTC_DRV_R7301
+ tristate "EPSON TOYOCOM RTC-7301SF/DG"
+ select REGMAP_MMIO
+ depends on OF && HAS_IOMEM
+ help
+ If you say yes here you get support for the EPSON TOYOCOM
+ RTC-7301SF/DG chips.
+
+ This driver can also be built as a module. If so, the module
+ will be called rtc-r7301.
+
comment "HID Sensor RTC drivers"
config RTC_DRV_HID_SENSOR_TIME
diff --git a/drivers/rtc/Makefile b/drivers/rtc/Makefile
index 1ac694a330c8..f13ab1c5c222 100644
--- a/drivers/rtc/Makefile
+++ b/drivers/rtc/Makefile
@@ -120,6 +120,7 @@ obj-$(CONFIG_RTC_DRV_PM8XXX) += rtc-pm8xxx.o
obj-$(CONFIG_RTC_DRV_PS3) += rtc-ps3.o
obj-$(CONFIG_RTC_DRV_PUV3) += rtc-puv3.o
obj-$(CONFIG_RTC_DRV_PXA) += rtc-pxa.o
+obj-$(CONFIG_RTC_DRV_R7301) += rtc-r7301.o
obj-$(CONFIG_RTC_DRV_R9701) += rtc-r9701.o
obj-$(CONFIG_RTC_DRV_RC5T583) += rtc-rc5t583.o
obj-$(CONFIG_RTC_DRV_RK808) += rtc-rk808.o
diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c
index 7030d7cd3861..f4a96dbdabf2 100644
--- a/drivers/rtc/rtc-cmos.c
+++ b/drivers/rtc/rtc-cmos.c
@@ -191,6 +191,13 @@ static inline void cmos_write_bank2(unsigned char val, unsigned char addr)
static int cmos_read_time(struct device *dev, struct rtc_time *t)
{
+ /*
+ * If pm_trace abused the RTC for storage, set the timespec to 0,
+ * which tells the caller that this RTC value is unusable.
+ */
+ if (!pm_trace_rtc_valid())
+ return -EIO;
+
/* REVISIT: if the clock has a "century" register, use
* that instead of the heuristic in mc146818_get_time().
* That'll make Y3K compatibility (year > 2070) easy!
@@ -325,14 +332,86 @@ static void cmos_irq_disable(struct cmos_rtc *cmos, unsigned char mask)
cmos_checkintr(cmos, rtc_control);
}
+static int cmos_validate_alarm(struct device *dev, struct rtc_wkalrm *t)
+{
+ struct cmos_rtc *cmos = dev_get_drvdata(dev);
+ struct rtc_time now;
+
+ cmos_read_time(dev, &now);
+
+ if (!cmos->day_alrm) {
+ time64_t t_max_date;
+ time64_t t_alrm;
+
+ t_max_date = rtc_tm_to_time64(&now);
+ t_max_date += 24 * 60 * 60 - 1;
+ t_alrm = rtc_tm_to_time64(&t->time);
+ if (t_alrm > t_max_date) {
+ dev_err(dev,
+ "Alarms can be up to one day in the future\n");
+ return -EINVAL;
+ }
+ } else if (!cmos->mon_alrm) {
+ struct rtc_time max_date = now;
+ time64_t t_max_date;
+ time64_t t_alrm;
+ int max_mday;
+
+ if (max_date.tm_mon == 11) {
+ max_date.tm_mon = 0;
+ max_date.tm_year += 1;
+ } else {
+ max_date.tm_mon += 1;
+ }
+ max_mday = rtc_month_days(max_date.tm_mon, max_date.tm_year);
+ if (max_date.tm_mday > max_mday)
+ max_date.tm_mday = max_mday;
+
+ t_max_date = rtc_tm_to_time64(&max_date);
+ t_max_date -= 1;
+ t_alrm = rtc_tm_to_time64(&t->time);
+ if (t_alrm > t_max_date) {
+ dev_err(dev,
+ "Alarms can be up to one month in the future\n");
+ return -EINVAL;
+ }
+ } else {
+ struct rtc_time max_date = now;
+ time64_t t_max_date;
+ time64_t t_alrm;
+ int max_mday;
+
+ max_date.tm_year += 1;
+ max_mday = rtc_month_days(max_date.tm_mon, max_date.tm_year);
+ if (max_date.tm_mday > max_mday)
+ max_date.tm_mday = max_mday;
+
+ t_max_date = rtc_tm_to_time64(&max_date);
+ t_max_date -= 1;
+ t_alrm = rtc_tm_to_time64(&t->time);
+ if (t_alrm > t_max_date) {
+ dev_err(dev,
+ "Alarms can be up to one year in the future\n");
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
static int cmos_set_alarm(struct device *dev, struct rtc_wkalrm *t)
{
struct cmos_rtc *cmos = dev_get_drvdata(dev);
unsigned char mon, mday, hrs, min, sec, rtc_control;
+ int ret;
if (!is_valid_irq(cmos->irq))
return -EIO;
+ ret = cmos_validate_alarm(dev, t);
+ if (ret < 0)
+ return ret;
+
mon = t->time.tm_mon + 1;
mday = t->time.tm_mday;
hrs = t->time.tm_hour;
@@ -700,9 +779,6 @@ cmos_do_probe(struct device *dev, struct resource *ports, int rtc_irq)
spin_unlock_irq(&rtc_lock);
- /* FIXME:
- * <asm-generic/rtc.h> doesn't know 12-hour mode either.
- */
if (is_valid_irq(rtc_irq) && !(rtc_control & RTC_24H)) {
dev_warn(dev, "only 24-hr supported\n");
retval = -ENXIO;
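To make the bounds in cmos_validate_alarm() concrete: when the RTC has no day-of-month alarm register, the alarm must land within the next 24 hours of "now" (an illustrative calculation, not code from the patch):

    t_max_date = rtc_tm_to_time64(&now) + 24 * 60 * 60 - 1;
    /* now = 2017-01-01 12:00:00  ->  latest alarm = 2017-01-02 11:59:59 */

The month and year cases build max_date by advancing tm_mon or tm_year, clamp tm_mday with rtc_month_days() so that e.g. Jan 31 plus one month lands on Feb 28/29 rather than an invalid date, and subtract one second in the same way.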
diff --git a/drivers/rtc/rtc-ds1307.c b/drivers/rtc/rtc-ds1307.c
index 4e31036ee259..4ad97be48043 100644
--- a/drivers/rtc/rtc-ds1307.c
+++ b/drivers/rtc/rtc-ds1307.c
@@ -11,6 +11,7 @@
* published by the Free Software Foundation.
*/
+#include <linux/acpi.h>
#include <linux/bcd.h>
#include <linux/i2c.h>
#include <linux/init.h>
@@ -191,6 +192,26 @@ static const struct i2c_device_id ds1307_id[] = {
};
MODULE_DEVICE_TABLE(i2c, ds1307_id);
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id ds1307_acpi_ids[] = {
+ { .id = "DS1307", .driver_data = ds_1307 },
+ { .id = "DS1337", .driver_data = ds_1337 },
+ { .id = "DS1338", .driver_data = ds_1338 },
+ { .id = "DS1339", .driver_data = ds_1339 },
+ { .id = "DS1388", .driver_data = ds_1388 },
+ { .id = "DS1340", .driver_data = ds_1340 },
+ { .id = "DS3231", .driver_data = ds_3231 },
+ { .id = "M41T00", .driver_data = m41t00 },
+ { .id = "MCP7940X", .driver_data = mcp794xx },
+ { .id = "MCP7941X", .driver_data = mcp794xx },
+ { .id = "PT7C4338", .driver_data = ds_1307 },
+ { .id = "RX8025", .driver_data = rx_8025 },
+ { .id = "ISL12057", .driver_data = ds_1337 },
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, ds1307_acpi_ids);
+#endif
+
/*----------------------------------------------------------------------*/
#define BLOCK_DATA_MAX_TRIES 10
@@ -874,17 +895,17 @@ static u8 do_trickle_setup_ds1339(struct i2c_client *client,
return setup;
}
-static void ds1307_trickle_of_init(struct i2c_client *client,
- struct chip_desc *chip)
+static void ds1307_trickle_init(struct i2c_client *client,
+ struct chip_desc *chip)
{
uint32_t ohms = 0;
bool diode = true;
if (!chip->do_trickle_setup)
goto out;
- if (of_property_read_u32(client->dev.of_node, "trickle-resistor-ohms" , &ohms))
+ if (device_property_read_u32(&client->dev, "trickle-resistor-ohms", &ohms))
goto out;
- if (of_property_read_bool(client->dev.of_node, "trickle-diode-disable"))
+ if (device_property_read_bool(&client->dev, "trickle-diode-disable"))
diode = false;
chip->trickle_charger_setup = chip->do_trickle_setup(client,
ohms, diode);
@@ -1268,7 +1289,7 @@ static int ds1307_probe(struct i2c_client *client,
struct ds1307 *ds1307;
int err = -ENODEV;
int tmp, wday;
- struct chip_desc *chip = &chips[id->driver_data];
+ struct chip_desc *chip;
struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent);
bool want_irq = false;
bool ds1307_can_wakeup_device = false;
@@ -1297,11 +1318,23 @@ static int ds1307_probe(struct i2c_client *client,
i2c_set_clientdata(client, ds1307);
ds1307->client = client;
- ds1307->type = id->driver_data;
+ if (id) {
+ chip = &chips[id->driver_data];
+ ds1307->type = id->driver_data;
+ } else {
+ const struct acpi_device_id *acpi_id;
+
+ acpi_id = acpi_match_device(ACPI_PTR(ds1307_acpi_ids),
+ &client->dev);
+ if (!acpi_id)
+ return -ENODEV;
+ chip = &chips[acpi_id->driver_data];
+ ds1307->type = acpi_id->driver_data;
+ }
- if (!pdata && client->dev.of_node)
- ds1307_trickle_of_init(client, chip);
- else if (pdata && pdata->trickle_charger_setup)
+ if (!pdata)
+ ds1307_trickle_init(client, chip);
+ else if (pdata->trickle_charger_setup)
chip->trickle_charger_setup = pdata->trickle_charger_setup;
if (chip->trickle_charger_setup && chip->trickle_charger_reg) {
@@ -1678,6 +1711,7 @@ static int ds1307_remove(struct i2c_client *client)
static struct i2c_driver ds1307_driver = {
.driver = {
.name = "rtc-ds1307",
+ .acpi_match_table = ACPI_PTR(ds1307_acpi_ids),
},
.probe = ds1307_probe,
.remove = ds1307_remove,
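The id-versus-ACPI selection added to ds1307_probe() follows a common shape; a standalone sketch of the same logic (function name hypothetical):

    #include <linux/acpi.h>
    #include <linux/i2c.h>

    static long example_chip_type(struct i2c_client *client,
                                  const struct i2c_device_id *id,
                                  const struct acpi_device_id *acpi_ids)
    {
            const struct acpi_device_id *aid;

            if (id)                         /* classic I2C enumeration */
                    return id->driver_data;

            aid = acpi_match_device(acpi_ids, &client->dev);
            if (!aid)                       /* neither table matched */
                    return -ENODEV;
            return aid->driver_data;        /* ACPI enumeration */
    }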
diff --git a/drivers/rtc/rtc-ds1374.c b/drivers/rtc/rtc-ds1374.c
index 3b3049c8c9e0..52429f0a57cc 100644
--- a/drivers/rtc/rtc-ds1374.c
+++ b/drivers/rtc/rtc-ds1374.c
@@ -89,10 +89,8 @@ static int ds1374_read_rtc(struct i2c_client *client, u32 *time,
int ret;
int i;
- if (nbytes > 4) {
- WARN_ON(1);
+ if (WARN_ON(nbytes > 4))
return -EINVAL;
- }
ret = i2c_smbus_read_i2c_block_data(client, reg, nbytes, buf);
diff --git a/drivers/rtc/rtc-imxdi.c b/drivers/rtc/rtc-imxdi.c
index 8d8049bdfaf6..67b56b80dc70 100644
--- a/drivers/rtc/rtc-imxdi.c
+++ b/drivers/rtc/rtc-imxdi.c
@@ -67,7 +67,7 @@
#define DSR_ETAD (1 << 21) /* External tamper A detected */
#define DSR_EBD (1 << 20) /* External boot detected */
#define DSR_SAD (1 << 19) /* SCC alarm detected */
-#define DSR_TTD (1 << 18) /* Temperatur tamper detected */
+#define DSR_TTD (1 << 18) /* Temperature tamper detected */
#define DSR_CTD (1 << 17) /* Clock tamper detected */
#define DSR_VTD (1 << 16) /* Voltage tamper detected */
#define DSR_WBF (1 << 10) /* Write Busy Flag (synchronous) */
diff --git a/drivers/rtc/rtc-jz4740.c b/drivers/rtc/rtc-jz4740.c
index 5e14651b71a8..72918c1ba092 100644
--- a/drivers/rtc/rtc-jz4740.c
+++ b/drivers/rtc/rtc-jz4740.c
@@ -14,10 +14,12 @@
*
*/
+#include <linux/clk.h>
#include <linux/io.h>
#include <linux/kernel.h>
-#include <linux/module.h>
+#include <linux/of_device.h>
#include <linux/platform_device.h>
+#include <linux/reboot.h>
#include <linux/rtc.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
@@ -27,8 +29,14 @@
#define JZ_REG_RTC_SEC_ALARM 0x08
#define JZ_REG_RTC_REGULATOR 0x0C
#define JZ_REG_RTC_HIBERNATE 0x20
+#define JZ_REG_RTC_WAKEUP_FILTER 0x24
+#define JZ_REG_RTC_RESET_COUNTER 0x28
#define JZ_REG_RTC_SCRATCHPAD 0x34
+/* The following are present on the jz4780 */
+#define JZ_REG_RTC_WENR 0x3C
+#define JZ_RTC_WENR_WEN BIT(31)
+
#define JZ_RTC_CTRL_WRDY BIT(7)
#define JZ_RTC_CTRL_1HZ BIT(6)
#define JZ_RTC_CTRL_1HZ_IRQ BIT(5)
@@ -37,16 +45,34 @@
#define JZ_RTC_CTRL_AE BIT(2)
#define JZ_RTC_CTRL_ENABLE BIT(0)
+/* Magic value to enable writes on jz4780 */
+#define JZ_RTC_WENR_MAGIC 0xA55A
+
+#define JZ_RTC_WAKEUP_FILTER_MASK 0x0000FFE0
+#define JZ_RTC_RESET_COUNTER_MASK 0x00000FE0
+
+enum jz4740_rtc_type {
+ ID_JZ4740,
+ ID_JZ4780,
+};
+
struct jz4740_rtc {
void __iomem *base;
+ enum jz4740_rtc_type type;
struct rtc_device *rtc;
+ struct clk *clk;
int irq;
spinlock_t lock;
+
+ unsigned int min_wakeup_pin_assert_time;
+ unsigned int reset_pin_assert_time;
};
+static struct device *dev_for_power_off;
+
static inline uint32_t jz4740_rtc_reg_read(struct jz4740_rtc *rtc, size_t reg)
{
return readl(rtc->base + reg);
@@ -64,11 +90,33 @@ static int jz4740_rtc_wait_write_ready(struct jz4740_rtc *rtc)
return timeout ? 0 : -EIO;
}
+static inline int jz4780_rtc_enable_write(struct jz4740_rtc *rtc)
+{
+ uint32_t ctrl;
+ int ret, timeout = 1000;
+
+ ret = jz4740_rtc_wait_write_ready(rtc);
+ if (ret != 0)
+ return ret;
+
+ writel(JZ_RTC_WENR_MAGIC, rtc->base + JZ_REG_RTC_WENR);
+
+ do {
+ ctrl = readl(rtc->base + JZ_REG_RTC_WENR);
+ } while (!(ctrl & JZ_RTC_WENR_WEN) && --timeout);
+
+ return timeout ? 0 : -EIO;
+}
+
static inline int jz4740_rtc_reg_write(struct jz4740_rtc *rtc, size_t reg,
uint32_t val)
{
- int ret;
- ret = jz4740_rtc_wait_write_ready(rtc);
+ int ret = 0;
+
+ if (rtc->type >= ID_JZ4780)
+ ret = jz4780_rtc_enable_write(rtc);
+ if (ret == 0)
+ ret = jz4740_rtc_wait_write_ready(rtc);
if (ret == 0)
writel(val, rtc->base + reg);
@@ -203,12 +251,57 @@ static irqreturn_t jz4740_rtc_irq(int irq, void *data)
return IRQ_HANDLED;
}
-void jz4740_rtc_poweroff(struct device *dev)
+static void jz4740_rtc_poweroff(struct device *dev)
{
struct jz4740_rtc *rtc = dev_get_drvdata(dev);
jz4740_rtc_reg_write(rtc, JZ_REG_RTC_HIBERNATE, 1);
}
-EXPORT_SYMBOL_GPL(jz4740_rtc_poweroff);
+
+static void jz4740_rtc_power_off(void)
+{
+ struct jz4740_rtc *rtc = dev_get_drvdata(dev_for_power_off);
+ unsigned long rtc_rate;
+ unsigned long wakeup_filter_ticks;
+ unsigned long reset_counter_ticks;
+
+ clk_prepare_enable(rtc->clk);
+
+ rtc_rate = clk_get_rate(rtc->clk);
+
+ /*
+ * Set minimum wakeup pin assertion time: 100 ms.
+ * Range is 0 to 2 sec if RTC is clocked at 32 kHz.
+ */
+ wakeup_filter_ticks =
+ (rtc->min_wakeup_pin_assert_time * rtc_rate) / 1000;
+ if (wakeup_filter_ticks < JZ_RTC_WAKEUP_FILTER_MASK)
+ wakeup_filter_ticks &= JZ_RTC_WAKEUP_FILTER_MASK;
+ else
+ wakeup_filter_ticks = JZ_RTC_WAKEUP_FILTER_MASK;
+ jz4740_rtc_reg_write(rtc,
+ JZ_REG_RTC_WAKEUP_FILTER, wakeup_filter_ticks);
+
+ /*
+ * Set reset pin low-level assertion time after wakeup: 60 ms.
+ * Range is 0 to 125 ms if RTC is clocked at 32 kHz.
+ */
+ reset_counter_ticks = (rtc->reset_pin_assert_time * rtc_rate) / 1000;
+ if (reset_counter_ticks < JZ_RTC_RESET_COUNTER_MASK)
+ reset_counter_ticks &= JZ_RTC_RESET_COUNTER_MASK;
+ else
+ reset_counter_ticks = JZ_RTC_RESET_COUNTER_MASK;
+ jz4740_rtc_reg_write(rtc,
+ JZ_REG_RTC_RESET_COUNTER, reset_counter_ticks);
+
+ jz4740_rtc_poweroff(dev_for_power_off);
+ machine_halt();
+}
+
+static const struct of_device_id jz4740_rtc_of_match[] = {
+ { .compatible = "ingenic,jz4740-rtc", .data = (void *)ID_JZ4740 },
+ { .compatible = "ingenic,jz4780-rtc", .data = (void *)ID_JZ4780 },
+ {},
+};
static int jz4740_rtc_probe(struct platform_device *pdev)
{
@@ -216,11 +309,20 @@ static int jz4740_rtc_probe(struct platform_device *pdev)
struct jz4740_rtc *rtc;
uint32_t scratchpad;
struct resource *mem;
+ const struct platform_device_id *id = platform_get_device_id(pdev);
+ const struct of_device_id *of_id = of_match_device(
+ jz4740_rtc_of_match, &pdev->dev);
+ struct device_node *np = pdev->dev.of_node;
rtc = devm_kzalloc(&pdev->dev, sizeof(*rtc), GFP_KERNEL);
if (!rtc)
return -ENOMEM;
+ if (of_id)
+ rtc->type = (enum jz4740_rtc_type)of_id->data;
+ else
+ rtc->type = id->driver_data;
+
rtc->irq = platform_get_irq(pdev, 0);
if (rtc->irq < 0) {
dev_err(&pdev->dev, "Failed to get platform irq\n");
@@ -232,6 +334,12 @@ static int jz4740_rtc_probe(struct platform_device *pdev)
if (IS_ERR(rtc->base))
return PTR_ERR(rtc->base);
+ rtc->clk = devm_clk_get(&pdev->dev, "rtc");
+ if (IS_ERR(rtc->clk)) {
+ dev_err(&pdev->dev, "Failed to get RTC clock\n");
+ return PTR_ERR(rtc->clk);
+ }
+
spin_lock_init(&rtc->lock);
platform_set_drvdata(pdev, rtc);
@@ -263,6 +371,27 @@ static int jz4740_rtc_probe(struct platform_device *pdev)
}
}
+ if (np && of_device_is_system_power_controller(np)) {
+ if (!pm_power_off) {
+ /* Default: 60ms */
+ rtc->reset_pin_assert_time = 60;
+ of_property_read_u32(np, "reset-pin-assert-time-ms",
+ &rtc->reset_pin_assert_time);
+
+ /* Default: 100ms */
+ rtc->min_wakeup_pin_assert_time = 100;
+ of_property_read_u32(np,
+ "min-wakeup-pin-assert-time-ms",
+ &rtc->min_wakeup_pin_assert_time);
+
+ dev_for_power_off = &pdev->dev;
+ pm_power_off = jz4740_rtc_power_off;
+ } else {
+ dev_warn(&pdev->dev,
+ "Poweroff handler already present!\n");
+ }
+ }
+
return 0;
}
@@ -295,17 +424,20 @@ static const struct dev_pm_ops jz4740_pm_ops = {
#define JZ4740_RTC_PM_OPS NULL
#endif /* CONFIG_PM */
+static const struct platform_device_id jz4740_rtc_ids[] = {
+ { "jz4740-rtc", ID_JZ4740 },
+ { "jz4780-rtc", ID_JZ4780 },
+ {}
+};
+
static struct platform_driver jz4740_rtc_driver = {
.probe = jz4740_rtc_probe,
.driver = {
.name = "jz4740-rtc",
.pm = JZ4740_RTC_PM_OPS,
+ .of_match_table = of_match_ptr(jz4740_rtc_of_match),
},
+ .id_table = jz4740_rtc_ids,
};
-module_platform_driver(jz4740_rtc_driver);
-
-MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("RTC driver for the JZ4740 SoC\n");
-MODULE_ALIAS("platform:jz4740-rtc");
+builtin_platform_driver(jz4740_rtc_driver);
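A worked example for the tick conversions in jz4740_rtc_power_off(), assuming the usual 32768 Hz RTC clock: the default 100 ms minimum wakeup assertion gives

    wakeup_filter_ticks = 100 * 32768 / 1000 = 3276 = 0xCCC

which is below JZ_RTC_WAKEUP_FILTER_MASK (0xFFE0) and is therefore rounded down onto the mask:

    0xCCC & 0xFFE0 = 0xCC0 = 3264 ticks, i.e. about 99.6 ms

Values that would exceed the mask saturate to it instead.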
diff --git a/drivers/rtc/rtc-lib.c b/drivers/rtc/rtc-lib.c
index e6bfb9c42a10..1ae7da5cfc60 100644
--- a/drivers/rtc/rtc-lib.c
+++ b/drivers/rtc/rtc-lib.c
@@ -11,7 +11,7 @@
* published by the Free Software Foundation.
*/
-#include <linux/module.h>
+#include <linux/export.h>
#include <linux/rtc.h>
static const unsigned char rtc_days_in_month[] = {
@@ -148,5 +148,3 @@ struct rtc_time rtc_ktime_to_tm(ktime_t kt)
return ret;
}
EXPORT_SYMBOL_GPL(rtc_ktime_to_tm);
-
-MODULE_LICENSE("GPL");
diff --git a/drivers/rtc/rtc-mcp795.c b/drivers/rtc/rtc-mcp795.c
index 4021fd04cb0a..ce75e421ba00 100644
--- a/drivers/rtc/rtc-mcp795.c
+++ b/drivers/rtc/rtc-mcp795.c
@@ -12,7 +12,7 @@
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
- * */
+ */
#include <linux/module.h>
#include <linux/kernel.h>
@@ -21,6 +21,8 @@
#include <linux/spi/spi.h>
#include <linux/rtc.h>
#include <linux/of.h>
+#include <linux/bcd.h>
+#include <linux/delay.h>
/* MCP795 Instructions, see datasheet table 3-1 */
#define MCP795_EEREAD 0x03
@@ -29,7 +31,7 @@
#define MCP795_EEWREN 0x06
#define MCP795_SRREAD 0x05
#define MCP795_SRWRITE 0x01
-#define MCP795_READ 0x13
+#define MCP795_READ 0x13
#define MCP795_WRITE 0x12
#define MCP795_UNLOCK 0x14
#define MCP795_IDWRITE 0x32
@@ -37,8 +39,17 @@
#define MCP795_CLRWDT 0x44
#define MCP795_CLRRAM 0x54
-#define MCP795_ST_BIT 0x80
-#define MCP795_24_BIT 0x40
+/* MCP795 RTCC registers, see datasheet table 4-1 */
+#define MCP795_REG_SECONDS 0x01
+#define MCP795_REG_DAY 0x04
+#define MCP795_REG_MONTH 0x06
+#define MCP795_REG_CONTROL 0x08
+
+#define MCP795_ST_BIT BIT(7)
+#define MCP795_24_BIT BIT(6)
+#define MCP795_LP_BIT BIT(5)
+#define MCP795_EXTOSC_BIT BIT(3)
+#define MCP795_OSCON_BIT BIT(5)
static int mcp795_rtcc_read(struct device *dev, u8 addr, u8 *buf, u8 count)
{
@@ -93,30 +104,97 @@ static int mcp795_rtcc_set_bits(struct device *dev, u8 addr, u8 mask, u8 state)
return ret;
}
+static int mcp795_stop_oscillator(struct device *dev, bool *extosc)
+{
+ int retries = 5;
+ int ret;
+ u8 data;
+
+ ret = mcp795_rtcc_set_bits(dev, MCP795_REG_SECONDS, MCP795_ST_BIT, 0);
+ if (ret)
+ return ret;
+ ret = mcp795_rtcc_read(dev, MCP795_REG_CONTROL, &data, 1);
+ if (ret)
+ return ret;
+ *extosc = !!(data & MCP795_EXTOSC_BIT);
+ ret = mcp795_rtcc_set_bits(
+ dev, MCP795_REG_CONTROL, MCP795_EXTOSC_BIT, 0);
+ if (ret)
+ return ret;
+ /* wait for the OSCON bit to clear */
+ do {
+ usleep_range(700, 800);
+ ret = mcp795_rtcc_read(dev, MCP795_REG_DAY, &data, 1);
+ if (ret)
+ break;
+ if (!(data & MCP795_OSCON_BIT))
+ break;
+
+ } while (--retries);
+
+ return !retries ? -EIO : ret;
+}
+
+static int mcp795_start_oscillator(struct device *dev, bool *extosc)
+{
+ if (extosc) {
+ u8 data = *extosc ? MCP795_EXTOSC_BIT : 0;
+ int ret;
+
+ ret = mcp795_rtcc_set_bits(
+ dev, MCP795_REG_CONTROL, MCP795_EXTOSC_BIT, data);
+ if (ret)
+ return ret;
+ }
+ return mcp795_rtcc_set_bits(
+ dev, MCP795_REG_SECONDS, MCP795_ST_BIT, MCP795_ST_BIT);
+}
+
static int mcp795_set_time(struct device *dev, struct rtc_time *tim)
{
int ret;
u8 data[7];
+ bool extosc;
+
+ /* Stop RTC and store current value of EXTOSC bit */
+ ret = mcp795_stop_oscillator(dev, &extosc);
+ if (ret)
+ return ret;
/* Read first, so we can leave config bits untouched */
- ret = mcp795_rtcc_read(dev, 0x01, data, sizeof(data));
+ ret = mcp795_rtcc_read(dev, MCP795_REG_SECONDS, data, sizeof(data));
if (ret)
return ret;
- data[0] = (data[0] & 0x80) | ((tim->tm_sec / 10) << 4) | (tim->tm_sec % 10);
- data[1] = (data[1] & 0x80) | ((tim->tm_min / 10) << 4) | (tim->tm_min % 10);
- data[2] = ((tim->tm_hour / 10) << 4) | (tim->tm_hour % 10);
- data[4] = ((tim->tm_mday / 10) << 4) | ((tim->tm_mday) % 10);
- data[5] = (data[5] & 0x10) | (tim->tm_mon / 10) | (tim->tm_mon % 10);
+ data[0] = (data[0] & 0x80) | bin2bcd(tim->tm_sec);
+ data[1] = (data[1] & 0x80) | bin2bcd(tim->tm_min);
+ data[2] = bin2bcd(tim->tm_hour);
+ data[4] = bin2bcd(tim->tm_mday);
+ data[5] = (data[5] & MCP795_LP_BIT) | bin2bcd(tim->tm_mon + 1);
if (tim->tm_year > 100)
tim->tm_year -= 100;
- data[6] = ((tim->tm_year / 10) << 4) | (tim->tm_year % 10);
+ data[6] = bin2bcd(tim->tm_year);
+
+ /* Always write the date and month using a separate Write command.
+ * This is a workaround for a known silicon issue where some combinations
+ * of date and month values may result in the date being reset to 1.
+ */
+ ret = mcp795_rtcc_write(dev, MCP795_REG_SECONDS, data, 5);
+ if (ret)
+ return ret;
- ret = mcp795_rtcc_write(dev, 0x01, data, sizeof(data));
+ ret = mcp795_rtcc_write(dev, MCP795_REG_MONTH, &data[5], 2);
+ if (ret)
+ return ret;
+ /* Restart the RTC and restore the previous value of the EXTOSC bit.
+ * There is no need to clear the EXTOSC bit when the previous value was 0,
+ * because it was already cleared when stopping the RTC oscillator.
+ */
+ ret = mcp795_start_oscillator(dev, extosc ? &extosc : NULL);
if (ret)
return ret;
@@ -132,17 +210,17 @@ static int mcp795_read_time(struct device *dev, struct rtc_time *tim)
int ret;
u8 data[7];
- ret = mcp795_rtcc_read(dev, 0x01, data, sizeof(data));
+ ret = mcp795_rtcc_read(dev, MCP795_REG_SECONDS, data, sizeof(data));
if (ret)
return ret;
- tim->tm_sec = ((data[0] & 0x70) >> 4) * 10 + (data[0] & 0x0f);
- tim->tm_min = ((data[1] & 0x70) >> 4) * 10 + (data[1] & 0x0f);
- tim->tm_hour = ((data[2] & 0x30) >> 4) * 10 + (data[2] & 0x0f);
- tim->tm_mday = ((data[4] & 0x30) >> 4) * 10 + (data[4] & 0x0f);
- tim->tm_mon = ((data[5] & 0x10) >> 4) * 10 + (data[5] & 0x0f);
- tim->tm_year = ((data[6] & 0xf0) >> 4) * 10 + (data[6] & 0x0f) + 100; /* Assume we are in 20xx */
+ tim->tm_sec = bcd2bin(data[0] & 0x7F);
+ tim->tm_min = bcd2bin(data[1] & 0x7F);
+ tim->tm_hour = bcd2bin(data[2] & 0x3F);
+ tim->tm_mday = bcd2bin(data[4] & 0x3F);
+ tim->tm_mon = bcd2bin(data[5] & 0x1F) - 1;
+ tim->tm_year = bcd2bin(data[6]) + 100; /* Assume we are in 20xx */
dev_dbg(dev, "Read from mcp795: %04d-%02d-%02d %02d:%02d:%02d\n",
tim->tm_year + 1900, tim->tm_mon, tim->tm_mday,
@@ -169,13 +247,13 @@ static int mcp795_probe(struct spi_device *spi)
return ret;
}
- /* Start the oscillator */
- mcp795_rtcc_set_bits(&spi->dev, 0x01, MCP795_ST_BIT, MCP795_ST_BIT);
+ /* Start the oscillator but don't set the value of EXTOSC bit */
+ mcp795_start_oscillator(&spi->dev, NULL);
/* Clear the 12 hour mode flag*/
mcp795_rtcc_set_bits(&spi->dev, 0x03, MCP795_24_BIT, 0);
rtc = devm_rtc_device_register(&spi->dev, "rtc-mcp795",
- &mcp795_rtc_ops, THIS_MODULE);
+ &mcp795_rtc_ops, THIS_MODULE);
if (IS_ERR(rtc))
return PTR_ERR(rtc);
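The bin2bcd()/bcd2bin() helpers from <linux/bcd.h> that replace the open-coded digit math above are easy to verify by hand:

    bin2bcd(59) == 0x59        bcd2bin(0x59) == 59

so data[0] = (data[0] & 0x80) | bin2bcd(tim->tm_sec) packs the seconds while preserving the ST (oscillator start) bit in bit 7.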
diff --git a/drivers/rtc/rtc-pcf85063.c b/drivers/rtc/rtc-pcf85063.c
index efb0a08ac117..a06dff994c83 100644
--- a/drivers/rtc/rtc-pcf85063.c
+++ b/drivers/rtc/rtc-pcf85063.c
@@ -191,12 +191,19 @@ static int pcf85063_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct rtc_device *rtc;
+ int err;
dev_dbg(&client->dev, "%s\n", __func__);
if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C))
return -ENODEV;
+ err = i2c_smbus_read_byte_data(client, PCF85063_REG_CTRL1);
+ if (err < 0) {
+ dev_err(&client->dev, "RTC chip is not present\n");
+ return err;
+ }
+
rtc = devm_rtc_device_register(&client->dev,
pcf85063_driver.driver.name,
&pcf85063_rtc_ops, THIS_MODULE);
diff --git a/drivers/rtc/rtc-r7301.c b/drivers/rtc/rtc-r7301.c
new file mode 100644
index 000000000000..28d540885f3d
--- /dev/null
+++ b/drivers/rtc/rtc-r7301.c
@@ -0,0 +1,453 @@
+/*
+ * EPSON TOYOCOM RTC-7301SF/DG Driver
+ *
+ * Copyright (c) 2016 Akinobu Mita <akinobu.mita@gmail.com>
+ *
+ * Based on rtc-rp5c01.c
+ *
+ * Datasheet: http://www5.epsondevice.com/en/products/parallel/rtc7301sf.html
+ */
+
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/regmap.h>
+#include <linux/platform_device.h>
+#include <linux/rtc.h>
+
+#define DRV_NAME "rtc-r7301"
+
+#define RTC7301_1_SEC 0x0 /* Bank 0 and Bank 1 */
+#define RTC7301_10_SEC 0x1 /* Bank 0 and Bank 1 */
+#define RTC7301_AE BIT(3)
+#define RTC7301_1_MIN 0x2 /* Bank 0 and Bank 1 */
+#define RTC7301_10_MIN 0x3 /* Bank 0 and Bank 1 */
+#define RTC7301_1_HOUR 0x4 /* Bank 0 and Bank 1 */
+#define RTC7301_10_HOUR 0x5 /* Bank 0 and Bank 1 */
+#define RTC7301_DAY_OF_WEEK 0x6 /* Bank 0 and Bank 1 */
+#define RTC7301_1_DAY 0x7 /* Bank 0 and Bank 1 */
+#define RTC7301_10_DAY 0x8 /* Bank 0 and Bank 1 */
+#define RTC7301_1_MONTH 0x9 /* Bank 0 */
+#define RTC7301_10_MONTH 0xa /* Bank 0 */
+#define RTC7301_1_YEAR 0xb /* Bank 0 */
+#define RTC7301_10_YEAR 0xc /* Bank 0 */
+#define RTC7301_100_YEAR 0xd /* Bank 0 */
+#define RTC7301_1000_YEAR 0xe /* Bank 0 */
+#define RTC7301_ALARM_CONTROL 0xe /* Bank 1 */
+#define RTC7301_ALARM_CONTROL_AIE BIT(0)
+#define RTC7301_ALARM_CONTROL_AF BIT(1)
+#define RTC7301_TIMER_CONTROL 0xe /* Bank 2 */
+#define RTC7301_TIMER_CONTROL_TIE BIT(0)
+#define RTC7301_TIMER_CONTROL_TF BIT(1)
+#define RTC7301_CONTROL 0xf /* All banks */
+#define RTC7301_CONTROL_BUSY BIT(0)
+#define RTC7301_CONTROL_STOP BIT(1)
+#define RTC7301_CONTROL_BANK_SEL_0 BIT(2)
+#define RTC7301_CONTROL_BANK_SEL_1 BIT(3)
+
+struct rtc7301_priv {
+ struct regmap *regmap;
+ int irq;
+ spinlock_t lock;
+ u8 bank;
+};
+
+static const struct regmap_config rtc7301_regmap_config = {
+ .reg_bits = 32,
+ .val_bits = 8,
+ .reg_stride = 4,
+};
+
+static u8 rtc7301_read(struct rtc7301_priv *priv, unsigned int reg)
+{
+ int reg_stride = regmap_get_reg_stride(priv->regmap);
+ unsigned int val;
+
+ regmap_read(priv->regmap, reg_stride * reg, &val);
+
+ return val & 0xf;
+}
+
+static void rtc7301_write(struct rtc7301_priv *priv, u8 val, unsigned int reg)
+{
+ int reg_stride = regmap_get_reg_stride(priv->regmap);
+
+ regmap_write(priv->regmap, reg_stride * reg, val);
+}
+
+static void rtc7301_update_bits(struct rtc7301_priv *priv, unsigned int reg,
+ u8 mask, u8 val)
+{
+ int reg_stride = regmap_get_reg_stride(priv->regmap);
+
+ regmap_update_bits(priv->regmap, reg_stride * reg, mask, val);
+}
+
+static int rtc7301_wait_while_busy(struct rtc7301_priv *priv)
+{
+ int retries = 100;
+
+ while (retries-- > 0) {
+ u8 val;
+
+ val = rtc7301_read(priv, RTC7301_CONTROL);
+ if (!(val & RTC7301_CONTROL_BUSY))
+ return 0;
+
+ usleep_range(200, 300);
+ }
+
+ return -ETIMEDOUT;
+}
+
+static void rtc7301_stop(struct rtc7301_priv *priv)
+{
+ rtc7301_update_bits(priv, RTC7301_CONTROL, RTC7301_CONTROL_STOP,
+ RTC7301_CONTROL_STOP);
+}
+
+static void rtc7301_start(struct rtc7301_priv *priv)
+{
+ rtc7301_update_bits(priv, RTC7301_CONTROL, RTC7301_CONTROL_STOP, 0);
+}
+
+static void rtc7301_select_bank(struct rtc7301_priv *priv, u8 bank)
+{
+ u8 val = 0;
+
+ if (bank == priv->bank)
+ return;
+
+ if (bank & BIT(0))
+ val |= RTC7301_CONTROL_BANK_SEL_0;
+ if (bank & BIT(1))
+ val |= RTC7301_CONTROL_BANK_SEL_1;
+
+ rtc7301_update_bits(priv, RTC7301_CONTROL,
+ RTC7301_CONTROL_BANK_SEL_0 |
+ RTC7301_CONTROL_BANK_SEL_1, val);
+
+ priv->bank = bank;
+}
+
+static void rtc7301_get_time(struct rtc7301_priv *priv, struct rtc_time *tm,
+ bool alarm)
+{
+ int year;
+
+ tm->tm_sec = rtc7301_read(priv, RTC7301_1_SEC);
+ tm->tm_sec += (rtc7301_read(priv, RTC7301_10_SEC) & ~RTC7301_AE) * 10;
+ tm->tm_min = rtc7301_read(priv, RTC7301_1_MIN);
+ tm->tm_min += (rtc7301_read(priv, RTC7301_10_MIN) & ~RTC7301_AE) * 10;
+ tm->tm_hour = rtc7301_read(priv, RTC7301_1_HOUR);
+ tm->tm_hour += (rtc7301_read(priv, RTC7301_10_HOUR) & ~RTC7301_AE) * 10;
+ tm->tm_mday = rtc7301_read(priv, RTC7301_1_DAY);
+ tm->tm_mday += (rtc7301_read(priv, RTC7301_10_DAY) & ~RTC7301_AE) * 10;
+
+ if (alarm) {
+ tm->tm_wday = -1;
+ tm->tm_mon = -1;
+ tm->tm_year = -1;
+ tm->tm_yday = -1;
+ tm->tm_isdst = -1;
+ return;
+ }
+
+ tm->tm_wday = (rtc7301_read(priv, RTC7301_DAY_OF_WEEK) & ~RTC7301_AE);
+ tm->tm_mon = rtc7301_read(priv, RTC7301_10_MONTH) * 10 +
+ rtc7301_read(priv, RTC7301_1_MONTH) - 1;
+ year = rtc7301_read(priv, RTC7301_1000_YEAR) * 1000 +
+ rtc7301_read(priv, RTC7301_100_YEAR) * 100 +
+ rtc7301_read(priv, RTC7301_10_YEAR) * 10 +
+ rtc7301_read(priv, RTC7301_1_YEAR);
+
+ tm->tm_year = year - 1900;
+}
+
+static void rtc7301_write_time(struct rtc7301_priv *priv, struct rtc_time *tm,
+ bool alarm)
+{
+ int year;
+
+ rtc7301_write(priv, tm->tm_sec % 10, RTC7301_1_SEC);
+ rtc7301_write(priv, tm->tm_sec / 10, RTC7301_10_SEC);
+
+ rtc7301_write(priv, tm->tm_min % 10, RTC7301_1_MIN);
+ rtc7301_write(priv, tm->tm_min / 10, RTC7301_10_MIN);
+
+ rtc7301_write(priv, tm->tm_hour % 10, RTC7301_1_HOUR);
+ rtc7301_write(priv, tm->tm_hour / 10, RTC7301_10_HOUR);
+
+ rtc7301_write(priv, tm->tm_mday % 10, RTC7301_1_DAY);
+ rtc7301_write(priv, tm->tm_mday / 10, RTC7301_10_DAY);
+
+ /* Don't care for alarm register */
+ rtc7301_write(priv, alarm ? RTC7301_AE : tm->tm_wday,
+ RTC7301_DAY_OF_WEEK);
+
+ if (alarm)
+ return;
+
+ rtc7301_write(priv, (tm->tm_mon + 1) % 10, RTC7301_1_MONTH);
+ rtc7301_write(priv, (tm->tm_mon + 1) / 10, RTC7301_10_MONTH);
+
+ year = tm->tm_year + 1900;
+
+ rtc7301_write(priv, year % 10, RTC7301_1_YEAR);
+ rtc7301_write(priv, (year / 10) % 10, RTC7301_10_YEAR);
+ rtc7301_write(priv, (year / 100) % 10, RTC7301_100_YEAR);
+ rtc7301_write(priv, year / 1000, RTC7301_1000_YEAR);
+}
+
+static void rtc7301_alarm_irq(struct rtc7301_priv *priv, unsigned int enabled)
+{
+ rtc7301_update_bits(priv, RTC7301_ALARM_CONTROL,
+ RTC7301_ALARM_CONTROL_AF |
+ RTC7301_ALARM_CONTROL_AIE,
+ enabled ? RTC7301_ALARM_CONTROL_AIE : 0);
+}
+
+static int rtc7301_read_time(struct device *dev, struct rtc_time *tm)
+{
+ struct rtc7301_priv *priv = dev_get_drvdata(dev);
+ unsigned long flags;
+ int err;
+
+ spin_lock_irqsave(&priv->lock, flags);
+
+ rtc7301_select_bank(priv, 0);
+
+ err = rtc7301_wait_while_busy(priv);
+ if (!err)
+ rtc7301_get_time(priv, tm, false);
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ return err ? err : rtc_valid_tm(tm);
+}
+
+static int rtc7301_set_time(struct device *dev, struct rtc_time *tm)
+{
+ struct rtc7301_priv *priv = dev_get_drvdata(dev);
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->lock, flags);
+
+ rtc7301_stop(priv);
+ usleep_range(200, 300);
+ rtc7301_select_bank(priv, 0);
+ rtc7301_write_time(priv, tm, false);
+ rtc7301_start(priv);
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ return 0;
+}
+
+static int rtc7301_read_alarm(struct device *dev, struct rtc_wkalrm *alarm)
+{
+ struct rtc7301_priv *priv = dev_get_drvdata(dev);
+ unsigned long flags;
+ u8 alrm_ctrl;
+
+ if (priv->irq <= 0)
+ return -EINVAL;
+
+ spin_lock_irqsave(&priv->lock, flags);
+
+ rtc7301_select_bank(priv, 1);
+ rtc7301_get_time(priv, &alarm->time, true);
+
+ alrm_ctrl = rtc7301_read(priv, RTC7301_ALARM_CONTROL);
+
+ alarm->enabled = !!(alrm_ctrl & RTC7301_ALARM_CONTROL_AIE);
+ alarm->pending = !!(alrm_ctrl & RTC7301_ALARM_CONTROL_AF);
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ return 0;
+}
+
+static int rtc7301_set_alarm(struct device *dev, struct rtc_wkalrm *alarm)
+{
+ struct rtc7301_priv *priv = dev_get_drvdata(dev);
+ unsigned long flags;
+
+ if (priv->irq <= 0)
+ return -EINVAL;
+
+ spin_lock_irqsave(&priv->lock, flags);
+
+ rtc7301_select_bank(priv, 1);
+ rtc7301_write_time(priv, &alarm->time, true);
+ rtc7301_alarm_irq(priv, alarm->enabled);
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ return 0;
+}
+
+static int rtc7301_alarm_irq_enable(struct device *dev, unsigned int enabled)
+{
+ struct rtc7301_priv *priv = dev_get_drvdata(dev);
+ unsigned long flags;
+
+ if (priv->irq <= 0)
+ return -EINVAL;
+
+ spin_lock_irqsave(&priv->lock, flags);
+
+ rtc7301_select_bank(priv, 1);
+ rtc7301_alarm_irq(priv, enabled);
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ return 0;
+}
+
+static const struct rtc_class_ops rtc7301_rtc_ops = {
+ .read_time = rtc7301_read_time,
+ .set_time = rtc7301_set_time,
+ .read_alarm = rtc7301_read_alarm,
+ .set_alarm = rtc7301_set_alarm,
+ .alarm_irq_enable = rtc7301_alarm_irq_enable,
+};
+
+static irqreturn_t rtc7301_irq_handler(int irq, void *dev_id)
+{
+ struct rtc_device *rtc = dev_id;
+ struct rtc7301_priv *priv = dev_get_drvdata(rtc->dev.parent);
+ unsigned long flags;
+ irqreturn_t ret = IRQ_NONE;
+ u8 alrm_ctrl;
+
+ spin_lock_irqsave(&priv->lock, flags);
+
+ rtc7301_select_bank(priv, 1);
+
+ alrm_ctrl = rtc7301_read(priv, RTC7301_ALARM_CONTROL);
+ if (alrm_ctrl & RTC7301_ALARM_CONTROL_AF) {
+ ret = IRQ_HANDLED;
+ rtc7301_alarm_irq(priv, false);
+ rtc_update_irq(rtc, 1, RTC_IRQF | RTC_AF);
+ }
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ return ret;
+}
+
+static void rtc7301_init(struct rtc7301_priv *priv)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->lock, flags);
+
+ rtc7301_select_bank(priv, 2);
+ rtc7301_write(priv, 0, RTC7301_TIMER_CONTROL);
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+}
+
+static int __init rtc7301_rtc_probe(struct platform_device *dev)
+{
+ struct resource *res;
+ void __iomem *regs;
+ struct rtc7301_priv *priv;
+ struct rtc_device *rtc;
+ int ret;
+
+ res = platform_get_resource(dev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -ENODEV;
+
+ priv = devm_kzalloc(&dev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ regs = devm_ioremap_resource(&dev->dev, res);
+ if (IS_ERR(regs))
+ return PTR_ERR(regs);
+
+ priv->regmap = devm_regmap_init_mmio(&dev->dev, regs,
+ &rtc7301_regmap_config);
+ if (IS_ERR(priv->regmap))
+ return PTR_ERR(priv->regmap);
+
+ priv->irq = platform_get_irq(dev, 0);
+
+ spin_lock_init(&priv->lock);
+ priv->bank = -1;
+
+ rtc7301_init(priv);
+
+ platform_set_drvdata(dev, priv);
+
+ rtc = devm_rtc_device_register(&dev->dev, DRV_NAME, &rtc7301_rtc_ops,
+ THIS_MODULE);
+ if (IS_ERR(rtc))
+ return PTR_ERR(rtc);
+
+ if (priv->irq > 0) {
+ ret = devm_request_irq(&dev->dev, priv->irq,
+ rtc7301_irq_handler, IRQF_SHARED,
+ dev_name(&dev->dev), rtc);
+ if (ret) {
+ priv->irq = 0;
+ dev_err(&dev->dev, "unable to request IRQ\n");
+ } else {
+ device_set_wakeup_capable(&dev->dev, true);
+ }
+ }
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+
+static int rtc7301_suspend(struct device *dev)
+{
+ struct rtc7301_priv *priv = dev_get_drvdata(dev);
+
+ if (device_may_wakeup(dev))
+ enable_irq_wake(priv->irq);
+
+ return 0;
+}
+
+static int rtc7301_resume(struct device *dev)
+{
+ struct rtc7301_priv *priv = dev_get_drvdata(dev);
+
+ if (device_may_wakeup(dev))
+ disable_irq_wake(priv->irq);
+
+ return 0;
+}
+
+#endif
+
+static SIMPLE_DEV_PM_OPS(rtc7301_pm_ops, rtc7301_suspend, rtc7301_resume);
+
+static const struct of_device_id rtc7301_dt_match[] = {
+ { .compatible = "epson,rtc7301sf" },
+ { .compatible = "epson,rtc7301dg" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, rtc7301_dt_match);
+
+static struct platform_driver rtc7301_rtc_driver = {
+ .driver = {
+ .name = DRV_NAME,
+ .of_match_table = rtc7301_dt_match,
+ .pm = &rtc7301_pm_ops,
+ },
+};
+
+module_platform_driver_probe(rtc7301_rtc_driver, rtc7301_rtc_probe);
+
+MODULE_AUTHOR("Akinobu Mita <akinobu.mita@gmail.com>");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("EPSON TOYOCOM RTC-7301SF/DG Driver");
+MODULE_ALIAS("platform:rtc-r7301");
diff --git a/drivers/rtc/rtc-starfire.c b/drivers/rtc/rtc-starfire.c
index 83a057a03060..7fc36973fa33 100644
--- a/drivers/rtc/rtc-starfire.c
+++ b/drivers/rtc/rtc-starfire.c
@@ -1,20 +1,18 @@
/* rtc-starfire.c: Starfire platform RTC driver.
*
+ * Author: David S. Miller
+ * License: GPL
+ *
* Copyright (C) 2008 David S. Miller <davem@davemloft.net>
*/
#include <linux/kernel.h>
-#include <linux/module.h>
#include <linux/init.h>
#include <linux/rtc.h>
#include <linux/platform_device.h>
#include <asm/oplib.h>
-MODULE_AUTHOR("David S. Miller <davem@davemloft.net>");
-MODULE_DESCRIPTION("Starfire RTC driver");
-MODULE_LICENSE("GPL");
-
static u32 starfire_get_time(void)
{
static char obp_gettod[32];
@@ -57,4 +55,4 @@ static struct platform_driver starfire_rtc_driver = {
},
};
-module_platform_driver_probe(starfire_rtc_driver, starfire_rtc_probe);
+builtin_platform_driver_probe(starfire_rtc_driver, starfire_rtc_probe);
diff --git a/drivers/rtc/rtc-sun4v.c b/drivers/rtc/rtc-sun4v.c
index 7c696c12f28f..11bc562eba5d 100644
--- a/drivers/rtc/rtc-sun4v.c
+++ b/drivers/rtc/rtc-sun4v.c
@@ -1,12 +1,14 @@
/* rtc-sun4v.c: Hypervisor based RTC for SUN4V systems.
*
+ * Author: David S. Miller
+ * License: GPL
+ *
* Copyright (C) 2008 David S. Miller <davem@davemloft.net>
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
-#include <linux/module.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/rtc.h>
@@ -98,8 +100,4 @@ static struct platform_driver sun4v_rtc_driver = {
},
};
-module_platform_driver_probe(sun4v_rtc_driver, sun4v_rtc_probe);
-
-MODULE_AUTHOR("David S. Miller <davem@davemloft.net>");
-MODULE_DESCRIPTION("SUN4V RTC driver");
-MODULE_LICENSE("GPL");
+builtin_platform_driver_probe(sun4v_rtc_driver, sun4v_rtc_probe);
diff --git a/drivers/rtc/rtc-twl.c b/drivers/rtc/rtc-twl.c
index 176720b7b9e5..c18c39212ce6 100644
--- a/drivers/rtc/rtc-twl.c
+++ b/drivers/rtc/rtc-twl.c
@@ -33,6 +33,10 @@
#include <linux/i2c/twl.h>
+enum twl_class {
+ TWL_4030 = 0,
+ TWL_6030,
+};
/*
* RTC block register offsets (use TWL_MODULE_RTC)
@@ -136,16 +140,30 @@ static const u8 twl6030_rtc_reg_map[] = {
#define ALL_TIME_REGS 6
/*----------------------------------------------------------------------*/
-static u8 *rtc_reg_map;
+struct twl_rtc {
+ struct device *dev;
+ struct rtc_device *rtc;
+ u8 *reg_map;
+ /*
+ * Cache the value for timer/alarm interrupts register; this is
+ * only changed by callers holding rtc ops lock (or resume).
+ */
+ unsigned char rtc_irq_bits;
+ bool wake_enabled;
+#ifdef CONFIG_PM_SLEEP
+ unsigned char irqstat;
+#endif
+ enum twl_class class;
+};
/*
* Supports 1 byte read from TWL RTC register.
*/
-static int twl_rtc_read_u8(u8 *data, u8 reg)
+static int twl_rtc_read_u8(struct twl_rtc *twl_rtc, u8 *data, u8 reg)
{
int ret;
- ret = twl_i2c_read_u8(TWL_MODULE_RTC, data, (rtc_reg_map[reg]));
+ ret = twl_i2c_read_u8(TWL_MODULE_RTC, data, (twl_rtc->reg_map[reg]));
if (ret < 0)
pr_err("Could not read TWL register %X - error %d\n", reg, ret);
return ret;
@@ -154,11 +172,11 @@ static int twl_rtc_read_u8(u8 *data, u8 reg)
/*
* Supports 1 byte write to TWL RTC registers.
*/
-static int twl_rtc_write_u8(u8 data, u8 reg)
+static int twl_rtc_write_u8(struct twl_rtc *twl_rtc, u8 data, u8 reg)
{
int ret;
- ret = twl_i2c_write_u8(TWL_MODULE_RTC, data, (rtc_reg_map[reg]));
+ ret = twl_i2c_write_u8(TWL_MODULE_RTC, data, (twl_rtc->reg_map[reg]));
if (ret < 0)
pr_err("Could not write TWL register %X - error %d\n",
reg, ret);
@@ -166,28 +184,22 @@ static int twl_rtc_write_u8(u8 data, u8 reg)
}
/*
- * Cache the value for timer/alarm interrupts register; this is
- * only changed by callers holding rtc ops lock (or resume).
- */
-static unsigned char rtc_irq_bits;
-
-/*
* Enable 1/second update and/or alarm interrupts.
*/
-static int set_rtc_irq_bit(unsigned char bit)
+static int set_rtc_irq_bit(struct twl_rtc *twl_rtc, unsigned char bit)
{
unsigned char val;
int ret;
/* if the bit is set, return from here */
- if (rtc_irq_bits & bit)
+ if (twl_rtc->rtc_irq_bits & bit)
return 0;
- val = rtc_irq_bits | bit;
+ val = twl_rtc->rtc_irq_bits | bit;
val &= ~BIT_RTC_INTERRUPTS_REG_EVERY_M;
- ret = twl_rtc_write_u8(val, REG_RTC_INTERRUPTS_REG);
+ ret = twl_rtc_write_u8(twl_rtc, val, REG_RTC_INTERRUPTS_REG);
if (ret == 0)
- rtc_irq_bits = val;
+ twl_rtc->rtc_irq_bits = val;
return ret;
}
@@ -195,19 +207,19 @@ static int set_rtc_irq_bit(unsigned char bit)
/*
* Disable update and/or alarm interrupts.
*/
-static int mask_rtc_irq_bit(unsigned char bit)
+static int mask_rtc_irq_bit(struct twl_rtc *twl_rtc, unsigned char bit)
{
unsigned char val;
int ret;
/* if the bit is clear, return from here */
- if (!(rtc_irq_bits & bit))
+ if (!(twl_rtc->rtc_irq_bits & bit))
return 0;
- val = rtc_irq_bits & ~bit;
- ret = twl_rtc_write_u8(val, REG_RTC_INTERRUPTS_REG);
+ val = twl_rtc->rtc_irq_bits & ~bit;
+ ret = twl_rtc_write_u8(twl_rtc, val, REG_RTC_INTERRUPTS_REG);
if (ret == 0)
- rtc_irq_bits = val;
+ twl_rtc->rtc_irq_bits = val;
return ret;
}
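Both helpers above lean on the cached rtc_irq_bits value so that a no-op enable or disable costs no I2C traffic, and the cache is only updated after the write succeeds. A minimal userspace sketch of the same pattern, with bus_write() as a hypothetical stand-in for the slow register write:

#include <stdint.h>
#include <stdio.h>

struct rtc_state {
        uint8_t irq_bits;               /* cached copy of the interrupt register */
};

/* Hypothetical stand-in for the slow I2C register write. */
static int bus_write(uint8_t val)
{
        printf("bus write: 0x%02x\n", val);
        return 0;
}

/* Enable an interrupt bit, skipping the bus when it is already set. */
static int set_irq_bit(struct rtc_state *s, uint8_t bit)
{
        uint8_t val;
        int ret;

        if (s->irq_bits & bit)
                return 0;               /* cache hit: no I/O needed */
        val = s->irq_bits | bit;
        ret = bus_write(val);
        if (ret == 0)
                s->irq_bits = val;      /* update the cache only on success */
        return ret;
}

int main(void)
{
        struct rtc_state s = { 0 };

        set_irq_bit(&s, 0x08);          /* performs the write */
        set_irq_bit(&s, 0x08);          /* cached, no write */
        return 0;
}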
@@ -215,21 +227,23 @@ static int mask_rtc_irq_bit(unsigned char bit)
static int twl_rtc_alarm_irq_enable(struct device *dev, unsigned enabled)
{
struct platform_device *pdev = to_platform_device(dev);
+ struct twl_rtc *twl_rtc = dev_get_drvdata(dev);
int irq = platform_get_irq(pdev, 0);
- static bool twl_rtc_wake_enabled;
int ret;
if (enabled) {
- ret = set_rtc_irq_bit(BIT_RTC_INTERRUPTS_REG_IT_ALARM_M);
- if (device_can_wakeup(dev) && !twl_rtc_wake_enabled) {
+ ret = set_rtc_irq_bit(twl_rtc,
+ BIT_RTC_INTERRUPTS_REG_IT_ALARM_M);
+ if (device_can_wakeup(dev) && !twl_rtc->wake_enabled) {
enable_irq_wake(irq);
- twl_rtc_wake_enabled = true;
+ twl_rtc->wake_enabled = true;
}
} else {
- ret = mask_rtc_irq_bit(BIT_RTC_INTERRUPTS_REG_IT_ALARM_M);
- if (twl_rtc_wake_enabled) {
+ ret = mask_rtc_irq_bit(twl_rtc,
+ BIT_RTC_INTERRUPTS_REG_IT_ALARM_M);
+ if (twl_rtc->wake_enabled) {
disable_irq_wake(irq);
- twl_rtc_wake_enabled = false;
+ twl_rtc->wake_enabled = false;
}
}
@@ -247,21 +261,23 @@ static int twl_rtc_alarm_irq_enable(struct device *dev, unsigned enabled)
*/
static int twl_rtc_read_time(struct device *dev, struct rtc_time *tm)
{
+ struct twl_rtc *twl_rtc = dev_get_drvdata(dev);
unsigned char rtc_data[ALL_TIME_REGS];
int ret;
u8 save_control;
u8 rtc_control;
- ret = twl_rtc_read_u8(&save_control, REG_RTC_CTRL_REG);
+ ret = twl_rtc_read_u8(twl_rtc, &save_control, REG_RTC_CTRL_REG);
if (ret < 0) {
dev_err(dev, "%s: reading CTRL_REG, error %d\n", __func__, ret);
return ret;
}
/* for twl6030/32 make sure BIT_RTC_CTRL_REG_GET_TIME_M is clear */
- if (twl_class_is_6030()) {
+ if (twl_rtc->class == TWL_6030) {
if (save_control & BIT_RTC_CTRL_REG_GET_TIME_M) {
save_control &= ~BIT_RTC_CTRL_REG_GET_TIME_M;
- ret = twl_rtc_write_u8(save_control, REG_RTC_CTRL_REG);
+ ret = twl_rtc_write_u8(twl_rtc, save_control,
+ REG_RTC_CTRL_REG);
if (ret < 0) {
dev_err(dev, "%s clr GET_TIME, error %d\n",
__func__, ret);
@@ -274,17 +290,17 @@ static int twl_rtc_read_time(struct device *dev, struct rtc_time *tm)
rtc_control = save_control | BIT_RTC_CTRL_REG_GET_TIME_M;
/* for twl6030/32 enable read access to static shadowed registers */
- if (twl_class_is_6030())
+ if (twl_rtc->class == TWL_6030)
rtc_control |= BIT_RTC_CTRL_REG_RTC_V_OPT;
- ret = twl_rtc_write_u8(rtc_control, REG_RTC_CTRL_REG);
+ ret = twl_rtc_write_u8(twl_rtc, rtc_control, REG_RTC_CTRL_REG);
if (ret < 0) {
dev_err(dev, "%s: writing CTRL_REG, error %d\n", __func__, ret);
return ret;
}
ret = twl_i2c_read(TWL_MODULE_RTC, rtc_data,
- (rtc_reg_map[REG_SECONDS_REG]), ALL_TIME_REGS);
+ (twl_rtc->reg_map[REG_SECONDS_REG]), ALL_TIME_REGS);
if (ret < 0) {
dev_err(dev, "%s: reading data, error %d\n", __func__, ret);
@@ -292,8 +308,8 @@ static int twl_rtc_read_time(struct device *dev, struct rtc_time *tm)
}
/* for twl6030 restore original state of rtc control register */
- if (twl_class_is_6030()) {
- ret = twl_rtc_write_u8(save_control, REG_RTC_CTRL_REG);
+ if (twl_rtc->class == TWL_6030) {
+ ret = twl_rtc_write_u8(twl_rtc, save_control, REG_RTC_CTRL_REG);
if (ret < 0) {
dev_err(dev, "%s: restore CTRL_REG, error %d\n",
__func__, ret);
@@ -313,6 +329,7 @@ static int twl_rtc_read_time(struct device *dev, struct rtc_time *tm)
static int twl_rtc_set_time(struct device *dev, struct rtc_time *tm)
{
+ struct twl_rtc *twl_rtc = dev_get_drvdata(dev);
unsigned char save_control;
unsigned char rtc_data[ALL_TIME_REGS];
int ret;
@@ -325,18 +342,18 @@ static int twl_rtc_set_time(struct device *dev, struct rtc_time *tm)
rtc_data[5] = bin2bcd(tm->tm_year - 100);
/* Stop RTC while updating the TC registers */
- ret = twl_rtc_read_u8(&save_control, REG_RTC_CTRL_REG);
+ ret = twl_rtc_read_u8(twl_rtc, &save_control, REG_RTC_CTRL_REG);
if (ret < 0)
goto out;
save_control &= ~BIT_RTC_CTRL_REG_STOP_RTC_M;
- ret = twl_rtc_write_u8(save_control, REG_RTC_CTRL_REG);
+ ret = twl_rtc_write_u8(twl_rtc, save_control, REG_RTC_CTRL_REG);
if (ret < 0)
goto out;
/* update all the time registers in one shot */
ret = twl_i2c_write(TWL_MODULE_RTC, rtc_data,
- (rtc_reg_map[REG_SECONDS_REG]), ALL_TIME_REGS);
+ (twl_rtc->reg_map[REG_SECONDS_REG]), ALL_TIME_REGS);
if (ret < 0) {
dev_err(dev, "rtc_set_time error %d\n", ret);
goto out;
@@ -344,7 +361,7 @@ static int twl_rtc_set_time(struct device *dev, struct rtc_time *tm)
/* Start back RTC */
save_control |= BIT_RTC_CTRL_REG_STOP_RTC_M;
- ret = twl_rtc_write_u8(save_control, REG_RTC_CTRL_REG);
+ ret = twl_rtc_write_u8(twl_rtc, save_control, REG_RTC_CTRL_REG);
out:
return ret;
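The rtc_data[] fields above are packed with bin2bcd() before the one-shot register write, and the read paths undo it with bcd2bin(). The kernel helpers come from linux/bcd.h; a self-contained sketch of the conversion:

#include <stdio.h>

/* Same logic as the kernel's bin2bcd()/bcd2bin() from linux/bcd.h. */
static unsigned char bin2bcd(unsigned char val)
{
        return ((val / 10) << 4) | (val % 10);
}

static unsigned char bcd2bin(unsigned char val)
{
        return (val >> 4) * 10 + (val & 0x0f);
}

int main(void)
{
        unsigned char sec = 59;
        unsigned char bcd = bin2bcd(sec);

        /* prints: 59 -> 0x59 -> 59 */
        printf("%u -> 0x%02x -> %u\n", sec, bcd, bcd2bin(bcd));
        return 0;
}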
@@ -355,11 +372,12 @@ out:
*/
static int twl_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alm)
{
+ struct twl_rtc *twl_rtc = dev_get_drvdata(dev);
unsigned char rtc_data[ALL_TIME_REGS];
int ret;
ret = twl_i2c_read(TWL_MODULE_RTC, rtc_data,
- (rtc_reg_map[REG_ALARM_SECONDS_REG]), ALL_TIME_REGS);
+ twl_rtc->reg_map[REG_ALARM_SECONDS_REG], ALL_TIME_REGS);
if (ret < 0) {
dev_err(dev, "rtc_read_alarm error %d\n", ret);
return ret;
@@ -374,7 +392,7 @@ static int twl_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alm)
alm->time.tm_year = bcd2bin(rtc_data[5]) + 100;
/* report cached alarm enable state */
- if (rtc_irq_bits & BIT_RTC_INTERRUPTS_REG_IT_ALARM_M)
+ if (twl_rtc->rtc_irq_bits & BIT_RTC_INTERRUPTS_REG_IT_ALARM_M)
alm->enabled = 1;
return ret;
@@ -382,6 +400,8 @@ static int twl_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alm)
static int twl_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alm)
{
+ struct twl_rtc *twl_rtc = dev_get_drvdata(dev);
+
unsigned char alarm_data[ALL_TIME_REGS];
int ret;
@@ -398,7 +418,7 @@ static int twl_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alm)
/* update all the alarm registers in one shot */
ret = twl_i2c_write(TWL_MODULE_RTC, alarm_data,
- (rtc_reg_map[REG_ALARM_SECONDS_REG]), ALL_TIME_REGS);
+ twl_rtc->reg_map[REG_ALARM_SECONDS_REG], ALL_TIME_REGS);
if (ret) {
dev_err(dev, "rtc_set_alarm error %d\n", ret);
goto out;
@@ -410,14 +430,15 @@ out:
return ret;
}
-static irqreturn_t twl_rtc_interrupt(int irq, void *rtc)
+static irqreturn_t twl_rtc_interrupt(int irq, void *data)
{
+ struct twl_rtc *twl_rtc = data;
unsigned long events;
int ret = IRQ_NONE;
int res;
u8 rd_reg;
- res = twl_rtc_read_u8(&rd_reg, REG_RTC_STATUS_REG);
+ res = twl_rtc_read_u8(twl_rtc, &rd_reg, REG_RTC_STATUS_REG);
if (res)
goto out;
/*
@@ -431,12 +452,12 @@ static irqreturn_t twl_rtc_interrupt(int irq, void *rtc)
else
events = RTC_IRQF | RTC_PF;
- res = twl_rtc_write_u8(BIT_RTC_STATUS_REG_ALARM_M,
- REG_RTC_STATUS_REG);
+ res = twl_rtc_write_u8(twl_rtc, BIT_RTC_STATUS_REG_ALARM_M,
+ REG_RTC_STATUS_REG);
if (res)
goto out;
- if (twl_class_is_4030()) {
+ if (twl_rtc->class == TWL_4030) {
/* Clear on Read enabled. RTC_IT bit of TWL4030_INT_PWR_ISR1
* needs 2 reads to clear the interrupt. One read is done in
* do_twl_pwrirq(). Doing the second read, to clear
@@ -455,7 +476,7 @@ static irqreturn_t twl_rtc_interrupt(int irq, void *rtc)
}
/* Notify RTC core on event */
- rtc_update_irq(rtc, 1, events);
+ rtc_update_irq(twl_rtc->rtc, 1, events);
ret = IRQ_HANDLED;
out:
@@ -474,21 +495,36 @@ static const struct rtc_class_ops twl_rtc_ops = {
static int twl_rtc_probe(struct platform_device *pdev)
{
- struct rtc_device *rtc;
+ struct twl_rtc *twl_rtc;
+ struct device_node *np = pdev->dev.of_node;
int ret = -EINVAL;
int irq = platform_get_irq(pdev, 0);
u8 rd_reg;
+ if (!np) {
+ dev_err(&pdev->dev, "no DT info\n");
+ return -EINVAL;
+ }
+
if (irq <= 0)
return ret;
- /* Initialize the register map */
- if (twl_class_is_4030())
- rtc_reg_map = (u8 *)twl4030_rtc_reg_map;
- else
- rtc_reg_map = (u8 *)twl6030_rtc_reg_map;
+ twl_rtc = devm_kzalloc(&pdev->dev, sizeof(*twl_rtc), GFP_KERNEL);
+ if (!twl_rtc)
+ return -ENOMEM;
- ret = twl_rtc_read_u8(&rd_reg, REG_RTC_STATUS_REG);
+ if (twl_class_is_4030()) {
+ twl_rtc->class = TWL_4030;
+ twl_rtc->reg_map = (u8 *)twl4030_rtc_reg_map;
+ } else if (twl_class_is_6030()) {
+ twl_rtc->class = TWL_6030;
+ twl_rtc->reg_map = (u8 *)twl6030_rtc_reg_map;
+ } else {
+ dev_err(&pdev->dev, "TWL Class not supported.\n");
+ return -EINVAL;
+ }
+
+ ret = twl_rtc_read_u8(twl_rtc, &rd_reg, REG_RTC_STATUS_REG);
if (ret < 0)
return ret;
@@ -499,11 +535,11 @@ static int twl_rtc_probe(struct platform_device *pdev)
dev_warn(&pdev->dev, "Pending Alarm interrupt detected.\n");
/* Clear RTC Power up reset and pending alarm interrupts */
- ret = twl_rtc_write_u8(rd_reg, REG_RTC_STATUS_REG);
+ ret = twl_rtc_write_u8(twl_rtc, rd_reg, REG_RTC_STATUS_REG);
if (ret < 0)
return ret;
- if (twl_class_is_6030()) {
+ if (twl_rtc->class == TWL_6030) {
twl6030_interrupt_unmask(TWL6030_RTC_INT_MASK,
REG_INT_MSK_LINE_A);
twl6030_interrupt_unmask(TWL6030_RTC_INT_MASK,
@@ -511,40 +547,42 @@ static int twl_rtc_probe(struct platform_device *pdev)
}
dev_info(&pdev->dev, "Enabling TWL-RTC\n");
- ret = twl_rtc_write_u8(BIT_RTC_CTRL_REG_STOP_RTC_M, REG_RTC_CTRL_REG);
+ ret = twl_rtc_write_u8(twl_rtc, BIT_RTC_CTRL_REG_STOP_RTC_M,
+ REG_RTC_CTRL_REG);
if (ret < 0)
return ret;
/* ensure interrupts are disabled, bootloaders can be strange */
- ret = twl_rtc_write_u8(0, REG_RTC_INTERRUPTS_REG);
+ ret = twl_rtc_write_u8(twl_rtc, 0, REG_RTC_INTERRUPTS_REG);
if (ret < 0)
dev_warn(&pdev->dev, "unable to disable interrupt\n");
/* init cached IRQ enable bits */
- ret = twl_rtc_read_u8(&rtc_irq_bits, REG_RTC_INTERRUPTS_REG);
+ ret = twl_rtc_read_u8(twl_rtc, &twl_rtc->rtc_irq_bits,
+ REG_RTC_INTERRUPTS_REG);
if (ret < 0)
return ret;
+ platform_set_drvdata(pdev, twl_rtc);
device_init_wakeup(&pdev->dev, 1);
- rtc = devm_rtc_device_register(&pdev->dev, pdev->name,
+ twl_rtc->rtc = devm_rtc_device_register(&pdev->dev, pdev->name,
&twl_rtc_ops, THIS_MODULE);
- if (IS_ERR(rtc)) {
+ if (IS_ERR(twl_rtc->rtc)) {
dev_err(&pdev->dev, "can't register RTC device, err %ld\n",
- PTR_ERR(rtc));
- return PTR_ERR(rtc);
+ PTR_ERR(twl_rtc->rtc));
+ return PTR_ERR(twl_rtc->rtc);
}
ret = devm_request_threaded_irq(&pdev->dev, irq, NULL,
twl_rtc_interrupt,
IRQF_TRIGGER_RISING | IRQF_ONESHOT,
- dev_name(&rtc->dev), rtc);
+ dev_name(&twl_rtc->rtc->dev), twl_rtc);
if (ret < 0) {
dev_err(&pdev->dev, "IRQ is not free.\n");
return ret;
}
- platform_set_drvdata(pdev, rtc);
return 0;
}
@@ -554,10 +592,12 @@ static int twl_rtc_probe(struct platform_device *pdev)
*/
static int twl_rtc_remove(struct platform_device *pdev)
{
+ struct twl_rtc *twl_rtc = platform_get_drvdata(pdev);
+
/* leave rtc running, but disable irqs */
- mask_rtc_irq_bit(BIT_RTC_INTERRUPTS_REG_IT_ALARM_M);
- mask_rtc_irq_bit(BIT_RTC_INTERRUPTS_REG_IT_TIMER_M);
- if (twl_class_is_6030()) {
+ mask_rtc_irq_bit(twl_rtc, BIT_RTC_INTERRUPTS_REG_IT_ALARM_M);
+ mask_rtc_irq_bit(twl_rtc, BIT_RTC_INTERRUPTS_REG_IT_TIMER_M);
+ if (twl_rtc->class == TWL_6030) {
twl6030_interrupt_mask(TWL6030_RTC_INT_MASK,
REG_INT_MSK_LINE_A);
twl6030_interrupt_mask(TWL6030_RTC_INT_MASK,
@@ -569,40 +609,40 @@ static int twl_rtc_remove(struct platform_device *pdev)
static void twl_rtc_shutdown(struct platform_device *pdev)
{
+ struct twl_rtc *twl_rtc = platform_get_drvdata(pdev);
+
	/* mask timer interrupts, but leave alarm interrupts on to enable
	 * power-on when alarm is triggered */
- mask_rtc_irq_bit(BIT_RTC_INTERRUPTS_REG_IT_TIMER_M);
+ mask_rtc_irq_bit(twl_rtc, BIT_RTC_INTERRUPTS_REG_IT_TIMER_M);
}
#ifdef CONFIG_PM_SLEEP
-static unsigned char irqstat;
-
static int twl_rtc_suspend(struct device *dev)
{
- irqstat = rtc_irq_bits;
+ struct twl_rtc *twl_rtc = dev_get_drvdata(dev);
+
+ twl_rtc->irqstat = twl_rtc->rtc_irq_bits;
- mask_rtc_irq_bit(BIT_RTC_INTERRUPTS_REG_IT_TIMER_M);
+ mask_rtc_irq_bit(twl_rtc, BIT_RTC_INTERRUPTS_REG_IT_TIMER_M);
return 0;
}
static int twl_rtc_resume(struct device *dev)
{
- set_rtc_irq_bit(irqstat);
+ struct twl_rtc *twl_rtc = dev_get_drvdata(dev);
+
+ set_rtc_irq_bit(twl_rtc, twl_rtc->irqstat);
return 0;
}
#endif
static SIMPLE_DEV_PM_OPS(twl_rtc_pm_ops, twl_rtc_suspend, twl_rtc_resume);
-#ifdef CONFIG_OF
static const struct of_device_id twl_rtc_of_match[] = {
{.compatible = "ti,twl4030-rtc", },
{ },
};
MODULE_DEVICE_TABLE(of, twl_rtc_of_match);
-#endif
-
-MODULE_ALIAS("platform:twl_rtc");
static struct platform_driver twl4030rtc_driver = {
.probe = twl_rtc_probe,
@@ -611,7 +651,7 @@ static struct platform_driver twl4030rtc_driver = {
.driver = {
.name = "twl_rtc",
.pm = &twl_rtc_pm_ops,
- .of_match_table = of_match_ptr(twl_rtc_of_match),
+ .of_match_table = twl_rtc_of_match,
},
};
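Dropping of_match_ptr() goes hand in hand with the removed #ifdef CONFIG_OF guard: the probe now hard-requires a device-tree node, so the match table is always wanted, whereas the wrapper would turn the pointer into NULL on !CONFIG_OF builds and leave the table defined but unreferenced. A paraphrase of the wrapper (from include/linux/of.h):

#ifdef CONFIG_OF
#define of_match_ptr(ptr)       (ptr)
#else
#define of_match_ptr(ptr)       NULL
#endif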
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index 1de089019268..0e3fdfdbd098 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -69,6 +69,7 @@ static void dasd_block_tasklet(struct dasd_block *);
static void do_kick_device(struct work_struct *);
static void do_restore_device(struct work_struct *);
static void do_reload_device(struct work_struct *);
+static void do_requeue_requests(struct work_struct *);
static void dasd_return_cqr_cb(struct dasd_ccw_req *, void *);
static void dasd_device_timeout(unsigned long);
static void dasd_block_timeout(unsigned long);
@@ -125,6 +126,7 @@ struct dasd_device *dasd_alloc_device(void)
INIT_WORK(&device->kick_work, do_kick_device);
INIT_WORK(&device->restore_device, do_restore_device);
INIT_WORK(&device->reload_device, do_reload_device);
+ INIT_WORK(&device->requeue_requests, do_requeue_requests);
device->state = DASD_STATE_NEW;
device->target = DASD_STATE_NEW;
mutex_init(&device->state_mutex);
@@ -1448,9 +1450,9 @@ int dasd_start_IO(struct dasd_ccw_req *cqr)
cqr->starttime = jiffies;
cqr->retries--;
if (!test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags)) {
- cqr->lpm &= device->path_data.opm;
+ cqr->lpm &= dasd_path_get_opm(device);
if (!cqr->lpm)
- cqr->lpm = device->path_data.opm;
+ cqr->lpm = dasd_path_get_opm(device);
}
if (cqr->cpmode == 1) {
rc = ccw_device_tm_start(device->cdev, cqr->cpaddr,
@@ -1483,8 +1485,8 @@ int dasd_start_IO(struct dasd_ccw_req *cqr)
DBF_DEV_EVENT(DBF_WARNING, device,
"start_IO: selected paths gone (%x)",
cqr->lpm);
- } else if (cqr->lpm != device->path_data.opm) {
- cqr->lpm = device->path_data.opm;
+ } else if (cqr->lpm != dasd_path_get_opm(device)) {
+ cqr->lpm = dasd_path_get_opm(device);
DBF_DEV_EVENT(DBF_DEBUG, device, "%s",
"start_IO: selected paths gone,"
" retry on all paths");
@@ -1493,11 +1495,10 @@ int dasd_start_IO(struct dasd_ccw_req *cqr)
"start_IO: all paths in opm gone,"
" do path verification");
dasd_generic_last_path_gone(device);
- device->path_data.opm = 0;
- device->path_data.ppm = 0;
- device->path_data.npm = 0;
- device->path_data.tbvpm =
- ccw_device_get_path_mask(device->cdev);
+ dasd_path_no_path(device);
+ dasd_path_set_tbvpm(device,
+ ccw_device_get_path_mask(
+ device->cdev));
}
break;
case -ENODEV:
@@ -1623,6 +1624,13 @@ void dasd_generic_handle_state_change(struct dasd_device *device)
}
EXPORT_SYMBOL_GPL(dasd_generic_handle_state_change);
+static int dasd_check_hpf_error(struct irb *irb)
+{
+ return (scsw_tm_is_valid_schxs(&irb->scsw) &&
+ (irb->scsw.tm.sesq == SCSW_SESQ_DEV_NOFCX ||
+ irb->scsw.tm.sesq == SCSW_SESQ_PATH_NOFCX));
+}
+
/*
* Interrupt handler for "normal" ssch-io based dasd devices.
*/
@@ -1642,7 +1650,7 @@ void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
switch (PTR_ERR(irb)) {
case -EIO:
if (cqr && cqr->status == DASD_CQR_CLEAR_PENDING) {
- device = (struct dasd_device *) cqr->startdev;
+ device = cqr->startdev;
cqr->status = DASD_CQR_CLEARED;
dasd_device_clear_timer(device);
wake_up(&dasd_flush_wq);
@@ -1749,19 +1757,26 @@ void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
struct dasd_ccw_req, devlist);
}
} else { /* error */
+		/* check for an HPF error; if one is found,
+		 * call the discipline function to requeue all requests
+ * and disable HPF accordingly
+ */
+ if (cqr->cpmode && dasd_check_hpf_error(irb) &&
+ device->discipline->handle_hpf_error)
+ device->discipline->handle_hpf_error(device, irb);
/*
* If we don't want complex ERP for this request, then just
* reset this and retry it in the fastpath
*/
if (!test_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags) &&
cqr->retries > 0) {
- if (cqr->lpm == device->path_data.opm)
+ if (cqr->lpm == dasd_path_get_opm(device))
DBF_DEV_EVENT(DBF_DEBUG, device,
"default ERP in fastpath "
"(%i retries left)",
cqr->retries);
if (!test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags))
- cqr->lpm = device->path_data.opm;
+ cqr->lpm = dasd_path_get_opm(device);
cqr->status = DASD_CQR_QUEUED;
next = cqr;
} else
@@ -2002,17 +2017,18 @@ static void __dasd_device_check_path_events(struct dasd_device *device)
{
int rc;
- if (device->path_data.tbvpm) {
- if (device->stopped & ~(DASD_STOPPED_DC_WAIT |
- DASD_UNRESUMED_PM))
- return;
- rc = device->discipline->verify_path(
- device, device->path_data.tbvpm);
- if (rc)
- dasd_device_set_timer(device, 50);
- else
- device->path_data.tbvpm = 0;
- }
+ if (!dasd_path_get_tbvpm(device))
+ return;
+
+ if (device->stopped &
+ ~(DASD_STOPPED_DC_WAIT | DASD_UNRESUMED_PM))
+ return;
+ rc = device->discipline->verify_path(device,
+ dasd_path_get_tbvpm(device));
+ if (rc)
+ dasd_device_set_timer(device, 50);
+ else
+ dasd_path_clear_all_verify(device);
};
/*
@@ -2924,10 +2940,10 @@ static int _dasd_requeue_request(struct dasd_ccw_req *cqr)
if (!block)
return -EINVAL;
- spin_lock_irqsave(&block->queue_lock, flags);
+ spin_lock_irqsave(&block->request_queue_lock, flags);
req = (struct request *) cqr->callback_data;
blk_requeue_request(block->request_queue, req);
- spin_unlock_irqrestore(&block->queue_lock, flags);
+ spin_unlock_irqrestore(&block->request_queue_lock, flags);
return 0;
}
@@ -3121,6 +3137,7 @@ static int dasd_alloc_queue(struct dasd_block *block)
*/
static void dasd_setup_queue(struct dasd_block *block)
{
+ struct request_queue *q = block->request_queue;
int max;
if (block->base->features & DASD_FEATURE_USERAW) {
@@ -3135,17 +3152,16 @@ static void dasd_setup_queue(struct dasd_block *block)
} else {
max = block->base->discipline->max_blocks << block->s2b_shift;
}
- queue_flag_set_unlocked(QUEUE_FLAG_NONROT, block->request_queue);
- block->request_queue->limits.max_dev_sectors = max;
- blk_queue_logical_block_size(block->request_queue,
- block->bp_block);
- blk_queue_max_hw_sectors(block->request_queue, max);
- blk_queue_max_segments(block->request_queue, -1L);
+ queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);
+ q->limits.max_dev_sectors = max;
+ blk_queue_logical_block_size(q, block->bp_block);
+ blk_queue_max_hw_sectors(q, max);
+ blk_queue_max_segments(q, USHRT_MAX);
	/* with page sized segments we can translate each segment into
* one idaw/tidaw
*/
- blk_queue_max_segment_size(block->request_queue, PAGE_SIZE);
- blk_queue_segment_boundary(block->request_queue, PAGE_SIZE - 1);
+ blk_queue_max_segment_size(q, PAGE_SIZE);
+ blk_queue_segment_boundary(q, PAGE_SIZE - 1);
}
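Beyond introducing the local q shorthand, this hunk replaces the old -1L argument to blk_queue_max_segments() with USHRT_MAX: the limit is stored in an unsigned short, so -1L only ever worked through implicit truncation. A tiny userspace demonstration of the equivalence:

#include <limits.h>
#include <stdio.h>

int main(void)
{
        unsigned short max_segments = -1L;      /* old style: implicit truncation */

        /* both print 65535 where unsigned short is 16 bits */
        printf("%u %u\n", (unsigned int)max_segments, (unsigned int)USHRT_MAX);
        return 0;
}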
/*
@@ -3517,11 +3533,15 @@ int dasd_generic_set_offline(struct ccw_device *cdev)
struct dasd_device *device;
struct dasd_block *block;
int max_count, open_count, rc;
+ unsigned long flags;
rc = 0;
- device = dasd_device_from_cdev(cdev);
- if (IS_ERR(device))
+ spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
+ device = dasd_device_from_cdev_locked(cdev);
+ if (IS_ERR(device)) {
+ spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
return PTR_ERR(device);
+ }
/*
* We must make sure that this device is currently not in use.
@@ -3540,8 +3560,7 @@ int dasd_generic_set_offline(struct ccw_device *cdev)
pr_warn("%s: The DASD cannot be set offline while it is in use\n",
dev_name(&cdev->dev));
clear_bit(DASD_FLAG_OFFLINE, &device->flags);
- dasd_put_device(device);
- return -EBUSY;
+ goto out_busy;
}
}
@@ -3551,19 +3570,19 @@ int dasd_generic_set_offline(struct ccw_device *cdev)
* could only be called by normal offline so safe_offline flag
* needs to be removed to run normal offline and kill all I/O
*/
- if (test_and_set_bit(DASD_FLAG_OFFLINE, &device->flags)) {
+ if (test_and_set_bit(DASD_FLAG_OFFLINE, &device->flags))
/* Already doing normal offline processing */
- dasd_put_device(device);
- return -EBUSY;
- } else
+ goto out_busy;
+ else
clear_bit(DASD_FLAG_SAFE_OFFLINE, &device->flags);
-
- } else
- if (test_bit(DASD_FLAG_OFFLINE, &device->flags)) {
+ } else {
+ if (test_bit(DASD_FLAG_OFFLINE, &device->flags))
/* Already doing offline processing */
- dasd_put_device(device);
- return -EBUSY;
- }
+ goto out_busy;
+ }
+
+ set_bit(DASD_FLAG_OFFLINE, &device->flags);
+ spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
/*
* if safe_offline called set safe_offline_running flag and
@@ -3591,7 +3610,6 @@ int dasd_generic_set_offline(struct ccw_device *cdev)
goto interrupted;
}
- set_bit(DASD_FLAG_OFFLINE, &device->flags);
dasd_set_target_state(device, DASD_STATE_NEW);
/* dasd_delete_device destroys the device reference. */
block = device->block;
@@ -3610,7 +3628,14 @@ interrupted:
clear_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags);
clear_bit(DASD_FLAG_OFFLINE, &device->flags);
dasd_put_device(device);
+
return rc;
+
+out_busy:
+ dasd_put_device(device);
+ spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
+
+ return -EBUSY;
}
EXPORT_SYMBOL_GPL(dasd_generic_set_offline);
@@ -3675,14 +3700,12 @@ int dasd_generic_notify(struct ccw_device *cdev, int event)
case CIO_GONE:
case CIO_BOXED:
case CIO_NO_PATH:
- device->path_data.opm = 0;
- device->path_data.ppm = 0;
- device->path_data.npm = 0;
+ dasd_path_no_path(device);
ret = dasd_generic_last_path_gone(device);
break;
case CIO_OPER:
ret = 1;
- if (device->path_data.opm)
+ if (dasd_path_get_opm(device))
ret = dasd_generic_path_operational(device);
break;
}
@@ -3693,48 +3716,32 @@ EXPORT_SYMBOL_GPL(dasd_generic_notify);
void dasd_generic_path_event(struct ccw_device *cdev, int *path_event)
{
- int chp;
- __u8 oldopm, eventlpm;
struct dasd_device *device;
+ int chp, oldopm, hpfpm, ifccpm;
device = dasd_device_from_cdev_locked(cdev);
if (IS_ERR(device))
return;
+
+ oldopm = dasd_path_get_opm(device);
for (chp = 0; chp < 8; chp++) {
- eventlpm = 0x80 >> chp;
if (path_event[chp] & PE_PATH_GONE) {
- oldopm = device->path_data.opm;
- device->path_data.opm &= ~eventlpm;
- device->path_data.ppm &= ~eventlpm;
- device->path_data.npm &= ~eventlpm;
- if (oldopm && !device->path_data.opm) {
- dev_warn(&device->cdev->dev,
- "No verified channel paths remain "
- "for the device\n");
- DBF_DEV_EVENT(DBF_WARNING, device,
- "%s", "last verified path gone");
- dasd_eer_write(device, NULL, DASD_EER_NOPATH);
- dasd_device_set_stop_bits(device,
- DASD_STOPPED_DC_WAIT);
- }
+ dasd_path_notoper(device, chp);
}
if (path_event[chp] & PE_PATH_AVAILABLE) {
- device->path_data.opm &= ~eventlpm;
- device->path_data.ppm &= ~eventlpm;
- device->path_data.npm &= ~eventlpm;
- device->path_data.tbvpm |= eventlpm;
+ dasd_path_available(device, chp);
dasd_schedule_device_bh(device);
}
if (path_event[chp] & PE_PATHGROUP_ESTABLISHED) {
- if (!(device->path_data.opm & eventlpm) &&
- !(device->path_data.tbvpm & eventlpm)) {
+ if (!dasd_path_is_operational(device, chp) &&
+ !dasd_path_need_verify(device, chp)) {
/*
* we cannot establish a pathgroup on an
* unavailable path, so trigger a path
* verification first
*/
- device->path_data.tbvpm |= eventlpm;
- dasd_schedule_device_bh(device);
+ dasd_path_available(device, chp);
+ dasd_schedule_device_bh(device);
}
DBF_DEV_EVENT(DBF_WARNING, device, "%s",
"Pathgroup re-established\n");
@@ -3742,45 +3749,65 @@ void dasd_generic_path_event(struct ccw_device *cdev, int *path_event)
device->discipline->kick_validate(device);
}
}
+ hpfpm = dasd_path_get_hpfpm(device);
+ ifccpm = dasd_path_get_ifccpm(device);
+ if (!dasd_path_get_opm(device) && hpfpm) {
+ /*
+ * device has no operational paths but at least one path is
+ * disabled due to HPF errors
+ * disable HPF at all and use the path(s) again
+ */
+ if (device->discipline->disable_hpf)
+ device->discipline->disable_hpf(device);
+ dasd_device_set_stop_bits(device, DASD_STOPPED_NOT_ACC);
+ dasd_path_set_tbvpm(device, hpfpm);
+ dasd_schedule_device_bh(device);
+ dasd_schedule_requeue(device);
+ } else if (!dasd_path_get_opm(device) && ifccpm) {
+ /*
+ * device has no operational paths but at least one path is
+ * disabled due to IFCC errors
+ * trigger path verification on paths with IFCC errors
+ */
+ dasd_path_set_tbvpm(device, ifccpm);
+ dasd_schedule_device_bh(device);
+ }
+ if (oldopm && !dasd_path_get_opm(device) && !hpfpm && !ifccpm) {
+ dev_warn(&device->cdev->dev,
+ "No verified channel paths remain for the device\n");
+ DBF_DEV_EVENT(DBF_WARNING, device,
+ "%s", "last verified path gone");
+ dasd_eer_write(device, NULL, DASD_EER_NOPATH);
+ dasd_device_set_stop_bits(device,
+ DASD_STOPPED_DC_WAIT);
+ }
dasd_put_device(device);
}
EXPORT_SYMBOL_GPL(dasd_generic_path_event);
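The rework above turns the old per-path warning into a three-way decision once no operational paths remain: first recover paths disabled for HPF errors (switching HPF off entirely), then re-verify paths disabled for IFCC errors, and only warn about the last verified path being gone when neither reserve exists. A compact, single-file sketch of the ladder (the masks and printouts are illustrative, not the kernel API):

#include <stdio.h>

/* The decision ladder from dasd_generic_path_event(), in isolation. */
static void handle_path_loss(unsigned char opm, unsigned char hpfpm,
                             unsigned char ifccpm, unsigned char oldopm)
{
        if (!opm && hpfpm)
                printf("disable HPF, re-verify paths 0x%02x\n", hpfpm);
        else if (!opm && ifccpm)
                printf("re-verify IFCC-disabled paths 0x%02x\n", ifccpm);
        else if (oldopm && !opm && !hpfpm && !ifccpm)
                printf("last verified path gone\n");
}

int main(void)
{
        handle_path_loss(0x00, 0x80, 0x00, 0xff);       /* HPF recovery wins */
        handle_path_loss(0x00, 0x00, 0x40, 0xff);       /* IFCC re-verify */
        handle_path_loss(0x00, 0x00, 0x00, 0xff);       /* nothing left: warn */
        return 0;
}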
int dasd_generic_verify_path(struct dasd_device *device, __u8 lpm)
{
- if (!device->path_data.opm && lpm) {
- device->path_data.opm = lpm;
+ if (!dasd_path_get_opm(device) && lpm) {
+ dasd_path_set_opm(device, lpm);
dasd_generic_path_operational(device);
} else
- device->path_data.opm |= lpm;
+ dasd_path_add_opm(device, lpm);
return 0;
}
EXPORT_SYMBOL_GPL(dasd_generic_verify_path);
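All of the dasd_path_* accessors used throughout this patch operate on one-byte path masks in which bit 7 (0x80) denotes channel path 0 and bit 0 denotes path 7; pathmask_to_pos() maps a single-bit mask to the matching device->path[] index. A minimal userspace sketch of the convention (the opm value is made up for illustration):

#include <stdio.h>
#include <strings.h>            /* ffs() */

/* Bit 7 (0x80) is channel path 0, bit 0 is channel path 7. */
static int pathmask_to_pos(unsigned char mask)
{
        return 8 - ffs(mask);   /* 0x80 -> 0, 0x40 -> 1, ..., 0x01 -> 7 */
}

int main(void)
{
        unsigned char opm = 0xc1;       /* paths 0, 1 and 7 operational */
        unsigned char lpm;

        /* the driver walks paths with exactly this kind of loop */
        for (lpm = 0x80; lpm; lpm >>= 1)
                if (opm & lpm)
                        printf("path %d operational\n", pathmask_to_pos(lpm));
        return 0;
}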
-
-int dasd_generic_pm_freeze(struct ccw_device *cdev)
+/*
+ * clear active requests and requeue them to block layer if possible
+ */
+static int dasd_generic_requeue_all_requests(struct dasd_device *device)
{
- struct dasd_device *device = dasd_device_from_cdev(cdev);
- struct list_head freeze_queue;
+ struct list_head requeue_queue;
struct dasd_ccw_req *cqr, *n;
struct dasd_ccw_req *refers;
int rc;
- if (IS_ERR(device))
- return PTR_ERR(device);
-
- /* mark device as suspended */
- set_bit(DASD_FLAG_SUSPENDED, &device->flags);
-
- if (device->discipline->freeze)
- rc = device->discipline->freeze(device);
-
- /* disallow new I/O */
- dasd_device_set_stop_bits(device, DASD_STOPPED_PM);
-
- /* clear active requests and requeue them to block layer if possible */
- INIT_LIST_HEAD(&freeze_queue);
- spin_lock_irq(get_ccwdev_lock(cdev));
+ INIT_LIST_HEAD(&requeue_queue);
+ spin_lock_irq(get_ccwdev_lock(device->cdev));
rc = 0;
list_for_each_entry_safe(cqr, n, &device->ccw_queue, devlist) {
/* Check status and move request to flush_queue */
@@ -3791,25 +3818,22 @@ int dasd_generic_pm_freeze(struct ccw_device *cdev)
dev_err(&device->cdev->dev,
"Unable to terminate request %p "
"on suspend\n", cqr);
- spin_unlock_irq(get_ccwdev_lock(cdev));
+ spin_unlock_irq(get_ccwdev_lock(device->cdev));
dasd_put_device(device);
return rc;
}
}
- list_move_tail(&cqr->devlist, &freeze_queue);
+ list_move_tail(&cqr->devlist, &requeue_queue);
}
- spin_unlock_irq(get_ccwdev_lock(cdev));
+ spin_unlock_irq(get_ccwdev_lock(device->cdev));
- list_for_each_entry_safe(cqr, n, &freeze_queue, devlist) {
+ list_for_each_entry_safe(cqr, n, &requeue_queue, devlist) {
wait_event(dasd_flush_wq,
(cqr->status != DASD_CQR_CLEAR_PENDING));
- if (cqr->status == DASD_CQR_CLEARED)
- cqr->status = DASD_CQR_QUEUED;
- /* requeue requests to blocklayer will only work for
- block device requests */
- if (_dasd_requeue_request(cqr))
- continue;
+ /* mark sleepon requests as ended */
+ if (cqr->callback_data == DASD_SLEEPON_START_TAG)
+ cqr->callback_data = DASD_SLEEPON_END_TAG;
/* remove requests from device and block queue */
list_del_init(&cqr->devlist);
@@ -3821,6 +3845,14 @@ int dasd_generic_pm_freeze(struct ccw_device *cdev)
dasd_free_erp_request(cqr, cqr->memdev);
cqr = refers;
}
+
+ /*
+		 * requeueing requests to the block layer will only work
+ * for block device requests
+ */
+ if (_dasd_requeue_request(cqr))
+ continue;
+
if (cqr->block)
list_del_init(&cqr->blocklist);
cqr->block->base->discipline->free_cp(
@@ -3831,15 +3863,56 @@ int dasd_generic_pm_freeze(struct ccw_device *cdev)
* if requests remain then they are internal requests
* and go back to the device queue
*/
- if (!list_empty(&freeze_queue)) {
+ if (!list_empty(&requeue_queue)) {
/* move requeue_queue to start of the ccw_queue */
- spin_lock_irq(get_ccwdev_lock(cdev));
- list_splice_tail(&freeze_queue, &device->ccw_queue);
- spin_unlock_irq(get_ccwdev_lock(cdev));
+ spin_lock_irq(get_ccwdev_lock(device->cdev));
+ list_splice_tail(&requeue_queue, &device->ccw_queue);
+ spin_unlock_irq(get_ccwdev_lock(device->cdev));
}
- dasd_put_device(device);
+	/* wake up generic waitqueue for any sleepon requests that have ended */
+ wake_up(&generic_waitq);
return rc;
}
+
+static void do_requeue_requests(struct work_struct *work)
+{
+ struct dasd_device *device = container_of(work, struct dasd_device,
+ requeue_requests);
+ dasd_generic_requeue_all_requests(device);
+ dasd_device_remove_stop_bits(device, DASD_STOPPED_NOT_ACC);
+ if (device->block)
+ dasd_schedule_block_bh(device->block);
+ dasd_put_device(device);
+}
+
+void dasd_schedule_requeue(struct dasd_device *device)
+{
+ dasd_get_device(device);
+	/* queue call to do_requeue_requests to the kernel event daemon. */
+ if (!schedule_work(&device->requeue_requests))
+ dasd_put_device(device);
+}
+EXPORT_SYMBOL(dasd_schedule_requeue);
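dasd_schedule_requeue() follows the usual kernel rule for handing a reference to deferred work: take a reference before schedule_work(), give it back immediately if the work item was already queued, and let the work function (do_requeue_requests() above) drop it on completion. A single-threaded sketch of that ownership rule, with all names as stand-ins rather than the real kernel API:

#include <stdbool.h>
#include <stdio.h>

struct device_state {
        int refcount;
        bool work_pending;
};

static void get_device(struct device_state *d) { d->refcount++; }
static void put_device(struct device_state *d) { d->refcount--; }

/* Stand-in for schedule_work(): false means "already queued". */
static bool queue_work_once(struct device_state *d)
{
        if (d->work_pending)
                return false;
        d->work_pending = true;
        return true;
}

/* Pin the device for the worker; undo the pin if nothing was queued. */
static void schedule_requeue(struct device_state *d)
{
        get_device(d);
        if (!queue_work_once(d))
                put_device(d);          /* work already pending, drop extra ref */
}

int main(void)
{
        struct device_state dev = { .refcount = 1, .work_pending = false };

        schedule_requeue(&dev);         /* ref now owned by the pending work */
        schedule_requeue(&dev);         /* already queued: extra ref dropped */
        printf("refcount %d (base ref + one held by the work)\n", dev.refcount);
        return 0;
}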
+
+int dasd_generic_pm_freeze(struct ccw_device *cdev)
+{
+ struct dasd_device *device = dasd_device_from_cdev(cdev);
+ int rc;
+
+ if (IS_ERR(device))
+ return PTR_ERR(device);
+
+ /* mark device as suspended */
+ set_bit(DASD_FLAG_SUSPENDED, &device->flags);
+
+ if (device->discipline->freeze)
+ rc = device->discipline->freeze(device);
+
+ /* disallow new I/O */
+ dasd_device_set_stop_bits(device, DASD_STOPPED_PM);
+
+ return dasd_generic_requeue_all_requests(device);
+}
EXPORT_SYMBOL_GPL(dasd_generic_pm_freeze);
int dasd_generic_restore_device(struct ccw_device *cdev)
diff --git a/drivers/s390/block/dasd_3990_erp.c b/drivers/s390/block/dasd_3990_erp.c
index 8305ab688d57..774da20ceb58 100644
--- a/drivers/s390/block/dasd_3990_erp.c
+++ b/drivers/s390/block/dasd_3990_erp.c
@@ -152,7 +152,7 @@ dasd_3990_erp_alternate_path(struct dasd_ccw_req * erp)
opm = ccw_device_get_path_mask(device->cdev);
spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
if (erp->lpm == 0)
- erp->lpm = device->path_data.opm &
+ erp->lpm = dasd_path_get_opm(device) &
~(erp->irb.esw.esw0.sublog.lpum);
else
erp->lpm &= ~(erp->irb.esw.esw0.sublog.lpum);
@@ -273,7 +273,7 @@ static struct dasd_ccw_req *dasd_3990_erp_action_1(struct dasd_ccw_req *erp)
!test_bit(DASD_CQR_VERIFY_PATH, &erp->flags)) {
erp->status = DASD_CQR_FILLED;
erp->retries = 10;
- erp->lpm = erp->startdev->path_data.opm;
+ erp->lpm = dasd_path_get_opm(erp->startdev);
erp->function = dasd_3990_erp_action_1_sec;
}
return erp;
@@ -674,7 +674,7 @@ dasd_3990_handle_env_data(struct dasd_ccw_req * erp, char *sense)
break;
case 0x0D:
dev_warn(&device->cdev->dev,
- "FORMAT 4 - No syn byte in count "
+ "FORMAT 4 - No sync byte in count "
"address area; offset active\n");
break;
case 0x0E:
@@ -684,7 +684,7 @@ dasd_3990_handle_env_data(struct dasd_ccw_req * erp, char *sense)
break;
case 0x0F:
dev_warn(&device->cdev->dev,
- "FORMAT 4 - No syn byte in data area; "
+ "FORMAT 4 - No sync byte in data area; "
"offset active\n");
break;
default:
@@ -999,7 +999,7 @@ dasd_3990_handle_env_data(struct dasd_ccw_req * erp, char *sense)
break;
default:
dev_warn(&device->cdev->dev,
- "FORMAT D - Reserved\n");
+ "FORMAT F - Reserved\n");
}
break;
@@ -1926,7 +1926,7 @@ dasd_3990_erp_compound_path(struct dasd_ccw_req * erp, char *sense)
!test_bit(DASD_CQR_VERIFY_PATH, &erp->flags)) {
/* reset the lpm and the status to be able to
* try further actions. */
- erp->lpm = erp->startdev->path_data.opm;
+ erp->lpm = dasd_path_get_opm(erp->startdev);
erp->status = DASD_CQR_NEED_ERP;
}
}
@@ -2208,6 +2208,51 @@ dasd_3990_erp_inspect_32(struct dasd_ccw_req * erp, char *sense)
} /* end dasd_3990_erp_inspect_32 */
+static void dasd_3990_erp_disable_path(struct dasd_device *device, __u8 lpum)
+{
+ int pos = pathmask_to_pos(lpum);
+
+ /* no remaining path, cannot disable */
+ if (!(dasd_path_get_opm(device) & ~lpum))
+ return;
+
+ dev_err(&device->cdev->dev,
+ "Path %x.%02x (pathmask %02x) is disabled - IFCC threshold exceeded\n",
+ device->path[pos].cssid, device->path[pos].chpid, lpum);
+ dasd_path_remove_opm(device, lpum);
+ dasd_path_add_ifccpm(device, lpum);
+ device->path[pos].errorclk = 0;
+ atomic_set(&device->path[pos].error_count, 0);
+}
+
+static void dasd_3990_erp_account_error(struct dasd_ccw_req *erp)
+{
+ struct dasd_device *device = erp->startdev;
+ __u8 lpum = erp->refers->irb.esw.esw1.lpum;
+ int pos = pathmask_to_pos(lpum);
+ unsigned long long clk;
+
+ if (!device->path_thrhld)
+ return;
+
+ clk = get_tod_clock();
+ /*
+	 * check whether the last error occurred longer ago than the timeout;
+	 * if so, reset the error state
+ */
+ if ((tod_to_ns(clk - device->path[pos].errorclk) / NSEC_PER_SEC)
+ >= device->path_interval) {
+ atomic_set(&device->path[pos].error_count, 0);
+ device->path[pos].errorclk = 0;
+ }
+ atomic_inc(&device->path[pos].error_count);
+ device->path[pos].errorclk = clk;
+	/* threshold exceeded: disable the path if possible */
+ if (atomic_read(&device->path[pos].error_count) >=
+ device->path_thrhld)
+ dasd_3990_erp_disable_path(device, lpum);
+}
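dasd_3990_erp_account_error() implements a leaky error counter: if the previous IFCC/CCC error lies further back than path_interval seconds the counter restarts at zero, otherwise it accumulates until path_thrhld trips and the path is taken out of the operational mask. A userspace model of the same logic using wall-clock time (the two constants are illustrative, not the ECKD defaults):

#include <stdio.h>
#include <time.h>

#define PATH_THRHLD     3       /* illustrative, not the ECKD default */
#define PATH_INTERVAL   300     /* seconds without errors before a reset */

struct path_state {
        time_t errorclk;                /* time of the last accounted error */
        unsigned int error_count;
};

static void account_error(struct path_state *p)
{
        time_t now = time(NULL);

        /* last error longer ago than the interval: start counting afresh */
        if (p->errorclk && now - p->errorclk >= PATH_INTERVAL) {
                p->error_count = 0;
                p->errorclk = 0;
        }
        p->error_count++;
        p->errorclk = now;
        if (p->error_count >= PATH_THRHLD)
                printf("threshold exceeded: disable the path\n");
}

int main(void)
{
        struct path_state p = { 0, 0 };
        int i;

        for (i = 0; i < 3; i++)         /* three quick errors trip the limit */
                account_error(&p);
        return 0;
}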
+
/*
*****************************************************************************
* main ERP control functions (24 and 32 byte sense)
@@ -2237,6 +2282,7 @@ dasd_3990_erp_control_check(struct dasd_ccw_req *erp)
| SCHN_STAT_CHN_CTRL_CHK)) {
DBF_DEV_EVENT(DBF_WARNING, device, "%s",
"channel or interface control check");
+ dasd_3990_erp_account_error(erp);
erp = dasd_3990_erp_action_4(erp, NULL);
}
return erp;
diff --git a/drivers/s390/block/dasd_devmap.c b/drivers/s390/block/dasd_devmap.c
index 15a1a70cace9..84ca314c87e3 100644
--- a/drivers/s390/block/dasd_devmap.c
+++ b/drivers/s390/block/dasd_devmap.c
@@ -725,27 +725,15 @@ static ssize_t dasd_ff_show(struct device *dev, struct device_attribute *attr,
static ssize_t dasd_ff_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
- struct dasd_devmap *devmap;
- int val;
- char *endp;
-
- devmap = dasd_devmap_from_cdev(to_ccwdev(dev));
- if (IS_ERR(devmap))
- return PTR_ERR(devmap);
+ unsigned int val;
+ int rc;
- val = simple_strtoul(buf, &endp, 0);
- if (((endp + 1) < (buf + count)) || (val > 1))
+ if (kstrtouint(buf, 0, &val) || val > 1)
return -EINVAL;
- spin_lock(&dasd_devmap_lock);
- if (val)
- devmap->features |= DASD_FEATURE_FAILFAST;
- else
- devmap->features &= ~DASD_FEATURE_FAILFAST;
- if (devmap->device)
- devmap->device->features = devmap->features;
- spin_unlock(&dasd_devmap_lock);
- return count;
+ rc = dasd_set_feature(to_ccwdev(dev), DASD_FEATURE_FAILFAST, val);
+
+ return rc ? : count;
}
static DEVICE_ATTR(failfast, 0644, dasd_ff_show, dasd_ff_store);
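The sysfs stores in this file are converted from simple_strtoul() plus hand-rolled end-pointer checks to kstrtouint(), which rejects trailing garbage and overflow in one call; the GNU `rc ? : count` shorthand then returns rc when it is nonzero and count otherwise. A rough userspace analogue of the strict parse (kstrtouint itself tolerates only a single trailing newline; this sketch mirrors that):

#include <errno.h>
#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

/* Rough analogue of kstrtouint(): whole-string, overflow-checked. */
static int parse_uint(const char *s, unsigned int base, unsigned int *res)
{
        char *end;
        unsigned long val;

        errno = 0;
        val = strtoul(s, &end, base);
        if (errno || end == s || val > UINT_MAX)
                return -EINVAL;
        if (*end == '\n')               /* sysfs input usually ends in '\n' */
                end++;
        if (*end)
                return -EINVAL;         /* trailing garbage rejected */
        *res = (unsigned int)val;
        return 0;
}

int main(void)
{
        unsigned int v;

        printf("%d\n", parse_uint("1\n", 0, &v));       /* 0: accepted */
        printf("%d\n", parse_uint("1x", 0, &v));        /* -EINVAL: rejected */
        return 0;
}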
@@ -771,32 +759,41 @@ static ssize_t
dasd_ro_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
- struct dasd_devmap *devmap;
+ struct ccw_device *cdev = to_ccwdev(dev);
struct dasd_device *device;
- int val;
- char *endp;
-
- devmap = dasd_devmap_from_cdev(to_ccwdev(dev));
- if (IS_ERR(devmap))
- return PTR_ERR(devmap);
+ unsigned long flags;
+ unsigned int val;
+ int rc;
- val = simple_strtoul(buf, &endp, 0);
- if (((endp + 1) < (buf + count)) || (val > 1))
+ if (kstrtouint(buf, 0, &val) || val > 1)
return -EINVAL;
- spin_lock(&dasd_devmap_lock);
- if (val)
- devmap->features |= DASD_FEATURE_READONLY;
- else
- devmap->features &= ~DASD_FEATURE_READONLY;
- device = devmap->device;
- if (device) {
- device->features = devmap->features;
- val = val || test_bit(DASD_FLAG_DEVICE_RO, &device->flags);
+ rc = dasd_set_feature(cdev, DASD_FEATURE_READONLY, val);
+ if (rc)
+ return rc;
+
+ device = dasd_device_from_cdev(cdev);
+ if (IS_ERR(device))
+ return PTR_ERR(device);
+
+ spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
+ val = val || test_bit(DASD_FLAG_DEVICE_RO, &device->flags);
+
+ if (!device->block || !device->block->gdp ||
+ test_bit(DASD_FLAG_OFFLINE, &device->flags)) {
+ spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
+ goto out;
}
- spin_unlock(&dasd_devmap_lock);
- if (device && device->block && device->block->gdp)
- set_disk_ro(device->block->gdp, val);
+ /* Increase open_count to avoid losing the block device */
+ atomic_inc(&device->block->open_count);
+ spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
+
+ set_disk_ro(device->block->gdp, val);
+ atomic_dec(&device->block->open_count);
+
+out:
+ dasd_put_device(device);
+
return count;
}
@@ -823,27 +820,15 @@ static ssize_t
dasd_erplog_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
- struct dasd_devmap *devmap;
- int val;
- char *endp;
-
- devmap = dasd_devmap_from_cdev(to_ccwdev(dev));
- if (IS_ERR(devmap))
- return PTR_ERR(devmap);
+ unsigned int val;
+ int rc;
- val = simple_strtoul(buf, &endp, 0);
- if (((endp + 1) < (buf + count)) || (val > 1))
+ if (kstrtouint(buf, 0, &val) || val > 1)
return -EINVAL;
- spin_lock(&dasd_devmap_lock);
- if (val)
- devmap->features |= DASD_FEATURE_ERPLOG;
- else
- devmap->features &= ~DASD_FEATURE_ERPLOG;
- if (devmap->device)
- devmap->device->features = devmap->features;
- spin_unlock(&dasd_devmap_lock);
- return count;
+ rc = dasd_set_feature(to_ccwdev(dev), DASD_FEATURE_ERPLOG, val);
+
+ return rc ? : count;
}
static DEVICE_ATTR(erplog, 0644, dasd_erplog_show, dasd_erplog_store);
@@ -871,16 +856,14 @@ dasd_use_diag_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct dasd_devmap *devmap;
+ unsigned int val;
ssize_t rc;
- int val;
- char *endp;
devmap = dasd_devmap_from_cdev(to_ccwdev(dev));
if (IS_ERR(devmap))
return PTR_ERR(devmap);
- val = simple_strtoul(buf, &endp, 0);
- if (((endp + 1) < (buf + count)) || (val > 1))
+ if (kstrtouint(buf, 0, &val) || val > 1)
return -EINVAL;
spin_lock(&dasd_devmap_lock);
@@ -994,10 +977,12 @@ dasd_access_show(struct device *dev, struct device_attribute *attr,
if (IS_ERR(device))
return PTR_ERR(device);
- if (device->discipline->host_access_count)
- count = device->discipline->host_access_count(device);
- else
+ if (!device->discipline)
+ count = -ENODEV;
+ else if (!device->discipline->host_access_count)
count = -EOPNOTSUPP;
+ else
+ count = device->discipline->host_access_count(device);
dasd_put_device(device);
if (count < 0)
@@ -1197,27 +1182,25 @@ static ssize_t
dasd_eer_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
- struct dasd_devmap *devmap;
- int val, rc;
- char *endp;
+ struct dasd_device *device;
+ unsigned int val;
+ int rc = 0;
- devmap = dasd_devmap_from_cdev(to_ccwdev(dev));
- if (IS_ERR(devmap))
- return PTR_ERR(devmap);
- if (!devmap->device)
- return -ENODEV;
+ device = dasd_device_from_cdev(to_ccwdev(dev));
+ if (IS_ERR(device))
+ return PTR_ERR(device);
- val = simple_strtoul(buf, &endp, 0);
- if (((endp + 1) < (buf + count)) || (val > 1))
+ if (kstrtouint(buf, 0, &val) || val > 1)
return -EINVAL;
- if (val) {
- rc = dasd_eer_enable(devmap->device);
- if (rc)
- return rc;
- } else
- dasd_eer_disable(devmap->device);
- return count;
+ if (val)
+ rc = dasd_eer_enable(device);
+ else
+ dasd_eer_disable(device);
+
+ dasd_put_device(device);
+
+ return rc ? : count;
}
static DEVICE_ATTR(eer_enabled, 0644, dasd_eer_show, dasd_eer_store);
@@ -1360,6 +1343,50 @@ dasd_timeout_store(struct device *dev, struct device_attribute *attr,
static DEVICE_ATTR(timeout, 0644,
dasd_timeout_show, dasd_timeout_store);
+
+static ssize_t
+dasd_path_reset_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct dasd_device *device;
+ unsigned int val;
+
+ device = dasd_device_from_cdev(to_ccwdev(dev));
+ if (IS_ERR(device))
+ return -ENODEV;
+
+ if ((kstrtouint(buf, 16, &val) != 0) || val > 0xff)
+ val = 0;
+
+ if (device->discipline && device->discipline->reset_path)
+ device->discipline->reset_path(device, (__u8) val);
+
+ dasd_put_device(device);
+ return count;
+}
+
+static DEVICE_ATTR(path_reset, 0200, NULL, dasd_path_reset_store);
+
+static ssize_t dasd_hpf_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct dasd_device *device;
+ int hpf;
+
+ device = dasd_device_from_cdev(to_ccwdev(dev));
+ if (IS_ERR(device))
+ return -ENODEV;
+ if (!device->discipline || !device->discipline->hpf_enabled) {
+ dasd_put_device(device);
+ return snprintf(buf, PAGE_SIZE, "%d\n", dasd_nofcx);
+ }
+ hpf = device->discipline->hpf_enabled(device);
+ dasd_put_device(device);
+ return snprintf(buf, PAGE_SIZE, "%d\n", hpf);
+}
+
+static DEVICE_ATTR(hpf, 0444, dasd_hpf_show, NULL);
+
static ssize_t dasd_reservation_policy_show(struct device *dev,
struct device_attribute *attr,
char *buf)
@@ -1385,27 +1412,17 @@ static ssize_t dasd_reservation_policy_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
- struct dasd_devmap *devmap;
+ struct ccw_device *cdev = to_ccwdev(dev);
int rc;
- devmap = dasd_devmap_from_cdev(to_ccwdev(dev));
- if (IS_ERR(devmap))
- return PTR_ERR(devmap);
- rc = 0;
- spin_lock(&dasd_devmap_lock);
if (sysfs_streq("ignore", buf))
- devmap->features &= ~DASD_FEATURE_FAILONSLCK;
+ rc = dasd_set_feature(cdev, DASD_FEATURE_FAILONSLCK, 0);
else if (sysfs_streq("fail", buf))
- devmap->features |= DASD_FEATURE_FAILONSLCK;
+ rc = dasd_set_feature(cdev, DASD_FEATURE_FAILONSLCK, 1);
else
rc = -EINVAL;
- if (devmap->device)
- devmap->device->features = devmap->features;
- spin_unlock(&dasd_devmap_lock);
- if (rc)
- return rc;
- else
- return count;
+
+ return rc ? : count;
}
static DEVICE_ATTR(reservation_policy, 0644,
@@ -1461,25 +1478,120 @@ static ssize_t dasd_pm_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct dasd_device *device;
- u8 opm, nppm, cablepm, cuirpm, hpfpm;
+ u8 opm, nppm, cablepm, cuirpm, hpfpm, ifccpm;
device = dasd_device_from_cdev(to_ccwdev(dev));
if (IS_ERR(device))
return sprintf(buf, "0\n");
- opm = device->path_data.opm;
- nppm = device->path_data.npm;
- cablepm = device->path_data.cablepm;
- cuirpm = device->path_data.cuirpm;
- hpfpm = device->path_data.hpfpm;
+ opm = dasd_path_get_opm(device);
+ nppm = dasd_path_get_nppm(device);
+ cablepm = dasd_path_get_cablepm(device);
+ cuirpm = dasd_path_get_cuirpm(device);
+ hpfpm = dasd_path_get_hpfpm(device);
+ ifccpm = dasd_path_get_ifccpm(device);
dasd_put_device(device);
- return sprintf(buf, "%02x %02x %02x %02x %02x\n", opm, nppm,
- cablepm, cuirpm, hpfpm);
+ return sprintf(buf, "%02x %02x %02x %02x %02x %02x\n", opm, nppm,
+ cablepm, cuirpm, hpfpm, ifccpm);
}
static DEVICE_ATTR(path_masks, 0444, dasd_pm_show, NULL);
+/*
+ * threshold value for IFCC/CCC errors
+ */
+static ssize_t
+dasd_path_threshold_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct dasd_device *device;
+ int len;
+
+ device = dasd_device_from_cdev(to_ccwdev(dev));
+ if (IS_ERR(device))
+ return -ENODEV;
+ len = snprintf(buf, PAGE_SIZE, "%lu\n", device->path_thrhld);
+ dasd_put_device(device);
+ return len;
+}
+
+static ssize_t
+dasd_path_threshold_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct dasd_device *device;
+ unsigned long flags;
+ unsigned long val;
+
+ device = dasd_device_from_cdev(to_ccwdev(dev));
+ if (IS_ERR(device))
+ return -ENODEV;
+
+ if ((kstrtoul(buf, 10, &val) != 0) ||
+ (val > DASD_THRHLD_MAX) || val == 0) {
+ dasd_put_device(device);
+ return -EINVAL;
+ }
+ spin_lock_irqsave(get_ccwdev_lock(to_ccwdev(dev)), flags);
+ if (val)
+ device->path_thrhld = val;
+ spin_unlock_irqrestore(get_ccwdev_lock(to_ccwdev(dev)), flags);
+ dasd_put_device(device);
+ return count;
+}
+
+static DEVICE_ATTR(path_threshold, 0644, dasd_path_threshold_show,
+ dasd_path_threshold_store);
+/*
+ * interval for IFCC/CCC checks
+ * i.e. the time without any IFCC/CCC error before the error counter
+ * gets reset
+ */
+static ssize_t
+dasd_path_interval_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct dasd_device *device;
+ int len;
+
+ device = dasd_device_from_cdev(to_ccwdev(dev));
+ if (IS_ERR(device))
+ return -ENODEV;
+ len = snprintf(buf, PAGE_SIZE, "%lu\n", device->path_interval);
+ dasd_put_device(device);
+ return len;
+}
+
+static ssize_t
+dasd_path_interval_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct dasd_device *device;
+ unsigned long flags;
+ unsigned long val;
+
+ device = dasd_device_from_cdev(to_ccwdev(dev));
+ if (IS_ERR(device))
+ return -ENODEV;
+
+ if ((kstrtoul(buf, 10, &val) != 0) ||
+ (val > DASD_INTERVAL_MAX) || val == 0) {
+ dasd_put_device(device);
+ return -EINVAL;
+ }
+ spin_lock_irqsave(get_ccwdev_lock(to_ccwdev(dev)), flags);
+ if (val)
+ device->path_interval = val;
+ spin_unlock_irqrestore(get_ccwdev_lock(to_ccwdev(dev)), flags);
+ dasd_put_device(device);
+ return count;
+}
+
+static DEVICE_ATTR(path_interval, 0644, dasd_path_interval_show,
+ dasd_path_interval_store);
+
+
static struct attribute * dasd_attrs[] = {
&dev_attr_readonly.attr,
&dev_attr_discipline.attr,
@@ -1500,6 +1612,10 @@ static struct attribute * dasd_attrs[] = {
&dev_attr_safe_offline.attr,
&dev_attr_host_access_count.attr,
&dev_attr_path_masks.attr,
+ &dev_attr_path_threshold.attr,
+ &dev_attr_path_interval.attr,
+ &dev_attr_path_reset.attr,
+ &dev_attr_hpf.attr,
NULL,
};
@@ -1531,7 +1647,7 @@ dasd_set_feature(struct ccw_device *cdev, int feature, int flag)
{
struct dasd_devmap *devmap;
- devmap = dasd_find_busid(dev_name(&cdev->dev));
+ devmap = dasd_devmap_from_cdev(cdev);
if (IS_ERR(devmap))
return PTR_ERR(devmap);
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index a7a88476e215..67bf50c9946f 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -1042,8 +1042,11 @@ static void dasd_eckd_clear_conf_data(struct dasd_device *device)
private->conf_data = NULL;
private->conf_len = 0;
for (i = 0; i < 8; i++) {
- kfree(private->path_conf_data[i]);
- private->path_conf_data[i] = NULL;
+ kfree(device->path[i].conf_data);
+ device->path[i].conf_data = NULL;
+ device->path[i].cssid = 0;
+ device->path[i].ssid = 0;
+ device->path[i].chpid = 0;
}
}
@@ -1055,13 +1058,14 @@ static int dasd_eckd_read_conf(struct dasd_device *device)
int rc, path_err, pos;
__u8 lpm, opm;
struct dasd_eckd_private *private, path_private;
- struct dasd_path *path_data;
struct dasd_uid *uid;
char print_path_uid[60], print_device_uid[60];
+ struct channel_path_desc *chp_desc;
+ struct subchannel_id sch_id;
private = device->private;
- path_data = &device->path_data;
opm = ccw_device_get_path_mask(device->cdev);
+ ccw_device_get_schid(device->cdev, &sch_id);
conf_data_saved = 0;
path_err = 0;
/* get configuration data per operational path */
@@ -1081,7 +1085,7 @@ static int dasd_eckd_read_conf(struct dasd_device *device)
"No configuration data "
"retrieved");
/* no further analysis possible */
- path_data->opm |= lpm;
+ dasd_path_add_opm(device, opm);
continue; /* no error */
}
/* save first valid configuration data */
@@ -1098,8 +1102,13 @@ static int dasd_eckd_read_conf(struct dasd_device *device)
}
pos = pathmask_to_pos(lpm);
/* store per path conf_data */
- private->path_conf_data[pos] =
- (struct dasd_conf_data *) conf_data;
+ device->path[pos].conf_data = conf_data;
+ device->path[pos].cssid = sch_id.cssid;
+ device->path[pos].ssid = sch_id.ssid;
+ chp_desc = ccw_device_get_chp_desc(device->cdev, pos);
+ if (chp_desc)
+ device->path[pos].chpid = chp_desc->chpid;
+ kfree(chp_desc);
/*
* build device UID that other path data
* can be compared to it
@@ -1154,42 +1163,66 @@ static int dasd_eckd_read_conf(struct dasd_device *device)
"device %s instead of %s\n", lpm,
print_path_uid, print_device_uid);
path_err = -EINVAL;
- path_data->cablepm |= lpm;
+ dasd_path_add_cablepm(device, lpm);
continue;
}
pos = pathmask_to_pos(lpm);
/* store per path conf_data */
- private->path_conf_data[pos] =
- (struct dasd_conf_data *) conf_data;
+ device->path[pos].conf_data = conf_data;
+ device->path[pos].cssid = sch_id.cssid;
+ device->path[pos].ssid = sch_id.ssid;
+ chp_desc = ccw_device_get_chp_desc(device->cdev, pos);
+ if (chp_desc)
+ device->path[pos].chpid = chp_desc->chpid;
+ kfree(chp_desc);
path_private.conf_data = NULL;
path_private.conf_len = 0;
}
switch (dasd_eckd_path_access(conf_data, conf_len)) {
case 0x02:
- path_data->npm |= lpm;
+ dasd_path_add_nppm(device, lpm);
break;
case 0x03:
- path_data->ppm |= lpm;
+ dasd_path_add_ppm(device, lpm);
break;
}
- if (!path_data->opm) {
- path_data->opm = lpm;
+ if (!dasd_path_get_opm(device)) {
+ dasd_path_set_opm(device, lpm);
dasd_generic_path_operational(device);
} else {
- path_data->opm |= lpm;
+ dasd_path_add_opm(device, lpm);
}
- /*
- * if the path is used
- * it should not be in one of the negative lists
- */
- path_data->cablepm &= ~lpm;
- path_data->hpfpm &= ~lpm;
- path_data->cuirpm &= ~lpm;
}
return path_err;
}
+static u32 get_fcx_max_data(struct dasd_device *device)
+{
+ struct dasd_eckd_private *private = device->private;
+ int fcx_in_css, fcx_in_gneq, fcx_in_features;
+ int tpm, mdc;
+
+ if (dasd_nofcx)
+ return 0;
+ /* is transport mode supported? */
+ fcx_in_css = css_general_characteristics.fcx;
+ fcx_in_gneq = private->gneq->reserved2[7] & 0x04;
+ fcx_in_features = private->features.feature[40] & 0x80;
+ tpm = fcx_in_css && fcx_in_gneq && fcx_in_features;
+
+ if (!tpm)
+ return 0;
+
+ mdc = ccw_device_get_mdc(device->cdev, 0);
+ if (mdc < 0) {
+ dev_warn(&device->cdev->dev, "Detecting the maximum supported data size for zHPF requests failed\n");
+ return 0;
+ } else {
+ return (u32)mdc * FCX_MAX_DATA_FACTOR;
+ }
+}
+
static int verify_fcx_max_data(struct dasd_device *device, __u8 lpm)
{
struct dasd_eckd_private *private = device->private;
@@ -1222,8 +1255,7 @@ static int rebuild_device_uid(struct dasd_device *device,
struct path_verification_work_data *data)
{
struct dasd_eckd_private *private = device->private;
- struct dasd_path *path_data = &device->path_data;
- __u8 lpm, opm = path_data->opm;
+ __u8 lpm, opm = dasd_path_get_opm(device);
int rc = -ENODEV;
for (lpm = 0x80; lpm; lpm >>= 1) {
@@ -1356,7 +1388,7 @@ static void do_path_verification_work(struct work_struct *work)
* in other case the device UID may have changed and
* the first working path UID will be used as device UID
*/
- if (device->path_data.opm &&
+ if (dasd_path_get_opm(device) &&
dasd_eckd_compare_path_uid(device, &path_private)) {
/*
* the comparison was not successful
@@ -1406,23 +1438,17 @@ static void do_path_verification_work(struct work_struct *work)
* situation in dasd_start_IO.
*/
spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
- if (!device->path_data.opm && opm) {
- device->path_data.opm = opm;
- device->path_data.cablepm &= ~opm;
- device->path_data.cuirpm &= ~opm;
- device->path_data.hpfpm &= ~opm;
+ if (!dasd_path_get_opm(device) && opm) {
+ dasd_path_set_opm(device, opm);
dasd_generic_path_operational(device);
} else {
- device->path_data.opm |= opm;
- device->path_data.cablepm &= ~opm;
- device->path_data.cuirpm &= ~opm;
- device->path_data.hpfpm &= ~opm;
+ dasd_path_add_opm(device, opm);
}
- device->path_data.npm |= npm;
- device->path_data.ppm |= ppm;
- device->path_data.tbvpm |= epm;
- device->path_data.cablepm |= cablepm;
- device->path_data.hpfpm |= hpfpm;
+ dasd_path_add_nppm(device, npm);
+ dasd_path_add_ppm(device, ppm);
+ dasd_path_add_tbvpm(device, epm);
+ dasd_path_add_cablepm(device, cablepm);
+ dasd_path_add_nohpfpm(device, hpfpm);
spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
}
clear_bit(DASD_FLAG_PATH_VERIFY, &device->flags);
@@ -1456,6 +1482,19 @@ static int dasd_eckd_verify_path(struct dasd_device *device, __u8 lpm)
return 0;
}
+static void dasd_eckd_reset_path(struct dasd_device *device, __u8 pm)
+{
+ struct dasd_eckd_private *private = device->private;
+ unsigned long flags;
+
+ if (!private->fcx_max_data)
+ private->fcx_max_data = get_fcx_max_data(device);
+ spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
+ dasd_path_set_tbvpm(device, pm ? : dasd_path_get_notoperpm(device));
+ dasd_schedule_device_bh(device);
+ spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
+}
+
static int dasd_eckd_read_features(struct dasd_device *device)
{
struct dasd_eckd_private *private = device->private;
@@ -1652,32 +1691,6 @@ static void dasd_eckd_kick_validate_server(struct dasd_device *device)
dasd_put_device(device);
}
-static u32 get_fcx_max_data(struct dasd_device *device)
-{
- struct dasd_eckd_private *private = device->private;
- int fcx_in_css, fcx_in_gneq, fcx_in_features;
- int tpm, mdc;
-
- if (dasd_nofcx)
- return 0;
- /* is transport mode supported? */
- fcx_in_css = css_general_characteristics.fcx;
- fcx_in_gneq = private->gneq->reserved2[7] & 0x04;
- fcx_in_features = private->features.feature[40] & 0x80;
- tpm = fcx_in_css && fcx_in_gneq && fcx_in_features;
-
- if (!tpm)
- return 0;
-
- mdc = ccw_device_get_mdc(device->cdev, 0);
- if (mdc < 0) {
- dev_warn(&device->cdev->dev, "Detecting the maximum supported"
- " data size for zHPF requests failed\n");
- return 0;
- } else
- return (u32)mdc * FCX_MAX_DATA_FACTOR;
-}
-
/*
* Check device characteristics.
* If the device is accessible using ECKD discipline, the device is enabled.
@@ -1729,10 +1742,11 @@ dasd_eckd_check_characteristics(struct dasd_device *device)
if (rc)
goto out_err1;
- /* set default timeout */
+ /* set some default values */
device->default_expires = DASD_EXPIRES;
- /* set default retry count */
device->default_retries = DASD_RETRIES;
+ device->path_thrhld = DASD_ECKD_PATH_THRHLD;
+ device->path_interval = DASD_ECKD_PATH_INTERVAL;
if (private->gneq) {
value = 1;
@@ -1839,13 +1853,16 @@ static void dasd_eckd_uncheck_device(struct dasd_device *device)
private->gneq = NULL;
private->conf_len = 0;
for (i = 0; i < 8; i++) {
- kfree(private->path_conf_data[i]);
- if ((__u8 *)private->path_conf_data[i] ==
+ kfree(device->path[i].conf_data);
+ if ((__u8 *)device->path[i].conf_data ==
private->conf_data) {
private->conf_data = NULL;
private->conf_len = 0;
}
- private->path_conf_data[i] = NULL;
+ device->path[i].conf_data = NULL;
+ device->path[i].cssid = 0;
+ device->path[i].ssid = 0;
+ device->path[i].chpid = 0;
}
kfree(private->conf_data);
private->conf_data = NULL;
@@ -2966,7 +2983,7 @@ static void dasd_eckd_handle_terminated_request(struct dasd_ccw_req *cqr)
if (cqr->block && (cqr->startdev != cqr->block->base)) {
dasd_eckd_reset_ccw_to_base_io(cqr);
cqr->startdev = cqr->block->base;
- cqr->lpm = cqr->block->base->path_data.opm;
+ cqr->lpm = dasd_path_get_opm(cqr->block->base);
}
};
@@ -3251,7 +3268,7 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_single(
cqr->memdev = startdev;
cqr->block = block;
cqr->expires = startdev->default_expires * HZ; /* default 5 minutes */
- cqr->lpm = startdev->path_data.ppm;
+ cqr->lpm = dasd_path_get_ppm(startdev);
cqr->retries = startdev->default_retries;
cqr->buildclk = get_tod_clock();
cqr->status = DASD_CQR_FILLED;
@@ -3426,7 +3443,7 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_track(
cqr->memdev = startdev;
cqr->block = block;
cqr->expires = startdev->default_expires * HZ; /* default 5 minutes */
- cqr->lpm = startdev->path_data.ppm;
+ cqr->lpm = dasd_path_get_ppm(startdev);
cqr->retries = startdev->default_retries;
cqr->buildclk = get_tod_clock();
cqr->status = DASD_CQR_FILLED;
@@ -3735,7 +3752,7 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_tpm_track(
cqr->memdev = startdev;
cqr->block = block;
cqr->expires = startdev->default_expires * HZ; /* default 5 minutes */
- cqr->lpm = startdev->path_data.ppm;
+ cqr->lpm = dasd_path_get_ppm(startdev);
cqr->retries = startdev->default_retries;
cqr->buildclk = get_tod_clock();
cqr->status = DASD_CQR_FILLED;
@@ -3962,7 +3979,7 @@ static struct dasd_ccw_req *dasd_raw_build_cp(struct dasd_device *startdev,
cqr->memdev = startdev;
cqr->block = block;
cqr->expires = startdev->default_expires * HZ;
- cqr->lpm = startdev->path_data.ppm;
+ cqr->lpm = dasd_path_get_ppm(startdev);
cqr->retries = startdev->default_retries;
cqr->buildclk = get_tod_clock();
cqr->status = DASD_CQR_FILLED;
@@ -4783,7 +4800,8 @@ static void dasd_eckd_dump_sense_tcw(struct dasd_device *device,
req, scsw_cc(&irb->scsw), scsw_fctl(&irb->scsw),
scsw_actl(&irb->scsw), scsw_stctl(&irb->scsw),
scsw_dstat(&irb->scsw), scsw_cstat(&irb->scsw),
- irb->scsw.tm.fcxs, irb->scsw.tm.schxs,
+ irb->scsw.tm.fcxs,
+ (irb->scsw.tm.ifob << 7) | irb->scsw.tm.sesq,
req ? req->intrc : 0);
len += sprintf(page + len, PRINTK_HEADER
" device %s: Failing TCW: %p\n",
@@ -5306,11 +5324,10 @@ static int dasd_hosts_print(struct dasd_device *device, struct seq_file *m)
*/
static int
dasd_eckd_psf_cuir_response(struct dasd_device *device, int response,
- __u32 message_id,
- struct channel_path_desc *desc,
- struct subchannel_id sch_id)
+ __u32 message_id, __u8 lpum)
{
struct dasd_psf_cuir_response *psf_cuir;
+ int pos = pathmask_to_pos(lpum);
struct dasd_ccw_req *cqr;
struct ccw1 *ccw;
int rc;
@@ -5328,11 +5345,10 @@ dasd_eckd_psf_cuir_response(struct dasd_device *device, int response,
psf_cuir = (struct dasd_psf_cuir_response *)cqr->data;
psf_cuir->order = PSF_ORDER_CUIR_RESPONSE;
psf_cuir->cc = response;
- if (desc)
- psf_cuir->chpid = desc->chpid;
+ psf_cuir->chpid = device->path[pos].chpid;
psf_cuir->message_id = message_id;
- psf_cuir->cssid = sch_id.cssid;
- psf_cuir->ssid = sch_id.ssid;
+ psf_cuir->cssid = device->path[pos].cssid;
+ psf_cuir->ssid = device->path[pos].ssid;
ccw = cqr->cpaddr;
ccw->cmd_code = DASD_ECKD_CCW_PSF;
ccw->cda = (__u32)(addr_t)psf_cuir;
@@ -5363,20 +5379,19 @@ static struct dasd_conf_data *dasd_eckd_get_ref_conf(struct dasd_device *device,
__u8 lpum,
struct dasd_cuir_message *cuir)
{
- struct dasd_eckd_private *private = device->private;
struct dasd_conf_data *conf_data;
int path, pos;
if (cuir->record_selector == 0)
goto out;
for (path = 0x80, pos = 0; path; path >>= 1, pos++) {
- conf_data = private->path_conf_data[pos];
+ conf_data = device->path[pos].conf_data;
if (conf_data->gneq.record_selector ==
cuir->record_selector)
return conf_data;
}
out:
- return private->path_conf_data[pathmask_to_pos(lpum)];
+ return device->path[pathmask_to_pos(lpum)].conf_data;
}
/*
@@ -5391,7 +5406,6 @@ out:
static int dasd_eckd_cuir_scope(struct dasd_device *device, __u8 lpum,
struct dasd_cuir_message *cuir)
{
- struct dasd_eckd_private *private = device->private;
struct dasd_conf_data *ref_conf_data;
unsigned long bitmask = 0, mask = 0;
struct dasd_conf_data *conf_data;
@@ -5417,11 +5431,10 @@ static int dasd_eckd_cuir_scope(struct dasd_device *device, __u8 lpum,
mask |= cuir->neq_map[1] << 8;
mask |= cuir->neq_map[0] << 16;
- for (path = 0x80; path; path >>= 1) {
+ for (path = 0; path < 8; path++) {
/* initialise data per path */
bitmask = mask;
- pos = pathmask_to_pos(path);
- conf_data = private->path_conf_data[pos];
+ conf_data = device->path[path].conf_data;
pos = 8 - ffs(cuir->ned_map);
ned = (char *) &conf_data->neds[pos];
/* compare reference ned and per path ned */
@@ -5442,33 +5455,29 @@ static int dasd_eckd_cuir_scope(struct dasd_device *device, __u8 lpum,
continue;
/* device and path match the reference values,
add path to CUIR scope */
- tbcpm |= path;
+ tbcpm |= 0x80 >> path;
}
return tbcpm;
}
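
The loop rewrite above switches from walking a mask (path = 0x80; path >>= 1) to walking positions 0..7, so the scope mask now has to be rebuilt with `0x80 >> path`. The convention throughout this patch is that channel-path position 0 lives in the most significant bit of the 8-bit mask. A sketch of the two conversions (pathmask_to_pos() from <asm/cio.h> covers the mask-to-position direction; the helper names here are illustrative):

	#include <linux/bitops.h>	/* ffs() */

	static inline __u8 pos_to_pathmask(int pos)
	{
		return 0x80 >> pos;	/* position 0 -> 0x80 */
	}

	static inline int mask_to_pos(__u8 mask)
	{
		return 8 - ffs(mask);	/* 0x80 -> 0, 0x01 -> 7 */
	}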
static void dasd_eckd_cuir_notify_user(struct dasd_device *device,
- unsigned long paths,
- struct subchannel_id sch_id, int action)
+ unsigned long paths, int action)
{
- struct channel_path_desc *desc;
int pos;
while (paths) {
/* get position of bit in mask */
- pos = ffs(paths) - 1;
+ pos = 8 - ffs(paths);
/* get channel path descriptor from this position */
- desc = ccw_device_get_chp_desc(device->cdev, 7 - pos);
if (action == CUIR_QUIESCE)
- pr_warn("Service on the storage server caused path "
- "%x.%02x to go offline", sch_id.cssid,
- desc ? desc->chpid : 0);
+ pr_warn("Service on the storage server caused path %x.%02x to go offline",
+ device->path[pos].cssid,
+ device->path[pos].chpid);
else if (action == CUIR_RESUME)
- pr_info("Path %x.%02x is back online after service "
- "on the storage server", sch_id.cssid,
- desc ? desc->chpid : 0);
- kfree(desc);
- clear_bit(pos, &paths);
+ pr_info("Path %x.%02x is back online after service on the storage server",
+ device->path[pos].cssid,
+ device->path[pos].chpid);
+ clear_bit(7 - pos, &paths);
}
}
@@ -5479,16 +5488,16 @@ static int dasd_eckd_cuir_remove_path(struct dasd_device *device, __u8 lpum,
tbcpm = dasd_eckd_cuir_scope(device, lpum, cuir);
/* nothing to do if path is not in use */
- if (!(device->path_data.opm & tbcpm))
+ if (!(dasd_path_get_opm(device) & tbcpm))
return 0;
- if (!(device->path_data.opm & ~tbcpm)) {
+ if (!(dasd_path_get_opm(device) & ~tbcpm)) {
/* no path would be left if the CUIR action is taken,
return error */
return -EINVAL;
}
/* remove device from operational path mask */
- device->path_data.opm &= ~tbcpm;
- device->path_data.cuirpm |= tbcpm;
+ dasd_path_remove_opm(device, tbcpm);
+ dasd_path_add_cuirpm(device, tbcpm);
return tbcpm;
}
@@ -5501,7 +5510,6 @@ static int dasd_eckd_cuir_remove_path(struct dasd_device *device, __u8 lpum,
* notify the already set offline devices again
*/
static int dasd_eckd_cuir_quiesce(struct dasd_device *device, __u8 lpum,
- struct subchannel_id sch_id,
struct dasd_cuir_message *cuir)
{
struct dasd_eckd_private *private = device->private;
@@ -5556,14 +5564,13 @@ static int dasd_eckd_cuir_quiesce(struct dasd_device *device, __u8 lpum,
}
}
/* notify user about all paths affected by CUIR action */
- dasd_eckd_cuir_notify_user(device, paths, sch_id, CUIR_QUIESCE);
+ dasd_eckd_cuir_notify_user(device, paths, CUIR_QUIESCE);
return 0;
out_err:
return tbcpm;
}
static int dasd_eckd_cuir_resume(struct dasd_device *device, __u8 lpum,
- struct subchannel_id sch_id,
struct dasd_cuir_message *cuir)
{
struct dasd_eckd_private *private = device->private;
@@ -5581,8 +5588,8 @@ static int dasd_eckd_cuir_resume(struct dasd_device *device, __u8 lpum,
alias_list) {
tbcpm = dasd_eckd_cuir_scope(dev, lpum, cuir);
paths |= tbcpm;
- if (!(dev->path_data.opm & tbcpm)) {
- dev->path_data.tbvpm |= tbcpm;
+ if (!(dasd_path_get_opm(dev) & tbcpm)) {
+ dasd_path_add_tbvpm(dev, tbcpm);
dasd_schedule_device_bh(dev);
}
}
@@ -5591,8 +5598,8 @@ static int dasd_eckd_cuir_resume(struct dasd_device *device, __u8 lpum,
alias_list) {
tbcpm = dasd_eckd_cuir_scope(dev, lpum, cuir);
paths |= tbcpm;
- if (!(dev->path_data.opm & tbcpm)) {
- dev->path_data.tbvpm |= tbcpm;
+ if (!(dasd_path_get_opm(dev) & tbcpm)) {
+ dasd_path_add_tbvpm(dev, tbcpm);
dasd_schedule_device_bh(dev);
}
}
@@ -5605,8 +5612,8 @@ static int dasd_eckd_cuir_resume(struct dasd_device *device, __u8 lpum,
alias_list) {
tbcpm = dasd_eckd_cuir_scope(dev, lpum, cuir);
paths |= tbcpm;
- if (!(dev->path_data.opm & tbcpm)) {
- dev->path_data.tbvpm |= tbcpm;
+ if (!(dasd_path_get_opm(dev) & tbcpm)) {
+ dasd_path_add_tbvpm(dev, tbcpm);
dasd_schedule_device_bh(dev);
}
}
@@ -5615,14 +5622,14 @@ static int dasd_eckd_cuir_resume(struct dasd_device *device, __u8 lpum,
alias_list) {
tbcpm = dasd_eckd_cuir_scope(dev, lpum, cuir);
paths |= tbcpm;
- if (!(dev->path_data.opm & tbcpm)) {
- dev->path_data.tbvpm |= tbcpm;
+ if (!(dasd_path_get_opm(dev) & tbcpm)) {
+ dasd_path_add_tbvpm(dev, tbcpm);
dasd_schedule_device_bh(dev);
}
}
}
/* notify user about all paths affected by CUIR action */
- dasd_eckd_cuir_notify_user(device, paths, sch_id, CUIR_RESUME);
+ dasd_eckd_cuir_notify_user(device, paths, CUIR_RESUME);
return 0;
}
@@ -5630,38 +5637,31 @@ static void dasd_eckd_handle_cuir(struct dasd_device *device, void *messages,
__u8 lpum)
{
struct dasd_cuir_message *cuir = messages;
- struct channel_path_desc *desc;
- struct subchannel_id sch_id;
- int pos, response;
+ int response;
DBF_DEV_EVENT(DBF_WARNING, device,
"CUIR request: %016llx %016llx %016llx %08x",
((u64 *)cuir)[0], ((u64 *)cuir)[1], ((u64 *)cuir)[2],
((u32 *)cuir)[3]);
- ccw_device_get_schid(device->cdev, &sch_id);
- pos = pathmask_to_pos(lpum);
- desc = ccw_device_get_chp_desc(device->cdev, pos);
if (cuir->code == CUIR_QUIESCE) {
/* quiesce */
- if (dasd_eckd_cuir_quiesce(device, lpum, sch_id, cuir))
+ if (dasd_eckd_cuir_quiesce(device, lpum, cuir))
response = PSF_CUIR_LAST_PATH;
else
response = PSF_CUIR_COMPLETED;
} else if (cuir->code == CUIR_RESUME) {
/* resume */
- dasd_eckd_cuir_resume(device, lpum, sch_id, cuir);
+ dasd_eckd_cuir_resume(device, lpum, cuir);
response = PSF_CUIR_COMPLETED;
} else
response = PSF_CUIR_NOT_SUPPORTED;
dasd_eckd_psf_cuir_response(device, response,
- cuir->message_id, desc, sch_id);
+ cuir->message_id, lpum);
DBF_DEV_EVENT(DBF_WARNING, device,
"CUIR response: %d on message ID %08x", response,
cuir->message_id);
- /* free descriptor copy */
- kfree(desc);
/* to make sure there is no attention left schedule work again */
device->discipline->check_attention(device, lpum);
}
@@ -5708,6 +5708,63 @@ static int dasd_eckd_check_attention(struct dasd_device *device, __u8 lpum)
return 0;
}
+static int dasd_eckd_disable_hpf_path(struct dasd_device *device, __u8 lpum)
+{
+ if (~lpum & dasd_path_get_opm(device)) {
+ dasd_path_add_nohpfpm(device, lpum);
+ dasd_path_remove_opm(device, lpum);
+ dev_err(&device->cdev->dev,
+ "Channel path %02X lost HPF functionality and is disabled\n",
+ lpum);
+ return 1;
+ }
+ return 0;
+}
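+
+The guard in dasd_eckd_disable_hpf_path() is terse: `~lpum & dasd_path_get_opm(device)` is non-zero exactly when at least one path other than the failing one is still operational, so the failing path may safely be dropped. A worked instance:
+
+	__u8 lpum = 0x40;	/* failing path at position 1 */
+	__u8 opm = 0xc0;	/* positions 0 and 1 operational */
+	/* ~0x40 & 0xc0 == 0xbf & 0xc0 == 0x80: path 0 survives,
+	 * so path 1 can be removed from the operational mask */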
+
+static void dasd_eckd_disable_hpf_device(struct dasd_device *device)
+{
+ struct dasd_eckd_private *private = device->private;
+
+ dev_err(&device->cdev->dev,
+ "High Performance FICON disabled\n");
+ private->fcx_max_data = 0;
+}
+
+static int dasd_eckd_hpf_enabled(struct dasd_device *device)
+{
+ struct dasd_eckd_private *private = device->private;
+
+ return private->fcx_max_data ? 1 : 0;
+}
+
+static void dasd_eckd_handle_hpf_error(struct dasd_device *device,
+ struct irb *irb)
+{
+ struct dasd_eckd_private *private = device->private;
+
+ if (!private->fcx_max_data) {
+ /* sanity check for no HPF, the error makes no sense */
+ DBF_DEV_EVENT(DBF_WARNING, device, "%s",
+ "Trying to disable HPF for a non HPF device");
+ return;
+ }
+ if (irb->scsw.tm.sesq == SCSW_SESQ_DEV_NOFCX) {
+ dasd_eckd_disable_hpf_device(device);
+ } else if (irb->scsw.tm.sesq == SCSW_SESQ_PATH_NOFCX) {
+ if (dasd_eckd_disable_hpf_path(device, irb->esw.esw1.lpum))
+ return;
+ dasd_eckd_disable_hpf_device(device);
+ dasd_path_set_tbvpm(device,
+ dasd_path_get_hpfpm(device));
+ }
+ /*
+ * prevent any new I/O from being started on the device and schedule a
+ * requeue of the existing requests
+ */
+ dasd_device_set_stop_bits(device, DASD_STOPPED_NOT_ACC);
+ dasd_schedule_requeue(device);
+}
+
static struct ccw_driver dasd_eckd_driver = {
.driver = {
.name = "dasd-eckd",
@@ -5776,6 +5833,10 @@ static struct dasd_discipline dasd_eckd_discipline = {
.check_attention = dasd_eckd_check_attention,
.host_access_count = dasd_eckd_host_access_count,
.hosts_print = dasd_hosts_print,
+ .handle_hpf_error = dasd_eckd_handle_hpf_error,
+ .disable_hpf = dasd_eckd_disable_hpf_device,
+ .hpf_enabled = dasd_eckd_hpf_enabled,
+ .reset_path = dasd_eckd_reset_path,
};
static int __init
diff --git a/drivers/s390/block/dasd_eckd.h b/drivers/s390/block/dasd_eckd.h
index 59803626ea36..e2a710c250a5 100644
--- a/drivers/s390/block/dasd_eckd.h
+++ b/drivers/s390/block/dasd_eckd.h
@@ -94,6 +94,8 @@
#define FCX_MAX_DATA_FACTOR 65536
#define DASD_ECKD_RCD_DATA_SIZE 256
+#define DASD_ECKD_PATH_THRHLD 256
+#define DASD_ECKD_PATH_INTERVAL 300
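
These two defaults are consumed by dasd_eckd_check_characteristics() above (device->path_thrhld and device->path_interval) and belong to the per-path error accounting added to struct dasd_path further down (error_count, errorclk). A hypothetical helper, not part of the patch, showing how such a threshold is typically checked; the real driver additionally ages the count via errorclk and path_interval:

	static inline int dasd_path_over_threshold(struct dasd_device *device,
						   int chp)
	{
		/* too many channel errors accumulated on this path? */
		return atomic_read(&device->path[chp].error_count) >=
			device->path_thrhld;
	}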
/*****************************************************************************
* SECTION: Type Definitions
@@ -535,8 +537,7 @@ struct dasd_eckd_private {
struct dasd_eckd_characteristics rdc_data;
u8 *conf_data;
int conf_len;
- /* per path configuration data */
- struct dasd_conf_data *path_conf_data[8];
+
/* pointers to specific parts in the conf_data */
struct dasd_ned *ned;
struct dasd_sneq *sneq;
diff --git a/drivers/s390/block/dasd_eer.c b/drivers/s390/block/dasd_eer.c
index 21ef63cf0960..6c5d671304b4 100644
--- a/drivers/s390/block/dasd_eer.c
+++ b/drivers/s390/block/dasd_eer.c
@@ -454,20 +454,30 @@ static void dasd_eer_snss_cb(struct dasd_ccw_req *cqr, void *data)
*/
int dasd_eer_enable(struct dasd_device *device)
{
- struct dasd_ccw_req *cqr;
+ struct dasd_ccw_req *cqr = NULL;
unsigned long flags;
struct ccw1 *ccw;
+ int rc = 0;
+ spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
if (device->eer_cqr)
- return 0;
+ goto out;
+ else if (!device->discipline ||
+ strcmp(device->discipline->name, "ECKD"))
+ rc = -EMEDIUMTYPE;
+ else if (test_bit(DASD_FLAG_OFFLINE, &device->flags))
+ rc = -EBUSY;
- if (!device->discipline || strcmp(device->discipline->name, "ECKD"))
- return -EPERM; /* FIXME: -EMEDIUMTYPE ? */
+ if (rc)
+ goto out;
cqr = dasd_kmalloc_request(DASD_ECKD_MAGIC, 1 /* SNSS */,
SNSS_DATA_SIZE, device);
- if (IS_ERR(cqr))
- return -ENOMEM;
+ if (IS_ERR(cqr)) {
+ rc = -ENOMEM;
+ cqr = NULL;
+ goto out;
+ }
cqr->startdev = device;
cqr->retries = 255;
@@ -485,15 +495,18 @@ int dasd_eer_enable(struct dasd_device *device)
cqr->status = DASD_CQR_FILLED;
cqr->callback = dasd_eer_snss_cb;
- spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
if (!device->eer_cqr) {
device->eer_cqr = cqr;
cqr = NULL;
}
+
+out:
spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
+
if (cqr)
dasd_kfree_request(cqr, device);
- return 0;
+
+ return rc;
}
/*
diff --git a/drivers/s390/block/dasd_erp.c b/drivers/s390/block/dasd_erp.c
index d138d0116734..113c1c1fa1af 100644
--- a/drivers/s390/block/dasd_erp.c
+++ b/drivers/s390/block/dasd_erp.c
@@ -96,7 +96,7 @@ dasd_default_erp_action(struct dasd_ccw_req *cqr)
"default ERP called (%i retries left)",
cqr->retries);
if (!test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags))
- cqr->lpm = device->path_data.opm;
+ cqr->lpm = dasd_path_get_opm(device);
cqr->status = DASD_CQR_FILLED;
} else {
pr_err("%s: default ERP has run out of retries and failed\n",
diff --git a/drivers/s390/block/dasd_fba.c b/drivers/s390/block/dasd_fba.c
index d7b5b550364b..462cab5d4302 100644
--- a/drivers/s390/block/dasd_fba.c
+++ b/drivers/s390/block/dasd_fba.c
@@ -168,7 +168,7 @@ dasd_fba_check_characteristics(struct dasd_device *device)
device->default_expires = DASD_EXPIRES;
device->default_retries = FBA_DEFAULT_RETRIES;
- device->path_data.opm = LPM_ANYPATH;
+ dasd_path_set_opm(device, LPM_ANYPATH);
readonly = dasd_device_is_ro(device);
if (readonly)
diff --git a/drivers/s390/block/dasd_int.h b/drivers/s390/block/dasd_int.h
index 87ff6cef872f..24be210c10e5 100644
--- a/drivers/s390/block/dasd_int.h
+++ b/drivers/s390/block/dasd_int.h
@@ -55,6 +55,7 @@
#include <asm/debug.h>
#include <asm/dasd.h>
#include <asm/idals.h>
+#include <linux/bitops.h>
/* DASD discipline magic */
#define DASD_ECKD_MAGIC 0xC5C3D2C4
@@ -377,6 +378,10 @@ struct dasd_discipline {
int (*check_attention)(struct dasd_device *, __u8);
int (*host_access_count)(struct dasd_device *);
int (*hosts_print)(struct dasd_device *, struct seq_file *);
+ void (*handle_hpf_error)(struct dasd_device *, struct irb *);
+ void (*disable_hpf)(struct dasd_device *);
+ int (*hpf_enabled)(struct dasd_device *);
+ void (*reset_path)(struct dasd_device *, __u8);
};
extern struct dasd_discipline *dasd_diag_discipline_pointer;
@@ -397,17 +402,31 @@ extern struct dasd_discipline *dasd_diag_discipline_pointer;
#define DASD_EER_STATECHANGE 3
#define DASD_EER_PPRCSUSPEND 4
+/* DASD path handling */
+
+#define DASD_PATH_OPERATIONAL 1
+#define DASD_PATH_TBV 2
+#define DASD_PATH_PP 3
+#define DASD_PATH_NPP 4
+#define DASD_PATH_MISCABLED 5
+#define DASD_PATH_NOHPF 6
+#define DASD_PATH_CUIR 7
+#define DASD_PATH_IFCC 8
+
+#define DASD_THRHLD_MAX 4294967295U
+#define DASD_INTERVAL_MAX 4294967295U
+
struct dasd_path {
- __u8 opm;
- __u8 tbvpm;
- __u8 ppm;
- __u8 npm;
- /* paths that are not used because of a special condition */
- __u8 cablepm; /* miss-cabled */
- __u8 hpfpm; /* the HPF requirements of the other paths are not met */
- __u8 cuirpm; /* CUIR varied offline */
+ unsigned long flags;
+ u8 cssid;
+ u8 ssid;
+ u8 chpid;
+ struct dasd_conf_data *conf_data;
+ atomic_t error_count;
+ unsigned long long errorclk;
};
+
struct dasd_profile_info {
/* legacy part of profile data, as in dasd_profile_info_t */
unsigned int dasd_io_reqs; /* number of requests processed */
@@ -458,7 +477,8 @@ struct dasd_device {
struct dasd_discipline *discipline;
struct dasd_discipline *base_discipline;
void *private;
- struct dasd_path path_data;
+ struct dasd_path path[8];
+ __u8 opm;
/* Device state and target state. */
int state, target;
@@ -483,6 +503,7 @@ struct dasd_device {
struct work_struct reload_device;
struct work_struct kick_validate;
struct work_struct suc_work;
+ struct work_struct requeue_requests;
struct timer_list timer;
debug_info_t *debug_area;
@@ -498,6 +519,9 @@ struct dasd_device {
unsigned long blk_timeout;
+ unsigned long path_thrhld;
+ unsigned long path_interval;
+
struct dentry *debugfs_dentry;
struct dentry *hosts_dentry;
struct dasd_profile profile;
@@ -707,6 +731,7 @@ void dasd_set_target_state(struct dasd_device *, int);
void dasd_kick_device(struct dasd_device *);
void dasd_restore_device(struct dasd_device *);
void dasd_reload_device(struct dasd_device *);
+void dasd_schedule_requeue(struct dasd_device *);
void dasd_add_request_head(struct dasd_ccw_req *);
void dasd_add_request_tail(struct dasd_ccw_req *);
@@ -835,4 +860,410 @@ static inline int dasd_eer_enabled(struct dasd_device *device)
#define dasd_eer_enabled(d) (0)
#endif /* CONFIG_DASD_ERR */
+
+/* DASD path handling functions */
+
+/*
+ * helper functions to read and modify the state bits of a given channel path of a device
+ */
+static inline int dasd_path_is_operational(struct dasd_device *device, int chp)
+{
+ return test_bit(DASD_PATH_OPERATIONAL, &device->path[chp].flags);
+}
+
+static inline int dasd_path_need_verify(struct dasd_device *device, int chp)
+{
+ return test_bit(DASD_PATH_TBV, &device->path[chp].flags);
+}
+
+static inline void dasd_path_verify(struct dasd_device *device, int chp)
+{
+ __set_bit(DASD_PATH_TBV, &device->path[chp].flags);
+}
+
+static inline void dasd_path_clear_verify(struct dasd_device *device, int chp)
+{
+ __clear_bit(DASD_PATH_TBV, &device->path[chp].flags);
+}
+
+static inline void dasd_path_clear_all_verify(struct dasd_device *device)
+{
+ int chp;
+
+ for (chp = 0; chp < 8; chp++)
+ dasd_path_clear_verify(device, chp);
+}
+
+static inline void dasd_path_operational(struct dasd_device *device, int chp)
+{
+ __set_bit(DASD_PATH_OPERATIONAL, &device->path[chp].flags);
+ device->opm |= (0x80 >> chp);
+}
+
+static inline void dasd_path_nonpreferred(struct dasd_device *device, int chp)
+{
+ __set_bit(DASD_PATH_NPP, &device->path[chp].flags);
+}
+
+static inline int dasd_path_is_nonpreferred(struct dasd_device *device, int chp)
+{
+ return test_bit(DASD_PATH_NPP, &device->path[chp].flags);
+}
+
+static inline void dasd_path_clear_nonpreferred(struct dasd_device *device,
+ int chp)
+{
+ __clear_bit(DASD_PATH_NPP, &device->path[chp].flags);
+}
+
+static inline void dasd_path_preferred(struct dasd_device *device, int chp)
+{
+ __set_bit(DASD_PATH_PP, &device->path[chp].flags);
+}
+
+static inline int dasd_path_is_preferred(struct dasd_device *device, int chp)
+{
+ return test_bit(DASD_PATH_PP, &device->path[chp].flags);
+}
+
+static inline void dasd_path_clear_preferred(struct dasd_device *device,
+ int chp)
+{
+ __clear_bit(DASD_PATH_PP, &device->path[chp].flags);
+}
+
+static inline void dasd_path_clear_oper(struct dasd_device *device, int chp)
+{
+ __clear_bit(DASD_PATH_OPERATIONAL, &device->path[chp].flags);
+ device->opm &= ~(0x80 >> chp);
+}
+
+static inline void dasd_path_clear_cable(struct dasd_device *device, int chp)
+{
+ __clear_bit(DASD_PATH_MISCABLED, &device->path[chp].flags);
+}
+
+static inline void dasd_path_cuir(struct dasd_device *device, int chp)
+{
+ __set_bit(DASD_PATH_CUIR, &device->path[chp].flags);
+}
+
+static inline int dasd_path_is_cuir(struct dasd_device *device, int chp)
+{
+ return test_bit(DASD_PATH_CUIR, &device->path[chp].flags);
+}
+
+static inline void dasd_path_clear_cuir(struct dasd_device *device, int chp)
+{
+ __clear_bit(DASD_PATH_CUIR, &device->path[chp].flags);
+}
+
+static inline void dasd_path_ifcc(struct dasd_device *device, int chp)
+{
+ set_bit(DASD_PATH_IFCC, &device->path[chp].flags);
+}
+
+static inline int dasd_path_is_ifcc(struct dasd_device *device, int chp)
+{
+ return test_bit(DASD_PATH_IFCC, &device->path[chp].flags);
+}
+
+static inline void dasd_path_clear_ifcc(struct dasd_device *device, int chp)
+{
+ clear_bit(DASD_PATH_IFCC, &device->path[chp].flags);
+}
+
+static inline void dasd_path_clear_nohpf(struct dasd_device *device, int chp)
+{
+ __clear_bit(DASD_PATH_NOHPF, &device->path[chp].flags);
+}
+
+static inline void dasd_path_miscabled(struct dasd_device *device, int chp)
+{
+ __set_bit(DASD_PATH_MISCABLED, &device->path[chp].flags);
+}
+
+static inline int dasd_path_is_miscabled(struct dasd_device *device, int chp)
+{
+ return test_bit(DASD_PATH_MISCABLED, &device->path[chp].flags);
+}
+
+static inline void dasd_path_nohpf(struct dasd_device *device, int chp)
+{
+ __set_bit(DASD_PATH_NOHPF, &device->path[chp].flags);
+}
+
+static inline int dasd_path_is_nohpf(struct dasd_device *device, int chp)
+{
+ return test_bit(DASD_PATH_NOHPF, &device->path[chp].flags);
+}
+
+/*
+ * get functions for path masks
+ * will return a path mask for the given device
+ */
+
+static inline __u8 dasd_path_get_opm(struct dasd_device *device)
+{
+ return device->opm;
+}
+
+static inline __u8 dasd_path_get_tbvpm(struct dasd_device *device)
+{
+ int chp;
+ __u8 tbvpm = 0x00;
+
+ for (chp = 0; chp < 8; chp++)
+ if (dasd_path_need_verify(device, chp))
+ tbvpm |= 0x80 >> chp;
+ return tbvpm;
+}
+
+static inline __u8 dasd_path_get_nppm(struct dasd_device *device)
+{
+ int chp;
+ __u8 npm = 0x00;
+
+ for (chp = 0; chp < 8; chp++) {
+ if (dasd_path_is_nonpreferred(device, chp))
+ npm |= 0x80 >> chp;
+ }
+ return npm;
+}
+
+static inline __u8 dasd_path_get_ppm(struct dasd_device *device)
+{
+ int chp;
+ __u8 ppm = 0x00;
+
+ for (chp = 0; chp < 8; chp++)
+ if (dasd_path_is_preferred(device, chp))
+ ppm |= 0x80 >> chp;
+ return ppm;
+}
+
+static inline __u8 dasd_path_get_cablepm(struct dasd_device *device)
+{
+ int chp;
+ __u8 cablepm = 0x00;
+
+ for (chp = 0; chp < 8; chp++)
+ if (dasd_path_is_miscabled(device, chp))
+ cablepm |= 0x80 >> chp;
+ return cablepm;
+}
+
+static inline __u8 dasd_path_get_cuirpm(struct dasd_device *device)
+{
+ int chp;
+ __u8 cuirpm = 0x00;
+
+ for (chp = 0; chp < 8; chp++)
+ if (dasd_path_is_cuir(device, chp))
+ cuirpm |= 0x80 >> chp;
+ return cuirpm;
+}
+
+static inline __u8 dasd_path_get_ifccpm(struct dasd_device *device)
+{
+ int chp;
+ __u8 ifccpm = 0x00;
+
+ for (chp = 0; chp < 8; chp++)
+ if (dasd_path_is_ifcc(device, chp))
+ ifccpm |= 0x80 >> chp;
+ return ifccpm;
+}
+
+static inline __u8 dasd_path_get_hpfpm(struct dasd_device *device)
+{
+ int chp;
+ __u8 hpfpm = 0x00;
+
+ for (chp = 0; chp < 8; chp++)
+ if (dasd_path_is_nohpf(device, chp))
+ hpfpm |= 0x80 >> chp;
+ return hpfpm;
+}
+
+/*
+ * add functions for path masks
+ * the existing path mask will be extended by the given path mask
+ */
+static inline void dasd_path_add_tbvpm(struct dasd_device *device, __u8 pm)
+{
+ int chp;
+
+ for (chp = 0; chp < 8; chp++)
+ if (pm & (0x80 >> chp))
+ dasd_path_verify(device, chp);
+}
+
+static inline __u8 dasd_path_get_notoperpm(struct dasd_device *device)
+{
+ int chp;
+ __u8 nopm = 0x00;
+
+ for (chp = 0; chp < 8; chp++)
+ if (dasd_path_is_nohpf(device, chp) ||
+ dasd_path_is_ifcc(device, chp) ||
+ dasd_path_is_cuir(device, chp) ||
+ dasd_path_is_miscabled(device, chp))
+ nopm |= 0x80 >> chp;
+ return nopm;
+}
+
+static inline void dasd_path_add_opm(struct dasd_device *device, __u8 pm)
+{
+ int chp;
+
+ for (chp = 0; chp < 8; chp++)
+ if (pm & (0x80 >> chp)) {
+ dasd_path_operational(device, chp);
+ /*
+ * if the path is used
+ * it should not be in one of the negative lists
+ */
+ dasd_path_clear_nohpf(device, chp);
+ dasd_path_clear_cuir(device, chp);
+ dasd_path_clear_cable(device, chp);
+ dasd_path_clear_ifcc(device, chp);
+ }
+}
+
+static inline void dasd_path_add_cablepm(struct dasd_device *device, __u8 pm)
+{
+ int chp;
+
+ for (chp = 0; chp < 8; chp++)
+ if (pm & (0x80 >> chp))
+ dasd_path_miscabled(device, chp);
+}
+
+static inline void dasd_path_add_cuirpm(struct dasd_device *device, __u8 pm)
+{
+ int chp;
+
+ for (chp = 0; chp < 8; chp++)
+ if (pm & (0x80 >> chp))
+ dasd_path_cuir(device, chp);
+}
+
+static inline void dasd_path_add_ifccpm(struct dasd_device *device, __u8 pm)
+{
+ int chp;
+
+ for (chp = 0; chp < 8; chp++)
+ if (pm & (0x80 >> chp))
+ dasd_path_ifcc(device, chp);
+}
+
+static inline void dasd_path_add_nppm(struct dasd_device *device, __u8 pm)
+{
+ int chp;
+
+ for (chp = 0; chp < 8; chp++)
+ if (pm & (0x80 >> chp))
+ dasd_path_nonpreferred(device, chp);
+}
+
+static inline void dasd_path_add_nohpfpm(struct dasd_device *device, __u8 pm)
+{
+ int chp;
+
+ for (chp = 0; chp < 8; chp++)
+ if (pm & (0x80 >> chp))
+ dasd_path_nohpf(device, chp);
+}
+
+static inline void dasd_path_add_ppm(struct dasd_device *device, __u8 pm)
+{
+ int chp;
+
+ for (chp = 0; chp < 8; chp++)
+ if (pm & (0x80 >> chp))
+ dasd_path_preferred(device, chp);
+}
+
+/*
+ * set functions for path masks
+ * the existing path mask will be replaced by the given path mask
+ */
+static inline void dasd_path_set_tbvpm(struct dasd_device *device, __u8 pm)
+{
+ int chp;
+
+ for (chp = 0; chp < 8; chp++)
+ if (pm & (0x80 >> chp))
+ dasd_path_verify(device, chp);
+ else
+ dasd_path_clear_verify(device, chp);
+}
+
+static inline void dasd_path_set_opm(struct dasd_device *device, __u8 pm)
+{
+ int chp;
+
+ for (chp = 0; chp < 8; chp++) {
+ dasd_path_clear_oper(device, chp);
+ if (pm & (0x80 >> chp)) {
+ dasd_path_operational(device, chp);
+ /*
+ * if the path is used
+ * it should not be in one of the negative lists
+ */
+ dasd_path_clear_nohpf(device, chp);
+ dasd_path_clear_cuir(device, chp);
+ dasd_path_clear_cable(device, chp);
+ dasd_path_clear_ifcc(device, chp);
+ }
+ }
+}
+
+/*
+ * remove functions for path masks
+ * the paths set in the given path mask are removed from the existing one
+ */
+static inline void dasd_path_remove_opm(struct dasd_device *device, __u8 pm)
+{
+ int chp;
+
+ for (chp = 0; chp < 8; chp++) {
+ if (pm & (0x80 >> chp))
+ dasd_path_clear_oper(device, chp);
+ }
+}
+
+/*
+ * add a newly available path to the to-be-verified path mask and remove
+ * it from normal operation until it is verified
+ */
+static inline void dasd_path_available(struct dasd_device *device, int chp)
+{
+ dasd_path_clear_oper(device, chp);
+ dasd_path_verify(device, chp);
+}
+
+static inline void dasd_path_notoper(struct dasd_device *device, int chp)
+{
+ dasd_path_clear_oper(device, chp);
+ dasd_path_clear_preferred(device, chp);
+ dasd_path_clear_nonpreferred(device, chp);
+}
+
+/*
+ * remove all paths from normal operation
+ */
+static inline void dasd_path_no_path(struct dasd_device *device)
+{
+ int chp;
+
+ for (chp = 0; chp < 8; chp++)
+ dasd_path_notoper(device, chp);
+
+ dasd_path_clear_all_verify(device);
+}
+
+/* end - path handling */
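+
+The set_*/add_* split mirrors the two branches in do_path_verification_work() earlier in this patch: set_* replaces the whole mask (clearing every bit first), add_* only switches additional paths on; both clear the "negative" per-path state bits for paths they enable. A hypothetical usage, assuming the caller holds the ccw device lock as the driver does:
+
+	dasd_path_set_opm(device, 0x80);	/* exactly path 0 operational */
+	dasd_path_add_opm(device, 0x40);	/* now paths 0 and 1 */
+	WARN_ON(dasd_path_get_opm(device) != 0xc0);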
+
#endif /* DASD_H */
diff --git a/drivers/s390/char/con3215.c b/drivers/s390/char/con3215.c
index 931d10e86837..1b8d825623bd 100644
--- a/drivers/s390/char/con3215.c
+++ b/drivers/s390/char/con3215.c
@@ -9,7 +9,6 @@
* Dan Morrison, IBM Corporation <dmorriso@cse.buffalo.edu>
*/
-#include <linux/module.h>
#include <linux/types.h>
#include <linux/kdev_t.h>
#include <linux/tty.h>
@@ -1215,13 +1214,4 @@ static int __init tty3215_init(void)
tty3215_driver = driver;
return 0;
}
-
-static void __exit tty3215_exit(void)
-{
- tty_unregister_driver(tty3215_driver);
- put_tty_driver(tty3215_driver);
- ccw_driver_unregister(&raw3215_ccw_driver);
-}
-
-module_init(tty3215_init);
-module_exit(tty3215_exit);
+device_initcall(tty3215_init);
diff --git a/drivers/s390/char/sclp.h b/drivers/s390/char/sclp.h
index 7a10c56334bb..e1fc7eb043d6 100644
--- a/drivers/s390/char/sclp.h
+++ b/drivers/s390/char/sclp.h
@@ -59,6 +59,7 @@
typedef unsigned int sclp_cmdw_t;
+#define SCLP_CMDW_READ_CPU_INFO 0x00010001
#define SCLP_CMDW_READ_EVENT_DATA 0x00770005
#define SCLP_CMDW_WRITE_EVENT_DATA 0x00760005
#define SCLP_CMDW_WRITE_EVENT_MASK 0x00780005
@@ -102,6 +103,28 @@ struct init_sccb {
sccb_mask_t sclp_send_mask;
} __attribute__((packed));
+struct read_cpu_info_sccb {
+ struct sccb_header header;
+ u16 nr_configured;
+ u16 offset_configured;
+ u16 nr_standby;
+ u16 offset_standby;
+ u8 reserved[4096 - 16];
+} __attribute__((packed, aligned(PAGE_SIZE)));
+
+static inline void sclp_fill_core_info(struct sclp_core_info *info,
+ struct read_cpu_info_sccb *sccb)
+{
+ char *page = (char *) sccb;
+
+ memset(info, 0, sizeof(*info));
+ info->configured = sccb->nr_configured;
+ info->standby = sccb->nr_standby;
+ info->combined = sccb->nr_configured + sccb->nr_standby;
+ memcpy(&info->core, page + sccb->offset_configured,
+ info->combined * sizeof(struct sclp_core_entry));
+}
+
#define SCLP_HAS_CHP_INFO (sclp.facilities & 0x8000000000000000ULL)
#define SCLP_HAS_CHP_RECONFIG (sclp.facilities & 0x2000000000000000ULL)
#define SCLP_HAS_CPU_INFO (sclp.facilities & 0x0800000000000000ULL)
diff --git a/drivers/s390/char/sclp_cmd.c b/drivers/s390/char/sclp_cmd.c
index e3fc7539116b..b9c5522b8a68 100644
--- a/drivers/s390/char/sclp_cmd.c
+++ b/drivers/s390/char/sclp_cmd.c
@@ -80,33 +80,10 @@ out:
* CPU configuration related functions.
*/
-#define SCLP_CMDW_READ_CPU_INFO 0x00010001
#define SCLP_CMDW_CONFIGURE_CPU 0x00110001
#define SCLP_CMDW_DECONFIGURE_CPU 0x00100001
-struct read_cpu_info_sccb {
- struct sccb_header header;
- u16 nr_configured;
- u16 offset_configured;
- u16 nr_standby;
- u16 offset_standby;
- u8 reserved[4096 - 16];
-} __attribute__((packed, aligned(PAGE_SIZE)));
-
-static void sclp_fill_core_info(struct sclp_core_info *info,
- struct read_cpu_info_sccb *sccb)
-{
- char *page = (char *) sccb;
-
- memset(info, 0, sizeof(*info));
- info->configured = sccb->nr_configured;
- info->standby = sccb->nr_standby;
- info->combined = sccb->nr_configured + sccb->nr_standby;
- memcpy(&info->core, page + sccb->offset_configured,
- info->combined * sizeof(struct sclp_core_entry));
-}
-
-int sclp_get_core_info(struct sclp_core_info *info)
+int _sclp_get_core_info(struct sclp_core_info *info)
{
int rc;
struct read_cpu_info_sccb *sccb;
diff --git a/drivers/s390/char/sclp_ctl.c b/drivers/s390/char/sclp_ctl.c
index 554eaa1e347d..78a7e4f94721 100644
--- a/drivers/s390/char/sclp_ctl.c
+++ b/drivers/s390/char/sclp_ctl.c
@@ -10,7 +10,7 @@
#include <linux/uaccess.h>
#include <linux/miscdevice.h>
#include <linux/gfp.h>
-#include <linux/module.h>
+#include <linux/init.h>
#include <linux/ioctl.h>
#include <linux/fs.h>
#include <asm/compat.h>
@@ -126,4 +126,4 @@ static struct miscdevice sclp_ctl_device = {
.name = "sclp",
.fops = &sclp_ctl_fops,
};
-module_misc_device(sclp_ctl_device);
+builtin_misc_device(sclp_ctl_device);
diff --git a/drivers/s390/char/sclp_early.c b/drivers/s390/char/sclp_early.c
index c71df0c7dedc..f8e46c22e641 100644
--- a/drivers/s390/char/sclp_early.c
+++ b/drivers/s390/char/sclp_early.c
@@ -221,6 +221,36 @@ static int __init sclp_set_event_mask(struct init_sccb *sccb,
return sclp_cmd_early(SCLP_CMDW_WRITE_EVENT_MASK, sccb);
}
+static struct sclp_core_info sclp_core_info_early __initdata;
+static int sclp_core_info_early_valid __initdata;
+
+static void __init sclp_init_core_info_early(struct read_cpu_info_sccb *sccb)
+{
+ int rc;
+
+ if (!SCLP_HAS_CPU_INFO)
+ return;
+ memset(sccb, 0, sizeof(*sccb));
+ sccb->header.length = sizeof(*sccb);
+ do {
+ rc = sclp_cmd_sync_early(SCLP_CMDW_READ_CPU_INFO, sccb);
+ } while (rc == -EBUSY);
+ if (rc)
+ return;
+ if (sccb->header.response_code != 0x0010)
+ return;
+ sclp_fill_core_info(&sclp_core_info_early, sccb);
+ sclp_core_info_early_valid = 1;
+}
+
+int __init _sclp_get_core_info_early(struct sclp_core_info *info)
+{
+ if (!sclp_core_info_early_valid)
+ return -EIO;
+ *info = sclp_core_info_early;
+ return 0;
+}
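+
+With the SCCB contents cached during early boot, later readers can avoid a second SCLP round trip. A sketch of how the two entry points are meant to combine (the wrapper name and placement are assumptions, not part of this hunk):
+
+	static int __init get_core_info(struct sclp_core_info *info)
+	{
+		/* prefer the copy cached by sclp_init_core_info_early() */
+		if (!_sclp_get_core_info_early(info))
+			return 0;
+		/* otherwise issue a live READ_CPU_INFO request */
+		return _sclp_get_core_info(info);
+	}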
+
static long __init sclp_hsa_size_init(struct sdias_sccb *sccb)
{
sccb_init_eq_size(sccb);
@@ -293,6 +323,7 @@ void __init sclp_early_detect(void)
void *sccb = &sccb_early;
sclp_facilities_detect(sccb);
+ sclp_init_core_info_early(sccb);
sclp_hsa_size_detect(sccb);
/* Turn off SCLP event notifications. Also save remote masks in the
diff --git a/drivers/s390/char/sclp_quiesce.c b/drivers/s390/char/sclp_quiesce.c
index 475e470d9768..e4958511168a 100644
--- a/drivers/s390/char/sclp_quiesce.c
+++ b/drivers/s390/char/sclp_quiesce.c
@@ -6,7 +6,6 @@
* Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
*/
-#include <linux/module.h>
#include <linux/types.h>
#include <linux/cpumask.h>
#include <linux/smp.h>
@@ -80,5 +79,4 @@ static int __init sclp_quiesce_init(void)
{
return sclp_register(&sclp_quiesce_event);
}
-
-module_init(sclp_quiesce_init);
+device_initcall(sclp_quiesce_init);
diff --git a/drivers/s390/char/sclp_tty.c b/drivers/s390/char/sclp_tty.c
index 3c6e174e19b6..9259017a1295 100644
--- a/drivers/s390/char/sclp_tty.c
+++ b/drivers/s390/char/sclp_tty.c
@@ -7,7 +7,6 @@
* Martin Schwidefsky <schwidefsky@de.ibm.com>
*/
-#include <linux/module.h>
#include <linux/kmod.h>
#include <linux/tty.h>
#include <linux/tty_driver.h>
@@ -573,4 +572,4 @@ sclp_tty_init(void)
sclp_tty_driver = driver;
return 0;
}
-module_init(sclp_tty_init);
+device_initcall(sclp_tty_init);
diff --git a/drivers/s390/char/vmlogrdr.c b/drivers/s390/char/vmlogrdr.c
index e883063c7258..3167e8581994 100644
--- a/drivers/s390/char/vmlogrdr.c
+++ b/drivers/s390/char/vmlogrdr.c
@@ -870,7 +870,7 @@ static int __init vmlogrdr_init(void)
goto cleanup;
for (i=0; i < MAXMINOR; ++i ) {
- sys_ser[i].buffer = (char *) get_zeroed_page(GFP_KERNEL);
+ sys_ser[i].buffer = (char *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
if (!sys_ser[i].buffer) {
rc = -ENOMEM;
break;
diff --git a/drivers/s390/char/zcore.c b/drivers/s390/char/zcore.c
index 16992e2a40ad..f771e5e9e26b 100644
--- a/drivers/s390/char/zcore.c
+++ b/drivers/s390/char/zcore.c
@@ -7,6 +7,7 @@
*
* Copyright IBM Corp. 2003, 2008
* Author(s): Michael Holzheu
+ * License: GPL
*/
#define KMSG_COMPONENT "zdump"
@@ -16,7 +17,6 @@
#include <linux/slab.h>
#include <linux/miscdevice.h>
#include <linux/debugfs.h>
-#include <linux/module.h>
#include <linux/memblock.h>
#include <asm/asm-offsets.h>
@@ -320,7 +320,7 @@ static int __init zcore_init(void)
goto fail;
}
- pr_alert("DETECTED 'S390X (64 bit) OS'\n");
+ pr_alert("The dump process started for a 64-bit operating system\n");
rc = init_cpu_info();
if (rc)
goto fail;
@@ -364,22 +364,4 @@ fail:
diag308(DIAG308_REL_HSA, NULL);
return rc;
}
-
-static void __exit zcore_exit(void)
-{
- debug_unregister(zcore_dbf);
- sclp_sdias_exit();
- free_page((unsigned long) ipl_block);
- debugfs_remove(zcore_hsa_file);
- debugfs_remove(zcore_reipl_file);
- debugfs_remove(zcore_memmap_file);
- debugfs_remove(zcore_dir);
- diag308(DIAG308_REL_HSA, NULL);
-}
-
-MODULE_AUTHOR("Copyright IBM Corp. 2003,2008");
-MODULE_DESCRIPTION("zcore module for zfcpdump support");
-MODULE_LICENSE("GPL");
-
subsys_initcall(zcore_init);
-module_exit(zcore_exit);
diff --git a/drivers/s390/cio/cmf.c b/drivers/s390/cio/cmf.c
index 268aa23afa01..6b6386e9a500 100644
--- a/drivers/s390/cio/cmf.c
+++ b/drivers/s390/cio/cmf.c
@@ -30,7 +30,7 @@
#include <linux/device.h>
#include <linux/init.h>
#include <linux/list.h>
-#include <linux/module.h>
+#include <linux/export.h>
#include <linux/moduleparam.h>
#include <linux/slab.h>
#include <linux/timex.h> /* get_tod_clock() */
@@ -1389,13 +1389,7 @@ static int __init init_cmf(void)
"%s (mode %s)\n", format_string, detect_string);
return 0;
}
-module_init(init_cmf);
-
-
-MODULE_AUTHOR("Arnd Bergmann <arndb@de.ibm.com>");
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("channel measurement facility base driver\n"
- "Copyright IBM Corp. 2003\n");
+device_initcall(init_cmf);
EXPORT_SYMBOL_GPL(enable_cmf);
EXPORT_SYMBOL_GPL(disable_cmf);
diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c
index 3d2b20ee613f..bc099b61394d 100644
--- a/drivers/s390/cio/css.c
+++ b/drivers/s390/cio/css.c
@@ -5,12 +5,14 @@
*
* Author(s): Arnd Bergmann (arndb@de.ibm.com)
* Cornelia Huck (cornelia.huck@de.ibm.com)
+ *
+ * License: GPL
*/
#define KMSG_COMPONENT "cio"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
-#include <linux/module.h>
+#include <linux/export.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/slab.h>
@@ -1285,5 +1287,3 @@ void css_driver_unregister(struct css_driver *cdrv)
driver_unregister(&cdrv->drv);
}
EXPORT_SYMBOL_GPL(css_driver_unregister);
-
-MODULE_LICENSE("GPL");
diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c
index 6a58bc8f46e2..79823ee9c100 100644
--- a/drivers/s390/cio/device.c
+++ b/drivers/s390/cio/device.c
@@ -5,12 +5,14 @@
* Author(s): Arnd Bergmann (arndb@de.ibm.com)
* Cornelia Huck (cornelia.huck@de.ibm.com)
* Martin Schwidefsky (schwidefsky@de.ibm.com)
+ *
+ * License: GPL
*/
#define KMSG_COMPONENT "cio"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
-#include <linux/module.h>
+#include <linux/export.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/errno.h>
@@ -145,7 +147,6 @@ static struct css_device_id io_subchannel_ids[] = {
{ .match_flags = 0x1, .type = SUBCHANNEL_TYPE_IO, },
{ /* end of list */ },
};
-MODULE_DEVICE_TABLE(css, io_subchannel_ids);
static int io_subchannel_prepare(struct subchannel *sch)
{
@@ -2150,7 +2151,6 @@ int ccw_device_siosl(struct ccw_device *cdev)
}
EXPORT_SYMBOL_GPL(ccw_device_siosl);
-MODULE_LICENSE("GPL");
EXPORT_SYMBOL(ccw_device_set_online);
EXPORT_SYMBOL(ccw_device_set_offline);
EXPORT_SYMBOL(ccw_driver_register);
diff --git a/drivers/s390/cio/device.h b/drivers/s390/cio/device.h
index 065b1be98e2c..ec497af99dd8 100644
--- a/drivers/s390/cio/device.h
+++ b/drivers/s390/cio/device.h
@@ -13,7 +13,6 @@
*/
enum dev_state {
DEV_STATE_NOT_OPER,
- DEV_STATE_SENSE_PGID,
DEV_STATE_SENSE_ID,
DEV_STATE_OFFLINE,
DEV_STATE_VERIFY,
diff --git a/drivers/s390/cio/device_fsm.c b/drivers/s390/cio/device_fsm.c
index 8327d47e08b6..9afb5ce13007 100644
--- a/drivers/s390/cio/device_fsm.c
+++ b/drivers/s390/cio/device_fsm.c
@@ -1058,12 +1058,6 @@ fsm_func_t *dev_jumptable[NR_DEV_STATES][NR_DEV_EVENTS] = {
[DEV_EVENT_TIMEOUT] = ccw_device_nop,
[DEV_EVENT_VERIFY] = ccw_device_nop,
},
- [DEV_STATE_SENSE_PGID] = {
- [DEV_EVENT_NOTOPER] = ccw_device_request_event,
- [DEV_EVENT_INTERRUPT] = ccw_device_request_event,
- [DEV_EVENT_TIMEOUT] = ccw_device_request_event,
- [DEV_EVENT_VERIFY] = ccw_device_nop,
- },
[DEV_STATE_SENSE_ID] = {
[DEV_EVENT_NOTOPER] = ccw_device_request_event,
[DEV_EVENT_INTERRUPT] = ccw_device_request_event,
diff --git a/drivers/s390/cio/device_ops.c b/drivers/s390/cio/device_ops.c
index 877d9f601e63..cf8c4ac6323a 100644
--- a/drivers/s390/cio/device_ops.c
+++ b/drivers/s390/cio/device_ops.c
@@ -3,8 +3,10 @@
*
* Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
* Cornelia Huck (cornelia.huck@de.ibm.com)
+ *
+ * License: GPL
*/
-#include <linux/module.h>
+#include <linux/export.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/slab.h>
@@ -676,7 +678,6 @@ void ccw_device_get_schid(struct ccw_device *cdev, struct subchannel_id *schid)
}
EXPORT_SYMBOL_GPL(ccw_device_get_schid);
-MODULE_LICENSE("GPL");
EXPORT_SYMBOL(ccw_device_set_options_mask);
EXPORT_SYMBOL(ccw_device_set_options);
EXPORT_SYMBOL(ccw_device_clear_options);
diff --git a/drivers/s390/crypto/Makefile b/drivers/s390/crypto/Makefile
index b8ab18676e69..0a7fb83f35e5 100644
--- a/drivers/s390/crypto/Makefile
+++ b/drivers/s390/crypto/Makefile
@@ -2,10 +2,11 @@
# S/390 crypto devices
#
-ap-objs := ap_bus.o
-# zcrypt_api depends on ap
-obj-$(CONFIG_ZCRYPT) += ap.o zcrypt_api.o
-# msgtype* depend on zcrypt_api
-obj-$(CONFIG_ZCRYPT) += zcrypt_msgtype6.o zcrypt_msgtype50.o
-# adapter drivers depend on ap, zcrypt_api and msgtype*
+ap-objs := ap_bus.o ap_card.o ap_queue.o
+obj-$(subst m,y,$(CONFIG_ZCRYPT)) += ap.o
+# zcrypt_api.o and zcrypt_msgtype*.o depend on ap.o
+zcrypt-objs := zcrypt_api.o zcrypt_card.o zcrypt_queue.o
+zcrypt-objs += zcrypt_msgtype6.o zcrypt_msgtype50.o
+obj-$(CONFIG_ZCRYPT) += zcrypt.o
+# adapter drivers depend on ap.o and zcrypt.o
obj-$(CONFIG_ZCRYPT) += zcrypt_pcixcc.o zcrypt_cex2a.o zcrypt_cex4.o
diff --git a/drivers/s390/crypto/ap_asm.h b/drivers/s390/crypto/ap_asm.h
new file mode 100644
index 000000000000..7a630047c372
--- /dev/null
+++ b/drivers/s390/crypto/ap_asm.h
@@ -0,0 +1,191 @@
+/*
+ * Copyright IBM Corp. 2016
+ * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
+ *
+ * Adjunct processor bus inline assemblies.
+ */
+
+#ifndef _AP_ASM_H_
+#define _AP_ASM_H_
+
+#include <asm/isc.h>
+
+/**
+ * ap_instructions_available() - Test if AP instructions are available.
+ *
+ * Returns 0 if the AP instructions are installed.
+ */
+static inline int ap_instructions_available(void)
+{
+ register unsigned long reg0 asm ("0") = AP_MKQID(0, 0);
+ register unsigned long reg1 asm ("1") = -ENODEV;
+ register unsigned long reg2 asm ("2") = 0UL;
+
+ asm volatile(
+ " .long 0xb2af0000\n" /* PQAP(TAPQ) */
+ "0: la %1,0\n"
+ "1:\n"
+ EX_TABLE(0b, 1b)
+ : "+d" (reg0), "+d" (reg1), "+d" (reg2) : : "cc");
+ return reg1;
+}
+
+/**
+ * ap_tapq(): Test adjunct processor queue.
+ * @qid: The AP queue number
+ * @info: Pointer to queue descriptor
+ *
+ * Returns AP queue status structure.
+ */
+static inline struct ap_queue_status ap_tapq(ap_qid_t qid, unsigned long *info)
+{
+ register unsigned long reg0 asm ("0") = qid;
+ register struct ap_queue_status reg1 asm ("1");
+ register unsigned long reg2 asm ("2") = 0UL;
+
+ asm volatile(".long 0xb2af0000" /* PQAP(TAPQ) */
+ : "+d" (reg0), "=d" (reg1), "+d" (reg2) : : "cc");
+ if (info)
+ *info = reg2;
+ return reg1;
+}
+
+/**
+ * ap_rapq(): Reset adjunct processor queue.
+ * @qid: The AP queue number
+ *
+ * Returns AP queue status structure.
+ */
+static inline struct ap_queue_status ap_rapq(ap_qid_t qid)
+{
+ register unsigned long reg0 asm ("0") = qid | 0x01000000UL;
+ register struct ap_queue_status reg1 asm ("1");
+ register unsigned long reg2 asm ("2") = 0UL;
+
+ asm volatile(
+ ".long 0xb2af0000" /* PQAP(RAPQ) */
+ : "+d" (reg0), "=d" (reg1), "+d" (reg2) : : "cc");
+ return reg1;
+}
+
+/**
+ * ap_aqic(): Enable interruption for a specific AP.
+ * @qid: The AP queue number
+ * @ind: The notification indicator byte
+ *
+ * Returns AP queue status.
+ */
+static inline struct ap_queue_status ap_aqic(ap_qid_t qid, void *ind)
+{
+ register unsigned long reg0 asm ("0") = qid | (3UL << 24);
+ register unsigned long reg1_in asm ("1") = (8UL << 44) | AP_ISC;
+ register struct ap_queue_status reg1_out asm ("1");
+ register void *reg2 asm ("2") = ind;
+
+ asm volatile(
+ ".long 0xb2af0000" /* PQAP(AQIC) */
+ : "+d" (reg0), "+d" (reg1_in), "=d" (reg1_out), "+d" (reg2)
+ :
+ : "cc");
+ return reg1_out;
+}
+
+/**
+ * ap_qci(): Get AP configuration data
+ *
+ * Returns 0 on success, or -EOPNOTSUPP.
+ */
+static inline int ap_qci(void *config)
+{
+ register unsigned long reg0 asm ("0") = 0x04000000UL;
+ register unsigned long reg1 asm ("1") = -EINVAL;
+ register void *reg2 asm ("2") = (void *) config;
+
+ asm volatile(
+ ".long 0xb2af0000\n" /* PQAP(QCI) */
+ "0: la %1,0\n"
+ "1:\n"
+ EX_TABLE(0b, 1b)
+ : "+d" (reg0), "+d" (reg1), "+d" (reg2)
+ :
+ : "cc", "memory");
+
+ return reg1;
+}
+
+/**
+ * ap_nqap(): Send message to adjunct processor queue.
+ * @qid: The AP queue number
+ * @psmid: The program supplied message identifier
+ * @msg: The message text
+ * @length: The message length
+ *
+ * Returns AP queue status structure.
+ * Condition code 1 on NQAP can't happen because the L bit is 1.
+ * Condition code 2 on NQAP also means the send is incomplete,
+ * because a segment boundary was reached. The NQAP is repeated.
+ */
+static inline struct ap_queue_status ap_nqap(ap_qid_t qid,
+ unsigned long long psmid,
+ void *msg, size_t length)
+{
+ struct msgblock { char _[length]; };
+ register unsigned long reg0 asm ("0") = qid | 0x40000000UL;
+ register struct ap_queue_status reg1 asm ("1");
+ register unsigned long reg2 asm ("2") = (unsigned long) msg;
+ register unsigned long reg3 asm ("3") = (unsigned long) length;
+ register unsigned long reg4 asm ("4") = (unsigned int) (psmid >> 32);
+ register unsigned long reg5 asm ("5") = psmid & 0xffffffff;
+
+ asm volatile (
+ "0: .long 0xb2ad0042\n" /* NQAP */
+ " brc 2,0b"
+ : "+d" (reg0), "=d" (reg1), "+d" (reg2), "+d" (reg3)
+ : "d" (reg4), "d" (reg5), "m" (*(struct msgblock *) msg)
+ : "cc");
+ return reg1;
+}
+
+/**
+ * ap_dqap(): Receive message from adjunct processor queue.
+ * @qid: The AP queue number
+ * @psmid: Pointer to program supplied message identifier
+ * @msg: The message text
+ * @length: The message length
+ *
+ * Returns AP queue status structure.
+ * Condition code 1 on DQAP means the receive has taken place
+ * but only partially. The response is incomplete, hence the
+ * DQAP is repeated.
+ * Condition code 2 on DQAP also means the receive is incomplete,
+ * this time because a segment boundary was reached. Again, the
+ * DQAP is repeated.
+ * Note that gpr2 is used by the DQAP instruction to keep track of
+ * any 'residual' length, in case the instruction gets interrupted.
+ * Hence it gets zeroed before the instruction.
+ */
+static inline struct ap_queue_status ap_dqap(ap_qid_t qid,
+ unsigned long long *psmid,
+ void *msg, size_t length)
+{
+ struct msgblock { char _[length]; };
+ register unsigned long reg0 asm("0") = qid | 0x80000000UL;
+ register struct ap_queue_status reg1 asm ("1");
+ register unsigned long reg2 asm("2") = 0UL;
+ register unsigned long reg4 asm("4") = (unsigned long) msg;
+ register unsigned long reg5 asm("5") = (unsigned long) length;
+ register unsigned long reg6 asm("6") = 0UL;
+ register unsigned long reg7 asm("7") = 0UL;
+
+
+ asm volatile(
+ "0: .long 0xb2ae0064\n" /* DQAP */
+ " brc 6,0b\n"
+ : "+d" (reg0), "=d" (reg1), "+d" (reg2),
+ "+d" (reg4), "+d" (reg5), "+d" (reg6), "+d" (reg7),
+ "=m" (*(struct msgblock *) msg) : : "cc");
+ *psmid = (((unsigned long long) reg6) << 32) + reg7;
+ return reg1;
+}
+
+#endif /* _AP_ASM_H_ */
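+
+Taken together, ap_nqap() and ap_dqap() implement one request/reply round trip over an AP queue; the inline retry branches ("brc 2,0b" and "brc 6,0b") resume the instruction after a segment boundary or partial transfer, as the kernel-doc comments above describe. A hypothetical caller, with buffer setup and full error handling elided:
+
+	struct ap_queue_status status;
+	unsigned long long psmid = 0x0102030405060708ULL;
+
+	status = ap_nqap(qid, psmid, msg, msglen);
+	if (status.response_code == AP_RESPONSE_NORMAL)
+		status = ap_dqap(qid, &psmid, reply, replylen);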
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
index ed92fb09fc8e..6d75984a3d85 100644
--- a/drivers/s390/crypto/ap_bus.c
+++ b/drivers/s390/crypto/ap_bus.c
@@ -46,8 +46,12 @@
#include <linux/ktime.h>
#include <asm/facility.h>
#include <linux/crypto.h>
+#include <linux/mod_devicetable.h>
+#include <linux/debugfs.h>
#include "ap_bus.h"
+#include "ap_asm.h"
+#include "ap_debug.h"
/*
* Module description.
@@ -62,6 +66,7 @@ MODULE_ALIAS_CRYPTO("z90crypt");
* Module parameter
*/
int ap_domain_index = -1; /* Adjunct Processor Domain Index */
+static DEFINE_SPINLOCK(ap_domain_lock);
module_param_named(domain, ap_domain_index, int, S_IRUSR|S_IRGRP);
MODULE_PARM_DESC(domain, "domain index for ap devices");
EXPORT_SYMBOL(ap_domain_index);
@@ -70,13 +75,21 @@ static int ap_thread_flag = 0;
module_param_named(poll_thread, ap_thread_flag, int, S_IRUSR|S_IRGRP);
MODULE_PARM_DESC(poll_thread, "Turn on/off poll thread, default is 0 (off).");
-static struct device *ap_root_device = NULL;
+static struct device *ap_root_device;
+
+DEFINE_SPINLOCK(ap_list_lock);
+LIST_HEAD(ap_card_list);
+
static struct ap_config_info *ap_configuration;
-static DEFINE_SPINLOCK(ap_device_list_lock);
-static LIST_HEAD(ap_device_list);
static bool initialised;
/*
+ * AP bus related debug feature things.
+ */
+static struct dentry *ap_dbf_root;
+debug_info_t *ap_dbf_info;
+
+/*
* Workqueue timer for bus rescan.
*/
static struct timer_list ap_config_timer;
@@ -89,7 +102,6 @@ static DECLARE_WORK(ap_scan_work, ap_scan_bus);
*/
static void ap_tasklet_fn(unsigned long);
static DECLARE_TASKLET(ap_tasklet, ap_tasklet_fn, 0);
-static atomic_t ap_poll_requests = ATOMIC_INIT(0);
static DECLARE_WAIT_QUEUE_HEAD(ap_poll_wait);
static struct task_struct *ap_poll_kthread = NULL;
static DEFINE_MUTEX(ap_poll_thread_mutex);
@@ -129,23 +141,17 @@ static inline int ap_using_interrupts(void)
}
/**
- * ap_intructions_available() - Test if AP instructions are available.
+ * ap_airq_ptr() - Get the address of the adapter interrupt indicator
*
- * Returns 0 if the AP instructions are installed.
+ * Returns the address of the local-summary-indicator of the adapter
+ * interrupt handler for AP, or NULL if adapter interrupts are not
+ * available.
*/
-static inline int ap_instructions_available(void)
+void *ap_airq_ptr(void)
{
- register unsigned long reg0 asm ("0") = AP_MKQID(0,0);
- register unsigned long reg1 asm ("1") = -ENODEV;
- register unsigned long reg2 asm ("2") = 0UL;
-
- asm volatile(
- " .long 0xb2af0000\n" /* PQAP(TAPQ) */
- "0: la %1,0\n"
- "1:\n"
- EX_TABLE(0b, 1b)
- : "+d" (reg0), "+d" (reg1), "+d" (reg2) : : "cc" );
- return reg1;
+ if (ap_using_interrupts())
+ return ap_airq.lsi_ptr;
+ return NULL;
}
/**
@@ -169,19 +175,6 @@ static int ap_configuration_available(void)
return test_facility(12);
}
-static inline struct ap_queue_status
-__pqap_tapq(ap_qid_t qid, unsigned long *info)
-{
- register unsigned long reg0 asm ("0") = qid;
- register struct ap_queue_status reg1 asm ("1");
- register unsigned long reg2 asm ("2") = 0UL;
-
- asm volatile(".long 0xb2af0000" /* PQAP(TAPQ) */
- : "+d" (reg0), "=d" (reg1), "+d" (reg2) : : "cc");
- *info = reg2;
- return reg1;
-}
-
/**
* ap_test_queue(): Test adjunct processor queue.
* @qid: The AP queue number
@@ -192,85 +185,16 @@ __pqap_tapq(ap_qid_t qid, unsigned long *info)
static inline struct ap_queue_status
ap_test_queue(ap_qid_t qid, unsigned long *info)
{
- struct ap_queue_status aqs;
- unsigned long _info;
-
if (test_facility(15))
qid |= 1UL << 23; /* set APFT T bit*/
- aqs = __pqap_tapq(qid, &_info);
- if (info)
- *info = _info;
- return aqs;
-}
-
-/**
- * ap_reset_queue(): Reset adjunct processor queue.
- * @qid: The AP queue number
- *
- * Returns AP queue status structure.
- */
-static inline struct ap_queue_status ap_reset_queue(ap_qid_t qid)
-{
- register unsigned long reg0 asm ("0") = qid | 0x01000000UL;
- register struct ap_queue_status reg1 asm ("1");
- register unsigned long reg2 asm ("2") = 0UL;
-
- asm volatile(
- ".long 0xb2af0000" /* PQAP(RAPQ) */
- : "+d" (reg0), "=d" (reg1), "+d" (reg2) : : "cc");
- return reg1;
-}
-
-/**
- * ap_queue_interruption_control(): Enable interruption for a specific AP.
- * @qid: The AP queue number
- * @ind: The notification indicator byte
- *
- * Returns AP queue status.
- */
-static inline struct ap_queue_status
-ap_queue_interruption_control(ap_qid_t qid, void *ind)
-{
- register unsigned long reg0 asm ("0") = qid | 0x03000000UL;
- register unsigned long reg1_in asm ("1") = 0x0000800000000000UL | AP_ISC;
- register struct ap_queue_status reg1_out asm ("1");
- register void *reg2 asm ("2") = ind;
- asm volatile(
- ".long 0xb2af0000" /* PQAP(AQIC) */
- : "+d" (reg0), "+d" (reg1_in), "=d" (reg1_out), "+d" (reg2)
- :
- : "cc" );
- return reg1_out;
-}
-
-/**
- * ap_query_configuration(): Get AP configuration data
- *
- * Returns 0 on success, or -EOPNOTSUPP.
- */
-static inline int __ap_query_configuration(void)
-{
- register unsigned long reg0 asm ("0") = 0x04000000UL;
- register unsigned long reg1 asm ("1") = -EINVAL;
- register void *reg2 asm ("2") = (void *) ap_configuration;
-
- asm volatile(
- ".long 0xb2af0000\n" /* PQAP(QCI) */
- "0: la %1,0\n"
- "1:\n"
- EX_TABLE(0b, 1b)
- : "+d" (reg0), "+d" (reg1), "+d" (reg2)
- :
- : "cc");
-
- return reg1;
+ return ap_tapq(qid, info);
}
static inline int ap_query_configuration(void)
{
if (!ap_configuration)
return -EOPNOTSUPP;
- return __ap_query_configuration();
+ return ap_qci(ap_configuration);
}
/**
@@ -331,162 +255,6 @@ static inline int ap_test_config_domain(unsigned int domain)
}
/**
- * ap_queue_enable_interruption(): Enable interruption on an AP.
- * @qid: The AP queue number
- * @ind: the notification indicator byte
- *
- * Enables interruption on AP queue via ap_queue_interruption_control(). Based
- * on the return value it waits a while and tests the AP queue if interrupts
- * have been switched on using ap_test_queue().
- */
-static int ap_queue_enable_interruption(struct ap_device *ap_dev, void *ind)
-{
- struct ap_queue_status status;
-
- status = ap_queue_interruption_control(ap_dev->qid, ind);
- switch (status.response_code) {
- case AP_RESPONSE_NORMAL:
- case AP_RESPONSE_OTHERWISE_CHANGED:
- return 0;
- case AP_RESPONSE_Q_NOT_AVAIL:
- case AP_RESPONSE_DECONFIGURED:
- case AP_RESPONSE_CHECKSTOPPED:
- case AP_RESPONSE_INVALID_ADDRESS:
- pr_err("Registering adapter interrupts for AP %d failed\n",
- AP_QID_DEVICE(ap_dev->qid));
- return -EOPNOTSUPP;
- case AP_RESPONSE_RESET_IN_PROGRESS:
- case AP_RESPONSE_BUSY:
- default:
- return -EBUSY;
- }
-}
-
-static inline struct ap_queue_status
-__nqap(ap_qid_t qid, unsigned long long psmid, void *msg, size_t length)
-{
- typedef struct { char _[length]; } msgblock;
- register unsigned long reg0 asm ("0") = qid | 0x40000000UL;
- register struct ap_queue_status reg1 asm ("1");
- register unsigned long reg2 asm ("2") = (unsigned long) msg;
- register unsigned long reg3 asm ("3") = (unsigned long) length;
- register unsigned long reg4 asm ("4") = (unsigned int) (psmid >> 32);
- register unsigned long reg5 asm ("5") = psmid & 0xffffffff;
-
- asm volatile (
- "0: .long 0xb2ad0042\n" /* NQAP */
- " brc 2,0b"
- : "+d" (reg0), "=d" (reg1), "+d" (reg2), "+d" (reg3)
- : "d" (reg4), "d" (reg5), "m" (*(msgblock *) msg)
- : "cc");
- return reg1;
-}
-
-/**
- * __ap_send(): Send message to adjunct processor queue.
- * @qid: The AP queue number
- * @psmid: The program supplied message identifier
- * @msg: The message text
- * @length: The message length
- * @special: Special Bit
- *
- * Returns AP queue status structure.
- * Condition code 1 on NQAP can't happen because the L bit is 1.
- * Condition code 2 on NQAP also means the send is incomplete,
- * because a segment boundary was reached. The NQAP is repeated.
- */
-static inline struct ap_queue_status
-__ap_send(ap_qid_t qid, unsigned long long psmid, void *msg, size_t length,
- unsigned int special)
-{
- if (special == 1)
- qid |= 0x400000UL;
- return __nqap(qid, psmid, msg, length);
-}
-
-int ap_send(ap_qid_t qid, unsigned long long psmid, void *msg, size_t length)
-{
- struct ap_queue_status status;
-
- status = __ap_send(qid, psmid, msg, length, 0);
- switch (status.response_code) {
- case AP_RESPONSE_NORMAL:
- return 0;
- case AP_RESPONSE_Q_FULL:
- case AP_RESPONSE_RESET_IN_PROGRESS:
- return -EBUSY;
- case AP_RESPONSE_REQ_FAC_NOT_INST:
- return -EINVAL;
- default: /* Device is gone. */
- return -ENODEV;
- }
-}
-EXPORT_SYMBOL(ap_send);
-
-/**
- * __ap_recv(): Receive message from adjunct processor queue.
- * @qid: The AP queue number
- * @psmid: Pointer to program supplied message identifier
- * @msg: The message text
- * @length: The message length
- *
- * Returns AP queue status structure.
- * Condition code 1 on DQAP means the receive has taken place
- * but only partially. The response is incomplete, hence the
- * DQAP is repeated.
- * Condition code 2 on DQAP also means the receive is incomplete,
- * this time because a segment boundary was reached. Again, the
- * DQAP is repeated.
- * Note that gpr2 is used by the DQAP instruction to keep track of
- * any 'residual' length, in case the instruction gets interrupted.
- * Hence it gets zeroed before the instruction.
- */
-static inline struct ap_queue_status
-__ap_recv(ap_qid_t qid, unsigned long long *psmid, void *msg, size_t length)
-{
- typedef struct { char _[length]; } msgblock;
- register unsigned long reg0 asm("0") = qid | 0x80000000UL;
- register struct ap_queue_status reg1 asm ("1");
- register unsigned long reg2 asm("2") = 0UL;
- register unsigned long reg4 asm("4") = (unsigned long) msg;
- register unsigned long reg5 asm("5") = (unsigned long) length;
- register unsigned long reg6 asm("6") = 0UL;
- register unsigned long reg7 asm("7") = 0UL;
-
-
- asm volatile(
- "0: .long 0xb2ae0064\n" /* DQAP */
- " brc 6,0b\n"
- : "+d" (reg0), "=d" (reg1), "+d" (reg2),
- "+d" (reg4), "+d" (reg5), "+d" (reg6), "+d" (reg7),
- "=m" (*(msgblock *) msg) : : "cc" );
- *psmid = (((unsigned long long) reg6) << 32) + reg7;
- return reg1;
-}
-
-int ap_recv(ap_qid_t qid, unsigned long long *psmid, void *msg, size_t length)
-{
- struct ap_queue_status status;
-
- if (msg == NULL)
- return -EINVAL;
- status = __ap_recv(qid, psmid, msg, length);
- switch (status.response_code) {
- case AP_RESPONSE_NORMAL:
- return 0;
- case AP_RESPONSE_NO_PENDING_REPLY:
- if (status.queue_empty)
- return -ENOENT;
- return -EBUSY;
- case AP_RESPONSE_RESET_IN_PROGRESS:
- return -EBUSY;
- default:
- return -ENODEV;
- }
-}
-EXPORT_SYMBOL(ap_recv);
-
-/**
* ap_query_queue(): Check if an AP queue is available.
* @qid: The AP queue number
* @queue_depth: Pointer to queue depth value
@@ -500,7 +268,7 @@ static int ap_query_queue(ap_qid_t qid, int *queue_depth, int *device_type,
unsigned long info;
int nd;
- if (!ap_test_config_card_id(AP_QID_DEVICE(qid)))
+ if (!ap_test_config_card_id(AP_QID_CARD(qid)))
return -ENODEV;
status = ap_test_queue(qid, &info);
@@ -511,8 +279,28 @@ static int ap_query_queue(ap_qid_t qid, int *queue_depth, int *device_type,
*facilities = (unsigned int)(info >> 32);
/* Update maximum domain id */
nd = (info >> 16) & 0xff;
+ /* if the N bit is available (z13 and newer) */
if ((info & (1UL << 57)) && nd > 0)
ap_max_domain_id = nd;
+ else /* older machine types */
+ ap_max_domain_id = 15;
+ switch (*device_type) {
+ /* For CEX2 and CEX3 the available functions
+ * are not reflected by the facilities bits.
+ * Instead they are coded into the type. So here
+ * modify the function bits based on the type.
+ */
+ case AP_DEVICE_TYPE_CEX2A:
+ case AP_DEVICE_TYPE_CEX3A:
+ *facilities |= 0x08000000;
+ break;
+ case AP_DEVICE_TYPE_CEX2C:
+ case AP_DEVICE_TYPE_CEX3C:
+ *facilities |= 0x10000000;
+ break;
+ default:
+ break;
+ }
return 0;
case AP_RESPONSE_Q_NOT_AVAIL:
case AP_RESPONSE_DECONFIGURED:
@@ -528,9 +316,7 @@ static int ap_query_queue(ap_qid_t qid, int *queue_depth, int *device_type,
}
}
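
The hard-coded masks above map onto the MSB-first bit numbering that ap_test_bit() in ap_bus.h presumably uses for the facilities word (a sketch; bit 3 = coprocessor = 0x10000000, bit 4 = accelerator = 0x08000000):

	static inline int ap_test_bit(unsigned int *ptr, unsigned int nr)
	{
		return (*ptr & (0x80000000u >> nr)) != 0;
	}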
-/* State machine definitions and helpers */
-
-static void ap_sm_wait(enum ap_wait wait)
+void ap_wait(enum ap_wait wait)
{
ktime_t hr_time;
@@ -559,350 +345,21 @@ static void ap_sm_wait(enum ap_wait wait)
}
}
-static enum ap_wait ap_sm_nop(struct ap_device *ap_dev)
-{
- return AP_WAIT_NONE;
-}
-
-/**
- * ap_sm_recv(): Receive pending reply messages from an AP device but do
- * not change the state of the device.
- * @ap_dev: pointer to the AP device
- *
- * Returns AP_WAIT_NONE, AP_WAIT_AGAIN, or AP_WAIT_INTERRUPT
- */
-static struct ap_queue_status ap_sm_recv(struct ap_device *ap_dev)
-{
- struct ap_queue_status status;
- struct ap_message *ap_msg;
-
- status = __ap_recv(ap_dev->qid, &ap_dev->reply->psmid,
- ap_dev->reply->message, ap_dev->reply->length);
- switch (status.response_code) {
- case AP_RESPONSE_NORMAL:
- atomic_dec(&ap_poll_requests);
- ap_dev->queue_count--;
- if (ap_dev->queue_count > 0)
- mod_timer(&ap_dev->timeout,
- jiffies + ap_dev->drv->request_timeout);
- list_for_each_entry(ap_msg, &ap_dev->pendingq, list) {
- if (ap_msg->psmid != ap_dev->reply->psmid)
- continue;
- list_del_init(&ap_msg->list);
- ap_dev->pendingq_count--;
- ap_msg->receive(ap_dev, ap_msg, ap_dev->reply);
- break;
- }
- case AP_RESPONSE_NO_PENDING_REPLY:
- if (!status.queue_empty || ap_dev->queue_count <= 0)
- break;
- /* The card shouldn't forget requests but who knows. */
- atomic_sub(ap_dev->queue_count, &ap_poll_requests);
- ap_dev->queue_count = 0;
- list_splice_init(&ap_dev->pendingq, &ap_dev->requestq);
- ap_dev->requestq_count += ap_dev->pendingq_count;
- ap_dev->pendingq_count = 0;
- break;
- default:
- break;
- }
- return status;
-}
-
-/**
- * ap_sm_read(): Receive pending reply messages from an AP device.
- * @ap_dev: pointer to the AP device
- *
- * Returns AP_WAIT_NONE, AP_WAIT_AGAIN, or AP_WAIT_INTERRUPT
- */
-static enum ap_wait ap_sm_read(struct ap_device *ap_dev)
-{
- struct ap_queue_status status;
-
- if (!ap_dev->reply)
- return AP_WAIT_NONE;
- status = ap_sm_recv(ap_dev);
- switch (status.response_code) {
- case AP_RESPONSE_NORMAL:
- if (ap_dev->queue_count > 0) {
- ap_dev->state = AP_STATE_WORKING;
- return AP_WAIT_AGAIN;
- }
- ap_dev->state = AP_STATE_IDLE;
- return AP_WAIT_NONE;
- case AP_RESPONSE_NO_PENDING_REPLY:
- if (ap_dev->queue_count > 0)
- return AP_WAIT_INTERRUPT;
- ap_dev->state = AP_STATE_IDLE;
- return AP_WAIT_NONE;
- default:
- ap_dev->state = AP_STATE_BORKED;
- return AP_WAIT_NONE;
- }
-}
-
-/**
- * ap_sm_suspend_read(): Receive pending reply messages from an AP device
- * without changing the device state in between. In suspend mode we don't
- * allow sending new requests, therefore just fetch pending replies.
- * @ap_dev: pointer to the AP device
- *
- * Returns AP_WAIT_NONE or AP_WAIT_AGAIN
- */
-static enum ap_wait ap_sm_suspend_read(struct ap_device *ap_dev)
-{
- struct ap_queue_status status;
-
- if (!ap_dev->reply)
- return AP_WAIT_NONE;
- status = ap_sm_recv(ap_dev);
- switch (status.response_code) {
- case AP_RESPONSE_NORMAL:
- if (ap_dev->queue_count > 0)
- return AP_WAIT_AGAIN;
- /* fall through */
- default:
- return AP_WAIT_NONE;
- }
-}
-
-/**
- * ap_sm_write(): Send messages from the request queue to an AP device.
- * @ap_dev: pointer to the AP device
- *
- * Returns AP_WAIT_NONE, AP_WAIT_AGAIN, or AP_WAIT_INTERRUPT
- */
-static enum ap_wait ap_sm_write(struct ap_device *ap_dev)
-{
- struct ap_queue_status status;
- struct ap_message *ap_msg;
-
- if (ap_dev->requestq_count <= 0)
- return AP_WAIT_NONE;
- /* Start the next request on the queue. */
- ap_msg = list_entry(ap_dev->requestq.next, struct ap_message, list);
- status = __ap_send(ap_dev->qid, ap_msg->psmid,
- ap_msg->message, ap_msg->length, ap_msg->special);
- switch (status.response_code) {
- case AP_RESPONSE_NORMAL:
- atomic_inc(&ap_poll_requests);
- ap_dev->queue_count++;
- if (ap_dev->queue_count == 1)
- mod_timer(&ap_dev->timeout,
- jiffies + ap_dev->drv->request_timeout);
- list_move_tail(&ap_msg->list, &ap_dev->pendingq);
- ap_dev->requestq_count--;
- ap_dev->pendingq_count++;
- if (ap_dev->queue_count < ap_dev->queue_depth) {
- ap_dev->state = AP_STATE_WORKING;
- return AP_WAIT_AGAIN;
- }
- /* fall through */
- case AP_RESPONSE_Q_FULL:
- ap_dev->state = AP_STATE_QUEUE_FULL;
- return AP_WAIT_INTERRUPT;
- case AP_RESPONSE_RESET_IN_PROGRESS:
- ap_dev->state = AP_STATE_RESET_WAIT;
- return AP_WAIT_TIMEOUT;
- case AP_RESPONSE_MESSAGE_TOO_BIG:
- case AP_RESPONSE_REQ_FAC_NOT_INST:
- list_del_init(&ap_msg->list);
- ap_dev->requestq_count--;
- ap_msg->rc = -EINVAL;
- ap_msg->receive(ap_dev, ap_msg, NULL);
- return AP_WAIT_AGAIN;
- default:
- ap_dev->state = AP_STATE_BORKED;
- return AP_WAIT_NONE;
- }
-}
-
-/**
- * ap_sm_read_write(): Send and receive messages to/from an AP device.
- * @ap_dev: pointer to the AP device
- *
- * Returns AP_WAIT_NONE, AP_WAIT_AGAIN, or AP_WAIT_INTERRUPT
- */
-static enum ap_wait ap_sm_read_write(struct ap_device *ap_dev)
-{
- return min(ap_sm_read(ap_dev), ap_sm_write(ap_dev));
-}
-
-/**
- * ap_sm_reset(): Reset an AP queue.
- * @qid: The AP queue number
- *
- * Submit the Reset command to an AP queue.
- */
-static enum ap_wait ap_sm_reset(struct ap_device *ap_dev)
-{
- struct ap_queue_status status;
-
- status = ap_reset_queue(ap_dev->qid);
- switch (status.response_code) {
- case AP_RESPONSE_NORMAL:
- case AP_RESPONSE_RESET_IN_PROGRESS:
- ap_dev->state = AP_STATE_RESET_WAIT;
- ap_dev->interrupt = AP_INTR_DISABLED;
- return AP_WAIT_TIMEOUT;
- case AP_RESPONSE_BUSY:
- return AP_WAIT_TIMEOUT;
- case AP_RESPONSE_Q_NOT_AVAIL:
- case AP_RESPONSE_DECONFIGURED:
- case AP_RESPONSE_CHECKSTOPPED:
- default:
- ap_dev->state = AP_STATE_BORKED;
- return AP_WAIT_NONE;
- }
-}
-
-/**
- * ap_sm_reset_wait(): Test queue for completion of the reset operation
- * @ap_dev: pointer to the AP device
- *
- * Returns AP_POLL_IMMEDIATELY, AP_POLL_AFTER_TIMEROUT or 0.
- */
-static enum ap_wait ap_sm_reset_wait(struct ap_device *ap_dev)
-{
- struct ap_queue_status status;
- unsigned long info;
-
- if (ap_dev->queue_count > 0 && ap_dev->reply)
- /* Try to read a completed message and get the status */
- status = ap_sm_recv(ap_dev);
- else
- /* Get the status with TAPQ */
- status = ap_test_queue(ap_dev->qid, &info);
-
- switch (status.response_code) {
- case AP_RESPONSE_NORMAL:
- if (ap_using_interrupts() &&
- ap_queue_enable_interruption(ap_dev,
- ap_airq.lsi_ptr) == 0)
- ap_dev->state = AP_STATE_SETIRQ_WAIT;
- else
- ap_dev->state = (ap_dev->queue_count > 0) ?
- AP_STATE_WORKING : AP_STATE_IDLE;
- return AP_WAIT_AGAIN;
- case AP_RESPONSE_BUSY:
- case AP_RESPONSE_RESET_IN_PROGRESS:
- return AP_WAIT_TIMEOUT;
- case AP_RESPONSE_Q_NOT_AVAIL:
- case AP_RESPONSE_DECONFIGURED:
- case AP_RESPONSE_CHECKSTOPPED:
- default:
- ap_dev->state = AP_STATE_BORKED;
- return AP_WAIT_NONE;
- }
-}
-
-/**
- * ap_sm_setirq_wait(): Test queue for completion of the irq enablement
- * @ap_dev: pointer to the AP device
- *
- * Returns AP_POLL_IMMEDIATELY, AP_POLL_AFTER_TIMEROUT or 0.
- */
-static enum ap_wait ap_sm_setirq_wait(struct ap_device *ap_dev)
-{
- struct ap_queue_status status;
- unsigned long info;
-
- if (ap_dev->queue_count > 0 && ap_dev->reply)
- /* Try to read a completed message and get the status */
- status = ap_sm_recv(ap_dev);
- else
- /* Get the status with TAPQ */
- status = ap_test_queue(ap_dev->qid, &info);
-
- if (status.int_enabled == 1) {
- /* Irqs are now enabled */
- ap_dev->interrupt = AP_INTR_ENABLED;
- ap_dev->state = (ap_dev->queue_count > 0) ?
- AP_STATE_WORKING : AP_STATE_IDLE;
- }
-
- switch (status.response_code) {
- case AP_RESPONSE_NORMAL:
- if (ap_dev->queue_count > 0)
- return AP_WAIT_AGAIN;
- /* fallthrough */
- case AP_RESPONSE_NO_PENDING_REPLY:
- return AP_WAIT_TIMEOUT;
- default:
- ap_dev->state = AP_STATE_BORKED;
- return AP_WAIT_NONE;
- }
-}
-
-/*
- * AP state machine jump table
- */
-static ap_func_t *ap_jumptable[NR_AP_STATES][NR_AP_EVENTS] = {
- [AP_STATE_RESET_START] = {
- [AP_EVENT_POLL] = ap_sm_reset,
- [AP_EVENT_TIMEOUT] = ap_sm_nop,
- },
- [AP_STATE_RESET_WAIT] = {
- [AP_EVENT_POLL] = ap_sm_reset_wait,
- [AP_EVENT_TIMEOUT] = ap_sm_nop,
- },
- [AP_STATE_SETIRQ_WAIT] = {
- [AP_EVENT_POLL] = ap_sm_setirq_wait,
- [AP_EVENT_TIMEOUT] = ap_sm_nop,
- },
- [AP_STATE_IDLE] = {
- [AP_EVENT_POLL] = ap_sm_write,
- [AP_EVENT_TIMEOUT] = ap_sm_nop,
- },
- [AP_STATE_WORKING] = {
- [AP_EVENT_POLL] = ap_sm_read_write,
- [AP_EVENT_TIMEOUT] = ap_sm_reset,
- },
- [AP_STATE_QUEUE_FULL] = {
- [AP_EVENT_POLL] = ap_sm_read,
- [AP_EVENT_TIMEOUT] = ap_sm_reset,
- },
- [AP_STATE_SUSPEND_WAIT] = {
- [AP_EVENT_POLL] = ap_sm_suspend_read,
- [AP_EVENT_TIMEOUT] = ap_sm_nop,
- },
- [AP_STATE_BORKED] = {
- [AP_EVENT_POLL] = ap_sm_nop,
- [AP_EVENT_TIMEOUT] = ap_sm_nop,
- },
-};
-
-static inline enum ap_wait ap_sm_event(struct ap_device *ap_dev,
- enum ap_event event)
-{
- return ap_jumptable[ap_dev->state][event](ap_dev);
-}
-
-static inline enum ap_wait ap_sm_event_loop(struct ap_device *ap_dev,
- enum ap_event event)
-{
- enum ap_wait wait;
-
- while ((wait = ap_sm_event(ap_dev, event)) == AP_WAIT_AGAIN)
- ;
- return wait;
-}
-
/**
* ap_request_timeout(): Handling of request timeouts
* @data: Holds the AP device.
*
* Handles request timeouts.
*/
-static void ap_request_timeout(unsigned long data)
+void ap_request_timeout(unsigned long data)
{
- struct ap_device *ap_dev = (struct ap_device *) data;
+ struct ap_queue *aq = (struct ap_queue *) data;
if (ap_suspend_flag)
return;
- spin_lock_bh(&ap_dev->lock);
- ap_sm_wait(ap_sm_event(ap_dev, AP_EVENT_TIMEOUT));
- spin_unlock_bh(&ap_dev->lock);
+ spin_lock_bh(&aq->lock);
+ ap_wait(ap_sm_event(aq, AP_EVENT_TIMEOUT));
+ spin_unlock_bh(&aq->lock);
}
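
ap_request_timeout() is now armed per queue rather than per device; the expected wiring in ap_queue_create() is a one-liner of the form (hypothetical excerpt, names as in this series):

	setup_timer(&aq->timeout, ap_request_timeout, (unsigned long) aq);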
/**
@@ -937,7 +394,8 @@ static void ap_interrupt_handler(struct airq_struct *airq)
*/
static void ap_tasklet_fn(unsigned long dummy)
{
- struct ap_device *ap_dev;
+ struct ap_card *ac;
+ struct ap_queue *aq;
enum ap_wait wait = AP_WAIT_NONE;
/* Reset the indicator if interrupts are used. Thus new interrupts can
@@ -947,14 +405,35 @@ static void ap_tasklet_fn(unsigned long dummy)
if (ap_using_interrupts())
xchg(ap_airq.lsi_ptr, 0);
- spin_lock(&ap_device_list_lock);
- list_for_each_entry(ap_dev, &ap_device_list, list) {
- spin_lock_bh(&ap_dev->lock);
- wait = min(wait, ap_sm_event_loop(ap_dev, AP_EVENT_POLL));
- spin_unlock_bh(&ap_dev->lock);
+ spin_lock_bh(&ap_list_lock);
+ for_each_ap_card(ac) {
+ for_each_ap_queue(aq, ac) {
+ spin_lock_bh(&aq->lock);
+ wait = min(wait, ap_sm_event_loop(aq, AP_EVENT_POLL));
+ spin_unlock_bh(&aq->lock);
+ }
}
- spin_unlock(&ap_device_list_lock);
- ap_sm_wait(wait);
+ spin_unlock_bh(&ap_list_lock);
+
+ ap_wait(wait);
+}
+
+static int ap_pending_requests(void)
+{
+ struct ap_card *ac;
+ struct ap_queue *aq;
+
+ spin_lock_bh(&ap_list_lock);
+ for_each_ap_card(ac) {
+ for_each_ap_queue(aq, ac) {
+ if (aq->queue_count == 0)
+ continue;
+ spin_unlock_bh(&ap_list_lock);
+ return 1;
+ }
+ }
+ spin_unlock_bh(&ap_list_lock);
+ return 0;
}
/**
@@ -976,8 +455,7 @@ static int ap_poll_thread(void *data)
while (!kthread_should_stop()) {
add_wait_queue(&ap_poll_wait, &wait);
set_current_state(TASK_INTERRUPTIBLE);
- if (ap_suspend_flag ||
- atomic_read(&ap_poll_requests) <= 0) {
+ if (ap_suspend_flag || !ap_pending_requests()) {
schedule();
try_to_freeze();
}
@@ -989,7 +467,8 @@ static int ap_poll_thread(void *data)
continue;
}
ap_tasklet_fn(0);
- } while (!kthread_should_stop());
+ }
+
return 0;
}
@@ -1018,207 +497,8 @@ static void ap_poll_thread_stop(void)
mutex_unlock(&ap_poll_thread_mutex);
}
-/**
- * ap_queue_message(): Queue a request to an AP device.
- * @ap_dev: The AP device to queue the message to
- * @ap_msg: The message that is to be added
- */
-void ap_queue_message(struct ap_device *ap_dev, struct ap_message *ap_msg)
-{
- /* For asynchronous message handling a valid receive-callback
- * is required. */
- BUG_ON(!ap_msg->receive);
-
- spin_lock_bh(&ap_dev->lock);
- /* Queue the message. */
- list_add_tail(&ap_msg->list, &ap_dev->requestq);
- ap_dev->requestq_count++;
- ap_dev->total_request_count++;
- /* Send/receive as many request from the queue as possible. */
- ap_sm_wait(ap_sm_event_loop(ap_dev, AP_EVENT_POLL));
- spin_unlock_bh(&ap_dev->lock);
-}
-EXPORT_SYMBOL(ap_queue_message);
-
-/**
- * ap_cancel_message(): Cancel a crypto request.
- * @ap_dev: The AP device that has the message queued
- * @ap_msg: The message that is to be removed
- *
- * Cancel a crypto request. This is done by removing the request
- * from the device pending or request queue. Note that the
- * request stays on the AP queue. When it finishes the message
- * reply will be discarded because the psmid can't be found.
- */
-void ap_cancel_message(struct ap_device *ap_dev, struct ap_message *ap_msg)
-{
- struct ap_message *tmp;
-
- spin_lock_bh(&ap_dev->lock);
- if (!list_empty(&ap_msg->list)) {
- list_for_each_entry(tmp, &ap_dev->pendingq, list)
- if (tmp->psmid == ap_msg->psmid) {
- ap_dev->pendingq_count--;
- goto found;
- }
- ap_dev->requestq_count--;
-found:
- list_del_init(&ap_msg->list);
- }
- spin_unlock_bh(&ap_dev->lock);
-}
-EXPORT_SYMBOL(ap_cancel_message);
-
-/*
- * AP device related attributes.
- */
-static ssize_t ap_hwtype_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct ap_device *ap_dev = to_ap_dev(dev);
- return snprintf(buf, PAGE_SIZE, "%d\n", ap_dev->device_type);
-}
-
-static DEVICE_ATTR(hwtype, 0444, ap_hwtype_show, NULL);
-
-static ssize_t ap_raw_hwtype_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct ap_device *ap_dev = to_ap_dev(dev);
-
- return snprintf(buf, PAGE_SIZE, "%d\n", ap_dev->raw_hwtype);
-}
-
-static DEVICE_ATTR(raw_hwtype, 0444, ap_raw_hwtype_show, NULL);
-
-static ssize_t ap_depth_show(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- struct ap_device *ap_dev = to_ap_dev(dev);
- return snprintf(buf, PAGE_SIZE, "%d\n", ap_dev->queue_depth);
-}
-
-static DEVICE_ATTR(depth, 0444, ap_depth_show, NULL);
-static ssize_t ap_request_count_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct ap_device *ap_dev = to_ap_dev(dev);
- int rc;
-
- spin_lock_bh(&ap_dev->lock);
- rc = snprintf(buf, PAGE_SIZE, "%d\n", ap_dev->total_request_count);
- spin_unlock_bh(&ap_dev->lock);
- return rc;
-}
-
-static DEVICE_ATTR(request_count, 0444, ap_request_count_show, NULL);
-
-static ssize_t ap_requestq_count_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct ap_device *ap_dev = to_ap_dev(dev);
- int rc;
-
- spin_lock_bh(&ap_dev->lock);
- rc = snprintf(buf, PAGE_SIZE, "%d\n", ap_dev->requestq_count);
- spin_unlock_bh(&ap_dev->lock);
- return rc;
-}
-
-static DEVICE_ATTR(requestq_count, 0444, ap_requestq_count_show, NULL);
-
-static ssize_t ap_pendingq_count_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct ap_device *ap_dev = to_ap_dev(dev);
- int rc;
-
- spin_lock_bh(&ap_dev->lock);
- rc = snprintf(buf, PAGE_SIZE, "%d\n", ap_dev->pendingq_count);
- spin_unlock_bh(&ap_dev->lock);
- return rc;
-}
-
-static DEVICE_ATTR(pendingq_count, 0444, ap_pendingq_count_show, NULL);
-
-static ssize_t ap_reset_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct ap_device *ap_dev = to_ap_dev(dev);
- int rc = 0;
-
- spin_lock_bh(&ap_dev->lock);
- switch (ap_dev->state) {
- case AP_STATE_RESET_START:
- case AP_STATE_RESET_WAIT:
- rc = snprintf(buf, PAGE_SIZE, "Reset in progress.\n");
- break;
- case AP_STATE_WORKING:
- case AP_STATE_QUEUE_FULL:
- rc = snprintf(buf, PAGE_SIZE, "Reset Timer armed.\n");
- break;
- default:
- rc = snprintf(buf, PAGE_SIZE, "No Reset Timer set.\n");
- }
- spin_unlock_bh(&ap_dev->lock);
- return rc;
-}
-
-static DEVICE_ATTR(reset, 0444, ap_reset_show, NULL);
-
-static ssize_t ap_interrupt_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct ap_device *ap_dev = to_ap_dev(dev);
- int rc = 0;
-
- spin_lock_bh(&ap_dev->lock);
- if (ap_dev->state == AP_STATE_SETIRQ_WAIT)
- rc = snprintf(buf, PAGE_SIZE, "Enable Interrupt pending.\n");
- else if (ap_dev->interrupt == AP_INTR_ENABLED)
- rc = snprintf(buf, PAGE_SIZE, "Interrupts enabled.\n");
- else
- rc = snprintf(buf, PAGE_SIZE, "Interrupts disabled.\n");
- spin_unlock_bh(&ap_dev->lock);
- return rc;
-}
-
-static DEVICE_ATTR(interrupt, 0444, ap_interrupt_show, NULL);
-
-static ssize_t ap_modalias_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- return sprintf(buf, "ap:t%02X\n", to_ap_dev(dev)->device_type);
-}
-
-static DEVICE_ATTR(modalias, 0444, ap_modalias_show, NULL);
-
-static ssize_t ap_functions_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct ap_device *ap_dev = to_ap_dev(dev);
- return snprintf(buf, PAGE_SIZE, "0x%08X\n", ap_dev->functions);
-}
-
-static DEVICE_ATTR(ap_functions, 0444, ap_functions_show, NULL);
-
-static struct attribute *ap_dev_attrs[] = {
- &dev_attr_hwtype.attr,
- &dev_attr_raw_hwtype.attr,
- &dev_attr_depth.attr,
- &dev_attr_request_count.attr,
- &dev_attr_requestq_count.attr,
- &dev_attr_pendingq_count.attr,
- &dev_attr_reset.attr,
- &dev_attr_interrupt.attr,
- &dev_attr_modalias.attr,
- &dev_attr_ap_functions.attr,
- NULL
-};
-static struct attribute_group ap_dev_attr_group = {
- .attrs = ap_dev_attrs
-};
+#define is_card_dev(x) ((x)->parent == ap_root_device)
+#define is_queue_dev(x) ((x)->parent != ap_root_device)
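
Both helpers classify a device purely by its parent: card devices hang off ap_root_device, queue devices hang off their card. A minimal sketch of the intended use:

	if (is_card_dev(dev))
		ac = to_ap_card(dev);	/* dev embedded in struct ap_card */
	else
		aq = to_ap_queue(dev);	/* dev embedded in struct ap_queue */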
/**
* ap_bus_match()
@@ -1229,7 +509,6 @@ static struct attribute_group ap_dev_attr_group = {
*/
static int ap_bus_match(struct device *dev, struct device_driver *drv)
{
- struct ap_device *ap_dev = to_ap_dev(dev);
struct ap_driver *ap_drv = to_ap_drv(drv);
struct ap_device_id *id;
@@ -1238,10 +517,14 @@ static int ap_bus_match(struct device *dev, struct device_driver *drv)
* supported types of the device_driver.
*/
for (id = ap_drv->ids; id->match_flags; id++) {
- if ((id->match_flags & AP_DEVICE_ID_MATCH_DEVICE_TYPE) &&
- (id->dev_type != ap_dev->device_type))
- continue;
- return 1;
+ if (is_card_dev(dev) &&
+ id->match_flags & AP_DEVICE_ID_MATCH_CARD_TYPE &&
+ id->dev_type == to_ap_dev(dev)->device_type)
+ return 1;
+ if (is_queue_dev(dev) &&
+ id->match_flags & AP_DEVICE_ID_MATCH_QUEUE_TYPE &&
+ id->dev_type == to_ap_dev(dev)->device_type)
+ return 1;
}
return 0;
}
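
With the split match flags a driver now declares separate id entries for its card and queue devices; a hypothetical table (the example_* name is illustrative, not from this patch):

	static struct ap_device_id example_queue_ids[] = {
		{ .dev_type = AP_DEVICE_TYPE_CEX5,
		  .match_flags = AP_DEVICE_ID_MATCH_QUEUE_TYPE },
		{ /* end of list */ },
	};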
@@ -1273,27 +556,28 @@ static int ap_uevent (struct device *dev, struct kobj_uevent_env *env)
return retval;
}
-static int ap_dev_suspend(struct device *dev, pm_message_t state)
+static int ap_dev_suspend(struct device *dev)
{
struct ap_device *ap_dev = to_ap_dev(dev);
- /* Poll on the device until all requests are finished. */
- spin_lock_bh(&ap_dev->lock);
- ap_dev->state = AP_STATE_SUSPEND_WAIT;
- while (ap_sm_event(ap_dev, AP_EVENT_POLL) != AP_WAIT_NONE)
- ;
- ap_dev->state = AP_STATE_BORKED;
- spin_unlock_bh(&ap_dev->lock);
+ if (ap_dev->drv && ap_dev->drv->suspend)
+ ap_dev->drv->suspend(ap_dev);
return 0;
}
static int ap_dev_resume(struct device *dev)
{
+ struct ap_device *ap_dev = to_ap_dev(dev);
+
+ if (ap_dev->drv && ap_dev->drv->resume)
+ ap_dev->drv->resume(ap_dev);
return 0;
}
static void ap_bus_suspend(void)
{
+ AP_DBF(DBF_DEBUG, "ap_bus_suspend running\n");
+
ap_suspend_flag = 1;
/*
* Disable scanning for devices, thus we do not want to scan
@@ -1303,9 +587,25 @@ static void ap_bus_suspend(void)
tasklet_disable(&ap_tasklet);
}
-static int __ap_devices_unregister(struct device *dev, void *dummy)
+static int __ap_card_devices_unregister(struct device *dev, void *dummy)
+{
+ if (is_card_dev(dev))
+ device_unregister(dev);
+ return 0;
+}
+
+static int __ap_queue_devices_unregister(struct device *dev, void *dummy)
+{
+ if (is_queue_dev(dev))
+ device_unregister(dev);
+ return 0;
+}
+
+static int __ap_queue_devices_with_id_unregister(struct device *dev, void *data)
{
- device_unregister(dev);
+ if (is_queue_dev(dev) &&
+ AP_QID_CARD(to_ap_queue(dev)->qid) == (int)(long) data)
+ device_unregister(dev);
return 0;
}
@@ -1313,8 +613,15 @@ static void ap_bus_resume(void)
{
int rc;
- /* Unconditionally remove all AP devices */
- bus_for_each_dev(&ap_bus_type, NULL, NULL, __ap_devices_unregister);
+ AP_DBF(DBF_DEBUG, "ap_bus_resume running\n");
+
+ /* remove all queue devices */
+ bus_for_each_dev(&ap_bus_type, NULL, NULL,
+ __ap_queue_devices_unregister);
+ /* remove all card devices */
+ bus_for_each_dev(&ap_bus_type, NULL, NULL,
+ __ap_card_devices_unregister);
+
/* Reset thin interrupt setting */
if (ap_interrupts_available() && !ap_using_interrupts()) {
rc = register_adapter_interrupt(&ap_airq);
@@ -1356,25 +663,15 @@ static struct notifier_block ap_power_notifier = {
.notifier_call = ap_power_event,
};
+static SIMPLE_DEV_PM_OPS(ap_bus_pm_ops, ap_dev_suspend, ap_dev_resume);
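
SIMPLE_DEV_PM_OPS() from linux/pm.h replaces the legacy bus_type suspend/resume hooks; under CONFIG_PM_SLEEP it expands to roughly:

	static const struct dev_pm_ops ap_bus_pm_ops = {
		.suspend  = ap_dev_suspend, .resume  = ap_dev_resume,
		.freeze   = ap_dev_suspend, .thaw    = ap_dev_resume,
		.poweroff = ap_dev_suspend, .restore = ap_dev_resume,
	};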
+
static struct bus_type ap_bus_type = {
.name = "ap",
.match = &ap_bus_match,
.uevent = &ap_uevent,
- .suspend = ap_dev_suspend,
- .resume = ap_dev_resume,
+ .pm = &ap_bus_pm_ops,
};
-void ap_device_init_reply(struct ap_device *ap_dev,
- struct ap_message *reply)
-{
- ap_dev->reply = reply;
-
- spin_lock_bh(&ap_dev->lock);
- ap_sm_wait(ap_sm_event(ap_dev, AP_EVENT_POLL));
- spin_unlock_bh(&ap_dev->lock);
-}
-EXPORT_SYMBOL(ap_device_init_reply);
-
static int ap_device_probe(struct device *dev)
{
struct ap_device *ap_dev = to_ap_dev(dev);
@@ -1388,61 +685,22 @@ static int ap_device_probe(struct device *dev)
return rc;
}
-/**
- * __ap_flush_queue(): Flush requests.
- * @ap_dev: Pointer to the AP device
- *
- * Flush all requests from the request/pending queue of an AP device.
- */
-static void __ap_flush_queue(struct ap_device *ap_dev)
-{
- struct ap_message *ap_msg, *next;
-
- list_for_each_entry_safe(ap_msg, next, &ap_dev->pendingq, list) {
- list_del_init(&ap_msg->list);
- ap_dev->pendingq_count--;
- ap_msg->rc = -EAGAIN;
- ap_msg->receive(ap_dev, ap_msg, NULL);
- }
- list_for_each_entry_safe(ap_msg, next, &ap_dev->requestq, list) {
- list_del_init(&ap_msg->list);
- ap_dev->requestq_count--;
- ap_msg->rc = -EAGAIN;
- ap_msg->receive(ap_dev, ap_msg, NULL);
- }
-}
-
-void ap_flush_queue(struct ap_device *ap_dev)
-{
- spin_lock_bh(&ap_dev->lock);
- __ap_flush_queue(ap_dev);
- spin_unlock_bh(&ap_dev->lock);
-}
-EXPORT_SYMBOL(ap_flush_queue);
-
static int ap_device_remove(struct device *dev)
{
struct ap_device *ap_dev = to_ap_dev(dev);
struct ap_driver *ap_drv = ap_dev->drv;
- ap_flush_queue(ap_dev);
- del_timer_sync(&ap_dev->timeout);
- spin_lock_bh(&ap_device_list_lock);
- list_del_init(&ap_dev->list);
- spin_unlock_bh(&ap_device_list_lock);
+ spin_lock_bh(&ap_list_lock);
+ if (is_card_dev(dev))
+ list_del_init(&to_ap_card(dev)->list);
+ else
+ list_del_init(&to_ap_queue(dev)->list);
+ spin_unlock_bh(&ap_list_lock);
if (ap_drv->remove)
ap_drv->remove(ap_dev);
- spin_lock_bh(&ap_dev->lock);
- atomic_sub(ap_dev->queue_count, &ap_poll_requests);
- spin_unlock_bh(&ap_dev->lock);
return 0;
}
-static void ap_device_release(struct device *dev)
-{
- kfree(to_ap_dev(dev));
-}
-
int ap_driver_register(struct ap_driver *ap_drv, struct module *owner,
char *name)
{
@@ -1485,18 +743,30 @@ static ssize_t ap_domain_show(struct bus_type *bus, char *buf)
return snprintf(buf, PAGE_SIZE, "%d\n", ap_domain_index);
}
-static BUS_ATTR(ap_domain, 0444, ap_domain_show, NULL);
+static ssize_t ap_domain_store(struct bus_type *bus,
+ const char *buf, size_t count)
+{
+ int domain;
+
+ if (sscanf(buf, "%i\n", &domain) != 1 ||
+ domain < 0 || domain > ap_max_domain_id)
+ return -EINVAL;
+ spin_lock_bh(&ap_domain_lock);
+ ap_domain_index = domain;
+ spin_unlock_bh(&ap_domain_lock);
+
+ AP_DBF(DBF_DEBUG, "store new default domain=%d\n", domain);
+
+ return count;
+}
+
+static BUS_ATTR(ap_domain, 0644, ap_domain_show, ap_domain_store);
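
With the mode relaxed to 0644 the default domain can now be changed at runtime, e.g. with "echo 6 > /sys/bus/ap/ap_domain"; ap_domain_store() rejects anything outside 0..ap_max_domain_id.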
static ssize_t ap_control_domain_mask_show(struct bus_type *bus, char *buf)
{
if (!ap_configuration) /* QCI not supported */
return snprintf(buf, PAGE_SIZE, "not supported\n");
- if (!test_facility(76))
- /* format 0 - 16 bit domain field */
- return snprintf(buf, PAGE_SIZE, "%08x%08x\n",
- ap_configuration->adm[0],
- ap_configuration->adm[1]);
- /* format 1 - 256 bit domain field */
+
return snprintf(buf, PAGE_SIZE,
"0x%08x%08x%08x%08x%08x%08x%08x%08x\n",
ap_configuration->adm[0], ap_configuration->adm[1],
@@ -1508,6 +778,22 @@ static ssize_t ap_control_domain_mask_show(struct bus_type *bus, char *buf)
static BUS_ATTR(ap_control_domain_mask, 0444,
ap_control_domain_mask_show, NULL);
+static ssize_t ap_usage_domain_mask_show(struct bus_type *bus, char *buf)
+{
+ if (!ap_configuration) /* QCI not supported */
+ return snprintf(buf, PAGE_SIZE, "not supported\n");
+
+ return snprintf(buf, PAGE_SIZE,
+ "0x%08x%08x%08x%08x%08x%08x%08x%08x\n",
+ ap_configuration->aqm[0], ap_configuration->aqm[1],
+ ap_configuration->aqm[2], ap_configuration->aqm[3],
+ ap_configuration->aqm[4], ap_configuration->aqm[5],
+ ap_configuration->aqm[6], ap_configuration->aqm[7]);
+}
+
+static BUS_ATTR(ap_usage_domain_mask, 0444,
+ ap_usage_domain_mask_show, NULL);
+
static ssize_t ap_config_time_show(struct bus_type *bus, char *buf)
{
return snprintf(buf, PAGE_SIZE, "%d\n", ap_config_time);
@@ -1603,6 +889,7 @@ static BUS_ATTR(ap_max_domain_id, 0444, ap_max_domain_id_show, NULL);
static struct bus_attribute *const ap_bus_attrs[] = {
&bus_attr_ap_domain,
&bus_attr_ap_control_domain_mask,
+ &bus_attr_ap_usage_domain_mask,
&bus_attr_config_time,
&bus_attr_poll_thread,
&bus_attr_ap_interrupts,
@@ -1627,9 +914,12 @@ static int ap_select_domain(void)
* the "domain=" parameter or the domain with the maximum number
* of devices.
*/
- if (ap_domain_index >= 0)
+ spin_lock_bh(&ap_domain_lock);
+ if (ap_domain_index >= 0) {
/* Domain has already been selected. */
+ spin_unlock_bh(&ap_domain_lock);
return 0;
+ }
best_domain = -1;
max_count = 0;
for (i = 0; i < AP_DOMAINS; i++) {
@@ -1651,109 +941,171 @@ static int ap_select_domain(void)
}
if (best_domain >= 0){
ap_domain_index = best_domain;
+ spin_unlock_bh(&ap_domain_lock);
return 0;
}
+ spin_unlock_bh(&ap_domain_lock);
return -ENODEV;
}
-/**
- * __ap_scan_bus(): Scan the AP bus.
- * @dev: Pointer to device
- * @data: Pointer to data
- *
- * Scan the AP bus for new devices.
+/*
+ * helper function to be used with bus_find_device()
+ * matches for the card device with the given id
*/
-static int __ap_scan_bus(struct device *dev, void *data)
+static int __match_card_device_with_id(struct device *dev, void *data)
{
- return to_ap_dev(dev)->qid == (ap_qid_t)(unsigned long) data;
+ return is_card_dev(dev) && to_ap_card(dev)->id == (int)(long) data;
}
+/*
+ * helper function to be used with bus_find_device()
+ * matches for the queue device with a given qid
+ */
+static int __match_queue_device_with_qid(struct device *dev, void *data)
+{
+ return is_queue_dev(dev) && to_ap_queue(dev)->qid == (int)(long) data;
+}
+
+/**
+ * ap_scan_bus(): Scan the AP bus for new devices
+ * Runs periodically, re-armed via ap_config_timer every ap_config_time seconds
+ */
static void ap_scan_bus(struct work_struct *unused)
{
- struct ap_device *ap_dev;
+ struct ap_queue *aq;
+ struct ap_card *ac;
struct device *dev;
ap_qid_t qid;
- int queue_depth = 0, device_type = 0;
- unsigned int device_functions = 0;
- int rc, i, borked;
+ int depth = 0, type = 0;
+ unsigned int functions = 0;
+ int rc, id, dom, borked, domains;
+
+ AP_DBF(DBF_DEBUG, "ap_scan_bus running\n");
ap_query_configuration();
if (ap_select_domain() != 0)
goto out;
- for (i = 0; i < AP_DEVICES; i++) {
- qid = AP_MKQID(i, ap_domain_index);
+ for (id = 0; id < AP_DEVICES; id++) {
+ /* check if device is registered */
dev = bus_find_device(&ap_bus_type, NULL,
- (void *)(unsigned long)qid,
- __ap_scan_bus);
- rc = ap_query_queue(qid, &queue_depth, &device_type,
- &device_functions);
- if (dev) {
- ap_dev = to_ap_dev(dev);
- spin_lock_bh(&ap_dev->lock);
- if (rc == -ENODEV)
- ap_dev->state = AP_STATE_BORKED;
- borked = ap_dev->state == AP_STATE_BORKED;
- spin_unlock_bh(&ap_dev->lock);
- if (borked) /* Remove broken device */
+ (void *)(long) id,
+ __match_card_device_with_id);
+ ac = dev ? to_ap_card(dev) : NULL;
+ if (!ap_test_config_card_id(id)) {
+ if (dev) {
+ /* Card device has been removed from
+ * configuration, remove the belonging
+ * queue devices.
+ */
+ bus_for_each_dev(&ap_bus_type, NULL,
+ (void *)(long) id,
+ __ap_queue_devices_with_id_unregister);
+ /* now remove the card device */
device_unregister(dev);
- put_device(dev);
- if (!borked)
- continue;
- }
- if (rc)
- continue;
- ap_dev = kzalloc(sizeof(*ap_dev), GFP_KERNEL);
- if (!ap_dev)
- break;
- ap_dev->qid = qid;
- ap_dev->state = AP_STATE_RESET_START;
- ap_dev->interrupt = AP_INTR_DISABLED;
- ap_dev->queue_depth = queue_depth;
- ap_dev->raw_hwtype = device_type;
- ap_dev->device_type = device_type;
- ap_dev->functions = device_functions;
- spin_lock_init(&ap_dev->lock);
- INIT_LIST_HEAD(&ap_dev->pendingq);
- INIT_LIST_HEAD(&ap_dev->requestq);
- INIT_LIST_HEAD(&ap_dev->list);
- setup_timer(&ap_dev->timeout, ap_request_timeout,
- (unsigned long) ap_dev);
-
- ap_dev->device.bus = &ap_bus_type;
- ap_dev->device.parent = ap_root_device;
- rc = dev_set_name(&ap_dev->device, "card%02x",
- AP_QID_DEVICE(ap_dev->qid));
- if (rc) {
- kfree(ap_dev);
- continue;
- }
- /* Add to list of devices */
- spin_lock_bh(&ap_device_list_lock);
- list_add(&ap_dev->list, &ap_device_list);
- spin_unlock_bh(&ap_device_list_lock);
- /* Start with a device reset */
- spin_lock_bh(&ap_dev->lock);
- ap_sm_wait(ap_sm_event(ap_dev, AP_EVENT_POLL));
- spin_unlock_bh(&ap_dev->lock);
- /* Register device */
- ap_dev->device.release = ap_device_release;
- rc = device_register(&ap_dev->device);
- if (rc) {
- spin_lock_bh(&ap_dev->lock);
- list_del_init(&ap_dev->list);
- spin_unlock_bh(&ap_dev->lock);
- put_device(&ap_dev->device);
+ put_device(dev);
+ }
continue;
}
- /* Add device attributes. */
- rc = sysfs_create_group(&ap_dev->device.kobj,
- &ap_dev_attr_group);
- if (rc) {
- device_unregister(&ap_dev->device);
- continue;
+ /* According to the configuration there should be a card
+ * device, so check if there is at least one valid queue
+ * and maybe create queue devices and the card device.
+ */
+ domains = 0;
+ for (dom = 0; dom < AP_DOMAINS; dom++) {
+ qid = AP_MKQID(id, dom);
+ dev = bus_find_device(&ap_bus_type, NULL,
+ (void *)(long) qid,
+ __match_queue_device_with_qid);
+ aq = dev ? to_ap_queue(dev) : NULL;
+ if (!ap_test_config_domain(dom)) {
+ if (dev) {
+ /* Queue device exists but has been
+ * removed from configuration.
+ */
+ device_unregister(dev);
+ put_device(dev);
+ }
+ continue;
+ }
+ rc = ap_query_queue(qid, &depth, &type, &functions);
+ if (dev) {
+ spin_lock_bh(&aq->lock);
+ if (rc == -ENODEV ||
+ /* adapter reconfiguration */
+ (ac && ac->functions != functions))
+ aq->state = AP_STATE_BORKED;
+ borked = aq->state == AP_STATE_BORKED;
+ spin_unlock_bh(&aq->lock);
+ if (borked) /* Remove broken device */
+ device_unregister(dev);
+ put_device(dev);
+ if (!borked) {
+ domains++;
+ continue;
+ }
+ }
+ if (rc)
+ continue;
+ /* new queue device needed */
+ if (!ac) {
+ /* but first create the card device */
+ ac = ap_card_create(id, depth,
+ type, functions);
+ if (!ac)
+ continue;
+ ac->ap_dev.device.bus = &ap_bus_type;
+ ac->ap_dev.device.parent = ap_root_device;
+ dev_set_name(&ac->ap_dev.device,
+ "card%02x", id);
+ /* Register card with AP bus */
+ rc = device_register(&ac->ap_dev.device);
+ if (rc) {
+ put_device(&ac->ap_dev.device);
+ ac = NULL;
+ break;
+ }
+ /* get it and thus adjust reference counter */
+ get_device(&ac->ap_dev.device);
+ /* Add card device to card list */
+ spin_lock_bh(&ap_list_lock);
+ list_add(&ac->list, &ap_card_list);
+ spin_unlock_bh(&ap_list_lock);
+ }
+ /* now create the new queue device */
+ aq = ap_queue_create(qid, type);
+ if (!aq)
+ continue;
+ aq->card = ac;
+ aq->ap_dev.device.bus = &ap_bus_type;
+ aq->ap_dev.device.parent = &ac->ap_dev.device;
+ dev_set_name(&aq->ap_dev.device,
+ "%02x.%04x", id, dom);
+ /* Add queue device to card queue list */
+ spin_lock_bh(&ap_list_lock);
+ list_add(&aq->list, &ac->queues);
+ spin_unlock_bh(&ap_list_lock);
+ /* Start with a device reset */
+ spin_lock_bh(&aq->lock);
+ ap_wait(ap_sm_event(aq, AP_EVENT_POLL));
+ spin_unlock_bh(&aq->lock);
+ /* Register device */
+ rc = device_register(&aq->ap_dev.device);
+ if (rc) {
+ spin_lock_bh(&ap_list_lock);
+ list_del_init(&aq->list);
+ spin_unlock_bh(&ap_list_lock);
+ put_device(&aq->ap_dev.device);
+ continue;
+ }
+ domains++;
+ } /* end domain loop */
+ if (ac) {
+ /* remove card dev if there are no queue devices */
+ if (!domains)
+ device_unregister(&ac->ap_dev.device);
+ put_device(&ac->ap_dev.device);
}
- }
+ } /* end device loop */
out:
mod_timer(&ap_config_timer, jiffies + ap_config_time * HZ);
}
@@ -1772,7 +1124,7 @@ static void ap_reset_domain(void)
if (ap_domain_index == -1 || !ap_test_config_domain(ap_domain_index))
return;
for (i = 0; i < AP_DEVICES; i++)
- ap_reset_queue(AP_MKQID(i, ap_domain_index));
+ ap_rapq(AP_MKQID(i, ap_domain_index));
}
static void ap_reset_all(void)
@@ -1785,7 +1137,7 @@ static void ap_reset_all(void)
for (j = 0; j < AP_DEVICES; j++) {
if (!ap_test_config_card_id(j))
continue;
- ap_reset_queue(AP_MKQID(j, i));
+ ap_rapq(AP_MKQID(j, i));
}
}
}
@@ -1794,6 +1146,23 @@ static struct reset_call ap_reset_call = {
.fn = ap_reset_all,
};
+int __init ap_debug_init(void)
+{
+ ap_dbf_root = debugfs_create_dir("ap", NULL);
+ ap_dbf_info = debug_register("ap", 1, 1,
+ DBF_MAX_SPRINTF_ARGS * sizeof(long));
+ debug_register_view(ap_dbf_info, &debug_sprintf_view);
+ debug_set_level(ap_dbf_info, DBF_ERR);
+
+ return 0;
+}
+
+void ap_debug_exit(void)
+{
+ debugfs_remove(ap_dbf_root);
+ debug_unregister(ap_dbf_info);
+}
+
/**
* ap_module_init(): The module initialization code.
*
@@ -1804,6 +1173,10 @@ int __init ap_module_init(void)
int max_domain_id;
int rc, i;
+ rc = ap_debug_init();
+ if (rc)
+ return rc;
+
if (ap_instructions_available() != 0) {
pr_warn("The hardware system does not support AP instructions\n");
return -ENODEV;
@@ -1913,7 +1286,15 @@ void ap_module_exit(void)
del_timer_sync(&ap_config_timer);
hrtimer_cancel(&ap_poll_timer);
tasklet_kill(&ap_tasklet);
- bus_for_each_dev(&ap_bus_type, NULL, NULL, __ap_devices_unregister);
+
+ /* first remove queue devices */
+ bus_for_each_dev(&ap_bus_type, NULL, NULL,
+ __ap_queue_devices_unregister);
+ /* now remove the card devices */
+ bus_for_each_dev(&ap_bus_type, NULL, NULL,
+ __ap_card_devices_unregister);
+
+ /* remove bus attributes */
for (i = 0; ap_bus_attrs[i]; i++)
bus_remove_file(&ap_bus_type, ap_bus_attrs[i]);
unregister_pm_notifier(&ap_power_notifier);
@@ -1923,6 +1304,8 @@ void ap_module_exit(void)
unregister_reset_call(&ap_reset_call);
if (ap_using_interrupts())
unregister_adapter_interrupt(&ap_airq);
+
+ ap_debug_exit();
}
module_init(ap_module_init);
diff --git a/drivers/s390/crypto/ap_bus.h b/drivers/s390/crypto/ap_bus.h
index d7fdf5c024d7..4dc7c88fb054 100644
--- a/drivers/s390/crypto/ap_bus.h
+++ b/drivers/s390/crypto/ap_bus.h
@@ -27,7 +27,6 @@
#define _AP_BUS_H_
#include <linux/device.h>
-#include <linux/mod_devicetable.h>
#include <linux/types.h>
#define AP_DEVICES 64 /* Number of AP devices. */
@@ -38,14 +37,17 @@
extern int ap_domain_index;
+extern spinlock_t ap_list_lock;
+extern struct list_head ap_card_list;
+
/**
* The ap_qid_t identifier of an ap queue. It contains a
- * 6 bit device index and a 4 bit queue index (domain).
+ * 6 bit card index and an 8 bit queue index (domain).
*/
typedef unsigned int ap_qid_t;
-#define AP_MKQID(_device, _queue) (((_device) & 63) << 8 | ((_queue) & 255))
-#define AP_QID_DEVICE(_qid) (((_qid) >> 8) & 63)
+#define AP_MKQID(_card, _queue) (((_card) & 63) << 8 | ((_queue) & 255))
+#define AP_QID_CARD(_qid) (((_qid) >> 8) & 63)
#define AP_QID_QUEUE(_qid) ((_qid) & 255)
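
A worked example of the encoding (values chosen for illustration):

	AP_MKQID(0x0a, 0x04)  == 0x0a04	/* card index in bits 8-13 */
	AP_QID_CARD(0x0a04)   == 0x0a
	AP_QID_QUEUE(0x0a04)  == 0x04	/* domain, low byte */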
/**
@@ -55,7 +57,7 @@ typedef unsigned int ap_qid_t;
* @queue_full: Is 1 if the queue is full
* @pad: A 4 bit pad
* @int_enabled: Shows if interrupts are enabled for the AP
- * @response_conde: Holds the 8 bit response code
+ * @response_code: Holds the 8 bit response code
* @pad2: A 16 bit pad
*
* The ap queue status word is returned by all three AP functions
@@ -105,6 +107,7 @@ static inline int ap_test_bit(unsigned int *ptr, unsigned int nr)
#define AP_DEVICE_TYPE_CEX3C 9
#define AP_DEVICE_TYPE_CEX4 10
#define AP_DEVICE_TYPE_CEX5 11
+#define AP_DEVICE_TYPE_CEX6 12
/*
* Known function facilities
@@ -166,7 +169,8 @@ struct ap_driver {
int (*probe)(struct ap_device *);
void (*remove)(struct ap_device *);
- int request_timeout; /* request timeout in jiffies */
+ void (*suspend)(struct ap_device *);
+ void (*resume)(struct ap_device *);
};
#define to_ap_drv(x) container_of((x), struct ap_driver, driver)
@@ -174,38 +178,51 @@ struct ap_driver {
int ap_driver_register(struct ap_driver *, struct module *, char *);
void ap_driver_unregister(struct ap_driver *);
-typedef enum ap_wait (ap_func_t)(struct ap_device *ap_dev);
-
struct ap_device {
struct device device;
struct ap_driver *drv; /* Pointer to AP device driver. */
- spinlock_t lock; /* Per device lock. */
- struct list_head list; /* private list of all AP devices. */
+ int device_type; /* AP device type. */
+};
- enum ap_state state; /* State of the AP device. */
+#define to_ap_dev(x) container_of((x), struct ap_device, device)
- ap_qid_t qid; /* AP queue id. */
- int queue_depth; /* AP queue depth.*/
- int device_type; /* AP device type. */
+struct ap_card {
+ struct ap_device ap_dev;
+ struct list_head list; /* Private list of AP cards. */
+ struct list_head queues; /* List of assoc. AP queues */
+ void *private; /* ap driver private pointer. */
int raw_hwtype; /* AP raw hardware type. */
unsigned int functions; /* AP device function bitfield. */
- struct timer_list timeout; /* Timer for request timeouts. */
+ int queue_depth; /* AP queue depth.*/
+ int id; /* AP card number. */
+ atomic_t total_request_count; /* # requests ever for this AP device.*/
+};
+
+#define to_ap_card(x) container_of((x), struct ap_card, ap_dev.device)
+struct ap_queue {
+ struct ap_device ap_dev;
+ struct list_head list; /* Private list of AP queues. */
+ struct ap_card *card; /* Ptr to assoc. AP card. */
+ spinlock_t lock; /* Per device lock. */
+ void *private; /* ap driver private pointer. */
+ ap_qid_t qid; /* AP queue id. */
int interrupt; /* indicate if interrupts are enabled */
int queue_count; /* # messages currently on AP queue. */
-
- struct list_head pendingq; /* List of message sent to AP queue. */
+ enum ap_state state; /* State of the AP device. */
int pendingq_count; /* # requests on pendingq list. */
- struct list_head requestq; /* List of message yet to be sent. */
int requestq_count; /* # requests on requestq list. */
- int total_request_count; /* # requests ever for this AP device. */
-
+ int total_request_count; /* # requests ever for this AP device.*/
+ int request_timeout; /* Request timeout in jiffies. */
+ struct timer_list timeout; /* Timer for request timeouts. */
+ struct list_head pendingq; /* List of message sent to AP queue. */
+ struct list_head requestq; /* List of message yet to be sent. */
struct ap_message *reply; /* Per device reply message. */
-
- void *private; /* ap driver private pointer. */
};
-#define to_ap_dev(x) container_of((x), struct ap_device, device)
+#define to_ap_queue(x) container_of((x), struct ap_queue, ap_dev.device)
+
+typedef enum ap_wait (ap_func_t)(struct ap_queue *queue);
struct ap_message {
struct list_head list; /* Request queueing. */
@@ -217,7 +234,7 @@ struct ap_message {
void *private; /* ap driver private pointer. */
unsigned int special:1; /* Used for special commands. */
/* receive is called from tasklet context */
- void (*receive)(struct ap_device *, struct ap_message *,
+ void (*receive)(struct ap_queue *, struct ap_message *,
struct ap_message *);
};
@@ -232,10 +249,6 @@ struct ap_config_info {
unsigned char reserved4[16];
} __packed;
-#define AP_DEVICE(dt) \
- .dev_type=(dt), \
- .match_flags=AP_DEVICE_ID_MATCH_DEVICE_TYPE,
-
/**
* ap_init_message() - Initialize ap_message.
* Initialize a message before using. Otherwise this might result in
@@ -250,6 +263,12 @@ static inline void ap_init_message(struct ap_message *ap_msg)
ap_msg->receive = NULL;
}
+#define for_each_ap_card(_ac) \
+ list_for_each_entry(_ac, &ap_card_list, list)
+
+#define for_each_ap_queue(_aq, _ac) \
+ list_for_each_entry(_aq, &(_ac)->queues, list)
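
The canonical traversal, as in the reworked tasklet earlier in this patch, takes ap_list_lock around both loops (sketch):

	spin_lock_bh(&ap_list_lock);
	for_each_ap_card(ac)
		for_each_ap_queue(aq, ac)
			; /* inspect aq under the lock */
	spin_unlock_bh(&ap_list_lock);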
+
/*
* Note: don't use ap_send/ap_recv after using ap_queue_message
* for the first time. Otherwise the ap message queue will get
@@ -258,11 +277,26 @@ static inline void ap_init_message(struct ap_message *ap_msg)
int ap_send(ap_qid_t, unsigned long long, void *, size_t);
int ap_recv(ap_qid_t, unsigned long long *, void *, size_t);
-void ap_queue_message(struct ap_device *ap_dev, struct ap_message *ap_msg);
-void ap_cancel_message(struct ap_device *ap_dev, struct ap_message *ap_msg);
-void ap_flush_queue(struct ap_device *ap_dev);
+enum ap_wait ap_sm_event(struct ap_queue *aq, enum ap_event event);
+enum ap_wait ap_sm_event_loop(struct ap_queue *aq, enum ap_event event);
+
+void ap_queue_message(struct ap_queue *aq, struct ap_message *ap_msg);
+void ap_cancel_message(struct ap_queue *aq, struct ap_message *ap_msg);
+void ap_flush_queue(struct ap_queue *aq);
+
+void *ap_airq_ptr(void);
+void ap_wait(enum ap_wait wait);
+void ap_request_timeout(unsigned long data);
void ap_bus_force_rescan(void);
-void ap_device_init_reply(struct ap_device *ap_dev, struct ap_message *ap_msg);
+
+void ap_queue_init_reply(struct ap_queue *aq, struct ap_message *ap_msg);
+struct ap_queue *ap_queue_create(ap_qid_t qid, int device_type);
+void ap_queue_remove(struct ap_queue *aq);
+void ap_queue_suspend(struct ap_device *ap_dev);
+void ap_queue_resume(struct ap_device *ap_dev);
+
+struct ap_card *ap_card_create(int id, int queue_depth, int device_type,
+ unsigned int device_functions);
int ap_module_init(void);
void ap_module_exit(void);
diff --git a/drivers/s390/crypto/ap_card.c b/drivers/s390/crypto/ap_card.c
new file mode 100644
index 000000000000..0110d44172a3
--- /dev/null
+++ b/drivers/s390/crypto/ap_card.c
@@ -0,0 +1,170 @@
+/*
+ * Copyright IBM Corp. 2016
+ * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
+ *
+ * Adjunct processor bus, card related code.
+ */
+
+#define KMSG_COMPONENT "ap"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <asm/facility.h>
+
+#include "ap_bus.h"
+#include "ap_asm.h"
+
+/*
+ * AP card related attributes.
+ */
+static ssize_t ap_hwtype_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct ap_card *ac = to_ap_card(dev);
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", ac->ap_dev.device_type);
+}
+
+static DEVICE_ATTR(hwtype, 0444, ap_hwtype_show, NULL);
+
+static ssize_t ap_raw_hwtype_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct ap_card *ac = to_ap_card(dev);
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", ac->raw_hwtype);
+}
+
+static DEVICE_ATTR(raw_hwtype, 0444, ap_raw_hwtype_show, NULL);
+
+static ssize_t ap_depth_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct ap_card *ac = to_ap_card(dev);
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", ac->queue_depth);
+}
+
+static DEVICE_ATTR(depth, 0444, ap_depth_show, NULL);
+
+static ssize_t ap_functions_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct ap_card *ac = to_ap_card(dev);
+
+ return snprintf(buf, PAGE_SIZE, "0x%08X\n", ac->functions);
+}
+
+static DEVICE_ATTR(ap_functions, 0444, ap_functions_show, NULL);
+
+static ssize_t ap_request_count_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct ap_card *ac = to_ap_card(dev);
+ unsigned int req_cnt;
+
+ req_cnt = 0;
+ spin_lock_bh(&ap_list_lock);
+ req_cnt = atomic_read(&ac->total_request_count);
+ spin_unlock_bh(&ap_list_lock);
+ return snprintf(buf, PAGE_SIZE, "%d\n", req_cnt);
+}
+
+static DEVICE_ATTR(request_count, 0444, ap_request_count_show, NULL);
+
+static ssize_t ap_requestq_count_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct ap_card *ac = to_ap_card(dev);
+ struct ap_queue *aq;
+ unsigned int reqq_cnt;
+
+ reqq_cnt = 0;
+ spin_lock_bh(&ap_list_lock);
+ for_each_ap_queue(aq, ac)
+ reqq_cnt += aq->requestq_count;
+ spin_unlock_bh(&ap_list_lock);
+ return snprintf(buf, PAGE_SIZE, "%d\n", reqq_cnt);
+}
+
+static DEVICE_ATTR(requestq_count, 0444, ap_requestq_count_show, NULL);
+
+static ssize_t ap_pendingq_count_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct ap_card *ac = to_ap_card(dev);
+ struct ap_queue *aq;
+ unsigned int penq_cnt;
+
+ penq_cnt = 0;
+ spin_lock_bh(&ap_list_lock);
+ for_each_ap_queue(aq, ac)
+ penq_cnt += aq->pendingq_count;
+ spin_unlock_bh(&ap_list_lock);
+ return snprintf(buf, PAGE_SIZE, "%d\n", penq_cnt);
+}
+
+static DEVICE_ATTR(pendingq_count, 0444, ap_pendingq_count_show, NULL);
+
+static ssize_t ap_modalias_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return sprintf(buf, "ap:t%02X\n", to_ap_dev(dev)->device_type);
+}
+
+static DEVICE_ATTR(modalias, 0444, ap_modalias_show, NULL);
+
+static struct attribute *ap_card_dev_attrs[] = {
+ &dev_attr_hwtype.attr,
+ &dev_attr_raw_hwtype.attr,
+ &dev_attr_depth.attr,
+ &dev_attr_ap_functions.attr,
+ &dev_attr_request_count.attr,
+ &dev_attr_requestq_count.attr,
+ &dev_attr_pendingq_count.attr,
+ &dev_attr_modalias.attr,
+ NULL
+};
+
+static struct attribute_group ap_card_dev_attr_group = {
+ .attrs = ap_card_dev_attrs
+};
+
+static const struct attribute_group *ap_card_dev_attr_groups[] = {
+ &ap_card_dev_attr_group,
+ NULL
+};
+
+struct device_type ap_card_type = {
+ .name = "ap_card",
+ .groups = ap_card_dev_attr_groups,
+};
+
+static void ap_card_device_release(struct device *dev)
+{
+ kfree(to_ap_card(dev));
+}
+
+struct ap_card *ap_card_create(int id, int queue_depth, int device_type,
+ unsigned int functions)
+{
+ struct ap_card *ac;
+
+ ac = kzalloc(sizeof(*ac), GFP_KERNEL);
+ if (!ac)
+ return NULL;
+ INIT_LIST_HEAD(&ac->queues);
+ ac->ap_dev.device.release = ap_card_device_release;
+ ac->ap_dev.device.type = &ap_card_type;
+ ac->ap_dev.device_type = device_type;
+ /* CEX6 toleration: map to CEX5 */
+ if (device_type == AP_DEVICE_TYPE_CEX6)
+ ac->ap_dev.device_type = AP_DEVICE_TYPE_CEX5;
+ ac->raw_hwtype = device_type;
+ ac->queue_depth = queue_depth;
+ ac->functions = functions;
+ ac->id = id;
+ return ac;
+}
diff --git a/drivers/s390/crypto/ap_debug.h b/drivers/s390/crypto/ap_debug.h
new file mode 100644
index 000000000000..78dbff842dae
--- /dev/null
+++ b/drivers/s390/crypto/ap_debug.h
@@ -0,0 +1,28 @@
+/*
+ * Copyright IBM Corp. 2016
+ * Author(s): Harald Freudenberger <freude@de.ibm.com>
+ */
+#ifndef AP_DEBUG_H
+#define AP_DEBUG_H
+
+#include <asm/debug.h>
+
+#define DBF_ERR 3 /* error conditions */
+#define DBF_WARN 4 /* warning conditions */
+#define DBF_INFO 5 /* informational */
+#define DBF_DEBUG 6 /* for debugging only */
+
+#define RC2ERR(rc) ((rc) ? DBF_ERR : DBF_INFO)
+#define RC2WARN(rc) ((rc) ? DBF_WARN : DBF_INFO)
+
+#define DBF_MAX_SPRINTF_ARGS 5
+
+#define AP_DBF(...) \
+ debug_sprintf_event(ap_dbf_info, ##__VA_ARGS__)
+
+extern debug_info_t *ap_dbf_info;
+
+int ap_debug_init(void);
+void ap_debug_exit(void);
+
+#endif /* AP_DEBUG_H */
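
A hypothetical AP_DBF() call site, staying within the DBF_MAX_SPRINTF_ARGS limit of five arguments:

	AP_DBF(DBF_INFO, "queue %02x.%04x created\n",
	       AP_QID_CARD(qid), AP_QID_QUEUE(qid));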
diff --git a/drivers/s390/crypto/ap_queue.c b/drivers/s390/crypto/ap_queue.c
new file mode 100644
index 000000000000..b58a917dc510
--- /dev/null
+++ b/drivers/s390/crypto/ap_queue.c
@@ -0,0 +1,701 @@
+/*
+ * Copyright IBM Corp. 2016
+ * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
+ *
+ * Adjunct processor bus, queue related code.
+ */
+
+#define KMSG_COMPONENT "ap"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <asm/facility.h>
+
+#include "ap_bus.h"
+#include "ap_asm.h"
+
+/**
+ * ap_queue_enable_interruption(): Enable interruption on an AP queue.
+ * @aq: The AP queue
+ * @ind: the notification indicator byte
+ *
+ * Enables interruption on AP queue via ap_aqic(). Based on the return
+ * value the caller waits a while and tests with ap_test_queue()
+ * whether interrupts have actually been switched on.
+ */
+static int ap_queue_enable_interruption(struct ap_queue *aq, void *ind)
+{
+ struct ap_queue_status status;
+
+ status = ap_aqic(aq->qid, ind);
+ switch (status.response_code) {
+ case AP_RESPONSE_NORMAL:
+ case AP_RESPONSE_OTHERWISE_CHANGED:
+ return 0;
+ case AP_RESPONSE_Q_NOT_AVAIL:
+ case AP_RESPONSE_DECONFIGURED:
+ case AP_RESPONSE_CHECKSTOPPED:
+ case AP_RESPONSE_INVALID_ADDRESS:
+ pr_err("Registering adapter interrupts for AP device %02x.%04x failed\n",
+ AP_QID_CARD(aq->qid),
+ AP_QID_QUEUE(aq->qid));
+ return -EOPNOTSUPP;
+ case AP_RESPONSE_RESET_IN_PROGRESS:
+ case AP_RESPONSE_BUSY:
+ default:
+ return -EBUSY;
+ }
+}
+
+/**
+ * __ap_send(): Send message to adjunct processor queue.
+ * @qid: The AP queue number
+ * @psmid: The program supplied message identifier
+ * @msg: The message text
+ * @length: The message length
+ * @special: Special Bit
+ *
+ * Returns AP queue status structure.
+ * Condition code 1 on NQAP can't happen because the L bit is 1.
+ * Condition code 2 on NQAP also means the send is incomplete,
+ * because a segment boundary was reached. The NQAP is repeated.
+ */
+static inline struct ap_queue_status
+__ap_send(ap_qid_t qid, unsigned long long psmid, void *msg, size_t length,
+ unsigned int special)
+{
+ if (special == 1)
+ qid |= 0x400000UL;
+ return ap_nqap(qid, psmid, msg, length);
+}
+
+int ap_send(ap_qid_t qid, unsigned long long psmid, void *msg, size_t length)
+{
+ struct ap_queue_status status;
+
+ status = __ap_send(qid, psmid, msg, length, 0);
+ switch (status.response_code) {
+ case AP_RESPONSE_NORMAL:
+ return 0;
+ case AP_RESPONSE_Q_FULL:
+ case AP_RESPONSE_RESET_IN_PROGRESS:
+ return -EBUSY;
+ case AP_RESPONSE_REQ_FAC_NOT_INST:
+ return -EINVAL;
+ default: /* Device is gone. */
+ return -ENODEV;
+ }
+}
+EXPORT_SYMBOL(ap_send);
+
+int ap_recv(ap_qid_t qid, unsigned long long *psmid, void *msg, size_t length)
+{
+ struct ap_queue_status status;
+
+ if (msg == NULL)
+ return -EINVAL;
+ status = ap_dqap(qid, psmid, msg, length);
+ switch (status.response_code) {
+ case AP_RESPONSE_NORMAL:
+ return 0;
+ case AP_RESPONSE_NO_PENDING_REPLY:
+ if (status.queue_empty)
+ return -ENOENT;
+ return -EBUSY;
+ case AP_RESPONSE_RESET_IN_PROGRESS:
+ return -EBUSY;
+ default:
+ return -ENODEV;
+ }
+}
+EXPORT_SYMBOL(ap_recv);
+
+/* State machine definitions and helpers */
+
+static enum ap_wait ap_sm_nop(struct ap_queue *aq)
+{
+ return AP_WAIT_NONE;
+}
+
+/**
+ * ap_sm_recv(): Receive pending reply messages from an AP queue but do
+ * not change the state of the device.
+ * @aq: pointer to the AP queue
+ *
+ * Returns the ap queue status returned by the DQAP invocation.
+ */
+static struct ap_queue_status ap_sm_recv(struct ap_queue *aq)
+{
+ struct ap_queue_status status;
+ struct ap_message *ap_msg;
+
+ status = ap_dqap(aq->qid, &aq->reply->psmid,
+ aq->reply->message, aq->reply->length);
+ switch (status.response_code) {
+ case AP_RESPONSE_NORMAL:
+ aq->queue_count--;
+ if (aq->queue_count > 0)
+ mod_timer(&aq->timeout,
+ jiffies + aq->request_timeout);
+ list_for_each_entry(ap_msg, &aq->pendingq, list) {
+ if (ap_msg->psmid != aq->reply->psmid)
+ continue;
+ list_del_init(&ap_msg->list);
+ aq->pendingq_count--;
+ ap_msg->receive(aq, ap_msg, aq->reply);
+ break;
+ }
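+ /* fall through */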
+ case AP_RESPONSE_NO_PENDING_REPLY:
+ if (!status.queue_empty || aq->queue_count <= 0)
+ break;
+ /* The card shouldn't forget requests but who knows. */
+ aq->queue_count = 0;
+ list_splice_init(&aq->pendingq, &aq->requestq);
+ aq->requestq_count += aq->pendingq_count;
+ aq->pendingq_count = 0;
+ break;
+ default:
+ break;
+ }
+ return status;
+}
+
+/**
+ * ap_sm_read(): Receive pending reply messages from an AP queue.
+ * @aq: pointer to the AP queue
+ *
+ * Returns AP_WAIT_NONE, AP_WAIT_AGAIN, or AP_WAIT_INTERRUPT
+ */
+static enum ap_wait ap_sm_read(struct ap_queue *aq)
+{
+ struct ap_queue_status status;
+
+ if (!aq->reply)
+ return AP_WAIT_NONE;
+ status = ap_sm_recv(aq);
+ switch (status.response_code) {
+ case AP_RESPONSE_NORMAL:
+ if (aq->queue_count > 0) {
+ aq->state = AP_STATE_WORKING;
+ return AP_WAIT_AGAIN;
+ }
+ aq->state = AP_STATE_IDLE;
+ return AP_WAIT_NONE;
+ case AP_RESPONSE_NO_PENDING_REPLY:
+ if (aq->queue_count > 0)
+ return AP_WAIT_INTERRUPT;
+ aq->state = AP_STATE_IDLE;
+ return AP_WAIT_NONE;
+ default:
+ aq->state = AP_STATE_BORKED;
+ return AP_WAIT_NONE;
+ }
+}
+
+/**
+ * ap_sm_suspend_read(): Receive pending reply messages from an AP queue
+ * without changing the device state in between. In suspend mode we don't
+ * allow sending new requests, therefore just fetch pending replies.
+ * @aq: pointer to the AP queue
+ *
+ * Returns AP_WAIT_NONE or AP_WAIT_AGAIN
+ */
+static enum ap_wait ap_sm_suspend_read(struct ap_queue *aq)
+{
+ struct ap_queue_status status;
+
+ if (!aq->reply)
+ return AP_WAIT_NONE;
+ status = ap_sm_recv(aq);
+ switch (status.response_code) {
+ case AP_RESPONSE_NORMAL:
+ if (aq->queue_count > 0)
+ return AP_WAIT_AGAIN;
+ /* fall through */
+ default:
+ return AP_WAIT_NONE;
+ }
+}
+
+/**
+ * ap_sm_write(): Send messages from the request queue to an AP queue.
+ * @aq: pointer to the AP queue
+ *
+ * Returns AP_WAIT_NONE, AP_WAIT_AGAIN, or AP_WAIT_INTERRUPT
+ */
+static enum ap_wait ap_sm_write(struct ap_queue *aq)
+{
+ struct ap_queue_status status;
+ struct ap_message *ap_msg;
+
+ if (aq->requestq_count <= 0)
+ return AP_WAIT_NONE;
+ /* Start the next request on the queue. */
+ ap_msg = list_entry(aq->requestq.next, struct ap_message, list);
+ status = __ap_send(aq->qid, ap_msg->psmid,
+ ap_msg->message, ap_msg->length, ap_msg->special);
+ switch (status.response_code) {
+ case AP_RESPONSE_NORMAL:
+ aq->queue_count++;
+ if (aq->queue_count == 1)
+ mod_timer(&aq->timeout, jiffies + aq->request_timeout);
+ list_move_tail(&ap_msg->list, &aq->pendingq);
+ aq->requestq_count--;
+ aq->pendingq_count++;
+ if (aq->queue_count < aq->card->queue_depth) {
+ aq->state = AP_STATE_WORKING;
+ return AP_WAIT_AGAIN;
+ }
+ /* fall through */
+ case AP_RESPONSE_Q_FULL:
+ aq->state = AP_STATE_QUEUE_FULL;
+ return AP_WAIT_INTERRUPT;
+ case AP_RESPONSE_RESET_IN_PROGRESS:
+ aq->state = AP_STATE_RESET_WAIT;
+ return AP_WAIT_TIMEOUT;
+ case AP_RESPONSE_MESSAGE_TOO_BIG:
+ case AP_RESPONSE_REQ_FAC_NOT_INST:
+ list_del_init(&ap_msg->list);
+ aq->requestq_count--;
+ ap_msg->rc = -EINVAL;
+ ap_msg->receive(aq, ap_msg, NULL);
+ return AP_WAIT_AGAIN;
+ default:
+ aq->state = AP_STATE_BORKED;
+ return AP_WAIT_NONE;
+ }
+}
+
+/**
+ * ap_sm_read_write(): Send and receive messages to/from an AP queue.
+ * @aq: pointer to the AP queue
+ *
+ * Returns AP_WAIT_NONE, AP_WAIT_AGAIN, AP_WAIT_INTERRUPT, or AP_WAIT_TIMEOUT
+ */
+static enum ap_wait ap_sm_read_write(struct ap_queue *aq)
+{
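+	/* enum ap_wait is ordered by urgency, min() picks the more urgent hint */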
+ return min(ap_sm_read(aq), ap_sm_write(aq));
+}
+
+/**
+ * ap_sm_reset(): Reset an AP queue.
+ * @aq: pointer to the AP queue
+ *
+ * Submit the Reset command to an AP queue.
+ * Returns AP_WAIT_TIMEOUT or AP_WAIT_NONE.
+ */
+static enum ap_wait ap_sm_reset(struct ap_queue *aq)
+{
+ struct ap_queue_status status;
+
+ status = ap_rapq(aq->qid);
+ switch (status.response_code) {
+ case AP_RESPONSE_NORMAL:
+ case AP_RESPONSE_RESET_IN_PROGRESS:
+ aq->state = AP_STATE_RESET_WAIT;
+ aq->interrupt = AP_INTR_DISABLED;
+ return AP_WAIT_TIMEOUT;
+ case AP_RESPONSE_BUSY:
+ return AP_WAIT_TIMEOUT;
+ case AP_RESPONSE_Q_NOT_AVAIL:
+ case AP_RESPONSE_DECONFIGURED:
+ case AP_RESPONSE_CHECKSTOPPED:
+ default:
+ aq->state = AP_STATE_BORKED;
+ return AP_WAIT_NONE;
+ }
+}
+
+/**
+ * ap_sm_reset_wait(): Test queue for completion of the reset operation
+ * @aq: pointer to the AP queue
+ *
+ * Returns AP_WAIT_AGAIN, AP_WAIT_TIMEOUT, or AP_WAIT_NONE.
+ */
+static enum ap_wait ap_sm_reset_wait(struct ap_queue *aq)
+{
+ struct ap_queue_status status;
+ void *lsi_ptr;
+
+ if (aq->queue_count > 0 && aq->reply)
+ /* Try to read a completed message and get the status */
+ status = ap_sm_recv(aq);
+ else
+ /* Get the status with TAPQ */
+ status = ap_tapq(aq->qid, NULL);
+
+ switch (status.response_code) {
+ case AP_RESPONSE_NORMAL:
+ lsi_ptr = ap_airq_ptr();
+ if (lsi_ptr && ap_queue_enable_interruption(aq, lsi_ptr) == 0)
+ aq->state = AP_STATE_SETIRQ_WAIT;
+ else
+ aq->state = (aq->queue_count > 0) ?
+ AP_STATE_WORKING : AP_STATE_IDLE;
+ return AP_WAIT_AGAIN;
+ case AP_RESPONSE_BUSY:
+ case AP_RESPONSE_RESET_IN_PROGRESS:
+ return AP_WAIT_TIMEOUT;
+ case AP_RESPONSE_Q_NOT_AVAIL:
+ case AP_RESPONSE_DECONFIGURED:
+ case AP_RESPONSE_CHECKSTOPPED:
+ default:
+ aq->state = AP_STATE_BORKED;
+ return AP_WAIT_NONE;
+ }
+}
+
+/**
+ * ap_sm_setirq_wait(): Test queue for completion of the irq enablement
+ * @aq: pointer to the AP queue
+ *
+ * Returns AP_WAIT_AGAIN, AP_WAIT_TIMEOUT, or AP_WAIT_NONE.
+ */
+static enum ap_wait ap_sm_setirq_wait(struct ap_queue *aq)
+{
+ struct ap_queue_status status;
+
+ if (aq->queue_count > 0 && aq->reply)
+ /* Try to read a completed message and get the status */
+ status = ap_sm_recv(aq);
+ else
+ /* Get the status with TAPQ */
+ status = ap_tapq(aq->qid, NULL);
+
+ if (status.int_enabled == 1) {
+ /* Irqs are now enabled */
+ aq->interrupt = AP_INTR_ENABLED;
+ aq->state = (aq->queue_count > 0) ?
+ AP_STATE_WORKING : AP_STATE_IDLE;
+ }
+
+ switch (status.response_code) {
+ case AP_RESPONSE_NORMAL:
+ if (aq->queue_count > 0)
+ return AP_WAIT_AGAIN;
+		/* fall through */
+ case AP_RESPONSE_NO_PENDING_REPLY:
+ return AP_WAIT_TIMEOUT;
+ default:
+ aq->state = AP_STATE_BORKED;
+ return AP_WAIT_NONE;
+ }
+}
+
+/*
+ * AP state machine jump table: for each queue state (row) the function
+ * to be executed for the poll and timeout events (columns).
+ */
+static ap_func_t *ap_jumptable[NR_AP_STATES][NR_AP_EVENTS] = {
+ [AP_STATE_RESET_START] = {
+ [AP_EVENT_POLL] = ap_sm_reset,
+ [AP_EVENT_TIMEOUT] = ap_sm_nop,
+ },
+ [AP_STATE_RESET_WAIT] = {
+ [AP_EVENT_POLL] = ap_sm_reset_wait,
+ [AP_EVENT_TIMEOUT] = ap_sm_nop,
+ },
+ [AP_STATE_SETIRQ_WAIT] = {
+ [AP_EVENT_POLL] = ap_sm_setirq_wait,
+ [AP_EVENT_TIMEOUT] = ap_sm_nop,
+ },
+ [AP_STATE_IDLE] = {
+ [AP_EVENT_POLL] = ap_sm_write,
+ [AP_EVENT_TIMEOUT] = ap_sm_nop,
+ },
+ [AP_STATE_WORKING] = {
+ [AP_EVENT_POLL] = ap_sm_read_write,
+ [AP_EVENT_TIMEOUT] = ap_sm_reset,
+ },
+ [AP_STATE_QUEUE_FULL] = {
+ [AP_EVENT_POLL] = ap_sm_read,
+ [AP_EVENT_TIMEOUT] = ap_sm_reset,
+ },
+ [AP_STATE_SUSPEND_WAIT] = {
+ [AP_EVENT_POLL] = ap_sm_suspend_read,
+ [AP_EVENT_TIMEOUT] = ap_sm_nop,
+ },
+ [AP_STATE_BORKED] = {
+ [AP_EVENT_POLL] = ap_sm_nop,
+ [AP_EVENT_TIMEOUT] = ap_sm_nop,
+ },
+};
+
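+/**
+ * ap_sm_event(): Run an event through the state machine of an AP queue.
+ * @aq: pointer to the AP queue
+ * @event: the event to be processed
+ *
+ * Returns the wait hint of the state function that handled the event.
+ */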
+enum ap_wait ap_sm_event(struct ap_queue *aq, enum ap_event event)
+{
+ return ap_jumptable[aq->state][event](aq);
+}
+
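+/**
+ * ap_sm_event_loop(): Run events through the state machine until the
+ *	state function no longer requests an immediate rerun.
+ * @aq: pointer to the AP queue
+ * @event: the event to be processed
+ *
+ * Returns the first wait hint other than AP_WAIT_AGAIN.
+ */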
+enum ap_wait ap_sm_event_loop(struct ap_queue *aq, enum ap_event event)
+{
+ enum ap_wait wait;
+
+ while ((wait = ap_sm_event(aq, event)) == AP_WAIT_AGAIN)
+ ;
+ return wait;
+}
+
+/*
+ * Power management for queue devices
+ */
+void ap_queue_suspend(struct ap_device *ap_dev)
+{
+ struct ap_queue *aq = to_ap_queue(&ap_dev->device);
+
+ /* Poll on the device until all requests are finished. */
+ spin_lock_bh(&aq->lock);
+ aq->state = AP_STATE_SUSPEND_WAIT;
+ while (ap_sm_event(aq, AP_EVENT_POLL) != AP_WAIT_NONE)
+ ;
+ aq->state = AP_STATE_BORKED;
+ spin_unlock_bh(&aq->lock);
+}
+EXPORT_SYMBOL(ap_queue_suspend);
+
+void ap_queue_resume(struct ap_device *ap_dev)
+{
+}
+EXPORT_SYMBOL(ap_queue_resume);
+
+/*
+ * AP queue related attributes.
+ */
+static ssize_t ap_request_count_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct ap_queue *aq = to_ap_queue(dev);
+ unsigned int req_cnt;
+
+ spin_lock_bh(&aq->lock);
+ req_cnt = aq->total_request_count;
+ spin_unlock_bh(&aq->lock);
+ return snprintf(buf, PAGE_SIZE, "%d\n", req_cnt);
+}
+
+static DEVICE_ATTR(request_count, 0444, ap_request_count_show, NULL);
+
+static ssize_t ap_requestq_count_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct ap_queue *aq = to_ap_queue(dev);
+ unsigned int reqq_cnt = 0;
+
+ spin_lock_bh(&aq->lock);
+ reqq_cnt = aq->requestq_count;
+ spin_unlock_bh(&aq->lock);
+ return snprintf(buf, PAGE_SIZE, "%d\n", reqq_cnt);
+}
+
+static DEVICE_ATTR(requestq_count, 0444, ap_requestq_count_show, NULL);
+
+static ssize_t ap_pendingq_count_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct ap_queue *aq = to_ap_queue(dev);
+ unsigned int penq_cnt = 0;
+
+ spin_lock_bh(&aq->lock);
+ penq_cnt = aq->pendingq_count;
+ spin_unlock_bh(&aq->lock);
+ return snprintf(buf, PAGE_SIZE, "%d\n", penq_cnt);
+}
+
+static DEVICE_ATTR(pendingq_count, 0444, ap_pendingq_count_show, NULL);
+
+static ssize_t ap_reset_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct ap_queue *aq = to_ap_queue(dev);
+ int rc = 0;
+
+ spin_lock_bh(&aq->lock);
+ switch (aq->state) {
+ case AP_STATE_RESET_START:
+ case AP_STATE_RESET_WAIT:
+ rc = snprintf(buf, PAGE_SIZE, "Reset in progress.\n");
+ break;
+ case AP_STATE_WORKING:
+ case AP_STATE_QUEUE_FULL:
+ rc = snprintf(buf, PAGE_SIZE, "Reset Timer armed.\n");
+ break;
+ default:
+ rc = snprintf(buf, PAGE_SIZE, "No Reset Timer set.\n");
+ }
+ spin_unlock_bh(&aq->lock);
+ return rc;
+}
+
+static DEVICE_ATTR(reset, 0444, ap_reset_show, NULL);
+
+static ssize_t ap_interrupt_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct ap_queue *aq = to_ap_queue(dev);
+ int rc = 0;
+
+ spin_lock_bh(&aq->lock);
+ if (aq->state == AP_STATE_SETIRQ_WAIT)
+ rc = snprintf(buf, PAGE_SIZE, "Enable Interrupt pending.\n");
+ else if (aq->interrupt == AP_INTR_ENABLED)
+ rc = snprintf(buf, PAGE_SIZE, "Interrupts enabled.\n");
+ else
+ rc = snprintf(buf, PAGE_SIZE, "Interrupts disabled.\n");
+ spin_unlock_bh(&aq->lock);
+ return rc;
+}
+
+static DEVICE_ATTR(interrupt, 0444, ap_interrupt_show, NULL);
+
+static struct attribute *ap_queue_dev_attrs[] = {
+ &dev_attr_request_count.attr,
+ &dev_attr_requestq_count.attr,
+ &dev_attr_pendingq_count.attr,
+ &dev_attr_reset.attr,
+ &dev_attr_interrupt.attr,
+ NULL
+};
+
+static struct attribute_group ap_queue_dev_attr_group = {
+ .attrs = ap_queue_dev_attrs
+};
+
+static const struct attribute_group *ap_queue_dev_attr_groups[] = {
+ &ap_queue_dev_attr_group,
+ NULL
+};
+
+struct device_type ap_queue_type = {
+ .name = "ap_queue",
+ .groups = ap_queue_dev_attr_groups,
+};
+
+static void ap_queue_device_release(struct device *dev)
+{
+ kfree(to_ap_queue(dev));
+}
+
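+/**
+ * ap_queue_create(): Set up a new AP queue device.
+ * @qid: the AP queue number
+ * @device_type: the AP device type
+ *
+ * Returns the newly allocated queue or NULL on failure.
+ */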
+struct ap_queue *ap_queue_create(ap_qid_t qid, int device_type)
+{
+ struct ap_queue *aq;
+
+ aq = kzalloc(sizeof(*aq), GFP_KERNEL);
+ if (!aq)
+ return NULL;
+ aq->ap_dev.device.release = ap_queue_device_release;
+ aq->ap_dev.device.type = &ap_queue_type;
+ aq->ap_dev.device_type = device_type;
+ /* CEX6 toleration: map to CEX5 */
+ if (device_type == AP_DEVICE_TYPE_CEX6)
+ aq->ap_dev.device_type = AP_DEVICE_TYPE_CEX5;
+ aq->qid = qid;
+ aq->state = AP_STATE_RESET_START;
+ aq->interrupt = AP_INTR_DISABLED;
+ spin_lock_init(&aq->lock);
+ INIT_LIST_HEAD(&aq->pendingq);
+ INIT_LIST_HEAD(&aq->requestq);
+ setup_timer(&aq->timeout, ap_request_timeout, (unsigned long) aq);
+
+ return aq;
+}
+
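+/**
+ * ap_queue_init_reply(): Set up the reply buffer and kick the state machine.
+ * @aq: pointer to the AP queue
+ * @reply: pointer to the per-device reply message buffer
+ */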
+void ap_queue_init_reply(struct ap_queue *aq, struct ap_message *reply)
+{
+ aq->reply = reply;
+
+ spin_lock_bh(&aq->lock);
+ ap_wait(ap_sm_event(aq, AP_EVENT_POLL));
+ spin_unlock_bh(&aq->lock);
+}
+EXPORT_SYMBOL(ap_queue_init_reply);
+
+/**
+ * ap_queue_message(): Queue a request to an AP device.
+ * @aq: The AP device to queue the message to
+ * @ap_msg: The message that is to be added
+ */
+void ap_queue_message(struct ap_queue *aq, struct ap_message *ap_msg)
+{
+ /* For asynchronous message handling a valid receive-callback
+ * is required.
+ */
+ BUG_ON(!ap_msg->receive);
+
+ spin_lock_bh(&aq->lock);
+ /* Queue the message. */
+ list_add_tail(&ap_msg->list, &aq->requestq);
+ aq->requestq_count++;
+ aq->total_request_count++;
+ atomic_inc(&aq->card->total_request_count);
+	/* Send/receive as many requests from the queue as possible. */
+ ap_wait(ap_sm_event_loop(aq, AP_EVENT_POLL));
+ spin_unlock_bh(&aq->lock);
+}
+EXPORT_SYMBOL(ap_queue_message);
+
+/**
+ * ap_cancel_message(): Cancel a crypto request.
+ * @aq: The AP device that has the message queued
+ * @ap_msg: The message that is to be removed
+ *
+ * Cancel a crypto request. This is done by removing the request
+ * from the device pending or request queue. Note that the
+ * request stays on the AP queue. When it finishes the message
+ * reply will be discarded because the psmid can't be found.
+ */
+void ap_cancel_message(struct ap_queue *aq, struct ap_message *ap_msg)
+{
+ struct ap_message *tmp;
+
+ spin_lock_bh(&aq->lock);
+ if (!list_empty(&ap_msg->list)) {
+ list_for_each_entry(tmp, &aq->pendingq, list)
+ if (tmp->psmid == ap_msg->psmid) {
+ aq->pendingq_count--;
+ goto found;
+ }
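+		/* Not on the pendingq, so it is still on the requestq. */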
+ aq->requestq_count--;
+found:
+ list_del_init(&ap_msg->list);
+ }
+ spin_unlock_bh(&aq->lock);
+}
+EXPORT_SYMBOL(ap_cancel_message);
+
+/**
+ * __ap_flush_queue(): Flush requests.
+ * @aq: Pointer to the AP queue
+ *
+ * Flush all requests from the request/pending queue of an AP device.
+ */
+static void __ap_flush_queue(struct ap_queue *aq)
+{
+ struct ap_message *ap_msg, *next;
+
+ list_for_each_entry_safe(ap_msg, next, &aq->pendingq, list) {
+ list_del_init(&ap_msg->list);
+ aq->pendingq_count--;
+ ap_msg->rc = -EAGAIN;
+ ap_msg->receive(aq, ap_msg, NULL);
+ }
+ list_for_each_entry_safe(ap_msg, next, &aq->requestq, list) {
+ list_del_init(&ap_msg->list);
+ aq->requestq_count--;
+ ap_msg->rc = -EAGAIN;
+ ap_msg->receive(aq, ap_msg, NULL);
+ }
+}
+
+void ap_flush_queue(struct ap_queue *aq)
+{
+ spin_lock_bh(&aq->lock);
+ __ap_flush_queue(aq);
+ spin_unlock_bh(&aq->lock);
+}
+EXPORT_SYMBOL(ap_flush_queue);
+
+void ap_queue_remove(struct ap_queue *aq)
+{
+ ap_flush_queue(aq);
+ del_timer_sync(&aq->timeout);
+}
+EXPORT_SYMBOL(ap_queue_remove);
diff --git a/drivers/s390/crypto/zcrypt_api.c b/drivers/s390/crypto/zcrypt_api.c
index 5d3d04c040c2..854a6e58dfea 100644
--- a/drivers/s390/crypto/zcrypt_api.c
+++ b/drivers/s390/crypto/zcrypt_api.c
@@ -41,10 +41,14 @@
#include <linux/debugfs.h>
#include <asm/debug.h>
-#include "zcrypt_debug.h"
+#define CREATE_TRACE_POINTS
+#include <asm/trace/zcrypt.h>
+
#include "zcrypt_api.h"
+#include "zcrypt_debug.h"
#include "zcrypt_msgtype6.h"
+#include "zcrypt_msgtype50.h"
/*
* Module description.
@@ -54,76 +58,31 @@ MODULE_DESCRIPTION("Cryptographic Coprocessor interface, " \
"Copyright IBM Corp. 2001, 2012");
MODULE_LICENSE("GPL");
+/*
+ * zcrypt tracepoint functions
+ */
+EXPORT_TRACEPOINT_SYMBOL(s390_zcrypt_req);
+EXPORT_TRACEPOINT_SYMBOL(s390_zcrypt_rep);
+
static int zcrypt_hwrng_seed = 1;
module_param_named(hwrng_seed, zcrypt_hwrng_seed, int, S_IRUSR|S_IRGRP);
MODULE_PARM_DESC(hwrng_seed, "Turn on/off hwrng auto seed, default is 1 (on).");
-static DEFINE_SPINLOCK(zcrypt_device_lock);
-static LIST_HEAD(zcrypt_device_list);
-static int zcrypt_device_count = 0;
+DEFINE_SPINLOCK(zcrypt_list_lock);
+LIST_HEAD(zcrypt_card_list);
+int zcrypt_device_count;
+
static atomic_t zcrypt_open_count = ATOMIC_INIT(0);
static atomic_t zcrypt_rescan_count = ATOMIC_INIT(0);
atomic_t zcrypt_rescan_req = ATOMIC_INIT(0);
EXPORT_SYMBOL(zcrypt_rescan_req);
-static int zcrypt_rng_device_add(void);
-static void zcrypt_rng_device_remove(void);
-
-static DEFINE_SPINLOCK(zcrypt_ops_list_lock);
static LIST_HEAD(zcrypt_ops_list);
-static debug_info_t *zcrypt_dbf_common;
-static debug_info_t *zcrypt_dbf_devices;
-static struct dentry *debugfs_root;
-
-/*
- * Device attributes common for all crypto devices.
- */
-static ssize_t zcrypt_type_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct zcrypt_device *zdev = to_ap_dev(dev)->private;
- return snprintf(buf, PAGE_SIZE, "%s\n", zdev->type_string);
-}
-
-static DEVICE_ATTR(type, 0444, zcrypt_type_show, NULL);
-
-static ssize_t zcrypt_online_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct zcrypt_device *zdev = to_ap_dev(dev)->private;
- return snprintf(buf, PAGE_SIZE, "%d\n", zdev->online);
-}
-
-static ssize_t zcrypt_online_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct zcrypt_device *zdev = to_ap_dev(dev)->private;
- int online;
-
- if (sscanf(buf, "%d\n", &online) != 1 || online < 0 || online > 1)
- return -EINVAL;
- zdev->online = online;
- ZCRYPT_DBF_DEV(DBF_INFO, zdev, "dev%04xo%dman", zdev->ap_dev->qid,
- zdev->online);
- if (!online)
- ap_flush_queue(zdev->ap_dev);
- return count;
-}
-
-static DEVICE_ATTR(online, 0644, zcrypt_online_show, zcrypt_online_store);
-
-static struct attribute * zcrypt_device_attrs[] = {
- &dev_attr_type.attr,
- &dev_attr_online.attr,
- NULL,
-};
-
-static struct attribute_group zcrypt_device_attr_group = {
- .attrs = zcrypt_device_attrs,
-};
+/* Zcrypt related debug feature stuff. */
+static struct dentry *zcrypt_dbf_root;
+debug_info_t *zcrypt_dbf_info;
/**
* Process a rescan of the transport layer.
@@ -136,242 +95,34 @@ static inline int zcrypt_process_rescan(void)
atomic_set(&zcrypt_rescan_req, 0);
atomic_inc(&zcrypt_rescan_count);
ap_bus_force_rescan();
- ZCRYPT_DBF_COMMON(DBF_INFO, "rescan%07d",
- atomic_inc_return(&zcrypt_rescan_count));
+ ZCRYPT_DBF(DBF_INFO, "rescan count=%07d",
+ atomic_inc_return(&zcrypt_rescan_count));
return 1;
}
return 0;
}
-/**
- * __zcrypt_increase_preference(): Increase preference of a crypto device.
- * @zdev: Pointer the crypto device
- *
- * Move the device towards the head of the device list.
- * Need to be called while holding the zcrypt device list lock.
- * Note: cards with speed_rating of 0 are kept at the end of the list.
- */
-static void __zcrypt_increase_preference(struct zcrypt_device *zdev)
-{
- struct zcrypt_device *tmp;
- struct list_head *l;
-
- if (zdev->speed_rating == 0)
- return;
- for (l = zdev->list.prev; l != &zcrypt_device_list; l = l->prev) {
- tmp = list_entry(l, struct zcrypt_device, list);
- if ((tmp->request_count + 1) * tmp->speed_rating <=
- (zdev->request_count + 1) * zdev->speed_rating &&
- tmp->speed_rating != 0)
- break;
- }
- if (l == zdev->list.prev)
- return;
- /* Move zdev behind l */
- list_move(&zdev->list, l);
-}
-
-/**
- * __zcrypt_decrease_preference(): Decrease preference of a crypto device.
- * @zdev: Pointer to a crypto device.
- *
- * Move the device towards the tail of the device list.
- * Need to be called while holding the zcrypt device list lock.
- * Note: cards with speed_rating of 0 are kept at the end of the list.
- */
-static void __zcrypt_decrease_preference(struct zcrypt_device *zdev)
-{
- struct zcrypt_device *tmp;
- struct list_head *l;
-
- if (zdev->speed_rating == 0)
- return;
- for (l = zdev->list.next; l != &zcrypt_device_list; l = l->next) {
- tmp = list_entry(l, struct zcrypt_device, list);
- if ((tmp->request_count + 1) * tmp->speed_rating >
- (zdev->request_count + 1) * zdev->speed_rating ||
- tmp->speed_rating == 0)
- break;
- }
- if (l == zdev->list.next)
- return;
- /* Move zdev before l */
- list_move_tail(&zdev->list, l);
-}
-
-static void zcrypt_device_release(struct kref *kref)
-{
- struct zcrypt_device *zdev =
- container_of(kref, struct zcrypt_device, refcount);
- zcrypt_device_free(zdev);
-}
-
-void zcrypt_device_get(struct zcrypt_device *zdev)
-{
- kref_get(&zdev->refcount);
-}
-EXPORT_SYMBOL(zcrypt_device_get);
-
-int zcrypt_device_put(struct zcrypt_device *zdev)
-{
- return kref_put(&zdev->refcount, zcrypt_device_release);
-}
-EXPORT_SYMBOL(zcrypt_device_put);
-
-struct zcrypt_device *zcrypt_device_alloc(size_t max_response_size)
-{
- struct zcrypt_device *zdev;
-
- zdev = kzalloc(sizeof(struct zcrypt_device), GFP_KERNEL);
- if (!zdev)
- return NULL;
- zdev->reply.message = kmalloc(max_response_size, GFP_KERNEL);
- if (!zdev->reply.message)
- goto out_free;
- zdev->reply.length = max_response_size;
- spin_lock_init(&zdev->lock);
- INIT_LIST_HEAD(&zdev->list);
- zdev->dbf_area = zcrypt_dbf_devices;
- return zdev;
-
-out_free:
- kfree(zdev);
- return NULL;
-}
-EXPORT_SYMBOL(zcrypt_device_alloc);
-
-void zcrypt_device_free(struct zcrypt_device *zdev)
-{
- kfree(zdev->reply.message);
- kfree(zdev);
-}
-EXPORT_SYMBOL(zcrypt_device_free);
-
-/**
- * zcrypt_device_register() - Register a crypto device.
- * @zdev: Pointer to a crypto device
- *
- * Register a crypto device. Returns 0 if successful.
- */
-int zcrypt_device_register(struct zcrypt_device *zdev)
-{
- int rc;
-
- if (!zdev->ops)
- return -ENODEV;
- rc = sysfs_create_group(&zdev->ap_dev->device.kobj,
- &zcrypt_device_attr_group);
- if (rc)
- goto out;
- get_device(&zdev->ap_dev->device);
- kref_init(&zdev->refcount);
- spin_lock_bh(&zcrypt_device_lock);
- zdev->online = 1; /* New devices are online by default. */
- ZCRYPT_DBF_DEV(DBF_INFO, zdev, "dev%04xo%dreg", zdev->ap_dev->qid,
- zdev->online);
- list_add_tail(&zdev->list, &zcrypt_device_list);
- __zcrypt_increase_preference(zdev);
- zcrypt_device_count++;
- spin_unlock_bh(&zcrypt_device_lock);
- if (zdev->ops->rng) {
- rc = zcrypt_rng_device_add();
- if (rc)
- goto out_unregister;
- }
- return 0;
-
-out_unregister:
- spin_lock_bh(&zcrypt_device_lock);
- zcrypt_device_count--;
- list_del_init(&zdev->list);
- spin_unlock_bh(&zcrypt_device_lock);
- sysfs_remove_group(&zdev->ap_dev->device.kobj,
- &zcrypt_device_attr_group);
- put_device(&zdev->ap_dev->device);
- zcrypt_device_put(zdev);
-out:
- return rc;
-}
-EXPORT_SYMBOL(zcrypt_device_register);
-
-/**
- * zcrypt_device_unregister(): Unregister a crypto device.
- * @zdev: Pointer to crypto device
- *
- * Unregister a crypto device.
- */
-void zcrypt_device_unregister(struct zcrypt_device *zdev)
-{
- if (zdev->ops->rng)
- zcrypt_rng_device_remove();
- spin_lock_bh(&zcrypt_device_lock);
- zcrypt_device_count--;
- list_del_init(&zdev->list);
- spin_unlock_bh(&zcrypt_device_lock);
- sysfs_remove_group(&zdev->ap_dev->device.kobj,
- &zcrypt_device_attr_group);
- put_device(&zdev->ap_dev->device);
- zcrypt_device_put(zdev);
-}
-EXPORT_SYMBOL(zcrypt_device_unregister);
-
void zcrypt_msgtype_register(struct zcrypt_ops *zops)
{
- spin_lock_bh(&zcrypt_ops_list_lock);
list_add_tail(&zops->list, &zcrypt_ops_list);
- spin_unlock_bh(&zcrypt_ops_list_lock);
}
-EXPORT_SYMBOL(zcrypt_msgtype_register);
void zcrypt_msgtype_unregister(struct zcrypt_ops *zops)
{
- spin_lock_bh(&zcrypt_ops_list_lock);
list_del_init(&zops->list);
- spin_unlock_bh(&zcrypt_ops_list_lock);
}
-EXPORT_SYMBOL(zcrypt_msgtype_unregister);
-static inline
-struct zcrypt_ops *__ops_lookup(unsigned char *name, int variant)
+struct zcrypt_ops *zcrypt_msgtype(unsigned char *name, int variant)
{
struct zcrypt_ops *zops;
- int found = 0;
- spin_lock_bh(&zcrypt_ops_list_lock);
- list_for_each_entry(zops, &zcrypt_ops_list, list) {
+ list_for_each_entry(zops, &zcrypt_ops_list, list)
if ((zops->variant == variant) &&
- (!strncmp(zops->name, name, sizeof(zops->name)))) {
- found = 1;
- break;
- }
- }
- if (!found || !try_module_get(zops->owner))
- zops = NULL;
-
- spin_unlock_bh(&zcrypt_ops_list_lock);
-
- return zops;
-}
-
-struct zcrypt_ops *zcrypt_msgtype_request(unsigned char *name, int variant)
-{
- struct zcrypt_ops *zops = NULL;
-
- zops = __ops_lookup(name, variant);
- if (!zops) {
- request_module("%s", name);
- zops = __ops_lookup(name, variant);
- }
- return zops;
-}
-EXPORT_SYMBOL(zcrypt_msgtype_request);
-
-void zcrypt_msgtype_release(struct zcrypt_ops *zops)
-{
- if (zops)
- module_put(zops->owner);
+ (!strncmp(zops->name, name, sizeof(zops->name))))
+ return zops;
+ return NULL;
}
-EXPORT_SYMBOL(zcrypt_msgtype_release);
+EXPORT_SYMBOL(zcrypt_msgtype);
/**
* zcrypt_read (): Not supported beyond zcrypt 1.3.1.
@@ -417,16 +168,80 @@ static int zcrypt_release(struct inode *inode, struct file *filp)
return 0;
}
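+/*
+ * zcrypt_pick_queue(): Pin the selected queue for a single request:
+ * take a module, queue and device reference and charge the request
+ * weight to the card and queue load counters. Returns NULL if no
+ * queue was selected or the owning driver module is being removed.
+ */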
+static inline struct zcrypt_queue *zcrypt_pick_queue(struct zcrypt_card *zc,
+ struct zcrypt_queue *zq,
+ unsigned int weight)
+{
+ if (!zq || !try_module_get(zq->queue->ap_dev.drv->driver.owner))
+ return NULL;
+ zcrypt_queue_get(zq);
+ get_device(&zq->queue->ap_dev.device);
+ atomic_add(weight, &zc->load);
+ atomic_add(weight, &zq->load);
+ zq->request_count++;
+ return zq;
+}
+
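+/*
+ * zcrypt_drop_queue(): Undo zcrypt_pick_queue(): subtract the request
+ * weight from the load counters and release the references again.
+ */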
+static inline void zcrypt_drop_queue(struct zcrypt_card *zc,
+ struct zcrypt_queue *zq,
+ unsigned int weight)
+{
+ struct module *mod = zq->queue->ap_dev.drv->driver.owner;
+
+ zq->request_count--;
+ atomic_sub(weight, &zc->load);
+ atomic_sub(weight, &zq->load);
+ put_device(&zq->queue->ap_dev.device);
+ zcrypt_queue_put(zq);
+ module_put(mod);
+}
+
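+/*
+ * zcrypt_card_compare(): Returns true if the candidate card carries a
+ * higher effective load (request weight plus current load) than the
+ * preferred card and should be skipped; ties are broken by the total
+ * request count.
+ */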
+static inline bool zcrypt_card_compare(struct zcrypt_card *zc,
+ struct zcrypt_card *pref_zc,
+ unsigned weight, unsigned pref_weight)
+{
+ if (!pref_zc)
+ return 0;
+ weight += atomic_read(&zc->load);
+ pref_weight += atomic_read(&pref_zc->load);
+ if (weight == pref_weight)
+ return atomic_read(&zc->card->total_request_count) >
+ atomic_read(&pref_zc->card->total_request_count);
+ return weight > pref_weight;
+}
+
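+/*
+ * zcrypt_queue_compare(): Same comparison as zcrypt_card_compare(),
+ * but on the queue level within the selected card.
+ */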
+static inline bool zcrypt_queue_compare(struct zcrypt_queue *zq,
+ struct zcrypt_queue *pref_zq,
+ unsigned weight, unsigned pref_weight)
+{
+ if (!pref_zq)
+ return 0;
+ weight += atomic_read(&zq->load);
+ pref_weight += atomic_read(&pref_zq->load);
+ if (weight == pref_weight)
+		return zq->queue->total_request_count >
+			pref_zq->queue->total_request_count;
+ return weight > pref_weight;
+}
+
/*
* zcrypt ioctls.
*/
static long zcrypt_rsa_modexpo(struct ica_rsa_modexpo *mex)
{
- struct zcrypt_device *zdev;
- int rc;
+ struct zcrypt_card *zc, *pref_zc;
+ struct zcrypt_queue *zq, *pref_zq;
+ unsigned int weight, pref_weight;
+ unsigned int func_code;
+ int qid = 0, rc = -ENODEV;
+
+ trace_s390_zcrypt_req(mex, TP_ICARSAMODEXPO);
+
+ if (mex->outputdatalength < mex->inputdatalength) {
+ rc = -EINVAL;
+ goto out;
+ }
- if (mex->outputdatalength < mex->inputdatalength)
- return -EINVAL;
/*
* As long as outputdatalength is big enough, we can set the
* outputdatalength equal to the inputdatalength, since that is the
@@ -434,44 +249,73 @@ static long zcrypt_rsa_modexpo(struct ica_rsa_modexpo *mex)
*/
mex->outputdatalength = mex->inputdatalength;
- spin_lock_bh(&zcrypt_device_lock);
- list_for_each_entry(zdev, &zcrypt_device_list, list) {
- if (!zdev->online ||
- !zdev->ops->rsa_modexpo ||
- zdev->min_mod_size > mex->inputdatalength ||
- zdev->max_mod_size < mex->inputdatalength)
+ rc = get_rsa_modex_fc(mex, &func_code);
+ if (rc)
+ goto out;
+
+ pref_zc = NULL;
+ pref_zq = NULL;
+ spin_lock(&zcrypt_list_lock);
+ for_each_zcrypt_card(zc) {
+		/* Check for online accelerator and CCA cards */
+ if (!zc->online || !(zc->card->functions & 0x18000000))
+ continue;
+ /* Check for size limits */
+ if (zc->min_mod_size > mex->inputdatalength ||
+ zc->max_mod_size < mex->inputdatalength)
+ continue;
+ /* get weight index of the card device */
+ weight = zc->speed_rating[func_code];
+ if (zcrypt_card_compare(zc, pref_zc, weight, pref_weight))
continue;
- zcrypt_device_get(zdev);
- get_device(&zdev->ap_dev->device);
- zdev->request_count++;
- __zcrypt_decrease_preference(zdev);
- if (try_module_get(zdev->ap_dev->drv->driver.owner)) {
- spin_unlock_bh(&zcrypt_device_lock);
- rc = zdev->ops->rsa_modexpo(zdev, mex);
- spin_lock_bh(&zcrypt_device_lock);
- module_put(zdev->ap_dev->drv->driver.owner);
+ for_each_zcrypt_queue(zq, zc) {
+ /* check if device is online and eligible */
+ if (!zq->online || !zq->ops->rsa_modexpo)
+ continue;
+ if (zcrypt_queue_compare(zq, pref_zq,
+ weight, pref_weight))
+ continue;
+ pref_zc = zc;
+ pref_zq = zq;
+ pref_weight = weight;
}
- else
- rc = -EAGAIN;
- zdev->request_count--;
- __zcrypt_increase_preference(zdev);
- put_device(&zdev->ap_dev->device);
- zcrypt_device_put(zdev);
- spin_unlock_bh(&zcrypt_device_lock);
- return rc;
}
- spin_unlock_bh(&zcrypt_device_lock);
- return -ENODEV;
+ pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, weight);
+ spin_unlock(&zcrypt_list_lock);
+
+ if (!pref_zq) {
+ rc = -ENODEV;
+ goto out;
+ }
+
+ qid = pref_zq->queue->qid;
+ rc = pref_zq->ops->rsa_modexpo(pref_zq, mex);
+
+ spin_lock(&zcrypt_list_lock);
+ zcrypt_drop_queue(pref_zc, pref_zq, weight);
+ spin_unlock(&zcrypt_list_lock);
+
+out:
+ trace_s390_zcrypt_rep(mex, func_code, rc,
+ AP_QID_CARD(qid), AP_QID_QUEUE(qid));
+ return rc;
}
static long zcrypt_rsa_crt(struct ica_rsa_modexpo_crt *crt)
{
- struct zcrypt_device *zdev;
- unsigned long long z1, z2, z3;
- int rc, copied;
+ struct zcrypt_card *zc, *pref_zc;
+ struct zcrypt_queue *zq, *pref_zq;
+ unsigned int weight, pref_weight;
+ unsigned int func_code;
+ int qid = 0, rc = -ENODEV;
+
+ trace_s390_zcrypt_req(crt, TP_ICARSACRT);
+
+ if (crt->outputdatalength < crt->inputdatalength) {
+ rc = -EINVAL;
+ goto out;
+ }
- if (crt->outputdatalength < crt->inputdatalength)
- return -EINVAL;
/*
* As long as outputdatalength is big enough, we can set the
* outputdatalength equal to the inputdatalength, since that is the
@@ -479,308 +323,445 @@ static long zcrypt_rsa_crt(struct ica_rsa_modexpo_crt *crt)
*/
crt->outputdatalength = crt->inputdatalength;
- copied = 0;
- restart:
- spin_lock_bh(&zcrypt_device_lock);
- list_for_each_entry(zdev, &zcrypt_device_list, list) {
- if (!zdev->online ||
- !zdev->ops->rsa_modexpo_crt ||
- zdev->min_mod_size > crt->inputdatalength ||
- zdev->max_mod_size < crt->inputdatalength)
+ rc = get_rsa_crt_fc(crt, &func_code);
+ if (rc)
+ goto out;
+
+ pref_zc = NULL;
+ pref_zq = NULL;
+ spin_lock(&zcrypt_list_lock);
+ for_each_zcrypt_card(zc) {
+		/* Check for online accelerator and CCA cards */
+ if (!zc->online || !(zc->card->functions & 0x18000000))
+ continue;
+ /* Check for size limits */
+ if (zc->min_mod_size > crt->inputdatalength ||
+ zc->max_mod_size < crt->inputdatalength)
continue;
- if (zdev->short_crt && crt->inputdatalength > 240) {
- /*
- * Check inputdata for leading zeros for cards
- * that can't handle np_prime, bp_key, or
- * u_mult_inv > 128 bytes.
- */
- if (copied == 0) {
- unsigned int len;
- spin_unlock_bh(&zcrypt_device_lock);
- /* len is max 256 / 2 - 120 = 8
- * For bigger device just assume len of leading
- * 0s is 8 as stated in the requirements for
- * ica_rsa_modexpo_crt struct in zcrypt.h.
- */
- if (crt->inputdatalength <= 256)
- len = crt->inputdatalength / 2 - 120;
- else
- len = 8;
- if (len > sizeof(z1))
- return -EFAULT;
- z1 = z2 = z3 = 0;
- if (copy_from_user(&z1, crt->np_prime, len) ||
- copy_from_user(&z2, crt->bp_key, len) ||
- copy_from_user(&z3, crt->u_mult_inv, len))
- return -EFAULT;
- z1 = z2 = z3 = 0;
- copied = 1;
- /*
- * We have to restart device lookup -
- * the device list may have changed by now.
- */
- goto restart;
- }
- if (z1 != 0ULL || z2 != 0ULL || z3 != 0ULL)
- /* The device can't handle this request. */
+ /* get weight index of the card device */
+ weight = zc->speed_rating[func_code];
+ if (zcrypt_card_compare(zc, pref_zc, weight, pref_weight))
+ continue;
+ for_each_zcrypt_queue(zq, zc) {
+ /* check if device is online and eligible */
+ if (!zq->online || !zq->ops->rsa_modexpo_crt)
continue;
+ if (zcrypt_queue_compare(zq, pref_zq,
+ weight, pref_weight))
+ continue;
+ pref_zc = zc;
+ pref_zq = zq;
+ pref_weight = weight;
}
- zcrypt_device_get(zdev);
- get_device(&zdev->ap_dev->device);
- zdev->request_count++;
- __zcrypt_decrease_preference(zdev);
- if (try_module_get(zdev->ap_dev->drv->driver.owner)) {
- spin_unlock_bh(&zcrypt_device_lock);
- rc = zdev->ops->rsa_modexpo_crt(zdev, crt);
- spin_lock_bh(&zcrypt_device_lock);
- module_put(zdev->ap_dev->drv->driver.owner);
- }
- else
- rc = -EAGAIN;
- zdev->request_count--;
- __zcrypt_increase_preference(zdev);
- put_device(&zdev->ap_dev->device);
- zcrypt_device_put(zdev);
- spin_unlock_bh(&zcrypt_device_lock);
- return rc;
}
- spin_unlock_bh(&zcrypt_device_lock);
- return -ENODEV;
+ pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, weight);
+ spin_unlock(&zcrypt_list_lock);
+
+ if (!pref_zq) {
+ rc = -ENODEV;
+ goto out;
+ }
+
+ qid = pref_zq->queue->qid;
+ rc = pref_zq->ops->rsa_modexpo_crt(pref_zq, crt);
+
+ spin_lock(&zcrypt_list_lock);
+ zcrypt_drop_queue(pref_zc, pref_zq, weight);
+ spin_unlock(&zcrypt_list_lock);
+
+out:
+ trace_s390_zcrypt_rep(crt, func_code, rc,
+ AP_QID_CARD(qid), AP_QID_QUEUE(qid));
+ return rc;
}
static long zcrypt_send_cprb(struct ica_xcRB *xcRB)
{
- struct zcrypt_device *zdev;
- int rc;
+ struct zcrypt_card *zc, *pref_zc;
+ struct zcrypt_queue *zq, *pref_zq;
+ struct ap_message ap_msg;
+ unsigned int weight, pref_weight;
+ unsigned int func_code;
+ unsigned short *domain;
+ int qid = 0, rc = -ENODEV;
+
+ trace_s390_zcrypt_req(xcRB, TB_ZSECSENDCPRB);
- spin_lock_bh(&zcrypt_device_lock);
- list_for_each_entry(zdev, &zcrypt_device_list, list) {
- if (!zdev->online || !zdev->ops->send_cprb ||
- (zdev->ops->variant == MSGTYPE06_VARIANT_EP11) ||
- (xcRB->user_defined != AUTOSELECT &&
- AP_QID_DEVICE(zdev->ap_dev->qid) != xcRB->user_defined))
+ rc = get_cprb_fc(xcRB, &ap_msg, &func_code, &domain);
+ if (rc)
+ goto out;
+
+ pref_zc = NULL;
+ pref_zq = NULL;
+ spin_lock(&zcrypt_list_lock);
+ for_each_zcrypt_card(zc) {
+ /* Check for online CCA cards */
+ if (!zc->online || !(zc->card->functions & 0x10000000))
+ continue;
+ /* Check for user selected CCA card */
+ if (xcRB->user_defined != AUTOSELECT &&
+ xcRB->user_defined != zc->card->id)
continue;
- zcrypt_device_get(zdev);
- get_device(&zdev->ap_dev->device);
- zdev->request_count++;
- __zcrypt_decrease_preference(zdev);
- if (try_module_get(zdev->ap_dev->drv->driver.owner)) {
- spin_unlock_bh(&zcrypt_device_lock);
- rc = zdev->ops->send_cprb(zdev, xcRB);
- spin_lock_bh(&zcrypt_device_lock);
- module_put(zdev->ap_dev->drv->driver.owner);
+ /* get weight index of the card device */
+ weight = speed_idx_cca(func_code) * zc->speed_rating[SECKEY];
+ if (zcrypt_card_compare(zc, pref_zc, weight, pref_weight))
+ continue;
+ for_each_zcrypt_queue(zq, zc) {
+ /* check if device is online and eligible */
+ if (!zq->online ||
+ !zq->ops->send_cprb ||
+ ((*domain != (unsigned short) AUTOSELECT) &&
+ (*domain != AP_QID_QUEUE(zq->queue->qid))))
+ continue;
+ if (zcrypt_queue_compare(zq, pref_zq,
+ weight, pref_weight))
+ continue;
+ pref_zc = zc;
+ pref_zq = zq;
+ pref_weight = weight;
}
- else
- rc = -EAGAIN;
- zdev->request_count--;
- __zcrypt_increase_preference(zdev);
- put_device(&zdev->ap_dev->device);
- zcrypt_device_put(zdev);
- spin_unlock_bh(&zcrypt_device_lock);
- return rc;
}
- spin_unlock_bh(&zcrypt_device_lock);
- return -ENODEV;
-}
+ pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, weight);
+ spin_unlock(&zcrypt_list_lock);
-struct ep11_target_dev_list {
- unsigned short targets_num;
- struct ep11_target_dev *targets;
-};
+ if (!pref_zq) {
+ rc = -ENODEV;
+ goto out;
+ }
+
+ /* in case of auto select, provide the correct domain */
+ qid = pref_zq->queue->qid;
+ if (*domain == (unsigned short) AUTOSELECT)
+ *domain = AP_QID_QUEUE(qid);
-static bool is_desired_ep11dev(unsigned int dev_qid,
- struct ep11_target_dev_list dev_list)
+ rc = pref_zq->ops->send_cprb(pref_zq, xcRB, &ap_msg);
+
+ spin_lock(&zcrypt_list_lock);
+ zcrypt_drop_queue(pref_zc, pref_zq, weight);
+ spin_unlock(&zcrypt_list_lock);
+
+out:
+ trace_s390_zcrypt_rep(xcRB, func_code, rc,
+ AP_QID_CARD(qid), AP_QID_QUEUE(qid));
+ return rc;
+}
+
+static bool is_desired_ep11_card(unsigned int dev_id,
+ unsigned short target_num,
+ struct ep11_target_dev *targets)
{
- int n;
+ while (target_num-- > 0) {
+ if (dev_id == targets->ap_id)
+ return true;
+ targets++;
+ }
+ return false;
+}
- for (n = 0; n < dev_list.targets_num; n++, dev_list.targets++) {
- if ((AP_QID_DEVICE(dev_qid) == dev_list.targets->ap_id) &&
- (AP_QID_QUEUE(dev_qid) == dev_list.targets->dom_id)) {
+static bool is_desired_ep11_queue(unsigned int dev_qid,
+ unsigned short target_num,
+ struct ep11_target_dev *targets)
+{
+ while (target_num-- > 0) {
+ if (AP_MKQID(targets->ap_id, targets->dom_id) == dev_qid)
return true;
- }
+ targets++;
}
return false;
}
static long zcrypt_send_ep11_cprb(struct ep11_urb *xcrb)
{
- struct zcrypt_device *zdev;
- bool autoselect = false;
- int rc;
- struct ep11_target_dev_list ep11_dev_list = {
- .targets_num = 0x00,
- .targets = NULL,
- };
+ struct zcrypt_card *zc, *pref_zc;
+ struct zcrypt_queue *zq, *pref_zq;
+ struct ep11_target_dev *targets;
+ unsigned short target_num;
+ unsigned int weight, pref_weight;
+ unsigned int func_code;
+ struct ap_message ap_msg;
+ int qid = 0, rc = -ENODEV;
+
+ trace_s390_zcrypt_req(xcrb, TP_ZSENDEP11CPRB);
- ep11_dev_list.targets_num = (unsigned short) xcrb->targets_num;
+ target_num = (unsigned short) xcrb->targets_num;
/* empty list indicates autoselect (all available targets) */
- if (ep11_dev_list.targets_num == 0)
- autoselect = true;
- else {
- ep11_dev_list.targets = kcalloc((unsigned short)
- xcrb->targets_num,
- sizeof(struct ep11_target_dev),
- GFP_KERNEL);
- if (!ep11_dev_list.targets)
- return -ENOMEM;
+ targets = NULL;
+ if (target_num != 0) {
+ struct ep11_target_dev __user *uptr;
- if (copy_from_user(ep11_dev_list.targets,
- (struct ep11_target_dev __force __user *)
- xcrb->targets, xcrb->targets_num *
- sizeof(struct ep11_target_dev)))
- return -EFAULT;
+ targets = kcalloc(target_num, sizeof(*targets), GFP_KERNEL);
+ if (!targets) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ uptr = (struct ep11_target_dev __force __user *) xcrb->targets;
+ if (copy_from_user(targets, uptr,
+ target_num * sizeof(*targets))) {
+ rc = -EFAULT;
+			goto out_free;
+ }
}
- spin_lock_bh(&zcrypt_device_lock);
- list_for_each_entry(zdev, &zcrypt_device_list, list) {
- /* check if device is eligible */
- if (!zdev->online ||
- zdev->ops->variant != MSGTYPE06_VARIANT_EP11)
- continue;
+ rc = get_ep11cprb_fc(xcrb, &ap_msg, &func_code);
+ if (rc)
+ goto out_free;
- /* check if device is selected as valid target */
- if (!is_desired_ep11dev(zdev->ap_dev->qid, ep11_dev_list) &&
- !autoselect)
+ pref_zc = NULL;
+ pref_zq = NULL;
+ spin_lock(&zcrypt_list_lock);
+ for_each_zcrypt_card(zc) {
+ /* Check for online EP11 cards */
+ if (!zc->online || !(zc->card->functions & 0x04000000))
+ continue;
+ /* Check for user selected EP11 card */
+ if (targets &&
+ !is_desired_ep11_card(zc->card->id, target_num, targets))
continue;
+ /* get weight index of the card device */
+ weight = speed_idx_ep11(func_code) * zc->speed_rating[SECKEY];
+ if (zcrypt_card_compare(zc, pref_zc, weight, pref_weight))
+ continue;
+ for_each_zcrypt_queue(zq, zc) {
+ /* check if device is online and eligible */
+ if (!zq->online ||
+ !zq->ops->send_ep11_cprb ||
+ (targets &&
+ !is_desired_ep11_queue(zq->queue->qid,
+ target_num, targets)))
+ continue;
+ if (zcrypt_queue_compare(zq, pref_zq,
+ weight, pref_weight))
+ continue;
+ pref_zc = zc;
+ pref_zq = zq;
+ pref_weight = weight;
+ }
+ }
+ pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, weight);
+ spin_unlock(&zcrypt_list_lock);
- zcrypt_device_get(zdev);
- get_device(&zdev->ap_dev->device);
- zdev->request_count++;
- __zcrypt_decrease_preference(zdev);
- if (try_module_get(zdev->ap_dev->drv->driver.owner)) {
- spin_unlock_bh(&zcrypt_device_lock);
- rc = zdev->ops->send_ep11_cprb(zdev, xcrb);
- spin_lock_bh(&zcrypt_device_lock);
- module_put(zdev->ap_dev->drv->driver.owner);
- } else {
- rc = -EAGAIN;
- }
- zdev->request_count--;
- __zcrypt_increase_preference(zdev);
- put_device(&zdev->ap_dev->device);
- zcrypt_device_put(zdev);
- spin_unlock_bh(&zcrypt_device_lock);
- return rc;
+ if (!pref_zq) {
+ rc = -ENODEV;
+ goto out_free;
}
- spin_unlock_bh(&zcrypt_device_lock);
- return -ENODEV;
+
+ qid = pref_zq->queue->qid;
+ rc = pref_zq->ops->send_ep11_cprb(pref_zq, xcrb, &ap_msg);
+
+ spin_lock(&zcrypt_list_lock);
+ zcrypt_drop_queue(pref_zc, pref_zq, weight);
+ spin_unlock(&zcrypt_list_lock);
+
+out_free:
+ kfree(targets);
+out:
+ trace_s390_zcrypt_rep(xcrb, func_code, rc,
+ AP_QID_CARD(qid), AP_QID_QUEUE(qid));
+ return rc;
}
static long zcrypt_rng(char *buffer)
{
- struct zcrypt_device *zdev;
- int rc;
+ struct zcrypt_card *zc, *pref_zc;
+ struct zcrypt_queue *zq, *pref_zq;
+ unsigned int weight, pref_weight;
+ unsigned int func_code;
+ struct ap_message ap_msg;
+ unsigned int domain;
+ int qid = 0, rc = -ENODEV;
+
+ trace_s390_zcrypt_req(buffer, TP_HWRNGCPRB);
- spin_lock_bh(&zcrypt_device_lock);
- list_for_each_entry(zdev, &zcrypt_device_list, list) {
- if (!zdev->online || !zdev->ops->rng)
+ rc = get_rng_fc(&ap_msg, &func_code, &domain);
+ if (rc)
+ goto out;
+
+ pref_zc = NULL;
+ pref_zq = NULL;
+ spin_lock(&zcrypt_list_lock);
+ for_each_zcrypt_card(zc) {
+ /* Check for online CCA cards */
+ if (!zc->online || !(zc->card->functions & 0x10000000))
continue;
- zcrypt_device_get(zdev);
- get_device(&zdev->ap_dev->device);
- zdev->request_count++;
- __zcrypt_decrease_preference(zdev);
- if (try_module_get(zdev->ap_dev->drv->driver.owner)) {
- spin_unlock_bh(&zcrypt_device_lock);
- rc = zdev->ops->rng(zdev, buffer);
- spin_lock_bh(&zcrypt_device_lock);
- module_put(zdev->ap_dev->drv->driver.owner);
- } else
- rc = -EAGAIN;
- zdev->request_count--;
- __zcrypt_increase_preference(zdev);
- put_device(&zdev->ap_dev->device);
- zcrypt_device_put(zdev);
- spin_unlock_bh(&zcrypt_device_lock);
- return rc;
+ /* get weight index of the card device */
+ weight = zc->speed_rating[func_code];
+ if (zcrypt_card_compare(zc, pref_zc, weight, pref_weight))
+ continue;
+ for_each_zcrypt_queue(zq, zc) {
+ /* check if device is online and eligible */
+ if (!zq->online || !zq->ops->rng)
+ continue;
+ if (zcrypt_queue_compare(zq, pref_zq,
+ weight, pref_weight))
+ continue;
+ pref_zc = zc;
+ pref_zq = zq;
+ pref_weight = weight;
+ }
+ }
+ pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, weight);
+ spin_unlock(&zcrypt_list_lock);
+
+	if (!pref_zq) {
+		rc = -ENODEV;
+		goto out;
+	}
+
+ qid = pref_zq->queue->qid;
+ rc = pref_zq->ops->rng(pref_zq, buffer, &ap_msg);
+
+ spin_lock(&zcrypt_list_lock);
+ zcrypt_drop_queue(pref_zc, pref_zq, weight);
+ spin_unlock(&zcrypt_list_lock);
+
+out:
+ trace_s390_zcrypt_rep(buffer, func_code, rc,
+ AP_QID_CARD(qid), AP_QID_QUEUE(qid));
+ return rc;
+}
+
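+/*
+ * zcrypt_device_status_mask(): Fill the device status matrix with one
+ * entry per (card, domain) pair: hardware type, function bits, queue
+ * id and online state.
+ */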
+static void zcrypt_device_status_mask(struct zcrypt_device_matrix *matrix)
+{
+ struct zcrypt_card *zc;
+ struct zcrypt_queue *zq;
+ struct zcrypt_device_status *stat;
+
+ memset(matrix, 0, sizeof(*matrix));
+ spin_lock(&zcrypt_list_lock);
+ for_each_zcrypt_card(zc) {
+ for_each_zcrypt_queue(zq, zc) {
+ stat = matrix->device;
+ stat += AP_QID_CARD(zq->queue->qid) * MAX_ZDEV_DOMAINS;
+ stat += AP_QID_QUEUE(zq->queue->qid);
+ stat->hwtype = zc->card->ap_dev.device_type;
+ stat->functions = zc->card->functions >> 26;
+ stat->qid = zq->queue->qid;
+ stat->online = zq->online ? 0x01 : 0x00;
+ }
}
- spin_unlock_bh(&zcrypt_device_lock);
- return -ENODEV;
+ spin_unlock(&zcrypt_list_lock);
}
static void zcrypt_status_mask(char status[AP_DEVICES])
{
- struct zcrypt_device *zdev;
+ struct zcrypt_card *zc;
+ struct zcrypt_queue *zq;
memset(status, 0, sizeof(char) * AP_DEVICES);
- spin_lock_bh(&zcrypt_device_lock);
- list_for_each_entry(zdev, &zcrypt_device_list, list)
- status[AP_QID_DEVICE(zdev->ap_dev->qid)] =
- zdev->online ? zdev->user_space_type : 0x0d;
- spin_unlock_bh(&zcrypt_device_lock);
+ spin_lock(&zcrypt_list_lock);
+ for_each_zcrypt_card(zc) {
+ for_each_zcrypt_queue(zq, zc) {
+ if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index)
+ continue;
+ status[AP_QID_CARD(zq->queue->qid)] =
+ zc->online ? zc->user_space_type : 0x0d;
+ }
+ }
+ spin_unlock(&zcrypt_list_lock);
}
static void zcrypt_qdepth_mask(char qdepth[AP_DEVICES])
{
- struct zcrypt_device *zdev;
+ struct zcrypt_card *zc;
+ struct zcrypt_queue *zq;
memset(qdepth, 0, sizeof(char) * AP_DEVICES);
- spin_lock_bh(&zcrypt_device_lock);
- list_for_each_entry(zdev, &zcrypt_device_list, list) {
- spin_lock(&zdev->ap_dev->lock);
- qdepth[AP_QID_DEVICE(zdev->ap_dev->qid)] =
- zdev->ap_dev->pendingq_count +
- zdev->ap_dev->requestq_count;
- spin_unlock(&zdev->ap_dev->lock);
+ spin_lock(&zcrypt_list_lock);
+ for_each_zcrypt_card(zc) {
+ for_each_zcrypt_queue(zq, zc) {
+ if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index)
+ continue;
+ spin_lock(&zq->queue->lock);
+ qdepth[AP_QID_CARD(zq->queue->qid)] =
+ zq->queue->pendingq_count +
+ zq->queue->requestq_count;
+ spin_unlock(&zq->queue->lock);
+ }
}
- spin_unlock_bh(&zcrypt_device_lock);
+ spin_unlock(&zcrypt_list_lock);
}
static void zcrypt_perdev_reqcnt(int reqcnt[AP_DEVICES])
{
- struct zcrypt_device *zdev;
+ struct zcrypt_card *zc;
+ struct zcrypt_queue *zq;
memset(reqcnt, 0, sizeof(int) * AP_DEVICES);
- spin_lock_bh(&zcrypt_device_lock);
- list_for_each_entry(zdev, &zcrypt_device_list, list) {
- spin_lock(&zdev->ap_dev->lock);
- reqcnt[AP_QID_DEVICE(zdev->ap_dev->qid)] =
- zdev->ap_dev->total_request_count;
- spin_unlock(&zdev->ap_dev->lock);
+ spin_lock(&zcrypt_list_lock);
+ for_each_zcrypt_card(zc) {
+ for_each_zcrypt_queue(zq, zc) {
+ if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index)
+ continue;
+ spin_lock(&zq->queue->lock);
+ reqcnt[AP_QID_CARD(zq->queue->qid)] =
+ zq->queue->total_request_count;
+ spin_unlock(&zq->queue->lock);
+ }
}
- spin_unlock_bh(&zcrypt_device_lock);
+ spin_unlock(&zcrypt_list_lock);
}
static int zcrypt_pendingq_count(void)
{
- struct zcrypt_device *zdev;
- int pendingq_count = 0;
-
- spin_lock_bh(&zcrypt_device_lock);
- list_for_each_entry(zdev, &zcrypt_device_list, list) {
- spin_lock(&zdev->ap_dev->lock);
- pendingq_count += zdev->ap_dev->pendingq_count;
- spin_unlock(&zdev->ap_dev->lock);
+ struct zcrypt_card *zc;
+ struct zcrypt_queue *zq;
+ int pendingq_count;
+
+ pendingq_count = 0;
+ spin_lock(&zcrypt_list_lock);
+ for_each_zcrypt_card(zc) {
+ for_each_zcrypt_queue(zq, zc) {
+ if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index)
+ continue;
+ spin_lock(&zq->queue->lock);
+ pendingq_count += zq->queue->pendingq_count;
+ spin_unlock(&zq->queue->lock);
+ }
}
- spin_unlock_bh(&zcrypt_device_lock);
+ spin_unlock(&zcrypt_list_lock);
return pendingq_count;
}
static int zcrypt_requestq_count(void)
{
- struct zcrypt_device *zdev;
- int requestq_count = 0;
-
- spin_lock_bh(&zcrypt_device_lock);
- list_for_each_entry(zdev, &zcrypt_device_list, list) {
- spin_lock(&zdev->ap_dev->lock);
- requestq_count += zdev->ap_dev->requestq_count;
- spin_unlock(&zdev->ap_dev->lock);
+ struct zcrypt_card *zc;
+ struct zcrypt_queue *zq;
+ int requestq_count;
+
+ requestq_count = 0;
+ spin_lock(&zcrypt_list_lock);
+ for_each_zcrypt_card(zc) {
+ for_each_zcrypt_queue(zq, zc) {
+ if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index)
+ continue;
+ spin_lock(&zq->queue->lock);
+ requestq_count += zq->queue->requestq_count;
+ spin_unlock(&zq->queue->lock);
+ }
}
- spin_unlock_bh(&zcrypt_device_lock);
+ spin_unlock(&zcrypt_list_lock);
return requestq_count;
}
static int zcrypt_count_type(int type)
{
- struct zcrypt_device *zdev;
- int device_count = 0;
-
- spin_lock_bh(&zcrypt_device_lock);
- list_for_each_entry(zdev, &zcrypt_device_list, list)
- if (zdev->user_space_type == type)
+ struct zcrypt_card *zc;
+ struct zcrypt_queue *zq;
+ int device_count;
+
+ device_count = 0;
+ spin_lock(&zcrypt_list_lock);
+ for_each_zcrypt_card(zc) {
+ if (zc->card->id != type)
+ continue;
+ for_each_zcrypt_queue(zq, zc) {
+ if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index)
+ continue;
device_count++;
- spin_unlock_bh(&zcrypt_device_lock);
+ }
+ }
+ spin_unlock(&zcrypt_list_lock);
return device_count;
}
@@ -887,6 +868,25 @@ static long zcrypt_unlocked_ioctl(struct file *filp, unsigned int cmd,
return -EFAULT;
return rc;
}
+ case ZDEVICESTATUS: {
+ struct zcrypt_device_matrix *device_status;
+
+ device_status = kzalloc(sizeof(struct zcrypt_device_matrix),
+ GFP_KERNEL);
+ if (!device_status)
+ return -ENOMEM;
+
+ zcrypt_device_status_mask(device_status);
+
+ if (copy_to_user((char __user *) arg, device_status,
+ sizeof(struct zcrypt_device_matrix))) {
+ kfree(device_status);
+ return -EFAULT;
+ }
+
+ kfree(device_status);
+ return 0;
+ }
case Z90STAT_STATUS_MASK: {
char status[AP_DEVICES];
zcrypt_status_mask(status);
@@ -1249,29 +1249,36 @@ static int zcrypt_proc_open(struct inode *inode, struct file *file)
static void zcrypt_disable_card(int index)
{
- struct zcrypt_device *zdev;
+ struct zcrypt_card *zc;
+ struct zcrypt_queue *zq;
- spin_lock_bh(&zcrypt_device_lock);
- list_for_each_entry(zdev, &zcrypt_device_list, list)
- if (AP_QID_DEVICE(zdev->ap_dev->qid) == index) {
- zdev->online = 0;
- ap_flush_queue(zdev->ap_dev);
- break;
+ spin_lock(&zcrypt_list_lock);
+	for_each_zcrypt_card(zc) {
+		if (zc->card->id != index)
+			continue;
+		for_each_zcrypt_queue(zq, zc) {
+ if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index)
+ continue;
+ zq->online = 0;
+ ap_flush_queue(zq->queue);
}
- spin_unlock_bh(&zcrypt_device_lock);
+ }
+ spin_unlock(&zcrypt_list_lock);
}
static void zcrypt_enable_card(int index)
{
- struct zcrypt_device *zdev;
+ struct zcrypt_card *zc;
+ struct zcrypt_queue *zq;
- spin_lock_bh(&zcrypt_device_lock);
- list_for_each_entry(zdev, &zcrypt_device_list, list)
- if (AP_QID_DEVICE(zdev->ap_dev->qid) == index) {
- zdev->online = 1;
- break;
+ spin_lock(&zcrypt_list_lock);
+	for_each_zcrypt_card(zc) {
+		if (zc->card->id != index)
+			continue;
+		for_each_zcrypt_queue(zq, zc) {
+ if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index)
+ continue;
+ zq->online = 1;
+ ap_flush_queue(zq->queue);
}
- spin_unlock_bh(&zcrypt_device_lock);
+ }
+ spin_unlock(&zcrypt_list_lock);
}
static ssize_t zcrypt_proc_write(struct file *file, const char __user *buffer,
@@ -1369,7 +1376,7 @@ static struct hwrng zcrypt_rng_dev = {
.quality = 990,
};
-static int zcrypt_rng_device_add(void)
+int zcrypt_rng_device_add(void)
{
int rc = 0;
@@ -1399,7 +1406,7 @@ out:
return rc;
}
-static void zcrypt_rng_device_remove(void)
+void zcrypt_rng_device_remove(void)
{
mutex_lock(&zcrypt_rng_mutex);
zcrypt_rng_device_count--;
@@ -1412,24 +1419,19 @@ static void zcrypt_rng_device_remove(void)
int __init zcrypt_debug_init(void)
{
- debugfs_root = debugfs_create_dir("zcrypt", NULL);
-
- zcrypt_dbf_common = debug_register("zcrypt_common", 1, 1, 16);
- debug_register_view(zcrypt_dbf_common, &debug_hex_ascii_view);
- debug_set_level(zcrypt_dbf_common, DBF_ERR);
-
- zcrypt_dbf_devices = debug_register("zcrypt_devices", 1, 1, 16);
- debug_register_view(zcrypt_dbf_devices, &debug_hex_ascii_view);
- debug_set_level(zcrypt_dbf_devices, DBF_ERR);
+ zcrypt_dbf_root = debugfs_create_dir("zcrypt", NULL);
+ zcrypt_dbf_info = debug_register("zcrypt", 1, 1,
+ DBF_MAX_SPRINTF_ARGS * sizeof(long));
+ debug_register_view(zcrypt_dbf_info, &debug_sprintf_view);
+ debug_set_level(zcrypt_dbf_info, DBF_ERR);
return 0;
}
void zcrypt_debug_exit(void)
{
- debugfs_remove(debugfs_root);
- debug_unregister(zcrypt_dbf_common);
- debug_unregister(zcrypt_dbf_devices);
+ debugfs_remove(zcrypt_dbf_root);
+ debug_unregister(zcrypt_dbf_info);
}
/**
@@ -1453,12 +1455,15 @@ int __init zcrypt_api_init(void)
goto out;
/* Set up the proc file system */
- zcrypt_entry = proc_create("driver/z90crypt", 0644, NULL, &zcrypt_proc_fops);
+ zcrypt_entry = proc_create("driver/z90crypt", 0644, NULL,
+ &zcrypt_proc_fops);
if (!zcrypt_entry) {
rc = -ENOMEM;
goto out_misc;
}
+ zcrypt_msgtype6_init();
+ zcrypt_msgtype50_init();
return 0;
out_misc:
@@ -1472,10 +1477,12 @@ out:
*
* The module termination code.
*/
-void zcrypt_api_exit(void)
+void __exit zcrypt_api_exit(void)
{
remove_proc_entry("driver/z90crypt", NULL);
misc_deregister(&zcrypt_misc_device);
+ zcrypt_msgtype6_exit();
+ zcrypt_msgtype50_exit();
zcrypt_debug_exit();
}
diff --git a/drivers/s390/crypto/zcrypt_api.h b/drivers/s390/crypto/zcrypt_api.h
index 38618f05ad92..274a59051534 100644
--- a/drivers/s390/crypto/zcrypt_api.h
+++ b/drivers/s390/crypto/zcrypt_api.h
@@ -84,57 +84,110 @@ struct ica_z90_status {
*/
#define ZCRYPT_RNG_BUFFER_SIZE 4096
-struct zcrypt_device;
+/*
+ * Identifier for Crypto Request Performance Index
+ */
+enum crypto_ops {
+ MEX_1K,
+ MEX_2K,
+ MEX_4K,
+ CRT_1K,
+ CRT_2K,
+ CRT_4K,
+ HWRNG,
+ SECKEY,
+ NUM_OPS
+};
+
+struct zcrypt_queue;
struct zcrypt_ops {
- long (*rsa_modexpo)(struct zcrypt_device *, struct ica_rsa_modexpo *);
- long (*rsa_modexpo_crt)(struct zcrypt_device *,
+ long (*rsa_modexpo)(struct zcrypt_queue *, struct ica_rsa_modexpo *);
+ long (*rsa_modexpo_crt)(struct zcrypt_queue *,
struct ica_rsa_modexpo_crt *);
- long (*send_cprb)(struct zcrypt_device *, struct ica_xcRB *);
- long (*send_ep11_cprb)(struct zcrypt_device *, struct ep11_urb *);
- long (*rng)(struct zcrypt_device *, char *);
+ long (*send_cprb)(struct zcrypt_queue *, struct ica_xcRB *,
+ struct ap_message *);
+ long (*send_ep11_cprb)(struct zcrypt_queue *, struct ep11_urb *,
+ struct ap_message *);
+ long (*rng)(struct zcrypt_queue *, char *, struct ap_message *);
struct list_head list; /* zcrypt ops list. */
struct module *owner;
int variant;
char name[128];
};
-struct zcrypt_device {
+struct zcrypt_card {
struct list_head list; /* Device list. */
- spinlock_t lock; /* Per device lock. */
+ struct list_head zqueues; /* List of zcrypt queues */
struct kref refcount; /* device refcounting */
- struct ap_device *ap_dev; /* The "real" ap device. */
- struct zcrypt_ops *ops; /* Crypto operations. */
+ struct ap_card *card; /* The "real" ap card device. */
int online; /* User online/offline */
int user_space_type; /* User space device id. */
char *type_string; /* User space device name. */
int min_mod_size; /* Min number of bits. */
int max_mod_size; /* Max number of bits. */
- int short_crt; /* Card has crt length restriction. */
- int speed_rating; /* Speed of the crypto device. */
+ int max_exp_bit_length;
+ int speed_rating[NUM_OPS]; /* Speed idx of crypto ops. */
+ atomic_t load; /* Utilization of the crypto device */
int request_count; /* # current requests. */
+};
- struct ap_message reply; /* Per-device reply structure. */
- int max_exp_bit_length;
+struct zcrypt_queue {
+ struct list_head list; /* Device list. */
+ struct kref refcount; /* device refcounting */
+ struct zcrypt_card *zcard;
+ struct zcrypt_ops *ops; /* Crypto operations. */
+ struct ap_queue *queue; /* The "real" ap queue device. */
+ int online; /* User online/offline */
+
+ atomic_t load; /* Utilization of the crypto device */
- debug_info_t *dbf_area; /* debugging */
+ int request_count; /* # current requests. */
+
+ struct ap_message reply; /* Per-device reply structure. */
};
/* transport layer rescanning */
extern atomic_t zcrypt_rescan_req;
-struct zcrypt_device *zcrypt_device_alloc(size_t);
-void zcrypt_device_free(struct zcrypt_device *);
-void zcrypt_device_get(struct zcrypt_device *);
-int zcrypt_device_put(struct zcrypt_device *);
-int zcrypt_device_register(struct zcrypt_device *);
-void zcrypt_device_unregister(struct zcrypt_device *);
+extern spinlock_t zcrypt_list_lock;
+extern int zcrypt_device_count;
+extern struct list_head zcrypt_card_list;
+
+#define for_each_zcrypt_card(_zc) \
+ list_for_each_entry(_zc, &zcrypt_card_list, list)
+
+#define for_each_zcrypt_queue(_zq, _zc) \
+ list_for_each_entry(_zq, &(_zc)->zqueues, list)
+
+struct zcrypt_card *zcrypt_card_alloc(void);
+void zcrypt_card_free(struct zcrypt_card *);
+void zcrypt_card_get(struct zcrypt_card *);
+int zcrypt_card_put(struct zcrypt_card *);
+int zcrypt_card_register(struct zcrypt_card *);
+void zcrypt_card_unregister(struct zcrypt_card *);
+struct zcrypt_card *zcrypt_card_get_best(unsigned int *,
+ unsigned int, unsigned int);
+void zcrypt_card_put_best(struct zcrypt_card *, unsigned int);
+
+struct zcrypt_queue *zcrypt_queue_alloc(size_t);
+void zcrypt_queue_free(struct zcrypt_queue *);
+void zcrypt_queue_get(struct zcrypt_queue *);
+int zcrypt_queue_put(struct zcrypt_queue *);
+int zcrypt_queue_register(struct zcrypt_queue *);
+void zcrypt_queue_unregister(struct zcrypt_queue *);
+void zcrypt_queue_force_online(struct zcrypt_queue *, int);
+struct zcrypt_queue *zcrypt_queue_get_best(unsigned int, unsigned int);
+void zcrypt_queue_put_best(struct zcrypt_queue *, unsigned int);
+
+int zcrypt_rng_device_add(void);
+void zcrypt_rng_device_remove(void);
+
void zcrypt_msgtype_register(struct zcrypt_ops *);
void zcrypt_msgtype_unregister(struct zcrypt_ops *);
-struct zcrypt_ops *zcrypt_msgtype_request(unsigned char *, int);
-void zcrypt_msgtype_release(struct zcrypt_ops *);
+struct zcrypt_ops *zcrypt_msgtype(unsigned char *, int);
int zcrypt_api_init(void);
void zcrypt_api_exit(void);
diff --git a/drivers/s390/crypto/zcrypt_card.c b/drivers/s390/crypto/zcrypt_card.c
new file mode 100644
index 000000000000..53436ea52230
--- /dev/null
+++ b/drivers/s390/crypto/zcrypt_card.c
@@ -0,0 +1,187 @@
+/*
+ * zcrypt 2.1.0
+ *
+ * Copyright IBM Corp. 2001, 2012
+ * Author(s): Robert Burroughs
+ * Eric Rossman (edrossma@us.ibm.com)
+ * Cornelia Huck <cornelia.huck@de.ibm.com>
+ *
+ * Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
+ * Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
+ * Ralph Wuerthner <rwuerthn@de.ibm.com>
+ * MSGTYPE restruct: Holger Dengler <hd@linux.vnet.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/miscdevice.h>
+#include <linux/fs.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+#include <linux/compat.h>
+#include <linux/slab.h>
+#include <linux/atomic.h>
+#include <linux/uaccess.h>
+#include <linux/hw_random.h>
+#include <linux/debugfs.h>
+#include <asm/debug.h>
+
+#include "zcrypt_debug.h"
+#include "zcrypt_api.h"
+
+#include "zcrypt_msgtype6.h"
+#include "zcrypt_msgtype50.h"
+
+/*
+ * Device attributes common for all crypto card devices.
+ */
+
+static ssize_t zcrypt_card_type_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct zcrypt_card *zc = to_ap_card(dev)->private;
+
+ return snprintf(buf, PAGE_SIZE, "%s\n", zc->type_string);
+}
+
+static DEVICE_ATTR(type, 0444, zcrypt_card_type_show, NULL);
+
+static ssize_t zcrypt_card_online_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct zcrypt_card *zc = to_ap_card(dev)->private;
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", zc->online);
+}
+
+static ssize_t zcrypt_card_online_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct zcrypt_card *zc = to_ap_card(dev)->private;
+ struct zcrypt_queue *zq;
+ int online, id;
+
+ if (sscanf(buf, "%d\n", &online) != 1 || online < 0 || online > 1)
+ return -EINVAL;
+
+ zc->online = online;
+ id = zc->card->id;
+
+ ZCRYPT_DBF(DBF_INFO, "card=%02x online=%d\n", id, online);
+
+ spin_lock(&zcrypt_list_lock);
+ list_for_each_entry(zq, &zc->zqueues, list)
+ zcrypt_queue_force_online(zq, online);
+ spin_unlock(&zcrypt_list_lock);
+ return count;
+}
+
+static DEVICE_ATTR(online, 0644, zcrypt_card_online_show,
+ zcrypt_card_online_store);
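+
+/*
+ * Example (editor's sketch, assuming the AP bus names card devices
+ * "cardNN"): writing 0 to /sys/bus/ap/devices/card52/online takes the
+ * card and, via zcrypt_queue_force_online(), all of its queues offline;
+ * writing 1 brings them back online.
+ */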
+
+static struct attribute *zcrypt_card_attrs[] = {
+ &dev_attr_type.attr,
+ &dev_attr_online.attr,
+ NULL,
+};
+
+static struct attribute_group zcrypt_card_attr_group = {
+ .attrs = zcrypt_card_attrs,
+};
+
+struct zcrypt_card *zcrypt_card_alloc(void)
+{
+ struct zcrypt_card *zc;
+
+ zc = kzalloc(sizeof(struct zcrypt_card), GFP_KERNEL);
+ if (!zc)
+ return NULL;
+ INIT_LIST_HEAD(&zc->list);
+ INIT_LIST_HEAD(&zc->zqueues);
+ kref_init(&zc->refcount);
+ return zc;
+}
+EXPORT_SYMBOL(zcrypt_card_alloc);
+
+void zcrypt_card_free(struct zcrypt_card *zc)
+{
+ kfree(zc);
+}
+EXPORT_SYMBOL(zcrypt_card_free);
+
+static void zcrypt_card_release(struct kref *kref)
+{
+ struct zcrypt_card *zc =
+ container_of(kref, struct zcrypt_card, refcount);
+ zcrypt_card_free(zc);
+}
+
+void zcrypt_card_get(struct zcrypt_card *zc)
+{
+ kref_get(&zc->refcount);
+}
+EXPORT_SYMBOL(zcrypt_card_get);
+
+int zcrypt_card_put(struct zcrypt_card *zc)
+{
+ return kref_put(&zc->refcount, zcrypt_card_release);
+}
+EXPORT_SYMBOL(zcrypt_card_put);
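+
+/*
+ * Life-cycle sketch: users take a reference with zcrypt_card_get()
+ * before working with a card and drop it with zcrypt_card_put(); the
+ * final put ends up in zcrypt_card_release(), which frees the card
+ * structure.
+ */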
+
+/**
+ * zcrypt_card_register() - Register a crypto card device.
+ * @zc: Pointer to a crypto card device
+ *
+ * Register a crypto card device. Returns 0 if successful.
+ */
+int zcrypt_card_register(struct zcrypt_card *zc)
+{
+ int rc;
+
+ rc = sysfs_create_group(&zc->card->ap_dev.device.kobj,
+ &zcrypt_card_attr_group);
+ if (rc)
+ return rc;
+
+ spin_lock(&zcrypt_list_lock);
+ list_add_tail(&zc->list, &zcrypt_card_list);
+ spin_unlock(&zcrypt_list_lock);
+
+ zc->online = 1;
+
+ ZCRYPT_DBF(DBF_INFO, "card=%02x register online=1\n", zc->card->id);
+
+ return rc;
+}
+EXPORT_SYMBOL(zcrypt_card_register);
+
+/**
+ * zcrypt_card_unregister() - Unregister a crypto card device.
+ * @zc: Pointer to crypto card device
+ *
+ * Unregister a crypto card device.
+ */
+void zcrypt_card_unregister(struct zcrypt_card *zc)
+{
+ ZCRYPT_DBF(DBF_INFO, "card=%02x unregister\n", zc->card->id);
+
+ spin_lock(&zcrypt_list_lock);
+ list_del_init(&zc->list);
+ spin_unlock(&zcrypt_list_lock);
+ sysfs_remove_group(&zc->card->ap_dev.device.kobj,
+ &zcrypt_card_attr_group);
+}
+EXPORT_SYMBOL(zcrypt_card_unregister);
diff --git a/drivers/s390/crypto/zcrypt_cex2a.c b/drivers/s390/crypto/zcrypt_cex2a.c
index 15104aaa075a..c7d48a18199e 100644
--- a/drivers/s390/crypto/zcrypt_cex2a.c
+++ b/drivers/s390/crypto/zcrypt_cex2a.c
@@ -31,6 +31,7 @@
#include <linux/err.h>
#include <linux/atomic.h>
#include <asm/uaccess.h>
+#include <linux/mod_devicetable.h>
#include "ap_bus.h"
#include "zcrypt_api.h"
@@ -43,9 +44,6 @@
#define CEX3A_MIN_MOD_SIZE CEX2A_MIN_MOD_SIZE
#define CEX3A_MAX_MOD_SIZE 512 /* 4096 bits */
-#define CEX2A_SPEED_RATING 970
-#define CEX3A_SPEED_RATING 900 /* Fixme: Needs finetuning */
-
#define CEX2A_MAX_MESSAGE_SIZE 0x390 /* sizeof(struct type50_crb2_msg) */
#define CEX2A_MAX_RESPONSE_SIZE 0x110 /* max outputdatalength + type80_hdr */
@@ -57,107 +55,195 @@
#define CEX2A_CLEANUP_TIME (15*HZ)
#define CEX3A_CLEANUP_TIME CEX2A_CLEANUP_TIME
-static struct ap_device_id zcrypt_cex2a_ids[] = {
- { AP_DEVICE(AP_DEVICE_TYPE_CEX2A) },
- { AP_DEVICE(AP_DEVICE_TYPE_CEX3A) },
- { /* end of list */ },
-};
-
-MODULE_DEVICE_TABLE(ap, zcrypt_cex2a_ids);
MODULE_AUTHOR("IBM Corporation");
MODULE_DESCRIPTION("CEX2A Cryptographic Coprocessor device driver, " \
"Copyright IBM Corp. 2001, 2012");
MODULE_LICENSE("GPL");
-static int zcrypt_cex2a_probe(struct ap_device *ap_dev);
-static void zcrypt_cex2a_remove(struct ap_device *ap_dev);
+static struct ap_device_id zcrypt_cex2a_card_ids[] = {
+ { .dev_type = AP_DEVICE_TYPE_CEX2A,
+ .match_flags = AP_DEVICE_ID_MATCH_CARD_TYPE },
+ { .dev_type = AP_DEVICE_TYPE_CEX3A,
+ .match_flags = AP_DEVICE_ID_MATCH_CARD_TYPE },
+ { /* end of list */ },
+};
+
+MODULE_DEVICE_TABLE(ap, zcrypt_cex2a_card_ids);
-static struct ap_driver zcrypt_cex2a_driver = {
- .probe = zcrypt_cex2a_probe,
- .remove = zcrypt_cex2a_remove,
- .ids = zcrypt_cex2a_ids,
- .request_timeout = CEX2A_CLEANUP_TIME,
+static struct ap_device_id zcrypt_cex2a_queue_ids[] = {
+ { .dev_type = AP_DEVICE_TYPE_CEX2A,
+ .match_flags = AP_DEVICE_ID_MATCH_QUEUE_TYPE },
+ { .dev_type = AP_DEVICE_TYPE_CEX3A,
+ .match_flags = AP_DEVICE_ID_MATCH_QUEUE_TYPE },
+ { /* end of list */ },
};
+MODULE_DEVICE_TABLE(ap, zcrypt_cex2a_queue_ids);
+
/**
- * Probe function for CEX2A cards. It always accepts the AP device
- * since the bus_match already checked the hardware type.
+ * Probe function for CEX2A card devices. It always accepts the AP device
+ * since the bus_match already checked the card type.
* @ap_dev: pointer to the AP device.
*/
-static int zcrypt_cex2a_probe(struct ap_device *ap_dev)
+static int zcrypt_cex2a_card_probe(struct ap_device *ap_dev)
{
- struct zcrypt_device *zdev = NULL;
+ /*
+ * Normalized speed ratings per crypto adapter
+ * MEX_1k, MEX_2k, MEX_4k, CRT_1k, CRT_2k, CRT_4k, RNG, SECKEY
+ */
+ static const int CEX2A_SPEED_IDX[] = {
+ 800, 1000, 2000, 900, 1200, 2400, 0, 0};
+ static const int CEX3A_SPEED_IDX[] = {
+ 400, 500, 1000, 450, 550, 1200, 0, 0};
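+
+ /*
+ * Editor's note (assumption): lower values mean less processing
+ * time, so the newer CEX3A ranks ahead of a CEX2A for the same
+ * request type.
+ */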
+
+ struct ap_card *ac = to_ap_card(&ap_dev->device);
+ struct zcrypt_card *zc;
int rc = 0;
+ zc = zcrypt_card_alloc();
+ if (!zc)
+ return -ENOMEM;
+ zc->card = ac;
+ ac->private = zc;
+
+ if (ac->ap_dev.device_type == AP_DEVICE_TYPE_CEX2A) {
+ zc->min_mod_size = CEX2A_MIN_MOD_SIZE;
+ zc->max_mod_size = CEX2A_MAX_MOD_SIZE;
+ memcpy(zc->speed_rating, CEX2A_SPEED_IDX,
+ sizeof(CEX2A_SPEED_IDX));
+ zc->max_exp_bit_length = CEX2A_MAX_MOD_SIZE;
+ zc->type_string = "CEX2A";
+ zc->user_space_type = ZCRYPT_CEX2A;
+ } else if (ac->ap_dev.device_type == AP_DEVICE_TYPE_CEX3A) {
+ zc->min_mod_size = CEX2A_MIN_MOD_SIZE;
+ zc->max_mod_size = CEX2A_MAX_MOD_SIZE;
+ zc->max_exp_bit_length = CEX2A_MAX_MOD_SIZE;
+ if (ap_test_bit(&ac->functions, AP_FUNC_MEX4K) &&
+ ap_test_bit(&ac->functions, AP_FUNC_CRT4K)) {
+ zc->max_mod_size = CEX3A_MAX_MOD_SIZE;
+ zc->max_exp_bit_length = CEX3A_MAX_MOD_SIZE;
+ }
+ memcpy(zc->speed_rating, CEX3A_SPEED_IDX,
+ sizeof(CEX3A_SPEED_IDX));
+ zc->type_string = "CEX3A";
+ zc->user_space_type = ZCRYPT_CEX3A;
+ } else {
+ zcrypt_card_free(zc);
+ return -ENODEV;
+ }
+ zc->online = 1;
+
+ rc = zcrypt_card_register(zc);
+ if (rc) {
+ ac->private = NULL;
+ zcrypt_card_free(zc);
+ }
+
+ return rc;
+}
+
+/**
+ * This is called to remove the CEX2A card driver information
+ * when an AP card device is removed.
+ */
+static void zcrypt_cex2a_card_remove(struct ap_device *ap_dev)
+{
+ struct zcrypt_card *zc = to_ap_card(&ap_dev->device)->private;
+
+ if (zc)
+ zcrypt_card_unregister(zc);
+}
+
+static struct ap_driver zcrypt_cex2a_card_driver = {
+ .probe = zcrypt_cex2a_card_probe,
+ .remove = zcrypt_cex2a_card_remove,
+ .ids = zcrypt_cex2a_card_ids,
+};
+
+/**
+ * Probe function for CEX2A queue devices. It always accepts the AP device
+ * since the bus_match already checked the queue type.
+ * @ap_dev: pointer to the AP device.
+ */
+static int zcrypt_cex2a_queue_probe(struct ap_device *ap_dev)
+{
+ struct ap_queue *aq = to_ap_queue(&ap_dev->device);
+ struct zcrypt_queue *zq = NULL;
+ int rc;
+
switch (ap_dev->device_type) {
case AP_DEVICE_TYPE_CEX2A:
- zdev = zcrypt_device_alloc(CEX2A_MAX_RESPONSE_SIZE);
- if (!zdev)
+ zq = zcrypt_queue_alloc(CEX2A_MAX_RESPONSE_SIZE);
+ if (!zq)
return -ENOMEM;
- zdev->user_space_type = ZCRYPT_CEX2A;
- zdev->type_string = "CEX2A";
- zdev->min_mod_size = CEX2A_MIN_MOD_SIZE;
- zdev->max_mod_size = CEX2A_MAX_MOD_SIZE;
- zdev->short_crt = 1;
- zdev->speed_rating = CEX2A_SPEED_RATING;
- zdev->max_exp_bit_length = CEX2A_MAX_MOD_SIZE;
break;
case AP_DEVICE_TYPE_CEX3A:
- zdev = zcrypt_device_alloc(CEX3A_MAX_RESPONSE_SIZE);
- if (!zdev)
+ zq = zcrypt_queue_alloc(CEX3A_MAX_RESPONSE_SIZE);
+ if (!zq)
return -ENOMEM;
- zdev->user_space_type = ZCRYPT_CEX3A;
- zdev->type_string = "CEX3A";
- zdev->min_mod_size = CEX2A_MIN_MOD_SIZE;
- zdev->max_mod_size = CEX2A_MAX_MOD_SIZE;
- zdev->max_exp_bit_length = CEX2A_MAX_MOD_SIZE;
- if (ap_test_bit(&ap_dev->functions, AP_FUNC_MEX4K) &&
- ap_test_bit(&ap_dev->functions, AP_FUNC_CRT4K)) {
- zdev->max_mod_size = CEX3A_MAX_MOD_SIZE;
- zdev->max_exp_bit_length = CEX3A_MAX_MOD_SIZE;
- }
- zdev->short_crt = 1;
- zdev->speed_rating = CEX3A_SPEED_RATING;
break;
}
- if (!zdev)
+ if (!zq)
return -ENODEV;
- zdev->ops = zcrypt_msgtype_request(MSGTYPE50_NAME,
- MSGTYPE50_VARIANT_DEFAULT);
- zdev->ap_dev = ap_dev;
- zdev->online = 1;
- ap_device_init_reply(ap_dev, &zdev->reply);
- ap_dev->private = zdev;
- rc = zcrypt_device_register(zdev);
+ zq->ops = zcrypt_msgtype(MSGTYPE50_NAME, MSGTYPE50_VARIANT_DEFAULT);
+ zq->queue = aq;
+ zq->online = 1;
+ atomic_set(&zq->load, 0);
+ ap_queue_init_reply(aq, &zq->reply);
+ aq->request_timeout = CEX2A_CLEANUP_TIME;
+ aq->private = zq;
+ rc = zcrypt_queue_register(zq);
if (rc) {
- ap_dev->private = NULL;
- zcrypt_msgtype_release(zdev->ops);
- zcrypt_device_free(zdev);
+ aq->private = NULL;
+ zcrypt_queue_free(zq);
}
+
return rc;
}
/**
- * This is called to remove the extended CEX2A driver information
- * if an AP device is removed.
+ * This is called to remove the CEX2A queue driver information
+ * when an AP queue device is removed.
*/
-static void zcrypt_cex2a_remove(struct ap_device *ap_dev)
+static void zcrypt_cex2a_queue_remove(struct ap_device *ap_dev)
{
- struct zcrypt_device *zdev = ap_dev->private;
- struct zcrypt_ops *zops = zdev->ops;
+ struct ap_queue *aq = to_ap_queue(&ap_dev->device);
+ struct zcrypt_queue *zq = aq->private;
- zcrypt_device_unregister(zdev);
- zcrypt_msgtype_release(zops);
+ ap_queue_remove(aq);
+ if (zq)
+ zcrypt_queue_unregister(zq);
}
+static struct ap_driver zcrypt_cex2a_queue_driver = {
+ .probe = zcrypt_cex2a_queue_probe,
+ .remove = zcrypt_cex2a_queue_remove,
+ .suspend = ap_queue_suspend,
+ .resume = ap_queue_resume,
+ .ids = zcrypt_cex2a_queue_ids,
+};
+
int __init zcrypt_cex2a_init(void)
{
- return ap_driver_register(&zcrypt_cex2a_driver, THIS_MODULE, "cex2a");
+ int rc;
+
+ rc = ap_driver_register(&zcrypt_cex2a_card_driver,
+ THIS_MODULE, "cex2acard");
+ if (rc)
+ return rc;
+
+ rc = ap_driver_register(&zcrypt_cex2a_queue_driver,
+ THIS_MODULE, "cex2aqueue");
+ if (rc)
+ ap_driver_unregister(&zcrypt_cex2a_card_driver);
+
+ return rc;
}
void __exit zcrypt_cex2a_exit(void)
{
- ap_driver_unregister(&zcrypt_cex2a_driver);
+ ap_driver_unregister(&zcrypt_cex2a_queue_driver);
+ ap_driver_unregister(&zcrypt_cex2a_card_driver);
}
module_init(zcrypt_cex2a_init);
diff --git a/drivers/s390/crypto/zcrypt_cex4.c b/drivers/s390/crypto/zcrypt_cex4.c
index ccb2e78ebf0e..4e91163d70a6 100644
--- a/drivers/s390/crypto/zcrypt_cex4.c
+++ b/drivers/s390/crypto/zcrypt_cex4.c
@@ -9,6 +9,7 @@
#include <linux/err.h>
#include <linux/atomic.h>
#include <linux/uaccess.h>
+#include <linux/mod_devicetable.h>
#include "ap_bus.h"
#include "zcrypt_api.h"
@@ -24,13 +25,6 @@
#define CEX4C_MIN_MOD_SIZE 16 /* 256 bits */
#define CEX4C_MAX_MOD_SIZE 512 /* 4096 bits */
-#define CEX4A_SPEED_RATING 900 /* TODO new card, new speed rating */
-#define CEX4C_SPEED_RATING 6500 /* TODO new card, new speed rating */
-#define CEX4P_SPEED_RATING 7000 /* TODO new card, new speed rating */
-#define CEX5A_SPEED_RATING 450 /* TODO new card, new speed rating */
-#define CEX5C_SPEED_RATING 3250 /* TODO new card, new speed rating */
-#define CEX5P_SPEED_RATING 3500 /* TODO new card, new speed rating */
-
#define CEX4A_MAX_MESSAGE_SIZE MSGTYPE50_CRB3_MAX_MSG_SIZE
#define CEX4C_MAX_MESSAGE_SIZE MSGTYPE06_MAX_MSG_SIZE
@@ -41,147 +35,246 @@
*/
#define CEX4_CLEANUP_TIME (900*HZ)
-static struct ap_device_id zcrypt_cex4_ids[] = {
- { AP_DEVICE(AP_DEVICE_TYPE_CEX4) },
- { AP_DEVICE(AP_DEVICE_TYPE_CEX5) },
- { /* end of list */ },
-};
-
-MODULE_DEVICE_TABLE(ap, zcrypt_cex4_ids);
MODULE_AUTHOR("IBM Corporation");
MODULE_DESCRIPTION("CEX4 Cryptographic Card device driver, " \
"Copyright IBM Corp. 2012");
MODULE_LICENSE("GPL");
-static int zcrypt_cex4_probe(struct ap_device *ap_dev);
-static void zcrypt_cex4_remove(struct ap_device *ap_dev);
+static struct ap_device_id zcrypt_cex4_card_ids[] = {
+ { .dev_type = AP_DEVICE_TYPE_CEX4,
+ .match_flags = AP_DEVICE_ID_MATCH_CARD_TYPE },
+ { .dev_type = AP_DEVICE_TYPE_CEX5,
+ .match_flags = AP_DEVICE_ID_MATCH_CARD_TYPE },
+ { /* end of list */ },
+};
-static struct ap_driver zcrypt_cex4_driver = {
- .probe = zcrypt_cex4_probe,
- .remove = zcrypt_cex4_remove,
- .ids = zcrypt_cex4_ids,
- .request_timeout = CEX4_CLEANUP_TIME,
+MODULE_DEVICE_TABLE(ap, zcrypt_cex4_card_ids);
+
+static struct ap_device_id zcrypt_cex4_queue_ids[] = {
+ { .dev_type = AP_DEVICE_TYPE_CEX4,
+ .match_flags = AP_DEVICE_ID_MATCH_QUEUE_TYPE },
+ { .dev_type = AP_DEVICE_TYPE_CEX5,
+ .match_flags = AP_DEVICE_ID_MATCH_QUEUE_TYPE },
+ { /* end of list */ },
};
+MODULE_DEVICE_TABLE(ap, zcrypt_cex4_queue_ids);
+
/**
- * Probe function for CEX4 cards. It always accepts the AP device
+ * Probe function for CEX4 card devices. It always accepts the AP device
* since the bus_match already checked the hardware type.
* @ap_dev: pointer to the AP device.
*/
-static int zcrypt_cex4_probe(struct ap_device *ap_dev)
+static int zcrypt_cex4_card_probe(struct ap_device *ap_dev)
{
- struct zcrypt_device *zdev = NULL;
+ /*
+ * Normalized speed ratings per crypto adapter
+ * MEX_1k, MEX_2k, MEX_4k, CRT_1k, CRT_2k, CRT_4k, RNG, SECKEY
+ */
+ static const int CEX4A_SPEED_IDX[] = {
+ 5, 6, 59, 20, 115, 581, 0, 0};
+ static const int CEX5A_SPEED_IDX[] = {
+ 3, 3, 6, 8, 32, 218, 0, 0};
+ static const int CEX4C_SPEED_IDX[] = {
+ 24, 25, 82, 41, 138, 1111, 79, 8};
+ static const int CEX5C_SPEED_IDX[] = {
+ 10, 14, 23, 17, 45, 242, 63, 4};
+ static const int CEX4P_SPEED_IDX[] = {
+ 142, 198, 1852, 203, 331, 1563, 0, 8};
+ static const int CEX5P_SPEED_IDX[] = {
+ 49, 67, 131, 52, 85, 287, 0, 4};
+
+ struct ap_card *ac = to_ap_card(&ap_dev->device);
+ struct zcrypt_card *zc;
int rc = 0;
- switch (ap_dev->device_type) {
- case AP_DEVICE_TYPE_CEX4:
- case AP_DEVICE_TYPE_CEX5:
- if (ap_test_bit(&ap_dev->functions, AP_FUNC_ACCEL)) {
- zdev = zcrypt_device_alloc(CEX4A_MAX_MESSAGE_SIZE);
- if (!zdev)
- return -ENOMEM;
- if (ap_dev->device_type == AP_DEVICE_TYPE_CEX4) {
- zdev->type_string = "CEX4A";
- zdev->speed_rating = CEX4A_SPEED_RATING;
- } else {
- zdev->type_string = "CEX5A";
- zdev->speed_rating = CEX5A_SPEED_RATING;
- }
- zdev->user_space_type = ZCRYPT_CEX3A;
- zdev->min_mod_size = CEX4A_MIN_MOD_SIZE;
- if (ap_test_bit(&ap_dev->functions, AP_FUNC_MEX4K) &&
- ap_test_bit(&ap_dev->functions, AP_FUNC_CRT4K)) {
- zdev->max_mod_size =
- CEX4A_MAX_MOD_SIZE_4K;
- zdev->max_exp_bit_length =
- CEX4A_MAX_MOD_SIZE_4K;
- } else {
- zdev->max_mod_size =
- CEX4A_MAX_MOD_SIZE_2K;
- zdev->max_exp_bit_length =
- CEX4A_MAX_MOD_SIZE_2K;
- }
- zdev->short_crt = 1;
- zdev->ops = zcrypt_msgtype_request(MSGTYPE50_NAME,
- MSGTYPE50_VARIANT_DEFAULT);
- } else if (ap_test_bit(&ap_dev->functions, AP_FUNC_COPRO)) {
- zdev = zcrypt_device_alloc(CEX4C_MAX_MESSAGE_SIZE);
- if (!zdev)
- return -ENOMEM;
- if (ap_dev->device_type == AP_DEVICE_TYPE_CEX4) {
- zdev->type_string = "CEX4C";
- zdev->speed_rating = CEX4C_SPEED_RATING;
- } else {
- zdev->type_string = "CEX5C";
- zdev->speed_rating = CEX5C_SPEED_RATING;
- }
- zdev->user_space_type = ZCRYPT_CEX3C;
- zdev->min_mod_size = CEX4C_MIN_MOD_SIZE;
- zdev->max_mod_size = CEX4C_MAX_MOD_SIZE;
- zdev->max_exp_bit_length = CEX4C_MAX_MOD_SIZE;
- zdev->short_crt = 0;
- zdev->ops = zcrypt_msgtype_request(MSGTYPE06_NAME,
- MSGTYPE06_VARIANT_DEFAULT);
- } else if (ap_test_bit(&ap_dev->functions, AP_FUNC_EP11)) {
- zdev = zcrypt_device_alloc(CEX4C_MAX_MESSAGE_SIZE);
- if (!zdev)
- return -ENOMEM;
- if (ap_dev->device_type == AP_DEVICE_TYPE_CEX4) {
- zdev->type_string = "CEX4P";
- zdev->speed_rating = CEX4P_SPEED_RATING;
- } else {
- zdev->type_string = "CEX5P";
- zdev->speed_rating = CEX5P_SPEED_RATING;
- }
- zdev->user_space_type = ZCRYPT_CEX4;
- zdev->min_mod_size = CEX4C_MIN_MOD_SIZE;
- zdev->max_mod_size = CEX4C_MAX_MOD_SIZE;
- zdev->max_exp_bit_length = CEX4C_MAX_MOD_SIZE;
- zdev->short_crt = 0;
- zdev->ops = zcrypt_msgtype_request(MSGTYPE06_NAME,
- MSGTYPE06_VARIANT_EP11);
+ zc = zcrypt_card_alloc();
+ if (!zc)
+ return -ENOMEM;
+ zc->card = ac;
+ ac->private = zc;
+ if (ap_test_bit(&ac->functions, AP_FUNC_ACCEL)) {
+ if (ac->ap_dev.device_type == AP_DEVICE_TYPE_CEX4) {
+ zc->type_string = "CEX4A";
+ zc->user_space_type = ZCRYPT_CEX4;
+ memcpy(zc->speed_rating, CEX4A_SPEED_IDX,
+ sizeof(CEX4A_SPEED_IDX));
+ } else {
+ zc->type_string = "CEX5A";
+ zc->user_space_type = ZCRYPT_CEX5;
+ memcpy(zc->speed_rating, CEX5A_SPEED_IDX,
+ sizeof(CEX5A_SPEED_IDX));
}
- break;
- }
- if (!zdev)
+ zc->min_mod_size = CEX4A_MIN_MOD_SIZE;
+ if (ap_test_bit(&ac->functions, AP_FUNC_MEX4K) &&
+ ap_test_bit(&ac->functions, AP_FUNC_CRT4K)) {
+ zc->max_mod_size = CEX4A_MAX_MOD_SIZE_4K;
+ zc->max_exp_bit_length =
+ CEX4A_MAX_MOD_SIZE_4K;
+ } else {
+ zc->max_mod_size = CEX4A_MAX_MOD_SIZE_2K;
+ zc->max_exp_bit_length =
+ CEX4A_MAX_MOD_SIZE_2K;
+ }
+ } else if (ap_test_bit(&ac->functions, AP_FUNC_COPRO)) {
+ if (ac->ap_dev.device_type == AP_DEVICE_TYPE_CEX4) {
+ zc->type_string = "CEX4C";
+ /* wrong user space type, should be ZCRYPT_CEX4,
+ * but is kept at ZCRYPT_CEX3C for CCA compatibility
+ */
+ zc->user_space_type = ZCRYPT_CEX3C;
+ memcpy(zc->speed_rating, CEX4C_SPEED_IDX,
+ sizeof(CEX4C_SPEED_IDX));
+ } else {
+ zc->type_string = "CEX5C";
+ /* wrong user space type, should be ZCRYPT_CEX5,
+ * but is kept at ZCRYPT_CEX3C for CCA compatibility
+ */
+ zc->user_space_type = ZCRYPT_CEX3C;
+ memcpy(zc->speed_rating, CEX5C_SPEED_IDX,
+ sizeof(CEX5C_SPEED_IDX));
+ }
+ zc->min_mod_size = CEX4C_MIN_MOD_SIZE;
+ zc->max_mod_size = CEX4C_MAX_MOD_SIZE;
+ zc->max_exp_bit_length = CEX4C_MAX_MOD_SIZE;
+ } else if (ap_test_bit(&ac->functions, AP_FUNC_EP11)) {
+ if (ac->ap_dev.device_type == AP_DEVICE_TYPE_CEX4) {
+ zc->type_string = "CEX4P";
+ zc->user_space_type = ZCRYPT_CEX4;
+ memcpy(zc->speed_rating, CEX4P_SPEED_IDX,
+ sizeof(CEX4P_SPEED_IDX));
+ } else {
+ zc->type_string = "CEX5P";
+ zc->user_space_type = ZCRYPT_CEX5;
+ memcpy(zc->speed_rating, CEX5P_SPEED_IDX,
+ sizeof(CEX5P_SPEED_IDX));
+ }
+ zc->min_mod_size = CEX4C_MIN_MOD_SIZE;
+ zc->max_mod_size = CEX4C_MAX_MOD_SIZE;
+ zc->max_exp_bit_length = CEX4C_MAX_MOD_SIZE;
+ } else {
+ zcrypt_card_free(zc);
return -ENODEV;
- zdev->ap_dev = ap_dev;
- zdev->online = 1;
- ap_device_init_reply(ap_dev, &zdev->reply);
- ap_dev->private = zdev;
- rc = zcrypt_device_register(zdev);
+ }
+ zc->online = 1;
+
+ rc = zcrypt_card_register(zc);
if (rc) {
- zcrypt_msgtype_release(zdev->ops);
- ap_dev->private = NULL;
- zcrypt_device_free(zdev);
+ ac->private = NULL;
+ zcrypt_card_free(zc);
}
+
return rc;
}
/**
- * This is called to remove the extended CEX4 driver information
- * if an AP device is removed.
+ * This is called to remove the CEX4 card driver information
+ * when an AP card device is removed.
+ */
+static void zcrypt_cex4_card_remove(struct ap_device *ap_dev)
+{
+ struct zcrypt_card *zc = to_ap_card(&ap_dev->device)->private;
+
+ if (zc)
+ zcrypt_card_unregister(zc);
+}
+
+static struct ap_driver zcrypt_cex4_card_driver = {
+ .probe = zcrypt_cex4_card_probe,
+ .remove = zcrypt_cex4_card_remove,
+ .ids = zcrypt_cex4_card_ids,
+};
+
+/**
+ * Probe function for CEX4 queue devices. It always accepts the AP device
+ * since the bus_match already checked the queue type.
+ * @ap_dev: pointer to the AP device.
*/
-static void zcrypt_cex4_remove(struct ap_device *ap_dev)
+static int zcrypt_cex4_queue_probe(struct ap_device *ap_dev)
{
- struct zcrypt_device *zdev = ap_dev->private;
- struct zcrypt_ops *zops;
+ struct ap_queue *aq = to_ap_queue(&ap_dev->device);
+ struct zcrypt_queue *zq;
+ int rc;
- if (zdev) {
- zops = zdev->ops;
- zcrypt_device_unregister(zdev);
- zcrypt_msgtype_release(zops);
+ if (ap_test_bit(&aq->card->functions, AP_FUNC_ACCEL)) {
+ zq = zcrypt_queue_alloc(CEX4A_MAX_MESSAGE_SIZE);
+ if (!zq)
+ return -ENOMEM;
+ zq->ops = zcrypt_msgtype(MSGTYPE50_NAME,
+ MSGTYPE50_VARIANT_DEFAULT);
+ } else if (ap_test_bit(&aq->card->functions, AP_FUNC_COPRO)) {
+ zq = zcrypt_queue_alloc(CEX4C_MAX_MESSAGE_SIZE);
+ if (!zq)
+ return -ENOMEM;
+ zq->ops = zcrypt_msgtype(MSGTYPE06_NAME,
+ MSGTYPE06_VARIANT_DEFAULT);
+ } else if (ap_test_bit(&aq->card->functions, AP_FUNC_EP11)) {
+ zq = zcrypt_queue_alloc(CEX4C_MAX_MESSAGE_SIZE);
+ if (!zq)
+ return -ENOMEM;
+ zq->ops = zcrypt_msgtype(MSGTYPE06_NAME,
+ MSGTYPE06_VARIANT_EP11);
+ } else {
+ return -ENODEV;
}
+ zq->queue = aq;
+ zq->online = 1;
+ atomic_set(&zq->load, 0);
+ ap_queue_init_reply(aq, &zq->reply);
+ aq->request_timeout = CEX4_CLEANUP_TIME;
+ aq->private = zq;
+ rc = zcrypt_queue_register(zq);
+ if (rc) {
+ aq->private = NULL;
+ zcrypt_queue_free(zq);
+ }
+
+ return rc;
+}
+
+/**
+ * This is called to remove the CEX4 queue driver information
+ * when an AP queue device is removed.
+ */
+static void zcrypt_cex4_queue_remove(struct ap_device *ap_dev)
+{
+ struct ap_queue *aq = to_ap_queue(&ap_dev->device);
+ struct zcrypt_queue *zq = aq->private;
+
+ ap_queue_remove(aq);
+ if (zq)
+ zcrypt_queue_unregister(zq);
}
+static struct ap_driver zcrypt_cex4_queue_driver = {
+ .probe = zcrypt_cex4_queue_probe,
+ .remove = zcrypt_cex4_queue_remove,
+ .suspend = ap_queue_suspend,
+ .resume = ap_queue_resume,
+ .ids = zcrypt_cex4_queue_ids,
+};
+
int __init zcrypt_cex4_init(void)
{
- return ap_driver_register(&zcrypt_cex4_driver, THIS_MODULE, "cex4");
+ int rc;
+
+ rc = ap_driver_register(&zcrypt_cex4_card_driver,
+ THIS_MODULE, "cex4card");
+ if (rc)
+ return rc;
+
+ rc = ap_driver_register(&zcrypt_cex4_queue_driver,
+ THIS_MODULE, "cex4queue");
+ if (rc)
+ ap_driver_unregister(&zcrypt_cex4_card_driver);
+
+ return rc;
}
void __exit zcrypt_cex4_exit(void)
{
- ap_driver_unregister(&zcrypt_cex4_driver);
+ ap_driver_unregister(&zcrypt_cex4_queue_driver);
+ ap_driver_unregister(&zcrypt_cex4_card_driver);
}
module_init(zcrypt_cex4_init);
diff --git a/drivers/s390/crypto/zcrypt_debug.h b/drivers/s390/crypto/zcrypt_debug.h
index 28d9349de1ad..13e38defb6b8 100644
--- a/drivers/s390/crypto/zcrypt_debug.h
+++ b/drivers/s390/crypto/zcrypt_debug.h
@@ -1,51 +1,27 @@
/*
- * Copyright IBM Corp. 2012
+ * Copyright IBM Corp. 2016
* Author(s): Holger Dengler (hd@linux.vnet.ibm.com)
+ * Harald Freudenberger <freude@de.ibm.com>
*/
#ifndef ZCRYPT_DEBUG_H
#define ZCRYPT_DEBUG_H
#include <asm/debug.h>
-#include "zcrypt_api.h"
-/* that gives us 15 characters in the text event views */
-#define ZCRYPT_DBF_LEN 16
-
-#define DBF_ERR 3 /* error conditions */
-#define DBF_WARN 4 /* warning conditions */
-#define DBF_INFO 6 /* informational */
+#define DBF_ERR 3 /* error conditions */
+#define DBF_WARN 4 /* warning conditions */
+#define DBF_INFO 5 /* informational */
+#define DBF_DEBUG 6 /* for debugging only */
+#define RC2ERR(rc) ((rc) ? DBF_ERR : DBF_INFO)
#define RC2WARN(rc) ((rc) ? DBF_WARN : DBF_INFO)
-#define ZCRYPT_DBF_COMMON(level, text...) \
- do { \
- if (debug_level_enabled(zcrypt_dbf_common, level)) { \
- char debug_buffer[ZCRYPT_DBF_LEN]; \
- snprintf(debug_buffer, ZCRYPT_DBF_LEN, text); \
- debug_text_event(zcrypt_dbf_common, level, \
- debug_buffer); \
- } \
- } while (0)
-
-#define ZCRYPT_DBF_DEVICES(level, text...) \
- do { \
- if (debug_level_enabled(zcrypt_dbf_devices, level)) { \
- char debug_buffer[ZCRYPT_DBF_LEN]; \
- snprintf(debug_buffer, ZCRYPT_DBF_LEN, text); \
- debug_text_event(zcrypt_dbf_devices, level, \
- debug_buffer); \
- } \
- } while (0)
-
-#define ZCRYPT_DBF_DEV(level, device, text...) \
- do { \
- if (debug_level_enabled(device->dbf_area, level)) { \
- char debug_buffer[ZCRYPT_DBF_LEN]; \
- snprintf(debug_buffer, ZCRYPT_DBF_LEN, text); \
- debug_text_event(device->dbf_area, level, \
- debug_buffer); \
- } \
- } while (0)
+#define DBF_MAX_SPRINTF_ARGS 5
+
+#define ZCRYPT_DBF(...) \
+ debug_sprintf_event(zcrypt_dbf_info, ##__VA_ARGS__)
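+
+/*
+ * Typical use, as seen in the callers added by this patch:
+ *   ZCRYPT_DBF(DBF_INFO, "card=%02x online=%d\n", id, online);
+ */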
+
+extern debug_info_t *zcrypt_dbf_info;
int zcrypt_debug_init(void);
void zcrypt_debug_exit(void);
diff --git a/drivers/s390/crypto/zcrypt_error.h b/drivers/s390/crypto/zcrypt_error.h
index de1b6c1d172c..13df60209ed3 100644
--- a/drivers/s390/crypto/zcrypt_error.h
+++ b/drivers/s390/crypto/zcrypt_error.h
@@ -55,52 +55,61 @@ struct error_hdr {
#define TYPE82_RSP_CODE 0x82
#define TYPE88_RSP_CODE 0x88
-#define REP82_ERROR_MACHINE_FAILURE 0x10
-#define REP82_ERROR_PREEMPT_FAILURE 0x12
-#define REP82_ERROR_CHECKPT_FAILURE 0x14
-#define REP82_ERROR_MESSAGE_TYPE 0x20
-#define REP82_ERROR_INVALID_COMM_CD 0x21 /* Type 84 */
-#define REP82_ERROR_INVALID_MSG_LEN 0x23
-#define REP82_ERROR_RESERVD_FIELD 0x24 /* was 0x50 */
-#define REP82_ERROR_FORMAT_FIELD 0x29
-#define REP82_ERROR_INVALID_COMMAND 0x30
-#define REP82_ERROR_MALFORMED_MSG 0x40
-#define REP82_ERROR_RESERVED_FIELDO 0x50 /* old value */
-#define REP82_ERROR_WORD_ALIGNMENT 0x60
-#define REP82_ERROR_MESSAGE_LENGTH 0x80
-#define REP82_ERROR_OPERAND_INVALID 0x82
-#define REP82_ERROR_OPERAND_SIZE 0x84
-#define REP82_ERROR_EVEN_MOD_IN_OPND 0x85
-#define REP82_ERROR_RESERVED_FIELD 0x88
-#define REP82_ERROR_TRANSPORT_FAIL 0x90
-#define REP82_ERROR_PACKET_TRUNCATED 0xA0
-#define REP82_ERROR_ZERO_BUFFER_LEN 0xB0
+#define REP82_ERROR_MACHINE_FAILURE 0x10
+#define REP82_ERROR_PREEMPT_FAILURE 0x12
+#define REP82_ERROR_CHECKPT_FAILURE 0x14
+#define REP82_ERROR_MESSAGE_TYPE 0x20
+#define REP82_ERROR_INVALID_COMM_CD 0x21 /* Type 84 */
+#define REP82_ERROR_INVALID_MSG_LEN 0x23
+#define REP82_ERROR_RESERVD_FIELD 0x24 /* was 0x50 */
+#define REP82_ERROR_FORMAT_FIELD 0x29
+#define REP82_ERROR_INVALID_COMMAND 0x30
+#define REP82_ERROR_MALFORMED_MSG 0x40
+#define REP82_ERROR_INVALID_DOMAIN_PRECHECK 0x42
+#define REP82_ERROR_RESERVED_FIELDO 0x50 /* old value */
+#define REP82_ERROR_WORD_ALIGNMENT 0x60
+#define REP82_ERROR_MESSAGE_LENGTH 0x80
+#define REP82_ERROR_OPERAND_INVALID 0x82
+#define REP82_ERROR_OPERAND_SIZE 0x84
+#define REP82_ERROR_EVEN_MOD_IN_OPND 0x85
+#define REP82_ERROR_RESERVED_FIELD 0x88
+#define REP82_ERROR_INVALID_DOMAIN_PENDING 0x8A
+#define REP82_ERROR_TRANSPORT_FAIL 0x90
+#define REP82_ERROR_PACKET_TRUNCATED 0xA0
+#define REP82_ERROR_ZERO_BUFFER_LEN 0xB0
-#define REP88_ERROR_MODULE_FAILURE 0x10
+#define REP88_ERROR_MODULE_FAILURE 0x10
-#define REP88_ERROR_MESSAGE_TYPE 0x20
-#define REP88_ERROR_MESSAGE_MALFORMD 0x22
-#define REP88_ERROR_MESSAGE_LENGTH 0x23
-#define REP88_ERROR_RESERVED_FIELD 0x24
-#define REP88_ERROR_KEY_TYPE 0x34
-#define REP88_ERROR_INVALID_KEY 0x82 /* CEX2A */
-#define REP88_ERROR_OPERAND 0x84 /* CEX2A */
-#define REP88_ERROR_OPERAND_EVEN_MOD 0x85 /* CEX2A */
+#define REP88_ERROR_MESSAGE_TYPE 0x20
+#define REP88_ERROR_MESSAGE_MALFORMD 0x22
+#define REP88_ERROR_MESSAGE_LENGTH 0x23
+#define REP88_ERROR_RESERVED_FIELD 0x24
+#define REP88_ERROR_KEY_TYPE 0x34
+#define REP88_ERROR_INVALID_KEY 0x82 /* CEX2A */
+#define REP88_ERROR_OPERAND 0x84 /* CEX2A */
+#define REP88_ERROR_OPERAND_EVEN_MOD 0x85 /* CEX2A */
-static inline int convert_error(struct zcrypt_device *zdev,
+static inline int convert_error(struct zcrypt_queue *zq,
struct ap_message *reply)
{
struct error_hdr *ehdr = reply->message;
+ int card = AP_QID_CARD(zq->queue->qid);
+ int queue = AP_QID_QUEUE(zq->queue->qid);
switch (ehdr->reply_code) {
case REP82_ERROR_OPERAND_INVALID:
case REP82_ERROR_OPERAND_SIZE:
case REP82_ERROR_EVEN_MOD_IN_OPND:
case REP88_ERROR_MESSAGE_MALFORMD:
+ case REP82_ERROR_INVALID_DOMAIN_PRECHECK:
+ case REP82_ERROR_INVALID_DOMAIN_PENDING:
// REP88_ERROR_INVALID_KEY // '82' CEX2A
// REP88_ERROR_OPERAND // '84' CEX2A
// REP88_ERROR_OPERAND_EVEN_MOD // '85' CEX2A
/* Invalid input data. */
+ ZCRYPT_DBF(DBF_WARN,
+ "device=%02x.%04x reply=0x%02x => rc=EINVAL\n",
+ card, queue, ehdr->reply_code);
return -EINVAL;
case REP82_ERROR_MESSAGE_TYPE:
// REP88_ERROR_MESSAGE_TYPE // '20' CEX2A
@@ -110,32 +119,32 @@ static inline int convert_error(struct zcrypt_device *zdev,
* and then repeat the request.
*/
atomic_set(&zcrypt_rescan_req, 1);
- zdev->online = 0;
- pr_err("Cryptographic device %x failed and was set offline\n",
- AP_QID_DEVICE(zdev->ap_dev->qid));
- ZCRYPT_DBF_DEV(DBF_ERR, zdev, "dev%04xo%drc%d",
- AP_QID_DEVICE(zdev->ap_dev->qid), zdev->online,
- ehdr->reply_code);
+ zq->online = 0;
+ pr_err("Cryptographic device %02x.%04x failed and was set offline\n",
+ card, queue);
+ ZCRYPT_DBF(DBF_ERR,
+ "device=%02x.%04x reply=0x%02x => online=0 rc=EAGAIN\n",
+ card, queue, ehdr->reply_code);
return -EAGAIN;
case REP82_ERROR_TRANSPORT_FAIL:
case REP82_ERROR_MACHINE_FAILURE:
// REP88_ERROR_MODULE_FAILURE // '10' CEX2A
/* If a card fails disable it and repeat the request. */
atomic_set(&zcrypt_rescan_req, 1);
- zdev->online = 0;
- pr_err("Cryptographic device %x failed and was set offline\n",
- AP_QID_DEVICE(zdev->ap_dev->qid));
- ZCRYPT_DBF_DEV(DBF_ERR, zdev, "dev%04xo%drc%d",
- AP_QID_DEVICE(zdev->ap_dev->qid), zdev->online,
- ehdr->reply_code);
+ zq->online = 0;
+ pr_err("Cryptographic device %02x.%04x failed and was set offline\n",
+ card, queue);
+ ZCRYPT_DBF(DBF_ERR,
+ "device=%02x.%04x reply=0x%02x => online=0 rc=EAGAIN\n",
+ card, queue, ehdr->reply_code);
return -EAGAIN;
default:
- zdev->online = 0;
- pr_err("Cryptographic device %x failed and was set offline\n",
- AP_QID_DEVICE(zdev->ap_dev->qid));
- ZCRYPT_DBF_DEV(DBF_ERR, zdev, "dev%04xo%drc%d",
- AP_QID_DEVICE(zdev->ap_dev->qid), zdev->online,
- ehdr->reply_code);
+ zq->online = 0;
+ pr_err("Cryptographic device %02x.%04x failed and was set offline\n",
+ card, queue);
+ ZCRYPT_DBF(DBF_ERR,
+ "device=%02x.%04x reply=0x%02x => online=0 rc=EAGAIN\n",
+ card, queue, ehdr->reply_code);
return -EAGAIN; /* repeat the request on a different device. */
}
}
diff --git a/drivers/s390/crypto/zcrypt_msgtype50.c b/drivers/s390/crypto/zcrypt_msgtype50.c
index eedfaa2cf715..6dd5d7c58dd0 100644
--- a/drivers/s390/crypto/zcrypt_msgtype50.c
+++ b/drivers/s390/crypto/zcrypt_msgtype50.c
@@ -53,9 +53,6 @@ MODULE_DESCRIPTION("Cryptographic Accelerator (message type 50), " \
"Copyright IBM Corp. 2001, 2012");
MODULE_LICENSE("GPL");
-static void zcrypt_cex2a_receive(struct ap_device *, struct ap_message *,
- struct ap_message *);
-
/**
* The type 50 message family is associated with a CEX2A card.
*
@@ -173,16 +170,48 @@ struct type80_hdr {
unsigned char reserved3[8];
} __packed;
+int get_rsa_modex_fc(struct ica_rsa_modexpo *mex, int *fcode)
+{
+ if (!mex->inputdatalength)
+ return -EINVAL;
+
+ if (mex->inputdatalength <= 128) /* 1024 bit */
+ *fcode = MEX_1K;
+ else if (mex->inputdatalength <= 256) /* 2048 bit */
+ *fcode = MEX_2K;
+ else /* 4096 bit */
+ *fcode = MEX_4K;
+
+ return 0;
+}
+
+int get_rsa_crt_fc(struct ica_rsa_modexpo_crt *crt, int *fcode)
+{
+ if (!crt->inputdatalength)
+ return -EINVAL;
+
+ if (crt->inputdatalength <= 128) /* 1024 bit */
+ *fcode = CRT_1K;
+ else if (crt->inputdatalength <= 256) /* 2048 bit */
+ *fcode = CRT_2K;
+ else /* 4096 bit */
+ *fcode = CRT_4K;
+
+ return 0;
+}
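+
+/*
+ * Example (editor's sketch): inputdatalength == 256 yields
+ * *fcode = CRT_2K; the function code presumably indexes the per-card
+ * speed_rating[] array (see the MEX_1k ... SECKEY comment in the card
+ * probe functions).
+ */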
+
/**
 * Convert an ICAMEX message to a type50 MEX message.
*
- * @zdev: crypto device pointer
- * @zreq: crypto request pointer
+ * @zq: crypto queue pointer
+ * @ap_msg: crypto request pointer
* @mex: pointer to user input data
*
* Returns 0 on success or -EFAULT.
*/
-static int ICAMEX_msg_to_type50MEX_msg(struct zcrypt_device *zdev,
+static int ICAMEX_msg_to_type50MEX_msg(struct zcrypt_queue *zq,
struct ap_message *ap_msg,
struct ica_rsa_modexpo *mex)
{
@@ -234,13 +263,13 @@ static int ICAMEX_msg_to_type50MEX_msg(struct zcrypt_device *zdev,
/**
 * Convert an ICACRT message to a type50 CRT message.
*
- * @zdev: crypto device pointer
- * @zreq: crypto request pointer
+ * @zq: crypto queue pointer
+ * @ap_msg: crypto request pointer
* @crt: pointer to user input data
*
* Returns 0 on success or -EFAULT.
*/
-static int ICACRT_msg_to_type50CRT_msg(struct zcrypt_device *zdev,
+static int ICACRT_msg_to_type50CRT_msg(struct zcrypt_queue *zq,
struct ap_message *ap_msg,
struct ica_rsa_modexpo_crt *crt)
{
@@ -283,7 +312,7 @@ static int ICACRT_msg_to_type50CRT_msg(struct zcrypt_device *zdev,
u = crb2->u + sizeof(crb2->u) - short_len;
inp = crb2->message + sizeof(crb2->message) - mod_len;
} else if ((mod_len <= 512) && /* up to 4096 bit key size */
- (zdev->max_mod_size == CEX3A_MAX_MOD_SIZE)) { /* >= CEX3A */
+ (zq->zcard->max_mod_size == CEX3A_MAX_MOD_SIZE)) {
struct type50_crb3_msg *crb3 = ap_msg->message;
memset(crb3, 0, sizeof(*crb3));
ap_msg->length = sizeof(*crb3);
@@ -317,14 +346,14 @@ static int ICACRT_msg_to_type50CRT_msg(struct zcrypt_device *zdev,
/**
* Copy results from a type 80 reply message back to user space.
*
- * @zdev: crypto device pointer
+ * @zq: crypto queue pointer
* @reply: reply AP message.
* @data: pointer to user output data
* @length: size of user output data
*
* Returns 0 on success or -EFAULT.
*/
-static int convert_type80(struct zcrypt_device *zdev,
+static int convert_type80(struct zcrypt_queue *zq,
struct ap_message *reply,
char __user *outputdata,
unsigned int outputdatalength)
@@ -334,16 +363,18 @@ static int convert_type80(struct zcrypt_device *zdev,
if (t80h->len < sizeof(*t80h) + outputdatalength) {
 /* The result is too short; the CEX2A card may not do that. */
- zdev->online = 0;
- pr_err("Cryptographic device %x failed and was set offline\n",
- AP_QID_DEVICE(zdev->ap_dev->qid));
- ZCRYPT_DBF_DEV(DBF_ERR, zdev, "dev%04xo%drc%d",
- AP_QID_DEVICE(zdev->ap_dev->qid),
- zdev->online, t80h->code);
-
+ zq->online = 0;
+ pr_err("Cryptographic device %02x.%04x failed and was set offline\n",
+ AP_QID_CARD(zq->queue->qid),
+ AP_QID_QUEUE(zq->queue->qid));
+ ZCRYPT_DBF(DBF_ERR,
+ "device=%02x.%04x code=0x%02x => online=0 rc=EAGAIN\n",
+ AP_QID_CARD(zq->queue->qid),
+ AP_QID_QUEUE(zq->queue->qid),
+ t80h->code);
return -EAGAIN; /* repeat the request on a different device. */
}
- if (zdev->user_space_type == ZCRYPT_CEX2A)
+ if (zq->zcard->user_space_type == ZCRYPT_CEX2A)
BUG_ON(t80h->len > CEX2A_MAX_RESPONSE_SIZE);
else
BUG_ON(t80h->len > CEX3A_MAX_RESPONSE_SIZE);
@@ -353,25 +384,31 @@ static int convert_type80(struct zcrypt_device *zdev,
return 0;
}
-static int convert_response(struct zcrypt_device *zdev,
+static int convert_response(struct zcrypt_queue *zq,
struct ap_message *reply,
char __user *outputdata,
unsigned int outputdatalength)
{
/* Response type byte is the second byte in the response. */
- switch (((unsigned char *) reply->message)[1]) {
+ unsigned char rtype = ((unsigned char *) reply->message)[1];
+
+ switch (rtype) {
case TYPE82_RSP_CODE:
case TYPE88_RSP_CODE:
- return convert_error(zdev, reply);
+ return convert_error(zq, reply);
case TYPE80_RSP_CODE:
- return convert_type80(zdev, reply,
+ return convert_type80(zq, reply,
outputdata, outputdatalength);
default: /* Unknown response type, this should NEVER EVER happen */
- zdev->online = 0;
- pr_err("Cryptographic device %x failed and was set offline\n",
- AP_QID_DEVICE(zdev->ap_dev->qid));
- ZCRYPT_DBF_DEV(DBF_ERR, zdev, "dev%04xo%dfail",
- AP_QID_DEVICE(zdev->ap_dev->qid), zdev->online);
+ zq->online = 0;
+ pr_err("Cryptographic device %02x.%04x failed and was set offline\n",
+ AP_QID_CARD(zq->queue->qid),
+ AP_QID_QUEUE(zq->queue->qid));
+ ZCRYPT_DBF(DBF_ERR,
+ "device=%02x.%04x rtype=0x%02x => online=0 rc=EAGAIN\n",
+ AP_QID_CARD(zq->queue->qid),
+ AP_QID_QUEUE(zq->queue->qid),
+ (unsigned int) rtype);
return -EAGAIN; /* repeat the request on a different device. */
}
}
@@ -380,11 +417,11 @@ static int convert_response(struct zcrypt_device *zdev,
* This function is called from the AP bus code after a crypto request
* "msg" has finished with the reply message "reply".
* It is called from tasklet context.
- * @ap_dev: pointer to the AP device
+ * @aq: pointer to the AP queue
* @msg: pointer to the AP message
* @reply: pointer to the AP reply message
*/
-static void zcrypt_cex2a_receive(struct ap_device *ap_dev,
+static void zcrypt_cex2a_receive(struct ap_queue *aq,
struct ap_message *msg,
struct ap_message *reply)
{
@@ -400,7 +437,7 @@ static void zcrypt_cex2a_receive(struct ap_device *ap_dev,
goto out; /* ap_msg->rc indicates the error */
t80h = reply->message;
if (t80h->type == TYPE80_RSP_CODE) {
- if (ap_dev->device_type == AP_DEVICE_TYPE_CEX2A)
+ if (aq->ap_dev.device_type == AP_DEVICE_TYPE_CEX2A)
length = min_t(int,
CEX2A_MAX_RESPONSE_SIZE, t80h->len);
else
@@ -418,11 +455,11 @@ static atomic_t zcrypt_step = ATOMIC_INIT(0);
/**
* The request distributor calls this function if it picked the CEX2A
* device to handle a modexpo request.
- * @zdev: pointer to zcrypt_device structure that identifies the
+ * @zq: pointer to zcrypt_queue structure that identifies the
* CEX2A device to the request distributor
* @mex: pointer to the modexpo request buffer
*/
-static long zcrypt_cex2a_modexpo(struct zcrypt_device *zdev,
+static long zcrypt_cex2a_modexpo(struct zcrypt_queue *zq,
struct ica_rsa_modexpo *mex)
{
struct ap_message ap_msg;
@@ -430,7 +467,7 @@ static long zcrypt_cex2a_modexpo(struct zcrypt_device *zdev,
int rc;
ap_init_message(&ap_msg);
- if (zdev->user_space_type == ZCRYPT_CEX2A)
+ if (zq->zcard->user_space_type == ZCRYPT_CEX2A)
ap_msg.message = kmalloc(MSGTYPE50_CRB2_MAX_MSG_SIZE,
GFP_KERNEL);
else
@@ -442,20 +479,20 @@ static long zcrypt_cex2a_modexpo(struct zcrypt_device *zdev,
ap_msg.psmid = (((unsigned long long) current->pid) << 32) +
atomic_inc_return(&zcrypt_step);
ap_msg.private = &work;
- rc = ICAMEX_msg_to_type50MEX_msg(zdev, &ap_msg, mex);
+ rc = ICAMEX_msg_to_type50MEX_msg(zq, &ap_msg, mex);
if (rc)
goto out_free;
init_completion(&work);
- ap_queue_message(zdev->ap_dev, &ap_msg);
+ ap_queue_message(zq->queue, &ap_msg);
rc = wait_for_completion_interruptible(&work);
if (rc == 0) {
rc = ap_msg.rc;
if (rc == 0)
- rc = convert_response(zdev, &ap_msg, mex->outputdata,
+ rc = convert_response(zq, &ap_msg, mex->outputdata,
mex->outputdatalength);
} else
/* Signal pending. */
- ap_cancel_message(zdev->ap_dev, &ap_msg);
+ ap_cancel_message(zq->queue, &ap_msg);
out_free:
kfree(ap_msg.message);
return rc;
@@ -464,11 +501,11 @@ out_free:
/**
* The request distributor calls this function if it picked the CEX2A
* device to handle a modexpo_crt request.
- * @zdev: pointer to zcrypt_device structure that identifies the
+ * @zq: pointer to zcrypt_queue structure that identifies the
* CEX2A device to the request distributor
* @crt: pointer to the modexpoc_crt request buffer
*/
-static long zcrypt_cex2a_modexpo_crt(struct zcrypt_device *zdev,
+static long zcrypt_cex2a_modexpo_crt(struct zcrypt_queue *zq,
struct ica_rsa_modexpo_crt *crt)
{
struct ap_message ap_msg;
@@ -476,7 +513,7 @@ static long zcrypt_cex2a_modexpo_crt(struct zcrypt_device *zdev,
int rc;
ap_init_message(&ap_msg);
- if (zdev->user_space_type == ZCRYPT_CEX2A)
+ if (zq->zcard->user_space_type == ZCRYPT_CEX2A)
ap_msg.message = kmalloc(MSGTYPE50_CRB2_MAX_MSG_SIZE,
GFP_KERNEL);
else
@@ -488,20 +525,20 @@ static long zcrypt_cex2a_modexpo_crt(struct zcrypt_device *zdev,
ap_msg.psmid = (((unsigned long long) current->pid) << 32) +
atomic_inc_return(&zcrypt_step);
ap_msg.private = &work;
- rc = ICACRT_msg_to_type50CRT_msg(zdev, &ap_msg, crt);
+ rc = ICACRT_msg_to_type50CRT_msg(zq, &ap_msg, crt);
if (rc)
goto out_free;
init_completion(&work);
- ap_queue_message(zdev->ap_dev, &ap_msg);
+ ap_queue_message(zq->queue, &ap_msg);
rc = wait_for_completion_interruptible(&work);
if (rc == 0) {
rc = ap_msg.rc;
if (rc == 0)
- rc = convert_response(zdev, &ap_msg, crt->outputdata,
+ rc = convert_response(zq, &ap_msg, crt->outputdata,
crt->outputdatalength);
} else
/* Signal pending. */
- ap_cancel_message(zdev->ap_dev, &ap_msg);
+ ap_cancel_message(zq->queue, &ap_msg);
out_free:
kfree(ap_msg.message);
return rc;
@@ -518,16 +555,12 @@ static struct zcrypt_ops zcrypt_msgtype50_ops = {
.variant = MSGTYPE50_VARIANT_DEFAULT,
};
-int __init zcrypt_msgtype50_init(void)
+void __init zcrypt_msgtype50_init(void)
{
zcrypt_msgtype_register(&zcrypt_msgtype50_ops);
- return 0;
}
void __exit zcrypt_msgtype50_exit(void)
{
zcrypt_msgtype_unregister(&zcrypt_msgtype50_ops);
}
-
-module_init(zcrypt_msgtype50_init);
-module_exit(zcrypt_msgtype50_exit);
diff --git a/drivers/s390/crypto/zcrypt_msgtype50.h b/drivers/s390/crypto/zcrypt_msgtype50.h
index 0a66e4aeeb50..5cc280318ee7 100644
--- a/drivers/s390/crypto/zcrypt_msgtype50.h
+++ b/drivers/s390/crypto/zcrypt_msgtype50.h
@@ -35,7 +35,10 @@
#define MSGTYPE_ADJUSTMENT 0x08 /*type04 extension (not needed in type50)*/
-int zcrypt_msgtype50_init(void);
+int get_rsa_modex_fc(struct ica_rsa_modexpo *, int *);
+int get_rsa_crt_fc(struct ica_rsa_modexpo_crt *, int *);
+
+void zcrypt_msgtype50_init(void);
void zcrypt_msgtype50_exit(void);
#endif /* _ZCRYPT_MSGTYPE50_H_ */
diff --git a/drivers/s390/crypto/zcrypt_msgtype6.c b/drivers/s390/crypto/zcrypt_msgtype6.c
index 21959719daef..e5563ffeb839 100644
--- a/drivers/s390/crypto/zcrypt_msgtype6.c
+++ b/drivers/s390/crypto/zcrypt_msgtype6.c
@@ -60,9 +60,6 @@ MODULE_DESCRIPTION("Cryptographic Coprocessor (message type 6), " \
"Copyright IBM Corp. 2001, 2012");
MODULE_LICENSE("GPL");
-static void zcrypt_msgtype6_receive(struct ap_device *, struct ap_message *,
- struct ap_message *);
-
/**
* CPRB
* Note that all shorts, ints and longs are little-endian.
@@ -149,16 +146,122 @@ static struct CPRBX static_cprbx = {
.func_id = {0x54, 0x32},
};
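+
+/*
+ * Classify a CCA request by its 16 bit function code into a LOW,
+ * MEDIUM or HIGH processing-time bucket. Editor's note: the
+ * LOW/MEDIUM/HIGH constants are assumed to be defined elsewhere in
+ * this patch (they are not visible in this hunk).
+ */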
+int speed_idx_cca(int req_type)
+{
+ switch (req_type) {
+ case 0x4142:
+ case 0x4149:
+ case 0x414D:
+ case 0x4341:
+ case 0x4344:
+ case 0x4354:
+ case 0x4358:
+ case 0x444B:
+ case 0x4558:
+ case 0x4643:
+ case 0x4651:
+ case 0x4C47:
+ case 0x4C4B:
+ case 0x4C51:
+ case 0x4F48:
+ case 0x504F:
+ case 0x5053:
+ case 0x5058:
+ case 0x5343:
+ case 0x5344:
+ case 0x5345:
+ case 0x5350:
+ return LOW;
+ case 0x414B:
+ case 0x4345:
+ case 0x4349:
+ case 0x434D:
+ case 0x4847:
+ case 0x4849:
+ case 0x484D:
+ case 0x4850:
+ case 0x4851:
+ case 0x4954:
+ case 0x4958:
+ case 0x4B43:
+ case 0x4B44:
+ case 0x4B45:
+ case 0x4B47:
+ case 0x4B48:
+ case 0x4B49:
+ case 0x4B4E:
+ case 0x4B50:
+ case 0x4B52:
+ case 0x4B54:
+ case 0x4B58:
+ case 0x4D50:
+ case 0x4D53:
+ case 0x4D56:
+ case 0x4D58:
+ case 0x5044:
+ case 0x5045:
+ case 0x5046:
+ case 0x5047:
+ case 0x5049:
+ case 0x504B:
+ case 0x504D:
+ case 0x5254:
+ case 0x5347:
+ case 0x5349:
+ case 0x534B:
+ case 0x534D:
+ case 0x5356:
+ case 0x5358:
+ case 0x5443:
+ case 0x544B:
+ case 0x5647:
+ return HIGH;
+ default:
+ return MEDIUM;
+ }
+}
+
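+/*
+ * Same classification for EP11 requests, keyed by the numeric EP11
+ * request type rather than a two-byte CCA function code.
+ */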
+int speed_idx_ep11(int req_type)
+{
+ switch (req_type) {
+ case 1:
+ case 2:
+ case 36:
+ case 37:
+ case 38:
+ case 39:
+ case 40:
+ return LOW;
+ case 17:
+ case 18:
+ case 19:
+ case 20:
+ case 21:
+ case 22:
+ case 26:
+ case 30:
+ case 31:
+ case 32:
+ case 33:
+ case 34:
+ case 35:
+ return HIGH;
+ default:
+ return MEDIUM;
+ }
+}
+
/**
 * Convert an ICAMEX message to a type6 MEX message.
*
- * @zdev: crypto device pointer
+ * @zq: crypto queue pointer
* @ap_msg: pointer to AP message
* @mex: pointer to user input data
*
* Returns 0 on success or -EFAULT.
*/
-static int ICAMEX_msg_to_type6MEX_msgX(struct zcrypt_device *zdev,
+static int ICAMEX_msg_to_type6MEX_msgX(struct zcrypt_queue *zq,
struct ap_message *ap_msg,
struct ica_rsa_modexpo *mex)
{
@@ -173,11 +276,6 @@ static int ICAMEX_msg_to_type6MEX_msgX(struct zcrypt_device *zdev,
.ulen = 10,
.only_rule = {'M', 'R', 'P', ' ', ' ', ' ', ' ', ' '}
};
- static struct function_and_rules_block static_pke_fnr_MCL2 = {
- .function_code = {'P', 'K'},
- .ulen = 10,
- .only_rule = {'Z', 'E', 'R', 'O', '-', 'P', 'A', 'D'}
- };
struct {
struct type6_hdr hdr;
struct CPRBX cprbx;
@@ -204,11 +302,10 @@ static int ICAMEX_msg_to_type6MEX_msgX(struct zcrypt_device *zdev,
msg->hdr.FromCardLen1 = PCIXCC_MAX_ICA_RESPONSE_SIZE - sizeof(msg->hdr);
msg->cprbx = static_cprbx;
- msg->cprbx.domain = AP_QID_QUEUE(zdev->ap_dev->qid);
+ msg->cprbx.domain = AP_QID_QUEUE(zq->queue->qid);
msg->cprbx.rpl_msgbl = msg->hdr.FromCardLen1;
- msg->fr = (zdev->user_space_type == ZCRYPT_PCIXCC_MCL2) ?
- static_pke_fnr_MCL2 : static_pke_fnr;
+ msg->fr = static_pke_fnr;
msg->cprbx.req_parml = size - sizeof(msg->hdr) - sizeof(msg->cprbx);
@@ -219,13 +316,13 @@ static int ICAMEX_msg_to_type6MEX_msgX(struct zcrypt_device *zdev,
/**
 * Convert an ICACRT message to a type6 CRT message.
*
- * @zdev: crypto device pointer
+ * @zq: crypto queue pointer
* @ap_msg: pointer to AP message
* @crt: pointer to user input data
*
* Returns 0 on success or -EFAULT.
*/
-static int ICACRT_msg_to_type6CRT_msgX(struct zcrypt_device *zdev,
+static int ICACRT_msg_to_type6CRT_msgX(struct zcrypt_queue *zq,
struct ap_message *ap_msg,
struct ica_rsa_modexpo_crt *crt)
{
@@ -241,11 +338,6 @@ static int ICACRT_msg_to_type6CRT_msgX(struct zcrypt_device *zdev,
.only_rule = {'Z', 'E', 'R', 'O', '-', 'P', 'A', 'D'}
};
- static struct function_and_rules_block static_pkd_fnr_MCL2 = {
- .function_code = {'P', 'D'},
- .ulen = 10,
- .only_rule = {'P', 'K', 'C', 'S', '-', '1', '.', '2'}
- };
struct {
struct type6_hdr hdr;
struct CPRBX cprbx;
@@ -272,12 +364,11 @@ static int ICACRT_msg_to_type6CRT_msgX(struct zcrypt_device *zdev,
msg->hdr.FromCardLen1 = PCIXCC_MAX_ICA_RESPONSE_SIZE - sizeof(msg->hdr);
msg->cprbx = static_cprbx;
- msg->cprbx.domain = AP_QID_QUEUE(zdev->ap_dev->qid);
+ msg->cprbx.domain = AP_QID_QUEUE(zq->queue->qid);
msg->cprbx.req_parml = msg->cprbx.rpl_msgbl =
size - sizeof(msg->hdr) - sizeof(msg->cprbx);
- msg->fr = (zdev->user_space_type == ZCRYPT_PCIXCC_MCL2) ?
- static_pkd_fnr_MCL2 : static_pkd_fnr;
+ msg->fr = static_pkd_fnr;
ap_msg->length = size;
return 0;
@@ -286,7 +377,7 @@ static int ICACRT_msg_to_type6CRT_msgX(struct zcrypt_device *zdev,
/**
 * Convert an XCRB message to a type6 CPRB message.
*
- * @zdev: crypto device pointer
+ * @zq: crypto queue pointer
* @ap_msg: pointer to AP message
* @xcRB: pointer to user input data
*
@@ -297,9 +388,10 @@ struct type86_fmt2_msg {
struct type86_fmt2_ext fmt2;
} __packed;
-static int XCRB_msg_to_type6CPRB_msgX(struct zcrypt_device *zdev,
- struct ap_message *ap_msg,
- struct ica_xcRB *xcRB)
+static int XCRB_msg_to_type6CPRB_msgX(struct ap_message *ap_msg,
+ struct ica_xcRB *xcRB,
+ unsigned int *fcode,
+ unsigned short **dom)
{
static struct type6_hdr static_type6_hdrX = {
.type = 0x06,
@@ -379,6 +471,9 @@ static int XCRB_msg_to_type6CPRB_msgX(struct zcrypt_device *zdev,
memcpy(msg->hdr.function_code, function_code,
sizeof(msg->hdr.function_code));
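+	/* hand the two function code bytes (e.g. "US") back to the
+	 * caller as one 16 bit function code
+	 */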
+ *fcode = (msg->hdr.function_code[0] << 8) | msg->hdr.function_code[1];
+ *dom = (unsigned short *)&msg->cprbx.domain;
+
if (memcmp(function_code, "US", 2) == 0)
ap_msg->special = 1;
else
@@ -389,15 +484,15 @@ static int XCRB_msg_to_type6CPRB_msgX(struct zcrypt_device *zdev,
copy_from_user(req_data, xcRB->request_data_address,
xcRB->request_data_length))
return -EFAULT;
+
return 0;
}
-static int xcrb_msg_to_type6_ep11cprb_msgx(struct zcrypt_device *zdev,
- struct ap_message *ap_msg,
- struct ep11_urb *xcRB)
+static int xcrb_msg_to_type6_ep11cprb_msgx(struct ap_message *ap_msg,
+ struct ep11_urb *xcRB,
+ unsigned int *fcode)
{
unsigned int lfmt;
-
static struct type6_hdr static_type6_ep11_hdr = {
.type = 0x06,
.rqid = {0x00, 0x01},
@@ -421,7 +516,7 @@ static int xcrb_msg_to_type6_ep11cprb_msgx(struct zcrypt_device *zdev,
unsigned char dom_tag; /* fixed value 0x4 */
unsigned char dom_len; /* fixed value 0x4 */
unsigned int dom_val; /* domain id */
- } __packed * payload_hdr;
+ } __packed * payload_hdr = NULL;
if (CEIL4(xcRB->req_len) < xcRB->req_len)
return -EINVAL; /* overflow after alignment*/
@@ -450,43 +545,30 @@ static int xcrb_msg_to_type6_ep11cprb_msgx(struct zcrypt_device *zdev,
return -EFAULT;
}
- /*
- The target domain field within the cprb body/payload block will be
- replaced by the usage domain for non-management commands only.
- Therefore we check the first bit of the 'flags' parameter for
- management command indication.
- 0 - non management command
- 1 - management command
- */
- if (!((msg->cprbx.flags & 0x80) == 0x80)) {
- msg->cprbx.target_id = (unsigned int)
- AP_QID_QUEUE(zdev->ap_dev->qid);
-
- if ((msg->pld_lenfmt & 0x80) == 0x80) { /*ext.len.fmt 2 or 3*/
- switch (msg->pld_lenfmt & 0x03) {
- case 1:
- lfmt = 2;
- break;
- case 2:
- lfmt = 3;
- break;
- default:
- return -EINVAL;
- }
- } else {
- lfmt = 1; /* length format #1 */
- }
- payload_hdr = (struct pld_hdr *)((&(msg->pld_lenfmt))+lfmt);
- payload_hdr->dom_val = (unsigned int)
- AP_QID_QUEUE(zdev->ap_dev->qid);
+ if ((msg->pld_lenfmt & 0x80) == 0x80) { /*ext.len.fmt 2 or 3*/
+ switch (msg->pld_lenfmt & 0x03) {
+ case 1:
+ lfmt = 2;
+ break;
+ case 2:
+ lfmt = 3;
+ break;
+ default:
+ return -EINVAL;
+ }
+ } else {
+ lfmt = 1; /* length format #1 */
}
+ payload_hdr = (struct pld_hdr *)((&(msg->pld_lenfmt))+lfmt);
+ *fcode = payload_hdr->func_val & 0xFFFF;
+
return 0;
}
/**
* Copy results from a type 86 ICA reply message back to user space.
*
- * @zdev: crypto device pointer
+ * @zq: crypto queue pointer
* @reply: reply AP message.
* @data: pointer to user output data
* @length: size of user output data
@@ -508,7 +590,7 @@ struct type86_ep11_reply {
struct ep11_cprb cprbx;
} __packed;
-static int convert_type86_ica(struct zcrypt_device *zdev,
+static int convert_type86_ica(struct zcrypt_queue *zq,
struct ap_message *reply,
char __user *outputdata,
unsigned int outputdatalength)
@@ -556,26 +638,37 @@ static int convert_type86_ica(struct zcrypt_device *zdev,
service_rc = msg->cprbx.ccp_rtcode;
if (unlikely(service_rc != 0)) {
service_rs = msg->cprbx.ccp_rscode;
- if (service_rc == 8 && service_rs == 66)
- return -EINVAL;
- if (service_rc == 8 && service_rs == 65)
- return -EINVAL;
- if (service_rc == 8 && service_rs == 770)
+ if ((service_rc == 8 && service_rs == 66) ||
+ (service_rc == 8 && service_rs == 65) ||
+ (service_rc == 8 && service_rs == 72) ||
+ (service_rc == 8 && service_rs == 770) ||
+ (service_rc == 12 && service_rs == 769)) {
+ ZCRYPT_DBF(DBF_DEBUG,
+ "device=%02x.%04x rc/rs=%d/%d => rc=EINVAL\n",
+ AP_QID_CARD(zq->queue->qid),
+ AP_QID_QUEUE(zq->queue->qid),
+ (int) service_rc, (int) service_rs);
return -EINVAL;
+ }
if (service_rc == 8 && service_rs == 783) {
- zdev->min_mod_size = PCIXCC_MIN_MOD_SIZE_OLD;
+ zq->zcard->min_mod_size =
+ PCIXCC_MIN_MOD_SIZE_OLD;
+ ZCRYPT_DBF(DBF_DEBUG,
+ "device=%02x.%04x rc/rs=%d/%d => rc=EAGAIN\n",
+ AP_QID_CARD(zq->queue->qid),
+ AP_QID_QUEUE(zq->queue->qid),
+ (int) service_rc, (int) service_rs);
return -EAGAIN;
}
- if (service_rc == 12 && service_rs == 769)
- return -EINVAL;
- if (service_rc == 8 && service_rs == 72)
- return -EINVAL;
- zdev->online = 0;
- pr_err("Cryptographic device %x failed and was set offline\n",
- AP_QID_DEVICE(zdev->ap_dev->qid));
- ZCRYPT_DBF_DEV(DBF_ERR, zdev, "dev%04xo%drc%d",
- AP_QID_DEVICE(zdev->ap_dev->qid), zdev->online,
- msg->hdr.reply_code);
+ zq->online = 0;
+ pr_err("Cryptographic device %02x.%04x failed and was set offline\n",
+ AP_QID_CARD(zq->queue->qid),
+ AP_QID_QUEUE(zq->queue->qid));
+ ZCRYPT_DBF(DBF_ERR,
+ "device=%02x.%04x rc/rs=%d/%d => online=0 rc=EAGAIN\n",
+ AP_QID_CARD(zq->queue->qid),
+ AP_QID_QUEUE(zq->queue->qid),
+ (int) service_rc, (int) service_rs);
return -EAGAIN; /* repeat the request on a different device. */
}
data = msg->text;
@@ -611,13 +704,13 @@ static int convert_type86_ica(struct zcrypt_device *zdev,
/**
* Copy results from a type 86 XCRB reply message back to user space.
*
- * @zdev: crypto device pointer
+ * @zq: crypto queue pointer
* @reply: reply AP message.
* @xcRB: pointer to XCRB
*
* Returns 0 on success or -EINVAL, -EFAULT, -EAGAIN in case of an error.
*/
-static int convert_type86_xcrb(struct zcrypt_device *zdev,
+static int convert_type86_xcrb(struct zcrypt_queue *zq,
struct ap_message *reply,
struct ica_xcRB *xcRB)
{
@@ -642,13 +735,13 @@ static int convert_type86_xcrb(struct zcrypt_device *zdev,
/**
* Copy results from a type 86 EP11 XCRB reply message back to user space.
*
- * @zdev: crypto device pointer
+ * @zq: crypto queue pointer
* @reply: reply AP message.
* @xcRB: pointer to EP11 user request block
*
* Returns 0 on success or -EINVAL, -EFAULT, -EAGAIN in case of an error.
*/
-static int convert_type86_ep11_xcrb(struct zcrypt_device *zdev,
+static int convert_type86_ep11_xcrb(struct zcrypt_queue *zq,
struct ap_message *reply,
struct ep11_urb *xcRB)
{
@@ -666,7 +759,7 @@ static int convert_type86_ep11_xcrb(struct zcrypt_device *zdev,
return 0;
}
-static int convert_type86_rng(struct zcrypt_device *zdev,
+static int convert_type86_rng(struct zcrypt_queue *zq,
struct ap_message *reply,
char *buffer)
{
@@ -683,104 +776,113 @@ static int convert_type86_rng(struct zcrypt_device *zdev,
return msg->fmt2.count2;
}
-static int convert_response_ica(struct zcrypt_device *zdev,
+static int convert_response_ica(struct zcrypt_queue *zq,
struct ap_message *reply,
char __user *outputdata,
unsigned int outputdatalength)
{
struct type86x_reply *msg = reply->message;
- /* Response type byte is the second byte in the response. */
- switch (((unsigned char *) reply->message)[1]) {
+ switch (msg->hdr.type) {
case TYPE82_RSP_CODE:
case TYPE88_RSP_CODE:
- return convert_error(zdev, reply);
+ return convert_error(zq, reply);
case TYPE86_RSP_CODE:
if (msg->cprbx.ccp_rtcode &&
(msg->cprbx.ccp_rscode == 0x14f) &&
(outputdatalength > 256)) {
- if (zdev->max_exp_bit_length <= 17) {
- zdev->max_exp_bit_length = 17;
+ if (zq->zcard->max_exp_bit_length <= 17) {
+ zq->zcard->max_exp_bit_length = 17;
return -EAGAIN;
} else
return -EINVAL;
}
if (msg->hdr.reply_code)
- return convert_error(zdev, reply);
+ return convert_error(zq, reply);
if (msg->cprbx.cprb_ver_id == 0x02)
- return convert_type86_ica(zdev, reply,
+ return convert_type86_ica(zq, reply,
outputdata, outputdatalength);
/* Fall through, no break, incorrect cprb version is an unknown
* response */
default: /* Unknown response type, this should NEVER EVER happen */
- zdev->online = 0;
- pr_err("Cryptographic device %x failed and was set offline\n",
- AP_QID_DEVICE(zdev->ap_dev->qid));
- ZCRYPT_DBF_DEV(DBF_ERR, zdev, "dev%04xo%dfail",
- AP_QID_DEVICE(zdev->ap_dev->qid), zdev->online);
+ zq->online = 0;
+ pr_err("Cryptographic device %02x.%04x failed and was set offline\n",
+ AP_QID_CARD(zq->queue->qid),
+ AP_QID_QUEUE(zq->queue->qid));
+ ZCRYPT_DBF(DBF_ERR,
+ "device=%02x.%04x rtype=0x%02x => online=0 rc=EAGAIN\n",
+ AP_QID_CARD(zq->queue->qid),
+ AP_QID_QUEUE(zq->queue->qid),
+ (int) msg->hdr.type);
return -EAGAIN; /* repeat the request on a different device. */
}
}
-static int convert_response_xcrb(struct zcrypt_device *zdev,
+static int convert_response_xcrb(struct zcrypt_queue *zq,
struct ap_message *reply,
struct ica_xcRB *xcRB)
{
struct type86x_reply *msg = reply->message;
- /* Response type byte is the second byte in the response. */
- switch (((unsigned char *) reply->message)[1]) {
+ switch (msg->hdr.type) {
case TYPE82_RSP_CODE:
case TYPE88_RSP_CODE:
xcRB->status = 0x0008044DL; /* HDD_InvalidParm */
- return convert_error(zdev, reply);
+ return convert_error(zq, reply);
case TYPE86_RSP_CODE:
if (msg->hdr.reply_code) {
memcpy(&(xcRB->status), msg->fmt2.apfs, sizeof(u32));
- return convert_error(zdev, reply);
+ return convert_error(zq, reply);
}
if (msg->cprbx.cprb_ver_id == 0x02)
- return convert_type86_xcrb(zdev, reply, xcRB);
+ return convert_type86_xcrb(zq, reply, xcRB);
/* Fall through, no break, incorrect cprb version is an unknown
* response */
default: /* Unknown response type, this should NEVER EVER happen */
xcRB->status = 0x0008044DL; /* HDD_InvalidParm */
- zdev->online = 0;
- pr_err("Cryptographic device %x failed and was set offline\n",
- AP_QID_DEVICE(zdev->ap_dev->qid));
- ZCRYPT_DBF_DEV(DBF_ERR, zdev, "dev%04xo%dfail",
- AP_QID_DEVICE(zdev->ap_dev->qid), zdev->online);
+ zq->online = 0;
+ pr_err("Cryptographic device %02x.%04x failed and was set offline\n",
+ AP_QID_CARD(zq->queue->qid),
+ AP_QID_QUEUE(zq->queue->qid));
+ ZCRYPT_DBF(DBF_ERR,
+ "device=%02x.%04x rtype=0x%02x => online=0 rc=EAGAIN\n",
+ AP_QID_CARD(zq->queue->qid),
+ AP_QID_QUEUE(zq->queue->qid),
+ (int) msg->hdr.type);
return -EAGAIN; /* repeat the request on a different device. */
}
}
-static int convert_response_ep11_xcrb(struct zcrypt_device *zdev,
+static int convert_response_ep11_xcrb(struct zcrypt_queue *zq,
struct ap_message *reply, struct ep11_urb *xcRB)
{
struct type86_ep11_reply *msg = reply->message;
- /* Response type byte is the second byte in the response. */
- switch (((unsigned char *)reply->message)[1]) {
+ switch (msg->hdr.type) {
case TYPE82_RSP_CODE:
case TYPE87_RSP_CODE:
- return convert_error(zdev, reply);
+ return convert_error(zq, reply);
case TYPE86_RSP_CODE:
if (msg->hdr.reply_code)
- return convert_error(zdev, reply);
+ return convert_error(zq, reply);
if (msg->cprbx.cprb_ver_id == 0x04)
- return convert_type86_ep11_xcrb(zdev, reply, xcRB);
+ return convert_type86_ep11_xcrb(zq, reply, xcRB);
/* Fall through, no break, incorrect cprb version is an unknown resp.*/
default: /* Unknown response type, this should NEVER EVER happen */
- zdev->online = 0;
- pr_err("Cryptographic device %x failed and was set offline\n",
- AP_QID_DEVICE(zdev->ap_dev->qid));
- ZCRYPT_DBF_DEV(DBF_ERR, zdev, "dev%04xo%dfail",
- AP_QID_DEVICE(zdev->ap_dev->qid), zdev->online);
+ zq->online = 0;
+ pr_err("Cryptographic device %02x.%04x failed and was set offline\n",
+ AP_QID_CARD(zq->queue->qid),
+ AP_QID_QUEUE(zq->queue->qid));
+ ZCRYPT_DBF(DBF_ERR,
+ "device=%02x.%04x rtype=0x%02x => online=0 rc=EAGAIN\n",
+ AP_QID_CARD(zq->queue->qid),
+ AP_QID_QUEUE(zq->queue->qid),
+ (int) msg->hdr.type);
return -EAGAIN; /* repeat the request on a different device. */
}
}
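
Every converter in this hunk swaps the raw-buffer peek ((unsigned char *) reply->message)[1] for the typed msg->hdr.type field. A minimal standalone sketch of that dispatch pattern, with stand-in types rather than the real type86 definitions:

#include <stdio.h>

#define TYPE82_RSP_CODE 0x82
#define TYPE86_RSP_CODE 0x86

/* Stand-in for the reply header; byte 1 carries the response type. */
struct rsp_hdr {
	unsigned char reserved;		/* byte 0 */
	unsigned char type;		/* byte 1: response type code */
};

static int dispatch(const void *reply)
{
	const struct rsp_hdr *hdr = reply;

	switch (hdr->type) {	/* was: ((unsigned char *) reply)[1] */
	case TYPE82_RSP_CODE:
		return -1;	/* error reply */
	case TYPE86_RSP_CODE:
		return 0;	/* normal reply */
	default:
		return -2;	/* unknown: driver takes the queue offline */
	}
}

int main(void)
{
	unsigned char buf[2] = { 0x00, TYPE86_RSP_CODE };

	printf("%d\n", dispatch(buf));	/* prints 0 */
	return 0;
}

Tying the offset to a struct layout rather than a magic index keeps the dispatch readable and robust if the header definition ever changes.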
-static int convert_response_rng(struct zcrypt_device *zdev,
+static int convert_response_rng(struct zcrypt_queue *zq,
struct ap_message *reply,
char *data)
{
@@ -794,15 +896,19 @@ static int convert_response_rng(struct zcrypt_device *zdev,
if (msg->hdr.reply_code)
return -EINVAL;
if (msg->cprbx.cprb_ver_id == 0x02)
- return convert_type86_rng(zdev, reply, data);
+ return convert_type86_rng(zq, reply, data);
/* Fall through, no break, incorrect cprb version is an unknown
* response */
default: /* Unknown response type, this should NEVER EVER happen */
- zdev->online = 0;
- pr_err("Cryptographic device %x failed and was set offline\n",
- AP_QID_DEVICE(zdev->ap_dev->qid));
- ZCRYPT_DBF_DEV(DBF_ERR, zdev, "dev%04xo%dfail",
- AP_QID_DEVICE(zdev->ap_dev->qid), zdev->online);
+ zq->online = 0;
+ pr_err("Cryptographic device %02x.%04x failed and was set offline\n",
+ AP_QID_CARD(zq->queue->qid),
+ AP_QID_QUEUE(zq->queue->qid));
+ ZCRYPT_DBF(DBF_ERR,
+ "device=%02x.%04x rtype=0x%02x => online=0 rc=EAGAIN\n",
+ AP_QID_CARD(zq->queue->qid),
+ AP_QID_QUEUE(zq->queue->qid),
+ (int) msg->hdr.type);
return -EAGAIN; /* repeat the request on a different device. */
}
}
@@ -811,11 +917,11 @@ static int convert_response_rng(struct zcrypt_device *zdev,
* This function is called from the AP bus code after a crypto request
* "msg" has finished with the reply message "reply".
* It is called from tasklet context.
- * @ap_dev: pointer to the AP device
+ * @aq: pointer to the AP queue
* @msg: pointer to the AP message
* @reply: pointer to the AP reply message
*/
-static void zcrypt_msgtype6_receive(struct ap_device *ap_dev,
+static void zcrypt_msgtype6_receive(struct ap_queue *aq,
struct ap_message *msg,
struct ap_message *reply)
{
@@ -860,11 +966,11 @@ out:
* This function is called from the AP bus code after a crypto request
* "msg" has finished with the reply message "reply".
* It is called from tasklet context.
- * @ap_dev: pointer to the AP device
+ * @aq: pointer to the AP queue
* @msg: pointer to the AP message
* @reply: pointer to the AP reply message
*/
-static void zcrypt_msgtype6_receive_ep11(struct ap_device *ap_dev,
+static void zcrypt_msgtype6_receive_ep11(struct ap_queue *aq,
struct ap_message *msg,
struct ap_message *reply)
{
@@ -904,11 +1010,11 @@ static atomic_t zcrypt_step = ATOMIC_INIT(0);
/**
* The request distributor calls this function if it picked the PCIXCC/CEX2C
* device to handle a modexpo request.
- * @zdev: pointer to zcrypt_device structure that identifies the
+ * @zq: pointer to zcrypt_queue structure that identifies the
* PCIXCC/CEX2C device to the request distributor
* @mex: pointer to the modexpo request buffer
*/
-static long zcrypt_msgtype6_modexpo(struct zcrypt_device *zdev,
+static long zcrypt_msgtype6_modexpo(struct zcrypt_queue *zq,
struct ica_rsa_modexpo *mex)
{
struct ap_message ap_msg;
@@ -925,21 +1031,21 @@ static long zcrypt_msgtype6_modexpo(struct zcrypt_device *zdev,
ap_msg.psmid = (((unsigned long long) current->pid) << 32) +
atomic_inc_return(&zcrypt_step);
ap_msg.private = &resp_type;
- rc = ICAMEX_msg_to_type6MEX_msgX(zdev, &ap_msg, mex);
+ rc = ICAMEX_msg_to_type6MEX_msgX(zq, &ap_msg, mex);
if (rc)
goto out_free;
init_completion(&resp_type.work);
- ap_queue_message(zdev->ap_dev, &ap_msg);
+ ap_queue_message(zq->queue, &ap_msg);
rc = wait_for_completion_interruptible(&resp_type.work);
if (rc == 0) {
rc = ap_msg.rc;
if (rc == 0)
- rc = convert_response_ica(zdev, &ap_msg,
+ rc = convert_response_ica(zq, &ap_msg,
mex->outputdata,
mex->outputdatalength);
} else
/* Signal pending. */
- ap_cancel_message(zdev->ap_dev, &ap_msg);
+ ap_cancel_message(zq->queue, &ap_msg);
out_free:
free_page((unsigned long) ap_msg.message);
return rc;
@@ -948,11 +1054,11 @@ out_free:
/**
* The request distributor calls this function if it picked the PCIXCC/CEX2C
* device to handle a modexpo_crt request.
- * @zdev: pointer to zcrypt_device structure that identifies the
+ * @zq: pointer to zcrypt_queue structure that identifies the
* PCIXCC/CEX2C device to the request distributor
* @crt: pointer to the modexpoc_crt request buffer
*/
-static long zcrypt_msgtype6_modexpo_crt(struct zcrypt_device *zdev,
+static long zcrypt_msgtype6_modexpo_crt(struct zcrypt_queue *zq,
struct ica_rsa_modexpo_crt *crt)
{
struct ap_message ap_msg;
@@ -969,148 +1075,258 @@ static long zcrypt_msgtype6_modexpo_crt(struct zcrypt_device *zdev,
ap_msg.psmid = (((unsigned long long) current->pid) << 32) +
atomic_inc_return(&zcrypt_step);
ap_msg.private = &resp_type;
- rc = ICACRT_msg_to_type6CRT_msgX(zdev, &ap_msg, crt);
+ rc = ICACRT_msg_to_type6CRT_msgX(zq, &ap_msg, crt);
if (rc)
goto out_free;
init_completion(&resp_type.work);
- ap_queue_message(zdev->ap_dev, &ap_msg);
+ ap_queue_message(zq->queue, &ap_msg);
rc = wait_for_completion_interruptible(&resp_type.work);
if (rc == 0) {
rc = ap_msg.rc;
if (rc == 0)
- rc = convert_response_ica(zdev, &ap_msg,
+ rc = convert_response_ica(zq, &ap_msg,
crt->outputdata,
crt->outputdatalength);
- } else
+ } else {
/* Signal pending. */
- ap_cancel_message(zdev->ap_dev, &ap_msg);
+ ap_cancel_message(zq->queue, &ap_msg);
+ }
out_free:
free_page((unsigned long) ap_msg.message);
return rc;
}
+unsigned int get_cprb_fc(struct ica_xcRB *xcRB,
+ struct ap_message *ap_msg,
+ unsigned int *func_code, unsigned short **dom)
+{
+ struct response_type resp_type = {
+ .type = PCIXCC_RESPONSE_TYPE_XCRB,
+ };
+ int rc;
+
+ ap_init_message(ap_msg);
+ ap_msg->message = kmalloc(MSGTYPE06_MAX_MSG_SIZE, GFP_KERNEL);
+ if (!ap_msg->message)
+ return -ENOMEM;
+ ap_msg->receive = zcrypt_msgtype6_receive;
+ ap_msg->psmid = (((unsigned long long) current->pid) << 32) +
+ atomic_inc_return(&zcrypt_step);
+ ap_msg->private = kmalloc(sizeof(resp_type), GFP_KERNEL);
+ if (!ap_msg->private) {
+ kzfree(ap_msg->message);
+ return -ENOMEM;
+ }
+ memcpy(ap_msg->private, &resp_type, sizeof(resp_type));
+ rc = XCRB_msg_to_type6CPRB_msgX(ap_msg, xcRB, func_code, dom);
+ if (rc) {
+ kzfree(ap_msg->message);
+ kzfree(ap_msg->private);
+ }
+ return rc;
+}
+
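get_cprb_fc() splits message construction from transport: it builds the type6 CPRB, reports the function code and domain so the distributor can pick a matching queue, and moves the response_type bookkeeping into ap_msg->private, which the send path later frees. A hedged sketch of the resulting calling sequence; pick_queue() and the direct send call are illustrative stand-ins, since the real distributor lives in zcrypt_api.c and reaches the send step through zq->ops->send_cprb:

/*
 * Illustrative calling sequence only; the struct types are used
 * opaquely and pick_queue()/send_cprb_on_queue() are hypothetical.
 */
struct ap_message;
struct ica_xcRB;
struct zcrypt_queue;

unsigned int get_cprb_fc(struct ica_xcRB *xcrb, struct ap_message *ap_msg,
			 unsigned int *func_code, unsigned short **dom);
long send_cprb_on_queue(struct zcrypt_queue *zq, struct ica_xcRB *xcrb,
			struct ap_message *ap_msg);
struct zcrypt_queue *pick_queue(unsigned int func_code, unsigned short *dom);

long distribute_cprb(struct ica_xcRB *xcrb, struct ap_message *ap_msg)
{
	unsigned int func_code;
	unsigned short *domain;
	long rc;

	/* 1. Build the type6 CPRB once; learn function code and domain. */
	rc = (long) get_cprb_fc(xcrb, ap_msg, &func_code, &domain);
	if (rc)
		return rc;
	/* 2. Choose a queue that serves this function code. */
	/* 3. Send; ap_msg->message/private are freed by the send path. */
	return send_cprb_on_queue(pick_queue(func_code, domain), xcrb, ap_msg);
}
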
/**
* The request distributor calls this function if it picked the PCIXCC/CEX2C
* device to handle a send_cprb request.
- * @zdev: pointer to zcrypt_device structure that identifies the
+ * @zq: pointer to zcrypt_queue structure that identifies the
* PCIXCC/CEX2C device to the request distributor
* @xcRB: pointer to the send_cprb request buffer
*/
-static long zcrypt_msgtype6_send_cprb(struct zcrypt_device *zdev,
- struct ica_xcRB *xcRB)
+static long zcrypt_msgtype6_send_cprb(struct zcrypt_queue *zq,
+ struct ica_xcRB *xcRB,
+ struct ap_message *ap_msg)
{
- struct ap_message ap_msg;
- struct response_type resp_type = {
- .type = PCIXCC_RESPONSE_TYPE_XCRB,
- };
int rc;
+ struct response_type *rtype = (struct response_type *)(ap_msg->private);
- ap_init_message(&ap_msg);
- ap_msg.message = kmalloc(MSGTYPE06_MAX_MSG_SIZE, GFP_KERNEL);
- if (!ap_msg.message)
- return -ENOMEM;
- ap_msg.receive = zcrypt_msgtype6_receive;
- ap_msg.psmid = (((unsigned long long) current->pid) << 32) +
- atomic_inc_return(&zcrypt_step);
- ap_msg.private = &resp_type;
- rc = XCRB_msg_to_type6CPRB_msgX(zdev, &ap_msg, xcRB);
- if (rc)
- goto out_free;
- init_completion(&resp_type.work);
- ap_queue_message(zdev->ap_dev, &ap_msg);
- rc = wait_for_completion_interruptible(&resp_type.work);
+ init_completion(&rtype->work);
+ ap_queue_message(zq->queue, ap_msg);
+ rc = wait_for_completion_interruptible(&rtype->work);
if (rc == 0) {
- rc = ap_msg.rc;
+ rc = ap_msg->rc;
if (rc == 0)
- rc = convert_response_xcrb(zdev, &ap_msg, xcRB);
+ rc = convert_response_xcrb(zq, ap_msg, xcRB);
} else
/* Signal pending. */
- ap_cancel_message(zdev->ap_dev, &ap_msg);
-out_free:
- kzfree(ap_msg.message);
+ ap_cancel_message(zq->queue, ap_msg);
+
+ kzfree(ap_msg->message);
+ kzfree(ap_msg->private);
+ return rc;
+}
+
+unsigned int get_ep11cprb_fc(struct ep11_urb *xcrb,
+ struct ap_message *ap_msg,
+ unsigned int *func_code)
+{
+ struct response_type resp_type = {
+ .type = PCIXCC_RESPONSE_TYPE_EP11,
+ };
+ int rc;
+
+ ap_init_message(ap_msg);
+ ap_msg->message = kmalloc(MSGTYPE06_MAX_MSG_SIZE, GFP_KERNEL);
+ if (!ap_msg->message)
+ return -ENOMEM;
+ ap_msg->receive = zcrypt_msgtype6_receive_ep11;
+ ap_msg->psmid = (((unsigned long long) current->pid) << 32) +
+ atomic_inc_return(&zcrypt_step);
+ ap_msg->private = kmalloc(sizeof(resp_type), GFP_KERNEL);
+ if (!ap_msg->private) {
+ kzfree(ap_msg->message);
+ return -ENOMEM;
+ }
+ memcpy(ap_msg->private, &resp_type, sizeof(resp_type));
+ rc = xcrb_msg_to_type6_ep11cprb_msgx(ap_msg, xcrb, func_code);
+ if (rc) {
+ kzfree(ap_msg->message);
+ kzfree(ap_msg->private);
+ }
return rc;
}
/**
* The request distributor calls this function if it picked the CEX4P
* device to handle a send_ep11_cprb request.
- * @zdev: pointer to zcrypt_device structure that identifies the
+ * @zq: pointer to zcrypt_queue structure that identifies the
* CEX4P device to the request distributor
* @xcRB: pointer to the ep11 user request block
*/
-static long zcrypt_msgtype6_send_ep11_cprb(struct zcrypt_device *zdev,
- struct ep11_urb *xcrb)
+static long zcrypt_msgtype6_send_ep11_cprb(struct zcrypt_queue *zq,
+ struct ep11_urb *xcrb,
+ struct ap_message *ap_msg)
{
- struct ap_message ap_msg;
- struct response_type resp_type = {
- .type = PCIXCC_RESPONSE_TYPE_EP11,
- };
int rc;
+ unsigned int lfmt;
+ struct response_type *rtype = (struct response_type *)(ap_msg->private);
+ struct {
+ struct type6_hdr hdr;
+ struct ep11_cprb cprbx;
+ unsigned char pld_tag; /* fixed value 0x30 */
+ unsigned char pld_lenfmt; /* payload length format */
+ } __packed * msg = ap_msg->message;
+ struct pld_hdr {
+ unsigned char func_tag; /* fixed value 0x4 */
+ unsigned char func_len; /* fixed value 0x4 */
+ unsigned int func_val; /* function ID */
+ unsigned char dom_tag; /* fixed value 0x4 */
+ unsigned char dom_len; /* fixed value 0x4 */
+ unsigned int dom_val; /* domain id */
+ } __packed * payload_hdr = NULL;
- ap_init_message(&ap_msg);
- ap_msg.message = kmalloc(MSGTYPE06_MAX_MSG_SIZE, GFP_KERNEL);
- if (!ap_msg.message)
- return -ENOMEM;
- ap_msg.receive = zcrypt_msgtype6_receive_ep11;
- ap_msg.psmid = (((unsigned long long) current->pid) << 32) +
- atomic_inc_return(&zcrypt_step);
- ap_msg.private = &resp_type;
- rc = xcrb_msg_to_type6_ep11cprb_msgx(zdev, &ap_msg, xcrb);
- if (rc)
- goto out_free;
- init_completion(&resp_type.work);
- ap_queue_message(zdev->ap_dev, &ap_msg);
- rc = wait_for_completion_interruptible(&resp_type.work);
+
+ /**
+ * The target domain field within the cprb body/payload block will be
+ * replaced by the usage domain for non-management commands only.
+ * Therefore we check the first bit of the 'flags' parameter for
+ * management command indication.
+ * 0 - non management command
+ * 1 - management command
+ */
+ if (!((msg->cprbx.flags & 0x80) == 0x80)) {
+ msg->cprbx.target_id = (unsigned int)
+ AP_QID_QUEUE(zq->queue->qid);
+
+ if ((msg->pld_lenfmt & 0x80) == 0x80) { /*ext.len.fmt 2 or 3*/
+ switch (msg->pld_lenfmt & 0x03) {
+ case 1:
+ lfmt = 2;
+ break;
+ case 2:
+ lfmt = 3;
+ break;
+ default:
+ return -EINVAL;
+ }
+ } else {
+ lfmt = 1; /* length format #1 */
+ }
+ payload_hdr = (struct pld_hdr *)((&(msg->pld_lenfmt))+lfmt);
+ payload_hdr->dom_val = (unsigned int)
+ AP_QID_QUEUE(zq->queue->qid);
+ }
+
+ init_completion(&rtype->work);
+ ap_queue_message(zq->queue, ap_msg);
+ rc = wait_for_completion_interruptible(&rtype->work);
if (rc == 0) {
- rc = ap_msg.rc;
+ rc = ap_msg->rc;
if (rc == 0)
- rc = convert_response_ep11_xcrb(zdev, &ap_msg, xcrb);
+ rc = convert_response_ep11_xcrb(zq, ap_msg, xcrb);
} else
/* Signal pending. */
- ap_cancel_message(zdev->ap_dev, &ap_msg);
+ ap_cancel_message(zq->queue, ap_msg);
-out_free:
- kzfree(ap_msg.message);
+ kzfree(ap_msg->message);
+ kzfree(ap_msg->private);
return rc;
}
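
The lfmt decode above is, in effect, ASN.1 BER length handling for the 0x30 (SEQUENCE) payload: a clear high bit in pld_lenfmt means the octet holds the length itself, a set high bit means the low bits count the extra length octets that follow, and the function-ID header starts right after them. Reading it as BER is an interpretation; the driver comment only calls it a "payload length format". A standalone sketch of the same decode:

#include <stdio.h>

/* Returns the offset from the length octet to the payload, or -1. */
static int ber_len_octets(unsigned char lenfmt)
{
	if (!(lenfmt & 0x80))		/* short form: length in this octet */
		return 1;
	switch (lenfmt & 0x03) {	/* long form: count of length octets */
	case 1:
		return 2;		/* one extra length octet */
	case 2:
		return 3;		/* two extra length octets */
	default:
		return -1;		/* unsupported; driver returns -EINVAL */
	}
}

int main(void)
{
	printf("%d %d %d %d\n",
	       ber_len_octets(0x2a),	/* 1 */
	       ber_len_octets(0x81),	/* 2 */
	       ber_len_octets(0x82),	/* 3 */
	       ber_len_octets(0x83));	/* -1 */
	return 0;
}
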
+unsigned int get_rng_fc(struct ap_message *ap_msg, int *func_code,
+ unsigned int *domain)
+{
+ struct response_type resp_type = {
+ .type = PCIXCC_RESPONSE_TYPE_XCRB,
+ };
+
+ ap_init_message(ap_msg);
+ ap_msg->message = kmalloc(MSGTYPE06_MAX_MSG_SIZE, GFP_KERNEL);
+ if (!ap_msg->message)
+ return -ENOMEM;
+ ap_msg->receive = zcrypt_msgtype6_receive;
+ ap_msg->psmid = (((unsigned long long) current->pid) << 32) +
+ atomic_inc_return(&zcrypt_step);
+ ap_msg->private = kmalloc(sizeof(resp_type), GFP_KERNEL);
+ if (!ap_msg->private) {
+ kzfree(ap_msg->message);
+ return -ENOMEM;
+ }
+ memcpy(ap_msg->private, &resp_type, sizeof(resp_type));
+
+ rng_type6CPRB_msgX(ap_msg, ZCRYPT_RNG_BUFFER_SIZE, domain);
+
+ *func_code = HWRNG;
+ return 0;
+}
+
/**
* The request distributor calls this function if it picked the PCIXCC/CEX2C
* device to generate random data.
- * @zdev: pointer to zcrypt_device structure that identifies the
+ * @zq: pointer to zcrypt_queue structure that identifies the
* PCIXCC/CEX2C device to the request distributor
* @buffer: pointer to a memory page to return random data
*/
-
-static long zcrypt_msgtype6_rng(struct zcrypt_device *zdev,
- char *buffer)
+static long zcrypt_msgtype6_rng(struct zcrypt_queue *zq,
+ char *buffer, struct ap_message *ap_msg)
{
- struct ap_message ap_msg;
- struct response_type resp_type = {
- .type = PCIXCC_RESPONSE_TYPE_XCRB,
- };
+ struct {
+ struct type6_hdr hdr;
+ struct CPRBX cprbx;
+ char function_code[2];
+ short int rule_length;
+ char rule[8];
+ short int verb_length;
+ short int key_length;
+ } __packed * msg = ap_msg->message;
+ struct response_type *rtype = (struct response_type *)(ap_msg->private);
int rc;
- ap_init_message(&ap_msg);
- ap_msg.message = kmalloc(MSGTYPE06_MAX_MSG_SIZE, GFP_KERNEL);
- if (!ap_msg.message)
- return -ENOMEM;
- ap_msg.receive = zcrypt_msgtype6_receive;
- ap_msg.psmid = (((unsigned long long) current->pid) << 32) +
- atomic_inc_return(&zcrypt_step);
- ap_msg.private = &resp_type;
- rng_type6CPRB_msgX(zdev->ap_dev, &ap_msg, ZCRYPT_RNG_BUFFER_SIZE);
- init_completion(&resp_type.work);
- ap_queue_message(zdev->ap_dev, &ap_msg);
- rc = wait_for_completion_interruptible(&resp_type.work);
+ msg->cprbx.domain = AP_QID_QUEUE(zq->queue->qid);
+
+ init_completion(&rtype->work);
+ ap_queue_message(zq->queue, ap_msg);
+ rc = wait_for_completion_interruptible(&rtype->work);
if (rc == 0) {
- rc = ap_msg.rc;
+ rc = ap_msg->rc;
if (rc == 0)
- rc = convert_response_rng(zdev, &ap_msg, buffer);
+ rc = convert_response_rng(zq, ap_msg, buffer);
} else
/* Signal pending. */
- ap_cancel_message(zdev->ap_dev, &ap_msg);
- kfree(ap_msg.message);
+ ap_cancel_message(zq->queue, ap_msg);
+
+ kzfree(ap_msg->message);
+ kzfree(ap_msg->private);
return rc;
}
@@ -1145,12 +1361,11 @@ static struct zcrypt_ops zcrypt_msgtype6_ep11_ops = {
.send_ep11_cprb = zcrypt_msgtype6_send_ep11_cprb,
};
-int __init zcrypt_msgtype6_init(void)
+void __init zcrypt_msgtype6_init(void)
{
zcrypt_msgtype_register(&zcrypt_msgtype6_norng_ops);
zcrypt_msgtype_register(&zcrypt_msgtype6_ops);
zcrypt_msgtype_register(&zcrypt_msgtype6_ep11_ops);
- return 0;
}
void __exit zcrypt_msgtype6_exit(void)
@@ -1159,6 +1374,3 @@ void __exit zcrypt_msgtype6_exit(void)
zcrypt_msgtype_unregister(&zcrypt_msgtype6_ops);
zcrypt_msgtype_unregister(&zcrypt_msgtype6_ep11_ops);
}
-
-module_init(zcrypt_msgtype6_init);
-module_exit(zcrypt_msgtype6_exit);
diff --git a/drivers/s390/crypto/zcrypt_msgtype6.h b/drivers/s390/crypto/zcrypt_msgtype6.h
index 207247570623..7a0d5b57821f 100644
--- a/drivers/s390/crypto/zcrypt_msgtype6.h
+++ b/drivers/s390/crypto/zcrypt_msgtype6.h
@@ -116,15 +116,28 @@ struct type86_fmt2_ext {
unsigned int offset4; /* 0x00000000 */
} __packed;
+unsigned int get_cprb_fc(struct ica_xcRB *, struct ap_message *,
+ unsigned int *, unsigned short **);
+unsigned int get_ep11cprb_fc(struct ep11_urb *, struct ap_message *,
+ unsigned int *);
+unsigned int get_rng_fc(struct ap_message *, int *, unsigned int *);
+
+#define LOW 10
+#define MEDIUM 100
+#define HIGH 500
+
+int speed_idx_cca(int);
+int speed_idx_ep11(int);
+
/**
* Prepare a type6 CPRB message for random number generation
*
- * @ap_dev: AP device pointer
 * @ap_msg: pointer to AP message
+ * @random_number_length: number of random bytes to request
+ * @domain: out parameter, receives the domain id encoded into the CPRB
*/
-static inline void rng_type6CPRB_msgX(struct ap_device *ap_dev,
- struct ap_message *ap_msg,
- unsigned random_number_length)
+static inline void rng_type6CPRB_msgX(struct ap_message *ap_msg,
+ unsigned int random_number_length,
+ unsigned int *domain)
{
struct {
struct type6_hdr hdr;
@@ -156,16 +169,16 @@ static inline void rng_type6CPRB_msgX(struct ap_device *ap_dev,
msg->hdr.FromCardLen2 = random_number_length,
msg->cprbx = local_cprbx;
msg->cprbx.rpl_datal = random_number_length,
- msg->cprbx.domain = AP_QID_QUEUE(ap_dev->qid);
memcpy(msg->function_code, msg->hdr.function_code, 0x02);
msg->rule_length = 0x0a;
memcpy(msg->rule, "RANDOM ", 8);
msg->verb_length = 0x02;
msg->key_length = 0x02;
ap_msg->length = sizeof(*msg);
+ *domain = (unsigned short)msg->cprbx.domain;
}
-int zcrypt_msgtype6_init(void);
+void zcrypt_msgtype6_init(void);
void zcrypt_msgtype6_exit(void);
#endif /* _ZCRYPT_MSGTYPE6_H_ */
diff --git a/drivers/s390/crypto/zcrypt_pcixcc.c b/drivers/s390/crypto/zcrypt_pcixcc.c
index df8f0c4dacb7..26ceaa696765 100644
--- a/drivers/s390/crypto/zcrypt_pcixcc.c
+++ b/drivers/s390/crypto/zcrypt_pcixcc.c
@@ -32,6 +32,7 @@
#include <linux/slab.h>
#include <linux/atomic.h>
#include <asm/uaccess.h>
+#include <linux/mod_devicetable.h>
#include "ap_bus.h"
#include "zcrypt_api.h"
@@ -46,11 +47,6 @@
#define CEX3C_MIN_MOD_SIZE PCIXCC_MIN_MOD_SIZE
#define CEX3C_MAX_MOD_SIZE 512 /* 4096 bits */
-#define PCIXCC_MCL2_SPEED_RATING 7870
-#define PCIXCC_MCL3_SPEED_RATING 7870
-#define CEX2C_SPEED_RATING 7000
-#define CEX3C_SPEED_RATING 6500
-
#define PCIXCC_MAX_ICA_MESSAGE_SIZE 0x77c /* max size type6 v2 crt message */
#define PCIXCC_MAX_ICA_RESPONSE_SIZE 0x77c /* max size type86 v2 reply */
@@ -67,142 +63,34 @@ struct response_type {
#define PCIXCC_RESPONSE_TYPE_ICA 0
#define PCIXCC_RESPONSE_TYPE_XCRB 1
-static struct ap_device_id zcrypt_pcixcc_ids[] = {
- { AP_DEVICE(AP_DEVICE_TYPE_PCIXCC) },
- { AP_DEVICE(AP_DEVICE_TYPE_CEX2C) },
- { AP_DEVICE(AP_DEVICE_TYPE_CEX3C) },
- { /* end of list */ },
-};
-
-MODULE_DEVICE_TABLE(ap, zcrypt_pcixcc_ids);
MODULE_AUTHOR("IBM Corporation");
MODULE_DESCRIPTION("PCIXCC Cryptographic Coprocessor device driver, " \
"Copyright IBM Corp. 2001, 2012");
MODULE_LICENSE("GPL");
-static int zcrypt_pcixcc_probe(struct ap_device *ap_dev);
-static void zcrypt_pcixcc_remove(struct ap_device *ap_dev);
-
-static struct ap_driver zcrypt_pcixcc_driver = {
- .probe = zcrypt_pcixcc_probe,
- .remove = zcrypt_pcixcc_remove,
- .ids = zcrypt_pcixcc_ids,
- .request_timeout = PCIXCC_CLEANUP_TIME,
+static struct ap_device_id zcrypt_pcixcc_card_ids[] = {
+ { .dev_type = AP_DEVICE_TYPE_PCIXCC,
+ .match_flags = AP_DEVICE_ID_MATCH_CARD_TYPE },
+ { .dev_type = AP_DEVICE_TYPE_CEX2C,
+ .match_flags = AP_DEVICE_ID_MATCH_CARD_TYPE },
+ { .dev_type = AP_DEVICE_TYPE_CEX3C,
+ .match_flags = AP_DEVICE_ID_MATCH_CARD_TYPE },
+ { /* end of list */ },
};
-/**
- * Micro-code detection function. Its sends a message to a pcixcc card
- * to find out the microcode level.
- * @ap_dev: pointer to the AP device.
- */
-static int zcrypt_pcixcc_mcl(struct ap_device *ap_dev)
-{
- static unsigned char msg[] = {
- 0x00,0x06,0x00,0x00,0x00,0x00,0x00,0x00,
- 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
- 0x00,0x00,0x00,0x58,0x00,0x00,0x00,0x00,
- 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
- 0x43,0x41,0x00,0x00,0x00,0x00,0x00,0x00,
- 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
- 0x00,0x00,0x00,0x00,0x50,0x4B,0x00,0x00,
- 0x00,0x00,0x01,0xC4,0x00,0x00,0x00,0x00,
- 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
- 0x00,0x00,0x07,0x24,0x00,0x00,0x00,0x00,
- 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
- 0x00,0xDC,0x02,0x00,0x00,0x00,0x54,0x32,
- 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xE8,
- 0x00,0x00,0x00,0x00,0x00,0x00,0x07,0x24,
- 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
- 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
- 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
- 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
- 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
- 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
- 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
- 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
- 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
- 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
- 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
- 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
- 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
- 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
- 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
- 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
- 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
- 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
- 0x00,0x00,0x00,0x04,0x00,0x00,0x00,0x00,
- 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
- 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
- 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
- 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
- 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
- 0x00,0x00,0x00,0x00,0x50,0x4B,0x00,0x0A,
- 0x4D,0x52,0x50,0x20,0x20,0x20,0x20,0x20,
- 0x00,0x42,0x00,0x01,0x02,0x03,0x04,0x05,
- 0x06,0x07,0x08,0x09,0x0A,0x0B,0x0C,0x0D,
- 0x0E,0x0F,0x00,0x11,0x22,0x33,0x44,0x55,
- 0x66,0x77,0x88,0x99,0xAA,0xBB,0xCC,0xDD,
- 0xEE,0xFF,0xFF,0xEE,0xDD,0xCC,0xBB,0xAA,
- 0x99,0x88,0x77,0x66,0x55,0x44,0x33,0x22,
- 0x11,0x00,0x01,0x23,0x45,0x67,0x89,0xAB,
- 0xCD,0xEF,0xFE,0xDC,0xBA,0x98,0x76,0x54,
- 0x32,0x10,0x00,0x9A,0x00,0x98,0x00,0x00,
- 0x1E,0x00,0x00,0x94,0x00,0x00,0x00,0x00,
- 0x04,0x00,0x00,0x8C,0x00,0x00,0x00,0x40,
- 0x02,0x00,0x00,0x40,0xBA,0xE8,0x23,0x3C,
- 0x75,0xF3,0x91,0x61,0xD6,0x73,0x39,0xCF,
- 0x7B,0x6D,0x8E,0x61,0x97,0x63,0x9E,0xD9,
- 0x60,0x55,0xD6,0xC7,0xEF,0xF8,0x1E,0x63,
- 0x95,0x17,0xCC,0x28,0x45,0x60,0x11,0xC5,
- 0xC4,0x4E,0x66,0xC6,0xE6,0xC3,0xDE,0x8A,
- 0x19,0x30,0xCF,0x0E,0xD7,0xAA,0xDB,0x01,
- 0xD8,0x00,0xBB,0x8F,0x39,0x9F,0x64,0x28,
- 0xF5,0x7A,0x77,0x49,0xCC,0x6B,0xA3,0x91,
- 0x97,0x70,0xE7,0x60,0x1E,0x39,0xE1,0xE5,
- 0x33,0xE1,0x15,0x63,0x69,0x08,0x80,0x4C,
- 0x67,0xC4,0x41,0x8F,0x48,0xDF,0x26,0x98,
- 0xF1,0xD5,0x8D,0x88,0xD9,0x6A,0xA4,0x96,
- 0xC5,0x84,0xD9,0x30,0x49,0x67,0x7D,0x19,
- 0xB1,0xB3,0x45,0x4D,0xB2,0x53,0x9A,0x47,
- 0x3C,0x7C,0x55,0xBF,0xCC,0x85,0x00,0x36,
- 0xF1,0x3D,0x93,0x53
- };
- unsigned long long psmid;
- struct CPRBX *cprbx;
- char *reply;
- int rc, i;
-
- reply = (void *) get_zeroed_page(GFP_KERNEL);
- if (!reply)
- return -ENOMEM;
+MODULE_DEVICE_TABLE(ap, zcrypt_pcixcc_card_ids);
- rc = ap_send(ap_dev->qid, 0x0102030405060708ULL, msg, sizeof(msg));
- if (rc)
- goto out_free;
-
- /* Wait for the test message to complete. */
- for (i = 0; i < 6; i++) {
- msleep(300);
- rc = ap_recv(ap_dev->qid, &psmid, reply, 4096);
- if (rc == 0 && psmid == 0x0102030405060708ULL)
- break;
- }
-
- if (i >= 6) {
- /* Got no answer. */
- rc = -ENODEV;
- goto out_free;
- }
+static struct ap_device_id zcrypt_pcixcc_queue_ids[] = {
+ { .dev_type = AP_DEVICE_TYPE_PCIXCC,
+ .match_flags = AP_DEVICE_ID_MATCH_QUEUE_TYPE },
+ { .dev_type = AP_DEVICE_TYPE_CEX2C,
+ .match_flags = AP_DEVICE_ID_MATCH_QUEUE_TYPE },
+ { .dev_type = AP_DEVICE_TYPE_CEX3C,
+ .match_flags = AP_DEVICE_ID_MATCH_QUEUE_TYPE },
+ { /* end of list */ },
+};
- cprbx = (struct CPRBX *) (reply + 48);
- if (cprbx->ccp_rtcode == 8 && cprbx->ccp_rscode == 33)
- rc = ZCRYPT_PCIXCC_MCL2;
- else
- rc = ZCRYPT_PCIXCC_MCL3;
-out_free:
- free_page((unsigned long) reply);
- return rc;
-}
+MODULE_DEVICE_TABLE(ap, zcrypt_pcixcc_queue_ids);
/**
 * Large random number detection function. It sends a message to a pcixcc
@@ -211,15 +99,25 @@ out_free:
*
* Returns 1 if large random numbers are supported, 0 if not and < 0 on error.
*/
-static int zcrypt_pcixcc_rng_supported(struct ap_device *ap_dev)
+static int zcrypt_pcixcc_rng_supported(struct ap_queue *aq)
{
struct ap_message ap_msg;
unsigned long long psmid;
+ unsigned int domain;
struct {
struct type86_hdr hdr;
struct type86_fmt2_ext fmt2;
struct CPRBX cprbx;
} __attribute__((packed)) *reply;
+ struct {
+ struct type6_hdr hdr;
+ struct CPRBX cprbx;
+ char function_code[2];
+ short int rule_length;
+ char rule[8];
+ short int verb_length;
+ short int key_length;
+ } __packed * msg;
int rc, i;
ap_init_message(&ap_msg);
@@ -227,8 +125,12 @@ static int zcrypt_pcixcc_rng_supported(struct ap_device *ap_dev)
if (!ap_msg.message)
return -ENOMEM;
- rng_type6CPRB_msgX(ap_dev, &ap_msg, 4);
- rc = ap_send(ap_dev->qid, 0x0102030405060708ULL, ap_msg.message,
+ rng_type6CPRB_msgX(&ap_msg, 4, &domain);
+
+ msg = ap_msg.message;
+ msg->cprbx.domain = AP_QID_QUEUE(aq->qid);
+
+ rc = ap_send(aq->qid, 0x0102030405060708ULL, ap_msg.message,
ap_msg.length);
if (rc)
goto out_free;
@@ -236,7 +138,7 @@ static int zcrypt_pcixcc_rng_supported(struct ap_device *ap_dev)
/* Wait for the test message to complete. */
for (i = 0; i < 2 * HZ; i++) {
msleep(1000 / HZ);
- rc = ap_recv(ap_dev->qid, &psmid, ap_msg.message, 4096);
+ rc = ap_recv(aq->qid, &psmid, ap_msg.message, 4096);
if (rc == 0 && psmid == 0x0102030405060708ULL)
break;
}
@@ -258,110 +160,168 @@ out_free:
}
/**
- * Probe function for PCIXCC/CEX2C cards. It always accepts the AP device
- * since the bus_match already checked the hardware type. The PCIXCC
- * cards come in two flavours: micro code level 2 and micro code level 3.
- * This is checked by sending a test message to the device.
- * @ap_dev: pointer to the AP device.
+ * Probe function for PCIXCC/CEX2C card devices. It always accepts the
+ * AP device since the bus_match already checked the hardware type. The
+ * PCIXCC cards come in two flavours: micro code level 2 and micro code
+ * level 3. This is checked by sending a test message to the device.
+ * @ap_dev: pointer to the AP card device.
*/
-static int zcrypt_pcixcc_probe(struct ap_device *ap_dev)
+static int zcrypt_pcixcc_card_probe(struct ap_device *ap_dev)
{
- struct zcrypt_device *zdev;
+ /*
+ * Normalized speed ratings per crypto adapter
+ * MEX_1k, MEX_2k, MEX_4k, CRT_1k, CRT_2k, CRT_4k, RNG, SECKEY
+ */
+ static const int CEX2C_SPEED_IDX[] = {
+ 1000, 1400, 2400, 1100, 1500, 2600, 100, 12};
+ static const int CEX3C_SPEED_IDX[] = {
+ 500, 700, 1400, 550, 800, 1500, 80, 10};
+
+ struct ap_card *ac = to_ap_card(&ap_dev->device);
+ struct zcrypt_card *zc;
int rc = 0;
- zdev = zcrypt_device_alloc(PCIXCC_MAX_XCRB_MESSAGE_SIZE);
- if (!zdev)
+ zc = zcrypt_card_alloc();
+ if (!zc)
return -ENOMEM;
- zdev->ap_dev = ap_dev;
- zdev->online = 1;
- switch (ap_dev->device_type) {
- case AP_DEVICE_TYPE_PCIXCC:
- rc = zcrypt_pcixcc_mcl(ap_dev);
- if (rc < 0) {
- zcrypt_device_free(zdev);
- return rc;
- }
- zdev->user_space_type = rc;
- if (rc == ZCRYPT_PCIXCC_MCL2) {
- zdev->type_string = "PCIXCC_MCL2";
- zdev->speed_rating = PCIXCC_MCL2_SPEED_RATING;
- zdev->min_mod_size = PCIXCC_MIN_MOD_SIZE_OLD;
- zdev->max_mod_size = PCIXCC_MAX_MOD_SIZE;
- zdev->max_exp_bit_length = PCIXCC_MAX_MOD_SIZE;
- } else {
- zdev->type_string = "PCIXCC_MCL3";
- zdev->speed_rating = PCIXCC_MCL3_SPEED_RATING;
- zdev->min_mod_size = PCIXCC_MIN_MOD_SIZE;
- zdev->max_mod_size = PCIXCC_MAX_MOD_SIZE;
- zdev->max_exp_bit_length = PCIXCC_MAX_MOD_SIZE;
- }
- break;
+ zc->card = ac;
+ ac->private = zc;
+ switch (ac->ap_dev.device_type) {
case AP_DEVICE_TYPE_CEX2C:
- zdev->user_space_type = ZCRYPT_CEX2C;
- zdev->type_string = "CEX2C";
- zdev->speed_rating = CEX2C_SPEED_RATING;
- zdev->min_mod_size = PCIXCC_MIN_MOD_SIZE;
- zdev->max_mod_size = PCIXCC_MAX_MOD_SIZE;
- zdev->max_exp_bit_length = PCIXCC_MAX_MOD_SIZE;
+ zc->user_space_type = ZCRYPT_CEX2C;
+ zc->type_string = "CEX2C";
+ memcpy(zc->speed_rating, CEX2C_SPEED_IDX,
+ sizeof(CEX2C_SPEED_IDX));
+ zc->min_mod_size = PCIXCC_MIN_MOD_SIZE;
+ zc->max_mod_size = PCIXCC_MAX_MOD_SIZE;
+ zc->max_exp_bit_length = PCIXCC_MAX_MOD_SIZE;
break;
case AP_DEVICE_TYPE_CEX3C:
- zdev->user_space_type = ZCRYPT_CEX3C;
- zdev->type_string = "CEX3C";
- zdev->speed_rating = CEX3C_SPEED_RATING;
- zdev->min_mod_size = CEX3C_MIN_MOD_SIZE;
- zdev->max_mod_size = CEX3C_MAX_MOD_SIZE;
- zdev->max_exp_bit_length = CEX3C_MAX_MOD_SIZE;
+ zc->user_space_type = ZCRYPT_CEX3C;
+ zc->type_string = "CEX3C";
+ memcpy(zc->speed_rating, CEX3C_SPEED_IDX,
+ sizeof(CEX3C_SPEED_IDX));
+ zc->min_mod_size = CEX3C_MIN_MOD_SIZE;
+ zc->max_mod_size = CEX3C_MAX_MOD_SIZE;
+ zc->max_exp_bit_length = CEX3C_MAX_MOD_SIZE;
break;
default:
- goto out_free;
+ zcrypt_card_free(zc);
+ return -ENODEV;
}
+ zc->online = 1;
+
+ rc = zcrypt_card_register(zc);
+ if (rc) {
+ ac->private = NULL;
+ zcrypt_card_free(zc);
+ }
+
+ return rc;
+}
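
The scalar speed_rating becomes an eight-entry index, one normalized cost per operation class (MEX/CRT at 1k/2k/4k, RNG, SECKEY), so a distributor can weigh queues per request type instead of per card generation. A hedged sketch of the kind of selection this enables; the real weighing lives in zcrypt_api.c, outside this patch, and the load-weighting formula here is invented for illustration:

#include <limits.h>
#include <stdio.h>

enum { MEX_1K, MEX_2K, MEX_4K, CRT_1K, CRT_2K, CRT_4K, RNG_OP, SECKEY };

struct card {
	const char *name;
	int speed_rating[8];	/* lower = faster, per operation class */
	int load;		/* outstanding requests */
};

static const struct card *pick(const struct card *c, int n, int op)
{
	const struct card *best = NULL;
	long best_cost = LONG_MAX;
	int i;

	for (i = 0; i < n; i++) {
		/* weigh static speed by current queue load */
		long cost = (long) c[i].speed_rating[op] * (c[i].load + 1);

		if (cost < best_cost) {
			best_cost = cost;
			best = &c[i];
		}
	}
	return best;
}

int main(void)
{
	struct card cards[] = {
		{ "CEX2C", {1000, 1400, 2400, 1100, 1500, 2600, 100, 12}, 0 },
		{ "CEX3C", { 500,  700, 1400,  550,  800, 1500,  80, 10}, 3 },
	};

	/* prints CEX2C: the idle older card beats the loaded faster one */
	printf("%s\n", pick(cards, 2, CRT_4K)->name);
	return 0;
}
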
- rc = zcrypt_pcixcc_rng_supported(ap_dev);
+/**
+ * This is called to remove the PCIXCC/CEX2C card driver information
+ * if an AP card device is removed.
+ */
+static void zcrypt_pcixcc_card_remove(struct ap_device *ap_dev)
+{
+ struct zcrypt_card *zc = to_ap_card(&ap_dev->device)->private;
+
+ if (zc)
+ zcrypt_card_unregister(zc);
+}
+
+static struct ap_driver zcrypt_pcixcc_card_driver = {
+ .probe = zcrypt_pcixcc_card_probe,
+ .remove = zcrypt_pcixcc_card_remove,
+ .ids = zcrypt_pcixcc_card_ids,
+};
+
+/**
+ * Probe function for PCIXCC/CEX2C queue devices. It always accepts the
+ * AP device since the bus_match already checked the hardware type. Large
+ * random number support is probed by sending a test message to the queue.
+ * @ap_dev: pointer to the AP queue device.
+ */
+static int zcrypt_pcixcc_queue_probe(struct ap_device *ap_dev)
+{
+ struct ap_queue *aq = to_ap_queue(&ap_dev->device);
+ struct zcrypt_queue *zq;
+ int rc;
+
+ zq = zcrypt_queue_alloc(PCIXCC_MAX_XCRB_MESSAGE_SIZE);
+ if (!zq)
+ return -ENOMEM;
+ zq->queue = aq;
+ zq->online = 1;
+ atomic_set(&zq->load, 0);
+ rc = zcrypt_pcixcc_rng_supported(aq);
if (rc < 0) {
- zcrypt_device_free(zdev);
+ zcrypt_queue_free(zq);
return rc;
}
if (rc)
- zdev->ops = zcrypt_msgtype_request(MSGTYPE06_NAME,
- MSGTYPE06_VARIANT_DEFAULT);
+ zq->ops = zcrypt_msgtype(MSGTYPE06_NAME,
+ MSGTYPE06_VARIANT_DEFAULT);
else
- zdev->ops = zcrypt_msgtype_request(MSGTYPE06_NAME,
- MSGTYPE06_VARIANT_NORNG);
- ap_device_init_reply(ap_dev, &zdev->reply);
- ap_dev->private = zdev;
- rc = zcrypt_device_register(zdev);
- if (rc)
- goto out_free;
- return 0;
-
- out_free:
- ap_dev->private = NULL;
- zcrypt_msgtype_release(zdev->ops);
- zcrypt_device_free(zdev);
+ zq->ops = zcrypt_msgtype(MSGTYPE06_NAME,
+ MSGTYPE06_VARIANT_NORNG);
+ ap_queue_init_reply(aq, &zq->reply);
+	aq->request_timeout = PCIXCC_CLEANUP_TIME;
+ aq->private = zq;
+ rc = zcrypt_queue_register(zq);
+ if (rc) {
+ aq->private = NULL;
+ zcrypt_queue_free(zq);
+ }
return rc;
}
/**
- * This is called to remove the extended PCIXCC/CEX2C driver information
- * if an AP device is removed.
+ * This is called to remove the PCIXCC/CEX2C queue driver information
+ * if an AP queue device is removed.
*/
-static void zcrypt_pcixcc_remove(struct ap_device *ap_dev)
+static void zcrypt_pcixcc_queue_remove(struct ap_device *ap_dev)
{
- struct zcrypt_device *zdev = ap_dev->private;
- struct zcrypt_ops *zops = zdev->ops;
+ struct ap_queue *aq = to_ap_queue(&ap_dev->device);
+ struct zcrypt_queue *zq = aq->private;
- zcrypt_device_unregister(zdev);
- zcrypt_msgtype_release(zops);
+ ap_queue_remove(aq);
+ if (zq)
+ zcrypt_queue_unregister(zq);
}
+static struct ap_driver zcrypt_pcixcc_queue_driver = {
+ .probe = zcrypt_pcixcc_queue_probe,
+ .remove = zcrypt_pcixcc_queue_remove,
+ .suspend = ap_queue_suspend,
+ .resume = ap_queue_resume,
+ .ids = zcrypt_pcixcc_queue_ids,
+};
+
int __init zcrypt_pcixcc_init(void)
{
- return ap_driver_register(&zcrypt_pcixcc_driver, THIS_MODULE, "pcixcc");
+ int rc;
+
+ rc = ap_driver_register(&zcrypt_pcixcc_card_driver,
+ THIS_MODULE, "pcixcccard");
+ if (rc)
+ return rc;
+
+ rc = ap_driver_register(&zcrypt_pcixcc_queue_driver,
+ THIS_MODULE, "pcixccqueue");
+ if (rc)
+ ap_driver_unregister(&zcrypt_pcixcc_card_driver);
+
+ return rc;
}
void zcrypt_pcixcc_exit(void)
{
- ap_driver_unregister(&zcrypt_pcixcc_driver);
+ ap_driver_unregister(&zcrypt_pcixcc_queue_driver);
+ ap_driver_unregister(&zcrypt_pcixcc_card_driver);
}
module_init(zcrypt_pcixcc_init);
diff --git a/drivers/s390/crypto/zcrypt_queue.c b/drivers/s390/crypto/zcrypt_queue.c
new file mode 100644
index 000000000000..a303f3b2c328
--- /dev/null
+++ b/drivers/s390/crypto/zcrypt_queue.c
@@ -0,0 +1,226 @@
+/*
+ * zcrypt 2.1.0
+ *
+ * Copyright IBM Corp. 2001, 2012
+ * Author(s): Robert Burroughs
+ * Eric Rossman (edrossma@us.ibm.com)
+ * Cornelia Huck <cornelia.huck@de.ibm.com>
+ *
+ * Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
+ * Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
+ * Ralph Wuerthner <rwuerthn@de.ibm.com>
+ * MSGTYPE restruct: Holger Dengler <hd@linux.vnet.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/miscdevice.h>
+#include <linux/fs.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+#include <linux/compat.h>
+#include <linux/slab.h>
+#include <linux/atomic.h>
+#include <linux/uaccess.h>
+#include <linux/hw_random.h>
+#include <linux/debugfs.h>
+#include <asm/debug.h>
+
+#include "zcrypt_debug.h"
+#include "zcrypt_api.h"
+
+#include "zcrypt_msgtype6.h"
+#include "zcrypt_msgtype50.h"
+
+/*
+ * Device attributes common for all crypto queue devices.
+ */
+
+static ssize_t zcrypt_queue_online_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct zcrypt_queue *zq = to_ap_queue(dev)->private;
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", zq->online);
+}
+
+static ssize_t zcrypt_queue_online_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct zcrypt_queue *zq = to_ap_queue(dev)->private;
+ struct zcrypt_card *zc = zq->zcard;
+ int online;
+
+ if (sscanf(buf, "%d\n", &online) != 1 || online < 0 || online > 1)
+ return -EINVAL;
+
+ if (online && !zc->online)
+ return -EINVAL;
+ zq->online = online;
+
+ ZCRYPT_DBF(DBF_INFO, "queue=%02x.%04x online=%d\n",
+ AP_QID_CARD(zq->queue->qid),
+ AP_QID_QUEUE(zq->queue->qid),
+ online);
+
+ if (!online)
+ ap_flush_queue(zq->queue);
+ return count;
+}
+
+static DEVICE_ATTR(online, 0644, zcrypt_queue_online_show,
+ zcrypt_queue_online_store);
+
+static struct attribute *zcrypt_queue_attrs[] = {
+ &dev_attr_online.attr,
+ NULL,
+};
+
+static struct attribute_group zcrypt_queue_attr_group = {
+ .attrs = zcrypt_queue_attrs,
+};
+
+void zcrypt_queue_force_online(struct zcrypt_queue *zq, int online)
+{
+ zq->online = online;
+ if (!online)
+ ap_flush_queue(zq->queue);
+}
+
+struct zcrypt_queue *zcrypt_queue_alloc(size_t max_response_size)
+{
+ struct zcrypt_queue *zq;
+
+ zq = kzalloc(sizeof(struct zcrypt_queue), GFP_KERNEL);
+ if (!zq)
+ return NULL;
+ zq->reply.message = kmalloc(max_response_size, GFP_KERNEL);
+ if (!zq->reply.message)
+ goto out_free;
+ zq->reply.length = max_response_size;
+ INIT_LIST_HEAD(&zq->list);
+ kref_init(&zq->refcount);
+ return zq;
+
+out_free:
+ kfree(zq);
+ return NULL;
+}
+EXPORT_SYMBOL(zcrypt_queue_alloc);
+
+void zcrypt_queue_free(struct zcrypt_queue *zq)
+{
+ kfree(zq->reply.message);
+ kfree(zq);
+}
+EXPORT_SYMBOL(zcrypt_queue_free);
+
+static void zcrypt_queue_release(struct kref *kref)
+{
+ struct zcrypt_queue *zq =
+ container_of(kref, struct zcrypt_queue, refcount);
+ zcrypt_queue_free(zq);
+}
+
+void zcrypt_queue_get(struct zcrypt_queue *zq)
+{
+ kref_get(&zq->refcount);
+}
+EXPORT_SYMBOL(zcrypt_queue_get);
+
+int zcrypt_queue_put(struct zcrypt_queue *zq)
+{
+ return kref_put(&zq->refcount, zcrypt_queue_release);
+}
+EXPORT_SYMBOL(zcrypt_queue_put);
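
zcrypt_queue_alloc() seeds a kref, zcrypt_queue_register() below hands that reference to the card's queue list, and zcrypt_queue_unregister() drops it via zcrypt_queue_put(), so the structure survives until the last zcrypt_queue_get() holder is done. A userspace model of that lifetime (single-threaded; the real kref is atomic):

#include <stdio.h>
#include <stdlib.h>

struct queue {
	int refcount;
};

static struct queue *queue_alloc(void)
{
	struct queue *q = calloc(1, sizeof(*q));

	if (q)
		q->refcount = 1;	/* kref_init() */
	return q;
}

static void queue_get(struct queue *q)
{
	q->refcount++;			/* kref_get() */
}

static void queue_put(struct queue *q)
{
	if (--q->refcount == 0) {	/* kref_put() release callback */
		printf("released\n");
		free(q);
	}
}

int main(void)
{
	struct queue *q = queue_alloc();

	queue_get(q);	/* e.g. a pending request pins the queue */
	queue_put(q);	/* unregister drops the list reference */
	queue_put(q);	/* last user gone: prints "released", then frees */
	return 0;
}
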
+
+/**
+ * zcrypt_queue_register() - Register a crypto queue device.
+ * @zq: Pointer to a crypto queue device
+ *
+ * Register a crypto queue device. Returns 0 if successful.
+ */
+int zcrypt_queue_register(struct zcrypt_queue *zq)
+{
+ struct zcrypt_card *zc;
+ int rc;
+
+ spin_lock(&zcrypt_list_lock);
+ zc = zq->queue->card->private;
+ zcrypt_card_get(zc);
+ zq->zcard = zc;
+ zq->online = 1; /* New devices are online by default. */
+
+ ZCRYPT_DBF(DBF_INFO, "queue=%02x.%04x register online=1\n",
+ AP_QID_CARD(zq->queue->qid), AP_QID_QUEUE(zq->queue->qid));
+
+ list_add_tail(&zq->list, &zc->zqueues);
+ zcrypt_device_count++;
+ spin_unlock(&zcrypt_list_lock);
+
+ rc = sysfs_create_group(&zq->queue->ap_dev.device.kobj,
+ &zcrypt_queue_attr_group);
+ if (rc)
+ goto out;
+ get_device(&zq->queue->ap_dev.device);
+
+ if (zq->ops->rng) {
+ rc = zcrypt_rng_device_add();
+ if (rc)
+ goto out_unregister;
+ }
+ return 0;
+
+out_unregister:
+ sysfs_remove_group(&zq->queue->ap_dev.device.kobj,
+ &zcrypt_queue_attr_group);
+ put_device(&zq->queue->ap_dev.device);
+out:
+ spin_lock(&zcrypt_list_lock);
+ list_del_init(&zq->list);
+ spin_unlock(&zcrypt_list_lock);
+ zcrypt_card_put(zc);
+ return rc;
+}
+EXPORT_SYMBOL(zcrypt_queue_register);
+
+/**
+ * zcrypt_queue_unregister(): Unregister a crypto queue device.
+ * @zq: Pointer to crypto queue device
+ *
+ * Unregister a crypto queue device.
+ */
+void zcrypt_queue_unregister(struct zcrypt_queue *zq)
+{
+ struct zcrypt_card *zc;
+
+ ZCRYPT_DBF(DBF_INFO, "queue=%02x.%04x unregister\n",
+ AP_QID_CARD(zq->queue->qid), AP_QID_QUEUE(zq->queue->qid));
+
+ zc = zq->zcard;
+ spin_lock(&zcrypt_list_lock);
+ list_del_init(&zq->list);
+ zcrypt_device_count--;
+ spin_unlock(&zcrypt_list_lock);
+ zcrypt_card_put(zc);
+ if (zq->ops->rng)
+ zcrypt_rng_device_remove();
+ sysfs_remove_group(&zq->queue->ap_dev.device.kobj,
+ &zcrypt_queue_attr_group);
+ put_device(&zq->queue->ap_dev.device);
+ zcrypt_queue_put(zq);
+}
+EXPORT_SYMBOL(zcrypt_queue_unregister);
diff --git a/drivers/s390/net/ctcm_main.c b/drivers/s390/net/ctcm_main.c
index ad17fc5883f6..ac65f12bcd43 100644
--- a/drivers/s390/net/ctcm_main.c
+++ b/drivers/s390/net/ctcm_main.c
@@ -1032,9 +1032,6 @@ static int ctcm_change_mtu(struct net_device *dev, int new_mtu)
struct ctcm_priv *priv;
int max_bufsize;
- if (new_mtu < 576 || new_mtu > 65527)
- return -EINVAL;
-
priv = dev->ml_priv;
max_bufsize = priv->channel[CTCM_READ]->max_bufsize;
@@ -1123,6 +1120,8 @@ void static ctcm_dev_setup(struct net_device *dev)
dev->type = ARPHRD_SLIP;
dev->tx_queue_len = 100;
dev->flags = IFF_POINTOPOINT | IFF_NOARP;
+ dev->min_mtu = 576;
+ dev->max_mtu = 65527;
}
/*
diff --git a/drivers/s390/net/lcs.c b/drivers/s390/net/lcs.c
index 251db0a02e73..211b31d9f157 100644
--- a/drivers/s390/net/lcs.c
+++ b/drivers/s390/net/lcs.c
@@ -1888,7 +1888,7 @@ lcs_stop_device(struct net_device *dev)
rc = lcs_stopcard(card);
if (rc)
dev_err(&card->dev->dev,
- " Shutting down the LCS device failed\n ");
+ " Shutting down the LCS device failed\n");
return rc;
}
diff --git a/drivers/s390/net/netiucv.c b/drivers/s390/net/netiucv.c
index b0e8ffdf864b..2981024a2438 100644
--- a/drivers/s390/net/netiucv.c
+++ b/drivers/s390/net/netiucv.c
@@ -302,8 +302,7 @@ static char *netiucv_printuser(struct iucv_connection *conn)
if (memcmp(conn->userdata, iucvMagic_ebcdic, 16)) {
tmp_uid[8] = '\0';
tmp_udat[16] = '\0';
- memcpy(tmp_uid, conn->userid, 8);
- memcpy(tmp_uid, netiucv_printname(tmp_uid, 8), 8);
+ memcpy(tmp_uid, netiucv_printname(conn->userid, 8), 8);
memcpy(tmp_udat, conn->userdata, 16);
EBCASC(tmp_udat, 16);
memcpy(tmp_udat, netiucv_printname(tmp_udat, 16), 16);
@@ -1429,27 +1428,6 @@ static struct net_device_stats *netiucv_stats (struct net_device * dev)
return &priv->stats;
}
-/**
- * netiucv_change_mtu
- * @dev: Pointer to interface struct.
- * @new_mtu: The new MTU to use for this interface.
- *
- * Sets MTU of an interface.
- *
- * Returns 0 on success, -EINVAL if MTU is out of valid range.
- * (valid range is 576 .. NETIUCV_MTU_MAX).
- */
-static int netiucv_change_mtu(struct net_device * dev, int new_mtu)
-{
- IUCV_DBF_TEXT(trace, 3, __func__);
- if (new_mtu < 576 || new_mtu > NETIUCV_MTU_MAX) {
- IUCV_DBF_TEXT(setup, 2, "given MTU out of valid range\n");
- return -EINVAL;
- }
- dev->mtu = new_mtu;
- return 0;
-}
-
/*
* attributes in sysfs
*/
@@ -1564,21 +1542,21 @@ static ssize_t buffer_write (struct device *dev, struct device_attribute *attr,
{
struct netiucv_priv *priv = dev_get_drvdata(dev);
struct net_device *ndev = priv->conn->netdev;
- char *e;
- int bs1;
+ unsigned int bs1;
+ int rc;
IUCV_DBF_TEXT(trace, 3, __func__);
if (count >= 39)
return -EINVAL;
- bs1 = simple_strtoul(buf, &e, 0);
+ rc = kstrtouint(buf, 0, &bs1);
- if (e && (!isspace(*e))) {
- IUCV_DBF_TEXT_(setup, 2, "buffer_write: invalid char %02x\n",
- *e);
+ if (rc == -EINVAL) {
+ IUCV_DBF_TEXT_(setup, 2, "buffer_write: invalid char %s\n",
+ buf);
return -EINVAL;
}
- if (bs1 > NETIUCV_BUFSIZE_MAX) {
+ if ((rc == -ERANGE) || (bs1 > NETIUCV_BUFSIZE_MAX)) {
IUCV_DBF_TEXT_(setup, 2,
"buffer_write: buffer size %d too large\n",
bs1);
@@ -1987,12 +1965,13 @@ static const struct net_device_ops netiucv_netdev_ops = {
.ndo_stop = netiucv_close,
.ndo_get_stats = netiucv_stats,
.ndo_start_xmit = netiucv_tx,
- .ndo_change_mtu = netiucv_change_mtu,
};
static void netiucv_setup_netdevice(struct net_device *dev)
{
dev->mtu = NETIUCV_MTU_DEFAULT;
+ dev->min_mtu = 576;
+ dev->max_mtu = NETIUCV_MTU_MAX;
dev->destructor = netiucv_free_netdevice;
dev->hard_header_len = NETIUCV_HDRLEN;
dev->addr_len = 0;
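
The ctcm and netiucv hunks above, and the qeth hunks below, all apply the same conversion: MTU range checks move out of per-driver ndo_change_mtu handlers into the net core, which validates against dev->min_mtu and dev->max_mtu in dev_set_mtu() before calling into the driver at all. A sketch of the pattern for a hypothetical driver:

/*
 * Sketch of the conversion; "foo" is a hypothetical driver. Once
 * min_mtu/max_mtu are set, the core rejects out-of-range values and
 * a driver needs ndo_change_mtu only when a change requires real work.
 */
#include <linux/netdevice.h>

static void foo_setup(struct net_device *dev)
{
	dev->mtu = 1500;
	dev->min_mtu = 576;	/* replaces: if (new_mtu < 576) -EINVAL */
	dev->max_mtu = 65527;	/* replaces: if (new_mtu > 65527) -EINVAL */
}
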
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index 20cf29613043..e33558313834 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -4202,10 +4202,6 @@ int qeth_change_mtu(struct net_device *dev, int new_mtu)
sprintf(dbf_text, "%8x", new_mtu);
QETH_CARD_TEXT(card, 4, dbf_text);
- if (new_mtu < 64)
- return -EINVAL;
- if (new_mtu > 65535)
- return -EINVAL;
if ((!qeth_is_supported(card, IPA_IP_FRAGMENTATION)) &&
(!qeth_mtu_is_valid(card, new_mtu)))
return -EINVAL;
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index bb27058fa9f0..9c921c2833f1 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -1107,6 +1107,8 @@ static int qeth_l2_setup_netdev(struct qeth_card *card)
card->dev->ml_priv = card;
card->dev->watchdog_timeo = QETH_TX_TIMEOUT;
card->dev->mtu = card->info.initial_mtu;
+ card->dev->min_mtu = 64;
+ card->dev->max_mtu = ETH_MAX_MTU;
card->dev->netdev_ops = &qeth_l2_netdev_ops;
card->dev->ethtool_ops =
(card->info.type != QETH_CARD_TYPE_OSN) ?
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index 272d9e7419be..ac37d050e765 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -3140,6 +3140,8 @@ static int qeth_l3_setup_netdev(struct qeth_card *card)
card->dev->ml_priv = card;
card->dev->watchdog_timeo = QETH_TX_TIMEOUT;
card->dev->mtu = card->info.initial_mtu;
+ card->dev->min_mtu = 64;
+ card->dev->max_mtu = ETH_MAX_MTU;
card->dev->ethtool_ops = &qeth_l3_ethtool_ops;
card->dev->features |= NETIF_F_HW_VLAN_CTAG_TX |
NETIF_F_HW_VLAN_CTAG_RX |
diff --git a/drivers/s390/scsi/zfcp_dbf.c b/drivers/s390/scsi/zfcp_dbf.c
index 581001989937..d5bf36ec8a75 100644
--- a/drivers/s390/scsi/zfcp_dbf.c
+++ b/drivers/s390/scsi/zfcp_dbf.c
@@ -289,11 +289,12 @@ void zfcp_dbf_rec_trig(char *tag, struct zfcp_adapter *adapter,
/**
- * zfcp_dbf_rec_run - trace event related to running recovery
+ * zfcp_dbf_rec_run_lvl - trace event related to running recovery
+ * @level: trace level to be used for event
* @tag: identifier for event
* @erp: erp_action running
*/
-void zfcp_dbf_rec_run(char *tag, struct zfcp_erp_action *erp)
+void zfcp_dbf_rec_run_lvl(int level, char *tag, struct zfcp_erp_action *erp)
{
struct zfcp_dbf *dbf = erp->adapter->dbf;
struct zfcp_dbf_rec *rec = &dbf->rec_buf;
@@ -319,11 +320,21 @@ void zfcp_dbf_rec_run(char *tag, struct zfcp_erp_action *erp)
else
rec->u.run.rec_count = atomic_read(&erp->adapter->erp_counter);
- debug_event(dbf->rec, 1, rec, sizeof(*rec));
+ debug_event(dbf->rec, level, rec, sizeof(*rec));
spin_unlock_irqrestore(&dbf->rec_lock, flags);
}
/**
+ * zfcp_dbf_rec_run - trace event related to running recovery
+ * @tag: identifier for event
+ * @erp: erp_action running
+ */
+void zfcp_dbf_rec_run(char *tag, struct zfcp_erp_action *erp)
+{
+ zfcp_dbf_rec_run_lvl(1, tag, erp);
+}
+
+/**
* zfcp_dbf_rec_run_wka - trace wka port event with info like running recovery
* @tag: identifier for event
* @wka_port: well known address port
diff --git a/drivers/s390/scsi/zfcp_dbf.h b/drivers/s390/scsi/zfcp_dbf.h
index 36d07584271d..db186d44cfaf 100644
--- a/drivers/s390/scsi/zfcp_dbf.h
+++ b/drivers/s390/scsi/zfcp_dbf.h
@@ -2,7 +2,7 @@
* zfcp device driver
* debug feature declarations
*
- * Copyright IBM Corp. 2008, 2015
+ * Copyright IBM Corp. 2008, 2016
*/
#ifndef ZFCP_DBF_H
@@ -283,6 +283,30 @@ struct zfcp_dbf {
struct zfcp_dbf_scsi scsi_buf;
};
+/**
+ * zfcp_dbf_hba_fsf_resp_suppress - true if we should not trace by default
+ * @req: request that has been completed
+ *
+ * Returns true if FCP response with only benign residual under count.
+ */
+static inline
+bool zfcp_dbf_hba_fsf_resp_suppress(struct zfcp_fsf_req *req)
+{
+ struct fsf_qtcb *qtcb = req->qtcb;
+ u32 fsf_stat = qtcb->header.fsf_status;
+ struct fcp_resp *fcp_rsp;
+ u8 rsp_flags, fr_status;
+
+ if (qtcb->prefix.qtcb_type != FSF_IO_COMMAND)
+ return false; /* not an FCP response */
+ fcp_rsp = (struct fcp_resp *)&qtcb->bottom.io.fcp_rsp;
+ rsp_flags = fcp_rsp->fr_flags;
+ fr_status = fcp_rsp->fr_status;
+ return (fsf_stat == FSF_FCP_RSP_AVAILABLE) &&
+ (rsp_flags == FCP_RESID_UNDER) &&
+ (fr_status == SAM_STAT_GOOD);
+}
+
static inline
void zfcp_dbf_hba_fsf_resp(char *tag, int level, struct zfcp_fsf_req *req)
{
@@ -304,7 +328,9 @@ void zfcp_dbf_hba_fsf_response(struct zfcp_fsf_req *req)
zfcp_dbf_hba_fsf_resp("fs_perr", 1, req);
} else if (qtcb->header.fsf_status != FSF_GOOD) {
- zfcp_dbf_hba_fsf_resp("fs_ferr", 1, req);
+ zfcp_dbf_hba_fsf_resp("fs_ferr",
+ zfcp_dbf_hba_fsf_resp_suppress(req)
+ ? 5 : 1, req);
} else if ((req->fsf_command == FSF_QTCB_OPEN_PORT_WITH_DID) ||
(req->fsf_command == FSF_QTCB_OPEN_LUN)) {
@@ -388,4 +414,15 @@ void zfcp_dbf_scsi_devreset(char *tag, struct scsi_cmnd *scmnd, u8 flag)
_zfcp_dbf_scsi(tmp_tag, 1, scmnd, NULL);
}
+/**
+ * zfcp_dbf_scsi_nullcmnd() - trace NULLify of SCSI command in dev/tgt-reset.
+ * @scmnd: SCSI command that was NULLified.
+ * @fsf_req: request that owned @scmnd.
+ */
+static inline void zfcp_dbf_scsi_nullcmnd(struct scsi_cmnd *scmnd,
+ struct zfcp_fsf_req *fsf_req)
+{
+ _zfcp_dbf_scsi("scfc__1", 3, scmnd, fsf_req);
+}
+
#endif /* ZFCP_DBF_H */
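
The level demotion in the fs_ferr hunk above works because the s390 debug feature records an event only when its level does not exceed the debug area's current level, which defaults to 3 (an assumption based on DEBUG_DEFAULT_LEVEL); benign residual-under-count responses logged at level 5 therefore vanish from the default trace but reappear when verbosity is raised. A small model of that gate:

#include <stdio.h>

static int area_level = 3;	/* assumed default trace verbosity */

/* Model of debug_event(): record only if level <= area level. */
static void debug_event_model(int level, const char *tag)
{
	if (level <= area_level)
		printf("traced: %s\n", tag);
}

int main(void)
{
	debug_event_model(1, "fs_perr");		/* always traced */
	debug_event_model(5, "fs_ferr benign");		/* dropped by default */
	area_level = 5;					/* admin raises level */
	debug_event_model(5, "fs_ferr benign");		/* now traced */
	return 0;
}
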
diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c
index a59d678125bd..7ccfce559034 100644
--- a/drivers/s390/scsi/zfcp_erp.c
+++ b/drivers/s390/scsi/zfcp_erp.c
@@ -3,7 +3,7 @@
*
* Error Recovery Procedures (ERP).
*
- * Copyright IBM Corp. 2002, 2015
+ * Copyright IBM Corp. 2002, 2016
*/
#define KMSG_COMPONENT "zfcp"
@@ -1204,6 +1204,62 @@ static void zfcp_erp_action_dequeue(struct zfcp_erp_action *erp_action)
}
}
+/**
+ * zfcp_erp_try_rport_unblock - unblock rport if no more/new recovery
+ * @port: zfcp_port whose fc_rport we should try to unblock
+ */
+static void zfcp_erp_try_rport_unblock(struct zfcp_port *port)
+{
+ unsigned long flags;
+ struct zfcp_adapter *adapter = port->adapter;
+ int port_status;
+ struct Scsi_Host *shost = adapter->scsi_host;
+ struct scsi_device *sdev;
+
+ write_lock_irqsave(&adapter->erp_lock, flags);
+ port_status = atomic_read(&port->status);
+ if ((port_status & ZFCP_STATUS_COMMON_UNBLOCKED) == 0 ||
+ (port_status & (ZFCP_STATUS_COMMON_ERP_INUSE |
+ ZFCP_STATUS_COMMON_ERP_FAILED)) != 0) {
+ /* new ERP of severity >= port triggered elsewhere meanwhile or
+ * local link down (adapter erp_failed but not clear unblock)
+ */
+ zfcp_dbf_rec_run_lvl(4, "ertru_p", &port->erp_action);
+ write_unlock_irqrestore(&adapter->erp_lock, flags);
+ return;
+ }
+ spin_lock(shost->host_lock);
+ __shost_for_each_device(sdev, shost) {
+ struct zfcp_scsi_dev *zsdev = sdev_to_zfcp(sdev);
+ int lun_status;
+
+ if (zsdev->port != port)
+ continue;
+ /* LUN under port of interest */
+ lun_status = atomic_read(&zsdev->status);
+ if ((lun_status & ZFCP_STATUS_COMMON_ERP_FAILED) != 0)
+ continue; /* unblock rport despite failed LUNs */
+ /* LUN recovery not given up yet [maybe follow-up pending] */
+ if ((lun_status & ZFCP_STATUS_COMMON_UNBLOCKED) == 0 ||
+ (lun_status & ZFCP_STATUS_COMMON_ERP_INUSE) != 0) {
+ /* LUN blocked:
+ * not yet unblocked [LUN recovery pending]
+ * or meanwhile blocked [new LUN recovery triggered]
+ */
+ zfcp_dbf_rec_run_lvl(4, "ertru_l", &zsdev->erp_action);
+ spin_unlock(shost->host_lock);
+ write_unlock_irqrestore(&adapter->erp_lock, flags);
+ return;
+ }
+ }
+ /* now port has no child or all children have completed recovery,
+ * and no ERP of severity >= port was meanwhile triggered elsewhere
+ */
+ zfcp_scsi_schedule_rport_register(port);
+ spin_unlock(shost->host_lock);
+ write_unlock_irqrestore(&adapter->erp_lock, flags);
+}
+
static void zfcp_erp_action_cleanup(struct zfcp_erp_action *act, int result)
{
struct zfcp_adapter *adapter = act->adapter;
@@ -1214,6 +1270,7 @@ static void zfcp_erp_action_cleanup(struct zfcp_erp_action *act, int result)
case ZFCP_ERP_ACTION_REOPEN_LUN:
if (!(act->status & ZFCP_STATUS_ERP_NO_REF))
scsi_device_put(sdev);
+ zfcp_erp_try_rport_unblock(port);
break;
case ZFCP_ERP_ACTION_REOPEN_PORT:
@@ -1224,7 +1281,7 @@ static void zfcp_erp_action_cleanup(struct zfcp_erp_action *act, int result)
*/
if (act->step != ZFCP_ERP_STEP_UNINITIALIZED)
if (result == ZFCP_ERP_SUCCEEDED)
- zfcp_scsi_schedule_rport_register(port);
+ zfcp_erp_try_rport_unblock(port);
/* fall through */
case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED:
put_device(&port->dev);
diff --git a/drivers/s390/scsi/zfcp_ext.h b/drivers/s390/scsi/zfcp_ext.h
index c8fed9fa1cca..9afdbc32b23f 100644
--- a/drivers/s390/scsi/zfcp_ext.h
+++ b/drivers/s390/scsi/zfcp_ext.h
@@ -3,7 +3,7 @@
*
* External function declarations.
*
- * Copyright IBM Corp. 2002, 2015
+ * Copyright IBM Corp. 2002, 2016
*/
#ifndef ZFCP_EXT_H
@@ -35,6 +35,8 @@ extern void zfcp_dbf_adapter_unregister(struct zfcp_adapter *);
extern void zfcp_dbf_rec_trig(char *, struct zfcp_adapter *,
struct zfcp_port *, struct scsi_device *, u8, u8);
extern void zfcp_dbf_rec_run(char *, struct zfcp_erp_action *);
+extern void zfcp_dbf_rec_run_lvl(int level, char *tag,
+ struct zfcp_erp_action *erp);
extern void zfcp_dbf_rec_run_wka(char *, struct zfcp_fc_wka_port *, u64);
extern void zfcp_dbf_hba_fsf_uss(char *, struct zfcp_fsf_req *);
extern void zfcp_dbf_hba_fsf_res(char *, int, struct zfcp_fsf_req *);
@@ -84,8 +86,8 @@ extern void zfcp_fc_link_test_work(struct work_struct *);
extern void zfcp_fc_wka_ports_force_offline(struct zfcp_fc_wka_ports *);
extern int zfcp_fc_gs_setup(struct zfcp_adapter *);
extern void zfcp_fc_gs_destroy(struct zfcp_adapter *);
-extern int zfcp_fc_exec_bsg_job(struct fc_bsg_job *);
-extern int zfcp_fc_timeout_bsg_job(struct fc_bsg_job *);
+extern int zfcp_fc_exec_bsg_job(struct bsg_job *);
+extern int zfcp_fc_timeout_bsg_job(struct bsg_job *);
extern void zfcp_fc_sym_name_update(struct work_struct *);
extern unsigned int zfcp_fc_port_scan_backoff(void);
extern void zfcp_fc_conditional_port_scan(struct zfcp_adapter *);
diff --git a/drivers/s390/scsi/zfcp_fc.c b/drivers/s390/scsi/zfcp_fc.c
index 237688af179b..7331eea67435 100644
--- a/drivers/s390/scsi/zfcp_fc.c
+++ b/drivers/s390/scsi/zfcp_fc.c
@@ -13,6 +13,7 @@
#include <linux/slab.h>
#include <linux/utsname.h>
#include <linux/random.h>
+#include <linux/bsg-lib.h>
#include <scsi/fc/fc_els.h>
#include <scsi/libfc.h>
#include "zfcp_ext.h"
@@ -885,26 +886,30 @@ out_free:
static void zfcp_fc_ct_els_job_handler(void *data)
{
- struct fc_bsg_job *job = data;
+ struct bsg_job *job = data;
struct zfcp_fsf_ct_els *zfcp_ct_els = job->dd_data;
struct fc_bsg_reply *jr = job->reply;
jr->reply_payload_rcv_len = job->reply_payload.payload_len;
jr->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK;
jr->result = zfcp_ct_els->status ? -EIO : 0;
- job->job_done(job);
+ bsg_job_done(job, jr->result, jr->reply_payload_rcv_len);
}
-static struct zfcp_fc_wka_port *zfcp_fc_job_wka_port(struct fc_bsg_job *job)
+static struct zfcp_fc_wka_port *zfcp_fc_job_wka_port(struct bsg_job *job)
{
u32 preamble_word1;
u8 gs_type;
struct zfcp_adapter *adapter;
+ struct fc_bsg_request *bsg_request = job->request;
+ struct fc_rport *rport = fc_bsg_to_rport(job);
+ struct Scsi_Host *shost;
- preamble_word1 = job->request->rqst_data.r_ct.preamble_word1;
+ preamble_word1 = bsg_request->rqst_data.r_ct.preamble_word1;
gs_type = (preamble_word1 & 0xff000000) >> 24;
- adapter = (struct zfcp_adapter *) job->shost->hostdata[0];
+ shost = rport ? rport_to_shost(rport) : fc_bsg_to_shost(job);
+ adapter = (struct zfcp_adapter *) shost->hostdata[0];
switch (gs_type) {
case FC_FST_ALIAS:
@@ -924,7 +929,7 @@ static struct zfcp_fc_wka_port *zfcp_fc_job_wka_port(struct fc_bsg_job *job)
static void zfcp_fc_ct_job_handler(void *data)
{
- struct fc_bsg_job *job = data;
+ struct bsg_job *job = data;
struct zfcp_fc_wka_port *wka_port;
wka_port = zfcp_fc_job_wka_port(job);
@@ -933,11 +938,12 @@ static void zfcp_fc_ct_job_handler(void *data)
zfcp_fc_ct_els_job_handler(data);
}
-static int zfcp_fc_exec_els_job(struct fc_bsg_job *job,
+static int zfcp_fc_exec_els_job(struct bsg_job *job,
struct zfcp_adapter *adapter)
{
struct zfcp_fsf_ct_els *els = job->dd_data;
- struct fc_rport *rport = job->rport;
+ struct fc_rport *rport = fc_bsg_to_rport(job);
+ struct fc_bsg_request *bsg_request = job->request;
struct zfcp_port *port;
u32 d_id;
@@ -949,13 +955,13 @@ static int zfcp_fc_exec_els_job(struct fc_bsg_job *job,
d_id = port->d_id;
put_device(&port->dev);
} else
- d_id = ntoh24(job->request->rqst_data.h_els.port_id);
+ d_id = ntoh24(bsg_request->rqst_data.h_els.port_id);
els->handler = zfcp_fc_ct_els_job_handler;
return zfcp_fsf_send_els(adapter, d_id, els, job->req->timeout / HZ);
}
-static int zfcp_fc_exec_ct_job(struct fc_bsg_job *job,
+static int zfcp_fc_exec_ct_job(struct bsg_job *job,
struct zfcp_adapter *adapter)
{
int ret;
@@ -978,13 +984,15 @@ static int zfcp_fc_exec_ct_job(struct fc_bsg_job *job,
return ret;
}
-int zfcp_fc_exec_bsg_job(struct fc_bsg_job *job)
+int zfcp_fc_exec_bsg_job(struct bsg_job *job)
{
struct Scsi_Host *shost;
struct zfcp_adapter *adapter;
struct zfcp_fsf_ct_els *ct_els = job->dd_data;
+ struct fc_bsg_request *bsg_request = job->request;
+ struct fc_rport *rport = fc_bsg_to_rport(job);
- shost = job->rport ? rport_to_shost(job->rport) : job->shost;
+ shost = rport ? rport_to_shost(rport) : fc_bsg_to_shost(job);
adapter = (struct zfcp_adapter *)shost->hostdata[0];
if (!(atomic_read(&adapter->status) & ZFCP_STATUS_COMMON_OPEN))
@@ -994,7 +1002,7 @@ int zfcp_fc_exec_bsg_job(struct fc_bsg_job *job)
ct_els->resp = job->reply_payload.sg_list;
ct_els->handler_data = job;
- switch (job->request->msgcode) {
+ switch (bsg_request->msgcode) {
case FC_BSG_RPT_ELS:
case FC_BSG_HST_ELS_NOLOGIN:
return zfcp_fc_exec_els_job(job, adapter);
@@ -1006,7 +1014,7 @@ int zfcp_fc_exec_bsg_job(struct fc_bsg_job *job)
}
}
-int zfcp_fc_timeout_bsg_job(struct fc_bsg_job *job)
+int zfcp_fc_timeout_bsg_job(struct bsg_job *job)
{
/* hardware tracks timeout, reset bsg timeout to not interfere */
return -EAGAIN;
diff --git a/drivers/s390/scsi/zfcp_fsf.h b/drivers/s390/scsi/zfcp_fsf.h
index be1c04b334c5..ea3c76ac0de1 100644
--- a/drivers/s390/scsi/zfcp_fsf.h
+++ b/drivers/s390/scsi/zfcp_fsf.h
@@ -3,7 +3,7 @@
*
* Interface to the FSF support functions.
*
- * Copyright IBM Corp. 2002, 2015
+ * Copyright IBM Corp. 2002, 2016
*/
#ifndef FSF_H
@@ -78,6 +78,7 @@
#define FSF_APP_TAG_CHECK_FAILURE 0x00000082
#define FSF_REF_TAG_CHECK_FAILURE 0x00000083
#define FSF_ADAPTER_STATUS_AVAILABLE 0x000000AD
+#define FSF_FCP_RSP_AVAILABLE 0x000000AF
#define FSF_UNKNOWN_COMMAND 0x000000E2
#define FSF_UNKNOWN_OP_SUBTYPE 0x000000E3
#define FSF_INVALID_COMMAND_OPTION 0x000000E5
diff --git a/drivers/s390/scsi/zfcp_reqlist.h b/drivers/s390/scsi/zfcp_reqlist.h
index 7c2c6194dfca..703fce59befe 100644
--- a/drivers/s390/scsi/zfcp_reqlist.h
+++ b/drivers/s390/scsi/zfcp_reqlist.h
@@ -4,7 +4,7 @@
* Data structure and helper functions for tracking pending FSF
* requests.
*
- * Copyright IBM Corp. 2009
+ * Copyright IBM Corp. 2009, 2016
*/
#ifndef ZFCP_REQLIST_H
@@ -180,4 +180,32 @@ static inline void zfcp_reqlist_move(struct zfcp_reqlist *rl,
spin_unlock_irqrestore(&rl->lock, flags);
}
+/**
+ * zfcp_reqlist_apply_for_all() - apply a function to every request.
+ * @rl: the request list that contains the target requests.
+ * @f: the function to apply to each request; the first parameter of the
+ * function is the target request; the second parameter is the same
+ * pointer as given with the argument @data.
+ * @data: freely chosen argument; passed through to @f as second parameter.
+ *
+ * Uses :c:macro:`list_for_each_entry` to iterate over the lists in the
+ * hash table (not a 'safe' variant, so the callback must not modify the
+ * list).
+ *
+ * Holds @rl->lock over the entire request iteration.
+ */
+static inline void
+zfcp_reqlist_apply_for_all(struct zfcp_reqlist *rl,
+ void (*f)(struct zfcp_fsf_req *, void *), void *data)
+{
+ struct zfcp_fsf_req *req;
+ unsigned long flags;
+ unsigned int i;
+
+ spin_lock_irqsave(&rl->lock, flags);
+ for (i = 0; i < ZFCP_REQ_LIST_BUCKETS; i++)
+ list_for_each_entry(req, &rl->buckets[i], list)
+ f(req, data);
+ spin_unlock_irqrestore(&rl->lock, flags);
+}
+
#endif /* ZFCP_REQLIST_H */
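
Because @rl->lock is held across the whole walk and the iterator is not the _safe variant, a callback passed to zfcp_reqlist_apply_for_all() must neither sleep nor unlink requests. A minimal usage sketch under those constraints; the names are hypothetical:

static void example_count_one(struct zfcp_fsf_req *req, void *data)
{
	unsigned int *count = data;	/* must not sleep or modify the list */

	(*count)++;
}

static unsigned int example_count_pending(struct zfcp_reqlist *rl)
{
	unsigned int count = 0;

	zfcp_reqlist_apply_for_all(rl, example_count_one, &count);
	return count;
}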
diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c
index 9069f98a1817..07ffdbb5107f 100644
--- a/drivers/s390/scsi/zfcp_scsi.c
+++ b/drivers/s390/scsi/zfcp_scsi.c
@@ -3,7 +3,7 @@
*
* Interface to Linux SCSI midlayer.
*
- * Copyright IBM Corp. 2002, 2015
+ * Copyright IBM Corp. 2002, 2016
*/
#define KMSG_COMPONENT "zfcp"
@@ -88,9 +88,7 @@ int zfcp_scsi_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scpnt)
}
if (unlikely(!(status & ZFCP_STATUS_COMMON_UNBLOCKED))) {
- /* This could be either
- * open LUN pending: this is temporary, will result in
- * open LUN or ERP_FAILED, so retry command
+ /* This could be
* call to rport_delete pending: mimic retry from
* fc_remote_port_chkready until rport is BLOCKED
*/
@@ -209,6 +207,57 @@ static int zfcp_scsi_eh_abort_handler(struct scsi_cmnd *scpnt)
return retval;
}
+struct zfcp_scsi_req_filter {
+ u8 tmf_scope;
+ u32 lun_handle;
+ u32 port_handle;
+};
+
+static void zfcp_scsi_forget_cmnd(struct zfcp_fsf_req *old_req, void *data)
+{
+ struct zfcp_scsi_req_filter *filter =
+ (struct zfcp_scsi_req_filter *)data;
+
+ /* already aborted (prevent side effects) or not a SCSI command */
+ if (old_req->data == NULL || old_req->fsf_command != FSF_QTCB_FCP_CMND)
+ return;
+
+ /* (tmf_scope == FCP_TMF_TGT_RESET || tmf_scope == FCP_TMF_LUN_RESET) */
+ if (old_req->qtcb->header.port_handle != filter->port_handle)
+ return;
+
+ if (filter->tmf_scope == FCP_TMF_LUN_RESET &&
+ old_req->qtcb->header.lun_handle != filter->lun_handle)
+ return;
+
+ zfcp_dbf_scsi_nullcmnd((struct scsi_cmnd *)old_req->data, old_req);
+ old_req->data = NULL;
+}
+
+static void zfcp_scsi_forget_cmnds(struct zfcp_scsi_dev *zsdev, u8 tm_flags)
+{
+ struct zfcp_adapter *adapter = zsdev->port->adapter;
+ struct zfcp_scsi_req_filter filter = {
+ .tmf_scope = FCP_TMF_TGT_RESET,
+ .port_handle = zsdev->port->handle,
+ };
+ unsigned long flags;
+
+ if (tm_flags == FCP_TMF_LUN_RESET) {
+ filter.tmf_scope = FCP_TMF_LUN_RESET;
+ filter.lun_handle = zsdev->lun_handle;
+ }
+
+ /*
+ * abort_lock protects (struct zfcp_fsf_req *)->data against concurrent
+ * processing in the abort handler and the normal command handler
+ */
+ write_lock_irqsave(&adapter->abort_lock, flags);
+ zfcp_reqlist_apply_for_all(adapter->req_list, zfcp_scsi_forget_cmnd,
+ &filter);
+ write_unlock_irqrestore(&adapter->abort_lock, flags);
+}
+
static int zfcp_task_mgmt_function(struct scsi_cmnd *scpnt, u8 tm_flags)
{
struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(scpnt->device);
@@ -241,8 +290,10 @@ static int zfcp_task_mgmt_function(struct scsi_cmnd *scpnt, u8 tm_flags)
if (fsf_req->status & ZFCP_STATUS_FSFREQ_TMFUNCFAILED) {
zfcp_dbf_scsi_devreset("fail", scpnt, tm_flags);
retval = FAILED;
- } else
+ } else {
zfcp_dbf_scsi_devreset("okay", scpnt, tm_flags);
+ zfcp_scsi_forget_cmnds(zfcp_sdev, tm_flags);
+ }
zfcp_fsf_req_free(fsf_req);
return retval;
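
The filter narrows the walk to commands still owned by the scope that the task management function just reset: a target reset matches on the port handle alone, a LUN reset additionally on the LUN handle. A sketch of that predicate in isolation, mirroring the checks in zfcp_scsi_forget_cmnd(); the function itself is illustrative:

static bool example_req_in_tmf_scope(const struct zfcp_fsf_req *req,
				     const struct zfcp_scsi_req_filter *filter)
{
	if (req->qtcb->header.port_handle != filter->port_handle)
		return false;			/* different remote port */
	if (filter->tmf_scope == FCP_TMF_LUN_RESET &&
	    req->qtcb->header.lun_handle != filter->lun_handle)
		return false;			/* LUN reset, other LUN */
	return true;
}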
diff --git a/drivers/s390/virtio/virtio_ccw.c b/drivers/s390/virtio/virtio_ccw.c
index 8688ad4c825f..639ed4e6afd1 100644
--- a/drivers/s390/virtio/virtio_ccw.c
+++ b/drivers/s390/virtio/virtio_ccw.c
@@ -24,7 +24,7 @@
#include <linux/wait.h>
#include <linux/list.h>
#include <linux/bitops.h>
-#include <linux/module.h>
+#include <linux/moduleparam.h>
#include <linux/io.h>
#include <linux/kvm_para.h>
#include <linux/notifier.h>
@@ -235,16 +235,6 @@ static struct airq_info *new_airq_info(void)
return info;
}
-static void destroy_airq_info(struct airq_info *info)
-{
- if (!info)
- return;
-
- unregister_adapter_interrupt(&info->airq);
- airq_iv_release(info->aiv);
- kfree(info);
-}
-
static unsigned long get_airq_indicator(struct virtqueue *vqs[], int nvqs,
u64 *first, void **airq_info)
{
@@ -1294,7 +1284,6 @@ static struct ccw_device_id virtio_ids[] = {
{ CCW_DEVICE(0x3832, 0) },
{},
};
-MODULE_DEVICE_TABLE(ccw, virtio_ids);
static struct ccw_driver virtio_ccw_driver = {
.driver = {
@@ -1406,14 +1395,4 @@ static int __init virtio_ccw_init(void)
no_auto_parse();
return ccw_driver_register(&virtio_ccw_driver);
}
-module_init(virtio_ccw_init);
-
-static void __exit virtio_ccw_exit(void)
-{
- int i;
-
- ccw_driver_unregister(&virtio_ccw_driver);
- for (i = 0; i < MAX_AIRQ_AREAS; i++)
- destroy_airq_info(airq_areas[i]);
-}
-module_exit(virtio_ccw_exit);
+device_initcall(virtio_ccw_init);
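
Since this driver can only be built in (its Kconfig option is boolean), the module plumbing (device table, exit path, airq teardown) is dead weight and registration happens once at boot. The built-in-only shape, as a sketch:

/* built-in only: no module_exit(), no MODULE_DEVICE_TABLE() */
static int __init example_builtin_init(void)
{
	return ccw_driver_register(&virtio_ccw_driver);
}
device_initcall(example_builtin_init);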
diff --git a/drivers/scsi/3w-9xxx.c b/drivers/scsi/3w-9xxx.c
index a56a7b243e91..316f87fe3299 100644
--- a/drivers/scsi/3w-9xxx.c
+++ b/drivers/scsi/3w-9xxx.c
@@ -1,8 +1,8 @@
/*
3w-9xxx.c -- 3ware 9000 Storage Controller device driver for Linux.
- Written By: Adam Radford <linuxraid@lsi.com>
- Modifications By: Tom Couch <linuxraid@lsi.com>
+ Written By: Adam Radford <aradford@gmail.com>
+ Modifications By: Tom Couch
Copyright (C) 2004-2009 Applied Micro Circuits Corporation.
Copyright (C) 2010 LSI Corporation.
@@ -41,10 +41,7 @@
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
Bugs/Comments/Suggestions should be mailed to:
- linuxraid@lsi.com
-
- For more information, goto:
- http://www.lsi.com
+ aradford@gmail.com
Note: This version of the driver does not contain a bundled firmware
image.
diff --git a/drivers/scsi/3w-9xxx.h b/drivers/scsi/3w-9xxx.h
index 0fdc83cfa0e1..b6c208cc474f 100644
--- a/drivers/scsi/3w-9xxx.h
+++ b/drivers/scsi/3w-9xxx.h
@@ -1,8 +1,8 @@
/*
3w-9xxx.h -- 3ware 9000 Storage Controller device driver for Linux.
- Written By: Adam Radford <linuxraid@lsi.com>
- Modifications By: Tom Couch <linuxraid@lsi.com>
+ Written By: Adam Radford <aradford@gmail.com>
+ Modifications By: Tom Couch
Copyright (C) 2004-2009 Applied Micro Circuits Corporation.
Copyright (C) 2010 LSI Corporation.
@@ -41,10 +41,7 @@
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
Bugs/Comments/Suggestions should be mailed to:
- linuxraid@lsi.com
-
- For more information, goto:
- http://www.lsi.com
+ aradford@gmail.com
*/
#ifndef _3W_9XXX_H
diff --git a/drivers/scsi/3w-sas.c b/drivers/scsi/3w-sas.c
index f8374850f714..970d8fa6bd53 100644
--- a/drivers/scsi/3w-sas.c
+++ b/drivers/scsi/3w-sas.c
@@ -1,7 +1,7 @@
/*
3w-sas.c -- LSI 3ware SAS/SATA-RAID Controller device driver for Linux.
- Written By: Adam Radford <linuxraid@lsi.com>
+ Written By: Adam Radford <aradford@gmail.com>
Copyright (C) 2009 LSI Corporation.
@@ -43,10 +43,7 @@
LSI 3ware 9750 6Gb/s SAS/SATA-RAID
Bugs/Comments/Suggestions should be mailed to:
- linuxraid@lsi.com
-
- For more information, goto:
- http://www.lsi.com
+ aradford@gmail.com
History
-------
diff --git a/drivers/scsi/3w-sas.h b/drivers/scsi/3w-sas.h
index fec6449c7595..05e77d84c16d 100644
--- a/drivers/scsi/3w-sas.h
+++ b/drivers/scsi/3w-sas.h
@@ -1,7 +1,7 @@
/*
3w-sas.h -- LSI 3ware SAS/SATA-RAID Controller device driver for Linux.
- Written By: Adam Radford <linuxraid@lsi.com>
+ Written By: Adam Radford <aradford@gmail.com>
Copyright (C) 2009 LSI Corporation.
@@ -39,10 +39,7 @@
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
Bugs/Comments/Suggestions should be mailed to:
- linuxraid@lsi.com
-
- For more information, goto:
- http://www.lsi.com
+ aradford@gmail.com
*/
#ifndef _3W_SAS_H
diff --git a/drivers/scsi/3w-xxxx.c b/drivers/scsi/3w-xxxx.c
index 25aba1613e21..aa412ab02765 100644
--- a/drivers/scsi/3w-xxxx.c
+++ b/drivers/scsi/3w-xxxx.c
@@ -1,7 +1,7 @@
/*
3w-xxxx.c -- 3ware Storage Controller device driver for Linux.
- Written By: Adam Radford <linuxraid@lsi.com>
+ Written By: Adam Radford <aradford@gmail.com>
Modifications By: Joel Jacobson <linux@3ware.com>
Arnaldo Carvalho de Melo <acme@conectiva.com.br>
Brad Strand <linux@3ware.com>
@@ -47,10 +47,9 @@
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
Bugs/Comments/Suggestions should be mailed to:
- linuxraid@lsi.com
- For more information, goto:
- http://www.lsi.com
+ aradford@gmail.com
+
History
-------
diff --git a/drivers/scsi/3w-xxxx.h b/drivers/scsi/3w-xxxx.h
index 6f65e663d393..69e80c1ed1ca 100644
--- a/drivers/scsi/3w-xxxx.h
+++ b/drivers/scsi/3w-xxxx.h
@@ -1,7 +1,7 @@
/*
3w-xxxx.h -- 3ware Storage Controller device driver for Linux.
- Written By: Adam Radford <linuxraid@lsi.com>
+ Written By: Adam Radford <aradford@gmail.com>
Modifications By: Joel Jacobson <linux@3ware.com>
Arnaldo Carvalho de Melo <acme@conectiva.com.br>
Brad Strand <linux@3ware.com>
@@ -45,7 +45,8 @@
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
Bugs/Comments/Suggestions should be mailed to:
- linuxraid@lsi.com
+
+ aradford@gmail.com
For more information, goto:
http://www.lsi.com
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index 3e2bdb90813c..a4f6b0d95515 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -263,6 +263,7 @@ config SCSI_SPI_ATTRS
config SCSI_FC_ATTRS
tristate "FiberChannel Transport Attributes"
depends on SCSI && NET
+ select BLK_DEV_BSGLIB
select SCSI_NETLINK
help
If you wish to export transport-specific information about
@@ -743,40 +744,18 @@ config SCSI_ISCI
control unit found in the Intel(R) C600 series chipset.
config SCSI_GENERIC_NCR5380
- tristate "Generic NCR5380/53c400 SCSI PIO support"
- depends on ISA && SCSI
+ tristate "Generic NCR5380/53c400 SCSI ISA card support"
+ depends on ISA && SCSI && HAS_IOPORT_MAP
select SCSI_SPI_ATTRS
---help---
- This is a driver for the old NCR 53c80 series of SCSI controllers
- on boards using PIO. Most boards such as the Trantor T130 fit this
- category, along with a large number of ISA 8bit controllers shipped
- for free with SCSI scanners. If you have a PAS16, T128 or DMX3191
- you should select the specific driver for that card rather than
- generic 5380 support.
-
- It is explained in section 3.8 of the SCSI-HOWTO, available from
- <http://www.tldp.org/docs.html#howto>. If it doesn't work out
- of the box, you may have to change some settings in
- <file:drivers/scsi/g_NCR5380.h>.
+ This is a driver for old ISA card SCSI controllers based on a
+ NCR 5380, 53C80, 53C400, 53C400A, or DTC 436 device.
+ Most boards such as the Trantor T130 fit this category, as do
+ various 8-bit and 16-bit ISA cards bundled with SCSI scanners.
To compile this driver as a module, choose M here: the
module will be called g_NCR5380.
-config SCSI_GENERIC_NCR5380_MMIO
- tristate "Generic NCR5380/53c400 SCSI MMIO support"
- depends on ISA && SCSI
- select SCSI_SPI_ATTRS
- ---help---
- This is a driver for the old NCR 53c80 series of SCSI controllers
- on boards using memory mapped I/O.
- It is explained in section 3.8 of the SCSI-HOWTO, available from
- <http://www.tldp.org/docs.html#howto>. If it doesn't work out
- of the box, you may have to change some settings in
- <file:drivers/scsi/g_NCR5380.h>.
-
- To compile this driver as a module, choose M here: the
- module will be called g_NCR5380_mmio.
-
config SCSI_IPS
tristate "IBM ServeRAID support"
depends on PCI && SCSI
@@ -1254,6 +1233,7 @@ config SCSI_QLOGICPTI
source "drivers/scsi/qla2xxx/Kconfig"
source "drivers/scsi/qla4xxx/Kconfig"
+source "drivers/scsi/qedi/Kconfig"
config SCSI_LPFC
tristate "Emulex LightPulse Fibre Channel Support"
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
index 38d938d7fe67..736b77414a4b 100644
--- a/drivers/scsi/Makefile
+++ b/drivers/scsi/Makefile
@@ -74,7 +74,6 @@ obj-$(CONFIG_SCSI_ISCI) += isci/
obj-$(CONFIG_SCSI_IPS) += ips.o
obj-$(CONFIG_SCSI_FUTURE_DOMAIN)+= fdomain.o
obj-$(CONFIG_SCSI_GENERIC_NCR5380) += g_NCR5380.o
-obj-$(CONFIG_SCSI_GENERIC_NCR5380_MMIO) += g_NCR5380_mmio.o
obj-$(CONFIG_SCSI_NCR53C406A) += NCR53c406a.o
obj-$(CONFIG_SCSI_NCR_D700) += 53c700.o NCR_D700.o
obj-$(CONFIG_SCSI_NCR_Q720) += NCR_Q720_mod.o
@@ -132,6 +131,7 @@ obj-$(CONFIG_PS3_ROM) += ps3rom.o
obj-$(CONFIG_SCSI_CXGB3_ISCSI) += libiscsi.o libiscsi_tcp.o cxgbi/
obj-$(CONFIG_SCSI_CXGB4_ISCSI) += libiscsi.o libiscsi_tcp.o cxgbi/
obj-$(CONFIG_SCSI_BNX2_ISCSI) += libiscsi.o bnx2i/
+obj-$(CONFIG_QEDI) += libiscsi.o qedi/
obj-$(CONFIG_BE2ISCSI) += libiscsi.o be2iscsi/
obj-$(CONFIG_SCSI_ESAS2R) += esas2r/
obj-$(CONFIG_SCSI_PMCRAID) += pmcraid.o
@@ -173,6 +173,7 @@ hv_storvsc-y := storvsc_drv.o
sd_mod-objs := sd.o
sd_mod-$(CONFIG_BLK_DEV_INTEGRITY) += sd_dif.o
+sd_mod-$(CONFIG_BLK_DEV_ZONED) += sd_zbc.o
sr_mod-objs := sr.o sr_ioctl.o sr_vendor.o
ncr53c8xx-flags-$(CONFIG_SCSI_ZALON) \
diff --git a/drivers/scsi/NCR5380.c b/drivers/scsi/NCR5380.c
index 790babc5ef66..4f5ca794bb71 100644
--- a/drivers/scsi/NCR5380.c
+++ b/drivers/scsi/NCR5380.c
@@ -97,9 +97,6 @@
* and macros and include this file in your driver.
*
* These macros control options :
- * AUTOPROBE_IRQ - if defined, the NCR5380_probe_irq() function will be
- * defined.
- *
* AUTOSENSE - if defined, REQUEST SENSE will be performed automatically
* for commands that return with a CHECK CONDITION status.
*
@@ -121,14 +118,13 @@
*
* Either real DMA *or* pseudo DMA may be implemented
*
- * NCR5380_dma_write_setup(instance, src, count) - initialize
- * NCR5380_dma_read_setup(instance, dst, count) - initialize
- * NCR5380_dma_residual(instance); - residual count
+ * NCR5380_dma_xfer_len - determine size of DMA/PDMA transfer
+ * NCR5380_dma_send_setup - execute DMA/PDMA from memory to 5380
+ * NCR5380_dma_recv_setup - execute DMA/PDMA from 5380 to memory
+ * NCR5380_dma_residual - residual byte count
*
* The generic driver is initialized by calling NCR5380_init(instance),
- * after setting the appropriate host specific fields and ID. If the
- * driver wishes to autoprobe for an IRQ line, the NCR5380_probe_irq(instance,
- * possible) function may be used.
+ * after setting the appropriate host specific fields and ID.
*/
#ifndef NCR5380_io_delay
@@ -178,7 +174,7 @@ static inline void initialize_SCp(struct scsi_cmnd *cmd)
/**
* NCR5380_poll_politely2 - wait for two chip register values
- * @instance: controller to poll
+ * @hostdata: host private data
* @reg1: 5380 register to poll
* @bit1: Bitmask to check
* @val1: Expected value
@@ -195,18 +191,14 @@ static inline void initialize_SCp(struct scsi_cmnd *cmd)
* Returns 0 if either or both event(s) occurred otherwise -ETIMEDOUT.
*/
-static int NCR5380_poll_politely2(struct Scsi_Host *instance,
- int reg1, int bit1, int val1,
- int reg2, int bit2, int val2, int wait)
+static int NCR5380_poll_politely2(struct NCR5380_hostdata *hostdata,
+ unsigned int reg1, u8 bit1, u8 val1,
+ unsigned int reg2, u8 bit2, u8 val2,
+ unsigned long wait)
{
- struct NCR5380_hostdata *hostdata = shost_priv(instance);
+ unsigned long n = hostdata->poll_loops;
unsigned long deadline = jiffies + wait;
- unsigned long n;
- /* Busy-wait for up to 10 ms */
- n = min(10000U, jiffies_to_usecs(wait));
- n *= hostdata->accesses_per_ms;
- n /= 2000;
do {
if ((NCR5380_read(reg1) & bit1) == val1)
return 0;
@@ -288,6 +280,7 @@ mrs[] = {
static void NCR5380_print(struct Scsi_Host *instance)
{
+ struct NCR5380_hostdata *hostdata = shost_priv(instance);
unsigned char status, data, basr, mr, icr, i;
data = NCR5380_read(CURRENT_SCSI_DATA_REG);
@@ -337,6 +330,7 @@ static struct {
static void NCR5380_print_phase(struct Scsi_Host *instance)
{
+ struct NCR5380_hostdata *hostdata = shost_priv(instance);
unsigned char status;
int i;
@@ -352,76 +346,6 @@ static void NCR5380_print_phase(struct Scsi_Host *instance)
}
#endif
-
-static int probe_irq;
-
-/**
- * probe_intr - helper for IRQ autoprobe
- * @irq: interrupt number
- * @dev_id: unused
- * @regs: unused
- *
- * Set a flag to indicate the IRQ in question was received. This is
- * used by the IRQ probe code.
- */
-
-static irqreturn_t probe_intr(int irq, void *dev_id)
-{
- probe_irq = irq;
- return IRQ_HANDLED;
-}
-
-/**
- * NCR5380_probe_irq - find the IRQ of an NCR5380
- * @instance: NCR5380 controller
- * @possible: bitmask of ISA IRQ lines
- *
- * Autoprobe for the IRQ line used by the NCR5380 by triggering an IRQ
- * and then looking to see what interrupt actually turned up.
- */
-
-static int __maybe_unused NCR5380_probe_irq(struct Scsi_Host *instance,
- int possible)
-{
- struct NCR5380_hostdata *hostdata = shost_priv(instance);
- unsigned long timeout;
- int trying_irqs, i, mask;
-
- for (trying_irqs = 0, i = 1, mask = 2; i < 16; ++i, mask <<= 1)
- if ((mask & possible) && (request_irq(i, &probe_intr, 0, "NCR-probe", NULL) == 0))
- trying_irqs |= mask;
-
- timeout = jiffies + msecs_to_jiffies(250);
- probe_irq = NO_IRQ;
-
- /*
- * A interrupt is triggered whenever BSY = false, SEL = true
- * and a bit set in the SELECT_ENABLE_REG is asserted on the
- * SCSI bus.
- *
- * Note that the bus is only driven when the phase control signals
- * (I/O, C/D, and MSG) match those in the TCR, so we must reset that
- * to zero.
- */
-
- NCR5380_write(TARGET_COMMAND_REG, 0);
- NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
- NCR5380_write(OUTPUT_DATA_REG, hostdata->id_mask);
- NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_DATA | ICR_ASSERT_SEL);
-
- while (probe_irq == NO_IRQ && time_before(jiffies, timeout))
- schedule_timeout_uninterruptible(1);
-
- NCR5380_write(SELECT_ENABLE_REG, 0);
- NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
-
- for (i = 1, mask = 2; i < 16; ++i, mask <<= 1)
- if (trying_irqs & mask)
- free_irq(i, NULL);
-
- return probe_irq;
-}
-
/**
* NCR58380_info - report driver and host information
* @instance: relevant scsi host instance
@@ -441,14 +365,14 @@ static void prepare_info(struct Scsi_Host *instance)
struct NCR5380_hostdata *hostdata = shost_priv(instance);
snprintf(hostdata->info, sizeof(hostdata->info),
- "%s, io_port 0x%lx, n_io_port %d, "
- "base 0x%lx, irq %d, "
+ "%s, irq %d, "
+ "io_port 0x%lx, base 0x%lx, "
"can_queue %d, cmd_per_lun %d, "
"sg_tablesize %d, this_id %d, "
"flags { %s%s%s}, "
"options { %s} ",
- instance->hostt->name, instance->io_port, instance->n_io_port,
- instance->base, instance->irq,
+ instance->hostt->name, instance->irq,
+ hostdata->io_port, hostdata->base,
instance->can_queue, instance->cmd_per_lun,
instance->sg_tablesize, instance->this_id,
hostdata->flags & FLAG_DMA_FIXUP ? "DMA_FIXUP " : "",
@@ -482,6 +406,7 @@ static int NCR5380_init(struct Scsi_Host *instance, int flags)
struct NCR5380_hostdata *hostdata = shost_priv(instance);
int i;
unsigned long deadline;
+ unsigned long accesses_per_ms;
instance->max_lun = 7;
@@ -530,7 +455,8 @@ static int NCR5380_init(struct Scsi_Host *instance, int flags)
++i;
cpu_relax();
} while (time_is_after_jiffies(deadline));
- hostdata->accesses_per_ms = i / 256;
+ accesses_per_ms = i / 256;
+ hostdata->poll_loops = NCR5380_REG_POLL_TIME * accesses_per_ms / 2;
return 0;
}
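
The calibration loop above spins on a chip register for a fixed window (256 ms, per the i / 256 divisor), so accesses_per_ms approximates register reads per millisecond; poll_loops then pre-computes the busy-wait budget for NCR5380_REG_POLL_TIME milliseconds, halved presumably because NCR5380_poll_politely2() reads two registers per iteration. A worked sketch of the arithmetic with illustrative numbers only:

unsigned long i = 4000;				/* reads counted in the 256 ms window */
unsigned long accesses_per_ms = i / 256;	/* ~15 accesses per ms */
unsigned long poll_loops =
	NCR5380_REG_POLL_TIME * accesses_per_ms / 2;	/* 15 * 15 / 2 = 112 */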
@@ -560,7 +486,7 @@ static int NCR5380_maybe_reset_bus(struct Scsi_Host *instance)
case 3:
case 5:
shost_printk(KERN_ERR, instance, "SCSI bus busy, waiting up to five seconds\n");
- NCR5380_poll_politely(instance,
+ NCR5380_poll_politely(hostdata,
STATUS_REG, SR_BSY, 0, 5 * HZ);
break;
case 2:
@@ -871,7 +797,7 @@ static void NCR5380_dma_complete(struct Scsi_Host *instance)
NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
NCR5380_read(RESET_PARITY_INTERRUPT_REG);
- transferred = hostdata->dma_len - NCR5380_dma_residual(instance);
+ transferred = hostdata->dma_len - NCR5380_dma_residual(hostdata);
hostdata->dma_len = 0;
data = (unsigned char **)&hostdata->connected->SCp.ptr;
@@ -994,7 +920,7 @@ static irqreturn_t __maybe_unused NCR5380_intr(int irq, void *dev_id)
}
handled = 1;
} else {
- shost_printk(KERN_NOTICE, instance, "interrupt without IRQ bit\n");
+ dsprintk(NDEBUG_INTR, instance, "interrupt without IRQ bit\n");
#ifdef SUN3_SCSI_VME
dregs->csr |= CSR_DMA_ENABLE;
#endif
@@ -1075,7 +1001,7 @@ static struct scsi_cmnd *NCR5380_select(struct Scsi_Host *instance,
*/
spin_unlock_irq(&hostdata->lock);
- err = NCR5380_poll_politely2(instance, MODE_REG, MR_ARBITRATE, 0,
+ err = NCR5380_poll_politely2(hostdata, MODE_REG, MR_ARBITRATE, 0,
INITIATOR_COMMAND_REG, ICR_ARBITRATION_PROGRESS,
ICR_ARBITRATION_PROGRESS, HZ);
spin_lock_irq(&hostdata->lock);
@@ -1201,7 +1127,7 @@ static struct scsi_cmnd *NCR5380_select(struct Scsi_Host *instance,
* selection.
*/
- err = NCR5380_poll_politely(instance, STATUS_REG, SR_BSY, SR_BSY,
+ err = NCR5380_poll_politely(hostdata, STATUS_REG, SR_BSY, SR_BSY,
msecs_to_jiffies(250));
if ((NCR5380_read(STATUS_REG) & (SR_SEL | SR_IO)) == (SR_SEL | SR_IO)) {
@@ -1247,7 +1173,7 @@ static struct scsi_cmnd *NCR5380_select(struct Scsi_Host *instance,
/* Wait for start of REQ/ACK handshake */
- err = NCR5380_poll_politely(instance, STATUS_REG, SR_REQ, SR_REQ, HZ);
+ err = NCR5380_poll_politely(hostdata, STATUS_REG, SR_REQ, SR_REQ, HZ);
spin_lock_irq(&hostdata->lock);
if (err < 0) {
shost_printk(KERN_ERR, instance, "select: REQ timeout\n");
@@ -1318,6 +1244,7 @@ static int NCR5380_transfer_pio(struct Scsi_Host *instance,
unsigned char *phase, int *count,
unsigned char **data)
{
+ struct NCR5380_hostdata *hostdata = shost_priv(instance);
unsigned char p = *phase, tmp;
int c = *count;
unsigned char *d = *data;
@@ -1336,7 +1263,7 @@ static int NCR5380_transfer_pio(struct Scsi_Host *instance,
* valid
*/
- if (NCR5380_poll_politely(instance, STATUS_REG, SR_REQ, SR_REQ, HZ) < 0)
+ if (NCR5380_poll_politely(hostdata, STATUS_REG, SR_REQ, SR_REQ, HZ) < 0)
break;
dsprintk(NDEBUG_HANDSHAKE, instance, "REQ asserted\n");
@@ -1381,7 +1308,7 @@ static int NCR5380_transfer_pio(struct Scsi_Host *instance,
NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ACK);
}
- if (NCR5380_poll_politely(instance,
+ if (NCR5380_poll_politely(hostdata,
STATUS_REG, SR_REQ, 0, 5 * HZ) < 0)
break;
@@ -1440,6 +1367,7 @@ static int NCR5380_transfer_pio(struct Scsi_Host *instance,
static void do_reset(struct Scsi_Host *instance)
{
+ struct NCR5380_hostdata __maybe_unused *hostdata = shost_priv(instance);
unsigned long flags;
local_irq_save(flags);
@@ -1462,6 +1390,7 @@ static void do_reset(struct Scsi_Host *instance)
static int do_abort(struct Scsi_Host *instance)
{
+ struct NCR5380_hostdata *hostdata = shost_priv(instance);
unsigned char *msgptr, phase, tmp;
int len;
int rc;
@@ -1479,7 +1408,7 @@ static int do_abort(struct Scsi_Host *instance)
* the target sees, so we just handshake.
*/
- rc = NCR5380_poll_politely(instance, STATUS_REG, SR_REQ, SR_REQ, 10 * HZ);
+ rc = NCR5380_poll_politely(hostdata, STATUS_REG, SR_REQ, SR_REQ, 10 * HZ);
if (rc < 0)
goto timeout;
@@ -1490,7 +1419,7 @@ static int do_abort(struct Scsi_Host *instance)
if (tmp != PHASE_MSGOUT) {
NCR5380_write(INITIATOR_COMMAND_REG,
ICR_BASE | ICR_ASSERT_ATN | ICR_ASSERT_ACK);
- rc = NCR5380_poll_politely(instance, STATUS_REG, SR_REQ, 0, 3 * HZ);
+ rc = NCR5380_poll_politely(hostdata, STATUS_REG, SR_REQ, 0, 3 * HZ);
if (rc < 0)
goto timeout;
NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ATN);
@@ -1575,9 +1504,9 @@ static int NCR5380_transfer_dma(struct Scsi_Host *instance,
* starting the NCR. This is also the cleaner way for the TT.
*/
if (p & SR_IO)
- result = NCR5380_dma_recv_setup(instance, d, c);
+ result = NCR5380_dma_recv_setup(hostdata, d, c);
else
- result = NCR5380_dma_send_setup(instance, d, c);
+ result = NCR5380_dma_send_setup(hostdata, d, c);
}
/*
@@ -1609,9 +1538,9 @@ static int NCR5380_transfer_dma(struct Scsi_Host *instance,
* NCR access, else the DMA setup gets trashed!
*/
if (p & SR_IO)
- result = NCR5380_dma_recv_setup(instance, d, c);
+ result = NCR5380_dma_recv_setup(hostdata, d, c);
else
- result = NCR5380_dma_send_setup(instance, d, c);
+ result = NCR5380_dma_send_setup(hostdata, d, c);
}
/* On failure, NCR5380_dma_xxxx_setup() returns a negative int. */
@@ -1678,12 +1607,12 @@ static int NCR5380_transfer_dma(struct Scsi_Host *instance,
* byte.
*/
- if (NCR5380_poll_politely(instance, BUS_AND_STATUS_REG,
+ if (NCR5380_poll_politely(hostdata, BUS_AND_STATUS_REG,
BASR_DRQ, BASR_DRQ, HZ) < 0) {
result = -1;
shost_printk(KERN_ERR, instance, "PDMA read: DRQ timeout\n");
}
- if (NCR5380_poll_politely(instance, STATUS_REG,
+ if (NCR5380_poll_politely(hostdata, STATUS_REG,
SR_REQ, 0, HZ) < 0) {
result = -1;
shost_printk(KERN_ERR, instance, "PDMA read: !REQ timeout\n");
@@ -1694,7 +1623,7 @@ static int NCR5380_transfer_dma(struct Scsi_Host *instance,
* Wait for the last byte to be sent. If REQ is being asserted for
* the byte we're interested, we'll ACK it and it will go false.
*/
- if (NCR5380_poll_politely2(instance,
+ if (NCR5380_poll_politely2(hostdata,
BUS_AND_STATUS_REG, BASR_DRQ, BASR_DRQ,
BUS_AND_STATUS_REG, BASR_PHASE_MATCH, 0, HZ) < 0) {
result = -1;
@@ -1751,22 +1680,26 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
NCR5380_dprint_phase(NDEBUG_INFORMATION, instance);
}
#ifdef CONFIG_SUN3
- if (phase == PHASE_CMDOUT) {
- void *d;
- unsigned long count;
+ if (phase == PHASE_CMDOUT &&
+ sun3_dma_setup_done != cmd) {
+ int count;
if (!cmd->SCp.this_residual && cmd->SCp.buffers_residual) {
- count = cmd->SCp.buffer->length;
- d = sg_virt(cmd->SCp.buffer);
- } else {
- count = cmd->SCp.this_residual;
- d = cmd->SCp.ptr;
+ ++cmd->SCp.buffer;
+ --cmd->SCp.buffers_residual;
+ cmd->SCp.this_residual = cmd->SCp.buffer->length;
+ cmd->SCp.ptr = sg_virt(cmd->SCp.buffer);
}
- if (sun3_dma_setup_done != cmd &&
- sun3scsi_dma_xfer_len(count, cmd) > 0) {
- sun3scsi_dma_setup(instance, d, count,
- rq_data_dir(cmd->request));
+ count = sun3scsi_dma_xfer_len(hostdata, cmd);
+
+ if (count > 0) {
+ if (rq_data_dir(cmd->request))
+ sun3scsi_dma_send_setup(hostdata,
+ cmd->SCp.ptr, count);
+ else
+ sun3scsi_dma_recv_setup(hostdata,
+ cmd->SCp.ptr, count);
sun3_dma_setup_done = cmd;
}
#ifdef SUN3_SCSI_VME
@@ -1827,7 +1760,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
transfersize = 0;
if (!cmd->device->borken)
- transfersize = NCR5380_dma_xfer_len(instance, cmd, phase);
+ transfersize = NCR5380_dma_xfer_len(hostdata, cmd);
if (transfersize > 0) {
len = transfersize;
@@ -2073,7 +2006,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
} /* switch(phase) */
} else {
spin_unlock_irq(&hostdata->lock);
- NCR5380_poll_politely(instance, STATUS_REG, SR_REQ, SR_REQ, HZ);
+ NCR5380_poll_politely(hostdata, STATUS_REG, SR_REQ, SR_REQ, HZ);
spin_lock_irq(&hostdata->lock);
}
}
@@ -2119,7 +2052,7 @@ static void NCR5380_reselect(struct Scsi_Host *instance)
*/
NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_BSY);
- if (NCR5380_poll_politely(instance,
+ if (NCR5380_poll_politely(hostdata,
STATUS_REG, SR_SEL, 0, 2 * HZ) < 0) {
NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
return;
@@ -2130,7 +2063,7 @@ static void NCR5380_reselect(struct Scsi_Host *instance)
* Wait for target to go into MSGIN.
*/
- if (NCR5380_poll_politely(instance,
+ if (NCR5380_poll_politely(hostdata,
STATUS_REG, SR_REQ, SR_REQ, 2 * HZ) < 0) {
do_abort(instance);
return;
@@ -2204,22 +2137,25 @@ static void NCR5380_reselect(struct Scsi_Host *instance)
}
#ifdef CONFIG_SUN3
- {
- void *d;
- unsigned long count;
+ if (sun3_dma_setup_done != tmp) {
+ int count;
if (!tmp->SCp.this_residual && tmp->SCp.buffers_residual) {
- count = tmp->SCp.buffer->length;
- d = sg_virt(tmp->SCp.buffer);
- } else {
- count = tmp->SCp.this_residual;
- d = tmp->SCp.ptr;
+ ++tmp->SCp.buffer;
+ --tmp->SCp.buffers_residual;
+ tmp->SCp.this_residual = tmp->SCp.buffer->length;
+ tmp->SCp.ptr = sg_virt(tmp->SCp.buffer);
}
- if (sun3_dma_setup_done != tmp &&
- sun3scsi_dma_xfer_len(count, tmp) > 0) {
- sun3scsi_dma_setup(instance, d, count,
- rq_data_dir(tmp->request));
+ count = sun3scsi_dma_xfer_len(hostdata, tmp);
+
+ if (count > 0) {
+ if (rq_data_dir(tmp->request))
+ sun3scsi_dma_send_setup(hostdata,
+ tmp->SCp.ptr, count);
+ else
+ sun3scsi_dma_recv_setup(hostdata,
+ tmp->SCp.ptr, count);
sun3_dma_setup_done = tmp;
}
}
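
Both Sun 3 paths above share one idiom: before sizing the DMA transfer, step the command's scratch pointers to the next scatterlist element if the current one is spent. The idiom in isolation, as it appears in the hunks:

if (!cmd->SCp.this_residual && cmd->SCp.buffers_residual) {
	++cmd->SCp.buffer;			/* next scatterlist element */
	--cmd->SCp.buffers_residual;
	cmd->SCp.this_residual = cmd->SCp.buffer->length;
	cmd->SCp.ptr = sg_virt(cmd->SCp.buffer);
}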
diff --git a/drivers/scsi/NCR5380.h b/drivers/scsi/NCR5380.h
index 965d92339455..51a3567a6fb2 100644
--- a/drivers/scsi/NCR5380.h
+++ b/drivers/scsi/NCR5380.h
@@ -199,16 +199,6 @@
#define PHASE_SR_TO_TCR(phase) ((phase) >> 2)
-/*
- * These are "special" values for the irq and dma_channel fields of the
- * Scsi_Host structure
- */
-
-#define DMA_NONE 255
-#define IRQ_AUTO 254
-#define DMA_AUTO 254
-#define PORT_AUTO 0xffff /* autoprobe io port for 53c400a */
-
#ifndef NO_IRQ
#define NO_IRQ 0
#endif
@@ -219,27 +209,32 @@
#define FLAG_TOSHIBA_DELAY 128 /* Allow for borken CD-ROMs */
struct NCR5380_hostdata {
- NCR5380_implementation_fields; /* implementation specific */
- struct Scsi_Host *host; /* Host backpointer */
- unsigned char id_mask, id_higher_mask; /* 1 << id, all bits greater */
- unsigned char busy[8]; /* index = target, bit = lun */
- int dma_len; /* requested length of DMA */
- unsigned char last_message; /* last message OUT */
- struct scsi_cmnd *connected; /* currently connected cmnd */
- struct scsi_cmnd *selecting; /* cmnd to be connected */
- struct list_head unissued; /* waiting to be issued */
- struct list_head autosense; /* priority issue queue */
- struct list_head disconnected; /* waiting for reconnect */
- spinlock_t lock; /* protects this struct */
- int flags;
- struct scsi_eh_save ses;
- struct scsi_cmnd *sensing;
+ NCR5380_implementation_fields; /* Board-specific data */
+ u8 __iomem *io; /* Remapped 5380 address */
+ u8 __iomem *pdma_io; /* Remapped PDMA address */
+ unsigned long poll_loops; /* Register polling limit */
+ spinlock_t lock; /* Protects this struct */
+ struct scsi_cmnd *connected; /* Currently connected cmnd */
+ struct list_head disconnected; /* Waiting for reconnect */
+ struct Scsi_Host *host; /* SCSI host backpointer */
+ struct workqueue_struct *work_q; /* SCSI host work queue */
+ struct work_struct main_task; /* Work item for main loop */
+ int flags; /* Board-specific quirks */
+ int dma_len; /* Requested length of DMA */
+ int read_overruns; /* Transfer size reduction for DMA erratum */
+ unsigned long io_port; /* Device IO port */
+ unsigned long base; /* Device base address */
+ struct list_head unissued; /* Waiting to be issued */
+ struct scsi_cmnd *selecting; /* Cmnd to be connected */
+ struct list_head autosense; /* Priority cmnd queue */
+ struct scsi_cmnd *sensing; /* Cmnd needing autosense */
+ struct scsi_eh_save ses; /* Cmnd state saved for EH */
+ unsigned char busy[8]; /* Index = target, bit = lun */
+ unsigned char id_mask; /* 1 << Host ID */
+ unsigned char id_higher_mask; /* All bits above id_mask */
+ unsigned char last_message; /* Last Message Out */
+ unsigned long region_size; /* Size of address/port range */
char info[256];
- int read_overruns; /* number of bytes to cut from a
- * transfer to handle chip overruns */
- struct work_struct main_task;
- struct workqueue_struct *work_q;
- unsigned long accesses_per_ms; /* chip register accesses per ms */
};
#ifdef __KERNEL__
@@ -252,6 +247,9 @@ struct NCR5380_cmd {
#define NCR5380_PIO_CHUNK_SIZE 256
+/* Time limit (ms) to poll registers when IRQs are disabled, e.g. during PDMA */
+#define NCR5380_REG_POLL_TIME 15
+
static inline struct scsi_cmnd *NCR5380_to_scmd(struct NCR5380_cmd *ncmd_ptr)
{
return ((struct scsi_cmnd *)ncmd_ptr) - 1;
@@ -282,7 +280,6 @@ static void NCR5380_print(struct Scsi_Host *instance);
#define NCR5380_dprint_phase(flg, arg) do {} while (0)
#endif
-static int NCR5380_probe_irq(struct Scsi_Host *instance, int possible);
static int NCR5380_init(struct Scsi_Host *instance, int flags);
static int NCR5380_maybe_reset_bus(struct Scsi_Host *);
static void NCR5380_exit(struct Scsi_Host *instance);
@@ -294,14 +291,45 @@ static void NCR5380_reselect(struct Scsi_Host *instance);
static struct scsi_cmnd *NCR5380_select(struct Scsi_Host *, struct scsi_cmnd *);
static int NCR5380_transfer_dma(struct Scsi_Host *instance, unsigned char *phase, int *count, unsigned char **data);
static int NCR5380_transfer_pio(struct Scsi_Host *instance, unsigned char *phase, int *count, unsigned char **data);
-static int NCR5380_poll_politely2(struct Scsi_Host *, int, int, int, int, int, int, int);
+static int NCR5380_poll_politely2(struct NCR5380_hostdata *,
+ unsigned int, u8, u8,
+ unsigned int, u8, u8, unsigned long);
-static inline int NCR5380_poll_politely(struct Scsi_Host *instance,
- int reg, int bit, int val, int wait)
+static inline int NCR5380_poll_politely(struct NCR5380_hostdata *hostdata,
+ unsigned int reg, u8 bit, u8 val,
+ unsigned long wait)
{
- return NCR5380_poll_politely2(instance, reg, bit, val,
+ if ((NCR5380_read(reg) & bit) == val)
+ return 0;
+
+ return NCR5380_poll_politely2(hostdata, reg, bit, val,
reg, bit, val, wait);
}
+static int NCR5380_dma_xfer_len(struct NCR5380_hostdata *,
+ struct scsi_cmnd *);
+static int NCR5380_dma_send_setup(struct NCR5380_hostdata *,
+ unsigned char *, int);
+static int NCR5380_dma_recv_setup(struct NCR5380_hostdata *,
+ unsigned char *, int);
+static int NCR5380_dma_residual(struct NCR5380_hostdata *);
+
+static inline int NCR5380_dma_xfer_none(struct NCR5380_hostdata *hostdata,
+ struct scsi_cmnd *cmd)
+{
+ return 0;
+}
+
+static inline int NCR5380_dma_setup_none(struct NCR5380_hostdata *hostdata,
+ unsigned char *data, int count)
+{
+ return 0;
+}
+
+static inline int NCR5380_dma_residual_none(struct NCR5380_hostdata *hostdata)
+{
+ return 0;
+}
+
#endif /* __KERNEL__ */
#endif /* NCR5380_H */
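
Boards with no DMA engine can satisfy the core's method macros with the no-op stubs declared above, leaving only the PIO handlers board-specific; oak.c further down takes exactly this shape. A sketch of such a wiring block, with hypothetical PIO helper names:

/* hypothetical PIO-only board header */
#define NCR5380_dma_xfer_len		NCR5380_dma_xfer_none
#define NCR5380_dma_recv_setup		example_board_pread
#define NCR5380_dma_send_setup		example_board_pwrite
#define NCR5380_dma_residual		NCR5380_dma_residual_none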
diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h
index 969c312de1be..f059c14efa0c 100644
--- a/drivers/scsi/aacraid/aacraid.h
+++ b/drivers/scsi/aacraid/aacraid.h
@@ -1246,7 +1246,6 @@ struct aac_dev
u32 max_msix; /* max. MSI-X vectors */
u32 vector_cap; /* MSI-X vector capab.*/
int msi_enabled; /* MSI/MSI-X enabled */
- struct msix_entry msixentry[AAC_MAX_MSIX];
struct aac_msix_ctx aac_msix[AAC_MAX_MSIX]; /* context */
u8 adapter_shutdown;
u32 handle_pci_error;
diff --git a/drivers/scsi/aacraid/comminit.c b/drivers/scsi/aacraid/comminit.c
index 341ea327ae79..4f56b1003cc7 100644
--- a/drivers/scsi/aacraid/comminit.c
+++ b/drivers/scsi/aacraid/comminit.c
@@ -378,16 +378,12 @@ void aac_define_int_mode(struct aac_dev *dev)
if (msi_count > AAC_MAX_MSIX)
msi_count = AAC_MAX_MSIX;
- for (i = 0; i < msi_count; i++)
- dev->msixentry[i].entry = i;
-
if (msi_count > 1 &&
pci_find_capability(dev->pdev, PCI_CAP_ID_MSIX)) {
min_msix = 2;
- i = pci_enable_msix_range(dev->pdev,
- dev->msixentry,
- min_msix,
- msi_count);
+ i = pci_alloc_irq_vectors(dev->pdev,
+ min_msix, msi_count,
+ PCI_IRQ_MSIX | PCI_IRQ_AFFINITY);
if (i > 0) {
dev->msi_enabled = 1;
msi_count = i;
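
Dropping the msix_entry array is the point of the conversion: pci_alloc_irq_vectors() owns the vector bookkeeping, pci_irq_vector() maps an index to a Linux IRQ number, and PCI_IRQ_AFFINITY spreads the vectors across CPUs so the manual irq_set_affinity_hint() loops elsewhere in the driver can go too. A self-contained sketch of the pattern; the function and handler names are hypothetical:

static irqreturn_t example_isr(int irq, void *data);	/* hypothetical */

static int example_setup_irqs(struct pci_dev *pdev, void *ctx, int want)
{
	int nvec, i, err;

	nvec = pci_alloc_irq_vectors(pdev, 1, want,
				     PCI_IRQ_MSIX | PCI_IRQ_AFFINITY);
	if (nvec < 0)
		return nvec;

	for (i = 0; i < nvec; i++) {
		err = request_irq(pci_irq_vector(pdev, i), example_isr, 0,
				  "example", ctx);
		if (err)
			goto out_free;
	}
	return nvec;

out_free:
	while (--i >= 0)
		free_irq(pci_irq_vector(pdev, i), ctx);
	pci_free_irq_vectors(pdev);
	return err;
}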
diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c
index 0aeecec1f5ea..9e7551fe4b19 100644
--- a/drivers/scsi/aacraid/commsup.c
+++ b/drivers/scsi/aacraid/commsup.c
@@ -2043,30 +2043,22 @@ int aac_acquire_irq(struct aac_dev *dev)
int i;
int j;
int ret = 0;
- int cpu;
- cpu = cpumask_first(cpu_online_mask);
if (!dev->sync_mode && dev->msi_enabled && dev->max_msix > 1) {
for (i = 0; i < dev->max_msix; i++) {
dev->aac_msix[i].vector_no = i;
dev->aac_msix[i].dev = dev;
- if (request_irq(dev->msixentry[i].vector,
+ if (request_irq(pci_irq_vector(dev->pdev, i),
dev->a_ops.adapter_intr,
0, "aacraid", &(dev->aac_msix[i]))) {
printk(KERN_ERR "%s%d: Failed to register IRQ for vector %d.\n",
dev->name, dev->id, i);
for (j = 0 ; j < i ; j++)
- free_irq(dev->msixentry[j].vector,
+ free_irq(pci_irq_vector(dev->pdev, j),
&(dev->aac_msix[j]));
pci_disable_msix(dev->pdev);
ret = -1;
}
- if (irq_set_affinity_hint(dev->msixentry[i].vector,
- get_cpu_mask(cpu))) {
- printk(KERN_ERR "%s%d: Failed to set IRQ affinity for cpu %d\n",
- dev->name, dev->id, cpu);
- }
- cpu = cpumask_next(cpu, cpu_online_mask);
}
} else {
dev->aac_msix[0].vector_no = 0;
@@ -2096,16 +2088,9 @@ void aac_free_irq(struct aac_dev *dev)
dev->pdev->device == PMC_DEVICE_S8 ||
dev->pdev->device == PMC_DEVICE_S9) {
if (dev->max_msix > 1) {
- for (i = 0; i < dev->max_msix; i++) {
- if (irq_set_affinity_hint(
- dev->msixentry[i].vector, NULL)) {
- printk(KERN_ERR "%s%d: Failed to reset IRQ affinity for cpu %d\n",
- dev->name, dev->id, cpu);
- }
- cpu = cpumask_next(cpu, cpu_online_mask);
- free_irq(dev->msixentry[i].vector,
- &(dev->aac_msix[i]));
- }
+ for (i = 0; i < dev->max_msix; i++)
+ free_irq(pci_irq_vector(dev->pdev, i),
+ &(dev->aac_msix[i]));
} else {
free_irq(dev->pdev->irq, &(dev->aac_msix[0]));
}
diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
index 79871f3519ff..3ecbf20ca29f 100644
--- a/drivers/scsi/aacraid/linit.c
+++ b/drivers/scsi/aacraid/linit.c
@@ -160,7 +160,6 @@ static const struct pci_device_id aac_pci_tbl[] = {
{ 0x9005, 0x028b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 62 }, /* Adaptec PMC Series 6 (Tupelo) */
{ 0x9005, 0x028c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 63 }, /* Adaptec PMC Series 7 (Denali) */
{ 0x9005, 0x028d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 64 }, /* Adaptec PMC Series 8 */
- { 0x9005, 0x028f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 65 }, /* Adaptec PMC Series 9 */
{ 0,}
};
MODULE_DEVICE_TABLE(pci, aac_pci_tbl);
@@ -239,7 +238,6 @@ static struct aac_driver_ident aac_drivers[] = {
{ aac_src_init, "aacraid", "ADAPTEC ", "RAID ", 2, AAC_QUIRK_SRC }, /* Adaptec PMC Series 6 (Tupelo) */
{ aac_srcv_init, "aacraid", "ADAPTEC ", "RAID ", 2, AAC_QUIRK_SRC }, /* Adaptec PMC Series 7 (Denali) */
{ aac_srcv_init, "aacraid", "ADAPTEC ", "RAID ", 2, AAC_QUIRK_SRC }, /* Adaptec PMC Series 8 */
- { aac_srcv_init, "aacraid", "ADAPTEC ", "RAID ", 2, AAC_QUIRK_SRC } /* Adaptec PMC Series 9 */
};
/**
@@ -1071,7 +1069,6 @@ static struct scsi_host_template aac_driver_template = {
static void __aac_shutdown(struct aac_dev * aac)
{
int i;
- int cpu;
aac_send_shutdown(aac);
@@ -1087,24 +1084,13 @@ static void __aac_shutdown(struct aac_dev * aac)
kthread_stop(aac->thread);
}
aac_adapter_disable_int(aac);
- cpu = cpumask_first(cpu_online_mask);
if (aac->pdev->device == PMC_DEVICE_S6 ||
aac->pdev->device == PMC_DEVICE_S7 ||
aac->pdev->device == PMC_DEVICE_S8 ||
aac->pdev->device == PMC_DEVICE_S9) {
if (aac->max_msix > 1) {
for (i = 0; i < aac->max_msix; i++) {
- if (irq_set_affinity_hint(
- aac->msixentry[i].vector,
- NULL)) {
- printk(KERN_ERR "%s%d: Failed to reset IRQ affinity for cpu %d\n",
- aac->name,
- aac->id,
- cpu);
- }
- cpu = cpumask_next(cpu,
- cpu_online_mask);
- free_irq(aac->msixentry[i].vector,
+ free_irq(pci_irq_vector(aac->pdev, i),
&(aac->aac_msix[i]));
}
} else {
@@ -1350,7 +1336,7 @@ static void aac_release_resources(struct aac_dev *aac)
aac->pdev->device == PMC_DEVICE_S9) {
if (aac->max_msix > 1) {
for (i = 0; i < aac->max_msix; i++)
- free_irq(aac->msixentry[i].vector,
+ free_irq(pci_irq_vector(aac->pdev, i),
&(aac->aac_msix[i]));
} else {
free_irq(aac->pdev->irq, &(aac->aac_msix[0]));
@@ -1396,13 +1382,13 @@ static int aac_acquire_resources(struct aac_dev *dev)
dev->aac_msix[i].vector_no = i;
dev->aac_msix[i].dev = dev;
- if (request_irq(dev->msixentry[i].vector,
+ if (request_irq(pci_irq_vector(dev->pdev, i),
dev->a_ops.adapter_intr,
0, "aacraid", &(dev->aac_msix[i]))) {
printk(KERN_ERR "%s%d: Failed to register IRQ for vector %d.\n",
name, instance, i);
for (j = 0 ; j < i ; j++)
- free_irq(dev->msixentry[j].vector,
+ free_irq(pci_irq_vector(dev->pdev, j),
&(dev->aac_msix[j]));
pci_disable_msix(dev->pdev);
goto error_iounmap;
diff --git a/drivers/scsi/advansys.c b/drivers/scsi/advansys.c
index febbd83e2ecd..81dd0927246b 100644
--- a/drivers/scsi/advansys.c
+++ b/drivers/scsi/advansys.c
@@ -11030,6 +11030,9 @@ static int advansys_board_found(struct Scsi_Host *shost, unsigned int iop,
ASC_DBG(2, "AdvInitGetConfig()\n");
ret = AdvInitGetConfig(pdev, shost) ? -ENODEV : 0;
+#else
+ share_irq = 0;
+ ret = -ENODEV;
#endif /* CONFIG_PCI */
}
diff --git a/drivers/scsi/aic7xxx/aicasm/aicasm.c b/drivers/scsi/aic7xxx/aicasm/aicasm.c
index 2e3117aa382f..21ac265280bf 100644
--- a/drivers/scsi/aic7xxx/aicasm/aicasm.c
+++ b/drivers/scsi/aic7xxx/aicasm/aicasm.c
@@ -254,7 +254,7 @@ main(int argc, char *argv[])
argv += optind;
if (argc != 1) {
- fprintf(stderr, "%s: No input file specifiled\n", appname);
+ fprintf(stderr, "%s: No input file specified\n", appname);
usage();
/* NOTREACHED */
}
diff --git a/drivers/scsi/aic94xx/aic94xx_hwi.c b/drivers/scsi/aic94xx/aic94xx_hwi.c
index 7c713f797535..f2671a8fa7e3 100644
--- a/drivers/scsi/aic94xx/aic94xx_hwi.c
+++ b/drivers/scsi/aic94xx/aic94xx_hwi.c
@@ -228,8 +228,11 @@ static int asd_init_scbs(struct asd_ha_struct *asd_ha)
bitmap_bytes = (asd_ha->seq.tc_index_bitmap_bits+7)/8;
bitmap_bytes = BITS_TO_LONGS(bitmap_bytes*8)*sizeof(unsigned long);
asd_ha->seq.tc_index_bitmap = kzalloc(bitmap_bytes, GFP_KERNEL);
- if (!asd_ha->seq.tc_index_bitmap)
+ if (!asd_ha->seq.tc_index_bitmap) {
+ kfree(asd_ha->seq.tc_index_array);
+ asd_ha->seq.tc_index_array = NULL;
return -ENOMEM;
+ }
spin_lock_init(&seq->tc_index_lock);
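
The fix above releases the first allocation when the second fails; written with the kernel's usual goto unwinding, the same shape looks like this sketch (field names taken from the hunk, the function and the use of struct asd_seq_data are illustrative assumptions):

static int example_init_tc_index(struct asd_seq_data *seq,
				 size_t array_bytes, size_t bitmap_bytes)
{
	seq->tc_index_array = kzalloc(array_bytes, GFP_KERNEL);
	if (!seq->tc_index_array)
		return -ENOMEM;

	seq->tc_index_bitmap = kzalloc(bitmap_bytes, GFP_KERNEL);
	if (!seq->tc_index_bitmap)
		goto err_free_array;

	return 0;

err_free_array:
	kfree(seq->tc_index_array);
	seq->tc_index_array = NULL;
	return -ENOMEM;
}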
diff --git a/drivers/scsi/arcmsr/arcmsr.h b/drivers/scsi/arcmsr/arcmsr.h
index cf99f8cf4cdd..a254b32eba39 100644
--- a/drivers/scsi/arcmsr/arcmsr.h
+++ b/drivers/scsi/arcmsr/arcmsr.h
@@ -629,7 +629,6 @@ struct AdapterControlBlock
struct pci_dev * pdev;
struct Scsi_Host * host;
unsigned long vir2phy_offset;
- struct msix_entry entries[ARCMST_NUM_MSIX_VECTORS];
/* Offset is used in making arc cdb physical to virtual calculations */
uint32_t outbound_int_enable;
uint32_t cdb_phyaddr_hi32;
@@ -671,8 +670,6 @@ struct AdapterControlBlock
/* iop init */
#define ACB_F_ABORT 0x0200
#define ACB_F_FIRMWARE_TRAP 0x0400
- #define ACB_F_MSI_ENABLED 0x1000
- #define ACB_F_MSIX_ENABLED 0x2000
struct CommandControlBlock * pccb_pool[ARCMSR_MAX_FREECCB_NUM];
/* used for memory free */
struct list_head ccb_free_list;
@@ -725,7 +722,7 @@ struct AdapterControlBlock
atomic_t rq_map_token;
atomic_t ante_token_value;
uint32_t maxOutstanding;
- int msix_vector_count;
+ int vector_count;
};/* HW_DEVICE_EXTENSION */
/*
*******************************************************************************
diff --git a/drivers/scsi/arcmsr/arcmsr_hba.c b/drivers/scsi/arcmsr/arcmsr_hba.c
index f0cfb0451757..9e45749d55ed 100644
--- a/drivers/scsi/arcmsr/arcmsr_hba.c
+++ b/drivers/scsi/arcmsr/arcmsr_hba.c
@@ -720,51 +720,39 @@ static void arcmsr_message_isr_bh_fn(struct work_struct *work)
static int
arcmsr_request_irq(struct pci_dev *pdev, struct AdapterControlBlock *acb)
{
- int i, j, r;
- struct msix_entry entries[ARCMST_NUM_MSIX_VECTORS];
-
- for (i = 0; i < ARCMST_NUM_MSIX_VECTORS; i++)
- entries[i].entry = i;
- r = pci_enable_msix_range(pdev, entries, 1, ARCMST_NUM_MSIX_VECTORS);
- if (r < 0)
- goto msi_int;
- acb->msix_vector_count = r;
- for (i = 0; i < r; i++) {
- if (request_irq(entries[i].vector,
- arcmsr_do_interrupt, 0, "arcmsr", acb)) {
+ unsigned long flags;
+ int nvec, i;
+
+ nvec = pci_alloc_irq_vectors(pdev, 1, ARCMST_NUM_MSIX_VECTORS,
+ PCI_IRQ_MSIX);
+ if (nvec > 0) {
+ pr_info("arcmsr%d: msi-x enabled\n", acb->host->host_no);
+ flags = 0;
+ } else {
+ nvec = pci_alloc_irq_vectors(pdev, 1, 1,
+ PCI_IRQ_MSI | PCI_IRQ_LEGACY);
+ if (nvec < 1)
+ return FAILED;
+
+ flags = IRQF_SHARED;
+ }
+
+ acb->vector_count = nvec;
+ for (i = 0; i < nvec; i++) {
+ if (request_irq(pci_irq_vector(pdev, i), arcmsr_do_interrupt,
+ flags, "arcmsr", acb)) {
pr_warn("arcmsr%d: request_irq =%d failed!\n",
- acb->host->host_no, entries[i].vector);
- for (j = 0 ; j < i ; j++)
- free_irq(entries[j].vector, acb);
- pci_disable_msix(pdev);
- goto msi_int;
+ acb->host->host_no, pci_irq_vector(pdev, i));
+ goto out_free_irq;
}
- acb->entries[i] = entries[i];
- }
- acb->acb_flags |= ACB_F_MSIX_ENABLED;
- pr_info("arcmsr%d: msi-x enabled\n", acb->host->host_no);
- return SUCCESS;
-msi_int:
- if (pci_enable_msi_exact(pdev, 1) < 0)
- goto legacy_int;
- if (request_irq(pdev->irq, arcmsr_do_interrupt,
- IRQF_SHARED, "arcmsr", acb)) {
- pr_warn("arcmsr%d: request_irq =%d failed!\n",
- acb->host->host_no, pdev->irq);
- pci_disable_msi(pdev);
- goto legacy_int;
- }
- acb->acb_flags |= ACB_F_MSI_ENABLED;
- pr_info("arcmsr%d: msi enabled\n", acb->host->host_no);
- return SUCCESS;
-legacy_int:
- if (request_irq(pdev->irq, arcmsr_do_interrupt,
- IRQF_SHARED, "arcmsr", acb)) {
- pr_warn("arcmsr%d: request_irq = %d failed!\n",
- acb->host->host_no, pdev->irq);
- return FAILED;
}
+
return SUCCESS;
+out_free_irq:
+ while (--i >= 0)
+ free_irq(pci_irq_vector(pdev, i), acb);
+ pci_free_irq_vectors(pdev);
+ return FAILED;
}
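
arcmsr keeps a two-step fallback because MSI and legacy interrupts may be shared (IRQF_SHARED) while its MSI-X vectors are not; a driver without that distinction could collapse the ladder into one call, as a sketch:

/* single-call fallback across MSI-X, MSI and legacy INTx */
nvec = pci_alloc_irq_vectors(pdev, 1, ARCMST_NUM_MSIX_VECTORS,
			     PCI_IRQ_ALL_TYPES);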
static int arcmsr_probe(struct pci_dev *pdev, const struct pci_device_id *id)
@@ -886,15 +874,9 @@ static void arcmsr_free_irq(struct pci_dev *pdev,
{
int i;
- if (acb->acb_flags & ACB_F_MSI_ENABLED) {
- free_irq(pdev->irq, acb);
- pci_disable_msi(pdev);
- } else if (acb->acb_flags & ACB_F_MSIX_ENABLED) {
- for (i = 0; i < acb->msix_vector_count; i++)
- free_irq(acb->entries[i].vector, acb);
- pci_disable_msix(pdev);
- } else
- free_irq(pdev->irq, acb);
+ for (i = 0; i < acb->vector_count; i++)
+ free_irq(pci_irq_vector(pdev, i), acb);
+ pci_free_irq_vectors(pdev);
}
static int arcmsr_suspend(struct pci_dev *pdev, pm_message_t state)
diff --git a/drivers/scsi/arm/cumana_1.c b/drivers/scsi/arm/cumana_1.c
index 8e9cfe8f22f5..a87b99c7fb9a 100644
--- a/drivers/scsi/arm/cumana_1.c
+++ b/drivers/scsi/arm/cumana_1.c
@@ -14,49 +14,48 @@
#include <scsi/scsi_host.h>
#define priv(host) ((struct NCR5380_hostdata *)(host)->hostdata)
-#define NCR5380_read(reg) cumanascsi_read(instance, reg)
-#define NCR5380_write(reg, value) cumanascsi_write(instance, reg, value)
+#define NCR5380_read(reg) cumanascsi_read(hostdata, reg)
+#define NCR5380_write(reg, value) cumanascsi_write(hostdata, reg, value)
-#define NCR5380_dma_xfer_len(instance, cmd, phase) (cmd->transfersize)
+#define NCR5380_dma_xfer_len cumanascsi_dma_xfer_len
#define NCR5380_dma_recv_setup cumanascsi_pread
#define NCR5380_dma_send_setup cumanascsi_pwrite
-#define NCR5380_dma_residual(instance) (0)
+#define NCR5380_dma_residual NCR5380_dma_residual_none
#define NCR5380_intr cumanascsi_intr
#define NCR5380_queue_command cumanascsi_queue_command
#define NCR5380_info cumanascsi_info
#define NCR5380_implementation_fields \
- unsigned ctrl; \
- void __iomem *base; \
- void __iomem *dma
+ unsigned ctrl
-#include "../NCR5380.h"
+struct NCR5380_hostdata;
+static u8 cumanascsi_read(struct NCR5380_hostdata *, unsigned int);
+static void cumanascsi_write(struct NCR5380_hostdata *, unsigned int, u8);
-void cumanascsi_setup(char *str, int *ints)
-{
-}
+#include "../NCR5380.h"
#define CTRL 0x16fc
#define STAT 0x2004
#define L(v) (((v)<<16)|((v) & 0x0000ffff))
#define H(v) (((v)>>16)|((v) & 0xffff0000))
-static inline int cumanascsi_pwrite(struct Scsi_Host *host,
+static inline int cumanascsi_pwrite(struct NCR5380_hostdata *hostdata,
unsigned char *addr, int len)
{
unsigned long *laddr;
- void __iomem *dma = priv(host)->dma + 0x2000;
+ u8 __iomem *base = hostdata->io;
+ u8 __iomem *dma = hostdata->pdma_io + 0x2000;
if(!len) return 0;
- writeb(0x02, priv(host)->base + CTRL);
+ writeb(0x02, base + CTRL);
laddr = (unsigned long *)addr;
while(len >= 32)
{
unsigned int status;
unsigned long v;
- status = readb(priv(host)->base + STAT);
+ status = readb(base + STAT);
if(status & 0x80)
goto end;
if(!(status & 0x40))
@@ -75,12 +74,12 @@ static inline int cumanascsi_pwrite(struct Scsi_Host *host,
}
addr = (unsigned char *)laddr;
- writeb(0x12, priv(host)->base + CTRL);
+ writeb(0x12, base + CTRL);
while(len > 0)
{
unsigned int status;
- status = readb(priv(host)->base + STAT);
+ status = readb(base + STAT);
if(status & 0x80)
goto end;
if(status & 0x40)
@@ -90,7 +89,7 @@ static inline int cumanascsi_pwrite(struct Scsi_Host *host,
break;
}
- status = readb(priv(host)->base + STAT);
+ status = readb(base + STAT);
if(status & 0x80)
goto end;
if(status & 0x40)
@@ -101,27 +100,28 @@ static inline int cumanascsi_pwrite(struct Scsi_Host *host,
}
}
end:
- writeb(priv(host)->ctrl | 0x40, priv(host)->base + CTRL);
+ writeb(hostdata->ctrl | 0x40, base + CTRL);
if (len)
return -1;
return 0;
}
-static inline int cumanascsi_pread(struct Scsi_Host *host,
+static inline int cumanascsi_pread(struct NCR5380_hostdata *hostdata,
unsigned char *addr, int len)
{
unsigned long *laddr;
- void __iomem *dma = priv(host)->dma + 0x2000;
+ u8 __iomem *base = hostdata->io;
+ u8 __iomem *dma = hostdata->pdma_io + 0x2000;
if(!len) return 0;
- writeb(0x00, priv(host)->base + CTRL);
+ writeb(0x00, base + CTRL);
laddr = (unsigned long *)addr;
while(len >= 32)
{
unsigned int status;
- status = readb(priv(host)->base + STAT);
+ status = readb(base + STAT);
if(status & 0x80)
goto end;
if(!(status & 0x40))
@@ -140,12 +140,12 @@ static inline int cumanascsi_pread(struct Scsi_Host *host,
}
addr = (unsigned char *)laddr;
- writeb(0x10, priv(host)->base + CTRL);
+ writeb(0x10, base + CTRL);
while(len > 0)
{
unsigned int status;
- status = readb(priv(host)->base + STAT);
+ status = readb(base + STAT);
if(status & 0x80)
goto end;
if(status & 0x40)
@@ -155,7 +155,7 @@ static inline int cumanascsi_pread(struct Scsi_Host *host,
break;
}
- status = readb(priv(host)->base + STAT);
+ status = readb(base + STAT);
if(status & 0x80)
goto end;
if(status & 0x40)
@@ -166,37 +166,45 @@ static inline int cumanascsi_pread(struct Scsi_Host *host,
}
}
end:
- writeb(priv(host)->ctrl | 0x40, priv(host)->base + CTRL);
+ writeb(hostdata->ctrl | 0x40, base + CTRL);
if (len)
return -1;
return 0;
}
-static unsigned char cumanascsi_read(struct Scsi_Host *host, unsigned int reg)
+static int cumanascsi_dma_xfer_len(struct NCR5380_hostdata *hostdata,
+ struct scsi_cmnd *cmd)
+{
+ return cmd->transfersize;
+}
+
+static u8 cumanascsi_read(struct NCR5380_hostdata *hostdata,
+ unsigned int reg)
{
- void __iomem *base = priv(host)->base;
- unsigned char val;
+ u8 __iomem *base = hostdata->io;
+ u8 val;
writeb(0, base + CTRL);
val = readb(base + 0x2100 + (reg << 2));
- priv(host)->ctrl = 0x40;
+ hostdata->ctrl = 0x40;
writeb(0x40, base + CTRL);
return val;
}
-static void cumanascsi_write(struct Scsi_Host *host, unsigned int reg, unsigned int value)
+static void cumanascsi_write(struct NCR5380_hostdata *hostdata,
+ unsigned int reg, u8 value)
{
- void __iomem *base = priv(host)->base;
+ u8 __iomem *base = hostdata->io;
writeb(0, base + CTRL);
writeb(value, base + 0x2100 + (reg << 2));
- priv(host)->ctrl = 0x40;
+ hostdata->ctrl = 0x40;
writeb(0x40, base + CTRL);
}
@@ -235,11 +243,11 @@ static int cumanascsi1_probe(struct expansion_card *ec,
goto out_release;
}
- priv(host)->base = ioremap(ecard_resource_start(ec, ECARD_RES_IOCSLOW),
- ecard_resource_len(ec, ECARD_RES_IOCSLOW));
- priv(host)->dma = ioremap(ecard_resource_start(ec, ECARD_RES_MEMC),
- ecard_resource_len(ec, ECARD_RES_MEMC));
- if (!priv(host)->base || !priv(host)->dma) {
+ priv(host)->io = ioremap(ecard_resource_start(ec, ECARD_RES_IOCSLOW),
+ ecard_resource_len(ec, ECARD_RES_IOCSLOW));
+ priv(host)->pdma_io = ioremap(ecard_resource_start(ec, ECARD_RES_MEMC),
+ ecard_resource_len(ec, ECARD_RES_MEMC));
+ if (!priv(host)->io || !priv(host)->pdma_io) {
ret = -ENOMEM;
goto out_unmap;
}
@@ -253,7 +261,7 @@ static int cumanascsi1_probe(struct expansion_card *ec,
NCR5380_maybe_reset_bus(host);
priv(host)->ctrl = 0;
- writeb(0, priv(host)->base + CTRL);
+ writeb(0, priv(host)->io + CTRL);
ret = request_irq(host->irq, cumanascsi_intr, 0,
"CumanaSCSI-1", host);
@@ -275,8 +283,8 @@ static int cumanascsi1_probe(struct expansion_card *ec,
out_exit:
NCR5380_exit(host);
out_unmap:
- iounmap(priv(host)->base);
- iounmap(priv(host)->dma);
+ iounmap(priv(host)->io);
+ iounmap(priv(host)->pdma_io);
scsi_host_put(host);
out_release:
ecard_release_resources(ec);
@@ -287,15 +295,17 @@ static int cumanascsi1_probe(struct expansion_card *ec,
static void cumanascsi1_remove(struct expansion_card *ec)
{
struct Scsi_Host *host = ecard_get_drvdata(ec);
+ void __iomem *base = priv(host)->io;
+ void __iomem *dma = priv(host)->pdma_io;
ecard_set_drvdata(ec, NULL);
scsi_remove_host(host);
free_irq(host->irq, host);
NCR5380_exit(host);
- iounmap(priv(host)->base);
- iounmap(priv(host)->dma);
scsi_host_put(host);
+ iounmap(base);
+ iounmap(dma);
ecard_release_resources(ec);
}
diff --git a/drivers/scsi/arm/oak.c b/drivers/scsi/arm/oak.c
index a396024a3cae..6be6666534d4 100644
--- a/drivers/scsi/arm/oak.c
+++ b/drivers/scsi/arm/oak.c
@@ -16,21 +16,18 @@
#define priv(host) ((struct NCR5380_hostdata *)(host)->hostdata)
-#define NCR5380_read(reg) \
- readb(priv(instance)->base + ((reg) << 2))
-#define NCR5380_write(reg, value) \
- writeb(value, priv(instance)->base + ((reg) << 2))
+#define NCR5380_read(reg) readb(hostdata->io + ((reg) << 2))
+#define NCR5380_write(reg, value) writeb(value, hostdata->io + ((reg) << 2))
-#define NCR5380_dma_xfer_len(instance, cmd, phase) (0)
+#define NCR5380_dma_xfer_len NCR5380_dma_xfer_none
#define NCR5380_dma_recv_setup oakscsi_pread
#define NCR5380_dma_send_setup oakscsi_pwrite
-#define NCR5380_dma_residual(instance) (0)
+#define NCR5380_dma_residual NCR5380_dma_residual_none
#define NCR5380_queue_command oakscsi_queue_command
#define NCR5380_info oakscsi_info
-#define NCR5380_implementation_fields \
- void __iomem *base
+#define NCR5380_implementation_fields /* none */
#include "../NCR5380.h"
@@ -40,10 +37,10 @@
#define STAT ((128 + 16) << 2)
#define DATA ((128 + 8) << 2)
-static inline int oakscsi_pwrite(struct Scsi_Host *instance,
+static inline int oakscsi_pwrite(struct NCR5380_hostdata *hostdata,
unsigned char *addr, int len)
{
- void __iomem *base = priv(instance)->base;
+ u8 __iomem *base = hostdata->io;
printk("writing %p len %d\n",addr, len);
@@ -55,10 +52,11 @@ printk("writing %p len %d\n",addr, len);
return 0;
}
-static inline int oakscsi_pread(struct Scsi_Host *instance,
+static inline int oakscsi_pread(struct NCR5380_hostdata *hostdata,
unsigned char *addr, int len)
{
- void __iomem *base = priv(instance)->base;
+ u8 __iomem *base = hostdata->io;
+
printk("reading %p len %d\n", addr, len);
while(len > 0)
{
@@ -133,15 +131,14 @@ static int oakscsi_probe(struct expansion_card *ec, const struct ecard_id *id)
goto release;
}
- priv(host)->base = ioremap(ecard_resource_start(ec, ECARD_RES_MEMC),
- ecard_resource_len(ec, ECARD_RES_MEMC));
- if (!priv(host)->base) {
+ priv(host)->io = ioremap(ecard_resource_start(ec, ECARD_RES_MEMC),
+ ecard_resource_len(ec, ECARD_RES_MEMC));
+ if (!priv(host)->io) {
ret = -ENOMEM;
goto unreg;
}
host->irq = NO_IRQ;
- host->n_io_port = 255;
ret = NCR5380_init(host, FLAG_DMA_FIXUP | FLAG_LATE_DMA_SETUP);
if (ret)
@@ -159,7 +156,7 @@ static int oakscsi_probe(struct expansion_card *ec, const struct ecard_id *id)
out_exit:
NCR5380_exit(host);
out_unmap:
- iounmap(priv(host)->base);
+ iounmap(priv(host)->io);
unreg:
scsi_host_put(host);
release:
@@ -171,13 +168,14 @@ static int oakscsi_probe(struct expansion_card *ec, const struct ecard_id *id)
static void oakscsi_remove(struct expansion_card *ec)
{
struct Scsi_Host *host = ecard_get_drvdata(ec);
+ void __iomem *base = priv(host)->io;
ecard_set_drvdata(ec, NULL);
scsi_remove_host(host);
NCR5380_exit(host);
- iounmap(priv(host)->base);
scsi_host_put(host);
+ iounmap(base);
ecard_release_resources(ec);
}
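
The reordering in oakscsi_remove() is deliberate: the ioremap cookie now lives inside the host private data, which scsi_host_put() frees, so the pointer is snapshotted before the host is released and unmapped afterwards. The constraint as a sketch:

void __iomem *base = priv(host)->io;	/* snapshot: hostdata dies with host */

scsi_remove_host(host);
NCR5380_exit(host);
scsi_host_put(host);			/* frees hostdata, and with it ->io */
iounmap(base);				/* still safe via the snapshot */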
diff --git a/drivers/scsi/atari_scsi.c b/drivers/scsi/atari_scsi.c
index a59ad94ea52b..105b35393ce9 100644
--- a/drivers/scsi/atari_scsi.c
+++ b/drivers/scsi/atari_scsi.c
@@ -57,6 +57,9 @@
#define NCR5380_implementation_fields /* none */
+static u8 (*atari_scsi_reg_read)(unsigned int);
+static void (*atari_scsi_reg_write)(unsigned int, u8);
+
#define NCR5380_read(reg) atari_scsi_reg_read(reg)
#define NCR5380_write(reg, value) atari_scsi_reg_write(reg, value)
@@ -64,14 +67,10 @@
#define NCR5380_abort atari_scsi_abort
#define NCR5380_info atari_scsi_info
-#define NCR5380_dma_recv_setup(instance, data, count) \
- atari_scsi_dma_setup(instance, data, count, 0)
-#define NCR5380_dma_send_setup(instance, data, count) \
- atari_scsi_dma_setup(instance, data, count, 1)
-#define NCR5380_dma_residual(instance) \
- atari_scsi_dma_residual(instance)
-#define NCR5380_dma_xfer_len(instance, cmd, phase) \
- atari_dma_xfer_len(cmd->SCp.this_residual, cmd, !((phase) & SR_IO))
+#define NCR5380_dma_xfer_len atari_scsi_dma_xfer_len
+#define NCR5380_dma_recv_setup atari_scsi_dma_recv_setup
+#define NCR5380_dma_send_setup atari_scsi_dma_send_setup
+#define NCR5380_dma_residual atari_scsi_dma_residual
#define NCR5380_acquire_dma_irq(instance) falcon_get_lock(instance)
#define NCR5380_release_dma_irq(instance) falcon_release_lock()
@@ -126,9 +125,6 @@ static inline unsigned long SCSI_DMA_GETADR(void)
static void atari_scsi_fetch_restbytes(void);
-static unsigned char (*atari_scsi_reg_read)(unsigned char reg);
-static void (*atari_scsi_reg_write)(unsigned char reg, unsigned char value);
-
static unsigned long atari_dma_residual, atari_dma_startaddr;
static short atari_dma_active;
/* pointer to the dribble buffer */
@@ -457,15 +453,14 @@ static int __init atari_scsi_setup(char *str)
__setup("atascsi=", atari_scsi_setup);
#endif /* !MODULE */
-
-static unsigned long atari_scsi_dma_setup(struct Scsi_Host *instance,
+static unsigned long atari_scsi_dma_setup(struct NCR5380_hostdata *hostdata,
void *data, unsigned long count,
int dir)
{
unsigned long addr = virt_to_phys(data);
- dprintk(NDEBUG_DMA, "scsi%d: setting up dma, data = %p, phys = %lx, count = %ld, "
- "dir = %d\n", instance->host_no, data, addr, count, dir);
+ dprintk(NDEBUG_DMA, "scsi%d: setting up dma, data = %p, phys = %lx, count = %ld, dir = %d\n",
+ hostdata->host->host_no, data, addr, count, dir);
if (!IS_A_TT() && !STRAM_ADDR(addr)) {
/* If we have a non-DMAable address on a Falcon, use the dribble
@@ -522,8 +517,19 @@ static unsigned long atari_scsi_dma_setup(struct Scsi_Host *instance,
return count;
}
+static inline int atari_scsi_dma_recv_setup(struct NCR5380_hostdata *hostdata,
+ unsigned char *data, int count)
+{
+ return atari_scsi_dma_setup(hostdata, data, count, 0);
+}
+
+static inline int atari_scsi_dma_send_setup(struct NCR5380_hostdata *hostdata,
+ unsigned char *data, int count)
+{
+ return atari_scsi_dma_setup(hostdata, data, count, 1);
+}
-static long atari_scsi_dma_residual(struct Scsi_Host *instance)
+static int atari_scsi_dma_residual(struct NCR5380_hostdata *hostdata)
{
return atari_dma_residual;
}
@@ -564,10 +570,11 @@ static int falcon_classify_cmd(struct scsi_cmnd *cmd)
* the overrun problem, so this question is academic :-)
*/
-static unsigned long atari_dma_xfer_len(unsigned long wanted_len,
- struct scsi_cmnd *cmd, int write_flag)
+static int atari_scsi_dma_xfer_len(struct NCR5380_hostdata *hostdata,
+ struct scsi_cmnd *cmd)
{
- unsigned long possible_len, limit;
+ int wanted_len = cmd->SCp.this_residual;
+ int possible_len, limit;
if (wanted_len < DMA_MIN_SIZE)
return 0;
@@ -604,7 +611,7 @@ static unsigned long atari_dma_xfer_len(unsigned long wanted_len,
* use the dribble buffer and thus can do only STRAM_BUFFER_SIZE bytes.
*/
- if (write_flag) {
+ if (cmd->sc_data_direction == DMA_TO_DEVICE) {
/* Write operation can always use the DMA, but the transfer size must
* be rounded up to the next multiple of 512 (atari_dma_setup() does
* this).
@@ -644,8 +651,8 @@ static unsigned long atari_dma_xfer_len(unsigned long wanted_len,
possible_len = limit;
if (possible_len != wanted_len)
- dprintk(NDEBUG_DMA, "Sorry, must cut DMA transfer size to %ld bytes "
- "instead of %ld\n", possible_len, wanted_len);
+ dprintk(NDEBUG_DMA, "DMA transfer now %d bytes instead of %d\n",
+ possible_len, wanted_len);
return possible_len;
}
@@ -658,26 +665,38 @@ static unsigned long atari_dma_xfer_len(unsigned long wanted_len,
* NCR5380_write call these functions via function pointers.
*/
-static unsigned char atari_scsi_tt_reg_read(unsigned char reg)
+static u8 atari_scsi_tt_reg_read(unsigned int reg)
{
return tt_scsi_regp[reg * 2];
}
-static void atari_scsi_tt_reg_write(unsigned char reg, unsigned char value)
+static void atari_scsi_tt_reg_write(unsigned int reg, u8 value)
{
tt_scsi_regp[reg * 2] = value;
}
-static unsigned char atari_scsi_falcon_reg_read(unsigned char reg)
+static u8 atari_scsi_falcon_reg_read(unsigned int reg)
{
- dma_wd.dma_mode_status= (u_short)(0x88 + reg);
- return (u_char)dma_wd.fdc_acces_seccount;
+ unsigned long flags;
+ u8 result;
+
+ reg += 0x88;
+ local_irq_save(flags);
+ dma_wd.dma_mode_status = (u_short)reg;
+ result = (u8)dma_wd.fdc_acces_seccount;
+ local_irq_restore(flags);
+ return result;
}
-static void atari_scsi_falcon_reg_write(unsigned char reg, unsigned char value)
+static void atari_scsi_falcon_reg_write(unsigned int reg, u8 value)
{
- dma_wd.dma_mode_status = (u_short)(0x88 + reg);
+ unsigned long flags;
+
+ reg += 0x88;
+ local_irq_save(flags);
+ dma_wd.dma_mode_status = (u_short)reg;
dma_wd.fdc_acces_seccount = (u_short)value;
+ local_irq_restore(flags);
}
diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c
index d9239c2d49b1..b5112d6d7e73 100644
--- a/drivers/scsi/be2iscsi/be_main.c
+++ b/drivers/scsi/be2iscsi/be_main.c
@@ -3049,8 +3049,10 @@ static int beiscsi_create_eqs(struct beiscsi_hba *phba,
eq_vaddress = pci_alloc_consistent(phba->pcidev,
num_eq_pages * PAGE_SIZE,
&paddr);
- if (!eq_vaddress)
+ if (!eq_vaddress) {
+ ret = -ENOMEM;
goto create_eq_error;
+ }
mem->va = eq_vaddress;
ret = be_fill_queue(eq, phba->params.num_eq_entries,
@@ -3113,8 +3115,10 @@ static int beiscsi_create_cqs(struct beiscsi_hba *phba,
cq_vaddress = pci_alloc_consistent(phba->pcidev,
num_cq_pages * PAGE_SIZE,
&paddr);
- if (!cq_vaddress)
+ if (!cq_vaddress) {
+ ret = -ENOMEM;
goto create_cq_error;
+ }
ret = be_fill_queue(cq, phba->params.num_cq_entries,
sizeof(struct sol_cqe), cq_vaddress);
diff --git a/drivers/scsi/bfa/bfa_ioc.h b/drivers/scsi/bfa/bfa_ioc.h
index 713745da44c6..0f9fab770339 100644
--- a/drivers/scsi/bfa/bfa_ioc.h
+++ b/drivers/scsi/bfa/bfa_ioc.h
@@ -111,20 +111,24 @@ struct bfa_meminfo_s {
struct bfa_mem_kva_s kva_info;
};
-/* BFA memory segment setup macros */
-#define bfa_mem_dma_setup(_meminfo, _dm_ptr, _seg_sz) do { \
- ((bfa_mem_dma_t *)(_dm_ptr))->mem_len = (_seg_sz); \
- if (_seg_sz) \
- list_add_tail(&((bfa_mem_dma_t *)_dm_ptr)->qe, \
- &(_meminfo)->dma_info.qe); \
-} while (0)
+/* BFA memory segment setup helpers */
+static inline void bfa_mem_dma_setup(struct bfa_meminfo_s *meminfo,
+ struct bfa_mem_dma_s *dm_ptr,
+ size_t seg_sz)
+{
+ dm_ptr->mem_len = seg_sz;
+ if (seg_sz)
+ list_add_tail(&dm_ptr->qe, &meminfo->dma_info.qe);
+}
-#define bfa_mem_kva_setup(_meminfo, _kva_ptr, _seg_sz) do { \
- ((bfa_mem_kva_t *)(_kva_ptr))->mem_len = (_seg_sz); \
- if (_seg_sz) \
- list_add_tail(&((bfa_mem_kva_t *)_kva_ptr)->qe, \
- &(_meminfo)->kva_info.qe); \
-} while (0)
+static inline void bfa_mem_kva_setup(struct bfa_meminfo_s *meminfo,
+ struct bfa_mem_kva_s *kva_ptr,
+ size_t seg_sz)
+{
+ kva_ptr->mem_len = seg_sz;
+ if (seg_sz)
+ list_add_tail(&kva_ptr->qe, &meminfo->kva_info.qe);
+}
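+/* Unlike the old casting macros, these inline helpers type-check their arguments. */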
/* BFA dma memory segments iterator */
#define bfa_mem_dma_sptr(_mod, _i) (&(_mod)->dma_seg[(_i)])
diff --git a/drivers/scsi/bfa/bfad_bsg.c b/drivers/scsi/bfa/bfad_bsg.c
index d1ad0208dfe7..a9a00169ad91 100644
--- a/drivers/scsi/bfa/bfad_bsg.c
+++ b/drivers/scsi/bfa/bfad_bsg.c
@@ -3130,11 +3130,12 @@ bfad_iocmd_handler(struct bfad_s *bfad, unsigned int cmd, void *iocmd,
}
static int
-bfad_im_bsg_vendor_request(struct fc_bsg_job *job)
+bfad_im_bsg_vendor_request(struct bsg_job *job)
{
- uint32_t vendor_cmd = job->request->rqst_data.h_vendor.vendor_cmd[0];
- struct bfad_im_port_s *im_port =
- (struct bfad_im_port_s *) job->shost->hostdata[0];
+ struct fc_bsg_request *bsg_request = job->request;
+ struct fc_bsg_reply *bsg_reply = job->reply;
+ uint32_t vendor_cmd = bsg_request->rqst_data.h_vendor.vendor_cmd[0];
+ struct bfad_im_port_s *im_port = shost_priv(fc_bsg_to_shost(job));
struct bfad_s *bfad = im_port->bfad;
struct request_queue *request_q = job->req->q;
void *payload_kbuf;
@@ -3175,18 +3176,19 @@ bfad_im_bsg_vendor_request(struct fc_bsg_job *job)
/* Fill the BSG job reply data */
job->reply_len = job->reply_payload.payload_len;
- job->reply->reply_payload_rcv_len = job->reply_payload.payload_len;
- job->reply->result = rc;
+ bsg_reply->reply_payload_rcv_len = job->reply_payload.payload_len;
+ bsg_reply->result = rc;
- job->job_done(job);
+ bsg_job_done(job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
return rc;
error:
/* free the command buffer */
kfree(payload_kbuf);
out:
- job->reply->result = rc;
+ bsg_reply->result = rc;
job->reply_len = sizeof(uint32_t);
- job->reply->reply_payload_rcv_len = 0;
+ bsg_reply->reply_payload_rcv_len = 0;
return rc;
}
@@ -3312,7 +3314,7 @@ bfad_fcxp_free_mem(struct bfad_s *bfad, struct bfad_buf_info *buf_base,
}
int
-bfad_fcxp_bsg_send(struct fc_bsg_job *job, struct bfad_fcxp *drv_fcxp,
+bfad_fcxp_bsg_send(struct bsg_job *job, struct bfad_fcxp *drv_fcxp,
bfa_bsg_fcpt_t *bsg_fcpt)
{
struct bfa_fcxp_s *hal_fcxp;
@@ -3352,28 +3354,29 @@ bfad_fcxp_bsg_send(struct fc_bsg_job *job, struct bfad_fcxp *drv_fcxp,
}
int
-bfad_im_bsg_els_ct_request(struct fc_bsg_job *job)
+bfad_im_bsg_els_ct_request(struct bsg_job *job)
{
struct bfa_bsg_data *bsg_data;
- struct bfad_im_port_s *im_port =
- (struct bfad_im_port_s *) job->shost->hostdata[0];
+ struct bfad_im_port_s *im_port = shost_priv(fc_bsg_to_shost(job));
struct bfad_s *bfad = im_port->bfad;
bfa_bsg_fcpt_t *bsg_fcpt;
struct bfad_fcxp *drv_fcxp;
struct bfa_fcs_lport_s *fcs_port;
struct bfa_fcs_rport_s *fcs_rport;
- uint32_t command_type = job->request->msgcode;
+	struct fc_bsg_request *bsg_request = job->request;
+ struct fc_bsg_reply *bsg_reply = job->reply;
+ uint32_t command_type = bsg_request->msgcode;
unsigned long flags;
struct bfad_buf_info *rsp_buf_info;
void *req_kbuf = NULL, *rsp_kbuf = NULL;
int rc = -EINVAL;
job->reply_len = sizeof(uint32_t);	/* At least uint32_t reply_len */
- job->reply->reply_payload_rcv_len = 0;
+ bsg_reply->reply_payload_rcv_len = 0;
/* Get the payload passed in from userspace */
- bsg_data = (struct bfa_bsg_data *) (((char *)job->request) +
- sizeof(struct fc_bsg_request));
+ bsg_data = (struct bfa_bsg_data *) (((char *)bsg_request) +
+ sizeof(struct fc_bsg_request));
if (bsg_data == NULL)
goto out;
@@ -3517,13 +3520,13 @@ bfad_im_bsg_els_ct_request(struct fc_bsg_job *job)
/* fill the job->reply data */
if (drv_fcxp->req_status == BFA_STATUS_OK) {
job->reply_len = drv_fcxp->rsp_len;
- job->reply->reply_payload_rcv_len = drv_fcxp->rsp_len;
- job->reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK;
+ bsg_reply->reply_payload_rcv_len = drv_fcxp->rsp_len;
+ bsg_reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK;
} else {
- job->reply->reply_payload_rcv_len =
+ bsg_reply->reply_payload_rcv_len =
sizeof(struct fc_bsg_ctels_reply);
job->reply_len = sizeof(uint32_t);
- job->reply->reply_data.ctels_reply.status =
+ bsg_reply->reply_data.ctels_reply.status =
FC_CTELS_STATUS_REJECT;
}
@@ -3549,20 +3552,23 @@ out_free_mem:
kfree(bsg_fcpt);
kfree(drv_fcxp);
out:
- job->reply->result = rc;
+ bsg_reply->result = rc;
if (rc == BFA_STATUS_OK)
- job->job_done(job);
+ bsg_job_done(job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
return rc;
}
int
-bfad_im_bsg_request(struct fc_bsg_job *job)
+bfad_im_bsg_request(struct bsg_job *job)
{
+ struct fc_bsg_request *bsg_request = job->request;
+ struct fc_bsg_reply *bsg_reply = job->reply;
uint32_t rc = BFA_STATUS_OK;
- switch (job->request->msgcode) {
+ switch (bsg_request->msgcode) {
case FC_BSG_HST_VENDOR:
/* Process BSG HST Vendor requests */
rc = bfad_im_bsg_vendor_request(job);
@@ -3575,8 +3581,8 @@ bfad_im_bsg_request(struct fc_bsg_job *job)
rc = bfad_im_bsg_els_ct_request(job);
break;
default:
- job->reply->result = rc = -EINVAL;
- job->reply->reply_payload_rcv_len = 0;
+ bsg_reply->result = rc = -EINVAL;
+ bsg_reply->reply_payload_rcv_len = 0;
break;
}
@@ -3584,7 +3590,7 @@ bfad_im_bsg_request(struct fc_bsg_job *job)
}
int
-bfad_im_bsg_timeout(struct fc_bsg_job *job)
+bfad_im_bsg_timeout(struct bsg_job *job)
{
/* Don't complete the BSG job request - return -EAGAIN
* to reset bsg job timeout: for ELS/CT pass thru we
diff --git a/drivers/scsi/bfa/bfad_im.h b/drivers/scsi/bfa/bfad_im.h
index 836fdc221edd..c81ec2a77ef5 100644
--- a/drivers/scsi/bfa/bfad_im.h
+++ b/drivers/scsi/bfa/bfad_im.h
@@ -166,8 +166,8 @@ extern struct device_attribute *bfad_im_vport_attrs[];
irqreturn_t bfad_intx(int irq, void *dev_id);
-int bfad_im_bsg_request(struct fc_bsg_job *job);
-int bfad_im_bsg_timeout(struct fc_bsg_job *job);
+int bfad_im_bsg_request(struct bsg_job *job);
+int bfad_im_bsg_timeout(struct bsg_job *job);
/*
* Macro to set the SCSI device sdev_bflags - sdev_bflags are used by the
diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
index f9ddb6156f14..0990130821fa 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
@@ -970,7 +970,6 @@ static int bnx2fc_libfc_config(struct fc_lport *lport)
sizeof(struct libfc_function_template));
fc_elsct_init(lport);
fc_exch_init(lport);
- fc_rport_init(lport);
fc_disc_init(lport);
fc_disc_config(lport, lport);
return 0;
diff --git a/drivers/scsi/bnx2fc/bnx2fc_tgt.c b/drivers/scsi/bnx2fc/bnx2fc_tgt.c
index 08ec318afb99..739bfb62aff6 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_tgt.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_tgt.c
@@ -80,7 +80,6 @@ static void bnx2fc_offload_session(struct fcoe_port *port,
struct bnx2fc_rport *tgt,
struct fc_rport_priv *rdata)
{
- struct fc_lport *lport = rdata->local_port;
struct fc_rport *rport = rdata->rport;
struct bnx2fc_interface *interface = port->priv;
struct bnx2fc_hba *hba = interface->hba;
@@ -160,7 +159,7 @@ ofld_err:
tgt_init_err:
if (tgt->fcoe_conn_id != -1)
bnx2fc_free_conn_id(hba, tgt->fcoe_conn_id);
- lport->tt.rport_logoff(rdata);
+ fc_rport_logoff(rdata);
}
void bnx2fc_flush_active_ios(struct bnx2fc_rport *tgt)
diff --git a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
index 0039bebaa9e2..9a2fdc305cf2 100644
--- a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
+++ b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
@@ -85,6 +85,7 @@ static inline int send_tx_flowc_wr(struct cxgbi_sock *);
static const struct cxgb4_uld_info cxgb4i_uld_info = {
.name = DRV_MODULE_NAME,
.nrxq = MAX_ULD_QSETS,
+ .ntxq = MAX_ULD_QSETS,
.rxq_size = 1024,
.lro = false,
.add = t4_uld_add,
@@ -188,7 +189,6 @@ static void send_act_open_req(struct cxgbi_sock *csk, struct sk_buff *skb,
struct l2t_entry *e)
{
struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(csk->cdev);
- int t4 = is_t4(lldi->adapter_type);
int wscale = cxgbi_sock_compute_wscale(csk->mss_idx);
unsigned long long opt0;
unsigned int opt2;
@@ -231,7 +231,7 @@ static void send_act_open_req(struct cxgbi_sock *csk, struct sk_buff *skb,
csk, &req->local_ip, ntohs(req->local_port),
&req->peer_ip, ntohs(req->peer_port),
csk->atid, csk->rss_qid);
- } else {
+ } else if (is_t5(lldi->adapter_type)) {
struct cpl_t5_act_open_req *req =
(struct cpl_t5_act_open_req *)skb->head;
u32 isn = (prandom_u32() & ~7UL) - 1;
@@ -259,12 +259,45 @@ static void send_act_open_req(struct cxgbi_sock *csk, struct sk_buff *skb,
csk, &req->local_ip, ntohs(req->local_port),
&req->peer_ip, ntohs(req->peer_port),
csk->atid, csk->rss_qid);
+ } else {
+ struct cpl_t6_act_open_req *req =
+ (struct cpl_t6_act_open_req *)skb->head;
+ u32 isn = (prandom_u32() & ~7UL) - 1;
+
+ INIT_TP_WR(req, 0);
+ OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ,
+ qid_atid));
+ req->local_port = csk->saddr.sin_port;
+ req->peer_port = csk->daddr.sin_port;
+ req->local_ip = csk->saddr.sin_addr.s_addr;
+ req->peer_ip = csk->daddr.sin_addr.s_addr;
+ req->opt0 = cpu_to_be64(opt0);
+ req->params = cpu_to_be64(FILTER_TUPLE_V(
+ cxgb4_select_ntuple(
+ csk->cdev->ports[csk->port_id],
+ csk->l2t)));
+ req->rsvd = cpu_to_be32(isn);
+
+ opt2 |= T5_ISS_VALID;
+ opt2 |= RX_FC_DISABLE_F;
+ opt2 |= T5_OPT_2_VALID_F;
+
+ req->opt2 = cpu_to_be32(opt2);
+ req->rsvd2 = cpu_to_be32(0);
+ req->opt3 = cpu_to_be32(0);
+
+ log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
+ "csk t6 0x%p, %pI4:%u-%pI4:%u, atid %d, qid %u.\n",
+ csk, &req->local_ip, ntohs(req->local_port),
+ &req->peer_ip, ntohs(req->peer_port),
+ csk->atid, csk->rss_qid);
}
set_wr_txq(skb, CPL_PRIORITY_SETUP, csk->port_id);
pr_info_ipaddr("t%d csk 0x%p,%u,0x%lx,%u, rss_qid %u.\n",
- (&csk->saddr), (&csk->daddr), t4 ? 4 : 5, csk,
+ (&csk->saddr), (&csk->daddr),
+ CHELSIO_CHIP_VERSION(lldi->adapter_type), csk,
csk->state, csk->flags, csk->atid, csk->rss_qid);
cxgb4_l2t_send(csk->cdev->ports[csk->port_id], skb, csk->l2t);
@@ -275,7 +308,6 @@ static void send_act_open_req6(struct cxgbi_sock *csk, struct sk_buff *skb,
struct l2t_entry *e)
{
struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(csk->cdev);
- int t4 = is_t4(lldi->adapter_type);
int wscale = cxgbi_sock_compute_wscale(csk->mss_idx);
unsigned long long opt0;
unsigned int opt2;
@@ -293,10 +325,9 @@ static void send_act_open_req6(struct cxgbi_sock *csk, struct sk_buff *skb,
opt2 = RX_CHANNEL_V(0) |
RSS_QUEUE_VALID_F |
- RX_FC_DISABLE_F |
RSS_QUEUE_V(csk->rss_qid);
- if (t4) {
+ if (is_t4(lldi->adapter_type)) {
struct cpl_act_open_req6 *req =
(struct cpl_act_open_req6 *)skb->head;
@@ -321,7 +352,7 @@ static void send_act_open_req6(struct cxgbi_sock *csk, struct sk_buff *skb,
req->params = cpu_to_be32(cxgb4_select_ntuple(
csk->cdev->ports[csk->port_id],
csk->l2t));
- } else {
+ } else if (is_t5(lldi->adapter_type)) {
struct cpl_t5_act_open_req6 *req =
(struct cpl_t5_act_open_req6 *)skb->head;
@@ -344,12 +375,41 @@ static void send_act_open_req6(struct cxgbi_sock *csk, struct sk_buff *skb,
req->params = cpu_to_be64(FILTER_TUPLE_V(cxgb4_select_ntuple(
csk->cdev->ports[csk->port_id],
csk->l2t)));
+ } else {
+ struct cpl_t6_act_open_req6 *req =
+ (struct cpl_t6_act_open_req6 *)skb->head;
+
+ INIT_TP_WR(req, 0);
+ OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ6,
+ qid_atid));
+ req->local_port = csk->saddr6.sin6_port;
+ req->peer_port = csk->daddr6.sin6_port;
+ req->local_ip_hi = *(__be64 *)(csk->saddr6.sin6_addr.s6_addr);
+ req->local_ip_lo = *(__be64 *)(csk->saddr6.sin6_addr.s6_addr +
+ 8);
+ req->peer_ip_hi = *(__be64 *)(csk->daddr6.sin6_addr.s6_addr);
+ req->peer_ip_lo = *(__be64 *)(csk->daddr6.sin6_addr.s6_addr +
+ 8);
+ req->opt0 = cpu_to_be64(opt0);
+
+ opt2 |= RX_FC_DISABLE_F;
+ opt2 |= T5_OPT_2_VALID_F;
+
+ req->opt2 = cpu_to_be32(opt2);
+
+ req->params = cpu_to_be64(FILTER_TUPLE_V(cxgb4_select_ntuple(
+ csk->cdev->ports[csk->port_id],
+ csk->l2t)));
+
+ req->rsvd2 = cpu_to_be32(0);
+ req->opt3 = cpu_to_be32(0);
}
set_wr_txq(skb, CPL_PRIORITY_SETUP, csk->port_id);
pr_info("t%d csk 0x%p,%u,0x%lx,%u, [%pI6]:%u-[%pI6]:%u, rss_qid %u.\n",
- t4 ? 4 : 5, csk, csk->state, csk->flags, csk->atid,
+ CHELSIO_CHIP_VERSION(lldi->adapter_type), csk, csk->state,
+ csk->flags, csk->atid,
&csk->saddr6.sin6_addr, ntohs(csk->saddr.sin_port),
&csk->daddr6.sin6_addr, ntohs(csk->daddr.sin_port),
csk->rss_qid);
@@ -741,7 +801,7 @@ static void do_act_establish(struct cxgbi_device *cdev, struct sk_buff *skb)
(&csk->saddr), (&csk->daddr),
atid, tid, csk, csk->state, csk->flags, rcv_isn);
- module_put(THIS_MODULE);
+ module_put(cdev->owner);
cxgbi_sock_get(csk);
csk->tid = tid;
@@ -890,7 +950,7 @@ static void do_act_open_rpl(struct cxgbi_device *cdev, struct sk_buff *skb)
if (is_neg_adv(status))
goto rel_skb;
- module_put(THIS_MODULE);
+ module_put(cdev->owner);
if (status && status != CPL_ERR_TCAM_FULL &&
status != CPL_ERR_CONN_EXIST &&
@@ -1172,6 +1232,101 @@ rel_skb:
__kfree_skb(skb);
}
+static void do_rx_iscsi_data(struct cxgbi_device *cdev, struct sk_buff *skb)
+{
+ struct cxgbi_sock *csk;
+ struct cpl_iscsi_hdr *cpl = (struct cpl_iscsi_hdr *)skb->data;
+ struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
+ struct tid_info *t = lldi->tids;
+ struct sk_buff *lskb;
+ u32 tid = GET_TID(cpl);
+ u16 pdu_len_ddp = be16_to_cpu(cpl->pdu_len_ddp);
+
+ csk = lookup_tid(t, tid);
+ if (unlikely(!csk)) {
+ pr_err("can't find conn. for tid %u.\n", tid);
+ goto rel_skb;
+ }
+
+ log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
+ "csk 0x%p,%u,0x%lx, tid %u, skb 0x%p,%u, 0x%x.\n",
+ csk, csk->state, csk->flags, csk->tid, skb,
+ skb->len, pdu_len_ddp);
+
+ spin_lock_bh(&csk->lock);
+
+ if (unlikely(csk->state >= CTP_PASSIVE_CLOSE)) {
+ log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
+ "csk 0x%p,%u,0x%lx,%u, bad state.\n",
+ csk, csk->state, csk->flags, csk->tid);
+
+ if (csk->state != CTP_ABORTING)
+ goto abort_conn;
+ else
+ goto discard;
+ }
+
+ cxgbi_skcb_tcp_seq(skb) = be32_to_cpu(cpl->seq);
+ cxgbi_skcb_flags(skb) = 0;
+
+ skb_reset_transport_header(skb);
+ __skb_pull(skb, sizeof(*cpl));
+ __pskb_trim(skb, ntohs(cpl->len));
+
+ if (!csk->skb_ulp_lhdr)
+ csk->skb_ulp_lhdr = skb;
+
+ lskb = csk->skb_ulp_lhdr;
+ cxgbi_skcb_set_flag(lskb, SKCBF_RX_DATA);
+
+ log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
+ "csk 0x%p,%u,0x%lx, skb 0x%p data, 0x%p.\n",
+ csk, csk->state, csk->flags, skb, lskb);
+
+ __skb_queue_tail(&csk->receive_queue, skb);
+ spin_unlock_bh(&csk->lock);
+ return;
+
+abort_conn:
+ send_abort_req(csk);
+discard:
+ spin_unlock_bh(&csk->lock);
+rel_skb:
+ __kfree_skb(skb);
+}
+
+static void
+cxgb4i_process_ddpvld(struct cxgbi_sock *csk,
+ struct sk_buff *skb, u32 ddpvld)
+{
+ if (ddpvld & (1 << CPL_RX_DDP_STATUS_HCRC_SHIFT)) {
+ pr_info("csk 0x%p, lhdr 0x%p, status 0x%x, hcrc bad 0x%lx.\n",
+ csk, skb, ddpvld, cxgbi_skcb_flags(skb));
+ cxgbi_skcb_set_flag(skb, SKCBF_RX_HCRC_ERR);
+ }
+
+ if (ddpvld & (1 << CPL_RX_DDP_STATUS_DCRC_SHIFT)) {
+ pr_info("csk 0x%p, lhdr 0x%p, status 0x%x, dcrc bad 0x%lx.\n",
+ csk, skb, ddpvld, cxgbi_skcb_flags(skb));
+ cxgbi_skcb_set_flag(skb, SKCBF_RX_DCRC_ERR);
+ }
+
+ if (ddpvld & (1 << CPL_RX_DDP_STATUS_PAD_SHIFT)) {
+ log_debug(1 << CXGBI_DBG_PDU_RX,
+ "csk 0x%p, lhdr 0x%p, status 0x%x, pad bad.\n",
+ csk, skb, ddpvld);
+ cxgbi_skcb_set_flag(skb, SKCBF_RX_PAD_ERR);
+ }
+
+ if ((ddpvld & (1 << CPL_RX_DDP_STATUS_DDP_SHIFT)) &&
+ !cxgbi_skcb_test_flag(skb, SKCBF_RX_DATA)) {
+ log_debug(1 << CXGBI_DBG_PDU_RX,
+ "csk 0x%p, lhdr 0x%p, 0x%x, data ddp'ed.\n",
+ csk, skb, ddpvld);
+ cxgbi_skcb_set_flag(skb, SKCBF_RX_DATA_DDPD);
+ }
+}
+
static void do_rx_data_ddp(struct cxgbi_device *cdev,
struct sk_buff *skb)
{
@@ -1181,7 +1336,7 @@ static void do_rx_data_ddp(struct cxgbi_device *cdev,
unsigned int tid = GET_TID(rpl);
struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
struct tid_info *t = lldi->tids;
- unsigned int status = ntohl(rpl->ddpvld);
+ u32 ddpvld = be32_to_cpu(rpl->ddpvld);
csk = lookup_tid(t, tid);
if (unlikely(!csk)) {
@@ -1191,7 +1346,7 @@ static void do_rx_data_ddp(struct cxgbi_device *cdev,
log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
"csk 0x%p,%u,0x%lx, skb 0x%p,0x%x, lhdr 0x%p.\n",
- csk, csk->state, csk->flags, skb, status, csk->skb_ulp_lhdr);
+ csk, csk->state, csk->flags, skb, ddpvld, csk->skb_ulp_lhdr);
spin_lock_bh(&csk->lock);
@@ -1219,29 +1374,8 @@ static void do_rx_data_ddp(struct cxgbi_device *cdev,
pr_info("tid 0x%x, RX_DATA_DDP pdulen %u != %u.\n",
csk->tid, ntohs(rpl->len), cxgbi_skcb_rx_pdulen(lskb));
- if (status & (1 << CPL_RX_DDP_STATUS_HCRC_SHIFT)) {
- pr_info("csk 0x%p, lhdr 0x%p, status 0x%x, hcrc bad 0x%lx.\n",
- csk, lskb, status, cxgbi_skcb_flags(lskb));
- cxgbi_skcb_set_flag(lskb, SKCBF_RX_HCRC_ERR);
- }
- if (status & (1 << CPL_RX_DDP_STATUS_DCRC_SHIFT)) {
- pr_info("csk 0x%p, lhdr 0x%p, status 0x%x, dcrc bad 0x%lx.\n",
- csk, lskb, status, cxgbi_skcb_flags(lskb));
- cxgbi_skcb_set_flag(lskb, SKCBF_RX_DCRC_ERR);
- }
- if (status & (1 << CPL_RX_DDP_STATUS_PAD_SHIFT)) {
- log_debug(1 << CXGBI_DBG_PDU_RX,
- "csk 0x%p, lhdr 0x%p, status 0x%x, pad bad.\n",
- csk, lskb, status);
- cxgbi_skcb_set_flag(lskb, SKCBF_RX_PAD_ERR);
- }
- if ((status & (1 << CPL_RX_DDP_STATUS_DDP_SHIFT)) &&
- !cxgbi_skcb_test_flag(lskb, SKCBF_RX_DATA)) {
- log_debug(1 << CXGBI_DBG_PDU_RX,
- "csk 0x%p, lhdr 0x%p, 0x%x, data ddp'ed.\n",
- csk, lskb, status);
- cxgbi_skcb_set_flag(lskb, SKCBF_RX_DATA_DDPD);
- }
+ cxgb4i_process_ddpvld(csk, lskb, ddpvld);
+
log_debug(1 << CXGBI_DBG_PDU_RX,
"csk 0x%p, lskb 0x%p, f 0x%lx.\n",
csk, lskb, cxgbi_skcb_flags(lskb));
@@ -1259,6 +1393,98 @@ rel_skb:
__kfree_skb(skb);
}
+static void
+do_rx_iscsi_cmp(struct cxgbi_device *cdev, struct sk_buff *skb)
+{
+ struct cxgbi_sock *csk;
+ struct cpl_rx_iscsi_cmp *rpl = (struct cpl_rx_iscsi_cmp *)skb->data;
+ struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
+ struct tid_info *t = lldi->tids;
+ struct sk_buff *data_skb = NULL;
+ u32 tid = GET_TID(rpl);
+ u32 ddpvld = be32_to_cpu(rpl->ddpvld);
+ u32 seq = be32_to_cpu(rpl->seq);
+ u16 pdu_len_ddp = be16_to_cpu(rpl->pdu_len_ddp);
+
+ csk = lookup_tid(t, tid);
+ if (unlikely(!csk)) {
+ pr_err("can't find connection for tid %u.\n", tid);
+ goto rel_skb;
+ }
+
+ log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
+ "csk 0x%p,%u,0x%lx, skb 0x%p,0x%x, lhdr 0x%p, len %u, "
+ "pdu_len_ddp %u, status %u.\n",
+ csk, csk->state, csk->flags, skb, ddpvld, csk->skb_ulp_lhdr,
+ ntohs(rpl->len), pdu_len_ddp, rpl->status);
+
+ spin_lock_bh(&csk->lock);
+
+ if (unlikely(csk->state >= CTP_PASSIVE_CLOSE)) {
+ log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
+ "csk 0x%p,%u,0x%lx,%u, bad state.\n",
+ csk, csk->state, csk->flags, csk->tid);
+
+ if (csk->state != CTP_ABORTING)
+ goto abort_conn;
+ else
+ goto discard;
+ }
+
+ cxgbi_skcb_tcp_seq(skb) = seq;
+ cxgbi_skcb_flags(skb) = 0;
+ cxgbi_skcb_rx_pdulen(skb) = 0;
+
+ skb_reset_transport_header(skb);
+ __skb_pull(skb, sizeof(*rpl));
+ __pskb_trim(skb, be16_to_cpu(rpl->len));
+
+ csk->rcv_nxt = seq + pdu_len_ddp;
+
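+	/* If a data PDU arrived ahead of this completion, requeue the
+	 * data skb behind the completion header so the header is
+	 * processed first.
+	 */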
+ if (csk->skb_ulp_lhdr) {
+ data_skb = skb_peek(&csk->receive_queue);
+ if (!data_skb ||
+ !cxgbi_skcb_test_flag(data_skb, SKCBF_RX_DATA)) {
+ pr_err("Error! freelist data not found 0x%p, tid %u\n",
+ data_skb, tid);
+
+ goto abort_conn;
+ }
+ __skb_unlink(data_skb, &csk->receive_queue);
+
+ cxgbi_skcb_set_flag(skb, SKCBF_RX_DATA);
+
+ __skb_queue_tail(&csk->receive_queue, skb);
+ __skb_queue_tail(&csk->receive_queue, data_skb);
+ } else {
+ __skb_queue_tail(&csk->receive_queue, skb);
+ }
+
+ csk->skb_ulp_lhdr = NULL;
+
+ cxgbi_skcb_set_flag(skb, SKCBF_RX_HDR);
+ cxgbi_skcb_set_flag(skb, SKCBF_RX_STATUS);
+ cxgbi_skcb_set_flag(skb, SKCBF_RX_ISCSI_COMPL);
+ cxgbi_skcb_rx_ddigest(skb) = be32_to_cpu(rpl->ulp_crc);
+
+ cxgb4i_process_ddpvld(csk, skb, ddpvld);
+
+ log_debug(1 << CXGBI_DBG_PDU_RX, "csk 0x%p, skb 0x%p, f 0x%lx.\n",
+ csk, skb, cxgbi_skcb_flags(skb));
+
+ cxgbi_conn_pdu_ready(csk);
+ spin_unlock_bh(&csk->lock);
+
+ return;
+
+abort_conn:
+ send_abort_req(csk);
+discard:
+ spin_unlock_bh(&csk->lock);
+rel_skb:
+ __kfree_skb(skb);
+}
+
static void do_fw4_ack(struct cxgbi_device *cdev, struct sk_buff *skb)
{
struct cxgbi_sock *csk;
@@ -1381,7 +1607,6 @@ static int init_act_open(struct cxgbi_sock *csk)
void *daddr;
unsigned int step;
unsigned int size, size6;
- int t4 = is_t4(lldi->adapter_type);
unsigned int linkspeed;
unsigned int rcv_winf, snd_winf;
@@ -1410,7 +1635,7 @@ static int init_act_open(struct cxgbi_sock *csk)
csk->atid = cxgb4_alloc_atid(lldi->tids, csk);
if (csk->atid < 0) {
pr_err("%s, NO atid available.\n", ndev->name);
- return -EINVAL;
+ goto rel_resource_without_clip;
}
cxgbi_sock_set_flag(csk, CTPF_HAS_ATID);
cxgbi_sock_get(csk);
@@ -1427,12 +1652,15 @@ static int init_act_open(struct cxgbi_sock *csk)
cxgb4_clip_get(ndev, (const u32 *)&csk->saddr6.sin6_addr, 1);
#endif
- if (t4) {
+ if (is_t4(lldi->adapter_type)) {
size = sizeof(struct cpl_act_open_req);
size6 = sizeof(struct cpl_act_open_req6);
- } else {
+ } else if (is_t5(lldi->adapter_type)) {
size = sizeof(struct cpl_t5_act_open_req);
size6 = sizeof(struct cpl_t5_act_open_req6);
+ } else {
+ size = sizeof(struct cpl_t6_act_open_req);
+ size6 = sizeof(struct cpl_t6_act_open_req6);
}
if (csk->csk_family == AF_INET)
@@ -1451,8 +1679,8 @@ static int init_act_open(struct cxgbi_sock *csk)
csk->mtu = dst_mtu(csk->dst);
cxgb4_best_mtu(lldi->mtus, csk->mtu, &csk->mss_idx);
csk->tx_chan = cxgb4_port_chan(ndev);
- /* SMT two entries per row */
- csk->smac_idx = ((cxgb4_port_viid(ndev) & 0x7F)) << 1;
+ csk->smac_idx = cxgb4_tp_smt_idx(lldi->adapter_type,
+ cxgb4_port_viid(ndev));
step = lldi->ntxq / lldi->nchan;
csk->txq_idx = cxgb4_port_idx(ndev) * step;
step = lldi->nrxq / lldi->nchan;
@@ -1485,7 +1713,11 @@ static int init_act_open(struct cxgbi_sock *csk)
csk->mtu, csk->mss_idx, csk->smac_idx);
/* must wait for either a act_open_rpl or act_open_establish */
- try_module_get(THIS_MODULE);
+ if (!try_module_get(cdev->owner)) {
+ pr_err("%s, try_module_get failed.\n", ndev->name);
+ goto rel_resource;
+ }
+
cxgbi_sock_set_state(csk, CTP_ACTIVE_OPEN);
if (csk->csk_family == AF_INET)
send_act_open_req(csk, skb, csk->l2t);
@@ -1520,10 +1752,11 @@ static cxgb4i_cplhandler_func cxgb4i_cplhandlers[NUM_CPL_CMDS] = {
[CPL_CLOSE_CON_RPL] = do_close_con_rpl,
[CPL_FW4_ACK] = do_fw4_ack,
[CPL_ISCSI_HDR] = do_rx_iscsi_hdr,
- [CPL_ISCSI_DATA] = do_rx_iscsi_hdr,
+ [CPL_ISCSI_DATA] = do_rx_iscsi_data,
[CPL_SET_TCB_RPL] = do_set_tcb_rpl,
[CPL_RX_DATA_DDP] = do_rx_data_ddp,
[CPL_RX_ISCSI_DDP] = do_rx_data_ddp,
+ [CPL_RX_ISCSI_CMP] = do_rx_iscsi_cmp,
[CPL_RX_DATA] = do_rx_data,
};
@@ -1793,10 +2026,12 @@ static void *t4_uld_add(const struct cxgb4_lld_info *lldi)
cdev->nports = lldi->nports;
cdev->mtus = lldi->mtus;
cdev->nmtus = NMTUS;
- cdev->rx_credit_thres = cxgb4i_rx_credit_thres;
+ cdev->rx_credit_thres = (CHELSIO_CHIP_VERSION(lldi->adapter_type) <=
+ CHELSIO_T5) ? cxgb4i_rx_credit_thres : 0;
cdev->skb_tx_rsvd = CXGB4I_TX_HEADER_LEN;
cdev->skb_rx_extra = sizeof(struct cpl_iscsi_hdr);
cdev->itp = &cxgb4i_iscsi_transport;
+ cdev->owner = THIS_MODULE;
cdev->pfvf = FW_VIID_PFN_G(cxgb4_port_viid(lldi->ports[0]))
<< FW_VIID_PFN_S;
diff --git a/drivers/scsi/cxgbi/libcxgbi.c b/drivers/scsi/cxgbi/libcxgbi.c
index 2ffe029ff2b6..9167bcd9fffe 100644
--- a/drivers/scsi/cxgbi/libcxgbi.c
+++ b/drivers/scsi/cxgbi/libcxgbi.c
@@ -642,6 +642,12 @@ static struct cxgbi_sock *cxgbi_check_route(struct sockaddr *dst_addr)
n->dev->name, ndev->name, mtu);
}
+ if (!(ndev->flags & IFF_UP) || !netif_carrier_ok(ndev)) {
+ pr_info("%s interface not up.\n", ndev->name);
+ err = -ENETDOWN;
+ goto rel_neigh;
+ }
+
cdev = cxgbi_device_find_by_netdev(ndev, &port);
if (!cdev) {
pr_info("dst %pI4, %s, NOT cxgbi device.\n",
@@ -736,6 +742,12 @@ static struct cxgbi_sock *cxgbi_check_route6(struct sockaddr *dst_addr)
}
ndev = n->dev;
+ if (!(ndev->flags & IFF_UP) || !netif_carrier_ok(ndev)) {
+ pr_info("%s interface not up.\n", ndev->name);
+ err = -ENETDOWN;
+ goto rel_rt;
+ }
+
if (ipv6_addr_is_multicast(&daddr6->sin6_addr)) {
pr_info("multi-cast route %pI6 port %u, dev %s.\n",
daddr6->sin6_addr.s6_addr,
@@ -896,6 +908,7 @@ EXPORT_SYMBOL_GPL(cxgbi_sock_fail_act_open);
void cxgbi_sock_act_open_req_arp_failure(void *handle, struct sk_buff *skb)
{
struct cxgbi_sock *csk = (struct cxgbi_sock *)skb->sk;
+ struct module *owner = csk->cdev->owner;
log_debug(1 << CXGBI_DBG_SOCK, "csk 0x%p,%u,0x%lx,%u.\n",
csk, (csk)->state, (csk)->flags, (csk)->tid);
@@ -906,6 +919,8 @@ void cxgbi_sock_act_open_req_arp_failure(void *handle, struct sk_buff *skb)
spin_unlock_bh(&csk->lock);
cxgbi_sock_put(csk);
__kfree_skb(skb);
+
+ module_put(owner);
}
EXPORT_SYMBOL_GPL(cxgbi_sock_act_open_req_arp_failure);
@@ -1574,6 +1589,25 @@ static int skb_read_pdu_bhs(struct iscsi_conn *conn, struct sk_buff *skb)
return -EIO;
}
+ if (cxgbi_skcb_test_flag(skb, SKCBF_RX_ISCSI_COMPL) &&
+ cxgbi_skcb_test_flag(skb, SKCBF_RX_DATA_DDPD)) {
+		/* If the completion flag is set and the data was placed
+		 * directly into host memory, update task->exp_datasn to the
+		 * datasn in the completion iSCSI header, since the T6 adapter
+		 * generates a completion only for the last pdu of a sequence.
+		 */
+ itt_t itt = ((struct iscsi_data *)skb->data)->itt;
+ struct iscsi_task *task = iscsi_itt_to_ctask(conn, itt);
+ u32 data_sn = be32_to_cpu(((struct iscsi_data *)
+ skb->data)->datasn);
+ if (task && task->sc) {
+ struct iscsi_tcp_task *tcp_task = task->dd_data;
+
+ tcp_task->exp_datasn = data_sn;
+ }
+ }
+
return read_pdu_skb(conn, skb, 0, 0);
}
@@ -1627,15 +1661,15 @@ static void csk_return_rx_credits(struct cxgbi_sock *csk, int copied)
csk->rcv_wup, cdev->rx_credit_thres,
csk->rcv_win);
+ if (!cdev->rx_credit_thres)
+ return;
+
if (csk->state != CTP_ESTABLISHED)
return;
credits = csk->copied_seq - csk->rcv_wup;
if (unlikely(!credits))
return;
- if (unlikely(cdev->rx_credit_thres == 0))
- return;
-
must_send = credits + 16384 >= csk->rcv_win;
if (must_send || credits >= cdev->rx_credit_thres)
csk->rcv_wup += cdev->csk_send_rx_credits(csk, credits);
diff --git a/drivers/scsi/cxgbi/libcxgbi.h b/drivers/scsi/cxgbi/libcxgbi.h
index e7802738f5d2..95ba99044c3e 100644
--- a/drivers/scsi/cxgbi/libcxgbi.h
+++ b/drivers/scsi/cxgbi/libcxgbi.h
@@ -207,6 +207,7 @@ enum cxgbi_skcb_flags {
SKCBF_RX_HDR, /* received pdu header */
SKCBF_RX_DATA, /* received pdu payload */
SKCBF_RX_STATUS, /* received ddp status */
+ SKCBF_RX_ISCSI_COMPL, /* received iscsi completion */
SKCBF_RX_DATA_DDPD, /* pdu payload ddp'd */
SKCBF_RX_HCRC_ERR, /* header digest error */
SKCBF_RX_DCRC_ERR, /* data digest error */
@@ -467,6 +468,7 @@ struct cxgbi_device {
struct pci_dev *pdev;
struct dentry *debugfs_root;
struct iscsi_transport *itp;
+ struct module *owner;
unsigned int pfvf;
unsigned int rx_credit_thres;
diff --git a/drivers/scsi/cxlflash/common.h b/drivers/scsi/cxlflash/common.h
index 6e6815545a71..0e9de5d62da2 100644
--- a/drivers/scsi/cxlflash/common.h
+++ b/drivers/scsi/cxlflash/common.h
@@ -19,6 +19,7 @@
#include <linux/rwsem.h>
#include <linux/types.h>
#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
extern const struct file_operations cxlflash_cxl_fops;
@@ -62,11 +63,6 @@ static inline void check_sizes(void)
/* AFU defines a fixed size of 4K for command buffers (borrow 4K page define) */
#define CMD_BUFSIZE SIZE_4K
-/* flags in IOA status area for host use */
-#define B_DONE 0x01
-#define B_ERROR 0x02 /* set with B_DONE */
-#define B_TIMEOUT 0x04 /* set with B_DONE & B_ERROR */
-
enum cxlflash_lr_state {
LINK_RESET_INVALID,
LINK_RESET_REQUIRED,
@@ -132,12 +128,9 @@ struct cxlflash_cfg {
struct afu_cmd {
struct sisl_ioarcb rcb; /* IOARCB (cache line aligned) */
struct sisl_ioasa sa; /* IOASA must follow IOARCB */
- spinlock_t slock;
- struct completion cevent;
- char *buf; /* per command buffer */
struct afu *parent;
- int slot;
- atomic_t free;
+ struct scsi_cmnd *scp;
+ struct completion cevent;
u8 cmd_tmf:1;
@@ -147,19 +140,31 @@ struct afu_cmd {
*/
} __aligned(cache_line_size());
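+/*
+ * cmd_size in the SCSI host template reserves sizeof(struct afu_cmd) plus
+ * __alignof__(struct afu_cmd) - 1 bytes of per-command private data, so the
+ * PTR_ALIGN below always stays within the allocation.
+ */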
+static inline struct afu_cmd *sc_to_afuc(struct scsi_cmnd *sc)
+{
+ return PTR_ALIGN(scsi_cmd_priv(sc), __alignof__(struct afu_cmd));
+}
+
+static inline struct afu_cmd *sc_to_afucz(struct scsi_cmnd *sc)
+{
+ struct afu_cmd *afuc = sc_to_afuc(sc);
+
+ memset(afuc, 0, sizeof(*afuc));
+ return afuc;
+}
+
struct afu {
/* Stuff requiring alignment go first. */
u64 rrq_entry[NUM_RRQ_ENTRY]; /* 2K RRQ */
- /*
- * Command & data for AFU commands.
- */
- struct afu_cmd cmd[CXLFLASH_NUM_CMDS];
/* Beware of alignment till here. Preferably introduce new
* fields after this point
*/
+ int (*send_cmd)(struct afu *, struct afu_cmd *);
+ void (*context_reset)(struct afu_cmd *);
+
/* AFU HW */
struct cxl_ioctl_start_work work;
struct cxlflash_afu_map __iomem *afu_map; /* entire MMIO map */
@@ -173,10 +178,10 @@ struct afu {
u64 *hrrq_end;
u64 *hrrq_curr;
bool toggle;
- bool read_room;
- atomic64_t room;
+ atomic_t cmds_active; /* Number of currently active AFU commands */
+ s64 room;
+ spinlock_t rrin_slock; /* Lock to rrin queuing and cmd_room updates */
u64 hb;
- u32 cmd_couts; /* Number of command checkouts */
u32 internal_lun; /* User-desired LUN mode for this AFU */
char version[16];
diff --git a/drivers/scsi/cxlflash/lunmgt.c b/drivers/scsi/cxlflash/lunmgt.c
index a0923cade6f3..6c318db90c85 100644
--- a/drivers/scsi/cxlflash/lunmgt.c
+++ b/drivers/scsi/cxlflash/lunmgt.c
@@ -254,8 +254,14 @@ int cxlflash_manage_lun(struct scsi_device *sdev,
if (lli->parent->mode != MODE_NONE)
rc = -EBUSY;
else {
+ /*
+ * Clean up local LUN for this port and reset table
+ * tracking when no more references exist.
+ */
sdev->hostdata = NULL;
lli->port_sel &= ~CHAN2PORT(chan);
+ if (lli->port_sel == 0U)
+ lli->in_table = false;
}
}
diff --git a/drivers/scsi/cxlflash/main.c b/drivers/scsi/cxlflash/main.c
index b301655f91cd..b17ebf6d0a7e 100644
--- a/drivers/scsi/cxlflash/main.c
+++ b/drivers/scsi/cxlflash/main.c
@@ -35,67 +35,6 @@ MODULE_AUTHOR("Matthew R. Ochs <mrochs@linux.vnet.ibm.com>");
MODULE_LICENSE("GPL");
/**
- * cmd_checkout() - checks out an AFU command
- * @afu: AFU to checkout from.
- *
- * Commands are checked out in a round-robin fashion. Note that since
- * the command pool is larger than the hardware queue, the majority of
- * times we will only loop once or twice before getting a command. The
- * buffer and CDB within the command are initialized (zeroed) prior to
- * returning.
- *
- * Return: The checked out command or NULL when command pool is empty.
- */
-static struct afu_cmd *cmd_checkout(struct afu *afu)
-{
- int k, dec = CXLFLASH_NUM_CMDS;
- struct afu_cmd *cmd;
-
- while (dec--) {
- k = (afu->cmd_couts++ & (CXLFLASH_NUM_CMDS - 1));
-
- cmd = &afu->cmd[k];
-
- if (!atomic_dec_if_positive(&cmd->free)) {
- pr_devel("%s: returning found index=%d cmd=%p\n",
- __func__, cmd->slot, cmd);
- memset(cmd->buf, 0, CMD_BUFSIZE);
- memset(cmd->rcb.cdb, 0, sizeof(cmd->rcb.cdb));
- return cmd;
- }
- }
-
- return NULL;
-}
-
-/**
- * cmd_checkin() - checks in an AFU command
- * @cmd: AFU command to checkin.
- *
- * Safe to pass commands that have already been checked in. Several
- * internal tracking fields are reset as part of the checkin. Note
- * that these are intentionally reset prior to toggling the free bit
- * to avoid clobbering values in the event that the command is checked
- * out right away.
- */
-static void cmd_checkin(struct afu_cmd *cmd)
-{
- cmd->rcb.scp = NULL;
- cmd->rcb.timeout = 0;
- cmd->sa.ioasc = 0;
- cmd->cmd_tmf = false;
- cmd->sa.host_use[0] = 0; /* clears both completion and retry bytes */
-
- if (unlikely(atomic_inc_return(&cmd->free) != 1)) {
- pr_err("%s: Freeing cmd (%d) that is not in use!\n",
- __func__, cmd->slot);
- return;
- }
-
- pr_devel("%s: released cmd %p index=%d\n", __func__, cmd, cmd->slot);
-}
-
-/**
* process_cmd_err() - command error handler
* @cmd: AFU command that experienced the error.
* @scp: SCSI command associated with the AFU command in error.
@@ -212,7 +151,7 @@ static void process_cmd_err(struct afu_cmd *cmd, struct scsi_cmnd *scp)
*
* Prepares and submits command that has either completed or timed out to
* the SCSI stack. Checks AFU command back into command pool for non-internal
- * (rcb.scp populated) commands.
+ * (cmd->scp populated) commands.
*/
static void cmd_complete(struct afu_cmd *cmd)
{
@@ -222,19 +161,14 @@ static void cmd_complete(struct afu_cmd *cmd)
struct cxlflash_cfg *cfg = afu->parent;
bool cmd_is_tmf;
- spin_lock_irqsave(&cmd->slock, lock_flags);
- cmd->sa.host_use_b[0] |= B_DONE;
- spin_unlock_irqrestore(&cmd->slock, lock_flags);
-
- if (cmd->rcb.scp) {
- scp = cmd->rcb.scp;
+ if (cmd->scp) {
+ scp = cmd->scp;
if (unlikely(cmd->sa.ioasc))
process_cmd_err(cmd, scp);
else
scp->result = (DID_OK << 16);
cmd_is_tmf = cmd->cmd_tmf;
- cmd_checkin(cmd); /* Don't use cmd after here */
pr_debug_ratelimited("%s: calling scsi_done scp=%p result=%X "
"ioasc=%d\n", __func__, scp, scp->result,
@@ -254,49 +188,19 @@ static void cmd_complete(struct afu_cmd *cmd)
}
/**
- * context_reset() - timeout handler for AFU commands
+ * context_reset_ioarrin() - reset command owner context via IOARRIN register
* @cmd: AFU command that timed out.
- *
- * Sends a reset to the AFU.
*/
-static void context_reset(struct afu_cmd *cmd)
+static void context_reset_ioarrin(struct afu_cmd *cmd)
{
int nretry = 0;
u64 rrin = 0x1;
- u64 room = 0;
struct afu *afu = cmd->parent;
- ulong lock_flags;
+ struct cxlflash_cfg *cfg = afu->parent;
+ struct device *dev = &cfg->dev->dev;
pr_debug("%s: cmd=%p\n", __func__, cmd);
- spin_lock_irqsave(&cmd->slock, lock_flags);
-
- /* Already completed? */
- if (cmd->sa.host_use_b[0] & B_DONE) {
- spin_unlock_irqrestore(&cmd->slock, lock_flags);
- return;
- }
-
- cmd->sa.host_use_b[0] |= (B_DONE | B_ERROR | B_TIMEOUT);
- spin_unlock_irqrestore(&cmd->slock, lock_flags);
-
- /*
- * We really want to send this reset at all costs, so spread
- * out wait time on successive retries for available room.
- */
- do {
- room = readq_be(&afu->host_map->cmd_room);
- atomic64_set(&afu->room, room);
- if (room)
- goto write_rrin;
- udelay(1 << nretry);
- } while (nretry++ < MC_ROOM_RETRY_CNT);
-
- pr_err("%s: no cmd_room to send reset\n", __func__);
- return;
-
-write_rrin:
- nretry = 0;
writeq_be(rrin, &afu->host_map->ioarrin);
do {
rrin = readq_be(&afu->host_map->ioarrin);
@@ -305,93 +209,81 @@ write_rrin:
/* Double delay each time */
udelay(1 << nretry);
} while (nretry++ < MC_ROOM_RETRY_CNT);
+
+ dev_dbg(dev, "%s: returning rrin=0x%016llX nretry=%d\n",
+ __func__, rrin, nretry);
}
/**
- * send_cmd() - sends an AFU command
+ * send_cmd_ioarrin() - sends an AFU command via IOARRIN register
* @afu: AFU associated with the host.
* @cmd: AFU command to send.
*
* Return:
* 0 on success, SCSI_MLQUEUE_HOST_BUSY on failure
*/
-static int send_cmd(struct afu *afu, struct afu_cmd *cmd)
+static int send_cmd_ioarrin(struct afu *afu, struct afu_cmd *cmd)
{
struct cxlflash_cfg *cfg = afu->parent;
struct device *dev = &cfg->dev->dev;
- int nretry = 0;
int rc = 0;
- u64 room;
- long newval;
+ s64 room;
+ ulong lock_flags;
/*
- * This routine is used by critical users such an AFU sync and to
- * send a task management function (TMF). Thus we want to retry a
- * bit before returning an error. To avoid the performance penalty
- * of MMIO, we spread the update of 'room' over multiple commands.
+ * To avoid the performance penalty of MMIO, spread the update of
+ * 'room' over multiple commands.
*/
-retry:
- newval = atomic64_dec_if_positive(&afu->room);
- if (!newval) {
- do {
- room = readq_be(&afu->host_map->cmd_room);
- atomic64_set(&afu->room, room);
- if (room)
- goto write_ioarrin;
- udelay(1 << nretry);
- } while (nretry++ < MC_ROOM_RETRY_CNT);
-
- dev_err(dev, "%s: no cmd_room to send 0x%X\n",
- __func__, cmd->rcb.cdb[0]);
-
- goto no_room;
- } else if (unlikely(newval < 0)) {
- /* This should be rare. i.e. Only if two threads race and
- * decrement before the MMIO read is done. In this case
- * just benefit from the other thread having updated
- * afu->room.
- */
- if (nretry++ < MC_ROOM_RETRY_CNT) {
- udelay(1 << nretry);
- goto retry;
+ spin_lock_irqsave(&afu->rrin_slock, lock_flags);
+ if (--afu->room < 0) {
+ room = readq_be(&afu->host_map->cmd_room);
+ if (room <= 0) {
+ dev_dbg_ratelimited(dev, "%s: no cmd_room to send "
+ "0x%02X, room=0x%016llX\n",
+ __func__, cmd->rcb.cdb[0], room);
+ afu->room = 0;
+ rc = SCSI_MLQUEUE_HOST_BUSY;
+ goto out;
}
-
- goto no_room;
+ afu->room = room - 1;
}
-write_ioarrin:
writeq_be((u64)&cmd->rcb, &afu->host_map->ioarrin);
out:
+ spin_unlock_irqrestore(&afu->rrin_slock, lock_flags);
pr_devel("%s: cmd=%p len=%d ea=%p rc=%d\n", __func__, cmd,
cmd->rcb.data_len, (void *)cmd->rcb.data_ea, rc);
return rc;
-
-no_room:
- afu->read_room = true;
- kref_get(&cfg->afu->mapcount);
- schedule_work(&cfg->work_q);
- rc = SCSI_MLQUEUE_HOST_BUSY;
- goto out;
}
/**
* wait_resp() - polls for a response or timeout to a sent AFU command
* @afu: AFU associated with the host.
* @cmd: AFU command that was sent.
+ *
+ * Return:
+ * 0 on success, -1 on timeout/error
*/
-static void wait_resp(struct afu *afu, struct afu_cmd *cmd)
+static int wait_resp(struct afu *afu, struct afu_cmd *cmd)
{
+ int rc = 0;
ulong timeout = msecs_to_jiffies(cmd->rcb.timeout * 2 * 1000);
timeout = wait_for_completion_timeout(&cmd->cevent, timeout);
- if (!timeout)
- context_reset(cmd);
+ if (!timeout) {
+ afu->context_reset(cmd);
+ rc = -1;
+ }
- if (unlikely(cmd->sa.ioasc != 0))
+ if (unlikely(cmd->sa.ioasc != 0)) {
pr_err("%s: CMD 0x%X failed, IOASC: flags 0x%X, afu_rc 0x%X, "
"scsi_rc 0x%X, fc_rc 0x%X\n", __func__, cmd->rcb.cdb[0],
cmd->sa.rc.flags, cmd->sa.rc.afu_rc, cmd->sa.rc.scsi_rc,
cmd->sa.rc.fc_rc);
+ rc = -1;
+ }
+
+ return rc;
}
/**
@@ -405,24 +297,15 @@ static void wait_resp(struct afu *afu, struct afu_cmd *cmd)
*/
static int send_tmf(struct afu *afu, struct scsi_cmnd *scp, u64 tmfcmd)
{
- struct afu_cmd *cmd;
-
u32 port_sel = scp->device->channel + 1;
- short lflag = 0;
struct Scsi_Host *host = scp->device->host;
struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)host->hostdata;
+ struct afu_cmd *cmd = sc_to_afucz(scp);
struct device *dev = &cfg->dev->dev;
ulong lock_flags;
int rc = 0;
ulong to;
- cmd = cmd_checkout(afu);
- if (unlikely(!cmd)) {
- dev_err(dev, "%s: could not get a free command\n", __func__);
- rc = SCSI_MLQUEUE_HOST_BUSY;
- goto out;
- }
-
/* When Task Management Function is active do not send another */
spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
if (cfg->tmf_active)
@@ -430,28 +313,23 @@ static int send_tmf(struct afu *afu, struct scsi_cmnd *scp, u64 tmfcmd)
!cfg->tmf_active,
cfg->tmf_slock);
cfg->tmf_active = true;
- cmd->cmd_tmf = true;
spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
+ cmd->scp = scp;
+ cmd->parent = afu;
+ cmd->cmd_tmf = true;
+
cmd->rcb.ctx_id = afu->ctx_hndl;
+ cmd->rcb.msi = SISL_MSI_RRQ_UPDATED;
cmd->rcb.port_sel = port_sel;
cmd->rcb.lun_id = lun_to_lunid(scp->device->lun);
-
- lflag = SISL_REQ_FLAGS_TMF_CMD;
-
cmd->rcb.req_flags = (SISL_REQ_FLAGS_PORT_LUN_ID |
- SISL_REQ_FLAGS_SUP_UNDERRUN | lflag);
-
- /* Stash the scp in the reserved field, for reuse during interrupt */
- cmd->rcb.scp = scp;
-
- /* Copy the CDB from the cmd passed in */
+ SISL_REQ_FLAGS_SUP_UNDERRUN |
+ SISL_REQ_FLAGS_TMF_CMD);
memcpy(cmd->rcb.cdb, &tmfcmd, sizeof(tmfcmd));
- /* Send the command */
- rc = send_cmd(afu, cmd);
+ rc = afu->send_cmd(afu, cmd);
if (unlikely(rc)) {
- cmd_checkin(cmd);
spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
cfg->tmf_active = false;
spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
@@ -507,12 +385,12 @@ static int cxlflash_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scp)
struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)host->hostdata;
struct afu *afu = cfg->afu;
struct device *dev = &cfg->dev->dev;
- struct afu_cmd *cmd;
+ struct afu_cmd *cmd = sc_to_afucz(scp);
+ struct scatterlist *sg = scsi_sglist(scp);
u32 port_sel = scp->device->channel + 1;
- int nseg, i, ncount;
- struct scatterlist *sg;
+ u16 req_flags = SISL_REQ_FLAGS_SUP_UNDERRUN;
ulong lock_flags;
- short lflag = 0;
+ int nseg = 0;
int rc = 0;
int kref_got = 0;
@@ -552,55 +430,38 @@ static int cxlflash_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scp)
break;
}
- cmd = cmd_checkout(afu);
- if (unlikely(!cmd)) {
- dev_err(dev, "%s: could not get a free command\n", __func__);
- rc = SCSI_MLQUEUE_HOST_BUSY;
- goto out;
- }
-
kref_get(&cfg->afu->mapcount);
kref_got = 1;
+ if (likely(sg)) {
+ nseg = scsi_dma_map(scp);
+ if (unlikely(nseg < 0)) {
+ dev_err(dev, "%s: Fail DMA map!\n", __func__);
+ rc = SCSI_MLQUEUE_HOST_BUSY;
+ goto out;
+ }
+
+ cmd->rcb.data_len = sg_dma_len(sg);
+ cmd->rcb.data_ea = sg_dma_address(sg);
+ }
+
+ cmd->scp = scp;
+ cmd->parent = afu;
+
cmd->rcb.ctx_id = afu->ctx_hndl;
+ cmd->rcb.msi = SISL_MSI_RRQ_UPDATED;
cmd->rcb.port_sel = port_sel;
cmd->rcb.lun_id = lun_to_lunid(scp->device->lun);
if (scp->sc_data_direction == DMA_TO_DEVICE)
- lflag = SISL_REQ_FLAGS_HOST_WRITE;
- else
- lflag = SISL_REQ_FLAGS_HOST_READ;
-
- cmd->rcb.req_flags = (SISL_REQ_FLAGS_PORT_LUN_ID |
- SISL_REQ_FLAGS_SUP_UNDERRUN | lflag);
-
- /* Stash the scp in the reserved field, for reuse during interrupt */
- cmd->rcb.scp = scp;
-
- nseg = scsi_dma_map(scp);
- if (unlikely(nseg < 0)) {
- dev_err(dev, "%s: Fail DMA map! nseg=%d\n",
- __func__, nseg);
- rc = SCSI_MLQUEUE_HOST_BUSY;
- goto out;
- }
+ req_flags |= SISL_REQ_FLAGS_HOST_WRITE;
- ncount = scsi_sg_count(scp);
- scsi_for_each_sg(scp, sg, ncount, i) {
- cmd->rcb.data_len = sg_dma_len(sg);
- cmd->rcb.data_ea = sg_dma_address(sg);
- }
-
- /* Copy the CDB from the scsi_cmnd passed in */
+ cmd->rcb.req_flags = req_flags;
memcpy(cmd->rcb.cdb, scp->cmnd, sizeof(cmd->rcb.cdb));
- /* Send the command */
- rc = send_cmd(afu, cmd);
- if (unlikely(rc)) {
- cmd_checkin(cmd);
+ rc = afu->send_cmd(afu, cmd);
+ if (unlikely(rc))
scsi_dma_unmap(scp);
- }
-
out:
if (kref_got)
kref_put(&afu->mapcount, afu_unmap);
@@ -628,17 +489,9 @@ static void cxlflash_wait_for_pci_err_recovery(struct cxlflash_cfg *cfg)
*/
static void free_mem(struct cxlflash_cfg *cfg)
{
- int i;
- char *buf = NULL;
struct afu *afu = cfg->afu;
if (cfg->afu) {
- for (i = 0; i < CXLFLASH_NUM_CMDS; i++) {
- buf = afu->cmd[i].buf;
- if (!((u64)buf & (PAGE_SIZE - 1)))
- free_page((ulong)buf);
- }
-
free_pages((ulong)afu, get_order(sizeof(struct afu)));
cfg->afu = NULL;
}
@@ -650,30 +503,16 @@ static void free_mem(struct cxlflash_cfg *cfg)
*
* Safe to call with AFU in a partially allocated/initialized state.
*
- * Cleans up all state associated with the command queue, and unmaps
+ * Waits for any active internal AFU commands to time out and then unmaps
* the MMIO space.
- *
- * - complete() will take care of commands we initiated (they'll be checked
- * in as part of the cleanup that occurs after the completion)
- *
- * - cmd_checkin() will take care of entries that we did not initiate and that
- * have not (and will not) complete because they are sitting on a [now stale]
- * hardware queue
*/
static void stop_afu(struct cxlflash_cfg *cfg)
{
- int i;
struct afu *afu = cfg->afu;
- struct afu_cmd *cmd;
if (likely(afu)) {
- for (i = 0; i < CXLFLASH_NUM_CMDS; i++) {
- cmd = &afu->cmd[i];
- complete(&cmd->cevent);
- if (!atomic_read(&cmd->free))
- cmd_checkin(cmd);
- }
-
+ while (atomic_read(&afu->cmds_active))
+ ssleep(1);
if (likely(afu->afu_map)) {
cxl_psa_unmap((void __iomem *)afu->afu_map);
afu->afu_map = NULL;
@@ -886,8 +725,6 @@ static void cxlflash_remove(struct pci_dev *pdev)
static int alloc_mem(struct cxlflash_cfg *cfg)
{
int rc = 0;
- int i;
- char *buf = NULL;
struct device *dev = &cfg->dev->dev;
/* AFU is ~12k, i.e. only one 64k page or up to four 4k pages */
@@ -901,25 +738,6 @@ static int alloc_mem(struct cxlflash_cfg *cfg)
}
cfg->afu->parent = cfg;
cfg->afu->afu_map = NULL;
-
- for (i = 0; i < CXLFLASH_NUM_CMDS; buf += CMD_BUFSIZE, i++) {
- if (!((u64)buf & (PAGE_SIZE - 1))) {
- buf = (void *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
- if (unlikely(!buf)) {
- dev_err(dev,
- "%s: Allocate command buffers fail!\n",
- __func__);
- rc = -ENOMEM;
- free_mem(cfg);
- goto out;
- }
- }
-
- cfg->afu->cmd[i].buf = buf;
- atomic_set(&cfg->afu->cmd[i].free, 1);
- cfg->afu->cmd[i].slot = i;
- }
-
out:
return rc;
}
@@ -1549,13 +1367,6 @@ static void init_pcr(struct cxlflash_cfg *cfg)
/* Program the Endian Control for the master context */
writeq_be(SISL_ENDIAN_CTRL, &afu->host_map->endian_ctrl);
-
- /* Initialize cmd fields that never change */
- for (i = 0; i < CXLFLASH_NUM_CMDS; i++) {
- afu->cmd[i].rcb.ctx_id = afu->ctx_hndl;
- afu->cmd[i].rcb.msi = SISL_MSI_RRQ_UPDATED;
- afu->cmd[i].rcb.rrq = 0x0;
- }
}
/**
@@ -1644,19 +1455,8 @@ out:
static int start_afu(struct cxlflash_cfg *cfg)
{
struct afu *afu = cfg->afu;
- struct afu_cmd *cmd;
-
- int i = 0;
int rc = 0;
- for (i = 0; i < CXLFLASH_NUM_CMDS; i++) {
- cmd = &afu->cmd[i];
-
- init_completion(&cmd->cevent);
- spin_lock_init(&cmd->slock);
- cmd->parent = afu;
- }
-
init_pcr(cfg);
/* After an AFU reset, RRQ entries are stale, clear them */
@@ -1829,6 +1629,9 @@ static int init_afu(struct cxlflash_cfg *cfg)
goto err2;
}
+ afu->send_cmd = send_cmd_ioarrin;
+ afu->context_reset = context_reset_ioarrin;
+
pr_debug("%s: afu version %s, interface version 0x%llX\n", __func__,
afu->version, afu->interface_version);
@@ -1840,7 +1643,8 @@ static int init_afu(struct cxlflash_cfg *cfg)
}
afu_err_intr_init(cfg->afu);
- atomic64_set(&afu->room, readq_be(&afu->host_map->cmd_room));
+ spin_lock_init(&afu->rrin_slock);
+ afu->room = readq_be(&afu->host_map->cmd_room);
/* Restore the LUN mappings */
cxlflash_restore_luntable(cfg);
@@ -1884,8 +1688,8 @@ int cxlflash_afu_sync(struct afu *afu, ctx_hndl_t ctx_hndl_u,
struct cxlflash_cfg *cfg = afu->parent;
struct device *dev = &cfg->dev->dev;
struct afu_cmd *cmd = NULL;
+ char *buf = NULL;
int rc = 0;
- int retry_cnt = 0;
static DEFINE_MUTEX(sync_active);
if (cfg->state != STATE_NORMAL) {
@@ -1894,27 +1698,23 @@ int cxlflash_afu_sync(struct afu *afu, ctx_hndl_t ctx_hndl_u,
}
mutex_lock(&sync_active);
-retry:
- cmd = cmd_checkout(afu);
- if (unlikely(!cmd)) {
- retry_cnt++;
- udelay(1000 * retry_cnt);
- if (retry_cnt < MC_RETRY_CNT)
- goto retry;
- dev_err(dev, "%s: could not get a free command\n", __func__);
+ atomic_inc(&afu->cmds_active);
+ buf = kzalloc(sizeof(*cmd) + __alignof__(*cmd) - 1, GFP_KERNEL);
+ if (unlikely(!buf)) {
+ dev_err(dev, "%s: no memory for command\n", __func__);
rc = -1;
goto out;
}
- pr_debug("%s: afu=%p cmd=%p %d\n", __func__, afu, cmd, ctx_hndl_u);
+ cmd = (struct afu_cmd *)PTR_ALIGN(buf, __alignof__(*cmd));
+ init_completion(&cmd->cevent);
+ cmd->parent = afu;
- memset(cmd->rcb.cdb, 0, sizeof(cmd->rcb.cdb));
+ pr_debug("%s: afu=%p cmd=%p %d\n", __func__, afu, cmd, ctx_hndl_u);
cmd->rcb.req_flags = SISL_REQ_FLAGS_AFU_CMD;
- cmd->rcb.port_sel = 0x0; /* NA */
- cmd->rcb.lun_id = 0x0; /* NA */
- cmd->rcb.data_len = 0x0;
- cmd->rcb.data_ea = 0x0;
+ cmd->rcb.ctx_id = afu->ctx_hndl;
+ cmd->rcb.msi = SISL_MSI_RRQ_UPDATED;
cmd->rcb.timeout = MC_AFU_SYNC_TIMEOUT;
cmd->rcb.cdb[0] = 0xC0; /* AFU Sync */
@@ -1924,20 +1724,17 @@ retry:
*((__be16 *)&cmd->rcb.cdb[2]) = cpu_to_be16(ctx_hndl_u);
*((__be32 *)&cmd->rcb.cdb[4]) = cpu_to_be32(res_hndl_u);
- rc = send_cmd(afu, cmd);
+ rc = afu->send_cmd(afu, cmd);
if (unlikely(rc))
goto out;
- wait_resp(afu, cmd);
-
- /* Set on timeout */
- if (unlikely((cmd->sa.ioasc != 0) ||
- (cmd->sa.host_use_b[0] & B_ERROR)))
+ rc = wait_resp(afu, cmd);
+ if (unlikely(rc))
rc = -1;
out:
+ atomic_dec(&afu->cmds_active);
mutex_unlock(&sync_active);
- if (cmd)
- cmd_checkin(cmd);
+ kfree(buf);
pr_debug("%s: returning rc=%d\n", __func__, rc);
return rc;
}
@@ -2376,8 +2173,9 @@ static struct scsi_host_template driver_template = {
.change_queue_depth = cxlflash_change_queue_depth,
.cmd_per_lun = CXLFLASH_MAX_CMDS_PER_LUN,
.can_queue = CXLFLASH_MAX_CMDS,
+ .cmd_size = sizeof(struct afu_cmd) + __alignof__(struct afu_cmd) - 1,
.this_id = -1,
- .sg_tablesize = SG_NONE, /* No scatter gather support */
+ .sg_tablesize = 1, /* No scatter gather support */
.max_sectors = CXLFLASH_MAX_SECTORS,
.use_clustering = ENABLE_CLUSTERING,
.shost_attrs = cxlflash_host_attrs,
@@ -2412,7 +2210,6 @@ MODULE_DEVICE_TABLE(pci, cxlflash_pci_table);
* Handles the following events:
* - Link reset which cannot be performed on interrupt context due to
* blocking up to a few seconds
- * - Read AFU command room
* - Rescan the host
*/
static void cxlflash_worker_thread(struct work_struct *work)
@@ -2449,11 +2246,6 @@ static void cxlflash_worker_thread(struct work_struct *work)
cfg->lr_state = LINK_RESET_COMPLETE;
}
- if (afu->read_room) {
- atomic64_set(&afu->room, readq_be(&afu->host_map->cmd_room));
- afu->read_room = false;
- }
-
spin_unlock_irqrestore(cfg->host->host_lock, lock_flags);
if (atomic_dec_if_positive(&cfg->scan_host_needed) >= 0)
diff --git a/drivers/scsi/cxlflash/sislite.h b/drivers/scsi/cxlflash/sislite.h
index 347fc1671975..1a2d09c148b3 100644
--- a/drivers/scsi/cxlflash/sislite.h
+++ b/drivers/scsi/cxlflash/sislite.h
@@ -72,7 +72,7 @@ struct sisl_ioarcb {
u16 timeout; /* in units specified by req_flags */
u32 rsvd1;
u8 cdb[16]; /* must be in big endian */
- struct scsi_cmnd *scp;
+ u64 reserved; /* Reserved area */
} __packed;
struct sisl_rc {
diff --git a/drivers/scsi/device_handler/scsi_dh_alua.c b/drivers/scsi/device_handler/scsi_dh_alua.c
index 7bb20684e9fa..d704752b6332 100644
--- a/drivers/scsi/device_handler/scsi_dh_alua.c
+++ b/drivers/scsi/device_handler/scsi_dh_alua.c
@@ -95,7 +95,7 @@ struct alua_port_group {
struct alua_dh_data {
struct list_head node;
- struct alua_port_group *pg;
+ struct alua_port_group __rcu *pg;
int group_id;
spinlock_t pg_lock;
struct scsi_device *sdev;
@@ -154,7 +154,8 @@ static int submit_rtpg(struct scsi_device *sdev, unsigned char *buff,
return scsi_execute_req_flags(sdev, cdb, DMA_FROM_DEVICE,
buff, bufflen, sshdr,
ALUA_FAILOVER_TIMEOUT * HZ,
- ALUA_FAILOVER_RETRIES, NULL, req_flags);
+ ALUA_FAILOVER_RETRIES, NULL,
+ req_flags, 0);
}
/*
@@ -187,7 +188,8 @@ static int submit_stpg(struct scsi_device *sdev, int group_id,
return scsi_execute_req_flags(sdev, cdb, DMA_TO_DEVICE,
stpg_data, stpg_len,
sshdr, ALUA_FAILOVER_TIMEOUT * HZ,
- ALUA_FAILOVER_RETRIES, NULL, req_flags);
+ ALUA_FAILOVER_RETRIES, NULL,
+ req_flags, 0);
}
static struct alua_port_group *alua_find_get_pg(char *id_str, size_t id_size,
@@ -369,7 +371,7 @@ static int alua_check_vpd(struct scsi_device *sdev, struct alua_dh_data *h,
/* Check for existing port group references */
spin_lock(&h->pg_lock);
- old_pg = h->pg;
+ old_pg = rcu_dereference_protected(h->pg, lockdep_is_held(&h->pg_lock));
if (old_pg != pg) {
/* port group has changed. Update to new port group */
if (h->pg) {
@@ -388,7 +390,9 @@ static int alua_check_vpd(struct scsi_device *sdev, struct alua_dh_data *h,
list_add_rcu(&h->node, &pg->dh_list);
spin_unlock_irqrestore(&pg->lock, flags);
- alua_rtpg_queue(h->pg, sdev, NULL, true);
+ alua_rtpg_queue(rcu_dereference_protected(h->pg,
+ lockdep_is_held(&h->pg_lock)),
+ sdev, NULL, true);
spin_unlock(&h->pg_lock);
if (old_pg)
@@ -940,7 +944,7 @@ static int alua_initialize(struct scsi_device *sdev, struct alua_dh_data *h)
static int alua_set_params(struct scsi_device *sdev, const char *params)
{
struct alua_dh_data *h = sdev->handler_data;
- struct alua_port_group __rcu *pg = NULL;
+ struct alua_port_group *pg = NULL;
unsigned int optimize = 0, argc;
const char *p = params;
int result = SCSI_DH_OK;
@@ -987,7 +991,7 @@ static int alua_activate(struct scsi_device *sdev,
struct alua_dh_data *h = sdev->handler_data;
int err = SCSI_DH_OK;
struct alua_queue_data *qdata;
- struct alua_port_group __rcu *pg;
+ struct alua_port_group *pg;
qdata = kzalloc(sizeof(*qdata), GFP_KERNEL);
if (!qdata) {
@@ -1051,7 +1055,7 @@ static void alua_check(struct scsi_device *sdev, bool force)
static int alua_prep_fn(struct scsi_device *sdev, struct request *req)
{
struct alua_dh_data *h = sdev->handler_data;
- struct alua_port_group __rcu *pg;
+ struct alua_port_group *pg;
unsigned char state = SCSI_ACCESS_STATE_OPTIMAL;
int ret = BLKPREP_OK;
@@ -1066,7 +1070,7 @@ static int alua_prep_fn(struct scsi_device *sdev, struct request *req)
state != SCSI_ACCESS_STATE_ACTIVE &&
state != SCSI_ACCESS_STATE_LBA) {
ret = BLKPREP_KILL;
- req->cmd_flags |= REQ_QUIET;
+ req->rq_flags |= RQF_QUIET;
}
return ret;
@@ -1121,7 +1125,7 @@ static void alua_bus_detach(struct scsi_device *sdev)
struct alua_port_group *pg;
spin_lock(&h->pg_lock);
- pg = h->pg;
+ pg = rcu_dereference_protected(h->pg, lockdep_is_held(&h->pg_lock));
rcu_assign_pointer(h->pg, NULL);
h->sdev = NULL;
spin_unlock(&h->pg_lock);
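The __rcu annotation adopted above makes sparse enforce that h->pg is only read through RCU accessors: update-side reads under pg_lock become rcu_dereference_protected() with a lockdep expression, and publication goes through rcu_assign_pointer(). A compact sketch of the pattern, with illustrative demo_* names:

#include <linux/rcupdate.h>
#include <linux/spinlock.h>

struct demo_pg {
	int group_id;
};

struct demo_dh_data {
	struct demo_pg __rcu *pg;
	spinlock_t pg_lock;
};

static struct demo_pg *demo_detach_pg(struct demo_dh_data *h)
{
	struct demo_pg *pg;

	spin_lock(&h->pg_lock);
	/* Safe without rcu_read_lock(): the update-side lock is held. */
	pg = rcu_dereference_protected(h->pg, lockdep_is_held(&h->pg_lock));
	rcu_assign_pointer(h->pg, NULL);
	spin_unlock(&h->pg_lock);

	/* Caller frees pg only after a grace period, e.g. via kfree_rcu(). */
	return pg;
}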
diff --git a/drivers/scsi/device_handler/scsi_dh_emc.c b/drivers/scsi/device_handler/scsi_dh_emc.c
index 375d81850f15..5b80746980b8 100644
--- a/drivers/scsi/device_handler/scsi_dh_emc.c
+++ b/drivers/scsi/device_handler/scsi_dh_emc.c
@@ -452,7 +452,7 @@ static int clariion_prep_fn(struct scsi_device *sdev, struct request *req)
if (h->lun_state != CLARIION_LUN_OWNED) {
ret = BLKPREP_KILL;
- req->cmd_flags |= REQ_QUIET;
+ req->rq_flags |= RQF_QUIET;
}
return ret;
diff --git a/drivers/scsi/device_handler/scsi_dh_hp_sw.c b/drivers/scsi/device_handler/scsi_dh_hp_sw.c
index 9406d5f4a3d3..308e87195dc1 100644
--- a/drivers/scsi/device_handler/scsi_dh_hp_sw.c
+++ b/drivers/scsi/device_handler/scsi_dh_hp_sw.c
@@ -266,7 +266,7 @@ static int hp_sw_prep_fn(struct scsi_device *sdev, struct request *req)
if (h->path_state != HP_SW_PATH_ACTIVE) {
ret = BLKPREP_KILL;
- req->cmd_flags |= REQ_QUIET;
+ req->rq_flags |= RQF_QUIET;
}
return ret;
diff --git a/drivers/scsi/device_handler/scsi_dh_rdac.c b/drivers/scsi/device_handler/scsi_dh_rdac.c
index 06fbd0b0c68a..00d9c326158e 100644
--- a/drivers/scsi/device_handler/scsi_dh_rdac.c
+++ b/drivers/scsi/device_handler/scsi_dh_rdac.c
@@ -724,7 +724,7 @@ static int rdac_prep_fn(struct scsi_device *sdev, struct request *req)
if (h->state != RDAC_STATE_ACTIVE) {
ret = BLKPREP_KILL;
- req->cmd_flags |= REQ_QUIET;
+ req->rq_flags |= RQF_QUIET;
}
return ret;
diff --git a/drivers/scsi/dmx3191d.c b/drivers/scsi/dmx3191d.c
index 9b5a457d4bca..6af3394d051d 100644
--- a/drivers/scsi/dmx3191d.c
+++ b/drivers/scsi/dmx3191d.c
@@ -34,13 +34,13 @@
* Definitions for the generic 5380 driver.
*/
-#define NCR5380_read(reg) inb(instance->io_port + reg)
-#define NCR5380_write(reg, value) outb(value, instance->io_port + reg)
+#define NCR5380_read(reg) inb(hostdata->base + (reg))
+#define NCR5380_write(reg, value) outb(value, hostdata->base + (reg))
-#define NCR5380_dma_xfer_len(instance, cmd, phase) (0)
-#define NCR5380_dma_recv_setup(instance, dst, len) (0)
-#define NCR5380_dma_send_setup(instance, src, len) (0)
-#define NCR5380_dma_residual(instance) (0)
+#define NCR5380_dma_xfer_len NCR5380_dma_xfer_none
+#define NCR5380_dma_recv_setup NCR5380_dma_setup_none
+#define NCR5380_dma_send_setup NCR5380_dma_setup_none
+#define NCR5380_dma_residual NCR5380_dma_residual_none
#define NCR5380_implementation_fields /* none */
@@ -71,6 +71,7 @@ static int dmx3191d_probe_one(struct pci_dev *pdev,
const struct pci_device_id *id)
{
struct Scsi_Host *shost;
+ struct NCR5380_hostdata *hostdata;
unsigned long io;
int error = -ENODEV;
@@ -88,7 +89,9 @@ static int dmx3191d_probe_one(struct pci_dev *pdev,
sizeof(struct NCR5380_hostdata));
if (!shost)
goto out_release_region;
- shost->io_port = io;
+
+ hostdata = shost_priv(shost);
+ hostdata->base = io;
/* This card does not seem to raise an interrupt on pdev->irq.
* Steam-powered SCSI controllers run without an IRQ anyway.
@@ -125,7 +128,8 @@ out_host_put:
static void dmx3191d_remove_one(struct pci_dev *pdev)
{
struct Scsi_Host *shost = pci_get_drvdata(pdev);
- unsigned long io = shost->io_port;
+ struct NCR5380_hostdata *hostdata = shost_priv(shost);
+ unsigned long io = hostdata->base;
scsi_remove_host(shost);
@@ -149,18 +153,7 @@ static struct pci_driver dmx3191d_pci_driver = {
.remove = dmx3191d_remove_one,
};
-static int __init dmx3191d_init(void)
-{
- return pci_register_driver(&dmx3191d_pci_driver);
-}
-
-static void __exit dmx3191d_exit(void)
-{
- pci_unregister_driver(&dmx3191d_pci_driver);
-}
-
-module_init(dmx3191d_init);
-module_exit(dmx3191d_exit);
+module_pci_driver(dmx3191d_pci_driver);
MODULE_AUTHOR("Massimo Piccioni <dafastidio@libero.it>");
MODULE_DESCRIPTION("Domex DMX3191D SCSI driver");
diff --git a/drivers/scsi/dpt_i2o.c b/drivers/scsi/dpt_i2o.c
index 21c8d210c456..27c0dce22e72 100644
--- a/drivers/scsi/dpt_i2o.c
+++ b/drivers/scsi/dpt_i2o.c
@@ -651,7 +651,6 @@ static u32 adpt_ioctl_to_context(adpt_hba * pHba, void *reply)
}
spin_unlock_irqrestore(pHba->host->host_lock, flags);
if (i >= nr) {
- kfree (reply);
printk(KERN_WARNING"%s: Too many outstanding "
"ioctl commands\n", pHba->name);
return (u32)-1;
@@ -1754,8 +1753,10 @@ static int adpt_i2o_passthru(adpt_hba* pHba, u32 __user *arg)
sg_offset = (msg[0]>>4)&0xf;
msg[2] = 0x40000000; // IOCTL context
msg[3] = adpt_ioctl_to_context(pHba, reply);
- if (msg[3] == (u32)-1)
+ if (msg[3] == (u32)-1) {
+ kfree(reply);
return -EBUSY;
+ }
memset(sg_list,0, sizeof(sg_list[0])*pHba->sg_tablesize);
if(sg_offset) {
@@ -3350,7 +3351,7 @@ static int adpt_i2o_query_scalar(adpt_hba* pHba, int tid,
if (opblk_va == NULL) {
dma_free_coherent(&pHba->pDev->dev, sizeof(u8) * (8+buflen),
resblk_va, resblk_pa);
- printk(KERN_CRIT "%s: query operatio failed; Out of memory.\n",
+ printk(KERN_CRIT "%s: query operation failed; Out of memory.\n",
pHba->name);
return -ENOMEM;
}
diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c
index 9bd41a35a78a..59150cad0353 100644
--- a/drivers/scsi/fcoe/fcoe.c
+++ b/drivers/scsi/fcoe/fcoe.c
@@ -63,6 +63,14 @@ unsigned int fcoe_debug_logging;
module_param_named(debug_logging, fcoe_debug_logging, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(debug_logging, "a bit mask of logging levels");
+unsigned int fcoe_e_d_tov = 2 * 1000;
+module_param_named(e_d_tov, fcoe_e_d_tov, int, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(e_d_tov, "E_D_TOV in ms, default 2000");
+
+unsigned int fcoe_r_a_tov = 2 * 2 * 1000;
+module_param_named(r_a_tov, fcoe_r_a_tov, int, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(r_a_tov, "R_A_TOV in ms, default 4000");
+
static DEFINE_MUTEX(fcoe_config_mutex);
static struct workqueue_struct *fcoe_wq;
@@ -582,7 +590,8 @@ static void fcoe_fip_send(struct fcoe_ctlr *fip, struct sk_buff *skb)
* Use default VLAN for FIP VLAN discovery protocol
*/
frame = (struct fip_frame *)skb->data;
- if (frame->fip.fip_op == ntohs(FIP_OP_VLAN) &&
+ if (ntohs(frame->eth.h_proto) == ETH_P_FIP &&
+ ntohs(frame->fip.fip_op) == FIP_OP_VLAN &&
fcoe->realdev != fcoe->netdev)
skb->dev = fcoe->realdev;
else
@@ -633,8 +642,8 @@ static int fcoe_lport_config(struct fc_lport *lport)
lport->qfull = 0;
lport->max_retry_count = 3;
lport->max_rport_retry_count = 3;
- lport->e_d_tov = 2 * 1000; /* FC-FS default */
- lport->r_a_tov = 2 * 2 * 1000;
+ lport->e_d_tov = fcoe_e_d_tov;
+ lport->r_a_tov = fcoe_r_a_tov;
lport->service_params = (FCP_SPPF_INIT_FCN | FCP_SPPF_RD_XRDY_DIS |
FCP_SPPF_RETRY | FCP_SPPF_CONF_COMPL);
lport->does_npiv = 1;
@@ -2160,11 +2169,13 @@ static bool fcoe_match(struct net_device *netdev)
*/
static void fcoe_dcb_create(struct fcoe_interface *fcoe)
{
+ int ctlr_prio = TC_PRIO_BESTEFFORT;
+ int fcoe_prio = TC_PRIO_INTERACTIVE;
+ struct fcoe_ctlr *ctlr = fcoe_to_ctlr(fcoe);
#ifdef CONFIG_DCB
int dcbx;
u8 fup, up;
struct net_device *netdev = fcoe->realdev;
- struct fcoe_ctlr *ctlr = fcoe_to_ctlr(fcoe);
struct dcb_app app = {
.priority = 0,
.protocol = ETH_P_FCOE
@@ -2186,10 +2197,12 @@ static void fcoe_dcb_create(struct fcoe_interface *fcoe)
fup = dcb_getapp(netdev, &app);
}
- fcoe->priority = ffs(up) ? ffs(up) - 1 : 0;
- ctlr->priority = ffs(fup) ? ffs(fup) - 1 : fcoe->priority;
+ fcoe_prio = ffs(up) ? ffs(up) - 1 : 0;
+ ctlr_prio = ffs(fup) ? ffs(fup) - 1 : fcoe_prio;
}
#endif
+ fcoe->priority = fcoe_prio;
+ ctlr->priority = ctlr_prio;
}
enum fcoe_create_link_state {
diff --git a/drivers/scsi/fcoe/fcoe_ctlr.c b/drivers/scsi/fcoe/fcoe_ctlr.c
index dcf36537a767..cea57e27e713 100644
--- a/drivers/scsi/fcoe/fcoe_ctlr.c
+++ b/drivers/scsi/fcoe/fcoe_ctlr.c
@@ -801,6 +801,8 @@ int fcoe_ctlr_els_send(struct fcoe_ctlr *fip, struct fc_lport *lport,
return -EINPROGRESS;
drop:
kfree_skb(skb);
+ LIBFCOE_FIP_DBG(fip, "drop els_send op %u d_id %x\n",
+ op, ntoh24(fh->fh_d_id));
return -EINVAL;
}
EXPORT_SYMBOL(fcoe_ctlr_els_send);
@@ -1316,7 +1318,7 @@ drop:
* The overall length has already been checked.
*/
static void fcoe_ctlr_recv_clr_vlink(struct fcoe_ctlr *fip,
- struct fip_header *fh)
+ struct sk_buff *skb)
{
struct fip_desc *desc;
struct fip_mac_desc *mp;
@@ -1331,14 +1333,18 @@ static void fcoe_ctlr_recv_clr_vlink(struct fcoe_ctlr *fip,
int num_vlink_desc;
int reset_phys_port = 0;
struct fip_vn_desc **vlink_desc_arr = NULL;
+ struct fip_header *fh = (struct fip_header *)skb->data;
+ struct ethhdr *eh = eth_hdr(skb);
LIBFCOE_FIP_DBG(fip, "Clear Virtual Link received\n");
- if (!fcf || !lport->port_id) {
+ if (!fcf) {
/*
* We are yet to select best FCF, but we got CVL in the
* meantime. reset the ctlr and let it rediscover the FCF
*/
+ LIBFCOE_FIP_DBG(fip, "Resetting fcoe_ctlr as FCF has not been "
+ "selected yet\n");
mutex_lock(&fip->ctlr_mutex);
fcoe_ctlr_reset(fip);
mutex_unlock(&fip->ctlr_mutex);
@@ -1346,6 +1352,31 @@ static void fcoe_ctlr_recv_clr_vlink(struct fcoe_ctlr *fip,
}
/*
+ * If we've selected an FCF check that the CVL is from there to avoid
+ * processing CVLs from an unexpected source. If it is from an
+ * unexpected source drop it on the floor.
+ */
+ if (!ether_addr_equal(eh->h_source, fcf->fcf_mac)) {
+ LIBFCOE_FIP_DBG(fip, "Dropping CVL due to source address "
+ "mismatch with FCF src=%pM\n", eh->h_source);
+ return;
+ }
+
+ /*
+ * If we haven't logged into the fabric but receive a CVL we should
+ * reset everything and go back to solicitation.
+ */
+ if (!lport->port_id) {
+ LIBFCOE_FIP_DBG(fip, "lport not logged in, resoliciting\n");
+ mutex_lock(&fip->ctlr_mutex);
+ fcoe_ctlr_reset(fip);
+ mutex_unlock(&fip->ctlr_mutex);
+ fc_lport_reset(fip->lp);
+ fcoe_ctlr_solicit(fip, NULL);
+ return;
+ }
+
+ /*
* mask of required descriptors. Validating each one clears its bit.
*/
desc_mask = BIT(FIP_DT_MAC) | BIT(FIP_DT_NAME);
@@ -1576,7 +1607,7 @@ static int fcoe_ctlr_recv_handler(struct fcoe_ctlr *fip, struct sk_buff *skb)
if (op == FIP_OP_DISC && sub == FIP_SC_ADV)
fcoe_ctlr_recv_adv(fip, skb);
else if (op == FIP_OP_CTRL && sub == FIP_SC_CLR_VLINK)
- fcoe_ctlr_recv_clr_vlink(fip, fiph);
+ fcoe_ctlr_recv_clr_vlink(fip, skb);
kfree_skb(skb);
return 0;
drop:
@@ -2122,7 +2153,7 @@ static void fcoe_ctlr_vn_rport_callback(struct fc_lport *lport,
LIBFCOE_FIP_DBG(fip,
"rport FLOGI limited port_id %6.6x\n",
rdata->ids.port_id);
- lport->tt.rport_logoff(rdata);
+ fc_rport_logoff(rdata);
}
break;
default:
@@ -2145,9 +2176,15 @@ static void fcoe_ctlr_disc_stop_locked(struct fc_lport *lport)
{
struct fc_rport_priv *rdata;
+ rcu_read_lock();
+ list_for_each_entry_rcu(rdata, &lport->disc.rports, peers) {
+ if (kref_get_unless_zero(&rdata->kref)) {
+ fc_rport_logoff(rdata);
+ kref_put(&rdata->kref, fc_rport_destroy);
+ }
+ }
+ rcu_read_unlock();
mutex_lock(&lport->disc.disc_mutex);
- list_for_each_entry_rcu(rdata, &lport->disc.rports, peers)
- lport->tt.rport_logoff(rdata);
lport->disc.disc_callback = NULL;
mutex_unlock(&lport->disc.disc_mutex);
}
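This rework (repeated below for the beacon-aging and discovery walks) replaces holding disc_mutex across the rport list with an RCU traversal that takes a reference per entry only while its refcount is still nonzero, so entries mid-teardown are skipped instead of resurrected. A minimal sketch of the traversal, with hypothetical demo_* names:

#include <linux/kref.h>
#include <linux/rculist.h>
#include <linux/slab.h>

struct demo_rport {
	struct list_head peers;
	struct kref kref;
	struct rcu_head rcu;
};

static void demo_rport_destroy(struct kref *kref)
{
	struct demo_rport *rp = container_of(kref, struct demo_rport, kref);

	kfree_rcu(rp, rcu);	/* defer the free past existing RCU readers */
}

static void demo_act_on_all(struct list_head *rports)
{
	struct demo_rport *rp;

	rcu_read_lock();
	list_for_each_entry_rcu(rp, rports, peers) {
		/* Skip entries racing with their final kref_put(). */
		if (!kref_get_unless_zero(&rp->kref))
			continue;
		/* ... act on rp while holding a reference ... */
		kref_put(&rp->kref, demo_rport_destroy);
	}
	rcu_read_unlock();
}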
@@ -2178,7 +2215,7 @@ static void fcoe_ctlr_disc_stop(struct fc_lport *lport)
static void fcoe_ctlr_disc_stop_final(struct fc_lport *lport)
{
fcoe_ctlr_disc_stop(lport);
- lport->tt.rport_flush_queue();
+ fc_rport_flush_queue();
synchronize_rcu();
}
@@ -2393,6 +2430,8 @@ static void fcoe_ctlr_vn_probe_req(struct fcoe_ctlr *fip,
switch (fip->state) {
case FIP_ST_VNMP_CLAIM:
case FIP_ST_VNMP_UP:
+ LIBFCOE_FIP_DBG(fip, "vn_probe_req: send reply, state %x\n",
+ fip->state);
fcoe_ctlr_vn_send(fip, FIP_SC_VN_PROBE_REP,
frport->enode_mac, 0);
break;
@@ -2407,15 +2446,21 @@ static void fcoe_ctlr_vn_probe_req(struct fcoe_ctlr *fip,
*/
if (fip->lp->wwpn > rdata->ids.port_name &&
!(frport->flags & FIP_FL_REC_OR_P2P)) {
+ LIBFCOE_FIP_DBG(fip, "vn_probe_req: "
+ "port_id collision\n");
fcoe_ctlr_vn_send(fip, FIP_SC_VN_PROBE_REP,
frport->enode_mac, 0);
break;
}
/* fall through */
case FIP_ST_VNMP_START:
+ LIBFCOE_FIP_DBG(fip, "vn_probe_req: "
+ "restart VN2VN negotiation\n");
fcoe_ctlr_vn_restart(fip);
break;
default:
+ LIBFCOE_FIP_DBG(fip, "vn_probe_req: ignore state %x\n",
+ fip->state);
break;
}
}
@@ -2437,9 +2482,12 @@ static void fcoe_ctlr_vn_probe_reply(struct fcoe_ctlr *fip,
case FIP_ST_VNMP_PROBE1:
case FIP_ST_VNMP_PROBE2:
case FIP_ST_VNMP_CLAIM:
+ LIBFCOE_FIP_DBG(fip, "vn_probe_reply: restart state %x\n",
+ fip->state);
fcoe_ctlr_vn_restart(fip);
break;
case FIP_ST_VNMP_UP:
+ LIBFCOE_FIP_DBG(fip, "vn_probe_reply: send claim notify\n");
fcoe_ctlr_vn_send_claim(fip);
break;
default:
@@ -2467,26 +2515,33 @@ static void fcoe_ctlr_vn_add(struct fcoe_ctlr *fip, struct fc_rport_priv *new)
return;
mutex_lock(&lport->disc.disc_mutex);
- rdata = lport->tt.rport_create(lport, port_id);
+ rdata = fc_rport_create(lport, port_id);
if (!rdata) {
mutex_unlock(&lport->disc.disc_mutex);
return;
}
+ mutex_lock(&rdata->rp_mutex);
+ mutex_unlock(&lport->disc.disc_mutex);
rdata->ops = &fcoe_ctlr_vn_rport_ops;
rdata->disc_id = lport->disc.disc_id;
ids = &rdata->ids;
if ((ids->port_name != -1 && ids->port_name != new->ids.port_name) ||
- (ids->node_name != -1 && ids->node_name != new->ids.node_name))
- lport->tt.rport_logoff(rdata);
+ (ids->node_name != -1 && ids->node_name != new->ids.node_name)) {
+ mutex_unlock(&rdata->rp_mutex);
+ LIBFCOE_FIP_DBG(fip, "vn_add rport logoff %6.6x\n", port_id);
+ fc_rport_logoff(rdata);
+ mutex_lock(&rdata->rp_mutex);
+ }
ids->port_name = new->ids.port_name;
ids->node_name = new->ids.node_name;
- mutex_unlock(&lport->disc.disc_mutex);
+ mutex_unlock(&rdata->rp_mutex);
frport = fcoe_ctlr_rport(rdata);
- LIBFCOE_FIP_DBG(fip, "vn_add rport %6.6x %s\n",
- port_id, frport->fcoe_len ? "old" : "new");
+ LIBFCOE_FIP_DBG(fip, "vn_add rport %6.6x %s state %d\n",
+ port_id, frport->fcoe_len ? "old" : "new",
+ rdata->rp_state);
*frport = *fcoe_ctlr_rport(new);
frport->time = 0;
}
@@ -2506,12 +2561,12 @@ static int fcoe_ctlr_vn_lookup(struct fcoe_ctlr *fip, u32 port_id, u8 *mac)
struct fcoe_rport *frport;
int ret = -1;
- rdata = lport->tt.rport_lookup(lport, port_id);
+ rdata = fc_rport_lookup(lport, port_id);
if (rdata) {
frport = fcoe_ctlr_rport(rdata);
memcpy(mac, frport->enode_mac, ETH_ALEN);
ret = 0;
- kref_put(&rdata->kref, lport->tt.rport_destroy);
+ kref_put(&rdata->kref, fc_rport_destroy);
}
return ret;
}
@@ -2529,6 +2584,7 @@ static void fcoe_ctlr_vn_claim_notify(struct fcoe_ctlr *fip,
struct fcoe_rport *frport = fcoe_ctlr_rport(new);
if (frport->flags & FIP_FL_REC_OR_P2P) {
+ LIBFCOE_FIP_DBG(fip, "send probe req for P2P/REC\n");
fcoe_ctlr_vn_send(fip, FIP_SC_VN_PROBE_REQ, fcoe_all_vn2vn, 0);
return;
}
@@ -2536,25 +2592,37 @@ static void fcoe_ctlr_vn_claim_notify(struct fcoe_ctlr *fip,
case FIP_ST_VNMP_START:
case FIP_ST_VNMP_PROBE1:
case FIP_ST_VNMP_PROBE2:
- if (new->ids.port_id == fip->port_id)
+ if (new->ids.port_id == fip->port_id) {
+ LIBFCOE_FIP_DBG(fip, "vn_claim_notify: "
+ "restart, state %d\n",
+ fip->state);
fcoe_ctlr_vn_restart(fip);
+ }
break;
case FIP_ST_VNMP_CLAIM:
case FIP_ST_VNMP_UP:
if (new->ids.port_id == fip->port_id) {
if (new->ids.port_name > fip->lp->wwpn) {
+ LIBFCOE_FIP_DBG(fip, "vn_claim_notify: "
+ "restart, port_id collision\n");
fcoe_ctlr_vn_restart(fip);
break;
}
+ LIBFCOE_FIP_DBG(fip, "vn_claim_notify: "
+ "send claim notify\n");
fcoe_ctlr_vn_send_claim(fip);
break;
}
+ LIBFCOE_FIP_DBG(fip, "vn_claim_notify: send reply to %x\n",
+ new->ids.port_id);
fcoe_ctlr_vn_send(fip, FIP_SC_VN_CLAIM_REP, frport->enode_mac,
min((u32)frport->fcoe_len,
fcoe_ctlr_fcoe_size(fip)));
fcoe_ctlr_vn_add(fip, new);
break;
default:
+ LIBFCOE_FIP_DBG(fip, "vn_claim_notify: "
+ "ignoring claim from %x\n", new->ids.port_id);
break;
}
}
@@ -2591,19 +2659,26 @@ static void fcoe_ctlr_vn_beacon(struct fcoe_ctlr *fip,
frport = fcoe_ctlr_rport(new);
if (frport->flags & FIP_FL_REC_OR_P2P) {
+ LIBFCOE_FIP_DBG(fip, "p2p beacon while in vn2vn mode\n");
fcoe_ctlr_vn_send(fip, FIP_SC_VN_PROBE_REQ, fcoe_all_vn2vn, 0);
return;
}
- rdata = lport->tt.rport_lookup(lport, new->ids.port_id);
+ rdata = fc_rport_lookup(lport, new->ids.port_id);
if (rdata) {
if (rdata->ids.node_name == new->ids.node_name &&
rdata->ids.port_name == new->ids.port_name) {
frport = fcoe_ctlr_rport(rdata);
- if (!frport->time && fip->state == FIP_ST_VNMP_UP)
- lport->tt.rport_login(rdata);
+ LIBFCOE_FIP_DBG(fip, "beacon from rport %x\n",
+ rdata->ids.port_id);
+ if (!frport->time && fip->state == FIP_ST_VNMP_UP) {
+ LIBFCOE_FIP_DBG(fip, "beacon expired "
+ "for rport %x\n",
+ rdata->ids.port_id);
+ fc_rport_login(rdata);
+ }
frport->time = jiffies;
}
- kref_put(&rdata->kref, lport->tt.rport_destroy);
+ kref_put(&rdata->kref, fc_rport_destroy);
return;
}
if (fip->state != FIP_ST_VNMP_UP)
@@ -2638,11 +2713,15 @@ static unsigned long fcoe_ctlr_vn_age(struct fcoe_ctlr *fip)
unsigned long deadline;
next_time = jiffies + msecs_to_jiffies(FIP_VN_BEACON_INT * 10);
- mutex_lock(&lport->disc.disc_mutex);
+ rcu_read_lock();
list_for_each_entry_rcu(rdata, &lport->disc.rports, peers) {
+ if (!kref_get_unless_zero(&rdata->kref))
+ continue;
frport = fcoe_ctlr_rport(rdata);
- if (!frport->time)
+ if (!frport->time) {
+ kref_put(&rdata->kref, fc_rport_destroy);
continue;
+ }
deadline = frport->time +
msecs_to_jiffies(FIP_VN_BEACON_INT * 25 / 10);
if (time_after_eq(jiffies, deadline)) {
@@ -2650,11 +2729,12 @@ static unsigned long fcoe_ctlr_vn_age(struct fcoe_ctlr *fip)
LIBFCOE_FIP_DBG(fip,
"port %16.16llx fc_id %6.6x beacon expired\n",
rdata->ids.port_name, rdata->ids.port_id);
- lport->tt.rport_logoff(rdata);
+ fc_rport_logoff(rdata);
} else if (time_before(deadline, next_time))
next_time = deadline;
+ kref_put(&rdata->kref, fc_rport_destroy);
}
- mutex_unlock(&lport->disc.disc_mutex);
+ rcu_read_unlock();
return next_time;
}
@@ -2674,11 +2754,21 @@ static int fcoe_ctlr_vn_recv(struct fcoe_ctlr *fip, struct sk_buff *skb)
struct fc_rport_priv rdata;
struct fcoe_rport frport;
} buf;
- int rc;
+ int rc, vlan_id = 0;
fiph = (struct fip_header *)skb->data;
sub = fiph->fip_subcode;
+ if (fip->lp->vlan)
+ vlan_id = skb_vlan_tag_get_id(skb);
+
+ if (vlan_id && vlan_id != fip->lp->vlan) {
+ LIBFCOE_FIP_DBG(fip, "vn_recv drop frame sub %x vlan %d\n",
+ sub, vlan_id);
+ rc = -EAGAIN;
+ goto drop;
+ }
+
rc = fcoe_ctlr_vn_parse(fip, skb, &buf.rdata);
if (rc) {
LIBFCOE_FIP_DBG(fip, "vn_recv vn_parse error %d\n", rc);
@@ -2941,7 +3031,7 @@ static void fcoe_ctlr_disc_recv(struct fc_lport *lport, struct fc_frame *fp)
rjt_data.reason = ELS_RJT_UNSUP;
rjt_data.explan = ELS_EXPL_NONE;
- lport->tt.seq_els_rsp_send(fp, ELS_LS_RJT, &rjt_data);
+ fc_seq_els_rsp_send(fp, ELS_LS_RJT, &rjt_data);
fc_frame_free(fp);
}
@@ -2991,12 +3081,17 @@ static void fcoe_ctlr_vn_disc(struct fcoe_ctlr *fip)
mutex_lock(&disc->disc_mutex);
callback = disc->pending ? disc->disc_callback : NULL;
disc->pending = 0;
+ mutex_unlock(&disc->disc_mutex);
+ rcu_read_lock();
list_for_each_entry_rcu(rdata, &disc->rports, peers) {
+ if (!kref_get_unless_zero(&rdata->kref))
+ continue;
frport = fcoe_ctlr_rport(rdata);
if (frport->time)
- lport->tt.rport_login(rdata);
+ fc_rport_login(rdata);
+ kref_put(&rdata->kref, fc_rport_destroy);
}
- mutex_unlock(&disc->disc_mutex);
+ rcu_read_unlock();
if (callback)
callback(lport, DISC_EV_SUCCESS);
}
@@ -3015,11 +3110,13 @@ static void fcoe_ctlr_vn_timeout(struct fcoe_ctlr *fip)
switch (fip->state) {
case FIP_ST_VNMP_START:
fcoe_ctlr_set_state(fip, FIP_ST_VNMP_PROBE1);
+ LIBFCOE_FIP_DBG(fip, "vn_timeout: send 1st probe request\n");
fcoe_ctlr_vn_send(fip, FIP_SC_VN_PROBE_REQ, fcoe_all_vn2vn, 0);
next_time = jiffies + msecs_to_jiffies(FIP_VN_PROBE_WAIT);
break;
case FIP_ST_VNMP_PROBE1:
fcoe_ctlr_set_state(fip, FIP_ST_VNMP_PROBE2);
+ LIBFCOE_FIP_DBG(fip, "vn_timeout: send 2nd probe request\n");
fcoe_ctlr_vn_send(fip, FIP_SC_VN_PROBE_REQ, fcoe_all_vn2vn, 0);
next_time = jiffies + msecs_to_jiffies(FIP_VN_ANN_WAIT);
break;
@@ -3030,6 +3127,7 @@ static void fcoe_ctlr_vn_timeout(struct fcoe_ctlr *fip)
hton24(mac + 3, new_port_id);
fcoe_ctlr_map_dest(fip);
fip->update_mac(fip->lp, mac);
+ LIBFCOE_FIP_DBG(fip, "vn_timeout: send claim notify\n");
fcoe_ctlr_vn_send_claim(fip);
next_time = jiffies + msecs_to_jiffies(FIP_VN_ANN_WAIT);
break;
@@ -3041,6 +3139,7 @@ static void fcoe_ctlr_vn_timeout(struct fcoe_ctlr *fip)
next_time = fip->sol_time + msecs_to_jiffies(FIP_VN_ANN_WAIT);
if (time_after_eq(jiffies, next_time)) {
fcoe_ctlr_set_state(fip, FIP_ST_VNMP_UP);
+ LIBFCOE_FIP_DBG(fip, "vn_timeout: send vn2vn beacon\n");
fcoe_ctlr_vn_send(fip, FIP_SC_VN_BEACON,
fcoe_all_vn2vn, 0);
next_time = jiffies + msecs_to_jiffies(FIP_VN_ANN_WAIT);
@@ -3051,6 +3150,7 @@ static void fcoe_ctlr_vn_timeout(struct fcoe_ctlr *fip)
case FIP_ST_VNMP_UP:
next_time = fcoe_ctlr_vn_age(fip);
if (time_after_eq(jiffies, fip->port_ka_time)) {
+ LIBFCOE_FIP_DBG(fip, "vn_timeout: send vn2vn beacon\n");
fcoe_ctlr_vn_send(fip, FIP_SC_VN_BEACON,
fcoe_all_vn2vn, 0);
fip->port_ka_time = jiffies +
@@ -3135,7 +3235,6 @@ int fcoe_libfc_config(struct fc_lport *lport, struct fcoe_ctlr *fip,
fc_exch_init(lport);
fc_elsct_init(lport);
fc_lport_init(lport);
- fc_rport_init(lport);
fc_disc_init(lport);
fcoe_ctlr_mode_set(lport, fip, fip->mode);
return 0;
diff --git a/drivers/scsi/fcoe/fcoe_sysfs.c b/drivers/scsi/fcoe/fcoe_sysfs.c
index 0675fd128734..9cf3d56296ab 100644
--- a/drivers/scsi/fcoe/fcoe_sysfs.c
+++ b/drivers/scsi/fcoe/fcoe_sysfs.c
@@ -335,16 +335,24 @@ static ssize_t store_ctlr_enabled(struct device *dev,
const char *buf, size_t count)
{
struct fcoe_ctlr_device *ctlr = dev_to_ctlr(dev);
+ bool enabled;
int rc;
+ if (*buf == '1')
+ enabled = true;
+ else if (*buf == '0')
+ enabled = false;
+ else
+ return -EINVAL;
+
switch (ctlr->enabled) {
case FCOE_CTLR_ENABLED:
- if (*buf == '1')
+ if (enabled)
return count;
ctlr->enabled = FCOE_CTLR_DISABLED;
break;
case FCOE_CTLR_DISABLED:
- if (*buf == '0')
+ if (!enabled)
return count;
ctlr->enabled = FCOE_CTLR_ENABLED;
break;
@@ -424,6 +432,75 @@ static FCOE_DEVICE_ATTR(ctlr, fip_vlan_responder, S_IRUGO | S_IWUSR,
store_ctlr_fip_resp);
static ssize_t
+fcoe_ctlr_var_store(u32 *var, const char *buf, size_t count)
+{
+ int err;
+ unsigned long v;
+
+ err = kstrtoul(buf, 10, &v);
+ if (err || v > UINT_MAX)
+ return -EINVAL;
+
+ *var = v;
+
+ return count;
+}
+
+static ssize_t store_ctlr_r_a_tov(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct fcoe_ctlr_device *ctlr_dev = dev_to_ctlr(dev);
+ struct fcoe_ctlr *ctlr = fcoe_ctlr_device_priv(ctlr_dev);
+
+ if (ctlr_dev->enabled == FCOE_CTLR_ENABLED)
+ return -EBUSY;
+ if (ctlr_dev->enabled == FCOE_CTLR_DISABLED)
+ return fcoe_ctlr_var_store(&ctlr->lp->r_a_tov, buf, count);
+ return -ENOTSUPP;
+}
+
+static ssize_t show_ctlr_r_a_tov(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct fcoe_ctlr_device *ctlr_dev = dev_to_ctlr(dev);
+ struct fcoe_ctlr *ctlr = fcoe_ctlr_device_priv(ctlr_dev);
+
+ return sprintf(buf, "%d\n", ctlr->lp->r_a_tov);
+}
+
+static FCOE_DEVICE_ATTR(ctlr, r_a_tov, S_IRUGO | S_IWUSR,
+ show_ctlr_r_a_tov, store_ctlr_r_a_tov);
+
+static ssize_t store_ctlr_e_d_tov(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct fcoe_ctlr_device *ctlr_dev = dev_to_ctlr(dev);
+ struct fcoe_ctlr *ctlr = fcoe_ctlr_device_priv(ctlr_dev);
+
+ if (ctlr_dev->enabled == FCOE_CTLR_ENABLED)
+ return -EBUSY;
+ if (ctlr_dev->enabled == FCOE_CTLR_DISABLED)
+ return fcoe_ctlr_var_store(&ctlr->lp->e_d_tov, buf, count);
+ return -ENOTSUPP;
+}
+
+static ssize_t show_ctlr_e_d_tov(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct fcoe_ctlr_device *ctlr_dev = dev_to_ctlr(dev);
+ struct fcoe_ctlr *ctlr = fcoe_ctlr_device_priv(ctlr_dev);
+
+ return sprintf(buf, "%d\n", ctlr->lp->e_d_tov);
+}
+
+static FCOE_DEVICE_ATTR(ctlr, e_d_tov, S_IRUGO | S_IWUSR,
+ show_ctlr_e_d_tov, store_ctlr_e_d_tov);
+
+static ssize_t
store_private_fcoe_ctlr_fcf_dev_loss_tmo(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
@@ -507,6 +584,8 @@ static struct attribute_group fcoe_ctlr_lesb_attr_group = {
static struct attribute *fcoe_ctlr_attrs[] = {
&device_attr_fcoe_ctlr_fip_vlan_responder.attr,
&device_attr_fcoe_ctlr_fcf_dev_loss_tmo.attr,
+ &device_attr_fcoe_ctlr_r_a_tov.attr,
+ &device_attr_fcoe_ctlr_e_d_tov.attr,
&device_attr_fcoe_ctlr_enabled.attr,
&device_attr_fcoe_ctlr_mode.attr,
NULL,
diff --git a/drivers/scsi/fnic/fnic_scsi.c b/drivers/scsi/fnic/fnic_scsi.c
index d9fd2f841585..2544a37ece0a 100644
--- a/drivers/scsi/fnic/fnic_scsi.c
+++ b/drivers/scsi/fnic/fnic_scsi.c
@@ -441,30 +441,38 @@ static int fnic_queuecommand_lck(struct scsi_cmnd *sc, void (*done)(struct scsi_
unsigned long ptr;
spinlock_t *io_lock = NULL;
int io_lock_acquired = 0;
+ struct fc_rport_libfc_priv *rp;
if (unlikely(fnic_chk_state_flags_locked(fnic, FNIC_FLAGS_IO_BLOCKED)))
return SCSI_MLQUEUE_HOST_BUSY;
rport = starget_to_rport(scsi_target(sc->device));
+ if (!rport) {
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
+ "returning DID_NO_CONNECT for IO as rport is NULL\n");
+ sc->result = DID_NO_CONNECT << 16;
+ done(sc);
+ return 0;
+ }
+
ret = fc_remote_port_chkready(rport);
if (ret) {
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
+ "rport is not ready\n");
atomic64_inc(&fnic_stats->misc_stats.rport_not_ready);
sc->result = ret;
done(sc);
return 0;
}
- if (rport) {
- struct fc_rport_libfc_priv *rp = rport->dd_data;
-
- if (!rp || rp->rp_state != RPORT_ST_READY) {
- FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
+ rp = rport->dd_data;
+ if (!rp || rp->rp_state != RPORT_ST_READY) {
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
"returning DID_NO_CONNECT for IO as rport is removed\n");
- atomic64_inc(&fnic_stats->misc_stats.rport_not_ready);
- sc->result = DID_NO_CONNECT<<16;
- done(sc);
- return 0;
- }
+ atomic64_inc(&fnic_stats->misc_stats.rport_not_ready);
+ sc->result = DID_NO_CONNECT<<16;
+ done(sc);
+ return 0;
}
if (lp->state != LPORT_ST_READY || !(lp->link_up))
@@ -2543,7 +2551,7 @@ int fnic_reset(struct Scsi_Host *shost)
* Reset local port, this will clean up libFC exchanges,
* reset remote port sessions, and if link is up, begin flogi
*/
- ret = lp->tt.lport_reset(lp);
+ ret = fc_lport_reset(lp);
FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
"Returning from fnic reset %s\n",
diff --git a/drivers/scsi/fnic/fnic_trace.c b/drivers/scsi/fnic/fnic_trace.c
index 4e15c4bf0795..5a5fa01576b7 100644
--- a/drivers/scsi/fnic/fnic_trace.c
+++ b/drivers/scsi/fnic/fnic_trace.c
@@ -613,7 +613,7 @@ int fnic_fc_trace_set_data(u32 host_no, u8 frame_type,
fc_trace_entries.rd_idx = 0;
}
- fc_buf->time_stamp = CURRENT_TIME;
+ ktime_get_real_ts64(&fc_buf->time_stamp);
fc_buf->host_no = host_no;
fc_buf->frame_type = frame_type;
@@ -740,7 +740,7 @@ void copy_and_format_trace_data(struct fc_trace_hdr *tdata,
len = *orig_len;
- time_to_tm(tdata->time_stamp.tv_sec, 0, &tm);
+ time64_to_tm(tdata->time_stamp.tv_sec, 0, &tm);
fmt = "%02d:%02d:%04ld %02d:%02d:%02d.%09lu ns%8x %c%8x\t";
len += snprintf(fnic_dbgfs_prt->buffer + len,
diff --git a/drivers/scsi/fnic/fnic_trace.h b/drivers/scsi/fnic/fnic_trace.h
index a8aa0578fcb0..e375d0c2eaaf 100644
--- a/drivers/scsi/fnic/fnic_trace.h
+++ b/drivers/scsi/fnic/fnic_trace.h
@@ -72,7 +72,7 @@ struct fnic_trace_data {
typedef struct fnic_trace_data fnic_trace_data_t;
struct fc_trace_hdr {
- struct timespec time_stamp;
+ struct timespec64 time_stamp;
u32 host_no;
u8 frame_type;
u8 frame_len;
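These fnic hunks are a y2038 conversion: CURRENT_TIME and struct timespec give way to ktime_get_real_ts64() and struct timespec64, whose 64-bit tv_sec does not overflow in 2038, and the formatter moves from time_to_tm() to time64_to_tm(). A minimal sketch of the storage side:

#include <linux/time64.h>
#include <linux/timekeeping.h>

struct demo_trace_hdr {
	struct timespec64 time_stamp;	/* 64-bit seconds survive 2038 */
};

static void demo_stamp(struct demo_trace_hdr *hdr)
{
	ktime_get_real_ts64(&hdr->time_stamp);
}

/* On the formatting side, time64_to_tm() accepts the 64-bit seconds
 * value directly, replacing the 32-bit time_to_tm(). */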
diff --git a/drivers/scsi/fnic/vnic_dev.c b/drivers/scsi/fnic/vnic_dev.c
index 9795d6f3e197..ba69d6112fa1 100644
--- a/drivers/scsi/fnic/vnic_dev.c
+++ b/drivers/scsi/fnic/vnic_dev.c
@@ -499,10 +499,7 @@ void vnic_dev_add_addr(struct vnic_dev *vdev, u8 *addr)
err = vnic_dev_cmd(vdev, CMD_ADDR_ADD, &a0, &a1, wait);
if (err)
- printk(KERN_ERR
- "Can't add addr [%02x:%02x:%02x:%02x:%02x:%02x], %d\n",
- addr[0], addr[1], addr[2], addr[3], addr[4], addr[5],
- err);
+ pr_err("Can't add addr [%pM], %d\n", addr, err);
}
void vnic_dev_del_addr(struct vnic_dev *vdev, u8 *addr)
@@ -517,10 +514,7 @@ void vnic_dev_del_addr(struct vnic_dev *vdev, u8 *addr)
err = vnic_dev_cmd(vdev, CMD_ADDR_DEL, &a0, &a1, wait);
if (err)
- printk(KERN_ERR
- "Can't del addr [%02x:%02x:%02x:%02x:%02x:%02x], %d\n",
- addr[0], addr[1], addr[2], addr[3], addr[4], addr[5],
- err);
+ pr_err("Can't del addr [%pM], %d\n", addr, err);
}
int vnic_dev_notify_set(struct vnic_dev *vdev, u16 intr)
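The printk cleanup above leans on the kernel's %pM extension, which formats a six-byte buffer as a colon-separated MAC address, so one conversion replaces six %02x specifiers. A small sketch:

#include <linux/printk.h>
#include <linux/types.h>

static void demo_report_addr(const u8 *addr, int err)
{
	/* %pM expands a 6-byte buffer to aa:bb:cc:dd:ee:ff */
	pr_err("Can't add addr [%pM], %d\n", addr, err);
}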
diff --git a/drivers/scsi/g_NCR5380.c b/drivers/scsi/g_NCR5380.c
index cbf010324c18..6f9665d50d84 100644
--- a/drivers/scsi/g_NCR5380.c
+++ b/drivers/scsi/g_NCR5380.c
@@ -37,7 +37,7 @@
#define MAX_CARDS 8
/* old-style parameters for compatibility */
-static int ncr_irq;
+static int ncr_irq = -1;
static int ncr_addr;
static int ncr_5380;
static int ncr_53c400;
@@ -52,9 +52,9 @@ module_param(ncr_53c400a, int, 0);
module_param(dtc_3181e, int, 0);
module_param(hp_c2502, int, 0);
-static int irq[] = { 0, 0, 0, 0, 0, 0, 0, 0 };
+static int irq[] = { -1, -1, -1, -1, -1, -1, -1, -1 };
module_param_array(irq, int, NULL, 0);
-MODULE_PARM_DESC(irq, "IRQ number(s)");
+MODULE_PARM_DESC(irq, "IRQ number(s) (0=none, 254=auto [default])");
static int base[] = { 0, 0, 0, 0, 0, 0, 0, 0 };
module_param_array(base, int, NULL, 0);
@@ -64,9 +64,59 @@ static int card[] = { -1, -1, -1, -1, -1, -1, -1, -1 };
module_param_array(card, int, NULL, 0);
MODULE_PARM_DESC(card, "card type (0=NCR5380, 1=NCR53C400, 2=NCR53C400A, 3=DTC3181E, 4=HP C2502)");
+MODULE_ALIAS("g_NCR5380_mmio");
MODULE_LICENSE("GPL");
-#ifndef SCSI_G_NCR5380_MEM
+static void g_NCR5380_trigger_irq(struct Scsi_Host *instance)
+{
+ struct NCR5380_hostdata *hostdata = shost_priv(instance);
+
+ /*
+ * An interrupt is triggered whenever BSY = false, SEL = true
+ * and a bit set in the SELECT_ENABLE_REG is asserted on the
+ * SCSI bus.
+ *
+ * Note that the bus is only driven when the phase control signals
+ * (I/O, C/D, and MSG) match those in the TCR.
+ */
+ NCR5380_write(TARGET_COMMAND_REG,
+ PHASE_SR_TO_TCR(NCR5380_read(STATUS_REG) & PHASE_MASK));
+ NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
+ NCR5380_write(OUTPUT_DATA_REG, hostdata->id_mask);
+ NCR5380_write(INITIATOR_COMMAND_REG,
+ ICR_BASE | ICR_ASSERT_DATA | ICR_ASSERT_SEL);
+
+ msleep(1);
+
+ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
+ NCR5380_write(SELECT_ENABLE_REG, 0);
+ NCR5380_write(TARGET_COMMAND_REG, 0);
+}
+
+/**
+ * g_NCR5380_probe_irq - find the IRQ of a NCR5380 or equivalent
+ * @instance: SCSI host instance
+ *
+ * Autoprobe for the IRQ line used by the card by triggering an IRQ
+ * and then looking to see what interrupt actually turned up.
+ */
+
+static int g_NCR5380_probe_irq(struct Scsi_Host *instance)
+{
+ struct NCR5380_hostdata *hostdata = shost_priv(instance);
+ int irq_mask, irq;
+
+ NCR5380_read(RESET_PARITY_INTERRUPT_REG);
+ irq_mask = probe_irq_on();
+ g_NCR5380_trigger_irq(instance);
+ irq = probe_irq_off(irq_mask);
+ NCR5380_read(RESET_PARITY_INTERRUPT_REG);
+
+ if (irq <= 0)
+ return NO_IRQ;
+ return irq;
+}
+
/*
* Configure I/O address of 53C400A or DTC436 by writing magic numbers
* to ports 0x779 and 0x379.
@@ -81,47 +131,64 @@ static void magic_configure(int idx, u8 irq, u8 magic[])
outb(magic[3], 0x379);
outb(magic[4], 0x379);
- /* allowed IRQs for HP C2502 */
- if (irq != 2 && irq != 3 && irq != 4 && irq != 5 && irq != 7)
- irq = 0;
+ if (irq == 9)
+ irq = 2;
+
if (idx >= 0 && idx <= 7)
cfg = 0x80 | idx | (irq << 4);
outb(cfg, 0x379);
}
-#endif
+
+static irqreturn_t legacy_empty_irq_handler(int irq, void *dev_id)
+{
+ return IRQ_HANDLED;
+}
+
+static int legacy_find_free_irq(int *irq_table)
+{
+ while (*irq_table != -1) {
+ if (!request_irq(*irq_table, legacy_empty_irq_handler,
+ IRQF_PROBE_SHARED, "Test IRQ",
+ (void *)irq_table)) {
+ free_irq(*irq_table, (void *) irq_table);
+ return *irq_table;
+ }
+ irq_table++;
+ }
+ return -1;
+}
+
+static unsigned int ncr_53c400a_ports[] = {
+ 0x280, 0x290, 0x300, 0x310, 0x330, 0x340, 0x348, 0x350, 0
+};
+static unsigned int dtc_3181e_ports[] = {
+ 0x220, 0x240, 0x280, 0x2a0, 0x2c0, 0x300, 0x320, 0x340, 0
+};
+static u8 ncr_53c400a_magic[] = { /* 53C400A & DTC436 */
+ 0x59, 0xb9, 0xc5, 0xae, 0xa6
+};
+static u8 hp_c2502_magic[] = { /* HP C2502 */
+ 0x0f, 0x22, 0xf0, 0x20, 0x80
+};
+static int hp_c2502_irqs[] = {
+ 9, 5, 7, 3, 4, -1
+};
static int generic_NCR5380_init_one(struct scsi_host_template *tpnt,
struct device *pdev, int base, int irq, int board)
{
- unsigned int *ports;
+ bool is_pmio = base <= 0xffff;
+ int ret;
+ int flags = 0;
+ unsigned int *ports = NULL;
u8 *magic = NULL;
-#ifndef SCSI_G_NCR5380_MEM
int i;
int port_idx = -1;
unsigned long region_size;
-#endif
- static unsigned int ncr_53c400a_ports[] = {
- 0x280, 0x290, 0x300, 0x310, 0x330, 0x340, 0x348, 0x350, 0
- };
- static unsigned int dtc_3181e_ports[] = {
- 0x220, 0x240, 0x280, 0x2a0, 0x2c0, 0x300, 0x320, 0x340, 0
- };
- static u8 ncr_53c400a_magic[] = { /* 53C400A & DTC436 */
- 0x59, 0xb9, 0xc5, 0xae, 0xa6
- };
- static u8 hp_c2502_magic[] = { /* HP C2502 */
- 0x0f, 0x22, 0xf0, 0x20, 0x80
- };
- int flags, ret;
struct Scsi_Host *instance;
struct NCR5380_hostdata *hostdata;
-#ifdef SCSI_G_NCR5380_MEM
- void __iomem *iomem;
- resource_size_t iomem_size;
-#endif
+ u8 __iomem *iomem;
- ports = NULL;
- flags = 0;
switch (board) {
case BOARD_NCR5380:
flags = FLAG_NO_PSEUDO_DMA | FLAG_DMA_FIXUP;
@@ -140,8 +207,7 @@ static int generic_NCR5380_init_one(struct scsi_host_template *tpnt,
break;
}
-#ifndef SCSI_G_NCR5380_MEM
- if (ports && magic) {
+ if (is_pmio && ports && magic) {
/* wakeup sequence for the NCR53C400A and DTC3181E */
/* Disable the adapter and look for a free io port */
@@ -170,84 +236,96 @@ static int generic_NCR5380_init_one(struct scsi_host_template *tpnt,
if (ports[i]) {
/* At this point we have our region reserved */
magic_configure(i, 0, magic); /* no IRQ yet */
- outb(0xc0, ports[i] + 9);
- if (inb(ports[i] + 9) != 0x80) {
+ base = ports[i];
+ outb(0xc0, base + 9);
+ if (inb(base + 9) != 0x80) {
ret = -ENODEV;
goto out_release;
}
- base = ports[i];
port_idx = i;
} else
return -EINVAL;
- }
- else
- {
+ } else if (is_pmio) {
/* NCR5380 - no configuration, just grab */
region_size = 8;
if (!base || !request_region(base, region_size, "ncr5380"))
return -EBUSY;
+ } else { /* MMIO */
+ region_size = NCR53C400_region_size;
+ if (!request_mem_region(base, region_size, "ncr5380"))
+ return -EBUSY;
}
-#else
- iomem_size = NCR53C400_region_size;
- if (!request_mem_region(base, iomem_size, "ncr5380"))
- return -EBUSY;
- iomem = ioremap(base, iomem_size);
+
+ if (is_pmio)
+ iomem = ioport_map(base, region_size);
+ else
+ iomem = ioremap(base, region_size);
+
if (!iomem) {
- release_mem_region(base, iomem_size);
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto out_release;
}
-#endif
+
instance = scsi_host_alloc(tpnt, sizeof(struct NCR5380_hostdata));
if (instance == NULL) {
ret = -ENOMEM;
- goto out_release;
+ goto out_unmap;
}
hostdata = shost_priv(instance);
-#ifndef SCSI_G_NCR5380_MEM
- instance->io_port = base;
- instance->n_io_port = region_size;
- hostdata->io_width = 1; /* 8-bit PDMA by default */
-
- /*
- * On NCR53C400 boards, NCR5380 registers are mapped 8 past
- * the base address.
- */
- switch (board) {
- case BOARD_NCR53C400:
- instance->io_port += 8;
- hostdata->c400_ctl_status = 0;
- hostdata->c400_blk_cnt = 1;
- hostdata->c400_host_buf = 4;
- break;
- case BOARD_DTC3181E:
- hostdata->io_width = 2; /* 16-bit PDMA */
- /* fall through */
- case BOARD_NCR53C400A:
- case BOARD_HP_C2502:
- hostdata->c400_ctl_status = 9;
- hostdata->c400_blk_cnt = 10;
- hostdata->c400_host_buf = 8;
- break;
+ hostdata->io = iomem;
+ hostdata->region_size = region_size;
+
+ if (is_pmio) {
+ hostdata->io_port = base;
+ hostdata->io_width = 1; /* 8-bit PDMA by default */
+ hostdata->offset = 0;
+
+ /*
+ * On NCR53C400 boards, NCR5380 registers are mapped 8 past
+ * the base address.
+ */
+ switch (board) {
+ case BOARD_NCR53C400:
+ hostdata->io_port += 8;
+ hostdata->c400_ctl_status = 0;
+ hostdata->c400_blk_cnt = 1;
+ hostdata->c400_host_buf = 4;
+ break;
+ case BOARD_DTC3181E:
+ hostdata->io_width = 2; /* 16-bit PDMA */
+ /* fall through */
+ case BOARD_NCR53C400A:
+ case BOARD_HP_C2502:
+ hostdata->c400_ctl_status = 9;
+ hostdata->c400_blk_cnt = 10;
+ hostdata->c400_host_buf = 8;
+ break;
+ }
+ } else {
+ hostdata->base = base;
+ hostdata->offset = NCR53C400_mem_base;
+ switch (board) {
+ case BOARD_NCR53C400:
+ hostdata->c400_ctl_status = 0x100;
+ hostdata->c400_blk_cnt = 0x101;
+ hostdata->c400_host_buf = 0x104;
+ break;
+ case BOARD_DTC3181E:
+ case BOARD_NCR53C400A:
+ case BOARD_HP_C2502:
+ pr_err(DRV_MODULE_NAME ": unknown register offsets\n");
+ ret = -EINVAL;
+ goto out_unregister;
+ }
}
-#else
- instance->base = base;
- hostdata->iomem = iomem;
- hostdata->iomem_size = iomem_size;
- switch (board) {
- case BOARD_NCR53C400:
- hostdata->c400_ctl_status = 0x100;
- hostdata->c400_blk_cnt = 0x101;
- hostdata->c400_host_buf = 0x104;
- break;
- case BOARD_DTC3181E:
- case BOARD_NCR53C400A:
- case BOARD_HP_C2502:
- pr_err(DRV_MODULE_NAME ": unknown register offsets\n");
- ret = -EINVAL;
+
+ /* Check for vacant slot */
+ NCR5380_write(MODE_REG, 0);
+ if (NCR5380_read(MODE_REG) != 0) {
+ ret = -ENODEV;
goto out_unregister;
}
-#endif
ret = NCR5380_init(instance, flags | FLAG_LATE_DMA_SETUP);
if (ret)
@@ -263,33 +341,59 @@ static int generic_NCR5380_init_one(struct scsi_host_template *tpnt,
NCR5380_maybe_reset_bus(instance);
- if (irq != IRQ_AUTO)
- instance->irq = irq;
- else
- instance->irq = NCR5380_probe_irq(instance, 0xffff);
-
/* Compatibility with documented NCR5380 kernel parameters */
- if (instance->irq == 255)
- instance->irq = NO_IRQ;
+ if (irq == 255 || irq == 0)
+ irq = NO_IRQ;
+ else if (irq == -1)
+ irq = IRQ_AUTO;
+
+ if (board == BOARD_HP_C2502) {
+ int *irq_table = hp_c2502_irqs;
+ int board_irq = -1;
+
+ switch (irq) {
+ case NO_IRQ:
+ board_irq = 0;
+ break;
+ case IRQ_AUTO:
+ board_irq = legacy_find_free_irq(irq_table);
+ break;
+ default:
+ while (*irq_table != -1)
+ if (*irq_table++ == irq)
+ board_irq = irq;
+ }
+
+ if (board_irq <= 0) {
+ board_irq = 0;
+ irq = NO_IRQ;
+ }
+
+ magic_configure(port_idx, board_irq, magic);
+ }
+
+ if (irq == IRQ_AUTO) {
+ instance->irq = g_NCR5380_probe_irq(instance);
+ if (instance->irq == NO_IRQ)
+ shost_printk(KERN_INFO, instance, "no irq detected\n");
+ } else {
+ instance->irq = irq;
+ if (instance->irq == NO_IRQ)
+ shost_printk(KERN_INFO, instance, "no irq provided\n");
+ }
if (instance->irq != NO_IRQ) {
-#ifndef SCSI_G_NCR5380_MEM
- /* set IRQ for HP C2502 */
- if (board == BOARD_HP_C2502)
- magic_configure(port_idx, instance->irq, magic);
-#endif
if (request_irq(instance->irq, generic_NCR5380_intr,
0, "NCR5380", instance)) {
- printk(KERN_WARNING "scsi%d : IRQ%d not free, interrupts disabled\n", instance->host_no, instance->irq);
instance->irq = NO_IRQ;
+ shost_printk(KERN_INFO, instance,
+ "irq %d denied\n", instance->irq);
+ } else {
+ shost_printk(KERN_INFO, instance,
+ "irq %d acquired\n", instance->irq);
}
}
- if (instance->irq == NO_IRQ) {
- printk(KERN_INFO "scsi%d : interrupts not enabled. for better interactive performance,\n", instance->host_no);
- printk(KERN_INFO "scsi%d : please jumper the board for a free IRQ.\n", instance->host_no);
- }
-
ret = scsi_add_host(instance, pdev);
if (ret)
goto out_free_irq;
@@ -303,38 +407,39 @@ out_free_irq:
NCR5380_exit(instance);
out_unregister:
scsi_host_put(instance);
-out_release:
-#ifndef SCSI_G_NCR5380_MEM
- release_region(base, region_size);
-#else
+out_unmap:
iounmap(iomem);
- release_mem_region(base, iomem_size);
-#endif
+out_release:
+ if (is_pmio)
+ release_region(base, region_size);
+ else
+ release_mem_region(base, region_size);
return ret;
}
static void generic_NCR5380_release_resources(struct Scsi_Host *instance)
{
+ struct NCR5380_hostdata *hostdata = shost_priv(instance);
+ void __iomem *iomem = hostdata->io;
+ unsigned long io_port = hostdata->io_port;
+ unsigned long base = hostdata->base;
+ unsigned long region_size = hostdata->region_size;
+
scsi_remove_host(instance);
if (instance->irq != NO_IRQ)
free_irq(instance->irq, instance);
NCR5380_exit(instance);
-#ifndef SCSI_G_NCR5380_MEM
- release_region(instance->io_port, instance->n_io_port);
-#else
- {
- struct NCR5380_hostdata *hostdata = shost_priv(instance);
-
- iounmap(hostdata->iomem);
- release_mem_region(instance->base, hostdata->iomem_size);
- }
-#endif
scsi_host_put(instance);
+ iounmap(iomem);
+ if (io_port)
+ release_region(io_port, region_size);
+ else
+ release_mem_region(base, region_size);
}
/**
* generic_NCR5380_pread - pseudo DMA read
- * @instance: adapter to read from
+ * @hostdata: scsi host private data
* @dst: buffer to read into
* @len: buffer length
*
@@ -342,10 +447,9 @@ static void generic_NCR5380_release_resources(struct Scsi_Host *instance)
* controller
*/
-static inline int generic_NCR5380_pread(struct Scsi_Host *instance,
+static inline int generic_NCR5380_pread(struct NCR5380_hostdata *hostdata,
unsigned char *dst, int len)
{
- struct NCR5380_hostdata *hostdata = shost_priv(instance);
int blocks = len / 128;
int start = 0;
@@ -361,18 +465,16 @@ static inline int generic_NCR5380_pread(struct Scsi_Host *instance,
while (NCR5380_read(hostdata->c400_ctl_status) & CSR_HOST_BUF_NOT_RDY)
; /* FIXME - no timeout */
-#ifndef SCSI_G_NCR5380_MEM
- if (hostdata->io_width == 2)
- insw(instance->io_port + hostdata->c400_host_buf,
+ if (hostdata->io_port && hostdata->io_width == 2)
+ insw(hostdata->io_port + hostdata->c400_host_buf,
dst + start, 64);
- else
- insb(instance->io_port + hostdata->c400_host_buf,
+ else if (hostdata->io_port)
+ insb(hostdata->io_port + hostdata->c400_host_buf,
dst + start, 128);
-#else
- /* implies SCSI_G_NCR5380_MEM */
- memcpy_fromio(dst + start,
- hostdata->iomem + NCR53C400_host_buffer, 128);
-#endif
+ else
+ memcpy_fromio(dst + start,
+ hostdata->io + NCR53C400_host_buffer, 128);
+
start += 128;
blocks--;
}
@@ -381,18 +483,16 @@ static inline int generic_NCR5380_pread(struct Scsi_Host *instance,
while (NCR5380_read(hostdata->c400_ctl_status) & CSR_HOST_BUF_NOT_RDY)
; /* FIXME - no timeout */
-#ifndef SCSI_G_NCR5380_MEM
- if (hostdata->io_width == 2)
- insw(instance->io_port + hostdata->c400_host_buf,
+ if (hostdata->io_port && hostdata->io_width == 2)
+ insw(hostdata->io_port + hostdata->c400_host_buf,
dst + start, 64);
- else
- insb(instance->io_port + hostdata->c400_host_buf,
+ else if (hostdata->io_port)
+ insb(hostdata->io_port + hostdata->c400_host_buf,
dst + start, 128);
-#else
- /* implies SCSI_G_NCR5380_MEM */
- memcpy_fromio(dst + start,
- hostdata->iomem + NCR53C400_host_buffer, 128);
-#endif
+ else
+ memcpy_fromio(dst + start,
+ hostdata->io + NCR53C400_host_buffer, 128);
+
start += 128;
blocks--;
}
@@ -412,7 +512,7 @@ static inline int generic_NCR5380_pread(struct Scsi_Host *instance,
/**
* generic_NCR5380_pwrite - pseudo DMA write
- * @instance: adapter to read from
+ * @hostdata: scsi host private data
* @dst: buffer to read into
* @len: buffer length
*
@@ -420,10 +520,9 @@ static inline int generic_NCR5380_pread(struct Scsi_Host *instance,
* controller
*/
-static inline int generic_NCR5380_pwrite(struct Scsi_Host *instance,
+static inline int generic_NCR5380_pwrite(struct NCR5380_hostdata *hostdata,
unsigned char *src, int len)
{
- struct NCR5380_hostdata *hostdata = shost_priv(instance);
int blocks = len / 128;
int start = 0;
@@ -439,18 +538,17 @@ static inline int generic_NCR5380_pwrite(struct Scsi_Host *instance,
break;
while (NCR5380_read(hostdata->c400_ctl_status) & CSR_HOST_BUF_NOT_RDY)
; // FIXME - timeout
-#ifndef SCSI_G_NCR5380_MEM
- if (hostdata->io_width == 2)
- outsw(instance->io_port + hostdata->c400_host_buf,
+
+ if (hostdata->io_port && hostdata->io_width == 2)
+ outsw(hostdata->io_port + hostdata->c400_host_buf,
src + start, 64);
- else
- outsb(instance->io_port + hostdata->c400_host_buf,
+ else if (hostdata->io_port)
+ outsb(hostdata->io_port + hostdata->c400_host_buf,
src + start, 128);
-#else
- /* implies SCSI_G_NCR5380_MEM */
- memcpy_toio(hostdata->iomem + NCR53C400_host_buffer,
- src + start, 128);
-#endif
+ else
+ memcpy_toio(hostdata->io + NCR53C400_host_buffer,
+ src + start, 128);
+
start += 128;
blocks--;
}
@@ -458,18 +556,16 @@ static inline int generic_NCR5380_pwrite(struct Scsi_Host *instance,
while (NCR5380_read(hostdata->c400_ctl_status) & CSR_HOST_BUF_NOT_RDY)
; // FIXME - no timeout
-#ifndef SCSI_G_NCR5380_MEM
- if (hostdata->io_width == 2)
- outsw(instance->io_port + hostdata->c400_host_buf,
+ if (hostdata->io_port && hostdata->io_width == 2)
+ outsw(hostdata->io_port + hostdata->c400_host_buf,
src + start, 64);
- else
- outsb(instance->io_port + hostdata->c400_host_buf,
+ else if (hostdata->io_port)
+ outsb(hostdata->io_port + hostdata->c400_host_buf,
src + start, 128);
-#else
- /* implies SCSI_G_NCR5380_MEM */
- memcpy_toio(hostdata->iomem + NCR53C400_host_buffer,
- src + start, 128);
-#endif
+ else
+ memcpy_toio(hostdata->io + NCR53C400_host_buffer,
+ src + start, 128);
+
start += 128;
blocks--;
}
@@ -489,10 +585,9 @@ static inline int generic_NCR5380_pwrite(struct Scsi_Host *instance,
return 0;
}
-static int generic_NCR5380_dma_xfer_len(struct Scsi_Host *instance,
+static int generic_NCR5380_dma_xfer_len(struct NCR5380_hostdata *hostdata,
struct scsi_cmnd *cmd)
{
- struct NCR5380_hostdata *hostdata = shost_priv(instance);
int transfersize = cmd->transfersize;
if (hostdata->flags & FLAG_NO_PSEUDO_DMA)
@@ -566,7 +661,7 @@ static struct isa_driver generic_NCR5380_isa_driver = {
},
};
-#if !defined(SCSI_G_NCR5380_MEM) && defined(CONFIG_PNP)
+#ifdef CONFIG_PNP
static struct pnp_device_id generic_NCR5380_pnp_ids[] = {
{ .id = "DTC436e", .driver_data = BOARD_DTC3181E },
{ .id = "" }
@@ -600,7 +695,7 @@ static struct pnp_driver generic_NCR5380_pnp_driver = {
.probe = generic_NCR5380_pnp_probe,
.remove = generic_NCR5380_pnp_remove,
};
-#endif /* !defined(SCSI_G_NCR5380_MEM) && defined(CONFIG_PNP) */
+#endif /* defined(CONFIG_PNP) */
static int pnp_registered, isa_registered;
@@ -609,7 +704,7 @@ static int __init generic_NCR5380_init(void)
int ret = 0;
/* compatibility with old-style parameters */
- if (irq[0] == 0 && base[0] == 0 && card[0] == -1) {
+ if (irq[0] == -1 && base[0] == 0 && card[0] == -1) {
irq[0] = ncr_irq;
base[0] = ncr_addr;
if (ncr_5380)
@@ -624,7 +719,7 @@ static int __init generic_NCR5380_init(void)
card[0] = BOARD_HP_C2502;
}
-#if !defined(SCSI_G_NCR5380_MEM) && defined(CONFIG_PNP)
+#ifdef CONFIG_PNP
if (!pnp_register_driver(&generic_NCR5380_pnp_driver))
pnp_registered = 1;
#endif
@@ -637,7 +732,7 @@ static int __init generic_NCR5380_init(void)
static void __exit generic_NCR5380_exit(void)
{
-#if !defined(SCSI_G_NCR5380_MEM) && defined(CONFIG_PNP)
+#ifdef CONFIG_PNP
if (pnp_registered)
pnp_unregister_driver(&generic_NCR5380_pnp_driver);
#endif
diff --git a/drivers/scsi/g_NCR5380.h b/drivers/scsi/g_NCR5380.h
index b175b9234458..81b22d989648 100644
--- a/drivers/scsi/g_NCR5380.h
+++ b/drivers/scsi/g_NCR5380.h
@@ -14,49 +14,28 @@
#ifndef GENERIC_NCR5380_H
#define GENERIC_NCR5380_H
-#ifndef SCSI_G_NCR5380_MEM
#define DRV_MODULE_NAME "g_NCR5380"
#define NCR5380_read(reg) \
- inb(instance->io_port + (reg))
+ ioread8(hostdata->io + hostdata->offset + (reg))
#define NCR5380_write(reg, value) \
- outb(value, instance->io_port + (reg))
+ iowrite8(value, hostdata->io + hostdata->offset + (reg))
#define NCR5380_implementation_fields \
+ int offset; \
int c400_ctl_status; \
int c400_blk_cnt; \
int c400_host_buf; \
int io_width;
-#else
-/* therefore SCSI_G_NCR5380_MEM */
-#define DRV_MODULE_NAME "g_NCR5380_mmio"
-
#define NCR53C400_mem_base 0x3880
#define NCR53C400_host_buffer 0x3900
#define NCR53C400_region_size 0x3a00
-#define NCR5380_read(reg) \
- readb(((struct NCR5380_hostdata *)shost_priv(instance))->iomem + \
- NCR53C400_mem_base + (reg))
-#define NCR5380_write(reg, value) \
- writeb(value, ((struct NCR5380_hostdata *)shost_priv(instance))->iomem + \
- NCR53C400_mem_base + (reg))
-
-#define NCR5380_implementation_fields \
- void __iomem *iomem; \
- resource_size_t iomem_size; \
- int c400_ctl_status; \
- int c400_blk_cnt; \
- int c400_host_buf;
-
-#endif
-
-#define NCR5380_dma_xfer_len(instance, cmd, phase) \
- generic_NCR5380_dma_xfer_len(instance, cmd)
+#define NCR5380_dma_xfer_len generic_NCR5380_dma_xfer_len
#define NCR5380_dma_recv_setup generic_NCR5380_pread
#define NCR5380_dma_send_setup generic_NCR5380_pwrite
-#define NCR5380_dma_residual(instance) (0)
+#define NCR5380_dma_residual NCR5380_dma_residual_none
#define NCR5380_intr generic_NCR5380_intr
#define NCR5380_queue_command generic_NCR5380_queue_command
@@ -72,5 +51,6 @@
#define BOARD_DTC3181E 3
#define BOARD_HP_C2502 4
-#endif /* GENERIC_NCR5380_H */
+#define IRQ_AUTO 254
+#endif /* GENERIC_NCR5380_H */
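The header rewrite above is what lets the separate g_NCR5380_mmio build (deleted below) go away: ioport_map() and ioremap() both hand back a void __iomem * cookie, and ioread8()/iowrite8() dispatch correctly against either, so one accessor pair serves PIO and MMIO cards. A sketch of the unified accessors, with hypothetical demo_* names:

#include <linux/io.h>

struct demo_hostdata {
	void __iomem *io;	/* from ioport_map() or ioremap() */
	int offset;		/* 0 for PIO, register window base for MMIO */
};

static u8 demo_read_reg(struct demo_hostdata *hostdata, unsigned int reg)
{
	return ioread8(hostdata->io + hostdata->offset + reg);
}

static void demo_write_reg(struct demo_hostdata *hostdata,
			   unsigned int reg, u8 val)
{
	iowrite8(val, hostdata->io + hostdata->offset + reg);
}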
diff --git a/drivers/scsi/g_NCR5380_mmio.c b/drivers/scsi/g_NCR5380_mmio.c
deleted file mode 100644
index 8cdde71ba0c8..000000000000
--- a/drivers/scsi/g_NCR5380_mmio.c
+++ /dev/null
@@ -1,10 +0,0 @@
-/*
- * There is probably a nicer way to do this but this one makes
- * pretty obvious what is happening. We rebuild the same file with
- * different options for mmio versus pio.
- */
-
-#define SCSI_G_NCR5380_MEM
-
-#include "g_NCR5380.c"
-
diff --git a/drivers/scsi/hisi_sas/hisi_sas.h b/drivers/scsi/hisi_sas/hisi_sas.h
index 72c98522bd26..c0cd505a9ef7 100644
--- a/drivers/scsi/hisi_sas/hisi_sas.h
+++ b/drivers/scsi/hisi_sas/hisi_sas.h
@@ -13,6 +13,7 @@
#define _HISI_SAS_H_
#include <linux/acpi.h>
+#include <linux/clk.h>
#include <linux/dmapool.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
@@ -110,7 +111,7 @@ struct hisi_sas_device {
struct domain_device *sas_device;
u64 attached_phy;
u64 device_id;
- u64 running_req;
+ atomic64_t running_req;
u8 dev_status;
};
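Making running_req an atomic64_t lets the in-flight counter be adjusted from submission and completion paths without a shared lock, as the hunks in hisi_sas_main.c below show. A minimal sketch:

#include <linux/atomic.h>

struct demo_dev {
	atomic64_t running_req;
};

static void demo_submit(struct demo_dev *dev)
{
	atomic64_inc(&dev->running_req);
}

static void demo_complete(struct demo_dev *dev)
{
	atomic64_dec(&dev->running_req);
}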
@@ -149,7 +150,8 @@ struct hisi_sas_hw {
struct domain_device *device);
struct hisi_sas_device *(*alloc_dev)(struct domain_device *device);
void (*sl_notify)(struct hisi_hba *hisi_hba, int phy_no);
- int (*get_free_slot)(struct hisi_hba *hisi_hba, int *q, int *s);
+ int (*get_free_slot)(struct hisi_hba *hisi_hba, u32 dev_id,
+ int *q, int *s);
void (*start_delivery)(struct hisi_hba *hisi_hba);
int (*prep_ssp)(struct hisi_hba *hisi_hba,
struct hisi_sas_slot *slot, int is_tmf,
@@ -166,6 +168,9 @@ struct hisi_sas_hw {
void (*phy_enable)(struct hisi_hba *hisi_hba, int phy_no);
void (*phy_disable)(struct hisi_hba *hisi_hba, int phy_no);
void (*phy_hard_reset)(struct hisi_hba *hisi_hba, int phy_no);
+ void (*phy_set_linkrate)(struct hisi_hba *hisi_hba, int phy_no,
+ struct sas_phy_linkrates *linkrates);
+ enum sas_linkrate (*phy_get_max_linkrate)(void);
void (*free_device)(struct hisi_hba *hisi_hba,
struct hisi_sas_device *dev);
int (*get_wideport_bitmap)(struct hisi_hba *hisi_hba, int port_id);
@@ -183,6 +188,7 @@ struct hisi_hba {
u32 ctrl_reset_reg;
u32 ctrl_reset_sts_reg;
u32 ctrl_clock_ena_reg;
+ u32 refclk_frequency_mhz;
u8 sas_addr[SAS_ADDR_SIZE];
int n_phy;
@@ -205,7 +211,6 @@ struct hisi_hba {
struct hisi_sas_port port[HISI_SAS_MAX_PHYS];
int queue_count;
- int queue;
struct hisi_sas_slot *slot_prep;
struct dma_pool *sge_page_pool;
diff --git a/drivers/scsi/hisi_sas/hisi_sas_main.c b/drivers/scsi/hisi_sas/hisi_sas_main.c
index 2f872f784e10..d50e9cfefd24 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_main.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_main.c
@@ -162,8 +162,8 @@ out:
hisi_sas_slot_task_free(hisi_hba, task, abort_slot);
if (task->task_done)
task->task_done(task);
- if (sas_dev && sas_dev->running_req)
- sas_dev->running_req--;
+ if (sas_dev)
+ atomic64_dec(&sas_dev->running_req);
}
static int hisi_sas_task_prep(struct sas_task *task, struct hisi_hba *hisi_hba,
@@ -232,8 +232,8 @@ static int hisi_sas_task_prep(struct sas_task *task, struct hisi_hba *hisi_hba,
rc = hisi_sas_slot_index_alloc(hisi_hba, &slot_idx);
if (rc)
goto err_out;
- rc = hisi_hba->hw->get_free_slot(hisi_hba, &dlvry_queue,
- &dlvry_queue_slot);
+ rc = hisi_hba->hw->get_free_slot(hisi_hba, sas_dev->device_id,
+ &dlvry_queue, &dlvry_queue_slot);
if (rc)
goto err_out_tag;
@@ -303,7 +303,7 @@ static int hisi_sas_task_prep(struct sas_task *task, struct hisi_hba *hisi_hba,
hisi_hba->slot_prep = slot;
- sas_dev->running_req++;
+ atomic64_inc(&sas_dev->running_req);
++(*pass);
return 0;
@@ -369,9 +369,14 @@ static void hisi_sas_bytes_dmaed(struct hisi_hba *hisi_hba, int phy_no)
struct sas_phy *sphy = sas_phy->phy;
sphy->negotiated_linkrate = sas_phy->linkrate;
- sphy->minimum_linkrate = phy->minimum_linkrate;
sphy->minimum_linkrate_hw = SAS_LINK_RATE_1_5_GBPS;
- sphy->maximum_linkrate = phy->maximum_linkrate;
+ sphy->maximum_linkrate_hw =
+ hisi_hba->hw->phy_get_max_linkrate();
+ if (sphy->minimum_linkrate == SAS_LINK_RATE_UNKNOWN)
+ sphy->minimum_linkrate = phy->minimum_linkrate;
+
+ if (sphy->maximum_linkrate == SAS_LINK_RATE_UNKNOWN)
+ sphy->maximum_linkrate = phy->maximum_linkrate;
}
if (phy->phy_type & PORT_TYPE_SAS) {
@@ -537,7 +542,7 @@ static void hisi_sas_port_notify_formed(struct asd_sas_phy *sas_phy)
struct hisi_hba *hisi_hba = sas_ha->lldd_ha;
struct hisi_sas_phy *phy = sas_phy->lldd_phy;
struct asd_sas_port *sas_port = sas_phy->port;
- struct hisi_sas_port *port = &hisi_hba->port[sas_phy->id];
+ struct hisi_sas_port *port = &hisi_hba->port[phy->port_id];
unsigned long flags;
if (!sas_port)
@@ -645,6 +650,9 @@ static int hisi_sas_control_phy(struct asd_sas_phy *sas_phy, enum phy_func func,
break;
case PHY_FUNC_SET_LINK_RATE:
+ hisi_hba->hw->phy_set_linkrate(hisi_hba, phy_no, funcdata);
+ break;
+
case PHY_FUNC_RELEASE_SPINUP_HOLD:
default:
return -EOPNOTSUPP;
@@ -764,7 +772,8 @@ static int hisi_sas_exec_internal_tmf_task(struct domain_device *device,
task = NULL;
}
ex_err:
- WARN_ON(retry == TASK_RETRY);
+ if (retry == TASK_RETRY)
+ dev_warn(dev, "abort tmf: executing internal task failed!\n");
sas_free_task(task);
return res;
}
@@ -960,6 +969,9 @@ static int hisi_sas_query_task(struct sas_task *task)
case TMF_RESP_FUNC_FAILED:
case TMF_RESP_FUNC_COMPLETE:
break;
+ default:
+ rc = TMF_RESP_FUNC_FAILED;
+ break;
}
}
return rc;
@@ -987,8 +999,8 @@ hisi_sas_internal_abort_task_exec(struct hisi_hba *hisi_hba, u64 device_id,
rc = hisi_sas_slot_index_alloc(hisi_hba, &slot_idx);
if (rc)
goto err_out;
- rc = hisi_hba->hw->get_free_slot(hisi_hba, &dlvry_queue,
- &dlvry_queue_slot);
+ rc = hisi_hba->hw->get_free_slot(hisi_hba, sas_dev->device_id,
+ &dlvry_queue, &dlvry_queue_slot);
if (rc)
goto err_out_tag;
@@ -1023,7 +1035,8 @@ hisi_sas_internal_abort_task_exec(struct hisi_hba *hisi_hba, u64 device_id,
hisi_hba->slot_prep = slot;
- sas_dev->running_req++;
+ atomic64_inc(&sas_dev->running_req);
+
/* send abort command to our chip */
hisi_hba->hw->start_delivery(hisi_hba);
@@ -1396,10 +1409,13 @@ static struct Scsi_Host *hisi_sas_shost_alloc(struct platform_device *pdev,
struct hisi_hba *hisi_hba;
struct device *dev = &pdev->dev;
struct device_node *np = pdev->dev.of_node;
+ struct clk *refclk;
shost = scsi_host_alloc(&hisi_sas_sht, sizeof(*hisi_hba));
- if (!shost)
- goto err_out;
+ if (!shost) {
+ dev_err(dev, "scsi host alloc failed\n");
+ return NULL;
+ }
hisi_hba = shost_priv(shost);
hisi_hba->hw = hw;
@@ -1432,6 +1448,12 @@ static struct Scsi_Host *hisi_sas_shost_alloc(struct platform_device *pdev,
goto err_out;
}
+ refclk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(refclk))
+ dev_info(dev, "no ref clk property\n");
+ else
+ hisi_hba->refclk_frequency_mhz = clk_get_rate(refclk) / 1000000;
+
if (device_property_read_u32(dev, "phy-count", &hisi_hba->n_phy))
goto err_out;
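
The reference clock added above is optional: devm_clk_get(&pdev->dev, NULL) grabs the device's first (unnamed) clock, and a missing clock merely logs at info level. A hedged fragment of that optional-clock idiom (context names as in the patch):

	struct clk *refclk = devm_clk_get(&pdev->dev, NULL);

	if (IS_ERR(refclk)) {
		dev_info(dev, "no ref clk property\n");	/* optional, keep going */
	} else {
		/* clk_get_rate() returns Hz; the driver stores MHz */
		hisi_hba->refclk_frequency_mhz = clk_get_rate(refclk) / 1000000;
	}

The stored frequency is consumed further down in the v2 init code, which only rewrites PHY_CTRL when the board runs from a 66 MHz reference.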
@@ -1457,6 +1479,7 @@ static struct Scsi_Host *hisi_sas_shost_alloc(struct platform_device *pdev,
return shost;
err_out:
+ kfree(shost);
dev_err(dev, "shost alloc failed\n");
return NULL;
}
@@ -1483,10 +1506,8 @@ int hisi_sas_probe(struct platform_device *pdev,
int rc, phy_nr, port_nr, i;
shost = hisi_sas_shost_alloc(pdev, hw);
- if (!shost) {
- rc = -ENOMEM;
- goto err_out_ha;
- }
+ if (!shost)
+ return -ENOMEM;
sha = SHOST_TO_SAS_HA(shost);
hisi_hba = shost_priv(shost);
@@ -1496,12 +1517,13 @@ int hisi_sas_probe(struct platform_device *pdev,
arr_phy = devm_kcalloc(dev, phy_nr, sizeof(void *), GFP_KERNEL);
arr_port = devm_kcalloc(dev, port_nr, sizeof(void *), GFP_KERNEL);
- if (!arr_phy || !arr_port)
- return -ENOMEM;
+ if (!arr_phy || !arr_port) {
+ rc = -ENOMEM;
+ goto err_out_ha;
+ }
sha->sas_phy = arr_phy;
sha->sas_port = arr_port;
- sha->core.shost = shost;
sha->lldd_ha = hisi_hba;
shost->transportt = hisi_sas_stt;
@@ -1546,6 +1568,7 @@ int hisi_sas_probe(struct platform_device *pdev,
err_out_register_ha:
scsi_remove_host(shost);
err_out_ha:
+ hisi_sas_free(hisi_hba);
kfree(shost);
return rc;
}
@@ -1555,12 +1578,14 @@ int hisi_sas_remove(struct platform_device *pdev)
{
struct sas_ha_struct *sha = platform_get_drvdata(pdev);
struct hisi_hba *hisi_hba = sha->lldd_ha;
+ struct Scsi_Host *shost = sha->core.shost;
scsi_remove_host(sha->core.shost);
sas_unregister_ha(sha);
sas_remove_host(sha->core.shost);
hisi_sas_free(hisi_hba);
+ kfree(shost);
return 0;
}
EXPORT_SYMBOL_GPL(hisi_sas_remove);
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
index c0ac49d8bc8d..8a1be0ba8a22 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
@@ -843,6 +843,49 @@ static void sl_notify_v1_hw(struct hisi_hba *hisi_hba, int phy_no)
hisi_sas_phy_write32(hisi_hba, phy_no, SL_CONTROL, sl_control);
}
+static enum sas_linkrate phy_get_max_linkrate_v1_hw(void)
+{
+ return SAS_LINK_RATE_6_0_GBPS;
+}
+
+static void phy_set_linkrate_v1_hw(struct hisi_hba *hisi_hba, int phy_no,
+ struct sas_phy_linkrates *r)
+{
+ u32 prog_phy_link_rate =
+ hisi_sas_phy_read32(hisi_hba, phy_no, PROG_PHY_LINK_RATE);
+ struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
+ struct asd_sas_phy *sas_phy = &phy->sas_phy;
+ int i;
+ enum sas_linkrate min, max;
+ u32 rate_mask = 0;
+
+ if (r->maximum_linkrate == SAS_LINK_RATE_UNKNOWN) {
+ max = sas_phy->phy->maximum_linkrate;
+ min = r->minimum_linkrate;
+ } else if (r->minimum_linkrate == SAS_LINK_RATE_UNKNOWN) {
+ max = r->maximum_linkrate;
+ min = sas_phy->phy->minimum_linkrate;
+ } else
+ return;
+
+ sas_phy->phy->maximum_linkrate = max;
+ sas_phy->phy->minimum_linkrate = min;
+
+ min -= SAS_LINK_RATE_1_5_GBPS;
+ max -= SAS_LINK_RATE_1_5_GBPS;
+
+ for (i = 0; i <= max; i++)
+ rate_mask |= 1 << (i * 2);
+
+ prog_phy_link_rate &= ~0xff;
+ prog_phy_link_rate |= rate_mask;
+
+ hisi_sas_phy_write32(hisi_hba, phy_no, PROG_PHY_LINK_RATE,
+ prog_phy_link_rate);
+
+ phy_hard_reset_v1_hw(hisi_hba, phy_no);
+}
+
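
phy_set_linkrate_v1_hw() above encodes the allowed rates as one enable bit every two register bits, rebased so that SAS_LINK_RATE_1_5_GBPS maps to bit 0. The mask arithmetic in isolation (a sketch; it relies on enum sas_linkrate values being consecutive):

#include <linux/types.h>
#include <scsi/libsas.h>	/* enum sas_linkrate */

static u32 example_linkrate_to_prog_mask(enum sas_linkrate max)
{
	u32 rate_mask = 0;
	int i;

	max -= SAS_LINK_RATE_1_5_GBPS;		/* rebase: 1.5G -> 0, 3G -> 1, ... */
	for (i = 0; i <= max; i++)
		rate_mask |= 1 << (i * 2);	/* enable bits 0, 2, 4, ... */

	/* caller ORs this into PROG_PHY_LINK_RATE after clearing the low byte */
	return rate_mask;
}

As in the patch, the minimum rate is recorded in the sas_phy but never masked out of the register, so the programmed mask always starts at 1.5 Gbps.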
static int get_wideport_bitmap_v1_hw(struct hisi_hba *hisi_hba, int port_id)
{
int i, bitmap = 0;
@@ -862,29 +905,23 @@ static int get_wideport_bitmap_v1_hw(struct hisi_hba *hisi_hba, int port_id)
* The call path to this function, up to and including the write of the
* delivery queue write pointer, should be safe from interruption.
*/
-static int get_free_slot_v1_hw(struct hisi_hba *hisi_hba, int *q, int *s)
+static int get_free_slot_v1_hw(struct hisi_hba *hisi_hba, u32 dev_id,
+ int *q, int *s)
{
struct device *dev = &hisi_hba->pdev->dev;
struct hisi_sas_dq *dq;
u32 r, w;
- int queue = hisi_hba->queue;
-
- while (1) {
- dq = &hisi_hba->dq[queue];
- w = dq->wr_point;
- r = hisi_sas_read32_relaxed(hisi_hba,
- DLVRY_Q_0_RD_PTR + (queue * 0x14));
- if (r == (w+1) % HISI_SAS_QUEUE_SLOTS) {
- queue = (queue + 1) % hisi_hba->queue_count;
- if (queue == hisi_hba->queue) {
- dev_warn(dev, "could not find free slot\n");
- return -EAGAIN;
- }
- continue;
- }
- break;
+ int queue = dev_id % hisi_hba->queue_count;
+
+ dq = &hisi_hba->dq[queue];
+ w = dq->wr_point;
+ r = hisi_sas_read32_relaxed(hisi_hba,
+ DLVRY_Q_0_RD_PTR + (queue * 0x14));
+ if (r == (w+1) % HISI_SAS_QUEUE_SLOTS) {
+ dev_warn(dev, "could not find free slot\n");
+ return -EAGAIN;
}
- hisi_hba->queue = (queue + 1) % hisi_hba->queue_count;
+
*q = queue;
*s = w;
return 0;
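
The rewritten get_free_slot above drops the round-robin hunt across delivery queues: each device is now statically mapped to one queue with dev_id % queue_count, and the call fails fast with -EAGAIN when that ring is full. The two pieces of arithmetic as a standalone sketch (ring depth value illustrative):

#include <linux/types.h>

#define EXAMPLE_QUEUE_SLOTS	512	/* the driver's HISI_SAS_QUEUE_SLOTS */

static int example_pick_queue(u32 dev_id, int queue_count)
{
	return dev_id % queue_count;	/* all I/O for a device shares a queue */
}

static bool example_dq_full(u32 rd, u32 wr)
{
	/* one slot is kept empty so rd == wr can unambiguously mean "empty" */
	return rd == (wr + 1) % EXAMPLE_QUEUE_SLOTS;
}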
@@ -1372,8 +1409,8 @@ static int slot_complete_v1_hw(struct hisi_hba *hisi_hba,
}
out:
- if (sas_dev && sas_dev->running_req)
- sas_dev->running_req--;
+ if (sas_dev)
+ atomic64_dec(&sas_dev->running_req);
hisi_sas_slot_task_free(hisi_hba, task, slot);
sts = ts->stat;
@@ -1824,6 +1861,8 @@ static const struct hisi_sas_hw hisi_sas_v1_hw = {
.phy_enable = enable_phy_v1_hw,
.phy_disable = disable_phy_v1_hw,
.phy_hard_reset = phy_hard_reset_v1_hw,
+ .phy_set_linkrate = phy_set_linkrate_v1_hw,
+ .phy_get_max_linkrate = phy_get_max_linkrate_v1_hw,
.get_wideport_bitmap = get_wideport_bitmap_v1_hw,
.max_command_entries = HISI_SAS_COMMAND_ENTRIES_V1_HW,
.complete_hdr_size = sizeof(struct hisi_sas_complete_v1_hdr),
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
index 9825a3f49f53..b934aec1eebb 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
@@ -55,10 +55,44 @@
#define HGC_DFX_CFG2 0xc0
#define HGC_IOMB_PROC1_STATUS 0x104
#define CFG_1US_TIMER_TRSH 0xcc
+#define HGC_LM_DFX_STATUS2 0x128
+#define HGC_LM_DFX_STATUS2_IOSTLIST_OFF 0
+#define HGC_LM_DFX_STATUS2_IOSTLIST_MSK (0xfff << \
+ HGC_LM_DFX_STATUS2_IOSTLIST_OFF)
+#define HGC_LM_DFX_STATUS2_ITCTLIST_OFF 12
+#define HGC_LM_DFX_STATUS2_ITCTLIST_MSK (0x7ff << \
+ HGC_LM_DFX_STATUS2_ITCTLIST_OFF)
+#define HGC_CQE_ECC_ADDR 0x13c
+#define HGC_CQE_ECC_1B_ADDR_OFF 0
+#define HGC_CQE_ECC_1B_ADDR_MSK (0x3f << HGC_CQE_ECC_1B_ADDR_OFF)
+#define HGC_CQE_ECC_MB_ADDR_OFF 8
+#define HGC_CQE_ECC_MB_ADDR_MSK (0x3f << HGC_CQE_ECC_MB_ADDR_OFF)
+#define HGC_IOST_ECC_ADDR 0x140
+#define HGC_IOST_ECC_1B_ADDR_OFF 0
+#define HGC_IOST_ECC_1B_ADDR_MSK (0x3ff << HGC_IOST_ECC_1B_ADDR_OFF)
+#define HGC_IOST_ECC_MB_ADDR_OFF 16
+#define HGC_IOST_ECC_MB_ADDR_MSK (0x3ff << HGC_IOST_ECC_MB_ADDR_OFF)
+#define HGC_DQE_ECC_ADDR 0x144
+#define HGC_DQE_ECC_1B_ADDR_OFF 0
+#define HGC_DQE_ECC_1B_ADDR_MSK (0xfff << HGC_DQE_ECC_1B_ADDR_OFF)
+#define HGC_DQE_ECC_MB_ADDR_OFF 16
+#define HGC_DQE_ECC_MB_ADDR_MSK (0xfff << HGC_DQE_ECC_MB_ADDR_OFF)
#define HGC_INVLD_DQE_INFO 0x148
#define HGC_INVLD_DQE_INFO_FB_CH0_OFF 9
#define HGC_INVLD_DQE_INFO_FB_CH0_MSK (0x1 << HGC_INVLD_DQE_INFO_FB_CH0_OFF)
#define HGC_INVLD_DQE_INFO_FB_CH3_OFF 18
+#define HGC_ITCT_ECC_ADDR 0x150
+#define HGC_ITCT_ECC_1B_ADDR_OFF 0
+#define HGC_ITCT_ECC_1B_ADDR_MSK (0x3ff << \
+ HGC_ITCT_ECC_1B_ADDR_OFF)
+#define HGC_ITCT_ECC_MB_ADDR_OFF 16
+#define HGC_ITCT_ECC_MB_ADDR_MSK (0x3ff << \
+ HGC_ITCT_ECC_MB_ADDR_OFF)
+#define HGC_AXI_FIFO_ERR_INFO 0x154
+#define AXI_ERR_INFO_OFF 0
+#define AXI_ERR_INFO_MSK (0xff << AXI_ERR_INFO_OFF)
+#define FIFO_ERR_INFO_OFF 8
+#define FIFO_ERR_INFO_MSK (0xff << FIFO_ERR_INFO_OFF)
#define INT_COAL_EN 0x19c
#define OQ_INT_COAL_TIME 0x1a0
#define OQ_INT_COAL_CNT 0x1a4
@@ -73,13 +107,41 @@
#define ENT_INT_SRC1_D2H_FIS_CH1_MSK (0x1 << ENT_INT_SRC1_D2H_FIS_CH1_OFF)
#define ENT_INT_SRC2 0x1bc
#define ENT_INT_SRC3 0x1c0
+#define ENT_INT_SRC3_WP_DEPTH_OFF 8
+#define ENT_INT_SRC3_IPTT_SLOT_NOMATCH_OFF 9
+#define ENT_INT_SRC3_RP_DEPTH_OFF 10
+#define ENT_INT_SRC3_AXI_OFF 11
+#define ENT_INT_SRC3_FIFO_OFF 12
+#define ENT_INT_SRC3_LM_OFF 14
#define ENT_INT_SRC3_ITC_INT_OFF 15
#define ENT_INT_SRC3_ITC_INT_MSK (0x1 << ENT_INT_SRC3_ITC_INT_OFF)
+#define ENT_INT_SRC3_ABT_OFF 16
#define ENT_INT_SRC_MSK1 0x1c4
#define ENT_INT_SRC_MSK2 0x1c8
#define ENT_INT_SRC_MSK3 0x1cc
#define ENT_INT_SRC_MSK3_ENT95_MSK_OFF 31
#define ENT_INT_SRC_MSK3_ENT95_MSK_MSK (0x1 << ENT_INT_SRC_MSK3_ENT95_MSK_OFF)
+#define SAS_ECC_INTR 0x1e8
+#define SAS_ECC_INTR_DQE_ECC_1B_OFF 0
+#define SAS_ECC_INTR_DQE_ECC_MB_OFF 1
+#define SAS_ECC_INTR_IOST_ECC_1B_OFF 2
+#define SAS_ECC_INTR_IOST_ECC_MB_OFF 3
+#define SAS_ECC_INTR_ITCT_ECC_MB_OFF 4
+#define SAS_ECC_INTR_ITCT_ECC_1B_OFF 5
+#define SAS_ECC_INTR_IOSTLIST_ECC_MB_OFF 6
+#define SAS_ECC_INTR_IOSTLIST_ECC_1B_OFF 7
+#define SAS_ECC_INTR_ITCTLIST_ECC_1B_OFF 8
+#define SAS_ECC_INTR_ITCTLIST_ECC_MB_OFF 9
+#define SAS_ECC_INTR_CQE_ECC_1B_OFF 10
+#define SAS_ECC_INTR_CQE_ECC_MB_OFF 11
+#define SAS_ECC_INTR_NCQ_MEM0_ECC_MB_OFF 12
+#define SAS_ECC_INTR_NCQ_MEM0_ECC_1B_OFF 13
+#define SAS_ECC_INTR_NCQ_MEM1_ECC_MB_OFF 14
+#define SAS_ECC_INTR_NCQ_MEM1_ECC_1B_OFF 15
+#define SAS_ECC_INTR_NCQ_MEM2_ECC_MB_OFF 16
+#define SAS_ECC_INTR_NCQ_MEM2_ECC_1B_OFF 17
+#define SAS_ECC_INTR_NCQ_MEM3_ECC_MB_OFF 18
+#define SAS_ECC_INTR_NCQ_MEM3_ECC_1B_OFF 19
#define SAS_ECC_INTR_MSK 0x1ec
#define HGC_ERR_STAT_EN 0x238
#define DLVRY_Q_0_BASE_ADDR_LO 0x260
@@ -94,7 +156,20 @@
#define COMPL_Q_0_DEPTH 0x4e8
#define COMPL_Q_0_WR_PTR 0x4ec
#define COMPL_Q_0_RD_PTR 0x4f0
-
+#define HGC_RXM_DFX_STATUS14 0xae8
+#define HGC_RXM_DFX_STATUS14_MEM0_OFF 0
+#define HGC_RXM_DFX_STATUS14_MEM0_MSK (0x1ff << \
+ HGC_RXM_DFX_STATUS14_MEM0_OFF)
+#define HGC_RXM_DFX_STATUS14_MEM1_OFF 9
+#define HGC_RXM_DFX_STATUS14_MEM1_MSK (0x1ff << \
+ HGC_RXM_DFX_STATUS14_MEM1_OFF)
+#define HGC_RXM_DFX_STATUS14_MEM2_OFF 18
+#define HGC_RXM_DFX_STATUS14_MEM2_MSK (0x1ff << \
+ HGC_RXM_DFX_STATUS14_MEM2_OFF)
+#define HGC_RXM_DFX_STATUS15 0xaec
+#define HGC_RXM_DFX_STATUS15_MEM3_OFF 0
+#define HGC_RXM_DFX_STATUS15_MEM3_MSK (0x1ff << \
+ HGC_RXM_DFX_STATUS15_MEM3_OFF)
/* phy registers need init */
#define PORT_BASE (0x2000)
@@ -119,6 +194,9 @@
#define SL_CONTROL_NOTIFY_EN_MSK (0x1 << SL_CONTROL_NOTIFY_EN_OFF)
#define SL_CONTROL_CTA_OFF 17
#define SL_CONTROL_CTA_MSK (0x1 << SL_CONTROL_CTA_OFF)
+#define RX_PRIMS_STATUS (PORT_BASE + 0x98)
+#define RX_BCAST_CHG_OFF 1
+#define RX_BCAST_CHG_MSK (0x1 << RX_BCAST_CHG_OFF)
#define TX_ID_DWORD0 (PORT_BASE + 0x9c)
#define TX_ID_DWORD1 (PORT_BASE + 0xa0)
#define TX_ID_DWORD2 (PORT_BASE + 0xa4)
@@ -267,6 +345,8 @@
#define ITCT_HDR_RTOLT_OFF 48
#define ITCT_HDR_RTOLT_MSK (0xffffULL << ITCT_HDR_RTOLT_OFF)
+#define HISI_SAS_FATAL_INT_NR 2
+
struct hisi_sas_complete_v2_hdr {
__le32 dw0;
__le32 dw1;
@@ -659,8 +739,6 @@ static void free_device_v2_hw(struct hisi_hba *hisi_hba,
qw0 &= ~(1 << ITCT_HDR_VALID_OFF);
hisi_sas_write32(hisi_hba, ENT_INT_SRC3,
ENT_INT_SRC3_ITC_INT_MSK);
- hisi_hba->devices[dev_id].dev_type = SAS_PHY_UNUSED;
- hisi_hba->devices[dev_id].dev_status = HISI_SAS_DEV_NORMAL;
/* clear the itct */
hisi_sas_write32(hisi_hba, ITCT_CLR, 0);
@@ -808,7 +886,7 @@ static void init_reg_v2_hw(struct hisi_hba *hisi_hba)
hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK1, 0x7efefefe);
hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK2, 0x7efefefe);
hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK3, 0x7ffffffe);
- hisi_sas_write32(hisi_hba, SAS_ECC_INTR_MSK, 0xfffff3c0);
+ hisi_sas_write32(hisi_hba, SAS_ECC_INTR_MSK, 0xfff00c30);
for (i = 0; i < hisi_hba->queue_count; i++)
hisi_sas_write32(hisi_hba, OQ0_INT_SRC_MSK+0x4*i, 0);
@@ -824,7 +902,7 @@ static void init_reg_v2_hw(struct hisi_hba *hisi_hba)
hisi_sas_phy_write32(hisi_hba, i, DONE_RECEIVED_TIME, 0x10);
hisi_sas_phy_write32(hisi_hba, i, CHL_INT0, 0xffffffff);
hisi_sas_phy_write32(hisi_hba, i, CHL_INT1, 0xffffffff);
- hisi_sas_phy_write32(hisi_hba, i, CHL_INT2, 0xffffffff);
+ hisi_sas_phy_write32(hisi_hba, i, CHL_INT2, 0xfff87fff);
hisi_sas_phy_write32(hisi_hba, i, RXOP_CHECK_CFG_H, 0x1000);
hisi_sas_phy_write32(hisi_hba, i, CHL_INT1_MSK, 0xffffffff);
hisi_sas_phy_write32(hisi_hba, i, CHL_INT2_MSK, 0x8ffffbff);
@@ -836,7 +914,9 @@ static void init_reg_v2_hw(struct hisi_hba *hisi_hba)
hisi_sas_phy_write32(hisi_hba, i, SL_RX_BCAST_CHK_MSK, 0x0);
hisi_sas_phy_write32(hisi_hba, i, CHL_INT_COAL_EN, 0x0);
hisi_sas_phy_write32(hisi_hba, i, PHYCTRL_OOB_RESTART_MSK, 0x0);
- hisi_sas_phy_write32(hisi_hba, i, PHY_CTRL, 0x199B694);
+ if (hisi_hba->refclk_frequency_mhz == 66)
+ hisi_sas_phy_write32(hisi_hba, i, PHY_CTRL, 0x199B694);
+ /* else leave PHY_CTRL at its reset default */
}
for (i = 0; i < hisi_hba->queue_count; i++) {
@@ -980,6 +1060,49 @@ static void sl_notify_v2_hw(struct hisi_hba *hisi_hba, int phy_no)
hisi_sas_phy_write32(hisi_hba, phy_no, SL_CONTROL, sl_control);
}
+static enum sas_linkrate phy_get_max_linkrate_v2_hw(void)
+{
+ return SAS_LINK_RATE_12_0_GBPS;
+}
+
+static void phy_set_linkrate_v2_hw(struct hisi_hba *hisi_hba, int phy_no,
+ struct sas_phy_linkrates *r)
+{
+ u32 prog_phy_link_rate =
+ hisi_sas_phy_read32(hisi_hba, phy_no, PROG_PHY_LINK_RATE);
+ struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
+ struct asd_sas_phy *sas_phy = &phy->sas_phy;
+ int i;
+ enum sas_linkrate min, max;
+ u32 rate_mask = 0;
+
+ if (r->maximum_linkrate == SAS_LINK_RATE_UNKNOWN) {
+ max = sas_phy->phy->maximum_linkrate;
+ min = r->minimum_linkrate;
+ } else if (r->minimum_linkrate == SAS_LINK_RATE_UNKNOWN) {
+ max = r->maximum_linkrate;
+ min = sas_phy->phy->minimum_linkrate;
+ } else
+ return;
+
+ sas_phy->phy->maximum_linkrate = max;
+ sas_phy->phy->minimum_linkrate = min;
+
+ min -= SAS_LINK_RATE_1_5_GBPS;
+ max -= SAS_LINK_RATE_1_5_GBPS;
+
+ for (i = 0; i <= max; i++)
+ rate_mask |= 1 << (i * 2);
+
+ prog_phy_link_rate &= ~0xff;
+ prog_phy_link_rate |= rate_mask;
+
+ hisi_sas_phy_write32(hisi_hba, phy_no, PROG_PHY_LINK_RATE,
+ prog_phy_link_rate);
+
+ phy_hard_reset_v2_hw(hisi_hba, phy_no);
+}
+
static int get_wideport_bitmap_v2_hw(struct hisi_hba *hisi_hba, int port_id)
{
int i, bitmap = 0;
@@ -1010,29 +1133,24 @@ static int get_wideport_bitmap_v2_hw(struct hisi_hba *hisi_hba, int port_id)
* The call path to this function, up to and including the write of the
* delivery queue write pointer, should be safe from interruption.
*/
-static int get_free_slot_v2_hw(struct hisi_hba *hisi_hba, int *q, int *s)
+static int get_free_slot_v2_hw(struct hisi_hba *hisi_hba, u32 dev_id,
+ int *q, int *s)
{
struct device *dev = &hisi_hba->pdev->dev;
struct hisi_sas_dq *dq;
u32 r, w;
- int queue = hisi_hba->queue;
-
- while (1) {
- dq = &hisi_hba->dq[queue];
- w = dq->wr_point;
- r = hisi_sas_read32_relaxed(hisi_hba,
- DLVRY_Q_0_RD_PTR + (queue * 0x14));
- if (r == (w+1) % HISI_SAS_QUEUE_SLOTS) {
- queue = (queue + 1) % hisi_hba->queue_count;
- if (queue == hisi_hba->queue) {
- dev_warn(dev, "could not find free slot\n");
- return -EAGAIN;
- }
- continue;
- }
- break;
+ int queue = dev_id % hisi_hba->queue_count;
+
+ dq = &hisi_hba->dq[queue];
+ w = dq->wr_point;
+ r = hisi_sas_read32_relaxed(hisi_hba,
+ DLVRY_Q_0_RD_PTR + (queue * 0x14));
+ if (r == (w+1) % HISI_SAS_QUEUE_SLOTS) {
+ dev_warn(dev, "full queue=%d r=%d w=%d\n\n",
+ queue, r, w);
+ return -EAGAIN;
}
- hisi_hba->queue = (queue + 1) % hisi_hba->queue_count;
+
*q = queue;
*s = w;
return 0;
@@ -1653,8 +1771,8 @@ slot_complete_v2_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot,
}
out:
- if (sas_dev && sas_dev->running_req)
- sas_dev->running_req--;
+ if (sas_dev)
+ atomic64_dec(&sas_dev->running_req);
hisi_sas_slot_task_free(hisi_hba, task, slot);
sts = ts->stat;
@@ -1675,6 +1793,7 @@ static u8 get_ata_protocol(u8 cmd, int direction)
case ATA_CMD_NCQ_NON_DATA:
return SATA_PROTOCOL_FPDMA;
+ case ATA_CMD_DOWNLOAD_MICRO:
case ATA_CMD_ID_ATA:
case ATA_CMD_PMP_READ:
case ATA_CMD_READ_LOG_EXT:
@@ -1686,18 +1805,27 @@ static u8 get_ata_protocol(u8 cmd, int direction)
case ATA_CMD_PIO_WRITE_EXT:
return SATA_PROTOCOL_PIO;
+ case ATA_CMD_DSM:
+ case ATA_CMD_DOWNLOAD_MICRO_DMA:
+ case ATA_CMD_PMP_READ_DMA:
+ case ATA_CMD_PMP_WRITE_DMA:
case ATA_CMD_READ:
case ATA_CMD_READ_EXT:
case ATA_CMD_READ_LOG_DMA_EXT:
+ case ATA_CMD_READ_STREAM_DMA_EXT:
+ case ATA_CMD_TRUSTED_RCV_DMA:
+ case ATA_CMD_TRUSTED_SND_DMA:
case ATA_CMD_WRITE:
case ATA_CMD_WRITE_EXT:
+ case ATA_CMD_WRITE_FUA_EXT:
case ATA_CMD_WRITE_QUEUED:
case ATA_CMD_WRITE_LOG_DMA_EXT:
+ case ATA_CMD_WRITE_STREAM_DMA_EXT:
return SATA_PROTOCOL_DMA;
- case ATA_CMD_DOWNLOAD_MICRO:
- case ATA_CMD_DEV_RESET:
case ATA_CMD_CHK_POWER:
+ case ATA_CMD_DEV_RESET:
+ case ATA_CMD_EDD:
case ATA_CMD_FLUSH:
case ATA_CMD_FLUSH_EXT:
case ATA_CMD_VERIFY:
@@ -1970,9 +2098,12 @@ static void phy_bcast_v2_hw(int phy_no, struct hisi_hba *hisi_hba)
struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
struct asd_sas_phy *sas_phy = &phy->sas_phy;
struct sas_ha_struct *sas_ha = &hisi_hba->sha;
+ u32 bcast_status;
hisi_sas_phy_write32(hisi_hba, phy_no, SL_RX_BCAST_CHK_MSK, 1);
- sas_ha->notify_port_event(sas_phy, PORTE_BROADCAST_RCVD);
+ bcast_status = hisi_sas_phy_read32(hisi_hba, phy_no, RX_PRIMS_STATUS);
+ if (bcast_status & RX_BCAST_CHG_MSK)
+ sas_ha->notify_port_event(sas_phy, PORTE_BROADCAST_RCVD);
hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT0,
CHL_INT0_SL_RX_BCST_ACK_MSK);
hisi_sas_phy_write32(hisi_hba, phy_no, SL_RX_BCAST_CHK_MSK, 0);
@@ -2005,8 +2136,9 @@ static irqreturn_t int_chnl_int_v2_hw(int irq_no, void *p)
if (irq_value1) {
if (irq_value1 & (CHL_INT1_DMAC_RX_ECC_ERR_MSK |
CHL_INT1_DMAC_TX_ECC_ERR_MSK))
- panic("%s: DMAC RX/TX ecc bad error! (0x%x)",
- dev_name(dev), irq_value1);
+ panic("%s: DMAC RX/TX ecc bad error!\
+ (0x%x)",
+ dev_name(dev), irq_value1);
hisi_sas_phy_write32(hisi_hba, phy_no,
CHL_INT1, irq_value1);
@@ -2037,6 +2169,318 @@ static irqreturn_t int_chnl_int_v2_hw(int irq_no, void *p)
return IRQ_HANDLED;
}
+static void
+one_bit_ecc_error_process_v2_hw(struct hisi_hba *hisi_hba, u32 irq_value)
+{
+ struct device *dev = &hisi_hba->pdev->dev;
+ u32 reg_val;
+
+ if (irq_value & BIT(SAS_ECC_INTR_DQE_ECC_1B_OFF)) {
+ reg_val = hisi_sas_read32(hisi_hba, HGC_DQE_ECC_ADDR);
+ dev_warn(dev, "hgc_dqe_acc1b_intr found: \
+ Ram address is 0x%08X\n",
+ (reg_val & HGC_DQE_ECC_1B_ADDR_MSK) >>
+ HGC_DQE_ECC_1B_ADDR_OFF);
+ }
+
+ if (irq_value & BIT(SAS_ECC_INTR_IOST_ECC_1B_OFF)) {
+ reg_val = hisi_sas_read32(hisi_hba, HGC_IOST_ECC_ADDR);
+ dev_warn(dev, "hgc_iost_acc1b_intr found: \
+ Ram address is 0x%08X\n",
+ (reg_val & HGC_IOST_ECC_1B_ADDR_MSK) >>
+ HGC_IOST_ECC_1B_ADDR_OFF);
+ }
+
+ if (irq_value & BIT(SAS_ECC_INTR_ITCT_ECC_1B_OFF)) {
+ reg_val = hisi_sas_read32(hisi_hba, HGC_ITCT_ECC_ADDR);
+ dev_warn(dev, "hgc_itct_acc1b_intr found: \
+ Ram address is 0x%08X\n",
+ (reg_val & HGC_ITCT_ECC_1B_ADDR_MSK) >>
+ HGC_ITCT_ECC_1B_ADDR_OFF);
+ }
+
+ if (irq_value & BIT(SAS_ECC_INTR_IOSTLIST_ECC_1B_OFF)) {
+ reg_val = hisi_sas_read32(hisi_hba, HGC_LM_DFX_STATUS2);
+ dev_warn(dev, "hgc_iostl_acc1b_intr found: \
+ memory address is 0x%08X\n",
+ (reg_val & HGC_LM_DFX_STATUS2_IOSTLIST_MSK) >>
+ HGC_LM_DFX_STATUS2_IOSTLIST_OFF);
+ }
+
+ if (irq_value & BIT(SAS_ECC_INTR_ITCTLIST_ECC_1B_OFF)) {
+ reg_val = hisi_sas_read32(hisi_hba, HGC_LM_DFX_STATUS2);
+ dev_warn(dev, "hgc_itctl_acc1b_intr found: \
+ memory address is 0x%08X\n",
+ (reg_val & HGC_LM_DFX_STATUS2_ITCTLIST_MSK) >>
+ HGC_LM_DFX_STATUS2_ITCTLIST_OFF);
+ }
+
+ if (irq_value & BIT(SAS_ECC_INTR_CQE_ECC_1B_OFF)) {
+ reg_val = hisi_sas_read32(hisi_hba, HGC_CQE_ECC_ADDR);
+ dev_warn(dev, "hgc_cqe_acc1b_intr found: \
+ Ram address is 0x%08X\n",
+ (reg_val & HGC_CQE_ECC_1B_ADDR_MSK) >>
+ HGC_CQE_ECC_1B_ADDR_OFF);
+ }
+
+ if (irq_value & BIT(SAS_ECC_INTR_NCQ_MEM0_ECC_1B_OFF)) {
+ reg_val = hisi_sas_read32(hisi_hba, HGC_RXM_DFX_STATUS14);
+ dev_warn(dev, "rxm_mem0_acc1b_intr found: \
+ memory address is 0x%08X\n",
+ (reg_val & HGC_RXM_DFX_STATUS14_MEM0_MSK) >>
+ HGC_RXM_DFX_STATUS14_MEM0_OFF);
+ }
+
+ if (irq_value & BIT(SAS_ECC_INTR_NCQ_MEM1_ECC_1B_OFF)) {
+ reg_val = hisi_sas_read32(hisi_hba, HGC_RXM_DFX_STATUS14);
+ dev_warn(dev, "rxm_mem1_acc1b_intr found: \
+ memory address is 0x%08X\n",
+ (reg_val & HGC_RXM_DFX_STATUS14_MEM1_MSK) >>
+ HGC_RXM_DFX_STATUS14_MEM1_OFF);
+ }
+
+ if (irq_value & BIT(SAS_ECC_INTR_NCQ_MEM2_ECC_1B_OFF)) {
+ reg_val = hisi_sas_read32(hisi_hba, HGC_RXM_DFX_STATUS14);
+ dev_warn(dev, "rxm_mem2_acc1b_intr found: \
+ memory address is 0x%08X\n",
+ (reg_val & HGC_RXM_DFX_STATUS14_MEM2_MSK) >>
+ HGC_RXM_DFX_STATUS14_MEM2_OFF);
+ }
+
+ if (irq_value & BIT(SAS_ECC_INTR_NCQ_MEM3_ECC_1B_OFF)) {
+ reg_val = hisi_sas_read32(hisi_hba, HGC_RXM_DFX_STATUS15);
+ dev_warn(dev, "rxm_mem3_acc1b_intr found: \
+ memory address is 0x%08X\n",
+ (reg_val & HGC_RXM_DFX_STATUS15_MEM3_MSK) >>
+ HGC_RXM_DFX_STATUS15_MEM3_OFF);
+ }
+}
+
+static void multi_bit_ecc_error_process_v2_hw(struct hisi_hba *hisi_hba,
+ u32 irq_value)
+{
+ u32 reg_val;
+ struct device *dev = &hisi_hba->pdev->dev;
+
+ if (irq_value & BIT(SAS_ECC_INTR_DQE_ECC_MB_OFF)) {
+ reg_val = hisi_sas_read32(hisi_hba, HGC_DQE_ECC_ADDR);
+ panic("%s: hgc_dqe_accbad_intr (0x%x) found: \
+ Ram address is 0x%08X\n",
+ dev_name(dev), irq_value,
+ (reg_val & HGC_DQE_ECC_MB_ADDR_MSK) >>
+ HGC_DQE_ECC_MB_ADDR_OFF);
+ }
+
+ if (irq_value & BIT(SAS_ECC_INTR_IOST_ECC_MB_OFF)) {
+ reg_val = hisi_sas_read32(hisi_hba, HGC_IOST_ECC_ADDR);
+ panic("%s: hgc_iost_accbad_intr (0x%x) found: \
+ Ram address is 0x%08X\n",
+ dev_name(dev), irq_value,
+ (reg_val & HGC_IOST_ECC_MB_ADDR_MSK) >>
+ HGC_IOST_ECC_MB_ADDR_OFF);
+ }
+
+ if (irq_value & BIT(SAS_ECC_INTR_ITCT_ECC_MB_OFF)) {
+ reg_val = hisi_sas_read32(hisi_hba, HGC_ITCT_ECC_ADDR);
+ panic("%s: hgc_itct_accbad_intr (0x%x) found: \
+ Ram address is 0x%08X\n",
+ dev_name(dev), irq_value,
+ (reg_val & HGC_ITCT_ECC_MB_ADDR_MSK) >>
+ HGC_ITCT_ECC_MB_ADDR_OFF);
+ }
+
+ if (irq_value & BIT(SAS_ECC_INTR_IOSTLIST_ECC_MB_OFF)) {
+ reg_val = hisi_sas_read32(hisi_hba, HGC_LM_DFX_STATUS2);
+ panic("%s: hgc_iostl_accbad_intr (0x%x) found: \
+ memory address is 0x%08X\n",
+ dev_name(dev), irq_value,
+ (reg_val & HGC_LM_DFX_STATUS2_IOSTLIST_MSK) >>
+ HGC_LM_DFX_STATUS2_IOSTLIST_OFF);
+ }
+
+ if (irq_value & BIT(SAS_ECC_INTR_ITCTLIST_ECC_MB_OFF)) {
+ reg_val = hisi_sas_read32(hisi_hba, HGC_LM_DFX_STATUS2);
+ panic("%s: hgc_itctl_accbad_intr (0x%x) found: \
+ memory address is 0x%08X\n",
+ dev_name(dev), irq_value,
+ (reg_val & HGC_LM_DFX_STATUS2_ITCTLIST_MSK) >>
+ HGC_LM_DFX_STATUS2_ITCTLIST_OFF);
+ }
+
+ if (irq_value & BIT(SAS_ECC_INTR_CQE_ECC_MB_OFF)) {
+ reg_val = hisi_sas_read32(hisi_hba, HGC_CQE_ECC_ADDR);
+ panic("%s: hgc_cqe_accbad_intr (0x%x) found: \
+ Ram address is 0x%08X\n",
+ dev_name(dev), irq_value,
+ (reg_val & HGC_CQE_ECC_MB_ADDR_MSK) >>
+ HGC_CQE_ECC_MB_ADDR_OFF);
+ }
+
+ if (irq_value & BIT(SAS_ECC_INTR_NCQ_MEM0_ECC_MB_OFF)) {
+ reg_val = hisi_sas_read32(hisi_hba, HGC_RXM_DFX_STATUS14);
+ panic("%s: rxm_mem0_accbad_intr (0x%x) found: \
+ memory address is 0x%08X\n",
+ dev_name(dev), irq_value,
+ (reg_val & HGC_RXM_DFX_STATUS14_MEM0_MSK) >>
+ HGC_RXM_DFX_STATUS14_MEM0_OFF);
+ }
+
+ if (irq_value & BIT(SAS_ECC_INTR_NCQ_MEM1_ECC_MB_OFF)) {
+ reg_val = hisi_sas_read32(hisi_hba, HGC_RXM_DFX_STATUS14);
+ panic("%s: rxm_mem1_accbad_intr (0x%x) found: \
+ memory address is 0x%08X\n",
+ dev_name(dev), irq_value,
+ (reg_val & HGC_RXM_DFX_STATUS14_MEM1_MSK) >>
+ HGC_RXM_DFX_STATUS14_MEM1_OFF);
+ }
+
+ if (irq_value & BIT(SAS_ECC_INTR_NCQ_MEM2_ECC_MB_OFF)) {
+ reg_val = hisi_sas_read32(hisi_hba, HGC_RXM_DFX_STATUS14);
+ panic("%s: rxm_mem2_accbad_intr (0x%x) found: \
+ memory address is 0x%08X\n",
+ dev_name(dev), irq_value,
+ (reg_val & HGC_RXM_DFX_STATUS14_MEM2_MSK) >>
+ HGC_RXM_DFX_STATUS14_MEM2_OFF);
+ }
+
+ if (irq_value & BIT(SAS_ECC_INTR_NCQ_MEM3_ECC_MB_OFF)) {
+ reg_val = hisi_sas_read32(hisi_hba, HGC_RXM_DFX_STATUS15);
+ panic("%s: rxm_mem3_accbad_intr (0x%x) found: \
+ memory address is 0x%08X\n",
+ dev_name(dev), irq_value,
+ (reg_val & HGC_RXM_DFX_STATUS15_MEM3_MSK) >>
+ HGC_RXM_DFX_STATUS15_MEM3_OFF);
+ }
+}
+
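Both ECC handlers above repeat one idiom: test the interrupt summary with BIT(off), read the matching status register, then recover the faulting address with a (val & MSK) >> OFF pair. A generic sketch of that decode, with a hypothetical field in bits [15:4]:

#include <linux/bitops.h>	/* BIT() */
#include <linux/device.h>

#define EXAMPLE_ADDR_OFF	4
#define EXAMPLE_ADDR_MSK	(0xfff << EXAMPLE_ADDR_OFF)
#define EXAMPLE_INTR_1B_OFF	3	/* some *_ECC_1B_OFF summary bit */

static void example_decode(struct device *dev, u32 irq_value, u32 reg_val)
{
	if (irq_value & BIT(EXAMPLE_INTR_1B_OFF))
		dev_warn(dev, "1-bit ecc error, address 0x%08X\n",
			 (reg_val & EXAMPLE_ADDR_MSK) >> EXAMPLE_ADDR_OFF);
}

Single-bit errors are corrected by the hardware and only logged; the multi-bit variants panic because the corruption is unrecoverable.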
+static irqreturn_t fatal_ecc_int_v2_hw(int irq_no, void *p)
+{
+ struct hisi_hba *hisi_hba = p;
+ u32 irq_value, irq_msk;
+
+ irq_msk = hisi_sas_read32(hisi_hba, SAS_ECC_INTR_MSK);
+ hisi_sas_write32(hisi_hba, SAS_ECC_INTR_MSK, irq_msk | 0xffffffff);
+
+ irq_value = hisi_sas_read32(hisi_hba, SAS_ECC_INTR);
+ if (irq_value) {
+ one_bit_ecc_error_process_v2_hw(hisi_hba, irq_value);
+ multi_bit_ecc_error_process_v2_hw(hisi_hba, irq_value);
+ }
+
+ hisi_sas_write32(hisi_hba, SAS_ECC_INTR, irq_value);
+ hisi_sas_write32(hisi_hba, SAS_ECC_INTR_MSK, irq_msk);
+
+ return IRQ_HANDLED;
+}
+
+#define AXI_ERR_NR 8
+static const char axi_err_info[AXI_ERR_NR][32] = {
+ "IOST_AXI_W_ERR",
+ "IOST_AXI_R_ERR",
+ "ITCT_AXI_W_ERR",
+ "ITCT_AXI_R_ERR",
+ "SATA_AXI_W_ERR",
+ "SATA_AXI_R_ERR",
+ "DQE_AXI_R_ERR",
+ "CQE_AXI_W_ERR"
+};
+
+#define FIFO_ERR_NR 5
+static const char fifo_err_info[FIFO_ERR_NR][32] = {
+ "CQE_WINFO_FIFO",
+ "CQE_MSG_FIFIO",
+ "GETDQE_FIFO",
+ "CMDP_FIFO",
+ "AWTCTRL_FIFO"
+};
+
+static irqreturn_t fatal_axi_int_v2_hw(int irq_no, void *p)
+{
+ struct hisi_hba *hisi_hba = p;
+ u32 irq_value, irq_msk, err_value;
+ struct device *dev = &hisi_hba->pdev->dev;
+
+ irq_msk = hisi_sas_read32(hisi_hba, ENT_INT_SRC_MSK3);
+ hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK3, irq_msk | 0xfffffffe);
+
+ irq_value = hisi_sas_read32(hisi_hba, ENT_INT_SRC3);
+ if (irq_value) {
+ if (irq_value & BIT(ENT_INT_SRC3_WP_DEPTH_OFF)) {
+ hisi_sas_write32(hisi_hba, ENT_INT_SRC3,
+ 1 << ENT_INT_SRC3_WP_DEPTH_OFF);
+ panic("%s: write pointer and depth error (0x%x) \
+ found!\n",
+ dev_name(dev), irq_value);
+ }
+
+ if (irq_value & BIT(ENT_INT_SRC3_IPTT_SLOT_NOMATCH_OFF)) {
+ hisi_sas_write32(hisi_hba, ENT_INT_SRC3,
+ 1 <<
+ ENT_INT_SRC3_IPTT_SLOT_NOMATCH_OFF);
+ panic("%s: iptt no match slot error (0x%x) found!\n",
+ dev_name(dev), irq_value);
+ }
+
+ if (irq_value & BIT(ENT_INT_SRC3_RP_DEPTH_OFF))
+ panic("%s: read pointer and depth error (0x%x) \
+ found!\n",
+ dev_name(dev), irq_value);
+
+ if (irq_value & BIT(ENT_INT_SRC3_AXI_OFF)) {
+ int i;
+
+ hisi_sas_write32(hisi_hba, ENT_INT_SRC3,
+ 1 << ENT_INT_SRC3_AXI_OFF);
+ err_value = hisi_sas_read32(hisi_hba,
+ HGC_AXI_FIFO_ERR_INFO);
+
+ for (i = 0; i < AXI_ERR_NR; i++) {
+ if (err_value & BIT(i))
+ panic("%s: %s (0x%x) found!\n",
+ dev_name(dev),
+ axi_err_info[i], irq_value);
+ }
+ }
+
+ if (irq_value & BIT(ENT_INT_SRC3_FIFO_OFF)) {
+ int i;
+
+ hisi_sas_write32(hisi_hba, ENT_INT_SRC3,
+ 1 << ENT_INT_SRC3_FIFO_OFF);
+ err_value = hisi_sas_read32(hisi_hba,
+ HGC_AXI_FIFO_ERR_INFO);
+
+ for (i = 0; i < FIFO_ERR_NR; i++) {
+ if (err_value & BIT(AXI_ERR_NR + i))
+ panic("%s: %s (0x%x) found!\n",
+ dev_name(dev),
+ fifo_err_info[i], irq_value);
+ }
+ }
+
+ if (irq_value & BIT(ENT_INT_SRC3_LM_OFF)) {
+ hisi_sas_write32(hisi_hba, ENT_INT_SRC3,
+ 1 << ENT_INT_SRC3_LM_OFF);
+ panic("%s: LM add/fetch list error (0x%x) found!\n",
+ dev_name(dev), irq_value);
+ }
+
+ if (irq_value & BIT(ENT_INT_SRC3_ABT_OFF)) {
+ hisi_sas_write32(hisi_hba, ENT_INT_SRC3,
+ 1 << ENT_INT_SRC3_ABT_OFF);
+ panic("%s: SAS_HGC_ABT fetch LM list error (0x%x) found!\n",
+ dev_name(dev), irq_value);
+ }
+ }
+
+ hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK3, irq_msk);
+
+ return IRQ_HANDLED;
+}
+
static irqreturn_t cq_interrupt_v2_hw(int irq_no, void *p)
{
struct hisi_sas_cq *cq = p;
@@ -2136,6 +2580,16 @@ static irqreturn_t sata_int_v2_hw(int irq_no, void *p)
goto end;
}
+ /* check ERR bit of Status Register */
+ if (fis->status & ATA_ERR) {
+ dev_warn(dev, "sata int: phy%d FIS status: 0x%x\n", phy_no,
+ fis->status);
+ disable_phy_v2_hw(hisi_hba, phy_no);
+ enable_phy_v2_hw(hisi_hba, phy_no);
+ res = IRQ_NONE;
+ goto end;
+ }
+
if (unlikely(phy_no == 8)) {
u32 port_state = hisi_sas_read32(hisi_hba, PORT_STATE);
@@ -2190,6 +2644,11 @@ static irq_handler_t phy_interrupts[HISI_SAS_PHY_INT_NR] = {
int_chnl_int_v2_hw,
};
+static irq_handler_t fatal_interrupts[HISI_SAS_FATAL_INT_NR] = {
+ fatal_ecc_int_v2_hw,
+ fatal_axi_int_v2_hw
+};
+
/**
* There is a limitation in the hip06 chipset that we need
* to map in all mbigen interrupts, even if they are not used.
@@ -2245,6 +2704,26 @@ static int interrupt_init_v2_hw(struct hisi_hba *hisi_hba)
}
}
+ for (i = 0; i < HISI_SAS_FATAL_INT_NR; i++) {
+ int idx = i;
+
+ irq = irq_map[idx + 81];
+ if (!irq) {
+ dev_err(dev, "irq init: fail map fatal interrupt %d\n",
+ idx);
+ return -ENOENT;
+ }
+
+ rc = devm_request_irq(dev, irq, fatal_interrupts[i], 0,
+ DRV_NAME " fatal", hisi_hba);
+ if (rc) {
+ dev_err(dev,
+ "irq init: could not request fatal interrupt %d, rc=%d\n",
+ irq, rc);
+ return -ENOENT;
+ }
+ }
+
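The fatal-interrupt loop above follows the file's existing pattern: translate a fixed mbigen index into a Linux irq number, then register a handler with devm_request_irq() so the vector is released automatically when the device unbinds. The registration step on its own, hedged (base index 81 is simply taken from the patch's table):

	int irq, rc;

	irq = irq_map[idx + 81];
	if (!irq)
		return -ENOENT;

	/* devm_: no matching free_irq() needed on the error/remove paths */
	rc = devm_request_irq(dev, irq, fatal_ecc_int_v2_hw, 0,
			      DRV_NAME " fatal", hisi_hba);
	if (rc)
		return -ENOENT;	/* the patch folds rc into -ENOENT */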
for (i = 0; i < hisi_hba->queue_count; i++) {
int idx = i + 96; /* First cq interrupt is irq96 */
@@ -2303,12 +2782,26 @@ static const struct hisi_sas_hw hisi_sas_v2_hw = {
.phy_enable = enable_phy_v2_hw,
.phy_disable = disable_phy_v2_hw,
.phy_hard_reset = phy_hard_reset_v2_hw,
+ .phy_set_linkrate = phy_set_linkrate_v2_hw,
+ .phy_get_max_linkrate = phy_get_max_linkrate_v2_hw,
.max_command_entries = HISI_SAS_COMMAND_ENTRIES_V2_HW,
.complete_hdr_size = sizeof(struct hisi_sas_complete_v2_hdr),
};
static int hisi_sas_v2_probe(struct platform_device *pdev)
{
+ /*
+ * Check if we should defer the probe before we probe the
+ * upper layer, as it's hard to defer later on.
+ */
+ int ret = platform_get_irq(pdev, 0);
+
+ if (ret < 0) {
+ if (ret != -EPROBE_DEFER)
+ dev_err(&pdev->dev, "cannot obtain irq\n");
+ return ret;
+ }
+
return hisi_sas_probe(pdev, &hisi_sas_v2_hw);
}
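
The guard added to hisi_sas_v2_probe() is the standard deferral pattern: check a resource that may not exist yet (here the first irq) before committing to the expensive part of probe, and return -EPROBE_DEFER untouched so the driver core retries once the irq provider has registered. Skeleton form, names hypothetical:

static int example_probe(struct platform_device *pdev)
{
	int ret = platform_get_irq(pdev, 0);

	if (ret < 0) {
		/* -EPROBE_DEFER is expected noise; only complain otherwise */
		if (ret != -EPROBE_DEFER)
			dev_err(&pdev->dev, "cannot obtain irq\n");
		return ret;
	}

	return example_probe_rest(pdev);	/* hypothetical heavy lifting */
}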
@@ -2319,6 +2812,7 @@ static int hisi_sas_v2_remove(struct platform_device *pdev)
static const struct of_device_id sas_v2_of_match[] = {
{ .compatible = "hisilicon,hip06-sas-v2",},
+ { .compatible = "hisilicon,hip07-sas-v2",},
{},
};
MODULE_DEVICE_TABLE(of, sas_v2_of_match);
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
index a1d6ab76a514..cbc0c5fe5a60 100644
--- a/drivers/scsi/hpsa.c
+++ b/drivers/scsi/hpsa.c
@@ -276,6 +276,9 @@ static int hpsa_find_cfg_addrs(struct pci_dev *pdev, void __iomem *vaddr,
static int hpsa_pci_find_memory_BAR(struct pci_dev *pdev,
unsigned long *memory_bar);
static int hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id);
+static int wait_for_device_to_become_ready(struct ctlr_info *h,
+ unsigned char lunaddr[],
+ int reply_queue);
static int hpsa_wait_for_board_state(struct pci_dev *pdev, void __iomem *vaddr,
int wait_for_ready);
static inline void finish_cmd(struct CommandList *c);
@@ -700,9 +703,7 @@ static ssize_t lunid_show(struct device *dev,
}
memcpy(lunid, hdev->scsi3addr, sizeof(lunid));
spin_unlock_irqrestore(&h->lock, flags);
- return snprintf(buf, 20, "0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
- lunid[0], lunid[1], lunid[2], lunid[3],
- lunid[4], lunid[5], lunid[6], lunid[7]);
+ return snprintf(buf, 20, "0x%8phN\n", lunid);
}
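
This hunk and several below swap hand-rolled "%02x%02x..." runs for the kernel's %ph printk extension: "%8phN" dumps 8 bytes from the pointed-to buffer as hex with no separators, so the LUN prints identically but the call can no longer get its eight arguments out of order. For example:

	u8 lunid[8] = { 0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef };

	pr_info("0x%8phN\n", lunid);	/* prints: 0x0123456789abcdef */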
static ssize_t unique_id_show(struct device *dev,
@@ -864,6 +865,16 @@ static ssize_t path_info_show(struct device *dev,
return output_len;
}
+static ssize_t host_show_ctlr_num(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct ctlr_info *h;
+ struct Scsi_Host *shost = class_to_shost(dev);
+
+ h = shost_to_hba(shost);
+ return snprintf(buf, 20, "%d\n", h->ctlr);
+}
+
static DEVICE_ATTR(raid_level, S_IRUGO, raid_level_show, NULL);
static DEVICE_ATTR(lunid, S_IRUGO, lunid_show, NULL);
static DEVICE_ATTR(unique_id, S_IRUGO, unique_id_show, NULL);
@@ -887,6 +898,8 @@ static DEVICE_ATTR(resettable, S_IRUGO,
host_show_resettable, NULL);
static DEVICE_ATTR(lockup_detected, S_IRUGO,
host_show_lockup_detected, NULL);
+static DEVICE_ATTR(ctlr_num, S_IRUGO,
+ host_show_ctlr_num, NULL);
static struct device_attribute *hpsa_sdev_attrs[] = {
&dev_attr_raid_level,
@@ -907,6 +920,7 @@ static struct device_attribute *hpsa_shost_attrs[] = {
&dev_attr_hp_ssd_smart_path_status,
&dev_attr_raid_offload_debug,
&dev_attr_lockup_detected,
+ &dev_attr_ctlr_num,
NULL,
};
@@ -1001,7 +1015,7 @@ static void set_performant_mode(struct ctlr_info *h, struct CommandList *c,
{
if (likely(h->transMethod & CFGTBL_Trans_Performant)) {
c->busaddr |= 1 | (h->blockFetchTable[c->Header.SGList] << 1);
- if (unlikely(!h->msix_vector))
+ if (unlikely(!h->msix_vectors))
return;
if (likely(reply_queue == DEFAULT_REPLY_QUEUE))
c->Header.ReplyQueue =
@@ -1543,10 +1557,9 @@ static void hpsa_monitor_offline_device(struct ctlr_info *h,
/* Device is not on the list, add it. */
device = kmalloc(sizeof(*device), GFP_KERNEL);
- if (!device) {
- dev_warn(&h->pdev->dev, "out of memory in %s\n", __func__);
+ if (!device)
return;
- }
+
memcpy(device->scsi3addr, scsi3addr, sizeof(device->scsi3addr));
spin_lock_irqsave(&h->offline_device_lock, flags);
list_add_tail(&device->offline_list, &h->offline_device_list);
@@ -2128,17 +2141,15 @@ static int hpsa_alloc_sg_chain_blocks(struct ctlr_info *h)
h->cmd_sg_list = kzalloc(sizeof(*h->cmd_sg_list) * h->nr_cmds,
GFP_KERNEL);
- if (!h->cmd_sg_list) {
- dev_err(&h->pdev->dev, "Failed to allocate SG list\n");
+ if (!h->cmd_sg_list)
return -ENOMEM;
- }
+
for (i = 0; i < h->nr_cmds; i++) {
h->cmd_sg_list[i] = kmalloc(sizeof(*h->cmd_sg_list[i]) *
h->chainsize, GFP_KERNEL);
- if (!h->cmd_sg_list[i]) {
- dev_err(&h->pdev->dev, "Failed to allocate cmd SG\n");
+ if (!h->cmd_sg_list[i])
goto clean;
- }
+
}
return 0;
@@ -2541,7 +2552,7 @@ static void complete_scsi_command(struct CommandList *cp)
if ((unlikely(hpsa_is_pending_event(cp)))) {
if (cp->reset_pending)
- return hpsa_cmd_resolve_and_free(h, cp);
+ return hpsa_cmd_free_and_done(h, cp, cmd);
if (cp->abort_pending)
return hpsa_cmd_abort_and_free(h, cp, cmd);
}
@@ -2824,14 +2835,8 @@ static void hpsa_print_cmd(struct ctlr_info *h, char *txt,
const u8 *cdb = c->Request.CDB;
const u8 *lun = c->Header.LUN.LunAddrBytes;
- dev_warn(&h->pdev->dev, "%s: LUN:%02x%02x%02x%02x%02x%02x%02x%02x"
- " CDB:%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x\n",
- txt, lun[0], lun[1], lun[2], lun[3],
- lun[4], lun[5], lun[6], lun[7],
- cdb[0], cdb[1], cdb[2], cdb[3],
- cdb[4], cdb[5], cdb[6], cdb[7],
- cdb[8], cdb[9], cdb[10], cdb[11],
- cdb[12], cdb[13], cdb[14], cdb[15]);
+ dev_warn(&h->pdev->dev, "%s: LUN:%8phN CDB:%16phN\n",
+ txt, lun, cdb);
}
static void hpsa_scsi_interpret_error(struct ctlr_info *h,
@@ -3080,6 +3085,8 @@ static int hpsa_do_reset(struct ctlr_info *h, struct hpsa_scsi_dev_t *dev,
if (unlikely(rc))
atomic_set(&dev->reset_cmds_out, 0);
+ else
+ wait_for_device_to_become_ready(h, scsi3addr, 0);
mutex_unlock(&h->reset_mutex);
return rc;
@@ -3444,11 +3451,8 @@ static void hpsa_get_sas_address(struct ctlr_info *h, unsigned char *scsi3addr,
struct bmic_sense_subsystem_info *ssi;
ssi = kzalloc(sizeof(*ssi), GFP_KERNEL);
- if (ssi == NULL) {
- dev_warn(&h->pdev->dev,
- "%s: out of memory\n", __func__);
+ if (!ssi)
return;
- }
rc = hpsa_bmic_sense_subsystem_information(h,
scsi3addr, 0, ssi, sizeof(*ssi));
@@ -3623,8 +3627,32 @@ out:
static inline int hpsa_scsi_do_report_phys_luns(struct ctlr_info *h,
struct ReportExtendedLUNdata *buf, int bufsize)
{
- return hpsa_scsi_do_report_luns(h, 0, buf, bufsize,
- HPSA_REPORT_PHYS_EXTENDED);
+ int rc;
+ struct ReportLUNdata *lbuf;
+
+ rc = hpsa_scsi_do_report_luns(h, 0, buf, bufsize,
+ HPSA_REPORT_PHYS_EXTENDED);
+ if (!rc || !hpsa_allow_any)
+ return rc;
+
+ /* REPORT PHYS EXTENDED is not supported */
+ lbuf = kzalloc(sizeof(*lbuf), GFP_KERNEL);
+ if (!lbuf)
+ return -ENOMEM;
+
+ rc = hpsa_scsi_do_report_luns(h, 0, lbuf, sizeof(*lbuf), 0);
+ if (!rc) {
+ int i;
+ u32 nphys;
+
+ /* Copy ReportLUNdata header */
+ memcpy(buf, lbuf, 8);
+ nphys = be32_to_cpu(*((__be32 *)lbuf->LUNListLength)) / 8;
+ for (i = 0; i < nphys; i++)
+ memcpy(buf->LUN[i].lunid, lbuf->LUN[i], 8);
+ }
+ kfree(lbuf);
+ return rc;
}
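
The fallback above re-packs a plain REPORT LUNS response into the extended-report buffer. The LUN list length field is a big-endian 32-bit byte count and each entry is 8 bytes, hence the division by 8. That decode as a standalone sketch (the struct is a hypothetical mirror of the header the driver copies):

#include <linux/types.h>
#include <asm/byteorder.h>

struct example_report_luns_hdr {
	u8 list_length[4];	/* big-endian byte count of the LUN entries */
	u8 reserved[4];
};

static u32 example_lun_count(const struct example_report_luns_hdr *h)
{
	return be32_to_cpu(*(const __be32 *)h->list_length) / 8;
}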
static inline int hpsa_scsi_do_report_log_luns(struct ctlr_info *h,
@@ -4301,8 +4329,6 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h)
currentsd[i] = kzalloc(sizeof(*currentsd[i]), GFP_KERNEL);
if (!currentsd[i]) {
- dev_warn(&h->pdev->dev, "out of memory at %s:%d\n",
- __FILE__, __LINE__);
h->drv_req_rescan = 1;
goto out;
}
@@ -5488,7 +5514,7 @@ static int hpsa_scsi_queue_command(struct Scsi_Host *sh, struct scsi_cmnd *cmd)
dev = cmd->device->hostdata;
if (!dev) {
- cmd->result = NOT_READY << 16; /* host byte */
+ cmd->result = DID_NO_CONNECT << 16;
cmd->scsi_done(cmd);
return 0;
}
@@ -5569,6 +5595,14 @@ static void hpsa_scan_start(struct Scsi_Host *sh)
if (unlikely(lockup_detected(h)))
return hpsa_scan_complete(h);
+ /*
+ * Do the scan after a reset completion
+ */
+ if (h->reset_in_progress) {
+ h->drv_req_rescan = 1;
+ return;
+ }
+
hpsa_update_scsi_devices(h);
hpsa_scan_complete(h);
@@ -5624,7 +5658,7 @@ static int hpsa_scsi_host_alloc(struct ctlr_info *h)
sh->sg_tablesize = h->maxsgentries;
sh->transportt = hpsa_sas_transport_template;
sh->hostdata[0] = (unsigned long) h;
- sh->irq = h->intr[h->intr_mode];
+ sh->irq = pci_irq_vector(h->pdev, 0);
sh->unique_id = sh->irq;
h->scsi_host = sh;
@@ -5999,11 +6033,9 @@ static int hpsa_send_reset_as_abort_ioaccel2(struct ctlr_info *h,
if (h->raid_offload_debug > 0)
dev_info(&h->pdev->dev,
- "scsi %d:%d:%d:%d %s scsi3addr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
+ "scsi %d:%d:%d:%d %s scsi3addr 0x%8phN\n",
h->scsi_host->host_no, dev->bus, dev->target, dev->lun,
- "Reset as abort",
- scsi3addr[0], scsi3addr[1], scsi3addr[2], scsi3addr[3],
- scsi3addr[4], scsi3addr[5], scsi3addr[6], scsi3addr[7]);
+ "Reset as abort", scsi3addr);
if (!dev->offload_enabled) {
dev_warn(&h->pdev->dev,
@@ -6020,32 +6052,28 @@ static int hpsa_send_reset_as_abort_ioaccel2(struct ctlr_info *h,
/* send the reset */
if (h->raid_offload_debug > 0)
dev_info(&h->pdev->dev,
- "Reset as abort: Resetting physical device at scsi3addr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
- psa[0], psa[1], psa[2], psa[3],
- psa[4], psa[5], psa[6], psa[7]);
+ "Reset as abort: Resetting physical device at scsi3addr 0x%8phN\n",
+ psa);
rc = hpsa_do_reset(h, dev, psa, HPSA_PHYS_TARGET_RESET, reply_queue);
if (rc != 0) {
dev_warn(&h->pdev->dev,
- "Reset as abort: Failed on physical device at scsi3addr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
- psa[0], psa[1], psa[2], psa[3],
- psa[4], psa[5], psa[6], psa[7]);
+ "Reset as abort: Failed on physical device at scsi3addr 0x%8phN\n",
+ psa);
return rc; /* failed to reset */
}
/* wait for device to recover */
if (wait_for_device_to_become_ready(h, psa, reply_queue) != 0) {
dev_warn(&h->pdev->dev,
- "Reset as abort: Failed: Device never recovered from reset: 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
- psa[0], psa[1], psa[2], psa[3],
- psa[4], psa[5], psa[6], psa[7]);
+ "Reset as abort: Failed: Device never recovered from reset: 0x%8phN\n",
+ psa);
return -1; /* failed to recover */
}
/* device recovered */
dev_info(&h->pdev->dev,
- "Reset as abort: Device recovered from reset: scsi3addr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
- psa[0], psa[1], psa[2], psa[3],
- psa[4], psa[5], psa[6], psa[7]);
+ "Reset as abort: Device recovered from reset: scsi3addr 0x%8phN\n",
+ psa);
return rc; /* success */
}
@@ -6663,8 +6691,7 @@ static int hpsa_big_passthru_ioctl(struct ctlr_info *h, void __user *argp)
return -EINVAL;
if (!capable(CAP_SYS_RAWIO))
return -EPERM;
- ioc = (BIG_IOCTL_Command_struct *)
- kmalloc(sizeof(*ioc), GFP_KERNEL);
+ ioc = kmalloc(sizeof(*ioc), GFP_KERNEL);
if (!ioc) {
status = -ENOMEM;
goto cleanup1;
@@ -7658,67 +7685,41 @@ static int find_PCI_BAR_index(struct pci_dev *pdev, unsigned long pci_bar_addr)
static void hpsa_disable_interrupt_mode(struct ctlr_info *h)
{
- if (h->msix_vector) {
- if (h->pdev->msix_enabled)
- pci_disable_msix(h->pdev);
- h->msix_vector = 0;
- } else if (h->msi_vector) {
- if (h->pdev->msi_enabled)
- pci_disable_msi(h->pdev);
- h->msi_vector = 0;
- }
+ pci_free_irq_vectors(h->pdev);
+ h->msix_vectors = 0;
}
/* If MSI/MSI-X is supported by the kernel we will try to enable it on
* controllers that are capable. If not, we use legacy INTx mode.
*/
-static void hpsa_interrupt_mode(struct ctlr_info *h)
+static int hpsa_interrupt_mode(struct ctlr_info *h)
{
-#ifdef CONFIG_PCI_MSI
- int err, i;
- struct msix_entry hpsa_msix_entries[MAX_REPLY_QUEUES];
-
- for (i = 0; i < MAX_REPLY_QUEUES; i++) {
- hpsa_msix_entries[i].vector = 0;
- hpsa_msix_entries[i].entry = i;
- }
+ unsigned int flags = PCI_IRQ_LEGACY;
+ int ret;
/* Some boards advertise MSI but don't really support it */
- if ((h->board_id == 0x40700E11) || (h->board_id == 0x40800E11) ||
- (h->board_id == 0x40820E11) || (h->board_id == 0x40830E11))
- goto default_int_mode;
- if (pci_find_capability(h->pdev, PCI_CAP_ID_MSIX)) {
- dev_info(&h->pdev->dev, "MSI-X capable controller\n");
- h->msix_vector = MAX_REPLY_QUEUES;
- if (h->msix_vector > num_online_cpus())
- h->msix_vector = num_online_cpus();
- err = pci_enable_msix_range(h->pdev, hpsa_msix_entries,
- 1, h->msix_vector);
- if (err < 0) {
- dev_warn(&h->pdev->dev, "MSI-X init failed %d\n", err);
- h->msix_vector = 0;
- goto single_msi_mode;
- } else if (err < h->msix_vector) {
- dev_warn(&h->pdev->dev, "only %d MSI-X vectors "
- "available\n", err);
+ switch (h->board_id) {
+ case 0x40700E11:
+ case 0x40800E11:
+ case 0x40820E11:
+ case 0x40830E11:
+ break;
+ default:
+ ret = pci_alloc_irq_vectors(h->pdev, 1, MAX_REPLY_QUEUES,
+ PCI_IRQ_MSIX | PCI_IRQ_AFFINITY);
+ if (ret > 0) {
+ h->msix_vectors = ret;
+ return 0;
}
- h->msix_vector = err;
- for (i = 0; i < h->msix_vector; i++)
- h->intr[i] = hpsa_msix_entries[i].vector;
- return;
- }
-single_msi_mode:
- if (pci_find_capability(h->pdev, PCI_CAP_ID_MSI)) {
- dev_info(&h->pdev->dev, "MSI capable controller\n");
- if (!pci_enable_msi(h->pdev))
- h->msi_vector = 1;
- else
- dev_warn(&h->pdev->dev, "MSI init failed\n");
+
+ flags |= PCI_IRQ_MSI;
+ break;
}
-default_int_mode:
-#endif /* CONFIG_PCI_MSI */
- /* if we get here we're going to use the default interrupt mode */
- h->intr[h->intr_mode] = h->pdev->irq;
+
+ ret = pci_alloc_irq_vectors(h->pdev, 1, 1, flags);
+ if (ret < 0)
+ return ret;
+ return 0;
}
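
The rewritten hpsa_interrupt_mode() above leans on the pci_alloc_irq_vectors() API instead of hand-managed msix_entry arrays: request up to MAX_REPLY_QUEUES MSI-X vectors with affinity spreading, otherwise settle for a single MSI or legacy vector, and later resolve per-queue irq numbers with pci_irq_vector(). The shape of the pattern, as a fragment:

	int nvec, irq;

	nvec = pci_alloc_irq_vectors(pdev, 1, MAX_REPLY_QUEUES,
				     PCI_IRQ_MSIX | PCI_IRQ_AFFINITY);
	if (nvec < 0) {
		/* single-vector fallback: MSI if possible, else INTx */
		nvec = pci_alloc_irq_vectors(pdev, 1, 1,
					     PCI_IRQ_MSI | PCI_IRQ_LEGACY);
		if (nvec < 0)
			return nvec;
	}

	/* Linux irq number for vector 0, for request_irq()/free_irq() */
	irq = pci_irq_vector(pdev, 0);

Teardown then collapses to a single pci_free_irq_vectors() call, which is exactly what the new hpsa_disable_interrupt_mode() does.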
static int hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id)
@@ -8074,7 +8075,9 @@ static int hpsa_pci_init(struct ctlr_info *h)
pci_set_master(h->pdev);
- hpsa_interrupt_mode(h);
+ err = hpsa_interrupt_mode(h);
+ if (err)
+ goto clean1;
err = hpsa_pci_find_memory_BAR(h->pdev, &h->paddr);
if (err)
goto clean2; /* intmode+region, pci */
@@ -8110,6 +8113,7 @@ clean3: /* vaddr, intmode+region, pci */
h->vaddr = NULL;
clean2: /* intmode+region, pci */
hpsa_disable_interrupt_mode(h);
+clean1:
/*
* call pci_disable_device before pci_release_regions per
* Documentation/PCI/pci.txt
@@ -8243,34 +8247,20 @@ clean_up:
return -ENOMEM;
}
-static void hpsa_irq_affinity_hints(struct ctlr_info *h)
-{
- int i, cpu;
-
- cpu = cpumask_first(cpu_online_mask);
- for (i = 0; i < h->msix_vector; i++) {
- irq_set_affinity_hint(h->intr[i], get_cpu_mask(cpu));
- cpu = cpumask_next(cpu, cpu_online_mask);
- }
-}
-
/* clear affinity hints and free MSI-X, MSI, or legacy INTx vectors */
static void hpsa_free_irqs(struct ctlr_info *h)
{
int i;
- if (!h->msix_vector || h->intr_mode != PERF_MODE_INT) {
+ if (!h->msix_vectors || h->intr_mode != PERF_MODE_INT) {
/* Single reply queue, only one irq to free */
- i = h->intr_mode;
- irq_set_affinity_hint(h->intr[i], NULL);
- free_irq(h->intr[i], &h->q[i]);
- h->q[i] = 0;
+ free_irq(pci_irq_vector(h->pdev, 0), &h->q[h->intr_mode]);
+ h->q[h->intr_mode] = 0;
return;
}
- for (i = 0; i < h->msix_vector; i++) {
- irq_set_affinity_hint(h->intr[i], NULL);
- free_irq(h->intr[i], &h->q[i]);
+ for (i = 0; i < h->msix_vectors; i++) {
+ free_irq(pci_irq_vector(h->pdev, i), &h->q[i]);
h->q[i] = 0;
}
for (; i < MAX_REPLY_QUEUES; i++)
@@ -8291,11 +8281,11 @@ static int hpsa_request_irqs(struct ctlr_info *h,
for (i = 0; i < MAX_REPLY_QUEUES; i++)
h->q[i] = (u8) i;
- if (h->intr_mode == PERF_MODE_INT && h->msix_vector > 0) {
+ if (h->intr_mode == PERF_MODE_INT && h->msix_vectors > 0) {
/* If performant mode and MSI-X, use multiple reply queues */
- for (i = 0; i < h->msix_vector; i++) {
+ for (i = 0; i < h->msix_vectors; i++) {
sprintf(h->intrname[i], "%s-msix%d", h->devname, i);
- rc = request_irq(h->intr[i], msixhandler,
+ rc = request_irq(pci_irq_vector(h->pdev, i), msixhandler,
0, h->intrname[i],
&h->q[i]);
if (rc) {
@@ -8303,9 +8293,9 @@ static int hpsa_request_irqs(struct ctlr_info *h,
dev_err(&h->pdev->dev,
"failed to get irq %d for %s\n",
- h->intr[i], h->devname);
+ pci_irq_vector(h->pdev, i), h->devname);
for (j = 0; j < i; j++) {
- free_irq(h->intr[j], &h->q[j]);
+ free_irq(pci_irq_vector(h->pdev, j), &h->q[j]);
h->q[j] = 0;
}
for (; j < MAX_REPLY_QUEUES; j++)
@@ -8313,33 +8303,27 @@ static int hpsa_request_irqs(struct ctlr_info *h,
return rc;
}
}
- hpsa_irq_affinity_hints(h);
} else {
/* Use single reply pool */
- if (h->msix_vector > 0 || h->msi_vector) {
- if (h->msix_vector)
- sprintf(h->intrname[h->intr_mode],
- "%s-msix", h->devname);
- else
- sprintf(h->intrname[h->intr_mode],
- "%s-msi", h->devname);
- rc = request_irq(h->intr[h->intr_mode],
+ if (h->msix_vectors > 0 || h->pdev->msi_enabled) {
+ sprintf(h->intrname[0], "%s-msi%s", h->devname,
+ h->msix_vectors ? "x" : "");
+ rc = request_irq(pci_irq_vector(h->pdev, 0),
msixhandler, 0,
- h->intrname[h->intr_mode],
+ h->intrname[0],
&h->q[h->intr_mode]);
} else {
sprintf(h->intrname[h->intr_mode],
"%s-intx", h->devname);
- rc = request_irq(h->intr[h->intr_mode],
+ rc = request_irq(pci_irq_vector(h->pdev, 0),
intxhandler, IRQF_SHARED,
- h->intrname[h->intr_mode],
+ h->intrname[0],
&h->q[h->intr_mode]);
}
- irq_set_affinity_hint(h->intr[h->intr_mode], NULL);
}
if (rc) {
dev_err(&h->pdev->dev, "failed to get irq %d for %s\n",
- h->intr[h->intr_mode], h->devname);
+ pci_irq_vector(h->pdev, 0), h->devname);
hpsa_free_irqs(h);
return -ENODEV;
}
@@ -8605,14 +8589,12 @@ static int hpsa_luns_changed(struct ctlr_info *h)
*/
if (!h->lastlogicals)
- goto out;
+ return rc;
logdev = kzalloc(sizeof(*logdev), GFP_KERNEL);
- if (!logdev) {
- dev_warn(&h->pdev->dev,
- "Out of memory, can't track lun changes.\n");
- goto out;
- }
+ if (!logdev)
+ return rc;
+
if (hpsa_scsi_do_report_luns(h, 1, logdev, sizeof(*logdev), 0)) {
dev_warn(&h->pdev->dev,
"report luns failed, can't track lun changes.\n");
@@ -8640,6 +8622,14 @@ static void hpsa_rescan_ctlr_worker(struct work_struct *work)
if (h->remove_in_progress)
return;
+ /*
+ * Do the scan after the reset
+ */
+ if (h->reset_in_progress) {
+ h->drv_req_rescan = 1;
+ return;
+ }
+
if (hpsa_ctlr_needs_rescan(h) || hpsa_offline_devices_ready(h)) {
scsi_host_get(h->scsi_host);
hpsa_ack_ctlr_events(h);
@@ -8998,11 +8988,8 @@ static void hpsa_disable_rld_caching(struct ctlr_info *h)
return;
options = kzalloc(sizeof(*options), GFP_KERNEL);
- if (!options) {
- dev_err(&h->pdev->dev,
- "Error: failed to disable rld caching, during alloc.\n");
+ if (!options)
return;
- }
c = cmd_alloc(h);
@@ -9525,7 +9512,7 @@ static int hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h)
return rc;
}
- h->nreply_queues = h->msix_vector > 0 ? h->msix_vector : 1;
+ h->nreply_queues = h->msix_vectors > 0 ? h->msix_vectors : 1;
hpsa_get_max_perf_mode_cmds(h);
/* Performant mode ring buffer and supporting data structures */
h->reply_queue_size = h->max_commands * sizeof(u64);
diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h
index 9ea162de80dc..64e98295b707 100644
--- a/drivers/scsi/hpsa.h
+++ b/drivers/scsi/hpsa.h
@@ -176,9 +176,7 @@ struct ctlr_info {
# define DOORBELL_INT 1
# define SIMPLE_MODE_INT 2
# define MEMQ_MODE_INT 3
- unsigned int intr[MAX_REPLY_QUEUES];
- unsigned int msix_vector;
- unsigned int msi_vector;
+ unsigned int msix_vectors;
int intr_mode; /* either PERF_MODE_INT or SIMPLE_MODE_INT */
struct access_method access;
@@ -466,7 +464,7 @@ static unsigned long SA5_performant_completed(struct ctlr_info *h, u8 q)
unsigned long register_value = FIFO_EMPTY;
/* msi auto clears the interrupt pending bit. */
- if (unlikely(!(h->msi_vector || h->msix_vector))) {
+ if (unlikely(!(h->pdev->msi_enabled || h->msix_vectors))) {
/* flush the controller write of the reply queue by reading
* outbound doorbell status register.
*/
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c
index 7e487c78279c..78b72c28a55d 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.c
+++ b/drivers/scsi/ibmvscsi/ibmvfc.c
@@ -32,6 +32,7 @@
#include <linux/of.h>
#include <linux/pm.h>
#include <linux/stringify.h>
+#include <linux/bsg-lib.h>
#include <asm/firmware.h>
#include <asm/irq.h>
#include <asm/vio.h>
@@ -1701,14 +1702,14 @@ static void ibmvfc_bsg_timeout_done(struct ibmvfc_event *evt)
/**
* ibmvfc_bsg_timeout - Handle a BSG timeout
- * @job: struct fc_bsg_job that timed out
+ * @job: struct bsg_job that timed out
*
* Returns:
* 0 on success / other on failure
**/
-static int ibmvfc_bsg_timeout(struct fc_bsg_job *job)
+static int ibmvfc_bsg_timeout(struct bsg_job *job)
{
- struct ibmvfc_host *vhost = shost_priv(job->shost);
+ struct ibmvfc_host *vhost = shost_priv(fc_bsg_to_shost(job));
unsigned long port_id = (unsigned long)job->dd_data;
struct ibmvfc_event *evt;
struct ibmvfc_tmf *tmf;
@@ -1814,41 +1815,43 @@ unlock_out:
/**
* ibmvfc_bsg_request - Handle a BSG request
- * @job: struct fc_bsg_job to be executed
+ * @job: struct bsg_job to be executed
*
* Returns:
* 0 on success / other on failure
**/
-static int ibmvfc_bsg_request(struct fc_bsg_job *job)
+static int ibmvfc_bsg_request(struct bsg_job *job)
{
- struct ibmvfc_host *vhost = shost_priv(job->shost);
- struct fc_rport *rport = job->rport;
+ struct ibmvfc_host *vhost = shost_priv(fc_bsg_to_shost(job));
+ struct fc_rport *rport = fc_bsg_to_rport(job);
struct ibmvfc_passthru_mad *mad;
struct ibmvfc_event *evt;
union ibmvfc_iu rsp_iu;
unsigned long flags, port_id = -1;
- unsigned int code = job->request->msgcode;
+ struct fc_bsg_request *bsg_request = job->request;
+ struct fc_bsg_reply *bsg_reply = job->reply;
+ unsigned int code = bsg_request->msgcode;
int rc = 0, req_seg, rsp_seg, issue_login = 0;
u32 fc_flags, rsp_len;
ENTER;
- job->reply->reply_payload_rcv_len = 0;
+ bsg_reply->reply_payload_rcv_len = 0;
if (rport)
port_id = rport->port_id;
switch (code) {
case FC_BSG_HST_ELS_NOLOGIN:
- port_id = (job->request->rqst_data.h_els.port_id[0] << 16) |
- (job->request->rqst_data.h_els.port_id[1] << 8) |
- job->request->rqst_data.h_els.port_id[2];
+ port_id = (bsg_request->rqst_data.h_els.port_id[0] << 16) |
+ (bsg_request->rqst_data.h_els.port_id[1] << 8) |
+ bsg_request->rqst_data.h_els.port_id[2];
case FC_BSG_RPT_ELS:
fc_flags = IBMVFC_FC_ELS;
break;
case FC_BSG_HST_CT:
issue_login = 1;
- port_id = (job->request->rqst_data.h_ct.port_id[0] << 16) |
- (job->request->rqst_data.h_ct.port_id[1] << 8) |
- job->request->rqst_data.h_ct.port_id[2];
+ port_id = (bsg_request->rqst_data.h_ct.port_id[0] << 16) |
+ (bsg_request->rqst_data.h_ct.port_id[1] << 8) |
+ bsg_request->rqst_data.h_ct.port_id[2];
case FC_BSG_RPT_CT:
fc_flags = IBMVFC_FC_CT_IU;
break;
@@ -1937,13 +1940,14 @@ static int ibmvfc_bsg_request(struct fc_bsg_job *job)
if (rsp_iu.passthru.common.status)
rc = -EIO;
else
- job->reply->reply_payload_rcv_len = rsp_len;
+ bsg_reply->reply_payload_rcv_len = rsp_len;
spin_lock_irqsave(vhost->host->host_lock, flags);
ibmvfc_free_event(evt);
spin_unlock_irqrestore(vhost->host->host_lock, flags);
- job->reply->result = rc;
- job->job_done(job);
+ bsg_reply->result = rc;
+ bsg_job_done(job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
rc = 0;
out:
dma_unmap_sg(vhost->dev, job->request_payload.sg_list,
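
The ibmvfc changes track the fc_bsg_job to bsg_job conversion: the host and rport are reached through fc_bsg_to_shost()/fc_bsg_to_rport(), the request and reply live behind job->request and job->reply, and completion goes through bsg_job_done(). The skeleton every converted LLD handler ends up with, sketched:

#include <linux/bsg-lib.h>
#include <scsi/scsi_transport_fc.h>

static int example_bsg_request(struct bsg_job *job)
{
	struct Scsi_Host *shost = fc_bsg_to_shost(job);
	struct fc_rport *rport = fc_bsg_to_rport(job);	/* may be NULL for host-directed jobs */
	struct fc_bsg_request *bsg_request = job->request;
	struct fc_bsg_reply *bsg_reply = job->reply;

	bsg_reply->reply_payload_rcv_len = 0;

	/* ... issue the command described by bsg_request ... */

	bsg_reply->result = 0;
	bsg_job_done(job, bsg_reply->result,
		     bsg_reply->reply_payload_rcv_len);
	return 0;
}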
diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c
index d9534ee6ef52..50cd01165e35 100644
--- a/drivers/scsi/ibmvscsi/ibmvscsi.c
+++ b/drivers/scsi/ibmvscsi/ibmvscsi.c
@@ -95,6 +95,7 @@ static int fast_fail = 1;
static int client_reserve = 1;
static char partition_name[97] = "UNKNOWN";
static unsigned int partition_number = -1;
+static LIST_HEAD(ibmvscsi_head);
static struct scsi_transport_template *ibmvscsi_transport_template;
@@ -232,6 +233,7 @@ static void ibmvscsi_task(void *data)
while ((crq = crq_queue_next_crq(&hostdata->queue)) != NULL) {
ibmvscsi_handle_crq(crq, hostdata);
crq->valid = VIOSRP_CRQ_FREE;
+ wmb();
}
vio_enable_interrupts(vdev);
@@ -240,6 +242,7 @@ static void ibmvscsi_task(void *data)
vio_disable_interrupts(vdev);
ibmvscsi_handle_crq(crq, hostdata);
crq->valid = VIOSRP_CRQ_FREE;
+ wmb();
} else {
done = 1;
}
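
The wmb() calls added above close a small window on the shared CRQ ring: the store that marks an entry VIOSRP_CRQ_FREE must be globally visible before the loop goes on to re-enable interrupts or examine the next entry, otherwise the other side could still observe a stale valid byte. The consumer-side idiom, reduced:

	/* hand the ring entry back to the producer... */
	crq->valid = VIOSRP_CRQ_FREE;
	/* ...and order that store before anything that may let the producer
	 * refill the slot (interrupt enable, the next dequeue, etc.)
	 */
	wmb();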
@@ -992,7 +995,7 @@ static void handle_cmd_rsp(struct srp_event_struct *evt_struct)
if (unlikely(rsp->opcode != SRP_RSP)) {
if (printk_ratelimit())
dev_warn(evt_struct->hostdata->dev,
- "bad SRP RSP type %d\n", rsp->opcode);
+ "bad SRP RSP type %#02x\n", rsp->opcode);
}
if (cmnd) {
@@ -2270,6 +2273,7 @@ static int ibmvscsi_probe(struct vio_dev *vdev, const struct vio_device_id *id)
}
dev_set_drvdata(&vdev->dev, hostdata);
+ list_add_tail(&hostdata->host_list, &ibmvscsi_head);
return 0;
add_srp_port_failed:
@@ -2291,6 +2295,7 @@ static int ibmvscsi_probe(struct vio_dev *vdev, const struct vio_device_id *id)
static int ibmvscsi_remove(struct vio_dev *vdev)
{
struct ibmvscsi_host_data *hostdata = dev_get_drvdata(&vdev->dev);
+ list_del(&hostdata->host_list);
unmap_persist_bufs(hostdata);
release_event_pool(&hostdata->pool, hostdata);
ibmvscsi_release_crq_queue(&hostdata->queue, hostdata,
diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.h b/drivers/scsi/ibmvscsi/ibmvscsi.h
index e0f6c3aeb4ee..3a7875575616 100644
--- a/drivers/scsi/ibmvscsi/ibmvscsi.h
+++ b/drivers/scsi/ibmvscsi/ibmvscsi.h
@@ -90,6 +90,7 @@ struct event_pool {
/* all driver data associated with a host adapter */
struct ibmvscsi_host_data {
+ struct list_head host_list;
atomic_t request_limit;
int client_migrated;
int reset_crq;
diff --git a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
index 642b739ad0da..2583e8b50b21 100644
--- a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
+++ b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
@@ -22,7 +22,7 @@
*
****************************************************************************/
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/kernel.h>
@@ -30,6 +30,7 @@
#include <linux/types.h>
#include <linux/list.h>
#include <linux/string.h>
+#include <linux/delay.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>
@@ -81,7 +82,7 @@ static void ibmvscsis_determine_resid(struct se_cmd *se_cmd,
}
} else if (se_cmd->se_cmd_flags & SCF_OVERFLOW_BIT) {
if (se_cmd->data_direction == DMA_TO_DEVICE) {
- /* residual data from an overflow write */
+ /* residual data from an overflow write */
rsp->flags = SRP_RSP_FLAG_DOOVER;
rsp->data_out_res_cnt = cpu_to_be32(residual_count);
} else if (se_cmd->data_direction == DMA_FROM_DEVICE) {
@@ -101,7 +102,7 @@ static void ibmvscsis_determine_resid(struct se_cmd *se_cmd,
* and the function returns TRUE.
*
* EXECUTION ENVIRONMENT:
- * Interrupt or Process environment
+ * Interrupt or Process environment
*/
static bool connection_broken(struct scsi_info *vscsi)
{
@@ -324,7 +325,7 @@ static struct viosrp_crq *ibmvscsis_cmd_q_dequeue(uint mask,
}
/**
- * ibmvscsis_send_init_message() - send initialize message to the client
+ * ibmvscsis_send_init_message() - send initialize message to the client
* @vscsi: Pointer to our adapter structure
* @format: Which Init Message format to send
*
@@ -382,13 +383,13 @@ static long ibmvscsis_check_init_msg(struct scsi_info *vscsi, uint *format)
vscsi->cmd_q.base_addr);
if (crq) {
*format = (uint)(crq->format);
- rc = ERROR;
+ rc = ERROR;
crq->valid = INVALIDATE_CMD_RESP_EL;
dma_rmb();
}
} else {
*format = (uint)(crq->format);
- rc = ERROR;
+ rc = ERROR;
crq->valid = INVALIDATE_CMD_RESP_EL;
dma_rmb();
}
@@ -397,166 +398,6 @@ static long ibmvscsis_check_init_msg(struct scsi_info *vscsi, uint *format)
}
/**
- * ibmvscsis_establish_new_q() - Establish new CRQ queue
- * @vscsi: Pointer to our adapter structure
- * @new_state: New state being established after resetting the queue
- *
- * Must be called with interrupt lock held.
- */
-static long ibmvscsis_establish_new_q(struct scsi_info *vscsi, uint new_state)
-{
- long rc = ADAPT_SUCCESS;
- uint format;
-
- vscsi->flags &= PRESERVE_FLAG_FIELDS;
- vscsi->rsp_q_timer.timer_pops = 0;
- vscsi->debit = 0;
- vscsi->credit = 0;
-
- rc = vio_enable_interrupts(vscsi->dma_dev);
- if (rc) {
- pr_warn("reset_queue: failed to enable interrupts, rc %ld\n",
- rc);
- return rc;
- }
-
- rc = ibmvscsis_check_init_msg(vscsi, &format);
- if (rc) {
- dev_err(&vscsi->dev, "reset_queue: check_init_msg failed, rc %ld\n",
- rc);
- return rc;
- }
-
- if (format == UNUSED_FORMAT && new_state == WAIT_CONNECTION) {
- rc = ibmvscsis_send_init_message(vscsi, INIT_MSG);
- switch (rc) {
- case H_SUCCESS:
- case H_DROPPED:
- case H_CLOSED:
- rc = ADAPT_SUCCESS;
- break;
-
- case H_PARAMETER:
- case H_HARDWARE:
- break;
-
- default:
- vscsi->state = UNDEFINED;
- rc = H_HARDWARE;
- break;
- }
- }
-
- return rc;
-}
-
-/**
- * ibmvscsis_reset_queue() - Reset CRQ Queue
- * @vscsi: Pointer to our adapter structure
- * @new_state: New state to establish after resetting the queue
- *
- * This function calls h_free_q and then calls h_reg_q and does all
- * of the bookkeeping to get us back to where we can communicate.
- *
- * Actually, we don't always call h_free_crq. A problem was discovered
- * where one partition would close and reopen his queue, which would
- * cause his partner to get a transport event, which would cause him to
- * close and reopen his queue, which would cause the original partition
- * to get a transport event, etc., etc. To prevent this, we don't
- * actually close our queue if the client initiated the reset, (i.e.
- * either we got a transport event or we have detected that the client's
- * queue is gone)
- *
- * EXECUTION ENVIRONMENT:
- * Process environment, called with interrupt lock held
- */
-static void ibmvscsis_reset_queue(struct scsi_info *vscsi, uint new_state)
-{
- int bytes;
- long rc = ADAPT_SUCCESS;
-
- pr_debug("reset_queue: flags 0x%x\n", vscsi->flags);
-
- /* don't reset, the client did it for us */
- if (vscsi->flags & (CLIENT_FAILED | TRANS_EVENT)) {
- vscsi->flags &= PRESERVE_FLAG_FIELDS;
- vscsi->rsp_q_timer.timer_pops = 0;
- vscsi->debit = 0;
- vscsi->credit = 0;
- vscsi->state = new_state;
- vio_enable_interrupts(vscsi->dma_dev);
- } else {
- rc = ibmvscsis_free_command_q(vscsi);
- if (rc == ADAPT_SUCCESS) {
- vscsi->state = new_state;
-
- bytes = vscsi->cmd_q.size * PAGE_SIZE;
- rc = h_reg_crq(vscsi->dds.unit_id,
- vscsi->cmd_q.crq_token, bytes);
- if (rc == H_CLOSED || rc == H_SUCCESS) {
- rc = ibmvscsis_establish_new_q(vscsi,
- new_state);
- }
-
- if (rc != ADAPT_SUCCESS) {
- pr_debug("reset_queue: reg_crq rc %ld\n", rc);
-
- vscsi->state = ERR_DISCONNECTED;
- vscsi->flags |= RESPONSE_Q_DOWN;
- ibmvscsis_free_command_q(vscsi);
- }
- } else {
- vscsi->state = ERR_DISCONNECTED;
- vscsi->flags |= RESPONSE_Q_DOWN;
- }
- }
-}
-
-/**
- * ibmvscsis_free_cmd_resources() - Free command resources
- * @vscsi: Pointer to our adapter structure
- * @cmd: Command which is not longer in use
- *
- * Must be called with interrupt lock held.
- */
-static void ibmvscsis_free_cmd_resources(struct scsi_info *vscsi,
- struct ibmvscsis_cmd *cmd)
-{
- struct iu_entry *iue = cmd->iue;
-
- switch (cmd->type) {
- case TASK_MANAGEMENT:
- case SCSI_CDB:
- /*
- * When the queue goes down this value is cleared, so it
- * cannot be cleared in this general purpose function.
- */
- if (vscsi->debit)
- vscsi->debit -= 1;
- break;
- case ADAPTER_MAD:
- vscsi->flags &= ~PROCESSING_MAD;
- break;
- case UNSET_TYPE:
- break;
- default:
- dev_err(&vscsi->dev, "free_cmd_resources unknown type %d\n",
- cmd->type);
- break;
- }
-
- cmd->iue = NULL;
- list_add_tail(&cmd->list, &vscsi->free_cmd);
- srp_iu_put(iue);
-
- if (list_empty(&vscsi->active_q) && list_empty(&vscsi->schedule_q) &&
- list_empty(&vscsi->waiting_rsp) && (vscsi->flags & WAIT_FOR_IDLE)) {
- vscsi->flags &= ~WAIT_FOR_IDLE;
- complete(&vscsi->wait_idle);
- }
-}
-
-/**
* ibmvscsis_disconnect() - Helper function to disconnect
* @work: Pointer to work_struct, gives access to our adapter structure
*
@@ -575,7 +416,6 @@ static void ibmvscsis_disconnect(struct work_struct *work)
proc_work);
u16 new_state;
bool wait_idle = false;
- long rc = ADAPT_SUCCESS;
spin_lock_bh(&vscsi->intr_lock);
new_state = vscsi->new_state;
@@ -589,7 +429,7 @@ static void ibmvscsis_disconnect(struct work_struct *work)
* should transition to the new state
*/
switch (vscsi->state) {
- /* Should never be called while in this state. */
+ /* Should never be called while in this state. */
case NO_QUEUE:
/*
* Can never transition from this state;
@@ -628,30 +468,24 @@ static void ibmvscsis_disconnect(struct work_struct *work)
vscsi->state = new_state;
break;
- /*
- * If this is a transition into an error state.
- * a client is attempting to establish a connection
- * and has violated the RPA protocol.
- * There can be nothing pending on the adapter although
- * there can be requests in the command queue.
- */
case WAIT_ENABLED:
- case PART_UP_WAIT_ENAB:
switch (new_state) {
- case ERR_DISCONNECT:
- vscsi->flags |= RESPONSE_Q_DOWN;
+ case UNCONFIGURING:
vscsi->state = new_state;
+ vscsi->flags |= RESPONSE_Q_DOWN;
vscsi->flags &= ~(SCHEDULE_DISCONNECT |
DISCONNECT_SCHEDULED);
- ibmvscsis_free_command_q(vscsi);
- break;
- case ERR_DISCONNECT_RECONNECT:
- ibmvscsis_reset_queue(vscsi, WAIT_ENABLED);
+ dma_rmb();
+ if (vscsi->flags & CFG_SLEEPING) {
+ vscsi->flags &= ~CFG_SLEEPING;
+ complete(&vscsi->unconfig);
+ }
break;
/* should never happen */
+ case ERR_DISCONNECT:
+ case ERR_DISCONNECT_RECONNECT:
case WAIT_IDLE:
- rc = ERROR;
dev_err(&vscsi->dev, "disconnect: invalid state %d for WAIT_IDLE\n",
vscsi->state);
break;
@@ -660,6 +494,13 @@ static void ibmvscsis_disconnect(struct work_struct *work)
case WAIT_IDLE:
switch (new_state) {
+ case UNCONFIGURING:
+ vscsi->flags |= RESPONSE_Q_DOWN;
+ vscsi->state = new_state;
+ vscsi->flags &= ~(SCHEDULE_DISCONNECT |
+ DISCONNECT_SCHEDULED);
+ ibmvscsis_free_command_q(vscsi);
+ break;
case ERR_DISCONNECT:
case ERR_DISCONNECT_RECONNECT:
vscsi->state = new_state;
@@ -788,7 +629,6 @@ static void ibmvscsis_post_disconnect(struct scsi_info *vscsi, uint new_state,
break;
case WAIT_ENABLED:
- case PART_UP_WAIT_ENAB:
case WAIT_IDLE:
case WAIT_CONNECTION:
case CONNECTED:
@@ -806,6 +646,310 @@ static void ibmvscsis_post_disconnect(struct scsi_info *vscsi, uint new_state,
}
/**
+ * ibmvscsis_handle_init_compl_msg() - Respond to an Init Complete Message
+ * @vscsi: Pointer to our adapter structure
+ *
+ * Must be called with interrupt lock held.
+ */
+static long ibmvscsis_handle_init_compl_msg(struct scsi_info *vscsi)
+{
+ long rc = ADAPT_SUCCESS;
+
+ switch (vscsi->state) {
+ case NO_QUEUE:
+ case ERR_DISCONNECT:
+ case ERR_DISCONNECT_RECONNECT:
+ case ERR_DISCONNECTED:
+ case UNCONFIGURING:
+ case UNDEFINED:
+ rc = ERROR;
+ break;
+
+ case WAIT_CONNECTION:
+ vscsi->state = CONNECTED;
+ break;
+
+ case WAIT_IDLE:
+ case SRP_PROCESSING:
+ case CONNECTED:
+ case WAIT_ENABLED:
+ default:
+ rc = ERROR;
+ dev_err(&vscsi->dev, "init_msg: invalid state %d to get init compl msg\n",
+ vscsi->state);
+ ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
+ break;
+ }
+
+ return rc;
+}
+
+/**
+ * ibmvscsis_handle_init_msg() - Respond to an Init Message
+ * @vscsi: Pointer to our adapter structure
+ *
+ * Must be called with interrupt lock held.
+ */
+static long ibmvscsis_handle_init_msg(struct scsi_info *vscsi)
+{
+ long rc = ADAPT_SUCCESS;
+
+ switch (vscsi->state) {
+ case WAIT_CONNECTION:
+ rc = ibmvscsis_send_init_message(vscsi, INIT_COMPLETE_MSG);
+ switch (rc) {
+ case H_SUCCESS:
+ vscsi->state = CONNECTED;
+ break;
+
+ case H_PARAMETER:
+ dev_err(&vscsi->dev, "init_msg: failed to send, rc %ld\n",
+ rc);
+ ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT, 0);
+ break;
+
+ case H_DROPPED:
+ dev_err(&vscsi->dev, "init_msg: failed to send, rc %ld\n",
+ rc);
+ rc = ERROR;
+ ibmvscsis_post_disconnect(vscsi,
+ ERR_DISCONNECT_RECONNECT, 0);
+ break;
+
+ case H_CLOSED:
+ pr_warn("init_msg: failed to send, rc %ld\n", rc);
+ rc = 0;
+ break;
+ }
+ break;
+
+ case UNDEFINED:
+ rc = ERROR;
+ break;
+
+ case UNCONFIGURING:
+ break;
+
+ case WAIT_ENABLED:
+ case CONNECTED:
+ case SRP_PROCESSING:
+ case WAIT_IDLE:
+ case NO_QUEUE:
+ case ERR_DISCONNECT:
+ case ERR_DISCONNECT_RECONNECT:
+ case ERR_DISCONNECTED:
+ default:
+ rc = ERROR;
+ dev_err(&vscsi->dev, "init_msg: invalid state %d to get init msg\n",
+ vscsi->state);
+ ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
+ break;
+ }
+
+ return rc;
+}
+
+/**
+ * ibmvscsis_init_msg() - Respond to an init message
+ * @vscsi: Pointer to our adapter structure
+ * @crq: Pointer to CRQ element containing the Init Message
+ *
+ * EXECUTION ENVIRONMENT:
+ * Interrupt, interrupt lock held
+ */
+static long ibmvscsis_init_msg(struct scsi_info *vscsi, struct viosrp_crq *crq)
+{
+ long rc = ADAPT_SUCCESS;
+
+ pr_debug("init_msg: state 0x%hx\n", vscsi->state);
+
+ rc = h_vioctl(vscsi->dds.unit_id, H_GET_PARTNER_INFO,
+ (u64)vscsi->map_ioba | ((u64)PAGE_SIZE << 32), 0, 0, 0,
+ 0);
+ if (rc == H_SUCCESS) {
+ vscsi->client_data.partition_number =
+ be64_to_cpu(*(u64 *)vscsi->map_buf);
+ pr_debug("init_msg, part num %d\n",
+ vscsi->client_data.partition_number);
+ } else {
+ pr_debug("init_msg h_vioctl rc %ld\n", rc);
+ rc = ADAPT_SUCCESS;
+ }
+
+ if (crq->format == INIT_MSG) {
+ rc = ibmvscsis_handle_init_msg(vscsi);
+ } else if (crq->format == INIT_COMPLETE_MSG) {
+ rc = ibmvscsis_handle_init_compl_msg(vscsi);
+ } else {
+ rc = ERROR;
+ dev_err(&vscsi->dev, "init_msg: invalid format %d\n",
+ (uint)crq->format);
+ ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
+ }
+
+ return rc;
+}
+
+/**
+ * ibmvscsis_establish_new_q() - Establish new CRQ queue
+ * @vscsi: Pointer to our adapter structure
+ *
+ * Must be called with interrupt lock held.
+ */
+static long ibmvscsis_establish_new_q(struct scsi_info *vscsi)
+{
+ long rc = ADAPT_SUCCESS;
+ uint format;
+
+ vscsi->flags &= PRESERVE_FLAG_FIELDS;
+ vscsi->rsp_q_timer.timer_pops = 0;
+ vscsi->debit = 0;
+ vscsi->credit = 0;
+
+ rc = vio_enable_interrupts(vscsi->dma_dev);
+ if (rc) {
+ pr_warn("establish_new_q: failed to enable interrupts, rc %ld\n",
+ rc);
+ return rc;
+ }
+
+ rc = ibmvscsis_check_init_msg(vscsi, &format);
+ if (rc) {
+ dev_err(&vscsi->dev, "establish_new_q: check_init_msg failed, rc %ld\n",
+ rc);
+ return rc;
+ }
+
+ if (format == UNUSED_FORMAT) {
+ rc = ibmvscsis_send_init_message(vscsi, INIT_MSG);
+ switch (rc) {
+ case H_SUCCESS:
+ case H_DROPPED:
+ case H_CLOSED:
+ rc = ADAPT_SUCCESS;
+ break;
+
+ case H_PARAMETER:
+ case H_HARDWARE:
+ break;
+
+ default:
+ vscsi->state = UNDEFINED;
+ rc = H_HARDWARE;
+ break;
+ }
+ } else if (format == INIT_MSG) {
+ rc = ibmvscsis_handle_init_msg(vscsi);
+ }
+
+ return rc;
+}
+
+/**
+ * ibmvscsis_reset_queue() - Reset CRQ Queue
+ * @vscsi: Pointer to our adapter structure
+ *
+ * This function calls h_free_q and then calls h_reg_q and does all
+ * of the bookkeeping to get us back to where we can communicate.
+ *
+ * Actually, we don't always call h_free_crq. A problem was discovered
+ * where one partition would close and reopen his queue, which would
+ * cause his partner to get a transport event, which would cause him to
+ * close and reopen his queue, which would cause the original partition
+ * to get a transport event, etc., etc. To prevent this, we don't
+ * actually close our queue if the client initiated the reset, (i.e.
+ * either we got a transport event or we have detected that the client's
+ * queue is gone)
+ *
+ * EXECUTION ENVIRONMENT:
+ * Process environment, called with interrupt lock held
+ */
+static void ibmvscsis_reset_queue(struct scsi_info *vscsi)
+{
+ int bytes;
+ long rc = ADAPT_SUCCESS;
+
+ pr_debug("reset_queue: flags 0x%x\n", vscsi->flags);
+
+ /* don't reset, the client did it for us */
+ if (vscsi->flags & (CLIENT_FAILED | TRANS_EVENT)) {
+ vscsi->flags &= PRESERVE_FLAG_FIELDS;
+ vscsi->rsp_q_timer.timer_pops = 0;
+ vscsi->debit = 0;
+ vscsi->credit = 0;
+ vscsi->state = WAIT_CONNECTION;
+ vio_enable_interrupts(vscsi->dma_dev);
+ } else {
+ rc = ibmvscsis_free_command_q(vscsi);
+ if (rc == ADAPT_SUCCESS) {
+ vscsi->state = WAIT_CONNECTION;
+
+ bytes = vscsi->cmd_q.size * PAGE_SIZE;
+ rc = h_reg_crq(vscsi->dds.unit_id,
+ vscsi->cmd_q.crq_token, bytes);
+ if (rc == H_CLOSED || rc == H_SUCCESS) {
+ rc = ibmvscsis_establish_new_q(vscsi);
+ }
+
+ if (rc != ADAPT_SUCCESS) {
+ pr_debug("reset_queue: reg_crq rc %ld\n", rc);
+
+ vscsi->state = ERR_DISCONNECTED;
+ vscsi->flags |= RESPONSE_Q_DOWN;
+ ibmvscsis_free_command_q(vscsi);
+ }
+ } else {
+ vscsi->state = ERR_DISCONNECTED;
+ vscsi->flags |= RESPONSE_Q_DOWN;
+ }
+ }
+}
+
+/**
+ * ibmvscsis_free_cmd_resources() - Free command resources
+ * @vscsi: Pointer to our adapter structure
+ * @cmd: Command which is no longer in use
+ *
+ * Must be called with interrupt lock held.
+ */
+static void ibmvscsis_free_cmd_resources(struct scsi_info *vscsi,
+ struct ibmvscsis_cmd *cmd)
+{
+ struct iu_entry *iue = cmd->iue;
+
+ switch (cmd->type) {
+ case TASK_MANAGEMENT:
+ case SCSI_CDB:
+ /*
+ * When the queue goes down this value is cleared, so it
+ * cannot be cleared in this general purpose function.
+ */
+ if (vscsi->debit)
+ vscsi->debit -= 1;
+ break;
+ case ADAPTER_MAD:
+ vscsi->flags &= ~PROCESSING_MAD;
+ break;
+ case UNSET_TYPE:
+ break;
+ default:
+ dev_err(&vscsi->dev, "free_cmd_resources unknown type %d\n",
+ cmd->type);
+ break;
+ }
+
+ cmd->iue = NULL;
+ list_add_tail(&cmd->list, &vscsi->free_cmd);
+ srp_iu_put(iue);
+
+ if (list_empty(&vscsi->active_q) && list_empty(&vscsi->schedule_q) &&
+ list_empty(&vscsi->waiting_rsp) && (vscsi->flags & WAIT_FOR_IDLE)) {
+ vscsi->flags &= ~WAIT_FOR_IDLE;
+ complete(&vscsi->wait_idle);
+ }
+}
+
+/**
* ibmvscsis_trans_event() - Handle a Transport Event
* @vscsi: Pointer to our adapter structure
* @crq: Pointer to CRQ entry containing the Transport Event
@@ -863,10 +1007,6 @@ static long ibmvscsis_trans_event(struct scsi_info *vscsi,
TRANS_EVENT));
break;
- case PART_UP_WAIT_ENAB:
- vscsi->state = WAIT_ENABLED;
- break;
-
case SRP_PROCESSING:
if ((vscsi->debit > 0) ||
!list_empty(&vscsi->schedule_q) ||
@@ -895,7 +1035,7 @@ static long ibmvscsis_trans_event(struct scsi_info *vscsi,
}
}
- rc = vscsi->flags & SCHEDULE_DISCONNECT;
+ rc = vscsi->flags & SCHEDULE_DISCONNECT;
pr_debug("Leaving trans_event: flags 0x%x, state 0x%hx, rc %ld\n",
vscsi->flags, vscsi->state, rc);
@@ -1066,16 +1206,28 @@ static void ibmvscsis_adapter_idle(struct scsi_info *vscsi)
free_qs = true;
switch (vscsi->state) {
+ case UNCONFIGURING:
+ ibmvscsis_free_command_q(vscsi);
+ dma_rmb();
+ isync();
+ if (vscsi->flags & CFG_SLEEPING) {
+ vscsi->flags &= ~CFG_SLEEPING;
+ complete(&vscsi->unconfig);
+ }
+ break;
case ERR_DISCONNECT_RECONNECT:
- ibmvscsis_reset_queue(vscsi, WAIT_CONNECTION);
+ ibmvscsis_reset_queue(vscsi);
pr_debug("adapter_idle, disc_rec: flags 0x%x\n", vscsi->flags);
break;
case ERR_DISCONNECT:
ibmvscsis_free_command_q(vscsi);
- vscsi->flags &= ~DISCONNECT_SCHEDULED;
+ vscsi->flags &= ~(SCHEDULE_DISCONNECT | DISCONNECT_SCHEDULED);
vscsi->flags |= RESPONSE_Q_DOWN;
- vscsi->state = ERR_DISCONNECTED;
+ if (vscsi->tport.enabled)
+ vscsi->state = ERR_DISCONNECTED;
+ else
+ vscsi->state = WAIT_ENABLED;
pr_debug("adapter_idle, disc: flags 0x%x, state 0x%hx\n",
vscsi->flags, vscsi->state);
break;
@@ -1220,7 +1372,7 @@ static long ibmvscsis_copy_crq_packet(struct scsi_info *vscsi,
* @iue: Information Unit containing the Adapter Info MAD request
*
* EXECUTION ENVIRONMENT:
- * Interrupt adpater lock is held
+ * Interrupt adapter lock is held
*/
static long ibmvscsis_adapter_info(struct scsi_info *vscsi,
struct iu_entry *iue)
@@ -1620,8 +1772,8 @@ static void ibmvscsis_send_messages(struct scsi_info *vscsi)
be64_to_cpu(msg_hi),
be64_to_cpu(cmd->rsp.tag));
- pr_debug("send_messages: tag 0x%llx, rc %ld\n",
- be64_to_cpu(cmd->rsp.tag), rc);
+ pr_debug("send_messages: cmd %p, tag 0x%llx, rc %ld\n",
+ cmd, be64_to_cpu(cmd->rsp.tag), rc);
/* if all ok free up the command element resources */
if (rc == H_SUCCESS) {
@@ -1691,7 +1843,7 @@ static void ibmvscsis_send_mad_resp(struct scsi_info *vscsi,
* @crq: Pointer to the CRQ entry containing the MAD request
*
* EXECUTION ENVIRONMENT:
- * Interrupt called with adapter lock held
+ * Interrupt, called with adapter lock held
*/
static long ibmvscsis_mad(struct scsi_info *vscsi, struct viosrp_crq *crq)
{
@@ -1745,14 +1897,7 @@ static long ibmvscsis_mad(struct scsi_info *vscsi, struct viosrp_crq *crq)
pr_debug("mad: type %d\n", be32_to_cpu(mad->type));
- if (be16_to_cpu(mad->length) < 0) {
- dev_err(&vscsi->dev, "mad: length is < 0\n");
- ibmvscsis_post_disconnect(vscsi,
- ERR_DISCONNECT_RECONNECT, 0);
- rc = SRP_VIOLATION;
- } else {
- rc = ibmvscsis_process_mad(vscsi, iue);
- }
+ rc = ibmvscsis_process_mad(vscsi, iue);
pr_debug("mad: status %hd, rc %ld\n", be16_to_cpu(mad->status),
rc);
@@ -1864,7 +2009,7 @@ static long ibmvscsis_srp_login_rej(struct scsi_info *vscsi,
break;
case H_PERMISSION:
if (connection_broken(vscsi))
- flag_bits = RESPONSE_Q_DOWN | CLIENT_FAILED;
+ flag_bits = RESPONSE_Q_DOWN | CLIENT_FAILED;
dev_err(&vscsi->dev, "login_rej: error copying to client, rc %ld\n",
rc);
ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT,
@@ -2187,156 +2332,6 @@ static long ibmvscsis_ping_response(struct scsi_info *vscsi)
}
/**
- * ibmvscsis_handle_init_compl_msg() - Respond to an Init Complete Message
- * @vscsi: Pointer to our adapter structure
- *
- * Must be called with interrupt lock held.
- */
-static long ibmvscsis_handle_init_compl_msg(struct scsi_info *vscsi)
-{
- long rc = ADAPT_SUCCESS;
-
- switch (vscsi->state) {
- case NO_QUEUE:
- case ERR_DISCONNECT:
- case ERR_DISCONNECT_RECONNECT:
- case ERR_DISCONNECTED:
- case UNCONFIGURING:
- case UNDEFINED:
- rc = ERROR;
- break;
-
- case WAIT_CONNECTION:
- vscsi->state = CONNECTED;
- break;
-
- case WAIT_IDLE:
- case SRP_PROCESSING:
- case CONNECTED:
- case WAIT_ENABLED:
- case PART_UP_WAIT_ENAB:
- default:
- rc = ERROR;
- dev_err(&vscsi->dev, "init_msg: invalid state %d to get init compl msg\n",
- vscsi->state);
- ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
- break;
- }
-
- return rc;
-}
-
-/**
- * ibmvscsis_handle_init_msg() - Respond to an Init Message
- * @vscsi: Pointer to our adapter structure
- *
- * Must be called with interrupt lock held.
- */
-static long ibmvscsis_handle_init_msg(struct scsi_info *vscsi)
-{
- long rc = ADAPT_SUCCESS;
-
- switch (vscsi->state) {
- case WAIT_ENABLED:
- vscsi->state = PART_UP_WAIT_ENAB;
- break;
-
- case WAIT_CONNECTION:
- rc = ibmvscsis_send_init_message(vscsi, INIT_COMPLETE_MSG);
- switch (rc) {
- case H_SUCCESS:
- vscsi->state = CONNECTED;
- break;
-
- case H_PARAMETER:
- dev_err(&vscsi->dev, "init_msg: failed to send, rc %ld\n",
- rc);
- ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT, 0);
- break;
-
- case H_DROPPED:
- dev_err(&vscsi->dev, "init_msg: failed to send, rc %ld\n",
- rc);
- rc = ERROR;
- ibmvscsis_post_disconnect(vscsi,
- ERR_DISCONNECT_RECONNECT, 0);
- break;
-
- case H_CLOSED:
- pr_warn("init_msg: failed to send, rc %ld\n", rc);
- rc = 0;
- break;
- }
- break;
-
- case UNDEFINED:
- rc = ERROR;
- break;
-
- case UNCONFIGURING:
- break;
-
- case PART_UP_WAIT_ENAB:
- case CONNECTED:
- case SRP_PROCESSING:
- case WAIT_IDLE:
- case NO_QUEUE:
- case ERR_DISCONNECT:
- case ERR_DISCONNECT_RECONNECT:
- case ERR_DISCONNECTED:
- default:
- rc = ERROR;
- dev_err(&vscsi->dev, "init_msg: invalid state %d to get init msg\n",
- vscsi->state);
- ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
- break;
- }
-
- return rc;
-}
-
-/**
- * ibmvscsis_init_msg() - Respond to an init message
- * @vscsi: Pointer to our adapter structure
- * @crq: Pointer to CRQ element containing the Init Message
- *
- * EXECUTION ENVIRONMENT:
- * Interrupt, interrupt lock held
- */
-static long ibmvscsis_init_msg(struct scsi_info *vscsi, struct viosrp_crq *crq)
-{
- long rc = ADAPT_SUCCESS;
-
- pr_debug("init_msg: state 0x%hx\n", vscsi->state);
-
- rc = h_vioctl(vscsi->dds.unit_id, H_GET_PARTNER_INFO,
- (u64)vscsi->map_ioba | ((u64)PAGE_SIZE << 32), 0, 0, 0,
- 0);
- if (rc == H_SUCCESS) {
- vscsi->client_data.partition_number =
- be64_to_cpu(*(u64 *)vscsi->map_buf);
- pr_debug("init_msg, part num %d\n",
- vscsi->client_data.partition_number);
- } else {
- pr_debug("init_msg h_vioctl rc %ld\n", rc);
- rc = ADAPT_SUCCESS;
- }
-
- if (crq->format == INIT_MSG) {
- rc = ibmvscsis_handle_init_msg(vscsi);
- } else if (crq->format == INIT_COMPLETE_MSG) {
- rc = ibmvscsis_handle_init_compl_msg(vscsi);
- } else {
- rc = ERROR;
- dev_err(&vscsi->dev, "init_msg: invalid format %d\n",
- (uint)crq->format);
- ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
- }
-
- return rc;
-}
-
-/**
* ibmvscsis_parse_command() - Parse an element taken from the cmd rsp queue.
* @vscsi: Pointer to our adapter structure
* @crq: Pointer to CRQ element containing the SRP request
@@ -2391,7 +2386,7 @@ static long ibmvscsis_parse_command(struct scsi_info *vscsi,
break;
case VALID_TRANS_EVENT:
- rc = ibmvscsis_trans_event(vscsi, crq);
+ rc = ibmvscsis_trans_event(vscsi, crq);
break;
case VALID_INIT_MSG:
@@ -2522,7 +2517,6 @@ static void ibmvscsis_parse_cmd(struct scsi_info *vscsi,
dev_err(&vscsi->dev, "0x%llx: parsing SRP descriptor table failed.\n",
srp->tag);
goto fail;
- return;
}
cmd->rsp.sol_not = srp->sol_not;
@@ -2559,6 +2553,10 @@ static void ibmvscsis_parse_cmd(struct scsi_info *vscsi,
data_len, attr, dir, 0);
if (rc) {
dev_err(&vscsi->dev, "target_submit_cmd failed, rc %d\n", rc);
+ spin_lock_bh(&vscsi->intr_lock);
+ list_del(&cmd->list);
+ ibmvscsis_free_cmd_resources(vscsi, cmd);
+ spin_unlock_bh(&vscsi->intr_lock);
goto fail;
}
return;
@@ -2638,6 +2636,9 @@ static void ibmvscsis_parse_task(struct scsi_info *vscsi,
if (rc) {
dev_err(&vscsi->dev, "target_submit_tmr failed, rc %d\n",
rc);
+ spin_lock_bh(&vscsi->intr_lock);
+ list_del(&cmd->list);
+ spin_unlock_bh(&vscsi->intr_lock);
cmd->se_cmd.se_tmr_req->response =
TMR_FUNCTION_REJECTED;
}
@@ -2786,36 +2787,6 @@ static irqreturn_t ibmvscsis_interrupt(int dummy, void *data)
}
/**
- * ibmvscsis_check_q() - Helper function to Check Init Message Valid
- * @vscsi: Pointer to our adapter structure
- *
- * Checks if a initialize message was queued by the initiatior
- * while the timing window was open. This function is called from
- * probe after the CRQ is created and interrupts are enabled.
- * It would only be used by adapters who wait for some event before
- * completing the init handshake with the client. For ibmvscsi, this
- * event is waiting for the port to be enabled.
- *
- * EXECUTION ENVIRONMENT:
- * Process level only, interrupt lock held
- */
-static long ibmvscsis_check_q(struct scsi_info *vscsi)
-{
- uint format;
- long rc;
-
- rc = ibmvscsis_check_init_msg(vscsi, &format);
- if (rc)
- ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
- else if (format == UNUSED_FORMAT)
- vscsi->state = WAIT_ENABLED;
- else
- vscsi->state = PART_UP_WAIT_ENAB;
-
- return rc;
-}
-
-/**
* ibmvscsis_enable_change_state() - Set new state based on enabled status
* @vscsi: Pointer to our adapter structure
*
@@ -2826,77 +2797,19 @@ static long ibmvscsis_check_q(struct scsi_info *vscsi)
*/
static long ibmvscsis_enable_change_state(struct scsi_info *vscsi)
{
+ int bytes;
long rc = ADAPT_SUCCESS;
-handle_state_change:
- switch (vscsi->state) {
- case WAIT_ENABLED:
- rc = ibmvscsis_send_init_message(vscsi, INIT_MSG);
- switch (rc) {
- case H_SUCCESS:
- case H_DROPPED:
- case H_CLOSED:
- vscsi->state = WAIT_CONNECTION;
- rc = ADAPT_SUCCESS;
- break;
-
- case H_PARAMETER:
- break;
-
- case H_HARDWARE:
- break;
-
- default:
- vscsi->state = UNDEFINED;
- rc = H_HARDWARE;
- break;
- }
- break;
- case PART_UP_WAIT_ENAB:
- rc = ibmvscsis_send_init_message(vscsi, INIT_COMPLETE_MSG);
- switch (rc) {
- case H_SUCCESS:
- vscsi->state = CONNECTED;
- rc = ADAPT_SUCCESS;
- break;
-
- case H_DROPPED:
- case H_CLOSED:
- vscsi->state = WAIT_ENABLED;
- goto handle_state_change;
-
- case H_PARAMETER:
- break;
-
- case H_HARDWARE:
- break;
-
- default:
- rc = H_HARDWARE;
- break;
- }
- break;
-
- case WAIT_CONNECTION:
- case WAIT_IDLE:
- case SRP_PROCESSING:
- case CONNECTED:
- rc = ADAPT_SUCCESS;
- break;
- /* should not be able to get here */
- case UNCONFIGURING:
- rc = ERROR;
- vscsi->state = UNDEFINED;
- break;
+ bytes = vscsi->cmd_q.size * PAGE_SIZE;
+ rc = h_reg_crq(vscsi->dds.unit_id, vscsi->cmd_q.crq_token, bytes);
+ if (rc == H_CLOSED || rc == H_SUCCESS) {
+ vscsi->state = WAIT_CONNECTION;
+ rc = ibmvscsis_establish_new_q(vscsi);
+ }
- /* driver should never allow this to happen */
- case ERR_DISCONNECT:
- case ERR_DISCONNECT_RECONNECT:
- default:
- dev_err(&vscsi->dev, "in invalid state %d during enable_change_state\n",
- vscsi->state);
- rc = ADAPT_SUCCESS;
- break;
+ if (rc != ADAPT_SUCCESS) {
+ vscsi->state = ERR_DISCONNECTED;
+ vscsi->flags |= RESPONSE_Q_DOWN;
}
return rc;
@@ -2916,7 +2829,6 @@ handle_state_change:
*/
static long ibmvscsis_create_command_q(struct scsi_info *vscsi, int num_cmds)
{
- long rc = 0;
int pages;
struct vio_dev *vdev = vscsi->dma_dev;
@@ -2940,22 +2852,7 @@ static long ibmvscsis_create_command_q(struct scsi_info *vscsi, int num_cmds)
return -ENOMEM;
}
- rc = h_reg_crq(vscsi->dds.unit_id, vscsi->cmd_q.crq_token, PAGE_SIZE);
- if (rc) {
- if (rc == H_CLOSED) {
- vscsi->state = WAIT_ENABLED;
- rc = 0;
- } else {
- dma_unmap_single(&vdev->dev, vscsi->cmd_q.crq_token,
- PAGE_SIZE, DMA_BIDIRECTIONAL);
- free_page((unsigned long)vscsi->cmd_q.base_addr);
- rc = -ENODEV;
- }
- } else {
- vscsi->state = WAIT_ENABLED;
- }
-
- return rc;
+ return 0;
}
/**
@@ -3270,7 +3167,7 @@ static void ibmvscsis_handle_crq(unsigned long data)
/*
* if we are in a path where we are waiting for all pending commands
* to complete because we received a transport event and anything in
- * the command queue is for a new connection, do nothing
+ * the command queue is for a new connection, do nothing
*/
if (TARGET_STOP(vscsi)) {
vio_enable_interrupts(vscsi->dma_dev);
@@ -3314,7 +3211,7 @@ cmd_work:
* everything but transport events on the queue
*
* need to decrement the queue index so we can
- * look at the elment again
+ * look at the element again
*/
if (vscsi->cmd_q.index)
vscsi->cmd_q.index -= 1;
@@ -3378,7 +3275,8 @@ static int ibmvscsis_probe(struct vio_dev *vdev,
INIT_LIST_HEAD(&vscsi->waiting_rsp);
INIT_LIST_HEAD(&vscsi->active_q);
- snprintf(vscsi->tport.tport_name, 256, "%s", dev_name(&vdev->dev));
+ snprintf(vscsi->tport.tport_name, IBMVSCSIS_NAMELEN, "%s",
+ dev_name(&vdev->dev));
pr_debug("probe tport_name: %s\n", vscsi->tport.tport_name);
@@ -3393,6 +3291,9 @@ static int ibmvscsis_probe(struct vio_dev *vdev,
strncat(vscsi->eye, vdev->name, MAX_EYE);
vscsi->dds.unit_id = vdev->unit_address;
+ strncpy(vscsi->dds.partition_name, partition_name,
+ sizeof(vscsi->dds.partition_name));
+ vscsi->dds.partition_num = partition_number;
spin_lock_bh(&ibmvscsis_dev_lock);
list_add_tail(&vscsi->list, &ibmvscsis_dev_list);
@@ -3469,6 +3370,7 @@ static int ibmvscsis_probe(struct vio_dev *vdev,
(unsigned long)vscsi);
init_completion(&vscsi->wait_idle);
+ init_completion(&vscsi->unconfig);
snprintf(wq_name, 24, "ibmvscsis%s", dev_name(&vdev->dev));
vscsi->work_q = create_workqueue(wq_name);
@@ -3485,31 +3387,12 @@ static int ibmvscsis_probe(struct vio_dev *vdev,
goto destroy_WQ;
}
- spin_lock_bh(&vscsi->intr_lock);
- vio_enable_interrupts(vdev);
- if (rc) {
- dev_err(&vscsi->dev, "enabling interrupts failed, rc %d\n", rc);
- rc = -ENODEV;
- spin_unlock_bh(&vscsi->intr_lock);
- goto free_irq;
- }
-
- if (ibmvscsis_check_q(vscsi)) {
- rc = ERROR;
- dev_err(&vscsi->dev, "probe: check_q failed, rc %d\n", rc);
- spin_unlock_bh(&vscsi->intr_lock);
- goto disable_interrupt;
- }
- spin_unlock_bh(&vscsi->intr_lock);
+ vscsi->state = WAIT_ENABLED;
dev_set_drvdata(&vdev->dev, vscsi);
return 0;
-disable_interrupt:
- vio_disable_interrupts(vdev);
-free_irq:
- free_irq(vdev->irq, vscsi);
destroy_WQ:
destroy_workqueue(vscsi->work_q);
unmap_buf:
@@ -3543,10 +3426,11 @@ static int ibmvscsis_remove(struct vio_dev *vdev)
pr_debug("remove (%s)\n", dev_name(&vscsi->dma_dev->dev));
- /*
- * TBD: Need to handle if there are commands on the waiting_rsp q
- * Actually, can there still be cmds outstanding to tcm?
- */
+ spin_lock_bh(&vscsi->intr_lock);
+ ibmvscsis_post_disconnect(vscsi, UNCONFIGURING, 0);
+ vscsi->flags |= CFG_SLEEPING;
+ spin_unlock_bh(&vscsi->intr_lock);
+ wait_for_completion(&vscsi->unconfig);
vio_disable_interrupts(vdev);
free_irq(vdev->irq, vscsi);
@@ -3555,7 +3439,6 @@ static int ibmvscsis_remove(struct vio_dev *vdev)
DMA_BIDIRECTIONAL);
kfree(vscsi->map_buf);
tasklet_kill(&vscsi->work_task);
- ibmvscsis_unregister_command_q(vscsi);
ibmvscsis_destroy_command_q(vscsi);
ibmvscsis_freetimer(vscsi);
ibmvscsis_free_cmds(vscsi);
@@ -3609,7 +3492,7 @@ static int ibmvscsis_get_system_info(void)
num = of_get_property(rootdn, "ibm,partition-no", NULL);
if (num)
- partition_number = *num;
+ partition_number = of_read_number(num, 1);
of_node_put(rootdn);
@@ -3903,18 +3786,22 @@ static ssize_t ibmvscsis_tpg_enable_store(struct config_item *item,
}
if (tmp) {
- tport->enabled = true;
spin_lock_bh(&vscsi->intr_lock);
+ tport->enabled = true;
lrc = ibmvscsis_enable_change_state(vscsi);
if (lrc)
pr_err("enable_change_state failed, rc %ld state %d\n",
lrc, vscsi->state);
spin_unlock_bh(&vscsi->intr_lock);
} else {
+ spin_lock_bh(&vscsi->intr_lock);
tport->enabled = false;
+ /* This simulates the server going down */
+ ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT, 0);
+ spin_unlock_bh(&vscsi->intr_lock);
}
- pr_debug("tpg_enable_store, state %d\n", vscsi->state);
+ pr_debug("tpg_enable_store, tmp %ld, state %d\n", tmp, vscsi->state);
return count;
}
@@ -3983,10 +3870,10 @@ static struct attribute *ibmvscsis_dev_attrs[] = {
ATTRIBUTE_GROUPS(ibmvscsis_dev);
static struct class ibmvscsis_class = {
- .name = "ibmvscsis",
- .dev_release = ibmvscsis_dev_release,
- .class_attrs = ibmvscsis_class_attrs,
- .dev_groups = ibmvscsis_dev_groups,
+ .name = "ibmvscsis",
+ .dev_release = ibmvscsis_dev_release,
+ .class_attrs = ibmvscsis_class_attrs,
+ .dev_groups = ibmvscsis_dev_groups,
};
static struct vio_device_id ibmvscsis_device_table[] = {
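The unconfig path added above is a textbook completion handshake: ibmvscsis_remove() publishes its intent under the interrupt lock (UNCONFIGURING plus CFG_SLEEPING), then sleeps on the unconfig completion until the state machine, running in interrupt/tasklet context, has drained the adapter and signals it. A minimal sketch of the pattern, with hypothetical names (ctx, ctx_teardown, ctx_idle) standing in for the driver's scsi_info and its handlers:

#include <linux/spinlock.h>
#include <linux/completion.h>

struct ctx {
	spinlock_t lock;		/* spin_lock_init() at probe time */
	u32 flags;
#define CTX_SLEEPING	0x1		/* a teardown thread is waiting */
	struct completion done;		/* init_completion() at probe time */
};

/* process context: request teardown, then sleep until the FSM drains */
static void ctx_teardown(struct ctx *c)
{
	spin_lock_bh(&c->lock);
	/* ...post the UNCONFIGURING-style event to the state machine... */
	c->flags |= CTX_SLEEPING;
	spin_unlock_bh(&c->lock);
	wait_for_completion(&c->done);
}

/* tasklet/interrupt side: once idle, wake the waiter exactly once */
static void ctx_idle(struct ctx *c)
{
	/* called with c->lock held, after all queues have drained */
	if (c->flags & CTX_SLEEPING) {
		c->flags &= ~CTX_SLEEPING;
		complete(&c->done);
	}
}

Clearing the flag before calling complete() keeps the wakeup one-shot even if the idle handler runs again before the remove thread returns.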
diff --git a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.h b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.h
index 981a0c992b6c..65c6189885ab 100644
--- a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.h
+++ b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.h
@@ -26,6 +26,7 @@
#ifndef __H_IBMVSCSI_TGT
#define __H_IBMVSCSI_TGT
+#include <linux/interrupt.h>
#include "libsrp.h"
#define SYS_ID_NAME_LEN 64
@@ -204,8 +205,6 @@ struct scsi_info {
struct list_head waiting_rsp;
#define NO_QUEUE 0x00
#define WAIT_ENABLED 0X01
- /* driver has received an initialize command */
-#define PART_UP_WAIT_ENAB 0x02
#define WAIT_CONNECTION 0x04
/* have established a connection */
#define CONNECTED 0x08
@@ -259,6 +258,8 @@ struct scsi_info {
#define SCHEDULE_DISCONNECT 0x00400
/* disconnect handler is scheduled */
#define DISCONNECT_SCHEDULED 0x00800
+ /* remove function is sleeping */
+#define CFG_SLEEPING 0x01000
u32 flags;
/* adapter lock */
spinlock_t intr_lock;
@@ -287,6 +288,7 @@ struct scsi_info {
struct workqueue_struct *work_q;
struct completion wait_idle;
+ struct completion unconfig;
struct device dev;
struct vio_dev *dma_dev;
struct srp_target target;
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index 532474109624..835c59c777f2 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -186,16 +186,16 @@ static const struct ipr_chip_cfg_t ipr_chip_cfg[] = {
};
static const struct ipr_chip_t ipr_chip[] = {
- { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
- { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
- { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
- { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
- { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E, IPR_USE_MSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
- { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[1] },
- { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[1] },
- { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2, IPR_USE_MSI, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] },
- { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE, IPR_USE_MSI, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] },
- { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_RATTLESNAKE, IPR_USE_MSI, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] }
+ { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
+ { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
+ { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
+ { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
+ { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E, true, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
+ { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[1] },
+ { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[1] },
+ { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2, true, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] },
+ { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE, true, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] },
+ { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_RATTLESNAKE, true, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] }
};
static int ipr_max_bus_speeds[] = {
@@ -9439,23 +9439,11 @@ static void ipr_free_mem(struct ipr_ioa_cfg *ioa_cfg)
static void ipr_free_irqs(struct ipr_ioa_cfg *ioa_cfg)
{
struct pci_dev *pdev = ioa_cfg->pdev;
+ int i;
- if (ioa_cfg->intr_flag == IPR_USE_MSI ||
- ioa_cfg->intr_flag == IPR_USE_MSIX) {
- int i;
- for (i = 0; i < ioa_cfg->nvectors; i++)
- free_irq(ioa_cfg->vectors_info[i].vec,
- &ioa_cfg->hrrq[i]);
- } else
- free_irq(pdev->irq, &ioa_cfg->hrrq[0]);
-
- if (ioa_cfg->intr_flag == IPR_USE_MSI) {
- pci_disable_msi(pdev);
- ioa_cfg->intr_flag &= ~IPR_USE_MSI;
- } else if (ioa_cfg->intr_flag == IPR_USE_MSIX) {
- pci_disable_msix(pdev);
- ioa_cfg->intr_flag &= ~IPR_USE_MSIX;
- }
+ for (i = 0; i < ioa_cfg->nvectors; i++)
+ free_irq(pci_irq_vector(pdev, i), &ioa_cfg->hrrq[i]);
+ pci_free_irq_vectors(pdev);
}
/**
@@ -9883,45 +9871,6 @@ static void ipr_wait_for_pci_err_recovery(struct ipr_ioa_cfg *ioa_cfg)
}
}
-static int ipr_enable_msix(struct ipr_ioa_cfg *ioa_cfg)
-{
- struct msix_entry entries[IPR_MAX_MSIX_VECTORS];
- int i, vectors;
-
- for (i = 0; i < ARRAY_SIZE(entries); ++i)
- entries[i].entry = i;
-
- vectors = pci_enable_msix_range(ioa_cfg->pdev,
- entries, 1, ipr_number_of_msix);
- if (vectors < 0) {
- ipr_wait_for_pci_err_recovery(ioa_cfg);
- return vectors;
- }
-
- for (i = 0; i < vectors; i++)
- ioa_cfg->vectors_info[i].vec = entries[i].vector;
- ioa_cfg->nvectors = vectors;
-
- return 0;
-}
-
-static int ipr_enable_msi(struct ipr_ioa_cfg *ioa_cfg)
-{
- int i, vectors;
-
- vectors = pci_enable_msi_range(ioa_cfg->pdev, 1, ipr_number_of_msix);
- if (vectors < 0) {
- ipr_wait_for_pci_err_recovery(ioa_cfg);
- return vectors;
- }
-
- for (i = 0; i < vectors; i++)
- ioa_cfg->vectors_info[i].vec = ioa_cfg->pdev->irq + i;
- ioa_cfg->nvectors = vectors;
-
- return 0;
-}
-
static void name_msi_vectors(struct ipr_ioa_cfg *ioa_cfg)
{
int vec_idx, n = sizeof(ioa_cfg->vectors_info[0].desc) - 1;
@@ -9934,19 +9883,20 @@ static void name_msi_vectors(struct ipr_ioa_cfg *ioa_cfg)
}
}
-static int ipr_request_other_msi_irqs(struct ipr_ioa_cfg *ioa_cfg)
+static int ipr_request_other_msi_irqs(struct ipr_ioa_cfg *ioa_cfg,
+ struct pci_dev *pdev)
{
int i, rc;
for (i = 1; i < ioa_cfg->nvectors; i++) {
- rc = request_irq(ioa_cfg->vectors_info[i].vec,
+ rc = request_irq(pci_irq_vector(pdev, i),
ipr_isr_mhrrq,
0,
ioa_cfg->vectors_info[i].desc,
&ioa_cfg->hrrq[i]);
if (rc) {
while (--i >= 0)
- free_irq(ioa_cfg->vectors_info[i].vec,
+ free_irq(pci_irq_vector(pdev, i),
&ioa_cfg->hrrq[i]);
return rc;
}
@@ -9984,8 +9934,7 @@ static irqreturn_t ipr_test_intr(int irq, void *devp)
* ipr_test_msi - Test for Message Signaled Interrupt (MSI) support.
* @pdev: PCI device struct
*
- * Description: The return value from pci_enable_msi_range() can not always be
- * trusted. This routine sets up and initiates a test interrupt to determine
+ * Description: This routine sets up and initiates a test interrupt to determine
* if the interrupt is received via the ipr_test_intr() service routine.
* If the test fails, the driver will fall back to LSI.
*
@@ -9997,6 +9946,7 @@ static int ipr_test_msi(struct ipr_ioa_cfg *ioa_cfg, struct pci_dev *pdev)
int rc;
volatile u32 int_reg;
unsigned long lock_flags = 0;
+ int irq = pci_irq_vector(pdev, 0);
ENTER;
@@ -10008,15 +9958,12 @@ static int ipr_test_msi(struct ipr_ioa_cfg *ioa_cfg, struct pci_dev *pdev)
int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
- if (ioa_cfg->intr_flag == IPR_USE_MSIX)
- rc = request_irq(ioa_cfg->vectors_info[0].vec, ipr_test_intr, 0, IPR_NAME, ioa_cfg);
- else
- rc = request_irq(pdev->irq, ipr_test_intr, 0, IPR_NAME, ioa_cfg);
+ rc = request_irq(irq, ipr_test_intr, 0, IPR_NAME, ioa_cfg);
if (rc) {
- dev_err(&pdev->dev, "Can not assign irq %d\n", pdev->irq);
+ dev_err(&pdev->dev, "Can not assign irq %d\n", irq);
return rc;
} else if (ipr_debug)
- dev_info(&pdev->dev, "IRQ assigned: %d\n", pdev->irq);
+ dev_info(&pdev->dev, "IRQ assigned: %d\n", irq);
writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.sense_interrupt_reg32);
int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
@@ -10033,10 +9980,7 @@ static int ipr_test_msi(struct ipr_ioa_cfg *ioa_cfg, struct pci_dev *pdev)
spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
- if (ioa_cfg->intr_flag == IPR_USE_MSIX)
- free_irq(ioa_cfg->vectors_info[0].vec, ioa_cfg);
- else
- free_irq(pdev->irq, ioa_cfg);
+ free_irq(irq, ioa_cfg);
LEAVE;
@@ -10060,6 +10004,7 @@ static int ipr_probe_ioa(struct pci_dev *pdev,
int rc = PCIBIOS_SUCCESSFUL;
volatile u32 mask, uproc, interrupts;
unsigned long lock_flags, driver_lock_flags;
+ unsigned int irq_flag;
ENTER;
@@ -10175,18 +10120,18 @@ static int ipr_probe_ioa(struct pci_dev *pdev,
ipr_number_of_msix = IPR_MAX_MSIX_VECTORS;
}
- if (ioa_cfg->ipr_chip->intr_type == IPR_USE_MSI &&
- ipr_enable_msix(ioa_cfg) == 0)
- ioa_cfg->intr_flag = IPR_USE_MSIX;
- else if (ioa_cfg->ipr_chip->intr_type == IPR_USE_MSI &&
- ipr_enable_msi(ioa_cfg) == 0)
- ioa_cfg->intr_flag = IPR_USE_MSI;
- else {
- ioa_cfg->intr_flag = IPR_USE_LSI;
- ioa_cfg->clear_isr = 1;
- ioa_cfg->nvectors = 1;
- dev_info(&pdev->dev, "Cannot enable MSI.\n");
+ irq_flag = PCI_IRQ_LEGACY;
+ if (ioa_cfg->ipr_chip->has_msi)
+ irq_flag |= PCI_IRQ_MSI | PCI_IRQ_MSIX;
+ rc = pci_alloc_irq_vectors(pdev, 1, ipr_number_of_msix, irq_flag);
+ if (rc < 0) {
+ ipr_wait_for_pci_err_recovery(ioa_cfg);
+ goto cleanup_nomem;
}
+ ioa_cfg->nvectors = rc;
+
+ if (!pdev->msi_enabled && !pdev->msix_enabled)
+ ioa_cfg->clear_isr = 1;
pci_set_master(pdev);
@@ -10199,33 +10144,23 @@ static int ipr_probe_ioa(struct pci_dev *pdev,
}
}
- if (ioa_cfg->intr_flag == IPR_USE_MSI ||
- ioa_cfg->intr_flag == IPR_USE_MSIX) {
+ if (pdev->msi_enabled || pdev->msix_enabled) {
rc = ipr_test_msi(ioa_cfg, pdev);
- if (rc == -EOPNOTSUPP) {
+ switch (rc) {
+ case 0:
+ dev_info(&pdev->dev,
+ "Request for %d MSI%ss succeeded.", ioa_cfg->nvectors,
+ pdev->msix_enabled ? "-X" : "");
+ break;
+ case -EOPNOTSUPP:
ipr_wait_for_pci_err_recovery(ioa_cfg);
- if (ioa_cfg->intr_flag == IPR_USE_MSI) {
- ioa_cfg->intr_flag &= ~IPR_USE_MSI;
- pci_disable_msi(pdev);
- } else if (ioa_cfg->intr_flag == IPR_USE_MSIX) {
- ioa_cfg->intr_flag &= ~IPR_USE_MSIX;
- pci_disable_msix(pdev);
- }
+ pci_free_irq_vectors(pdev);
- ioa_cfg->intr_flag = IPR_USE_LSI;
ioa_cfg->nvectors = 1;
- }
- else if (rc)
+ ioa_cfg->clear_isr = 1;
+ break;
+ default:
goto out_msi_disable;
- else {
- if (ioa_cfg->intr_flag == IPR_USE_MSI)
- dev_info(&pdev->dev,
- "Request for %d MSIs succeeded with starting IRQ: %d\n",
- ioa_cfg->nvectors, pdev->irq);
- else if (ioa_cfg->intr_flag == IPR_USE_MSIX)
- dev_info(&pdev->dev,
- "Request for %d MSIXs succeeded.",
- ioa_cfg->nvectors);
}
}
@@ -10273,15 +10208,13 @@ static int ipr_probe_ioa(struct pci_dev *pdev,
ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
- if (ioa_cfg->intr_flag == IPR_USE_MSI
- || ioa_cfg->intr_flag == IPR_USE_MSIX) {
+ if (pdev->msi_enabled || pdev->msix_enabled) {
name_msi_vectors(ioa_cfg);
- rc = request_irq(ioa_cfg->vectors_info[0].vec, ipr_isr,
- 0,
+ rc = request_irq(pci_irq_vector(pdev, 0), ipr_isr, 0,
ioa_cfg->vectors_info[0].desc,
&ioa_cfg->hrrq[0]);
if (!rc)
- rc = ipr_request_other_msi_irqs(ioa_cfg);
+ rc = ipr_request_other_msi_irqs(ioa_cfg, pdev);
} else {
rc = request_irq(pdev->irq, ipr_isr,
IRQF_SHARED,
@@ -10323,10 +10256,7 @@ cleanup_nolog:
ipr_free_mem(ioa_cfg);
out_msi_disable:
ipr_wait_for_pci_err_recovery(ioa_cfg);
- if (ioa_cfg->intr_flag == IPR_USE_MSI)
- pci_disable_msi(pdev);
- else if (ioa_cfg->intr_flag == IPR_USE_MSIX)
- pci_disable_msix(pdev);
+ pci_free_irq_vectors(pdev);
cleanup_nomem:
iounmap(ipr_regs);
out_disable:
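The ipr conversion above is the standard migration to the pci_alloc_irq_vectors() API: a single call tries MSI-X, then MSI, then legacy INTx, and pci_irq_vector() replaces the driver-private vectors_info[].vec table. A hedged sketch of the canonical shape (my_setup_irqs, my_isr and priv are placeholders, error handling trimmed):

#include <linux/pci.h>
#include <linux/interrupt.h>

static int my_setup_irqs(struct pci_dev *pdev, void *priv,
			 irq_handler_t my_isr, int max_vecs)
{
	int i, nvec, rc;

	/* one call covers MSI-X, MSI and the legacy INTx fallback */
	nvec = pci_alloc_irq_vectors(pdev, 1, max_vecs,
				     PCI_IRQ_MSIX | PCI_IRQ_MSI |
				     PCI_IRQ_LEGACY);
	if (nvec < 0)
		return nvec;

	for (i = 0; i < nvec; i++) {
		/* pci_irq_vector() maps vector index -> Linux IRQ number */
		rc = request_irq(pci_irq_vector(pdev, i), my_isr, 0,
				 "my_drv", priv);
		if (rc)
			goto err;
	}
	return nvec;

err:
	while (--i >= 0)
		free_irq(pci_irq_vector(pdev, i), priv);
	pci_free_irq_vectors(pdev);	/* undoes the allocation */
	return rc;
}

With this API the driver no longer needs an intr_flag of its own; pdev->msi_enabled and pdev->msix_enabled answer the "which mode did I get" question, exactly as the new ipr code does.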
diff --git a/drivers/scsi/ipr.h b/drivers/scsi/ipr.h
index 8995053d01b3..b7d2e98eb45b 100644
--- a/drivers/scsi/ipr.h
+++ b/drivers/scsi/ipr.h
@@ -1413,10 +1413,7 @@ struct ipr_chip_cfg_t {
struct ipr_chip_t {
u16 vendor;
u16 device;
- u16 intr_type;
-#define IPR_USE_LSI 0x00
-#define IPR_USE_MSI 0x01
-#define IPR_USE_MSIX 0x02
+ bool has_msi;
u16 sis_type;
#define IPR_SIS32 0x00
#define IPR_SIS64 0x01
@@ -1593,11 +1590,9 @@ struct ipr_ioa_cfg {
struct ipr_cmnd **ipr_cmnd_list;
dma_addr_t *ipr_cmnd_list_dma;
- u16 intr_flag;
unsigned int nvectors;
struct {
- unsigned short vec;
char desc[22];
} vectors_info[IPR_MAX_MSIX_VECTORS];
diff --git a/drivers/scsi/ips.c b/drivers/scsi/ips.c
index 02cb76fd4420..3419e1bcdff6 100644
--- a/drivers/scsi/ips.c
+++ b/drivers/scsi/ips.c
@@ -2241,9 +2241,6 @@ ips_get_bios_version(ips_ha_t * ha, int intr)
uint8_t minor;
uint8_t subminor;
uint8_t *buffer;
- char hexDigits[] =
- { '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'A', 'B', 'C',
- 'D', 'E', 'F' };
METHOD_TRACE("ips_get_bios_version", 1);
@@ -2374,13 +2371,13 @@ ips_get_bios_version(ips_ha_t * ha, int intr)
}
}
- ha->bios_version[0] = hexDigits[(major & 0xF0) >> 4];
+ ha->bios_version[0] = hex_asc_upper_hi(major);
ha->bios_version[1] = '.';
- ha->bios_version[2] = hexDigits[major & 0x0F];
- ha->bios_version[3] = hexDigits[subminor];
+ ha->bios_version[2] = hex_asc_upper_lo(major);
+ ha->bios_version[3] = hex_asc_upper_lo(subminor);
ha->bios_version[4] = '.';
- ha->bios_version[5] = hexDigits[(minor & 0xF0) >> 4];
- ha->bios_version[6] = hexDigits[minor & 0x0F];
+ ha->bios_version[5] = hex_asc_upper_hi(minor);
+ ha->bios_version[6] = hex_asc_upper_lo(minor);
ha->bios_version[7] = 0;
}
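The ips hunk drops a private hexDigits[] table in favor of the kernel's upper-case hex helpers (hex_asc_upper_hi() formats bits 7..4 of a byte, hex_asc_upper_lo() bits 3..0; available via <linux/kernel.h> in kernels of this vintage). For example, with major = 0x4A:

	hex_asc_upper_hi(0x4A);	/* returns '4' */
	hex_asc_upper_lo(0x4A);	/* returns 'A' */

so a "4.A"-style version string can be assembled without a lookup table.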
diff --git a/drivers/scsi/isci/host.h b/drivers/scsi/isci/host.h
index 22a9bb1abae1..b3539928073c 100644
--- a/drivers/scsi/isci/host.h
+++ b/drivers/scsi/isci/host.h
@@ -295,7 +295,6 @@ enum sci_controller_states {
#define SCI_MAX_MSIX_INT (SCI_NUM_MSI_X_INT*SCI_MAX_CONTROLLERS)
struct isci_pci_info {
- struct msix_entry msix_entries[SCI_MAX_MSIX_INT];
struct isci_host *hosts[SCI_MAX_CONTROLLERS];
struct isci_orom *orom;
};
diff --git a/drivers/scsi/isci/init.c b/drivers/scsi/isci/init.c
index 77128d680e3b..0b5b5db0d0f8 100644
--- a/drivers/scsi/isci/init.c
+++ b/drivers/scsi/isci/init.c
@@ -350,16 +350,12 @@ static int isci_setup_interrupts(struct pci_dev *pdev)
*/
num_msix = num_controllers(pdev) * SCI_NUM_MSI_X_INT;
- for (i = 0; i < num_msix; i++)
- pci_info->msix_entries[i].entry = i;
-
- err = pci_enable_msix_exact(pdev, pci_info->msix_entries, num_msix);
- if (err)
+ err = pci_alloc_irq_vectors(pdev, num_msix, num_msix, PCI_IRQ_MSIX);
+ if (err < 0)
goto intx;
for (i = 0; i < num_msix; i++) {
int id = i / SCI_NUM_MSI_X_INT;
- struct msix_entry *msix = &pci_info->msix_entries[i];
irq_handler_t isr;
ihost = pci_info->hosts[id];
@@ -369,8 +365,8 @@ static int isci_setup_interrupts(struct pci_dev *pdev)
else
isr = isci_msix_isr;
- err = devm_request_irq(&pdev->dev, msix->vector, isr, 0,
- DRV_NAME"-msix", ihost);
+ err = devm_request_irq(&pdev->dev, pci_irq_vector(pdev, i),
+ isr, 0, DRV_NAME"-msix", ihost);
if (!err)
continue;
@@ -378,18 +374,19 @@ static int isci_setup_interrupts(struct pci_dev *pdev)
while (i--) {
id = i / SCI_NUM_MSI_X_INT;
ihost = pci_info->hosts[id];
- msix = &pci_info->msix_entries[i];
- devm_free_irq(&pdev->dev, msix->vector, ihost);
+ devm_free_irq(&pdev->dev, pci_irq_vector(pdev, i),
+ ihost);
}
- pci_disable_msix(pdev);
+ pci_free_irq_vectors(pdev);
goto intx;
}
return 0;
intx:
for_each_isci_host(i, ihost, pdev) {
- err = devm_request_irq(&pdev->dev, pdev->irq, isci_intx_isr,
- IRQF_SHARED, DRV_NAME"-intx", ihost);
+ err = devm_request_irq(&pdev->dev, pci_irq_vector(pdev, 0),
+ isci_intx_isr, IRQF_SHARED, DRV_NAME"-intx",
+ ihost);
if (err)
break;
}
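isci makes the same pci_alloc_irq_vectors() conversion but asks for an exact count: passing the same value as both min_vecs and max_vecs with PCI_IRQ_MSIX reproduces the old pci_enable_msix_exact() semantics, failing rather than returning fewer vectors. Reduced to its essentials:

	/* exact allocation: min == max, MSI-X only, no automatic fallback */
	err = pci_alloc_irq_vectors(pdev, num_msix, num_msix, PCI_IRQ_MSIX);
	if (err < 0)
		goto intx;	/* fall back to shared legacy INTx by hand */

The INTx fallback can still use pci_irq_vector(pdev, 0), which simply returns pdev->irq when no MSI or MSI-X vectors are allocated.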
diff --git a/drivers/scsi/isci/probe_roms.c b/drivers/scsi/isci/probe_roms.c
index 8ac646e5eddc..a2bbe46f8ccb 100644
--- a/drivers/scsi/isci/probe_roms.c
+++ b/drivers/scsi/isci/probe_roms.c
@@ -54,6 +54,7 @@ struct isci_orom *isci_request_oprom(struct pci_dev *pdev)
len = pci_biosrom_size(pdev);
rom = devm_kzalloc(&pdev->dev, sizeof(*rom), GFP_KERNEL);
if (!rom) {
+ pci_unmap_biosrom(oprom);
dev_warn(&pdev->dev,
"Unable to allocate memory for orom\n");
return NULL;
diff --git a/drivers/scsi/isci/remote_node_context.c b/drivers/scsi/isci/remote_node_context.c
index 1910100638a2..e3f2a5359d71 100644
--- a/drivers/scsi/isci/remote_node_context.c
+++ b/drivers/scsi/isci/remote_node_context.c
@@ -66,6 +66,9 @@ const char *rnc_state_name(enum scis_sds_remote_node_context_states state)
{
static const char * const strings[] = RNC_STATES;
+ if (state >= ARRAY_SIZE(strings))
+ return "UNKNOWN";
+
return strings[state];
}
#undef C
@@ -454,7 +457,7 @@ enum sci_status sci_remote_node_context_event_handler(struct sci_remote_node_con
* the device since it's being invalidated anyway */
dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
"%s: SCIC Remote Node Context 0x%p was "
- "suspeneded by hardware while being "
+ "suspended by hardware while being "
"invalidated.\n", __func__, sci_rnc);
break;
default:
@@ -473,7 +476,7 @@ enum sci_status sci_remote_node_context_event_handler(struct sci_remote_node_con
* the device since it's being resumed anyway */
dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
"%s: SCIC Remote Node Context 0x%p was "
- "suspeneded by hardware while being resumed.\n",
+ "suspended by hardware while being resumed.\n",
__func__, sci_rnc);
break;
default:
diff --git a/drivers/scsi/isci/request.c b/drivers/scsi/isci/request.c
index b709d2b20880..47f66e949745 100644
--- a/drivers/scsi/isci/request.c
+++ b/drivers/scsi/isci/request.c
@@ -2473,7 +2473,7 @@ static void isci_request_process_response_iu(
"%s: resp_iu = %p "
"resp_iu->status = 0x%x,\nresp_iu->datapres = %d "
"resp_iu->response_data_len = %x, "
- "resp_iu->sense_data_len = %x\nrepsonse data: ",
+ "resp_iu->sense_data_len = %x\nresponse data: ",
__func__,
resp_iu,
resp_iu->status,
diff --git a/drivers/scsi/libfc/fc_disc.c b/drivers/scsi/libfc/fc_disc.c
index 880a9068ca12..6103231104da 100644
--- a/drivers/scsi/libfc/fc_disc.c
+++ b/drivers/scsi/libfc/fc_disc.c
@@ -68,10 +68,14 @@ static void fc_disc_stop_rports(struct fc_disc *disc)
lport = fc_disc_lport(disc);
- mutex_lock(&disc->disc_mutex);
- list_for_each_entry_rcu(rdata, &disc->rports, peers)
- lport->tt.rport_logoff(rdata);
- mutex_unlock(&disc->disc_mutex);
+ rcu_read_lock();
+ list_for_each_entry_rcu(rdata, &disc->rports, peers) {
+ if (kref_get_unless_zero(&rdata->kref)) {
+ fc_rport_logoff(rdata);
+ kref_put(&rdata->kref, fc_rport_destroy);
+ }
+ }
+ rcu_read_unlock();
}
/**
@@ -150,7 +154,7 @@ static void fc_disc_recv_rscn_req(struct fc_disc *disc, struct fc_frame *fp)
break;
}
}
- lport->tt.seq_els_rsp_send(fp, ELS_LS_ACC, NULL);
+ fc_seq_els_rsp_send(fp, ELS_LS_ACC, NULL);
/*
* If not doing a complete rediscovery, do GPN_ID on
@@ -178,7 +182,7 @@ reject:
FC_DISC_DBG(disc, "Received a bad RSCN frame\n");
rjt_data.reason = ELS_RJT_LOGIC;
rjt_data.explan = ELS_EXPL_NONE;
- lport->tt.seq_els_rsp_send(fp, ELS_LS_RJT, &rjt_data);
+ fc_seq_els_rsp_send(fp, ELS_LS_RJT, &rjt_data);
fc_frame_free(fp);
}
@@ -289,15 +293,19 @@ static void fc_disc_done(struct fc_disc *disc, enum fc_disc_event event)
* Skip ports which were never discovered. These are the dNS port
* and ports which were created by PLOGI.
*/
+ rcu_read_lock();
list_for_each_entry_rcu(rdata, &disc->rports, peers) {
- if (!rdata->disc_id)
+ if (!kref_get_unless_zero(&rdata->kref))
continue;
- if (rdata->disc_id == disc->disc_id)
- lport->tt.rport_login(rdata);
- else
- lport->tt.rport_logoff(rdata);
+ if (rdata->disc_id) {
+ if (rdata->disc_id == disc->disc_id)
+ fc_rport_login(rdata);
+ else
+ fc_rport_logoff(rdata);
+ }
+ kref_put(&rdata->kref, fc_rport_destroy);
}
-
+ rcu_read_unlock();
mutex_unlock(&disc->disc_mutex);
disc->disc_callback(lport, event);
mutex_lock(&disc->disc_mutex);
@@ -446,7 +454,7 @@ static int fc_disc_gpn_ft_parse(struct fc_disc *disc, void *buf, size_t len)
if (ids.port_id != lport->port_id &&
ids.port_name != lport->wwpn) {
- rdata = lport->tt.rport_create(lport, ids.port_id);
+ rdata = fc_rport_create(lport, ids.port_id);
if (rdata) {
rdata->ids.port_name = ids.port_name;
rdata->disc_id = disc->disc_id;
@@ -592,7 +600,6 @@ static void fc_disc_gpn_id_resp(struct fc_seq *sp, struct fc_frame *fp,
lport = rdata->local_port;
disc = &lport->disc;
- mutex_lock(&disc->disc_mutex);
if (PTR_ERR(fp) == -FC_EX_CLOSED)
goto out;
if (IS_ERR(fp))
@@ -607,37 +614,41 @@ static void fc_disc_gpn_id_resp(struct fc_seq *sp, struct fc_frame *fp,
goto redisc;
pn = (struct fc_ns_gid_pn *)(cp + 1);
port_name = get_unaligned_be64(&pn->fn_wwpn);
+ mutex_lock(&rdata->rp_mutex);
if (rdata->ids.port_name == -1)
rdata->ids.port_name = port_name;
else if (rdata->ids.port_name != port_name) {
FC_DISC_DBG(disc, "GPN_ID accepted. WWPN changed. "
"Port-id %6.6x wwpn %16.16llx\n",
rdata->ids.port_id, port_name);
- lport->tt.rport_logoff(rdata);
-
- new_rdata = lport->tt.rport_create(lport,
- rdata->ids.port_id);
+ mutex_unlock(&rdata->rp_mutex);
+ fc_rport_logoff(rdata);
+ mutex_lock(&lport->disc.disc_mutex);
+ new_rdata = fc_rport_create(lport, rdata->ids.port_id);
+ mutex_unlock(&lport->disc.disc_mutex);
if (new_rdata) {
new_rdata->disc_id = disc->disc_id;
- lport->tt.rport_login(new_rdata);
+ fc_rport_login(new_rdata);
}
goto out;
}
rdata->disc_id = disc->disc_id;
- lport->tt.rport_login(rdata);
+ mutex_unlock(&rdata->rp_mutex);
+ fc_rport_login(rdata);
} else if (ntohs(cp->ct_cmd) == FC_FS_RJT) {
FC_DISC_DBG(disc, "GPN_ID rejected reason %x exp %x\n",
cp->ct_reason, cp->ct_explan);
- lport->tt.rport_logoff(rdata);
+ fc_rport_logoff(rdata);
} else {
FC_DISC_DBG(disc, "GPN_ID unexpected response code %x\n",
ntohs(cp->ct_cmd));
redisc:
+ mutex_lock(&disc->disc_mutex);
fc_disc_restart(disc);
+ mutex_unlock(&disc->disc_mutex);
}
out:
- mutex_unlock(&disc->disc_mutex);
- kref_put(&rdata->kref, lport->tt.rport_destroy);
+ kref_put(&rdata->kref, fc_rport_destroy);
}
/**
@@ -678,7 +689,7 @@ static int fc_disc_single(struct fc_lport *lport, struct fc_disc_port *dp)
{
struct fc_rport_priv *rdata;
- rdata = lport->tt.rport_create(lport, dp->port_id);
+ rdata = fc_rport_create(lport, dp->port_id);
if (!rdata)
return -ENOMEM;
rdata->disc_id = 0;
@@ -708,7 +719,7 @@ static void fc_disc_stop(struct fc_lport *lport)
static void fc_disc_stop_final(struct fc_lport *lport)
{
fc_disc_stop(lport);
- lport->tt.rport_flush_queue();
+ fc_rport_flush_queue();
}
/**
diff --git a/drivers/scsi/libfc/fc_elsct.c b/drivers/scsi/libfc/fc_elsct.c
index c2384d501470..6384a98048af 100644
--- a/drivers/scsi/libfc/fc_elsct.c
+++ b/drivers/scsi/libfc/fc_elsct.c
@@ -67,7 +67,7 @@ struct fc_seq *fc_elsct_send(struct fc_lport *lport, u32 did,
fc_fill_fc_hdr(fp, r_ctl, did, lport->port_id, fh_type,
FC_FCTL_REQ, 0);
- return lport->tt.exch_seq_send(lport, fp, resp, NULL, arg, timer_msec);
+ return fc_exch_seq_send(lport, fp, resp, NULL, arg, timer_msec);
}
EXPORT_SYMBOL(fc_elsct_send);
diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
index 16ca31ad5ec0..42bcf7f3a0f9 100644
--- a/drivers/scsi/libfc/fc_exch.c
+++ b/drivers/scsi/libfc/fc_exch.c
@@ -94,6 +94,7 @@ struct fc_exch_pool {
struct fc_exch_mgr {
struct fc_exch_pool __percpu *pool;
mempool_t *ep_pool;
+ struct fc_lport *lport;
enum fc_class class;
struct kref kref;
u16 min_xid;
@@ -362,8 +363,10 @@ static inline void fc_exch_timer_set_locked(struct fc_exch *ep,
fc_exch_hold(ep); /* hold for timer */
if (!queue_delayed_work(fc_exch_workqueue, &ep->timeout_work,
- msecs_to_jiffies(timer_msec)))
+ msecs_to_jiffies(timer_msec))) {
+ FC_EXCH_DBG(ep, "Exchange already queued\n");
fc_exch_release(ep);
+ }
}
/**
@@ -406,6 +409,8 @@ static int fc_exch_done_locked(struct fc_exch *ep)
return rc;
}
+static struct fc_exch fc_quarantine_exch;
+
/**
* fc_exch_ptr_get() - Return an exchange from an exchange pool
* @pool: Exchange Pool to get an exchange from
@@ -450,14 +455,17 @@ static void fc_exch_delete(struct fc_exch *ep)
/* update cache of free slot */
index = (ep->xid - ep->em->min_xid) >> fc_cpu_order;
- if (pool->left == FC_XID_UNKNOWN)
- pool->left = index;
- else if (pool->right == FC_XID_UNKNOWN)
- pool->right = index;
- else
- pool->next_index = index;
-
- fc_exch_ptr_set(pool, index, NULL);
+ if (!(ep->state & FC_EX_QUARANTINE)) {
+ if (pool->left == FC_XID_UNKNOWN)
+ pool->left = index;
+ else if (pool->right == FC_XID_UNKNOWN)
+ pool->right = index;
+ else
+ pool->next_index = index;
+ fc_exch_ptr_set(pool, index, NULL);
+ } else {
+ fc_exch_ptr_set(pool, index, &fc_quarantine_exch);
+ }
list_del(&ep->ex_list);
spin_unlock_bh(&pool->lock);
fc_exch_release(ep); /* drop hold for exch in mp */
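fc_quarantine_exch is a sentinel object: rather than returning a quarantined XID's slot to the free-slot cache, fc_exch_delete() parks the static dummy exchange in it, so the XID cannot be reallocated and fc_exch_find() can tell "quarantined" apart from "free". The pattern, reduced to its two halves:

	static struct fc_exch fc_quarantine_exch;  /* never a real exchange */

	/* on delete */
	if (ep->state & FC_EX_QUARANTINE)
		fc_exch_ptr_set(pool, index, &fc_quarantine_exch); /* poison */
	else
		fc_exch_ptr_set(pool, index, NULL);	/* slot reusable */

	/* on lookup */
	ep = fc_exch_ptr_get(pool, index);
	if (ep == &fc_quarantine_exch)
		ep = NULL;	/* behave as if the XID does not exist */

Quarantining matters when a late response for an aborted exchange may still arrive from the fabric; a recycled XID would otherwise let that stale frame match a brand-new exchange.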
@@ -525,8 +533,7 @@ out:
* Note: The frame will be freed either by a direct call to fc_frame_free(fp)
* or indirectly by calling libfc_function_template.frame_send().
*/
-static int fc_seq_send(struct fc_lport *lport, struct fc_seq *sp,
- struct fc_frame *fp)
+int fc_seq_send(struct fc_lport *lport, struct fc_seq *sp, struct fc_frame *fp)
{
struct fc_exch *ep;
int error;
@@ -536,6 +543,7 @@ static int fc_seq_send(struct fc_lport *lport, struct fc_seq *sp,
spin_unlock_bh(&ep->ex_lock);
return error;
}
+EXPORT_SYMBOL(fc_seq_send);
/**
* fc_seq_alloc() - Allocate a sequence for a given exchange
@@ -577,7 +585,7 @@ static struct fc_seq *fc_seq_start_next_locked(struct fc_seq *sp)
* for a given sequence/exchange pair
* @sp: The sequence/exchange to get a new exchange for
*/
-static struct fc_seq *fc_seq_start_next(struct fc_seq *sp)
+struct fc_seq *fc_seq_start_next(struct fc_seq *sp)
{
struct fc_exch *ep = fc_seq_exch(sp);
@@ -587,16 +595,16 @@ static struct fc_seq *fc_seq_start_next(struct fc_seq *sp)
return sp;
}
+EXPORT_SYMBOL(fc_seq_start_next);
/*
* Set the response handler for the exchange associated with a sequence.
*
* Note: May sleep if invoked from outside a response handler.
*/
-static void fc_seq_set_resp(struct fc_seq *sp,
- void (*resp)(struct fc_seq *, struct fc_frame *,
- void *),
- void *arg)
+void fc_seq_set_resp(struct fc_seq *sp,
+ void (*resp)(struct fc_seq *, struct fc_frame *, void *),
+ void *arg)
{
struct fc_exch *ep = fc_seq_exch(sp);
DEFINE_WAIT(wait);
@@ -615,12 +623,20 @@ static void fc_seq_set_resp(struct fc_seq *sp,
ep->arg = arg;
spin_unlock_bh(&ep->ex_lock);
}
+EXPORT_SYMBOL(fc_seq_set_resp);
/**
* fc_exch_abort_locked() - Abort an exchange
* @ep: The exchange to be aborted
* @timer_msec: The period of time to wait before aborting
*
+ * Abort an exchange and sequence. Generally called because of an
+ * exchange timeout or an abort from the upper layer.
+ *
+ * A timer_msec value can be specified for the abort timeout; if it is
+ * non-zero, the exchange resp handler will be called with a timeout
+ * error if no response to the abort is received.
+ *
* Locking notes: Called with exch lock held
*
* Return value: 0 on success else error code
@@ -632,9 +648,13 @@ static int fc_exch_abort_locked(struct fc_exch *ep,
struct fc_frame *fp;
int error;
+ FC_EXCH_DBG(ep, "exch: abort, time %d msecs\n", timer_msec);
if (ep->esb_stat & (ESB_ST_COMPLETE | ESB_ST_ABNORMAL) ||
- ep->state & (FC_EX_DONE | FC_EX_RST_CLEANUP))
+ ep->state & (FC_EX_DONE | FC_EX_RST_CLEANUP)) {
+ FC_EXCH_DBG(ep, "exch: already completed esb %x state %x\n",
+ ep->esb_stat, ep->state);
return -ENXIO;
+ }
/*
* Send the abort on a new sequence if possible.
@@ -680,8 +700,7 @@ static int fc_exch_abort_locked(struct fc_exch *ep,
*
* Return value: 0 on success else error code
*/
-static int fc_seq_exch_abort(const struct fc_seq *req_sp,
- unsigned int timer_msec)
+int fc_seq_exch_abort(const struct fc_seq *req_sp, unsigned int timer_msec)
{
struct fc_exch *ep;
int error;
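A hedged sketch of how a caller might use the newly exported fc_seq_exch_abort(), following the timer semantics documented above; my_pkt and handle_abort_done() are hypothetical caller names (the fc_fcp.c hunk later in this patch does the same thing with fc_fcp_abort_done()):

	/*
	 * Abort with a 2 * R_A_TOV timeout: if no response to the abort
	 * arrives in time, the exchange resp handler sees a timeout error.
	 */
	error = fc_seq_exch_abort(sp, 2 * lport->r_a_tov);
	if (error == -ENXIO) {
		/* Exchange already completed; treat the abort as done. */
		handle_abort_done(my_pkt);
		error = 0;
	}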
@@ -758,7 +777,7 @@ static void fc_exch_timeout(struct work_struct *work)
u32 e_stat;
int rc = 1;
- FC_EXCH_DBG(ep, "Exchange timed out\n");
+ FC_EXCH_DBG(ep, "Exchange timed out state %x\n", ep->state);
spin_lock_bh(&ep->ex_lock);
if (ep->state & (FC_EX_RST_CLEANUP | FC_EX_DONE))
@@ -821,14 +840,18 @@ static struct fc_exch *fc_exch_em_alloc(struct fc_lport *lport,
/* peek cache of free slot */
if (pool->left != FC_XID_UNKNOWN) {
- index = pool->left;
- pool->left = FC_XID_UNKNOWN;
- goto hit;
+ if (!WARN_ON(fc_exch_ptr_get(pool, pool->left))) {
+ index = pool->left;
+ pool->left = FC_XID_UNKNOWN;
+ goto hit;
+ }
}
if (pool->right != FC_XID_UNKNOWN) {
- index = pool->right;
- pool->right = FC_XID_UNKNOWN;
- goto hit;
+ if (!WARN_ON(fc_exch_ptr_get(pool, pool->right))) {
+ index = pool->right;
+ pool->right = FC_XID_UNKNOWN;
+ goto hit;
+ }
}
index = pool->next_index;
@@ -888,14 +911,19 @@ err:
* EM is selected when a NULL match function pointer is encountered
* or when a call to a match function returns true.
*/
-static inline struct fc_exch *fc_exch_alloc(struct fc_lport *lport,
- struct fc_frame *fp)
+static struct fc_exch *fc_exch_alloc(struct fc_lport *lport,
+ struct fc_frame *fp)
{
struct fc_exch_mgr_anchor *ema;
+ struct fc_exch *ep;
- list_for_each_entry(ema, &lport->ema_list, ema_list)
- if (!ema->match || ema->match(fp))
- return fc_exch_em_alloc(lport, ema->mp);
+ list_for_each_entry(ema, &lport->ema_list, ema_list) {
+ if (!ema->match || ema->match(fp)) {
+ ep = fc_exch_em_alloc(lport, ema->mp);
+ if (ep)
+ return ep;
+ }
+ }
return NULL;
}
@@ -906,14 +934,17 @@ static inline struct fc_exch *fc_exch_alloc(struct fc_lport *lport,
*/
static struct fc_exch *fc_exch_find(struct fc_exch_mgr *mp, u16 xid)
{
+ struct fc_lport *lport = mp->lport;
struct fc_exch_pool *pool;
struct fc_exch *ep = NULL;
u16 cpu = xid & fc_cpu_mask;
+ if (xid == FC_XID_UNKNOWN)
+ return NULL;
+
if (cpu >= nr_cpu_ids || !cpu_possible(cpu)) {
- printk_ratelimited(KERN_ERR
- "libfc: lookup request for XID = %d, "
- "indicates invalid CPU %d\n", xid, cpu);
+ pr_err("host%u: lport %6.6x: xid %d invalid CPU %d\n:",
+ lport->host->host_no, lport->port_id, xid, cpu);
return NULL;
}
@@ -921,6 +952,10 @@ static struct fc_exch *fc_exch_find(struct fc_exch_mgr *mp, u16 xid)
pool = per_cpu_ptr(mp->pool, cpu);
spin_lock_bh(&pool->lock);
ep = fc_exch_ptr_get(pool, (xid - mp->min_xid) >> fc_cpu_order);
+ if (ep == &fc_quarantine_exch) {
+ FC_LPORT_DBG(lport, "xid %x quarantined\n", xid);
+ ep = NULL;
+ }
if (ep) {
WARN_ON(ep->xid != xid);
fc_exch_hold(ep);
@@ -938,7 +973,7 @@ static struct fc_exch *fc_exch_find(struct fc_exch_mgr *mp, u16 xid)
*
* Note: May sleep if invoked from outside a response handler.
*/
-static void fc_exch_done(struct fc_seq *sp)
+void fc_exch_done(struct fc_seq *sp)
{
struct fc_exch *ep = fc_seq_exch(sp);
int rc;
@@ -951,6 +986,7 @@ static void fc_exch_done(struct fc_seq *sp)
if (!rc)
fc_exch_delete(ep);
}
+EXPORT_SYMBOL(fc_exch_done);
/**
* fc_exch_resp() - Allocate a new exchange for a response frame
@@ -1197,8 +1233,8 @@ static void fc_exch_set_addr(struct fc_exch *ep,
*
* The received frame is not freed.
*/
-static void fc_seq_els_rsp_send(struct fc_frame *fp, enum fc_els_cmd els_cmd,
- struct fc_seq_els_data *els_data)
+void fc_seq_els_rsp_send(struct fc_frame *fp, enum fc_els_cmd els_cmd,
+ struct fc_seq_els_data *els_data)
{
switch (els_cmd) {
case ELS_LS_RJT:
@@ -1217,6 +1253,7 @@ static void fc_seq_els_rsp_send(struct fc_frame *fp, enum fc_els_cmd els_cmd,
FC_LPORT_DBG(fr_dev(fp), "Invalid ELS CMD:%x\n", els_cmd);
}
}
+EXPORT_SYMBOL_GPL(fc_seq_els_rsp_send);
/**
* fc_seq_send_last() - Send a sequence that is the last in the exchange
@@ -1258,8 +1295,10 @@ static void fc_seq_send_ack(struct fc_seq *sp, const struct fc_frame *rx_fp)
*/
if (fc_sof_needs_ack(fr_sof(rx_fp))) {
fp = fc_frame_alloc(lport, 0);
- if (!fp)
+ if (!fp) {
+ FC_EXCH_DBG(ep, "Drop ACK request, out of memory\n");
return;
+ }
fh = fc_frame_header_get(fp);
fh->fh_r_ctl = FC_RCTL_ACK_1;
@@ -1312,13 +1351,18 @@ static void fc_exch_send_ba_rjt(struct fc_frame *rx_fp,
struct fc_frame_header *rx_fh;
struct fc_frame_header *fh;
struct fc_ba_rjt *rp;
+ struct fc_seq *sp;
struct fc_lport *lport;
unsigned int f_ctl;
lport = fr_dev(rx_fp);
+ sp = fr_seq(rx_fp);
fp = fc_frame_alloc(lport, sizeof(*rp));
- if (!fp)
+ if (!fp) {
+ FC_EXCH_DBG(fc_seq_exch(sp),
+ "Drop BA_RJT request, out of memory\n");
return;
+ }
fh = fc_frame_header_get(fp);
rx_fh = fc_frame_header_get(rx_fp);
@@ -1383,14 +1427,17 @@ static void fc_exch_recv_abts(struct fc_exch *ep, struct fc_frame *rx_fp)
if (!ep)
goto reject;
+ FC_EXCH_DBG(ep, "exch: ABTS received\n");
fp = fc_frame_alloc(ep->lp, sizeof(*ap));
- if (!fp)
+ if (!fp) {
+ FC_EXCH_DBG(ep, "Drop ABTS request, out of memory\n");
goto free;
+ }
spin_lock_bh(&ep->ex_lock);
if (ep->esb_stat & ESB_ST_COMPLETE) {
spin_unlock_bh(&ep->ex_lock);
-
+ FC_EXCH_DBG(ep, "exch: ABTS rejected, exchange complete\n");
fc_frame_free(fp);
goto reject;
}
@@ -1433,7 +1480,7 @@ reject:
* A reference will be held on the exchange/sequence for the caller, which
* must call fc_seq_release().
*/
-static struct fc_seq *fc_seq_assign(struct fc_lport *lport, struct fc_frame *fp)
+struct fc_seq *fc_seq_assign(struct fc_lport *lport, struct fc_frame *fp)
{
struct fc_exch_mgr_anchor *ema;
@@ -1447,15 +1494,17 @@ static struct fc_seq *fc_seq_assign(struct fc_lport *lport, struct fc_frame *fp)
break;
return fr_seq(fp);
}
+EXPORT_SYMBOL(fc_seq_assign);
/**
* fc_seq_release() - Release the hold
* @sp: The sequence.
*/
-static void fc_seq_release(struct fc_seq *sp)
+void fc_seq_release(struct fc_seq *sp)
{
fc_exch_release(fc_seq_exch(sp));
}
+EXPORT_SYMBOL(fc_seq_release);
/**
* fc_exch_recv_req() - Handler for an incoming request
@@ -1491,7 +1540,7 @@ static void fc_exch_recv_req(struct fc_lport *lport, struct fc_exch_mgr *mp,
* The upper-level protocol may request one later, if needed.
*/
if (fh->fh_rx_id == htons(FC_XID_UNKNOWN))
- return lport->tt.lport_recv(lport, fp);
+ return fc_lport_recv(lport, fp);
reject = fc_seq_lookup_recip(lport, mp, fp);
if (reject == FC_RJT_NONE) {
@@ -1512,7 +1561,7 @@ static void fc_exch_recv_req(struct fc_lport *lport, struct fc_exch_mgr *mp,
* first.
*/
if (!fc_invoke_resp(ep, sp, fp))
- lport->tt.lport_recv(lport, fp);
+ fc_lport_recv(lport, fp);
fc_exch_release(ep); /* release from lookup */
} else {
FC_LPORT_DBG(lport, "exch/seq lookup failed: reject %x\n",
@@ -1562,9 +1611,6 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
if (fc_sof_is_init(sof)) {
sp->ssb_stat |= SSB_ST_RESP;
sp->id = fh->fh_seq_id;
- } else if (sp->id != fh->fh_seq_id) {
- atomic_inc(&mp->stats.seq_not_found);
- goto rel;
}
f_ctl = ntoh24(fh->fh_f_ctl);
@@ -1761,7 +1807,10 @@ static void fc_exch_recv_bls(struct fc_exch_mgr *mp, struct fc_frame *fp)
fc_frame_free(fp);
break;
case FC_RCTL_BA_ABTS:
- fc_exch_recv_abts(ep, fp);
+ if (ep)
+ fc_exch_recv_abts(ep, fp);
+ else
+ fc_frame_free(fp);
break;
default: /* ignore junk */
fc_frame_free(fp);
@@ -1784,11 +1833,16 @@ static void fc_seq_ls_acc(struct fc_frame *rx_fp)
struct fc_lport *lport;
struct fc_els_ls_acc *acc;
struct fc_frame *fp;
+ struct fc_seq *sp;
lport = fr_dev(rx_fp);
+ sp = fr_seq(rx_fp);
fp = fc_frame_alloc(lport, sizeof(*acc));
- if (!fp)
+ if (!fp) {
+ FC_EXCH_DBG(fc_seq_exch(sp),
+ "exch: drop LS_ACC, out of memory\n");
return;
+ }
acc = fc_frame_payload_get(fp, sizeof(*acc));
memset(acc, 0, sizeof(*acc));
acc->la_cmd = ELS_LS_ACC;
@@ -1811,11 +1865,16 @@ static void fc_seq_ls_rjt(struct fc_frame *rx_fp, enum fc_els_rjt_reason reason,
struct fc_lport *lport;
struct fc_els_ls_rjt *rjt;
struct fc_frame *fp;
+ struct fc_seq *sp;
lport = fr_dev(rx_fp);
+ sp = fr_seq(rx_fp);
fp = fc_frame_alloc(lport, sizeof(*rjt));
- if (!fp)
+ if (!fp) {
+ FC_EXCH_DBG(fc_seq_exch(sp),
+ "exch: drop LS_ACC, out of memory\n");
return;
+ }
rjt = fc_frame_payload_get(fp, sizeof(*rjt));
memset(rjt, 0, sizeof(*rjt));
rjt->er_cmd = ELS_LS_RJT;
@@ -1960,8 +2019,7 @@ static void fc_exch_els_rec(struct fc_frame *rfp)
enum fc_els_rjt_reason reason = ELS_RJT_LOGIC;
enum fc_els_rjt_explan explan;
u32 sid;
- u16 rxid;
- u16 oxid;
+ u16 xid, rxid, oxid;
lport = fr_dev(rfp);
rp = fc_frame_payload_get(rfp, sizeof(*rp));
@@ -1972,18 +2030,35 @@ static void fc_exch_els_rec(struct fc_frame *rfp)
rxid = ntohs(rp->rec_rx_id);
oxid = ntohs(rp->rec_ox_id);
- ep = fc_exch_lookup(lport,
- sid == fc_host_port_id(lport->host) ? oxid : rxid);
explan = ELS_EXPL_OXID_RXID;
- if (!ep)
+ if (sid == fc_host_port_id(lport->host))
+ xid = oxid;
+ else
+ xid = rxid;
+ if (xid == FC_XID_UNKNOWN) {
+ FC_LPORT_DBG(lport,
+ "REC request from %x: invalid rxid %x oxid %x\n",
+ sid, rxid, oxid);
+ goto reject;
+ }
+ ep = fc_exch_lookup(lport, xid);
+ if (!ep) {
+ FC_LPORT_DBG(lport,
+ "REC request from %x: rxid %x oxid %x not found\n",
+ sid, rxid, oxid);
goto reject;
+ }
+ FC_EXCH_DBG(ep, "REC request from %x: rxid %x oxid %x\n",
+ sid, rxid, oxid);
if (ep->oid != sid || oxid != ep->oxid)
goto rel;
if (rxid != FC_XID_UNKNOWN && rxid != ep->rxid)
goto rel;
fp = fc_frame_alloc(lport, sizeof(*acc));
- if (!fp)
+ if (!fp) {
+ FC_EXCH_DBG(ep, "Drop REC request, out of memory\n");
goto out;
+ }
acc = fc_frame_payload_get(fp, sizeof(*acc));
memset(acc, 0, sizeof(*acc));
@@ -2065,6 +2140,24 @@ cleanup:
* @arg: The argument to be passed to the response handler
* @timer_msec: The timeout period for the exchange
*
+ * The exchange response handler is set in this routine to the resp()
+ * function pointer. It can be called in two scenarios: if a timeout
+ * occurs or if a response frame is received for the exchange. The
+ * fc_frame pointer in the response handler will also indicate a
+ * timeout as an error via the IS_ERR related macros.
+ *
+ * The exchange destructor handler is also set in this routine.
+ * The destructor handler is invoked by the EM layer when the
+ * exchange is about to be freed; the caller can use it to release
+ * its own resources along with the exchange.
+ *
+ * The arg is passed back to the resp and destructor handlers.
+ *
+ * The timeout value (in msec) for an exchange is set if a non-zero
+ * timer_msec argument is specified. The timer is canceled when
+ * it fires or when the exchange is done. The exchange timeout handler
+ * is registered by the EM layer.
+ *
* The frame pointer with some of the header's fields must be
* filled before calling this routine, those fields are:
*
@@ -2075,14 +2168,13 @@ cleanup:
* - frame control
* - parameter or relative offset
*/
-static struct fc_seq *fc_exch_seq_send(struct fc_lport *lport,
- struct fc_frame *fp,
- void (*resp)(struct fc_seq *,
- struct fc_frame *fp,
- void *arg),
- void (*destructor)(struct fc_seq *,
- void *),
- void *arg, u32 timer_msec)
+struct fc_seq *fc_exch_seq_send(struct fc_lport *lport,
+ struct fc_frame *fp,
+ void (*resp)(struct fc_seq *,
+ struct fc_frame *fp,
+ void *arg),
+ void (*destructor)(struct fc_seq *, void *),
+ void *arg, u32 timer_msec)
{
struct fc_exch *ep;
struct fc_seq *sp = NULL;
@@ -2101,7 +2193,7 @@ static struct fc_seq *fc_exch_seq_send(struct fc_lport *lport,
ep->resp = resp;
ep->destructor = destructor;
ep->arg = arg;
- ep->r_a_tov = FC_DEF_R_A_TOV;
+ ep->r_a_tov = lport->r_a_tov;
ep->lp = lport;
sp = &ep->seq;
@@ -2135,6 +2227,7 @@ err:
fc_exch_delete(ep);
return NULL;
}
+EXPORT_SYMBOL(fc_exch_seq_send);
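A condensed caller sketch matching the contract described above (resp() runs on a response or timeout, destructor() on exchange teardown); my_resp, my_destructor and my_ctx are hypothetical names:

	static void my_resp(struct fc_seq *sp, struct fc_frame *fp, void *arg)
	{
		if (IS_ERR(fp))
			return;	/* timeout or error, no frame to free */
		/* ... handle the response frame ... */
		fc_frame_free(fp);
	}

	static void my_destructor(struct fc_seq *sp, void *arg)
	{
		/* release caller state tied to the exchange */
	}

	/* fp must already have its header fields filled in (see list above) */
	sp = fc_exch_seq_send(lport, fp, my_resp, my_destructor, my_ctx,
			      2 * lport->r_a_tov);
	if (!sp)
		goto err;	/* allocation or send failed */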
/**
* fc_exch_rrq() - Send an ELS RRQ (Reinstate Recovery Qualifier) command
@@ -2176,6 +2269,7 @@ static void fc_exch_rrq(struct fc_exch *ep)
return;
retry:
+ FC_EXCH_DBG(ep, "exch: RRQ send failed\n");
spin_lock_bh(&ep->ex_lock);
if (ep->state & (FC_EX_RST_CLEANUP | FC_EX_DONE)) {
spin_unlock_bh(&ep->ex_lock);
@@ -2218,6 +2312,8 @@ static void fc_exch_els_rrq(struct fc_frame *fp)
if (!ep)
goto reject;
spin_lock_bh(&ep->ex_lock);
+ FC_EXCH_DBG(ep, "RRQ request from %x: xid %x rxid %x oxid %x\n",
+ sid, xid, ntohs(rp->rrq_rx_id), ntohs(rp->rrq_ox_id));
if (ep->oxid != ntohs(rp->rrq_ox_id))
goto unlock_reject;
if (ep->rxid != ntohs(rp->rrq_rx_id) &&
@@ -2385,6 +2481,7 @@ struct fc_exch_mgr *fc_exch_mgr_alloc(struct fc_lport *lport,
return NULL;
mp->class = class;
+ mp->lport = lport;
/* adjust em exch xid range for offload */
mp->min_xid = min_xid;
@@ -2558,36 +2655,9 @@ EXPORT_SYMBOL(fc_exch_recv);
*/
int fc_exch_init(struct fc_lport *lport)
{
- if (!lport->tt.seq_start_next)
- lport->tt.seq_start_next = fc_seq_start_next;
-
- if (!lport->tt.seq_set_resp)
- lport->tt.seq_set_resp = fc_seq_set_resp;
-
- if (!lport->tt.exch_seq_send)
- lport->tt.exch_seq_send = fc_exch_seq_send;
-
- if (!lport->tt.seq_send)
- lport->tt.seq_send = fc_seq_send;
-
- if (!lport->tt.seq_els_rsp_send)
- lport->tt.seq_els_rsp_send = fc_seq_els_rsp_send;
-
- if (!lport->tt.exch_done)
- lport->tt.exch_done = fc_exch_done;
-
if (!lport->tt.exch_mgr_reset)
lport->tt.exch_mgr_reset = fc_exch_mgr_reset;
- if (!lport->tt.seq_exch_abort)
- lport->tt.seq_exch_abort = fc_seq_exch_abort;
-
- if (!lport->tt.seq_assign)
- lport->tt.seq_assign = fc_seq_assign;
-
- if (!lport->tt.seq_release)
- lport->tt.seq_release = fc_seq_release;
-
return 0;
}
EXPORT_SYMBOL(fc_exch_init);
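The deleted assignments reflect this patch's wider move from the per-lport template table to directly exported symbols; call sites convert mechanically, for example:

	/* before */
	error = lport->tt.seq_send(lport, seq, fp);

	/* after */
	error = fc_seq_send(lport, seq, fp);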
diff --git a/drivers/scsi/libfc/fc_fcp.c b/drivers/scsi/libfc/fc_fcp.c
index 5121272f28fd..0e67621477a8 100644
--- a/drivers/scsi/libfc/fc_fcp.c
+++ b/drivers/scsi/libfc/fc_fcp.c
@@ -122,6 +122,7 @@ static void fc_fcp_srr_error(struct fc_fcp_pkt *, struct fc_frame *);
#define FC_HRD_ERROR 9
#define FC_CRC_ERROR 10
#define FC_TIMED_OUT 11
+#define FC_TRANS_RESET 12
/*
* Error recovery timeout values.
@@ -195,7 +196,7 @@ static void fc_fcp_pkt_hold(struct fc_fcp_pkt *fsp)
* @seq: The sequence that the FCP packet is on (required by destructor API)
* @fsp: The FCP packet to be released
*
- * This routine is called by a destructor callback in the exch_seq_send()
+ * This routine is called by a destructor callback in the fc_exch_seq_send()
* routine of the libfc Transport Template. The 'struct fc_seq' is a required
* argument even though it is not used by this routine.
*
@@ -253,8 +254,21 @@ static inline void fc_fcp_unlock_pkt(struct fc_fcp_pkt *fsp)
*/
static void fc_fcp_timer_set(struct fc_fcp_pkt *fsp, unsigned long delay)
{
- if (!(fsp->state & FC_SRB_COMPL))
+ if (!(fsp->state & FC_SRB_COMPL)) {
mod_timer(&fsp->timer, jiffies + delay);
+ fsp->timer_delay = delay;
+ }
+}
+
+static void fc_fcp_abort_done(struct fc_fcp_pkt *fsp)
+{
+ fsp->state |= FC_SRB_ABORTED;
+ fsp->state &= ~FC_SRB_ABORT_PENDING;
+
+ if (fsp->wait_for_comp)
+ complete(&fsp->tm_done);
+ else
+ fc_fcp_complete_locked(fsp);
}
/**
@@ -264,6 +278,8 @@ static void fc_fcp_timer_set(struct fc_fcp_pkt *fsp, unsigned long delay)
*/
static int fc_fcp_send_abort(struct fc_fcp_pkt *fsp)
{
+ int rc;
+
if (!fsp->seq_ptr)
return -EINVAL;
@@ -271,7 +287,16 @@ static int fc_fcp_send_abort(struct fc_fcp_pkt *fsp)
put_cpu();
fsp->state |= FC_SRB_ABORT_PENDING;
- return fsp->lp->tt.seq_exch_abort(fsp->seq_ptr, 0);
+ rc = fc_seq_exch_abort(fsp->seq_ptr, 0);
+ /*
+ * fc_seq_exch_abort() might return -ENXIO if
+ * the sequence is already completed
+ */
+ if (rc == -ENXIO) {
+ fc_fcp_abort_done(fsp);
+ rc = 0;
+ }
+ return rc;
}
/**
@@ -283,16 +308,16 @@ static int fc_fcp_send_abort(struct fc_fcp_pkt *fsp)
* fc_io_compl() will notify the SCSI-ml that the I/O is done.
* The SCSI-ml will retry the command.
*/
-static void fc_fcp_retry_cmd(struct fc_fcp_pkt *fsp)
+static void fc_fcp_retry_cmd(struct fc_fcp_pkt *fsp, int status_code)
{
if (fsp->seq_ptr) {
- fsp->lp->tt.exch_done(fsp->seq_ptr);
+ fc_exch_done(fsp->seq_ptr);
fsp->seq_ptr = NULL;
}
fsp->state &= ~FC_SRB_ABORT_PENDING;
fsp->io_status = 0;
- fsp->status_code = FC_ERROR;
+ fsp->status_code = status_code;
fc_fcp_complete_locked(fsp);
}
@@ -402,8 +427,6 @@ static void fc_fcp_can_queue_ramp_down(struct fc_lport *lport)
if (!can_queue)
can_queue = 1;
lport->host->can_queue = can_queue;
- shost_printk(KERN_ERR, lport->host, "libfc: Could not allocate frame.\n"
- "Reducing can_queue to %d.\n", can_queue);
unlock:
spin_unlock_irqrestore(lport->host->host_lock, flags);
@@ -430,10 +453,29 @@ static inline struct fc_frame *fc_fcp_frame_alloc(struct fc_lport *lport,
put_cpu();
/* error case */
fc_fcp_can_queue_ramp_down(lport);
+ shost_printk(KERN_ERR, lport->host,
+ "libfc: Could not allocate frame, "
+ "reducing can_queue to %d.\n", lport->host->can_queue);
return NULL;
}
/**
+ * get_fsp_rec_tov() - Helper function to get REC_TOV
+ * @fsp: the FCP packet
+ *
+ * Returns rec tov in jiffies as max(rpriv->e_d_tov, FC_DEF_E_D_TOV) + 1 second
+ */
+static inline unsigned int get_fsp_rec_tov(struct fc_fcp_pkt *fsp)
+{
+ struct fc_rport_libfc_priv *rpriv = fsp->rport->dd_data;
+ unsigned int e_d_tov = FC_DEF_E_D_TOV;
+
+ if (rpriv && rpriv->e_d_tov > e_d_tov)
+ e_d_tov = rpriv->e_d_tov;
+ return msecs_to_jiffies(e_d_tov) + HZ;
+}
+
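As a worked example, assuming libfc's usual default of FC_DEF_E_D_TOV = 2000 ms and no larger negotiated rpriv->e_d_tov, the helper returns msecs_to_jiffies(2000) + HZ, i.e. a 3 second REC timeout (750 jiffies at HZ=250, 3000 at HZ=1000).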
+/**
* fc_fcp_recv_data() - Handler for receiving SCSI-FCP data from a target
* @fsp: The FCP packet the data is on
* @fp: The data frame
@@ -536,8 +578,10 @@ crc_err:
* and completes the transfer, call the completion handler.
*/
if (unlikely(fsp->state & FC_SRB_RCV_STATUS) &&
- fsp->xfer_len == fsp->data_len - fsp->scsi_resid)
+ fsp->xfer_len == fsp->data_len - fsp->scsi_resid) {
+ FC_FCP_DBG(fsp, "complete out-of-order sequence\n");
fc_fcp_complete_locked(fsp);
+ }
return;
err:
fc_fcp_recovery(fsp, host_bcode);
@@ -609,7 +653,7 @@ static int fc_fcp_send_data(struct fc_fcp_pkt *fsp, struct fc_seq *seq,
remaining = seq_blen;
fh_parm_offset = frame_offset = offset;
tlen = 0;
- seq = lport->tt.seq_start_next(seq);
+ seq = fc_seq_start_next(seq);
f_ctl = FC_FC_REL_OFF;
WARN_ON(!seq);
@@ -687,7 +731,7 @@ static int fc_fcp_send_data(struct fc_fcp_pkt *fsp, struct fc_seq *seq,
/*
* send fragment using for a sequence.
*/
- error = lport->tt.seq_send(lport, seq, fp);
+ error = fc_seq_send(lport, seq, fp);
if (error) {
WARN_ON(1); /* send error should be rare */
return error;
@@ -727,15 +771,8 @@ static void fc_fcp_abts_resp(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
ba_done = 0;
}
- if (ba_done) {
- fsp->state |= FC_SRB_ABORTED;
- fsp->state &= ~FC_SRB_ABORT_PENDING;
-
- if (fsp->wait_for_comp)
- complete(&fsp->tm_done);
- else
- fc_fcp_complete_locked(fsp);
- }
+ if (ba_done)
+ fc_fcp_abort_done(fsp);
}
/**
@@ -764,8 +801,11 @@ static void fc_fcp_recv(struct fc_seq *seq, struct fc_frame *fp, void *arg)
fh = fc_frame_header_get(fp);
r_ctl = fh->fh_r_ctl;
- if (lport->state != LPORT_ST_READY)
+ if (lport->state != LPORT_ST_READY) {
+ FC_FCP_DBG(fsp, "lport state %d, ignoring r_ctl %x\n",
+ lport->state, r_ctl);
goto out;
+ }
if (fc_fcp_lock_pkt(fsp))
goto out;
@@ -774,8 +814,10 @@ static void fc_fcp_recv(struct fc_seq *seq, struct fc_frame *fp, void *arg)
goto unlock;
}
- if (fsp->state & (FC_SRB_ABORTED | FC_SRB_ABORT_PENDING))
+ if (fsp->state & (FC_SRB_ABORTED | FC_SRB_ABORT_PENDING)) {
+ FC_FCP_DBG(fsp, "command aborted, ignoring r_ctl %x\n", r_ctl);
goto unlock;
+ }
if (r_ctl == FC_RCTL_DD_DATA_DESC) {
/*
@@ -910,7 +952,16 @@ static void fc_fcp_resp(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
* Wait at least one jiffy to see if it is delivered.
* If this expires without data, we may do SRR.
*/
- fc_fcp_timer_set(fsp, 2);
+ if (fsp->lp->qfull) {
+ FC_FCP_DBG(fsp, "tgt %6.6x queue busy retry\n",
+ fsp->rport->port_id);
+ return;
+ }
+ FC_FCP_DBG(fsp, "tgt %6.6x xfer len %zx data underrun "
+ "len %x, data len %x\n",
+ fsp->rport->port_id,
+ fsp->xfer_len, expected_len, fsp->data_len);
+ fc_fcp_timer_set(fsp, get_fsp_rec_tov(fsp));
return;
}
fsp->status_code = FC_DATA_OVRRUN;
@@ -959,8 +1010,11 @@ static void fc_fcp_complete_locked(struct fc_fcp_pkt *fsp)
if (fsp->cdb_status == SAM_STAT_GOOD &&
fsp->xfer_len < fsp->data_len && !fsp->io_status &&
(!(fsp->scsi_comp_flags & FCP_RESID_UNDER) ||
- fsp->xfer_len < fsp->data_len - fsp->scsi_resid))
+ fsp->xfer_len < fsp->data_len - fsp->scsi_resid)) {
+ FC_FCP_DBG(fsp, "data underrun, xfer %zx data %x\n",
+ fsp->xfer_len, fsp->data_len);
fsp->status_code = FC_DATA_UNDRUN;
+ }
}
seq = fsp->seq_ptr;
@@ -970,7 +1024,7 @@ static void fc_fcp_complete_locked(struct fc_fcp_pkt *fsp)
struct fc_frame *conf_frame;
struct fc_seq *csp;
- csp = lport->tt.seq_start_next(seq);
+ csp = fc_seq_start_next(seq);
conf_frame = fc_fcp_frame_alloc(fsp->lp, 0);
if (conf_frame) {
f_ctl = FC_FC_SEQ_INIT;
@@ -979,10 +1033,10 @@ static void fc_fcp_complete_locked(struct fc_fcp_pkt *fsp)
fc_fill_fc_hdr(conf_frame, FC_RCTL_DD_SOL_CTL,
ep->did, ep->sid,
FC_TYPE_FCP, f_ctl, 0);
- lport->tt.seq_send(lport, csp, conf_frame);
+ fc_seq_send(lport, csp, conf_frame);
}
}
- lport->tt.exch_done(seq);
+ fc_exch_done(seq);
}
/*
* Some resets driven by SCSI are not I/Os and do not have
@@ -1000,10 +1054,8 @@ static void fc_fcp_complete_locked(struct fc_fcp_pkt *fsp)
*/
static void fc_fcp_cleanup_cmd(struct fc_fcp_pkt *fsp, int error)
{
- struct fc_lport *lport = fsp->lp;
-
if (fsp->seq_ptr) {
- lport->tt.exch_done(fsp->seq_ptr);
+ fc_exch_done(fsp->seq_ptr);
fsp->seq_ptr = NULL;
}
fsp->status_code = error;
@@ -1116,19 +1168,6 @@ static int fc_fcp_pkt_send(struct fc_lport *lport, struct fc_fcp_pkt *fsp)
}
/**
- * get_fsp_rec_tov() - Helper function to get REC_TOV
- * @fsp: the FCP packet
- *
- * Returns rec tov in jiffies as rpriv->e_d_tov + 1 second
- */
-static inline unsigned int get_fsp_rec_tov(struct fc_fcp_pkt *fsp)
-{
- struct fc_rport_libfc_priv *rpriv = fsp->rport->dd_data;
-
- return msecs_to_jiffies(rpriv->e_d_tov) + HZ;
-}
-
-/**
* fc_fcp_cmd_send() - Send a FCP command
* @lport: The local port to send the command on
* @fsp: The FCP packet the command is on
@@ -1165,8 +1204,7 @@ static int fc_fcp_cmd_send(struct fc_lport *lport, struct fc_fcp_pkt *fsp,
rpriv->local_port->port_id, FC_TYPE_FCP,
FC_FCTL_REQ, 0);
- seq = lport->tt.exch_seq_send(lport, fp, resp, fc_fcp_pkt_destroy,
- fsp, 0);
+ seq = fc_exch_seq_send(lport, fp, resp, fc_fcp_pkt_destroy, fsp, 0);
if (!seq) {
rc = -1;
goto unlock;
@@ -1196,7 +1234,7 @@ static void fc_fcp_error(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
return;
if (error == -FC_EX_CLOSED) {
- fc_fcp_retry_cmd(fsp);
+ fc_fcp_retry_cmd(fsp, FC_ERROR);
goto unlock;
}
@@ -1222,8 +1260,16 @@ static int fc_fcp_pkt_abort(struct fc_fcp_pkt *fsp)
int rc = FAILED;
unsigned long ticks_left;
- if (fc_fcp_send_abort(fsp))
+ FC_FCP_DBG(fsp, "pkt abort state %x\n", fsp->state);
+ if (fc_fcp_send_abort(fsp)) {
+ FC_FCP_DBG(fsp, "failed to send abort\n");
return FAILED;
+ }
+
+ if (fsp->state & FC_SRB_ABORTED) {
+ FC_FCP_DBG(fsp, "target abort cmd completed\n");
+ return SUCCESS;
+ }
init_completion(&fsp->tm_done);
fsp->wait_for_comp = 1;
@@ -1301,7 +1347,7 @@ static int fc_lun_reset(struct fc_lport *lport, struct fc_fcp_pkt *fsp,
spin_lock_bh(&fsp->scsi_pkt_lock);
if (fsp->seq_ptr) {
- lport->tt.exch_done(fsp->seq_ptr);
+ fc_exch_done(fsp->seq_ptr);
fsp->seq_ptr = NULL;
}
fsp->wait_for_comp = 0;
@@ -1355,7 +1401,7 @@ static void fc_tm_done(struct fc_seq *seq, struct fc_frame *fp, void *arg)
if (fh->fh_type != FC_TYPE_BLS)
fc_fcp_resp(fsp, fp);
fsp->seq_ptr = NULL;
- fsp->lp->tt.exch_done(seq);
+ fc_exch_done(seq);
out_unlock:
fc_fcp_unlock_pkt(fsp);
out:
@@ -1394,6 +1440,15 @@ static void fc_fcp_timeout(unsigned long data)
if (fsp->cdb_cmd.fc_tm_flags)
goto unlock;
+ if (fsp->lp->qfull) {
+ FC_FCP_DBG(fsp, "fcp timeout, resetting timer delay %d\n",
+ fsp->timer_delay);
+ setup_timer(&fsp->timer, fc_fcp_timeout, (unsigned long)fsp);
+ fc_fcp_timer_set(fsp, fsp->timer_delay);
+ goto unlock;
+ }
+ FC_FCP_DBG(fsp, "fcp timeout, delay %d flags %x state %x\n",
+ fsp->timer_delay, rpriv->flags, fsp->state);
fsp->state |= FC_SRB_FCP_PROCESSING_TMO;
if (rpriv->flags & FC_RP_FLAGS_REC_SUPPORTED)
@@ -1486,8 +1541,8 @@ static void fc_fcp_rec_resp(struct fc_seq *seq, struct fc_frame *fp, void *arg)
rjt = fc_frame_payload_get(fp, sizeof(*rjt));
switch (rjt->er_reason) {
default:
- FC_FCP_DBG(fsp, "device %x unexpected REC reject "
- "reason %d expl %d\n",
+ FC_FCP_DBG(fsp,
+ "device %x invalid REC reject %d/%d\n",
fsp->rport->port_id, rjt->er_reason,
rjt->er_explan);
/* fall through */
@@ -1503,18 +1558,23 @@ static void fc_fcp_rec_resp(struct fc_seq *seq, struct fc_frame *fp, void *arg)
break;
case ELS_RJT_LOGIC:
case ELS_RJT_UNAB:
+ FC_FCP_DBG(fsp, "device %x REC reject %d/%d\n",
+ fsp->rport->port_id, rjt->er_reason,
+ rjt->er_explan);
/*
- * If no data transfer, the command frame got dropped
- * so we just retry. If data was transferred, we
- * lost the response but the target has no record,
- * so we abort and retry.
+ * If response got lost or is stuck in the
+ * queue somewhere we have no idea if and when
+ * the response will be received. So quarantine
+ * the xid and retry the command.
*/
- if (rjt->er_explan == ELS_EXPL_OXID_RXID &&
- fsp->xfer_len == 0) {
- fc_fcp_retry_cmd(fsp);
+ if (rjt->er_explan == ELS_EXPL_OXID_RXID) {
+ struct fc_exch *ep = fc_seq_exch(fsp->seq_ptr);
+ ep->state |= FC_EX_QUARANTINE;
+ fsp->state |= FC_SRB_ABORTED;
+ fc_fcp_retry_cmd(fsp, FC_TRANS_RESET);
break;
}
- fc_fcp_recovery(fsp, FC_ERROR);
+ fc_fcp_recovery(fsp, FC_TRANS_RESET);
break;
}
} else if (opcode == ELS_LS_ACC) {
@@ -1608,7 +1668,9 @@ static void fc_fcp_rec_error(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
switch (error) {
case -FC_EX_CLOSED:
- fc_fcp_retry_cmd(fsp);
+ FC_FCP_DBG(fsp, "REC %p fid %6.6x exchange closed\n",
+ fsp, fsp->rport->port_id);
+ fc_fcp_retry_cmd(fsp, FC_ERROR);
break;
default:
@@ -1622,8 +1684,8 @@ static void fc_fcp_rec_error(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
* Assume REC or LS_ACC was lost.
* The exchange manager will have aborted REC, so retry.
*/
- FC_FCP_DBG(fsp, "REC fid %6.6x error error %d retry %d/%d\n",
- fsp->rport->port_id, error, fsp->recov_retry,
+ FC_FCP_DBG(fsp, "REC %p fid %6.6x exchange timeout retry %d/%d\n",
+ fsp, fsp->rport->port_id, fsp->recov_retry,
FC_MAX_RECOV_RETRY);
if (fsp->recov_retry++ < FC_MAX_RECOV_RETRY)
fc_fcp_rec(fsp);
@@ -1642,6 +1704,7 @@ out:
*/
static void fc_fcp_recovery(struct fc_fcp_pkt *fsp, u8 code)
{
+ FC_FCP_DBG(fsp, "start recovery code %x\n", code);
fsp->status_code = code;
fsp->cdb_status = 0;
fsp->io_status = 0;
@@ -1668,7 +1731,6 @@ static void fc_fcp_srr(struct fc_fcp_pkt *fsp, enum fc_rctl r_ctl, u32 offset)
struct fc_seq *seq;
struct fcp_srr *srr;
struct fc_frame *fp;
- unsigned int rec_tov;
rport = fsp->rport;
rpriv = rport->dd_data;
@@ -1692,10 +1754,9 @@ static void fc_fcp_srr(struct fc_fcp_pkt *fsp, enum fc_rctl r_ctl, u32 offset)
rpriv->local_port->port_id, FC_TYPE_FCP,
FC_FCTL_REQ, 0);
- rec_tov = get_fsp_rec_tov(fsp);
- seq = lport->tt.exch_seq_send(lport, fp, fc_fcp_srr_resp,
- fc_fcp_pkt_destroy,
- fsp, jiffies_to_msecs(rec_tov));
+ seq = fc_exch_seq_send(lport, fp, fc_fcp_srr_resp,
+ fc_fcp_pkt_destroy,
+ fsp, get_fsp_rec_tov(fsp));
if (!seq)
goto retry;
@@ -1706,7 +1767,7 @@ static void fc_fcp_srr(struct fc_fcp_pkt *fsp, enum fc_rctl r_ctl, u32 offset)
fc_fcp_pkt_hold(fsp); /* hold for outstanding SRR */
return;
retry:
- fc_fcp_retry_cmd(fsp);
+ fc_fcp_retry_cmd(fsp, FC_TRANS_RESET);
}
/**
@@ -1730,9 +1791,9 @@ static void fc_fcp_srr_resp(struct fc_seq *seq, struct fc_frame *fp, void *arg)
fh = fc_frame_header_get(fp);
/*
- * BUG? fc_fcp_srr_error calls exch_done which would release
+ * BUG? fc_fcp_srr_error calls fc_exch_done which would release
* the ep. But if fc_fcp_srr_error had got -FC_EX_TIMEOUT,
- * then fc_exch_timeout would be sending an abort. The exch_done
+ * then fc_exch_timeout would be sending an abort. The fc_exch_done
* call by fc_fcp_srr_error would prevent fc_exch.c from seeing
* an abort response though.
*/
@@ -1753,7 +1814,7 @@ static void fc_fcp_srr_resp(struct fc_seq *seq, struct fc_frame *fp, void *arg)
}
fc_fcp_unlock_pkt(fsp);
out:
- fsp->lp->tt.exch_done(seq);
+ fc_exch_done(seq);
fc_frame_free(fp);
}
@@ -1768,20 +1829,22 @@ static void fc_fcp_srr_error(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
goto out;
switch (PTR_ERR(fp)) {
case -FC_EX_TIMEOUT:
+ FC_FCP_DBG(fsp, "SRR timeout, retries %d\n", fsp->recov_retry);
if (fsp->recov_retry++ < FC_MAX_RECOV_RETRY)
fc_fcp_rec(fsp);
else
fc_fcp_recovery(fsp, FC_TIMED_OUT);
break;
case -FC_EX_CLOSED: /* e.g., link failure */
+ FC_FCP_DBG(fsp, "SRR error, exchange closed\n");
/* fall through */
default:
- fc_fcp_retry_cmd(fsp);
+ fc_fcp_retry_cmd(fsp, FC_ERROR);
break;
}
fc_fcp_unlock_pkt(fsp);
out:
- fsp->lp->tt.exch_done(fsp->recov_seq);
+ fc_exch_done(fsp->recov_seq);
}
/**
@@ -1832,8 +1895,13 @@ int fc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *sc_cmd)
rpriv = rport->dd_data;
if (!fc_fcp_lport_queue_ready(lport)) {
- if (lport->qfull)
+ if (lport->qfull) {
fc_fcp_can_queue_ramp_down(lport);
+ shost_printk(KERN_ERR, lport->host,
+ "libfc: queue full, "
+ "reducing can_queue to %d.\n",
+ lport->host->can_queue);
+ }
rc = SCSI_MLQUEUE_HOST_BUSY;
goto out;
}
@@ -1980,15 +2048,26 @@ static void fc_io_compl(struct fc_fcp_pkt *fsp)
sc_cmd->result = (DID_ERROR << 16) | fsp->cdb_status;
break;
case FC_CMD_ABORTED:
- FC_FCP_DBG(fsp, "Returning DID_ERROR to scsi-ml "
- "due to FC_CMD_ABORTED\n");
- sc_cmd->result = (DID_ERROR << 16) | fsp->io_status;
+ if (host_byte(sc_cmd->result) == DID_TIME_OUT) {
+ FC_FCP_DBG(fsp, "Returning DID_TIME_OUT to scsi-ml "
+ "due to FC_CMD_ABORTED\n");
+ } else {
+ FC_FCP_DBG(fsp, "Returning DID_ERROR to scsi-ml "
+ "due to FC_CMD_ABORTED\n");
+ set_host_byte(sc_cmd, DID_ERROR);
+ }
+ sc_cmd->result |= fsp->io_status;
break;
case FC_CMD_RESET:
FC_FCP_DBG(fsp, "Returning DID_RESET to scsi-ml "
"due to FC_CMD_RESET\n");
sc_cmd->result = (DID_RESET << 16);
break;
+ case FC_TRANS_RESET:
+ FC_FCP_DBG(fsp, "Returning DID_SOFT_ERROR to scsi-ml "
+ "due to FC_TRANS_RESET\n");
+ sc_cmd->result = (DID_SOFT_ERROR << 16);
+ break;
case FC_HRD_ERROR:
FC_FCP_DBG(fsp, "Returning DID_NO_CONNECT to scsi-ml "
"due to FC_HRD_ERROR\n");
@@ -2142,7 +2221,7 @@ int fc_eh_host_reset(struct scsi_cmnd *sc_cmd)
fc_block_scsi_eh(sc_cmd);
- lport->tt.lport_reset(lport);
+ fc_lport_reset(lport);
wait_tmo = jiffies + FC_HOST_RESET_TIMEOUT;
while (!fc_fcp_lport_queue_ready(lport) && time_before(jiffies,
wait_tmo))
diff --git a/drivers/scsi/libfc/fc_libfc.c b/drivers/scsi/libfc/fc_libfc.c
index c11a638f32e6..d623d084b7ec 100644
--- a/drivers/scsi/libfc/fc_libfc.c
+++ b/drivers/scsi/libfc/fc_libfc.c
@@ -226,7 +226,7 @@ void fc_fill_reply_hdr(struct fc_frame *fp, const struct fc_frame *in_fp,
sp = fr_seq(in_fp);
if (sp)
- fr_seq(fp) = fr_dev(in_fp)->tt.seq_start_next(sp);
+ fr_seq(fp) = fc_seq_start_next(sp);
fc_fill_hdr(fp, in_fp, r_ctl, FC_FCTL_RESP, 0, parm_offset);
}
EXPORT_SYMBOL(fc_fill_reply_hdr);
diff --git a/drivers/scsi/libfc/fc_lport.c b/drivers/scsi/libfc/fc_lport.c
index 50c71678a156..919736a74ffa 100644
--- a/drivers/scsi/libfc/fc_lport.c
+++ b/drivers/scsi/libfc/fc_lport.c
@@ -149,7 +149,7 @@ static const char *fc_lport_state_names[] = {
* @offset: The offset into the response data
*/
struct fc_bsg_info {
- struct fc_bsg_job *job;
+ struct bsg_job *job;
struct fc_lport *lport;
u16 rsp_code;
struct scatterlist *sg;
@@ -200,7 +200,7 @@ static void fc_lport_rport_callback(struct fc_lport *lport,
"in the DNS or FDMI state, it's in the "
"%d state", rdata->ids.port_id,
lport->state);
- lport->tt.rport_logoff(rdata);
+ fc_rport_logoff(rdata);
}
break;
case RPORT_EV_LOGO:
@@ -237,23 +237,26 @@ static const char *fc_lport_state(struct fc_lport *lport)
* @remote_fid: The FID of the ptp rport
* @remote_wwpn: The WWPN of the ptp rport
* @remote_wwnn: The WWNN of the ptp rport
+ *
+ * Locking Note: The lport lock is expected to be held before calling
+ * this routine.
*/
static void fc_lport_ptp_setup(struct fc_lport *lport,
u32 remote_fid, u64 remote_wwpn,
u64 remote_wwnn)
{
- mutex_lock(&lport->disc.disc_mutex);
if (lport->ptp_rdata) {
- lport->tt.rport_logoff(lport->ptp_rdata);
- kref_put(&lport->ptp_rdata->kref, lport->tt.rport_destroy);
+ fc_rport_logoff(lport->ptp_rdata);
+ kref_put(&lport->ptp_rdata->kref, fc_rport_destroy);
}
- lport->ptp_rdata = lport->tt.rport_create(lport, remote_fid);
+ mutex_lock(&lport->disc.disc_mutex);
+ lport->ptp_rdata = fc_rport_create(lport, remote_fid);
kref_get(&lport->ptp_rdata->kref);
lport->ptp_rdata->ids.port_name = remote_wwpn;
lport->ptp_rdata->ids.node_name = remote_wwnn;
mutex_unlock(&lport->disc.disc_mutex);
- lport->tt.rport_login(lport->ptp_rdata);
+ fc_rport_login(lport->ptp_rdata);
fc_lport_enter_ready(lport);
}
@@ -409,7 +412,7 @@ static void fc_lport_recv_rlir_req(struct fc_lport *lport, struct fc_frame *fp)
FC_LPORT_DBG(lport, "Received RLIR request while in state %s\n",
fc_lport_state(lport));
- lport->tt.seq_els_rsp_send(fp, ELS_LS_ACC, NULL);
+ fc_seq_els_rsp_send(fp, ELS_LS_ACC, NULL);
fc_frame_free(fp);
}
@@ -478,7 +481,7 @@ static void fc_lport_recv_rnid_req(struct fc_lport *lport,
if (!req) {
rjt_data.reason = ELS_RJT_LOGIC;
rjt_data.explan = ELS_EXPL_NONE;
- lport->tt.seq_els_rsp_send(in_fp, ELS_LS_RJT, &rjt_data);
+ fc_seq_els_rsp_send(in_fp, ELS_LS_RJT, &rjt_data);
} else {
fmt = req->rnid_fmt;
len = sizeof(*rp);
@@ -518,7 +521,7 @@ static void fc_lport_recv_rnid_req(struct fc_lport *lport,
*/
static void fc_lport_recv_logo_req(struct fc_lport *lport, struct fc_frame *fp)
{
- lport->tt.seq_els_rsp_send(fp, ELS_LS_ACC, NULL);
+ fc_seq_els_rsp_send(fp, ELS_LS_ACC, NULL);
fc_lport_enter_reset(lport);
fc_frame_free(fp);
}
@@ -620,9 +623,9 @@ int fc_fabric_logoff(struct fc_lport *lport)
lport->tt.disc_stop_final(lport);
mutex_lock(&lport->lp_mutex);
if (lport->dns_rdata)
- lport->tt.rport_logoff(lport->dns_rdata);
+ fc_rport_logoff(lport->dns_rdata);
mutex_unlock(&lport->lp_mutex);
- lport->tt.rport_flush_queue();
+ fc_rport_flush_queue();
mutex_lock(&lport->lp_mutex);
fc_lport_enter_logo(lport);
mutex_unlock(&lport->lp_mutex);
@@ -899,7 +902,7 @@ static void fc_lport_recv_els_req(struct fc_lport *lport,
/*
* Check opcode.
*/
- recv = lport->tt.rport_recv_req;
+ recv = fc_rport_recv_req;
switch (fc_frame_payload_op(fp)) {
case ELS_FLOGI:
if (!lport->point_to_multipoint)
@@ -941,15 +944,14 @@ struct fc4_prov fc_lport_els_prov = {
};
/**
- * fc_lport_recv_req() - The generic lport request handler
+ * fc_lport_recv() - The generic lport request handler
* @lport: The lport that received the request
* @fp: The frame the request is in
*
* Locking Note: This function should not be called with the lport
* lock held because it may grab the lock.
*/
-static void fc_lport_recv_req(struct fc_lport *lport,
- struct fc_frame *fp)
+void fc_lport_recv(struct fc_lport *lport, struct fc_frame *fp)
{
struct fc_frame_header *fh = fc_frame_header_get(fp);
struct fc_seq *sp = fr_seq(fp);
@@ -978,8 +980,9 @@ drop:
FC_LPORT_DBG(lport, "dropping unexpected frame type %x\n", fh->fh_type);
fc_frame_free(fp);
if (sp)
- lport->tt.exch_done(sp);
+ fc_exch_done(sp);
}
+EXPORT_SYMBOL(fc_lport_recv);
/**
* fc_lport_reset() - Reset a local port
@@ -1007,12 +1010,14 @@ EXPORT_SYMBOL(fc_lport_reset);
*/
static void fc_lport_reset_locked(struct fc_lport *lport)
{
- if (lport->dns_rdata)
- lport->tt.rport_logoff(lport->dns_rdata);
+ if (lport->dns_rdata) {
+ fc_rport_logoff(lport->dns_rdata);
+ lport->dns_rdata = NULL;
+ }
if (lport->ptp_rdata) {
- lport->tt.rport_logoff(lport->ptp_rdata);
- kref_put(&lport->ptp_rdata->kref, lport->tt.rport_destroy);
+ fc_rport_logoff(lport->ptp_rdata);
+ kref_put(&lport->ptp_rdata->kref, fc_rport_destroy);
lport->ptp_rdata = NULL;
}
@@ -1426,13 +1431,13 @@ static void fc_lport_enter_dns(struct fc_lport *lport)
fc_lport_state_enter(lport, LPORT_ST_DNS);
mutex_lock(&lport->disc.disc_mutex);
- rdata = lport->tt.rport_create(lport, FC_FID_DIR_SERV);
+ rdata = fc_rport_create(lport, FC_FID_DIR_SERV);
mutex_unlock(&lport->disc.disc_mutex);
if (!rdata)
goto err;
rdata->ops = &fc_lport_rport_ops;
- lport->tt.rport_login(rdata);
+ fc_rport_login(rdata);
return;
err:
@@ -1543,13 +1548,13 @@ static void fc_lport_enter_fdmi(struct fc_lport *lport)
fc_lport_state_enter(lport, LPORT_ST_FDMI);
mutex_lock(&lport->disc.disc_mutex);
- rdata = lport->tt.rport_create(lport, FC_FID_MGMT_SERV);
+ rdata = fc_rport_create(lport, FC_FID_MGMT_SERV);
mutex_unlock(&lport->disc.disc_mutex);
if (!rdata)
goto err;
rdata->ops = &fc_lport_rport_ops;
- lport->tt.rport_login(rdata);
+ fc_rport_login(rdata);
return;
err:
@@ -1772,7 +1777,7 @@ void fc_lport_flogi_resp(struct fc_seq *sp, struct fc_frame *fp,
if ((csp_flags & FC_SP_FT_FPORT) == 0) {
if (e_d_tov > lport->e_d_tov)
lport->e_d_tov = e_d_tov;
- lport->r_a_tov = 2 * e_d_tov;
+ lport->r_a_tov = 2 * lport->e_d_tov;
fc_lport_set_port_id(lport, did, fp);
printk(KERN_INFO "host%d: libfc: "
"Port (%6.6x) entered "
@@ -1784,8 +1789,10 @@ void fc_lport_flogi_resp(struct fc_seq *sp, struct fc_frame *fp,
get_unaligned_be64(
&flp->fl_wwnn));
} else {
- lport->e_d_tov = e_d_tov;
- lport->r_a_tov = r_a_tov;
+ if (e_d_tov > lport->e_d_tov)
+ lport->e_d_tov = e_d_tov;
+ if (r_a_tov > lport->r_a_tov)
+ lport->r_a_tov = r_a_tov;
fc_host_fabric_name(lport->host) =
get_unaligned_be64(&flp->fl_wwnn);
fc_lport_set_port_id(lport, did, fp);
@@ -1858,12 +1865,6 @@ EXPORT_SYMBOL(fc_lport_config);
*/
int fc_lport_init(struct fc_lport *lport)
{
- if (!lport->tt.lport_recv)
- lport->tt.lport_recv = fc_lport_recv_req;
-
- if (!lport->tt.lport_reset)
- lport->tt.lport_reset = fc_lport_reset;
-
fc_host_port_type(lport->host) = FC_PORTTYPE_NPORT;
fc_host_node_name(lport->host) = lport->wwnn;
fc_host_port_name(lport->host) = lport->wwpn;
@@ -1900,18 +1901,19 @@ static void fc_lport_bsg_resp(struct fc_seq *sp, struct fc_frame *fp,
void *info_arg)
{
struct fc_bsg_info *info = info_arg;
- struct fc_bsg_job *job = info->job;
+ struct bsg_job *job = info->job;
+ struct fc_bsg_reply *bsg_reply = job->reply;
struct fc_lport *lport = info->lport;
struct fc_frame_header *fh;
size_t len;
void *buf;
if (IS_ERR(fp)) {
- job->reply->result = (PTR_ERR(fp) == -FC_EX_CLOSED) ?
+ bsg_reply->result = (PTR_ERR(fp) == -FC_EX_CLOSED) ?
-ECONNABORTED : -ETIMEDOUT;
job->reply_len = sizeof(uint32_t);
- job->state_flags |= FC_RQST_STATE_DONE;
- job->job_done(job);
+ bsg_job_done(job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
kfree(info);
return;
}
@@ -1928,25 +1930,25 @@ static void fc_lport_bsg_resp(struct fc_seq *sp, struct fc_frame *fp,
(unsigned short)fc_frame_payload_op(fp);
/* Save the reply status of the job */
- job->reply->reply_data.ctels_reply.status =
+ bsg_reply->reply_data.ctels_reply.status =
(cmd == info->rsp_code) ?
FC_CTELS_STATUS_OK : FC_CTELS_STATUS_REJECT;
}
- job->reply->reply_payload_rcv_len +=
+ bsg_reply->reply_payload_rcv_len +=
fc_copy_buffer_to_sglist(buf, len, info->sg, &info->nents,
&info->offset, NULL);
if (fr_eof(fp) == FC_EOF_T &&
(ntoh24(fh->fh_f_ctl) & (FC_FC_LAST_SEQ | FC_FC_END_SEQ)) ==
(FC_FC_LAST_SEQ | FC_FC_END_SEQ)) {
- if (job->reply->reply_payload_rcv_len >
+ if (bsg_reply->reply_payload_rcv_len >
job->reply_payload.payload_len)
- job->reply->reply_payload_rcv_len =
+ bsg_reply->reply_payload_rcv_len =
job->reply_payload.payload_len;
- job->reply->result = 0;
- job->state_flags |= FC_RQST_STATE_DONE;
- job->job_done(job);
+ bsg_reply->result = 0;
+ bsg_job_done(job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
kfree(info);
}
fc_frame_free(fp);
@@ -1962,7 +1964,7 @@ static void fc_lport_bsg_resp(struct fc_seq *sp, struct fc_frame *fp,
* Locking Note: The lport lock is expected to be held before calling
* this routine.
*/
-static int fc_lport_els_request(struct fc_bsg_job *job,
+static int fc_lport_els_request(struct bsg_job *job,
struct fc_lport *lport,
u32 did, u32 tov)
{
@@ -2005,8 +2007,8 @@ static int fc_lport_els_request(struct fc_bsg_job *job,
info->nents = job->reply_payload.sg_cnt;
info->sg = job->reply_payload.sg_list;
- if (!lport->tt.exch_seq_send(lport, fp, fc_lport_bsg_resp,
- NULL, info, tov)) {
+ if (!fc_exch_seq_send(lport, fp, fc_lport_bsg_resp,
+ NULL, info, tov)) {
kfree(info);
return -ECOMM;
}
@@ -2023,7 +2025,7 @@ static int fc_lport_els_request(struct fc_bsg_job *job,
* Locking Note: The lport lock is expected to be held before calling
* this routine.
*/
-static int fc_lport_ct_request(struct fc_bsg_job *job,
+static int fc_lport_ct_request(struct bsg_job *job,
struct fc_lport *lport, u32 did, u32 tov)
{
struct fc_bsg_info *info;
@@ -2066,8 +2068,8 @@ static int fc_lport_ct_request(struct fc_bsg_job *job,
info->nents = job->reply_payload.sg_cnt;
info->sg = job->reply_payload.sg_list;
- if (!lport->tt.exch_seq_send(lport, fp, fc_lport_bsg_resp,
- NULL, info, tov)) {
+ if (!fc_exch_seq_send(lport, fp, fc_lport_bsg_resp,
+ NULL, info, tov)) {
kfree(info);
return -ECOMM;
}
@@ -2079,25 +2081,27 @@ static int fc_lport_ct_request(struct fc_bsg_job *job,
* FC Passthrough requests
* @job: The BSG passthrough job
*/
-int fc_lport_bsg_request(struct fc_bsg_job *job)
+int fc_lport_bsg_request(struct bsg_job *job)
{
+ struct fc_bsg_request *bsg_request = job->request;
+ struct fc_bsg_reply *bsg_reply = job->reply;
struct request *rsp = job->req->next_rq;
- struct Scsi_Host *shost = job->shost;
+ struct Scsi_Host *shost = fc_bsg_to_shost(job);
struct fc_lport *lport = shost_priv(shost);
struct fc_rport *rport;
struct fc_rport_priv *rdata;
int rc = -EINVAL;
u32 did, tov;
- job->reply->reply_payload_rcv_len = 0;
+ bsg_reply->reply_payload_rcv_len = 0;
if (rsp)
rsp->resid_len = job->reply_payload.payload_len;
mutex_lock(&lport->lp_mutex);
- switch (job->request->msgcode) {
+ switch (bsg_request->msgcode) {
case FC_BSG_RPT_ELS:
- rport = job->rport;
+ rport = fc_bsg_to_rport(job);
if (!rport)
break;
@@ -2107,7 +2111,7 @@ int fc_lport_bsg_request(struct fc_bsg_job *job)
break;
case FC_BSG_RPT_CT:
- rport = job->rport;
+ rport = fc_bsg_to_rport(job);
if (!rport)
break;
@@ -2117,25 +2121,25 @@ int fc_lport_bsg_request(struct fc_bsg_job *job)
break;
case FC_BSG_HST_CT:
- did = ntoh24(job->request->rqst_data.h_ct.port_id);
+ did = ntoh24(bsg_request->rqst_data.h_ct.port_id);
if (did == FC_FID_DIR_SERV) {
rdata = lport->dns_rdata;
if (!rdata)
break;
tov = rdata->e_d_tov;
} else {
- rdata = lport->tt.rport_lookup(lport, did);
+ rdata = fc_rport_lookup(lport, did);
if (!rdata)
break;
tov = rdata->e_d_tov;
- kref_put(&rdata->kref, lport->tt.rport_destroy);
+ kref_put(&rdata->kref, fc_rport_destroy);
}
rc = fc_lport_ct_request(job, lport, did, tov);
break;
case FC_BSG_HST_ELS_NOLOGIN:
- did = ntoh24(job->request->rqst_data.h_els.port_id);
+ did = ntoh24(bsg_request->rqst_data.h_els.port_id);
rc = fc_lport_els_request(job, lport, did, lport->e_d_tov);
break;
}
diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c
index 97aeaddd600d..c991f3b822f8 100644
--- a/drivers/scsi/libfc/fc_rport.c
+++ b/drivers/scsi/libfc/fc_rport.c
@@ -44,6 +44,19 @@
* path this potential over-use of the mutex is acceptable.
*/
+/*
+ * RPORT REFERENCE COUNTING
+ *
+ * A rport reference should be taken when:
+ * - an rport is allocated
+ * - a workqueue item is scheduled
+ * - an ELS request is sent
+ * The reference should be dropped when:
+ * - the workqueue function has finished
+ * - the ELS response is handled
+ * - an rport is removed
+ */
+
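These rules show up as a consistent pairing pattern throughout the patch: take a reference before handing the rdata to deferred work, and drop it again if the hand-off fails (or when the deferred function finishes). A minimal sketch of the queue_work() variant used below:

	kref_get(&rdata->kref);
	if (!queue_work(rport_event_queue, &rdata->event_work))
		/* work was already queued; drop the extra reference */
		kref_put(&rdata->kref, fc_rport_destroy);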
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
@@ -74,8 +87,8 @@ static void fc_rport_recv_prli_req(struct fc_rport_priv *, struct fc_frame *);
static void fc_rport_recv_prlo_req(struct fc_rport_priv *, struct fc_frame *);
static void fc_rport_recv_logo_req(struct fc_lport *, struct fc_frame *);
static void fc_rport_timeout(struct work_struct *);
-static void fc_rport_error(struct fc_rport_priv *, struct fc_frame *);
-static void fc_rport_error_retry(struct fc_rport_priv *, struct fc_frame *);
+static void fc_rport_error(struct fc_rport_priv *, int);
+static void fc_rport_error_retry(struct fc_rport_priv *, int);
static void fc_rport_work(struct work_struct *);
static const char *fc_rport_state_names[] = {
@@ -98,8 +111,8 @@ static const char *fc_rport_state_names[] = {
* The reference count of the fc_rport_priv structure is
* increased by one.
*/
-static struct fc_rport_priv *fc_rport_lookup(const struct fc_lport *lport,
- u32 port_id)
+struct fc_rport_priv *fc_rport_lookup(const struct fc_lport *lport,
+ u32 port_id)
{
struct fc_rport_priv *rdata = NULL, *tmp_rdata;
@@ -113,6 +126,7 @@ static struct fc_rport_priv *fc_rport_lookup(const struct fc_lport *lport,
rcu_read_unlock();
return rdata;
}
+EXPORT_SYMBOL(fc_rport_lookup);
/**
* fc_rport_create() - Create a new remote port
@@ -123,12 +137,11 @@ static struct fc_rport_priv *fc_rport_lookup(const struct fc_lport *lport,
*
* Locking note: must be called with the disc_mutex held.
*/
-static struct fc_rport_priv *fc_rport_create(struct fc_lport *lport,
- u32 port_id)
+struct fc_rport_priv *fc_rport_create(struct fc_lport *lport, u32 port_id)
{
struct fc_rport_priv *rdata;
- rdata = lport->tt.rport_lookup(lport, port_id);
+ rdata = fc_rport_lookup(lport, port_id);
if (rdata)
return rdata;
@@ -158,18 +171,20 @@ static struct fc_rport_priv *fc_rport_create(struct fc_lport *lport,
}
return rdata;
}
+EXPORT_SYMBOL(fc_rport_create);
/**
* fc_rport_destroy() - Free a remote port after last reference is released
* @kref: The remote port's kref
*/
-static void fc_rport_destroy(struct kref *kref)
+void fc_rport_destroy(struct kref *kref)
{
struct fc_rport_priv *rdata;
rdata = container_of(kref, struct fc_rport_priv, kref);
kfree_rcu(rdata, rcu);
}
+EXPORT_SYMBOL(fc_rport_destroy);
/**
* fc_rport_state() - Return a string identifying the remote port's state
@@ -242,6 +257,8 @@ static void fc_rport_state_enter(struct fc_rport_priv *rdata,
/**
* fc_rport_work() - Handler for remote port events in the rport_event_queue
* @work: Handle to the remote port being dequeued
+ *
+ * Reference counting: drops kref on return
*/
static void fc_rport_work(struct work_struct *work)
{
@@ -272,12 +289,14 @@ static void fc_rport_work(struct work_struct *work)
kref_get(&rdata->kref);
mutex_unlock(&rdata->rp_mutex);
- if (!rport)
+ if (!rport) {
+ FC_RPORT_DBG(rdata, "No rport!\n");
rport = fc_remote_port_add(lport->host, 0, &ids);
+ }
if (!rport) {
FC_RPORT_DBG(rdata, "Failed to add the rport\n");
- lport->tt.rport_logoff(rdata);
- kref_put(&rdata->kref, lport->tt.rport_destroy);
+ fc_rport_logoff(rdata);
+ kref_put(&rdata->kref, fc_rport_destroy);
return;
}
mutex_lock(&rdata->rp_mutex);
@@ -303,7 +322,7 @@ static void fc_rport_work(struct work_struct *work)
FC_RPORT_DBG(rdata, "lld callback ev %d\n", event);
rdata->lld_event_callback(lport, rdata, event);
}
- kref_put(&rdata->kref, lport->tt.rport_destroy);
+ kref_put(&rdata->kref, fc_rport_destroy);
break;
case RPORT_EV_FAILED:
@@ -329,7 +348,8 @@ static void fc_rport_work(struct work_struct *work)
FC_RPORT_DBG(rdata, "lld callback ev %d\n", event);
rdata->lld_event_callback(lport, rdata, event);
}
- cancel_delayed_work_sync(&rdata->retry_work);
+ if (cancel_delayed_work_sync(&rdata->retry_work))
+ kref_put(&rdata->kref, fc_rport_destroy);
/*
* Reset any outstanding exchanges before freeing rport.
@@ -351,7 +371,7 @@ static void fc_rport_work(struct work_struct *work)
if (port_id == FC_FID_DIR_SERV) {
rdata->event = RPORT_EV_NONE;
mutex_unlock(&rdata->rp_mutex);
- kref_put(&rdata->kref, lport->tt.rport_destroy);
+ kref_put(&rdata->kref, fc_rport_destroy);
} else if ((rdata->flags & FC_RP_STARTED) &&
rdata->major_retries <
lport->max_rport_retry_count) {
@@ -362,17 +382,21 @@ static void fc_rport_work(struct work_struct *work)
mutex_unlock(&rdata->rp_mutex);
} else {
FC_RPORT_DBG(rdata, "work delete\n");
+ mutex_lock(&lport->disc.disc_mutex);
list_del_rcu(&rdata->peers);
+ mutex_unlock(&lport->disc.disc_mutex);
mutex_unlock(&rdata->rp_mutex);
- kref_put(&rdata->kref, lport->tt.rport_destroy);
+ kref_put(&rdata->kref, fc_rport_destroy);
}
} else {
/*
* Re-open for events. Reissue READY event if ready.
*/
rdata->event = RPORT_EV_NONE;
- if (rdata->rp_state == RPORT_ST_READY)
+ if (rdata->rp_state == RPORT_ST_READY) {
+ FC_RPORT_DBG(rdata, "work reopen\n");
fc_rport_enter_ready(rdata);
+ }
mutex_unlock(&rdata->rp_mutex);
}
break;
@@ -381,12 +405,21 @@ static void fc_rport_work(struct work_struct *work)
mutex_unlock(&rdata->rp_mutex);
break;
}
+ kref_put(&rdata->kref, fc_rport_destroy);
}
/**
* fc_rport_login() - Start the remote port login state machine
* @rdata: The remote port to be logged in to
*
+ * Initiates the RP state machine. It is called from the LP module.
+ * This function will issue the following commands to the N_Port
+ * identified by the FC ID provided.
+ *
+ * - PLOGI
+ * - PRLI
+ * - RTV
+ *
* Locking Note: Called without the rport lock held. This
* function will hold the rport lock, call an _enter_*
* function and then unlock the rport.
@@ -395,10 +428,16 @@ static void fc_rport_work(struct work_struct *work)
* If it appears we are already logged in, ADISC is used to verify
* the setup.
*/
-static int fc_rport_login(struct fc_rport_priv *rdata)
+int fc_rport_login(struct fc_rport_priv *rdata)
{
mutex_lock(&rdata->rp_mutex);
+ if (rdata->flags & FC_RP_STARTED) {
+ FC_RPORT_DBG(rdata, "port already started\n");
+ mutex_unlock(&rdata->rp_mutex);
+ return 0;
+ }
+
rdata->flags |= FC_RP_STARTED;
switch (rdata->rp_state) {
case RPORT_ST_READY:
@@ -408,15 +447,20 @@ static int fc_rport_login(struct fc_rport_priv *rdata)
case RPORT_ST_DELETE:
FC_RPORT_DBG(rdata, "Restart deleted port\n");
break;
- default:
+ case RPORT_ST_INIT:
FC_RPORT_DBG(rdata, "Login to port\n");
fc_rport_enter_flogi(rdata);
break;
+ default:
+ FC_RPORT_DBG(rdata, "Login in progress, state %s\n",
+ fc_rport_state(rdata));
+ break;
}
mutex_unlock(&rdata->rp_mutex);
return 0;
}
+EXPORT_SYMBOL(fc_rport_login);
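Typical discovery-path usage, mirroring fc_lport_enter_dns() in the fc_lport.c hunks above: create (or look up) the rdata under the disc_mutex, set its ops, then start the login state machine; my_rport_ops is a hypothetical ops table:

	mutex_lock(&lport->disc.disc_mutex);
	rdata = fc_rport_create(lport, port_id);
	mutex_unlock(&lport->disc.disc_mutex);
	if (!rdata)
		return;

	rdata->ops = &my_rport_ops;
	fc_rport_login(rdata);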
/**
* fc_rport_enter_delete() - Schedule a remote port to be deleted
@@ -431,6 +475,8 @@ static int fc_rport_login(struct fc_rport_priv *rdata)
* Set the new event so that the old pending event will not occur.
* Since we have the mutex, even if fc_rport_work() is already started,
* it'll see the new event.
+ *
+ * Reference counting: does not modify kref
*/
static void fc_rport_enter_delete(struct fc_rport_priv *rdata,
enum fc_rport_event event)
@@ -442,8 +488,11 @@ static void fc_rport_enter_delete(struct fc_rport_priv *rdata,
fc_rport_state_enter(rdata, RPORT_ST_DELETE);
- if (rdata->event == RPORT_EV_NONE)
- queue_work(rport_event_queue, &rdata->event_work);
+ kref_get(&rdata->kref);
+ if (rdata->event == RPORT_EV_NONE &&
+ !queue_work(rport_event_queue, &rdata->event_work))
+ kref_put(&rdata->kref, fc_rport_destroy);
+
rdata->event = event;
}
@@ -455,7 +504,7 @@ static void fc_rport_enter_delete(struct fc_rport_priv *rdata,
* function will hold the rport lock, call an _enter_*
* function and then unlock the rport.
*/
-static int fc_rport_logoff(struct fc_rport_priv *rdata)
+int fc_rport_logoff(struct fc_rport_priv *rdata)
{
struct fc_lport *lport = rdata->local_port;
u32 port_id = rdata->ids.port_id;
@@ -489,6 +538,7 @@ out:
mutex_unlock(&rdata->rp_mutex);
return 0;
}
+EXPORT_SYMBOL(fc_rport_logoff);
/**
* fc_rport_enter_ready() - Transition to the RPORT_ST_READY state
@@ -496,6 +546,8 @@ out:
*
* Locking Note: The rport lock is expected to be held before calling
* this routine.
+ *
+ * Reference counting: schedules workqueue, does not modify kref
*/
static void fc_rport_enter_ready(struct fc_rport_priv *rdata)
{
@@ -503,8 +555,11 @@ static void fc_rport_enter_ready(struct fc_rport_priv *rdata)
FC_RPORT_DBG(rdata, "Port is Ready\n");
- if (rdata->event == RPORT_EV_NONE)
- queue_work(rport_event_queue, &rdata->event_work);
+ kref_get(&rdata->kref);
+ if (rdata->event == RPORT_EV_NONE &&
+ !queue_work(rport_event_queue, &rdata->event_work))
+ kref_put(&rdata->kref, fc_rport_destroy);
+
rdata->event = RPORT_EV_READY;
}
@@ -515,6 +570,8 @@ static void fc_rport_enter_ready(struct fc_rport_priv *rdata)
* Locking Note: Called without the rport lock held. This
* function will hold the rport lock, call an _enter_*
* function and then unlock the rport.
+ *
+ * Reference counting: Drops kref on return.
*/
static void fc_rport_timeout(struct work_struct *work)
{
@@ -522,6 +579,7 @@ static void fc_rport_timeout(struct work_struct *work)
container_of(work, struct fc_rport_priv, retry_work.work);
mutex_lock(&rdata->rp_mutex);
+ FC_RPORT_DBG(rdata, "Port timeout, state %s\n", fc_rport_state(rdata));
switch (rdata->rp_state) {
case RPORT_ST_FLOGI:
@@ -547,23 +605,25 @@ static void fc_rport_timeout(struct work_struct *work)
}
mutex_unlock(&rdata->rp_mutex);
+ kref_put(&rdata->kref, fc_rport_destroy);
}
/**
* fc_rport_error() - Error handler, called once retries have been exhausted
* @rdata: The remote port the error is happened on
- * @fp: The error code encapsulated in a frame pointer
+ * @err: The error code
*
* Locking Note: The rport lock is expected to be held before
* calling this routine
+ *
+ * Reference counting: does not modify kref
*/
-static void fc_rport_error(struct fc_rport_priv *rdata, struct fc_frame *fp)
+static void fc_rport_error(struct fc_rport_priv *rdata, int err)
{
struct fc_lport *lport = rdata->local_port;
- FC_RPORT_DBG(rdata, "Error %ld in state %s, retries %d\n",
- IS_ERR(fp) ? -PTR_ERR(fp) : 0,
- fc_rport_state(rdata), rdata->retries);
+ FC_RPORT_DBG(rdata, "Error %d in state %s, retries %d\n",
+ -err, fc_rport_state(rdata), rdata->retries);
switch (rdata->rp_state) {
case RPORT_ST_FLOGI:
@@ -595,36 +655,39 @@ static void fc_rport_error(struct fc_rport_priv *rdata, struct fc_frame *fp)
/**
* fc_rport_error_retry() - Handler for remote port state retries
* @rdata: The remote port whose state is to be retried
- * @fp: The error code encapsulated in a frame pointer
+ * @err: The error code
*
* If the error was an exchange timeout retry immediately,
* otherwise wait for E_D_TOV.
*
* Locking Note: The rport lock is expected to be held before
* calling this routine
+ *
+ * Reference counting: increments kref when scheduling retry_work
*/
-static void fc_rport_error_retry(struct fc_rport_priv *rdata,
- struct fc_frame *fp)
+static void fc_rport_error_retry(struct fc_rport_priv *rdata, int err)
{
- unsigned long delay = msecs_to_jiffies(FC_DEF_E_D_TOV);
+ unsigned long delay = msecs_to_jiffies(rdata->e_d_tov);
/* make sure this isn't an FC_EX_CLOSED error, never retry those */
- if (PTR_ERR(fp) == -FC_EX_CLOSED)
+ if (err == -FC_EX_CLOSED)
goto out;
if (rdata->retries < rdata->local_port->max_rport_retry_count) {
- FC_RPORT_DBG(rdata, "Error %ld in state %s, retrying\n",
- PTR_ERR(fp), fc_rport_state(rdata));
+ FC_RPORT_DBG(rdata, "Error %d in state %s, retrying\n",
+ err, fc_rport_state(rdata));
rdata->retries++;
/* no additional delay on exchange timeouts */
- if (PTR_ERR(fp) == -FC_EX_TIMEOUT)
+ if (err == -FC_EX_TIMEOUT)
delay = 0;
- schedule_delayed_work(&rdata->retry_work, delay);
+ kref_get(&rdata->kref);
+ if (!schedule_delayed_work(&rdata->retry_work, delay))
+ kref_put(&rdata->kref, fc_rport_destroy);
return;
}
out:
- fc_rport_error(rdata, fp);
+ fc_rport_error(rdata, err);
}
/**
@@ -684,8 +747,11 @@ static void fc_rport_flogi_resp(struct fc_seq *sp, struct fc_frame *fp,
struct fc_lport *lport = rdata->local_port;
struct fc_els_flogi *flogi;
unsigned int r_a_tov;
+ u8 opcode;
+ int err = 0;
- FC_RPORT_DBG(rdata, "Received a FLOGI %s\n", fc_els_resp_type(fp));
+ FC_RPORT_DBG(rdata, "Received a FLOGI %s\n",
+ IS_ERR(fp) ? "error" : fc_els_resp_type(fp));
if (fp == ERR_PTR(-FC_EX_CLOSED))
goto put;
@@ -701,18 +767,34 @@ static void fc_rport_flogi_resp(struct fc_seq *sp, struct fc_frame *fp,
}
if (IS_ERR(fp)) {
- fc_rport_error(rdata, fp);
+ fc_rport_error(rdata, PTR_ERR(fp));
goto err;
}
-
- if (fc_frame_payload_op(fp) != ELS_LS_ACC)
+ opcode = fc_frame_payload_op(fp);
+ if (opcode == ELS_LS_RJT) {
+ struct fc_els_ls_rjt *rjt;
+
+ rjt = fc_frame_payload_get(fp, sizeof(*rjt));
+ FC_RPORT_DBG(rdata, "FLOGI ELS rejected, reason %x expl %x\n",
+ rjt->er_reason, rjt->er_explan);
+ err = -FC_EX_ELS_RJT;
goto bad;
- if (fc_rport_login_complete(rdata, fp))
+ } else if (opcode != ELS_LS_ACC) {
+ FC_RPORT_DBG(rdata, "FLOGI ELS invalid opcode %x\n", opcode);
+ err = -FC_EX_ELS_RJT;
goto bad;
+ }
+ if (fc_rport_login_complete(rdata, fp)) {
+ FC_RPORT_DBG(rdata, "FLOGI failed, no login\n");
+ err = -FC_EX_INV_LOGIN;
+ goto bad;
+ }
flogi = fc_frame_payload_get(fp, sizeof(*flogi));
- if (!flogi)
+ if (!flogi) {
+ err = -FC_EX_ALLOC_ERR;
goto bad;
+ }
r_a_tov = ntohl(flogi->fl_csp.sp_r_a_tov);
if (r_a_tov > rdata->r_a_tov)
rdata->r_a_tov = r_a_tov;
@@ -726,11 +808,11 @@ out:
err:
mutex_unlock(&rdata->rp_mutex);
put:
- kref_put(&rdata->kref, lport->tt.rport_destroy);
+ kref_put(&rdata->kref, fc_rport_destroy);
return;
bad:
FC_RPORT_DBG(rdata, "Bad FLOGI response\n");
- fc_rport_error_retry(rdata, fp);
+ fc_rport_error_retry(rdata, err);
goto out;
}
@@ -740,6 +822,8 @@ bad:
*
* Locking Note: The rport lock is expected to be held before calling
* this routine.
+ *
+ * Reference counting: increments kref when sending ELS
*/
static void fc_rport_enter_flogi(struct fc_rport_priv *rdata)
{
@@ -756,20 +840,23 @@ static void fc_rport_enter_flogi(struct fc_rport_priv *rdata)
fp = fc_frame_alloc(lport, sizeof(struct fc_els_flogi));
if (!fp)
- return fc_rport_error_retry(rdata, fp);
+ return fc_rport_error_retry(rdata, -FC_EX_ALLOC_ERR);
+ kref_get(&rdata->kref);
if (!lport->tt.elsct_send(lport, rdata->ids.port_id, fp, ELS_FLOGI,
fc_rport_flogi_resp, rdata,
- 2 * lport->r_a_tov))
- fc_rport_error_retry(rdata, NULL);
- else
- kref_get(&rdata->kref);
+ 2 * lport->r_a_tov)) {
+ fc_rport_error_retry(rdata, -FC_EX_XMIT_ERR);
+ kref_put(&rdata->kref, fc_rport_destroy);
+ }
}
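The same shape recurs in the PLOGI, PRLI, RTV and ADISC senders below: take the reference before handing rdata to the exchange layer, and return it only when the send fails, since the response handler then never runs. A condensed sketch of the pattern, where op and resp_handler stand in for the per-state values:

	kref_get(&rdata->kref);			/* dropped by resp_handler */
	if (!lport->tt.elsct_send(lport, rdata->ids.port_id, fp, op,
				  resp_handler, rdata, 2 * lport->r_a_tov)) {
		/* send failed: resp_handler will never run */
		fc_rport_error_retry(rdata, -FC_EX_XMIT_ERR);
		kref_put(&rdata->kref, fc_rport_destroy);
	}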
/**
* fc_rport_recv_flogi_req() - Handle Fabric Login (FLOGI) request in p-mp mode
* @lport: The local port that received the PLOGI request
* @rx_fp: The PLOGI request frame
+ *
+ * Reference counting: drops kref on return
*/
static void fc_rport_recv_flogi_req(struct fc_lport *lport,
struct fc_frame *rx_fp)
@@ -799,7 +886,7 @@ static void fc_rport_recv_flogi_req(struct fc_lport *lport,
goto reject;
}
- rdata = lport->tt.rport_lookup(lport, sid);
+ rdata = fc_rport_lookup(lport, sid);
if (!rdata) {
rjt_data.reason = ELS_RJT_FIP;
rjt_data.explan = ELS_EXPL_NOT_NEIGHBOR;
@@ -824,8 +911,7 @@ static void fc_rport_recv_flogi_req(struct fc_lport *lport,
* RPORT wouldn't have been created and 'rport_lookup' would have
* failed anyway in that case.
*/
- if (lport->point_to_multipoint)
- break;
+ break;
case RPORT_ST_DELETE:
mutex_unlock(&rdata->rp_mutex);
rjt_data.reason = ELS_RJT_FIP;
@@ -867,20 +953,27 @@ static void fc_rport_recv_flogi_req(struct fc_lport *lport,
fc_fill_reply_hdr(fp, rx_fp, FC_RCTL_ELS_REP, 0);
lport->tt.frame_send(lport, fp);
- if (rdata->ids.port_name < lport->wwpn)
- fc_rport_enter_plogi(rdata);
- else
- fc_rport_state_enter(rdata, RPORT_ST_PLOGI_WAIT);
+ /*
+ * Do not proceed with the state machine if our
+ * FLOGI has crossed with an FLOGI from the
+ * remote port; wait for the FLOGI response instead.
+ */
+ if (rdata->rp_state != RPORT_ST_FLOGI) {
+ if (rdata->ids.port_name < lport->wwpn)
+ fc_rport_enter_plogi(rdata);
+ else
+ fc_rport_state_enter(rdata, RPORT_ST_PLOGI_WAIT);
+ }
out:
mutex_unlock(&rdata->rp_mutex);
- kref_put(&rdata->kref, lport->tt.rport_destroy);
+ kref_put(&rdata->kref, fc_rport_destroy);
fc_frame_free(rx_fp);
return;
reject_put:
- kref_put(&rdata->kref, lport->tt.rport_destroy);
+ kref_put(&rdata->kref, fc_rport_destroy);
reject:
- lport->tt.seq_els_rsp_send(rx_fp, ELS_LS_RJT, &rjt_data);
+ fc_seq_els_rsp_send(rx_fp, ELS_LS_RJT, &rjt_data);
fc_frame_free(rx_fp);
}
@@ -904,10 +997,13 @@ static void fc_rport_plogi_resp(struct fc_seq *sp, struct fc_frame *fp,
u16 cssp_seq;
u8 op;
- mutex_lock(&rdata->rp_mutex);
-
FC_RPORT_DBG(rdata, "Received a PLOGI %s\n", fc_els_resp_type(fp));
+ if (fp == ERR_PTR(-FC_EX_CLOSED))
+ goto put;
+
+ mutex_lock(&rdata->rp_mutex);
+
if (rdata->rp_state != RPORT_ST_PLOGI) {
FC_RPORT_DBG(rdata, "Received a PLOGI response, but in state "
"%s\n", fc_rport_state(rdata));
@@ -917,7 +1013,7 @@ static void fc_rport_plogi_resp(struct fc_seq *sp, struct fc_frame *fp,
}
if (IS_ERR(fp)) {
- fc_rport_error_retry(rdata, fp);
+ fc_rport_error_retry(rdata, PTR_ERR(fp));
goto err;
}
@@ -939,14 +1035,20 @@ static void fc_rport_plogi_resp(struct fc_seq *sp, struct fc_frame *fp,
rdata->max_seq = csp_seq;
rdata->maxframe_size = fc_plogi_get_maxframe(plp, lport->mfs);
fc_rport_enter_prli(rdata);
- } else
- fc_rport_error_retry(rdata, fp);
+ } else {
+ struct fc_els_ls_rjt *rjt;
+
+ rjt = fc_frame_payload_get(fp, sizeof(*rjt));
+ if (!rjt)
+ FC_RPORT_DBG(rdata, "PLOGI bad response\n");
+ else
+ FC_RPORT_DBG(rdata,
+ "PLOGI ELS rejected, reason %x expl %x\n",
+ rjt->er_reason, rjt->er_explan);
+ fc_rport_error_retry(rdata, -FC_EX_ELS_RJT);
+ }
out:
fc_frame_free(fp);
err:
mutex_unlock(&rdata->rp_mutex);
- kref_put(&rdata->kref, lport->tt.rport_destroy);
+put:
+ kref_put(&rdata->kref, fc_rport_destroy);
}
static bool
@@ -969,6 +1071,8 @@ fc_rport_compatible_roles(struct fc_lport *lport, struct fc_rport_priv *rdata)
*
* Locking Note: The rport lock is expected to be held before calling
* this routine.
+ *
+ * Reference counting: increments kref when sending ELS
*/
static void fc_rport_enter_plogi(struct fc_rport_priv *rdata)
{
@@ -990,17 +1094,18 @@ static void fc_rport_enter_plogi(struct fc_rport_priv *rdata)
fp = fc_frame_alloc(lport, sizeof(struct fc_els_flogi));
if (!fp) {
FC_RPORT_DBG(rdata, "%s frame alloc failed\n", __func__);
- fc_rport_error_retry(rdata, fp);
+ fc_rport_error_retry(rdata, -FC_EX_ALLOC_ERR);
return;
}
rdata->e_d_tov = lport->e_d_tov;
+ kref_get(&rdata->kref);
if (!lport->tt.elsct_send(lport, rdata->ids.port_id, fp, ELS_PLOGI,
fc_rport_plogi_resp, rdata,
- 2 * lport->r_a_tov))
- fc_rport_error_retry(rdata, NULL);
- else
- kref_get(&rdata->kref);
+ 2 * lport->r_a_tov)) {
+ fc_rport_error_retry(rdata, -FC_EX_XMIT_ERR);
+ kref_put(&rdata->kref, fc_rport_destroy);
+ }
}
/**
@@ -1022,16 +1127,20 @@ static void fc_rport_prli_resp(struct fc_seq *sp, struct fc_frame *fp,
struct fc_els_spp spp;
} *pp;
struct fc_els_spp temp_spp;
+ struct fc_els_ls_rjt *rjt;
struct fc4_prov *prov;
u32 roles = FC_RPORT_ROLE_UNKNOWN;
u32 fcp_parm = 0;
u8 op;
- u8 resp_code = 0;
-
- mutex_lock(&rdata->rp_mutex);
+ enum fc_els_spp_resp resp_code;
FC_RPORT_DBG(rdata, "Received a PRLI %s\n", fc_els_resp_type(fp));
+ if (fp == ERR_PTR(-FC_EX_CLOSED))
+ goto put;
+
+ mutex_lock(&rdata->rp_mutex);
+
if (rdata->rp_state != RPORT_ST_PRLI) {
FC_RPORT_DBG(rdata, "Received a PRLI response, but in state "
"%s\n", fc_rport_state(rdata));
@@ -1041,7 +1150,7 @@ static void fc_rport_prli_resp(struct fc_seq *sp, struct fc_frame *fp,
}
if (IS_ERR(fp)) {
- fc_rport_error_retry(rdata, fp);
+ fc_rport_error_retry(rdata, PTR_ERR(fp));
goto err;
}
@@ -1055,14 +1164,14 @@ static void fc_rport_prli_resp(struct fc_seq *sp, struct fc_frame *fp,
goto out;
resp_code = (pp->spp.spp_flags & FC_SPP_RESP_MASK);
- FC_RPORT_DBG(rdata, "PRLI spp_flags = 0x%x\n",
- pp->spp.spp_flags);
+ FC_RPORT_DBG(rdata, "PRLI spp_flags = 0x%x spp_type 0x%x\n",
+ pp->spp.spp_flags, pp->spp.spp_type);
rdata->spp_type = pp->spp.spp_type;
if (resp_code != FC_SPP_RESP_ACK) {
if (resp_code == FC_SPP_RESP_CONF)
- fc_rport_error(rdata, fp);
+ fc_rport_error(rdata, -FC_EX_SEQ_ERR);
else
- fc_rport_error_retry(rdata, fp);
+ fc_rport_error_retry(rdata, -FC_EX_SEQ_ERR);
goto out;
}
if (pp->prli.prli_spp_len < sizeof(pp->spp))
@@ -1074,13 +1183,25 @@ static void fc_rport_prli_resp(struct fc_seq *sp, struct fc_frame *fp,
if (fcp_parm & FCP_SPPF_CONF_COMPL)
rdata->flags |= FC_RP_FLAGS_CONF_REQ;
- prov = fc_passive_prov[FC_TYPE_FCP];
+ /*
+ * Call prli provider if we should act as a target
+ */
+ prov = fc_passive_prov[rdata->spp_type];
if (prov) {
memset(&temp_spp, 0, sizeof(temp_spp));
prov->prli(rdata, pp->prli.prli_spp_len,
&pp->spp, &temp_spp);
}
-
+ /*
+ * Check if the image pair could be established
+ */
+ if (rdata->spp_type != FC_TYPE_FCP ||
+ !(pp->spp.spp_flags & FC_SPP_EST_IMG_PAIR)) {
+ /*
+ * Nope; we can't use this port as a target.
+ */
+ fcp_parm &= ~FCP_SPPF_TARG_FCN;
+ }
rdata->supported_classes = FC_COS_CLASS3;
if (fcp_parm & FCP_SPPF_INIT_FCN)
roles |= FC_RPORT_ROLE_FCP_INITIATOR;
@@ -1091,15 +1212,18 @@ static void fc_rport_prli_resp(struct fc_seq *sp, struct fc_frame *fp,
fc_rport_enter_rtv(rdata);
} else {
- FC_RPORT_DBG(rdata, "Bad ELS response for PRLI command\n");
- fc_rport_error_retry(rdata, fp);
+ rjt = fc_frame_payload_get(fp, sizeof(*rjt));
+ FC_RPORT_DBG(rdata, "PRLI ELS rejected, reason %x expl %x\n",
+ rjt->er_reason, rjt->er_explan);
+ fc_rport_error_retry(rdata, -FC_EX_ELS_RJT);
}
out:
fc_frame_free(fp);
err:
mutex_unlock(&rdata->rp_mutex);
- kref_put(&rdata->kref, rdata->local_port->tt.rport_destroy);
+put:
+ kref_put(&rdata->kref, fc_rport_destroy);
}
/**
@@ -1108,6 +1232,8 @@ err:
*
* Locking Note: The rport lock is expected to be held before calling
* this routine.
+ *
+ * Reference counting: increments kref when sending ELS
*/
static void fc_rport_enter_prli(struct fc_rport_priv *rdata)
{
@@ -1128,6 +1254,15 @@ static void fc_rport_enter_prli(struct fc_rport_priv *rdata)
return;
}
+ /*
+ * And if the local port does not support the initiator function
+ * there's no need to send a PRLI, either.
+ */
+ if (!(lport->service_params & FCP_SPPF_INIT_FCN)) {
+ fc_rport_enter_ready(rdata);
+ return;
+ }
+
FC_RPORT_DBG(rdata, "Port entered PRLI state from %s state\n",
fc_rport_state(rdata));
@@ -1135,7 +1270,7 @@ static void fc_rport_enter_prli(struct fc_rport_priv *rdata)
fp = fc_frame_alloc(lport, sizeof(*pp));
if (!fp) {
- fc_rport_error_retry(rdata, fp);
+ fc_rport_error_retry(rdata, -FC_EX_ALLOC_ERR);
return;
}
@@ -1151,15 +1286,16 @@ static void fc_rport_enter_prli(struct fc_rport_priv *rdata)
fc_host_port_id(lport->host), FC_TYPE_ELS,
FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0);
- if (!lport->tt.exch_seq_send(lport, fp, fc_rport_prli_resp,
- NULL, rdata, 2 * lport->r_a_tov))
- fc_rport_error_retry(rdata, NULL);
- else
- kref_get(&rdata->kref);
+ kref_get(&rdata->kref);
+ if (!fc_exch_seq_send(lport, fp, fc_rport_prli_resp,
+ NULL, rdata, 2 * lport->r_a_tov)) {
+ fc_rport_error_retry(rdata, -FC_EX_XMIT_ERR);
+ kref_put(&rdata->kref, fc_rport_destroy);
+ }
}
/**
- * fc_rport_els_rtv_resp() - Handler for Request Timeout Value (RTV) responses
+ * fc_rport_rtv_resp() - Handler for Read Timeout Value (RTV) responses
* @sp: The sequence the RTV was on
* @fp: The RTV response frame
* @rdata_arg: The remote port that sent the RTV response
@@ -1176,10 +1312,13 @@ static void fc_rport_rtv_resp(struct fc_seq *sp, struct fc_frame *fp,
struct fc_rport_priv *rdata = rdata_arg;
u8 op;
- mutex_lock(&rdata->rp_mutex);
-
FC_RPORT_DBG(rdata, "Received a RTV %s\n", fc_els_resp_type(fp));
+ if (fp == ERR_PTR(-FC_EX_CLOSED))
+ goto put;
+
+ mutex_lock(&rdata->rp_mutex);
+
if (rdata->rp_state != RPORT_ST_RTV) {
FC_RPORT_DBG(rdata, "Received a RTV response, but in state "
"%s\n", fc_rport_state(rdata));
@@ -1189,7 +1328,7 @@ static void fc_rport_rtv_resp(struct fc_seq *sp, struct fc_frame *fp,
}
if (IS_ERR(fp)) {
- fc_rport_error(rdata, fp);
+ fc_rport_error(rdata, PTR_ERR(fp));
goto err;
}
@@ -1205,13 +1344,15 @@ static void fc_rport_rtv_resp(struct fc_seq *sp, struct fc_frame *fp,
tov = ntohl(rtv->rtv_r_a_tov);
if (tov == 0)
tov = 1;
- rdata->r_a_tov = tov;
+ if (tov > rdata->r_a_tov)
+ rdata->r_a_tov = tov;
tov = ntohl(rtv->rtv_e_d_tov);
if (toq & FC_ELS_RTV_EDRES)
tov /= 1000000;
if (tov == 0)
tov = 1;
- rdata->e_d_tov = tov;
+ if (tov > rdata->e_d_tov)
+ rdata->e_d_tov = tov;
}
}
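The RTV ACC parsing above only ever widens the negotiated timeouts and normalizes units. A small sketch of the E_D_TOV handling, assuming FC_ELS_RTV_EDRES means the peer reported the value in nanoseconds rather than the milliseconds libfc uses internally (the helper name is illustrative):

	static unsigned int rtv_e_d_tov_ms(u32 e_d_tov, u32 toq)
	{
		if (toq & FC_ELS_RTV_EDRES)
			e_d_tov /= 1000000;	/* ns -> ms */
		return e_d_tov ? e_d_tov : 1;	/* never accept zero */
	}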
@@ -1221,7 +1362,8 @@ out:
fc_frame_free(fp);
err:
mutex_unlock(&rdata->rp_mutex);
- kref_put(&rdata->kref, rdata->local_port->tt.rport_destroy);
+put:
+ kref_put(&rdata->kref, fc_rport_destroy);
}
/**
@@ -1230,6 +1372,8 @@ err:
*
* Locking Note: The rport lock is expected to be held before calling
* this routine.
+ *
+ * Reference counting: increments kref when sending ELS
*/
static void fc_rport_enter_rtv(struct fc_rport_priv *rdata)
{
@@ -1243,16 +1387,52 @@ static void fc_rport_enter_rtv(struct fc_rport_priv *rdata)
fp = fc_frame_alloc(lport, sizeof(struct fc_els_rtv));
if (!fp) {
- fc_rport_error_retry(rdata, fp);
+ fc_rport_error_retry(rdata, -FC_EX_ALLOC_ERR);
return;
}
+ kref_get(&rdata->kref);
if (!lport->tt.elsct_send(lport, rdata->ids.port_id, fp, ELS_RTV,
fc_rport_rtv_resp, rdata,
- 2 * lport->r_a_tov))
- fc_rport_error_retry(rdata, NULL);
- else
- kref_get(&rdata->kref);
+ 2 * lport->r_a_tov)) {
+ fc_rport_error_retry(rdata, -FC_EX_XMIT_ERR);
+ kref_put(&rdata->kref, fc_rport_destroy);
+ }
+}
+
+/**
+ * fc_rport_recv_rtv_req() - Handler for Read Timeout Value (RTV) requests
+ * @rdata: The remote port that sent the RTV request
+ * @in_fp: The RTV request frame
+ *
+ * Locking Note: Called with the lport and rport locks held.
+ */
+static void fc_rport_recv_rtv_req(struct fc_rport_priv *rdata,
+ struct fc_frame *in_fp)
+{
+ struct fc_lport *lport = rdata->local_port;
+ struct fc_frame *fp;
+ struct fc_els_rtv_acc *rtv;
+ struct fc_seq_els_data rjt_data;
+
+ FC_RPORT_DBG(rdata, "Received RTV request\n");
+
+ fp = fc_frame_alloc(lport, sizeof(*rtv));
+ if (!fp) {
+ rjt_data.reason = ELS_RJT_UNAB;
+ rjt_data.explan = ELS_EXPL_INSUF_RES;
+ fc_seq_els_rsp_send(in_fp, ELS_LS_RJT, &rjt_data);
+ goto drop;
+ }
+ rtv = fc_frame_payload_get(fp, sizeof(*rtv));
+ rtv->rtv_cmd = ELS_LS_ACC;
+ rtv->rtv_r_a_tov = htonl(lport->r_a_tov);
+ rtv->rtv_e_d_tov = htonl(lport->e_d_tov);
+ rtv->rtv_toq = 0;
+ fc_fill_reply_hdr(fp, in_fp, FC_RCTL_ELS_REP, 0);
+ lport->tt.frame_send(lport, fp);
+drop:
+ fc_frame_free(in_fp);
}
/**
@@ -1262,15 +1442,16 @@ static void fc_rport_enter_rtv(struct fc_rport_priv *rdata)
- * @lport_arg: The local port
+ * @rdata_arg: The remote port that sent the LOGO response
*/
static void fc_rport_logo_resp(struct fc_seq *sp, struct fc_frame *fp,
- void *lport_arg)
+ void *rdata_arg)
{
- struct fc_lport *lport = lport_arg;
+ struct fc_rport_priv *rdata = rdata_arg;
+ struct fc_lport *lport = rdata->local_port;
FC_RPORT_ID_DBG(lport, fc_seq_exch(sp)->did,
"Received a LOGO %s\n", fc_els_resp_type(fp));
- if (IS_ERR(fp))
- return;
- fc_frame_free(fp);
+ if (!IS_ERR(fp))
+ fc_frame_free(fp);
+ kref_put(&rdata->kref, fc_rport_destroy);
}
/**
@@ -1279,6 +1460,8 @@ static void fc_rport_logo_resp(struct fc_seq *sp, struct fc_frame *fp,
*
* Locking Note: The rport lock is expected to be held before calling
* this routine.
+ *
+ * Reference counting: increments kref when sending ELS
*/
static void fc_rport_enter_logo(struct fc_rport_priv *rdata)
{
@@ -1291,8 +1474,10 @@ static void fc_rport_enter_logo(struct fc_rport_priv *rdata)
fp = fc_frame_alloc(lport, sizeof(struct fc_els_logo));
if (!fp)
return;
- (void)lport->tt.elsct_send(lport, rdata->ids.port_id, fp, ELS_LOGO,
- fc_rport_logo_resp, lport, 0);
+ kref_get(&rdata->kref);
+ if (!lport->tt.elsct_send(lport, rdata->ids.port_id, fp, ELS_LOGO,
+ fc_rport_logo_resp, rdata, 0))
+ kref_put(&rdata->kref, fc_rport_destroy);
}
/**
@@ -1312,10 +1497,13 @@ static void fc_rport_adisc_resp(struct fc_seq *sp, struct fc_frame *fp,
struct fc_els_adisc *adisc;
u8 op;
- mutex_lock(&rdata->rp_mutex);
-
FC_RPORT_DBG(rdata, "Received a ADISC response\n");
+ if (fp == ERR_PTR(-FC_EX_CLOSED))
+ goto put;
+
+ mutex_lock(&rdata->rp_mutex);
+
if (rdata->rp_state != RPORT_ST_ADISC) {
FC_RPORT_DBG(rdata, "Received a ADISC resp but in state %s\n",
fc_rport_state(rdata));
@@ -1325,7 +1513,7 @@ static void fc_rport_adisc_resp(struct fc_seq *sp, struct fc_frame *fp,
}
if (IS_ERR(fp)) {
- fc_rport_error(rdata, fp);
+ fc_rport_error(rdata, PTR_ERR(fp));
goto err;
}
@@ -1350,7 +1538,8 @@ out:
fc_frame_free(fp);
err:
mutex_unlock(&rdata->rp_mutex);
- kref_put(&rdata->kref, rdata->local_port->tt.rport_destroy);
+put:
+ kref_put(&rdata->kref, fc_rport_destroy);
}
/**
@@ -1359,6 +1548,8 @@ err:
*
* Locking Note: The rport lock is expected to be held before calling
* this routine.
+ *
+ * Reference counting: increments kref when sending ELS
*/
static void fc_rport_enter_adisc(struct fc_rport_priv *rdata)
{
@@ -1372,15 +1563,16 @@ static void fc_rport_enter_adisc(struct fc_rport_priv *rdata)
fp = fc_frame_alloc(lport, sizeof(struct fc_els_adisc));
if (!fp) {
- fc_rport_error_retry(rdata, fp);
+ fc_rport_error_retry(rdata, -FC_EX_ALLOC_ERR);
return;
}
+ kref_get(&rdata->kref);
if (!lport->tt.elsct_send(lport, rdata->ids.port_id, fp, ELS_ADISC,
fc_rport_adisc_resp, rdata,
- 2 * lport->r_a_tov))
- fc_rport_error_retry(rdata, NULL);
- else
- kref_get(&rdata->kref);
+ 2 * lport->r_a_tov)) {
+ fc_rport_error_retry(rdata, -FC_EX_XMIT_ERR);
+ kref_put(&rdata->kref, fc_rport_destroy);
+ }
}
/**
@@ -1404,7 +1596,7 @@ static void fc_rport_recv_adisc_req(struct fc_rport_priv *rdata,
if (!adisc) {
rjt_data.reason = ELS_RJT_PROT;
rjt_data.explan = ELS_EXPL_INV_LEN;
- lport->tt.seq_els_rsp_send(in_fp, ELS_LS_RJT, &rjt_data);
+ fc_seq_els_rsp_send(in_fp, ELS_LS_RJT, &rjt_data);
goto drop;
}
@@ -1480,7 +1672,7 @@ static void fc_rport_recv_rls_req(struct fc_rport_priv *rdata,
goto out;
out_rjt:
- lport->tt.seq_els_rsp_send(rx_fp, ELS_LS_RJT, &rjt_data);
+ fc_seq_els_rsp_send(rx_fp, ELS_LS_RJT, &rjt_data);
out:
fc_frame_free(rx_fp);
}
@@ -1494,15 +1686,21 @@ out:
* The ELS opcode has already been validated by the caller.
*
* Locking Note: Called with the lport lock held.
+ *
+ * Reference counting: does not modify kref
*/
static void fc_rport_recv_els_req(struct fc_lport *lport, struct fc_frame *fp)
{
struct fc_rport_priv *rdata;
struct fc_seq_els_data els_data;
- rdata = lport->tt.rport_lookup(lport, fc_frame_sid(fp));
- if (!rdata)
+ rdata = fc_rport_lookup(lport, fc_frame_sid(fp));
+ if (!rdata) {
+ FC_RPORT_ID_DBG(lport, fc_frame_sid(fp),
+ "Received ELS 0x%02x from non-logged-in port\n",
+ fc_frame_payload_op(fp));
goto reject;
+ }
mutex_lock(&rdata->rp_mutex);
@@ -1512,9 +1710,21 @@ static void fc_rport_recv_els_req(struct fc_lport *lport, struct fc_frame *fp)
case RPORT_ST_READY:
case RPORT_ST_ADISC:
break;
+ case RPORT_ST_PLOGI:
+ if (fc_frame_payload_op(fp) == ELS_PRLI) {
+ FC_RPORT_DBG(rdata, "Reject ELS PRLI "
+ "while in state %s\n",
+ fc_rport_state(rdata));
+ mutex_unlock(&rdata->rp_mutex);
+ kref_put(&rdata->kref, fc_rport_destroy);
+ goto busy;
+ }
default:
+ FC_RPORT_DBG(rdata,
+ "Reject ELS 0x%02x while in state %s\n",
+ fc_frame_payload_op(fp), fc_rport_state(rdata));
mutex_unlock(&rdata->rp_mutex);
- kref_put(&rdata->kref, lport->tt.rport_destroy);
+ kref_put(&rdata->kref, fc_rport_destroy);
goto reject;
}
@@ -1529,30 +1739,41 @@ static void fc_rport_recv_els_req(struct fc_lport *lport, struct fc_frame *fp)
fc_rport_recv_adisc_req(rdata, fp);
break;
case ELS_RRQ:
- lport->tt.seq_els_rsp_send(fp, ELS_RRQ, NULL);
+ fc_seq_els_rsp_send(fp, ELS_RRQ, NULL);
fc_frame_free(fp);
break;
case ELS_REC:
- lport->tt.seq_els_rsp_send(fp, ELS_REC, NULL);
+ fc_seq_els_rsp_send(fp, ELS_REC, NULL);
fc_frame_free(fp);
break;
case ELS_RLS:
fc_rport_recv_rls_req(rdata, fp);
break;
+ case ELS_RTV:
+ fc_rport_recv_rtv_req(rdata, fp);
+ break;
default:
fc_frame_free(fp); /* can't happen */
break;
}
mutex_unlock(&rdata->rp_mutex);
- kref_put(&rdata->kref, rdata->local_port->tt.rport_destroy);
+ kref_put(&rdata->kref, fc_rport_destroy);
return;
reject:
els_data.reason = ELS_RJT_UNAB;
els_data.explan = ELS_EXPL_PLOGI_REQD;
- lport->tt.seq_els_rsp_send(fp, ELS_LS_RJT, &els_data);
+ fc_seq_els_rsp_send(fp, ELS_LS_RJT, &els_data);
+ fc_frame_free(fp);
+ return;
+
+busy:
+ els_data.reason = ELS_RJT_BUSY;
+ els_data.explan = ELS_EXPL_NONE;
+ fc_seq_els_rsp_send(fp, ELS_LS_RJT, &els_data);
fc_frame_free(fp);
+ return;
}
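Two distinct LS_RJT flavours are used above; in sketch form, the reply data each path fills in:

	struct fc_seq_els_data rjt_plogi_reqd = {
		.reason = ELS_RJT_UNAB,		/* sender must PLOGI first */
		.explan = ELS_EXPL_PLOGI_REQD,
	};
	struct fc_seq_els_data rjt_busy = {
		.reason = ELS_RJT_BUSY,		/* PRLI raced our own PLOGI */
		.explan = ELS_EXPL_NONE,
	};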
/**
@@ -1561,8 +1782,10 @@ reject:
* @fp: The request frame
*
* Locking Note: Called with the lport lock held.
+ *
+ * Reference counting: does not modify kref
*/
-static void fc_rport_recv_req(struct fc_lport *lport, struct fc_frame *fp)
+void fc_rport_recv_req(struct fc_lport *lport, struct fc_frame *fp)
{
struct fc_seq_els_data els_data;
@@ -1588,16 +1811,18 @@ static void fc_rport_recv_req(struct fc_lport *lport, struct fc_frame *fp)
case ELS_RRQ:
case ELS_REC:
case ELS_RLS:
+ case ELS_RTV:
fc_rport_recv_els_req(lport, fp);
break;
default:
els_data.reason = ELS_RJT_UNSUP;
els_data.explan = ELS_EXPL_NONE;
- lport->tt.seq_els_rsp_send(fp, ELS_LS_RJT, &els_data);
+ fc_seq_els_rsp_send(fp, ELS_LS_RJT, &els_data);
fc_frame_free(fp);
break;
}
}
+EXPORT_SYMBOL(fc_rport_recv_req);
/**
* fc_rport_recv_plogi_req() - Handler for Port Login (PLOGI) requests
@@ -1605,6 +1830,8 @@ static void fc_rport_recv_req(struct fc_lport *lport, struct fc_frame *fp)
* @rx_fp: The PLOGI request frame
*
* Locking Note: The rport lock is held before calling this function.
+ *
+ * Reference counting: increments kref on return
*/
static void fc_rport_recv_plogi_req(struct fc_lport *lport,
struct fc_frame *rx_fp)
@@ -1630,7 +1857,7 @@ static void fc_rport_recv_plogi_req(struct fc_lport *lport,
disc = &lport->disc;
mutex_lock(&disc->disc_mutex);
- rdata = lport->tt.rport_create(lport, sid);
+ rdata = fc_rport_create(lport, sid);
if (!rdata) {
mutex_unlock(&disc->disc_mutex);
rjt_data.reason = ELS_RJT_UNAB;
@@ -1718,7 +1945,7 @@ out:
return;
reject:
- lport->tt.seq_els_rsp_send(fp, ELS_LS_RJT, &rjt_data);
+ fc_seq_els_rsp_send(fp, ELS_LS_RJT, &rjt_data);
fc_frame_free(fp);
}
@@ -1744,7 +1971,6 @@ static void fc_rport_recv_prli_req(struct fc_rport_priv *rdata,
unsigned int len;
unsigned int plen;
enum fc_els_spp_resp resp;
- enum fc_els_spp_resp passive;
struct fc_seq_els_data rjt_data;
struct fc4_prov *prov;
@@ -1794,15 +2020,21 @@ static void fc_rport_recv_prli_req(struct fc_rport_priv *rdata,
resp = 0;
if (rspp->spp_type < FC_FC4_PROV_SIZE) {
+ enum fc_els_spp_resp active = 0, passive = 0;
+
prov = fc_active_prov[rspp->spp_type];
if (prov)
- resp = prov->prli(rdata, plen, rspp, spp);
+ active = prov->prli(rdata, plen, rspp, spp);
prov = fc_passive_prov[rspp->spp_type];
- if (prov) {
+ if (prov)
passive = prov->prli(rdata, plen, rspp, spp);
- if (!resp || passive == FC_SPP_RESP_ACK)
- resp = passive;
- }
+ if (!active || passive == FC_SPP_RESP_ACK)
+ resp = passive;
+ else
+ resp = active;
+ FC_RPORT_DBG(rdata, "PRLI rspp type %x "
+ "active %x passive %x\n",
+ rspp->spp_type, active, passive);
}
if (!resp) {
if (spp->spp_flags & FC_SPP_EST_IMG_PAIR)
@@ -1823,20 +2055,13 @@ static void fc_rport_recv_prli_req(struct fc_rport_priv *rdata,
fc_fill_reply_hdr(fp, rx_fp, FC_RCTL_ELS_REP, 0);
lport->tt.frame_send(lport, fp);
- switch (rdata->rp_state) {
- case RPORT_ST_PRLI:
- fc_rport_enter_ready(rdata);
- break;
- default:
- break;
- }
goto drop;
reject_len:
rjt_data.reason = ELS_RJT_PROT;
rjt_data.explan = ELS_EXPL_INV_LEN;
reject:
- lport->tt.seq_els_rsp_send(rx_fp, ELS_LS_RJT, &rjt_data);
+ fc_seq_els_rsp_send(rx_fp, ELS_LS_RJT, &rjt_data);
drop:
fc_frame_free(rx_fp);
}
@@ -1907,7 +2132,7 @@ reject_len:
rjt_data.reason = ELS_RJT_PROT;
rjt_data.explan = ELS_EXPL_INV_LEN;
reject:
- lport->tt.seq_els_rsp_send(rx_fp, ELS_LS_RJT, &rjt_data);
+ fc_seq_els_rsp_send(rx_fp, ELS_LS_RJT, &rjt_data);
drop:
fc_frame_free(rx_fp);
}
@@ -1919,17 +2144,19 @@ drop:
*
* Locking Note: The rport lock is expected to be held before calling
* this function.
+ *
+ * Reference counting: drops kref on return
*/
static void fc_rport_recv_logo_req(struct fc_lport *lport, struct fc_frame *fp)
{
struct fc_rport_priv *rdata;
u32 sid;
- lport->tt.seq_els_rsp_send(fp, ELS_LS_ACC, NULL);
+ fc_seq_els_rsp_send(fp, ELS_LS_ACC, NULL);
sid = fc_frame_sid(fp);
- rdata = lport->tt.rport_lookup(lport, sid);
+ rdata = fc_rport_lookup(lport, sid);
if (rdata) {
mutex_lock(&rdata->rp_mutex);
FC_RPORT_DBG(rdata, "Received LOGO request while in state %s\n",
@@ -1937,7 +2164,7 @@ static void fc_rport_recv_logo_req(struct fc_lport *lport, struct fc_frame *fp)
fc_rport_enter_delete(rdata, RPORT_EV_STOP);
mutex_unlock(&rdata->rp_mutex);
- kref_put(&rdata->kref, rdata->local_port->tt.rport_destroy);
+ kref_put(&rdata->kref, fc_rport_destroy);
} else
FC_RPORT_ID_DBG(lport, sid,
"Received LOGO from non-logged-in port\n");
@@ -1947,41 +2174,11 @@ static void fc_rport_recv_logo_req(struct fc_lport *lport, struct fc_frame *fp)
/**
* fc_rport_flush_queue() - Flush the rport_event_queue
*/
-static void fc_rport_flush_queue(void)
+void fc_rport_flush_queue(void)
{
flush_workqueue(rport_event_queue);
}
-
-/**
- * fc_rport_init() - Initialize the remote port layer for a local port
- * @lport: The local port to initialize the remote port layer for
- */
-int fc_rport_init(struct fc_lport *lport)
-{
- if (!lport->tt.rport_lookup)
- lport->tt.rport_lookup = fc_rport_lookup;
-
- if (!lport->tt.rport_create)
- lport->tt.rport_create = fc_rport_create;
-
- if (!lport->tt.rport_login)
- lport->tt.rport_login = fc_rport_login;
-
- if (!lport->tt.rport_logoff)
- lport->tt.rport_logoff = fc_rport_logoff;
-
- if (!lport->tt.rport_recv_req)
- lport->tt.rport_recv_req = fc_rport_recv_req;
-
- if (!lport->tt.rport_flush_queue)
- lport->tt.rport_flush_queue = fc_rport_flush_queue;
-
- if (!lport->tt.rport_destroy)
- lport->tt.rport_destroy = fc_rport_destroy;
-
- return 0;
-}
-EXPORT_SYMBOL(fc_rport_init);
+EXPORT_SYMBOL(fc_rport_flush_queue);
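With fc_rport_init() gone, the rport operations no longer go through the per-lport function template; users of libfc call the exported symbols directly. A sketch of the resulting call-site change:

	/* before: indirect through the transport template */
	rdata = lport->tt.rport_lookup(lport, port_id);

	/* after: call the exported helper directly */
	rdata = fc_rport_lookup(lport, port_id);
	if (rdata)
		kref_put(&rdata->kref, fc_rport_destroy);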
/**
* fc_rport_fcp_prli() - Handle incoming PRLI for the FCP initiator.
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index b484859464f6..8a20b4e86224 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -648,6 +648,10 @@ struct lpfc_hba {
#define HBA_FCP_IOQ_FLUSH 0x8000 /* FCP I/O queues being flushed */
#define HBA_FW_DUMP_OP 0x10000 /* Skips fn reset before FW dump */
#define HBA_RECOVERABLE_UE 0x20000 /* Firmware supports recoverable UE */
+#define HBA_FORCED_LINK_SPEED 0x40000 /*
+ * Firmware supports Forced Link Speed
+ * capability
+ */
uint32_t fcp_ring_in_use; /* When polling test if intr-hndlr active*/
struct lpfc_dmabuf slim2p;
@@ -746,6 +750,8 @@ struct lpfc_hba {
uint32_t cfg_oas_priority;
uint32_t cfg_XLanePriority;
uint32_t cfg_enable_bg;
+ uint32_t cfg_prot_mask;
+ uint32_t cfg_prot_guard;
uint32_t cfg_hostmem_hgp;
uint32_t cfg_log_verbose;
uint32_t cfg_aer_support;
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index f1019908800e..c84775562c65 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -2759,18 +2759,14 @@ LPFC_ATTR_R(enable_npiv, 1, 0, 1,
LPFC_ATTR_R(fcf_failover_policy, 1, 1, 2,
"FCF Fast failover=1 Priority failover=2");
-int lpfc_enable_rrq = 2;
-module_param(lpfc_enable_rrq, int, S_IRUGO);
-MODULE_PARM_DESC(lpfc_enable_rrq, "Enable RRQ functionality");
-lpfc_param_show(enable_rrq);
/*
# lpfc_enable_rrq: Track XRI/OXID reuse after IO failures
# 0x0 = disabled, XRI/OXID use not tracked.
# 0x1 = XRI/OXID reuse is timed with ratov, RRQ sent.
# 0x2 = XRI/OXID reuse is timed with ratov, No RRQ sent.
*/
-lpfc_param_init(enable_rrq, 2, 0, 2);
-static DEVICE_ATTR(lpfc_enable_rrq, S_IRUGO, lpfc_enable_rrq_show, NULL);
+LPFC_ATTR_R(enable_rrq, 2, 0, 2,
+ "Enable RRQ functionality");
/*
# lpfc_suppress_link_up: Bring link up at initialization
@@ -2827,14 +2823,8 @@ lpfc_txcmplq_hw_show(struct device *dev, struct device_attribute *attr,
static DEVICE_ATTR(txcmplq_hw, S_IRUGO,
lpfc_txcmplq_hw_show, NULL);
-int lpfc_iocb_cnt = 2;
-module_param(lpfc_iocb_cnt, int, S_IRUGO);
-MODULE_PARM_DESC(lpfc_iocb_cnt,
+LPFC_ATTR_R(iocb_cnt, 2, 1, 5,
"Number of IOCBs alloc for ELS, CT, and ABTS: 1k to 5k IOCBs");
-lpfc_param_show(iocb_cnt);
-lpfc_param_init(iocb_cnt, 2, 1, 5);
-static DEVICE_ATTR(lpfc_iocb_cnt, S_IRUGO,
- lpfc_iocb_cnt_show, NULL);
/*
# lpfc_nodev_tmo: If set, it will hold all I/O errors on devices that disappear
@@ -2887,9 +2877,9 @@ lpfc_nodev_tmo_init(struct lpfc_vport *vport, int val)
vport->cfg_nodev_tmo = vport->cfg_devloss_tmo;
if (val != LPFC_DEF_DEVLOSS_TMO)
lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
- "0407 Ignoring nodev_tmo module "
- "parameter because devloss_tmo is "
- "set.\n");
+ "0407 Ignoring lpfc_nodev_tmo module "
+ "parameter because lpfc_devloss_tmo "
+ "is set.\n");
return 0;
}
@@ -2948,8 +2938,8 @@ lpfc_nodev_tmo_set(struct lpfc_vport *vport, int val)
if (vport->dev_loss_tmo_changed ||
(lpfc_devloss_tmo != LPFC_DEF_DEVLOSS_TMO)) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
- "0401 Ignoring change to nodev_tmo "
- "because devloss_tmo is set.\n");
+ "0401 Ignoring change to lpfc_nodev_tmo "
+ "because lpfc_devloss_tmo is set.\n");
return 0;
}
if (val >= LPFC_MIN_DEVLOSS_TMO && val <= LPFC_MAX_DEVLOSS_TMO) {
@@ -2964,7 +2954,7 @@ lpfc_nodev_tmo_set(struct lpfc_vport *vport, int val)
return 0;
}
lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
- "0403 lpfc_nodev_tmo attribute cannot be set to"
+ "0403 lpfc_nodev_tmo attribute cannot be set to "
"%d, allowed range is [%d, %d]\n",
val, LPFC_MIN_DEVLOSS_TMO, LPFC_MAX_DEVLOSS_TMO);
return -EINVAL;
@@ -3015,8 +3005,8 @@ lpfc_devloss_tmo_set(struct lpfc_vport *vport, int val)
}
lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
- "0404 lpfc_devloss_tmo attribute cannot be set to"
- " %d, allowed range is [%d, %d]\n",
+ "0404 lpfc_devloss_tmo attribute cannot be set to "
+ "%d, allowed range is [%d, %d]\n",
val, LPFC_MIN_DEVLOSS_TMO, LPFC_MAX_DEVLOSS_TMO);
return -EINVAL;
}
@@ -3204,6 +3194,8 @@ LPFC_VPORT_ATTR_R(scan_down, 1, 0, 1,
# Set loop mode if you want to run as an NL_Port. Value range is [0,0x6].
# Default value is 0.
*/
+LPFC_ATTR(topology, 0, 0, 6,
+ "Select Fibre Channel topology");
/**
* lpfc_topology_set - Set the adapters topology field
@@ -3281,11 +3273,8 @@ lpfc_topology_store(struct device *dev, struct device_attribute *attr,
phba->brd_no, val);
return -EINVAL;
}
-static int lpfc_topology = 0;
-module_param(lpfc_topology, int, S_IRUGO);
-MODULE_PARM_DESC(lpfc_topology, "Select Fibre Channel topology");
+
lpfc_param_show(topology)
-lpfc_param_init(topology, 0, 0, 6)
static DEVICE_ATTR(lpfc_topology, S_IRUGO | S_IWUSR,
lpfc_topology_show, lpfc_topology_store);
@@ -3679,7 +3668,12 @@ lpfc_link_speed_store(struct device *dev, struct device_attribute *attr,
int nolip = 0;
const char *val_buf = buf;
int err;
- uint32_t prev_val;
+ uint32_t prev_val, if_type;
+
+ if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
+ if (if_type == LPFC_SLI_INTF_IF_TYPE_2 &&
+ phba->hba_flag & HBA_FORCED_LINK_SPEED)
+ return -EPERM;
if (!strncmp(buf, "nolip ", strlen("nolip "))) {
nolip = 1;
@@ -3789,6 +3783,9 @@ static DEVICE_ATTR(lpfc_link_speed, S_IRUGO | S_IWUSR,
# 1 = aer supported and enabled (default)
# Value range is [0,1]. Default value is 1.
*/
+LPFC_ATTR(aer_support, 1, 0, 1,
+ "Enable PCIe device AER support");
+lpfc_param_show(aer_support)
/**
* lpfc_aer_support_store - Set the adapter for aer support
@@ -3871,46 +3868,6 @@ lpfc_aer_support_store(struct device *dev, struct device_attribute *attr,
return rc;
}
-static int lpfc_aer_support = 1;
-module_param(lpfc_aer_support, int, S_IRUGO);
-MODULE_PARM_DESC(lpfc_aer_support, "Enable PCIe device AER support");
-lpfc_param_show(aer_support)
-
-/**
- * lpfc_aer_support_init - Set the initial adapters aer support flag
- * @phba: lpfc_hba pointer.
- * @val: enable aer or disable aer flag.
- *
- * Description:
- * If val is in a valid range [0,1], then set the adapter's initial
- * cfg_aer_support field. It will be up to the driver's probe_one
- * routine to determine whether the device's AER support can be set
- * or not.
- *
- * Notes:
- * If the value is not in range log a kernel error message, and
- * choose the default value of setting AER support and return.
- *
- * Returns:
- * zero if val saved.
- * -EINVAL val out of range
- **/
-static int
-lpfc_aer_support_init(struct lpfc_hba *phba, int val)
-{
- if (val == 0 || val == 1) {
- phba->cfg_aer_support = val;
- return 0;
- }
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "2712 lpfc_aer_support attribute value %d out "
- "of range, allowed values are 0|1, setting it "
- "to default value of 1\n", val);
- /* By default, try to enable AER on a device */
- phba->cfg_aer_support = 1;
- return -EINVAL;
-}
-
static DEVICE_ATTR(lpfc_aer_support, S_IRUGO | S_IWUSR,
lpfc_aer_support_show, lpfc_aer_support_store);
@@ -4055,39 +4012,10 @@ lpfc_sriov_nr_virtfn_store(struct device *dev, struct device_attribute *attr,
return rc;
}
-static int lpfc_sriov_nr_virtfn = LPFC_DEF_VFN_PER_PFN;
-module_param(lpfc_sriov_nr_virtfn, int, S_IRUGO|S_IWUSR);
-MODULE_PARM_DESC(lpfc_sriov_nr_virtfn, "Enable PCIe device SR-IOV virtual fn");
-lpfc_param_show(sriov_nr_virtfn)
-
-/**
- * lpfc_sriov_nr_virtfn_init - Set the initial sr-iov virtual function enable
- * @phba: lpfc_hba pointer.
- * @val: link speed value.
- *
- * Description:
- * If val is in a valid range [0,255], then set the adapter's initial
- * cfg_sriov_nr_virtfn field. If it's greater than the maximum, the maximum
- * number shall be used instead. It will be up to the driver's probe_one
- * routine to determine whether the device's SR-IOV is supported or not.
- *
- * Returns:
- * zero if val saved.
- * -EINVAL val out of range
- **/
-static int
-lpfc_sriov_nr_virtfn_init(struct lpfc_hba *phba, int val)
-{
- if (val >= 0 && val <= LPFC_MAX_VFN_PER_PFN) {
- phba->cfg_sriov_nr_virtfn = val;
- return 0;
- }
+LPFC_ATTR(sriov_nr_virtfn, LPFC_DEF_VFN_PER_PFN, 0, LPFC_MAX_VFN_PER_PFN,
+ "Enable PCIe device SR-IOV virtual fn");
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "3017 Enabling %d virtual functions is not "
- "allowed.\n", val);
- return -EINVAL;
-}
+lpfc_param_show(sriov_nr_virtfn)
static DEVICE_ATTR(lpfc_sriov_nr_virtfn, S_IRUGO | S_IWUSR,
lpfc_sriov_nr_virtfn_show, lpfc_sriov_nr_virtfn_store);
@@ -4251,7 +4179,8 @@ lpfc_fcp_imax_init(struct lpfc_hba *phba, int val)
}
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "3016 fcp_imax: %d out of range, using default\n", val);
+ "3016 lpfc_fcp_imax: %d out of range, using default\n",
+ val);
phba->cfg_fcp_imax = LPFC_DEF_IMAX;
return 0;
@@ -4401,8 +4330,8 @@ lpfc_fcp_cpu_map_init(struct lpfc_hba *phba, int val)
}
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "3326 fcp_cpu_map: %d out of range, using default\n",
- val);
+ "3326 lpfc_fcp_cpu_map: %d out of range, using "
+ "default\n", val);
phba->cfg_fcp_cpu_map = LPFC_DRIVER_CPU_MAP;
return 0;
@@ -4441,12 +4370,10 @@ LPFC_VPORT_ATTR_RW(first_burst_size, 0, 0, 65536,
# to limit the I/O completion time to the parameter value.
# The value is set in milliseconds.
*/
-static int lpfc_max_scsicmpl_time;
-module_param(lpfc_max_scsicmpl_time, int, S_IRUGO);
-MODULE_PARM_DESC(lpfc_max_scsicmpl_time,
+LPFC_VPORT_ATTR(max_scsicmpl_time, 0, 0, 60000,
"Use command completion time to control queue depth");
+
lpfc_vport_param_show(max_scsicmpl_time);
-lpfc_vport_param_init(max_scsicmpl_time, 0, 0, 60000);
static int
lpfc_max_scsicmpl_time_set(struct lpfc_vport *vport, int val)
{
@@ -4691,12 +4618,15 @@ unsigned int lpfc_fcp_look_ahead = LPFC_LOOK_AHEAD_OFF;
# HBA supports DIX Type 1: Host to HBA Type 1 protection
#
*/
-unsigned int lpfc_prot_mask = SHOST_DIF_TYPE1_PROTECTION |
- SHOST_DIX_TYPE0_PROTECTION |
- SHOST_DIX_TYPE1_PROTECTION;
-
-module_param(lpfc_prot_mask, uint, S_IRUGO);
-MODULE_PARM_DESC(lpfc_prot_mask, "host protection mask");
+LPFC_ATTR(prot_mask,
+ (SHOST_DIF_TYPE1_PROTECTION |
+ SHOST_DIX_TYPE0_PROTECTION |
+ SHOST_DIX_TYPE1_PROTECTION),
+ 0,
+ (SHOST_DIF_TYPE1_PROTECTION |
+ SHOST_DIX_TYPE0_PROTECTION |
+ SHOST_DIX_TYPE1_PROTECTION),
+ "T10-DIF host protection capabilities mask");
/*
# lpfc_prot_guard:
@@ -4706,9 +4636,9 @@ MODULE_PARM_DESC(lpfc_prot_mask, "host protection mask");
# - Default will result in registering capabilities for all guard types
#
*/
-unsigned char lpfc_prot_guard = SHOST_DIX_GUARD_IP;
-module_param(lpfc_prot_guard, byte, S_IRUGO);
-MODULE_PARM_DESC(lpfc_prot_guard, "host protection guard type");
+LPFC_ATTR(prot_guard,
+ SHOST_DIX_GUARD_IP, SHOST_DIX_GUARD_CRC, SHOST_DIX_GUARD_IP,
+ "T10-DIF host protection guard type");
/*
* Delay initial NPort discovery when Clean Address bit is cleared in
@@ -5828,6 +5758,8 @@ lpfc_get_cfgparam(struct lpfc_hba *phba)
phba->cfg_oas_flags = 0;
phba->cfg_oas_priority = 0;
lpfc_enable_bg_init(phba, lpfc_enable_bg);
+ lpfc_prot_mask_init(phba, lpfc_prot_mask);
+ lpfc_prot_guard_init(phba, lpfc_prot_guard);
if (phba->sli_rev == LPFC_SLI_REV4)
phba->cfg_poll = 0;
else
diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c
index 05dcc2abd541..7dca4d6a8883 100644
--- a/drivers/scsi/lpfc/lpfc_bsg.c
+++ b/drivers/scsi/lpfc/lpfc_bsg.c
@@ -24,6 +24,7 @@
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/list.h>
+#include <linux/bsg-lib.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
@@ -97,7 +98,7 @@ struct lpfc_bsg_menlo {
#define TYPE_MENLO 4
struct bsg_job_data {
uint32_t type;
- struct fc_bsg_job *set_job; /* job waiting for this iocb to finish */
+ struct bsg_job *set_job; /* job waiting for this iocb to finish */
union {
struct lpfc_bsg_event *evt;
struct lpfc_bsg_iocb iocb;
@@ -211,7 +212,7 @@ lpfc_alloc_bsg_buffers(struct lpfc_hba *phba, unsigned int size,
static unsigned int
lpfc_bsg_copy_data(struct lpfc_dmabuf *dma_buffers,
- struct fc_bsg_buffer *bsg_buffers,
+ struct bsg_buffer *bsg_buffers,
unsigned int bytes_to_transfer, int to_buffers)
{
@@ -297,7 +298,8 @@ lpfc_bsg_send_mgmt_cmd_cmp(struct lpfc_hba *phba,
struct lpfc_iocbq *rspiocbq)
{
struct bsg_job_data *dd_data;
- struct fc_bsg_job *job;
+ struct bsg_job *job;
+ struct fc_bsg_reply *bsg_reply;
IOCB_t *rsp;
struct lpfc_dmabuf *bmp, *cmp, *rmp;
struct lpfc_nodelist *ndlp;
@@ -312,6 +314,7 @@ lpfc_bsg_send_mgmt_cmd_cmp(struct lpfc_hba *phba,
spin_lock_irqsave(&phba->ct_ev_lock, flags);
job = dd_data->set_job;
if (job) {
+ bsg_reply = job->reply;
/* Prevent timeout handling from trying to abort job */
job->dd_data = NULL;
}
@@ -350,7 +353,7 @@ lpfc_bsg_send_mgmt_cmd_cmp(struct lpfc_hba *phba,
}
} else {
rsp_size = rsp->un.genreq64.bdl.bdeSize;
- job->reply->reply_payload_rcv_len =
+ bsg_reply->reply_payload_rcv_len =
lpfc_bsg_copy_data(rmp, &job->reply_payload,
rsp_size, 0);
}
@@ -367,8 +370,9 @@ lpfc_bsg_send_mgmt_cmd_cmp(struct lpfc_hba *phba,
/* Complete the job if the job is still active */
if (job) {
- job->reply->result = rc;
- job->job_done(job);
+ bsg_reply->result = rc;
+ bsg_job_done(job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
}
return;
}
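The completion rewrite repeated through the rest of this file follows one template; side by side, as a sketch:

	/* old fc_bsg_job idiom, removed by this patch */
	job->reply->result = rc;
	job->job_done(job);

	/* new bsg-lib idiom */
	struct fc_bsg_reply *bsg_reply = job->reply;

	bsg_reply->result = rc;
	bsg_job_done(job, bsg_reply->result,
		     bsg_reply->reply_payload_rcv_len);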
@@ -378,12 +382,13 @@ lpfc_bsg_send_mgmt_cmd_cmp(struct lpfc_hba *phba,
* @job: fc_bsg_job to handle
**/
static int
-lpfc_bsg_send_mgmt_cmd(struct fc_bsg_job *job)
+lpfc_bsg_send_mgmt_cmd(struct bsg_job *job)
{
- struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
+ struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job));
struct lpfc_hba *phba = vport->phba;
- struct lpfc_rport_data *rdata = job->rport->dd_data;
+ struct lpfc_rport_data *rdata = fc_bsg_to_rport(job)->dd_data;
struct lpfc_nodelist *ndlp = rdata->pnode;
+ struct fc_bsg_reply *bsg_reply = job->reply;
struct ulp_bde64 *bpl = NULL;
uint32_t timeout;
struct lpfc_iocbq *cmdiocbq = NULL;
@@ -398,7 +403,7 @@ lpfc_bsg_send_mgmt_cmd(struct fc_bsg_job *job)
int iocb_stat;
/* in case no data is transferred */
- job->reply->reply_payload_rcv_len = 0;
+ bsg_reply->reply_payload_rcv_len = 0;
/* allocate our bsg tracking structure */
dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
@@ -542,7 +547,7 @@ no_ndlp:
kfree(dd_data);
no_dd_data:
/* make error code available to userspace */
- job->reply->result = rc;
+ bsg_reply->result = rc;
job->dd_data = NULL;
return rc;
}
@@ -570,7 +575,8 @@ lpfc_bsg_rport_els_cmp(struct lpfc_hba *phba,
struct lpfc_iocbq *rspiocbq)
{
struct bsg_job_data *dd_data;
- struct fc_bsg_job *job;
+ struct bsg_job *job;
+ struct fc_bsg_reply *bsg_reply;
IOCB_t *rsp;
struct lpfc_nodelist *ndlp;
struct lpfc_dmabuf *pcmd = NULL, *prsp = NULL;
@@ -588,6 +594,7 @@ lpfc_bsg_rport_els_cmp(struct lpfc_hba *phba,
spin_lock_irqsave(&phba->ct_ev_lock, flags);
job = dd_data->set_job;
if (job) {
+ bsg_reply = job->reply;
/* Prevent timeout handling from trying to abort job */
job->dd_data = NULL;
}
@@ -609,17 +616,17 @@ lpfc_bsg_rport_els_cmp(struct lpfc_hba *phba,
if (job) {
if (rsp->ulpStatus == IOSTAT_SUCCESS) {
rsp_size = rsp->un.elsreq64.bdl.bdeSize;
- job->reply->reply_payload_rcv_len =
+ bsg_reply->reply_payload_rcv_len =
sg_copy_from_buffer(job->reply_payload.sg_list,
job->reply_payload.sg_cnt,
prsp->virt,
rsp_size);
} else if (rsp->ulpStatus == IOSTAT_LS_RJT) {
- job->reply->reply_payload_rcv_len =
+ bsg_reply->reply_payload_rcv_len =
sizeof(struct fc_bsg_ctels_reply);
/* LS_RJT data returned in word 4 */
rjt_data = (uint8_t *)&rsp->un.ulpWord[4];
- els_reply = &job->reply->reply_data.ctels_reply;
+ els_reply = &bsg_reply->reply_data.ctels_reply;
els_reply->status = FC_CTELS_STATUS_REJECT;
els_reply->rjt_data.action = rjt_data[3];
els_reply->rjt_data.reason_code = rjt_data[2];
@@ -637,8 +644,9 @@ lpfc_bsg_rport_els_cmp(struct lpfc_hba *phba,
/* Complete the job if the job is still active */
if (job) {
- job->reply->result = rc;
- job->job_done(job);
+ bsg_reply->result = rc;
+ bsg_job_done(job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
}
return;
}
@@ -648,12 +656,14 @@ lpfc_bsg_rport_els_cmp(struct lpfc_hba *phba,
* @job: fc_bsg_job to handle
**/
static int
-lpfc_bsg_rport_els(struct fc_bsg_job *job)
+lpfc_bsg_rport_els(struct bsg_job *job)
{
- struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
+ struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job));
struct lpfc_hba *phba = vport->phba;
- struct lpfc_rport_data *rdata = job->rport->dd_data;
+ struct lpfc_rport_data *rdata = fc_bsg_to_rport(job)->dd_data;
struct lpfc_nodelist *ndlp = rdata->pnode;
+ struct fc_bsg_request *bsg_request = job->request;
+ struct fc_bsg_reply *bsg_reply = job->reply;
uint32_t elscmd;
uint32_t cmdsize;
struct lpfc_iocbq *cmdiocbq;
@@ -664,7 +674,7 @@ lpfc_bsg_rport_els(struct fc_bsg_job *job)
int rc = 0;
/* in case no data is transferred */
- job->reply->reply_payload_rcv_len = 0;
+ bsg_reply->reply_payload_rcv_len = 0;
/* verify the els command is not greater than the
* maximum ELS transfer size.
@@ -684,7 +694,7 @@ lpfc_bsg_rport_els(struct fc_bsg_job *job)
goto no_dd_data;
}
- elscmd = job->request->rqst_data.r_els.els_code;
+ elscmd = bsg_request->rqst_data.r_els.els_code;
cmdsize = job->request_payload.payload_len;
if (!lpfc_nlp_get(ndlp)) {
@@ -771,7 +781,7 @@ free_dd_data:
no_dd_data:
/* make error code available to userspace */
- job->reply->result = rc;
+ bsg_reply->result = rc;
job->dd_data = NULL;
return rc;
}
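The job context accessors change the same way: direct fc_bsg_job member access becomes the bsg-lib/fc-transport helpers. In sketch form:

	/* old: struct fc_bsg_job members */
	vport = (struct lpfc_vport *)job->shost->hostdata;
	rdata = job->rport->dd_data;

	/* new: accessor helpers */
	vport = shost_priv(fc_bsg_to_shost(job));
	rdata = fc_bsg_to_rport(job)->dd_data;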
@@ -917,7 +927,8 @@ lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
struct lpfc_dmabuf *bdeBuf2 = piocbq->context3;
struct lpfc_hbq_entry *hbqe;
struct lpfc_sli_ct_request *ct_req;
- struct fc_bsg_job *job = NULL;
+ struct bsg_job *job = NULL;
+ struct fc_bsg_reply *bsg_reply;
struct bsg_job_data *dd_data = NULL;
unsigned long flags;
int size = 0;
@@ -1120,13 +1131,15 @@ lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
dd_data->set_job = NULL;
lpfc_bsg_event_unref(evt);
if (job) {
- job->reply->reply_payload_rcv_len = size;
+ bsg_reply = job->reply;
+ bsg_reply->reply_payload_rcv_len = size;
/* make error code available to userspace */
- job->reply->result = 0;
+ bsg_reply->result = 0;
job->dd_data = NULL;
/* complete the job back to userspace */
spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
- job->job_done(job);
+ bsg_job_done(job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
spin_lock_irqsave(&phba->ct_ev_lock, flags);
}
}
@@ -1187,10 +1200,11 @@ lpfc_bsg_ct_unsol_abort(struct lpfc_hba *phba, struct hbq_dmabuf *dmabuf)
* @job: SET_EVENT fc_bsg_job
**/
static int
-lpfc_bsg_hba_set_event(struct fc_bsg_job *job)
+lpfc_bsg_hba_set_event(struct bsg_job *job)
{
- struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
+ struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job));
struct lpfc_hba *phba = vport->phba;
+ struct fc_bsg_request *bsg_request = job->request;
struct set_ct_event *event_req;
struct lpfc_bsg_event *evt;
int rc = 0;
@@ -1208,7 +1222,7 @@ lpfc_bsg_hba_set_event(struct fc_bsg_job *job)
}
event_req = (struct set_ct_event *)
- job->request->rqst_data.h_vendor.vendor_cmd;
+ bsg_request->rqst_data.h_vendor.vendor_cmd;
ev_mask = ((uint32_t)(unsigned long)event_req->type_mask &
FC_REG_EVENT_MASK);
spin_lock_irqsave(&phba->ct_ev_lock, flags);
@@ -1271,10 +1285,12 @@ job_error:
* @job: GET_EVENT fc_bsg_job
**/
static int
-lpfc_bsg_hba_get_event(struct fc_bsg_job *job)
+lpfc_bsg_hba_get_event(struct bsg_job *job)
{
- struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
+ struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job));
struct lpfc_hba *phba = vport->phba;
+ struct fc_bsg_request *bsg_request = job->request;
+ struct fc_bsg_reply *bsg_reply = job->reply;
struct get_ct_event *event_req;
struct get_ct_event_reply *event_reply;
struct lpfc_bsg_event *evt, *evt_next;
@@ -1292,10 +1308,10 @@ lpfc_bsg_hba_get_event(struct fc_bsg_job *job)
}
event_req = (struct get_ct_event *)
- job->request->rqst_data.h_vendor.vendor_cmd;
+ bsg_request->rqst_data.h_vendor.vendor_cmd;
event_reply = (struct get_ct_event_reply *)
- job->reply->reply_data.vendor_reply.vendor_rsp;
+ bsg_reply->reply_data.vendor_reply.vendor_rsp;
spin_lock_irqsave(&phba->ct_ev_lock, flags);
list_for_each_entry_safe(evt, evt_next, &phba->ct_ev_waiters, node) {
if (evt->reg_id == event_req->ev_reg_id) {
@@ -1315,7 +1331,7 @@ lpfc_bsg_hba_get_event(struct fc_bsg_job *job)
* an error indicating that there isn't anymore
*/
if (evt_dat == NULL) {
- job->reply->reply_payload_rcv_len = 0;
+ bsg_reply->reply_payload_rcv_len = 0;
rc = -ENOENT;
goto job_error;
}
@@ -1331,12 +1347,12 @@ lpfc_bsg_hba_get_event(struct fc_bsg_job *job)
event_reply->type = evt_dat->type;
event_reply->immed_data = evt_dat->immed_dat;
if (evt_dat->len > 0)
- job->reply->reply_payload_rcv_len =
+ bsg_reply->reply_payload_rcv_len =
sg_copy_from_buffer(job->request_payload.sg_list,
job->request_payload.sg_cnt,
evt_dat->data, evt_dat->len);
else
- job->reply->reply_payload_rcv_len = 0;
+ bsg_reply->reply_payload_rcv_len = 0;
if (evt_dat) {
kfree(evt_dat->data);
@@ -1347,13 +1363,14 @@ lpfc_bsg_hba_get_event(struct fc_bsg_job *job)
lpfc_bsg_event_unref(evt);
spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
job->dd_data = NULL;
- job->reply->result = 0;
- job->job_done(job);
+ bsg_reply->result = 0;
+ bsg_job_done(job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
return 0;
job_error:
job->dd_data = NULL;
- job->reply->result = rc;
+ bsg_reply->result = rc;
return rc;
}
@@ -1380,7 +1397,8 @@ lpfc_issue_ct_rsp_cmp(struct lpfc_hba *phba,
struct lpfc_iocbq *rspiocbq)
{
struct bsg_job_data *dd_data;
- struct fc_bsg_job *job;
+ struct bsg_job *job;
+ struct fc_bsg_reply *bsg_reply;
IOCB_t *rsp;
struct lpfc_dmabuf *bmp, *cmp;
struct lpfc_nodelist *ndlp;
@@ -1411,6 +1429,7 @@ lpfc_issue_ct_rsp_cmp(struct lpfc_hba *phba,
/* Copy the completed job data or set the error status */
if (job) {
+ bsg_reply = job->reply;
if (rsp->ulpStatus) {
if (rsp->ulpStatus == IOSTAT_LOCAL_REJECT) {
switch (rsp->un.ulpWord[4] & IOERR_PARAM_MASK) {
@@ -1428,7 +1447,7 @@ lpfc_issue_ct_rsp_cmp(struct lpfc_hba *phba,
rc = -EACCES;
}
} else {
- job->reply->reply_payload_rcv_len = 0;
+ bsg_reply->reply_payload_rcv_len = 0;
}
}
@@ -1442,8 +1461,9 @@ lpfc_issue_ct_rsp_cmp(struct lpfc_hba *phba,
/* Complete the job if the job is still active */
if (job) {
- job->reply->result = rc;
- job->job_done(job);
+ bsg_reply->result = rc;
+ bsg_job_done(job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
}
return;
}
@@ -1457,7 +1477,7 @@ lpfc_issue_ct_rsp_cmp(struct lpfc_hba *phba,
* @num_entry: Number of entries in the bde.
**/
static int
-lpfc_issue_ct_rsp(struct lpfc_hba *phba, struct fc_bsg_job *job, uint32_t tag,
+lpfc_issue_ct_rsp(struct lpfc_hba *phba, struct bsg_job *job, uint32_t tag,
struct lpfc_dmabuf *cmp, struct lpfc_dmabuf *bmp,
int num_entry)
{
@@ -1603,12 +1623,14 @@ no_dd_data:
* @job: SEND_MGMT_RESP fc_bsg_job
**/
static int
-lpfc_bsg_send_mgmt_rsp(struct fc_bsg_job *job)
+lpfc_bsg_send_mgmt_rsp(struct bsg_job *job)
{
- struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
+ struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job));
struct lpfc_hba *phba = vport->phba;
+ struct fc_bsg_request *bsg_request = job->request;
+ struct fc_bsg_reply *bsg_reply = job->reply;
struct send_mgmt_resp *mgmt_resp = (struct send_mgmt_resp *)
- job->request->rqst_data.h_vendor.vendor_cmd;
+ bsg_request->rqst_data.h_vendor.vendor_cmd;
struct ulp_bde64 *bpl;
struct lpfc_dmabuf *bmp = NULL, *cmp = NULL;
int bpl_entries;
@@ -1618,7 +1640,7 @@ lpfc_bsg_send_mgmt_rsp(struct fc_bsg_job *job)
int rc = 0;
/* in case no data is transferred */
- job->reply->reply_payload_rcv_len = 0;
+ bsg_reply->reply_payload_rcv_len = 0;
if (!reqbfrcnt || (reqbfrcnt > (80 * BUF_SZ_4K))) {
rc = -ERANGE;
@@ -1664,7 +1686,7 @@ send_mgmt_rsp_free_bmp:
kfree(bmp);
send_mgmt_rsp_exit:
/* make error code available to userspace */
- job->reply->result = rc;
+ bsg_reply->result = rc;
job->dd_data = NULL;
return rc;
}
@@ -1760,8 +1782,10 @@ lpfc_bsg_diag_mode_exit(struct lpfc_hba *phba)
* All of this is done in-line.
*/
static int
-lpfc_sli3_bsg_diag_loopback_mode(struct lpfc_hba *phba, struct fc_bsg_job *job)
+lpfc_sli3_bsg_diag_loopback_mode(struct lpfc_hba *phba, struct bsg_job *job)
{
+ struct fc_bsg_request *bsg_request = job->request;
+ struct fc_bsg_reply *bsg_reply = job->reply;
struct diag_mode_set *loopback_mode;
uint32_t link_flags;
uint32_t timeout;
@@ -1771,7 +1795,7 @@ lpfc_sli3_bsg_diag_loopback_mode(struct lpfc_hba *phba, struct fc_bsg_job *job)
int rc = 0;
/* no data to return just the return code */
- job->reply->reply_payload_rcv_len = 0;
+ bsg_reply->reply_payload_rcv_len = 0;
if (job->request_len < sizeof(struct fc_bsg_request) +
sizeof(struct diag_mode_set)) {
@@ -1791,7 +1815,7 @@ lpfc_sli3_bsg_diag_loopback_mode(struct lpfc_hba *phba, struct fc_bsg_job *job)
/* bring the link to diagnostic mode */
loopback_mode = (struct diag_mode_set *)
- job->request->rqst_data.h_vendor.vendor_cmd;
+ bsg_request->rqst_data.h_vendor.vendor_cmd;
link_flags = loopback_mode->type;
timeout = loopback_mode->timeout * 100;
@@ -1864,10 +1888,11 @@ loopback_mode_exit:
job_error:
/* make error code available to userspace */
- job->reply->result = rc;
+ bsg_reply->result = rc;
/* complete the job back to userspace if no error */
if (rc == 0)
- job->job_done(job);
+ bsg_job_done(job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
return rc;
}
@@ -2015,14 +2040,16 @@ lpfc_sli4_diag_fcport_reg_setup(struct lpfc_hba *phba)
* loopback mode in order to perform a diagnostic loopback test.
*/
static int
-lpfc_sli4_bsg_diag_loopback_mode(struct lpfc_hba *phba, struct fc_bsg_job *job)
+lpfc_sli4_bsg_diag_loopback_mode(struct lpfc_hba *phba, struct bsg_job *job)
{
+ struct fc_bsg_request *bsg_request = job->request;
+ struct fc_bsg_reply *bsg_reply = job->reply;
struct diag_mode_set *loopback_mode;
uint32_t link_flags, timeout;
int i, rc = 0;
/* no data to return just the return code */
- job->reply->reply_payload_rcv_len = 0;
+ bsg_reply->reply_payload_rcv_len = 0;
if (job->request_len < sizeof(struct fc_bsg_request) +
sizeof(struct diag_mode_set)) {
@@ -2054,7 +2081,7 @@ lpfc_sli4_bsg_diag_loopback_mode(struct lpfc_hba *phba, struct fc_bsg_job *job)
lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
"3129 Bring link to diagnostic state.\n");
loopback_mode = (struct diag_mode_set *)
- job->request->rqst_data.h_vendor.vendor_cmd;
+ bsg_request->rqst_data.h_vendor.vendor_cmd;
link_flags = loopback_mode->type;
timeout = loopback_mode->timeout * 100;
@@ -2151,10 +2178,11 @@ loopback_mode_exit:
job_error:
/* make error code available to userspace */
- job->reply->result = rc;
+ bsg_reply->result = rc;
/* complete the job back to userspace if no error */
if (rc == 0)
- job->job_done(job);
+ bsg_job_done(job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
return rc;
}
@@ -2166,17 +2194,17 @@ job_error:
* command from the user to proper driver action routines.
*/
static int
-lpfc_bsg_diag_loopback_mode(struct fc_bsg_job *job)
+lpfc_bsg_diag_loopback_mode(struct bsg_job *job)
{
struct Scsi_Host *shost;
struct lpfc_vport *vport;
struct lpfc_hba *phba;
int rc;
- shost = job->shost;
+ shost = fc_bsg_to_shost(job);
if (!shost)
return -ENODEV;
- vport = (struct lpfc_vport *)job->shost->hostdata;
+ vport = shost_priv(shost);
if (!vport)
return -ENODEV;
phba = vport->phba;
@@ -2202,8 +2230,10 @@ lpfc_bsg_diag_loopback_mode(struct fc_bsg_job *job)
* command from the user to proper driver action routines.
*/
static int
-lpfc_sli4_bsg_diag_mode_end(struct fc_bsg_job *job)
+lpfc_sli4_bsg_diag_mode_end(struct bsg_job *job)
{
+ struct fc_bsg_request *bsg_request = job->request;
+ struct fc_bsg_reply *bsg_reply = job->reply;
struct Scsi_Host *shost;
struct lpfc_vport *vport;
struct lpfc_hba *phba;
@@ -2211,10 +2241,10 @@ lpfc_sli4_bsg_diag_mode_end(struct fc_bsg_job *job)
uint32_t timeout;
int rc, i;
- shost = job->shost;
+ shost = fc_bsg_to_shost(job);
if (!shost)
return -ENODEV;
- vport = (struct lpfc_vport *)job->shost->hostdata;
+ vport = shost_priv(shost);
if (!vport)
return -ENODEV;
phba = vport->phba;
@@ -2232,7 +2262,7 @@ lpfc_sli4_bsg_diag_mode_end(struct fc_bsg_job *job)
phba->link_flag &= ~LS_LOOPBACK_MODE;
spin_unlock_irq(&phba->hbalock);
loopback_mode_end_cmd = (struct diag_mode_set *)
- job->request->rqst_data.h_vendor.vendor_cmd;
+ bsg_request->rqst_data.h_vendor.vendor_cmd;
timeout = loopback_mode_end_cmd->timeout * 100;
rc = lpfc_sli4_bsg_set_link_diag_state(phba, 0);
@@ -2263,10 +2293,11 @@ lpfc_sli4_bsg_diag_mode_end(struct fc_bsg_job *job)
loopback_mode_end_exit:
/* make return code available to userspace */
- job->reply->result = rc;
+ bsg_reply->result = rc;
/* complete the job back to userspace if no error */
if (rc == 0)
- job->job_done(job);
+ bsg_job_done(job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
return rc;
}
@@ -2278,8 +2309,10 @@ loopback_mode_end_exit:
* application.
*/
static int
-lpfc_sli4_bsg_link_diag_test(struct fc_bsg_job *job)
+lpfc_sli4_bsg_link_diag_test(struct bsg_job *job)
{
+ struct fc_bsg_request *bsg_request = job->request;
+ struct fc_bsg_reply *bsg_reply = job->reply;
struct Scsi_Host *shost;
struct lpfc_vport *vport;
struct lpfc_hba *phba;
@@ -2292,12 +2325,12 @@ lpfc_sli4_bsg_link_diag_test(struct fc_bsg_job *job)
struct diag_status *diag_status_reply;
int mbxstatus, rc = 0;
- shost = job->shost;
+ shost = fc_bsg_to_shost(job);
if (!shost) {
rc = -ENODEV;
goto job_error;
}
- vport = (struct lpfc_vport *)job->shost->hostdata;
+ vport = shost_priv(shost);
if (!vport) {
rc = -ENODEV;
goto job_error;
@@ -2335,7 +2368,7 @@ lpfc_sli4_bsg_link_diag_test(struct fc_bsg_job *job)
goto job_error;
link_diag_test_cmd = (struct sli4_link_diag *)
- job->request->rqst_data.h_vendor.vendor_cmd;
+ bsg_request->rqst_data.h_vendor.vendor_cmd;
rc = lpfc_sli4_bsg_set_link_diag_state(phba, 1);
@@ -2385,7 +2418,7 @@ lpfc_sli4_bsg_link_diag_test(struct fc_bsg_job *job)
}
diag_status_reply = (struct diag_status *)
- job->reply->reply_data.vendor_reply.vendor_rsp;
+ bsg_reply->reply_data.vendor_reply.vendor_rsp;
if (job->reply_len <
sizeof(struct fc_bsg_request) + sizeof(struct diag_status)) {
@@ -2413,10 +2446,11 @@ link_diag_test_exit:
job_error:
/* make error code available to userspace */
- job->reply->result = rc;
+ bsg_reply->result = rc;
/* complete the job back to userspace if no error */
if (rc == 0)
- job->job_done(job);
+ bsg_job_done(job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
return rc;
}
@@ -2982,9 +3016,10 @@ err_post_rxbufs_exit:
* of loopback mode.
**/
static int
-lpfc_bsg_diag_loopback_run(struct fc_bsg_job *job)
+lpfc_bsg_diag_loopback_run(struct bsg_job *job)
{
- struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
+ struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job));
+ struct fc_bsg_reply *bsg_reply = job->reply;
struct lpfc_hba *phba = vport->phba;
struct lpfc_bsg_event *evt;
struct event_data *evdat;
@@ -3012,7 +3047,7 @@ lpfc_bsg_diag_loopback_run(struct fc_bsg_job *job)
uint32_t total_mem;
/* in case no data is returned return just the return code */
- job->reply->reply_payload_rcv_len = 0;
+ bsg_reply->reply_payload_rcv_len = 0;
if (job->request_len <
sizeof(struct fc_bsg_request) + sizeof(struct diag_mode_test)) {
@@ -3237,11 +3272,11 @@ lpfc_bsg_diag_loopback_run(struct fc_bsg_job *job)
rc = IOCB_SUCCESS;
/* skip over elx loopback header */
rx_databuf += ELX_LOOPBACK_HEADER_SZ;
- job->reply->reply_payload_rcv_len =
+ bsg_reply->reply_payload_rcv_len =
sg_copy_from_buffer(job->reply_payload.sg_list,
job->reply_payload.sg_cnt,
rx_databuf, size);
- job->reply->reply_payload_rcv_len = size;
+ bsg_reply->reply_payload_rcv_len = size;
}
}
@@ -3271,11 +3306,12 @@ err_loopback_test_exit:
loopback_test_exit:
kfree(dataout);
/* make error code available to userspace */
- job->reply->result = rc;
+ bsg_reply->result = rc;
job->dd_data = NULL;
/* complete the job back to userspace if no error */
if (rc == IOCB_SUCCESS)
- job->job_done(job);
+ bsg_job_done(job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
return rc;
}
@@ -3284,9 +3320,10 @@ loopback_test_exit:
* @job: GET_DFC_REV fc_bsg_job
**/
static int
-lpfc_bsg_get_dfc_rev(struct fc_bsg_job *job)
+lpfc_bsg_get_dfc_rev(struct bsg_job *job)
{
- struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
+ struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job));
+ struct fc_bsg_reply *bsg_reply = job->reply;
struct lpfc_hba *phba = vport->phba;
struct get_mgmt_rev_reply *event_reply;
int rc = 0;
@@ -3301,7 +3338,7 @@ lpfc_bsg_get_dfc_rev(struct fc_bsg_job *job)
}
event_reply = (struct get_mgmt_rev_reply *)
- job->reply->reply_data.vendor_reply.vendor_rsp;
+ bsg_reply->reply_data.vendor_reply.vendor_rsp;
if (job->reply_len <
sizeof(struct fc_bsg_request) + sizeof(struct get_mgmt_rev_reply)) {
@@ -3315,9 +3352,10 @@ lpfc_bsg_get_dfc_rev(struct fc_bsg_job *job)
event_reply->info.a_Major = MANAGEMENT_MAJOR_REV;
event_reply->info.a_Minor = MANAGEMENT_MINOR_REV;
job_error:
- job->reply->result = rc;
+ bsg_reply->result = rc;
if (rc == 0)
- job->job_done(job);
+ bsg_job_done(job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
return rc;
}
@@ -3336,7 +3374,8 @@ static void
lpfc_bsg_issue_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
{
struct bsg_job_data *dd_data;
- struct fc_bsg_job *job;
+ struct fc_bsg_reply *bsg_reply;
+ struct bsg_job *job;
uint32_t size;
unsigned long flags;
uint8_t *pmb, *pmb_buf;
@@ -3364,8 +3403,9 @@ lpfc_bsg_issue_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
/* Copy the mailbox data to the job if it is still active */
if (job) {
+ bsg_reply = job->reply;
size = job->reply_payload.payload_len;
- job->reply->reply_payload_rcv_len =
+ bsg_reply->reply_payload_rcv_len =
sg_copy_from_buffer(job->reply_payload.sg_list,
job->reply_payload.sg_cnt,
pmb_buf, size);
@@ -3379,8 +3419,9 @@ lpfc_bsg_issue_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
/* Complete the job if the job is still active */
if (job) {
- job->reply->result = 0;
- job->job_done(job);
+ bsg_reply->result = 0;
+ bsg_job_done(job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
}
return;
}
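
Every converted completion handler in this file uses the same handshake with lpfc_bsg_timeout(): the job pointer is read from dd_data under ct_ev_lock, and job->dd_data is cleared so that exactly one of the completion and timeout paths finishes the job. Sketched in isolation, with declarations and the lpfc-specific payload copy elided:

	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	job = dd_data->set_job;			/* NULL if timeout already ran */
	if (job) {
		bsg_reply = job->reply;
		job->dd_data = NULL;		/* timeout can no longer abort us */
	}
	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);

	if (job) {
		bsg_reply->result = 0;
		bsg_job_done(job, bsg_reply->result,
			     bsg_reply->reply_payload_rcv_len);
	}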
@@ -3510,11 +3551,12 @@ lpfc_bsg_mbox_ext_session_reset(struct lpfc_hba *phba)
* This routine handles BSG job completion for mailbox commands with
* multiple external buffers.
**/
-static struct fc_bsg_job *
+static struct bsg_job *
lpfc_bsg_issue_mbox_ext_handle_job(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
{
struct bsg_job_data *dd_data;
- struct fc_bsg_job *job;
+ struct bsg_job *job;
+ struct fc_bsg_reply *bsg_reply;
uint8_t *pmb, *pmb_buf;
unsigned long flags;
uint32_t size;
@@ -3529,6 +3571,7 @@ lpfc_bsg_issue_mbox_ext_handle_job(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
spin_lock_irqsave(&phba->ct_ev_lock, flags);
job = dd_data->set_job;
if (job) {
+ bsg_reply = job->reply;
/* Prevent timeout handling from trying to abort job */
job->dd_data = NULL;
}
@@ -3559,13 +3602,13 @@ lpfc_bsg_issue_mbox_ext_handle_job(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
if (job) {
size = job->reply_payload.payload_len;
- job->reply->reply_payload_rcv_len =
+ bsg_reply->reply_payload_rcv_len =
sg_copy_from_buffer(job->reply_payload.sg_list,
job->reply_payload.sg_cnt,
pmb_buf, size);
/* result for successful */
- job->reply->result = 0;
+ bsg_reply->result = 0;
lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
"2937 SLI_CONFIG ext-buffer maibox command "
@@ -3603,7 +3646,8 @@ lpfc_bsg_issue_mbox_ext_handle_job(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
static void
lpfc_bsg_issue_read_mbox_ext_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
{
- struct fc_bsg_job *job;
+ struct bsg_job *job;
+ struct fc_bsg_reply *bsg_reply;
job = lpfc_bsg_issue_mbox_ext_handle_job(phba, pmboxq);
@@ -3623,9 +3667,11 @@ lpfc_bsg_issue_read_mbox_ext_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
mempool_free(pmboxq, phba->mbox_mem_pool);
/* if the job is still active, call job done */
- if (job)
- job->job_done(job);
-
+ if (job) {
+ bsg_reply = job->reply;
+ bsg_job_done(job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
+ }
return;
}
@@ -3640,7 +3686,8 @@ lpfc_bsg_issue_read_mbox_ext_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
static void
lpfc_bsg_issue_write_mbox_ext_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
{
- struct fc_bsg_job *job;
+ struct bsg_job *job;
+ struct fc_bsg_reply *bsg_reply;
job = lpfc_bsg_issue_mbox_ext_handle_job(phba, pmboxq);
@@ -3658,8 +3705,11 @@ lpfc_bsg_issue_write_mbox_ext_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
lpfc_bsg_mbox_ext_session_reset(phba);
/* if the job is still active, call job done */
- if (job)
- job->job_done(job);
+ if (job) {
+ bsg_reply = job->reply;
+ bsg_job_done(job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
+ }
return;
}
@@ -3768,10 +3818,11 @@ lpfc_bsg_sli_cfg_dma_desc_setup(struct lpfc_hba *phba, enum nemb_type nemb_tp,
* non-embedded external buffers.
**/
static int
-lpfc_bsg_sli_cfg_read_cmd_ext(struct lpfc_hba *phba, struct fc_bsg_job *job,
+lpfc_bsg_sli_cfg_read_cmd_ext(struct lpfc_hba *phba, struct bsg_job *job,
enum nemb_type nemb_tp,
struct lpfc_dmabuf *dmabuf)
{
+ struct fc_bsg_request *bsg_request = job->request;
struct lpfc_sli_config_mbox *sli_cfg_mbx;
struct dfc_mbox_req *mbox_req;
struct lpfc_dmabuf *curr_dmabuf, *next_dmabuf;
@@ -3784,7 +3835,7 @@ lpfc_bsg_sli_cfg_read_cmd_ext(struct lpfc_hba *phba, struct fc_bsg_job *job,
int rc, i;
mbox_req =
- (struct dfc_mbox_req *)job->request->rqst_data.h_vendor.vendor_cmd;
+ (struct dfc_mbox_req *)bsg_request->rqst_data.h_vendor.vendor_cmd;
/* pointer to the start of mailbox command */
sli_cfg_mbx = (struct lpfc_sli_config_mbox *)dmabuf->virt;
@@ -3955,10 +4006,12 @@ job_error:
* non-embedded external buffers.
**/
static int
-lpfc_bsg_sli_cfg_write_cmd_ext(struct lpfc_hba *phba, struct fc_bsg_job *job,
+lpfc_bsg_sli_cfg_write_cmd_ext(struct lpfc_hba *phba, struct bsg_job *job,
enum nemb_type nemb_tp,
struct lpfc_dmabuf *dmabuf)
{
+ struct fc_bsg_request *bsg_request = job->request;
+ struct fc_bsg_reply *bsg_reply = job->reply;
struct dfc_mbox_req *mbox_req;
struct lpfc_sli_config_mbox *sli_cfg_mbx;
uint32_t ext_buf_cnt;
@@ -3969,7 +4022,7 @@ lpfc_bsg_sli_cfg_write_cmd_ext(struct lpfc_hba *phba, struct fc_bsg_job *job,
int rc = SLI_CONFIG_NOT_HANDLED, i;
mbox_req =
- (struct dfc_mbox_req *)job->request->rqst_data.h_vendor.vendor_cmd;
+ (struct dfc_mbox_req *)bsg_request->rqst_data.h_vendor.vendor_cmd;
/* pointer to the start of mailbox command */
sli_cfg_mbx = (struct lpfc_sli_config_mbox *)dmabuf->virt;
@@ -4096,8 +4149,9 @@ lpfc_bsg_sli_cfg_write_cmd_ext(struct lpfc_hba *phba, struct fc_bsg_job *job,
/* wait for additional external buffers */
- job->reply->result = 0;
- job->job_done(job);
+ bsg_reply->result = 0;
+ bsg_job_done(job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
return SLI_CONFIG_HANDLED;
job_error:
@@ -4119,7 +4173,7 @@ job_error:
* with embedded subsystem 0x1 and opcodes with external HBDs.
**/
static int
-lpfc_bsg_handle_sli_cfg_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
+lpfc_bsg_handle_sli_cfg_mbox(struct lpfc_hba *phba, struct bsg_job *job,
struct lpfc_dmabuf *dmabuf)
{
struct lpfc_sli_config_mbox *sli_cfg_mbx;
@@ -4268,8 +4322,9 @@ lpfc_bsg_mbox_ext_abort(struct lpfc_hba *phba)
* user space through BSG.
**/
static int
-lpfc_bsg_read_ebuf_get(struct lpfc_hba *phba, struct fc_bsg_job *job)
+lpfc_bsg_read_ebuf_get(struct lpfc_hba *phba, struct bsg_job *job)
{
+ struct fc_bsg_reply *bsg_reply = job->reply;
struct lpfc_sli_config_mbox *sli_cfg_mbx;
struct lpfc_dmabuf *dmabuf;
uint8_t *pbuf;
@@ -4307,7 +4362,7 @@ lpfc_bsg_read_ebuf_get(struct lpfc_hba *phba, struct fc_bsg_job *job)
dmabuf, index);
pbuf = (uint8_t *)dmabuf->virt;
- job->reply->reply_payload_rcv_len =
+ bsg_reply->reply_payload_rcv_len =
sg_copy_from_buffer(job->reply_payload.sg_list,
job->reply_payload.sg_cnt,
pbuf, size);
@@ -4321,8 +4376,9 @@ lpfc_bsg_read_ebuf_get(struct lpfc_hba *phba, struct fc_bsg_job *job)
lpfc_bsg_mbox_ext_session_reset(phba);
}
- job->reply->result = 0;
- job->job_done(job);
+ bsg_reply->result = 0;
+ bsg_job_done(job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
return SLI_CONFIG_HANDLED;
}
@@ -4336,9 +4392,10 @@ lpfc_bsg_read_ebuf_get(struct lpfc_hba *phba, struct fc_bsg_job *job)
* from user space through BSG.
**/
static int
-lpfc_bsg_write_ebuf_set(struct lpfc_hba *phba, struct fc_bsg_job *job,
+lpfc_bsg_write_ebuf_set(struct lpfc_hba *phba, struct bsg_job *job,
struct lpfc_dmabuf *dmabuf)
{
+ struct fc_bsg_reply *bsg_reply = job->reply;
struct bsg_job_data *dd_data = NULL;
LPFC_MBOXQ_t *pmboxq = NULL;
MAILBOX_t *pmb;
@@ -4436,8 +4493,9 @@ lpfc_bsg_write_ebuf_set(struct lpfc_hba *phba, struct fc_bsg_job *job,
}
/* wait for additional external buffers */
- job->reply->result = 0;
- job->job_done(job);
+ bsg_reply->result = 0;
+ bsg_job_done(job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
return SLI_CONFIG_HANDLED;
job_error:
@@ -4457,7 +4515,7 @@ job_error:
* command with multiple non-embedded external buffers.
**/
static int
-lpfc_bsg_handle_sli_cfg_ebuf(struct lpfc_hba *phba, struct fc_bsg_job *job,
+lpfc_bsg_handle_sli_cfg_ebuf(struct lpfc_hba *phba, struct bsg_job *job,
struct lpfc_dmabuf *dmabuf)
{
int rc;
@@ -4502,14 +4560,15 @@ lpfc_bsg_handle_sli_cfg_ebuf(struct lpfc_hba *phba, struct fc_bsg_job *job,
* (0x9B) mailbox commands and external buffers.
**/
static int
-lpfc_bsg_handle_sli_cfg_ext(struct lpfc_hba *phba, struct fc_bsg_job *job,
+lpfc_bsg_handle_sli_cfg_ext(struct lpfc_hba *phba, struct bsg_job *job,
struct lpfc_dmabuf *dmabuf)
{
+ struct fc_bsg_request *bsg_request = job->request;
struct dfc_mbox_req *mbox_req;
int rc = SLI_CONFIG_NOT_HANDLED;
mbox_req =
- (struct dfc_mbox_req *)job->request->rqst_data.h_vendor.vendor_cmd;
+ (struct dfc_mbox_req *)bsg_request->rqst_data.h_vendor.vendor_cmd;
/* mbox command with/without single external buffer */
if (mbox_req->extMboxTag == 0 && mbox_req->extSeqNum == 0)
@@ -4579,9 +4638,11 @@ sli_cfg_ext_error:
* let our completion handler finish the command.
**/
static int
-lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
+lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct bsg_job *job,
struct lpfc_vport *vport)
{
+ struct fc_bsg_request *bsg_request = job->request;
+ struct fc_bsg_reply *bsg_reply = job->reply;
LPFC_MBOXQ_t *pmboxq = NULL; /* internal mailbox queue */
MAILBOX_t *pmb; /* shortcut to the pmboxq mailbox */
/* a 4k buffer to hold the mb and extended data from/to the bsg */
@@ -4600,7 +4661,7 @@ lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
uint32_t size;
/* in case no data is transferred */
- job->reply->reply_payload_rcv_len = 0;
+ bsg_reply->reply_payload_rcv_len = 0;
/* sanity check to protect driver */
if (job->reply_payload.payload_len > BSG_MBOX_SIZE ||
@@ -4619,7 +4680,7 @@ lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
}
mbox_req =
- (struct dfc_mbox_req *)job->request->rqst_data.h_vendor.vendor_cmd;
+ (struct dfc_mbox_req *)bsg_request->rqst_data.h_vendor.vendor_cmd;
/* check if requested extended data lengths are valid */
if ((mbox_req->inExtWLen > BSG_MBOX_SIZE/sizeof(uint32_t)) ||
@@ -4841,7 +4902,7 @@ lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
/* job finished, copy the data */
memcpy(pmbx, pmb, sizeof(*pmb));
- job->reply->reply_payload_rcv_len =
+ bsg_reply->reply_payload_rcv_len =
sg_copy_from_buffer(job->reply_payload.sg_list,
job->reply_payload.sg_cnt,
pmbx, size);
@@ -4870,15 +4931,17 @@ job_cont:
* @job: MBOX fc_bsg_job for LPFC_BSG_VENDOR_MBOX.
**/
static int
-lpfc_bsg_mbox_cmd(struct fc_bsg_job *job)
+lpfc_bsg_mbox_cmd(struct bsg_job *job)
{
- struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
+ struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job));
+ struct fc_bsg_request *bsg_request = job->request;
+ struct fc_bsg_reply *bsg_reply = job->reply;
struct lpfc_hba *phba = vport->phba;
struct dfc_mbox_req *mbox_req;
int rc = 0;
/* mix-and-match backward compatibility */
- job->reply->reply_payload_rcv_len = 0;
+ bsg_reply->reply_payload_rcv_len = 0;
if (job->request_len <
sizeof(struct fc_bsg_request) + sizeof(struct dfc_mbox_req)) {
lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
@@ -4889,7 +4952,7 @@ lpfc_bsg_mbox_cmd(struct fc_bsg_job *job)
sizeof(struct fc_bsg_request)),
(int)sizeof(struct dfc_mbox_req));
mbox_req = (struct dfc_mbox_req *)
- job->request->rqst_data.h_vendor.vendor_cmd;
+ bsg_request->rqst_data.h_vendor.vendor_cmd;
mbox_req->extMboxTag = 0;
mbox_req->extSeqNum = 0;
}
@@ -4898,15 +4961,16 @@ lpfc_bsg_mbox_cmd(struct fc_bsg_job *job)
if (rc == 0) {
/* job done */
- job->reply->result = 0;
+ bsg_reply->result = 0;
job->dd_data = NULL;
- job->job_done(job);
+ bsg_job_done(job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
} else if (rc == 1)
/* job submitted, will complete later*/
rc = 0; /* return zero, no error */
else {
/* some error occurred */
- job->reply->result = rc;
+ bsg_reply->result = rc;
job->dd_data = NULL;
}
@@ -4936,7 +5000,8 @@ lpfc_bsg_menlo_cmd_cmp(struct lpfc_hba *phba,
struct lpfc_iocbq *rspiocbq)
{
struct bsg_job_data *dd_data;
- struct fc_bsg_job *job;
+ struct bsg_job *job;
+ struct fc_bsg_reply *bsg_reply;
IOCB_t *rsp;
struct lpfc_dmabuf *bmp, *cmp, *rmp;
struct lpfc_bsg_menlo *menlo;
@@ -4956,6 +5021,7 @@ lpfc_bsg_menlo_cmd_cmp(struct lpfc_hba *phba,
spin_lock_irqsave(&phba->ct_ev_lock, flags);
job = dd_data->set_job;
if (job) {
+ bsg_reply = job->reply;
/* Prevent timeout handling from trying to abort job */
job->dd_data = NULL;
}
@@ -4970,7 +5036,7 @@ lpfc_bsg_menlo_cmd_cmp(struct lpfc_hba *phba,
*/
menlo_resp = (struct menlo_response *)
- job->reply->reply_data.vendor_reply.vendor_rsp;
+ bsg_reply->reply_data.vendor_reply.vendor_rsp;
menlo_resp->xri = rsp->ulpContext;
if (rsp->ulpStatus) {
if (rsp->ulpStatus == IOSTAT_LOCAL_REJECT) {
@@ -4990,7 +5056,7 @@ lpfc_bsg_menlo_cmd_cmp(struct lpfc_hba *phba,
}
} else {
rsp_size = rsp->un.genreq64.bdl.bdeSize;
- job->reply->reply_payload_rcv_len =
+ bsg_reply->reply_payload_rcv_len =
lpfc_bsg_copy_data(rmp, &job->reply_payload,
rsp_size, 0);
}
@@ -5007,8 +5073,9 @@ lpfc_bsg_menlo_cmd_cmp(struct lpfc_hba *phba,
/* Complete the job if active */
if (job) {
- job->reply->result = rc;
- job->job_done(job);
+ bsg_reply->result = rc;
+ bsg_job_done(job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
}
return;
@@ -5024,9 +5091,11 @@ lpfc_bsg_menlo_cmd_cmp(struct lpfc_hba *phba,
* supplied in the menlo request header xri field.
**/
static int
-lpfc_menlo_cmd(struct fc_bsg_job *job)
+lpfc_menlo_cmd(struct bsg_job *job)
{
- struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
+ struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job));
+ struct fc_bsg_request *bsg_request = job->request;
+ struct fc_bsg_reply *bsg_reply = job->reply;
struct lpfc_hba *phba = vport->phba;
struct lpfc_iocbq *cmdiocbq;
IOCB_t *cmd;
@@ -5039,7 +5108,7 @@ lpfc_menlo_cmd(struct fc_bsg_job *job)
struct ulp_bde64 *bpl = NULL;
/* in case no data is returned return just the return code */
- job->reply->reply_payload_rcv_len = 0;
+ bsg_reply->reply_payload_rcv_len = 0;
if (job->request_len <
sizeof(struct fc_bsg_request) +
@@ -5069,7 +5138,7 @@ lpfc_menlo_cmd(struct fc_bsg_job *job)
}
menlo_cmd = (struct menlo_command *)
- job->request->rqst_data.h_vendor.vendor_cmd;
+ bsg_request->rqst_data.h_vendor.vendor_cmd;
/* allocate our bsg tracking structure */
dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
@@ -5180,19 +5249,65 @@ free_dd:
kfree(dd_data);
no_dd_data:
/* make error code available to userspace */
- job->reply->result = rc;
+ bsg_reply->result = rc;
job->dd_data = NULL;
return rc;
}
+static int
+lpfc_forced_link_speed(struct bsg_job *job)
+{
+ struct Scsi_Host *shost = fc_bsg_to_shost(job);
+ struct lpfc_vport *vport = shost_priv(shost);
+ struct lpfc_hba *phba = vport->phba;
+ struct fc_bsg_reply *bsg_reply = job->reply;
+ struct forced_link_speed_support_reply *forced_reply;
+ int rc = 0;
+
+ if (job->request_len <
+ sizeof(struct fc_bsg_request) +
+ sizeof(struct get_forced_link_speed_support)) {
+ lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
+ "0048 Received FORCED_LINK_SPEED request "
+ "below minimum size\n");
+ rc = -EINVAL;
+ goto job_error;
+ }
+
+ forced_reply = (struct forced_link_speed_support_reply *)
+ bsg_reply->reply_data.vendor_reply.vendor_rsp;
+
+ if (job->reply_len <
+ sizeof(struct fc_bsg_request) +
+ sizeof(struct forced_link_speed_support_reply)) {
+ lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
+ "0049 Received FORCED_LINK_SPEED reply below "
+ "minimum size\n");
+ rc = -EINVAL;
+ goto job_error;
+ }
+
+ forced_reply->supported = (phba->hba_flag & HBA_FORCED_LINK_SPEED)
+ ? LPFC_FORCED_LINK_SPEED_SUPPORTED
+ : LPFC_FORCED_LINK_SPEED_NOT_SUPPORTED;
+job_error:
+ bsg_reply->result = rc;
+ if (rc == 0)
+ bsg_job_done(job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
+ return rc;
+}
+
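
For reference, the new vendor command should be reachable from userspace through the fc_host bsg node using the standard sg_io_v4 envelope. A rough, untested sketch; the device path, reply-buffer sizing, little-endian byte extraction, and helper name are all assumptions, and most error handling is elided:

#include <fcntl.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/bsg.h>
#include <scsi/sg.h>
#include <scsi/scsi_bsg_fc.h>

#define LPFC_BSG_VENDOR_FORCED_LINK_SPEED 14	/* from lpfc_bsg.h */

/* hypothetical helper: returns the 'supported' byte, or -1 on error */
static int query_forced_link_speed(const char *bsg_path /* e.g. /dev/bsg/fc_host0 */)
{
	unsigned char req[sizeof(struct fc_bsg_request) + sizeof(uint32_t)];
	unsigned char rsp[256];		/* generous reply buffer (assumption) */
	struct fc_bsg_request *bsg_req = (struct fc_bsg_request *)req;
	struct sg_io_v4 io;
	int fd, ret;

	fd = open(bsg_path, O_RDWR);
	if (fd < 0)
		return -1;
	memset(req, 0, sizeof(req));
	memset(rsp, 0, sizeof(rsp));
	memset(&io, 0, sizeof(io));

	bsg_req->msgcode = FC_BSG_HST_VENDOR;
	bsg_req->rqst_data.h_vendor.vendor_cmd[0] =
		LPFC_BSG_VENDOR_FORCED_LINK_SPEED;

	io.guard = 'Q';
	io.protocol = BSG_PROTOCOL_SCSI;
	io.subprotocol = BSG_SUB_PROTOCOL_SCSI_TRANSPORT;
	io.request = (uintptr_t)req;
	io.request_len = sizeof(req);
	io.response = (uintptr_t)rsp;
	io.max_response_len = sizeof(rsp);

	ret = ioctl(fd, SG_IO, &io);
	close(fd);
	if (ret < 0)
		return -1;
	/* forced_link_speed_support_reply.supported is the first byte of
	 * the vendor response area (little-endian assumed) */
	return ((struct fc_bsg_reply *)rsp)->reply_data.vendor_reply.vendor_rsp[0] & 0xff;
}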
/**
* lpfc_bsg_hst_vendor - process a vendor-specific fc_bsg_job
* @job: fc_bsg_job to handle
**/
static int
-lpfc_bsg_hst_vendor(struct fc_bsg_job *job)
+lpfc_bsg_hst_vendor(struct bsg_job *job)
{
- int command = job->request->rqst_data.h_vendor.vendor_cmd[0];
+ struct fc_bsg_request *bsg_request = job->request;
+ struct fc_bsg_reply *bsg_reply = job->reply;
+ int command = bsg_request->rqst_data.h_vendor.vendor_cmd[0];
int rc;
switch (command) {
@@ -5227,11 +5342,14 @@ lpfc_bsg_hst_vendor(struct fc_bsg_job *job)
case LPFC_BSG_VENDOR_MENLO_DATA:
rc = lpfc_menlo_cmd(job);
break;
+ case LPFC_BSG_VENDOR_FORCED_LINK_SPEED:
+ rc = lpfc_forced_link_speed(job);
+ break;
default:
rc = -EINVAL;
- job->reply->reply_payload_rcv_len = 0;
+ bsg_reply->reply_payload_rcv_len = 0;
/* make error code available to userspace */
- job->reply->result = rc;
+ bsg_reply->result = rc;
break;
}
@@ -5243,12 +5361,14 @@ lpfc_bsg_hst_vendor(struct fc_bsg_job *job)
* @job: fc_bsg_job to handle
**/
int
-lpfc_bsg_request(struct fc_bsg_job *job)
+lpfc_bsg_request(struct bsg_job *job)
{
+ struct fc_bsg_request *bsg_request = job->request;
+ struct fc_bsg_reply *bsg_reply = job->reply;
uint32_t msgcode;
int rc;
- msgcode = job->request->msgcode;
+ msgcode = bsg_request->msgcode;
switch (msgcode) {
case FC_BSG_HST_VENDOR:
rc = lpfc_bsg_hst_vendor(job);
@@ -5261,9 +5381,9 @@ lpfc_bsg_request(struct fc_bsg_job *job)
break;
default:
rc = -EINVAL;
- job->reply->reply_payload_rcv_len = 0;
+ bsg_reply->reply_payload_rcv_len = 0;
/* make error code available to userspace */
- job->reply->result = rc;
+ bsg_reply->result = rc;
break;
}
@@ -5278,9 +5398,9 @@ lpfc_bsg_request(struct fc_bsg_job *job)
* the waiting function which will handle passing the error back to userspace
**/
int
-lpfc_bsg_timeout(struct fc_bsg_job *job)
+lpfc_bsg_timeout(struct bsg_job *job)
{
- struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
+ struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job));
struct lpfc_hba *phba = vport->phba;
struct lpfc_iocbq *cmdiocb;
struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
diff --git a/drivers/scsi/lpfc/lpfc_bsg.h b/drivers/scsi/lpfc/lpfc_bsg.h
index e557bcdbcb19..f2247aa4fa17 100644
--- a/drivers/scsi/lpfc/lpfc_bsg.h
+++ b/drivers/scsi/lpfc/lpfc_bsg.h
@@ -35,6 +35,7 @@
#define LPFC_BSG_VENDOR_MENLO_DATA 9
#define LPFC_BSG_VENDOR_DIAG_MODE_END 10
#define LPFC_BSG_VENDOR_LINK_DIAG_TEST 11
+#define LPFC_BSG_VENDOR_FORCED_LINK_SPEED 14
struct set_ct_event {
uint32_t command;
@@ -284,6 +285,15 @@ struct lpfc_sli_config_mbox {
} un;
};
+#define LPFC_FORCED_LINK_SPEED_NOT_SUPPORTED 0
+#define LPFC_FORCED_LINK_SPEED_SUPPORTED 1
+struct get_forced_link_speed_support {
+ uint32_t command;
+};
+struct forced_link_speed_support_reply {
+ uint8_t supported;
+};
+
/* driver only */
#define SLI_CONFIG_NOT_HANDLED 0
#define SLI_CONFIG_HANDLED 1
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
index bd7576d452f2..15d2bfdf582d 100644
--- a/drivers/scsi/lpfc/lpfc_crtn.h
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
@@ -397,8 +397,6 @@ extern spinlock_t _dump_buf_lock;
extern int _dump_buf_done;
extern spinlock_t pgcnt_lock;
extern unsigned int pgcnt;
-extern unsigned int lpfc_prot_mask;
-extern unsigned char lpfc_prot_guard;
extern unsigned int lpfc_fcp_look_ahead;
/* Interface exported by fabric iocb scheduler */
@@ -431,8 +429,8 @@ struct lpfc_sglq *__lpfc_get_active_sglq(struct lpfc_hba *, uint16_t);
#define HBA_EVENT_LINK_DOWN 3
/* functions to support SGIOv4/bsg interface */
-int lpfc_bsg_request(struct fc_bsg_job *);
-int lpfc_bsg_timeout(struct fc_bsg_job *);
+int lpfc_bsg_request(struct bsg_job *);
+int lpfc_bsg_timeout(struct bsg_job *);
int lpfc_bsg_ct_unsol_event(struct lpfc_hba *, struct lpfc_sli_ring *,
struct lpfc_iocbq *);
int lpfc_bsg_ct_unsol_abort(struct lpfc_hba *, struct hbq_dmabuf *);
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index b7d54bfb1df9..236e4e51d161 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -7610,7 +7610,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
/* reject till our FLOGI completes */
if ((vport->port_state < LPFC_FABRIC_CFG_LINK) &&
(cmd != ELS_CMD_FLOGI)) {
- rjt_err = LSRJT_UNABLE_TPC;
+ rjt_err = LSRJT_LOGICAL_BSY;
rjt_exp = LSEXP_NOTHING_MORE;
goto lsrjt;
}
diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h
index ee8022737591..5646699b0516 100644
--- a/drivers/scsi/lpfc/lpfc_hw4.h
+++ b/drivers/scsi/lpfc/lpfc_hw4.h
@@ -921,6 +921,7 @@ struct mbox_header {
#define LPFC_MBOX_OPCODE_GET_PORT_NAME 0x4D
#define LPFC_MBOX_OPCODE_MQ_CREATE_EXT 0x5A
#define LPFC_MBOX_OPCODE_GET_VPD_DATA 0x5B
+#define LPFC_MBOX_OPCODE_SET_HOST_DATA 0x5D
#define LPFC_MBOX_OPCODE_SEND_ACTIVATION 0x73
#define LPFC_MBOX_OPCODE_RESET_LICENSES 0x74
#define LPFC_MBOX_OPCODE_GET_RSRC_EXTENT_INFO 0x9A
@@ -2289,6 +2290,9 @@ struct lpfc_mbx_read_config {
#define lpfc_mbx_rd_conf_r_a_tov_SHIFT 0
#define lpfc_mbx_rd_conf_r_a_tov_MASK 0x0000FFFF
#define lpfc_mbx_rd_conf_r_a_tov_WORD word6
+#define lpfc_mbx_rd_conf_link_speed_SHIFT 16
+#define lpfc_mbx_rd_conf_link_speed_MASK 0x0000FFFF
+#define lpfc_mbx_rd_conf_link_speed_WORD word6
uint32_t rsvd_7;
uint32_t rsvd_8;
uint32_t word9;
@@ -2919,6 +2923,16 @@ struct lpfc_mbx_set_feature {
};
+#define LPFC_SET_HOST_OS_DRIVER_VERSION 0x2
+struct lpfc_mbx_set_host_data {
+#define LPFC_HOST_OS_DRIVER_VERSION_SIZE 48
+ struct mbox_header header;
+ uint32_t param_id;
+ uint32_t param_len;
+ uint8_t data[LPFC_HOST_OS_DRIVER_VERSION_SIZE];
+};
+
+
struct lpfc_mbx_get_sli4_parameters {
struct mbox_header header;
struct lpfc_sli4_parameters sli4_parameters;
@@ -3313,6 +3327,7 @@ struct lpfc_mqe {
struct lpfc_mbx_get_port_name get_port_name;
struct lpfc_mbx_set_feature set_feature;
struct lpfc_mbx_memory_dump_type3 mem_dump_type3;
+ struct lpfc_mbx_set_host_data set_host_data;
struct lpfc_mbx_nop nop;
} un;
};
@@ -3981,7 +3996,8 @@ union lpfc_wqe128 {
struct gen_req64_wqe gen_req;
};
-#define LPFC_GROUP_OJECT_MAGIC_NUM 0xfeaa0001
+#define LPFC_GROUP_OJECT_MAGIC_G5 0xfeaa0001
+#define LPFC_GROUP_OJECT_MAGIC_G6 0xfeaa0003
#define LPFC_FILE_TYPE_GROUP 0xf7
#define LPFC_FILE_ID_GROUP 0xa2
struct lpfc_grp_hdr {
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 734a0428ef0e..4776fd85514f 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -6279,34 +6279,36 @@ lpfc_setup_bg(struct lpfc_hba *phba, struct Scsi_Host *shost)
uint32_t old_guard;
int pagecnt = 10;
- if (lpfc_prot_mask && lpfc_prot_guard) {
+ if (phba->cfg_prot_mask && phba->cfg_prot_guard) {
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
"1478 Registering BlockGuard with the "
"SCSI layer\n");
- old_mask = lpfc_prot_mask;
- old_guard = lpfc_prot_guard;
+ old_mask = phba->cfg_prot_mask;
+ old_guard = phba->cfg_prot_guard;
/* Only allow supported values */
- lpfc_prot_mask &= (SHOST_DIF_TYPE1_PROTECTION |
+ phba->cfg_prot_mask &= (SHOST_DIF_TYPE1_PROTECTION |
SHOST_DIX_TYPE0_PROTECTION |
SHOST_DIX_TYPE1_PROTECTION);
- lpfc_prot_guard &= (SHOST_DIX_GUARD_IP | SHOST_DIX_GUARD_CRC);
+ phba->cfg_prot_guard &= (SHOST_DIX_GUARD_IP |
+ SHOST_DIX_GUARD_CRC);
/* DIF Type 1 protection for profiles AST1/C1 is end to end */
- if (lpfc_prot_mask == SHOST_DIX_TYPE1_PROTECTION)
- lpfc_prot_mask |= SHOST_DIF_TYPE1_PROTECTION;
+ if (phba->cfg_prot_mask == SHOST_DIX_TYPE1_PROTECTION)
+ phba->cfg_prot_mask |= SHOST_DIF_TYPE1_PROTECTION;
- if (lpfc_prot_mask && lpfc_prot_guard) {
- if ((old_mask != lpfc_prot_mask) ||
- (old_guard != lpfc_prot_guard))
+ if (phba->cfg_prot_mask && phba->cfg_prot_guard) {
+ if ((old_mask != phba->cfg_prot_mask) ||
+ (old_guard != phba->cfg_prot_guard))
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"1475 Registering BlockGuard with the "
"SCSI layer: mask %d guard %d\n",
- lpfc_prot_mask, lpfc_prot_guard);
+ phba->cfg_prot_mask,
+ phba->cfg_prot_guard);
- scsi_host_set_prot(shost, lpfc_prot_mask);
- scsi_host_set_guard(shost, lpfc_prot_guard);
+ scsi_host_set_prot(shost, phba->cfg_prot_mask);
+ scsi_host_set_guard(shost, phba->cfg_prot_guard);
} else
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"1479 Not Registering BlockGuard with the SCSI "
@@ -6929,6 +6931,8 @@ lpfc_sli4_read_config(struct lpfc_hba *phba)
struct lpfc_mbx_get_func_cfg *get_func_cfg;
struct lpfc_rsrc_desc_fcfcoe *desc;
char *pdesc_0;
+ uint16_t forced_link_speed;
+ uint32_t if_type;
int length, i, rc = 0, rc2;
pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
@@ -7022,6 +7026,58 @@ lpfc_sli4_read_config(struct lpfc_hba *phba)
if (rc)
goto read_cfg_out;
+ /* Update link speed if forced link speed is supported */
+ if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
+ if (if_type == LPFC_SLI_INTF_IF_TYPE_2) {
+ forced_link_speed =
+ bf_get(lpfc_mbx_rd_conf_link_speed, rd_config);
+ if (forced_link_speed) {
+ phba->hba_flag |= HBA_FORCED_LINK_SPEED;
+
+ switch (forced_link_speed) {
+ case LINK_SPEED_1G:
+ phba->cfg_link_speed =
+ LPFC_USER_LINK_SPEED_1G;
+ break;
+ case LINK_SPEED_2G:
+ phba->cfg_link_speed =
+ LPFC_USER_LINK_SPEED_2G;
+ break;
+ case LINK_SPEED_4G:
+ phba->cfg_link_speed =
+ LPFC_USER_LINK_SPEED_4G;
+ break;
+ case LINK_SPEED_8G:
+ phba->cfg_link_speed =
+ LPFC_USER_LINK_SPEED_8G;
+ break;
+ case LINK_SPEED_10G:
+ phba->cfg_link_speed =
+ LPFC_USER_LINK_SPEED_10G;
+ break;
+ case LINK_SPEED_16G:
+ phba->cfg_link_speed =
+ LPFC_USER_LINK_SPEED_16G;
+ break;
+ case LINK_SPEED_32G:
+ phba->cfg_link_speed =
+ LPFC_USER_LINK_SPEED_32G;
+ break;
+ case 0xffff:
+ phba->cfg_link_speed =
+ LPFC_USER_LINK_SPEED_AUTO;
+ break;
+ default:
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "0047 Unrecognized link "
+ "speed : %d\n",
+ forced_link_speed);
+ phba->cfg_link_speed =
+ LPFC_USER_LINK_SPEED_AUTO;
+ }
+ }
+ }
+
/* Reset the DFT_HBA_Q_DEPTH to the max xri */
length = phba->sli4_hba.max_cfg_param.max_xri -
lpfc_sli4_get_els_iocb_cnt(phba);
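
The switch added above is a one-to-one mapping from the READ_CONFIG word6 link-speed encoding to the driver's user link-speed constants, with 0xffff, and any unrecognized value, falling back to auto. The same mapping as a lookup table, purely for illustration; the lpfc source keeps the switch form, and the default case additionally logs message 0047:

	static const struct {
		uint16_t fw_speed;
		uint32_t user_speed;
	} fls_map[] = {
		{ LINK_SPEED_1G,  LPFC_USER_LINK_SPEED_1G },
		{ LINK_SPEED_2G,  LPFC_USER_LINK_SPEED_2G },
		{ LINK_SPEED_4G,  LPFC_USER_LINK_SPEED_4G },
		{ LINK_SPEED_8G,  LPFC_USER_LINK_SPEED_8G },
		{ LINK_SPEED_10G, LPFC_USER_LINK_SPEED_10G },
		{ LINK_SPEED_16G, LPFC_USER_LINK_SPEED_16G },
		{ LINK_SPEED_32G, LPFC_USER_LINK_SPEED_32G },
		{ 0xffff,         LPFC_USER_LINK_SPEED_AUTO },
	};
	int i;

	phba->cfg_link_speed = LPFC_USER_LINK_SPEED_AUTO;	/* default */
	for (i = 0; i < ARRAY_SIZE(fls_map); i++) {
		if (fls_map[i].fw_speed == forced_link_speed) {
			phba->cfg_link_speed = fls_map[i].user_speed;
			break;
		}
	}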
@@ -7256,6 +7312,7 @@ int
lpfc_sli4_queue_create(struct lpfc_hba *phba)
{
struct lpfc_queue *qdesc;
+ uint32_t wqesize;
int idx;
/*
@@ -7340,15 +7397,10 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
phba->sli4_hba.fcp_cq[idx] = qdesc;
/* Create Fast Path FCP WQs */
- if (phba->fcp_embed_io) {
- qdesc = lpfc_sli4_queue_alloc(phba,
- LPFC_WQE128_SIZE,
- LPFC_WQE128_DEF_COUNT);
- } else {
- qdesc = lpfc_sli4_queue_alloc(phba,
- phba->sli4_hba.wq_esize,
- phba->sli4_hba.wq_ecount);
- }
+ wqesize = (phba->fcp_embed_io) ?
+ LPFC_WQE128_SIZE : phba->sli4_hba.wq_esize;
+ qdesc = lpfc_sli4_queue_alloc(phba, wqesize,
+ phba->sli4_hba.wq_ecount);
if (!qdesc) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"0503 Failed allocate fast-path FCP "
@@ -10260,6 +10312,7 @@ lpfc_write_firmware(const struct firmware *fw, void *context)
int i, rc = 0;
struct lpfc_dmabuf *dmabuf, *next;
uint32_t offset = 0, temp_offset = 0;
+ uint32_t magic_number, ftype, fid, fsize;
/* It can be null in no-wait mode, sanity check */
if (!fw) {
@@ -10268,18 +10321,19 @@ lpfc_write_firmware(const struct firmware *fw, void *context)
}
image = (struct lpfc_grp_hdr *)fw->data;
+ magic_number = be32_to_cpu(image->magic_number);
+ ftype = bf_get_be32(lpfc_grp_hdr_file_type, image);
+ fid = bf_get_be32(lpfc_grp_hdr_id, image),
+ fsize = be32_to_cpu(image->size);
+
INIT_LIST_HEAD(&dma_buffer_list);
- if ((be32_to_cpu(image->magic_number) != LPFC_GROUP_OJECT_MAGIC_NUM) ||
- (bf_get_be32(lpfc_grp_hdr_file_type, image) !=
- LPFC_FILE_TYPE_GROUP) ||
- (bf_get_be32(lpfc_grp_hdr_id, image) != LPFC_FILE_ID_GROUP) ||
- (be32_to_cpu(image->size) != fw->size)) {
+ if ((magic_number != LPFC_GROUP_OJECT_MAGIC_G5 &&
+ magic_number != LPFC_GROUP_OJECT_MAGIC_G6) ||
+ ftype != LPFC_FILE_TYPE_GROUP || fsize != fw->size) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"3022 Invalid FW image found. "
- "Magic:%x Type:%x ID:%x\n",
- be32_to_cpu(image->magic_number),
- bf_get_be32(lpfc_grp_hdr_file_type, image),
- bf_get_be32(lpfc_grp_hdr_id, image));
+ "Magic:%x Type:%x ID:%x Size %d %zd\n",
+ magic_number, ftype, fid, fsize, fw->size);
rc = -EINVAL;
goto release_out;
}
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index d197aa176dee..ad350d969bdc 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -413,15 +413,13 @@ lpfc_new_scsi_buf_s3(struct lpfc_vport *vport, int num_to_alloc)
* struct fcp_cmnd, struct fcp_rsp and the number of bde's
* necessary to support the sg_tablesize.
*/
- psb->data = pci_pool_alloc(phba->lpfc_scsi_dma_buf_pool,
+ psb->data = pci_pool_zalloc(phba->lpfc_scsi_dma_buf_pool,
GFP_KERNEL, &psb->dma_handle);
if (!psb->data) {
kfree(psb);
break;
}
- /* Initialize virtual ptrs to dma_buf region. */
- memset(psb->data, 0, phba->cfg_sg_dma_buf_size);
/* Allocate iotag for psb->cur_iocbq. */
iotag = lpfc_sli_next_iotag(phba, &psb->cur_iocbq);
@@ -607,7 +605,7 @@ lpfc_sli4_fcp_xri_aborted(struct lpfc_hba *phba,
}
/**
- * lpfc_sli4_post_scsi_sgl_list - Psot blocks of scsi buffer sgls from a list
+ * lpfc_sli4_post_scsi_sgl_list - Post blocks of scsi buffer sgls from a list
* @phba: pointer to lpfc hba data structure.
* @post_sblist: pointer to the scsi buffer list.
*
@@ -736,7 +734,7 @@ lpfc_sli4_post_scsi_sgl_list(struct lpfc_hba *phba,
}
/**
- * lpfc_sli4_repost_scsi_sgl_list - Repsot all the allocated scsi buffer sgls
+ * lpfc_sli4_repost_scsi_sgl_list - Repost all the allocated scsi buffer sgls
* @phba: pointer to lpfc hba data structure.
*
* This routine walks the list of scsi buffers that have been allocated and
@@ -821,13 +819,12 @@ lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc)
* for the struct fcp_cmnd, struct fcp_rsp and the number
* of bde's necessary to support the sg_tablesize.
*/
- psb->data = pci_pool_alloc(phba->lpfc_scsi_dma_buf_pool,
+ psb->data = pci_pool_zalloc(phba->lpfc_scsi_dma_buf_pool,
GFP_KERNEL, &psb->dma_handle);
if (!psb->data) {
kfree(psb);
break;
}
- memset(psb->data, 0, phba->cfg_sg_dma_buf_size);
/*
* 4K Page alignment is CRITICAL to BlockGuard, double check
@@ -857,7 +854,7 @@ lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc)
psb->data, psb->dma_handle);
kfree(psb);
lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
- "3368 Failed to allocated IOTAG for"
+ "3368 Failed to allocate IOTAG for"
" XRI:0x%x\n", lxri);
lpfc_sli4_free_xri(phba, lxri);
break;
@@ -1136,7 +1133,7 @@ lpfc_release_scsi_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
*
* This routine does the pci dma mapping for scatter-gather list of scsi cmnd
* field of @lpfc_cmd for device with SLI-3 interface spec. This routine scans
- * through sg elements and format the bdea. This routine also initializes all
+ * through sg elements and format the bde. This routine also initializes all
* IOCB fields which are dependent on scsi command request buffer.
*
* Return codes:
@@ -1269,13 +1266,16 @@ lpfc_scsi_prep_dma_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
-/* Return if if error injection is detected by Initiator */
+/* Return BG_ERR_INIT if error injection is detected by Initiator */
#define BG_ERR_INIT 0x1
-/* Return if if error injection is detected by Target */
+/* Return BG_ERR_TGT if error injection is detected by Target */
#define BG_ERR_TGT 0x2
-/* Return if if swapping CSUM<-->CRC is required for error injection */
+/* Return BG_ERR_SWAP if swapping CSUM<-->CRC is required for error injection */
#define BG_ERR_SWAP 0x10
-/* Return if disabling Guard/Ref/App checking is required for error injection */
+/**
+ * Return BG_ERR_CHECK if disabling Guard/Ref/App checking is required for
+ * error injection
+ **/
#define BG_ERR_CHECK 0x20
/**
@@ -4139,13 +4139,13 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);
- /* The sdev is not guaranteed to be valid post scsi_done upcall. */
- cmd->scsi_done(cmd);
-
spin_lock_irqsave(&phba->hbalock, flags);
lpfc_cmd->pCmd = NULL;
spin_unlock_irqrestore(&phba->hbalock, flags);
+ /* The sdev is not guaranteed to be valid post scsi_done upcall. */
+ cmd->scsi_done(cmd);
+
/*
* If there is a thread waiting for command completion
* wake up the thread.
@@ -4822,7 +4822,7 @@ wait_for_cmpl:
ret = FAILED;
lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
"0748 abort handler timed out waiting "
- "for abortng I/O (xri:x%x) to complete: "
+ "for aborting I/O (xri:x%x) to complete: "
"ret %#x, ID %d, LUN %llu\n",
iocb->sli4_xritag, ret,
cmnd->device->id, cmnd->device->lun);
@@ -4945,26 +4945,30 @@ lpfc_check_fcp_rsp(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd)
* 0x2002 - Success.
**/
static int
-lpfc_send_taskmgmt(struct lpfc_vport *vport, struct lpfc_rport_data *rdata,
- unsigned tgt_id, uint64_t lun_id,
- uint8_t task_mgmt_cmd)
+lpfc_send_taskmgmt(struct lpfc_vport *vport, struct scsi_cmnd *cmnd,
+ unsigned int tgt_id, uint64_t lun_id,
+ uint8_t task_mgmt_cmd)
{
struct lpfc_hba *phba = vport->phba;
struct lpfc_scsi_buf *lpfc_cmd;
struct lpfc_iocbq *iocbq;
struct lpfc_iocbq *iocbqrsp;
- struct lpfc_nodelist *pnode = rdata->pnode;
+ struct lpfc_rport_data *rdata;
+ struct lpfc_nodelist *pnode;
int ret;
int status;
- if (!pnode || !NLP_CHK_NODE_ACT(pnode))
+ rdata = lpfc_rport_data_from_scsi_device(cmnd->device);
+ if (!rdata || !rdata->pnode || !NLP_CHK_NODE_ACT(rdata->pnode))
return FAILED;
+ pnode = rdata->pnode;
- lpfc_cmd = lpfc_get_scsi_buf(phba, rdata->pnode);
+ lpfc_cmd = lpfc_get_scsi_buf(phba, pnode);
if (lpfc_cmd == NULL)
return FAILED;
lpfc_cmd->timeout = phba->cfg_task_mgmt_tmo;
lpfc_cmd->rdata = rdata;
+ lpfc_cmd->pCmd = cmnd;
status = lpfc_scsi_prep_task_mgmt_cmd(vport, lpfc_cmd, lun_id,
task_mgmt_cmd);
@@ -5171,7 +5175,7 @@ lpfc_device_reset_handler(struct scsi_cmnd *cmnd)
fc_host_post_vendor_event(shost, fc_get_event_number(),
sizeof(scsi_event), (char *)&scsi_event, LPFC_NL_VENDOR_ID);
- status = lpfc_send_taskmgmt(vport, rdata, tgt_id, lun_id,
+ status = lpfc_send_taskmgmt(vport, cmnd, tgt_id, lun_id,
FCP_LUN_RESET);
lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
@@ -5249,7 +5253,7 @@ lpfc_target_reset_handler(struct scsi_cmnd *cmnd)
fc_host_post_vendor_event(shost, fc_get_event_number(),
sizeof(scsi_event), (char *)&scsi_event, LPFC_NL_VENDOR_ID);
- status = lpfc_send_taskmgmt(vport, rdata, tgt_id, lun_id,
+ status = lpfc_send_taskmgmt(vport, cmnd, tgt_id, lun_id,
FCP_TARGET_RESET);
lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
@@ -5328,7 +5332,7 @@ lpfc_bus_reset_handler(struct scsi_cmnd *cmnd)
if (!match)
continue;
- status = lpfc_send_taskmgmt(vport, ndlp->rport->dd_data,
+ status = lpfc_send_taskmgmt(vport, cmnd,
i, 0, FCP_TARGET_RESET);
if (status != SUCCESS) {
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index f4f77c5b0c83..4faa7672fc1d 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -47,6 +47,7 @@
#include "lpfc_compat.h"
#include "lpfc_debugfs.h"
#include "lpfc_vport.h"
+#include "lpfc_version.h"
/* There are only four IOCB completion types. */
typedef enum _lpfc_iocb_type {
@@ -2678,15 +2679,16 @@ lpfc_sli_iocbq_lookup(struct lpfc_hba *phba,
if (iotag != 0 && iotag <= phba->sli.last_iotag) {
cmd_iocb = phba->sli.iocbq_lookup[iotag];
- list_del_init(&cmd_iocb->list);
if (cmd_iocb->iocb_flag & LPFC_IO_ON_TXCMPLQ) {
+ /* remove from txcmpl queue list */
+ list_del_init(&cmd_iocb->list);
cmd_iocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
+ return cmd_iocb;
}
- return cmd_iocb;
}
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
- "0317 iotag x%x is out off "
+ "0317 iotag x%x is out of "
"range: max iotag x%x wd0 x%x\n",
iotag, phba->sli.last_iotag,
*(((uint32_t *) &prspiocb->iocb) + 7));
@@ -2721,8 +2723,9 @@ lpfc_sli_iocbq_lookup_by_tag(struct lpfc_hba *phba,
return cmd_iocb;
}
}
+
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
- "0372 iotag x%x is out off range: max iotag (x%x)\n",
+ "0372 iotag x%x is out of range: max iotag (x%x)\n",
iotag, phba->sli.last_iotag);
return NULL;
}
@@ -6291,6 +6294,25 @@ lpfc_sli4_repost_els_sgl_list(struct lpfc_hba *phba)
return 0;
}
+void
+lpfc_set_host_data(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
+{
+ uint32_t len;
+
+ len = sizeof(struct lpfc_mbx_set_host_data) -
+ sizeof(struct lpfc_sli4_cfg_mhdr);
+ lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
+ LPFC_MBOX_OPCODE_SET_HOST_DATA, len,
+ LPFC_SLI4_MBX_EMBED);
+
+ mbox->u.mqe.un.set_host_data.param_id = LPFC_SET_HOST_OS_DRIVER_VERSION;
+ mbox->u.mqe.un.set_host_data.param_len = 8;
+ snprintf(mbox->u.mqe.un.set_host_data.data,
+ LPFC_HOST_OS_DRIVER_VERSION_SIZE,
+ "Linux %s v"LPFC_DRIVER_VERSION,
+ (phba->hba_flag & HBA_FCOE_MODE) ? "FCoE" : "FC");
+}
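
The payload set up here is a plain host/driver descriptor: with the version bump at the bottom of this series it reads, for example, "Linux FC v11.2.0.2" (or "Linux FCoE v11.2.0.2" when HBA_FCOE_MODE is set), truncated to LPFC_HOST_OS_DRIVER_VERSION_SIZE bytes by snprintf().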
+
/**
* lpfc_sli4_hba_setup - SLI4 device initialization PCI function
* @phba: Pointer to HBA context object.
@@ -6542,6 +6564,15 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
goto out_free_mbox;
}
+ lpfc_set_host_data(phba, mboxq);
+
+ rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
+ if (rc) {
+ lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
+ "2134 Failed to set host os driver version %x",
+ rc);
+ }
+
/* Read the port's service parameters. */
rc = lpfc_read_sparam(phba, mboxq, vport->vpi);
if (rc) {
@@ -11781,6 +11812,8 @@ lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *phba,
/* Look up the ELS command IOCB and create pseudo response IOCB */
cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring,
bf_get(lpfc_wcqe_c_request_tag, wcqe));
+ /* Put the iocb back on the txcmplq */
+ lpfc_sli_ringtxcmpl_put(phba, pring, cmdiocbq);
spin_unlock_irqrestore(&pring->ring_lock, iflags);
if (unlikely(!cmdiocbq)) {
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index c9bf20eb7223..50bfc43ebcb0 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -18,7 +18,7 @@
* included with this package. *
*******************************************************************/
-#define LPFC_DRIVER_VERSION "11.2.0.0."
+#define LPFC_DRIVER_VERSION "11.2.0.2"
#define LPFC_DRIVER_NAME "lpfc"
/* Used for SLI 2/3 */
diff --git a/drivers/scsi/mac_scsi.c b/drivers/scsi/mac_scsi.c
index a590089b9397..ccb68d12692c 100644
--- a/drivers/scsi/mac_scsi.c
+++ b/drivers/scsi/mac_scsi.c
@@ -28,17 +28,15 @@
/* Definitions for the core NCR5380 driver. */
-#define NCR5380_implementation_fields unsigned char *pdma_base; \
- int pdma_residual
+#define NCR5380_implementation_fields int pdma_residual
-#define NCR5380_read(reg) macscsi_read(instance, reg)
-#define NCR5380_write(reg, value) macscsi_write(instance, reg, value)
+#define NCR5380_read(reg) in_8(hostdata->io + ((reg) << 4))
+#define NCR5380_write(reg, value) out_8(hostdata->io + ((reg) << 4), value)
-#define NCR5380_dma_xfer_len(instance, cmd, phase) \
- macscsi_dma_xfer_len(instance, cmd)
+#define NCR5380_dma_xfer_len macscsi_dma_xfer_len
#define NCR5380_dma_recv_setup macscsi_pread
#define NCR5380_dma_send_setup macscsi_pwrite
-#define NCR5380_dma_residual(instance) (hostdata->pdma_residual)
+#define NCR5380_dma_residual macscsi_dma_residual
#define NCR5380_intr macscsi_intr
#define NCR5380_queue_command macscsi_queue_command
@@ -61,20 +59,6 @@ module_param(setup_hostid, int, 0);
static int setup_toshiba_delay = -1;
module_param(setup_toshiba_delay, int, 0);
-/*
- * NCR 5380 register access functions
- */
-
-static inline char macscsi_read(struct Scsi_Host *instance, int reg)
-{
- return in_8(instance->base + (reg << 4));
-}
-
-static inline void macscsi_write(struct Scsi_Host *instance, int reg, int value)
-{
- out_8(instance->base + (reg << 4), value);
-}
-
#ifndef MODULE
static int __init mac_scsi_setup(char *str)
{
@@ -167,16 +151,15 @@ __asm__ __volatile__ \
: "0"(s), "1"(d), "2"(n) \
: "d0")
-static int macscsi_pread(struct Scsi_Host *instance,
- unsigned char *dst, int len)
+static inline int macscsi_pread(struct NCR5380_hostdata *hostdata,
+ unsigned char *dst, int len)
{
- struct NCR5380_hostdata *hostdata = shost_priv(instance);
- unsigned char *s = hostdata->pdma_base + (INPUT_DATA_REG << 4);
+ unsigned char *s = hostdata->pdma_io + (INPUT_DATA_REG << 4);
unsigned char *d = dst;
int n = len;
int transferred;
- while (!NCR5380_poll_politely(instance, BUS_AND_STATUS_REG,
+ while (!NCR5380_poll_politely(hostdata, BUS_AND_STATUS_REG,
BASR_DRQ | BASR_PHASE_MATCH,
BASR_DRQ | BASR_PHASE_MATCH, HZ / 64)) {
CP_IO_TO_MEM(s, d, n);
@@ -189,23 +172,23 @@ static int macscsi_pread(struct Scsi_Host *instance,
return 0;
/* Target changed phase early? */
- if (NCR5380_poll_politely2(instance, STATUS_REG, SR_REQ, SR_REQ,
+ if (NCR5380_poll_politely2(hostdata, STATUS_REG, SR_REQ, SR_REQ,
BUS_AND_STATUS_REG, BASR_ACK, BASR_ACK, HZ / 64) < 0)
scmd_printk(KERN_ERR, hostdata->connected,
"%s: !REQ and !ACK\n", __func__);
if (!(NCR5380_read(BUS_AND_STATUS_REG) & BASR_PHASE_MATCH))
return 0;
- dsprintk(NDEBUG_PSEUDO_DMA, instance,
+ dsprintk(NDEBUG_PSEUDO_DMA, hostdata->host,
"%s: bus error (%d/%d)\n", __func__, transferred, len);
- NCR5380_dprint(NDEBUG_PSEUDO_DMA, instance);
+ NCR5380_dprint(NDEBUG_PSEUDO_DMA, hostdata->host);
d = dst + transferred;
n = len - transferred;
}
scmd_printk(KERN_ERR, hostdata->connected,
"%s: phase mismatch or !DRQ\n", __func__);
- NCR5380_dprint(NDEBUG_PSEUDO_DMA, instance);
+ NCR5380_dprint(NDEBUG_PSEUDO_DMA, hostdata->host);
return -1;
}
@@ -270,16 +253,15 @@ __asm__ __volatile__ \
: "0"(s), "1"(d), "2"(n) \
: "d0")
-static int macscsi_pwrite(struct Scsi_Host *instance,
- unsigned char *src, int len)
+static inline int macscsi_pwrite(struct NCR5380_hostdata *hostdata,
+ unsigned char *src, int len)
{
- struct NCR5380_hostdata *hostdata = shost_priv(instance);
unsigned char *s = src;
- unsigned char *d = hostdata->pdma_base + (OUTPUT_DATA_REG << 4);
+ unsigned char *d = hostdata->pdma_io + (OUTPUT_DATA_REG << 4);
int n = len;
int transferred;
- while (!NCR5380_poll_politely(instance, BUS_AND_STATUS_REG,
+ while (!NCR5380_poll_politely(hostdata, BUS_AND_STATUS_REG,
BASR_DRQ | BASR_PHASE_MATCH,
BASR_DRQ | BASR_PHASE_MATCH, HZ / 64)) {
CP_MEM_TO_IO(s, d, n);
@@ -288,7 +270,7 @@ static int macscsi_pwrite(struct Scsi_Host *instance,
hostdata->pdma_residual = len - transferred;
/* Target changed phase early? */
- if (NCR5380_poll_politely2(instance, STATUS_REG, SR_REQ, SR_REQ,
+ if (NCR5380_poll_politely2(hostdata, STATUS_REG, SR_REQ, SR_REQ,
BUS_AND_STATUS_REG, BASR_ACK, BASR_ACK, HZ / 64) < 0)
scmd_printk(KERN_ERR, hostdata->connected,
"%s: !REQ and !ACK\n", __func__);
@@ -297,7 +279,7 @@ static int macscsi_pwrite(struct Scsi_Host *instance,
/* No bus error. */
if (n == 0) {
- if (NCR5380_poll_politely(instance, TARGET_COMMAND_REG,
+ if (NCR5380_poll_politely(hostdata, TARGET_COMMAND_REG,
TCR_LAST_BYTE_SENT,
TCR_LAST_BYTE_SENT, HZ / 64) < 0)
scmd_printk(KERN_ERR, hostdata->connected,
@@ -305,25 +287,23 @@ static int macscsi_pwrite(struct Scsi_Host *instance,
return 0;
}
- dsprintk(NDEBUG_PSEUDO_DMA, instance,
+ dsprintk(NDEBUG_PSEUDO_DMA, hostdata->host,
"%s: bus error (%d/%d)\n", __func__, transferred, len);
- NCR5380_dprint(NDEBUG_PSEUDO_DMA, instance);
+ NCR5380_dprint(NDEBUG_PSEUDO_DMA, hostdata->host);
s = src + transferred;
n = len - transferred;
}
scmd_printk(KERN_ERR, hostdata->connected,
"%s: phase mismatch or !DRQ\n", __func__);
- NCR5380_dprint(NDEBUG_PSEUDO_DMA, instance);
+ NCR5380_dprint(NDEBUG_PSEUDO_DMA, hostdata->host);
return -1;
}
-static int macscsi_dma_xfer_len(struct Scsi_Host *instance,
+static int macscsi_dma_xfer_len(struct NCR5380_hostdata *hostdata,
struct scsi_cmnd *cmd)
{
- struct NCR5380_hostdata *hostdata = shost_priv(instance);
-
if (hostdata->flags & FLAG_NO_PSEUDO_DMA ||
cmd->SCp.this_residual < 16)
return 0;
@@ -331,6 +311,11 @@ static int macscsi_dma_xfer_len(struct Scsi_Host *instance,
return cmd->SCp.this_residual;
}
+static int macscsi_dma_residual(struct NCR5380_hostdata *hostdata)
+{
+ return hostdata->pdma_residual;
+}
+
#include "NCR5380.c"
#define DRV_MODULE_NAME "mac_scsi"
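
The register accessors above compile because NCR5380.c is textually included here and the NCR5380_read()/NCR5380_write() macros expand inside core-driver functions that already hold a local struct NCR5380_hostdata *hostdata; the Scsi_Host instance pointer is no longer needed for register access. Schematically, with the function invented for illustration:

static void ncr5380_example(struct NCR5380_hostdata *hostdata)
{
	/* NCR5380_read(STATUS_REG) expands to the mac_scsi accessor: */
	unsigned char st = in_8(hostdata->io + (STATUS_REG << 4));
	(void)st;
}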
@@ -356,6 +341,7 @@ static struct scsi_host_template mac_scsi_template = {
static int __init mac_scsi_probe(struct platform_device *pdev)
{
struct Scsi_Host *instance;
+ struct NCR5380_hostdata *hostdata;
int error;
int host_flags = 0;
struct resource *irq, *pio_mem, *pdma_mem = NULL;
@@ -388,17 +374,18 @@ static int __init mac_scsi_probe(struct platform_device *pdev)
if (!instance)
return -ENOMEM;
- instance->base = pio_mem->start;
if (irq)
instance->irq = irq->start;
else
instance->irq = NO_IRQ;
- if (pdma_mem && setup_use_pdma) {
- struct NCR5380_hostdata *hostdata = shost_priv(instance);
+ hostdata = shost_priv(instance);
+ hostdata->base = pio_mem->start;
+ hostdata->io = (void *)pio_mem->start;
- hostdata->pdma_base = (unsigned char *)pdma_mem->start;
- } else
+ if (pdma_mem && setup_use_pdma)
+ hostdata->pdma_io = (void *)pdma_mem->start;
+ else
host_flags |= FLAG_NO_PSEUDO_DMA;
host_flags |= setup_toshiba_delay > 0 ? FLAG_TOSHIBA_DELAY : 0;
diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h
index 3aaea713bf37..fdd519c1dd57 100644
--- a/drivers/scsi/megaraid/megaraid_sas.h
+++ b/drivers/scsi/megaraid/megaraid_sas.h
@@ -35,8 +35,8 @@
/*
* MegaRAID SAS Driver meta data
*/
-#define MEGASAS_VERSION "06.811.02.00-rc1"
-#define MEGASAS_RELDATE "April 12, 2016"
+#define MEGASAS_VERSION "06.812.07.00-rc1"
+#define MEGASAS_RELDATE "August 22, 2016"
/*
* Device IDs
@@ -1429,6 +1429,8 @@ enum FW_BOOT_CONTEXT {
#define MR_MAX_REPLY_QUEUES_EXT_OFFSET_SHIFT 14
#define MR_MAX_MSIX_REG_ARRAY 16
#define MR_RDPQ_MODE_OFFSET 0X00800000
+#define MR_CAN_HANDLE_SYNC_CACHE_OFFSET 0X01000000
+
/*
* register set for both 1068 and 1078 controllers
* structure extended for 1078 registers
@@ -2118,7 +2120,6 @@ struct megasas_instance {
u32 ctrl_context_pages;
struct megasas_ctrl_info *ctrl_info;
unsigned int msix_vectors;
- struct msix_entry msixentry[MEGASAS_MAX_MSIX_QUEUES];
struct megasas_irq_context irq_context[MEGASAS_MAX_MSIX_QUEUES];
u64 map_id;
u64 pd_seq_map_id;
@@ -2140,6 +2141,7 @@ struct megasas_instance {
u8 is_imr;
u8 is_rdpq;
bool dev_handle;
+ bool fw_sync_cache_support;
};
struct MR_LD_VF_MAP {
u32 size;
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index d8b1fbd4c8aa..6484c382f670 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -1700,11 +1700,8 @@ megasas_queue_command(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
goto out_done;
}
- /*
- * FW takes care of flush cache on its own for Virtual Disk.
- * No need to send it down for VD. For JBOD send SYNCHRONIZE_CACHE to FW.
- */
- if ((scmd->cmnd[0] == SYNCHRONIZE_CACHE) && MEGASAS_IS_LOGICAL(scmd)) {
+ if ((scmd->cmnd[0] == SYNCHRONIZE_CACHE) && MEGASAS_IS_LOGICAL(scmd) &&
+ (!instance->fw_sync_cache_support)) {
scmd->result = DID_OK << 16;
goto out_done;
}
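
With this hunk, a SYNCHRONIZE_CACHE aimed at a logical drive is completed inside the driver only when the firmware has not advertised sync-cache handling; the capability bit is latched from scratch_pad_2 at IOC init (see the megaraid_sas_fusion.c hunk further below). The predicate, restated as a standalone expression for clarity:

	/* complete locally iff: SYNC_CACHE + logical device + no FW support */
	bool complete_locally = (scmd->cmnd[0] == SYNCHRONIZE_CACHE) &&
				MEGASAS_IS_LOGICAL(scmd) &&
				!instance->fw_sync_cache_support;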
@@ -4840,7 +4837,7 @@ fail_alloc_cmds:
}
/*
- * megasas_setup_irqs_msix - register legacy interrupts.
+ * megasas_setup_irqs_ioapic - register legacy interrupts.
* @instance: Adapter soft state
*
* Do not enable interrupt, only setup ISRs.
@@ -4855,8 +4852,9 @@ megasas_setup_irqs_ioapic(struct megasas_instance *instance)
pdev = instance->pdev;
instance->irq_context[0].instance = instance;
instance->irq_context[0].MSIxIndex = 0;
- if (request_irq(pdev->irq, instance->instancet->service_isr,
- IRQF_SHARED, "megasas", &instance->irq_context[0])) {
+ if (request_irq(pci_irq_vector(pdev, 0),
+ instance->instancet->service_isr, IRQF_SHARED,
+ "megasas", &instance->irq_context[0])) {
dev_err(&instance->pdev->dev,
"Failed to register IRQ from %s %d\n",
__func__, __LINE__);
@@ -4877,28 +4875,23 @@ megasas_setup_irqs_ioapic(struct megasas_instance *instance)
static int
megasas_setup_irqs_msix(struct megasas_instance *instance, u8 is_probe)
{
- int i, j, cpu;
+ int i, j;
struct pci_dev *pdev;
pdev = instance->pdev;
/* Try MSI-x */
- cpu = cpumask_first(cpu_online_mask);
for (i = 0; i < instance->msix_vectors; i++) {
instance->irq_context[i].instance = instance;
instance->irq_context[i].MSIxIndex = i;
- if (request_irq(instance->msixentry[i].vector,
+ if (request_irq(pci_irq_vector(pdev, i),
instance->instancet->service_isr, 0, "megasas",
&instance->irq_context[i])) {
dev_err(&instance->pdev->dev,
"Failed to register IRQ for vector %d.\n", i);
- for (j = 0; j < i; j++) {
- if (smp_affinity_enable)
- irq_set_affinity_hint(
- instance->msixentry[j].vector, NULL);
- free_irq(instance->msixentry[j].vector,
- &instance->irq_context[j]);
- }
+ for (j = 0; j < i; j++)
+ free_irq(pci_irq_vector(pdev, j),
+ &instance->irq_context[j]);
/* Retry irq register for IO_APIC*/
instance->msix_vectors = 0;
if (is_probe)
@@ -4906,14 +4899,6 @@ megasas_setup_irqs_msix(struct megasas_instance *instance, u8 is_probe)
else
return -1;
}
- if (smp_affinity_enable) {
- if (irq_set_affinity_hint(instance->msixentry[i].vector,
- get_cpu_mask(cpu)))
- dev_err(&instance->pdev->dev,
- "Failed to set affinity hint"
- " for cpu %d\n", cpu);
- cpu = cpumask_next(cpu, cpu_online_mask);
- }
}
return 0;
}
@@ -4930,14 +4915,12 @@ megasas_destroy_irqs(struct megasas_instance *instance) {
if (instance->msix_vectors)
for (i = 0; i < instance->msix_vectors; i++) {
- if (smp_affinity_enable)
- irq_set_affinity_hint(
- instance->msixentry[i].vector, NULL);
- free_irq(instance->msixentry[i].vector,
+ free_irq(pci_irq_vector(instance->pdev, i),
&instance->irq_context[i]);
}
else
- free_irq(instance->pdev->irq, &instance->irq_context[0]);
+ free_irq(pci_irq_vector(instance->pdev, 0),
+ &instance->irq_context[0]);
}
/**
@@ -5095,6 +5078,8 @@ static int megasas_init_fw(struct megasas_instance *instance)
msix_enable = (instance->instancet->read_fw_status_reg(reg_set) &
0x4000000) >> 0x1a;
if (msix_enable && !msix_disable) {
+ int irq_flags = PCI_IRQ_MSIX;
+
scratch_pad_2 = readl
(&instance->reg_set->outbound_scratch_pad_2);
/* Check max MSI-X vectors */
@@ -5131,15 +5116,18 @@ static int megasas_init_fw(struct megasas_instance *instance)
/* Don't bother allocating more MSI-X vectors than cpus */
instance->msix_vectors = min(instance->msix_vectors,
(unsigned int)num_online_cpus());
- for (i = 0; i < instance->msix_vectors; i++)
- instance->msixentry[i].entry = i;
- i = pci_enable_msix_range(instance->pdev, instance->msixentry,
- 1, instance->msix_vectors);
+ if (smp_affinity_enable)
+ irq_flags |= PCI_IRQ_AFFINITY;
+ i = pci_alloc_irq_vectors(instance->pdev, 1,
+ instance->msix_vectors, irq_flags);
if (i > 0)
instance->msix_vectors = i;
else
instance->msix_vectors = 0;
}
+ i = pci_alloc_irq_vectors(instance->pdev, 1, 1, PCI_IRQ_LEGACY);
+ if (i < 0)
+ goto fail_setup_irqs;
dev_info(&instance->pdev->dev,
"firmware supports msix\t: (%d)", fw_msix_count);
@@ -5152,11 +5140,6 @@ static int megasas_init_fw(struct megasas_instance *instance)
tasklet_init(&instance->isr_tasklet, instance->instancet->tasklet,
(unsigned long)instance);
- if (instance->msix_vectors ?
- megasas_setup_irqs_msix(instance, 1) :
- megasas_setup_irqs_ioapic(instance))
- goto fail_setup_irqs;
-
instance->ctrl_info = kzalloc(sizeof(struct megasas_ctrl_info),
GFP_KERNEL);
if (instance->ctrl_info == NULL)
@@ -5172,6 +5155,10 @@ static int megasas_init_fw(struct megasas_instance *instance)
if (instance->instancet->init_adapter(instance))
goto fail_init_adapter;
+ if (instance->msix_vectors ?
+ megasas_setup_irqs_msix(instance, 1) :
+ megasas_setup_irqs_ioapic(instance))
+ goto fail_init_adapter;
instance->instancet->enable_intr(instance);
@@ -5315,7 +5302,7 @@ fail_init_adapter:
megasas_destroy_irqs(instance);
fail_setup_irqs:
if (instance->msix_vectors)
- pci_disable_msix(instance->pdev);
+ pci_free_irq_vectors(instance->pdev);
instance->msix_vectors = 0;
fail_ready_state:
kfree(instance->ctrl_info);
@@ -5584,7 +5571,6 @@ static int megasas_io_attach(struct megasas_instance *instance)
/*
* Export parameters required by SCSI mid-layer
*/
- host->irq = instance->pdev->irq;
host->unique_id = instance->unique_id;
host->can_queue = instance->max_scsi_cmds;
host->this_id = instance->init_id;
@@ -5947,7 +5933,7 @@ fail_io_attach:
else
megasas_release_mfi(instance);
if (instance->msix_vectors)
- pci_disable_msix(instance->pdev);
+ pci_free_irq_vectors(instance->pdev);
fail_init_mfi:
fail_alloc_dma_buf:
if (instance->evt_detail)
@@ -6105,7 +6091,7 @@ megasas_suspend(struct pci_dev *pdev, pm_message_t state)
megasas_destroy_irqs(instance);
if (instance->msix_vectors)
- pci_disable_msix(instance->pdev);
+ pci_free_irq_vectors(instance->pdev);
pci_save_state(pdev);
pci_disable_device(pdev);
@@ -6125,6 +6111,7 @@ megasas_resume(struct pci_dev *pdev)
int rval;
struct Scsi_Host *host;
struct megasas_instance *instance;
+ int irq_flags = PCI_IRQ_LEGACY;
instance = pci_get_drvdata(pdev);
host = instance->host;
@@ -6160,9 +6147,15 @@ megasas_resume(struct pci_dev *pdev)
goto fail_ready_state;
/* Now re-enable MSI-X */
- if (instance->msix_vectors &&
- pci_enable_msix_exact(instance->pdev, instance->msixentry,
- instance->msix_vectors))
+ if (instance->msix_vectors) {
+ irq_flags = PCI_IRQ_MSIX;
+ if (smp_affinity_enable)
+ irq_flags |= PCI_IRQ_AFFINITY;
+ }
+ rval = pci_alloc_irq_vectors(instance->pdev, 1,
+ instance->msix_vectors ?
+ instance->msix_vectors : 1, irq_flags);
+ if (rval < 0)
goto fail_reenable_msix;
if (instance->ctrl_context) {
@@ -6245,6 +6238,34 @@ fail_reenable_msix:
#define megasas_resume NULL
#endif
+static inline int
+megasas_wait_for_adapter_operational(struct megasas_instance *instance)
+{
+ int wait_time = MEGASAS_RESET_WAIT_TIME * 2;
+ int i;
+
+ if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR)
+ return 1;
+
+ for (i = 0; i < wait_time; i++) {
+ if (atomic_read(&instance->adprecovery) == MEGASAS_HBA_OPERATIONAL)
+ break;
+
+ if (!(i % MEGASAS_RESET_NOTICE_INTERVAL))
+ dev_notice(&instance->pdev->dev, "waiting for controller reset to finish\n");
+
+ msleep(1000);
+ }
+
+ if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) {
+ dev_info(&instance->pdev->dev, "%s timed out while waiting for HBA to recover.\n",
+ __func__);
+ return 1;
+ }
+
+ return 0;
+}
+
/**
* megasas_detach_one - PCI hot"un"plug entry point
* @pdev: PCI device structure
@@ -6269,9 +6290,14 @@ static void megasas_detach_one(struct pci_dev *pdev)
if (instance->fw_crash_state != UNAVAILABLE)
megasas_free_host_crash_buffer(instance);
scsi_remove_host(instance->host);
+
+ if (megasas_wait_for_adapter_operational(instance))
+ goto skip_firing_dcmds;
+
megasas_flush_cache(instance);
megasas_shutdown_controller(instance, MR_DCMD_CTRL_SHUTDOWN);
+skip_firing_dcmds:
/* cancel the delayed work if this work still in queue*/
if (instance->ev != NULL) {
struct megasas_aen_event *ev = instance->ev;
@@ -6302,7 +6328,7 @@ static void megasas_detach_one(struct pci_dev *pdev)
megasas_destroy_irqs(instance);
if (instance->msix_vectors)
- pci_disable_msix(instance->pdev);
+ pci_free_irq_vectors(instance->pdev);
if (instance->ctrl_context) {
megasas_release_fusion(instance);
@@ -6385,13 +6411,19 @@ static void megasas_shutdown(struct pci_dev *pdev)
struct megasas_instance *instance = pci_get_drvdata(pdev);
instance->unload = 1;
+
+ if (megasas_wait_for_adapter_operational(instance))
+ goto skip_firing_dcmds;
+
megasas_flush_cache(instance);
megasas_shutdown_controller(instance, MR_DCMD_CTRL_SHUTDOWN);
+
+skip_firing_dcmds:
instance->instancet->disable_intr(instance);
megasas_destroy_irqs(instance);
if (instance->msix_vectors)
- pci_disable_msix(instance->pdev);
+ pci_free_irq_vectors(instance->pdev);
}
/**
@@ -6752,8 +6784,7 @@ static int megasas_mgmt_ioctl_fw(struct file *file, unsigned long arg)
if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) {
spin_unlock_irqrestore(&instance->hba_lock, flags);
- dev_err(&instance->pdev->dev, "timed out while"
- "waiting for HBA to recover\n");
+ dev_err(&instance->pdev->dev, "timed out while waiting for HBA to recover\n");
error = -ENODEV;
goto out_up;
}
@@ -6821,8 +6852,7 @@ static int megasas_mgmt_ioctl_aen(struct file *file, unsigned long arg)
spin_lock_irqsave(&instance->hba_lock, flags);
if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) {
spin_unlock_irqrestore(&instance->hba_lock, flags);
- dev_err(&instance->pdev->dev, "timed out while waiting"
- "for HBA to recover\n");
+ dev_err(&instance->pdev->dev, "timed out while waiting for HBA to recover\n");
return -ENODEV;
}
spin_unlock_irqrestore(&instance->hba_lock, flags);
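
Taken together, the interrupt hunks above move megaraid_sas from a driver-managed msix_entry[] array to the managed pci_alloc_irq_vectors() API: vectors are allocated once (optionally with core-managed affinity), looked up with pci_irq_vector() at request_irq() time, and returned with pci_free_irq_vectors(). The generic lifecycle, sketched with placeholder names (pdev, want_vecs, handler, ctx):

	int nvec, i;
	unsigned int flags = PCI_IRQ_MSIX;

	if (smp_affinity_enable)
		flags |= PCI_IRQ_AFFINITY;	/* core spreads vectors over CPUs */

	nvec = pci_alloc_irq_vectors(pdev, 1, want_vecs, flags);
	if (nvec < 0)				/* fall back to one INTx vector */
		nvec = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_LEGACY);

	for (i = 0; i < nvec; i++)
		if (request_irq(pci_irq_vector(pdev, i), handler, 0,
				"megasas", &ctx[i]))
			goto unwind;		/* free_irq() earlier vectors */
	/* ... on teardown: free_irq() each vector, then ... */
	pci_free_irq_vectors(pdev);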
diff --git a/drivers/scsi/megaraid/megaraid_sas_fp.c b/drivers/scsi/megaraid/megaraid_sas_fp.c
index e413113c86ac..f237d0003df3 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fp.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fp.c
@@ -782,7 +782,8 @@ static u8 mr_spanset_get_phy_params(struct megasas_instance *instance, u32 ld,
(raid->regTypeReqOnRead != REGION_TYPE_UNUSED))))
pRAID_Context->regLockFlags = REGION_TYPE_EXCLUSIVE;
else if (raid->level == 1) {
- pd = MR_ArPdGet(arRef, physArm + 1, map);
+ physArm = physArm + 1;
+ pd = MR_ArPdGet(arRef, physArm, map);
if (pd != MR_PD_INVALID)
*pDevHandle = MR_PdDevHandleGet(pd, map);
}
@@ -879,7 +880,8 @@ u8 MR_GetPhyParams(struct megasas_instance *instance, u32 ld, u64 stripRow,
pRAID_Context->regLockFlags = REGION_TYPE_EXCLUSIVE;
else if (raid->level == 1) {
/* Get alternate Pd. */
- pd = MR_ArPdGet(arRef, physArm + 1, map);
+ physArm = physArm + 1;
+ pd = MR_ArPdGet(arRef, physArm, map);
if (pd != MR_PD_INVALID)
/* Get dev handle from Pd */
*pDevHandle = MR_PdDevHandleGet(pd, map);
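[Editor's note] Both megaraid_sas_fp.c hunks make the same fix: for a RAID1 read served from the mirror, the incremented arm number is written back into physArm instead of being passed to MR_ArPdGet() as a throwaway expression, so every later consumer of physArm (such as the span/arm value handed to firmware) describes the arm that was actually selected. A self-contained sketch of why the write-back matters (MR_ArPdGet() and the shift value are illustrative stand-ins):

    #include <stdio.h>

    /* Stand-in for the driver's array-reference lookup. */
    static unsigned int MR_ArPdGet(unsigned int arRef, unsigned char arm)
    {
            return arRef * 8 + arm; /* pretend physical-device index */
    }

    int main(void)
    {
            unsigned char physArm = 0;      /* primary arm of the RAID1 row */
            unsigned int arRef = 3, pd, span = 1, span_arm;

            /* Read goes to the alternate arm: update physArm itself. */
            physArm = physArm + 1;
            pd = MR_ArPdGet(arRef, physArm);

            /* Later consumers now see the arm that was really used. */
            span_arm = (span << 5) | physArm;       /* shift is illustrative */
            printf("pd=%u span_arm=0x%x\n", pd, span_arm);
            return 0;
    }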
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c
index 52d8bbf7feb5..24778ba4b6e8 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fusion.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c
@@ -748,6 +748,11 @@ megasas_ioc_init_fusion(struct megasas_instance *instance)
goto fail_fw_init;
}
+ instance->fw_sync_cache_support = (scratch_pad_2 &
+ MR_CAN_HANDLE_SYNC_CACHE_OFFSET) ? 1 : 0;
+ dev_info(&instance->pdev->dev, "FW supports sync cache\t: %s\n",
+ instance->fw_sync_cache_support ? "Yes" : "No");
+
IOCInitMessage =
dma_alloc_coherent(&instance->pdev->dev,
sizeof(struct MPI2_IOC_INIT_REQUEST),
@@ -2000,6 +2005,8 @@ megasas_build_syspd_fusion(struct megasas_instance *instance,
io_request->DevHandle = pd_sync->seq[pd_index].devHandle;
pRAID_Context->regLockFlags |=
(MR_RL_FLAGS_SEQ_NUM_ENABLE|MR_RL_FLAGS_GRANT_DESTINATION_CUDA);
+ pRAID_Context->Type = MPI2_TYPE_CUDA;
+ pRAID_Context->nseg = 0x1;
} else if (fusion->fast_path_io) {
pRAID_Context->VirtualDiskTgtId = cpu_to_le16(device_id);
pRAID_Context->configSeqNum = 0;
@@ -2035,12 +2042,10 @@ megasas_build_syspd_fusion(struct megasas_instance *instance,
pRAID_Context->timeoutValue =
cpu_to_le16((os_timeout_value > timeout_limit) ?
timeout_limit : os_timeout_value);
- if (fusion->adapter_type == INVADER_SERIES) {
- pRAID_Context->Type = MPI2_TYPE_CUDA;
- pRAID_Context->nseg = 0x1;
+ if (fusion->adapter_type == INVADER_SERIES)
io_request->IoFlags |=
cpu_to_le16(MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH);
- }
+
cmd->request_desc->SCSIIO.RequestFlags =
(MPI2_REQ_DESCRIPT_FLAGS_FP_IO <<
MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
@@ -2463,12 +2468,15 @@ irqreturn_t megasas_isr_fusion(int irq, void *devp)
/* Start collecting crash, if DMA bit is done */
if ((fw_state == MFI_STATE_FAULT) && dma_state)
schedule_work(&instance->crash_init);
- else if (fw_state == MFI_STATE_FAULT)
- schedule_work(&instance->work_init);
+ else if (fw_state == MFI_STATE_FAULT) {
+ if (instance->unload == 0)
+ schedule_work(&instance->work_init);
+ }
} else if (fw_state == MFI_STATE_FAULT) {
 dev_warn(&instance->pdev->dev, "Iop2SysDoorbellInt for scsi%d\n",
 instance->host->host_no);
- schedule_work(&instance->work_init);
+ if (instance->unload == 0)
+ schedule_work(&instance->work_init);
}
}
@@ -2823,6 +2831,7 @@ int megasas_wait_for_outstanding_fusion(struct megasas_instance *instance,
dev_err(&instance->pdev->dev, "pending commands remain after waiting, "
"will reset adapter scsi%d.\n",
instance->host->host_no);
+ *convert = 1;
retval = 1;
}
out:
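[Editor's note] Two of the megaraid_sas_fusion.c hunks gate schedule_work() on instance->unload so that fault-recovery work is never queued while the driver is being torn down; the *convert = 1 change separately tells the reset path that commands remained outstanding after the wait. A tiny sketch of the unload-guard pattern in isolation (names hypothetical):

    #include <stdio.h>

    static int unload;      /* stand-in for instance->unload */

    static void schedule_recovery_work(void)
    {
            printf("recovery work queued\n");
    }

    static void on_fault_interrupt(void)
    {
            /* Suppress recovery work once teardown has started. */
            if (unload == 0)
                    schedule_recovery_work();
    }

    int main(void)
    {
            on_fault_interrupt();   /* queued */
            unload = 1;
            on_fault_interrupt();   /* silently skipped */
            return 0;
    }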
diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h b/drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h
index 95356a82ee99..fa61baf7c74d 100644
--- a/drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h
+++ b/drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h
@@ -478,6 +478,13 @@ typedef struct _MPI2_CONFIG_REPLY {
#define MPI26_MFGPAGE_DEVID_SAS3324_3 (0x00C2)
#define MPI26_MFGPAGE_DEVID_SAS3324_4 (0x00C3)
+#define MPI26_MFGPAGE_DEVID_SAS3516 (0x00AA)
+#define MPI26_MFGPAGE_DEVID_SAS3516_1 (0x00AB)
+#define MPI26_MFGPAGE_DEVID_SAS3416 (0x00AC)
+#define MPI26_MFGPAGE_DEVID_SAS3508 (0x00AD)
+#define MPI26_MFGPAGE_DEVID_SAS3508_1 (0x00AE)
+#define MPI26_MFGPAGE_DEVID_SAS3408 (0x00AF)
+
/*Manufacturing Page 0 */
typedef struct _MPI2_CONFIG_PAGE_MAN_0 {
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c
index a1a5ceb42ce6..f00ef88a378a 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.c
@@ -849,7 +849,7 @@ _base_async_event(struct MPT3SAS_ADAPTER *ioc, u8 msix_index, u32 reply)
ack_request->EventContext = mpi_reply->EventContext;
ack_request->VF_ID = 0; /* TODO */
ack_request->VP_ID = 0;
- mpt3sas_base_put_smid_default(ioc, smid);
+ ioc->put_smid_default(ioc, smid);
out:
@@ -1078,7 +1078,7 @@ _base_interrupt(int irq, void *bus_id)
* new reply host index value in ReplyPostIndex Field and msix_index
* value in MSIxIndex field.
*/
- if (ioc->msix96_vector)
+ if (ioc->combined_reply_queue)
writel(reply_q->reply_post_host_index | ((msix_index & 7) <<
MPI2_RPHI_MSIX_INDEX_SHIFT),
ioc->replyPostRegisterIndex[msix_index/8]);
@@ -1959,7 +1959,7 @@ _base_enable_msix(struct MPT3SAS_ADAPTER *ioc)
{
struct msix_entry *entries, *a;
int r;
- int i;
+ int i, local_max_msix_vectors;
u8 try_msix = 0;
if (msix_disable == -1 || msix_disable == 0)
@@ -1979,13 +1979,15 @@ _base_enable_msix(struct MPT3SAS_ADAPTER *ioc)
ioc->cpu_count, max_msix_vectors);
if (!ioc->rdpq_array_enable && max_msix_vectors == -1)
- max_msix_vectors = 8;
+ local_max_msix_vectors = 8;
+ else
+ local_max_msix_vectors = max_msix_vectors;
- if (max_msix_vectors > 0) {
- ioc->reply_queue_count = min_t(int, max_msix_vectors,
+ if (local_max_msix_vectors > 0) {
+ ioc->reply_queue_count = min_t(int, local_max_msix_vectors,
ioc->reply_queue_count);
ioc->msix_vector_count = ioc->reply_queue_count;
- } else if (max_msix_vectors == 0)
+ } else if (local_max_msix_vectors == 0)
goto try_ioapic;
if (ioc->msix_vector_count < ioc->cpu_count)
@@ -2050,7 +2052,7 @@ mpt3sas_base_unmap_resources(struct MPT3SAS_ADAPTER *ioc)
_base_free_irq(ioc);
_base_disable_msix(ioc);
- if (ioc->msix96_vector) {
+ if (ioc->combined_reply_queue) {
kfree(ioc->replyPostRegisterIndex);
ioc->replyPostRegisterIndex = NULL;
}
@@ -2160,7 +2162,7 @@ mpt3sas_base_map_resources(struct MPT3SAS_ADAPTER *ioc)
/* Use the Combined reply queue feature only for SAS3 C0 & higher
* revision HBAs and also only when reply queue count is greater than 8
*/
- if (ioc->msix96_vector && ioc->reply_queue_count > 8) {
+ if (ioc->combined_reply_queue && ioc->reply_queue_count > 8) {
/* Determine the Supplemental Reply Post Host Index Registers
 * Addresses. The Supplemental Reply Post Host Index Registers
 * start at offset MPI25_SUP_REPLY_POST_HOST_INDEX_OFFSET and
@@ -2168,7 +2170,7 @@ mpt3sas_base_map_resources(struct MPT3SAS_ADAPTER *ioc)
* MPT3_SUP_REPLY_POST_HOST_INDEX_REG_OFFSET from previous one.
*/
ioc->replyPostRegisterIndex = kcalloc(
- MPT3_SUP_REPLY_POST_HOST_INDEX_REG_COUNT,
+ ioc->combined_reply_index_count,
sizeof(resource_size_t *), GFP_KERNEL);
if (!ioc->replyPostRegisterIndex) {
dfailprintk(ioc, printk(MPT3SAS_FMT
@@ -2178,14 +2180,14 @@ mpt3sas_base_map_resources(struct MPT3SAS_ADAPTER *ioc)
goto out_fail;
}
- for (i = 0; i < MPT3_SUP_REPLY_POST_HOST_INDEX_REG_COUNT; i++) {
+ for (i = 0; i < ioc->combined_reply_index_count; i++) {
ioc->replyPostRegisterIndex[i] = (resource_size_t *)
((u8 *)&ioc->chip->Doorbell +
MPI25_SUP_REPLY_POST_HOST_INDEX_OFFSET +
(i * MPT3_SUP_REPLY_POST_HOST_INDEX_REG_OFFSET));
}
} else
- ioc->msix96_vector = 0;
+ ioc->combined_reply_queue = 0;
if (ioc->is_warpdrive) {
ioc->reply_post_host_index[0] = (resource_size_t __iomem *)
@@ -2462,15 +2464,15 @@ _base_writeq(__u64 b, volatile void __iomem *addr, spinlock_t *writeq_lock)
#endif
/**
- * mpt3sas_base_put_smid_scsi_io - send SCSI_IO request to firmware
+ * _base_put_smid_scsi_io - send SCSI_IO request to firmware
* @ioc: per adapter object
* @smid: system request message index
* @handle: device handle
*
* Return nothing.
*/
-void
-mpt3sas_base_put_smid_scsi_io(struct MPT3SAS_ADAPTER *ioc, u16 smid, u16 handle)
+static void
+_base_put_smid_scsi_io(struct MPT3SAS_ADAPTER *ioc, u16 smid, u16 handle)
{
Mpi2RequestDescriptorUnion_t descriptor;
u64 *request = (u64 *)&descriptor;
@@ -2486,15 +2488,15 @@ mpt3sas_base_put_smid_scsi_io(struct MPT3SAS_ADAPTER *ioc, u16 smid, u16 handle)
}
/**
- * mpt3sas_base_put_smid_fast_path - send fast path request to firmware
+ * _base_put_smid_fast_path - send fast path request to firmware
* @ioc: per adapter object
* @smid: system request message index
* @handle: device handle
*
* Return nothing.
*/
-void
-mpt3sas_base_put_smid_fast_path(struct MPT3SAS_ADAPTER *ioc, u16 smid,
+static void
+_base_put_smid_fast_path(struct MPT3SAS_ADAPTER *ioc, u16 smid,
u16 handle)
{
Mpi2RequestDescriptorUnion_t descriptor;
@@ -2511,14 +2513,14 @@ mpt3sas_base_put_smid_fast_path(struct MPT3SAS_ADAPTER *ioc, u16 smid,
}
/**
- * mpt3sas_base_put_smid_hi_priority - send Task Managment request to firmware
+ * _base_put_smid_hi_priority - send Task Management request to firmware
* @ioc: per adapter object
* @smid: system request message index
 * @msix_task: msix_task will be same as msix of IO in case of task abort, else 0.
* Return nothing.
*/
-void
-mpt3sas_base_put_smid_hi_priority(struct MPT3SAS_ADAPTER *ioc, u16 smid,
+static void
+_base_put_smid_hi_priority(struct MPT3SAS_ADAPTER *ioc, u16 smid,
u16 msix_task)
{
Mpi2RequestDescriptorUnion_t descriptor;
@@ -2535,14 +2537,14 @@ mpt3sas_base_put_smid_hi_priority(struct MPT3SAS_ADAPTER *ioc, u16 smid,
}
/**
- * mpt3sas_base_put_smid_default - Default, primarily used for config pages
+ * _base_put_smid_default - Default, primarily used for config pages
* @ioc: per adapter object
* @smid: system request message index
*
* Return nothing.
*/
-void
-mpt3sas_base_put_smid_default(struct MPT3SAS_ADAPTER *ioc, u16 smid)
+static void
+_base_put_smid_default(struct MPT3SAS_ADAPTER *ioc, u16 smid)
{
Mpi2RequestDescriptorUnion_t descriptor;
u64 *request = (u64 *)&descriptor;
@@ -2557,6 +2559,95 @@ mpt3sas_base_put_smid_default(struct MPT3SAS_ADAPTER *ioc, u16 smid)
}
/**
+ * _base_put_smid_scsi_io_atomic - send SCSI_IO request to firmware using
+ * Atomic Request Descriptor
+ * @ioc: per adapter object
+ * @smid: system request message index
+ * @handle: device handle, unused in this function, for function type match
+ *
+ * Return nothing.
+ */
+static void
+_base_put_smid_scsi_io_atomic(struct MPT3SAS_ADAPTER *ioc, u16 smid,
+ u16 handle)
+{
+ Mpi26AtomicRequestDescriptor_t descriptor;
+ u32 *request = (u32 *)&descriptor;
+
+ descriptor.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
+ descriptor.MSIxIndex = _base_get_msix_index(ioc);
+ descriptor.SMID = cpu_to_le16(smid);
+
+ writel(cpu_to_le32(*request), &ioc->chip->AtomicRequestDescriptorPost);
+}
+
+/**
+ * _base_put_smid_fast_path_atomic - send fast path request to firmware
+ * using Atomic Request Descriptor
+ * @ioc: per adapter object
+ * @smid: system request message index
+ * @handle: device handle, unused in this function, for function type match
+ *
+ * Return nothing.
+ */
+static void
+_base_put_smid_fast_path_atomic(struct MPT3SAS_ADAPTER *ioc, u16 smid,
+ u16 handle)
+{
+ Mpi26AtomicRequestDescriptor_t descriptor;
+ u32 *request = (u32 *)&descriptor;
+
+ descriptor.RequestFlags = MPI25_REQ_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO;
+ descriptor.MSIxIndex = _base_get_msix_index(ioc);
+ descriptor.SMID = cpu_to_le16(smid);
+
+ writel(cpu_to_le32(*request), &ioc->chip->AtomicRequestDescriptorPost);
+}
+
+/**
+ * _base_put_smid_hi_priority_atomic - send Task Management request to
+ * firmware using Atomic Request Descriptor
+ * @ioc: per adapter object
+ * @smid: system request message index
+ * @msix_task: msix_task will be same as msix of IO in case of task abort, else 0.
+ *
+ * Return nothing.
+ */
+static void
+_base_put_smid_hi_priority_atomic(struct MPT3SAS_ADAPTER *ioc, u16 smid,
+ u16 msix_task)
+{
+ Mpi26AtomicRequestDescriptor_t descriptor;
+ u32 *request = (u32 *)&descriptor;
+
+ descriptor.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
+ descriptor.MSIxIndex = msix_task;
+ descriptor.SMID = cpu_to_le16(smid);
+
+ writel(cpu_to_le32(*request), &ioc->chip->AtomicRequestDescriptorPost);
+}
+
+/**
+ * _base_put_smid_default_atomic - Default, primarily used for config pages,
+ * using the Atomic Request Descriptor
+ * @ioc: per adapter object
+ * @smid: system request message index
+ *
+ * Return nothing.
+ */
+static void
+_base_put_smid_default_atomic(struct MPT3SAS_ADAPTER *ioc, u16 smid)
+{
+ Mpi26AtomicRequestDescriptor_t descriptor;
+ u32 *request = (u32 *)&descriptor;
+
+ descriptor.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
+ descriptor.MSIxIndex = _base_get_msix_index(ioc);
+ descriptor.SMID = cpu_to_le16(smid);
+
+ writel(cpu_to_le32(*request), &ioc->chip->AtomicRequestDescriptorPost);
+}
+
+/**
* _base_display_OEMs_branding - Display branding string
* @ioc: per adapter object
*
@@ -4070,7 +4161,7 @@ mpt3sas_base_sas_iounit_control(struct MPT3SAS_ADAPTER *ioc,
mpi_request->Operation == MPI2_SAS_OP_PHY_LINK_RESET)
ioc->ioc_link_reset_in_progress = 1;
init_completion(&ioc->base_cmds.done);
- mpt3sas_base_put_smid_default(ioc, smid);
+ ioc->put_smid_default(ioc, smid);
wait_for_completion_timeout(&ioc->base_cmds.done,
msecs_to_jiffies(10000));
if ((mpi_request->Operation == MPI2_SAS_OP_PHY_HARD_RESET ||
@@ -4170,7 +4261,7 @@ mpt3sas_base_scsi_enclosure_processor(struct MPT3SAS_ADAPTER *ioc,
ioc->base_cmds.smid = smid;
memcpy(request, mpi_request, sizeof(Mpi2SepReply_t));
init_completion(&ioc->base_cmds.done);
- mpt3sas_base_put_smid_default(ioc, smid);
+ ioc->put_smid_default(ioc, smid);
wait_for_completion_timeout(&ioc->base_cmds.done,
msecs_to_jiffies(10000));
if (!(ioc->base_cmds.status & MPT3_CMD_COMPLETE)) {
@@ -4355,6 +4446,8 @@ _base_get_ioc_facts(struct MPT3SAS_ADAPTER *ioc)
if ((facts->IOCCapabilities &
MPI2_IOCFACTS_CAPABILITY_RDPQ_ARRAY_CAPABLE))
ioc->rdpq_array_capable = 1;
+ if (facts->IOCCapabilities & MPI26_IOCFACTS_CAPABILITY_ATOMIC_REQ)
+ ioc->atomic_desc_capable = 1;
facts->FWVersion.Word = le32_to_cpu(mpi_reply.FWVersion.Word);
facts->IOCRequestFrameSize =
le16_to_cpu(mpi_reply.IOCRequestFrameSize);
@@ -4582,7 +4675,7 @@ _base_send_port_enable(struct MPT3SAS_ADAPTER *ioc)
mpi_request->Function = MPI2_FUNCTION_PORT_ENABLE;
init_completion(&ioc->port_enable_cmds.done);
- mpt3sas_base_put_smid_default(ioc, smid);
+ ioc->put_smid_default(ioc, smid);
wait_for_completion_timeout(&ioc->port_enable_cmds.done, 300*HZ);
if (!(ioc->port_enable_cmds.status & MPT3_CMD_COMPLETE)) {
pr_err(MPT3SAS_FMT "%s: timeout\n",
@@ -4645,7 +4738,7 @@ mpt3sas_port_enable(struct MPT3SAS_ADAPTER *ioc)
memset(mpi_request, 0, sizeof(Mpi2PortEnableRequest_t));
mpi_request->Function = MPI2_FUNCTION_PORT_ENABLE;
- mpt3sas_base_put_smid_default(ioc, smid);
+ ioc->put_smid_default(ioc, smid);
return 0;
}
@@ -4764,7 +4857,7 @@ _base_event_notification(struct MPT3SAS_ADAPTER *ioc)
mpi_request->EventMasks[i] =
cpu_to_le32(ioc->event_masks[i]);
init_completion(&ioc->base_cmds.done);
- mpt3sas_base_put_smid_default(ioc, smid);
+ ioc->put_smid_default(ioc, smid);
wait_for_completion_timeout(&ioc->base_cmds.done, 30*HZ);
if (!(ioc->base_cmds.status & MPT3_CMD_COMPLETE)) {
pr_err(MPT3SAS_FMT "%s: timeout\n",
@@ -5138,7 +5231,7 @@ _base_make_ioc_operational(struct MPT3SAS_ADAPTER *ioc)
/* initialize reply post host index */
list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
- if (ioc->msix96_vector)
+ if (ioc->combined_reply_queue)
writel((reply_q->msix_index & 7)<<
MPI2_RPHI_MSIX_INDEX_SHIFT,
ioc->replyPostRegisterIndex[reply_q->msix_index/8]);
@@ -5280,9 +5373,23 @@ mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc)
ioc->build_sg = &_base_build_sg_ieee;
ioc->build_zero_len_sge = &_base_build_zero_len_sge_ieee;
ioc->sge_size_ieee = sizeof(Mpi2IeeeSgeSimple64_t);
+
break;
}
+ if (ioc->atomic_desc_capable) {
+ ioc->put_smid_default = &_base_put_smid_default_atomic;
+ ioc->put_smid_scsi_io = &_base_put_smid_scsi_io_atomic;
+ ioc->put_smid_fast_path = &_base_put_smid_fast_path_atomic;
+ ioc->put_smid_hi_priority = &_base_put_smid_hi_priority_atomic;
+ } else {
+ ioc->put_smid_default = &_base_put_smid_default;
+ ioc->put_smid_scsi_io = &_base_put_smid_scsi_io;
+ ioc->put_smid_fast_path = &_base_put_smid_fast_path;
+ ioc->put_smid_hi_priority = &_base_put_smid_hi_priority;
+ }
+
+
/*
 * These function pointers are for other requests that don't
 * require IEEE scatter gather elements.
@@ -5332,6 +5439,21 @@ mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc)
goto out_free_resources;
}
+ /* allocate memory for pending OS device add list */
+ ioc->pend_os_device_add_sz = (ioc->facts.MaxDevHandle / 8);
+ if (ioc->facts.MaxDevHandle % 8)
+ ioc->pend_os_device_add_sz++;
+ ioc->pend_os_device_add = kzalloc(ioc->pend_os_device_add_sz,
+ GFP_KERNEL);
+ if (!ioc->pend_os_device_add)
+ goto out_free_resources;
+
+ ioc->device_remove_in_progress_sz = ioc->pend_os_device_add_sz;
+ ioc->device_remove_in_progress =
+ kzalloc(ioc->device_remove_in_progress_sz, GFP_KERNEL);
+ if (!ioc->device_remove_in_progress)
+ goto out_free_resources;
+
ioc->fwfault_debug = mpt3sas_fwfault_debug;
/* base internal command bits */
@@ -5414,6 +5536,8 @@ mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc)
kfree(ioc->reply_post_host_index);
kfree(ioc->pd_handles);
kfree(ioc->blocking_handles);
+ kfree(ioc->device_remove_in_progress);
+ kfree(ioc->pend_os_device_add);
kfree(ioc->tm_cmds.reply);
kfree(ioc->transport_cmds.reply);
kfree(ioc->scsih_cmds.reply);
@@ -5455,6 +5579,8 @@ mpt3sas_base_detach(struct MPT3SAS_ADAPTER *ioc)
kfree(ioc->reply_post_host_index);
kfree(ioc->pd_handles);
kfree(ioc->blocking_handles);
+ kfree(ioc->device_remove_in_progress);
+ kfree(ioc->pend_os_device_add);
kfree(ioc->pfacts);
kfree(ioc->ctl_cmds.reply);
kfree(ioc->ctl_cmds.sense);
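[Editor's note] The net effect of this file's changes is that request posting becomes a per-adapter function pointer: mpt3sas_base_attach() inspects the ATOMIC_REQ capability bit once and wires ioc->put_smid_* to either the legacy 64-bit descriptor writers or the new 32-bit atomic ones, and every caller simply invokes the pointer. A minimal sketch of that capability-based dispatch (types and names hypothetical):

    #include <stdio.h>

    struct adapter;
    typedef void (*put_smid_fn)(struct adapter *a, unsigned short smid);

    struct adapter {
            int atomic_desc_capable;
            put_smid_fn put_smid_default;
    };

    static void put_smid_legacy(struct adapter *a, unsigned short smid)
    {
            /* Models the two 32-bit halves of a 64-bit descriptor write. */
            printf("legacy 64-bit descriptor, smid %u\n", smid);
    }

    static void put_smid_atomic(struct adapter *a, unsigned short smid)
    {
            /* Models a single 32-bit atomic descriptor write. */
            printf("atomic 32-bit descriptor, smid %u\n", smid);
    }

    static void adapter_attach(struct adapter *a)
    {
            a->put_smid_default = a->atomic_desc_capable ?
                                    put_smid_atomic : put_smid_legacy;
    }

    int main(void)
    {
            struct adapter a = { .atomic_desc_capable = 1 };

            adapter_attach(&a);
            a.put_smid_default(&a, 42);     /* callers never branch on capability */
            return 0;
    }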
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.h b/drivers/scsi/mpt3sas/mpt3sas_base.h
index 3e71bc1b4a80..394fe1338d09 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.h
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.h
@@ -73,9 +73,9 @@
#define MPT3SAS_DRIVER_NAME "mpt3sas"
#define MPT3SAS_AUTHOR "Avago Technologies <MPT-FusionLinux.pdl@avagotech.com>"
#define MPT3SAS_DESCRIPTION "LSI MPT Fusion SAS 3.0 Device Driver"
-#define MPT3SAS_DRIVER_VERSION "13.100.00.00"
-#define MPT3SAS_MAJOR_VERSION 13
-#define MPT3SAS_MINOR_VERSION 100
+#define MPT3SAS_DRIVER_VERSION "14.101.00.00"
+#define MPT3SAS_MAJOR_VERSION 14
+#define MPT3SAS_MINOR_VERSION 101
#define MPT3SAS_BUILD_VERSION 0
#define MPT3SAS_RELEASE_VERSION 00
@@ -300,8 +300,9 @@
 * There are twelve Supplemental Reply Post Host Index Registers on SAS3
 * (gen3) controllers and sixteen on SAS3.5 (gen3.5) controllers, each at
 * an offset of 0x10 bytes from the previous one.
*/
-#define MPT3_SUP_REPLY_POST_HOST_INDEX_REG_COUNT 12
-#define MPT3_SUP_REPLY_POST_HOST_INDEX_REG_OFFSET (0x10)
+#define MPT3_SUP_REPLY_POST_HOST_INDEX_REG_COUNT_G3 12
+#define MPT3_SUP_REPLY_POST_HOST_INDEX_REG_COUNT_G35 16
+#define MPT3_SUP_REPLY_POST_HOST_INDEX_REG_OFFSET (0x10)
/* OEM Identifiers */
#define MFG10_OEM_ID_INVALID (0x00000000)
@@ -375,7 +376,6 @@ struct MPT3SAS_TARGET {
* per device private data
*/
#define MPT_DEVICE_FLAGS_INIT 0x01
-#define MPT_DEVICE_TLR_ON 0x02
#define MFG_PAGE10_HIDE_SSDS_MASK (0x00000003)
#define MFG_PAGE10_HIDE_ALL_DISKS (0x00)
@@ -402,6 +402,9 @@ struct MPT3SAS_DEVICE {
u8 block;
u8 tlr_snoop_check;
u8 ignore_delay_remove;
+ /* Iopriority Command Handling */
+ u8 ncq_prio_enable;
+
};
#define MPT3_CMD_NOT_USED 0x8000 /* free */
@@ -736,7 +739,10 @@ typedef void (*MPT_BUILD_SG)(struct MPT3SAS_ADAPTER *ioc, void *psge,
typedef void (*MPT_BUILD_ZERO_LEN_SGE)(struct MPT3SAS_ADAPTER *ioc,
void *paddr);
-
+/* To support atomic and non-atomic descriptors */
+typedef void (*PUT_SMID_IO_FP_HIP) (struct MPT3SAS_ADAPTER *ioc, u16 smid,
+ u16 funcdep);
+typedef void (*PUT_SMID_DEFAULT) (struct MPT3SAS_ADAPTER *ioc, u16 smid);
/* IOC Facts and Port Facts converted from little endian to cpu */
union mpi3_version_union {
@@ -1079,6 +1085,9 @@ struct MPT3SAS_ADAPTER {
void *pd_handles;
u16 pd_handles_sz;
+ void *pend_os_device_add;
+ u16 pend_os_device_add_sz;
+
/* config page */
u16 config_page_sz;
void *config_page;
@@ -1156,7 +1165,8 @@ struct MPT3SAS_ADAPTER {
u8 reply_queue_count;
struct list_head reply_queue_list;
- u8 msix96_vector;
+ u8 combined_reply_queue;
+ u8 combined_reply_index_count;
/* reply post register index */
resource_size_t **replyPostRegisterIndex;
@@ -1187,6 +1197,15 @@ struct MPT3SAS_ADAPTER {
struct SL_WH_EVENT_TRIGGERS_T diag_trigger_event;
struct SL_WH_SCSI_TRIGGERS_T diag_trigger_scsi;
struct SL_WH_MPI_TRIGGERS_T diag_trigger_mpi;
+ void *device_remove_in_progress;
+ u16 device_remove_in_progress_sz;
+ u8 is_gen35_ioc;
+ u8 atomic_desc_capable;
+ PUT_SMID_IO_FP_HIP put_smid_scsi_io;
+ PUT_SMID_IO_FP_HIP put_smid_fast_path;
+ PUT_SMID_IO_FP_HIP put_smid_hi_priority;
+ PUT_SMID_DEFAULT put_smid_default;
+
};
typedef u8 (*MPT_CALLBACK)(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
@@ -1232,13 +1251,6 @@ u16 mpt3sas_base_get_smid_scsiio(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx,
u16 mpt3sas_base_get_smid(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx);
void mpt3sas_base_free_smid(struct MPT3SAS_ADAPTER *ioc, u16 smid);
-void mpt3sas_base_put_smid_scsi_io(struct MPT3SAS_ADAPTER *ioc, u16 smid,
- u16 handle);
-void mpt3sas_base_put_smid_fast_path(struct MPT3SAS_ADAPTER *ioc, u16 smid,
- u16 handle);
-void mpt3sas_base_put_smid_hi_priority(struct MPT3SAS_ADAPTER *ioc,
- u16 smid, u16 msix_task);
-void mpt3sas_base_put_smid_default(struct MPT3SAS_ADAPTER *ioc, u16 smid);
void mpt3sas_base_initialize_callback_handler(void);
u8 mpt3sas_base_register_callback_handler(MPT_CALLBACK cb_func);
void mpt3sas_base_release_callback_handler(u8 cb_idx);
@@ -1449,4 +1461,7 @@ mpt3sas_setup_direct_io(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
struct _raid_device *raid_device, Mpi2SCSIIORequest_t *mpi_request,
u16 smid);
+/* NCQ Prio Handling Check */
+bool scsih_ncq_prio_supp(struct scsi_device *sdev);
+
#endif /* MPT3SAS_BASE_H_INCLUDED */
diff --git a/drivers/scsi/mpt3sas/mpt3sas_config.c b/drivers/scsi/mpt3sas/mpt3sas_config.c
index cebfd734fd76..dd6270125614 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_config.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_config.c
@@ -384,7 +384,7 @@ _config_request(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigRequest_t
memcpy(config_request, mpi_request, sizeof(Mpi2ConfigRequest_t));
_config_display_some_debug(ioc, smid, "config_request", NULL);
init_completion(&ioc->config_cmds.done);
- mpt3sas_base_put_smid_default(ioc, smid);
+ ioc->put_smid_default(ioc, smid);
wait_for_completion_timeout(&ioc->config_cmds.done, timeout*HZ);
if (!(ioc->config_cmds.status & MPT3_CMD_COMPLETE)) {
pr_err(MPT3SAS_FMT "%s: timeout\n",
diff --git a/drivers/scsi/mpt3sas/mpt3sas_ctl.c b/drivers/scsi/mpt3sas/mpt3sas_ctl.c
index 26cdc127ac89..95f0f24bac05 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_ctl.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_ctl.c
@@ -654,6 +654,7 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
size_t data_in_sz = 0;
long ret;
u16 wait_state_count;
+ u16 device_handle = MPT3SAS_INVALID_DEVICE_HANDLE;
issue_reset = 0;
@@ -738,10 +739,13 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
data_in_sz = karg.data_in_size;
if (mpi_request->Function == MPI2_FUNCTION_SCSI_IO_REQUEST ||
- mpi_request->Function == MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH) {
- if (!le16_to_cpu(mpi_request->FunctionDependent1) ||
- le16_to_cpu(mpi_request->FunctionDependent1) >
- ioc->facts.MaxDevHandle) {
+ mpi_request->Function == MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH ||
+ mpi_request->Function == MPI2_FUNCTION_SCSI_TASK_MGMT ||
+ mpi_request->Function == MPI2_FUNCTION_SATA_PASSTHROUGH) {
+
+ device_handle = le16_to_cpu(mpi_request->FunctionDependent1);
+ if (!device_handle || (device_handle >
+ ioc->facts.MaxDevHandle)) {
ret = -EINVAL;
mpt3sas_base_free_smid(ioc, smid);
goto out;
@@ -797,14 +801,20 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
scsiio_request->SenseBufferLowAddress =
mpt3sas_base_get_sense_buffer_dma(ioc, smid);
memset(ioc->ctl_cmds.sense, 0, SCSI_SENSE_BUFFERSIZE);
+ if (test_bit(device_handle, ioc->device_remove_in_progress)) {
+ dtmprintk(ioc, pr_info(MPT3SAS_FMT
+ "handle(0x%04x) :ioctl failed due to device removal in progress\n",
+ ioc->name, device_handle));
+ mpt3sas_base_free_smid(ioc, smid);
+ ret = -EINVAL;
+ goto out;
+ }
ioc->build_sg(ioc, psge, data_out_dma, data_out_sz,
data_in_dma, data_in_sz);
-
if (mpi_request->Function == MPI2_FUNCTION_SCSI_IO_REQUEST)
- mpt3sas_base_put_smid_scsi_io(ioc, smid,
- le16_to_cpu(mpi_request->FunctionDependent1));
+ ioc->put_smid_scsi_io(ioc, smid, device_handle);
else
- mpt3sas_base_put_smid_default(ioc, smid);
+ ioc->put_smid_default(ioc, smid);
break;
}
case MPI2_FUNCTION_SCSI_TASK_MGMT:
@@ -827,11 +837,19 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
}
}
+ if (test_bit(device_handle, ioc->device_remove_in_progress)) {
+ dtmprintk(ioc, pr_info(MPT3SAS_FMT
+ "handle(0x%04x) :ioctl failed due to device removal in progress\n",
+ ioc->name, device_handle));
+ mpt3sas_base_free_smid(ioc, smid);
+ ret = -EINVAL;
+ goto out;
+ }
mpt3sas_scsih_set_tm_flag(ioc, le16_to_cpu(
tm_request->DevHandle));
ioc->build_sg_mpi(ioc, psge, data_out_dma, data_out_sz,
data_in_dma, data_in_sz);
- mpt3sas_base_put_smid_hi_priority(ioc, smid, 0);
+ ioc->put_smid_hi_priority(ioc, smid, 0);
break;
}
case MPI2_FUNCTION_SMP_PASSTHROUGH:
@@ -862,16 +880,30 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
}
ioc->build_sg(ioc, psge, data_out_dma, data_out_sz, data_in_dma,
data_in_sz);
- mpt3sas_base_put_smid_default(ioc, smid);
+ ioc->put_smid_default(ioc, smid);
break;
}
case MPI2_FUNCTION_SATA_PASSTHROUGH:
+ {
+ if (test_bit(device_handle, ioc->device_remove_in_progress)) {
+ dtmprintk(ioc, pr_info(MPT3SAS_FMT
+ "handle(0x%04x) :ioctl failed due to device removal in progress\n",
+ ioc->name, device_handle));
+ mpt3sas_base_free_smid(ioc, smid);
+ ret = -EINVAL;
+ goto out;
+ }
+ ioc->build_sg(ioc, psge, data_out_dma, data_out_sz, data_in_dma,
+ data_in_sz);
+ ioc->put_smid_default(ioc, smid);
+ break;
+ }
case MPI2_FUNCTION_FW_DOWNLOAD:
case MPI2_FUNCTION_FW_UPLOAD:
{
ioc->build_sg(ioc, psge, data_out_dma, data_out_sz, data_in_dma,
data_in_sz);
- mpt3sas_base_put_smid_default(ioc, smid);
+ ioc->put_smid_default(ioc, smid);
break;
}
case MPI2_FUNCTION_TOOLBOX:
@@ -886,7 +918,7 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
ioc->build_sg_mpi(ioc, psge, data_out_dma, data_out_sz,
data_in_dma, data_in_sz);
}
- mpt3sas_base_put_smid_default(ioc, smid);
+ ioc->put_smid_default(ioc, smid);
break;
}
case MPI2_FUNCTION_SAS_IO_UNIT_CONTROL:
@@ -905,7 +937,7 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
default:
ioc->build_sg_mpi(ioc, psge, data_out_dma, data_out_sz,
data_in_dma, data_in_sz);
- mpt3sas_base_put_smid_default(ioc, smid);
+ ioc->put_smid_default(ioc, smid);
break;
}
@@ -1064,7 +1096,10 @@ _ctl_getiocinfo(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
break;
case MPI25_VERSION:
case MPI26_VERSION:
- karg.adapter_type = MPT3_IOCTL_INTERFACE_SAS3;
+ if (ioc->is_gen35_ioc)
+ karg.adapter_type = MPT3_IOCTL_INTERFACE_SAS35;
+ else
+ karg.adapter_type = MPT3_IOCTL_INTERFACE_SAS3;
strcat(karg.driver_version, MPT3SAS_DRIVER_VERSION);
break;
}
@@ -1491,7 +1526,7 @@ _ctl_diag_register_2(struct MPT3SAS_ADAPTER *ioc,
cpu_to_le32(ioc->product_specific[buffer_type][i]);
init_completion(&ioc->ctl_cmds.done);
- mpt3sas_base_put_smid_default(ioc, smid);
+ ioc->put_smid_default(ioc, smid);
wait_for_completion_timeout(&ioc->ctl_cmds.done,
MPT3_IOCTL_DEFAULT_TIMEOUT*HZ);
@@ -1838,7 +1873,7 @@ mpt3sas_send_diag_release(struct MPT3SAS_ADAPTER *ioc, u8 buffer_type,
mpi_request->VP_ID = 0;
init_completion(&ioc->ctl_cmds.done);
- mpt3sas_base_put_smid_default(ioc, smid);
+ ioc->put_smid_default(ioc, smid);
wait_for_completion_timeout(&ioc->ctl_cmds.done,
MPT3_IOCTL_DEFAULT_TIMEOUT*HZ);
@@ -2105,7 +2140,7 @@ _ctl_diag_read_buffer(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
mpi_request->VP_ID = 0;
init_completion(&ioc->ctl_cmds.done);
- mpt3sas_base_put_smid_default(ioc, smid);
+ ioc->put_smid_default(ioc, smid);
wait_for_completion_timeout(&ioc->ctl_cmds.done,
MPT3_IOCTL_DEFAULT_TIMEOUT*HZ);
@@ -3290,8 +3325,6 @@ static DEVICE_ATTR(diag_trigger_mpi, S_IRUGO | S_IWUSR,
 /*********** diagnostic trigger support *** END ****************************/
-
-
/*****************************************/
struct device_attribute *mpt3sas_host_attrs[] = {
@@ -3367,9 +3400,50 @@ _ctl_device_handle_show(struct device *dev, struct device_attribute *attr,
}
static DEVICE_ATTR(sas_device_handle, S_IRUGO, _ctl_device_handle_show, NULL);
+/**
+ * _ctl_device_ncq_prio_enable_show - show whether prioritized I/O is enabled
+ * @dev: pointer to embedded device
+ * @attr: device attribute
+ * @buf: the buffer returned
+ *
+ * A sysfs 'read/write' sdev attribute, only usable with SATA devices
+ */
+static ssize_t
+_ctl_device_ncq_prio_enable_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct scsi_device *sdev = to_scsi_device(dev);
+ struct MPT3SAS_DEVICE *sas_device_priv_data = sdev->hostdata;
+
+ return snprintf(buf, PAGE_SIZE, "%d\n",
+ sas_device_priv_data->ncq_prio_enable);
+}
+
+static ssize_t
+_ctl_device_ncq_prio_enable_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct scsi_device *sdev = to_scsi_device(dev);
+ struct MPT3SAS_DEVICE *sas_device_priv_data = sdev->hostdata;
+ bool ncq_prio_enable = false;
+
+ if (kstrtobool(buf, &ncq_prio_enable))
+ return -EINVAL;
+
+ if (!scsih_ncq_prio_supp(sdev))
+ return -EINVAL;
+
+ sas_device_priv_data->ncq_prio_enable = ncq_prio_enable;
+ return strlen(buf);
+}
+static DEVICE_ATTR(sas_ncq_prio_enable, S_IRUGO | S_IWUSR,
+ _ctl_device_ncq_prio_enable_show,
+ _ctl_device_ncq_prio_enable_store);
+
struct device_attribute *mpt3sas_dev_attrs[] = {
&dev_attr_sas_address,
&dev_attr_sas_device_handle,
+ &dev_attr_sas_ncq_prio_enable,
NULL,
};
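[Editor's note] Assuming the attribute is exported through the usual SCSI sysfs hierarchy, enabling the feature from userspace would look something like writing 1 to /sys/class/scsi_device/&lt;h:c:t:l&gt;/device/sas_ncq_prio_enable; the store handler rejects the write unless kstrtobool() can parse the value and scsih_ncq_prio_supp() confirms the SATA device advertises NCQ priority support.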
diff --git a/drivers/scsi/mpt3sas/mpt3sas_ctl.h b/drivers/scsi/mpt3sas/mpt3sas_ctl.h
index 89408356d252..f3e17a8c1b07 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_ctl.h
+++ b/drivers/scsi/mpt3sas/mpt3sas_ctl.h
@@ -143,6 +143,7 @@ struct mpt3_ioctl_pci_info {
#define MPT2_IOCTL_INTERFACE_SAS2 (0x04)
#define MPT2_IOCTL_INTERFACE_SAS2_SSS6200 (0x05)
#define MPT3_IOCTL_INTERFACE_SAS3 (0x06)
+#define MPT3_IOCTL_INTERFACE_SAS35 (0x07)
#define MPT2_IOCTL_VERSION_LENGTH (32)
/**
diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
index 1c4744e78173..b5c966e319d3 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
@@ -423,7 +423,7 @@ _scsih_get_sas_address(struct MPT3SAS_ADAPTER *ioc, u16 handle,
return 0;
}
- /* we hit this becuase the given parent handle doesn't exist */
+ /* we hit this because the given parent handle doesn't exist */
if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE)
return -ENXIO;
@@ -788,6 +788,11 @@ _scsih_sas_device_add(struct MPT3SAS_ADAPTER *ioc,
list_add_tail(&sas_device->list, &ioc->sas_device_list);
spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+ if (ioc->hide_drives) {
+ clear_bit(sas_device->handle, ioc->pend_os_device_add);
+ return;
+ }
+
if (!mpt3sas_transport_port_add(ioc, sas_device->handle,
sas_device->sas_address_parent)) {
_scsih_sas_device_remove(ioc, sas_device);
@@ -803,7 +808,8 @@ _scsih_sas_device_add(struct MPT3SAS_ADAPTER *ioc,
sas_device->sas_address_parent);
_scsih_sas_device_remove(ioc, sas_device);
}
- }
+ } else
+ clear_bit(sas_device->handle, ioc->pend_os_device_add);
}
/**
@@ -1517,7 +1523,7 @@ _scsih_display_sata_capabilities(struct MPT3SAS_ADAPTER *ioc,
/*
* raid transport support -
* Enabled for SLES11 and newer, in older kernels the driver will panic when
- * unloading the driver followed by a load - I beleive that the subroutine
+ * unloading the driver followed by a load - I believe that the subroutine
* raid_class_release() is not cleaning up properly.
*/
@@ -2279,7 +2285,7 @@ mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle, uint channel,
msix_task = scsi_lookup->msix_io;
else
msix_task = 0;
- mpt3sas_base_put_smid_hi_priority(ioc, smid, msix_task);
+ ioc->put_smid_hi_priority(ioc, smid, msix_task);
wait_for_completion_timeout(&ioc->tm_cmds.done, timeout*HZ);
if (!(ioc->tm_cmds.status & MPT3_CMD_COMPLETE)) {
pr_err(MPT3SAS_FMT "%s: timeout\n",
@@ -2837,7 +2843,7 @@ _scsih_internal_device_block(struct scsi_device *sdev,
if (r == -EINVAL)
sdev_printk(KERN_WARNING, sdev,
"device_block failed with return(%d) for handle(0x%04x)\n",
- sas_device_priv_data->sas_target->handle, r);
+ r, sas_device_priv_data->sas_target->handle);
}
/**
@@ -2867,20 +2873,20 @@ _scsih_internal_device_unblock(struct scsi_device *sdev,
sdev_printk(KERN_WARNING, sdev,
"device_unblock failed with return(%d) for handle(0x%04x) "
"performing a block followed by an unblock\n",
- sas_device_priv_data->sas_target->handle, r);
+ r, sas_device_priv_data->sas_target->handle);
sas_device_priv_data->block = 1;
r = scsi_internal_device_block(sdev);
if (r)
sdev_printk(KERN_WARNING, sdev, "retried device_block "
"failed with return(%d) for handle(0x%04x)\n",
- sas_device_priv_data->sas_target->handle, r);
+ r, sas_device_priv_data->sas_target->handle);
sas_device_priv_data->block = 0;
r = scsi_internal_device_unblock(sdev, SDEV_RUNNING);
if (r)
sdev_printk(KERN_WARNING, sdev, "retried device_unblock"
" failed with return(%d) for handle(0x%04x)\n",
- sas_device_priv_data->sas_target->handle, r);
+ r, sas_device_priv_data->sas_target->handle);
}
}
@@ -2942,7 +2948,7 @@ _scsih_ublock_io_device(struct MPT3SAS_ADAPTER *ioc, u64 sas_address)
* @ioc: per adapter object
* @handle: device handle
*
- * During device pull we need to appropiately set the sdev state.
+ * During device pull we need to appropriately set the sdev state.
*/
static void
_scsih_block_io_all_device(struct MPT3SAS_ADAPTER *ioc)
@@ -2971,7 +2977,7 @@ _scsih_block_io_all_device(struct MPT3SAS_ADAPTER *ioc)
* @ioc: per adapter object
* @handle: device handle
*
- * During device pull we need to appropiately set the sdev state.
+ * During device pull we need to appropriately set the sdev state.
*/
static void
_scsih_block_io_device(struct MPT3SAS_ADAPTER *ioc, u16 handle)
@@ -3138,6 +3144,8 @@ _scsih_tm_tr_send(struct MPT3SAS_ADAPTER *ioc, u16 handle)
if (test_bit(handle, ioc->pd_handles))
return;
+ clear_bit(handle, ioc->pend_os_device_add);
+
spin_lock_irqsave(&ioc->sas_device_lock, flags);
sas_device = __mpt3sas_get_sdev_by_handle(ioc, handle);
if (sas_device && sas_device->starget &&
@@ -3192,7 +3200,8 @@ _scsih_tm_tr_send(struct MPT3SAS_ADAPTER *ioc, u16 handle)
mpi_request->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
mpi_request->DevHandle = cpu_to_le16(handle);
mpi_request->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
- mpt3sas_base_put_smid_hi_priority(ioc, smid, 0);
+ set_bit(handle, ioc->device_remove_in_progress);
+ ioc->put_smid_hi_priority(ioc, smid, 0);
mpt3sas_trigger_master(ioc, MASTER_TRIGGER_DEVICE_REMOVAL);
out:
@@ -3291,7 +3300,7 @@ _scsih_tm_tr_complete(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
mpi_request->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
mpi_request->Operation = MPI2_SAS_OP_REMOVE_DEVICE;
mpi_request->DevHandle = mpi_request_tm->DevHandle;
- mpt3sas_base_put_smid_default(ioc, smid_sas_ctrl);
+ ioc->put_smid_default(ioc, smid_sas_ctrl);
return _scsih_check_for_pending_tm(ioc, smid);
}
@@ -3326,6 +3335,11 @@ _scsih_sas_control_complete(struct MPT3SAS_ADAPTER *ioc, u16 smid,
ioc->name, le16_to_cpu(mpi_reply->DevHandle), smid,
le16_to_cpu(mpi_reply->IOCStatus),
le32_to_cpu(mpi_reply->IOCLogInfo)));
+ if (le16_to_cpu(mpi_reply->IOCStatus) ==
+ MPI2_IOCSTATUS_SUCCESS) {
+ clear_bit(le16_to_cpu(mpi_reply->DevHandle),
+ ioc->device_remove_in_progress);
+ }
} else {
pr_err(MPT3SAS_FMT "mpi_reply not valid at %s:%d/%s()!\n",
ioc->name, __FILE__, __LINE__, __func__);
@@ -3381,7 +3395,7 @@ _scsih_tm_tr_volume_send(struct MPT3SAS_ADAPTER *ioc, u16 handle)
mpi_request->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
mpi_request->DevHandle = cpu_to_le16(handle);
mpi_request->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
- mpt3sas_base_put_smid_hi_priority(ioc, smid, 0);
+ ioc->put_smid_hi_priority(ioc, smid, 0);
}
/**
@@ -3473,7 +3487,7 @@ _scsih_issue_delayed_event_ack(struct MPT3SAS_ADAPTER *ioc, u16 smid, u16 event,
ack_request->EventContext = event_context;
ack_request->VF_ID = 0; /* TODO */
ack_request->VP_ID = 0;
- mpt3sas_base_put_smid_default(ioc, smid);
+ ioc->put_smid_default(ioc, smid);
}
/**
@@ -3530,7 +3544,7 @@ _scsih_issue_delayed_sas_io_unit_ctrl(struct MPT3SAS_ADAPTER *ioc,
mpi_request->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
mpi_request->Operation = MPI2_SAS_OP_REMOVE_DEVICE;
mpi_request->DevHandle = handle;
- mpt3sas_base_put_smid_default(ioc, smid);
+ ioc->put_smid_default(ioc, smid);
}
/**
@@ -3930,7 +3944,7 @@ _scsih_flush_running_cmds(struct MPT3SAS_ADAPTER *ioc)
* _scsih_setup_eedp - setup MPI request for EEDP transfer
* @ioc: per adapter object
* @scmd: pointer to scsi command object
- * @mpi_request: pointer to the SCSI_IO reqest message frame
+ * @mpi_request: pointer to the SCSI_IO request message frame
*
* Supporting protection 1 and 3.
*
@@ -3983,6 +3997,9 @@ _scsih_setup_eedp(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
mpi_request_3v->EEDPBlockSize =
cpu_to_le16(scmd->device->sector_size);
+
+ if (ioc->is_gen35_ioc)
+ eedp_flags |= MPI25_SCSIIO_EEDPFLAGS_APPTAG_DISABLE_MODE;
mpi_request->EEDPFlags = cpu_to_le16(eedp_flags);
}
@@ -4036,6 +4053,8 @@ scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
struct MPT3SAS_DEVICE *sas_device_priv_data;
struct MPT3SAS_TARGET *sas_target_priv_data;
struct _raid_device *raid_device;
+ struct request *rq = scmd->request;
+ int class;
Mpi2SCSIIORequest_t *mpi_request;
u32 mpi_control;
u16 smid;
@@ -4084,7 +4103,7 @@ scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
scmd->result = DID_NO_CONNECT << 16;
scmd->scsi_done(scmd);
return 0;
- /* device busy with task managment */
+ /* device busy with task management */
} else if (sas_target_priv_data->tm_busy ||
sas_device_priv_data->block)
return SCSI_MLQUEUE_DEVICE_BUSY;
@@ -4098,7 +4117,12 @@ scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
/* set tags */
mpi_control |= MPI2_SCSIIO_CONTROL_SIMPLEQ;
-
+ /* NCQ Prio supported, make sure control indicates high priority */
+ if (sas_device_priv_data->ncq_prio_enable) {
+ class = IOPRIO_PRIO_CLASS(req_get_ioprio(rq));
+ if (class == IOPRIO_CLASS_RT)
+ mpi_control |= 1 << MPI2_SCSIIO_CONTROL_CMDPRI_SHIFT;
+ }
/* Make sure Device is not raid volume.
* We do not expose raid functionality to upper layer for warpdrive.
*/
@@ -4154,12 +4178,12 @@ scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
if (sas_target_priv_data->flags & MPT_TARGET_FASTPATH_IO) {
mpi_request->IoFlags = cpu_to_le16(scmd->cmd_len |
MPI25_SCSIIO_IOFLAGS_FAST_PATH);
- mpt3sas_base_put_smid_fast_path(ioc, smid, handle);
+ ioc->put_smid_fast_path(ioc, smid, handle);
} else
- mpt3sas_base_put_smid_scsi_io(ioc, smid,
+ ioc->put_smid_scsi_io(ioc, smid,
le16_to_cpu(mpi_request->DevHandle));
} else
- mpt3sas_base_put_smid_default(ioc, smid);
+ ioc->put_smid_default(ioc, smid);
return 0;
out:
@@ -4658,7 +4682,7 @@ _scsih_io_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
memcpy(mpi_request->CDB.CDB32, scmd->cmnd, scmd->cmd_len);
mpi_request->DevHandle =
cpu_to_le16(sas_device_priv_data->sas_target->handle);
- mpt3sas_base_put_smid_scsi_io(ioc, smid,
+ ioc->put_smid_scsi_io(ioc, smid,
sas_device_priv_data->sas_target->handle);
return 0;
}
@@ -5383,10 +5407,10 @@ _scsih_check_device(struct MPT3SAS_ADAPTER *ioc,
sas_device->handle, handle);
sas_target_priv_data->handle = handle;
sas_device->handle = handle;
- if (sas_device_pg0.Flags &
+ if (le16_to_cpu(sas_device_pg0.Flags) &
MPI2_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID) {
sas_device->enclosure_level =
- le16_to_cpu(sas_device_pg0.EnclosureLevel);
+ sas_device_pg0.EnclosureLevel;
memcpy(sas_device->connector_name,
sas_device_pg0.ConnectorName, 4);
sas_device->connector_name[4] = '\0';
@@ -5465,6 +5489,7 @@ _scsih_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 phy_num,
device_info = le32_to_cpu(sas_device_pg0.DeviceInfo);
if (!(_scsih_is_end_device(device_info)))
return -1;
+ set_bit(handle, ioc->pend_os_device_add);
sas_address = le64_to_cpu(sas_device_pg0.SASAddress);
/* check if device is present */
@@ -5483,6 +5508,7 @@ _scsih_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 phy_num,
sas_device = mpt3sas_get_sdev_by_addr(ioc,
sas_address);
if (sas_device) {
+ clear_bit(handle, ioc->pend_os_device_add);
sas_device_put(sas_device);
return -1;
}
@@ -5513,9 +5539,10 @@ _scsih_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 phy_num,
sas_device->fast_path = (le16_to_cpu(sas_device_pg0.Flags) &
MPI25_SAS_DEVICE0_FLAGS_FAST_PATH_CAPABLE) ? 1 : 0;
- if (sas_device_pg0.Flags & MPI2_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID) {
+ if (le16_to_cpu(sas_device_pg0.Flags)
+ & MPI2_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID) {
sas_device->enclosure_level =
- le16_to_cpu(sas_device_pg0.EnclosureLevel);
+ sas_device_pg0.EnclosureLevel;
memcpy(sas_device->connector_name,
sas_device_pg0.ConnectorName, 4);
sas_device->connector_name[4] = '\0';
@@ -5806,6 +5833,9 @@ _scsih_sas_topology_change_event(struct MPT3SAS_ADAPTER *ioc,
_scsih_check_device(ioc, sas_address, handle,
phy_number, link_rate);
+ if (!test_bit(handle, ioc->pend_os_device_add))
+ break;
+
case MPI2_EVENT_SAS_TOPO_RC_TARG_ADDED:
@@ -6267,7 +6297,7 @@ _scsih_ir_fastpath(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 phys_disk_num)
handle, phys_disk_num));
init_completion(&ioc->scsih_cmds.done);
- mpt3sas_base_put_smid_default(ioc, smid);
+ ioc->put_smid_default(ioc, smid);
wait_for_completion_timeout(&ioc->scsih_cmds.done, 10*HZ);
if (!(ioc->scsih_cmds.status & MPT3_CMD_COMPLETE)) {
@@ -6320,7 +6350,7 @@ _scsih_reprobe_lun(struct scsi_device *sdev, void *no_uld_attach)
{
sdev->no_uld_attach = no_uld_attach ? 1 : 0;
sdev_printk(KERN_INFO, sdev, "%s raid component\n",
- sdev->no_uld_attach ? "hidding" : "exposing");
+ sdev->no_uld_attach ? "hiding" : "exposing");
WARN_ON(scsi_device_reprobe(sdev));
}
@@ -7050,7 +7080,7 @@ Mpi2SasDevicePage0_t *sas_device_pg0)
if (sas_device_pg0->Flags &
MPI2_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID) {
sas_device->enclosure_level =
- le16_to_cpu(sas_device_pg0->EnclosureLevel);
+ sas_device_pg0->EnclosureLevel;
memcpy(&sas_device->connector_name[0],
&sas_device_pg0->ConnectorName[0], 4);
} else {
@@ -7112,6 +7142,7 @@ _scsih_search_responding_sas_devices(struct MPT3SAS_ADAPTER *ioc)
sas_device_pg0.SASAddress =
le64_to_cpu(sas_device_pg0.SASAddress);
sas_device_pg0.Slot = le16_to_cpu(sas_device_pg0.Slot);
+ sas_device_pg0.Flags = le16_to_cpu(sas_device_pg0.Flags);
_scsih_mark_responding_sas_device(ioc, &sas_device_pg0);
}
@@ -7723,6 +7754,9 @@ mpt3sas_scsih_reset_handler(struct MPT3SAS_ADAPTER *ioc, int reset_phase)
complete(&ioc->tm_cmds.done);
}
+ memset(ioc->pend_os_device_add, 0, ioc->pend_os_device_add_sz);
+ memset(ioc->device_remove_in_progress, 0,
+ ioc->device_remove_in_progress_sz);
_scsih_fw_event_cleanup_queue(ioc);
_scsih_flush_running_cmds(ioc);
break;
@@ -8113,7 +8147,7 @@ _scsih_ir_shutdown(struct MPT3SAS_ADAPTER *ioc)
if (!ioc->hide_ir_msg)
pr_info(MPT3SAS_FMT "IR shutdown (sending)\n", ioc->name);
init_completion(&ioc->scsih_cmds.done);
- mpt3sas_base_put_smid_default(ioc, smid);
+ ioc->put_smid_default(ioc, smid);
wait_for_completion_timeout(&ioc->scsih_cmds.done, 10*HZ);
if (!(ioc->scsih_cmds.status & MPT3_CMD_COMPLETE)) {
@@ -8654,6 +8688,12 @@ _scsih_determine_hba_mpi_version(struct pci_dev *pdev)
case MPI26_MFGPAGE_DEVID_SAS3324_2:
case MPI26_MFGPAGE_DEVID_SAS3324_3:
case MPI26_MFGPAGE_DEVID_SAS3324_4:
+ case MPI26_MFGPAGE_DEVID_SAS3508:
+ case MPI26_MFGPAGE_DEVID_SAS3508_1:
+ case MPI26_MFGPAGE_DEVID_SAS3408:
+ case MPI26_MFGPAGE_DEVID_SAS3516:
+ case MPI26_MFGPAGE_DEVID_SAS3516_1:
+ case MPI26_MFGPAGE_DEVID_SAS3416:
return MPI26_VERSION;
}
return 0;
@@ -8722,10 +8762,29 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
ioc->hba_mpi_version_belonged = hba_mpi_version;
ioc->id = mpt3_ids++;
sprintf(ioc->driver_name, "%s", MPT3SAS_DRIVER_NAME);
+ switch (pdev->device) {
+ case MPI26_MFGPAGE_DEVID_SAS3508:
+ case MPI26_MFGPAGE_DEVID_SAS3508_1:
+ case MPI26_MFGPAGE_DEVID_SAS3408:
+ case MPI26_MFGPAGE_DEVID_SAS3516:
+ case MPI26_MFGPAGE_DEVID_SAS3516_1:
+ case MPI26_MFGPAGE_DEVID_SAS3416:
+ ioc->is_gen35_ioc = 1;
+ break;
+ default:
+ ioc->is_gen35_ioc = 0;
+ }
if ((ioc->hba_mpi_version_belonged == MPI25_VERSION &&
pdev->revision >= SAS3_PCI_DEVICE_C0_REVISION) ||
- (ioc->hba_mpi_version_belonged == MPI26_VERSION))
- ioc->msix96_vector = 1;
+ (ioc->hba_mpi_version_belonged == MPI26_VERSION)) {
+ ioc->combined_reply_queue = 1;
+ if (ioc->is_gen35_ioc)
+ ioc->combined_reply_index_count =
+ MPT3_SUP_REPLY_POST_HOST_INDEX_REG_COUNT_G35;
+ else
+ ioc->combined_reply_index_count =
+ MPT3_SUP_REPLY_POST_HOST_INDEX_REG_COUNT_G3;
+ }
break;
default:
return -ENODEV;
@@ -9047,6 +9106,31 @@ scsih_pci_mmio_enabled(struct pci_dev *pdev)
return PCI_ERS_RESULT_RECOVERED;
}
+/**
+ * scsih_ncq_prio_supp - Check for NCQ command priority support
+ * @sdev: scsi device struct
+ *
+ * This is called when a user indicates they would like to enable
+ * ncq command priorities. This works only on SATA devices.
+ */
+bool scsih_ncq_prio_supp(struct scsi_device *sdev)
+{
+ unsigned char *buf;
+ bool ncq_prio_supp = false;
+
+ if (!scsi_device_supports_vpd(sdev))
+ return ncq_prio_supp;
+
+ buf = kmalloc(SCSI_VPD_PG_LEN, GFP_KERNEL);
+ if (!buf)
+ return ncq_prio_supp;
+
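+ /* VPD page 89h embeds the ATA IDENTIFY DEVICE data starting at byte
+ * 60, so buf[213] is the high byte of IDENTIFY word 76 (Serial ATA
+ * capabilities); bit 4 here is word 76 bit 12, the NCQ priority bit.
+ */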
+ if (!scsi_get_vpd_page(sdev, 0x89, buf, SCSI_VPD_PG_LEN))
+ ncq_prio_supp = (buf[213] >> 4) & 1;
+
+ kfree(buf);
+ return ncq_prio_supp;
+}
/*
* The pci device ids are defined in mpi/mpi2_cnfg.h.
*/
@@ -9128,6 +9212,19 @@ static const struct pci_device_id mpt3sas_pci_table[] = {
PCI_ANY_ID, PCI_ANY_ID },
{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3324_4,
PCI_ANY_ID, PCI_ANY_ID },
+ /* Ventura, Crusader, Harpoon & Tomcat ~ 3516, 3416, 3508 & 3408 */
+ { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3508,
+ PCI_ANY_ID, PCI_ANY_ID },
+ { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3508_1,
+ PCI_ANY_ID, PCI_ANY_ID },
+ { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3408,
+ PCI_ANY_ID, PCI_ANY_ID },
+ { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3516,
+ PCI_ANY_ID, PCI_ANY_ID },
+ { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3516_1,
+ PCI_ANY_ID, PCI_ANY_ID },
+ { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3416,
+ PCI_ANY_ID, PCI_ANY_ID },
{0} /* Terminating entry */
};
MODULE_DEVICE_TABLE(pci, mpt3sas_pci_table);
@@ -9168,7 +9265,7 @@ scsih_init(void)
 /* queuecommand callback handler */
scsi_io_cb_idx = mpt3sas_base_register_callback_handler(_scsih_io_done);
- /* task managment callback handler */
+ /* task management callback handler */
tm_cb_idx = mpt3sas_base_register_callback_handler(_scsih_tm_done);
/* base internal commands callback handler */
diff --git a/drivers/scsi/mpt3sas/mpt3sas_transport.c b/drivers/scsi/mpt3sas/mpt3sas_transport.c
index b74faf1a69b2..7f1d5785bc30 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_transport.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_transport.c
@@ -392,7 +392,7 @@ _transport_expander_report_manufacture(struct MPT3SAS_ADAPTER *ioc,
"report_manufacture - send to sas_addr(0x%016llx)\n",
ioc->name, (unsigned long long)sas_address));
init_completion(&ioc->transport_cmds.done);
- mpt3sas_base_put_smid_default(ioc, smid);
+ ioc->put_smid_default(ioc, smid);
wait_for_completion_timeout(&ioc->transport_cmds.done, 10*HZ);
if (!(ioc->transport_cmds.status & MPT3_CMD_COMPLETE)) {
@@ -1198,7 +1198,7 @@ _transport_get_expander_phy_error_log(struct MPT3SAS_ADAPTER *ioc,
ioc->name, (unsigned long long)phy->identify.sas_address,
phy->number));
init_completion(&ioc->transport_cmds.done);
- mpt3sas_base_put_smid_default(ioc, smid);
+ ioc->put_smid_default(ioc, smid);
wait_for_completion_timeout(&ioc->transport_cmds.done, 10*HZ);
if (!(ioc->transport_cmds.status & MPT3_CMD_COMPLETE)) {
@@ -1514,7 +1514,7 @@ _transport_expander_phy_control(struct MPT3SAS_ADAPTER *ioc,
ioc->name, (unsigned long long)phy->identify.sas_address,
phy->number, phy_operation));
init_completion(&ioc->transport_cmds.done);
- mpt3sas_base_put_smid_default(ioc, smid);
+ ioc->put_smid_default(ioc, smid);
wait_for_completion_timeout(&ioc->transport_cmds.done, 10*HZ);
if (!(ioc->transport_cmds.status & MPT3_CMD_COMPLETE)) {
@@ -2032,7 +2032,7 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
"%s - sending smp request\n", ioc->name, __func__));
init_completion(&ioc->transport_cmds.done);
- mpt3sas_base_put_smid_default(ioc, smid);
+ ioc->put_smid_default(ioc, smid);
wait_for_completion_timeout(&ioc->transport_cmds.done, 10*HZ);
if (!(ioc->transport_cmds.status & MPT3_CMD_COMPLETE)) {
diff --git a/drivers/scsi/mvsas/mv_94xx.c b/drivers/scsi/mvsas/mv_94xx.c
index 4c57d9abce7b..7de5d8d75480 100644
--- a/drivers/scsi/mvsas/mv_94xx.c
+++ b/drivers/scsi/mvsas/mv_94xx.c
@@ -668,7 +668,7 @@ static void mvs_94xx_command_active(struct mvs_info *mvi, u32 slot_idx)
{
u32 tmp;
tmp = mvs_cr32(mvi, MVS_COMMAND_ACTIVE+(slot_idx >> 3));
- if (tmp && 1 << (slot_idx % 32)) {
+ if (tmp & 1 << (slot_idx % 32)) {
mv_printk("command active %08X, slot [%x].\n", tmp, slot_idx);
mvs_cw32(mvi, MVS_COMMAND_ACTIVE + (slot_idx >> 3),
1 << (slot_idx % 32));
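[Editor's note] The mv_94xx hunk is the classic logical-vs-bitwise AND slip: with &&, the expression is true whenever the register value is non-zero at all, so the per-slot bit was never actually tested. A self-contained illustration of the difference:

    #include <stdio.h>

    int main(void)
    {
            unsigned int tmp = 0x4; /* only bit 2 set */
            int slot_idx = 0;       /* we want to test bit 0 */

            if (tmp && 1 << (slot_idx % 32))
                    printf("&&: slot %d reported active (wrong)\n", slot_idx);

            if (tmp & 1 << (slot_idx % 32))
                    printf("&: slot %d active\n", slot_idx);
            else
                    printf("&: slot %d idle (right)\n", slot_idx);
            return 0;
    }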
diff --git a/drivers/scsi/osd/osd_initiator.c b/drivers/scsi/osd/osd_initiator.c
index 2f2a9910e30e..ef99f62831fb 100644
--- a/drivers/scsi/osd/osd_initiator.c
+++ b/drivers/scsi/osd/osd_initiator.c
@@ -1595,7 +1595,7 @@ static int _init_blk_request(struct osd_request *or,
}
or->request = req;
- req->cmd_flags |= REQ_QUIET;
+ req->rq_flags |= RQF_QUIET;
req->timeout = or->timeout;
req->retries = or->retries;
diff --git a/drivers/scsi/osst.c b/drivers/scsi/osst.c
index 5033223f6287..a2960f5d98ec 100644
--- a/drivers/scsi/osst.c
+++ b/drivers/scsi/osst.c
@@ -368,7 +368,7 @@ static int osst_execute(struct osst_request *SRpnt, const unsigned char *cmd,
return DRIVER_ERROR << 24;
blk_rq_set_block_pc(req);
- req->cmd_flags |= REQ_QUIET;
+ req->rq_flags |= RQF_QUIET;
SRpnt->bio = NULL;
diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c
index 68a5c347fae9..337982cf3d63 100644
--- a/drivers/scsi/pmcraid.c
+++ b/drivers/scsi/pmcraid.c
@@ -1368,13 +1368,8 @@ static struct genl_multicast_group pmcraid_mcgrps[] = {
{ .name = "events", /* not really used - see ID discussion below */ },
};
-static struct genl_family pmcraid_event_family = {
- /*
- * Due to prior multicast group abuse (the code having assumed that
- * the family ID can be used as a multicast group ID) we need to
- * statically allocate a family (and thus group) ID.
- */
- .id = GENL_ID_PMCRAID,
+static struct genl_family pmcraid_event_family __ro_after_init = {
+ .module = THIS_MODULE,
.name = "pmcraid",
.version = 1,
.maxattr = PMCRAID_AEN_ATTR_MAX,
@@ -1389,7 +1384,7 @@ static struct genl_family pmcraid_event_family = {
* 0 if the pmcraid_event_family is successfully registered
* with netlink generic, non-zero otherwise
*/
-static int pmcraid_netlink_init(void)
+static int __init pmcraid_netlink_init(void)
{
int result;
@@ -3792,11 +3787,11 @@ static long pmcraid_ioctl_passthrough(
direction);
if (rc) {
pmcraid_err("couldn't build passthrough ioadls\n");
- goto out_free_buffer;
+ goto out_free_cmd;
}
} else if (request_size < 0) {
rc = -EINVAL;
- goto out_free_buffer;
+ goto out_free_cmd;
}
/* If data is being written into the device, copy the data from user
@@ -3913,6 +3908,8 @@ out_handle_response:
out_free_sglist:
pmcraid_release_passthrough_ioadls(cmd, request_size, direction);
+
+out_free_cmd:
pmcraid_return_cmd(cmd);
out_free_buffer:
@@ -6023,8 +6020,10 @@ static int __init pmcraid_init(void)
error = pmcraid_netlink_init();
- if (error)
+ if (error) {
+ class_destroy(pmcraid_class);
goto out_unreg_chrdev;
+ }
error = pci_register_driver(&pmcraid_driver);
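[Editor's note] Both pmcraid fixes are error-path ordering repairs: the new out_free_cmd label makes the ioctl path return the command block it allocated, and pmcraid_init() now destroys the class it created before bailing out of a failed netlink registration. A minimal sketch of the reverse-order goto unwinding these rely on (step and undo helpers are hypothetical):

    #include <stdio.h>

    static int step_a(void) { return 0; }
    static int step_b(void) { return -1; } /* pretend this fails */
    static void undo_a(void) { printf("undo a\n"); }

    static int init(void)
    {
            int err;

            err = step_a();
            if (err)
                    goto out;
            err = step_b();
            if (err)
                    goto out_undo_a;        /* unwind in reverse setup order */
            return 0;

    out_undo_a:
            undo_a();
    out:
            return err;
    }

    int main(void)
    {
            return init() ? 1 : 0;
    }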
diff --git a/drivers/scsi/qedi/Kconfig b/drivers/scsi/qedi/Kconfig
new file mode 100644
index 000000000000..23ca8a274586
--- /dev/null
+++ b/drivers/scsi/qedi/Kconfig
@@ -0,0 +1,10 @@
+config QEDI
+ tristate "QLogic QEDI 25/40/100Gb iSCSI Initiator Driver Support"
+ depends on PCI && SCSI
+ depends on QED
+ select SCSI_ISCSI_ATTRS
+ select QED_LL2
+ select QED_ISCSI
+ ---help---
+ This driver supports iSCSI offload for the QLogic FastLinQ
+ 41000 Series Converged Network Adapters.
diff --git a/drivers/scsi/qedi/Makefile b/drivers/scsi/qedi/Makefile
new file mode 100644
index 000000000000..2b3e16b24299
--- /dev/null
+++ b/drivers/scsi/qedi/Makefile
@@ -0,0 +1,5 @@
+obj-$(CONFIG_QEDI) := qedi.o
+qedi-y := qedi_main.o qedi_iscsi.o qedi_fw.o qedi_sysfs.o \
+ qedi_dbg.o
+
+qedi-$(CONFIG_DEBUG_FS) += qedi_debugfs.o
diff --git a/drivers/scsi/qedi/qedi.h b/drivers/scsi/qedi/qedi.h
new file mode 100644
index 000000000000..5ca3e8c28a3f
--- /dev/null
+++ b/drivers/scsi/qedi/qedi.h
@@ -0,0 +1,364 @@
+/*
+ * QLogic iSCSI Offload Driver
+ * Copyright (c) 2016 Cavium Inc.
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#ifndef _QEDI_H_
+#define _QEDI_H_
+
+#define __PREVENT_QED_HSI__
+
+#include <scsi/scsi_transport_iscsi.h>
+#include <scsi/libiscsi.h>
+#include <scsi/scsi_host.h>
+#include <linux/uio_driver.h>
+
+#include "qedi_hsi.h"
+#include <linux/qed/qed_if.h>
+#include "qedi_dbg.h"
+#include <linux/qed/qed_iscsi_if.h>
+#include <linux/qed/qed_ll2_if.h>
+#include "qedi_version.h"
+
+#define QEDI_MODULE_NAME "qedi"
+
+struct qedi_endpoint;
+
+/*
+ * PCI function probe defines
+ */
+#define QEDI_MODE_NORMAL 0
+#define QEDI_MODE_RECOVERY 1
+
+#define ISCSI_WQE_SET_PTU_INVALIDATE 1
+#define QEDI_MAX_ISCSI_TASK 4096
+#define QEDI_MAX_TASK_NUM 0x0FFF
+#define QEDI_MAX_ISCSI_CONNS_PER_HBA 1024
+#define QEDI_ISCSI_MAX_BDS_PER_CMD 256 /* Firmware max BDs is 256 */
+#define MAX_OUSTANDING_TASKS_PER_CON 1024
+
+#define QEDI_MAX_BD_LEN 0xffff
+#define QEDI_BD_SPLIT_SZ 0x1000
+#define QEDI_PAGE_SIZE 4096
+#define QEDI_FAST_SGE_COUNT 4
+/* MAX Length for cached SGL */
+#define MAX_SGLEN_FOR_CACHESGL ((1U << 16) - 1)
+
+#define MAX_NUM_MSIX_PF 8
+#define MIN_NUM_CPUS_MSIX(x) min((x)->msix_count, num_online_cpus())
+
+#define QEDI_LOCAL_PORT_MIN 60000
+#define QEDI_LOCAL_PORT_MAX 61024
+#define QEDI_LOCAL_PORT_RANGE (QEDI_LOCAL_PORT_MAX - QEDI_LOCAL_PORT_MIN)
+#define QEDI_LOCAL_PORT_INVALID 0xffff
+#define TX_RX_RING 16
+#define RX_RING (TX_RX_RING - 1)
+#define LL2_SINGLE_BUF_SIZE 0x400
+#define QEDI_PAGE_ALIGN(addr) ALIGN(addr, QEDI_PAGE_SIZE)
+#define QEDI_PAGE_MASK (~((QEDI_PAGE_SIZE) - 1))
+
+#define QEDI_PATH_HANDLE 0xFE0000000UL
+
+struct qedi_uio_ctrl {
+ /* meta data */
+ u32 uio_hsi_version;
+
+ /* user writes */
+ u32 host_tx_prod;
+ u32 host_rx_cons;
+ u32 host_rx_bd_cons;
+ u32 host_tx_pkt_len;
+ u32 host_rx_cons_cnt;
+
+ /* driver writes */
+ u32 hw_tx_cons;
+ u32 hw_rx_prod;
+ u32 hw_rx_bd_prod;
+ u32 hw_rx_prod_cnt;
+
+ /* other */
+ u8 mac_addr[6];
+ u8 reserve[2];
+};
+
+struct qedi_rx_bd {
+ u32 rx_pkt_index;
+ u32 rx_pkt_len;
+ u16 vlan_id;
+};
+
+#define QEDI_RX_DESC_CNT (QEDI_PAGE_SIZE / sizeof(struct qedi_rx_bd))
+#define QEDI_MAX_RX_DESC_CNT (QEDI_RX_DESC_CNT - 1)
+#define QEDI_NUM_RX_BD (QEDI_RX_DESC_CNT * 1)
+#define QEDI_MAX_RX_BD (QEDI_NUM_RX_BD - 1)
+
+#define QEDI_NEXT_RX_IDX(x) ((((x) & (QEDI_MAX_RX_DESC_CNT)) == \
+ (QEDI_MAX_RX_DESC_CNT - 1)) ? \
+ (x) + 2 : (x) + 1)
+
+struct qedi_uio_dev {
+ struct uio_info qedi_uinfo;
+ u32 uio_dev;
+ struct list_head list;
+
+ u32 ll2_ring_size;
+ void *ll2_ring;
+
+ u32 ll2_buf_size;
+ void *ll2_buf;
+
+ void *rx_pkt;
+ void *tx_pkt;
+
+ struct qedi_ctx *qedi;
+ struct pci_dev *pdev;
+ void *uctrl;
+};
+
+/* List to maintain the skb pointers */
+struct skb_work_list {
+ struct list_head list;
+ struct sk_buff *skb;
+ u16 vlan_id;
+};
+
+/* Queue sizes in number of elements */
+#define QEDI_SQ_SIZE MAX_OUSTANDING_TASKS_PER_CON
+#define QEDI_CQ_SIZE 2048
+#define QEDI_CMDQ_SIZE QEDI_MAX_ISCSI_TASK
+#define QEDI_PROTO_CQ_PROD_IDX 0
+
+struct qedi_glbl_q_params {
+ u64 hw_p_cq; /* Completion queue PBL */
+ u64 hw_p_rq; /* Request queue PBL */
+ u64 hw_p_cmdq; /* Command queue PBL */
+};
+
+struct global_queue {
+ union iscsi_cqe *cq;
+ dma_addr_t cq_dma;
+ u32 cq_mem_size;
+ u32 cq_cons_idx; /* Completion queue consumer index */
+
+ void *cq_pbl;
+ dma_addr_t cq_pbl_dma;
+ u32 cq_pbl_size;
+
+};
+
+struct qedi_fastpath {
+ struct qed_sb_info *sb_info;
+ u16 sb_id;
+#define QEDI_NAME_SIZE 16
+ char name[QEDI_NAME_SIZE];
+ struct qedi_ctx *qedi;
+};
+
+/* Used to pass fastpath information needed to process CQEs */
+struct qedi_io_work {
+ struct list_head list;
+ struct iscsi_cqe_solicited cqe;
+ u16 que_idx;
+};
+
+/**
+ * struct iscsi_cid_queue - Per adapter iscsi cid queue
+ *
+ * @cid_que_base: queue base memory
+ * @cid_que: queue memory pointer
+ * @cid_q_prod_idx: producer index
+ * @cid_q_cons_idx: consumer index
+ * @cid_q_max_idx: max index. used to detect wrap around condition
+ * @cid_free_cnt: queue size
+ * @conn_cid_tbl: iscsi cid to conn structure mapping table
+ *
+ * Per adapter iSCSI CID Queue
+ */
+struct iscsi_cid_queue {
+ void *cid_que_base;
+ u32 *cid_que;
+ u32 cid_q_prod_idx;
+ u32 cid_q_cons_idx;
+ u32 cid_q_max_idx;
+ u32 cid_free_cnt;
+ struct qedi_conn **conn_cid_tbl;
+};
+
+struct qedi_portid_tbl {
+ spinlock_t lock; /* Port id lock */
+ u16 start;
+ u16 max;
+ u16 next;
+ unsigned long *table;
+};
+
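+/* One entry per firmware TID: maps the wire ITT back to the active cmd */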
+struct qedi_itt_map {
+ __le32 itt;
+ struct qedi_cmd *p_cmd;
+};
+
+/* I/O tracing entry */
+#define QEDI_IO_TRACE_SIZE 2048
+struct qedi_io_log {
+#define QEDI_IO_TRACE_REQ 0
+#define QEDI_IO_TRACE_RSP 1
+ u8 direction;
+ u16 task_id;
+ u32 cid;
+ u32 port_id; /* Remote port fabric ID */
+ int lun;
+ u8 op; /* SCSI CDB */
+ u8 lba[4];
+ unsigned int bufflen; /* SCSI buffer length */
+ unsigned int sg_count; /* Number of SG elements */
+ u8 fast_sgs; /* number of fast sgls */
+ u8 slow_sgs; /* number of slow sgls */
+ u8 cached_sgs; /* number of cached sgls */
+ int result; /* Result passed back to mid-layer */
+ unsigned long jiffies; /* Time stamp when I/O logged */
+ int refcount; /* Reference count for task id */
+ unsigned int blk_req_cpu; /* CPU that the task is queued on by
+ * blk layer
+ */
+ unsigned int req_cpu; /* CPU that the task is queued on */
+ unsigned int intr_cpu; /* Interrupt CPU that the task is received on */
+ unsigned int blk_rsp_cpu;/* CPU that task is actually processed and
+ * returned to blk layer
+ */
+ bool cached_sge;
+ bool slow_sge;
+ bool fast_sge;
+};
+
+/* Number of entries in BDQ */
+#define QEDI_BDQ_NUM 256
+#define QEDI_BDQ_BUF_SIZE 256
+
+/* DMA coherent buffers for BDQ */
+struct qedi_bdq_buf {
+ void *buf_addr;
+ dma_addr_t buf_dma;
+};
+
+/* Main port level struct */
+struct qedi_ctx {
+ struct qedi_dbg_ctx dbg_ctx;
+ struct Scsi_Host *shost;
+ struct pci_dev *pdev;
+ struct qed_dev *cdev;
+ struct qed_dev_iscsi_info dev_info;
+ struct qed_int_info int_info;
+ struct qedi_glbl_q_params *p_cpuq;
+ struct global_queue **global_queues;
+ /* uio declaration */
+ struct qedi_uio_dev *udev;
+ struct list_head ll2_skb_list;
+ spinlock_t ll2_lock; /* Light L2 lock */
+ spinlock_t hba_lock; /* per port lock */
+ struct task_struct *ll2_recv_thread;
+ unsigned long flags;
+#define UIO_DEV_OPENED 1
+#define QEDI_IOTHREAD_WAKE 2
+#define QEDI_IN_RECOVERY 5
+#define QEDI_IN_OFFLINE 6
+
+ u8 mac[ETH_ALEN];
+ u32 src_ip[4];
+ u8 ip_type;
+
+ /* Physical address of above array */
+ dma_addr_t hw_p_cpuq;
+
+ struct qedi_bdq_buf bdq[QEDI_BDQ_NUM];
+ void *bdq_pbl;
+ dma_addr_t bdq_pbl_dma;
+ size_t bdq_pbl_mem_size;
+ void *bdq_pbl_list;
+ dma_addr_t bdq_pbl_list_dma;
+ u8 bdq_pbl_list_num_entries;
+ void __iomem *bdq_primary_prod;
+ void __iomem *bdq_secondary_prod;
+ u16 bdq_prod_idx;
+ u16 rq_num_entries;
+
+ u32 msix_count;
+ u32 max_sqes;
+ u8 num_queues;
+ u32 max_active_conns;
+
+ struct iscsi_cid_queue cid_que;
+ struct qedi_endpoint **ep_tbl;
+ struct qedi_portid_tbl lcl_port_tbl;
+
+ /* Rx fast path intr context */
+ struct qed_sb_info *sb_array;
+ struct qedi_fastpath *fp_array;
+ struct qed_iscsi_tid tasks;
+
+#define QEDI_LINK_DOWN 0
+#define QEDI_LINK_UP 1
+ atomic_t link_state;
+
+#define QEDI_RESERVE_TASK_ID 0
+#define MAX_ISCSI_TASK_ENTRIES 4096
+#define QEDI_INVALID_TASK_ID (MAX_ISCSI_TASK_ENTRIES + 1)
+ unsigned long task_idx_map[MAX_ISCSI_TASK_ENTRIES / BITS_PER_LONG];
+ struct qedi_itt_map *itt_map;
+ u16 tid_reuse_count[QEDI_MAX_ISCSI_TASK];
+ struct qed_pf_params pf_params;
+
+ struct workqueue_struct *tmf_thread;
+ struct workqueue_struct *offload_thread;
+
+ u16 ll2_mtu;
+
+ struct workqueue_struct *dpc_wq;
+
+ spinlock_t task_idx_lock; /* To protect gbl context */
+ s32 last_tidx_alloc;
+ s32 last_tidx_clear;
+
+ struct qedi_io_log io_trace_buf[QEDI_IO_TRACE_SIZE];
+	spinlock_t io_trace_lock; /* protect trace log buf */
+ u16 io_trace_idx;
+ unsigned int intr_cpu;
+ u32 cached_sgls;
+ bool use_cached_sge;
+ u32 slow_sgls;
+ bool use_slow_sge;
+ u32 fast_sgls;
+ bool use_fast_sge;
+
+ atomic_t num_offloads;
+};
+
+struct qedi_work {
+ struct list_head list;
+ struct qedi_ctx *qedi;
+ union iscsi_cqe cqe;
+ u16 que_idx;
+ bool is_solicited;
+};
+
+struct qedi_percpu_s {
+ struct task_struct *iothread;
+ struct list_head work_list;
+ spinlock_t p_work_lock; /* Per cpu worker lock */
+};
+
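+/*
+ * Task context memory is carved into blocks of num_tids_per_block
+ * entries, each info->size bytes long; return the entry for @tid.
+ */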
+static inline void *qedi_get_task_mem(struct qed_iscsi_tid *info, u32 tid)
+{
+ return (info->blocks[tid / info->num_tids_per_block] +
+ (tid % info->num_tids_per_block) * info->size);
+}
+
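+/* Split a 64-bit value (e.g. a DMA address) into its 32-bit halves */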
+#define QEDI_U64_HI(val) ((u32)(((u64)(val)) >> 32))
+#define QEDI_U64_LO(val) ((u32)(((u64)(val)) & 0xffffffff))
+
+#endif /* _QEDI_H_ */
diff --git a/drivers/scsi/qedi/qedi_dbg.c b/drivers/scsi/qedi/qedi_dbg.c
new file mode 100644
index 000000000000..2bdedb9c39bc
--- /dev/null
+++ b/drivers/scsi/qedi/qedi_dbg.c
@@ -0,0 +1,143 @@
+/*
+ * QLogic iSCSI Offload Driver
+ * Copyright (c) 2016 Cavium Inc.
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#include "qedi_dbg.h"
+#include <linux/vmalloc.h>
+
+void
+qedi_dbg_err(struct qedi_dbg_ctx *qedi, const char *func, u32 line,
+ const char *fmt, ...)
+{
+ va_list va;
+ struct va_format vaf;
+ char nfunc[32];
+
+ memset(nfunc, 0, sizeof(nfunc));
+	strncpy(nfunc, func, sizeof(nfunc) - 1);
+
+ va_start(va, fmt);
+
+ vaf.fmt = fmt;
+ vaf.va = &va;
+
+ if (likely(qedi) && likely(qedi->pdev))
+ pr_err("[%s]:[%s:%d]:%d: %pV", dev_name(&qedi->pdev->dev),
+ nfunc, line, qedi->host_no, &vaf);
+ else
+ pr_err("[0000:00:00.0]:[%s:%d]: %pV", nfunc, line, &vaf);
+
+ va_end(va);
+}
+
+void
+qedi_dbg_warn(struct qedi_dbg_ctx *qedi, const char *func, u32 line,
+ const char *fmt, ...)
+{
+ va_list va;
+ struct va_format vaf;
+ char nfunc[32];
+
+ memset(nfunc, 0, sizeof(nfunc));
+	strncpy(nfunc, func, sizeof(nfunc) - 1);
+
+	if (!(qedi_dbg_log & QEDI_LOG_WARN))
+		return;
+
+	va_start(va, fmt);
+
+	vaf.fmt = fmt;
+	vaf.va = &va;
+
+ if (likely(qedi) && likely(qedi->pdev))
+ pr_warn("[%s]:[%s:%d]:%d: %pV", dev_name(&qedi->pdev->dev),
+ nfunc, line, qedi->host_no, &vaf);
+ else
+ pr_warn("[0000:00:00.0]:[%s:%d]: %pV", nfunc, line, &vaf);
+
+ va_end(va);
+}
+
+void
+qedi_dbg_notice(struct qedi_dbg_ctx *qedi, const char *func, u32 line,
+ const char *fmt, ...)
+{
+ va_list va;
+ struct va_format vaf;
+ char nfunc[32];
+
+ memset(nfunc, 0, sizeof(nfunc));
+	strncpy(nfunc, func, sizeof(nfunc) - 1);
+
+	if (!(qedi_dbg_log & QEDI_LOG_NOTICE))
+		return;
+
+	va_start(va, fmt);
+
+	vaf.fmt = fmt;
+	vaf.va = &va;
+
+ if (likely(qedi) && likely(qedi->pdev))
+ pr_notice("[%s]:[%s:%d]:%d: %pV",
+ dev_name(&qedi->pdev->dev), nfunc, line,
+ qedi->host_no, &vaf);
+ else
+ pr_notice("[0000:00:00.0]:[%s:%d]: %pV", nfunc, line, &vaf);
+
+ va_end(va);
+}
+
+void
+qedi_dbg_info(struct qedi_dbg_ctx *qedi, const char *func, u32 line,
+ u32 level, const char *fmt, ...)
+{
+ va_list va;
+ struct va_format vaf;
+ char nfunc[32];
+
+ memset(nfunc, 0, sizeof(nfunc));
+	strncpy(nfunc, func, sizeof(nfunc) - 1);
+
+	if (!(qedi_dbg_log & level))
+		return;
+
+	va_start(va, fmt);
+
+	vaf.fmt = fmt;
+	vaf.va = &va;
+
+ if (likely(qedi) && likely(qedi->pdev))
+ pr_info("[%s]:[%s:%d]:%d: %pV", dev_name(&qedi->pdev->dev),
+ nfunc, line, qedi->host_no, &vaf);
+ else
+ pr_info("[0000:00:00.0]:[%s:%d]: %pV", nfunc, line, &vaf);
+
+ va_end(va);
+}
+
+int
+qedi_create_sysfs_attr(struct Scsi_Host *shost, struct sysfs_bin_attrs *iter)
+{
+ int ret = 0;
+
+ for (; iter->name; iter++) {
+ ret = sysfs_create_bin_file(&shost->shost_gendev.kobj,
+ iter->attr);
+ if (ret)
+ pr_err("Unable to create sysfs %s attr, err(%d).\n",
+ iter->name, ret);
+ }
+ return ret;
+}
+
+void
+qedi_remove_sysfs_attr(struct Scsi_Host *shost, struct sysfs_bin_attrs *iter)
+{
+ for (; iter->name; iter++)
+ sysfs_remove_bin_file(&shost->shost_gendev.kobj, iter->attr);
+}
diff --git a/drivers/scsi/qedi/qedi_dbg.h b/drivers/scsi/qedi/qedi_dbg.h
new file mode 100644
index 000000000000..c55572badfb0
--- /dev/null
+++ b/drivers/scsi/qedi/qedi_dbg.h
@@ -0,0 +1,144 @@
+/*
+ * QLogic iSCSI Offload Driver
+ * Copyright (c) 2016 Cavium Inc.
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#ifndef _QEDI_DBG_H_
+#define _QEDI_DBG_H_
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/compiler.h>
+#include <linux/string.h>
+#include <linux/version.h>
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <scsi/scsi_transport.h>
+#include <scsi/scsi_transport_iscsi.h>
+#include <linux/fs.h>
+
+#define __PREVENT_QED_HSI__
+#include <linux/qed/common_hsi.h>
+#include <linux/qed/qed_if.h>
+
+extern uint qedi_dbg_log;
+
+/* Debug print level definitions */
+#define QEDI_LOG_DEFAULT 0x1 /* Set default logging mask */
+#define QEDI_LOG_INFO 0x2 /* Informational logs,
+ * MAC address, WWPN, WWNN
+ */
+#define QEDI_LOG_DISC 0x4 /* Init, discovery, rport */
+#define QEDI_LOG_LL2 0x8 /* LL2, VLAN logs */
+#define QEDI_LOG_CONN 0x10 /* Connection setup, cleanup */
+#define QEDI_LOG_EVT 0x20 /* Events, link, mtu */
+#define QEDI_LOG_TIMER 0x40 /* Timer events */
+#define QEDI_LOG_MP_REQ 0x80 /* Middle Path (MP) logs */
+#define QEDI_LOG_SCSI_TM 0x100 /* SCSI Aborts, Task Mgmt */
+#define QEDI_LOG_UNSOL 0x200 /* unsolicited event logs */
+#define QEDI_LOG_IO 0x400 /* scsi cmd, completion */
+#define QEDI_LOG_MQ 0x800 /* Multi Queue logs */
+#define QEDI_LOG_BSG 0x1000 /* BSG logs */
+#define QEDI_LOG_DEBUGFS 0x2000 /* debugFS logs */
+#define QEDI_LOG_LPORT 0x4000 /* lport logs */
+#define QEDI_LOG_ELS 0x8000 /* ELS logs */
+#define QEDI_LOG_NPIV 0x10000 /* NPIV logs */
+#define QEDI_LOG_SESS		0x20000		/* Session setup, cleanup */
+#define QEDI_LOG_UIO 0x40000 /* iSCSI UIO logs */
+#define QEDI_LOG_TID 0x80000 /* FW TID context acquire,
+ * free
+ */
+#define QEDI_TRACK_TID 0x100000 /* Track TID state. To be
+ * enabled only at module load
+ * and not run-time.
+ */
+#define QEDI_TRACK_CMD_LIST 0x300000 /* Track active cmd list nodes,
+ * done with reference to TID,
+ * hence TRACK_TID also enabled.
+ */
+#define QEDI_LOG_NOTICE 0x40000000 /* Notice logs */
+#define QEDI_LOG_WARN 0x80000000 /* Warning logs */
+
+/* Debug context structure */
+struct qedi_dbg_ctx {
+ unsigned int host_no;
+ struct pci_dev *pdev;
+#ifdef CONFIG_DEBUG_FS
+ struct dentry *bdf_dentry;
+#endif
+};
+
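+/* Wrappers that stamp the calling function and line into each message */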
+#define QEDI_ERR(pdev, fmt, ...) \
+ qedi_dbg_err(pdev, __func__, __LINE__, fmt, ## __VA_ARGS__)
+#define QEDI_WARN(pdev, fmt, ...) \
+ qedi_dbg_warn(pdev, __func__, __LINE__, fmt, ## __VA_ARGS__)
+#define QEDI_NOTICE(pdev, fmt, ...) \
+ qedi_dbg_notice(pdev, __func__, __LINE__, fmt, ## __VA_ARGS__)
+#define QEDI_INFO(pdev, level, fmt, ...) \
+ qedi_dbg_info(pdev, __func__, __LINE__, level, fmt, \
+ ## __VA_ARGS__)
+
+void qedi_dbg_err(struct qedi_dbg_ctx *qedi, const char *func, u32 line,
+ const char *fmt, ...);
+void qedi_dbg_warn(struct qedi_dbg_ctx *qedi, const char *func, u32 line,
+ const char *fmt, ...);
+void qedi_dbg_notice(struct qedi_dbg_ctx *qedi, const char *func, u32 line,
+ const char *fmt, ...);
+void qedi_dbg_info(struct qedi_dbg_ctx *qedi, const char *func, u32 line,
+ u32 info, const char *fmt, ...);
+
+struct Scsi_Host;
+
+struct sysfs_bin_attrs {
+ char *name;
+ struct bin_attribute *attr;
+};
+
+int qedi_create_sysfs_attr(struct Scsi_Host *shost,
+ struct sysfs_bin_attrs *iter);
+void qedi_remove_sysfs_attr(struct Scsi_Host *shost,
+ struct sysfs_bin_attrs *iter);
+
+#ifdef CONFIG_DEBUG_FS
+/* DebugFS related code */
+struct qedi_list_of_funcs {
+ char *oper_str;
+ ssize_t (*oper_func)(struct qedi_dbg_ctx *qedi);
+};
+
+struct qedi_debugfs_ops {
+ char *name;
+ struct qedi_list_of_funcs *qedi_funcs;
+};
+
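+/* Build a file_operations for a plain read/write debugfs attribute */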
+#define qedi_dbg_fileops(drv, ops) \
+{ \
+ .owner = THIS_MODULE, \
+ .open = simple_open, \
+ .read = drv##_dbg_##ops##_cmd_read, \
+ .write = drv##_dbg_##ops##_cmd_write \
+}
+
+/* Used for debugfs sequential files */
+#define qedi_dbg_fileops_seq(drv, ops) \
+{ \
+ .owner = THIS_MODULE, \
+ .open = drv##_dbg_##ops##_open, \
+ .read = seq_read, \
+ .llseek = seq_lseek, \
+ .release = single_release, \
+}
+
+void qedi_dbg_host_init(struct qedi_dbg_ctx *qedi,
+ struct qedi_debugfs_ops *dops,
+ const struct file_operations *fops);
+void qedi_dbg_host_exit(struct qedi_dbg_ctx *qedi);
+void qedi_dbg_init(char *drv_name);
+void qedi_dbg_exit(void);
+#endif /* CONFIG_DEBUG_FS */
+
+#endif /* _QEDI_DBG_H_ */
diff --git a/drivers/scsi/qedi/qedi_debugfs.c b/drivers/scsi/qedi/qedi_debugfs.c
new file mode 100644
index 000000000000..955936274241
--- /dev/null
+++ b/drivers/scsi/qedi/qedi_debugfs.c
@@ -0,0 +1,244 @@
+/*
+ * QLogic iSCSI Offload Driver
+ * Copyright (c) 2016 Cavium Inc.
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#include "qedi.h"
+#include "qedi_dbg.h"
+
+#include <linux/uaccess.h>
+#include <linux/debugfs.h>
+#include <linux/module.h>
+
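+/* Toggled through the "do_not_recover" debugfs attribute below */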
+int do_not_recover;
+static struct dentry *qedi_dbg_root;
+
+void
+qedi_dbg_host_init(struct qedi_dbg_ctx *qedi,
+ struct qedi_debugfs_ops *dops,
+ const struct file_operations *fops)
+{
+ char host_dirname[32];
+ struct dentry *file_dentry = NULL;
+
+ sprintf(host_dirname, "host%u", qedi->host_no);
+ qedi->bdf_dentry = debugfs_create_dir(host_dirname, qedi_dbg_root);
+ if (!qedi->bdf_dentry)
+ return;
+
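+	/* dops and fops are parallel arrays; walk them in lockstep */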
+ while (dops) {
+ if (!(dops->name))
+ break;
+
+ file_dentry = debugfs_create_file(dops->name, 0600,
+ qedi->bdf_dentry, qedi,
+ fops);
+ if (!file_dentry) {
+ QEDI_INFO(qedi, QEDI_LOG_DEBUGFS,
+ "Debugfs entry %s creation failed\n",
+ dops->name);
+ debugfs_remove_recursive(qedi->bdf_dentry);
+ return;
+ }
+ dops++;
+ fops++;
+ }
+}
+
+void
+qedi_dbg_host_exit(struct qedi_dbg_ctx *qedi)
+{
+ debugfs_remove_recursive(qedi->bdf_dentry);
+ qedi->bdf_dentry = NULL;
+}
+
+void
+qedi_dbg_init(char *drv_name)
+{
+ qedi_dbg_root = debugfs_create_dir(drv_name, NULL);
+ if (!qedi_dbg_root)
+ QEDI_INFO(NULL, QEDI_LOG_DEBUGFS, "Init of debugfs failed\n");
+}
+
+void
+qedi_dbg_exit(void)
+{
+ debugfs_remove_recursive(qedi_dbg_root);
+ qedi_dbg_root = NULL;
+}
+
+static ssize_t
+qedi_dbg_do_not_recover_enable(struct qedi_dbg_ctx *qedi_dbg)
+{
+ if (!do_not_recover)
+ do_not_recover = 1;
+
+ QEDI_INFO(qedi_dbg, QEDI_LOG_DEBUGFS, "do_not_recover=%d\n",
+ do_not_recover);
+ return 0;
+}
+
+static ssize_t
+qedi_dbg_do_not_recover_disable(struct qedi_dbg_ctx *qedi_dbg)
+{
+ if (do_not_recover)
+ do_not_recover = 0;
+
+ QEDI_INFO(qedi_dbg, QEDI_LOG_DEBUGFS, "do_not_recover=%d\n",
+ do_not_recover);
+ return 0;
+}
+
+static struct qedi_list_of_funcs qedi_dbg_do_not_recover_ops[] = {
+ { "enable", qedi_dbg_do_not_recover_enable },
+ { "disable", qedi_dbg_do_not_recover_disable },
+ { NULL, NULL }
+};
+
+struct qedi_debugfs_ops qedi_debugfs_ops[] = {
+ { "gbl_ctx", NULL },
+ { "do_not_recover", qedi_dbg_do_not_recover_ops},
+ { "io_trace", NULL },
+ { NULL, NULL }
+};
+
+static ssize_t
+qedi_dbg_do_not_recover_cmd_write(struct file *filp, const char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+	size_t cnt = 0;
+	char buf[16] = { 0 };
+	struct qedi_dbg_ctx *qedi_dbg =
+		(struct qedi_dbg_ctx *)filp->private_data;
+	struct qedi_list_of_funcs *lof = qedi_dbg_do_not_recover_ops;
+
+	if (*ppos)
+		return 0;
+
+	/* buffer is a user pointer, copy it in before comparing */
+	if (copy_from_user(buf, buffer, min(count, sizeof(buf) - 1)))
+		return -EFAULT;
+
+	while (lof) {
+		if (!(lof->oper_str))
+			break;
+
+		if (!strncmp(lof->oper_str, buf, strlen(lof->oper_str))) {
+ cnt = lof->oper_func(qedi_dbg);
+ break;
+ }
+
+ lof++;
+ }
+ return (count - cnt);
+}
+
+static ssize_t
+qedi_dbg_do_not_recover_cmd_read(struct file *filp, char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+	char buf[32];
+	int len;
+
+	/* Format into a kernel buffer; buffer is a user pointer */
+	len = sprintf(buf, "do_not_recover=%d\n", do_not_recover);
+	return simple_read_from_buffer(buffer, count, ppos, buf, len);
+}
+
+static int
+qedi_gbl_ctx_show(struct seq_file *s, void *unused)
+{
+ struct qedi_fastpath *fp = NULL;
+ struct qed_sb_info *sb_info = NULL;
+ struct status_block *sb = NULL;
+ struct global_queue *que = NULL;
+ int id;
+ u16 prod_idx;
+ struct qedi_ctx *qedi = s->private;
+ unsigned long flags;
+
+ seq_puts(s, " DUMP CQ CONTEXT:\n");
+
+ for (id = 0; id < MIN_NUM_CPUS_MSIX(qedi); id++) {
+ spin_lock_irqsave(&qedi->hba_lock, flags);
+ seq_printf(s, "=========FAST CQ PATH [%d] ==========\n", id);
+ fp = &qedi->fp_array[id];
+ sb_info = fp->sb_info;
+ sb = sb_info->sb_virt;
+ prod_idx = (sb->pi_array[QEDI_PROTO_CQ_PROD_IDX] &
+ STATUS_BLOCK_PROD_INDEX_MASK);
+ seq_printf(s, "SB PROD IDX: %d\n", prod_idx);
+ que = qedi->global_queues[fp->sb_id];
+ seq_printf(s, "DRV CONS IDX: %d\n", que->cq_cons_idx);
+ seq_printf(s, "CQ complete host memory: %d\n", fp->sb_id);
+ seq_puts(s, "=========== END ==================\n\n\n");
+ spin_unlock_irqrestore(&qedi->hba_lock, flags);
+ }
+ return 0;
+}
+
+static int
+qedi_dbg_gbl_ctx_open(struct inode *inode, struct file *file)
+{
+ struct qedi_dbg_ctx *qedi_dbg = inode->i_private;
+ struct qedi_ctx *qedi = container_of(qedi_dbg, struct qedi_ctx,
+ dbg_ctx);
+
+ return single_open(file, qedi_gbl_ctx_show, qedi);
+}
+
+static int
+qedi_io_trace_show(struct seq_file *s, void *unused)
+{
+ int id, idx = 0;
+ struct qedi_ctx *qedi = s->private;
+ struct qedi_io_log *io_log;
+ unsigned long flags;
+
+ seq_puts(s, " DUMP IO LOGS:\n");
+ spin_lock_irqsave(&qedi->io_trace_lock, flags);
+ idx = qedi->io_trace_idx;
+ for (id = 0; id < QEDI_IO_TRACE_SIZE; id++) {
+ io_log = &qedi->io_trace_buf[idx];
+ seq_printf(s, "iodir-%d:", io_log->direction);
+ seq_printf(s, "tid-0x%x:", io_log->task_id);
+ seq_printf(s, "cid-0x%x:", io_log->cid);
+ seq_printf(s, "lun-%d:", io_log->lun);
+ seq_printf(s, "op-0x%02x:", io_log->op);
+ seq_printf(s, "0x%02x%02x%02x%02x:", io_log->lba[0],
+ io_log->lba[1], io_log->lba[2], io_log->lba[3]);
+ seq_printf(s, "buflen-%d:", io_log->bufflen);
+ seq_printf(s, "sgcnt-%d:", io_log->sg_count);
+ seq_printf(s, "res-0x%08x:", io_log->result);
+ seq_printf(s, "jif-%lu:", io_log->jiffies);
+ seq_printf(s, "blk_req_cpu-%d:", io_log->blk_req_cpu);
+ seq_printf(s, "req_cpu-%d:", io_log->req_cpu);
+ seq_printf(s, "intr_cpu-%d:", io_log->intr_cpu);
+ seq_printf(s, "blk_rsp_cpu-%d\n", io_log->blk_rsp_cpu);
+
+ idx++;
+ if (idx == QEDI_IO_TRACE_SIZE)
+ idx = 0;
+ }
+ spin_unlock_irqrestore(&qedi->io_trace_lock, flags);
+ return 0;
+}
+
+static int
+qedi_dbg_io_trace_open(struct inode *inode, struct file *file)
+{
+ struct qedi_dbg_ctx *qedi_dbg = inode->i_private;
+ struct qedi_ctx *qedi = container_of(qedi_dbg, struct qedi_ctx,
+ dbg_ctx);
+
+ return single_open(file, qedi_io_trace_show, qedi);
+}
+
+const struct file_operations qedi_dbg_fops[] = {
+ qedi_dbg_fileops_seq(qedi, gbl_ctx),
+ qedi_dbg_fileops(qedi, do_not_recover),
+ qedi_dbg_fileops_seq(qedi, io_trace),
+ { NULL, NULL },
+};
diff --git a/drivers/scsi/qedi/qedi_fw.c b/drivers/scsi/qedi/qedi_fw.c
new file mode 100644
index 000000000000..b1d3904ae8fd
--- /dev/null
+++ b/drivers/scsi/qedi/qedi_fw.c
@@ -0,0 +1,2378 @@
+/*
+ * QLogic iSCSI Offload Driver
+ * Copyright (c) 2016 Cavium Inc.
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#include <linux/blkdev.h>
+#include <scsi/scsi_tcq.h>
+#include <linux/delay.h>
+
+#include "qedi.h"
+#include "qedi_iscsi.h"
+#include "qedi_gbl.h"
+
+static int qedi_send_iscsi_tmf(struct qedi_conn *qedi_conn,
+ struct iscsi_task *mtask);
+
+void qedi_iscsi_unmap_sg_list(struct qedi_cmd *cmd)
+{
+ struct scsi_cmnd *sc = cmd->scsi_cmd;
+
+ if (cmd->io_tbl.sge_valid && sc) {
+ cmd->io_tbl.sge_valid = 0;
+ scsi_dma_unmap(sc);
+ }
+}
+
+static void qedi_process_logout_resp(struct qedi_ctx *qedi,
+ union iscsi_cqe *cqe,
+ struct iscsi_task *task,
+ struct qedi_conn *qedi_conn)
+{
+ struct iscsi_conn *conn = qedi_conn->cls_conn->dd_data;
+ struct iscsi_logout_rsp *resp_hdr;
+ struct iscsi_session *session = conn->session;
+ struct iscsi_logout_response_hdr *cqe_logout_response;
+ struct qedi_cmd *cmd;
+
+ cmd = (struct qedi_cmd *)task->dd_data;
+ cqe_logout_response = &cqe->cqe_common.iscsi_hdr.logout_response;
+ spin_lock(&session->back_lock);
+ resp_hdr = (struct iscsi_logout_rsp *)&qedi_conn->gen_pdu.resp_hdr;
+ memset(resp_hdr, 0, sizeof(struct iscsi_hdr));
+ resp_hdr->opcode = cqe_logout_response->opcode;
+ resp_hdr->flags = cqe_logout_response->flags;
+ resp_hdr->hlength = 0;
+
+ resp_hdr->itt = build_itt(cqe->cqe_solicited.itid, conn->session->age);
+ resp_hdr->statsn = cpu_to_be32(cqe_logout_response->stat_sn);
+ resp_hdr->exp_cmdsn = cpu_to_be32(cqe_logout_response->exp_cmd_sn);
+ resp_hdr->max_cmdsn = cpu_to_be32(cqe_logout_response->max_cmd_sn);
+
+ resp_hdr->t2wait = cpu_to_be32(cqe_logout_response->time2wait);
+ resp_hdr->t2retain = cpu_to_be32(cqe_logout_response->time2retain);
+
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_TID,
+ "Freeing tid=0x%x for cid=0x%x\n",
+ cmd->task_id, qedi_conn->iscsi_conn_id);
+
+ if (likely(cmd->io_cmd_in_list)) {
+ cmd->io_cmd_in_list = false;
+ list_del_init(&cmd->io_cmd);
+ qedi_conn->active_cmd_count--;
+ } else {
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
+ "Active cmd list node already deleted, tid=0x%x, cid=0x%x, io_cmd_node=%p\n",
+ cmd->task_id, qedi_conn->iscsi_conn_id,
+ &cmd->io_cmd);
+ }
+
+ cmd->state = RESPONSE_RECEIVED;
+ qedi_clear_task_idx(qedi, cmd->task_id);
+ __iscsi_complete_pdu(conn, (struct iscsi_hdr *)resp_hdr, NULL, 0);
+
+ spin_unlock(&session->back_lock);
+}
+
+static void qedi_process_text_resp(struct qedi_ctx *qedi,
+ union iscsi_cqe *cqe,
+ struct iscsi_task *task,
+ struct qedi_conn *qedi_conn)
+{
+ struct iscsi_conn *conn = qedi_conn->cls_conn->dd_data;
+ struct iscsi_session *session = conn->session;
+ struct iscsi_task_context *task_ctx;
+ struct iscsi_text_rsp *resp_hdr_ptr;
+ struct iscsi_text_response_hdr *cqe_text_response;
+ struct qedi_cmd *cmd;
+ int pld_len;
+ u32 *tmp;
+
+ cmd = (struct qedi_cmd *)task->dd_data;
+ task_ctx = qedi_get_task_mem(&qedi->tasks, cmd->task_id);
+
+ cqe_text_response = &cqe->cqe_common.iscsi_hdr.text_response;
+ spin_lock(&session->back_lock);
+ resp_hdr_ptr = (struct iscsi_text_rsp *)&qedi_conn->gen_pdu.resp_hdr;
+ memset(resp_hdr_ptr, 0, sizeof(struct iscsi_hdr));
+ resp_hdr_ptr->opcode = cqe_text_response->opcode;
+ resp_hdr_ptr->flags = cqe_text_response->flags;
+ resp_hdr_ptr->hlength = 0;
+
+ hton24(resp_hdr_ptr->dlength,
+ (cqe_text_response->hdr_second_dword &
+ ISCSI_TEXT_RESPONSE_HDR_DATA_SEG_LEN_MASK));
+ tmp = (u32 *)resp_hdr_ptr->dlength;
+
+ resp_hdr_ptr->itt = build_itt(cqe->cqe_solicited.itid,
+ conn->session->age);
+ resp_hdr_ptr->ttt = cqe_text_response->ttt;
+ resp_hdr_ptr->statsn = cpu_to_be32(cqe_text_response->stat_sn);
+ resp_hdr_ptr->exp_cmdsn = cpu_to_be32(cqe_text_response->exp_cmd_sn);
+ resp_hdr_ptr->max_cmdsn = cpu_to_be32(cqe_text_response->max_cmd_sn);
+
+ pld_len = cqe_text_response->hdr_second_dword &
+ ISCSI_TEXT_RESPONSE_HDR_DATA_SEG_LEN_MASK;
+ qedi_conn->gen_pdu.resp_wr_ptr = qedi_conn->gen_pdu.resp_buf + pld_len;
+
+ memset(task_ctx, '\0', sizeof(*task_ctx));
+
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_TID,
+ "Freeing tid=0x%x for cid=0x%x\n",
+ cmd->task_id, qedi_conn->iscsi_conn_id);
+
+ if (likely(cmd->io_cmd_in_list)) {
+ cmd->io_cmd_in_list = false;
+ list_del_init(&cmd->io_cmd);
+ qedi_conn->active_cmd_count--;
+ } else {
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
+ "Active cmd list node already deleted, tid=0x%x, cid=0x%x, io_cmd_node=%p\n",
+ cmd->task_id, qedi_conn->iscsi_conn_id,
+ &cmd->io_cmd);
+ }
+
+ cmd->state = RESPONSE_RECEIVED;
+ qedi_clear_task_idx(qedi, cmd->task_id);
+
+ __iscsi_complete_pdu(conn, (struct iscsi_hdr *)resp_hdr_ptr,
+ qedi_conn->gen_pdu.resp_buf,
+ (qedi_conn->gen_pdu.resp_wr_ptr -
+ qedi_conn->gen_pdu.resp_buf));
+ spin_unlock(&session->back_lock);
+}
+
+static void qedi_tmf_resp_work(struct work_struct *work)
+{
+ struct qedi_cmd *qedi_cmd =
+ container_of(work, struct qedi_cmd, tmf_work);
+ struct qedi_conn *qedi_conn = qedi_cmd->conn;
+ struct qedi_ctx *qedi = qedi_conn->qedi;
+ struct iscsi_conn *conn = qedi_conn->cls_conn->dd_data;
+ struct iscsi_session *session = conn->session;
+ struct iscsi_tm_rsp *resp_hdr_ptr;
+ struct iscsi_cls_session *cls_sess;
+ int rval = 0;
+
+ set_bit(QEDI_CONN_FW_CLEANUP, &qedi_conn->flags);
+ resp_hdr_ptr = (struct iscsi_tm_rsp *)qedi_cmd->tmf_resp_buf;
+ cls_sess = iscsi_conn_to_session(qedi_conn->cls_conn);
+
+ iscsi_block_session(session->cls_session);
+ rval = qedi_cleanup_all_io(qedi, qedi_conn, qedi_cmd->task, true);
+ if (rval) {
+ clear_bit(QEDI_CONN_FW_CLEANUP, &qedi_conn->flags);
+ qedi_clear_task_idx(qedi, qedi_cmd->task_id);
+ iscsi_unblock_session(session->cls_session);
+ return;
+ }
+
+ iscsi_unblock_session(session->cls_session);
+ qedi_clear_task_idx(qedi, qedi_cmd->task_id);
+
+ spin_lock(&session->back_lock);
+ __iscsi_complete_pdu(conn, (struct iscsi_hdr *)resp_hdr_ptr, NULL, 0);
+ spin_unlock(&session->back_lock);
+ kfree(resp_hdr_ptr);
+ clear_bit(QEDI_CONN_FW_CLEANUP, &qedi_conn->flags);
+}
+
+static void qedi_process_tmf_resp(struct qedi_ctx *qedi,
+ union iscsi_cqe *cqe,
+ struct iscsi_task *task,
+ struct qedi_conn *qedi_conn)
+
+{
+ struct iscsi_conn *conn = qedi_conn->cls_conn->dd_data;
+ struct iscsi_session *session = conn->session;
+ struct iscsi_tmf_response_hdr *cqe_tmp_response;
+ struct iscsi_tm_rsp *resp_hdr_ptr;
+ struct iscsi_tm *tmf_hdr;
+ struct qedi_cmd *qedi_cmd = NULL;
+ u32 *tmp;
+
+ cqe_tmp_response = &cqe->cqe_common.iscsi_hdr.tmf_response;
+
+ qedi_cmd = task->dd_data;
+ qedi_cmd->tmf_resp_buf = kzalloc(sizeof(*resp_hdr_ptr), GFP_KERNEL);
+ if (!qedi_cmd->tmf_resp_buf) {
+ QEDI_ERR(&qedi->dbg_ctx,
+ "Failed to allocate resp buf, cid=0x%x\n",
+ qedi_conn->iscsi_conn_id);
+ return;
+ }
+
+ spin_lock(&session->back_lock);
+ resp_hdr_ptr = (struct iscsi_tm_rsp *)qedi_cmd->tmf_resp_buf;
+ memset(resp_hdr_ptr, 0, sizeof(struct iscsi_tm_rsp));
+
+ /* Fill up the header */
+ resp_hdr_ptr->opcode = cqe_tmp_response->opcode;
+ resp_hdr_ptr->flags = cqe_tmp_response->hdr_flags;
+ resp_hdr_ptr->response = cqe_tmp_response->hdr_response;
+ resp_hdr_ptr->hlength = 0;
+
+ hton24(resp_hdr_ptr->dlength,
+ (cqe_tmp_response->hdr_second_dword &
+ ISCSI_TMF_RESPONSE_HDR_DATA_SEG_LEN_MASK));
+ tmp = (u32 *)resp_hdr_ptr->dlength;
+ resp_hdr_ptr->itt = build_itt(cqe->cqe_solicited.itid,
+ conn->session->age);
+ resp_hdr_ptr->statsn = cpu_to_be32(cqe_tmp_response->stat_sn);
+ resp_hdr_ptr->exp_cmdsn = cpu_to_be32(cqe_tmp_response->exp_cmd_sn);
+ resp_hdr_ptr->max_cmdsn = cpu_to_be32(cqe_tmp_response->max_cmd_sn);
+
+ tmf_hdr = (struct iscsi_tm *)qedi_cmd->task->hdr;
+
+ if (likely(qedi_cmd->io_cmd_in_list)) {
+ qedi_cmd->io_cmd_in_list = false;
+ list_del_init(&qedi_cmd->io_cmd);
+ qedi_conn->active_cmd_count--;
+ }
+
+ if (((tmf_hdr->flags & ISCSI_FLAG_TM_FUNC_MASK) ==
+ ISCSI_TM_FUNC_LOGICAL_UNIT_RESET) ||
+ ((tmf_hdr->flags & ISCSI_FLAG_TM_FUNC_MASK) ==
+ ISCSI_TM_FUNC_TARGET_WARM_RESET) ||
+ ((tmf_hdr->flags & ISCSI_FLAG_TM_FUNC_MASK) ==
+ ISCSI_TM_FUNC_TARGET_COLD_RESET)) {
+ INIT_WORK(&qedi_cmd->tmf_work, qedi_tmf_resp_work);
+ queue_work(qedi->tmf_thread, &qedi_cmd->tmf_work);
+ goto unblock_sess;
+ }
+
+ qedi_clear_task_idx(qedi, qedi_cmd->task_id);
+
+ __iscsi_complete_pdu(conn, (struct iscsi_hdr *)resp_hdr_ptr, NULL, 0);
+ kfree(resp_hdr_ptr);
+
+unblock_sess:
+ spin_unlock(&session->back_lock);
+}
+
+static void qedi_process_login_resp(struct qedi_ctx *qedi,
+ union iscsi_cqe *cqe,
+ struct iscsi_task *task,
+ struct qedi_conn *qedi_conn)
+{
+ struct iscsi_conn *conn = qedi_conn->cls_conn->dd_data;
+ struct iscsi_session *session = conn->session;
+ struct iscsi_task_context *task_ctx;
+ struct iscsi_login_rsp *resp_hdr_ptr;
+ struct iscsi_login_response_hdr *cqe_login_response;
+ struct qedi_cmd *cmd;
+ int pld_len;
+ u32 *tmp;
+
+ cmd = (struct qedi_cmd *)task->dd_data;
+
+ cqe_login_response = &cqe->cqe_common.iscsi_hdr.login_response;
+ task_ctx = qedi_get_task_mem(&qedi->tasks, cmd->task_id);
+
+ spin_lock(&session->back_lock);
+ resp_hdr_ptr = (struct iscsi_login_rsp *)&qedi_conn->gen_pdu.resp_hdr;
+ memset(resp_hdr_ptr, 0, sizeof(struct iscsi_login_rsp));
+ resp_hdr_ptr->opcode = cqe_login_response->opcode;
+ resp_hdr_ptr->flags = cqe_login_response->flags_attr;
+ resp_hdr_ptr->hlength = 0;
+
+ hton24(resp_hdr_ptr->dlength,
+ (cqe_login_response->hdr_second_dword &
+ ISCSI_LOGIN_RESPONSE_HDR_DATA_SEG_LEN_MASK));
+ tmp = (u32 *)resp_hdr_ptr->dlength;
+ resp_hdr_ptr->itt = build_itt(cqe->cqe_solicited.itid,
+ conn->session->age);
+ resp_hdr_ptr->tsih = cqe_login_response->tsih;
+ resp_hdr_ptr->statsn = cpu_to_be32(cqe_login_response->stat_sn);
+ resp_hdr_ptr->exp_cmdsn = cpu_to_be32(cqe_login_response->exp_cmd_sn);
+ resp_hdr_ptr->max_cmdsn = cpu_to_be32(cqe_login_response->max_cmd_sn);
+ resp_hdr_ptr->status_class = cqe_login_response->status_class;
+ resp_hdr_ptr->status_detail = cqe_login_response->status_detail;
+ pld_len = cqe_login_response->hdr_second_dword &
+ ISCSI_LOGIN_RESPONSE_HDR_DATA_SEG_LEN_MASK;
+ qedi_conn->gen_pdu.resp_wr_ptr = qedi_conn->gen_pdu.resp_buf + pld_len;
+
+ if (likely(cmd->io_cmd_in_list)) {
+ cmd->io_cmd_in_list = false;
+ list_del_init(&cmd->io_cmd);
+ qedi_conn->active_cmd_count--;
+ }
+
+ memset(task_ctx, '\0', sizeof(*task_ctx));
+
+ __iscsi_complete_pdu(conn, (struct iscsi_hdr *)resp_hdr_ptr,
+ qedi_conn->gen_pdu.resp_buf,
+ (qedi_conn->gen_pdu.resp_wr_ptr -
+ qedi_conn->gen_pdu.resp_buf));
+
+ spin_unlock(&session->back_lock);
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_TID,
+ "Freeing tid=0x%x for cid=0x%x\n",
+ cmd->task_id, qedi_conn->iscsi_conn_id);
+ cmd->state = RESPONSE_RECEIVED;
+ qedi_clear_task_idx(qedi, cmd->task_id);
+}
+
+static void qedi_get_rq_bdq_buf(struct qedi_ctx *qedi,
+ struct iscsi_cqe_unsolicited *cqe,
+ char *ptr, int len)
+{
+ u16 idx = 0;
+
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
+ "pld_len [%d], bdq_prod_idx [%d], idx [%d]\n",
+ len, qedi->bdq_prod_idx,
+ (qedi->bdq_prod_idx % qedi->rq_num_entries));
+
+ /* Obtain buffer address from rqe_opaque */
+ idx = cqe->rqe_opaque.lo;
+	/* idx is a u16 so it cannot be negative */
+	if (idx > (QEDI_BDQ_NUM - 1)) {
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
+ "wrong idx %d returned by FW, dropping the unsolicited pkt\n",
+ idx);
+ return;
+ }
+
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
+		  "rqe_opaque.lo [0x%x], rqe_opaque.hi [0x%x], idx [%d]\n",
+ cqe->rqe_opaque.lo, cqe->rqe_opaque.hi, idx);
+
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
+ "unsol_cqe_type = %d\n", cqe->unsol_cqe_type);
+ switch (cqe->unsol_cqe_type) {
+ case ISCSI_CQE_UNSOLICITED_SINGLE:
+ case ISCSI_CQE_UNSOLICITED_FIRST:
+ if (len)
+ memcpy(ptr, (void *)qedi->bdq[idx].buf_addr, len);
+ break;
+ case ISCSI_CQE_UNSOLICITED_MIDDLE:
+ case ISCSI_CQE_UNSOLICITED_LAST:
+ break;
+ default:
+ break;
+ }
+}
+
+static void qedi_put_rq_bdq_buf(struct qedi_ctx *qedi,
+ struct iscsi_cqe_unsolicited *cqe,
+ int count)
+{
+ u16 tmp;
+ u16 idx = 0;
+ struct scsi_bd *pbl;
+
+ /* Obtain buffer address from rqe_opaque */
+ idx = cqe->rqe_opaque.lo;
+	/* idx is a u16 so it cannot be negative */
+	if (idx > (QEDI_BDQ_NUM - 1)) {
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
+ "wrong idx %d returned by FW, dropping the unsolicited pkt\n",
+ idx);
+ return;
+ }
+
+ pbl = (struct scsi_bd *)qedi->bdq_pbl;
+ pbl += (qedi->bdq_prod_idx % qedi->rq_num_entries);
+ pbl->address.hi = cpu_to_le32(QEDI_U64_HI(qedi->bdq[idx].buf_dma));
+ pbl->address.lo = cpu_to_le32(QEDI_U64_LO(qedi->bdq[idx].buf_dma));
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
+		  "pbl [0x%p] pbl->address hi [0x%x] lo [0x%x] idx [%d]\n",
+ pbl, pbl->address.hi, pbl->address.lo, idx);
+ pbl->opaque.hi = 0;
+ pbl->opaque.lo = cpu_to_le32(QEDI_U64_LO(idx));
+
+ /* Increment producer to let f/w know we've handled the frame */
+ qedi->bdq_prod_idx += count;
+
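+	/* Write the new producer value to both doorbells and read each one
+	 * back so the posted MMIO writes are flushed to the adapter.
+	 */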
+ writew(qedi->bdq_prod_idx, qedi->bdq_primary_prod);
+ tmp = readw(qedi->bdq_primary_prod);
+
+ writew(qedi->bdq_prod_idx, qedi->bdq_secondary_prod);
+ tmp = readw(qedi->bdq_secondary_prod);
+}
+
+static void qedi_unsol_pdu_adjust_bdq(struct qedi_ctx *qedi,
+ struct iscsi_cqe_unsolicited *cqe,
+ u32 pdu_len, u32 num_bdqs,
+ char *bdq_data)
+{
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
+ "num_bdqs [%d]\n", num_bdqs);
+
+ qedi_get_rq_bdq_buf(qedi, cqe, bdq_data, pdu_len);
+ qedi_put_rq_bdq_buf(qedi, cqe, (num_bdqs + 1));
+}
+
+static int qedi_process_nopin_mesg(struct qedi_ctx *qedi,
+ union iscsi_cqe *cqe,
+ struct iscsi_task *task,
+ struct qedi_conn *qedi_conn, u16 que_idx)
+{
+ struct iscsi_conn *conn = qedi_conn->cls_conn->dd_data;
+ struct iscsi_session *session = conn->session;
+ struct iscsi_nop_in_hdr *cqe_nop_in;
+ struct iscsi_nopin *hdr;
+ struct qedi_cmd *cmd;
+ int tgt_async_nop = 0;
+ u32 lun[2];
+ u32 pdu_len, num_bdqs;
+ char bdq_data[QEDI_BDQ_BUF_SIZE];
+ unsigned long flags;
+
+ spin_lock_bh(&session->back_lock);
+ cqe_nop_in = &cqe->cqe_common.iscsi_hdr.nop_in;
+
+ pdu_len = cqe_nop_in->hdr_second_dword &
+ ISCSI_NOP_IN_HDR_DATA_SEG_LEN_MASK;
+ num_bdqs = pdu_len / QEDI_BDQ_BUF_SIZE;
+
+ hdr = (struct iscsi_nopin *)&qedi_conn->gen_pdu.resp_hdr;
+ memset(hdr, 0, sizeof(struct iscsi_hdr));
+ hdr->opcode = cqe_nop_in->opcode;
+ hdr->max_cmdsn = cpu_to_be32(cqe_nop_in->max_cmd_sn);
+ hdr->exp_cmdsn = cpu_to_be32(cqe_nop_in->exp_cmd_sn);
+ hdr->statsn = cpu_to_be32(cqe_nop_in->stat_sn);
+ hdr->ttt = cpu_to_be32(cqe_nop_in->ttt);
+
+ if (cqe->cqe_common.cqe_type == ISCSI_CQE_TYPE_UNSOLICITED) {
+ spin_lock_irqsave(&qedi->hba_lock, flags);
+ qedi_unsol_pdu_adjust_bdq(qedi, &cqe->cqe_unsolicited,
+ pdu_len, num_bdqs, bdq_data);
+ hdr->itt = RESERVED_ITT;
+ tgt_async_nop = 1;
+ spin_unlock_irqrestore(&qedi->hba_lock, flags);
+ goto done;
+ }
+
+ /* Response to one of our nop-outs */
+ if (task) {
+ cmd = task->dd_data;
+ hdr->flags = ISCSI_FLAG_CMD_FINAL;
+ hdr->itt = build_itt(cqe->cqe_solicited.itid,
+ conn->session->age);
+ lun[0] = 0xffffffff;
+ lun[1] = 0xffffffff;
+ memcpy(&hdr->lun, lun, sizeof(struct scsi_lun));
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_TID,
+ "Freeing tid=0x%x for cid=0x%x\n",
+ cmd->task_id, qedi_conn->iscsi_conn_id);
+ cmd->state = RESPONSE_RECEIVED;
+ spin_lock(&qedi_conn->list_lock);
+ if (likely(cmd->io_cmd_in_list)) {
+ cmd->io_cmd_in_list = false;
+ list_del_init(&cmd->io_cmd);
+ qedi_conn->active_cmd_count--;
+ }
+
+ spin_unlock(&qedi_conn->list_lock);
+ qedi_clear_task_idx(qedi, cmd->task_id);
+ }
+
+done:
+ __iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, bdq_data, pdu_len);
+
+ spin_unlock_bh(&session->back_lock);
+ return tgt_async_nop;
+}
+
+static void qedi_process_async_mesg(struct qedi_ctx *qedi,
+ union iscsi_cqe *cqe,
+ struct iscsi_task *task,
+ struct qedi_conn *qedi_conn,
+ u16 que_idx)
+{
+ struct iscsi_conn *conn = qedi_conn->cls_conn->dd_data;
+ struct iscsi_session *session = conn->session;
+ struct iscsi_async_msg_hdr *cqe_async_msg;
+ struct iscsi_async *resp_hdr;
+ u32 lun[2];
+ u32 pdu_len, num_bdqs;
+ char bdq_data[QEDI_BDQ_BUF_SIZE];
+ unsigned long flags;
+
+ spin_lock_bh(&session->back_lock);
+
+ cqe_async_msg = &cqe->cqe_common.iscsi_hdr.async_msg;
+ pdu_len = cqe_async_msg->hdr_second_dword &
+ ISCSI_ASYNC_MSG_HDR_DATA_SEG_LEN_MASK;
+ num_bdqs = pdu_len / QEDI_BDQ_BUF_SIZE;
+
+ if (cqe->cqe_common.cqe_type == ISCSI_CQE_TYPE_UNSOLICITED) {
+ spin_lock_irqsave(&qedi->hba_lock, flags);
+ qedi_unsol_pdu_adjust_bdq(qedi, &cqe->cqe_unsolicited,
+ pdu_len, num_bdqs, bdq_data);
+ spin_unlock_irqrestore(&qedi->hba_lock, flags);
+ }
+
+ resp_hdr = (struct iscsi_async *)&qedi_conn->gen_pdu.resp_hdr;
+ memset(resp_hdr, 0, sizeof(struct iscsi_hdr));
+ resp_hdr->opcode = cqe_async_msg->opcode;
+ resp_hdr->flags = 0x80;
+
+ lun[0] = cpu_to_be32(cqe_async_msg->lun.lo);
+ lun[1] = cpu_to_be32(cqe_async_msg->lun.hi);
+ memcpy(&resp_hdr->lun, lun, sizeof(struct scsi_lun));
+ resp_hdr->exp_cmdsn = cpu_to_be32(cqe_async_msg->exp_cmd_sn);
+ resp_hdr->max_cmdsn = cpu_to_be32(cqe_async_msg->max_cmd_sn);
+ resp_hdr->statsn = cpu_to_be32(cqe_async_msg->stat_sn);
+
+ resp_hdr->async_event = cqe_async_msg->async_event;
+ resp_hdr->async_vcode = cqe_async_msg->async_vcode;
+
+ resp_hdr->param1 = cpu_to_be16(cqe_async_msg->param1_rsrv);
+ resp_hdr->param2 = cpu_to_be16(cqe_async_msg->param2_rsrv);
+ resp_hdr->param3 = cpu_to_be16(cqe_async_msg->param3_rsrv);
+
+ __iscsi_complete_pdu(conn, (struct iscsi_hdr *)resp_hdr, bdq_data,
+ pdu_len);
+
+ spin_unlock_bh(&session->back_lock);
+}
+
+static void qedi_process_reject_mesg(struct qedi_ctx *qedi,
+ union iscsi_cqe *cqe,
+ struct iscsi_task *task,
+ struct qedi_conn *qedi_conn,
+ uint16_t que_idx)
+{
+ struct iscsi_conn *conn = qedi_conn->cls_conn->dd_data;
+ struct iscsi_session *session = conn->session;
+ struct iscsi_reject_hdr *cqe_reject;
+ struct iscsi_reject *hdr;
+ u32 pld_len, num_bdqs;
+ unsigned long flags;
+
+ spin_lock_bh(&session->back_lock);
+ cqe_reject = &cqe->cqe_common.iscsi_hdr.reject;
+ pld_len = cqe_reject->hdr_second_dword &
+ ISCSI_REJECT_HDR_DATA_SEG_LEN_MASK;
+ num_bdqs = pld_len / QEDI_BDQ_BUF_SIZE;
+
+ if (cqe->cqe_common.cqe_type == ISCSI_CQE_TYPE_UNSOLICITED) {
+ spin_lock_irqsave(&qedi->hba_lock, flags);
+ qedi_unsol_pdu_adjust_bdq(qedi, &cqe->cqe_unsolicited,
+ pld_len, num_bdqs, conn->data);
+ spin_unlock_irqrestore(&qedi->hba_lock, flags);
+ }
+ hdr = (struct iscsi_reject *)&qedi_conn->gen_pdu.resp_hdr;
+ memset(hdr, 0, sizeof(struct iscsi_hdr));
+ hdr->opcode = cqe_reject->opcode;
+ hdr->reason = cqe_reject->hdr_reason;
+ hdr->flags = cqe_reject->hdr_flags;
+ hton24(hdr->dlength, (cqe_reject->hdr_second_dword &
+ ISCSI_REJECT_HDR_DATA_SEG_LEN_MASK));
+ hdr->max_cmdsn = cpu_to_be32(cqe_reject->max_cmd_sn);
+ hdr->exp_cmdsn = cpu_to_be32(cqe_reject->exp_cmd_sn);
+ hdr->statsn = cpu_to_be32(cqe_reject->stat_sn);
+ hdr->ffffffff = cpu_to_be32(0xffffffff);
+
+ __iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr,
+ conn->data, pld_len);
+ spin_unlock_bh(&session->back_lock);
+}
+
+static void qedi_scsi_completion(struct qedi_ctx *qedi,
+ union iscsi_cqe *cqe,
+ struct iscsi_task *task,
+ struct iscsi_conn *conn)
+{
+ struct scsi_cmnd *sc_cmd;
+ struct qedi_cmd *cmd = task->dd_data;
+ struct iscsi_session *session = conn->session;
+ struct iscsi_scsi_rsp *hdr;
+ struct iscsi_data_in_hdr *cqe_data_in;
+ int datalen = 0;
+ struct qedi_conn *qedi_conn;
+ u32 iscsi_cid;
+ bool mark_cmd_node_deleted = false;
+ u8 cqe_err_bits = 0;
+
+ iscsi_cid = cqe->cqe_common.conn_id;
+ qedi_conn = qedi->cid_que.conn_cid_tbl[iscsi_cid];
+
+ cqe_data_in = &cqe->cqe_common.iscsi_hdr.data_in;
+ cqe_err_bits =
+ cqe->cqe_common.error_bitmap.error_bits.cqe_error_status_bits;
+
+ spin_lock_bh(&session->back_lock);
+ /* get the scsi command */
+ sc_cmd = cmd->scsi_cmd;
+
+ if (!sc_cmd) {
+ QEDI_WARN(&qedi->dbg_ctx, "sc_cmd is NULL!\n");
+ goto error;
+ }
+
+ if (!sc_cmd->SCp.ptr) {
+ QEDI_WARN(&qedi->dbg_ctx,
+ "SCp.ptr is NULL, returned in another context.\n");
+ goto error;
+ }
+
+ if (!sc_cmd->request) {
+ QEDI_WARN(&qedi->dbg_ctx,
+ "sc_cmd->request is NULL, sc_cmd=%p.\n",
+ sc_cmd);
+ goto error;
+ }
+
+ if (!sc_cmd->request->special) {
+ QEDI_WARN(&qedi->dbg_ctx,
+ "request->special is NULL so request not valid, sc_cmd=%p.\n",
+ sc_cmd);
+ goto error;
+ }
+
+ if (!sc_cmd->request->q) {
+ QEDI_WARN(&qedi->dbg_ctx,
+ "request->q is NULL so request is not valid, sc_cmd=%p.\n",
+ sc_cmd);
+ goto error;
+ }
+
+ qedi_iscsi_unmap_sg_list(cmd);
+
+ hdr = (struct iscsi_scsi_rsp *)task->hdr;
+ hdr->opcode = cqe_data_in->opcode;
+ hdr->max_cmdsn = cpu_to_be32(cqe_data_in->max_cmd_sn);
+ hdr->exp_cmdsn = cpu_to_be32(cqe_data_in->exp_cmd_sn);
+ hdr->itt = build_itt(cqe->cqe_solicited.itid, conn->session->age);
+ hdr->response = cqe_data_in->reserved1;
+ hdr->cmd_status = cqe_data_in->status_rsvd;
+ hdr->flags = cqe_data_in->flags;
+ hdr->residual_count = cpu_to_be32(cqe_data_in->residual_count);
+
+ if (hdr->cmd_status == SAM_STAT_CHECK_CONDITION) {
+ datalen = cqe_data_in->reserved2 &
+ ISCSI_COMMON_HDR_DATA_SEG_LEN_MASK;
+ memcpy((char *)conn->data, (char *)cmd->sense_buffer, datalen);
+ }
+
+ /* If f/w reports data underrun err then set residual to IO transfer
+ * length, set Underrun flag and clear Overrun flag explicitly
+ */
+ if (unlikely(cqe_err_bits &&
+ GET_FIELD(cqe_err_bits, CQE_ERROR_BITMAP_UNDER_RUN_ERR))) {
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
+ "Under flow itt=0x%x proto flags=0x%x tid=0x%x cid 0x%x fw resid 0x%x sc dlen 0x%x\n",
+ hdr->itt, cqe_data_in->flags, cmd->task_id,
+ qedi_conn->iscsi_conn_id, hdr->residual_count,
+ scsi_bufflen(sc_cmd));
+ hdr->residual_count = cpu_to_be32(scsi_bufflen(sc_cmd));
+ hdr->flags |= ISCSI_FLAG_CMD_UNDERFLOW;
+ hdr->flags &= (~ISCSI_FLAG_CMD_OVERFLOW);
+ }
+
+ spin_lock(&qedi_conn->list_lock);
+ if (likely(cmd->io_cmd_in_list)) {
+ cmd->io_cmd_in_list = false;
+ list_del_init(&cmd->io_cmd);
+ qedi_conn->active_cmd_count--;
+ mark_cmd_node_deleted = true;
+ }
+ spin_unlock(&qedi_conn->list_lock);
+
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_TID,
+ "Freeing tid=0x%x for cid=0x%x\n",
+ cmd->task_id, qedi_conn->iscsi_conn_id);
+ cmd->state = RESPONSE_RECEIVED;
+ if (qedi_io_tracing)
+ qedi_trace_io(qedi, task, cmd->task_id, QEDI_IO_TRACE_RSP);
+
+ qedi_clear_task_idx(qedi, cmd->task_id);
+ __iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr,
+ conn->data, datalen);
+error:
+ spin_unlock_bh(&session->back_lock);
+}
+
+static void qedi_mtask_completion(struct qedi_ctx *qedi,
+ union iscsi_cqe *cqe,
+ struct iscsi_task *task,
+ struct qedi_conn *conn, uint16_t que_idx)
+{
+ struct iscsi_conn *iscsi_conn;
+ u32 hdr_opcode;
+
+ hdr_opcode = cqe->cqe_common.iscsi_hdr.common.hdr_first_byte;
+ iscsi_conn = conn->cls_conn->dd_data;
+
+ switch (hdr_opcode) {
+ case ISCSI_OPCODE_SCSI_RESPONSE:
+ case ISCSI_OPCODE_DATA_IN:
+ qedi_scsi_completion(qedi, cqe, task, iscsi_conn);
+ break;
+ case ISCSI_OPCODE_LOGIN_RESPONSE:
+ qedi_process_login_resp(qedi, cqe, task, conn);
+ break;
+ case ISCSI_OPCODE_TMF_RESPONSE:
+ qedi_process_tmf_resp(qedi, cqe, task, conn);
+ break;
+ case ISCSI_OPCODE_TEXT_RESPONSE:
+ qedi_process_text_resp(qedi, cqe, task, conn);
+ break;
+ case ISCSI_OPCODE_LOGOUT_RESPONSE:
+ qedi_process_logout_resp(qedi, cqe, task, conn);
+ break;
+ case ISCSI_OPCODE_NOP_IN:
+ qedi_process_nopin_mesg(qedi, cqe, task, conn, que_idx);
+ break;
+ default:
+ QEDI_ERR(&qedi->dbg_ctx, "unknown opcode\n");
+ }
+}
+
+static void qedi_process_nopin_local_cmpl(struct qedi_ctx *qedi,
+ struct iscsi_cqe_solicited *cqe,
+ struct iscsi_task *task,
+ struct qedi_conn *qedi_conn)
+{
+ struct iscsi_conn *conn = qedi_conn->cls_conn->dd_data;
+ struct iscsi_session *session = conn->session;
+ struct qedi_cmd *cmd = task->dd_data;
+
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_UNSOL,
+ "itid=0x%x, cmd task id=0x%x\n",
+ cqe->itid, cmd->task_id);
+
+ cmd->state = RESPONSE_RECEIVED;
+ qedi_clear_task_idx(qedi, cmd->task_id);
+
+ spin_lock_bh(&session->back_lock);
+ __iscsi_put_task(task);
+ spin_unlock_bh(&session->back_lock);
+}
+
+static void qedi_process_cmd_cleanup_resp(struct qedi_ctx *qedi,
+ struct iscsi_cqe_solicited *cqe,
+ struct iscsi_task *task,
+ struct iscsi_conn *conn)
+{
+ struct qedi_work_map *work, *work_tmp;
+ u32 proto_itt = cqe->itid;
+ u32 ptmp_itt = 0;
+ itt_t protoitt = 0;
+ int found = 0;
+ struct qedi_cmd *qedi_cmd = NULL;
+ u32 rtid = 0;
+ u32 iscsi_cid;
+ struct qedi_conn *qedi_conn;
+ struct qedi_cmd *cmd_new, *dbg_cmd;
+ struct iscsi_task *mtask;
+ struct iscsi_tm *tmf_hdr = NULL;
+
+ iscsi_cid = cqe->conn_id;
+ qedi_conn = qedi->cid_que.conn_cid_tbl[iscsi_cid];
+
+ /* Based on this itt get the corresponding qedi_cmd */
+ spin_lock_bh(&qedi_conn->tmf_work_lock);
+ list_for_each_entry_safe(work, work_tmp, &qedi_conn->tmf_work_list,
+ list) {
+ if (work->rtid == proto_itt) {
+ /* We found the command */
+ qedi_cmd = work->qedi_cmd;
+ if (!qedi_cmd->list_tmf_work) {
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM,
+ "TMF work not found, cqe->tid=0x%x, cid=0x%x\n",
+ proto_itt, qedi_conn->iscsi_conn_id);
+ WARN_ON(1);
+ }
+ found = 1;
+ mtask = qedi_cmd->task;
+ tmf_hdr = (struct iscsi_tm *)mtask->hdr;
+ rtid = work->rtid;
+
+ list_del_init(&work->list);
+ kfree(work);
+ qedi_cmd->list_tmf_work = NULL;
+ }
+ }
+ spin_unlock_bh(&qedi_conn->tmf_work_lock);
+
+ if (found) {
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM,
+ "TMF work, cqe->tid=0x%x, tmf flags=0x%x, cid=0x%x\n",
+ proto_itt, tmf_hdr->flags, qedi_conn->iscsi_conn_id);
+
+ if ((tmf_hdr->flags & ISCSI_FLAG_TM_FUNC_MASK) ==
+ ISCSI_TM_FUNC_ABORT_TASK) {
+ spin_lock_bh(&conn->session->back_lock);
+
+ protoitt = build_itt(get_itt(tmf_hdr->rtt),
+ conn->session->age);
+ task = iscsi_itt_to_task(conn, protoitt);
+
+ spin_unlock_bh(&conn->session->back_lock);
+
+ if (!task) {
+ QEDI_NOTICE(&qedi->dbg_ctx,
+ "IO task completed, tmf rtt=0x%x, cid=0x%x\n",
+ get_itt(tmf_hdr->rtt),
+ qedi_conn->iscsi_conn_id);
+ return;
+ }
+
+ dbg_cmd = task->dd_data;
+
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM,
+ "Abort tmf rtt=0x%x, i/o itt=0x%x, i/o tid=0x%x, cid=0x%x\n",
+ get_itt(tmf_hdr->rtt), get_itt(task->itt),
+ dbg_cmd->task_id, qedi_conn->iscsi_conn_id);
+
+ if (qedi_cmd->state == CLEANUP_WAIT_FAILED)
+ qedi_cmd->state = CLEANUP_RECV;
+
+ qedi_clear_task_idx(qedi_conn->qedi, rtid);
+
+ spin_lock(&qedi_conn->list_lock);
+ list_del_init(&dbg_cmd->io_cmd);
+ qedi_conn->active_cmd_count--;
+ spin_unlock(&qedi_conn->list_lock);
+ qedi_cmd->state = CLEANUP_RECV;
+ wake_up_interruptible(&qedi_conn->wait_queue);
+ }
+ } else if (qedi_conn->cmd_cleanup_req > 0) {
+ spin_lock_bh(&conn->session->back_lock);
+ qedi_get_proto_itt(qedi, cqe->itid, &ptmp_itt);
+ protoitt = build_itt(ptmp_itt, conn->session->age);
+ task = iscsi_itt_to_task(conn, protoitt);
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM,
+ "cleanup io itid=0x%x, protoitt=0x%x, cmd_cleanup_cmpl=%d, cid=0x%x\n",
+ cqe->itid, protoitt, qedi_conn->cmd_cleanup_cmpl,
+ qedi_conn->iscsi_conn_id);
+
+ spin_unlock_bh(&conn->session->back_lock);
+ if (!task) {
+ QEDI_NOTICE(&qedi->dbg_ctx,
+ "task is null, itid=0x%x, cid=0x%x\n",
+ cqe->itid, qedi_conn->iscsi_conn_id);
+ return;
+ }
+ qedi_conn->cmd_cleanup_cmpl++;
+ wake_up(&qedi_conn->wait_queue);
+ cmd_new = task->dd_data;
+
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_TID,
+ "Freeing tid=0x%x for cid=0x%x\n",
+ cqe->itid, qedi_conn->iscsi_conn_id);
+ qedi_clear_task_idx(qedi_conn->qedi, cqe->itid);
+
+ } else {
+ qedi_get_proto_itt(qedi, cqe->itid, &ptmp_itt);
+ protoitt = build_itt(ptmp_itt, conn->session->age);
+ task = iscsi_itt_to_task(conn, protoitt);
+ QEDI_ERR(&qedi->dbg_ctx,
+ "Delayed or untracked cleanup response, itt=0x%x, tid=0x%x, cid=0x%x, task=%p\n",
+ protoitt, cqe->itid, qedi_conn->iscsi_conn_id, task);
+ WARN_ON(1);
+ }
+}
+
+void qedi_fp_process_cqes(struct qedi_work *work)
+{
+ struct qedi_ctx *qedi = work->qedi;
+ union iscsi_cqe *cqe = &work->cqe;
+ struct iscsi_task *task = NULL;
+ struct iscsi_nopout *nopout_hdr;
+ struct qedi_conn *q_conn;
+ struct iscsi_conn *conn;
+ struct qedi_cmd *qedi_cmd;
+ u32 comp_type;
+ u32 iscsi_cid;
+ u32 hdr_opcode;
+ u16 que_idx = work->que_idx;
+ u8 cqe_err_bits = 0;
+
+ comp_type = cqe->cqe_common.cqe_type;
+ hdr_opcode = cqe->cqe_common.iscsi_hdr.common.hdr_first_byte;
+ cqe_err_bits =
+ cqe->cqe_common.error_bitmap.error_bits.cqe_error_status_bits;
+
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
+ "fw_cid=0x%x, cqe type=0x%x, opcode=0x%x\n",
+ cqe->cqe_common.conn_id, comp_type, hdr_opcode);
+
+ if (comp_type >= MAX_ISCSI_CQES_TYPE) {
+ QEDI_WARN(&qedi->dbg_ctx, "Invalid CqE type\n");
+ return;
+ }
+
+ iscsi_cid = cqe->cqe_common.conn_id;
+ q_conn = qedi->cid_que.conn_cid_tbl[iscsi_cid];
+ if (!q_conn) {
+ QEDI_WARN(&qedi->dbg_ctx,
+ "Session no longer exists for cid=0x%x!!\n",
+ iscsi_cid);
+ return;
+ }
+
+ conn = q_conn->cls_conn->dd_data;
+
+ if (unlikely(cqe_err_bits &&
+ GET_FIELD(cqe_err_bits,
+ CQE_ERROR_BITMAP_DATA_DIGEST_ERR))) {
+ iscsi_conn_failure(conn, ISCSI_ERR_DATA_DGST);
+ return;
+ }
+
+ switch (comp_type) {
+ case ISCSI_CQE_TYPE_SOLICITED:
+ case ISCSI_CQE_TYPE_SOLICITED_WITH_SENSE:
+ qedi_cmd = container_of(work, struct qedi_cmd, cqe_work);
+ task = qedi_cmd->task;
+ if (!task) {
+ QEDI_WARN(&qedi->dbg_ctx, "task is NULL\n");
+ return;
+ }
+
+ /* Process NOPIN local completion */
+ nopout_hdr = (struct iscsi_nopout *)task->hdr;
+ if ((nopout_hdr->itt == RESERVED_ITT) &&
+ (cqe->cqe_solicited.itid != (u16)RESERVED_ITT)) {
+ qedi_process_nopin_local_cmpl(qedi, &cqe->cqe_solicited,
+ task, q_conn);
+ } else {
+ cqe->cqe_solicited.itid =
+ qedi_get_itt(cqe->cqe_solicited);
+ /* Process other solicited responses */
+ qedi_mtask_completion(qedi, cqe, task, q_conn, que_idx);
+ }
+ break;
+ case ISCSI_CQE_TYPE_UNSOLICITED:
+ switch (hdr_opcode) {
+ case ISCSI_OPCODE_NOP_IN:
+ qedi_process_nopin_mesg(qedi, cqe, task, q_conn,
+ que_idx);
+ break;
+ case ISCSI_OPCODE_ASYNC_MSG:
+ qedi_process_async_mesg(qedi, cqe, task, q_conn,
+ que_idx);
+ break;
+ case ISCSI_OPCODE_REJECT:
+ qedi_process_reject_mesg(qedi, cqe, task, q_conn,
+ que_idx);
+ break;
+ }
+ goto exit_fp_process;
+ case ISCSI_CQE_TYPE_DUMMY:
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM, "Dummy CqE\n");
+ goto exit_fp_process;
+ case ISCSI_CQE_TYPE_TASK_CLEANUP:
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM, "CleanUp CqE\n");
+ qedi_process_cmd_cleanup_resp(qedi, &cqe->cqe_solicited, task,
+ conn);
+ goto exit_fp_process;
+ default:
+ QEDI_ERR(&qedi->dbg_ctx, "Error cqe.\n");
+ break;
+ }
+
+exit_fp_process:
+ return;
+}
+
+static void qedi_add_to_sq(struct qedi_conn *qedi_conn, struct iscsi_task *task,
+ u16 tid, uint16_t ptu_invalidate, int is_cleanup)
+{
+ struct iscsi_wqe *wqe;
+ struct iscsi_wqe_field *cont_field;
+ struct qedi_endpoint *ep;
+ struct scsi_cmnd *sc = task->sc;
+ struct iscsi_login_req *login_hdr;
+ struct qedi_cmd *cmd = task->dd_data;
+
+ login_hdr = (struct iscsi_login_req *)task->hdr;
+ ep = qedi_conn->ep;
+ wqe = &ep->sq[ep->sq_prod_idx];
+
+ memset(wqe, 0, sizeof(*wqe));
+
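+	/* The driver-side producer wraps at QEDI_SQ_SIZE; the firmware
+	 * index is free-running.
+	 */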
+ ep->sq_prod_idx++;
+ ep->fw_sq_prod_idx++;
+ if (ep->sq_prod_idx == QEDI_SQ_SIZE)
+ ep->sq_prod_idx = 0;
+
+ if (is_cleanup) {
+ SET_FIELD(wqe->flags, ISCSI_WQE_WQE_TYPE,
+ ISCSI_WQE_TYPE_TASK_CLEANUP);
+ wqe->task_id = tid;
+ return;
+ }
+
+ if (ptu_invalidate) {
+ SET_FIELD(wqe->flags, ISCSI_WQE_PTU_INVALIDATE,
+ ISCSI_WQE_SET_PTU_INVALIDATE);
+ }
+
+ cont_field = &wqe->cont_prevtid_union.cont_field;
+
+ switch (task->hdr->opcode & ISCSI_OPCODE_MASK) {
+ case ISCSI_OP_LOGIN:
+ case ISCSI_OP_TEXT:
+ SET_FIELD(wqe->flags, ISCSI_WQE_WQE_TYPE,
+ ISCSI_WQE_TYPE_MIDDLE_PATH);
+ SET_FIELD(wqe->flags, ISCSI_WQE_NUM_FAST_SGES,
+ 1);
+ cont_field->contlen_cdbsize_field = ntoh24(login_hdr->dlength);
+ break;
+ case ISCSI_OP_LOGOUT:
+ case ISCSI_OP_NOOP_OUT:
+ case ISCSI_OP_SCSI_TMFUNC:
+ SET_FIELD(wqe->flags, ISCSI_WQE_WQE_TYPE,
+ ISCSI_WQE_TYPE_NORMAL);
+ break;
+ default:
+ if (!sc)
+ break;
+
+ SET_FIELD(wqe->flags, ISCSI_WQE_WQE_TYPE,
+ ISCSI_WQE_TYPE_NORMAL);
+ cont_field->contlen_cdbsize_field =
+ (sc->sc_data_direction == DMA_TO_DEVICE) ?
+ scsi_bufflen(sc) : 0;
+ if (cmd->use_slowpath)
+ SET_FIELD(wqe->flags, ISCSI_WQE_NUM_FAST_SGES, 0);
+ else
+ SET_FIELD(wqe->flags, ISCSI_WQE_NUM_FAST_SGES,
+ (sc->sc_data_direction ==
+ DMA_TO_DEVICE) ?
+ min((u16)QEDI_FAST_SGE_COUNT,
+ (u16)cmd->io_tbl.sge_valid) : 0);
+ break;
+ }
+
+ wqe->task_id = tid;
+ /* Make sure SQ data is coherent */
+ wmb();
+}
+
+static void qedi_ring_doorbell(struct qedi_conn *qedi_conn)
+{
+ struct iscsi_db_data dbell = { 0 };
+
+ dbell.agg_flags = 0;
+
+ dbell.params |= DB_DEST_XCM << ISCSI_DB_DATA_DEST_SHIFT;
+ dbell.params |= DB_AGG_CMD_SET << ISCSI_DB_DATA_AGG_CMD_SHIFT;
+ dbell.params |=
+ DQ_XCM_ISCSI_SQ_PROD_CMD << ISCSI_DB_DATA_AGG_VAL_SEL_SHIFT;
+
+ dbell.sq_prod = qedi_conn->ep->fw_sq_prod_idx;
+ writel(*(u32 *)&dbell, qedi_conn->ep->p_doorbell);
+
+ /* Make sure fw write idx is coherent, and include both memory barriers
+ * as a failsafe as for some architectures the call is the same but on
+ * others they are two different assembly operations.
+ */
+ wmb();
+ mmiowb();
+ QEDI_INFO(&qedi_conn->qedi->dbg_ctx, QEDI_LOG_MP_REQ,
+ "prod_idx=0x%x, fw_prod_idx=0x%x, cid=0x%x\n",
+ qedi_conn->ep->sq_prod_idx, qedi_conn->ep->fw_sq_prod_idx,
+ qedi_conn->iscsi_conn_id);
+}
+
+int qedi_send_iscsi_login(struct qedi_conn *qedi_conn,
+ struct iscsi_task *task)
+{
+ struct qedi_ctx *qedi = qedi_conn->qedi;
+ struct iscsi_task_context *fw_task_ctx;
+ struct iscsi_login_req *login_hdr;
+ struct iscsi_login_req_hdr *fw_login_req = NULL;
+ struct iscsi_cached_sge_ctx *cached_sge = NULL;
+ struct iscsi_sge *single_sge = NULL;
+ struct iscsi_sge *req_sge = NULL;
+ struct iscsi_sge *resp_sge = NULL;
+ struct qedi_cmd *qedi_cmd;
+ s16 ptu_invalidate = 0;
+ s16 tid = 0;
+
+ req_sge = (struct iscsi_sge *)qedi_conn->gen_pdu.req_bd_tbl;
+ resp_sge = (struct iscsi_sge *)qedi_conn->gen_pdu.resp_bd_tbl;
+ qedi_cmd = (struct qedi_cmd *)task->dd_data;
+ login_hdr = (struct iscsi_login_req *)task->hdr;
+
+ tid = qedi_get_task_idx(qedi);
+ if (tid == -1)
+ return -ENOMEM;
+
+ fw_task_ctx = qedi_get_task_mem(&qedi->tasks, tid);
+ memset(fw_task_ctx, 0, sizeof(struct iscsi_task_context));
+
+ qedi_cmd->task_id = tid;
+
+ /* Ystorm context */
+ fw_login_req = &fw_task_ctx->ystorm_st_context.pdu_hdr.login_req;
+ fw_login_req->opcode = login_hdr->opcode;
+ fw_login_req->version_min = login_hdr->min_version;
+ fw_login_req->version_max = login_hdr->max_version;
+ fw_login_req->flags_attr = login_hdr->flags;
+ fw_login_req->isid_tabc = *((u16 *)login_hdr->isid + 2);
+ fw_login_req->isid_d = *((u32 *)login_hdr->isid);
+ fw_login_req->tsih = login_hdr->tsih;
+ qedi_update_itt_map(qedi, tid, task->itt, qedi_cmd);
+ fw_login_req->itt = qedi_set_itt(tid, get_itt(task->itt));
+ fw_login_req->cid = qedi_conn->iscsi_conn_id;
+ fw_login_req->cmd_sn = be32_to_cpu(login_hdr->cmdsn);
+	fw_login_req->exp_stat_sn = 0;
+
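+	/* When a TID's reuse counter wraps, ask the firmware to invalidate
+	 * its PTU entry before the counter restarts.
+	 */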
+ if (qedi->tid_reuse_count[tid] == QEDI_MAX_TASK_NUM) {
+ ptu_invalidate = 1;
+ qedi->tid_reuse_count[tid] = 0;
+ }
+
+ fw_task_ctx->ystorm_st_context.state.reuse_count =
+ qedi->tid_reuse_count[tid];
+ fw_task_ctx->mstorm_st_context.reuse_count =
+ qedi->tid_reuse_count[tid]++;
+ cached_sge =
+ &fw_task_ctx->ystorm_st_context.state.sgl_ctx_union.cached_sge;
+ cached_sge->sge.sge_len = req_sge->sge_len;
+ cached_sge->sge.sge_addr.lo = (u32)(qedi_conn->gen_pdu.req_dma_addr);
+ cached_sge->sge.sge_addr.hi =
+ (u32)((u64)qedi_conn->gen_pdu.req_dma_addr >> 32);
+
+ /* Mstorm context */
+ single_sge = &fw_task_ctx->mstorm_st_context.sgl_union.single_sge;
+ fw_task_ctx->mstorm_st_context.task_type = 0x2;
+ fw_task_ctx->mstorm_ag_context.task_cid = (u16)qedi_conn->iscsi_conn_id;
+ single_sge->sge_addr.lo = resp_sge->sge_addr.lo;
+ single_sge->sge_addr.hi = resp_sge->sge_addr.hi;
+ single_sge->sge_len = resp_sge->sge_len;
+
+ SET_FIELD(fw_task_ctx->mstorm_st_context.flags.mflags,
+ ISCSI_MFLAGS_SINGLE_SGE, 1);
+ SET_FIELD(fw_task_ctx->mstorm_st_context.flags.mflags,
+ ISCSI_MFLAGS_SLOW_IO, 0);
+ fw_task_ctx->mstorm_st_context.sgl_size = 1;
+ fw_task_ctx->mstorm_st_context.rem_task_size = resp_sge->sge_len;
+
+ /* Ustorm context */
+ fw_task_ctx->ustorm_st_context.rem_rcv_len = resp_sge->sge_len;
+ fw_task_ctx->ustorm_st_context.exp_data_transfer_len =
+ ntoh24(login_hdr->dlength);
+ fw_task_ctx->ustorm_st_context.exp_data_sn = 0;
+ fw_task_ctx->ustorm_st_context.cq_rss_number = 0;
+ fw_task_ctx->ustorm_st_context.task_type = 0x2;
+ fw_task_ctx->ustorm_ag_context.icid = (u16)qedi_conn->iscsi_conn_id;
+ fw_task_ctx->ustorm_ag_context.exp_data_acked =
+ ntoh24(login_hdr->dlength);
+ SET_FIELD(fw_task_ctx->ustorm_ag_context.flags1,
+ USTORM_ISCSI_TASK_AG_CTX_R2T2RECV, 1);
+ SET_FIELD(fw_task_ctx->ustorm_st_context.flags,
+ USTORM_ISCSI_TASK_ST_CTX_LOCAL_COMP, 0);
+
+ spin_lock(&qedi_conn->list_lock);
+ list_add_tail(&qedi_cmd->io_cmd, &qedi_conn->active_cmd_list);
+ qedi_cmd->io_cmd_in_list = true;
+ qedi_conn->active_cmd_count++;
+ spin_unlock(&qedi_conn->list_lock);
+
+ qedi_add_to_sq(qedi_conn, task, tid, ptu_invalidate, false);
+ qedi_ring_doorbell(qedi_conn);
+ return 0;
+}
+
+int qedi_send_iscsi_logout(struct qedi_conn *qedi_conn,
+ struct iscsi_task *task)
+{
+ struct qedi_ctx *qedi = qedi_conn->qedi;
+ struct iscsi_logout_req_hdr *fw_logout_req = NULL;
+ struct iscsi_task_context *fw_task_ctx = NULL;
+ struct iscsi_logout *logout_hdr = NULL;
+ struct qedi_cmd *qedi_cmd = NULL;
+ s16 tid = 0;
+ s16 ptu_invalidate = 0;
+
+ qedi_cmd = (struct qedi_cmd *)task->dd_data;
+ logout_hdr = (struct iscsi_logout *)task->hdr;
+
+ tid = qedi_get_task_idx(qedi);
+ if (tid == -1)
+ return -ENOMEM;
+
+ fw_task_ctx = qedi_get_task_mem(&qedi->tasks, tid);
+
+ memset(fw_task_ctx, 0, sizeof(struct iscsi_task_context));
+ qedi_cmd->task_id = tid;
+
+ /* Ystorm context */
+ fw_logout_req = &fw_task_ctx->ystorm_st_context.pdu_hdr.logout_req;
+ fw_logout_req->opcode = ISCSI_OPCODE_LOGOUT_REQUEST;
+ fw_logout_req->reason_code = 0x80 | logout_hdr->flags;
+ qedi_update_itt_map(qedi, tid, task->itt, qedi_cmd);
+ fw_logout_req->itt = qedi_set_itt(tid, get_itt(task->itt));
+ fw_logout_req->exp_stat_sn = be32_to_cpu(logout_hdr->exp_statsn);
+ fw_logout_req->cmd_sn = be32_to_cpu(logout_hdr->cmdsn);
+
+ if (qedi->tid_reuse_count[tid] == QEDI_MAX_TASK_NUM) {
+ ptu_invalidate = 1;
+ qedi->tid_reuse_count[tid] = 0;
+ }
+ fw_task_ctx->ystorm_st_context.state.reuse_count =
+ qedi->tid_reuse_count[tid];
+ fw_task_ctx->mstorm_st_context.reuse_count =
+ qedi->tid_reuse_count[tid]++;
+ fw_logout_req->cid = qedi_conn->iscsi_conn_id;
+ fw_task_ctx->ystorm_st_context.state.buffer_offset[0] = 0;
+
+ /* Mstorm context */
+ fw_task_ctx->mstorm_st_context.task_type = ISCSI_TASK_TYPE_MIDPATH;
+ fw_task_ctx->mstorm_ag_context.task_cid = (u16)qedi_conn->iscsi_conn_id;
+
+ /* Ustorm context */
+ fw_task_ctx->ustorm_st_context.rem_rcv_len = 0;
+ fw_task_ctx->ustorm_st_context.exp_data_transfer_len = 0;
+ fw_task_ctx->ustorm_st_context.exp_data_sn = 0;
+ fw_task_ctx->ustorm_st_context.task_type = ISCSI_TASK_TYPE_MIDPATH;
+ fw_task_ctx->ustorm_st_context.cq_rss_number = 0;
+
+ SET_FIELD(fw_task_ctx->ustorm_st_context.flags,
+ USTORM_ISCSI_TASK_ST_CTX_LOCAL_COMP, 0);
+ SET_FIELD(fw_task_ctx->ustorm_st_context.reg1.reg1_map,
+ ISCSI_REG1_NUM_FAST_SGES, 0);
+
+ fw_task_ctx->ustorm_ag_context.icid = (u16)qedi_conn->iscsi_conn_id;
+ SET_FIELD(fw_task_ctx->ustorm_ag_context.flags1,
+ USTORM_ISCSI_TASK_AG_CTX_R2T2RECV, 1);
+
+ spin_lock(&qedi_conn->list_lock);
+ list_add_tail(&qedi_cmd->io_cmd, &qedi_conn->active_cmd_list);
+ qedi_cmd->io_cmd_in_list = true;
+ qedi_conn->active_cmd_count++;
+ spin_unlock(&qedi_conn->list_lock);
+
+ qedi_add_to_sq(qedi_conn, task, tid, ptu_invalidate, false);
+ qedi_ring_doorbell(qedi_conn);
+
+ return 0;
+}
+
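+/*
+ * Flush outstanding I/O on a connection. For a LUN reset TMF only
+ * commands addressed to that LUN are cleaned up, and the TMF task
+ * itself is skipped. Waits up to 5s for firmware cleanup completions.
+ */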
+int qedi_cleanup_all_io(struct qedi_ctx *qedi, struct qedi_conn *qedi_conn,
+ struct iscsi_task *task, bool in_recovery)
+{
+ int rval;
+ struct iscsi_task *ctask;
+ struct qedi_cmd *cmd, *cmd_tmp;
+ struct iscsi_tm *tmf_hdr;
+ unsigned int lun = 0;
+ bool lun_reset = false;
+ struct iscsi_conn *conn = qedi_conn->cls_conn->dd_data;
+ struct iscsi_session *session = conn->session;
+
+	/* From recovery, task is NULL; a TMF response passes a valid task */
+ if (task) {
+ tmf_hdr = (struct iscsi_tm *)task->hdr;
+
+ if ((tmf_hdr->flags & ISCSI_FLAG_TM_FUNC_MASK) ==
+ ISCSI_TM_FUNC_LOGICAL_UNIT_RESET) {
+ lun_reset = true;
+ lun = scsilun_to_int(&tmf_hdr->lun);
+ }
+ }
+
+ qedi_conn->cmd_cleanup_req = 0;
+ qedi_conn->cmd_cleanup_cmpl = 0;
+
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM,
+ "active_cmd_count=%d, cid=0x%x, in_recovery=%d, lun_reset=%d\n",
+ qedi_conn->active_cmd_count, qedi_conn->iscsi_conn_id,
+ in_recovery, lun_reset);
+
+ if (lun_reset)
+ spin_lock_bh(&session->back_lock);
+
+ spin_lock(&qedi_conn->list_lock);
+
+ list_for_each_entry_safe(cmd, cmd_tmp, &qedi_conn->active_cmd_list,
+ io_cmd) {
+ ctask = cmd->task;
+ if (ctask == task)
+ continue;
+
+ if (lun_reset) {
+ if (cmd->scsi_cmd && cmd->scsi_cmd->device) {
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM,
+ "tid=0x%x itt=0x%x scsi_cmd_ptr=%p device=%p task_state=%d cmd_state=0%x cid=0x%x\n",
+ cmd->task_id, get_itt(ctask->itt),
+ cmd->scsi_cmd, cmd->scsi_cmd->device,
+ ctask->state, cmd->state,
+ qedi_conn->iscsi_conn_id);
+ if (cmd->scsi_cmd->device->lun != lun)
+ continue;
+ }
+ }
+ qedi_conn->cmd_cleanup_req++;
+ qedi_iscsi_cleanup_task(ctask, true);
+
+ list_del_init(&cmd->io_cmd);
+ qedi_conn->active_cmd_count--;
+ QEDI_WARN(&qedi->dbg_ctx,
+ "Deleted active cmd list node io_cmd=%p, cid=0x%x\n",
+ &cmd->io_cmd, qedi_conn->iscsi_conn_id);
+ }
+
+ spin_unlock(&qedi_conn->list_lock);
+
+ if (lun_reset)
+ spin_unlock_bh(&session->back_lock);
+
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM,
+ "cmd_cleanup_req=%d, cid=0x%x\n",
+ qedi_conn->cmd_cleanup_req,
+ qedi_conn->iscsi_conn_id);
+
+ rval = wait_event_interruptible_timeout(qedi_conn->wait_queue,
+ ((qedi_conn->cmd_cleanup_req ==
+ qedi_conn->cmd_cleanup_cmpl) ||
+ qedi_conn->ep),
+ 5 * HZ);
+ if (rval) {
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM,
+ "i/o cmd_cleanup_req=%d, equal to cmd_cleanup_cmpl=%d, cid=0x%x\n",
+ qedi_conn->cmd_cleanup_req,
+ qedi_conn->cmd_cleanup_cmpl,
+ qedi_conn->iscsi_conn_id);
+
+ return 0;
+ }
+
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM,
+ "i/o cmd_cleanup_req=%d, not equal to cmd_cleanup_cmpl=%d, cid=0x%x\n",
+ qedi_conn->cmd_cleanup_req,
+ qedi_conn->cmd_cleanup_cmpl,
+ qedi_conn->iscsi_conn_id);
+
+ iscsi_host_for_each_session(qedi->shost,
+ qedi_mark_device_missing);
+ qedi_ops->common->drain(qedi->cdev);
+
+	/* After the drain, wait for cleanup again before re-enabling I/O */
+ if (!wait_event_interruptible_timeout(qedi_conn->wait_queue,
+ (qedi_conn->cmd_cleanup_req ==
+ qedi_conn->cmd_cleanup_cmpl),
+ 5 * HZ)) {
+ iscsi_host_for_each_session(qedi->shost,
+ qedi_mark_device_available);
+ return -1;
+ }
+
+ iscsi_host_for_each_session(qedi->shost,
+ qedi_mark_device_available);
+
+ return 0;
+}
+
+void qedi_clearsq(struct qedi_ctx *qedi, struct qedi_conn *qedi_conn,
+ struct iscsi_task *task)
+{
+ struct qedi_endpoint *qedi_ep;
+ int rval;
+
+ qedi_ep = qedi_conn->ep;
+ qedi_conn->cmd_cleanup_req = 0;
+ qedi_conn->cmd_cleanup_cmpl = 0;
+
+ if (!qedi_ep) {
+ QEDI_WARN(&qedi->dbg_ctx,
+ "Cannot proceed, ep already disconnected, cid=0x%x\n",
+ qedi_conn->iscsi_conn_id);
+ return;
+ }
+
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
+ "Clearing SQ for cid=0x%x, conn=%p, ep=%p\n",
+ qedi_conn->iscsi_conn_id, qedi_conn, qedi_ep);
+
+ qedi_ops->clear_sq(qedi->cdev, qedi_ep->handle);
+
+ rval = qedi_cleanup_all_io(qedi, qedi_conn, task, true);
+ if (rval) {
+ QEDI_ERR(&qedi->dbg_ctx,
+ "fatal error, need hard reset, cid=0x%x\n",
+ qedi_conn->iscsi_conn_id);
+ WARN_ON(1);
+ }
+}
+
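+/*
+ * Wait up to 5s for the firmware to acknowledge a cleanup request
+ * (or, for I/O tasks, for a response to arrive first).
+ */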
+static int qedi_wait_for_cleanup_request(struct qedi_ctx *qedi,
+ struct qedi_conn *qedi_conn,
+ struct iscsi_task *task,
+ struct qedi_cmd *qedi_cmd,
+ struct qedi_work_map *list_work)
+{
+ struct qedi_cmd *cmd = (struct qedi_cmd *)task->dd_data;
+ int wait;
+
+ wait = wait_event_interruptible_timeout(qedi_conn->wait_queue,
+ ((qedi_cmd->state ==
+ CLEANUP_RECV) ||
+ ((qedi_cmd->type == TYPEIO) &&
+ (cmd->state ==
+ RESPONSE_RECEIVED))),
+ 5 * HZ);
+ if (!wait) {
+ qedi_cmd->state = CLEANUP_WAIT_FAILED;
+
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM,
+			  "Cleanup timed out, tid=0x%x, issue connection recovery, cid=0x%x\n",
+ cmd->task_id, qedi_conn->iscsi_conn_id);
+
+ return -1;
+ }
+ return 0;
+}
+
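+/*
+ * Deferred ABORT_TASK handling: issue a firmware cleanup for the
+ * referenced task, wait for it to complete, then send the TMF PDU.
+ */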
+static void qedi_tmf_work(struct work_struct *work)
+{
+ struct qedi_cmd *qedi_cmd =
+ container_of(work, struct qedi_cmd, tmf_work);
+ struct qedi_conn *qedi_conn = qedi_cmd->conn;
+ struct qedi_ctx *qedi = qedi_conn->qedi;
+ struct iscsi_conn *conn = qedi_conn->cls_conn->dd_data;
+ struct iscsi_cls_session *cls_sess;
+ struct qedi_work_map *list_work = NULL;
+ struct iscsi_task *mtask;
+ struct qedi_cmd *cmd;
+ struct iscsi_task *ctask;
+ struct iscsi_tm *tmf_hdr;
+ s16 rval = 0;
+ s16 tid = 0;
+
+ mtask = qedi_cmd->task;
+ tmf_hdr = (struct iscsi_tm *)mtask->hdr;
+ cls_sess = iscsi_conn_to_session(qedi_conn->cls_conn);
+ set_bit(QEDI_CONN_FW_CLEANUP, &qedi_conn->flags);
+
+ ctask = iscsi_itt_to_task(conn, tmf_hdr->rtt);
+ if (!ctask || !ctask->sc) {
+ QEDI_ERR(&qedi->dbg_ctx, "Task already completed\n");
+ goto abort_ret;
+ }
+
+ cmd = (struct qedi_cmd *)ctask->dd_data;
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
+ "Abort tmf rtt=0x%x, cmd itt=0x%x, cmd tid=0x%x, cid=0x%x\n",
+ get_itt(tmf_hdr->rtt), get_itt(ctask->itt), cmd->task_id,
+ qedi_conn->iscsi_conn_id);
+
+ if (do_not_recover) {
+ QEDI_ERR(&qedi->dbg_ctx, "DONT SEND CLEANUP/ABORT %d\n",
+ do_not_recover);
+ goto abort_ret;
+ }
+
+ list_work = kzalloc(sizeof(*list_work), GFP_ATOMIC);
+ if (!list_work) {
+		QEDI_ERR(&qedi->dbg_ctx, "Memory allocation failed\n");
+ goto abort_ret;
+ }
+
+ qedi_cmd->type = TYPEIO;
+ list_work->qedi_cmd = qedi_cmd;
+ list_work->rtid = cmd->task_id;
+ list_work->state = QEDI_WORK_SCHEDULED;
+ qedi_cmd->list_tmf_work = list_work;
+
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM,
+ "Queue tmf work=%p, list node=%p, cid=0x%x, tmf flags=0x%x\n",
+ list_work->ptr_tmf_work, list_work, qedi_conn->iscsi_conn_id,
+ tmf_hdr->flags);
+
+ spin_lock_bh(&qedi_conn->tmf_work_lock);
+ list_add_tail(&list_work->list, &qedi_conn->tmf_work_list);
+ spin_unlock_bh(&qedi_conn->tmf_work_lock);
+
+ qedi_iscsi_cleanup_task(ctask, false);
+
+ rval = qedi_wait_for_cleanup_request(qedi, qedi_conn, ctask, qedi_cmd,
+ list_work);
+ if (rval == -1) {
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
+ "FW cleanup got escalated, cid=0x%x\n",
+ qedi_conn->iscsi_conn_id);
+ goto ldel_exit;
+ }
+
+ tid = qedi_get_task_idx(qedi);
+ if (tid == -1) {
+ QEDI_ERR(&qedi->dbg_ctx, "Invalid tid, cid=0x%x\n",
+ qedi_conn->iscsi_conn_id);
+ goto ldel_exit;
+ }
+
+ qedi_cmd->task_id = tid;
+ qedi_send_iscsi_tmf(qedi_conn, qedi_cmd->task);
+
+abort_ret:
+ clear_bit(QEDI_CONN_FW_CLEANUP, &qedi_conn->flags);
+ return;
+
+ldel_exit:
+ spin_lock_bh(&qedi_conn->tmf_work_lock);
+ if (!qedi_cmd->list_tmf_work) {
+ list_del_init(&list_work->list);
+ qedi_cmd->list_tmf_work = NULL;
+ kfree(list_work);
+ }
+ spin_unlock_bh(&qedi_conn->tmf_work_lock);
+
+ spin_lock(&qedi_conn->list_lock);
+ list_del_init(&cmd->io_cmd);
+ qedi_conn->active_cmd_count--;
+ spin_unlock(&qedi_conn->list_lock);
+
+ clear_bit(QEDI_CONN_FW_CLEANUP, &qedi_conn->flags);
+}
+
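+/*
+ * Build and post a TMF request as a midpath task. For ABORT_TASK the
+ * referenced task tag (RTT) is translated to the firmware task id.
+ */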
+static int qedi_send_iscsi_tmf(struct qedi_conn *qedi_conn,
+ struct iscsi_task *mtask)
+{
+ struct iscsi_conn *conn = qedi_conn->cls_conn->dd_data;
+ struct qedi_ctx *qedi = qedi_conn->qedi;
+ struct iscsi_task_context *fw_task_ctx;
+ struct iscsi_tmf_request_hdr *fw_tmf_request;
+ struct iscsi_sge *single_sge;
+ struct qedi_cmd *qedi_cmd;
+ struct qedi_cmd *cmd;
+ struct iscsi_task *ctask;
+ struct iscsi_tm *tmf_hdr;
+ struct iscsi_sge *req_sge;
+ struct iscsi_sge *resp_sge;
+ u32 lun[2];
+ s16 tid = 0, ptu_invalidate = 0;
+
+ req_sge = (struct iscsi_sge *)qedi_conn->gen_pdu.req_bd_tbl;
+ resp_sge = (struct iscsi_sge *)qedi_conn->gen_pdu.resp_bd_tbl;
+ qedi_cmd = (struct qedi_cmd *)mtask->dd_data;
+ tmf_hdr = (struct iscsi_tm *)mtask->hdr;
+
+ tid = qedi_cmd->task_id;
+ qedi_update_itt_map(qedi, tid, mtask->itt, qedi_cmd);
+
+ fw_task_ctx = qedi_get_task_mem(&qedi->tasks, tid);
+ memset(fw_task_ctx, 0, sizeof(struct iscsi_task_context));
+
+ fw_tmf_request = &fw_task_ctx->ystorm_st_context.pdu_hdr.tmf_request;
+ fw_tmf_request->itt = qedi_set_itt(tid, get_itt(mtask->itt));
+ fw_tmf_request->cmd_sn = be32_to_cpu(tmf_hdr->cmdsn);
+
+ memcpy(lun, &tmf_hdr->lun, sizeof(struct scsi_lun));
+ fw_tmf_request->lun.lo = be32_to_cpu(lun[0]);
+ fw_tmf_request->lun.hi = be32_to_cpu(lun[1]);
+
+ if (qedi->tid_reuse_count[tid] == QEDI_MAX_TASK_NUM) {
+ ptu_invalidate = 1;
+ qedi->tid_reuse_count[tid] = 0;
+ }
+ fw_task_ctx->ystorm_st_context.state.reuse_count =
+ qedi->tid_reuse_count[tid];
+ fw_task_ctx->mstorm_st_context.reuse_count =
+ qedi->tid_reuse_count[tid]++;
+
+ if ((tmf_hdr->flags & ISCSI_FLAG_TM_FUNC_MASK) ==
+ ISCSI_TM_FUNC_ABORT_TASK) {
+ ctask = iscsi_itt_to_task(conn, tmf_hdr->rtt);
+ if (!ctask || !ctask->sc) {
+ QEDI_ERR(&qedi->dbg_ctx,
+ "Could not get reference task\n");
+ return 0;
+ }
+ cmd = (struct qedi_cmd *)ctask->dd_data;
+ fw_tmf_request->rtt =
+ qedi_set_itt(cmd->task_id,
+ get_itt(tmf_hdr->rtt));
+ } else {
+ fw_tmf_request->rtt = ISCSI_RESERVED_TAG;
+ }
+
+ fw_tmf_request->opcode = tmf_hdr->opcode;
+ fw_tmf_request->function = tmf_hdr->flags;
+ fw_tmf_request->hdr_second_dword = ntoh24(tmf_hdr->dlength);
+ fw_tmf_request->ref_cmd_sn = be32_to_cpu(tmf_hdr->refcmdsn);
+
+ single_sge = &fw_task_ctx->mstorm_st_context.sgl_union.single_sge;
+ fw_task_ctx->mstorm_st_context.task_type = ISCSI_TASK_TYPE_MIDPATH;
+ fw_task_ctx->mstorm_ag_context.task_cid = (u16)qedi_conn->iscsi_conn_id;
+ single_sge->sge_addr.lo = resp_sge->sge_addr.lo;
+ single_sge->sge_addr.hi = resp_sge->sge_addr.hi;
+ single_sge->sge_len = resp_sge->sge_len;
+
+ SET_FIELD(fw_task_ctx->mstorm_st_context.flags.mflags,
+ ISCSI_MFLAGS_SINGLE_SGE, 1);
+ SET_FIELD(fw_task_ctx->mstorm_st_context.flags.mflags,
+ ISCSI_MFLAGS_SLOW_IO, 0);
+ fw_task_ctx->mstorm_st_context.sgl_size = 1;
+ fw_task_ctx->mstorm_st_context.rem_task_size = resp_sge->sge_len;
+
+ /* Ustorm context */
+ fw_task_ctx->ustorm_st_context.rem_rcv_len = 0;
+ fw_task_ctx->ustorm_st_context.exp_data_transfer_len = 0;
+ fw_task_ctx->ustorm_st_context.exp_data_sn = 0;
+ fw_task_ctx->ustorm_st_context.task_type = ISCSI_TASK_TYPE_MIDPATH;
+ fw_task_ctx->ustorm_st_context.cq_rss_number = 0;
+
+ SET_FIELD(fw_task_ctx->ustorm_st_context.flags,
+ USTORM_ISCSI_TASK_ST_CTX_LOCAL_COMP, 0);
+ SET_FIELD(fw_task_ctx->ustorm_st_context.reg1.reg1_map,
+ ISCSI_REG1_NUM_FAST_SGES, 0);
+
+ fw_task_ctx->ustorm_ag_context.icid = (u16)qedi_conn->iscsi_conn_id;
+ SET_FIELD(fw_task_ctx->ustorm_ag_context.flags1,
+ USTORM_ISCSI_TASK_AG_CTX_R2T2RECV, 1);
+ fw_task_ctx->ustorm_st_context.lun.lo = be32_to_cpu(lun[0]);
+ fw_task_ctx->ustorm_st_context.lun.hi = be32_to_cpu(lun[1]);
+
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM,
+ "Add TMF to SQ, tmf tid=0x%x, itt=0x%x, cid=0x%x\n",
+ tid, mtask->itt, qedi_conn->iscsi_conn_id);
+
+ spin_lock(&qedi_conn->list_lock);
+ list_add_tail(&qedi_cmd->io_cmd, &qedi_conn->active_cmd_list);
+ qedi_cmd->io_cmd_in_list = true;
+ qedi_conn->active_cmd_count++;
+ spin_unlock(&qedi_conn->list_lock);
+
+ qedi_add_to_sq(qedi_conn, mtask, tid, ptu_invalidate, false);
+ qedi_ring_doorbell(qedi_conn);
+ return 0;
+}
+
+int qedi_iscsi_abort_work(struct qedi_conn *qedi_conn,
+ struct iscsi_task *mtask)
+{
+ struct qedi_ctx *qedi = qedi_conn->qedi;
+ struct iscsi_tm *tmf_hdr;
+ struct qedi_cmd *qedi_cmd = (struct qedi_cmd *)mtask->dd_data;
+ s16 tid = 0;
+
+ tmf_hdr = (struct iscsi_tm *)mtask->hdr;
+ qedi_cmd->task = mtask;
+
+ /* If abort task then schedule the work and return */
+ if ((tmf_hdr->flags & ISCSI_FLAG_TM_FUNC_MASK) ==
+ ISCSI_TM_FUNC_ABORT_TASK) {
+ qedi_cmd->state = CLEANUP_WAIT;
+ INIT_WORK(&qedi_cmd->tmf_work, qedi_tmf_work);
+ queue_work(qedi->tmf_thread, &qedi_cmd->tmf_work);
+
+ } else if (((tmf_hdr->flags & ISCSI_FLAG_TM_FUNC_MASK) ==
+ ISCSI_TM_FUNC_LOGICAL_UNIT_RESET) ||
+ ((tmf_hdr->flags & ISCSI_FLAG_TM_FUNC_MASK) ==
+ ISCSI_TM_FUNC_TARGET_WARM_RESET) ||
+ ((tmf_hdr->flags & ISCSI_FLAG_TM_FUNC_MASK) ==
+ ISCSI_TM_FUNC_TARGET_COLD_RESET)) {
+ tid = qedi_get_task_idx(qedi);
+ if (tid == -1) {
+ QEDI_ERR(&qedi->dbg_ctx, "Invalid tid, cid=0x%x\n",
+ qedi_conn->iscsi_conn_id);
+ return -1;
+ }
+ qedi_cmd->task_id = tid;
+
+ qedi_send_iscsi_tmf(qedi_conn, qedi_cmd->task);
+
+ } else {
+ QEDI_ERR(&qedi->dbg_ctx, "Invalid tmf, cid=0x%x\n",
+ qedi_conn->iscsi_conn_id);
+ return -1;
+ }
+
+ return 0;
+}
+
+int qedi_send_iscsi_text(struct qedi_conn *qedi_conn,
+ struct iscsi_task *task)
+{
+ struct qedi_ctx *qedi = qedi_conn->qedi;
+ struct iscsi_task_context *fw_task_ctx;
+ struct iscsi_text_request_hdr *fw_text_request;
+ struct iscsi_cached_sge_ctx *cached_sge;
+ struct iscsi_sge *single_sge;
+ struct qedi_cmd *qedi_cmd;
+ /* For 6.5 hdr iscsi_hdr */
+ struct iscsi_text *text_hdr;
+ struct iscsi_sge *req_sge;
+ struct iscsi_sge *resp_sge;
+ s16 ptu_invalidate = 0;
+ s16 tid = 0;
+
+ req_sge = (struct iscsi_sge *)qedi_conn->gen_pdu.req_bd_tbl;
+ resp_sge = (struct iscsi_sge *)qedi_conn->gen_pdu.resp_bd_tbl;
+ qedi_cmd = (struct qedi_cmd *)task->dd_data;
+ text_hdr = (struct iscsi_text *)task->hdr;
+
+ tid = qedi_get_task_idx(qedi);
+ if (tid == -1)
+ return -ENOMEM;
+
+ fw_task_ctx = qedi_get_task_mem(&qedi->tasks, tid);
+ memset(fw_task_ctx, 0, sizeof(struct iscsi_task_context));
+
+ qedi_cmd->task_id = tid;
+
+ /* Ystorm context */
+ fw_text_request =
+ &fw_task_ctx->ystorm_st_context.pdu_hdr.text_request;
+ fw_text_request->opcode = text_hdr->opcode;
+ fw_text_request->flags_attr = text_hdr->flags;
+
+ qedi_update_itt_map(qedi, tid, task->itt, qedi_cmd);
+ fw_text_request->itt = qedi_set_itt(tid, get_itt(task->itt));
+ fw_text_request->ttt = text_hdr->ttt;
+ fw_text_request->cmd_sn = be32_to_cpu(text_hdr->cmdsn);
+ fw_text_request->exp_stat_sn = be32_to_cpu(text_hdr->exp_statsn);
+ fw_text_request->hdr_second_dword = ntoh24(text_hdr->dlength);
+
+ if (qedi->tid_reuse_count[tid] == QEDI_MAX_TASK_NUM) {
+ ptu_invalidate = 1;
+ qedi->tid_reuse_count[tid] = 0;
+ }
+ fw_task_ctx->ystorm_st_context.state.reuse_count =
+ qedi->tid_reuse_count[tid];
+ fw_task_ctx->mstorm_st_context.reuse_count =
+ qedi->tid_reuse_count[tid]++;
+
+ cached_sge =
+ &fw_task_ctx->ystorm_st_context.state.sgl_ctx_union.cached_sge;
+ cached_sge->sge.sge_len = req_sge->sge_len;
+ cached_sge->sge.sge_addr.lo = (u32)(qedi_conn->gen_pdu.req_dma_addr);
+ cached_sge->sge.sge_addr.hi =
+ (u32)((u64)qedi_conn->gen_pdu.req_dma_addr >> 32);
+
+ /* Mstorm context */
+ single_sge = &fw_task_ctx->mstorm_st_context.sgl_union.single_sge;
+ fw_task_ctx->mstorm_st_context.task_type = 0x2;
+ fw_task_ctx->mstorm_ag_context.task_cid = (u16)qedi_conn->iscsi_conn_id;
+ single_sge->sge_addr.lo = resp_sge->sge_addr.lo;
+ single_sge->sge_addr.hi = resp_sge->sge_addr.hi;
+ single_sge->sge_len = resp_sge->sge_len;
+
+ SET_FIELD(fw_task_ctx->mstorm_st_context.flags.mflags,
+ ISCSI_MFLAGS_SINGLE_SGE, 1);
+ SET_FIELD(fw_task_ctx->mstorm_st_context.flags.mflags,
+ ISCSI_MFLAGS_SLOW_IO, 0);
+ fw_task_ctx->mstorm_st_context.sgl_size = 1;
+ fw_task_ctx->mstorm_st_context.rem_task_size = resp_sge->sge_len;
+
+ /* Ustorm context */
+ fw_task_ctx->ustorm_ag_context.exp_data_acked =
+ ntoh24(text_hdr->dlength);
+ fw_task_ctx->ustorm_st_context.rem_rcv_len = resp_sge->sge_len;
+ fw_task_ctx->ustorm_st_context.exp_data_transfer_len =
+ ntoh24(text_hdr->dlength);
+ fw_task_ctx->ustorm_st_context.exp_data_sn =
+ be32_to_cpu(text_hdr->exp_statsn);
+ fw_task_ctx->ustorm_st_context.cq_rss_number = 0;
+ fw_task_ctx->ustorm_st_context.task_type = 0x2;
+ fw_task_ctx->ustorm_ag_context.icid = (u16)qedi_conn->iscsi_conn_id;
+ SET_FIELD(fw_task_ctx->ustorm_ag_context.flags1,
+ USTORM_ISCSI_TASK_AG_CTX_R2T2RECV, 1);
+
+ /* Add command in active command list */
+ spin_lock(&qedi_conn->list_lock);
+ list_add_tail(&qedi_cmd->io_cmd, &qedi_conn->active_cmd_list);
+ qedi_cmd->io_cmd_in_list = true;
+ qedi_conn->active_cmd_count++;
+ spin_unlock(&qedi_conn->list_lock);
+
+ qedi_add_to_sq(qedi_conn, task, tid, ptu_invalidate, false);
+ qedi_ring_doorbell(qedi_conn);
+
+ return 0;
+}
+
+int qedi_send_iscsi_nopout(struct qedi_conn *qedi_conn,
+ struct iscsi_task *task,
+ char *datap, int data_len, int unsol)
+{
+ struct qedi_ctx *qedi = qedi_conn->qedi;
+ struct iscsi_task_context *fw_task_ctx;
+ struct iscsi_nop_out_hdr *fw_nop_out;
+ struct qedi_cmd *qedi_cmd;
+ /* For 6.5 hdr iscsi_hdr */
+ struct iscsi_nopout *nopout_hdr;
+ struct iscsi_cached_sge_ctx *cached_sge;
+ struct iscsi_sge *single_sge;
+ struct iscsi_sge *req_sge;
+ struct iscsi_sge *resp_sge;
+ u32 lun[2];
+ s16 ptu_invalidate = 0;
+ s16 tid = 0;
+
+ req_sge = (struct iscsi_sge *)qedi_conn->gen_pdu.req_bd_tbl;
+ resp_sge = (struct iscsi_sge *)qedi_conn->gen_pdu.resp_bd_tbl;
+ qedi_cmd = (struct qedi_cmd *)task->dd_data;
+ nopout_hdr = (struct iscsi_nopout *)task->hdr;
+
+ tid = qedi_get_task_idx(qedi);
+ if (tid == -1) {
+ QEDI_WARN(&qedi->dbg_ctx, "Invalid tid\n");
+ return -ENOMEM;
+ }
+
+ fw_task_ctx = qedi_get_task_mem(&qedi->tasks, tid);
+
+ memset(fw_task_ctx, 0, sizeof(struct iscsi_task_context));
+ qedi_cmd->task_id = tid;
+
+ /* Ystorm context */
+ fw_nop_out = &fw_task_ctx->ystorm_st_context.pdu_hdr.nop_out;
+ SET_FIELD(fw_nop_out->flags_attr, ISCSI_NOP_OUT_HDR_CONST1, 1);
+ SET_FIELD(fw_nop_out->flags_attr, ISCSI_NOP_OUT_HDR_RSRV, 0);
+
+ memcpy(lun, &nopout_hdr->lun, sizeof(struct scsi_lun));
+ fw_nop_out->lun.lo = be32_to_cpu(lun[0]);
+ fw_nop_out->lun.hi = be32_to_cpu(lun[1]);
+
+ qedi_update_itt_map(qedi, tid, task->itt, qedi_cmd);
+
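+	/*
+	 * A valid TTT means this is a reply to a target NOP-In: echo the
+	 * target's tags and complete locally. Otherwise it is an initiator
+	 * ping and is tracked on the active command list.
+	 */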
+ if (nopout_hdr->ttt != ISCSI_TTT_ALL_ONES) {
+ fw_nop_out->itt = be32_to_cpu(nopout_hdr->itt);
+ fw_nop_out->ttt = be32_to_cpu(nopout_hdr->ttt);
+ fw_task_ctx->ystorm_st_context.state.buffer_offset[0] = 0;
+ fw_task_ctx->ystorm_st_context.state.local_comp = 1;
+ SET_FIELD(fw_task_ctx->ustorm_st_context.flags,
+ USTORM_ISCSI_TASK_ST_CTX_LOCAL_COMP, 1);
+ } else {
+ fw_nop_out->itt = qedi_set_itt(tid, get_itt(task->itt));
+ fw_nop_out->ttt = ISCSI_TTT_ALL_ONES;
+ fw_task_ctx->ystorm_st_context.state.buffer_offset[0] = 0;
+
+ spin_lock(&qedi_conn->list_lock);
+ list_add_tail(&qedi_cmd->io_cmd, &qedi_conn->active_cmd_list);
+ qedi_cmd->io_cmd_in_list = true;
+ qedi_conn->active_cmd_count++;
+ spin_unlock(&qedi_conn->list_lock);
+ }
+
+ fw_nop_out->opcode = ISCSI_OPCODE_NOP_OUT;
+ fw_nop_out->cmd_sn = be32_to_cpu(nopout_hdr->cmdsn);
+ fw_nop_out->exp_stat_sn = be32_to_cpu(nopout_hdr->exp_statsn);
+
+ cached_sge =
+ &fw_task_ctx->ystorm_st_context.state.sgl_ctx_union.cached_sge;
+ cached_sge->sge.sge_len = req_sge->sge_len;
+ cached_sge->sge.sge_addr.lo = (u32)(qedi_conn->gen_pdu.req_dma_addr);
+ cached_sge->sge.sge_addr.hi =
+ (u32)((u64)qedi_conn->gen_pdu.req_dma_addr >> 32);
+
+ /* Mstorm context */
+ fw_task_ctx->mstorm_st_context.task_type = ISCSI_TASK_TYPE_MIDPATH;
+ fw_task_ctx->mstorm_ag_context.task_cid = (u16)qedi_conn->iscsi_conn_id;
+
+ single_sge = &fw_task_ctx->mstorm_st_context.sgl_union.single_sge;
+ single_sge->sge_addr.lo = resp_sge->sge_addr.lo;
+ single_sge->sge_addr.hi = resp_sge->sge_addr.hi;
+ single_sge->sge_len = resp_sge->sge_len;
+ fw_task_ctx->mstorm_st_context.rem_task_size = resp_sge->sge_len;
+
+ if (qedi->tid_reuse_count[tid] == QEDI_MAX_TASK_NUM) {
+ ptu_invalidate = 1;
+ qedi->tid_reuse_count[tid] = 0;
+ }
+ fw_task_ctx->ystorm_st_context.state.reuse_count =
+ qedi->tid_reuse_count[tid];
+ fw_task_ctx->mstorm_st_context.reuse_count =
+ qedi->tid_reuse_count[tid]++;
+ /* Ustorm context */
+ fw_task_ctx->ustorm_st_context.rem_rcv_len = resp_sge->sge_len;
+ fw_task_ctx->ustorm_st_context.exp_data_transfer_len = data_len;
+ fw_task_ctx->ustorm_st_context.exp_data_sn = 0;
+ fw_task_ctx->ustorm_st_context.task_type = ISCSI_TASK_TYPE_MIDPATH;
+ fw_task_ctx->ustorm_st_context.cq_rss_number = 0;
+
+ SET_FIELD(fw_task_ctx->ustorm_st_context.reg1.reg1_map,
+ ISCSI_REG1_NUM_FAST_SGES, 0);
+
+ fw_task_ctx->ustorm_ag_context.icid = (u16)qedi_conn->iscsi_conn_id;
+ SET_FIELD(fw_task_ctx->ustorm_ag_context.flags1,
+ USTORM_ISCSI_TASK_AG_CTX_R2T2RECV, 1);
+
+ fw_task_ctx->ustorm_st_context.lun.lo = be32_to_cpu(lun[0]);
+ fw_task_ctx->ustorm_st_context.lun.hi = be32_to_cpu(lun[1]);
+
+ qedi_add_to_sq(qedi_conn, task, tid, ptu_invalidate, false);
+ qedi_ring_doorbell(qedi_conn);
+ return 0;
+}
+
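+/*
+ * Split one SG element into multiple SGEs so that no fragment crosses
+ * a QEDI_PAGE_SIZE boundary or exceeds QEDI_BD_SPLIT_SZ. Returns the
+ * number of fragments written starting at bd_index.
+ */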
+static int qedi_split_bd(struct qedi_cmd *cmd, u64 addr, int sg_len,
+ int bd_index)
+{
+ struct iscsi_sge *bd = cmd->io_tbl.sge_tbl;
+ int frag_size, sg_frags;
+
+ sg_frags = 0;
+
+ while (sg_len) {
+ if (addr % QEDI_PAGE_SIZE)
+ frag_size =
+ (QEDI_PAGE_SIZE - (addr % QEDI_PAGE_SIZE));
+ else
+ frag_size = (sg_len > QEDI_BD_SPLIT_SZ) ? 0 :
+ (sg_len % QEDI_BD_SPLIT_SZ);
+
+ if (frag_size == 0)
+ frag_size = QEDI_BD_SPLIT_SZ;
+
+ bd[bd_index + sg_frags].sge_addr.lo = (addr & 0xffffffff);
+ bd[bd_index + sg_frags].sge_addr.hi = (addr >> 32);
+ bd[bd_index + sg_frags].sge_len = (u16)frag_size;
+ QEDI_INFO(&cmd->conn->qedi->dbg_ctx, QEDI_LOG_IO,
+ "split sge %d: addr=%llx, len=%x",
+ (bd_index + sg_frags), addr, frag_size);
+
+ addr += (u64)frag_size;
+ sg_frags++;
+ sg_len -= frag_size;
+ }
+ return sg_frags;
+}
+
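+/*
+ * DMA-map the scsi command's SG list into the command's SGE table.
+ * A lone SGE up to MAX_SGLEN_FOR_CACHESGL is passed through as-is;
+ * unaligned head/tail/middle elements force the slow path.
+ */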
+static int qedi_map_scsi_sg(struct qedi_ctx *qedi, struct qedi_cmd *cmd)
+{
+ struct scsi_cmnd *sc = cmd->scsi_cmd;
+ struct iscsi_sge *bd = cmd->io_tbl.sge_tbl;
+ struct scatterlist *sg;
+ int byte_count = 0;
+ int bd_count = 0;
+ int sg_count;
+ int sg_len;
+ int sg_frags;
+ u64 addr, end_addr;
+ int i;
+
+ WARN_ON(scsi_sg_count(sc) > QEDI_ISCSI_MAX_BDS_PER_CMD);
+
+ sg_count = dma_map_sg(&qedi->pdev->dev, scsi_sglist(sc),
+ scsi_sg_count(sc), sc->sc_data_direction);
+
+	/*
+	 * Send a single SGE as a cached SGL when its length does not
+	 * exceed MAX_SGLEN_FOR_CACHESGL (64K).
+	 */
+ sg = scsi_sglist(sc);
+ if ((sg_count == 1) && (sg_dma_len(sg) <= MAX_SGLEN_FOR_CACHESGL)) {
+ sg_len = sg_dma_len(sg);
+ addr = (u64)sg_dma_address(sg);
+
+ bd[bd_count].sge_addr.lo = (addr & 0xffffffff);
+ bd[bd_count].sge_addr.hi = (addr >> 32);
+ bd[bd_count].sge_len = (u16)sg_len;
+
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_IO,
+			  "single-cached-sgl: bd_count:%d addr=%llx, len=%x",
+ sg_count, addr, sg_len);
+
+ return ++bd_count;
+ }
+
+ scsi_for_each_sg(sc, sg, sg_count, i) {
+ sg_len = sg_dma_len(sg);
+ addr = (u64)sg_dma_address(sg);
+ end_addr = (addr + sg_len);
+
+ /*
+ * first sg elem in the 'list',
+ * check if end addr is page-aligned.
+ */
+ if ((i == 0) && (sg_count > 1) && (end_addr % QEDI_PAGE_SIZE))
+ cmd->use_slowpath = true;
+
+ /*
+ * last sg elem in the 'list',
+ * check if start addr is page-aligned.
+ */
+ else if ((i == (sg_count - 1)) &&
+ (sg_count > 1) && (addr % QEDI_PAGE_SIZE))
+ cmd->use_slowpath = true;
+
+ /*
+ * middle sg elements in list,
+ * check if start and end addr is page-aligned
+ */
+ else if ((i != 0) && (i != (sg_count - 1)) &&
+ ((addr % QEDI_PAGE_SIZE) ||
+ (end_addr % QEDI_PAGE_SIZE)))
+ cmd->use_slowpath = true;
+
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_IO, "sg[%d] size=0x%x",
+ i, sg_len);
+
+ if (sg_len > QEDI_BD_SPLIT_SZ) {
+ sg_frags = qedi_split_bd(cmd, addr, sg_len, bd_count);
+ } else {
+ sg_frags = 1;
+ bd[bd_count].sge_addr.lo = addr & 0xffffffff;
+ bd[bd_count].sge_addr.hi = addr >> 32;
+ bd[bd_count].sge_len = sg_len;
+ }
+ byte_count += sg_len;
+ bd_count += sg_frags;
+ }
+
+ if (byte_count != scsi_bufflen(sc))
+ QEDI_ERR(&qedi->dbg_ctx,
+ "byte_count = %d != scsi_bufflen = %d\n", byte_count,
+ scsi_bufflen(sc));
+ else
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_IO, "byte_count = %d\n",
+ byte_count);
+
+ WARN_ON(byte_count != scsi_bufflen(sc));
+
+ return bd_count;
+}
+
+static void qedi_iscsi_map_sg_list(struct qedi_cmd *cmd)
+{
+ int bd_count;
+ struct scsi_cmnd *sc = cmd->scsi_cmd;
+
+ if (scsi_sg_count(sc)) {
+ bd_count = qedi_map_scsi_sg(cmd->conn->qedi, cmd);
+ if (bd_count == 0)
+ return;
+ } else {
+ struct iscsi_sge *bd = cmd->io_tbl.sge_tbl;
+
+ bd[0].sge_addr.lo = 0;
+ bd[0].sge_addr.hi = 0;
+ bd[0].sge_len = 0;
+ bd_count = 0;
+ }
+ cmd->io_tbl.sge_valid = bd_count;
+}
+
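+/* Copy the SCSI CDB into the firmware context as big-endian dwords */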
+static void qedi_cpy_scsi_cdb(struct scsi_cmnd *sc, u32 *dstp)
+{
+ u32 dword;
+ int lpcnt;
+ u8 *srcp;
+
+ lpcnt = sc->cmd_len / sizeof(dword);
+ srcp = (u8 *)sc->cmnd;
+ while (lpcnt--) {
+ memcpy(&dword, (const void *)srcp, 4);
+ *dstp = cpu_to_be32(dword);
+ srcp += 4;
+ dstp++;
+ }
+ if (sc->cmd_len & 0x3) {
+ dword = (u32)srcp[0] | ((u32)srcp[1] << 8);
+ *dstp = cpu_to_be32(dword);
+ }
+}
+
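+/*
+ * Record one request/response entry in the circular I/O trace buffer
+ * (QEDI_IO_TRACE_SIZE entries) for later debugging.
+ */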
+void qedi_trace_io(struct qedi_ctx *qedi, struct iscsi_task *task,
+ u16 tid, int8_t direction)
+{
+ struct qedi_io_log *io_log;
+ struct iscsi_conn *conn = task->conn;
+ struct qedi_conn *qedi_conn = conn->dd_data;
+ struct scsi_cmnd *sc_cmd = task->sc;
+ unsigned long flags;
+ u8 op;
+
+ spin_lock_irqsave(&qedi->io_trace_lock, flags);
+
+ io_log = &qedi->io_trace_buf[qedi->io_trace_idx];
+ io_log->direction = direction;
+ io_log->task_id = tid;
+ io_log->cid = qedi_conn->iscsi_conn_id;
+ io_log->lun = sc_cmd->device->lun;
+ io_log->op = sc_cmd->cmnd[0];
+ op = sc_cmd->cmnd[0];
+ io_log->lba[0] = sc_cmd->cmnd[2];
+ io_log->lba[1] = sc_cmd->cmnd[3];
+ io_log->lba[2] = sc_cmd->cmnd[4];
+ io_log->lba[3] = sc_cmd->cmnd[5];
+ io_log->bufflen = scsi_bufflen(sc_cmd);
+ io_log->sg_count = scsi_sg_count(sc_cmd);
+ io_log->fast_sgs = qedi->fast_sgls;
+ io_log->cached_sgs = qedi->cached_sgls;
+ io_log->slow_sgs = qedi->slow_sgls;
+ io_log->cached_sge = qedi->use_cached_sge;
+ io_log->slow_sge = qedi->use_slow_sge;
+ io_log->fast_sge = qedi->use_fast_sge;
+ io_log->result = sc_cmd->result;
+ io_log->jiffies = jiffies;
+ io_log->blk_req_cpu = smp_processor_id();
+
+ if (direction == QEDI_IO_TRACE_REQ) {
+ /* For requests we only care about the submission CPU */
+ io_log->req_cpu = smp_processor_id() % qedi->num_queues;
+ io_log->intr_cpu = 0;
+ io_log->blk_rsp_cpu = 0;
+ } else if (direction == QEDI_IO_TRACE_RSP) {
+ io_log->req_cpu = smp_processor_id() % qedi->num_queues;
+ io_log->intr_cpu = qedi->intr_cpu;
+ io_log->blk_rsp_cpu = smp_processor_id();
+ }
+
+ qedi->io_trace_idx++;
+ if (qedi->io_trace_idx == QEDI_IO_TRACE_SIZE)
+ qedi->io_trace_idx = 0;
+
+ qedi->use_cached_sge = false;
+ qedi->use_slow_sge = false;
+ qedi->use_fast_sge = false;
+
+ spin_unlock_irqrestore(&qedi->io_trace_lock, flags);
+}
+
+int qedi_iscsi_send_ioreq(struct iscsi_task *task)
+{
+ struct iscsi_conn *conn = task->conn;
+ struct iscsi_session *session = conn->session;
+ struct Scsi_Host *shost = iscsi_session_to_shost(session->cls_session);
+ struct qedi_ctx *qedi = iscsi_host_priv(shost);
+ struct qedi_conn *qedi_conn = conn->dd_data;
+ struct qedi_cmd *cmd = task->dd_data;
+ struct scsi_cmnd *sc = task->sc;
+ struct iscsi_task_context *fw_task_ctx;
+ struct iscsi_cached_sge_ctx *cached_sge;
+ struct iscsi_phys_sgl_ctx *phys_sgl;
+ struct iscsi_virt_sgl_ctx *virt_sgl;
+ struct ystorm_iscsi_task_st_ctx *yst_cxt;
+ struct mstorm_iscsi_task_st_ctx *mst_cxt;
+ struct iscsi_sgl *sgl_struct;
+ struct iscsi_sge *single_sge;
+ struct iscsi_scsi_req *hdr = (struct iscsi_scsi_req *)task->hdr;
+ struct iscsi_sge *bd = cmd->io_tbl.sge_tbl;
+ enum iscsi_task_type task_type;
+ struct iscsi_cmd_hdr *fw_cmd;
+ u32 lun[2];
+ u32 exp_data;
+ u16 cq_idx = smp_processor_id() % qedi->num_queues;
+ s16 ptu_invalidate = 0;
+ s16 tid = 0;
+ u8 num_fast_sgs;
+
+ tid = qedi_get_task_idx(qedi);
+ if (tid == -1)
+ return -ENOMEM;
+
+ qedi_iscsi_map_sg_list(cmd);
+
+ int_to_scsilun(sc->device->lun, (struct scsi_lun *)lun);
+ fw_task_ctx = qedi_get_task_mem(&qedi->tasks, tid);
+
+ memset(fw_task_ctx, 0, sizeof(struct iscsi_task_context));
+ cmd->task_id = tid;
+
+ /* Ystorm context */
+ fw_cmd = &fw_task_ctx->ystorm_st_context.pdu_hdr.cmd;
+ SET_FIELD(fw_cmd->flags_attr, ISCSI_CMD_HDR_ATTR, ISCSI_ATTR_SIMPLE);
+
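+	/*
+	 * For writes, exp_data_acked is seeded with the unsolicited data
+	 * the target accepts up front: immediate data only when an
+	 * initial R2T is required, otherwise up to the first burst.
+	 */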
+ if (sc->sc_data_direction == DMA_TO_DEVICE) {
+ if (conn->session->initial_r2t_en) {
+ exp_data = min((conn->session->imm_data_en *
+ conn->max_xmit_dlength),
+ conn->session->first_burst);
+ exp_data = min(exp_data, scsi_bufflen(sc));
+ fw_task_ctx->ustorm_ag_context.exp_data_acked =
+ cpu_to_le32(exp_data);
+ } else {
+ fw_task_ctx->ustorm_ag_context.exp_data_acked =
+ min(conn->session->first_burst, scsi_bufflen(sc));
+ }
+
+ SET_FIELD(fw_cmd->flags_attr, ISCSI_CMD_HDR_WRITE, 1);
+ task_type = ISCSI_TASK_TYPE_INITIATOR_WRITE;
+ } else {
+ if (scsi_bufflen(sc))
+ SET_FIELD(fw_cmd->flags_attr, ISCSI_CMD_HDR_READ, 1);
+ task_type = ISCSI_TASK_TYPE_INITIATOR_READ;
+ }
+
+ fw_cmd->lun.lo = be32_to_cpu(lun[0]);
+ fw_cmd->lun.hi = be32_to_cpu(lun[1]);
+
+ qedi_update_itt_map(qedi, tid, task->itt, cmd);
+ fw_cmd->itt = qedi_set_itt(tid, get_itt(task->itt));
+ fw_cmd->expected_transfer_length = scsi_bufflen(sc);
+ fw_cmd->cmd_sn = be32_to_cpu(hdr->cmdsn);
+ fw_cmd->opcode = hdr->opcode;
+ qedi_cpy_scsi_cdb(sc, (u32 *)fw_cmd->cdb);
+
+ /* Mstorm context */
+ fw_task_ctx->mstorm_st_context.sense_db.lo = (u32)cmd->sense_buffer_dma;
+ fw_task_ctx->mstorm_st_context.sense_db.hi =
+ (u32)((u64)cmd->sense_buffer_dma >> 32);
+ fw_task_ctx->mstorm_ag_context.task_cid = qedi_conn->iscsi_conn_id;
+ fw_task_ctx->mstorm_st_context.task_type = task_type;
+
+ if (qedi->tid_reuse_count[tid] == QEDI_MAX_TASK_NUM) {
+ ptu_invalidate = 1;
+ qedi->tid_reuse_count[tid] = 0;
+ }
+ fw_task_ctx->ystorm_st_context.state.reuse_count =
+ qedi->tid_reuse_count[tid];
+ fw_task_ctx->mstorm_st_context.reuse_count =
+ qedi->tid_reuse_count[tid]++;
+
+ /* Ustorm context */
+ fw_task_ctx->ustorm_st_context.rem_rcv_len = scsi_bufflen(sc);
+ fw_task_ctx->ustorm_st_context.exp_data_transfer_len = scsi_bufflen(sc);
+ fw_task_ctx->ustorm_st_context.exp_data_sn =
+ be32_to_cpu(hdr->exp_statsn);
+ fw_task_ctx->ustorm_st_context.task_type = task_type;
+ fw_task_ctx->ustorm_st_context.cq_rss_number = cq_idx;
+ fw_task_ctx->ustorm_ag_context.icid = (u16)qedi_conn->iscsi_conn_id;
+
+ SET_FIELD(fw_task_ctx->ustorm_ag_context.flags1,
+ USTORM_ISCSI_TASK_AG_CTX_R2T2RECV, 1);
+ SET_FIELD(fw_task_ctx->ustorm_st_context.flags,
+ USTORM_ISCSI_TASK_ST_CTX_LOCAL_COMP, 0);
+
+ num_fast_sgs = (cmd->io_tbl.sge_valid ?
+ min((u16)QEDI_FAST_SGE_COUNT,
+ (u16)cmd->io_tbl.sge_valid) : 0);
+ SET_FIELD(fw_task_ctx->ustorm_st_context.reg1.reg1_map,
+ ISCSI_REG1_NUM_FAST_SGES, num_fast_sgs);
+
+ fw_task_ctx->ustorm_st_context.lun.lo = be32_to_cpu(lun[0]);
+ fw_task_ctx->ustorm_st_context.lun.hi = be32_to_cpu(lun[1]);
+
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_IO, "Total sge count [%d]\n",
+ cmd->io_tbl.sge_valid);
+
+ yst_cxt = &fw_task_ctx->ystorm_st_context;
+ mst_cxt = &fw_task_ctx->mstorm_st_context;
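+
+	/*
+	 * Pick the SGL layout: a single cached SGE, a physical SGL for
+	 * slow-path (unaligned) I/O, or a virtual SGL for the fast path.
+	 */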
+ /* Tx path */
+ if (task_type == ISCSI_TASK_TYPE_INITIATOR_WRITE) {
+ /* not considering superIO or FastIO */
+ if (cmd->io_tbl.sge_valid == 1) {
+ cached_sge = &yst_cxt->state.sgl_ctx_union.cached_sge;
+ cached_sge->sge.sge_addr.lo = bd[0].sge_addr.lo;
+ cached_sge->sge.sge_addr.hi = bd[0].sge_addr.hi;
+ cached_sge->sge.sge_len = bd[0].sge_len;
+ qedi->cached_sgls++;
+ } else if ((cmd->io_tbl.sge_valid != 1) && cmd->use_slowpath) {
+ SET_FIELD(fw_task_ctx->mstorm_st_context.flags.mflags,
+ ISCSI_MFLAGS_SLOW_IO, 1);
+ SET_FIELD(fw_task_ctx->ustorm_st_context.reg1.reg1_map,
+ ISCSI_REG1_NUM_FAST_SGES, 0);
+ phys_sgl = &yst_cxt->state.sgl_ctx_union.phys_sgl;
+ phys_sgl->sgl_base.lo = (u32)(cmd->io_tbl.sge_tbl_dma);
+ phys_sgl->sgl_base.hi =
+ (u32)((u64)cmd->io_tbl.sge_tbl_dma >> 32);
+ phys_sgl->sgl_size = cmd->io_tbl.sge_valid;
+ qedi->slow_sgls++;
+ } else if ((cmd->io_tbl.sge_valid != 1) && !cmd->use_slowpath) {
+ SET_FIELD(fw_task_ctx->mstorm_st_context.flags.mflags,
+ ISCSI_MFLAGS_SLOW_IO, 0);
+ SET_FIELD(fw_task_ctx->ustorm_st_context.reg1.reg1_map,
+ ISCSI_REG1_NUM_FAST_SGES,
+ min((u16)QEDI_FAST_SGE_COUNT,
+ (u16)cmd->io_tbl.sge_valid));
+ virt_sgl = &yst_cxt->state.sgl_ctx_union.virt_sgl;
+ virt_sgl->sgl_base.lo = (u32)(cmd->io_tbl.sge_tbl_dma);
+ virt_sgl->sgl_base.hi =
+ (u32)((u64)cmd->io_tbl.sge_tbl_dma >> 32);
+ virt_sgl->sgl_initial_offset =
+ (u32)bd[0].sge_addr.lo & (QEDI_PAGE_SIZE - 1);
+ qedi->fast_sgls++;
+ }
+ fw_task_ctx->mstorm_st_context.sgl_size = cmd->io_tbl.sge_valid;
+ fw_task_ctx->mstorm_st_context.rem_task_size = scsi_bufflen(sc);
+ } else {
+ /* Rx path */
+ if (cmd->io_tbl.sge_valid == 1) {
+ SET_FIELD(fw_task_ctx->mstorm_st_context.flags.mflags,
+ ISCSI_MFLAGS_SLOW_IO, 0);
+ SET_FIELD(fw_task_ctx->mstorm_st_context.flags.mflags,
+ ISCSI_MFLAGS_SINGLE_SGE, 1);
+ single_sge = &mst_cxt->sgl_union.single_sge;
+ single_sge->sge_addr.lo = bd[0].sge_addr.lo;
+ single_sge->sge_addr.hi = bd[0].sge_addr.hi;
+ single_sge->sge_len = bd[0].sge_len;
+ qedi->cached_sgls++;
+ } else if ((cmd->io_tbl.sge_valid != 1) && cmd->use_slowpath) {
+ sgl_struct = &mst_cxt->sgl_union.sgl_struct;
+ sgl_struct->sgl_addr.lo =
+ (u32)(cmd->io_tbl.sge_tbl_dma);
+ sgl_struct->sgl_addr.hi =
+ (u32)((u64)cmd->io_tbl.sge_tbl_dma >> 32);
+ SET_FIELD(fw_task_ctx->mstorm_st_context.flags.mflags,
+ ISCSI_MFLAGS_SLOW_IO, 1);
+ SET_FIELD(fw_task_ctx->ustorm_st_context.reg1.reg1_map,
+ ISCSI_REG1_NUM_FAST_SGES, 0);
+ sgl_struct->updated_sge_size = 0;
+ sgl_struct->updated_sge_offset = 0;
+ qedi->slow_sgls++;
+ } else if ((cmd->io_tbl.sge_valid != 1) && !cmd->use_slowpath) {
+ sgl_struct = &mst_cxt->sgl_union.sgl_struct;
+ sgl_struct->sgl_addr.lo =
+ (u32)(cmd->io_tbl.sge_tbl_dma);
+ sgl_struct->sgl_addr.hi =
+ (u32)((u64)cmd->io_tbl.sge_tbl_dma >> 32);
+ sgl_struct->byte_offset =
+ (u32)bd[0].sge_addr.lo & (QEDI_PAGE_SIZE - 1);
+ SET_FIELD(fw_task_ctx->mstorm_st_context.flags.mflags,
+ ISCSI_MFLAGS_SLOW_IO, 0);
+ SET_FIELD(fw_task_ctx->ustorm_st_context.reg1.reg1_map,
+ ISCSI_REG1_NUM_FAST_SGES, 0);
+ sgl_struct->updated_sge_size = 0;
+ sgl_struct->updated_sge_offset = 0;
+ qedi->fast_sgls++;
+ }
+ fw_task_ctx->mstorm_st_context.sgl_size = cmd->io_tbl.sge_valid;
+ fw_task_ctx->mstorm_st_context.rem_task_size = scsi_bufflen(sc);
+ }
+
+	if (cmd->io_tbl.sge_valid == 1) {
+		/* Single SGE */
+		qedi->use_cached_sge = true;
+	} else {
+ if (cmd->use_slowpath)
+ qedi->use_slow_sge = true;
+ else
+ qedi->use_fast_sge = true;
+ }
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_IO,
+ "%s: %s-SGL: num_sges=0x%x first-sge-lo=0x%x first-sge-hi=0x%x",
+ (task_type == ISCSI_TASK_TYPE_INITIATOR_WRITE) ?
+ "Write " : "Read ", (cmd->io_tbl.sge_valid == 1) ?
+ "Single" : (cmd->use_slowpath ? "SLOW" : "FAST"),
+ (u16)cmd->io_tbl.sge_valid, (u32)(cmd->io_tbl.sge_tbl_dma),
+ (u32)((u64)cmd->io_tbl.sge_tbl_dma >> 32));
+
+ /* Add command in active command list */
+ spin_lock(&qedi_conn->list_lock);
+ list_add_tail(&cmd->io_cmd, &qedi_conn->active_cmd_list);
+ cmd->io_cmd_in_list = true;
+ qedi_conn->active_cmd_count++;
+ spin_unlock(&qedi_conn->list_lock);
+
+ qedi_add_to_sq(qedi_conn, task, tid, ptu_invalidate, false);
+ qedi_ring_doorbell(qedi_conn);
+ if (qedi_io_tracing)
+ qedi_trace_io(qedi, task, tid, QEDI_IO_TRACE_REQ);
+
+ return 0;
+}
+
+int qedi_iscsi_cleanup_task(struct iscsi_task *task, bool mark_cmd_node_deleted)
+{
+ struct iscsi_conn *conn = task->conn;
+ struct qedi_conn *qedi_conn = conn->dd_data;
+ struct qedi_cmd *cmd = task->dd_data;
+ s16 ptu_invalidate = 0;
+
+ QEDI_INFO(&qedi_conn->qedi->dbg_ctx, QEDI_LOG_SCSI_TM,
+ "issue cleanup tid=0x%x itt=0x%x task_state=%d cmd_state=0%x cid=0x%x\n",
+ cmd->task_id, get_itt(task->itt), task->state,
+ cmd->state, qedi_conn->iscsi_conn_id);
+
+ qedi_add_to_sq(qedi_conn, task, cmd->task_id, ptu_invalidate, true);
+ qedi_ring_doorbell(qedi_conn);
+
+ return 0;
+}
diff --git a/drivers/scsi/qedi/qedi_gbl.h b/drivers/scsi/qedi/qedi_gbl.h
new file mode 100644
index 000000000000..8e488de88ece
--- /dev/null
+++ b/drivers/scsi/qedi/qedi_gbl.h
@@ -0,0 +1,73 @@
+/*
+ * QLogic iSCSI Offload Driver
+ * Copyright (c) 2016 Cavium Inc.
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#ifndef _QEDI_GBL_H_
+#define _QEDI_GBL_H_
+
+#include "qedi_iscsi.h"
+
+extern uint qedi_io_tracing;
+extern int do_not_recover;
+extern struct scsi_host_template qedi_host_template;
+extern struct iscsi_transport qedi_iscsi_transport;
+extern const struct qed_iscsi_ops *qedi_ops;
+extern struct qedi_debugfs_ops qedi_debugfs_ops;
+extern const struct file_operations qedi_dbg_fops;
+extern struct device_attribute *qedi_shost_attrs[];
+
+int qedi_alloc_sq(struct qedi_ctx *qedi, struct qedi_endpoint *ep);
+void qedi_free_sq(struct qedi_ctx *qedi, struct qedi_endpoint *ep);
+
+int qedi_send_iscsi_login(struct qedi_conn *qedi_conn,
+ struct iscsi_task *task);
+int qedi_send_iscsi_logout(struct qedi_conn *qedi_conn,
+ struct iscsi_task *task);
+int qedi_iscsi_abort_work(struct qedi_conn *qedi_conn,
+ struct iscsi_task *mtask);
+int qedi_send_iscsi_text(struct qedi_conn *qedi_conn,
+ struct iscsi_task *task);
+int qedi_send_iscsi_nopout(struct qedi_conn *qedi_conn,
+ struct iscsi_task *task,
+ char *datap, int data_len, int unsol);
+int qedi_iscsi_send_ioreq(struct iscsi_task *task);
+int qedi_get_task_idx(struct qedi_ctx *qedi);
+void qedi_clear_task_idx(struct qedi_ctx *qedi, int idx);
+int qedi_iscsi_cleanup_task(struct iscsi_task *task,
+ bool mark_cmd_node_deleted);
+void qedi_iscsi_unmap_sg_list(struct qedi_cmd *cmd);
+void qedi_update_itt_map(struct qedi_ctx *qedi, u32 tid, u32 proto_itt,
+ struct qedi_cmd *qedi_cmd);
+void qedi_get_proto_itt(struct qedi_ctx *qedi, u32 tid, u32 *proto_itt);
+void qedi_get_task_tid(struct qedi_ctx *qedi, u32 itt, int16_t *tid);
+void qedi_process_iscsi_error(struct qedi_endpoint *ep,
+ struct async_data *data);
+void qedi_start_conn_recovery(struct qedi_ctx *qedi,
+ struct qedi_conn *qedi_conn);
+struct qedi_conn *qedi_get_conn_from_id(struct qedi_ctx *qedi, u32 iscsi_cid);
+void qedi_process_tcp_error(struct qedi_endpoint *ep, struct async_data *data);
+void qedi_mark_device_missing(struct iscsi_cls_session *cls_session);
+void qedi_mark_device_available(struct iscsi_cls_session *cls_session);
+void qedi_reset_host_mtu(struct qedi_ctx *qedi, u16 mtu);
+int qedi_recover_all_conns(struct qedi_ctx *qedi);
+void qedi_fp_process_cqes(struct qedi_work *work);
+int qedi_cleanup_all_io(struct qedi_ctx *qedi,
+ struct qedi_conn *qedi_conn,
+ struct iscsi_task *task, bool in_recovery);
+void qedi_trace_io(struct qedi_ctx *qedi, struct iscsi_task *task,
+ u16 tid, int8_t direction);
+int qedi_alloc_id(struct qedi_portid_tbl *id_tbl, u16 id);
+u16 qedi_alloc_new_id(struct qedi_portid_tbl *id_tbl);
+void qedi_free_id(struct qedi_portid_tbl *id_tbl, u16 id);
+int qedi_create_sysfs_ctx_attr(struct qedi_ctx *qedi);
+void qedi_remove_sysfs_ctx_attr(struct qedi_ctx *qedi);
+void qedi_clearsq(struct qedi_ctx *qedi,
+ struct qedi_conn *qedi_conn,
+ struct iscsi_task *task);
+
+#endif /* _QEDI_GBL_H_ */
diff --git a/drivers/scsi/qedi/qedi_hsi.h b/drivers/scsi/qedi/qedi_hsi.h
new file mode 100644
index 000000000000..8ca44c78f093
--- /dev/null
+++ b/drivers/scsi/qedi/qedi_hsi.h
@@ -0,0 +1,52 @@
+/*
+ * QLogic iSCSI Offload Driver
+ * Copyright (c) 2016 Cavium Inc.
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+#ifndef __QEDI_HSI__
+#define __QEDI_HSI__
+/*
+ * Add include to common target
+ */
+#include <linux/qed/common_hsi.h>
+
+/*
+ * Add include to common storage target
+ */
+#include <linux/qed/storage_common.h>
+
+/*
+ * Add include to common TCP target
+ */
+#include <linux/qed/tcp_common.h>
+
+/*
+ * Add include to common iSCSI target for both eCore and protocol driver
+ */
+#include <linux/qed/iscsi_common.h>
+
+/*
+ * iSCSI CMDQ element
+ */
+struct iscsi_cmdqe {
+ __le16 conn_id;
+ u8 invalid_command;
+ u8 cmd_hdr_type;
+ __le32 reserved1[2];
+ __le32 cmd_payload[13];
+};
+
+/*
+ * iSCSI CMD header type
+ */
+enum iscsi_cmd_hdr_type {
+ ISCSI_CMD_HDR_TYPE_BHS_ONLY /* iSCSI BHS with no expected AHS */,
+ ISCSI_CMD_HDR_TYPE_BHS_W_AHS /* iSCSI BHS with expected AHS */,
+ ISCSI_CMD_HDR_TYPE_AHS /* iSCSI AHS */,
+ MAX_ISCSI_CMD_HDR_TYPE
+};
+
+#endif /* __QEDI_HSI__ */
diff --git a/drivers/scsi/qedi/qedi_iscsi.c b/drivers/scsi/qedi/qedi_iscsi.c
new file mode 100644
index 000000000000..d6a205433b66
--- /dev/null
+++ b/drivers/scsi/qedi/qedi_iscsi.c
@@ -0,0 +1,1624 @@
+/*
+ * QLogic iSCSI Offload Driver
+ * Copyright (c) 2016 Cavium Inc.
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#include <linux/blkdev.h>
+#include <linux/etherdevice.h>
+#include <linux/if_ether.h>
+#include <linux/if_vlan.h>
+#include <scsi/scsi_tcq.h>
+
+#include "qedi.h"
+#include "qedi_iscsi.h"
+#include "qedi_gbl.h"
+
+int qedi_recover_all_conns(struct qedi_ctx *qedi)
+{
+ struct qedi_conn *qedi_conn;
+ int i;
+
+ for (i = 0; i < qedi->max_active_conns; i++) {
+ qedi_conn = qedi_get_conn_from_id(qedi, i);
+ if (!qedi_conn)
+ continue;
+
+ qedi_start_conn_recovery(qedi, qedi_conn);
+ }
+
+ return SUCCESS;
+}
+
+static int qedi_eh_host_reset(struct scsi_cmnd *cmd)
+{
+ struct Scsi_Host *shost = cmd->device->host;
+ struct qedi_ctx *qedi;
+
+ qedi = iscsi_host_priv(shost);
+
+ return qedi_recover_all_conns(qedi);
+}
+
+struct scsi_host_template qedi_host_template = {
+ .module = THIS_MODULE,
+ .name = "QLogic QEDI 25/40/100Gb iSCSI Initiator Driver",
+ .proc_name = QEDI_MODULE_NAME,
+ .queuecommand = iscsi_queuecommand,
+ .eh_abort_handler = iscsi_eh_abort,
+ .eh_device_reset_handler = iscsi_eh_device_reset,
+ .eh_target_reset_handler = iscsi_eh_recover_target,
+ .eh_host_reset_handler = qedi_eh_host_reset,
+ .target_alloc = iscsi_target_alloc,
+ .change_queue_depth = scsi_change_queue_depth,
+ .can_queue = QEDI_MAX_ISCSI_TASK,
+ .this_id = -1,
+ .sg_tablesize = QEDI_ISCSI_MAX_BDS_PER_CMD,
+ .max_sectors = 0xffff,
+ .cmd_per_lun = 128,
+ .use_clustering = ENABLE_CLUSTERING,
+ .shost_attrs = qedi_shost_attrs,
+};
+
+static void qedi_conn_free_login_resources(struct qedi_ctx *qedi,
+ struct qedi_conn *qedi_conn)
+{
+ if (qedi_conn->gen_pdu.resp_bd_tbl) {
+ dma_free_coherent(&qedi->pdev->dev, QEDI_PAGE_SIZE,
+ qedi_conn->gen_pdu.resp_bd_tbl,
+ qedi_conn->gen_pdu.resp_bd_dma);
+ qedi_conn->gen_pdu.resp_bd_tbl = NULL;
+ }
+
+ if (qedi_conn->gen_pdu.req_bd_tbl) {
+ dma_free_coherent(&qedi->pdev->dev, QEDI_PAGE_SIZE,
+ qedi_conn->gen_pdu.req_bd_tbl,
+ qedi_conn->gen_pdu.req_bd_dma);
+ qedi_conn->gen_pdu.req_bd_tbl = NULL;
+ }
+
+ if (qedi_conn->gen_pdu.resp_buf) {
+ dma_free_coherent(&qedi->pdev->dev,
+ ISCSI_DEF_MAX_RECV_SEG_LEN,
+ qedi_conn->gen_pdu.resp_buf,
+ qedi_conn->gen_pdu.resp_dma_addr);
+ qedi_conn->gen_pdu.resp_buf = NULL;
+ }
+
+ if (qedi_conn->gen_pdu.req_buf) {
+ dma_free_coherent(&qedi->pdev->dev,
+ ISCSI_DEF_MAX_RECV_SEG_LEN,
+ qedi_conn->gen_pdu.req_buf,
+ qedi_conn->gen_pdu.req_dma_addr);
+ qedi_conn->gen_pdu.req_buf = NULL;
+ }
+}
+
+static int qedi_conn_alloc_login_resources(struct qedi_ctx *qedi,
+ struct qedi_conn *qedi_conn)
+{
+ qedi_conn->gen_pdu.req_buf =
+ dma_alloc_coherent(&qedi->pdev->dev,
+ ISCSI_DEF_MAX_RECV_SEG_LEN,
+ &qedi_conn->gen_pdu.req_dma_addr,
+ GFP_KERNEL);
+ if (!qedi_conn->gen_pdu.req_buf)
+ goto login_req_buf_failure;
+
+ qedi_conn->gen_pdu.req_buf_size = 0;
+ qedi_conn->gen_pdu.req_wr_ptr = qedi_conn->gen_pdu.req_buf;
+
+ qedi_conn->gen_pdu.resp_buf =
+ dma_alloc_coherent(&qedi->pdev->dev,
+ ISCSI_DEF_MAX_RECV_SEG_LEN,
+ &qedi_conn->gen_pdu.resp_dma_addr,
+ GFP_KERNEL);
+ if (!qedi_conn->gen_pdu.resp_buf)
+ goto login_resp_buf_failure;
+
+ qedi_conn->gen_pdu.resp_buf_size = ISCSI_DEF_MAX_RECV_SEG_LEN;
+ qedi_conn->gen_pdu.resp_wr_ptr = qedi_conn->gen_pdu.resp_buf;
+
+ qedi_conn->gen_pdu.req_bd_tbl =
+ dma_alloc_coherent(&qedi->pdev->dev, QEDI_PAGE_SIZE,
+ &qedi_conn->gen_pdu.req_bd_dma, GFP_KERNEL);
+ if (!qedi_conn->gen_pdu.req_bd_tbl)
+ goto login_req_bd_tbl_failure;
+
+ qedi_conn->gen_pdu.resp_bd_tbl =
+ dma_alloc_coherent(&qedi->pdev->dev, QEDI_PAGE_SIZE,
+ &qedi_conn->gen_pdu.resp_bd_dma,
+ GFP_KERNEL);
+ if (!qedi_conn->gen_pdu.resp_bd_tbl)
+ goto login_resp_bd_tbl_failure;
+
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SESS,
+ "Allocation successful, cid=0x%x\n",
+ qedi_conn->iscsi_conn_id);
+ return 0;
+
+login_resp_bd_tbl_failure:
+ dma_free_coherent(&qedi->pdev->dev, QEDI_PAGE_SIZE,
+ qedi_conn->gen_pdu.req_bd_tbl,
+ qedi_conn->gen_pdu.req_bd_dma);
+ qedi_conn->gen_pdu.req_bd_tbl = NULL;
+
+login_req_bd_tbl_failure:
+ dma_free_coherent(&qedi->pdev->dev, ISCSI_DEF_MAX_RECV_SEG_LEN,
+ qedi_conn->gen_pdu.resp_buf,
+ qedi_conn->gen_pdu.resp_dma_addr);
+ qedi_conn->gen_pdu.resp_buf = NULL;
+login_resp_buf_failure:
+ dma_free_coherent(&qedi->pdev->dev, ISCSI_DEF_MAX_RECV_SEG_LEN,
+ qedi_conn->gen_pdu.req_buf,
+ qedi_conn->gen_pdu.req_dma_addr);
+ qedi_conn->gen_pdu.req_buf = NULL;
+login_req_buf_failure:
+ iscsi_conn_printk(KERN_ERR, qedi_conn->cls_conn->dd_data,
+ "login resource alloc failed!!\n");
+ return -ENOMEM;
+}
+
+static void qedi_destroy_cmd_pool(struct qedi_ctx *qedi,
+ struct iscsi_session *session)
+{
+ int i;
+
+ for (i = 0; i < session->cmds_max; i++) {
+ struct iscsi_task *task = session->cmds[i];
+ struct qedi_cmd *cmd = task->dd_data;
+
+ if (cmd->io_tbl.sge_tbl)
+ dma_free_coherent(&qedi->pdev->dev,
+ QEDI_ISCSI_MAX_BDS_PER_CMD *
+ sizeof(struct iscsi_sge),
+ cmd->io_tbl.sge_tbl,
+ cmd->io_tbl.sge_tbl_dma);
+
+ if (cmd->sense_buffer)
+ dma_free_coherent(&qedi->pdev->dev,
+ SCSI_SENSE_BUFFERSIZE,
+ cmd->sense_buffer,
+ cmd->sense_buffer_dma);
+ }
+}
+
+static int qedi_alloc_sget(struct qedi_ctx *qedi, struct iscsi_session *session,
+ struct qedi_cmd *cmd)
+{
+ struct qedi_io_bdt *io = &cmd->io_tbl;
+ struct iscsi_sge *sge;
+
+ io->sge_tbl = dma_alloc_coherent(&qedi->pdev->dev,
+ QEDI_ISCSI_MAX_BDS_PER_CMD *
+ sizeof(*sge),
+ &io->sge_tbl_dma, GFP_KERNEL);
+ if (!io->sge_tbl) {
+ iscsi_session_printk(KERN_ERR, session,
+ "Could not allocate BD table.\n");
+ return -ENOMEM;
+ }
+
+ io->sge_valid = 0;
+ return 0;
+}
+
+static int qedi_setup_cmd_pool(struct qedi_ctx *qedi,
+ struct iscsi_session *session)
+{
+ int i;
+
+ for (i = 0; i < session->cmds_max; i++) {
+ struct iscsi_task *task = session->cmds[i];
+ struct qedi_cmd *cmd = task->dd_data;
+
+ task->hdr = &cmd->hdr;
+ task->hdr_max = sizeof(struct iscsi_hdr);
+
+ if (qedi_alloc_sget(qedi, session, cmd))
+ goto free_sgets;
+
+ cmd->sense_buffer = dma_alloc_coherent(&qedi->pdev->dev,
+ SCSI_SENSE_BUFFERSIZE,
+ &cmd->sense_buffer_dma,
+ GFP_KERNEL);
+ if (!cmd->sense_buffer)
+ goto free_sgets;
+ }
+
+ return 0;
+
+free_sgets:
+ qedi_destroy_cmd_pool(qedi, session);
+ return -ENOMEM;
+}
+
+static struct iscsi_cls_session *
+qedi_session_create(struct iscsi_endpoint *ep, u16 cmds_max,
+ u16 qdepth, uint32_t initial_cmdsn)
+{
+ struct Scsi_Host *shost;
+ struct iscsi_cls_session *cls_session;
+ struct qedi_ctx *qedi;
+ struct qedi_endpoint *qedi_ep;
+
+ if (!ep)
+ return NULL;
+
+ qedi_ep = ep->dd_data;
+ shost = qedi_ep->qedi->shost;
+ qedi = iscsi_host_priv(shost);
+
+ if (cmds_max > qedi->max_sqes)
+ cmds_max = qedi->max_sqes;
+ else if (cmds_max < QEDI_SQ_WQES_MIN)
+ cmds_max = QEDI_SQ_WQES_MIN;
+
+ cls_session = iscsi_session_setup(&qedi_iscsi_transport, shost,
+ cmds_max, 0, sizeof(struct qedi_cmd),
+ initial_cmdsn, ISCSI_MAX_TARGET);
+ if (!cls_session) {
+ QEDI_ERR(&qedi->dbg_ctx,
+ "Failed to setup session for ep=%p\n", qedi_ep);
+ return NULL;
+ }
+
+ if (qedi_setup_cmd_pool(qedi, cls_session->dd_data)) {
+ QEDI_ERR(&qedi->dbg_ctx,
+ "Failed to setup cmd pool for ep=%p\n", qedi_ep);
+ goto session_teardown;
+ }
+
+ return cls_session;
+
+session_teardown:
+ iscsi_session_teardown(cls_session);
+ return NULL;
+}
+
+static void qedi_session_destroy(struct iscsi_cls_session *cls_session)
+{
+ struct iscsi_session *session = cls_session->dd_data;
+ struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
+ struct qedi_ctx *qedi = iscsi_host_priv(shost);
+
+ qedi_destroy_cmd_pool(qedi, session);
+ iscsi_session_teardown(cls_session);
+}
+
+static struct iscsi_cls_conn *
+qedi_conn_create(struct iscsi_cls_session *cls_session, uint32_t cid)
+{
+ struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
+ struct qedi_ctx *qedi = iscsi_host_priv(shost);
+ struct iscsi_cls_conn *cls_conn;
+ struct qedi_conn *qedi_conn;
+ struct iscsi_conn *conn;
+
+ cls_conn = iscsi_conn_setup(cls_session, sizeof(*qedi_conn),
+ cid);
+ if (!cls_conn) {
+ QEDI_ERR(&qedi->dbg_ctx,
+ "conn_new: iscsi conn setup failed, cid=0x%x, cls_sess=%p!\n",
+ cid, cls_session);
+ return NULL;
+ }
+
+ conn = cls_conn->dd_data;
+ qedi_conn = conn->dd_data;
+ qedi_conn->cls_conn = cls_conn;
+ qedi_conn->qedi = qedi;
+ qedi_conn->ep = NULL;
+ qedi_conn->active_cmd_count = 0;
+ INIT_LIST_HEAD(&qedi_conn->active_cmd_list);
+ spin_lock_init(&qedi_conn->list_lock);
+
+ if (qedi_conn_alloc_login_resources(qedi, qedi_conn)) {
+ iscsi_conn_printk(KERN_ALERT, conn,
+				  "conn_new: login resource alloc failed, cid=0x%x, cls_sess=%p!!\n",
+ cid, cls_session);
+ goto free_conn;
+ }
+
+ return cls_conn;
+
+free_conn:
+ iscsi_conn_teardown(cls_conn);
+ return NULL;
+}
+
+void qedi_mark_device_missing(struct iscsi_cls_session *cls_session)
+{
+ iscsi_block_session(cls_session);
+}
+
+void qedi_mark_device_available(struct iscsi_cls_session *cls_session)
+{
+ iscsi_unblock_session(cls_session);
+}
+
+static int qedi_bind_conn_to_iscsi_cid(struct qedi_ctx *qedi,
+ struct qedi_conn *qedi_conn)
+{
+ u32 iscsi_cid = qedi_conn->iscsi_conn_id;
+
+ if (qedi->cid_que.conn_cid_tbl[iscsi_cid]) {
+ iscsi_conn_printk(KERN_ALERT, qedi_conn->cls_conn->dd_data,
+ "conn bind - entry #%d not free\n",
+ iscsi_cid);
+ return -EBUSY;
+ }
+
+ qedi->cid_que.conn_cid_tbl[iscsi_cid] = qedi_conn;
+ return 0;
+}
+
+struct qedi_conn *qedi_get_conn_from_id(struct qedi_ctx *qedi, u32 iscsi_cid)
+{
+ if (!qedi->cid_que.conn_cid_tbl) {
+ QEDI_ERR(&qedi->dbg_ctx, "missing conn<->cid table\n");
+ return NULL;
+
+ } else if (iscsi_cid >= qedi->max_active_conns) {
+ QEDI_ERR(&qedi->dbg_ctx, "wrong cid #%d\n", iscsi_cid);
+ return NULL;
+ }
+ return qedi->cid_que.conn_cid_tbl[iscsi_cid];
+}
+
+static int qedi_conn_bind(struct iscsi_cls_session *cls_session,
+ struct iscsi_cls_conn *cls_conn,
+ u64 transport_fd, int is_leading)
+{
+ struct iscsi_conn *conn = cls_conn->dd_data;
+ struct qedi_conn *qedi_conn = conn->dd_data;
+ struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
+ struct qedi_ctx *qedi = iscsi_host_priv(shost);
+ struct qedi_endpoint *qedi_ep;
+ struct iscsi_endpoint *ep;
+
+ ep = iscsi_lookup_endpoint(transport_fd);
+ if (!ep)
+ return -EINVAL;
+
+ qedi_ep = ep->dd_data;
+ if ((qedi_ep->state == EP_STATE_TCP_FIN_RCVD) ||
+ (qedi_ep->state == EP_STATE_TCP_RST_RCVD))
+ return -EINVAL;
+
+ if (iscsi_conn_bind(cls_session, cls_conn, is_leading))
+ return -EINVAL;
+
+ qedi_ep->conn = qedi_conn;
+ qedi_conn->ep = qedi_ep;
+ qedi_conn->iscsi_conn_id = qedi_ep->iscsi_cid;
+ qedi_conn->fw_cid = qedi_ep->fw_cid;
+ qedi_conn->cmd_cleanup_req = 0;
+ qedi_conn->cmd_cleanup_cmpl = 0;
+
+ if (qedi_bind_conn_to_iscsi_cid(qedi, qedi_conn))
+ return -EINVAL;
+
+ spin_lock_init(&qedi_conn->tmf_work_lock);
+ INIT_LIST_HEAD(&qedi_conn->tmf_work_list);
+ init_waitqueue_head(&qedi_conn->wait_queue);
+ return 0;
+}
+
+static int qedi_iscsi_update_conn(struct qedi_ctx *qedi,
+ struct qedi_conn *qedi_conn)
+{
+ struct qed_iscsi_params_update *conn_info;
+ struct iscsi_cls_conn *cls_conn = qedi_conn->cls_conn;
+ struct iscsi_conn *conn = cls_conn->dd_data;
+ struct qedi_endpoint *qedi_ep;
+ int rval;
+
+ qedi_ep = qedi_conn->ep;
+
+ conn_info = kzalloc(sizeof(*conn_info), GFP_KERNEL);
+ if (!conn_info) {
+ QEDI_ERR(&qedi->dbg_ctx, "memory alloc failed\n");
+ return -ENOMEM;
+ }
+
+ conn_info->update_flag = 0;
+
+ if (conn->hdrdgst_en)
+ SET_FIELD(conn_info->update_flag,
+ ISCSI_CONN_UPDATE_RAMROD_PARAMS_HD_EN, true);
+ if (conn->datadgst_en)
+ SET_FIELD(conn_info->update_flag,
+ ISCSI_CONN_UPDATE_RAMROD_PARAMS_DD_EN, true);
+ if (conn->session->initial_r2t_en)
+ SET_FIELD(conn_info->update_flag,
+ ISCSI_CONN_UPDATE_RAMROD_PARAMS_INITIAL_R2T,
+ true);
+ if (conn->session->imm_data_en)
+ SET_FIELD(conn_info->update_flag,
+ ISCSI_CONN_UPDATE_RAMROD_PARAMS_IMMEDIATE_DATA,
+ true);
+
+ conn_info->max_seq_size = conn->session->max_burst;
+ conn_info->max_recv_pdu_length = conn->max_recv_dlength;
+ conn_info->max_send_pdu_length = conn->max_xmit_dlength;
+ conn_info->first_seq_length = conn->session->first_burst;
+ conn_info->exp_stat_sn = conn->exp_statsn;
+
+ rval = qedi_ops->update_conn(qedi->cdev, qedi_ep->handle,
+ conn_info);
+ if (rval) {
+ rval = -ENXIO;
+ QEDI_ERR(&qedi->dbg_ctx, "Could not update connection\n");
+ goto update_conn_err;
+ }
+
+ kfree(conn_info);
+ rval = 0;
+
+update_conn_err:
+ return rval;
+}
+
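+/* Derive the TCP MSS from the path MTU minus IP/TCP/VLAN/timestamp overhead */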
+static u16 qedi_calc_mss(u16 pmtu, u8 is_ipv6, u8 tcp_ts_en, u8 vlan_en)
+{
+ u16 mss = 0;
+ u16 hdrs = TCP_HDR_LEN;
+
+ if (is_ipv6)
+ hdrs += IPV6_HDR_LEN;
+ else
+ hdrs += IPV4_HDR_LEN;
+
+ if (vlan_en)
+ hdrs += VLAN_LEN;
+
+ mss = pmtu - hdrs;
+
+ if (tcp_ts_en)
+ mss -= TCP_OPTION_LEN;
+
+ if (!mss)
+ mss = DEF_MSS;
+
+ return mss;
+}
+
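+/*
+ * Populate TCP/IP offload parameters (addresses, window, keepalive,
+ * timers) and hand the connection to the firmware for offload.
+ */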
+static int qedi_iscsi_offload_conn(struct qedi_endpoint *qedi_ep)
+{
+ struct qedi_ctx *qedi = qedi_ep->qedi;
+ struct qed_iscsi_params_offload *conn_info;
+ int rval;
+ int i;
+
+ conn_info = kzalloc(sizeof(*conn_info), GFP_KERNEL);
+ if (!conn_info) {
+ QEDI_ERR(&qedi->dbg_ctx,
+ "Failed to allocate memory ep=%p\n", qedi_ep);
+ return -ENOMEM;
+ }
+
+ ether_addr_copy(conn_info->src.mac, qedi_ep->src_mac);
+ ether_addr_copy(conn_info->dst.mac, qedi_ep->dst_mac);
+
+ conn_info->src.ip[0] = ntohl(qedi_ep->src_addr[0]);
+ conn_info->dst.ip[0] = ntohl(qedi_ep->dst_addr[0]);
+
+ if (qedi_ep->ip_type == TCP_IPV4) {
+ conn_info->ip_version = 0;
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
+ "After ntohl: src_addr=%pI4, dst_addr=%pI4\n",
+ qedi_ep->src_addr, qedi_ep->dst_addr);
+ } else {
+ for (i = 1; i < 4; i++) {
+ conn_info->src.ip[i] = ntohl(qedi_ep->src_addr[i]);
+ conn_info->dst.ip[i] = ntohl(qedi_ep->dst_addr[i]);
+ }
+
+ conn_info->ip_version = 1;
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
+ "After ntohl: src_addr=%pI6, dst_addr=%pI6\n",
+ qedi_ep->src_addr, qedi_ep->dst_addr);
+ }
+
+ conn_info->src.port = qedi_ep->src_port;
+ conn_info->dst.port = qedi_ep->dst_port;
+
+ conn_info->layer_code = ISCSI_SLOW_PATH_LAYER_CODE;
+ conn_info->sq_pbl_addr = qedi_ep->sq_pbl_dma;
+ conn_info->vlan_id = qedi_ep->vlan_id;
+
+ SET_FIELD(conn_info->tcp_flags, TCP_OFFLOAD_PARAMS_TS_EN, 1);
+ SET_FIELD(conn_info->tcp_flags, TCP_OFFLOAD_PARAMS_DA_EN, 1);
+ SET_FIELD(conn_info->tcp_flags, TCP_OFFLOAD_PARAMS_DA_CNT_EN, 1);
+ SET_FIELD(conn_info->tcp_flags, TCP_OFFLOAD_PARAMS_KA_EN, 1);
+
+ conn_info->default_cq = (qedi_ep->fw_cid % 8);
+
+ conn_info->ka_max_probe_cnt = DEF_KA_MAX_PROBE_COUNT;
+ conn_info->dup_ack_theshold = 3;
+ conn_info->rcv_wnd = 65535;
+ conn_info->cwnd = DEF_MAX_CWND;
+
+ conn_info->ss_thresh = 65535;
+ conn_info->srtt = 300;
+ conn_info->rtt_var = 150;
+ conn_info->flow_label = 0;
+ conn_info->ka_timeout = DEF_KA_TIMEOUT;
+ conn_info->ka_interval = DEF_KA_INTERVAL;
+ conn_info->max_rt_time = DEF_MAX_RT_TIME;
+ conn_info->ttl = DEF_TTL;
+ conn_info->tos_or_tc = DEF_TOS;
+ conn_info->remote_port = qedi_ep->dst_port;
+ conn_info->local_port = qedi_ep->src_port;
+
+ conn_info->mss = qedi_calc_mss(qedi_ep->pmtu,
+ (qedi_ep->ip_type == TCP_IPV6),
+ 1, (qedi_ep->vlan_id != 0));
+
+ conn_info->rcv_wnd_scale = 4;
+ conn_info->ts_ticks_per_second = 1000;
+ conn_info->da_timeout_value = 200;
+ conn_info->ack_frequency = 2;
+
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
+ "Default cq index [%d], mss [%d]\n",
+ conn_info->default_cq, conn_info->mss);
+
+ rval = qedi_ops->offload_conn(qedi->cdev, qedi_ep->handle, conn_info);
+ if (rval)
+ QEDI_ERR(&qedi->dbg_ctx, "offload_conn returned %d, ep=%p\n",
+ rval, qedi_ep);
+
+ kfree(conn_info);
+ return rval;
+}
+
+static int qedi_conn_start(struct iscsi_cls_conn *cls_conn)
+{
+ struct iscsi_conn *conn = cls_conn->dd_data;
+ struct qedi_conn *qedi_conn = conn->dd_data;
+ struct qedi_ctx *qedi;
+ int rval;
+
+ qedi = qedi_conn->qedi;
+
+ rval = qedi_iscsi_update_conn(qedi, qedi_conn);
+ if (rval) {
+ iscsi_conn_printk(KERN_ALERT, conn,
+ "conn_start: FW oflload conn failed.\n");
+ rval = -EINVAL;
+ goto start_err;
+ }
+
+ clear_bit(QEDI_CONN_FW_CLEANUP, &qedi_conn->flags);
+ qedi_conn->abrt_conn = 0;
+
+ rval = iscsi_conn_start(cls_conn);
+ if (rval) {
+ iscsi_conn_printk(KERN_ALERT, conn,
+ "iscsi_conn_start: FW oflload conn failed!!\n");
+ }
+
+start_err:
+ return rval;
+}
+
+static void qedi_conn_destroy(struct iscsi_cls_conn *cls_conn)
+{
+ struct iscsi_conn *conn = cls_conn->dd_data;
+ struct qedi_conn *qedi_conn = conn->dd_data;
+ struct Scsi_Host *shost;
+ struct qedi_ctx *qedi;
+
+ shost = iscsi_session_to_shost(iscsi_conn_to_session(cls_conn));
+ qedi = iscsi_host_priv(shost);
+
+ qedi_conn_free_login_resources(qedi, qedi_conn);
+ iscsi_conn_teardown(cls_conn);
+}
+
+static int qedi_ep_get_param(struct iscsi_endpoint *ep,
+ enum iscsi_param param, char *buf)
+{
+ struct qedi_endpoint *qedi_ep = ep->dd_data;
+ int len;
+
+ if (!qedi_ep)
+ return -ENOTCONN;
+
+ switch (param) {
+ case ISCSI_PARAM_CONN_PORT:
+ len = sprintf(buf, "%hu\n", qedi_ep->dst_port);
+ break;
+ case ISCSI_PARAM_CONN_ADDRESS:
+ if (qedi_ep->ip_type == TCP_IPV4)
+ len = sprintf(buf, "%pI4\n", qedi_ep->dst_addr);
+ else
+ len = sprintf(buf, "%pI6\n", qedi_ep->dst_addr);
+ break;
+ default:
+ return -ENOTCONN;
+ }
+
+ return len;
+}
+
+static int qedi_host_get_param(struct Scsi_Host *shost,
+ enum iscsi_host_param param, char *buf)
+{
+ struct qedi_ctx *qedi;
+ int len;
+
+ qedi = iscsi_host_priv(shost);
+
+ switch (param) {
+ case ISCSI_HOST_PARAM_HWADDRESS:
+ len = sysfs_format_mac(buf, qedi->mac, 6);
+ break;
+ case ISCSI_HOST_PARAM_NETDEV_NAME:
+ len = sprintf(buf, "host%d\n", shost->host_no);
+ break;
+ case ISCSI_HOST_PARAM_IPADDRESS:
+ if (qedi->ip_type == TCP_IPV4)
+ len = sprintf(buf, "%pI4\n", qedi->src_ip);
+ else
+ len = sprintf(buf, "%pI6\n", qedi->src_ip);
+ break;
+ default:
+ return iscsi_host_get_param(shost, param, buf);
+ }
+
+ return len;
+}
+
+static void qedi_conn_get_stats(struct iscsi_cls_conn *cls_conn,
+ struct iscsi_stats *stats)
+{
+ struct iscsi_conn *conn = cls_conn->dd_data;
+ struct qed_iscsi_stats iscsi_stats;
+ struct Scsi_Host *shost;
+ struct qedi_ctx *qedi;
+
+ shost = iscsi_session_to_shost(iscsi_conn_to_session(cls_conn));
+ qedi = iscsi_host_priv(shost);
+ qedi_ops->get_stats(qedi->cdev, &iscsi_stats);
+
+ conn->txdata_octets = iscsi_stats.iscsi_tx_bytes_cnt;
+ conn->rxdata_octets = iscsi_stats.iscsi_rx_bytes_cnt;
+ conn->dataout_pdus_cnt = (uint32_t)iscsi_stats.iscsi_tx_data_pdu_cnt;
+ conn->datain_pdus_cnt = (uint32_t)iscsi_stats.iscsi_rx_data_pdu_cnt;
+ conn->r2t_pdus_cnt = (uint32_t)iscsi_stats.iscsi_rx_r2t_pdu_cnt;
+
+ stats->txdata_octets = conn->txdata_octets;
+ stats->rxdata_octets = conn->rxdata_octets;
+ stats->scsicmd_pdus = conn->scsicmd_pdus_cnt;
+ stats->dataout_pdus = conn->dataout_pdus_cnt;
+ stats->scsirsp_pdus = conn->scsirsp_pdus_cnt;
+ stats->datain_pdus = conn->datain_pdus_cnt;
+ stats->r2t_pdus = conn->r2t_pdus_cnt;
+ stats->tmfcmd_pdus = conn->tmfcmd_pdus_cnt;
+ stats->tmfrsp_pdus = conn->tmfrsp_pdus_cnt;
+ stats->digest_err = 0;
+ stats->timeout_err = 0;
+ strcpy(stats->custom[0].desc, "eh_abort_cnt");
+ stats->custom[0].value = conn->eh_abort_cnt;
+ stats->custom_length = 1;
+}
+
+static void qedi_iscsi_prep_generic_pdu_bd(struct qedi_conn *qedi_conn)
+{
+ struct iscsi_sge *bd_tbl;
+
+ bd_tbl = (struct iscsi_sge *)qedi_conn->gen_pdu.req_bd_tbl;
+
+ bd_tbl->sge_addr.hi =
+ (u32)((u64)qedi_conn->gen_pdu.req_dma_addr >> 32);
+ bd_tbl->sge_addr.lo = (u32)qedi_conn->gen_pdu.req_dma_addr;
+ bd_tbl->sge_len = qedi_conn->gen_pdu.req_wr_ptr -
+ qedi_conn->gen_pdu.req_buf;
+ bd_tbl->reserved0 = 0;
+ bd_tbl = (struct iscsi_sge *)qedi_conn->gen_pdu.resp_bd_tbl;
+ bd_tbl->sge_addr.hi =
+ (u32)((u64)qedi_conn->gen_pdu.resp_dma_addr >> 32);
+ bd_tbl->sge_addr.lo = (u32)qedi_conn->gen_pdu.resp_dma_addr;
+ bd_tbl->sge_len = ISCSI_DEF_MAX_RECV_SEG_LEN;
+ bd_tbl->reserved0 = 0;
+}
+
+static int qedi_iscsi_send_generic_request(struct iscsi_task *task)
+{
+ struct qedi_cmd *cmd = task->dd_data;
+ struct qedi_conn *qedi_conn = cmd->conn;
+ char *buf;
+ int data_len;
+ int rc = 0;
+
+ qedi_iscsi_prep_generic_pdu_bd(qedi_conn);
+ switch (task->hdr->opcode & ISCSI_OPCODE_MASK) {
+ case ISCSI_OP_LOGIN:
+ qedi_send_iscsi_login(qedi_conn, task);
+ break;
+ case ISCSI_OP_NOOP_OUT:
+ data_len = qedi_conn->gen_pdu.req_buf_size;
+ buf = qedi_conn->gen_pdu.req_buf;
+ if (data_len)
+ rc = qedi_send_iscsi_nopout(qedi_conn, task,
+ buf, data_len, 1);
+ else
+ rc = qedi_send_iscsi_nopout(qedi_conn, task,
+ NULL, 0, 1);
+ break;
+ case ISCSI_OP_LOGOUT:
+ rc = qedi_send_iscsi_logout(qedi_conn, task);
+ break;
+ case ISCSI_OP_SCSI_TMFUNC:
+ rc = qedi_iscsi_abort_work(qedi_conn, task);
+ break;
+ case ISCSI_OP_TEXT:
+ rc = qedi_send_iscsi_text(qedi_conn, task);
+ break;
+ default:
+ iscsi_conn_printk(KERN_ALERT, qedi_conn->cls_conn->dd_data,
+ "unsupported op 0x%x\n", task->hdr->opcode);
+ }
+
+ return rc;
+}
+
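+/* Transmit a non-SCSI (management) PDU: stage the payload in the
+ * pre-allocated generic-PDU buffer before building the request.
+ */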
+static int qedi_mtask_xmit(struct iscsi_conn *conn, struct iscsi_task *task)
+{
+ struct qedi_conn *qedi_conn = conn->dd_data;
+ struct qedi_cmd *cmd = task->dd_data;
+
+ memset(qedi_conn->gen_pdu.req_buf, 0, ISCSI_DEF_MAX_RECV_SEG_LEN);
+
+ qedi_conn->gen_pdu.req_buf_size = task->data_count;
+
+ if (task->data_count) {
+ memcpy(qedi_conn->gen_pdu.req_buf, task->data,
+ task->data_count);
+ qedi_conn->gen_pdu.req_wr_ptr =
+ qedi_conn->gen_pdu.req_buf + task->data_count;
+ }
+
+ cmd->conn = conn->dd_data;
+ cmd->scsi_cmd = NULL;
+ return qedi_iscsi_send_generic_request(task);
+}
+
+static int qedi_task_xmit(struct iscsi_task *task)
+{
+ struct iscsi_conn *conn = task->conn;
+ struct qedi_conn *qedi_conn = conn->dd_data;
+ struct qedi_cmd *cmd = task->dd_data;
+ struct scsi_cmnd *sc = task->sc;
+
+ cmd->state = 0;
+ cmd->task = NULL;
+ cmd->use_slowpath = false;
+ cmd->conn = qedi_conn;
+ cmd->task = task;
+ cmd->io_cmd_in_list = false;
+ INIT_LIST_HEAD(&cmd->io_cmd);
+
+ if (!sc)
+ return qedi_mtask_xmit(conn, task);
+
+ cmd->scsi_cmd = sc;
+ return qedi_iscsi_send_ioreq(task);
+}
+
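+/* ep_connect: allocate the endpoint and its SQ, acquire a firmware
+ * connection, then send ISCSI_KEVENT_PATH_REQ so userspace (iscsiuio)
+ * can resolve the path; the offload itself is kicked off later from
+ * qedi_set_path().
+ */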
+static struct iscsi_endpoint *
+qedi_ep_connect(struct Scsi_Host *shost, struct sockaddr *dst_addr,
+ int non_blocking)
+{
+ struct qedi_ctx *qedi;
+ struct iscsi_endpoint *ep;
+ struct qedi_endpoint *qedi_ep;
+ struct sockaddr_in *addr;
+ struct sockaddr_in6 *addr6;
+ struct qed_dev *cdev = NULL;
+ struct qedi_uio_dev *udev = NULL;
+ struct iscsi_path path_req;
+ u32 msg_type = ISCSI_KEVENT_IF_DOWN;
+ u32 iscsi_cid = QEDI_CID_RESERVED;
+ u16 len = 0;
+ char *buf = NULL;
+ int ret;
+
+ if (!shost) {
+ ret = -ENXIO;
+ QEDI_ERR(NULL, "shost is NULL\n");
+ return ERR_PTR(ret);
+ }
+
+ if (do_not_recover) {
+ ret = -ENOMEM;
+ return ERR_PTR(ret);
+ }
+
+ qedi = iscsi_host_priv(shost);
+ cdev = qedi->cdev;
+ udev = qedi->udev;
+
+ if (test_bit(QEDI_IN_OFFLINE, &qedi->flags) ||
+ test_bit(QEDI_IN_RECOVERY, &qedi->flags)) {
+ ret = -ENOMEM;
+ return ERR_PTR(ret);
+ }
+
+ ep = iscsi_create_endpoint(sizeof(struct qedi_endpoint));
+ if (!ep) {
+ QEDI_ERR(&qedi->dbg_ctx, "endpoint create fail\n");
+ ret = -ENOMEM;
+ return ERR_PTR(ret);
+ }
+ qedi_ep = ep->dd_data;
+ memset(qedi_ep, 0, sizeof(struct qedi_endpoint));
+ qedi_ep->state = EP_STATE_IDLE;
+ qedi_ep->iscsi_cid = (u32)-1;
+ qedi_ep->qedi = qedi;
+
+ if (dst_addr->sa_family == AF_INET) {
+ addr = (struct sockaddr_in *)dst_addr;
+ memcpy(qedi_ep->dst_addr, &addr->sin_addr.s_addr,
+ sizeof(struct in_addr));
+ qedi_ep->dst_port = ntohs(addr->sin_port);
+ qedi_ep->ip_type = TCP_IPV4;
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
+ "dst_addr=%pI4, dst_port=%u\n",
+ qedi_ep->dst_addr, qedi_ep->dst_port);
+ } else if (dst_addr->sa_family == AF_INET6) {
+ addr6 = (struct sockaddr_in6 *)dst_addr;
+ memcpy(qedi_ep->dst_addr, &addr6->sin6_addr,
+ sizeof(struct in6_addr));
+ qedi_ep->dst_port = ntohs(addr6->sin6_port);
+ qedi_ep->ip_type = TCP_IPV6;
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
+ "dst_addr=%pI6, dst_port=%u\n",
+ qedi_ep->dst_addr, qedi_ep->dst_port);
+ } else {
+ QEDI_ERR(&qedi->dbg_ctx, "Invalid endpoint\n");
+ }
+
+ if (atomic_read(&qedi->link_state) != QEDI_LINK_UP) {
+ QEDI_WARN(&qedi->dbg_ctx, "qedi link down\n");
+ ret = -ENXIO;
+ goto ep_conn_exit;
+ }
+
+ ret = qedi_alloc_sq(qedi, qedi_ep);
+ if (ret)
+ goto ep_conn_exit;
+
+ ret = qedi_ops->acquire_conn(qedi->cdev, &qedi_ep->handle,
+ &qedi_ep->fw_cid, &qedi_ep->p_doorbell);
+
+ if (ret) {
+ QEDI_ERR(&qedi->dbg_ctx, "Could not acquire connection\n");
+ ret = -ENXIO;
+ goto ep_free_sq;
+ }
+
+ iscsi_cid = qedi_ep->handle;
+ qedi_ep->iscsi_cid = iscsi_cid;
+
+ init_waitqueue_head(&qedi_ep->ofld_wait);
+ init_waitqueue_head(&qedi_ep->tcp_ofld_wait);
+ qedi_ep->state = EP_STATE_OFLDCONN_START;
+ qedi->ep_tbl[iscsi_cid] = qedi_ep;
+
+ buf = (char *)&path_req;
+ len = sizeof(path_req);
+ memset(&path_req, 0, len);
+
+ msg_type = ISCSI_KEVENT_PATH_REQ;
+ path_req.handle = (u64)qedi_ep->iscsi_cid;
+ path_req.pmtu = qedi->ll2_mtu;
+ qedi_ep->pmtu = qedi->ll2_mtu;
+ if (qedi_ep->ip_type == TCP_IPV4) {
+ memcpy(&path_req.dst.v4_addr, &qedi_ep->dst_addr,
+ sizeof(struct in_addr));
+ path_req.ip_addr_len = 4;
+ } else {
+ memcpy(&path_req.dst.v6_addr, &qedi_ep->dst_addr,
+ sizeof(struct in6_addr));
+ path_req.ip_addr_len = 16;
+ }
+
+ ret = iscsi_offload_mesg(shost, &qedi_iscsi_transport, msg_type, buf,
+ len);
+ if (ret) {
+ QEDI_ERR(&qedi->dbg_ctx,
+ "iscsi_offload_mesg() failed for cid=0x%x ret=%d\n",
+ iscsi_cid, ret);
+ goto ep_rel_conn;
+ }
+
+ atomic_inc(&qedi->num_offloads);
+ return ep;
+
+ep_rel_conn:
+ qedi->ep_tbl[iscsi_cid] = NULL;
+ ret = qedi_ops->release_conn(qedi->cdev, qedi_ep->handle);
+ if (ret)
+ QEDI_WARN(&qedi->dbg_ctx, "release_conn returned %d\n",
+ ret);
+ep_free_sq:
+ qedi_free_sq(qedi, qedi_ep);
+ep_conn_exit:
+ iscsi_destroy_endpoint(ep);
+ return ERR_PTR(ret);
+}
+
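+/* Poll the endpoint on behalf of open-iscsi: 1 means the offloaded
+ * connection is established, 0 means keep polling, negative values
+ * report failure or interruption.
+ */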
+static int qedi_ep_poll(struct iscsi_endpoint *ep, int timeout_ms)
+{
+ struct qedi_endpoint *qedi_ep;
+ int ret = 0;
+
+ if (do_not_recover)
+ return 1;
+
+ qedi_ep = ep->dd_data;
+ if (qedi_ep->state == EP_STATE_IDLE ||
+ qedi_ep->state == EP_STATE_OFLDCONN_FAILED)
+ return -1;
+
+ if (qedi_ep->state == EP_STATE_OFLDCONN_COMPL)
+ ret = 1;
+
+ ret = wait_event_interruptible_timeout(qedi_ep->ofld_wait,
+ QEDI_OFLD_WAIT_STATE(qedi_ep),
+ msecs_to_jiffies(timeout_ms));
+
+ if (qedi_ep->state == EP_STATE_OFLDCONN_FAILED)
+ ret = -1;
+
+ if (ret > 0)
+ return 1;
+ else if (!ret)
+ return 0;
+ else
+ return ret;
+}
+
+static void qedi_cleanup_active_cmd_list(struct qedi_conn *qedi_conn)
+{
+ struct qedi_cmd *cmd, *cmd_tmp;
+
+ list_for_each_entry_safe(cmd, cmd_tmp, &qedi_conn->active_cmd_list,
+ io_cmd) {
+ list_del_init(&cmd->io_cmd);
+ qedi_conn->active_cmd_count--;
+ }
+}
+
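+/* Tear down an offloaded connection: wait out any firmware cleanup in
+ * progress, destroy the TCP connection (abortive if commands are still
+ * active) and release the firmware connection handle and resources.
+ */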
+static void qedi_ep_disconnect(struct iscsi_endpoint *ep)
+{
+ struct qedi_endpoint *qedi_ep;
+ struct qedi_conn *qedi_conn = NULL;
+ struct iscsi_conn *conn = NULL;
+ struct qedi_ctx *qedi;
+ int ret = 0;
+ int wait_delay = 20 * HZ;
+ int abrt_conn = 0;
+ int count = 10;
+
+ qedi_ep = ep->dd_data;
+ qedi = qedi_ep->qedi;
+
+ flush_work(&qedi_ep->offload_work);
+
+ if (qedi_ep->conn) {
+ qedi_conn = qedi_ep->conn;
+ conn = qedi_conn->cls_conn->dd_data;
+ iscsi_suspend_queue(conn);
+ abrt_conn = qedi_conn->abrt_conn;
+
+ while (count--) {
+ if (!test_bit(QEDI_CONN_FW_CLEANUP,
+ &qedi_conn->flags)) {
+ break;
+ }
+ msleep(1000);
+ }
+
+ if (test_bit(QEDI_IN_RECOVERY, &qedi->flags)) {
+ if (do_not_recover) {
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
+ "Do not recover cid=0x%x\n",
+ qedi_ep->iscsi_cid);
+ goto ep_exit_recover;
+ }
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
+ "Reset recovery cid=0x%x, qedi_ep=%p, state=0x%x\n",
+ qedi_ep->iscsi_cid, qedi_ep, qedi_ep->state);
+ qedi_cleanup_active_cmd_list(qedi_conn);
+ goto ep_release_conn;
+ }
+ }
+
+ if (do_not_recover)
+ goto ep_exit_recover;
+
+ switch (qedi_ep->state) {
+ case EP_STATE_OFLDCONN_START:
+ goto ep_release_conn;
+ case EP_STATE_OFLDCONN_FAILED:
+ break;
+ case EP_STATE_OFLDCONN_COMPL:
+ if (unlikely(!qedi_conn))
+ break;
+
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
+ "Active cmd count=%d, abrt_conn=%d, ep state=0x%x, cid=0x%x, qedi_conn=%p\n",
+ qedi_conn->active_cmd_count, abrt_conn,
+ qedi_ep->state,
+ qedi_ep->iscsi_cid,
+ qedi_ep->conn
+ );
+
+ if (!qedi_conn->active_cmd_count)
+ abrt_conn = 0;
+ else
+ abrt_conn = 1;
+
+ if (abrt_conn)
+ qedi_clearsq(qedi, qedi_conn, NULL);
+ break;
+ default:
+ break;
+ }
+
+ qedi_ep->state = EP_STATE_DISCONN_START;
+ ret = qedi_ops->destroy_conn(qedi->cdev, qedi_ep->handle, abrt_conn);
+ if (ret) {
+ QEDI_WARN(&qedi->dbg_ctx,
+ "destroy_conn failed returned %d\n", ret);
+ } else {
+ ret = wait_event_interruptible_timeout(
+ qedi_ep->tcp_ofld_wait,
+ (qedi_ep->state !=
+ EP_STATE_DISCONN_START),
+ wait_delay);
+ if ((ret <= 0) || (qedi_ep->state == EP_STATE_DISCONN_START)) {
+ QEDI_WARN(&qedi->dbg_ctx,
+ "Destroy conn timedout or interrupted, ret=%d, delay=%d, cid=0x%x\n",
+ ret, wait_delay, qedi_ep->iscsi_cid);
+ }
+ }
+
+ep_release_conn:
+ ret = qedi_ops->release_conn(qedi->cdev, qedi_ep->handle);
+ if (ret)
+ QEDI_WARN(&qedi->dbg_ctx,
+ "release_conn returned %d, cid=0x%x\n",
+ ret, qedi_ep->iscsi_cid);
+ep_exit_recover:
+ qedi_ep->state = EP_STATE_IDLE;
+ qedi->ep_tbl[qedi_ep->iscsi_cid] = NULL;
+ qedi->cid_que.conn_cid_tbl[qedi_ep->iscsi_cid] = NULL;
+ qedi_free_id(&qedi->lcl_port_tbl, qedi_ep->src_port);
+ qedi_free_sq(qedi, qedi_ep);
+
+ if (qedi_conn)
+ qedi_conn->ep = NULL;
+
+ qedi_ep->conn = NULL;
+ qedi_ep->qedi = NULL;
+ atomic_dec(&qedi->num_offloads);
+
+ iscsi_destroy_endpoint(ep);
+}
+
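+/* Transmit the frame that iscsiuio staged in the shared tx buffer over
+ * the qed light-L2 interface, tagging it with the given VLAN id.
+ */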
+static int qedi_data_avail(struct qedi_ctx *qedi, u16 vlanid)
+{
+ struct qed_dev *cdev = qedi->cdev;
+ struct qedi_uio_dev *udev;
+ struct qedi_uio_ctrl *uctrl;
+ struct sk_buff *skb;
+ u32 len;
+ int rc = 0;
+
+ udev = qedi->udev;
+ if (!udev) {
+ QEDI_ERR(&qedi->dbg_ctx, "udev is NULL.\n");
+ return -EINVAL;
+ }
+
+ uctrl = (struct qedi_uio_ctrl *)udev->uctrl;
+ if (!uctrl) {
+ QEDI_ERR(&qedi->dbg_ctx, "uctlr is NULL.\n");
+ return -EINVAL;
+ }
+
+ len = uctrl->host_tx_pkt_len;
+ if (!len) {
+ QEDI_ERR(&qedi->dbg_ctx, "Invalid len %u\n", len);
+ return -EINVAL;
+ }
+
+ skb = alloc_skb(len, GFP_ATOMIC);
+ if (!skb) {
+ QEDI_ERR(&qedi->dbg_ctx, "alloc_skb failed\n");
+ return -EINVAL;
+ }
+
+ skb_put(skb, len);
+ memcpy(skb->data, udev->tx_pkt, len);
+ skb->ip_summed = CHECKSUM_NONE;
+
+ if (vlanid)
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlanid);
+
+ rc = qedi_ops->ll2->start_xmit(cdev, skb);
+ if (rc) {
+ QEDI_ERR(&qedi->dbg_ctx, "ll2 start_xmit returned %d\n",
+ rc);
+ kfree_skb(skb);
+ }
+
+ uctrl->host_tx_pkt_len = 0;
+ uctrl->hw_tx_cons++;
+
+ return rc;
+}
+
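+/* Worker queued from qedi_set_path(): issue the offload request and
+ * wait for the firmware's connect-complete event to move the endpoint
+ * to EP_STATE_OFLDCONN_COMPL.
+ */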
+static void qedi_offload_work(struct work_struct *work)
+{
+ struct qedi_endpoint *qedi_ep =
+ container_of(work, struct qedi_endpoint, offload_work);
+ struct qedi_ctx *qedi;
+ int wait_delay = 20 * HZ;
+ int ret;
+
+ qedi = qedi_ep->qedi;
+
+ ret = qedi_iscsi_offload_conn(qedi_ep);
+ if (ret) {
+ QEDI_ERR(&qedi->dbg_ctx,
+ "offload error: iscsi_cid=%u, qedi_ep=%p, ret=%d\n",
+ qedi_ep->iscsi_cid, qedi_ep, ret);
+ qedi_ep->state = EP_STATE_OFLDCONN_FAILED;
+ return;
+ }
+
+ ret = wait_event_interruptible_timeout(qedi_ep->tcp_ofld_wait,
+ (qedi_ep->state ==
+ EP_STATE_OFLDCONN_COMPL),
+ wait_delay);
+ if ((ret <= 0) || (qedi_ep->state != EP_STATE_OFLDCONN_COMPL)) {
+ qedi_ep->state = EP_STATE_OFLDCONN_FAILED;
+ QEDI_ERR(&qedi->dbg_ctx,
+ "Offload conn TIMEOUT iscsi_cid=%u, qedi_ep=%p\n",
+ qedi_ep->iscsi_cid, qedi_ep);
+ }
+}
+
+static int qedi_set_path(struct Scsi_Host *shost, struct iscsi_path *path_data)
+{
+ struct qedi_ctx *qedi;
+ struct qedi_endpoint *qedi_ep;
+ int ret = 0;
+ u32 iscsi_cid;
+ u16 port_id = 0;
+
+ if (!shost) {
+ ret = -ENXIO;
+ QEDI_ERR(NULL, "shost is NULL\n");
+ return ret;
+ }
+
+ if (strcmp(shost->hostt->proc_name, "qedi")) {
+ ret = -ENXIO;
+ QEDI_ERR(NULL, "shost %s is invalid\n",
+ shost->hostt->proc_name);
+ return ret;
+ }
+
+ qedi = iscsi_host_priv(shost);
+ if (path_data->handle == QEDI_PATH_HANDLE) {
+ ret = qedi_data_avail(qedi, path_data->vlan_id);
+ goto set_path_exit;
+ }
+
+ iscsi_cid = (u32)path_data->handle;
+ qedi_ep = qedi->ep_tbl[iscsi_cid];
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
+ "iscsi_cid=0x%x, qedi_ep=%p\n", iscsi_cid, qedi_ep);
+
+ if (!is_valid_ether_addr(&path_data->mac_addr[0])) {
+ QEDI_NOTICE(&qedi->dbg_ctx, "dst mac NOT VALID\n");
+ ret = -EIO;
+ goto set_path_exit;
+ }
+
+ ether_addr_copy(&qedi_ep->src_mac[0], &qedi->mac[0]);
+ ether_addr_copy(&qedi_ep->dst_mac[0], &path_data->mac_addr[0]);
+
+ qedi_ep->vlan_id = path_data->vlan_id;
+ if (path_data->pmtu < DEF_PATH_MTU) {
+ qedi_ep->pmtu = qedi->ll2_mtu;
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
+ "MTU cannot be %u, using default MTU %u\n",
+ path_data->pmtu, qedi_ep->pmtu);
+ }
+
+ if (path_data->pmtu != qedi->ll2_mtu) {
+ if (path_data->pmtu > JUMBO_MTU) {
+ ret = -EINVAL;
+ QEDI_ERR(NULL, "Invalid MTU %u\n", path_data->pmtu);
+ goto set_path_exit;
+ }
+
+ qedi_reset_host_mtu(qedi, path_data->pmtu);
+ qedi_ep->pmtu = qedi->ll2_mtu;
+ }
+
+ port_id = qedi_ep->src_port;
+ if (port_id >= QEDI_LOCAL_PORT_MIN &&
+ port_id < QEDI_LOCAL_PORT_MAX) {
+ if (qedi_alloc_id(&qedi->lcl_port_tbl, port_id))
+ port_id = 0;
+ } else {
+ port_id = 0;
+ }
+
+ if (!port_id) {
+ port_id = qedi_alloc_new_id(&qedi->lcl_port_tbl);
+ if (port_id == QEDI_LOCAL_PORT_INVALID) {
+ QEDI_ERR(&qedi->dbg_ctx,
+ "Failed to allocate port id for iscsi_cid=0x%x\n",
+ iscsi_cid);
+ ret = -ENOMEM;
+ goto set_path_exit;
+ }
+ }
+
+ qedi_ep->src_port = port_id;
+
+ if (qedi_ep->ip_type == TCP_IPV4) {
+ memcpy(&qedi_ep->src_addr[0], &path_data->src.v4_addr,
+ sizeof(struct in_addr));
+ memcpy(&qedi->src_ip[0], &path_data->src.v4_addr,
+ sizeof(struct in_addr));
+ qedi->ip_type = TCP_IPV4;
+
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
+ "src addr:port=%pI4:%u, dst addr:port=%pI4:%u\n",
+ qedi_ep->src_addr, qedi_ep->src_port,
+ qedi_ep->dst_addr, qedi_ep->dst_port);
+ } else {
+ memcpy(&qedi_ep->src_addr[0], &path_data->src.v6_addr,
+ sizeof(struct in6_addr));
+ memcpy(&qedi->src_ip[0], &path_data->src.v6_addr,
+ sizeof(struct in6_addr));
+ qedi->ip_type = TCP_IPV6;
+
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
+ "src addr:port=%pI6:%u, dst addr:port=%pI6:%u\n",
+ qedi_ep->src_addr, qedi_ep->src_port,
+ qedi_ep->dst_addr, qedi_ep->dst_port);
+ }
+
+ INIT_WORK(&qedi_ep->offload_work, qedi_offload_work);
+ queue_work(qedi->offload_thread, &qedi_ep->offload_work);
+
+ ret = 0;
+
+set_path_exit:
+ return ret;
+}
+
+static umode_t qedi_attr_is_visible(int param_type, int param)
+{
+ switch (param_type) {
+ case ISCSI_HOST_PARAM:
+ switch (param) {
+ case ISCSI_HOST_PARAM_NETDEV_NAME:
+ case ISCSI_HOST_PARAM_HWADDRESS:
+ case ISCSI_HOST_PARAM_IPADDRESS:
+ return 0444;
+ default:
+ return 0;
+ }
+ case ISCSI_PARAM:
+ switch (param) {
+ case ISCSI_PARAM_MAX_RECV_DLENGTH:
+ case ISCSI_PARAM_MAX_XMIT_DLENGTH:
+ case ISCSI_PARAM_HDRDGST_EN:
+ case ISCSI_PARAM_DATADGST_EN:
+ case ISCSI_PARAM_CONN_ADDRESS:
+ case ISCSI_PARAM_CONN_PORT:
+ case ISCSI_PARAM_EXP_STATSN:
+ case ISCSI_PARAM_PERSISTENT_ADDRESS:
+ case ISCSI_PARAM_PERSISTENT_PORT:
+ case ISCSI_PARAM_PING_TMO:
+ case ISCSI_PARAM_RECV_TMO:
+ case ISCSI_PARAM_INITIAL_R2T_EN:
+ case ISCSI_PARAM_MAX_R2T:
+ case ISCSI_PARAM_IMM_DATA_EN:
+ case ISCSI_PARAM_FIRST_BURST:
+ case ISCSI_PARAM_MAX_BURST:
+ case ISCSI_PARAM_PDU_INORDER_EN:
+ case ISCSI_PARAM_DATASEQ_INORDER_EN:
+ case ISCSI_PARAM_ERL:
+ case ISCSI_PARAM_TARGET_NAME:
+ case ISCSI_PARAM_TPGT:
+ case ISCSI_PARAM_USERNAME:
+ case ISCSI_PARAM_PASSWORD:
+ case ISCSI_PARAM_USERNAME_IN:
+ case ISCSI_PARAM_PASSWORD_IN:
+ case ISCSI_PARAM_FAST_ABORT:
+ case ISCSI_PARAM_ABORT_TMO:
+ case ISCSI_PARAM_LU_RESET_TMO:
+ case ISCSI_PARAM_TGT_RESET_TMO:
+ case ISCSI_PARAM_IFACE_NAME:
+ case ISCSI_PARAM_INITIATOR_NAME:
+ case ISCSI_PARAM_BOOT_ROOT:
+ case ISCSI_PARAM_BOOT_NIC:
+ case ISCSI_PARAM_BOOT_TARGET:
+ return 0444;
+ default:
+ return 0;
+ }
+ }
+
+ return 0;
+}
+
+static void qedi_cleanup_task(struct iscsi_task *task)
+{
+ if (!task->sc || task->state == ISCSI_TASK_PENDING) {
+ QEDI_INFO(NULL, QEDI_LOG_IO, "Returning ref_cnt=%d\n",
+ atomic_read(&task->refcount));
+ return;
+ }
+
+ qedi_iscsi_unmap_sg_list(task->dd_data);
+}
+
+struct iscsi_transport qedi_iscsi_transport = {
+ .owner = THIS_MODULE,
+ .name = QEDI_MODULE_NAME,
+ .caps = CAP_RECOVERY_L0 | CAP_HDRDGST | CAP_MULTI_R2T | CAP_DATADGST |
+ CAP_DATA_PATH_OFFLOAD | CAP_TEXT_NEGO,
+ .create_session = qedi_session_create,
+ .destroy_session = qedi_session_destroy,
+ .create_conn = qedi_conn_create,
+ .bind_conn = qedi_conn_bind,
+ .start_conn = qedi_conn_start,
+ .stop_conn = iscsi_conn_stop,
+ .destroy_conn = qedi_conn_destroy,
+ .set_param = iscsi_set_param,
+ .get_ep_param = qedi_ep_get_param,
+ .get_conn_param = iscsi_conn_get_param,
+ .get_session_param = iscsi_session_get_param,
+ .get_host_param = qedi_host_get_param,
+ .send_pdu = iscsi_conn_send_pdu,
+ .get_stats = qedi_conn_get_stats,
+ .xmit_task = qedi_task_xmit,
+ .cleanup_task = qedi_cleanup_task,
+ .session_recovery_timedout = iscsi_session_recovery_timedout,
+ .ep_connect = qedi_ep_connect,
+ .ep_poll = qedi_ep_poll,
+ .ep_disconnect = qedi_ep_disconnect,
+ .set_path = qedi_set_path,
+ .attr_is_visible = qedi_attr_is_visible,
+};
+
+void qedi_start_conn_recovery(struct qedi_ctx *qedi,
+ struct qedi_conn *qedi_conn)
+{
+ struct iscsi_cls_session *cls_sess;
+ struct iscsi_cls_conn *cls_conn;
+ struct iscsi_conn *conn;
+
+ cls_conn = qedi_conn->cls_conn;
+ conn = cls_conn->dd_data;
+ cls_sess = iscsi_conn_to_session(cls_conn);
+
+ if (iscsi_is_session_online(cls_sess)) {
+ qedi_conn->abrt_conn = 1;
+ QEDI_ERR(&qedi->dbg_ctx,
+ "Failing connection, state=0x%x, cid=0x%x\n",
+ conn->session->state, qedi_conn->iscsi_conn_id);
+ iscsi_conn_failure(qedi_conn->cls_conn->dd_data,
+ ISCSI_ERR_CONN_FAILED);
+ }
+}
+
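+/* Map firmware iSCSI error codes to human-readable strings; unknown
+ * codes are reported as "Unknown error" by the caller.
+ */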
+static const struct {
+ enum iscsi_error_types error_code;
+ char *err_string;
+} qedi_iscsi_error[] = {
+ { ISCSI_STATUS_NONE,
+ "tcp_error none"
+ },
+ { ISCSI_CONN_ERROR_TASK_CID_MISMATCH,
+ "task cid mismatch"
+ },
+ { ISCSI_CONN_ERROR_TASK_NOT_VALID,
+ "invalid task"
+ },
+ { ISCSI_CONN_ERROR_RQ_RING_IS_FULL,
+ "rq ring full"
+ },
+ { ISCSI_CONN_ERROR_CMDQ_RING_IS_FULL,
+ "cmdq ring full"
+ },
+ { ISCSI_CONN_ERROR_HQE_CACHING_FAILED,
+ "sge caching failed"
+ },
+ { ISCSI_CONN_ERROR_HEADER_DIGEST_ERROR,
+ "hdr digest error"
+ },
+ { ISCSI_CONN_ERROR_LOCAL_COMPLETION_ERROR,
+ "local cmpl error"
+ },
+ { ISCSI_CONN_ERROR_DATA_OVERRUN,
+ "invalid task"
+ },
+ { ISCSI_CONN_ERROR_OUT_OF_SGES_ERROR,
+ "out of sge error"
+ },
+ { ISCSI_CONN_ERROR_TCP_SEG_PROC_IP_OPTIONS_ERROR,
+ "tcp seg ip options error"
+ },
+ { ISCSI_CONN_ERROR_TCP_IP_FRAGMENT_ERROR,
+ "tcp ip fragment error"
+ },
+ { ISCSI_CONN_ERROR_PROTOCOL_ERR_AHS_LEN,
+ "AHS len protocol error"
+ },
+ { ISCSI_CONN_ERROR_PROTOCOL_ERR_ITT_OUT_OF_RANGE,
+ "itt out of range error"
+ },
+ { ISCSI_CONN_ERROR_PROTOCOL_ERR_DATA_SEG_LEN_EXCEEDS_PDU_SIZE,
+ "data seg more than pdu size"
+ },
+ { ISCSI_CONN_ERROR_PROTOCOL_ERR_INVALID_OPCODE,
+ "invalid opcode"
+ },
+ { ISCSI_CONN_ERROR_PROTOCOL_ERR_INVALID_OPCODE_BEFORE_UPDATE,
+ "invalid opcode before update"
+ },
+ { ISCSI_CONN_ERROR_UNVALID_NOPIN_DSL,
+ "unexpected opcode"
+ },
+ { ISCSI_CONN_ERROR_PROTOCOL_ERR_R2T_CARRIES_NO_DATA,
+ "r2t carries no data"
+ },
+ { ISCSI_CONN_ERROR_PROTOCOL_ERR_DATA_SN,
+ "data sn error"
+ },
+ { ISCSI_CONN_ERROR_PROTOCOL_ERR_DATA_IN_TTT,
+ "data TTT error"
+ },
+ { ISCSI_CONN_ERROR_PROTOCOL_ERR_R2T_TTT,
+ "r2t TTT error"
+ },
+ { ISCSI_CONN_ERROR_PROTOCOL_ERR_R2T_BUFFER_OFFSET,
+ "buffer offset error"
+ },
+ { ISCSI_CONN_ERROR_PROTOCOL_ERR_BUFFER_OFFSET_OOO,
+ "buffer offset ooo"
+ },
+ { ISCSI_CONN_ERROR_PROTOCOL_ERR_R2T_SN,
+ "data seg len 0"
+ },
+ { ISCSI_CONN_ERROR_PROTOCOL_ERR_DESIRED_DATA_TRNS_LEN_0,
+ "data xer len error"
+ },
+ { ISCSI_CONN_ERROR_PROTOCOL_ERR_DESIRED_DATA_TRNS_LEN_1,
+ "data xer len1 error"
+ },
+ { ISCSI_CONN_ERROR_PROTOCOL_ERR_DESIRED_DATA_TRNS_LEN_2,
+ "data xer len2 error"
+ },
+ { ISCSI_CONN_ERROR_PROTOCOL_ERR_LUN,
+ "protocol lun error"
+ },
+ { ISCSI_CONN_ERROR_PROTOCOL_ERR_F_BIT_ZERO,
+ "f bit zero error"
+ },
+ { ISCSI_CONN_ERROR_PROTOCOL_ERR_EXP_STAT_SN,
+ "exp stat sn error"
+ },
+ { ISCSI_CONN_ERROR_PROTOCOL_ERR_DSL_NOT_ZERO,
+ "dsl not zero error"
+ },
+ { ISCSI_CONN_ERROR_PROTOCOL_ERR_INVALID_DSL,
+ "invalid dsl"
+ },
+ { ISCSI_CONN_ERROR_PROTOCOL_ERR_DATA_SEG_LEN_TOO_BIG,
+ "data seg len too big"
+ },
+ { ISCSI_CONN_ERROR_PROTOCOL_ERR_OUTSTANDING_R2T_COUNT,
+ "outstanding r2t count error"
+ },
+ { ISCSI_CONN_ERROR_SENSE_DATA_LENGTH,
+ "sense datalen error"
+ },
+};
+
+char *qedi_get_iscsi_error(enum iscsi_error_types err_code)
+{
+ int i;
+ char *msg = NULL;
+
+ for (i = 0; i < ARRAY_SIZE(qedi_iscsi_error); i++) {
+ if (qedi_iscsi_error[i].error_code == err_code) {
+ msg = qedi_iscsi_error[i].err_string;
+ break;
+ }
+ }
+ return msg;
+}
+
+void qedi_process_iscsi_error(struct qedi_endpoint *ep, struct async_data *data)
+{
+ struct qedi_conn *qedi_conn;
+ struct qedi_ctx *qedi;
+ char warn_notice[] = "iscsi_warning";
+ char error_notice[] = "iscsi_error";
+ char unknown_msg[] = "Unknown error";
+ char *message;
+ int need_recovery = 0;
+ u32 err_mask = 0;
+ char *msg;
+
+ if (!ep)
+ return;
+
+ qedi_conn = ep->conn;
+ if (!qedi_conn)
+ return;
+
+ qedi = ep->qedi;
+
+ QEDI_ERR(&qedi->dbg_ctx, "async event iscsi error:0x%x\n",
+ data->error_code);
+
+ if (err_mask) {
+ need_recovery = 0;
+ message = warn_notice;
+ } else {
+ need_recovery = 1;
+ message = error_notice;
+ }
+
+ msg = qedi_get_iscsi_error(data->error_code);
+ if (!msg) {
+ need_recovery = 0;
+ msg = unknown_msg;
+ }
+
+ iscsi_conn_printk(KERN_ALERT,
+ qedi_conn->cls_conn->dd_data,
+ "qedi: %s - %s\n", message, msg);
+
+ if (need_recovery)
+ qedi_start_conn_recovery(qedi_conn->qedi, qedi_conn);
+}
+
+void qedi_process_tcp_error(struct qedi_endpoint *ep, struct async_data *data)
+{
+ struct qedi_conn *qedi_conn;
+
+ if (!ep)
+ return;
+
+ qedi_conn = ep->conn;
+ if (!qedi_conn)
+ return;
+
+ QEDI_ERR(&ep->qedi->dbg_ctx, "async event TCP error:0x%x\n",
+ data->error_code);
+
+ qedi_start_conn_recovery(qedi_conn->qedi, qedi_conn);
+}
diff --git a/drivers/scsi/qedi/qedi_iscsi.h b/drivers/scsi/qedi/qedi_iscsi.h
new file mode 100644
index 000000000000..d3c06bbddb4e
--- /dev/null
+++ b/drivers/scsi/qedi/qedi_iscsi.h
@@ -0,0 +1,232 @@
+/*
+ * QLogic iSCSI Offload Driver
+ * Copyright (c) 2016 Cavium Inc.
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#ifndef _QEDI_ISCSI_H_
+#define _QEDI_ISCSI_H_
+
+#include <linux/socket.h>
+#include <linux/completion.h>
+#include "qedi.h"
+
+#define ISCSI_MAX_SESS_PER_HBA 4096
+
+#define DEF_KA_TIMEOUT 7200000
+#define DEF_KA_INTERVAL 10000
+#define DEF_KA_MAX_PROBE_COUNT 10
+#define DEF_TOS 0
+#define DEF_TTL 0xfe
+#define DEF_SND_SEQ_SCALE 0
+#define DEF_RCV_BUF 0xffff
+#define DEF_SND_BUF 0xffff
+#define DEF_SEED 0
+#define DEF_MAX_RT_TIME 8000
+#define DEF_MAX_DA_COUNT 2
+#define DEF_SWS_TIMER 1000
+#define DEF_MAX_CWND 2
+#define DEF_PATH_MTU 1500
+#define DEF_MSS 1460
+#define DEF_LL2_MTU 1560
+#define JUMBO_MTU 9000
+
+#define MIN_MTU 576 /* rfc 793 */
+#define IPV4_HDR_LEN 20
+#define IPV6_HDR_LEN 40
+#define TCP_HDR_LEN 20
+#define TCP_OPTION_LEN 12
+#define VLAN_LEN 4
+
+enum {
+ EP_STATE_IDLE = 0x0,
+ EP_STATE_ACQRCONN_START = 0x1,
+ EP_STATE_ACQRCONN_COMPL = 0x2,
+ EP_STATE_OFLDCONN_START = 0x4,
+ EP_STATE_OFLDCONN_COMPL = 0x8,
+ EP_STATE_DISCONN_START = 0x10,
+ EP_STATE_DISCONN_COMPL = 0x20,
+ EP_STATE_CLEANUP_START = 0x40,
+ EP_STATE_CLEANUP_CMPL = 0x80,
+ EP_STATE_TCP_FIN_RCVD = 0x100,
+ EP_STATE_TCP_RST_RCVD = 0x200,
+ EP_STATE_LOGOUT_SENT = 0x400,
+ EP_STATE_LOGOUT_RESP_RCVD = 0x800,
+ EP_STATE_CLEANUP_FAILED = 0x1000,
+ EP_STATE_OFLDCONN_FAILED = 0x2000,
+ EP_STATE_CONNECT_FAILED = 0x4000,
+ EP_STATE_DISCONN_TIMEDOUT = 0x8000,
+};
+
+struct qedi_conn;
+
+struct qedi_endpoint {
+ struct qedi_ctx *qedi;
+ u32 dst_addr[4];
+ u32 src_addr[4];
+ u16 src_port;
+ u16 dst_port;
+ u16 vlan_id;
+ u16 pmtu;
+ u8 src_mac[ETH_ALEN];
+ u8 dst_mac[ETH_ALEN];
+ u8 ip_type;
+ int state;
+ wait_queue_head_t ofld_wait;
+ wait_queue_head_t tcp_ofld_wait;
+ u32 iscsi_cid;
+ /* identifier of the connection from qed */
+ u32 handle;
+ u32 fw_cid;
+ void __iomem *p_doorbell;
+
+ /* Send queue management */
+ struct iscsi_wqe *sq;
+ dma_addr_t sq_dma;
+
+ u16 sq_prod_idx;
+ u16 fw_sq_prod_idx;
+ u16 sq_con_idx;
+ u32 sq_mem_size;
+
+ void *sq_pbl;
+ dma_addr_t sq_pbl_dma;
+ u32 sq_pbl_size;
+ struct qedi_conn *conn;
+ struct work_struct offload_work;
+};
+
+#define QEDI_SQ_WQES_MIN 16
+
+struct qedi_io_bdt {
+ struct iscsi_sge *sge_tbl;
+ dma_addr_t sge_tbl_dma;
+ u16 sge_valid;
+};
+
+/**
+ * struct generic_pdu_resc - login pdu resource structure
+ *
+ * @req_buf: driver buffer used to stage payload associated with
+ * the login request
+ * @req_dma_addr: dma address for iscsi login request payload buffer
+ * @req_buf_size: actual login request payload length
+ * @req_wr_ptr: pointer into login request buffer when next data is
+ * to be written
+ * @resp_hdr: iscsi header where iscsi login response header is to
+ * be recreated
+ * @resp_buf: buffer to stage login response payload
+ * @resp_dma_addr: login response payload buffer dma address
+ * @resp_buf_size: login response payload length
+ * @resp_wr_ptr: pointer into login response buffer when next data is
+ * to be written
+ * @req_bd_tbl: iscsi login request payload BD table
+ * @req_bd_dma: login request BD table dma address
+ * @resp_bd_tbl: iscsi login response payload BD table
+ * @resp_bd_dma: login response BD table dma address
+ *
+ * The following structure defines the buffer info for generic PDUs such
+ * as iSCSI Login, Logout and NOP-Out.
+ */
+struct generic_pdu_resc {
+ char *req_buf;
+ dma_addr_t req_dma_addr;
+ u32 req_buf_size;
+ char *req_wr_ptr;
+ struct iscsi_hdr resp_hdr;
+ char *resp_buf;
+ dma_addr_t resp_dma_addr;
+ u32 resp_buf_size;
+ char *resp_wr_ptr;
+ char *req_bd_tbl;
+ dma_addr_t req_bd_dma;
+ char *resp_bd_tbl;
+ dma_addr_t resp_bd_dma;
+};
+
+struct qedi_conn {
+ struct iscsi_cls_conn *cls_conn;
+ struct qedi_ctx *qedi;
+ struct qedi_endpoint *ep;
+ struct list_head active_cmd_list;
+ spinlock_t list_lock; /* internal conn lock */
+ u32 active_cmd_count;
+ u32 cmd_cleanup_req;
+ u32 cmd_cleanup_cmpl;
+
+ u32 iscsi_conn_id;
+ int itt;
+ int abrt_conn;
+#define QEDI_CID_RESERVED 0x5AFF
+ u32 fw_cid;
+ /*
+ * Buffer for login negotiation process
+ */
+ struct generic_pdu_resc gen_pdu;
+
+ struct list_head tmf_work_list;
+ wait_queue_head_t wait_queue;
+ spinlock_t tmf_work_lock; /* tmf work lock */
+ unsigned long flags;
+#define QEDI_CONN_FW_CLEANUP 1
+};
+
+struct qedi_cmd {
+ struct list_head io_cmd;
+ bool io_cmd_in_list;
+ struct iscsi_hdr hdr;
+ struct qedi_conn *conn;
+ struct scsi_cmnd *scsi_cmd;
+ struct scatterlist *sg;
+ struct qedi_io_bdt io_tbl;
+ struct iscsi_task_context request;
+ unsigned char *sense_buffer;
+ dma_addr_t sense_buffer_dma;
+ u16 task_id;
+
+ /* field populated for tmf work queue */
+ struct iscsi_task *task;
+ struct work_struct tmf_work;
+ int state;
+#define CLEANUP_WAIT 1
+#define CLEANUP_RECV 2
+#define CLEANUP_WAIT_FAILED 3
+#define CLEANUP_NOT_REQUIRED 4
+#define LUN_RESET_RESPONSE_RECEIVED 5
+#define RESPONSE_RECEIVED 6
+
+ int type;
+#define TYPEIO 1
+#define TYPERESET 2
+
+ struct qedi_work_map *list_tmf_work;
+ /* slowpath management */
+ bool use_slowpath;
+
+ struct iscsi_tm_rsp *tmf_resp_buf;
+ struct qedi_work cqe_work;
+};
+
+struct qedi_work_map {
+ struct list_head list;
+ struct qedi_cmd *qedi_cmd;
+ int rtid;
+
+ int state;
+#define QEDI_WORK_QUEUED 1
+#define QEDI_WORK_SCHEDULED 2
+#define QEDI_WORK_EXIT 3
+
+ struct work_struct *ptr_tmf_work;
+};
+
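+/* The ITT carried on the wire packs the firmware task id in the low
+ * 16 bits and the iSCSI ITT in the high 16 bits.
+ */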
+#define qedi_set_itt(task_id, itt) ((u32)(((task_id) & 0xffff) | ((itt) << 16)))
+#define qedi_get_itt(cqe) (cqe.iscsi_hdr.cmd.itt >> 16)
+
+#define QEDI_OFLD_WAIT_STATE(q) ((q)->state == EP_STATE_OFLDCONN_FAILED || \
+ (q)->state == EP_STATE_OFLDCONN_COMPL)
+
+#endif /* _QEDI_ISCSI_H_ */
diff --git a/drivers/scsi/qedi/qedi_main.c b/drivers/scsi/qedi/qedi_main.c
new file mode 100644
index 000000000000..19ead8d17e55
--- /dev/null
+++ b/drivers/scsi/qedi/qedi_main.c
@@ -0,0 +1,2127 @@
+/*
+ * QLogic iSCSI Offload Driver
+ * Copyright (c) 2016 Cavium Inc.
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/kernel.h>
+#include <linux/if_arp.h>
+#include <scsi/iscsi_if.h>
+#include <linux/inet.h>
+#include <net/arp.h>
+#include <linux/list.h>
+#include <linux/kthread.h>
+#include <linux/mm.h>
+#include <linux/if_vlan.h>
+#include <linux/cpu.h>
+
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_eh.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi.h>
+
+#include "qedi.h"
+#include "qedi_gbl.h"
+#include "qedi_iscsi.h"
+
+static uint qedi_fw_debug;
+module_param(qedi_fw_debug, uint, 0644);
+MODULE_PARM_DESC(qedi_fw_debug, " Firmware debug level 0 (default) to 3");
+
+uint qedi_dbg_log = QEDI_LOG_WARN | QEDI_LOG_SCSI_TM;
+module_param(qedi_dbg_log, uint, 0644);
+MODULE_PARM_DESC(qedi_dbg_log, " Default debug level");
+
+uint qedi_io_tracing;
+module_param(qedi_io_tracing, uint, 0644);
+MODULE_PARM_DESC(qedi_io_tracing,
+ " Enable logging of SCSI requests/completions into trace buffer. (default off).");
+
+const struct qed_iscsi_ops *qedi_ops;
+static struct scsi_transport_template *qedi_scsi_transport;
+static struct pci_driver qedi_pci_driver;
+static DEFINE_PER_CPU(struct qedi_percpu_s, qedi_percpu);
+static LIST_HEAD(qedi_udev_list);
+/* Static function declaration */
+static int qedi_alloc_global_queues(struct qedi_ctx *qedi);
+static void qedi_free_global_queues(struct qedi_ctx *qedi);
+static struct qedi_cmd *qedi_get_cmd_from_tid(struct qedi_ctx *qedi, u32 tid);
+static void qedi_reset_uio_rings(struct qedi_uio_dev *udev);
+static void qedi_ll2_free_skbs(struct qedi_ctx *qedi);
+
+static int qedi_iscsi_event_cb(void *context, u8 fw_event_code, void *fw_handle)
+{
+ struct qedi_ctx *qedi;
+ struct qedi_endpoint *qedi_ep;
+ struct async_data *data;
+ int rval = 0;
+
+ if (!context || !fw_handle) {
+ QEDI_ERR(NULL, "Recv event with ctx NULL\n");
+ return -EINVAL;
+ }
+
+ qedi = (struct qedi_ctx *)context;
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
+ "Recv Event %d fw_handle %p\n", fw_event_code, fw_handle);
+
+ data = (struct async_data *)fw_handle;
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
+ "cid=0x%x tid=0x%x err-code=0x%x fw-dbg-param=0x%x\n",
+ data->cid, data->itid, data->error_code,
+ data->fw_debug_param);
+
+ qedi_ep = qedi->ep_tbl[data->cid];
+
+ if (!qedi_ep) {
+ QEDI_WARN(&qedi->dbg_ctx,
+ "Cannot process event, ep already disconnected, cid=0x%x\n",
+ data->cid);
+ WARN_ON(1);
+ return -ENODEV;
+ }
+
+ switch (fw_event_code) {
+ case ISCSI_EVENT_TYPE_ASYN_CONNECT_COMPLETE:
+ if (qedi_ep->state == EP_STATE_OFLDCONN_START)
+ qedi_ep->state = EP_STATE_OFLDCONN_COMPL;
+
+ wake_up_interruptible(&qedi_ep->tcp_ofld_wait);
+ break;
+ case ISCSI_EVENT_TYPE_ASYN_TERMINATE_DONE:
+ qedi_ep->state = EP_STATE_DISCONN_COMPL;
+ wake_up_interruptible(&qedi_ep->tcp_ofld_wait);
+ break;
+ case ISCSI_EVENT_TYPE_ISCSI_CONN_ERROR:
+ qedi_process_iscsi_error(qedi_ep, data);
+ break;
+ case ISCSI_EVENT_TYPE_ASYN_ABORT_RCVD:
+ case ISCSI_EVENT_TYPE_ASYN_SYN_RCVD:
+ case ISCSI_EVENT_TYPE_ASYN_MAX_RT_TIME:
+ case ISCSI_EVENT_TYPE_ASYN_MAX_RT_CNT:
+ case ISCSI_EVENT_TYPE_ASYN_MAX_KA_PROBES_CNT:
+ case ISCSI_EVENT_TYPE_ASYN_FIN_WAIT2:
+ case ISCSI_EVENT_TYPE_TCP_CONN_ERROR:
+ qedi_process_tcp_error(qedi_ep, data);
+ break;
+ default:
+ QEDI_ERR(&qedi->dbg_ctx, "Recv Unknown Event %u\n",
+ fw_event_code);
+ }
+
+ return rval;
+}
+
+static int qedi_uio_open(struct uio_info *uinfo, struct inode *inode)
+{
+ struct qedi_uio_dev *udev = uinfo->priv;
+ struct qedi_ctx *qedi = udev->qedi;
+
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+
+ if (udev->uio_dev != -1)
+ return -EBUSY;
+
+ rtnl_lock();
+ udev->uio_dev = iminor(inode);
+ qedi_reset_uio_rings(udev);
+ set_bit(UIO_DEV_OPENED, &qedi->flags);
+ rtnl_unlock();
+
+ return 0;
+}
+
+static int qedi_uio_close(struct uio_info *uinfo, struct inode *inode)
+{
+ struct qedi_uio_dev *udev = uinfo->priv;
+ struct qedi_ctx *qedi = udev->qedi;
+
+ udev->uio_dev = -1;
+ clear_bit(UIO_DEV_OPENED, &qedi->flags);
+ qedi_ll2_free_skbs(qedi);
+ return 0;
+}
+
+static void __qedi_free_uio_rings(struct qedi_uio_dev *udev)
+{
+ if (udev->ll2_ring) {
+ free_page((unsigned long)udev->ll2_ring);
+ udev->ll2_ring = NULL;
+ }
+
+ if (udev->ll2_buf) {
+ free_pages((unsigned long)udev->ll2_buf, 2);
+ udev->ll2_buf = NULL;
+ }
+}
+
+static void __qedi_free_uio(struct qedi_uio_dev *udev)
+{
+ uio_unregister_device(&udev->qedi_uinfo);
+
+ __qedi_free_uio_rings(udev);
+
+ pci_dev_put(udev->pdev);
+ kfree(udev->uctrl);
+ kfree(udev);
+}
+
+static void qedi_free_uio(struct qedi_uio_dev *udev)
+{
+ if (!udev)
+ return;
+
+ list_del_init(&udev->list);
+ __qedi_free_uio(udev);
+}
+
+static void qedi_reset_uio_rings(struct qedi_uio_dev *udev)
+{
+ struct qedi_ctx *qedi = NULL;
+ struct qedi_uio_ctrl *uctrl = NULL;
+
+ qedi = udev->qedi;
+ uctrl = udev->uctrl;
+
+ spin_lock_bh(&qedi->ll2_lock);
+ uctrl->host_rx_cons = 0;
+ uctrl->hw_rx_prod = 0;
+ uctrl->hw_rx_bd_prod = 0;
+ uctrl->host_rx_bd_cons = 0;
+
+ memset(udev->ll2_ring, 0, udev->ll2_ring_size);
+ memset(udev->ll2_buf, 0, udev->ll2_buf_size);
+ spin_unlock_bh(&qedi->ll2_lock);
+}
+
+static int __qedi_alloc_uio_rings(struct qedi_uio_dev *udev)
+{
+ int rc = 0;
+
+ if (udev->ll2_ring || udev->ll2_buf)
+ return rc;
+
+ /* Allocating memory for LL2 ring */
+ udev->ll2_ring_size = QEDI_PAGE_SIZE;
+ udev->ll2_ring = (void *)get_zeroed_page(GFP_KERNEL | __GFP_COMP);
+ if (!udev->ll2_ring) {
+ rc = -ENOMEM;
+ goto exit_alloc_ring;
+ }
+
+ /* Allocating memory for Tx/Rx pkt buffer */
+ udev->ll2_buf_size = TX_RX_RING * LL2_SINGLE_BUF_SIZE;
+ udev->ll2_buf_size = QEDI_PAGE_ALIGN(udev->ll2_buf_size);
+ udev->ll2_buf = (void *)__get_free_pages(GFP_KERNEL | __GFP_COMP |
+ __GFP_ZERO, 2);
+ if (!udev->ll2_buf) {
+ rc = -ENOMEM;
+ goto exit_alloc_buf;
+ }
+ return rc;
+
+exit_alloc_buf:
+ free_page((unsigned long)udev->ll2_ring);
+ udev->ll2_ring = NULL;
+exit_alloc_ring:
+ return rc;
+}
+
+static int qedi_alloc_uio_rings(struct qedi_ctx *qedi)
+{
+ struct qedi_uio_dev *udev = NULL;
+ struct qedi_uio_ctrl *uctrl = NULL;
+ int rc = 0;
+
+ list_for_each_entry(udev, &qedi_udev_list, list) {
+ if (udev->pdev == qedi->pdev) {
+ udev->qedi = qedi;
+ if (__qedi_alloc_uio_rings(udev)) {
+ udev->qedi = NULL;
+ return -ENOMEM;
+ }
+ qedi->udev = udev;
+ return 0;
+ }
+ }
+
+ udev = kzalloc(sizeof(*udev), GFP_KERNEL);
+ if (!udev) {
+ rc = -ENOMEM;
+ goto err_udev;
+ }
+
+ uctrl = kzalloc(sizeof(*uctrl), GFP_KERNEL);
+ if (!uctrl) {
+ rc = -ENOMEM;
+ goto err_uctrl;
+ }
+
+ udev->uio_dev = -1;
+
+ udev->qedi = qedi;
+ udev->pdev = qedi->pdev;
+ udev->uctrl = uctrl;
+
+ rc = __qedi_alloc_uio_rings(udev);
+ if (rc)
+ goto err_uio_rings;
+
+ list_add(&udev->list, &qedi_udev_list);
+
+ pci_dev_get(udev->pdev);
+ qedi->udev = udev;
+
+ udev->tx_pkt = udev->ll2_buf;
+ udev->rx_pkt = udev->ll2_buf + LL2_SINGLE_BUF_SIZE;
+ return 0;
+
+ err_uio_rings:
+ kfree(uctrl);
+ err_uctrl:
+ kfree(udev);
+ err_udev:
+ return -ENOMEM;
+}
+
+static int qedi_init_uio(struct qedi_ctx *qedi)
+{
+ struct qedi_uio_dev *udev = qedi->udev;
+ struct uio_info *uinfo;
+ int ret = 0;
+
+ if (!udev)
+ return -ENOMEM;
+
+ uinfo = &udev->qedi_uinfo;
+
+ uinfo->mem[0].addr = (unsigned long)udev->uctrl;
+ uinfo->mem[0].size = sizeof(struct qedi_uio_ctrl);
+ uinfo->mem[0].memtype = UIO_MEM_LOGICAL;
+
+ uinfo->mem[1].addr = (unsigned long)udev->ll2_ring;
+ uinfo->mem[1].size = udev->ll2_ring_size;
+ uinfo->mem[1].memtype = UIO_MEM_LOGICAL;
+
+ uinfo->mem[2].addr = (unsigned long)udev->ll2_buf;
+ uinfo->mem[2].size = udev->ll2_buf_size;
+ uinfo->mem[2].memtype = UIO_MEM_LOGICAL;
+
+ uinfo->name = "qedi_uio";
+ uinfo->version = QEDI_MODULE_VERSION;
+ uinfo->irq = UIO_IRQ_CUSTOM;
+
+ uinfo->open = qedi_uio_open;
+ uinfo->release = qedi_uio_close;
+
+ if (udev->uio_dev == -1) {
+ if (!uinfo->priv) {
+ uinfo->priv = udev;
+
+ ret = uio_register_device(&udev->pdev->dev, uinfo);
+ if (ret) {
+ QEDI_ERR(&qedi->dbg_ctx,
+ "UIO registration failed\n");
+ }
+ }
+ }
+
+ return ret;
+}
+
+static int qedi_alloc_and_init_sb(struct qedi_ctx *qedi,
+ struct qed_sb_info *sb_info, u16 sb_id)
+{
+ struct status_block *sb_virt;
+ dma_addr_t sb_phys;
+ int ret;
+
+ sb_virt = dma_alloc_coherent(&qedi->pdev->dev,
+ sizeof(struct status_block), &sb_phys,
+ GFP_KERNEL);
+ if (!sb_virt) {
+ QEDI_ERR(&qedi->dbg_ctx,
+ "Status block allocation failed for id = %d.\n",
+ sb_id);
+ return -ENOMEM;
+ }
+
+ ret = qedi_ops->common->sb_init(qedi->cdev, sb_info, sb_virt, sb_phys,
+ sb_id, QED_SB_TYPE_STORAGE);
+ if (ret) {
+ QEDI_ERR(&qedi->dbg_ctx,
+ "Status block initialization failed for id = %d.\n",
+ sb_id);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void qedi_free_sb(struct qedi_ctx *qedi)
+{
+ struct qed_sb_info *sb_info;
+ int id;
+
+ for (id = 0; id < MIN_NUM_CPUS_MSIX(qedi); id++) {
+ sb_info = &qedi->sb_array[id];
+ if (sb_info->sb_virt)
+ dma_free_coherent(&qedi->pdev->dev,
+ sizeof(*sb_info->sb_virt),
+ (void *)sb_info->sb_virt,
+ sb_info->sb_phys);
+ }
+}
+
+static void qedi_free_fp(struct qedi_ctx *qedi)
+{
+ kfree(qedi->fp_array);
+ kfree(qedi->sb_array);
+}
+
+static void qedi_destroy_fp(struct qedi_ctx *qedi)
+{
+ qedi_free_sb(qedi);
+ qedi_free_fp(qedi);
+}
+
+static int qedi_alloc_fp(struct qedi_ctx *qedi)
+{
+ int ret = 0;
+
+ qedi->fp_array = kcalloc(MIN_NUM_CPUS_MSIX(qedi),
+ sizeof(struct qedi_fastpath), GFP_KERNEL);
+ if (!qedi->fp_array) {
+ QEDI_ERR(&qedi->dbg_ctx,
+ "fastpath fp array allocation failed.\n");
+ return -ENOMEM;
+ }
+
+ qedi->sb_array = kcalloc(MIN_NUM_CPUS_MSIX(qedi),
+ sizeof(struct qed_sb_info), GFP_KERNEL);
+ if (!qedi->sb_array) {
+ QEDI_ERR(&qedi->dbg_ctx,
+ "fastpath sb array allocation failed.\n");
+ ret = -ENOMEM;
+ goto free_fp;
+ }
+
+ return ret;
+
+free_fp:
+ qedi_free_fp(qedi);
+ return ret;
+}
+
+static void qedi_int_fp(struct qedi_ctx *qedi)
+{
+ struct qedi_fastpath *fp;
+ int id;
+
+ memset(qedi->fp_array, 0, MIN_NUM_CPUS_MSIX(qedi) *
+ sizeof(*qedi->fp_array));
+ memset(qedi->sb_array, 0, MIN_NUM_CPUS_MSIX(qedi) *
+ sizeof(*qedi->sb_array));
+
+ for (id = 0; id < MIN_NUM_CPUS_MSIX(qedi); id++) {
+ fp = &qedi->fp_array[id];
+ fp->sb_info = &qedi->sb_array[id];
+ fp->sb_id = id;
+ fp->qedi = qedi;
+ snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
+ "qedi", id);
+
+ /* fp_array[i] ---- irq cookie
+ * So init data which is needed in int ctx
+ */
+ }
+}
+
+static int qedi_prepare_fp(struct qedi_ctx *qedi)
+{
+ struct qedi_fastpath *fp;
+ int id, ret = 0;
+
+ ret = qedi_alloc_fp(qedi);
+ if (ret)
+ goto err;
+
+ qedi_int_fp(qedi);
+
+ for (id = 0; id < MIN_NUM_CPUS_MSIX(qedi); id++) {
+ fp = &qedi->fp_array[id];
+ ret = qedi_alloc_and_init_sb(qedi, fp->sb_info, fp->sb_id);
+ if (ret) {
+ QEDI_ERR(&qedi->dbg_ctx,
+ "SB allocation and initialization failed.\n");
+ ret = -EIO;
+ goto err_init;
+ }
+ }
+
+ return 0;
+
+err_init:
+ qedi_free_sb(qedi);
+ qedi_free_fp(qedi);
+err:
+ return ret;
+}
+
+static int qedi_setup_cid_que(struct qedi_ctx *qedi)
+{
+ int i;
+
+ qedi->cid_que.cid_que_base = kmalloc_array(qedi->max_active_conns,
+ sizeof(u32), GFP_KERNEL);
+ if (!qedi->cid_que.cid_que_base)
+ return -ENOMEM;
+
+ qedi->cid_que.conn_cid_tbl = kmalloc_array(qedi->max_active_conns,
+ sizeof(struct qedi_conn *),
+ GFP_KERNEL);
+ if (!qedi->cid_que.conn_cid_tbl) {
+ kfree(qedi->cid_que.cid_que_base);
+ qedi->cid_que.cid_que_base = NULL;
+ return -ENOMEM;
+ }
+
+ qedi->cid_que.cid_que = (u32 *)qedi->cid_que.cid_que_base;
+ qedi->cid_que.cid_q_prod_idx = 0;
+ qedi->cid_que.cid_q_cons_idx = 0;
+ qedi->cid_que.cid_q_max_idx = qedi->max_active_conns;
+ qedi->cid_que.cid_free_cnt = qedi->max_active_conns;
+
+ for (i = 0; i < qedi->max_active_conns; i++) {
+ qedi->cid_que.cid_que[i] = i;
+ qedi->cid_que.conn_cid_tbl[i] = NULL;
+ }
+
+ return 0;
+}
+
+static void qedi_release_cid_que(struct qedi_ctx *qedi)
+{
+ kfree(qedi->cid_que.cid_que_base);
+ qedi->cid_que.cid_que_base = NULL;
+
+ kfree(qedi->cid_que.conn_cid_tbl);
+ qedi->cid_que.conn_cid_tbl = NULL;
+}
+
+static int qedi_init_id_tbl(struct qedi_portid_tbl *id_tbl, u16 size,
+ u16 start_id, u16 next)
+{
+ id_tbl->start = start_id;
+ id_tbl->max = size;
+ id_tbl->next = next;
+ spin_lock_init(&id_tbl->lock);
+ id_tbl->table = kzalloc(DIV_ROUND_UP(size, 32) * 4, GFP_KERNEL);
+ if (!id_tbl->table)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void qedi_free_id_tbl(struct qedi_portid_tbl *id_tbl)
+{
+ kfree(id_tbl->table);
+ id_tbl->table = NULL;
+}
+
+int qedi_alloc_id(struct qedi_portid_tbl *id_tbl, u16 id)
+{
+ int ret = -1;
+
+ id -= id_tbl->start;
+ if (id >= id_tbl->max)
+ return ret;
+
+ spin_lock(&id_tbl->lock);
+ if (!test_bit(id, id_tbl->table)) {
+ set_bit(id, id_tbl->table);
+ ret = 0;
+ }
+ spin_unlock(&id_tbl->lock);
+ return ret;
+}
+
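+/* Allocate the next free local port id, scanning from ->next and
+ * wrapping to the start of the bitmap; ->next advances modulo ->max,
+ * which assumes the range size is a power of two.
+ */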
+u16 qedi_alloc_new_id(struct qedi_portid_tbl *id_tbl)
+{
+ u16 id;
+
+ spin_lock(&id_tbl->lock);
+ id = find_next_zero_bit(id_tbl->table, id_tbl->max, id_tbl->next);
+ if (id >= id_tbl->max) {
+ id = QEDI_LOCAL_PORT_INVALID;
+ if (id_tbl->next != 0) {
+ id = find_first_zero_bit(id_tbl->table, id_tbl->next);
+ if (id >= id_tbl->next)
+ id = QEDI_LOCAL_PORT_INVALID;
+ }
+ }
+
+ if (id < id_tbl->max) {
+ set_bit(id, id_tbl->table);
+ id_tbl->next = (id + 1) & (id_tbl->max - 1);
+ id += id_tbl->start;
+ }
+
+ spin_unlock(&id_tbl->lock);
+
+ return id;
+}
+
+void qedi_free_id(struct qedi_portid_tbl *id_tbl, u16 id)
+{
+ if (id == QEDI_LOCAL_PORT_INVALID)
+ return;
+
+ id -= id_tbl->start;
+ if (id >= id_tbl->max)
+ return;
+
+ clear_bit(id, id_tbl->table);
+}
+
+static void qedi_cm_free_mem(struct qedi_ctx *qedi)
+{
+ kfree(qedi->ep_tbl);
+ qedi->ep_tbl = NULL;
+ qedi_free_id_tbl(&qedi->lcl_port_tbl);
+}
+
+static int qedi_cm_alloc_mem(struct qedi_ctx *qedi)
+{
+ u16 port_id;
+
+ qedi->ep_tbl = kzalloc((qedi->max_active_conns *
+ sizeof(struct qedi_endpoint *)), GFP_KERNEL);
+ if (!qedi->ep_tbl)
+ return -ENOMEM;
+ port_id = prandom_u32() % QEDI_LOCAL_PORT_RANGE;
+ if (qedi_init_id_tbl(&qedi->lcl_port_tbl, QEDI_LOCAL_PORT_RANGE,
+ QEDI_LOCAL_PORT_MIN, port_id)) {
+ qedi_cm_free_mem(qedi);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static struct qedi_ctx *qedi_host_alloc(struct pci_dev *pdev)
+{
+ struct Scsi_Host *shost;
+ struct qedi_ctx *qedi = NULL;
+
+ shost = iscsi_host_alloc(&qedi_host_template,
+ sizeof(struct qedi_ctx), 0);
+ if (!shost) {
+ QEDI_ERR(NULL, "Could not allocate shost\n");
+ goto exit_setup_shost;
+ }
+
+ shost->max_id = QEDI_MAX_ISCSI_CONNS_PER_HBA;
+ shost->max_channel = 0;
+ shost->max_lun = ~0;
+ shost->max_cmd_len = 16;
+ shost->transportt = qedi_scsi_transport;
+
+ qedi = iscsi_host_priv(shost);
+ memset(qedi, 0, sizeof(*qedi));
+ qedi->shost = shost;
+ qedi->dbg_ctx.host_no = shost->host_no;
+ qedi->pdev = pdev;
+ qedi->dbg_ctx.pdev = pdev;
+ qedi->max_active_conns = ISCSI_MAX_SESS_PER_HBA;
+ qedi->max_sqes = QEDI_SQ_SIZE;
+
+ if (shost_use_blk_mq(shost))
+ shost->nr_hw_queues = MIN_NUM_CPUS_MSIX(qedi);
+
+ pci_set_drvdata(pdev, qedi);
+
+exit_setup_shost:
+ return qedi;
+}
+
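+/* LL2 Rx callback: queue the skb for the recv thread and advance the
+ * shared ring producer, unless the ring is full (producer would catch
+ * up with the iscsiuio consumer).
+ */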
+static int qedi_ll2_rx(void *cookie, struct sk_buff *skb, u32 arg1, u32 arg2)
+{
+ struct qedi_ctx *qedi = (struct qedi_ctx *)cookie;
+ struct qedi_uio_dev *udev;
+ struct qedi_uio_ctrl *uctrl;
+ struct skb_work_list *work;
+ u32 prod;
+
+ if (!qedi) {
+ QEDI_ERR(NULL, "qedi is NULL\n");
+ return -1;
+ }
+
+ if (!test_bit(UIO_DEV_OPENED, &qedi->flags)) {
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_UIO,
+ "UIO DEV is not opened\n");
+ kfree_skb(skb);
+ return 0;
+ }
+
+ udev = qedi->udev;
+ uctrl = udev->uctrl;
+
+ work = kzalloc(sizeof(*work), GFP_ATOMIC);
+ if (!work) {
+ QEDI_WARN(&qedi->dbg_ctx,
+ "Could not allocate work so dropping frame.\n");
+ kfree_skb(skb);
+ return 0;
+ }
+
+ INIT_LIST_HEAD(&work->list);
+ work->skb = skb;
+
+ if (skb_vlan_tag_present(skb))
+ work->vlan_id = skb_vlan_tag_get(skb);
+
+ if (work->vlan_id)
+ __vlan_insert_tag(work->skb, htons(ETH_P_8021Q), work->vlan_id);
+
+ spin_lock_bh(&qedi->ll2_lock);
+ list_add_tail(&work->list, &qedi->ll2_skb_list);
+
+ ++uctrl->hw_rx_prod_cnt;
+ prod = (uctrl->hw_rx_prod + 1) % RX_RING;
+ if (prod != uctrl->host_rx_cons) {
+ uctrl->hw_rx_prod = prod;
+ spin_unlock_bh(&qedi->ll2_lock);
+ wake_up_process(qedi->ll2_recv_thread);
+ return 0;
+ }
+
+ spin_unlock_bh(&qedi->ll2_lock);
+ return 0;
+}
+
+/* map this skb to iscsiuio mmaped region */
+static int qedi_ll2_process_skb(struct qedi_ctx *qedi, struct sk_buff *skb,
+ u16 vlan_id)
+{
+ struct qedi_uio_dev *udev = NULL;
+ struct qedi_uio_ctrl *uctrl = NULL;
+ struct qedi_rx_bd rxbd;
+ struct qedi_rx_bd *p_rxbd;
+ u32 rx_bd_prod;
+ void *pkt;
+ int len = 0;
+
+ if (!qedi) {
+ QEDI_ERR(NULL, "qedi is NULL\n");
+ return -1;
+ }
+
+ udev = qedi->udev;
+ uctrl = udev->uctrl;
+ pkt = udev->rx_pkt + (uctrl->hw_rx_prod * LL2_SINGLE_BUF_SIZE);
+ len = min_t(u32, skb->len, (u32)LL2_SINGLE_BUF_SIZE);
+ memcpy(pkt, skb->data, len);
+
+ memset(&rxbd, 0, sizeof(rxbd));
+ rxbd.rx_pkt_index = uctrl->hw_rx_prod;
+ rxbd.rx_pkt_len = len;
+ rxbd.vlan_id = vlan_id;
+
+ uctrl->hw_rx_bd_prod = (uctrl->hw_rx_bd_prod + 1) % QEDI_NUM_RX_BD;
+ rx_bd_prod = uctrl->hw_rx_bd_prod;
+ p_rxbd = (struct qedi_rx_bd *)udev->ll2_ring;
+ p_rxbd += rx_bd_prod;
+
+ memcpy(p_rxbd, &rxbd, sizeof(rxbd));
+
+ /* notify the iscsiuio about new packet */
+ uio_event_notify(&udev->qedi_uinfo);
+
+ return 0;
+}
+
+static void qedi_ll2_free_skbs(struct qedi_ctx *qedi)
+{
+ struct skb_work_list *work, *work_tmp;
+
+ spin_lock_bh(&qedi->ll2_lock);
+ list_for_each_entry_safe(work, work_tmp, &qedi->ll2_skb_list, list) {
+ list_del(&work->list);
+ if (work->skb)
+ kfree_skb(work->skb);
+ kfree(work);
+ }
+ spin_unlock_bh(&qedi->ll2_lock);
+}
+
+static int qedi_ll2_recv_thread(void *arg)
+{
+ struct qedi_ctx *qedi = (struct qedi_ctx *)arg;
+ struct skb_work_list *work, *work_tmp;
+
+ set_user_nice(current, -20);
+
+ while (!kthread_should_stop()) {
+ spin_lock_bh(&qedi->ll2_lock);
+ list_for_each_entry_safe(work, work_tmp, &qedi->ll2_skb_list,
+ list) {
+ list_del(&work->list);
+ qedi_ll2_process_skb(qedi, work->skb, work->vlan_id);
+ kfree_skb(work->skb);
+ kfree(work);
+ }
+ set_current_state(TASK_INTERRUPTIBLE);
+ spin_unlock_bh(&qedi->ll2_lock);
+ schedule();
+ }
+
+ __set_current_state(TASK_RUNNING);
+ return 0;
+}
+
+static int qedi_set_iscsi_pf_param(struct qedi_ctx *qedi)
+{
+ u8 num_sq_pages;
+ u32 log_page_size;
+ int rval = 0;
+
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_DISC, "Min number of MSIX %d\n",
+ MIN_NUM_CPUS_MSIX(qedi));
+
+ num_sq_pages = (MAX_OUSTANDING_TASKS_PER_CON * 8) / PAGE_SIZE;
+
+ qedi->num_queues = MIN_NUM_CPUS_MSIX(qedi);
+
+ memset(&qedi->pf_params.iscsi_pf_params, 0,
+ sizeof(qedi->pf_params.iscsi_pf_params));
+
+ qedi->p_cpuq = pci_alloc_consistent(qedi->pdev,
+ qedi->num_queues * sizeof(struct qedi_glbl_q_params),
+ &qedi->hw_p_cpuq);
+ if (!qedi->p_cpuq) {
+ QEDI_ERR(&qedi->dbg_ctx, "pci_alloc_consistent fail\n");
+ rval = -1;
+ goto err_alloc_mem;
+ }
+
+ rval = qedi_alloc_global_queues(qedi);
+ if (rval) {
+ QEDI_ERR(&qedi->dbg_ctx, "Global queue allocation failed.\n");
+ rval = -1;
+ goto err_alloc_mem;
+ }
+
+ qedi->pf_params.iscsi_pf_params.num_cons = QEDI_MAX_ISCSI_CONNS_PER_HBA;
+ qedi->pf_params.iscsi_pf_params.num_tasks = QEDI_MAX_ISCSI_TASK;
+ qedi->pf_params.iscsi_pf_params.half_way_close_timeout = 10;
+ qedi->pf_params.iscsi_pf_params.num_sq_pages_in_ring = num_sq_pages;
+ qedi->pf_params.iscsi_pf_params.num_r2tq_pages_in_ring = num_sq_pages;
+ qedi->pf_params.iscsi_pf_params.num_uhq_pages_in_ring = num_sq_pages;
+ qedi->pf_params.iscsi_pf_params.num_queues = qedi->num_queues;
+ qedi->pf_params.iscsi_pf_params.debug_mode = qedi_fw_debug;
+
+ for (log_page_size = 0 ; log_page_size < 32 ; log_page_size++) {
+ if ((1 << log_page_size) == PAGE_SIZE)
+ break;
+ }
+ qedi->pf_params.iscsi_pf_params.log_page_size = log_page_size;
+
+ qedi->pf_params.iscsi_pf_params.glbl_q_params_addr =
+ (u64)qedi->hw_p_cpuq;
+
+ /* RQ BDQ initializations.
+ * rq_num_entries: suggested value for Initiator is 16 (4KB RQ)
+ * rqe_log_size: 8 for 256B RQE
+ */
+ qedi->pf_params.iscsi_pf_params.rqe_log_size = 8;
+ /* BDQ address and size */
+ qedi->pf_params.iscsi_pf_params.bdq_pbl_base_addr[BDQ_ID_RQ] =
+ qedi->bdq_pbl_list_dma;
+ qedi->pf_params.iscsi_pf_params.bdq_pbl_num_entries[BDQ_ID_RQ] =
+ qedi->bdq_pbl_list_num_entries;
+ qedi->pf_params.iscsi_pf_params.rq_buffer_size = QEDI_BDQ_BUF_SIZE;
+
+ /* cq_num_entries: num_tasks + rq_num_entries */
+ qedi->pf_params.iscsi_pf_params.cq_num_entries = 2048;
+
+ qedi->pf_params.iscsi_pf_params.gl_rq_pi = QEDI_PROTO_CQ_PROD_IDX;
+ qedi->pf_params.iscsi_pf_params.gl_cmd_pi = 1;
+ qedi->pf_params.iscsi_pf_params.ooo_enable = 1;
+
+err_alloc_mem:
+ return rval;
+}
+
+/* Free DMA coherent memory for array of queue pointers we pass to qed */
+static void qedi_free_iscsi_pf_param(struct qedi_ctx *qedi)
+{
+ size_t size = 0;
+
+ if (qedi->p_cpuq) {
+ size = qedi->num_queues * sizeof(struct qedi_glbl_q_params);
+ pci_free_consistent(qedi->pdev, size, qedi->p_cpuq,
+ qedi->hw_p_cpuq);
+ }
+
+ qedi_free_global_queues(qedi);
+
+ kfree(qedi->global_queues);
+}
+
+static void qedi_link_update(void *dev, struct qed_link_output *link)
+{
+ struct qedi_ctx *qedi = (struct qedi_ctx *)dev;
+
+ if (link->link_up) {
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO, "Link Up event.\n");
+ atomic_set(&qedi->link_state, QEDI_LINK_UP);
+ } else {
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
+ "Link Down event.\n");
+ atomic_set(&qedi->link_state, QEDI_LINK_DOWN);
+ }
+}
+
+static struct qed_iscsi_cb_ops qedi_cb_ops = {
+ {
+ .link_update = qedi_link_update,
+ }
+};
+
+static int qedi_queue_cqe(struct qedi_ctx *qedi, union iscsi_cqe *cqe,
+ u16 que_idx, struct qedi_percpu_s *p)
+{
+ struct qedi_work *qedi_work;
+ struct qedi_conn *q_conn;
+ struct iscsi_conn *conn;
+ struct qedi_cmd *qedi_cmd;
+ u32 iscsi_cid;
+ int rc = 0;
+
+ iscsi_cid = cqe->cqe_common.conn_id;
+ q_conn = qedi->cid_que.conn_cid_tbl[iscsi_cid];
+ if (!q_conn) {
+ QEDI_WARN(&qedi->dbg_ctx,
+ "Session no longer exists for cid=0x%x!!\n",
+ iscsi_cid);
+ return -1;
+ }
+ conn = q_conn->cls_conn->dd_data;
+
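+ /*
+ * Solicited CQEs reuse the qedi_work embedded in the owning command;
+ * unsolicited, dummy and cleanup CQEs get a transient GFP_ATOMIC
+ * allocation instead.
+ */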
+ switch (cqe->cqe_common.cqe_type) {
+ case ISCSI_CQE_TYPE_SOLICITED:
+ case ISCSI_CQE_TYPE_SOLICITED_WITH_SENSE:
+ qedi_cmd = qedi_get_cmd_from_tid(qedi, cqe->cqe_solicited.itid);
+ if (!qedi_cmd) {
+ rc = -1;
+ break;
+ }
+ INIT_LIST_HEAD(&qedi_cmd->cqe_work.list);
+ qedi_cmd->cqe_work.qedi = qedi;
+ memcpy(&qedi_cmd->cqe_work.cqe, cqe, sizeof(union iscsi_cqe));
+ qedi_cmd->cqe_work.que_idx = que_idx;
+ qedi_cmd->cqe_work.is_solicited = true;
+ list_add_tail(&qedi_cmd->cqe_work.list, &p->work_list);
+ break;
+ case ISCSI_CQE_TYPE_UNSOLICITED:
+ case ISCSI_CQE_TYPE_DUMMY:
+ case ISCSI_CQE_TYPE_TASK_CLEANUP:
+ qedi_work = kzalloc(sizeof(*qedi_work), GFP_ATOMIC);
+ if (!qedi_work) {
+ rc = -1;
+ break;
+ }
+ INIT_LIST_HEAD(&qedi_work->list);
+ qedi_work->qedi = qedi;
+ memcpy(&qedi_work->cqe, cqe, sizeof(union iscsi_cqe));
+ qedi_work->que_idx = que_idx;
+ qedi_work->is_solicited = false;
+ list_add_tail(&qedi_work->list, &p->work_list);
+ break;
+ default:
+ rc = -1;
+ QEDI_ERR(&qedi->dbg_ctx, "FW Error cqe.\n");
+ }
+ return rc;
+}
+
+static bool qedi_process_completions(struct qedi_fastpath *fp)
+{
+ struct qedi_ctx *qedi = fp->qedi;
+ struct qed_sb_info *sb_info = fp->sb_info;
+ struct status_block *sb = sb_info->sb_virt;
+ struct qedi_percpu_s *p = NULL;
+ struct global_queue *que;
+ u16 prod_idx;
+ unsigned long flags;
+ union iscsi_cqe *cqe;
+ int cpu;
+ int ret;
+
+ /* Get the current firmware producer index */
+ prod_idx = sb->pi_array[QEDI_PROTO_CQ_PROD_IDX];
+
+ if (prod_idx >= QEDI_CQ_SIZE)
+ prod_idx = prod_idx % QEDI_CQ_SIZE;
+
+ que = qedi->global_queues[fp->sb_id];
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_IO,
+ "Before: global queue=%p prod_idx=%d cons_idx=%d, sb_id=%d\n",
+ que, prod_idx, que->cq_cons_idx, fp->sb_id);
+
+ qedi->intr_cpu = fp->sb_id;
+ cpu = smp_processor_id();
+ p = &per_cpu(qedi_percpu, cpu);
+
+ WARN_ON(!p->iothread);
+
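+ /*
+ * Queue CQEs on this CPU's work list under the lock; the CPU-bound
+ * io thread is woken below to drain them in process context.
+ */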
+ spin_lock_irqsave(&p->p_work_lock, flags);
+ while (que->cq_cons_idx != prod_idx) {
+ cqe = &que->cq[que->cq_cons_idx];
+
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_IO,
+ "cqe=%p prod_idx=%d cons_idx=%d.\n",
+ cqe, prod_idx, que->cq_cons_idx);
+
+ ret = qedi_queue_cqe(qedi, cqe, fp->sb_id, p);
+ if (ret)
+ continue;
+
+ que->cq_cons_idx++;
+ if (que->cq_cons_idx == QEDI_CQ_SIZE)
+ que->cq_cons_idx = 0;
+ }
+ wake_up_process(p->iothread);
+ spin_unlock_irqrestore(&p->p_work_lock, flags);
+
+ return true;
+}
+
+static bool qedi_fp_has_work(struct qedi_fastpath *fp)
+{
+ struct qedi_ctx *qedi = fp->qedi;
+ struct global_queue *que;
+ struct qed_sb_info *sb_info = fp->sb_info;
+ struct status_block *sb = sb_info->sb_virt;
+ u16 prod_idx;
+
+ barrier();
+
+ /* Get the current firmware producer index */
+ prod_idx = sb->pi_array[QEDI_PROTO_CQ_PROD_IDX];
+
+ /* Get the pointer to the global CQ this completion is on */
+ que = qedi->global_queues[fp->sb_id];
+
+ /* Handle wrap-around of the u16 producer index */
+ if (prod_idx >= QEDI_CQ_SIZE)
+ prod_idx = prod_idx % QEDI_CQ_SIZE;
+
+ return (que->cq_cons_idx != prod_idx);
+}
+
+/* MSI-X fastpath handler code */
+static irqreturn_t qedi_msix_handler(int irq, void *dev_id)
+{
+ struct qedi_fastpath *fp = dev_id;
+ struct qedi_ctx *qedi = fp->qedi;
+ bool wake_io_thread = true;
+
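+ /* Mask the status block while draining; it is re-enabled below once no work remains. */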
+ qed_sb_ack(fp->sb_info, IGU_INT_DISABLE, 0);
+
+process_again:
+ wake_io_thread = qedi_process_completions(fp);
+ if (wake_io_thread) {
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_DISC,
+ "process already running\n");
+ }
+
+ if (!qedi_fp_has_work(fp))
+ qed_sb_update_sb_idx(fp->sb_info);
+
+ /* Check for more work */
+ rmb();
+
+ if (!qedi_fp_has_work(fp))
+ qed_sb_ack(fp->sb_info, IGU_INT_ENABLE, 1);
+ else
+ goto process_again;
+
+ return IRQ_HANDLED;
+}
+
+/* simd handler for MSI/INTa */
+static void qedi_simd_int_handler(void *cookie)
+{
+ /* Cookie is qedi_ctx struct */
+ struct qedi_ctx *qedi = (struct qedi_ctx *)cookie;
+
+ QEDI_WARN(&qedi->dbg_ctx, "qedi=%p.\n", qedi);
+}
+
+#define QEDI_SIMD_HANDLER_NUM 0
+static void qedi_sync_free_irqs(struct qedi_ctx *qedi)
+{
+ int i;
+
+ if (qedi->int_info.msix_cnt) {
+ for (i = 0; i < qedi->int_info.used_cnt; i++) {
+ synchronize_irq(qedi->int_info.msix[i].vector);
+ irq_set_affinity_hint(qedi->int_info.msix[i].vector,
+ NULL);
+ free_irq(qedi->int_info.msix[i].vector,
+ &qedi->fp_array[i]);
+ }
+ } else {
+ qedi_ops->common->simd_handler_clean(qedi->cdev,
+ QEDI_SIMD_HANDLER_NUM);
+ }
+
+ qedi->int_info.used_cnt = 0;
+ qedi_ops->common->set_fp_int(qedi->cdev, 0);
+}
+
+static int qedi_request_msix_irq(struct qedi_ctx *qedi)
+{
+ int i, rc, cpu;
+
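+ /* Spread the fastpath vectors round-robin across online CPUs via affinity hints. */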
+ cpu = cpumask_first(cpu_online_mask);
+ for (i = 0; i < MIN_NUM_CPUS_MSIX(qedi); i++) {
+ rc = request_irq(qedi->int_info.msix[i].vector,
+ qedi_msix_handler, 0, "qedi",
+ &qedi->fp_array[i]);
+
+ if (rc) {
+ QEDI_WARN(&qedi->dbg_ctx, "request_irq failed.\n");
+ qedi_sync_free_irqs(qedi);
+ return rc;
+ }
+ qedi->int_info.used_cnt++;
+ rc = irq_set_affinity_hint(qedi->int_info.msix[i].vector,
+ get_cpu_mask(cpu));
+ cpu = cpumask_next(cpu, cpu_online_mask);
+ }
+
+ return 0;
+}
+
+static int qedi_setup_int(struct qedi_ctx *qedi)
+{
+ int rc = 0;
+
+ rc = qedi_ops->common->set_fp_int(qedi->cdev, num_online_cpus());
+ rc = qedi_ops->common->get_fp_int(qedi->cdev, &qedi->int_info);
+ if (rc)
+ goto exit_setup_int;
+
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_DISC,
+ "Number of msix_cnt = 0x%x num of cpus = 0x%x\n",
+ qedi->int_info.msix_cnt, num_online_cpus());
+
+ if (qedi->int_info.msix_cnt) {
+ rc = qedi_request_msix_irq(qedi);
+ goto exit_setup_int;
+ } else {
+ qedi_ops->common->simd_handler_config(qedi->cdev, &qedi,
+ QEDI_SIMD_HANDLER_NUM,
+ qedi_simd_int_handler);
+ qedi->int_info.used_cnt = 1;
+ }
+
+exit_setup_int:
+ return rc;
+}
+
+static void qedi_free_bdq(struct qedi_ctx *qedi)
+{
+ int i;
+
+ if (qedi->bdq_pbl_list)
+ dma_free_coherent(&qedi->pdev->dev, PAGE_SIZE,
+ qedi->bdq_pbl_list, qedi->bdq_pbl_list_dma);
+
+ if (qedi->bdq_pbl)
+ dma_free_coherent(&qedi->pdev->dev, qedi->bdq_pbl_mem_size,
+ qedi->bdq_pbl, qedi->bdq_pbl_dma);
+
+ for (i = 0; i < QEDI_BDQ_NUM; i++) {
+ if (qedi->bdq[i].buf_addr) {
+ dma_free_coherent(&qedi->pdev->dev, QEDI_BDQ_BUF_SIZE,
+ qedi->bdq[i].buf_addr,
+ qedi->bdq[i].buf_dma);
+ }
+ }
+}
+
+static void qedi_free_global_queues(struct qedi_ctx *qedi)
+{
+ int i;
+ struct global_queue **gl = qedi->global_queues;
+
+ for (i = 0; i < qedi->num_queues; i++) {
+ if (!gl[i])
+ continue;
+
+ if (gl[i]->cq)
+ dma_free_coherent(&qedi->pdev->dev, gl[i]->cq_mem_size,
+ gl[i]->cq, gl[i]->cq_dma);
+ if (gl[i]->cq_pbl)
+ dma_free_coherent(&qedi->pdev->dev, gl[i]->cq_pbl_size,
+ gl[i]->cq_pbl, gl[i]->cq_pbl_dma);
+
+ kfree(gl[i]);
+ }
+ qedi_free_bdq(qedi);
+}
+
+static int qedi_alloc_bdq(struct qedi_ctx *qedi)
+{
+ int i;
+ struct scsi_bd *pbl;
+ u64 *list;
+ dma_addr_t page;
+
+ /* Alloc dma memory for BDQ buffers */
+ for (i = 0; i < QEDI_BDQ_NUM; i++) {
+ qedi->bdq[i].buf_addr =
+ dma_alloc_coherent(&qedi->pdev->dev,
+ QEDI_BDQ_BUF_SIZE,
+ &qedi->bdq[i].buf_dma,
+ GFP_KERNEL);
+ if (!qedi->bdq[i].buf_addr) {
+ QEDI_ERR(&qedi->dbg_ctx,
+ "Could not allocate BDQ buffer %d.\n", i);
+ return -ENOMEM;
+ }
+ }
+
+ /* Alloc dma memory for BDQ page buffer list */
+ qedi->bdq_pbl_mem_size = QEDI_BDQ_NUM * sizeof(struct scsi_bd);
+ qedi->bdq_pbl_mem_size = ALIGN(qedi->bdq_pbl_mem_size, PAGE_SIZE);
+ qedi->rq_num_entries = qedi->bdq_pbl_mem_size / sizeof(struct scsi_bd);
+
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN, "rq_num_entries = %d.\n",
+ qedi->rq_num_entries);
+
+ qedi->bdq_pbl = dma_alloc_coherent(&qedi->pdev->dev,
+ qedi->bdq_pbl_mem_size,
+ &qedi->bdq_pbl_dma, GFP_KERNEL);
+ if (!qedi->bdq_pbl) {
+ QEDI_ERR(&qedi->dbg_ctx, "Could not allocate BDQ PBL.\n");
+ return -ENOMEM;
+ }
+
+ /*
+ * Populate BDQ PBL with physical and virtual address of individual
+ * BDQ buffers
+ */
+ pbl = (struct scsi_bd *)qedi->bdq_pbl;
+ for (i = 0; i < QEDI_BDQ_NUM; i++) {
+ pbl->address.hi =
+ cpu_to_le32(QEDI_U64_HI(qedi->bdq[i].buf_dma));
+ pbl->address.lo =
+ cpu_to_le32(QEDI_U64_LO(qedi->bdq[i].buf_dma));
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
+ "pbl [0x%p] pbl->address hi [0x%llx] lo [0x%llx], idx [%d]\n",
+ pbl, pbl->address.hi, pbl->address.lo, i);
+ pbl->opaque.hi = 0;
+ pbl->opaque.lo = cpu_to_le32(QEDI_U64_LO(i));
+ pbl++;
+ }
+
+ /* Allocate list of PBL pages */
+ qedi->bdq_pbl_list = dma_alloc_coherent(&qedi->pdev->dev,
+ PAGE_SIZE,
+ &qedi->bdq_pbl_list_dma,
+ GFP_KERNEL);
+ if (!qedi->bdq_pbl_list) {
+ QEDI_ERR(&qedi->dbg_ctx,
+ "Could not allocate list of PBL pages.\n");
+ return -ENOMEM;
+ }
+ memset(qedi->bdq_pbl_list, 0, PAGE_SIZE);
+
+ /*
+ * Now populate PBL list with pages that contain pointers to the
+ * individual buffers.
+ */
+ qedi->bdq_pbl_list_num_entries = qedi->bdq_pbl_mem_size / PAGE_SIZE;
+ list = (u64 *)qedi->bdq_pbl_list;
+ page = qedi->bdq_pbl_list_dma;
+ for (i = 0; i < qedi->bdq_pbl_list_num_entries; i++) {
+ *list = qedi->bdq_pbl_dma;
+ list++;
+ page += PAGE_SIZE;
+ }
+
+ return 0;
+}
+
+static int qedi_alloc_global_queues(struct qedi_ctx *qedi)
+{
+ u32 *list;
+ int i;
+ int status = 0, rc;
+ u32 *pbl;
+ dma_addr_t page;
+ int num_pages;
+
+ /*
+ * Number of global queues (CQ / RQ). This should
+ * be <= number of available MSIX vectors for the PF
+ */
+ if (!qedi->num_queues) {
+ QEDI_ERR(&qedi->dbg_ctx, "No MSI-X vectors available!\n");
+ return 1;
+ }
+
+ /* Make sure we allocated the PBL that will contain the physical
+ * addresses of our queues
+ */
+ if (!qedi->p_cpuq) {
+ status = 1;
+ goto mem_alloc_failure;
+ }
+
+ qedi->global_queues = kzalloc((sizeof(struct global_queue *) *
+ qedi->num_queues), GFP_KERNEL);
+ if (!qedi->global_queues) {
+ QEDI_ERR(&qedi->dbg_ctx,
+ "Unable to allocate global queues array ptr memory\n");
+ return -ENOMEM;
+ }
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_DISC,
+ "qedi->global_queues=%p.\n", qedi->global_queues);
+
+ /* Allocate DMA coherent buffers for BDQ */
+ rc = qedi_alloc_bdq(qedi);
+ if (rc)
+ goto mem_alloc_failure;
+
+ /* Allocate a CQ and an associated PBL for each MSI-X
+ * vector.
+ */
+ for (i = 0; i < qedi->num_queues; i++) {
+ qedi->global_queues[i] =
+ kzalloc(sizeof(*qedi->global_queues[0]),
+ GFP_KERNEL);
+ if (!qedi->global_queues[i]) {
+ QEDI_ERR(&qedi->dbg_ctx,
+ "Unable to allocation global queue %d.\n", i);
+ goto mem_alloc_failure;
+ }
+
+ qedi->global_queues[i]->cq_mem_size =
+ (QEDI_CQ_SIZE + 8) * sizeof(union iscsi_cqe);
+ qedi->global_queues[i]->cq_mem_size =
+ (qedi->global_queues[i]->cq_mem_size +
+ (QEDI_PAGE_SIZE - 1));
+
+ qedi->global_queues[i]->cq_pbl_size =
+ (qedi->global_queues[i]->cq_mem_size /
+ QEDI_PAGE_SIZE) * sizeof(void *);
+ qedi->global_queues[i]->cq_pbl_size =
+ (qedi->global_queues[i]->cq_pbl_size +
+ (QEDI_PAGE_SIZE - 1));
+
+ qedi->global_queues[i]->cq =
+ dma_alloc_coherent(&qedi->pdev->dev,
+ qedi->global_queues[i]->cq_mem_size,
+ &qedi->global_queues[i]->cq_dma,
+ GFP_KERNEL);
+
+ if (!qedi->global_queues[i]->cq) {
+ QEDI_WARN(&qedi->dbg_ctx,
+ "Could not allocate cq.\n");
+ status = -ENOMEM;
+ goto mem_alloc_failure;
+ }
+ memset(qedi->global_queues[i]->cq, 0,
+ qedi->global_queues[i]->cq_mem_size);
+
+ qedi->global_queues[i]->cq_pbl =
+ dma_alloc_coherent(&qedi->pdev->dev,
+ qedi->global_queues[i]->cq_pbl_size,
+ &qedi->global_queues[i]->cq_pbl_dma,
+ GFP_KERNEL);
+
+ if (!qedi->global_queues[i]->cq_pbl) {
+ QEDI_WARN(&qedi->dbg_ctx,
+ "Could not allocate cq PBL.\n");
+ status = -ENOMEM;
+ goto mem_alloc_failure;
+ }
+ memset(qedi->global_queues[i]->cq_pbl, 0,
+ qedi->global_queues[i]->cq_pbl_size);
+
+ /* Create PBL */
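+ /* Each PBL entry holds a 64-bit page DMA address, stored as two u32s with the low word first. */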
+ num_pages = qedi->global_queues[i]->cq_mem_size /
+ QEDI_PAGE_SIZE;
+ page = qedi->global_queues[i]->cq_dma;
+ pbl = (u32 *)qedi->global_queues[i]->cq_pbl;
+
+ while (num_pages--) {
+ *pbl = (u32)page;
+ pbl++;
+ *pbl = (u32)((u64)page >> 32);
+ pbl++;
+ page += QEDI_PAGE_SIZE;
+ }
+ }
+
+ list = (u32 *)qedi->p_cpuq;
+
+ /*
+ * The list is built as follows: CQ#0 PBL pointer, RQ#0 PBL pointer,
+ * CQ#1 PBL pointer, RQ#1 PBL pointer, etc. Each PBL pointer points
+ * to the physical address which contains an array of pointers to the
+ * physical addresses of the specific queue pages.
+ */
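+ /* Only the CQ PBL pointers are populated below; the RQ slots are written as zero. */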
+ for (i = 0; i < qedi->num_queues; i++) {
+ *list = (u32)qedi->global_queues[i]->cq_pbl_dma;
+ list++;
+ *list = (u32)((u64)qedi->global_queues[i]->cq_pbl_dma >> 32);
+ list++;
+
+ *list = (u32)0;
+ list++;
+ *list = (u32)((u64)0 >> 32);
+ list++;
+ }
+
+ return 0;
+
+mem_alloc_failure:
+ qedi_free_global_queues(qedi);
+ return status;
+}
+
+int qedi_alloc_sq(struct qedi_ctx *qedi, struct qedi_endpoint *ep)
+{
+ int rval = 0;
+ u32 *pbl;
+ dma_addr_t page;
+ int num_pages;
+
+ if (!ep)
+ return -EIO;
+
+ /* Calculate appropriate queue and PBL sizes */
+ ep->sq_mem_size = QEDI_SQ_SIZE * sizeof(struct iscsi_wqe);
+ ep->sq_mem_size += QEDI_PAGE_SIZE - 1;
+
+ ep->sq_pbl_size = (ep->sq_mem_size / QEDI_PAGE_SIZE) * sizeof(void *);
+ ep->sq_pbl_size = ep->sq_pbl_size + QEDI_PAGE_SIZE;
+
+ ep->sq = dma_alloc_coherent(&qedi->pdev->dev, ep->sq_mem_size,
+ &ep->sq_dma, GFP_KERNEL);
+ if (!ep->sq) {
+ QEDI_WARN(&qedi->dbg_ctx,
+ "Could not allocate send queue.\n");
+ rval = -ENOMEM;
+ goto out;
+ }
+ memset(ep->sq, 0, ep->sq_mem_size);
+
+ ep->sq_pbl = dma_alloc_coherent(&qedi->pdev->dev, ep->sq_pbl_size,
+ &ep->sq_pbl_dma, GFP_KERNEL);
+ if (!ep->sq_pbl) {
+ QEDI_WARN(&qedi->dbg_ctx,
+ "Could not allocate send queue PBL.\n");
+ rval = -ENOMEM;
+ goto out_free_sq;
+ }
+ memset(ep->sq_pbl, 0, ep->sq_pbl_size);
+
+ /* Create PBL */
+ num_pages = ep->sq_mem_size / QEDI_PAGE_SIZE;
+ page = ep->sq_dma;
+ pbl = (u32 *)ep->sq_pbl;
+
+ while (num_pages--) {
+ *pbl = (u32)page;
+ pbl++;
+ *pbl = (u32)((u64)page >> 32);
+ pbl++;
+ page += QEDI_PAGE_SIZE;
+ }
+
+ return rval;
+
+out_free_sq:
+ dma_free_coherent(&qedi->pdev->dev, ep->sq_mem_size, ep->sq,
+ ep->sq_dma);
+out:
+ return rval;
+}
+
+void qedi_free_sq(struct qedi_ctx *qedi, struct qedi_endpoint *ep)
+{
+ if (ep->sq_pbl)
+ dma_free_coherent(&qedi->pdev->dev, ep->sq_pbl_size, ep->sq_pbl,
+ ep->sq_pbl_dma);
+ if (ep->sq)
+ dma_free_coherent(&qedi->pdev->dev, ep->sq_mem_size, ep->sq,
+ ep->sq_dma);
+}
+
+int qedi_get_task_idx(struct qedi_ctx *qedi)
+{
+ s16 tmp_idx;
+
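+ /*
+ * Lock-free TID allocation: find a clear bit, then claim it with
+ * test_and_set_bit(); retry if another context wins the race.
+ */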
+again:
+ tmp_idx = find_first_zero_bit(qedi->task_idx_map,
+ MAX_ISCSI_TASK_ENTRIES);
+
+ if (tmp_idx >= MAX_ISCSI_TASK_ENTRIES) {
+ QEDI_ERR(&qedi->dbg_ctx, "FW task context pool is full.\n");
+ tmp_idx = -1;
+ goto err_idx;
+ }
+
+ if (test_and_set_bit(tmp_idx, qedi->task_idx_map))
+ goto again;
+
+err_idx:
+ return tmp_idx;
+}
+
+void qedi_clear_task_idx(struct qedi_ctx *qedi, int idx)
+{
+ if (!test_and_clear_bit(idx, qedi->task_idx_map)) {
+ QEDI_ERR(&qedi->dbg_ctx,
+ "FW task context, already cleared, tid=0x%x\n", idx);
+ WARN_ON(1);
+ }
+}
+
+void qedi_update_itt_map(struct qedi_ctx *qedi, u32 tid, u32 proto_itt,
+ struct qedi_cmd *cmd)
+{
+ qedi->itt_map[tid].itt = proto_itt;
+ qedi->itt_map[tid].p_cmd = cmd;
+
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
+ "update itt map tid=0x%x, with proto itt=0x%x\n", tid,
+ qedi->itt_map[tid].itt);
+}
+
+void qedi_get_task_tid(struct qedi_ctx *qedi, u32 itt, s16 *tid)
+{
+ u16 i;
+
+ for (i = 0; i < MAX_ISCSI_TASK_ENTRIES; i++) {
+ if (qedi->itt_map[i].itt == itt) {
+ *tid = i;
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
+ "Ref itt=0x%x, found at tid=0x%x\n",
+ itt, *tid);
+ return;
+ }
+ }
+
+ WARN_ON(1);
+}
+
+void qedi_get_proto_itt(struct qedi_ctx *qedi, u32 tid, u32 *proto_itt)
+{
+ *proto_itt = qedi->itt_map[tid].itt;
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
+ "Get itt map tid [0x%x with proto itt[0x%x]",
+ tid, *proto_itt);
+}
+
+struct qedi_cmd *qedi_get_cmd_from_tid(struct qedi_ctx *qedi, u32 tid)
+{
+ struct qedi_cmd *cmd = NULL;
+
+ if (tid >= MAX_ISCSI_TASK_ENTRIES)
+ return NULL;
+
+ cmd = qedi->itt_map[tid].p_cmd;
+ if (!cmd || cmd->task_id != tid)
+ return NULL;
+
+ qedi->itt_map[tid].p_cmd = NULL;
+
+ return cmd;
+}
+
+static int qedi_alloc_itt(struct qedi_ctx *qedi)
+{
+ qedi->itt_map = kcalloc(MAX_ISCSI_TASK_ENTRIES,
+ sizeof(struct qedi_itt_map), GFP_KERNEL);
+ if (!qedi->itt_map) {
+ QEDI_ERR(&qedi->dbg_ctx,
+ "Unable to allocate itt map array memory\n");
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+static void qedi_free_itt(struct qedi_ctx *qedi)
+{
+ kfree(qedi->itt_map);
+}
+
+static struct qed_ll2_cb_ops qedi_ll2_cb_ops = {
+ .rx_cb = qedi_ll2_rx,
+ .tx_cb = NULL,
+};
+
+static int qedi_percpu_io_thread(void *arg)
+{
+ struct qedi_percpu_s *p = arg;
+ struct qedi_work *work, *tmp;
+ unsigned long flags;
+ LIST_HEAD(work_list);
+
+ set_user_nice(current, -20);
+
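+ /* Batch-drain the shared list: splice it out under the lock, process the entries unlocked. */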
+ while (!kthread_should_stop()) {
+ spin_lock_irqsave(&p->p_work_lock, flags);
+ while (!list_empty(&p->work_list)) {
+ list_splice_init(&p->work_list, &work_list);
+ spin_unlock_irqrestore(&p->p_work_lock, flags);
+
+ list_for_each_entry_safe(work, tmp, &work_list, list) {
+ list_del_init(&work->list);
+ qedi_fp_process_cqes(work);
+ if (!work->is_solicited)
+ kfree(work);
+ }
+ cond_resched();
+ spin_lock_irqsave(&p->p_work_lock, flags);
+ }
+ set_current_state(TASK_INTERRUPTIBLE);
+ spin_unlock_irqrestore(&p->p_work_lock, flags);
+ schedule();
+ }
+ __set_current_state(TASK_RUNNING);
+
+ return 0;
+}
+
+static void qedi_percpu_thread_create(unsigned int cpu)
+{
+ struct qedi_percpu_s *p;
+ struct task_struct *thread;
+
+ p = &per_cpu(qedi_percpu, cpu);
+
+ thread = kthread_create_on_node(qedi_percpu_io_thread, (void *)p,
+ cpu_to_node(cpu),
+ "qedi_thread/%d", cpu);
+ if (likely(!IS_ERR(thread))) {
+ kthread_bind(thread, cpu);
+ p->iothread = thread;
+ wake_up_process(thread);
+ }
+}
+
+static void qedi_percpu_thread_destroy(unsigned int cpu)
+{
+ struct qedi_percpu_s *p;
+ struct task_struct *thread;
+ struct qedi_work *work, *tmp;
+
+ p = &per_cpu(qedi_percpu, cpu);
+ spin_lock_bh(&p->p_work_lock);
+ thread = p->iothread;
+ p->iothread = NULL;
+
+ list_for_each_entry_safe(work, tmp, &p->work_list, list) {
+ list_del_init(&work->list);
+ qedi_fp_process_cqes(work);
+ if (!work->is_solicited)
+ kfree(work);
+ }
+
+ spin_unlock_bh(&p->p_work_lock);
+ if (thread)
+ kthread_stop(thread);
+}
+
+static int qedi_cpu_callback(struct notifier_block *nfb,
+ unsigned long action, void *hcpu)
+{
+ unsigned int cpu = (unsigned long)hcpu;
+
+ switch (action) {
+ case CPU_ONLINE:
+ case CPU_ONLINE_FROZEN:
+ QEDI_ERR(NULL, "CPU %d online.\n", cpu);
+ qedi_percpu_thread_create(cpu);
+ break;
+ case CPU_DEAD:
+ case CPU_DEAD_FROZEN:
+ QEDI_ERR(NULL, "CPU %d offline.\n", cpu);
+ qedi_percpu_thread_destroy(cpu);
+ break;
+ default:
+ break;
+ }
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block qedi_cpu_notifier = {
+ .notifier_call = qedi_cpu_callback,
+};
+
+void qedi_reset_host_mtu(struct qedi_ctx *qedi, u16 mtu)
+{
+ struct qed_ll2_params params;
+
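+ /*
+ * An MTU change needs a full LL2 restart: recover all connections,
+ * stop LL2 and free pending skbs, then restart with the new MTU.
+ */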
+ qedi_recover_all_conns(qedi);
+
+ qedi_ops->ll2->stop(qedi->cdev);
+ qedi_ll2_free_skbs(qedi);
+
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO, "old MTU %u, new MTU %u\n",
+ qedi->ll2_mtu, mtu);
+ memset(&params, 0, sizeof(params));
+ qedi->ll2_mtu = mtu;
+ params.mtu = qedi->ll2_mtu + IPV6_HDR_LEN + TCP_HDR_LEN;
+ params.drop_ttl0_packets = 0;
+ params.rx_vlan_stripping = 1;
+ ether_addr_copy(params.ll2_mac_address, qedi->dev_info.common.hw_mac);
+ qedi_ops->ll2->start(qedi->cdev, &params);
+}
+
+static void __qedi_remove(struct pci_dev *pdev, int mode)
+{
+ struct qedi_ctx *qedi = pci_get_drvdata(pdev);
+
+ if (qedi->tmf_thread) {
+ flush_workqueue(qedi->tmf_thread);
+ destroy_workqueue(qedi->tmf_thread);
+ qedi->tmf_thread = NULL;
+ }
+
+ if (qedi->offload_thread) {
+ flush_workqueue(qedi->offload_thread);
+ destroy_workqueue(qedi->offload_thread);
+ qedi->offload_thread = NULL;
+ }
+
+#ifdef CONFIG_DEBUG_FS
+ qedi_dbg_host_exit(&qedi->dbg_ctx);
+#endif
+ if (!test_bit(QEDI_IN_OFFLINE, &qedi->flags))
+ qedi_ops->common->set_power_state(qedi->cdev, PCI_D0);
+
+ qedi_sync_free_irqs(qedi);
+
+ if (!test_bit(QEDI_IN_OFFLINE, &qedi->flags)) {
+ qedi_ops->stop(qedi->cdev);
+ qedi_ops->ll2->stop(qedi->cdev);
+ }
+
+ if (mode == QEDI_MODE_NORMAL)
+ qedi_free_iscsi_pf_param(qedi);
+
+ if (!test_bit(QEDI_IN_OFFLINE, &qedi->flags)) {
+ qedi_ops->common->slowpath_stop(qedi->cdev);
+ qedi_ops->common->remove(qedi->cdev);
+ }
+
+ qedi_destroy_fp(qedi);
+
+ if (mode == QEDI_MODE_NORMAL) {
+ qedi_release_cid_que(qedi);
+ qedi_cm_free_mem(qedi);
+ qedi_free_uio(qedi->udev);
+ qedi_free_itt(qedi);
+
+ iscsi_host_remove(qedi->shost);
+ iscsi_host_free(qedi->shost);
+
+ if (qedi->ll2_recv_thread) {
+ kthread_stop(qedi->ll2_recv_thread);
+ qedi->ll2_recv_thread = NULL;
+ }
+ qedi_ll2_free_skbs(qedi);
+ }
+}
+
+static int __qedi_probe(struct pci_dev *pdev, int mode)
+{
+ struct qedi_ctx *qedi;
+ struct qed_ll2_params params;
+ u32 dp_module = 0;
+ u8 dp_level = 0;
+ bool is_vf = false;
+ char host_buf[16];
+ struct qed_link_params link_params;
+ struct qed_slowpath_params sp_params;
+ struct qed_probe_params qed_params;
+ void *task_start, *task_end;
+ int rc;
+ u16 tmp;
+
+ if (mode != QEDI_MODE_RECOVERY) {
+ qedi = qedi_host_alloc(pdev);
+ if (!qedi) {
+ rc = -ENOMEM;
+ goto exit_probe;
+ }
+ } else {
+ qedi = pci_get_drvdata(pdev);
+ }
+
+ memset(&qed_params, 0, sizeof(qed_params));
+ qed_params.protocol = QED_PROTOCOL_ISCSI;
+ qed_params.dp_module = dp_module;
+ qed_params.dp_level = dp_level;
+ qed_params.is_vf = is_vf;
+ qedi->cdev = qedi_ops->common->probe(pdev, &qed_params);
+ if (!qedi->cdev) {
+ rc = -ENODEV;
+ QEDI_ERR(&qedi->dbg_ctx, "Cannot initialize hardware\n");
+ goto free_host;
+ }
+
+ qedi->msix_count = MAX_NUM_MSIX_PF;
+ atomic_set(&qedi->link_state, QEDI_LINK_DOWN);
+
+ if (mode != QEDI_MODE_RECOVERY) {
+ rc = qedi_set_iscsi_pf_param(qedi);
+ if (rc) {
+ rc = -ENOMEM;
+ QEDI_ERR(&qedi->dbg_ctx,
+ "Set iSCSI pf param fail\n");
+ goto free_host;
+ }
+ }
+
+ qedi_ops->common->update_pf_params(qedi->cdev, &qedi->pf_params);
+
+ rc = qedi_prepare_fp(qedi);
+ if (rc) {
+ QEDI_ERR(&qedi->dbg_ctx, "Cannot start slowpath.\n");
+ goto free_pf_params;
+ }
+
+ /* Start the Slowpath-process */
+ memset(&sp_params, 0, sizeof(struct qed_slowpath_params));
+ sp_params.int_mode = QED_INT_MODE_MSIX;
+ sp_params.drv_major = QEDI_DRIVER_MAJOR_VER;
+ sp_params.drv_minor = QEDI_DRIVER_MINOR_VER;
+ sp_params.drv_rev = QEDI_DRIVER_REV_VER;
+ sp_params.drv_eng = QEDI_DRIVER_ENG_VER;
+ strlcpy(sp_params.name, "qedi iSCSI", QED_DRV_VER_STR_SIZE);
+ rc = qedi_ops->common->slowpath_start(qedi->cdev, &sp_params);
+ if (rc) {
+ QEDI_ERR(&qedi->dbg_ctx, "Cannot start slowpath\n");
+ goto stop_hw;
+ }
+
+ /* update_pf_params needs to be called before and after slowpath
+ * start
+ */
+ qedi_ops->common->update_pf_params(qedi->cdev, &qedi->pf_params);
+
+ rc = qedi_setup_int(qedi);
+ if (rc)
+ goto stop_iscsi_func;
+
+ qedi_ops->common->set_power_state(qedi->cdev, PCI_D0);
+
+ /* Learn information crucial for qedi to progress */
+ rc = qedi_ops->fill_dev_info(qedi->cdev, &qedi->dev_info);
+ if (rc)
+ goto stop_iscsi_func;
+
+ /* Record BDQ producer doorbell addresses */
+ qedi->bdq_primary_prod = qedi->dev_info.primary_dbq_rq_addr;
+ qedi->bdq_secondary_prod = qedi->dev_info.secondary_bdq_rq_addr;
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_DISC,
+ "BDQ primary_prod=%p secondary_prod=%p.\n",
+ qedi->bdq_primary_prod,
+ qedi->bdq_secondary_prod);
+
+ /*
+ * We need to write the number of BDs in the BDQ we've preallocated so
+ * the f/w will do a prefetch and we'll get an unsolicited CQE when a
+ * packet arrives.
+ */
+ qedi->bdq_prod_idx = QEDI_BDQ_NUM;
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_DISC,
+ "Writing %d to primary and secondary BDQ doorbell registers.\n",
+ qedi->bdq_prod_idx);
+ writew(qedi->bdq_prod_idx, qedi->bdq_primary_prod);
+ tmp = readw(qedi->bdq_primary_prod);
+ writew(qedi->bdq_prod_idx, qedi->bdq_secondary_prod);
+ tmp = readw(qedi->bdq_secondary_prod);
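+ /* The read-backs above flush the posted doorbell writes. */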
+
+ ether_addr_copy(qedi->mac, qedi->dev_info.common.hw_mac);
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_DISC, "MAC address is %pM.\n",
+ qedi->mac);
+
+ sprintf(host_buf, "host_%d", qedi->shost->host_no);
+ qedi_ops->common->set_id(qedi->cdev, host_buf, QEDI_MODULE_VERSION);
+
+ qedi_ops->register_ops(qedi->cdev, &qedi_cb_ops, qedi);
+
+ memset(&params, 0, sizeof(params));
+ params.mtu = DEF_PATH_MTU + IPV6_HDR_LEN + TCP_HDR_LEN;
+ qedi->ll2_mtu = DEF_PATH_MTU;
+ params.drop_ttl0_packets = 0;
+ params.rx_vlan_stripping = 1;
+ ether_addr_copy(params.ll2_mac_address, qedi->dev_info.common.hw_mac);
+
+ if (mode != QEDI_MODE_RECOVERY) {
+ /* set up rx path */
+ INIT_LIST_HEAD(&qedi->ll2_skb_list);
+ spin_lock_init(&qedi->ll2_lock);
+ /* start qedi context */
+ spin_lock_init(&qedi->hba_lock);
+ spin_lock_init(&qedi->task_idx_lock);
+ }
+ qedi_ops->ll2->register_cb_ops(qedi->cdev, &qedi_ll2_cb_ops, qedi);
+ qedi_ops->ll2->start(qedi->cdev, &params);
+
+ if (mode != QEDI_MODE_RECOVERY) {
+ qedi->ll2_recv_thread = kthread_run(qedi_ll2_recv_thread,
+ (void *)qedi,
+ "qedi_ll2_thread");
+ }
+
+ rc = qedi_ops->start(qedi->cdev, &qedi->tasks,
+ qedi, qedi_iscsi_event_cb);
+ if (rc) {
+ rc = -ENODEV;
+ QEDI_ERR(&qedi->dbg_ctx, "Cannot start iSCSI function\n");
+ goto stop_slowpath;
+ }
+
+ task_start = qedi_get_task_mem(&qedi->tasks, 0);
+ task_end = qedi_get_task_mem(&qedi->tasks, MAX_TID_BLOCKS_ISCSI - 1);
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_DISC,
+ "Task context start=%p, end=%p block_size=%u.\n",
+ task_start, task_end, qedi->tasks.size);
+
+ memset(&link_params, 0, sizeof(link_params));
+ link_params.link_up = true;
+ rc = qedi_ops->common->set_link(qedi->cdev, &link_params);
+ if (rc) {
+ QEDI_WARN(&qedi->dbg_ctx, "Link set up failed.\n");
+ atomic_set(&qedi->link_state, QEDI_LINK_DOWN);
+ }
+
+#ifdef CONFIG_DEBUG_FS
+ qedi_dbg_host_init(&qedi->dbg_ctx, &qedi_debugfs_ops,
+ &qedi_dbg_fops);
+#endif
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
+ "QLogic FastLinQ iSCSI Module qedi %s, FW %d.%d.%d.%d\n",
+ QEDI_MODULE_VERSION, FW_MAJOR_VERSION, FW_MINOR_VERSION,
+ FW_REVISION_VERSION, FW_ENGINEERING_VERSION);
+
+ if (mode == QEDI_MODE_NORMAL) {
+ if (iscsi_host_add(qedi->shost, &pdev->dev)) {
+ QEDI_ERR(&qedi->dbg_ctx,
+ "Could not add iscsi host\n");
+ rc = -ENOMEM;
+ goto remove_host;
+ }
+
+ /* Allocate uio buffers */
+ rc = qedi_alloc_uio_rings(qedi);
+ if (rc) {
+ QEDI_ERR(&qedi->dbg_ctx,
+ "UIO alloc ring failed err=%d\n", rc);
+ goto remove_host;
+ }
+
+ rc = qedi_init_uio(qedi);
+ if (rc) {
+ QEDI_ERR(&qedi->dbg_ctx,
+ "UIO init failed, err=%d\n", rc);
+ goto free_uio;
+ }
+
+ /* host the array on iscsi_conn */
+ rc = qedi_setup_cid_que(qedi);
+ if (rc) {
+ QEDI_ERR(&qedi->dbg_ctx,
+ "Could not setup cid que\n");
+ goto free_uio;
+ }
+
+ rc = qedi_cm_alloc_mem(qedi);
+ if (rc) {
+ QEDI_ERR(&qedi->dbg_ctx,
+ "Could not alloc cm memory\n");
+ goto free_cid_que;
+ }
+
+ rc = qedi_alloc_itt(qedi);
+ if (rc) {
+ QEDI_ERR(&qedi->dbg_ctx,
+ "Could not alloc itt memory\n");
+ goto free_cid_que;
+ }
+
+ sprintf(host_buf, "host_%d", qedi->shost->host_no);
+ qedi->tmf_thread = create_singlethread_workqueue(host_buf);
+ if (!qedi->tmf_thread) {
+ QEDI_ERR(&qedi->dbg_ctx,
+ "Unable to start tmf thread!\n");
+ rc = -ENODEV;
+ goto free_cid_que;
+ }
+
+ sprintf(host_buf, "qedi_ofld%d", qedi->shost->host_no);
+ qedi->offload_thread = create_workqueue(host_buf);
+ if (!qedi->offload_thread) {
+ QEDI_ERR(&qedi->dbg_ctx,
+ "Unable to start offload thread!\n");
+ rc = -ENODEV;
+ goto free_cid_que;
+ }
+
+ /* F/w needs 1st task context memory entry for performance */
+ set_bit(QEDI_RESERVE_TASK_ID, qedi->task_idx_map);
+ atomic_set(&qedi->num_offloads, 0);
+ }
+
+ return 0;
+
+free_cid_que:
+ qedi_release_cid_que(qedi);
+free_uio:
+ qedi_free_uio(qedi->udev);
+remove_host:
+#ifdef CONFIG_DEBUG_FS
+ qedi_dbg_host_exit(&qedi->dbg_ctx);
+#endif
+ iscsi_host_remove(qedi->shost);
+stop_iscsi_func:
+ qedi_ops->stop(qedi->cdev);
+stop_slowpath:
+ qedi_ops->common->slowpath_stop(qedi->cdev);
+stop_hw:
+ qedi_ops->common->remove(qedi->cdev);
+free_pf_params:
+ qedi_free_iscsi_pf_param(qedi);
+free_host:
+ iscsi_host_free(qedi->shost);
+exit_probe:
+ return rc;
+}
+
+static int qedi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ return __qedi_probe(pdev, QEDI_MODE_NORMAL);
+}
+
+static void qedi_remove(struct pci_dev *pdev)
+{
+ __qedi_remove(pdev, QEDI_MODE_NORMAL);
+}
+
+static struct pci_device_id qedi_pci_tbl[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, 0x165E) },
+ { 0 },
+};
+MODULE_DEVICE_TABLE(pci, qedi_pci_tbl);
+
+static struct pci_driver qedi_pci_driver = {
+ .name = QEDI_MODULE_NAME,
+ .id_table = qedi_pci_tbl,
+ .probe = qedi_probe,
+ .remove = qedi_remove,
+};
+
+static int __init qedi_init(void)
+{
+ int rc = 0;
+ struct qedi_percpu_s *p;
+ unsigned int cpu = 0;
+
+ qedi_ops = qed_get_iscsi_ops();
+ if (!qedi_ops) {
+ QEDI_ERR(NULL, "Failed to get qed iSCSI operations\n");
+ rc = -EINVAL;
+ goto exit_qedi_init_0;
+ }
+
+#ifdef CONFIG_DEBUG_FS
+ qedi_dbg_init("qedi");
+#endif
+
+ qedi_scsi_transport = iscsi_register_transport(&qedi_iscsi_transport);
+ if (!qedi_scsi_transport) {
+ QEDI_ERR(NULL, "Could not register qedi transport");
+ rc = -ENOMEM;
+ goto exit_qedi_init_1;
+ }
+
+ register_hotcpu_notifier(&qedi_cpu_notifier);
+
+ rc = pci_register_driver(&qedi_pci_driver);
+ if (rc) {
+ QEDI_ERR(NULL, "Failed to register driver\n");
+ goto exit_qedi_init_2;
+ }
+
+ for_each_possible_cpu(cpu) {
+ p = &per_cpu(qedi_percpu, cpu);
+ INIT_LIST_HEAD(&p->work_list);
+ spin_lock_init(&p->p_work_lock);
+ p->iothread = NULL;
+ }
+
+ for_each_online_cpu(cpu)
+ qedi_percpu_thread_create(cpu);
+
+ return rc;
+
+exit_qedi_init_2:
+ iscsi_unregister_transport(&qedi_iscsi_transport);
+exit_qedi_init_1:
+#ifdef CONFIG_DEBUG_FS
+ qedi_dbg_exit();
+#endif
+ qed_put_iscsi_ops();
+exit_qedi_init_0:
+ return rc;
+}
+
+static void __exit qedi_cleanup(void)
+{
+ unsigned int cpu = 0;
+
+ for_each_online_cpu(cpu)
+ qedi_percpu_thread_destroy(cpu);
+
+ pci_unregister_driver(&qedi_pci_driver);
+ unregister_hotcpu_notifier(&qedi_cpu_notifier);
+ iscsi_unregister_transport(&qedi_iscsi_transport);
+
+#ifdef CONFIG_DEBUG_FS
+ qedi_dbg_exit();
+#endif
+ qed_put_iscsi_ops();
+}
+
+MODULE_DESCRIPTION("QLogic FastLinQ 4xxxx iSCSI Module");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("QLogic Corporation");
+MODULE_VERSION(QEDI_MODULE_VERSION);
+module_init(qedi_init);
+module_exit(qedi_cleanup);
diff --git a/drivers/scsi/qedi/qedi_sysfs.c b/drivers/scsi/qedi/qedi_sysfs.c
new file mode 100644
index 000000000000..b10c48bd1428
--- /dev/null
+++ b/drivers/scsi/qedi/qedi_sysfs.c
@@ -0,0 +1,52 @@
+/*
+ * QLogic iSCSI Offload Driver
+ * Copyright (c) 2016 Cavium Inc.
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#include "qedi.h"
+#include "qedi_gbl.h"
+#include "qedi_iscsi.h"
+#include "qedi_dbg.h"
+
+static inline struct qedi_ctx *qedi_dev_to_hba(struct device *dev)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+
+ return iscsi_host_priv(shost);
+}
+
+static ssize_t qedi_show_port_state(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct qedi_ctx *qedi = qedi_dev_to_hba(dev);
+
+ if (atomic_read(&qedi->link_state) == QEDI_LINK_UP)
+ return sprintf(buf, "Online\n");
+ else
+ return sprintf(buf, "Linkdown\n");
+}
+
+static ssize_t qedi_show_speed(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct qedi_ctx *qedi = qedi_dev_to_hba(dev);
+ struct qed_link_output if_link;
+
+ qedi_ops->common->get_link(qedi->cdev, &if_link);
+
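+ /* qed reports link speed in Mbps; convert to Gbit for display. */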
+ return sprintf(buf, "%d Gbit\n", if_link.speed / 1000);
+}
+
+static DEVICE_ATTR(port_state, 0444, qedi_show_port_state, NULL);
+static DEVICE_ATTR(speed, 0444, qedi_show_speed, NULL);
+
+struct device_attribute *qedi_shost_attrs[] = {
+ &dev_attr_port_state,
+ &dev_attr_speed,
+ NULL
+};
diff --git a/drivers/scsi/qedi/qedi_version.h b/drivers/scsi/qedi/qedi_version.h
new file mode 100644
index 000000000000..9543a1b139d4
--- /dev/null
+++ b/drivers/scsi/qedi/qedi_version.h
@@ -0,0 +1,14 @@
+/*
+ * QLogic iSCSI Offload Driver
+ * Copyright (c) 2016 Cavium Inc.
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#define QEDI_MODULE_VERSION "8.10.3.0"
+#define QEDI_DRIVER_MAJOR_VER 8
+#define QEDI_DRIVER_MINOR_VER 10
+#define QEDI_DRIVER_REV_VER 3
+#define QEDI_DRIVER_ENG_VER 0
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index fe7469c901f7..47eb4d545d13 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -1988,9 +1988,9 @@ qla24xx_vport_create(struct fc_vport *fc_vport, bool disable)
scsi_qla_host_t *base_vha = shost_priv(fc_vport->shost);
scsi_qla_host_t *vha = NULL;
struct qla_hw_data *ha = base_vha->hw;
- uint16_t options = 0;
int cnt;
struct req_que *req = ha->req_q_map[0];
+ struct qla_qpair *qpair;
ret = qla24xx_vport_create_req_sanity_check(fc_vport);
if (ret) {
@@ -2075,15 +2075,9 @@ qla24xx_vport_create(struct fc_vport *fc_vport, bool disable)
qlt_vport_create(vha, ha);
qla24xx_vport_disable(fc_vport, disable);
- if (ha->flags.cpu_affinity_enabled) {
- req = ha->req_q_map[1];
- ql_dbg(ql_dbg_multiq, vha, 0xc000,
- "Request queue %p attached with "
- "VP[%d], cpu affinity =%d\n",
- req, vha->vp_idx, ha->flags.cpu_affinity_enabled);
- goto vport_queue;
- } else if (ql2xmaxqueues == 1 || !ha->npiv_info)
+ if (!ql2xmqsupport || !ha->npiv_info)
goto vport_queue;
+
/* Create a request queue in QoS mode for the vport */
for (cnt = 0; cnt < ha->nvram_npiv_size; cnt++) {
if (memcmp(ha->npiv_info[cnt].port_name, vha->port_name, 8) == 0
@@ -2095,20 +2089,20 @@ qla24xx_vport_create(struct fc_vport *fc_vport, bool disable)
}
if (qos) {
- ret = qla25xx_create_req_que(ha, options, vha->vp_idx, 0, 0,
- qos);
- if (!ret)
+ qpair = qla2xxx_create_qpair(vha, qos, vha->vp_idx);
+ if (!qpair)
ql_log(ql_log_warn, vha, 0x7084,
- "Can't create request queue for VP[%d]\n",
+ "Can't create qpair for VP[%d]\n",
vha->vp_idx);
else {
ql_dbg(ql_dbg_multiq, vha, 0xc001,
- "Request Que:%d Q0s: %d) created for VP[%d]\n",
- ret, qos, vha->vp_idx);
+ "Queue pair: %d Qos: %d) created for VP[%d]\n",
+ qpair->id, qos, vha->vp_idx);
ql_dbg(ql_dbg_user, vha, 0x7085,
- "Request Que:%d Q0s: %d) created for VP[%d]\n",
- ret, qos, vha->vp_idx);
- req = ha->req_q_map[ret];
+ "Queue Pair: %d Qos: %d) created for VP[%d]\n",
+ qpair->id, qos, vha->vp_idx);
+ req = qpair->req;
+ vha->qpair = qpair;
}
}
@@ -2162,10 +2156,10 @@ qla24xx_vport_delete(struct fc_vport *fc_vport)
clear_bit(vha->vp_idx, ha->vp_idx_map);
mutex_unlock(&ha->vport_lock);
- if (vha->req->id && !ha->flags.cpu_affinity_enabled) {
- if (qla25xx_delete_req_que(vha, vha->req) != QLA_SUCCESS)
+ if (vha->qpair->vp_idx == vha->vp_idx) {
+ if (qla2xxx_delete_qpair(vha, vha->qpair) != QLA_SUCCESS)
ql_log(ql_log_warn, vha, 0x7087,
- "Queue delete failed.\n");
+ "Queue Pair delete failed.\n");
}
ql_log(ql_log_info, vha, 0x7088, "VP[%d] deleted.\n", id);
diff --git a/drivers/scsi/qla2xxx/qla_bsg.c b/drivers/scsi/qla2xxx/qla_bsg.c
index 643014f82f7d..1bf8061ff803 100644
--- a/drivers/scsi/qla2xxx/qla_bsg.c
+++ b/drivers/scsi/qla2xxx/qla_bsg.c
@@ -1,4 +1,4 @@
-/*
+ /*
* QLogic Fibre Channel HBA Driver
* Copyright (c) 2003-2014 QLogic Corporation
*
@@ -9,6 +9,7 @@
#include <linux/kthread.h>
#include <linux/vmalloc.h>
#include <linux/delay.h>
+#include <linux/bsg-lib.h>
/* BSG support for ELS/CT pass through */
void
@@ -16,10 +17,12 @@ qla2x00_bsg_job_done(void *data, void *ptr, int res)
{
srb_t *sp = (srb_t *)ptr;
struct scsi_qla_host *vha = (scsi_qla_host_t *)data;
- struct fc_bsg_job *bsg_job = sp->u.bsg_job;
+ struct bsg_job *bsg_job = sp->u.bsg_job;
+ struct fc_bsg_reply *bsg_reply = bsg_job->reply;
- bsg_job->reply->result = res;
- bsg_job->job_done(bsg_job);
+ bsg_reply->result = res;
+ bsg_job_done(bsg_job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
sp->free(vha, sp);
}
@@ -28,13 +31,15 @@ qla2x00_bsg_sp_free(void *data, void *ptr)
{
srb_t *sp = (srb_t *)ptr;
struct scsi_qla_host *vha = sp->fcport->vha;
- struct fc_bsg_job *bsg_job = sp->u.bsg_job;
+ struct bsg_job *bsg_job = sp->u.bsg_job;
+ struct fc_bsg_request *bsg_request = bsg_job->request;
+
struct qla_hw_data *ha = vha->hw;
struct qla_mt_iocb_rqst_fx00 *piocb_rqst;
if (sp->type == SRB_FXIOCB_BCMD) {
piocb_rqst = (struct qla_mt_iocb_rqst_fx00 *)
- &bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];
+ &bsg_request->rqst_data.h_vendor.vendor_cmd[1];
if (piocb_rqst->flags & SRB_FXDISC_REQ_DMA_VALID)
dma_unmap_sg(&ha->pdev->dev,
@@ -116,9 +121,11 @@ qla24xx_fcp_prio_cfg_valid(scsi_qla_host_t *vha,
}
static int
-qla24xx_proc_fcp_prio_cfg_cmd(struct fc_bsg_job *bsg_job)
+qla24xx_proc_fcp_prio_cfg_cmd(struct bsg_job *bsg_job)
{
- struct Scsi_Host *host = bsg_job->shost;
+ struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
+ struct fc_bsg_request *bsg_request = bsg_job->request;
+ struct fc_bsg_reply *bsg_reply = bsg_job->reply;
scsi_qla_host_t *vha = shost_priv(host);
struct qla_hw_data *ha = vha->hw;
int ret = 0;
@@ -131,7 +138,7 @@ qla24xx_proc_fcp_prio_cfg_cmd(struct fc_bsg_job *bsg_job)
}
/* Get the sub command */
- oper = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];
+ oper = bsg_request->rqst_data.h_vendor.vendor_cmd[1];
/* Only set config is allowed if config memory is not allocated */
if (!ha->fcp_prio_cfg && (oper != QLFC_FCP_PRIO_SET_CONFIG)) {
@@ -145,10 +152,10 @@ qla24xx_proc_fcp_prio_cfg_cmd(struct fc_bsg_job *bsg_job)
ha->fcp_prio_cfg->attributes &=
~FCP_PRIO_ATTR_ENABLE;
qla24xx_update_all_fcp_prio(vha);
- bsg_job->reply->result = DID_OK;
+ bsg_reply->result = DID_OK;
} else {
ret = -EINVAL;
- bsg_job->reply->result = (DID_ERROR << 16);
+ bsg_reply->result = (DID_ERROR << 16);
goto exit_fcp_prio_cfg;
}
break;
@@ -160,10 +167,10 @@ qla24xx_proc_fcp_prio_cfg_cmd(struct fc_bsg_job *bsg_job)
ha->fcp_prio_cfg->attributes |=
FCP_PRIO_ATTR_ENABLE;
qla24xx_update_all_fcp_prio(vha);
- bsg_job->reply->result = DID_OK;
+ bsg_reply->result = DID_OK;
} else {
ret = -EINVAL;
- bsg_job->reply->result = (DID_ERROR << 16);
+ bsg_reply->result = (DID_ERROR << 16);
goto exit_fcp_prio_cfg;
}
}
@@ -173,12 +180,12 @@ qla24xx_proc_fcp_prio_cfg_cmd(struct fc_bsg_job *bsg_job)
len = bsg_job->reply_payload.payload_len;
if (!len || len > FCP_PRIO_CFG_SIZE) {
ret = -EINVAL;
- bsg_job->reply->result = (DID_ERROR << 16);
+ bsg_reply->result = (DID_ERROR << 16);
goto exit_fcp_prio_cfg;
}
- bsg_job->reply->result = DID_OK;
- bsg_job->reply->reply_payload_rcv_len =
+ bsg_reply->result = DID_OK;
+ bsg_reply->reply_payload_rcv_len =
sg_copy_from_buffer(
bsg_job->reply_payload.sg_list,
bsg_job->reply_payload.sg_cnt, ha->fcp_prio_cfg,
@@ -189,7 +196,7 @@ qla24xx_proc_fcp_prio_cfg_cmd(struct fc_bsg_job *bsg_job)
case QLFC_FCP_PRIO_SET_CONFIG:
len = bsg_job->request_payload.payload_len;
if (!len || len > FCP_PRIO_CFG_SIZE) {
- bsg_job->reply->result = (DID_ERROR << 16);
+ bsg_reply->result = (DID_ERROR << 16);
ret = -EINVAL;
goto exit_fcp_prio_cfg;
}
@@ -200,7 +207,7 @@ qla24xx_proc_fcp_prio_cfg_cmd(struct fc_bsg_job *bsg_job)
ql_log(ql_log_warn, vha, 0x7050,
"Unable to allocate memory for fcp prio "
"config data (%x).\n", FCP_PRIO_CFG_SIZE);
- bsg_job->reply->result = (DID_ERROR << 16);
+ bsg_reply->result = (DID_ERROR << 16);
ret = -ENOMEM;
goto exit_fcp_prio_cfg;
}
@@ -215,7 +222,7 @@ qla24xx_proc_fcp_prio_cfg_cmd(struct fc_bsg_job *bsg_job)
if (!qla24xx_fcp_prio_cfg_valid(vha,
(struct qla_fcp_prio_cfg *) ha->fcp_prio_cfg, 1)) {
- bsg_job->reply->result = (DID_ERROR << 16);
+ bsg_reply->result = (DID_ERROR << 16);
ret = -EINVAL;
/* If the buffer was invalid, then
* fcp_prio_cfg is of no use
@@ -229,7 +236,7 @@ qla24xx_proc_fcp_prio_cfg_cmd(struct fc_bsg_job *bsg_job)
if (ha->fcp_prio_cfg->attributes & FCP_PRIO_ATTR_ENABLE)
ha->flags.fcp_prio_enabled = 1;
qla24xx_update_all_fcp_prio(vha);
- bsg_job->reply->result = DID_OK;
+ bsg_reply->result = DID_OK;
break;
default:
ret = -EINVAL;
@@ -237,13 +244,15 @@ qla24xx_proc_fcp_prio_cfg_cmd(struct fc_bsg_job *bsg_job)
}
exit_fcp_prio_cfg:
if (!ret)
- bsg_job->job_done(bsg_job);
+ bsg_job_done(bsg_job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
return ret;
}
static int
-qla2x00_process_els(struct fc_bsg_job *bsg_job)
+qla2x00_process_els(struct bsg_job *bsg_job)
{
+ struct fc_bsg_request *bsg_request = bsg_job->request;
struct fc_rport *rport;
fc_port_t *fcport = NULL;
struct Scsi_Host *host;
@@ -255,15 +264,15 @@ qla2x00_process_els(struct fc_bsg_job *bsg_job)
int rval = (DRIVER_ERROR << 16);
uint16_t nextlid = 0;
- if (bsg_job->request->msgcode == FC_BSG_RPT_ELS) {
- rport = bsg_job->rport;
+ if (bsg_request->msgcode == FC_BSG_RPT_ELS) {
+ rport = fc_bsg_to_rport(bsg_job);
fcport = *(fc_port_t **) rport->dd_data;
host = rport_to_shost(rport);
vha = shost_priv(host);
ha = vha->hw;
type = "FC_BSG_RPT_ELS";
} else {
- host = bsg_job->shost;
+ host = fc_bsg_to_shost(bsg_job);
vha = shost_priv(host);
ha = vha->hw;
type = "FC_BSG_HST_ELS_NOLOGIN";
@@ -296,7 +305,7 @@ qla2x00_process_els(struct fc_bsg_job *bsg_job)
}
/* ELS request for rport */
- if (bsg_job->request->msgcode == FC_BSG_RPT_ELS) {
+ if (bsg_request->msgcode == FC_BSG_RPT_ELS) {
/* make sure the rport is logged in,
* if not perform fabric login
*/
@@ -322,11 +331,11 @@ qla2x00_process_els(struct fc_bsg_job *bsg_job)
/* Initialize all required fields of fcport */
fcport->vha = vha;
fcport->d_id.b.al_pa =
- bsg_job->request->rqst_data.h_els.port_id[0];
+ bsg_request->rqst_data.h_els.port_id[0];
fcport->d_id.b.area =
- bsg_job->request->rqst_data.h_els.port_id[1];
+ bsg_request->rqst_data.h_els.port_id[1];
fcport->d_id.b.domain =
- bsg_job->request->rqst_data.h_els.port_id[2];
+ bsg_request->rqst_data.h_els.port_id[2];
fcport->loop_id =
(fcport->d_id.b.al_pa == 0xFD) ?
NPH_FABRIC_CONTROLLER : NPH_F_PORT;
@@ -366,11 +375,11 @@ qla2x00_process_els(struct fc_bsg_job *bsg_job)
}
sp->type =
- (bsg_job->request->msgcode == FC_BSG_RPT_ELS ?
- SRB_ELS_CMD_RPT : SRB_ELS_CMD_HST);
+ (bsg_request->msgcode == FC_BSG_RPT_ELS ?
+ SRB_ELS_CMD_RPT : SRB_ELS_CMD_HST);
sp->name =
- (bsg_job->request->msgcode == FC_BSG_RPT_ELS ?
- "bsg_els_rpt" : "bsg_els_hst");
+ (bsg_request->msgcode == FC_BSG_RPT_ELS ?
+ "bsg_els_rpt" : "bsg_els_hst");
sp->u.bsg_job = bsg_job;
sp->free = qla2x00_bsg_sp_free;
sp->done = qla2x00_bsg_job_done;
@@ -378,7 +387,7 @@ qla2x00_process_els(struct fc_bsg_job *bsg_job)
ql_dbg(ql_dbg_user, vha, 0x700a,
"bsg rqst type: %s els type: %x - loop-id=%x "
"portid=%-2x%02x%02x.\n", type,
- bsg_job->request->rqst_data.h_els.command_code, fcport->loop_id,
+ bsg_request->rqst_data.h_els.command_code, fcport->loop_id,
fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa);
rval = qla2x00_start_sp(sp);
@@ -399,7 +408,7 @@ done_unmap_sg:
goto done_free_fcport;
done_free_fcport:
- if (bsg_job->request->msgcode == FC_BSG_RPT_ELS)
+ if (bsg_request->msgcode == FC_BSG_RPT_ELS)
kfree(fcport);
done:
return rval;
@@ -420,10 +429,11 @@ qla24xx_calc_ct_iocbs(uint16_t dsds)
}
static int
-qla2x00_process_ct(struct fc_bsg_job *bsg_job)
+qla2x00_process_ct(struct bsg_job *bsg_job)
{
srb_t *sp;
- struct Scsi_Host *host = bsg_job->shost;
+ struct fc_bsg_request *bsg_request = bsg_job->request;
+ struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
scsi_qla_host_t *vha = shost_priv(host);
struct qla_hw_data *ha = vha->hw;
int rval = (DRIVER_ERROR << 16);
@@ -469,7 +479,7 @@ qla2x00_process_ct(struct fc_bsg_job *bsg_job)
}
loop_id =
- (bsg_job->request->rqst_data.h_ct.preamble_word1 & 0xFF000000)
+ (bsg_request->rqst_data.h_ct.preamble_word1 & 0xFF000000)
>> 24;
switch (loop_id) {
case 0xFC:
@@ -500,9 +510,9 @@ qla2x00_process_ct(struct fc_bsg_job *bsg_job)
/* Initialize all required fields of fcport */
fcport->vha = vha;
- fcport->d_id.b.al_pa = bsg_job->request->rqst_data.h_ct.port_id[0];
- fcport->d_id.b.area = bsg_job->request->rqst_data.h_ct.port_id[1];
- fcport->d_id.b.domain = bsg_job->request->rqst_data.h_ct.port_id[2];
+ fcport->d_id.b.al_pa = bsg_request->rqst_data.h_ct.port_id[0];
+ fcport->d_id.b.area = bsg_request->rqst_data.h_ct.port_id[1];
+ fcport->d_id.b.domain = bsg_request->rqst_data.h_ct.port_id[2];
fcport->loop_id = loop_id;
/* Alloc SRB structure */
@@ -524,7 +534,7 @@ qla2x00_process_ct(struct fc_bsg_job *bsg_job)
ql_dbg(ql_dbg_user, vha, 0x7016,
"bsg rqst type: %s else type: %x - "
"loop-id=%x portid=%02x%02x%02x.\n", type,
- (bsg_job->request->rqst_data.h_ct.preamble_word2 >> 16),
+ (bsg_request->rqst_data.h_ct.preamble_word2 >> 16),
fcport->loop_id, fcport->d_id.b.domain, fcport->d_id.b.area,
fcport->d_id.b.al_pa);
@@ -697,9 +707,11 @@ done_set_internal:
}
static int
-qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
+qla2x00_process_loopback(struct bsg_job *bsg_job)
{
- struct Scsi_Host *host = bsg_job->shost;
+ struct fc_bsg_request *bsg_request = bsg_job->request;
+ struct fc_bsg_reply *bsg_reply = bsg_job->reply;
+ struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
scsi_qla_host_t *vha = shost_priv(host);
struct qla_hw_data *ha = vha->hw;
int rval;
@@ -780,9 +792,9 @@ qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
elreq.rcv_dma = rsp_data_dma;
elreq.transfer_size = req_data_len;
- elreq.options = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];
+ elreq.options = bsg_request->rqst_data.h_vendor.vendor_cmd[1];
elreq.iteration_count =
- bsg_job->request->rqst_data.h_vendor.vendor_cmd[2];
+ bsg_request->rqst_data.h_vendor.vendor_cmd[2];
if (atomic_read(&vha->loop_state) == LOOP_READY &&
(ha->current_topology == ISP_CFG_F ||
@@ -896,12 +908,12 @@ qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
"Vendor request %s failed.\n", type);
rval = 0;
- bsg_job->reply->result = (DID_ERROR << 16);
- bsg_job->reply->reply_payload_rcv_len = 0;
+ bsg_reply->result = (DID_ERROR << 16);
+ bsg_reply->reply_payload_rcv_len = 0;
} else {
ql_dbg(ql_dbg_user, vha, 0x702d,
"Vendor request %s completed.\n", type);
- bsg_job->reply->result = (DID_OK << 16);
+ bsg_reply->result = (DID_OK << 16);
sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
bsg_job->reply_payload.sg_cnt, rsp_data,
rsp_data_len);
@@ -930,14 +942,17 @@ done_unmap_req_sg:
bsg_job->request_payload.sg_list,
bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
if (!rval)
- bsg_job->job_done(bsg_job);
+ bsg_job_done(bsg_job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
return rval;
}
static int
-qla84xx_reset(struct fc_bsg_job *bsg_job)
+qla84xx_reset(struct bsg_job *bsg_job)
{
- struct Scsi_Host *host = bsg_job->shost;
+ struct fc_bsg_request *bsg_request = bsg_job->request;
+ struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
+ struct fc_bsg_reply *bsg_reply = bsg_job->reply;
scsi_qla_host_t *vha = shost_priv(host);
struct qla_hw_data *ha = vha->hw;
int rval = 0;
@@ -948,7 +963,7 @@ qla84xx_reset(struct fc_bsg_job *bsg_job)
return -EINVAL;
}
- flag = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];
+ flag = bsg_request->rqst_data.h_vendor.vendor_cmd[1];
rval = qla84xx_reset_chip(vha, flag == A84_ISSUE_RESET_DIAG_FW);
@@ -960,17 +975,20 @@ qla84xx_reset(struct fc_bsg_job *bsg_job)
} else {
ql_dbg(ql_dbg_user, vha, 0x7031,
"Vendor request 84xx reset completed.\n");
- bsg_job->reply->result = DID_OK;
- bsg_job->job_done(bsg_job);
+ bsg_reply->result = DID_OK;
+ bsg_job_done(bsg_job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
}
return rval;
}
static int
-qla84xx_updatefw(struct fc_bsg_job *bsg_job)
+qla84xx_updatefw(struct bsg_job *bsg_job)
{
- struct Scsi_Host *host = bsg_job->shost;
+ struct fc_bsg_request *bsg_request = bsg_job->request;
+ struct fc_bsg_reply *bsg_reply = bsg_job->reply;
+ struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
scsi_qla_host_t *vha = shost_priv(host);
struct qla_hw_data *ha = vha->hw;
struct verify_chip_entry_84xx *mn = NULL;
@@ -1027,7 +1045,7 @@ qla84xx_updatefw(struct fc_bsg_job *bsg_job)
goto done_free_fw_buf;
}
- flag = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];
+ flag = bsg_request->rqst_data.h_vendor.vendor_cmd[1];
fw_ver = le32_to_cpu(*((uint32_t *)((uint32_t *)fw_buf + 2)));
memset(mn, 0, sizeof(struct access_chip_84xx));
@@ -1059,7 +1077,7 @@ qla84xx_updatefw(struct fc_bsg_job *bsg_job)
"Vendor request 84xx updatefw completed.\n");
bsg_job->reply_len = sizeof(struct fc_bsg_reply);
- bsg_job->reply->result = DID_OK;
+ bsg_reply->result = DID_OK;
}
dma_pool_free(ha->s_dma_pool, mn, mn_dma);
@@ -1072,14 +1090,17 @@ done_unmap_sg:
bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
if (!rval)
- bsg_job->job_done(bsg_job);
+ bsg_job_done(bsg_job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
return rval;
}
static int
-qla84xx_mgmt_cmd(struct fc_bsg_job *bsg_job)
+qla84xx_mgmt_cmd(struct bsg_job *bsg_job)
{
- struct Scsi_Host *host = bsg_job->shost;
+ struct fc_bsg_request *bsg_request = bsg_job->request;
+ struct fc_bsg_reply *bsg_reply = bsg_job->reply;
+ struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
scsi_qla_host_t *vha = shost_priv(host);
struct qla_hw_data *ha = vha->hw;
struct access_chip_84xx *mn = NULL;
@@ -1107,7 +1128,7 @@ qla84xx_mgmt_cmd(struct fc_bsg_job *bsg_job)
memset(mn, 0, sizeof(struct access_chip_84xx));
mn->entry_type = ACCESS_CHIP_IOCB_TYPE;
mn->entry_count = 1;
- ql84_mgmt = (void *)bsg_job->request + sizeof(struct fc_bsg_request);
+ ql84_mgmt = (void *)bsg_request + sizeof(struct fc_bsg_request);
switch (ql84_mgmt->mgmt.cmd) {
case QLA84_MGMT_READ_MEM:
case QLA84_MGMT_GET_INFO:
@@ -1239,11 +1260,11 @@ qla84xx_mgmt_cmd(struct fc_bsg_job *bsg_job)
"Vendor request 84xx mgmt completed.\n");
bsg_job->reply_len = sizeof(struct fc_bsg_reply);
- bsg_job->reply->result = DID_OK;
+ bsg_reply->result = DID_OK;
if ((ql84_mgmt->mgmt.cmd == QLA84_MGMT_READ_MEM) ||
(ql84_mgmt->mgmt.cmd == QLA84_MGMT_GET_INFO)) {
- bsg_job->reply->reply_payload_rcv_len =
+ bsg_reply->reply_payload_rcv_len =
bsg_job->reply_payload.payload_len;
sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
@@ -1267,14 +1288,17 @@ exit_mgmt:
dma_pool_free(ha->s_dma_pool, mn, mn_dma);
if (!rval)
- bsg_job->job_done(bsg_job);
+ bsg_job_done(bsg_job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
return rval;
}
static int
-qla24xx_iidma(struct fc_bsg_job *bsg_job)
+qla24xx_iidma(struct bsg_job *bsg_job)
{
- struct Scsi_Host *host = bsg_job->shost;
+ struct fc_bsg_request *bsg_request = bsg_job->request;
+ struct fc_bsg_reply *bsg_reply = bsg_job->reply;
+ struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
scsi_qla_host_t *vha = shost_priv(host);
int rval = 0;
struct qla_port_param *port_param = NULL;
@@ -1288,7 +1312,7 @@ qla24xx_iidma(struct fc_bsg_job *bsg_job)
return -EINVAL;
}
- port_param = (void *)bsg_job->request + sizeof(struct fc_bsg_request);
+ port_param = (void *)bsg_request + sizeof(struct fc_bsg_request);
if (port_param->fc_scsi_addr.dest_type != EXT_DEF_TYPE_WWPN) {
ql_log(ql_log_warn, vha, 0x7048,
"Invalid destination type.\n");
@@ -1343,24 +1367,26 @@ qla24xx_iidma(struct fc_bsg_job *bsg_job)
bsg_job->reply_len = sizeof(struct fc_bsg_reply) +
sizeof(struct qla_port_param);
- rsp_ptr = ((uint8_t *)bsg_job->reply) +
+ rsp_ptr = ((uint8_t *)bsg_reply) +
sizeof(struct fc_bsg_reply);
memcpy(rsp_ptr, port_param,
sizeof(struct qla_port_param));
}
- bsg_job->reply->result = DID_OK;
- bsg_job->job_done(bsg_job);
+ bsg_reply->result = DID_OK;
+ bsg_job_done(bsg_job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
}
return rval;
}
static int
-qla2x00_optrom_setup(struct fc_bsg_job *bsg_job, scsi_qla_host_t *vha,
+qla2x00_optrom_setup(struct bsg_job *bsg_job, scsi_qla_host_t *vha,
uint8_t is_update)
{
+ struct fc_bsg_request *bsg_request = bsg_job->request;
uint32_t start = 0;
int valid = 0;
struct qla_hw_data *ha = vha->hw;
@@ -1368,7 +1394,7 @@ qla2x00_optrom_setup(struct fc_bsg_job *bsg_job, scsi_qla_host_t *vha,
if (unlikely(pci_channel_offline(ha->pdev)))
return -EINVAL;
- start = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];
+ start = bsg_request->rqst_data.h_vendor.vendor_cmd[1];
if (start > ha->optrom_size) {
ql_log(ql_log_warn, vha, 0x7055,
"start %d > optrom_size %d.\n", start, ha->optrom_size);
@@ -1427,9 +1453,10 @@ qla2x00_optrom_setup(struct fc_bsg_job *bsg_job, scsi_qla_host_t *vha,
}
static int
-qla2x00_read_optrom(struct fc_bsg_job *bsg_job)
+qla2x00_read_optrom(struct bsg_job *bsg_job)
{
- struct Scsi_Host *host = bsg_job->shost;
+ struct fc_bsg_reply *bsg_reply = bsg_job->reply;
+ struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
scsi_qla_host_t *vha = shost_priv(host);
struct qla_hw_data *ha = vha->hw;
int rval = 0;
@@ -1451,20 +1478,22 @@ qla2x00_read_optrom(struct fc_bsg_job *bsg_job)
bsg_job->reply_payload.sg_cnt, ha->optrom_buffer,
ha->optrom_region_size);
- bsg_job->reply->reply_payload_rcv_len = ha->optrom_region_size;
- bsg_job->reply->result = DID_OK;
+ bsg_reply->reply_payload_rcv_len = ha->optrom_region_size;
+ bsg_reply->result = DID_OK;
vfree(ha->optrom_buffer);
ha->optrom_buffer = NULL;
ha->optrom_state = QLA_SWAITING;
mutex_unlock(&ha->optrom_mutex);
- bsg_job->job_done(bsg_job);
+ bsg_job_done(bsg_job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
return rval;
}
static int
-qla2x00_update_optrom(struct fc_bsg_job *bsg_job)
+qla2x00_update_optrom(struct bsg_job *bsg_job)
{
- struct Scsi_Host *host = bsg_job->shost;
+ struct fc_bsg_reply *bsg_reply = bsg_job->reply;
+ struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
scsi_qla_host_t *vha = shost_priv(host);
struct qla_hw_data *ha = vha->hw;
int rval = 0;
@@ -1486,19 +1515,21 @@ qla2x00_update_optrom(struct fc_bsg_job *bsg_job)
ha->isp_ops->write_optrom(vha, ha->optrom_buffer,
ha->optrom_region_start, ha->optrom_region_size);
- bsg_job->reply->result = DID_OK;
+ bsg_reply->result = DID_OK;
vfree(ha->optrom_buffer);
ha->optrom_buffer = NULL;
ha->optrom_state = QLA_SWAITING;
mutex_unlock(&ha->optrom_mutex);
- bsg_job->job_done(bsg_job);
+ bsg_job_done(bsg_job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
return rval;
}
static int
-qla2x00_update_fru_versions(struct fc_bsg_job *bsg_job)
+qla2x00_update_fru_versions(struct bsg_job *bsg_job)
{
- struct Scsi_Host *host = bsg_job->shost;
+ struct fc_bsg_reply *bsg_reply = bsg_job->reply;
+ struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
scsi_qla_host_t *vha = shost_priv(host);
struct qla_hw_data *ha = vha->hw;
int rval = 0;
@@ -1509,7 +1540,7 @@ qla2x00_update_fru_versions(struct fc_bsg_job *bsg_job)
dma_addr_t sfp_dma;
void *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);
if (!sfp) {
- bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
+ bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
EXT_STATUS_NO_MEMORY;
goto done;
}
@@ -1525,30 +1556,32 @@ qla2x00_update_fru_versions(struct fc_bsg_job *bsg_job)
image->field_address.device, image->field_address.offset,
sizeof(image->field_info), image->field_address.option);
if (rval) {
- bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
+ bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
EXT_STATUS_MAILBOX;
goto dealloc;
}
image++;
}
- bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = 0;
+ bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 0;
dealloc:
dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);
done:
bsg_job->reply_len = sizeof(struct fc_bsg_reply);
- bsg_job->reply->result = DID_OK << 16;
- bsg_job->job_done(bsg_job);
+ bsg_reply->result = DID_OK << 16;
+ bsg_job_done(bsg_job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
return 0;
}
static int
-qla2x00_read_fru_status(struct fc_bsg_job *bsg_job)
+qla2x00_read_fru_status(struct bsg_job *bsg_job)
{
- struct Scsi_Host *host = bsg_job->shost;
+ struct fc_bsg_reply *bsg_reply = bsg_job->reply;
+ struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
scsi_qla_host_t *vha = shost_priv(host);
struct qla_hw_data *ha = vha->hw;
int rval = 0;
@@ -1557,7 +1590,7 @@ qla2x00_read_fru_status(struct fc_bsg_job *bsg_job)
dma_addr_t sfp_dma;
uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);
if (!sfp) {
- bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
+ bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
EXT_STATUS_NO_MEMORY;
goto done;
}
@@ -1571,7 +1604,7 @@ qla2x00_read_fru_status(struct fc_bsg_job *bsg_job)
sr->status_reg = *sfp;
if (rval) {
- bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
+ bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
EXT_STATUS_MAILBOX;
goto dealloc;
}
@@ -1579,24 +1612,26 @@ qla2x00_read_fru_status(struct fc_bsg_job *bsg_job)
sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
bsg_job->reply_payload.sg_cnt, sr, sizeof(*sr));
- bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = 0;
+ bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 0;
dealloc:
dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);
done:
bsg_job->reply_len = sizeof(struct fc_bsg_reply);
- bsg_job->reply->reply_payload_rcv_len = sizeof(*sr);
- bsg_job->reply->result = DID_OK << 16;
- bsg_job->job_done(bsg_job);
+ bsg_reply->reply_payload_rcv_len = sizeof(*sr);
+ bsg_reply->result = DID_OK << 16;
+ bsg_job_done(bsg_job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
return 0;
}
static int
-qla2x00_write_fru_status(struct fc_bsg_job *bsg_job)
+qla2x00_write_fru_status(struct bsg_job *bsg_job)
{
- struct Scsi_Host *host = bsg_job->shost;
+ struct fc_bsg_reply *bsg_reply = bsg_job->reply;
+ struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
scsi_qla_host_t *vha = shost_priv(host);
struct qla_hw_data *ha = vha->hw;
int rval = 0;
@@ -1605,7 +1640,7 @@ qla2x00_write_fru_status(struct fc_bsg_job *bsg_job)
dma_addr_t sfp_dma;
uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);
if (!sfp) {
- bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
+ bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
EXT_STATUS_NO_MEMORY;
goto done;
}
@@ -1619,28 +1654,30 @@ qla2x00_write_fru_status(struct fc_bsg_job *bsg_job)
sizeof(sr->status_reg), sr->field_address.option);
if (rval) {
- bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
+ bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
EXT_STATUS_MAILBOX;
goto dealloc;
}
- bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = 0;
+ bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 0;
dealloc:
dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);
done:
bsg_job->reply_len = sizeof(struct fc_bsg_reply);
- bsg_job->reply->result = DID_OK << 16;
- bsg_job->job_done(bsg_job);
+ bsg_reply->result = DID_OK << 16;
+ bsg_job_done(bsg_job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
return 0;
}
static int
-qla2x00_write_i2c(struct fc_bsg_job *bsg_job)
+qla2x00_write_i2c(struct bsg_job *bsg_job)
{
- struct Scsi_Host *host = bsg_job->shost;
+ struct fc_bsg_reply *bsg_reply = bsg_job->reply;
+ struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
scsi_qla_host_t *vha = shost_priv(host);
struct qla_hw_data *ha = vha->hw;
int rval = 0;
@@ -1649,7 +1686,7 @@ qla2x00_write_i2c(struct fc_bsg_job *bsg_job)
dma_addr_t sfp_dma;
uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);
if (!sfp) {
- bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
+ bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
EXT_STATUS_NO_MEMORY;
goto done;
}
@@ -1662,28 +1699,30 @@ qla2x00_write_i2c(struct fc_bsg_job *bsg_job)
i2c->device, i2c->offset, i2c->length, i2c->option);
if (rval) {
- bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
+ bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
EXT_STATUS_MAILBOX;
goto dealloc;
}
- bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = 0;
+ bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 0;
dealloc:
dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);
done:
bsg_job->reply_len = sizeof(struct fc_bsg_reply);
- bsg_job->reply->result = DID_OK << 16;
- bsg_job->job_done(bsg_job);
+ bsg_reply->result = DID_OK << 16;
+ bsg_job_done(bsg_job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
return 0;
}
static int
-qla2x00_read_i2c(struct fc_bsg_job *bsg_job)
+qla2x00_read_i2c(struct bsg_job *bsg_job)
{
- struct Scsi_Host *host = bsg_job->shost;
+ struct fc_bsg_reply *bsg_reply = bsg_job->reply;
+ struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
scsi_qla_host_t *vha = shost_priv(host);
struct qla_hw_data *ha = vha->hw;
int rval = 0;
@@ -1692,7 +1731,7 @@ qla2x00_read_i2c(struct fc_bsg_job *bsg_job)
dma_addr_t sfp_dma;
uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);
if (!sfp) {
- bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
+ bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
EXT_STATUS_NO_MEMORY;
goto done;
}
@@ -1704,7 +1743,7 @@ qla2x00_read_i2c(struct fc_bsg_job *bsg_job)
i2c->device, i2c->offset, i2c->length, i2c->option);
if (rval) {
- bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
+ bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
EXT_STATUS_MAILBOX;
goto dealloc;
}
@@ -1713,24 +1752,26 @@ qla2x00_read_i2c(struct fc_bsg_job *bsg_job)
sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
bsg_job->reply_payload.sg_cnt, i2c, sizeof(*i2c));
- bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = 0;
+ bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 0;
dealloc:
dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);
done:
bsg_job->reply_len = sizeof(struct fc_bsg_reply);
- bsg_job->reply->reply_payload_rcv_len = sizeof(*i2c);
- bsg_job->reply->result = DID_OK << 16;
- bsg_job->job_done(bsg_job);
+ bsg_reply->reply_payload_rcv_len = sizeof(*i2c);
+ bsg_reply->result = DID_OK << 16;
+ bsg_job_done(bsg_job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
return 0;
}
static int
-qla24xx_process_bidir_cmd(struct fc_bsg_job *bsg_job)
+qla24xx_process_bidir_cmd(struct bsg_job *bsg_job)
{
- struct Scsi_Host *host = bsg_job->shost;
+ struct fc_bsg_reply *bsg_reply = bsg_job->reply;
+ struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
scsi_qla_host_t *vha = shost_priv(host);
struct qla_hw_data *ha = vha->hw;
uint32_t rval = EXT_STATUS_OK;
@@ -1895,19 +1936,21 @@ done:
/* Return an error vendor specific response
* and complete the bsg request
*/
- bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = rval;
+ bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = rval;
bsg_job->reply_len = sizeof(struct fc_bsg_reply);
- bsg_job->reply->reply_payload_rcv_len = 0;
- bsg_job->reply->result = (DID_OK) << 16;
- bsg_job->job_done(bsg_job);
+ bsg_reply->reply_payload_rcv_len = 0;
+ bsg_reply->result = (DID_OK) << 16;
+ bsg_job_done(bsg_job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
/* Always return success, vendor rsp carries correct status */
return 0;
}
static int
-qlafx00_mgmt_cmd(struct fc_bsg_job *bsg_job)
+qlafx00_mgmt_cmd(struct bsg_job *bsg_job)
{
- struct Scsi_Host *host = bsg_job->shost;
+ struct fc_bsg_request *bsg_request = bsg_job->request;
+ struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
scsi_qla_host_t *vha = shost_priv(host);
struct qla_hw_data *ha = vha->hw;
int rval = (DRIVER_ERROR << 16);
@@ -1919,7 +1962,7 @@ qlafx00_mgmt_cmd(struct fc_bsg_job *bsg_job)
/* Copy the IOCB specific information */
piocb_rqst = (struct qla_mt_iocb_rqst_fx00 *)
- &bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];
+ &bsg_request->rqst_data.h_vendor.vendor_cmd[1];
/* Dump the vendor information */
ql_dump_buffer(ql_dbg_user + ql_dbg_verbose, vha, 0x70cf,
@@ -2027,9 +2070,10 @@ done:
}
static int
-qla26xx_serdes_op(struct fc_bsg_job *bsg_job)
+qla26xx_serdes_op(struct bsg_job *bsg_job)
{
- struct Scsi_Host *host = bsg_job->shost;
+ struct fc_bsg_reply *bsg_reply = bsg_job->reply;
+ struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
scsi_qla_host_t *vha = shost_priv(host);
int rval = 0;
struct qla_serdes_reg sr;
@@ -2042,13 +2086,13 @@ qla26xx_serdes_op(struct fc_bsg_job *bsg_job)
switch (sr.cmd) {
case INT_SC_SERDES_WRITE_REG:
rval = qla2x00_write_serdes_word(vha, sr.addr, sr.val);
- bsg_job->reply->reply_payload_rcv_len = 0;
+ bsg_reply->reply_payload_rcv_len = 0;
break;
case INT_SC_SERDES_READ_REG:
rval = qla2x00_read_serdes_word(vha, sr.addr, &sr.val);
sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
bsg_job->reply_payload.sg_cnt, &sr, sizeof(sr));
- bsg_job->reply->reply_payload_rcv_len = sizeof(sr);
+ bsg_reply->reply_payload_rcv_len = sizeof(sr);
break;
default:
ql_dbg(ql_dbg_user, vha, 0x708c,
@@ -2057,19 +2101,21 @@ qla26xx_serdes_op(struct fc_bsg_job *bsg_job)
break;
}
- bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
+ bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
rval ? EXT_STATUS_MAILBOX : 0;
bsg_job->reply_len = sizeof(struct fc_bsg_reply);
- bsg_job->reply->result = DID_OK << 16;
- bsg_job->job_done(bsg_job);
+ bsg_reply->result = DID_OK << 16;
+ bsg_job_done(bsg_job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
return 0;
}
static int
-qla8044_serdes_op(struct fc_bsg_job *bsg_job)
+qla8044_serdes_op(struct bsg_job *bsg_job)
{
- struct Scsi_Host *host = bsg_job->shost;
+ struct fc_bsg_reply *bsg_reply = bsg_job->reply;
+ struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
scsi_qla_host_t *vha = shost_priv(host);
int rval = 0;
struct qla_serdes_reg_ex sr;
@@ -2082,13 +2128,13 @@ qla8044_serdes_op(struct fc_bsg_job *bsg_job)
switch (sr.cmd) {
case INT_SC_SERDES_WRITE_REG:
rval = qla8044_write_serdes_word(vha, sr.addr, sr.val);
- bsg_job->reply->reply_payload_rcv_len = 0;
+ bsg_reply->reply_payload_rcv_len = 0;
break;
case INT_SC_SERDES_READ_REG:
rval = qla8044_read_serdes_word(vha, sr.addr, &sr.val);
sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
bsg_job->reply_payload.sg_cnt, &sr, sizeof(sr));
- bsg_job->reply->reply_payload_rcv_len = sizeof(sr);
+ bsg_reply->reply_payload_rcv_len = sizeof(sr);
break;
default:
ql_dbg(ql_dbg_user, vha, 0x70cf,
@@ -2097,19 +2143,21 @@ qla8044_serdes_op(struct fc_bsg_job *bsg_job)
break;
}
- bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
+ bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
rval ? EXT_STATUS_MAILBOX : 0;
bsg_job->reply_len = sizeof(struct fc_bsg_reply);
- bsg_job->reply->result = DID_OK << 16;
- bsg_job->job_done(bsg_job);
+ bsg_reply->result = DID_OK << 16;
+ bsg_job_done(bsg_job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
return 0;
}
static int
-qla27xx_get_flash_upd_cap(struct fc_bsg_job *bsg_job)
+qla27xx_get_flash_upd_cap(struct bsg_job *bsg_job)
{
- struct Scsi_Host *host = bsg_job->shost;
+ struct fc_bsg_reply *bsg_reply = bsg_job->reply;
+ struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
scsi_qla_host_t *vha = shost_priv(host);
struct qla_hw_data *ha = vha->hw;
struct qla_flash_update_caps cap;
@@ -2125,21 +2173,23 @@ qla27xx_get_flash_upd_cap(struct fc_bsg_job *bsg_job)
sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
bsg_job->reply_payload.sg_cnt, &cap, sizeof(cap));
- bsg_job->reply->reply_payload_rcv_len = sizeof(cap);
+ bsg_reply->reply_payload_rcv_len = sizeof(cap);
- bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
+ bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
EXT_STATUS_OK;
bsg_job->reply_len = sizeof(struct fc_bsg_reply);
- bsg_job->reply->result = DID_OK << 16;
- bsg_job->job_done(bsg_job);
+ bsg_reply->result = DID_OK << 16;
+ bsg_job_done(bsg_job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
return 0;
}
static int
-qla27xx_set_flash_upd_cap(struct fc_bsg_job *bsg_job)
+qla27xx_set_flash_upd_cap(struct bsg_job *bsg_job)
{
- struct Scsi_Host *host = bsg_job->shost;
+ struct fc_bsg_reply *bsg_reply = bsg_job->reply;
+ struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
scsi_qla_host_t *vha = shost_priv(host);
struct qla_hw_data *ha = vha->hw;
uint64_t online_fw_attr = 0;
@@ -2158,32 +2208,34 @@ qla27xx_set_flash_upd_cap(struct fc_bsg_job *bsg_job)
(uint64_t)ha->fw_attributes;
if (online_fw_attr != cap.capabilities) {
- bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
+ bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
EXT_STATUS_INVALID_PARAM;
return -EINVAL;
}
if (cap.outage_duration < MAX_LOOP_TIMEOUT) {
- bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
+ bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
EXT_STATUS_INVALID_PARAM;
return -EINVAL;
}
- bsg_job->reply->reply_payload_rcv_len = 0;
+ bsg_reply->reply_payload_rcv_len = 0;
- bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
+ bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
EXT_STATUS_OK;
bsg_job->reply_len = sizeof(struct fc_bsg_reply);
- bsg_job->reply->result = DID_OK << 16;
- bsg_job->job_done(bsg_job);
+ bsg_reply->result = DID_OK << 16;
+ bsg_job_done(bsg_job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
return 0;
}
static int
-qla27xx_get_bbcr_data(struct fc_bsg_job *bsg_job)
+qla27xx_get_bbcr_data(struct bsg_job *bsg_job)
{
- struct Scsi_Host *host = bsg_job->shost;
+ struct fc_bsg_reply *bsg_reply = bsg_job->reply;
+ struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
scsi_qla_host_t *vha = shost_priv(host);
struct qla_hw_data *ha = vha->hw;
struct qla_bbcr_data bbcr;
@@ -2227,27 +2279,30 @@ qla27xx_get_bbcr_data(struct fc_bsg_job *bsg_job)
done:
sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
bsg_job->reply_payload.sg_cnt, &bbcr, sizeof(bbcr));
- bsg_job->reply->reply_payload_rcv_len = sizeof(bbcr);
+ bsg_reply->reply_payload_rcv_len = sizeof(bbcr);
- bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_OK;
+ bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_OK;
bsg_job->reply_len = sizeof(struct fc_bsg_reply);
- bsg_job->reply->result = DID_OK << 16;
- bsg_job->job_done(bsg_job);
+ bsg_reply->result = DID_OK << 16;
+ bsg_job_done(bsg_job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
return 0;
}
static int
-qla2x00_get_priv_stats(struct fc_bsg_job *bsg_job)
+qla2x00_get_priv_stats(struct bsg_job *bsg_job)
{
- struct Scsi_Host *host = bsg_job->shost;
+ struct fc_bsg_request *bsg_request = bsg_job->request;
+ struct fc_bsg_reply *bsg_reply = bsg_job->reply;
+ struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
scsi_qla_host_t *vha = shost_priv(host);
struct qla_hw_data *ha = vha->hw;
struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
struct link_statistics *stats = NULL;
dma_addr_t stats_dma;
int rval;
- uint32_t *cmd = bsg_job->request->rqst_data.h_vendor.vendor_cmd;
+ uint32_t *cmd = bsg_request->rqst_data.h_vendor.vendor_cmd;
uint options = cmd[0] == QL_VND_GET_PRIV_STATS_EX ? cmd[1] : 0;
if (test_bit(UNLOADING, &vha->dpc_flags))
@@ -2281,13 +2336,14 @@ qla2x00_get_priv_stats(struct fc_bsg_job *bsg_job)
bsg_job->reply_payload.sg_cnt, stats, sizeof(*stats));
}
- bsg_job->reply->reply_payload_rcv_len = sizeof(*stats);
- bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
+ bsg_reply->reply_payload_rcv_len = sizeof(*stats);
+ bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
rval ? EXT_STATUS_MAILBOX : EXT_STATUS_OK;
- bsg_job->reply_len = sizeof(*bsg_job->reply);
- bsg_job->reply->result = DID_OK << 16;
- bsg_job->job_done(bsg_job);
+ bsg_job->reply_len = sizeof(*bsg_reply);
+ bsg_reply->result = DID_OK << 16;
+ bsg_job_done(bsg_job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
dma_free_coherent(&ha->pdev->dev, sizeof(*stats),
stats, stats_dma);
@@ -2296,9 +2352,10 @@ qla2x00_get_priv_stats(struct fc_bsg_job *bsg_job)
}
static int
-qla2x00_do_dport_diagnostics(struct fc_bsg_job *bsg_job)
+qla2x00_do_dport_diagnostics(struct bsg_job *bsg_job)
{
- struct Scsi_Host *host = bsg_job->shost;
+ struct fc_bsg_reply *bsg_reply = bsg_job->reply;
+ struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
scsi_qla_host_t *vha = shost_priv(host);
int rval;
struct qla_dport_diag *dd;
@@ -2323,13 +2380,14 @@ qla2x00_do_dport_diagnostics(struct fc_bsg_job *bsg_job)
bsg_job->reply_payload.sg_cnt, dd, sizeof(*dd));
}
- bsg_job->reply->reply_payload_rcv_len = sizeof(*dd);
- bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
+ bsg_reply->reply_payload_rcv_len = sizeof(*dd);
+ bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
rval ? EXT_STATUS_MAILBOX : EXT_STATUS_OK;
- bsg_job->reply_len = sizeof(*bsg_job->reply);
- bsg_job->reply->result = DID_OK << 16;
- bsg_job->job_done(bsg_job);
+ bsg_job->reply_len = sizeof(*bsg_reply);
+ bsg_reply->result = DID_OK << 16;
+ bsg_job_done(bsg_job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
kfree(dd);
@@ -2337,9 +2395,11 @@ qla2x00_do_dport_diagnostics(struct fc_bsg_job *bsg_job)
}
static int
-qla2x00_process_vendor_specific(struct fc_bsg_job *bsg_job)
+qla2x00_process_vendor_specific(struct bsg_job *bsg_job)
{
- switch (bsg_job->request->rqst_data.h_vendor.vendor_cmd[0]) {
+ struct fc_bsg_request *bsg_request = bsg_job->request;
+
+ switch (bsg_request->rqst_data.h_vendor.vendor_cmd[0]) {
case QL_VND_LOOPBACK:
return qla2x00_process_loopback(bsg_job);
@@ -2413,36 +2473,38 @@ qla2x00_process_vendor_specific(struct fc_bsg_job *bsg_job)
}
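The dispatcher keys off the first word of the vendor command block, which userspace fills in before issuing the BSG request. A condensed restatement of the pattern, with the default return value assumed rather than taken from this hunk:

static int
qla2x00_vendor_dispatch_sketch(struct bsg_job *bsg_job)
{
	struct fc_bsg_request *bsg_request = bsg_job->request;

	/* vendor_cmd[0] holds the QL_VND_* opcode chosen by the caller. */
	switch (bsg_request->rqst_data.h_vendor.vendor_cmd[0]) {
	case QL_VND_LOOPBACK:
		return qla2x00_process_loopback(bsg_job);
	/* ... remaining QL_VND_* cases elided from this hunk ... */
	default:
		return -ENOSYS;	/* assumed; the real default is not shown */
	}
}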
int
-qla24xx_bsg_request(struct fc_bsg_job *bsg_job)
+qla24xx_bsg_request(struct bsg_job *bsg_job)
{
+ struct fc_bsg_request *bsg_request = bsg_job->request;
+ struct fc_bsg_reply *bsg_reply = bsg_job->reply;
int ret = -EINVAL;
struct fc_rport *rport;
struct Scsi_Host *host;
scsi_qla_host_t *vha;
/* In case no data transferred. */
- bsg_job->reply->reply_payload_rcv_len = 0;
+ bsg_reply->reply_payload_rcv_len = 0;
- if (bsg_job->request->msgcode == FC_BSG_RPT_ELS) {
- rport = bsg_job->rport;
+ if (bsg_request->msgcode == FC_BSG_RPT_ELS) {
+ rport = fc_bsg_to_rport(bsg_job);
host = rport_to_shost(rport);
vha = shost_priv(host);
} else {
- host = bsg_job->shost;
+ host = fc_bsg_to_shost(bsg_job);
vha = shost_priv(host);
}
if (qla2x00_reset_active(vha)) {
ql_dbg(ql_dbg_user, vha, 0x709f,
"BSG: ISP abort active/needed -- cmd=%d.\n",
- bsg_job->request->msgcode);
+ bsg_request->msgcode);
return -EBUSY;
}
ql_dbg(ql_dbg_user, vha, 0x7000,
- "Entered %s msgcode=0x%x.\n", __func__, bsg_job->request->msgcode);
+ "Entered %s msgcode=0x%x.\n", __func__, bsg_request->msgcode);
- switch (bsg_job->request->msgcode) {
+ switch (bsg_request->msgcode) {
case FC_BSG_RPT_ELS:
case FC_BSG_HST_ELS_NOLOGIN:
ret = qla2x00_process_els(bsg_job);
@@ -2464,9 +2526,10 @@ qla24xx_bsg_request(struct fc_bsg_job *bsg_job)
}
int
-qla24xx_bsg_timeout(struct fc_bsg_job *bsg_job)
+qla24xx_bsg_timeout(struct bsg_job *bsg_job)
{
- scsi_qla_host_t *vha = shost_priv(bsg_job->shost);
+ struct fc_bsg_reply *bsg_reply = bsg_job->reply;
+ scsi_qla_host_t *vha = shost_priv(fc_bsg_to_shost(bsg_job));
struct qla_hw_data *ha = vha->hw;
srb_t *sp;
int cnt, que;
@@ -2494,13 +2557,13 @@ qla24xx_bsg_timeout(struct fc_bsg_job *bsg_job)
"mbx abort_command "
"failed.\n");
bsg_job->req->errors =
- bsg_job->reply->result = -EIO;
+ bsg_reply->result = -EIO;
} else {
ql_dbg(ql_dbg_user, vha, 0x708a,
"mbx abort_command "
"success.\n");
bsg_job->req->errors =
- bsg_job->reply->result = 0;
+ bsg_reply->result = 0;
}
spin_lock_irqsave(&ha->hardware_lock, flags);
goto done;
@@ -2510,7 +2573,7 @@ qla24xx_bsg_timeout(struct fc_bsg_job *bsg_job)
}
spin_unlock_irqrestore(&ha->hardware_lock, flags);
ql_log(ql_log_info, vha, 0x708b, "SRB not found to abort.\n");
- bsg_job->req->errors = bsg_job->reply->result = -ENXIO;
+ bsg_job->req->errors = bsg_reply->result = -ENXIO;
return 0;
done:
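The timeout path propagates the abort outcome to both the block request's errors field and the FC reply so the two bookkeeping views stay in agreement: 0 after a successful mailbox abort, -EIO when abort_command fails, and -ENXIO when no matching SRB is found. A sketch of that bookkeeping, with qla24xx_bsg_timeout_report as a hypothetical helper:

static void
qla24xx_bsg_timeout_report(struct bsg_job *bsg_job, int err)
{
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;

	/* Keep the legacy request->errors field and the FC reply in sync. */
	bsg_job->req->errors = bsg_reply->result = err;
}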
diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c
index 45af34ddc432..21d9fb7fc887 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.c
+++ b/drivers/scsi/qla2xxx/qla_dbg.c
@@ -11,7 +11,7 @@
* ----------------------------------------------------------------------
* | Level | Last Value Used | Holes |
* ----------------------------------------------------------------------
- * | Module Init and Probe | 0x0191 | 0x0146 |
+ * | Module Init and Probe | 0x0193 | 0x0146 |
* | | | 0x015b-0x0160 |
* | | | 0x016e |
* | Mailbox commands | 0x1199 | 0x1193 |
@@ -58,7 +58,7 @@
* | | | 0xb13a,0xb142 |
* | | | 0xb13c-0xb140 |
* | | | 0xb149 |
- * | MultiQ | 0xc00c | |
+ * | MultiQ | 0xc010 | |
* | Misc | 0xd301 | 0xd031-0xd0ff |
* | | | 0xd101-0xd1fe |
* | | | 0xd214-0xd2fe |
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index 73b12e41d992..f7df01b76714 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -401,9 +401,10 @@ typedef struct srb {
uint16_t type;
char *name;
int iocbs;
+ struct qla_qpair *qpair;
union {
struct srb_iocb iocb_cmd;
- struct fc_bsg_job *bsg_job;
+ struct bsg_job *bsg_job;
struct srb_cmd scmd;
} u;
void (*done)(void *, void *, int);
@@ -2719,6 +2720,7 @@ struct isp_operations {
int (*get_flash_version) (struct scsi_qla_host *, void *);
int (*start_scsi) (srb_t *);
+ int (*start_scsi_mq) (srb_t *);
int (*abort_isp) (struct scsi_qla_host *);
int (*iospace_config)(struct qla_hw_data*);
int (*initialize_adapter)(struct scsi_qla_host *);
@@ -2730,8 +2732,10 @@ struct isp_operations {
#define QLA_MSIX_FW_MODE(m) (((m) & (BIT_7|BIT_8|BIT_9)) >> 7)
#define QLA_MSIX_FW_MODE_1(m) (QLA_MSIX_FW_MODE(m) == 1)
-#define QLA_MSIX_DEFAULT 0x00
-#define QLA_MSIX_RSP_Q 0x01
+#define QLA_MSIX_DEFAULT 0x00
+#define QLA_MSIX_RSP_Q 0x01
+#define QLA_ATIO_VECTOR 0x02
+#define QLA_MSIX_QPAIR_MULTIQ_RSP_Q 0x03
#define QLA_MIDX_DEFAULT 0
#define QLA_MIDX_RSP_Q 1
@@ -2745,9 +2749,11 @@ struct scsi_qla_host;
struct qla_msix_entry {
int have_irq;
+ int in_use;
uint32_t vector;
uint16_t entry;
- struct rsp_que *rsp;
+ char name[30];
+ void *handle;
struct irq_affinity_notify irq_notify;
int cpuid;
};
@@ -2872,7 +2878,6 @@ struct rsp_que {
struct qla_msix_entry *msix;
struct req_que *req;
srb_t *status_srb; /* status continuation entry */
- struct work_struct q_work;
dma_addr_t dma_fx00;
response_t *ring_fx00;
@@ -2909,6 +2914,37 @@ struct req_que {
uint8_t req_pkt[REQUEST_ENTRY_SIZE];
};
+/* Queue pair data structure */
+struct qla_qpair {
+ spinlock_t qp_lock;
+ atomic_t ref_count;
+ /* distill these fields down to 'online=0/1'
+ * ha->flags.eeh_busy
+ * ha->flags.pci_channel_io_perm_failure
+ * base_vha->loop_state
+ */
+ uint32_t online:1;
+ /* move vha->flags.difdix_supported here */
+ uint32_t difdix_supported:1;
+ uint32_t delete_in_progress:1;
+
+ uint16_t id; /* qp number used with FW */
+ uint16_t num_active_cmd; /* cmds down at firmware */
+ cpumask_t cpu_mask; /* CPU mask for cpu affinity operation */
+ uint16_t vp_idx; /* vport ID */
+
+ mempool_t *srb_mempool;
+
+ /* to do: New driver: move queues to here instead of pointers */
+ struct req_que *req;
+ struct rsp_que *rsp;
+ struct atio_que *atio;
+ struct qla_msix_entry *msix; /* point to &ha->msix_entries[x] */
+ struct qla_hw_data *hw;
+ struct work_struct q_work;
+ struct list_head qp_list_elem; /* vha->qp_list */
+};
+
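Each qla_qpair bundles one request queue, one response queue and an MSI-X vector; the HBA publishes them in queue_pair_map, indexed by the firmware queue-pair id, with qpair_qid_map as the allocation bitmap. A lookup sketch, assuming the caller serializes against create/delete via ha->mq_lock (the locking rule used by those paths later in this patch); qla2xxx_find_qpair is hypothetical:

static struct qla_qpair *
qla2xxx_find_qpair(struct qla_hw_data *ha, uint16_t id)
{
	if (id >= ha->max_qpairs || !test_bit(id, ha->qpair_qid_map))
		return NULL;
	return ha->queue_pair_map[id];
}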
/* Place holder for FW buffer parameters */
struct qlfc_fw {
void *fw_buf;
@@ -3004,7 +3040,6 @@ struct qla_hw_data {
uint32_t chip_reset_done :1;
uint32_t running_gold_fw :1;
uint32_t eeh_busy :1;
- uint32_t cpu_affinity_enabled :1;
uint32_t disable_msix_handshake :1;
uint32_t fcp_prio_enabled :1;
uint32_t isp82xx_fw_hung:1;
@@ -3061,10 +3096,15 @@ struct qla_hw_data {
uint8_t mqenable;
struct req_que **req_q_map;
struct rsp_que **rsp_q_map;
+ struct qla_qpair **queue_pair_map;
unsigned long req_qid_map[(QLA_MAX_QUEUES / 8) / sizeof(unsigned long)];
unsigned long rsp_qid_map[(QLA_MAX_QUEUES / 8) / sizeof(unsigned long)];
+ unsigned long qpair_qid_map[(QLA_MAX_QUEUES / 8)
+ / sizeof(unsigned long)];
uint8_t max_req_queues;
uint8_t max_rsp_queues;
+ uint8_t max_qpairs;
+ struct qla_qpair *base_qpair;
struct qla_npiv_entry *npiv_info;
uint16_t nvram_npiv_size;
@@ -3328,6 +3368,7 @@ struct qla_hw_data {
struct mutex vport_lock; /* Virtual port synchronization */
spinlock_t vport_slock; /* order is hardware_lock, then vport_slock */
+ struct mutex mq_lock; /* multi-queue synchronization */
struct completion mbx_cmd_comp; /* Serialize mbx access */
struct completion mbx_intr_comp; /* Used for completion notification */
struct completion dcbx_comp; /* For set port config notification */
@@ -3608,6 +3649,7 @@ typedef struct scsi_qla_host {
uint32_t fw_tgt_reported:1;
uint32_t bbcr_enable:1;
+ uint32_t qpairs_available:1;
} flags;
atomic_t loop_state;
@@ -3646,6 +3688,7 @@ typedef struct scsi_qla_host {
#define FX00_TARGET_SCAN 24
#define FX00_CRITEMP_RECOVERY 25
#define FX00_HOST_INFO_RESEND 26
+#define QPAIR_ONLINE_CHECK_NEEDED 27
unsigned long pci_flags;
#define PFLG_DISCONNECTED 0 /* PCI device removed */
@@ -3704,10 +3747,13 @@ typedef struct scsi_qla_host {
/* List of pending PLOGI acks, protected by hw lock */
struct list_head plogi_ack_list;
+ struct list_head qp_list;
+
uint32_t vp_abort_cnt;
struct fc_vport *fc_vport; /* holds fc_vport * for each vport */
uint16_t vp_idx; /* vport ID */
+ struct qla_qpair *qpair; /* base qpair */
unsigned long vp_flags;
#define VP_IDX_ACQUIRED 0 /* bit no 0 */
@@ -3763,6 +3809,23 @@ struct qla_tgt_vp_map {
scsi_qla_host_t *vha;
};
+struct qla2_sgx {
+ dma_addr_t dma_addr; /* OUT */
+ uint32_t dma_len; /* OUT */
+
+ uint32_t tot_bytes; /* IN */
+ struct scatterlist *cur_sg; /* IN */
+
+ /* for bookkeeping, bzero on initial invocation */
+ uint32_t bytes_consumed;
+ uint32_t num_bytes;
+ uint32_t tot_partial;
+
+ /* for debugging */
+ uint32_t num_sg;
+ srb_t *sp;
+};
+
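struct qla2_sgx moves from qla_iocb.c into the header (see the matching removal in qla_iocb.c below) because the MQ DIF path now needs it too. It is the cursor state for qla24xx_get_one_block_sg(), which chops a scatterlist into sector-sized DMA segments. Usage as it appears later in qla2xxx_dif_start_scsi_mq():

struct qla2_sgx sgx;
uint32_t partial;
int nseg = 0;

memset(&sgx, 0, sizeof(sgx));		/* bzero on initial invocation */
sgx.tot_bytes = scsi_bufflen(cmd);	/* IN: total transfer length */
sgx.cur_sg = scsi_sglist(cmd);		/* IN: first scatterlist entry */
sgx.sp = sp;

/* Each call yields one block-sized segment in dma_addr/dma_len. */
while (qla24xx_get_one_block_sg(cmd->device->sector_size, &sgx, &partial))
	nseg++;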
/*
* Macros to help code, maintain, etc.
*/
@@ -3775,21 +3838,34 @@ struct qla_tgt_vp_map {
(test_bit(ISP_ABORT_NEEDED, &ha->dpc_flags) || \
test_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags))
-#define QLA_VHA_MARK_BUSY(__vha, __bail) do { \
- atomic_inc(&__vha->vref_count); \
- mb(); \
- if (__vha->flags.delete_progress) { \
- atomic_dec(&__vha->vref_count); \
- __bail = 1; \
- } else { \
- __bail = 0; \
- } \
+#define QLA_VHA_MARK_BUSY(__vha, __bail) do { \
+ atomic_inc(&__vha->vref_count); \
+ mb(); \
+ if (__vha->flags.delete_progress) { \
+ atomic_dec(&__vha->vref_count); \
+ __bail = 1; \
+ } else { \
+ __bail = 0; \
+ } \
} while (0)
-#define QLA_VHA_MARK_NOT_BUSY(__vha) do { \
- atomic_dec(&__vha->vref_count); \
+#define QLA_VHA_MARK_NOT_BUSY(__vha) \
+ atomic_dec(&__vha->vref_count); \
+
+#define QLA_QPAIR_MARK_BUSY(__qpair, __bail) do { \
+ atomic_inc(&__qpair->ref_count); \
+ mb(); \
+ if (__qpair->delete_in_progress) { \
+ atomic_dec(&__qpair->ref_count); \
+ __bail = 1; \
+ } else { \
+ __bail = 0; \
+ } \
} while (0)
+#define QLA_QPAIR_MARK_NOT_BUSY(__qpair) \
+ atomic_dec(&__qpair->ref_count); \
+
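The QPAIR busy macros mirror the VHA pair above: take a reference, issue a full barrier, then re-check delete_in_progress so a racing qla2xxx_delete_qpair() (which spins until ref_count drains) cannot be missed; QLA_QPAIR_MARK_NOT_BUSY is just the matching atomic_dec. The canonical call pattern, as used by qla2xxx_get_qpair_sp() in qla_inline.h further down:

uint8_t bail;

QLA_QPAIR_MARK_BUSY(qpair, bail);
if (unlikely(bail))
	return NULL;	/* deletion already in progress; hands off */

/* ... safe to use qpair->req / qpair->rsp here ... */

QLA_QPAIR_MARK_NOT_BUSY(qpair);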
/*
* qla2x00 local function return status codes
*/
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index 6ca00813c71f..afa0116a163b 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -91,12 +91,17 @@ extern int
qla2x00_alloc_outstanding_cmds(struct qla_hw_data *, struct req_que *);
extern int qla2x00_init_rings(scsi_qla_host_t *);
extern uint8_t qla27xx_find_valid_image(struct scsi_qla_host *);
+extern struct qla_qpair *qla2xxx_create_qpair(struct scsi_qla_host *,
+ int, int);
+extern int qla2xxx_delete_qpair(struct scsi_qla_host *, struct qla_qpair *);
/*
* Global Data in qla_os.c source file.
*/
extern char qla2x00_version_str[];
+extern struct kmem_cache *srb_cachep;
+
extern int ql2xlogintimeout;
extern int qlport_down_retry;
extern int ql2xplogiabsentdevice;
@@ -105,8 +110,7 @@ extern int ql2xfdmienable;
extern int ql2xallocfwdump;
extern int ql2xextended_error_logging;
extern int ql2xiidmaenable;
-extern int ql2xmaxqueues;
-extern int ql2xmultique_tag;
+extern int ql2xmqsupport;
extern int ql2xfwloadbin;
extern int ql2xetsenable;
extern int ql2xshiftctondsd;
@@ -172,6 +176,9 @@ extern int qla2x00_post_uevent_work(struct scsi_qla_host *, u32);
extern int qla2x00_post_uevent_work(struct scsi_qla_host *, u32);
extern void qla2x00_disable_board_on_pci_error(struct work_struct *);
+extern void qla2x00_sp_compl(void *, void *, int);
+extern void qla2xxx_qpair_sp_free_dma(void *, void *);
+extern void qla2xxx_qpair_sp_compl(void *, void *, int);
/*
* Global Functions in qla_mid.c source file.
@@ -220,6 +227,8 @@ extern uint16_t qla2x00_calc_iocbs_32(uint16_t);
extern uint16_t qla2x00_calc_iocbs_64(uint16_t);
extern void qla2x00_build_scsi_iocbs_32(srb_t *, cmd_entry_t *, uint16_t);
extern void qla2x00_build_scsi_iocbs_64(srb_t *, cmd_entry_t *, uint16_t);
+extern void qla24xx_build_scsi_iocbs(srb_t *, struct cmd_type_7 *,
+ uint16_t, struct req_que *);
extern int qla2x00_start_scsi(srb_t *sp);
extern int qla24xx_start_scsi(srb_t *sp);
int qla2x00_marker(struct scsi_qla_host *, struct req_que *, struct rsp_que *,
@@ -227,6 +236,7 @@ int qla2x00_marker(struct scsi_qla_host *, struct req_que *, struct rsp_que *,
extern int qla2x00_start_sp(srb_t *);
extern int qla24xx_dif_start_scsi(srb_t *);
extern int qla2x00_start_bidir(srb_t *, struct scsi_qla_host *, uint32_t);
+extern int qla2xxx_dif_start_scsi_mq(srb_t *);
extern unsigned long qla2x00_get_async_timeout(struct scsi_qla_host *);
extern void *qla2x00_alloc_iocbs(scsi_qla_host_t *, srb_t *);
@@ -237,7 +247,10 @@ extern int qla24xx_walk_and_build_sglist(struct qla_hw_data *, srb_t *,
uint32_t *, uint16_t, struct qla_tgt_cmd *);
extern int qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *, srb_t *,
uint32_t *, uint16_t, struct qla_tgt_cmd *);
-
+extern int qla24xx_get_one_block_sg(uint32_t, struct qla2_sgx *, uint32_t *);
+extern int qla24xx_configure_prot_mode(srb_t *, uint16_t *);
+extern int qla24xx_build_scsi_crc_2_iocbs(srb_t *,
+ struct cmd_type_crc_2 *, uint16_t, uint16_t, uint16_t);
/*
* Global Function Prototypes in qla_mbx.c source file.
@@ -468,6 +481,8 @@ qla2x00_get_sp_from_handle(scsi_qla_host_t *, const char *, struct req_que *,
extern void
qla2x00_process_completed_request(struct scsi_qla_host *, struct req_que *,
uint32_t);
+extern irqreturn_t
+qla2xxx_msix_rsp_q(int irq, void *dev_id);
/*
* Global Function Prototypes in qla_sup.c source file.
@@ -603,15 +618,18 @@ extern int qla2x00_dfs_setup(scsi_qla_host_t *);
extern int qla2x00_dfs_remove(scsi_qla_host_t *);
/* Global function prototypes for multi-q */
-extern int qla25xx_request_irq(struct rsp_que *);
+extern int qla25xx_request_irq(struct qla_hw_data *, struct qla_qpair *,
+ struct qla_msix_entry *, int);
extern int qla25xx_init_req_que(struct scsi_qla_host *, struct req_que *);
extern int qla25xx_init_rsp_que(struct scsi_qla_host *, struct rsp_que *);
extern int qla25xx_create_req_que(struct qla_hw_data *, uint16_t, uint8_t,
uint16_t, int, uint8_t);
extern int qla25xx_create_rsp_que(struct qla_hw_data *, uint16_t, uint8_t,
- uint16_t, int);
+ uint16_t, struct qla_qpair *);
+
extern void qla2x00_init_response_q_entries(struct rsp_que *);
extern int qla25xx_delete_req_que(struct scsi_qla_host *, struct req_que *);
+extern int qla25xx_delete_rsp_que(struct scsi_qla_host *, struct rsp_que *);
extern int qla25xx_delete_queues(struct scsi_qla_host *);
extern uint16_t qla24xx_rd_req_reg(struct qla_hw_data *, uint16_t);
extern uint16_t qla25xx_rd_req_reg(struct qla_hw_data *, uint16_t);
@@ -733,8 +751,8 @@ extern int qla82xx_read_temperature(scsi_qla_host_t *);
extern int qla8044_read_temperature(scsi_qla_host_t *);
/* BSG related functions */
-extern int qla24xx_bsg_request(struct fc_bsg_job *);
-extern int qla24xx_bsg_timeout(struct fc_bsg_job *);
+extern int qla24xx_bsg_request(struct bsg_job *);
+extern int qla24xx_bsg_timeout(struct bsg_job *);
extern int qla84xx_reset_chip(scsi_qla_host_t *, uint16_t);
extern int qla2x00_issue_iocb_timeout(scsi_qla_host_t *, void *,
dma_addr_t, size_t, uint32_t);
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index 5b09296b46a3..632d5f30386a 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -1769,8 +1769,7 @@ qla2x00_alloc_outstanding_cmds(struct qla_hw_data *ha, struct req_que *req)
if (req->outstanding_cmds)
return QLA_SUCCESS;
- if (!IS_FWI2_CAPABLE(ha) || (ha->mqiobase &&
- (ql2xmultique_tag || ql2xmaxqueues > 1)))
+ if (!IS_FWI2_CAPABLE(ha))
req->num_outstanding_cmds = DEFAULT_OUTSTANDING_COMMANDS;
else {
if (ha->cur_fw_xcb_count <= ha->cur_fw_iocb_count)
@@ -4248,10 +4247,7 @@ qla2x00_loop_resync(scsi_qla_host_t *vha)
struct req_que *req;
struct rsp_que *rsp;
- if (vha->hw->flags.cpu_affinity_enabled)
- req = vha->hw->req_q_map[0];
- else
- req = vha->req;
+ req = vha->req;
rsp = req->rsp;
clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
@@ -6040,10 +6036,10 @@ qla24xx_configure_vhba(scsi_qla_host_t *vha)
return -EINVAL;
rval = qla2x00_fw_ready(base_vha);
- if (ha->flags.cpu_affinity_enabled)
- req = ha->req_q_map[0];
+ if (vha->qpair)
+ req = vha->qpair->req;
else
- req = vha->req;
+ req = ha->req_q_map[0];
rsp = req->rsp;
if (rval == QLA_SUCCESS) {
@@ -6725,3 +6721,162 @@ qla24xx_update_all_fcp_prio(scsi_qla_host_t *vha)
return ret;
}
+
+struct qla_qpair *qla2xxx_create_qpair(struct scsi_qla_host *vha, int qos,
+ int vp_idx)
+{
+ int rsp_id = 0;
+ int req_id = 0;
+ int i;
+ struct qla_hw_data *ha = vha->hw;
+ uint16_t qpair_id = 0;
+ struct qla_qpair *qpair = NULL;
+ struct qla_msix_entry *msix;
+
+ if (!(ha->fw_attributes & BIT_6) || !ha->flags.msix_enabled) {
+ ql_log(ql_log_warn, vha, 0x0181,
+ "FW/Driver is not multi-queue capable.\n");
+ return NULL;
+ }
+
+ if (ql2xmqsupport) {
+ qpair = kzalloc(sizeof(struct qla_qpair), GFP_KERNEL);
+ if (qpair == NULL) {
+ ql_log(ql_log_warn, vha, 0x0182,
+ "Failed to allocate memory for queue pair.\n");
+ return NULL;
+ }
+ memset(qpair, 0, sizeof(struct qla_qpair));
+
+ qpair->hw = vha->hw;
+
+ /* Assign an available queue pair id */
+ mutex_lock(&ha->mq_lock);
+ qpair_id = find_first_zero_bit(ha->qpair_qid_map, ha->max_qpairs);
+ if (qpair_id >= ha->max_qpairs) {
+ mutex_unlock(&ha->mq_lock);
+ ql_log(ql_log_warn, vha, 0x0183,
+ "No resources to create additional q pair.\n");
+ goto fail_qid_map;
+ }
+ set_bit(qpair_id, ha->qpair_qid_map);
+ ha->queue_pair_map[qpair_id] = qpair;
+ qpair->id = qpair_id;
+ qpair->vp_idx = vp_idx;
+
+ for (i = 0; i < ha->msix_count; i++) {
+ msix = &ha->msix_entries[i];
+ if (msix->in_use)
+ continue;
+ qpair->msix = msix;
+ ql_dbg(ql_dbg_multiq, vha, 0xc00f,
+ "Vector %x selected for qpair\n", msix->vector);
+ break;
+ }
+ if (!qpair->msix) {
+ ql_log(ql_log_warn, vha, 0x0184,
+ "Out of MSI-X vectors!.\n");
+ goto fail_msix;
+ }
+
+ qpair->msix->in_use = 1;
+ list_add_tail(&qpair->qp_list_elem, &vha->qp_list);
+
+ mutex_unlock(&ha->mq_lock);
+
+ /* Create response queue first */
+ rsp_id = qla25xx_create_rsp_que(ha, 0, 0, 0, qpair);
+ if (!rsp_id) {
+ ql_log(ql_log_warn, vha, 0x0185,
+ "Failed to create response queue.\n");
+ goto fail_rsp;
+ }
+
+ qpair->rsp = ha->rsp_q_map[rsp_id];
+
+ /* Create request queue */
+ req_id = qla25xx_create_req_que(ha, 0, vp_idx, 0, rsp_id, qos);
+ if (!req_id) {
+ ql_log(ql_log_warn, vha, 0x0186,
+ "Failed to create request queue.\n");
+ goto fail_req;
+ }
+
+ qpair->req = ha->req_q_map[req_id];
+ qpair->rsp->req = qpair->req;
+
+ if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif) {
+ if (ha->fw_attributes & BIT_4)
+ qpair->difdix_supported = 1;
+ }
+
+ qpair->srb_mempool = mempool_create_slab_pool(SRB_MIN_REQ, srb_cachep);
+ if (!qpair->srb_mempool) {
+ ql_log(ql_log_warn, vha, 0x0191,
+ "Failed to create srb mempool for qpair %d\n",
+ qpair->id);
+ goto fail_mempool;
+ }
+
+ /* Mark as online */
+ qpair->online = 1;
+
+ if (!vha->flags.qpairs_available)
+ vha->flags.qpairs_available = 1;
+
+ ql_dbg(ql_dbg_multiq, vha, 0xc00d,
+ "Request/Response queue pair created, id %d\n",
+ qpair->id);
+ ql_dbg(ql_dbg_init, vha, 0x0187,
+ "Request/Response queue pair created, id %d\n",
+ qpair->id);
+ }
+ return qpair;
+
+fail_mempool:
+fail_req:
+ qla25xx_delete_rsp_que(vha, qpair->rsp);
+fail_rsp:
+ mutex_lock(&ha->mq_lock);
+ qpair->msix->in_use = 0;
+ list_del(&qpair->qp_list_elem);
+ if (list_empty(&vha->qp_list))
+ vha->flags.qpairs_available = 0;
+fail_msix:
+ ha->queue_pair_map[qpair_id] = NULL;
+ clear_bit(qpair_id, ha->qpair_qid_map);
+ mutex_unlock(&ha->mq_lock);
+fail_qid_map:
+ kfree(qpair);
+ return NULL;
+}
+
+int qla2xxx_delete_qpair(struct scsi_qla_host *vha, struct qla_qpair *qpair)
+{
+ int ret;
+ struct qla_hw_data *ha = qpair->hw;
+
+ qpair->delete_in_progress = 1;
+ while (atomic_read(&qpair->ref_count))
+ msleep(500);
+
+ ret = qla25xx_delete_req_que(vha, qpair->req);
+ if (ret != QLA_SUCCESS)
+ goto fail;
+ ret = qla25xx_delete_rsp_que(vha, qpair->rsp);
+ if (ret != QLA_SUCCESS)
+ goto fail;
+
+ mutex_lock(&ha->mq_lock);
+ ha->queue_pair_map[qpair->id] = NULL;
+ clear_bit(qpair->id, ha->qpair_qid_map);
+ list_del(&qpair->qp_list_elem);
+ if (list_empty(&vha->qp_list))
+ vha->flags.qpairs_available = 0;
+ mempool_destroy(qpair->srb_mempool);
+ kfree(qpair);
+ mutex_unlock(&ha->mq_lock);
+
+ return QLA_SUCCESS;
+fail:
+ return ret;
+}
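Taken together, the two functions give callers a self-contained bring-up/tear-down sequence for an extra queue pair. A usage sketch; qos = 0 and vp_idx = 0 (base port) are assumed values, not taken from this patch:

struct qla_qpair *qpair;

qpair = qla2xxx_create_qpair(vha, 0, 0);
if (!qpair)
	return -ENOMEM;		/* assumed caller policy */

/* ... drive I/O through qpair->req and qpair->rsp ... */

if (qla2xxx_delete_qpair(vha, qpair) != QLA_SUCCESS)
	return -EIO;		/* assumed error mapping; qpair stays registered */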
diff --git a/drivers/scsi/qla2xxx/qla_inline.h b/drivers/scsi/qla2xxx/qla_inline.h
index edc48f3b8230..44e404583c86 100644
--- a/drivers/scsi/qla2xxx/qla_inline.h
+++ b/drivers/scsi/qla2xxx/qla_inline.h
@@ -216,6 +216,36 @@ qla2x00_reset_active(scsi_qla_host_t *vha)
}
static inline srb_t *
+qla2xxx_get_qpair_sp(struct qla_qpair *qpair, fc_port_t *fcport, gfp_t flag)
+{
+ srb_t *sp = NULL;
+ uint8_t bail;
+
+ QLA_QPAIR_MARK_BUSY(qpair, bail);
+ if (unlikely(bail))
+ return NULL;
+
+ sp = mempool_alloc(qpair->srb_mempool, flag);
+ if (!sp)
+ goto done;
+
+ memset(sp, 0, sizeof(*sp));
+ sp->fcport = fcport;
+ sp->iocbs = 1;
+done:
+ if (!sp)
+ QLA_QPAIR_MARK_NOT_BUSY(qpair);
+ return sp;
+}
+
+static inline void
+qla2xxx_rel_qpair_sp(struct qla_qpair *qpair, srb_t *sp)
+{
+ mempool_free(sp, qpair->srb_mempool);
+ QLA_QPAIR_MARK_NOT_BUSY(qpair);
+}
+
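SRBs for MQ I/O now come from a per-qpair mempool instead of the per-host pool, so allocation and release stay on the same queue pair and piggyback on the busy-reference macros above. A submission sketch; in practice the start call goes through ha->isp_ops->start_scsi_mq, and the sp initialization between alloc and start is elided because those fields are not shown in this hunk:

srb_t *sp;

sp = qla2xxx_get_qpair_sp(qpair, fcport, GFP_ATOMIC);
if (!sp)
	return SCSI_MLQUEUE_HOST_BUSY;	/* assumed caller policy */

sp->qpair = qpair;
/* ... fill in sp->type, sp->name, sp->done, sp->u.scmd ... */

if (qla2xxx_start_scsi_mq(sp) != QLA_SUCCESS) {
	qla2xxx_rel_qpair_sp(qpair, sp);
	return SCSI_MLQUEUE_HOST_BUSY;
}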
+static inline srb_t *
qla2x00_get_sp(scsi_qla_host_t *vha, fc_port_t *fcport, gfp_t flag)
{
srb_t *sp = NULL;
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
index b41265a75ed5..58e49a3e1de8 100644
--- a/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -12,7 +12,6 @@
#include <scsi/scsi_tcq.h>
-static void qla25xx_set_que(srb_t *, struct rsp_que **);
/**
* qla2x00_get_cmd_direction() - Determine control_flag data direction.
* @cmd: SCSI command
@@ -143,7 +142,7 @@ qla2x00_prep_cont_type1_iocb(scsi_qla_host_t *vha, struct req_que *req)
return (cont_pkt);
}
-static inline int
+inline int
qla24xx_configure_prot_mode(srb_t *sp, uint16_t *fw_prot_opts)
{
struct scsi_cmnd *cmd = GET_CMD_SP(sp);
@@ -693,10 +692,11 @@ qla24xx_calc_dsd_lists(uint16_t dsds)
* @sp: SRB command to process
* @cmd_pkt: Command type 3 IOCB
* @tot_dsds: Total number of segments to transfer
+ * @req: pointer to request queue
*/
-static inline void
+inline void
qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
- uint16_t tot_dsds)
+ uint16_t tot_dsds, struct req_que *req)
{
uint16_t avail_dsds;
uint32_t *cur_dsd;
@@ -745,7 +745,7 @@ qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
* Five DSDs are available in the Continuation
* Type 1 IOCB.
*/
- cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
+ cont_pkt = qla2x00_prep_cont_type1_iocb(vha, req);
cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
avail_dsds = 5;
}
@@ -845,24 +845,7 @@ qla24xx_set_t10dif_tags(srb_t *sp, struct fw_dif_context *pkt,
}
}
-struct qla2_sgx {
- dma_addr_t dma_addr; /* OUT */
- uint32_t dma_len; /* OUT */
-
- uint32_t tot_bytes; /* IN */
- struct scatterlist *cur_sg; /* IN */
-
- /* for book keeping, bzero on initial invocation */
- uint32_t bytes_consumed;
- uint32_t num_bytes;
- uint32_t tot_partial;
-
- /* for debugging */
- uint32_t num_sg;
- srb_t *sp;
-};
-
-static int
+int
qla24xx_get_one_block_sg(uint32_t blk_sz, struct qla2_sgx *sgx,
uint32_t *partial)
{
@@ -1207,7 +1190,7 @@ qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp,
* @cmd_pkt: Command type 3 IOCB
* @tot_dsds: Total number of segments to transfer
*/
-static inline int
+inline int
qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
uint16_t tot_dsds, uint16_t tot_prot_dsds, uint16_t fw_prot_opts)
{
@@ -1436,8 +1419,8 @@ qla24xx_start_scsi(srb_t *sp)
struct qla_hw_data *ha = vha->hw;
/* Setup device pointers. */
- qla25xx_set_que(sp, &rsp);
req = vha->req;
+ rsp = req->rsp;
/* So we know we haven't pci_map'ed anything yet */
tot_dsds = 0;
@@ -1523,12 +1506,10 @@ qla24xx_start_scsi(srb_t *sp)
cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
/* Build IOCB segments */
- qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds);
+ qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds, req);
/* Set total data segment count. */
cmd_pkt->entry_count = (uint8_t)req_cnt;
- /* Specify response queue number where completion should happen */
- cmd_pkt->entry_status = (uint8_t) rsp->id;
wmb();
/* Adjust ring index. */
req->ring_index++;
@@ -1597,9 +1578,8 @@ qla24xx_dif_start_scsi(srb_t *sp)
}
/* Setup device pointers. */
-
- qla25xx_set_que(sp, &rsp);
req = vha->req;
+ rsp = req->rsp;
/* So we know we haven't pci_map'ed anything yet */
tot_dsds = 0;
@@ -1764,18 +1744,365 @@ queuing_error:
return QLA_FUNCTION_FAILED;
}
-
-static void qla25xx_set_que(srb_t *sp, struct rsp_que **rsp)
+/**
+ * qla2xxx_start_scsi_mq() - Send a SCSI command to the ISP
+ * @sp: command to send to the ISP
+ *
+ * Returns non-zero if a failure occurred, else zero.
+ */
+static int
+qla2xxx_start_scsi_mq(srb_t *sp)
{
+ int nseg;
+ unsigned long flags;
+ uint32_t *clr_ptr;
+ uint32_t index;
+ uint32_t handle;
+ struct cmd_type_7 *cmd_pkt;
+ uint16_t cnt;
+ uint16_t req_cnt;
+ uint16_t tot_dsds;
+ struct req_que *req = NULL;
+ struct rsp_que *rsp = NULL;
struct scsi_cmnd *cmd = GET_CMD_SP(sp);
- struct qla_hw_data *ha = sp->fcport->vha->hw;
- int affinity = cmd->request->cpu;
+ struct scsi_qla_host *vha = sp->fcport->vha;
+ struct qla_hw_data *ha = vha->hw;
+ struct qla_qpair *qpair = sp->qpair;
+
+ /* Setup qpair pointers */
+ rsp = qpair->rsp;
+ req = qpair->req;
+
+ /* So we know we haven't pci_map'ed anything yet */
+ tot_dsds = 0;
+
+ /* Send marker if required */
+ if (vha->marker_needed != 0) {
+ if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
+ QLA_SUCCESS)
+ return QLA_FUNCTION_FAILED;
+ vha->marker_needed = 0;
+ }
+
+ /* Acquire qpair specific lock */
+ spin_lock_irqsave(&qpair->qp_lock, flags);
+
+ /* Check for room in outstanding command list. */
+ handle = req->current_outstanding_cmd;
+ for (index = 1; index < req->num_outstanding_cmds; index++) {
+ handle++;
+ if (handle == req->num_outstanding_cmds)
+ handle = 1;
+ if (!req->outstanding_cmds[handle])
+ break;
+ }
+ if (index == req->num_outstanding_cmds)
+ goto queuing_error;
+
+ /* Map the sg table so we have an accurate count of sg entries needed */
+ if (scsi_sg_count(cmd)) {
+ nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
+ scsi_sg_count(cmd), cmd->sc_data_direction);
+ if (unlikely(!nseg))
+ goto queuing_error;
+ } else
+ nseg = 0;
+
+ tot_dsds = nseg;
+ req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
+ if (req->cnt < (req_cnt + 2)) {
+ cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
+ RD_REG_DWORD_RELAXED(req->req_q_out);
+ if (req->ring_index < cnt)
+ req->cnt = cnt - req->ring_index;
+ else
+ req->cnt = req->length -
+ (req->ring_index - cnt);
+ if (req->cnt < (req_cnt + 2))
+ goto queuing_error;
+ }
+
+ /* Build command packet. */
+ req->current_outstanding_cmd = handle;
+ req->outstanding_cmds[handle] = sp;
+ sp->handle = handle;
+ cmd->host_scribble = (unsigned char *)(unsigned long)handle;
+ req->cnt -= req_cnt;
+
+ cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
+ cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
+
+ /* Zero out remaining portion of packet. */
+ /* tagged queuing modifier -- default is TSK_SIMPLE (0). */
+ clr_ptr = (uint32_t *)cmd_pkt + 2;
+ memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
+ cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
+
+ /* Set NPORT-ID and LUN number*/
+ cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
+ cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
+ cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
+ cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
+ cmd_pkt->vp_index = sp->fcport->vha->vp_idx;
+
+ int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
+ host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
+
+ cmd_pkt->task = TSK_SIMPLE;
+
+ /* Load SCSI command packet. */
+ memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
+ host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));
+
+ cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
+
+ /* Build IOCB segments */
+ qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds, req);
+
+ /* Set total data segment count. */
+ cmd_pkt->entry_count = (uint8_t)req_cnt;
+ wmb();
+ /* Adjust ring index. */
+ req->ring_index++;
+ if (req->ring_index == req->length) {
+ req->ring_index = 0;
+ req->ring_ptr = req->ring;
+ } else
+ req->ring_ptr++;
+
+ sp->flags |= SRB_DMA_VALID;
+
+ /* Set chip new ring index. */
+ WRT_REG_DWORD(req->req_q_in, req->ring_index);
+
+ /* Manage unprocessed RIO/ZIO commands in response queue. */
+ if (vha->flags.process_response_queue &&
+ rsp->ring_ptr->signature != RESPONSE_PROCESSED)
+ qla24xx_process_response_queue(vha, rsp);
+
+ spin_unlock_irqrestore(&qpair->qp_lock, flags);
+ return QLA_SUCCESS;
+
+queuing_error:
+ if (tot_dsds)
+ scsi_dma_unmap(cmd);
+
+ spin_unlock_irqrestore(&qpair->qp_lock, flags);
+
+ return QLA_FUNCTION_FAILED;
+}
+
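The ring-space check inside qla2xxx_start_scsi_mq() is the standard qla2xxx circular-ring computation, now performed under the qpair lock: cnt is the consumer index the firmware last reported (preferring the DMA shadow copy on shadow-register-capable ISPs), and two slots are always held back so the producer index can never catch the consumer. An annotated restatement, with free as an illustrative local:

uint16_t cnt, free;

cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
    RD_REG_DWORD_RELAXED(req->req_q_out);
if (req->ring_index < cnt)
	free = cnt - req->ring_index;			/* no wrap */
else
	free = req->length - (req->ring_index - cnt);	/* wrapped */

/* Refuse to queue unless the IOCB plus a 2-slot reserve fits. */
if (free < (req_cnt + 2))
	goto queuing_error;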
+
+/**
+ * qla2xxx_dif_start_scsi_mq() - Send a SCSI command to the ISP
+ * @sp: command to send to the ISP
+ *
+ * Returns non-zero if a failure occurred, else zero.
+ */
+int
+qla2xxx_dif_start_scsi_mq(srb_t *sp)
+{
+ int nseg;
+ unsigned long flags;
+ uint32_t *clr_ptr;
+ uint32_t index;
+ uint32_t handle;
+ uint16_t cnt;
+ uint16_t req_cnt = 0;
+ uint16_t tot_dsds;
+ uint16_t tot_prot_dsds;
+ uint16_t fw_prot_opts = 0;
+ struct req_que *req = NULL;
+ struct rsp_que *rsp = NULL;
+ struct scsi_cmnd *cmd = GET_CMD_SP(sp);
+ struct scsi_qla_host *vha = sp->fcport->vha;
+ struct qla_hw_data *ha = vha->hw;
+ struct cmd_type_crc_2 *cmd_pkt;
+ uint32_t status = 0;
+ struct qla_qpair *qpair = sp->qpair;
+
+#define QDSS_GOT_Q_SPACE BIT_0
+
+ /* Check for host side state */
+ if (!qpair->online) {
+ cmd->result = DID_NO_CONNECT << 16;
+ return QLA_INTERFACE_ERROR;
+ }
+
+ if (!qpair->difdix_supported &&
+ scsi_get_prot_op(cmd) != SCSI_PROT_NORMAL) {
+ cmd->result = DID_NO_CONNECT << 16;
+ return QLA_INTERFACE_ERROR;
+ }
+
+ /* Only process protection or >16 cdb in this routine */
+ if (scsi_get_prot_op(cmd) == SCSI_PROT_NORMAL) {
+ if (cmd->cmd_len <= 16)
+ return qla2xxx_start_scsi_mq(sp);
+ }
+
+ /* Setup qpair pointers */
+ rsp = qpair->rsp;
+ req = qpair->req;
+
+ /* So we know we haven't pci_map'ed anything yet */
+ tot_dsds = 0;
+
+ /* Send marker if required */
+ if (vha->marker_needed != 0) {
+ if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
+ QLA_SUCCESS)
+ return QLA_FUNCTION_FAILED;
+ vha->marker_needed = 0;
+ }
+
+ /* Acquire ring specific lock */
+ spin_lock_irqsave(&qpair->qp_lock, flags);
+
+ /* Check for room in outstanding command list. */
+ handle = req->current_outstanding_cmd;
+ for (index = 1; index < req->num_outstanding_cmds; index++) {
+ handle++;
+ if (handle == req->num_outstanding_cmds)
+ handle = 1;
+ if (!req->outstanding_cmds[handle])
+ break;
+ }
+
+ if (index == req->num_outstanding_cmds)
+ goto queuing_error;
+
+ /* Compute number of required data segments */
+ /* Map the sg table so we have an accurate count of sg entries needed */
+ if (scsi_sg_count(cmd)) {
+ nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
+ scsi_sg_count(cmd), cmd->sc_data_direction);
+ if (unlikely(!nseg))
+ goto queuing_error;
+ else
+ sp->flags |= SRB_DMA_VALID;
+
+ if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
+ (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
+ struct qla2_sgx sgx;
+ uint32_t partial;
+
+ memset(&sgx, 0, sizeof(struct qla2_sgx));
+ sgx.tot_bytes = scsi_bufflen(cmd);
+ sgx.cur_sg = scsi_sglist(cmd);
+ sgx.sp = sp;
+
+ nseg = 0;
+ while (qla24xx_get_one_block_sg(
+ cmd->device->sector_size, &sgx, &partial))
+ nseg++;
+ }
+ } else
+ nseg = 0;
+
+ /* number of required data segments */
+ tot_dsds = nseg;
+
+ /* Compute number of required protection segments */
+ if (qla24xx_configure_prot_mode(sp, &fw_prot_opts)) {
+ nseg = dma_map_sg(&ha->pdev->dev, scsi_prot_sglist(cmd),
+ scsi_prot_sg_count(cmd), cmd->sc_data_direction);
+ if (unlikely(!nseg))
+ goto queuing_error;
+ else
+ sp->flags |= SRB_CRC_PROT_DMA_VALID;
+
+ if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
+ (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
+ nseg = scsi_bufflen(cmd) / cmd->device->sector_size;
+ }
+ } else {
+ nseg = 0;
+ }
+
+ req_cnt = 1;
+ /* Total Data and protection sg segment(s) */
+ tot_prot_dsds = nseg;
+ tot_dsds += nseg;
+ if (req->cnt < (req_cnt + 2)) {
+ cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
+ RD_REG_DWORD_RELAXED(req->req_q_out);
+ if (req->ring_index < cnt)
+ req->cnt = cnt - req->ring_index;
+ else
+ req->cnt = req->length -
+ (req->ring_index - cnt);
+ if (req->cnt < (req_cnt + 2))
+ goto queuing_error;
+ }
+
+ status |= QDSS_GOT_Q_SPACE;
+
+ /* Build header part of command packet (excluding the OPCODE). */
+ req->current_outstanding_cmd = handle;
+ req->outstanding_cmds[handle] = sp;
+ sp->handle = handle;
+ cmd->host_scribble = (unsigned char *)(unsigned long)handle;
+ req->cnt -= req_cnt;
+
+ /* Fill-in common area */
+ cmd_pkt = (struct cmd_type_crc_2 *)req->ring_ptr;
+ cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
+
+ clr_ptr = (uint32_t *)cmd_pkt + 2;
+ memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
+
+ /* Set NPORT-ID and LUN number*/
+ cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
+ cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
+ cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
+ cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
- if (ha->flags.cpu_affinity_enabled && affinity >= 0 &&
- affinity < ha->max_rsp_queues - 1)
- *rsp = ha->rsp_q_map[affinity + 1];
- else
- *rsp = ha->rsp_q_map[0];
+ int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
+ host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
+
+ /* Total Data and protection segment(s) */
+ cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
+
+ /* Build IOCB segments and adjust for data protection segments */
+ if (qla24xx_build_scsi_crc_2_iocbs(sp, (struct cmd_type_crc_2 *)
+ req->ring_ptr, tot_dsds, tot_prot_dsds, fw_prot_opts) !=
+ QLA_SUCCESS)
+ goto queuing_error;
+
+ cmd_pkt->entry_count = (uint8_t)req_cnt;
+ cmd_pkt->timeout = cpu_to_le16(0);
+ wmb();
+
+ /* Adjust ring index. */
+ req->ring_index++;
+ if (req->ring_index == req->length) {
+ req->ring_index = 0;
+ req->ring_ptr = req->ring;
+ } else
+ req->ring_ptr++;
+
+ /* Set chip new ring index. */
+ WRT_REG_DWORD(req->req_q_in, req->ring_index);
+
+ /* Manage unprocessed RIO/ZIO commands in response queue. */
+ if (vha->flags.process_response_queue &&
+ rsp->ring_ptr->signature != RESPONSE_PROCESSED)
+ qla24xx_process_response_queue(vha, rsp);
+
+ spin_unlock_irqrestore(&qpair->qp_lock, flags);
+
+ return QLA_SUCCESS;
+
+queuing_error:
+ if (status & QDSS_GOT_Q_SPACE) {
+ req->outstanding_cmds[handle] = NULL;
+ req->cnt += req_cnt;
+ }
+ /* Cleanup will be performed by the caller (queuecommand) */
+
+ spin_unlock_irqrestore(&qpair->qp_lock, flags);
+ return QLA_FUNCTION_FAILED;
}
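With multiple hardware queues in play, a command handle must identify both the queue and the slot, which is why both MQ submit paths stamp cmd_pkt->handle = MAKE_HANDLE(req->id, handle). The macro itself lives in qla_def.h and is not part of this hunk; a sketch assuming the conventional 16:16 packing:

/* Queue id in the high half, per-queue outstanding index in the low. */
#define EXAMPLE_MAKE_HANDLE(q_id, idx) \
	((uint32_t)(((uint32_t)(q_id) << 16) | (uint32_t)(idx)))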
/* Generic Control-SRB manipulation functions. */
@@ -2197,7 +2524,8 @@ qla24xx_els_logo_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
static void
qla24xx_els_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
{
- struct fc_bsg_job *bsg_job = sp->u.bsg_job;
+ struct bsg_job *bsg_job = sp->u.bsg_job;
+ struct fc_bsg_request *bsg_request = bsg_job->request;
els_iocb->entry_type = ELS_IOCB_TYPE;
els_iocb->entry_count = 1;
@@ -2212,8 +2540,8 @@ qla24xx_els_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
els_iocb->opcode =
sp->type == SRB_ELS_CMD_RPT ?
- bsg_job->request->rqst_data.r_els.els_code :
- bsg_job->request->rqst_data.h_els.command_code;
+ bsg_request->rqst_data.r_els.els_code :
+ bsg_request->rqst_data.h_els.command_code;
els_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
els_iocb->port_id[1] = sp->fcport->d_id.b.area;
els_iocb->port_id[2] = sp->fcport->d_id.b.domain;
@@ -2250,7 +2578,7 @@ qla2x00_ct_iocb(srb_t *sp, ms_iocb_entry_t *ct_iocb)
uint16_t tot_dsds;
scsi_qla_host_t *vha = sp->fcport->vha;
struct qla_hw_data *ha = vha->hw;
- struct fc_bsg_job *bsg_job = sp->u.bsg_job;
+ struct bsg_job *bsg_job = sp->u.bsg_job;
int loop_iterartion = 0;
int entry_count = 1;
@@ -2327,7 +2655,7 @@ qla24xx_ct_iocb(srb_t *sp, struct ct_entry_24xx *ct_iocb)
uint16_t tot_dsds;
scsi_qla_host_t *vha = sp->fcport->vha;
struct qla_hw_data *ha = vha->hw;
- struct fc_bsg_job *bsg_job = sp->u.bsg_job;
+ struct bsg_job *bsg_job = sp->u.bsg_job;
int loop_iterartion = 0;
int entry_count = 1;
@@ -2663,7 +2991,7 @@ sufficient_dsds:
cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
/* Build IOCB segments */
- qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds);
+ qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds, req);
/* Set total data segment count. */
cmd_pkt->entry_count = (uint8_t)req_cnt;
@@ -2833,7 +3161,7 @@ qla25xx_build_bidir_iocb(srb_t *sp, struct scsi_qla_host *vha,
struct scatterlist *sg;
int index;
int entry_count = 1;
- struct fc_bsg_job *bsg_job = sp->u.bsg_job;
+ struct bsg_job *bsg_job = sp->u.bsg_job;
/*Update entry type to indicate bidir command */
*((uint32_t *)(&cmd_pkt->entry_type)) =
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index 068c4e47fac9..5093ca9b02ec 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -9,6 +9,7 @@
#include <linux/delay.h>
#include <linux/slab.h>
+#include <linux/t10-pi.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_bsg_fc.h>
#include <scsi/scsi_eh.h>
@@ -1356,7 +1357,8 @@ qla2x00_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
const char func[] = "CT_IOCB";
const char *type;
srb_t *sp;
- struct fc_bsg_job *bsg_job;
+ struct bsg_job *bsg_job;
+ struct fc_bsg_reply *bsg_reply;
uint16_t comp_status;
int res;
@@ -1365,6 +1367,7 @@ qla2x00_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
return;
bsg_job = sp->u.bsg_job;
+ bsg_reply = bsg_job->reply;
type = "ct pass-through";
@@ -1373,32 +1376,32 @@ qla2x00_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
/* return FC_CTELS_STATUS_OK and leave the decoding of the ELS/CT
* fc payload to the caller
*/
- bsg_job->reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK;
+ bsg_reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK;
bsg_job->reply_len = sizeof(struct fc_bsg_reply);
if (comp_status != CS_COMPLETE) {
if (comp_status == CS_DATA_UNDERRUN) {
res = DID_OK << 16;
- bsg_job->reply->reply_payload_rcv_len =
+ bsg_reply->reply_payload_rcv_len =
le16_to_cpu(((sts_entry_t *)pkt)->rsp_info_len);
ql_log(ql_log_warn, vha, 0x5048,
"CT pass-through-%s error "
"comp_status-status=0x%x total_byte = 0x%x.\n",
type, comp_status,
- bsg_job->reply->reply_payload_rcv_len);
+ bsg_reply->reply_payload_rcv_len);
} else {
ql_log(ql_log_warn, vha, 0x5049,
"CT pass-through-%s error "
"comp_status-status=0x%x.\n", type, comp_status);
res = DID_ERROR << 16;
- bsg_job->reply->reply_payload_rcv_len = 0;
+ bsg_reply->reply_payload_rcv_len = 0;
}
ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5035,
(uint8_t *)pkt, sizeof(*pkt));
} else {
res = DID_OK << 16;
- bsg_job->reply->reply_payload_rcv_len =
+ bsg_reply->reply_payload_rcv_len =
bsg_job->reply_payload.payload_len;
bsg_job->reply_len = 0;
}
@@ -1413,7 +1416,8 @@ qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
const char func[] = "ELS_CT_IOCB";
const char *type;
srb_t *sp;
- struct fc_bsg_job *bsg_job;
+ struct bsg_job *bsg_job;
+ struct fc_bsg_reply *bsg_reply;
uint16_t comp_status;
uint32_t fw_status[3];
uint8_t* fw_sts_ptr;
@@ -1423,6 +1427,7 @@ qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
if (!sp)
return;
bsg_job = sp->u.bsg_job;
+ bsg_reply = bsg_job->reply;
type = NULL;
switch (sp->type) {
@@ -1452,13 +1457,13 @@ qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
/* return FC_CTELS_STATUS_OK and leave the decoding of the ELS/CT
* fc payload to the caller
*/
- bsg_job->reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK;
+ bsg_reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK;
bsg_job->reply_len = sizeof(struct fc_bsg_reply) + sizeof(fw_status);
if (comp_status != CS_COMPLETE) {
if (comp_status == CS_DATA_UNDERRUN) {
res = DID_OK << 16;
- bsg_job->reply->reply_payload_rcv_len =
+ bsg_reply->reply_payload_rcv_len =
le16_to_cpu(((struct els_sts_entry_24xx *)pkt)->total_byte_count);
ql_dbg(ql_dbg_user, vha, 0x503f,
@@ -1480,7 +1485,7 @@ qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
le16_to_cpu(((struct els_sts_entry_24xx *)
pkt)->error_subcode_2));
res = DID_ERROR << 16;
- bsg_job->reply->reply_payload_rcv_len = 0;
+ bsg_reply->reply_payload_rcv_len = 0;
fw_sts_ptr = ((uint8_t*)bsg_job->req->sense) + sizeof(struct fc_bsg_reply);
memcpy( fw_sts_ptr, fw_status, sizeof(fw_status));
}
@@ -1489,7 +1494,7 @@ qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
}
else {
res = DID_OK << 16;
- bsg_job->reply->reply_payload_rcv_len = bsg_job->reply_payload.payload_len;
+ bsg_reply->reply_payload_rcv_len = bsg_job->reply_payload.payload_len;
bsg_job->reply_len = 0;
}
@@ -1904,7 +1909,9 @@ qla25xx_process_bidir_status_iocb(scsi_qla_host_t *vha, void *pkt,
uint16_t scsi_status;
uint16_t thread_id;
uint32_t rval = EXT_STATUS_OK;
- struct fc_bsg_job *bsg_job = NULL;
+ struct bsg_job *bsg_job = NULL;
+ struct fc_bsg_request *bsg_request;
+ struct fc_bsg_reply *bsg_reply;
sts_entry_t *sts;
struct sts_entry_24xx *sts24;
sts = (sts_entry_t *) pkt;
@@ -1919,11 +1926,7 @@ qla25xx_process_bidir_status_iocb(scsi_qla_host_t *vha, void *pkt,
}
sp = req->outstanding_cmds[index];
- if (sp) {
- /* Free outstanding command slot. */
- req->outstanding_cmds[index] = NULL;
- bsg_job = sp->u.bsg_job;
- } else {
+ if (!sp) {
ql_log(ql_log_warn, vha, 0x70b0,
"Req:%d: Invalid ISP SCSI completion handle(0x%x)\n",
req->id, index);
@@ -1932,6 +1935,12 @@ qla25xx_process_bidir_status_iocb(scsi_qla_host_t *vha, void *pkt,
return;
}
+ /* Free outstanding command slot. */
+ req->outstanding_cmds[index] = NULL;
+ bsg_job = sp->u.bsg_job;
+ bsg_request = bsg_job->request;
+ bsg_reply = bsg_job->reply;
+
if (IS_FWI2_CAPABLE(ha)) {
comp_status = le16_to_cpu(sts24->comp_status);
scsi_status = le16_to_cpu(sts24->scsi_status) & SS_MASK;
@@ -1940,14 +1949,14 @@ qla25xx_process_bidir_status_iocb(scsi_qla_host_t *vha, void *pkt,
scsi_status = le16_to_cpu(sts->scsi_status) & SS_MASK;
}
- thread_id = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];
+ thread_id = bsg_request->rqst_data.h_vendor.vendor_cmd[1];
switch (comp_status) {
case CS_COMPLETE:
if (scsi_status == 0) {
- bsg_job->reply->reply_payload_rcv_len =
+ bsg_reply->reply_payload_rcv_len =
bsg_job->reply_payload.payload_len;
vha->qla_stats.input_bytes +=
- bsg_job->reply->reply_payload_rcv_len;
+ bsg_reply->reply_payload_rcv_len;
vha->qla_stats.input_requests++;
rval = EXT_STATUS_OK;
}
@@ -2028,11 +2037,11 @@ qla25xx_process_bidir_status_iocb(scsi_qla_host_t *vha, void *pkt,
rval = EXT_STATUS_ERR;
break;
}
- bsg_job->reply->reply_payload_rcv_len = 0;
+ bsg_reply->reply_payload_rcv_len = 0;
done:
/* Return the vendor specific reply to API */
- bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = rval;
+ bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = rval;
bsg_job->reply_len = sizeof(struct fc_bsg_reply);
/* Always return DID_OK, bsg will send the vendor specific response
* in this case only */
@@ -2863,41 +2872,6 @@ out:
}
static irqreturn_t
-qla25xx_msix_rsp_q(int irq, void *dev_id)
-{
- struct qla_hw_data *ha;
- scsi_qla_host_t *vha;
- struct rsp_que *rsp;
- struct device_reg_24xx __iomem *reg;
- unsigned long flags;
- uint32_t hccr = 0;
-
- rsp = (struct rsp_que *) dev_id;
- if (!rsp) {
- ql_log(ql_log_info, NULL, 0x505b,
- "%s: NULL response queue pointer.\n", __func__);
- return IRQ_NONE;
- }
- ha = rsp->hw;
- vha = pci_get_drvdata(ha->pdev);
-
- /* Clear the interrupt, if enabled, for this response queue */
- if (!ha->flags.disable_msix_handshake) {
- reg = &ha->iobase->isp24;
- spin_lock_irqsave(&ha->hardware_lock, flags);
- WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
- hccr = RD_REG_DWORD_RELAXED(&reg->hccr);
- spin_unlock_irqrestore(&ha->hardware_lock, flags);
- }
- if (qla2x00_check_reg32_for_disconnect(vha, hccr))
- goto out;
- queue_work_on((int) (rsp->id - 1), ha->wq, &rsp->q_work);
-
-out:
- return IRQ_HANDLED;
-}
-
-static irqreturn_t
qla24xx_msix_default(int irq, void *dev_id)
{
scsi_qla_host_t *vha;
@@ -2993,6 +2967,35 @@ qla24xx_msix_default(int irq, void *dev_id)
return IRQ_HANDLED;
}
+irqreturn_t
+qla2xxx_msix_rsp_q(int irq, void *dev_id)
+{
+ struct qla_hw_data *ha;
+ struct qla_qpair *qpair;
+ struct device_reg_24xx __iomem *reg;
+ unsigned long flags;
+
+ qpair = dev_id;
+ if (!qpair) {
+ ql_log(ql_log_info, NULL, 0x505b,
+ "%s: NULL response queue pointer.\n", __func__);
+ return IRQ_NONE;
+ }
+ ha = qpair->hw;
+
+ /* Clear the interrupt, if enabled, for this response queue */
+ if (unlikely(!ha->flags.disable_msix_handshake)) {
+ reg = &ha->iobase->isp24;
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ }
+
+ queue_work(ha->wq, &qpair->q_work);
+
+ return IRQ_HANDLED;
+}
+
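Compared with the deleted qla25xx_msix_rsp_q() above, the new per-qpair handler no longer reads hccr back after clearing the interrupt and drops the PCI-disconnect check; it only acks the interrupt and defers all response processing to qpair->q_work. The matching worker, qla_do_work(), is reworked later in this patch to run under the qpair's own qp_lock rather than the global hardware_lock.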
/* Interrupt handling helpers. */
struct qla_init_msix_entry {
@@ -3000,69 +3003,28 @@ struct qla_init_msix_entry {
irq_handler_t handler;
};
-static struct qla_init_msix_entry msix_entries[3] = {
+static struct qla_init_msix_entry msix_entries[] = {
{ "qla2xxx (default)", qla24xx_msix_default },
{ "qla2xxx (rsp_q)", qla24xx_msix_rsp_q },
- { "qla2xxx (multiq)", qla25xx_msix_rsp_q },
+ { "qla2xxx (atio_q)", qla83xx_msix_atio_q },
+ { "qla2xxx (qpair_multiq)", qla2xxx_msix_rsp_q },
};
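
For reference, this single table now encodes all four vector roles by position; the matching index defines presumably live in qla_def.h (they are not part of this diff), derivable from the uses below as:

        #define QLA_MSIX_DEFAULT                0  /* mailbox/default handler */
        #define QLA_MSIX_RSP_Q                  1  /* base response queue */
        #define QLA_ATIO_VECTOR                 2  /* target-mode ATIO queue */
        #define QLA_MSIX_QPAIR_MULTIQ_RSP_Q     3  /* template for extra qpairs */
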
-static struct qla_init_msix_entry qla82xx_msix_entries[2] = {
+static struct qla_init_msix_entry qla82xx_msix_entries[] = {
{ "qla2xxx (default)", qla82xx_msix_default },
{ "qla2xxx (rsp_q)", qla82xx_msix_rsp_q },
};
-static struct qla_init_msix_entry qla83xx_msix_entries[3] = {
- { "qla2xxx (default)", qla24xx_msix_default },
- { "qla2xxx (rsp_q)", qla24xx_msix_rsp_q },
- { "qla2xxx (atio_q)", qla83xx_msix_atio_q },
-};
-
-static void
-qla24xx_disable_msix(struct qla_hw_data *ha)
-{
- int i;
- struct qla_msix_entry *qentry;
- scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
-
- for (i = 0; i < ha->msix_count; i++) {
- qentry = &ha->msix_entries[i];
- if (qentry->have_irq) {
- /* un-register irq cpu affinity notification */
- irq_set_affinity_notifier(qentry->vector, NULL);
- free_irq(qentry->vector, qentry->rsp);
- }
- }
- pci_disable_msix(ha->pdev);
- kfree(ha->msix_entries);
- ha->msix_entries = NULL;
- ha->flags.msix_enabled = 0;
- ql_dbg(ql_dbg_init, vha, 0x0042,
- "Disabled the MSI.\n");
-}
-
static int
qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
{
#define MIN_MSIX_COUNT 2
-#define ATIO_VECTOR 2
int i, ret;
- struct msix_entry *entries;
struct qla_msix_entry *qentry;
scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
- entries = kzalloc(sizeof(struct msix_entry) * ha->msix_count,
- GFP_KERNEL);
- if (!entries) {
- ql_log(ql_log_warn, vha, 0x00bc,
- "Failed to allocate memory for msix_entry.\n");
- return -ENOMEM;
- }
-
- for (i = 0; i < ha->msix_count; i++)
- entries[i].entry = i;
-
- ret = pci_enable_msix_range(ha->pdev,
- entries, MIN_MSIX_COUNT, ha->msix_count);
+ ret = pci_alloc_irq_vectors(ha->pdev, MIN_MSIX_COUNT, ha->msix_count,
+ PCI_IRQ_MSIX | PCI_IRQ_AFFINITY);
if (ret < 0) {
ql_log(ql_log_fatal, vha, 0x00c7,
"MSI-X: Failed to enable support, "
@@ -3072,10 +3034,23 @@ qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
} else if (ret < ha->msix_count) {
ql_log(ql_log_warn, vha, 0x00c6,
"MSI-X: Failed to enable support "
- "-- %d/%d\n Retry with %d vectors.\n",
- ha->msix_count, ret, ret);
+ "with %d vectors, using %d vectors.\n",
+ ha->msix_count, ret);
ha->msix_count = ret;
- ha->max_rsp_queues = ha->msix_count - 1;
+ /* Recalculate queue values */
+ if (ha->mqiobase && ql2xmqsupport) {
+ ha->max_req_queues = ha->msix_count - 1;
+
+ /* ATIOQ needs 1 vector. That's 1 less QPair */
+ if (QLA_TGT_MODE_ENABLED())
+ ha->max_req_queues--;
+
+ ha->max_rsp_queues = ha->max_req_queues;
+
+ ha->max_qpairs = ha->max_req_queues - 1;
+ ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0190,
+ "Adjusted Max no of queues pairs: %d.\n", ha->max_qpairs);
+ }
}
ha->msix_entries = kzalloc(sizeof(struct qla_msix_entry) *
ha->msix_count, GFP_KERNEL);
@@ -3089,20 +3064,23 @@ qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
for (i = 0; i < ha->msix_count; i++) {
qentry = &ha->msix_entries[i];
- qentry->vector = entries[i].vector;
- qentry->entry = entries[i].entry;
+ qentry->vector = pci_irq_vector(ha->pdev, i);
+ qentry->entry = i;
qentry->have_irq = 0;
- qentry->rsp = NULL;
+ qentry->in_use = 0;
+ qentry->handle = NULL;
qentry->irq_notify.notify = qla_irq_affinity_notify;
qentry->irq_notify.release = qla_irq_affinity_release;
qentry->cpuid = -1;
}
/* Enable MSI-X vectors for the base queue */
- for (i = 0; i < 2; i++) {
+ for (i = 0; i < (QLA_MSIX_RSP_Q + 1); i++) {
qentry = &ha->msix_entries[i];
- qentry->rsp = rsp;
+ qentry->handle = rsp;
rsp->msix = qentry;
+ scnprintf(qentry->name, sizeof(qentry->name), "%s",
+ msix_entries[i].name);
if (IS_P3P_TYPE(ha))
ret = request_irq(qentry->vector,
qla82xx_msix_entries[i].handler,
@@ -3114,6 +3092,7 @@ qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
if (ret)
goto msix_register_fail;
qentry->have_irq = 1;
+ qentry->in_use = 1;
/* Register for CPU affinity notification. */
irq_set_affinity_notifier(qentry->vector, &qentry->irq_notify);
@@ -3133,12 +3112,15 @@ qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
* queue.
*/
if (QLA_TGT_MODE_ENABLED() && IS_ATIO_MSIX_CAPABLE(ha)) {
- qentry = &ha->msix_entries[ATIO_VECTOR];
- qentry->rsp = rsp;
+ qentry = &ha->msix_entries[QLA_ATIO_VECTOR];
rsp->msix = qentry;
+ qentry->handle = rsp;
+ scnprintf(qentry->name, sizeof(qentry->name), "%s",
+ msix_entries[QLA_ATIO_VECTOR].name);
+ qentry->in_use = 1;
ret = request_irq(qentry->vector,
- qla83xx_msix_entries[ATIO_VECTOR].handler,
- 0, qla83xx_msix_entries[ATIO_VECTOR].name, rsp);
+ msix_entries[QLA_ATIO_VECTOR].handler,
+ 0, msix_entries[QLA_ATIO_VECTOR].name, rsp);
qentry->have_irq = 1;
}
@@ -3147,7 +3129,7 @@ msix_register_fail:
ql_log(ql_log_fatal, vha, 0x00cb,
"MSI-X: unable to register handler -- %x/%d.\n",
qentry->vector, ret);
- qla24xx_disable_msix(ha);
+ qla2x00_free_irqs(vha);
ha->mqenable = 0;
goto msix_out;
}
@@ -3155,11 +3137,13 @@ msix_register_fail:
/* Enable MSI-X vector for response queue update for queue 0 */
if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
if (ha->msixbase && ha->mqiobase &&
- (ha->max_rsp_queues > 1 || ha->max_req_queues > 1))
+ (ha->max_rsp_queues > 1 || ha->max_req_queues > 1 ||
+ ql2xmqsupport))
ha->mqenable = 1;
} else
- if (ha->mqiobase
- && (ha->max_rsp_queues > 1 || ha->max_req_queues > 1))
+ if (ha->mqiobase &&
+ (ha->max_rsp_queues > 1 || ha->max_req_queues > 1 ||
+ ql2xmqsupport))
ha->mqenable = 1;
ql_dbg(ql_dbg_multiq, vha, 0xc005,
"mqiobase=%p, max_rsp_queues=%d, max_req_queues=%d.\n",
@@ -3169,7 +3153,6 @@ msix_register_fail:
ha->mqiobase, ha->max_rsp_queues, ha->max_req_queues);
msix_out:
- kfree(entries);
return ret;
}
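
The hunk above trades the open-coded struct msix_entry array plus pci_enable_msix_range() for the managed-vector API. A minimal sketch of that API's lifecycle, assuming a device that wants between 2 and 16 vectors (my_handler and my_data are illustrative stand-ins, not driver symbols):

        #include <linux/interrupt.h>
        #include <linux/pci.h>

        static int example_setup_vectors(struct pci_dev *pdev)
        {
                int nvec, ret;

                /* PCI_IRQ_AFFINITY spreads the vectors across CPUs */
                nvec = pci_alloc_irq_vectors(pdev, 2, 16,
                                             PCI_IRQ_MSIX | PCI_IRQ_AFFINITY);
                if (nvec < 0)
                        return nvec;    /* could not get even the minimum */

                /* pci_irq_vector() maps vector index -> Linux IRQ number */
                ret = request_irq(pci_irq_vector(pdev, 0), my_handler, 0,
                                  "example (default)", my_data);
                if (ret)
                        pci_free_irq_vectors(pdev);
                return ret;
        }
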
@@ -3222,7 +3205,7 @@ skip_msix:
!IS_QLA27XX(ha))
goto skip_msi;
- ret = pci_enable_msi(ha->pdev);
+ ret = pci_alloc_irq_vectors(ha->pdev, 1, 1, PCI_IRQ_MSI);
if (!ret) {
ql_dbg(ql_dbg_init, vha, 0x0038,
"MSI: Enabled.\n");
@@ -3267,6 +3250,8 @@ qla2x00_free_irqs(scsi_qla_host_t *vha)
{
struct qla_hw_data *ha = vha->hw;
struct rsp_que *rsp;
+ struct qla_msix_entry *qentry;
+ int i;
/*
* We need to check that ha->rsp_q_map is valid in case we are called
@@ -3276,25 +3261,36 @@ qla2x00_free_irqs(scsi_qla_host_t *vha)
return;
rsp = ha->rsp_q_map[0];
- if (ha->flags.msix_enabled)
- qla24xx_disable_msix(ha);
- else if (ha->flags.msi_enabled) {
- free_irq(ha->pdev->irq, rsp);
- pci_disable_msi(ha->pdev);
- } else
- free_irq(ha->pdev->irq, rsp);
-}
+ if (ha->flags.msix_enabled) {
+ for (i = 0; i < ha->msix_count; i++) {
+ qentry = &ha->msix_entries[i];
+ if (qentry->have_irq) {
+ irq_set_affinity_notifier(qentry->vector, NULL);
+ free_irq(pci_irq_vector(ha->pdev, i), qentry->handle);
+ }
+ }
+ kfree(ha->msix_entries);
+ ha->msix_entries = NULL;
+ ha->flags.msix_enabled = 0;
+ ql_dbg(ql_dbg_init, vha, 0x0042,
+ "Disabled MSI-X.\n");
+ } else {
+ free_irq(pci_irq_vector(ha->pdev, 0), rsp);
+ }
+ pci_free_irq_vectors(ha->pdev);
+}
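
The rewrite of qla2x00_free_irqs() above restores the symmetry the managed-vector API expects: each vector registered with request_irq() is released with free_irq(pci_irq_vector(...)) first, then pci_free_irq_vectors() tears down the MSI-X/MSI state. The final call is safe on the MSI and INTx paths too, since pci_irq_vector(pdev, 0) degrades to pdev->irq when no vectors were allocated.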
-int qla25xx_request_irq(struct rsp_que *rsp)
+int qla25xx_request_irq(struct qla_hw_data *ha, struct qla_qpair *qpair,
+ struct qla_msix_entry *msix, int vector_type)
{
- struct qla_hw_data *ha = rsp->hw;
- struct qla_init_msix_entry *intr = &msix_entries[2];
- struct qla_msix_entry *msix = rsp->msix;
+ struct qla_init_msix_entry *intr = &msix_entries[vector_type];
scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
int ret;
- ret = request_irq(msix->vector, intr->handler, 0, intr->name, rsp);
+ scnprintf(msix->name, sizeof(msix->name),
+ "qla2xxx%lu_qpair%d", vha->host_no, qpair->id);
+ ret = request_irq(msix->vector, intr->handler, 0, msix->name, qpair);
if (ret) {
ql_log(ql_log_fatal, vha, 0x00e6,
"MSI-X: Unable to register handler -- %x/%d.\n",
@@ -3302,7 +3298,7 @@ int qla25xx_request_irq(struct rsp_que *rsp)
return ret;
}
msix->have_irq = 1;
- msix->rsp = rsp;
+ msix->handle = qpair;
return ret;
}
@@ -3315,11 +3311,12 @@ static void qla_irq_affinity_notify(struct irq_affinity_notify *notify,
container_of(notify, struct qla_msix_entry, irq_notify);
struct qla_hw_data *ha;
struct scsi_qla_host *base_vha;
+ struct rsp_que *rsp = e->handle;
/* user is recommended to set mask to just 1 cpu */
e->cpuid = cpumask_first(mask);
- ha = e->rsp->hw;
+ ha = rsp->hw;
base_vha = pci_get_drvdata(ha->pdev);
ql_dbg(ql_dbg_init, base_vha, 0xffff,
@@ -3343,9 +3340,10 @@ static void qla_irq_affinity_release(struct kref *ref)
container_of(ref, struct irq_affinity_notify, kref);
struct qla_msix_entry *e =
container_of(notify, struct qla_msix_entry, irq_notify);
- struct scsi_qla_host *base_vha = pci_get_drvdata(e->rsp->hw->pdev);
+ struct rsp_que *rsp = e->handle;
+ struct scsi_qla_host *base_vha = pci_get_drvdata(rsp->hw->pdev);
ql_dbg(ql_dbg_init, base_vha, 0xffff,
- "%s: host%ld: vector %d cpu %d \n", __func__,
+ "%s: host%ld: vector %d cpu %d\n", __func__,
base_vha->host_no, e->vector, e->cpuid);
}
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index 23698c998699..2819ceb96041 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -10,6 +10,43 @@
#include <linux/delay.h>
#include <linux/gfp.h>
+static struct rom_cmd {
+ uint16_t cmd;
+} rom_cmds[] = {
+ { MBC_LOAD_RAM },
+ { MBC_EXECUTE_FIRMWARE },
+ { MBC_READ_RAM_WORD },
+ { MBC_MAILBOX_REGISTER_TEST },
+ { MBC_VERIFY_CHECKSUM },
+ { MBC_GET_FIRMWARE_VERSION },
+ { MBC_LOAD_RISC_RAM },
+ { MBC_DUMP_RISC_RAM },
+ { MBC_LOAD_RISC_RAM_EXTENDED },
+ { MBC_DUMP_RISC_RAM_EXTENDED },
+ { MBC_WRITE_RAM_WORD_EXTENDED },
+ { MBC_READ_RAM_EXTENDED },
+ { MBC_GET_RESOURCE_COUNTS },
+ { MBC_SET_FIRMWARE_OPTION },
+ { MBC_MID_INITIALIZE_FIRMWARE },
+ { MBC_GET_FIRMWARE_STATE },
+ { MBC_GET_MEM_OFFLOAD_CNTRL_STAT },
+ { MBC_GET_RETRY_COUNT },
+ { MBC_TRACE_CONTROL },
+};
+
+static int is_rom_cmd(uint16_t cmd)
+{
+ int i;
+ struct rom_cmd *wc;
+
+ for (i = 0; i < ARRAY_SIZE(rom_cmds); i++) {
+ wc = rom_cmds + i;
+ if (wc->cmd == cmd)
+ return 1;
+ }
+
+ return 0;
+}
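
is_rom_cmd() is a plain linear scan of the table, cheap enough for ~19 entries on the mailbox slow path. It feeds the gate added to qla2x00_mailbox_command() in the next hunk; condensed, with abort_pending standing in for the three test_bit(..., &base_vha->dpc_flags) checks:

        /* condensed sketch of the check added in the next hunk */
        if (abort_pending && !is_rom_cmd(mcp->mb[0]))
                return QLA_FUNCTION_TIMEOUT;   /* fail fast, don't stall */
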
/*
* qla2x00_mailbox_command
@@ -92,6 +129,17 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
return QLA_FUNCTION_TIMEOUT;
}
+ /* check if ISP abort is active and return cmd with timeout */
+ if ((test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags) ||
+ test_bit(ISP_ABORT_RETRY, &base_vha->dpc_flags) ||
+ test_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags)) &&
+ !is_rom_cmd(mcp->mb[0])) {
+ ql_log(ql_log_info, vha, 0x1005,
+ "Cmd 0x%x aborted with timeout since ISP Abort is pending\n",
+ mcp->mb[0]);
+ return QLA_FUNCTION_TIMEOUT;
+ }
+
/*
* Wait for active mailbox commands to finish by waiting at most tov
* seconds. This is to serialize actual issuing of mailbox cmds during
@@ -178,6 +226,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
WRT_REG_WORD(&reg->isp.hccr, HCCR_SET_HOST_INT);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ wait_time = jiffies;
if (!wait_for_completion_timeout(&ha->mbx_intr_comp,
mcp->tov * HZ)) {
ql_dbg(ql_dbg_mbx, vha, 0x117a,
@@ -186,6 +235,9 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
clear_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
}
+ if (time_after(jiffies, wait_time + 5 * HZ))
+ ql_log(ql_log_warn, vha, 0x1015, "cmd=0x%x, waited %d msecs\n",
+ command, jiffies_to_msecs(jiffies - wait_time));
} else {
ql_dbg(ql_dbg_mbx, vha, 0x1011,
"Cmd=%x Polling Mode.\n", command);
@@ -1194,12 +1246,17 @@ qla2x00_abort_command(srb_t *sp)
fc_port_t *fcport = sp->fcport;
scsi_qla_host_t *vha = fcport->vha;
struct qla_hw_data *ha = vha->hw;
- struct req_que *req = vha->req;
+ struct req_que *req;
struct scsi_cmnd *cmd = GET_CMD_SP(sp);
ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103b,
"Entered %s.\n", __func__);
+ if (vha->flags.qpairs_available && sp->qpair)
+ req = sp->qpair->req;
+ else
+ req = vha->req;
+
spin_lock_irqsave(&ha->hardware_lock, flags);
for (handle = 1; handle < req->num_outstanding_cmds; handle++) {
if (req->outstanding_cmds[handle] == sp)
@@ -2152,10 +2209,10 @@ qla24xx_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1061,
"Entered %s.\n", __func__);
- if (ha->flags.cpu_affinity_enabled)
- req = ha->req_q_map[0];
+ if (vha->vp_idx && vha->qpair)
+ req = vha->qpair->req;
else
- req = vha->req;
+ req = ha->req_q_map[0];
lg = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &lg_dma);
if (lg == NULL) {
@@ -2435,10 +2492,7 @@ qla24xx_fabric_logout(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
}
memset(lg, 0, sizeof(struct logio_entry_24xx));
- if (ql2xmaxqueues > 1)
- req = ha->req_q_map[0];
- else
- req = vha->req;
+ req = vha->req;
lg->entry_type = LOGINOUT_PORT_IOCB_TYPE;
lg->entry_count = 1;
lg->handle = MAKE_HANDLE(req->id, lg->handle);
@@ -2904,6 +2958,9 @@ qla24xx_abort_command(srb_t *sp)
ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x108c,
"Entered %s.\n", __func__);
+ if (vha->flags.qpairs_available && sp->qpair)
+ req = sp->qpair->req;
+
if (ql2xasynctmfenable)
return qla24xx_async_abort_command(sp);
@@ -2984,6 +3041,7 @@ __qla24xx_issue_tmf(char *name, uint32_t type, struct fc_port *fcport,
struct qla_hw_data *ha;
struct req_que *req;
struct rsp_que *rsp;
+ struct qla_qpair *qpair;
vha = fcport->vha;
ha = vha->hw;
@@ -2992,10 +3050,15 @@ __qla24xx_issue_tmf(char *name, uint32_t type, struct fc_port *fcport,
ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1092,
"Entered %s.\n", __func__);
- if (ha->flags.cpu_affinity_enabled)
- rsp = ha->rsp_q_map[tag + 1];
- else
+ if (vha->vp_idx && vha->qpair) {
+ /* NPIV port */
+ qpair = vha->qpair;
+ rsp = qpair->rsp;
+ req = qpair->req;
+ } else {
rsp = req->rsp;
+ }
+
tsk = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &tsk_dma);
if (tsk == NULL) {
ql_log(ql_log_warn, vha, 0x1093,
diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c
index cf7ba52bae66..c6d6f0d912ff 100644
--- a/drivers/scsi/qla2xxx/qla_mid.c
+++ b/drivers/scsi/qla2xxx/qla_mid.c
@@ -540,9 +540,10 @@ qla25xx_free_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
uint16_t que_id = rsp->id;
if (rsp->msix && rsp->msix->have_irq) {
- free_irq(rsp->msix->vector, rsp);
+ free_irq(rsp->msix->vector, rsp->msix->handle);
rsp->msix->have_irq = 0;
- rsp->msix->rsp = NULL;
+ rsp->msix->in_use = 0;
+ rsp->msix->handle = NULL;
}
dma_free_coherent(&ha->pdev->dev, (rsp->length + 1) *
sizeof(response_t), rsp->ring, rsp->dma);
@@ -573,7 +574,7 @@ qla25xx_delete_req_que(struct scsi_qla_host *vha, struct req_que *req)
return ret;
}
-static int
+int
qla25xx_delete_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
{
int ret = -1;
@@ -596,34 +597,42 @@ qla25xx_delete_queues(struct scsi_qla_host *vha)
struct req_que *req = NULL;
struct rsp_que *rsp = NULL;
struct qla_hw_data *ha = vha->hw;
+ struct qla_qpair *qpair, *tqpair;
- /* Delete request queues */
- for (cnt = 1; cnt < ha->max_req_queues; cnt++) {
- req = ha->req_q_map[cnt];
- if (req && test_bit(cnt, ha->req_qid_map)) {
- ret = qla25xx_delete_req_que(vha, req);
- if (ret != QLA_SUCCESS) {
- ql_log(ql_log_warn, vha, 0x00ea,
- "Couldn't delete req que %d.\n",
- req->id);
- return ret;
+ if (ql2xmqsupport) {
+ list_for_each_entry_safe(qpair, tqpair, &vha->qp_list,
+ qp_list_elem)
+ qla2xxx_delete_qpair(vha, qpair);
+ } else {
+ /* Delete request queues */
+ for (cnt = 1; cnt < ha->max_req_queues; cnt++) {
+ req = ha->req_q_map[cnt];
+ if (req && test_bit(cnt, ha->req_qid_map)) {
+ ret = qla25xx_delete_req_que(vha, req);
+ if (ret != QLA_SUCCESS) {
+ ql_log(ql_log_warn, vha, 0x00ea,
+ "Couldn't delete req que %d.\n",
+ req->id);
+ return ret;
+ }
}
}
- }
- /* Delete response queues */
- for (cnt = 1; cnt < ha->max_rsp_queues; cnt++) {
- rsp = ha->rsp_q_map[cnt];
- if (rsp && test_bit(cnt, ha->rsp_qid_map)) {
- ret = qla25xx_delete_rsp_que(vha, rsp);
- if (ret != QLA_SUCCESS) {
- ql_log(ql_log_warn, vha, 0x00eb,
- "Couldn't delete rsp que %d.\n",
- rsp->id);
- return ret;
+ /* Delete response queues */
+ for (cnt = 1; cnt < ha->max_rsp_queues; cnt++) {
+ rsp = ha->rsp_q_map[cnt];
+ if (rsp && test_bit(cnt, ha->rsp_qid_map)) {
+ ret = qla25xx_delete_rsp_que(vha, rsp);
+ if (ret != QLA_SUCCESS) {
+ ql_log(ql_log_warn, vha, 0x00eb,
+ "Couldn't delete rsp que %d.\n",
+ rsp->id);
+ return ret;
+ }
}
}
}
+
return ret;
}
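
list_for_each_entry_safe() is the right iterator for the qpair branch above on the assumption that qla2xxx_delete_qpair() unlinks (and frees) the entry being visited: the _safe variant caches the next element in tqpair before the body runs, so the walk never dereferences a deleted node.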
@@ -659,10 +668,10 @@ qla25xx_create_req_que(struct qla_hw_data *ha, uint16_t options,
if (ret != QLA_SUCCESS)
goto que_failed;
- mutex_lock(&ha->vport_lock);
+ mutex_lock(&ha->mq_lock);
que_id = find_first_zero_bit(ha->req_qid_map, ha->max_req_queues);
if (que_id >= ha->max_req_queues) {
- mutex_unlock(&ha->vport_lock);
+ mutex_unlock(&ha->mq_lock);
ql_log(ql_log_warn, base_vha, 0x00db,
"No resources to create additional request queue.\n");
goto que_failed;
@@ -708,7 +717,7 @@ qla25xx_create_req_que(struct qla_hw_data *ha, uint16_t options,
req->req_q_out = &reg->isp25mq.req_q_out;
req->max_q_depth = ha->req_q_map[0]->max_q_depth;
req->out_ptr = (void *)(req->ring + req->length);
- mutex_unlock(&ha->vport_lock);
+ mutex_unlock(&ha->mq_lock);
ql_dbg(ql_dbg_multiq, base_vha, 0xc004,
"ring_ptr=%p ring_index=%d, "
"cnt=%d id=%d max_q_depth=%d.\n",
@@ -724,9 +733,9 @@ qla25xx_create_req_que(struct qla_hw_data *ha, uint16_t options,
if (ret != QLA_SUCCESS) {
ql_log(ql_log_fatal, base_vha, 0x00df,
"%s failed.\n", __func__);
- mutex_lock(&ha->vport_lock);
+ mutex_lock(&ha->mq_lock);
clear_bit(que_id, ha->req_qid_map);
- mutex_unlock(&ha->vport_lock);
+ mutex_unlock(&ha->mq_lock);
goto que_failed;
}
@@ -741,20 +750,20 @@ failed:
static void qla_do_work(struct work_struct *work)
{
unsigned long flags;
- struct rsp_que *rsp = container_of(work, struct rsp_que, q_work);
+ struct qla_qpair *qpair = container_of(work, struct qla_qpair, q_work);
struct scsi_qla_host *vha;
- struct qla_hw_data *ha = rsp->hw;
+ struct qla_hw_data *ha = qpair->hw;
- spin_lock_irqsave(&rsp->hw->hardware_lock, flags);
+ spin_lock_irqsave(&qpair->qp_lock, flags);
vha = pci_get_drvdata(ha->pdev);
- qla24xx_process_response_queue(vha, rsp);
- spin_unlock_irqrestore(&rsp->hw->hardware_lock, flags);
+ qla24xx_process_response_queue(vha, qpair->rsp);
+ spin_unlock_irqrestore(&qpair->qp_lock, flags);
}
/* create response queue */
int
qla25xx_create_rsp_que(struct qla_hw_data *ha, uint16_t options,
- uint8_t vp_idx, uint16_t rid, int req)
+ uint8_t vp_idx, uint16_t rid, struct qla_qpair *qpair)
{
int ret = 0;
struct rsp_que *rsp = NULL;
@@ -779,28 +788,24 @@ qla25xx_create_rsp_que(struct qla_hw_data *ha, uint16_t options,
goto que_failed;
}
- mutex_lock(&ha->vport_lock);
+ mutex_lock(&ha->mq_lock);
que_id = find_first_zero_bit(ha->rsp_qid_map, ha->max_rsp_queues);
if (que_id >= ha->max_rsp_queues) {
- mutex_unlock(&ha->vport_lock);
+ mutex_unlock(&ha->mq_lock);
ql_log(ql_log_warn, base_vha, 0x00e2,
"No resources to create additional request queue.\n");
goto que_failed;
}
set_bit(que_id, ha->rsp_qid_map);
- if (ha->flags.msix_enabled)
- rsp->msix = &ha->msix_entries[que_id + 1];
- else
- ql_log(ql_log_warn, base_vha, 0x00e3,
- "MSIX not enabled.\n");
+ rsp->msix = qpair->msix;
ha->rsp_q_map[que_id] = rsp;
rsp->rid = rid;
rsp->vp_idx = vp_idx;
rsp->hw = ha;
ql_dbg(ql_dbg_init, base_vha, 0x00e4,
- "queue_id=%d rid=%d vp_idx=%d hw=%p.\n",
+ "rsp queue_id=%d rid=%d vp_idx=%d hw=%p.\n",
que_id, rsp->rid, rsp->vp_idx, rsp->hw);
/* Use alternate PCI bus number */
if (MSB(rsp->rid))
@@ -812,23 +817,27 @@ qla25xx_create_rsp_que(struct qla_hw_data *ha, uint16_t options,
if (!IS_MSIX_NACK_CAPABLE(ha))
options |= BIT_6;
+ /* Set option to indicate response queue creation */
+ options |= BIT_1;
+
rsp->options = options;
rsp->id = que_id;
reg = ISP_QUE_REG(ha, que_id);
rsp->rsp_q_in = &reg->isp25mq.rsp_q_in;
rsp->rsp_q_out = &reg->isp25mq.rsp_q_out;
rsp->in_ptr = (void *)(rsp->ring + rsp->length);
- mutex_unlock(&ha->vport_lock);
+ mutex_unlock(&ha->mq_lock);
ql_dbg(ql_dbg_multiq, base_vha, 0xc00b,
- "options=%x id=%d rsp_q_in=%p rsp_q_out=%p",
+ "options=%x id=%d rsp_q_in=%p rsp_q_out=%p\n",
rsp->options, rsp->id, rsp->rsp_q_in,
rsp->rsp_q_out);
ql_dbg(ql_dbg_init, base_vha, 0x00e5,
- "options=%x id=%d rsp_q_in=%p rsp_q_out=%p",
+ "options=%x id=%d rsp_q_in=%p rsp_q_out=%p\n",
rsp->options, rsp->id, rsp->rsp_q_in,
rsp->rsp_q_out);
- ret = qla25xx_request_irq(rsp);
+ ret = qla25xx_request_irq(ha, qpair, qpair->msix,
+ QLA_MSIX_QPAIR_MULTIQ_RSP_Q);
if (ret)
goto que_failed;
@@ -836,19 +845,16 @@ qla25xx_create_rsp_que(struct qla_hw_data *ha, uint16_t options,
if (ret != QLA_SUCCESS) {
ql_log(ql_log_fatal, base_vha, 0x00e7,
"%s failed.\n", __func__);
- mutex_lock(&ha->vport_lock);
+ mutex_lock(&ha->mq_lock);
clear_bit(que_id, ha->rsp_qid_map);
- mutex_unlock(&ha->vport_lock);
+ mutex_unlock(&ha->mq_lock);
goto que_failed;
}
- if (req >= 0)
- rsp->req = ha->req_q_map[req];
- else
- rsp->req = NULL;
+ rsp->req = NULL;
qla2x00_init_response_q_entries(rsp);
- if (rsp->hw->wq)
- INIT_WORK(&rsp->q_work, qla_do_work);
+ if (qpair->hw->wq)
+ INIT_WORK(&qpair->q_work, qla_do_work);
return rsp->id;
que_failed:
diff --git a/drivers/scsi/qla2xxx/qla_mr.c b/drivers/scsi/qla2xxx/qla_mr.c
index 15dff7099955..02f1de18bc2b 100644
--- a/drivers/scsi/qla2xxx/qla_mr.c
+++ b/drivers/scsi/qla2xxx/qla_mr.c
@@ -10,6 +10,7 @@
#include <linux/pci.h>
#include <linux/ratelimit.h>
#include <linux/vmalloc.h>
+#include <linux/bsg-lib.h>
#include <scsi/scsi_tcq.h>
#include <linux/utsname.h>
@@ -2206,7 +2207,8 @@ qlafx00_ioctl_iosb_entry(scsi_qla_host_t *vha, struct req_que *req,
{
const char func[] = "IOSB_IOCB";
srb_t *sp;
- struct fc_bsg_job *bsg_job;
+ struct bsg_job *bsg_job;
+ struct fc_bsg_reply *bsg_reply;
struct srb_iocb *iocb_job;
int res;
struct qla_mt_iocb_rsp_fx00 fstatus;
@@ -2226,6 +2228,7 @@ qlafx00_ioctl_iosb_entry(scsi_qla_host_t *vha, struct req_que *req,
pkt->dataword_r;
} else {
bsg_job = sp->u.bsg_job;
+ bsg_reply = bsg_job->reply;
memset(&fstatus, 0, sizeof(struct qla_mt_iocb_rsp_fx00));
@@ -2257,8 +2260,8 @@ qlafx00_ioctl_iosb_entry(scsi_qla_host_t *vha, struct req_que *req,
sp->fcport->vha, 0x5074,
(uint8_t *)fw_sts_ptr, sizeof(struct qla_mt_iocb_rsp_fx00));
- res = bsg_job->reply->result = DID_OK << 16;
- bsg_job->reply->reply_payload_rcv_len =
+ res = bsg_reply->result = DID_OK << 16;
+ bsg_reply->reply_payload_rcv_len =
bsg_job->reply_payload.payload_len;
}
sp->done(vha, sp, res);
@@ -3252,7 +3255,8 @@ qlafx00_fxdisc_iocb(srb_t *sp, struct fxdisc_entry_fx00 *pfxiocb)
{
struct srb_iocb *fxio = &sp->u.iocb_cmd;
struct qla_mt_iocb_rqst_fx00 *piocb_rqst;
- struct fc_bsg_job *bsg_job;
+ struct bsg_job *bsg_job;
+ struct fc_bsg_request *bsg_request;
struct fxdisc_entry_fx00 fx_iocb;
uint8_t entry_cnt = 1;
@@ -3301,8 +3305,9 @@ qlafx00_fxdisc_iocb(srb_t *sp, struct fxdisc_entry_fx00 *pfxiocb)
} else {
struct scatterlist *sg;
bsg_job = sp->u.bsg_job;
+ bsg_request = bsg_job->request;
piocb_rqst = (struct qla_mt_iocb_rqst_fx00 *)
- &bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];
+ &bsg_request->rqst_data.h_vendor.vendor_cmd[1];
fx_iocb.func_num = piocb_rqst->func_type;
fx_iocb.adapid = piocb_rqst->adapid;
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 56d6142852a5..8521cfe302e9 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -13,6 +13,7 @@
#include <linux/mutex.h>
#include <linux/kobject.h>
#include <linux/slab.h>
+#include <linux/blk-mq-pci.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsicam.h>
#include <scsi/scsi_transport.h>
@@ -30,7 +31,7 @@ static int apidev_major;
/*
* SRB allocation cache
*/
-static struct kmem_cache *srb_cachep;
+struct kmem_cache *srb_cachep;
/*
* CT6 CTX allocation cache
@@ -143,19 +144,12 @@ MODULE_PARM_DESC(ql2xiidmaenable,
"Enables iIDMA settings "
"Default is 1 - perform iIDMA. 0 - no iIDMA.");
-int ql2xmaxqueues = 1;
-module_param(ql2xmaxqueues, int, S_IRUGO);
-MODULE_PARM_DESC(ql2xmaxqueues,
- "Enables MQ settings "
- "Default is 1 for single queue. Set it to number "
- "of queues in MQ mode.");
-
-int ql2xmultique_tag;
-module_param(ql2xmultique_tag, int, S_IRUGO);
-MODULE_PARM_DESC(ql2xmultique_tag,
- "Enables CPU affinity settings for the driver "
- "Default is 0 for no affinity of request and response IO. "
- "Set it to 1 to turn on the cpu affinity.");
+int ql2xmqsupport = 1;
+module_param(ql2xmqsupport, int, S_IRUGO);
+MODULE_PARM_DESC(ql2xmqsupport,
+ "Enable on demand multiple queue pairs support "
+ "Default is 1 for supported. "
+ "Set it to 0 to turn off mq qpair support.");
int ql2xfwloadbin;
module_param(ql2xfwloadbin, int, S_IRUGO|S_IWUSR);
@@ -261,6 +255,7 @@ static int qla2xxx_eh_host_reset(struct scsi_cmnd *);
static void qla2x00_clear_drv_active(struct qla_hw_data *);
static void qla2x00_free_device(scsi_qla_host_t *);
static void qla83xx_disable_laser(scsi_qla_host_t *vha);
+static int qla2xxx_map_queues(struct Scsi_Host *shost);
struct scsi_host_template qla2xxx_driver_template = {
.module = THIS_MODULE,
@@ -280,6 +275,7 @@ struct scsi_host_template qla2xxx_driver_template = {
.scan_finished = qla2xxx_scan_finished,
.scan_start = qla2xxx_scan_start,
.change_queue_depth = scsi_change_queue_depth,
+ .map_queues = qla2xxx_map_queues,
.this_id = -1,
.cmd_per_lun = 3,
.use_clustering = ENABLE_CLUSTERING,
@@ -339,6 +335,8 @@ static int qla2x00_mem_alloc(struct qla_hw_data *, uint16_t, uint16_t,
struct req_que **, struct rsp_que **);
static void qla2x00_free_fw_dump(struct qla_hw_data *);
static void qla2x00_mem_free(struct qla_hw_data *);
+int qla2xxx_mqueuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd,
+ struct qla_qpair *qpair);
/* -------------------------------------------------------------------------- */
static int qla2x00_alloc_queues(struct qla_hw_data *ha, struct req_que *req,
@@ -360,6 +358,25 @@ static int qla2x00_alloc_queues(struct qla_hw_data *ha, struct req_que *req,
"Unable to allocate memory for response queue ptrs.\n");
goto fail_rsp_map;
}
+
+ if (ql2xmqsupport && ha->max_qpairs) {
+ ha->queue_pair_map = kcalloc(ha->max_qpairs, sizeof(struct qla_qpair *),
+ GFP_KERNEL);
+ if (!ha->queue_pair_map) {
+ ql_log(ql_log_fatal, vha, 0x0180,
+ "Unable to allocate memory for queue pair ptrs.\n");
+ goto fail_qpair_map;
+ }
+ ha->base_qpair = kzalloc(sizeof(struct qla_qpair), GFP_KERNEL);
+ if (ha->base_qpair == NULL) {
+ ql_log(ql_log_warn, vha, 0x0182,
+ "Failed to allocate base queue pair memory.\n");
+ goto fail_base_qpair;
+ }
+ ha->base_qpair->req = req;
+ ha->base_qpair->rsp = rsp;
+ }
+
/*
* Make sure we record at least the request and response queue zero in
* case we need to free them if part of the probe fails.
@@ -370,6 +387,11 @@ static int qla2x00_alloc_queues(struct qla_hw_data *ha, struct req_que *req,
set_bit(0, ha->req_qid_map);
return 1;
+fail_base_qpair:
+ kfree(ha->queue_pair_map);
+fail_qpair_map:
+ kfree(ha->rsp_q_map);
+ ha->rsp_q_map = NULL;
fail_rsp_map:
kfree(ha->req_q_map);
ha->req_q_map = NULL;
@@ -417,82 +439,43 @@ static void qla2x00_free_queues(struct qla_hw_data *ha)
struct req_que *req;
struct rsp_que *rsp;
int cnt;
+ unsigned long flags;
+ spin_lock_irqsave(&ha->hardware_lock, flags);
for (cnt = 0; cnt < ha->max_req_queues; cnt++) {
if (!test_bit(cnt, ha->req_qid_map))
continue;
req = ha->req_q_map[cnt];
+ clear_bit(cnt, ha->req_qid_map);
+ ha->req_q_map[cnt] = NULL;
+
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
qla2x00_free_req_que(ha, req);
+ spin_lock_irqsave(&ha->hardware_lock, flags);
}
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
kfree(ha->req_q_map);
ha->req_q_map = NULL;
+
+ spin_lock_irqsave(&ha->hardware_lock, flags);
for (cnt = 0; cnt < ha->max_rsp_queues; cnt++) {
if (!test_bit(cnt, ha->rsp_qid_map))
continue;
rsp = ha->rsp_q_map[cnt];
+ clear_bit(cnt, ha->rsp_qid_map);
+ ha->rsp_q_map[cnt] = NULL;
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
qla2x00_free_rsp_que(ha, rsp);
+ spin_lock_irqsave(&ha->hardware_lock, flags);
}
- kfree(ha->rsp_q_map);
- ha->rsp_q_map = NULL;
-}
-
-static int qla25xx_setup_mode(struct scsi_qla_host *vha)
-{
- uint16_t options = 0;
- int ques, req, ret;
- struct qla_hw_data *ha = vha->hw;
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
- if (!(ha->fw_attributes & BIT_6)) {
- ql_log(ql_log_warn, vha, 0x00d8,
- "Firmware is not multi-queue capable.\n");
- goto fail;
- }
- if (ql2xmultique_tag) {
- /* create a request queue for IO */
- options |= BIT_7;
- req = qla25xx_create_req_que(ha, options, 0, 0, -1,
- QLA_DEFAULT_QUE_QOS);
- if (!req) {
- ql_log(ql_log_warn, vha, 0x00e0,
- "Failed to create request queue.\n");
- goto fail;
- }
- ha->wq = alloc_workqueue("qla2xxx_wq", WQ_MEM_RECLAIM, 1);
- vha->req = ha->req_q_map[req];
- options |= BIT_1;
- for (ques = 1; ques < ha->max_rsp_queues; ques++) {
- ret = qla25xx_create_rsp_que(ha, options, 0, 0, req);
- if (!ret) {
- ql_log(ql_log_warn, vha, 0x00e8,
- "Failed to create response queue.\n");
- goto fail2;
- }
- }
- ha->flags.cpu_affinity_enabled = 1;
- ql_dbg(ql_dbg_multiq, vha, 0xc007,
- "CPU affinity mode enabled, "
- "no. of response queues:%d no. of request queues:%d.\n",
- ha->max_rsp_queues, ha->max_req_queues);
- ql_dbg(ql_dbg_init, vha, 0x00e9,
- "CPU affinity mode enabled, "
- "no. of response queues:%d no. of request queues:%d.\n",
- ha->max_rsp_queues, ha->max_req_queues);
- }
- return 0;
-fail2:
- qla25xx_delete_queues(vha);
- destroy_workqueue(ha->wq);
- ha->wq = NULL;
- vha->req = ha->req_q_map[0];
-fail:
- ha->mqenable = 0;
- kfree(ha->req_q_map);
kfree(ha->rsp_q_map);
- ha->max_req_queues = ha->max_rsp_queues = 1;
- return 1;
+ ha->rsp_q_map = NULL;
}
static char *
@@ -669,7 +652,7 @@ qla2x00_sp_free_dma(void *vha, void *ptr)
qla2x00_rel_sp(sp->fcport->vha, sp);
}
-static void
+void
qla2x00_sp_compl(void *data, void *ptr, int res)
{
struct qla_hw_data *ha = (struct qla_hw_data *)data;
@@ -693,6 +676,75 @@ qla2x00_sp_compl(void *data, void *ptr, int res)
cmd->scsi_done(cmd);
}
+void
+qla2xxx_qpair_sp_free_dma(void *vha, void *ptr)
+{
+ srb_t *sp = (srb_t *)ptr;
+ struct scsi_cmnd *cmd = GET_CMD_SP(sp);
+ struct qla_hw_data *ha = sp->fcport->vha->hw;
+ void *ctx = GET_CMD_CTX_SP(sp);
+
+ if (sp->flags & SRB_DMA_VALID) {
+ scsi_dma_unmap(cmd);
+ sp->flags &= ~SRB_DMA_VALID;
+ }
+
+ if (sp->flags & SRB_CRC_PROT_DMA_VALID) {
+ dma_unmap_sg(&ha->pdev->dev, scsi_prot_sglist(cmd),
+ scsi_prot_sg_count(cmd), cmd->sc_data_direction);
+ sp->flags &= ~SRB_CRC_PROT_DMA_VALID;
+ }
+
+ if (sp->flags & SRB_CRC_CTX_DSD_VALID) {
+ /* List assured to be having elements */
+ qla2x00_clean_dsd_pool(ha, sp, NULL);
+ sp->flags &= ~SRB_CRC_CTX_DSD_VALID;
+ }
+
+ if (sp->flags & SRB_CRC_CTX_DMA_VALID) {
+ dma_pool_free(ha->dl_dma_pool, ctx,
+ ((struct crc_context *)ctx)->crc_ctx_dma);
+ sp->flags &= ~SRB_CRC_CTX_DMA_VALID;
+ }
+
+ if (sp->flags & SRB_FCP_CMND_DMA_VALID) {
+ struct ct6_dsd *ctx1 = (struct ct6_dsd *)ctx;
+
+ dma_pool_free(ha->fcp_cmnd_dma_pool, ctx1->fcp_cmnd,
+ ctx1->fcp_cmnd_dma);
+ list_splice(&ctx1->dsd_list, &ha->gbl_dsd_list);
+ ha->gbl_dsd_inuse -= ctx1->dsd_use_cnt;
+ ha->gbl_dsd_avail += ctx1->dsd_use_cnt;
+ mempool_free(ctx1, ha->ctx_mempool);
+ }
+
+ CMD_SP(cmd) = NULL;
+ qla2xxx_rel_qpair_sp(sp->qpair, sp);
+}
+
+void
+qla2xxx_qpair_sp_compl(void *data, void *ptr, int res)
+{
+ srb_t *sp = (srb_t *)ptr;
+ struct scsi_cmnd *cmd = GET_CMD_SP(sp);
+
+ cmd->result = res;
+
+ if (atomic_read(&sp->ref_count) == 0) {
+ ql_dbg(ql_dbg_io, sp->fcport->vha, 0x3079,
+ "SP reference-count to ZERO -- sp=%p cmd=%p.\n",
+ sp, GET_CMD_SP(sp));
+ if (ql2xextended_error_logging & ql_dbg_io)
+ WARN_ON(atomic_read(&sp->ref_count) == 0);
+ return;
+ }
+ if (!atomic_dec_and_test(&sp->ref_count))
+ return;
+
+ qla2xxx_qpair_sp_free_dma(sp->fcport->vha, sp);
+ cmd->scsi_done(cmd);
+}
+
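qla2xxx_qpair_sp_free_dma() and qla2xxx_qpair_sp_compl() mirror the existing qla2x00_sp_free_dma()/qla2x00_sp_compl() pair; the differences are that the srb is returned to the per-qpair pool via qla2xxx_rel_qpair_sp() and that scsi_done() only runs on the final atomic_dec_and_test() of sp->ref_count, so a command referenced by both the abort and interrupt paths is freed exactly once.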
/* If we are SP1 here, we need to still take and release the host_lock as SP1
* does not have the changes necessary to avoid taking host->host_lock.
*/
@@ -706,12 +758,28 @@ qla2xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
srb_t *sp;
int rval;
+ struct qla_qpair *qpair = NULL;
+ uint32_t tag;
+ uint16_t hwq;
if (unlikely(test_bit(UNLOADING, &base_vha->dpc_flags))) {
cmd->result = DID_NO_CONNECT << 16;
goto qc24_fail_command;
}
+ if (ha->mqenable) {
+ if (shost_use_blk_mq(vha->host)) {
+ tag = blk_mq_unique_tag(cmd->request);
+ hwq = blk_mq_unique_tag_to_hwq(tag);
+ qpair = ha->queue_pair_map[hwq];
+ } else if (vha->vp_idx && vha->qpair) {
+ qpair = vha->qpair;
+ }
+
+ if (qpair)
+ return qla2xxx_mqueuecommand(host, cmd, qpair);
+ }
+
if (ha->flags.eeh_busy) {
if (ha->flags.pci_channel_io_perm_failure) {
ql_dbg(ql_dbg_aer, vha, 0x9010,
@@ -808,6 +876,95 @@ qc24_fail_command:
return 0;
}
+/* For MQ supported I/O */
+int
+qla2xxx_mqueuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd,
+ struct qla_qpair *qpair)
+{
+ scsi_qla_host_t *vha = shost_priv(host);
+ fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata;
+ struct fc_rport *rport = starget_to_rport(scsi_target(cmd->device));
+ struct qla_hw_data *ha = vha->hw;
+ struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
+ srb_t *sp;
+ int rval;
+
+ rval = fc_remote_port_chkready(rport);
+ if (rval) {
+ cmd->result = rval;
+ ql_dbg(ql_dbg_io + ql_dbg_verbose, vha, 0x3076,
+ "fc_remote_port_chkready failed for cmd=%p, rval=0x%x.\n",
+ cmd, rval);
+ goto qc24_fail_command;
+ }
+
+ if (!fcport) {
+ cmd->result = DID_NO_CONNECT << 16;
+ goto qc24_fail_command;
+ }
+
+ if (atomic_read(&fcport->state) != FCS_ONLINE) {
+ if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD ||
+ atomic_read(&base_vha->loop_state) == LOOP_DEAD) {
+ ql_dbg(ql_dbg_io, vha, 0x3077,
+ "Returning DNC, fcport_state=%d loop_state=%d.\n",
+ atomic_read(&fcport->state),
+ atomic_read(&base_vha->loop_state));
+ cmd->result = DID_NO_CONNECT << 16;
+ goto qc24_fail_command;
+ }
+ goto qc24_target_busy;
+ }
+
+ /*
+ * Return target busy if we've received a non-zero retry_delay_timer
+ * in a FCP_RSP.
+ */
+ if (fcport->retry_delay_timestamp == 0) {
+ /* retry delay not set */
+ } else if (time_after(jiffies, fcport->retry_delay_timestamp))
+ fcport->retry_delay_timestamp = 0;
+ else
+ goto qc24_target_busy;
+
+ sp = qla2xxx_get_qpair_sp(qpair, fcport, GFP_ATOMIC);
+ if (!sp)
+ goto qc24_host_busy;
+
+ sp->u.scmd.cmd = cmd;
+ sp->type = SRB_SCSI_CMD;
+ atomic_set(&sp->ref_count, 1);
+ CMD_SP(cmd) = (void *)sp;
+ sp->free = qla2xxx_qpair_sp_free_dma;
+ sp->done = qla2xxx_qpair_sp_compl;
+ sp->qpair = qpair;
+
+ rval = ha->isp_ops->start_scsi_mq(sp);
+ if (rval != QLA_SUCCESS) {
+ ql_dbg(ql_dbg_io + ql_dbg_verbose, vha, 0x3078,
+ "Start scsi failed rval=%d for cmd=%p.\n", rval, cmd);
+ if (rval == QLA_INTERFACE_ERROR)
+ goto qc24_fail_command;
+ goto qc24_host_busy_free_sp;
+ }
+
+ return 0;
+
+qc24_host_busy_free_sp:
+ qla2xxx_qpair_sp_free_dma(vha, sp);
+
+qc24_host_busy:
+ return SCSI_MLQUEUE_HOST_BUSY;
+
+qc24_target_busy:
+ return SCSI_MLQUEUE_TARGET_BUSY;
+
+qc24_fail_command:
+ cmd->scsi_done(cmd);
+
+ return 0;
+}
+
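The fast path added to qla2xxx_queuecommand() above picks the qpair from the blk-mq hardware-queue index encoded in the request tag before delegating here; condensed:

        /* condensed from the qla2xxx_queuecommand() hunk above */
        uint32_t tag = blk_mq_unique_tag(cmd->request);
        uint16_t hwq = blk_mq_unique_tag_to_hwq(tag);
        struct qla_qpair *qpair = ha->queue_pair_map[hwq]; /* CPU-affine */
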
/*
* qla2x00_eh_wait_on_command
* Waits for the command to be returned by the Firmware for some
@@ -1601,7 +1758,6 @@ qla2x00_iospace_config(struct qla_hw_data *ha)
{
resource_size_t pio;
uint16_t msix;
- int cpus;
if (pci_request_selected_regions(ha->pdev, ha->bars,
QLA2XXX_DRIVER_NAME)) {
@@ -1658,9 +1814,7 @@ skip_pio:
/* Determine queue resources */
ha->max_req_queues = ha->max_rsp_queues = 1;
- if ((ql2xmaxqueues <= 1 && !ql2xmultique_tag) ||
- (ql2xmaxqueues > 1 && ql2xmultique_tag) ||
- (!IS_QLA25XX(ha) && !IS_QLA81XX(ha)))
+ if (!ql2xmqsupport || (!IS_QLA25XX(ha) && !IS_QLA81XX(ha)))
goto mqiobase_exit;
ha->mqiobase = ioremap(pci_resource_start(ha->pdev, 3),
@@ -1670,26 +1824,18 @@ skip_pio:
"MQIO Base=%p.\n", ha->mqiobase);
/* Read MSIX vector size of the board */
pci_read_config_word(ha->pdev, QLA_PCI_MSIX_CONTROL, &msix);
- ha->msix_count = msix;
+ ha->msix_count = msix + 1;
/* Max queues are bounded by available msix vectors */
- /* queue 0 uses two msix vectors */
- if (ql2xmultique_tag) {
- cpus = num_online_cpus();
- ha->max_rsp_queues = (ha->msix_count - 1 > cpus) ?
- (cpus + 1) : (ha->msix_count - 1);
- ha->max_req_queues = 2;
- } else if (ql2xmaxqueues > 1) {
- ha->max_req_queues = ql2xmaxqueues > QLA_MQ_SIZE ?
- QLA_MQ_SIZE : ql2xmaxqueues;
- ql_dbg_pci(ql_dbg_multiq, ha->pdev, 0xc008,
- "QoS mode set, max no of request queues:%d.\n",
- ha->max_req_queues);
- ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0019,
- "QoS mode set, max no of request queues:%d.\n",
- ha->max_req_queues);
- }
+ /* MB interrupt uses 1 vector */
+ ha->max_req_queues = ha->msix_count - 1;
+ ha->max_rsp_queues = ha->max_req_queues;
+ /* Queue pair count is the max value minus the base queue pair */
+ ha->max_qpairs = ha->max_rsp_queues - 1;
+ ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0188,
+ "Max no of queues pairs: %d.\n", ha->max_qpairs);
+
ql_log_pci(ql_log_info, ha->pdev, 0x001a,
- "MSI-X vector count: %d.\n", msix);
+ "MSI-X vector count: %d.\n", ha->msix_count);
} else
ql_log_pci(ql_log_info, ha->pdev, 0x001b,
"BAR 3 not enabled.\n");
@@ -1709,7 +1855,6 @@ static int
qla83xx_iospace_config(struct qla_hw_data *ha)
{
uint16_t msix;
- int cpus;
if (pci_request_selected_regions(ha->pdev, ha->bars,
QLA2XXX_DRIVER_NAME)) {
@@ -1761,32 +1906,36 @@ qla83xx_iospace_config(struct qla_hw_data *ha)
/* Read MSIX vector size of the board */
pci_read_config_word(ha->pdev,
QLA_83XX_PCI_MSIX_CONTROL, &msix);
- ha->msix_count = msix;
- /* Max queues are bounded by available msix vectors */
- /* queue 0 uses two msix vectors */
- if (ql2xmultique_tag) {
- cpus = num_online_cpus();
- ha->max_rsp_queues = (ha->msix_count - 1 > cpus) ?
- (cpus + 1) : (ha->msix_count - 1);
- ha->max_req_queues = 2;
- } else if (ql2xmaxqueues > 1) {
- ha->max_req_queues = ql2xmaxqueues > QLA_MQ_SIZE ?
- QLA_MQ_SIZE : ql2xmaxqueues;
- ql_dbg_pci(ql_dbg_multiq, ha->pdev, 0xc00c,
- "QoS mode set, max no of request queues:%d.\n",
- ha->max_req_queues);
- ql_dbg_pci(ql_dbg_init, ha->pdev, 0x011b,
- "QoS mode set, max no of request queues:%d.\n",
- ha->max_req_queues);
+ ha->msix_count = msix + 1;
+ /*
+ * By default, driver uses at least two msix vectors
+ * (default & rspq)
+ */
+ if (ql2xmqsupport) {
+ /* MB interrupt uses 1 vector */
+ ha->max_req_queues = ha->msix_count - 1;
+ ha->max_rsp_queues = ha->max_req_queues;
+
+ /* ATIOQ needs 1 vector. That's 1 less QPair */
+ if (QLA_TGT_MODE_ENABLED())
+ ha->max_req_queues--;
+
+ /* Queue pair count is the max value minus
+ * the base queue pair */
+ ha->max_qpairs = ha->max_req_queues - 1;
+ ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0190,
+ "Max no of queues pairs: %d.\n", ha->max_qpairs);
}
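
Worked example of the budget above: a board whose MSI-X control word reads 31 gives msix_count = 32; one vector is reserved for the mailbox/default handler (max_req_queues = 31), target mode takes one more for the ATIO queue (30), and excluding the base queue pair leaves max_qpairs = 29.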
ql_log_pci(ql_log_info, ha->pdev, 0x011c,
- "MSI-X vector count: %d.\n", msix);
+ "MSI-X vector count: %d.\n", ha->msix_count);
} else
ql_log_pci(ql_log_info, ha->pdev, 0x011e,
"BAR 1 not enabled.\n");
mqiobase_exit:
ha->msix_count = ha->max_rsp_queues + 1;
+ if (QLA_TGT_MODE_ENABLED())
+ ha->msix_count++;
qlt_83xx_iospace_config(ha);
@@ -1831,6 +1980,7 @@ static struct isp_operations qla2100_isp_ops = {
.write_optrom = qla2x00_write_optrom_data,
.get_flash_version = qla2x00_get_flash_version,
.start_scsi = qla2x00_start_scsi,
+ .start_scsi_mq = NULL,
.abort_isp = qla2x00_abort_isp,
.iospace_config = qla2x00_iospace_config,
.initialize_adapter = qla2x00_initialize_adapter,
@@ -1869,6 +2019,7 @@ static struct isp_operations qla2300_isp_ops = {
.write_optrom = qla2x00_write_optrom_data,
.get_flash_version = qla2x00_get_flash_version,
.start_scsi = qla2x00_start_scsi,
+ .start_scsi_mq = NULL,
.abort_isp = qla2x00_abort_isp,
.iospace_config = qla2x00_iospace_config,
.initialize_adapter = qla2x00_initialize_adapter,
@@ -1907,6 +2058,7 @@ static struct isp_operations qla24xx_isp_ops = {
.write_optrom = qla24xx_write_optrom_data,
.get_flash_version = qla24xx_get_flash_version,
.start_scsi = qla24xx_start_scsi,
+ .start_scsi_mq = NULL,
.abort_isp = qla2x00_abort_isp,
.iospace_config = qla2x00_iospace_config,
.initialize_adapter = qla2x00_initialize_adapter,
@@ -1945,6 +2097,7 @@ static struct isp_operations qla25xx_isp_ops = {
.write_optrom = qla24xx_write_optrom_data,
.get_flash_version = qla24xx_get_flash_version,
.start_scsi = qla24xx_dif_start_scsi,
+ .start_scsi_mq = qla2xxx_dif_start_scsi_mq,
.abort_isp = qla2x00_abort_isp,
.iospace_config = qla2x00_iospace_config,
.initialize_adapter = qla2x00_initialize_adapter,
@@ -1983,6 +2136,7 @@ static struct isp_operations qla81xx_isp_ops = {
.write_optrom = qla24xx_write_optrom_data,
.get_flash_version = qla24xx_get_flash_version,
.start_scsi = qla24xx_dif_start_scsi,
+ .start_scsi_mq = qla2xxx_dif_start_scsi_mq,
.abort_isp = qla2x00_abort_isp,
.iospace_config = qla2x00_iospace_config,
.initialize_adapter = qla2x00_initialize_adapter,
@@ -2021,6 +2175,7 @@ static struct isp_operations qla82xx_isp_ops = {
.write_optrom = qla82xx_write_optrom_data,
.get_flash_version = qla82xx_get_flash_version,
.start_scsi = qla82xx_start_scsi,
+ .start_scsi_mq = NULL,
.abort_isp = qla82xx_abort_isp,
.iospace_config = qla82xx_iospace_config,
.initialize_adapter = qla2x00_initialize_adapter,
@@ -2059,6 +2214,7 @@ static struct isp_operations qla8044_isp_ops = {
.write_optrom = qla8044_write_optrom_data,
.get_flash_version = qla82xx_get_flash_version,
.start_scsi = qla82xx_start_scsi,
+ .start_scsi_mq = NULL,
.abort_isp = qla8044_abort_isp,
.iospace_config = qla82xx_iospace_config,
.initialize_adapter = qla2x00_initialize_adapter,
@@ -2097,6 +2253,7 @@ static struct isp_operations qla83xx_isp_ops = {
.write_optrom = qla24xx_write_optrom_data,
.get_flash_version = qla24xx_get_flash_version,
.start_scsi = qla24xx_dif_start_scsi,
+ .start_scsi_mq = qla2xxx_dif_start_scsi_mq,
.abort_isp = qla2x00_abort_isp,
.iospace_config = qla83xx_iospace_config,
.initialize_adapter = qla2x00_initialize_adapter,
@@ -2135,6 +2292,7 @@ static struct isp_operations qlafx00_isp_ops = {
.write_optrom = qla24xx_write_optrom_data,
.get_flash_version = qla24xx_get_flash_version,
.start_scsi = qlafx00_start_scsi,
+ .start_scsi_mq = NULL,
.abort_isp = qlafx00_abort_isp,
.iospace_config = qlafx00_iospace_config,
.initialize_adapter = qlafx00_initialize_adapter,
@@ -2173,6 +2331,7 @@ static struct isp_operations qla27xx_isp_ops = {
.write_optrom = qla24xx_write_optrom_data,
.get_flash_version = qla24xx_get_flash_version,
.start_scsi = qla24xx_dif_start_scsi,
+ .start_scsi_mq = qla2xxx_dif_start_scsi_mq,
.abort_isp = qla2x00_abort_isp,
.iospace_config = qla83xx_iospace_config,
.initialize_adapter = qla2x00_initialize_adapter,
@@ -2387,6 +2546,8 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
uint16_t req_length = 0, rsp_length = 0;
struct req_que *req = NULL;
struct rsp_que *rsp = NULL;
+ int i;
+
bars = pci_select_bars(pdev, IORESOURCE_MEM | IORESOURCE_IO);
sht = &qla2xxx_driver_template;
if (pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2422 ||
@@ -2650,6 +2811,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
"Found an ISP%04X irq %d iobase 0x%p.\n",
pdev->device, pdev->irq, ha->iobase);
mutex_init(&ha->vport_lock);
+ mutex_init(&ha->mq_lock);
init_completion(&ha->mbx_cmd_comp);
complete(&ha->mbx_cmd_comp);
init_completion(&ha->mbx_intr_comp);
@@ -2737,7 +2899,11 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
host->max_cmd_len, host->max_channel, host->max_lun,
host->transportt, sht->vendor_id);
-que_init:
+ /* Set up the irqs */
+ ret = qla2x00_request_irqs(ha, rsp);
+ if (ret)
+ goto probe_init_failed;
+
/* Alloc arrays of request and response ring ptrs */
if (!qla2x00_alloc_queues(ha, req, rsp)) {
ql_log(ql_log_fatal, base_vha, 0x003d,
@@ -2746,12 +2912,17 @@ que_init:
goto probe_init_failed;
}
- qlt_probe_one_stage1(base_vha, ha);
+ if (ha->mqenable && shost_use_blk_mq(host)) {
+ /* number of hardware queues supported by blk/scsi-mq */
+ host->nr_hw_queues = ha->max_qpairs;
- /* Set up the irqs */
- ret = qla2x00_request_irqs(ha, rsp);
- if (ret)
- goto probe_init_failed;
+ ql_dbg(ql_dbg_init, base_vha, 0x0192,
+ "blk/scsi-mq enabled, HW queues = %d.\n", host->nr_hw_queues);
+ } else
+ ql_dbg(ql_dbg_init, base_vha, 0x0193,
+ "blk/scsi-mq disabled.\n");
+
+ qlt_probe_one_stage1(base_vha, ha);
pci_save_state(pdev);
@@ -2842,11 +3013,12 @@ que_init:
host->can_queue, base_vha->req,
base_vha->mgmt_svr_loop_id, host->sg_tablesize);
- if (ha->mqenable) {
- if (qla25xx_setup_mode(base_vha)) {
- ql_log(ql_log_warn, base_vha, 0x00ec,
- "Failed to create queues, falling back to single queue mode.\n");
- goto que_init;
+ if (ha->mqenable && qla_ini_mode_enabled(base_vha)) {
+ ha->wq = alloc_workqueue("qla2xxx_wq", WQ_MEM_RECLAIM, 1);
+ /* Create start of day qpairs for Block MQ */
+ if (shost_use_blk_mq(host)) {
+ for (i = 0; i < ha->max_qpairs; i++)
+ qla2xxx_create_qpair(base_vha, 5, 0);
}
}
@@ -3115,13 +3287,6 @@ qla2x00_delete_all_vps(struct qla_hw_data *ha, scsi_qla_host_t *base_vha)
static void
qla2x00_destroy_deferred_work(struct qla_hw_data *ha)
{
- /* Flush the work queue and remove it */
- if (ha->wq) {
- flush_workqueue(ha->wq);
- destroy_workqueue(ha->wq);
- ha->wq = NULL;
- }
-
/* Cancel all work and destroy DPC workqueues */
if (ha->dpc_lp_wq) {
cancel_work_sync(&ha->idc_aen);
@@ -3317,9 +3482,17 @@ qla2x00_free_device(scsi_qla_host_t *vha)
ha->isp_ops->disable_intrs(ha);
}
+ qla2x00_free_fcports(vha);
+
qla2x00_free_irqs(vha);
- qla2x00_free_fcports(vha);
+ /* Flush the work queue and remove it */
+ if (ha->wq) {
+ flush_workqueue(ha->wq);
+ destroy_workqueue(ha->wq);
+ ha->wq = NULL;
+ }
+
qla2x00_mem_free(ha);
@@ -4034,6 +4207,7 @@ struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *sht,
INIT_LIST_HEAD(&vha->qla_sess_op_cmd_list);
INIT_LIST_HEAD(&vha->logo_list);
INIT_LIST_HEAD(&vha->plogi_ack_list);
+ INIT_LIST_HEAD(&vha->qp_list);
spin_lock_init(&vha->work_lock);
spin_lock_init(&vha->cmd_list_lock);
@@ -5038,8 +5212,8 @@ qla2x00_disable_board_on_pci_error(struct work_struct *work)
base_vha->flags.init_done = 0;
qla25xx_delete_queues(base_vha);
- qla2x00_free_irqs(base_vha);
qla2x00_free_fcports(base_vha);
+ qla2x00_free_irqs(base_vha);
qla2x00_mem_free(ha);
qla82xx_md_free(base_vha);
qla2x00_free_queues(ha);
@@ -5073,6 +5247,8 @@ qla2x00_do_dpc(void *data)
{
scsi_qla_host_t *base_vha;
struct qla_hw_data *ha;
+ uint32_t online;
+ struct qla_qpair *qpair;
ha = (struct qla_hw_data *)data;
base_vha = pci_get_drvdata(ha->pdev);
@@ -5334,6 +5510,22 @@ intr_on_check:
ha->isp_ops->beacon_blink(base_vha);
}
+ /* qpair online check */
+ if (test_and_clear_bit(QPAIR_ONLINE_CHECK_NEEDED,
+ &base_vha->dpc_flags)) {
+ if (ha->flags.eeh_busy ||
+ ha->flags.pci_channel_io_perm_failure)
+ online = 0;
+ else
+ online = 1;
+
+ mutex_lock(&ha->mq_lock);
+ list_for_each_entry(qpair, &base_vha->qp_list,
+ qp_list_elem)
+ qpair->online = online;
+ mutex_unlock(&ha->mq_lock);
+ }
+
if (!IS_QLAFX00(ha))
qla2x00_do_dpc_all_vps(base_vha);
@@ -5676,6 +5868,10 @@ qla2xxx_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
switch (state) {
case pci_channel_io_normal:
ha->flags.eeh_busy = 0;
+ if (ql2xmqsupport) {
+ set_bit(QPAIR_ONLINE_CHECK_NEEDED, &vha->dpc_flags);
+ qla2xxx_wake_dpc(vha);
+ }
return PCI_ERS_RESULT_CAN_RECOVER;
case pci_channel_io_frozen:
ha->flags.eeh_busy = 1;
@@ -5689,10 +5885,18 @@ qla2xxx_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
pci_disable_device(pdev);
/* Return back all IOs */
qla2x00_abort_all_cmds(vha, DID_RESET << 16);
+ if (ql2xmqsupport) {
+ set_bit(QPAIR_ONLINE_CHECK_NEEDED, &vha->dpc_flags);
+ qla2xxx_wake_dpc(vha);
+ }
return PCI_ERS_RESULT_NEED_RESET;
case pci_channel_io_perm_failure:
ha->flags.pci_channel_io_perm_failure = 1;
qla2x00_abort_all_cmds(vha, DID_NO_CONNECT << 16);
+ if (ql2xmqsupport) {
+ set_bit(QPAIR_ONLINE_CHECK_NEEDED, &vha->dpc_flags);
+ qla2xxx_wake_dpc(vha);
+ }
return PCI_ERS_RESULT_DISCONNECT;
}
return PCI_ERS_RESULT_NEED_RESET;
@@ -5960,6 +6164,13 @@ qla83xx_disable_laser(scsi_qla_host_t *vha)
qla83xx_wr_reg(vha, reg, data);
}
+static int qla2xxx_map_queues(struct Scsi_Host *shost)
+{
+ scsi_qla_host_t *vha = (scsi_qla_host_t *)shost->hostdata;
+
+ return blk_mq_pci_map_queues(&shost->tag_set, vha->hw->pdev);
+}
+
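This hook closes the loop opened by PCI_IRQ_AFFINITY in qla24xx_enable_msix(): the vectors were spread across CPUs at allocation time, and blk_mq_pci_map_queues() reuses those affinity masks so that blk-mq hardware context i is submitted from the same CPUs whose completions arrive on vector i.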
static const struct pci_error_handlers qla2xxx_err_handler = {
.error_detected = qla2xxx_pci_error_detected,
.mmio_enabled = qla2xxx_pci_mmio_enabled,
diff --git a/drivers/scsi/qla4xxx/ql4_def.h b/drivers/scsi/qla4xxx/ql4_def.h
index a7cfc270bd08..aeebefb1e9f8 100644
--- a/drivers/scsi/qla4xxx/ql4_def.h
+++ b/drivers/scsi/qla4xxx/ql4_def.h
@@ -409,18 +409,9 @@ struct qla4_8xxx_legacy_intr_set {
/* MSI-X Support */
-#define QLA_MSIX_DEFAULT 0x00
-#define QLA_MSIX_RSP_Q 0x01
-
+#define QLA_MSIX_DEFAULT 0
+#define QLA_MSIX_RSP_Q 1
#define QLA_MSIX_ENTRIES 2
-#define QLA_MIDX_DEFAULT 0
-#define QLA_MIDX_RSP_Q 1
-
-struct ql4_msix_entry {
- int have_irq;
- uint16_t msix_vector;
- uint16_t msix_entry;
-};
/*
* ISP Operations
@@ -572,9 +563,6 @@ struct scsi_qla_host {
#define AF_IRQ_ATTACHED 10 /* 0x00000400 */
#define AF_DISABLE_ACB_COMPLETE 11 /* 0x00000800 */
#define AF_HA_REMOVAL 12 /* 0x00001000 */
-#define AF_INTx_ENABLED 15 /* 0x00008000 */
-#define AF_MSI_ENABLED 16 /* 0x00010000 */
-#define AF_MSIX_ENABLED 17 /* 0x00020000 */
#define AF_MBOX_COMMAND_NOPOLL 18 /* 0x00040000 */
#define AF_FW_RECOVERY 19 /* 0x00080000 */
#define AF_EEH_BUSY 20 /* 0x00100000 */
@@ -762,8 +750,6 @@ struct scsi_qla_host {
struct isp_operations *isp_ops;
struct ql82xx_hw_data hw;
- struct ql4_msix_entry msix_entries[QLA_MSIX_ENTRIES];
-
uint32_t nx_dev_init_timeout;
uint32_t nx_reset_timeout;
void *fw_dump;
diff --git a/drivers/scsi/qla4xxx/ql4_glbl.h b/drivers/scsi/qla4xxx/ql4_glbl.h
index 2559144f5475..bce96a58f14e 100644
--- a/drivers/scsi/qla4xxx/ql4_glbl.h
+++ b/drivers/scsi/qla4xxx/ql4_glbl.h
@@ -134,7 +134,6 @@ int qla4_8xxx_get_flash_info(struct scsi_qla_host *ha);
void qla4_82xx_enable_intrs(struct scsi_qla_host *ha);
void qla4_82xx_disable_intrs(struct scsi_qla_host *ha);
int qla4_8xxx_enable_msix(struct scsi_qla_host *ha);
-void qla4_8xxx_disable_msix(struct scsi_qla_host *ha);
irqreturn_t qla4_8xxx_msi_handler(int irq, void *dev_id);
irqreturn_t qla4_8xxx_default_intr_handler(int irq, void *dev_id);
irqreturn_t qla4_8xxx_msix_rsp_q(int irq, void *dev_id);
diff --git a/drivers/scsi/qla4xxx/ql4_isr.c b/drivers/scsi/qla4xxx/ql4_isr.c
index 4f9c0f2be89d..d2cd33d8d67f 100644
--- a/drivers/scsi/qla4xxx/ql4_isr.c
+++ b/drivers/scsi/qla4xxx/ql4_isr.c
@@ -1107,7 +1107,7 @@ static void qla4_82xx_spurious_interrupt(struct scsi_qla_host *ha,
DEBUG2(ql4_printk(KERN_INFO, ha, "Spurious Interrupt\n"));
if (is_qla8022(ha)) {
writel(0, &ha->qla4_82xx_reg->host_int);
- if (test_bit(AF_INTx_ENABLED, &ha->flags))
+ if (!ha->pdev->msi_enabled && !ha->pdev->msix_enabled)
qla4_82xx_wr_32(ha, ha->nx_legacy_intr.tgt_mask_reg,
0xfbff);
}
@@ -1564,19 +1564,18 @@ int qla4xxx_request_irqs(struct scsi_qla_host *ha)
try_msi:
/* Trying MSI */
- ret = pci_enable_msi(ha->pdev);
- if (!ret) {
+ ret = pci_alloc_irq_vectors(ha->pdev, 1, 1, PCI_IRQ_MSI);
+ if (ret > 0) {
ret = request_irq(ha->pdev->irq, qla4_8xxx_msi_handler,
0, DRIVER_NAME, ha);
if (!ret) {
DEBUG2(ql4_printk(KERN_INFO, ha, "MSI: Enabled.\n"));
- set_bit(AF_MSI_ENABLED, &ha->flags);
goto irq_attached;
} else {
ql4_printk(KERN_WARNING, ha,
"MSI: Failed to reserve interrupt %d "
"already in use.\n", ha->pdev->irq);
- pci_disable_msi(ha->pdev);
+ pci_free_irq_vectors(ha->pdev);
}
}
@@ -1592,7 +1591,6 @@ try_intx:
IRQF_SHARED, DRIVER_NAME, ha);
if (!ret) {
DEBUG2(ql4_printk(KERN_INFO, ha, "INTx: Enabled.\n"));
- set_bit(AF_INTx_ENABLED, &ha->flags);
goto irq_attached;
} else {
@@ -1614,14 +1612,11 @@ irq_not_attached:
void qla4xxx_free_irqs(struct scsi_qla_host *ha)
{
- if (test_and_clear_bit(AF_IRQ_ATTACHED, &ha->flags)) {
- if (test_bit(AF_MSIX_ENABLED, &ha->flags)) {
- qla4_8xxx_disable_msix(ha);
- } else if (test_and_clear_bit(AF_MSI_ENABLED, &ha->flags)) {
- free_irq(ha->pdev->irq, ha);
- pci_disable_msi(ha->pdev);
- } else if (test_and_clear_bit(AF_INTx_ENABLED, &ha->flags)) {
- free_irq(ha->pdev->irq, ha);
- }
- }
+ if (!test_and_clear_bit(AF_IRQ_ATTACHED, &ha->flags))
+ return;
+
+ if (ha->pdev->msix_enabled)
+ free_irq(pci_irq_vector(ha->pdev, 1), ha);
+ free_irq(pci_irq_vector(ha->pdev, 0), ha);
+ pci_free_irq_vectors(ha->pdev);
}
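
The qla4xxx teardown relies on the fixed vector layout (default on vector 0, rsp_q on vector 1): vector 1 exists, and is freed, only when MSI-X is enabled, while pci_irq_vector(ha->pdev, 0) works uniformly for MSI-X, MSI and INTx, degrading to ha->pdev->irq in the legacy case.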
diff --git a/drivers/scsi/qla4xxx/ql4_mbx.c b/drivers/scsi/qla4xxx/ql4_mbx.c
index c291fdff1b33..1da04f323d38 100644
--- a/drivers/scsi/qla4xxx/ql4_mbx.c
+++ b/drivers/scsi/qla4xxx/ql4_mbx.c
@@ -2032,10 +2032,7 @@ int qla4xxx_set_param_ddbentry(struct scsi_qla_host *ha,
ptid = (uint16_t *)&fw_ddb_entry->isid[1];
*ptid = cpu_to_le16((uint16_t)ddb_entry->sess->target_id);
- DEBUG2(ql4_printk(KERN_INFO, ha, "ISID [%02x%02x%02x%02x%02x%02x]\n",
- fw_ddb_entry->isid[5], fw_ddb_entry->isid[4],
- fw_ddb_entry->isid[3], fw_ddb_entry->isid[2],
- fw_ddb_entry->isid[1], fw_ddb_entry->isid[0]));
+ DEBUG2(ql4_printk(KERN_INFO, ha, "ISID [%pmR]\n", fw_ddb_entry->isid));
iscsi_opts = le16_to_cpu(fw_ddb_entry->iscsi_options);
memset(fw_ddb_entry->iscsi_alias, 0, sizeof(fw_ddb_entry->iscsi_alias));
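
[Annotation] The old code printed the six ISID bytes individually, highest index first. %pmR does the same in one specifier: the kernel's %pM family prints a 6-byte buffer as hex (%pM with ':' separators, %pm with none), and the R suffix reverses the byte order, matching how ISIDs are stored. A small sketch with a made-up value:

u8 isid[6] = { 0x00, 0x02, 0x3d, 0x00, 0x00, 0x01 };	/* hypothetical */

pr_info("ISID [%pmR]\n", isid);	/* "0100003d0200": reversed, no separator */
pr_info("MAC  [%pM]\n",  isid);	/* "00:02:3d:00:00:01" */
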
diff --git a/drivers/scsi/qla4xxx/ql4_nx.c b/drivers/scsi/qla4xxx/ql4_nx.c
index 06ddd13cb7cc..e91abb327745 100644
--- a/drivers/scsi/qla4xxx/ql4_nx.c
+++ b/drivers/scsi/qla4xxx/ql4_nx.c
@@ -3945,7 +3945,7 @@ void qla4_82xx_process_mbox_intr(struct scsi_qla_host *ha, int out_count)
ha->isp_ops->interrupt_service_routine(ha, intr_status);
if (test_bit(AF_INTERRUPTS_ON, &ha->flags) &&
- test_bit(AF_INTx_ENABLED, &ha->flags))
+ (!ha->pdev->msi_enabled && !ha->pdev->msix_enabled))
qla4_82xx_wr_32(ha, ha->nx_legacy_intr.tgt_mask_reg,
0xfbff);
}
@@ -4094,12 +4094,8 @@ int qla4_8xxx_get_sys_info(struct scsi_qla_host *ha)
ha->phy_port_num = sys_info->port_num;
ha->iscsi_pci_func_cnt = sys_info->iscsi_pci_func_cnt;
- DEBUG2(printk("scsi%ld: %s: "
- "mac %02x:%02x:%02x:%02x:%02x:%02x "
- "serial %s\n", ha->host_no, __func__,
- ha->my_mac[0], ha->my_mac[1], ha->my_mac[2],
- ha->my_mac[3], ha->my_mac[4], ha->my_mac[5],
- ha->serial_number));
+ DEBUG2(printk("scsi%ld: %s: mac %pM serial %s\n",
+ ha->host_no, __func__, ha->my_mac, ha->serial_number));
status = QLA_SUCCESS;
@@ -4178,78 +4174,37 @@ qla4_82xx_disable_intrs(struct scsi_qla_host *ha)
spin_unlock_irq(&ha->hardware_lock);
}
-struct ql4_init_msix_entry {
- uint16_t entry;
- uint16_t index;
- const char *name;
- irq_handler_t handler;
-};
-
-static struct ql4_init_msix_entry qla4_8xxx_msix_entries[QLA_MSIX_ENTRIES] = {
- { QLA_MSIX_DEFAULT, QLA_MIDX_DEFAULT,
- "qla4xxx (default)",
- (irq_handler_t)qla4_8xxx_default_intr_handler },
- { QLA_MSIX_RSP_Q, QLA_MIDX_RSP_Q,
- "qla4xxx (rsp_q)", (irq_handler_t)qla4_8xxx_msix_rsp_q },
-};
-
-void
-qla4_8xxx_disable_msix(struct scsi_qla_host *ha)
-{
- int i;
- struct ql4_msix_entry *qentry;
-
- for (i = 0; i < QLA_MSIX_ENTRIES; i++) {
- qentry = &ha->msix_entries[qla4_8xxx_msix_entries[i].index];
- if (qentry->have_irq) {
- free_irq(qentry->msix_vector, ha);
- DEBUG2(ql4_printk(KERN_INFO, ha, "%s: %s\n",
- __func__, qla4_8xxx_msix_entries[i].name));
- }
- }
- pci_disable_msix(ha->pdev);
- clear_bit(AF_MSIX_ENABLED, &ha->flags);
-}
-
int
qla4_8xxx_enable_msix(struct scsi_qla_host *ha)
{
- int i, ret;
- struct msix_entry entries[QLA_MSIX_ENTRIES];
- struct ql4_msix_entry *qentry;
-
- for (i = 0; i < QLA_MSIX_ENTRIES; i++)
- entries[i].entry = qla4_8xxx_msix_entries[i].entry;
+ int ret;
- ret = pci_enable_msix_exact(ha->pdev, entries, ARRAY_SIZE(entries));
- if (ret) {
+ ret = pci_alloc_irq_vectors(ha->pdev, QLA_MSIX_ENTRIES,
+ QLA_MSIX_ENTRIES, PCI_IRQ_MSIX);
+ if (ret < 0) {
ql4_printk(KERN_WARNING, ha,
"MSI-X: Failed to enable support -- %d/%d\n",
QLA_MSIX_ENTRIES, ret);
- goto msix_out;
- }
- set_bit(AF_MSIX_ENABLED, &ha->flags);
-
- for (i = 0; i < QLA_MSIX_ENTRIES; i++) {
- qentry = &ha->msix_entries[qla4_8xxx_msix_entries[i].index];
- qentry->msix_vector = entries[i].vector;
- qentry->msix_entry = entries[i].entry;
- qentry->have_irq = 0;
- ret = request_irq(qentry->msix_vector,
- qla4_8xxx_msix_entries[i].handler, 0,
- qla4_8xxx_msix_entries[i].name, ha);
- if (ret) {
- ql4_printk(KERN_WARNING, ha,
- "MSI-X: Unable to register handler -- %x/%d.\n",
- qla4_8xxx_msix_entries[i].index, ret);
- qla4_8xxx_disable_msix(ha);
- goto msix_out;
- }
- qentry->have_irq = 1;
- DEBUG2(ql4_printk(KERN_INFO, ha, "%s: %s\n",
- __func__, qla4_8xxx_msix_entries[i].name));
+ return ret;
}
-msix_out:
+
+ ret = request_irq(pci_irq_vector(ha->pdev, 0),
+ qla4_8xxx_default_intr_handler, 0, "qla4xxx (default)",
+ ha);
+ if (ret)
+ goto out_free_vectors;
+
+ ret = request_irq(pci_irq_vector(ha->pdev, 1),
+ qla4_8xxx_msix_rsp_q, 0, "qla4xxx (rsp_q)", ha);
+ if (ret)
+ goto out_free_default_irq;
+
+ return 0;
+
+out_free_default_irq:
+ free_irq(pci_irq_vector(ha->pdev, 0), ha);
+out_free_vectors:
+ pci_free_irq_vectors(ha->pdev);
return ret;
}
diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
index 01c3610a60cf..9fbb33fc90c7 100644
--- a/drivers/scsi/qla4xxx/ql4_os.c
+++ b/drivers/scsi/qla4xxx/ql4_os.c
@@ -6304,13 +6304,9 @@ static int qla4xxx_compare_tuple_ddb(struct scsi_qla_host *ha,
* ISID would not match firmware generated ISID.
*/
if (is_isid_compare) {
- DEBUG2(ql4_printk(KERN_INFO, ha, "%s: old ISID [%02x%02x%02x"
- "%02x%02x%02x] New ISID [%02x%02x%02x%02x%02x%02x]\n",
- __func__, old_tddb->isid[5], old_tddb->isid[4],
- old_tddb->isid[3], old_tddb->isid[2], old_tddb->isid[1],
- old_tddb->isid[0], new_tddb->isid[5], new_tddb->isid[4],
- new_tddb->isid[3], new_tddb->isid[2], new_tddb->isid[1],
- new_tddb->isid[0]));
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "%s: old ISID [%pmR] New ISID [%pmR]\n",
+ __func__, old_tddb->isid, new_tddb->isid));
if (memcmp(&old_tddb->isid[0], &new_tddb->isid[0],
sizeof(old_tddb->isid)))
@@ -7925,10 +7921,7 @@ qla4xxx_sysfs_ddb_get_param(struct iscsi_bus_flash_session *fnode_sess,
rc = sprintf(buf, "%u\n", fnode_conn->keepalive_timeout);
break;
case ISCSI_FLASHNODE_ISID:
- rc = sprintf(buf, "%02x%02x%02x%02x%02x%02x\n",
- fnode_sess->isid[0], fnode_sess->isid[1],
- fnode_sess->isid[2], fnode_sess->isid[3],
- fnode_sess->isid[4], fnode_sess->isid[5]);
+ rc = sprintf(buf, "%pm\n", fnode_sess->isid);
break;
case ISCSI_FLASHNODE_TSID:
rc = sprintf(buf, "%u\n", fnode_sess->tsid);
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index 1deb6adc411f..75455d4dab68 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -621,6 +621,9 @@ int scsi_change_queue_depth(struct scsi_device *sdev, int depth)
wmb();
}
+ if (sdev->request_queue)
+ blk_set_queue_depth(sdev->request_queue, depth);
+
return sdev->queue_depth;
}
EXPORT_SYMBOL(scsi_change_queue_depth);
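
[Annotation] scsi_change_queue_depth() now mirrors the device queue depth into the request queue via blk_set_queue_depth(), so block-layer consumers (writeback throttling among them, if my reading of the 4.10-era block core is right) see the same limit the SCSI midlayer enforces. A hedged sketch of an LLD adjusting depth at runtime; my_throttle() is hypothetical:

#include <linux/kernel.h>
#include <scsi/scsi_device.h>

/* Ramp down on a busy condition; scsi_change_queue_depth() returns the
 * depth actually set and now also updates sdev->request_queue. */
static int my_throttle(struct scsi_device *sdev)
{
	return scsi_change_queue_depth(sdev, max(sdev->queue_depth / 2, 1));
}
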
diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c
index 246456925335..28fea83ae2fe 100644
--- a/drivers/scsi/scsi_devinfo.c
+++ b/drivers/scsi/scsi_devinfo.c
@@ -220,8 +220,6 @@ static struct {
{"NAKAMICH", "MJ-5.16S", NULL, BLIST_FORCELUN | BLIST_SINGLELUN},
{"NEC", "PD-1 ODX654P", NULL, BLIST_FORCELUN | BLIST_SINGLELUN},
{"NEC", "iStorage", NULL, BLIST_REPORTLUN2},
- {"NETAPP", "LUN C-Mode", NULL, BLIST_SYNC_ALUA},
- {"NETAPP", "INF-01-00", NULL, BLIST_SYNC_ALUA},
{"NRC", "MBR-7", NULL, BLIST_FORCELUN | BLIST_SINGLELUN},
{"NRC", "MBR-7.4", NULL, BLIST_FORCELUN | BLIST_SINGLELUN},
{"PIONEER", "CD-ROM DRM-600", NULL, BLIST_FORCELUN | BLIST_SINGLELUN},
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index 106a6adbd6f1..996e134d79fa 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -1988,7 +1988,7 @@ static void scsi_eh_lock_door(struct scsi_device *sdev)
req->cmd_len = COMMAND_SIZE(req->cmd[0]);
- req->cmd_flags |= REQ_QUIET;
+ req->rq_flags |= RQF_QUIET;
req->timeout = 10 * HZ;
req->retries = 5;
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 2cca9cffc63f..c35b6de4ca64 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -86,10 +86,8 @@ scsi_set_blocked(struct scsi_cmnd *cmd, int reason)
static void scsi_mq_requeue_cmd(struct scsi_cmnd *cmd)
{
struct scsi_device *sdev = cmd->device;
- struct request_queue *q = cmd->request->q;
- blk_mq_requeue_request(cmd->request);
- blk_mq_kick_requeue_list(q);
+ blk_mq_requeue_request(cmd->request, true);
put_device(&sdev->sdev_gendev);
}
@@ -163,26 +161,11 @@ void scsi_queue_insert(struct scsi_cmnd *cmd, int reason)
{
__scsi_queue_insert(cmd, reason, 1);
}
-/**
- * scsi_execute - insert request and wait for the result
- * @sdev: scsi device
- * @cmd: scsi command
- * @data_direction: data direction
- * @buffer: data buffer
- * @bufflen: len of buffer
- * @sense: optional sense buffer
- * @timeout: request timeout in seconds
- * @retries: number of times to retry request
- * @flags: or into request flags;
- * @resid: optional residual length
- *
- * returns the req->errors value which is the scsi_cmnd result
- * field.
- */
-int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
+
+static int __scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
int data_direction, void *buffer, unsigned bufflen,
unsigned char *sense, int timeout, int retries, u64 flags,
- int *resid)
+ req_flags_t rq_flags, int *resid)
{
struct request *req;
int write = (data_direction == DMA_TO_DEVICE);
@@ -203,7 +186,8 @@ int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
req->sense_len = 0;
req->retries = retries;
req->timeout = timeout;
- req->cmd_flags |= flags | REQ_QUIET | REQ_PREEMPT;
+ req->cmd_flags |= flags;
+ req->rq_flags |= rq_flags | RQF_QUIET | RQF_PREEMPT;
/*
* head injection *required* here otherwise quiesce won't work
@@ -227,12 +211,37 @@ int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
return ret;
}
+
+/**
+ * scsi_execute - insert request and wait for the result
+ * @sdev: scsi device
+ * @cmd: scsi command
+ * @data_direction: data direction
+ * @buffer: data buffer
+ * @bufflen: len of buffer
+ * @sense: optional sense buffer
+ * @timeout: request timeout in seconds
+ * @retries: number of times to retry request
+ * @flags: or into request flags;
+ * @resid: optional residual length
+ *
+ * returns the req->errors value which is the scsi_cmnd result
+ * field.
+ */
+int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
+ int data_direction, void *buffer, unsigned bufflen,
+ unsigned char *sense, int timeout, int retries, u64 flags,
+ int *resid)
+{
+ return __scsi_execute(sdev, cmd, data_direction, buffer, bufflen, sense,
+ timeout, retries, flags, 0, resid);
+}
EXPORT_SYMBOL(scsi_execute);
int scsi_execute_req_flags(struct scsi_device *sdev, const unsigned char *cmd,
int data_direction, void *buffer, unsigned bufflen,
struct scsi_sense_hdr *sshdr, int timeout, int retries,
- int *resid, u64 flags)
+ int *resid, u64 flags, req_flags_t rq_flags)
{
char *sense = NULL;
int result;
@@ -242,8 +251,8 @@ int scsi_execute_req_flags(struct scsi_device *sdev, const unsigned char *cmd,
if (!sense)
return DRIVER_ERROR << 24;
}
- result = scsi_execute(sdev, cmd, data_direction, buffer, bufflen,
- sense, timeout, retries, flags, resid);
+ result = __scsi_execute(sdev, cmd, data_direction, buffer, bufflen,
+ sense, timeout, retries, flags, rq_flags, resid);
if (sshdr)
scsi_normalize_sense(sense, SCSI_SENSE_BUFFERSIZE, sshdr);
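
[Annotation] Splitting the flags into a u64 cmd_flags value and a req_flags_t rq_flags value lets callers pass block-internal flags such as RQF_PM without mixing namespaces; the sd.c hunks below switch their REQ_PM users over accordingly. A sketch of the new calling convention, using the signature visible in this hunk (my_sync_cache() is hypothetical):

#include <scsi/scsi_device.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_proto.h>

/* Flush the write cache from a PM path. The trailing two arguments
 * are (u64 flags, req_flags_t rq_flags). */
static int my_sync_cache(struct scsi_device *sdp)
{
	unsigned char cmd[10] = { SYNCHRONIZE_CACHE, };
	struct scsi_sense_hdr sshdr;

	return scsi_execute_req_flags(sdp, cmd, DMA_NONE, NULL, 0, &sshdr,
				      30 * HZ, 3, NULL, 0, RQF_PM);
}
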
@@ -813,7 +822,7 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
*/
if ((sshdr.asc == 0x0) && (sshdr.ascq == 0x1d))
;
- else if (!(req->cmd_flags & REQ_QUIET))
+ else if (!(req->rq_flags & RQF_QUIET))
scsi_print_sense(cmd);
result = 0;
/* BLOCK_PC may have set error */
@@ -943,7 +952,7 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
switch (action) {
case ACTION_FAIL:
/* Give up and fail the remainder of the request */
- if (!(req->cmd_flags & REQ_QUIET)) {
+ if (!(req->rq_flags & RQF_QUIET)) {
static DEFINE_RATELIMIT_STATE(_rs,
DEFAULT_RATELIMIT_INTERVAL,
DEFAULT_RATELIMIT_BURST);
@@ -972,7 +981,7 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
* A new command will be prepared and issued.
*/
if (q->mq_ops) {
- cmd->request->cmd_flags &= ~REQ_DONTPREP;
+ cmd->request->rq_flags &= ~RQF_DONTPREP;
scsi_mq_uninit_cmd(cmd);
scsi_mq_requeue_cmd(cmd);
} else {
@@ -998,8 +1007,8 @@ static int scsi_init_sgtable(struct request *req, struct scsi_data_buffer *sdb)
/*
* If sg table allocation fails, requeue request later.
*/
- if (unlikely(sg_alloc_table_chained(&sdb->table, req->nr_phys_segments,
- sdb->table.sgl)))
+ if (unlikely(sg_alloc_table_chained(&sdb->table,
+ blk_rq_nr_phys_segments(req), sdb->table.sgl)))
return BLKPREP_DEFER;
/*
@@ -1031,7 +1040,7 @@ int scsi_init_io(struct scsi_cmnd *cmd)
bool is_mq = (rq->mq_ctx != NULL);
int error;
- BUG_ON(!rq->nr_phys_segments);
+ BUG_ON(!blk_rq_nr_phys_segments(rq));
error = scsi_init_sgtable(rq, &cmd->sdb);
if (error)
@@ -1234,7 +1243,7 @@ scsi_prep_state_check(struct scsi_device *sdev, struct request *req)
/*
* If the devices is blocked we defer normal commands.
*/
- if (!(req->cmd_flags & REQ_PREEMPT))
+ if (!(req->rq_flags & RQF_PREEMPT))
ret = BLKPREP_DEFER;
break;
default:
@@ -1243,7 +1252,7 @@ scsi_prep_state_check(struct scsi_device *sdev, struct request *req)
* special commands. In particular any user initiated
* command is not allowed.
*/
- if (!(req->cmd_flags & REQ_PREEMPT))
+ if (!(req->rq_flags & RQF_PREEMPT))
ret = BLKPREP_KILL;
break;
}
@@ -1279,7 +1288,7 @@ scsi_prep_return(struct request_queue *q, struct request *req, int ret)
blk_delay_queue(q, SCSI_QUEUE_DELAY);
break;
default:
- req->cmd_flags |= REQ_DONTPREP;
+ req->rq_flags |= RQF_DONTPREP;
}
return ret;
@@ -1736,7 +1745,7 @@ static void scsi_request_fn(struct request_queue *q)
* we add the dev to the starved list so it eventually gets
* a run when a tag is freed.
*/
- if (blk_queue_tagged(q) && !(req->cmd_flags & REQ_QUEUED)) {
+ if (blk_queue_tagged(q) && !(req->rq_flags & RQF_QUEUED)) {
spin_lock_irq(shost->host_lock);
if (list_empty(&sdev->starved_entry))
list_add_tail(&sdev->starved_entry,
@@ -1801,7 +1810,7 @@ static inline int prep_to_mq(int ret)
{
switch (ret) {
case BLKPREP_OK:
- return 0;
+ return BLK_MQ_RQ_QUEUE_OK;
case BLKPREP_DEFER:
return BLK_MQ_RQ_QUEUE_BUSY;
default:
@@ -1888,7 +1897,7 @@ static int scsi_queue_rq(struct blk_mq_hw_ctx *hctx,
int reason;
ret = prep_to_mq(scsi_prep_state_check(sdev, req));
- if (ret)
+ if (ret != BLK_MQ_RQ_QUEUE_OK)
goto out;
ret = BLK_MQ_RQ_QUEUE_BUSY;
@@ -1903,11 +1912,11 @@ static int scsi_queue_rq(struct blk_mq_hw_ctx *hctx,
goto out_dec_target_busy;
- if (!(req->cmd_flags & REQ_DONTPREP)) {
+ if (!(req->rq_flags & RQF_DONTPREP)) {
ret = prep_to_mq(scsi_mq_prep_fn(req));
- if (ret)
+ if (ret != BLK_MQ_RQ_QUEUE_OK)
goto out_dec_host_busy;
- req->cmd_flags |= REQ_DONTPREP;
+ req->rq_flags |= RQF_DONTPREP;
} else {
blk_mq_start_request(req);
}
@@ -1941,7 +1950,6 @@ out_put_device:
out:
switch (ret) {
case BLK_MQ_RQ_QUEUE_BUSY:
- blk_mq_stop_hw_queue(hctx);
if (atomic_read(&sdev->device_busy) == 0 &&
!scsi_device_blocked(sdev))
blk_mq_delay_queue(hctx, SCSI_QUEUE_DELAY);
@@ -1952,7 +1960,7 @@ out:
* we hit an error, as we will never see this command
* again.
*/
- if (req->cmd_flags & REQ_DONTPREP)
+ if (req->rq_flags & RQF_DONTPREP)
scsi_mq_uninit_cmd(cmd);
break;
default:
@@ -1990,6 +1998,15 @@ static void scsi_exit_request(void *data, struct request *rq,
kfree(cmd->sense_buffer);
}
+static int scsi_map_queues(struct blk_mq_tag_set *set)
+{
+ struct Scsi_Host *shost = container_of(set, struct Scsi_Host, tag_set);
+
+ if (shost->hostt->map_queues)
+ return shost->hostt->map_queues(shost);
+ return blk_mq_map_queues(set);
+}
+
static u64 scsi_calculate_bounce_limit(struct Scsi_Host *shost)
{
struct device *host_dev;
@@ -2082,6 +2099,7 @@ static struct blk_mq_ops scsi_mq_ops = {
.timeout = scsi_timeout,
.init_request = scsi_init_request,
.exit_request = scsi_exit_request,
+ .map_queues = scsi_map_queues,
};
struct request_queue *scsi_mq_alloc_queue(struct scsi_device *sdev)
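
[Annotation] The new scsi_map_queues() trampoline lets a host driver supply its own blk-mq queue mapping through the map_queues host-template hook, falling back to blk_mq_map_queues() otherwise. A sketch of a driver opting in, assuming the 4.9-era blk_mq_pci_map_queues() helper; all my_* names are hypothetical:

#include <linux/blk-mq-pci.h>
#include <scsi/scsi_host.h>

struct my_hba {			/* hypothetical per-host data */
	struct pci_dev *pdev;
};

static int my_map_queues(struct Scsi_Host *shost)
{
	struct my_hba *hba = shost_priv(shost);

	/* Spread hardware queues by PCI IRQ affinity. */
	return blk_mq_pci_map_queues(&shost->tag_set, hba->pdev);
}

static struct scsi_host_template my_sht = {
	.name		= "my_drv",
	.map_queues	= my_map_queues,
	/* ... */
};
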
@@ -2724,6 +2742,39 @@ void sdev_evt_send_simple(struct scsi_device *sdev,
EXPORT_SYMBOL_GPL(sdev_evt_send_simple);
/**
+ * scsi_request_fn_active() - number of kernel threads inside scsi_request_fn()
+ * @sdev: SCSI device to count the number of scsi_request_fn() callers for.
+ */
+static int scsi_request_fn_active(struct scsi_device *sdev)
+{
+ struct request_queue *q = sdev->request_queue;
+ int request_fn_active;
+
+ WARN_ON_ONCE(sdev->host->use_blk_mq);
+
+ spin_lock_irq(q->queue_lock);
+ request_fn_active = q->request_fn_active;
+ spin_unlock_irq(q->queue_lock);
+
+ return request_fn_active;
+}
+
+/**
+ * scsi_wait_for_queuecommand() - wait for ongoing queuecommand() calls
+ * @sdev: SCSI device pointer.
+ *
+ * Wait until the ongoing shost->hostt->queuecommand() calls that are
+ * invoked from scsi_request_fn() have finished.
+ */
+static void scsi_wait_for_queuecommand(struct scsi_device *sdev)
+{
+ WARN_ON_ONCE(sdev->host->use_blk_mq);
+
+ while (scsi_request_fn_active(sdev))
+ msleep(20);
+}
+
+/**
* scsi_device_quiesce - Block user issued commands.
* @sdev: scsi device to quiesce.
*
@@ -2807,8 +2858,7 @@ EXPORT_SYMBOL(scsi_target_resume);
* @sdev: device to block
*
* Block request made by scsi lld's to temporarily stop all
- * scsi commands on the specified device. Called from interrupt
- * or normal process context.
+ * scsi commands on the specified device. May sleep.
*
* Returns zero if successful or error if not
*
@@ -2817,6 +2867,10 @@ EXPORT_SYMBOL(scsi_target_resume);
* (which must be a legal transition). When the device is in this
* state, all commands are deferred until the scsi lld reenables
* the device with scsi_device_unblock or device_block_tmo fires.
+ *
+ * To do: avoid that scsi_send_eh_cmnd() calls queuecommand() after
+ * scsi_internal_device_block() has blocked a SCSI device and also
+ * remove the rport mutex lock and unlock calls from srp_queuecommand().
*/
int
scsi_internal_device_block(struct scsi_device *sdev)
@@ -2844,6 +2898,7 @@ scsi_internal_device_block(struct scsi_device *sdev)
spin_lock_irqsave(q->queue_lock, flags);
blk_stop_queue(q);
spin_unlock_irqrestore(q->queue_lock, flags);
+ scsi_wait_for_queuecommand(sdev);
}
return 0;
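
[Annotation] With scsi_wait_for_queuecommand() folded into scsi_internal_device_block(), blocking a device now drains in-flight scsi_request_fn() callers by polling with msleep(), which is why the kerneldoc above changes from "interrupt or normal process context" to "May sleep". A sketch of the caller-side contract as a transport would use it (my_fast_fail() is hypothetical; the srp transport hunks below follow the same shape):

#include <scsi/scsi_device.h>

/* Must run in process context with no spinlocks held, since the
 * block path may now sleep while waiting out queuecommand() callers. */
static void my_fast_fail(struct device *starget_parent)
{
	scsi_target_block(starget_parent);
	/* ... terminate outstanding I/O on the target ... */
	scsi_target_unblock(starget_parent, SDEV_TRANSPORT_OFFLINE);
}
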
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index 07349270535d..82dfe07b1d47 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -1204,10 +1204,6 @@ int scsi_sysfs_add_sdev(struct scsi_device *sdev)
struct request_queue *rq = sdev->request_queue;
struct scsi_target *starget = sdev->sdev_target;
- error = scsi_device_set_state(sdev, SDEV_RUNNING);
- if (error)
- return error;
-
error = scsi_target_add(starget);
if (error)
return error;
diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
index 0f3a3869524b..03577bde6ac5 100644
--- a/drivers/scsi/scsi_transport_fc.c
+++ b/drivers/scsi/scsi_transport_fc.c
@@ -30,6 +30,7 @@
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/kernel.h>
+#include <linux/bsg-lib.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport.h>
@@ -2592,7 +2593,7 @@ fc_rport_final_delete(struct work_struct *work)
/**
- * fc_rport_create - allocates and creates a remote FC port.
+ * fc_remote_port_create - allocates and creates a remote FC port.
* @shost: scsi host the remote port is connected to.
* @channel: Channel on shost port connected to.
* @ids: The world wide names, fc address, and FC4 port
@@ -2605,8 +2606,8 @@ fc_rport_final_delete(struct work_struct *work)
* This routine assumes no locks are held on entry.
*/
static struct fc_rport *
-fc_rport_create(struct Scsi_Host *shost, int channel,
- struct fc_rport_identifiers *ids)
+fc_remote_port_create(struct Scsi_Host *shost, int channel,
+ struct fc_rport_identifiers *ids)
{
struct fc_host_attrs *fc_host = shost_to_fc_host(shost);
struct fc_internal *fci = to_fc_internal(shost->transportt);
@@ -2914,7 +2915,7 @@ fc_remote_port_add(struct Scsi_Host *shost, int channel,
spin_unlock_irqrestore(shost->host_lock, flags);
/* No consistent binding found - create new remote port entry */
- rport = fc_rport_create(shost, channel, ids);
+ rport = fc_remote_port_create(shost, channel, ids);
return rport;
}
@@ -3554,81 +3555,6 @@ fc_vport_sched_delete(struct work_struct *work)
* BSG support
*/
-
-/**
- * fc_destroy_bsgjob - routine to teardown/delete a fc bsg job
- * @job: fc_bsg_job that is to be torn down
- */
-static void
-fc_destroy_bsgjob(struct fc_bsg_job *job)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&job->job_lock, flags);
- if (job->ref_cnt) {
- spin_unlock_irqrestore(&job->job_lock, flags);
- return;
- }
- spin_unlock_irqrestore(&job->job_lock, flags);
-
- put_device(job->dev); /* release reference for the request */
-
- kfree(job->request_payload.sg_list);
- kfree(job->reply_payload.sg_list);
- kfree(job);
-}
-
-/**
- * fc_bsg_jobdone - completion routine for bsg requests that the LLD has
- * completed
- * @job: fc_bsg_job that is complete
- */
-static void
-fc_bsg_jobdone(struct fc_bsg_job *job)
-{
- struct request *req = job->req;
- struct request *rsp = req->next_rq;
- int err;
-
- err = job->req->errors = job->reply->result;
-
- if (err < 0)
- /* we're only returning the result field in the reply */
- job->req->sense_len = sizeof(uint32_t);
- else
- job->req->sense_len = job->reply_len;
-
- /* we assume all request payload was transferred, residual == 0 */
- req->resid_len = 0;
-
- if (rsp) {
- WARN_ON(job->reply->reply_payload_rcv_len > rsp->resid_len);
-
- /* set reply (bidi) residual */
- rsp->resid_len -= min(job->reply->reply_payload_rcv_len,
- rsp->resid_len);
- }
- blk_complete_request(req);
-}
-
-/**
- * fc_bsg_softirq_done - softirq done routine for destroying the bsg requests
- * @rq: BSG request that holds the job to be destroyed
- */
-static void fc_bsg_softirq_done(struct request *rq)
-{
- struct fc_bsg_job *job = rq->special;
- unsigned long flags;
-
- spin_lock_irqsave(&job->job_lock, flags);
- job->state_flags |= FC_RQST_STATE_DONE;
- job->ref_cnt--;
- spin_unlock_irqrestore(&job->job_lock, flags);
-
- blk_end_request_all(rq, rq->errors);
- fc_destroy_bsgjob(job);
-}
-
/**
* fc_bsg_job_timeout - handler for when a bsg request times out
* @req: request that timed out
@@ -3636,27 +3562,22 @@ static void fc_bsg_softirq_done(struct request *rq)
static enum blk_eh_timer_return
fc_bsg_job_timeout(struct request *req)
{
- struct fc_bsg_job *job = (void *) req->special;
- struct Scsi_Host *shost = job->shost;
+ struct bsg_job *job = (void *) req->special;
+ struct Scsi_Host *shost = fc_bsg_to_shost(job);
+ struct fc_rport *rport = fc_bsg_to_rport(job);
struct fc_internal *i = to_fc_internal(shost->transportt);
- unsigned long flags;
- int err = 0, done = 0;
+ int err = 0, inflight = 0;
- if (job->rport && job->rport->port_state == FC_PORTSTATE_BLOCKED)
+ if (rport && rport->port_state == FC_PORTSTATE_BLOCKED)
return BLK_EH_RESET_TIMER;
- spin_lock_irqsave(&job->job_lock, flags);
- if (job->state_flags & FC_RQST_STATE_DONE)
- done = 1;
- else
- job->ref_cnt++;
- spin_unlock_irqrestore(&job->job_lock, flags);
+ inflight = bsg_job_get(job);
- if (!done && i->f->bsg_timeout) {
+ if (inflight && i->f->bsg_timeout) {
/* call LLDD to abort the i/o as it has timed out */
err = i->f->bsg_timeout(job);
if (err == -EAGAIN) {
- job->ref_cnt--;
+ bsg_job_put(job);
return BLK_EH_RESET_TIMER;
} else if (err)
printk(KERN_ERR "ERROR: FC BSG request timeout - LLD "
@@ -3664,126 +3585,33 @@ fc_bsg_job_timeout(struct request *req)
}
/* the blk_end_sync_io() doesn't check the error */
- if (done)
+ if (!inflight)
return BLK_EH_NOT_HANDLED;
else
return BLK_EH_HANDLED;
}
-static int
-fc_bsg_map_buffer(struct fc_bsg_buffer *buf, struct request *req)
-{
- size_t sz = (sizeof(struct scatterlist) * req->nr_phys_segments);
-
- BUG_ON(!req->nr_phys_segments);
-
- buf->sg_list = kzalloc(sz, GFP_KERNEL);
- if (!buf->sg_list)
- return -ENOMEM;
- sg_init_table(buf->sg_list, req->nr_phys_segments);
- buf->sg_cnt = blk_rq_map_sg(req->q, req, buf->sg_list);
- buf->payload_len = blk_rq_bytes(req);
- return 0;
-}
-
-
-/**
- * fc_req_to_bsgjob - Allocate/create the fc_bsg_job structure for the
- * bsg request
- * @shost: SCSI Host corresponding to the bsg object
- * @rport: (optional) FC Remote Port corresponding to the bsg object
- * @req: BSG request that needs a job structure
- */
-static int
-fc_req_to_bsgjob(struct Scsi_Host *shost, struct fc_rport *rport,
- struct request *req)
-{
- struct fc_internal *i = to_fc_internal(shost->transportt);
- struct request *rsp = req->next_rq;
- struct fc_bsg_job *job;
- int ret;
-
- BUG_ON(req->special);
-
- job = kzalloc(sizeof(struct fc_bsg_job) + i->f->dd_bsg_size,
- GFP_KERNEL);
- if (!job)
- return -ENOMEM;
-
- /*
- * Note: this is a bit silly.
- * The request gets formatted as a SGIO v4 ioctl request, which
- * then gets reformatted as a blk request, which then gets
- * reformatted as a fc bsg request. And on completion, we have
- * to wrap return results such that SGIO v4 thinks it was a scsi
- * status. I hope this was all worth it.
- */
-
- req->special = job;
- job->shost = shost;
- job->rport = rport;
- job->req = req;
- if (i->f->dd_bsg_size)
- job->dd_data = (void *)&job[1];
- spin_lock_init(&job->job_lock);
- job->request = (struct fc_bsg_request *)req->cmd;
- job->request_len = req->cmd_len;
- job->reply = req->sense;
- job->reply_len = SCSI_SENSE_BUFFERSIZE; /* Size of sense buffer
- * allocated */
- if (req->bio) {
- ret = fc_bsg_map_buffer(&job->request_payload, req);
- if (ret)
- goto failjob_rls_job;
- }
- if (rsp && rsp->bio) {
- ret = fc_bsg_map_buffer(&job->reply_payload, rsp);
- if (ret)
- goto failjob_rls_rqst_payload;
- }
- job->job_done = fc_bsg_jobdone;
- if (rport)
- job->dev = &rport->dev;
- else
- job->dev = &shost->shost_gendev;
- get_device(job->dev); /* take a reference for the request */
-
- job->ref_cnt = 1;
-
- return 0;
-
-
-failjob_rls_rqst_payload:
- kfree(job->request_payload.sg_list);
-failjob_rls_job:
- kfree(job);
- return -ENOMEM;
-}
-
-
-enum fc_dispatch_result {
- FC_DISPATCH_BREAK, /* on return, q is locked, break from q loop */
- FC_DISPATCH_LOCKED, /* on return, q is locked, continue on */
- FC_DISPATCH_UNLOCKED, /* on return, q is unlocked, continue on */
-};
-
-
/**
* fc_bsg_host_dispatch - process fc host bsg requests and dispatch to LLDD
- * @q: fc host request queue
* @shost: scsi host rport attached to
* @job: bsg job to be processed
*/
-static enum fc_dispatch_result
-fc_bsg_host_dispatch(struct request_queue *q, struct Scsi_Host *shost,
- struct fc_bsg_job *job)
+static int fc_bsg_host_dispatch(struct Scsi_Host *shost, struct bsg_job *job)
{
struct fc_internal *i = to_fc_internal(shost->transportt);
+ struct fc_bsg_request *bsg_request = job->request;
+ struct fc_bsg_reply *bsg_reply = job->reply;
int cmdlen = sizeof(uint32_t); /* start with length of msgcode */
int ret;
+ /* check if we really have all the request data needed */
+ if (job->request_len < cmdlen) {
+ ret = -ENOMSG;
+ goto fail_host_msg;
+ }
+
/* Validate the host command */
- switch (job->request->msgcode) {
+ switch (bsg_request->msgcode) {
case FC_BSG_HST_ADD_RPORT:
cmdlen += sizeof(struct fc_bsg_host_add_rport);
break;
@@ -3815,7 +3643,7 @@ fc_bsg_host_dispatch(struct request_queue *q, struct Scsi_Host *shost,
case FC_BSG_HST_VENDOR:
cmdlen += sizeof(struct fc_bsg_host_vendor);
if ((shost->hostt->vendor_id == 0L) ||
- (job->request->rqst_data.h_vendor.vendor_id !=
+ (bsg_request->rqst_data.h_vendor.vendor_id !=
shost->hostt->vendor_id)) {
ret = -ESRCH;
goto fail_host_msg;
@@ -3827,24 +3655,19 @@ fc_bsg_host_dispatch(struct request_queue *q, struct Scsi_Host *shost,
goto fail_host_msg;
}
- /* check if we really have all the request data needed */
- if (job->request_len < cmdlen) {
- ret = -ENOMSG;
- goto fail_host_msg;
- }
-
ret = i->f->bsg_request(job);
if (!ret)
- return FC_DISPATCH_UNLOCKED;
+ return 0;
fail_host_msg:
/* return the errno failure code as the only status */
BUG_ON(job->reply_len < sizeof(uint32_t));
- job->reply->reply_payload_rcv_len = 0;
- job->reply->result = ret;
+ bsg_reply->reply_payload_rcv_len = 0;
+ bsg_reply->result = ret;
job->reply_len = sizeof(uint32_t);
- fc_bsg_jobdone(job);
- return FC_DISPATCH_UNLOCKED;
+ bsg_job_done(job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
+ return 0;
}
@@ -3855,34 +3678,38 @@ fail_host_msg:
static void
fc_bsg_goose_queue(struct fc_rport *rport)
{
- if (!rport->rqst_q)
+ struct request_queue *q = rport->rqst_q;
+ unsigned long flags;
+
+ if (!q)
return;
- /*
- * This get/put dance makes no sense
- */
- get_device(&rport->dev);
- blk_run_queue_async(rport->rqst_q);
- put_device(&rport->dev);
+ spin_lock_irqsave(q->queue_lock, flags);
+ blk_run_queue_async(q);
+ spin_unlock_irqrestore(q->queue_lock, flags);
}
/**
* fc_bsg_rport_dispatch - process rport bsg requests and dispatch to LLDD
- * @q: rport request queue
* @shost: scsi host rport attached to
- * @rport: rport request destined to
* @job: bsg job to be processed
*/
-static enum fc_dispatch_result
-fc_bsg_rport_dispatch(struct request_queue *q, struct Scsi_Host *shost,
- struct fc_rport *rport, struct fc_bsg_job *job)
+static int fc_bsg_rport_dispatch(struct Scsi_Host *shost, struct bsg_job *job)
{
struct fc_internal *i = to_fc_internal(shost->transportt);
+ struct fc_bsg_request *bsg_request = job->request;
+ struct fc_bsg_reply *bsg_reply = job->reply;
int cmdlen = sizeof(uint32_t); /* start with length of msgcode */
int ret;
+ /* check if we really have all the request data needed */
+ if (job->request_len < cmdlen) {
+ ret = -ENOMSG;
+ goto fail_rport_msg;
+ }
+
/* Validate the rport command */
- switch (job->request->msgcode) {
+ switch (bsg_request->msgcode) {
case FC_BSG_RPT_ELS:
cmdlen += sizeof(struct fc_bsg_rport_els);
goto check_bidi;
@@ -3902,133 +3729,31 @@ check_bidi:
goto fail_rport_msg;
}
- /* check if we really have all the request data needed */
- if (job->request_len < cmdlen) {
- ret = -ENOMSG;
- goto fail_rport_msg;
- }
-
ret = i->f->bsg_request(job);
if (!ret)
- return FC_DISPATCH_UNLOCKED;
+ return 0;
fail_rport_msg:
/* return the errno failure code as the only status */
BUG_ON(job->reply_len < sizeof(uint32_t));
- job->reply->reply_payload_rcv_len = 0;
- job->reply->result = ret;
+ bsg_reply->reply_payload_rcv_len = 0;
+ bsg_reply->result = ret;
job->reply_len = sizeof(uint32_t);
- fc_bsg_jobdone(job);
- return FC_DISPATCH_UNLOCKED;
-}
-
-
-/**
- * fc_bsg_request_handler - generic handler for bsg requests
- * @q: request queue to manage
- * @shost: Scsi_Host related to the bsg object
- * @rport: FC remote port related to the bsg object (optional)
- * @dev: device structure for bsg object
- */
-static void
-fc_bsg_request_handler(struct request_queue *q, struct Scsi_Host *shost,
- struct fc_rport *rport, struct device *dev)
-{
- struct request *req;
- struct fc_bsg_job *job;
- enum fc_dispatch_result ret;
-
- if (!get_device(dev))
- return;
-
- while (1) {
- if (rport && (rport->port_state == FC_PORTSTATE_BLOCKED) &&
- !(rport->flags & FC_RPORT_FAST_FAIL_TIMEDOUT))
- break;
-
- req = blk_fetch_request(q);
- if (!req)
- break;
-
- if (rport && (rport->port_state != FC_PORTSTATE_ONLINE)) {
- req->errors = -ENXIO;
- spin_unlock_irq(q->queue_lock);
- blk_end_request_all(req, -ENXIO);
- spin_lock_irq(q->queue_lock);
- continue;
- }
-
- spin_unlock_irq(q->queue_lock);
-
- ret = fc_req_to_bsgjob(shost, rport, req);
- if (ret) {
- req->errors = ret;
- blk_end_request_all(req, ret);
- spin_lock_irq(q->queue_lock);
- continue;
- }
-
- job = req->special;
-
- /* check if we have the msgcode value at least */
- if (job->request_len < sizeof(uint32_t)) {
- BUG_ON(job->reply_len < sizeof(uint32_t));
- job->reply->reply_payload_rcv_len = 0;
- job->reply->result = -ENOMSG;
- job->reply_len = sizeof(uint32_t);
- fc_bsg_jobdone(job);
- spin_lock_irq(q->queue_lock);
- continue;
- }
-
- /* the dispatch routines will unlock the queue_lock */
- if (rport)
- ret = fc_bsg_rport_dispatch(q, shost, rport, job);
- else
- ret = fc_bsg_host_dispatch(q, shost, job);
-
- /* did dispatcher hit state that can't process any more */
- if (ret == FC_DISPATCH_BREAK)
- break;
-
- /* did dispatcher had released the lock */
- if (ret == FC_DISPATCH_UNLOCKED)
- spin_lock_irq(q->queue_lock);
- }
-
- spin_unlock_irq(q->queue_lock);
- put_device(dev);
- spin_lock_irq(q->queue_lock);
-}
-
-
-/**
- * fc_bsg_host_handler - handler for bsg requests for a fc host
- * @q: fc host request queue
- */
-static void
-fc_bsg_host_handler(struct request_queue *q)
-{
- struct Scsi_Host *shost = q->queuedata;
-
- fc_bsg_request_handler(q, shost, NULL, &shost->shost_gendev);
+ bsg_job_done(job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
+ return 0;
}
-
-/**
- * fc_bsg_rport_handler - handler for bsg requests for a fc rport
- * @q: rport request queue
- */
-static void
-fc_bsg_rport_handler(struct request_queue *q)
+static int fc_bsg_dispatch(struct bsg_job *job)
{
- struct fc_rport *rport = q->queuedata;
- struct Scsi_Host *shost = rport_to_shost(rport);
+ struct Scsi_Host *shost = fc_bsg_to_shost(job);
- fc_bsg_request_handler(q, shost, rport, &rport->dev);
+ if (scsi_is_fc_rport(job->dev))
+ return fc_bsg_rport_dispatch(shost, job);
+ else
+ return fc_bsg_host_dispatch(shost, job);
}
-
/**
* fc_bsg_hostadd - Create and add the bsg hooks so we can receive requests
* @shost: shost for fc_host
@@ -4051,33 +3776,42 @@ fc_bsg_hostadd(struct Scsi_Host *shost, struct fc_host_attrs *fc_host)
snprintf(bsg_name, sizeof(bsg_name),
"fc_host%d", shost->host_no);
- q = __scsi_alloc_queue(shost, fc_bsg_host_handler);
+ q = __scsi_alloc_queue(shost, bsg_request_fn);
if (!q) {
- printk(KERN_ERR "fc_host%d: bsg interface failed to "
- "initialize - no request queue\n",
- shost->host_no);
+ dev_err(dev,
+ "fc_host%d: bsg interface failed to initialize - no request queue\n",
+ shost->host_no);
return -ENOMEM;
}
- q->queuedata = shost;
- queue_flag_set_unlocked(QUEUE_FLAG_BIDI, q);
- blk_queue_softirq_done(q, fc_bsg_softirq_done);
- blk_queue_rq_timed_out(q, fc_bsg_job_timeout);
- blk_queue_rq_timeout(q, FC_DEFAULT_BSG_TIMEOUT);
-
- err = bsg_register_queue(q, dev, bsg_name, NULL);
+ err = bsg_setup_queue(dev, q, bsg_name, fc_bsg_dispatch,
+ i->f->dd_bsg_size);
if (err) {
- printk(KERN_ERR "fc_host%d: bsg interface failed to "
- "initialize - register queue\n",
- shost->host_no);
+ dev_err(dev,
+ "fc_host%d: bsg interface failed to initialize - setup queue\n",
+ shost->host_no);
blk_cleanup_queue(q);
return err;
}
-
+ blk_queue_rq_timed_out(q, fc_bsg_job_timeout);
+ blk_queue_rq_timeout(q, FC_DEFAULT_BSG_TIMEOUT);
fc_host->rqst_q = q;
return 0;
}
+static int fc_bsg_rport_prep(struct request_queue *q, struct request *req)
+{
+ struct fc_rport *rport = dev_to_rport(q->queuedata);
+
+ if (rport->port_state == FC_PORTSTATE_BLOCKED &&
+ !(rport->flags & FC_RPORT_FAST_FAIL_TIMEDOUT))
+ return BLKPREP_DEFER;
+
+ if (rport->port_state != FC_PORTSTATE_ONLINE)
+ return BLKPREP_KILL;
+
+ return BLKPREP_OK;
+}
/**
* fc_bsg_rportadd - Create and add the bsg hooks so we can receive requests
@@ -4097,29 +3831,22 @@ fc_bsg_rportadd(struct Scsi_Host *shost, struct fc_rport *rport)
if (!i->f->bsg_request)
return -ENOTSUPP;
- q = __scsi_alloc_queue(shost, fc_bsg_rport_handler);
+ q = __scsi_alloc_queue(shost, bsg_request_fn);
if (!q) {
- printk(KERN_ERR "%s: bsg interface failed to "
- "initialize - no request queue\n",
- dev->kobj.name);
+ dev_err(dev, "bsg interface failed to initialize - no request queue\n");
return -ENOMEM;
}
- q->queuedata = rport;
- queue_flag_set_unlocked(QUEUE_FLAG_BIDI, q);
- blk_queue_softirq_done(q, fc_bsg_softirq_done);
- blk_queue_rq_timed_out(q, fc_bsg_job_timeout);
- blk_queue_rq_timeout(q, BLK_DEFAULT_SG_TIMEOUT);
-
- err = bsg_register_queue(q, dev, NULL, NULL);
+ err = bsg_setup_queue(dev, q, NULL, fc_bsg_dispatch, i->f->dd_bsg_size);
if (err) {
- printk(KERN_ERR "%s: bsg interface failed to "
- "initialize - register queue\n",
- dev->kobj.name);
+ dev_err(dev, "failed to setup bsg queue\n");
blk_cleanup_queue(q);
return err;
}
+ blk_queue_prep_rq(q, fc_bsg_rport_prep);
+ blk_queue_rq_timed_out(q, fc_bsg_job_timeout);
+ blk_queue_rq_timeout(q, BLK_DEFAULT_SG_TIMEOUT);
rport->rqst_q = q;
return 0;
}
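
[Annotation] The FC transport's private bsg plumbing (fc_req_to_bsgjob(), fc_bsg_jobdone(), the hand-rolled dispatch loop) is replaced wholesale by bsg-lib: bsg_setup_queue() owns job allocation and the request_fn, the LLD supplies only a dispatch callback, and completion goes through bsg_job_done(), with bsg_job_get()/bsg_job_put() replacing the ref_cnt/job_lock pair in the timeout path. A minimal sketch of a bsg-lib consumer, assuming the 4.10-era signatures used in the hunks above; my_bsg_dispatch() is hypothetical:

#include <linux/bsg-lib.h>
#include <scsi/scsi_transport_fc.h>

/* bsg-lib has already mapped the request and reply payloads into
 * job->request_payload and job->reply_payload by the time this runs. */
static int my_bsg_dispatch(struct bsg_job *job)
{
	struct fc_bsg_reply *bsg_reply = job->reply;

	/* ... hand the job to the hardware; on completion: */
	bsg_reply->result = 0;
	bsg_job_done(job, bsg_reply->result,
		     bsg_reply->reply_payload_rcv_len);
	return 0;
}
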
diff --git a/drivers/scsi/scsi_transport_srp.c b/drivers/scsi/scsi_transport_srp.c
index e3cd3ece4412..b87a78673f65 100644
--- a/drivers/scsi/scsi_transport_srp.c
+++ b/drivers/scsi/scsi_transport_srp.c
@@ -24,7 +24,6 @@
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/string.h>
-#include <linux/delay.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
@@ -115,21 +114,12 @@ static DECLARE_TRANSPORT_CLASS(srp_host_class, "srp_host", srp_host_setup,
static DECLARE_TRANSPORT_CLASS(srp_rport_class, "srp_remote_ports",
NULL, NULL, NULL);
-#define SRP_PID(p) \
- (p)->port_id[0], (p)->port_id[1], (p)->port_id[2], (p)->port_id[3], \
- (p)->port_id[4], (p)->port_id[5], (p)->port_id[6], (p)->port_id[7], \
- (p)->port_id[8], (p)->port_id[9], (p)->port_id[10], (p)->port_id[11], \
- (p)->port_id[12], (p)->port_id[13], (p)->port_id[14], (p)->port_id[15]
-
-#define SRP_PID_FMT "%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x:" \
- "%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x"
-
static ssize_t
show_srp_rport_id(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct srp_rport *rport = transport_class_to_srp_rport(dev);
- return sprintf(buf, SRP_PID_FMT "\n", SRP_PID(rport));
+ return sprintf(buf, "%16phC\n", rport->port_id);
}
static DEVICE_ATTR(port_id, S_IRUGO, show_srp_rport_id, NULL);
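
[Annotation] %16phC replaces the sixteen-argument SRP_PID macro: the %*ph family prints a small buffer as hex, with the field width giving the byte count and the suffix selecting the separator (plain ph uses spaces, phC ':', phD '-', phN none). For example, with a made-up ID:

u8 port_id[16] = { 0x4c, 0x57, };	/* hypothetical */

pr_info("port id %16phC\n", port_id);	/* "4c:57:00:...:00" */
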
@@ -402,36 +392,6 @@ static void srp_reconnect_work(struct work_struct *work)
}
}
-/**
- * scsi_request_fn_active() - number of kernel threads inside scsi_request_fn()
- * @shost: SCSI host for which to count the number of scsi_request_fn() callers.
- *
- * To do: add support for scsi-mq in this function.
- */
-static int scsi_request_fn_active(struct Scsi_Host *shost)
-{
- struct scsi_device *sdev;
- struct request_queue *q;
- int request_fn_active = 0;
-
- shost_for_each_device(sdev, shost) {
- q = sdev->request_queue;
-
- spin_lock_irq(q->queue_lock);
- request_fn_active += q->request_fn_active;
- spin_unlock_irq(q->queue_lock);
- }
-
- return request_fn_active;
-}
-
-/* Wait until ongoing shost->hostt->queuecommand() calls have finished. */
-static void srp_wait_for_queuecommand(struct Scsi_Host *shost)
-{
- while (scsi_request_fn_active(shost))
- msleep(20);
-}
-
static void __rport_fail_io_fast(struct srp_rport *rport)
{
struct Scsi_Host *shost = rport_to_shost(rport);
@@ -441,14 +401,17 @@ static void __rport_fail_io_fast(struct srp_rport *rport)
if (srp_rport_set_state(rport, SRP_RPORT_FAIL_FAST))
return;
+ /*
+ * Call scsi_target_block() to wait for ongoing shost->queuecommand()
+ * calls before invoking i->f->terminate_rport_io().
+ */
+ scsi_target_block(rport->dev.parent);
scsi_target_unblock(rport->dev.parent, SDEV_TRANSPORT_OFFLINE);
/* Involve the LLD if possible to terminate all I/O on the rport. */
i = to_srp_internal(shost->transportt);
- if (i->f->terminate_rport_io) {
- srp_wait_for_queuecommand(shost);
+ if (i->f->terminate_rport_io)
i->f->terminate_rport_io(rport);
- }
}
/**
@@ -576,7 +539,6 @@ int srp_reconnect_rport(struct srp_rport *rport)
if (res)
goto out;
scsi_target_block(&shost->shost_gendev);
- srp_wait_for_queuecommand(shost);
res = rport->state != SRP_RPORT_LOST ? i->f->reconnect(rport) : -ENODEV;
pr_debug("%s (state %d): transport.reconnect() returned %d\n",
dev_name(&shost->shost_gendev), rport->state, res);
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 51e56296f465..1622e23138e0 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -93,6 +93,7 @@ MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK15_MAJOR);
MODULE_ALIAS_SCSI_DEVICE(TYPE_DISK);
MODULE_ALIAS_SCSI_DEVICE(TYPE_MOD);
MODULE_ALIAS_SCSI_DEVICE(TYPE_RBC);
+MODULE_ALIAS_SCSI_DEVICE(TYPE_ZBC);
#if !defined(CONFIG_DEBUG_BLOCK_EXT_DEVT)
#define SD_MINORS 16
@@ -163,7 +164,7 @@ cache_type_store(struct device *dev, struct device_attribute *attr,
static const char temp[] = "temporary ";
int len;
- if (sdp->type != TYPE_DISK)
+ if (sdp->type != TYPE_DISK && sdp->type != TYPE_ZBC)
/* no cache control on RBC devices; theoretically they
* can do it, but there's probably so many exceptions
* it's not worth the risk */
@@ -262,7 +263,7 @@ allow_restart_store(struct device *dev, struct device_attribute *attr,
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
- if (sdp->type != TYPE_DISK)
+ if (sdp->type != TYPE_DISK && sdp->type != TYPE_ZBC)
return -EINVAL;
sdp->allow_restart = simple_strtoul(buf, NULL, 10);
@@ -392,6 +393,11 @@ provisioning_mode_store(struct device *dev, struct device_attribute *attr,
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
+ if (sd_is_zoned(sdkp)) {
+ sd_config_discard(sdkp, SD_LBP_DISABLE);
+ return count;
+ }
+
if (sdp->type != TYPE_DISK)
return -EINVAL;
@@ -459,7 +465,7 @@ max_write_same_blocks_store(struct device *dev, struct device_attribute *attr,
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
- if (sdp->type != TYPE_DISK)
+ if (sdp->type != TYPE_DISK && sdp->type != TYPE_ZBC)
return -EINVAL;
err = kstrtoul(buf, 10, &max);
@@ -710,7 +716,6 @@ static int sd_setup_discard_cmnd(struct scsi_cmnd *cmd)
struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
sector_t sector = blk_rq_pos(rq);
unsigned int nr_sectors = blk_rq_sectors(rq);
- unsigned int nr_bytes = blk_rq_bytes(rq);
unsigned int len;
int ret;
char *buf;
@@ -766,24 +771,19 @@ static int sd_setup_discard_cmnd(struct scsi_cmnd *cmd)
goto out;
}
- rq->completion_data = page;
rq->timeout = SD_TIMEOUT;
cmd->transfersize = len;
cmd->allowed = SD_MAX_RETRIES;
- /*
- * Initially __data_len is set to the amount of data that needs to be
- * transferred to the target. This amount depends on whether WRITE SAME
- * or UNMAP is being used. After the scatterlist has been mapped by
- * scsi_init_io() we set __data_len to the size of the area to be
- * discarded on disk. This allows us to report completion on the full
- * amount of blocks described by the request.
- */
- blk_add_request_payload(rq, page, 0, len);
- ret = scsi_init_io(cmd);
- rq->__data_len = nr_bytes;
+ rq->special_vec.bv_page = page;
+ rq->special_vec.bv_offset = 0;
+ rq->special_vec.bv_len = len;
+
+ rq->rq_flags |= RQF_SPECIAL_PAYLOAD;
+ rq->resid_len = len;
+ ret = scsi_init_io(cmd);
out:
if (ret != BLKPREP_OK)
__free_page(page);
@@ -844,6 +844,12 @@ static int sd_setup_write_same_cmnd(struct scsi_cmnd *cmd)
BUG_ON(bio_offset(bio) || bio_iovec(bio).bv_len != sdp->sector_size);
+ if (sd_is_zoned(sdkp)) {
+ ret = sd_zbc_setup_write_cmnd(cmd);
+ if (ret != BLKPREP_OK)
+ return ret;
+ }
+
sector >>= ilog2(sdp->sector_size) - 9;
nr_sectors >>= ilog2(sdp->sector_size) - 9;
@@ -901,19 +907,25 @@ static int sd_setup_read_write_cmnd(struct scsi_cmnd *SCpnt)
struct request *rq = SCpnt->request;
struct scsi_device *sdp = SCpnt->device;
struct gendisk *disk = rq->rq_disk;
- struct scsi_disk *sdkp;
+ struct scsi_disk *sdkp = scsi_disk(disk);
sector_t block = blk_rq_pos(rq);
sector_t threshold;
unsigned int this_count = blk_rq_sectors(rq);
unsigned int dif, dix;
+ bool zoned_write = sd_is_zoned(sdkp) && rq_data_dir(rq) == WRITE;
int ret;
unsigned char protect;
+ if (zoned_write) {
+ ret = sd_zbc_setup_write_cmnd(SCpnt);
+ if (ret != BLKPREP_OK)
+ return ret;
+ }
+
ret = scsi_init_io(SCpnt);
if (ret != BLKPREP_OK)
goto out;
SCpnt = rq->special;
- sdkp = scsi_disk(disk);
/* from here on until we're complete, any goto out
* is used for a killable error condition */
@@ -1013,8 +1025,7 @@ static int sd_setup_read_write_cmnd(struct scsi_cmnd *SCpnt)
} else if (rq_data_dir(rq) == READ) {
SCpnt->cmnd[0] = READ_6;
} else {
- scmd_printk(KERN_ERR, SCpnt, "Unknown command %llu,%llx\n",
- req_op(rq), (unsigned long long) rq->cmd_flags);
+ scmd_printk(KERN_ERR, SCpnt, "Unknown command %d\n", req_op(rq));
goto out;
}
@@ -1132,6 +1143,9 @@ static int sd_setup_read_write_cmnd(struct scsi_cmnd *SCpnt)
*/
ret = BLKPREP_OK;
out:
+ if (zoned_write && ret != BLKPREP_OK)
+ sd_zbc_cancel_write_cmnd(SCpnt);
+
return ret;
}
@@ -1149,6 +1163,10 @@ static int sd_init_command(struct scsi_cmnd *cmd)
case REQ_OP_READ:
case REQ_OP_WRITE:
return sd_setup_read_write_cmnd(cmd);
+ case REQ_OP_ZONE_REPORT:
+ return sd_zbc_setup_report_cmnd(cmd);
+ case REQ_OP_ZONE_RESET:
+ return sd_zbc_setup_reset_cmnd(cmd);
default:
BUG();
}
@@ -1158,8 +1176,8 @@ static void sd_uninit_command(struct scsi_cmnd *SCpnt)
{
struct request *rq = SCpnt->request;
- if (req_op(rq) == REQ_OP_DISCARD)
- __free_page(rq->completion_data);
+ if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
+ __free_page(rq->special_vec.bv_page);
if (SCpnt->cmnd != rq->cmd) {
mempool_free(SCpnt->cmnd, sd_cdb_pool);
@@ -1495,7 +1513,7 @@ static int sd_sync_cache(struct scsi_disk *sdkp)
*/
res = scsi_execute_req_flags(sdp, cmd, DMA_NONE, NULL, 0,
&sshdr, timeout, SD_MAX_RETRIES,
- NULL, REQ_PM);
+ NULL, 0, RQF_PM);
if (res == 0)
break;
}
@@ -1780,7 +1798,10 @@ static int sd_done(struct scsi_cmnd *SCpnt)
unsigned char op = SCpnt->cmnd[0];
unsigned char unmap = SCpnt->cmnd[1] & 8;
- if (req_op(req) == REQ_OP_DISCARD || req_op(req) == REQ_OP_WRITE_SAME) {
+ switch (req_op(req)) {
+ case REQ_OP_DISCARD:
+ case REQ_OP_WRITE_SAME:
+ case REQ_OP_ZONE_RESET:
if (!result) {
good_bytes = blk_rq_bytes(req);
scsi_set_resid(SCpnt, 0);
@@ -1788,6 +1809,17 @@ static int sd_done(struct scsi_cmnd *SCpnt)
good_bytes = 0;
scsi_set_resid(SCpnt, blk_rq_bytes(req));
}
+ break;
+ case REQ_OP_ZONE_REPORT:
+ if (!result) {
+ good_bytes = scsi_bufflen(SCpnt)
+ - scsi_get_resid(SCpnt);
+ scsi_set_resid(SCpnt, 0);
+ } else {
+ good_bytes = 0;
+ scsi_set_resid(SCpnt, blk_rq_bytes(req));
+ }
+ break;
}
if (result) {
@@ -1840,7 +1872,7 @@ static int sd_done(struct scsi_cmnd *SCpnt)
good_bytes = 0;
req->__data_len = blk_rq_bytes(req);
- req->cmd_flags |= REQ_QUIET;
+ req->rq_flags |= RQF_QUIET;
}
}
}
@@ -1848,7 +1880,11 @@ static int sd_done(struct scsi_cmnd *SCpnt)
default:
break;
}
+
out:
+ if (sd_is_zoned(sdkp))
+ sd_zbc_complete(SCpnt, good_bytes, &sshdr);
+
SCSI_LOG_HLCOMPLETE(1, scmd_printk(KERN_INFO, SCpnt,
"sd_done: completed %d of %d bytes\n",
good_bytes, scsi_bufflen(SCpnt)));
@@ -1983,7 +2019,6 @@ sd_spinup_disk(struct scsi_disk *sdkp)
}
}
-
/*
* Determine whether disk supports Data Integrity Field.
*/
@@ -2133,6 +2168,9 @@ static int read_capacity_16(struct scsi_disk *sdkp, struct scsi_device *sdp,
/* Logical blocks per physical block exponent */
sdkp->physical_block_size = (1 << (buffer[13] & 0xf)) * sector_size;
+ /* RC basis */
+ sdkp->rc_basis = (buffer[12] >> 4) & 0x3;
+
/* Lowest aligned logical block */
alignment = ((buffer[14] & 0x3f) << 8 | buffer[15]) * sector_size;
blk_queue_alignment_offset(sdp->request_queue, alignment);
@@ -2242,7 +2280,6 @@ sd_read_capacity(struct scsi_disk *sdkp, unsigned char *buffer)
{
int sector_size;
struct scsi_device *sdp = sdkp->device;
- sector_t old_capacity = sdkp->capacity;
if (sd_try_rc16_first(sdp)) {
sector_size = read_capacity_16(sdkp, sdp, buffer);
@@ -2323,35 +2360,44 @@ got_data:
sector_size = 512;
}
blk_queue_logical_block_size(sdp->request_queue, sector_size);
+ blk_queue_physical_block_size(sdp->request_queue,
+ sdkp->physical_block_size);
+ sdkp->device->sector_size = sector_size;
- {
- char cap_str_2[10], cap_str_10[10];
+ if (sdkp->capacity > 0xffffffff)
+ sdp->use_16_for_rw = 1;
- string_get_size(sdkp->capacity, sector_size,
- STRING_UNITS_2, cap_str_2, sizeof(cap_str_2));
- string_get_size(sdkp->capacity, sector_size,
- STRING_UNITS_10, cap_str_10,
- sizeof(cap_str_10));
+}
- if (sdkp->first_scan || old_capacity != sdkp->capacity) {
- sd_printk(KERN_NOTICE, sdkp,
- "%llu %d-byte logical blocks: (%s/%s)\n",
- (unsigned long long)sdkp->capacity,
- sector_size, cap_str_10, cap_str_2);
+/*
+ * Print disk capacity
+ */
+static void
+sd_print_capacity(struct scsi_disk *sdkp,
+ sector_t old_capacity)
+{
+ int sector_size = sdkp->device->sector_size;
+ char cap_str_2[10], cap_str_10[10];
- if (sdkp->physical_block_size != sector_size)
- sd_printk(KERN_NOTICE, sdkp,
- "%u-byte physical blocks\n",
- sdkp->physical_block_size);
- }
- }
+ string_get_size(sdkp->capacity, sector_size,
+ STRING_UNITS_2, cap_str_2, sizeof(cap_str_2));
+ string_get_size(sdkp->capacity, sector_size,
+ STRING_UNITS_10, cap_str_10,
+ sizeof(cap_str_10));
- if (sdkp->capacity > 0xffffffff)
- sdp->use_16_for_rw = 1;
+ if (sdkp->first_scan || old_capacity != sdkp->capacity) {
+ sd_printk(KERN_NOTICE, sdkp,
+ "%llu %d-byte logical blocks: (%s/%s)\n",
+ (unsigned long long)sdkp->capacity,
+ sector_size, cap_str_10, cap_str_2);
- blk_queue_physical_block_size(sdp->request_queue,
- sdkp->physical_block_size);
- sdkp->device->sector_size = sector_size;
+ if (sdkp->physical_block_size != sector_size)
+ sd_printk(KERN_NOTICE, sdkp,
+ "%u-byte physical blocks\n",
+ sdkp->physical_block_size);
+
+ sd_zbc_print_zones(sdkp);
+ }
}
/* called with buffer of length 512 */
@@ -2419,9 +2465,7 @@ sd_read_write_protect_flag(struct scsi_disk *sdkp, unsigned char *buffer)
if (sdkp->first_scan || old_wp != sdkp->write_prot) {
sd_printk(KERN_NOTICE, sdkp, "Write Protect is %s\n",
sdkp->write_prot ? "on" : "off");
- sd_printk(KERN_DEBUG, sdkp,
- "Mode Sense: %02x %02x %02x %02x\n",
- buffer[0], buffer[1], buffer[2], buffer[3]);
+ sd_printk(KERN_DEBUG, sdkp, "Mode Sense: %4ph\n", buffer);
}
}
}
@@ -2613,7 +2657,7 @@ static void sd_read_app_tag_own(struct scsi_disk *sdkp, unsigned char *buffer)
struct scsi_mode_data data;
struct scsi_sense_hdr sshdr;
- if (sdp->type != TYPE_DISK)
+ if (sdp->type != TYPE_DISK && sdp->type != TYPE_ZBC)
return;
if (sdkp->protection_type == 0)
@@ -2720,6 +2764,7 @@ static void sd_read_block_limits(struct scsi_disk *sdkp)
*/
static void sd_read_block_characteristics(struct scsi_disk *sdkp)
{
+ struct request_queue *q = sdkp->disk->queue;
unsigned char *buffer;
u16 rot;
const int vpd_len = 64;
@@ -2734,10 +2779,21 @@ static void sd_read_block_characteristics(struct scsi_disk *sdkp)
rot = get_unaligned_be16(&buffer[4]);
if (rot == 1) {
- queue_flag_set_unlocked(QUEUE_FLAG_NONROT, sdkp->disk->queue);
- queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, sdkp->disk->queue);
+ queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);
+ queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, q);
}
+ sdkp->zoned = (buffer[8] >> 4) & 3;
+ if (sdkp->zoned == 1)
+ q->limits.zoned = BLK_ZONED_HA;
+ else if (sdkp->device->type == TYPE_ZBC)
+ q->limits.zoned = BLK_ZONED_HM;
+ else
+ q->limits.zoned = BLK_ZONED_NONE;
+ if (blk_queue_is_zoned(q) && sdkp->first_scan)
+ sd_printk(KERN_NOTICE, sdkp, "Host-%s zoned block device\n",
+ q->limits.zoned == BLK_ZONED_HM ? "managed" : "aware");
+
out:
kfree(buffer);
}
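
[Annotation] sd now latches the ZONED field of the Block Device Characteristics VPD page into the queue's zoned model: ZONED == 1 on a regular disk means host-aware (BLK_ZONED_HA), a TYPE_ZBC device is host-managed (BLK_ZONED_HM), and anything else is unzoned. A sketch of how a consumer might branch on the model; my_needs_strict_write_order() is hypothetical:

#include <linux/blkdev.h>

/* Host-managed drives reject out-of-order writes inside a sequential
 * zone; host-aware drives accept them but prefer sequential writes. */
static bool my_needs_strict_write_order(struct request_queue *q)
{
	return blk_queue_is_zoned(q) && q->limits.zoned == BLK_ZONED_HM;
}
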
@@ -2809,6 +2865,7 @@ static int sd_revalidate_disk(struct gendisk *disk)
struct scsi_disk *sdkp = scsi_disk(disk);
struct scsi_device *sdp = sdkp->device;
struct request_queue *q = sdkp->disk->queue;
+ sector_t old_capacity = sdkp->capacity;
unsigned char *buffer;
unsigned int dev_max, rw_max;
@@ -2842,8 +2899,11 @@ static int sd_revalidate_disk(struct gendisk *disk)
sd_read_block_provisioning(sdkp);
sd_read_block_limits(sdkp);
sd_read_block_characteristics(sdkp);
+ sd_zbc_read_zones(sdkp, buffer);
}
+ sd_print_capacity(sdkp, old_capacity);
+
sd_read_write_protect_flag(sdkp, buffer);
sd_read_cache_type(sdkp, buffer);
sd_read_app_tag_own(sdkp, buffer);
@@ -3041,9 +3101,16 @@ static int sd_probe(struct device *dev)
scsi_autopm_get_device(sdp);
error = -ENODEV;
- if (sdp->type != TYPE_DISK && sdp->type != TYPE_MOD && sdp->type != TYPE_RBC)
+ if (sdp->type != TYPE_DISK &&
+ sdp->type != TYPE_ZBC &&
+ sdp->type != TYPE_MOD &&
+ sdp->type != TYPE_RBC)
goto out;
+#ifndef CONFIG_BLK_DEV_ZONED
+ if (sdp->type == TYPE_ZBC)
+ goto out;
+#endif
SCSI_LOG_HLQUEUE(3, sdev_printk(KERN_INFO, sdp,
"sd_probe\n"));
@@ -3147,6 +3214,8 @@ static int sd_remove(struct device *dev)
del_gendisk(sdkp->disk);
sd_shutdown(dev);
+ sd_zbc_remove(sdkp);
+
blk_register_region(devt, SD_MINORS, NULL,
sd_default_probe, NULL, NULL);
@@ -3200,7 +3269,7 @@ static int sd_start_stop_device(struct scsi_disk *sdkp, int start)
return -ENODEV;
res = scsi_execute_req_flags(sdp, cmd, DMA_NONE, NULL, 0, &sshdr,
- SD_TIMEOUT, SD_MAX_RETRIES, NULL, REQ_PM);
+ SD_TIMEOUT, SD_MAX_RETRIES, NULL, 0, RQF_PM);
if (res) {
sd_print_result(sdkp, "Start/Stop Unit failed", res);
if (driver_byte(res) & DRIVER_SENSE)
diff --git a/drivers/scsi/sd.h b/drivers/scsi/sd.h
index c8d986368da9..4dac35e96a75 100644
--- a/drivers/scsi/sd.h
+++ b/drivers/scsi/sd.h
@@ -64,6 +64,15 @@ struct scsi_disk {
struct scsi_device *device;
struct device dev;
struct gendisk *disk;
+#ifdef CONFIG_BLK_DEV_ZONED
+ unsigned int nr_zones;
+ unsigned int zone_blocks;
+ unsigned int zone_shift;
+ unsigned long *zones_wlock;
+ unsigned int zones_optimal_open;
+ unsigned int zones_optimal_nonseq;
+ unsigned int zones_max_open;
+#endif
atomic_t openers;
sector_t capacity; /* size in logical blocks */
u32 max_xfer_blocks;
@@ -94,6 +103,9 @@ struct scsi_disk {
unsigned lbpvpd : 1;
unsigned ws10 : 1;
unsigned ws16 : 1;
+ unsigned rc_basis: 2;
+ unsigned zoned: 2;
+ unsigned urswrz : 1;
};
#define to_scsi_disk(obj) container_of(obj,struct scsi_disk,dev)
@@ -156,6 +168,11 @@ static inline unsigned int logical_to_bytes(struct scsi_device *sdev, sector_t b
return blocks * sdev->sector_size;
}
+static inline sector_t sectors_to_logical(struct scsi_device *sdev, sector_t sector)
+{
+ return sector >> (ilog2(sdev->sector_size) - 9);
+}
+
/*
* Look up the DIX operation based on whether the command is read or
* write and whether dix and dif are enabled.
@@ -239,4 +256,57 @@ static inline void sd_dif_complete(struct scsi_cmnd *cmd, unsigned int a)
#endif /* CONFIG_BLK_DEV_INTEGRITY */
+static inline int sd_is_zoned(struct scsi_disk *sdkp)
+{
+ return sdkp->zoned == 1 || sdkp->device->type == TYPE_ZBC;
+}
+
+#ifdef CONFIG_BLK_DEV_ZONED
+
+extern int sd_zbc_read_zones(struct scsi_disk *sdkp, unsigned char *buffer);
+extern void sd_zbc_remove(struct scsi_disk *sdkp);
+extern void sd_zbc_print_zones(struct scsi_disk *sdkp);
+extern int sd_zbc_setup_write_cmnd(struct scsi_cmnd *cmd);
+extern void sd_zbc_cancel_write_cmnd(struct scsi_cmnd *cmd);
+extern int sd_zbc_setup_report_cmnd(struct scsi_cmnd *cmd);
+extern int sd_zbc_setup_reset_cmnd(struct scsi_cmnd *cmd);
+extern void sd_zbc_complete(struct scsi_cmnd *cmd, unsigned int good_bytes,
+ struct scsi_sense_hdr *sshdr);
+
+#else /* CONFIG_BLK_DEV_ZONED */
+
+static inline int sd_zbc_read_zones(struct scsi_disk *sdkp,
+ unsigned char *buf)
+{
+ return 0;
+}
+
+static inline void sd_zbc_remove(struct scsi_disk *sdkp) {}
+
+static inline void sd_zbc_print_zones(struct scsi_disk *sdkp) {}
+
+static inline int sd_zbc_setup_write_cmnd(struct scsi_cmnd *cmd)
+{
+ /* Let the drive fail requests */
+ return BLKPREP_OK;
+}
+
+static inline void sd_zbc_cancel_write_cmnd(struct scsi_cmnd *cmd) {}
+
+static inline int sd_zbc_setup_report_cmnd(struct scsi_cmnd *cmd)
+{
+ return BLKPREP_INVALID;
+}
+
+static inline int sd_zbc_setup_reset_cmnd(struct scsi_cmnd *cmd)
+{
+ return BLKPREP_INVALID;
+}
+
+static inline void sd_zbc_complete(struct scsi_cmnd *cmd,
+ unsigned int good_bytes,
+ struct scsi_sense_hdr *sshdr) {}
+
+#endif /* CONFIG_BLK_DEV_ZONED */
+
#endif /* _SCSI_DISK_H */
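
[Annotation] sectors_to_logical() is the inverse of the existing logical_to_sectors() helper, converting 512-byte block-layer sectors into device logical blocks. A worked example, assuming a hypothetical device with 4096-byte logical blocks, where ilog2(4096) - 9 == 3:

static sector_t demo_conversions(struct scsi_device *sdev)
{
	sector_t lba = sectors_to_logical(sdev, 8);	/* 8 sectors -> LBA 1 */

	return logical_to_sectors(sdev, lba);		/* LBA 1 -> 8 sectors */
}
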
diff --git a/drivers/scsi/sd_zbc.c b/drivers/scsi/sd_zbc.c
new file mode 100644
index 000000000000..92620c8ea8ad
--- /dev/null
+++ b/drivers/scsi/sd_zbc.c
@@ -0,0 +1,648 @@
+/*
+ * SCSI Zoned Block commands
+ *
+ * Copyright (C) 2014-2015 SUSE Linux GmbH
+ * Written by: Hannes Reinecke <hare@suse.de>
+ * Modified by: Damien Le Moal <damien.lemoal@hgst.com>
+ * Modified by: Shaun Tancheff <shaun.tancheff@seagate.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; see the file COPYING. If not, write to
+ * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139,
+ * USA.
+ *
+ */
+
+#include <linux/blkdev.h>
+
+#include <asm/unaligned.h>
+
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_dbg.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_driver.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_eh.h>
+
+#include "sd.h"
+#include "scsi_priv.h"
+
+enum zbc_zone_type {
+ ZBC_ZONE_TYPE_CONV = 0x1,
+ ZBC_ZONE_TYPE_SEQWRITE_REQ,
+ ZBC_ZONE_TYPE_SEQWRITE_PREF,
+ ZBC_ZONE_TYPE_RESERVED,
+};
+
+enum zbc_zone_cond {
+ ZBC_ZONE_COND_NO_WP,
+ ZBC_ZONE_COND_EMPTY,
+ ZBC_ZONE_COND_IMP_OPEN,
+ ZBC_ZONE_COND_EXP_OPEN,
+ ZBC_ZONE_COND_CLOSED,
+ ZBC_ZONE_COND_READONLY = 0xd,
+ ZBC_ZONE_COND_FULL,
+ ZBC_ZONE_COND_OFFLINE,
+};
+
+/**
+ * Convert a zone descriptor to a zone struct.
+ */
+static void sd_zbc_parse_report(struct scsi_disk *sdkp,
+ u8 *buf,
+ struct blk_zone *zone)
+{
+ struct scsi_device *sdp = sdkp->device;
+
+ memset(zone, 0, sizeof(struct blk_zone));
+
+ zone->type = buf[0] & 0x0f;
+ zone->cond = (buf[1] >> 4) & 0xf;
+ if (buf[1] & 0x01)
+ zone->reset = 1;
+ if (buf[1] & 0x02)
+ zone->non_seq = 1;
+
+ zone->len = logical_to_sectors(sdp, get_unaligned_be64(&buf[8]));
+ zone->start = logical_to_sectors(sdp, get_unaligned_be64(&buf[16]));
+ zone->wp = logical_to_sectors(sdp, get_unaligned_be64(&buf[24]));
+ if (zone->type != ZBC_ZONE_TYPE_CONV &&
+ zone->cond == ZBC_ZONE_COND_FULL)
+ zone->wp = zone->start + zone->len;
+}
+
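
[Annotation] For reference, here is a standalone sketch decoding one 64-byte REPORT ZONES descriptor, mirroring the fields sd_zbc_parse_report() reads above (type in byte 0, flags/condition in byte 1, big-endian 64-bit length, start LBA and write pointer at bytes 8, 16 and 24):

#include <linux/kernel.h>
#include <asm/unaligned.h>

static void demo_decode_zone(const u8 *desc)
{
	u8  type  = desc[0] & 0x0f;		/* conventional, seq-required, ... */
	u8  cond  = (desc[1] >> 4) & 0x0f;	/* empty, open, full, ... */
	u64 len   = get_unaligned_be64(&desc[8]);
	u64 start = get_unaligned_be64(&desc[16]);
	u64 wp    = get_unaligned_be64(&desc[24]);

	pr_info("zone %llu+%llu type %u cond %u wp %llu\n",
		start, len, type, cond, wp);
}
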
+/**
+ * Issue a REPORT ZONES scsi command.
+ */
+static int sd_zbc_report_zones(struct scsi_disk *sdkp, unsigned char *buf,
+ unsigned int buflen, sector_t lba)
+{
+ struct scsi_device *sdp = sdkp->device;
+ const int timeout = sdp->request_queue->rq_timeout;
+ struct scsi_sense_hdr sshdr;
+ unsigned char cmd[16];
+ unsigned int rep_len;
+ int result;
+
+ memset(cmd, 0, 16);
+ cmd[0] = ZBC_IN;
+ cmd[1] = ZI_REPORT_ZONES;
+ put_unaligned_be64(lba, &cmd[2]);
+ put_unaligned_be32(buflen, &cmd[10]);
+ memset(buf, 0, buflen);
+
+ result = scsi_execute_req(sdp, cmd, DMA_FROM_DEVICE,
+ buf, buflen, &sshdr,
+ timeout, SD_MAX_RETRIES, NULL);
+ if (result) {
+ sd_printk(KERN_ERR, sdkp,
+ "REPORT ZONES lba %llu failed with %d/%d\n",
+ (unsigned long long)lba,
+ host_byte(result), driver_byte(result));
+ return -EIO;
+ }
+
+ rep_len = get_unaligned_be32(&buf[0]);
+ if (rep_len < 64) {
+ sd_printk(KERN_ERR, sdkp,
+ "REPORT ZONES report invalid length %u\n",
+ rep_len);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+int sd_zbc_setup_report_cmnd(struct scsi_cmnd *cmd)
+{
+ struct request *rq = cmd->request;
+ struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
+ sector_t lba, sector = blk_rq_pos(rq);
+ unsigned int nr_bytes = blk_rq_bytes(rq);
+ int ret;
+
+ WARN_ON(nr_bytes == 0);
+
+ if (!sd_is_zoned(sdkp))
+ /* Not a zoned device */
+ return BLKPREP_KILL;
+
+ ret = scsi_init_io(cmd);
+ if (ret != BLKPREP_OK)
+ return ret;
+
+ cmd->cmd_len = 16;
+ memset(cmd->cmnd, 0, cmd->cmd_len);
+ cmd->cmnd[0] = ZBC_IN;
+ cmd->cmnd[1] = ZI_REPORT_ZONES;
+ lba = sectors_to_logical(sdkp->device, sector);
+ put_unaligned_be64(lba, &cmd->cmnd[2]);
+ put_unaligned_be32(nr_bytes, &cmd->cmnd[10]);
+ /* Request a partial report to speed things up */
+ cmd->cmnd[14] = ZBC_REPORT_ZONE_PARTIAL;
+
+ cmd->sc_data_direction = DMA_FROM_DEVICE;
+ cmd->sdb.length = nr_bytes;
+ cmd->transfersize = sdkp->device->sector_size;
+ cmd->allowed = 0;
+
+ /*
+ * A report may return fewer bytes than requested. Make sure
+ * to report completion on the entire initial request.
+ */
+ rq->__data_len = nr_bytes;
+
+ return BLKPREP_OK;
+}
+
+static void sd_zbc_report_zones_complete(struct scsi_cmnd *scmd,
+ unsigned int good_bytes)
+{
+ struct request *rq = scmd->request;
+ struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
+ struct sg_mapping_iter miter;
+ struct blk_zone_report_hdr hdr;
+ struct blk_zone zone;
+ unsigned int offset, bytes = 0;
+ unsigned long flags;
+ u8 *buf;
+
+ if (good_bytes < 64)
+ return;
+
+ memset(&hdr, 0, sizeof(struct blk_zone_report_hdr));
+
+ sg_miter_start(&miter, scsi_sglist(scmd), scsi_sg_count(scmd),
+ SG_MITER_TO_SG | SG_MITER_ATOMIC);
+
+ local_irq_save(flags);
+ while (sg_miter_next(&miter) && bytes < good_bytes) {
+
+ buf = miter.addr;
+ offset = 0;
+
+ if (bytes == 0) {
+ /* Set the report header */
+ hdr.nr_zones = min_t(unsigned int,
+ (good_bytes - 64) / 64,
+ get_unaligned_be32(&buf[0]) / 64);
+ memcpy(buf, &hdr, sizeof(struct blk_zone_report_hdr));
+ offset += 64;
+ bytes += 64;
+ }
+
+ /* Parse zone descriptors */
+ while (offset < miter.length && hdr.nr_zones) {
+ WARN_ON(offset > miter.length);
+ buf = miter.addr + offset;
+ sd_zbc_parse_report(sdkp, buf, &zone);
+ memcpy(buf, &zone, sizeof(struct blk_zone));
+ offset += 64;
+ bytes += 64;
+ hdr.nr_zones--;
+ }
+
+ if (!hdr.nr_zones)
+ break;
+
+ }
+ sg_miter_stop(&miter);
+ local_irq_restore(flags);
+}
+
+static inline sector_t sd_zbc_zone_sectors(struct scsi_disk *sdkp)
+{
+ return logical_to_sectors(sdkp->device, sdkp->zone_blocks);
+}
+
+static inline unsigned int sd_zbc_zone_no(struct scsi_disk *sdkp,
+ sector_t sector)
+{
+ return sectors_to_logical(sdkp->device, sector) >> sdkp->zone_shift;
+}
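
Because sd_zbc_check_zone_size() below rejects any zone size that is not a power of two, the zone lookup above reduces to a single shift. A worked example with made-up geometry:

	/* Illustration only: 512 B logical blocks, 256 MiB zones. */
	u64 zone_blocks = (256 * 1024 * 1024) / 512;	/* 524288 */
	unsigned int zone_shift = ilog2(zone_blocks);	/* 19 */
	/* Logical block 1310720 then maps to zone 1310720 >> 19 = 2. */
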
+
+int sd_zbc_setup_reset_cmnd(struct scsi_cmnd *cmd)
+{
+ struct request *rq = cmd->request;
+ struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
+ sector_t sector = blk_rq_pos(rq);
+ sector_t block = sectors_to_logical(sdkp->device, sector);
+ unsigned int zno = block >> sdkp->zone_shift;
+
+ if (!sd_is_zoned(sdkp))
+ /* Not a zoned device */
+ return BLKPREP_KILL;
+
+ if (sdkp->device->changed)
+ return BLKPREP_KILL;
+
+ if (sector & (sd_zbc_zone_sectors(sdkp) - 1))
+ /* Unaligned request */
+ return BLKPREP_KILL;
+
+ /* Do not allow concurrent reset and writes */
+ if (sdkp->zones_wlock &&
+ test_and_set_bit(zno, sdkp->zones_wlock))
+ return BLKPREP_DEFER;
+
+ cmd->cmd_len = 16;
+ memset(cmd->cmnd, 0, cmd->cmd_len);
+ cmd->cmnd[0] = ZBC_OUT;
+ cmd->cmnd[1] = ZO_RESET_WRITE_POINTER;
+ put_unaligned_be64(block, &cmd->cmnd[2]);
+
+ rq->timeout = SD_TIMEOUT;
+ cmd->sc_data_direction = DMA_NONE;
+ cmd->transfersize = 0;
+ cmd->allowed = 0;
+
+ return BLKPREP_OK;
+}
+
+int sd_zbc_setup_write_cmnd(struct scsi_cmnd *cmd)
+{
+ struct request *rq = cmd->request;
+ struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
+ sector_t sector = blk_rq_pos(rq);
+ sector_t zone_sectors = sd_zbc_zone_sectors(sdkp);
+ unsigned int zno = sd_zbc_zone_no(sdkp, sector);
+
+ /*
+ * Note: checking the alignment of the write command on
+ * logical blocks is done in sd.c
+ */
+
+ /* Do not allow zone boundaries crossing on host-managed drives */
+ if (blk_queue_zoned_model(sdkp->disk->queue) == BLK_ZONED_HM &&
+ (sector & (zone_sectors - 1)) + blk_rq_sectors(rq) > zone_sectors)
+ return BLKPREP_KILL;
+
+ /*
+ * Do not issue more than one write at a time per
+ * zone. This solves write ordering problems due to
+ * the unlocking of the request queue in the dispatch
+ * path in the non scsi-mq case. For scsi-mq, this
+ * also avoids potential write reordering when multiple
+ * threads running on different CPUs write to the same
+ * zone (with a synchronized sequential pattern).
+ */
+ if (sdkp->zones_wlock &&
+ test_and_set_bit(zno, sdkp->zones_wlock))
+ return BLKPREP_DEFER;
+
+ return BLKPREP_OK;
+}
+
+static void sd_zbc_unlock_zone(struct request *rq)
+{
+ struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
+
+ if (sdkp->zones_wlock) {
+ unsigned int zno = sd_zbc_zone_no(sdkp, blk_rq_pos(rq));
+ WARN_ON_ONCE(!test_bit(zno, sdkp->zones_wlock));
+ clear_bit_unlock(zno, sdkp->zones_wlock);
+ smp_mb__after_atomic();
+ }
+}
+
+void sd_zbc_cancel_write_cmnd(struct scsi_cmnd *cmd)
+{
+ sd_zbc_unlock_zone(cmd->request);
+}
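
The zones_wlock bitmap used by the write, reset and completion paths above boils down to a per-zone trylock. A minimal standalone sketch of the same pattern, kept separate from the actual driver code:

/* One lock bit per zone, allocated as BITS_TO_LONGS(nr_zones) longs. */
static unsigned long *zones_wlock;

static bool zone_write_trylock(unsigned int zno)
{
	/* false: another in-flight write owns the zone, defer the request */
	return !test_and_set_bit(zno, zones_wlock);
}

static void zone_write_unlock(unsigned int zno)
{
	clear_bit_unlock(zno, zones_wlock);
	/* make the release visible before deferred writes are requeued */
	smp_mb__after_atomic();
}
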
+
+void sd_zbc_complete(struct scsi_cmnd *cmd,
+ unsigned int good_bytes,
+ struct scsi_sense_hdr *sshdr)
+{
+ int result = cmd->result;
+ struct request *rq = cmd->request;
+
+ switch (req_op(rq)) {
+ case REQ_OP_WRITE:
+ case REQ_OP_WRITE_SAME:
+ case REQ_OP_ZONE_RESET:
+
+ /* Unlock the zone */
+ sd_zbc_unlock_zone(rq);
+
+ if (!result ||
+ sshdr->sense_key != ILLEGAL_REQUEST)
+ break;
+
+ switch (sshdr->asc) {
+ case 0x24:
+ /*
+ * INVALID FIELD IN CDB error: For a zone reset,
+ * this means that a reset of a conventional
+ * zone was attempted. Nothing to worry about in
+ * this case, so be quiet about the error.
+ */
+ if (req_op(rq) == REQ_OP_ZONE_RESET)
+ rq->rq_flags |= RQF_QUIET;
+ break;
+ case 0x21:
+ /*
+ * INVALID ADDRESS FOR WRITE error: It is unlikely that
+ * retrying write requests that failed with any kind of
+ * alignment error will succeed. So don't.
+ */
+ cmd->allowed = 0;
+ break;
+ }
+
+ break;
+
+ case REQ_OP_ZONE_REPORT:
+
+ if (!result)
+ sd_zbc_report_zones_complete(cmd, good_bytes);
+ break;
+
+ }
+}
+
+/**
+ * sd_zbc_read_zoned_characteristics - Read the zoned block device characteristics (VPD page B6).
+ */
+static int sd_zbc_read_zoned_characteristics(struct scsi_disk *sdkp,
+ unsigned char *buf)
+{
+
+ if (scsi_get_vpd_page(sdkp->device, 0xb6, buf, 64)) {
+ sd_printk(KERN_NOTICE, sdkp,
+ "Unconstrained-read check failed\n");
+ return -ENODEV;
+ }
+
+ if (sdkp->device->type != TYPE_ZBC) {
+ /* Host-aware */
+ sdkp->urswrz = 1;
+ sdkp->zones_optimal_open = get_unaligned_be32(&buf[8]);
+ sdkp->zones_optimal_nonseq = get_unaligned_be32(&buf[12]);
+ sdkp->zones_max_open = 0;
+ } else {
+ /* Host-managed */
+ sdkp->urswrz = buf[4] & 1;
+ sdkp->zones_optimal_open = 0;
+ sdkp->zones_optimal_nonseq = 0;
+ sdkp->zones_max_open = get_unaligned_be32(&buf[16]);
+ }
+
+ return 0;
+}
+
+/**
+ * sd_zbc_check_capacity - Check the reported capacity.
+ */
+static int sd_zbc_check_capacity(struct scsi_disk *sdkp,
+ unsigned char *buf)
+{
+ sector_t lba;
+ int ret;
+
+ if (sdkp->rc_basis != 0)
+ return 0;
+
+ /* Do a REPORT ZONES to get the maximum LBA and check the capacity */
+ ret = sd_zbc_report_zones(sdkp, buf, SD_BUF_SIZE, 0);
+ if (ret)
+ return ret;
+
+ /* The max_lba field gives the last addressable LBA: capacity is max_lba + 1 */
+ lba = get_unaligned_be64(&buf[8]);
+ if (lba + 1 == sdkp->capacity)
+ return 0;
+
+ if (sdkp->first_scan)
+ sd_printk(KERN_WARNING, sdkp,
+ "Changing capacity from %llu to max LBA+1 %llu\n",
+ (unsigned long long)sdkp->capacity,
+ (unsigned long long)lba + 1);
+ sdkp->capacity = lba + 1;
+
+ return 0;
+}
+
+#define SD_ZBC_BUF_SIZE 131072
+
+static int sd_zbc_check_zone_size(struct scsi_disk *sdkp)
+{
+ u64 zone_blocks;
+ sector_t block = 0;
+ unsigned char *buf;
+ unsigned char *rec;
+ unsigned int buf_len;
+ unsigned int list_length;
+ int ret;
+ u8 same;
+
+ sdkp->zone_blocks = 0;
+
+ /* Get a buffer */
+ buf = kmalloc(SD_ZBC_BUF_SIZE, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ /* Do a REPORT ZONES to get the SAME field of the report header */
+ ret = sd_zbc_report_zones(sdkp, buf, SD_ZBC_BUF_SIZE, 0);
+ if (ret) {
+ zone_blocks = 0;
+ goto out;
+ }
+
+ same = buf[4] & 0x0f;
+ if (same > 0) {
+ rec = &buf[64];
+ zone_blocks = get_unaligned_be64(&rec[8]);
+ goto out;
+ }
+
+ /*
+ * Check the size of all zones: all zones must be of
+ * equal size, except the last zone which can be smaller
+ * than other zones.
+ */
+ do {
+
+ /* Parse REPORT ZONES header */
+ list_length = get_unaligned_be32(&buf[0]) + 64;
+ rec = buf + 64;
+ if (list_length < SD_ZBC_BUF_SIZE)
+ buf_len = list_length;
+ else
+ buf_len = SD_ZBC_BUF_SIZE;
+
+ /* Parse zone descriptors */
+ while (rec < buf + buf_len) {
+ zone_blocks = get_unaligned_be64(&rec[8]);
+ if (sdkp->zone_blocks == 0) {
+ sdkp->zone_blocks = zone_blocks;
+ } else if (zone_blocks != sdkp->zone_blocks &&
+ (block + zone_blocks < sdkp->capacity
+ || zone_blocks > sdkp->zone_blocks)) {
+ zone_blocks = 0;
+ goto out;
+ }
+ block += zone_blocks;
+ rec += 64;
+ }
+
+ if (block < sdkp->capacity) {
+ ret = sd_zbc_report_zones(sdkp, buf,
+ SD_ZBC_BUF_SIZE, block);
+ if (ret) {
+ /* Do not leak the report buffer on error */
+ kfree(buf);
+ return ret;
+ }
+ }
+
+ } while (block < sdkp->capacity);
+
+ zone_blocks = sdkp->zone_blocks;
+
+out:
+ kfree(buf);
+
+ if (!zone_blocks) {
+ if (sdkp->first_scan)
+ sd_printk(KERN_NOTICE, sdkp,
+ "Devices with non constant zone "
+ "size are not supported\n");
+ return -ENODEV;
+ }
+
+ if (!is_power_of_2(zone_blocks)) {
+ if (sdkp->first_scan)
+ sd_printk(KERN_NOTICE, sdkp,
+ "Devices with non power of 2 zone "
+ "size are not supported\n");
+ return -ENODEV;
+ }
+
+ if (logical_to_sectors(sdkp->device, zone_blocks) > UINT_MAX) {
+ if (sdkp->first_scan)
+ sd_printk(KERN_NOTICE, sdkp,
+ "Zone size too large\n");
+ return -ENODEV;
+ }
+
+ sdkp->zone_blocks = zone_blocks;
+
+ return 0;
+}
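
As a concrete illustration (made-up numbers): a layout of nine 524288-block zones followed by a 1000-block runt passes the check above, since the runt is both smaller than the common size and the last zone before the capacity. A layout mixing 524288-block and 262144-block zones in the middle of the LBA space zeroes zone_blocks and is rejected with -ENODEV.
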
+
+static int sd_zbc_setup(struct scsi_disk *sdkp)
+{
+
+ /* chunk_sectors indicates the zone size */
+ blk_queue_chunk_sectors(sdkp->disk->queue,
+ logical_to_sectors(sdkp->device, sdkp->zone_blocks));
+ sdkp->zone_shift = ilog2(sdkp->zone_blocks);
+ sdkp->nr_zones = sdkp->capacity >> sdkp->zone_shift;
+ if (sdkp->capacity & (sdkp->zone_blocks - 1))
+ sdkp->nr_zones++;
+
+ if (!sdkp->zones_wlock) {
+ sdkp->zones_wlock = kcalloc(BITS_TO_LONGS(sdkp->nr_zones),
+ sizeof(unsigned long),
+ GFP_KERNEL);
+ if (!sdkp->zones_wlock)
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+int sd_zbc_read_zones(struct scsi_disk *sdkp,
+ unsigned char *buf)
+{
+ sector_t capacity;
+ int ret = 0;
+
+ if (!sd_is_zoned(sdkp))
+ /*
+ * Device managed or normal SCSI disk,
+ * no special handling required
+ */
+ return 0;
+
+ /* Get zoned block device characteristics */
+ ret = sd_zbc_read_zoned_characteristics(sdkp, buf);
+ if (ret)
+ goto err;
+
+ /*
+ * Check for unconstrained reads: host-managed devices with
+ * constrained reads (drives that fail reads past the write pointer)
+ * are not supported.
+ */
+ if (!sdkp->urswrz) {
+ if (sdkp->first_scan)
+ sd_printk(KERN_NOTICE, sdkp,
+ "constrained reads devices are not supported\n");
+ ret = -ENODEV;
+ goto err;
+ }
+
+ /* Check capacity */
+ ret = sd_zbc_check_capacity(sdkp, buf);
+ if (ret)
+ goto err;
+ capacity = logical_to_sectors(sdkp->device, sdkp->capacity);
+
+ /*
+ * Check zone size: only devices with a constant zone size (except
+ * for a possible last runt zone) that is a power of 2 are supported.
+ */
+ ret = sd_zbc_check_zone_size(sdkp);
+ if (ret)
+ goto err;
+
+ /* The drive satisfies the kernel restrictions: set it up */
+ ret = sd_zbc_setup(sdkp);
+ if (ret)
+ goto err;
+
+ /* READ16/WRITE16 is mandatory for ZBC disks */
+ sdkp->device->use_16_for_rw = 1;
+ sdkp->device->use_10_for_rw = 0;
+
+ return 0;
+
+err:
+ sdkp->capacity = 0;
+
+ return ret;
+}
+
+void sd_zbc_remove(struct scsi_disk *sdkp)
+{
+ kfree(sdkp->zones_wlock);
+ sdkp->zones_wlock = NULL;
+}
+
+void sd_zbc_print_zones(struct scsi_disk *sdkp)
+{
+ if (!sd_is_zoned(sdkp) || !sdkp->capacity)
+ return;
+
+ if (sdkp->capacity & (sdkp->zone_blocks - 1))
+ sd_printk(KERN_NOTICE, sdkp,
+ "%u zones of %u logical blocks + 1 runt zone\n",
+ sdkp->nr_zones - 1,
+ sdkp->zone_blocks);
+ else
+ sd_printk(KERN_NOTICE, sdkp,
+ "%u zones of %u logical blocks\n",
+ sdkp->nr_zones,
+ sdkp->zone_blocks);
+}
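
The REQ_OP_ZONE_REPORT path above is what ultimately services the BLKREPORTZONE ioctl introduced alongside this code in the block layer. A minimal userspace sketch, assuming the <linux/blkzoned.h> UAPI header from the same series (error handling trimmed):

#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <linux/blkzoned.h>

int main(int argc, char **argv)
{
	struct blk_zone_report *rep;
	unsigned int i;
	int fd;

	if (argc != 2)
		return 1;
	fd = open(argv[1], O_RDONLY);
	if (fd < 0)
		return 1;
	/* Header plus room for 16 zone descriptors. */
	rep = calloc(1, sizeof(*rep) + 16 * sizeof(struct blk_zone));
	if (!rep)
		return 1;
	rep->sector = 0;	/* start reporting from the first zone */
	rep->nr_zones = 16;
	if (ioctl(fd, BLKREPORTZONE, rep) < 0)
		return 1;
	for (i = 0; i < rep->nr_zones; i++)
		printf("zone %u: start %llu len %llu wp %llu cond %u\n", i,
		       (unsigned long long)rep->zones[i].start,
		       (unsigned long long)rep->zones[i].len,
		       (unsigned long long)rep->zones[i].wp,
		       rep->zones[i].cond);
	return 0;
}
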
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index 070332eb41f3..dbe5b4b95df0 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -581,6 +581,9 @@ sg_write(struct file *filp, const char __user *buf, size_t count, loff_t * ppos)
sg_io_hdr_t *hp;
unsigned char cmnd[SG_MAX_CDB_SIZE];
+ if (unlikely(segment_eq(get_fs(), KERNEL_DS)))
+ return -EINVAL;
+
if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp)))
return -ENXIO;
SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp,
diff --git a/drivers/scsi/smartpqi/smartpqi.h b/drivers/scsi/smartpqi/smartpqi.h
index 07b6444d3e0a..b673825f46b5 100644
--- a/drivers/scsi/smartpqi/smartpqi.h
+++ b/drivers/scsi/smartpqi/smartpqi.h
@@ -929,8 +929,6 @@ struct pqi_ctrl_info {
int max_msix_vectors;
int num_msix_vectors_enabled;
int num_msix_vectors_initialized;
- u32 msix_vectors[PQI_MAX_MSIX_VECTORS];
- void *intr_data[PQI_MAX_MSIX_VECTORS];
int event_irq;
struct Scsi_Host *scsi_host;
diff --git a/drivers/scsi/smartpqi/smartpqi_init.c b/drivers/scsi/smartpqi/smartpqi_init.c
index a535b2661f38..8702d9cf8040 100644
--- a/drivers/scsi/smartpqi/smartpqi_init.c
+++ b/drivers/scsi/smartpqi/smartpqi_init.c
@@ -25,6 +25,7 @@
#include <linux/rtc.h>
#include <linux/bcd.h>
#include <linux/cciss_ioctl.h>
+#include <linux/blk-mq-pci.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
@@ -2887,19 +2888,19 @@ static irqreturn_t pqi_irq_handler(int irq, void *data)
static int pqi_request_irqs(struct pqi_ctrl_info *ctrl_info)
{
+ struct pci_dev *pdev = ctrl_info->pci_dev;
int i;
int rc;
- ctrl_info->event_irq = ctrl_info->msix_vectors[0];
+ ctrl_info->event_irq = pci_irq_vector(pdev, 0);
for (i = 0; i < ctrl_info->num_msix_vectors_enabled; i++) {
- rc = request_irq(ctrl_info->msix_vectors[i],
- pqi_irq_handler, 0,
- DRIVER_NAME_SHORT, ctrl_info->intr_data[i]);
+ rc = request_irq(pci_irq_vector(pdev, i), pqi_irq_handler, 0,
+ DRIVER_NAME_SHORT, &ctrl_info->queue_groups[i]);
if (rc) {
- dev_err(&ctrl_info->pci_dev->dev,
+ dev_err(&pdev->dev,
"irq %u init failed with error %d\n",
- ctrl_info->msix_vectors[i], rc);
+ pci_irq_vector(pdev, i), rc);
return rc;
}
ctrl_info->num_msix_vectors_initialized++;
@@ -2908,72 +2909,23 @@ static int pqi_request_irqs(struct pqi_ctrl_info *ctrl_info)
return 0;
}
-static void pqi_free_irqs(struct pqi_ctrl_info *ctrl_info)
-{
- int i;
-
- for (i = 0; i < ctrl_info->num_msix_vectors_initialized; i++)
- free_irq(ctrl_info->msix_vectors[i],
- ctrl_info->intr_data[i]);
-}
-
static int pqi_enable_msix_interrupts(struct pqi_ctrl_info *ctrl_info)
{
- unsigned int i;
- int max_vectors;
- int num_vectors_enabled;
- struct msix_entry msix_entries[PQI_MAX_MSIX_VECTORS];
-
- max_vectors = ctrl_info->num_queue_groups;
-
- for (i = 0; i < max_vectors; i++)
- msix_entries[i].entry = i;
-
- num_vectors_enabled = pci_enable_msix_range(ctrl_info->pci_dev,
- msix_entries, PQI_MIN_MSIX_VECTORS, max_vectors);
+ int ret;
- if (num_vectors_enabled < 0) {
+ ret = pci_alloc_irq_vectors(ctrl_info->pci_dev,
+ PQI_MIN_MSIX_VECTORS, ctrl_info->num_queue_groups,
+ PCI_IRQ_MSIX | PCI_IRQ_AFFINITY);
+ if (ret < 0) {
dev_err(&ctrl_info->pci_dev->dev,
- "MSI-X init failed with error %d\n",
- num_vectors_enabled);
- return num_vectors_enabled;
- }
-
- ctrl_info->num_msix_vectors_enabled = num_vectors_enabled;
- for (i = 0; i < num_vectors_enabled; i++) {
- ctrl_info->msix_vectors[i] = msix_entries[i].vector;
- ctrl_info->intr_data[i] = &ctrl_info->queue_groups[i];
+ "MSI-X init failed with error %d\n", ret);
+ return ret;
}
+ ctrl_info->num_msix_vectors_enabled = ret;
return 0;
}
-static void pqi_irq_set_affinity_hint(struct pqi_ctrl_info *ctrl_info)
-{
- int i;
- int rc;
- int cpu;
-
- cpu = cpumask_first(cpu_online_mask);
- for (i = 0; i < ctrl_info->num_msix_vectors_initialized; i++) {
- rc = irq_set_affinity_hint(ctrl_info->msix_vectors[i],
- get_cpu_mask(cpu));
- if (rc)
- dev_err(&ctrl_info->pci_dev->dev,
- "error %d setting affinity hint for irq vector %u\n",
- rc, ctrl_info->msix_vectors[i]);
- cpu = cpumask_next(cpu, cpu_online_mask);
- }
-}
-
-static void pqi_irq_unset_affinity_hint(struct pqi_ctrl_info *ctrl_info)
-{
- int i;
-
- for (i = 0; i < ctrl_info->num_msix_vectors_initialized; i++)
- irq_set_affinity_hint(ctrl_info->msix_vectors[i], NULL);
-}
-
static int pqi_alloc_operational_queues(struct pqi_ctrl_info *ctrl_info)
{
unsigned int i;
@@ -4743,6 +4695,13 @@ static int pqi_slave_configure(struct scsi_device *sdev)
return 0;
}
+static int pqi_map_queues(struct Scsi_Host *shost)
+{
+ struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);
+
+ return blk_mq_pci_map_queues(&shost->tag_set, ctrl_info->pci_dev);
+}
+
static int pqi_getpciinfo_ioctl(struct pqi_ctrl_info *ctrl_info,
void __user *arg)
{
@@ -5130,6 +5089,7 @@ static struct scsi_host_template pqi_driver_template = {
.ioctl = pqi_ioctl,
.slave_alloc = pqi_slave_alloc,
.slave_configure = pqi_slave_configure,
+ .map_queues = pqi_map_queues,
.sdev_attrs = pqi_sdev_attrs,
.shost_attrs = pqi_shost_attrs,
};
@@ -5159,7 +5119,7 @@ static int pqi_register_scsi(struct pqi_ctrl_info *ctrl_info)
shost->cmd_per_lun = shost->can_queue;
shost->sg_tablesize = ctrl_info->sg_tablesize;
shost->transportt = pqi_sas_transport_template;
- shost->irq = ctrl_info->msix_vectors[0];
+ shost->irq = pci_irq_vector(ctrl_info->pci_dev, 0);
shost->unique_id = shost->irq;
shost->nr_hw_queues = ctrl_info->num_queue_groups;
shost->hostdata[0] = (unsigned long)ctrl_info;
@@ -5409,8 +5369,6 @@ static int pqi_ctrl_init(struct pqi_ctrl_info *ctrl_info)
if (rc)
return rc;
- pqi_irq_set_affinity_hint(ctrl_info);
-
rc = pqi_create_queues(ctrl_info);
if (rc)
return rc;
@@ -5557,10 +5515,14 @@ static inline void pqi_free_ctrl_info(struct pqi_ctrl_info *ctrl_info)
static void pqi_free_interrupts(struct pqi_ctrl_info *ctrl_info)
{
- pqi_irq_unset_affinity_hint(ctrl_info);
- pqi_free_irqs(ctrl_info);
- if (ctrl_info->num_msix_vectors_enabled)
- pci_disable_msix(ctrl_info->pci_dev);
+ int i;
+
+ for (i = 0; i < ctrl_info->num_msix_vectors_initialized; i++) {
+ free_irq(pci_irq_vector(ctrl_info->pci_dev, i),
+ &ctrl_info->queue_groups[i]);
+ }
+
+ pci_free_irq_vectors(ctrl_info->pci_dev);
}
static void pqi_free_ctrl_resources(struct pqi_ctrl_info *ctrl_info)
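
For context, the smartpqi hunks above are a mechanical conversion from open-coded MSI-X handling (a struct msix_entry array plus manual affinity hints) to the managed pci_alloc_irq_vectors() API. The general shape of that pattern, sketched for a hypothetical driver (my_irq_handler and my_queues are placeholders, not smartpqi names):

static int my_setup_irqs(struct pci_dev *pdev, int nr_queues)
{
	int i, rc, nvecs;

	/* Allocate 1..nr_queues MSI-X vectors, spread across CPUs. */
	nvecs = pci_alloc_irq_vectors(pdev, 1, nr_queues,
				      PCI_IRQ_MSIX | PCI_IRQ_AFFINITY);
	if (nvecs < 0)
		return nvecs;

	for (i = 0; i < nvecs; i++) {
		/* pci_irq_vector() maps a vector index to its Linux IRQ. */
		rc = request_irq(pci_irq_vector(pdev, i), my_irq_handler,
				 0, "mydrv", &my_queues[i]);
		if (rc) {
			while (--i >= 0)
				free_irq(pci_irq_vector(pdev, i),
					 &my_queues[i]);
			pci_free_irq_vectors(pdev);
			return rc;
		}
	}
	return nvecs;
}

Teardown is the mirror image, free_irq() per vector followed by pci_free_irq_vectors(), exactly as the reworked pqi_free_interrupts() above does.
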
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
index 618422ea3a41..605887d5ee57 100644
--- a/drivers/scsi/st.c
+++ b/drivers/scsi/st.c
@@ -546,7 +546,7 @@ static int st_scsi_execute(struct st_request *SRpnt, const unsigned char *cmd,
return DRIVER_ERROR << 24;
blk_rq_set_block_pc(req);
- req->cmd_flags |= REQ_QUIET;
+ req->rq_flags |= RQF_QUIET;
mdata->null_mapped = 1;
diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
index 8ccfc9ea874b..05526b71541b 100644
--- a/drivers/scsi/storvsc_drv.c
+++ b/drivers/scsi/storvsc_drv.c
@@ -1495,9 +1495,9 @@ static int storvsc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scmnd)
if (sg_count) {
if (sg_count > MAX_PAGE_BUFFER_COUNT) {
- payload_sz = (sg_count * sizeof(void *) +
+ payload_sz = (sg_count * sizeof(u64) +
sizeof(struct vmbus_packet_mpb_array));
- payload = kmalloc(payload_sz, GFP_ATOMIC);
+ payload = kzalloc(payload_sz, GFP_ATOMIC);
if (!payload)
return SCSI_MLQUEUE_DEVICE_BUSY;
}
diff --git a/drivers/scsi/sun3_scsi.c b/drivers/scsi/sun3_scsi.c
index 3c4c07038948..88db6992420e 100644
--- a/drivers/scsi/sun3_scsi.c
+++ b/drivers/scsi/sun3_scsi.c
@@ -43,20 +43,18 @@
#define NCR5380_implementation_fields /* none */
-#define NCR5380_read(reg) sun3scsi_read(reg)
-#define NCR5380_write(reg, value) sun3scsi_write(reg, value)
+#define NCR5380_read(reg) in_8(hostdata->io + (reg))
+#define NCR5380_write(reg, value) out_8(hostdata->io + (reg), value)
#define NCR5380_queue_command sun3scsi_queue_command
#define NCR5380_bus_reset sun3scsi_bus_reset
#define NCR5380_abort sun3scsi_abort
#define NCR5380_info sun3scsi_info
-#define NCR5380_dma_recv_setup(instance, data, count) (count)
-#define NCR5380_dma_send_setup(instance, data, count) (count)
-#define NCR5380_dma_residual(instance) \
- sun3scsi_dma_residual(instance)
-#define NCR5380_dma_xfer_len(instance, cmd, phase) \
- sun3scsi_dma_xfer_len(cmd->SCp.this_residual, cmd)
+#define NCR5380_dma_xfer_len sun3scsi_dma_xfer_len
+#define NCR5380_dma_recv_setup sun3scsi_dma_count
+#define NCR5380_dma_send_setup sun3scsi_dma_count
+#define NCR5380_dma_residual sun3scsi_dma_residual
#define NCR5380_acquire_dma_irq(instance) (1)
#define NCR5380_release_dma_irq(instance)
@@ -82,7 +80,6 @@ module_param(setup_hostid, int, 0);
#define SUN3_DVMA_BUFSIZE 0xe000
static struct scsi_cmnd *sun3_dma_setup_done;
-static unsigned char *sun3_scsi_regp;
static volatile struct sun3_dma_regs *dregs;
static struct sun3_udc_regs *udc_regs;
static unsigned char *sun3_dma_orig_addr;
@@ -90,20 +87,6 @@ static unsigned long sun3_dma_orig_count;
static int sun3_dma_active;
static unsigned long last_residual;
-/*
- * NCR 5380 register access functions
- */
-
-static inline unsigned char sun3scsi_read(int reg)
-{
- return in_8(sun3_scsi_regp + reg);
-}
-
-static inline void sun3scsi_write(int reg, int value)
-{
- out_8(sun3_scsi_regp + reg, value);
-}
-
#ifndef SUN3_SCSI_VME
/* dma controller register access functions */
@@ -158,8 +141,8 @@ static irqreturn_t scsi_sun3_intr(int irq, void *dev)
}
/* sun3scsi_dma_setup() -- initialize the dma controller for a read/write */
-static unsigned long sun3scsi_dma_setup(struct Scsi_Host *instance,
- void *data, unsigned long count, int write_flag)
+static int sun3scsi_dma_setup(struct NCR5380_hostdata *hostdata,
+ unsigned char *data, int count, int write_flag)
{
void *addr;
@@ -211,9 +194,10 @@ static unsigned long sun3scsi_dma_setup(struct Scsi_Host *instance,
dregs->csr |= CSR_FIFO;
if(dregs->fifo_count != count) {
- shost_printk(KERN_ERR, instance, "FIFO mismatch %04x not %04x\n",
+ shost_printk(KERN_ERR, hostdata->host,
+ "FIFO mismatch %04x not %04x\n",
dregs->fifo_count, (unsigned int) count);
- NCR5380_dprint(NDEBUG_DMA, instance);
+ NCR5380_dprint(NDEBUG_DMA, hostdata->host);
}
/* setup udc */
@@ -248,14 +232,34 @@ static unsigned long sun3scsi_dma_setup(struct Scsi_Host *instance,
}
-static inline unsigned long sun3scsi_dma_residual(struct Scsi_Host *instance)
+static int sun3scsi_dma_count(struct NCR5380_hostdata *hostdata,
+ unsigned char *data, int count)
+{
+ return count;
+}
+
+static inline int sun3scsi_dma_recv_setup(struct NCR5380_hostdata *hostdata,
+ unsigned char *data, int count)
+{
+ return sun3scsi_dma_setup(hostdata, data, count, 0);
+}
+
+static inline int sun3scsi_dma_send_setup(struct NCR5380_hostdata *hostdata,
+ unsigned char *data, int count)
+{
+ return sun3scsi_dma_setup(hostdata, data, count, 1);
+}
+
+static int sun3scsi_dma_residual(struct NCR5380_hostdata *hostdata)
{
return last_residual;
}
-static inline unsigned long sun3scsi_dma_xfer_len(unsigned long wanted_len,
- struct scsi_cmnd *cmd)
+static int sun3scsi_dma_xfer_len(struct NCR5380_hostdata *hostdata,
+ struct scsi_cmnd *cmd)
{
+ int wanted_len = cmd->SCp.this_residual;
+
if (wanted_len < DMA_MIN_SIZE || cmd->request->cmd_type != REQ_TYPE_FS)
return 0;
@@ -428,9 +432,10 @@ static struct scsi_host_template sun3_scsi_template = {
static int __init sun3_scsi_probe(struct platform_device *pdev)
{
struct Scsi_Host *instance;
+ struct NCR5380_hostdata *hostdata;
int error;
struct resource *irq, *mem;
- unsigned char *ioaddr;
+ void __iomem *ioaddr;
int host_flags = 0;
#ifdef SUN3_SCSI_VME
int i;
@@ -493,8 +498,6 @@ static int __init sun3_scsi_probe(struct platform_device *pdev)
}
#endif
- sun3_scsi_regp = ioaddr;
-
instance = scsi_host_alloc(&sun3_scsi_template,
sizeof(struct NCR5380_hostdata));
if (!instance) {
@@ -502,9 +505,12 @@ static int __init sun3_scsi_probe(struct platform_device *pdev)
goto fail_alloc;
}
- instance->io_port = (unsigned long)ioaddr;
instance->irq = irq->start;
+ hostdata = shost_priv(instance);
+ hostdata->base = mem->start;
+ hostdata->io = ioaddr;
+
error = NCR5380_init(instance, host_flags);
if (error)
goto fail_init;
@@ -552,13 +558,15 @@ fail_init:
fail_alloc:
if (udc_regs)
dvma_free(udc_regs);
- iounmap(sun3_scsi_regp);
+ iounmap(ioaddr);
return error;
}
static int __exit sun3_scsi_remove(struct platform_device *pdev)
{
struct Scsi_Host *instance = platform_get_drvdata(pdev);
+ struct NCR5380_hostdata *hostdata = shost_priv(instance);
+ void __iomem *ioaddr = hostdata->io;
scsi_remove_host(instance);
free_irq(instance->irq, instance);
@@ -566,7 +574,7 @@ static int __exit sun3_scsi_remove(struct platform_device *pdev)
scsi_host_put(instance);
if (udc_regs)
dvma_free(udc_regs);
- iounmap(sun3_scsi_regp);
+ iounmap(ioaddr);
return 0;
}
diff --git a/drivers/scsi/ufs/ufs-qcom.c b/drivers/scsi/ufs/ufs-qcom.c
index 3aedf73f1131..abe617372661 100644
--- a/drivers/scsi/ufs/ufs-qcom.c
+++ b/drivers/scsi/ufs/ufs-qcom.c
@@ -23,6 +23,7 @@
#include "unipro.h"
#include "ufs-qcom.h"
#include "ufshci.h"
+#include "ufs_quirks.h"
#define UFS_QCOM_DEFAULT_DBG_PRINT_EN \
(UFS_QCOM_DBG_PRINT_REGS_EN | UFS_QCOM_DBG_PRINT_TEST_BUS_EN)
@@ -1031,6 +1032,34 @@ out:
return ret;
}
+static int ufs_qcom_quirk_host_pa_saveconfigtime(struct ufs_hba *hba)
+{
+ int err;
+ u32 pa_vs_config_reg1;
+
+ err = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_VS_CONFIG_REG1),
+ &pa_vs_config_reg1);
+ if (err)
+ goto out;
+
+ /* Allow extension of MSB bits of PA_SaveConfigTime attribute */
+ err = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_VS_CONFIG_REG1),
+ (pa_vs_config_reg1 | (1 << 12)));
+
+out:
+ return err;
+}
+
+static int ufs_qcom_apply_dev_quirks(struct ufs_hba *hba)
+{
+ int err = 0;
+
+ if (hba->dev_quirks & UFS_DEVICE_QUIRK_HOST_PA_SAVECONFIGTIME)
+ err = ufs_qcom_quirk_host_pa_saveconfigtime(hba);
+
+ return err;
+}
+
static u32 ufs_qcom_get_ufs_hci_version(struct ufs_hba *hba)
{
struct ufs_qcom_host *host = ufshcd_get_variant(hba);
@@ -1094,10 +1123,12 @@ static void ufs_qcom_set_caps(struct ufs_hba *hba)
* ufs_qcom_setup_clocks - enables/disable clocks
* @hba: host controller instance
* @on: If true, enable clocks else disable them.
+ * @status: PRE_CHANGE or POST_CHANGE notify
*
* Returns 0 on success, non-zero on failure.
*/
-static int ufs_qcom_setup_clocks(struct ufs_hba *hba, bool on)
+static int ufs_qcom_setup_clocks(struct ufs_hba *hba, bool on,
+ enum ufs_notify_change_status status)
{
struct ufs_qcom_host *host = ufshcd_get_variant(hba);
int err;
@@ -1111,18 +1142,9 @@ static int ufs_qcom_setup_clocks(struct ufs_hba *hba, bool on)
if (!host)
return 0;
- if (on) {
- err = ufs_qcom_phy_enable_iface_clk(host->generic_phy);
- if (err)
- goto out;
+ if (on && (status == POST_CHANGE)) {
+ phy_power_on(host->generic_phy);
- err = ufs_qcom_phy_enable_ref_clk(host->generic_phy);
- if (err) {
- dev_err(hba->dev, "%s enable phy ref clock failed, err=%d\n",
- __func__, err);
- ufs_qcom_phy_disable_iface_clk(host->generic_phy);
- goto out;
- }
/* enable the device ref clock for HS mode*/
if (ufshcd_is_hs_mode(&hba->pwr_info))
ufs_qcom_dev_ref_clk_ctrl(host, true);
@@ -1130,14 +1152,15 @@ static int ufs_qcom_setup_clocks(struct ufs_hba *hba, bool on)
if (vote == host->bus_vote.min_bw_vote)
ufs_qcom_update_bus_bw_vote(host);
- } else {
-
- /* M-PHY RMMI interface clocks can be turned off */
- ufs_qcom_phy_disable_iface_clk(host->generic_phy);
- if (!ufs_qcom_is_link_active(hba))
+ } else if (!on && (status == PRE_CHANGE)) {
+ if (!ufs_qcom_is_link_active(hba)) {
/* disable device ref_clk */
ufs_qcom_dev_ref_clk_ctrl(host, false);
+ /* powering off PHY during aggressive clk gating */
+ phy_power_off(host->generic_phy);
+ }
+
vote = host->bus_vote.min_bw_vote;
}
@@ -1146,7 +1169,6 @@ static int ufs_qcom_setup_clocks(struct ufs_hba *hba, bool on)
dev_err(hba->dev, "%s: set bus vote failed %d\n",
__func__, err);
-out:
return err;
}
@@ -1201,15 +1223,24 @@ static int ufs_qcom_init(struct ufs_hba *hba)
*/
host->generic_phy = devm_phy_get(dev, "ufsphy");
- if (IS_ERR(host->generic_phy)) {
+ if (host->generic_phy == ERR_PTR(-EPROBE_DEFER)) {
+ /*
+ * The UFS driver might be probed before the phy driver.
+ * In that case, return -EPROBE_DEFER so probing is retried.
+ */
+ err = -EPROBE_DEFER;
+ dev_warn(dev, "%s: required phy hasn't probed yet, err = %d\n",
+ __func__, err);
+ goto out_variant_clear;
+ } else if (IS_ERR(host->generic_phy)) {
err = PTR_ERR(host->generic_phy);
dev_err(dev, "%s: PHY get failed %d\n", __func__, err);
- goto out;
+ goto out_variant_clear;
}
err = ufs_qcom_bus_register(host);
if (err)
- goto out_host_free;
+ goto out_variant_clear;
ufs_qcom_get_controller_revision(hba, &host->hw_ver.major,
&host->hw_ver.minor, &host->hw_ver.step);
@@ -1254,7 +1285,7 @@ static int ufs_qcom_init(struct ufs_hba *hba)
ufs_qcom_set_caps(hba);
ufs_qcom_advertise_quirks(hba);
- ufs_qcom_setup_clocks(hba, true);
+ ufs_qcom_setup_clocks(hba, true, POST_CHANGE);
if (hba->dev->id < MAX_UFS_QCOM_HOSTS)
ufs_qcom_hosts[hba->dev->id] = host;
@@ -1274,8 +1305,7 @@ out_disable_phy:
phy_power_off(host->generic_phy);
out_unregister_bus:
phy_exit(host->generic_phy);
-out_host_free:
- devm_kfree(dev, host);
+out_variant_clear:
ufshcd_set_variant(hba, NULL);
out:
return err;
@@ -1287,6 +1317,7 @@ static void ufs_qcom_exit(struct ufs_hba *hba)
ufs_qcom_disable_lane_clks(host);
phy_power_off(host->generic_phy);
+ phy_exit(host->generic_phy);
}
static int ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(struct ufs_hba *hba,
@@ -1439,7 +1470,8 @@ static void ufs_qcom_print_hw_debug_reg_all(struct ufs_hba *hba,
reg = ufs_qcom_get_debug_reg_offset(host, UFS_UFS_DBG_RD_PRDT_RAM);
print_fn(hba, reg, 64, "UFS_UFS_DBG_RD_PRDT_RAM ", priv);
- ufshcd_writel(hba, (reg & ~UFS_BIT(17)), REG_UFS_CFG1);
+ /* clear bit 17 - UTP_DBG_RAMS_EN */
+ ufshcd_rmwl(hba, UFS_BIT(17), 0, REG_UFS_CFG1);
reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_UAWM);
print_fn(hba, reg, 4, "UFS_DBG_RD_REG_UAWM ", priv);
@@ -1616,6 +1648,7 @@ static struct ufs_hba_variant_ops ufs_hba_qcom_vops = {
.hce_enable_notify = ufs_qcom_hce_enable_notify,
.link_startup_notify = ufs_qcom_link_startup_notify,
.pwr_change_notify = ufs_qcom_pwr_change_notify,
+ .apply_dev_quirks = ufs_qcom_apply_dev_quirks,
.suspend = ufs_qcom_suspend,
.resume = ufs_qcom_resume,
.dbg_register_dump = ufs_qcom_dump_dbg_regs,
diff --git a/drivers/scsi/ufs/ufs-qcom.h b/drivers/scsi/ufs/ufs-qcom.h
index a19307a57ce2..fe517cd7dac3 100644
--- a/drivers/scsi/ufs/ufs-qcom.h
+++ b/drivers/scsi/ufs/ufs-qcom.h
@@ -142,6 +142,7 @@ enum ufs_qcom_phy_init_type {
UFS_QCOM_DBG_PRINT_TEST_BUS_EN)
/* QUniPro Vendor specific attributes */
+#define PA_VS_CONFIG_REG1 0x9000
#define DME_VS_CORE_CLK_CTRL 0xD002
/* bit and mask definitions for DME_VS_CORE_CLK_CTRL attribute */
#define DME_VS_CORE_CLK_CTRL_CORE_CLK_DIV_EN_BIT BIT(8)
diff --git a/drivers/scsi/ufs/ufs.h b/drivers/scsi/ufs/ufs.h
index 845b874e2977..8e6709a3fb6b 100644
--- a/drivers/scsi/ufs/ufs.h
+++ b/drivers/scsi/ufs/ufs.h
@@ -46,6 +46,7 @@
#define QUERY_DESC_HDR_SIZE 2
#define QUERY_OSF_SIZE (GENERAL_UPIU_REQUEST_SIZE - \
(sizeof(struct utp_upiu_header)))
+#define RESPONSE_UPIU_SENSE_DATA_LENGTH 18
#define UPIU_HEADER_DWORD(byte3, byte2, byte1, byte0)\
cpu_to_be32((byte3 << 24) | (byte2 << 16) |\
@@ -162,7 +163,7 @@ enum desc_header_offset {
};
enum ufs_desc_max_size {
- QUERY_DESC_DEVICE_MAX_SIZE = 0x1F,
+ QUERY_DESC_DEVICE_MAX_SIZE = 0x40,
QUERY_DESC_CONFIGURAION_MAX_SIZE = 0x90,
QUERY_DESC_UNIT_MAX_SIZE = 0x23,
QUERY_DESC_INTERCONNECT_MAX_SIZE = 0x06,
@@ -416,7 +417,7 @@ struct utp_cmd_rsp {
__be32 residual_transfer_count;
__be32 reserved[4];
__be16 sense_data_len;
- u8 sense_data[18];
+ u8 sense_data[RESPONSE_UPIU_SENSE_DATA_LENGTH];
};
/**
diff --git a/drivers/scsi/ufs/ufs_quirks.h b/drivers/scsi/ufs/ufs_quirks.h
index 22f881e9253a..08b799d4efcc 100644
--- a/drivers/scsi/ufs/ufs_quirks.h
+++ b/drivers/scsi/ufs/ufs_quirks.h
@@ -128,26 +128,23 @@ struct ufs_dev_fix {
*/
#define UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM (1 << 6)
+/*
+ * Some UFS devices require host PA_TACTIVATE to be lower than device
+ * PA_TACTIVATE; enabling this quirk ensures this.
+ */
+#define UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE (1 << 7)
+
+/*
+ * The maximum value of PA_SaveConfigTime is 250 (10 us), but this is not
+ * enough for some vendors: a gear switch from PWM to HS may fail even at
+ * this maximum. The host controller can issue a gear switch as part of
+ * error recovery, where no software delay can help, so PA_SaveConfigTime
+ * needs to be increased to more than 32 us per the vendor recommendation.
+ */
+#define UFS_DEVICE_QUIRK_HOST_PA_SAVECONFIGTIME (1 << 8)
+
struct ufs_hba;
void ufs_advertise_fixup_device(struct ufs_hba *hba);
-static struct ufs_dev_fix ufs_fixups[] = {
- /* UFS cards deviations table */
- UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
- UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM),
- UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL, UFS_DEVICE_NO_VCCQ),
- UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
- UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS),
- UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
- UFS_DEVICE_NO_FASTAUTO),
- UFS_FIX(UFS_VENDOR_TOSHIBA, UFS_ANY_MODEL,
- UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM),
- UFS_FIX(UFS_VENDOR_TOSHIBA, "THGLF2G9C8KBADG",
- UFS_DEVICE_QUIRK_PA_TACTIVATE),
- UFS_FIX(UFS_VENDOR_TOSHIBA, "THGLF2G9D8KBADG",
- UFS_DEVICE_QUIRK_PA_TACTIVATE),
- UFS_FIX(UFS_VENDOR_SKHYNIX, UFS_ANY_MODEL, UFS_DEVICE_NO_VCCQ),
-
- END_FIX
-};
#endif /* UFS_QUIRKS_H_ */
diff --git a/drivers/scsi/ufs/ufshcd-pci.c b/drivers/scsi/ufs/ufshcd-pci.c
index d15eaa466c59..52b546fb509b 100644
--- a/drivers/scsi/ufs/ufshcd-pci.c
+++ b/drivers/scsi/ufs/ufshcd-pci.c
@@ -104,6 +104,7 @@ static void ufshcd_pci_remove(struct pci_dev *pdev)
pm_runtime_forbid(&pdev->dev);
pm_runtime_get_noresume(&pdev->dev);
ufshcd_remove(hba);
+ ufshcd_dealloc_host(hba);
}
/**
@@ -147,6 +148,7 @@ ufshcd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
err = ufshcd_init(hba, mmio_base, pdev->irq);
if (err) {
dev_err(&pdev->dev, "Initialization failed\n");
+ ufshcd_dealloc_host(hba);
return err;
}
diff --git a/drivers/scsi/ufs/ufshcd-pltfrm.c b/drivers/scsi/ufs/ufshcd-pltfrm.c
index db53f38da864..a72a4ba78125 100644
--- a/drivers/scsi/ufs/ufshcd-pltfrm.c
+++ b/drivers/scsi/ufs/ufshcd-pltfrm.c
@@ -163,7 +163,7 @@ static int ufshcd_populate_vreg(struct device *dev, const char *name,
if (ret) {
dev_err(dev, "%s: unable to find %s err %d\n",
__func__, prop_name, ret);
- goto out_free;
+ goto out;
}
vreg->min_uA = 0;
@@ -185,9 +185,6 @@ static int ufshcd_populate_vreg(struct device *dev, const char *name,
goto out;
-out_free:
- devm_kfree(dev, vreg);
- vreg = NULL;
out:
if (!ret)
*out_vreg = vreg;
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index 05c745663c10..a2c2817fc566 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -45,6 +45,8 @@
#include "ufs_quirks.h"
#include "unipro.h"
+#define UFSHCD_REQ_SENSE_SIZE 18
+
#define UFSHCD_ENABLE_INTRS (UTP_TRANSFER_REQ_COMPL |\
UTP_TASK_REQ_COMPL |\
UFSHCD_ERROR_MASK)
@@ -57,15 +59,9 @@
#define NOP_OUT_TIMEOUT 30 /* msecs */
/* Query request retries */
-#define QUERY_REQ_RETRIES 10
+#define QUERY_REQ_RETRIES 3
/* Query request timeout */
-#define QUERY_REQ_TIMEOUT 30 /* msec */
-/*
- * Query request timeout for fDeviceInit flag
- * fDeviceInit query response time for some devices is too large that default
- * QUERY_REQ_TIMEOUT may not be enough for such devices.
- */
-#define QUERY_FDEVICEINIT_REQ_TIMEOUT 600 /* msec */
+#define QUERY_REQ_TIMEOUT 1500 /* 1.5 seconds */
/* Task management command timeout */
#define TM_CMD_TIMEOUT 100 /* msecs */
@@ -123,6 +119,7 @@ enum {
UFSHCD_STATE_RESET,
UFSHCD_STATE_ERROR,
UFSHCD_STATE_OPERATIONAL,
+ UFSHCD_STATE_EH_SCHEDULED,
};
/* UFSHCD error handling flags */
@@ -188,6 +185,30 @@ ufs_get_pm_lvl_to_link_pwr_state(enum ufs_pm_level lvl)
return ufs_pm_lvl_states[lvl].link_state;
}
+static struct ufs_dev_fix ufs_fixups[] = {
+ /* UFS cards deviations table */
+ UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
+ UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM),
+ UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL, UFS_DEVICE_NO_VCCQ),
+ UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
+ UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS),
+ UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
+ UFS_DEVICE_NO_FASTAUTO),
+ UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
+ UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE),
+ UFS_FIX(UFS_VENDOR_TOSHIBA, UFS_ANY_MODEL,
+ UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM),
+ UFS_FIX(UFS_VENDOR_TOSHIBA, "THGLF2G9C8KBADG",
+ UFS_DEVICE_QUIRK_PA_TACTIVATE),
+ UFS_FIX(UFS_VENDOR_TOSHIBA, "THGLF2G9D8KBADG",
+ UFS_DEVICE_QUIRK_PA_TACTIVATE),
+ UFS_FIX(UFS_VENDOR_SKHYNIX, UFS_ANY_MODEL, UFS_DEVICE_NO_VCCQ),
+ UFS_FIX(UFS_VENDOR_SKHYNIX, UFS_ANY_MODEL,
+ UFS_DEVICE_QUIRK_HOST_PA_SAVECONFIGTIME),
+
+ END_FIX
+};
+
static void ufshcd_tmc_handler(struct ufs_hba *hba);
static void ufshcd_async_scan(void *data, async_cookie_t cookie);
static int ufshcd_reset_and_restore(struct ufs_hba *hba);
@@ -291,10 +312,24 @@ int ufshcd_wait_for_register(struct ufs_hba *hba, u32 reg, u32 mask,
*/
static inline u32 ufshcd_get_intr_mask(struct ufs_hba *hba)
{
- if (hba->ufs_version == UFSHCI_VERSION_10)
- return INTERRUPT_MASK_ALL_VER_10;
- else
- return INTERRUPT_MASK_ALL_VER_11;
+ u32 intr_mask = 0;
+
+ switch (hba->ufs_version) {
+ case UFSHCI_VERSION_10:
+ intr_mask = INTERRUPT_MASK_ALL_VER_10;
+ break;
+ case UFSHCI_VERSION_11:
+ case UFSHCI_VERSION_20:
+ intr_mask = INTERRUPT_MASK_ALL_VER_11;
+ break;
+ case UFSHCI_VERSION_21:
+ default:
+ intr_mask = INTERRUPT_MASK_ALL_VER_21;
+ }
+
+ return intr_mask;
}
/**
@@ -598,6 +633,20 @@ static bool ufshcd_is_unipro_pa_params_tuning_req(struct ufs_hba *hba)
return false;
}
+static void ufshcd_suspend_clkscaling(struct ufs_hba *hba)
+{
+ if (ufshcd_is_clkscaling_enabled(hba)) {
+ devfreq_suspend_device(hba->devfreq);
+ hba->clk_scaling.window_start_t = 0;
+ }
+}
+
+static void ufshcd_resume_clkscaling(struct ufs_hba *hba)
+{
+ if (ufshcd_is_clkscaling_enabled(hba))
+ devfreq_resume_device(hba->devfreq);
+}
+
static void ufshcd_ungate_work(struct work_struct *work)
{
int ret;
@@ -631,8 +680,7 @@ static void ufshcd_ungate_work(struct work_struct *work)
hba->clk_gating.is_suspended = false;
}
unblock_reqs:
- if (ufshcd_is_clkscaling_enabled(hba))
- devfreq_resume_device(hba->devfreq);
+ ufshcd_resume_clkscaling(hba);
scsi_unblock_requests(hba->host);
}
@@ -660,6 +708,21 @@ int ufshcd_hold(struct ufs_hba *hba, bool async)
start:
switch (hba->clk_gating.state) {
case CLKS_ON:
+ /*
+ * Wait for the ungate work to complete if in progress.
+ * Though the clocks may be in the ON state, the link could
+ * still be in hibern8 if hibern8 is allowed during clock
+ * gating. Make sure we exit hibern8 in addition to having
+ * the clocks ON.
+ */
+ if (ufshcd_can_hibern8_during_gating(hba) &&
+ ufshcd_is_link_hibern8(hba)) {
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+ flush_work(&hba->clk_gating.ungate_work);
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ goto start;
+ }
break;
case REQ_CLKS_OFF:
if (cancel_delayed_work(&hba->clk_gating.gate_work)) {
@@ -709,7 +772,14 @@ static void ufshcd_gate_work(struct work_struct *work)
unsigned long flags;
spin_lock_irqsave(hba->host->host_lock, flags);
- if (hba->clk_gating.is_suspended) {
+ /*
+ * If a request to cancel this work arrived while it was pending,
+ * the gating state will have been set to REQ_CLKS_ON. In that
+ * case, save time by skipping the gating work and exiting after
+ * setting the clock state back to CLKS_ON.
+ */
+ if (hba->clk_gating.is_suspended ||
+ (hba->clk_gating.state == REQ_CLKS_ON)) {
hba->clk_gating.state = CLKS_ON;
goto rel_lock;
}
@@ -731,10 +801,7 @@ static void ufshcd_gate_work(struct work_struct *work)
ufshcd_set_link_hibern8(hba);
}
- if (ufshcd_is_clkscaling_enabled(hba)) {
- devfreq_suspend_device(hba->devfreq);
- hba->clk_scaling.window_start_t = 0;
- }
+ ufshcd_suspend_clkscaling(hba);
if (!ufshcd_is_link_active(hba))
ufshcd_setup_clocks(hba, false);
@@ -878,6 +945,8 @@ void ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag)
ufshcd_clk_scaling_start_busy(hba);
__set_bit(task_tag, &hba->outstanding_reqs);
ufshcd_writel(hba, 1 << task_tag, REG_UTP_TRANSFER_REQ_DOOR_BELL);
+ /* Make sure that doorbell is committed immediately */
+ wmb();
}
/**
@@ -889,10 +958,14 @@ static inline void ufshcd_copy_sense_data(struct ufshcd_lrb *lrbp)
int len;
if (lrbp->sense_buffer &&
ufshcd_get_rsp_upiu_data_seg_len(lrbp->ucd_rsp_ptr)) {
+ int len_to_copy;
+
len = be16_to_cpu(lrbp->ucd_rsp_ptr->sr.sense_data_len);
+ len_to_copy = min_t(int, RESPONSE_UPIU_SENSE_DATA_LENGTH, len);
+
memcpy(lrbp->sense_buffer,
lrbp->ucd_rsp_ptr->sr.sense_data,
- min_t(int, len, SCSI_SENSE_BUFFERSIZE));
+ min_t(int, len_to_copy, UFSHCD_REQ_SENSE_SIZE));
}
}
@@ -1088,7 +1161,7 @@ ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
*
* Returns 0 in case of success, non-zero value in case of failure
*/
-static int ufshcd_map_sg(struct ufshcd_lrb *lrbp)
+static int ufshcd_map_sg(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
{
struct ufshcd_sg_entry *prd_table;
struct scatterlist *sg;
@@ -1102,8 +1175,13 @@ static int ufshcd_map_sg(struct ufshcd_lrb *lrbp)
return sg_segments;
if (sg_segments) {
- lrbp->utr_descriptor_ptr->prd_table_length =
- cpu_to_le16((u16) (sg_segments));
+ if (hba->quirks & UFSHCD_QUIRK_PRDT_BYTE_GRAN)
+ lrbp->utr_descriptor_ptr->prd_table_length =
+ cpu_to_le16((u16)(sg_segments *
+ sizeof(struct ufshcd_sg_entry)));
+ else
+ lrbp->utr_descriptor_ptr->prd_table_length =
+ cpu_to_le16((u16) (sg_segments));
prd_table = (struct ufshcd_sg_entry *)lrbp->ucd_prdt_ptr;
@@ -1410,6 +1488,7 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
switch (hba->ufshcd_state) {
case UFSHCD_STATE_OPERATIONAL:
break;
+ case UFSHCD_STATE_EH_SCHEDULED:
case UFSHCD_STATE_RESET:
err = SCSI_MLQUEUE_HOST_BUSY;
goto out_unlock;
@@ -1457,7 +1536,7 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
WARN_ON(lrbp->cmd);
lrbp->cmd = cmd;
- lrbp->sense_bufflen = SCSI_SENSE_BUFFERSIZE;
+ lrbp->sense_bufflen = UFSHCD_REQ_SENSE_SIZE;
lrbp->sense_buffer = cmd->sense_buffer;
lrbp->task_tag = tag;
lrbp->lun = ufshcd_scsi_to_upiu_lun(cmd->device->lun);
@@ -1465,15 +1544,18 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
ufshcd_comp_scsi_upiu(hba, lrbp);
- err = ufshcd_map_sg(lrbp);
+ err = ufshcd_map_sg(hba, lrbp);
if (err) {
lrbp->cmd = NULL;
clear_bit_unlock(tag, &hba->lrb_in_use);
goto out;
}
+ /* Make sure descriptors are ready before ringing the doorbell */
+ wmb();
/* issue command to the controller */
spin_lock_irqsave(hba->host->host_lock, flags);
+ ufshcd_vops_setup_xfer_req(hba, tag, (lrbp->cmd ? true : false));
ufshcd_send_command(hba, tag);
out_unlock:
spin_unlock_irqrestore(hba->host->host_lock, flags);
@@ -1581,6 +1663,8 @@ static int ufshcd_wait_for_dev_cmd(struct ufs_hba *hba,
time_left = wait_for_completion_timeout(hba->dev_cmd.complete,
msecs_to_jiffies(max_timeout));
+ /* Make sure descriptors are ready before ringing the doorbell */
+ wmb();
spin_lock_irqsave(hba->host->host_lock, flags);
hba->dev_cmd.complete = NULL;
if (likely(time_left)) {
@@ -1683,6 +1767,7 @@ static int ufshcd_exec_dev_cmd(struct ufs_hba *hba,
/* Make sure descriptors are ready before ringing the doorbell */
wmb();
spin_lock_irqsave(hba->host->host_lock, flags);
+ ufshcd_vops_setup_xfer_req(hba, tag, (lrbp->cmd ? true : false));
ufshcd_send_command(hba, tag);
spin_unlock_irqrestore(hba->host->host_lock, flags);
@@ -1789,9 +1874,6 @@ int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
goto out_unlock;
}
- if (idn == QUERY_FLAG_IDN_FDEVICEINIT)
- timeout = QUERY_FDEVICEINIT_REQ_TIMEOUT;
-
err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, timeout);
if (err) {
@@ -1861,8 +1943,8 @@ static int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode,
err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);
if (err) {
- dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, err = %d\n",
- __func__, opcode, idn, err);
+ dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, index %d, err = %d\n",
+ __func__, opcode, idn, index, err);
goto out_unlock;
}
@@ -1961,8 +2043,8 @@ static int __ufshcd_query_descriptor(struct ufs_hba *hba,
err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);
if (err) {
- dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, err = %d\n",
- __func__, opcode, idn, err);
+ dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, index %d, err = %d\n",
+ __func__, opcode, idn, index, err);
goto out_unlock;
}
@@ -2055,18 +2137,41 @@ static int ufshcd_read_desc_param(struct ufs_hba *hba,
desc_id, desc_index, 0, desc_buf,
&buff_len);
- if (ret || (buff_len < ufs_query_desc_max_size[desc_id]) ||
- (desc_buf[QUERY_DESC_LENGTH_OFFSET] !=
- ufs_query_desc_max_size[desc_id])
- || (desc_buf[QUERY_DESC_DESC_TYPE_OFFSET] != desc_id)) {
- dev_err(hba->dev, "%s: Failed reading descriptor. desc_id %d param_offset %d buff_len %d ret %d",
- __func__, desc_id, param_offset, buff_len, ret);
- if (!ret)
- ret = -EINVAL;
+ if (ret) {
+ dev_err(hba->dev, "%s: Failed reading descriptor. desc_id %d, desc_index %d, param_offset %d, ret %d",
+ __func__, desc_id, desc_index, param_offset, ret);
goto out;
}
+ /* Sanity check */
+ if (desc_buf[QUERY_DESC_DESC_TYPE_OFFSET] != desc_id) {
+ dev_err(hba->dev, "%s: invalid desc_id %d in descriptor header",
+ __func__, desc_buf[QUERY_DESC_DESC_TYPE_OFFSET]);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ /*
+ * While reading variable size descriptors (like string descriptor),
+ * some UFS devices may report the "LENGTH" (field in "Transaction
+ * Specific fields" of Query Response UPIU) same as what was requested
+ * in Query Request UPIU instead of reporting the actual size of the
+ * variable size descriptor.
+ * It is safe to ignore the "LENGTH" field for variable size
+ * descriptors, since the length can always be derived from the
+ * descriptor header fields. Hence this change imposes the length
+ * match check only for fixed size descriptors (for which we always
+ * request the correct size as part of Query Request UPIU).
+ */
+ if ((desc_id != QUERY_DESC_IDN_STRING) &&
+ (buff_len != desc_buf[QUERY_DESC_LENGTH_OFFSET])) {
+ dev_err(hba->dev, "%s: desc_buf length mismatch: buff_len %d, buff_len(desc_header) %d",
+ __func__, buff_len, desc_buf[QUERY_DESC_LENGTH_OFFSET]);
+ ret = -EINVAL;
+ goto out;
+ }
+
if (is_kmalloc)
memcpy(param_read_buf, &desc_buf[param_offset], param_size);
out:
@@ -2088,7 +2193,18 @@ static inline int ufshcd_read_power_desc(struct ufs_hba *hba,
u8 *buf,
u32 size)
{
- return ufshcd_read_desc(hba, QUERY_DESC_IDN_POWER, 0, buf, size);
+ int err = 0;
+ int retries;
+
+ for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
+ /* Read the descriptor */
+ err = ufshcd_read_desc(hba, QUERY_DESC_IDN_POWER, 0, buf, size);
+ if (!err)
+ break;
+ dev_dbg(hba->dev, "%s: error %d retrying\n", __func__, err);
+ }
+
+ return err;
}
int ufshcd_read_device_desc(struct ufs_hba *hba, u8 *buf, u32 size)
@@ -2320,12 +2436,21 @@ static void ufshcd_host_memory_configure(struct ufs_hba *hba)
cpu_to_le32(upper_32_bits(cmd_desc_element_addr));
/* Response upiu and prdt offset should be in double words */
- utrdlp[i].response_upiu_offset =
+ if (hba->quirks & UFSHCD_QUIRK_PRDT_BYTE_GRAN) {
+ utrdlp[i].response_upiu_offset =
+ cpu_to_le16(response_offset);
+ utrdlp[i].prd_table_offset =
+ cpu_to_le16(prdt_offset);
+ utrdlp[i].response_upiu_length =
+ cpu_to_le16(ALIGNED_UPIU_SIZE);
+ } else {
+ utrdlp[i].response_upiu_offset =
cpu_to_le16((response_offset >> 2));
- utrdlp[i].prd_table_offset =
+ utrdlp[i].prd_table_offset =
cpu_to_le16((prdt_offset >> 2));
- utrdlp[i].response_upiu_length =
+ utrdlp[i].response_upiu_length =
cpu_to_le16(ALIGNED_UPIU_SIZE >> 2);
+ }
hba->lrb[i].utr_descriptor_ptr = (utrdlp + i);
hba->lrb[i].ucd_req_ptr =
@@ -2429,10 +2554,10 @@ int ufshcd_dme_set_attr(struct ufs_hba *hba, u32 attr_sel,
set, UIC_GET_ATTR_ID(attr_sel), mib_val, ret);
} while (ret && peer && --retries);
- if (!retries)
+ if (ret)
dev_err(hba->dev, "%s: attr-id 0x%x val 0x%x failed %d retries\n",
- set, UIC_GET_ATTR_ID(attr_sel), mib_val,
- retries);
+ set, UIC_GET_ATTR_ID(attr_sel), mib_val,
+ UFS_UIC_COMMAND_RETRIES - retries);
return ret;
}
@@ -2496,9 +2621,10 @@ int ufshcd_dme_get_attr(struct ufs_hba *hba, u32 attr_sel,
get, UIC_GET_ATTR_ID(attr_sel), ret);
} while (ret && peer && --retries);
- if (!retries)
+ if (ret)
dev_err(hba->dev, "%s: attr-id 0x%x failed %d retries\n",
- get, UIC_GET_ATTR_ID(attr_sel), retries);
+ get, UIC_GET_ATTR_ID(attr_sel),
+ UFS_UIC_COMMAND_RETRIES - retries);
if (mib_val && !ret)
*mib_val = uic_cmd.argument3;
@@ -2651,6 +2777,8 @@ static int __ufshcd_uic_hibern8_enter(struct ufs_hba *hba)
int ret;
struct uic_command uic_cmd = {0};
+ ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_ENTER, PRE_CHANGE);
+
uic_cmd.command = UIC_CMD_DME_HIBER_ENTER;
ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
@@ -2664,7 +2792,9 @@ static int __ufshcd_uic_hibern8_enter(struct ufs_hba *hba)
*/
if (ufshcd_link_recovery(hba))
ret = -ENOLINK;
- }
+ } else
+ ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_ENTER,
+ POST_CHANGE);
return ret;
}
@@ -2687,13 +2817,17 @@ static int ufshcd_uic_hibern8_exit(struct ufs_hba *hba)
struct uic_command uic_cmd = {0};
int ret;
+ ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_EXIT, PRE_CHANGE);
+
uic_cmd.command = UIC_CMD_DME_HIBER_EXIT;
ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
if (ret) {
dev_err(hba->dev, "%s: hibern8 exit failed. ret = %d\n",
__func__, ret);
ret = ufshcd_link_recovery(hba);
- }
+ } else
+ ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_EXIT,
+ POST_CHANGE);
return ret;
}
@@ -2725,8 +2859,8 @@ static int ufshcd_get_max_pwr_mode(struct ufs_hba *hba)
if (hba->max_pwr_info.is_valid)
return 0;
- pwr_info->pwr_tx = FASTAUTO_MODE;
- pwr_info->pwr_rx = FASTAUTO_MODE;
+ pwr_info->pwr_tx = FAST_MODE;
+ pwr_info->pwr_rx = FAST_MODE;
pwr_info->hs_rate = PA_HS_MODE_B;
/* Get the connected lane count */
@@ -2757,7 +2891,7 @@ static int ufshcd_get_max_pwr_mode(struct ufs_hba *hba)
__func__, pwr_info->gear_rx);
return -EINVAL;
}
- pwr_info->pwr_rx = SLOWAUTO_MODE;
+ pwr_info->pwr_rx = SLOW_MODE;
}
ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR),
@@ -2770,7 +2904,7 @@ static int ufshcd_get_max_pwr_mode(struct ufs_hba *hba)
__func__, pwr_info->gear_tx);
return -EINVAL;
}
- pwr_info->pwr_tx = SLOWAUTO_MODE;
+ pwr_info->pwr_tx = SLOW_MODE;
}
hba->max_pwr_info.is_valid = true;
@@ -3090,7 +3224,16 @@ static int ufshcd_link_startup(struct ufs_hba *hba)
{
int ret;
int retries = DME_LINKSTARTUP_RETRIES;
+ bool link_startup_again = false;
+
+ /*
+ * If UFS device isn't active then we will have to issue link startup
+ * 2 times to make sure the device state move to active.
+ */
+ if (!ufshcd_is_ufs_dev_active(hba))
+ link_startup_again = true;
+link_startup:
do {
ufshcd_vops_link_startup_notify(hba, PRE_CHANGE);
@@ -3116,6 +3259,12 @@ static int ufshcd_link_startup(struct ufs_hba *hba)
/* failed to get the link up... retire */
goto out;
+ if (link_startup_again) {
+ link_startup_again = false;
+ retries = DME_LINKSTARTUP_RETRIES;
+ goto link_startup;
+ }
+
if (hba->quirks & UFSHCD_QUIRK_BROKEN_LCC) {
ret = ufshcd_disable_device_tx_lcc(hba);
if (ret)
@@ -3181,16 +3330,24 @@ static void ufshcd_set_queue_depth(struct scsi_device *sdev)
{
int ret = 0;
u8 lun_qdepth;
+ int retries;
struct ufs_hba *hba;
hba = shost_priv(sdev->host);
lun_qdepth = hba->nutrs;
- ret = ufshcd_read_unit_desc_param(hba,
- ufshcd_scsi_to_upiu_lun(sdev->lun),
- UNIT_DESC_PARAM_LU_Q_DEPTH,
- &lun_qdepth,
- sizeof(lun_qdepth));
+ for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
+ /* Read the descriptor */
+ ret = ufshcd_read_unit_desc_param(hba,
+ ufshcd_scsi_to_upiu_lun(sdev->lun),
+ UNIT_DESC_PARAM_LU_Q_DEPTH,
+ &lun_qdepth,
+ sizeof(lun_qdepth));
+ if (!ret || ret == -ENOTSUPP)
+ break;
+
+ dev_dbg(hba->dev, "%s: error %d retrying\n", __func__, ret);
+ }
/* Some WLUN doesn't support unit descriptor */
if (ret == -EOPNOTSUPP)
@@ -4097,6 +4254,17 @@ static void ufshcd_update_uic_error(struct ufs_hba *hba)
{
u32 reg;
+ /* PHY layer lane error */
+ reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_PHY_ADAPTER_LAYER);
+ /* Ignore LINERESET indication, as this is not an error */
+ if ((reg & UIC_PHY_ADAPTER_LAYER_ERROR) &&
+ (reg & UIC_PHY_ADAPTER_LAYER_LANE_ERR_MASK))
+ /*
+ * To know whether this error is fatal, the DB timeout
+ * would have to be checked, but that error is handled separately.
+ */
+ dev_dbg(hba->dev, "%s: UIC Lane error reported\n", __func__);
+
/* PA_INIT_ERROR is fatal and needs UIC reset */
reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DATA_LINK_LAYER);
if (reg & UIC_DATA_LINK_LAYER_ERROR_PA_INIT)
@@ -4158,7 +4326,7 @@ static void ufshcd_check_errors(struct ufs_hba *hba)
/* block commands from scsi mid-layer */
scsi_block_requests(hba->host);
- hba->ufshcd_state = UFSHCD_STATE_ERROR;
+ hba->ufshcd_state = UFSHCD_STATE_EH_SCHEDULED;
schedule_work(&hba->eh_work);
}
}
@@ -4311,6 +4479,8 @@ static int ufshcd_issue_tm_cmd(struct ufs_hba *hba, int lun_id, int task_id,
task_req_upiup->input_param1 = cpu_to_be32(lun_id);
task_req_upiup->input_param2 = cpu_to_be32(task_id);
+ ufshcd_vops_setup_task_mgmt(hba, free_slot, tm_function);
+
/* send command to the controller */
__set_bit(free_slot, &hba->outstanding_tasks);
@@ -4318,6 +4488,8 @@ static int ufshcd_issue_tm_cmd(struct ufs_hba *hba, int lun_id, int task_id,
wmb();
ufshcd_writel(hba, 1 << free_slot, REG_UTP_TASK_REQ_DOOR_BELL);
+ /* Make sure that doorbell is committed immediately */
+ wmb();
spin_unlock_irqrestore(host->host_lock, flags);
@@ -4722,6 +4894,24 @@ out:
return icc_level;
}
+static int ufshcd_set_icc_levels_attr(struct ufs_hba *hba, u32 icc_level)
+{
+ int ret = 0;
+ int retries;
+
+ for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
+ /* write attribute */
+ ret = ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
+ QUERY_ATTR_IDN_ACTIVE_ICC_LVL, 0, 0, &icc_level);
+ if (!ret)
+ break;
+
+ dev_dbg(hba->dev, "%s: failed with error %d\n", __func__, ret);
+ }
+
+ return ret;
+}
+
static void ufshcd_init_icc_levels(struct ufs_hba *hba)
{
int ret;
@@ -4742,9 +4932,8 @@ static void ufshcd_init_icc_levels(struct ufs_hba *hba)
dev_dbg(hba->dev, "%s: setting icc_level 0x%x",
__func__, hba->init_prefetch_data.icc_level);
- ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
- QUERY_ATTR_IDN_ACTIVE_ICC_LVL, 0, 0,
- &hba->init_prefetch_data.icc_level);
+ ret = ufshcd_set_icc_levels_attr(hba,
+ hba->init_prefetch_data.icc_level);
if (ret)
dev_err(hba->dev,
@@ -4965,6 +5154,76 @@ out:
return ret;
}
+/**
+ * ufshcd_quirk_tune_host_pa_tactivate - Ensures that host PA_TACTIVATE is
+ * less than device PA_TACTIVATE time.
+ * @hba: per-adapter instance
+ *
+ * Some UFS devices require host PA_TACTIVATE to be lower than device
+ * PA_TACTIVATE, we need to enable UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE quirk
+ * for such devices.
+ *
+ * Returns zero on success, non-zero error value on failure.
+ */
+static int ufshcd_quirk_tune_host_pa_tactivate(struct ufs_hba *hba)
+{
+ int ret = 0;
+ u32 granularity, peer_granularity;
+ u32 pa_tactivate, peer_pa_tactivate;
+ u32 pa_tactivate_us, peer_pa_tactivate_us;
+ u8 gran_to_us_table[] = {1, 4, 8, 16, 32, 100};
+
+ ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_GRANULARITY),
+ &granularity);
+ if (ret)
+ goto out;
+
+ ret = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_GRANULARITY),
+ &peer_granularity);
+ if (ret)
+ goto out;
+
+ if ((granularity < PA_GRANULARITY_MIN_VAL) ||
+ (granularity > PA_GRANULARITY_MAX_VAL)) {
+ dev_err(hba->dev, "%s: invalid host PA_GRANULARITY %d",
+ __func__, granularity);
+ return -EINVAL;
+ }
+
+ if ((peer_granularity < PA_GRANULARITY_MIN_VAL) ||
+ (peer_granularity > PA_GRANULARITY_MAX_VAL)) {
+ dev_err(hba->dev, "%s: invalid device PA_GRANULARITY %d",
+ __func__, peer_granularity);
+ return -EINVAL;
+ }
+
+ ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_TACTIVATE), &pa_tactivate);
+ if (ret)
+ goto out;
+
+ ret = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_TACTIVATE),
+ &peer_pa_tactivate);
+ if (ret)
+ goto out;
+
+ pa_tactivate_us = pa_tactivate * gran_to_us_table[granularity - 1];
+ peer_pa_tactivate_us = peer_pa_tactivate *
+ gran_to_us_table[peer_granularity - 1];
+
+ if (pa_tactivate_us > peer_pa_tactivate_us) {
+ u32 new_peer_pa_tactivate;
+
+ new_peer_pa_tactivate = pa_tactivate_us /
+ gran_to_us_table[peer_granularity - 1];
+ new_peer_pa_tactivate++;
+ ret = ufshcd_dme_peer_set(hba, UIC_ARG_MIB(PA_TACTIVATE),
+ new_peer_pa_tactivate);
+ }
+
+out:
+ return ret;
+}
+
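gran_to_us_table[] maps the PA_GRANULARITY codes 1..6 to time units in microseconds, so the comparison above happens in a common unit before the device value is rounded up past the host value. A runnable standalone sketch of the same arithmetic with hypothetical sample values:

    #include <stdio.h>

    int main(void)
    {
        static const unsigned gran_to_us[] = {1, 4, 8, 16, 32, 100};
        unsigned host_gran = 4, host_tact = 2;  /* 2 * 16 us = 32 us */
        unsigned peer_gran = 2, peer_tact = 5;  /* 5 *  4 us = 20 us */
        unsigned host_us = host_tact * gran_to_us[host_gran - 1];
        unsigned peer_us = peer_tact * gran_to_us[peer_gran - 1];

        if (host_us > peer_us) {
            /* round up so the device value ends up strictly larger */
            unsigned new_peer = host_us / gran_to_us[peer_gran - 1] + 1;
            printf("raise device PA_TACTIVATE to %u units (%u us)\n",
                   new_peer, new_peer * gran_to_us[peer_gran - 1]);
        }
        return 0;
    }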
static void ufshcd_tune_unipro_params(struct ufs_hba *hba)
{
if (ufshcd_is_unipro_pa_params_tuning_req(hba)) {
@@ -4975,6 +5234,11 @@ static void ufshcd_tune_unipro_params(struct ufs_hba *hba)
if (hba->dev_quirks & UFS_DEVICE_QUIRK_PA_TACTIVATE)
/* set 1ms timeout for PA_TACTIVATE */
ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE), 10);
+
+ if (hba->dev_quirks & UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE)
+ ufshcd_quirk_tune_host_pa_tactivate(hba);
+
+ ufshcd_vops_apply_dev_quirks(hba);
}
/**
@@ -5027,9 +5291,11 @@ static int ufshcd_probe_hba(struct ufs_hba *hba)
__func__);
} else {
ret = ufshcd_config_pwr_mode(hba, &hba->max_pwr_info.info);
- if (ret)
+ if (ret) {
dev_err(hba->dev, "%s: Failed setting power mode, err = %d\n",
__func__, ret);
+ goto out;
+ }
}
/* set the state as operational after switching to desired gear */
@@ -5062,8 +5328,7 @@ static int ufshcd_probe_hba(struct ufs_hba *hba)
hba->is_init_prefetch = true;
/* Resume devfreq after UFS device is detected */
- if (ufshcd_is_clkscaling_enabled(hba))
- devfreq_resume_device(hba->devfreq);
+ ufshcd_resume_clkscaling(hba);
out:
/*
@@ -5389,6 +5654,10 @@ static int __ufshcd_setup_clocks(struct ufs_hba *hba, bool on,
if (!head || list_empty(head))
goto out;
+ ret = ufshcd_vops_setup_clocks(hba, on, PRE_CHANGE);
+ if (ret)
+ return ret;
+
list_for_each_entry(clki, head, list) {
if (!IS_ERR_OR_NULL(clki->clk)) {
if (skip_ref_clk && !strcmp(clki->name, "ref_clk"))
@@ -5410,7 +5679,10 @@ static int __ufshcd_setup_clocks(struct ufs_hba *hba, bool on,
}
}
- ret = ufshcd_vops_setup_clocks(hba, on);
+ ret = ufshcd_vops_setup_clocks(hba, on, POST_CHANGE);
+ if (ret)
+ return ret;
+
out:
if (ret) {
list_for_each_entry(clki, head, list) {
@@ -5500,8 +5772,6 @@ static void ufshcd_variant_hba_exit(struct ufs_hba *hba)
if (!hba->vops)
return;
- ufshcd_vops_setup_clocks(hba, false);
-
ufshcd_vops_setup_regulators(hba, false);
ufshcd_vops_exit(hba);
@@ -5564,6 +5834,7 @@ static void ufshcd_hba_exit(struct ufs_hba *hba)
if (hba->is_powered) {
ufshcd_variant_hba_exit(hba);
ufshcd_setup_vreg(hba, false);
+ ufshcd_suspend_clkscaling(hba);
ufshcd_setup_clocks(hba, false);
ufshcd_setup_hba_vreg(hba, false);
hba->is_powered = false;
@@ -5577,20 +5848,20 @@ ufshcd_send_request_sense(struct ufs_hba *hba, struct scsi_device *sdp)
0,
0,
0,
- SCSI_SENSE_BUFFERSIZE,
+ UFSHCD_REQ_SENSE_SIZE,
0};
char *buffer;
int ret;
- buffer = kzalloc(SCSI_SENSE_BUFFERSIZE, GFP_KERNEL);
+ buffer = kzalloc(UFSHCD_REQ_SENSE_SIZE, GFP_KERNEL);
if (!buffer) {
ret = -ENOMEM;
goto out;
}
ret = scsi_execute_req_flags(sdp, cmd, DMA_FROM_DEVICE, buffer,
- SCSI_SENSE_BUFFERSIZE, NULL,
- msecs_to_jiffies(1000), 3, NULL, REQ_PM);
+ UFSHCD_REQ_SENSE_SIZE, NULL,
+ msecs_to_jiffies(1000), 3, NULL, 0, RQF_PM);
if (ret)
pr_err("%s: failed with err %d\n", __func__, ret);
@@ -5652,11 +5923,11 @@ static int ufshcd_set_dev_pwr_mode(struct ufs_hba *hba,
/*
* This function is generally called from the power management
- * callbacks hence set the REQ_PM flag so that it doesn't resume the
+ * callbacks, hence set the RQF_PM flag so that it doesn't resume
* already suspended children.
*/
ret = scsi_execute_req_flags(sdp, cmd, DMA_NONE, NULL, 0, &sshdr,
- START_STOP_TIMEOUT, 0, NULL, REQ_PM);
+ START_STOP_TIMEOUT, 0, NULL, 0, RQF_PM);
if (ret) {
sdev_printk(KERN_WARNING, sdp,
"START_STOP failed for power mode: %d, result %x\n",
@@ -5766,7 +6037,6 @@ static int ufshcd_vreg_set_hpm(struct ufs_hba *hba)
!hba->dev_info.is_lu_power_on_wp) {
ret = ufshcd_setup_vreg(hba, true);
} else if (!ufshcd_is_ufs_dev_active(hba)) {
- ret = ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, true);
if (!ret && !ufshcd_is_link_active(hba)) {
ret = ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq);
if (ret)
@@ -5775,6 +6045,7 @@ static int ufshcd_vreg_set_hpm(struct ufs_hba *hba)
if (ret)
goto vccq_lpm;
}
+ ret = ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, true);
}
goto out;
@@ -5839,6 +6110,8 @@ static int ufshcd_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
ufshcd_hold(hba, false);
hba->clk_gating.is_suspended = true;
+ ufshcd_suspend_clkscaling(hba);
+
if (req_dev_pwr_mode == UFS_ACTIVE_PWR_MODE &&
req_link_state == UIC_LINK_ACTIVE_STATE) {
goto disable_clks;
@@ -5846,12 +6119,12 @@ static int ufshcd_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
if ((req_dev_pwr_mode == hba->curr_dev_pwr_mode) &&
(req_link_state == hba->uic_link_state))
- goto out;
+ goto enable_gating;
/* UFS device & link must be active before we enter in this function */
if (!ufshcd_is_ufs_dev_active(hba) || !ufshcd_is_link_active(hba)) {
ret = -EINVAL;
- goto out;
+ goto enable_gating;
}
if (ufshcd_is_runtime_pm(pm_op)) {
@@ -5888,15 +6161,6 @@ static int ufshcd_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
disable_clks:
/*
- * The clock scaling needs access to controller registers. Hence, Wait
- * for pending clock scaling work to be done before clocks are
- * turned off.
- */
- if (ufshcd_is_clkscaling_enabled(hba)) {
- devfreq_suspend_device(hba->devfreq);
- hba->clk_scaling.window_start_t = 0;
- }
- /*
* Call vendor specific suspend callback. As these callbacks may access
* vendor specific host controller register space, call them while the
* host clocks are still on.
@@ -5905,10 +6169,6 @@ disable_clks:
if (ret)
goto set_link_active;
- ret = ufshcd_vops_setup_clocks(hba, false);
- if (ret)
- goto vops_resume;
-
if (!ufshcd_is_link_active(hba))
ufshcd_setup_clocks(hba, false);
else
@@ -5925,9 +6185,8 @@ disable_clks:
ufshcd_hba_vreg_set_lpm(hba);
goto out;
-vops_resume:
- ufshcd_vops_resume(hba, pm_op);
set_link_active:
+ ufshcd_resume_clkscaling(hba);
ufshcd_vreg_set_hpm(hba);
if (ufshcd_is_link_hibern8(hba) && !ufshcd_uic_hibern8_exit(hba))
ufshcd_set_link_active(hba);
@@ -5937,6 +6196,7 @@ set_dev_active:
if (!ufshcd_set_dev_pwr_mode(hba, UFS_ACTIVE_PWR_MODE))
ufshcd_disable_auto_bkops(hba);
enable_gating:
+ ufshcd_resume_clkscaling(hba);
hba->clk_gating.is_suspended = false;
ufshcd_release(hba);
out:
@@ -6015,8 +6275,7 @@ static int ufshcd_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
ufshcd_urgent_bkops(hba);
hba->clk_gating.is_suspended = false;
- if (ufshcd_is_clkscaling_enabled(hba))
- devfreq_resume_device(hba->devfreq);
+ ufshcd_resume_clkscaling(hba);
/* Schedule clock gating in case of no access to UFS device yet */
ufshcd_release(hba);
@@ -6030,6 +6289,7 @@ disable_vreg:
ufshcd_vreg_set_lpm(hba);
disable_irq_and_vops_clks:
ufshcd_disable_irq(hba);
+ ufshcd_suspend_clkscaling(hba);
ufshcd_setup_clocks(hba, false);
out:
hba->pm_op_in_progress = 0;
@@ -6052,16 +6312,13 @@ int ufshcd_system_suspend(struct ufs_hba *hba)
if (!hba || !hba->is_powered)
return 0;
- if (pm_runtime_suspended(hba->dev)) {
- if (hba->rpm_lvl == hba->spm_lvl)
- /*
- * There is possibility that device may still be in
- * active state during the runtime suspend.
- */
- if ((ufs_get_pm_lvl_to_dev_pwr_mode(hba->spm_lvl) ==
- hba->curr_dev_pwr_mode) && !hba->auto_bkops_enabled)
- goto out;
+ if ((ufs_get_pm_lvl_to_dev_pwr_mode(hba->spm_lvl) ==
+ hba->curr_dev_pwr_mode) &&
+ (ufs_get_pm_lvl_to_link_pwr_state(hba->spm_lvl) ==
+ hba->uic_link_state))
+ goto out;
+ if (pm_runtime_suspended(hba->dev)) {
/*
* UFS device and/or UFS link low power states during runtime
* suspend seem to be different from what is expected during
@@ -6092,7 +6349,10 @@ EXPORT_SYMBOL(ufshcd_system_suspend);
int ufshcd_system_resume(struct ufs_hba *hba)
{
- if (!hba || !hba->is_powered || pm_runtime_suspended(hba->dev))
+ if (!hba)
+ return -EINVAL;
+
+ if (!hba->is_powered || pm_runtime_suspended(hba->dev))
/*
* Let the runtime resume take care of resuming
* if runtime suspended.
@@ -6113,7 +6373,10 @@ EXPORT_SYMBOL(ufshcd_system_resume);
*/
int ufshcd_runtime_suspend(struct ufs_hba *hba)
{
- if (!hba || !hba->is_powered)
+ if (!hba)
+ return -EINVAL;
+
+ if (!hba->is_powered)
return 0;
return ufshcd_suspend(hba, UFS_RUNTIME_PM);
@@ -6143,10 +6406,13 @@ EXPORT_SYMBOL(ufshcd_runtime_suspend);
*/
int ufshcd_runtime_resume(struct ufs_hba *hba)
{
- if (!hba || !hba->is_powered)
+ if (!hba)
+ return -EINVAL;
+
+ if (!hba->is_powered)
return 0;
- else
- return ufshcd_resume(hba, UFS_RUNTIME_PM);
+
+ return ufshcd_resume(hba, UFS_RUNTIME_PM);
}
EXPORT_SYMBOL(ufshcd_runtime_resume);
@@ -6198,11 +6464,7 @@ void ufshcd_remove(struct ufs_hba *hba)
ufshcd_disable_intr(hba, hba->intr_mask);
ufshcd_hba_stop(hba, true);
- scsi_host_put(hba->host);
-
ufshcd_exit_clk_gating(hba);
- if (ufshcd_is_clkscaling_enabled(hba))
- devfreq_remove_device(hba->devfreq);
ufshcd_hba_exit(hba);
}
EXPORT_SYMBOL_GPL(ufshcd_remove);
@@ -6324,15 +6586,47 @@ static int ufshcd_devfreq_target(struct device *dev,
{
int err = 0;
struct ufs_hba *hba = dev_get_drvdata(dev);
+ bool release_clk_hold = false;
+ unsigned long irq_flags;
if (!ufshcd_is_clkscaling_enabled(hba))
return -EINVAL;
+ spin_lock_irqsave(hba->host->host_lock, irq_flags);
+ if (ufshcd_eh_in_progress(hba)) {
+ spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
+ return 0;
+ }
+
+ if (ufshcd_is_clkgating_allowed(hba) &&
+ (hba->clk_gating.state != CLKS_ON)) {
+ if (cancel_delayed_work(&hba->clk_gating.gate_work)) {
+ /* hold the vote until the scaling work is completed */
+ hba->clk_gating.active_reqs++;
+ release_clk_hold = true;
+ hba->clk_gating.state = CLKS_ON;
+ } else {
+ /*
+ * Clock gating work is running in parallel,
+ * so skip the scaling work to avoid a deadlock
+ * between the two.
+ */
+ spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
+ return 0;
+ }
+ }
+ spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
+
if (*freq == UINT_MAX)
err = ufshcd_scale_clks(hba, true);
else if (*freq == 0)
err = ufshcd_scale_clks(hba, false);
+ spin_lock_irqsave(hba->host->host_lock, irq_flags);
+ if (release_clk_hold)
+ __ufshcd_release(hba);
+ spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
+
return err;
}
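The locking dance above takes a temporary clock-gating vote for the duration of the scaling operation: a pending gate work item is cancelled and a reference held; if the gate work is already executing, scaling is skipped rather than risking a deadlock with it. A condensed standalone sketch of that decision (names and types hypothetical):

    #include <stdbool.h>

    struct gating {
        int active_reqs;     /* votes keeping the clocks on */
        bool clks_on;
    };

    /* Returns false when gate work is already running and the
     * scaling operation should simply be skipped. */
    static bool hold_clocks(struct gating *g,
                            bool (*cancel_gate_work)(void), bool *held)
    {
        *held = false;
        if (!g->clks_on) {
            if (!cancel_gate_work())
                return false;    /* gate work in flight: bail out */
            g->active_reqs++;    /* hold the vote while scaling */
            g->clks_on = true;
            *held = true;
        }
        return true;
    }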
@@ -6413,6 +6707,13 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
/* Get UFS version supported by the controller */
hba->ufs_version = ufshcd_get_ufs_version(hba);
+ if ((hba->ufs_version != UFSHCI_VERSION_10) &&
+ (hba->ufs_version != UFSHCI_VERSION_11) &&
+ (hba->ufs_version != UFSHCI_VERSION_20) &&
+ (hba->ufs_version != UFSHCI_VERSION_21))
+ dev_err(hba->dev, "invalid UFS version 0x%x\n",
+ hba->ufs_version);
+
/* Get Interrupt bit mask per version */
hba->intr_mask = ufshcd_get_intr_mask(hba);
@@ -6498,7 +6799,7 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
}
if (ufshcd_is_clkscaling_enabled(hba)) {
- hba->devfreq = devfreq_add_device(dev, &ufs_devfreq_profile,
+ hba->devfreq = devm_devfreq_add_device(dev, &ufs_devfreq_profile,
"simple_ondemand", NULL);
if (IS_ERR(hba->devfreq)) {
dev_err(hba->dev, "Unable to register with devfreq %ld\n",
@@ -6507,18 +6808,19 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
goto out_remove_scsi_host;
}
/* Suspend devfreq until the UFS device is detected */
- devfreq_suspend_device(hba->devfreq);
- hba->clk_scaling.window_start_t = 0;
+ ufshcd_suspend_clkscaling(hba);
}
/* Hold auto suspend until async scan completes */
pm_runtime_get_sync(dev);
/*
- * The device-initialize-sequence hasn't been invoked yet.
- * Set the device to power-off state
+ * We assume the boot stage did not leave the device in a
+ * sleep/power-down state before the kernel started. This
+ * assumption avoids doing link startup twice during
+ * ufshcd_probe_hba().
*/
- ufshcd_set_ufs_dev_poweroff(hba);
+ ufshcd_set_ufs_dev_active(hba);
async_schedule(ufshcd_async_scan, hba);
@@ -6530,7 +6832,6 @@ exit_gating:
ufshcd_exit_clk_gating(hba);
out_disable:
hba->is_irq_enabled = false;
- scsi_host_put(host);
ufshcd_hba_exit(hba);
out_error:
return err;
diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h
index 430bef111293..08cd26ed2382 100644
--- a/drivers/scsi/ufs/ufshcd.h
+++ b/drivers/scsi/ufs/ufshcd.h
@@ -261,6 +261,12 @@ struct ufs_pwr_mode_info {
* @pwr_change_notify: called before and after a power mode change
* is carried out to allow vendor specific capabilities
* to be set.
+ * @setup_xfer_req: called before any transfer request is issued,
+ * to allow vendor-specific per-request setup
+ * @setup_task_mgmt: called before any task management request is issued,
+ * to allow vendor-specific per-request setup
+ * @hibern8_notify: called around hibern8 enter/exit
+ * @apply_dev_quirks: called to apply device specific quirks
* @suspend: called during host controller PM callback
* @resume: called during host controller PM callback
* @dbg_register_dump: used to dump controller debug information
@@ -273,7 +279,8 @@ struct ufs_hba_variant_ops {
u32 (*get_ufs_hci_version)(struct ufs_hba *);
int (*clk_scale_notify)(struct ufs_hba *, bool,
enum ufs_notify_change_status);
- int (*setup_clocks)(struct ufs_hba *, bool);
+ int (*setup_clocks)(struct ufs_hba *, bool,
+ enum ufs_notify_change_status);
int (*setup_regulators)(struct ufs_hba *, bool);
int (*hce_enable_notify)(struct ufs_hba *,
enum ufs_notify_change_status);
@@ -283,6 +290,11 @@ struct ufs_hba_variant_ops {
enum ufs_notify_change_status status,
struct ufs_pa_layer_attr *,
struct ufs_pa_layer_attr *);
+ void (*setup_xfer_req)(struct ufs_hba *, int, bool);
+ void (*setup_task_mgmt)(struct ufs_hba *, int, u8);
+ void (*hibern8_notify)(struct ufs_hba *, enum uic_cmd_dme,
+ enum ufs_notify_change_status);
+ int (*apply_dev_quirks)(struct ufs_hba *);
int (*suspend)(struct ufs_hba *, enum ufs_pm_op);
int (*resume)(struct ufs_hba *, enum ufs_pm_op);
void (*dbg_register_dump)(struct ufs_hba *hba);
@@ -474,6 +486,12 @@ struct ufs_hba {
*/
#define UFSHCD_QUIRK_BROKEN_UFS_HCI_VERSION UFS_BIT(5)
+ /*
+ * This quirk needs to be enabled if the host controller interprets
+ * the PRDTO and PRDTL values in the UTRD in units of bytes.
+ */
+ #define UFSHCD_QUIRK_PRDT_BYTE_GRAN UFS_BIT(7)
+
unsigned int quirks; /* Deviations from standard UFSHCI spec. */
/* Device deviations from standard UFS device spec. */
@@ -755,10 +773,11 @@ static inline int ufshcd_vops_clk_scale_notify(struct ufs_hba *hba,
return 0;
}
-static inline int ufshcd_vops_setup_clocks(struct ufs_hba *hba, bool on)
+static inline int ufshcd_vops_setup_clocks(struct ufs_hba *hba, bool on,
+ enum ufs_notify_change_status status)
{
if (hba->vops && hba->vops->setup_clocks)
- return hba->vops->setup_clocks(hba, on);
+ return hba->vops->setup_clocks(hba, on, status);
return 0;
}
@@ -799,6 +818,35 @@ static inline int ufshcd_vops_pwr_change_notify(struct ufs_hba *hba,
return -ENOTSUPP;
}
+static inline void ufshcd_vops_setup_xfer_req(struct ufs_hba *hba, int tag,
+ bool is_scsi_cmd)
+{
+ if (hba->vops && hba->vops->setup_xfer_req)
+ return hba->vops->setup_xfer_req(hba, tag, is_scsi_cmd);
+}
+
+static inline void ufshcd_vops_setup_task_mgmt(struct ufs_hba *hba,
+ int tag, u8 tm_function)
+{
+ if (hba->vops && hba->vops->setup_task_mgmt)
+ return hba->vops->setup_task_mgmt(hba, tag, tm_function);
+}
+
+static inline void ufshcd_vops_hibern8_notify(struct ufs_hba *hba,
+ enum uic_cmd_dme cmd,
+ enum ufs_notify_change_status status)
+{
+ if (hba->vops && hba->vops->hibern8_notify)
+ return hba->vops->hibern8_notify(hba, cmd, status);
+}
+
+static inline int ufshcd_vops_apply_dev_quirks(struct ufs_hba *hba)
+{
+ if (hba->vops && hba->vops->apply_dev_quirks)
+ return hba->vops->apply_dev_quirks(hba);
+ return 0;
+}
+
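All four new wrappers above follow the same shape as the existing ones: call the variant hook only if the vendor driver installed it, otherwise fall back to a benign default. The shape, reduced to a standalone sketch:

    struct ops {
        int (*hook)(void *ctx);
    };

    static inline int call_optional_hook(const struct ops *ops, void *ctx)
    {
        if (ops && ops->hook)
            return ops->hook(ctx);
        return 0;    /* a missing hook is not an error */
    }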
static inline int ufshcd_vops_suspend(struct ufs_hba *hba, enum ufs_pm_op op)
{
if (hba->vops && hba->vops->suspend)
diff --git a/drivers/scsi/ufs/ufshci.h b/drivers/scsi/ufs/ufshci.h
index 9599741ff606..8c5190e2e1c9 100644
--- a/drivers/scsi/ufs/ufshci.h
+++ b/drivers/scsi/ufs/ufshci.h
@@ -72,6 +72,10 @@ enum {
REG_UIC_COMMAND_ARG_1 = 0x94,
REG_UIC_COMMAND_ARG_2 = 0x98,
REG_UIC_COMMAND_ARG_3 = 0x9C,
+ REG_UFS_CCAP = 0x100,
+ REG_UFS_CRYPTOCAP = 0x104,
+
+ UFSHCI_CRYPTO_REG_SPACE_SIZE = 0x400,
};
/* Controller capability masks */
@@ -83,6 +87,8 @@ enum {
MASK_UIC_DME_TEST_MODE_SUPPORT = 0x04000000,
};
+#define UFS_MASK(mask, offset) ((mask) << (offset))
+
/* UFS Version 08h */
#define MINOR_VERSION_NUM_MASK UFS_MASK(0xFFFF, 0)
#define MAJOR_VERSION_NUM_MASK UFS_MASK(0xFFFF, 16)
@@ -166,6 +172,7 @@ enum {
/* UECPA - Host UIC Error Code PHY Adapter Layer 38h */
#define UIC_PHY_ADAPTER_LAYER_ERROR UFS_BIT(31)
#define UIC_PHY_ADAPTER_LAYER_ERROR_CODE_MASK 0x1F
+#define UIC_PHY_ADAPTER_LAYER_LANE_ERR_MASK 0xF
/* UECDL - Host UIC Error Code Data Link Layer 3Ch */
#define UIC_DATA_LINK_LAYER_ERROR UFS_BIT(31)
@@ -272,6 +279,9 @@ enum {
/* Interrupt disable mask for UFSHCI v1.1 */
INTERRUPT_MASK_ALL_VER_11 = 0x31FFF,
+
+ /* Interrupt disable mask for UFSHCI v2.1 */
+ INTERRUPT_MASK_ALL_VER_21 = 0x71FFF,
};
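The v2.1 mask differs from the v1.1 mask by a single bit, which a quick standalone check confirms; reading that bit as the crypto-engine error status that pairs with the crypto registers added above is an assumption, not something this header states:

    #include <stdio.h>

    int main(void)
    {
        unsigned v11 = 0x31FFF, v21 = 0x71FFF;

        printf("delta = 0x%x\n", v11 ^ v21);  /* prints 0x40000, bit 18 */
        return 0;
    }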
/*
diff --git a/drivers/scsi/ufs/unipro.h b/drivers/scsi/ufs/unipro.h
index eff8b5675575..23129d7b2678 100644
--- a/drivers/scsi/ufs/unipro.h
+++ b/drivers/scsi/ufs/unipro.h
@@ -123,6 +123,7 @@
#define PA_MAXRXHSGEAR 0x1587
#define PA_RXHSUNTERMCAP 0x15A5
#define PA_RXLSTERMCAP 0x15A6
+#define PA_GRANULARITY 0x15AA
#define PA_PACPREQTIMEOUT 0x1590
#define PA_PACPREQEOBTIMEOUT 0x1591
#define PA_HIBERN8TIME 0x15A7
@@ -158,6 +159,9 @@
#define VS_DEBUGOMC 0xD09E
#define VS_POWERSTATE 0xD083
+#define PA_GRANULARITY_MIN_VAL 1
+#define PA_GRANULARITY_MAX_VAL 6
+
/* PHY Adapter Protocol Constants */
#define PA_MAXDATALANES 4
diff --git a/drivers/scsi/xen-scsifront.c b/drivers/scsi/xen-scsifront.c
index 9dc8687bf048..9aa1fe1fc939 100644
--- a/drivers/scsi/xen-scsifront.c
+++ b/drivers/scsi/xen-scsifront.c
@@ -79,10 +79,13 @@
struct vscsifrnt_shadow {
/* command between backend and frontend */
unsigned char act;
+ uint8_t nr_segments;
uint16_t rqid;
+ uint16_t ref_rqid;
unsigned int nr_grants; /* number of grants in gref[] */
struct scsiif_request_segment *sg; /* scatter/gather elements */
+ struct scsiif_request_segment seg[VSCSIIF_SG_TABLESIZE];
/* State for the reset/abort path. */
wait_queue_head_t wq_reset; /* reset work queue */
@@ -172,68 +175,90 @@ static void scsifront_put_rqid(struct vscsifrnt_info *info, uint32_t id)
scsifront_wake_up(info);
}
-static struct vscsiif_request *scsifront_pre_req(struct vscsifrnt_info *info)
+static int scsifront_do_request(struct vscsifrnt_info *info,
+ struct vscsifrnt_shadow *shadow)
{
struct vscsiif_front_ring *ring = &(info->ring);
struct vscsiif_request *ring_req;
+ struct scsi_cmnd *sc = shadow->sc;
uint32_t id;
+ int i, notify;
+
+ if (RING_FULL(&info->ring))
+ return -EBUSY;
id = scsifront_get_rqid(info); /* use id in response */
if (id >= VSCSIIF_MAX_REQS)
- return NULL;
+ return -EBUSY;
- ring_req = RING_GET_REQUEST(&(info->ring), ring->req_prod_pvt);
+ info->shadow[id] = shadow;
+ shadow->rqid = id;
+ ring_req = RING_GET_REQUEST(&(info->ring), ring->req_prod_pvt);
ring->req_prod_pvt++;
- ring_req->rqid = (uint16_t)id;
+ ring_req->rqid = id;
+ ring_req->act = shadow->act;
+ ring_req->ref_rqid = shadow->ref_rqid;
+ ring_req->nr_segments = shadow->nr_segments;
- return ring_req;
-}
+ ring_req->id = sc->device->id;
+ ring_req->lun = sc->device->lun;
+ ring_req->channel = sc->device->channel;
+ ring_req->cmd_len = sc->cmd_len;
-static void scsifront_do_request(struct vscsifrnt_info *info)
-{
- struct vscsiif_front_ring *ring = &(info->ring);
- int notify;
+ BUG_ON(sc->cmd_len > VSCSIIF_MAX_COMMAND_SIZE);
+
+ memcpy(ring_req->cmnd, sc->cmnd, sc->cmd_len);
+
+ ring_req->sc_data_direction = (uint8_t)sc->sc_data_direction;
+ ring_req->timeout_per_command = sc->request->timeout / HZ;
+
+ for (i = 0; i < (shadow->nr_segments & ~VSCSIIF_SG_GRANT); i++)
+ ring_req->seg[i] = shadow->seg[i];
RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(ring, notify);
if (notify)
notify_remote_via_irq(info->irq);
+
+ return 0;
}
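scsifront_do_request() now refuses work in two cases: the shared ring is full, or no request id is free. The usual producer/consumer ring-full test hidden behind the RING_FULL() macro looks roughly like this standalone sketch (field names hypothetical):

    #include <stdbool.h>

    struct ring_state {
        unsigned int req_prod_pvt;  /* producer's private request index */
        unsigned int rsp_cons;      /* consumer's response index */
        unsigned int size;          /* slot count, a power of two */
    };

    static bool ring_full(const struct ring_state *r)
    {
        /* full when every slot holds a request not yet consumed */
        return r->req_prod_pvt - r->rsp_cons == r->size;
    }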
-static void scsifront_gnttab_done(struct vscsifrnt_info *info, uint32_t id)
+static void scsifront_gnttab_done(struct vscsifrnt_info *info,
+ struct vscsifrnt_shadow *shadow)
{
- struct vscsifrnt_shadow *s = info->shadow[id];
int i;
- if (s->sc->sc_data_direction == DMA_NONE)
+ if (shadow->sc->sc_data_direction == DMA_NONE)
return;
- for (i = 0; i < s->nr_grants; i++) {
- if (unlikely(gnttab_query_foreign_access(s->gref[i]) != 0)) {
+ for (i = 0; i < shadow->nr_grants; i++) {
+ if (unlikely(gnttab_query_foreign_access(shadow->gref[i]))) {
shost_printk(KERN_ALERT, info->host, KBUILD_MODNAME
"grant still in use by backend\n");
BUG();
}
- gnttab_end_foreign_access(s->gref[i], 0, 0UL);
+ gnttab_end_foreign_access(shadow->gref[i], 0, 0UL);
}
- kfree(s->sg);
+ kfree(shadow->sg);
}
static void scsifront_cdb_cmd_done(struct vscsifrnt_info *info,
struct vscsiif_response *ring_rsp)
{
+ struct vscsifrnt_shadow *shadow;
struct scsi_cmnd *sc;
uint32_t id;
uint8_t sense_len;
id = ring_rsp->rqid;
- sc = info->shadow[id]->sc;
+ shadow = info->shadow[id];
+ sc = shadow->sc;
BUG_ON(sc == NULL);
- scsifront_gnttab_done(info, id);
+ scsifront_gnttab_done(info, shadow);
scsifront_put_rqid(info, id);
sc->result = ring_rsp->rslt;
@@ -366,7 +391,6 @@ static void scsifront_finish_all(struct vscsifrnt_info *info)
static int map_data_for_request(struct vscsifrnt_info *info,
struct scsi_cmnd *sc,
- struct vscsiif_request *ring_req,
struct vscsifrnt_shadow *shadow)
{
grant_ref_t gref_head;
@@ -379,7 +403,6 @@ static int map_data_for_request(struct vscsifrnt_info *info,
struct scatterlist *sg;
struct scsiif_request_segment *seg;
- ring_req->nr_segments = 0;
if (sc->sc_data_direction == DMA_NONE || !data_len)
return 0;
@@ -398,7 +421,7 @@ static int map_data_for_request(struct vscsifrnt_info *info,
if (!shadow->sg)
return -ENOMEM;
}
- seg = shadow->sg ? : ring_req->seg;
+ seg = shadow->sg ? : shadow->seg;
err = gnttab_alloc_grant_references(seg_grants + data_grants,
&gref_head);
@@ -423,9 +446,9 @@ static int map_data_for_request(struct vscsifrnt_info *info,
info->dev->otherend_id,
xen_page_to_gfn(page), 1);
shadow->gref[ref_cnt] = ref;
- ring_req->seg[ref_cnt].gref = ref;
- ring_req->seg[ref_cnt].offset = (uint16_t)off;
- ring_req->seg[ref_cnt].length = (uint16_t)bytes;
+ shadow->seg[ref_cnt].gref = ref;
+ shadow->seg[ref_cnt].offset = (uint16_t)off;
+ shadow->seg[ref_cnt].length = (uint16_t)bytes;
page++;
len -= bytes;
@@ -473,44 +496,14 @@ static int map_data_for_request(struct vscsifrnt_info *info,
}
if (seg_grants)
- ring_req->nr_segments = VSCSIIF_SG_GRANT | seg_grants;
+ shadow->nr_segments = VSCSIIF_SG_GRANT | seg_grants;
else
- ring_req->nr_segments = (uint8_t)ref_cnt;
+ shadow->nr_segments = (uint8_t)ref_cnt;
shadow->nr_grants = ref_cnt;
return 0;
}
-static struct vscsiif_request *scsifront_command2ring(
- struct vscsifrnt_info *info, struct scsi_cmnd *sc,
- struct vscsifrnt_shadow *shadow)
-{
- struct vscsiif_request *ring_req;
-
- memset(shadow, 0, sizeof(*shadow));
-
- ring_req = scsifront_pre_req(info);
- if (!ring_req)
- return NULL;
-
- info->shadow[ring_req->rqid] = shadow;
- shadow->rqid = ring_req->rqid;
-
- ring_req->id = sc->device->id;
- ring_req->lun = sc->device->lun;
- ring_req->channel = sc->device->channel;
- ring_req->cmd_len = sc->cmd_len;
-
- BUG_ON(sc->cmd_len > VSCSIIF_MAX_COMMAND_SIZE);
-
- memcpy(ring_req->cmnd, sc->cmnd, sc->cmd_len);
-
- ring_req->sc_data_direction = (uint8_t)sc->sc_data_direction;
- ring_req->timeout_per_command = sc->request->timeout / HZ;
-
- return ring_req;
-}
-
static int scsifront_enter(struct vscsifrnt_info *info)
{
if (info->pause)
@@ -536,36 +529,25 @@ static int scsifront_queuecommand(struct Scsi_Host *shost,
struct scsi_cmnd *sc)
{
struct vscsifrnt_info *info = shost_priv(shost);
- struct vscsiif_request *ring_req;
struct vscsifrnt_shadow *shadow = scsi_cmd_priv(sc);
unsigned long flags;
int err;
- uint16_t rqid;
+
+ sc->result = 0;
+ memset(shadow, 0, sizeof(*shadow));
+
+ shadow->sc = sc;
+ shadow->act = VSCSIIF_ACT_SCSI_CDB;
spin_lock_irqsave(shost->host_lock, flags);
if (scsifront_enter(info)) {
spin_unlock_irqrestore(shost->host_lock, flags);
return SCSI_MLQUEUE_HOST_BUSY;
}
- if (RING_FULL(&info->ring))
- goto busy;
-
- ring_req = scsifront_command2ring(info, sc, shadow);
- if (!ring_req)
- goto busy;
-
- sc->result = 0;
-
- rqid = ring_req->rqid;
- ring_req->act = VSCSIIF_ACT_SCSI_CDB;
- shadow->sc = sc;
- shadow->act = VSCSIIF_ACT_SCSI_CDB;
-
- err = map_data_for_request(info, sc, ring_req, shadow);
+ err = map_data_for_request(info, sc, shadow);
if (err < 0) {
pr_debug("%s: err %d\n", __func__, err);
- scsifront_put_rqid(info, rqid);
scsifront_return(info);
spin_unlock_irqrestore(shost->host_lock, flags);
if (err == -ENOMEM)
@@ -575,7 +557,11 @@ static int scsifront_queuecommand(struct Scsi_Host *shost,
return 0;
}
- scsifront_do_request(info);
+ if (scsifront_do_request(info, shadow)) {
+ scsifront_gnttab_done(info, shadow);
+ goto busy;
+ }
+
scsifront_return(info);
spin_unlock_irqrestore(shost->host_lock, flags);
@@ -598,26 +584,30 @@ static int scsifront_action_handler(struct scsi_cmnd *sc, uint8_t act)
struct Scsi_Host *host = sc->device->host;
struct vscsifrnt_info *info = shost_priv(host);
struct vscsifrnt_shadow *shadow, *s = scsi_cmd_priv(sc);
- struct vscsiif_request *ring_req;
int err = 0;
- shadow = kmalloc(sizeof(*shadow), GFP_NOIO);
+ shadow = kzalloc(sizeof(*shadow), GFP_NOIO);
if (!shadow)
return FAILED;
+ shadow->act = act;
+ shadow->rslt_reset = RSLT_RESET_WAITING;
+ shadow->sc = sc;
+ shadow->ref_rqid = s->rqid;
+ init_waitqueue_head(&shadow->wq_reset);
+
spin_lock_irq(host->host_lock);
for (;;) {
- if (!RING_FULL(&info->ring)) {
- ring_req = scsifront_command2ring(info, sc, shadow);
- if (ring_req)
- break;
- }
- if (err || info->pause) {
- spin_unlock_irq(host->host_lock);
- kfree(shadow);
- return FAILED;
- }
+ if (scsifront_enter(info))
+ goto fail;
+
+ if (!scsifront_do_request(info, shadow))
+ break;
+
+ scsifront_return(info);
+ if (err)
+ goto fail;
info->wait_ring_available = 1;
spin_unlock_irq(host->host_lock);
err = wait_event_interruptible(info->wq_sync,
@@ -625,22 +615,6 @@ static int scsifront_action_handler(struct scsi_cmnd *sc, uint8_t act)
spin_lock_irq(host->host_lock);
}
- if (scsifront_enter(info)) {
- spin_unlock_irq(host->host_lock);
- return FAILED;
- }
-
- ring_req->act = act;
- ring_req->ref_rqid = s->rqid;
-
- shadow->act = act;
- shadow->rslt_reset = RSLT_RESET_WAITING;
- init_waitqueue_head(&shadow->wq_reset);
-
- ring_req->nr_segments = 0;
-
- scsifront_do_request(info);
-
spin_unlock_irq(host->host_lock);
err = wait_event_interruptible(shadow->wq_reset, shadow->wait_reset);
spin_lock_irq(host->host_lock);
@@ -659,6 +633,11 @@ static int scsifront_action_handler(struct scsi_cmnd *sc, uint8_t act)
scsifront_return(info);
spin_unlock_irq(host->host_lock);
return err;
+
+fail:
+ spin_unlock_irq(host->host_lock);
+ kfree(shadow);
+ return FAILED;
}
static int scsifront_eh_abort_handler(struct scsi_cmnd *sc)
@@ -1060,13 +1039,9 @@ static void scsifront_read_backend_params(struct xenbus_device *dev,
struct vscsifrnt_info *info)
{
unsigned int sg_grant, nr_segs;
- int ret;
struct Scsi_Host *host = info->host;
- ret = xenbus_scanf(XBT_NIL, dev->otherend, "feature-sg-grant", "%u",
- &sg_grant);
- if (ret != 1)
- sg_grant = 0;
+ sg_grant = xenbus_read_unsigned(dev->otherend, "feature-sg-grant", 0);
nr_segs = min_t(unsigned int, sg_grant, SG_ALL);
nr_segs = max_t(unsigned int, nr_segs, VSCSIIF_SG_TABLESIZE);
nr_segs = min_t(unsigned int, nr_segs,
diff --git a/drivers/sh/intc/virq.c b/drivers/sh/intc/virq.c
index e7899624aa0b..35bbe288ddb4 100644
--- a/drivers/sh/intc/virq.c
+++ b/drivers/sh/intc/virq.c
@@ -254,7 +254,7 @@ restart:
radix_tree_tag_clear(&d->tree, entry->enum_id,
INTC_TAG_VIRQ_NEEDS_ALLOC);
- radix_tree_replace_slot((void **)entries[i],
+ radix_tree_replace_slot(&d->tree, (void **)entries[i],
&intc_irq_xlate[irq]);
}
diff --git a/drivers/soc/Kconfig b/drivers/soc/Kconfig
index e6e90e80519a..f31bceb69c0d 100644
--- a/drivers/soc/Kconfig
+++ b/drivers/soc/Kconfig
@@ -1,8 +1,7 @@
menu "SOC (System On Chip) specific Drivers"
source "drivers/soc/bcm/Kconfig"
-source "drivers/soc/fsl/qbman/Kconfig"
-source "drivers/soc/fsl/qe/Kconfig"
+source "drivers/soc/fsl/Kconfig"
source "drivers/soc/mediatek/Kconfig"
source "drivers/soc/qcom/Kconfig"
source "drivers/soc/rockchip/Kconfig"
diff --git a/drivers/soc/fsl/Kconfig b/drivers/soc/fsl/Kconfig
new file mode 100644
index 000000000000..7a9fb9baa66d
--- /dev/null
+++ b/drivers/soc/fsl/Kconfig
@@ -0,0 +1,18 @@
+#
+# Freescale SOC drivers
+#
+
+source "drivers/soc/fsl/qbman/Kconfig"
+source "drivers/soc/fsl/qe/Kconfig"
+
+config FSL_GUTS
+ bool
+ select SOC_BUS
+ help
+	  The global utilities block controls power management, I/O device
+	  enabling, power-on reset (POR) configuration monitoring, alternate
+	  function selection for multiplexed signals, and clock control.
+	  This driver manages and provides access to the global utilities
+	  block. Initially, only reading the SVR and registering the SoC
+	  device are supported. Other GUTS accesses, such as reading the
+	  RCW, should eventually be moved into this driver as well.
diff --git a/drivers/soc/fsl/Makefile b/drivers/soc/fsl/Makefile
index 75e1f5334821..44b3bebef24a 100644
--- a/drivers/soc/fsl/Makefile
+++ b/drivers/soc/fsl/Makefile
@@ -5,3 +5,4 @@
obj-$(CONFIG_FSL_DPAA) += qbman/
obj-$(CONFIG_QUICC_ENGINE) += qe/
obj-$(CONFIG_CPM) += qe/
+obj-$(CONFIG_FSL_GUTS) += guts.o
diff --git a/drivers/soc/fsl/guts.c b/drivers/soc/fsl/guts.c
new file mode 100644
index 000000000000..6af7a11f09a5
--- /dev/null
+++ b/drivers/soc/fsl/guts.c
@@ -0,0 +1,239 @@
+/*
+ * Freescale QorIQ Platforms GUTS Driver
+ *
+ * Copyright (C) 2016 Freescale Semiconductor, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/of_fdt.h>
+#include <linux/sys_soc.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include <linux/fsl/guts.h>
+
+struct guts {
+ struct ccsr_guts __iomem *regs;
+ bool little_endian;
+};
+
+struct fsl_soc_die_attr {
+ char *die;
+ u32 svr;
+ u32 mask;
+};
+
+static struct guts *guts;
+static struct soc_device_attribute soc_dev_attr;
+static struct soc_device *soc_dev;
+
+/* SoC die attribute definition for QorIQ platform */
+static const struct fsl_soc_die_attr fsl_soc_die[] = {
+ /*
+ * Power Architecture-based SoCs T Series
+ */
+
+ /* Die: T4240, SoC: T4240/T4160/T4080 */
+ { .die = "T4240",
+ .svr = 0x82400000,
+ .mask = 0xfff00000,
+ },
+ /* Die: T1040, SoC: T1040/T1020/T1042/T1022 */
+ { .die = "T1040",
+ .svr = 0x85200000,
+ .mask = 0xfff00000,
+ },
+ /* Die: T2080, SoC: T2080/T2081 */
+ { .die = "T2080",
+ .svr = 0x85300000,
+ .mask = 0xfff00000,
+ },
+ /* Die: T1024, SoC: T1024/T1014/T1023/T1013 */
+ { .die = "T1024",
+ .svr = 0x85400000,
+ .mask = 0xfff00000,
+ },
+
+ /*
+ * ARM-based SoCs LS Series
+ */
+
+ /* Die: LS1043A, SoC: LS1043A/LS1023A */
+ { .die = "LS1043A",
+ .svr = 0x87920000,
+ .mask = 0xffff0000,
+ },
+ /* Die: LS2080A, SoC: LS2080A/LS2040A/LS2085A */
+ { .die = "LS2080A",
+ .svr = 0x87010000,
+ .mask = 0xff3f0000,
+ },
+ /* Die: LS1088A, SoC: LS1088A/LS1048A/LS1084A/LS1044A */
+ { .die = "LS1088A",
+ .svr = 0x87030000,
+ .mask = 0xff3f0000,
+ },
+ /* Die: LS1012A, SoC: LS1012A */
+ { .die = "LS1012A",
+ .svr = 0x87040000,
+ .mask = 0xffff0000,
+ },
+ /* Die: LS1046A, SoC: LS1046A/LS1026A */
+ { .die = "LS1046A",
+ .svr = 0x87070000,
+ .mask = 0xffff0000,
+ },
+ /* Die: LS2088A, SoC: LS2088A/LS2048A/LS2084A/LS2044A */
+ { .die = "LS2088A",
+ .svr = 0x87090000,
+ .mask = 0xff3f0000,
+ },
+ /* Die: LS1021A, SoC: LS1021A/LS1020A/LS1022A */
+ { .die = "LS1021A",
+ .svr = 0x87000000,
+ .mask = 0xfff70000,
+ },
+ { },
+};
+
+static const struct fsl_soc_die_attr *fsl_soc_die_match(
+ u32 svr, const struct fsl_soc_die_attr *matches)
+{
+ while (matches->svr) {
+ if (matches->svr == (svr & matches->mask))
+ return matches;
+ matches++;
+ }
+ return NULL;
+}
+
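Matching is done on the masked SVR, which is how several parts map onto one die entry. A runnable standalone check against the LS2080A entry from the table (the sample SVR value is hypothetical):

    #include <stdio.h>

    int main(void)
    {
        unsigned svr = 0x87010110;   /* hypothetical sample reading */
        unsigned die_svr = 0x87010000, mask = 0xff3f0000;

        if ((svr & mask) == die_svr)
            printf("die LS2080A, revision %u.%u\n",
                   (svr >> 4) & 0xf, svr & 0xf);  /* same formula as probe */
        return 0;
    }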
+u32 fsl_guts_get_svr(void)
+{
+ u32 svr = 0;
+
+ if (!guts || !guts->regs)
+ return svr;
+
+ if (guts->little_endian)
+ svr = ioread32(&guts->regs->svr);
+ else
+ svr = ioread32be(&guts->regs->svr);
+
+ return svr;
+}
+EXPORT_SYMBOL(fsl_guts_get_svr);
+
+static int fsl_guts_probe(struct platform_device *pdev)
+{
+ struct device_node *root, *np = pdev->dev.of_node;
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+ const struct fsl_soc_die_attr *soc_die;
+ const char *machine = NULL;
+ u32 svr;
+
+ /* Initialize guts */
+ guts = devm_kzalloc(dev, sizeof(*guts), GFP_KERNEL);
+ if (!guts)
+ return -ENOMEM;
+
+ guts->little_endian = of_property_read_bool(np, "little-endian");
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ guts->regs = devm_ioremap_resource(dev, res);
+ if (IS_ERR(guts->regs))
+ return PTR_ERR(guts->regs);
+
+ /* Register soc device */
+ root = of_find_node_by_path("/");
+ if (of_property_read_string(root, "model", &machine))
+ of_property_read_string_index(root, "compatible", 0, &machine);
+ of_node_put(root);
+ if (machine)
+ soc_dev_attr.machine = devm_kstrdup(dev, machine, GFP_KERNEL);
+
+ svr = fsl_guts_get_svr();
+ soc_die = fsl_soc_die_match(svr, fsl_soc_die);
+ if (soc_die) {
+ soc_dev_attr.family = devm_kasprintf(dev, GFP_KERNEL,
+ "QorIQ %s", soc_die->die);
+ } else {
+ soc_dev_attr.family = devm_kasprintf(dev, GFP_KERNEL, "QorIQ");
+ }
+ soc_dev_attr.soc_id = devm_kasprintf(dev, GFP_KERNEL,
+ "svr:0x%08x", svr);
+ soc_dev_attr.revision = devm_kasprintf(dev, GFP_KERNEL, "%d.%d",
+ (svr >> 4) & 0xf, svr & 0xf);
+
+ soc_dev = soc_device_register(&soc_dev_attr);
+ if (IS_ERR(soc_dev))
+ return PTR_ERR(soc_dev);
+
+ pr_info("Machine: %s\n", soc_dev_attr.machine);
+ pr_info("SoC family: %s\n", soc_dev_attr.family);
+ pr_info("SoC ID: %s, Revision: %s\n",
+ soc_dev_attr.soc_id, soc_dev_attr.revision);
+ return 0;
+}
+
+static int fsl_guts_remove(struct platform_device *dev)
+{
+ soc_device_unregister(soc_dev);
+ return 0;
+}
+
+/*
+ * Table of compatible strings for the device tree guts node
+ * on Freescale QorIQ SoCs.
+ */
+static const struct of_device_id fsl_guts_of_match[] = {
+ { .compatible = "fsl,qoriq-device-config-1.0", },
+ { .compatible = "fsl,qoriq-device-config-2.0", },
+ { .compatible = "fsl,p1010-guts", },
+ { .compatible = "fsl,p1020-guts", },
+ { .compatible = "fsl,p1021-guts", },
+ { .compatible = "fsl,p1022-guts", },
+ { .compatible = "fsl,p1023-guts", },
+ { .compatible = "fsl,p2020-guts", },
+ { .compatible = "fsl,bsc9131-guts", },
+ { .compatible = "fsl,bsc9132-guts", },
+ { .compatible = "fsl,mpc8536-guts", },
+ { .compatible = "fsl,mpc8544-guts", },
+ { .compatible = "fsl,mpc8548-guts", },
+ { .compatible = "fsl,mpc8568-guts", },
+ { .compatible = "fsl,mpc8569-guts", },
+ { .compatible = "fsl,mpc8572-guts", },
+ { .compatible = "fsl,ls1021a-dcfg", },
+ { .compatible = "fsl,ls1043a-dcfg", },
+ { .compatible = "fsl,ls2080a-dcfg", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, fsl_guts_of_match);
+
+static struct platform_driver fsl_guts_driver = {
+ .driver = {
+ .name = "fsl-guts",
+ .of_match_table = fsl_guts_of_match,
+ },
+ .probe = fsl_guts_probe,
+ .remove = fsl_guts_remove,
+};
+
+static int __init fsl_guts_init(void)
+{
+ return platform_driver_register(&fsl_guts_driver);
+}
+core_initcall(fsl_guts_init);
+
+static void __exit fsl_guts_exit(void)
+{
+ platform_driver_unregister(&fsl_guts_driver);
+}
+module_exit(fsl_guts_exit);
diff --git a/drivers/soc/fsl/qbman/bman.c b/drivers/soc/fsl/qbman/bman.c
index ffa48fdbb1a9..a3d6d7cfa929 100644
--- a/drivers/soc/fsl/qbman/bman.c
+++ b/drivers/soc/fsl/qbman/bman.c
@@ -167,12 +167,12 @@ struct bm_portal {
/* Cache-inhibited register access. */
static inline u32 bm_in(struct bm_portal *p, u32 offset)
{
- return __raw_readl(p->addr.ci + offset);
+ return be32_to_cpu(__raw_readl(p->addr.ci + offset));
}
static inline void bm_out(struct bm_portal *p, u32 offset, u32 val)
{
- __raw_writel(val, p->addr.ci + offset);
+ __raw_writel(cpu_to_be32(val), p->addr.ci + offset);
}
/* Cache Enabled Portal Access */
@@ -188,7 +188,7 @@ static inline void bm_cl_touch_ro(struct bm_portal *p, u32 offset)
static inline u32 bm_ce_in(struct bm_portal *p, u32 offset)
{
- return __raw_readl(p->addr.ce + offset);
+ return be32_to_cpu(__raw_readl(p->addr.ce + offset));
}
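__raw_readl() does no byte swapping, so the portal accessors now convert explicitly: the QBMan portal registers are big-endian regardless of the CPU. On a little-endian host the conversion amounts to a byte swap; a standalone sketch:

    #include <stdint.h>

    /* what be32_to_cpu() boils down to on a little-endian CPU;
     * on a big-endian CPU it is the identity */
    static inline uint32_t be32_to_cpu_sketch(uint32_t raw)
    {
        return __builtin_bswap32(raw);
    }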
struct bman_portal {
@@ -391,7 +391,7 @@ static void bm_rcr_finish(struct bm_portal *portal)
i = bm_in(portal, BM_REG_RCR_PI_CINH) & (BM_RCR_SIZE - 1);
if (i != rcr_ptr2idx(rcr->cursor))
- pr_crit("losing uncommited RCR entries\n");
+ pr_crit("losing uncommitted RCR entries\n");
i = bm_in(portal, BM_REG_RCR_CI_CINH) & (BM_RCR_SIZE - 1);
if (i != rcr->ci)
diff --git a/drivers/soc/fsl/qbman/bman_ccsr.c b/drivers/soc/fsl/qbman/bman_ccsr.c
index 9deb0524543f..a8e8389a6894 100644
--- a/drivers/soc/fsl/qbman/bman_ccsr.c
+++ b/drivers/soc/fsl/qbman/bman_ccsr.c
@@ -181,8 +181,7 @@ static int fsl_bman_probe(struct platform_device *pdev)
node->full_name);
return -ENXIO;
}
- bm_ccsr_start = devm_ioremap(dev, res->start,
- res->end - res->start + 1);
+ bm_ccsr_start = devm_ioremap(dev, res->start, resource_size(res));
if (!bm_ccsr_start)
return -ENXIO;
diff --git a/drivers/soc/fsl/qbman/bman_portal.c b/drivers/soc/fsl/qbman/bman_portal.c
index 6579cc18811a..8354d4dabdad 100644
--- a/drivers/soc/fsl/qbman/bman_portal.c
+++ b/drivers/soc/fsl/qbman/bman_portal.c
@@ -53,58 +53,38 @@ static struct bman_portal *init_pcfg(struct bm_portal_config *pcfg)
return p;
}
-static void bman_offline_cpu(unsigned int cpu)
+static int bman_offline_cpu(unsigned int cpu)
{
struct bman_portal *p = affine_bportals[cpu];
const struct bm_portal_config *pcfg;
if (!p)
- return;
+ return 0;
pcfg = bman_get_bm_portal_config(p);
if (!pcfg)
- return;
+ return 0;
irq_set_affinity(pcfg->irq, cpumask_of(0));
+ return 0;
}
-static void bman_online_cpu(unsigned int cpu)
+static int bman_online_cpu(unsigned int cpu)
{
struct bman_portal *p = affine_bportals[cpu];
const struct bm_portal_config *pcfg;
if (!p)
- return;
+ return 0;
pcfg = bman_get_bm_portal_config(p);
if (!pcfg)
- return;
+ return 0;
irq_set_affinity(pcfg->irq, cpumask_of(cpu));
+ return 0;
}
-static int bman_hotplug_cpu_callback(struct notifier_block *nfb,
- unsigned long action, void *hcpu)
-{
- unsigned int cpu = (unsigned long)hcpu;
-
- switch (action) {
- case CPU_ONLINE:
- case CPU_ONLINE_FROZEN:
- bman_online_cpu(cpu);
- break;
- case CPU_DOWN_PREPARE:
- case CPU_DOWN_PREPARE_FROZEN:
- bman_offline_cpu(cpu);
- }
-
- return NOTIFY_OK;
-}
-
-static struct notifier_block bman_hotplug_cpu_notifier = {
- .notifier_call = bman_hotplug_cpu_callback,
-};
-
static int bman_portal_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -146,15 +126,19 @@ static int bman_portal_probe(struct platform_device *pdev)
pcfg->irq = irq;
va = ioremap_prot(addr_phys[0]->start, resource_size(addr_phys[0]), 0);
- if (!va)
+ if (!va) {
+ dev_err(dev, "ioremap::CE failed\n");
goto err_ioremap1;
+ }
pcfg->addr_virt[DPAA_PORTAL_CE] = va;
va = ioremap_prot(addr_phys[1]->start, resource_size(addr_phys[1]),
_PAGE_GUARDED | _PAGE_NO_CACHE);
- if (!va)
+ if (!va) {
+ dev_err(dev, "ioremap::CI failed\n");
goto err_ioremap2;
+ }
pcfg->addr_virt[DPAA_PORTAL_CI] = va;
@@ -170,8 +154,10 @@ static int bman_portal_probe(struct platform_device *pdev)
spin_unlock(&bman_lock);
pcfg->cpu = cpu;
- if (!init_pcfg(pcfg))
- goto err_ioremap2;
+ if (!init_pcfg(pcfg)) {
+ dev_err(dev, "portal init failed\n");
+ goto err_portal_init;
+ }
/* clear irq affinity if assigned cpu is offline */
if (!cpu_online(cpu))
@@ -179,10 +165,11 @@ static int bman_portal_probe(struct platform_device *pdev)
return 0;
+err_portal_init:
+ iounmap(pcfg->addr_virt[DPAA_PORTAL_CI]);
err_ioremap2:
iounmap(pcfg->addr_virt[DPAA_PORTAL_CE]);
err_ioremap1:
- dev_err(dev, "ioremap failed\n");
return -ENXIO;
}
@@ -210,8 +197,14 @@ static int __init bman_portal_driver_register(struct platform_driver *drv)
if (ret < 0)
return ret;
- register_hotcpu_notifier(&bman_hotplug_cpu_notifier);
-
+ ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
+ "soc/qbman_portal:online",
+ bman_online_cpu, bman_offline_cpu);
+ if (ret < 0) {
+ pr_err("bman: failed to register hotplug callbacks.\n");
+ platform_driver_unregister(drv);
+ return ret;
+ }
return 0;
}
diff --git a/drivers/soc/fsl/qbman/dpaa_sys.h b/drivers/soc/fsl/qbman/dpaa_sys.h
index b63fd72295c6..2eaf3184f61d 100644
--- a/drivers/soc/fsl/qbman/dpaa_sys.h
+++ b/drivers/soc/fsl/qbman/dpaa_sys.h
@@ -38,6 +38,7 @@
#include <linux/kthread.h>
#include <linux/vmalloc.h>
#include <linux/platform_device.h>
+#include <linux/of.h>
#include <linux/of_reserved_mem.h>
#include <linux/prefetch.h>
#include <linux/genalloc.h>
diff --git a/drivers/soc/fsl/qbman/qman.c b/drivers/soc/fsl/qbman/qman.c
index 119054bc922b..6f509f68085e 100644
--- a/drivers/soc/fsl/qbman/qman.c
+++ b/drivers/soc/fsl/qbman/qman.c
@@ -140,10 +140,10 @@ enum qm_mr_cmode { /* matches QCSP_CFG::MM */
struct qm_eqcr_entry {
u8 _ncw_verb; /* writes to this are non-coherent */
u8 dca;
- u16 seqnum;
- u32 orp; /* 24-bit */
- u32 fqid; /* 24-bit */
- u32 tag;
+ __be16 seqnum;
+ u8 __reserved[4];
+ __be32 fqid; /* 24-bit */
+ __be32 tag;
struct qm_fd fd;
u8 __reserved3[32];
} __packed;
@@ -183,41 +183,22 @@ struct qm_mr {
};
/* MC (Management Command) command */
-/* "Query FQ" */
-struct qm_mcc_queryfq {
+/* "FQ" command layout */
+struct qm_mcc_fq {
u8 _ncw_verb;
u8 __reserved1[3];
- u32 fqid; /* 24-bit */
+ __be32 fqid; /* 24-bit */
u8 __reserved2[56];
} __packed;
-/* "Alter FQ State Commands " */
-struct qm_mcc_alterfq {
- u8 _ncw_verb;
- u8 __reserved1[3];
- u32 fqid; /* 24-bit */
- u8 __reserved2;
- u8 count; /* number of consecutive FQID */
- u8 __reserved3[10];
- u32 context_b; /* frame queue context b */
- u8 __reserved4[40];
-} __packed;
-/* "Query CGR" */
-struct qm_mcc_querycgr {
+/* "CGR" command layout */
+struct qm_mcc_cgr {
u8 _ncw_verb;
u8 __reserved1[30];
u8 cgid;
u8 __reserved2[32];
};
-struct qm_mcc_querywq {
- u8 _ncw_verb;
- u8 __reserved;
- /* select channel if verb != QUERYWQ_DEDICATED */
- u16 channel_wq; /* ignores wq (3 lsbits): _res[0-2] */
- u8 __reserved2[60];
-} __packed;
-
#define QM_MCC_VERB_VBIT 0x80
#define QM_MCC_VERB_MASK 0x7f /* mask of the verb bits */
#define QM_MCC_VERB_INITFQ_PARKED 0x40
@@ -243,12 +224,9 @@ union qm_mc_command {
u8 __reserved[63];
};
struct qm_mcc_initfq initfq;
- struct qm_mcc_queryfq queryfq;
- struct qm_mcc_alterfq alterfq;
struct qm_mcc_initcgr initcgr;
- struct qm_mcc_querycgr querycgr;
- struct qm_mcc_querywq querywq;
- struct qm_mcc_queryfq_np queryfq_np;
+ struct qm_mcc_fq fq;
+ struct qm_mcc_cgr cgr;
};
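The old queryfq and alterfq commands began with identical bytes (verb, three reserved bytes, a 24-bit FQID), which is why they collapse into the single qm_mcc_fq layout addressed as mcc->fq and filled through qm_fqid_set(). A sketch of what such a helper plausibly does on a little-endian host (an assumption; the helper's body is not part of this hunk):

    #include <stdint.h>

    #define QM_FQID_MASK 0x00ffffffu   /* assumed: FQIDs are 24-bit */

    /* store a CPU-order FQID into the big-endian command field */
    static inline void fqid_set_sketch(uint32_t *be_fqid, uint32_t fqid)
    {
        *be_fqid = __builtin_bswap32(fqid & QM_FQID_MASK);
    }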
/* MC (Management Command) result */
@@ -343,12 +321,12 @@ struct qm_portal {
/* Cache-inhibited register access. */
static inline u32 qm_in(struct qm_portal *p, u32 offset)
{
- return __raw_readl(p->addr.ci + offset);
+ return be32_to_cpu(__raw_readl(p->addr.ci + offset));
}
static inline void qm_out(struct qm_portal *p, u32 offset, u32 val)
{
- __raw_writel(val, p->addr.ci + offset);
+ __raw_writel(cpu_to_be32(val), p->addr.ci + offset);
}
/* Cache Enabled Portal Access */
@@ -364,7 +342,7 @@ static inline void qm_cl_touch_ro(struct qm_portal *p, u32 offset)
static inline u32 qm_ce_in(struct qm_portal *p, u32 offset)
{
- return __raw_readl(p->addr.ce + offset);
+ return be32_to_cpu(__raw_readl(p->addr.ce + offset));
}
/* --- EQCR API --- */
@@ -443,7 +421,7 @@ static inline void qm_eqcr_finish(struct qm_portal *portal)
DPAA_ASSERT(!eqcr->busy);
if (pi != eqcr_ptr2idx(eqcr->cursor))
- pr_crit("losing uncommited EQCR entries\n");
+ pr_crit("losing uncommitted EQCR entries\n");
if (ci != eqcr->ci)
pr_crit("missing existing EQCR completions\n");
if (eqcr->ci != eqcr_ptr2idx(eqcr->cursor))
@@ -492,8 +470,7 @@ static inline struct qm_eqcr_entry *qm_eqcr_start_stash(struct qm_portal
static inline void eqcr_commit_checks(struct qm_eqcr *eqcr)
{
DPAA_ASSERT(eqcr->busy);
- DPAA_ASSERT(eqcr->cursor->orp == (eqcr->cursor->orp & 0x00ffffff));
- DPAA_ASSERT(eqcr->cursor->fqid == (eqcr->cursor->fqid & 0x00ffffff));
+ DPAA_ASSERT(!(be32_to_cpu(eqcr->cursor->fqid) & ~QM_FQID_MASK));
DPAA_ASSERT(eqcr->available >= 1);
}
@@ -962,8 +939,6 @@ struct qman_portal {
u32 sdqcr;
/* probing time config params for cpu-affine portals */
const struct qm_portal_config *config;
- /* needed for providing a non-NULL device to dma_map_***() */
- struct platform_device *pdev;
/* 2-element array. cgrs[0] is mask, cgrs[1] is snapshot. */
struct qman_cgrs *cgrs;
/* linked-list of CSCN handlers. */
@@ -1133,7 +1108,6 @@ static int qman_create_portal(struct qman_portal *portal,
const struct qman_cgrs *cgrs)
{
struct qm_portal *p;
- char buf[16];
int ret;
u32 isdr;
@@ -1196,15 +1170,6 @@ static int qman_create_portal(struct qman_portal *portal,
portal->sdqcr = QM_SDQCR_SOURCE_CHANNELS | QM_SDQCR_COUNT_UPTO3 |
QM_SDQCR_DEDICATED_PRECEDENCE | QM_SDQCR_TYPE_PRIO_QOS |
QM_SDQCR_TOKEN_SET(0xab) | QM_SDQCR_CHANNELS_DEDICATED;
- sprintf(buf, "qportal-%d", c->channel);
- portal->pdev = platform_device_alloc(buf, -1);
- if (!portal->pdev)
- goto fail_devalloc;
- if (dma_set_mask(&portal->pdev->dev, DMA_BIT_MASK(40)))
- goto fail_devadd;
- ret = platform_device_add(portal->pdev);
- if (ret)
- goto fail_devadd;
isdr = 0xffffffff;
qm_out(p, QM_REG_ISDR, isdr);
portal->irq_sources = 0;
@@ -1239,8 +1204,8 @@ static int qman_create_portal(struct qman_portal *portal,
/* special handling, drain just in case it's a few FQRNIs */
const union qm_mr_entry *e = qm_mr_current(p);
- dev_err(c->dev, "MR dirty, VB 0x%x, rc 0x%x\n, addr 0x%x",
- e->verb, e->ern.rc, e->ern.fd.addr_lo);
+ dev_err(c->dev, "MR dirty, VB 0x%x, rc 0x%x, addr 0x%llx\n",
+ e->verb, e->ern.rc, qm_fd_addr_get64(&e->ern.fd));
goto fail_dqrr_mr_empty;
}
/* Success */
@@ -1256,10 +1221,6 @@ fail_eqcr_empty:
fail_affinity:
free_irq(c->irq, portal);
fail_irq:
- platform_device_del(portal->pdev);
-fail_devadd:
- platform_device_put(portal->pdev);
-fail_devalloc:
kfree(portal->cgrs);
fail_cgrs:
qm_mc_finish(p);
@@ -1321,9 +1282,6 @@ static void qman_destroy_portal(struct qman_portal *qm)
qm_dqrr_finish(&qm->p);
qm_eqcr_finish(&qm->p);
- platform_device_del(qm->pdev);
- platform_device_put(qm->pdev);
-
qm->config = NULL;
}
@@ -1428,7 +1386,7 @@ static void qm_mr_process_task(struct work_struct *work)
case QM_MR_VERB_FQRN:
case QM_MR_VERB_FQRL:
/* Lookup in the retirement table */
- fq = fqid_to_fq(msg->fq.fqid);
+ fq = fqid_to_fq(qm_fqid_get(&msg->fq));
if (WARN_ON(!fq))
break;
fq_state_change(p, fq, msg, verb);
@@ -1437,7 +1395,7 @@ static void qm_mr_process_task(struct work_struct *work)
break;
case QM_MR_VERB_FQPN:
/* Parked */
- fq = tag_to_fq(msg->fq.contextB);
+ fq = tag_to_fq(be32_to_cpu(msg->fq.context_b));
fq_state_change(p, fq, msg, verb);
if (fq->cb.fqs)
fq->cb.fqs(p, fq, msg);
@@ -1451,7 +1409,7 @@ static void qm_mr_process_task(struct work_struct *work)
}
} else {
/* It's a software ERN */
- fq = tag_to_fq(msg->ern.tag);
+ fq = tag_to_fq(be32_to_cpu(msg->ern.tag));
fq->cb.ern(p, fq, msg);
}
num++;
@@ -1536,7 +1494,7 @@ static inline unsigned int __poll_portal_fast(struct qman_portal *p,
if (dq->stat & QM_DQRR_STAT_UNSCHEDULED) {
/*
- * VDQCR: don't trust contextB as the FQ may have
+ * VDQCR: don't trust context_b as the FQ may have
* been configured for h/w consumption and we're
* draining it post-retirement.
*/
@@ -1562,8 +1520,8 @@ static inline unsigned int __poll_portal_fast(struct qman_portal *p,
if (dq->stat & QM_DQRR_STAT_DQCR_EXPIRED)
clear_vdqcr(p, fq);
} else {
- /* SDQCR: contextB points to the FQ */
- fq = tag_to_fq(dq->contextB);
+ /* SDQCR: context_b points to the FQ */
+ fq = tag_to_fq(be32_to_cpu(dq->context_b));
/* Now let the callback do its stuff */
res = fq->cb.dqrr(p, fq, dq);
/*
@@ -1780,9 +1738,9 @@ int qman_init_fq(struct qman_fq *fq, u32 flags, struct qm_mcc_initfq *opts)
if (fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY))
return -EINVAL;
#endif
- if (opts && (opts->we_mask & QM_INITFQ_WE_OAC)) {
+ if (opts && (be16_to_cpu(opts->we_mask) & QM_INITFQ_WE_OAC)) {
/* And can't be set at the same time as TDTHRESH */
- if (opts->we_mask & QM_INITFQ_WE_TDTHRESH)
+ if (be16_to_cpu(opts->we_mask) & QM_INITFQ_WE_TDTHRESH)
return -EINVAL;
}
/* Issue an INITFQ_[PARKED|SCHED] management command */
@@ -1796,37 +1754,49 @@ int qman_init_fq(struct qman_fq *fq, u32 flags, struct qm_mcc_initfq *opts)
mcc = qm_mc_start(&p->p);
if (opts)
mcc->initfq = *opts;
- mcc->initfq.fqid = fq->fqid;
+ qm_fqid_set(&mcc->fq, fq->fqid);
mcc->initfq.count = 0;
/*
- * If the FQ does *not* have the TO_DCPORTAL flag, contextB is set as a
+ * If the FQ does *not* have the TO_DCPORTAL flag, context_b is set as a
* demux pointer. Otherwise, the caller-provided value is allowed to
* stand, don't overwrite it.
*/
if (fq_isclear(fq, QMAN_FQ_FLAG_TO_DCPORTAL)) {
dma_addr_t phys_fq;
- mcc->initfq.we_mask |= QM_INITFQ_WE_CONTEXTB;
- mcc->initfq.fqd.context_b = fq_to_tag(fq);
+ mcc->initfq.we_mask |= cpu_to_be16(QM_INITFQ_WE_CONTEXTB);
+ mcc->initfq.fqd.context_b = cpu_to_be32(fq_to_tag(fq));
/*
* and the physical address - NB, if the user wasn't trying to
* set CONTEXTA, clear the stashing settings.
*/
- if (!(mcc->initfq.we_mask & QM_INITFQ_WE_CONTEXTA)) {
- mcc->initfq.we_mask |= QM_INITFQ_WE_CONTEXTA;
+ if (!(be16_to_cpu(mcc->initfq.we_mask) &
+ QM_INITFQ_WE_CONTEXTA)) {
+ mcc->initfq.we_mask |=
+ cpu_to_be16(QM_INITFQ_WE_CONTEXTA);
memset(&mcc->initfq.fqd.context_a, 0,
sizeof(mcc->initfq.fqd.context_a));
} else {
- phys_fq = dma_map_single(&p->pdev->dev, fq, sizeof(*fq),
- DMA_TO_DEVICE);
+ struct qman_portal *p = qman_dma_portal;
+
+ phys_fq = dma_map_single(p->config->dev, fq,
+ sizeof(*fq), DMA_TO_DEVICE);
+ if (dma_mapping_error(p->config->dev, phys_fq)) {
+ dev_err(p->config->dev, "dma_mapping failed\n");
+ ret = -EIO;
+ goto out;
+ }
+
qm_fqd_stashing_set64(&mcc->initfq.fqd, phys_fq);
}
}
if (flags & QMAN_INITFQ_FLAG_LOCAL) {
int wq = 0;
- if (!(mcc->initfq.we_mask & QM_INITFQ_WE_DESTWQ)) {
- mcc->initfq.we_mask |= QM_INITFQ_WE_DESTWQ;
+ if (!(be16_to_cpu(mcc->initfq.we_mask) &
+ QM_INITFQ_WE_DESTWQ)) {
+ mcc->initfq.we_mask |=
+ cpu_to_be16(QM_INITFQ_WE_DESTWQ);
wq = 4;
}
qm_fqd_set_destwq(&mcc->initfq.fqd, p->config->channel, wq);
@@ -1845,13 +1815,13 @@ int qman_init_fq(struct qman_fq *fq, u32 flags, struct qm_mcc_initfq *opts)
goto out;
}
if (opts) {
- if (opts->we_mask & QM_INITFQ_WE_FQCTRL) {
- if (opts->fqd.fq_ctrl & QM_FQCTRL_CGE)
+ if (be16_to_cpu(opts->we_mask) & QM_INITFQ_WE_FQCTRL) {
+ if (be16_to_cpu(opts->fqd.fq_ctrl) & QM_FQCTRL_CGE)
fq_set(fq, QMAN_FQ_STATE_CGR_EN);
else
fq_clear(fq, QMAN_FQ_STATE_CGR_EN);
}
- if (opts->we_mask & QM_INITFQ_WE_CGID)
+ if (be16_to_cpu(opts->we_mask) & QM_INITFQ_WE_CGID)
fq->cgr_groupid = opts->fqd.cgid;
}
fq->state = (flags & QMAN_INITFQ_FLAG_SCHED) ?
@@ -1884,7 +1854,7 @@ int qman_schedule_fq(struct qman_fq *fq)
goto out;
}
mcc = qm_mc_start(&p->p);
- mcc->alterfq.fqid = fq->fqid;
+ qm_fqid_set(&mcc->fq, fq->fqid);
qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_SCHED);
if (!qm_mc_result_timeout(&p->p, &mcr)) {
dev_err(p->config->dev, "ALTER_SCHED timeout\n");
@@ -1927,7 +1897,7 @@ int qman_retire_fq(struct qman_fq *fq, u32 *flags)
goto out;
}
mcc = qm_mc_start(&p->p);
- mcc->alterfq.fqid = fq->fqid;
+ qm_fqid_set(&mcc->fq, fq->fqid);
qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_RETIRE);
if (!qm_mc_result_timeout(&p->p, &mcr)) {
dev_crit(p->config->dev, "ALTER_RETIRE timeout\n");
@@ -1970,8 +1940,8 @@ int qman_retire_fq(struct qman_fq *fq, u32 *flags)
msg.verb = QM_MR_VERB_FQRNI;
msg.fq.fqs = mcr->alterfq.fqs;
- msg.fq.fqid = fq->fqid;
- msg.fq.contextB = fq_to_tag(fq);
+ qm_fqid_set(&msg.fq, fq->fqid);
+ msg.fq.context_b = cpu_to_be32(fq_to_tag(fq));
fq->cb.fqs(p, fq, &msg);
}
} else if (res == QM_MCR_RESULT_PENDING) {
@@ -2006,7 +1976,7 @@ int qman_oos_fq(struct qman_fq *fq)
goto out;
}
mcc = qm_mc_start(&p->p);
- mcc->alterfq.fqid = fq->fqid;
+ qm_fqid_set(&mcc->fq, fq->fqid);
qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_OOS);
if (!qm_mc_result_timeout(&p->p, &mcr)) {
ret = -ETIMEDOUT;
@@ -2032,7 +2002,7 @@ int qman_query_fq(struct qman_fq *fq, struct qm_fqd *fqd)
int ret = 0;
mcc = qm_mc_start(&p->p);
- mcc->queryfq.fqid = fq->fqid;
+ qm_fqid_set(&mcc->fq, fq->fqid);
qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ);
if (!qm_mc_result_timeout(&p->p, &mcr)) {
ret = -ETIMEDOUT;
@@ -2058,7 +2028,7 @@ static int qman_query_fq_np(struct qman_fq *fq,
int ret = 0;
mcc = qm_mc_start(&p->p);
- mcc->queryfq.fqid = fq->fqid;
+ qm_fqid_set(&mcc->fq, fq->fqid);
qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ_NP);
if (!qm_mc_result_timeout(&p->p, &mcr)) {
ret = -ETIMEDOUT;
@@ -2086,7 +2056,7 @@ static int qman_query_cgr(struct qman_cgr *cgr,
int ret = 0;
mcc = qm_mc_start(&p->p);
- mcc->querycgr.cgid = cgr->cgrid;
+ mcc->cgr.cgid = cgr->cgrid;
qm_mc_commit(&p->p, QM_MCC_VERB_QUERYCGR);
if (!qm_mc_result_timeout(&p->p, &mcr)) {
ret = -ETIMEDOUT;
@@ -2239,8 +2209,8 @@ int qman_enqueue(struct qman_fq *fq, const struct qm_fd *fd)
if (unlikely(!eq))
goto out;
- eq->fqid = fq->fqid;
- eq->tag = fq_to_tag(fq);
+ qm_fqid_set(eq, fq->fqid);
+ eq->tag = cpu_to_be32(fq_to_tag(fq));
eq->fd = *fd;
qm_eqcr_pvb_commit(&p->p, QM_EQCR_VERB_CMD_ENQUEUE);
@@ -2282,7 +2252,24 @@ out:
}
#define PORTAL_IDX(n) (n->config->channel - QM_CHANNEL_SWPORTAL0)
-#define TARG_MASK(n) (BIT(31) >> PORTAL_IDX(n))
+
+/* congestion state change notification target update control */
+static void qm_cgr_cscn_targ_set(struct __qm_mc_cgr *cgr, int pi, u32 val)
+{
+ if (qman_ip_rev >= QMAN_REV30)
+ cgr->cscn_targ_upd_ctrl = cpu_to_be16(pi |
+ QM_CGR_TARG_UDP_CTRL_WRITE_BIT);
+ else
+ cgr->cscn_targ = cpu_to_be32(val | QM_CGR_TARG_PORTAL(pi));
+}
+
+static void qm_cgr_cscn_targ_clear(struct __qm_mc_cgr *cgr, int pi, u32 val)
+{
+ if (qman_ip_rev >= QMAN_REV30)
+ cgr->cscn_targ_upd_ctrl = cpu_to_be16(pi);
+ else
+ cgr->cscn_targ = cpu_to_be32(val & ~QM_CGR_TARG_PORTAL(pi));
+}
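The pair of helpers captures a hardware revision split: pre-3.0 QMan keeps a 32-bit per-portal target bitmask, while 3.0 and later take a portal index plus a write-enable control bit. A standalone sketch of the two encodings (both bit positions are assumptions, inferred from the old TARG_MASK definition and the write-enable constant's name):

    #include <stdint.h>

    #define TARG_PORTAL_BIT(pi)  (0x80000000u >> (pi))  /* assumed */
    #define TARG_WRITE_BIT       0x8000u                /* assumed */

    /* pre-3.0: set the portal's bit in the 32-bit mask */
    static uint32_t targ_set_old(uint32_t cur, int pi)
    {
        return cur | TARG_PORTAL_BIT(pi);
    }

    /* 3.0+: write the portal index with the write-enable bit */
    static uint16_t targ_set_new(int pi)
    {
        return (uint16_t)pi | TARG_WRITE_BIT;
    }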
static u8 qman_cgr_cpus[CGR_NUM];
@@ -2305,7 +2292,6 @@ int qman_create_cgr(struct qman_cgr *cgr, u32 flags,
struct qm_mcc_initcgr *opts)
{
struct qm_mcr_querycgr cgr_state;
- struct qm_mcc_initcgr local_opts = {};
int ret;
struct qman_portal *p;
@@ -2327,22 +2313,18 @@ int qman_create_cgr(struct qman_cgr *cgr, u32 flags,
spin_lock(&p->cgr_lock);
if (opts) {
+ struct qm_mcc_initcgr local_opts = *opts;
+
ret = qman_query_cgr(cgr, &cgr_state);
if (ret)
goto out;
- if (opts)
- local_opts = *opts;
- if ((qman_ip_rev & 0xFF00) >= QMAN_REV30)
- local_opts.cgr.cscn_targ_upd_ctrl =
- QM_CGR_TARG_UDP_CTRL_WRITE_BIT | PORTAL_IDX(p);
- else
- /* Overwrite TARG */
- local_opts.cgr.cscn_targ = cgr_state.cgr.cscn_targ |
- TARG_MASK(p);
- local_opts.we_mask |= QM_CGR_WE_CSCN_TARG;
+
+ qm_cgr_cscn_targ_set(&local_opts.cgr, PORTAL_IDX(p),
+ be32_to_cpu(cgr_state.cgr.cscn_targ));
+ local_opts.we_mask |= cpu_to_be16(QM_CGR_WE_CSCN_TARG);
/* send init if flags indicate so */
- if (opts && (flags & QMAN_CGR_FLAG_USE_INIT))
+ if (flags & QMAN_CGR_FLAG_USE_INIT)
ret = qm_modify_cgr(cgr, QMAN_CGR_FLAG_USE_INIT,
&local_opts);
else
@@ -2405,13 +2387,11 @@ int qman_delete_cgr(struct qman_cgr *cgr)
list_add(&cgr->node, &p->cgr_cbs);
goto release_lock;
}
- /* Overwrite TARG */
- local_opts.we_mask = QM_CGR_WE_CSCN_TARG;
- if ((qman_ip_rev & 0xFF00) >= QMAN_REV30)
- local_opts.cgr.cscn_targ_upd_ctrl = PORTAL_IDX(p);
- else
- local_opts.cgr.cscn_targ = cgr_state.cgr.cscn_targ &
- ~(TARG_MASK(p));
+
+ local_opts.we_mask = cpu_to_be16(QM_CGR_WE_CSCN_TARG);
+ qm_cgr_cscn_targ_clear(&local_opts.cgr, PORTAL_IDX(p),
+ be32_to_cpu(cgr_state.cgr.cscn_targ));
+
ret = qm_modify_cgr(cgr, 0, &local_opts);
if (ret)
/* add back to the list */
@@ -2501,7 +2481,7 @@ static int _qm_dqrr_consume_and_match(struct qm_portal *p, u32 fqid, int s,
} while (wait && !dqrr);
while (dqrr) {
- if (dqrr->fqid == fqid && (dqrr->stat & s))
+ if (qm_fqid_get(dqrr) == fqid && (dqrr->stat & s))
found = 1;
qm_dqrr_cdc_consume_1ptr(p, dqrr, 0);
qm_dqrr_pvb_update(p);
@@ -2537,7 +2517,7 @@ static int qman_shutdown_fq(u32 fqid)
dev = p->config->dev;
/* Determine the state of the FQID */
mcc = qm_mc_start(&p->p);
- mcc->queryfq_np.fqid = fqid;
+ qm_fqid_set(&mcc->fq, fqid);
qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ_NP);
if (!qm_mc_result_timeout(&p->p, &mcr)) {
dev_err(dev, "QUERYFQ_NP timeout\n");
@@ -2552,7 +2532,7 @@ static int qman_shutdown_fq(u32 fqid)
/* Query which channel the FQ is using */
mcc = qm_mc_start(&p->p);
- mcc->queryfq.fqid = fqid;
+ qm_fqid_set(&mcc->fq, fqid);
qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ);
if (!qm_mc_result_timeout(&p->p, &mcr)) {
dev_err(dev, "QUERYFQ timeout\n");
@@ -2572,7 +2552,7 @@ static int qman_shutdown_fq(u32 fqid)
case QM_MCR_NP_STATE_PARKED:
orl_empty = 0;
mcc = qm_mc_start(&p->p);
- mcc->alterfq.fqid = fqid;
+ qm_fqid_set(&mcc->fq, fqid);
qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_RETIRE);
if (!qm_mc_result_timeout(&p->p, &mcr)) {
dev_err(dev, "QUERYFQ_NP timeout\n");
@@ -2667,7 +2647,7 @@ static int qman_shutdown_fq(u32 fqid)
cpu_relax();
}
mcc = qm_mc_start(&p->p);
- mcc->alterfq.fqid = fqid;
+ qm_fqid_set(&mcc->fq, fqid);
qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_OOS);
if (!qm_mc_result_timeout(&p->p, &mcr)) {
ret = -ETIMEDOUT;
@@ -2687,7 +2667,7 @@ static int qman_shutdown_fq(u32 fqid)
case QM_MCR_NP_STATE_RETIRED:
/* Send OOS Command */
mcc = qm_mc_start(&p->p);
- mcc->alterfq.fqid = fqid;
+ qm_fqid_set(&mcc->fq, fqid);
qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_OOS);
if (!qm_mc_result_timeout(&p->p, &mcr)) {
ret = -ETIMEDOUT;
@@ -2722,6 +2702,7 @@ const struct qm_portal_config *qman_get_qm_portal_config(
{
return portal->config;
}
+EXPORT_SYMBOL(qman_get_qm_portal_config);
struct gen_pool *qm_fqalloc; /* FQID allocator */
struct gen_pool *qm_qpalloc; /* pool-channel allocator */
@@ -2789,15 +2770,18 @@ static int qpool_cleanup(u32 qp)
struct qm_mcr_queryfq_np np;
err = qman_query_fq_np(&fq, &np);
- if (err)
+ if (err == -ERANGE)
/* FQID range exceeded, found no problems */
return 0;
+ else if (WARN_ON(err))
+ return err;
+
if ((np.state & QM_MCR_NP_STATE_MASK) != QM_MCR_NP_STATE_OOS) {
struct qm_fqd fqd;
err = qman_query_fq(&fq, &fqd);
if (WARN_ON(err))
- return 0;
+ return err;
if (qm_fqd_get_chan(&fqd) == qp) {
/* The channel is the FQ's target, clean it */
err = qman_shutdown_fq(fq.fqid);
@@ -2836,7 +2820,7 @@ static int cgr_cleanup(u32 cgrid)
* error, looking for non-OOS FQDs whose CGR is the CGR being released
*/
struct qman_fq fq = {
- .fqid = 1
+ .fqid = QM_FQID_RANGE_START
};
int err;
@@ -2844,16 +2828,19 @@ static int cgr_cleanup(u32 cgrid)
struct qm_mcr_queryfq_np np;
err = qman_query_fq_np(&fq, &np);
- if (err)
+ if (err == -ERANGE)
/* FQID range exceeded, found no problems */
return 0;
+ else if (WARN_ON(err))
+ return err;
+
if ((np.state & QM_MCR_NP_STATE_MASK) != QM_MCR_NP_STATE_OOS) {
struct qm_fqd fqd;
err = qman_query_fq(&fq, &fqd);
if (WARN_ON(err))
- return 0;
- if ((fqd.fq_ctrl & QM_FQCTRL_CGE) &&
+ return err;
+ if (be16_to_cpu(fqd.fq_ctrl) & QM_FQCTRL_CGE &&
fqd.cgid == cgrid) {
pr_err("CRGID 0x%x is being used by FQID 0x%x, CGR will be leaked\n",
cgrid, fq.fqid);
diff --git a/drivers/soc/fsl/qbman/qman_ccsr.c b/drivers/soc/fsl/qbman/qman_ccsr.c
index 0cace9e0077e..f4e6e70de259 100644
--- a/drivers/soc/fsl/qbman/qman_ccsr.c
+++ b/drivers/soc/fsl/qbman/qman_ccsr.c
@@ -444,6 +444,9 @@ static int zero_priv_mem(struct device *dev, struct device_node *node,
/* map as cacheable, non-guarded */
void __iomem *tmpp = ioremap_prot(addr, sz, 0);
+ if (!tmpp)
+ return -ENOMEM;
+
memset_io(tmpp, 0, sz);
flush_dcache_range((unsigned long)tmpp,
(unsigned long)tmpp + sz);
diff --git a/drivers/soc/fsl/qbman/qman_portal.c b/drivers/soc/fsl/qbman/qman_portal.c
index 148614388fca..adbaa30d3c5a 100644
--- a/drivers/soc/fsl/qbman/qman_portal.c
+++ b/drivers/soc/fsl/qbman/qman_portal.c
@@ -30,6 +30,9 @@
#include "qman_priv.h"
+struct qman_portal *qman_dma_portal;
+EXPORT_SYMBOL(qman_dma_portal);
+
/* Enable portal interrupts (as opposed to polling mode) */
#define CONFIG_FSL_DPA_PIRQ_SLOW 1
#define CONFIG_FSL_DPA_PIRQ_FAST 1
@@ -150,6 +153,10 @@ static struct qman_portal *init_pcfg(struct qm_portal_config *pcfg)
/* all assigned portals are initialized now */
qman_init_cgr_all();
}
+
+ if (!qman_dma_portal)
+ qman_dma_portal = p;
+
spin_unlock(&qman_lock);
dev_info(pcfg->dev, "Portal initialised, cpu %d\n", pcfg->cpu);
@@ -179,7 +186,7 @@ static void qman_portal_update_sdest(const struct qm_portal_config *pcfg,
qman_set_sdest(pcfg->channel, cpu);
}
-static void qman_offline_cpu(unsigned int cpu)
+static int qman_offline_cpu(unsigned int cpu)
{
struct qman_portal *p;
const struct qm_portal_config *pcfg;
@@ -192,9 +199,10 @@ static void qman_offline_cpu(unsigned int cpu)
qman_portal_update_sdest(pcfg, 0);
}
}
+ return 0;
}
-static void qman_online_cpu(unsigned int cpu)
+static int qman_online_cpu(unsigned int cpu)
{
struct qman_portal *p;
const struct qm_portal_config *pcfg;
@@ -207,40 +215,18 @@ static void qman_online_cpu(unsigned int cpu)
qman_portal_update_sdest(pcfg, cpu);
}
}
+ return 0;
}
-static int qman_hotplug_cpu_callback(struct notifier_block *nfb,
- unsigned long action, void *hcpu)
-{
- unsigned int cpu = (unsigned long)hcpu;
-
- switch (action) {
- case CPU_ONLINE:
- case CPU_ONLINE_FROZEN:
- qman_online_cpu(cpu);
- break;
- case CPU_DOWN_PREPARE:
- case CPU_DOWN_PREPARE_FROZEN:
- qman_offline_cpu(cpu);
- default:
- break;
- }
- return NOTIFY_OK;
-}
-
-static struct notifier_block qman_hotplug_cpu_notifier = {
- .notifier_call = qman_hotplug_cpu_callback,
-};
-
static int qman_portal_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct device_node *node = dev->of_node;
struct qm_portal_config *pcfg;
struct resource *addr_phys[2];
- const u32 *channel;
void __iomem *va;
- int irq, len, cpu;
+ int irq, cpu, err;
+ u32 val;
pcfg = devm_kmalloc(dev, sizeof(*pcfg), GFP_KERNEL);
if (!pcfg)
@@ -264,13 +250,13 @@ static int qman_portal_probe(struct platform_device *pdev)
return -ENXIO;
}
- channel = of_get_property(node, "cell-index", &len);
- if (!channel || (len != 4)) {
+ err = of_property_read_u32(node, "cell-index", &val);
+ if (err) {
dev_err(dev, "Can't get %s property 'cell-index'\n",
node->full_name);
- return -ENXIO;
+ return err;
}
- pcfg->channel = *channel;
+ pcfg->channel = val;
pcfg->cpu = -1;
irq = platform_get_irq(pdev, 0);
if (irq <= 0) {
@@ -280,15 +266,19 @@ static int qman_portal_probe(struct platform_device *pdev)
pcfg->irq = irq;
va = ioremap_prot(addr_phys[0]->start, resource_size(addr_phys[0]), 0);
- if (!va)
+ if (!va) {
+ dev_err(dev, "ioremap::CE failed\n");
goto err_ioremap1;
+ }
pcfg->addr_virt[DPAA_PORTAL_CE] = va;
va = ioremap_prot(addr_phys[1]->start, resource_size(addr_phys[1]),
_PAGE_GUARDED | _PAGE_NO_CACHE);
- if (!va)
+ if (!va) {
+ dev_err(dev, "ioremap::CI failed\n");
goto err_ioremap2;
+ }
pcfg->addr_virt[DPAA_PORTAL_CI] = va;
@@ -306,8 +296,15 @@ static int qman_portal_probe(struct platform_device *pdev)
spin_unlock(&qman_lock);
pcfg->cpu = cpu;
- if (!init_pcfg(pcfg))
- goto err_ioremap2;
+ if (dma_set_mask(dev, DMA_BIT_MASK(40))) {
+ dev_err(dev, "dma_set_mask() failed\n");
+ goto err_portal_init;
+ }
+
+ if (!init_pcfg(pcfg)) {
+ dev_err(dev, "portal init failed\n");
+ goto err_portal_init;
+ }
/* clear irq affinity if assigned cpu is offline */
if (!cpu_online(cpu))
@@ -315,10 +312,11 @@ static int qman_portal_probe(struct platform_device *pdev)
return 0;
+err_portal_init:
+ iounmap(pcfg->addr_virt[DPAA_PORTAL_CI]);
err_ioremap2:
iounmap(pcfg->addr_virt[DPAA_PORTAL_CE]);
err_ioremap1:
- dev_err(dev, "ioremap failed\n");
return -ENXIO;
}
@@ -346,8 +344,14 @@ static int __init qman_portal_driver_register(struct platform_driver *drv)
if (ret < 0)
return ret;
- register_hotcpu_notifier(&qman_hotplug_cpu_notifier);
-
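+	/* switch to the cpuhp state machine; _nocalls skips invoking the
+	 * callbacks for CPUs that are already online
+	 */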
+ ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
+ "soc/qman_portal:online",
+ qman_online_cpu, qman_offline_cpu);
+ if (ret < 0) {
+ pr_err("qman: failed to register hotplug callbacks.\n");
+ platform_driver_unregister(drv);
+ return ret;
+ }
return 0;
}
diff --git a/drivers/soc/fsl/qbman/qman_priv.h b/drivers/soc/fsl/qbman/qman_priv.h
index 5cf821e623a9..53685b59718e 100644
--- a/drivers/soc/fsl/qbman/qman_priv.h
+++ b/drivers/soc/fsl/qbman/qman_priv.h
@@ -73,29 +73,23 @@ struct qm_mcr_querycgr {
struct __qm_mc_cgr cgr; /* CGR fields */
u8 __reserved2[6];
u8 i_bcnt_hi; /* high 8-bits of 40-bit "Instant" */
- u32 i_bcnt_lo; /* low 32-bits of 40-bit */
+ __be32 i_bcnt_lo; /* low 32-bits of 40-bit */
u8 __reserved3[3];
u8 a_bcnt_hi; /* high 8-bits of 40-bit "Average" */
- u32 a_bcnt_lo; /* low 32-bits of 40-bit */
- u32 cscn_targ_swp[4];
+ __be32 a_bcnt_lo; /* low 32-bits of 40-bit */
+ __be32 cscn_targ_swp[4];
} __packed;
static inline u64 qm_mcr_querycgr_i_get64(const struct qm_mcr_querycgr *q)
{
- return ((u64)q->i_bcnt_hi << 32) | (u64)q->i_bcnt_lo;
+ return ((u64)q->i_bcnt_hi << 32) | be32_to_cpu(q->i_bcnt_lo);
}
static inline u64 qm_mcr_querycgr_a_get64(const struct qm_mcr_querycgr *q)
{
- return ((u64)q->a_bcnt_hi << 32) | (u64)q->a_bcnt_lo;
+ return ((u64)q->a_bcnt_hi << 32) | be32_to_cpu(q->a_bcnt_lo);
}
/* "Query FQ Non-Programmable Fields" */
-struct qm_mcc_queryfq_np {
- u8 _ncw_verb;
- u8 __reserved1[3];
- u32 fqid; /* 24-bit */
- u8 __reserved2[56];
-} __packed;
struct qm_mcr_queryfq_np {
u8 verb;
@@ -367,5 +361,6 @@ int qman_alloc_fq_table(u32 num_fqids);
#define QM_PIRQ_VISIBLE (QM_PIRQ_SLOW | QM_PIRQ_DQRI)
extern struct qman_portal *affine_portals[NR_CPUS];
+extern struct qman_portal *qman_dma_portal;
const struct qm_portal_config *qman_get_qm_portal_config(
struct qman_portal *portal);
diff --git a/drivers/soc/fsl/qbman/qman_test_api.c b/drivers/soc/fsl/qbman/qman_test_api.c
index 6880ff17f45e..2895d062cf51 100644
--- a/drivers/soc/fsl/qbman/qman_test_api.c
+++ b/drivers/soc/fsl/qbman/qman_test_api.c
@@ -65,7 +65,7 @@ static void fd_init(struct qm_fd *fd)
{
qm_fd_addr_set64(fd, 0xabdeadbeefLLU);
qm_fd_set_contig_big(fd, 0x0000ffff);
- fd->cmd = 0xfeedf00d;
+ fd->cmd = cpu_to_be32(0xfeedf00d);
}
static void fd_inc(struct qm_fd *fd)
@@ -86,26 +86,19 @@ static void fd_inc(struct qm_fd *fd)
len--;
qm_fd_set_param(fd, fmt, off, len);
- fd->cmd++;
+ fd->cmd = cpu_to_be32(be32_to_cpu(fd->cmd) + 1);
}
/* The only part of the 'fd' we can't memcmp() is the ppid */
-static int fd_cmp(const struct qm_fd *a, const struct qm_fd *b)
+static bool fd_neq(const struct qm_fd *a, const struct qm_fd *b)
{
- int r = (qm_fd_addr_get64(a) == qm_fd_addr_get64(b)) ? 0 : -1;
+ bool neq = qm_fd_addr_get64(a) != qm_fd_addr_get64(b);
- if (!r) {
- enum qm_fd_format fmt_a, fmt_b;
+ neq |= qm_fd_get_format(a) != qm_fd_get_format(b);
+ neq |= a->cfg != b->cfg;
+ neq |= a->cmd != b->cmd;
- fmt_a = qm_fd_get_format(a);
- fmt_b = qm_fd_get_format(b);
- r = fmt_a - fmt_b;
- }
- if (!r)
- r = a->cfg - b->cfg;
- if (!r)
- r = a->cmd - b->cmd;
- return r;
+ return neq;
}
/* test */
@@ -217,12 +210,12 @@ static enum qman_cb_dqrr_result cb_dqrr(struct qman_portal *p,
struct qman_fq *fq,
const struct qm_dqrr_entry *dq)
{
- if (WARN_ON(fd_cmp(&fd_dq, &dq->fd))) {
+ if (WARN_ON(fd_neq(&fd_dq, &dq->fd))) {
pr_err("BADNESS: dequeued frame doesn't match;\n");
return qman_cb_dqrr_consume;
}
fd_inc(&fd_dq);
- if (!(dq->stat & QM_DQRR_STAT_UNSCHEDULED) && !fd_cmp(&fd_dq, &fd)) {
+ if (!(dq->stat & QM_DQRR_STAT_UNSCHEDULED) && !fd_neq(&fd_dq, &fd)) {
sdqcr_complete = 1;
wake_up(&waitqueue);
}
diff --git a/drivers/soc/fsl/qbman/qman_test_stash.c b/drivers/soc/fsl/qbman/qman_test_stash.c
index 43cf66ba42f5..e87b65403b67 100644
--- a/drivers/soc/fsl/qbman/qman_test_stash.c
+++ b/drivers/soc/fsl/qbman/qman_test_stash.c
@@ -175,7 +175,7 @@ static DEFINE_PER_CPU(struct hp_cpu, hp_cpus);
/* links together the hp_cpu structs, in first-come first-serve order. */
static LIST_HEAD(hp_cpu_list);
-static spinlock_t hp_lock = __SPIN_LOCK_UNLOCKED(hp_lock);
+static DEFINE_SPINLOCK(hp_lock);
static unsigned int hp_cpu_list_length;
@@ -191,6 +191,9 @@ static void *__frame_ptr;
static u32 *frame_ptr;
static dma_addr_t frame_dma;
+/* needed for dma_map*() */
+static const struct qm_portal_config *pcfg;
+
/* the main function waits on this */
static DECLARE_WAIT_QUEUE_HEAD(queue);
@@ -210,16 +213,14 @@ static int allocate_frame_data(void)
{
u32 lfsr = HP_FIRST_WORD;
int loop;
- struct platform_device *pdev = platform_device_alloc("foobar", -1);
- if (!pdev) {
- pr_crit("platform_device_alloc() failed");
- return -EIO;
- }
- if (platform_device_add(pdev)) {
- pr_crit("platform_device_add() failed");
+ if (!qman_dma_portal) {
+ pr_crit("portal not available\n");
return -EIO;
}
+
+ pcfg = qman_get_qm_portal_config(qman_dma_portal);
+
__frame_ptr = kmalloc(4 * HP_NUM_WORDS, GFP_KERNEL);
if (!__frame_ptr)
return -ENOMEM;
@@ -229,15 +230,22 @@ static int allocate_frame_data(void)
frame_ptr[loop] = lfsr;
lfsr = do_lfsr(lfsr);
}
- frame_dma = dma_map_single(&pdev->dev, frame_ptr, 4 * HP_NUM_WORDS,
+
+ frame_dma = dma_map_single(pcfg->dev, frame_ptr, 4 * HP_NUM_WORDS,
DMA_BIDIRECTIONAL);
- platform_device_del(pdev);
- platform_device_put(pdev);
+ if (dma_mapping_error(pcfg->dev, frame_dma)) {
+ pr_crit("dma mapping failure\n");
+ kfree(__frame_ptr);
+ return -EIO;
+ }
+
return 0;
}
static void deallocate_frame_data(void)
{
+ dma_unmap_single(pcfg->dev, frame_dma, 4 * HP_NUM_WORDS,
+ DMA_BIDIRECTIONAL);
kfree(__frame_ptr);
}
@@ -249,7 +257,8 @@ static inline int process_frame_data(struct hp_handler *handler,
int loop;
if (qm_fd_addr_get64(fd) != handler->addr) {
- pr_crit("bad frame address");
+ pr_crit("bad frame address, [%llX != %llX]\n",
+ qm_fd_addr_get64(fd), handler->addr);
return -EIO;
}
for (loop = 0; loop < HP_NUM_WORDS; loop++, p++) {
@@ -397,8 +406,9 @@ static int init_handler(void *h)
goto failed;
}
memset(&opts, 0, sizeof(opts));
- opts.we_mask = QM_INITFQ_WE_FQCTRL | QM_INITFQ_WE_CONTEXTA;
- opts.fqd.fq_ctrl = QM_FQCTRL_CTXASTASHING;
+ opts.we_mask = cpu_to_be16(QM_INITFQ_WE_FQCTRL |
+ QM_INITFQ_WE_CONTEXTA);
+ opts.fqd.fq_ctrl = cpu_to_be16(QM_FQCTRL_CTXASTASHING);
qm_fqd_set_stashing(&opts.fqd, 0, STASH_DATA_CL, STASH_CTX_CL);
err = qman_init_fq(&handler->rx, QMAN_INITFQ_FLAG_SCHED |
QMAN_INITFQ_FLAG_LOCAL, &opts);
diff --git a/drivers/soc/fsl/qe/qe.c b/drivers/soc/fsl/qe/qe.c
index 2707a827261b..ade168f5328e 100644
--- a/drivers/soc/fsl/qe/qe.c
+++ b/drivers/soc/fsl/qe/qe.c
@@ -717,9 +717,5 @@ static struct platform_driver qe_driver = {
.resume = qe_resume,
};
-static int __init qe_drv_init(void)
-{
- return platform_driver_register(&qe_driver);
-}
-device_initcall(qe_drv_init);
+builtin_platform_driver(qe_driver);
#endif /* defined(CONFIG_SUSPEND) && defined(CONFIG_PPC_85xx) */
diff --git a/drivers/soc/mediatek/Kconfig b/drivers/soc/mediatek/Kconfig
index 0a4ea809a61b..609bb3424c14 100644
--- a/drivers/soc/mediatek/Kconfig
+++ b/drivers/soc/mediatek/Kconfig
@@ -23,7 +23,7 @@ config MTK_PMIC_WRAP
config MTK_SCPSYS
bool "MediaTek SCPSYS Support"
depends on ARCH_MEDIATEK || COMPILE_TEST
- default ARM64 && ARCH_MEDIATEK
+ default ARCH_MEDIATEK
select REGMAP
select MTK_INFRACFG
select PM_GENERIC_DOMAINS if PM
diff --git a/drivers/soc/mediatek/mtk-scpsys.c b/drivers/soc/mediatek/mtk-scpsys.c
index 837effe19907..beb79162369a 100644
--- a/drivers/soc/mediatek/mtk-scpsys.c
+++ b/drivers/soc/mediatek/mtk-scpsys.c
@@ -11,17 +11,16 @@
* GNU General Public License for more details.
*/
#include <linux/clk.h>
-#include <linux/delay.h>
+#include <linux/init.h>
#include <linux/io.h>
-#include <linux/kernel.h>
#include <linux/mfd/syscon.h>
-#include <linux/init.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
-#include <linux/regmap.h>
-#include <linux/soc/mediatek/infracfg.h>
#include <linux/regulator/consumer.h>
+#include <linux/soc/mediatek/infracfg.h>
+
+#include <dt-bindings/power/mt2701-power.h>
#include <dt-bindings/power/mt8173-power.h>
#define SPM_VDE_PWR_CON 0x0210
@@ -29,11 +28,17 @@
#define SPM_VEN_PWR_CON 0x0230
#define SPM_ISP_PWR_CON 0x0238
#define SPM_DIS_PWR_CON 0x023c
+#define SPM_CONN_PWR_CON 0x0280
#define SPM_VEN2_PWR_CON 0x0298
-#define SPM_AUDIO_PWR_CON 0x029c
+#define SPM_AUDIO_PWR_CON 0x029c /* MT8173 */
+#define SPM_BDP_PWR_CON 0x029c /* MT2701 */
+#define SPM_ETH_PWR_CON 0x02a0
+#define SPM_HIF_PWR_CON 0x02a4
+#define SPM_IFR_MSC_PWR_CON 0x02a8
#define SPM_MFG_2D_PWR_CON 0x02c0
#define SPM_MFG_ASYNC_PWR_CON 0x02c4
#define SPM_USB_PWR_CON 0x02cc
+
#define SPM_PWR_STATUS 0x060c
#define SPM_PWR_STATUS_2ND 0x0610
@@ -43,10 +48,15 @@
#define PWR_ON_2ND_BIT BIT(3)
#define PWR_CLK_DIS_BIT BIT(4)
+#define PWR_STATUS_CONN BIT(1)
#define PWR_STATUS_DISP BIT(3)
#define PWR_STATUS_MFG BIT(4)
#define PWR_STATUS_ISP BIT(5)
#define PWR_STATUS_VDEC BIT(7)
+#define PWR_STATUS_BDP BIT(14)
+#define PWR_STATUS_ETH BIT(15)
+#define PWR_STATUS_HIF BIT(16)
+#define PWR_STATUS_IFR_MSC BIT(17)
#define PWR_STATUS_VENC_LT BIT(20)
#define PWR_STATUS_VENC BIT(21)
#define PWR_STATUS_MFG_2D BIT(22)
@@ -55,12 +65,23 @@
#define PWR_STATUS_USB BIT(25)
enum clk_id {
- MT8173_CLK_NONE,
- MT8173_CLK_MM,
- MT8173_CLK_MFG,
- MT8173_CLK_VENC,
- MT8173_CLK_VENC_LT,
- MT8173_CLK_MAX,
+ CLK_NONE,
+ CLK_MM,
+ CLK_MFG,
+ CLK_VENC,
+ CLK_VENC_LT,
+ CLK_ETHIF,
+ CLK_MAX,
+};
+
+static const char * const clk_names[] = {
+ NULL,
+ "mm",
+ "mfg",
+ "venc",
+ "venc_lt",
+ "ethif",
+ NULL,
};
#define MAX_CLKS 2
@@ -76,98 +97,6 @@ struct scp_domain_data {
bool active_wakeup;
};
-static const struct scp_domain_data scp_domain_data[] = {
- [MT8173_POWER_DOMAIN_VDEC] = {
- .name = "vdec",
- .sta_mask = PWR_STATUS_VDEC,
- .ctl_offs = SPM_VDE_PWR_CON,
- .sram_pdn_bits = GENMASK(11, 8),
- .sram_pdn_ack_bits = GENMASK(12, 12),
- .clk_id = {MT8173_CLK_MM},
- },
- [MT8173_POWER_DOMAIN_VENC] = {
- .name = "venc",
- .sta_mask = PWR_STATUS_VENC,
- .ctl_offs = SPM_VEN_PWR_CON,
- .sram_pdn_bits = GENMASK(11, 8),
- .sram_pdn_ack_bits = GENMASK(15, 12),
- .clk_id = {MT8173_CLK_MM, MT8173_CLK_VENC},
- },
- [MT8173_POWER_DOMAIN_ISP] = {
- .name = "isp",
- .sta_mask = PWR_STATUS_ISP,
- .ctl_offs = SPM_ISP_PWR_CON,
- .sram_pdn_bits = GENMASK(11, 8),
- .sram_pdn_ack_bits = GENMASK(13, 12),
- .clk_id = {MT8173_CLK_MM},
- },
- [MT8173_POWER_DOMAIN_MM] = {
- .name = "mm",
- .sta_mask = PWR_STATUS_DISP,
- .ctl_offs = SPM_DIS_PWR_CON,
- .sram_pdn_bits = GENMASK(11, 8),
- .sram_pdn_ack_bits = GENMASK(12, 12),
- .clk_id = {MT8173_CLK_MM},
- .bus_prot_mask = MT8173_TOP_AXI_PROT_EN_MM_M0 |
- MT8173_TOP_AXI_PROT_EN_MM_M1,
- },
- [MT8173_POWER_DOMAIN_VENC_LT] = {
- .name = "venc_lt",
- .sta_mask = PWR_STATUS_VENC_LT,
- .ctl_offs = SPM_VEN2_PWR_CON,
- .sram_pdn_bits = GENMASK(11, 8),
- .sram_pdn_ack_bits = GENMASK(15, 12),
- .clk_id = {MT8173_CLK_MM, MT8173_CLK_VENC_LT},
- },
- [MT8173_POWER_DOMAIN_AUDIO] = {
- .name = "audio",
- .sta_mask = PWR_STATUS_AUDIO,
- .ctl_offs = SPM_AUDIO_PWR_CON,
- .sram_pdn_bits = GENMASK(11, 8),
- .sram_pdn_ack_bits = GENMASK(15, 12),
- .clk_id = {MT8173_CLK_NONE},
- },
- [MT8173_POWER_DOMAIN_USB] = {
- .name = "usb",
- .sta_mask = PWR_STATUS_USB,
- .ctl_offs = SPM_USB_PWR_CON,
- .sram_pdn_bits = GENMASK(11, 8),
- .sram_pdn_ack_bits = GENMASK(15, 12),
- .clk_id = {MT8173_CLK_NONE},
- .active_wakeup = true,
- },
- [MT8173_POWER_DOMAIN_MFG_ASYNC] = {
- .name = "mfg_async",
- .sta_mask = PWR_STATUS_MFG_ASYNC,
- .ctl_offs = SPM_MFG_ASYNC_PWR_CON,
- .sram_pdn_bits = GENMASK(11, 8),
- .sram_pdn_ack_bits = 0,
- .clk_id = {MT8173_CLK_MFG},
- },
- [MT8173_POWER_DOMAIN_MFG_2D] = {
- .name = "mfg_2d",
- .sta_mask = PWR_STATUS_MFG_2D,
- .ctl_offs = SPM_MFG_2D_PWR_CON,
- .sram_pdn_bits = GENMASK(11, 8),
- .sram_pdn_ack_bits = GENMASK(13, 12),
- .clk_id = {MT8173_CLK_NONE},
- },
- [MT8173_POWER_DOMAIN_MFG] = {
- .name = "mfg",
- .sta_mask = PWR_STATUS_MFG,
- .ctl_offs = SPM_MFG_PWR_CON,
- .sram_pdn_bits = GENMASK(13, 8),
- .sram_pdn_ack_bits = GENMASK(21, 16),
- .clk_id = {MT8173_CLK_NONE},
- .bus_prot_mask = MT8173_TOP_AXI_PROT_EN_MFG_S |
- MT8173_TOP_AXI_PROT_EN_MFG_M0 |
- MT8173_TOP_AXI_PROT_EN_MFG_M1 |
- MT8173_TOP_AXI_PROT_EN_MFG_SNOOP_OUT,
- },
-};
-
-#define NUM_DOMAINS ARRAY_SIZE(scp_domain_data)
-
struct scp;
struct scp_domain {
@@ -179,7 +108,7 @@ struct scp_domain {
};
struct scp {
- struct scp_domain domains[NUM_DOMAINS];
+ struct scp_domain *domains;
struct genpd_onecell_data pd_data;
struct device *dev;
void __iomem *base;
@@ -408,57 +337,55 @@ static bool scpsys_active_wakeup(struct device *dev)
return scpd->data->active_wakeup;
}
-static int scpsys_probe(struct platform_device *pdev)
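+/*
+ * Request every clock up front; unavailable ones remain ERR_PTR and are
+ * validated later, when a domain actually references them.
+ */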
+static void init_clks(struct platform_device *pdev, struct clk **clk)
+{
+ int i;
+
+ for (i = CLK_NONE + 1; i < CLK_MAX; i++)
+ clk[i] = devm_clk_get(&pdev->dev, clk_names[i]);
+}
+
+static struct scp *init_scp(struct platform_device *pdev,
+ const struct scp_domain_data *scp_domain_data, int num)
{
struct genpd_onecell_data *pd_data;
struct resource *res;
- int i, j, ret;
+ int i, j;
struct scp *scp;
- struct clk *clk[MT8173_CLK_MAX];
+ struct clk *clk[CLK_MAX];
scp = devm_kzalloc(&pdev->dev, sizeof(*scp), GFP_KERNEL);
if (!scp)
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
scp->dev = &pdev->dev;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
scp->base = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(scp->base))
- return PTR_ERR(scp->base);
+ return ERR_CAST(scp->base);
+
+ scp->domains = devm_kzalloc(&pdev->dev,
+ sizeof(*scp->domains) * num, GFP_KERNEL);
+ if (!scp->domains)
+ return ERR_PTR(-ENOMEM);
pd_data = &scp->pd_data;
pd_data->domains = devm_kzalloc(&pdev->dev,
- sizeof(*pd_data->domains) * NUM_DOMAINS, GFP_KERNEL);
+ sizeof(*pd_data->domains) * num, GFP_KERNEL);
if (!pd_data->domains)
- return -ENOMEM;
-
- clk[MT8173_CLK_MM] = devm_clk_get(&pdev->dev, "mm");
- if (IS_ERR(clk[MT8173_CLK_MM]))
- return PTR_ERR(clk[MT8173_CLK_MM]);
-
- clk[MT8173_CLK_MFG] = devm_clk_get(&pdev->dev, "mfg");
- if (IS_ERR(clk[MT8173_CLK_MFG]))
- return PTR_ERR(clk[MT8173_CLK_MFG]);
-
- clk[MT8173_CLK_VENC] = devm_clk_get(&pdev->dev, "venc");
- if (IS_ERR(clk[MT8173_CLK_VENC]))
- return PTR_ERR(clk[MT8173_CLK_VENC]);
-
- clk[MT8173_CLK_VENC_LT] = devm_clk_get(&pdev->dev, "venc_lt");
- if (IS_ERR(clk[MT8173_CLK_VENC_LT]))
- return PTR_ERR(clk[MT8173_CLK_VENC_LT]);
+ return ERR_PTR(-ENOMEM);
scp->infracfg = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
"infracfg");
if (IS_ERR(scp->infracfg)) {
dev_err(&pdev->dev, "Cannot find infracfg controller: %ld\n",
PTR_ERR(scp->infracfg));
- return PTR_ERR(scp->infracfg);
+ return ERR_CAST(scp->infracfg);
}
- for (i = 0; i < NUM_DOMAINS; i++) {
+ for (i = 0; i < num; i++) {
struct scp_domain *scpd = &scp->domains[i];
const struct scp_domain_data *data = &scp_domain_data[i];
@@ -467,13 +394,15 @@ static int scpsys_probe(struct platform_device *pdev)
if (PTR_ERR(scpd->supply) == -ENODEV)
scpd->supply = NULL;
else
- return PTR_ERR(scpd->supply);
+ return ERR_CAST(scpd->supply);
}
}
- pd_data->num_domains = NUM_DOMAINS;
+ pd_data->num_domains = num;
+
+ init_clks(pdev, clk);
- for (i = 0; i < NUM_DOMAINS; i++) {
+ for (i = 0; i < num; i++) {
struct scp_domain *scpd = &scp->domains[i];
struct generic_pm_domain *genpd = &scpd->genpd;
const struct scp_domain_data *data = &scp_domain_data[i];
@@ -482,13 +411,37 @@ static int scpsys_probe(struct platform_device *pdev)
scpd->scp = scp;
scpd->data = data;
- for (j = 0; j < MAX_CLKS && data->clk_id[j]; j++)
- scpd->clk[j] = clk[data->clk_id[j]];
+
+ for (j = 0; j < MAX_CLKS && data->clk_id[j]; j++) {
+ struct clk *c = clk[data->clk_id[j]];
+
+ if (IS_ERR(c)) {
+ dev_err(&pdev->dev, "%s: clk unavailable\n",
+ data->name);
+ return ERR_CAST(c);
+ }
+
+ scpd->clk[j] = c;
+ }
genpd->name = data->name;
genpd->power_off = scpsys_power_off;
genpd->power_on = scpsys_power_on;
genpd->dev_ops.active_wakeup = scpsys_active_wakeup;
+ }
+
+ return scp;
+}
+
+static void mtk_register_power_domains(struct platform_device *pdev,
+ struct scp *scp, int num)
+{
+ struct genpd_onecell_data *pd_data;
+ int i, ret;
+
+ for (i = 0; i < num; i++) {
+ struct scp_domain *scpd = &scp->domains[i];
+ struct generic_pm_domain *genpd = &scpd->genpd;
/*
* Initially turn on all domains to make the domains usable
@@ -507,6 +460,222 @@ static int scpsys_probe(struct platform_device *pdev)
* valid.
*/
+ pd_data = &scp->pd_data;
+
+ ret = of_genpd_add_provider_onecell(pdev->dev.of_node, pd_data);
+ if (ret)
+ dev_err(&pdev->dev, "Failed to add OF provider: %d\n", ret);
+}
+
+/*
+ * MT2701 power domain support
+ */
+
+static const struct scp_domain_data scp_domain_data_mt2701[] = {
+ [MT2701_POWER_DOMAIN_CONN] = {
+ .name = "conn",
+ .sta_mask = PWR_STATUS_CONN,
+ .ctl_offs = SPM_CONN_PWR_CON,
+ .bus_prot_mask = 0x0104,
+ .clk_id = {CLK_NONE},
+ .active_wakeup = true,
+ },
+ [MT2701_POWER_DOMAIN_DISP] = {
+ .name = "disp",
+ .sta_mask = PWR_STATUS_DISP,
+ .ctl_offs = SPM_DIS_PWR_CON,
+ .sram_pdn_bits = GENMASK(11, 8),
+ .clk_id = {CLK_MM},
+ .bus_prot_mask = 0x0002,
+ .active_wakeup = true,
+ },
+ [MT2701_POWER_DOMAIN_MFG] = {
+ .name = "mfg",
+ .sta_mask = PWR_STATUS_MFG,
+ .ctl_offs = SPM_MFG_PWR_CON,
+ .sram_pdn_bits = GENMASK(11, 8),
+ .sram_pdn_ack_bits = GENMASK(12, 12),
+ .clk_id = {CLK_MFG},
+ .active_wakeup = true,
+ },
+ [MT2701_POWER_DOMAIN_VDEC] = {
+ .name = "vdec",
+ .sta_mask = PWR_STATUS_VDEC,
+ .ctl_offs = SPM_VDE_PWR_CON,
+ .sram_pdn_bits = GENMASK(11, 8),
+ .sram_pdn_ack_bits = GENMASK(12, 12),
+ .clk_id = {CLK_MM},
+ .active_wakeup = true,
+ },
+ [MT2701_POWER_DOMAIN_ISP] = {
+ .name = "isp",
+ .sta_mask = PWR_STATUS_ISP,
+ .ctl_offs = SPM_ISP_PWR_CON,
+ .sram_pdn_bits = GENMASK(11, 8),
+ .sram_pdn_ack_bits = GENMASK(13, 12),
+ .clk_id = {CLK_MM},
+ .active_wakeup = true,
+ },
+ [MT2701_POWER_DOMAIN_BDP] = {
+ .name = "bdp",
+ .sta_mask = PWR_STATUS_BDP,
+ .ctl_offs = SPM_BDP_PWR_CON,
+ .sram_pdn_bits = GENMASK(11, 8),
+ .clk_id = {CLK_NONE},
+ .active_wakeup = true,
+ },
+ [MT2701_POWER_DOMAIN_ETH] = {
+ .name = "eth",
+ .sta_mask = PWR_STATUS_ETH,
+ .ctl_offs = SPM_ETH_PWR_CON,
+ .sram_pdn_bits = GENMASK(11, 8),
+ .sram_pdn_ack_bits = GENMASK(15, 12),
+ .clk_id = {CLK_ETHIF},
+ .active_wakeup = true,
+ },
+ [MT2701_POWER_DOMAIN_HIF] = {
+ .name = "hif",
+ .sta_mask = PWR_STATUS_HIF,
+ .ctl_offs = SPM_HIF_PWR_CON,
+ .sram_pdn_bits = GENMASK(11, 8),
+ .sram_pdn_ack_bits = GENMASK(15, 12),
+ .clk_id = {CLK_ETHIF},
+ .active_wakeup = true,
+ },
+ [MT2701_POWER_DOMAIN_IFR_MSC] = {
+ .name = "ifr_msc",
+ .sta_mask = PWR_STATUS_IFR_MSC,
+ .ctl_offs = SPM_IFR_MSC_PWR_CON,
+ .clk_id = {CLK_NONE},
+ .active_wakeup = true,
+ },
+};
+
+#define NUM_DOMAINS_MT2701 ARRAY_SIZE(scp_domain_data_mt2701)
+
+static int __init scpsys_probe_mt2701(struct platform_device *pdev)
+{
+ struct scp *scp;
+
+ scp = init_scp(pdev, scp_domain_data_mt2701, NUM_DOMAINS_MT2701);
+ if (IS_ERR(scp))
+ return PTR_ERR(scp);
+
+ mtk_register_power_domains(pdev, scp, NUM_DOMAINS_MT2701);
+
+ return 0;
+}
+
+/*
+ * MT8173 power domain support
+ */
+
+static const struct scp_domain_data scp_domain_data_mt8173[] = {
+ [MT8173_POWER_DOMAIN_VDEC] = {
+ .name = "vdec",
+ .sta_mask = PWR_STATUS_VDEC,
+ .ctl_offs = SPM_VDE_PWR_CON,
+ .sram_pdn_bits = GENMASK(11, 8),
+ .sram_pdn_ack_bits = GENMASK(12, 12),
+ .clk_id = {CLK_MM},
+ },
+ [MT8173_POWER_DOMAIN_VENC] = {
+ .name = "venc",
+ .sta_mask = PWR_STATUS_VENC,
+ .ctl_offs = SPM_VEN_PWR_CON,
+ .sram_pdn_bits = GENMASK(11, 8),
+ .sram_pdn_ack_bits = GENMASK(15, 12),
+ .clk_id = {CLK_MM, CLK_VENC},
+ },
+ [MT8173_POWER_DOMAIN_ISP] = {
+ .name = "isp",
+ .sta_mask = PWR_STATUS_ISP,
+ .ctl_offs = SPM_ISP_PWR_CON,
+ .sram_pdn_bits = GENMASK(11, 8),
+ .sram_pdn_ack_bits = GENMASK(13, 12),
+ .clk_id = {CLK_MM},
+ },
+ [MT8173_POWER_DOMAIN_MM] = {
+ .name = "mm",
+ .sta_mask = PWR_STATUS_DISP,
+ .ctl_offs = SPM_DIS_PWR_CON,
+ .sram_pdn_bits = GENMASK(11, 8),
+ .sram_pdn_ack_bits = GENMASK(12, 12),
+ .clk_id = {CLK_MM},
+ .bus_prot_mask = MT8173_TOP_AXI_PROT_EN_MM_M0 |
+ MT8173_TOP_AXI_PROT_EN_MM_M1,
+ },
+ [MT8173_POWER_DOMAIN_VENC_LT] = {
+ .name = "venc_lt",
+ .sta_mask = PWR_STATUS_VENC_LT,
+ .ctl_offs = SPM_VEN2_PWR_CON,
+ .sram_pdn_bits = GENMASK(11, 8),
+ .sram_pdn_ack_bits = GENMASK(15, 12),
+ .clk_id = {CLK_MM, CLK_VENC_LT},
+ },
+ [MT8173_POWER_DOMAIN_AUDIO] = {
+ .name = "audio",
+ .sta_mask = PWR_STATUS_AUDIO,
+ .ctl_offs = SPM_AUDIO_PWR_CON,
+ .sram_pdn_bits = GENMASK(11, 8),
+ .sram_pdn_ack_bits = GENMASK(15, 12),
+ .clk_id = {CLK_NONE},
+ },
+ [MT8173_POWER_DOMAIN_USB] = {
+ .name = "usb",
+ .sta_mask = PWR_STATUS_USB,
+ .ctl_offs = SPM_USB_PWR_CON,
+ .sram_pdn_bits = GENMASK(11, 8),
+ .sram_pdn_ack_bits = GENMASK(15, 12),
+ .clk_id = {CLK_NONE},
+ .active_wakeup = true,
+ },
+ [MT8173_POWER_DOMAIN_MFG_ASYNC] = {
+ .name = "mfg_async",
+ .sta_mask = PWR_STATUS_MFG_ASYNC,
+ .ctl_offs = SPM_MFG_ASYNC_PWR_CON,
+ .sram_pdn_bits = GENMASK(11, 8),
+ .sram_pdn_ack_bits = 0,
+ .clk_id = {CLK_MFG},
+ },
+ [MT8173_POWER_DOMAIN_MFG_2D] = {
+ .name = "mfg_2d",
+ .sta_mask = PWR_STATUS_MFG_2D,
+ .ctl_offs = SPM_MFG_2D_PWR_CON,
+ .sram_pdn_bits = GENMASK(11, 8),
+ .sram_pdn_ack_bits = GENMASK(13, 12),
+ .clk_id = {CLK_NONE},
+ },
+ [MT8173_POWER_DOMAIN_MFG] = {
+ .name = "mfg",
+ .sta_mask = PWR_STATUS_MFG,
+ .ctl_offs = SPM_MFG_PWR_CON,
+ .sram_pdn_bits = GENMASK(13, 8),
+ .sram_pdn_ack_bits = GENMASK(21, 16),
+ .clk_id = {CLK_NONE},
+ .bus_prot_mask = MT8173_TOP_AXI_PROT_EN_MFG_S |
+ MT8173_TOP_AXI_PROT_EN_MFG_M0 |
+ MT8173_TOP_AXI_PROT_EN_MFG_M1 |
+ MT8173_TOP_AXI_PROT_EN_MFG_SNOOP_OUT,
+ },
+};
+
+#define NUM_DOMAINS_MT8173 ARRAY_SIZE(scp_domain_data_mt8173)
+
+static int __init scpsys_probe_mt8173(struct platform_device *pdev)
+{
+ struct scp *scp;
+ struct genpd_onecell_data *pd_data;
+ int ret;
+
+ scp = init_scp(pdev, scp_domain_data_mt8173, NUM_DOMAINS_MT8173);
+ if (IS_ERR(scp))
+ return PTR_ERR(scp);
+
+ mtk_register_power_domains(pdev, scp, NUM_DOMAINS_MT8173);
+
+ pd_data = &scp->pd_data;
+
ret = pm_genpd_add_subdomain(pd_data->domains[MT8173_POWER_DOMAIN_MFG_ASYNC],
pd_data->domains[MT8173_POWER_DOMAIN_MFG_2D]);
if (ret && IS_ENABLED(CONFIG_PM))
@@ -517,21 +686,39 @@ static int scpsys_probe(struct platform_device *pdev)
if (ret && IS_ENABLED(CONFIG_PM))
dev_err(&pdev->dev, "Failed to add subdomain: %d\n", ret);
- ret = of_genpd_add_provider_onecell(pdev->dev.of_node, pd_data);
- if (ret)
- dev_err(&pdev->dev, "Failed to add OF provider: %d\n", ret);
-
return 0;
}
+/*
+ * scpsys driver init
+ */
+
static const struct of_device_id of_scpsys_match_tbl[] = {
{
+ .compatible = "mediatek,mt2701-scpsys",
+ .data = scpsys_probe_mt2701,
+ }, {
.compatible = "mediatek,mt8173-scpsys",
+ .data = scpsys_probe_mt8173,
}, {
/* sentinel */
}
};
+static int scpsys_probe(struct platform_device *pdev)
+{
+ int (*probe)(struct platform_device *);
+ const struct of_device_id *of_id;
+
+ of_id = of_match_node(of_scpsys_match_tbl, pdev->dev.of_node);
+ if (!of_id || !of_id->data)
+ return -EINVAL;
+
+ probe = of_id->data;
+
+ return probe(pdev);
+}
+
static struct platform_driver scpsys_drv = {
.probe = scpsys_probe,
.driver = {
diff --git a/drivers/soc/renesas/Makefile b/drivers/soc/renesas/Makefile
index 623039c3514c..d9115cb5ed9d 100644
--- a/drivers/soc/renesas/Makefile
+++ b/drivers/soc/renesas/Makefile
@@ -1,3 +1,12 @@
+obj-$(CONFIG_SOC_BUS) += renesas-soc.o
+
+obj-$(CONFIG_ARCH_RCAR_GEN1) += rcar-rst.o
+obj-$(CONFIG_ARCH_RCAR_GEN2) += rcar-rst.o
+obj-$(CONFIG_ARCH_R8A7795) += rcar-rst.o
+obj-$(CONFIG_ARCH_R8A7796) += rcar-rst.o
+
+obj-$(CONFIG_ARCH_R8A7743) += rcar-sysc.o r8a7743-sysc.o
+obj-$(CONFIG_ARCH_R8A7745) += rcar-sysc.o r8a7745-sysc.o
obj-$(CONFIG_ARCH_R8A7779) += rcar-sysc.o r8a7779-sysc.o
obj-$(CONFIG_ARCH_R8A7790) += rcar-sysc.o r8a7790-sysc.o
obj-$(CONFIG_ARCH_R8A7791) += rcar-sysc.o r8a7791-sysc.o
diff --git a/drivers/soc/renesas/r8a7743-sysc.c b/drivers/soc/renesas/r8a7743-sysc.c
new file mode 100644
index 000000000000..9583a327d90c
--- /dev/null
+++ b/drivers/soc/renesas/r8a7743-sysc.c
@@ -0,0 +1,32 @@
+/*
+ * Renesas RZ/G1M System Controller
+ *
+ * Copyright (C) 2016 Cogent Embedded Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/bug.h>
+#include <linux/kernel.h>
+
+#include <dt-bindings/power/r8a7743-sysc.h>
+
+#include "rcar-sysc.h"
+
+static const struct rcar_sysc_area r8a7743_areas[] __initconst = {
+ { "always-on", 0, 0, R8A7743_PD_ALWAYS_ON, -1, PD_ALWAYS_ON },
+ { "ca15-scu", 0x180, 0, R8A7743_PD_CA15_SCU, R8A7743_PD_ALWAYS_ON,
+ PD_SCU },
+ { "ca15-cpu0", 0x40, 0, R8A7743_PD_CA15_CPU0, R8A7743_PD_CA15_SCU,
+ PD_CPU_NOCR },
+ { "ca15-cpu1", 0x40, 1, R8A7743_PD_CA15_CPU1, R8A7743_PD_CA15_SCU,
+ PD_CPU_NOCR },
+ { "sgx", 0xc0, 0, R8A7743_PD_SGX, R8A7743_PD_ALWAYS_ON },
+};
+
+const struct rcar_sysc_info r8a7743_sysc_info __initconst = {
+ .areas = r8a7743_areas,
+ .num_areas = ARRAY_SIZE(r8a7743_areas),
+};
diff --git a/drivers/soc/renesas/r8a7745-sysc.c b/drivers/soc/renesas/r8a7745-sysc.c
new file mode 100644
index 000000000000..d17887c08aa1
--- /dev/null
+++ b/drivers/soc/renesas/r8a7745-sysc.c
@@ -0,0 +1,32 @@
+/*
+ * Renesas RZ/G1E System Controller
+ *
+ * Copyright (C) 2016 Cogent Embedded Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/bug.h>
+#include <linux/kernel.h>
+
+#include <dt-bindings/power/r8a7745-sysc.h>
+
+#include "rcar-sysc.h"
+
+static const struct rcar_sysc_area r8a7745_areas[] __initconst = {
+ { "always-on", 0, 0, R8A7745_PD_ALWAYS_ON, -1, PD_ALWAYS_ON },
+ { "ca7-scu", 0x100, 0, R8A7745_PD_CA7_SCU, R8A7745_PD_ALWAYS_ON,
+ PD_SCU },
+ { "ca7-cpu0", 0x1c0, 0, R8A7745_PD_CA7_CPU0, R8A7745_PD_CA7_SCU,
+ PD_CPU_NOCR },
+ { "ca7-cpu1", 0x1c0, 1, R8A7745_PD_CA7_CPU1, R8A7745_PD_CA7_SCU,
+ PD_CPU_NOCR },
+ { "sgx", 0xc0, 0, R8A7745_PD_SGX, R8A7745_PD_ALWAYS_ON },
+};
+
+const struct rcar_sysc_info r8a7745_sysc_info __initconst = {
+ .areas = r8a7745_areas,
+ .num_areas = ARRAY_SIZE(r8a7745_areas),
+};
diff --git a/drivers/soc/renesas/rcar-rst.c b/drivers/soc/renesas/rcar-rst.c
new file mode 100644
index 000000000000..a6d1c26d3167
--- /dev/null
+++ b/drivers/soc/renesas/rcar-rst.c
@@ -0,0 +1,92 @@
+/*
+ * R-Car Gen1 RESET/WDT, R-Car Gen2, Gen3, and RZ/G RST Driver
+ *
+ * Copyright (C) 2016 Glider bvba
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/of_address.h>
+#include <linux/soc/renesas/rcar-rst.h>
+
+struct rst_config {
+ unsigned int modemr; /* Mode Monitoring Register Offset */
+};
+
+static const struct rst_config rcar_rst_gen1 __initconst = {
+ .modemr = 0x20,
+};
+
+static const struct rst_config rcar_rst_gen2 __initconst = {
+ .modemr = 0x60,
+};
+
+static const struct of_device_id rcar_rst_matches[] __initconst = {
+ /* RZ/G is handled like R-Car Gen2 */
+ { .compatible = "renesas,r8a7743-rst", .data = &rcar_rst_gen2 },
+ { .compatible = "renesas,r8a7745-rst", .data = &rcar_rst_gen2 },
+ /* R-Car Gen1 */
+ { .compatible = "renesas,r8a7778-reset-wdt", .data = &rcar_rst_gen1 },
+ { .compatible = "renesas,r8a7779-reset-wdt", .data = &rcar_rst_gen1 },
+ /* R-Car Gen2 */
+ { .compatible = "renesas,r8a7790-rst", .data = &rcar_rst_gen2 },
+ { .compatible = "renesas,r8a7791-rst", .data = &rcar_rst_gen2 },
+ { .compatible = "renesas,r8a7792-rst", .data = &rcar_rst_gen2 },
+ { .compatible = "renesas,r8a7793-rst", .data = &rcar_rst_gen2 },
+ { .compatible = "renesas,r8a7794-rst", .data = &rcar_rst_gen2 },
+ /* R-Car Gen3 is handled like R-Car Gen2 */
+ { .compatible = "renesas,r8a7795-rst", .data = &rcar_rst_gen2 },
+ { .compatible = "renesas,r8a7796-rst", .data = &rcar_rst_gen2 },
+ { /* sentinel */ }
+};
+
+static void __iomem *rcar_rst_base __initdata;
+static u32 saved_mode __initdata;
+
+static int __init rcar_rst_init(void)
+{
+ const struct of_device_id *match;
+ const struct rst_config *cfg;
+ struct device_node *np;
+ void __iomem *base;
+ int error = 0;
+
+ np = of_find_matching_node_and_match(NULL, rcar_rst_matches, &match);
+ if (!np)
+ return -ENODEV;
+
+ base = of_iomap(np, 0);
+ if (!base) {
+ pr_warn("%s: Cannot map regs\n", np->full_name);
+ error = -ENOMEM;
+ goto out_put;
+ }
+
+ rcar_rst_base = base;
+ cfg = match->data;
+ saved_mode = ioread32(base + cfg->modemr);
+
+ pr_debug("%s: MODE = 0x%08x\n", np->full_name, saved_mode);
+
+out_put:
+ of_node_put(np);
+ return error;
+}
+
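+/* lazily initialize the RST block on first use, then return the cached mode pins */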
+int __init rcar_rst_read_mode_pins(u32 *mode)
+{
+ int error;
+
+ if (!rcar_rst_base) {
+ error = rcar_rst_init();
+ if (error)
+ return error;
+ }
+
+ *mode = saved_mode;
+ return 0;
+}
diff --git a/drivers/soc/renesas/rcar-sysc.c b/drivers/soc/renesas/rcar-sysc.c
index 65c8e1eb90c0..225c35c79d9a 100644
--- a/drivers/soc/renesas/rcar-sysc.c
+++ b/drivers/soc/renesas/rcar-sysc.c
@@ -275,6 +275,12 @@ finalize:
}
static const struct of_device_id rcar_sysc_matches[] = {
+#ifdef CONFIG_ARCH_R8A7743
+ { .compatible = "renesas,r8a7743-sysc", .data = &r8a7743_sysc_info },
+#endif
+#ifdef CONFIG_ARCH_R8A7745
+ { .compatible = "renesas,r8a7745-sysc", .data = &r8a7745_sysc_info },
+#endif
#ifdef CONFIG_ARCH_R8A7779
{ .compatible = "renesas,r8a7779-sysc", .data = &r8a7779_sysc_info },
#endif
diff --git a/drivers/soc/renesas/rcar-sysc.h b/drivers/soc/renesas/rcar-sysc.h
index 77dbe861473f..f6e842e2976e 100644
--- a/drivers/soc/renesas/rcar-sysc.h
+++ b/drivers/soc/renesas/rcar-sysc.h
@@ -50,6 +50,8 @@ struct rcar_sysc_info {
unsigned int num_areas;
};
+extern const struct rcar_sysc_info r8a7743_sysc_info;
+extern const struct rcar_sysc_info r8a7745_sysc_info;
extern const struct rcar_sysc_info r8a7779_sysc_info;
extern const struct rcar_sysc_info r8a7790_sysc_info;
extern const struct rcar_sysc_info r8a7791_sysc_info;
diff --git a/drivers/soc/renesas/renesas-soc.c b/drivers/soc/renesas/renesas-soc.c
new file mode 100644
index 000000000000..330960312296
--- /dev/null
+++ b/drivers/soc/renesas/renesas-soc.c
@@ -0,0 +1,257 @@
+/*
+ * Renesas SoC Identification
+ *
+ * Copyright (C) 2014-2016 Glider bvba
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/sys_soc.h>
+
+
+struct renesas_family {
+ const char name[16];
+ u32 reg; /* CCCR or PRR, if not in DT */
+};
+
+static const struct renesas_family fam_rcar_gen1 __initconst __maybe_unused = {
+ .name = "R-Car Gen1",
+ .reg = 0xff000044, /* PRR (Product Register) */
+};
+
+static const struct renesas_family fam_rcar_gen2 __initconst __maybe_unused = {
+ .name = "R-Car Gen2",
+ .reg = 0xff000044, /* PRR (Product Register) */
+};
+
+static const struct renesas_family fam_rcar_gen3 __initconst __maybe_unused = {
+ .name = "R-Car Gen3",
+ .reg = 0xfff00044, /* PRR (Product Register) */
+};
+
+static const struct renesas_family fam_rmobile __initconst __maybe_unused = {
+ .name = "R-Mobile",
+ .reg = 0xe600101c, /* CCCR (Common Chip Code Register) */
+};
+
+static const struct renesas_family fam_rza __initconst __maybe_unused = {
+ .name = "RZ/A",
+};
+
+static const struct renesas_family fam_rzg __initconst __maybe_unused = {
+ .name = "RZ/G",
+ .reg = 0xff000044, /* PRR (Product Register) */
+};
+
+static const struct renesas_family fam_shmobile __initconst __maybe_unused = {
+ .name = "SH-Mobile",
+ .reg = 0xe600101c, /* CCCR (Common Chip Code Register) */
+};
+
+
+struct renesas_soc {
+ const struct renesas_family *family;
+ u8 id;
+};
+
+static const struct renesas_soc soc_rz_a1h __initconst __maybe_unused = {
+ .family = &fam_rza,
+};
+
+static const struct renesas_soc soc_rmobile_ape6 __initconst __maybe_unused = {
+ .family = &fam_rmobile,
+ .id = 0x3f,
+};
+
+static const struct renesas_soc soc_rmobile_a1 __initconst __maybe_unused = {
+ .family = &fam_rmobile,
+ .id = 0x40,
+};
+
+static const struct renesas_soc soc_rz_g1m __initconst __maybe_unused = {
+ .family = &fam_rzg,
+ .id = 0x47,
+};
+
+static const struct renesas_soc soc_rz_g1e __initconst __maybe_unused = {
+ .family = &fam_rzg,
+ .id = 0x4c,
+};
+
+static const struct renesas_soc soc_rcar_m1a __initconst __maybe_unused = {
+ .family = &fam_rcar_gen1,
+};
+
+static const struct renesas_soc soc_rcar_h1 __initconst __maybe_unused = {
+ .family = &fam_rcar_gen1,
+ .id = 0x3b,
+};
+
+static const struct renesas_soc soc_rcar_h2 __initconst __maybe_unused = {
+ .family = &fam_rcar_gen2,
+ .id = 0x45,
+};
+
+static const struct renesas_soc soc_rcar_m2_w __initconst __maybe_unused = {
+ .family = &fam_rcar_gen2,
+ .id = 0x47,
+};
+
+static const struct renesas_soc soc_rcar_v2h __initconst __maybe_unused = {
+ .family = &fam_rcar_gen2,
+ .id = 0x4a,
+};
+
+static const struct renesas_soc soc_rcar_m2_n __initconst __maybe_unused = {
+ .family = &fam_rcar_gen2,
+ .id = 0x4b,
+};
+
+static const struct renesas_soc soc_rcar_e2 __initconst __maybe_unused = {
+ .family = &fam_rcar_gen2,
+ .id = 0x4c,
+};
+
+static const struct renesas_soc soc_rcar_h3 __initconst __maybe_unused = {
+ .family = &fam_rcar_gen3,
+ .id = 0x4f,
+};
+
+static const struct renesas_soc soc_rcar_m3_w __initconst __maybe_unused = {
+ .family = &fam_rcar_gen3,
+ .id = 0x52,
+};
+
+static const struct renesas_soc soc_shmobile_ag5 __initconst __maybe_unused = {
+ .family = &fam_shmobile,
+ .id = 0x37,
+};
+
+
+static const struct of_device_id renesas_socs[] __initconst = {
+#ifdef CONFIG_ARCH_R7S72100
+ { .compatible = "renesas,r7s72100", .data = &soc_rz_a1h },
+#endif
+#ifdef CONFIG_ARCH_R8A73A4
+ { .compatible = "renesas,r8a73a4", .data = &soc_rmobile_ape6 },
+#endif
+#ifdef CONFIG_ARCH_R8A7740
+ { .compatible = "renesas,r8a7740", .data = &soc_rmobile_a1 },
+#endif
+#ifdef CONFIG_ARCH_R8A7743
+ { .compatible = "renesas,r8a7743", .data = &soc_rz_g1m },
+#endif
+#ifdef CONFIG_ARCH_R8A7745
+ { .compatible = "renesas,r8a7745", .data = &soc_rz_g1e },
+#endif
+#ifdef CONFIG_ARCH_R8A7778
+ { .compatible = "renesas,r8a7778", .data = &soc_rcar_m1a },
+#endif
+#ifdef CONFIG_ARCH_R8A7779
+ { .compatible = "renesas,r8a7779", .data = &soc_rcar_h1 },
+#endif
+#ifdef CONFIG_ARCH_R8A7790
+ { .compatible = "renesas,r8a7790", .data = &soc_rcar_h2 },
+#endif
+#ifdef CONFIG_ARCH_R8A7791
+ { .compatible = "renesas,r8a7791", .data = &soc_rcar_m2_w },
+#endif
+#ifdef CONFIG_ARCH_R8A7792
+ { .compatible = "renesas,r8a7792", .data = &soc_rcar_v2h },
+#endif
+#ifdef CONFIG_ARCH_R8A7793
+ { .compatible = "renesas,r8a7793", .data = &soc_rcar_m2_n },
+#endif
+#ifdef CONFIG_ARCH_R8A7794
+ { .compatible = "renesas,r8a7794", .data = &soc_rcar_e2 },
+#endif
+#ifdef CONFIG_ARCH_R8A7795
+ { .compatible = "renesas,r8a7795", .data = &soc_rcar_h3 },
+#endif
+#ifdef CONFIG_ARCH_R8A7796
+ { .compatible = "renesas,r8a7796", .data = &soc_rcar_m3_w },
+#endif
+#ifdef CONFIG_ARCH_SH73A0
+ { .compatible = "renesas,sh73a0", .data = &soc_shmobile_ag5 },
+#endif
+ { /* sentinel */ }
+};
+
+static int __init renesas_soc_init(void)
+{
+ struct soc_device_attribute *soc_dev_attr;
+ const struct renesas_family *family;
+ const struct of_device_id *match;
+ const struct renesas_soc *soc;
+ void __iomem *chipid = NULL;
+ struct soc_device *soc_dev;
+ struct device_node *np;
+ unsigned int product;
+
+ match = of_match_node(renesas_socs, of_root);
+ if (!match)
+ return -ENODEV;
+
+ soc = match->data;
+ family = soc->family;
+
+ /* Try PRR first, then hardcoded fallback */
+ np = of_find_compatible_node(NULL, NULL, "renesas,prr");
+ if (np) {
+ chipid = of_iomap(np, 0);
+ of_node_put(np);
+ } else if (soc->id) {
+ chipid = ioremap(family->reg, 4);
+ }
+ if (chipid) {
+ product = readl(chipid);
+ iounmap(chipid);
+ if (soc->id && ((product >> 8) & 0xff) != soc->id) {
+ pr_warn("SoC mismatch (product = 0x%x)\n", product);
+ return -ENODEV;
+ }
+ }
+
+ soc_dev_attr = kzalloc(sizeof(*soc_dev_attr), GFP_KERNEL);
+ if (!soc_dev_attr)
+ return -ENOMEM;
+
+ np = of_find_node_by_path("/");
+ of_property_read_string(np, "model", &soc_dev_attr->machine);
+ of_node_put(np);
+
+ soc_dev_attr->family = kstrdup_const(family->name, GFP_KERNEL);
+ soc_dev_attr->soc_id = kstrdup_const(strchr(match->compatible, ',') + 1,
+ GFP_KERNEL);
+ if (chipid)
+ soc_dev_attr->revision = kasprintf(GFP_KERNEL, "ES%u.%u",
+ ((product >> 4) & 0x0f) + 1,
+ product & 0xf);
+
+ pr_info("Detected Renesas %s %s %s\n", soc_dev_attr->family,
+ soc_dev_attr->soc_id, soc_dev_attr->revision ?: "");
+
+ soc_dev = soc_device_register(soc_dev_attr);
+ if (IS_ERR(soc_dev)) {
+ kfree(soc_dev_attr->revision);
+ kfree_const(soc_dev_attr->soc_id);
+ kfree_const(soc_dev_attr->family);
+ kfree(soc_dev_attr);
+ return PTR_ERR(soc_dev);
+ }
+
+ return 0;
+}
+core_initcall(renesas_soc_init);
diff --git a/drivers/soc/rockchip/pm_domains.c b/drivers/soc/rockchip/pm_domains.c
index 7acd1517dd37..1c78c42416c6 100644
--- a/drivers/soc/rockchip/pm_domains.c
+++ b/drivers/soc/rockchip/pm_domains.c
@@ -9,6 +9,7 @@
*/
#include <linux/io.h>
+#include <linux/iopoll.h>
#include <linux/err.h>
#include <linux/pm_clock.h>
#include <linux/pm_domain.h>
@@ -105,12 +106,24 @@ static bool rockchip_pmu_domain_is_idle(struct rockchip_pm_domain *pd)
return (val & pd_info->idle_mask) == pd_info->idle_mask;
}
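+/* accessor for the idle-ack register, in a form usable by readx_poll_timeout_atomic() */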
+static unsigned int rockchip_pmu_read_ack(struct rockchip_pmu *pmu)
+{
+ unsigned int val;
+
+ regmap_read(pmu->regmap, pmu->info->ack_offset, &val);
+ return val;
+}
+
static int rockchip_pmu_set_idle_request(struct rockchip_pm_domain *pd,
bool idle)
{
const struct rockchip_domain_info *pd_info = pd->info;
+ struct generic_pm_domain *genpd = &pd->genpd;
struct rockchip_pmu *pmu = pd->pmu;
+ unsigned int target_ack;
unsigned int val;
+ bool is_idle;
+ int ret;
if (pd_info->req_mask == 0)
return 0;
@@ -120,12 +133,26 @@ static int rockchip_pmu_set_idle_request(struct rockchip_pm_domain *pd,
dsb(sy);
- do {
- regmap_read(pmu->regmap, pmu->info->ack_offset, &val);
- } while ((val & pd_info->ack_mask) != (idle ? pd_info->ack_mask : 0));
+ /* Wait until idle_ack = 1 */
+ target_ack = idle ? pd_info->ack_mask : 0;
+ ret = readx_poll_timeout_atomic(rockchip_pmu_read_ack, pmu, val,
+ (val & pd_info->ack_mask) == target_ack,
+ 0, 10000);
+ if (ret) {
+ dev_err(pmu->dev,
+ "failed to get ack on domain '%s', val=0x%x\n",
+ genpd->name, val);
+ return ret;
+ }
- while (rockchip_pmu_domain_is_idle(pd) != idle)
- cpu_relax();
+ ret = readx_poll_timeout_atomic(rockchip_pmu_domain_is_idle, pd,
+ is_idle, is_idle == idle, 0, 10000);
+ if (ret) {
+ dev_err(pmu->dev,
+ "failed to set idle on domain '%s', val=%d\n",
+ genpd->name, is_idle);
+ return ret;
+ }
return 0;
}
@@ -198,6 +225,8 @@ static void rockchip_do_pmu_set_power_domain(struct rockchip_pm_domain *pd,
bool on)
{
struct rockchip_pmu *pmu = pd->pmu;
+ struct generic_pm_domain *genpd = &pd->genpd;
+ bool is_on;
if (pd->info->pwr_mask == 0)
return;
@@ -207,8 +236,13 @@ static void rockchip_do_pmu_set_power_domain(struct rockchip_pm_domain *pd,
dsb(sy);
- while (rockchip_pmu_domain_is_on(pd) != on)
- cpu_relax();
+ if (readx_poll_timeout_atomic(rockchip_pmu_domain_is_on, pd, is_on,
+ is_on == on, 0, 10000)) {
+ dev_err(pmu->dev,
+ "failed to set domain '%s', val=%d\n",
+ genpd->name, is_on);
+ return;
+ }
}
static int rockchip_pd_power(struct rockchip_pm_domain *pd, bool power_on)
@@ -445,7 +479,16 @@ err_out:
static void rockchip_pm_remove_one_domain(struct rockchip_pm_domain *pd)
{
- int i;
+ int i, ret;
+
+ /*
+ * We're in the error cleanup already, so we only complain,
+ * but won't emit another error on top of the original one.
+ */
+ ret = pm_genpd_remove(&pd->genpd);
+ if (ret < 0)
dev_err(pd->pmu->dev, "failed to remove domain '%s': %d - state may be inconsistent\n",
+ pd->genpd.name, ret);
for (i = 0; i < pd->num_clks; i++) {
clk_unprepare(pd->clks[i]);
@@ -597,10 +640,12 @@ static int rockchip_pm_domain_probe(struct platform_device *pdev)
* Configure power up and down transition delays for CORE
* and GPU domains.
*/
- rockchip_configure_pd_cnt(pmu, pmu_info->core_pwrcnt_offset,
- pmu_info->core_power_transition_time);
- rockchip_configure_pd_cnt(pmu, pmu_info->gpu_pwrcnt_offset,
- pmu_info->gpu_power_transition_time);
+ if (pmu_info->core_power_transition_time)
+ rockchip_configure_pd_cnt(pmu, pmu_info->core_pwrcnt_offset,
+ pmu_info->core_power_transition_time);
+ if (pmu_info->gpu_pwrcnt_offset)
+ rockchip_configure_pd_cnt(pmu, pmu_info->gpu_pwrcnt_offset,
+ pmu_info->gpu_power_transition_time);
error = -ENODEV;
@@ -627,7 +672,11 @@ static int rockchip_pm_domain_probe(struct platform_device *pdev)
goto err_out;
}
- of_genpd_add_provider_onecell(np, &pmu->genpd_data);
+ error = of_genpd_add_provider_onecell(np, &pmu->genpd_data);
+ if (error) {
+ dev_err(dev, "failed to add provider: %d\n", error);
+ goto err_out;
+ }
return 0;
@@ -722,11 +771,7 @@ static const struct rockchip_pmu_info rk3399_pmu = {
.idle_offset = 0x64,
.ack_offset = 0x68,
- .core_pwrcnt_offset = 0x9c,
- .gpu_pwrcnt_offset = 0xa4,
-
- .core_power_transition_time = 24,
- .gpu_power_transition_time = 24,
+ /* ARM Trusted Firmware manages power transition times */
.num_domains = ARRAY_SIZE(rk3399_pm_domains),
.domain_info = rk3399_pm_domains,
diff --git a/drivers/soc/tegra/Kconfig b/drivers/soc/tegra/Kconfig
index 03089ad2fc65..e5e124c07066 100644
--- a/drivers/soc/tegra/Kconfig
+++ b/drivers/soc/tegra/Kconfig
@@ -77,5 +77,19 @@ config ARCH_TEGRA_210_SOC
controllers, such as GPIO, I2C, SPI, SDHCI, PCIe, SATA and XHCI, to
name only a few.
+config ARCH_TEGRA_186_SOC
+ bool "NVIDIA Tegra186 SoC"
+ select MAILBOX
+ select TEGRA_BPMP
+ select TEGRA_HSP_MBOX
+ select TEGRA_IVC
+ help
+ Enable support for the NVIDIA Tegra186 SoC. The Tegra186 features a
+ combination of Denver and Cortex-A57 CPU cores and a GPU based on
+ the Pascal architecture. It contains an ADSP with a Cortex-A9 CPU
+ used for audio processing, hardware video encoders/decoders with
+ multi-format support, ISP for image capture processing and BPMP for
+ power management.
+
endif
endif
diff --git a/drivers/soc/tegra/pmc.c b/drivers/soc/tegra/pmc.c
index 7792ed88d80b..e233dd5dcab3 100644
--- a/drivers/soc/tegra/pmc.c
+++ b/drivers/soc/tegra/pmc.c
@@ -45,29 +45,31 @@
#include <soc/tegra/pmc.h>
#define PMC_CNTRL 0x0
-#define PMC_CNTRL_SYSCLK_POLARITY (1 << 10) /* sys clk polarity */
-#define PMC_CNTRL_SYSCLK_OE (1 << 11) /* system clock enable */
-#define PMC_CNTRL_SIDE_EFFECT_LP0 (1 << 14) /* LP0 when CPU pwr gated */
-#define PMC_CNTRL_CPU_PWRREQ_POLARITY (1 << 15) /* CPU pwr req polarity */
-#define PMC_CNTRL_CPU_PWRREQ_OE (1 << 16) /* CPU pwr req enable */
-#define PMC_CNTRL_INTR_POLARITY (1 << 17) /* inverts INTR polarity */
-#define PMC_CNTRL_MAIN_RST (1 << 4)
+#define PMC_CNTRL_INTR_POLARITY BIT(17) /* inverts INTR polarity */
+#define PMC_CNTRL_CPU_PWRREQ_OE BIT(16) /* CPU pwr req enable */
+#define PMC_CNTRL_CPU_PWRREQ_POLARITY BIT(15) /* CPU pwr req polarity */
+#define PMC_CNTRL_SIDE_EFFECT_LP0 BIT(14) /* LP0 when CPU pwr gated */
+#define PMC_CNTRL_SYSCLK_OE BIT(11) /* system clock enable */
+#define PMC_CNTRL_SYSCLK_POLARITY BIT(10) /* sys clk polarity */
+#define PMC_CNTRL_MAIN_RST BIT(4)
#define DPD_SAMPLE 0x020
-#define DPD_SAMPLE_ENABLE (1 << 0)
+#define DPD_SAMPLE_ENABLE BIT(0)
#define DPD_SAMPLE_DISABLE (0 << 0)
#define PWRGATE_TOGGLE 0x30
-#define PWRGATE_TOGGLE_START (1 << 8)
+#define PWRGATE_TOGGLE_START BIT(8)
#define REMOVE_CLAMPING 0x34
#define PWRGATE_STATUS 0x38
+#define PMC_PWR_DET 0x48
+
#define PMC_SCRATCH0 0x50
-#define PMC_SCRATCH0_MODE_RECOVERY (1 << 31)
-#define PMC_SCRATCH0_MODE_BOOTLOADER (1 << 30)
-#define PMC_SCRATCH0_MODE_RCM (1 << 1)
+#define PMC_SCRATCH0_MODE_RECOVERY BIT(31)
+#define PMC_SCRATCH0_MODE_BOOTLOADER BIT(30)
+#define PMC_SCRATCH0_MODE_RCM BIT(1)
#define PMC_SCRATCH0_MODE_MASK (PMC_SCRATCH0_MODE_RECOVERY | \
PMC_SCRATCH0_MODE_BOOTLOADER | \
PMC_SCRATCH0_MODE_RCM)
@@ -75,11 +77,13 @@
#define PMC_CPUPWRGOOD_TIMER 0xc8
#define PMC_CPUPWROFF_TIMER 0xcc
+#define PMC_PWR_DET_VALUE 0xe4
+
#define PMC_SCRATCH41 0x140
#define PMC_SENSOR_CTRL 0x1b0
-#define PMC_SENSOR_CTRL_SCRATCH_WRITE (1 << 2)
-#define PMC_SENSOR_CTRL_ENABLE_RST (1 << 1)
+#define PMC_SENSOR_CTRL_SCRATCH_WRITE BIT(2)
+#define PMC_SENSOR_CTRL_ENABLE_RST BIT(1)
#define PMC_RST_STATUS 0x1b4
#define PMC_RST_STATUS_POR 0
@@ -90,10 +94,10 @@
#define PMC_RST_STATUS_AOTAG 5
#define IO_DPD_REQ 0x1b8
-#define IO_DPD_REQ_CODE_IDLE (0 << 30)
-#define IO_DPD_REQ_CODE_OFF (1 << 30)
-#define IO_DPD_REQ_CODE_ON (2 << 30)
-#define IO_DPD_REQ_CODE_MASK (3 << 30)
+#define IO_DPD_REQ_CODE_IDLE (0U << 30)
+#define IO_DPD_REQ_CODE_OFF (1U << 30)
+#define IO_DPD_REQ_CODE_ON (2U << 30)
+#define IO_DPD_REQ_CODE_MASK (3U << 30)
#define IO_DPD_STATUS 0x1bc
#define IO_DPD2_REQ 0x1c0
@@ -101,16 +105,16 @@
#define SEL_DPD_TIM 0x1c8
#define PMC_SCRATCH54 0x258
-#define PMC_SCRATCH54_DATA_SHIFT 8
-#define PMC_SCRATCH54_ADDR_SHIFT 0
+#define PMC_SCRATCH54_DATA_SHIFT 8
+#define PMC_SCRATCH54_ADDR_SHIFT 0
#define PMC_SCRATCH55 0x25c
-#define PMC_SCRATCH55_RESET_TEGRA (1 << 31)
-#define PMC_SCRATCH55_CNTRL_ID_SHIFT 27
-#define PMC_SCRATCH55_PINMUX_SHIFT 24
-#define PMC_SCRATCH55_16BITOP (1 << 15)
-#define PMC_SCRATCH55_CHECKSUM_SHIFT 16
-#define PMC_SCRATCH55_I2CSLV1_SHIFT 0
+#define PMC_SCRATCH55_RESET_TEGRA BIT(31)
+#define PMC_SCRATCH55_CNTRL_ID_SHIFT 27
+#define PMC_SCRATCH55_PINMUX_SHIFT 24
+#define PMC_SCRATCH55_16BITOP BIT(15)
+#define PMC_SCRATCH55_CHECKSUM_SHIFT 16
+#define PMC_SCRATCH55_I2CSLV1_SHIFT 0
#define GPU_RG_CNTRL 0x2d4
@@ -124,6 +128,12 @@ struct tegra_powergate {
unsigned int num_resets;
};
+struct tegra_io_pad_soc {
+ enum tegra_io_pad id;
+ unsigned int dpd;
+ unsigned int voltage;
+};
+
struct tegra_pmc_soc {
unsigned int num_powergates;
const char *const *powergates;
@@ -132,6 +142,9 @@ struct tegra_pmc_soc {
bool has_tsense_reset;
bool has_gpu_clamps;
+
+ const struct tegra_io_pad_soc *io_pads;
+ unsigned int num_io_pads;
};
/**
@@ -238,8 +251,6 @@ static int tegra_powergate_lookup(struct tegra_pmc *pmc, const char *name)
return i;
}
- dev_err(pmc->dev, "powergate %s not found\n", name);
-
return -ENODEV;
}
@@ -456,13 +467,12 @@ disable_clks:
static int tegra_genpd_power_on(struct generic_pm_domain *domain)
{
struct tegra_powergate *pg = to_powergate(domain);
- struct tegra_pmc *pmc = pg->pmc;
int err;
err = tegra_powergate_power_up(pg, true);
if (err)
- dev_err(pmc->dev, "failed to turn on PM domain %s: %d\n",
- pg->genpd.name, err);
+ pr_err("failed to turn on PM domain %s: %d\n", pg->genpd.name,
+ err);
return err;
}
@@ -470,13 +480,12 @@ static int tegra_genpd_power_on(struct generic_pm_domain *domain)
static int tegra_genpd_power_off(struct generic_pm_domain *domain)
{
struct tegra_powergate *pg = to_powergate(domain);
- struct tegra_pmc *pmc = pg->pmc;
int err;
err = tegra_powergate_power_down(pg);
if (err)
- dev_err(pmc->dev, "failed to turn off PM domain %s: %d\n",
- pg->genpd.name, err);
+ pr_err("failed to turn off PM domain %s: %d\n",
+ pg->genpd.name, err);
return err;
}
@@ -801,8 +810,7 @@ static void tegra_powergate_add(struct tegra_pmc *pmc, struct device_node *np)
id = tegra_powergate_lookup(pmc, np->name);
if (id < 0) {
- dev_err(pmc->dev, "powergate lookup failed for %s: %d\n",
- np->name, id);
+ pr_err("powergate lookup failed for %s: %d\n", np->name, id);
goto free_mem;
}
@@ -822,20 +830,22 @@ static void tegra_powergate_add(struct tegra_pmc *pmc, struct device_node *np)
err = tegra_powergate_of_get_clks(pg, np);
if (err < 0) {
- dev_err(pmc->dev, "failed to get clocks for %s: %d\n",
- np->name, err);
+ pr_err("failed to get clocks for %s: %d\n", np->name, err);
goto set_available;
}
err = tegra_powergate_of_get_resets(pg, np, off);
if (err < 0) {
- dev_err(pmc->dev, "failed to get resets for %s: %d\n",
- np->name, err);
+ pr_err("failed to get resets for %s: %d\n", np->name, err);
goto remove_clks;
}
- if (!IS_ENABLED(CONFIG_PM_GENERIC_DOMAINS))
- goto power_on_cleanup;
+ if (!IS_ENABLED(CONFIG_PM_GENERIC_DOMAINS)) {
+ if (off)
+ WARN_ON(tegra_powergate_power_up(pg, true));
+
+ goto remove_resets;
+ }
/*
* FIXME: If XHCI is enabled for Tegra, then power-up the XUSB
@@ -846,25 +856,33 @@ static void tegra_powergate_add(struct tegra_pmc *pmc, struct device_node *np)
* to be unused.
*/
if (IS_ENABLED(CONFIG_USB_XHCI_TEGRA) &&
- (id == TEGRA_POWERGATE_XUSBA || id == TEGRA_POWERGATE_XUSBC))
- goto power_on_cleanup;
+ (id == TEGRA_POWERGATE_XUSBA || id == TEGRA_POWERGATE_XUSBC)) {
+ if (off)
+ WARN_ON(tegra_powergate_power_up(pg, true));
+
+ goto remove_resets;
+ }
- pm_genpd_init(&pg->genpd, NULL, off);
+ err = pm_genpd_init(&pg->genpd, NULL, off);
+ if (err < 0) {
+ pr_err("failed to initialise PM domain %s: %d\n", np->name,
+ err);
+ goto remove_resets;
+ }
err = of_genpd_add_provider_simple(np, &pg->genpd);
if (err < 0) {
- dev_err(pmc->dev, "failed to add genpd provider for %s: %d\n",
- np->name, err);
- goto remove_resets;
+ pr_err("failed to add PM domain provider for %s: %d\n",
+ np->name, err);
+ goto remove_genpd;
}
- dev_dbg(pmc->dev, "added power domain %s\n", pg->genpd.name);
+ pr_debug("added PM domain %s\n", pg->genpd.name);
return;
-power_on_cleanup:
- if (off)
- WARN_ON(tegra_powergate_power_up(pg, true));
+remove_genpd:
+ pm_genpd_remove(&pg->genpd);
remove_resets:
while (pg->num_resets--)
@@ -908,21 +926,36 @@ static void tegra_powergate_init(struct tegra_pmc *pmc,
of_node_put(np);
}
-static int tegra_io_rail_prepare(unsigned int id, unsigned long *request,
- unsigned long *status, unsigned int *bit)
+static const struct tegra_io_pad_soc *
+tegra_io_pad_find(struct tegra_pmc *pmc, enum tegra_io_pad id)
{
+ unsigned int i;
+
+ for (i = 0; i < pmc->soc->num_io_pads; i++)
+ if (pmc->soc->io_pads[i].id == id)
+ return &pmc->soc->io_pads[i];
+
+ return NULL;
+}
+
+static int tegra_io_pad_prepare(enum tegra_io_pad id, unsigned long *request,
+ unsigned long *status, u32 *mask)
+{
+ const struct tegra_io_pad_soc *pad;
unsigned long rate, value;
- *bit = id % 32;
+ pad = tegra_io_pad_find(pmc, id);
+ if (!pad) {
+ pr_err("invalid I/O pad ID %u\n", id);
+ return -ENOENT;
+ }
- /*
- * There are two sets of 30 bits to select IO rails, but bits 30 and
- * 31 are control bits rather than IO rail selection bits.
- */
- if (id > 63 || *bit == 30 || *bit == 31)
- return -EINVAL;
+ if (pad->dpd == UINT_MAX)
+ return -ENOTSUPP;
- if (id < 32) {
+ *mask = BIT(pad->dpd % 32);
+
+ if (pad->dpd < 32) {
*status = IO_DPD_STATUS;
*request = IO_DPD_REQ;
} else {
@@ -931,6 +964,10 @@ static int tegra_io_rail_prepare(unsigned int id, unsigned long *request,
}
rate = clk_get_rate(pmc->clk);
+ if (!rate) {
+ pr_err("failed to get clock rate\n");
+ return -ENODEV;
+ }
tegra_pmc_writel(DPD_SAMPLE_ENABLE, DPD_SAMPLE);
@@ -942,10 +979,10 @@ static int tegra_io_rail_prepare(unsigned int id, unsigned long *request,
return 0;
}
-static int tegra_io_rail_poll(unsigned long offset, unsigned long mask,
- unsigned long val, unsigned long timeout)
+static int tegra_io_pad_poll(unsigned long offset, u32 mask,
+ u32 val, unsigned long timeout)
{
- unsigned long value;
+ u32 value;
timeout = jiffies + msecs_to_jiffies(timeout);
@@ -960,67 +997,164 @@ static int tegra_io_rail_poll(unsigned long offset, unsigned long mask,
return -ETIMEDOUT;
}
-static void tegra_io_rail_unprepare(void)
+static void tegra_io_pad_unprepare(void)
{
tegra_pmc_writel(DPD_SAMPLE_DISABLE, DPD_SAMPLE);
}
-int tegra_io_rail_power_on(unsigned int id)
+/**
+ * tegra_io_pad_power_enable() - enable power to I/O pad
+ * @id: Tegra I/O pad ID for which to enable power
+ *
+ * Returns: 0 on success or a negative error code on failure.
+ */
+int tegra_io_pad_power_enable(enum tegra_io_pad id)
{
unsigned long request, status;
- unsigned int bit;
+ u32 mask;
int err;
mutex_lock(&pmc->powergates_lock);
- err = tegra_io_rail_prepare(id, &request, &status, &bit);
- if (err)
- goto error;
+ err = tegra_io_pad_prepare(id, &request, &status, &mask);
+ if (err < 0) {
+ pr_err("failed to prepare I/O pad: %d\n", err);
+ goto unlock;
+ }
- tegra_pmc_writel(IO_DPD_REQ_CODE_OFF | BIT(bit), request);
+ tegra_pmc_writel(IO_DPD_REQ_CODE_OFF | mask, request);
- err = tegra_io_rail_poll(status, BIT(bit), 0, 250);
- if (err) {
- pr_info("tegra_io_rail_poll() failed: %d\n", err);
- goto error;
+ err = tegra_io_pad_poll(status, mask, 0, 250);
+ if (err < 0) {
+ pr_err("failed to enable I/O pad: %d\n", err);
+ goto unlock;
}
- tegra_io_rail_unprepare();
+ tegra_io_pad_unprepare();
-error:
+unlock:
mutex_unlock(&pmc->powergates_lock);
-
return err;
}
-EXPORT_SYMBOL(tegra_io_rail_power_on);
+EXPORT_SYMBOL(tegra_io_pad_power_enable);
-int tegra_io_rail_power_off(unsigned int id)
+/**
+ * tegra_io_pad_power_disable() - disable power to I/O pad
+ * @id: Tegra I/O pad ID for which to disable power
+ *
+ * Returns: 0 on success or a negative error code on failure.
+ */
+int tegra_io_pad_power_disable(enum tegra_io_pad id)
{
unsigned long request, status;
- unsigned int bit;
+ u32 mask;
int err;
mutex_lock(&pmc->powergates_lock);
- err = tegra_io_rail_prepare(id, &request, &status, &bit);
- if (err) {
- pr_info("tegra_io_rail_prepare() failed: %d\n", err);
- goto error;
+ err = tegra_io_pad_prepare(id, &request, &status, &mask);
+ if (err < 0) {
+ pr_err("failed to prepare I/O pad: %d\n", err);
+ goto unlock;
}
- tegra_pmc_writel(IO_DPD_REQ_CODE_ON | BIT(bit), request);
+ tegra_pmc_writel(IO_DPD_REQ_CODE_ON | mask, request);
- err = tegra_io_rail_poll(status, BIT(bit), BIT(bit), 250);
- if (err)
- goto error;
+ err = tegra_io_pad_poll(status, mask, mask, 250);
+ if (err < 0) {
+ pr_err("failed to disable I/O pad: %d\n", err);
+ goto unlock;
+ }
- tegra_io_rail_unprepare();
+ tegra_io_pad_unprepare();
-error:
+unlock:
mutex_unlock(&pmc->powergates_lock);
-
return err;
}
+EXPORT_SYMBOL(tegra_io_pad_power_disable);
+
+int tegra_io_pad_set_voltage(enum tegra_io_pad id,
+ enum tegra_io_pad_voltage voltage)
+{
+ const struct tegra_io_pad_soc *pad;
+ u32 value;
+
+ pad = tegra_io_pad_find(pmc, id);
+ if (!pad)
+ return -ENOENT;
+
+ if (pad->voltage == UINT_MAX)
+ return -ENOTSUPP;
+
+ mutex_lock(&pmc->powergates_lock);
+
+ /* write-enable PMC_PWR_DET_VALUE[pad->voltage] */
+ value = tegra_pmc_readl(PMC_PWR_DET);
+ value |= BIT(pad->voltage);
+ tegra_pmc_writel(value, PMC_PWR_DET);
+
+ /* update I/O voltage */
+ value = tegra_pmc_readl(PMC_PWR_DET_VALUE);
+
+ if (voltage == TEGRA_IO_PAD_1800000UV)
+ value &= ~BIT(pad->voltage);
+ else
+ value |= BIT(pad->voltage);
+
+ tegra_pmc_writel(value, PMC_PWR_DET_VALUE);
+
+ mutex_unlock(&pmc->powergates_lock);
+
+ usleep_range(100, 250);
+
+ return 0;
+}
+EXPORT_SYMBOL(tegra_io_pad_set_voltage);
+
+int tegra_io_pad_get_voltage(enum tegra_io_pad id)
+{
+ const struct tegra_io_pad_soc *pad;
+ u32 value;
+
+ pad = tegra_io_pad_find(pmc, id);
+ if (!pad)
+ return -ENOENT;
+
+ if (pad->voltage == UINT_MAX)
+ return -ENOTSUPP;
+
+ value = tegra_pmc_readl(PMC_PWR_DET_VALUE);
+
+ if ((value & BIT(pad->voltage)) == 0)
+ return TEGRA_IO_PAD_1800000UV;
+
+ return TEGRA_IO_PAD_3300000UV;
+}
+EXPORT_SYMBOL(tegra_io_pad_get_voltage);
+
+/**
+ * tegra_io_rail_power_on() - enable power to I/O rail
+ * @id: Tegra I/O pad ID for which to enable power
+ *
+ * See also: tegra_io_pad_power_enable()
+ */
+int tegra_io_rail_power_on(unsigned int id)
+{
+ return tegra_io_pad_power_enable(id);
+}
+EXPORT_SYMBOL(tegra_io_rail_power_on);
+
+/**
+ * tegra_io_rail_power_off() - disable power to I/O rail
+ * @id: Tegra I/O pad ID for which to disable power
+ *
+ * See also: tegra_io_pad_power_disable()
+ */
+int tegra_io_rail_power_off(unsigned int id)
+{
+ return tegra_io_pad_power_disable(id);
+}
EXPORT_SYMBOL(tegra_io_rail_power_off);
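The two wrappers above keep the legacy rail-based API working while callers
migrate to the pad-based one. A minimal consumer sketch (hypothetical
function; it assumes the new prototypes are exported through
<soc/tegra/pmc.h> next to the existing tegra_io_rail_* declarations):

	#include <soc/tegra/pmc.h>

	static int example_power_up_sdmmc1(void)
	{
		int err;

		/* Take the SDMMC1 pad out of deep power down. */
		err = tegra_io_pad_power_enable(TEGRA_IO_PAD_SDMMC1);
		if (err < 0)
			return err;

		/* Switch to 1.8 V where the SoC supports it (e.g. Tegra210). */
		err = tegra_io_pad_set_voltage(TEGRA_IO_PAD_SDMMC1,
					       TEGRA_IO_PAD_1800000UV);
		if (err == -ENOTSUPP)
			err = 0; /* this pad has no voltage control */

		return err;
	}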
#ifdef CONFIG_PM_SLEEP
@@ -1454,6 +1588,39 @@ static const u8 tegra124_cpu_powergates[] = {
TEGRA_POWERGATE_CPU3,
};
+static const struct tegra_io_pad_soc tegra124_io_pads[] = {
+ { .id = TEGRA_IO_PAD_AUDIO, .dpd = 17, .voltage = UINT_MAX },
+ { .id = TEGRA_IO_PAD_BB, .dpd = 15, .voltage = UINT_MAX },
+ { .id = TEGRA_IO_PAD_CAM, .dpd = 36, .voltage = UINT_MAX },
+ { .id = TEGRA_IO_PAD_COMP, .dpd = 22, .voltage = UINT_MAX },
+ { .id = TEGRA_IO_PAD_CSIA, .dpd = 0, .voltage = UINT_MAX },
+ { .id = TEGRA_IO_PAD_CSIB, .dpd = 1, .voltage = UINT_MAX },
+ { .id = TEGRA_IO_PAD_CSIE, .dpd = 44, .voltage = UINT_MAX },
+ { .id = TEGRA_IO_PAD_DSI, .dpd = 2, .voltage = UINT_MAX },
+ { .id = TEGRA_IO_PAD_DSIB, .dpd = 39, .voltage = UINT_MAX },
+ { .id = TEGRA_IO_PAD_DSIC, .dpd = 40, .voltage = UINT_MAX },
+ { .id = TEGRA_IO_PAD_DSID, .dpd = 41, .voltage = UINT_MAX },
+ { .id = TEGRA_IO_PAD_HDMI, .dpd = 28, .voltage = UINT_MAX },
+ { .id = TEGRA_IO_PAD_HSIC, .dpd = 19, .voltage = UINT_MAX },
+ { .id = TEGRA_IO_PAD_HV, .dpd = 38, .voltage = UINT_MAX },
+ { .id = TEGRA_IO_PAD_LVDS, .dpd = 57, .voltage = UINT_MAX },
+ { .id = TEGRA_IO_PAD_MIPI_BIAS, .dpd = 3, .voltage = UINT_MAX },
+ { .id = TEGRA_IO_PAD_NAND, .dpd = 13, .voltage = UINT_MAX },
+ { .id = TEGRA_IO_PAD_PEX_BIAS, .dpd = 4, .voltage = UINT_MAX },
+ { .id = TEGRA_IO_PAD_PEX_CLK1, .dpd = 5, .voltage = UINT_MAX },
+ { .id = TEGRA_IO_PAD_PEX_CLK2, .dpd = 6, .voltage = UINT_MAX },
+ { .id = TEGRA_IO_PAD_PEX_CNTRL, .dpd = 32, .voltage = UINT_MAX },
+ { .id = TEGRA_IO_PAD_SDMMC1, .dpd = 33, .voltage = UINT_MAX },
+ { .id = TEGRA_IO_PAD_SDMMC3, .dpd = 34, .voltage = UINT_MAX },
+ { .id = TEGRA_IO_PAD_SDMMC4, .dpd = 35, .voltage = UINT_MAX },
+ { .id = TEGRA_IO_PAD_SYS_DDC, .dpd = 58, .voltage = UINT_MAX },
+ { .id = TEGRA_IO_PAD_UART, .dpd = 14, .voltage = UINT_MAX },
+ { .id = TEGRA_IO_PAD_USB0, .dpd = 9, .voltage = UINT_MAX },
+ { .id = TEGRA_IO_PAD_USB1, .dpd = 10, .voltage = UINT_MAX },
+ { .id = TEGRA_IO_PAD_USB2, .dpd = 11, .voltage = UINT_MAX },
+ { .id = TEGRA_IO_PAD_USB_BIAS, .dpd = 12, .voltage = UINT_MAX },
+};
+
static const struct tegra_pmc_soc tegra124_pmc_soc = {
.num_powergates = ARRAY_SIZE(tegra124_powergates),
.powergates = tegra124_powergates,
@@ -1461,6 +1628,8 @@ static const struct tegra_pmc_soc tegra124_pmc_soc = {
.cpu_powergates = tegra124_cpu_powergates,
.has_tsense_reset = true,
.has_gpu_clamps = true,
+ .num_io_pads = ARRAY_SIZE(tegra124_io_pads),
+ .io_pads = tegra124_io_pads,
};
static const char * const tegra210_powergates[] = {
@@ -1497,6 +1666,47 @@ static const u8 tegra210_cpu_powergates[] = {
TEGRA_POWERGATE_CPU3,
};
+static const struct tegra_io_pad_soc tegra210_io_pads[] = {
+ { .id = TEGRA_IO_PAD_AUDIO, .dpd = 17, .voltage = 5 },
+ { .id = TEGRA_IO_PAD_AUDIO_HV, .dpd = 61, .voltage = 18 },
+ { .id = TEGRA_IO_PAD_CAM, .dpd = 36, .voltage = 10 },
+ { .id = TEGRA_IO_PAD_CSIA, .dpd = 0, .voltage = UINT_MAX },
+ { .id = TEGRA_IO_PAD_CSIB, .dpd = 1, .voltage = UINT_MAX },
+ { .id = TEGRA_IO_PAD_CSIC, .dpd = 42, .voltage = UINT_MAX },
+ { .id = TEGRA_IO_PAD_CSID, .dpd = 43, .voltage = UINT_MAX },
+ { .id = TEGRA_IO_PAD_CSIE, .dpd = 44, .voltage = UINT_MAX },
+ { .id = TEGRA_IO_PAD_CSIF, .dpd = 45, .voltage = UINT_MAX },
+ { .id = TEGRA_IO_PAD_DBG, .dpd = 25, .voltage = 19 },
+ { .id = TEGRA_IO_PAD_DEBUG_NONAO, .dpd = 26, .voltage = UINT_MAX },
+ { .id = TEGRA_IO_PAD_DMIC, .dpd = 50, .voltage = 20 },
+ { .id = TEGRA_IO_PAD_DP, .dpd = 51, .voltage = UINT_MAX },
+ { .id = TEGRA_IO_PAD_DSI, .dpd = 2, .voltage = UINT_MAX },
+ { .id = TEGRA_IO_PAD_DSIB, .dpd = 39, .voltage = UINT_MAX },
+ { .id = TEGRA_IO_PAD_DSIC, .dpd = 40, .voltage = UINT_MAX },
+ { .id = TEGRA_IO_PAD_DSID, .dpd = 41, .voltage = UINT_MAX },
+ { .id = TEGRA_IO_PAD_EMMC, .dpd = 35, .voltage = UINT_MAX },
+ { .id = TEGRA_IO_PAD_EMMC2, .dpd = 37, .voltage = UINT_MAX },
+ { .id = TEGRA_IO_PAD_GPIO, .dpd = 27, .voltage = 21 },
+ { .id = TEGRA_IO_PAD_HDMI, .dpd = 28, .voltage = UINT_MAX },
+ { .id = TEGRA_IO_PAD_HSIC, .dpd = 19, .voltage = UINT_MAX },
+ { .id = TEGRA_IO_PAD_LVDS, .dpd = 57, .voltage = UINT_MAX },
+ { .id = TEGRA_IO_PAD_MIPI_BIAS, .dpd = 3, .voltage = UINT_MAX },
+ { .id = TEGRA_IO_PAD_PEX_BIAS, .dpd = 4, .voltage = UINT_MAX },
+ { .id = TEGRA_IO_PAD_PEX_CLK1, .dpd = 5, .voltage = UINT_MAX },
+ { .id = TEGRA_IO_PAD_PEX_CLK2, .dpd = 6, .voltage = UINT_MAX },
+ { .id = TEGRA_IO_PAD_PEX_CNTRL, .dpd = UINT_MAX, .voltage = 11 },
+ { .id = TEGRA_IO_PAD_SDMMC1, .dpd = 33, .voltage = 12 },
+ { .id = TEGRA_IO_PAD_SDMMC3, .dpd = 34, .voltage = 13 },
+ { .id = TEGRA_IO_PAD_SPI, .dpd = 46, .voltage = 22 },
+ { .id = TEGRA_IO_PAD_SPI_HV, .dpd = 47, .voltage = 23 },
+ { .id = TEGRA_IO_PAD_UART, .dpd = 14, .voltage = 2 },
+ { .id = TEGRA_IO_PAD_USB0, .dpd = 9, .voltage = UINT_MAX },
+ { .id = TEGRA_IO_PAD_USB1, .dpd = 10, .voltage = UINT_MAX },
+ { .id = TEGRA_IO_PAD_USB2, .dpd = 11, .voltage = UINT_MAX },
+ { .id = TEGRA_IO_PAD_USB3, .dpd = 18, .voltage = UINT_MAX },
+ { .id = TEGRA_IO_PAD_USB_BIAS, .dpd = 12, .voltage = UINT_MAX },
+};
+
static const struct tegra_pmc_soc tegra210_pmc_soc = {
.num_powergates = ARRAY_SIZE(tegra210_powergates),
.powergates = tegra210_powergates,
@@ -1504,6 +1714,8 @@ static const struct tegra_pmc_soc tegra210_pmc_soc = {
.cpu_powergates = tegra210_cpu_powergates,
.has_tsense_reset = true,
.has_gpu_clamps = true,
+ .num_io_pads = ARRAY_SIZE(tegra210_io_pads),
+ .io_pads = tegra210_io_pads,
};
static const struct of_device_id tegra_pmc_match[] = {
diff --git a/drivers/soc/ti/knav_qmss_queue.c b/drivers/soc/ti/knav_qmss_queue.c
index b73e3534f67b..eacad57f2977 100644
--- a/drivers/soc/ti/knav_qmss_queue.c
+++ b/drivers/soc/ti/knav_qmss_queue.c
@@ -1228,7 +1228,7 @@ static int knav_setup_queue_range(struct knav_device *kdev,
range->num_irqs++;
- if (oirq.args_count == 3)
+ if (IS_ENABLED(CONFIG_SMP) && oirq.args_count == 3)
range->irqs[i].cpu_map =
(oirq.args[2] & 0x0000ff00) >> 8;
}
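For illustration, with a hypothetical third interrupt specifier cell of
0x00000100, the mask in bits 15:8 selects CPU 0 only:

	/* oirq.args[2] == 0x00000100 */
	u32 cpu_map = (0x00000100 & 0x0000ff00) >> 8;	/* 0x01: CPU 0 */

On non-SMP kernels the cell is now ignored, since there is no other CPU to
which the queue interrupt could be routed.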
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index b7995474148c..ec4aa252d6e8 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -67,6 +67,13 @@ config SPI_ATH79
This enables support for the SPI controller present on the
Atheros AR71XX/AR724X/AR913X SoCs.
+config SPI_ARMADA_3700
+ tristate "Marvell Armada 3700 SPI Controller"
+ depends on (ARCH_MVEBU && OF) || COMPILE_TEST
+ help
+ This enables support for the SPI controller present on the
+ Marvell Armada 3700 SoCs.
+
config SPI_ATMEL
tristate "Atmel SPI Controller"
depends on HAS_DMA
@@ -264,6 +271,12 @@ config SPI_FALCON
has only been tested with m25p80 type chips. The hardware has no
support for other types of SPI peripherals.
+config SPI_FSL_LPSPI
+ tristate "Freescale i.MX LPSPI controller"
+ depends on ARCH_MXC || COMPILE_TEST
+ help
+ This enables Freescale i.MX LPSPI controllers in master mode.
+
config SPI_GPIO
tristate "GPIO-based bitbanging SPI Master"
depends on GPIOLIB || COMPILE_TEST
@@ -373,7 +386,6 @@ config SPI_FSL_DSPI
config SPI_FSL_ESPI
tristate "Freescale eSPI controller"
depends on FSL_SOC
- select SPI_FSL_LIB
help
This enables using the Freescale eSPI controllers in master mode.
From MPC8536, 85xx platform uses the controller, and all P10xx,
@@ -451,7 +463,8 @@ config SPI_ORION
tristate "Orion SPI master"
depends on PLAT_ORION || ARCH_MVEBU || COMPILE_TEST
help
- This enables using the SPI master controller on the Orion chips.
+ This enables using the SPI master controller on the Orion
+ and MVEBU chips.
config SPI_PIC32
tristate "Microchip PIC32 series SPI"
@@ -553,7 +566,7 @@ config SPI_S3C24XX_FIQ
config SPI_S3C64XX
tristate "Samsung S3C64XX series type SPI"
- depends on (PLAT_SAMSUNG || ARCH_EXYNOS)
+ depends on (PLAT_SAMSUNG || ARCH_EXYNOS || COMPILE_TEST)
help
SPI driver for Samsung S3C64XX and newer SoCs.
diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile
index aa939d955521..7a6b64662c82 100644
--- a/drivers/spi/Makefile
+++ b/drivers/spi/Makefile
@@ -12,6 +12,7 @@ obj-$(CONFIG_SPI_LOOPBACK_TEST) += spi-loopback-test.o
# SPI master controller drivers (bus)
obj-$(CONFIG_SPI_ALTERA) += spi-altera.o
+obj-$(CONFIG_SPI_ARMADA_3700) += spi-armada-3700.o
obj-$(CONFIG_SPI_ATMEL) += spi-atmel.o
obj-$(CONFIG_SPI_ATH79) += spi-ath79.o
obj-$(CONFIG_SPI_AU1550) += spi-au1550.o
@@ -43,6 +44,7 @@ obj-$(CONFIG_SPI_FSL_CPM) += spi-fsl-cpm.o
obj-$(CONFIG_SPI_FSL_DSPI) += spi-fsl-dspi.o
obj-$(CONFIG_SPI_FSL_LIB) += spi-fsl-lib.o
obj-$(CONFIG_SPI_FSL_ESPI) += spi-fsl-espi.o
+obj-$(CONFIG_SPI_FSL_LPSPI) += spi-fsl-lpspi.o
obj-$(CONFIG_SPI_FSL_SPI) += spi-fsl-spi.o
obj-$(CONFIG_SPI_GPIO) += spi-gpio.o
obj-$(CONFIG_SPI_IMG_SPFI) += spi-img-spfi.o
diff --git a/drivers/spi/spi-armada-3700.c b/drivers/spi/spi-armada-3700.c
new file mode 100644
index 000000000000..e89da0af45d2
--- /dev/null
+++ b/drivers/spi/spi-armada-3700.c
@@ -0,0 +1,923 @@
+/*
+ * Marvell Armada-3700 SPI controller driver
+ *
+ * Copyright (C) 2016 Marvell Ltd.
+ *
+ * Author: Wilson Ding <dingwei@marvell.com>
+ * Author: Romain Perier <romain.perier@free-electrons.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/clk.h>
+#include <linux/completion.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/of_device.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/spi/spi.h>
+
+#define DRIVER_NAME "armada_3700_spi"
+
+#define A3700_SPI_TIMEOUT 10
+
+/* SPI register offsets */
+#define A3700_SPI_IF_CTRL_REG 0x00
+#define A3700_SPI_IF_CFG_REG 0x04
+#define A3700_SPI_DATA_OUT_REG 0x08
+#define A3700_SPI_DATA_IN_REG 0x0C
+#define A3700_SPI_IF_INST_REG 0x10
+#define A3700_SPI_IF_ADDR_REG 0x14
+#define A3700_SPI_IF_RMODE_REG 0x18
+#define A3700_SPI_IF_HDR_CNT_REG 0x1C
+#define A3700_SPI_IF_DIN_CNT_REG 0x20
+#define A3700_SPI_IF_TIME_REG 0x24
+#define A3700_SPI_INT_STAT_REG 0x28
+#define A3700_SPI_INT_MASK_REG 0x2C
+
+/* A3700_SPI_IF_CTRL_REG */
+#define A3700_SPI_EN BIT(16)
+#define A3700_SPI_ADDR_NOT_CONFIG BIT(12)
+#define A3700_SPI_WFIFO_OVERFLOW BIT(11)
+#define A3700_SPI_WFIFO_UNDERFLOW BIT(10)
+#define A3700_SPI_RFIFO_OVERFLOW BIT(9)
+#define A3700_SPI_RFIFO_UNDERFLOW BIT(8)
+#define A3700_SPI_WFIFO_FULL BIT(7)
+#define A3700_SPI_WFIFO_EMPTY BIT(6)
+#define A3700_SPI_RFIFO_FULL BIT(5)
+#define A3700_SPI_RFIFO_EMPTY BIT(4)
+#define A3700_SPI_WFIFO_RDY BIT(3)
+#define A3700_SPI_RFIFO_RDY BIT(2)
+#define A3700_SPI_XFER_RDY BIT(1)
+#define A3700_SPI_XFER_DONE BIT(0)
+
+/* A3700_SPI_IF_CFG_REG */
+#define A3700_SPI_WFIFO_THRS BIT(28)
+#define A3700_SPI_RFIFO_THRS BIT(24)
+#define A3700_SPI_AUTO_CS BIT(20)
+#define A3700_SPI_DMA_RD_EN BIT(18)
+#define A3700_SPI_FIFO_MODE BIT(17)
+#define A3700_SPI_SRST BIT(16)
+#define A3700_SPI_XFER_START BIT(15)
+#define A3700_SPI_XFER_STOP BIT(14)
+#define A3700_SPI_INST_PIN BIT(13)
+#define A3700_SPI_ADDR_PIN BIT(12)
+#define A3700_SPI_DATA_PIN1 BIT(11)
+#define A3700_SPI_DATA_PIN0 BIT(10)
+#define A3700_SPI_FIFO_FLUSH BIT(9)
+#define A3700_SPI_RW_EN BIT(8)
+#define A3700_SPI_CLK_POL BIT(7)
+#define A3700_SPI_CLK_PHA BIT(6)
+#define A3700_SPI_BYTE_LEN BIT(5)
+#define A3700_SPI_CLK_PRESCALE BIT(0)
+#define A3700_SPI_CLK_PRESCALE_MASK (0x1f)
+
+#define A3700_SPI_WFIFO_THRS_BIT 28
+#define A3700_SPI_RFIFO_THRS_BIT 24
+#define A3700_SPI_FIFO_THRS_MASK 0x7
+
+#define A3700_SPI_DATA_PIN_MASK 0x3
+
+/* A3700_SPI_IF_HDR_CNT_REG */
+#define A3700_SPI_DUMMY_CNT_BIT 12
+#define A3700_SPI_DUMMY_CNT_MASK 0x7
+#define A3700_SPI_RMODE_CNT_BIT 8
+#define A3700_SPI_RMODE_CNT_MASK 0x3
+#define A3700_SPI_ADDR_CNT_BIT 4
+#define A3700_SPI_ADDR_CNT_MASK 0x7
+#define A3700_SPI_INSTR_CNT_BIT 0
+#define A3700_SPI_INSTR_CNT_MASK 0x3
+
+/* A3700_SPI_IF_TIME_REG */
+#define A3700_SPI_CLK_CAPT_EDGE BIT(7)
+
+/* Transfer header byte counts used by struct a3700_spi */
+#define A3700_INSTR_CNT 1
+#define A3700_ADDR_CNT 3
+#define A3700_DUMMY_CNT 1
+
+struct a3700_spi {
+ struct spi_master *master;
+ void __iomem *base;
+ struct clk *clk;
+	int irq;
+ unsigned int flags;
+ bool xmit_data;
+ const u8 *tx_buf;
+ u8 *rx_buf;
+ size_t buf_len;
+ u8 byte_len;
+ u32 wait_mask;
+ struct completion done;
+ u32 addr_cnt;
+ u32 instr_cnt;
+ size_t hdr_cnt;
+};
+
+static u32 spireg_read(struct a3700_spi *a3700_spi, u32 offset)
+{
+ return readl(a3700_spi->base + offset);
+}
+
+static void spireg_write(struct a3700_spi *a3700_spi, u32 offset, u32 data)
+{
+ writel(data, a3700_spi->base + offset);
+}
+
+static void a3700_spi_auto_cs_unset(struct a3700_spi *a3700_spi)
+{
+ u32 val;
+
+ val = spireg_read(a3700_spi, A3700_SPI_IF_CFG_REG);
+ val &= ~A3700_SPI_AUTO_CS;
+ spireg_write(a3700_spi, A3700_SPI_IF_CFG_REG, val);
+}
+
+static void a3700_spi_activate_cs(struct a3700_spi *a3700_spi, unsigned int cs)
+{
+ u32 val;
+
+ val = spireg_read(a3700_spi, A3700_SPI_IF_CTRL_REG);
+ val |= (A3700_SPI_EN << cs);
+ spireg_write(a3700_spi, A3700_SPI_IF_CTRL_REG, val);
+}
+
+static void a3700_spi_deactivate_cs(struct a3700_spi *a3700_spi,
+ unsigned int cs)
+{
+ u32 val;
+
+ val = spireg_read(a3700_spi, A3700_SPI_IF_CTRL_REG);
+ val &= ~(A3700_SPI_EN << cs);
+ spireg_write(a3700_spi, A3700_SPI_IF_CTRL_REG, val);
+}
+
+static int a3700_spi_pin_mode_set(struct a3700_spi *a3700_spi,
+ unsigned int pin_mode)
+{
+ u32 val;
+
+ val = spireg_read(a3700_spi, A3700_SPI_IF_CFG_REG);
+ val &= ~(A3700_SPI_INST_PIN | A3700_SPI_ADDR_PIN);
+ val &= ~(A3700_SPI_DATA_PIN0 | A3700_SPI_DATA_PIN1);
+
+ switch (pin_mode) {
+ case 1:
+ break;
+ case 2:
+ val |= A3700_SPI_DATA_PIN0;
+ break;
+ case 4:
+ val |= A3700_SPI_DATA_PIN1;
+ break;
+ default:
+		dev_err(&a3700_spi->master->dev, "wrong pin mode %u\n", pin_mode);
+ return -EINVAL;
+ }
+
+ spireg_write(a3700_spi, A3700_SPI_IF_CFG_REG, val);
+
+ return 0;
+}
+
+static void a3700_spi_fifo_mode_set(struct a3700_spi *a3700_spi)
+{
+ u32 val;
+
+ val = spireg_read(a3700_spi, A3700_SPI_IF_CFG_REG);
+ val |= A3700_SPI_FIFO_MODE;
+ spireg_write(a3700_spi, A3700_SPI_IF_CFG_REG, val);
+}
+
+static void a3700_spi_mode_set(struct a3700_spi *a3700_spi,
+ unsigned int mode_bits)
+{
+ u32 val;
+
+ val = spireg_read(a3700_spi, A3700_SPI_IF_CFG_REG);
+
+ if (mode_bits & SPI_CPOL)
+ val |= A3700_SPI_CLK_POL;
+ else
+ val &= ~A3700_SPI_CLK_POL;
+
+ if (mode_bits & SPI_CPHA)
+ val |= A3700_SPI_CLK_PHA;
+ else
+ val &= ~A3700_SPI_CLK_PHA;
+
+ spireg_write(a3700_spi, A3700_SPI_IF_CFG_REG, val);
+}
+
+static void a3700_spi_clock_set(struct a3700_spi *a3700_spi,
+ unsigned int speed_hz, u16 mode)
+{
+ u32 val;
+ u32 prescale;
+
+ prescale = DIV_ROUND_UP(clk_get_rate(a3700_spi->clk), speed_hz);
+
+ val = spireg_read(a3700_spi, A3700_SPI_IF_CFG_REG);
+ val = val & ~A3700_SPI_CLK_PRESCALE_MASK;
+
+ val = val | (prescale & A3700_SPI_CLK_PRESCALE_MASK);
+ spireg_write(a3700_spi, A3700_SPI_IF_CFG_REG, val);
+
+ if (prescale <= 2) {
+ val = spireg_read(a3700_spi, A3700_SPI_IF_TIME_REG);
+ val |= A3700_SPI_CLK_CAPT_EDGE;
+ spireg_write(a3700_spi, A3700_SPI_IF_TIME_REG, val);
+ }
+
+ val = spireg_read(a3700_spi, A3700_SPI_IF_CFG_REG);
+ val &= ~(A3700_SPI_CLK_POL | A3700_SPI_CLK_PHA);
+
+ if (mode & SPI_CPOL)
+ val |= A3700_SPI_CLK_POL;
+
+ if (mode & SPI_CPHA)
+ val |= A3700_SPI_CLK_PHA;
+
+ spireg_write(a3700_spi, A3700_SPI_IF_CFG_REG, val);
+}
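A worked example of the prescaler math above, assuming a hypothetical
160 MHz input clock and a 10 MHz transfer:

	unsigned int prescale = DIV_ROUND_UP(160000000U, 10000000U); /* 16 */

The 5-bit field (A3700_SPI_CLK_PRESCALE_MASK) caps the divider at 31, and
for prescale <= 2 the capture edge is shifted via A3700_SPI_CLK_CAPT_EDGE
to relax timing at the highest rates.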
+
+static void a3700_spi_bytelen_set(struct a3700_spi *a3700_spi, unsigned int len)
+{
+ u32 val;
+
+ val = spireg_read(a3700_spi, A3700_SPI_IF_CFG_REG);
+ if (len == 4)
+ val |= A3700_SPI_BYTE_LEN;
+ else
+ val &= ~A3700_SPI_BYTE_LEN;
+ spireg_write(a3700_spi, A3700_SPI_IF_CFG_REG, val);
+
+ a3700_spi->byte_len = len;
+}
+
+static int a3700_spi_fifo_flush(struct a3700_spi *a3700_spi)
+{
+ int timeout = A3700_SPI_TIMEOUT;
+ u32 val;
+
+ val = spireg_read(a3700_spi, A3700_SPI_IF_CFG_REG);
+ val |= A3700_SPI_FIFO_FLUSH;
+ spireg_write(a3700_spi, A3700_SPI_IF_CFG_REG, val);
+
+ while (--timeout) {
+ val = spireg_read(a3700_spi, A3700_SPI_IF_CFG_REG);
+ if (!(val & A3700_SPI_FIFO_FLUSH))
+ return 0;
+ udelay(1);
+ }
+
+ return -ETIMEDOUT;
+}
+
+static int a3700_spi_init(struct a3700_spi *a3700_spi)
+{
+ struct spi_master *master = a3700_spi->master;
+ u32 val;
+ int i, ret = 0;
+
+ /* Reset SPI unit */
+ val = spireg_read(a3700_spi, A3700_SPI_IF_CFG_REG);
+ val |= A3700_SPI_SRST;
+ spireg_write(a3700_spi, A3700_SPI_IF_CFG_REG, val);
+
+ udelay(A3700_SPI_TIMEOUT);
+
+ val = spireg_read(a3700_spi, A3700_SPI_IF_CFG_REG);
+ val &= ~A3700_SPI_SRST;
+ spireg_write(a3700_spi, A3700_SPI_IF_CFG_REG, val);
+
+ /* Disable AUTO_CS and deactivate all chip-selects */
+ a3700_spi_auto_cs_unset(a3700_spi);
+ for (i = 0; i < master->num_chipselect; i++)
+ a3700_spi_deactivate_cs(a3700_spi, i);
+
+ /* Enable FIFO mode */
+ a3700_spi_fifo_mode_set(a3700_spi);
+
+ /* Set SPI mode */
+ a3700_spi_mode_set(a3700_spi, master->mode_bits);
+
+ /* Reset counters */
+ spireg_write(a3700_spi, A3700_SPI_IF_HDR_CNT_REG, 0);
+ spireg_write(a3700_spi, A3700_SPI_IF_DIN_CNT_REG, 0);
+
+ /* Mask the interrupts and clear cause bits */
+ spireg_write(a3700_spi, A3700_SPI_INT_MASK_REG, 0);
+ spireg_write(a3700_spi, A3700_SPI_INT_STAT_REG, ~0U);
+
+ return ret;
+}
+
+static irqreturn_t a3700_spi_interrupt(int irq, void *dev_id)
+{
+ struct spi_master *master = dev_id;
+ struct a3700_spi *a3700_spi;
+ u32 cause;
+
+ a3700_spi = spi_master_get_devdata(master);
+
+ /* Get interrupt causes */
+ cause = spireg_read(a3700_spi, A3700_SPI_INT_STAT_REG);
+
+ if (!cause || !(a3700_spi->wait_mask & cause))
+ return IRQ_NONE;
+
+ /* mask and acknowledge the SPI interrupts */
+ spireg_write(a3700_spi, A3700_SPI_INT_MASK_REG, 0);
+ spireg_write(a3700_spi, A3700_SPI_INT_STAT_REG, cause);
+
+ /* Wake up the transfer */
+ if (a3700_spi->wait_mask & cause)
+ complete(&a3700_spi->done);
+
+ return IRQ_HANDLED;
+}
+
+static bool a3700_spi_wait_completion(struct spi_device *spi)
+{
+ struct a3700_spi *a3700_spi;
+ unsigned int timeout;
+ unsigned int ctrl_reg;
+ unsigned long timeout_jiffies;
+
+ a3700_spi = spi_master_get_devdata(spi->master);
+
+	/*
+	 * The SPI interrupt is edge-triggered: it fires only when a status
+	 * bit changes from '0' to '1'. Before waiting for an interrupt,
+	 * check the status bit in the control register first; if it is
+	 * already set, there is no interrupt to wait for.
+	 */
+ ctrl_reg = spireg_read(a3700_spi, A3700_SPI_IF_CTRL_REG);
+ if (a3700_spi->wait_mask & ctrl_reg)
+ return true;
+
+ reinit_completion(&a3700_spi->done);
+
+ spireg_write(a3700_spi, A3700_SPI_INT_MASK_REG,
+ a3700_spi->wait_mask);
+
+ timeout_jiffies = msecs_to_jiffies(A3700_SPI_TIMEOUT);
+ timeout = wait_for_completion_timeout(&a3700_spi->done,
+ timeout_jiffies);
+
+ a3700_spi->wait_mask = 0;
+
+ if (timeout)
+ return true;
+
+	/*
+	 * The interrupt may have fired between the status check above and
+	 * the call to wait_for_completion_timeout(). To avoid missing it,
+	 * re-check the status bits in the control register; if the awaited
+	 * bit is already set, consider the wait successful and return true.
+	 */
+ ctrl_reg = spireg_read(a3700_spi, A3700_SPI_IF_CTRL_REG);
+ if (a3700_spi->wait_mask & ctrl_reg)
+ return true;
+
+ spireg_write(a3700_spi, A3700_SPI_INT_MASK_REG, 0);
+
+	return false;
+}
+
+static bool a3700_spi_transfer_wait(struct spi_device *spi,
+ unsigned int bit_mask)
+{
+ struct a3700_spi *a3700_spi;
+
+ a3700_spi = spi_master_get_devdata(spi->master);
+ a3700_spi->wait_mask = bit_mask;
+
+ return a3700_spi_wait_completion(spi);
+}
+
+static void a3700_spi_fifo_thres_set(struct a3700_spi *a3700_spi,
+ unsigned int bytes)
+{
+ u32 val;
+
+ val = spireg_read(a3700_spi, A3700_SPI_IF_CFG_REG);
+ val &= ~(A3700_SPI_FIFO_THRS_MASK << A3700_SPI_RFIFO_THRS_BIT);
+ val |= (bytes - 1) << A3700_SPI_RFIFO_THRS_BIT;
+ val &= ~(A3700_SPI_FIFO_THRS_MASK << A3700_SPI_WFIFO_THRS_BIT);
+ val |= (7 - bytes) << A3700_SPI_WFIFO_THRS_BIT;
+ spireg_write(a3700_spi, A3700_SPI_IF_CFG_REG, val);
+}
+
+static void a3700_spi_transfer_setup(struct spi_device *spi,
+ struct spi_transfer *xfer)
+{
+ struct a3700_spi *a3700_spi;
+ unsigned int byte_len;
+
+ a3700_spi = spi_master_get_devdata(spi->master);
+
+ a3700_spi_clock_set(a3700_spi, xfer->speed_hz, spi->mode);
+
+ byte_len = xfer->bits_per_word >> 3;
+
+ a3700_spi_fifo_thres_set(a3700_spi, byte_len);
+}
+
+static void a3700_spi_set_cs(struct spi_device *spi, bool enable)
+{
+ struct a3700_spi *a3700_spi = spi_master_get_devdata(spi->master);
+
+ if (!enable)
+ a3700_spi_activate_cs(a3700_spi, spi->chip_select);
+ else
+ a3700_spi_deactivate_cs(a3700_spi, spi->chip_select);
+}
+
+static void a3700_spi_header_set(struct a3700_spi *a3700_spi)
+{
+ u32 instr_cnt = 0, addr_cnt = 0, dummy_cnt = 0;
+ u32 val = 0;
+
+ /* Clear the header registers */
+ spireg_write(a3700_spi, A3700_SPI_IF_INST_REG, 0);
+ spireg_write(a3700_spi, A3700_SPI_IF_ADDR_REG, 0);
+ spireg_write(a3700_spi, A3700_SPI_IF_RMODE_REG, 0);
+
+ /* Set header counters */
+ if (a3700_spi->tx_buf) {
+ if (a3700_spi->buf_len <= a3700_spi->instr_cnt) {
+ instr_cnt = a3700_spi->buf_len;
+ } else if (a3700_spi->buf_len <= (a3700_spi->instr_cnt +
+ a3700_spi->addr_cnt)) {
+ instr_cnt = a3700_spi->instr_cnt;
+ addr_cnt = a3700_spi->buf_len - instr_cnt;
+ } else if (a3700_spi->buf_len <= a3700_spi->hdr_cnt) {
+ instr_cnt = a3700_spi->instr_cnt;
+ addr_cnt = a3700_spi->addr_cnt;
+			/*
+			 * Handle the normal write case with a single data
+			 * byte: only treat the remainder as dummy cycles
+			 * when the first byte after the address is zero.
+			 */
+ if (!a3700_spi->tx_buf[instr_cnt + addr_cnt])
+ dummy_cnt = a3700_spi->buf_len - instr_cnt -
+ addr_cnt;
+ }
+ val |= ((instr_cnt & A3700_SPI_INSTR_CNT_MASK)
+ << A3700_SPI_INSTR_CNT_BIT);
+ val |= ((addr_cnt & A3700_SPI_ADDR_CNT_MASK)
+ << A3700_SPI_ADDR_CNT_BIT);
+ val |= ((dummy_cnt & A3700_SPI_DUMMY_CNT_MASK)
+ << A3700_SPI_DUMMY_CNT_BIT);
+ }
+ spireg_write(a3700_spi, A3700_SPI_IF_HDR_CNT_REG, val);
+
+ /* Update the buffer length to be transferred */
+ a3700_spi->buf_len -= (instr_cnt + addr_cnt + dummy_cnt);
+
+ /* Set Instruction */
+ val = 0;
+ while (instr_cnt--) {
+ val = (val << 8) | a3700_spi->tx_buf[0];
+ a3700_spi->tx_buf++;
+ }
+ spireg_write(a3700_spi, A3700_SPI_IF_INST_REG, val);
+
+ /* Set Address */
+ val = 0;
+ while (addr_cnt--) {
+ val = (val << 8) | a3700_spi->tx_buf[0];
+ a3700_spi->tx_buf++;
+ }
+ spireg_write(a3700_spi, A3700_SPI_IF_ADDR_REG, val);
+}
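For illustration, with the defaults set in probe (A3700_INSTR_CNT = 1,
A3700_ADDR_CNT = 3), a hypothetical 4-byte command such as a flash read
{0x03, 0xAA, 0xBB, 0xCC} is consumed entirely by the header registers:

	/*
	 * tx_buf = {0x03, 0xAA, 0xBB, 0xCC}, buf_len = 4:
	 *   A3700_SPI_IF_INST_REG    <- 0x00000003  (1 instruction byte)
	 *   A3700_SPI_IF_ADDR_REG    <- 0x00AABBCC  (3 address bytes, MSB first)
	 *   A3700_SPI_IF_HDR_CNT_REG <- (1 << A3700_SPI_INSTR_CNT_BIT) |
	 *                               (3 << A3700_SPI_ADDR_CNT_BIT)
	 * buf_len drops to 0, so nothing is left for the data FIFO.
	 */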
+
+static int a3700_is_wfifo_full(struct a3700_spi *a3700_spi)
+{
+ u32 val;
+
+ val = spireg_read(a3700_spi, A3700_SPI_IF_CTRL_REG);
+ return (val & A3700_SPI_WFIFO_FULL);
+}
+
+static int a3700_spi_fifo_write(struct a3700_spi *a3700_spi)
+{
+ u32 val;
+ int i = 0;
+
+ while (!a3700_is_wfifo_full(a3700_spi) && a3700_spi->buf_len) {
+ val = 0;
+ if (a3700_spi->buf_len >= 4) {
+ val = cpu_to_le32(*(u32 *)a3700_spi->tx_buf);
+ spireg_write(a3700_spi, A3700_SPI_DATA_OUT_REG, val);
+
+ a3700_spi->buf_len -= 4;
+ a3700_spi->tx_buf += 4;
+ } else {
+			/*
+			 * Fewer than four bytes remain: pad the 32-bit word
+			 * with all ones so that no stale data is clocked out
+			 * for the bytes past the end of the buffer, and push
+			 * it to the FIFO as a single write.
+			 */
+			val = GENMASK(31, 0);
+			while (a3700_spi->buf_len) {
+				val &= ~(0xff << (8 * i));
+				val |= *a3700_spi->tx_buf++ << (8 * i);
+				i++;
+				a3700_spi->buf_len--;
+			}
+
+			spireg_write(a3700_spi, A3700_SPI_DATA_OUT_REG, val);
+ break;
+ }
+ }
+
+ return 0;
+}
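A worked example of the tail padding above, assuming two hypothetical
bytes {0xDE, 0xAD} remain:

	/*
	 * start:       val = 0xFFFFFFFF
	 * i = 0, 0xDE: val = 0xFFFFFFDE
	 * i = 1, 0xAD: val = 0xFFFFADDE
	 *
	 * The single word 0xFFFFADDE enters the write FIFO; the 0xff
	 * padding keeps stale data from being clocked out for the bytes
	 * past the end of the buffer.
	 */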
+
+static int a3700_is_rfifo_empty(struct a3700_spi *a3700_spi)
+{
+ u32 val = spireg_read(a3700_spi, A3700_SPI_IF_CTRL_REG);
+
+ return (val & A3700_SPI_RFIFO_EMPTY);
+}
+
+static int a3700_spi_fifo_read(struct a3700_spi *a3700_spi)
+{
+ u32 val;
+
+ while (!a3700_is_rfifo_empty(a3700_spi) && a3700_spi->buf_len) {
+ val = spireg_read(a3700_spi, A3700_SPI_DATA_IN_REG);
+ if (a3700_spi->buf_len >= 4) {
+ u32 data = le32_to_cpu(val);
+ memcpy(a3700_spi->rx_buf, &data, 4);
+
+ a3700_spi->buf_len -= 4;
+ a3700_spi->rx_buf += 4;
+ } else {
+			/*
+			 * Fewer than four bytes remain: copy only what is
+			 * left in the rx buffer to avoid writing past its
+			 * end.
+			 */
+ while (a3700_spi->buf_len) {
+ *a3700_spi->rx_buf = val & 0xff;
+ val >>= 8;
+
+ a3700_spi->buf_len--;
+ a3700_spi->rx_buf++;
+ }
+ }
+ }
+
+ return 0;
+}
+
+static void a3700_spi_transfer_abort_fifo(struct a3700_spi *a3700_spi)
+{
+ int timeout = A3700_SPI_TIMEOUT;
+ u32 val;
+
+ val = spireg_read(a3700_spi, A3700_SPI_IF_CFG_REG);
+ val |= A3700_SPI_XFER_STOP;
+ spireg_write(a3700_spi, A3700_SPI_IF_CFG_REG, val);
+
+ while (--timeout) {
+ val = spireg_read(a3700_spi, A3700_SPI_IF_CFG_REG);
+ if (!(val & A3700_SPI_XFER_START))
+ break;
+ udelay(1);
+ }
+
+ a3700_spi_fifo_flush(a3700_spi);
+
+ val &= ~A3700_SPI_XFER_STOP;
+ spireg_write(a3700_spi, A3700_SPI_IF_CFG_REG, val);
+}
+
+static int a3700_spi_prepare_message(struct spi_master *master,
+ struct spi_message *message)
+{
+ struct a3700_spi *a3700_spi = spi_master_get_devdata(master);
+ struct spi_device *spi = message->spi;
+ int ret;
+
+ ret = clk_enable(a3700_spi->clk);
+ if (ret) {
+ dev_err(&spi->dev, "failed to enable clk with error %d\n", ret);
+ return ret;
+ }
+
+ /* Flush the FIFOs */
+ ret = a3700_spi_fifo_flush(a3700_spi);
+ if (ret)
+ return ret;
+
+ a3700_spi_bytelen_set(a3700_spi, 4);
+
+ return 0;
+}
+
+static int a3700_spi_transfer_one(struct spi_master *master,
+ struct spi_device *spi,
+ struct spi_transfer *xfer)
+{
+ struct a3700_spi *a3700_spi = spi_master_get_devdata(master);
+ int ret = 0, timeout = A3700_SPI_TIMEOUT;
+ unsigned int nbits = 0;
+ u32 val;
+
+ a3700_spi_transfer_setup(spi, xfer);
+
+ a3700_spi->tx_buf = xfer->tx_buf;
+ a3700_spi->rx_buf = xfer->rx_buf;
+ a3700_spi->buf_len = xfer->len;
+
+ /* SPI transfer headers */
+ a3700_spi_header_set(a3700_spi);
+
+ if (xfer->tx_buf)
+ nbits = xfer->tx_nbits;
+ else if (xfer->rx_buf)
+ nbits = xfer->rx_nbits;
+
+ a3700_spi_pin_mode_set(a3700_spi, nbits);
+
+ if (xfer->rx_buf) {
+ /* Set read data length */
+ spireg_write(a3700_spi, A3700_SPI_IF_DIN_CNT_REG,
+ a3700_spi->buf_len);
+ /* Start READ transfer */
+ val = spireg_read(a3700_spi, A3700_SPI_IF_CFG_REG);
+ val &= ~A3700_SPI_RW_EN;
+ val |= A3700_SPI_XFER_START;
+ spireg_write(a3700_spi, A3700_SPI_IF_CFG_REG, val);
+ } else if (xfer->tx_buf) {
+ /* Start Write transfer */
+ val = spireg_read(a3700_spi, A3700_SPI_IF_CFG_REG);
+ val |= (A3700_SPI_XFER_START | A3700_SPI_RW_EN);
+ spireg_write(a3700_spi, A3700_SPI_IF_CFG_REG, val);
+
+		/*
+		 * xmit_data is true when the transfer carries data for the
+		 * SPI device and false when the instruction in SPI_INSTR
+		 * requires no data phase.
+		 */
+ a3700_spi->xmit_data = (a3700_spi->buf_len != 0);
+ }
+
+ while (a3700_spi->buf_len) {
+ if (a3700_spi->tx_buf) {
+ /* Wait wfifo ready */
+ if (!a3700_spi_transfer_wait(spi,
+ A3700_SPI_WFIFO_RDY)) {
+ dev_err(&spi->dev,
+ "wait wfifo ready timed out\n");
+ ret = -ETIMEDOUT;
+ goto error;
+ }
+ /* Fill up the wfifo */
+ ret = a3700_spi_fifo_write(a3700_spi);
+ if (ret)
+ goto error;
+ } else if (a3700_spi->rx_buf) {
+ /* Wait rfifo ready */
+ if (!a3700_spi_transfer_wait(spi,
+ A3700_SPI_RFIFO_RDY)) {
+ dev_err(&spi->dev,
+ "wait rfifo ready timed out\n");
+ ret = -ETIMEDOUT;
+ goto error;
+ }
+ /* Drain out the rfifo */
+ ret = a3700_spi_fifo_read(a3700_spi);
+ if (ret)
+ goto error;
+ }
+ }
+
+	/*
+	 * Stopping a write transfer in FIFO mode:
+	 *   - wait for all bytes in the write FIFO to be shifted out
+	 *   - set the XFER_STOP bit
+	 *   - wait for the XFER_START bit to clear
+	 *   - clear the XFER_STOP bit
+	 * Stopping a read transfer in FIFO mode:
+	 *   - the hardware clears XFER_START itself once the number of
+	 *     bytes given in the DIN_CNT register has been received
+	 *   - just wait for XFER_START to clear
+	 */
+ if (a3700_spi->tx_buf) {
+ if (a3700_spi->xmit_data) {
+			/*
+			 * Data was written to the SPI device: wait until
+			 * SPI_WFIFO_EMPTY is set, i.e. until the write FIFO
+			 * has fully drained.
+			 */
+ if (!a3700_spi_transfer_wait(spi,
+ A3700_SPI_WFIFO_EMPTY)) {
+ dev_err(&spi->dev, "wait wfifo empty timed out\n");
+ return -ETIMEDOUT;
+ }
+ } else {
+			/*
+			 * The instruction in SPI_INSTR carries no data
+			 * phase: wait until SPI_XFER_RDY is set, i.e. until
+			 * the SPI interface is idle.
+			 */
+ if (!a3700_spi_transfer_wait(spi, A3700_SPI_XFER_RDY)) {
+ dev_err(&spi->dev, "wait xfer ready timed out\n");
+ return -ETIMEDOUT;
+ }
+ }
+
+ val = spireg_read(a3700_spi, A3700_SPI_IF_CFG_REG);
+ val |= A3700_SPI_XFER_STOP;
+ spireg_write(a3700_spi, A3700_SPI_IF_CFG_REG, val);
+ }
+
+ while (--timeout) {
+ val = spireg_read(a3700_spi, A3700_SPI_IF_CFG_REG);
+ if (!(val & A3700_SPI_XFER_START))
+ break;
+ udelay(1);
+ }
+
+ if (timeout == 0) {
+ dev_err(&spi->dev, "wait transfer start clear timed out\n");
+ ret = -ETIMEDOUT;
+ goto error;
+ }
+
+ val &= ~A3700_SPI_XFER_STOP;
+ spireg_write(a3700_spi, A3700_SPI_IF_CFG_REG, val);
+ goto out;
+
+error:
+ a3700_spi_transfer_abort_fifo(a3700_spi);
+out:
+ spi_finalize_current_transfer(master);
+
+ return ret;
+}
+
+static int a3700_spi_unprepare_message(struct spi_master *master,
+ struct spi_message *message)
+{
+ struct a3700_spi *a3700_spi = spi_master_get_devdata(master);
+
+ clk_disable(a3700_spi->clk);
+
+ return 0;
+}
+
+static const struct of_device_id a3700_spi_dt_ids[] = {
+ { .compatible = "marvell,armada-3700-spi", .data = NULL },
+ {},
+};
+
+MODULE_DEVICE_TABLE(of, a3700_spi_dt_ids);
+
+static int a3700_spi_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *of_node = dev->of_node;
+ struct resource *res;
+ struct spi_master *master;
+ struct a3700_spi *spi;
+ u32 num_cs = 0;
+ int ret = 0;
+
+ master = spi_alloc_master(dev, sizeof(*spi));
+ if (!master) {
+ dev_err(dev, "master allocation failed\n");
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ if (of_property_read_u32(of_node, "num-cs", &num_cs)) {
+ dev_err(dev, "could not find num-cs\n");
+ ret = -ENXIO;
+ goto error;
+ }
+
+ master->bus_num = pdev->id;
+ master->dev.of_node = of_node;
+ master->mode_bits = SPI_MODE_3;
+ master->num_chipselect = num_cs;
+ master->bits_per_word_mask = SPI_BPW_MASK(8) | SPI_BPW_MASK(32);
+ master->prepare_message = a3700_spi_prepare_message;
+ master->transfer_one = a3700_spi_transfer_one;
+ master->unprepare_message = a3700_spi_unprepare_message;
+ master->set_cs = a3700_spi_set_cs;
+ master->flags = SPI_MASTER_HALF_DUPLEX;
+	master->mode_bits |= (SPI_RX_DUAL | SPI_TX_DUAL |
+			      SPI_RX_QUAD | SPI_TX_QUAD);
+
+ platform_set_drvdata(pdev, master);
+
+ spi = spi_master_get_devdata(master);
+ memset(spi, 0, sizeof(struct a3700_spi));
+
+ spi->master = master;
+ spi->instr_cnt = A3700_INSTR_CNT;
+ spi->addr_cnt = A3700_ADDR_CNT;
+ spi->hdr_cnt = A3700_INSTR_CNT + A3700_ADDR_CNT +
+ A3700_DUMMY_CNT;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ spi->base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(spi->base)) {
+ ret = PTR_ERR(spi->base);
+ goto error;
+ }
+
+ spi->irq = platform_get_irq(pdev, 0);
+ if (spi->irq < 0) {
+ dev_err(dev, "could not get irq: %d\n", spi->irq);
+ ret = -ENXIO;
+ goto error;
+ }
+
+ init_completion(&spi->done);
+
+ spi->clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(spi->clk)) {
+ dev_err(dev, "could not find clk: %ld\n", PTR_ERR(spi->clk));
+ goto error;
+ }
+
+ ret = clk_prepare(spi->clk);
+ if (ret) {
+ dev_err(dev, "could not prepare clk: %d\n", ret);
+ goto error;
+ }
+
+ ret = a3700_spi_init(spi);
+ if (ret)
+ goto error_clk;
+
+ ret = devm_request_irq(dev, spi->irq, a3700_spi_interrupt, 0,
+ dev_name(dev), master);
+ if (ret) {
+ dev_err(dev, "could not request IRQ: %d\n", ret);
+ goto error_clk;
+ }
+
+ ret = devm_spi_register_master(dev, master);
+ if (ret) {
+ dev_err(dev, "Failed to register master\n");
+ goto error_clk;
+ }
+
+ return 0;
+
+error_clk:
+ clk_disable_unprepare(spi->clk);
+error:
+ spi_master_put(master);
+out:
+ return ret;
+}
+
+static int a3700_spi_remove(struct platform_device *pdev)
+{
+ struct spi_master *master = platform_get_drvdata(pdev);
+ struct a3700_spi *spi = spi_master_get_devdata(master);
+
+ clk_unprepare(spi->clk);
+ spi_master_put(master);
+
+ return 0;
+}
+
+static struct platform_driver a3700_spi_driver = {
+ .driver = {
+ .name = DRIVER_NAME,
+ .of_match_table = of_match_ptr(a3700_spi_dt_ids),
+ },
+ .probe = a3700_spi_probe,
+ .remove = a3700_spi_remove,
+};
+
+module_platform_driver(a3700_spi_driver);
+
+MODULE_DESCRIPTION("Armada-3700 SPI driver");
+MODULE_AUTHOR("Wilson Ding <dingwei@marvell.com>");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:" DRIVER_NAME);
diff --git a/drivers/spi/spi-ath79.c b/drivers/spi/spi-ath79.c
index 6165bf21d427..f369174fbd88 100644
--- a/drivers/spi/spi-ath79.c
+++ b/drivers/spi/spi-ath79.c
@@ -304,6 +304,7 @@ static const struct of_device_id ath79_spi_of_match[] = {
{ .compatible = "qca,ar7100-spi", },
{ },
};
+MODULE_DEVICE_TABLE(of, ath79_spi_of_match);
static struct platform_driver ath79_spi_driver = {
.probe = ath79_spi_probe,
diff --git a/drivers/spi/spi-atmel.c b/drivers/spi/spi-atmel.c
index 8feac599e9ab..0e7712bac3b6 100644
--- a/drivers/spi/spi-atmel.c
+++ b/drivers/spi/spi-atmel.c
@@ -24,6 +24,7 @@
#include <linux/io.h>
#include <linux/gpio.h>
+#include <linux/of_gpio.h>
#include <linux/pinctrl/consumer.h>
#include <linux/pm_runtime.h>
@@ -264,17 +265,6 @@
#define AUTOSUSPEND_TIMEOUT 2000
-struct atmel_spi_dma {
- struct dma_chan *chan_rx;
- struct dma_chan *chan_tx;
- struct scatterlist sgrx;
- struct scatterlist sgtx;
- struct dma_async_tx_descriptor *data_desc_rx;
- struct dma_async_tx_descriptor *data_desc_tx;
-
- struct at_dma_slave dma_slave;
-};
-
struct atmel_spi_caps {
bool is_spi2;
bool has_wdrbt;
@@ -295,6 +285,7 @@ struct atmel_spi {
int irq;
struct clk *clk;
struct platform_device *pdev;
+ unsigned long spi_clk;
struct spi_transfer *current_transfer;
int current_remaining_bytes;
@@ -302,17 +293,11 @@ struct atmel_spi {
struct completion xfer_completion;
- /* scratch buffer */
- void *buffer;
- dma_addr_t buffer_dma;
-
struct atmel_spi_caps caps;
bool use_dma;
bool use_pdc;
bool use_cs_gpios;
- /* dmaengine data */
- struct atmel_spi_dma dma;
bool keep_cs;
bool cs_active;
@@ -326,7 +311,7 @@ struct atmel_spi_device {
u32 csr;
};
-#define BUFFER_SIZE PAGE_SIZE
+#define SPI_MAX_DMA_XFER 65535 /* true for both PDC and DMA */
#define INVALID_DMA_ADDRESS 0xffffffff
/*
@@ -456,10 +441,20 @@ static inline bool atmel_spi_use_dma(struct atmel_spi *as,
return as->use_dma && xfer->len >= DMA_MIN_BYTES;
}
+static bool atmel_spi_can_dma(struct spi_master *master,
+ struct spi_device *spi,
+ struct spi_transfer *xfer)
+{
+ struct atmel_spi *as = spi_master_get_devdata(master);
+
+ return atmel_spi_use_dma(as, xfer);
+}
+
static int atmel_spi_dma_slave_config(struct atmel_spi *as,
struct dma_slave_config *slave_config,
u8 bits_per_word)
{
+ struct spi_master *master = platform_get_drvdata(as->pdev);
int err = 0;
if (bits_per_word > 8) {
@@ -491,7 +486,7 @@ static int atmel_spi_dma_slave_config(struct atmel_spi *as,
* path works the same whether FIFOs are available (and enabled) or not.
*/
slave_config->direction = DMA_MEM_TO_DEV;
- if (dmaengine_slave_config(as->dma.chan_tx, slave_config)) {
+ if (dmaengine_slave_config(master->dma_tx, slave_config)) {
dev_err(&as->pdev->dev,
"failed to configure tx dma channel\n");
err = -EINVAL;
@@ -506,7 +501,7 @@ static int atmel_spi_dma_slave_config(struct atmel_spi *as,
* enabled) or not.
*/
slave_config->direction = DMA_DEV_TO_MEM;
- if (dmaengine_slave_config(as->dma.chan_rx, slave_config)) {
+ if (dmaengine_slave_config(master->dma_rx, slave_config)) {
dev_err(&as->pdev->dev,
"failed to configure rx dma channel\n");
err = -EINVAL;
@@ -515,7 +510,8 @@ static int atmel_spi_dma_slave_config(struct atmel_spi *as,
return err;
}
-static int atmel_spi_configure_dma(struct atmel_spi *as)
+static int atmel_spi_configure_dma(struct spi_master *master,
+ struct atmel_spi *as)
{
struct dma_slave_config slave_config;
struct device *dev = &as->pdev->dev;
@@ -525,26 +521,26 @@ static int atmel_spi_configure_dma(struct atmel_spi *as)
dma_cap_zero(mask);
dma_cap_set(DMA_SLAVE, mask);
- as->dma.chan_tx = dma_request_slave_channel_reason(dev, "tx");
- if (IS_ERR(as->dma.chan_tx)) {
- err = PTR_ERR(as->dma.chan_tx);
+ master->dma_tx = dma_request_slave_channel_reason(dev, "tx");
+ if (IS_ERR(master->dma_tx)) {
+ err = PTR_ERR(master->dma_tx);
if (err == -EPROBE_DEFER) {
dev_warn(dev, "no DMA channel available at the moment\n");
- return err;
+ goto error_clear;
}
dev_err(dev,
"DMA TX channel not available, SPI unable to use DMA\n");
err = -EBUSY;
- goto error;
+ goto error_clear;
}
/*
* No reason to check EPROBE_DEFER here since we have already requested
* tx channel. If it fails here, it's for another reason.
*/
- as->dma.chan_rx = dma_request_slave_channel(dev, "rx");
+ master->dma_rx = dma_request_slave_channel(dev, "rx");
- if (!as->dma.chan_rx) {
+ if (!master->dma_rx) {
dev_err(dev,
"DMA RX channel not available, SPI unable to use DMA\n");
err = -EBUSY;
@@ -557,31 +553,38 @@ static int atmel_spi_configure_dma(struct atmel_spi *as)
dev_info(&as->pdev->dev,
"Using %s (tx) and %s (rx) for DMA transfers\n",
- dma_chan_name(as->dma.chan_tx),
- dma_chan_name(as->dma.chan_rx));
+ dma_chan_name(master->dma_tx),
+ dma_chan_name(master->dma_rx));
+
return 0;
error:
- if (as->dma.chan_rx)
- dma_release_channel(as->dma.chan_rx);
- if (!IS_ERR(as->dma.chan_tx))
- dma_release_channel(as->dma.chan_tx);
+ if (master->dma_rx)
+ dma_release_channel(master->dma_rx);
+ if (!IS_ERR(master->dma_tx))
+ dma_release_channel(master->dma_tx);
+error_clear:
+ master->dma_tx = master->dma_rx = NULL;
return err;
}
-static void atmel_spi_stop_dma(struct atmel_spi *as)
+static void atmel_spi_stop_dma(struct spi_master *master)
{
- if (as->dma.chan_rx)
- dmaengine_terminate_all(as->dma.chan_rx);
- if (as->dma.chan_tx)
- dmaengine_terminate_all(as->dma.chan_tx);
+ if (master->dma_rx)
+ dmaengine_terminate_all(master->dma_rx);
+ if (master->dma_tx)
+ dmaengine_terminate_all(master->dma_tx);
}
-static void atmel_spi_release_dma(struct atmel_spi *as)
+static void atmel_spi_release_dma(struct spi_master *master)
{
- if (as->dma.chan_rx)
- dma_release_channel(as->dma.chan_rx);
- if (as->dma.chan_tx)
- dma_release_channel(as->dma.chan_tx);
+ if (master->dma_rx) {
+ dma_release_channel(master->dma_rx);
+ master->dma_rx = NULL;
+ }
+ if (master->dma_tx) {
+ dma_release_channel(master->dma_tx);
+ master->dma_tx = NULL;
+ }
}
/* This function is called by the DMA driver from tasklet context */
@@ -611,14 +614,10 @@ static void atmel_spi_next_xfer_single(struct spi_master *master,
cpu_relax();
}
- if (xfer->tx_buf) {
- if (xfer->bits_per_word > 8)
- spi_writel(as, TDR, *(u16 *)(xfer->tx_buf + xfer_pos));
- else
- spi_writel(as, TDR, *(u8 *)(xfer->tx_buf + xfer_pos));
- } else {
- spi_writel(as, TDR, 0);
- }
+ if (xfer->bits_per_word > 8)
+ spi_writel(as, TDR, *(u16 *)(xfer->tx_buf + xfer_pos));
+ else
+ spi_writel(as, TDR, *(u8 *)(xfer->tx_buf + xfer_pos));
dev_dbg(master->dev.parent,
" start pio xfer %p: len %u tx %p rx %p bitpw %d\n",
@@ -665,17 +664,12 @@ static void atmel_spi_next_xfer_fifo(struct spi_master *master,
/* Fill TX FIFO */
while (num_data >= 2) {
- if (xfer->tx_buf) {
- if (xfer->bits_per_word > 8) {
- td0 = *words++;
- td1 = *words++;
- } else {
- td0 = *bytes++;
- td1 = *bytes++;
- }
+ if (xfer->bits_per_word > 8) {
+ td0 = *words++;
+ td1 = *words++;
} else {
- td0 = 0;
- td1 = 0;
+ td0 = *bytes++;
+ td1 = *bytes++;
}
spi_writel(as, TDR, (td1 << 16) | td0);
@@ -683,14 +677,10 @@ static void atmel_spi_next_xfer_fifo(struct spi_master *master,
}
if (num_data) {
- if (xfer->tx_buf) {
- if (xfer->bits_per_word > 8)
- td0 = *words++;
- else
- td0 = *bytes++;
- } else {
- td0 = 0;
- }
+ if (xfer->bits_per_word > 8)
+ td0 = *words++;
+ else
+ td0 = *bytes++;
spi_writew(as, TDR, td0);
num_data--;
@@ -730,13 +720,12 @@ static int atmel_spi_next_xfer_dma_submit(struct spi_master *master,
u32 *plen)
{
struct atmel_spi *as = spi_master_get_devdata(master);
- struct dma_chan *rxchan = as->dma.chan_rx;
- struct dma_chan *txchan = as->dma.chan_tx;
+ struct dma_chan *rxchan = master->dma_rx;
+ struct dma_chan *txchan = master->dma_tx;
struct dma_async_tx_descriptor *rxdesc;
struct dma_async_tx_descriptor *txdesc;
struct dma_slave_config slave_config;
dma_cookie_t cookie;
- u32 len = *plen;
dev_vdbg(master->dev.parent, "atmel_spi_next_xfer_dma_submit\n");
@@ -747,44 +736,22 @@ static int atmel_spi_next_xfer_dma_submit(struct spi_master *master,
/* release lock for DMA operations */
atmel_spi_unlock(as);
- /* prepare the RX dma transfer */
- sg_init_table(&as->dma.sgrx, 1);
- if (xfer->rx_buf) {
- as->dma.sgrx.dma_address = xfer->rx_dma + xfer->len - *plen;
- } else {
- as->dma.sgrx.dma_address = as->buffer_dma;
- if (len > BUFFER_SIZE)
- len = BUFFER_SIZE;
- }
-
- /* prepare the TX dma transfer */
- sg_init_table(&as->dma.sgtx, 1);
- if (xfer->tx_buf) {
- as->dma.sgtx.dma_address = xfer->tx_dma + xfer->len - *plen;
- } else {
- as->dma.sgtx.dma_address = as->buffer_dma;
- if (len > BUFFER_SIZE)
- len = BUFFER_SIZE;
- memset(as->buffer, 0, len);
- }
-
- sg_dma_len(&as->dma.sgtx) = len;
- sg_dma_len(&as->dma.sgrx) = len;
-
- *plen = len;
+ *plen = xfer->len;
if (atmel_spi_dma_slave_config(as, &slave_config,
xfer->bits_per_word))
goto err_exit;
/* Send both scatterlists */
- rxdesc = dmaengine_prep_slave_sg(rxchan, &as->dma.sgrx, 1,
+ rxdesc = dmaengine_prep_slave_sg(rxchan,
+ xfer->rx_sg.sgl, xfer->rx_sg.nents,
DMA_FROM_DEVICE,
DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
if (!rxdesc)
goto err_dma;
- txdesc = dmaengine_prep_slave_sg(txchan, &as->dma.sgtx, 1,
+ txdesc = dmaengine_prep_slave_sg(txchan,
+ xfer->tx_sg.sgl, xfer->tx_sg.nents,
DMA_TO_DEVICE,
DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
if (!txdesc)
@@ -818,7 +785,7 @@ static int atmel_spi_next_xfer_dma_submit(struct spi_master *master,
err_dma:
spi_writel(as, IDR, SPI_BIT(OVRES));
- atmel_spi_stop_dma(as);
+ atmel_spi_stop_dma(master);
err_exit:
atmel_spi_lock(as);
return -ENOMEM;
@@ -830,30 +797,10 @@ static void atmel_spi_next_xfer_data(struct spi_master *master,
dma_addr_t *rx_dma,
u32 *plen)
{
- struct atmel_spi *as = spi_master_get_devdata(master);
- u32 len = *plen;
-
- /* use scratch buffer only when rx or tx data is unspecified */
- if (xfer->rx_buf)
- *rx_dma = xfer->rx_dma + xfer->len - *plen;
- else {
- *rx_dma = as->buffer_dma;
- if (len > BUFFER_SIZE)
- len = BUFFER_SIZE;
- }
-
- if (xfer->tx_buf)
- *tx_dma = xfer->tx_dma + xfer->len - *plen;
- else {
- *tx_dma = as->buffer_dma;
- if (len > BUFFER_SIZE)
- len = BUFFER_SIZE;
- memset(as->buffer, 0, len);
- dma_sync_single_for_device(&as->pdev->dev,
- as->buffer_dma, len, DMA_TO_DEVICE);
- }
-
- *plen = len;
+ *rx_dma = xfer->rx_dma + xfer->len - *plen;
+ *tx_dma = xfer->tx_dma + xfer->len - *plen;
+ if (*plen > master->max_dma_len)
+ *plen = master->max_dma_len;
}
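A worked example of the chunking above, assuming a hypothetical
100000-byte PDC transfer with master->max_dma_len set to
SPI_MAX_DMA_XFER (65535):

	/*
	 * 1st call: *plen = 65535, DMA addresses at offset 0
	 * 2nd call: *plen = 34465, DMA addresses at offset 65535
	 * (offset = xfer->len - remaining length on entry)
	 */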
static int atmel_spi_set_xfer_speed(struct atmel_spi *as,
@@ -864,7 +811,7 @@ static int atmel_spi_set_xfer_speed(struct atmel_spi *as,
unsigned long bus_hz;
/* v1 chips start out at half the peripheral bus speed. */
- bus_hz = clk_get_rate(as->clk);
+ bus_hz = as->spi_clk;
if (!atmel_spi_is_v2(as))
bus_hz /= 2;
@@ -1025,16 +972,12 @@ atmel_spi_pump_single_data(struct atmel_spi *as, struct spi_transfer *xfer)
u16 *rxp16;
unsigned long xfer_pos = xfer->len - as->current_remaining_bytes;
- if (xfer->rx_buf) {
- if (xfer->bits_per_word > 8) {
- rxp16 = (u16 *)(((u8 *)xfer->rx_buf) + xfer_pos);
- *rxp16 = spi_readl(as, RDR);
- } else {
- rxp = ((u8 *)xfer->rx_buf) + xfer_pos;
- *rxp = spi_readl(as, RDR);
- }
+ if (xfer->bits_per_word > 8) {
+ rxp16 = (u16 *)(((u8 *)xfer->rx_buf) + xfer_pos);
+ *rxp16 = spi_readl(as, RDR);
} else {
- spi_readl(as, RDR);
+ rxp = ((u8 *)xfer->rx_buf) + xfer_pos;
+ *rxp = spi_readl(as, RDR);
}
if (xfer->bits_per_word > 8) {
if (as->current_remaining_bytes > 2)
@@ -1073,12 +1016,10 @@ atmel_spi_pump_fifo_data(struct atmel_spi *as, struct spi_transfer *xfer)
/* Read data */
while (num_data) {
rd = spi_readl(as, RDR);
- if (xfer->rx_buf) {
- if (xfer->bits_per_word > 8)
- *words++ = rd;
- else
- *bytes++ = rd;
- }
+ if (xfer->bits_per_word > 8)
+ *words++ = rd;
+ else
+ *bytes++ = rd;
num_data--;
}
}
@@ -1204,7 +1145,6 @@ static int atmel_spi_setup(struct spi_device *spi)
u32 csr;
unsigned int bits = spi->bits_per_word;
unsigned int npcs_pin;
- int ret;
as = spi_master_get_devdata(spi->master);
@@ -1247,16 +1187,9 @@ static int atmel_spi_setup(struct spi_device *spi)
if (!asd)
return -ENOMEM;
- if (as->use_cs_gpios) {
- ret = gpio_request(npcs_pin, dev_name(&spi->dev));
- if (ret) {
- kfree(asd);
- return ret;
- }
-
+ if (as->use_cs_gpios)
gpio_direction_output(npcs_pin,
!(spi->mode & SPI_CS_HIGH));
- }
asd->npcs_pin = npcs_pin;
spi->controller_state = asd;
@@ -1307,7 +1240,7 @@ static int atmel_spi_one_transfer(struct spi_master *master,
* better fault reporting.
*/
if ((!msg->is_dma_mapped)
- && (atmel_spi_use_dma(as, xfer) || as->use_pdc)) {
+ && as->use_pdc) {
if (atmel_spi_dma_map_xfer(as, xfer) < 0)
return -ENOMEM;
}
@@ -1380,11 +1313,11 @@ static int atmel_spi_one_transfer(struct spi_master *master,
spi_readl(as, SR);
} else if (atmel_spi_use_dma(as, xfer)) {
- atmel_spi_stop_dma(as);
+ atmel_spi_stop_dma(master);
}
if (!msg->is_dma_mapped
- && (atmel_spi_use_dma(as, xfer) || as->use_pdc))
+ && as->use_pdc)
atmel_spi_dma_unmap_xfer(master, xfer);
return 0;
@@ -1395,7 +1328,7 @@ static int atmel_spi_one_transfer(struct spi_master *master,
}
if (!msg->is_dma_mapped
- && (atmel_spi_use_dma(as, xfer) || as->use_pdc))
+ && as->use_pdc)
atmel_spi_dma_unmap_xfer(master, xfer);
if (xfer->delay_usecs)
@@ -1471,13 +1404,11 @@ msg_done:
static void atmel_spi_cleanup(struct spi_device *spi)
{
struct atmel_spi_device *asd = spi->controller_state;
- unsigned gpio = (unsigned long) spi->controller_data;
if (!asd)
return;
spi->controller_state = NULL;
- gpio_free(gpio);
kfree(asd);
}
@@ -1499,6 +1430,39 @@ static void atmel_get_caps(struct atmel_spi *as)
}
/*-------------------------------------------------------------------------*/
+static int atmel_spi_gpio_cs(struct platform_device *pdev)
+{
+ struct spi_master *master = platform_get_drvdata(pdev);
+ struct atmel_spi *as = spi_master_get_devdata(master);
+ struct device_node *np = master->dev.of_node;
+ int i;
+ int ret = 0;
+ int nb = 0;
+
+ if (!as->use_cs_gpios)
+ return 0;
+
+ if (!np)
+ return 0;
+
+ nb = of_gpio_named_count(np, "cs-gpios");
+ for (i = 0; i < nb; i++) {
+ int cs_gpio = of_get_named_gpio(pdev->dev.of_node,
+ "cs-gpios", i);
+
+ if (cs_gpio == -EPROBE_DEFER)
+ return cs_gpio;
+
+ if (gpio_is_valid(cs_gpio)) {
+ ret = devm_gpio_request(&pdev->dev, cs_gpio,
+ dev_name(&pdev->dev));
+ if (ret)
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
static int atmel_spi_probe(struct platform_device *pdev)
{
@@ -1537,29 +1501,23 @@ static int atmel_spi_probe(struct platform_device *pdev)
master->bus_num = pdev->id;
master->num_chipselect = master->dev.of_node ? 0 : 4;
master->setup = atmel_spi_setup;
+ master->flags = (SPI_MASTER_MUST_RX | SPI_MASTER_MUST_TX);
master->transfer_one_message = atmel_spi_transfer_one_message;
master->cleanup = atmel_spi_cleanup;
master->auto_runtime_pm = true;
+ master->max_dma_len = SPI_MAX_DMA_XFER;
+ master->can_dma = atmel_spi_can_dma;
platform_set_drvdata(pdev, master);
as = spi_master_get_devdata(master);
- /*
- * Scratch buffer is used for throwaway rx and tx data.
- * It's coherent to minimize dcache pollution.
- */
- as->buffer = dma_alloc_coherent(&pdev->dev, BUFFER_SIZE,
- &as->buffer_dma, GFP_KERNEL);
- if (!as->buffer)
- goto out_free;
-
spin_lock_init(&as->lock);
as->pdev = pdev;
as->regs = devm_ioremap_resource(&pdev->dev, regs);
if (IS_ERR(as->regs)) {
ret = PTR_ERR(as->regs);
- goto out_free_buffer;
+ goto out_unmap_regs;
}
as->phybase = regs->start;
as->irq = irq;
@@ -1577,14 +1535,19 @@ static int atmel_spi_probe(struct platform_device *pdev)
master->num_chipselect = 4;
}
+ ret = atmel_spi_gpio_cs(pdev);
+ if (ret)
+ goto out_unmap_regs;
+
as->use_dma = false;
as->use_pdc = false;
if (as->caps.has_dma_support) {
- ret = atmel_spi_configure_dma(as);
- if (ret == 0)
+ ret = atmel_spi_configure_dma(master, as);
+ if (ret == 0) {
as->use_dma = true;
- else if (ret == -EPROBE_DEFER)
+ } else if (ret == -EPROBE_DEFER) {
return ret;
+ }
} else {
as->use_pdc = true;
}
@@ -1606,6 +1569,9 @@ static int atmel_spi_probe(struct platform_device *pdev)
ret = clk_prepare_enable(clk);
if (ret)
goto out_free_irq;
+
+ as->spi_clk = clk_get_rate(clk);
+
spi_writel(as, CR, SPI_BIT(SWRST));
spi_writel(as, CR, SPI_BIT(SWRST)); /* AT91SAM9263 Rev B workaround */
if (as->caps.has_wdrbt) {
@@ -1626,10 +1592,6 @@ static int atmel_spi_probe(struct platform_device *pdev)
spi_writel(as, CR, SPI_BIT(FIFOEN));
}
- /* go! */
- dev_info(&pdev->dev, "Atmel SPI Controller at 0x%08lx (irq %d)\n",
- (unsigned long)regs->start, irq);
-
pm_runtime_set_autosuspend_delay(&pdev->dev, AUTOSUSPEND_TIMEOUT);
pm_runtime_use_autosuspend(&pdev->dev);
pm_runtime_set_active(&pdev->dev);
@@ -1639,6 +1601,10 @@ static int atmel_spi_probe(struct platform_device *pdev)
if (ret)
goto out_free_dma;
+ /* go! */
+ dev_info(&pdev->dev, "Atmel SPI Controller at 0x%08lx (irq %d)\n",
+ (unsigned long)regs->start, irq);
+
return 0;
out_free_dma:
@@ -1646,16 +1612,13 @@ out_free_dma:
pm_runtime_set_suspended(&pdev->dev);
if (as->use_dma)
- atmel_spi_release_dma(as);
+ atmel_spi_release_dma(master);
spi_writel(as, CR, SPI_BIT(SWRST));
spi_writel(as, CR, SPI_BIT(SWRST)); /* AT91SAM9263 Rev B workaround */
clk_disable_unprepare(clk);
out_free_irq:
out_unmap_regs:
-out_free_buffer:
- dma_free_coherent(&pdev->dev, BUFFER_SIZE, as->buffer,
- as->buffer_dma);
out_free:
spi_master_put(master);
return ret;
@@ -1671,8 +1634,8 @@ static int atmel_spi_remove(struct platform_device *pdev)
/* reset the hardware and block queue progress */
spin_lock_irq(&as->lock);
if (as->use_dma) {
- atmel_spi_stop_dma(as);
- atmel_spi_release_dma(as);
+ atmel_spi_stop_dma(master);
+ atmel_spi_release_dma(master);
}
spi_writel(as, CR, SPI_BIT(SWRST));
@@ -1680,9 +1643,6 @@ static int atmel_spi_remove(struct platform_device *pdev)
spi_readl(as, SR);
spin_unlock_irq(&as->lock);
- dma_free_coherent(&pdev->dev, BUFFER_SIZE, as->buffer,
- as->buffer_dma);
-
clk_disable_unprepare(as->clk);
pm_runtime_put_noidle(&pdev->dev);
diff --git a/drivers/spi/spi-axi-spi-engine.c b/drivers/spi/spi-axi-spi-engine.c
index 2b1456e5e221..319225d7e761 100644
--- a/drivers/spi/spi-axi-spi-engine.c
+++ b/drivers/spi/spi-axi-spi-engine.c
@@ -574,6 +574,7 @@ static const struct of_device_id spi_engine_match_table[] = {
{ .compatible = "adi,axi-spi-engine-1.00.a" },
{ },
};
+MODULE_DEVICE_TABLE(of, spi_engine_match_table);
static struct platform_driver spi_engine_driver = {
.probe = spi_engine_probe,
diff --git a/drivers/spi/spi-dw.c b/drivers/spi/spi-dw.c
index 27960e46135d..b715a26a9148 100644
--- a/drivers/spi/spi-dw.c
+++ b/drivers/spi/spi-dw.c
@@ -502,6 +502,7 @@ int dw_spi_add_host(struct device *dev, struct dw_spi *dws)
master->handle_err = dw_spi_handle_err;
master->max_speed_hz = dws->max_freq;
master->dev.of_node = dev->of_node;
+ master->flags = SPI_MASTER_GPIO_SS;
/* Basic HW init */
spi_hw_init(dev, dws);
diff --git a/drivers/spi/spi-fsl-dspi.c b/drivers/spi/spi-fsl-dspi.c
index a67b0ff6a362..14c8e7ce1913 100644
--- a/drivers/spi/spi-fsl-dspi.c
+++ b/drivers/spi/spi-fsl-dspi.c
@@ -15,6 +15,8 @@
#include <linux/clk.h>
#include <linux/delay.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
@@ -40,6 +42,7 @@
#define TRAN_STATE_WORD_ODD_NUM 0x04
#define DSPI_FIFO_SIZE 4
+#define DSPI_DMA_BUFSIZE (DSPI_FIFO_SIZE * 1024)
#define SPI_MCR 0x00
#define SPI_MCR_MASTER (1 << 31)
@@ -72,6 +75,11 @@
#define SPI_SR_TCFQF 0x80000000
#define SPI_SR_CLEAR 0xdaad0000
+#define SPI_RSER_TFFFE BIT(25)
+#define SPI_RSER_TFFFD BIT(24)
+#define SPI_RSER_RFDFE BIT(17)
+#define SPI_RSER_RFDFD BIT(16)
+
#define SPI_RSER 0x30
#define SPI_RSER_EOQFE 0x10000000
#define SPI_RSER_TCFQE 0x80000000
@@ -109,6 +117,8 @@
#define SPI_TCR_TCNT_MAX 0x10000
+#define DMA_COMPLETION_TIMEOUT msecs_to_jiffies(3000)
+
struct chip_data {
u32 mcr_val;
u32 ctar_val;
@@ -118,6 +128,7 @@ struct chip_data {
enum dspi_trans_mode {
DSPI_EOQ_MODE = 0,
DSPI_TCFQ_MODE,
+ DSPI_DMA_MODE,
};
struct fsl_dspi_devtype_data {
@@ -126,7 +137,7 @@ struct fsl_dspi_devtype_data {
};
static const struct fsl_dspi_devtype_data vf610_data = {
- .trans_mode = DSPI_EOQ_MODE,
+ .trans_mode = DSPI_DMA_MODE,
.max_clock_factor = 2,
};
@@ -140,6 +151,23 @@ static const struct fsl_dspi_devtype_data ls2085a_data = {
.max_clock_factor = 8,
};
+struct fsl_dspi_dma {
+ /* Length of transfer in words of DSPI_FIFO_SIZE (4 bytes each) */
+ u32 curr_xfer_len;
+
+ u32 *tx_dma_buf;
+ struct dma_chan *chan_tx;
+ dma_addr_t tx_dma_phys;
+ struct completion cmd_tx_complete;
+ struct dma_async_tx_descriptor *tx_desc;
+
+ u32 *rx_dma_buf;
+ struct dma_chan *chan_rx;
+ dma_addr_t rx_dma_phys;
+ struct completion cmd_rx_complete;
+ struct dma_async_tx_descriptor *rx_desc;
+};
+
struct fsl_dspi {
struct spi_master *master;
struct platform_device *pdev;
@@ -166,8 +194,11 @@ struct fsl_dspi {
u32 waitflags;
u32 spi_tcnt;
+ struct fsl_dspi_dma *dma;
};
+static u32 dspi_data_to_pushr(struct fsl_dspi *dspi, int tx_word);
+
static inline int is_double_byte_mode(struct fsl_dspi *dspi)
{
unsigned int val;
@@ -177,6 +208,255 @@ static inline int is_double_byte_mode(struct fsl_dspi *dspi)
return ((val & SPI_FRAME_BITS_MASK) == SPI_FRAME_BITS(8)) ? 0 : 1;
}
+static void dspi_tx_dma_callback(void *arg)
+{
+ struct fsl_dspi *dspi = arg;
+ struct fsl_dspi_dma *dma = dspi->dma;
+
+ complete(&dma->cmd_tx_complete);
+}
+
+static void dspi_rx_dma_callback(void *arg)
+{
+ struct fsl_dspi *dspi = arg;
+ struct fsl_dspi_dma *dma = dspi->dma;
+ int rx_word;
+ int i;
+ u16 d;
+
+ rx_word = is_double_byte_mode(dspi);
+
+ if (!(dspi->dataflags & TRAN_STATE_RX_VOID)) {
+ for (i = 0; i < dma->curr_xfer_len; i++) {
+ d = dspi->dma->rx_dma_buf[i];
+ if (rx_word)
+ *(u16 *)dspi->rx = d;
+ else
+ *(u8 *)dspi->rx = d;
+ dspi->rx += rx_word + 1;
+ }
+ }
+
+ complete(&dma->cmd_rx_complete);
+}
+
+static int dspi_next_xfer_dma_submit(struct fsl_dspi *dspi)
+{
+ struct fsl_dspi_dma *dma = dspi->dma;
+ struct device *dev = &dspi->pdev->dev;
+ int time_left;
+ int tx_word;
+ int i;
+
+ tx_word = is_double_byte_mode(dspi);
+
+ for (i = 0; i < dma->curr_xfer_len; i++) {
+ dspi->dma->tx_dma_buf[i] = dspi_data_to_pushr(dspi, tx_word);
+ if ((dspi->cs_change) && (!dspi->len))
+ dspi->dma->tx_dma_buf[i] &= ~SPI_PUSHR_CONT;
+ }
+
+ dma->tx_desc = dmaengine_prep_slave_single(dma->chan_tx,
+ dma->tx_dma_phys,
+ dma->curr_xfer_len *
+ DMA_SLAVE_BUSWIDTH_4_BYTES,
+ DMA_MEM_TO_DEV,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (!dma->tx_desc) {
+ dev_err(dev, "Not able to get desc for DMA xfer\n");
+ return -EIO;
+ }
+
+ dma->tx_desc->callback = dspi_tx_dma_callback;
+ dma->tx_desc->callback_param = dspi;
+ if (dma_submit_error(dmaengine_submit(dma->tx_desc))) {
+ dev_err(dev, "DMA submit failed\n");
+ return -EINVAL;
+ }
+
+ dma->rx_desc = dmaengine_prep_slave_single(dma->chan_rx,
+ dma->rx_dma_phys,
+ dma->curr_xfer_len *
+ DMA_SLAVE_BUSWIDTH_4_BYTES,
+ DMA_DEV_TO_MEM,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (!dma->rx_desc) {
+ dev_err(dev, "Not able to get desc for DMA xfer\n");
+ return -EIO;
+ }
+
+ dma->rx_desc->callback = dspi_rx_dma_callback;
+ dma->rx_desc->callback_param = dspi;
+ if (dma_submit_error(dmaengine_submit(dma->rx_desc))) {
+ dev_err(dev, "DMA submit failed\n");
+ return -EINVAL;
+ }
+
+ reinit_completion(&dspi->dma->cmd_rx_complete);
+ reinit_completion(&dspi->dma->cmd_tx_complete);
+
+ dma_async_issue_pending(dma->chan_rx);
+ dma_async_issue_pending(dma->chan_tx);
+
+ time_left = wait_for_completion_timeout(&dspi->dma->cmd_tx_complete,
+ DMA_COMPLETION_TIMEOUT);
+ if (time_left == 0) {
+ dev_err(dev, "DMA tx timeout\n");
+ dmaengine_terminate_all(dma->chan_tx);
+ dmaengine_terminate_all(dma->chan_rx);
+ return -ETIMEDOUT;
+ }
+
+ time_left = wait_for_completion_timeout(&dspi->dma->cmd_rx_complete,
+ DMA_COMPLETION_TIMEOUT);
+ if (time_left == 0) {
+ dev_err(dev, "DMA rx timeout\n");
+ dmaengine_terminate_all(dma->chan_tx);
+ dmaengine_terminate_all(dma->chan_rx);
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+static int dspi_dma_xfer(struct fsl_dspi *dspi)
+{
+ struct fsl_dspi_dma *dma = dspi->dma;
+ struct device *dev = &dspi->pdev->dev;
+ int curr_remaining_bytes;
+ int bytes_per_buffer;
+ int word = 1;
+ int ret = 0;
+
+ if (is_double_byte_mode(dspi))
+ word = 2;
+ curr_remaining_bytes = dspi->len;
+ bytes_per_buffer = DSPI_DMA_BUFSIZE / DSPI_FIFO_SIZE;
+ while (curr_remaining_bytes) {
+ /* Check if current transfer fits the DMA buffer */
+ dma->curr_xfer_len = curr_remaining_bytes / word;
+ if (dma->curr_xfer_len > bytes_per_buffer)
+ dma->curr_xfer_len = bytes_per_buffer;
+
+ ret = dspi_next_xfer_dma_submit(dspi);
+ if (ret) {
+ dev_err(dev, "DMA transfer failed\n");
+ goto exit;
+
+ } else {
+ curr_remaining_bytes -= dma->curr_xfer_len * word;
+ if (curr_remaining_bytes < 0)
+ curr_remaining_bytes = 0;
+ }
+ }
+
+exit:
+ return ret;
+}
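For context, DSPI_DMA_BUFSIZE above is 4 * 1024 = 4096 bytes and every DMA entry is one 32-bit PUSHR word, so one buffer holds DSPI_DMA_BUFSIZE / DSPI_FIFO_SIZE = 1024 entries. A minimal editorial sketch of the resulting chunking, assuming a hypothetical 3000-byte transfer in 8-bit mode (word == 1):

	/* Editorial sketch, not part of the patch: chunking a 3000-byte transfer */
	int remaining = 3000;		/* dspi->len */
	int per_buffer = 4096 / 4;	/* DSPI_DMA_BUFSIZE / DSPI_FIFO_SIZE = 1024 */

	while (remaining) {
		int chunk = remaining < per_buffer ? remaining : per_buffer;
		/* rounds of 1024, 1024 and 952 PUSHR words get submitted */
		remaining -= chunk;
	}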
+
+static int dspi_request_dma(struct fsl_dspi *dspi, phys_addr_t phy_addr)
+{
+ struct fsl_dspi_dma *dma;
+ struct dma_slave_config cfg;
+ struct device *dev = &dspi->pdev->dev;
+ int ret;
+
+ dma = devm_kzalloc(dev, sizeof(*dma), GFP_KERNEL);
+ if (!dma)
+ return -ENOMEM;
+
+ dma->chan_rx = dma_request_slave_channel(dev, "rx");
+ if (!dma->chan_rx) {
+ dev_err(dev, "rx dma channel not available\n");
+ ret = -ENODEV;
+ return ret;
+ }
+
+ dma->chan_tx = dma_request_slave_channel(dev, "tx");
+ if (!dma->chan_tx) {
+ dev_err(dev, "tx dma channel not available\n");
+ ret = -ENODEV;
+ goto err_tx_channel;
+ }
+
+ dma->tx_dma_buf = dma_alloc_coherent(dev, DSPI_DMA_BUFSIZE,
+ &dma->tx_dma_phys, GFP_KERNEL);
+ if (!dma->tx_dma_buf) {
+ ret = -ENOMEM;
+ goto err_tx_dma_buf;
+ }
+
+ dma->rx_dma_buf = dma_alloc_coherent(dev, DSPI_DMA_BUFSIZE,
+ &dma->rx_dma_phys, GFP_KERNEL);
+ if (!dma->rx_dma_buf) {
+ ret = -ENOMEM;
+ goto err_rx_dma_buf;
+ }
+
+ cfg.src_addr = phy_addr + SPI_POPR;
+ cfg.dst_addr = phy_addr + SPI_PUSHR;
+ cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ cfg.src_maxburst = 1;
+ cfg.dst_maxburst = 1;
+
+ cfg.direction = DMA_DEV_TO_MEM;
+ ret = dmaengine_slave_config(dma->chan_rx, &cfg);
+ if (ret) {
+ dev_err(dev, "can't configure rx dma channel\n");
+ ret = -EINVAL;
+ goto err_slave_config;
+ }
+
+ cfg.direction = DMA_MEM_TO_DEV;
+ ret = dmaengine_slave_config(dma->chan_tx, &cfg);
+ if (ret) {
+ dev_err(dev, "can't configure tx dma channel\n");
+ ret = -EINVAL;
+ goto err_slave_config;
+ }
+
+ dspi->dma = dma;
+ init_completion(&dma->cmd_tx_complete);
+ init_completion(&dma->cmd_rx_complete);
+
+ return 0;
+
+err_slave_config:
+ dma_free_coherent(dev, DSPI_DMA_BUFSIZE,
+ dma->rx_dma_buf, dma->rx_dma_phys);
+err_rx_dma_buf:
+ dma_free_coherent(dev, DSPI_DMA_BUFSIZE,
+ dma->tx_dma_buf, dma->tx_dma_phys);
+err_tx_dma_buf:
+ dma_release_channel(dma->chan_tx);
+err_tx_channel:
+ dma_release_channel(dma->chan_rx);
+
+ devm_kfree(dev, dma);
+ dspi->dma = NULL;
+
+ return ret;
+}
+
+static void dspi_release_dma(struct fsl_dspi *dspi)
+{
+ struct fsl_dspi_dma *dma = dspi->dma;
+ struct device *dev = &dspi->pdev->dev;
+
+ if (dma) {
+ if (dma->chan_tx) {
+ dma_free_coherent(dev, DSPI_DMA_BUFSIZE,
+ dma->tx_dma_buf, dma->tx_dma_phys);
+ dma_release_channel(dma->chan_tx);
+ }
+
+ if (dma->chan_rx) {
+ dma_free_coherent(dev, DSPI_DMA_BUFSIZE,
+ dma->rx_dma_buf, dma->rx_dma_phys);
+ dma_release_channel(dma->chan_rx);
+ }
+ }
+}
+
static void hz_to_spi_baud(char *pbr, char *br, int speed_hz,
unsigned long clkrate)
{
@@ -425,6 +705,12 @@ static int dspi_transfer_one_message(struct spi_master *master,
regmap_write(dspi->regmap, SPI_RSER, SPI_RSER_TCFQE);
dspi_tcfq_write(dspi);
break;
+ case DSPI_DMA_MODE:
+ regmap_write(dspi->regmap, SPI_RSER,
+ SPI_RSER_TFFFE | SPI_RSER_TFFFD |
+ SPI_RSER_RFDFE | SPI_RSER_RFDFD);
+ status = dspi_dma_xfer(dspi);
+ break;
default:
dev_err(&dspi->pdev->dev, "unsupported trans_mode %u\n",
trans_mode);
@@ -432,9 +718,13 @@ static int dspi_transfer_one_message(struct spi_master *master,
goto out;
}
- if (wait_event_interruptible(dspi->waitq, dspi->waitflags))
- dev_err(&dspi->pdev->dev, "wait transfer complete fail!\n");
- dspi->waitflags = 0;
+ if (trans_mode != DSPI_DMA_MODE) {
+ if (wait_event_interruptible(dspi->waitq,
+ dspi->waitflags))
+ dev_err(&dspi->pdev->dev,
+ "wait transfer complete fail!\n");
+ dspi->waitflags = 0;
+ }
if (transfer->delay_usecs)
udelay(transfer->delay_usecs);
@@ -740,6 +1030,13 @@ static int dspi_probe(struct platform_device *pdev)
if (ret)
goto out_master_put;
+ if (dspi->devtype_data->trans_mode == DSPI_DMA_MODE) {
+ if (dspi_request_dma(dspi, res->start)) {
+ dev_err(&pdev->dev, "can't get dma channels\n");
+ goto out_clk_put;
+ }
+ }
+
master->max_speed_hz =
clk_get_rate(dspi->clk) / dspi->devtype_data->max_clock_factor;
@@ -768,6 +1065,7 @@ static int dspi_remove(struct platform_device *pdev)
struct fsl_dspi *dspi = spi_master_get_devdata(master);
/* Disconnect from the SPI framework */
+ dspi_release_dma(dspi);
clk_disable_unprepare(dspi->clk);
spi_unregister_master(dspi->master);
diff --git a/drivers/spi/spi-fsl-espi.c b/drivers/spi/spi-fsl-espi.c
index 2c175b9495f7..1d332e23f6ed 100644
--- a/drivers/spi/spi-fsl-espi.c
+++ b/drivers/spi/spi-fsl-espi.c
@@ -23,8 +23,6 @@
#include <linux/pm_runtime.h>
#include <sysdev/fsl_soc.h>
-#include "spi-fsl-lib.h"
-
/* eSPI Controller registers */
#define ESPI_SPMODE 0x00 /* eSPI mode register */
#define ESPI_SPIE 0x04 /* eSPI event register */
@@ -54,8 +52,11 @@
#define CSMODE_AFT(x) ((x) << 8)
#define CSMODE_CG(x) ((x) << 3)
+#define FSL_ESPI_FIFO_SIZE 32
+#define FSL_ESPI_RXTHR 15
+
/* Default mode/csmode for eSPI controller */
-#define SPMODE_INIT_VAL (SPMODE_TXTHR(4) | SPMODE_RXTHR(3))
+#define SPMODE_INIT_VAL (SPMODE_TXTHR(4) | SPMODE_RXTHR(FSL_ESPI_RXTHR))
#define CSMODE_INIT_VAL (CSMODE_POL_1 | CSMODE_BEF(0) \
| CSMODE_AFT(0) | CSMODE_CG(1))
@@ -90,219 +91,342 @@
#define AUTOSUSPEND_TIMEOUT 2000
-static inline u32 fsl_espi_read_reg(struct mpc8xxx_spi *mspi, int offset)
+struct fsl_espi {
+ struct device *dev;
+ void __iomem *reg_base;
+
+ struct list_head *m_transfers;
+ struct spi_transfer *tx_t;
+ unsigned int tx_pos;
+ bool tx_done;
+ struct spi_transfer *rx_t;
+ unsigned int rx_pos;
+ bool rx_done;
+
+ bool swab;
+ unsigned int rxskip;
+
+ spinlock_t lock;
+
+ u32 spibrg; /* SPIBRG input clock */
+
+ struct completion done;
+};
+
+struct fsl_espi_cs {
+ u32 hw_mode;
+};
+
+static inline u32 fsl_espi_read_reg(struct fsl_espi *espi, int offset)
{
- return ioread32be(mspi->reg_base + offset);
+ return ioread32be(espi->reg_base + offset);
}
-static inline u8 fsl_espi_read_reg8(struct mpc8xxx_spi *mspi, int offset)
+static inline u16 fsl_espi_read_reg16(struct fsl_espi *espi, int offset)
{
- return ioread8(mspi->reg_base + offset);
+ return ioread16be(espi->reg_base + offset);
}
-static inline void fsl_espi_write_reg(struct mpc8xxx_spi *mspi, int offset,
- u32 val)
+static inline u8 fsl_espi_read_reg8(struct fsl_espi *espi, int offset)
{
- iowrite32be(val, mspi->reg_base + offset);
+ return ioread8(espi->reg_base + offset);
}
-static inline void fsl_espi_write_reg8(struct mpc8xxx_spi *mspi, int offset,
- u8 val)
+static inline void fsl_espi_write_reg(struct fsl_espi *espi, int offset,
+ u32 val)
{
- iowrite8(val, mspi->reg_base + offset);
+ iowrite32be(val, espi->reg_base + offset);
}
-static void fsl_espi_copy_to_buf(struct spi_message *m,
- struct mpc8xxx_spi *mspi)
+static inline void fsl_espi_write_reg16(struct fsl_espi *espi, int offset,
+ u16 val)
{
- struct spi_transfer *t;
- u8 *buf = mspi->local_buf;
-
- list_for_each_entry(t, &m->transfers, transfer_list) {
- if (t->tx_buf)
- memcpy(buf, t->tx_buf, t->len);
- else
- memset(buf, 0, t->len);
- buf += t->len;
- }
+ iowrite16be(val, espi->reg_base + offset);
}
-static void fsl_espi_copy_from_buf(struct spi_message *m,
- struct mpc8xxx_spi *mspi)
+static inline void fsl_espi_write_reg8(struct fsl_espi *espi, int offset,
+ u8 val)
{
- struct spi_transfer *t;
- u8 *buf = mspi->local_buf;
-
- list_for_each_entry(t, &m->transfers, transfer_list) {
- if (t->rx_buf)
- memcpy(t->rx_buf, buf, t->len);
- buf += t->len;
- }
+ iowrite8(val, espi->reg_base + offset);
}
static int fsl_espi_check_message(struct spi_message *m)
{
- struct mpc8xxx_spi *mspi = spi_master_get_devdata(m->spi->master);
+ struct fsl_espi *espi = spi_master_get_devdata(m->spi->master);
struct spi_transfer *t, *first;
if (m->frame_length > SPCOM_TRANLEN_MAX) {
- dev_err(mspi->dev, "message too long, size is %u bytes\n",
+ dev_err(espi->dev, "message too long, size is %u bytes\n",
m->frame_length);
return -EMSGSIZE;
}
first = list_first_entry(&m->transfers, struct spi_transfer,
transfer_list);
+
list_for_each_entry(t, &m->transfers, transfer_list) {
if (first->bits_per_word != t->bits_per_word ||
first->speed_hz != t->speed_hz) {
- dev_err(mspi->dev, "bits_per_word/speed_hz should be the same for all transfers\n");
+ dev_err(espi->dev, "bits_per_word/speed_hz should be the same for all transfers\n");
return -EINVAL;
}
}
+ /* ESPI supports MSB-first transfers for word size 8 / 16 only */
+ if (!(m->spi->mode & SPI_LSB_FIRST) && first->bits_per_word != 8 &&
+ first->bits_per_word != 16) {
+ dev_err(espi->dev,
+ "MSB-first transfer not supported for wordsize %u\n",
+ first->bits_per_word);
+ return -EINVAL;
+ }
+
return 0;
}
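To illustrate the last check with a hypothetical transfer (names invented): on a device without SPI_LSB_FIRST, only 8- and 16-bit word sizes pass, so e.g. a 12-bit MSB-first transfer is rejected:

	/* Hypothetical example: rejected with -EINVAL by fsl_espi_check_message() */
	struct spi_transfer t = {
		.tx_buf = tx,		/* tx is an illustrative buffer */
		.len = 2,
		.bits_per_word = 12,	/* neither 8 nor 16 */
	};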
-static void fsl_espi_change_mode(struct spi_device *spi)
+static unsigned int fsl_espi_check_rxskip_mode(struct spi_message *m)
{
- struct mpc8xxx_spi *mspi = spi_master_get_devdata(spi->master);
- struct spi_mpc8xxx_cs *cs = spi->controller_state;
- u32 tmp;
- unsigned long flags;
-
- /* Turn off IRQs locally to minimize time that SPI is disabled. */
- local_irq_save(flags);
-
- /* Turn off SPI unit prior changing mode */
- tmp = fsl_espi_read_reg(mspi, ESPI_SPMODE);
- fsl_espi_write_reg(mspi, ESPI_SPMODE, tmp & ~SPMODE_ENABLE);
- fsl_espi_write_reg(mspi, ESPI_SPMODEx(spi->chip_select),
- cs->hw_mode);
- fsl_espi_write_reg(mspi, ESPI_SPMODE, tmp);
-
- local_irq_restore(flags);
+ struct spi_transfer *t;
+ unsigned int i = 0, rxskip = 0;
+
+ /*
+ * prerequisites for ESPI rxskip mode:
+ * - message has two transfers
+ * - first transfer is a write and second is a read
+ *
+ * In addition the current low-level transfer mechanism requires
+ * that the rxskip bytes fit into the TX FIFO. Else the transfer
+ * would hang because after the first FSL_ESPI_FIFO_SIZE bytes
+ * the TX FIFO isn't re-filled.
+ */
+ list_for_each_entry(t, &m->transfers, transfer_list) {
+ if (i == 0) {
+ if (!t->tx_buf || t->rx_buf ||
+ t->len > FSL_ESPI_FIFO_SIZE)
+ return 0;
+ rxskip = t->len;
+ } else if (i == 1) {
+ if (t->tx_buf || !t->rx_buf)
+ return 0;
+ }
+ i++;
+ }
+
+ return i == 2 ? rxskip : 0;
}
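In other words, an rxskip-capable message is a command-then-response exchange whose write part fits the TX FIFO. A minimal hypothetical sketch (the cmd/resp buffers are illustrative):

	/* Hypothetical write-then-read message eligible for rxskip mode */
	struct spi_transfer xfers[2] = {
		{ .tx_buf = cmd,  .len = 5  },	/* <= FSL_ESPI_FIFO_SIZE (32) */
		{ .rx_buf = resp, .len = 64 },
	};
	struct spi_message m;

	spi_message_init_with_transfers(&m, xfers, 2);
	/* fsl_espi_check_rxskip_mode(&m) returns 5, the bytes to skip on rx */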
-static u32 fsl_espi_tx_buf_lsb(struct mpc8xxx_spi *mpc8xxx_spi)
+static void fsl_espi_fill_tx_fifo(struct fsl_espi *espi, u32 events)
{
- u32 data;
- u16 data_h;
- u16 data_l;
- const u32 *tx = mpc8xxx_spi->tx;
+ u32 tx_fifo_avail;
+ unsigned int tx_left;
+ const void *tx_buf;
+
+ /* if events is zero, the transfer has not started and the tx fifo is empty */
+ tx_fifo_avail = events ? SPIE_TXCNT(events) : FSL_ESPI_FIFO_SIZE;
+start:
+ tx_left = espi->tx_t->len - espi->tx_pos;
+ tx_buf = espi->tx_t->tx_buf;
+ while (tx_fifo_avail >= min(4U, tx_left) && tx_left) {
+ if (tx_left >= 4) {
+ if (!tx_buf)
+ fsl_espi_write_reg(espi, ESPI_SPITF, 0);
+ else if (espi->swab)
+ fsl_espi_write_reg(espi, ESPI_SPITF,
+ swahb32p(tx_buf + espi->tx_pos));
+ else
+ fsl_espi_write_reg(espi, ESPI_SPITF,
+ *(u32 *)(tx_buf + espi->tx_pos));
+ espi->tx_pos += 4;
+ tx_left -= 4;
+ tx_fifo_avail -= 4;
+ } else if (tx_left >= 2 && tx_buf && espi->swab) {
+ fsl_espi_write_reg16(espi, ESPI_SPITF,
+ swab16p(tx_buf + espi->tx_pos));
+ espi->tx_pos += 2;
+ tx_left -= 2;
+ tx_fifo_avail -= 2;
+ } else {
+ if (!tx_buf)
+ fsl_espi_write_reg8(espi, ESPI_SPITF, 0);
+ else
+ fsl_espi_write_reg8(espi, ESPI_SPITF,
+ *(u8 *)(tx_buf + espi->tx_pos));
+ espi->tx_pos += 1;
+ tx_left -= 1;
+ tx_fifo_avail -= 1;
+ }
+ }
- if (!tx)
- return 0;
+ if (!tx_left) {
+ /* Last transfer finished; in rxskip mode only the first (tx) transfer is needed */
+ if (list_is_last(&espi->tx_t->transfer_list,
+ espi->m_transfers) || espi->rxskip) {
+ espi->tx_done = true;
+ return;
+ }
+ espi->tx_t = list_next_entry(espi->tx_t, transfer_list);
+ espi->tx_pos = 0;
+ /* continue with next transfer if tx fifo is not full */
+ if (tx_fifo_avail)
+ goto start;
+ }
+}
- data = *tx++ << mpc8xxx_spi->tx_shift;
- data_l = data & 0xffff;
- data_h = (data >> 16) & 0xffff;
- swab16s(&data_l);
- swab16s(&data_h);
- data = data_h | data_l;
+static void fsl_espi_read_rx_fifo(struct fsl_espi *espi, u32 events)
+{
+ u32 rx_fifo_avail = SPIE_RXCNT(events);
+ unsigned int rx_left;
+ void *rx_buf;
+
+start:
+ rx_left = espi->rx_t->len - espi->rx_pos;
+ rx_buf = espi->rx_t->rx_buf;
+ while (rx_fifo_avail >= min(4U, rx_left) && rx_left) {
+ if (rx_left >= 4) {
+ u32 val = fsl_espi_read_reg(espi, ESPI_SPIRF);
+
+ if (rx_buf && espi->swab)
+ *(u32 *)(rx_buf + espi->rx_pos) = swahb32(val);
+ else if (rx_buf)
+ *(u32 *)(rx_buf + espi->rx_pos) = val;
+ espi->rx_pos += 4;
+ rx_left -= 4;
+ rx_fifo_avail -= 4;
+ } else if (rx_left >= 2 && rx_buf && espi->swab) {
+ u16 val = fsl_espi_read_reg16(espi, ESPI_SPIRF);
+
+ *(u16 *)(rx_buf + espi->rx_pos) = swab16(val);
+ espi->rx_pos += 2;
+ rx_left -= 2;
+ rx_fifo_avail -= 2;
+ } else {
+ u8 val = fsl_espi_read_reg8(espi, ESPI_SPIRF);
+
+ if (rx_buf)
+ *(u8 *)(rx_buf + espi->rx_pos) = val;
+ espi->rx_pos += 1;
+ rx_left -= 1;
+ rx_fifo_avail -= 1;
+ }
+ }
- mpc8xxx_spi->tx = tx;
- return data;
+ if (!rx_left) {
+ if (list_is_last(&espi->rx_t->transfer_list,
+ espi->m_transfers)) {
+ espi->rx_done = true;
+ return;
+ }
+ espi->rx_t = list_next_entry(espi->rx_t, transfer_list);
+ espi->rx_pos = 0;
+ /* continue with next transfer if rx fifo is not empty */
+ if (rx_fifo_avail)
+ goto start;
+ }
}
static void fsl_espi_setup_transfer(struct spi_device *spi,
struct spi_transfer *t)
{
- struct mpc8xxx_spi *mpc8xxx_spi = spi_master_get_devdata(spi->master);
+ struct fsl_espi *espi = spi_master_get_devdata(spi->master);
int bits_per_word = t ? t->bits_per_word : spi->bits_per_word;
- u32 hz = t ? t->speed_hz : spi->max_speed_hz;
- u8 pm;
- struct spi_mpc8xxx_cs *cs = spi->controller_state;
-
- cs->rx_shift = 0;
- cs->tx_shift = 0;
- cs->get_rx = mpc8xxx_spi_rx_buf_u32;
- cs->get_tx = mpc8xxx_spi_tx_buf_u32;
- if (bits_per_word <= 8) {
- cs->rx_shift = 8 - bits_per_word;
- } else {
- cs->rx_shift = 16 - bits_per_word;
- if (spi->mode & SPI_LSB_FIRST)
- cs->get_tx = fsl_espi_tx_buf_lsb;
- }
-
- mpc8xxx_spi->rx_shift = cs->rx_shift;
- mpc8xxx_spi->tx_shift = cs->tx_shift;
- mpc8xxx_spi->get_rx = cs->get_rx;
- mpc8xxx_spi->get_tx = cs->get_tx;
+ u32 pm, hz = t ? t->speed_hz : spi->max_speed_hz;
+ struct fsl_espi_cs *cs = spi_get_ctldata(spi);
+ u32 hw_mode_old = cs->hw_mode;
/* mask out bits we are going to set */
cs->hw_mode &= ~(CSMODE_LEN(0xF) | CSMODE_DIV16 | CSMODE_PM(0xF));
cs->hw_mode |= CSMODE_LEN(bits_per_word - 1);
- if ((mpc8xxx_spi->spibrg / hz) > 64) {
+ pm = DIV_ROUND_UP(espi->spibrg, hz * 4) - 1;
+
+ if (pm > 15) {
cs->hw_mode |= CSMODE_DIV16;
- pm = DIV_ROUND_UP(mpc8xxx_spi->spibrg, hz * 16 * 4);
-
- WARN_ONCE(pm > 33, "%s: Requested speed is too low: %d Hz. "
- "Will use %d Hz instead.\n", dev_name(&spi->dev),
- hz, mpc8xxx_spi->spibrg / (4 * 16 * (32 + 1)));
- if (pm > 33)
- pm = 33;
- } else {
- pm = DIV_ROUND_UP(mpc8xxx_spi->spibrg, hz * 4);
+ pm = DIV_ROUND_UP(espi->spibrg, hz * 16 * 4) - 1;
}
- if (pm)
- pm--;
- if (pm < 2)
- pm = 2;
cs->hw_mode |= CSMODE_PM(pm);
- fsl_espi_change_mode(spi);
+ /* don't write the mode register if the mode doesn't change */
+ if (cs->hw_mode != hw_mode_old)
+ fsl_espi_write_reg(espi, ESPI_SPMODEx(spi->chip_select),
+ cs->hw_mode);
}
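A worked example of the new divider math, assuming a hypothetical spibrg of 200 MHz:

	/* Illustrative numbers, not from the patch:
	 *   hz = 10 MHz: pm = DIV_ROUND_UP(200e6, 10e6 * 4) - 1 = 4 (fits 4 bits)
	 *                -> bus = 200e6 / (4 * (4 + 1)) = 10 MHz, DIV16 clear
	 *   hz =  2 MHz: pm = DIV_ROUND_UP(200e6, 2e6 * 4) - 1 = 24 (> 15)
	 *                -> DIV16 set, pm = DIV_ROUND_UP(200e6, 2e6 * 16 * 4) - 1 = 1
	 *                -> bus = 200e6 / (16 * 4 * (1 + 1)) = 1.5625 MHz
	 */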
static int fsl_espi_bufs(struct spi_device *spi, struct spi_transfer *t)
{
- struct mpc8xxx_spi *mpc8xxx_spi = spi_master_get_devdata(spi->master);
- u32 word;
+ struct fsl_espi *espi = spi_master_get_devdata(spi->master);
+ unsigned int rx_len = t->len;
+ u32 mask, spcom;
int ret;
- mpc8xxx_spi->len = t->len;
- mpc8xxx_spi->count = roundup(t->len, 4) / 4;
-
- mpc8xxx_spi->tx = t->tx_buf;
- mpc8xxx_spi->rx = t->rx_buf;
-
- reinit_completion(&mpc8xxx_spi->done);
+ reinit_completion(&espi->done);
/* Set SPCOM[CS] and SPCOM[TRANLEN] field */
- fsl_espi_write_reg(mpc8xxx_spi, ESPI_SPCOM,
- (SPCOM_CS(spi->chip_select) | SPCOM_TRANLEN(t->len - 1)));
+ spcom = SPCOM_CS(spi->chip_select);
+ spcom |= SPCOM_TRANLEN(t->len - 1);
+
+ /* configure RXSKIP mode */
+ if (espi->rxskip) {
+ spcom |= SPCOM_RXSKIP(espi->rxskip);
+ rx_len = t->len - espi->rxskip;
+ if (t->rx_nbits == SPI_NBITS_DUAL)
+ spcom |= SPCOM_DO;
+ }
+
+ fsl_espi_write_reg(espi, ESPI_SPCOM, spcom);
- /* enable rx ints */
- fsl_espi_write_reg(mpc8xxx_spi, ESPI_SPIM, SPIM_RNE);
+ /* enable interrupts */
+ mask = SPIM_DON;
+ if (rx_len > FSL_ESPI_FIFO_SIZE)
+ mask |= SPIM_RXT;
+ fsl_espi_write_reg(espi, ESPI_SPIM, mask);
- /* transmit word */
- word = mpc8xxx_spi->get_tx(mpc8xxx_spi);
- fsl_espi_write_reg(mpc8xxx_spi, ESPI_SPITF, word);
+ /* Prevent the FIFO fill from being interrupted by the IRQ handler */
+ spin_lock_irq(&espi->lock);
+ fsl_espi_fill_tx_fifo(espi, 0);
+ spin_unlock_irq(&espi->lock);
/* Won't hang up forever, SPI bus sometimes got lost interrupts... */
- ret = wait_for_completion_timeout(&mpc8xxx_spi->done, 2 * HZ);
+ ret = wait_for_completion_timeout(&espi->done, 2 * HZ);
if (ret == 0)
- dev_err(mpc8xxx_spi->dev,
- "Transaction hanging up (left %d bytes)\n",
- mpc8xxx_spi->count);
+ dev_err(espi->dev, "Transfer timed out!\n");
/* disable rx ints */
- fsl_espi_write_reg(mpc8xxx_spi, ESPI_SPIM, 0);
+ fsl_espi_write_reg(espi, ESPI_SPIM, 0);
- return mpc8xxx_spi->count > 0 ? -EMSGSIZE : 0;
+ return ret == 0 ? -ETIMEDOUT : 0;
}
static int fsl_espi_trans(struct spi_message *m, struct spi_transfer *trans)
{
- struct mpc8xxx_spi *mspi = spi_master_get_devdata(m->spi->master);
+ struct fsl_espi *espi = spi_master_get_devdata(m->spi->master);
struct spi_device *spi = m->spi;
int ret;
- fsl_espi_copy_to_buf(m, mspi);
+ /* In case of LSB-first and bits_per_word > 8 byte-swap all words */
+ espi->swab = spi->mode & SPI_LSB_FIRST && trans->bits_per_word > 8;
+
+ espi->m_transfers = &m->transfers;
+ espi->tx_t = list_first_entry(&m->transfers, struct spi_transfer,
+ transfer_list);
+ espi->tx_pos = 0;
+ espi->tx_done = false;
+ espi->rx_t = list_first_entry(&m->transfers, struct spi_transfer,
+ transfer_list);
+ espi->rx_pos = 0;
+ espi->rx_done = false;
+
+ espi->rxskip = fsl_espi_check_rxskip_mode(m);
+ if (trans->rx_nbits == SPI_NBITS_DUAL && !espi->rxskip) {
+ dev_err(espi->dev, "Dual output mode requires RXSKIP mode!\n");
+ return -EINVAL;
+ }
+
+ /* In RXSKIP mode skip first transfer for reads */
+ if (espi->rxskip)
+ espi->rx_t = list_next_entry(espi->rx_t, transfer_list);
+
fsl_espi_setup_transfer(spi, trans);
ret = fsl_espi_bufs(spi, trans);
@@ -310,19 +434,13 @@ static int fsl_espi_trans(struct spi_message *m, struct spi_transfer *trans)
if (trans->delay_usecs)
udelay(trans->delay_usecs);
- fsl_espi_setup_transfer(spi, NULL);
-
- if (!ret)
- fsl_espi_copy_from_buf(m, mspi);
-
return ret;
}
static int fsl_espi_do_one_msg(struct spi_master *master,
struct spi_message *m)
{
- struct mpc8xxx_spi *mspi = spi_master_get_devdata(m->spi->master);
- unsigned int delay_usecs = 0;
+ unsigned int delay_usecs = 0, rx_nbits = 0;
struct spi_transfer *t, trans = {};
int ret;
@@ -333,6 +451,8 @@ static int fsl_espi_do_one_msg(struct spi_master *master,
list_for_each_entry(t, &m->transfers, transfer_list) {
if (t->delay_usecs > delay_usecs)
delay_usecs = t->delay_usecs;
+ if (t->rx_nbits > rx_nbits)
+ rx_nbits = t->rx_nbits;
}
t = list_first_entry(&m->transfers, struct spi_transfer,
@@ -342,8 +462,7 @@ static int fsl_espi_do_one_msg(struct spi_master *master,
trans.speed_hz = t->speed_hz;
trans.bits_per_word = t->bits_per_word;
trans.delay_usecs = delay_usecs;
- trans.tx_buf = mspi->local_buf;
- trans.rx_buf = mspi->local_buf;
+ trans.rx_nbits = rx_nbits;
if (trans.len)
ret = fsl_espi_trans(m, &trans);
@@ -360,12 +479,9 @@ out:
static int fsl_espi_setup(struct spi_device *spi)
{
- struct mpc8xxx_spi *mpc8xxx_spi;
+ struct fsl_espi *espi;
u32 loop_mode;
- struct spi_mpc8xxx_cs *cs = spi_get_ctldata(spi);
-
- if (!spi->max_speed_hz)
- return -EINVAL;
+ struct fsl_espi_cs *cs = spi_get_ctldata(spi);
if (!cs) {
cs = kzalloc(sizeof(*cs), GFP_KERNEL);
@@ -374,12 +490,11 @@ static int fsl_espi_setup(struct spi_device *spi)
spi_set_ctldata(spi, cs);
}
- mpc8xxx_spi = spi_master_get_devdata(spi->master);
+ espi = spi_master_get_devdata(spi->master);
- pm_runtime_get_sync(mpc8xxx_spi->dev);
+ pm_runtime_get_sync(espi->dev);
- cs->hw_mode = fsl_espi_read_reg(mpc8xxx_spi,
- ESPI_SPMODEx(spi->chip_select));
+ cs->hw_mode = fsl_espi_read_reg(espi, ESPI_SPMODEx(spi->chip_select));
/* mask out bits we are going to set */
cs->hw_mode &= ~(CSMODE_CP_BEGIN_EDGECLK | CSMODE_CI_INACTIVEHIGH
| CSMODE_REV);
@@ -392,115 +507,74 @@ static int fsl_espi_setup(struct spi_device *spi)
cs->hw_mode |= CSMODE_REV;
/* Handle the loop mode */
- loop_mode = fsl_espi_read_reg(mpc8xxx_spi, ESPI_SPMODE);
+ loop_mode = fsl_espi_read_reg(espi, ESPI_SPMODE);
loop_mode &= ~SPMODE_LOOP;
if (spi->mode & SPI_LOOP)
loop_mode |= SPMODE_LOOP;
- fsl_espi_write_reg(mpc8xxx_spi, ESPI_SPMODE, loop_mode);
+ fsl_espi_write_reg(espi, ESPI_SPMODE, loop_mode);
fsl_espi_setup_transfer(spi, NULL);
- pm_runtime_mark_last_busy(mpc8xxx_spi->dev);
- pm_runtime_put_autosuspend(mpc8xxx_spi->dev);
+ pm_runtime_mark_last_busy(espi->dev);
+ pm_runtime_put_autosuspend(espi->dev);
return 0;
}
static void fsl_espi_cleanup(struct spi_device *spi)
{
- struct spi_mpc8xxx_cs *cs = spi_get_ctldata(spi);
+ struct fsl_espi_cs *cs = spi_get_ctldata(spi);
kfree(cs);
spi_set_ctldata(spi, NULL);
}
-static void fsl_espi_cpu_irq(struct mpc8xxx_spi *mspi, u32 events)
+static void fsl_espi_cpu_irq(struct fsl_espi *espi, u32 events)
{
- /* We need handle RX first */
- if (events & SPIE_RNE) {
- u32 rx_data, tmp;
- u8 rx_data_8;
- int rx_nr_bytes = 4;
- int ret;
-
- /* Spin until RX is done */
- if (SPIE_RXCNT(events) < min(4, mspi->len)) {
- ret = spin_event_timeout(
- !(SPIE_RXCNT(events =
- fsl_espi_read_reg(mspi, ESPI_SPIE)) <
- min(4, mspi->len)),
- 10000, 0); /* 10 msec */
- if (!ret)
- dev_err(mspi->dev,
- "tired waiting for SPIE_RXCNT\n");
- }
+ if (!espi->rx_done)
+ fsl_espi_read_rx_fifo(espi, events);
- if (mspi->len >= 4) {
- rx_data = fsl_espi_read_reg(mspi, ESPI_SPIRF);
- } else if (mspi->len <= 0) {
- dev_err(mspi->dev,
- "unexpected RX(SPIE_RNE) interrupt occurred,\n"
- "(local rxlen %d bytes, reg rxlen %d bytes)\n",
- min(4, mspi->len), SPIE_RXCNT(events));
- rx_nr_bytes = 0;
- } else {
- rx_nr_bytes = mspi->len;
- tmp = mspi->len;
- rx_data = 0;
- while (tmp--) {
- rx_data_8 = fsl_espi_read_reg8(mspi,
- ESPI_SPIRF);
- rx_data |= (rx_data_8 << (tmp * 8));
- }
-
- rx_data <<= (4 - mspi->len) * 8;
- }
+ if (!espi->tx_done)
+ fsl_espi_fill_tx_fifo(espi, events);
- mspi->len -= rx_nr_bytes;
+ if (!espi->tx_done || !espi->rx_done)
+ return;
- if (rx_nr_bytes && mspi->rx)
- mspi->get_rx(rx_data, mspi);
- }
+ /* we're done, but check for errors before returning */
+ events = fsl_espi_read_reg(espi, ESPI_SPIE);
- if (!(events & SPIE_TNF)) {
- int ret;
-
- /* spin until TX is done */
- ret = spin_event_timeout(((events = fsl_espi_read_reg(
- mspi, ESPI_SPIE)) & SPIE_TNF), 1000, 0);
- if (!ret) {
- dev_err(mspi->dev, "tired waiting for SPIE_TNF\n");
- complete(&mspi->done);
- return;
- }
- }
+ if (!(events & SPIE_DON))
+ dev_err(espi->dev,
+ "Transfer done but SPIE_DON isn't set!\n");
- mspi->count -= 1;
- if (mspi->count) {
- u32 word = mspi->get_tx(mspi);
+ if (SPIE_RXCNT(events) || SPIE_TXCNT(events) != FSL_ESPI_FIFO_SIZE)
+ dev_err(espi->dev, "Transfer done but rx/tx fifo's aren't empty!\n");
- fsl_espi_write_reg(mspi, ESPI_SPITF, word);
- } else {
- complete(&mspi->done);
- }
+ complete(&espi->done);
}
static irqreturn_t fsl_espi_irq(s32 irq, void *context_data)
{
- struct mpc8xxx_spi *mspi = context_data;
+ struct fsl_espi *espi = context_data;
u32 events;
+ spin_lock(&espi->lock);
+
/* Get interrupt events(tx/rx) */
- events = fsl_espi_read_reg(mspi, ESPI_SPIE);
- if (!events)
+ events = fsl_espi_read_reg(espi, ESPI_SPIE);
+ if (!events) {
+ spin_unlock(&espi->lock);
return IRQ_NONE;
+ }
- dev_vdbg(mspi->dev, "%s: events %x\n", __func__, events);
+ dev_vdbg(espi->dev, "%s: events %x\n", __func__, events);
- fsl_espi_cpu_irq(mspi, events);
+ fsl_espi_cpu_irq(espi, events);
/* Clear the events */
- fsl_espi_write_reg(mspi, ESPI_SPIE, events);
+ fsl_espi_write_reg(espi, ESPI_SPIE, events);
+
+ spin_unlock(&espi->lock);
return IRQ_HANDLED;
}
@@ -509,12 +583,12 @@ static irqreturn_t fsl_espi_irq(s32 irq, void *context_data)
static int fsl_espi_runtime_suspend(struct device *dev)
{
struct spi_master *master = dev_get_drvdata(dev);
- struct mpc8xxx_spi *mpc8xxx_spi = spi_master_get_devdata(master);
+ struct fsl_espi *espi = spi_master_get_devdata(master);
u32 regval;
- regval = fsl_espi_read_reg(mpc8xxx_spi, ESPI_SPMODE);
+ regval = fsl_espi_read_reg(espi, ESPI_SPMODE);
regval &= ~SPMODE_ENABLE;
- fsl_espi_write_reg(mpc8xxx_spi, ESPI_SPMODE, regval);
+ fsl_espi_write_reg(espi, ESPI_SPMODE, regval);
return 0;
}
@@ -522,12 +596,12 @@ static int fsl_espi_runtime_suspend(struct device *dev)
static int fsl_espi_runtime_resume(struct device *dev)
{
struct spi_master *master = dev_get_drvdata(dev);
- struct mpc8xxx_spi *mpc8xxx_spi = spi_master_get_devdata(master);
+ struct fsl_espi *espi = spi_master_get_devdata(master);
u32 regval;
- regval = fsl_espi_read_reg(mpc8xxx_spi, ESPI_SPMODE);
+ regval = fsl_espi_read_reg(espi, ESPI_SPMODE);
regval |= SPMODE_ENABLE;
- fsl_espi_write_reg(mpc8xxx_spi, ESPI_SPMODE, regval);
+ fsl_espi_write_reg(espi, ESPI_SPMODE, regval);
return 0;
}
@@ -538,96 +612,105 @@ static size_t fsl_espi_max_message_size(struct spi_device *spi)
return SPCOM_TRANLEN_MAX;
}
+static void fsl_espi_init_regs(struct device *dev, bool initial)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+ struct fsl_espi *espi = spi_master_get_devdata(master);
+ struct device_node *nc;
+ u32 csmode, cs, prop;
+ int ret;
+
+ /* SPI controller initializations */
+ fsl_espi_write_reg(espi, ESPI_SPMODE, 0);
+ fsl_espi_write_reg(espi, ESPI_SPIM, 0);
+ fsl_espi_write_reg(espi, ESPI_SPCOM, 0);
+ fsl_espi_write_reg(espi, ESPI_SPIE, 0xffffffff);
+
+ /* Init eSPI CS mode register */
+ for_each_available_child_of_node(master->dev.of_node, nc) {
+ /* get chip select */
+ ret = of_property_read_u32(nc, "reg", &cs);
+ if (ret || cs >= master->num_chipselect)
+ continue;
+
+ csmode = CSMODE_INIT_VAL;
+
+ /* check if CSBEF is set in device tree */
+ ret = of_property_read_u32(nc, "fsl,csbef", &prop);
+ if (!ret) {
+ csmode &= ~(CSMODE_BEF(0xf));
+ csmode |= CSMODE_BEF(prop);
+ }
+
+ /* check if CSAFT is set in device tree */
+ ret = of_property_read_u32(nc, "fsl,csaft", &prop);
+ if (!ret) {
+ csmode &= ~(CSMODE_AFT(0xf));
+ csmode |= CSMODE_AFT(prop);
+ }
+
+ fsl_espi_write_reg(espi, ESPI_SPMODEx(cs), csmode);
+
+ if (initial)
+ dev_info(dev, "cs=%u, init_csmode=0x%x\n", cs, csmode);
+ }
+
+ /* Enable SPI interface */
+ fsl_espi_write_reg(espi, ESPI_SPMODE, SPMODE_INIT_VAL | SPMODE_ENABLE);
+}
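The fsl,csbef and fsl,csaft properties feed the BEF/AFT fields of the CS mode register; assuming (as the field names suggest) they are delays around the frame in bit times, a node with fsl,csbef = 1 and fsl,csaft = 2 would yield:

	/* Hypothetical csmode computation for fsl,csbef = 1, fsl,csaft = 2 */
	csmode = CSMODE_INIT_VAL;
	csmode = (csmode & ~CSMODE_BEF(0xf)) | CSMODE_BEF(1);
	csmode = (csmode & ~CSMODE_AFT(0xf)) | CSMODE_AFT(2);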
+
static int fsl_espi_probe(struct device *dev, struct resource *mem,
- unsigned int irq)
+ unsigned int irq, unsigned int num_cs)
{
- struct fsl_spi_platform_data *pdata = dev_get_platdata(dev);
struct spi_master *master;
- struct mpc8xxx_spi *mpc8xxx_spi;
- struct device_node *nc;
- const __be32 *prop;
- u32 regval, csmode;
- int i, len, ret;
+ struct fsl_espi *espi;
+ int ret;
- master = spi_alloc_master(dev, sizeof(struct mpc8xxx_spi));
+ master = spi_alloc_master(dev, sizeof(struct fsl_espi));
if (!master)
return -ENOMEM;
dev_set_drvdata(dev, master);
- mpc8xxx_spi_probe(dev, mem, irq);
-
+ master->mode_bits = SPI_RX_DUAL | SPI_CPOL | SPI_CPHA | SPI_CS_HIGH |
+ SPI_LSB_FIRST | SPI_LOOP;
+ master->dev.of_node = dev->of_node;
master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 16);
master->setup = fsl_espi_setup;
master->cleanup = fsl_espi_cleanup;
master->transfer_one_message = fsl_espi_do_one_msg;
master->auto_runtime_pm = true;
master->max_message_size = fsl_espi_max_message_size;
+ master->num_chipselect = num_cs;
- mpc8xxx_spi = spi_master_get_devdata(master);
+ espi = spi_master_get_devdata(master);
+ spin_lock_init(&espi->lock);
- mpc8xxx_spi->local_buf =
- devm_kmalloc(dev, SPCOM_TRANLEN_MAX, GFP_KERNEL);
- if (!mpc8xxx_spi->local_buf) {
- ret = -ENOMEM;
+ espi->dev = dev;
+ espi->spibrg = fsl_get_sys_freq();
+ if (espi->spibrg == -1) {
+ dev_err(dev, "Can't get sys frequency!\n");
+ ret = -EINVAL;
goto err_probe;
}
+ /* determined by clock divider fields DIV16/PM in register SPMODEx */
+ master->min_speed_hz = DIV_ROUND_UP(espi->spibrg, 4 * 16 * 16);
+ master->max_speed_hz = DIV_ROUND_UP(espi->spibrg, 4);
- mpc8xxx_spi->reg_base = devm_ioremap_resource(dev, mem);
- if (IS_ERR(mpc8xxx_spi->reg_base)) {
- ret = PTR_ERR(mpc8xxx_spi->reg_base);
+ init_completion(&espi->done);
+
+ espi->reg_base = devm_ioremap_resource(dev, mem);
+ if (IS_ERR(espi->reg_base)) {
+ ret = PTR_ERR(espi->reg_base);
goto err_probe;
}
/* Register for SPI Interrupt */
- ret = devm_request_irq(dev, mpc8xxx_spi->irq, fsl_espi_irq,
- 0, "fsl_espi", mpc8xxx_spi);
+ ret = devm_request_irq(dev, irq, fsl_espi_irq, 0, "fsl_espi", espi);
if (ret)
goto err_probe;
- if (mpc8xxx_spi->flags & SPI_QE_CPU_MODE) {
- mpc8xxx_spi->rx_shift = 16;
- mpc8xxx_spi->tx_shift = 24;
- }
-
- /* SPI controller initializations */
- fsl_espi_write_reg(mpc8xxx_spi, ESPI_SPMODE, 0);
- fsl_espi_write_reg(mpc8xxx_spi, ESPI_SPIM, 0);
- fsl_espi_write_reg(mpc8xxx_spi, ESPI_SPCOM, 0);
- fsl_espi_write_reg(mpc8xxx_spi, ESPI_SPIE, 0xffffffff);
-
- /* Init eSPI CS mode register */
- for_each_available_child_of_node(master->dev.of_node, nc) {
- /* get chip select */
- prop = of_get_property(nc, "reg", &len);
- if (!prop || len < sizeof(*prop))
- continue;
- i = be32_to_cpup(prop);
- if (i < 0 || i >= pdata->max_chipselect)
- continue;
-
- csmode = CSMODE_INIT_VAL;
- /* check if CSBEF is set in device tree */
- prop = of_get_property(nc, "fsl,csbef", &len);
- if (prop && len >= sizeof(*prop)) {
- csmode &= ~(CSMODE_BEF(0xf));
- csmode |= CSMODE_BEF(be32_to_cpup(prop));
- }
- /* check if CSAFT is set in device tree */
- prop = of_get_property(nc, "fsl,csaft", &len);
- if (prop && len >= sizeof(*prop)) {
- csmode &= ~(CSMODE_AFT(0xf));
- csmode |= CSMODE_AFT(be32_to_cpup(prop));
- }
- fsl_espi_write_reg(mpc8xxx_spi, ESPI_SPMODEx(i), csmode);
-
- dev_info(dev, "cs=%d, init_csmode=0x%x\n", i, csmode);
- }
-
- /* Enable SPI interface */
- regval = pdata->initial_spmode | SPMODE_INIT_VAL | SPMODE_ENABLE;
-
- fsl_espi_write_reg(mpc8xxx_spi, ESPI_SPMODE, regval);
+ fsl_espi_init_regs(dev, true);
pm_runtime_set_autosuspend_delay(dev, AUTOSUSPEND_TIMEOUT);
pm_runtime_use_autosuspend(dev);
@@ -639,8 +722,7 @@ static int fsl_espi_probe(struct device *dev, struct resource *mem,
if (ret < 0)
goto err_pm;
- dev_info(dev, "at 0x%p (irq = %d)\n", mpc8xxx_spi->reg_base,
- mpc8xxx_spi->irq);
+ dev_info(dev, "at 0x%p (irq = %u)\n", espi->reg_base, irq);
pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);
@@ -659,20 +741,16 @@ err_probe:
static int of_fsl_espi_get_chipselects(struct device *dev)
{
struct device_node *np = dev->of_node;
- struct fsl_spi_platform_data *pdata = dev_get_platdata(dev);
- const u32 *prop;
- int len;
+ u32 num_cs;
+ int ret;
- prop = of_get_property(np, "fsl,espi-num-chipselects", &len);
- if (!prop || len < sizeof(*prop)) {
+ ret = of_property_read_u32(np, "fsl,espi-num-chipselects", &num_cs);
+ if (ret) {
dev_err(dev, "No 'fsl,espi-num-chipselects' property\n");
- return -EINVAL;
+ return 0;
}
- pdata->max_chipselect = *prop;
- pdata->cs_control = NULL;
-
- return 0;
+ return num_cs;
}
static int of_fsl_espi_probe(struct platform_device *ofdev)
@@ -680,16 +758,17 @@ static int of_fsl_espi_probe(struct platform_device *ofdev)
struct device *dev = &ofdev->dev;
struct device_node *np = ofdev->dev.of_node;
struct resource mem;
- unsigned int irq;
+ unsigned int irq, num_cs;
int ret;
- ret = of_mpc8xxx_spi_probe(ofdev);
- if (ret)
- return ret;
+ if (of_property_read_bool(np, "mode")) {
+ dev_err(dev, "mode property is not supported on ESPI!\n");
+ return -EINVAL;
+ }
- ret = of_fsl_espi_get_chipselects(dev);
- if (ret)
- return ret;
+ num_cs = of_fsl_espi_get_chipselects(dev);
+ if (!num_cs)
+ return -EINVAL;
ret = of_address_to_resource(np, 0, &mem);
if (ret)
@@ -699,7 +778,7 @@ static int of_fsl_espi_probe(struct platform_device *ofdev)
if (!irq)
return -EINVAL;
- return fsl_espi_probe(dev, &mem, irq);
+ return fsl_espi_probe(dev, &mem, irq, num_cs);
}
static int of_fsl_espi_remove(struct platform_device *dev)
@@ -721,38 +800,15 @@ static int of_fsl_espi_suspend(struct device *dev)
return ret;
}
- ret = pm_runtime_force_suspend(dev);
- if (ret < 0)
- return ret;
-
- return 0;
+ return pm_runtime_force_suspend(dev);
}
static int of_fsl_espi_resume(struct device *dev)
{
- struct fsl_spi_platform_data *pdata = dev_get_platdata(dev);
struct spi_master *master = dev_get_drvdata(dev);
- struct mpc8xxx_spi *mpc8xxx_spi;
- u32 regval;
- int i, ret;
-
- mpc8xxx_spi = spi_master_get_devdata(master);
-
- /* SPI controller initializations */
- fsl_espi_write_reg(mpc8xxx_spi, ESPI_SPMODE, 0);
- fsl_espi_write_reg(mpc8xxx_spi, ESPI_SPIM, 0);
- fsl_espi_write_reg(mpc8xxx_spi, ESPI_SPCOM, 0);
- fsl_espi_write_reg(mpc8xxx_spi, ESPI_SPIE, 0xffffffff);
-
- /* Init eSPI CS mode register */
- for (i = 0; i < pdata->max_chipselect; i++)
- fsl_espi_write_reg(mpc8xxx_spi, ESPI_SPMODEx(i),
- CSMODE_INIT_VAL);
-
- /* Enable SPI interface */
- regval = pdata->initial_spmode | SPMODE_INIT_VAL | SPMODE_ENABLE;
+ int ret;
- fsl_espi_write_reg(mpc8xxx_spi, ESPI_SPMODE, regval);
+ fsl_espi_init_regs(dev, false);
ret = pm_runtime_force_resume(dev);
if (ret < 0)
diff --git a/drivers/spi/spi-fsl-lib.h b/drivers/spi/spi-fsl-lib.h
index 2925c8089fd9..f303f306b38e 100644
--- a/drivers/spi/spi-fsl-lib.h
+++ b/drivers/spi/spi-fsl-lib.h
@@ -28,10 +28,6 @@ struct mpc8xxx_spi {
/* rx & tx bufs from the spi_transfer */
const void *tx;
void *rx;
-#if IS_ENABLED(CONFIG_SPI_FSL_ESPI)
- int len;
- u8 *local_buf;
-#endif
int subblock;
struct spi_pram __iomem *pram;
diff --git a/drivers/spi/spi-fsl-lpspi.c b/drivers/spi/spi-fsl-lpspi.c
new file mode 100644
index 000000000000..52551f6d0c7d
--- /dev/null
+++ b/drivers/spi/spi-fsl-lpspi.c
@@ -0,0 +1,525 @@
+/*
+ * Freescale i.MX7ULP LPSPI driver
+ *
+ * Copyright 2016 Freescale Semiconductor, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/clk.h>
+#include <linux/completion.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/spi/spi.h>
+#include <linux/spi/spi_bitbang.h>
+#include <linux/types.h>
+
+#define DRIVER_NAME "fsl_lpspi"
+
+/* i.MX7ULP LPSPI registers */
+#define IMX7ULP_VERID 0x0
+#define IMX7ULP_PARAM 0x4
+#define IMX7ULP_CR 0x10
+#define IMX7ULP_SR 0x14
+#define IMX7ULP_IER 0x18
+#define IMX7ULP_DER 0x1c
+#define IMX7ULP_CFGR0 0x20
+#define IMX7ULP_CFGR1 0x24
+#define IMX7ULP_DMR0 0x30
+#define IMX7ULP_DMR1 0x34
+#define IMX7ULP_CCR 0x40
+#define IMX7ULP_FCR 0x58
+#define IMX7ULP_FSR 0x5c
+#define IMX7ULP_TCR 0x60
+#define IMX7ULP_TDR 0x64
+#define IMX7ULP_RSR 0x70
+#define IMX7ULP_RDR 0x74
+
+/* General control register field define */
+#define CR_RRF BIT(9)
+#define CR_RTF BIT(8)
+#define CR_RST BIT(1)
+#define CR_MEN BIT(0)
+#define SR_TCF BIT(10)
+#define SR_RDF BIT(1)
+#define SR_TDF BIT(0)
+#define IER_TCIE BIT(10)
+#define IER_RDIE BIT(1)
+#define IER_TDIE BIT(0)
+#define CFGR1_PCSCFG BIT(27)
+#define CFGR1_PCSPOL BIT(8)
+#define CFGR1_NOSTALL BIT(3)
+#define CFGR1_MASTER BIT(0)
+#define RSR_RXEMPTY BIT(1)
+#define TCR_CPOL BIT(31)
+#define TCR_CPHA BIT(30)
+#define TCR_CONT BIT(21)
+#define TCR_CONTC BIT(20)
+#define TCR_RXMSK BIT(19)
+#define TCR_TXMSK BIT(18)
+
+static int clkdivs[] = {1, 2, 4, 8, 16, 32, 64, 128};
+
+struct lpspi_config {
+ u8 bpw;
+ u8 chip_select;
+ u8 prescale;
+ u16 mode;
+ u32 speed_hz;
+};
+
+struct fsl_lpspi_data {
+ struct device *dev;
+ void __iomem *base;
+ struct clk *clk;
+
+ void *rx_buf;
+ const void *tx_buf;
+ void (*tx)(struct fsl_lpspi_data *);
+ void (*rx)(struct fsl_lpspi_data *);
+
+ u32 remain;
+ u8 txfifosize;
+ u8 rxfifosize;
+
+ struct lpspi_config config;
+ struct completion xfer_done;
+};
+
+static const struct of_device_id fsl_lpspi_dt_ids[] = {
+ { .compatible = "fsl,imx7ulp-spi", },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, fsl_lpspi_dt_ids);
+
+#define LPSPI_BUF_RX(type) \
+static void fsl_lpspi_buf_rx_##type(struct fsl_lpspi_data *fsl_lpspi) \
+{ \
+ unsigned int val = readl(fsl_lpspi->base + IMX7ULP_RDR); \
+ \
+ if (fsl_lpspi->rx_buf) { \
+ *(type *)fsl_lpspi->rx_buf = val; \
+ fsl_lpspi->rx_buf += sizeof(type); \
+ } \
+}
+
+#define LPSPI_BUF_TX(type) \
+static void fsl_lpspi_buf_tx_##type(struct fsl_lpspi_data *fsl_lpspi) \
+{ \
+ type val = 0; \
+ \
+ if (fsl_lpspi->tx_buf) { \
+ val = *(type *)fsl_lpspi->tx_buf; \
+ fsl_lpspi->tx_buf += sizeof(type); \
+ } \
+ \
+ fsl_lpspi->remain -= sizeof(type); \
+ writel(val, fsl_lpspi->base + IMX7ULP_TDR); \
+}
+
+LPSPI_BUF_RX(u8)
+LPSPI_BUF_TX(u8)
+LPSPI_BUF_RX(u16)
+LPSPI_BUF_TX(u16)
+LPSPI_BUF_RX(u32)
+LPSPI_BUF_TX(u32)
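For reference, the u8 instantiation of the TX macro above expands to the following byte-wide FIFO writer (mechanical expansion, shown only for readability):

static void fsl_lpspi_buf_tx_u8(struct fsl_lpspi_data *fsl_lpspi)
{
	u8 val = 0;

	if (fsl_lpspi->tx_buf) {
		val = *(u8 *)fsl_lpspi->tx_buf;
		fsl_lpspi->tx_buf += sizeof(u8);
	}

	fsl_lpspi->remain -= sizeof(u8);
	writel(val, fsl_lpspi->base + IMX7ULP_TDR);
}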
+
+static void fsl_lpspi_intctrl(struct fsl_lpspi_data *fsl_lpspi,
+ unsigned int enable)
+{
+ writel(enable, fsl_lpspi->base + IMX7ULP_IER);
+}
+
+static int lpspi_prepare_xfer_hardware(struct spi_master *master)
+{
+ struct fsl_lpspi_data *fsl_lpspi = spi_master_get_devdata(master);
+
+ return clk_prepare_enable(fsl_lpspi->clk);
+}
+
+static int lpspi_unprepare_xfer_hardware(struct spi_master *master)
+{
+ struct fsl_lpspi_data *fsl_lpspi = spi_master_get_devdata(master);
+
+ clk_disable_unprepare(fsl_lpspi->clk);
+
+ return 0;
+}
+
+static int fsl_lpspi_txfifo_empty(struct fsl_lpspi_data *fsl_lpspi)
+{
+ u32 txcnt;
+ unsigned long orig_jiffies = jiffies;
+
+ do {
+ txcnt = readl(fsl_lpspi->base + IMX7ULP_FSR) & 0xff;
+
+ if (time_after(jiffies, orig_jiffies + msecs_to_jiffies(500))) {
+ dev_dbg(fsl_lpspi->dev, "txfifo empty timeout\n");
+ return -ETIMEDOUT;
+ }
+ cond_resched();
+
+ } while (txcnt);
+
+ return 0;
+}
+
+static void fsl_lpspi_write_tx_fifo(struct fsl_lpspi_data *fsl_lpspi)
+{
+ u8 txfifo_cnt;
+
+ txfifo_cnt = readl(fsl_lpspi->base + IMX7ULP_FSR) & 0xff;
+
+ while (txfifo_cnt < fsl_lpspi->txfifosize) {
+ if (!fsl_lpspi->remain)
+ break;
+ fsl_lpspi->tx(fsl_lpspi);
+ txfifo_cnt++;
+ }
+
+ if (!fsl_lpspi->remain && (txfifo_cnt < fsl_lpspi->txfifosize))
+ writel(0, fsl_lpspi->base + IMX7ULP_TDR);
+ else
+ fsl_lpspi_intctrl(fsl_lpspi, IER_TDIE);
+}
+
+static void fsl_lpspi_read_rx_fifo(struct fsl_lpspi_data *fsl_lpspi)
+{
+ while (!(readl(fsl_lpspi->base + IMX7ULP_RSR) & RSR_RXEMPTY))
+ fsl_lpspi->rx(fsl_lpspi);
+}
+
+static void fsl_lpspi_set_cmd(struct fsl_lpspi_data *fsl_lpspi,
+ bool is_first_xfer)
+{
+ u32 temp = 0;
+
+ temp |= fsl_lpspi->config.bpw - 1;
+ temp |= fsl_lpspi->config.prescale << 27;
+ temp |= (fsl_lpspi->config.mode & 0x3) << 30;
+ temp |= (fsl_lpspi->config.chip_select & 0x3) << 24;
+
+ /*
+ * Setting TCR_CONT keeps SS asserted after the current transfer.
+ * For the first transfer, clear TCR_CONTC to assert SS.
+ * For subsequent transfers, set TCR_CONTC to keep SS asserted.
+ */
+ temp |= TCR_CONT;
+ if (is_first_xfer)
+ temp &= ~TCR_CONTC;
+ else
+ temp |= TCR_CONTC;
+
+ writel(temp, fsl_lpspi->base + IMX7ULP_TCR);
+
+ dev_dbg(fsl_lpspi->dev, "TCR=0x%x\n", temp);
+}
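As a worked example with hypothetical settings: for a first transfer with 8 bits per word, prescale 1, SPI mode 0 and chip select 0, the register value works out to:

	/* TCR = (8 - 1) | (1 << 27) | TCR_CONT = 0x08200007;
	 * TCR_CONTC stays clear, so SS is newly asserted for this transfer.
	 */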
+
+static void fsl_lpspi_set_watermark(struct fsl_lpspi_data *fsl_lpspi)
+{
+ u32 temp;
+
+ temp = fsl_lpspi->txfifosize >> 1 | (fsl_lpspi->rxfifosize >> 1) << 16;
+
+ writel(temp, fsl_lpspi->base + IMX7ULP_FCR);
+
+ dev_dbg(fsl_lpspi->dev, "FCR=0x%x\n", temp);
+}
+
+static int fsl_lpspi_set_bitrate(struct fsl_lpspi_data *fsl_lpspi)
+{
+ struct lpspi_config config = fsl_lpspi->config;
+ unsigned int perclk_rate, scldiv;
+ u8 prescale;
+
+ perclk_rate = clk_get_rate(fsl_lpspi->clk);
+ for (prescale = 0; prescale < 8; prescale++) {
+ scldiv = perclk_rate /
+ (clkdivs[prescale] * config.speed_hz) - 2;
+ if (scldiv < 256) {
+ fsl_lpspi->config.prescale = prescale;
+ break;
+ }
+ }
+
+ if (prescale == 8 && scldiv >= 256)
+ return -EINVAL;
+
+ writel(scldiv, fsl_lpspi->base + IMX7ULP_CCR);
+
+ dev_dbg(fsl_lpspi->dev, "perclk=%d, speed=%d, prescale =%d, scldiv=%d\n",
+ perclk_rate, config.speed_hz, prescale, scldiv);
+
+ return 0;
+}
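A worked pass through the prescaler loop, assuming a hypothetical 48 MHz perclk and a 150 kHz target rate:

	/* prescale 0: scldiv = 48e6 / (1 * 150e3) - 2 = 318 (>= 256, reject)
	 * prescale 1: scldiv = 48e6 / (2 * 150e3) - 2 = 158 (fits)
	 * => CCR = 158, SCK = 48e6 / (2 * (158 + 2)) = 150 kHz
	 */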
+
+static int fsl_lpspi_config(struct fsl_lpspi_data *fsl_lpspi)
+{
+ u32 temp;
+ int ret;
+
+ temp = CR_RST;
+ writel(temp, fsl_lpspi->base + IMX7ULP_CR);
+ writel(0, fsl_lpspi->base + IMX7ULP_CR);
+
+ ret = fsl_lpspi_set_bitrate(fsl_lpspi);
+ if (ret)
+ return ret;
+
+ fsl_lpspi_set_watermark(fsl_lpspi);
+
+ temp = CFGR1_PCSCFG | CFGR1_MASTER | CFGR1_NOSTALL;
+ if (fsl_lpspi->config.mode & SPI_CS_HIGH)
+ temp |= CFGR1_PCSPOL;
+ writel(temp, fsl_lpspi->base + IMX7ULP_CFGR1);
+
+ temp = readl(fsl_lpspi->base + IMX7ULP_CR);
+ temp |= CR_RRF | CR_RTF | CR_MEN;
+ writel(temp, fsl_lpspi->base + IMX7ULP_CR);
+
+ return 0;
+}
+
+static void fsl_lpspi_setup_transfer(struct spi_device *spi,
+ struct spi_transfer *t)
+{
+ struct fsl_lpspi_data *fsl_lpspi = spi_master_get_devdata(spi->master);
+
+ fsl_lpspi->config.mode = spi->mode;
+ fsl_lpspi->config.bpw = t ? t->bits_per_word : spi->bits_per_word;
+ fsl_lpspi->config.speed_hz = t ? t->speed_hz : spi->max_speed_hz;
+ fsl_lpspi->config.chip_select = spi->chip_select;
+
+ if (!fsl_lpspi->config.speed_hz)
+ fsl_lpspi->config.speed_hz = spi->max_speed_hz;
+ if (!fsl_lpspi->config.bpw)
+ fsl_lpspi->config.bpw = spi->bits_per_word;
+
+ /* Initialize the functions for transfer */
+ if (fsl_lpspi->config.bpw <= 8) {
+ fsl_lpspi->rx = fsl_lpspi_buf_rx_u8;
+ fsl_lpspi->tx = fsl_lpspi_buf_tx_u8;
+ } else if (fsl_lpspi->config.bpw <= 16) {
+ fsl_lpspi->rx = fsl_lpspi_buf_rx_u16;
+ fsl_lpspi->tx = fsl_lpspi_buf_tx_u16;
+ } else {
+ fsl_lpspi->rx = fsl_lpspi_buf_rx_u32;
+ fsl_lpspi->tx = fsl_lpspi_buf_tx_u32;
+ }
+
+ fsl_lpspi_config(fsl_lpspi);
+}
+
+static int fsl_lpspi_transfer_one(struct spi_master *master,
+ struct spi_device *spi,
+ struct spi_transfer *t)
+{
+ struct fsl_lpspi_data *fsl_lpspi = spi_master_get_devdata(master);
+ int ret;
+
+ fsl_lpspi->tx_buf = t->tx_buf;
+ fsl_lpspi->rx_buf = t->rx_buf;
+ fsl_lpspi->remain = t->len;
+
+ reinit_completion(&fsl_lpspi->xfer_done);
+ fsl_lpspi_write_tx_fifo(fsl_lpspi);
+
+ ret = wait_for_completion_timeout(&fsl_lpspi->xfer_done, HZ);
+ if (!ret) {
+ dev_dbg(fsl_lpspi->dev, "wait for completion timeout\n");
+ return -ETIMEDOUT;
+ }
+
+ ret = fsl_lpspi_txfifo_empty(fsl_lpspi);
+ if (ret)
+ return ret;
+
+ fsl_lpspi_read_rx_fifo(fsl_lpspi);
+
+ return 0;
+}
+
+static int fsl_lpspi_transfer_one_msg(struct spi_master *master,
+ struct spi_message *msg)
+{
+ struct fsl_lpspi_data *fsl_lpspi = spi_master_get_devdata(master);
+ struct spi_device *spi = msg->spi;
+ struct spi_transfer *xfer;
+ bool is_first_xfer = true;
+ u32 temp;
+ int ret;
+
+ msg->status = 0;
+ msg->actual_length = 0;
+
+ list_for_each_entry(xfer, &msg->transfers, transfer_list) {
+ fsl_lpspi_setup_transfer(spi, xfer);
+ fsl_lpspi_set_cmd(fsl_lpspi, is_first_xfer);
+
+ is_first_xfer = false;
+
+ ret = fsl_lpspi_transfer_one(master, spi, xfer);
+ if (ret < 0)
+ goto complete;
+
+ msg->actual_length += xfer->len;
+ }
+
+complete:
+ /* de-assert SS, then finalize current message */
+ temp = readl(fsl_lpspi->base + IMX7ULP_TCR);
+ temp &= ~TCR_CONTC;
+ writel(temp, fsl_lpspi->base + IMX7ULP_TCR);
+
+ msg->status = ret;
+ spi_finalize_current_message(master);
+
+ return ret;
+}
+
+static irqreturn_t fsl_lpspi_isr(int irq, void *dev_id)
+{
+ struct fsl_lpspi_data *fsl_lpspi = dev_id;
+ u32 temp;
+
+ fsl_lpspi_intctrl(fsl_lpspi, 0);
+ temp = readl(fsl_lpspi->base + IMX7ULP_SR);
+
+ fsl_lpspi_read_rx_fifo(fsl_lpspi);
+
+ if (temp & SR_TDF) {
+ fsl_lpspi_write_tx_fifo(fsl_lpspi);
+
+ if (!fsl_lpspi->remain)
+ complete(&fsl_lpspi->xfer_done);
+
+ return IRQ_HANDLED;
+ }
+
+ return IRQ_NONE;
+}
+
+static int fsl_lpspi_probe(struct platform_device *pdev)
+{
+ struct fsl_lpspi_data *fsl_lpspi;
+ struct spi_master *master;
+ struct resource *res;
+ int ret, irq;
+ u32 temp;
+
+ master = spi_alloc_master(&pdev->dev, sizeof(struct fsl_lpspi_data));
+ if (!master)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, master);
+
+ master->bits_per_word_mask = SPI_BPW_RANGE_MASK(8, 32);
+ master->bus_num = pdev->id;
+
+ fsl_lpspi = spi_master_get_devdata(master);
+ fsl_lpspi->dev = &pdev->dev;
+
+ master->transfer_one_message = fsl_lpspi_transfer_one_msg;
+ master->prepare_transfer_hardware = lpspi_prepare_xfer_hardware;
+ master->unprepare_transfer_hardware = lpspi_unprepare_xfer_hardware;
+ master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
+ master->flags = SPI_MASTER_MUST_RX | SPI_MASTER_MUST_TX;
+ master->dev.of_node = pdev->dev.of_node;
+
+ init_completion(&fsl_lpspi->xfer_done);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ fsl_lpspi->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(fsl_lpspi->base)) {
+ ret = PTR_ERR(fsl_lpspi->base);
+ goto out_master_put;
+ }
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ ret = irq;
+ goto out_master_put;
+ }
+
+ ret = devm_request_irq(&pdev->dev, irq, fsl_lpspi_isr, 0,
+ dev_name(&pdev->dev), fsl_lpspi);
+ if (ret) {
+ dev_err(&pdev->dev, "can't get irq%d: %d\n", irq, ret);
+ goto out_master_put;
+ }
+
+ fsl_lpspi->clk = devm_clk_get(&pdev->dev, "ipg");
+ if (IS_ERR(fsl_lpspi->clk)) {
+ ret = PTR_ERR(fsl_lpspi->clk);
+ goto out_master_put;
+ }
+
+ ret = clk_prepare_enable(fsl_lpspi->clk);
+ if (ret) {
+ dev_err(&pdev->dev, "can't enable lpspi clock, ret=%d\n", ret);
+ goto out_master_put;
+ }
+
+ temp = readl(fsl_lpspi->base + IMX7ULP_PARAM);
+ fsl_lpspi->txfifosize = 1 << (temp & 0x0f);
+ fsl_lpspi->rxfifosize = 1 << ((temp >> 8) & 0x0f);
+
+ clk_disable_unprepare(fsl_lpspi->clk);
+
+ ret = devm_spi_register_master(&pdev->dev, master);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "spi_register_master error.\n");
+ goto out_master_put;
+ }
+
+ return 0;
+
+out_master_put:
+ spi_master_put(master);
+
+ return ret;
+}
+
+static int fsl_lpspi_remove(struct platform_device *pdev)
+{
+ struct spi_master *master = platform_get_drvdata(pdev);
+ struct fsl_lpspi_data *fsl_lpspi = spi_master_get_devdata(master);
+
+ clk_disable_unprepare(fsl_lpspi->clk);
+
+ return 0;
+}
+
+static struct platform_driver fsl_lpspi_driver = {
+ .driver = {
+ .name = DRIVER_NAME,
+ .of_match_table = fsl_lpspi_dt_ids,
+ },
+ .probe = fsl_lpspi_probe,
+ .remove = fsl_lpspi_remove,
+};
+module_platform_driver(fsl_lpspi_driver);
+
+MODULE_DESCRIPTION("LPSPI Master Controller driver");
+MODULE_AUTHOR("Gao Pan <pandy.gao@nxp.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/spi/spi-imx.c b/drivers/spi/spi-imx.c
index deb782f6556c..32ced64a5bb9 100644
--- a/drivers/spi/spi-imx.c
+++ b/drivers/spi/spi-imx.c
@@ -173,15 +173,16 @@ static int mxc_clkdivs[] = {0, 3, 4, 6, 8, 12, 16, 24, 32, 48, 64, 96, 128, 192,
/* MX21, MX27 */
static unsigned int spi_imx_clkdiv_1(unsigned int fin,
- unsigned int fspi, unsigned int max)
+ unsigned int fspi, unsigned int max, unsigned int *fres)
{
int i;
for (i = 2; i < max; i++)
if (fspi * mxc_clkdivs[i] >= fin)
- return i;
+ break;
- return max;
+ *fres = fin / mxc_clkdivs[i];
+ return i;
}
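With the mxc_clkdivs table above ({0, 3, 4, 6, 8, ...}), a hypothetical fin of 48 MHz and a requested fspi of 10 MHz now resolve as follows:

	/* i = 2: 10 MHz * 4 = 40 MHz  < fin, keep searching
	 * i = 3: 10 MHz * 6 = 60 MHz >= fin, stop
	 * => returns divider index 3 and *fres = 48 MHz / 6 = 8 MHz
	 */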
/* MX1, MX31, MX35, MX51 CSPI */
@@ -442,6 +443,7 @@ static void mx51_ecspi_reset(struct spi_imx_data *spi_imx)
#define MX31_CSPICTRL_ENABLE (1 << 0)
#define MX31_CSPICTRL_MASTER (1 << 1)
#define MX31_CSPICTRL_XCH (1 << 2)
+#define MX31_CSPICTRL_SMC (1 << 3)
#define MX31_CSPICTRL_POL (1 << 4)
#define MX31_CSPICTRL_PHA (1 << 5)
#define MX31_CSPICTRL_SSCTL (1 << 6)
@@ -452,6 +454,10 @@ static void mx51_ecspi_reset(struct spi_imx_data *spi_imx)
#define MX35_CSPICTRL_CS_SHIFT 12
#define MX31_CSPICTRL_DR_SHIFT 16
+#define MX31_CSPI_DMAREG 0x10
+#define MX31_DMAREG_RH_DEN (1<<4)
+#define MX31_DMAREG_TH_DEN (1<<1)
+
#define MX31_CSPISTATUS 0x14
#define MX31_STATUS_RR (1 << 3)
@@ -511,6 +517,9 @@ static int mx31_config(struct spi_device *spi, struct spi_imx_config *config)
(is_imx35_cspi(spi_imx) ? MX35_CSPICTRL_CS_SHIFT :
MX31_CSPICTRL_CS_SHIFT);
+ if (spi_imx->usedma)
+ reg |= MX31_CSPICTRL_SMC;
+
writel(reg, spi_imx->base + MXC_CSPICTRL);
reg = readl(spi_imx->base + MX31_CSPI_TESTREG);
@@ -520,6 +529,13 @@ static int mx31_config(struct spi_device *spi, struct spi_imx_config *config)
reg &= ~MX31_TEST_LBC;
writel(reg, spi_imx->base + MX31_CSPI_TESTREG);
+ if (spi_imx->usedma) {
+ /*
+ * configure DMA requests when RXFIFO is half full and
+ * when TXFIFO is half empty
+ */
+ writel(MX31_DMAREG_RH_DEN | MX31_DMAREG_TH_DEN,
+ spi_imx->base + MX31_CSPI_DMAREG);
+ }
+
return 0;
}
@@ -574,9 +590,12 @@ static int mx21_config(struct spi_device *spi, struct spi_imx_config *config)
struct spi_imx_data *spi_imx = spi_master_get_devdata(spi->master);
unsigned int reg = MX21_CSPICTRL_ENABLE | MX21_CSPICTRL_MASTER;
unsigned int max = is_imx27_cspi(spi_imx) ? 16 : 18;
+ unsigned int clk;
+
+ reg |= spi_imx_clkdiv_1(spi_imx->spi_clk, config->speed_hz, max, &clk)
+ << MX21_CSPICTRL_DR_SHIFT;
+ spi_imx->spi_bus_clk = clk;
- reg |= spi_imx_clkdiv_1(spi_imx->spi_clk, config->speed_hz, max) <<
- MX21_CSPICTRL_DR_SHIFT;
reg |= config->bpw - 1;
if (spi->mode & SPI_CPHA)
@@ -1244,10 +1263,10 @@ static int spi_imx_probe(struct platform_device *pdev)
spi_imx->spi_clk = clk_get_rate(spi_imx->clk_per);
/*
- * Only validated on i.mx6 now, can remove the constrain if validated on
- * other chips.
+ * Only validated on i.mx35 and i.mx6 now, can remove the constraint
+ * if validated on other chips.
*/
- if (is_imx51_ecspi(spi_imx)) {
+ if (is_imx35_cspi(spi_imx) || is_imx51_ecspi(spi_imx)) {
ret = spi_imx_sdma_init(&pdev->dev, spi_imx, master);
if (ret == -EPROBE_DEFER)
goto out_clk_put;
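
The reworked spi_imx_clkdiv_1() above now reports the bus clock it actually achieved through *fres, which mx21_config() records in spi_imx->spi_bus_clk. A standalone sketch of the same search, for illustration only (the demo_* names are not part of the driver; the table mirrors mxc_clkdivs):

	static const int demo_clkdivs[] = {0, 3, 4, 6, 8, 12, 16, 24, 32, 48, 64,
					   96, 128, 192, 256, 384, 512, 768, 1024};

	static unsigned int demo_clkdiv(unsigned int fin, unsigned int fspi,
					unsigned int max, unsigned int *fres)
	{
		unsigned int i;

		/* smallest table divider that brings fin down to fspi or below */
		for (i = 2; i < max; i++)
			if (fspi * demo_clkdivs[i] >= fin)
				break;

		/* e.g. fin = 16 MHz, fspi = 1 MHz: i = 6, *fres = 16 MHz / 16 = 1 MHz */
		*fres = fin / demo_clkdivs[i];
		return i;
	}
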
diff --git a/drivers/spi/spi-jcore.c b/drivers/spi/spi-jcore.c
index f8117b80fa22..cebfea5faa4b 100644
--- a/drivers/spi/spi-jcore.c
+++ b/drivers/spi/spi-jcore.c
@@ -214,6 +214,7 @@ static const struct of_device_id jcore_spi_of_match[] = {
{ .compatible = "jcore,spi2" },
{},
};
+MODULE_DEVICE_TABLE(of, jcore_spi_of_match);
static struct platform_driver jcore_spi_driver = {
.probe = jcore_spi_probe,
diff --git a/drivers/spi/spi-omap2-mcspi.c b/drivers/spi/spi-omap2-mcspi.c
index d5157b2222ce..79800e991ccd 100644
--- a/drivers/spi/spi-omap2-mcspi.c
+++ b/drivers/spi/spi-omap2-mcspi.c
@@ -1386,20 +1386,13 @@ static int omap2_mcspi_probe(struct platform_device *pdev)
regs_offset = pdata->regs_offset;
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (r == NULL) {
- status = -ENODEV;
- goto free_master;
- }
-
- r->start += regs_offset;
- r->end += regs_offset;
- mcspi->phys = r->start;
-
mcspi->base = devm_ioremap_resource(&pdev->dev, r);
if (IS_ERR(mcspi->base)) {
status = PTR_ERR(mcspi->base);
goto free_master;
}
+ mcspi->phys = r->start + regs_offset;
+ mcspi->base += regs_offset;
mcspi->dev = &pdev->dev;
diff --git a/drivers/spi/spi-orion.c b/drivers/spi/spi-orion.c
index ded37025b445..6b001c4a5640 100644
--- a/drivers/spi/spi-orion.c
+++ b/drivers/spi/spi-orion.c
@@ -138,37 +138,62 @@ static int orion_spi_baudrate_set(struct spi_device *spi, unsigned int speed)
tclk_hz = clk_get_rate(orion_spi->clk);
if (devdata->typ == ARMADA_SPI) {
- unsigned int clk, spr, sppr, sppr2, err;
- unsigned int best_spr, best_sppr, best_err;
-
- best_err = speed;
- best_spr = 0;
- best_sppr = 0;
-
- /* Iterate over the valid range looking for best fit */
- for (sppr = 0; sppr < 8; sppr++) {
- sppr2 = 0x1 << sppr;
-
- spr = tclk_hz / sppr2;
- spr = DIV_ROUND_UP(spr, speed);
- if ((spr == 0) || (spr > 15))
- continue;
-
- clk = tclk_hz / (spr * sppr2);
- err = speed - clk;
-
- if (err < best_err) {
- best_spr = spr;
- best_sppr = sppr;
- best_err = err;
- }
- }
+ /*
+ * Given the core_clk (tclk_hz) and the target rate (speed) we
+ * determine the best values for SPR (in [0 .. 15]) and SPPR (in
+ * [0..7]) such that
+ *
+ * core_clk / (SPR * 2 ** SPPR)
+ *
+ * is as big as possible but not bigger than speed.
+ */
- if ((best_sppr == 0) && (best_spr == 0))
- return -EINVAL;
+ /* best integer divider: */
+ unsigned divider = DIV_ROUND_UP(tclk_hz, speed);
+ unsigned spr, sppr;
+
+ if (divider < 16) {
+ /* This is the easy case, divider is less than 16 */
+ spr = divider;
+ sppr = 0;
+
+ } else {
+ unsigned two_pow_sppr;
+ /*
+ * Find the highest bit set in divider. This and the
+ * next three bits define SPR (apart from rounding).
+ * SPPR is then the number of zero bits that must be
+ * appended:
+ */
+ sppr = fls(divider) - 4;
+
+ /*
+ * As SPR only has 4 bits, we have to round divider up
+ * to the next multiple of 2 ** sppr.
+ */
+ two_pow_sppr = 1 << sppr;
+ divider = (divider + two_pow_sppr - 1) & -two_pow_sppr;
+
+ /*
+ * recalculate sppr as rounding up divider might have
+ * increased it enough to change the position of the
+ * highest set bit. In this case the bit that now
+ * doesn't make it into SPR is 0, so there is no need to
+ * round again.
+ */
+ sppr = fls(divider) - 4;
+ spr = divider >> sppr;
+
+ /*
+ * Now do range checking. SPR is constructed to have a
+ * width of 4 bits, so it is guaranteed to fit. We
+ * still need to check that sppr fits into 3 bits:
+ */
+ if (sppr > 7)
+ return -EINVAL;
+ }
- prescale = ((best_sppr & 0x6) << 5) |
- ((best_sppr & 0x1) << 4) | best_spr;
+ prescale = ((sppr & 0x6) << 5) | ((sppr & 0x1) << 4) | spr;
} else {
/*
* the supported rates are: 4,6,8...30
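
The SPR/SPPR decomposition above is easiest to follow with concrete numbers (illustrative clocks, not taken from a datasheet): with tclk_hz = 250 MHz and speed = 1 MHz the code effectively computes

	unsigned int divider = DIV_ROUND_UP(250000000, 1000000);	/* 250 */
	unsigned int sppr = fls(divider) - 4;		/* fls(250) = 8, so 4 */
	unsigned int step = 1 << sppr;			/* 16 */

	divider = (divider + step - 1) & -step;		/* round up to 256 */
	sppr = fls(divider) - 4;			/* fls(256) = 9, so 5 */
	/* spr = divider >> sppr = 8; check: 8 * 2^5 = 256, and
	 * 250 MHz / 256 ~= 976.6 kHz, the fastest rate not above the target.
	 */
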
diff --git a/drivers/spi/spi-pxa2xx.h b/drivers/spi/spi-pxa2xx.h
index ce31b8199bb3..2823a00a9405 100644
--- a/drivers/spi/spi-pxa2xx.h
+++ b/drivers/spi/spi-pxa2xx.h
@@ -109,7 +109,6 @@ static inline void pxa2xx_spi_write(const struct driver_data *drv_data,
#define DONE_STATE ((void *)2)
#define ERROR_STATE ((void *)-1)
-#define IS_DMA_ALIGNED(x) IS_ALIGNED((unsigned long)(x), DMA_ALIGNMENT)
#define DMA_ALIGNMENT 8
static inline int pxa25x_ssp_comp(struct driver_data *drv_data)
diff --git a/drivers/spi/spi-rspi.c b/drivers/spi/spi-rspi.c
index a816f07e168e..9daf50031737 100644
--- a/drivers/spi/spi-rspi.c
+++ b/drivers/spi/spi-rspi.c
@@ -413,7 +413,7 @@ static unsigned int qspi_set_send_trigger(struct rspi_data *rspi,
return n;
}
-static void qspi_set_receive_trigger(struct rspi_data *rspi, unsigned int len)
+static int qspi_set_receive_trigger(struct rspi_data *rspi, unsigned int len)
{
unsigned int n;
@@ -428,6 +428,7 @@ static void qspi_set_receive_trigger(struct rspi_data *rspi, unsigned int len)
qspi_update(rspi, SPBFCR_RXTRG_MASK,
SPBFCR_RXTRG_1B, QSPI_SPBFCR);
}
+ return n;
}
#define set_config_register(spi, n) spi->ops->set_config_register(spi, n)
@@ -785,6 +786,9 @@ static int qspi_transfer_out_in(struct rspi_data *rspi,
static int qspi_transfer_out(struct rspi_data *rspi, struct spi_transfer *xfer)
{
+ const u8 *tx = xfer->tx_buf;
+ unsigned int n = xfer->len;
+ unsigned int i, len;
int ret;
if (rspi->master->can_dma && __rspi_can_dma(rspi, xfer)) {
@@ -793,9 +797,23 @@ static int qspi_transfer_out(struct rspi_data *rspi, struct spi_transfer *xfer)
return ret;
}
- ret = rspi_pio_transfer(rspi, xfer->tx_buf, NULL, xfer->len);
- if (ret < 0)
- return ret;
+ while (n > 0) {
+ len = qspi_set_send_trigger(rspi, n);
+ if (len == QSPI_BUFFER_SIZE) {
+ ret = rspi_wait_for_tx_empty(rspi);
+ if (ret < 0) {
+ dev_err(&rspi->master->dev, "transmit timeout\n");
+ return ret;
+ }
+ for (i = 0; i < len; i++)
+ rspi_write_data(rspi, *tx++);
+ } else {
+ ret = rspi_pio_transfer(rspi, tx, NULL, n);
+ if (ret < 0)
+ return ret;
+ }
+ n -= len;
+ }
/* Wait for the last transmission */
rspi_wait_for_tx_empty(rspi);
@@ -805,13 +823,37 @@ static int qspi_transfer_out(struct rspi_data *rspi, struct spi_transfer *xfer)
static int qspi_transfer_in(struct rspi_data *rspi, struct spi_transfer *xfer)
{
+ u8 *rx = xfer->rx_buf;
+ unsigned int n = xfer->len;
+ unsigned int i, len;
+ int ret;
+
if (rspi->master->can_dma && __rspi_can_dma(rspi, xfer)) {
int ret = rspi_dma_transfer(rspi, NULL, &xfer->rx_sg);
if (ret != -EAGAIN)
return ret;
}
- return rspi_pio_transfer(rspi, NULL, xfer->rx_buf, xfer->len);
+ while (n > 0) {
+ len = qspi_set_receive_trigger(rspi, n);
+ if (len == QSPI_BUFFER_SIZE) {
+ ret = rspi_wait_for_rx_full(rspi);
+ if (ret < 0) {
+ dev_err(&rspi->master->dev, "receive timeout\n");
+ return ret;
+ }
+ for (i = 0; i < len; i++)
+ *rx++ = rspi_read_data(rspi);
+ } else {
+ ret = rspi_pio_transfer(rspi, NULL, rx, n);
+ if (ret < 0)
+ return ret;
+ }
+ n -= len;
+ }
+
+ return 0;
}
static int qspi_transfer_one(struct spi_master *master, struct spi_device *spi,
diff --git a/drivers/spi/spi-s3c64xx.c b/drivers/spi/spi-s3c64xx.c
index 3c09e94cf827..28dfdce4beae 100644
--- a/drivers/spi/spi-s3c64xx.c
+++ b/drivers/spi/spi-s3c64xx.c
@@ -341,27 +341,20 @@ static void s3c64xx_spi_set_cs(struct spi_device *spi, bool enable)
static int s3c64xx_spi_prepare_transfer(struct spi_master *spi)
{
struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(spi);
- dma_filter_fn filter = sdd->cntrlr_info->filter;
struct device *dev = &sdd->pdev->dev;
- dma_cap_mask_t mask;
if (is_polling(sdd))
return 0;
- dma_cap_zero(mask);
- dma_cap_set(DMA_SLAVE, mask);
-
/* Acquire DMA channels */
- sdd->rx_dma.ch = dma_request_slave_channel_compat(mask, filter,
- sdd->cntrlr_info->dma_rx, dev, "rx");
+ sdd->rx_dma.ch = dma_request_slave_channel(dev, "rx");
if (!sdd->rx_dma.ch) {
dev_err(dev, "Failed to get RX DMA channel\n");
return -EBUSY;
}
spi->dma_rx = sdd->rx_dma.ch;
- sdd->tx_dma.ch = dma_request_slave_channel_compat(mask, filter,
- sdd->cntrlr_info->dma_tx, dev, "tx");
+ sdd->tx_dma.ch = dma_request_slave_channel(dev, "tx");
if (!sdd->tx_dma.ch) {
dev_err(dev, "Failed to get TX DMA channel\n");
dma_release_channel(sdd->rx_dma.ch);
@@ -1091,11 +1084,6 @@ static int s3c64xx_spi_probe(struct platform_device *pdev)
sdd->cur_bpw = 8;
- if (!sdd->pdev->dev.of_node && (!sci->dma_tx || !sci->dma_rx)) {
- dev_warn(&pdev->dev, "Unable to get SPI tx/rx DMA data. Switching to poll mode\n");
- sdd->port_conf->quirks = S3C64XX_SPI_QUIRK_POLL;
- }
-
sdd->tx_dma.direction = DMA_MEM_TO_DEV;
sdd->rx_dma.direction = DMA_DEV_TO_MEM;
@@ -1205,9 +1193,8 @@ static int s3c64xx_spi_probe(struct platform_device *pdev)
dev_dbg(&pdev->dev, "Samsung SoC SPI Driver loaded for Bus SPI-%d with %d Slaves attached\n",
sdd->port_id, master->num_chipselect);
- dev_dbg(&pdev->dev, "\tIOmem=[%pR]\tFIFO %dbytes\tDMA=[Rx-%p, Tx-%p]\n",
- mem_res, (FIFO_LVL_MASK(sdd) >> 1) + 1,
- sci->dma_rx, sci->dma_tx);
+ dev_dbg(&pdev->dev, "\tIOmem=[%pR]\tFIFO %dbytes\n",
+ mem_res, (FIFO_LVL_MASK(sdd) >> 1) + 1);
pm_runtime_mark_last_busy(&pdev->dev);
pm_runtime_put_autosuspend(&pdev->dev);
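
With the compat helper gone, the channels come purely from the device tree: dma_request_slave_channel() resolves "rx"/"tx" against the SPI node's dma-names property and returns NULL on failure. A condensed sketch of the pattern (error path shortened):

	struct dma_chan *rx_ch, *tx_ch;

	rx_ch = dma_request_slave_channel(dev, "rx");
	tx_ch = dma_request_slave_channel(dev, "tx");
	if (!rx_ch || !tx_ch) {
		if (rx_ch)
			dma_release_channel(rx_ch);
		return -EBUSY;
	}
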
diff --git a/drivers/spi/spi-sh-msiof.c b/drivers/spi/spi-sh-msiof.c
index 1de3a772eb7d..0012ad02e569 100644
--- a/drivers/spi/spi-sh-msiof.c
+++ b/drivers/spi/spi-sh-msiof.c
@@ -980,6 +980,7 @@ static const struct of_device_id sh_msiof_match[] = {
{ .compatible = "renesas,msiof-r8a7792", .data = &r8a779x_data },
{ .compatible = "renesas,msiof-r8a7793", .data = &r8a779x_data },
{ .compatible = "renesas,msiof-r8a7794", .data = &r8a779x_data },
+ { .compatible = "renesas,msiof-r8a7796", .data = &r8a779x_data },
{},
};
MODULE_DEVICE_TABLE(of, sh_msiof_match);
diff --git a/drivers/spi/spi-sun4i.c b/drivers/spi/spi-sun4i.c
index 4969dc10684a..c5cd635c28f3 100644
--- a/drivers/spi/spi-sun4i.c
+++ b/drivers/spi/spi-sun4i.c
@@ -46,6 +46,8 @@
#define SUN4I_CTL_TP BIT(18)
#define SUN4I_INT_CTL_REG 0x0c
+#define SUN4I_INT_CTL_RF_F34 BIT(4)
+#define SUN4I_INT_CTL_TF_E34 BIT(12)
#define SUN4I_INT_CTL_TC BIT(16)
#define SUN4I_INT_STA_REG 0x10
@@ -61,11 +63,14 @@
#define SUN4I_CLK_CTL_CDR1(div) (((div) & SUN4I_CLK_CTL_CDR1_MASK) << 8)
#define SUN4I_CLK_CTL_DRS BIT(12)
+#define SUN4I_MAX_XFER_SIZE 0xffffff
+
#define SUN4I_BURST_CNT_REG 0x20
-#define SUN4I_BURST_CNT(cnt) ((cnt) & 0xffffff)
+#define SUN4I_BURST_CNT(cnt) ((cnt) & SUN4I_MAX_XFER_SIZE)
#define SUN4I_XMIT_CNT_REG 0x24
-#define SUN4I_XMIT_CNT(cnt) ((cnt) & 0xffffff)
+#define SUN4I_XMIT_CNT(cnt) ((cnt) & SUN4I_MAX_XFER_SIZE)
+
#define SUN4I_FIFO_STA_REG 0x28
#define SUN4I_FIFO_STA_RF_CNT_MASK 0x7f
@@ -96,6 +101,31 @@ static inline void sun4i_spi_write(struct sun4i_spi *sspi, u32 reg, u32 value)
writel(value, sspi->base_addr + reg);
}
+static inline u32 sun4i_spi_get_tx_fifo_count(struct sun4i_spi *sspi)
+{
+ u32 reg = sun4i_spi_read(sspi, SUN4I_FIFO_STA_REG);
+
+ reg >>= SUN4I_FIFO_STA_TF_CNT_BITS;
+
+ return reg & SUN4I_FIFO_STA_TF_CNT_MASK;
+}
+
+static inline void sun4i_spi_enable_interrupt(struct sun4i_spi *sspi, u32 mask)
+{
+ u32 reg = sun4i_spi_read(sspi, SUN4I_INT_CTL_REG);
+
+ reg |= mask;
+ sun4i_spi_write(sspi, SUN4I_INT_CTL_REG, reg);
+}
+
+static inline void sun4i_spi_disable_interrupt(struct sun4i_spi *sspi, u32 mask)
+{
+ u32 reg = sun4i_spi_read(sspi, SUN4I_INT_CTL_REG);
+
+ reg &= ~mask;
+ sun4i_spi_write(sspi, SUN4I_INT_CTL_REG, reg);
+}
+
static inline void sun4i_spi_drain_fifo(struct sun4i_spi *sspi, int len)
{
u32 reg, cnt;
@@ -118,10 +148,13 @@ static inline void sun4i_spi_drain_fifo(struct sun4i_spi *sspi, int len)
static inline void sun4i_spi_fill_fifo(struct sun4i_spi *sspi, int len)
{
+ u32 cnt;
u8 byte;
- if (len > sspi->len)
- len = sspi->len;
+ /* See how much data we can fit */
+ cnt = SUN4I_FIFO_DEPTH - sun4i_spi_get_tx_fifo_count(sspi);
+
+ len = min3(len, (int)cnt, sspi->len);
while (len--) {
byte = sspi->tx_buf ? *sspi->tx_buf++ : 0;
@@ -184,10 +217,10 @@ static int sun4i_spi_transfer_one(struct spi_master *master,
u32 reg;
/* We don't support transfers larger than the 24-bit burst counters */
- if (tfr->len > SUN4I_FIFO_DEPTH)
+ if (tfr->len > SUN4I_MAX_XFER_SIZE)
return -EMSGSIZE;
- if (tfr->tx_buf && tfr->len >= SUN4I_FIFO_DEPTH)
+ if (tfr->tx_buf && tfr->len >= SUN4I_MAX_XFER_SIZE)
return -EMSGSIZE;
reinit_completion(&sspi->done);
@@ -286,7 +319,11 @@ static int sun4i_spi_transfer_one(struct spi_master *master,
sun4i_spi_fill_fifo(sspi, SUN4I_FIFO_DEPTH - 1);
/* Enable the interrupts */
- sun4i_spi_write(sspi, SUN4I_INT_CTL_REG, SUN4I_INT_CTL_TC);
+ sun4i_spi_enable_interrupt(sspi, SUN4I_INT_CTL_TC |
+ SUN4I_INT_CTL_RF_F34);
+ /* Only enable Tx FIFO interrupt if we really need it */
+ if (tx_len > SUN4I_FIFO_DEPTH)
+ sun4i_spi_enable_interrupt(sspi, SUN4I_INT_CTL_TF_E34);
/* Start the transfer */
reg = sun4i_spi_read(sspi, SUN4I_CTL_REG);
@@ -306,7 +343,6 @@ static int sun4i_spi_transfer_one(struct spi_master *master,
goto out;
}
- sun4i_spi_drain_fifo(sspi, SUN4I_FIFO_DEPTH);
out:
sun4i_spi_write(sspi, SUN4I_INT_CTL_REG, 0);
@@ -322,10 +358,33 @@ static irqreturn_t sun4i_spi_handler(int irq, void *dev_id)
/* Transfer complete */
if (status & SUN4I_INT_CTL_TC) {
sun4i_spi_write(sspi, SUN4I_INT_STA_REG, SUN4I_INT_CTL_TC);
+ sun4i_spi_drain_fifo(sspi, SUN4I_FIFO_DEPTH);
complete(&sspi->done);
return IRQ_HANDLED;
}
+ /* Receive FIFO 3/4 full */
+ if (status & SUN4I_INT_CTL_RF_F34) {
+ sun4i_spi_drain_fifo(sspi, SUN4I_FIFO_DEPTH);
+ /* Only clear the interrupt _after_ draining the FIFO */
+ sun4i_spi_write(sspi, SUN4I_INT_STA_REG, SUN4I_INT_CTL_RF_F34);
+ return IRQ_HANDLED;
+ }
+
+ /* Transmit FIFO 3/4 empty */
+ if (status & SUN4I_INT_CTL_TF_E34) {
+ sun4i_spi_fill_fifo(sspi, SUN4I_FIFO_DEPTH);
+
+ if (!sspi->len)
+ /* nothing left to transmit */
+ sun4i_spi_disable_interrupt(sspi, SUN4I_INT_CTL_TF_E34);
+
+ /* Only clear the interrupt _after_ re-seeding the FIFO */
+ sun4i_spi_write(sspi, SUN4I_INT_STA_REG, SUN4I_INT_CTL_TF_E34);
+
+ return IRQ_HANDLED;
+ }
+
return IRQ_NONE;
}
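
Two ordering rules carry this handler: each FIFO is serviced before its status bit is cleared, so a FIFO still past its watermark immediately re-asserts the line, and the 3/4-empty interrupt is masked once sspi->len hits zero, since an idle TX FIFO would otherwise interrupt forever. Restated in condensed form, with the reasoning as comments (same names as above):

	if (status & SUN4I_INT_CTL_TF_E34) {
		sun4i_spi_fill_fifo(sspi, SUN4I_FIFO_DEPTH);	/* refill first */
		if (!sspi->len)		/* done: stop the interrupt storm */
			sun4i_spi_disable_interrupt(sspi, SUN4I_INT_CTL_TF_E34);
		/* ack only after refilling, so a still-empty FIFO re-fires */
		sun4i_spi_write(sspi, SUN4I_INT_STA_REG, SUN4I_INT_CTL_TF_E34);
		return IRQ_HANDLED;
	}
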
diff --git a/drivers/spi/spi-sun6i.c b/drivers/spi/spi-sun6i.c
index 9918a57a6a6e..e3114832c485 100644
--- a/drivers/spi/spi-sun6i.c
+++ b/drivers/spi/spi-sun6i.c
@@ -17,6 +17,7 @@
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
+#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>
@@ -24,6 +25,7 @@
#include <linux/spi/spi.h>
#define SUN6I_FIFO_DEPTH 128
+#define SUN8I_FIFO_DEPTH 64
#define SUN6I_GBL_CTL_REG 0x04
#define SUN6I_GBL_CTL_BUS_ENABLE BIT(0)
@@ -90,6 +92,7 @@ struct sun6i_spi {
const u8 *tx_buf;
u8 *rx_buf;
int len;
+ unsigned long fifo_depth;
};
static inline u32 sun6i_spi_read(struct sun6i_spi *sspi, u32 reg)
@@ -155,7 +158,9 @@ static void sun6i_spi_set_cs(struct spi_device *spi, bool enable)
static size_t sun6i_spi_max_transfer_size(struct spi_device *spi)
{
- return SUN6I_FIFO_DEPTH - 1;
+ struct sun6i_spi *sspi = spi_master_get_devdata(spi->master);
+
+ return sspi->fifo_depth - 1;
}
static int sun6i_spi_transfer_one(struct spi_master *master,
@@ -170,7 +175,7 @@ static int sun6i_spi_transfer_one(struct spi_master *master,
u32 reg;
/* We don't support transfer larger than the FIFO */
- if (tfr->len > SUN6I_FIFO_DEPTH)
+ if (tfr->len > sspi->fifo_depth)
return -EINVAL;
reinit_completion(&sspi->done);
@@ -265,7 +270,7 @@ static int sun6i_spi_transfer_one(struct spi_master *master,
SUN6I_BURST_CTL_CNT_STC(tx_len));
/* Fill the TX FIFO */
- sun6i_spi_fill_fifo(sspi, SUN6I_FIFO_DEPTH);
+ sun6i_spi_fill_fifo(sspi, sspi->fifo_depth);
/* Enable the interrupts */
sun6i_spi_write(sspi, SUN6I_INT_CTL_REG, SUN6I_INT_CTL_TC);
@@ -288,7 +293,7 @@ static int sun6i_spi_transfer_one(struct spi_master *master,
goto out;
}
- sun6i_spi_drain_fifo(sspi, SUN6I_FIFO_DEPTH);
+ sun6i_spi_drain_fifo(sspi, sspi->fifo_depth);
out:
sun6i_spi_write(sspi, SUN6I_INT_CTL_REG, 0);
@@ -398,6 +403,8 @@ static int sun6i_spi_probe(struct platform_device *pdev)
}
sspi->master = master;
+ sspi->fifo_depth = (unsigned long)of_device_get_match_data(&pdev->dev);
+
master->max_speed_hz = 100 * 1000 * 1000;
master->min_speed_hz = 3 * 1000;
master->set_cs = sun6i_spi_set_cs;
@@ -470,7 +477,8 @@ static int sun6i_spi_remove(struct platform_device *pdev)
}
static const struct of_device_id sun6i_spi_match[] = {
- { .compatible = "allwinner,sun6i-a31-spi", },
+ { .compatible = "allwinner,sun6i-a31-spi", .data = (void *)SUN6I_FIFO_DEPTH },
+ { .compatible = "allwinner,sun8i-h3-spi", .data = (void *)SUN8I_FIFO_DEPTH },
{}
};
MODULE_DEVICE_TABLE(of, sun6i_spi_match);
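
Stashing the per-variant FIFO depth in the OF match entry's .data pointer means no extra lookup table at probe time; of_device_get_match_data() hands back whatever the matched entry carried. A minimal sketch of the pattern, with hypothetical compatibles:

	static const struct of_device_id demo_match[] = {
		{ .compatible = "vendor,big-fifo",   .data = (void *)128 },
		{ .compatible = "vendor,small-fifo", .data = (void *)64 },
		{ /* sentinel */ }
	};

	static int demo_probe(struct platform_device *pdev)
	{
		unsigned long fifo_depth =
			(unsigned long)of_device_get_match_data(&pdev->dev);

		dev_info(&pdev->dev, "FIFO depth %lu\n", fifo_depth);
		return 0;
	}
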
diff --git a/drivers/spi/spi-ti-qspi.c b/drivers/spi/spi-ti-qspi.c
index caeac66a3977..ec6fb09e2e17 100644
--- a/drivers/spi/spi-ti-qspi.c
+++ b/drivers/spi/spi-ti-qspi.c
@@ -411,6 +411,7 @@ static int ti_qspi_dma_xfer(struct ti_qspi *qspi, dma_addr_t dma_dst,
tx->callback = ti_qspi_dma_callback;
tx->callback_param = qspi;
cookie = tx->tx_submit(tx);
+ reinit_completion(&qspi->transfer_complete);
ret = dma_submit_error(cookie);
if (ret) {
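
The point of moving reinit_completion() here is ordering: the completion must be re-armed before the descriptor can run, otherwise a stale complete() from an earlier transfer satisfies the next wait. The surrounding shape of ti_qspi_dma_xfer() is roughly as follows (chan and the timeout value are stand-ins, not taken from the patch):

	cookie = tx->tx_submit(tx);
	reinit_completion(&qspi->transfer_complete);	/* re-arm first */
	dma_async_issue_pending(chan);			/* only now may it complete */
	if (!wait_for_completion_timeout(&qspi->transfer_complete,
					 msecs_to_jiffies(1000)))
		return -ETIMEDOUT;
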
diff --git a/drivers/spi/spi-topcliff-pch.c b/drivers/spi/spi-topcliff-pch.c
index c54ee6674471..fcb991034c3d 100644
--- a/drivers/spi/spi-topcliff-pch.c
+++ b/drivers/spi/spi-topcliff-pch.c
@@ -1268,11 +1268,8 @@ static void pch_spi_free_resources(struct pch_spi_board_data *board_dat,
static int pch_spi_get_resources(struct pch_spi_board_data *board_dat,
struct pch_spi_data *data)
{
- int retval = 0;
-
dev_dbg(&board_dat->pdev->dev, "%s ENTRY\n", __func__);
-
/* reset PCH SPI h/w */
pch_spi_reset(data->master);
dev_dbg(&board_dat->pdev->dev,
@@ -1280,15 +1277,7 @@ static int pch_spi_get_resources(struct pch_spi_board_data *board_dat,
dev_dbg(&board_dat->pdev->dev, "%s data->irq_reg_sts=true\n", __func__);
- if (retval != 0) {
- dev_err(&board_dat->pdev->dev,
- "%s FAIL:invoking pch_spi_free_resources\n", __func__);
- pch_spi_free_resources(board_dat, data);
- }
-
- dev_dbg(&board_dat->pdev->dev, "%s Return=%d\n", __func__, retval);
-
- return retval;
+ return 0;
}
static void pch_free_dma_buf(struct pch_spi_board_data *board_dat,
diff --git a/drivers/spi/spi-xlp.c b/drivers/spi/spi-xlp.c
index 4071a729eb2f..bea7a93a6046 100644
--- a/drivers/spi/spi-xlp.c
+++ b/drivers/spi/spi-xlp.c
@@ -451,6 +451,7 @@ static const struct of_device_id xlp_spi_dt_id[] = {
{ .compatible = "netlogic,xlp832-spi" },
{ },
};
+MODULE_DEVICE_TABLE(of, xlp_spi_dt_id);
static struct platform_driver xlp_spi_driver = {
.probe = xlp_spi_probe,
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index 838783c3fed0..656dd3e3220c 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -697,10 +697,15 @@ static void spi_set_cs(struct spi_device *spi, bool enable)
if (spi->mode & SPI_CS_HIGH)
enable = !enable;
- if (gpio_is_valid(spi->cs_gpio))
+ if (gpio_is_valid(spi->cs_gpio)) {
gpio_set_value(spi->cs_gpio, !enable);
- else if (spi->master->set_cs)
+ /* Some SPI masters need both GPIO CS & slave_select */
+ if ((spi->master->flags & SPI_MASTER_GPIO_SS) &&
+ spi->master->set_cs)
+ spi->master->set_cs(spi, !enable);
+ } else if (spi->master->set_cs) {
spi->master->set_cs(spi, !enable);
+ }
}
#ifdef CONFIG_HAS_DMA
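
Some controllers gate their internal transfer state machine on the native slave select even when the board routes CS through a GPIO; the SPI_MASTER_GPIO_SS flag lets such a driver ask the core for both. Controller-side opt-in, sketched (my_set_cs is a hypothetical callback):

	master->flags |= SPI_MASTER_GPIO_SS;	/* core also calls set_cs() for GPIO CS */
	master->set_cs = my_set_cs;
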
@@ -720,6 +725,7 @@ static int spi_map_buf(struct spi_master *master, struct device *dev,
int desc_len;
int sgs;
struct page *vm_page;
+ struct scatterlist *sg;
void *sg_buf;
size_t min;
int i, ret;
@@ -738,6 +744,7 @@ static int spi_map_buf(struct spi_master *master, struct device *dev,
if (ret != 0)
return ret;
+ sg = &sgt->sgl[0];
for (i = 0; i < sgs; i++) {
if (vmalloced_buf || kmap_buf) {
@@ -751,16 +758,17 @@ static int spi_map_buf(struct spi_master *master, struct device *dev,
sg_free_table(sgt);
return -ENOMEM;
}
- sg_set_page(&sgt->sgl[i], vm_page,
+ sg_set_page(sg, vm_page,
min, offset_in_page(buf));
} else {
min = min_t(size_t, len, desc_len);
sg_buf = buf;
- sg_set_buf(&sgt->sgl[i], sg_buf, min);
+ sg_set_buf(sg, sg_buf, min);
}
buf += min;
len -= min;
+ sg = sg_next(sg);
}
ret = dma_map_sg(dev, sgt->sgl, sgt->nents, dir);
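
The switch from &sgt->sgl[i] to sg_next() matters once the table is chained: sg_alloc_table() may build the list from several chunks, and plain array indexing walks off the end of the first chunk instead of following the chain link. Condensed form of the corrected walk:

	struct scatterlist *sg = sgt->sgl;
	int i;

	for (i = 0; i < sgs; i++) {
		/* sg_set_page() or sg_set_buf() on *sg, as above */
		sg = sg_next(sg);	/* follows chain entries; NULL after the last */
	}
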
@@ -1034,8 +1042,14 @@ static int spi_transfer_one_message(struct spi_master *master,
if (msg->status != -EINPROGRESS)
goto out;
- if (xfer->delay_usecs)
- udelay(xfer->delay_usecs);
+ if (xfer->delay_usecs) {
+ u16 us = xfer->delay_usecs;
+
+ if (us <= 10)
+ udelay(us);
+ else
+ usleep_range(us, us + DIV_ROUND_UP(us, 10));
+ }
if (xfer->cs_change) {
if (list_is_last(&xfer->transfer_list,
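
The 10 us threshold follows the kernel's timers-howto guidance: busy-waiting with udelay() is only defensible for very short delays, while usleep_range() actually sleeps and gives the scheduler a window to coalesce wakeups. The extra DIV_ROUND_UP(us, 10) grants roughly 10% slack, e.g. for delay_usecs = 100:

	usleep_range(100, 100 + DIV_ROUND_UP(100, 10));	/* sleep 100..110 us */
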
diff --git a/drivers/spi/spidev.c b/drivers/spi/spidev.c
index 2e05046f866b..9e2e099baf8c 100644
--- a/drivers/spi/spidev.c
+++ b/drivers/spi/spidev.c
@@ -696,6 +696,7 @@ static struct class *spidev_class;
static const struct of_device_id spidev_dt_ids[] = {
{ .compatible = "rohm,dh2228fv" },
{ .compatible = "lineartechnology,ltc2488" },
+ { .compatible = "ge,achc" },
{},
};
MODULE_DEVICE_TABLE(of, spidev_dt_ids);
diff --git a/drivers/ssb/pci.c b/drivers/ssb/pci.c
index 0f28c08fcb3c..77b551da5728 100644
--- a/drivers/ssb/pci.c
+++ b/drivers/ssb/pci.c
@@ -909,6 +909,7 @@ static int ssb_pci_sprom_get(struct ssb_bus *bus,
if (err) {
ssb_warn("WARNING: Using fallback SPROM failed (err %d)\n",
err);
+ goto out_free;
} else {
ssb_dbg("Using SPROM revision %d provided by platform\n",
sprom->revision);
diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig
index 58a7b3504b82..cd005cd41413 100644
--- a/drivers/staging/Kconfig
+++ b/drivers/staging/Kconfig
@@ -24,8 +24,6 @@ menuconfig STAGING
if STAGING
-source "drivers/staging/slicoss/Kconfig"
-
source "drivers/staging/wlan-ng/Kconfig"
source "drivers/staging/comedi/Kconfig"
diff --git a/drivers/staging/Makefile b/drivers/staging/Makefile
index 2fa9745db614..831e2e891989 100644
--- a/drivers/staging/Makefile
+++ b/drivers/staging/Makefile
@@ -1,7 +1,6 @@
# Makefile for staging directory
obj-y += media/
-obj-$(CONFIG_SLICOSS) += slicoss/
obj-$(CONFIG_PRISM2_USB) += wlan-ng/
obj-$(CONFIG_COMEDI) += comedi/
obj-$(CONFIG_FB_OLPC_DCON) += olpc_dcon/
@@ -41,4 +40,4 @@ obj-$(CONFIG_MOST) += most/
obj-$(CONFIG_ISDN_I4L) += i4l/
obj-$(CONFIG_KS7010) += ks7010/
obj-$(CONFIG_GREYBUS) += greybus/
-obj-$(CONFIG_BCM2708_VCHIQ) += vc04_services/
+obj-$(CONFIG_BCM2835_VCHIQ) += vc04_services/
diff --git a/drivers/staging/android/TODO b/drivers/staging/android/TODO
index 64d8c8720960..8f3ac37bfe12 100644
--- a/drivers/staging/android/TODO
+++ b/drivers/staging/android/TODO
@@ -25,13 +25,5 @@ ion/
exposes existing cma regions and doesn't unnecessarily reserve memory when
booting a system which doesn't use ion.
-sync framework:
- - remove CONFIG_SW_SYNC_USER, it is used only for testing/debugging and
- should not be upstreamed.
- - port CONFIG_SW_SYNC_USER tests interfaces to use debugfs somehow
- - port libsync tests to kselftest
- - clean up and ABI check for security issues
- - move it to drivers/base/dma-buf
-
Please send patches to Greg Kroah-Hartman <greg@kroah.com> and Cc:
Arve Hjønnevåg <arve@android.com> and Riley Andrews <riandrews@android.com>
diff --git a/drivers/staging/android/ashmem.c b/drivers/staging/android/ashmem.c
index ca9a53c03f0f..7cbad0d45b9c 100644
--- a/drivers/staging/android/ashmem.c
+++ b/drivers/staging/android/ashmem.c
@@ -100,39 +100,43 @@ static DEFINE_MUTEX(ashmem_mutex);
static struct kmem_cache *ashmem_area_cachep __read_mostly;
static struct kmem_cache *ashmem_range_cachep __read_mostly;
-#define range_size(range) \
- ((range)->pgend - (range)->pgstart + 1)
+static inline unsigned long range_size(struct ashmem_range *range)
+{
+ return range->pgend - range->pgstart + 1;
+}
-#define range_on_lru(range) \
- ((range)->purged == ASHMEM_NOT_PURGED)
+static inline bool range_on_lru(struct ashmem_range *range)
+{
+ return range->purged == ASHMEM_NOT_PURGED;
+}
-static inline int page_range_subsumes_range(struct ashmem_range *range,
- size_t start, size_t end)
+static inline bool page_range_subsumes_range(struct ashmem_range *range,
+ size_t start, size_t end)
{
- return (((range)->pgstart >= (start)) && ((range)->pgend <= (end)));
+ return (range->pgstart >= start) && (range->pgend <= end);
}
-static inline int page_range_subsumed_by_range(struct ashmem_range *range,
- size_t start, size_t end)
+static inline bool page_range_subsumed_by_range(struct ashmem_range *range,
+ size_t start, size_t end)
{
- return (((range)->pgstart <= (start)) && ((range)->pgend >= (end)));
+ return (range->pgstart <= start) && (range->pgend >= end);
}
-static inline int page_in_range(struct ashmem_range *range, size_t page)
+static inline bool page_in_range(struct ashmem_range *range, size_t page)
{
- return (((range)->pgstart <= (page)) && ((range)->pgend >= (page)));
+ return (range->pgstart <= page) && (range->pgend >= page);
}
-static inline int page_range_in_range(struct ashmem_range *range,
- size_t start, size_t end)
+static inline bool page_range_in_range(struct ashmem_range *range,
+ size_t start, size_t end)
{
- return (page_in_range(range, start) || page_in_range(range, end) ||
- page_range_subsumes_range(range, start, end));
+ return page_in_range(range, start) || page_in_range(range, end) ||
+ page_range_subsumes_range(range, start, end);
}
-static inline int range_before_page(struct ashmem_range *range, size_t page)
+static inline bool range_before_page(struct ashmem_range *range, size_t page)
{
- return ((range)->pgend < (page));
+ return range->pgend < page;
}
#define PROT_MASK (PROT_EXEC | PROT_READ | PROT_WRITE)
diff --git a/drivers/staging/android/ion/ion.c b/drivers/staging/android/ion/ion.c
index 209a8f7ef02b..b653451843c8 100644
--- a/drivers/staging/android/ion/ion.c
+++ b/drivers/staging/android/ion/ion.c
@@ -882,7 +882,7 @@ static int ion_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
BUG_ON(!buffer->pages || !buffer->pages[vmf->pgoff]);
pfn = page_to_pfn(ion_buffer_page(buffer->pages[vmf->pgoff]));
- ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
+ ret = vm_insert_pfn(vma, vmf->address, pfn);
mutex_unlock(&buffer->lock);
if (ret)
return VM_FAULT_ERROR;
@@ -1013,7 +1013,7 @@ static int ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf,
return 0;
}
-static struct dma_buf_ops dma_buf_ops = {
+static const struct dma_buf_ops dma_buf_ops = {
.map_dma_buf = ion_map_dma_buf,
.unmap_dma_buf = ion_unmap_dma_buf,
.mmap = ion_mmap,
diff --git a/drivers/staging/android/ion/ion_dummy_driver.c b/drivers/staging/android/ion/ion_dummy_driver.c
index b23f2c76c753..cf5c010d32bc 100644
--- a/drivers/staging/android/ion/ion_dummy_driver.c
+++ b/drivers/staging/android/ion/ion_dummy_driver.c
@@ -58,7 +58,7 @@ static struct ion_platform_heap dummy_heaps[] = {
},
};
-static struct ion_platform_data dummy_ion_pdata = {
+static const struct ion_platform_data dummy_ion_pdata = {
.nr = ARRAY_SIZE(dummy_heaps),
.heaps = dummy_heaps,
};
diff --git a/drivers/staging/android/ion/ion_system_heap.c b/drivers/staging/android/ion/ion_system_heap.c
index 7e023d505af8..3ebbb75746e8 100644
--- a/drivers/staging/android/ion/ion_system_heap.c
+++ b/drivers/staging/android/ion/ion_system_heap.c
@@ -30,7 +30,7 @@
static gfp_t high_order_gfp_flags = (GFP_HIGHUSER | __GFP_ZERO | __GFP_NOWARN |
__GFP_NORETRY) & ~__GFP_RECLAIM;
-static gfp_t low_order_gfp_flags = (GFP_HIGHUSER | __GFP_ZERO);
+static gfp_t low_order_gfp_flags = GFP_HIGHUSER | __GFP_ZERO;
static const unsigned int orders[] = {8, 4, 0};
static int order_to_index(unsigned int order)
diff --git a/drivers/staging/android/uapi/ion_test.h b/drivers/staging/android/uapi/ion_test.h
index ffef06f63133..480242e02f8d 100644
--- a/drivers/staging/android/uapi/ion_test.h
+++ b/drivers/staging/android/uapi/ion_test.h
@@ -66,5 +66,4 @@ struct ion_test_rw_data {
#define ION_IOC_TEST_KERNEL_MAPPING \
_IOW(ION_IOC_MAGIC, 0xf2, struct ion_test_rw_data)
-
#endif /* _UAPI_LINUX_ION_H */
diff --git a/drivers/staging/clocking-wizard/clk-xlnx-clock-wizard.c b/drivers/staging/clocking-wizard/clk-xlnx-clock-wizard.c
index 7b8be5293883..bf3fe7c61be5 100644
--- a/drivers/staging/clocking-wizard/clk-xlnx-clock-wizard.c
+++ b/drivers/staging/clocking-wizard/clk-xlnx-clock-wizard.c
@@ -68,7 +68,7 @@ struct clk_wzrd {
struct clk *axi_clk;
struct clk *clks_internal[wzrd_clk_int_max];
struct clk *clkout[WZRD_NUM_OUTPUTS];
- int speed_grade;
+ unsigned int speed_grade;
bool suspended;
};
diff --git a/drivers/staging/comedi/comedi.h b/drivers/staging/comedi/comedi.h
index 08fb26b51a5f..a1c1081906c5 100644
--- a/drivers/staging/comedi/comedi.h
+++ b/drivers/staging/comedi/comedi.h
@@ -245,6 +245,22 @@ enum comedi_subdevice_type {
/* configuration instructions */
/**
+ * enum comedi_io_direction - COMEDI I/O directions
+ * @COMEDI_INPUT: Input.
+ * @COMEDI_OUTPUT: Output.
+ * @COMEDI_OPENDRAIN: Open-drain (or open-collector) output.
+ *
+ * These are used by the %INSN_CONFIG_DIO_QUERY configuration instruction to
+ * report a direction. They may also be used in other places where a direction
+ * needs to be specified.
+ */
+enum comedi_io_direction {
+ COMEDI_INPUT = 0,
+ COMEDI_OUTPUT = 1,
+ COMEDI_OPENDRAIN = 2
+};
+
+/**
* enum configuration_ids - COMEDI configuration instruction codes
* @INSN_CONFIG_DIO_INPUT: Configure digital I/O as input.
* @INSN_CONFIG_DIO_OUTPUT: Configure digital I/O as output.
@@ -296,9 +312,9 @@ enum comedi_subdevice_type {
* @INSN_CONFIG_PWM_GET_H_BRIDGE: Get PWM H bridge duty cycle and polarity.
*/
enum configuration_ids {
- INSN_CONFIG_DIO_INPUT = 0,
- INSN_CONFIG_DIO_OUTPUT = 1,
- INSN_CONFIG_DIO_OPENDRAIN = 2,
+ INSN_CONFIG_DIO_INPUT = COMEDI_INPUT,
+ INSN_CONFIG_DIO_OUTPUT = COMEDI_OUTPUT,
+ INSN_CONFIG_DIO_OPENDRAIN = COMEDI_OPENDRAIN,
INSN_CONFIG_ANALOG_TRIG = 16,
/* INSN_CONFIG_WAVEFORM = 17, */
/* INSN_CONFIG_TRIG = 18, */
@@ -397,22 +413,6 @@ enum comedi_digital_trig_op {
};
/**
- * enum comedi_io_direction - COMEDI I/O directions
- * @COMEDI_INPUT: Input.
- * @COMEDI_OUTPUT: Output.
- * @COMEDI_OPENDRAIN: Open-drain (or open-collector) output.
- *
- * These are used by the %INSN_CONFIG_DIO_QUERY configuration instruction to
- * report a direction. They may also be used in other places where a direction
- * needs to be specified.
- */
-enum comedi_io_direction {
- COMEDI_INPUT = 0,
- COMEDI_OUTPUT = 1,
- COMEDI_OPENDRAIN = 2
-};
-
-/**
* enum comedi_support_level - support level for a COMEDI feature
* @COMEDI_UNKNOWN_SUPPORT: Unspecified support for feature.
* @COMEDI_SUPPORTED: Feature is supported.
@@ -1104,18 +1104,19 @@ enum ni_gpct_other_select {
enum ni_gpct_arm_source {
NI_GPCT_ARM_IMMEDIATE = 0x0,
/*
- * Start both the counter and the adjacent pared
- * counter simultaneously
+ * Start both the counter and the adjacent paired counter simultaneously
*/
NI_GPCT_ARM_PAIRED_IMMEDIATE = 0x1,
/*
- * NI doesn't document bits for selecting hardware arm triggers.
- * If the NI_GPCT_ARM_UNKNOWN bit is set, we will pass the least
- * significant bits (3 bits for 660x or 5 bits for m-series)
- * through to the hardware. This will at least allow someone to
- * figure out what the bits do later.
+ * If the NI_GPCT_HW_ARM bit is set, we will pass the least significant
+ * bits (3 bits for 660x or 5 bits for m-series) through to the
+ * hardware. To select a hardware trigger, pass the appropriate select
+ * bit, e.g.,
+ * NI_GPCT_HW_ARM | NI_GPCT_AI_START1_GATE_SELECT or
+ * NI_GPCT_HW_ARM | NI_GPCT_PFI_GATE_SELECT(pfi_number)
*/
- NI_GPCT_ARM_UNKNOWN = 0x1000,
+ NI_GPCT_HW_ARM = 0x1000,
+ NI_GPCT_ARM_UNKNOWN = NI_GPCT_HW_ARM, /* for backward compatibility */
};
/* digital filtering options for ni 660x for use with INSN_CONFIG_FILTER. */
diff --git a/drivers/staging/comedi/comedidev.h b/drivers/staging/comedi/comedidev.h
index dcb637665eb7..0c7c37a8ff33 100644
--- a/drivers/staging/comedi/comedidev.h
+++ b/drivers/staging/comedi/comedidev.h
@@ -426,6 +426,18 @@ enum comedi_cb {
* handler will be called with the COMEDI device structure's board_ptr member
* pointing to the matched pointer to a board name within the driver's private
* array of static, read-only board type information.
+ *
+ * The @detach handler has two roles. If a COMEDI device was successfully
+ * configured by the @attach or @auto_attach handler, it is called when the
+ * device is being deconfigured (by the %COMEDI_DEVCONFIG ioctl, or due to
+ * unloading of the driver, or due to device removal). It is also called when
+ * the @attach or @auto_attach handler returns an error. Therefore, the
+ * @attach or @auto_attach handlers can defer clean-up on error until the
+ * @detach handler is called. If the @attach or @auto_attach handlers free
+ * any resources themselves, they must prevent the @detach handler from
+ * freeing the same resources. The @detach handler must not assume that all
+ * resources requested by the @attach or @auto_attach handler were
+ * successfully allocated.
*/
struct comedi_driver {
/* private: */
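
The practical consequence of the documentation added above is that a detach handler must be written defensively, because it also runs after a failed attach. A minimal sketch (the demo_* names and private struct are hypothetical):

	static int demo_attach(struct comedi_device *dev,
			       struct comedi_devconfig *it)
	{
		struct demo_private *devpriv;

		devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
		if (!devpriv)
			return -ENOMEM;	/* detach still runs and must cope */

		/* request regions, IRQs, subdevices ... */
		return 0;
	}

	static void demo_detach(struct comedi_device *dev)
	{
		struct demo_private *devpriv = dev->private;

		if (!devpriv)
			return;	/* attach failed before allocating anything */
		/* release only what attach actually acquired */
	}
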
diff --git a/drivers/staging/comedi/drivers/cb_pcidda.c b/drivers/staging/comedi/drivers/cb_pcidda.c
index ccb37d1f0f8e..987414741605 100644
--- a/drivers/staging/comedi/drivers/cb_pcidda.c
+++ b/drivers/staging/comedi/drivers/cb_pcidda.c
@@ -248,8 +248,8 @@ static void cb_pcidda_write_caldac(struct comedi_device *dev,
cb_pcidda_serial_out(dev, value, num_caldac_bits);
/*
-* latch stream into appropriate caldac deselect reference dac
-*/
+ * latch stream into appropriate caldac deselect reference dac
+ */
cal2_bits = DESELECT_REF_DAC_BIT | DUMMY_BIT;
/* deactivate caldacs (one caldac for every two channels) */
for (i = 0; i < max_num_caldacs; i++)
diff --git a/drivers/staging/comedi/drivers/mite.c b/drivers/staging/comedi/drivers/mite.c
index b1c0860135d0..05126ba4ba51 100644
--- a/drivers/staging/comedi/drivers/mite.c
+++ b/drivers/staging/comedi/drivers/mite.c
@@ -837,7 +837,7 @@ static int mite_setup(struct comedi_device *dev, struct mite *mite,
* of 0x61f and bursts worked. 6281 powered up with register value of
* 0x1f and bursts didn't work. The NI windows driver reads the
* register, then does a bitwise-or of 0x600 with it and writes it back.
- *
+ *
* The bits 0x90180700 in MITE_UNKNOWN_DMA_BURST_REG can be
* written and read back. The bits 0x1f always read as 1.
* The rest always read as zero.
diff --git a/drivers/staging/comedi/drivers/ni_mio_common.c b/drivers/staging/comedi/drivers/ni_mio_common.c
index 0f97d7b611d7..b2e382888981 100644
--- a/drivers/staging/comedi/drivers/ni_mio_common.c
+++ b/drivers/staging/comedi/drivers/ni_mio_common.c
@@ -1832,11 +1832,10 @@ static int ni_ai_insn_read(struct comedi_device *dev,
unsigned int *data)
{
struct ni_private *devpriv = dev->private;
- unsigned int mask = (s->maxdata + 1) >> 1;
+ unsigned int mask = s->maxdata;
int i, n;
unsigned int signbits;
unsigned int d;
- unsigned long dl;
ni_load_channelgain_list(dev, s, 1, &insn->chanspec);
@@ -1875,7 +1874,7 @@ static int ni_ai_insn_read(struct comedi_device *dev,
return -ETIME;
}
d += signbits;
- data[n] = d;
+ data[n] = d & 0xffff;
}
} else if (devpriv->is_6143) {
for (n = 0; n < insn->n; n++) {
@@ -1887,15 +1886,15 @@ static int ni_ai_insn_read(struct comedi_device *dev,
* bit to move a single 16bit stranded sample into
* the FIFO.
*/
- dl = 0;
+ d = 0;
for (i = 0; i < NI_TIMEOUT; i++) {
if (ni_readl(dev, NI6143_AI_FIFO_STATUS_REG) &
0x01) {
/* Get stranded sample into FIFO */
ni_writel(dev, 0x01,
NI6143_AI_FIFO_CTRL_REG);
- dl = ni_readl(dev,
- NI6143_AI_FIFO_DATA_REG);
+ d = ni_readl(dev,
+ NI6143_AI_FIFO_DATA_REG);
break;
}
}
@@ -1903,7 +1902,7 @@ static int ni_ai_insn_read(struct comedi_device *dev,
dev_err(dev->class_dev, "timeout\n");
return -ETIME;
}
- data[n] = (((dl >> 16) & 0xFFFF) + signbits) & 0xFFFF;
+ data[n] = (((d >> 16) & 0xFFFF) + signbits) & 0xFFFF;
}
} else {
for (n = 0; n < insn->n; n++) {
@@ -1919,14 +1918,13 @@ static int ni_ai_insn_read(struct comedi_device *dev,
return -ETIME;
}
if (devpriv->is_m_series) {
- dl = ni_readl(dev, NI_M_AI_FIFO_DATA_REG);
- dl &= mask;
- data[n] = dl;
+ d = ni_readl(dev, NI_M_AI_FIFO_DATA_REG);
+ d &= mask;
+ data[n] = d;
} else {
d = ni_readw(dev, NI_E_AI_FIFO_DATA_REG);
- /* subtle: needs to be short addition */
d += signbits;
- data[n] = d;
+ data[n] = d & 0xffff;
}
}
}
@@ -2729,66 +2727,36 @@ static int ni_ao_insn_write(struct comedi_device *dev,
return insn->n;
}
-static int ni_ao_insn_config(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
-{
- const struct ni_board_struct *board = dev->board_ptr;
- struct ni_private *devpriv = dev->private;
- unsigned int nbytes;
-
- switch (data[0]) {
- case INSN_CONFIG_GET_HARDWARE_BUFFER_SIZE:
- switch (data[1]) {
- case COMEDI_OUTPUT:
- nbytes = comedi_samples_to_bytes(s,
- board->ao_fifo_depth);
- data[2] = 1 + nbytes;
- if (devpriv->mite)
- data[2] += devpriv->mite->fifo_size;
- break;
- case COMEDI_INPUT:
- data[2] = 0;
- break;
- default:
- return -EINVAL;
- }
- return 0;
- default:
- break;
- }
-
- return -EINVAL;
-}
-
-static int ni_ao_inttrig(struct comedi_device *dev,
- struct comedi_subdevice *s,
- unsigned int trig_num)
+/*
+ * Arms the AO device in preparation for a trigger event.
+ * This function also allocates and prepares a DMA channel (or FIFO if DMA is
+ * not used). As a part of this preparation, this function preloads the DAC
+ * registers with the first values of the output stream. This ensures that the
+ * first clock cycle after the trigger can be used for output.
+ *
+ * Note that this function _must_ happen after a user has written data to the
+ * output buffers via either mmap or write(fileno,...).
+ */
+static int ni_ao_arm(struct comedi_device *dev,
+ struct comedi_subdevice *s)
{
struct ni_private *devpriv = dev->private;
- struct comedi_cmd *cmd = &s->async->cmd;
int ret;
int interrupt_b_bits;
int i;
static const int timeout = 1000;
/*
- * Require trig_num == cmd->start_arg when cmd->start_src == TRIG_INT.
- * For backwards compatibility, also allow trig_num == 0 when
- * cmd->start_src != TRIG_INT (i.e. when cmd->start_src == TRIG_EXT);
- * in that case, the internal trigger is being used as a pre-trigger
- * before the external trigger.
+ * Prevent ao from doing things like trying to allocate the ao dma
+ * channel multiple times.
*/
- if (!(trig_num == cmd->start_arg ||
- (trig_num == 0 && cmd->start_src != TRIG_INT)))
+ if (!devpriv->ao_needs_arming) {
+ dev_dbg(dev->class_dev, "%s: device does not need arming!\n",
+ __func__);
return -EINVAL;
+ }
- /*
- * Null trig at beginning prevent ao start trigger from executing more
- * than once per command (and doing things like trying to allocate the
- * ao dma channel multiple times).
- */
- s->async->inttrig = NULL;
+ devpriv->ao_needs_arming = 0;
ni_set_bits(dev, NISTC_INTB_ENA_REG,
NISTC_INTB_ENA_AO_FIFO | NISTC_INTB_ENA_AO_ERR, 0);
@@ -2840,6 +2808,75 @@ static int ni_ao_inttrig(struct comedi_device *dev,
devpriv->ao_cmd1,
NISTC_AO_CMD1_REG);
+ return 0;
+}
+
+static int ni_ao_insn_config(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn, unsigned int *data)
+{
+ const struct ni_board_struct *board = dev->board_ptr;
+ struct ni_private *devpriv = dev->private;
+ unsigned int nbytes;
+
+ switch (data[0]) {
+ case INSN_CONFIG_GET_HARDWARE_BUFFER_SIZE:
+ switch (data[1]) {
+ case COMEDI_OUTPUT:
+ nbytes = comedi_samples_to_bytes(s,
+ board->ao_fifo_depth);
+ data[2] = 1 + nbytes;
+ if (devpriv->mite)
+ data[2] += devpriv->mite->fifo_size;
+ break;
+ case COMEDI_INPUT:
+ data[2] = 0;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+ case INSN_CONFIG_ARM:
+ return ni_ao_arm(dev, s);
+ default:
+ break;
+ }
+
+ return -EINVAL;
+}
+
+static int ni_ao_inttrig(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ unsigned int trig_num)
+{
+ struct ni_private *devpriv = dev->private;
+ struct comedi_cmd *cmd = &s->async->cmd;
+ int ret;
+
+ /*
+ * Require trig_num == cmd->start_arg when cmd->start_src == TRIG_INT.
+ * For backwards compatibility, also allow trig_num == 0 when
+ * cmd->start_src != TRIG_INT (i.e. when cmd->start_src == TRIG_EXT);
+ * in that case, the internal trigger is being used as a pre-trigger
+ * before the external trigger.
+ */
+ if (!(trig_num == cmd->start_arg ||
+ (trig_num == 0 && cmd->start_src != TRIG_INT)))
+ return -EINVAL;
+
+ /*
+ * Null trig at beginning prevents ao start trigger from executing more
+ * than once per command.
+ */
+ s->async->inttrig = NULL;
+
+ if (devpriv->ao_needs_arming) {
+ /* only arm this device if it still needs arming */
+ ret = ni_ao_arm(dev, s);
+ if (ret)
+ return ret;
+ }
+
ni_stc_writew(dev, NISTC_AO_CMD2_START1_PULSE | devpriv->ao_cmd2,
NISTC_AO_CMD2_REG);
@@ -3227,10 +3264,17 @@ static int ni_ao_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
ni_ao_cmd_set_interrupts(dev, s);
/*
- * arm(ing) and star(ting) happen in ni_ao_inttrig, which _must_ be
- * called for ao commands since 1) TRIG_NOW is not supported and 2) DMA
- * must be setup and initially written to before arm/start happen.
+ * arm(ing) must happen later so that DMA can be set up and DACs
+ * preloaded with the actual output buffer before starting.
+ *
+ * start(ing) must happen _after_ arming is completed. Starting can be
+ * done either via ni_ao_inttrig, or via an external trigger.
+ *
+ * **Currently, ni_ao_inttrig will automatically attempt a call to
+ * ni_ao_arm if the device still needs arming at that point. This
+ * allows backwards compatibility.
*/
+ devpriv->ao_needs_arming = 1;
return 0;
}
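
With arming exposed through INSN_CONFIG_ARM, an application that has already written its samples (via write() or mmap) can arm the AO subdevice explicitly before delivering an external start trigger. A rough comedilib sketch (the usual comedilib entry points are assumed; untested against this driver):

	comedi_insn insn;
	lsampl_t data[1] = { INSN_CONFIG_ARM };

	memset(&insn, 0, sizeof(insn));
	insn.insn = INSN_CONFIG;
	insn.subdev = ao_subdev;	/* AO subdevice index, application-known */
	insn.data = data;
	insn.n = 1;
	comedi_do_insn(dev, &insn);	/* arm; the external trigger then starts output */
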
diff --git a/drivers/staging/comedi/drivers/ni_stc.h b/drivers/staging/comedi/drivers/ni_stc.h
index 1966519cb6e5..f27b545f83eb 100644
--- a/drivers/staging/comedi/drivers/ni_stc.h
+++ b/drivers/staging/comedi/drivers/ni_stc.h
@@ -1053,6 +1053,20 @@ struct ni_private {
unsigned int is_67xx:1;
unsigned int is_6711:1;
unsigned int is_6713:1;
+
+ /*
+ * Boolean value of whether device needs to be armed.
+ *
+ * Currently, only NI AO devices are known to need arming, since
+ * the DAC registers must be preloaded before triggering.
+ * This variable should only be set true during a command operation
+ * (e.g. ni_ao_cmd) and should then be set false by the arming
+ * function (e.g. ni_ao_arm).
+ *
+ * This variable helps to ensure that multiple DMA allocations are not
+ * possible.
+ */
+ unsigned int ao_needs_arming:1;
};
static const struct comedi_lrange range_ni_E_ao_ext;
diff --git a/drivers/staging/comedi/drivers/ni_tio.c b/drivers/staging/comedi/drivers/ni_tio.c
index 5ab49a798164..15cb4088467b 100644
--- a/drivers/staging/comedi/drivers/ni_tio.c
+++ b/drivers/staging/comedi/drivers/ni_tio.c
@@ -452,8 +452,9 @@ static void ni_tio_set_sync_mode(struct ni_gpct *counter)
unsigned int bits = 0;
unsigned int reg;
unsigned int mode;
- unsigned int clk_src;
- u64 ps;
+ unsigned int clk_src = 0;
+ u64 ps = 0;
+ int ret;
bool force_alt_sync;
/* only m series and 660x variants have counting mode registers */
@@ -483,9 +484,12 @@ static void ni_tio_set_sync_mode(struct ni_gpct *counter)
break;
}
- ni_tio_generic_clock_src_select(counter, &clk_src);
- ni_tio_clock_period_ps(counter, clk_src, &ps);
-
+ ret = ni_tio_generic_clock_src_select(counter, &clk_src);
+ if (ret)
+ return;
+ ret = ni_tio_clock_period_ps(counter, clk_src, &ps);
+ if (ret)
+ return;
/*
* It's not clear what we should do if clock_period is unknown, so we
* are not using the alt sync bit in that case.
@@ -809,7 +813,7 @@ static int ni_tio_get_clock_src(struct ni_gpct *counter,
unsigned int *clock_source,
unsigned int *period_ns)
{
- u64 temp64;
+ u64 temp64 = 0;
int ret;
ret = ni_tio_generic_clock_src_select(counter, clock_source);
diff --git a/drivers/staging/comedi/drivers/pcl818.c b/drivers/staging/comedi/drivers/pcl818.c
index 5aeed44dff70..5b5df0596ad9 100644
--- a/drivers/staging/comedi/drivers/pcl818.c
+++ b/drivers/staging/comedi/drivers/pcl818.c
@@ -771,9 +771,9 @@ static int pcl818_ai_cancel(struct comedi_device *dev,
s->async->scans_done < cmd->stop_arg)) {
if (!devpriv->ai_cmd_canceled) {
/*
- * Wait for running dma transfer to end,
- * do cleanup in interrupt.
- */
+ * Wait for running dma transfer to end,
+ * do cleanup in interrupt.
+ */
devpriv->ai_cmd_canceled = 1;
return 0;
}
diff --git a/drivers/staging/comedi/drivers/s626.c b/drivers/staging/comedi/drivers/s626.c
index c14a02564432..0dd5fe286855 100644
--- a/drivers/staging/comedi/drivers/s626.c
+++ b/drivers/staging/comedi/drivers/s626.c
@@ -75,24 +75,24 @@ struct s626_buffer_dma {
};
struct s626_private {
- uint8_t ai_cmd_running; /* ai_cmd is running */
+ u8 ai_cmd_running; /* ai_cmd is running */
unsigned int ai_sample_timer; /* time between samples in
* units of the timer */
int ai_convert_count; /* conversion counter */
unsigned int ai_convert_timer; /* time between conversion in
* units of the timer */
- uint16_t counter_int_enabs; /* counter interrupt enable mask
+ u16 counter_int_enabs; /* counter interrupt enable mask
* for MISC2 register */
- uint8_t adc_items; /* number of items in ADC poll list */
+ u8 adc_items; /* number of items in ADC poll list */
struct s626_buffer_dma rps_buf; /* DMA buffer used to hold ADC (RPS1)
* program */
struct s626_buffer_dma ana_buf; /* DMA buffer used to receive ADC data
* and hold DAC data */
- uint32_t *dac_wbuf; /* pointer to logical adrs of DMA buffer
+ u32 *dac_wbuf; /* pointer to logical adrs of DMA buffer
* used to hold DAC data */
- uint16_t dacpol; /* image of DAC polarity register */
- uint8_t trim_setpoint[12]; /* images of TrimDAC setpoints */
- uint32_t i2c_adrs; /* I2C device address for onboard EEPROM
+ u16 dacpol; /* image of DAC polarity register */
+ u8 trim_setpoint[12]; /* images of TrimDAC setpoints */
+ u32 i2c_adrs; /* I2C device address for onboard EEPROM
* (board rev dependent) */
};
@@ -179,7 +179,7 @@ static void s626_debi_transfer(struct comedi_device *dev)
/*
* Read a value from a gate array register.
*/
-static uint16_t s626_debi_read(struct comedi_device *dev, uint16_t addr)
+static u16 s626_debi_read(struct comedi_device *dev, u16 addr)
{
/* Set up DEBI control register value in shadow RAM */
writel(S626_DEBI_CMD_RDWORD | addr, dev->mmio + S626_P_DEBICMD);
@@ -193,8 +193,8 @@ static uint16_t s626_debi_read(struct comedi_device *dev, uint16_t addr)
/*
* Write a value to a gate array register.
*/
-static void s626_debi_write(struct comedi_device *dev, uint16_t addr,
- uint16_t wdata)
+static void s626_debi_write(struct comedi_device *dev, u16 addr,
+ u16 wdata)
{
/* Set up DEBI control register value in shadow RAM */
writel(S626_DEBI_CMD_WRWORD | addr, dev->mmio + S626_P_DEBICMD);
@@ -241,7 +241,7 @@ static int s626_i2c_handshake_eoc(struct comedi_device *dev,
return -EBUSY;
}
-static int s626_i2c_handshake(struct comedi_device *dev, uint32_t val)
+static int s626_i2c_handshake(struct comedi_device *dev, u32 val)
{
unsigned int ctrl;
int ret;
@@ -267,8 +267,8 @@ static int s626_i2c_handshake(struct comedi_device *dev, uint32_t val)
return ctrl & S626_I2C_ERR;
}
-/* Read uint8_t from EEPROM. */
-static uint8_t s626_i2c_read(struct comedi_device *dev, uint8_t addr)
+/* Read u8 from EEPROM. */
+static u8 s626_i2c_read(struct comedi_device *dev, u8 addr)
{
struct s626_private *devpriv = dev->private;
@@ -304,10 +304,10 @@ static uint8_t s626_i2c_read(struct comedi_device *dev, uint8_t addr)
/* *********** DAC FUNCTIONS *********** */
/* TrimDac LogicalChan-to-PhysicalChan mapping table. */
-static const uint8_t s626_trimchan[] = { 10, 9, 8, 3, 2, 7, 6, 1, 0, 5, 4 };
+static const u8 s626_trimchan[] = { 10, 9, 8, 3, 2, 7, 6, 1, 0, 5, 4 };
/* TrimDac LogicalChan-to-EepromAdrs mapping table. */
-static const uint8_t s626_trimadrs[] = {
+static const u8 s626_trimadrs[] = {
0x40, 0x41, 0x42, 0x50, 0x51, 0x52, 0x53, 0x60, 0x61, 0x62, 0x63
};
@@ -357,7 +357,7 @@ static int s626_send_dac_eoc(struct comedi_device *dev,
* channel 2. Assumes: (1) TSL2 slot records initialized, and (2)
* dacpol contains valid target image.
*/
-static int s626_send_dac(struct comedi_device *dev, uint32_t val)
+static int s626_send_dac(struct comedi_device *dev, u32 val)
{
struct s626_private *devpriv = dev->private;
int ret;
@@ -516,12 +516,12 @@ static int s626_send_dac(struct comedi_device *dev, uint32_t val)
* Private helper function: Write setpoint to an application DAC channel.
*/
static int s626_set_dac(struct comedi_device *dev,
- uint16_t chan, int16_t dacdata)
+ u16 chan, int16_t dacdata)
{
struct s626_private *devpriv = dev->private;
- uint16_t signmask;
- uint32_t ws_image;
- uint32_t val;
+ u16 signmask;
+ u32 ws_image;
+ u32 val;
/*
* Adjust DAC data polarity and set up Polarity Control Register image.
@@ -535,7 +535,7 @@ static int s626_set_dac(struct comedi_device *dev,
}
/* Limit DAC setpoint value to valid range. */
- if ((uint16_t)dacdata > 0x1FFF)
+ if ((u16)dacdata > 0x1FFF)
dacdata = 0x1FFF;
/*
@@ -575,23 +575,23 @@ static int s626_set_dac(struct comedi_device *dev,
* (write to non-existent trimdac). */
val |= 0x00004000; /* Address the two main dual-DAC devices
* (TSL's chip select enables target device). */
- val |= ((uint32_t)(chan & 1) << 15); /* Address the DAC channel
+ val |= ((u32)(chan & 1) << 15); /* Address the DAC channel
* within the device. */
- val |= (uint32_t)dacdata; /* Include DAC setpoint data. */
+ val |= (u32)dacdata; /* Include DAC setpoint data. */
return s626_send_dac(dev, val);
}
static int s626_write_trim_dac(struct comedi_device *dev,
- uint8_t logical_chan, uint8_t dac_data)
+ u8 logical_chan, u8 dac_data)
{
struct s626_private *devpriv = dev->private;
- uint32_t chan;
+ u32 chan;
/*
* Save the new setpoint in case the application needs to read it back
* later.
*/
- devpriv->trim_setpoint[logical_chan] = (uint8_t)dac_data;
+ devpriv->trim_setpoint[logical_chan] = (u8)dac_data;
/* Map logical channel number to physical channel number. */
chan = s626_trimchan[logical_chan];
@@ -633,7 +633,7 @@ static int s626_write_trim_dac(struct comedi_device *dev,
static int s626_load_trim_dacs(struct comedi_device *dev)
{
- uint8_t i;
+ u8 i;
int ret;
/* Copy TrimDac setpoint values from EEPROM to TrimDacs. */
@@ -661,7 +661,7 @@ static int s626_load_trim_dacs(struct comedi_device *dev)
* latches B.
*/
static void s626_set_latch_source(struct comedi_device *dev,
- unsigned int chan, uint16_t value)
+ unsigned int chan, u16 value)
{
s626_debi_replace(dev, S626_LP_CRB(chan),
~(S626_CRBMSK_INTCTRL | S626_CRBMSK_LATCHSRC),
@@ -672,7 +672,7 @@ static void s626_set_latch_source(struct comedi_device *dev,
* Write value into counter preload register.
*/
static void s626_preload(struct comedi_device *dev,
- unsigned int chan, uint32_t value)
+ unsigned int chan, u32 value)
{
s626_debi_write(dev, S626_LP_CNTR(chan), value);
s626_debi_write(dev, S626_LP_CNTR(chan) + 2, value >> 16);
@@ -686,7 +686,7 @@ static void s626_preload(struct comedi_device *dev,
static void s626_reset_cap_flags(struct comedi_device *dev,
unsigned int chan)
{
- uint16_t set;
+ u16 set;
set = S626_SET_CRB_INTRESETCMD(1);
if (chan < 3)
@@ -704,12 +704,12 @@ static void s626_reset_cap_flags(struct comedi_device *dev,
* ClkPol, ClkEnab, IndexSrc, IndexPol, LoadSrc.
*/
static void s626_set_mode_a(struct comedi_device *dev,
- unsigned int chan, uint16_t setup,
- uint16_t disable_int_src)
+ unsigned int chan, u16 setup,
+ u16 disable_int_src)
{
struct s626_private *devpriv = dev->private;
- uint16_t cra;
- uint16_t crb;
+ u16 cra;
+ u16 crb;
unsigned int cntsrc, clkmult, clkpol;
/* Initialize CRA and CRB images. */
@@ -782,12 +782,12 @@ static void s626_set_mode_a(struct comedi_device *dev,
}
static void s626_set_mode_b(struct comedi_device *dev,
- unsigned int chan, uint16_t setup,
- uint16_t disable_int_src)
+ unsigned int chan, u16 setup,
+ u16 disable_int_src)
{
struct s626_private *devpriv = dev->private;
- uint16_t cra;
- uint16_t crb;
+ u16 cra;
+ u16 crb;
unsigned int cntsrc, clkmult, clkpol;
/* Initialize CRA and CRB images. */
@@ -868,7 +868,7 @@ static void s626_set_mode_b(struct comedi_device *dev,
static void s626_set_mode(struct comedi_device *dev,
unsigned int chan,
- uint16_t setup, uint16_t disable_int_src)
+ u16 setup, u16 disable_int_src)
{
if (chan < 3)
s626_set_mode_a(dev, chan, setup, disable_int_src);
@@ -880,7 +880,7 @@ static void s626_set_mode(struct comedi_device *dev,
* Return/set a counter's enable. enab: 0=always enabled, 1=enabled by index.
*/
static void s626_set_enable(struct comedi_device *dev,
- unsigned int chan, uint16_t enab)
+ unsigned int chan, u16 enab)
{
unsigned int mask = S626_CRBMSK_INTCTRL;
unsigned int set;
@@ -901,11 +901,11 @@ static void s626_set_enable(struct comedi_device *dev,
* 2=OverflowA (B counters only), 3=disabled.
*/
static void s626_set_load_trig(struct comedi_device *dev,
- unsigned int chan, uint16_t trig)
+ unsigned int chan, u16 trig)
{
- uint16_t reg;
- uint16_t mask;
- uint16_t set;
+ u16 reg;
+ u16 mask;
+ u16 set;
if (chan < 3) {
reg = S626_LP_CRA(chan);
@@ -925,11 +925,11 @@ static void s626_set_load_trig(struct comedi_device *dev,
* 2=IndexOnly, 3=IndexAndOverflow.
*/
static void s626_set_int_src(struct comedi_device *dev,
- unsigned int chan, uint16_t int_source)
+ unsigned int chan, u16 int_source)
{
struct s626_private *devpriv = dev->private;
- uint16_t cra_reg = S626_LP_CRA(chan);
- uint16_t crb_reg = S626_LP_CRB(chan);
+ u16 cra_reg = S626_LP_CRA(chan);
+ u16 crb_reg = S626_LP_CRB(chan);
if (chan < 3) {
/* Reset any pending counter overflow or index captures */
@@ -941,7 +941,7 @@ static void s626_set_int_src(struct comedi_device *dev,
s626_debi_replace(dev, cra_reg, ~S626_CRAMSK_INTSRC_A,
S626_SET_CRA_INTSRC_A(int_source));
} else {
- uint16_t crb;
+ u16 crb;
/* Cache writeable CRB register image */
crb = s626_debi_read(dev, crb_reg);
@@ -985,7 +985,7 @@ static void s626_pulse_index(struct comedi_device *dev,
unsigned int chan)
{
if (chan < 3) {
- uint16_t cra;
+ u16 cra;
cra = s626_debi_read(dev, S626_LP_CRA(chan));
@@ -994,7 +994,7 @@ static void s626_pulse_index(struct comedi_device *dev,
(cra ^ S626_CRAMSK_INDXPOL_A));
s626_debi_write(dev, S626_LP_CRA(chan), cra);
} else {
- uint16_t crb;
+ u16 crb;
crb = s626_debi_read(dev, S626_LP_CRB(chan));
crb &= ~S626_CRBMSK_INTCTRL;
@@ -1062,7 +1062,7 @@ static int s626_dio_clear_irq(struct comedi_device *dev)
}
static void s626_handle_dio_interrupt(struct comedi_device *dev,
- uint16_t irqbit, uint8_t group)
+ u16 irqbit, u8 group)
{
struct s626_private *devpriv = dev->private;
struct comedi_subdevice *s = dev->read_subdev;
@@ -1110,8 +1110,8 @@ static void s626_handle_dio_interrupt(struct comedi_device *dev,
static void s626_check_dio_interrupts(struct comedi_device *dev)
{
- uint16_t irqbit;
- uint8_t group;
+ u16 irqbit;
+ u8 group;
for (group = 0; group < S626_DIO_BANKS; group++) {
/* read interrupt type */
@@ -1131,7 +1131,7 @@ static void s626_check_counter_interrupts(struct comedi_device *dev)
struct comedi_subdevice *s = dev->read_subdev;
struct comedi_async *async = s->async;
struct comedi_cmd *cmd = &async->cmd;
- uint16_t irqbit;
+ u16 irqbit;
/* read interrupt type */
irqbit = s626_debi_read(dev, S626_LP_RDMISC2);
@@ -1196,7 +1196,7 @@ static bool s626_handle_eos_interrupt(struct comedi_device *dev)
* first uint16_t in the buffer because it contains junk data
* from the final ADC of the previous poll list scan.
*/
- uint32_t *readaddr = (uint32_t *)devpriv->ana_buf.logical_base + 1;
+ u32 *readaddr = (u32 *)devpriv->ana_buf.logical_base + 1;
int i;
/* get the data and hand it over to comedi */
@@ -1231,7 +1231,7 @@ static irqreturn_t s626_irq_handler(int irq, void *d)
{
struct comedi_device *dev = d;
unsigned long flags;
- uint32_t irqtype, irqstatus;
+ u32 irqtype, irqstatus;
if (!dev->attached)
return IRQ_NONE;
@@ -1272,25 +1272,25 @@ static irqreturn_t s626_irq_handler(int irq, void *d)
/*
* This function builds the RPS program for hardware driven acquisition.
*/
-static void s626_reset_adc(struct comedi_device *dev, uint8_t *ppl)
+static void s626_reset_adc(struct comedi_device *dev, u8 *ppl)
{
struct s626_private *devpriv = dev->private;
struct comedi_subdevice *s = dev->read_subdev;
struct comedi_cmd *cmd = &s->async->cmd;
- uint32_t *rps;
- uint32_t jmp_adrs;
- uint16_t i;
- uint16_t n;
- uint32_t local_ppl;
+ u32 *rps;
+ u32 jmp_adrs;
+ u16 i;
+ u16 n;
+ u32 local_ppl;
/* Stop RPS program in case it is currently running */
s626_mc_disable(dev, S626_MC1_ERPS1, S626_P_MC1);
/* Set starting logical address to write RPS commands. */
- rps = (uint32_t *)devpriv->rps_buf.logical_base;
+ rps = (u32 *)devpriv->rps_buf.logical_base;
/* Initialize RPS instruction pointer */
- writel((uint32_t)devpriv->rps_buf.physical_base,
+ writel((u32)devpriv->rps_buf.physical_base,
dev->mmio + S626_P_RPSADDR1);
/* Construct RPS program in rps_buf DMA buffer */
@@ -1372,8 +1372,8 @@ static void s626_reset_adc(struct comedi_device *dev, uint8_t *ppl)
* flushes the RPS' instruction prefetch pipeline.
*/
jmp_adrs =
- (uint32_t)devpriv->rps_buf.physical_base +
- (uint32_t)((unsigned long)rps -
+ (u32)devpriv->rps_buf.physical_base +
+ (u32)((unsigned long)rps -
(unsigned long)devpriv->
rps_buf.logical_base);
for (i = 0; i < (10 * S626_RPSCLK_PER_US / 2); i++) {
@@ -1408,7 +1408,7 @@ static void s626_reset_adc(struct comedi_device *dev, uint8_t *ppl)
/* Transfer ADC data from FB BUFFER 1 register to DMA buffer. */
*rps++ = S626_RPS_STREG |
(S626_BUGFIX_STREG(S626_P_FB_BUFFER1) >> 2);
- *rps++ = (uint32_t)devpriv->ana_buf.physical_base +
+ *rps++ = (u32)devpriv->ana_buf.physical_base +
(devpriv->adc_items << 2);
/*
@@ -1452,7 +1452,7 @@ static void s626_reset_adc(struct comedi_device *dev, uint8_t *ppl)
/* Transfer final ADC data from FB BUFFER 1 register to DMA buffer. */
*rps++ = S626_RPS_STREG | (S626_BUGFIX_STREG(S626_P_FB_BUFFER1) >> 2);
- *rps++ = (uint32_t)devpriv->ana_buf.physical_base +
+ *rps++ = (u32)devpriv->ana_buf.physical_base +
(devpriv->adc_items << 2);
/* Indicate ADC scan loop is finished. */
@@ -1465,7 +1465,7 @@ static void s626_reset_adc(struct comedi_device *dev, uint8_t *ppl)
/* Restart RPS program at its beginning. */
*rps++ = S626_RPS_JUMP; /* Branch to start of RPS program. */
- *rps++ = (uint32_t)devpriv->rps_buf.physical_base;
+ *rps++ = (u32)devpriv->rps_buf.physical_base;
/* End of RPS program build */
}
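
For context: an RPS program is just a stream of u32 opcode/operand words in a
coherent DMA buffer. The CPU composes it through logical_base while the board's
RPS sequencer fetches it from physical_base. A condensed fragment in the style
of the function above (the exact opcode/operand pairing here is illustrative,
not a verbatim excerpt):

	u32 *rps = (u32 *)devpriv->rps_buf.logical_base;

	*rps++ = S626_RPS_LDREG | (S626_P_ACON1 >> 2);	/* opcode: load register  */
	*rps++ = S626_ACON1_ADCSTART;			/* operand: its new value */
	*rps++ = S626_RPS_JUMP;				/* opcode: branch...      */
	*rps++ = (u32)devpriv->rps_buf.physical_base;	/* ...back to the start   */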
@@ -1488,11 +1488,11 @@ static int s626_ai_insn_read(struct comedi_device *dev,
struct comedi_insn *insn,
unsigned int *data)
{
- uint16_t chan = CR_CHAN(insn->chanspec);
- uint16_t range = CR_RANGE(insn->chanspec);
- uint16_t adc_spec = 0;
- uint32_t gpio_image;
- uint32_t tmp;
+ u16 chan = CR_CHAN(insn->chanspec);
+ u16 range = CR_RANGE(insn->chanspec);
+ u16 adc_spec = 0;
+ u32 gpio_image;
+ u32 tmp;
int ret;
int n;
@@ -1585,7 +1585,7 @@ static int s626_ai_insn_read(struct comedi_device *dev,
return n;
}
-static int s626_ai_load_polllist(uint8_t *ppl, struct comedi_cmd *cmd)
+static int s626_ai_load_polllist(u8 *ppl, struct comedi_cmd *cmd)
{
int n;
@@ -1651,7 +1651,7 @@ static int s626_ns_to_timer(unsigned int *nanosec, unsigned int flags)
static void s626_timer_load(struct comedi_device *dev,
unsigned int chan, int tick)
{
- uint16_t setup =
+ u16 setup =
/* Preload upon index. */
S626_SET_STD_LOADSRC(S626_LOADSRC_INDX) |
/* Disable hardware index. */
@@ -1664,7 +1664,7 @@ static void s626_timer_load(struct comedi_device *dev,
S626_SET_STD_CLKMULT(S626_CLKMULT_1X) |
/* Enabled by index */
S626_SET_STD_CLKENAB(S626_CLKENAB_INDEX);
- uint16_t value_latchsrc = S626_LATCHSRC_A_INDXA;
+ u16 value_latchsrc = S626_LATCHSRC_A_INDXA;
/* uint16_t enab = S626_CLKENAB_ALWAYS; */
s626_set_mode(dev, chan, setup, false);
@@ -1693,7 +1693,7 @@ static void s626_timer_load(struct comedi_device *dev,
static int s626_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
{
struct s626_private *devpriv = dev->private;
- uint8_t ppl[16];
+ u8 ppl[16];
struct comedi_cmd *cmd = &s->async->cmd;
int tick;
@@ -1953,7 +1953,7 @@ static int s626_ao_insn_write(struct comedi_device *dev,
static void s626_dio_init(struct comedi_device *dev)
{
- uint16_t group;
+ u16 group;
/* Prepare to treat writes to WRCapSel as capture disables. */
s626_debi_write(dev, S626_LP_MISC1, S626_MISC1_NOEDCAP);
@@ -2017,7 +2017,7 @@ static int s626_enc_insn_config(struct comedi_device *dev,
struct comedi_insn *insn, unsigned int *data)
{
unsigned int chan = CR_CHAN(insn->chanspec);
- uint16_t setup =
+ u16 setup =
/* Preload upon index. */
S626_SET_STD_LOADSRC(S626_LOADSRC_INDX) |
/* Disable hardware index. */
@@ -2032,8 +2032,8 @@ static int s626_enc_insn_config(struct comedi_device *dev,
S626_SET_STD_CLKENAB(S626_CLKENAB_INDEX);
/* uint16_t disable_int_src = true; */
/* uint32_t Preloadvalue; //Counter initial value */
- uint16_t value_latchsrc = S626_LATCHSRC_AB_READ;
- uint16_t enab = S626_CLKENAB_ALWAYS;
+ u16 value_latchsrc = S626_LATCHSRC_AB_READ;
+ u16 enab = S626_CLKENAB_ALWAYS;
/* (data==NULL) ? (Preloadvalue=0) : (Preloadvalue=data[0]); */
@@ -2052,7 +2052,7 @@ static int s626_enc_insn_read(struct comedi_device *dev,
unsigned int *data)
{
unsigned int chan = CR_CHAN(insn->chanspec);
- uint16_t cntr_latch_reg = S626_LP_CNTR(chan);
+ u16 cntr_latch_reg = S626_LP_CNTR(chan);
int i;
for (i = 0; i < insn->n; i++) {
@@ -2090,7 +2090,7 @@ static int s626_enc_insn_write(struct comedi_device *dev,
return 1;
}
-static void s626_write_misc2(struct comedi_device *dev, uint16_t new_image)
+static void s626_write_misc2(struct comedi_device *dev, u16 new_image)
{
s626_debi_write(dev, S626_LP_MISC1, S626_MISC1_WENABLE);
s626_debi_write(dev, S626_LP_WRMISC2, new_image);
@@ -2100,7 +2100,7 @@ static void s626_write_misc2(struct comedi_device *dev, uint16_t new_image)
static void s626_counters_init(struct comedi_device *dev)
{
int chan;
- uint16_t setup =
+ u16 setup =
/* Preload upon index. */
S626_SET_STD_LOADSRC(S626_LOADSRC_INDX) |
/* Disable hardware index. */
@@ -2169,7 +2169,7 @@ static int s626_initialize(struct comedi_device *dev)
{
struct s626_private *devpriv = dev->private;
dma_addr_t phys_buf;
- uint16_t chan;
+ u16 chan;
int i;
int ret;
@@ -2248,7 +2248,7 @@ static int s626_initialize(struct comedi_device *dev)
*/
/* Physical start of RPS program */
- writel((uint32_t)devpriv->rps_buf.physical_base,
+ writel((u32)devpriv->rps_buf.physical_base,
dev->mmio + S626_P_RPSADDR1);
/* RPS program performs no explicit mem writes */
writel(0, dev->mmio + S626_P_RPSPAGE1);
@@ -2318,16 +2318,16 @@ static int s626_initialize(struct comedi_device *dev)
* enabled.
*/
phys_buf = devpriv->ana_buf.physical_base +
- (S626_DAC_WDMABUF_OS * sizeof(uint32_t));
- writel((uint32_t)phys_buf, dev->mmio + S626_P_BASEA2_OUT);
- writel((uint32_t)(phys_buf + sizeof(uint32_t)),
+ (S626_DAC_WDMABUF_OS * sizeof(u32));
+ writel((u32)phys_buf, dev->mmio + S626_P_BASEA2_OUT);
+ writel((u32)(phys_buf + sizeof(u32)),
dev->mmio + S626_P_PROTA2_OUT);
/*
* Cache Audio2's output DMA buffer logical address. This is
* where DAC data is buffered for A2 output DMA transfers.
*/
- devpriv->dac_wbuf = (uint32_t *)devpriv->ana_buf.logical_base +
+ devpriv->dac_wbuf = (u32 *)devpriv->ana_buf.logical_base +
S626_DAC_WDMABUF_OS;
/*
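
The address arithmetic above keeps two views of the same DMA slot in lock-step:
the bus address programmed into the Audio2 output DMA engine and the
kernel-virtual pointer the CPU writes DAC data through. Condensed (a sketch,
not a verbatim excerpt):

	dma_addr_t phys = devpriv->ana_buf.physical_base +
			  S626_DAC_WDMABUF_OS * sizeof(u32);
	u32 *virt = (u32 *)devpriv->ana_buf.logical_base +
		    S626_DAC_WDMABUF_OS;

	/* The CPU fills *virt; the DMA engine streams from phys. */

Note the units: the physical offset is in bytes (hence * sizeof(u32)), while
the virtual offset is in u32 elements because logical_base is cast to u32 *
before the addition.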
diff --git a/drivers/staging/comedi/kcomedilib/kcomedilib_main.c b/drivers/staging/comedi/kcomedilib/kcomedilib_main.c
index d0a8a28edd36..55d43c076b1c 100644
--- a/drivers/staging/comedi/kcomedilib/kcomedilib_main.c
+++ b/drivers/staging/comedi/kcomedilib/kcomedilib_main.c
@@ -250,3 +250,15 @@ int comedi_get_n_channels(struct comedi_device *dev, unsigned int subdevice)
return n;
}
EXPORT_SYMBOL_GPL(comedi_get_n_channels);
+
+static int __init kcomedilib_module_init(void)
+{
+ return 0;
+}
+
+static void __exit kcomedilib_module_exit(void)
+{
+}
+
+module_init(kcomedilib_module_init);
+module_exit(kcomedilib_module_exit);
diff --git a/drivers/staging/dgnc/Makefile b/drivers/staging/dgnc/Makefile
index 995c874f40eb..40ff0d007695 100644
--- a/drivers/staging/dgnc/Makefile
+++ b/drivers/staging/dgnc/Makefile
@@ -2,5 +2,4 @@ obj-$(CONFIG_DGNC) += dgnc.o
dgnc-objs := dgnc_cls.o dgnc_driver.o\
dgnc_mgmt.o dgnc_neo.o\
- dgnc_tty.o dgnc_sysfs.o\
- dgnc_utils.o
+ dgnc_tty.o dgnc_utils.o
diff --git a/drivers/staging/dgnc/dgnc_cls.c b/drivers/staging/dgnc/dgnc_cls.c
index aedca66cbe41..c20ffdd254d8 100644
--- a/drivers/staging/dgnc/dgnc_cls.c
+++ b/drivers/staging/dgnc/dgnc_cls.c
@@ -385,9 +385,8 @@ static void cls_copy_data_from_uart_to_queue(struct channel_t *ch)
ch->ch_rxcount++;
}
- /*
- * Write new final heads to channel structure.
- */
+ /* Write new final heads to channel structure. */
+
ch->ch_r_head = head & RQUEUEMASK;
ch->ch_e_head = head & EQUEUEMASK;
@@ -666,9 +665,8 @@ static void cls_param(struct tty_struct *tty)
if (!bd || bd->magic != DGNC_BOARD_MAGIC)
return;
- /*
- * If baud rate is zero, flush queues, and set mval to drop DTR.
- */
+ /* If baud rate is zero, flush queues, and set mval to drop DTR. */
+
if ((ch->ch_c_cflag & (CBAUD)) == 0) {
ch->ch_r_head = 0;
ch->ch_r_tail = 0;
@@ -887,9 +885,8 @@ static void cls_param(struct tty_struct *tty)
cls_parse_modem(ch, readb(&ch->ch_cls_uart->msr));
}
-/*
- * Our board poller function.
- */
+/* Our board poller function. */
+
static void cls_tasklet(unsigned long data)
{
struct dgnc_board *bd = (struct dgnc_board *)data;
@@ -914,9 +911,8 @@ static void cls_tasklet(unsigned long data)
*/
spin_lock_irqsave(&bd->bd_intr_lock, flags);
- /*
- * If board is ready, parse deeper to see if there is anything to do.
- */
+ /* If board is ready, parse deeper to see if there is anything to do. */
+
if ((state == BOARD_READY) && (ports > 0)) {
/* Loop on each port */
for (i = 0; i < ports; i++) {
@@ -938,9 +934,8 @@ static void cls_tasklet(unsigned long data)
cls_copy_data_from_queue_to_uart(ch);
dgnc_wakeup_writes(ch);
- /*
- * Check carrier function.
- */
+ /* Check carrier function. */
+
dgnc_carrier(ch);
/*
@@ -992,9 +987,8 @@ static irqreturn_t cls_intr(int irq, void *voidbrd)
for (i = 0; i < brd->nasync; i++)
cls_parse_isr(brd, i);
- /*
- * Schedule tasklet to more in-depth servicing at a better time.
- */
+ /* Schedule tasklet to more in-depth servicing at a better time. */
+
tasklet_schedule(&brd->helper_tasklet);
spin_unlock_irqrestore(&brd->bd_intr_lock, flags);
@@ -1043,9 +1037,7 @@ static int cls_drain(struct tty_struct *tty, uint seconds)
un->un_flags |= UN_EMPTY;
spin_unlock_irqrestore(&ch->ch_lock, flags);
- /*
- * NOTE: Do something with time passed in.
- */
+ /* NOTE: Do something with time passed in. */
/* If ret is non-zero, user ctrl-c'ed us */
@@ -1112,9 +1104,8 @@ static void cls_uart_init(struct channel_t *ch)
readb(&ch->ch_cls_uart->msr);
}
-/*
- * Turns off UART.
- */
+/* Turns off UART. */
+
static void cls_uart_off(struct channel_t *ch)
{
writeb(0, &ch->ch_cls_uart->ier);
@@ -1160,9 +1151,8 @@ static void cls_send_break(struct channel_t *ch, int msecs)
if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
return;
- /*
- * If we receive a time of 0, this means turn off the break.
- */
+ /* If we receive a time of 0, this means turn off the break. */
+
if (msecs == 0) {
/* Turn break off, and unset some variables */
if (ch->ch_flags & CH_BREAK_SENDING) {
diff --git a/drivers/staging/dgnc/dgnc_cls.h b/drivers/staging/dgnc/dgnc_cls.h
index 2597e36d38c4..463ad30efb3b 100644
--- a/drivers/staging/dgnc/dgnc_cls.h
+++ b/drivers/staging/dgnc/dgnc_cls.h
@@ -69,7 +69,7 @@ struct cls_uart_struct {
#define UART_EXAR654_EFR_IXON 0x2 /* Receiver compares Xon1/Xoff1 */
#define UART_EXAR654_EFR_IXOFF 0x8 /* Transmit Xon1/Xoff1 */
#define UART_EXAR654_EFR_RTSDTR 0x40 /* Auto RTS/DTR Flow Control Enable */
-#define UART_EXAR654_EFR_CTSDSR 0x80 /* Auto CTS/DSR Flow COntrol Enable */
+#define UART_EXAR654_EFR_CTSDSR 0x80 /* Auto CTS/DSR Flow Control Enable */
#define UART_EXAR654_IER_XOFF 0x20 /* Xoff Interrupt Enable */
#define UART_EXAR654_IER_RTSDTR 0x40 /* Output Interrupt Enable */
#define UART_EXAR654_IER_CTSDSR 0x80 /* Input Interrupt Enable */
diff --git a/drivers/staging/dgnc/dgnc_driver.c b/drivers/staging/dgnc/dgnc_driver.c
index fd372d3afa46..5381dbddd8bb 100644
--- a/drivers/staging/dgnc/dgnc_driver.c
+++ b/drivers/staging/dgnc/dgnc_driver.c
@@ -24,31 +24,14 @@
#include "dgnc_tty.h"
#include "dgnc_cls.h"
#include "dgnc_neo.h"
-#include "dgnc_sysfs.h"
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Digi International, http://www.digi.com");
MODULE_DESCRIPTION("Driver for the Digi International Neo and Classic PCI based product line");
MODULE_SUPPORTED_DEVICE("dgnc");
-/**************************************************************************
- *
- * protos for this file
- *
- */
-static int dgnc_start(void);
-static int dgnc_request_irq(struct dgnc_board *brd);
-static void dgnc_free_irq(struct dgnc_board *brd);
-static struct dgnc_board *dgnc_found_board(struct pci_dev *pdev, int id);
-static void dgnc_cleanup_board(struct dgnc_board *brd);
-static void dgnc_poll_handler(ulong dummy);
-static int dgnc_init_one(struct pci_dev *pdev,
- const struct pci_device_id *ent);
-static int dgnc_do_remap(struct dgnc_board *brd);
+/* File operations permitted on Control/Management major. */
-/*
- * File operations permitted on Control/Management major.
- */
static const struct file_operations dgnc_board_fops = {
.owner = THIS_MODULE,
.unlocked_ioctl = dgnc_mgmt_ioctl,
@@ -56,9 +39,8 @@ static const struct file_operations dgnc_board_fops = {
.release = dgnc_mgmt_close
};
-/*
- * Globals
- */
+/* Globals */
+
uint dgnc_num_boards;
struct dgnc_board *dgnc_board[MAXBOARDS];
DEFINE_SPINLOCK(dgnc_global_lock);
@@ -66,14 +48,12 @@ DEFINE_SPINLOCK(dgnc_poll_lock); /* Poll scheduling lock */
uint dgnc_major;
int dgnc_poll_tick = 20; /* Poll interval - 20 ms */
-/*
- * Static vars.
- */
+/* Static vars. */
+
static struct class *dgnc_class;
-/*
- * Poller stuff
- */
+/* Poller stuff */
+
static ulong dgnc_poll_time; /* Time of next poll */
static uint dgnc_poll_stop; /* Used to tell poller to stop */
static struct timer_list dgnc_poll_timer;
@@ -93,7 +73,7 @@ struct board_id {
unsigned int is_pci_express;
};
-static struct board_id dgnc_ids[] = {
+static const struct board_id dgnc_ids[] = {
{ PCI_DEVICE_CLASSIC_4_PCI_NAME, 4, 0 },
{ PCI_DEVICE_CLASSIC_4_422_PCI_NAME, 4, 0 },
{ PCI_DEVICE_CLASSIC_8_PCI_NAME, 8, 0 },
@@ -114,274 +94,20 @@ static struct board_id dgnc_ids[] = {
{ NULL, 0, 0 }
};
-static struct pci_driver dgnc_driver = {
- .name = "dgnc",
- .probe = dgnc_init_one,
- .id_table = dgnc_pci_tbl,
-};
-
-/************************************************************************
- *
- * Driver load/unload functions
- *
- ************************************************************************/
-
-static void cleanup(bool sysfiles)
-{
- int i;
- unsigned long flags;
-
- spin_lock_irqsave(&dgnc_poll_lock, flags);
- dgnc_poll_stop = 1;
- spin_unlock_irqrestore(&dgnc_poll_lock, flags);
-
- /* Turn off poller right away. */
- del_timer_sync(&dgnc_poll_timer);
-
- if (sysfiles)
- dgnc_remove_driver_sysfiles(&dgnc_driver);
-
- device_destroy(dgnc_class, MKDEV(dgnc_major, 0));
- class_destroy(dgnc_class);
- unregister_chrdev(dgnc_major, "dgnc");
-
- for (i = 0; i < dgnc_num_boards; ++i) {
- dgnc_remove_ports_sysfiles(dgnc_board[i]);
- dgnc_cleanup_tty(dgnc_board[i]);
- dgnc_cleanup_board(dgnc_board[i]);
- }
-
- dgnc_tty_post_uninit();
-}
-
-/*
- * dgnc_cleanup_module()
- *
- * Module unload. This is where it all ends.
- */
-static void __exit dgnc_cleanup_module(void)
-{
- cleanup(true);
- pci_unregister_driver(&dgnc_driver);
-}
-
-/*
- * init_module()
- *
- * Module load. This is where it all starts.
- */
-static int __init dgnc_init_module(void)
-{
- int rc;
-
- /*
- * Initialize global stuff
- */
- rc = dgnc_start();
-
- if (rc < 0)
- return rc;
-
- /*
- * Find and configure all the cards
- */
- rc = pci_register_driver(&dgnc_driver);
- if (rc) {
- pr_warn("WARNING: dgnc driver load failed. No Digi Neo or Classic boards found.\n");
- cleanup(false);
- return rc;
- }
- dgnc_create_driver_sysfiles(&dgnc_driver);
-
- return 0;
-}
-
-module_init(dgnc_init_module);
-module_exit(dgnc_cleanup_module);
+/* Remap PCI memory. */
-/*
- * Start of driver.
- */
-static int dgnc_start(void)
+static int dgnc_do_remap(struct dgnc_board *brd)
{
int rc = 0;
- unsigned long flags;
- struct device *dev;
-
- /* make sure timer is initialized before we do anything else */
- init_timer(&dgnc_poll_timer);
-
- /*
- * Register our base character device into the kernel.
- * This allows the download daemon to connect to the downld device
- * before any of the boards are init'ed.
- *
- * Register management/dpa devices
- */
- rc = register_chrdev(0, "dgnc", &dgnc_board_fops);
- if (rc < 0) {
- pr_err(DRVSTR ": Can't register dgnc driver device (%d)\n", rc);
- return rc;
- }
- dgnc_major = rc;
-
- dgnc_class = class_create(THIS_MODULE, "dgnc_mgmt");
- if (IS_ERR(dgnc_class)) {
- rc = PTR_ERR(dgnc_class);
- pr_err(DRVSTR ": Can't create dgnc_mgmt class (%d)\n", rc);
- goto failed_class;
- }
-
- dev = device_create(dgnc_class, NULL,
- MKDEV(dgnc_major, 0),
- NULL, "dgnc_mgmt");
- if (IS_ERR(dev)) {
- rc = PTR_ERR(dev);
- pr_err(DRVSTR ": Can't create device (%d)\n", rc);
- goto failed_device;
- }
-
- /*
- * Init any global tty stuff.
- */
- rc = dgnc_tty_preinit();
-
- if (rc < 0) {
- pr_err(DRVSTR ": tty preinit - not enough memory (%d)\n", rc);
- goto failed_tty;
- }
-
- /* Start the poller */
- spin_lock_irqsave(&dgnc_poll_lock, flags);
- setup_timer(&dgnc_poll_timer, dgnc_poll_handler, 0);
- dgnc_poll_time = jiffies + dgnc_jiffies_from_ms(dgnc_poll_tick);
- dgnc_poll_timer.expires = dgnc_poll_time;
- spin_unlock_irqrestore(&dgnc_poll_lock, flags);
-
- add_timer(&dgnc_poll_timer);
-
- return 0;
-
-failed_tty:
- device_destroy(dgnc_class, MKDEV(dgnc_major, 0));
-failed_device:
- class_destroy(dgnc_class);
-failed_class:
- unregister_chrdev(dgnc_major, "dgnc");
- return rc;
-}
-
-/* returns count (>= 0), or negative on error */
-static int dgnc_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
-{
- int rc;
- struct dgnc_board *brd;
-
- /* wake up and enable device */
- rc = pci_enable_device(pdev);
-
- if (rc)
- return -EIO;
-
- brd = dgnc_found_board(pdev, ent->driver_data);
- if (IS_ERR(brd))
- return PTR_ERR(brd);
-
- /*
- * Do tty device initialization.
- */
-
- rc = dgnc_tty_register(brd);
- if (rc < 0) {
- pr_err(DRVSTR ": Can't register tty devices (%d)\n", rc);
- goto failed;
- }
-
- rc = dgnc_request_irq(brd);
- if (rc < 0) {
- pr_err(DRVSTR ": Can't finalize board init (%d)\n", rc);
- goto unregister_tty;
- }
-
- rc = dgnc_tty_init(brd);
- if (rc < 0) {
- pr_err(DRVSTR ": Can't init tty devices (%d)\n", rc);
- goto free_irq;
- }
-
- brd->state = BOARD_READY;
- brd->dpastatus = BD_RUNNING;
- dgnc_create_ports_sysfiles(brd);
-
- dgnc_board[dgnc_num_boards++] = brd;
-
- return 0;
-
-free_irq:
- dgnc_free_irq(brd);
-unregister_tty:
- dgnc_tty_unregister(brd);
-
-failed:
- kfree(brd);
+ brd->re_map_membase = ioremap(brd->membase, 0x1000);
+ if (!brd->re_map_membase)
+ rc = -ENOMEM;
return rc;
}
/*
- * dgnc_cleanup_board()
- *
- * Free all the memory associated with a board
- */
-static void dgnc_cleanup_board(struct dgnc_board *brd)
-{
- int i = 0;
-
- if (!brd || brd->magic != DGNC_BOARD_MAGIC)
- return;
-
- switch (brd->device) {
- case PCI_DEVICE_CLASSIC_4_DID:
- case PCI_DEVICE_CLASSIC_8_DID:
- case PCI_DEVICE_CLASSIC_4_422_DID:
- case PCI_DEVICE_CLASSIC_8_422_DID:
-
- /* Tell card not to interrupt anymore. */
- outb(0, brd->iobase + 0x4c);
- break;
-
- default:
- break;
- }
-
- if (brd->irq)
- free_irq(brd->irq, brd);
-
- tasklet_kill(&brd->helper_tasklet);
-
- if (brd->re_map_membase) {
- iounmap(brd->re_map_membase);
- brd->re_map_membase = NULL;
- }
-
- /* Free all allocated channels structs */
- for (i = 0; i < MAXPORTS ; i++) {
- if (brd->channels[i]) {
- kfree(brd->channels[i]->ch_rqueue);
- kfree(brd->channels[i]->ch_equeue);
- kfree(brd->channels[i]->ch_wqueue);
- kfree(brd->channels[i]);
- brd->channels[i] = NULL;
- }
- }
-
- dgnc_board[brd->boardnum] = NULL;
-
- kfree(brd);
-}
-
-/*
* dgnc_found_board()
*
* A board has been found, init it.
@@ -587,21 +313,6 @@ static void dgnc_free_irq(struct dgnc_board *brd)
}
/*
- * Remap PCI memory.
- */
-static int dgnc_do_remap(struct dgnc_board *brd)
-{
- int rc = 0;
-
- brd->re_map_membase = ioremap(brd->membase, 0x1000);
- if (!brd->re_map_membase)
- rc = -ENOMEM;
-
- return rc;
-}
-
-/*
- *
* Function:
*
* dgnc_poll_handler
@@ -623,7 +334,6 @@ static int dgnc_do_remap(struct dgnc_board *brd)
* As each timer expires, it determines (a) whether the "transmit"
* waiter needs to be woken up, and (b) whether the poller needs to
* be rescheduled.
- *
*/
static void dgnc_poll_handler(ulong dummy)
@@ -651,9 +361,8 @@ static void dgnc_poll_handler(ulong dummy)
spin_unlock_irqrestore(&brd->bd_lock, flags);
}
- /*
- * Schedule ourself back at the nominal wakeup interval.
- */
+ /* Schedule ourselves back at the nominal wakeup interval. */
+
spin_lock_irqsave(&dgnc_poll_lock, flags);
dgnc_poll_time += dgnc_jiffies_from_ms(dgnc_poll_tick);
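
dgnc_poll_handler() is a self-rescheduling timer: each expiry services the
boards, then arms the next tick. Its skeleton, assuming the setup_timer()/
add_timer() API this driver already uses (the function name here is
illustrative):

	static void poll_handler(ulong dummy)
	{
		unsigned long flags;

		/* ... walk dgnc_board[] and service each board ... */

		spin_lock_irqsave(&dgnc_poll_lock, flags);
		dgnc_poll_time += dgnc_jiffies_from_ms(dgnc_poll_tick);
		dgnc_poll_timer.expires = dgnc_poll_time;
		spin_unlock_irqrestore(&dgnc_poll_lock, flags);

		if (!dgnc_poll_stop)
			add_timer(&dgnc_poll_timer);
	}

Advancing dgnc_poll_time by a fixed step, rather than reading jiffies on each
expiry, keeps the average poll period at dgnc_poll_tick even when an
individual run is delayed.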
@@ -669,3 +378,240 @@ static void dgnc_poll_handler(ulong dummy)
if (!dgnc_poll_stop)
add_timer(&dgnc_poll_timer);
}
+
+/* returns count (>= 0), or negative on error */
+static int dgnc_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ int rc;
+ struct dgnc_board *brd;
+
+ /* wake up and enable device */
+ rc = pci_enable_device(pdev);
+
+ if (rc)
+ return -EIO;
+
+ brd = dgnc_found_board(pdev, ent->driver_data);
+ if (IS_ERR(brd))
+ return PTR_ERR(brd);
+
+ /* Do tty device initialization. */
+
+ rc = dgnc_tty_register(brd);
+ if (rc < 0) {
+ pr_err(DRVSTR ": Can't register tty devices (%d)\n", rc);
+ goto failed;
+ }
+
+ rc = dgnc_request_irq(brd);
+ if (rc < 0) {
+ pr_err(DRVSTR ": Can't finalize board init (%d)\n", rc);
+ goto unregister_tty;
+ }
+
+ rc = dgnc_tty_init(brd);
+ if (rc < 0) {
+ pr_err(DRVSTR ": Can't init tty devices (%d)\n", rc);
+ goto free_irq;
+ }
+
+ brd->state = BOARD_READY;
+ brd->dpastatus = BD_RUNNING;
+
+ dgnc_board[dgnc_num_boards++] = brd;
+
+ return 0;
+
+free_irq:
+ dgnc_free_irq(brd);
+unregister_tty:
+ dgnc_tty_unregister(brd);
+
+failed:
+ kfree(brd);
+
+ return rc;
+}
+
+static struct pci_driver dgnc_driver = {
+ .name = "dgnc",
+ .probe = dgnc_init_one,
+ .id_table = dgnc_pci_tbl,
+};
+
+/* Start of driver. */
+
+static int dgnc_start(void)
+{
+ int rc = 0;
+ unsigned long flags;
+ struct device *dev;
+
+ /* make sure timer is initialized before we do anything else */
+ init_timer(&dgnc_poll_timer);
+
+ /*
+ * Register our base character device into the kernel.
+ * This allows the download daemon to connect to the downld device
+ * before any of the boards are init'ed.
+ *
+ * Register management/dpa devices
+ */
+ rc = register_chrdev(0, "dgnc", &dgnc_board_fops);
+ if (rc < 0) {
+ pr_err(DRVSTR ": Can't register dgnc driver device (%d)\n", rc);
+ return rc;
+ }
+ dgnc_major = rc;
+
+ dgnc_class = class_create(THIS_MODULE, "dgnc_mgmt");
+ if (IS_ERR(dgnc_class)) {
+ rc = PTR_ERR(dgnc_class);
+ pr_err(DRVSTR ": Can't create dgnc_mgmt class (%d)\n", rc);
+ goto failed_class;
+ }
+
+ dev = device_create(dgnc_class, NULL,
+ MKDEV(dgnc_major, 0),
+ NULL, "dgnc_mgmt");
+ if (IS_ERR(dev)) {
+ rc = PTR_ERR(dev);
+ pr_err(DRVSTR ": Can't create device (%d)\n", rc);
+ goto failed_device;
+ }
+
+ /* Start the poller */
+ spin_lock_irqsave(&dgnc_poll_lock, flags);
+ setup_timer(&dgnc_poll_timer, dgnc_poll_handler, 0);
+ dgnc_poll_time = jiffies + dgnc_jiffies_from_ms(dgnc_poll_tick);
+ dgnc_poll_timer.expires = dgnc_poll_time;
+ spin_unlock_irqrestore(&dgnc_poll_lock, flags);
+
+ add_timer(&dgnc_poll_timer);
+
+ return 0;
+
+failed_device:
+ class_destroy(dgnc_class);
+failed_class:
+ unregister_chrdev(dgnc_major, "dgnc");
+ return rc;
+}
+
+/*
+ * dgnc_cleanup_board()
+ *
+ * Free all the memory associated with a board
+ */
+static void dgnc_cleanup_board(struct dgnc_board *brd)
+{
+ int i = 0;
+
+ if (!brd || brd->magic != DGNC_BOARD_MAGIC)
+ return;
+
+ switch (brd->device) {
+ case PCI_DEVICE_CLASSIC_4_DID:
+ case PCI_DEVICE_CLASSIC_8_DID:
+ case PCI_DEVICE_CLASSIC_4_422_DID:
+ case PCI_DEVICE_CLASSIC_8_422_DID:
+
+ /* Tell card not to interrupt anymore. */
+ outb(0, brd->iobase + 0x4c);
+ break;
+
+ default:
+ break;
+ }
+
+ if (brd->irq)
+ free_irq(brd->irq, brd);
+
+ tasklet_kill(&brd->helper_tasklet);
+
+ if (brd->re_map_membase) {
+ iounmap(brd->re_map_membase);
+ brd->re_map_membase = NULL;
+ }
+
+ /* Free all allocated channels structs */
+ for (i = 0; i < MAXPORTS ; i++) {
+ if (brd->channels[i]) {
+ kfree(brd->channels[i]->ch_rqueue);
+ kfree(brd->channels[i]->ch_equeue);
+ kfree(brd->channels[i]->ch_wqueue);
+ kfree(brd->channels[i]);
+ brd->channels[i] = NULL;
+ }
+ }
+
+ dgnc_board[brd->boardnum] = NULL;
+
+ kfree(brd);
+}
+
+/* Driver load/unload functions */
+
+static void cleanup(void)
+{
+ int i;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dgnc_poll_lock, flags);
+ dgnc_poll_stop = 1;
+ spin_unlock_irqrestore(&dgnc_poll_lock, flags);
+
+ /* Turn off poller right away. */
+ del_timer_sync(&dgnc_poll_timer);
+
+ device_destroy(dgnc_class, MKDEV(dgnc_major, 0));
+ class_destroy(dgnc_class);
+ unregister_chrdev(dgnc_major, "dgnc");
+
+ for (i = 0; i < dgnc_num_boards; ++i) {
+ dgnc_cleanup_tty(dgnc_board[i]);
+ dgnc_cleanup_board(dgnc_board[i]);
+ }
+}
+
+/*
+ * dgnc_cleanup_module()
+ *
+ * Module unload. This is where it all ends.
+ */
+static void __exit dgnc_cleanup_module(void)
+{
+ cleanup();
+ pci_unregister_driver(&dgnc_driver);
+}
+
+/*
+ * dgnc_init_module()
+ *
+ * Module load. This is where it all starts.
+ */
+static int __init dgnc_init_module(void)
+{
+ int rc;
+
+ /* Initialize global stuff */
+
+ rc = dgnc_start();
+
+ if (rc < 0)
+ return rc;
+
+ /* Find and configure all the cards */
+
+ rc = pci_register_driver(&dgnc_driver);
+ if (rc) {
+ pr_warn("WARNING: dgnc driver load failed. No Digi Neo or Classic boards found.\n");
+ cleanup();
+ return rc;
+ }
+
+ return 0;
+}
+
+module_init(dgnc_init_module);
+module_exit(dgnc_cleanup_module);
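
Most of this dgnc_driver.c diff is pure code motion: reordering the functions
so each is defined before its first use lets the patch delete the block of
static prototypes near the top of the file, alongside dropping the deprecated
sysfs support. The resulting shape is the idiomatic one for a PCI driver,
sketched here with placeholder foo_* names:

	static int foo_probe(struct pci_dev *pdev,
			     const struct pci_device_id *ent)
	{
		/* ... board setup ... */
		return 0;
	}

	static struct pci_driver foo_driver = {	/* defined after its probe() */
		.name	  = "foo",
		.probe	  = foo_probe,
		.id_table = foo_pci_tbl,
	};

With definitions ordered use-after-definition, no forward declarations are
needed at all.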
diff --git a/drivers/staging/dgnc/dgnc_driver.h b/drivers/staging/dgnc/dgnc_driver.h
index 879202663a98..c8119f2fe881 100644
--- a/drivers/staging/dgnc/dgnc_driver.h
+++ b/drivers/staging/dgnc/dgnc_driver.h
@@ -12,11 +12,8 @@
* implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
* PURPOSE. See the GNU General Public License for more details.
*
- *************************************************************************
- *
* Driver includes
- *
- *************************************************************************/
+ */
#ifndef __DGNC_DRIVER_H
#define __DGNC_DRIVER_H
@@ -26,19 +23,14 @@
#include <linux/interrupt.h>
#include "digi.h" /* Digi specific ioctl header */
-#include "dgnc_sysfs.h" /* Support for SYSFS */
-/*************************************************************************
- *
- * Driver defines
- *
- *************************************************************************/
+/* Driver defines */
-/* Driver identification and error statments */
-#define PROCSTR "dgnc" /* /proc entries */
-#define DEVSTR "/dev/dg/dgnc" /* /dev entries */
-#define DRVSTR "dgnc" /* Driver name string */
-#define DG_PART "40002369_F" /* RPM part number */
+/* Driver identification and error statements */
+#define PROCSTR "dgnc" /* /proc entries */
+#define DEVSTR "/dev/dg/dgnc" /* /dev entries */
+#define DRVSTR "dgnc" /* Driver name string */
+#define DG_PART "40002369_F" /* RPM part number */
#define TRC_TO_CONSOLE 1
@@ -61,7 +53,8 @@
#define PORT_NUM(dev) ((dev) & 0x7f)
#define IS_PRINT(dev) (((dev) & 0xff) >= 0x80)
-/* MAX number of stop characters we will send
+/*
+ * MAX number of stop characters we will send
* when our read queue is getting full
*/
#define MAX_STOPS_SENT 5
@@ -88,35 +81,28 @@
#define _POSIX_VDISABLE '\0'
#endif
-/*
- * All the possible states the driver can be while being loaded.
- */
+/* All the possible states the driver can be in while being loaded. */
+
enum {
DRIVER_INITIALIZED = 0,
DRIVER_READY
};
-/*
- * All the possible states the board can be while booting up.
- */
+/* All the possible states the board can be in while booting up. */
+
enum {
BOARD_FAILED = 0,
BOARD_FOUND,
BOARD_READY
};
-/*************************************************************************
- *
- * Structures and closely related defines.
- *
- *************************************************************************/
+/* Structures and closely related defines. */
struct dgnc_board;
struct channel_t;
-/************************************************************************
- * Per board operations structure *
- ************************************************************************/
+/* Per board operations structure */
+
struct board_ops {
void (*tasklet)(unsigned long data);
irqreturn_t (*intr)(int irq, void *voidbrd);
@@ -138,16 +124,14 @@ struct board_ops {
void (*send_immediate_char)(struct channel_t *ch, unsigned char);
};
-/************************************************************************
- * Device flag definitions for bd_flags.
- ************************************************************************/
+/* Device flag definitions for bd_flags. */
+
#define BD_IS_PCI_EXPRESS 0x0001 /* Is a PCI Express board */
-/*
- * Per-board information
- */
+/* Per-board information */
+
struct dgnc_board {
- int magic; /* Board Magic number. */
+ int magic; /* Board Magic number. */
int boardnum; /* Board number: 0-32 */
int type; /* Type of board */
@@ -220,62 +204,56 @@ struct dgnc_board {
};
-/************************************************************************
- * Unit flag definitions for un_flags.
- ************************************************************************/
-#define UN_ISOPEN 0x0001 /* Device is open */
-#define UN_CLOSING 0x0002 /* Line is being closed */
-#define UN_IMM 0x0004 /* Service immediately */
-#define UN_BUSY 0x0008 /* Some work this channel */
-#define UN_BREAKI 0x0010 /* Input break received */
+/* Unit flag definitions for un_flags. */
+#define UN_ISOPEN 0x0001 /* Device is open */
+#define UN_CLOSING 0x0002 /* Line is being closed */
+#define UN_IMM 0x0004 /* Service immediately */
+#define UN_BUSY 0x0008 /* Some work this channel */
+#define UN_BREAKI 0x0010 /* Input break received */
#define UN_PWAIT 0x0020 /* Printer waiting for terminal */
-#define UN_TIME 0x0040 /* Waiting on time */
-#define UN_EMPTY 0x0080 /* Waiting output queue empty */
+#define UN_TIME 0x0040 /* Waiting on time */
+#define UN_EMPTY 0x0080 /* Waiting output queue empty */
#define UN_LOW 0x0100 /* Waiting output low water mark*/
-#define UN_EXCL_OPEN 0x0200 /* Open for exclusive use */
-#define UN_WOPEN 0x0400 /* Device waiting for open */
-#define UN_WIOCTL 0x0800 /* Device waiting for open */
-#define UN_HANGUP 0x8000 /* Carrier lost */
+#define UN_EXCL_OPEN 0x0200 /* Open for exclusive use */
+#define UN_WOPEN 0x0400 /* Device waiting for open */
+#define UN_WIOCTL 0x0800 /* Device waiting for open */
+#define UN_HANGUP 0x8000 /* Carrier lost */
struct device;
-/************************************************************************
- * Structure for terminal or printer unit.
- ************************************************************************/
+/* Structure for terminal or printer unit. */
struct un_t {
- int magic; /* Unit Magic Number. */
+ int magic; /* Unit Magic Number. */
struct channel_t *un_ch;
ulong un_time;
uint un_type;
- uint un_open_count; /* Counter of opens to port */
- struct tty_struct *un_tty;/* Pointer to unit tty structure */
- uint un_flags; /* Unit flags */
+ uint un_open_count; /* Counter of opens to port */
+ struct tty_struct *un_tty; /* Pointer to unit tty structure */
+ uint un_flags; /* Unit flags */
wait_queue_head_t un_flags_wait; /* Place to sleep to wait on unit */
- uint un_dev; /* Minor device number */
+ uint un_dev; /* Minor device number */
struct device *un_sysfs;
};
-/************************************************************************
- * Device flag definitions for ch_flags.
- ************************************************************************/
-#define CH_PRON 0x0001 /* Printer on string */
-#define CH_STOP 0x0002 /* Output is stopped */
-#define CH_STOPI 0x0004 /* Input is stopped */
-#define CH_CD 0x0008 /* Carrier is present */
-#define CH_FCAR 0x0010 /* Carrier forced on */
-#define CH_HANGUP 0x0020 /* Hangup received */
-
-#define CH_RECEIVER_OFF 0x0040 /* Receiver is off */
-#define CH_OPENING 0x0080 /* Port in fragile open state */
-#define CH_CLOSING 0x0100 /* Port in fragile close state */
-#define CH_FIFO_ENABLED 0x0200 /* Port has FIFOs enabled */
-#define CH_TX_FIFO_EMPTY 0x0400 /* TX Fifo is completely empty */
-#define CH_TX_FIFO_LWM 0x0800 /* TX Fifo is below Low Water */
-#define CH_BREAK_SENDING 0x1000 /* Break is being sent */
-#define CH_LOOPBACK 0x2000 /* Channel is in lookback mode */
+/* Device flag definitions for ch_flags. */
+#define CH_PRON 0x0001 /* Printer on string */
+#define CH_STOP 0x0002 /* Output is stopped */
+#define CH_STOPI 0x0004 /* Input is stopped */
+#define CH_CD 0x0008 /* Carrier is present */
+#define CH_FCAR 0x0010 /* Carrier forced on */
+#define CH_HANGUP 0x0020 /* Hangup received */
+
+#define CH_RECEIVER_OFF 0x0040 /* Receiver is off */
+#define CH_OPENING 0x0080 /* Port in fragile open state */
+#define CH_CLOSING 0x0100 /* Port in fragile close state */
+#define CH_FIFO_ENABLED 0x0200 /* Port has FIFOs enabled */
+#define CH_TX_FIFO_EMPTY 0x0400 /* TX Fifo is completely empty */
+#define CH_TX_FIFO_LWM 0x0800 /* TX Fifo is below Low Water */
+#define CH_BREAK_SENDING 0x1000 /* Break is being sent */
+#define CH_LOOPBACK 0x2000 /* Channel is in loopback mode */
#define CH_BAUD0 0x08000 /* Used for checking B0 transitions */
-#define CH_FORCED_STOP 0x20000 /* Output is forcibly stopped */
-#define CH_FORCED_STOPI 0x40000 /* Input is forcibly stopped */
+#define CH_FORCED_STOP 0x20000 /* Output is forcibly stopped */
+#define CH_FORCED_STOPI 0x40000 /* Input is forcibly stopped */
/* Our Read/Error/Write queue sizes */
#define RQUEUEMASK 0x1FFF /* 8 K - 1 */
@@ -285,43 +263,41 @@ struct un_t {
#define EQUEUESIZE RQUEUESIZE
#define WQUEUESIZE (WQUEUEMASK + 1)
-/************************************************************************
- * Channel information structure.
- ************************************************************************/
+/* Channel information structure. */
struct channel_t {
- int magic; /* Channel Magic Number */
- struct dgnc_board *ch_bd; /* Board structure pointer */
+ int magic; /* Channel Magic Number */
+ struct dgnc_board *ch_bd; /* Board structure pointer */
struct digi_t ch_digi; /* Transparent Print structure */
- struct un_t ch_tun; /* Terminal unit info */
- struct un_t ch_pun; /* Printer unit info */
+ struct un_t ch_tun; /* Terminal unit info */
+ struct un_t ch_pun; /* Printer unit info */
spinlock_t ch_lock; /* provide for serialization */
wait_queue_head_t ch_flags_wait;
- uint ch_portnum; /* Port number, 0 offset. */
- uint ch_open_count; /* open count */
- uint ch_flags; /* Channel flags */
+ uint ch_portnum; /* Port number, 0 offset. */
+ uint ch_open_count; /* open count */
+ uint ch_flags; /* Channel flags */
ulong ch_close_delay; /* How long we should
* drop RTS/DTR for
*/
- ulong ch_cpstime; /* Time for CPS calculations */
+ ulong ch_cpstime; /* Time for CPS calculations */
- tcflag_t ch_c_iflag; /* channel iflags */
- tcflag_t ch_c_cflag; /* channel cflags */
- tcflag_t ch_c_oflag; /* channel oflags */
- tcflag_t ch_c_lflag; /* channel lflags */
- unsigned char ch_stopc; /* Stop character */
- unsigned char ch_startc; /* Start character */
+ tcflag_t ch_c_iflag; /* channel iflags */
+ tcflag_t ch_c_cflag; /* channel cflags */
+ tcflag_t ch_c_oflag; /* channel oflags */
+ tcflag_t ch_c_lflag; /* channel lflags */
+ unsigned char ch_stopc; /* Stop character */
+ unsigned char ch_startc; /* Start character */
uint ch_old_baud; /* Cache of the current baud */
uint ch_custom_speed;/* Custom baud, if set */
uint ch_wopen; /* Waiting for open process cnt */
- unsigned char ch_mostat; /* FEP output modem status */
- unsigned char ch_mistat; /* FEP input modem status */
+ unsigned char ch_mostat; /* FEP output modem status */
+ unsigned char ch_mistat; /* FEP input modem status */
struct neo_uart_struct __iomem *ch_neo_uart; /* Pointer to the
* "mapped" UART struct
@@ -347,10 +323,10 @@ struct channel_t {
ulong ch_rxcount; /* total of data received so far */
ulong ch_txcount; /* total of data transmitted so far */
- unsigned char ch_r_tlevel; /* Receive Trigger level */
- unsigned char ch_t_tlevel; /* Transmit Trigger level */
+ unsigned char ch_r_tlevel; /* Receive Trigger level */
+ unsigned char ch_t_tlevel; /* Transmit Trigger level */
- unsigned char ch_r_watermark; /* Receive Watermark */
+ unsigned char ch_r_watermark; /* Receive Watermark */
ulong ch_stop_sending_break; /* Time we should STOP
* sending a break
@@ -374,16 +350,15 @@ struct channel_t {
};
-/*
- * Our Global Variables.
- */
+/* Our Global Variables. */
+
extern uint dgnc_major; /* Our driver/mgmt major */
extern int dgnc_poll_tick; /* Poll interval - 20 ms */
extern spinlock_t dgnc_global_lock; /* Driver global spinlock */
extern spinlock_t dgnc_poll_lock; /* Poll scheduling lock */
extern uint dgnc_num_boards; /* Total number of boards */
-extern struct dgnc_board *dgnc_board[MAXBOARDS]; /* Array of board
- * structs
- */
+extern struct dgnc_board *dgnc_board[MAXBOARDS];/* Array of board
+ * structs
+ */
#endif
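
These UN_* and CH_* values are independent mask bits, typically tested and
updated under ch_lock rather than with atomic bitops. A typical sequence,
condensed from the tty-layer usage (illustrative):

	unsigned long flags;

	spin_lock_irqsave(&ch->ch_lock, flags);
	un->un_flags |= UN_EMPTY;		/* wait for output to drain */
	if (ch->ch_flags & CH_BREAK_SENDING)	/* test a channel state bit */
		ch->ch_flags &= ~CH_BREAK_SENDING;
	spin_unlock_irqrestore(&ch->ch_lock, flags);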
diff --git a/drivers/staging/dgnc/dgnc_mgmt.c b/drivers/staging/dgnc/dgnc_mgmt.c
index 683c098391d9..9d9b15d6358a 100644
--- a/drivers/staging/dgnc/dgnc_mgmt.c
+++ b/drivers/staging/dgnc/dgnc_mgmt.c
@@ -13,13 +13,11 @@
* PURPOSE. See the GNU General Public License for more details.
*/
-/************************************************************************
- *
+/*
* This file implements the mgmt functionality for the
* Neo and ClassicBoard based product lines.
- *
- ************************************************************************
*/
+
#include <linux/kernel.h>
#include <linux/ctype.h>
#include <linux/sched.h> /* For jiffies, task states */
diff --git a/drivers/staging/dgnc/dgnc_neo.c b/drivers/staging/dgnc/dgnc_neo.c
index 5becb3741b67..3eefefe53174 100644
--- a/drivers/staging/dgnc/dgnc_neo.c
+++ b/drivers/staging/dgnc/dgnc_neo.c
@@ -107,7 +107,8 @@ static inline void neo_set_cts_flow_control(struct channel_t *ch)
/* Turn off auto Xon flow control */
efr &= ~UART_17158_EFR_IXON;
- /* Why? Because Exar's spec says we have to zero it
+ /*
+ * Why? Because Exar's spec says we have to zero it
* out before setting it
*/
writeb(0, &ch->ch_neo_uart->efr);
@@ -145,7 +146,8 @@ static inline void neo_set_rts_flow_control(struct channel_t *ch)
ier &= ~UART_17158_IER_XOFF;
efr &= ~UART_17158_EFR_IXOFF;
- /* Why? Because Exar's spec says we have to zero it
+ /*
+ * Why? Because Exar's spec says we have to zero it
* out before setting it
*/
writeb(0, &ch->ch_neo_uart->efr);
@@ -185,7 +187,8 @@ static inline void neo_set_ixon_flow_control(struct channel_t *ch)
/* Turn on auto Xon flow control */
efr |= (UART_17158_EFR_ECB | UART_17158_EFR_IXON);
- /* Why? Because Exar's spec says we have to zero it
+ /*
+ * Why? Because Exar's spec says we have to zero it
* out before setting it
*/
writeb(0, &ch->ch_neo_uart->efr);
@@ -225,7 +228,8 @@ static inline void neo_set_ixoff_flow_control(struct channel_t *ch)
ier |= UART_17158_IER_XOFF;
efr |= (UART_17158_EFR_ECB | UART_17158_EFR_IXOFF);
- /* Why? Because Exar's spec says we have to zero it
+ /*
+ * Why? Because Exar's spec says we have to zero it
* out before setting it
*/
writeb(0, &ch->ch_neo_uart->efr);
@@ -268,7 +272,8 @@ static inline void neo_set_no_input_flow_control(struct channel_t *ch)
else
efr &= ~(UART_17158_EFR_ECB | UART_17158_EFR_IXOFF);
- /* Why? Because Exar's spec says we have to zero
+ /*
+ * Why? Because Exar's spec says we have to zero
* it out before setting it
*/
writeb(0, &ch->ch_neo_uart->efr);
@@ -308,7 +313,8 @@ static inline void neo_set_no_output_flow_control(struct channel_t *ch)
else
efr &= ~(UART_17158_EFR_ECB | UART_17158_EFR_IXON);
- /* Why? Because Exar's spec says we have to zero it
+ /*
+ * Why? Because Exar's spec says we have to zero it
* out before setting it
*/
writeb(0, &ch->ch_neo_uart->efr);
@@ -351,9 +357,8 @@ static inline void neo_set_new_start_stop_chars(struct channel_t *ch)
neo_pci_posting_flush(ch->ch_bd);
}
-/*
- * No locks are assumed to be held when calling this function.
- */
+/* No locks are assumed to be held when calling this function. */
+
static inline void neo_clear_break(struct channel_t *ch, int force)
{
unsigned long flags;
@@ -381,9 +386,8 @@ static inline void neo_clear_break(struct channel_t *ch, int force)
spin_unlock_irqrestore(&ch->ch_lock, flags);
}
-/*
- * Parse the ISR register.
- */
+/* Parse the ISR register. */
+
static inline void neo_parse_isr(struct dgnc_board *brd, uint port)
{
struct channel_t *ch;
@@ -412,8 +416,8 @@ static inline void neo_parse_isr(struct dgnc_board *brd, uint port)
if (isr & (UART_17158_IIR_RDI_TIMEOUT | UART_IIR_RDI)) {
/* Read data from uart -> queue */
neo_copy_data_from_uart_to_queue(ch);
-
- /* Call our tty layer to enforce queue
+ /*
+ * Call our tty layer to enforce queue
* flow control if needed.
*/
spin_lock_irqsave(&ch->ch_lock, flags);
@@ -438,7 +442,8 @@ static inline void neo_parse_isr(struct dgnc_board *brd, uint port)
* one it was, so we can suspend or resume data flow.
*/
if (cause == UART_17158_XON_DETECT) {
- /* Is output stopped right now, if so,
+ /*
+ * Is output stopped right now, if so,
* resume it
*/
if (brd->channels[port]->ch_flags & CH_STOP) {
@@ -609,9 +614,8 @@ static void neo_param(struct tty_struct *tty)
if (!bd || bd->magic != DGNC_BOARD_MAGIC)
return;
- /*
- * If baud rate is zero, flush queues, and set mval to drop DTR.
- */
+ /* If baud rate is zero, flush queues, and set mval to drop DTR. */
+
if ((ch->ch_c_cflag & (CBAUD)) == 0) {
ch->ch_r_head = 0;
ch->ch_r_tail = 0;
@@ -672,7 +676,8 @@ static void neo_param(struct tty_struct *tty)
4800, 9600, 19200, 38400 }
};
- /* Only use the TXPrint baud rate if the terminal unit
+ /*
+ * Only use the TXPrint baud rate if the terminal unit
* is NOT open
*/
if (!(ch->ch_tun.un_flags & UN_ISOPEN) &&
@@ -797,7 +802,8 @@ static void neo_param(struct tty_struct *tty)
if (ch->ch_digi.digi_flags & CTSPACE || ch->ch_c_cflag & CRTSCTS) {
neo_set_cts_flow_control(ch);
} else if (ch->ch_c_iflag & IXON) {
- /* If start/stop is set to disable, then we should
+ /*
+ * If start/stop is set to disable, then we should
* disable flow control
*/
if ((ch->ch_startc == _POSIX_VDISABLE) ||
@@ -812,7 +818,8 @@ static void neo_param(struct tty_struct *tty)
if (ch->ch_digi.digi_flags & RTSPACE || ch->ch_c_cflag & CRTSCTS) {
neo_set_rts_flow_control(ch);
} else if (ch->ch_c_iflag & IXOFF) {
- /* If start/stop is set to disable, then we should
+ /*
+ * If start/stop is set to disable, then we should
* disable flow control
*/
if ((ch->ch_startc == _POSIX_VDISABLE) ||
@@ -840,9 +847,8 @@ static void neo_param(struct tty_struct *tty)
neo_parse_modem(ch, readb(&ch->ch_neo_uart->msr));
}
-/*
- * Our board poller function.
- */
+/* Our board poller function. */
+
static void neo_tasklet(unsigned long data)
{
struct dgnc_board *bd = (struct dgnc_board *)data;
@@ -867,9 +873,8 @@ static void neo_tasklet(unsigned long data)
*/
spin_lock_irqsave(&bd->bd_intr_lock, flags);
- /*
- * If board is ready, parse deeper to see if there is anything to do.
- */
+ /* If board is ready, parse deeper to see if there is anything to do. */
+
if ((state == BOARD_READY) && (ports > 0)) {
/* Loop on each port */
for (i = 0; i < ports; i++) {
@@ -997,9 +1002,9 @@ static irqreturn_t neo_intr(int irq, void *voidbrd)
break;
case UART_17158_RX_LINE_STATUS:
- /*
- * RXRDY and RX LINE Status (logic OR of LSR[4:1])
- */
+
+ /* RXRDY and RX LINE Status (logic OR of LSR[4:1]) */
+
neo_parse_lsr(brd, port);
break;
@@ -1022,9 +1027,9 @@ static irqreturn_t neo_intr(int irq, void *voidbrd)
break;
case UART_17158_MSR:
- /*
- * MSR or flow control was seen.
- */
+
+ /* MSR or flow control was seen. */
+
neo_parse_isr(brd, port);
break;
@@ -1041,9 +1046,8 @@ static irqreturn_t neo_intr(int irq, void *voidbrd)
port++;
}
- /*
- * Schedule tasklet to more in-depth servicing at a better time.
- */
+ /* Schedule tasklet to more in-depth servicing at a better time. */
+
tasklet_schedule(&brd->helper_tasklet);
spin_unlock_irqrestore(&brd->bd_intr_lock, flags);
@@ -1238,9 +1242,8 @@ static void neo_copy_data_from_uart_to_queue(struct channel_t *ch)
ch->ch_flags |= (CH_TX_FIFO_EMPTY | CH_TX_FIFO_LWM);
}
- /*
- * Discard character if we are ignoring the error mask.
- */
+ /* Discard character if we are ignoring the error mask. */
+
if (linestatus & error_mask) {
unsigned char discard;
@@ -1279,9 +1282,8 @@ static void neo_copy_data_from_uart_to_queue(struct channel_t *ch)
ch->ch_rxcount++;
}
- /*
- * Write new final heads to channel structure.
- */
+ /* Write new final heads to channel structure. */
+
ch->ch_r_head = head & RQUEUEMASK;
ch->ch_e_head = head & EQUEUEMASK;
@@ -1412,9 +1414,8 @@ static void neo_copy_data_from_queue_to_uart(struct channel_t *ch)
(ch->ch_flags & CH_BREAK_SENDING))
goto exit_unlock;
- /*
- * If FIFOs are disabled. Send data directly to txrx register
- */
+ /* If FIFOs are disabled, send data directly to the txrx register. */
+
if (!(ch->ch_flags & CH_FIFO_ENABLED)) {
unsigned char lsrbits = readb(&ch->ch_neo_uart->lsr);
@@ -1458,9 +1459,8 @@ static void neo_copy_data_from_queue_to_uart(struct channel_t *ch)
goto exit_unlock;
}
- /*
- * We have to do it this way, because of the EXAR TXFIFO count bug.
- */
+ /* We have to do it this way, because of the EXAR TXFIFO count bug. */
+
if ((ch->ch_bd->dvid & 0xf0) < UART_XR17E158_DVID) {
if (!(ch->ch_flags & (CH_TX_FIFO_EMPTY | CH_TX_FIFO_LWM)))
goto exit_unlock;
@@ -1645,9 +1645,8 @@ static void neo_send_stop_character(struct channel_t *ch)
}
}
-/*
- * neo_uart_init
- */
+/* neo_uart_init */
+
static void neo_uart_init(struct channel_t *ch)
{
writeb(0, &ch->ch_neo_uart->ier);
@@ -1668,9 +1667,8 @@ static void neo_uart_init(struct channel_t *ch)
neo_pci_posting_flush(ch->ch_bd);
}
-/*
- * Make the UART completely turn off.
- */
+/* Make the UART completely turn off. */
+
static void neo_uart_off(struct channel_t *ch)
{
/* Turn off UART enhanced bits */
@@ -1705,9 +1703,8 @@ static uint neo_get_uart_bytes_left(struct channel_t *ch)
/* Channel lock MUST be held by the calling function! */
static void neo_send_break(struct channel_t *ch, int msecs)
{
- /*
- * If we receive a time of 0, this means turn off the break.
- */
+ /* If we receive a time of 0, this means turn off the break. */
+
if (msecs == 0) {
if (ch->ch_flags & CH_BREAK_SENDING) {
unsigned char temp = readb(&ch->ch_neo_uart->lcr);
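
Every neo_set_*_flow_control() variant above shares the read-modify-zero-write
dance the repeated comment describes: per Exar's spec, the 17C158 EFR must be
written to zero before the new image is latched. Distilled (an illustrative
composite, not a verbatim excerpt):

	u8 efr = readb(&ch->ch_neo_uart->efr);

	efr |= (UART_17158_EFR_ECB | UART_17158_EFR_IXON);	/* new image */
	writeb(0, &ch->ch_neo_uart->efr);	/* Exar: zero it out first  */
	writeb(efr, &ch->ch_neo_uart->efr);	/* then latch the new image */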
diff --git a/drivers/staging/dgnc/dgnc_neo.h b/drivers/staging/dgnc/dgnc_neo.h
index abddd48353d0..77ecd9baae45 100644
--- a/drivers/staging/dgnc/dgnc_neo.h
+++ b/drivers/staging/dgnc/dgnc_neo.h
@@ -18,37 +18,38 @@
#include "dgnc_driver.h"
-/************************************************************************
- * Per channel/port NEO UART structure *
- ************************************************************************
- * Base Structure Entries Usage Meanings to Host *
- * *
- * W = read write R = read only *
- * U = Unused. *
- ************************************************************************/
+/*
+ * Per channel/port NEO UART structure
+ * Base Structure Entries Usage Meanings to Host
+ *
+ * W = read write R = read only
+ * U = Unused.
+ */
struct neo_uart_struct {
- u8 txrx; /* WR RHR/THR - Holding Reg */
+ u8 txrx; /* WR RHR/THR - Holding Reg */
u8 ier; /* WR IER - Interrupt Enable Reg */
- u8 isr_fcr; /* WR ISR/FCR - Interrupt Status Reg/Fifo Control Reg */
+ u8 isr_fcr; /* WR ISR/FCR - Interrupt Status Reg/Fifo
+ * Control Reg
+ */
u8 lcr; /* WR LCR - Line Control Reg */
u8 mcr; /* WR MCR - Modem Control Reg */
u8 lsr; /* WR LSR - Line Status Reg */
u8 msr; /* WR MSR - Modem Status Reg */
u8 spr; /* WR SPR - Scratch Pad Reg */
- u8 fctr; /* WR FCTR - Feature Control Reg */
+ u8 fctr; /* WR FCTR - Feature Control Reg */
u8 efr; /* WR EFR - Enhanced Function Reg */
- u8 tfifo; /* WR TXCNT/TXTRG - Transmit FIFO Reg */
- u8 rfifo; /* WR RXCNT/RXTRG - Receive FIFO Reg */
+ u8 tfifo; /* WR TXCNT/TXTRG - Transmit FIFO Reg */
+ u8 rfifo; /* WR RXCNT/RXTRG - Receive FIFO Reg */
u8 xoffchar1; /* WR XOFF 1 - XOff Character 1 Reg */
u8 xoffchar2; /* WR XOFF 2 - XOff Character 2 Reg */
u8 xonchar1; /* WR XON 1 - Xon Character 1 Reg */
u8 xonchar2; /* WR XON 2 - XOn Character 2 Reg */
u8 reserved1[0x2ff - 0x200]; /* U Reserved by Exar */
- u8 txrxburst[64]; /* RW 64 bytes of RX/TX FIFO Data */
+ u8 txrxburst[64]; /* RW 64 bytes of RX/TX FIFO Data */
u8 reserved2[0x37f - 0x340]; /* U Reserved by Exar */
- u8 rxburst_with_errors[64]; /* R 64 bytes of RX FIFO Data + LSR */
+ u8 rxburst_with_errors[64]; /* R 64 bytes of RX FIFO Data + LSR */
};
/* Where to read the extended interrupt register (32bits instead of 8bits) */
@@ -108,7 +109,9 @@ struct neo_uart_struct {
/* 17158 Extended IIR's */
#define UART_17158_IIR_RDI_TIMEOUT 0x0C /* Receiver data TIMEOUT */
#define UART_17158_IIR_XONXOFF 0x10 /* Received an XON/XOFF char */
-#define UART_17158_IIR_HWFLOW_STATE_CHANGE 0x20 /* CTS/DSR or RTS/DTR state change */
+#define UART_17158_IIR_HWFLOW_STATE_CHANGE 0x20 /* CTS/DSR or RTS/DTR
+ * state change
+ */
#define UART_17158_IIR_FIFO_ENABLED 0xC0 /* 16550 FIFOs are Enabled */
/*
@@ -119,8 +122,12 @@ struct neo_uart_struct {
#define UART_17158_RXRDY_TIMEOUT 0x2 /* RX Ready Timeout */
#define UART_17158_TXRDY 0x3 /* TX Ready */
#define UART_17158_MSR 0x4 /* Modem State Change */
-#define UART_17158_TX_AND_FIFO_CLR 0x40 /* Transmitter Holding Reg Empty */
-#define UART_17158_RX_FIFO_DATA_ERROR 0x80 /* UART detected an RX FIFO Data error */
+#define UART_17158_TX_AND_FIFO_CLR 0x40 /* Transmitter Holding
+ * Reg Empty
+ */
+#define UART_17158_RX_FIFO_DATA_ERROR 0x80 /* UART detected an RX FIFO
+ * Data error
+ */
/*
* These are the EXTENDED definitions for the 17C158's Interrupt
@@ -130,19 +137,22 @@ struct neo_uart_struct {
#define UART_17158_EFR_IXON 0x2 /* Receiver compares Xon1/Xoff1 */
#define UART_17158_EFR_IXOFF 0x8 /* Transmit Xon1/Xoff1 */
#define UART_17158_EFR_RTSDTR 0x40 /* Auto RTS/DTR Flow Control Enable */
-#define UART_17158_EFR_CTSDSR 0x80 /* Auto CTS/DSR Flow COntrol Enable */
+#define UART_17158_EFR_CTSDSR 0x80 /* Auto CTS/DSR Flow Control Enable */
-#define UART_17158_XOFF_DETECT 0x1 /* Indicates whether chip saw an incoming XOFF char */
-#define UART_17158_XON_DETECT 0x2 /* Indicates whether chip saw an incoming XON char */
+#define UART_17158_XOFF_DETECT 0x1 /* Indicates whether chip saw an
+ * incoming XOFF char
+ */
+#define UART_17158_XON_DETECT 0x2 /* Indicates whether chip saw an
+ * incoming XON char
+ */
#define UART_17158_IER_RSVD1 0x10 /* Reserved by Exar */
#define UART_17158_IER_XOFF 0x20 /* Xoff Interrupt Enable */
#define UART_17158_IER_RTSDTR 0x40 /* Output Interrupt Enable */
#define UART_17158_IER_CTSDSR 0x80 /* Input Interrupt Enable */
-/*
- * Our Global Variables
- */
+/* Our Global Variables */
+
extern struct board_ops dgnc_neo_ops;
#endif
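
struct neo_uart_struct mirrors the chip's register window byte for byte, so a
register is reached by taking the address of the corresponding field through
the __iomem pointer and going through readb()/writeb(), never by dereferencing
the struct directly:

	struct neo_uart_struct __iomem *uart = ch->ch_neo_uart;
	u8 lsr;

	lsr = readb(&uart->lsr);		/* read Line Status        */
	writeb(ch->ch_startc, &uart->xonchar1);	/* program XON character 1 */

The accessor functions keep the loads and stores correctly sized and ordered
across the PCI bus, which plain pointer dereferences would not guarantee.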
diff --git a/drivers/staging/dgnc/dgnc_sysfs.c b/drivers/staging/dgnc/dgnc_sysfs.c
deleted file mode 100644
index 290bf6e226ac..000000000000
--- a/drivers/staging/dgnc/dgnc_sysfs.c
+++ /dev/null
@@ -1,703 +0,0 @@
-/*
- * Copyright 2004 Digi International (www.digi.com)
- * Scott H Kilau <Scott_Kilau at digi dot com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY, EXPRESS OR IMPLIED; without even the
- * implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
- * PURPOSE. See the GNU General Public License for more details.
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/ctype.h>
-#include <linux/string.h>
-#include <linux/serial_reg.h>
-#include <linux/device.h>
-#include <linux/pci.h>
-#include <linux/kdev_t.h>
-
-#include "dgnc_driver.h"
-#include "dgnc_mgmt.h"
-
-static ssize_t version_show(struct device_driver *ddp, char *buf)
-{
- return snprintf(buf, PAGE_SIZE, "%s\n", DG_PART);
-}
-static DRIVER_ATTR_RO(version);
-
-static ssize_t boards_show(struct device_driver *ddp, char *buf)
-{
- return snprintf(buf, PAGE_SIZE, "%d\n", dgnc_num_boards);
-}
-static DRIVER_ATTR_RO(boards);
-
-static ssize_t maxboards_show(struct device_driver *ddp, char *buf)
-{
- return snprintf(buf, PAGE_SIZE, "%d\n", MAXBOARDS);
-}
-static DRIVER_ATTR_RO(maxboards);
-
-static ssize_t pollrate_show(struct device_driver *ddp, char *buf)
-{
- return snprintf(buf, PAGE_SIZE, "%dms\n", dgnc_poll_tick);
-}
-
-static ssize_t pollrate_store(struct device_driver *ddp,
- const char *buf, size_t count)
-{
- unsigned long flags;
- int tick;
- int ret;
-
- ret = sscanf(buf, "%d\n", &tick);
- if (ret != 1)
- return -EINVAL;
-
- spin_lock_irqsave(&dgnc_poll_lock, flags);
- dgnc_poll_tick = tick;
- spin_unlock_irqrestore(&dgnc_poll_lock, flags);
-
- return count;
-}
-static DRIVER_ATTR_RW(pollrate);
-
-void dgnc_create_driver_sysfiles(struct pci_driver *dgnc_driver)
-{
- int rc = 0;
- struct device_driver *driverfs = &dgnc_driver->driver;
-
- rc |= driver_create_file(driverfs, &driver_attr_version);
- rc |= driver_create_file(driverfs, &driver_attr_boards);
- rc |= driver_create_file(driverfs, &driver_attr_maxboards);
- rc |= driver_create_file(driverfs, &driver_attr_pollrate);
- if (rc)
- pr_err("DGNC: sysfs driver_create_file failed!\n");
-}
-
-void dgnc_remove_driver_sysfiles(struct pci_driver *dgnc_driver)
-{
- struct device_driver *driverfs = &dgnc_driver->driver;
-
- driver_remove_file(driverfs, &driver_attr_version);
- driver_remove_file(driverfs, &driver_attr_boards);
- driver_remove_file(driverfs, &driver_attr_maxboards);
- driver_remove_file(driverfs, &driver_attr_pollrate);
-}
-
-#define DGNC_VERIFY_BOARD(p, bd) \
- do { \
- if (!p) \
- return 0; \
- \
- bd = dev_get_drvdata(p); \
- if (!bd || bd->magic != DGNC_BOARD_MAGIC) \
- return 0; \
- if (bd->state != BOARD_READY) \
- return 0; \
- } while (0)
-
-static ssize_t vpd_show(struct device *p, struct device_attribute *attr,
- char *buf)
-{
- struct dgnc_board *bd;
- int count = 0;
- int i = 0;
-
- DGNC_VERIFY_BOARD(p, bd);
-
- count += sprintf(buf + count,
- "\n 0 1 2 3 4 5 6 7 8 9 A B C D E F");
- for (i = 0; i < 0x40 * 2; i++) {
- if (!(i % 16))
- count += sprintf(buf + count, "\n%04X ", i * 2);
- count += sprintf(buf + count, "%02X ", bd->vpd[i]);
- }
- count += sprintf(buf + count, "\n");
-
- return count;
-}
-static DEVICE_ATTR_RO(vpd);
-
-static ssize_t serial_number_show(struct device *p,
- struct device_attribute *attr, char *buf)
-{
- struct dgnc_board *bd;
- int count = 0;
-
- DGNC_VERIFY_BOARD(p, bd);
-
- if (bd->serial_num[0] == '\0')
- count += sprintf(buf + count, "<UNKNOWN>\n");
- else
- count += sprintf(buf + count, "%s\n", bd->serial_num);
-
- return count;
-}
-static DEVICE_ATTR_RO(serial_number);
-
-static ssize_t ports_state_show(struct device *p,
- struct device_attribute *attr, char *buf)
-{
- struct dgnc_board *bd;
- int count = 0;
- int i = 0;
-
- DGNC_VERIFY_BOARD(p, bd);
-
- for (i = 0; i < bd->nasync; i++) {
- count += snprintf(buf + count, PAGE_SIZE - count,
- "%d %s\n", bd->channels[i]->ch_portnum,
- bd->channels[i]->ch_open_count ? "Open" : "Closed");
- }
- return count;
-}
-static DEVICE_ATTR_RO(ports_state);
-
-static ssize_t ports_baud_show(struct device *p,
- struct device_attribute *attr, char *buf)
-{
- struct dgnc_board *bd;
- int count = 0;
- int i = 0;
-
- DGNC_VERIFY_BOARD(p, bd);
-
- for (i = 0; i < bd->nasync; i++) {
- count += snprintf(buf + count, PAGE_SIZE - count,
- "%d %d\n", bd->channels[i]->ch_portnum,
- bd->channels[i]->ch_old_baud);
- }
- return count;
-}
-static DEVICE_ATTR_RO(ports_baud);
-
-static ssize_t ports_msignals_show(struct device *p,
- struct device_attribute *attr, char *buf)
-{
- struct dgnc_board *bd;
- int count = 0;
- int i = 0;
-
- DGNC_VERIFY_BOARD(p, bd);
-
- for (i = 0; i < bd->nasync; i++) {
- struct channel_t *ch = bd->channels[i];
-
- if (ch->ch_open_count) {
- count += snprintf(buf + count, PAGE_SIZE - count,
- "%d %s %s %s %s %s %s\n",
- ch->ch_portnum,
- (ch->ch_mostat & UART_MCR_RTS) ? "RTS" : "",
- (ch->ch_mistat & UART_MSR_CTS) ? "CTS" : "",
- (ch->ch_mostat & UART_MCR_DTR) ? "DTR" : "",
- (ch->ch_mistat & UART_MSR_DSR) ? "DSR" : "",
- (ch->ch_mistat & UART_MSR_DCD) ? "DCD" : "",
- (ch->ch_mistat & UART_MSR_RI) ? "RI" : "");
- } else {
- count += snprintf(buf + count, PAGE_SIZE - count,
- "%d\n", ch->ch_portnum);
- }
- }
- return count;
-}
-static DEVICE_ATTR_RO(ports_msignals);
-
-static ssize_t ports_iflag_show(struct device *p,
- struct device_attribute *attr, char *buf)
-{
- struct dgnc_board *bd;
- int count = 0;
- int i = 0;
-
- DGNC_VERIFY_BOARD(p, bd);
-
- for (i = 0; i < bd->nasync; i++) {
- count += snprintf(buf + count, PAGE_SIZE - count, "%d %x\n",
- bd->channels[i]->ch_portnum,
- bd->channels[i]->ch_c_iflag);
- }
- return count;
-}
-static DEVICE_ATTR_RO(ports_iflag);
-
-static ssize_t ports_cflag_show(struct device *p,
- struct device_attribute *attr, char *buf)
-{
- struct dgnc_board *bd;
- int count = 0;
- int i = 0;
-
- DGNC_VERIFY_BOARD(p, bd);
-
- for (i = 0; i < bd->nasync; i++) {
- count += snprintf(buf + count, PAGE_SIZE - count, "%d %x\n",
- bd->channels[i]->ch_portnum,
- bd->channels[i]->ch_c_cflag);
- }
- return count;
-}
-static DEVICE_ATTR_RO(ports_cflag);
-
-static ssize_t ports_oflag_show(struct device *p,
- struct device_attribute *attr, char *buf)
-{
- struct dgnc_board *bd;
- int count = 0;
- int i = 0;
-
- DGNC_VERIFY_BOARD(p, bd);
-
- for (i = 0; i < bd->nasync; i++) {
- count += snprintf(buf + count, PAGE_SIZE - count, "%d %x\n",
- bd->channels[i]->ch_portnum,
- bd->channels[i]->ch_c_oflag);
- }
- return count;
-}
-static DEVICE_ATTR_RO(ports_oflag);
-
-static ssize_t ports_lflag_show(struct device *p,
- struct device_attribute *attr, char *buf)
-{
- struct dgnc_board *bd;
- int count = 0;
- int i = 0;
-
- DGNC_VERIFY_BOARD(p, bd);
-
- for (i = 0; i < bd->nasync; i++) {
- count += snprintf(buf + count, PAGE_SIZE - count, "%d %x\n",
- bd->channels[i]->ch_portnum,
- bd->channels[i]->ch_c_lflag);
- }
- return count;
-}
-static DEVICE_ATTR_RO(ports_lflag);
-
-static ssize_t ports_digi_flag_show(struct device *p,
- struct device_attribute *attr, char *buf)
-{
- struct dgnc_board *bd;
- int count = 0;
- int i = 0;
-
- DGNC_VERIFY_BOARD(p, bd);
-
- for (i = 0; i < bd->nasync; i++) {
- count += snprintf(buf + count, PAGE_SIZE - count, "%d %x\n",
- bd->channels[i]->ch_portnum,
- bd->channels[i]->ch_digi.digi_flags);
- }
- return count;
-}
-static DEVICE_ATTR_RO(ports_digi_flag);
-
-static ssize_t ports_rxcount_show(struct device *p,
- struct device_attribute *attr, char *buf)
-{
- struct dgnc_board *bd;
- int count = 0;
- int i = 0;
-
- DGNC_VERIFY_BOARD(p, bd);
-
- for (i = 0; i < bd->nasync; i++) {
- count += snprintf(buf + count, PAGE_SIZE - count, "%d %ld\n",
- bd->channels[i]->ch_portnum,
- bd->channels[i]->ch_rxcount);
- }
- return count;
-}
-static DEVICE_ATTR_RO(ports_rxcount);
-
-static ssize_t ports_txcount_show(struct device *p,
- struct device_attribute *attr, char *buf)
-{
- struct dgnc_board *bd;
- int count = 0;
- int i = 0;
-
- DGNC_VERIFY_BOARD(p, bd);
-
- for (i = 0; i < bd->nasync; i++) {
- count += snprintf(buf + count, PAGE_SIZE - count, "%d %ld\n",
- bd->channels[i]->ch_portnum,
- bd->channels[i]->ch_txcount);
- }
- return count;
-}
-static DEVICE_ATTR_RO(ports_txcount);
-
-/* this function creates the sys files that will export each signal status
- * to sysfs each value will be put in a separate filename
- */
-void dgnc_create_ports_sysfiles(struct dgnc_board *bd)
-{
- int rc = 0;
-
- dev_set_drvdata(&bd->pdev->dev, bd);
- rc |= device_create_file(&bd->pdev->dev, &dev_attr_ports_state);
- rc |= device_create_file(&bd->pdev->dev, &dev_attr_ports_baud);
- rc |= device_create_file(&bd->pdev->dev, &dev_attr_ports_msignals);
- rc |= device_create_file(&bd->pdev->dev, &dev_attr_ports_iflag);
- rc |= device_create_file(&bd->pdev->dev, &dev_attr_ports_cflag);
- rc |= device_create_file(&bd->pdev->dev, &dev_attr_ports_oflag);
- rc |= device_create_file(&bd->pdev->dev, &dev_attr_ports_lflag);
- rc |= device_create_file(&bd->pdev->dev, &dev_attr_ports_digi_flag);
- rc |= device_create_file(&bd->pdev->dev, &dev_attr_ports_rxcount);
- rc |= device_create_file(&bd->pdev->dev, &dev_attr_ports_txcount);
- rc |= device_create_file(&bd->pdev->dev, &dev_attr_vpd);
- rc |= device_create_file(&bd->pdev->dev, &dev_attr_serial_number);
- if (rc)
- dev_err(&bd->pdev->dev, "dgnc: sysfs device_create_file failed!\n");
-}
-
-/* removes all the sys files created for that port */
-void dgnc_remove_ports_sysfiles(struct dgnc_board *bd)
-{
- device_remove_file(&bd->pdev->dev, &dev_attr_ports_state);
- device_remove_file(&bd->pdev->dev, &dev_attr_ports_baud);
- device_remove_file(&bd->pdev->dev, &dev_attr_ports_msignals);
- device_remove_file(&bd->pdev->dev, &dev_attr_ports_iflag);
- device_remove_file(&bd->pdev->dev, &dev_attr_ports_cflag);
- device_remove_file(&bd->pdev->dev, &dev_attr_ports_oflag);
- device_remove_file(&bd->pdev->dev, &dev_attr_ports_lflag);
- device_remove_file(&bd->pdev->dev, &dev_attr_ports_digi_flag);
- device_remove_file(&bd->pdev->dev, &dev_attr_ports_rxcount);
- device_remove_file(&bd->pdev->dev, &dev_attr_ports_txcount);
- device_remove_file(&bd->pdev->dev, &dev_attr_vpd);
- device_remove_file(&bd->pdev->dev, &dev_attr_serial_number);
-}
-
-static ssize_t tty_state_show(struct device *d,
- struct device_attribute *attr, char *buf)
-{
- struct dgnc_board *bd;
- struct channel_t *ch;
- struct un_t *un;
-
- if (!d)
- return 0;
- un = dev_get_drvdata(d);
- if (!un || un->magic != DGNC_UNIT_MAGIC)
- return 0;
- ch = un->un_ch;
- if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
- return 0;
- bd = ch->ch_bd;
- if (!bd || bd->magic != DGNC_BOARD_MAGIC)
- return 0;
- if (bd->state != BOARD_READY)
- return 0;
-
- return snprintf(buf, PAGE_SIZE, "%s",
- un->un_open_count ? "Open" : "Closed");
-}
-static DEVICE_ATTR_RO(tty_state);
-
-static ssize_t tty_baud_show(struct device *d,
- struct device_attribute *attr, char *buf)
-{
- struct dgnc_board *bd;
- struct channel_t *ch;
- struct un_t *un;
-
- if (!d)
- return 0;
- un = dev_get_drvdata(d);
- if (!un || un->magic != DGNC_UNIT_MAGIC)
- return 0;
- ch = un->un_ch;
- if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
- return 0;
- bd = ch->ch_bd;
- if (!bd || bd->magic != DGNC_BOARD_MAGIC)
- return 0;
- if (bd->state != BOARD_READY)
- return 0;
-
- return snprintf(buf, PAGE_SIZE, "%d\n", ch->ch_old_baud);
-}
-static DEVICE_ATTR_RO(tty_baud);
-
-static ssize_t tty_msignals_show(struct device *d,
- struct device_attribute *attr, char *buf)
-{
- struct dgnc_board *bd;
- struct channel_t *ch;
- struct un_t *un;
-
- if (!d)
- return 0;
- un = dev_get_drvdata(d);
- if (!un || un->magic != DGNC_UNIT_MAGIC)
- return 0;
- ch = un->un_ch;
- if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
- return 0;
- bd = ch->ch_bd;
- if (!bd || bd->magic != DGNC_BOARD_MAGIC)
- return 0;
- if (bd->state != BOARD_READY)
- return 0;
-
- if (ch->ch_open_count) {
- return snprintf(buf, PAGE_SIZE, "%s %s %s %s %s %s\n",
- (ch->ch_mostat & UART_MCR_RTS) ? "RTS" : "",
- (ch->ch_mistat & UART_MSR_CTS) ? "CTS" : "",
- (ch->ch_mostat & UART_MCR_DTR) ? "DTR" : "",
- (ch->ch_mistat & UART_MSR_DSR) ? "DSR" : "",
- (ch->ch_mistat & UART_MSR_DCD) ? "DCD" : "",
- (ch->ch_mistat & UART_MSR_RI) ? "RI" : "");
- }
- return 0;
-}
-static DEVICE_ATTR_RO(tty_msignals);
-
-static ssize_t tty_iflag_show(struct device *d,
- struct device_attribute *attr, char *buf)
-{
- struct dgnc_board *bd;
- struct channel_t *ch;
- struct un_t *un;
-
- if (!d)
- return 0;
- un = dev_get_drvdata(d);
- if (!un || un->magic != DGNC_UNIT_MAGIC)
- return 0;
- ch = un->un_ch;
- if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
- return 0;
- bd = ch->ch_bd;
- if (!bd || bd->magic != DGNC_BOARD_MAGIC)
- return 0;
- if (bd->state != BOARD_READY)
- return 0;
-
- return snprintf(buf, PAGE_SIZE, "%x\n", ch->ch_c_iflag);
-}
-static DEVICE_ATTR_RO(tty_iflag);
-
-static ssize_t tty_cflag_show(struct device *d,
- struct device_attribute *attr, char *buf)
-{
- struct dgnc_board *bd;
- struct channel_t *ch;
- struct un_t *un;
-
- if (!d)
- return 0;
- un = dev_get_drvdata(d);
- if (!un || un->magic != DGNC_UNIT_MAGIC)
- return 0;
- ch = un->un_ch;
- if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
- return 0;
- bd = ch->ch_bd;
- if (!bd || bd->magic != DGNC_BOARD_MAGIC)
- return 0;
- if (bd->state != BOARD_READY)
- return 0;
-
- return snprintf(buf, PAGE_SIZE, "%x\n", ch->ch_c_cflag);
-}
-static DEVICE_ATTR_RO(tty_cflag);
-
-static ssize_t tty_oflag_show(struct device *d,
- struct device_attribute *attr, char *buf)
-{
- struct dgnc_board *bd;
- struct channel_t *ch;
- struct un_t *un;
-
- if (!d)
- return 0;
- un = dev_get_drvdata(d);
- if (!un || un->magic != DGNC_UNIT_MAGIC)
- return 0;
- ch = un->un_ch;
- if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
- return 0;
- bd = ch->ch_bd;
- if (!bd || bd->magic != DGNC_BOARD_MAGIC)
- return 0;
- if (bd->state != BOARD_READY)
- return 0;
-
- return snprintf(buf, PAGE_SIZE, "%x\n", ch->ch_c_oflag);
-}
-static DEVICE_ATTR_RO(tty_oflag);
-
-static ssize_t tty_lflag_show(struct device *d,
- struct device_attribute *attr, char *buf)
-{
- struct dgnc_board *bd;
- struct channel_t *ch;
- struct un_t *un;
-
- if (!d)
- return 0;
- un = dev_get_drvdata(d);
- if (!un || un->magic != DGNC_UNIT_MAGIC)
- return 0;
- ch = un->un_ch;
- if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
- return 0;
- bd = ch->ch_bd;
- if (!bd || bd->magic != DGNC_BOARD_MAGIC)
- return 0;
- if (bd->state != BOARD_READY)
- return 0;
-
- return snprintf(buf, PAGE_SIZE, "%x\n", ch->ch_c_lflag);
-}
-static DEVICE_ATTR_RO(tty_lflag);
-
-static ssize_t tty_digi_flag_show(struct device *d,
- struct device_attribute *attr, char *buf)
-{
- struct dgnc_board *bd;
- struct channel_t *ch;
- struct un_t *un;
-
- if (!d)
- return 0;
- un = dev_get_drvdata(d);
- if (!un || un->magic != DGNC_UNIT_MAGIC)
- return 0;
- ch = un->un_ch;
- if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
- return 0;
- bd = ch->ch_bd;
- if (!bd || bd->magic != DGNC_BOARD_MAGIC)
- return 0;
- if (bd->state != BOARD_READY)
- return 0;
-
- return snprintf(buf, PAGE_SIZE, "%x\n", ch->ch_digi.digi_flags);
-}
-static DEVICE_ATTR_RO(tty_digi_flag);
-
-static ssize_t tty_rxcount_show(struct device *d,
- struct device_attribute *attr, char *buf)
-{
- struct dgnc_board *bd;
- struct channel_t *ch;
- struct un_t *un;
-
- if (!d)
- return 0;
- un = dev_get_drvdata(d);
- if (!un || un->magic != DGNC_UNIT_MAGIC)
- return 0;
- ch = un->un_ch;
- if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
- return 0;
- bd = ch->ch_bd;
- if (!bd || bd->magic != DGNC_BOARD_MAGIC)
- return 0;
- if (bd->state != BOARD_READY)
- return 0;
-
- return snprintf(buf, PAGE_SIZE, "%ld\n", ch->ch_rxcount);
-}
-static DEVICE_ATTR_RO(tty_rxcount);
-
-static ssize_t tty_txcount_show(struct device *d,
- struct device_attribute *attr, char *buf)
-{
- struct dgnc_board *bd;
- struct channel_t *ch;
- struct un_t *un;
-
- if (!d)
- return 0;
- un = dev_get_drvdata(d);
- if (!un || un->magic != DGNC_UNIT_MAGIC)
- return 0;
- ch = un->un_ch;
- if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
- return 0;
- bd = ch->ch_bd;
- if (!bd || bd->magic != DGNC_BOARD_MAGIC)
- return 0;
- if (bd->state != BOARD_READY)
- return 0;
-
- return snprintf(buf, PAGE_SIZE, "%ld\n", ch->ch_txcount);
-}
-static DEVICE_ATTR_RO(tty_txcount);
-
-static ssize_t tty_custom_name_show(struct device *d,
- struct device_attribute *attr, char *buf)
-{
- struct dgnc_board *bd;
- struct channel_t *ch;
- struct un_t *un;
-
- if (!d)
- return 0;
- un = dev_get_drvdata(d);
- if (!un || un->magic != DGNC_UNIT_MAGIC)
- return 0;
- ch = un->un_ch;
- if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
- return 0;
- bd = ch->ch_bd;
- if (!bd || bd->magic != DGNC_BOARD_MAGIC)
- return 0;
- if (bd->state != BOARD_READY)
- return 0;
-
- return snprintf(buf, PAGE_SIZE, "%sn%d%c\n",
- (un->un_type == DGNC_PRINT) ? "pr" : "tty",
- bd->boardnum + 1, 'a' + ch->ch_portnum);
-}
-static DEVICE_ATTR_RO(tty_custom_name);
-
-static struct attribute *dgnc_sysfs_tty_entries[] = {
- &dev_attr_tty_state.attr,
- &dev_attr_tty_baud.attr,
- &dev_attr_tty_msignals.attr,
- &dev_attr_tty_iflag.attr,
- &dev_attr_tty_cflag.attr,
- &dev_attr_tty_oflag.attr,
- &dev_attr_tty_lflag.attr,
- &dev_attr_tty_digi_flag.attr,
- &dev_attr_tty_rxcount.attr,
- &dev_attr_tty_txcount.attr,
- &dev_attr_tty_custom_name.attr,
- NULL
-};
-
-static const struct attribute_group dgnc_tty_attribute_group = {
- .name = NULL,
- .attrs = dgnc_sysfs_tty_entries,
-};
-
-void dgnc_create_tty_sysfs(struct un_t *un, struct device *c)
-{
- int ret;
-
- ret = sysfs_create_group(&c->kobj, &dgnc_tty_attribute_group);
- if (ret) {
- dev_err(c, "dgnc: failed to create sysfs tty device attributes.\n");
- sysfs_remove_group(&c->kobj, &dgnc_tty_attribute_group);
- return;
- }
-
- dev_set_drvdata(c, un);
-}
-
-void dgnc_remove_tty_sysfs(struct device *c)
-{
- sysfs_remove_group(&c->kobj, &dgnc_tty_attribute_group);
-}
-
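The block deleted above registers and removes each ports attribute with an individual device_create_file()/device_remove_file() call and OR-combines the return codes. The grouped-attribute idiom, which the tty attributes in this same file already use, does the same job in one call per direction. A minimal sketch of that idiom, assuming a hypothetical "foo" attribute (names are illustrative, not from this driver):

    #include <linux/device.h>
    #include <linux/sysfs.h>

    static ssize_t foo_show(struct device *dev,
                            struct device_attribute *attr, char *buf)
    {
            return sprintf(buf, "%d\n", 42);  /* value is a placeholder */
    }
    static DEVICE_ATTR_RO(foo);

    static struct attribute *foo_attrs[] = {
            &dev_attr_foo.attr,
            NULL            /* sysfs requires a NULL terminator */
    };

    static const struct attribute_group foo_attr_group = {
            .attrs = foo_attrs,
    };

    /* in probe: one call creates every file in the group */
    err = sysfs_create_group(&dev->kobj, &foo_attr_group);
    /* in remove: */
    sysfs_remove_group(&dev->kobj, &foo_attr_group);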
diff --git a/drivers/staging/dgnc/dgnc_sysfs.h b/drivers/staging/dgnc/dgnc_sysfs.h
deleted file mode 100644
index 7be7d55bc49e..000000000000
--- a/drivers/staging/dgnc/dgnc_sysfs.h
+++ /dev/null
@@ -1,40 +0,0 @@
-/*
- * Copyright 2003 Digi International (www.digi.com)
- * Scott H Kilau <Scott_Kilau at digi dot com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY, EXPRESS OR IMPLIED; without even the
- * implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
- * PURPOSE. See the GNU General Public License for more details.
- */
-
-#ifndef __DGNC_SYSFS_H
-#define __DGNC_SYSFS_H
-
-#include <linux/device.h>
-#include "dgnc_driver.h"
-
-struct dgnc_board;
-struct channel_t;
-struct un_t;
-struct pci_driver;
-struct class_device;
-
-void dgnc_create_ports_sysfiles(struct dgnc_board *bd);
-void dgnc_remove_ports_sysfiles(struct dgnc_board *bd);
-
-void dgnc_create_driver_sysfiles(struct pci_driver *);
-void dgnc_remove_driver_sysfiles(struct pci_driver *);
-
-int dgnc_tty_class_init(void);
-int dgnc_tty_class_destroy(void);
-
-void dgnc_create_tty_sysfs(struct un_t *un, struct device *c);
-void dgnc_remove_tty_sysfs(struct device *c);
-
-#endif
diff --git a/drivers/staging/dgnc/dgnc_tty.c b/drivers/staging/dgnc/dgnc_tty.c
index 953d9310fa74..1e10c0fe4745 100644
--- a/drivers/staging/dgnc/dgnc_tty.c
+++ b/drivers/staging/dgnc/dgnc_tty.c
@@ -13,13 +13,9 @@
* PURPOSE. See the GNU General Public License for more details.
*/
-/************************************************************************
- *
+/*
* This file implements the tty driver functionality for the
* Neo and ClassicBoard PCI based product lines.
- *
- ************************************************************************
- *
*/
#include <linux/kernel.h>
@@ -39,27 +35,20 @@
#include "dgnc_tty.h"
#include "dgnc_neo.h"
#include "dgnc_cls.h"
-#include "dgnc_sysfs.h"
#include "dgnc_utils.h"
-/*
- * internal variables
- */
-static unsigned char *dgnc_TmpWriteBuf;
-
-/*
- * Default transparent print information.
- */
-static struct digi_t dgnc_digi_init = {
- .digi_flags = DIGI_COOK, /* Flags */
- .digi_maxcps = 100, /* Max CPS */
- .digi_maxchar = 50, /* Max chars in print queue */
- .digi_bufsize = 100, /* Printer buffer size */
- .digi_onlen = 4, /* size of printer on string */
- .digi_offlen = 4, /* size of printer off string */
- .digi_onstr = "\033[5i", /* ANSI printer on string ] */
- .digi_offstr = "\033[4i", /* ANSI printer off string ] */
- .digi_term = "ansi" /* default terminal type */
+/* Default transparent print information. */
+
+static const struct digi_t dgnc_digi_init = {
+ .digi_flags = DIGI_COOK, /* Flags */
+ .digi_maxcps = 100, /* Max CPS */
+ .digi_maxchar = 50, /* Max chars in print queue */
+ .digi_bufsize = 100, /* Printer buffer size */
+ .digi_onlen = 4, /* size of printer on string */
+ .digi_offlen = 4, /* size of printer off string */
+ .digi_onstr = "\033[5i", /* ANSI printer on string ] */
+ .digi_offstr = "\033[4i", /* ANSI printer off string ] */
+ .digi_term = "ansi" /* default terminal type */
};
/*
@@ -69,7 +58,7 @@ static struct digi_t dgnc_digi_init = {
* This defines a raw port at 9600 baud, 8 data bits, no parity,
* 1 stop bit.
*/
-static struct ktermios DgncDefaultTermios = {
+static struct ktermios default_termios = {
.c_iflag = (DEFAULT_IFLAGS), /* iflags */
.c_oflag = (DEFAULT_OFLAGS), /* oflags */
.c_cflag = (DEFAULT_CFLAGS), /* cflags */
@@ -113,6 +102,8 @@ static int dgnc_tty_write(struct tty_struct *tty, const unsigned char *buf,
static void dgnc_tty_set_termios(struct tty_struct *tty,
struct ktermios *old_termios);
static void dgnc_tty_send_xchar(struct tty_struct *tty, char ch);
+static void dgnc_set_signal_low(struct channel_t *ch, const unsigned char sig);
+static void dgnc_wake_up_unit(struct un_t *unit);
static const struct tty_operations dgnc_tty_ops = {
.open = dgnc_tty_open,
@@ -137,36 +128,7 @@ static const struct tty_operations dgnc_tty_ops = {
.send_xchar = dgnc_tty_send_xchar
};
-/************************************************************************
- *
- * TTY Initialization/Cleanup Functions
- *
- ************************************************************************/
-
-/*
- * dgnc_tty_preinit()
- *
- * Initialize any global tty related data before we download any boards.
- */
-int dgnc_tty_preinit(void)
-{
- /*
- * Allocate a buffer for doing the copy from user space to
- * kernel space in dgnc_write(). We only use one buffer and
- * control access to it with a semaphore. If we are paging, we
- * are already in trouble so one buffer won't hurt much anyway.
- *
- * We are okay to sleep in the malloc, as this routine
- * is only called during module load, (not in interrupt context),
- * and with no locks held.
- */
- dgnc_TmpWriteBuf = kmalloc(WRITEBUFLEN, GFP_KERNEL);
-
- if (!dgnc_TmpWriteBuf)
- return -ENOMEM;
-
- return 0;
-}
+/* TTY Initialization/Cleanup Functions */
/*
* dgnc_tty_register()
@@ -194,7 +156,7 @@ int dgnc_tty_register(struct dgnc_board *brd)
brd->serial_driver->minor_start = 0;
brd->serial_driver->type = TTY_DRIVER_TYPE_SERIAL;
brd->serial_driver->subtype = SERIAL_TYPE_NORMAL;
- brd->serial_driver->init_termios = DgncDefaultTermios;
+ brd->serial_driver->init_termios = default_termios;
brd->serial_driver->driver_name = DRVSTR;
/*
@@ -233,7 +195,7 @@ int dgnc_tty_register(struct dgnc_board *brd)
brd->print_driver->minor_start = 0x80;
brd->print_driver->type = TTY_DRIVER_TYPE_SERIAL;
brd->print_driver->subtype = SERIAL_TYPE_NORMAL;
- brd->print_driver->init_termios = DgncDefaultTermios;
+ brd->print_driver->init_termios = default_termios;
brd->print_driver->driver_name = DRVSTR;
/*
@@ -285,9 +247,7 @@ int dgnc_tty_init(struct dgnc_board *brd)
if (!brd)
return -ENXIO;
- /*
- * Initialize board structure elements.
- */
+ /* Initialize board structure elements. */
vaddr = brd->re_map_membase;
@@ -345,12 +305,10 @@ int dgnc_tty_init(struct dgnc_board *brd)
classp = tty_register_device(brd->serial_driver, i,
&ch->ch_bd->pdev->dev);
ch->ch_tun.un_sysfs = classp;
- dgnc_create_tty_sysfs(&ch->ch_tun, classp);
classp = tty_register_device(brd->print_driver, i,
&ch->ch_bd->pdev->dev);
ch->ch_pun.un_sysfs = classp;
- dgnc_create_tty_sysfs(&ch->ch_pun, classp);
}
}
@@ -365,17 +323,6 @@ err_free_channels:
}
/*
- * dgnc_tty_post_uninit()
- *
- * UnInitialize any global tty related data.
- */
-void dgnc_tty_post_uninit(void)
-{
- kfree(dgnc_TmpWriteBuf);
- dgnc_TmpWriteBuf = NULL;
-}
-
-/*
* dgnc_cleanup_tty()
*
* Uninitialize the TTY portion of this driver. Free all memory and
@@ -385,20 +332,14 @@ void dgnc_cleanup_tty(struct dgnc_board *brd)
{
int i = 0;
- for (i = 0; i < brd->nasync; i++) {
- if (brd->channels[i])
- dgnc_remove_tty_sysfs(brd->channels[i]->
- ch_tun.un_sysfs);
+ for (i = 0; i < brd->nasync; i++)
tty_unregister_device(brd->serial_driver, i);
- }
+
tty_unregister_driver(brd->serial_driver);
- for (i = 0; i < brd->nasync; i++) {
- if (brd->channels[i])
- dgnc_remove_tty_sysfs(brd->channels[i]->
- ch_pun.un_sysfs);
+ for (i = 0; i < brd->nasync; i++)
tty_unregister_device(brd->print_driver, i);
- }
+
tty_unregister_driver(brd->print_driver);
put_tty_driver(brd->serial_driver);
@@ -437,9 +378,7 @@ static void dgnc_wmove(struct channel_t *ch, char *buf, uint n)
}
if (n > 0) {
- /*
- * Move rest of data.
- */
+ /* Move rest of data. */
remain = n;
memcpy(ch->ch_wqueue + head, buf, remain);
head += remain;
@@ -509,9 +448,8 @@ void dgnc_input(struct channel_t *ch)
goto exit_unlock;
}
- /*
- * If we are throttled, simply don't read any data.
- */
+ /* If we are throttled, simply don't read any data. */
+
if (ch->ch_flags & CH_FORCED_STOPI)
goto exit_unlock;
@@ -624,10 +562,10 @@ exit_unlock:
tty_ldisc_deref(ld);
}
-/************************************************************************
+/*
* Determines when CARRIER changes state and takes appropriate
* action.
- ************************************************************************/
+ */
void dgnc_carrier(struct channel_t *ch)
{
int virt_carrier = 0;
@@ -645,28 +583,24 @@ void dgnc_carrier(struct channel_t *ch)
if (ch->ch_c_cflag & CLOCAL)
virt_carrier = 1;
- /*
- * Test for a VIRTUAL carrier transition to HIGH.
- */
+ /* Test for a VIRTUAL carrier transition to HIGH. */
+
if (((ch->ch_flags & CH_FCAR) == 0) && (virt_carrier == 1)) {
/*
* When carrier rises, wake any threads waiting
* for carrier in the open routine.
*/
-
if (waitqueue_active(&ch->ch_flags_wait))
wake_up_interruptible(&ch->ch_flags_wait);
}
- /*
- * Test for a PHYSICAL carrier transition to HIGH.
- */
+ /* Test for a PHYSICAL carrier transition to HIGH. */
+
if (((ch->ch_flags & CH_CD) == 0) && (phys_carrier == 1)) {
/*
* When carrier rises, wake any threads waiting
* for carrier in the open routine.
*/
-
if (waitqueue_active(&ch->ch_flags_wait))
wake_up_interruptible(&ch->ch_flags_wait);
}
@@ -704,9 +638,8 @@ void dgnc_carrier(struct channel_t *ch)
tty_hangup(ch->ch_pun.un_tty);
}
- /*
- * Make sure that our cached values reflect the current reality.
- */
+ /* Make sure that our cached values reflect the current reality. */
+
if (virt_carrier == 1)
ch->ch_flags |= CH_FCAR;
else
@@ -718,9 +651,8 @@ void dgnc_carrier(struct channel_t *ch)
ch->ch_flags &= ~CH_CD;
}
-/*
- * Assign the custom baud rate to the channel structure
- */
+/* Assign the custom baud rate to the channel structure */
+
static void dgnc_set_custom_speed(struct channel_t *ch, uint newrate)
{
int testdiv;
@@ -854,6 +786,12 @@ void dgnc_check_queue_flow_control(struct channel_t *ch)
}
}
+static void dgnc_set_signal_low(struct channel_t *ch, const unsigned char sig)
+{
+ ch->ch_mostat &= ~(sig);
+ ch->ch_bd->bd_ops->assert_modem_signals(ch);
+}
+
void dgnc_wakeup_writes(struct channel_t *ch)
{
int qlen = 0;
@@ -864,9 +802,8 @@ void dgnc_wakeup_writes(struct channel_t *ch)
spin_lock_irqsave(&ch->ch_lock, flags);
- /*
- * If channel now has space, wake up anyone waiting on the condition.
- */
+ /* If channel now has space, wake up anyone waiting on the condition. */
+
qlen = ch->ch_w_head - ch->ch_w_tail;
if (qlen < 0)
qlen += WQUEUESIZE;
@@ -892,19 +829,15 @@ void dgnc_wakeup_writes(struct channel_t *ch)
* If RTS Toggle mode is on, whenever
* the queue and UART is empty, keep RTS low.
*/
- if (ch->ch_digi.digi_flags & DIGI_RTS_TOGGLE) {
- ch->ch_mostat &= ~(UART_MCR_RTS);
- ch->ch_bd->bd_ops->assert_modem_signals(ch);
- }
+ if (ch->ch_digi.digi_flags & DIGI_RTS_TOGGLE)
+ dgnc_set_signal_low(ch, UART_MCR_RTS);
/*
* If DTR Toggle mode is on, whenever
* the queue and UART is empty, keep DTR low.
*/
- if (ch->ch_digi.digi_flags & DIGI_DTR_TOGGLE) {
- ch->ch_mostat &= ~(UART_MCR_DTR);
- ch->ch_bd->bd_ops->assert_modem_signals(ch);
- }
+ if (ch->ch_digi.digi_flags & DIGI_DTR_TOGGLE)
+ dgnc_set_signal_low(ch, UART_MCR_DTR);
}
}
@@ -930,7 +863,7 @@ void dgnc_wakeup_writes(struct channel_t *ch)
spin_unlock_irqrestore(&ch->ch_lock, flags);
}
-struct dgnc_board *find_board_by_major(unsigned int major)
+static struct dgnc_board *find_board_by_major(unsigned int major)
{
int i;
@@ -948,16 +881,10 @@ struct dgnc_board *find_board_by_major(unsigned int major)
return NULL;
}
-/************************************************************************
- *
- * TTY Entry points and helper functions
- *
- ************************************************************************/
+/* TTY Entry points and helper functions */
+
+/* dgnc_tty_open() */
-/*
- * dgnc_tty_open()
- *
- */
static int dgnc_tty_open(struct tty_struct *tty, struct file *file)
{
struct dgnc_board *brd;
@@ -1045,8 +972,8 @@ static int dgnc_tty_open(struct tty_struct *tty, struct file *file)
* ch_flags_wait to wake us back up.
*/
rc = wait_event_interruptible(ch->ch_flags_wait,
- (((ch->ch_tun.un_flags | ch->ch_pun.un_flags) &
- UN_CLOSING) == 0));
+ (((ch->ch_tun.un_flags |
+ ch->ch_pun.un_flags) & UN_CLOSING) == 0));
/* If ret is non-zero, user ctrl-c'ed us */
if (rc)
@@ -1057,9 +984,8 @@ static int dgnc_tty_open(struct tty_struct *tty, struct file *file)
/* Store our unit into driver_data, so we always have it available. */
tty->driver_data = un;
- /*
- * Initialize tty's
- */
+ /* Initialize ttys */
+
if (!(un->un_flags & UN_ISOPEN)) {
/* Store important variables. */
un->un_tty = tty;
@@ -1096,13 +1022,10 @@ static int dgnc_tty_open(struct tty_struct *tty, struct file *file)
ch->ch_flags &= ~(CH_OPENING);
wake_up_interruptible(&ch->ch_flags_wait);
- /*
- * Initialize if neither terminal or printer is open.
- */
+ /* Initialize if neither terminal nor printer is open. */
+
if (!((ch->ch_tun.un_flags | ch->ch_pun.un_flags) & UN_ISOPEN)) {
- /*
- * Flush input queues.
- */
+ /* Flush input queues. */
ch->ch_r_head = 0;
ch->ch_r_tail = 0;
ch->ch_e_head = 0;
@@ -1138,16 +1061,13 @@ static int dgnc_tty_open(struct tty_struct *tty, struct file *file)
brd->bd_ops->uart_init(ch);
}
- /*
- * Run param in case we changed anything
- */
+ /* Run param in case we changed anything */
+
brd->bd_ops->param(tty);
dgnc_carrier(ch);
- /*
- * follow protocol for opening port
- */
+ /* follow protocol for opening port */
spin_unlock_irqrestore(&ch->ch_lock, flags);
@@ -1248,9 +1168,8 @@ static int dgnc_block_til_ready(struct tty_struct *tty,
break;
}
- /*
- * Store the flags before we let go of channel lock
- */
+ /* Store the flags before we let go of channel lock */
+
if (sleep_on_un_flags)
old_flags = ch->ch_tun.un_flags | ch->ch_pun.un_flags;
else
@@ -1269,12 +1188,13 @@ static int dgnc_block_til_ready(struct tty_struct *tty,
* from the current value.
*/
if (sleep_on_un_flags)
- retval = wait_event_interruptible(un->un_flags_wait,
- (old_flags != (ch->ch_tun.un_flags |
- ch->ch_pun.un_flags)));
+ retval = wait_event_interruptible
+ (un->un_flags_wait,
+ (old_flags != (ch->ch_tun.un_flags |
+ ch->ch_pun.un_flags)));
else
retval = wait_event_interruptible(ch->ch_flags_wait,
- (old_flags != ch->ch_flags));
+ (old_flags != ch->ch_flags));
/*
* We got woken up for some reason.
@@ -1304,10 +1224,8 @@ static void dgnc_tty_hangup(struct tty_struct *tty)
dgnc_tty_flush_buffer(tty);
}
-/*
- * dgnc_tty_close()
- *
- */
+/* dgnc_tty_close() */
+
static void dgnc_tty_close(struct tty_struct *tty, struct file *file)
{
struct dgnc_board *bd;
@@ -1377,9 +1295,8 @@ static void dgnc_tty_close(struct tty_struct *tty, struct file *file)
!(ch->ch_digi.digi_flags & DIGI_PRINTER)) {
ch->ch_flags &= ~(CH_STOPI | CH_FORCED_STOPI);
- /*
- * turn off print device when closing print device.
- */
+ /* turn off print device when closing print device. */
+
if ((un->un_type == DGNC_PRINT) && (ch->ch_flags & CH_PRON)) {
dgnc_wmove(ch, ch->ch_digi.digi_offstr,
(int)ch->ch_digi.digi_offlen);
@@ -1399,9 +1316,8 @@ static void dgnc_tty_close(struct tty_struct *tty, struct file *file)
tty->closing = 0;
- /*
- * If we have HUPCL set, lower DTR and RTS
- */
+ /* If we have HUPCL set, lower DTR and RTS */
+
if (ch->ch_c_cflag & HUPCL) {
/* Drop RTS/DTR */
ch->ch_mostat &= ~(UART_MCR_DTR | UART_MCR_RTS);
@@ -1424,9 +1340,8 @@ static void dgnc_tty_close(struct tty_struct *tty, struct file *file)
/* Turn off UART interrupts for this port */
ch->ch_bd->bd_ops->uart_off(ch);
} else {
- /*
- * turn off print device when closing print device.
- */
+ /* turn off print device when closing print device. */
+
if ((un->un_type == DGNC_PRINT) && (ch->ch_flags & CH_PRON)) {
dgnc_wmove(ch, ch->ch_digi.digi_offstr,
(int)ch->ch_digi.digi_offlen);
@@ -1543,7 +1458,7 @@ static int dgnc_tty_write_room(struct tty_struct *tty)
int ret = 0;
unsigned long flags;
- if (!tty || !dgnc_TmpWriteBuf)
+ if (!tty)
return 0;
un = tty->driver_data;
@@ -1598,9 +1513,8 @@ static int dgnc_tty_write_room(struct tty_struct *tty)
*/
static int dgnc_tty_put_char(struct tty_struct *tty, unsigned char c)
{
- /*
- * Simply call tty_write.
- */
+ /* Simply call tty_write. */
+
dgnc_tty_write(tty, &c, 1);
return 1;
}
@@ -1623,7 +1537,7 @@ static int dgnc_tty_write(struct tty_struct *tty,
ushort tmask;
uint remain;
- if (!tty || !dgnc_TmpWriteBuf)
+ if (!tty)
return 0;
un = tty->driver_data;
@@ -1667,9 +1581,8 @@ static int dgnc_tty_write(struct tty_struct *tty,
*/
count = min(count, bufcount);
- /*
- * Bail if no space left.
- */
+ /* Bail if no space left. */
+
if (count <= 0)
goto exit_retry;
@@ -1712,9 +1625,7 @@ static int dgnc_tty_write(struct tty_struct *tty,
}
if (n > 0) {
- /*
- * Move rest of data.
- */
+ /* Move rest of data. */
remain = n;
memcpy(ch->ch_wqueue + head, buf, remain);
head += remain;
@@ -1749,9 +1660,7 @@ exit_retry:
return 0;
}
-/*
- * Return modem signals to ld.
- */
+/* Return modem signals to ld. */
static int dgnc_tty_tiocmget(struct tty_struct *tty)
{
@@ -1960,9 +1869,8 @@ static void dgnc_tty_send_xchar(struct tty_struct *tty, char c)
dev_dbg(tty->dev, "dgnc_tty_send_xchar finish\n");
}
-/*
- * Return modem signals to ld.
- */
+/* Return modem signals to ld. */
+
static inline int dgnc_get_mstat(struct channel_t *ch)
{
unsigned char mstat;
@@ -1994,9 +1902,8 @@ static inline int dgnc_get_mstat(struct channel_t *ch)
return result;
}
-/*
- * Return modem signals to ld.
- */
+/* Return modem signals to ld. */
+
static int dgnc_get_modem_info(struct channel_t *ch,
unsigned int __user *value)
{
@@ -2070,9 +1977,6 @@ static int dgnc_set_modem_info(struct channel_t *ch,
* dgnc_tty_digigeta()
*
* Ioctl to get the information for ditty.
- *
- *
- *
*/
static int dgnc_tty_digigeta(struct tty_struct *tty,
struct digi_t __user *retinfo)
@@ -2112,9 +2016,6 @@ static int dgnc_tty_digigeta(struct tty_struct *tty,
* dgnc_tty_digiseta()
*
* Ioctl to set the information for ditty.
- *
- *
- *
*/
static int dgnc_tty_digiseta(struct tty_struct *tty,
struct digi_t __user *new_info)
@@ -2145,9 +2046,8 @@ static int dgnc_tty_digiseta(struct tty_struct *tty,
spin_lock_irqsave(&ch->ch_lock, flags);
- /*
- * Handle transistions to and from RTS Toggle.
- */
+ /* Handle transitions to and from RTS Toggle. */
+
if (!(ch->ch_digi.digi_flags & DIGI_RTS_TOGGLE) &&
(new_digi.digi_flags & DIGI_RTS_TOGGLE))
ch->ch_mostat &= ~(UART_MCR_RTS);
@@ -2155,9 +2055,8 @@ static int dgnc_tty_digiseta(struct tty_struct *tty,
!(new_digi.digi_flags & DIGI_RTS_TOGGLE))
ch->ch_mostat |= (UART_MCR_RTS);
- /*
- * Handle transistions to and from DTR Toggle.
- */
+ /* Handle transitions to and from DTR Toggle. */
+
if (!(ch->ch_digi.digi_flags & DIGI_DTR_TOGGLE) &&
(new_digi.digi_flags & DIGI_DTR_TOGGLE))
ch->ch_mostat &= ~(UART_MCR_DTR);
@@ -2195,9 +2094,8 @@ static int dgnc_tty_digiseta(struct tty_struct *tty,
return 0;
}
-/*
- * dgnc_set_termios()
- */
+/* dgnc_set_termios() */
+
static void dgnc_tty_set_termios(struct tty_struct *tty,
struct ktermios *old_termios)
{
@@ -2428,11 +2326,18 @@ static void dgnc_tty_flush_buffer(struct tty_struct *tty)
spin_unlock_irqrestore(&ch->ch_lock, flags);
}
-/*****************************************************************************
- *
- * The IOCTL function and all of its helpers
+/*
+ * dgnc_wake_up_unit()
*
- *****************************************************************************/
+ * Wakes up processes waiting in the unit's (terminal/printer) wait queue.
+ */
+static void dgnc_wake_up_unit(struct un_t *unit)
+{
+ unit->un_flags &= ~(UN_LOW | UN_EMPTY);
+ wake_up_interruptible(&unit->un_flags_wait);
+}
+
+/* The IOCTL function and all of its helpers */
/*
* dgnc_tty_ioctl()
@@ -2506,7 +2411,8 @@ static int dgnc_tty_ioctl(struct tty_struct *tty, unsigned int cmd,
return 0;
case TCSBRKP:
- /* support for POSIX tcsendbreak()
+ /*
+ * support for POSIX tcsendbreak()
* According to POSIX.1 spec (7.2.2.1.2) breaks should be
* between 0.25 and 0.5 seconds so we'll ask for something
* in the middle: 0.375 seconds.
@@ -2583,9 +2489,7 @@ static int dgnc_tty_ioctl(struct tty_struct *tty, unsigned int cmd,
spin_unlock_irqrestore(&ch->ch_lock, flags);
return dgnc_set_modem_info(ch, cmd, uarg);
- /*
- * Here are any additional ioctl's that we want to implement
- */
+ /* Here are any additional ioctl's that we want to implement */
case TCFLSH:
/*
@@ -2615,17 +2519,11 @@ static int dgnc_tty_ioctl(struct tty_struct *tty, unsigned int cmd,
ch->ch_w_head = ch->ch_w_tail;
ch_bd_ops->flush_uart_write(ch);
- if (ch->ch_tun.un_flags & (UN_LOW | UN_EMPTY)) {
- ch->ch_tun.un_flags &=
- ~(UN_LOW | UN_EMPTY);
- wake_up_interruptible(&ch->ch_tun.un_flags_wait);
- }
-
- if (ch->ch_pun.un_flags & (UN_LOW|UN_EMPTY)) {
- ch->ch_pun.un_flags &=
- ~(UN_LOW | UN_EMPTY);
- wake_up_interruptible(&ch->ch_pun.un_flags_wait);
- }
+ if (ch->ch_tun.un_flags & (UN_LOW | UN_EMPTY))
+ dgnc_wake_up_unit(&ch->ch_tun);
+
+ if (ch->ch_pun.un_flags & (UN_LOW | UN_EMPTY))
+ dgnc_wake_up_unit(&ch->ch_pun);
}
}
@@ -2705,9 +2603,10 @@ static int dgnc_tty_ioctl(struct tty_struct *tty, unsigned int cmd,
case DIGI_LOOPBACK:
{
uint loopback = 0;
- /* Let go of locks when accessing user space,
+ /*
+ * Let go of locks when accessing user space,
* could sleep
- */
+ */
spin_unlock_irqrestore(&ch->ch_lock, flags);
rc = get_user(loopback, (unsigned int __user *)arg);
if (rc)
@@ -2749,7 +2648,7 @@ static int dgnc_tty_ioctl(struct tty_struct *tty, unsigned int cmd,
* This ioctl allows insertion of a character into the front
* of any pending data to be transmitted.
*
- * This ioctl is to satify the "Send Character Immediate"
+ * This ioctl is to satisfy the "Send Character Immediate"
* call that the RealPort protocol spec requires.
*/
case DIGI_REALPORT_SENDIMMEDIATE:
@@ -2769,7 +2668,7 @@ static int dgnc_tty_ioctl(struct tty_struct *tty, unsigned int cmd,
/*
* This ioctl returns all the current counts for the port.
*
- * This ioctl is to satify the "Line Error Counters"
+ * This ioctl is to satisfy the "Line Error Counters"
* call that the RealPort protocol spec requires.
*/
case DIGI_REALPORT_GETCOUNTERS:
@@ -2795,7 +2694,7 @@ static int dgnc_tty_ioctl(struct tty_struct *tty, unsigned int cmd,
/*
* This ioctl returns all current events.
*
- * This ioctl is to satify the "Event Reporting"
+ * This ioctl is to satisfy the "Event Reporting"
* call that the RealPort protocol spec requires.
*/
case DIGI_REALPORT_GETEVENTS:
@@ -2831,23 +2730,23 @@ static int dgnc_tty_ioctl(struct tty_struct *tty, unsigned int cmd,
spin_unlock_irqrestore(&ch->ch_lock, flags);
- /*
- * Get data from user first.
- */
+ /* Get data from user first. */
+
if (copy_from_user(&buf, uarg, sizeof(buf)))
return -EFAULT;
spin_lock_irqsave(&ch->ch_lock, flags);
- /*
- * Figure out how much data is in our RX and TX queues.
- */
+ /* Figure out how much data is in our RX and TX queues. */
+
buf.rxbuf = (ch->ch_r_head - ch->ch_r_tail) & RQUEUEMASK;
buf.txbuf = (ch->ch_w_head - ch->ch_w_tail) & WQUEUEMASK;
/*
- * Is the UART empty? Add that value to whats in our TX queue.
+ * Is the UART empty?
+ * Add that value to whats in our TX queue.
*/
+
count = buf.txbuf + ch_bd_ops->get_uart_bytes_left(ch);
/*
@@ -2867,9 +2766,8 @@ static int dgnc_tty_ioctl(struct tty_struct *tty, unsigned int cmd,
if (buf.txbuf > tdist)
buf.txbuf = tdist;
- /*
- * Report whether our queue and UART TX are completely empty.
- */
+ /* Report whether our queue and UART TX are completely empty. */
+
if (count)
buf.txdone = 0;
else
diff --git a/drivers/staging/dgnc/dgnc_tty.h b/drivers/staging/dgnc/dgnc_tty.h
index 24c9a412211e..1ee0eeeb4730 100644
--- a/drivers/staging/dgnc/dgnc_tty.h
+++ b/drivers/staging/dgnc/dgnc_tty.h
@@ -21,11 +21,9 @@
int dgnc_tty_register(struct dgnc_board *brd);
void dgnc_tty_unregister(struct dgnc_board *brd);
-int dgnc_tty_preinit(void);
-int dgnc_tty_init(struct dgnc_board *);
+int dgnc_tty_init(struct dgnc_board *brd);
-void dgnc_tty_post_uninit(void);
-void dgnc_cleanup_tty(struct dgnc_board *);
+void dgnc_cleanup_tty(struct dgnc_board *brd);
void dgnc_input(struct channel_t *ch);
void dgnc_carrier(struct channel_t *ch);
diff --git a/drivers/staging/dgnc/digi.h b/drivers/staging/dgnc/digi.h
index 5b983e6f5ee2..ec2e3dda6119 100644
--- a/drivers/staging/dgnc/digi.h
+++ b/drivers/staging/dgnc/digi.h
@@ -17,16 +17,16 @@
#define __DIGI_H
#ifndef TIOCM_LE
-#define TIOCM_LE 0x01 /* line enable */
-#define TIOCM_DTR 0x02 /* data terminal ready */
-#define TIOCM_RTS 0x04 /* request to send */
-#define TIOCM_ST 0x08 /* secondary transmit */
-#define TIOCM_SR 0x10 /* secondary receive */
-#define TIOCM_CTS 0x20 /* clear to send */
-#define TIOCM_CAR 0x40 /* carrier detect */
-#define TIOCM_RNG 0x80 /* ring indicator */
-#define TIOCM_DSR 0x100 /* data set ready */
-#define TIOCM_RI TIOCM_RNG /* ring (alternate) */
+#define TIOCM_LE 0x01 /* line enable */
+#define TIOCM_DTR 0x02 /* data terminal ready */
+#define TIOCM_RTS 0x04 /* request to send */
+#define TIOCM_ST 0x08 /* secondary transmit */
+#define TIOCM_SR 0x10 /* secondary receive */
+#define TIOCM_CTS 0x20 /* clear to send */
+#define TIOCM_CAR 0x40 /* carrier detect */
+#define TIOCM_RNG 0x80 /* ring indicator */
+#define TIOCM_DSR 0x100 /* data set ready */
+#define TIOCM_RI TIOCM_RNG /* ring (alternate) */
#define TIOCM_CD TIOCM_CAR /* carrier detect (alt) */
#endif
@@ -40,72 +40,71 @@
#define TIOCMBIS (('d' << 8) | 255) /* set modem ctrl state */
#endif
-#define DIGI_GETA (('e' << 8) | 94) /* Read params */
-#define DIGI_SETA (('e' << 8) | 95) /* Set params */
-#define DIGI_SETAW (('e' << 8) | 96) /* Drain & set params */
+#define DIGI_GETA (('e' << 8) | 94) /* Read params */
+#define DIGI_SETA (('e' << 8) | 95) /* Set params */
+#define DIGI_SETAW (('e' << 8) | 96) /* Drain & set params */
#define DIGI_SETAF (('e' << 8) | 97) /* Drain, flush & set params */
-#define DIGI_GET_NI_INFO (('d' << 8) | 250) /* Non-intelligent state info */
-#define DIGI_LOOPBACK (('d' << 8) | 252) /*
- * Enable/disable UART
- * internal loopback
- */
-#define DIGI_FAST 0x0002 /* Fast baud rates */
-#define RTSPACE 0x0004 /* RTS input flow control */
-#define CTSPACE 0x0008 /* CTS output flow control */
+#define DIGI_GET_NI_INFO (('d' << 8) | 250) /* Non-intelligent state info */
+#define DIGI_LOOPBACK (('d' << 8) | 252) /*
+ * Enable/disable UART
+ * internal loopback
+ */
+#define DIGI_FAST 0x0002 /* Fast baud rates */
+#define RTSPACE 0x0004 /* RTS input flow control */
+#define CTSPACE 0x0008 /* CTS output flow control */
#define DIGI_COOK 0x0080 /* Cooked processing done in FEP */
-#define DIGI_FORCEDCD 0x0100 /* Force carrier */
-#define DIGI_ALTPIN 0x0200 /* Alternate RJ-45 pin config */
+#define DIGI_FORCEDCD 0x0100 /* Force carrier */
+#define DIGI_ALTPIN 0x0200 /* Alternate RJ-45 pin config */
#define DIGI_PRINTER 0x0800 /* Hold port open for flow cntrl*/
-#define DIGI_DTR_TOGGLE 0x2000 /* Support DTR Toggle */
-#define DIGI_RTS_TOGGLE 0x8000 /* Support RTS Toggle */
-#define DIGI_PLEN 28 /* String length */
-#define DIGI_TSIZ 10 /* Terminal string len */
+#define DIGI_DTR_TOGGLE 0x2000 /* Support DTR Toggle */
+#define DIGI_RTS_TOGGLE 0x8000 /* Support RTS Toggle */
+#define DIGI_PLEN 28 /* String length */
+#define DIGI_TSIZ 10 /* Terminal string len */
-/************************************************************************
+/*
* Structure used with ioctl commands for DIGI parameters.
- ************************************************************************/
+ */
struct digi_t {
- unsigned short digi_flags; /* Flags (see above) */
- unsigned short digi_maxcps; /* Max printer CPS */
+ unsigned short digi_flags; /* Flags (see above) */
+ unsigned short digi_maxcps; /* Max printer CPS */
unsigned short digi_maxchar; /* Max chars in print queue */
- unsigned short digi_bufsize; /* Buffer size */
- unsigned char digi_onlen; /* Length of ON string */
+ unsigned short digi_bufsize; /* Buffer size */
+ unsigned char digi_onlen; /* Length of ON string */
unsigned char digi_offlen; /* Length of OFF string */
- char digi_onstr[DIGI_PLEN]; /* Printer on string */
- char digi_offstr[DIGI_PLEN]; /* Printer off string */
- char digi_term[DIGI_TSIZ]; /* terminal string */
+ char digi_onstr[DIGI_PLEN]; /* Printer on string */
+ char digi_offstr[DIGI_PLEN]; /* Printer off string */
+ char digi_term[DIGI_TSIZ]; /* terminal string */
};
-/************************************************************************
- * Structure to get driver status information
- ************************************************************************/
+/* Structure to get driver status information */
+
struct digi_dinfo {
- unsigned int dinfo_nboards; /* # boards configured */
+ unsigned int dinfo_nboards; /* # boards configured */
char dinfo_reserved[12]; /* for future expansion */
- char dinfo_version[16]; /* driver version */
+ char dinfo_version[16]; /* driver version */
};
-#define DIGI_GETDD (('d' << 8) | 248) /* get driver info */
+#define DIGI_GETDD (('d' << 8) | 248) /* get driver info */
-/************************************************************************
+/*
* Structure used with ioctl commands for per-board information
*
* physsize and memsize differ when board has "windowed" memory
- ************************************************************************/
+ */
struct digi_info {
- unsigned int info_bdnum; /* Board number (0 based) */
- unsigned int info_ioport; /* io port address */
- unsigned int info_physaddr; /* memory address */
+ unsigned int info_bdnum; /* Board number (0 based) */
+ unsigned int info_ioport; /* io port address */
+ unsigned int info_physaddr; /* memory address */
unsigned int info_physsize; /* Size of host mem window */
unsigned int info_memsize; /* Amount of dual-port mem */
- /* on board */
- unsigned short info_bdtype; /* Board type */
- unsigned short info_nports; /* number of ports */
- char info_bdstate; /* board state */
- char info_reserved[7]; /* for future expansion */
+ /* on board */
+ unsigned short info_bdtype; /* Board type */
+ unsigned short info_nports; /* number of ports */
+ char info_bdstate; /* board state */
+ char info_reserved[7]; /* for future expansion */
};
-#define DIGI_GETBD (('d' << 8) | 249) /* get board info */
+#define DIGI_GETBD (('d' << 8) | 249) /* get board info */
struct digi_getbuffer /* Struct for holding buffer use counts */
{
@@ -139,7 +138,7 @@ struct digi_getcounter {
#define DIGI_REALPORT_GETEVENTS (('e' << 8) | 111)
#define EV_OPU 0x0001 /* !<Output paused by client */
-#define EV_OPS 0x0002 /* !<Output paused by reqular sw flowctrl */
+#define EV_OPS 0x0002 /* !<Output paused by regular sw flowctrl */
#define EV_IPU 0x0010 /* !<Input paused unconditionally by user */
#define EV_IPS 0x0020 /* !<Input paused by high/low water marks */
#define EV_TXB 0x0040 /* !<Transmit break pending */
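The ioctl numbers in this header (DIGI_GETA, DIGI_REALPORT_GETEVENTS, and friends) predate the _IO()/_IOR() encoding macros: each is just an eight-bit magic character shifted into the high byte with an ordinal in the low byte. Working one out by hand, as a check rather than new code in this header:

    /* DIGI_GETA == (('e' << 8) | 94)   */
    /*           == (0x65 << 8) | 0x5E  */
    /*           == 0x655E              */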
diff --git a/drivers/staging/emxx_udc/emxx_udc.c b/drivers/staging/emxx_udc/emxx_udc.c
index c3e298843b43..3f42fa8b0bf3 100644
--- a/drivers/staging/emxx_udc/emxx_udc.c
+++ b/drivers/staging/emxx_udc/emxx_udc.c
@@ -153,7 +153,6 @@ static void _nbu2ss_ep0_complete(struct usb_ep *_ep, struct usb_request *_req)
udc = (struct nbu2ss_udc *)_req->context;
p_ctrl = &udc->ctrl;
if ((p_ctrl->bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD) {
-
if (p_ctrl->bRequest == USB_REQ_SET_FEATURE) {
/*-------------------------------------------------*/
/* SET_FEATURE */
@@ -263,7 +262,7 @@ static int _nbu2ss_ep_init(struct nbu2ss_udc *udc, struct nbu2ss_ep *ep)
}
_nbu2ss_bitset(&udc->p_regs->EP_REGS[num].EP_CONTROL, data);
- _nbu2ss_endpoint_toggle_reset(udc, (ep->epnum|ep->direct));
+ _nbu2ss_endpoint_toggle_reset(udc, (ep->epnum | ep->direct));
if (ep->direct == USB_DIR_OUT) {
/*---------------------------------------------------------*/
@@ -460,7 +459,7 @@ static void _nbu2ss_ep_in_end(
if (length)
_nbu2ss_writel(&preg->EP_REGS[num].EP_WRITE, data32);
- data = ((((u32)length) << 5) & EPn_DW) | EPn_DEND;
+ data = (((length) << 5) & EPn_DW) | EPn_DEND;
_nbu2ss_bitset(&preg->EP_REGS[num].EP_CONTROL, data);
_nbu2ss_bitset(&preg->EP_REGS[num].EP_CONTROL, EPn_AUTO);
@@ -753,7 +752,6 @@ static int _nbu2ss_ep0_out_transfer(
/* Receive data confirmation */
iRecvLength = _nbu2ss_readl(&udc->p_regs->EP0_LENGTH) & EP0_LDATA;
if (iRecvLength != 0) {
-
fRcvZero = 0;
iRemainSize = req->req.length - req->req.actual;
@@ -928,9 +926,8 @@ static int _nbu2ss_epn_out_pio(
req->req.actual += result;
- if ((req->req.actual == req->req.length)
- || ((req->req.actual % ep->ep.maxpacket) != 0)) {
-
+ if ((req->req.actual == req->req.length) ||
+ ((req->req.actual % ep->ep.maxpacket) != 0)) {
result = 0;
}
@@ -956,9 +953,8 @@ static int _nbu2ss_epn_out_data(
iBufSize = min((req->req.length - req->req.actual), data_size);
- if ((ep->ep_type != USB_ENDPOINT_XFER_INT)
- && (req->req.dma != 0)
- && (iBufSize >= sizeof(u32))) {
+ if ((ep->ep_type != USB_ENDPOINT_XFER_INT) && (req->req.dma != 0) &&
+ (iBufSize >= sizeof(u32))) {
nret = _nbu2ss_out_dma(udc, req, num, iBufSize);
} else {
iBufSize = min_t(u32, iBufSize, ep->ep.maxpacket);
@@ -999,9 +995,8 @@ static int _nbu2ss_epn_out_transfer(
}
}
} else {
- if ((req->req.actual == req->req.length)
- || ((req->req.actual % ep->ep.maxpacket) != 0)) {
-
+ if ((req->req.actual == req->req.length) ||
+ ((req->req.actual % ep->ep.maxpacket) != 0)) {
result = 0;
}
}
@@ -1170,9 +1165,8 @@ static int _nbu2ss_epn_in_data(
num = ep->epnum - 1;
- if ((ep->ep_type != USB_ENDPOINT_XFER_INT)
- && (req->req.dma != 0)
- && (data_size >= sizeof(u32))) {
+ if ((ep->ep_type != USB_ENDPOINT_XFER_INT) && (req->req.dma != 0) &&
+ (data_size >= sizeof(u32))) {
nret = _nbu2ss_in_dma(udc, ep, req, num, data_size);
} else {
data_size = min_t(u32, data_size, ep->ep.maxpacket);
@@ -1557,7 +1551,6 @@ static void _nbu2ss_epn_set_stall(
for (limit_cnt = 0
; limit_cnt < IN_DATA_EMPTY_COUNT
; limit_cnt++) {
-
regdata = _nbu2ss_readl(
&preg->EP_REGS[ep->epnum - 1].EP_STATUS);
@@ -1582,11 +1575,8 @@ static int std_req_get_status(struct nbu2ss_udc *udc)
u8 ep_adrs;
int result = -EINVAL;
- if ((udc->ctrl.wValue != 0x0000)
- || (direction != USB_DIR_IN)) {
-
+ if ((udc->ctrl.wValue != 0x0000) || (direction != USB_DIR_IN))
return result;
- }
length = min_t(u16, udc->ctrl.wLength, sizeof(status_data));
@@ -1852,7 +1842,7 @@ static inline void _nbu2ss_ep0_int(struct nbu2ss_udc *udc)
status = _nbu2ss_readl(&udc->p_regs->EP0_STATUS);
intr = status & EP0_STATUS_RW_BIT;
- _nbu2ss_writel(&udc->p_regs->EP0_STATUS, ~(u32)intr);
+ _nbu2ss_writel(&udc->p_regs->EP0_STATUS, ~intr);
status &= (SETUP_INT | EP0_IN_INT | EP0_OUT_INT
| STG_END_INT | EP0_OUT_NULL_INT);
@@ -1897,9 +1887,8 @@ static inline void _nbu2ss_ep0_int(struct nbu2ss_udc *udc)
break;
case EP0_OUT_STATUS_PAHSE:
- if ((status & STG_END_INT)
- || (status & SETUP_INT)
- || (status & EP0_OUT_NULL_INT)) {
+ if ((status & STG_END_INT) || (status & SETUP_INT) ||
+ (status & EP0_OUT_NULL_INT)) {
status &= ~(STG_END_INT
| EP0_OUT_INT
| EP0_OUT_NULL_INT);
@@ -1982,7 +1971,6 @@ static inline void _nbu2ss_epn_in_int(
} else {
if (req->zero && ((req->req.actual % ep->ep.maxpacket) == 0)) {
-
status =
_nbu2ss_readl(&preg->EP_REGS[ep->epnum - 1].EP_STATUS);
@@ -2127,7 +2115,7 @@ static inline void _nbu2ss_epn_int(struct nbu2ss_udc *udc, u32 epnum)
status = _nbu2ss_readl(&udc->p_regs->EP_REGS[num].EP_STATUS);
/* Interrupt Clear */
- _nbu2ss_writel(&udc->p_regs->EP_REGS[num].EP_STATUS, ~(u32)status);
+ _nbu2ss_writel(&udc->p_regs->EP_REGS[num].EP_STATUS, ~status);
req = list_first_entry_or_null(&ep->queue, struct nbu2ss_req, queue);
if (!req) {
@@ -2330,7 +2318,6 @@ static inline void _nbu2ss_check_vbus(struct nbu2ss_udc *udc)
/* VBUS ON Check*/
reg_dt = gpio_get_value(VBUS_VALUE);
if (reg_dt == 0) {
-
udc->linux_suspended = 0;
_nbu2ss_reset_controller(udc);
@@ -2502,7 +2489,6 @@ static irqreturn_t _nbu2ss_udc_irq(int irq, void *_udc)
int_bit = status >> 8;
for (epnum = 0; epnum < NUM_ENDPOINTS; epnum++) {
-
if (0x01 & int_bit)
_nbu2ss_ep_int(udc, epnum);
@@ -2546,9 +2532,8 @@ static int nbu2ss_ep_enable(
}
ep_type = usb_endpoint_type(desc);
- if ((ep_type == USB_ENDPOINT_XFER_CONTROL)
- || (ep_type == USB_ENDPOINT_XFER_ISOC)) {
-
+ if ((ep_type == USB_ENDPOINT_XFER_CONTROL) ||
+ (ep_type == USB_ENDPOINT_XFER_ISOC)) {
pr_err(" *** %s, bat bmAttributes\n", __func__);
return -EINVAL;
}
@@ -2557,9 +2542,7 @@ static int nbu2ss_ep_enable(
if (udc->vbus_active == 0)
return -ESHUTDOWN;
- if ((!udc->driver)
- || (udc->gadget.speed == USB_SPEED_UNKNOWN)) {
-
+ if ((!udc->driver) || (udc->gadget.speed == USB_SPEED_UNKNOWN)) {
dev_err(ep->udc->dev, " *** %s, udc !!\n", __func__);
return -ESHUTDOWN;
}
@@ -2674,10 +2657,7 @@ static int nbu2ss_ep_queue(
}
req = container_of(_req, struct nbu2ss_req, req);
- if (unlikely
- (!_req->complete || !_req->buf
- || !list_empty(&req->queue))) {
-
+ if (unlikely(!_req->complete || !_req->buf || !list_empty(&req->queue))) {
if (!_req->complete)
pr_err("udc: %s --- !_req->complete\n", __func__);
@@ -2736,7 +2716,6 @@ static int nbu2ss_ep_queue(
list_add_tail(&req->queue, &ep->queue);
if (bflag && !ep->stalled) {
-
result = _nbu2ss_start_transfer(udc, ep, req, FALSE);
if (result < 0) {
dev_err(udc->dev, " *** %s, result = %d\n", __func__,
@@ -2938,7 +2917,7 @@ static void nbu2ss_ep_fifo_flush(struct usb_ep *_ep)
}
/*-------------------------------------------------------------------------*/
-static struct usb_ep_ops nbu2ss_ep_ops = {
+static const struct usb_ep_ops nbu2ss_ep_ops = {
.enable = nbu2ss_ep_enable,
.disable = nbu2ss_ep_disable,
@@ -2979,9 +2958,7 @@ static int nbu2ss_gad_get_frame(struct usb_gadget *pgadget)
if (data == 0)
return -EINVAL;
- data = _nbu2ss_readl(&udc->p_regs->USB_ADDRESS) & FRAME;
-
- return data;
+ return _nbu2ss_readl(&udc->p_regs->USB_ADDRESS) & FRAME;
}
/*-------------------------------------------------------------------------*/
@@ -3307,8 +3284,8 @@ static int nbu2ss_drv_remove(struct platform_device *pdev)
for (i = 0; i < NUM_ENDPOINTS; i++) {
ep = &udc->ep[i];
if (ep->virt_buf)
- dma_free_coherent(NULL, PAGE_SIZE,
- (void *)ep->virt_buf, ep->phys_buf);
+ dma_free_coherent(NULL, PAGE_SIZE, (void *)ep->virt_buf,
+ ep->phys_buf);
}
/* Interrupt Handler - Release */
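Two hunks above drop ~(u32) casts; that is harmless because intr and status come from readl-style accessors and are already u32, and bitwise NOT on an unsigned int stays an unsigned int. The cast only matters for types narrower than int, which promote before the NOT is applied. A small sketch of the distinction, with made-up values:

    u32 status = 0x0000ffff;
    u32 a = ~status;    /* 0xffff0000: no promotion surprise for u32 */

    u16 small = 0x00ff;
    u32 b = ~small;     /* small promotes to int first, so b == 0xffffff00 */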
diff --git a/drivers/staging/fbtft/fb_agm1264k-fl.c b/drivers/staging/fbtft/fb_agm1264k-fl.c
index 7561385761e9..a6e3af74a904 100644
--- a/drivers/staging/fbtft/fb_agm1264k-fl.c
+++ b/drivers/staging/fbtft/fb_agm1264k-fl.c
@@ -264,6 +264,39 @@ construct_line_bitmap(struct fbtft_par *par, u8 *dest, signed short *src,
}
}
+static void iterate_diffusion_matrix(u32 xres, u32 yres, int x,
+ int y, signed short *convert_buf,
+ signed short pixel, signed short error)
+{
+ u16 i, j;
+
+ /* diffusion matrix row */
+ for (i = 0; i < DIFFUSING_MATRIX_WIDTH; ++i)
+ /* diffusion matrix column */
+ for (j = 0; j < DIFFUSING_MATRIX_HEIGHT; ++j) {
+ signed short *write_pos;
+ signed char coeff;
+
+ /* skip pixels out of zone */
+ if (x + i < 0 || x + i >= xres || y + j >= yres)
+ continue;
+ write_pos = &convert_buf[(y + j) * xres + x + i];
+ coeff = diffusing_matrix[i][j];
+ if (coeff == -1) {
+ /* pixel itself */
+ *write_pos = pixel;
+ } else {
+ signed short p = *write_pos + error * coeff;
+
+ if (p > WHITE)
+ p = WHITE;
+ if (p < BLACK)
+ p = BLACK;
+ *write_pos = p;
+ }
+ }
+}
+
static int write_vmem(struct fbtft_par *par, size_t offset, size_t len)
{
u16 *vmem16 = (u16 *)par->info->screen_buffer;
@@ -303,7 +336,6 @@ static int write_vmem(struct fbtft_par *par, size_t offset, size_t len)
signed short error_b = pixel - BLACK;
signed short error_w = pixel - WHITE;
signed short error;
- u16 i, j;
/* what color close? */
if (abs(error_b) >= abs(error_w)) {
@@ -318,36 +350,10 @@ static int write_vmem(struct fbtft_par *par, size_t offset, size_t len)
error /= 8;
- /* diffusion matrix row */
- for (i = 0; i < DIFFUSING_MATRIX_WIDTH; ++i)
- /* diffusion matrix column */
- for (j = 0; j < DIFFUSING_MATRIX_HEIGHT; ++j) {
- signed short *write_pos;
- signed char coeff;
-
- /* skip pixels out of zone */
- if (x + i < 0 ||
- x + i >= par->info->var.xres
- || y + j >= par->info->var.yres)
- continue;
- write_pos = &convert_buf[
- (y + j) * par->info->var.xres +
- x + i];
- coeff = diffusing_matrix[i][j];
- if (coeff == -1)
- /* pixel itself */
- *write_pos = pixel;
- else {
- signed short p = *write_pos +
- error * coeff;
-
- if (p > WHITE)
- p = WHITE;
- if (p < BLACK)
- p = BLACK;
- *write_pos = p;
- }
- }
+ iterate_diffusion_matrix(par->info->var.xres,
+ par->info->var.yres,
+ x, y, convert_buf,
+ pixel, error);
}
/* 1 string = 2 pages */
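The extracted iterate_diffusion_matrix() is the spreading half of an error-diffusion dither: write_vmem() snaps each pixel to BLACK or WHITE, divides the quantization error by 8, and the helper pushes shares of it onto neighbouring pixels, clamping each to the valid range. A worked one-pixel example, assuming this driver's BLACK == 0 and WHITE == 0xff:

    signed short pixel = 100;            /* nearer BLACK: |100| < |100 - 255| */
    signed short error = (100 - 0) / 8;  /* = 12, carried to neighbours       */
    /* the matrix entry -1 marks the pixel itself, which is written as BLACK; */
    /* a neighbour with coefficient 3 gains 12 * 3 = 36, then is clamped into */
    /* [BLACK, WHITE] before it is quantized on a later iteration.            */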
diff --git a/drivers/staging/fbtft/fb_ili9325.c b/drivers/staging/fbtft/fb_ili9325.c
index c31e2e051d4a..19e33bab9cac 100644
--- a/drivers/staging/fbtft/fb_ili9325.c
+++ b/drivers/staging/fbtft/fb_ili9325.c
@@ -33,26 +33,23 @@
"04 16 2 7 6 3 2 1 7 7"
static unsigned int bt = 6; /* VGL=Vci*4 , VGH=Vci*4 */
-module_param(bt, uint, 0);
+module_param(bt, uint, 0000);
MODULE_PARM_DESC(bt, "Sets the factor used in the step-up circuits");
static unsigned int vc = 0x03; /* Vci1=Vci*0.80 */
-module_param(vc, uint, 0);
-MODULE_PARM_DESC(vc,
-"Sets the ratio factor of Vci to generate the reference voltages Vci1");
+module_param(vc, uint, 0000);
+MODULE_PARM_DESC(vc, "Sets the ratio factor of Vci to generate the reference voltages Vci1");
static unsigned int vrh = 0x0d; /* VREG1OUT=Vci*1.85 */
-module_param(vrh, uint, 0);
-MODULE_PARM_DESC(vrh,
-"Set the amplifying rate (1.6 ~ 1.9) of Vci applied to output the VREG1OUT");
+module_param(vrh, uint, 0000);
+MODULE_PARM_DESC(vrh, "Set the amplifying rate (1.6 ~ 1.9) of Vci applied to output the VREG1OUT");
static unsigned int vdv = 0x12; /* VCOMH amplitude=VREG1OUT*0.98 */
-module_param(vdv, uint, 0);
-MODULE_PARM_DESC(vdv,
-"Select the factor of VREG1OUT to set the amplitude of Vcom");
+module_param(vdv, uint, 0000);
+MODULE_PARM_DESC(vdv, "Select the factor of VREG1OUT to set the amplitude of Vcom");
static unsigned int vcm = 0x0a; /* VCOMH=VREG1OUT*0.735 */
-module_param(vcm, uint, 0);
+module_param(vcm, uint, 0000);
MODULE_PARM_DESC(vcm, "Set the internal VcomH voltage");
/*
diff --git a/drivers/staging/fbtft/fb_ili9481.c b/drivers/staging/fbtft/fb_ili9481.c
index 242adb3859bd..4e75f5abe2f9 100644
--- a/drivers/staging/fbtft/fb_ili9481.c
+++ b/drivers/staging/fbtft/fb_ili9481.c
@@ -27,7 +27,7 @@
#define WIDTH 320
#define HEIGHT 480
-static int default_init_sequence[] = {
+static s16 default_init_sequence[] = {
/* SLP_OUT - Sleep out */
-1, MIPI_DCS_EXIT_SLEEP_MODE,
-2, 50,
diff --git a/drivers/staging/fbtft/fb_ili9486.c b/drivers/staging/fbtft/fb_ili9486.c
index fa38d8885f0b..f4b314265f9e 100644
--- a/drivers/staging/fbtft/fb_ili9486.c
+++ b/drivers/staging/fbtft/fb_ili9486.c
@@ -26,7 +26,7 @@
#define HEIGHT 480
/* this init sequence matches PiScreen */
-static int default_init_sequence[] = {
+static s16 default_init_sequence[] = {
/* Interface Mode Control */
-1, 0xb0, 0x0,
-1, MIPI_DCS_EXIT_SLEEP_MODE,
diff --git a/drivers/staging/fbtft/fb_s6d02a1.c b/drivers/staging/fbtft/fb_s6d02a1.c
index 774b0ff69e6d..eb712aa0d692 100644
--- a/drivers/staging/fbtft/fb_s6d02a1.c
+++ b/drivers/staging/fbtft/fb_s6d02a1.c
@@ -24,7 +24,7 @@
#define DRVNAME "fb_s6d02a1"
-static int default_init_sequence[] = {
+static s16 default_init_sequence[] = {
-1, 0xf0, 0x5a, 0x5a,
diff --git a/drivers/staging/fbtft/fb_st7735r.c b/drivers/staging/fbtft/fb_st7735r.c
index 6670f2bb62ec..710b74bbba97 100644
--- a/drivers/staging/fbtft/fb_st7735r.c
+++ b/drivers/staging/fbtft/fb_st7735r.c
@@ -25,7 +25,7 @@
#define DEFAULT_GAMMA "0F 1A 0F 18 2F 28 20 22 1F 1B 23 37 00 07 02 10\n" \
"0F 1B 0F 17 33 2C 29 2E 30 30 39 3F 00 07 03 10"
-static int default_init_sequence[] = {
+static s16 default_init_sequence[] = {
-1, MIPI_DCS_SOFT_RESET,
-2, 150, /* delay */
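The recurring int → s16 change across these fbtft drivers matches how the init sequences are encoded: every entry is either an 8-bit command/data byte or a small negative sentinel, so 16 signed bits suffice and the static arrays shrink by half. In fbtft's convention, -1 opens a command (followed by its data bytes), -2 means delay for the next value in milliseconds, and -3 terminates the sequence. A hypothetical sequence in that format:

    static s16 example_init_sequence[] = {
            -1, 0x01,    /* command 0x01 (software reset)  */
            -2, 150,     /* wait 150 ms                    */
            -1, 0x11,    /* command 0x11 (exit sleep mode) */
            -3           /* end of sequence                */
    };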
diff --git a/drivers/staging/fbtft/fbtft-core.c b/drivers/staging/fbtft/fbtft-core.c
index 587f68aa466c..bbe89c9c4fb9 100644
--- a/drivers/staging/fbtft/fbtft-core.c
+++ b/drivers/staging/fbtft/fbtft-core.c
@@ -253,7 +253,8 @@ static int fbtft_backlight_update_status(struct backlight_device *bd)
"%s: polarity=%d, power=%d, fb_blank=%d\n",
__func__, polarity, bd->props.power, bd->props.fb_blank);
- if ((bd->props.power == FB_BLANK_UNBLANK) && (bd->props.fb_blank == FB_BLANK_UNBLANK))
+ if ((bd->props.power == FB_BLANK_UNBLANK) &&
+ (bd->props.fb_blank == FB_BLANK_UNBLANK))
gpio_set_value(par->gpio.led[0], polarity);
else
gpio_set_value(par->gpio.led[0], !polarity);
@@ -299,7 +300,8 @@ void fbtft_register_backlight(struct fbtft_par *par)
bl_props.state |= BL_CORE_DRIVER1;
bd = backlight_device_register(dev_driver_string(par->info->device),
- par->info->device, par, &fbtft_bl_ops, &bl_props);
+ par->info->device, par,
+ &fbtft_bl_ops, &bl_props);
if (IS_ERR(bd)) {
dev_err(par->info->device,
"cannot register backlight device (%ld)\n",
@@ -350,9 +352,11 @@ static void fbtft_update_display(struct fbtft_par *par, unsigned int start_line,
bool timeit = false;
int ret = 0;
- if (unlikely(par->debug & (DEBUG_TIME_FIRST_UPDATE | DEBUG_TIME_EACH_UPDATE))) {
+ if (unlikely(par->debug & (DEBUG_TIME_FIRST_UPDATE |
+ DEBUG_TIME_EACH_UPDATE))) {
if ((par->debug & DEBUG_TIME_EACH_UPDATE) ||
- ((par->debug & DEBUG_TIME_FIRST_UPDATE) && !par->first_update_done)) {
+ ((par->debug & DEBUG_TIME_FIRST_UPDATE) &&
+ !par->first_update_done)) {
ts_start = ktime_get();
timeit = true;
}
@@ -361,15 +365,17 @@ static void fbtft_update_display(struct fbtft_par *par, unsigned int start_line,
/* Sanity checks */
if (start_line > end_line) {
dev_warn(par->info->device,
- "%s: start_line=%u is larger than end_line=%u. Shouldn't happen, will do full display update\n",
- __func__, start_line, end_line);
+ "%s: start_line=%u is larger than end_line=%u. Shouldn't happen, will do full display update\n",
+ __func__, start_line, end_line);
start_line = 0;
end_line = par->info->var.yres - 1;
}
- if (start_line > par->info->var.yres - 1 || end_line > par->info->var.yres - 1) {
+ if (start_line > par->info->var.yres - 1 ||
+ end_line > par->info->var.yres - 1) {
dev_warn(par->info->device,
"%s: start_line=%u or end_line=%u is larger than max=%d. Shouldn't happen, will do full display update\n",
- __func__, start_line, end_line, par->info->var.yres - 1);
+ __func__, start_line,
+ end_line, par->info->var.yres - 1);
start_line = 0;
end_line = par->info->var.yres - 1;
}
@@ -660,12 +666,13 @@ struct fb_info *fbtft_framebuffer_alloc(struct fbtft_display *display,
unsigned int bpp = display->bpp;
unsigned int fps = display->fps;
int vmem_size, i;
- int *init_sequence = display->init_sequence;
+ s16 *init_sequence = display->init_sequence;
char *gamma = display->gamma;
unsigned long *gamma_curves = NULL;
/* sanity check */
- if (display->gamma_num * display->gamma_len > FBTFT_GAMMA_MAX_VALUES_TOTAL) {
+ if (display->gamma_num * display->gamma_len >
+ FBTFT_GAMMA_MAX_VALUES_TOTAL) {
dev_err(dev, "FBTFT_GAMMA_MAX_VALUES_TOTAL=%d is exceeded\n",
FBTFT_GAMMA_MAX_VALUES_TOTAL);
return NULL;
@@ -832,11 +839,13 @@ struct fb_info *fbtft_framebuffer_alloc(struct fbtft_display *display,
#ifdef CONFIG_HAS_DMA
if (dma) {
dev->coherent_dma_mask = ~0;
- txbuf = dmam_alloc_coherent(dev, txbuflen, &par->txbuf.dma, GFP_DMA);
+ txbuf = dmam_alloc_coherent(dev, txbuflen,
+ &par->txbuf.dma, GFP_DMA);
} else
#endif
{
- txbuf = devm_kzalloc(par->info->device, txbuflen, GFP_KERNEL);
+ txbuf = devm_kzalloc(par->info->device,
+ txbuflen, GFP_KERNEL);
}
if (!txbuf)
goto alloc_fail;
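Both allocation branches above are device-managed (dmam_alloc_coherent() and devm_kzalloc()), so the transmit buffer is released automatically when probe fails or the device is unbound, and the error path can simply bail out. A minimal sketch of the pattern, with my_alloc_txbuf() as a hypothetical name:

/* Devres owns the buffer: no explicit free on any exit path. */
static int my_alloc_txbuf(struct device *dev, size_t len)
{
        void *buf = devm_kzalloc(dev, len, GFP_KERNEL);

        if (!buf)
                return -ENOMEM;
        /* ... buf stays valid for the device's lifetime ... */
        return 0;
}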
diff --git a/drivers/staging/fbtft/fbtft.h b/drivers/staging/fbtft/fbtft.h
index 89c4b5b76ce6..aacdde92cc2e 100644
--- a/drivers/staging/fbtft/fbtft.h
+++ b/drivers/staging/fbtft/fbtft.h
@@ -124,7 +124,7 @@ struct fbtft_display {
unsigned int bpp;
unsigned int fps;
int txbuflen;
- int *init_sequence;
+ s16 *init_sequence;
char *gamma;
int gamma_num;
int gamma_len;
@@ -229,7 +229,7 @@ struct fbtft_par {
int led[16];
int aux[16];
} gpio;
- int *init_sequence;
+ s16 *init_sequence;
struct {
struct mutex lock;
unsigned long *curves;
diff --git a/drivers/staging/fbtft/fbtft_device.c b/drivers/staging/fbtft/fbtft_device.c
index e9211831b6a1..de46f8d988d2 100644
--- a/drivers/staging/fbtft/fbtft_device.c
+++ b/drivers/staging/fbtft/fbtft_device.c
@@ -96,9 +96,9 @@ static unsigned int buswidth = 8;
module_param(buswidth, uint, 0);
MODULE_PARM_DESC(buswidth, "Display bus width, used with the custom argument");
-static int init[FBTFT_MAX_INIT_SEQUENCE];
+static s16 init[FBTFT_MAX_INIT_SEQUENCE];
static int init_num;
-module_param_array(init, int, &init_num, 0);
+module_param_array(init, short, &init_num, 0);
MODULE_PARM_DESC(init, "Init sequence, used with the custom argument");
static unsigned long debug;
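Note that the type keyword handed to module_param_array() has to track the array's element type, since it selects both the per-element parser and the element stride; with the array now s16, the matching keyword is short (the two are the same width in the kernel). Restated as a sketch for emphasis:

static s16 init[FBTFT_MAX_INIT_SEQUENCE];
static int init_num;
module_param_array(init, short, &init_num, 0);  /* 'short' pairs with s16 */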
@@ -131,7 +131,7 @@ static void adafruit18_green_tab_set_addr_win(struct fbtft_par *par,
"D0 00 14 15 13 2C 42 43 4E 09 16 14 18 21\n" \
"D0 00 14 15 13 0B 43 55 53 0C 17 14 23 20"
-static int cberry28_init_sequence[] = {
+static s16 cberry28_init_sequence[] = {
/* turn off sleep mode */
-1, MIPI_DCS_EXIT_SLEEP_MODE,
-2, 120,
@@ -180,7 +180,7 @@ static int cberry28_init_sequence[] = {
-3,
};
-static int hy28b_init_sequence[] = {
+static s16 hy28b_init_sequence[] = {
-1, 0x00e7, 0x0010, -1, 0x0000, 0x0001,
-1, 0x0001, 0x0100, -1, 0x0002, 0x0700,
-1, 0x0003, 0x1030, -1, 0x0004, 0x0000,
@@ -211,7 +211,7 @@ static int hy28b_init_sequence[] = {
"04 1F 4 7 7 0 7 7 6 0\n" \
"0F 00 1 7 4 0 0 0 6 7"
-static int pitft_init_sequence[] = {
+static s16 pitft_init_sequence[] = {
-1, MIPI_DCS_SOFT_RESET,
-2, 5,
-1, MIPI_DCS_SET_DISPLAY_OFF,
@@ -242,7 +242,7 @@ static int pitft_init_sequence[] = {
-3
};
-static int waveshare32b_init_sequence[] = {
+static s16 waveshare32b_init_sequence[] = {
-1, 0xCB, 0x39, 0x2C, 0x00, 0x34, 0x02,
-1, 0xCF, 0x00, 0xC1, 0x30,
-1, 0xE8, 0x85, 0x00, 0x78,
diff --git a/drivers/staging/fbtft/flexfb.c b/drivers/staging/fbtft/flexfb.c
index ce0d254148e4..ded10718712b 100644
--- a/drivers/staging/fbtft/flexfb.c
+++ b/drivers/staging/fbtft/flexfb.c
@@ -38,9 +38,9 @@ static unsigned int height;
module_param(height, uint, 0);
MODULE_PARM_DESC(height, "Display height");
-static int init[512];
+static s16 init[512];
static int init_num;
-module_param_array(init, int, &init_num, 0);
+module_param_array(init, short, &init_num, 0);
MODULE_PARM_DESC(init, "Init sequence");
static unsigned int setaddrwin;
@@ -63,68 +63,316 @@ static bool latched;
module_param(latched, bool, 0);
MODULE_PARM_DESC(latched, "Use with latched 16-bit databus");
-static int *initp;
+static s16 *initp;
static int initp_num;
/* default init sequences */
-static int st7735r_init[] = {
--1, 0x01, -2, 150, -1, 0x11, -2, 500, -1, 0xB1, 0x01, 0x2C, 0x2D, -1, 0xB2, 0x01, 0x2C, 0x2D, -1, 0xB3, 0x01, 0x2C, 0x2D, 0x01, 0x2C, 0x2D,
--1, 0xB4, 0x07, -1, 0xC0, 0xA2, 0x02, 0x84, -1, 0xC1, 0xC5, -1, 0xC2, 0x0A, 0x00, -1, 0xC3, 0x8A, 0x2A, -1, 0xC4, 0x8A, 0xEE, -1, 0xC5, 0x0E,
--1, 0x20, -1, 0x36, 0xC0, -1, 0x3A, 0x05, -1, 0xE0, 0x0f, 0x1a, 0x0f, 0x18, 0x2f, 0x28, 0x20, 0x22, 0x1f, 0x1b, 0x23, 0x37, 0x00, 0x07, 0x02, 0x10,
--1, 0xE1, 0x0f, 0x1b, 0x0f, 0x17, 0x33, 0x2c, 0x29, 0x2e, 0x30, 0x30, 0x39, 0x3f, 0x00, 0x07, 0x03, 0x10, -1, 0x29, -2, 100, -1, 0x13, -2, 10, -3 };
-
-static int ssd1289_init[] = {
--1, 0x00, 0x0001, -1, 0x03, 0xA8A4, -1, 0x0C, 0x0000, -1, 0x0D, 0x080C, -1, 0x0E, 0x2B00, -1, 0x1E, 0x00B7, -1, 0x01, 0x2B3F, -1, 0x02, 0x0600,
--1, 0x10, 0x0000, -1, 0x11, 0x6070, -1, 0x05, 0x0000, -1, 0x06, 0x0000, -1, 0x16, 0xEF1C, -1, 0x17, 0x0003, -1, 0x07, 0x0233, -1, 0x0B, 0x0000,
--1, 0x0F, 0x0000, -1, 0x41, 0x0000, -1, 0x42, 0x0000, -1, 0x48, 0x0000, -1, 0x49, 0x013F, -1, 0x4A, 0x0000, -1, 0x4B, 0x0000, -1, 0x44, 0xEF00,
--1, 0x45, 0x0000, -1, 0x46, 0x013F, -1, 0x30, 0x0707, -1, 0x31, 0x0204, -1, 0x32, 0x0204, -1, 0x33, 0x0502, -1, 0x34, 0x0507, -1, 0x35, 0x0204,
--1, 0x36, 0x0204, -1, 0x37, 0x0502, -1, 0x3A, 0x0302, -1, 0x3B, 0x0302, -1, 0x23, 0x0000, -1, 0x24, 0x0000, -1, 0x25, 0x8000, -1, 0x4f, 0x0000,
--1, 0x4e, 0x0000, -1, 0x22, -3 };
-
-static int hx8340bn_init[] = {
--1, 0xC1, 0xFF, 0x83, 0x40, -1, 0x11, -2, 150, -1, 0xCA, 0x70, 0x00, 0xD9, -1, 0xB0, 0x01, 0x11,
--1, 0xC9, 0x90, 0x49, 0x10, 0x28, 0x28, 0x10, 0x00, 0x06, -2, 20, -1, 0xC2, 0x60, 0x71, 0x01, 0x0E, 0x05, 0x02, 0x09, 0x31, 0x0A,
--1, 0xC3, 0x67, 0x30, 0x61, 0x17, 0x48, 0x07, 0x05, 0x33, -2, 10, -1, 0xB5, 0x35, 0x20, 0x45, -1, 0xB4, 0x33, 0x25, 0x4C, -2, 10,
--1, 0x3A, 0x05, -1, 0x29, -2, 10, -3 };
-
-static int ili9225_init[] = {
--1, 0x0001, 0x011C, -1, 0x0002, 0x0100, -1, 0x0003, 0x1030, -1, 0x0008, 0x0808, -1, 0x000C, 0x0000, -1, 0x000F, 0x0A01, -1, 0x0020, 0x0000,
--1, 0x0021, 0x0000, -2, 50, -1, 0x0010, 0x0A00, -1, 0x0011, 0x1038, -2, 50, -1, 0x0012, 0x1121, -1, 0x0013, 0x004E, -1, 0x0014, 0x676F,
--1, 0x0030, 0x0000, -1, 0x0031, 0x00DB, -1, 0x0032, 0x0000, -1, 0x0033, 0x0000, -1, 0x0034, 0x00DB, -1, 0x0035, 0x0000, -1, 0x0036, 0x00AF,
--1, 0x0037, 0x0000, -1, 0x0038, 0x00DB, -1, 0x0039, 0x0000, -1, 0x0050, 0x0000, -1, 0x0051, 0x060A, -1, 0x0052, 0x0D0A, -1, 0x0053, 0x0303,
--1, 0x0054, 0x0A0D, -1, 0x0055, 0x0A06, -1, 0x0056, 0x0000, -1, 0x0057, 0x0303, -1, 0x0058, 0x0000, -1, 0x0059, 0x0000, -2, 50,
--1, 0x0007, 0x1017, -2, 50, -3 };
-
-static int ili9320_init[] = {
--1, 0x00E5, 0x8000, -1, 0x0000, 0x0001, -1, 0x0001, 0x0100, -1, 0x0002, 0x0700, -1, 0x0003, 0x1030, -1, 0x0004, 0x0000, -1, 0x0008, 0x0202,
--1, 0x0009, 0x0000, -1, 0x000A, 0x0000, -1, 0x000C, 0x0000, -1, 0x000D, 0x0000, -1, 0x000F, 0x0000, -1, 0x0010, 0x0000, -1, 0x0011, 0x0007,
--1, 0x0012, 0x0000, -1, 0x0013, 0x0000, -2, 200, -1, 0x0010, 0x17B0, -1, 0x0011, 0x0031, -2, 50, -1, 0x0012, 0x0138, -2, 50, -1, 0x0013, 0x1800,
--1, 0x0029, 0x0008, -2, 50, -1, 0x0020, 0x0000, -1, 0x0021, 0x0000, -1, 0x0030, 0x0000, -1, 0x0031, 0x0505, -1, 0x0032, 0x0004,
--1, 0x0035, 0x0006, -1, 0x0036, 0x0707, -1, 0x0037, 0x0105, -1, 0x0038, 0x0002, -1, 0x0039, 0x0707, -1, 0x003C, 0x0704, -1, 0x003D, 0x0807,
--1, 0x0050, 0x0000, -1, 0x0051, 0x00EF, -1, 0x0052, 0x0000, -1, 0x0053, 0x013F, -1, 0x0060, 0x2700, -1, 0x0061, 0x0001, -1, 0x006A, 0x0000,
--1, 0x0080, 0x0000, -1, 0x0081, 0x0000, -1, 0x0082, 0x0000, -1, 0x0083, 0x0000, -1, 0x0084, 0x0000, -1, 0x0085, 0x0000, -1, 0x0090, 0x0010,
--1, 0x0092, 0x0000, -1, 0x0093, 0x0003, -1, 0x0095, 0x0110, -1, 0x0097, 0x0000, -1, 0x0098, 0x0000, -1, 0x0007, 0x0173, -3 };
-
-static int ili9325_init[] = {
--1, 0x00E3, 0x3008, -1, 0x00E7, 0x0012, -1, 0x00EF, 0x1231, -1, 0x0001, 0x0100, -1, 0x0002, 0x0700, -1, 0x0003, 0x1030, -1, 0x0004, 0x0000,
--1, 0x0008, 0x0207, -1, 0x0009, 0x0000, -1, 0x000A, 0x0000, -1, 0x000C, 0x0000, -1, 0x000D, 0x0000, -1, 0x000F, 0x0000, -1, 0x0010, 0x0000,
--1, 0x0011, 0x0007, -1, 0x0012, 0x0000, -1, 0x0013, 0x0000, -2, 200, -1, 0x0010, 0x1690, -1, 0x0011, 0x0223, -2, 50, -1, 0x0012, 0x000D, -2, 50,
--1, 0x0013, 0x1200, -1, 0x0029, 0x000A, -1, 0x002B, 0x000C, -2, 50, -1, 0x0020, 0x0000, -1, 0x0021, 0x0000, -1, 0x0030, 0x0000,
--1, 0x0031, 0x0506, -1, 0x0032, 0x0104, -1, 0x0035, 0x0207, -1, 0x0036, 0x000F, -1, 0x0037, 0x0306, -1, 0x0038, 0x0102, -1, 0x0039, 0x0707,
--1, 0x003C, 0x0702, -1, 0x003D, 0x1604, -1, 0x0050, 0x0000, -1, 0x0051, 0x00EF, -1, 0x0052, 0x0000, -1, 0x0053, 0x013F, -1, 0x0060, 0xA700,
--1, 0x0061, 0x0001, -1, 0x006A, 0x0000, -1, 0x0080, 0x0000, -1, 0x0081, 0x0000, -1, 0x0082, 0x0000, -1, 0x0083, 0x0000, -1, 0x0084, 0x0000,
--1, 0x0085, 0x0000, -1, 0x0090, 0x0010, -1, 0x0092, 0x0600, -1, 0x0007, 0x0133, -3 };
-
-static int ili9341_init[] = {
--1, 0x28, -2, 20, -1, 0xCF, 0x00, 0x83, 0x30, -1, 0xED, 0x64, 0x03, 0x12, 0x81, -1, 0xE8, 0x85, 0x01, 0x79,
--1, 0xCB, 0x39, 0x2c, 0x00, 0x34, 0x02, -1, 0xF7, 0x20, -1, 0xEA, 0x00, 0x00, -1, 0xC0, 0x26, -1, 0xC1, 0x11,
--1, 0xC5, 0x35, 0x3E, -1, 0xC7, 0xBE, -1, 0xB1, 0x00, 0x1B, -1, 0xB6, 0x0a, 0x82, 0x27, 0x00, -1, 0xB7, 0x07,
--1, 0x3A, 0x55, -1, 0x36, 0x48, -1, 0x11, -2, 120, -1, 0x29, -2, 20, -3 };
-
-static int ssd1351_init[] = { -1, 0xfd, 0x12, -1, 0xfd, 0xb1, -1, 0xae, -1, 0xb3, 0xf1, -1, 0xca, 0x7f, -1, 0xa0, 0x74,
- -1, 0x15, 0x00, 0x7f, -1, 0x75, 0x00, 0x7f, -1, 0xa1, 0x00, -1, 0xa2, 0x00, -1, 0xb5, 0x00,
- -1, 0xab, 0x01, -1, 0xb1, 0x32, -1, 0xb4, 0xa0, 0xb5, 0x55, -1, 0xbb, 0x17, -1, 0xbe, 0x05,
- -1, 0xc1, 0xc8, 0x80, 0xc8, -1, 0xc7, 0x0f, -1, 0xb6, 0x01, -1, 0xa6, -1, 0xaf, -3 };
+static s16 st7735r_init[] = {
+ -1, 0x01,
+ -2, 150,
+ -1, 0x11,
+ -2, 500,
+ -1, 0xB1, 0x01, 0x2C, 0x2D,
+ -1, 0xB2, 0x01, 0x2C, 0x2D,
+ -1, 0xB3, 0x01, 0x2C, 0x2D, 0x01, 0x2C, 0x2D,
+ -1, 0xB4, 0x07,
+ -1, 0xC0, 0xA2, 0x02, 0x84,
+ -1, 0xC1, 0xC5,
+ -1, 0xC2, 0x0A, 0x00,
+ -1, 0xC3, 0x8A, 0x2A,
+ -1, 0xC4, 0x8A, 0xEE,
+ -1, 0xC5, 0x0E,
+ -1, 0x20,
+ -1, 0x36, 0xC0,
+ -1, 0x3A, 0x05,
+ -1, 0xE0, 0x0f, 0x1a, 0x0f, 0x18, 0x2f, 0x28, 0x20, 0x22,
+ 0x1f, 0x1b, 0x23, 0x37, 0x00, 0x07, 0x02, 0x10,
+ -1, 0xE1, 0x0f, 0x1b, 0x0f, 0x17, 0x33, 0x2c, 0x29, 0x2e,
+ 0x30, 0x30, 0x39, 0x3f, 0x00, 0x07, 0x03, 0x10,
+ -1, 0x29,
+ -2, 100,
+ -1, 0x13,
+ -2, 10,
+ -3
+};
+
+static s16 ssd1289_init[] = {
+ -1, 0x00, 0x0001,
+ -1, 0x03, 0xA8A4,
+ -1, 0x0C, 0x0000,
+ -1, 0x0D, 0x080C,
+ -1, 0x0E, 0x2B00,
+ -1, 0x1E, 0x00B7,
+ -1, 0x01, 0x2B3F,
+ -1, 0x02, 0x0600,
+ -1, 0x10, 0x0000,
+ -1, 0x11, 0x6070,
+ -1, 0x05, 0x0000,
+ -1, 0x06, 0x0000,
+ -1, 0x16, 0xEF1C,
+ -1, 0x17, 0x0003,
+ -1, 0x07, 0x0233,
+ -1, 0x0B, 0x0000,
+ -1, 0x0F, 0x0000,
+ -1, 0x41, 0x0000,
+ -1, 0x42, 0x0000,
+ -1, 0x48, 0x0000,
+ -1, 0x49, 0x013F,
+ -1, 0x4A, 0x0000,
+ -1, 0x4B, 0x0000,
+ -1, 0x44, 0xEF00,
+ -1, 0x45, 0x0000,
+ -1, 0x46, 0x013F,
+ -1, 0x30, 0x0707,
+ -1, 0x31, 0x0204,
+ -1, 0x32, 0x0204,
+ -1, 0x33, 0x0502,
+ -1, 0x34, 0x0507,
+ -1, 0x35, 0x0204,
+ -1, 0x36, 0x0204,
+ -1, 0x37, 0x0502,
+ -1, 0x3A, 0x0302,
+ -1, 0x3B, 0x0302,
+ -1, 0x23, 0x0000,
+ -1, 0x24, 0x0000,
+ -1, 0x25, 0x8000,
+ -1, 0x4f, 0x0000,
+ -1, 0x4e, 0x0000,
+ -1, 0x22,
+ -3
+};
+
+static s16 hx8340bn_init[] = {
+ -1, 0xC1, 0xFF, 0x83, 0x40,
+ -1, 0x11,
+ -2, 150,
+ -1, 0xCA, 0x70, 0x00, 0xD9,
+ -1, 0xB0, 0x01, 0x11,
+ -1, 0xC9, 0x90, 0x49, 0x10, 0x28, 0x28, 0x10, 0x00, 0x06,
+ -2, 20,
+ -1, 0xC2, 0x60, 0x71, 0x01, 0x0E, 0x05, 0x02, 0x09, 0x31, 0x0A,
+ -1, 0xC3, 0x67, 0x30, 0x61, 0x17, 0x48, 0x07, 0x05, 0x33,
+ -2, 10,
+ -1, 0xB5, 0x35, 0x20, 0x45,
+ -1, 0xB4, 0x33, 0x25, 0x4C,
+ -2, 10,
+ -1, 0x3A, 0x05,
+ -1, 0x29,
+ -2, 10,
+ -3
+};
+
+static s16 ili9225_init[] = {
+ -1, 0x0001, 0x011C,
+ -1, 0x0002, 0x0100,
+ -1, 0x0003, 0x1030,
+ -1, 0x0008, 0x0808,
+ -1, 0x000C, 0x0000,
+ -1, 0x000F, 0x0A01,
+ -1, 0x0020, 0x0000,
+ -1, 0x0021, 0x0000,
+ -2, 50,
+ -1, 0x0010, 0x0A00,
+ -1, 0x0011, 0x1038,
+ -2, 50,
+ -1, 0x0012, 0x1121,
+ -1, 0x0013, 0x004E,
+ -1, 0x0014, 0x676F,
+ -1, 0x0030, 0x0000,
+ -1, 0x0031, 0x00DB,
+ -1, 0x0032, 0x0000,
+ -1, 0x0033, 0x0000,
+ -1, 0x0034, 0x00DB,
+ -1, 0x0035, 0x0000,
+ -1, 0x0036, 0x00AF,
+ -1, 0x0037, 0x0000,
+ -1, 0x0038, 0x00DB,
+ -1, 0x0039, 0x0000,
+ -1, 0x0050, 0x0000,
+ -1, 0x0051, 0x060A,
+ -1, 0x0052, 0x0D0A,
+ -1, 0x0053, 0x0303,
+ -1, 0x0054, 0x0A0D,
+ -1, 0x0055, 0x0A06,
+ -1, 0x0056, 0x0000,
+ -1, 0x0057, 0x0303,
+ -1, 0x0058, 0x0000,
+ -1, 0x0059, 0x0000,
+ -2, 50,
+ -1, 0x0007, 0x1017,
+ -2, 50,
+ -3
+};
+
+static s16 ili9320_init[] = {
+ -1, 0x00E5, 0x8000,
+ -1, 0x0000, 0x0001,
+ -1, 0x0001, 0x0100,
+ -1, 0x0002, 0x0700,
+ -1, 0x0003, 0x1030,
+ -1, 0x0004, 0x0000,
+ -1, 0x0008, 0x0202,
+ -1, 0x0009, 0x0000,
+ -1, 0x000A, 0x0000,
+ -1, 0x000C, 0x0000,
+ -1, 0x000D, 0x0000,
+ -1, 0x000F, 0x0000,
+ -1, 0x0010, 0x0000,
+ -1, 0x0011, 0x0007,
+ -1, 0x0012, 0x0000,
+ -1, 0x0013, 0x0000,
+ -2, 200,
+ -1, 0x0010, 0x17B0,
+ -1, 0x0011, 0x0031,
+ -2, 50,
+ -1, 0x0012, 0x0138,
+ -2, 50,
+ -1, 0x0013, 0x1800,
+ -1, 0x0029, 0x0008,
+ -2, 50,
+ -1, 0x0020, 0x0000,
+ -1, 0x0021, 0x0000,
+ -1, 0x0030, 0x0000,
+ -1, 0x0031, 0x0505,
+ -1, 0x0032, 0x0004,
+ -1, 0x0035, 0x0006,
+ -1, 0x0036, 0x0707,
+ -1, 0x0037, 0x0105,
+ -1, 0x0038, 0x0002,
+ -1, 0x0039, 0x0707,
+ -1, 0x003C, 0x0704,
+ -1, 0x003D, 0x0807,
+ -1, 0x0050, 0x0000,
+ -1, 0x0051, 0x00EF,
+ -1, 0x0052, 0x0000,
+ -1, 0x0053, 0x013F,
+ -1, 0x0060, 0x2700,
+ -1, 0x0061, 0x0001,
+ -1, 0x006A, 0x0000,
+ -1, 0x0080, 0x0000,
+ -1, 0x0081, 0x0000,
+ -1, 0x0082, 0x0000,
+ -1, 0x0083, 0x0000,
+ -1, 0x0084, 0x0000,
+ -1, 0x0085, 0x0000,
+ -1, 0x0090, 0x0010,
+ -1, 0x0092, 0x0000,
+ -1, 0x0093, 0x0003,
+ -1, 0x0095, 0x0110,
+ -1, 0x0097, 0x0000,
+ -1, 0x0098, 0x0000,
+ -1, 0x0007, 0x0173,
+ -3
+};
+
+static s16 ili9325_init[] = {
+ -1, 0x00E3, 0x3008,
+ -1, 0x00E7, 0x0012,
+ -1, 0x00EF, 0x1231,
+ -1, 0x0001, 0x0100,
+ -1, 0x0002, 0x0700,
+ -1, 0x0003, 0x1030,
+ -1, 0x0004, 0x0000,
+ -1, 0x0008, 0x0207,
+ -1, 0x0009, 0x0000,
+ -1, 0x000A, 0x0000,
+ -1, 0x000C, 0x0000,
+ -1, 0x000D, 0x0000,
+ -1, 0x000F, 0x0000,
+ -1, 0x0010, 0x0000,
+ -1, 0x0011, 0x0007,
+ -1, 0x0012, 0x0000,
+ -1, 0x0013, 0x0000,
+ -2, 200,
+ -1, 0x0010, 0x1690,
+ -1, 0x0011, 0x0223,
+ -2, 50,
+ -1, 0x0012, 0x000D,
+ -2, 50,
+ -1, 0x0013, 0x1200,
+ -1, 0x0029, 0x000A,
+ -1, 0x002B, 0x000C,
+ -2, 50,
+ -1, 0x0020, 0x0000,
+ -1, 0x0021, 0x0000,
+ -1, 0x0030, 0x0000,
+ -1, 0x0031, 0x0506,
+ -1, 0x0032, 0x0104,
+ -1, 0x0035, 0x0207,
+ -1, 0x0036, 0x000F,
+ -1, 0x0037, 0x0306,
+ -1, 0x0038, 0x0102,
+ -1, 0x0039, 0x0707,
+ -1, 0x003C, 0x0702,
+ -1, 0x003D, 0x1604,
+ -1, 0x0050, 0x0000,
+ -1, 0x0051, 0x00EF,
+ -1, 0x0052, 0x0000,
+ -1, 0x0053, 0x013F,
+ -1, 0x0060, 0xA700,
+ -1, 0x0061, 0x0001,
+ -1, 0x006A, 0x0000,
+ -1, 0x0080, 0x0000,
+ -1, 0x0081, 0x0000,
+ -1, 0x0082, 0x0000,
+ -1, 0x0083, 0x0000,
+ -1, 0x0084, 0x0000,
+ -1, 0x0085, 0x0000,
+ -1, 0x0090, 0x0010,
+ -1, 0x0092, 0x0600,
+ -1, 0x0007, 0x0133,
+ -3
+};
+
+static s16 ili9341_init[] = {
+ -1, 0x28,
+ -2, 20,
+ -1, 0xCF, 0x00, 0x83, 0x30,
+ -1, 0xED, 0x64, 0x03, 0x12, 0x81,
+ -1, 0xE8, 0x85, 0x01, 0x79,
+ -1, 0xCB, 0x39, 0x2c, 0x00, 0x34, 0x02,
+ -1, 0xF7, 0x20,
+ -1, 0xEA, 0x00, 0x00,
+ -1, 0xC0, 0x26,
+ -1, 0xC1, 0x11,
+ -1, 0xC5, 0x35, 0x3E,
+ -1, 0xC7, 0xBE,
+ -1, 0xB1, 0x00, 0x1B,
+ -1, 0xB6, 0x0a, 0x82, 0x27, 0x00,
+ -1, 0xB7, 0x07,
+ -1, 0x3A, 0x55,
+ -1, 0x36, 0x48,
+ -1, 0x11,
+ -2, 120,
+ -1, 0x29,
+ -2, 20,
+ -3
+};
+
+static s16 ssd1351_init[] = {
+ -1, 0xfd, 0x12,
+ -1, 0xfd, 0xb1,
+ -1, 0xae,
+ -1, 0xb3, 0xf1,
+ -1, 0xca, 0x7f,
+ -1, 0xa0, 0x74,
+ -1, 0x15, 0x00, 0x7f,
+ -1, 0x75, 0x00, 0x7f,
+ -1, 0xa1, 0x00,
+ -1, 0xa2, 0x00,
+ -1, 0xb5, 0x00,
+ -1, 0xab, 0x01,
+ -1, 0xb1, 0x32,
+ -1, 0xb4, 0xa0, 0xb5, 0x55,
+ -1, 0xbb, 0x17,
+ -1, 0xbe, 0x05,
+ -1, 0xc1, 0xc8, 0x80, 0xc8,
+ -1, 0xc7, 0x0f,
+ -1, 0xb6, 0x01,
+ -1, 0xa6,
+ -1, 0xaf,
+ -3
+};
/**
* struct flexfb_lcd_controller - Describes the LCD controller properties
@@ -142,7 +390,7 @@ struct flexfb_lcd_controller {
unsigned int height;
unsigned int setaddrwin;
unsigned int regwidth;
- int *init_seq;
+ s16 *init_seq;
int init_seq_sz;
};
@@ -582,6 +830,7 @@ static const struct platform_device_id flexfb_platform_ids[] = {
{ "flexpfb", 0 },
{ },
};
+MODULE_DEVICE_TABLE(platform, flexfb_platform_ids);
static struct platform_driver flexfb_platform_driver = {
.driver = {
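The MODULE_DEVICE_TABLE() addition above embeds the id table in the module's metadata, letting depmod generate modaliases so udev can autoload flexfb when a matching platform device is registered; without it, the built-as-module driver had to be loaded by hand. For illustration, the hand-written equivalent would be roughly:

/* Roughly what the device table yields at alias-generation time. */
MODULE_ALIAS("platform:flexfb");
MODULE_ALIAS("platform:flexpfb");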
diff --git a/drivers/staging/fsl-mc/bus/Kconfig b/drivers/staging/fsl-mc/bus/Kconfig
index 1f959339c671..5c009ab48f00 100644
--- a/drivers/staging/fsl-mc/bus/Kconfig
+++ b/drivers/staging/fsl-mc/bus/Kconfig
@@ -1,25 +1,17 @@
#
-# Freescale Management Complex (MC) bus drivers
+# DPAA2 fsl-mc bus
#
-# Copyright (C) 2014 Freescale Semiconductor, Inc.
+# Copyright (C) 2014-2016 Freescale Semiconductor, Inc.
#
# This file is released under the GPLv2
#
config FSL_MC_BUS
- bool "Freescale Management Complex (MC) bus driver"
- depends on OF && ARM64
+ bool "QorIQ DPAA2 fsl-mc bus driver"
+ depends on OF && ARCH_LAYERSCAPE
select GENERIC_MSI_IRQ_DOMAIN
help
- Driver to enable the bus infrastructure for the Freescale
- QorIQ Management Complex (fsl-mc). The fsl-mc is a hardware
- module of the QorIQ LS2 SoCs, that does resource management
- for hardware building-blocks in the SoC that can be used
- to dynamically create networking hardware objects such as
- network interfaces (NICs), crypto accelerator instances,
- or L2 switches.
-
- Only enable this option when building the kernel for
- Freescale QorQIQ LS2xxxx SoCs.
-
-
+ Driver to enable the bus infrastructure for the QorIQ DPAA2
+ architecture. The fsl-mc bus driver handles discovery of
+ DPAA2 objects (which are represented as Linux devices) and
+ binding objects to drivers.
diff --git a/drivers/staging/fsl-mc/include/dpbp-cmd.h b/drivers/staging/fsl-mc/bus/dpbp-cmd.h
index 2860411ddb51..7d86539b5414 100644
--- a/drivers/staging/fsl-mc/include/dpbp-cmd.h
+++ b/drivers/staging/fsl-mc/bus/dpbp-cmd.h
@@ -1,4 +1,5 @@
-/* Copyright 2013-2016 Freescale Semiconductor Inc.
+/*
+ * Copyright 2013-2016 Freescale Semiconductor Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -11,7 +12,6 @@
* names of any contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
- *
* ALTERNATIVELY, this software may be distributed under the terms of the
* GNU General Public License ("GPL") as published by the Free Software
* Foundation, either version 2 of that License or (at your option) any
@@ -33,37 +33,48 @@
#define _FSL_DPBP_CMD_H
/* DPBP Version */
-#define DPBP_VER_MAJOR 2
+#define DPBP_VER_MAJOR 3
#define DPBP_VER_MINOR 2
+/* Command versioning */
+#define DPBP_CMD_BASE_VERSION 1
+#define DPBP_CMD_ID_OFFSET 4
+
+#define DPBP_CMD(id) ((id << DPBP_CMD_ID_OFFSET) | DPBP_CMD_BASE_VERSION)
+
/* Command IDs */
-#define DPBP_CMDID_CLOSE 0x800
-#define DPBP_CMDID_OPEN 0x804
-#define DPBP_CMDID_CREATE 0x904
-#define DPBP_CMDID_DESTROY 0x900
-
-#define DPBP_CMDID_ENABLE 0x002
-#define DPBP_CMDID_DISABLE 0x003
-#define DPBP_CMDID_GET_ATTR 0x004
-#define DPBP_CMDID_RESET 0x005
-#define DPBP_CMDID_IS_ENABLED 0x006
-
-#define DPBP_CMDID_SET_IRQ 0x010
-#define DPBP_CMDID_GET_IRQ 0x011
-#define DPBP_CMDID_SET_IRQ_ENABLE 0x012
-#define DPBP_CMDID_GET_IRQ_ENABLE 0x013
-#define DPBP_CMDID_SET_IRQ_MASK 0x014
-#define DPBP_CMDID_GET_IRQ_MASK 0x015
-#define DPBP_CMDID_GET_IRQ_STATUS 0x016
-#define DPBP_CMDID_CLEAR_IRQ_STATUS 0x017
-
-#define DPBP_CMDID_SET_NOTIFICATIONS 0x01b0
-#define DPBP_CMDID_GET_NOTIFICATIONS 0x01b1
+#define DPBP_CMDID_CLOSE DPBP_CMD(0x800)
+#define DPBP_CMDID_OPEN DPBP_CMD(0x804)
+#define DPBP_CMDID_CREATE DPBP_CMD(0x904)
+#define DPBP_CMDID_DESTROY DPBP_CMD(0x984)
+#define DPBP_CMDID_GET_API_VERSION DPBP_CMD(0xa04)
+
+#define DPBP_CMDID_ENABLE DPBP_CMD(0x002)
+#define DPBP_CMDID_DISABLE DPBP_CMD(0x003)
+#define DPBP_CMDID_GET_ATTR DPBP_CMD(0x004)
+#define DPBP_CMDID_RESET DPBP_CMD(0x005)
+#define DPBP_CMDID_IS_ENABLED DPBP_CMD(0x006)
+
+#define DPBP_CMDID_SET_IRQ DPBP_CMD(0x010)
+#define DPBP_CMDID_GET_IRQ DPBP_CMD(0x011)
+#define DPBP_CMDID_SET_IRQ_ENABLE DPBP_CMD(0x012)
+#define DPBP_CMDID_GET_IRQ_ENABLE DPBP_CMD(0x013)
+#define DPBP_CMDID_SET_IRQ_MASK DPBP_CMD(0x014)
+#define DPBP_CMDID_GET_IRQ_MASK DPBP_CMD(0x015)
+#define DPBP_CMDID_GET_IRQ_STATUS DPBP_CMD(0x016)
+#define DPBP_CMDID_CLEAR_IRQ_STATUS DPBP_CMD(0x017)
+
+#define DPBP_CMDID_SET_NOTIFICATIONS DPBP_CMD(0x01b0)
+#define DPBP_CMDID_GET_NOTIFICATIONS DPBP_CMD(0x01b1)
struct dpbp_cmd_open {
__le32 dpbp_id;
};
+struct dpbp_cmd_destroy {
+ __le32 object_id;
+};
+
#define DPBP_ENABLE 0x1
struct dpbp_rsp_is_enabled {
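The DPBP_CMD() scheme above folds a 4-bit command version into the low nibble of every command id, so the MC firmware can presumably tell apart revisions of the same command. A worked example of the encoding:

/*
 * DPBP_CMDID_CLOSE = DPBP_CMD(0x800)
 *                  = (0x800 << DPBP_CMD_ID_OFFSET) | DPBP_CMD_BASE_VERSION
 *                  = (0x800 << 4) | 1
 *                  = 0x8001
 *
 * Bits [3:0] carry the command version; the command id proper sits in
 * the bits above them.
 */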
diff --git a/drivers/staging/fsl-mc/bus/dpbp.c b/drivers/staging/fsl-mc/bus/dpbp.c
index 5d4cd812a400..cf4782f6a049 100644
--- a/drivers/staging/fsl-mc/bus/dpbp.c
+++ b/drivers/staging/fsl-mc/bus/dpbp.c
@@ -1,4 +1,5 @@
-/* Copyright 2013-2016 Freescale Semiconductor Inc.
+/*
+ * Copyright 2013-2016 Freescale Semiconductor Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -11,7 +12,6 @@
* names of any contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
- *
* ALTERNATIVELY, this software may be distributed under the terms of the
* GNU General Public License ("GPL") as published by the Free Software
* Foundation, either version 2 of that License or (at your option) any
@@ -32,7 +32,8 @@
#include "../include/mc-sys.h"
#include "../include/mc-cmd.h"
#include "../include/dpbp.h"
-#include "../include/dpbp-cmd.h"
+
+#include "dpbp-cmd.h"
/**
* dpbp_open() - Open a control session for the specified object.
@@ -107,28 +108,26 @@ EXPORT_SYMBOL(dpbp_close);
/**
* dpbp_create() - Create the DPBP object.
* @mc_io: Pointer to MC portal's I/O object
+ * @dprc_token: Parent container token; '0' for default container
* @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
* @cfg: Configuration structure
- * @token: Returned token; use in subsequent API calls
+ * @obj_id: Returned object id; use in subsequent API calls
*
* Create the DPBP object, allocate required resources and
* perform required initialization.
*
- * The object can be created either by declaring it in the
- * DPL file, or by calling this function.
- * This function returns a unique authentication token,
- * associated with the specific object ID and the specific MC
- * portal; this token must be used in all subsequent calls to
- * this specific object. For objects that are created using the
- * DPL file, call dpbp_open function to get an authentication
- * token first.
+ * This function accepts an authentication token of a parent
+ * container that this object should be assigned to and returns
+ * an object id. This object id will be used in all subsequent calls to
+ * this specific object.
*
* Return: '0' on Success; Error code otherwise.
*/
int dpbp_create(struct fsl_mc_io *mc_io,
+ u16 dprc_token,
u32 cmd_flags,
const struct dpbp_cfg *cfg,
- u16 *token)
+ u32 *obj_id)
{
struct mc_command cmd = { 0 };
int err;
@@ -137,7 +136,7 @@ int dpbp_create(struct fsl_mc_io *mc_io,
/* prepare command */
cmd.header = mc_encode_cmd_header(DPBP_CMDID_CREATE,
- cmd_flags, 0);
+ cmd_flags, dprc_token);
/* send command to mc*/
err = mc_send_command(mc_io, &cmd);
@@ -145,7 +144,7 @@ int dpbp_create(struct fsl_mc_io *mc_io,
return err;
/* retrieve response parameters */
- *token = mc_cmd_hdr_read_token(&cmd);
+ *obj_id = mc_cmd_read_object_id(&cmd);
return 0;
}
@@ -153,20 +152,25 @@ int dpbp_create(struct fsl_mc_io *mc_io,
/**
* dpbp_destroy() - Destroy the DPBP object and release all its resources.
* @mc_io: Pointer to MC portal's I/O object
+ * @dprc_token: Parent container token; '0' for default container
* @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
- * @token: Token of DPBP object
+ * @obj_id: ID of DPBP object
*
* Return: '0' on Success; error code otherwise.
*/
int dpbp_destroy(struct fsl_mc_io *mc_io,
+ u16 dprc_token,
u32 cmd_flags,
- u16 token)
+ u32 obj_id)
{
+ struct dpbp_cmd_destroy *cmd_params;
struct mc_command cmd = { 0 };
/* prepare command */
cmd.header = mc_encode_cmd_header(DPBP_CMDID_DESTROY,
- cmd_flags, token);
+ cmd_flags, dprc_token);
+ cmd_params = (struct dpbp_cmd_destroy *)cmd.params;
+ cmd_params->object_id = cpu_to_le32(obj_id);
/* send command to mc*/
return mc_send_command(mc_io, &cmd);
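With the reworked pair above, create/destroy operate on the parent DPRC's token and plain object ids, while runtime calls still go through a per-object token obtained via open. A sketch of the resulting lifecycle, assuming dprc_token came from a prior dprc_open() and with error handling trimmed:

u32 obj_id;
u16 token;
struct dpbp_cfg cfg = { 0 };

dpbp_create(mc_io, dprc_token, 0, &cfg, &obj_id);  /* id, not a token */
dpbp_open(mc_io, 0, obj_id, &token);               /* id -> token */
/* ... runtime calls (enable, get_attributes, ...) use 'token' ... */
dpbp_close(mc_io, 0, token);
dpbp_destroy(mc_io, dprc_token, 0, obj_id);        /* id, not token */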
@@ -609,8 +613,6 @@ int dpbp_get_attributes(struct fsl_mc_io *mc_io,
rsp_params = (struct dpbp_rsp_get_attributes *)cmd.params;
attr->bpid = le16_to_cpu(rsp_params->bpid);
attr->id = le32_to_cpu(rsp_params->id);
- attr->version.major = le16_to_cpu(rsp_params->version_major);
- attr->version.minor = le16_to_cpu(rsp_params->version_minor);
return 0;
}
@@ -689,3 +691,35 @@ int dpbp_get_notifications(struct fsl_mc_io *mc_io,
return 0;
}
+
+/**
+ * dpbp_get_api_version - Get Data Path Buffer Pool API version
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @major_ver: Major version of Buffer Pool API
+ * @minor_ver: Minor version of Buffer Pool API
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpbp_get_api_version(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 *major_ver,
+ u16 *minor_ver)
+{
+ struct mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPBP_CMDID_GET_API_VERSION,
+ cmd_flags, 0);
+
+ /* send command to mc */
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ mc_cmd_read_api_version(&cmd, major_ver, minor_ver);
+
+ return 0;
+}
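Since the attributes structure no longer carries version fields, callers that care about compatibility are expected to query the API version at runtime. A hedged usage sketch, gating on the 3.2 baseline defined at the top of dpbp-cmd.h:

u16 major, minor;
int err;

err = dpbp_get_api_version(mc_io, 0, &major, &minor);
if (err)
        return err;
if (major < DPBP_VER_MAJOR ||
    (major == DPBP_VER_MAJOR && minor < DPBP_VER_MINOR))
        return -ENOTSUPP;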
diff --git a/drivers/staging/fsl-mc/include/dpcon-cmd.h b/drivers/staging/fsl-mc/bus/dpcon-cmd.h
index 536b2ef13507..d0a5e194c5e1 100644
--- a/drivers/staging/fsl-mc/include/dpcon-cmd.h
+++ b/drivers/staging/fsl-mc/bus/dpcon-cmd.h
@@ -1,4 +1,5 @@
-/* Copyright 2013-2015 Freescale Semiconductor Inc.
+/*
+ * Copyright 2013-2016 Freescale Semiconductor Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -11,7 +12,6 @@
* names of any contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
- *
* ALTERNATIVELY, this software may be distributed under the terms of the
* GNU General Public License ("GPL") as published by the Free Software
* Foundation, either version 2 of that License or (at your option) any
diff --git a/drivers/staging/fsl-mc/bus/dpmcp-cmd.h b/drivers/staging/fsl-mc/bus/dpmcp-cmd.h
index d098a6d8f6bc..7cb514963c26 100644
--- a/drivers/staging/fsl-mc/bus/dpmcp-cmd.h
+++ b/drivers/staging/fsl-mc/bus/dpmcp-cmd.h
@@ -1,4 +1,5 @@
-/* Copyright 2013-2016 Freescale Semiconductor Inc.
+/*
+ * Copyright 2013-2016 Freescale Semiconductor Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -11,7 +12,6 @@
* names of any contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
- *
* ALTERNATIVELY, this software may be distributed under the terms of the
* GNU General Public License ("GPL") as published by the Free Software
* Foundation, either version 2 of that License or (at your option) any
@@ -33,25 +33,32 @@
#define _FSL_DPMCP_CMD_H
/* Minimal supported DPMCP Version */
-#define DPMCP_MIN_VER_MAJOR 3
-#define DPMCP_MIN_VER_MINOR 0
+#define DPMCP_MIN_VER_MAJOR 3
+#define DPMCP_MIN_VER_MINOR 0
+
+/* Command versioning */
+#define DPMCP_CMD_BASE_VERSION 1
+#define DPMCP_CMD_ID_OFFSET 4
+
+#define DPMCP_CMD(id) ((id << DPMCP_CMD_ID_OFFSET) | DPMCP_CMD_BASE_VERSION)
/* Command IDs */
-#define DPMCP_CMDID_CLOSE 0x800
-#define DPMCP_CMDID_OPEN 0x80b
-#define DPMCP_CMDID_CREATE 0x90b
-#define DPMCP_CMDID_DESTROY 0x900
-
-#define DPMCP_CMDID_GET_ATTR 0x004
-#define DPMCP_CMDID_RESET 0x005
-
-#define DPMCP_CMDID_SET_IRQ 0x010
-#define DPMCP_CMDID_GET_IRQ 0x011
-#define DPMCP_CMDID_SET_IRQ_ENABLE 0x012
-#define DPMCP_CMDID_GET_IRQ_ENABLE 0x013
-#define DPMCP_CMDID_SET_IRQ_MASK 0x014
-#define DPMCP_CMDID_GET_IRQ_MASK 0x015
-#define DPMCP_CMDID_GET_IRQ_STATUS 0x016
+#define DPMCP_CMDID_CLOSE DPMCP_CMD(0x800)
+#define DPMCP_CMDID_OPEN DPMCP_CMD(0x80b)
+#define DPMCP_CMDID_CREATE DPMCP_CMD(0x90b)
+#define DPMCP_CMDID_DESTROY DPMCP_CMD(0x98b)
+#define DPMCP_CMDID_GET_API_VERSION DPMCP_CMD(0xa0b)
+
+#define DPMCP_CMDID_GET_ATTR DPMCP_CMD(0x004)
+#define DPMCP_CMDID_RESET DPMCP_CMD(0x005)
+
+#define DPMCP_CMDID_SET_IRQ DPMCP_CMD(0x010)
+#define DPMCP_CMDID_GET_IRQ DPMCP_CMD(0x011)
+#define DPMCP_CMDID_SET_IRQ_ENABLE DPMCP_CMD(0x012)
+#define DPMCP_CMDID_GET_IRQ_ENABLE DPMCP_CMD(0x013)
+#define DPMCP_CMDID_SET_IRQ_MASK DPMCP_CMD(0x014)
+#define DPMCP_CMDID_GET_IRQ_MASK DPMCP_CMD(0x015)
+#define DPMCP_CMDID_GET_IRQ_STATUS DPMCP_CMD(0x016)
struct dpmcp_cmd_open {
__le32 dpmcp_id;
@@ -61,6 +68,10 @@ struct dpmcp_cmd_create {
__le32 portal_id;
};
+struct dpmcp_cmd_destroy {
+ __le32 object_id;
+};
+
struct dpmcp_cmd_set_irq {
/* cmd word 0 */
u8 irq_index;
diff --git a/drivers/staging/fsl-mc/bus/dpmcp.c b/drivers/staging/fsl-mc/bus/dpmcp.c
index 55766f78a528..e4d16519bcb4 100644
--- a/drivers/staging/fsl-mc/bus/dpmcp.c
+++ b/drivers/staging/fsl-mc/bus/dpmcp.c
@@ -1,4 +1,5 @@
-/* Copyright 2013-2016 Freescale Semiconductor Inc.
+/*
+ * Copyright 2013-2016 Freescale Semiconductor Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -11,7 +12,6 @@
* names of any contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
- *
* ALTERNATIVELY, this software may be distributed under the terms of the
* GNU General Public License ("GPL") as published by the Free Software
* Foundation, either version 2 of that License or (at your option) any
@@ -106,28 +106,29 @@ int dpmcp_close(struct fsl_mc_io *mc_io,
/**
* dpmcp_create() - Create the DPMCP object.
* @mc_io: Pointer to MC portal's I/O object
+ * @dprc_token: Parent container token; '0' for default container
* @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
* @cfg: Configuration structure
- * @token: Returned token; use in subsequent API calls
+ * @obj_id: Returned object id; use in subsequent API calls
*
* Create the DPMCP object, allocate required resources and
* perform required initialization.
*
* The object can be created either by declaring it in the
* DPL file, or by calling this function.
- * This function returns a unique authentication token,
- * associated with the specific object ID and the specific MC
- * portal; this token must be used in all subsequent calls to
- * this specific object. For objects that are created using the
- * DPL file, call dpmcp_open function to get an authentication
- * token first.
+ *
+ * This function accepts an authentication token of a parent
+ * container that this object should be assigned to and returns
+ * an object id. This object id will be used in all subsequent calls to
+ * this specific object.
*
* Return: '0' on Success; Error code otherwise.
*/
int dpmcp_create(struct fsl_mc_io *mc_io,
+ u16 dprc_token,
u32 cmd_flags,
const struct dpmcp_cfg *cfg,
- u16 *token)
+ u32 *obj_id)
{
struct mc_command cmd = { 0 };
struct dpmcp_cmd_create *cmd_params;
@@ -136,7 +137,7 @@ int dpmcp_create(struct fsl_mc_io *mc_io,
/* prepare command */
cmd.header = mc_encode_cmd_header(DPMCP_CMDID_CREATE,
- cmd_flags, 0);
+ cmd_flags, dprc_token);
cmd_params = (struct dpmcp_cmd_create *)cmd.params;
cmd_params->portal_id = cpu_to_le32(cfg->portal_id);
@@ -146,7 +147,7 @@ int dpmcp_create(struct fsl_mc_io *mc_io,
return err;
/* retrieve response parameters */
- *token = mc_cmd_hdr_read_token(&cmd);
+ *obj_id = mc_cmd_read_object_id(&cmd);
return 0;
}
@@ -154,20 +155,25 @@ int dpmcp_create(struct fsl_mc_io *mc_io,
/**
* dpmcp_destroy() - Destroy the DPMCP object and release all its resources.
* @mc_io: Pointer to MC portal's I/O object
+ * @dprc_token: Parent container token; '0' for default container
* @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
- * @token: Token of DPMCP object
+ * @obj_id: ID of DPMCP object
*
* Return: '0' on Success; error code otherwise.
*/
int dpmcp_destroy(struct fsl_mc_io *mc_io,
+ u16 dprc_token,
u32 cmd_flags,
- u16 token)
+ u32 obj_id)
{
struct mc_command cmd = { 0 };
+ struct dpmcp_cmd_destroy *cmd_params;
/* prepare command */
cmd.header = mc_encode_cmd_header(DPMCP_CMDID_DESTROY,
- cmd_flags, token);
+ cmd_flags, dprc_token);
+ cmd_params = (struct dpmcp_cmd_destroy *)cmd.params;
+ cmd_params->object_id = cpu_to_le32(obj_id);
/* send command to mc*/
return mc_send_command(mc_io, &cmd);
@@ -497,8 +503,38 @@ int dpmcp_get_attributes(struct fsl_mc_io *mc_io,
/* retrieve response parameters */
rsp_params = (struct dpmcp_rsp_get_attributes *)cmd.params;
attr->id = le32_to_cpu(rsp_params->id);
- attr->version.major = le16_to_cpu(rsp_params->version_major);
- attr->version.minor = le16_to_cpu(rsp_params->version_minor);
+
+ return 0;
+}
+
+/**
+ * dpmcp_get_api_version - Get Data Path Management Command Portal API version
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @major_ver: Major version of Data Path Management Command Portal API
+ * @minor_ver: Minor version of Data Path Management Command Portal API
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpmcp_get_api_version(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 *major_ver,
+ u16 *minor_ver)
+{
+ struct mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPMCP_CMDID_GET_API_VERSION,
+ cmd_flags, 0);
+
+ /* send command to mc */
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ mc_cmd_read_api_version(&cmd, major_ver, minor_ver);
return 0;
}
diff --git a/drivers/staging/fsl-mc/bus/dpmcp.h b/drivers/staging/fsl-mc/bus/dpmcp.h
index fe79d4d9293d..98a100d543f6 100644
--- a/drivers/staging/fsl-mc/bus/dpmcp.h
+++ b/drivers/staging/fsl-mc/bus/dpmcp.h
@@ -1,4 +1,5 @@
-/* Copyright 2013-2015 Freescale Semiconductor Inc.
+/*
+ * Copyright 2013-2016 Freescale Semiconductor Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -11,7 +12,6 @@
* names of any contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
- *
* ALTERNATIVELY, this software may be distributed under the terms of the
* GNU General Public License ("GPL") as published by the Free Software
* Foundation, either version 2 of that License or (at your option) any
@@ -32,23 +32,24 @@
#ifndef __FSL_DPMCP_H
#define __FSL_DPMCP_H
-/* Data Path Management Command Portal API
+/*
+ * Data Path Management Command Portal API
* Contains initialization APIs and runtime control APIs for DPMCP
*/
struct fsl_mc_io;
int dpmcp_open(struct fsl_mc_io *mc_io,
- uint32_t cmd_flags,
+ u32 cmd_flags,
int dpmcp_id,
- uint16_t *token);
+ u16 *token);
/* Get portal ID from pool */
#define DPMCP_GET_PORTAL_ID_FROM_POOL (-1)
int dpmcp_close(struct fsl_mc_io *mc_io,
- uint32_t cmd_flags,
- uint16_t token);
+ u32 cmd_flags,
+ u16 token);
/**
* struct dpmcp_cfg - Structure representing DPMCP configuration
@@ -59,18 +60,20 @@ struct dpmcp_cfg {
int portal_id;
};
-int dpmcp_create(struct fsl_mc_io *mc_io,
- uint32_t cmd_flags,
- const struct dpmcp_cfg *cfg,
- uint16_t *token);
+int dpmcp_create(struct fsl_mc_io *mc_io,
+ u16 dprc_token,
+ u32 cmd_flags,
+ const struct dpmcp_cfg *cfg,
+ u32 *obj_id);
int dpmcp_destroy(struct fsl_mc_io *mc_io,
- uint32_t cmd_flags,
- uint16_t token);
+ u16 dprc_token,
+ u32 cmd_flags,
+ u32 obj_id);
int dpmcp_reset(struct fsl_mc_io *mc_io,
- uint32_t cmd_flags,
- uint16_t token);
+ u32 cmd_flags,
+ u16 token);
/* IRQ */
/* IRQ Index */
@@ -85,75 +88,65 @@ int dpmcp_reset(struct fsl_mc_io *mc_io,
* @irq_num: A user defined number associated with this IRQ
*/
struct dpmcp_irq_cfg {
- uint64_t paddr;
- uint32_t val;
- int irq_num;
+ u64 paddr;
+ u32 val;
+ int irq_num;
};
-int dpmcp_set_irq(struct fsl_mc_io *mc_io,
- uint32_t cmd_flags,
- uint16_t token,
- uint8_t irq_index,
- struct dpmcp_irq_cfg *irq_cfg);
-
-int dpmcp_get_irq(struct fsl_mc_io *mc_io,
- uint32_t cmd_flags,
- uint16_t token,
- uint8_t irq_index,
- int *type,
- struct dpmcp_irq_cfg *irq_cfg);
-
-int dpmcp_set_irq_enable(struct fsl_mc_io *mc_io,
- uint32_t cmd_flags,
- uint16_t token,
- uint8_t irq_index,
- uint8_t en);
-
-int dpmcp_get_irq_enable(struct fsl_mc_io *mc_io,
- uint32_t cmd_flags,
- uint16_t token,
- uint8_t irq_index,
- uint8_t *en);
-
-int dpmcp_set_irq_mask(struct fsl_mc_io *mc_io,
- uint32_t cmd_flags,
- uint16_t token,
- uint8_t irq_index,
- uint32_t mask);
-
-int dpmcp_get_irq_mask(struct fsl_mc_io *mc_io,
- uint32_t cmd_flags,
- uint16_t token,
- uint8_t irq_index,
- uint32_t *mask);
-
-int dpmcp_get_irq_status(struct fsl_mc_io *mc_io,
- uint32_t cmd_flags,
- uint16_t token,
- uint8_t irq_index,
- uint32_t *status);
+int dpmcp_set_irq(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u8 irq_index,
+ struct dpmcp_irq_cfg *irq_cfg);
+
+int dpmcp_get_irq(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u8 irq_index,
+ int *type,
+ struct dpmcp_irq_cfg *irq_cfg);
+
+int dpmcp_set_irq_enable(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u8 irq_index,
+ u8 en);
+
+int dpmcp_get_irq_enable(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u8 irq_index,
+ u8 *en);
+
+int dpmcp_set_irq_mask(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u8 irq_index,
+ u32 mask);
+
+int dpmcp_get_irq_mask(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u8 irq_index,
+ u32 *mask);
+
+int dpmcp_get_irq_status(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u8 irq_index,
+ u32 *status);
/**
* struct dpmcp_attr - Structure representing DPMCP attributes
* @id: DPMCP object ID
- * @version: DPMCP version
*/
struct dpmcp_attr {
int id;
- /**
- * struct version - Structure representing DPMCP version
- * @major: DPMCP major version
- * @minor: DPMCP minor version
- */
- struct {
- uint16_t major;
- uint16_t minor;
- } version;
};
-int dpmcp_get_attributes(struct fsl_mc_io *mc_io,
- uint32_t cmd_flags,
- uint16_t token,
- struct dpmcp_attr *attr);
+int dpmcp_get_attributes(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ struct dpmcp_attr *attr);
#endif /* __FSL_DPMCP_H */
diff --git a/drivers/staging/fsl-mc/bus/dpmng-cmd.h b/drivers/staging/fsl-mc/bus/dpmng-cmd.h
index a7b77d58c8cd..cdddfb80eecc 100644
--- a/drivers/staging/fsl-mc/bus/dpmng-cmd.h
+++ b/drivers/staging/fsl-mc/bus/dpmng-cmd.h
@@ -12,7 +12,6 @@
* names of any contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
- *
* ALTERNATIVELY, this software may be distributed under the terms of the
* GNU General Public License ("GPL") as published by the Free Software
* Foundation, either version 2 of that License or (at your option) any
@@ -41,13 +40,14 @@
#ifndef __FSL_DPMNG_CMD_H
#define __FSL_DPMNG_CMD_H
-/* Command IDs */
-#define DPMNG_CMDID_GET_CONT_ID 0x830
-#define DPMNG_CMDID_GET_VERSION 0x831
+/* Command versioning */
+#define DPMNG_CMD_BASE_VERSION 1
+#define DPMNG_CMD_ID_OFFSET 4
-struct dpmng_rsp_get_container_id {
- __le32 container_id;
-};
+#define DPMNG_CMD(id) ((id << DPMNG_CMD_ID_OFFSET) | DPMNG_CMD_BASE_VERSION)
+
+/* Command IDs */
+#define DPMNG_CMDID_GET_VERSION DPMNG_CMD(0x831)
struct dpmng_rsp_get_version {
__le32 revision;
diff --git a/drivers/staging/fsl-mc/bus/dpmng.c b/drivers/staging/fsl-mc/bus/dpmng.c
index 96b1d67756fa..ad5d5bbec529 100644
--- a/drivers/staging/fsl-mc/bus/dpmng.c
+++ b/drivers/staging/fsl-mc/bus/dpmng.c
@@ -1,4 +1,5 @@
-/* Copyright 2013-2016 Freescale Semiconductor Inc.
+/*
+ * Copyright 2013-2016 Freescale Semiconductor Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -11,7 +12,6 @@
* names of any contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
- *
* ALTERNATIVELY, this software may be distributed under the terms of the
* GNU General Public License ("GPL") as published by the Free Software
* Foundation, either version 2 of that License or (at your option) any
@@ -72,36 +72,3 @@ int mc_get_version(struct fsl_mc_io *mc_io,
}
EXPORT_SYMBOL(mc_get_version);
-/**
- * dpmng_get_container_id() - Get container ID associated with a given portal.
- * @mc_io: Pointer to MC portal's I/O object
- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
- * @container_id: Requested container ID
- *
- * Return: '0' on Success; Error code otherwise.
- */
-int dpmng_get_container_id(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- int *container_id)
-{
- struct mc_command cmd = { 0 };
- struct dpmng_rsp_get_container_id *rsp_params;
- int err;
-
- /* prepare command */
- cmd.header = mc_encode_cmd_header(DPMNG_CMDID_GET_CONT_ID,
- cmd_flags,
- 0);
-
- /* send command to mc*/
- err = mc_send_command(mc_io, &cmd);
- if (err)
- return err;
-
- /* retrieve response parameters */
- rsp_params = (struct dpmng_rsp_get_container_id *)cmd.params;
- *container_id = le32_to_cpu(rsp_params->container_id);
-
- return 0;
-}
-
diff --git a/drivers/staging/fsl-mc/bus/dprc-cmd.h b/drivers/staging/fsl-mc/bus/dprc-cmd.h
index 009d65673155..588b8cafdbc7 100644
--- a/drivers/staging/fsl-mc/bus/dprc-cmd.h
+++ b/drivers/staging/fsl-mc/bus/dprc-cmd.h
@@ -12,7 +12,6 @@
* names of any contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
- *
* ALTERNATIVELY, this software may be distributed under the terms of the
* GNU General Public License ("GPL") as published by the Free Software
* Foundation, either version 2 of that License or (at your option) any
@@ -42,48 +41,56 @@
#define _FSL_DPRC_CMD_H
/* Minimal supported DPRC Version */
-#define DPRC_MIN_VER_MAJOR 5
+#define DPRC_MIN_VER_MAJOR 6
#define DPRC_MIN_VER_MINOR 0
+/* Command versioning */
+#define DPRC_CMD_BASE_VERSION 1
+#define DPRC_CMD_ID_OFFSET 4
+
+#define DPRC_CMD(id) ((id << DPRC_CMD_ID_OFFSET) | DPRC_CMD_BASE_VERSION)
+
/* Command IDs */
-#define DPRC_CMDID_CLOSE 0x800
-#define DPRC_CMDID_OPEN 0x805
-#define DPRC_CMDID_CREATE 0x905
-
-#define DPRC_CMDID_GET_ATTR 0x004
-#define DPRC_CMDID_RESET_CONT 0x005
-
-#define DPRC_CMDID_SET_IRQ 0x010
-#define DPRC_CMDID_GET_IRQ 0x011
-#define DPRC_CMDID_SET_IRQ_ENABLE 0x012
-#define DPRC_CMDID_GET_IRQ_ENABLE 0x013
-#define DPRC_CMDID_SET_IRQ_MASK 0x014
-#define DPRC_CMDID_GET_IRQ_MASK 0x015
-#define DPRC_CMDID_GET_IRQ_STATUS 0x016
-#define DPRC_CMDID_CLEAR_IRQ_STATUS 0x017
-
-#define DPRC_CMDID_CREATE_CONT 0x151
-#define DPRC_CMDID_DESTROY_CONT 0x152
-#define DPRC_CMDID_SET_RES_QUOTA 0x155
-#define DPRC_CMDID_GET_RES_QUOTA 0x156
-#define DPRC_CMDID_ASSIGN 0x157
-#define DPRC_CMDID_UNASSIGN 0x158
-#define DPRC_CMDID_GET_OBJ_COUNT 0x159
-#define DPRC_CMDID_GET_OBJ 0x15A
-#define DPRC_CMDID_GET_RES_COUNT 0x15B
-#define DPRC_CMDID_GET_RES_IDS 0x15C
-#define DPRC_CMDID_GET_OBJ_REG 0x15E
-#define DPRC_CMDID_SET_OBJ_IRQ 0x15F
-#define DPRC_CMDID_GET_OBJ_IRQ 0x160
-#define DPRC_CMDID_SET_OBJ_LABEL 0x161
-#define DPRC_CMDID_GET_OBJ_DESC 0x162
-
-#define DPRC_CMDID_CONNECT 0x167
-#define DPRC_CMDID_DISCONNECT 0x168
-#define DPRC_CMDID_GET_POOL 0x169
-#define DPRC_CMDID_GET_POOL_COUNT 0x16A
-
-#define DPRC_CMDID_GET_CONNECTION 0x16C
+#define DPRC_CMDID_CLOSE DPRC_CMD(0x800)
+#define DPRC_CMDID_OPEN DPRC_CMD(0x805)
+#define DPRC_CMDID_CREATE DPRC_CMD(0x905)
+#define DPRC_CMDID_GET_API_VERSION DPRC_CMD(0xa05)
+
+#define DPRC_CMDID_GET_ATTR DPRC_CMD(0x004)
+#define DPRC_CMDID_RESET_CONT DPRC_CMD(0x005)
+
+#define DPRC_CMDID_SET_IRQ DPRC_CMD(0x010)
+#define DPRC_CMDID_GET_IRQ DPRC_CMD(0x011)
+#define DPRC_CMDID_SET_IRQ_ENABLE DPRC_CMD(0x012)
+#define DPRC_CMDID_GET_IRQ_ENABLE DPRC_CMD(0x013)
+#define DPRC_CMDID_SET_IRQ_MASK DPRC_CMD(0x014)
+#define DPRC_CMDID_GET_IRQ_MASK DPRC_CMD(0x015)
+#define DPRC_CMDID_GET_IRQ_STATUS DPRC_CMD(0x016)
+#define DPRC_CMDID_CLEAR_IRQ_STATUS DPRC_CMD(0x017)
+
+#define DPRC_CMDID_CREATE_CONT DPRC_CMD(0x151)
+#define DPRC_CMDID_DESTROY_CONT DPRC_CMD(0x152)
+#define DPRC_CMDID_GET_CONT_ID DPRC_CMD(0x830)
+#define DPRC_CMDID_SET_RES_QUOTA DPRC_CMD(0x155)
+#define DPRC_CMDID_GET_RES_QUOTA DPRC_CMD(0x156)
+#define DPRC_CMDID_ASSIGN DPRC_CMD(0x157)
+#define DPRC_CMDID_UNASSIGN DPRC_CMD(0x158)
+#define DPRC_CMDID_GET_OBJ_COUNT DPRC_CMD(0x159)
+#define DPRC_CMDID_GET_OBJ DPRC_CMD(0x15A)
+#define DPRC_CMDID_GET_RES_COUNT DPRC_CMD(0x15B)
+#define DPRC_CMDID_GET_RES_IDS DPRC_CMD(0x15C)
+#define DPRC_CMDID_GET_OBJ_REG DPRC_CMD(0x15E)
+#define DPRC_CMDID_SET_OBJ_IRQ DPRC_CMD(0x15F)
+#define DPRC_CMDID_GET_OBJ_IRQ DPRC_CMD(0x160)
+#define DPRC_CMDID_SET_OBJ_LABEL DPRC_CMD(0x161)
+#define DPRC_CMDID_GET_OBJ_DESC DPRC_CMD(0x162)
+
+#define DPRC_CMDID_CONNECT DPRC_CMD(0x167)
+#define DPRC_CMDID_DISCONNECT DPRC_CMD(0x168)
+#define DPRC_CMDID_GET_POOL DPRC_CMD(0x169)
+#define DPRC_CMDID_GET_POOL_COUNT DPRC_CMD(0x16A)
+
+#define DPRC_CMDID_GET_CONNECTION DPRC_CMD(0x16C)
struct dprc_cmd_open {
__le32 container_id;
@@ -199,9 +206,6 @@ struct dprc_rsp_get_attributes {
/* response word 1 */
__le32 options;
__le32 portal_id;
- /* response word 2 */
- __le16 version_major;
- __le16 version_minor;
};
struct dprc_cmd_set_res_quota {
diff --git a/drivers/staging/fsl-mc/bus/dprc-driver.c b/drivers/staging/fsl-mc/bus/dprc-driver.c
index c5ee4639682b..4e416d89b736 100644
--- a/drivers/staging/fsl-mc/bus/dprc-driver.c
+++ b/drivers/staging/fsl-mc/bus/dprc-driver.c
@@ -1,7 +1,7 @@
/*
* Freescale data path resource container (DPRC) driver
*
- * Copyright (C) 2014 Freescale Semiconductor, Inc.
+ * Copyright (C) 2014-2016 Freescale Semiconductor, Inc.
* Author: German Rivera <German.Rivera@freescale.com>
*
* This file is licensed under the terms of the GNU General Public
@@ -505,7 +505,7 @@ static int register_dprc_irq_handler(struct fsl_mc_device *mc_dev)
dprc_irq0_handler,
dprc_irq0_handler_thread,
IRQF_NO_SUSPEND | IRQF_ONESHOT,
- "FSL MC DPRC irq0",
+ dev_name(&mc_dev->dev),
&mc_dev->dev);
if (error < 0) {
dev_err(&mc_dev->dev,
@@ -597,6 +597,7 @@ static int dprc_probe(struct fsl_mc_device *mc_dev)
struct fsl_mc_bus *mc_bus = to_fsl_mc_bus(mc_dev);
bool mc_io_created = false;
bool msi_domain_set = false;
+ u16 major_ver, minor_ver;
if (WARN_ON(strcmp(mc_dev->obj_desc.type, "dprc") != 0))
return -EINVAL;
@@ -669,13 +670,21 @@ static int dprc_probe(struct fsl_mc_device *mc_dev)
goto error_cleanup_open;
}
- if (mc_bus->dprc_attr.version.major < DPRC_MIN_VER_MAJOR ||
- (mc_bus->dprc_attr.version.major == DPRC_MIN_VER_MAJOR &&
- mc_bus->dprc_attr.version.minor < DPRC_MIN_VER_MINOR)) {
+ error = dprc_get_api_version(mc_dev->mc_io, 0,
+ &major_ver,
+ &minor_ver);
+ if (error < 0) {
+ dev_err(&mc_dev->dev, "dprc_get_api_version() failed: %d\n",
+ error);
+ goto error_cleanup_open;
+ }
+
+ if (major_ver < DPRC_MIN_VER_MAJOR ||
+ (major_ver == DPRC_MIN_VER_MAJOR &&
+ minor_ver < DPRC_MIN_VER_MINOR)) {
dev_err(&mc_dev->dev,
"ERROR: DPRC version %d.%d not supported\n",
- mc_bus->dprc_attr.version.major,
- mc_bus->dprc_attr.version.minor);
+ major_ver, minor_ver);
error = -ENOTSUPP;
goto error_cleanup_open;
}
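The gate above is a lexicographic (major, minor) comparison; a hypothetical helper expressing the same ordering makes the intent explicit:

static bool mc_api_version_at_least(u16 major, u16 minor,
                                    u16 min_major, u16 min_minor)
{
        return major > min_major ||
               (major == min_major && minor >= min_minor);
}

dprc_probe() would then reject the container whenever
!mc_api_version_at_least(major_ver, minor_ver, DPRC_MIN_VER_MAJOR, DPRC_MIN_VER_MINOR).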
diff --git a/drivers/staging/fsl-mc/bus/dprc.c b/drivers/staging/fsl-mc/bus/dprc.c
index 9fea3def6041..572edd4c066e 100644
--- a/drivers/staging/fsl-mc/bus/dprc.c
+++ b/drivers/staging/fsl-mc/bus/dprc.c
@@ -1,4 +1,5 @@
-/* Copyright 2013-2016 Freescale Semiconductor Inc.
+/*
+ * Copyright 2013-2016 Freescale Semiconductor Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -11,7 +12,6 @@
* names of any contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
- *
* ALTERNATIVELY, this software may be distributed under the terms of the
* GNU General Public License ("GPL") as published by the Free Software
* Foundation, either version 2 of that License or (at your option) any
@@ -565,8 +565,6 @@ int dprc_get_attributes(struct fsl_mc_io *mc_io,
attr->icid = le16_to_cpu(rsp_params->icid);
attr->options = le32_to_cpu(rsp_params->options);
attr->portal_id = le32_to_cpu(rsp_params->portal_id);
- attr->version.major = le16_to_cpu(rsp_params->version_major);
- attr->version.minor = le16_to_cpu(rsp_params->version_minor);
return 0;
}
@@ -1386,3 +1384,66 @@ int dprc_get_connection(struct fsl_mc_io *mc_io,
return 0;
}
+
+/**
+ * dprc_get_api_version - Get Data Path Resource Container API version
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @major_ver: Major version of Data Path Resource Container API
+ * @minor_ver: Minor version of Data Path Resource Container API
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dprc_get_api_version(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 *major_ver,
+ u16 *minor_ver)
+{
+ struct mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_API_VERSION,
+ cmd_flags, 0);
+
+ /* send command to mc */
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ mc_cmd_read_api_version(&cmd, major_ver, minor_ver);
+
+ return 0;
+}
+
+/**
+ * dprc_get_container_id - Get container ID associated with a given portal.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @container_id: Requested container id
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dprc_get_container_id(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ int *container_id)
+{
+ struct mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_CONT_ID,
+ cmd_flags,
+ 0);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ *container_id = (int)mc_cmd_read_object_id(&cmd);
+
+ return 0;
+}
diff --git a/drivers/staging/fsl-mc/bus/fsl-mc-allocator.c b/drivers/staging/fsl-mc/bus/fsl-mc-allocator.c
index e93ab53bae67..ce07096c3b1f 100644
--- a/drivers/staging/fsl-mc/bus/fsl-mc-allocator.c
+++ b/drivers/staging/fsl-mc/bus/fsl-mc-allocator.c
@@ -1,7 +1,7 @@
/*
- * Freescale MC object device allocator driver
+ * fsl-mc object allocator driver
*
- * Copyright (C) 2013 Freescale Semiconductor, Inc.
+ * Copyright (C) 2013-2016 Freescale Semiconductor, Inc.
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
@@ -12,9 +12,9 @@
#include <linux/msi.h>
#include "../include/mc-bus.h"
#include "../include/mc-sys.h"
-#include "../include/dpbp-cmd.h"
-#include "../include/dpcon-cmd.h"
+#include "dpbp-cmd.h"
+#include "dpcon-cmd.h"
#include "fsl-mc-private.h"
#define FSL_MC_IS_ALLOCATABLE(_obj_type) \
@@ -23,15 +23,12 @@
strcmp(_obj_type, "dpcon") == 0)
/**
- * fsl_mc_resource_pool_add_device - add allocatable device to a resource
- * pool of a given MC bus
+ * fsl_mc_resource_pool_add_device - add allocatable object to a resource
+ * pool of a given fsl-mc bus
*
- * @mc_bus: pointer to the MC bus
- * @pool_type: MC bus pool type
- * @mc_dev: Pointer to allocatable MC object device
- *
- * It adds an allocatable MC object device to a container's resource pool of
- * the given resource type
+ * @mc_bus: pointer to the fsl-mc bus
+ * @pool_type: pool type
+ * @mc_dev: pointer to allocatable fsl-mc device
*/
static int __must_check fsl_mc_resource_pool_add_device(struct fsl_mc_bus
*mc_bus,
@@ -95,10 +92,10 @@ out:
* fsl_mc_resource_pool_remove_device - remove an allocatable device from a
* resource pool
*
- * @mc_dev: Pointer to allocatable MC object device
+ * @mc_dev: pointer to allocatable fsl-mc device
*
- * It permanently removes an allocatable MC object device from the resource
- * pool, the device is currently in, as long as it is in the pool's free list.
+ * It permanently removes an allocatable fsl-mc device from the resource
+ * pool. It's an error if the device is in use.
*/
static int __must_check fsl_mc_resource_pool_remove_device(struct fsl_mc_device
*mc_dev)
@@ -255,17 +252,18 @@ out_unlock:
EXPORT_SYMBOL_GPL(fsl_mc_resource_free);
/**
- * fsl_mc_object_allocate - Allocates a MC object device of the given
- * pool type from a given MC bus
+ * fsl_mc_object_allocate - Allocates an fsl-mc object of the given
+ * pool type from a given fsl-mc bus instance
*
- * @mc_dev: MC device for which the MC object device is to be allocated
- * @pool_type: MC bus resource pool type
- * @new_mc_dev: Pointer to area where the pointer to the allocated
- * MC object device is to be returned
+ * @mc_dev: fsl-mc device which is used in conjunction with the
+ * allocated object
+ * @pool_type: pool type
+ * @new_mc_dev: pointer to area where the pointer to the allocated device
+ * is to be returned
*
- * This function allocates a MC object device from the device's parent DPRC,
- * from the corresponding MC bus' pool of allocatable MC object devices of
- * the given resource type. mc_dev cannot be a DPRC itself.
+ * Allocatable objects are always used in conjunction with some functional
+ * device. This function allocates an object of the specified type from
+ * the DPRC containing the functional device.
*
* NOTE: pool_type must be different from FSL_MC_POOL_MCP, since MC
* portals are allocated using fsl_mc_portal_allocate(), instead of
@@ -312,10 +310,9 @@ error:
EXPORT_SYMBOL_GPL(fsl_mc_object_allocate);
/**
- * fsl_mc_object_free - Returns an allocatable MC object device to the
- * corresponding resource pool of a given MC bus.
- *
- * @mc_adev: Pointer to the MC object device
+ * fsl_mc_object_free - Returns an fsl-mc object to the resource
+ * pool where it came from.
+ * @mc_adev: Pointer to the fsl-mc device
*/
void fsl_mc_object_free(struct fsl_mc_device *mc_adev)
{
@@ -332,8 +329,14 @@ void fsl_mc_object_free(struct fsl_mc_device *mc_adev)
EXPORT_SYMBOL_GPL(fsl_mc_object_free);
/*
- * Initialize the interrupt pool associated with a MC bus.
- * It allocates a block of IRQs from the GIC-ITS
+ * A DPRC and the devices in the DPRC all share the same GIC-ITS device
+ * ID. A block of IRQs is pre-allocated and maintained in a pool
+ * from which devices can allocate them when needed.
+ */
+
+/*
+ * Initialize the interrupt pool associated with an fsl-mc bus.
+ * It allocates a block of IRQs from the GIC-ITS.
*/
int fsl_mc_populate_irq_pool(struct fsl_mc_bus *mc_bus,
unsigned int irq_count)
@@ -395,7 +398,7 @@ cleanup_msi_irqs:
EXPORT_SYMBOL_GPL(fsl_mc_populate_irq_pool);
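
A sketch of the expected pairing at the DPRC level (the call sites are assumed, not shown in this hunk; FSL_MC_IRQ_POOL_MAX_TOTAL_IRQS is the pool limit from mc-bus.h):

	/* when the DPRC is scanned, size the shared pool once */
	error = fsl_mc_populate_irq_pool(mc_bus,
			FSL_MC_IRQ_POOL_MAX_TOTAL_IRQS);
	if (error < 0)
		return error;

	/* child devices then call fsl_mc_allocate_irqs()/fsl_mc_free_irqs() */

	/* on DPRC teardown, give the block back to the GIC-ITS */
	fsl_mc_cleanup_irq_pool(mc_bus);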
/**
- * Teardown the interrupt pool associated with an MC bus.
+ * Tear down the interrupt pool associated with an fsl-mc bus.
* It frees the IRQs that were allocated to the pool, back to the GIC-ITS.
*/
void fsl_mc_cleanup_irq_pool(struct fsl_mc_bus *mc_bus)
@@ -422,11 +425,7 @@ void fsl_mc_cleanup_irq_pool(struct fsl_mc_bus *mc_bus)
EXPORT_SYMBOL_GPL(fsl_mc_cleanup_irq_pool);
/**
- * It allocates the IRQs required by a given MC object device. The
- * IRQs are allocated from the interrupt pool associated with the
- * MC bus that contains the device, if the device is not a DPRC device.
- * Otherwise, the IRQs are allocated from the interrupt pool associated
- * with the MC bus that represents the DPRC device itself.
+ * Allocate the IRQs required by a given fsl-mc device.
*/
int __must_check fsl_mc_allocate_irqs(struct fsl_mc_device *mc_dev)
{
@@ -495,8 +494,7 @@ error_resource_alloc:
EXPORT_SYMBOL_GPL(fsl_mc_allocate_irqs);
/*
- * It frees the IRQs that were allocated for a MC object device, by
- * returning them to the corresponding interrupt pool.
+ * Frees the IRQs that were allocated for an fsl-mc device.
*/
void fsl_mc_free_irqs(struct fsl_mc_device *mc_dev)
{
@@ -605,7 +603,7 @@ static int fsl_mc_allocator_probe(struct fsl_mc_device *mc_dev)
return error;
dev_dbg(&mc_dev->dev,
- "Allocatable MC object device bound to fsl_mc_allocator driver");
+ "Allocatable fsl-mc device bound to fsl_mc_allocator driver");
return 0;
}
@@ -627,7 +625,7 @@ static int fsl_mc_allocator_remove(struct fsl_mc_device *mc_dev)
}
dev_dbg(&mc_dev->dev,
- "Allocatable MC object device unbound from fsl_mc_allocator driver");
+ "Allocatable fsl-mc device unbound from fsl_mc_allocator driver");
return 0;
}
diff --git a/drivers/staging/fsl-mc/bus/fsl-mc-bus.c b/drivers/staging/fsl-mc/bus/fsl-mc-bus.c
index 44f64b6f0fc9..5ac373c0c716 100644
--- a/drivers/staging/fsl-mc/bus/fsl-mc-bus.c
+++ b/drivers/staging/fsl-mc/bus/fsl-mc-bus.c
@@ -1,7 +1,7 @@
/*
* Freescale Management Complex (MC) bus driver
*
- * Copyright (C) 2014 Freescale Semiconductor, Inc.
+ * Copyright (C) 2014-2016 Freescale Semiconductor, Inc.
* Author: German Rivera <German.Rivera@freescale.com>
*
* This file is licensed under the terms of the GNU General Public
@@ -9,6 +9,8 @@
* warranty of any kind, whether express or implied.
*/
+#define pr_fmt(fmt) "fsl-mc: " fmt
+
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/of_address.h>
@@ -34,7 +36,7 @@ static struct kmem_cache *mc_dev_cache;
/**
* struct fsl_mc - Private data of a "fsl,qoriq-mc" platform device
- * @root_mc_bus_dev: MC object device representing the root DPRC
+ * @root_mc_bus_dev: fsl-mc device representing the root DPRC
* @num_translation_ranges: number of entries in addr_translation_ranges
* @translation_ranges: array of bus to system address translation ranges
*/
@@ -62,8 +64,8 @@ struct fsl_mc_addr_translation_range {
/**
* fsl_mc_bus_match - device to driver matching callback
- * @dev: the MC object device structure to match against
- * @drv: the device driver to search for matching MC object device id
+ * @dev: the fsl-mc device to match against
+ * @drv: the device driver to search for matching fsl-mc object type
* structures
*
* Returns 1 on success, 0 otherwise.
@@ -91,7 +93,7 @@ static int fsl_mc_bus_match(struct device *dev, struct device_driver *drv)
/*
* Traverse the match_id table of the given driver, trying to find
- * a matching for the given MC object device.
+ * a match for the given device.
*/
for (id = mc_drv->match_id_table; id->vendor != 0x0; id++) {
if (id->vendor == mc_dev->obj_desc.vendor &&
@@ -164,8 +166,7 @@ static int fsl_mc_driver_probe(struct device *dev)
error = mc_drv->probe(mc_dev);
if (error < 0) {
- dev_err(dev, "MC object device probe callback failed: %d\n",
- error);
+ dev_err(dev, "%s failed: %d\n", __func__, error);
return error;
}
@@ -183,9 +184,7 @@ static int fsl_mc_driver_remove(struct device *dev)
error = mc_drv->remove(mc_dev);
if (error < 0) {
- dev_err(dev,
- "MC object device remove callback failed: %d\n",
- error);
+ dev_err(dev, "%s failed: %d\n", __func__, error);
return error;
}
@@ -232,8 +231,6 @@ int __fsl_mc_driver_register(struct fsl_mc_driver *mc_driver,
return error;
}
- pr_info("MC object device driver %s registered\n",
- mc_driver->driver.name);
return 0;
}
EXPORT_SYMBOL_GPL(__fsl_mc_driver_register);
@@ -315,21 +312,6 @@ static int get_dprc_icid(struct fsl_mc_io *mc_io,
return error;
}
-static int get_dprc_version(struct fsl_mc_io *mc_io,
- int container_id, u16 *major, u16 *minor)
-{
- struct dprc_attributes attr;
- int error;
-
- error = get_dprc_attr(mc_io, container_id, &attr);
- if (error == 0) {
- *major = attr.version.major;
- *minor = attr.version.minor;
- }
-
- return error;
-}
-
static int translate_mc_addr(struct fsl_mc_device *mc_dev,
enum dprc_region_type mc_region_type,
u64 mc_offset, phys_addr_t *phys_addr)
@@ -452,7 +434,7 @@ bool fsl_mc_is_root_dprc(struct device *dev)
}
/**
- * Add a newly discovered MC object device to be visible in Linux
+ * Add a newly discovered fsl-mc device to be visible in Linux
*/
int fsl_mc_device_add(struct dprc_obj_desc *obj_desc,
struct fsl_mc_io *mc_io,
@@ -533,8 +515,8 @@ int fsl_mc_device_add(struct dprc_obj_desc *obj_desc,
goto error_cleanup_dev;
} else {
/*
- * A non-DPRC MC object device has to be a child of another
- * MC object (specifically a DPRC object)
+ * A non-DPRC object has to be a child of a DPRC, so use the
+ * parent's ICID and interrupt domain.
*/
mc_dev->icid = parent_mc_dev->icid;
mc_dev->dma_mask = FSL_MC_DEFAULT_DMA_MASK;
@@ -572,8 +554,7 @@ int fsl_mc_device_add(struct dprc_obj_desc *obj_desc,
}
(void)get_device(&mc_dev->dev);
- dev_dbg(parent_dev, "Added MC object device %s\n",
- dev_name(&mc_dev->dev));
+ dev_dbg(parent_dev, "added %s\n", dev_name(&mc_dev->dev));
*new_mc_dev = mc_dev;
return 0;
@@ -590,10 +571,10 @@ error_cleanup_dev:
EXPORT_SYMBOL_GPL(fsl_mc_device_add);
/**
- * fsl_mc_device_remove - Remove a MC object device from being visible to
+ * fsl_mc_device_remove - Remove an fsl-mc device from being visible to
* Linux
*
- * @mc_dev: Pointer to a MC object device object
+ * @mc_dev: Pointer to an fsl-mc device
*/
void fsl_mc_device_remove(struct fsl_mc_device *mc_dev)
{
@@ -749,8 +730,6 @@ static int fsl_mc_bus_probe(struct platform_device *pdev)
struct mc_version mc_version;
struct resource res;
- dev_info(&pdev->dev, "Root MC bus device probed");
-
mc = devm_kzalloc(&pdev->dev, sizeof(*mc), GFP_KERNEL);
if (!mc)
return -ENOMEM;
@@ -783,8 +762,7 @@ static int fsl_mc_bus_probe(struct platform_device *pdev)
goto error_cleanup_mc_io;
}
- dev_info(&pdev->dev,
- "Freescale Management Complex Firmware version: %u.%u.%u\n",
+ dev_info(&pdev->dev, "MC firmware version: %u.%u.%u\n",
mc_version.major, mc_version.minor, mc_version.revision);
error = get_mc_addr_translation_ranges(&pdev->dev,
@@ -793,7 +771,7 @@ static int fsl_mc_bus_probe(struct platform_device *pdev)
if (error < 0)
goto error_cleanup_mc_io;
- error = dpmng_get_container_id(mc_io, 0, &container_id);
+ error = dprc_get_container_id(mc_io, 0, &container_id);
if (error < 0) {
dev_err(&pdev->dev,
"dpmng_get_container_id() failed: %d\n", error);
@@ -801,8 +779,9 @@ static int fsl_mc_bus_probe(struct platform_device *pdev)
}
memset(&obj_desc, 0, sizeof(struct dprc_obj_desc));
- error = get_dprc_version(mc_io, container_id,
- &obj_desc.ver_major, &obj_desc.ver_minor);
+ error = dprc_get_api_version(mc_io, 0,
+ &obj_desc.ver_major,
+ &obj_desc.ver_minor);
if (error < 0)
goto error_cleanup_mc_io;
@@ -840,7 +819,6 @@ static int fsl_mc_bus_remove(struct platform_device *pdev)
fsl_destroy_mc_io(mc->root_mc_bus_dev->mc_io);
mc->root_mc_bus_dev->mc_io = NULL;
- dev_info(&pdev->dev, "Root MC bus device removed");
return 0;
}
@@ -875,12 +853,10 @@ static int __init fsl_mc_bus_driver_init(void)
error = bus_register(&fsl_mc_bus_type);
if (error < 0) {
- pr_err("fsl-mc bus type registration failed: %d\n", error);
+ pr_err("bus type registration failed: %d\n", error);
goto error_cleanup_cache;
}
- pr_info("fsl-mc bus type registered\n");
-
error = platform_driver_register(&fsl_mc_bus_driver);
if (error < 0) {
pr_err("platform_driver_register() failed: %d\n", error);
diff --git a/drivers/staging/fsl-mc/bus/fsl-mc-msi.c b/drivers/staging/fsl-mc/bus/fsl-mc-msi.c
index 3d46b1b1fa18..7975c6e6fee3 100644
--- a/drivers/staging/fsl-mc/bus/fsl-mc-msi.c
+++ b/drivers/staging/fsl-mc/bus/fsl-mc-msi.c
@@ -1,7 +1,7 @@
/*
* Freescale Management Complex (MC) bus driver MSI support
*
- * Copyright (C) 2015 Freescale Semiconductor, Inc.
+ * Copyright (C) 2015-2016 Freescale Semiconductor, Inc.
* Author: German Rivera <German.Rivera@freescale.com>
*
* This file is licensed under the terms of the GNU General Public
diff --git a/drivers/staging/fsl-mc/bus/fsl-mc-private.h b/drivers/staging/fsl-mc/bus/fsl-mc-private.h
index d459c2673f39..5c49c9d2df6a 100644
--- a/drivers/staging/fsl-mc/bus/fsl-mc-private.h
+++ b/drivers/staging/fsl-mc/bus/fsl-mc-private.h
@@ -10,6 +10,9 @@
#ifndef _FSL_MC_PRIVATE_H_
#define _FSL_MC_PRIVATE_H_
+#include "../include/mc.h"
+#include "../include/mc-bus.h"
+
int __must_check fsl_mc_device_add(struct dprc_obj_desc *obj_desc,
struct fsl_mc_io *mc_io,
struct device *parent_dev,
diff --git a/drivers/staging/fsl-mc/bus/irq-gic-v3-its-fsl-mc-msi.c b/drivers/staging/fsl-mc/bus/irq-gic-v3-its-fsl-mc-msi.c
index 7a6ac640752f..6b1cd574644f 100644
--- a/drivers/staging/fsl-mc/bus/irq-gic-v3-its-fsl-mc-msi.c
+++ b/drivers/staging/fsl-mc/bus/irq-gic-v3-its-fsl-mc-msi.c
@@ -1,7 +1,7 @@
/*
* Freescale Management Complex (MC) bus driver MSI support
*
- * Copyright (C) 2015 Freescale Semiconductor, Inc.
+ * Copyright (C) 2015-2016 Freescale Semiconductor, Inc.
* Author: German Rivera <German.Rivera@freescale.com>
*
* This file is licensed under the terms of the GNU General Public
@@ -19,7 +19,7 @@
#include "../include/mc-bus.h"
static struct irq_chip its_msi_irq_chip = {
- .name = "fsl-mc-bus-msi",
+ .name = "ITS-fMSI",
.irq_mask = irq_chip_mask_parent,
.irq_unmask = irq_chip_unmask_parent,
.irq_eoi = irq_chip_eoi_parent,
diff --git a/drivers/staging/fsl-mc/bus/mc-io.c b/drivers/staging/fsl-mc/bus/mc-io.c
index 798c965fe203..d66b87f0903b 100644
--- a/drivers/staging/fsl-mc/bus/mc-io.c
+++ b/drivers/staging/fsl-mc/bus/mc-io.c
@@ -1,4 +1,5 @@
-/* Copyright 2013-2016 Freescale Semiconductor Inc.
+/*
+ * Copyright 2013-2016 Freescale Semiconductor Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -11,7 +12,6 @@
* names of any contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
- *
* ALTERNATIVELY, this software may be distributed under the terms of the
* GNU General Public License ("GPL") as published by the Free Software
* Foundation, either version 2 of that License or (at your option) any
diff --git a/drivers/staging/fsl-mc/bus/mc-sys.c b/drivers/staging/fsl-mc/bus/mc-sys.c
index 285917c7c8e4..4d82802b384d 100644
--- a/drivers/staging/fsl-mc/bus/mc-sys.c
+++ b/drivers/staging/fsl-mc/bus/mc-sys.c
@@ -1,4 +1,5 @@
-/* Copyright 2013-2014 Freescale Semiconductor Inc.
+/*
+ * Copyright 2013-2016 Freescale Semiconductor Inc.
*
* I/O services to send MC commands to the MC hardware
*
@@ -13,7 +14,6 @@
* names of any contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
- *
* ALTERNATIVELY, this software may be distributed under the terms of the
* GNU General Public License ("GPL") as published by the Free Software
* Foundation, either version 2 of that License or (at your option) any
@@ -67,7 +67,7 @@ static u16 mc_cmd_hdr_read_cmdid(struct mc_command *cmd)
struct mc_cmd_header *hdr = (struct mc_cmd_header *)&cmd->header;
u16 cmd_id = le16_to_cpu(hdr->cmd_id);
- return (cmd_id & MC_CMD_HDR_CMDID_MASK) >> MC_CMD_HDR_CMDID_SHIFT;
+ return cmd_id;
}
static int mc_status_to_error(enum mc_cmd_status status)
@@ -200,7 +200,7 @@ static int mc_polling_wait_preemptible(struct fsl_mc_io *mc_io,
if (time_after_eq(jiffies, jiffies_until_timeout)) {
dev_dbg(mc_io->dev,
- "MC command timed out (portal: %#llx, obj handle: %#x, command: %#x)\n",
+ "MC command timed out (portal: %#llx, dprc handle: %#x, command: %#x)\n",
mc_io->portal_phys_addr,
(unsigned int)mc_cmd_hdr_read_token(cmd),
(unsigned int)mc_cmd_hdr_read_cmdid(cmd));
@@ -240,7 +240,7 @@ static int mc_polling_wait_atomic(struct fsl_mc_io *mc_io,
timeout_usecs -= MC_CMD_COMPLETION_POLLING_MAX_SLEEP_USECS;
if (timeout_usecs == 0) {
dev_dbg(mc_io->dev,
- "MC command timed out (portal: %#llx, obj handle: %#x, command: %#x)\n",
+ "MC command timed out (portal: %#llx, dprc handle: %#x, command: %#x)\n",
mc_io->portal_phys_addr,
(unsigned int)mc_cmd_hdr_read_token(cmd),
(unsigned int)mc_cmd_hdr_read_cmdid(cmd));
@@ -294,7 +294,7 @@ int mc_send_command(struct fsl_mc_io *mc_io, struct mc_command *cmd)
if (status != MC_CMD_STATUS_OK) {
dev_dbg(mc_io->dev,
- "MC command failed: portal: %#llx, obj handle: %#x, command: %#x, status: %s (%#x)\n",
+ "MC command failed: portal: %#llx, dprc handle: %#x, command: %#x, status: %s (%#x)\n",
mc_io->portal_phys_addr,
(unsigned int)mc_cmd_hdr_read_token(cmd),
(unsigned int)mc_cmd_hdr_read_cmdid(cmd),
diff --git a/drivers/staging/fsl-mc/include/dpbp.h b/drivers/staging/fsl-mc/include/dpbp.h
index e14e85a5d6df..bf34b1e0e730 100644
--- a/drivers/staging/fsl-mc/include/dpbp.h
+++ b/drivers/staging/fsl-mc/include/dpbp.h
@@ -1,4 +1,5 @@
-/* Copyright 2013-2015 Freescale Semiconductor Inc.
+/*
+ * Copyright 2013-2016 Freescale Semiconductor Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -32,7 +33,8 @@
#ifndef __FSL_DPBP_H
#define __FSL_DPBP_H
-/* Data Path Buffer Pool API
+/*
+ * Data Path Buffer Pool API
* Contains initialization APIs and runtime control APIs for DPBP
*/
@@ -44,8 +46,8 @@ int dpbp_open(struct fsl_mc_io *mc_io,
u16 *token);
int dpbp_close(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token);
+ u32 cmd_flags,
+ u16 token);
/**
* struct dpbp_cfg - Structure representing DPBP configuration
@@ -55,14 +57,16 @@ struct dpbp_cfg {
u32 options;
};
-int dpbp_create(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- const struct dpbp_cfg *cfg,
- u16 *token);
+int dpbp_create(struct fsl_mc_io *mc_io,
+ u16 dprc_token,
+ u32 cmd_flags,
+ const struct dpbp_cfg *cfg,
+ u32 *obj_id);
int dpbp_destroy(struct fsl_mc_io *mc_io,
+ u16 dprc_token,
u32 cmd_flags,
- u16 token);
+ u32 obj_id);
int dpbp_enable(struct fsl_mc_io *mc_io,
u32 cmd_flags,
@@ -88,85 +92,75 @@ int dpbp_reset(struct fsl_mc_io *mc_io,
* @irq_num: A user defined number associated with this IRQ
*/
struct dpbp_irq_cfg {
- u64 addr;
- u32 val;
- int irq_num;
+ u64 addr;
+ u32 val;
+ int irq_num;
};
-int dpbp_set_irq(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- u8 irq_index,
- struct dpbp_irq_cfg *irq_cfg);
-
-int dpbp_get_irq(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- u8 irq_index,
- int *type,
- struct dpbp_irq_cfg *irq_cfg);
-
-int dpbp_set_irq_enable(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- u8 irq_index,
- u8 en);
-
-int dpbp_get_irq_enable(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- u8 irq_index,
- u8 *en);
+int dpbp_set_irq(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u8 irq_index,
+ struct dpbp_irq_cfg *irq_cfg);
+
+int dpbp_get_irq(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u8 irq_index,
+ int *type,
+ struct dpbp_irq_cfg *irq_cfg);
+
+int dpbp_set_irq_enable(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u8 irq_index,
+ u8 en);
+
+int dpbp_get_irq_enable(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u8 irq_index,
+ u8 *en);
int dpbp_set_irq_mask(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- u8 irq_index,
- u32 mask);
+ u32 cmd_flags,
+ u16 token,
+ u8 irq_index,
+ u32 mask);
int dpbp_get_irq_mask(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- u8 irq_index,
- u32 *mask);
-
-int dpbp_get_irq_status(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- u8 irq_index,
- u32 *status);
-
-int dpbp_clear_irq_status(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- u8 irq_index,
- u32 status);
+ u32 cmd_flags,
+ u16 token,
+ u8 irq_index,
+ u32 *mask);
+
+int dpbp_get_irq_status(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u8 irq_index,
+ u32 *status);
+
+int dpbp_clear_irq_status(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u8 irq_index,
+ u32 status);
/**
* struct dpbp_attr - Structure representing DPBP attributes
* @id: DPBP object ID
- * @version: DPBP version
* @bpid: Hardware buffer pool ID; should be used as an argument in
* acquire/release operations on buffers
*/
struct dpbp_attr {
int id;
- /**
- * struct version - Structure representing DPBP version
- * @major: DPBP major version
- * @minor: DPBP minor version
- */
- struct {
- u16 major;
- u16 minor;
- } version;
u16 bpid;
};
-int dpbp_get_attributes(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- struct dpbp_attr *attr);
+int dpbp_get_attributes(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ struct dpbp_attr *attr);
/**
* DPBP notifications options
@@ -196,24 +190,29 @@ int dpbp_get_attributes(struct fsl_mc_io *mc_io,
* @options: Mask of available options; use 'DPBP_NOTIF_OPT_<X>' values
*/
struct dpbp_notification_cfg {
- u32 depletion_entry;
- u32 depletion_exit;
- u32 surplus_entry;
- u32 surplus_exit;
- u64 message_iova;
- u64 message_ctx;
- u16 options;
+ u32 depletion_entry;
+ u32 depletion_exit;
+ u32 surplus_entry;
+ u32 surplus_exit;
+ u64 message_iova;
+ u64 message_ctx;
+ u16 options;
};
-int dpbp_set_notifications(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- struct dpbp_notification_cfg *cfg);
+int dpbp_set_notifications(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ struct dpbp_notification_cfg *cfg);
+
+int dpbp_get_notifications(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ struct dpbp_notification_cfg *cfg);
-int dpbp_get_notifications(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- struct dpbp_notification_cfg *cfg);
+int dpbp_get_api_version(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 *major_ver,
+ u16 *minor_ver);
/** @} */
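
The create/destroy prototypes above switch from returning an object token to returning an object ID scoped by the parent DPRC's token. A sketch of the life cycle this implies, inferred from the prototypes (error handling omitted):

	u32 obj_id;
	u16 dpbp_token;

	/* create under the parent container identified by dprc_token */
	dpbp_create(mc_io, dprc_token, 0, &cfg, &obj_id);

	/* open by object ID to obtain a token for runtime commands */
	dpbp_open(mc_io, 0, obj_id, &dpbp_token);
	dpbp_enable(mc_io, 0, dpbp_token);

	/* ... */

	dpbp_close(mc_io, 0, dpbp_token);
	dpbp_destroy(mc_io, dprc_token, 0, obj_id);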
diff --git a/drivers/staging/fsl-mc/include/dpmng.h b/drivers/staging/fsl-mc/include/dpmng.h
index e5cfd017f9a5..7d8e255da578 100644
--- a/drivers/staging/fsl-mc/include/dpmng.h
+++ b/drivers/staging/fsl-mc/include/dpmng.h
@@ -1,4 +1,5 @@
-/* Copyright 2013-2015 Freescale Semiconductor Inc.
+/*
+ * Copyright 2013-2016 Freescale Semiconductor Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -32,7 +33,8 @@
#ifndef __FSL_DPMNG_H
#define __FSL_DPMNG_H
-/* Management Complex General API
+/*
+ * Management Complex General API
* Contains general API for the Management Complex firmware
*/
@@ -58,12 +60,12 @@ struct mc_version {
u32 revision;
};
-int mc_get_version(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- struct mc_version *mc_ver_info);
+int mc_get_version(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ struct mc_version *mc_ver_info);
-int dpmng_get_container_id(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- int *container_id);
+int dpmng_get_container_id(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ int *container_id);
#endif /* __FSL_DPMNG_H */
diff --git a/drivers/staging/fsl-mc/include/dprc.h b/drivers/staging/fsl-mc/include/dprc.h
index 593b2bbe7f71..f9ea769ccfab 100644
--- a/drivers/staging/fsl-mc/include/dprc.h
+++ b/drivers/staging/fsl-mc/include/dprc.h
@@ -1,4 +1,5 @@
-/* Copyright 2013-2015 Freescale Semiconductor Inc.
+/*
+ * Copyright 2013-2016 Freescale Semiconductor Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -34,7 +35,8 @@
#include "mc-cmd.h"
-/* Data Path Resource Container API
+/*
+ * Data Path Resource Container API
* Contains DPRC API for managing and querying DPAA resources
*/
@@ -70,12 +72,14 @@ int dprc_close(struct fsl_mc_io *mc_io,
* and can be retrieved using dprc_get_attributes()
*/
-/* Spawn Policy Option allowed - Indicates that the new container is allowed
+/*
+ * Spawn Policy Option allowed - Indicates that the new container is allowed
* to spawn and have its own child containers.
*/
#define DPRC_CFG_OPT_SPAWN_ALLOWED 0x00000001
-/* General Container allocation policy - Indicates that the new container is
+/*
+ * General Container allocation policy - Indicates that the new container is
* allowed to allocate requested resources from its parent container; if not
* set, the container is only allowed to use resources in its own pools; Note
* that this is a container's global policy, but the parent container may
@@ -83,12 +87,14 @@ int dprc_close(struct fsl_mc_io *mc_io,
*/
#define DPRC_CFG_OPT_ALLOC_ALLOWED 0x00000002
-/* Object initialization allowed - software context associated with this
+/*
+ * Object initialization allowed - software context associated with this
* container is allowed to invoke object initialization operations.
*/
#define DPRC_CFG_OPT_OBJ_CREATE_ALLOWED 0x00000004
-/* Topology change allowed - software context associated with this
+/*
+ * Topology change allowed - software context associated with this
* container is allowed to invoke topology operations, such as attach/detach
* of network objects.
*/
@@ -116,17 +122,17 @@ struct dprc_cfg {
char label[16];
};
-int dprc_create_container(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- struct dprc_cfg *cfg,
- int *child_container_id,
- u64 *child_portal_offset);
+int dprc_create_container(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ struct dprc_cfg *cfg,
+ int *child_container_id,
+ u64 *child_portal_offset);
-int dprc_destroy_container(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- int child_container_id);
+int dprc_destroy_container(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ int child_container_id);
int dprc_reset_container(struct fsl_mc_io *mc_io,
u32 cmd_flags,
@@ -139,7 +145,7 @@ int dprc_reset_container(struct fsl_mc_io *mc_io,
#define DPRC_IRQ_INDEX 0
/* Number of dprc's IRQs */
-#define DPRC_NUM_OF_IRQS 1
+#define DPRC_NUM_OF_IRQS 1
/* DPRC IRQ events */
@@ -151,12 +157,14 @@ int dprc_reset_container(struct fsl_mc_io *mc_io,
#define DPRC_IRQ_EVENT_RES_ADDED 0x00000004
/* IRQ event - Indicates that resources removed from the container */
#define DPRC_IRQ_EVENT_RES_REMOVED 0x00000008
-/* IRQ event - Indicates that one of the descendant containers that opened by
+/*
+ * IRQ event - Indicates that one of the descendant containers opened by
* this container is destroyed
*/
#define DPRC_IRQ_EVENT_CONTAINER_DESTROYED 0x00000010
-/* IRQ event - Indicates that on one of the container's opened object is
+/*
+ * IRQ event - Indicates that one of the container's opened objects is
* destroyed
*/
#define DPRC_IRQ_EVENT_OBJ_DESTROYED 0x00000020
@@ -171,59 +179,59 @@ int dprc_reset_container(struct fsl_mc_io *mc_io,
* @irq_num: A user defined number associated with this IRQ
*/
struct dprc_irq_cfg {
- phys_addr_t paddr;
- u32 val;
- int irq_num;
+ phys_addr_t paddr;
+ u32 val;
+ int irq_num;
};
-int dprc_set_irq(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- u8 irq_index,
- struct dprc_irq_cfg *irq_cfg);
-
-int dprc_get_irq(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- u8 irq_index,
- int *type,
- struct dprc_irq_cfg *irq_cfg);
-
-int dprc_set_irq_enable(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- u8 irq_index,
- u8 en);
-
-int dprc_get_irq_enable(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- u8 irq_index,
- u8 *en);
-
-int dprc_set_irq_mask(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- u8 irq_index,
- u32 mask);
-
-int dprc_get_irq_mask(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- u8 irq_index,
- u32 *mask);
-
-int dprc_get_irq_status(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- u8 irq_index,
- u32 *status);
-
-int dprc_clear_irq_status(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- u8 irq_index,
- u32 status);
+int dprc_set_irq(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u8 irq_index,
+ struct dprc_irq_cfg *irq_cfg);
+
+int dprc_get_irq(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u8 irq_index,
+ int *type,
+ struct dprc_irq_cfg *irq_cfg);
+
+int dprc_set_irq_enable(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u8 irq_index,
+ u8 en);
+
+int dprc_get_irq_enable(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u8 irq_index,
+ u8 *en);
+
+int dprc_set_irq_mask(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u8 irq_index,
+ u32 mask);
+
+int dprc_get_irq_mask(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u8 irq_index,
+ u32 *mask);
+
+int dprc_get_irq_status(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u8 irq_index,
+ u32 *status);
+
+int dprc_clear_irq_status(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u8 irq_index,
+ u32 status);
/**
* struct dprc_attributes - Container attributes
@@ -231,63 +239,56 @@ int dprc_clear_irq_status(struct fsl_mc_io *mc_io,
* @icid: Container's ICID
* @portal_id: Container's portal ID
* @options: Container's options as set at container's creation
- * @version: DPRC version
*/
struct dprc_attributes {
int container_id;
u16 icid;
int portal_id;
u64 options;
- /**
- * struct version - DPRC version
- * @major: DPRC major version
- * @minor: DPRC minor version
- */
- struct {
- u16 major;
- u16 minor;
- } version;
};
-int dprc_get_attributes(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- struct dprc_attributes *attributes);
+int dprc_get_attributes(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ struct dprc_attributes *attributes);
int dprc_set_res_quota(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- int child_container_id,
- char *type,
- u16 quota);
+ u32 cmd_flags,
+ u16 token,
+ int child_container_id,
+ char *type,
+ u16 quota);
int dprc_get_res_quota(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- int child_container_id,
- char *type,
- u16 *quota);
+ u32 cmd_flags,
+ u16 token,
+ int child_container_id,
+ char *type,
+ u16 *quota);
/* Resource request options */
-/* Explicit resource ID request - The requested objects/resources
+/*
+ * Explicit resource ID request - The requested objects/resources
* are explicit and sequential (in case of resources).
* The base ID is given at res_req at base_align field
*/
-#define DPRC_RES_REQ_OPT_EXPLICIT 0x00000001
+#define DPRC_RES_REQ_OPT_EXPLICIT 0x00000001
-/* Aligned resources request - Relevant only for resources
+/*
+ * Aligned resources request - Relevant only for resources
* request (and not objects). Indicates that resources base ID should be
* sequential and aligned to the value given at dprc_res_req base_align field
*/
-#define DPRC_RES_REQ_OPT_ALIGNED 0x00000002
+#define DPRC_RES_REQ_OPT_ALIGNED 0x00000002
-/* Plugged Flag - Relevant only for object assignment request.
+/*
+ * Plugged Flag - Relevant only for object assignment request.
* Indicates that after all objects assigned. An interrupt will be invoked at
* the relevant GPP. The assigned object will be marked as plugged.
* plugged objects can't be assigned from their container
*/
-#define DPRC_RES_REQ_OPT_PLUGGED 0x00000004
+#define DPRC_RES_REQ_OPT_PLUGGED 0x00000004
/**
* struct dprc_res_req - Resource request descriptor, to be used in assignment
@@ -312,33 +313,33 @@ struct dprc_res_req {
int id_base_align;
};
-int dprc_assign(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- int container_id,
- struct dprc_res_req *res_req);
-
-int dprc_unassign(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- int child_container_id,
- struct dprc_res_req *res_req);
-
-int dprc_get_pool_count(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- int *pool_count);
-
-int dprc_get_pool(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- int pool_index,
- char *type);
+int dprc_assign(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ int container_id,
+ struct dprc_res_req *res_req);
+
+int dprc_unassign(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ int child_container_id,
+ struct dprc_res_req *res_req);
+
+int dprc_get_pool_count(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ int *pool_count);
+
+int dprc_get_pool(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ int pool_index,
+ char *type);
int dprc_get_obj_count(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- int *obj_count);
+ u32 cmd_flags,
+ u16 token,
+ int *obj_count);
/* Objects Attributes Flags */
@@ -353,7 +354,7 @@ int dprc_get_obj_count(struct fsl_mc_io *mc_io,
* masters;
* user is responsible for proper memory handling through IOMMU configuration.
*/
-#define DPRC_OBJ_FLAG_NO_MEM_SHAREABILITY 0x0001
+#define DPRC_OBJ_FLAG_NO_MEM_SHAREABILITY 0x0001
/**
* struct dprc_obj_desc - Object descriptor, returned from dprc_get_obj()
@@ -381,41 +382,41 @@ struct dprc_obj_desc {
u16 flags;
};
-int dprc_get_obj(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- int obj_index,
- struct dprc_obj_desc *obj_desc);
-
-int dprc_get_obj_desc(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- char *obj_type,
- int obj_id,
- struct dprc_obj_desc *obj_desc);
-
-int dprc_set_obj_irq(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- char *obj_type,
- int obj_id,
- u8 irq_index,
- struct dprc_irq_cfg *irq_cfg);
-
-int dprc_get_obj_irq(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- char *obj_type,
- int obj_id,
- u8 irq_index,
- int *type,
- struct dprc_irq_cfg *irq_cfg);
-
-int dprc_get_res_count(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- char *type,
- int *res_count);
+int dprc_get_obj(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ int obj_index,
+ struct dprc_obj_desc *obj_desc);
+
+int dprc_get_obj_desc(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ char *obj_type,
+ int obj_id,
+ struct dprc_obj_desc *obj_desc);
+
+int dprc_set_obj_irq(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ char *obj_type,
+ int obj_id,
+ u8 irq_index,
+ struct dprc_irq_cfg *irq_cfg);
+
+int dprc_get_obj_irq(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ char *obj_type,
+ int obj_id,
+ u8 irq_index,
+ int *type,
+ struct dprc_irq_cfg *irq_cfg);
+
+int dprc_get_res_count(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ char *type,
+ int *res_count);
/**
* enum dprc_iter_status - Iteration status
@@ -444,11 +445,11 @@ struct dprc_res_ids_range_desc {
enum dprc_iter_status iter_status;
};
-int dprc_get_res_ids(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- char *type,
- struct dprc_res_ids_range_desc *range_desc);
+int dprc_get_res_ids(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ char *type,
+ struct dprc_res_ids_range_desc *range_desc);
/* Region flags */
/* Cacheable - Indicates that region should be mapped as cacheable */
@@ -481,20 +482,20 @@ struct dprc_region_desc {
enum dprc_region_type type;
};
-int dprc_get_obj_region(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- char *obj_type,
- int obj_id,
- u8 region_index,
- struct dprc_region_desc *region_desc);
-
-int dprc_set_obj_label(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- char *obj_type,
- int obj_id,
- char *label);
+int dprc_get_obj_region(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ char *obj_type,
+ int obj_id,
+ u8 region_index,
+ struct dprc_region_desc *region_desc);
+
+int dprc_set_obj_label(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ char *obj_type,
+ int obj_id,
+ char *label);
/**
* struct dprc_endpoint - Endpoint description for link connect/disconnect
@@ -521,24 +522,33 @@ struct dprc_connection_cfg {
u32 max_rate;
};
-int dprc_connect(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- const struct dprc_endpoint *endpoint1,
- const struct dprc_endpoint *endpoint2,
+int dprc_connect(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ const struct dprc_endpoint *endpoint1,
+ const struct dprc_endpoint *endpoint2,
const struct dprc_connection_cfg *cfg);
-int dprc_disconnect(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- const struct dprc_endpoint *endpoint);
-
-int dprc_get_connection(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- const struct dprc_endpoint *endpoint1,
- struct dprc_endpoint *endpoint2,
- int *state);
+int dprc_disconnect(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ const struct dprc_endpoint *endpoint);
+
+int dprc_get_connection(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ const struct dprc_endpoint *endpoint1,
+ struct dprc_endpoint *endpoint2,
+ int *state);
+
+int dprc_get_api_version(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 *major_ver,
+ u16 *minor_ver);
+
+int dprc_get_container_id(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ int *container_id);
#endif /* _FSL_DPRC_H */
diff --git a/drivers/staging/fsl-mc/include/mc-bus.h b/drivers/staging/fsl-mc/include/mc-bus.h
index 170684a57ca2..42700de94d59 100644
--- a/drivers/staging/fsl-mc/include/mc-bus.h
+++ b/drivers/staging/fsl-mc/include/mc-bus.h
@@ -1,7 +1,7 @@
/*
* Freescale Management Complex (MC) bus declarations
*
- * Copyright (C) 2014 Freescale Semiconductor, Inc.
+ * Copyright (C) 2014-2016 Freescale Semiconductor, Inc.
* Author: German Rivera <German.Rivera@freescale.com>
*
* This file is licensed under the terms of the GNU General Public
@@ -42,8 +42,8 @@ struct msi_domain_info;
*/
struct fsl_mc_resource_pool {
enum fsl_mc_pool_type type;
- int16_t max_count;
- int16_t free_count;
+ int max_count;
+ int free_count;
struct mutex mutex; /* serializes access to free_list */
struct list_head free_list;
struct fsl_mc_bus *mc_bus;
diff --git a/drivers/staging/fsl-mc/include/mc-cmd.h b/drivers/staging/fsl-mc/include/mc-cmd.h
index 5decb9890c31..2e08aa31b084 100644
--- a/drivers/staging/fsl-mc/include/mc-cmd.h
+++ b/drivers/staging/fsl-mc/include/mc-cmd.h
@@ -1,4 +1,5 @@
-/* Copyright 2013-2015 Freescale Semiconductor Inc.
+/*
+ * Copyright 2013-2016 Freescale Semiconductor Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -48,6 +49,15 @@ struct mc_command {
u64 params[MC_CMD_NUM_OF_PARAMS];
};
+struct mc_rsp_create {
+ __le32 object_id;
+};
+
+struct mc_rsp_api_ver {
+ __le16 major_ver;
+ __le16 minor_ver;
+};
+
enum mc_cmd_status {
MC_CMD_STATUS_OK = 0x0, /* Completed successfully */
MC_CMD_STATUS_READY = 0x1, /* Ready to be processed */
@@ -72,11 +82,6 @@ enum mc_cmd_status {
/* Command completion flag */
#define MC_CMD_FLAG_INTR_DIS 0x01
-#define MC_CMD_HDR_CMDID_MASK 0xFFF0
-#define MC_CMD_HDR_CMDID_SHIFT 4
-#define MC_CMD_HDR_TOKEN_MASK 0xFFC0
-#define MC_CMD_HDR_TOKEN_SHIFT 6
-
static inline u64 mc_encode_cmd_header(u16 cmd_id,
u32 cmd_flags,
u16 token)
@@ -84,10 +89,8 @@ static inline u64 mc_encode_cmd_header(u16 cmd_id,
u64 header = 0;
struct mc_cmd_header *hdr = (struct mc_cmd_header *)&header;
- hdr->cmd_id = cpu_to_le16((cmd_id << MC_CMD_HDR_CMDID_SHIFT) &
- MC_CMD_HDR_CMDID_MASK);
- hdr->token = cpu_to_le16((token << MC_CMD_HDR_TOKEN_SHIFT) &
- MC_CMD_HDR_TOKEN_MASK);
+ hdr->cmd_id = cpu_to_le16(cmd_id);
+ hdr->token = cpu_to_le16(token);
hdr->status = MC_CMD_STATUS_READY;
if (cmd_flags & MC_CMD_FLAG_PRI)
hdr->flags_hw = MC_CMD_FLAG_PRI;
@@ -102,7 +105,26 @@ static inline u16 mc_cmd_hdr_read_token(struct mc_command *cmd)
struct mc_cmd_header *hdr = (struct mc_cmd_header *)&cmd->header;
u16 token = le16_to_cpu(hdr->token);
- return (token & MC_CMD_HDR_TOKEN_MASK) >> MC_CMD_HDR_TOKEN_SHIFT;
+ return token;
+}
+
+static inline u32 mc_cmd_read_object_id(struct mc_command *cmd)
+{
+ struct mc_rsp_create *rsp_params;
+
+ rsp_params = (struct mc_rsp_create *)cmd->params;
+ return le32_to_cpu(rsp_params->object_id);
+}
+
+static inline void mc_cmd_read_api_version(struct mc_command *cmd,
+ u16 *major_ver,
+ u16 *minor_ver)
+{
+ struct mc_rsp_api_ver *rsp_params;
+
+ rsp_params = (struct mc_rsp_api_ver *)cmd->params;
+ *major_ver = le16_to_cpu(rsp_params->major_ver);
+ *minor_ver = le16_to_cpu(rsp_params->minor_ver);
}
#endif /* __FSL_MC_CMD_H */
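
With the shift/mask pairs gone, the header now carries the full 16-bit command ID and token verbatim. A hedged sketch of how the helpers in this file compose (the 0xa05 command ID is illustrative, not a real DPxx_CMDID constant):

static int example_get_api_version(struct fsl_mc_io *mc_io,
				   u16 *major_ver, u16 *minor_ver)
{
	struct mc_command cmd = { 0 };
	int error;

	/* API-version queries are sent with a zero token, since no
	 * object has been opened yet */
	cmd.header = mc_encode_cmd_header(0xa05, 0, 0);

	error = mc_send_command(mc_io, &cmd);
	if (error < 0)
		return error;

	mc_cmd_read_api_version(&cmd, major_ver, minor_ver);
	return 0;
}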
diff --git a/drivers/staging/fsl-mc/include/mc-sys.h b/drivers/staging/fsl-mc/include/mc-sys.h
index 89ad0cf54702..dca7f9084e05 100644
--- a/drivers/staging/fsl-mc/include/mc-sys.h
+++ b/drivers/staging/fsl-mc/include/mc-sys.h
@@ -1,4 +1,5 @@
-/* Copyright 2013-2014 Freescale Semiconductor Inc.
+/*
+ * Copyright 2013-2016 Freescale Semiconductor Inc.
*
* Interface of the I/O services to send MC commands to the MC hardware
*
diff --git a/drivers/staging/fsl-mc/include/mc.h b/drivers/staging/fsl-mc/include/mc.h
index f6e720e84460..1c46c0c2a895 100644
--- a/drivers/staging/fsl-mc/include/mc.h
+++ b/drivers/staging/fsl-mc/include/mc.h
@@ -1,7 +1,7 @@
/*
* Freescale Management Complex (MC) bus public interface
*
- * Copyright (C) 2014 Freescale Semiconductor, Inc.
+ * Copyright (C) 2014-2016 Freescale Semiconductor, Inc.
* Author: German Rivera <German.Rivera@freescale.com>
*
* This file is licensed under the terms of the GNU General Public
@@ -81,7 +81,7 @@ enum fsl_mc_pool_type {
*/
struct fsl_mc_resource {
enum fsl_mc_pool_type type;
- int32_t id;
+ s32 id;
void *data;
struct fsl_mc_resource_pool *parent_pool;
struct list_head node;
diff --git a/drivers/staging/fwserial/fwserial.c b/drivers/staging/fwserial/fwserial.c
index 49c718b91e55..41a49c8194e5 100644
--- a/drivers/staging/fwserial/fwserial.c
+++ b/drivers/staging/fwserial/fwserial.c
@@ -1667,12 +1667,6 @@ static inline void fill_plug_rsp_nack(struct fwserial_mgmt_pkt *pkt)
pkt->hdr.len = cpu_to_be16(mgmt_pkt_expected_len(pkt->hdr.code));
}
-static inline void fill_unplug_req(struct fwserial_mgmt_pkt *pkt)
-{
- pkt->hdr.code = cpu_to_be16(FWSC_VIRT_CABLE_UNPLUG);
- pkt->hdr.len = cpu_to_be16(mgmt_pkt_expected_len(pkt->hdr.code));
-}
-
static inline void fill_unplug_rsp_nack(struct fwserial_mgmt_pkt *pkt)
{
pkt->hdr.code = cpu_to_be16(FWSC_VIRT_CABLE_UNPLUG_RSP | FWSC_RSP_NACK);
diff --git a/drivers/staging/gdm724x/gdm_lte.h b/drivers/staging/gdm724x/gdm_lte.h
index 88414e5a70cc..7ddeabc0e50a 100644
--- a/drivers/staging/gdm724x/gdm_lte.h
+++ b/drivers/staging/gdm724x/gdm_lte.h
@@ -47,15 +47,15 @@ struct phy_dev {
void *priv_dev;
struct net_device *dev[MAX_NIC_TYPE];
int (*send_hci_func)(void *priv_dev, void *data, int len,
- void (*cb)(void *cb_data), void *cb_data);
+ void (*cb)(void *cb_data), void *cb_data);
int (*send_sdu_func)(void *priv_dev, void *data, int len,
- unsigned int dftEpsId, unsigned int epsId,
- void (*cb)(void *cb_data), void *cb_data,
- int dev_idx, int nic_type);
+ unsigned int dftEpsId, unsigned int epsId,
+ void (*cb)(void *cb_data), void *cb_data,
+ int dev_idx, int nic_type);
int (*rcv_func)(void *priv_dev,
- int (*cb)(void *cb_data, void *data, int len,
- int context),
- void *cb_data, int context);
+ int (*cb)(void *cb_data, void *data, int len,
+ int context),
+ void *cb_data, int context);
struct gdm_endian * (*get_endian)(void *priv_dev);
};
diff --git a/drivers/staging/gdm724x/gdm_tty.h b/drivers/staging/gdm724x/gdm_tty.h
index 297438b4ddcb..195c5902989f 100644
--- a/drivers/staging/gdm724x/gdm_tty.h
+++ b/drivers/staging/gdm724x/gdm_tty.h
@@ -17,7 +17,6 @@
#include <linux/types.h>
#include <linux/tty.h>
-
#define TTY_MAX_COUNT 2
#define MAX_ISSUE_NUM 3
diff --git a/drivers/staging/gdm724x/netlink_k.h b/drivers/staging/gdm724x/netlink_k.h
index 7cf979b3f826..5ebd73157f5a 100644
--- a/drivers/staging/gdm724x/netlink_k.h
+++ b/drivers/staging/gdm724x/netlink_k.h
@@ -18,7 +18,8 @@
#include <net/sock.h>
struct sock *netlink_init(int unit,
- void (*cb)(struct net_device *dev, u16 type, void *msg, int len));
+ void (*cb)(struct net_device *dev,
+ u16 type, void *msg, int len));
int netlink_send(struct sock *sock, int group, u16 type, void *msg, int len);
#endif /* _NETLINK_K_H_ */
diff --git a/drivers/staging/greybus/arche-apb-ctrl.c b/drivers/staging/greybus/arche-apb-ctrl.c
index 70323aa11f24..3fda0cd6bb42 100644
--- a/drivers/staging/greybus/arche-apb-ctrl.c
+++ b/drivers/staging/greybus/arche-apb-ctrl.c
@@ -183,7 +183,7 @@ static int standby_boot_seq(struct platform_device *pdev)
* Pasted from WDM spec,
* - A falling edge on POWEROFF_L is detected (a)
* - WDM enters standby mode, but no output signals are changed
- * */
+ */
/* TODO: POWEROFF_L is input to WDM module */
apb->state = ARCHE_PLATFORM_STATE_STANDBY;
@@ -285,8 +285,10 @@ static ssize_t state_store(struct device *dev,
if (apb->state == ARCHE_PLATFORM_STATE_FW_FLASHING)
return count;
- /* First we want to make sure we power off everything
- * and then enter FW flashing state */
+ /*
+ * First we want to make sure we power off everything
+ * and then enter FW flashing state
+ */
poweroff_seq(pdev);
ret = fw_flashing_seq(pdev);
} else {
diff --git a/drivers/staging/greybus/arche-platform.c b/drivers/staging/greybus/arche-platform.c
index d33d6fe078ad..338c2d3ee842 100644
--- a/drivers/staging/greybus/arche-platform.c
+++ b/drivers/staging/greybus/arche-platform.c
@@ -457,7 +457,8 @@ retry:
goto exit;
/* First we want to make sure we power off everything
- * and then activate back again */
+ * and then activate back again
+ */
device_for_each_child(arche_pdata->dev, NULL, apb_poweroff);
arche_platform_poweroff_seq(arche_pdata);
diff --git a/drivers/staging/greybus/audio_codec.c b/drivers/staging/greybus/audio_codec.c
index 8a0744b58a32..f8862c6d7102 100644
--- a/drivers/staging/greybus/audio_codec.c
+++ b/drivers/staging/greybus/audio_codec.c
@@ -405,7 +405,6 @@ static void gbcodec_shutdown(struct snd_pcm_substream *substream,
params->state = GBAUDIO_CODEC_SHUTDOWN;
mutex_unlock(&codec->lock);
pm_relax(dai->dev);
- return;
}
static int gbcodec_hw_params(struct snd_pcm_substream *substream,
@@ -655,8 +654,10 @@ static int gbcodec_mute_stream(struct snd_soc_dai *dai, int mute, int stream)
ret = gb_audio_apbridgea_shutdown_rx(data->connection,
0);
params->state = GBAUDIO_CODEC_STOP;
- } else
+ } else {
ret = -EINVAL;
+ }
+
if (ret)
dev_err_ratelimited(dai->dev,
"%s:Error during %s %s stream:%d\n",
diff --git a/drivers/staging/greybus/audio_codec.h b/drivers/staging/greybus/audio_codec.h
index ca027bd99ad7..62fd93939a1f 100644
--- a/drivers/staging/greybus/audio_codec.h
+++ b/drivers/staging/greybus/audio_codec.h
@@ -158,7 +158,6 @@ struct gbaudio_module_info {
int dev_id; /* check if it should be bundle_id/hd_cport_id */
int vid;
int pid;
- int slot;
int type;
int set_uevent;
char vstr[NAME_SIZE];
diff --git a/drivers/staging/greybus/audio_manager.h b/drivers/staging/greybus/audio_manager.h
index c4ca09754a6a..5ab8f5e0ed3f 100644
--- a/drivers/staging/greybus/audio_manager.h
+++ b/drivers/staging/greybus/audio_manager.h
@@ -18,10 +18,9 @@
struct gb_audio_manager_module_descriptor {
char name[GB_AUDIO_MANAGER_MODULE_NAME_LEN];
- int slot;
int vid;
int pid;
- int cport;
+ int intf_id;
unsigned int ip_devices;
unsigned int op_devices;
};
diff --git a/drivers/staging/greybus/audio_manager_module.c b/drivers/staging/greybus/audio_manager_module.c
index a10e96ad79c1..adc16977452d 100644
--- a/drivers/staging/greybus/audio_manager_module.c
+++ b/drivers/staging/greybus/audio_manager_module.c
@@ -81,16 +81,6 @@ static ssize_t gb_audio_module_name_show(
static struct gb_audio_manager_module_attribute gb_audio_module_name_attribute =
__ATTR(name, 0664, gb_audio_module_name_show, NULL);
-static ssize_t gb_audio_module_slot_show(
- struct gb_audio_manager_module *module,
- struct gb_audio_manager_module_attribute *attr, char *buf)
-{
- return sprintf(buf, "%d", module->desc.slot);
-}
-
-static struct gb_audio_manager_module_attribute gb_audio_module_slot_attribute =
- __ATTR(slot, 0664, gb_audio_module_slot_show, NULL);
-
static ssize_t gb_audio_module_vid_show(
struct gb_audio_manager_module *module,
struct gb_audio_manager_module_attribute *attr, char *buf)
@@ -111,16 +101,16 @@ static ssize_t gb_audio_module_pid_show(
static struct gb_audio_manager_module_attribute gb_audio_module_pid_attribute =
__ATTR(pid, 0664, gb_audio_module_pid_show, NULL);
-static ssize_t gb_audio_module_cport_show(
+static ssize_t gb_audio_module_intf_id_show(
struct gb_audio_manager_module *module,
struct gb_audio_manager_module_attribute *attr, char *buf)
{
- return sprintf(buf, "%d", module->desc.cport);
+ return sprintf(buf, "%d", module->desc.intf_id);
}
static struct gb_audio_manager_module_attribute
- gb_audio_module_cport_attribute =
- __ATTR(cport, 0664, gb_audio_module_cport_show, NULL);
+ gb_audio_module_intf_id_attribute =
+ __ATTR(intf_id, 0664, gb_audio_module_intf_id_show, NULL);
static ssize_t gb_audio_module_ip_devices_show(
struct gb_audio_manager_module *module,
@@ -146,10 +136,9 @@ static struct gb_audio_manager_module_attribute
static struct attribute *gb_audio_module_default_attrs[] = {
&gb_audio_module_name_attribute.attr,
- &gb_audio_module_slot_attribute.attr,
&gb_audio_module_vid_attribute.attr,
&gb_audio_module_pid_attribute.attr,
- &gb_audio_module_cport_attribute.attr,
+ &gb_audio_module_intf_id_attribute.attr,
&gb_audio_module_ip_devices_attribute.attr,
&gb_audio_module_op_devices_attribute.attr,
NULL, /* need to NULL terminate the list of attributes */
@@ -164,29 +153,26 @@ static struct kobj_type gb_audio_module_type = {
static void send_add_uevent(struct gb_audio_manager_module *module)
{
char name_string[128];
- char slot_string[64];
char vid_string[64];
char pid_string[64];
- char cport_string[64];
+ char intf_id_string[64];
char ip_devices_string[64];
char op_devices_string[64];
char *envp[] = {
name_string,
- slot_string,
vid_string,
pid_string,
- cport_string,
+ intf_id_string,
ip_devices_string,
op_devices_string,
NULL
};
snprintf(name_string, 128, "NAME=%s", module->desc.name);
- snprintf(slot_string, 64, "SLOT=%d", module->desc.slot);
snprintf(vid_string, 64, "VID=%d", module->desc.vid);
snprintf(pid_string, 64, "PID=%d", module->desc.pid);
- snprintf(cport_string, 64, "CPORT=%d", module->desc.cport);
+ snprintf(intf_id_string, 64, "INTF_ID=%d", module->desc.intf_id);
snprintf(ip_devices_string, 64, "I/P DEVICES=0x%X",
module->desc.ip_devices);
snprintf(op_devices_string, 64, "O/P DEVICES=0x%X",
@@ -246,13 +232,12 @@ int gb_audio_manager_module_create(
void gb_audio_manager_module_dump(struct gb_audio_manager_module *module)
{
- pr_info("audio module #%d name=%s slot=%d vid=%d pid=%d cport=%d i/p devices=0x%X o/p devices=0x%X\n",
+ pr_info("audio module #%d name=%s vid=%d pid=%d intf_id=%d i/p devices=0x%X o/p devices=0x%X\n",
module->id,
module->desc.name,
- module->desc.slot,
module->desc.vid,
module->desc.pid,
- module->desc.cport,
+ module->desc.intf_id,
module->desc.ip_devices,
module->desc.op_devices);
}
diff --git a/drivers/staging/greybus/audio_manager_sysfs.c b/drivers/staging/greybus/audio_manager_sysfs.c
index d8bf8591ff9e..34ebd147052f 100644
--- a/drivers/staging/greybus/audio_manager_sysfs.c
+++ b/drivers/staging/greybus/audio_manager_sysfs.c
@@ -20,10 +20,9 @@ static ssize_t manager_sysfs_add_store(
int num = sscanf(buf,
"name=%" GB_AUDIO_MANAGER_MODULE_NAME_LEN_SSCANF "s "
- "slot=%d vid=%d pid=%d cport=%d i/p devices=0x%X"
- "o/p devices=0x%X",
- desc.name, &desc.slot, &desc.vid, &desc.pid,
- &desc.cport, &desc.ip_devices, &desc.op_devices);
+ "vid=%d pid=%d intf_id=%d i/p devices=0x%X o/p devices=0x%X",
+ desc.name, &desc.vid, &desc.pid, &desc.intf_id,
+ &desc.ip_devices, &desc.op_devices);
if (num != 7)
return -EINVAL;
@@ -44,7 +43,7 @@ static ssize_t manager_sysfs_remove_store(
{
int id;
- int num = sscanf(buf, "%d", &id);
+ int num = kstrtoint(buf, 10, &id);
if (num != 1)
return -EINVAL;
@@ -65,16 +64,17 @@ static ssize_t manager_sysfs_dump_store(
{
int id;
- int num = sscanf(buf, "%d", &id);
+ int num = kstrtoint(buf, 10, &id);
if (num == 1) {
num = gb_audio_manager_dump_module(id);
if (num)
return num;
- } else if (!strncmp("all", buf, 3))
+ } else if (!strncmp("all", buf, 3)) {
gb_audio_manager_dump_all();
- else
+ } else {
return -EINVAL;
+ }
return count;
}
diff --git a/drivers/staging/greybus/audio_module.c b/drivers/staging/greybus/audio_module.c
index ae1c0fa85752..17a9948b1ba1 100644
--- a/drivers/staging/greybus/audio_module.c
+++ b/drivers/staging/greybus/audio_module.c
@@ -207,10 +207,8 @@ static int gb_audio_add_data_connection(struct gbaudio_module_info *gbmodule,
struct gbaudio_data_connection *dai;
dai = devm_kzalloc(gbmodule->dev, sizeof(*dai), GFP_KERNEL);
- if (!dai) {
- dev_err(gbmodule->dev, "DAI Malloc failure\n");
+ if (!dai)
return -ENOMEM;
- }
connection = gb_connection_create_offloaded(bundle,
le16_to_cpu(cport_desc->id),
@@ -345,10 +343,9 @@ static int gb_audio_probe(struct gb_bundle *bundle,
dev_dbg(dev, "Inform set_event:%d to above layer\n", 1);
/* prepare for the audio manager */
strlcpy(desc.name, gbmodule->name, GB_AUDIO_MANAGER_MODULE_NAME_LEN);
- desc.slot = 1; /* todo */
desc.vid = 2; /* todo */
desc.pid = 3; /* todo */
- desc.cport = gbmodule->dev_id;
+ desc.intf_id = gbmodule->dev_id;
desc.op_devices = gbmodule->op_devices;
desc.ip_devices = gbmodule->ip_devices;
gbmodule->manager_id = gb_audio_manager_add(&desc);
diff --git a/drivers/staging/greybus/audio_topology.c b/drivers/staging/greybus/audio_topology.c
index b6251691a33d..8b216ca99cf9 100644
--- a/drivers/staging/greybus/audio_topology.c
+++ b/drivers/staging/greybus/audio_topology.c
@@ -114,6 +114,7 @@ static int gbaudio_map_widgetname(struct gbaudio_module_info *module,
const char *name)
{
struct gbaudio_widget *widget;
+
list_for_each_entry(widget, &module->widget_list, list) {
if (!strncmp(widget->name, name, NAME_SIZE))
return widget->id;
@@ -1044,8 +1045,10 @@ static int gbaudio_tplg_create_widget(struct gbaudio_module_info *module,
control->texts = (const char * const *)
gb_generate_enum_strings(module, gbenum);
control->items = gbenum->items;
- } else
+ } else {
csize = sizeof(struct gb_audio_control);
+ }
+
*w_size += csize;
curr = (void *)curr + csize;
list_add(&control->list, &module->widget_ctl_list);
@@ -1190,8 +1193,9 @@ static int gbaudio_tplg_process_kcontrols(struct gbaudio_module_info *module,
control->texts = (const char * const *)
gb_generate_enum_strings(module, gbenum);
control->items = gbenum->items;
- } else
+ } else {
csize = sizeof(struct gb_audio_control);
+ }
list_add(&control->list, &module->ctl_list);
dev_dbg(module->dev, "%d:%s created of type %d\n", curr->id,
diff --git a/drivers/staging/greybus/camera.c b/drivers/staging/greybus/camera.c
index 491bdd720c0c..0ee291ca2c72 100644
--- a/drivers/staging/greybus/camera.c
+++ b/drivers/staging/greybus/camera.c
@@ -289,6 +289,7 @@ static const int gb_camera_configure_streams_validate_response(
for (i = 0; i < resp->num_streams; i++) {
struct gb_camera_stream_config_response *cfg = &resp->config[i];
+
if (cfg->padding) {
gcam_err(gcam, "stream #%u padding != 0\n", i);
return -EIO;
@@ -796,7 +797,7 @@ static int gb_camera_op_configure_streams(void *priv, unsigned int *nstreams,
if (gb_nstreams > GB_CAMERA_MAX_STREAMS)
return -EINVAL;
- gb_streams = kzalloc(gb_nstreams * sizeof(*gb_streams), GFP_KERNEL);
+ gb_streams = kcalloc(gb_nstreams, sizeof(*gb_streams), GFP_KERNEL);
if (!gb_streams)
return -ENOMEM;
@@ -937,7 +938,7 @@ static ssize_t gb_camera_debugfs_configure_streams(struct gb_camera *gcam,
return ret;
/* For each stream to configure parse width, height and format */
- streams = kzalloc(nstreams * sizeof(*streams), GFP_KERNEL);
+ streams = kcalloc(nstreams, sizeof(*streams), GFP_KERNEL);
if (!streams)
return -ENOMEM;
@@ -1091,7 +1092,7 @@ static ssize_t gb_camera_debugfs_read(struct file *file, char __user *buf,
size_t len, loff_t *offset)
{
const struct gb_camera_debugfs_entry *op = file->private_data;
- struct gb_camera *gcam = file->f_inode->i_private;
+ struct gb_camera *gcam = file_inode(file)->i_private;
struct gb_camera_debugfs_buffer *buffer;
ssize_t ret;
@@ -1113,12 +1114,12 @@ static ssize_t gb_camera_debugfs_write(struct file *file,
loff_t *offset)
{
const struct gb_camera_debugfs_entry *op = file->private_data;
- struct gb_camera *gcam = file->f_inode->i_private;
+ struct gb_camera *gcam = file_inode(file)->i_private;
ssize_t ret;
char *kbuf;
if (len > 1024)
- return -EINVAL;
+ return -EINVAL;
kbuf = kmalloc(len + 1, GFP_KERNEL);
if (!kbuf)
diff --git a/drivers/staging/greybus/es2.c b/drivers/staging/greybus/es2.c
index baab460eeaa3..c1929dfa9b31 100644
--- a/drivers/staging/greybus/es2.c
+++ b/drivers/staging/greybus/es2.c
@@ -175,10 +175,9 @@ static int output_sync(struct es2_ap_dev *es2, void *req, u16 size, u8 cmd)
u8 *data;
int retval;
- data = kmalloc(size, GFP_KERNEL);
+ data = kmemdup(req, size, GFP_KERNEL);
if (!data)
return -ENOMEM;
- memcpy(data, req, size);
retval = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
cmd,
@@ -1034,7 +1033,7 @@ static struct arpc *arpc_alloc(void *payload, u16 size, u8 type)
goto err_free_req;
rpc->req->type = type;
- rpc->req->size = cpu_to_le16(sizeof(rpc->req) + size);
+ rpc->req->size = cpu_to_le16(sizeof(*rpc->req) + size);
memcpy(rpc->req->data, payload, size);
init_completion(&rpc->response_received);
@@ -1250,7 +1249,7 @@ static int apb_log_poll(void *data)
static ssize_t apb_log_read(struct file *f, char __user *buf,
size_t count, loff_t *ppos)
{
- struct es2_ap_dev *es2 = f->f_inode->i_private;
+ struct es2_ap_dev *es2 = file_inode(f)->i_private;
ssize_t ret;
size_t copied;
char *tmp_buf;
@@ -1304,7 +1303,7 @@ static void usb_log_disable(struct es2_ap_dev *es2)
static ssize_t apb_log_enable_read(struct file *f, char __user *buf,
size_t count, loff_t *ppos)
{
- struct es2_ap_dev *es2 = f->f_inode->i_private;
+ struct es2_ap_dev *es2 = file_inode(f)->i_private;
int enable = !IS_ERR_OR_NULL(es2->apb_log_task);
char tmp_buf[3];
@@ -1317,7 +1316,7 @@ static ssize_t apb_log_enable_write(struct file *f, const char __user *buf,
{
int enable;
ssize_t retval;
- struct es2_ap_dev *es2 = f->f_inode->i_private;
+ struct es2_ap_dev *es2 = file_inode(f)->i_private;
retval = kstrtoint_from_user(buf, count, 10, &enable);
if (retval)
diff --git a/drivers/staging/greybus/log.c b/drivers/staging/greybus/log.c
index 70dd9e5a1cf2..1a18ab1ff8aa 100644
--- a/drivers/staging/greybus/log.c
+++ b/drivers/staging/greybus/log.c
@@ -55,8 +55,10 @@ static int gb_log_request_handler(struct gb_operation *op)
/* Ensure the buffer is 0 terminated */
receive->msg[len - 1] = '\0';
- /* Print with dev_dbg() so that it can be easily turned off using
- * dynamic debugging (and prevent any DoS) */
+ /*
+ * Print with dev_dbg() so that it can be easily turned off using
+ * dynamic debugging (and prevent any DoS)
+ */
dev_dbg(dev, "%s", receive->msg);
return 0;
diff --git a/drivers/staging/greybus/sdio.c b/drivers/staging/greybus/sdio.c
index 5649ef1e379d..66b37ea29ef0 100644
--- a/drivers/staging/greybus/sdio.c
+++ b/drivers/staging/greybus/sdio.c
@@ -191,9 +191,8 @@ static int _gb_sdio_process_events(struct gb_sdio_host *host, u8 event)
state_changed = 1;
}
- if (event & GB_SDIO_WP) {
+ if (event & GB_SDIO_WP)
host->read_only = true;
- }
if (state_changed) {
dev_info(mmc_dev(host->mmc), "card %s now event\n",
diff --git a/drivers/staging/greybus/svc.c b/drivers/staging/greybus/svc.c
index 550055ec27a5..8779270cadc1 100644
--- a/drivers/staging/greybus/svc.c
+++ b/drivers/staging/greybus/svc.c
@@ -757,7 +757,7 @@ static int gb_svc_version_request(struct gb_operation *op)
static ssize_t pwr_debugfs_voltage_read(struct file *file, char __user *buf,
size_t len, loff_t *offset)
{
- struct svc_debugfs_pwrmon_rail *pwrmon_rails = file->f_inode->i_private;
+ struct svc_debugfs_pwrmon_rail *pwrmon_rails = file_inode(file)->i_private;
struct gb_svc *svc = pwrmon_rails->svc;
int ret, desc;
u32 value;
@@ -780,7 +780,7 @@ static ssize_t pwr_debugfs_voltage_read(struct file *file, char __user *buf,
static ssize_t pwr_debugfs_current_read(struct file *file, char __user *buf,
size_t len, loff_t *offset)
{
- struct svc_debugfs_pwrmon_rail *pwrmon_rails = file->f_inode->i_private;
+ struct svc_debugfs_pwrmon_rail *pwrmon_rails = file_inode(file)->i_private;
struct gb_svc *svc = pwrmon_rails->svc;
int ret, desc;
u32 value;
@@ -803,7 +803,7 @@ static ssize_t pwr_debugfs_current_read(struct file *file, char __user *buf,
static ssize_t pwr_debugfs_power_read(struct file *file, char __user *buf,
size_t len, loff_t *offset)
{
- struct svc_debugfs_pwrmon_rail *pwrmon_rails = file->f_inode->i_private;
+ struct svc_debugfs_pwrmon_rail *pwrmon_rails = file_inode(file)->i_private;
struct gb_svc *svc = pwrmon_rails->svc;
int ret, desc;
u32 value;
diff --git a/drivers/staging/greybus/timesync.c b/drivers/staging/greybus/timesync.c
index 2e68af7dea6d..29e6c1c12807 100644
--- a/drivers/staging/greybus/timesync.c
+++ b/drivers/staging/greybus/timesync.c
@@ -807,11 +807,11 @@ static int gb_timesync_schedule(struct gb_timesync_svc *timesync_svc, int state)
return -EINVAL;
mutex_lock(&timesync_svc->mutex);
- if (timesync_svc->state != GB_TIMESYNC_STATE_INVALID) {
+ if (timesync_svc->state != GB_TIMESYNC_STATE_INVALID)
gb_timesync_set_state_atomic(timesync_svc, state);
- } else {
+ else
ret = -ENODEV;
- }
+
mutex_unlock(&timesync_svc->mutex);
return ret;
}
@@ -921,7 +921,7 @@ EXPORT_SYMBOL_GPL(gb_timesync_schedule_asynchronous);
static ssize_t gb_timesync_ping_read(struct file *file, char __user *ubuf,
size_t len, loff_t *offset, bool ktime)
{
- struct gb_timesync_svc *timesync_svc = file->f_inode->i_private;
+ struct gb_timesync_svc *timesync_svc = file_inode(file)->i_private;
char *buf;
ssize_t ret = 0;
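The sdio.c hunk above and the timesync.c hunk here both apply the coding-style brace rule: a branch containing a single statement loses its braces, while branches with several statements keep them. A self-contained illustration (all names invented):

#include <linux/bitops.h>
#include <linux/types.h>

#define DEMO_EVENT_WP   BIT(0)
#define DEMO_EVENT_CD   BIT(1)

struct demo_host {
        bool read_only;
        bool card_present;
        bool state_changed;
};

static void demo_process_event(struct demo_host *host, u8 event)
{
        if (event & DEMO_EVENT_WP)      /* one statement: no braces */
                host->read_only = true;

        if (event & DEMO_EVENT_CD) {    /* several statements: keep braces */
                host->card_present = true;
                host->state_changed = true;
        }
}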
diff --git a/drivers/staging/greybus/uart.c b/drivers/staging/greybus/uart.c
index 2633d2bfb1b4..6d39f4a04754 100644
--- a/drivers/staging/greybus/uart.c
+++ b/drivers/staging/greybus/uart.c
@@ -623,9 +623,6 @@ static int get_serial_info(struct gb_tty *gb_tty,
{
struct serial_struct tmp;
- if (!info)
- return -EINVAL;
-
memset(&tmp, 0, sizeof(tmp));
tmp.flags = ASYNC_LOW_LATENCY | ASYNC_SKIP_TEST;
tmp.type = PORT_16550A;
@@ -711,25 +708,20 @@ static int wait_serial_change(struct gb_tty *gb_tty, unsigned long arg)
return retval;
}
-static int get_serial_usage(struct gb_tty *gb_tty,
- struct serial_icounter_struct __user *count)
+static int gb_tty_get_icount(struct tty_struct *tty,
+ struct serial_icounter_struct *icount)
{
- struct serial_icounter_struct icount;
- int retval = 0;
-
- memset(&icount, 0, sizeof(icount));
- icount.dsr = gb_tty->iocount.dsr;
- icount.rng = gb_tty->iocount.rng;
- icount.dcd = gb_tty->iocount.dcd;
- icount.frame = gb_tty->iocount.frame;
- icount.overrun = gb_tty->iocount.overrun;
- icount.parity = gb_tty->iocount.parity;
- icount.brk = gb_tty->iocount.brk;
+ struct gb_tty *gb_tty = tty->driver_data;
- if (copy_to_user(count, &icount, sizeof(icount)) > 0)
- retval = -EFAULT;
+ icount->dsr = gb_tty->iocount.dsr;
+ icount->rng = gb_tty->iocount.rng;
+ icount->dcd = gb_tty->iocount.dcd;
+ icount->frame = gb_tty->iocount.frame;
+ icount->overrun = gb_tty->iocount.overrun;
+ icount->parity = gb_tty->iocount.parity;
+ icount->brk = gb_tty->iocount.brk;
- return retval;
+ return 0;
}
static int gb_tty_ioctl(struct tty_struct *tty, unsigned int cmd,
@@ -746,9 +738,6 @@ static int gb_tty_ioctl(struct tty_struct *tty, unsigned int cmd,
(struct serial_struct __user *)arg);
case TIOCMIWAIT:
return wait_serial_change(gb_tty, arg);
- case TIOCGICOUNT:
- return get_serial_usage(gb_tty,
- (struct serial_icounter_struct __user *)arg);
}
return -ENOIOCTLCMD;
@@ -830,9 +819,10 @@ static const struct tty_operations gb_ops = {
.set_termios = gb_tty_set_termios,
.tiocmget = gb_tty_tiocmget,
.tiocmset = gb_tty_tiocmset,
+ .get_icount = gb_tty_get_icount,
};
-static struct tty_port_operations gb_port_ops = {
+static const struct tty_port_operations gb_port_ops = {
.dtr_rts = gb_tty_dtr_rts,
.activate = gb_tty_port_activate,
.shutdown = gb_tty_port_shutdown,
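The uart.c change retires the driver's private TIOCGICOUNT handling: instead of catching the ioctl and calling copy_to_user() itself, the driver now implements the tty core's .get_icount operation. The core zeroes a kernel-space serial_icounter_struct, calls the hook to fill it, and copies the result to userspace, so the EFAULT handling disappears from the driver. A minimal sketch of the hook, with the per-port struct invented:

#include <linux/tty.h>
#include <linux/serial.h>

/* Hypothetical per-port state. */
struct demo_port {
        struct async_icount iocount;
};

static int demo_tty_get_icount(struct tty_struct *tty,
                               struct serial_icounter_struct *icount)
{
        struct demo_port *port = tty->driver_data;

        /* Fill only what the hardware tracks; unfilled fields stay zero. */
        icount->dsr = port->iocount.dsr;
        icount->dcd = port->iocount.dcd;
        icount->frame = port->iocount.frame;

        return 0;
}

static const struct tty_operations demo_ops = {
        /* ...open/close/write hooks elided... */
        .get_icount = demo_tty_get_icount,
};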
diff --git a/drivers/staging/gs_fpgaboot/gs_fpgaboot.c b/drivers/staging/gs_fpgaboot/gs_fpgaboot.c
index 8ed4d395be58..19b550fff04b 100644
--- a/drivers/staging/gs_fpgaboot/gs_fpgaboot.c
+++ b/drivers/staging/gs_fpgaboot/gs_fpgaboot.c
@@ -38,7 +38,7 @@ static u8 bits_magic[] = {
static struct platform_device *firmware_pdev;
static char *file = "xlinx_fpga_firmware.bit";
-module_param(file, charp, S_IRUGO);
+module_param(file, charp, 0444);
MODULE_PARM_DESC(file, "Xilinx FPGA firmware file.");
static void read_bitstream(char *bitdata, char *buf, int *offset, int rdsize)
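The gs_fpgaboot hunk replaces S_IRUGO with its octal value; checkpatch prefers literal octal permissions for module_param() and friends because 0444 is readable at a glance where the macro spelling is not. A self-contained sketch (parameter name invented):

#include <linux/module.h>
#include <linux/moduleparam.h>

/*
 * 0444 == S_IRUGO: the parameter is readable by everyone under
 * /sys/module/<module>/parameters/ and writable by no one.
 */
static char *demo_file = "demo_firmware.bit";
module_param(demo_file, charp, 0444);
MODULE_PARM_DESC(demo_file, "Illustrative firmware file name");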
diff --git a/drivers/staging/i4l/act2000/act2000_isa.c b/drivers/staging/i4l/act2000/act2000_isa.c
index ad7a0391369f..76ff5de65781 100644
--- a/drivers/staging/i4l/act2000/act2000_isa.c
+++ b/drivers/staging/i4l/act2000/act2000_isa.c
@@ -259,6 +259,7 @@ act2000_isa_receive(act2000_card *card)
"act2000_isa_receive: Invalid CAPI msg\n");
{
int i; __u8 *p; __u8 *t; __u8 tmp[30];
+
for (i = 0, p = (__u8 *)&card->idat.isa.rcvhdr, t = tmp; i < 8; i++)
t += sprintf(t, "%02x ", *(p++));
printk(KERN_WARNING "act2000_isa_receive: %s\n", tmp);
diff --git a/drivers/staging/i4l/act2000/capi.c b/drivers/staging/i4l/act2000/capi.c
index 62f56294853c..61386a78fb91 100644
--- a/drivers/staging/i4l/act2000/capi.c
+++ b/drivers/staging/i4l/act2000/capi.c
@@ -99,7 +99,7 @@ actcapi_chkhdr(act2000_card *card, actcapi_msghdr *hdr)
for (i = 0; i < num_valid_imsg; i++)
if ((hdr->cmd.cmd == valid_msg[i].cmd.cmd) &&
(hdr->cmd.subcmd == valid_msg[i].cmd.subcmd)) {
- return (i ? 1 : 2);
+ return i ? 1 : 2;
}
return 0;
}
@@ -506,6 +506,7 @@ static int
new_plci(act2000_card *card, __u16 plci)
{
int i;
+
for (i = 0; i < ACT2000_BCH; i++)
if (card->bch[i].plci == 0x8000) {
card->bch[i].plci = plci;
@@ -518,6 +519,7 @@ static int
find_plci(act2000_card *card, __u16 plci)
{
int i;
+
for (i = 0; i < ACT2000_BCH; i++)
if (card->bch[i].plci == plci)
return i;
@@ -528,6 +530,7 @@ static int
find_ncci(act2000_card *card, __u16 ncci)
{
int i;
+
for (i = 0; i < ACT2000_BCH; i++)
if (card->bch[i].ncci == ncci)
return i;
@@ -538,6 +541,7 @@ static int
find_dialing(act2000_card *card, __u16 callref)
{
int i;
+
for (i = 0; i < ACT2000_BCH; i++)
if ((card->bch[i].callref == callref) &&
(card->bch[i].fsm_state == ACT2000_STATE_OCALL))
@@ -1088,6 +1092,7 @@ actcapi_debug_msg(struct sk_buff *skb, int direction)
int l = msg->hdr.len - 12;
int j;
char *p = tmp;
+
for (j = 0; j < l; j++)
p += sprintf(p, "%02x ", msg->msg.info_ind.el.display[j]);
printk(KERN_DEBUG " D = '%s'\n", tmp);
diff --git a/drivers/staging/i4l/act2000/module.c b/drivers/staging/i4l/act2000/module.c
index 99c9c0a1c63e..6aa120319e52 100644
--- a/drivers/staging/i4l/act2000/module.c
+++ b/drivers/staging/i4l/act2000/module.c
@@ -19,8 +19,7 @@
#include <linux/slab.h>
#include <linux/init.h>
-static unsigned short act2000_isa_ports[] =
-{
+static unsigned short act2000_isa_ports[] = {
0x0200, 0x0240, 0x0280, 0x02c0, 0x0300, 0x0340, 0x0380,
0xcfe0, 0xcfa0, 0xcf60, 0xcf20, 0xcee0, 0xcea0, 0xce60,
};
@@ -95,7 +94,7 @@ act2000_find_msn(act2000_card *card, char *msn, int ia5)
p = p->next;
}
if (!ia5)
- return (1 << (eaz - '0'));
+ return 1 << (eaz - '0');
else
return eaz;
}
@@ -111,10 +110,10 @@ act2000_find_eaz(act2000_card *card, char eaz)
while (p) {
if (p->eaz == eaz)
- return (p->msn);
+ return p->msn;
p = p->next;
}
- return ("\0");
+ return "\0";
}
/*
@@ -293,7 +292,7 @@ act2000_command(act2000_card *card, isdn_ctrl *c)
if (ret)
return ret;
if (card->flags & ACT2000_FLAGS_RUNNING)
- return (actcapi_manufacturer_req_msn(card));
+ return actcapi_manufacturer_req_msn(card);
return 0;
case ACT2000_IOCTL_ADDCARD:
if (copy_from_user(&cdef, arg,
@@ -377,6 +376,7 @@ act2000_command(act2000_card *card, isdn_ctrl *c)
}
if (card->ptype == ISDN_PTYPE_1TR6) {
int i;
+
chan->eazmask = 0;
for (i = 0; i < strlen(c->parm.num); i++)
if (isdigit(c->parm.num[i]))
@@ -512,7 +512,7 @@ if_command(isdn_ctrl *c)
act2000_card *card = act2000_findcard(c->driver);
if (card)
- return (act2000_command(card, c));
+ return act2000_command(card, c);
printk(KERN_ERR
"act2000: if_command %d called with invalid driverId %d!\n",
c->command, c->driver);
@@ -527,7 +527,7 @@ if_writecmd(const u_char __user *buf, int len, int id, int channel)
if (card) {
if (!(card->flags & ACT2000_FLAGS_RUNNING))
return -ENODEV;
- return (len);
+ return len;
}
printk(KERN_ERR
"act2000: if_writecmd called with invalid driverId!\n");
@@ -542,7 +542,7 @@ if_readstatus(u_char __user *buf, int len, int id, int channel)
if (card) {
if (!(card->flags & ACT2000_FLAGS_RUNNING))
return -ENODEV;
- return (act2000_readstatus(buf, len, card));
+ return act2000_readstatus(buf, len, card);
}
printk(KERN_ERR
"act2000: if_readstatus called with invalid driverId!\n");
@@ -557,7 +557,7 @@ if_sendbuf(int id, int channel, int ack, struct sk_buff *skb)
if (card) {
if (!(card->flags & ACT2000_FLAGS_RUNNING))
return -ENODEV;
- return (act2000_sendbuf(card, channel, ack, skb));
+ return act2000_sendbuf(card, channel, ack, skb);
}
printk(KERN_ERR
"act2000: if_sendbuf called with invalid driverId!\n");
@@ -574,6 +574,7 @@ act2000_alloccard(int bus, int port, int irq, char *id)
{
int i;
act2000_card *card;
+
if (!(card = kzalloc(sizeof(act2000_card), GFP_KERNEL))) {
printk(KERN_WARNING
"act2000: (%s) Could not allocate card-struct.\n", id);
@@ -776,7 +777,7 @@ act2000_addcard(int bus, int port, int irq, char *id)
failed++;
}
}
- return (added - failed);
+ return added - failed;
}
#define DRIVERNAME "IBM Active 2000 ISDN driver"
@@ -795,6 +796,7 @@ static void __exit act2000_exit(void)
{
act2000_card *card = cards;
act2000_card *last;
+
while (card) {
unregister_card(card);
del_timer_sync(&card->ptimer);
diff --git a/drivers/staging/i4l/icn/icn.c b/drivers/staging/i4l/icn/icn.c
index 514bfc2c5b53..3750ba38adc5 100644
--- a/drivers/staging/i4l/icn/icn.c
+++ b/drivers/staging/i4l/icn/icn.c
@@ -411,8 +411,7 @@ typedef struct icn_stat {
int action;
} icn_stat;
/* *INDENT-OFF* */
-static icn_stat icn_stat_table[] =
-{
+static icn_stat icn_stat_table[] = {
{"BCON_", ISDN_STAT_BCONN, 1}, /* B-Channel connected */
{"BDIS_", ISDN_STAT_BHUP, 2}, /* B-Channel disconnected */
/*
diff --git a/drivers/staging/i4l/icn/icn.h b/drivers/staging/i4l/icn/icn.h
index f8f2e76d34bf..07e2e0196527 100644
--- a/drivers/staging/i4l/icn/icn.h
+++ b/drivers/staging/i4l/icn/icn.h
@@ -54,7 +54,7 @@ typedef struct icn_cdef {
/* some useful macros for debugging */
#ifdef ICN_DEBUG_PORT
-#define OUTB_P(v, p) {printk(KERN_DEBUG "icn: outb_p(0x%02x,0x%03x)\n", v, p); outb_p(v, p);}
+#define OUTB_P(v, p) {pr_debug("icn: outb_p(0x%02x,0x%03x)\n", v, p); outb_p(v, p);}
#else
#define OUTB_P outb
#endif
@@ -186,8 +186,7 @@ typedef icn_dev *icn_devptr;
#ifdef __KERNEL__
static icn_card *cards = (icn_card *) 0;
-static u_char chan2bank[] =
-{0, 4, 8, 12}; /* for icn_map_channel() */
+static u_char chan2bank[] = {0, 4, 8, 12}; /* for icn_map_channel() */
static icn_dev dev;
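In the icn.h hunk the debug macro switches from printk(KERN_DEBUG ...) to pr_debug(). The difference is more than cosmetic: pr_debug() call sites compile away entirely unless DEBUG is defined, and with CONFIG_DYNAMIC_DEBUG each site can be toggled at runtime. A sketch of the pattern (helper name invented):

#include <linux/printk.h>
#include <linux/io.h>
#include <linux/types.h>

static inline void demo_outb_traced(u8 val, u16 port)
{
        /*
         * Silent unless enabled, e.g. with dynamic debug:
         *   echo 'func demo_outb_traced +p' > \
         *     /sys/kernel/debug/dynamic_debug/control
         */
        pr_debug("outb_p(0x%02x,0x%03x)\n", val, port);
        outb_p(val, port);
}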
diff --git a/drivers/staging/i4l/pcbit/callbacks.c b/drivers/staging/i4l/pcbit/callbacks.c
index efb6d6a3639a..212ab0b229d4 100644
--- a/drivers/staging/i4l/pcbit/callbacks.c
+++ b/drivers/staging/i4l/pcbit/callbacks.c
@@ -22,7 +22,7 @@
#include <linux/mm.h>
#include <linux/skbuff.h>
-#include <asm/io.h>
+#include <linux/io.h>
#include <linux/isdnif.h>
diff --git a/drivers/staging/i4l/pcbit/capi.c b/drivers/staging/i4l/pcbit/capi.c
index 373f90feda5a..a6c4e00dc726 100644
--- a/drivers/staging/i4l/pcbit/capi.c
+++ b/drivers/staging/i4l/pcbit/capi.c
@@ -27,7 +27,6 @@
* encode our number in CallerPN and ConnectedPN
*/
-#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/types.h>
@@ -36,8 +35,8 @@
#include <linux/skbuff.h>
-#include <asm/io.h>
-#include <asm/string.h>
+#include <linux/io.h>
+#include <linux/string.h>
#include <linux/isdnif.h>
diff --git a/drivers/staging/i4l/pcbit/drv.c b/drivers/staging/i4l/pcbit/drv.c
index d417df5efb5f..89b0b5b94ce5 100644
--- a/drivers/staging/i4l/pcbit/drv.c
+++ b/drivers/staging/i4l/pcbit/drv.c
@@ -27,12 +27,11 @@
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
-#include <linux/string.h>
#include <linux/skbuff.h>
#include <linux/isdnif.h>
-#include <asm/string.h>
-#include <asm/io.h>
+#include <linux/string.h>
+#include <linux/io.h>
#include <linux/ioport.h>
#include "pcbit.h"
diff --git a/drivers/staging/i4l/pcbit/edss1.c b/drivers/staging/i4l/pcbit/edss1.c
index 6d291d548423..5980d1b5da95 100644
--- a/drivers/staging/i4l/pcbit/edss1.c
+++ b/drivers/staging/i4l/pcbit/edss1.c
@@ -23,7 +23,7 @@
#include <linux/skbuff.h>
#include <linux/timer.h>
-#include <asm/io.h>
+#include <linux/io.h>
#include <linux/isdnif.h>
diff --git a/drivers/staging/i4l/pcbit/layer2.c b/drivers/staging/i4l/pcbit/layer2.c
index a136c72547e5..0592bf6ee9c9 100644
--- a/drivers/staging/i4l/pcbit/layer2.c
+++ b/drivers/staging/i4l/pcbit/layer2.c
@@ -36,7 +36,7 @@
#include <linux/isdnif.h>
-#include <asm/io.h>
+#include <linux/io.h>
#include "pcbit.h"
diff --git a/drivers/staging/iio/Documentation/light/sysfs-bus-iio-light-tsl2583 b/drivers/staging/iio/Documentation/light/sysfs-bus-iio-light-tsl2583
deleted file mode 100644
index 470f7ad9c073..000000000000
--- a/drivers/staging/iio/Documentation/light/sysfs-bus-iio-light-tsl2583
+++ /dev/null
@@ -1,6 +0,0 @@
-What: /sys/bus/iio/devices/device[n]/in_illuminance0_calibrate
-KernelVersion: 2.6.37
-Contact: linux-iio@vger.kernel.org
-Description:
- This property causes an internal calibration of the als gain trim
- value which is later used in calculating illuminance in lux.
diff --git a/drivers/staging/iio/Documentation/sysfs-bus-iio-light-tsl2583 b/drivers/staging/iio/Documentation/sysfs-bus-iio-light-tsl2583
deleted file mode 100644
index 660781df409f..000000000000
--- a/drivers/staging/iio/Documentation/sysfs-bus-iio-light-tsl2583
+++ /dev/null
@@ -1,20 +0,0 @@
-What: /sys/bus/iio/devices/device[n]/lux_table
-KernelVersion: 2.6.37
-Contact: linux-iio@vger.kernel.org
-Description:
- This property gets/sets the table of coefficients
- used in calculating illuminance in lux.
-
-What: /sys/bus/iio/devices/device[n]/illuminance0_calibrate
-KernelVersion: 2.6.37
-Contact: linux-iio@vger.kernel.org
-Description:
- This property causes an internal calibration of the als gain trim
- value which is later used in calculating illuminance in lux.
-
-What: /sys/bus/iio/devices/device[n]/illuminance0_input_target
-KernelVersion: 2.6.37
-Contact: linux-iio@vger.kernel.org
-Description:
- This property is the known externally illuminance (in lux).
- It is used in the process of calibrating the device accuracy.
diff --git a/drivers/staging/iio/TODO b/drivers/staging/iio/TODO
index 93a896883e37..4922402e2e98 100644
--- a/drivers/staging/iio/TODO
+++ b/drivers/staging/iio/TODO
@@ -1,76 +1,8 @@
-2009 8/18
-
-Core:
-1) Get reviews
-2) Additional testing
-3) Ensure all desirable features present by adding more devices.
- Major changes not expected except in response to comments
-
-Max1363 core:
-1) Possibly add sysfs exports of constant useful to userspace.
-Would be nice
-2) Support hardware generated interrupts
-3) Expand device set. Lots of other maxim adc's have very
- similar interfaces.
-
-MXS LRADC driver:
-This is a classic MFD device as it combines the following subdevices
- - touchscreen controller (input subsystem related device)
- - general purpose ADC channels
- - battery voltage monitor (power subsystem related device)
- - die temperature monitor (thermal management)
-
-At least the battery voltage and die temperature feature is required in-kernel
-by a driver of the SoC's battery charging unit to avoid any damage to the
-silicon and the battery.
-
-TSL2561
-Would be nice
-1) Open question of userspace vs kernel space balance when
-converting to useful light measurements from device ones.
-2) Add sysfs elements necessary to allow device agnostic
-unit conversion.
-
-LIS3L02DQ core
-
-LIS3L02DQ ring
-
-KXSD9
-Currently minimal driver, would be nice to add:
-1) Support for all chip generated interrupts (events),
-basically get support up to level of lis3l02dq driver.
-
-Ring buffer core
-
-SCA3000
-Would be nice
-1) Testing on devices other than sca3000-e05
-
-Trigger core support
-1) Discussion of approach. Is it general enough?
-
-Ring Buffer:
-1) Discussion of approach.
-There are probably better ways of doing this. The
-intention is to allow for more than one software ring
-buffer implementation as different users will have
-different requirements. This one suits mid range
-frequencies (100Hz - 4kHz).
-2) Lots of testing
-
-GPIO trigger
-1) Add control over the type of interrupt etc. This will
-necessitate a header that is also visible from arch board
-files. (avoided at the moment to keep the driver set
-contained in staging).
+2016 10/09
ADI Drivers:
CC the device-drivers-devel@blackfin.uclinux.org mailing list when
e-mailing the normal IIO list (see below).
-Documentation
-1) Lots of cleanup and expansion.
-2) Some device require individual docs.
-
Contact: Jonathan Cameron <jic23@kernel.org>.
Mailing list: linux-iio@vger.kernel.org
diff --git a/drivers/staging/iio/accel/Kconfig b/drivers/staging/iio/accel/Kconfig
index 1c994b57c7d2..c6b0f5eae7ab 100644
--- a/drivers/staging/iio/accel/Kconfig
+++ b/drivers/staging/iio/accel/Kconfig
@@ -51,14 +51,4 @@ config ADIS16240
To compile this driver as a module, say M here: the module will be
called adis16240.
-config SCA3000
- depends on IIO_BUFFER
- depends on SPI
- tristate "VTI SCA3000 series accelerometers"
- help
- Say Y here to build support for the VTI SCA3000 series of SPI
- accelerometers. These devices use a hardware ring buffer.
-
- To compile this driver as a module, say M here: the module will be
- called sca3000.
endmenu
diff --git a/drivers/staging/iio/accel/Makefile b/drivers/staging/iio/accel/Makefile
index 1810a434a755..febb137b60c4 100644
--- a/drivers/staging/iio/accel/Makefile
+++ b/drivers/staging/iio/accel/Makefile
@@ -13,6 +13,3 @@ obj-$(CONFIG_ADIS16209) += adis16209.o
adis16240-y := adis16240_core.o
obj-$(CONFIG_ADIS16240) += adis16240.o
-
-sca3000-y := sca3000_core.o sca3000_ring.o
-obj-$(CONFIG_SCA3000) += sca3000.o
diff --git a/drivers/staging/iio/accel/sca3000.h b/drivers/staging/iio/accel/sca3000.h
deleted file mode 100644
index 4dcc8575cbe3..000000000000
--- a/drivers/staging/iio/accel/sca3000.h
+++ /dev/null
@@ -1,279 +0,0 @@
-/*
- * sca3000.c -- support VTI sca3000 series accelerometers
- * via SPI
- *
- * Copyright (c) 2007 Jonathan Cameron <jic23@kernel.org>
- *
- * Partly based upon tle62x0.c
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * Initial mode is direct measurement.
- *
- * Untested things
- *
- * Temperature reading (the e05 I'm testing with doesn't have a sensor)
- *
- * Free fall detection mode - supported but untested as I'm not droping my
- * dubious wire rig far enough to test it.
- *
- * Unsupported as yet
- *
- * Time stamping of data from ring. Various ideas on how to do this but none
- * are remotely simple. Suggestions welcome.
- *
- * Individual enabling disabling of channels going into ring buffer
- *
- * Overflow handling (this is signaled for all but 8 bit ring buffer mode.)
- *
- * Motion detector using AND combinations of signals.
- *
- * Note: Be very careful about not touching an register bytes marked
- * as reserved on the data sheet. They really mean it as changing convents of
- * some will cause the device to lock up.
- *
- * Known issues - on rare occasions the interrupts lock up. Not sure why as yet.
- * Can probably alleviate this by reading the interrupt register on start, but
- * that is really just brushing the problem under the carpet.
- */
-#ifndef _SCA3000
-#define _SCA3000
-
-#define SCA3000_WRITE_REG(a) (((a) << 2) | 0x02)
-#define SCA3000_READ_REG(a) ((a) << 2)
-
-#define SCA3000_REG_ADDR_REVID 0x00
-#define SCA3000_REVID_MAJOR_MASK 0xf0
-#define SCA3000_REVID_MINOR_MASK 0x0f
-
-#define SCA3000_REG_ADDR_STATUS 0x02
-#define SCA3000_LOCKED 0x20
-#define SCA3000_EEPROM_CS_ERROR 0x02
-#define SCA3000_SPI_FRAME_ERROR 0x01
-
-/* All reads done using register decrement so no need to directly access LSBs */
-#define SCA3000_REG_ADDR_X_MSB 0x05
-#define SCA3000_REG_ADDR_Y_MSB 0x07
-#define SCA3000_REG_ADDR_Z_MSB 0x09
-
-#define SCA3000_REG_ADDR_RING_OUT 0x0f
-
-/* Temp read untested - the e05 doesn't have the sensor */
-#define SCA3000_REG_ADDR_TEMP_MSB 0x13
-
-#define SCA3000_REG_ADDR_MODE 0x14
-#define SCA3000_MODE_PROT_MASK 0x28
-
-#define SCA3000_RING_BUF_ENABLE 0x80
-#define SCA3000_RING_BUF_8BIT 0x40
-/*
- * Free fall detection triggers an interrupt if the acceleration
- * is below a threshold for equivalent of 25cm drop
- */
-#define SCA3000_FREE_FALL_DETECT 0x10
-#define SCA3000_MEAS_MODE_NORMAL 0x00
-#define SCA3000_MEAS_MODE_OP_1 0x01
-#define SCA3000_MEAS_MODE_OP_2 0x02
-
-/*
- * In motion detection mode the accelerations are band pass filtered
- * (approx 1 - 25Hz) and then a programmable threshold used to trigger
- * and interrupt.
- */
-#define SCA3000_MEAS_MODE_MOT_DET 0x03
-
-#define SCA3000_REG_ADDR_BUF_COUNT 0x15
-
-#define SCA3000_REG_ADDR_INT_STATUS 0x16
-
-#define SCA3000_INT_STATUS_THREE_QUARTERS 0x80
-#define SCA3000_INT_STATUS_HALF 0x40
-
-#define SCA3000_INT_STATUS_FREE_FALL 0x08
-#define SCA3000_INT_STATUS_Y_TRIGGER 0x04
-#define SCA3000_INT_STATUS_X_TRIGGER 0x02
-#define SCA3000_INT_STATUS_Z_TRIGGER 0x01
-
-/* Used to allow access to multiplexed registers */
-#define SCA3000_REG_ADDR_CTRL_SEL 0x18
-/* Only available for SCA3000-D03 and SCA3000-D01 */
-#define SCA3000_REG_CTRL_SEL_I2C_DISABLE 0x01
-#define SCA3000_REG_CTRL_SEL_MD_CTRL 0x02
-#define SCA3000_REG_CTRL_SEL_MD_Y_TH 0x03
-#define SCA3000_REG_CTRL_SEL_MD_X_TH 0x04
-#define SCA3000_REG_CTRL_SEL_MD_Z_TH 0x05
-/*
- * BE VERY CAREFUL WITH THIS, IF 3 BITS ARE NOT SET the device
- * will not function
- */
-#define SCA3000_REG_CTRL_SEL_OUT_CTRL 0x0B
-#define SCA3000_OUT_CTRL_PROT_MASK 0xE0
-#define SCA3000_OUT_CTRL_BUF_X_EN 0x10
-#define SCA3000_OUT_CTRL_BUF_Y_EN 0x08
-#define SCA3000_OUT_CTRL_BUF_Z_EN 0x04
-#define SCA3000_OUT_CTRL_BUF_DIV_MASK 0x03
-#define SCA3000_OUT_CTRL_BUF_DIV_4 0x02
-#define SCA3000_OUT_CTRL_BUF_DIV_2 0x01
-
-/*
- * Control which motion detector interrupts are on.
- * For now only OR combinations are supported.
- */
-#define SCA3000_MD_CTRL_PROT_MASK 0xC0
-#define SCA3000_MD_CTRL_OR_Y 0x01
-#define SCA3000_MD_CTRL_OR_X 0x02
-#define SCA3000_MD_CTRL_OR_Z 0x04
-/* Currently unsupported */
-#define SCA3000_MD_CTRL_AND_Y 0x08
-#define SCA3000_MD_CTRL_AND_X 0x10
-#define SAC3000_MD_CTRL_AND_Z 0x20
-
-/*
- * Some control registers of complex access methods requiring this register to
- * be used to remove a lock.
- */
-#define SCA3000_REG_ADDR_UNLOCK 0x1e
-
-#define SCA3000_REG_ADDR_INT_MASK 0x21
-#define SCA3000_INT_MASK_PROT_MASK 0x1C
-
-#define SCA3000_INT_MASK_RING_THREE_QUARTER 0x80
-#define SCA3000_INT_MASK_RING_HALF 0x40
-
-#define SCA3000_INT_MASK_ALL_INTS 0x02
-#define SCA3000_INT_MASK_ACTIVE_HIGH 0x01
-#define SCA3000_INT_MASK_ACTIVE_LOW 0x00
-
-/* Values of multiplexed registers (write to ctrl_data after select) */
-#define SCA3000_REG_ADDR_CTRL_DATA 0x22
-
-/*
- * Measurement modes available on some sca3000 series chips. Code assumes others
- * may become available in the future.
- *
- * Bypass - Bypass the low-pass filter in the signal channel so as to increase
- * signal bandwidth.
- *
- * Narrow - Narrow low-pass filtering of the signal channel and half output
- * data rate by decimation.
- *
- * Wide - Widen low-pass filtering of signal channel to increase bandwidth
- */
-#define SCA3000_OP_MODE_BYPASS 0x01
-#define SCA3000_OP_MODE_NARROW 0x02
-#define SCA3000_OP_MODE_WIDE 0x04
-#define SCA3000_MAX_TX 6
-#define SCA3000_MAX_RX 2
-
-/**
- * struct sca3000_state - device instance state information
- * @us: the associated spi device
- * @info: chip variant information
- * @interrupt_handler_ws: event interrupt handler for all events
- * @last_timestamp: the timestamp of the last event
- * @mo_det_use_count: reference counter for the motion detection unit
- * @lock: lock used to protect elements of sca3000_state
- * and the underlying device state.
- * @bpse: number of bits per scan element
- * @tx: dma-able transmit buffer
- * @rx: dma-able receive buffer
- **/
-struct sca3000_state {
- struct spi_device *us;
- const struct sca3000_chip_info *info;
- struct work_struct interrupt_handler_ws;
- s64 last_timestamp;
- int mo_det_use_count;
- struct mutex lock;
- int bpse;
- /* Can these share a cacheline ? */
- u8 rx[2] ____cacheline_aligned;
- u8 tx[6] ____cacheline_aligned;
-};
-
-/**
- * struct sca3000_chip_info - model dependent parameters
- * @scale: scale * 10^-6
- * @temp_output: some devices have temperature sensors.
- * @measurement_mode_freq: normal mode sampling frequency
- * @option_mode_1: first optional mode. Not all models have one
- * @option_mode_1_freq: option mode 1 sampling frequency
- * @option_mode_2: second optional mode. Not all chips have one
- * @option_mode_2_freq: option mode 2 sampling frequency
- *
- * This structure is used to hold information about the functionality of a given
- * sca3000 variant.
- **/
-struct sca3000_chip_info {
- unsigned int scale;
- bool temp_output;
- int measurement_mode_freq;
- int option_mode_1;
- int option_mode_1_freq;
- int option_mode_2;
- int option_mode_2_freq;
- int mot_det_mult_xz[6];
- int mot_det_mult_y[7];
-};
-
-int sca3000_read_data_short(struct sca3000_state *st,
- u8 reg_address_high,
- int len);
-
-/**
- * sca3000_write_reg() write a single register
- * @address: address of register on chip
- * @val: value to be written to register
- *
- * The main lock must be held.
- **/
-int sca3000_write_reg(struct sca3000_state *st, u8 address, u8 val);
-
-#ifdef CONFIG_IIO_BUFFER
-/**
- * sca3000_register_ring_funcs() setup the ring state change functions
- **/
-void sca3000_register_ring_funcs(struct iio_dev *indio_dev);
-
-/**
- * sca3000_configure_ring() - allocate and configure ring buffer
- * @indio_dev: iio-core device whose ring is to be configured
- *
- * The hardware ring buffer needs far fewer ring buffer functions than
- * a software one as a lot of things are handled automatically.
- * This function also tells the iio core that our device supports a
- * hardware ring buffer mode.
- **/
-int sca3000_configure_ring(struct iio_dev *indio_dev);
-
-/**
- * sca3000_unconfigure_ring() - deallocate the ring buffer
- * @indio_dev: iio-core device whose ring we are freeing
- **/
-void sca3000_unconfigure_ring(struct iio_dev *indio_dev);
-
-/**
- * sca3000_ring_int_process() handles ring related event pushing and escalation
- * @val: the event code
- **/
-void sca3000_ring_int_process(u8 val, struct iio_buffer *ring);
-
-#else
-static inline void sca3000_register_ring_funcs(struct iio_dev *indio_dev)
-{
-}
-
-static inline
-int sca3000_register_ring_access_and_init(struct iio_dev *indio_dev)
-{
- return 0;
-}
-
-static inline void sca3000_ring_int_process(u8 val, void *ring)
-{
-}
-
-#endif
-#endif /* _SCA3000 */
diff --git a/drivers/staging/iio/accel/sca3000_core.c b/drivers/staging/iio/accel/sca3000_core.c
deleted file mode 100644
index 564b36d4f648..000000000000
--- a/drivers/staging/iio/accel/sca3000_core.c
+++ /dev/null
@@ -1,1210 +0,0 @@
-/*
- * sca3000_core.c -- support VTI sca3000 series accelerometers via SPI
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * Copyright (c) 2009 Jonathan Cameron <jic23@kernel.org>
- *
- * See industrialio/accels/sca3000.h for comments.
- */
-
-#include <linux/interrupt.h>
-#include <linux/fs.h>
-#include <linux/device.h>
-#include <linux/slab.h>
-#include <linux/kernel.h>
-#include <linux/spi/spi.h>
-#include <linux/sysfs.h>
-#include <linux/module.h>
-#include <linux/iio/iio.h>
-#include <linux/iio/sysfs.h>
-#include <linux/iio/events.h>
-#include <linux/iio/buffer.h>
-
-#include "sca3000.h"
-
-enum sca3000_variant {
- d01,
- e02,
- e04,
- e05,
-};
-
-/*
- * Note where option modes are not defined, the chip simply does not
- * support any.
- * Other chips in the sca3000 series use i2c and are not included here.
- *
- * Some of these devices are only listed in the family data sheet and
- * do not actually appear to be available.
- */
-static const struct sca3000_chip_info sca3000_spi_chip_info_tbl[] = {
- [d01] = {
- .scale = 7357,
- .temp_output = true,
- .measurement_mode_freq = 250,
- .option_mode_1 = SCA3000_OP_MODE_BYPASS,
- .option_mode_1_freq = 250,
- .mot_det_mult_xz = {50, 100, 200, 350, 650, 1300},
- .mot_det_mult_y = {50, 100, 150, 250, 450, 850, 1750},
- },
- [e02] = {
- .scale = 9810,
- .measurement_mode_freq = 125,
- .option_mode_1 = SCA3000_OP_MODE_NARROW,
- .option_mode_1_freq = 63,
- .mot_det_mult_xz = {100, 150, 300, 550, 1050, 2050},
- .mot_det_mult_y = {50, 100, 200, 350, 700, 1350, 2700},
- },
- [e04] = {
- .scale = 19620,
- .measurement_mode_freq = 100,
- .option_mode_1 = SCA3000_OP_MODE_NARROW,
- .option_mode_1_freq = 50,
- .option_mode_2 = SCA3000_OP_MODE_WIDE,
- .option_mode_2_freq = 400,
- .mot_det_mult_xz = {200, 300, 600, 1100, 2100, 4100},
- .mot_det_mult_y = {100, 200, 400, 7000, 1400, 2700, 54000},
- },
- [e05] = {
- .scale = 61313,
- .measurement_mode_freq = 200,
- .option_mode_1 = SCA3000_OP_MODE_NARROW,
- .option_mode_1_freq = 50,
- .option_mode_2 = SCA3000_OP_MODE_WIDE,
- .option_mode_2_freq = 400,
- .mot_det_mult_xz = {600, 900, 1700, 3200, 6100, 11900},
- .mot_det_mult_y = {300, 600, 1200, 2000, 4100, 7800, 15600},
- },
-};
-
-int sca3000_write_reg(struct sca3000_state *st, u8 address, u8 val)
-{
- st->tx[0] = SCA3000_WRITE_REG(address);
- st->tx[1] = val;
- return spi_write(st->us, st->tx, 2);
-}
-
-int sca3000_read_data_short(struct sca3000_state *st,
- u8 reg_address_high,
- int len)
-{
- struct spi_transfer xfer[2] = {
- {
- .len = 1,
- .tx_buf = st->tx,
- }, {
- .len = len,
- .rx_buf = st->rx,
- }
- };
- st->tx[0] = SCA3000_READ_REG(reg_address_high);
-
- return spi_sync_transfer(st->us, xfer, ARRAY_SIZE(xfer));
-}
-
-/**
- * sca3000_reg_lock_on() test if the ctrl register lock is on
- *
- * Lock must be held.
- **/
-static int sca3000_reg_lock_on(struct sca3000_state *st)
-{
- int ret;
-
- ret = sca3000_read_data_short(st, SCA3000_REG_ADDR_STATUS, 1);
- if (ret < 0)
- return ret;
-
- return !(st->rx[0] & SCA3000_LOCKED);
-}
-
-/**
- * __sca3000_unlock_reg_lock() unlock the control registers
- *
- * Note the device does not appear to support doing this in a single transfer.
- * This should only ever be used as part of ctrl reg read.
- * Lock must be held before calling this
- **/
-static int __sca3000_unlock_reg_lock(struct sca3000_state *st)
-{
- struct spi_transfer xfer[3] = {
- {
- .len = 2,
- .cs_change = 1,
- .tx_buf = st->tx,
- }, {
- .len = 2,
- .cs_change = 1,
- .tx_buf = st->tx + 2,
- }, {
- .len = 2,
- .tx_buf = st->tx + 4,
- },
- };
- st->tx[0] = SCA3000_WRITE_REG(SCA3000_REG_ADDR_UNLOCK);
- st->tx[1] = 0x00;
- st->tx[2] = SCA3000_WRITE_REG(SCA3000_REG_ADDR_UNLOCK);
- st->tx[3] = 0x50;
- st->tx[4] = SCA3000_WRITE_REG(SCA3000_REG_ADDR_UNLOCK);
- st->tx[5] = 0xA0;
-
- return spi_sync_transfer(st->us, xfer, ARRAY_SIZE(xfer));
-}
-
-/**
- * sca3000_write_ctrl_reg() write to a lock protect ctrl register
- * @sel: selects which registers we wish to write to
- * @val: the value to be written
- *
- * Certain control registers are protected against overwriting by the lock
- * register and use a shared write address. This function allows writing of
- * these registers.
- * Lock must be held.
- **/
-static int sca3000_write_ctrl_reg(struct sca3000_state *st,
- u8 sel,
- uint8_t val)
-{
- int ret;
-
- ret = sca3000_reg_lock_on(st);
- if (ret < 0)
- goto error_ret;
- if (ret) {
- ret = __sca3000_unlock_reg_lock(st);
- if (ret)
- goto error_ret;
- }
-
- /* Set the control select register */
- ret = sca3000_write_reg(st, SCA3000_REG_ADDR_CTRL_SEL, sel);
- if (ret)
- goto error_ret;
-
- /* Write the actual value into the register */
- ret = sca3000_write_reg(st, SCA3000_REG_ADDR_CTRL_DATA, val);
-
-error_ret:
- return ret;
-}
-
-/**
- * sca3000_read_ctrl_reg() read from lock protected control register.
- *
- * Lock must be held.
- **/
-static int sca3000_read_ctrl_reg(struct sca3000_state *st,
- u8 ctrl_reg)
-{
- int ret;
-
- ret = sca3000_reg_lock_on(st);
- if (ret < 0)
- goto error_ret;
- if (ret) {
- ret = __sca3000_unlock_reg_lock(st);
- if (ret)
- goto error_ret;
- }
- /* Set the control select register */
- ret = sca3000_write_reg(st, SCA3000_REG_ADDR_CTRL_SEL, ctrl_reg);
- if (ret)
- goto error_ret;
- ret = sca3000_read_data_short(st, SCA3000_REG_ADDR_CTRL_DATA, 1);
- if (ret)
- goto error_ret;
- return st->rx[0];
-error_ret:
- return ret;
-}
-
-/**
- * sca3000_show_rev() - sysfs interface to read the chip revision number
- **/
-static ssize_t sca3000_show_rev(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- int len = 0, ret;
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct sca3000_state *st = iio_priv(indio_dev);
-
- mutex_lock(&st->lock);
- ret = sca3000_read_data_short(st, SCA3000_REG_ADDR_REVID, 1);
- if (ret < 0)
- goto error_ret;
- len += sprintf(buf + len,
- "major=%d, minor=%d\n",
- st->rx[0] & SCA3000_REVID_MAJOR_MASK,
- st->rx[0] & SCA3000_REVID_MINOR_MASK);
-error_ret:
- mutex_unlock(&st->lock);
-
- return ret ? ret : len;
-}
-
-/**
- * sca3000_show_available_measurement_modes() display available modes
- *
- * This is all read from chip specific data in the driver. Not all
- * of the sca3000 series support modes other than normal.
- **/
-static ssize_t
-sca3000_show_available_measurement_modes(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct sca3000_state *st = iio_priv(indio_dev);
- int len = 0;
-
- len += sprintf(buf + len, "0 - normal mode");
- switch (st->info->option_mode_1) {
- case SCA3000_OP_MODE_NARROW:
- len += sprintf(buf + len, ", 1 - narrow mode");
- break;
- case SCA3000_OP_MODE_BYPASS:
- len += sprintf(buf + len, ", 1 - bypass mode");
- break;
- }
- switch (st->info->option_mode_2) {
- case SCA3000_OP_MODE_WIDE:
- len += sprintf(buf + len, ", 2 - wide mode");
- break;
- }
- /* always supported */
- len += sprintf(buf + len, " 3 - motion detection\n");
-
- return len;
-}
-
-/**
- * sca3000_show_measurement_mode() sysfs read of current mode
- **/
-static ssize_t
-sca3000_show_measurement_mode(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct sca3000_state *st = iio_priv(indio_dev);
- int len = 0, ret;
-
- mutex_lock(&st->lock);
- ret = sca3000_read_data_short(st, SCA3000_REG_ADDR_MODE, 1);
- if (ret)
- goto error_ret;
- /* mask bottom 2 bits - only ones that are relevant */
- st->rx[0] &= 0x03;
- switch (st->rx[0]) {
- case SCA3000_MEAS_MODE_NORMAL:
- len += sprintf(buf + len, "0 - normal mode\n");
- break;
- case SCA3000_MEAS_MODE_MOT_DET:
- len += sprintf(buf + len, "3 - motion detection\n");
- break;
- case SCA3000_MEAS_MODE_OP_1:
- switch (st->info->option_mode_1) {
- case SCA3000_OP_MODE_NARROW:
- len += sprintf(buf + len, "1 - narrow mode\n");
- break;
- case SCA3000_OP_MODE_BYPASS:
- len += sprintf(buf + len, "1 - bypass mode\n");
- break;
- }
- break;
- case SCA3000_MEAS_MODE_OP_2:
- switch (st->info->option_mode_2) {
- case SCA3000_OP_MODE_WIDE:
- len += sprintf(buf + len, "2 - wide mode\n");
- break;
- }
- break;
- }
-
-error_ret:
- mutex_unlock(&st->lock);
-
- return ret ? ret : len;
-}
-
-/**
- * sca3000_store_measurement_mode() set the current mode
- **/
-static ssize_t
-sca3000_store_measurement_mode(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t len)
-{
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct sca3000_state *st = iio_priv(indio_dev);
- int ret;
- u8 mask = 0x03;
- u8 val;
-
- mutex_lock(&st->lock);
- ret = kstrtou8(buf, 10, &val);
- if (ret)
- goto error_ret;
- if (val > 3) {
- ret = -EINVAL;
- goto error_ret;
- }
- ret = sca3000_read_data_short(st, SCA3000_REG_ADDR_MODE, 1);
- if (ret)
- goto error_ret;
- st->rx[0] &= ~mask;
- st->rx[0] |= (val & mask);
- ret = sca3000_write_reg(st, SCA3000_REG_ADDR_MODE, st->rx[0]);
- if (ret)
- goto error_ret;
- mutex_unlock(&st->lock);
-
- return len;
-
-error_ret:
- mutex_unlock(&st->lock);
-
- return ret;
-}
-
-/*
- * Not even vaguely standard attributes so defined here rather than
- * in the relevant IIO core headers
- */
-static IIO_DEVICE_ATTR(measurement_mode_available, S_IRUGO,
- sca3000_show_available_measurement_modes,
- NULL, 0);
-
-static IIO_DEVICE_ATTR(measurement_mode, S_IRUGO | S_IWUSR,
- sca3000_show_measurement_mode,
- sca3000_store_measurement_mode,
- 0);
-
-/* More standard attributes */
-
-static IIO_DEVICE_ATTR(revision, S_IRUGO, sca3000_show_rev, NULL, 0);
-
-static const struct iio_event_spec sca3000_event = {
- .type = IIO_EV_TYPE_MAG,
- .dir = IIO_EV_DIR_RISING,
- .mask_separate = BIT(IIO_EV_INFO_VALUE) | BIT(IIO_EV_INFO_ENABLE),
-};
-
-#define SCA3000_CHAN(index, mod) \
- { \
- .type = IIO_ACCEL, \
- .modified = 1, \
- .channel2 = mod, \
- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
- .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),\
- .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ),\
- .address = index, \
- .scan_index = index, \
- .scan_type = { \
- .sign = 's', \
- .realbits = 11, \
- .storagebits = 16, \
- .shift = 5, \
- }, \
- .event_spec = &sca3000_event, \
- .num_event_specs = 1, \
- }
-
-static const struct iio_chan_spec sca3000_channels[] = {
- SCA3000_CHAN(0, IIO_MOD_X),
- SCA3000_CHAN(1, IIO_MOD_Y),
- SCA3000_CHAN(2, IIO_MOD_Z),
-};
-
-static const struct iio_chan_spec sca3000_channels_with_temp[] = {
- SCA3000_CHAN(0, IIO_MOD_X),
- SCA3000_CHAN(1, IIO_MOD_Y),
- SCA3000_CHAN(2, IIO_MOD_Z),
- {
- .type = IIO_TEMP,
- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
- .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE) |
- BIT(IIO_CHAN_INFO_OFFSET),
- /* No buffer support */
- .scan_index = -1,
- },
-};
-
-static u8 sca3000_addresses[3][3] = {
- [0] = {SCA3000_REG_ADDR_X_MSB, SCA3000_REG_CTRL_SEL_MD_X_TH,
- SCA3000_MD_CTRL_OR_X},
- [1] = {SCA3000_REG_ADDR_Y_MSB, SCA3000_REG_CTRL_SEL_MD_Y_TH,
- SCA3000_MD_CTRL_OR_Y},
- [2] = {SCA3000_REG_ADDR_Z_MSB, SCA3000_REG_CTRL_SEL_MD_Z_TH,
- SCA3000_MD_CTRL_OR_Z},
-};
-
-/**
- * __sca3000_get_base_freq() obtain mode specific base frequency
- *
- * lock must be held
- **/
-static inline int __sca3000_get_base_freq(struct sca3000_state *st,
- const struct sca3000_chip_info *info,
- int *base_freq)
-{
- int ret;
-
- ret = sca3000_read_data_short(st, SCA3000_REG_ADDR_MODE, 1);
- if (ret)
- goto error_ret;
- switch (0x03 & st->rx[0]) {
- case SCA3000_MEAS_MODE_NORMAL:
- *base_freq = info->measurement_mode_freq;
- break;
- case SCA3000_MEAS_MODE_OP_1:
- *base_freq = info->option_mode_1_freq;
- break;
- case SCA3000_MEAS_MODE_OP_2:
- *base_freq = info->option_mode_2_freq;
- break;
- default:
- ret = -EINVAL;
- }
-error_ret:
- return ret;
-}
-
-/**
- * read_raw handler for IIO_CHAN_INFO_SAMP_FREQ
- *
- * lock must be held
- **/
-static int read_raw_samp_freq(struct sca3000_state *st, int *val)
-{
- int ret;
-
- ret = __sca3000_get_base_freq(st, st->info, val);
- if (ret)
- return ret;
-
- ret = sca3000_read_ctrl_reg(st, SCA3000_REG_CTRL_SEL_OUT_CTRL);
- if (ret < 0)
- return ret;
-
- if (*val > 0) {
- ret &= SCA3000_OUT_CTRL_BUF_DIV_MASK;
- switch (ret) {
- case SCA3000_OUT_CTRL_BUF_DIV_2:
- *val /= 2;
- break;
- case SCA3000_OUT_CTRL_BUF_DIV_4:
- *val /= 4;
- break;
- }
- }
-
- return 0;
-}
-
-/**
- * write_raw handler for IIO_CHAN_INFO_SAMP_FREQ
- *
- * lock must be held
- **/
-static int write_raw_samp_freq(struct sca3000_state *st, int val)
-{
- int ret, base_freq, ctrlval;
-
- ret = __sca3000_get_base_freq(st, st->info, &base_freq);
- if (ret)
- return ret;
-
- ret = sca3000_read_ctrl_reg(st, SCA3000_REG_CTRL_SEL_OUT_CTRL);
- if (ret < 0)
- return ret;
-
- ctrlval = ret & ~SCA3000_OUT_CTRL_BUF_DIV_MASK;
-
- if (val == base_freq / 2)
- ctrlval |= SCA3000_OUT_CTRL_BUF_DIV_2;
- if (val == base_freq / 4)
- ctrlval |= SCA3000_OUT_CTRL_BUF_DIV_4;
- else if (val != base_freq)
- return -EINVAL;
-
- return sca3000_write_ctrl_reg(st, SCA3000_REG_CTRL_SEL_OUT_CTRL,
- ctrlval);
-}
-
-static int sca3000_read_raw(struct iio_dev *indio_dev,
- struct iio_chan_spec const *chan,
- int *val,
- int *val2,
- long mask)
-{
- struct sca3000_state *st = iio_priv(indio_dev);
- int ret;
- u8 address;
-
- switch (mask) {
- case IIO_CHAN_INFO_RAW:
- mutex_lock(&st->lock);
- if (chan->type == IIO_ACCEL) {
- if (st->mo_det_use_count) {
- mutex_unlock(&st->lock);
- return -EBUSY;
- }
- address = sca3000_addresses[chan->address][0];
- ret = sca3000_read_data_short(st, address, 2);
- if (ret < 0) {
- mutex_unlock(&st->lock);
- return ret;
- }
- *val = (be16_to_cpup((__be16 *)st->rx) >> 3) & 0x1FFF;
- *val = ((*val) << (sizeof(*val) * 8 - 13)) >>
- (sizeof(*val) * 8 - 13);
- } else {
- /* get the temperature when available */
- ret = sca3000_read_data_short(st,
- SCA3000_REG_ADDR_TEMP_MSB,
- 2);
- if (ret < 0) {
- mutex_unlock(&st->lock);
- return ret;
- }
- *val = ((st->rx[0] & 0x3F) << 3) |
- ((st->rx[1] & 0xE0) >> 5);
- }
- mutex_unlock(&st->lock);
- return IIO_VAL_INT;
- case IIO_CHAN_INFO_SCALE:
- *val = 0;
- if (chan->type == IIO_ACCEL)
- *val2 = st->info->scale;
- else /* temperature */
- *val2 = 555556;
- return IIO_VAL_INT_PLUS_MICRO;
- case IIO_CHAN_INFO_OFFSET:
- *val = -214;
- *val2 = 600000;
- return IIO_VAL_INT_PLUS_MICRO;
- case IIO_CHAN_INFO_SAMP_FREQ:
- mutex_lock(&st->lock);
- ret = read_raw_samp_freq(st, val);
- mutex_unlock(&st->lock);
- return ret ? ret : IIO_VAL_INT;
- default:
- return -EINVAL;
- }
-}
-
-static int sca3000_write_raw(struct iio_dev *indio_dev,
- struct iio_chan_spec const *chan,
- int val, int val2, long mask)
-{
- struct sca3000_state *st = iio_priv(indio_dev);
- int ret;
-
- switch (mask) {
- case IIO_CHAN_INFO_SAMP_FREQ:
- if (val2)
- return -EINVAL;
- mutex_lock(&st->lock);
- ret = write_raw_samp_freq(st, val);
- mutex_unlock(&st->lock);
- return ret;
- default:
- return -EINVAL;
- }
-
- return ret;
-}
-
-/**
- * sca3000_read_av_freq() sysfs function to get available frequencies
- *
- * The later modes are only relevant to the ring buffer - and depend on current
- * mode. Note that data sheet gives rather wide tolerances for these so integer
- * division will give good enough answer and not all chips have them specified
- * at all.
- **/
-static ssize_t sca3000_read_av_freq(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct sca3000_state *st = iio_priv(indio_dev);
- int len = 0, ret, val;
-
- mutex_lock(&st->lock);
- ret = sca3000_read_data_short(st, SCA3000_REG_ADDR_MODE, 1);
- val = st->rx[0];
- mutex_unlock(&st->lock);
- if (ret)
- goto error_ret;
-
- switch (val & 0x03) {
- case SCA3000_MEAS_MODE_NORMAL:
- len += sprintf(buf + len, "%d %d %d\n",
- st->info->measurement_mode_freq,
- st->info->measurement_mode_freq / 2,
- st->info->measurement_mode_freq / 4);
- break;
- case SCA3000_MEAS_MODE_OP_1:
- len += sprintf(buf + len, "%d %d %d\n",
- st->info->option_mode_1_freq,
- st->info->option_mode_1_freq / 2,
- st->info->option_mode_1_freq / 4);
- break;
- case SCA3000_MEAS_MODE_OP_2:
- len += sprintf(buf + len, "%d %d %d\n",
- st->info->option_mode_2_freq,
- st->info->option_mode_2_freq / 2,
- st->info->option_mode_2_freq / 4);
- break;
- }
- return len;
-error_ret:
- return ret;
-}
-
-/*
- * Should only really be registered if ring buffer support is compiled in.
- * Does no harm however and doing it right would add a fair bit of complexity
- */
-static IIO_DEV_ATTR_SAMP_FREQ_AVAIL(sca3000_read_av_freq);
-
-/**
- * sca3000_read_thresh() - query of a threshold
- **/
-static int sca3000_read_thresh(struct iio_dev *indio_dev,
- const struct iio_chan_spec *chan,
- enum iio_event_type type,
- enum iio_event_direction dir,
- enum iio_event_info info,
- int *val, int *val2)
-{
- int ret, i;
- struct sca3000_state *st = iio_priv(indio_dev);
- int num = chan->channel2;
-
- mutex_lock(&st->lock);
- ret = sca3000_read_ctrl_reg(st, sca3000_addresses[num][1]);
- mutex_unlock(&st->lock);
- if (ret < 0)
- return ret;
- *val = 0;
- if (num == 1)
- for_each_set_bit(i, (unsigned long *)&ret,
- ARRAY_SIZE(st->info->mot_det_mult_y))
- *val += st->info->mot_det_mult_y[i];
- else
- for_each_set_bit(i, (unsigned long *)&ret,
- ARRAY_SIZE(st->info->mot_det_mult_xz))
- *val += st->info->mot_det_mult_xz[i];
-
- return IIO_VAL_INT;
-}
-
-/**
- * sca3000_write_thresh() control of threshold
- **/
-static int sca3000_write_thresh(struct iio_dev *indio_dev,
- const struct iio_chan_spec *chan,
- enum iio_event_type type,
- enum iio_event_direction dir,
- enum iio_event_info info,
- int val, int val2)
-{
- struct sca3000_state *st = iio_priv(indio_dev);
- int num = chan->channel2;
- int ret;
- int i;
- u8 nonlinear = 0;
-
- if (num == 1) {
- i = ARRAY_SIZE(st->info->mot_det_mult_y);
- while (i > 0)
- if (val >= st->info->mot_det_mult_y[--i]) {
- nonlinear |= (1 << i);
- val -= st->info->mot_det_mult_y[i];
- }
- } else {
- i = ARRAY_SIZE(st->info->mot_det_mult_xz);
- while (i > 0)
- if (val >= st->info->mot_det_mult_xz[--i]) {
- nonlinear |= (1 << i);
- val -= st->info->mot_det_mult_xz[i];
- }
- }
-
- mutex_lock(&st->lock);
- ret = sca3000_write_ctrl_reg(st, sca3000_addresses[num][1], nonlinear);
- mutex_unlock(&st->lock);
-
- return ret;
-}
-
-static struct attribute *sca3000_attributes[] = {
- &iio_dev_attr_revision.dev_attr.attr,
- &iio_dev_attr_measurement_mode_available.dev_attr.attr,
- &iio_dev_attr_measurement_mode.dev_attr.attr,
- &iio_dev_attr_sampling_frequency_available.dev_attr.attr,
- NULL,
-};
-
-static const struct attribute_group sca3000_attribute_group = {
- .attrs = sca3000_attributes,
-};
-
-/**
- * sca3000_event_handler() - handling ring and non ring events
- *
- * Ring related interrupt handler. Depending on event, push to
- * the ring buffer event chrdev or the event one.
- *
- * This function is complicated by the fact that the devices can signify ring
- * and non ring events via the same interrupt line and they can only
- * be distinguished via a read of the relevant status register.
- **/
-static irqreturn_t sca3000_event_handler(int irq, void *private)
-{
- struct iio_dev *indio_dev = private;
- struct sca3000_state *st = iio_priv(indio_dev);
- int ret, val;
- s64 last_timestamp = iio_get_time_ns(indio_dev);
-
- /*
- * Could lead if badly timed to an extra read of status reg,
- * but ensures no interrupt is missed.
- */
- mutex_lock(&st->lock);
- ret = sca3000_read_data_short(st, SCA3000_REG_ADDR_INT_STATUS, 1);
- val = st->rx[0];
- mutex_unlock(&st->lock);
- if (ret)
- goto done;
-
- sca3000_ring_int_process(val, indio_dev->buffer);
-
- if (val & SCA3000_INT_STATUS_FREE_FALL)
- iio_push_event(indio_dev,
- IIO_MOD_EVENT_CODE(IIO_ACCEL,
- 0,
- IIO_MOD_X_AND_Y_AND_Z,
- IIO_EV_TYPE_MAG,
- IIO_EV_DIR_FALLING),
- last_timestamp);
-
- if (val & SCA3000_INT_STATUS_Y_TRIGGER)
- iio_push_event(indio_dev,
- IIO_MOD_EVENT_CODE(IIO_ACCEL,
- 0,
- IIO_MOD_Y,
- IIO_EV_TYPE_MAG,
- IIO_EV_DIR_RISING),
- last_timestamp);
-
- if (val & SCA3000_INT_STATUS_X_TRIGGER)
- iio_push_event(indio_dev,
- IIO_MOD_EVENT_CODE(IIO_ACCEL,
- 0,
- IIO_MOD_X,
- IIO_EV_TYPE_MAG,
- IIO_EV_DIR_RISING),
- last_timestamp);
-
- if (val & SCA3000_INT_STATUS_Z_TRIGGER)
- iio_push_event(indio_dev,
- IIO_MOD_EVENT_CODE(IIO_ACCEL,
- 0,
- IIO_MOD_Z,
- IIO_EV_TYPE_MAG,
- IIO_EV_DIR_RISING),
- last_timestamp);
-
-done:
- return IRQ_HANDLED;
-}
-
-/**
- * sca3000_read_event_config() what events are enabled
- **/
-static int sca3000_read_event_config(struct iio_dev *indio_dev,
- const struct iio_chan_spec *chan,
- enum iio_event_type type,
- enum iio_event_direction dir)
-{
- struct sca3000_state *st = iio_priv(indio_dev);
- int ret;
- u8 protect_mask = 0x03;
- int num = chan->channel2;
-
- /* read current value of mode register */
- mutex_lock(&st->lock);
- ret = sca3000_read_data_short(st, SCA3000_REG_ADDR_MODE, 1);
- if (ret)
- goto error_ret;
-
- if ((st->rx[0] & protect_mask) != SCA3000_MEAS_MODE_MOT_DET) {
- ret = 0;
- } else {
- ret = sca3000_read_ctrl_reg(st, SCA3000_REG_CTRL_SEL_MD_CTRL);
- if (ret < 0)
- goto error_ret;
- /* only supporting logical or's for now */
- ret = !!(ret & sca3000_addresses[num][2]);
- }
-error_ret:
- mutex_unlock(&st->lock);
-
- return ret;
-}
-
-/**
- * sca3000_query_free_fall_mode() is free fall mode enabled
- **/
-static ssize_t sca3000_query_free_fall_mode(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- int ret;
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct sca3000_state *st = iio_priv(indio_dev);
- int val;
-
- mutex_lock(&st->lock);
- ret = sca3000_read_data_short(st, SCA3000_REG_ADDR_MODE, 1);
- val = st->rx[0];
- mutex_unlock(&st->lock);
- if (ret < 0)
- return ret;
- return sprintf(buf, "%d\n", !!(val & SCA3000_FREE_FALL_DETECT));
-}
-
-/**
- * sca3000_set_free_fall_mode() simple on off control for free fall int
- *
- * In these chips the free fall detector should send an interrupt if
- * the device falls more than 25cm. This has not been tested due
- * to fragile wiring.
- **/
-static ssize_t sca3000_set_free_fall_mode(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t len)
-{
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct sca3000_state *st = iio_priv(indio_dev);
- u8 val;
- int ret;
- u8 protect_mask = SCA3000_FREE_FALL_DETECT;
-
- mutex_lock(&st->lock);
- ret = kstrtou8(buf, 10, &val);
- if (ret)
- goto error_ret;
-
- /* read current value of mode register */
- ret = sca3000_read_data_short(st, SCA3000_REG_ADDR_MODE, 1);
- if (ret)
- goto error_ret;
-
- /* if off and should be on */
- if (val && !(st->rx[0] & protect_mask))
- ret = sca3000_write_reg(st, SCA3000_REG_ADDR_MODE,
- (st->rx[0] | SCA3000_FREE_FALL_DETECT));
- /* if on and should be off */
- else if (!val && (st->rx[0] & protect_mask))
- ret = sca3000_write_reg(st, SCA3000_REG_ADDR_MODE,
- (st->rx[0] & ~protect_mask));
-error_ret:
- mutex_unlock(&st->lock);
-
- return ret ? ret : len;
-}
-
-/**
- * sca3000_write_event_config() simple on off control for motion detector
- *
- * This is a per axis control, but enabling any will result in the
- * motion detector unit being enabled.
- * N.B. enabling motion detector stops normal data acquisition.
- * There is a complexity in knowing which mode to return to when
- * this mode is disabled. Currently normal mode is assumed.
- **/
-static int sca3000_write_event_config(struct iio_dev *indio_dev,
- const struct iio_chan_spec *chan,
- enum iio_event_type type,
- enum iio_event_direction dir,
- int state)
-{
- struct sca3000_state *st = iio_priv(indio_dev);
- int ret, ctrlval;
- u8 protect_mask = 0x03;
- int num = chan->channel2;
-
- mutex_lock(&st->lock);
- /*
- * First read the motion detector config to find out if
- * this axis is on
- */
- ret = sca3000_read_ctrl_reg(st, SCA3000_REG_CTRL_SEL_MD_CTRL);
- if (ret < 0)
- goto exit_point;
- ctrlval = ret;
- /* if off and should be on */
- if (state && !(ctrlval & sca3000_addresses[num][2])) {
- ret = sca3000_write_ctrl_reg(st,
- SCA3000_REG_CTRL_SEL_MD_CTRL,
- ctrlval |
- sca3000_addresses[num][2]);
- if (ret)
- goto exit_point;
- st->mo_det_use_count++;
- } else if (!state && (ctrlval & sca3000_addresses[num][2])) {
- ret = sca3000_write_ctrl_reg(st,
- SCA3000_REG_CTRL_SEL_MD_CTRL,
- ctrlval &
- ~(sca3000_addresses[num][2]));
- if (ret)
- goto exit_point;
- st->mo_det_use_count--;
- }
-
- /* read current value of mode register */
- ret = sca3000_read_data_short(st, SCA3000_REG_ADDR_MODE, 1);
- if (ret)
- goto exit_point;
- /* if off and should be on */
- if ((st->mo_det_use_count) &&
- ((st->rx[0] & protect_mask) != SCA3000_MEAS_MODE_MOT_DET))
- ret = sca3000_write_reg(st, SCA3000_REG_ADDR_MODE,
- (st->rx[0] & ~protect_mask)
- | SCA3000_MEAS_MODE_MOT_DET);
- /* if on and should be off */
- else if (!(st->mo_det_use_count) &&
- ((st->rx[0] & protect_mask) == SCA3000_MEAS_MODE_MOT_DET))
- ret = sca3000_write_reg(st, SCA3000_REG_ADDR_MODE,
- (st->rx[0] & ~protect_mask));
-exit_point:
- mutex_unlock(&st->lock);
-
- return ret;
-}
-
-/* Free fall detector related event attribute */
-static IIO_DEVICE_ATTR_NAMED(accel_xayaz_mag_falling_en,
- in_accel_x & y & z_mag_falling_en,
- S_IRUGO | S_IWUSR,
- sca3000_query_free_fall_mode,
- sca3000_set_free_fall_mode,
- 0);
-
-static IIO_CONST_ATTR_NAMED(accel_xayaz_mag_falling_period,
- in_accel_x & y & z_mag_falling_period,
- "0.226");
-
-static struct attribute *sca3000_event_attributes[] = {
- &iio_dev_attr_accel_xayaz_mag_falling_en.dev_attr.attr,
- &iio_const_attr_accel_xayaz_mag_falling_period.dev_attr.attr,
- NULL,
-};
-
-static struct attribute_group sca3000_event_attribute_group = {
- .attrs = sca3000_event_attributes,
- .name = "events",
-};
-
-/**
- * sca3000_clean_setup() get the device into a predictable state
- *
- * Devices use flash memory to store many of the register values
- * and hence can come up in somewhat unpredictable states.
- * Hence reset everything on driver load.
- **/
-static int sca3000_clean_setup(struct sca3000_state *st)
-{
- int ret;
-
- mutex_lock(&st->lock);
- /* Ensure all interrupts have been acknowledged */
- ret = sca3000_read_data_short(st, SCA3000_REG_ADDR_INT_STATUS, 1);
- if (ret)
- goto error_ret;
-
- /* Turn off all motion detection channels */
- ret = sca3000_read_ctrl_reg(st, SCA3000_REG_CTRL_SEL_MD_CTRL);
- if (ret < 0)
- goto error_ret;
- ret = sca3000_write_ctrl_reg(st, SCA3000_REG_CTRL_SEL_MD_CTRL,
- ret & SCA3000_MD_CTRL_PROT_MASK);
- if (ret)
- goto error_ret;
-
- /* Disable ring buffer */
- ret = sca3000_read_ctrl_reg(st, SCA3000_REG_CTRL_SEL_OUT_CTRL);
- if (ret < 0)
- goto error_ret;
- ret = sca3000_write_ctrl_reg(st, SCA3000_REG_CTRL_SEL_OUT_CTRL,
- (ret & SCA3000_OUT_CTRL_PROT_MASK)
- | SCA3000_OUT_CTRL_BUF_X_EN
- | SCA3000_OUT_CTRL_BUF_Y_EN
- | SCA3000_OUT_CTRL_BUF_Z_EN
- | SCA3000_OUT_CTRL_BUF_DIV_4);
- if (ret)
- goto error_ret;
- /* Enable interrupts, relevant to mode and set up as active low */
- ret = sca3000_read_data_short(st, SCA3000_REG_ADDR_INT_MASK, 1);
- if (ret)
- goto error_ret;
- ret = sca3000_write_reg(st,
- SCA3000_REG_ADDR_INT_MASK,
- (ret & SCA3000_INT_MASK_PROT_MASK)
- | SCA3000_INT_MASK_ACTIVE_LOW);
- if (ret)
- goto error_ret;
- /*
- * Select normal measurement mode, free fall off, ring off
- * Ring in 12 bit mode - it is fine to overwrite reserved bits 3,5
- * as that occurs in one of the example on the datasheet
- */
- ret = sca3000_read_data_short(st, SCA3000_REG_ADDR_MODE, 1);
- if (ret)
- goto error_ret;
- ret = sca3000_write_reg(st, SCA3000_REG_ADDR_MODE,
- (st->rx[0] & SCA3000_MODE_PROT_MASK));
- st->bpse = 11;
-
-error_ret:
- mutex_unlock(&st->lock);
- return ret;
-}
-
-static const struct iio_info sca3000_info = {
- .attrs = &sca3000_attribute_group,
- .read_raw = &sca3000_read_raw,
- .write_raw = &sca3000_write_raw,
- .event_attrs = &sca3000_event_attribute_group,
- .read_event_value = &sca3000_read_thresh,
- .write_event_value = &sca3000_write_thresh,
- .read_event_config = &sca3000_read_event_config,
- .write_event_config = &sca3000_write_event_config,
- .driver_module = THIS_MODULE,
-};
-
-static int sca3000_probe(struct spi_device *spi)
-{
- int ret;
- struct sca3000_state *st;
- struct iio_dev *indio_dev;
-
- indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
- if (!indio_dev)
- return -ENOMEM;
-
- st = iio_priv(indio_dev);
- spi_set_drvdata(spi, indio_dev);
- st->us = spi;
- mutex_init(&st->lock);
- st->info = &sca3000_spi_chip_info_tbl[spi_get_device_id(spi)
- ->driver_data];
-
- indio_dev->dev.parent = &spi->dev;
- indio_dev->name = spi_get_device_id(spi)->name;
- indio_dev->info = &sca3000_info;
- if (st->info->temp_output) {
- indio_dev->channels = sca3000_channels_with_temp;
- indio_dev->num_channels =
- ARRAY_SIZE(sca3000_channels_with_temp);
- } else {
- indio_dev->channels = sca3000_channels;
- indio_dev->num_channels = ARRAY_SIZE(sca3000_channels);
- }
- indio_dev->modes = INDIO_DIRECT_MODE;
-
- sca3000_configure_ring(indio_dev);
- ret = iio_device_register(indio_dev);
- if (ret < 0)
- return ret;
-
- if (spi->irq) {
- ret = request_threaded_irq(spi->irq,
- NULL,
- &sca3000_event_handler,
- IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
- "sca3000",
- indio_dev);
- if (ret)
- goto error_unregister_dev;
- }
- sca3000_register_ring_funcs(indio_dev);
- ret = sca3000_clean_setup(st);
- if (ret)
- goto error_free_irq;
- return 0;
-
-error_free_irq:
- if (spi->irq)
- free_irq(spi->irq, indio_dev);
-error_unregister_dev:
- iio_device_unregister(indio_dev);
- return ret;
-}
-
-static int sca3000_stop_all_interrupts(struct sca3000_state *st)
-{
- int ret;
-
- mutex_lock(&st->lock);
- ret = sca3000_read_data_short(st, SCA3000_REG_ADDR_INT_MASK, 1);
- if (ret)
- goto error_ret;
- ret = sca3000_write_reg(st, SCA3000_REG_ADDR_INT_MASK,
- (st->rx[0] &
- ~(SCA3000_INT_MASK_RING_THREE_QUARTER |
- SCA3000_INT_MASK_RING_HALF |
- SCA3000_INT_MASK_ALL_INTS)));
-error_ret:
- mutex_unlock(&st->lock);
- return ret;
-}
-
-static int sca3000_remove(struct spi_device *spi)
-{
- struct iio_dev *indio_dev = spi_get_drvdata(spi);
- struct sca3000_state *st = iio_priv(indio_dev);
-
- /* Must ensure no interrupts can be generated after this! */
- sca3000_stop_all_interrupts(st);
- if (spi->irq)
- free_irq(spi->irq, indio_dev);
- iio_device_unregister(indio_dev);
- sca3000_unconfigure_ring(indio_dev);
-
- return 0;
-}
-
-static const struct spi_device_id sca3000_id[] = {
- {"sca3000_d01", d01},
- {"sca3000_e02", e02},
- {"sca3000_e04", e04},
- {"sca3000_e05", e05},
- {}
-};
-MODULE_DEVICE_TABLE(spi, sca3000_id);
-
-static struct spi_driver sca3000_driver = {
- .driver = {
- .name = "sca3000",
- },
- .probe = sca3000_probe,
- .remove = sca3000_remove,
- .id_table = sca3000_id,
-};
-module_spi_driver(sca3000_driver);
-
-MODULE_AUTHOR("Jonathan Cameron <jic23@kernel.org>");
-MODULE_DESCRIPTION("VTI SCA3000 Series Accelerometers SPI driver");
-MODULE_LICENSE("GPL v2");
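The deleted sca3000_clean_setup() above repeats one idiom for every register
with protected or reserved bits: read the current value, mask it with the
register's *_PROT_MASK, OR in the new field, and write the result back. A
minimal sketch of that pattern, reusing the driver's own helpers (the wrapper
name sca3000_rmw_example is hypothetical):

static int sca3000_rmw_example(struct sca3000_state *st, u8 reg,
			       u8 prot_mask, u8 field)
{
	int ret;

	/* Caller holds st->lock, as in sca3000_clean_setup(). */
	/* Fetch the current register contents into st->rx[0]. */
	ret = sca3000_read_data_short(st, reg, 1);
	if (ret)
		return ret;
	/* Keep the protected bits, replace only the writable field. */
	return sca3000_write_reg(st, reg, (st->rx[0] & prot_mask) | field);
}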
diff --git a/drivers/staging/iio/accel/sca3000_ring.c b/drivers/staging/iio/accel/sca3000_ring.c
deleted file mode 100644
index d1cb9b9cf22b..000000000000
--- a/drivers/staging/iio/accel/sca3000_ring.c
+++ /dev/null
@@ -1,350 +0,0 @@
-/*
- * sca3000_ring.c -- support VTI sca3000 series accelerometers via SPI
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * Copyright (c) 2009 Jonathan Cameron <jic23@kernel.org>
- *
- */
-
-#include <linux/interrupt.h>
-#include <linux/fs.h>
-#include <linux/slab.h>
-#include <linux/kernel.h>
-#include <linux/spi/spi.h>
-#include <linux/sysfs.h>
-#include <linux/sched.h>
-#include <linux/poll.h>
-
-#include <linux/iio/iio.h>
-#include <linux/iio/sysfs.h>
-#include <linux/iio/buffer.h>
-#include "../ring_hw.h"
-#include "sca3000.h"
-
-/* RFC / future work
- *
- * The internal ring buffer doesn't actually change what it holds depending
- * on which signals are enabled etc, merely whether you can read them.
- * As such the scan mode selection is somewhat different from that of a
- * software ring buffer, and changing it actually covers any data already
- * in the buffer.
- * Currently scan elements aren't configured so it doesn't matter.
- */
-
-static int sca3000_read_data(struct sca3000_state *st,
- u8 reg_address_high,
- u8 **rx_p,
- int len)
-{
- int ret;
- struct spi_transfer xfer[2] = {
- {
- .len = 1,
- .tx_buf = st->tx,
- }, {
- .len = len,
- }
- };
- *rx_p = kmalloc(len, GFP_KERNEL);
- if (!*rx_p) {
- ret = -ENOMEM;
- goto error_ret;
- }
- xfer[1].rx_buf = *rx_p;
- st->tx[0] = SCA3000_READ_REG(reg_address_high);
- ret = spi_sync_transfer(st->us, xfer, ARRAY_SIZE(xfer));
- if (ret) {
-		dev_err(&st->us->dev, "problem reading register\n");
- goto error_free_rx;
- }
-
- return 0;
-error_free_rx:
- kfree(*rx_p);
-error_ret:
- return ret;
-}
-
-/**
- * sca3000_read_first_n_hw_rb() - main ring access, pulls data from ring
- * @r: the ring
- * @count:	number of bytes to try to pull
- * @buf:	userspace buffer to copy the pulled samples into
- *
- * Currently does not provide timestamps. As the hardware doesn't add them,
- * they can only be inferred approximately from ring buffer events such as
- * 50% full and from knowledge of when the buffer was last emptied. This is
- * left to userspace.
- **/
-static int sca3000_read_first_n_hw_rb(struct iio_buffer *r,
- size_t count, char __user *buf)
-{
- struct iio_hw_buffer *hw_ring = iio_to_hw_buf(r);
- struct iio_dev *indio_dev = hw_ring->private;
- struct sca3000_state *st = iio_priv(indio_dev);
- u8 *rx;
- int ret, i, num_available, num_read = 0;
- int bytes_per_sample = 1;
-
- if (st->bpse == 11)
- bytes_per_sample = 2;
-
- mutex_lock(&st->lock);
- if (count % bytes_per_sample) {
- ret = -EINVAL;
- goto error_ret;
- }
-
- ret = sca3000_read_data_short(st, SCA3000_REG_ADDR_BUF_COUNT, 1);
- if (ret)
- goto error_ret;
- num_available = st->rx[0];
- /*
- * num_available is the total number of samples available
- * i.e. number of time points * number of channels.
- */
- if (count > num_available * bytes_per_sample)
- num_read = num_available * bytes_per_sample;
- else
- num_read = count;
-
- ret = sca3000_read_data(st,
- SCA3000_REG_ADDR_RING_OUT,
- &rx, num_read);
- if (ret)
- goto error_ret;
-
- for (i = 0; i < num_read / sizeof(u16); i++)
- *(((u16 *)rx) + i) = be16_to_cpup((__be16 *)rx + i);
-
- if (copy_to_user(buf, rx, num_read))
- ret = -EFAULT;
- kfree(rx);
- r->stufftoread = 0;
-error_ret:
- mutex_unlock(&st->lock);
-
- return ret ? ret : num_read;
-}
-
-static size_t sca3000_ring_buf_data_available(struct iio_buffer *r)
-{
- return r->stufftoread ? r->watermark : 0;
-}
-
-/**
- * sca3000_query_ring_int() - query whether the ring status interrupt is enabled
- **/
-static ssize_t sca3000_query_ring_int(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
- int ret, val;
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct sca3000_state *st = iio_priv(indio_dev);
-
- mutex_lock(&st->lock);
- ret = sca3000_read_data_short(st, SCA3000_REG_ADDR_INT_MASK, 1);
- val = st->rx[0];
- mutex_unlock(&st->lock);
- if (ret)
- return ret;
-
- return sprintf(buf, "%d\n", !!(val & this_attr->address));
-}
-
-/**
- * sca3000_set_ring_int() - set the state of the ring status interrupt
- **/
-static ssize_t sca3000_set_ring_int(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t len)
-{
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct sca3000_state *st = iio_priv(indio_dev);
- struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
- u8 val;
- int ret;
-
- mutex_lock(&st->lock);
- ret = kstrtou8(buf, 10, &val);
- if (ret)
- goto error_ret;
- ret = sca3000_read_data_short(st, SCA3000_REG_ADDR_INT_MASK, 1);
- if (ret)
- goto error_ret;
- if (val)
- ret = sca3000_write_reg(st,
- SCA3000_REG_ADDR_INT_MASK,
- st->rx[0] | this_attr->address);
- else
- ret = sca3000_write_reg(st,
- SCA3000_REG_ADDR_INT_MASK,
- st->rx[0] & ~this_attr->address);
-error_ret:
- mutex_unlock(&st->lock);
-
- return ret ? ret : len;
-}
-
-static IIO_DEVICE_ATTR(50_percent, S_IRUGO | S_IWUSR,
- sca3000_query_ring_int,
- sca3000_set_ring_int,
- SCA3000_INT_MASK_RING_HALF);
-
-static IIO_DEVICE_ATTR(75_percent, S_IRUGO | S_IWUSR,
- sca3000_query_ring_int,
- sca3000_set_ring_int,
- SCA3000_INT_MASK_RING_THREE_QUARTER);
-
-static ssize_t sca3000_show_buffer_scale(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct sca3000_state *st = iio_priv(indio_dev);
-
- return sprintf(buf, "0.%06d\n", 4 * st->info->scale);
-}
-
-static IIO_DEVICE_ATTR(in_accel_scale,
- S_IRUGO,
- sca3000_show_buffer_scale,
- NULL,
- 0);
-
-/*
- * Ring buffer attributes
- * This device is a bit unusual in that the sampling frequency and bpse
- * only apply to the ring buffer. At all times the full rate and accuracy
- * are available via direct reads of the registers.
- */
-static const struct attribute *sca3000_ring_attributes[] = {
- &iio_dev_attr_50_percent.dev_attr.attr,
- &iio_dev_attr_75_percent.dev_attr.attr,
- &iio_dev_attr_in_accel_scale.dev_attr.attr,
- NULL,
-};
-
-static struct iio_buffer *sca3000_rb_allocate(struct iio_dev *indio_dev)
-{
- struct iio_buffer *buf;
- struct iio_hw_buffer *ring;
-
- ring = kzalloc(sizeof(*ring), GFP_KERNEL);
- if (!ring)
- return NULL;
-
- ring->private = indio_dev;
- buf = &ring->buf;
- buf->stufftoread = 0;
- buf->length = 64;
- buf->attrs = sca3000_ring_attributes;
- iio_buffer_init(buf);
-
- return buf;
-}
-
-static void sca3000_ring_release(struct iio_buffer *r)
-{
- kfree(iio_to_hw_buf(r));
-}
-
-static const struct iio_buffer_access_funcs sca3000_ring_access_funcs = {
- .read_first_n = &sca3000_read_first_n_hw_rb,
- .data_available = sca3000_ring_buf_data_available,
- .release = sca3000_ring_release,
-
- .modes = INDIO_BUFFER_HARDWARE,
-};
-
-int sca3000_configure_ring(struct iio_dev *indio_dev)
-{
- struct iio_buffer *buffer;
-
- buffer = sca3000_rb_allocate(indio_dev);
- if (!buffer)
- return -ENOMEM;
- indio_dev->modes |= INDIO_BUFFER_HARDWARE;
-
-	buffer->access = &sca3000_ring_access_funcs;
-
- iio_device_attach_buffer(indio_dev, buffer);
-
- return 0;
-}
-
-void sca3000_unconfigure_ring(struct iio_dev *indio_dev)
-{
- iio_buffer_put(indio_dev->buffer);
-}
-
-static inline
-int __sca3000_hw_ring_state_set(struct iio_dev *indio_dev, bool state)
-{
- struct sca3000_state *st = iio_priv(indio_dev);
- int ret;
-
- mutex_lock(&st->lock);
- ret = sca3000_read_data_short(st, SCA3000_REG_ADDR_MODE, 1);
- if (ret)
- goto error_ret;
- if (state) {
- dev_info(&indio_dev->dev, "supposedly enabling ring buffer\n");
- ret = sca3000_write_reg(st,
- SCA3000_REG_ADDR_MODE,
- (st->rx[0] | SCA3000_RING_BUF_ENABLE));
- } else
- ret = sca3000_write_reg(st,
- SCA3000_REG_ADDR_MODE,
- (st->rx[0] & ~SCA3000_RING_BUF_ENABLE));
-error_ret:
- mutex_unlock(&st->lock);
-
- return ret;
-}
-
-/**
- * sca3000_hw_ring_preenable() - hw ring buffer preenable function
- *
- * Very simple enable function as the chip allows normal reads
- * during ring buffer operation, so as long as it is indeed running
- * before we notify the core, the precise ordering does not matter.
- **/
-static int sca3000_hw_ring_preenable(struct iio_dev *indio_dev)
-{
- return __sca3000_hw_ring_state_set(indio_dev, 1);
-}
-
-static int sca3000_hw_ring_postdisable(struct iio_dev *indio_dev)
-{
- return __sca3000_hw_ring_state_set(indio_dev, 0);
-}
-
-static const struct iio_buffer_setup_ops sca3000_ring_setup_ops = {
- .preenable = &sca3000_hw_ring_preenable,
- .postdisable = &sca3000_hw_ring_postdisable,
-};
-
-void sca3000_register_ring_funcs(struct iio_dev *indio_dev)
-{
- indio_dev->setup_ops = &sca3000_ring_setup_ops;
-}
-
-/**
- * sca3000_ring_int_process() - ring-specific interrupt handling.
- *
- * This is only split from the main interrupt handler so as to
- * reduce the amount of code if the ring buffer is not enabled.
- **/
-void sca3000_ring_int_process(u8 val, struct iio_buffer *ring)
-{
- if (val & (SCA3000_INT_STATUS_THREE_QUARTERS |
- SCA3000_INT_STATUS_HALF)) {
- ring->stufftoread = true;
- wake_up_interruptible(&ring->pollq);
- }
-}
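For reference, the read-size arithmetic in the deleted
sca3000_read_first_n_hw_rb(): a request is rejected unless it is a whole
number of samples, then clamped to the bytes the hardware ring actually
holds, so a 100-byte read with 30 two-byte samples buffered returns 60
bytes. A standalone sketch of just that calculation (the helper name is
hypothetical):

/* count and the return value are bytes; num_available counts samples. */
static int sca3000_clamp_read_example(int count, int num_available,
				      int bytes_per_sample)
{
	if (count % bytes_per_sample)
		return -EINVAL;	/* no partial samples */
	return min(count, num_available * bytes_per_sample);
}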
diff --git a/drivers/staging/iio/adc/Makefile b/drivers/staging/iio/adc/Makefile
index 3cdd83ccec8e..ac09485923b6 100644
--- a/drivers/staging/iio/adc/Makefile
+++ b/drivers/staging/iio/adc/Makefile
@@ -2,7 +2,6 @@
# Makefile for industrial I/O ADC drivers
#
-ad7606-y := ad7606_core.o ad7606_ring.o
obj-$(CONFIG_AD7606_IFACE_PARALLEL) += ad7606_par.o
obj-$(CONFIG_AD7606_IFACE_SPI) += ad7606_spi.o
obj-$(CONFIG_AD7606) += ad7606.o
diff --git a/drivers/staging/iio/adc/ad7192.c b/drivers/staging/iio/adc/ad7192.c
index 1cf6b79801a9..1fb68c01abd5 100644
--- a/drivers/staging/iio/adc/ad7192.c
+++ b/drivers/staging/iio/adc/ad7192.c
@@ -152,7 +152,8 @@
*/
struct ad7192_state {
- struct regulator *reg;
+ struct regulator *avdd;
+ struct regulator *dvdd;
u16 int_vref_mv;
u32 mclk;
u32 f_order;
@@ -322,57 +323,6 @@ out:
return ret;
}
-static ssize_t ad7192_read_frequency(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct ad7192_state *st = iio_priv(indio_dev);
-
- return sprintf(buf, "%d\n", st->mclk /
- (st->f_order * 1024 * AD7192_MODE_RATE(st->mode)));
-}
-
-static ssize_t ad7192_write_frequency(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t len)
-{
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct ad7192_state *st = iio_priv(indio_dev);
- unsigned long lval;
- int div, ret;
-
- ret = kstrtoul(buf, 10, &lval);
- if (ret)
- return ret;
- if (lval == 0)
- return -EINVAL;
-
- ret = iio_device_claim_direct_mode(indio_dev);
- if (ret)
- return ret;
-
- div = st->mclk / (lval * st->f_order * 1024);
- if (div < 1 || div > 1023) {
- ret = -EINVAL;
- goto out;
- }
-
- st->mode &= ~AD7192_MODE_RATE(-1);
- st->mode |= AD7192_MODE_RATE(div);
- ad_sd_write_reg(&st->sd, AD7192_REG_MODE, 3, st->mode);
-
-out:
- iio_device_release_direct_mode(indio_dev);
-
- return ret ? ret : len;
-}
-
-static IIO_DEV_ATTR_SAMP_FREQ(S_IWUSR | S_IRUGO,
- ad7192_read_frequency,
- ad7192_write_frequency);
-
static ssize_t
ad7192_show_scale_available(struct device *dev,
struct device_attribute *attr, char *buf)
@@ -471,7 +421,6 @@ static IIO_DEVICE_ATTR(ac_excitation_en, S_IRUGO | S_IWUSR,
AD7192_REG_MODE);
static struct attribute *ad7192_attributes[] = {
- &iio_dev_attr_sampling_frequency.dev_attr.attr,
&iio_dev_attr_in_v_m_v_scale_available.dev_attr.attr,
&iio_dev_attr_in_voltage_scale_available.dev_attr.attr,
&iio_dev_attr_bridge_switch_en.dev_attr.attr,
@@ -484,7 +433,6 @@ static const struct attribute_group ad7192_attribute_group = {
};
static struct attribute *ad7195_attributes[] = {
- &iio_dev_attr_sampling_frequency.dev_attr.attr,
&iio_dev_attr_in_v_m_v_scale_available.dev_attr.attr,
&iio_dev_attr_in_voltage_scale_available.dev_attr.attr,
&iio_dev_attr_bridge_switch_en.dev_attr.attr,
@@ -536,6 +484,10 @@ static int ad7192_read_raw(struct iio_dev *indio_dev,
if (chan->type == IIO_TEMP)
*val -= 273 * ad7192_get_temp_scale(unipolar);
return IIO_VAL_INT;
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ *val = st->mclk /
+ (st->f_order * 1024 * AD7192_MODE_RATE(st->mode));
+ return IIO_VAL_INT;
}
return -EINVAL;
@@ -548,7 +500,7 @@ static int ad7192_write_raw(struct iio_dev *indio_dev,
long mask)
{
struct ad7192_state *st = iio_priv(indio_dev);
- int ret, i;
+ int ret, i, div;
unsigned int tmp;
ret = iio_device_claim_direct_mode(indio_dev);
@@ -572,6 +524,22 @@ static int ad7192_write_raw(struct iio_dev *indio_dev,
break;
}
break;
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ if (!val) {
+ ret = -EINVAL;
+ break;
+ }
+
+ div = st->mclk / (val * st->f_order * 1024);
+ if (div < 1 || div > 1023) {
+ ret = -EINVAL;
+ break;
+ }
+
+ st->mode &= ~AD7192_MODE_RATE(-1);
+ st->mode |= AD7192_MODE_RATE(div);
+ ad_sd_write_reg(&st->sd, AD7192_REG_MODE, 3, st->mode);
+ break;
default:
ret = -EINVAL;
}
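A worked example of the divider arithmetic above, assuming the AD7192's usual
4.9152 MHz master clock and f_order = 1: requesting 50 Hz gives
div = 4915200 / (50 * 1024) = 96, which reads back as exactly
4915200 / (96 * 1024) = 50 Hz, while anything above 4800 Hz computes
div = 0 and is rejected with -EINVAL.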
@@ -585,7 +553,14 @@ static int ad7192_write_raw_get_fmt(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan,
long mask)
{
- return IIO_VAL_INT_PLUS_NANO;
+ switch (mask) {
+ case IIO_CHAN_INFO_SCALE:
+ return IIO_VAL_INT_PLUS_NANO;
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ return IIO_VAL_INT;
+ default:
+ return -EINVAL;
+ }
}
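The distinction in write_raw_get_fmt() matters because it tells the IIO core
how to interpret the pair of integers it passes to write_raw(): with
IIO_VAL_INT_PLUS_NANO a sysfs write of "0.000300" arrives as val = 0 and
val2 = 300000 (nano-units), whereas returning IIO_VAL_INT for the sampling
frequency declares that only the integer part is meaningful, the right
contract for a rate in Hz.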
static const struct iio_info ad7192_info = {
@@ -659,15 +634,30 @@ static int ad7192_probe(struct spi_device *spi)
st = iio_priv(indio_dev);
- st->reg = devm_regulator_get(&spi->dev, "vcc");
- if (!IS_ERR(st->reg)) {
- ret = regulator_enable(st->reg);
- if (ret)
- return ret;
+ st->avdd = devm_regulator_get(&spi->dev, "avdd");
+ if (IS_ERR(st->avdd))
+ return PTR_ERR(st->avdd);
- voltage_uv = regulator_get_voltage(st->reg);
+ ret = regulator_enable(st->avdd);
+ if (ret) {
+ dev_err(&spi->dev, "Failed to enable specified AVdd supply\n");
+ return ret;
+ }
+
+ st->dvdd = devm_regulator_get(&spi->dev, "dvdd");
+ if (IS_ERR(st->dvdd)) {
+ ret = PTR_ERR(st->dvdd);
+ goto error_disable_avdd;
}
+ ret = regulator_enable(st->dvdd);
+ if (ret) {
+ dev_err(&spi->dev, "Failed to enable specified DVdd supply\n");
+ goto error_disable_avdd;
+ }
+
+ voltage_uv = regulator_get_voltage(st->avdd);
+
if (pdata->vref_mv)
st->int_vref_mv = pdata->vref_mv;
else if (voltage_uv)
@@ -701,7 +691,7 @@ static int ad7192_probe(struct spi_device *spi)
ret = ad_sd_setup_buffer_and_trigger(indio_dev);
if (ret)
- goto error_disable_reg;
+ goto error_disable_dvdd;
ret = ad7192_setup(st, pdata);
if (ret)
@@ -714,9 +704,10 @@ static int ad7192_probe(struct spi_device *spi)
error_remove_trigger:
ad_sd_cleanup_buffer_and_trigger(indio_dev);
-error_disable_reg:
- if (!IS_ERR(st->reg))
- regulator_disable(st->reg);
+error_disable_dvdd:
+ regulator_disable(st->dvdd);
+error_disable_avdd:
+ regulator_disable(st->avdd);
return ret;
}
@@ -729,8 +720,8 @@ static int ad7192_remove(struct spi_device *spi)
iio_device_unregister(indio_dev);
ad_sd_cleanup_buffer_and_trigger(indio_dev);
- if (!IS_ERR(st->reg))
- regulator_disable(st->reg);
+ regulator_disable(st->dvdd);
+ regulator_disable(st->avdd);
return 0;
}
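Note that both the error path and remove() release the supplies in the
reverse order of acquisition, DVdd before AVdd, mirroring the enable
sequence in probe.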
diff --git a/drivers/staging/iio/adc/ad7280a.c b/drivers/staging/iio/adc/ad7280a.c
index b460dda7eb65..ee679ac0368f 100644
--- a/drivers/staging/iio/adc/ad7280a.c
+++ b/drivers/staging/iio/adc/ad7280a.c
@@ -777,7 +777,7 @@ static struct attribute *ad7280_event_attributes[] = {
NULL,
};
-static struct attribute_group ad7280_event_attrs_group = {
+static const struct attribute_group ad7280_event_attrs_group = {
.attrs = ad7280_event_attributes,
};
diff --git a/drivers/staging/iio/adc/ad7606_core.c b/drivers/staging/iio/adc/ad7606.c
index f79ee61851f6..453190864b2f 100644
--- a/drivers/staging/iio/adc/ad7606_core.c
+++ b/drivers/staging/iio/adc/ad7606.c
@@ -13,7 +13,7 @@
#include <linux/sysfs.h>
#include <linux/regulator/consumer.h>
#include <linux/err.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/module.h>
@@ -21,58 +21,109 @@
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
#include <linux/iio/buffer.h>
+#include <linux/iio/trigger_consumer.h>
+#include <linux/iio/triggered_buffer.h>
#include "ad7606.h"
-int ad7606_reset(struct ad7606_state *st)
+static int ad7606_reset(struct ad7606_state *st)
{
- if (gpio_is_valid(st->pdata->gpio_reset)) {
- gpio_set_value(st->pdata->gpio_reset, 1);
+ if (st->gpio_reset) {
+ gpiod_set_value(st->gpio_reset, 1);
ndelay(100); /* t_reset >= 100ns */
- gpio_set_value(st->pdata->gpio_reset, 0);
+ gpiod_set_value(st->gpio_reset, 0);
return 0;
}
return -ENODEV;
}
+static int ad7606_read_samples(struct ad7606_state *st)
+{
+ unsigned int num = st->chip_info->num_channels;
+ u16 *data = st->data;
+ int ret;
+
+ /*
+	 * The frstdata signal is driven high during and after the read of the
+	 * first channel's sample and low for all other channels. This can be
+	 * used to check that the incoming data is correctly aligned. During
+	 * normal operation the data should never become unaligned, but a
+	 * glitch or an electrostatic discharge might cause an extra read or
+	 * clock cycle. Monitoring the frstdata signal allows recovery from
+	 * such failures.
+ */
+
+ if (st->gpio_frstdata) {
+ ret = st->bops->read_block(st->dev, 1, data);
+ if (ret)
+ return ret;
+
+ if (!gpiod_get_value(st->gpio_frstdata)) {
+ ad7606_reset(st);
+ return -EIO;
+ }
+
+ data++;
+ num--;
+ }
+
+ return st->bops->read_block(st->dev, num, data);
+}
+
+static irqreturn_t ad7606_trigger_handler(int irq, void *p)
+{
+ struct iio_poll_func *pf = p;
+ struct ad7606_state *st = iio_priv(pf->indio_dev);
+
+ gpiod_set_value(st->gpio_convst, 1);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * ad7606_poll_bh_to_ring() - bh of trigger-launched polling to the ring buffer
+ * @work_s: the work struct through which this was scheduled
+ *
+ * Currently there is no option in this driver to disable the saving of
+ * timestamps within the ring.
+ * I think limiting this to one copy at a time was to avoid problems if the
+ * trigger rate was set far too high and the reads then locked up the machine.
+ **/
+static void ad7606_poll_bh_to_ring(struct work_struct *work_s)
+{
+ struct ad7606_state *st = container_of(work_s, struct ad7606_state,
+ poll_work);
+ struct iio_dev *indio_dev = iio_priv_to_dev(st);
+ int ret;
+
+ ret = ad7606_read_samples(st);
+ if (ret == 0)
+ iio_push_to_buffers_with_timestamp(indio_dev, st->data,
+ iio_get_time_ns(indio_dev));
+
+ gpiod_set_value(st->gpio_convst, 0);
+ iio_trigger_notify_done(indio_dev->trig);
+}
+
static int ad7606_scan_direct(struct iio_dev *indio_dev, unsigned int ch)
{
struct ad7606_state *st = iio_priv(indio_dev);
int ret;
st->done = false;
- gpio_set_value(st->pdata->gpio_convst, 1);
+ gpiod_set_value(st->gpio_convst, 1);
ret = wait_event_interruptible(st->wq_data_avail, st->done);
if (ret)
goto error_ret;
- if (gpio_is_valid(st->pdata->gpio_frstdata)) {
- ret = st->bops->read_block(st->dev, 1, st->data);
- if (ret)
- goto error_ret;
- if (!gpio_get_value(st->pdata->gpio_frstdata)) {
- /* This should never happen */
- ad7606_reset(st);
- ret = -EIO;
- goto error_ret;
- }
- ret = st->bops->read_block(st->dev,
- st->chip_info->num_channels - 1, &st->data[1]);
- if (ret)
- goto error_ret;
- } else {
- ret = st->bops->read_block(st->dev,
- st->chip_info->num_channels, st->data);
- if (ret)
- goto error_ret;
- }
-
- ret = st->data[ch];
+ ret = ad7606_read_samples(st);
+ if (ret == 0)
+ ret = st->data[ch];
error_ret:
- gpio_set_value(st->pdata->gpio_convst, 0);
+ gpiod_set_value(st->gpio_convst, 0);
return ret;
}
@@ -103,6 +154,9 @@ static int ad7606_read_raw(struct iio_dev *indio_dev,
*val = st->range * 2;
*val2 = st->chip_info->channels[0].scan_type.realbits;
return IIO_VAL_FRACTIONAL_LOG2;
+ case IIO_CHAN_INFO_OVERSAMPLING_RATIO:
+ *val = st->oversampling;
+ return IIO_VAL_INT;
}
return -EINVAL;
}
@@ -129,12 +183,11 @@ static ssize_t ad7606_store_range(struct device *dev,
if (ret)
return ret;
- if (!(lval == 5000 || lval == 10000)) {
- dev_err(dev, "range is not supported\n");
+ if (!(lval == 5000 || lval == 10000))
return -EINVAL;
- }
+
mutex_lock(&indio_dev->mlock);
- gpio_set_value(st->pdata->gpio_range, lval == 10000);
+ gpiod_set_value(st->gpio_range, lval == 10000);
st->range = lval;
mutex_unlock(&indio_dev->mlock);
@@ -145,19 +198,9 @@ static IIO_DEVICE_ATTR(in_voltage_range, S_IRUGO | S_IWUSR,
ad7606_show_range, ad7606_store_range, 0);
static IIO_CONST_ATTR(in_voltage_range_available, "5000 10000");
-static ssize_t ad7606_show_oversampling_ratio(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct ad7606_state *st = iio_priv(indio_dev);
-
- return sprintf(buf, "%u\n", st->oversampling);
-}
-
static int ad7606_oversampling_get_index(unsigned int val)
{
- unsigned char supported[] = {0, 2, 4, 8, 16, 32, 64};
+ unsigned char supported[] = {1, 2, 4, 8, 16, 32, 64};
int i;
for (i = 0; i < ARRAY_SIZE(supported); i++)
@@ -167,44 +210,45 @@ static int ad7606_oversampling_get_index(unsigned int val)
return -EINVAL;
}
-static ssize_t ad7606_store_oversampling_ratio(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
+static int ad7606_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val,
+ int val2,
+ long mask)
{
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
struct ad7606_state *st = iio_priv(indio_dev);
- unsigned long lval;
+ int values[3];
int ret;
- ret = kstrtoul(buf, 10, &lval);
- if (ret)
- return ret;
+ switch (mask) {
+ case IIO_CHAN_INFO_OVERSAMPLING_RATIO:
+ if (val2)
+ return -EINVAL;
+ ret = ad7606_oversampling_get_index(val);
+ if (ret < 0)
+ return ret;
- ret = ad7606_oversampling_get_index(lval);
- if (ret < 0) {
- dev_err(dev, "oversampling %lu is not supported\n", lval);
- return ret;
- }
+ values[0] = (ret >> 0) & 1;
+ values[1] = (ret >> 1) & 1;
+ values[2] = (ret >> 2) & 1;
- mutex_lock(&indio_dev->mlock);
- gpio_set_value(st->pdata->gpio_os0, (ret >> 0) & 1);
- gpio_set_value(st->pdata->gpio_os1, (ret >> 1) & 1);
- gpio_set_value(st->pdata->gpio_os1, (ret >> 2) & 1);
- st->oversampling = lval;
- mutex_unlock(&indio_dev->mlock);
+ mutex_lock(&indio_dev->mlock);
+ gpiod_set_array_value(ARRAY_SIZE(values), st->gpio_os->desc,
+ values);
+ st->oversampling = val;
+ mutex_unlock(&indio_dev->mlock);
- return count;
+ return 0;
+ default:
+ return -EINVAL;
+ }
}
-static IIO_DEVICE_ATTR(oversampling_ratio, S_IRUGO | S_IWUSR,
- ad7606_show_oversampling_ratio,
- ad7606_store_oversampling_ratio, 0);
-static IIO_CONST_ATTR(oversampling_ratio_available, "0 2 4 8 16 32 64");
+static IIO_CONST_ATTR(oversampling_ratio_available, "1 2 4 8 16 32 64");
static struct attribute *ad7606_attributes_os_and_range[] = {
&iio_dev_attr_in_voltage_range.dev_attr.attr,
&iio_const_attr_in_voltage_range_available.dev_attr.attr,
- &iio_dev_attr_oversampling_ratio.dev_attr.attr,
&iio_const_attr_oversampling_ratio_available.dev_attr.attr,
NULL,
};
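To make the bit unpacking above concrete: ad7606_oversampling_get_index()
returns the position in the supported[] table, and its three low bits map
directly onto the OS pins. A requested ratio of 32 sits at index 5 (0b101),
so gpiod_set_array_value() drives OS0 = 1, OS1 = 0 and OS2 = 1.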
@@ -214,7 +258,6 @@ static const struct attribute_group ad7606_attribute_group_os_and_range = {
};
static struct attribute *ad7606_attributes_os[] = {
- &iio_dev_attr_oversampling_ratio.dev_attr.attr,
&iio_const_attr_oversampling_ratio_available.dev_attr.attr,
NULL,
};
@@ -241,6 +284,8 @@ static const struct attribute_group ad7606_attribute_group_range = {
.address = num, \
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),\
+ .info_mask_shared_by_all = \
+ BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO), \
.scan_index = num, \
.scan_type = { \
.sign = 's', \
@@ -267,20 +312,14 @@ static const struct ad7606_chip_info ad7606_chip_info_tbl[] = {
* More devices added in future
*/
[ID_AD7606_8] = {
- .name = "ad7606",
- .int_vref_mv = 2500,
.channels = ad7606_channels,
.num_channels = 9,
},
[ID_AD7606_6] = {
- .name = "ad7606-6",
- .int_vref_mv = 2500,
.channels = ad7606_channels,
.num_channels = 7,
},
[ID_AD7606_4] = {
- .name = "ad7606-4",
- .int_vref_mv = 2500,
.channels = ad7606_channels,
.num_channels = 5,
},
@@ -288,119 +327,34 @@ static const struct ad7606_chip_info ad7606_chip_info_tbl[] = {
static int ad7606_request_gpios(struct ad7606_state *st)
{
- struct gpio gpio_array[3] = {
- [0] = {
- .gpio = st->pdata->gpio_os0,
- .flags = GPIOF_DIR_OUT | ((st->oversampling & 1) ?
- GPIOF_INIT_HIGH : GPIOF_INIT_LOW),
- .label = "AD7606_OS0",
- },
- [1] = {
- .gpio = st->pdata->gpio_os1,
- .flags = GPIOF_DIR_OUT | ((st->oversampling & 2) ?
- GPIOF_INIT_HIGH : GPIOF_INIT_LOW),
- .label = "AD7606_OS1",
- },
- [2] = {
- .gpio = st->pdata->gpio_os2,
- .flags = GPIOF_DIR_OUT | ((st->oversampling & 4) ?
- GPIOF_INIT_HIGH : GPIOF_INIT_LOW),
- .label = "AD7606_OS2",
- },
- };
- int ret;
-
- if (gpio_is_valid(st->pdata->gpio_convst)) {
- ret = gpio_request_one(st->pdata->gpio_convst,
- GPIOF_OUT_INIT_LOW,
- "AD7606_CONVST");
- if (ret) {
- dev_err(st->dev, "failed to request GPIO CONVST\n");
- goto error_ret;
- }
- } else {
- ret = -EIO;
- goto error_ret;
- }
-
- if (gpio_is_valid(st->pdata->gpio_os0) &&
- gpio_is_valid(st->pdata->gpio_os1) &&
- gpio_is_valid(st->pdata->gpio_os2)) {
- ret = gpio_request_array(gpio_array, ARRAY_SIZE(gpio_array));
- if (ret < 0)
- goto error_free_convst;
- }
-
- if (gpio_is_valid(st->pdata->gpio_reset)) {
- ret = gpio_request_one(st->pdata->gpio_reset,
- GPIOF_OUT_INIT_LOW,
- "AD7606_RESET");
- if (ret < 0)
- goto error_free_os;
- }
-
- if (gpio_is_valid(st->pdata->gpio_range)) {
- ret = gpio_request_one(st->pdata->gpio_range, GPIOF_DIR_OUT |
- ((st->range == 10000) ? GPIOF_INIT_HIGH :
- GPIOF_INIT_LOW), "AD7606_RANGE");
- if (ret < 0)
- goto error_free_reset;
- }
- if (gpio_is_valid(st->pdata->gpio_stby)) {
- ret = gpio_request_one(st->pdata->gpio_stby,
- GPIOF_OUT_INIT_HIGH,
- "AD7606_STBY");
- if (ret < 0)
- goto error_free_range;
- }
-
- if (gpio_is_valid(st->pdata->gpio_frstdata)) {
- ret = gpio_request_one(st->pdata->gpio_frstdata, GPIOF_IN,
- "AD7606_FRSTDATA");
- if (ret < 0)
- goto error_free_stby;
- }
-
- return 0;
-
-error_free_stby:
- if (gpio_is_valid(st->pdata->gpio_stby))
- gpio_free(st->pdata->gpio_stby);
-error_free_range:
- if (gpio_is_valid(st->pdata->gpio_range))
- gpio_free(st->pdata->gpio_range);
-error_free_reset:
- if (gpio_is_valid(st->pdata->gpio_reset))
- gpio_free(st->pdata->gpio_reset);
-error_free_os:
- if (gpio_is_valid(st->pdata->gpio_os0) &&
- gpio_is_valid(st->pdata->gpio_os1) &&
- gpio_is_valid(st->pdata->gpio_os2))
- gpio_free_array(gpio_array, ARRAY_SIZE(gpio_array));
-error_free_convst:
- gpio_free(st->pdata->gpio_convst);
-error_ret:
- return ret;
-}
-
-static void ad7606_free_gpios(struct ad7606_state *st)
-{
- if (gpio_is_valid(st->pdata->gpio_frstdata))
- gpio_free(st->pdata->gpio_frstdata);
- if (gpio_is_valid(st->pdata->gpio_stby))
- gpio_free(st->pdata->gpio_stby);
- if (gpio_is_valid(st->pdata->gpio_range))
- gpio_free(st->pdata->gpio_range);
- if (gpio_is_valid(st->pdata->gpio_reset))
- gpio_free(st->pdata->gpio_reset);
- if (gpio_is_valid(st->pdata->gpio_os0) &&
- gpio_is_valid(st->pdata->gpio_os1) &&
- gpio_is_valid(st->pdata->gpio_os2)) {
- gpio_free(st->pdata->gpio_os2);
- gpio_free(st->pdata->gpio_os1);
- gpio_free(st->pdata->gpio_os0);
- }
- gpio_free(st->pdata->gpio_convst);
+ struct device *dev = st->dev;
+
+ st->gpio_convst = devm_gpiod_get(dev, "conversion-start",
+ GPIOD_OUT_LOW);
+ if (IS_ERR(st->gpio_convst))
+ return PTR_ERR(st->gpio_convst);
+
+ st->gpio_reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
+ if (IS_ERR(st->gpio_reset))
+ return PTR_ERR(st->gpio_reset);
+
+ st->gpio_range = devm_gpiod_get_optional(dev, "range", GPIOD_OUT_LOW);
+ if (IS_ERR(st->gpio_range))
+ return PTR_ERR(st->gpio_range);
+
+ st->gpio_standby = devm_gpiod_get_optional(dev, "standby",
+ GPIOD_OUT_HIGH);
+ if (IS_ERR(st->gpio_standby))
+ return PTR_ERR(st->gpio_standby);
+
+ st->gpio_frstdata = devm_gpiod_get_optional(dev, "first-data",
+ GPIOD_IN);
+ if (IS_ERR(st->gpio_frstdata))
+ return PTR_ERR(st->gpio_frstdata);
+
+ st->gpio_os = devm_gpiod_get_array_optional(dev, "oversampling-ratio",
+ GPIOD_OUT_LOW);
+ return PTR_ERR_OR_ZERO(st->gpio_os);
}
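A property of the gpiod consumer API that the rewrite relies on: the
devm_gpiod_get_optional() variants return NULL, not an error, when the line
is simply absent, and the gpiod_set_value()/gpiod_get_value() accessors treat
a NULL descriptor as a no-op. That is what allows all of the old
gpio_is_valid() checks to disappear, both here and in the suspend/resume
paths further down.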
/**
@@ -429,12 +383,14 @@ static const struct iio_info ad7606_info_no_os_or_range = {
static const struct iio_info ad7606_info_os_and_range = {
.driver_module = THIS_MODULE,
.read_raw = &ad7606_read_raw,
+ .write_raw = &ad7606_write_raw,
.attrs = &ad7606_attribute_group_os_and_range,
};
static const struct iio_info ad7606_info_os = {
.driver_module = THIS_MODULE,
.read_raw = &ad7606_read_raw,
+ .write_raw = &ad7606_write_raw,
.attrs = &ad7606_attribute_group_os,
};
@@ -444,81 +400,73 @@ static const struct iio_info ad7606_info_range = {
.attrs = &ad7606_attribute_group_range,
};
-struct iio_dev *ad7606_probe(struct device *dev, int irq,
- void __iomem *base_address,
- unsigned int id,
- const struct ad7606_bus_ops *bops)
+int ad7606_probe(struct device *dev, int irq, void __iomem *base_address,
+ const char *name, unsigned int id,
+ const struct ad7606_bus_ops *bops)
{
- struct ad7606_platform_data *pdata = dev->platform_data;
struct ad7606_state *st;
int ret;
struct iio_dev *indio_dev;
indio_dev = devm_iio_device_alloc(dev, sizeof(*st));
if (!indio_dev)
- return ERR_PTR(-ENOMEM);
+ return -ENOMEM;
st = iio_priv(indio_dev);
st->dev = dev;
st->bops = bops;
st->base_address = base_address;
- st->range = pdata->default_range == 10000 ? 10000 : 5000;
+ st->range = 5000;
+ st->oversampling = 1;
+ INIT_WORK(&st->poll_work, &ad7606_poll_bh_to_ring);
- ret = ad7606_oversampling_get_index(pdata->default_os);
- if (ret < 0) {
- dev_warn(dev, "oversampling %d is not supported\n",
- pdata->default_os);
- st->oversampling = 0;
- } else {
- st->oversampling = pdata->default_os;
- }
+ st->reg = devm_regulator_get(dev, "avcc");
+ if (IS_ERR(st->reg))
+ return PTR_ERR(st->reg);
- st->reg = devm_regulator_get(dev, "vcc");
- if (!IS_ERR(st->reg)) {
- ret = regulator_enable(st->reg);
- if (ret)
- return ERR_PTR(ret);
+ ret = regulator_enable(st->reg);
+ if (ret) {
+ dev_err(dev, "Failed to enable specified AVcc supply\n");
+ return ret;
}
- st->pdata = pdata;
+ ret = ad7606_request_gpios(st);
+ if (ret)
+ goto error_disable_reg;
+
st->chip_info = &ad7606_chip_info_tbl[id];
indio_dev->dev.parent = dev;
- if (gpio_is_valid(st->pdata->gpio_os0) &&
- gpio_is_valid(st->pdata->gpio_os1) &&
- gpio_is_valid(st->pdata->gpio_os2)) {
- if (gpio_is_valid(st->pdata->gpio_range))
+ if (st->gpio_os) {
+ if (st->gpio_range)
indio_dev->info = &ad7606_info_os_and_range;
else
indio_dev->info = &ad7606_info_os;
} else {
- if (gpio_is_valid(st->pdata->gpio_range))
+ if (st->gpio_range)
indio_dev->info = &ad7606_info_range;
else
indio_dev->info = &ad7606_info_no_os_or_range;
}
indio_dev->modes = INDIO_DIRECT_MODE;
- indio_dev->name = st->chip_info->name;
+ indio_dev->name = name;
indio_dev->channels = st->chip_info->channels;
indio_dev->num_channels = st->chip_info->num_channels;
init_waitqueue_head(&st->wq_data_avail);
- ret = ad7606_request_gpios(st);
- if (ret)
- goto error_disable_reg;
-
ret = ad7606_reset(st);
if (ret)
dev_warn(st->dev, "failed to RESET: no RESET GPIO specified\n");
- ret = request_irq(irq, ad7606_interrupt,
- IRQF_TRIGGER_FALLING, st->chip_info->name, indio_dev);
+ ret = request_irq(irq, ad7606_interrupt, IRQF_TRIGGER_FALLING, name,
+ indio_dev);
if (ret)
- goto error_free_gpios;
+ goto error_disable_reg;
- ret = ad7606_register_ring_funcs_and_init(indio_dev);
+ ret = iio_triggered_buffer_setup(indio_dev, &ad7606_trigger_handler,
+ NULL, NULL);
if (ret)
goto error_free_irq;
@@ -526,35 +474,31 @@ struct iio_dev *ad7606_probe(struct device *dev, int irq,
if (ret)
goto error_unregister_ring;
- return indio_dev;
+ dev_set_drvdata(dev, indio_dev);
+
+ return 0;
error_unregister_ring:
- ad7606_ring_cleanup(indio_dev);
+ iio_triggered_buffer_cleanup(indio_dev);
error_free_irq:
free_irq(irq, indio_dev);
-error_free_gpios:
- ad7606_free_gpios(st);
-
error_disable_reg:
- if (!IS_ERR(st->reg))
- regulator_disable(st->reg);
- return ERR_PTR(ret);
+ regulator_disable(st->reg);
+ return ret;
}
EXPORT_SYMBOL_GPL(ad7606_probe);
-int ad7606_remove(struct iio_dev *indio_dev, int irq)
+int ad7606_remove(struct device *dev, int irq)
{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
struct ad7606_state *st = iio_priv(indio_dev);
iio_device_unregister(indio_dev);
- ad7606_ring_cleanup(indio_dev);
+ iio_triggered_buffer_cleanup(indio_dev);
free_irq(irq, indio_dev);
- if (!IS_ERR(st->reg))
- regulator_disable(st->reg);
-
- ad7606_free_gpios(st);
+ regulator_disable(st->reg);
return 0;
}
@@ -567,10 +511,9 @@ static int ad7606_suspend(struct device *dev)
struct iio_dev *indio_dev = dev_get_drvdata(dev);
struct ad7606_state *st = iio_priv(indio_dev);
- if (gpio_is_valid(st->pdata->gpio_stby)) {
- if (gpio_is_valid(st->pdata->gpio_range))
- gpio_set_value(st->pdata->gpio_range, 1);
- gpio_set_value(st->pdata->gpio_stby, 0);
+ if (st->gpio_standby) {
+ gpiod_set_value(st->gpio_range, 1);
+ gpiod_set_value(st->gpio_standby, 0);
}
return 0;
@@ -581,12 +524,9 @@ static int ad7606_resume(struct device *dev)
struct iio_dev *indio_dev = dev_get_drvdata(dev);
struct ad7606_state *st = iio_priv(indio_dev);
- if (gpio_is_valid(st->pdata->gpio_stby)) {
- if (gpio_is_valid(st->pdata->gpio_range))
- gpio_set_value(st->pdata->gpio_range,
- st->range == 10000);
-
- gpio_set_value(st->pdata->gpio_stby, 1);
+ if (st->gpio_standby) {
+ gpiod_set_value(st->gpio_range, st->range == 10000);
+ gpiod_set_value(st->gpio_standby, 1);
ad7606_reset(st);
}
diff --git a/drivers/staging/iio/adc/ad7606.h b/drivers/staging/iio/adc/ad7606.h
index 39f50440d915..746f9553d2ba 100644
--- a/drivers/staging/iio/adc/ad7606.h
+++ b/drivers/staging/iio/adc/ad7606.h
@@ -9,48 +9,14 @@
#ifndef IIO_ADC_AD7606_H_
#define IIO_ADC_AD7606_H_
-/*
- * TODO: struct ad7606_platform_data needs to go into include/linux/iio
- */
-
-/**
- * struct ad7606_platform_data - platform/board specific information
- * @default_os: default oversampling value {0, 2, 4, 8, 16, 32, 64}
- * @default_range: default range +/-{5000, 10000} mVolt
- * @gpio_convst: number of gpio connected to the CONVST pin
- * @gpio_reset: gpio connected to the RESET pin, if not used set to -1
- * @gpio_range: gpio connected to the RANGE pin, if not used set to -1
- * @gpio_os0: gpio connected to the OS0 pin, if not used set to -1
- * @gpio_os1: gpio connected to the OS1 pin, if not used set to -1
- * @gpio_os2: gpio connected to the OS2 pin, if not used set to -1
- * @gpio_frstdata:	gpio connected to the FRSTDATA pin, if not used set to -1
- * @gpio_stby: gpio connected to the STBY pin, if not used set to -1
- */
-
-struct ad7606_platform_data {
- unsigned int default_os;
- unsigned int default_range;
- unsigned int gpio_convst;
- unsigned int gpio_reset;
- unsigned int gpio_range;
- unsigned int gpio_os0;
- unsigned int gpio_os1;
- unsigned int gpio_os2;
- unsigned int gpio_frstdata;
- unsigned int gpio_stby;
-};
-
/**
* struct ad7606_chip_info - chip specific information
* @name: identification string for chip
- * @int_vref_mv: the internal reference voltage
* @channels: channel specification
* @num_channels: number of channels
*/
struct ad7606_chip_info {
- const char *name;
- u16 int_vref_mv;
const struct iio_chan_spec *channels;
unsigned int num_channels;
};
@@ -62,7 +28,6 @@ struct ad7606_chip_info {
struct ad7606_state {
struct device *dev;
const struct ad7606_chip_info *chip_info;
- struct ad7606_platform_data *pdata;
struct regulator *reg;
struct work_struct poll_work;
wait_queue_head_t wq_data_avail;
@@ -72,12 +37,19 @@ struct ad7606_state {
bool done;
void __iomem *base_address;
+ struct gpio_desc *gpio_convst;
+ struct gpio_desc *gpio_reset;
+ struct gpio_desc *gpio_range;
+ struct gpio_desc *gpio_standby;
+ struct gpio_desc *gpio_frstdata;
+ struct gpio_descs *gpio_os;
+
/*
* DMA (thus cache coherency maintenance) requires the
* transfer buffers to live in their own cache lines.
+ * 8 * 16-bit samples + 64-bit timestamp
*/
-
- unsigned short data[8] ____cacheline_aligned;
+ unsigned short data[12] ____cacheline_aligned;
};
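The new size follows from the buffer layout used by
iio_push_to_buffers_with_timestamp(): 8 samples of 2 bytes each make
16 bytes, and the 8-byte timestamp appended at the (naturally aligned) end
of the scan brings the total to 24 bytes, i.e. 12 u16 slots.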
struct ad7606_bus_ops {
@@ -85,11 +57,10 @@ struct ad7606_bus_ops {
int (*read_block)(struct device *, int, void *);
};
-struct iio_dev *ad7606_probe(struct device *dev, int irq,
- void __iomem *base_address, unsigned int id,
- const struct ad7606_bus_ops *bops);
-int ad7606_remove(struct iio_dev *indio_dev, int irq);
-int ad7606_reset(struct ad7606_state *st);
+int ad7606_probe(struct device *dev, int irq, void __iomem *base_address,
+ const char *name, unsigned int id,
+ const struct ad7606_bus_ops *bops);
+int ad7606_remove(struct device *dev, int irq);
enum ad7606_supported_device_ids {
ID_AD7606_8,
@@ -97,9 +68,6 @@ enum ad7606_supported_device_ids {
ID_AD7606_4
};
-int ad7606_register_ring_funcs_and_init(struct iio_dev *indio_dev);
-void ad7606_ring_cleanup(struct iio_dev *indio_dev);
-
#ifdef CONFIG_PM_SLEEP
extern const struct dev_pm_ops ad7606_pm_ops;
#define AD7606_PM_OPS (&ad7606_pm_ops)
diff --git a/drivers/staging/iio/adc/ad7606_par.c b/drivers/staging/iio/adc/ad7606_par.c
index 84d23930fdde..cd6c410c0484 100644
--- a/drivers/staging/iio/adc/ad7606_par.c
+++ b/drivers/staging/iio/adc/ad7606_par.c
@@ -49,8 +49,8 @@ static const struct ad7606_bus_ops ad7606_par8_bops = {
static int ad7606_par_probe(struct platform_device *pdev)
{
+ const struct platform_device_id *id = platform_get_device_id(pdev);
struct resource *res;
- struct iio_dev *indio_dev;
void __iomem *addr;
resource_size_t remap_size;
int irq;
@@ -68,26 +68,15 @@ static int ad7606_par_probe(struct platform_device *pdev)
remap_size = resource_size(res);
- indio_dev = ad7606_probe(&pdev->dev, irq, addr,
- platform_get_device_id(pdev)->driver_data,
- remap_size > 1 ? &ad7606_par16_bops :
- &ad7606_par8_bops);
-
- if (IS_ERR(indio_dev))
- return PTR_ERR(indio_dev);
-
- platform_set_drvdata(pdev, indio_dev);
-
- return 0;
+ return ad7606_probe(&pdev->dev, irq, addr,
+ id->name, id->driver_data,
+ remap_size > 1 ? &ad7606_par16_bops :
+ &ad7606_par8_bops);
}
static int ad7606_par_remove(struct platform_device *pdev)
{
- struct iio_dev *indio_dev = platform_get_drvdata(pdev);
-
- ad7606_remove(indio_dev, platform_get_irq(pdev, 0));
-
- return 0;
+ return ad7606_remove(&pdev->dev, platform_get_irq(pdev, 0));
}
static const struct platform_device_id ad7606_driver_ids[] = {
diff --git a/drivers/staging/iio/adc/ad7606_ring.c b/drivers/staging/iio/adc/ad7606_ring.c
deleted file mode 100644
index 0572df9aad85..000000000000
--- a/drivers/staging/iio/adc/ad7606_ring.c
+++ /dev/null
@@ -1,102 +0,0 @@
-/*
- * Copyright 2011-2012 Analog Devices Inc.
- *
- * Licensed under the GPL-2.
- *
- */
-
-#include <linux/interrupt.h>
-#include <linux/gpio.h>
-#include <linux/device.h>
-#include <linux/kernel.h>
-#include <linux/slab.h>
-
-#include <linux/iio/iio.h>
-#include <linux/iio/buffer.h>
-#include <linux/iio/trigger_consumer.h>
-#include <linux/iio/triggered_buffer.h>
-
-#include "ad7606.h"
-
-/**
- * ad7606_trigger_handler_th_bh() - th/bh of trigger-launched polling to ring buffer
- *
- **/
-static irqreturn_t ad7606_trigger_handler_th_bh(int irq, void *p)
-{
- struct iio_poll_func *pf = p;
- struct ad7606_state *st = iio_priv(pf->indio_dev);
-
- gpio_set_value(st->pdata->gpio_convst, 1);
-
- return IRQ_HANDLED;
-}
-
-/**
- * ad7606_poll_bh_to_ring() bh of trigger launched polling to ring buffer
- * @work_s: the work struct through which this was scheduled
- *
- * Currently there is no option in this driver to disable the saving of
- * timestamps within the ring.
- * I think the one copy of this at a time was to avoid problems if the
- * trigger was set far too high and the reads then locked up the computer.
- **/
-static void ad7606_poll_bh_to_ring(struct work_struct *work_s)
-{
- struct ad7606_state *st = container_of(work_s, struct ad7606_state,
- poll_work);
- struct iio_dev *indio_dev = iio_priv_to_dev(st);
- __u8 *buf;
- int ret;
-
- buf = kzalloc(indio_dev->scan_bytes, GFP_KERNEL);
- if (!buf)
- return;
-
- if (gpio_is_valid(st->pdata->gpio_frstdata)) {
- ret = st->bops->read_block(st->dev, 1, buf);
- if (ret)
- goto done;
- if (!gpio_get_value(st->pdata->gpio_frstdata)) {
-			/* This should never happen. However,
-			 * a signal glitch caused by bad PCB design or
-			 * electrostatic discharge could cause an extra read
-			 * or clock cycle. This allows recovery.
- */
- ad7606_reset(st);
- goto done;
- }
- ret = st->bops->read_block(st->dev,
- st->chip_info->num_channels - 1, buf + 2);
- if (ret)
- goto done;
- } else {
- ret = st->bops->read_block(st->dev,
- st->chip_info->num_channels, buf);
- if (ret)
- goto done;
- }
-
- iio_push_to_buffers_with_timestamp(indio_dev, buf,
- iio_get_time_ns(indio_dev));
-done:
- gpio_set_value(st->pdata->gpio_convst, 0);
- iio_trigger_notify_done(indio_dev->trig);
- kfree(buf);
-}
-
-int ad7606_register_ring_funcs_and_init(struct iio_dev *indio_dev)
-{
- struct ad7606_state *st = iio_priv(indio_dev);
-
- INIT_WORK(&st->poll_work, &ad7606_poll_bh_to_ring);
-
- return iio_triggered_buffer_setup(indio_dev,
- &ad7606_trigger_handler_th_bh, &ad7606_trigger_handler_th_bh,
- NULL);
-}
-
-void ad7606_ring_cleanup(struct iio_dev *indio_dev)
-{
- iio_triggered_buffer_cleanup(indio_dev);
-}
diff --git a/drivers/staging/iio/adc/ad7606_spi.c b/drivers/staging/iio/adc/ad7606_spi.c
index 9587fa86dc69..c9b1f26685f4 100644
--- a/drivers/staging/iio/adc/ad7606_spi.c
+++ b/drivers/staging/iio/adc/ad7606_spi.c
@@ -42,25 +42,16 @@ static const struct ad7606_bus_ops ad7606_spi_bops = {
static int ad7606_spi_probe(struct spi_device *spi)
{
- struct iio_dev *indio_dev;
+ const struct spi_device_id *id = spi_get_device_id(spi);
- indio_dev = ad7606_probe(&spi->dev, spi->irq, NULL,
- spi_get_device_id(spi)->driver_data,
- &ad7606_spi_bops);
-
- if (IS_ERR(indio_dev))
- return PTR_ERR(indio_dev);
-
- spi_set_drvdata(spi, indio_dev);
-
- return 0;
+ return ad7606_probe(&spi->dev, spi->irq, NULL,
+ id->name, id->driver_data,
+ &ad7606_spi_bops);
}
static int ad7606_spi_remove(struct spi_device *spi)
{
- struct iio_dev *indio_dev = dev_get_drvdata(&spi->dev);
-
- return ad7606_remove(indio_dev, spi->irq);
+ return ad7606_remove(&spi->dev, spi->irq);
}
static const struct spi_device_id ad7606_id[] = {
diff --git a/drivers/staging/iio/adc/ad7780.c b/drivers/staging/iio/adc/ad7780.c
index c9a0c2aa602f..e14960038d3e 100644
--- a/drivers/staging/iio/adc/ad7780.c
+++ b/drivers/staging/iio/adc/ad7780.c
@@ -173,14 +173,16 @@ static int ad7780_probe(struct spi_device *spi)
ad_sd_init(&st->sd, indio_dev, spi, &ad7780_sigma_delta_info);
- st->reg = devm_regulator_get(&spi->dev, "vcc");
- if (!IS_ERR(st->reg)) {
- ret = regulator_enable(st->reg);
- if (ret)
- return ret;
-
- voltage_uv = regulator_get_voltage(st->reg);
+ st->reg = devm_regulator_get(&spi->dev, "avdd");
+ if (IS_ERR(st->reg))
+ return PTR_ERR(st->reg);
+
+ ret = regulator_enable(st->reg);
+ if (ret) {
+ dev_err(&spi->dev, "Failed to enable specified AVdd supply\n");
+ return ret;
}
+ voltage_uv = regulator_get_voltage(st->reg);
st->chip_info =
&ad7780_chip_info_tbl[spi_get_device_id(spi)->driver_data];
@@ -222,8 +224,7 @@ static int ad7780_probe(struct spi_device *spi)
error_cleanup_buffer_and_trigger:
ad_sd_cleanup_buffer_and_trigger(indio_dev);
error_disable_reg:
- if (!IS_ERR(st->reg))
- regulator_disable(st->reg);
+ regulator_disable(st->reg);
return ret;
}
@@ -236,8 +237,7 @@ static int ad7780_remove(struct spi_device *spi)
iio_device_unregister(indio_dev);
ad_sd_cleanup_buffer_and_trigger(indio_dev);
- if (!IS_ERR(st->reg))
- regulator_disable(st->reg);
+ regulator_disable(st->reg);
return 0;
}
diff --git a/drivers/staging/iio/adc/ad7816.c b/drivers/staging/iio/adc/ad7816.c
index 5e8115b01011..72551f827382 100644
--- a/drivers/staging/iio/adc/ad7816.c
+++ b/drivers/staging/iio/adc/ad7816.c
@@ -327,7 +327,7 @@ static struct attribute *ad7816_event_attributes[] = {
NULL,
};
-static struct attribute_group ad7816_event_attribute_group = {
+static const struct attribute_group ad7816_event_attribute_group = {
.attrs = ad7816_event_attributes,
.name = "events",
};
diff --git a/drivers/staging/iio/addac/adt7316.c b/drivers/staging/iio/addac/adt7316.c
index 3faffe59c933..a7d90c8bac5e 100644
--- a/drivers/staging/iio/addac/adt7316.c
+++ b/drivers/staging/iio/addac/adt7316.c
@@ -2039,7 +2039,7 @@ static struct attribute *adt7316_event_attributes[] = {
NULL,
};
-static struct attribute_group adt7316_event_attribute_group = {
+static const struct attribute_group adt7316_event_attribute_group = {
.attrs = adt7316_event_attributes,
.name = "events",
};
@@ -2060,7 +2060,7 @@ static struct attribute *adt7516_event_attributes[] = {
NULL,
};
-static struct attribute_group adt7516_event_attribute_group = {
+static const struct attribute_group adt7516_event_attribute_group = {
.attrs = adt7516_event_attributes,
.name = "events",
};
diff --git a/drivers/staging/iio/cdc/ad7150.c b/drivers/staging/iio/cdc/ad7150.c
index 5578a077fcfb..6998c3ddfb6a 100644
--- a/drivers/staging/iio/cdc/ad7150.c
+++ b/drivers/staging/iio/cdc/ad7150.c
@@ -562,7 +562,7 @@ static struct attribute *ad7150_event_attributes[] = {
NULL,
};
-static struct attribute_group ad7150_event_attribute_group = {
+static const struct attribute_group ad7150_event_attribute_group = {
.attrs = ad7150_event_attributes,
.name = "events",
};
diff --git a/drivers/staging/iio/cdc/ad7152.c b/drivers/staging/iio/cdc/ad7152.c
index 485d0a5af53c..b91b50f345bd 100644
--- a/drivers/staging/iio/cdc/ad7152.c
+++ b/drivers/staging/iio/cdc/ad7152.c
@@ -89,6 +89,7 @@ struct ad7152_chip_info {
*/
u8 filter_rate_setup;
u8 setup[2];
+ struct mutex state_lock; /* protect hardware state */
};
static inline ssize_t ad7152_start_calib(struct device *dev,
@@ -115,10 +116,10 @@ static inline ssize_t ad7152_start_calib(struct device *dev,
else
regval |= AD7152_CONF_CH2EN;
- mutex_lock(&indio_dev->mlock);
+ mutex_lock(&chip->state_lock);
ret = i2c_smbus_write_byte_data(chip->client, AD7152_REG_CFG, regval);
if (ret < 0) {
- mutex_unlock(&indio_dev->mlock);
+ mutex_unlock(&chip->state_lock);
return ret;
}
@@ -126,14 +127,15 @@ static inline ssize_t ad7152_start_calib(struct device *dev,
mdelay(20);
ret = i2c_smbus_read_byte_data(chip->client, AD7152_REG_CFG);
if (ret < 0) {
- mutex_unlock(&indio_dev->mlock);
+ mutex_unlock(&chip->state_lock);
return ret;
}
} while ((ret == regval) && timeout--);
- mutex_unlock(&indio_dev->mlock);
+ mutex_unlock(&chip->state_lock);
return len;
}
+
static ssize_t ad7152_start_offset_calib(struct device *dev,
struct device_attribute *attr,
const char *buf,
@@ -142,6 +144,7 @@ static ssize_t ad7152_start_offset_calib(struct device *dev,
return ad7152_start_calib(dev, attr, buf, len,
AD7152_CONF_MODE_OFFS_CAL);
}
+
static ssize_t ad7152_start_gain_calib(struct device *dev,
struct device_attribute *attr,
const char *buf,
@@ -165,63 +168,12 @@ static const unsigned char ad7152_filter_rate_table[][2] = {
{200, 5 + 1}, {50, 20 + 1}, {20, 50 + 1}, {17, 60 + 1},
};
-static ssize_t ad7152_show_filter_rate_setup(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct ad7152_chip_info *chip = iio_priv(indio_dev);
-
- return sprintf(buf, "%d\n",
- ad7152_filter_rate_table[chip->filter_rate_setup][0]);
-}
-
-static ssize_t ad7152_store_filter_rate_setup(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t len)
-{
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct ad7152_chip_info *chip = iio_priv(indio_dev);
- u8 data;
- int ret, i;
-
- ret = kstrtou8(buf, 10, &data);
- if (ret < 0)
- return ret;
-
- for (i = 0; i < ARRAY_SIZE(ad7152_filter_rate_table); i++)
- if (data >= ad7152_filter_rate_table[i][0])
- break;
-
- if (i >= ARRAY_SIZE(ad7152_filter_rate_table))
- i = ARRAY_SIZE(ad7152_filter_rate_table) - 1;
-
- mutex_lock(&indio_dev->mlock);
- ret = i2c_smbus_write_byte_data(chip->client,
- AD7152_REG_CFG2, AD7152_CFG2_OSR(i));
- if (ret < 0) {
- mutex_unlock(&indio_dev->mlock);
- return ret;
- }
-
- chip->filter_rate_setup = i;
- mutex_unlock(&indio_dev->mlock);
-
- return len;
-}
-
-static IIO_DEV_ATTR_SAMP_FREQ(S_IRUGO | S_IWUSR,
- ad7152_show_filter_rate_setup,
- ad7152_store_filter_rate_setup);
-
static IIO_CONST_ATTR_SAMP_FREQ_AVAIL("200 50 20 17");
static IIO_CONST_ATTR(in_capacitance_scale_available,
"0.000061050 0.000030525 0.000015263 0.000007631");
static struct attribute *ad7152_attributes[] = {
- &iio_dev_attr_sampling_frequency.dev_attr.attr,
&iio_dev_attr_in_capacitance0_calibbias_calibration.dev_attr.attr,
&iio_dev_attr_in_capacitance1_calibbias_calibration.dev_attr.attr,
&iio_dev_attr_in_capacitance0_calibscale_calibration.dev_attr.attr,
@@ -247,6 +199,51 @@ static const int ad7152_scale_table[] = {
30525, 7631, 15263, 61050
};
+/**
+ * read_raw handler for IIO_CHAN_INFO_SAMP_FREQ
+ *
+ * state_lock must be held by the caller
+ **/
+static int ad7152_read_raw_samp_freq(struct device *dev, int *val)
+{
+ struct ad7152_chip_info *chip = iio_priv(dev_to_iio_dev(dev));
+
+ *val = ad7152_filter_rate_table[chip->filter_rate_setup][0];
+
+ return 0;
+}
+
+/**
+ * write_raw handler for IIO_CHAN_INFO_SAMP_FREQ
+ *
+ * state_lock must be held by the caller
+ **/
+static int ad7152_write_raw_samp_freq(struct device *dev, int val)
+{
+ struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+ struct ad7152_chip_info *chip = iio_priv(indio_dev);
+ int ret, i;
+
+ for (i = 0; i < ARRAY_SIZE(ad7152_filter_rate_table); i++)
+ if (val >= ad7152_filter_rate_table[i][0])
+ break;
+
+ if (i >= ARRAY_SIZE(ad7152_filter_rate_table))
+ i = ARRAY_SIZE(ad7152_filter_rate_table) - 1;
+
+ mutex_lock(&chip->state_lock);
+ ret = i2c_smbus_write_byte_data(chip->client,
+ AD7152_REG_CFG2, AD7152_CFG2_OSR(i));
+ if (ret < 0) {
+ mutex_unlock(&chip->state_lock);
+ return ret;
+ }
+
+ chip->filter_rate_setup = i;
+ mutex_unlock(&chip->state_lock);
+
+ return ret;
+}
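With the rate table above ({200, 50, 20, 17} Hz), the search picks the
fastest rate that does not exceed the request: asking for 30 Hz falls past
200 and 50 and lands on 20 Hz (OSR index 2), while any request below 17 Hz
runs off the end of the table and is clamped to the slowest rate, 17 Hz.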
static int ad7152_write_raw(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan,
int val,
@@ -256,7 +253,7 @@ static int ad7152_write_raw(struct iio_dev *indio_dev,
struct ad7152_chip_info *chip = iio_priv(indio_dev);
int ret, i;
- mutex_lock(&indio_dev->mlock);
+ mutex_lock(&chip->state_lock);
switch (mask) {
case IIO_CHAN_INFO_CALIBSCALE:
@@ -309,14 +306,26 @@ static int ad7152_write_raw(struct iio_dev *indio_dev,
ret = 0;
break;
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ if (val2) {
+ ret = -EINVAL;
+ goto out;
+ }
+ ret = ad7152_write_raw_samp_freq(&indio_dev->dev, val);
+ if (ret < 0)
+ goto out;
+
+ ret = 0;
+ break;
default:
ret = -EINVAL;
}
out:
- mutex_unlock(&indio_dev->mlock);
+ mutex_unlock(&chip->state_lock);
return ret;
}
+
static int ad7152_read_raw(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan,
int *val, int *val2,
@@ -326,7 +335,7 @@ static int ad7152_read_raw(struct iio_dev *indio_dev,
int ret;
u8 regval = 0;
- mutex_lock(&indio_dev->mlock);
+ mutex_lock(&chip->state_lock);
switch (mask) {
case IIO_CHAN_INFO_RAW:
@@ -403,11 +412,18 @@ static int ad7152_read_raw(struct iio_dev *indio_dev,
ret = IIO_VAL_INT_PLUS_NANO;
break;
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ ret = ad7152_read_raw_samp_freq(&indio_dev->dev, val);
+ if (ret < 0)
+ goto out;
+
+ ret = IIO_VAL_INT;
+ break;
default:
ret = -EINVAL;
}
out:
- mutex_unlock(&indio_dev->mlock);
+ mutex_unlock(&chip->state_lock);
return ret;
}
@@ -440,6 +456,7 @@ static const struct iio_chan_spec ad7152_channels[] = {
BIT(IIO_CHAN_INFO_CALIBSCALE) |
BIT(IIO_CHAN_INFO_CALIBBIAS) |
BIT(IIO_CHAN_INFO_SCALE),
+ .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ),
}, {
.type = IIO_CAPACITANCE,
.differential = 1,
@@ -450,6 +467,7 @@ static const struct iio_chan_spec ad7152_channels[] = {
BIT(IIO_CHAN_INFO_CALIBSCALE) |
BIT(IIO_CHAN_INFO_CALIBBIAS) |
BIT(IIO_CHAN_INFO_SCALE),
+ .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ),
}, {
.type = IIO_CAPACITANCE,
.indexed = 1,
@@ -458,6 +476,7 @@ static const struct iio_chan_spec ad7152_channels[] = {
BIT(IIO_CHAN_INFO_CALIBSCALE) |
BIT(IIO_CHAN_INFO_CALIBBIAS) |
BIT(IIO_CHAN_INFO_SCALE),
+ .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ),
}, {
.type = IIO_CAPACITANCE,
.differential = 1,
@@ -468,8 +487,10 @@ static const struct iio_chan_spec ad7152_channels[] = {
BIT(IIO_CHAN_INFO_CALIBSCALE) |
BIT(IIO_CHAN_INFO_CALIBBIAS) |
BIT(IIO_CHAN_INFO_SCALE),
+ .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ),
}
};
+
/*
* device probe and remove
*/
@@ -489,6 +510,7 @@ static int ad7152_probe(struct i2c_client *client,
i2c_set_clientdata(client, indio_dev);
chip->client = client;
+ mutex_init(&chip->state_lock);
/* Establish that the iio_dev is a child of the i2c device */
indio_dev->name = id->name;
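The move from indio_dev->mlock to a driver-private state_lock follows the
IIO convention that mlock belongs to the core, which takes it in
iio_device_claim_direct_mode() to fence off mode transitions; drivers are
expected to guard their own register cache with their own lock, initialised
here in probe.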
diff --git a/drivers/staging/iio/cdc/ad7746.c b/drivers/staging/iio/cdc/ad7746.c
index 5771d4ee8ef1..81f8b9ee1120 100644
--- a/drivers/staging/iio/cdc/ad7746.c
+++ b/drivers/staging/iio/cdc/ad7746.c
@@ -70,8 +70,10 @@
#define AD7746_EXCSETUP_EXCLVL(x) (((x) & 0x3) << 0)
/* Config Register Bit Designations (AD7746_REG_CFG) */
-#define AD7746_CONF_VTFS(x) ((x) << 6)
-#define AD7746_CONF_CAPFS(x) ((x) << 3)
+#define AD7746_CONF_VTFS_SHIFT 6
+#define AD7746_CONF_CAPFS_SHIFT 3
+#define AD7746_CONF_VTFS_MASK GENMASK(7, 6)
+#define AD7746_CONF_CAPFS_MASK GENMASK(5, 3)
#define AD7746_CONF_MODE_IDLE (0 << 0)
#define AD7746_CONF_MODE_CONT_CONV (1 << 0)
#define AD7746_CONF_MODE_SINGLE_CONV (2 << 0)
@@ -122,7 +124,8 @@ static const struct iio_chan_spec ad7746_channels[] = {
.indexed = 1,
.channel = 0,
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
- .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE) |
+ BIT(IIO_CHAN_INFO_SAMP_FREQ),
.address = AD7746_REG_VT_DATA_HIGH << 8 |
AD7746_VTSETUP_VTMD_EXT_VIN,
},
@@ -132,7 +135,8 @@ static const struct iio_chan_spec ad7746_channels[] = {
.channel = 1,
.extend_name = "supply",
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
- .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE) |
+ BIT(IIO_CHAN_INFO_SAMP_FREQ),
.address = AD7746_REG_VT_DATA_HIGH << 8 |
AD7746_VTSETUP_VTMD_VDD_MON,
},
@@ -159,7 +163,7 @@ static const struct iio_chan_spec ad7746_channels[] = {
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
BIT(IIO_CHAN_INFO_CALIBSCALE) | BIT(IIO_CHAN_INFO_OFFSET),
.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_CALIBBIAS) |
- BIT(IIO_CHAN_INFO_SCALE),
+ BIT(IIO_CHAN_INFO_SCALE) | BIT(IIO_CHAN_INFO_SAMP_FREQ),
.address = AD7746_REG_CAP_DATA_HIGH << 8,
},
[CIN1_DIFF] = {
@@ -171,7 +175,7 @@ static const struct iio_chan_spec ad7746_channels[] = {
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
BIT(IIO_CHAN_INFO_CALIBSCALE) | BIT(IIO_CHAN_INFO_OFFSET),
.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_CALIBBIAS) |
- BIT(IIO_CHAN_INFO_SCALE),
+ BIT(IIO_CHAN_INFO_SCALE) | BIT(IIO_CHAN_INFO_SAMP_FREQ),
.address = AD7746_REG_CAP_DATA_HIGH << 8 |
AD7746_CAPSETUP_CAPDIFF
},
@@ -182,7 +186,7 @@ static const struct iio_chan_spec ad7746_channels[] = {
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
BIT(IIO_CHAN_INFO_CALIBSCALE) | BIT(IIO_CHAN_INFO_OFFSET),
.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_CALIBBIAS) |
- BIT(IIO_CHAN_INFO_SCALE),
+ BIT(IIO_CHAN_INFO_SCALE) | BIT(IIO_CHAN_INFO_SAMP_FREQ),
.address = AD7746_REG_CAP_DATA_HIGH << 8 |
AD7746_CAPSETUP_CIN2,
},
@@ -195,7 +199,7 @@ static const struct iio_chan_spec ad7746_channels[] = {
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
BIT(IIO_CHAN_INFO_CALIBSCALE) | BIT(IIO_CHAN_INFO_OFFSET),
.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_CALIBBIAS) |
- BIT(IIO_CHAN_INFO_SCALE),
+ BIT(IIO_CHAN_INFO_SCALE) | BIT(IIO_CHAN_INFO_SAMP_FREQ),
.address = AD7746_REG_CAP_DATA_HIGH << 8 |
AD7746_CAPSETUP_CAPDIFF | AD7746_CAPSETUP_CIN2,
}
@@ -215,15 +219,16 @@ static int ad7746_select_channel(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan)
{
struct ad7746_chip_info *chip = iio_priv(indio_dev);
- int ret, delay;
+ int ret, delay, idx;
u8 vt_setup, cap_setup;
switch (chan->type) {
case IIO_CAPACITANCE:
cap_setup = (chan->address & 0xFF) | AD7746_CAPSETUP_CAPEN;
vt_setup = chip->vt_setup & ~AD7746_VTSETUP_VTEN;
- delay = ad7746_cap_filter_rate_table[(chip->config >> 3) &
- 0x7][1];
+ idx = (chip->config & AD7746_CONF_CAPFS_MASK) >>
+ AD7746_CONF_CAPFS_SHIFT;
+ delay = ad7746_cap_filter_rate_table[idx][1];
if (chip->capdac_set != chan->channel) {
ret = i2c_smbus_write_byte_data(chip->client,
@@ -244,8 +249,9 @@ static int ad7746_select_channel(struct iio_dev *indio_dev,
case IIO_TEMP:
vt_setup = (chan->address & 0xFF) | AD7746_VTSETUP_VTEN;
cap_setup = chip->cap_setup & ~AD7746_CAPSETUP_CAPEN;
- delay = ad7746_cap_filter_rate_table[(chip->config >> 6) &
- 0x3][1];
+ idx = (chip->config & AD7746_CONF_VTFS_MASK) >>
+ AD7746_CONF_VTFS_SHIFT;
+ delay = ad7746_cap_filter_rate_table[idx][1];
break;
default:
return -EINVAL;
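
The idx extraction above feeds a rate/delay table: the decoded filter-rate field selects both the reported sample rate and the conversion delay to wait after switching channels. A small userspace model (the table values follow the rates advertised elsewhere in this patch, but treat them as illustrative):

	#include <stdio.h>

	static const int cap_filter_rate_table[][2] = {
		/* { rate in Hz, conversion time in ms } */
		{ 91, 11 }, { 84, 12 }, { 50, 20 }, { 26, 38 },
		{ 16, 62 }, { 13, 77 }, { 11, 92 }, { 9, 110 },
	};

	int main(void)
	{
		unsigned char config = 2 << 3;		/* CAPFS field = 2 */
		unsigned int idx = (config & (0x7 << 3)) >> 3;

		printf("rate=%d Hz, delay=%d ms\n",
		       cap_filter_rate_table[idx][0],
		       cap_filter_rate_table[idx][1]);
		return 0;
	}
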
@@ -355,101 +361,47 @@ static IIO_DEVICE_ATTR(in_capacitance1_calibscale_calibration,
static IIO_DEVICE_ATTR(in_voltage0_calibscale_calibration,
S_IWUSR, NULL, ad7746_start_gain_calib, VIN);
-static ssize_t ad7746_show_cap_filter_rate_setup(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+static int ad7746_store_cap_filter_rate_setup(struct ad7746_chip_info *chip,
+ int val)
{
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct ad7746_chip_info *chip = iio_priv(indio_dev);
-
- return sprintf(buf, "%d\n", ad7746_cap_filter_rate_table[
- (chip->config >> 3) & 0x7][0]);
-}
-
-static ssize_t ad7746_store_cap_filter_rate_setup(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t len)
-{
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct ad7746_chip_info *chip = iio_priv(indio_dev);
- u8 data;
- int ret, i;
-
- ret = kstrtou8(buf, 10, &data);
- if (ret < 0)
- return ret;
+ int i;
for (i = 0; i < ARRAY_SIZE(ad7746_cap_filter_rate_table); i++)
- if (data >= ad7746_cap_filter_rate_table[i][0])
+ if (val >= ad7746_cap_filter_rate_table[i][0])
break;
if (i >= ARRAY_SIZE(ad7746_cap_filter_rate_table))
i = ARRAY_SIZE(ad7746_cap_filter_rate_table) - 1;
- mutex_lock(&indio_dev->mlock);
- chip->config &= ~AD7746_CONF_CAPFS(0x7);
- chip->config |= AD7746_CONF_CAPFS(i);
- mutex_unlock(&indio_dev->mlock);
+ chip->config &= ~AD7746_CONF_CAPFS_MASK;
+ chip->config |= i << AD7746_CONF_CAPFS_SHIFT;
- return len;
+ return 0;
}
-static ssize_t ad7746_show_vt_filter_rate_setup(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+static int ad7746_store_vt_filter_rate_setup(struct ad7746_chip_info *chip,
+ int val)
{
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct ad7746_chip_info *chip = iio_priv(indio_dev);
-
- return sprintf(buf, "%d\n", ad7746_vt_filter_rate_table[
- (chip->config >> 6) & 0x3][0]);
-}
-
-static ssize_t ad7746_store_vt_filter_rate_setup(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t len)
-{
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct ad7746_chip_info *chip = iio_priv(indio_dev);
- u8 data;
- int ret, i;
-
- ret = kstrtou8(buf, 10, &data);
- if (ret < 0)
- return ret;
+ int i;
for (i = 0; i < ARRAY_SIZE(ad7746_vt_filter_rate_table); i++)
- if (data >= ad7746_vt_filter_rate_table[i][0])
+ if (val >= ad7746_vt_filter_rate_table[i][0])
break;
if (i >= ARRAY_SIZE(ad7746_vt_filter_rate_table))
i = ARRAY_SIZE(ad7746_vt_filter_rate_table) - 1;
- mutex_lock(&indio_dev->mlock);
- chip->config &= ~AD7746_CONF_VTFS(0x3);
- chip->config |= AD7746_CONF_VTFS(i);
- mutex_unlock(&indio_dev->mlock);
+ chip->config &= ~AD7746_CONF_VTFS_MASK;
+ chip->config |= i << AD7746_CONF_VTFS_SHIFT;
- return len;
+ return 0;
}
-static IIO_DEVICE_ATTR(in_capacitance_sampling_frequency,
- S_IRUGO | S_IWUSR, ad7746_show_cap_filter_rate_setup,
- ad7746_store_cap_filter_rate_setup, 0);
-
-static IIO_DEVICE_ATTR(in_voltage_sampling_frequency,
- S_IRUGO | S_IWUSR, ad7746_show_vt_filter_rate_setup,
- ad7746_store_vt_filter_rate_setup, 0);
-
static IIO_CONST_ATTR(in_voltage_sampling_frequency_available, "50 31 16 8");
static IIO_CONST_ATTR(in_capacitance_sampling_frequency_available,
"91 84 50 26 16 13 11 9");
static struct attribute *ad7746_attributes[] = {
- &iio_dev_attr_in_capacitance_sampling_frequency.dev_attr.attr,
- &iio_dev_attr_in_voltage_sampling_frequency.dev_attr.attr,
&iio_dev_attr_in_capacitance0_calibbias_calibration.dev_attr.attr,
&iio_dev_attr_in_capacitance0_calibscale_calibration.dev_attr.attr,
&iio_dev_attr_in_capacitance1_calibscale_calibration.dev_attr.attr,
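
The rewritten store helpers above implement a simple rounding rule: walk the descending rate table and take the first entry not greater than the request, clamping to the slowest rate if nothing matches. A self-contained model of that rule (the helper name is an assumption):

	#include <stddef.h>

	/* Returns the index of the first table rate <= val; clamps to the
	 * last (slowest) entry, mirroring the loop in the store helpers. */
	static size_t pick_rate_index(const int (*table)[2], size_t n, int val)
	{
		size_t i;

		for (i = 0; i < n; i++)
			if (val >= table[i][0])
				break;
		return (i >= n) ? n - 1 : i;
	}

With the capacitance table shown earlier, a request of 60 resolves to index 2 (50 Hz) and a request of 1 clamps to index 7 (9 Hz).
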
@@ -547,6 +499,23 @@ static int ad7746_write_raw(struct iio_dev *indio_dev,
ret = 0;
break;
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ if (val2) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ switch (chan->type) {
+ case IIO_CAPACITANCE:
+ ret = ad7746_store_cap_filter_rate_setup(chip, val);
+ break;
+ case IIO_VOLTAGE:
+ ret = ad7746_store_vt_filter_rate_setup(chip, val);
+ break;
+ default:
+ ret = -EINVAL;
+ }
+ break;
default:
ret = -EINVAL;
}
@@ -562,7 +531,7 @@ static int ad7746_read_raw(struct iio_dev *indio_dev,
long mask)
{
struct ad7746_chip_info *chip = iio_priv(indio_dev);
- int ret, delay;
+ int ret, delay, idx;
u8 regval, reg;
mutex_lock(&indio_dev->mlock);
@@ -667,6 +636,24 @@ static int ad7746_read_raw(struct iio_dev *indio_dev,
}
break;
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ switch (chan->type) {
+ case IIO_CAPACITANCE:
+ idx = (chip->config & AD7746_CONF_CAPFS_MASK) >>
+ AD7746_CONF_CAPFS_SHIFT;
+ *val = ad7746_cap_filter_rate_table[idx][0];
+ ret = IIO_VAL_INT;
+ break;
+ case IIO_VOLTAGE:
+ idx = (chip->config & AD7746_CONF_VTFS_MASK) >>
+ AD7746_CONF_VTFS_SHIFT;
+ *val = ad7746_vt_filter_rate_table[idx][0];
+ ret = IIO_VAL_INT;
+ break;
+ default:
+ ret = -EINVAL;
+ }
+ break;
default:
ret = -EINVAL;
}
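
Taken together, the ad7746 hunks replace ad-hoc sampling_frequency device attributes with the IIO core mechanism: once SAMP_FREQ appears in a channel's info masks and the read_raw/write_raw callbacks handle it, the core creates and routes the sysfs files itself. A skeleton of the callback pattern (a sketch, not the driver's exact code):

	#include <linux/iio/iio.h>

	static int demo_read_raw(struct iio_dev *indio_dev,
				 struct iio_chan_spec const *chan,
				 int *val, int *val2, long mask)
	{
		switch (mask) {
		case IIO_CHAN_INFO_SAMP_FREQ:
			*val = 50;		/* report the current rate in Hz */
			return IIO_VAL_INT;
		default:
			return -EINVAL;
		}
	}

	static int demo_write_raw(struct iio_dev *indio_dev,
				  struct iio_chan_spec const *chan,
				  int val, int val2, long mask)
	{
		switch (mask) {
		case IIO_CHAN_INFO_SAMP_FREQ:
			if (val2)
				return -EINVAL;	/* fractional rates rejected */
			/* program 'val' into the device here */
			return 0;
		default:
			return -EINVAL;
		}
	}
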
diff --git a/drivers/staging/iio/frequency/ad9832.c b/drivers/staging/iio/frequency/ad9832.c
index 358400b22d33..a5b2f068168d 100644
--- a/drivers/staging/iio/frequency/ad9832.c
+++ b/drivers/staging/iio/frequency/ad9832.c
@@ -204,7 +204,6 @@ static int ad9832_probe(struct spi_device *spi)
struct ad9832_platform_data *pdata = dev_get_platdata(&spi->dev);
struct iio_dev *indio_dev;
struct ad9832_state *st;
- struct regulator *reg;
int ret;
if (!pdata) {
@@ -212,21 +211,35 @@ static int ad9832_probe(struct spi_device *spi)
return -ENODEV;
}
- reg = devm_regulator_get(&spi->dev, "vcc");
- if (!IS_ERR(reg)) {
- ret = regulator_enable(reg);
- if (ret)
- return ret;
- }
-
indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
- if (!indio_dev) {
- ret = -ENOMEM;
- goto error_disable_reg;
- }
+ if (!indio_dev)
+ return -ENOMEM;
+
spi_set_drvdata(spi, indio_dev);
st = iio_priv(indio_dev);
- st->reg = reg;
+
+ st->avdd = devm_regulator_get(&spi->dev, "avdd");
+ if (IS_ERR(st->avdd))
+ return PTR_ERR(st->avdd);
+
+ ret = regulator_enable(st->avdd);
+ if (ret) {
+ dev_err(&spi->dev, "Failed to enable specified AVDD supply\n");
+ return ret;
+ }
+
+ st->dvdd = devm_regulator_get(&spi->dev, "dvdd");
+ if (IS_ERR(st->dvdd)) {
+ ret = PTR_ERR(st->dvdd);
+ goto error_disable_avdd;
+ }
+
+ ret = regulator_enable(st->dvdd);
+ if (ret) {
+ dev_err(&spi->dev, "Failed to enable specified DVDD supply\n");
+ goto error_disable_avdd;
+ }
+
st->mclk = pdata->mclk;
st->spi = spi;
@@ -277,42 +290,43 @@ static int ad9832_probe(struct spi_device *spi)
ret = spi_sync(st->spi, &st->msg);
if (ret) {
dev_err(&spi->dev, "device init failed\n");
- goto error_disable_reg;
+ goto error_disable_dvdd;
}
ret = ad9832_write_frequency(st, AD9832_FREQ0HM, pdata->freq0);
if (ret)
- goto error_disable_reg;
+ goto error_disable_dvdd;
ret = ad9832_write_frequency(st, AD9832_FREQ1HM, pdata->freq1);
if (ret)
- goto error_disable_reg;
+ goto error_disable_dvdd;
ret = ad9832_write_phase(st, AD9832_PHASE0H, pdata->phase0);
if (ret)
- goto error_disable_reg;
+ goto error_disable_dvdd;
ret = ad9832_write_phase(st, AD9832_PHASE1H, pdata->phase1);
if (ret)
- goto error_disable_reg;
+ goto error_disable_dvdd;
ret = ad9832_write_phase(st, AD9832_PHASE2H, pdata->phase2);
if (ret)
- goto error_disable_reg;
+ goto error_disable_dvdd;
ret = ad9832_write_phase(st, AD9832_PHASE3H, pdata->phase3);
if (ret)
- goto error_disable_reg;
+ goto error_disable_dvdd;
ret = iio_device_register(indio_dev);
if (ret)
- goto error_disable_reg;
+ goto error_disable_dvdd;
return 0;
-error_disable_reg:
- if (!IS_ERR(reg))
- regulator_disable(reg);
+error_disable_dvdd:
+ regulator_disable(st->dvdd);
+error_disable_avdd:
+ regulator_disable(st->avdd);
return ret;
}
@@ -323,8 +337,8 @@ static int ad9832_remove(struct spi_device *spi)
struct ad9832_state *st = iio_priv(indio_dev);
iio_device_unregister(indio_dev);
- if (!IS_ERR(st->reg))
- regulator_disable(st->reg);
+ regulator_disable(st->dvdd);
+ regulator_disable(st->avdd);
return 0;
}
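
The ad9832 conversion above is the standard ordered bring-up for two required supplies: get and enable AVDD, then DVDD, unwinding in reverse on any failure so that remove() can unconditionally disable both. A sketch of the shape (hypothetical demo_ names; the "avdd"/"dvdd" consumer names follow the patch):

	#include <linux/err.h>
	#include <linux/device.h>
	#include <linux/regulator/consumer.h>

	static int demo_power_up(struct device *dev,
				 struct regulator **avdd, struct regulator **dvdd)
	{
		int ret;

		*avdd = devm_regulator_get(dev, "avdd");
		if (IS_ERR(*avdd))
			return PTR_ERR(*avdd);

		ret = regulator_enable(*avdd);
		if (ret)
			return ret;

		*dvdd = devm_regulator_get(dev, "dvdd");
		if (IS_ERR(*dvdd)) {
			ret = PTR_ERR(*dvdd);
			goto err_disable_avdd;
		}

		ret = regulator_enable(*dvdd);
		if (ret)
			goto err_disable_avdd;

		return 0;

	err_disable_avdd:
		regulator_disable(*avdd);
		return ret;
	}

Unwinding in reverse order matters here: only supplies that were actually enabled get disabled, which keeps the regulator core's enable counts balanced.
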
diff --git a/drivers/staging/iio/frequency/ad9832.h b/drivers/staging/iio/frequency/ad9832.h
index d32323b46be6..1b08b04482a4 100644
--- a/drivers/staging/iio/frequency/ad9832.h
+++ b/drivers/staging/iio/frequency/ad9832.h
@@ -58,7 +58,8 @@
/**
* struct ad9832_state - driver instance specific data
* @spi: spi_device
- * @reg: supply regulator
+ * @avdd: supply regulator for the analog section
+ * @dvdd: supply regulator for the digital section
* @mclk: external master clock
* @ctrl_fp: cached frequency/phase control word
* @ctrl_ss: cached sync/selsrc control word
@@ -76,7 +77,8 @@
struct ad9832_state {
struct spi_device *spi;
- struct regulator *reg;
+ struct regulator *avdd;
+ struct regulator *dvdd;
unsigned long mclk;
unsigned short ctrl_fp;
unsigned short ctrl_ss;
diff --git a/drivers/staging/iio/frequency/ad9834.c b/drivers/staging/iio/frequency/ad9834.c
index 6366216e4f37..19216af1dfc9 100644
--- a/drivers/staging/iio/frequency/ad9834.c
+++ b/drivers/staging/iio/frequency/ad9834.c
@@ -329,11 +329,14 @@ static int ad9834_probe(struct spi_device *spi)
return -ENODEV;
}
- reg = devm_regulator_get(&spi->dev, "vcc");
- if (!IS_ERR(reg)) {
- ret = regulator_enable(reg);
- if (ret)
- return ret;
+ reg = devm_regulator_get(&spi->dev, "avdd");
+ if (IS_ERR(reg))
+ return PTR_ERR(reg);
+
+ ret = regulator_enable(reg);
+ if (ret) {
+ dev_err(&spi->dev, "Failed to enable specified AVDD supply\n");
+ return ret;
}
indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
@@ -416,8 +419,7 @@ static int ad9834_probe(struct spi_device *spi)
return 0;
error_disable_reg:
- if (!IS_ERR(reg))
- regulator_disable(reg);
+ regulator_disable(reg);
return ret;
}
@@ -428,8 +430,7 @@ static int ad9834_remove(struct spi_device *spi)
struct ad9834_state *st = iio_priv(indio_dev);
iio_device_unregister(indio_dev);
- if (!IS_ERR(st->reg))
- regulator_disable(st->reg);
+ regulator_disable(st->reg);
return 0;
}
diff --git a/drivers/staging/iio/impedance-analyzer/ad5933.c b/drivers/staging/iio/impedance-analyzer/ad5933.c
index 3892a7470410..944789843938 100644
--- a/drivers/staging/iio/impedance-analyzer/ad5933.c
+++ b/drivers/staging/iio/impedance-analyzer/ad5933.c
@@ -726,13 +726,16 @@ static int ad5933_probe(struct i2c_client *client,
if (!pdata)
pdata = &ad5933_default_pdata;
- st->reg = devm_regulator_get(&client->dev, "vcc");
- if (!IS_ERR(st->reg)) {
- ret = regulator_enable(st->reg);
- if (ret)
- return ret;
- voltage_uv = regulator_get_voltage(st->reg);
+ st->reg = devm_regulator_get(&client->dev, "vdd");
+ if (IS_ERR(st->reg))
+ return PTR_ERR(st->reg);
+
+ ret = regulator_enable(st->reg);
+ if (ret) {
+ dev_err(&client->dev, "Failed to enable specified VDD supply\n");
+ return ret;
}
+ voltage_uv = regulator_get_voltage(st->reg);
if (voltage_uv)
st->vref_mv = voltage_uv / 1000;
@@ -775,8 +778,7 @@ static int ad5933_probe(struct i2c_client *client,
error_unreg_ring:
iio_kfifo_free(indio_dev->buffer);
error_disable_reg:
- if (!IS_ERR(st->reg))
- regulator_disable(st->reg);
+ regulator_disable(st->reg);
return ret;
}
@@ -788,8 +790,7 @@ static int ad5933_remove(struct i2c_client *client)
iio_device_unregister(indio_dev);
iio_kfifo_free(indio_dev->buffer);
- if (!IS_ERR(st->reg))
- regulator_disable(st->reg);
+ regulator_disable(st->reg);
return 0;
}
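
The ad9834 and ad5933 hunks above apply the same fix: the old if (!IS_ERR(reg)) guards treated the supply as optional, which swallowed real errors such as -EPROBE_DEFER, and since the regulator core hands out a dummy supply when none is described, the driver can simply require the handle and propagate failures. A minimal sketch, assuming a "vdd" consumer name as in ad5933:

	#include <linux/err.h>
	#include <linux/device.h>
	#include <linux/regulator/consumer.h>

	static int demo_get_vdd(struct device *dev, struct regulator **reg)
	{
		int ret;

		*reg = devm_regulator_get(dev, "vdd");
		if (IS_ERR(*reg))		/* may be -EPROBE_DEFER */
			return PTR_ERR(*reg);

		ret = regulator_enable(*reg);
		if (ret)
			dev_err(dev, "Failed to enable VDD supply: %d\n", ret);

		return ret;
	}
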
diff --git a/drivers/staging/iio/light/Kconfig b/drivers/staging/iio/light/Kconfig
index ca8d6e66c899..4fbf6298c0f3 100644
--- a/drivers/staging/iio/light/Kconfig
+++ b/drivers/staging/iio/light/Kconfig
@@ -3,18 +3,6 @@
#
menu "Light sensors"
-config SENSORS_ISL29018
- tristate "ISL 29018 light and proximity sensor"
- depends on I2C
- select REGMAP_I2C
- default n
- help
- If you say yes here you get support for ambient light sensing and
- proximity infrared sensing from Intersil ISL29018.
- This driver will provide the measurements of ambient light intensity
- in lux, proximity infrared sensing and normal infrared sensing.
- Data from sensor is accessible via sysfs.
-
config SENSORS_ISL29028
tristate "Intersil ISL29028 Concurrent Light and Proximity Sensor"
depends on I2C
@@ -25,13 +13,6 @@ config SENSORS_ISL29028
Proximity value via iio. The ISL29028 provides the concurrent sensing
of ambient light and proximity.
-config TSL2583
- tristate "TAOS TSL2580, TSL2581 and TSL2583 light-to-digital converters"
- depends on I2C
- help
- Provides support for the TAOS tsl2580, tsl2581 and tsl2583 devices.
- Access ALS data via iio, sysfs.
-
config TSL2x7x
tristate "TAOS TSL/TMD2x71 and TSL/TMD2x72 Family of light and proximity sensors"
depends on I2C
diff --git a/drivers/staging/iio/light/Makefile b/drivers/staging/iio/light/Makefile
index 9960fdf7c15b..f8693e9fdc94 100644
--- a/drivers/staging/iio/light/Makefile
+++ b/drivers/staging/iio/light/Makefile
@@ -2,7 +2,5 @@
# Makefile for industrial I/O Light sensors
#
-obj-$(CONFIG_SENSORS_ISL29018) += isl29018.o
obj-$(CONFIG_SENSORS_ISL29028) += isl29028.o
-obj-$(CONFIG_TSL2583) += tsl2583.o
obj-$(CONFIG_TSL2x7x) += tsl2x7x_core.o
diff --git a/drivers/staging/iio/light/tsl2583.c b/drivers/staging/iio/light/tsl2583.c
deleted file mode 100644
index 08f1583ee34e..000000000000
--- a/drivers/staging/iio/light/tsl2583.c
+++ /dev/null
@@ -1,963 +0,0 @@
-/*
- * Device driver for monitoring ambient light intensity (lux)
- * within the TAOS tsl258x family of devices (tsl2580, tsl2581).
- *
- * Copyright (c) 2011, TAOS Corporation.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
- */
-
-#include <linux/kernel.h>
-#include <linux/i2c.h>
-#include <linux/errno.h>
-#include <linux/delay.h>
-#include <linux/string.h>
-#include <linux/mutex.h>
-#include <linux/unistd.h>
-#include <linux/slab.h>
-#include <linux/module.h>
-#include <linux/iio/iio.h>
-
-#define TSL258X_MAX_DEVICE_REGS 32
-
-/* Triton register offsets */
-#define TSL258X_REG_MAX 8
-
-/* Device Registers and Masks */
-#define TSL258X_CNTRL 0x00
-#define TSL258X_ALS_TIME 0X01
-#define TSL258X_INTERRUPT 0x02
-#define TSL258X_GAIN 0x07
-#define TSL258X_REVID 0x11
-#define TSL258X_CHIPID 0x12
-#define TSL258X_ALS_CHAN0LO 0x14
-#define TSL258X_ALS_CHAN0HI 0x15
-#define TSL258X_ALS_CHAN1LO 0x16
-#define TSL258X_ALS_CHAN1HI 0x17
-#define TSL258X_TMR_LO 0x18
-#define TSL258X_TMR_HI 0x19
-
-/* tsl2583 cmd reg masks */
-#define TSL258X_CMD_REG 0x80
-#define TSL258X_CMD_SPL_FN 0x60
-#define TSL258X_CMD_ALS_INT_CLR 0X01
-
-/* tsl2583 cntrl reg masks */
-#define TSL258X_CNTL_ADC_ENBL 0x02
-#define TSL258X_CNTL_PWR_ON 0x01
-
-/* tsl2583 status reg masks */
-#define TSL258X_STA_ADC_VALID 0x01
-#define TSL258X_STA_ADC_INTR 0x10
-
-/* Lux calculation constants */
-#define TSL258X_LUX_CALC_OVER_FLOW 65535
-
-enum {
- TSL258X_CHIP_UNKNOWN = 0,
- TSL258X_CHIP_WORKING = 1,
- TSL258X_CHIP_SUSPENDED = 2
-};
-
-/* Per-device data */
-struct taos_als_info {
- u16 als_ch0;
- u16 als_ch1;
- u16 lux;
-};
-
-struct taos_settings {
- int als_time;
- int als_gain;
- int als_gain_trim;
- int als_cal_target;
-};
-
-struct tsl2583_chip {
- struct mutex als_mutex;
- struct i2c_client *client;
- struct taos_als_info als_cur_info;
- struct taos_settings taos_settings;
- int als_time_scale;
- int als_saturation;
- int taos_chip_status;
- u8 taos_config[8];
-};
-
-/*
- * Initial values for the device - these values can/will be changed by
- * the driver and by applications as needed.
- * These values are dynamic.
- */
-static const u8 taos_config[8] = {
- 0x00, 0xee, 0x00, 0x03, 0x00, 0xFF, 0xFF, 0x00
-}; /* cntrl atime intC Athl0 Athl1 Athh0 Athh1 gain */
-
-struct taos_lux {
- unsigned int ratio;
- unsigned int ch0;
- unsigned int ch1;
-};
-
-/* This structure is intentionally large to accommodate updates via sysfs. */
-/* Sized to 11 = max 10 segments + 1 termination segment */
-/* Assumption is one and only one type of glass used */
-static struct taos_lux taos_device_lux[11] = {
- { 9830, 8520, 15729 },
- { 12452, 10807, 23344 },
- { 14746, 6383, 11705 },
- { 17695, 4063, 6554 },
-};
-
-struct gainadj {
- s16 ch0;
- s16 ch1;
-};
-
-/* Index = (0 - 3) Used to validate the gain selection index */
-static const struct gainadj gainadj[] = {
- { 1, 1 },
- { 8, 8 },
- { 16, 16 },
- { 107, 115 }
-};
-
-/*
- * Provides initial operational parameter defaults.
- * These defaults may be changed through the device's sysfs files.
- */
-static void taos_defaults(struct tsl2583_chip *chip)
-{
- /* Operational parameters */
- chip->taos_settings.als_time = 100;
- /* must be a multiple of 50mS */
- chip->taos_settings.als_gain = 0;
- /* this is actually an index into the gain table */
- /* assume clear glass as default */
- chip->taos_settings.als_gain_trim = 1000;
- /* default gain trim to account for aperture effects */
- chip->taos_settings.als_cal_target = 130;
- /* Known external ALS reading used for calibration */
-}
-
-/*
- * Read a number of bytes starting at register (reg) location.
- * Return 0, or i2c_smbus_write_byte ERROR code.
- */
-static int
-taos_i2c_read(struct i2c_client *client, u8 reg, u8 *val, unsigned int len)
-{
- int i, ret;
-
- for (i = 0; i < len; i++) {
- /* select register to write */
- ret = i2c_smbus_write_byte(client, (TSL258X_CMD_REG | reg));
- if (ret < 0) {
- dev_err(&client->dev,
- "taos_i2c_read failed to write register %x\n",
- reg);
- return ret;
- }
- /* read the data */
- *val = i2c_smbus_read_byte(client);
- val++;
- reg++;
- }
- return 0;
-}
-
-/*
- * Reads and calculates current lux value.
- * The raw ch0 and ch1 values of the ambient light sensed in the last
- * integration cycle are read from the device.
- * Time scale factor array values are adjusted based on the integration time.
- * The raw values are multiplied by a scale factor, and device gain is obtained
- * using gain index. Limit checks are done next, then the ratio of a multiple
- * of ch1 value, to the ch0 value, is calculated. The array taos_device_lux[]
- * declared above is then scanned to find the first ratio value that is just
- * above the ratio we just calculated. The ch0 and ch1 multiplier constants in
- * the array are then used along with the time scale factor array values, to
- * calculate the lux.
- */
-static int taos_get_lux(struct iio_dev *indio_dev)
-{
- u16 ch0, ch1; /* separated ch0/ch1 data from device */
- u32 lux; /* raw lux calculated from device data */
- u64 lux64;
- u32 ratio;
- u8 buf[5];
- struct taos_lux *p;
- struct tsl2583_chip *chip = iio_priv(indio_dev);
- int i, ret;
- u32 ch0lux = 0;
- u32 ch1lux = 0;
-
- if (mutex_trylock(&chip->als_mutex) == 0) {
- dev_info(&chip->client->dev, "taos_get_lux device is busy\n");
- return chip->als_cur_info.lux; /* busy, so return LAST VALUE */
- }
-
- if (chip->taos_chip_status != TSL258X_CHIP_WORKING) {
- /* device is not enabled */
- dev_err(&chip->client->dev, "taos_get_lux device is not enabled\n");
- ret = -EBUSY;
- goto out_unlock;
- }
-
- ret = taos_i2c_read(chip->client, (TSL258X_CMD_REG), &buf[0], 1);
- if (ret < 0) {
- dev_err(&chip->client->dev, "taos_get_lux failed to read CMD_REG\n");
- goto out_unlock;
- }
- /* is data new & valid */
- if (!(buf[0] & TSL258X_STA_ADC_INTR)) {
- dev_err(&chip->client->dev, "taos_get_lux data not valid\n");
- ret = chip->als_cur_info.lux; /* return LAST VALUE */
- goto out_unlock;
- }
-
- for (i = 0; i < 4; i++) {
- int reg = TSL258X_CMD_REG | (TSL258X_ALS_CHAN0LO + i);
-
- ret = taos_i2c_read(chip->client, reg, &buf[i], 1);
- if (ret < 0) {
- dev_err(&chip->client->dev,
- "taos_get_lux failed to read register %x\n",
- reg);
- goto out_unlock;
- }
- }
-
- /*
- * clear status, really interrupt status (interrupts are off), but
- * we use the bit anyway - don't forget 0x80 - this is a command
- */
- ret = i2c_smbus_write_byte(chip->client,
- (TSL258X_CMD_REG | TSL258X_CMD_SPL_FN |
- TSL258X_CMD_ALS_INT_CLR));
-
- if (ret < 0) {
- dev_err(&chip->client->dev,
- "taos_i2c_write_command failed in taos_get_lux, err = %d\n",
- ret);
- goto out_unlock; /* have no data, so return failure */
- }
-
- /* extract ALS/lux data */
- ch0 = le16_to_cpup((const __le16 *)&buf[0]);
- ch1 = le16_to_cpup((const __le16 *)&buf[2]);
-
- chip->als_cur_info.als_ch0 = ch0;
- chip->als_cur_info.als_ch1 = ch1;
-
- if ((ch0 >= chip->als_saturation) || (ch1 >= chip->als_saturation))
- goto return_max;
-
- if (!ch0) {
- /* have no data, so return LAST VALUE */
- ret = 0;
- chip->als_cur_info.lux = 0;
- goto out_unlock;
- }
- /* calculate ratio */
- ratio = (ch1 << 15) / ch0;
- /* convert to unscaled lux using the pointer to the table */
- for (p = (struct taos_lux *)taos_device_lux;
- p->ratio != 0 && p->ratio < ratio; p++)
- ;
-
- if (p->ratio == 0) {
- lux = 0;
- } else {
- ch0lux = ((ch0 * p->ch0) +
- (gainadj[chip->taos_settings.als_gain].ch0 >> 1))
- / gainadj[chip->taos_settings.als_gain].ch0;
- ch1lux = ((ch1 * p->ch1) +
- (gainadj[chip->taos_settings.als_gain].ch1 >> 1))
- / gainadj[chip->taos_settings.als_gain].ch1;
- lux = ch0lux - ch1lux;
- }
-
- /* note: lux is 31 bit max at this point */
- if (ch1lux > ch0lux) {
- dev_dbg(&chip->client->dev, "No Data - Return last value\n");
- ret = 0;
- chip->als_cur_info.lux = 0;
- goto out_unlock;
- }
-
- /* adjust for active time scale */
- if (chip->als_time_scale == 0)
- lux = 0;
- else
- lux = (lux + (chip->als_time_scale >> 1)) /
- chip->als_time_scale;
-
- /* Adjust for active gain scale.
- * The taos_device_lux tables above have a factor of 8192 built in,
- * so we need to shift right.
- * User-specified gain provides a multiplier.
- * Apply user-specified gain before shifting right to retain precision.
- * Use 64 bits to avoid overflow on multiplication.
- * Then go back to 32 bits before division to avoid using div_u64().
- */
- lux64 = lux;
- lux64 = lux64 * chip->taos_settings.als_gain_trim;
- lux64 >>= 13;
- lux = lux64;
- lux = (lux + 500) / 1000;
- if (lux > TSL258X_LUX_CALC_OVER_FLOW) { /* check for overflow */
-return_max:
- lux = TSL258X_LUX_CALC_OVER_FLOW;
- }
-
- /* Update the structure with the latest VALID lux. */
- chip->als_cur_info.lux = lux;
- ret = lux;
-
-out_unlock:
- mutex_unlock(&chip->als_mutex);
- return ret;
-}
-
-/*
- * Obtain single reading and calculate the als_gain_trim (later used
- * to derive actual lux).
- * Return updated gain_trim value.
- */
-static int taos_als_calibrate(struct iio_dev *indio_dev)
-{
- struct tsl2583_chip *chip = iio_priv(indio_dev);
- u8 reg_val;
- unsigned int gain_trim_val;
- int ret;
- int lux_val;
-
- ret = i2c_smbus_write_byte(chip->client,
- (TSL258X_CMD_REG | TSL258X_CNTRL));
- if (ret < 0) {
- dev_err(&chip->client->dev,
- "taos_als_calibrate failed to reach the CNTRL register, ret=%d\n",
- ret);
- return ret;
- }
-
- reg_val = i2c_smbus_read_byte(chip->client);
- if ((reg_val & (TSL258X_CNTL_ADC_ENBL | TSL258X_CNTL_PWR_ON))
- != (TSL258X_CNTL_ADC_ENBL | TSL258X_CNTL_PWR_ON)) {
- dev_err(&chip->client->dev,
- "taos_als_calibrate failed: device not powered on with ADC enabled\n");
- return -1;
- }
-
- ret = i2c_smbus_write_byte(chip->client,
- (TSL258X_CMD_REG | TSL258X_CNTRL));
- if (ret < 0) {
- dev_err(&chip->client->dev,
- "taos_als_calibrate failed to reach the STATUS register, ret=%d\n",
- ret);
- return ret;
- }
- reg_val = i2c_smbus_read_byte(chip->client);
-
- if ((reg_val & TSL258X_STA_ADC_VALID) != TSL258X_STA_ADC_VALID) {
- dev_err(&chip->client->dev,
- "taos_als_calibrate failed: STATUS - ADC not valid.\n");
- return -ENODATA;
- }
- lux_val = taos_get_lux(indio_dev);
- if (lux_val < 0) {
- dev_err(&chip->client->dev, "taos_als_calibrate failed to get lux\n");
- return lux_val;
- }
- gain_trim_val = (unsigned int)(((chip->taos_settings.als_cal_target)
- * chip->taos_settings.als_gain_trim) / lux_val);
-
- if ((gain_trim_val < 250) || (gain_trim_val > 4000)) {
- dev_err(&chip->client->dev,
- "taos_als_calibrate failed: trim_val of %d is out of range\n",
- gain_trim_val);
- return -ENODATA;
- }
- chip->taos_settings.als_gain_trim = (int)gain_trim_val;
-
- return (int)gain_trim_val;
-}
-
-/*
- * Turn the device on.
- * Configuration must be set before calling this function.
- */
-static int taos_chip_on(struct iio_dev *indio_dev)
-{
- int i;
- int ret;
- u8 *uP;
- u8 utmp;
- int als_count;
- int als_time;
- struct tsl2583_chip *chip = iio_priv(indio_dev);
-
- /* and make sure we're not already on */
- if (chip->taos_chip_status == TSL258X_CHIP_WORKING) {
- /* if forcing a register update - turn off, then on */
- dev_info(&chip->client->dev, "device is already enabled\n");
- return -EINVAL;
- }
-
- /* determine als integration register */
- als_count = (chip->taos_settings.als_time * 100 + 135) / 270;
- if (!als_count)
- als_count = 1; /* ensure at least one cycle */
-
- /* convert back to time (encompasses overrides) */
- als_time = (als_count * 27 + 5) / 10;
- chip->taos_config[TSL258X_ALS_TIME] = 256 - als_count;
-
- /* Set the gain based on taos_settings struct */
- chip->taos_config[TSL258X_GAIN] = chip->taos_settings.als_gain;
-
- /* set chip struct re scaling and saturation */
- chip->als_saturation = als_count * 922; /* 90% of full scale */
- chip->als_time_scale = (als_time + 25) / 50;
-
- /*
- * TSL258x Specific power-on / adc enable sequence
- * Power on the device 1st.
- */
- utmp = TSL258X_CNTL_PWR_ON;
- ret = i2c_smbus_write_byte_data(chip->client,
- TSL258X_CMD_REG | TSL258X_CNTRL, utmp);
- if (ret < 0) {
- dev_err(&chip->client->dev, "taos_chip_on failed on CNTRL reg.\n");
- return ret;
- }
-
- /*
- * Use the following shadow copy for our delay before enabling ADC.
- * Write all the registers.
- */
- for (i = 0, uP = chip->taos_config; i < TSL258X_REG_MAX; i++) {
- ret = i2c_smbus_write_byte_data(chip->client,
- TSL258X_CMD_REG + i,
- *uP++);
- if (ret < 0) {
- dev_err(&chip->client->dev,
- "taos_chip_on failed on reg %d.\n", i);
- return ret;
- }
- }
-
- usleep_range(3000, 3500);
- /*
- * NOW enable the ADC
- * initialize the desired mode of operation
- */
- utmp = TSL258X_CNTL_PWR_ON | TSL258X_CNTL_ADC_ENBL;
- ret = i2c_smbus_write_byte_data(chip->client,
- TSL258X_CMD_REG | TSL258X_CNTRL,
- utmp);
- if (ret < 0) {
- dev_err(&chip->client->dev, "taos_chip_on failed on 2nd CTRL reg.\n");
- return ret;
- }
- chip->taos_chip_status = TSL258X_CHIP_WORKING;
-
- return ret;
-}
-
-static int taos_chip_off(struct iio_dev *indio_dev)
-{
- struct tsl2583_chip *chip = iio_priv(indio_dev);
-
- /* turn device off */
- chip->taos_chip_status = TSL258X_CHIP_SUSPENDED;
- return i2c_smbus_write_byte_data(chip->client,
- TSL258X_CMD_REG | TSL258X_CNTRL,
- 0x00);
-}
-
-/* Sysfs Interface Functions */
-
-static ssize_t taos_power_state_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct tsl2583_chip *chip = iio_priv(indio_dev);
-
- return sprintf(buf, "%d\n", chip->taos_chip_status);
-}
-
-static ssize_t taos_power_state_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t len)
-{
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- int value;
-
- if (kstrtoint(buf, 0, &value))
- return -EINVAL;
-
- if (!value)
- taos_chip_off(indio_dev);
- else
- taos_chip_on(indio_dev);
-
- return len;
-}
-
-static ssize_t taos_gain_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct tsl2583_chip *chip = iio_priv(indio_dev);
- char gain[4] = {0};
-
- switch (chip->taos_settings.als_gain) {
- case 0:
- strcpy(gain, "001");
- break;
- case 1:
- strcpy(gain, "008");
- break;
- case 2:
- strcpy(gain, "016");
- break;
- case 3:
- strcpy(gain, "111");
- break;
- }
-
- return sprintf(buf, "%s\n", gain);
-}
-
-static ssize_t taos_gain_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t len)
-{
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct tsl2583_chip *chip = iio_priv(indio_dev);
- int value;
-
- if (kstrtoint(buf, 0, &value))
- return -EINVAL;
-
- switch (value) {
- case 1:
- chip->taos_settings.als_gain = 0;
- break;
- case 8:
- chip->taos_settings.als_gain = 1;
- break;
- case 16:
- chip->taos_settings.als_gain = 2;
- break;
- case 111:
- chip->taos_settings.als_gain = 3;
- break;
- default:
- dev_err(dev, "Invalid Gain Index (must be 1,8,16,111)\n");
- return -1;
- }
-
- return len;
-}
-
-static ssize_t taos_gain_available_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- return sprintf(buf, "%s\n", "1 8 16 111");
-}
-
-static ssize_t taos_als_time_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct tsl2583_chip *chip = iio_priv(indio_dev);
-
- return sprintf(buf, "%d\n", chip->taos_settings.als_time);
-}
-
-static ssize_t taos_als_time_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t len)
-{
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct tsl2583_chip *chip = iio_priv(indio_dev);
- int value;
-
- if (kstrtoint(buf, 0, &value))
- return -EINVAL;
-
- if ((value < 50) || (value > 650))
- return -EINVAL;
-
- if (value % 50)
- return -EINVAL;
-
- chip->taos_settings.als_time = value;
-
- return len;
-}
-
-static ssize_t taos_als_time_available_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- return sprintf(buf, "%s\n",
- "50 100 150 200 250 300 350 400 450 500 550 600 650");
-}
-
-static ssize_t taos_als_trim_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct tsl2583_chip *chip = iio_priv(indio_dev);
-
- return sprintf(buf, "%d\n", chip->taos_settings.als_gain_trim);
-}
-
-static ssize_t taos_als_trim_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t len)
-{
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct tsl2583_chip *chip = iio_priv(indio_dev);
- int value;
-
- if (kstrtoint(buf, 0, &value))
- return -EINVAL;
-
- if (value)
- chip->taos_settings.als_gain_trim = value;
-
- return len;
-}
-
-static ssize_t taos_als_cal_target_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct tsl2583_chip *chip = iio_priv(indio_dev);
-
- return sprintf(buf, "%d\n", chip->taos_settings.als_cal_target);
-}
-
-static ssize_t taos_als_cal_target_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t len)
-{
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct tsl2583_chip *chip = iio_priv(indio_dev);
- int value;
-
- if (kstrtoint(buf, 0, &value))
- return -EINVAL;
-
- if (value)
- chip->taos_settings.als_cal_target = value;
-
- return len;
-}
-
-static ssize_t taos_lux_show(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- int ret;
-
- ret = taos_get_lux(dev_to_iio_dev(dev));
- if (ret < 0)
- return ret;
-
- return sprintf(buf, "%d\n", ret);
-}
-
-static ssize_t taos_do_calibrate(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t len)
-{
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- int value;
-
- if (kstrtoint(buf, 0, &value))
- return -EINVAL;
-
- if (value == 1)
- taos_als_calibrate(indio_dev);
-
- return len;
-}
-
-static ssize_t taos_luxtable_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- int i;
- int offset = 0;
-
- for (i = 0; i < ARRAY_SIZE(taos_device_lux); i++) {
- offset += sprintf(buf + offset, "%u,%u,%u,",
- taos_device_lux[i].ratio,
- taos_device_lux[i].ch0,
- taos_device_lux[i].ch1);
- if (taos_device_lux[i].ratio == 0) {
- /*
- * We just printed the first "0" entry.
- * Now get rid of the extra "," and break.
- */
- offset--;
- break;
- }
- }
-
- offset += sprintf(buf + offset, "\n");
- return offset;
-}
-
-static ssize_t taos_luxtable_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t len)
-{
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct tsl2583_chip *chip = iio_priv(indio_dev);
- int value[ARRAY_SIZE(taos_device_lux) * 3 + 1];
- int n;
-
- get_options(buf, ARRAY_SIZE(value), value);
-
- /* We now have an array of ints starting at value[1], and
- * enumerated by value[0].
- * We expect each group of three ints to be one table entry,
- * and the last table entry is all 0.
- */
- n = value[0];
- if ((n % 3) || n < 6 || n > ((ARRAY_SIZE(taos_device_lux) - 1) * 3)) {
- dev_info(dev, "LUX TABLE INPUT ERROR 1 Value[0]=%d\n", n);
- return -EINVAL;
- }
- if ((value[(n - 2)] | value[(n - 1)] | value[n]) != 0) {
- dev_info(dev, "LUX TABLE INPUT ERROR 2 Value[0]=%d\n", n);
- return -EINVAL;
- }
-
- if (chip->taos_chip_status == TSL258X_CHIP_WORKING)
- taos_chip_off(indio_dev);
-
- /* Zero out the table */
- memset(taos_device_lux, 0, sizeof(taos_device_lux));
- memcpy(taos_device_lux, &value[1], (value[0] * 4));
-
- taos_chip_on(indio_dev);
-
- return len;
-}
-
-static DEVICE_ATTR(power_state, S_IRUGO | S_IWUSR,
- taos_power_state_show, taos_power_state_store);
-
-static DEVICE_ATTR(illuminance0_calibscale, S_IRUGO | S_IWUSR,
- taos_gain_show, taos_gain_store);
-static DEVICE_ATTR(illuminance0_calibscale_available, S_IRUGO,
- taos_gain_available_show, NULL);
-
-static DEVICE_ATTR(illuminance0_integration_time, S_IRUGO | S_IWUSR,
- taos_als_time_show, taos_als_time_store);
-static DEVICE_ATTR(illuminance0_integration_time_available, S_IRUGO,
- taos_als_time_available_show, NULL);
-
-static DEVICE_ATTR(illuminance0_calibbias, S_IRUGO | S_IWUSR,
- taos_als_trim_show, taos_als_trim_store);
-
-static DEVICE_ATTR(illuminance0_input_target, S_IRUGO | S_IWUSR,
- taos_als_cal_target_show, taos_als_cal_target_store);
-
-static DEVICE_ATTR(illuminance0_input, S_IRUGO, taos_lux_show, NULL);
-static DEVICE_ATTR(illuminance0_calibrate, S_IWUSR, NULL, taos_do_calibrate);
-static DEVICE_ATTR(illuminance0_lux_table, S_IRUGO | S_IWUSR,
- taos_luxtable_show, taos_luxtable_store);
-
-static struct attribute *sysfs_attrs_ctrl[] = {
- &dev_attr_power_state.attr,
- &dev_attr_illuminance0_calibscale.attr, /* Gain */
- &dev_attr_illuminance0_calibscale_available.attr,
- &dev_attr_illuminance0_integration_time.attr, /* I time*/
- &dev_attr_illuminance0_integration_time_available.attr,
- &dev_attr_illuminance0_calibbias.attr, /* trim */
- &dev_attr_illuminance0_input_target.attr,
- &dev_attr_illuminance0_input.attr,
- &dev_attr_illuminance0_calibrate.attr,
- &dev_attr_illuminance0_lux_table.attr,
- NULL
-};
-
-static const struct attribute_group tsl2583_attribute_group = {
- .attrs = sysfs_attrs_ctrl,
-};
-
-/* Use the default register values to identify the Taos device */
-static int taos_tsl258x_device(unsigned char *bufp)
-{
- return ((bufp[TSL258X_CHIPID] & 0xf0) == 0x90);
-}
-
-static const struct iio_info tsl2583_info = {
- .attrs = &tsl2583_attribute_group,
- .driver_module = THIS_MODULE,
-};
-
-/*
- * Client probe function - When a valid device is found, the driver's device
- * data structure is updated, and initialization completes successfully.
- */
-static int taos_probe(struct i2c_client *clientp,
- const struct i2c_device_id *idp)
-{
- int i, ret;
- unsigned char buf[TSL258X_MAX_DEVICE_REGS];
- struct tsl2583_chip *chip;
- struct iio_dev *indio_dev;
-
- if (!i2c_check_functionality(clientp->adapter,
- I2C_FUNC_SMBUS_BYTE_DATA)) {
- dev_err(&clientp->dev, "taos_probe() - i2c smbus byte data func unsupported\n");
- return -EOPNOTSUPP;
- }
-
- indio_dev = devm_iio_device_alloc(&clientp->dev, sizeof(*chip));
- if (!indio_dev)
- return -ENOMEM;
- chip = iio_priv(indio_dev);
- chip->client = clientp;
- i2c_set_clientdata(clientp, indio_dev);
-
- mutex_init(&chip->als_mutex);
- chip->taos_chip_status = TSL258X_CHIP_UNKNOWN;
- memcpy(chip->taos_config, taos_config, sizeof(chip->taos_config));
-
- for (i = 0; i < TSL258X_MAX_DEVICE_REGS; i++) {
- ret = i2c_smbus_write_byte(clientp,
- (TSL258X_CMD_REG | (TSL258X_CNTRL + i)));
- if (ret < 0) {
- dev_err(&clientp->dev,
- "i2c_smbus_write_byte to cmd reg failed in taos_probe(), err = %d\n",
- ret);
- return ret;
- }
- ret = i2c_smbus_read_byte(clientp);
- if (ret < 0) {
- dev_err(&clientp->dev,
- "i2c_smbus_read_byte from reg failed in taos_probe(), err = %d\n",
- ret);
- return ret;
- }
- buf[i] = ret;
- }
-
- if (!taos_tsl258x_device(buf)) {
- dev_info(&clientp->dev,
- "i2c device found but does not match expected id in taos_probe()\n");
- return -EINVAL;
- }
-
- ret = i2c_smbus_write_byte(clientp, (TSL258X_CMD_REG | TSL258X_CNTRL));
- if (ret < 0) {
- dev_err(&clientp->dev,
- "i2c_smbus_write_byte() to cmd reg failed in taos_probe(), err = %d\n",
- ret);
- return ret;
- }
-
- indio_dev->info = &tsl2583_info;
- indio_dev->dev.parent = &clientp->dev;
- indio_dev->modes = INDIO_DIRECT_MODE;
- indio_dev->name = chip->client->name;
- ret = devm_iio_device_register(indio_dev->dev.parent, indio_dev);
- if (ret) {
- dev_err(&clientp->dev, "iio registration failed\n");
- return ret;
- }
-
- /* Load up the V2 defaults (these are hard coded defaults for now) */
- taos_defaults(chip);
-
- /* Make sure the chip is on */
- taos_chip_on(indio_dev);
-
- dev_info(&clientp->dev, "Light sensor found.\n");
- return 0;
-}
-
-#ifdef CONFIG_PM_SLEEP
-static int taos_suspend(struct device *dev)
-{
- struct iio_dev *indio_dev = i2c_get_clientdata(to_i2c_client(dev));
- struct tsl2583_chip *chip = iio_priv(indio_dev);
- int ret = 0;
-
- mutex_lock(&chip->als_mutex);
-
- if (chip->taos_chip_status == TSL258X_CHIP_WORKING) {
- ret = taos_chip_off(indio_dev);
- chip->taos_chip_status = TSL258X_CHIP_SUSPENDED;
- }
-
- mutex_unlock(&chip->als_mutex);
- return ret;
-}
-
-static int taos_resume(struct device *dev)
-{
- struct iio_dev *indio_dev = i2c_get_clientdata(to_i2c_client(dev));
- struct tsl2583_chip *chip = iio_priv(indio_dev);
- int ret = 0;
-
- mutex_lock(&chip->als_mutex);
-
- if (chip->taos_chip_status == TSL258X_CHIP_SUSPENDED)
- ret = taos_chip_on(indio_dev);
-
- mutex_unlock(&chip->als_mutex);
- return ret;
-}
-
-static SIMPLE_DEV_PM_OPS(taos_pm_ops, taos_suspend, taos_resume);
-#define TAOS_PM_OPS (&taos_pm_ops)
-#else
-#define TAOS_PM_OPS NULL
-#endif
-
-static struct i2c_device_id taos_idtable[] = {
- { "tsl2580", 0 },
- { "tsl2581", 1 },
- { "tsl2583", 2 },
- {}
-};
-MODULE_DEVICE_TABLE(i2c, taos_idtable);
-
-/* Driver definition */
-static struct i2c_driver taos_driver = {
- .driver = {
- .name = "tsl2583",
- .pm = TAOS_PM_OPS,
- },
- .id_table = taos_idtable,
- .probe = taos_probe,
-};
-module_i2c_driver(taos_driver);
-
-MODULE_AUTHOR("J. August Brenner<jbrenner@taosinc.com>");
-MODULE_DESCRIPTION("TAOS tsl2583 ambient light sensor driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/staging/iio/meter/ade7758_core.c b/drivers/staging/iio/meter/ade7758_core.c
index ebb8a1993303..3af8f77b8e41 100644
--- a/drivers/staging/iio/meter/ade7758_core.c
+++ b/drivers/staging/iio/meter/ade7758_core.c
@@ -465,38 +465,26 @@ err_ret:
return ret;
}
-static ssize_t ade7758_read_frequency(struct device *dev,
- struct device_attribute *attr, char *buf)
+static int ade7758_read_samp_freq(struct device *dev, int *val)
{
int ret;
u8 t;
- int sps;
ret = ade7758_spi_read_reg_8(dev, ADE7758_WAVMODE, &t);
if (ret)
return ret;
t = (t >> 5) & 0x3;
- sps = 26040 / (1 << t);
+ *val = 26040 / (1 << t);
- return sprintf(buf, "%d SPS\n", sps);
+ return 0;
}
-static ssize_t ade7758_write_frequency(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t len)
+static int ade7758_write_samp_freq(struct device *dev, int val)
{
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- u16 val;
int ret;
u8 reg, t;
- ret = kstrtou16(buf, 10, &val);
- if (ret)
- return ret;
-
- mutex_lock(&indio_dev->mlock);
-
switch (val) {
case 26040:
t = 0;
@@ -525,9 +513,49 @@ static ssize_t ade7758_write_frequency(struct device *dev,
ret = ade7758_spi_write_reg_8(dev, ADE7758_WAVMODE, reg);
out:
- mutex_unlock(&indio_dev->mlock);
+ return ret;
+}
- return ret ? ret : len;
+static int ade7758_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val,
+ int *val2,
+ long mask)
+{
+ int ret;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ mutex_lock(&indio_dev->mlock);
+ ret = ade7758_read_samp_freq(&indio_dev->dev, val);
+ mutex_unlock(&indio_dev->mlock);
+ return ret;
+ default:
+ return -EINVAL;
+ }
+
+ return ret;
+}
+
+static int ade7758_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val, int val2, long mask)
+{
+ int ret;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ if (val2)
+ return -EINVAL;
+ mutex_lock(&indio_dev->mlock);
+ ret = ade7758_write_samp_freq(&indio_dev->dev, val);
+ mutex_unlock(&indio_dev->mlock);
+ return ret;
+ default:
+ return -EINVAL;
+ }
+
+ return ret;
}
static IIO_DEV_ATTR_TEMP_RAW(ade7758_read_8bit);
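
The ade7758 helpers above encode the rate as a two-bit divider in WAVMODE bits 6:5: the base rate of 26040 SPS is divided by 2^t. A quick userspace check that this reproduces the advertised "26040 13020 6510 3255" set:

	#include <stdio.h>

	int main(void)
	{
		for (unsigned int t = 0; t < 4; t++)
			printf("t=%u -> %d SPS\n", t, 26040 / (1 << t));
		return 0;
	}
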
@@ -553,17 +581,12 @@ static IIO_DEV_ATTR_BVAHR(ade7758_read_16bit,
static IIO_DEV_ATTR_CVAHR(ade7758_read_16bit,
ADE7758_CVAHR);
-static IIO_DEV_ATTR_SAMP_FREQ(S_IWUSR | S_IRUGO,
- ade7758_read_frequency,
- ade7758_write_frequency);
-
static IIO_CONST_ATTR_SAMP_FREQ_AVAIL("26040 13020 6510 3255");
static struct attribute *ade7758_attributes[] = {
&iio_dev_attr_in_temp_raw.dev_attr.attr,
&iio_const_attr_in_temp_offset.dev_attr.attr,
&iio_const_attr_in_temp_scale.dev_attr.attr,
- &iio_dev_attr_sampling_frequency.dev_attr.attr,
&iio_const_attr_sampling_frequency_available.dev_attr.attr,
&iio_dev_attr_awatthr.dev_attr.attr,
&iio_dev_attr_bwatthr.dev_attr.attr,
@@ -611,6 +634,7 @@ static const struct iio_chan_spec ade7758_channels[] = {
.type = IIO_VOLTAGE,
.indexed = 1,
.channel = 0,
+ .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ),
.address = AD7758_WT(AD7758_PHASE_A, AD7758_VOLTAGE),
.scan_index = 0,
.scan_type = {
@@ -622,6 +646,7 @@ static const struct iio_chan_spec ade7758_channels[] = {
.type = IIO_CURRENT,
.indexed = 1,
.channel = 0,
+ .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ),
.address = AD7758_WT(AD7758_PHASE_A, AD7758_CURRENT),
.scan_index = 1,
.scan_type = {
@@ -634,6 +659,7 @@ static const struct iio_chan_spec ade7758_channels[] = {
.indexed = 1,
.channel = 0,
.extend_name = "apparent",
+ .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ),
.address = AD7758_WT(AD7758_PHASE_A, AD7758_APP_PWR),
.scan_index = 2,
.scan_type = {
@@ -646,6 +672,7 @@ static const struct iio_chan_spec ade7758_channels[] = {
.indexed = 1,
.channel = 0,
.extend_name = "active",
+ .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ),
.address = AD7758_WT(AD7758_PHASE_A, AD7758_ACT_PWR),
.scan_index = 3,
.scan_type = {
@@ -658,6 +685,7 @@ static const struct iio_chan_spec ade7758_channels[] = {
.indexed = 1,
.channel = 0,
.extend_name = "reactive",
+ .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ),
.address = AD7758_WT(AD7758_PHASE_A, AD7758_REACT_PWR),
.scan_index = 4,
.scan_type = {
@@ -669,6 +697,7 @@ static const struct iio_chan_spec ade7758_channels[] = {
.type = IIO_VOLTAGE,
.indexed = 1,
.channel = 1,
+ .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ),
.address = AD7758_WT(AD7758_PHASE_B, AD7758_VOLTAGE),
.scan_index = 5,
.scan_type = {
@@ -680,6 +709,7 @@ static const struct iio_chan_spec ade7758_channels[] = {
.type = IIO_CURRENT,
.indexed = 1,
.channel = 1,
+ .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ),
.address = AD7758_WT(AD7758_PHASE_B, AD7758_CURRENT),
.scan_index = 6,
.scan_type = {
@@ -692,6 +722,7 @@ static const struct iio_chan_spec ade7758_channels[] = {
.indexed = 1,
.channel = 1,
.extend_name = "apparent",
+ .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ),
.address = AD7758_WT(AD7758_PHASE_B, AD7758_APP_PWR),
.scan_index = 7,
.scan_type = {
@@ -704,6 +735,7 @@ static const struct iio_chan_spec ade7758_channels[] = {
.indexed = 1,
.channel = 1,
.extend_name = "active",
+ .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ),
.address = AD7758_WT(AD7758_PHASE_B, AD7758_ACT_PWR),
.scan_index = 8,
.scan_type = {
@@ -716,6 +748,7 @@ static const struct iio_chan_spec ade7758_channels[] = {
.indexed = 1,
.channel = 1,
.extend_name = "reactive",
+ .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ),
.address = AD7758_WT(AD7758_PHASE_B, AD7758_REACT_PWR),
.scan_index = 9,
.scan_type = {
@@ -727,6 +760,7 @@ static const struct iio_chan_spec ade7758_channels[] = {
.type = IIO_VOLTAGE,
.indexed = 1,
.channel = 2,
+ .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ),
.address = AD7758_WT(AD7758_PHASE_C, AD7758_VOLTAGE),
.scan_index = 10,
.scan_type = {
@@ -738,6 +772,7 @@ static const struct iio_chan_spec ade7758_channels[] = {
.type = IIO_CURRENT,
.indexed = 1,
.channel = 2,
+ .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ),
.address = AD7758_WT(AD7758_PHASE_C, AD7758_CURRENT),
.scan_index = 11,
.scan_type = {
@@ -750,6 +785,7 @@ static const struct iio_chan_spec ade7758_channels[] = {
.indexed = 1,
.channel = 2,
.extend_name = "apparent",
+ .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ),
.address = AD7758_WT(AD7758_PHASE_C, AD7758_APP_PWR),
.scan_index = 12,
.scan_type = {
@@ -762,6 +798,7 @@ static const struct iio_chan_spec ade7758_channels[] = {
.indexed = 1,
.channel = 2,
.extend_name = "active",
+ .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ),
.address = AD7758_WT(AD7758_PHASE_C, AD7758_ACT_PWR),
.scan_index = 13,
.scan_type = {
@@ -774,6 +811,7 @@ static const struct iio_chan_spec ade7758_channels[] = {
.indexed = 1,
.channel = 2,
.extend_name = "reactive",
+ .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ),
.address = AD7758_WT(AD7758_PHASE_C, AD7758_REACT_PWR),
.scan_index = 14,
.scan_type = {
@@ -787,6 +825,8 @@ static const struct iio_chan_spec ade7758_channels[] = {
static const struct iio_info ade7758_info = {
.attrs = &ade7758_attribute_group,
+ .read_raw = &ade7758_read_raw,
+ .write_raw = &ade7758_write_raw,
.driver_module = THIS_MODULE,
};
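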
diff --git a/drivers/staging/iio/ring_hw.h b/drivers/staging/iio/ring_hw.h
deleted file mode 100644
index 75bf47bfee78..000000000000
--- a/drivers/staging/iio/ring_hw.h
+++ /dev/null
@@ -1,27 +0,0 @@
-/*
- * ring_hw.h - common functionality for iio hardware ring buffers
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * Copyright (c) 2009 Jonathan Cameron <jic23@kernel.org>
- *
- */
-
-#ifndef _RING_HW_H_
-#define _RING_HW_H_
-
-/**
- * struct iio_hw_ring_buffer- hardware ring buffer
- * @buf: generic ring buffer elements
- * @private: device specific data
- */
-struct iio_hw_buffer {
- struct iio_buffer buf;
- void *private;
-};
-
-#define iio_to_hw_buf(r) container_of(r, struct iio_hw_buffer, buf)
-
-#endif /* _RING_HW_H_ */
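
The deleted ring_hw.h existed only to wrap a generic iio_buffer with driver-private data and recover the wrapper via container_of(). The idiom in self-contained form (generic C, with a local container_of purely for illustration):

	#include <stddef.h>
	#include <stdio.h>

	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	struct buf { int len; };
	struct hw_buf { struct buf buf; void *private; };

	int main(void)
	{
		struct hw_buf hb = { .buf = { .len = 4 }, .private = NULL };
		struct buf *b = &hb.buf;
		struct hw_buf *back = container_of(b, struct hw_buf, buf);

		/* 'back' points at the enclosing hw_buf again */
		printf("recovered wrapper: %p == %p\n", (void *)back, (void *)&hb);
		return 0;
	}
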
diff --git a/drivers/staging/ks7010/ks7010_sdio.c b/drivers/staging/ks7010/ks7010_sdio.c
index 81c46f4d0935..a604c83c957e 100644
--- a/drivers/staging/ks7010/ks7010_sdio.c
+++ b/drivers/staging/ks7010/ks7010_sdio.c
@@ -35,18 +35,18 @@ MODULE_DEVICE_TABLE(sdio, ks7010_sdio_ids);
/* macro */
#define inc_txqhead(priv) \
- (priv->tx_dev.qhead = (priv->tx_dev.qhead + 1) % TX_DEVICE_BUFF_SIZE)
+ (priv->tx_dev.qhead = (priv->tx_dev.qhead + 1) % TX_DEVICE_BUFF_SIZE)
#define inc_txqtail(priv) \
- (priv->tx_dev.qtail = (priv->tx_dev.qtail + 1) % TX_DEVICE_BUFF_SIZE)
+ (priv->tx_dev.qtail = (priv->tx_dev.qtail + 1) % TX_DEVICE_BUFF_SIZE)
#define cnt_txqbody(priv) \
- (((priv->tx_dev.qtail + TX_DEVICE_BUFF_SIZE) - (priv->tx_dev.qhead)) % TX_DEVICE_BUFF_SIZE)
+ (((priv->tx_dev.qtail + TX_DEVICE_BUFF_SIZE) - (priv->tx_dev.qhead)) % TX_DEVICE_BUFF_SIZE)
#define inc_rxqhead(priv) \
- (priv->rx_dev.qhead = (priv->rx_dev.qhead + 1) % RX_DEVICE_BUFF_SIZE)
+ (priv->rx_dev.qhead = (priv->rx_dev.qhead + 1) % RX_DEVICE_BUFF_SIZE)
#define inc_rxqtail(priv) \
- (priv->rx_dev.qtail = (priv->rx_dev.qtail + 1) % RX_DEVICE_BUFF_SIZE)
+ (priv->rx_dev.qtail = (priv->rx_dev.qtail + 1) % RX_DEVICE_BUFF_SIZE)
#define cnt_rxqbody(priv) \
- (((priv->rx_dev.qtail + RX_DEVICE_BUFF_SIZE) - (priv->rx_dev.qhead)) % RX_DEVICE_BUFF_SIZE)
+ (((priv->rx_dev.qtail + RX_DEVICE_BUFF_SIZE) - (priv->rx_dev.qhead)) % RX_DEVICE_BUFF_SIZE)
static int ks7010_sdio_read(struct ks_wlan_private *priv, unsigned int address,
unsigned char *buffer, int length)
@@ -76,10 +76,9 @@ static int ks7010_sdio_write(struct ks_wlan_private *priv, unsigned int address,
card = priv->ks_wlan_hw.sdio_card;
if (length == 1) /* CMD52 */
- sdio_writeb(card->func, *buffer, (unsigned int)address, &rc);
+ sdio_writeb(card->func, *buffer, address, &rc);
else /* CMD53 */
- rc = sdio_memcpy_toio(card->func, (unsigned int)address, buffer,
- length);
+ rc = sdio_memcpy_toio(card->func, address, buffer, length);
if (rc != 0)
DPRINTK(1, "sdio error=%d size=%d\n", rc, length);
@@ -255,7 +254,7 @@ int ks_wlan_hw_power_save(struct ks_wlan_private *priv)
static int enqueue_txdev(struct ks_wlan_private *priv, unsigned char *p,
unsigned long size,
- void (*complete_handler) (void *arg1, void *arg2),
+ void (*complete_handler)(void *arg1, void *arg2),
void *arg1, void *arg2)
{
struct tx_device_buffer *sp;
@@ -294,6 +293,7 @@ static int write_to_device(struct ks_wlan_private *priv, unsigned char *buffer,
int retval;
unsigned char rw_data;
struct hostif_hdr *hdr;
+
hdr = (struct hostif_hdr *)buffer;
DPRINTK(4, "size=%d\n", hdr->size);
@@ -353,11 +353,12 @@ static void tx_device_task(void *dev)
}
int ks_wlan_hw_tx(struct ks_wlan_private *priv, void *p, unsigned long size,
- void (*complete_handler) (void *arg1, void *arg2),
+ void (*complete_handler)(void *arg1, void *arg2),
void *arg1, void *arg2)
{
int result = 0;
struct hostif_hdr *hdr;
+
hdr = (struct hostif_hdr *)p;
if (hdr->event < HIF_DATA_REQ || HIF_REQ_MAX < hdr->event) {
@@ -412,7 +413,7 @@ static void ks_wlan_hw_rx(void *dev, uint16_t size)
/* receive data */
if (cnt_rxqbody(priv) >= (RX_DEVICE_BUFF_SIZE - 1)) {
/* in case of buffer overflow */
- DPRINTK(1, "rx buffer overflow \n");
+ DPRINTK(1, "rx buffer overflow\n");
goto error_out;
}
rx_buffer = &priv->rx_dev.rx_dev_buff[priv->rx_dev.qtail];
@@ -658,10 +659,12 @@ static void ks_sdio_interrupt(struct sdio_func *func)
static int trx_device_init(struct ks_wlan_private *priv)
{
/* initialize values (tx) */
- priv->tx_dev.qtail = priv->tx_dev.qhead = 0;
+ priv->tx_dev.qhead = 0;
+ priv->tx_dev.qtail = 0;
/* initialize values (rx) */
- priv->rx_dev.qtail = priv->rx_dev.qhead = 0;
+ priv->rx_dev.qhead = 0;
+ priv->rx_dev.qtail = 0;
/* initialize spinLock (tx,rx) */
spin_lock_init(&priv->tx_dev.tx_dev_lock);
@@ -718,7 +721,7 @@ static int ks7010_sdio_update_index(struct ks_wlan_private *priv, u32 index)
return rc;
}
-#define ROM_BUFF_SIZE (64*1024)
+#define ROM_BUFF_SIZE (64 * 1024)
static int ks7010_sdio_data_compare(struct ks_wlan_private *priv, u32 address,
unsigned char *data, unsigned int size)
{
@@ -955,7 +958,7 @@ static int ks7010_sdio_probe(struct sdio_func *func,
priv = NULL;
netdev = NULL;
- /* initilize ks_sdio_card */
+ /* initialize ks_sdio_card */
card = kzalloc(sizeof(*card), GFP_KERNEL);
if (!card)
return -ENOMEM;
@@ -1117,6 +1120,7 @@ static void ks7010_sdio_remove(struct sdio_func *func)
int ret;
struct ks_sdio_card *card;
struct ks_wlan_private *priv;
+
DPRINTK(1, "ks7010_sdio_remove()\n");
card = sdio_get_drvdata(func);
@@ -1142,6 +1146,7 @@ static void ks7010_sdio_remove(struct sdio_func *func)
/* send stop request to MAC */
{
struct hostif_stop_request_t *pp;
+
pp = kzalloc(hif_align_size(sizeof(*pp)), GFP_KERNEL);
if (!pp) {
DPRINTK(3, "allocate memory failed..\n");
diff --git a/drivers/staging/ks7010/ks7010_sdio.h b/drivers/staging/ks7010/ks7010_sdio.h
index c72064b48bd8..0f5fd848e23d 100644
--- a/drivers/staging/ks7010/ks7010_sdio.h
+++ b/drivers/staging/ks7010/ks7010_sdio.h
@@ -1,5 +1,5 @@
/*
- * Driver for KeyStream, KS7010 based SDIO cards.
+ * Driver for KeyStream, KS7010 based SDIO cards.
*
* Copyright (C) 2006-2008 KeyStream Corp.
* Copyright (C) 2009 Renesas Technology Corp.
@@ -41,7 +41,7 @@
/* Write Index Register */
#define WRITE_INDEX 0x000010
-/* Write Status/Read Data Size Register
+/* Write Status/Read Data Size Register
* for network packet (less than 2048 bytes data)
*/
#define WSTATUS_RSIZE 0x000014
@@ -53,14 +53,14 @@
/* ARM to SD interrupt Pending */
#define INT_PENDING 0x000024
-#define INT_GCR_B (1<<7)
-#define INT_GCR_A (1<<6)
-#define INT_WRITE_STATUS (1<<5)
-#define INT_WRITE_INDEX (1<<4)
-#define INT_WRITE_SIZE (1<<3)
-#define INT_READ_STATUS (1<<2)
-#define INT_READ_INDEX (1<<1)
-#define INT_READ_SIZE (1<<0)
+#define INT_GCR_B BIT(7)
+#define INT_GCR_A BIT(6)
+#define INT_WRITE_STATUS BIT(5)
+#define INT_WRITE_INDEX BIT(4)
+#define INT_WRITE_SIZE BIT(3)
+#define INT_READ_STATUS BIT(2)
+#define INT_READ_INDEX BIT(1)
+#define INT_READ_SIZE BIT(0)
/* General Communication Register A */
#define GCR_A 0x000028
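
The BIT() conversion above is cosmetic but self-documenting: BIT(n) from <linux/bits.h> expands to (1UL << (n)). A userspace equivalence check:

	#include <stdio.h>

	#define BIT(n)	(1UL << (n))

	int main(void)
	{
		printf("INT_GCR_B=0x%lx INT_READ_SIZE=0x%lx\n", BIT(7), BIT(0));
		return 0;
	}
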
@@ -100,7 +100,7 @@ struct hw_info_t {
struct ks_sdio_packet {
struct ks_sdio_packet *next;
u16 nb;
- u8 buffer[0] __attribute__ ((aligned(4)));
+ u8 buffer[0] __aligned(4);
};
struct ks_sdio_card {
diff --git a/drivers/staging/ks7010/ks_hostif.c b/drivers/staging/ks7010/ks_hostif.c
index c57ca581550a..1fbd495e5e63 100644
--- a/drivers/staging/ks7010/ks_hostif.c
+++ b/drivers/staging/ks7010/ks_hostif.c
@@ -23,11 +23,11 @@
/* macro */
#define inc_smeqhead(priv) \
- ( priv->sme_i.qhead = (priv->sme_i.qhead + 1) % SME_EVENT_BUFF_SIZE )
+ (priv->sme_i.qhead = (priv->sme_i.qhead + 1) % SME_EVENT_BUFF_SIZE)
#define inc_smeqtail(priv) \
- ( priv->sme_i.qtail = (priv->sme_i.qtail + 1) % SME_EVENT_BUFF_SIZE )
+ (priv->sme_i.qtail = (priv->sme_i.qtail + 1) % SME_EVENT_BUFF_SIZE)
#define cnt_smeqbody(priv) \
- (((priv->sme_i.qtail + SME_EVENT_BUFF_SIZE) - (priv->sme_i.qhead)) % SME_EVENT_BUFF_SIZE )
+ (((priv->sme_i.qtail + SME_EVENT_BUFF_SIZE) - (priv->sme_i.qhead)) % SME_EVENT_BUFF_SIZE)
#define KS_WLAN_MEM_FLAG (GFP_ATOMIC)
@@ -97,11 +97,10 @@ int ks_wlan_do_power_save(struct ks_wlan_private *priv)
{
DPRINTK(4, "psstatus.status=%d\n", atomic_read(&priv->psstatus.status));
- if ((priv->connect_status & CONNECT_STATUS_MASK) == CONNECT_STATUS) {
+ if ((priv->connect_status & CONNECT_STATUS_MASK) == CONNECT_STATUS)
hostif_sme_enqueue(priv, SME_POW_MNGMT_REQUEST);
- } else {
+ else
priv->dev_state = DEVICE_STATE_READY;
- }
return 0;
}
@@ -187,13 +186,7 @@ int get_current_ap(struct ks_wlan_private *priv, struct link_ap_info_t *ap_info)
memcpy(wrqu.ap_addr.sa_data,
&(priv->current_ap.bssid[0]), ETH_ALEN);
DPRINTK(3,
- "IWEVENT: connect bssid=%02x:%02x:%02x:%02x:%02x:%02x\n",
- (unsigned char)wrqu.ap_addr.sa_data[0],
- (unsigned char)wrqu.ap_addr.sa_data[1],
- (unsigned char)wrqu.ap_addr.sa_data[2],
- (unsigned char)wrqu.ap_addr.sa_data[3],
- (unsigned char)wrqu.ap_addr.sa_data[4],
- (unsigned char)wrqu.ap_addr.sa_data[5]);
+ "IWEVENT: connect bssid=%pM\n", wrqu.ap_addr.sa_data);
wireless_send_event(netdev, SIOCGIWAP, &wrqu, NULL);
}
DPRINTK(4, "\n Link AP\n");
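
%pM above is a kernel printf extension that formats a 6-byte MAC address, replacing six hand-written %02x conversions. Plain C has no %pM, so here is a userspace rendering of what it produces (sample address is made up):

	#include <stdio.h>

	int main(void)
	{
		unsigned char mac[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };

		/* equivalent of the kernel's "%pM" output format */
		printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
		       mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
		return 0;
	}
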
@@ -420,16 +413,11 @@ void hostif_data_indication(struct ks_wlan_private *priv)
/* needed parameters: count, keyid, key type, TSC */
sprintf(buf,
"MLME-MICHAELMICFAILURE.indication(keyid=%d %scast addr="
- "%02x:%02x:%02x:%02x:%02x:%02x)",
+ "%pM)",
auth_type - 1,
eth_hdr->
h_dest[0] & 0x01 ? "broad" :
- "uni", eth_hdr->h_source[0],
- eth_hdr->h_source[1],
- eth_hdr->h_source[2],
- eth_hdr->h_source[3],
- eth_hdr->h_source[4],
- eth_hdr->h_source[5]);
+ "uni", eth_hdr->h_source);
memset(&wrqu, 0, sizeof(wrqu));
wrqu.data.length = strlen(buf);
DPRINTK(4,
@@ -476,8 +464,6 @@ void hostif_data_indication(struct ks_wlan_private *priv)
skb->dev->last_rx = jiffies;
netif_rx(skb);
} else {
- printk(KERN_WARNING
- "ks_wlan: Memory squeeze, dropping packet.\n");
priv->nstats.rx_dropped++;
}
break;
@@ -511,8 +497,6 @@ void hostif_data_indication(struct ks_wlan_private *priv)
skb->dev->last_rx = jiffies;
netif_rx(skb);
} else {
- printk(KERN_WARNING
- "ks_wlan: Memory squeeze, dropping packet.\n");
priv->nstats.rx_dropped++;
}
break;
@@ -560,10 +544,7 @@ void hostif_mib_get_confirm(struct ks_wlan_private *priv)
dev->dev_addr[5] = priv->eth_addr[5];
dev->dev_addr[6] = 0x00;
dev->dev_addr[7] = 0x00;
- printk(KERN_INFO
- "ks_wlan: MAC ADDRESS = %02x:%02x:%02x:%02x:%02x:%02x\n",
- priv->eth_addr[0], priv->eth_addr[1], priv->eth_addr[2],
- priv->eth_addr[3], priv->eth_addr[4], priv->eth_addr[5]);
+ netdev_info(dev, "MAC ADDRESS = %pM\n", priv->eth_addr);
break;
case DOT11_PRODUCT_VERSION:
/* firmware version */
@@ -571,8 +552,8 @@ void hostif_mib_get_confirm(struct ks_wlan_private *priv)
priv->version_size = priv->rx_size;
memcpy(priv->firmware_version, priv->rxp, priv->rx_size);
priv->firmware_version[priv->rx_size] = '\0';
- printk(KERN_INFO "ks_wlan: firmware ver. = %s\n",
- priv->firmware_version);
+ netdev_info(dev, "firmware ver. = %s\n",
+ priv->firmware_version);
hostif_sme_enqueue(priv, SME_GET_PRODUCT_VERSION);
/* wake_up_interruptible_all(&priv->confirm_wait); */
complete(&priv->confirm_wait);
@@ -592,12 +573,12 @@ void hostif_mib_get_confirm(struct ks_wlan_private *priv)
} else if (priv->eeprom_sum.type == 1) {
if (priv->eeprom_sum.result == 0) {
priv->eeprom_checksum = EEPROM_NG;
- printk("LOCAL_EEPROM_SUM NG\n");
+ netdev_info(dev, "LOCAL_EEPROM_SUM NG\n");
} else if (priv->eeprom_sum.result == 1) {
priv->eeprom_checksum = EEPROM_OK;
}
} else {
- printk("LOCAL_EEPROM_SUM error!\n");
+ netdev_err(dev, "LOCAL_EEPROM_SUM error!\n");
}
break;
default:
@@ -705,15 +686,13 @@ void hostif_mib_set_confirm(struct ks_wlan_private *priv)
break;
case DOT11_GMK1_TSC:
DPRINTK(2, "DOT11_GMK1_TSC:mib_status=%d\n", (int)mib_status);
- if (atomic_read(&priv->psstatus.snooze_guard)) {
+ if (atomic_read(&priv->psstatus.snooze_guard))
atomic_set(&priv->psstatus.snooze_guard, 0);
- }
break;
case DOT11_GMK2_TSC:
DPRINTK(2, "DOT11_GMK2_TSC:mib_status=%d\n", (int)mib_status);
- if (atomic_read(&priv->psstatus.snooze_guard)) {
+ if (atomic_read(&priv->psstatus.snooze_guard))
atomic_set(&priv->psstatus.snooze_guard, 0);
- }
break;
case LOCAL_PMK:
DPRINTK(2, "LOCAL_PMK:mib_status=%d\n", (int)mib_status);
@@ -766,8 +745,9 @@ void hostif_sleep_confirm(struct ks_wlan_private *priv)
static
void hostif_start_confirm(struct ks_wlan_private *priv)
{
-#ifdef WPS
+#ifdef WPS
union iwreq_data wrqu;
+
wrqu.data.length = 0;
wrqu.data.flags = 0;
wrqu.ap_addr.sa_family = ARPHRD_ETHER;
@@ -789,6 +769,7 @@ void hostif_connect_indication(struct ks_wlan_private *priv)
unsigned int old_status = priv->connect_status;
struct net_device *netdev = priv->net_dev;
union iwreq_data wrqu0;
+
connect_code = get_WORD(priv);
switch (connect_code) {
@@ -894,7 +875,7 @@ void hostif_stop_confirm(struct ks_wlan_private *priv)
netif_carrier_off(netdev);
tmp = FORCE_DISCONNECT & priv->connect_status;
priv->connect_status = tmp | DISCONNECT_STATUS;
- printk("IWEVENT: disconnect\n");
+ netdev_info(netdev, "IWEVENT: disconnect\n");
wrqu0.data.length = 0;
wrqu0.data.flags = 0;
@@ -904,7 +885,7 @@ void hostif_stop_confirm(struct ks_wlan_private *priv)
&& (old_status & CONNECT_STATUS_MASK) == CONNECT_STATUS) {
eth_zero_addr(wrqu0.ap_addr.sa_data);
DPRINTK(3, "IWEVENT: disconnect\n");
- printk("IWEVENT: disconnect\n");
+ netdev_info(netdev, "IWEVENT: disconnect\n");
DPRINTK(3, "disconnect :: scan_ind_count=%d\n",
priv->scan_ind_count);
wireless_send_event(netdev, SIOCGIWAP, &wrqu0, NULL);
@@ -928,6 +909,7 @@ static
void hostif_infrastructure_set_confirm(struct ks_wlan_private *priv)
{
uint16_t result_code;
+
DPRINTK(3, "\n");
result_code = get_WORD(priv);
DPRINTK(3, "result code = %d\n", result_code);
@@ -993,6 +975,7 @@ void hostif_bss_scan_confirm(struct ks_wlan_private *priv)
unsigned int result_code;
struct net_device *dev = priv->net_dev;
union iwreq_data wrqu;
+
result_code = get_DWORD(priv);
DPRINTK(2, "result=%d :: scan_ind_count=%d\n", result_code,
priv->scan_ind_count);
@@ -1110,7 +1093,7 @@ void hostif_event_check(struct ks_wlan_private *priv)
case HIF_AP_SET_CONF:
default:
//DPRINTK(1, "undefined event[%04X]\n", event);
- printk("undefined event[%04X]\n", event);
+ netdev_err(priv->net_dev, "undefined event[%04X]\n", event);
/* wake_up_all(&priv->confirm_wait); */
complete(&priv->confirm_wait);
break;
@@ -1184,9 +1167,7 @@ int hostif_data_request(struct ks_wlan_private *priv, struct sk_buff *packet)
eth = (struct ethhdr *)packet->data;
if (memcmp(&priv->eth_addr[0], eth->h_source, ETH_ALEN)) {
DPRINTK(1, "invalid mac address !!\n");
- DPRINTK(1, "ethernet->h_source=%02X:%02X:%02X:%02X:%02X:%02X\n",
- eth->h_source[0], eth->h_source[1], eth->h_source[2],
- eth->h_source[3], eth->h_source[4], eth->h_source[5]);
+ DPRINTK(1, "ethernet->h_source=%pM\n", eth->h_source);
dev_kfree_skb(packet);
kfree(pp);
return -3;
@@ -1244,7 +1225,7 @@ int hostif_data_request(struct ks_wlan_private *priv, struct sk_buff *packet)
pp->auth_type = cpu_to_le16((uint16_t) TYPE_AUTH); /* no encryption */
} else {
if (priv->wpa.pairwise_suite == IW_AUTH_CIPHER_TKIP) {
- MichaelMICFunction(&michel_mic, (uint8_t *) priv->wpa.key[0].tx_mic_key, (uint8_t *) & pp->data[0], (int)packet_len, (uint8_t) 0, /* priority */
+ MichaelMICFunction(&michel_mic, (uint8_t *) priv->wpa.key[0].tx_mic_key, (uint8_t *) &pp->data[0], (int)packet_len, (uint8_t) 0, /* priority */
(uint8_t *) michel_mic.
Result);
memcpy(p, michel_mic.Result, 8);
@@ -1294,10 +1275,11 @@ int hostif_data_request(struct ks_wlan_private *priv, struct sk_buff *packet)
return result;
}
-#define ps_confirm_wait_inc(priv) do{if(atomic_read(&priv->psstatus.status) > PS_ACTIVE_SET){ \
- atomic_inc(&priv->psstatus.confirm_wait); \
- /* atomic_set(&priv->psstatus.status, PS_CONF_WAIT);*/ \
- } }while(0)
+#define ps_confirm_wait_inc(priv) do { \
+ if (atomic_read(&priv->psstatus.status) > PS_ACTIVE_SET) { \
+ atomic_inc(&priv->psstatus.confirm_wait); \
+ /* atomic_set(&priv->psstatus.status, PS_CONF_WAIT);*/ \
+ } } while (0)
static
void hostif_mib_get_request(struct ks_wlan_private *priv,
@@ -1891,6 +1873,7 @@ static
void hostif_sme_set_wep(struct ks_wlan_private *priv, int type)
{
uint32_t val;
+
switch (type) {
case SME_WEP_INDEX_REQUEST:
val = cpu_to_le32((uint32_t) (priv->reg.wep_index));
@@ -1936,18 +1919,17 @@ void hostif_sme_set_wep(struct ks_wlan_private *priv, int type)
break;
}
- return;
}
struct wpa_suite_t {
unsigned short size;
unsigned char suite[4][CIPHER_ID_LEN];
-} __attribute__ ((packed));
+} __packed;
struct rsn_mode_t {
uint32_t rsn_mode;
uint16_t rsn_capability;
-} __attribute__ ((packed));
+} __packed;
static
void hostif_sme_set_rsn(struct ks_wlan_private *priv, int type)
@@ -2125,7 +2107,6 @@ void hostif_sme_set_rsn(struct ks_wlan_private *priv, int type)
break;
}
- return;
}
static
@@ -2216,10 +2197,7 @@ void hostif_sme_mode_setup(struct ks_wlan_private *priv)
} else {
hostif_infrastructure_set2_request(priv);
DPRINTK(2,
- "Infra bssid = %02x:%02x:%02x:%02x:%02x:%02x\n",
- priv->reg.bssid[0], priv->reg.bssid[1],
- priv->reg.bssid[2], priv->reg.bssid[3],
- priv->reg.bssid[4], priv->reg.bssid[5]);
+ "Infra bssid = %pM\n", priv->reg.bssid);
}
break;
case MODE_ADHOC:
@@ -2229,17 +2207,13 @@ void hostif_sme_mode_setup(struct ks_wlan_private *priv)
} else {
hostif_adhoc_set2_request(priv);
DPRINTK(2,
- "Adhoc bssid = %02x:%02x:%02x:%02x:%02x:%02x\n",
- priv->reg.bssid[0], priv->reg.bssid[1],
- priv->reg.bssid[2], priv->reg.bssid[3],
- priv->reg.bssid[4], priv->reg.bssid[5]);
+ "Adhoc bssid = %pM\n", priv->reg.bssid);
}
break;
default:
break;
}
- return;
}
static
@@ -2340,7 +2314,6 @@ void hostif_sme_powermgt_set(struct ks_wlan_private *priv)
}
hostif_power_mngmt_request(priv, mode, wake_up, receiveDTIMs);
- return;
}
static
@@ -2358,13 +2331,13 @@ void hostif_sme_sleep_set(struct ks_wlan_private *priv)
break;
}
- return;
}
static
void hostif_sme_set_key(struct ks_wlan_private *priv, int type)
{
uint32_t val;
+
switch (type) {
case SME_SET_FLAG:
val = cpu_to_le32((uint32_t) (priv->reg.privacy_invoked));
@@ -2416,7 +2389,6 @@ void hostif_sme_set_key(struct ks_wlan_private *priv, int type)
&priv->wpa.key[2].rx_seq[0]);
break;
}
- return;
}
static
@@ -2427,16 +2399,14 @@ void hostif_sme_set_pmksa(struct ks_wlan_private *priv)
struct {
uint8_t bssid[ETH_ALEN];
uint8_t pmkid[IW_PMKID_LEN];
- } __attribute__ ((packed)) list[PMK_LIST_MAX];
- } __attribute__ ((packed)) pmkcache;
+ } __packed list[PMK_LIST_MAX];
+ } __packed pmkcache;
struct pmk_t *pmk;
- struct list_head *ptr;
int i;
DPRINTK(4, "pmklist.size=%d\n", priv->pmklist.size);
i = 0;
- list_for_each(ptr, &priv->pmklist.head) {
- pmk = list_entry(ptr, struct pmk_t, list);
+ list_for_each_entry(pmk, &priv->pmklist.head, list) {
if (i < PMK_LIST_MAX) {
memcpy(pmkcache.list[i].bssid, pmk->bssid, ETH_ALEN);
memcpy(pmkcache.list[i].pmkid, pmk->pmkid,
@@ -2461,9 +2431,8 @@ void hostif_sme_execute(struct ks_wlan_private *priv, int event)
DPRINTK(3, "event=%d\n", event);
switch (event) {
case SME_START:
- if (priv->dev_state == DEVICE_STATE_BOOT) {
+ if (priv->dev_state == DEVICE_STATE_BOOT)
hostif_mib_get_request(priv, DOT11_MAC_ADDRESS);
- }
break;
case SME_MULTICAST_REQUEST:
hostif_sme_multicast_set(priv);
@@ -2508,14 +2477,12 @@ void hostif_sme_execute(struct ks_wlan_private *priv, int event)
}
break;
case SME_GET_MAC_ADDRESS:
- if (priv->dev_state == DEVICE_STATE_BOOT) {
+ if (priv->dev_state == DEVICE_STATE_BOOT)
hostif_mib_get_request(priv, DOT11_PRODUCT_VERSION);
- }
break;
case SME_GET_PRODUCT_VERSION:
- if (priv->dev_state == DEVICE_STATE_BOOT) {
+ if (priv->dev_state == DEVICE_STATE_BOOT)
priv->dev_state = DEVICE_STATE_PREINIT;
- }
break;
case SME_STOP_REQUEST:
hostif_stop_request(priv);
@@ -2594,9 +2561,8 @@ void hostif_sme_execute(struct ks_wlan_private *priv, int event)
/* for power save */
atomic_set(&priv->psstatus.snooze_guard, 0);
atomic_set(&priv->psstatus.confirm_wait, 0);
- if (priv->dev_state == DEVICE_STATE_PREINIT) {
+ if (priv->dev_state == DEVICE_STATE_PREINIT)
priv->dev_state = DEVICE_STATE_INIT;
- }
/* wake_up_interruptible_all(&priv->confirm_wait); */
complete(&priv->confirm_wait);
break;
@@ -2652,7 +2618,6 @@ void hostif_sme_task(unsigned long dev)
tasklet_schedule(&priv->sme_task);
}
}
- return;
}
/* send to Station Management Entity module */
@@ -2672,7 +2637,7 @@ void hostif_sme_enqueue(struct ks_wlan_private *priv, unsigned short event)
} else {
/* in case of buffer overflow */
//DPRINTK(2,"sme queue buffer overflow\n");
- printk("sme queue buffer overflow\n");
+ netdev_err(priv->net_dev, "sme queue buffer overflow\n");
}
tasklet_schedule(&priv->sme_task);
@@ -2736,5 +2701,4 @@ int hostif_init(struct ks_wlan_private *priv)
void hostif_exit(struct ks_wlan_private *priv)
{
tasklet_kill(&priv->sme_task);
- return;
}
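The many %02x:%02x:... to %pM conversions in ks_hostif.c use the kernel's printf extension: %pM takes a pointer to a 6-byte buffer and prints it as a colon-separated MAC address (documented in Documentation/printk-formats.txt), shortening the format string and dropping five argument slots per call. A minimal sketch with a made-up address:

#include <linux/netdevice.h>
#include <linux/etherdevice.h>

static void example_log_mac(struct net_device *dev)
{
	u8 addr[ETH_ALEN] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };

	/* Prints "<dev name>: example MAC = 00:11:22:33:44:55". */
	netdev_info(dev, "example MAC = %pM\n", addr);
}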
diff --git a/drivers/staging/ks7010/ks_wlan.h b/drivers/staging/ks7010/ks_wlan.h
index c2cc288ae899..279e9b06fc4b 100644
--- a/drivers/staging/ks7010/ks_wlan.h
+++ b/drivers/staging/ks7010/ks_wlan.h
@@ -14,7 +14,6 @@
#define WPS
-#include <linux/version.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
@@ -25,13 +24,13 @@
#include <linux/netdevice.h> /* struct net_device_stats, struct sk_buff */
#include <linux/etherdevice.h>
#include <linux/wireless.h>
-#include <asm/atomic.h> /* struct atmic_t */
+#include <linux/atomic.h> /* atomic_t */
#include <linux/timer.h> /* struct timer_list */
#include <linux/string.h>
#include <linux/completion.h> /* struct completion */
#include <linux/workqueue.h>
-#include <asm/io.h>
+#include <linux/io.h>
#include "ks7010_sdio.h"
@@ -43,36 +42,36 @@
#endif
struct ks_wlan_parameter {
- uint8_t operation_mode; /* Operation Mode */
- uint8_t channel; /* Channel */
- uint8_t tx_rate; /* Transmit Rate */
+ u8 operation_mode; /* Operation Mode */
+ u8 channel; /* Channel */
+ u8 tx_rate; /* Transmit Rate */
struct {
- uint8_t size;
- uint8_t body[16];
+ u8 size;
+ u8 body[16];
} rate_set;
- uint8_t bssid[ETH_ALEN]; /* BSSID */
+ u8 bssid[ETH_ALEN]; /* BSSID */
struct {
- uint8_t size;
- uint8_t body[32 + 1];
+ u8 size;
+ u8 body[32 + 1];
} ssid; /* SSID */
- uint8_t preamble; /* Preamble */
- uint8_t powermgt; /* PowerManagementMode */
- uint32_t scan_type; /* AP List Scan Type */
+ u8 preamble; /* Preamble */
+ u8 powermgt; /* PowerManagementMode */
+ u32 scan_type; /* AP List Scan Type */
#define BEACON_LOST_COUNT_MIN 0
#define BEACON_LOST_COUNT_MAX 65535
- uint32_t beacon_lost_count; /* Beacon Lost Count */
- uint32_t rts; /* RTS Threashold */
- uint32_t fragment; /* Fragmentation Threashold */
- uint32_t privacy_invoked;
- uint32_t wep_index;
+ u32 beacon_lost_count; /* Beacon Lost Count */
+ u32 rts; /* RTS Threshold */
+ u32 fragment; /* Fragmentation Threshold */
+ u32 privacy_invoked;
+ u32 wep_index;
struct {
- uint8_t size;
- uint8_t val[13 * 2 + 1];
+ u8 size;
+ u8 val[13 * 2 + 1];
} wep_key[4];
- uint16_t authenticate_type;
- uint16_t phy_type; /* 11b/11g/11bg mode type */
- uint16_t cts_mode; /* for 11g/11bg mode cts mode */
- uint16_t phy_info_timer; /* phy information timer */
+ u16 authenticate_type;
+ u16 phy_type; /* 11b/11g/11bg mode type */
+ u16 cts_mode; /* for 11g/11bg mode cts mode */
+ u16 phy_info_timer; /* phy information timer */
};
enum {
@@ -216,37 +215,37 @@ struct hostt_t {
#define RSN_IE_BODY_MAX 64
struct rsn_ie_t {
- uint8_t id; /* 0xdd = WPA or 0x30 = RSN */
- uint8_t size; /* max ? 255 ? */
- uint8_t body[RSN_IE_BODY_MAX];
+ u8 id; /* 0xdd = WPA or 0x30 = RSN */
+ u8 size; /* max ? 255 ? */
+ u8 body[RSN_IE_BODY_MAX];
} __packed;
#ifdef WPS
#define WPS_IE_BODY_MAX 255
struct wps_ie_t {
- uint8_t id; /* 221 'dd <len> 00 50 F2 04' */
- uint8_t size; /* max ? 255 ? */
- uint8_t body[WPS_IE_BODY_MAX];
+ u8 id; /* 221 'dd <len> 00 50 F2 04' */
+ u8 size; /* max ? 255 ? */
+ u8 body[WPS_IE_BODY_MAX];
} __packed;
#endif /* WPS */
struct local_ap_t {
- uint8_t bssid[6];
- uint8_t rssi;
- uint8_t sq;
+ u8 bssid[6];
+ u8 rssi;
+ u8 sq;
struct {
- uint8_t size;
- uint8_t body[32];
- uint8_t ssid_pad;
+ u8 size;
+ u8 body[32];
+ u8 ssid_pad;
} ssid;
struct {
- uint8_t size;
- uint8_t body[16];
- uint8_t rate_pad;
+ u8 size;
+ u8 body[16];
+ u8 rate_pad;
} rate_set;
- uint16_t capability;
- uint8_t channel;
- uint8_t noise;
+ u16 capability;
+ u8 channel;
+ u8 noise;
struct rsn_ie_t wpa_ie;
struct rsn_ie_t rsn_ie;
#ifdef WPS
@@ -262,15 +261,15 @@ struct local_aplist_t {
};
struct local_gain_t {
- uint8_t TxMode;
- uint8_t RxMode;
- uint8_t TxGain;
- uint8_t RxGain;
+ u8 TxMode;
+ u8 RxMode;
+ u8 TxGain;
+ u8 RxGain;
};
struct local_eeprom_sum_t {
- uint8_t type;
- uint8_t result;
+ u8 type;
+ u8 result;
};
enum {
@@ -352,25 +351,25 @@ enum {
#define MIC_KEY_SIZE 8
struct wpa_key_t {
- uint32_t ext_flags; /* IW_ENCODE_EXT_xxx */
- uint8_t tx_seq[IW_ENCODE_SEQ_MAX_SIZE]; /* LSB first */
- uint8_t rx_seq[IW_ENCODE_SEQ_MAX_SIZE]; /* LSB first */
+ u32 ext_flags; /* IW_ENCODE_EXT_xxx */
+ u8 tx_seq[IW_ENCODE_SEQ_MAX_SIZE]; /* LSB first */
+ u8 rx_seq[IW_ENCODE_SEQ_MAX_SIZE]; /* LSB first */
struct sockaddr addr; /* ff:ff:ff:ff:ff:ff for broadcast/multicast
* (group) keys or unicast address for
* individual keys */
- uint16_t alg;
- uint16_t key_len; /* WEP: 5 or 13, TKIP: 32, CCMP: 16 */
- uint8_t key_val[IW_ENCODING_TOKEN_MAX];
- uint8_t tx_mic_key[MIC_KEY_SIZE];
- uint8_t rx_mic_key[MIC_KEY_SIZE];
+ u16 alg;
+ u16 key_len; /* WEP: 5 or 13, TKIP: 32, CCMP: 16 */
+ u8 key_val[IW_ENCODING_TOKEN_MAX];
+ u8 tx_mic_key[MIC_KEY_SIZE];
+ u8 rx_mic_key[MIC_KEY_SIZE];
};
#define WPA_KEY_INDEX_MAX 4
#define WPA_RX_SEQ_LEN 6
struct mic_failure_t {
- uint16_t failure; /* MIC Failure counter 0 or 1 or 2 */
- uint16_t counter; /* 1sec counter 0-60 */
- uint32_t last_failure_time;
+ u16 failure; /* MIC Failure counter 0 or 1 or 2 */
+ u16 counter; /* 1sec counter 0-60 */
+ u32 last_failure_time;
int stop; /* stop flag */
};
@@ -391,12 +390,12 @@ struct wpa_status_t {
#include <linux/list.h>
#define PMK_LIST_MAX 8
struct pmk_list_t {
- uint16_t size;
+ u16 size;
struct list_head head;
struct pmk_t {
struct list_head list;
- uint8_t bssid[ETH_ALEN];
- uint8_t pmkid[IW_PMKID_LEN];
+ u8 bssid[ETH_ALEN];
+ u8 pmkid[IW_PMKID_LEN];
} pmk[PMK_LIST_MAX];
};
@@ -404,7 +403,7 @@ struct pmk_list_t {
struct wps_status_t {
int wps_enabled;
int ielen;
- uint8_t ie[255];
+ u8 ie[255];
};
#endif /* WPS */
@@ -439,7 +438,7 @@ struct ks_wlan_private {
struct pmk_list_t pmklist;
/* wireless parameter */
struct ks_wlan_parameter reg;
- uint8_t current_rate;
+ u8 current_rate;
char nick[IW_ESSID_MAX_SIZE + 1];
@@ -472,24 +471,24 @@ struct ks_wlan_private {
/* spinlock_t lock; */
#define FORCE_DISCONNECT 0x80000000
#define CONNECT_STATUS_MASK 0x7FFFFFFF
- uint32_t connect_status; /* connect status */
+ u32 connect_status; /* connect status */
int infra_status; /* Infrastructure status */
- uint8_t data_buff[0x1000];
+ u8 data_buff[0x1000];
- uint8_t scan_ssid_len;
- uint8_t scan_ssid[IW_ESSID_MAX_SIZE + 1];
+ u8 scan_ssid_len;
+ u8 scan_ssid[IW_ESSID_MAX_SIZE + 1];
struct local_gain_t gain;
#ifdef WPS
struct net_device *l2_dev;
int l2_fd;
struct wps_status_t wps;
#endif /* WPS */
- uint8_t sleep_mode;
+ u8 sleep_mode;
- uint8_t region;
+ u8 region;
struct local_eeprom_sum_t eeprom_sum;
- uint8_t eeprom_checksum;
+ u8 eeprom_checksum;
struct hostt_t hostt;
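The ks_wlan.h hunks above trade the userspace-style uint8_t/uint16_t/uint32_t names for the kernel's native u8/u16/u32 and replace __attribute__ ((packed)) with the __packed shorthand; both are pure style conversions with no change to layout or behaviour. A sketch of the idiom on a hypothetical firmware structure (for true wire formats, endian-annotated types such as __le16 would be stricter still):

#include <linux/types.h>

/* Hypothetical message header; __packed suppresses padding between
 * fields so the struct matches the firmware's byte layout exactly. */
struct example_fw_hdr {
	u8 type;
	u16 len;
	u32 seq;
} __packed;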
diff --git a/drivers/staging/ks7010/ks_wlan_net.c b/drivers/staging/ks7010/ks_wlan_net.c
index b2b4fa4c3834..e5d04adaeb1a 100644
--- a/drivers/staging/ks7010/ks_wlan_net.c
+++ b/drivers/staging/ks7010/ks_wlan_net.c
@@ -24,9 +24,9 @@
#include <linux/pci.h>
#include <linux/ctype.h>
#include <linux/timer.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <linux/io.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
static int wep_on_off;
#define WEP_OFF 0
@@ -50,10 +50,10 @@ static const long frequency_list[] = { 2412, 2417, 2422, 2427, 2432, 2437, 2442,
/* A few details needed for WEP (Wireless Equivalent Privacy) */
#define MAX_KEY_SIZE 13 /* 128 (?) bits */
#define MIN_KEY_SIZE 5 /* 40 bits RC4 - WEP */
-typedef struct wep_key_t {
+struct wep_key {
u16 len;
u8 key[16]; /* 40-bit and 104-bit keys */
-} wep_key_t;
+};
/* Backward compatibility */
#ifndef IW_ENCODE_NOKEY
@@ -88,9 +88,9 @@ int ks_wlan_update_phy_information(struct ks_wlan_private *priv)
DPRINTK(4, "in_interrupt = %ld\n", in_interrupt());
- if (priv->dev_state < DEVICE_STATE_READY) {
+ if (priv->dev_state < DEVICE_STATE_READY)
return -1; /* not finished initialize */
- }
+
if (atomic_read(&update_phyinfo))
return 1;
@@ -182,19 +182,18 @@ static int ks_wlan_get_name(struct net_device *dev,
struct ks_wlan_private *priv =
(struct ks_wlan_private *)netdev_priv(dev);
- if (priv->sleep_mode == SLP_SLEEP) {
+ if (priv->sleep_mode == SLP_SLEEP)
return -EPERM;
- }
+
/* for SLEEP MODE */
- if (priv->dev_state < DEVICE_STATE_READY) {
+ if (priv->dev_state < DEVICE_STATE_READY)
strcpy(cwrq, "NOT READY!");
- } else if (priv->reg.phy_type == D_11B_ONLY_MODE) {
+ else if (priv->reg.phy_type == D_11B_ONLY_MODE)
strcpy(cwrq, "IEEE 802.11b");
- } else if (priv->reg.phy_type == D_11G_ONLY_MODE) {
+ else if (priv->reg.phy_type == D_11G_ONLY_MODE)
strcpy(cwrq, "IEEE 802.11g");
- } else {
+ else
strcpy(cwrq, "IEEE 802.11b/g");
- }
return 0;
}
@@ -209,9 +208,8 @@ static int ks_wlan_set_freq(struct net_device *dev,
(struct ks_wlan_private *)netdev_priv(dev);
int rc = -EINPROGRESS; /* Call commit handler */
- if (priv->sleep_mode == SLP_SLEEP) {
+ if (priv->sleep_mode == SLP_SLEEP)
return -EPERM;
- }
/* for SLEEP MODE */
/* If setting by frequency, convert to a channel */
@@ -219,6 +217,7 @@ static int ks_wlan_set_freq(struct net_device *dev,
(fwrq->m >= (int)2.412e8) && (fwrq->m <= (int)2.487e8)) {
int f = fwrq->m / 100000;
int c = 0;
+
while ((c < 14) && (f != frequency_list[c]))
c++;
/* Hack to fall through... */
@@ -257,13 +256,13 @@ static int ks_wlan_get_freq(struct net_device *dev,
(struct ks_wlan_private *)netdev_priv(dev);
int f;
- if (priv->sleep_mode == SLP_SLEEP) {
+ if (priv->sleep_mode == SLP_SLEEP)
return -EPERM;
- }
+
/* for SLEEP MODE */
- if ((priv->connect_status & CONNECT_STATUS_MASK) == CONNECT_STATUS) {
+ if ((priv->connect_status & CONNECT_STATUS_MASK) == CONNECT_STATUS)
f = (int)priv->current_ap.channel;
- } else
+ else
f = (int)priv->reg.channel;
fwrq->m = frequency_list[f - 1] * 100000;
fwrq->e = 1;
@@ -283,9 +282,9 @@ static int ks_wlan_set_essid(struct net_device *dev,
DPRINTK(2, " %d\n", dwrq->flags);
- if (priv->sleep_mode == SLP_SLEEP) {
+ if (priv->sleep_mode == SLP_SLEEP)
return -EPERM;
- }
+
/* for SLEEP MODE */
/* Check if we asked for `any' */
@@ -301,14 +300,14 @@ static int ks_wlan_set_essid(struct net_device *dev,
len--;
/* Check the size of the string */
- if (len > IW_ESSID_MAX_SIZE) {
+ if (len > IW_ESSID_MAX_SIZE)
return -EINVAL;
- }
+
#else
/* Check the size of the string */
- if (dwrq->length > IW_ESSID_MAX_SIZE + 1) {
+ if (dwrq->length > IW_ESSID_MAX_SIZE + 1)
return -E2BIG;
- }
+
#endif
/* Set the SSID */
@@ -340,9 +339,9 @@ static int ks_wlan_get_essid(struct net_device *dev,
struct ks_wlan_private *priv =
(struct ks_wlan_private *)netdev_priv(dev);
- if (priv->sleep_mode == SLP_SLEEP) {
+ if (priv->sleep_mode == SLP_SLEEP)
return -EPERM;
- }
+
/* for SLEEP MODE */
/* Note : if dwrq->flags != 0, we should
@@ -385,25 +384,23 @@ static int ks_wlan_set_wap(struct net_device *dev, struct iw_request_info *info,
DPRINTK(2, "\n");
- if (priv->sleep_mode == SLP_SLEEP) {
+ if (priv->sleep_mode == SLP_SLEEP)
return -EPERM;
- }
+
/* for SLEEP MODE */
if (priv->reg.operation_mode == MODE_ADHOC ||
priv->reg.operation_mode == MODE_INFRASTRUCTURE) {
- memcpy(priv->reg.bssid, (u8 *) & ap_addr->sa_data, ETH_ALEN);
+ memcpy(priv->reg.bssid, &ap_addr->sa_data, ETH_ALEN);
- if (is_valid_ether_addr((u8 *) priv->reg.bssid)) {
+ if (is_valid_ether_addr((u8 *)priv->reg.bssid))
priv->need_commit |= SME_MODE_SET;
- }
+
} else {
eth_zero_addr(priv->reg.bssid);
return -EOPNOTSUPP;
}
- DPRINTK(2, "bssid = %02x:%02x:%02x:%02x:%02x:%02x\n",
- priv->reg.bssid[0], priv->reg.bssid[1], priv->reg.bssid[2],
- priv->reg.bssid[3], priv->reg.bssid[4], priv->reg.bssid[5]);
+ DPRINTK(2, "bssid = %pM\n", priv->reg.bssid);
/* Write it to the card */
if (priv->need_commit) {
@@ -421,15 +418,14 @@ static int ks_wlan_get_wap(struct net_device *dev, struct iw_request_info *info,
struct ks_wlan_private *priv =
(struct ks_wlan_private *)netdev_priv(dev);
- if (priv->sleep_mode == SLP_SLEEP) {
+ if (priv->sleep_mode == SLP_SLEEP)
return -EPERM;
- }
+
/* for SLEEP MODE */
- if ((priv->connect_status & CONNECT_STATUS_MASK) == CONNECT_STATUS) {
+ if ((priv->connect_status & CONNECT_STATUS_MASK) == CONNECT_STATUS)
memcpy(awrq->sa_data, &(priv->current_ap.bssid[0]), ETH_ALEN);
- } else {
+ else
eth_zero_addr(awrq->sa_data);
- }
awrq->sa_family = ARPHRD_ETHER;
@@ -445,15 +441,14 @@ static int ks_wlan_set_nick(struct net_device *dev,
struct ks_wlan_private *priv =
(struct ks_wlan_private *)netdev_priv(dev);
- if (priv->sleep_mode == SLP_SLEEP) {
+ if (priv->sleep_mode == SLP_SLEEP)
return -EPERM;
- }
/* for SLEEP MODE */
/* Check the size of the string */
- if (dwrq->length > 16 + 1) {
+ if (dwrq->length > 16 + 1)
return -E2BIG;
- }
+
memset(priv->nick, 0, sizeof(priv->nick));
memcpy(priv->nick, extra, dwrq->length);
@@ -469,9 +464,9 @@ static int ks_wlan_get_nick(struct net_device *dev,
struct ks_wlan_private *priv =
(struct ks_wlan_private *)netdev_priv(dev);
- if (priv->sleep_mode == SLP_SLEEP) {
+ if (priv->sleep_mode == SLP_SLEEP)
return -EPERM;
- }
+
/* for SLEEP MODE */
strncpy(extra, priv->nick, 16);
extra[16] = '\0';
@@ -490,9 +485,9 @@ static int ks_wlan_set_rate(struct net_device *dev,
(struct ks_wlan_private *)netdev_priv(dev);
int i = 0;
- if (priv->sleep_mode == SLP_SLEEP) {
+ if (priv->sleep_mode == SLP_SLEEP)
return -EPERM;
- }
+
/* for SLEEP MODE */
if (priv->reg.phy_type == D_11B_ONLY_MODE) {
if (vwrq->fixed == 1) {
@@ -727,13 +722,13 @@ static int ks_wlan_get_rate(struct net_device *dev,
DPRINTK(2, "in_interrupt = %ld update_phyinfo = %d\n",
in_interrupt(), atomic_read(&update_phyinfo));
- if (priv->sleep_mode == SLP_SLEEP) {
+ if (priv->sleep_mode == SLP_SLEEP)
return -EPERM;
- }
+
/* for SLEEP MODE */
- if (!atomic_read(&update_phyinfo)) {
+ if (!atomic_read(&update_phyinfo))
ks_wlan_update_phy_information(priv);
- }
+
vwrq->value = ((priv->current_rate) & RATE_MASK) * 500000;
if (priv->reg.tx_rate == TX_RATE_FIXED)
vwrq->fixed = 1;
@@ -752,15 +747,15 @@ static int ks_wlan_set_rts(struct net_device *dev, struct iw_request_info *info,
(struct ks_wlan_private *)netdev_priv(dev);
int rthr = vwrq->value;
- if (priv->sleep_mode == SLP_SLEEP) {
+ if (priv->sleep_mode == SLP_SLEEP)
return -EPERM;
- }
+
/* for SLEEP MODE */
if (vwrq->disabled)
rthr = 2347;
- if ((rthr < 0) || (rthr > 2347)) {
+ if ((rthr < 0) || (rthr > 2347))
return -EINVAL;
- }
+
priv->reg.rts = rthr;
priv->need_commit |= SME_RTS;
@@ -775,9 +770,9 @@ static int ks_wlan_get_rts(struct net_device *dev, struct iw_request_info *info,
struct ks_wlan_private *priv =
(struct ks_wlan_private *)netdev_priv(dev);
- if (priv->sleep_mode == SLP_SLEEP) {
+ if (priv->sleep_mode == SLP_SLEEP)
return -EPERM;
- }
+
/* for SLEEP MODE */
vwrq->value = priv->reg.rts;
vwrq->disabled = (vwrq->value >= 2347);
@@ -796,15 +791,15 @@ static int ks_wlan_set_frag(struct net_device *dev,
(struct ks_wlan_private *)netdev_priv(dev);
int fthr = vwrq->value;
- if (priv->sleep_mode == SLP_SLEEP) {
+ if (priv->sleep_mode == SLP_SLEEP)
return -EPERM;
- }
+
/* for SLEEP MODE */
if (vwrq->disabled)
fthr = 2346;
- if ((fthr < 256) || (fthr > 2346)) {
+ if ((fthr < 256) || (fthr > 2346))
return -EINVAL;
- }
+
fthr &= ~0x1; /* Get an even value - is it really needed ??? */
priv->reg.fragment = fthr;
priv->need_commit |= SME_FRAG;
@@ -821,9 +816,9 @@ static int ks_wlan_get_frag(struct net_device *dev,
struct ks_wlan_private *priv =
(struct ks_wlan_private *)netdev_priv(dev);
- if (priv->sleep_mode == SLP_SLEEP) {
+ if (priv->sleep_mode == SLP_SLEEP)
return -EPERM;
- }
+
/* for SLEEP MODE */
vwrq->value = priv->reg.fragment;
vwrq->disabled = (vwrq->value >= 2346);
@@ -835,7 +830,7 @@ static int ks_wlan_get_frag(struct net_device *dev,
/*------------------------------------------------------------------*/
/* Wireless Handler : set Mode of Operation */
static int ks_wlan_set_mode(struct net_device *dev,
- struct iw_request_info *info, __u32 * uwrq,
+ struct iw_request_info *info, __u32 *uwrq,
char *extra)
{
struct ks_wlan_private *priv =
@@ -843,9 +838,9 @@ static int ks_wlan_set_mode(struct net_device *dev,
DPRINTK(2, "mode=%d\n", *uwrq);
- if (priv->sleep_mode == SLP_SLEEP) {
+ if (priv->sleep_mode == SLP_SLEEP)
return -EPERM;
- }
+
/* for SLEEP MODE */
switch (*uwrq) {
case IW_MODE_ADHOC:
@@ -871,15 +866,14 @@ static int ks_wlan_set_mode(struct net_device *dev,
/*------------------------------------------------------------------*/
/* Wireless Handler : get Mode of Operation */
static int ks_wlan_get_mode(struct net_device *dev,
- struct iw_request_info *info, __u32 * uwrq,
+ struct iw_request_info *info, __u32 *uwrq,
char *extra)
{
struct ks_wlan_private *priv =
(struct ks_wlan_private *)netdev_priv(dev);
- if (priv->sleep_mode == SLP_SLEEP) {
+ if (priv->sleep_mode == SLP_SLEEP)
return -EPERM;
- }
/* for SLEEP MODE */
/* If not managed, assume it's ad-hoc */
@@ -906,16 +900,15 @@ static int ks_wlan_set_encode(struct net_device *dev,
struct ks_wlan_private *priv =
(struct ks_wlan_private *)netdev_priv(dev);
- wep_key_t key;
+ struct wep_key key;
int index = (dwrq->flags & IW_ENCODE_INDEX);
int current_index = priv->reg.wep_index;
int i;
DPRINTK(2, "flags=%04X\n", dwrq->flags);
- if (priv->sleep_mode == SLP_SLEEP) {
+ if (priv->sleep_mode == SLP_SLEEP)
return -EPERM;
- }
/* for SLEEP MODE */
/* index check */
@@ -959,9 +952,9 @@ static int ks_wlan_set_encode(struct net_device *dev,
}
/* Send the key to the card */
priv->reg.wep_key[index].size = key.len;
- for (i = 0; i < (priv->reg.wep_key[index].size); i++) {
+ for (i = 0; i < (priv->reg.wep_key[index].size); i++)
priv->reg.wep_key[index].val[i] = key.key[i];
- }
+
priv->need_commit |= (SME_WEP_VAL1 << index);
priv->reg.wep_index = index;
priv->need_commit |= SME_WEP_INDEX;
@@ -973,9 +966,9 @@ static int ks_wlan_set_encode(struct net_device *dev,
priv->reg.wep_key[2].size = 0;
priv->reg.wep_key[3].size = 0;
priv->reg.privacy_invoked = 0x00;
- if (priv->reg.authenticate_type == AUTH_TYPE_SHARED_KEY) {
+ if (priv->reg.authenticate_type == AUTH_TYPE_SHARED_KEY)
priv->need_commit |= SME_MODE_SET;
- }
+
priv->reg.authenticate_type = AUTH_TYPE_OPEN_SYSTEM;
wep_on_off = WEP_OFF;
priv->need_commit |= SME_WEP_FLAG;
@@ -997,14 +990,14 @@ static int ks_wlan_set_encode(struct net_device *dev,
priv->need_commit |= SME_WEP_FLAG;
if (dwrq->flags & IW_ENCODE_OPEN) {
- if (priv->reg.authenticate_type == AUTH_TYPE_SHARED_KEY) {
+ if (priv->reg.authenticate_type == AUTH_TYPE_SHARED_KEY)
priv->need_commit |= SME_MODE_SET;
- }
+
priv->reg.authenticate_type = AUTH_TYPE_OPEN_SYSTEM;
} else if (dwrq->flags & IW_ENCODE_RESTRICTED) {
- if (priv->reg.authenticate_type == AUTH_TYPE_OPEN_SYSTEM) {
+ if (priv->reg.authenticate_type == AUTH_TYPE_OPEN_SYSTEM)
priv->need_commit |= SME_MODE_SET;
- }
+
priv->reg.authenticate_type = AUTH_TYPE_SHARED_KEY;
}
// return -EINPROGRESS; /* Call commit handler */
@@ -1026,9 +1019,9 @@ static int ks_wlan_get_encode(struct net_device *dev,
char zeros[16];
int index = (dwrq->flags & IW_ENCODE_INDEX) - 1;
- if (priv->sleep_mode == SLP_SLEEP) {
+ if (priv->sleep_mode == SLP_SLEEP)
return -EPERM;
- }
+
/* for SLEEP MODE */
dwrq->flags = IW_ENCODE_DISABLED;
@@ -1056,9 +1049,8 @@ static int ks_wlan_get_encode(struct net_device *dev,
/* Copy the key to the user buffer */
if ((index >= 0) && (index < 4))
dwrq->length = priv->reg.wep_key[index].size;
- if (dwrq->length > 16) {
+ if (dwrq->length > 16)
dwrq->length = 0;
- }
#if 1 /* IW_ENCODE_NOKEY; */
if (dwrq->length) {
if ((index >= 0) && (index < 4))
@@ -1086,9 +1078,8 @@ static int ks_wlan_get_txpow(struct net_device *dev,
struct iw_request_info *info,
struct iw_param *vwrq, char *extra)
{
- if (priv->sleep_mode == SLP_SLEEP) {
+ if (priv->sleep_mode == SLP_SLEEP)
return -EPERM;
- }
/* for SLEEP MODE */
/* Not Support */
@@ -1113,9 +1104,8 @@ static int ks_wlan_get_retry(struct net_device *dev,
struct iw_request_info *info,
struct iw_param *vwrq, char *extra)
{
- if (priv->sleep_mode == SLP_SLEEP) {
+ if (priv->sleep_mode == SLP_SLEEP)
return -EPERM;
- }
/* for SLEEP MODE */
/* Not Support */
@@ -1139,9 +1129,9 @@ static int ks_wlan_get_range(struct net_device *dev,
DPRINTK(2, "\n");
- if (priv->sleep_mode == SLP_SLEEP) {
+ if (priv->sleep_mode == SLP_SLEEP)
return -EPERM;
- }
+
/* for SLEEP MODE */
dwrq->length = sizeof(struct iw_range);
memset(range, 0, sizeof(*range));
@@ -1267,9 +1257,9 @@ static int ks_wlan_set_power(struct net_device *dev,
(struct ks_wlan_private *)netdev_priv(dev);
short enabled;
- if (priv->sleep_mode == SLP_SLEEP) {
+ if (priv->sleep_mode == SLP_SLEEP)
return -EPERM;
- }
+
/* for SLEEP MODE */
enabled = vwrq->disabled ? 0 : 1;
if (enabled == 0) { /* 0 */
@@ -1301,9 +1291,8 @@ static int ks_wlan_get_power(struct net_device *dev,
struct ks_wlan_private *priv =
(struct ks_wlan_private *)netdev_priv(dev);
- if (priv->sleep_mode == SLP_SLEEP) {
+ if (priv->sleep_mode == SLP_SLEEP)
return -EPERM;
- }
/* for SLEEP MODE */
if (priv->reg.powermgt > 0)
vwrq->disabled = 0;
@@ -1322,9 +1311,8 @@ static int ks_wlan_get_iwstats(struct net_device *dev,
struct ks_wlan_private *priv =
(struct ks_wlan_private *)netdev_priv(dev);
- if (priv->sleep_mode == SLP_SLEEP) {
+ if (priv->sleep_mode == SLP_SLEEP)
return -EPERM;
- }
/* for SLEEP MODE */
vwrq->qual = 0; /* not supported */
vwrq->level = priv->wstats.qual.level;
@@ -1372,9 +1360,8 @@ static int ks_wlan_get_aplist(struct net_device *dev,
int i;
- if (priv->sleep_mode == SLP_SLEEP) {
+ if (priv->sleep_mode == SLP_SLEEP)
return -EPERM;
- }
/* for SLEEP MODE */
for (i = 0; i < priv->aplist.size; i++) {
memcpy(address[i].sa_data, &(priv->aplist.ap[i].bssid[0]),
@@ -1404,11 +1391,11 @@ static int ks_wlan_set_scan(struct net_device *dev,
struct ks_wlan_private *priv =
(struct ks_wlan_private *)netdev_priv(dev);
struct iw_scan_req *req = NULL;
+
DPRINTK(2, "\n");
- if (priv->sleep_mode == SLP_SLEEP) {
+ if (priv->sleep_mode == SLP_SLEEP)
return -EPERM;
- }
/* for SLEEP MODE */
/* specified SSID SCAN */
@@ -1598,11 +1585,11 @@ static int ks_wlan_get_scan(struct net_device *dev,
(struct ks_wlan_private *)netdev_priv(dev);
int i;
char *current_ev = extra;
+
DPRINTK(2, "\n");
- if (priv->sleep_mode == SLP_SLEEP) {
+ if (priv->sleep_mode == SLP_SLEEP)
return -EPERM;
- }
/* for SLEEP MODE */
if (priv->sme_i.sme_flag & SME_AP_SCAN) {
DPRINTK(2, "flag AP_SCAN\n");
@@ -1675,9 +1662,8 @@ static int ks_wlan_set_genie(struct net_device *dev,
DPRINTK(2, "\n");
- if (priv->sleep_mode == SLP_SLEEP) {
+ if (priv->sleep_mode == SLP_SLEEP)
return -EPERM;
- }
/* for SLEEP MODE */
return 0;
// return -EOPNOTSUPP;
@@ -1696,26 +1682,23 @@ static int ks_wlan_set_auth_mode(struct net_device *dev,
DPRINTK(2, "index=%d:value=%08X\n", index, value);
- if (priv->sleep_mode == SLP_SLEEP) {
+ if (priv->sleep_mode == SLP_SLEEP)
return -EPERM;
- }
/* for SLEEP MODE */
switch (index) {
case IW_AUTH_WPA_VERSION: /* 0 */
switch (value) {
case IW_AUTH_WPA_VERSION_DISABLED:
priv->wpa.version = value;
- if (priv->wpa.rsn_enabled) {
+ if (priv->wpa.rsn_enabled)
priv->wpa.rsn_enabled = 0;
- }
priv->need_commit |= SME_RSN;
break;
case IW_AUTH_WPA_VERSION_WPA:
case IW_AUTH_WPA_VERSION_WPA2:
priv->wpa.version = value;
- if (!(priv->wpa.rsn_enabled)) {
+ if (!(priv->wpa.rsn_enabled))
priv->wpa.rsn_enabled = 1;
- }
priv->need_commit |= SME_RSN;
break;
default:
@@ -1832,11 +1815,11 @@ static int ks_wlan_get_auth_mode(struct net_device *dev,
struct ks_wlan_private *priv =
(struct ks_wlan_private *)netdev_priv(dev);
int index = (vwrq->flags & IW_AUTH_INDEX);
+
DPRINTK(2, "index=%d\n", index);
- if (priv->sleep_mode == SLP_SLEEP) {
+ if (priv->sleep_mode == SLP_SLEEP)
return -EPERM;
- }
/* for SLEEP MODE */
/* WPA (not used ?? wpa_supplicant) */
@@ -1886,18 +1869,17 @@ static int ks_wlan_set_encode_ext(struct net_device *dev,
DPRINTK(2, "flags=%04X:: ext_flags=%08X\n", dwrq->flags,
enc->ext_flags);
- if (priv->sleep_mode == SLP_SLEEP) {
+ if (priv->sleep_mode == SLP_SLEEP)
return -EPERM;
- }
+
/* for SLEEP MODE */
if (index < 1 || index > 4)
return -EINVAL;
else
index--;
- if (dwrq->flags & IW_ENCODE_DISABLED) {
+ if (dwrq->flags & IW_ENCODE_DISABLED)
priv->wpa.key[index].key_len = 0;
- }
if (enc) {
priv->wpa.key[index].ext_flags = enc->ext_flags;
@@ -1986,9 +1968,8 @@ static int ks_wlan_get_encode_ext(struct net_device *dev,
struct ks_wlan_private *priv =
(struct ks_wlan_private *)netdev_priv(dev);
- if (priv->sleep_mode == SLP_SLEEP) {
+ if (priv->sleep_mode == SLP_SLEEP)
return -EPERM;
- }
/* for SLEEP MODE */
/* WPA (not used ?? wpa_supplicant)
@@ -2015,13 +1996,13 @@ static int ks_wlan_set_pmksa(struct net_device *dev,
DPRINTK(2, "\n");
- if (priv->sleep_mode == SLP_SLEEP) {
+ if (priv->sleep_mode == SLP_SLEEP)
return -EPERM;
- }
+
/* for SLEEP MODE */
- if (!extra) {
+ if (!extra)
return -EINVAL;
- }
+
pmksa = (struct iw_pmksa *)extra;
DPRINTK(2, "cmd=%d\n", pmksa->cmd);
@@ -2141,16 +2122,16 @@ static struct iw_statistics *ks_get_wireless_stats(struct net_device *dev)
/*------------------------------------------------------------------*/
/* Private handler : set stop request */
static int ks_wlan_set_stop_request(struct net_device *dev,
- struct iw_request_info *info, __u32 * uwrq,
+ struct iw_request_info *info, __u32 *uwrq,
char *extra)
{
struct ks_wlan_private *priv =
(struct ks_wlan_private *)netdev_priv(dev);
DPRINTK(2, "\n");
- if (priv->sleep_mode == SLP_SLEEP) {
+ if (priv->sleep_mode == SLP_SLEEP)
return -EPERM;
- }
+
/* for SLEEP MODE */
if (!(*uwrq))
return -EINVAL;
@@ -2173,15 +2154,14 @@ static int ks_wlan_set_mlme(struct net_device *dev,
DPRINTK(2, ":%d :%d\n", mlme->cmd, mlme->reason_code);
- if (priv->sleep_mode == SLP_SLEEP) {
+ if (priv->sleep_mode == SLP_SLEEP)
return -EPERM;
- }
+
/* for SLEEP MODE */
switch (mlme->cmd) {
case IW_MLME_DEAUTH:
- if (mlme->reason_code == WLAN_REASON_MIC_FAILURE) {
+ if (mlme->reason_code == WLAN_REASON_MIC_FAILURE)
return 0;
- }
case IW_MLME_DISASSOC:
mode = 1;
return ks_wlan_set_stop_request(dev, NULL, &mode, NULL);
@@ -2207,14 +2187,14 @@ static int ks_wlan_get_firmware_version(struct net_device *dev,
/*------------------------------------------------------------------*/
/* Private handler : set force disconnect status */
static int ks_wlan_set_detach(struct net_device *dev,
- struct iw_request_info *info, __u32 * uwrq,
+ struct iw_request_info *info, __u32 *uwrq,
char *extra)
{
struct ks_wlan_private *priv = (struct ks_wlan_private *)dev->priv;
- if (priv->sleep_mode == SLP_SLEEP) {
+ if (priv->sleep_mode == SLP_SLEEP)
return -EPERM;
- }
+
/* for SLEEP MODE */
if (*uwrq == CONNECT_STATUS) { /* 0 */
priv->connect_status &= ~FORCE_DISCONNECT;
@@ -2232,14 +2212,14 @@ static int ks_wlan_set_detach(struct net_device *dev,
/*------------------------------------------------------------------*/
/* Private handler : get force disconnect status */
static int ks_wlan_get_detach(struct net_device *dev,
- struct iw_request_info *info, __u32 * uwrq,
+ struct iw_request_info *info, __u32 *uwrq,
char *extra)
{
struct ks_wlan_private *priv = (struct ks_wlan_private *)dev->priv;
- if (priv->sleep_mode == SLP_SLEEP) {
+ if (priv->sleep_mode == SLP_SLEEP)
return -EPERM;
- }
+
/* for SLEEP MODE */
*uwrq = ((priv->connect_status & FORCE_DISCONNECT) ? 1 : 0);
return 0;
@@ -2248,14 +2228,14 @@ static int ks_wlan_get_detach(struct net_device *dev,
/*------------------------------------------------------------------*/
/* Private handler : get connect status */
static int ks_wlan_get_connect(struct net_device *dev,
- struct iw_request_info *info, __u32 * uwrq,
+ struct iw_request_info *info, __u32 *uwrq,
char *extra)
{
struct ks_wlan_private *priv = (struct ks_wlan_private *)dev->priv;
- if (priv->sleep_mode == SLP_SLEEP) {
+ if (priv->sleep_mode == SLP_SLEEP)
return -EPERM;
- }
+
/* for SLEEP MODE */
*uwrq = (priv->connect_status & CONNECT_STATUS_MASK);
return 0;
@@ -2265,15 +2245,15 @@ static int ks_wlan_get_connect(struct net_device *dev,
/*------------------------------------------------------------------*/
/* Private handler : set preamble */
static int ks_wlan_set_preamble(struct net_device *dev,
- struct iw_request_info *info, __u32 * uwrq,
+ struct iw_request_info *info, __u32 *uwrq,
char *extra)
{
struct ks_wlan_private *priv =
(struct ks_wlan_private *)netdev_priv(dev);
- if (priv->sleep_mode == SLP_SLEEP) {
+ if (priv->sleep_mode == SLP_SLEEP)
return -EPERM;
- }
+
/* for SLEEP MODE */
if (*uwrq == LONG_PREAMBLE) { /* 0 */
priv->reg.preamble = LONG_PREAMBLE;
@@ -2290,15 +2270,15 @@ static int ks_wlan_set_preamble(struct net_device *dev,
/*------------------------------------------------------------------*/
/* Private handler : get preamble */
static int ks_wlan_get_preamble(struct net_device *dev,
- struct iw_request_info *info, __u32 * uwrq,
+ struct iw_request_info *info, __u32 *uwrq,
char *extra)
{
struct ks_wlan_private *priv =
(struct ks_wlan_private *)netdev_priv(dev);
- if (priv->sleep_mode == SLP_SLEEP) {
+ if (priv->sleep_mode == SLP_SLEEP)
return -EPERM;
- }
+
/* for SLEEP MODE */
*uwrq = priv->reg.preamble;
return 0;
@@ -2307,15 +2287,15 @@ static int ks_wlan_get_preamble(struct net_device *dev,
/*------------------------------------------------------------------*/
/* Private handler : set power save mode */
static int ks_wlan_set_powermgt(struct net_device *dev,
- struct iw_request_info *info, __u32 * uwrq,
+ struct iw_request_info *info, __u32 *uwrq,
char *extra)
{
struct ks_wlan_private *priv =
(struct ks_wlan_private *)netdev_priv(dev);
- if (priv->sleep_mode == SLP_SLEEP) {
+ if (priv->sleep_mode == SLP_SLEEP)
return -EPERM;
- }
+
/* for SLEEP MODE */
if (*uwrq == POWMGT_ACTIVE_MODE) { /* 0 */
priv->reg.powermgt = POWMGT_ACTIVE_MODE;
@@ -2340,15 +2320,15 @@ static int ks_wlan_set_powermgt(struct net_device *dev,
/*------------------------------------------------------------------*/
/* Private handler : get power save mode */
static int ks_wlan_get_powermgt(struct net_device *dev,
- struct iw_request_info *info, __u32 * uwrq,
+ struct iw_request_info *info, __u32 *uwrq,
char *extra)
{
struct ks_wlan_private *priv =
(struct ks_wlan_private *)netdev_priv(dev);
- if (priv->sleep_mode == SLP_SLEEP) {
+ if (priv->sleep_mode == SLP_SLEEP)
return -EPERM;
- }
+
/* for SLEEP MODE */
*uwrq = priv->reg.powermgt;
return 0;
@@ -2357,15 +2337,14 @@ static int ks_wlan_get_powermgt(struct net_device *dev,
/*------------------------------------------------------------------*/
/* Private handler : set scan type */
static int ks_wlan_set_scan_type(struct net_device *dev,
- struct iw_request_info *info, __u32 * uwrq,
+ struct iw_request_info *info, __u32 *uwrq,
char *extra)
{
struct ks_wlan_private *priv =
(struct ks_wlan_private *)netdev_priv(dev);
- if (priv->sleep_mode == SLP_SLEEP) {
+ if (priv->sleep_mode == SLP_SLEEP)
return -EPERM;
- }
/* for SLEEP MODE */
if (*uwrq == ACTIVE_SCAN) { /* 0 */
priv->reg.scan_type = ACTIVE_SCAN;
@@ -2380,15 +2359,14 @@ static int ks_wlan_set_scan_type(struct net_device *dev,
/*------------------------------------------------------------------*/
/* Private handler : get scan type */
static int ks_wlan_get_scan_type(struct net_device *dev,
- struct iw_request_info *info, __u32 * uwrq,
+ struct iw_request_info *info, __u32 *uwrq,
char *extra)
{
struct ks_wlan_private *priv =
(struct ks_wlan_private *)netdev_priv(dev);
- if (priv->sleep_mode == SLP_SLEEP) {
+ if (priv->sleep_mode == SLP_SLEEP)
return -EPERM;
- }
/* for SLEEP MODE */
*uwrq = priv->reg.scan_type;
return 0;
@@ -2404,9 +2382,8 @@ static int ks_wlan_data_write(struct net_device *dev,
struct ks_wlan_private *priv = (struct ks_wlan_private *)dev->priv;
unsigned char *wbuff = NULL;
- if (priv->sleep_mode == SLP_SLEEP) {
+ if (priv->sleep_mode == SLP_SLEEP)
return -EPERM;
- }
/* for SLEEP MODE */
wbuff = (unsigned char *)kmalloc(dwrq->length, GFP_ATOMIC);
if (!wbuff)
@@ -2428,9 +2405,8 @@ static int ks_wlan_data_read(struct net_device *dev,
struct ks_wlan_private *priv = (struct ks_wlan_private *)dev->priv;
unsigned short read_length;
- if (priv->sleep_mode == SLP_SLEEP) {
+ if (priv->sleep_mode == SLP_SLEEP)
return -EPERM;
- }
/* for SLEEP MODE */
if (!atomic_read(&priv->event_count)) {
if (priv->dev_state < DEVICE_STATE_BOOT) { /* Remove device */
@@ -2488,9 +2464,8 @@ static int ks_wlan_get_wep_ascii(struct net_device *dev,
int i, j, len = 0;
char tmp[WEP_ASCII_BUFF_SIZE];
- if (priv->sleep_mode == SLP_SLEEP) {
+ if (priv->sleep_mode == SLP_SLEEP)
return -EPERM;
- }
/* for SLEEP MODE */
strcpy(tmp, " WEP keys ASCII \n");
len += strlen(" WEP keys ASCII \n");
@@ -2531,19 +2506,18 @@ static int ks_wlan_get_wep_ascii(struct net_device *dev,
/*------------------------------------------------------------------*/
/* Private handler : set beacon lost count */
static int ks_wlan_set_beacon_lost(struct net_device *dev,
- struct iw_request_info *info, __u32 * uwrq,
+ struct iw_request_info *info, __u32 *uwrq,
char *extra)
{
struct ks_wlan_private *priv =
(struct ks_wlan_private *)netdev_priv(dev);
- if (priv->sleep_mode == SLP_SLEEP) {
+ if (priv->sleep_mode == SLP_SLEEP)
return -EPERM;
- }
/* for SLEEP MODE */
- if (*uwrq >= BEACON_LOST_COUNT_MIN && *uwrq <= BEACON_LOST_COUNT_MAX) {
+ if (*uwrq >= BEACON_LOST_COUNT_MIN && *uwrq <= BEACON_LOST_COUNT_MAX)
priv->reg.beacon_lost_count = *uwrq;
- } else
+ else
return -EINVAL;
if (priv->reg.operation_mode == MODE_INFRASTRUCTURE) {
@@ -2556,15 +2530,14 @@ static int ks_wlan_set_beacon_lost(struct net_device *dev,
/*------------------------------------------------------------------*/
/* Private handler : get beacon lost count */
static int ks_wlan_get_beacon_lost(struct net_device *dev,
- struct iw_request_info *info, __u32 * uwrq,
+ struct iw_request_info *info, __u32 *uwrq,
char *extra)
{
struct ks_wlan_private *priv =
(struct ks_wlan_private *)netdev_priv(dev);
- if (priv->sleep_mode == SLP_SLEEP) {
+ if (priv->sleep_mode == SLP_SLEEP)
return -EPERM;
- }
/* for SLEEP MODE */
*uwrq = priv->reg.beacon_lost_count;
return 0;
@@ -2573,15 +2546,14 @@ static int ks_wlan_get_beacon_lost(struct net_device *dev,
/*------------------------------------------------------------------*/
/* Private handler : set phy type */
static int ks_wlan_set_phy_type(struct net_device *dev,
- struct iw_request_info *info, __u32 * uwrq,
+ struct iw_request_info *info, __u32 *uwrq,
char *extra)
{
struct ks_wlan_private *priv =
(struct ks_wlan_private *)netdev_priv(dev);
- if (priv->sleep_mode == SLP_SLEEP) {
+ if (priv->sleep_mode == SLP_SLEEP)
return -EPERM;
- }
/* for SLEEP MODE */
if (*uwrq == D_11B_ONLY_MODE) { /* 0 */
priv->reg.phy_type = D_11B_ONLY_MODE;
@@ -2599,15 +2571,14 @@ static int ks_wlan_set_phy_type(struct net_device *dev,
/*------------------------------------------------------------------*/
/* Private handler : get phy type */
static int ks_wlan_get_phy_type(struct net_device *dev,
- struct iw_request_info *info, __u32 * uwrq,
+ struct iw_request_info *info, __u32 *uwrq,
char *extra)
{
struct ks_wlan_private *priv =
(struct ks_wlan_private *)netdev_priv(dev);
- if (priv->sleep_mode == SLP_SLEEP) {
+ if (priv->sleep_mode == SLP_SLEEP)
return -EPERM;
- }
/* for SLEEP MODE */
*uwrq = priv->reg.phy_type;
return 0;
@@ -2616,15 +2587,14 @@ static int ks_wlan_get_phy_type(struct net_device *dev,
/*------------------------------------------------------------------*/
/* Private handler : set cts mode */
static int ks_wlan_set_cts_mode(struct net_device *dev,
- struct iw_request_info *info, __u32 * uwrq,
+ struct iw_request_info *info, __u32 *uwrq,
char *extra)
{
struct ks_wlan_private *priv =
(struct ks_wlan_private *)netdev_priv(dev);
- if (priv->sleep_mode == SLP_SLEEP) {
+ if (priv->sleep_mode == SLP_SLEEP)
return -EPERM;
- }
/* for SLEEP MODE */
if (*uwrq == CTS_MODE_FALSE) { /* 0 */
priv->reg.cts_mode = CTS_MODE_FALSE;
@@ -2644,15 +2614,14 @@ static int ks_wlan_set_cts_mode(struct net_device *dev,
/*------------------------------------------------------------------*/
/* Private handler : get cts mode */
static int ks_wlan_get_cts_mode(struct net_device *dev,
- struct iw_request_info *info, __u32 * uwrq,
+ struct iw_request_info *info, __u32 *uwrq,
char *extra)
{
struct ks_wlan_private *priv =
(struct ks_wlan_private *)netdev_priv(dev);
- if (priv->sleep_mode == SLP_SLEEP) {
+ if (priv->sleep_mode == SLP_SLEEP)
return -EPERM;
- }
/* for SLEEP MODE */
*uwrq = priv->reg.cts_mode;
return 0;
@@ -2662,7 +2631,7 @@ static int ks_wlan_get_cts_mode(struct net_device *dev,
/* Private handler : set sleep mode */
static int ks_wlan_set_sleep_mode(struct net_device *dev,
struct iw_request_info *info,
- __u32 * uwrq, char *extra)
+ __u32 *uwrq, char *extra)
{
struct ks_wlan_private *priv =
(struct ks_wlan_private *)netdev_priv(dev);
@@ -2692,7 +2661,7 @@ static int ks_wlan_set_sleep_mode(struct net_device *dev,
/* Private handler : get sleep mode */
static int ks_wlan_get_sleep_mode(struct net_device *dev,
struct iw_request_info *info,
- __u32 * uwrq, char *extra)
+ __u32 *uwrq, char *extra)
{
struct ks_wlan_private *priv =
(struct ks_wlan_private *)netdev_priv(dev);
@@ -2708,16 +2677,15 @@ static int ks_wlan_get_sleep_mode(struct net_device *dev,
/* Private handler : set phy information timer */
static int ks_wlan_set_phy_information_timer(struct net_device *dev,
struct iw_request_info *info,
- __u32 * uwrq, char *extra)
+ __u32 *uwrq, char *extra)
{
struct ks_wlan_private *priv = (struct ks_wlan_private *)dev->priv;
- if (priv->sleep_mode == SLP_SLEEP) {
+ if (priv->sleep_mode == SLP_SLEEP)
return -EPERM;
- }
/* for SLEEP MODE */
if (*uwrq >= 0 && *uwrq <= 0xFFFF) /* 0-65535 */
- priv->reg.phy_info_timer = (uint16_t) * uwrq;
+ priv->reg.phy_info_timer = (uint16_t)*uwrq;
else
return -EINVAL;
@@ -2730,13 +2698,12 @@ static int ks_wlan_set_phy_information_timer(struct net_device *dev,
/* Private handler : get phy information timer */
static int ks_wlan_get_phy_information_timer(struct net_device *dev,
struct iw_request_info *info,
- __u32 * uwrq, char *extra)
+ __u32 *uwrq, char *extra)
{
struct ks_wlan_private *priv = (struct ks_wlan_private *)dev->priv;
- if (priv->sleep_mode == SLP_SLEEP) {
+ if (priv->sleep_mode == SLP_SLEEP)
return -EPERM;
- }
/* for SLEEP MODE */
*uwrq = priv->reg.phy_info_timer;
return 0;
@@ -2747,16 +2714,15 @@ static int ks_wlan_get_phy_information_timer(struct net_device *dev,
/*------------------------------------------------------------------*/
/* Private handler : set WPS enable */
static int ks_wlan_set_wps_enable(struct net_device *dev,
- struct iw_request_info *info, __u32 * uwrq,
+ struct iw_request_info *info, __u32 *uwrq,
char *extra)
{
struct ks_wlan_private *priv =
(struct ks_wlan_private *)netdev_priv(dev);
DPRINTK(2, "\n");
- if (priv->sleep_mode == SLP_SLEEP) {
+ if (priv->sleep_mode == SLP_SLEEP)
return -EPERM;
- }
/* for SLEEP MODE */
if (*uwrq == 0 || *uwrq == 1)
priv->wps.wps_enabled = *uwrq;
@@ -2771,16 +2737,15 @@ static int ks_wlan_set_wps_enable(struct net_device *dev,
/*------------------------------------------------------------------*/
/* Private handler : get WPS enable */
static int ks_wlan_get_wps_enable(struct net_device *dev,
- struct iw_request_info *info, __u32 * uwrq,
+ struct iw_request_info *info, __u32 *uwrq,
char *extra)
{
struct ks_wlan_private *priv =
(struct ks_wlan_private *)netdev_priv(dev);
DPRINTK(2, "\n");
- if (priv->sleep_mode == SLP_SLEEP) {
+ if (priv->sleep_mode == SLP_SLEEP)
return -EPERM;
- }
/* for SLEEP MODE */
*uwrq = priv->wps.wps_enabled;
netdev_info(dev, "return=%d\n", *uwrq);
@@ -2801,16 +2766,14 @@ static int ks_wlan_set_wps_probe_req(struct net_device *dev,
DPRINTK(2, "\n");
- if (priv->sleep_mode == SLP_SLEEP) {
+ if (priv->sleep_mode == SLP_SLEEP)
return -EPERM;
- }
/* for SLEEP MODE */
DPRINTK(2, "dwrq->length=%d\n", dwrq->length);
/* length check */
- if (p[1] + 2 != dwrq->length || dwrq->length > 256) {
+ if (p[1] + 2 != dwrq->length || dwrq->length > 256)
return -EINVAL;
- }
priv->wps.ielen = p[1] + 2 + 1; /* IE header + IE + sizeof(len) */
len = p[1] + 2; /* IE header + IE */
@@ -2833,14 +2796,14 @@ static int ks_wlan_set_wps_probe_req(struct net_device *dev,
/* Private handler : get WPS probe req */
static int ks_wlan_get_wps_probe_req(struct net_device *dev,
struct iw_request_info *info,
- __u32 * uwrq, char *extra)
+ __u32 *uwrq, char *extra)
{
struct ks_wlan_private *priv = (struct ks_wlan_private *)dev->priv;
+
DPRINTK(2, "\n");
- if (priv->sleep_mode == SLP_SLEEP) {
+ if (priv->sleep_mode == SLP_SLEEP)
return -EPERM;
- }
/* for SLEEP MODE */
return 0;
}
@@ -2850,18 +2813,17 @@ static int ks_wlan_get_wps_probe_req(struct net_device *dev,
/*------------------------------------------------------------------*/
/* Private handler : set tx gain control value */
static int ks_wlan_set_tx_gain(struct net_device *dev,
- struct iw_request_info *info, __u32 * uwrq,
+ struct iw_request_info *info, __u32 *uwrq,
char *extra)
{
struct ks_wlan_private *priv =
(struct ks_wlan_private *)netdev_priv(dev);
- if (priv->sleep_mode == SLP_SLEEP) {
+ if (priv->sleep_mode == SLP_SLEEP)
return -EPERM;
- }
/* for SLEEP MODE */
if (*uwrq >= 0 && *uwrq <= 0xFF) /* 0-255 */
- priv->gain.TxGain = (uint8_t) * uwrq;
+ priv->gain.TxGain = (uint8_t)*uwrq;
else
return -EINVAL;
@@ -2877,15 +2839,14 @@ static int ks_wlan_set_tx_gain(struct net_device *dev,
/*------------------------------------------------------------------*/
/* Private handler : get tx gain control value */
static int ks_wlan_get_tx_gain(struct net_device *dev,
- struct iw_request_info *info, __u32 * uwrq,
+ struct iw_request_info *info, __u32 *uwrq,
char *extra)
{
struct ks_wlan_private *priv =
(struct ks_wlan_private *)netdev_priv(dev);
- if (priv->sleep_mode == SLP_SLEEP) {
+ if (priv->sleep_mode == SLP_SLEEP)
return -EPERM;
- }
/* for SLEEP MODE */
*uwrq = priv->gain.TxGain;
hostif_sme_enqueue(priv, SME_GET_GAIN);
@@ -2895,18 +2856,17 @@ static int ks_wlan_get_tx_gain(struct net_device *dev,
/*------------------------------------------------------------------*/
/* Private handler : set rx gain control value */
static int ks_wlan_set_rx_gain(struct net_device *dev,
- struct iw_request_info *info, __u32 * uwrq,
+ struct iw_request_info *info, __u32 *uwrq,
char *extra)
{
struct ks_wlan_private *priv =
(struct ks_wlan_private *)netdev_priv(dev);
- if (priv->sleep_mode == SLP_SLEEP) {
+ if (priv->sleep_mode == SLP_SLEEP)
return -EPERM;
- }
/* for SLEEP MODE */
if (*uwrq >= 0 && *uwrq <= 0xFF) /* 0-255 */
- priv->gain.RxGain = (uint8_t) * uwrq;
+ priv->gain.RxGain = (uint8_t)*uwrq;
else
return -EINVAL;
@@ -2922,15 +2882,14 @@ static int ks_wlan_set_rx_gain(struct net_device *dev,
/*------------------------------------------------------------------*/
/* Private handler : get rx gain control value */
static int ks_wlan_get_rx_gain(struct net_device *dev,
- struct iw_request_info *info, __u32 * uwrq,
+ struct iw_request_info *info, __u32 *uwrq,
char *extra)
{
struct ks_wlan_private *priv =
(struct ks_wlan_private *)netdev_priv(dev);
- if (priv->sleep_mode == SLP_SLEEP) {
+ if (priv->sleep_mode == SLP_SLEEP)
return -EPERM;
- }
/* for SLEEP MODE */
*uwrq = priv->gain.RxGain;
hostif_sme_enqueue(priv, SME_GET_GAIN);
@@ -2941,17 +2900,16 @@ static int ks_wlan_get_rx_gain(struct net_device *dev,
/*------------------------------------------------------------------*/
/* Private handler : set region value */
static int ks_wlan_set_region(struct net_device *dev,
- struct iw_request_info *info, __u32 * uwrq,
+ struct iw_request_info *info, __u32 *uwrq,
char *extra)
{
struct ks_wlan_private *priv = (struct ks_wlan_private *)dev->priv;
- if (priv->sleep_mode == SLP_SLEEP) {
+ if (priv->sleep_mode == SLP_SLEEP)
return -EPERM;
- }
/* for SLEEP MODE */
if (*uwrq >= 0x9 && *uwrq <= 0xF) /* 0x9-0xf */
- priv->region = (uint8_t) * uwrq;
+ priv->region = (uint8_t)*uwrq;
else
return -EINVAL;
@@ -2963,7 +2921,7 @@ static int ks_wlan_set_region(struct net_device *dev,
/*------------------------------------------------------------------*/
/* Private handler : get eeprom checksum result */
static int ks_wlan_get_eeprom_cksum(struct net_device *dev,
- struct iw_request_info *info, __u32 * uwrq,
+ struct iw_request_info *info, __u32 *uwrq,
char *extra)
{
struct ks_wlan_private *priv =
@@ -3090,7 +3048,7 @@ static void print_hif_event(struct net_device *dev, int event)
/*------------------------------------------------------------------*/
/* Private handler : get host command history */
static int ks_wlan_hostt(struct net_device *dev, struct iw_request_info *info,
- __u32 * uwrq, char *extra)
+ __u32 *uwrq, char *extra)
{
int i, event;
struct ks_wlan_private *priv =
@@ -3293,6 +3251,7 @@ static int ks_wlan_netdev_ioctl(struct net_device *dev, struct ifreq *rq,
{
int rc = 0;
struct iwreq *wrq = (struct iwreq *)rq;
+
switch (cmd) {
case SIOCIWFIRSTPRIV + 20: /* KS_WLAN_SET_STOP_REQ */
rc = ks_wlan_set_stop_request(dev, NULL, &(wrq->u.mode), NULL);
@@ -3311,9 +3270,8 @@ struct net_device_stats *ks_wlan_get_stats(struct net_device *dev)
{
struct ks_wlan_private *priv = netdev_priv(dev);
- if (priv->dev_state < DEVICE_STATE_READY) {
+ if (priv->dev_state < DEVICE_STATE_READY)
return NULL; /* not finished initialize */
- }
return &priv->nstats;
}
@@ -3323,6 +3281,7 @@ int ks_wlan_set_mac_address(struct net_device *dev, void *addr)
{
struct ks_wlan_private *priv = netdev_priv(dev);
struct sockaddr *mac_addr = (struct sockaddr *)addr;
+
if (netif_running(dev))
return -EBUSY;
memcpy(dev->dev_addr, mac_addr->sa_data, dev->addr_len);
@@ -3330,10 +3289,7 @@ int ks_wlan_set_mac_address(struct net_device *dev, void *addr)
priv->mac_address_valid = 0;
hostif_sme_enqueue(priv, SME_MACADDRESS_SET_REQUEST);
- netdev_info(dev,
- "ks_wlan: MAC ADDRESS = %02x:%02x:%02x:%02x:%02x:%02x\n",
- priv->eth_addr[0], priv->eth_addr[1], priv->eth_addr[2],
- priv->eth_addr[3], priv->eth_addr[4], priv->eth_addr[5]);
+ netdev_info(dev, "ks_wlan: MAC ADDRESS = %pM\n", priv->eth_addr);
return 0;
}
@@ -3344,9 +3300,8 @@ void ks_wlan_tx_timeout(struct net_device *dev)
DPRINTK(1, "head(%d) tail(%d)!!\n", priv->tx_dev.qhead,
priv->tx_dev.qtail);
- if (!netif_queue_stopped(dev)) {
+ if (!netif_queue_stopped(dev))
netif_stop_queue(dev);
- }
priv->nstats.tx_errors++;
netif_wake_queue(dev);
}
@@ -3375,9 +3330,8 @@ int ks_wlan_start_xmit(struct sk_buff *skb, struct net_device *dev)
netif_trans_update(dev);
DPRINTK(4, "rc=%d\n", rc);
- if (rc) {
+ if (rc)
rc = 0;
- }
return rc;
}
@@ -3410,9 +3364,8 @@ void ks_wlan_set_multicast_list(struct net_device *dev)
struct ks_wlan_private *priv = netdev_priv(dev);
DPRINTK(4, "\n");
- if (priv->dev_state < DEVICE_STATE_READY) {
+ if (priv->dev_state < DEVICE_STATE_READY)
return; /* not finished initialize */
- }
hostif_sme_enqueue(priv, SME_MULTICAST_REQUEST);
}
@@ -3426,8 +3379,8 @@ int ks_wlan_open(struct net_device *dev)
if (!priv->mac_address_valid) {
netdev_err(dev, "ks_wlan : %s Not READY !!\n", dev->name);
return -EBUSY;
- } else
- netif_start_queue(dev);
+ }
+ netif_start_queue(dev);
return 0;
}
@@ -3474,9 +3427,8 @@ int ks_wlan_net_start(struct net_device *dev)
/* phy information update timer */
atomic_set(&update_phyinfo, 0);
- init_timer(&update_phyinfo_timer);
- update_phyinfo_timer.function = ks_wlan_update_phyinfo_timeout;
- update_phyinfo_timer.data = (unsigned long)priv;
+ setup_timer(&update_phyinfo_timer, ks_wlan_update_phyinfo_timeout,
+ (unsigned long)priv);
/* dummy address set */
memcpy(priv->eth_addr, dummy_addr, ETH_ALEN);
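[Editor's note: the two conversions above are routine checkpatch cleanups. setup_timer() folds the old init_timer()/.function/.data triple into a single call, and the %pM printk extension formats a 6-byte MAC address directly. A minimal sketch of both idioms for the timer API of this kernel generation; the demo_* names are illustrative, not from the driver:

#include <linux/timer.h>
#include <linux/netdevice.h>

struct demo_priv {
	struct timer_list phy_timer;
	u8 eth_addr[ETH_ALEN];
};

static void demo_phyinfo_timeout(unsigned long data)
{
	struct demo_priv *priv = (struct demo_priv *)data;

	/* refresh PHY info here, then re-arm for one second later */
	mod_timer(&priv->phy_timer, jiffies + HZ);
}

static void demo_net_start(struct net_device *dev, struct demo_priv *priv)
{
	/* one call instead of init_timer() plus .function/.data assignment */
	setup_timer(&priv->phy_timer, demo_phyinfo_timeout,
		    (unsigned long)priv);

	/* %pM prints a 6-byte array as xx:xx:xx:xx:xx:xx */
	netdev_info(dev, "MAC ADDRESS = %pM\n", priv->eth_addr);
}
]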
diff --git a/drivers/staging/ks7010/michael_mic.c b/drivers/staging/ks7010/michael_mic.c
index 78ae2b8fb7f3..2f535c08e172 100644
--- a/drivers/staging/ks7010/michael_mic.c
+++ b/drivers/staging/ks7010/michael_mic.c
@@ -14,10 +14,11 @@
#include "michael_mic.h"
// Rotation functions on 32 bit values
-#define ROL32( A, n ) ( ((A) << (n)) | ( ((A)>>(32-(n))) & ( (1UL << (n)) - 1 ) ) )
-#define ROR32( A, n ) ROL32( (A), 32-(n) )
+#define ROL32(A, n) (((A) << (n)) | (((A)>>(32-(n))) & ((1UL << (n)) - 1)))
+#define ROR32(A, n) ROL32((A), 32-(n))
// Convert from Byte[] to UInt32 in a portable way
-#define getUInt32( A, B ) (uint32_t)(A[B+0] << 0) + (A[B+1] << 8) + (A[B+2] << 16) + (A[B+3] << 24)
+#define getUInt32(A, B) ((uint32_t)(A[B+0] << 0) \
+ + (A[B+1] << 8) + (A[B+2] << 16) + (A[B+3] << 24))
// Convert from UInt32 to Byte[] in a portable way
#define putUInt32(A, B, C) \
@@ -48,21 +49,22 @@ void MichaelInitializeFunction(struct michel_mic_t *Mic, uint8_t *key)
}
#define MichaelBlockFunction(L, R) \
-do{ \
- R ^= ROL32( L, 17 ); \
+do { \
+ R ^= ROL32(L, 17); \
L += R; \
R ^= ((L & 0xff00ff00) >> 8) | ((L & 0x00ff00ff) << 8); \
L += R; \
- R ^= ROL32( L, 3 ); \
+ R ^= ROL32(L, 3); \
L += R; \
- R ^= ROR32( L, 2 ); \
+ R ^= ROR32(L, 2); \
L += R; \
-}while(0)
+} while (0)
static
void MichaelAppend(struct michel_mic_t *Mic, uint8_t *src, int nBytes)
{
int addlen;
+
if (Mic->nBytesInM) {
addlen = 4 - Mic->nBytesInM;
if (addlen > nBytes)
@@ -96,7 +98,8 @@ void MichaelAppend(struct michel_mic_t *Mic, uint8_t *src, int nBytes)
static
void MichaelGetMIC(struct michel_mic_t *Mic, uint8_t *dst)
{
- uint8_t *data = Mic->M;
+ u8 *data = Mic->M;
+
switch (Mic->nBytesInM) {
case 0:
Mic->L ^= 0x5a;
@@ -122,11 +125,11 @@ void MichaelGetMIC(struct michel_mic_t *Mic, uint8_t *dst)
MichaelClear(Mic);
}
-void MichaelMICFunction(struct michel_mic_t *Mic, uint8_t *Key,
- uint8_t *Data, int Len, uint8_t priority,
- uint8_t *Result)
+void MichaelMICFunction(struct michel_mic_t *Mic, u8 *Key,
+ u8 *Data, int Len, u8 priority,
+ u8 *Result)
{
- uint8_t pad_data[4] = { priority, 0, 0, 0 };
+ u8 pad_data[4] = { priority, 0, 0, 0 };
// Compute the MIC value
/*
* IEEE802.11i page 47
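[Editor's note: the reflowed ROL32()/getUInt32() macros are the portable primitives Michael is built from: a 32-bit rotate and little-endian word assembly that work regardless of host byte order. A stand-alone illustration of what they compute, in plain C outside the kernel tree, with example values chosen by the editor:

#include <stdint.h>
#include <stdio.h>

#define ROL32(A, n) (((A) << (n)) | (((A) >> (32 - (n))) & ((1UL << (n)) - 1)))
#define ROR32(A, n) ROL32((A), 32 - (n))
/* assemble a little-endian uint32_t from four consecutive bytes */
#define getUInt32(A, B) ((uint32_t)(A[B + 0] << 0) \
			+ (A[B + 1] << 8) + (A[B + 2] << 16) + (A[B + 3] << 24))

int main(void)
{
	uint8_t buf[4] = { 0x78, 0x56, 0x34, 0x12 };
	uint32_t w = getUInt32(buf, 0);

	printf("%08x\n", w);		/* 12345678 on any host */
	printf("%08x\n", ROL32(w, 8));	/* 34567812 */
	printf("%08x\n", ROR32(w, 8));	/* 78123456 */
	return 0;
}
]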
diff --git a/drivers/staging/ks7010/michael_mic.h b/drivers/staging/ks7010/michael_mic.h
index efaa21788fc7..248f849fc4a5 100644
--- a/drivers/staging/ks7010/michael_mic.h
+++ b/drivers/staging/ks7010/michael_mic.h
@@ -11,15 +11,15 @@
/* MichelMIC routine define */
struct michel_mic_t {
- uint32_t K0; // Key
- uint32_t K1; // Key
- uint32_t L; // Current state
- uint32_t R; // Current state
- uint8_t M[4]; // Message accumulator (single word)
- int nBytesInM; // # bytes in M
- uint8_t Result[8];
+ u32 K0; // Key
+ u32 K1; // Key
+ u32 L; // Current state
+ u32 R; // Current state
+ u8 M[4]; // Message accumulator (single word)
+ int nBytesInM; // # bytes in M
+ u8 Result[8];
};
-void MichaelMICFunction(struct michel_mic_t *Mic, uint8_t *Key,
- uint8_t *Data, int Len, uint8_t priority,
- uint8_t *Result);
+void MichaelMICFunction(struct michel_mic_t *Mic, u8 *Key,
+ u8 *Data, int Len, u8 priority,
+ u8 *Result);
diff --git a/drivers/staging/lustre/include/linux/libcfs/curproc.h b/drivers/staging/lustre/include/linux/libcfs/curproc.h
index be0675d8ff5e..1ea27c9e3708 100644
--- a/drivers/staging/lustre/include/linux/libcfs/curproc.h
+++ b/drivers/staging/lustre/include/linux/libcfs/curproc.h
@@ -53,7 +53,7 @@
#define current_pid() (current->pid)
#define current_comm() (current->comm)
-typedef __u32 cfs_cap_t;
+typedef u32 cfs_cap_t;
#define CFS_CAP_CHOWN 0
#define CFS_CAP_DAC_OVERRIDE 1
@@ -65,15 +65,15 @@ typedef __u32 cfs_cap_t;
#define CFS_CAP_SYS_BOOT 23
#define CFS_CAP_SYS_RESOURCE 24
-#define CFS_CAP_FS_MASK ((1 << CFS_CAP_CHOWN) | \
- (1 << CFS_CAP_DAC_OVERRIDE) | \
- (1 << CFS_CAP_DAC_READ_SEARCH) | \
- (1 << CFS_CAP_FOWNER) | \
- (1 << CFS_CAP_FSETID) | \
- (1 << CFS_CAP_LINUX_IMMUTABLE) | \
- (1 << CFS_CAP_SYS_ADMIN) | \
- (1 << CFS_CAP_SYS_BOOT) | \
- (1 << CFS_CAP_SYS_RESOURCE))
+#define CFS_CAP_FS_MASK (BIT(CFS_CAP_CHOWN) | \
+ BIT(CFS_CAP_DAC_OVERRIDE) | \
+ BIT(CFS_CAP_DAC_READ_SEARCH) | \
+ BIT(CFS_CAP_FOWNER) | \
+ BIT(CFS_CAP_FSETID) | \
+ BIT(CFS_CAP_LINUX_IMMUTABLE) | \
+ BIT(CFS_CAP_SYS_ADMIN) | \
+ BIT(CFS_CAP_SYS_BOOT) | \
+ BIT(CFS_CAP_SYS_RESOURCE))
void cfs_cap_raise(cfs_cap_t cap);
void cfs_cap_lower(cfs_cap_t cap);
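[Editor's note: BIT(n) from <linux/bitops.h> expands to (1UL << (n)), so the CFS_CAP_FS_MASK rewrite above is purely cosmetic; it keeps the shift out of every line of the OR chain. A tiny sketch of the same pattern with hypothetical capability names:

#include <linux/bitops.h>
#include <linux/types.h>

#define DEMO_CAP_CHOWN		0
#define DEMO_CAP_FOWNER		3
#define DEMO_CAP_SYS_ADMIN	21

#define DEMO_CAP_FS_MASK	(BIT(DEMO_CAP_CHOWN) |	\
				 BIT(DEMO_CAP_FOWNER) |	\
				 BIT(DEMO_CAP_SYS_ADMIN))

static inline bool demo_cap_is_fs(unsigned int cap)
{
	return (DEMO_CAP_FS_MASK & BIT(cap)) != 0;
}
]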
diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs.h b/drivers/staging/lustre/include/linux/libcfs/libcfs.h
index 3b92d38d37e2..cc2c0e97bb7e 100644
--- a/drivers/staging/lustre/include/linux/libcfs/libcfs.h
+++ b/drivers/staging/lustre/include/linux/libcfs/libcfs.h
@@ -61,7 +61,7 @@
sigset_t cfs_block_allsigs(void);
sigset_t cfs_block_sigs(unsigned long sigs);
sigset_t cfs_block_sigsinv(unsigned long sigs);
-void cfs_restore_sigs(sigset_t);
+void cfs_restore_sigs(sigset_t sigset);
void cfs_clear_sigpending(void);
/*
@@ -71,7 +71,7 @@ void cfs_clear_sigpending(void);
/* returns a random 32-bit integer */
unsigned int cfs_rand(void);
/* seed the generator */
-void cfs_srand(unsigned int, unsigned int);
+void cfs_srand(unsigned int seed1, unsigned int seed2);
void cfs_get_random_bytes(void *buf, int size);
#include "libcfs_debug.h"
@@ -125,7 +125,6 @@ extern struct miscdevice libcfs_dev;
/**
* The path of debug log dump upcall script.
*/
-extern char lnet_upcall[1024];
extern char lnet_debug_log_upcall[1024];
extern struct cfs_wi_sched *cfs_sched_rehash;
diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs_cpu.h b/drivers/staging/lustre/include/linux/libcfs/libcfs_cpu.h
index 81d8079e3b5e..6d8752a368fa 100644
--- a/drivers/staging/lustre/include/linux/libcfs/libcfs_cpu.h
+++ b/drivers/staging/lustre/include/linux/libcfs/libcfs_cpu.h
@@ -92,7 +92,7 @@ struct cfs_cpt_table {
/* node mask */
nodemask_t ctb_nodemask;
/* version */
- __u64 ctb_version;
+ u64 ctb_version;
};
static inline cpumask_t *
@@ -211,7 +211,7 @@ int cfs_cpu_ht_nsiblings(int cpu);
*/
void *cfs_percpt_alloc(struct cfs_cpt_table *cptab, unsigned int size);
/*
- * destory per-cpu-partition variable
+ * destroy per-cpu-partition variable
*/
void cfs_percpt_free(void *vars);
int cfs_percpt_number(void *vars);
diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs_crypto.h b/drivers/staging/lustre/include/linux/libcfs/libcfs_crypto.h
index 02be7d7608a5..8f34c5ddc63e 100644
--- a/drivers/staging/lustre/include/linux/libcfs/libcfs_crypto.h
+++ b/drivers/staging/lustre/include/linux/libcfs/libcfs_crypto.h
@@ -29,10 +29,12 @@
#define _LIBCFS_CRYPTO_H
struct cfs_crypto_hash_type {
- char *cht_name; /**< hash algorithm name, equal to
- * format name for crypto api */
- unsigned int cht_key; /**< init key by default (valid for
- * 4 bytes context like crc32, adler */
+ char *cht_name; /*< hash algorithm name, equal to
+ * format name for crypto api
+ */
+ unsigned int cht_key; /*< init key by default (valid for
+ * 4 bytes context like crc32, adler
+ */
unsigned int cht_size; /**< hash digest size */
};
@@ -135,7 +137,7 @@ static inline unsigned char cfs_crypto_hash_alg(const char *algname)
enum cfs_crypto_hash_alg hash_alg;
for (hash_alg = 0; hash_alg < CFS_HASH_ALG_MAX; hash_alg++)
- if (strcmp(hash_types[hash_alg].cht_name, algname) == 0)
+ if (!strcmp(hash_types[hash_alg].cht_name, algname))
return hash_alg;
return CFS_HASH_ALG_UNKNOWN;
diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs_fail.h b/drivers/staging/lustre/include/linux/libcfs/libcfs_fail.h
index bdbbe934584c..fedb46dff696 100644
--- a/drivers/staging/lustre/include/linux/libcfs/libcfs_fail.h
+++ b/drivers/staging/lustre/include/linux/libcfs/libcfs_fail.h
@@ -39,8 +39,8 @@ extern int cfs_fail_err;
extern wait_queue_head_t cfs_race_waitq;
extern int cfs_race_state;
-int __cfs_fail_check_set(__u32 id, __u32 value, int set);
-int __cfs_fail_timeout_set(__u32 id, __u32 value, int ms, int set);
+int __cfs_fail_check_set(u32 id, u32 value, int set);
+int __cfs_fail_timeout_set(u32 id, u32 value, int ms, int set);
enum {
CFS_FAIL_LOC_NOSET = 0,
@@ -55,11 +55,11 @@ enum {
#define CFS_FAILED_BIT 30
/* CFS_FAILED is 0x40000000 */
-#define CFS_FAILED (1 << CFS_FAILED_BIT)
+#define CFS_FAILED BIT(CFS_FAILED_BIT)
#define CFS_FAIL_ONCE_BIT 31
/* CFS_FAIL_ONCE is 0x80000000 */
-#define CFS_FAIL_ONCE (1 << CFS_FAIL_ONCE_BIT)
+#define CFS_FAIL_ONCE BIT(CFS_FAIL_ONCE_BIT)
/* The following flags aren't made to be combined */
#define CFS_FAIL_SKIP 0x20000000 /* skip N times then fail */
@@ -69,14 +69,14 @@ enum {
#define CFS_FAULT 0x02000000 /* match any CFS_FAULT_CHECK */
-static inline bool CFS_FAIL_PRECHECK(__u32 id)
+static inline bool CFS_FAIL_PRECHECK(u32 id)
{
- return cfs_fail_loc != 0 &&
+ return cfs_fail_loc &&
((cfs_fail_loc & CFS_FAIL_MASK_LOC) == (id & CFS_FAIL_MASK_LOC) ||
- (cfs_fail_loc & id & CFS_FAULT));
+ (cfs_fail_loc & id & CFS_FAULT));
}
-static inline int cfs_fail_check_set(__u32 id, __u32 value,
+static inline int cfs_fail_check_set(u32 id, u32 value,
int set, int quiet)
{
int ret = 0;
@@ -103,28 +103,34 @@ static inline int cfs_fail_check_set(__u32 id, __u32 value,
#define CFS_FAIL_CHECK_QUIET(id) \
cfs_fail_check_set(id, 0, CFS_FAIL_LOC_NOSET, 1)
-/* If id hit cfs_fail_loc and cfs_fail_val == (-1 or value) return 1,
- * otherwise return 0 */
+/*
+ * If id hit cfs_fail_loc and cfs_fail_val == (-1 or value) return 1,
+ * otherwise return 0
+ */
#define CFS_FAIL_CHECK_VALUE(id, value) \
cfs_fail_check_set(id, value, CFS_FAIL_LOC_VALUE, 0)
#define CFS_FAIL_CHECK_VALUE_QUIET(id, value) \
cfs_fail_check_set(id, value, CFS_FAIL_LOC_VALUE, 1)
-/* If id hit cfs_fail_loc, cfs_fail_loc |= value and return 1,
- * otherwise return 0 */
+/*
+ * If id hit cfs_fail_loc, cfs_fail_loc |= value and return 1,
+ * otherwise return 0
+ */
#define CFS_FAIL_CHECK_ORSET(id, value) \
cfs_fail_check_set(id, value, CFS_FAIL_LOC_ORSET, 0)
#define CFS_FAIL_CHECK_ORSET_QUIET(id, value) \
cfs_fail_check_set(id, value, CFS_FAIL_LOC_ORSET, 1)
-/* If id hit cfs_fail_loc, cfs_fail_loc = value and return 1,
- * otherwise return 0 */
+/*
+ * If id hit cfs_fail_loc, cfs_fail_loc = value and return 1,
+ * otherwise return 0
+ */
#define CFS_FAIL_CHECK_RESET(id, value) \
cfs_fail_check_set(id, value, CFS_FAIL_LOC_RESET, 0)
#define CFS_FAIL_CHECK_RESET_QUIET(id, value) \
cfs_fail_check_set(id, value, CFS_FAIL_LOC_RESET, 1)
-static inline int cfs_fail_timeout_set(__u32 id, __u32 value, int ms, int set)
+static inline int cfs_fail_timeout_set(u32 id, u32 value, int ms, int set)
{
if (unlikely(CFS_FAIL_PRECHECK(id)))
return __cfs_fail_timeout_set(id, value, ms, set);
@@ -138,8 +144,10 @@ static inline int cfs_fail_timeout_set(__u32 id, __u32 value, int ms, int set)
#define CFS_FAIL_TIMEOUT_MS(id, ms) \
cfs_fail_timeout_set(id, 0, ms, CFS_FAIL_LOC_NOSET)
-/* If id hit cfs_fail_loc, cfs_fail_loc |= value and
- * sleep seconds or milliseconds */
+/*
+ * If id hit cfs_fail_loc, cfs_fail_loc |= value and
+ * sleep seconds or milliseconds
+ */
#define CFS_FAIL_TIMEOUT_ORSET(id, value, secs) \
cfs_fail_timeout_set(id, value, secs * 1000, CFS_FAIL_LOC_ORSET)
@@ -152,13 +160,14 @@ static inline int cfs_fail_timeout_set(__u32 id, __u32 value, int ms, int set)
#define CFS_FAULT_CHECK(id) \
CFS_FAIL_CHECK(CFS_FAULT | (id))
-/* The idea here is to synchronise two threads to force a race. The
+/*
+ * The idea here is to synchronise two threads to force a race. The
* first thread that calls this with a matching fail_loc is put to
* sleep. The next thread that calls with the same fail_loc wakes up
- * the first and continues. */
-static inline void cfs_race(__u32 id)
+ * the first and continues.
+ */
+static inline void cfs_race(u32 id)
{
-
if (CFS_FAIL_PRECHECK(id)) {
if (unlikely(__cfs_fail_check_set(id, 0, CFS_FAIL_LOC_NOSET))) {
int rc;
@@ -166,7 +175,7 @@ static inline void cfs_race(__u32 id)
cfs_race_state = 0;
CERROR("cfs_race id %x sleeping\n", id);
rc = wait_event_interruptible(cfs_race_waitq,
- cfs_race_state != 0);
+ !!cfs_race_state);
CERROR("cfs_fail_race id %x awake, rc=%d\n", id, rc);
} else {
CERROR("cfs_fail_race id %x waking\n", id);
diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs_hash.h b/drivers/staging/lustre/include/linux/libcfs/libcfs_hash.h
index 6949a1846635..0cc2fc465c1a 100644
--- a/drivers/staging/lustre/include/linux/libcfs/libcfs_hash.h
+++ b/drivers/staging/lustre/include/linux/libcfs/libcfs_hash.h
@@ -57,8 +57,10 @@
/** disable debug */
#define CFS_HASH_DEBUG_NONE 0
-/** record hash depth and output to console when it's too deep,
- * computing overhead is low but consume more memory */
+/*
+ * record hash depth and output to console when it's too deep,
+ * computing overhead is low but consume more memory
+ */
#define CFS_HASH_DEBUG_1 1
/** expensive, check key validation */
#define CFS_HASH_DEBUG_2 2
@@ -87,8 +89,8 @@ union cfs_hash_lock {
*/
struct cfs_hash_bucket {
union cfs_hash_lock hsb_lock; /**< bucket lock */
- __u32 hsb_count; /**< current entries */
- __u32 hsb_version; /**< change version */
+ u32 hsb_count; /**< current entries */
+ u32 hsb_version; /**< change version */
unsigned int hsb_index; /**< index of bucket */
int hsb_depmax; /**< max depth on bucket */
long hsb_head[0]; /**< hash-head array */
@@ -123,38 +125,40 @@ enum cfs_hash_tag {
* . Some functions will be disabled with this flag, i.e:
* cfs_hash_for_each_empty, cfs_hash_rehash
*/
- CFS_HASH_NO_LOCK = 1 << 0,
+ CFS_HASH_NO_LOCK = BIT(0),
/** no bucket lock, use one spinlock to protect the whole hash */
- CFS_HASH_NO_BKTLOCK = 1 << 1,
+ CFS_HASH_NO_BKTLOCK = BIT(1),
/** rwlock to protect bucket */
- CFS_HASH_RW_BKTLOCK = 1 << 2,
+ CFS_HASH_RW_BKTLOCK = BIT(2),
/** spinlock to protect bucket */
- CFS_HASH_SPIN_BKTLOCK = 1 << 3,
+ CFS_HASH_SPIN_BKTLOCK = BIT(3),
/** always add new item to tail */
- CFS_HASH_ADD_TAIL = 1 << 4,
+ CFS_HASH_ADD_TAIL = BIT(4),
/** hash-table doesn't have refcount on item */
- CFS_HASH_NO_ITEMREF = 1 << 5,
+ CFS_HASH_NO_ITEMREF = BIT(5),
/** big name for param-tree */
- CFS_HASH_BIGNAME = 1 << 6,
+ CFS_HASH_BIGNAME = BIT(6),
/** track global count */
- CFS_HASH_COUNTER = 1 << 7,
+ CFS_HASH_COUNTER = BIT(7),
/** rehash item by new key */
- CFS_HASH_REHASH_KEY = 1 << 8,
+ CFS_HASH_REHASH_KEY = BIT(8),
/** Enable dynamic hash resizing */
- CFS_HASH_REHASH = 1 << 9,
+ CFS_HASH_REHASH = BIT(9),
/** can shrink hash-size */
- CFS_HASH_SHRINK = 1 << 10,
+ CFS_HASH_SHRINK = BIT(10),
/** assert hash is empty on exit */
- CFS_HASH_ASSERT_EMPTY = 1 << 11,
+ CFS_HASH_ASSERT_EMPTY = BIT(11),
/** record hlist depth */
- CFS_HASH_DEPTH = 1 << 12,
+ CFS_HASH_DEPTH = BIT(12),
/**
* rehash is always scheduled in a different thread, so current
* change on hash table is non-blocking
*/
- CFS_HASH_NBLK_CHANGE = 1 << 13,
- /** NB, we typed hs_flags as __u16, please change it
- * if you need to extend >=16 flags */
+ CFS_HASH_NBLK_CHANGE = BIT(13),
+ /**
+ * NB, we typed hs_flags as u16, please change it
+ * if you need to extend >=16 flags
+ */
};
/** most used attributes */
@@ -201,8 +205,10 @@ enum cfs_hash_tag {
*/
struct cfs_hash {
- /** serialize with rehash, or serialize all operations if
- * the hash-table has CFS_HASH_NO_BKTLOCK */
+ /**
+ * serialize with rehash, or serialize all operations if
+ * the hash-table has CFS_HASH_NO_BKTLOCK
+ */
union cfs_hash_lock hs_lock;
/** hash operations */
struct cfs_hash_ops *hs_ops;
@@ -215,31 +221,31 @@ struct cfs_hash {
/** total number of items on this hash-table */
atomic_t hs_count;
/** hash flags, see cfs_hash_tag for detail */
- __u16 hs_flags;
+ u16 hs_flags;
/** # of extra-bytes for bucket, for user saving extended attributes */
- __u16 hs_extra_bytes;
+ u16 hs_extra_bytes;
/** wants to iterate */
- __u8 hs_iterating;
+ u8 hs_iterating;
/** hash-table is dying */
- __u8 hs_exiting;
+ u8 hs_exiting;
/** current hash bits */
- __u8 hs_cur_bits;
+ u8 hs_cur_bits;
/** min hash bits */
- __u8 hs_min_bits;
+ u8 hs_min_bits;
/** max hash bits */
- __u8 hs_max_bits;
+ u8 hs_max_bits;
/** bits for rehash */
- __u8 hs_rehash_bits;
+ u8 hs_rehash_bits;
/** bits for each bucket */
- __u8 hs_bkt_bits;
+ u8 hs_bkt_bits;
/** resize min threshold */
- __u16 hs_min_theta;
+ u16 hs_min_theta;
/** resize max threshold */
- __u16 hs_max_theta;
+ u16 hs_max_theta;
/** resize count */
- __u32 hs_rehash_count;
+ u32 hs_rehash_count;
/** # of iterators (caller of cfs_hash_for_each_*) */
- __u32 hs_iterators;
+ u32 hs_iterators;
/** rehash workitem */
struct cfs_workitem hs_rehash_wi;
/** refcount on this hash table */
@@ -291,8 +297,8 @@ struct cfs_hash_hlist_ops {
struct cfs_hash_ops {
/** return hashed value from @key */
- unsigned (*hs_hash)(struct cfs_hash *hs, const void *key,
- unsigned mask);
+ unsigned int (*hs_hash)(struct cfs_hash *hs, const void *key,
+ unsigned int mask);
/** return key address of @hnode */
void * (*hs_key)(struct hlist_node *hnode);
/** copy key from @hnode to @key */
@@ -317,110 +323,112 @@ struct cfs_hash_ops {
/** total number of buckets in @hs */
#define CFS_HASH_NBKT(hs) \
- (1U << ((hs)->hs_cur_bits - (hs)->hs_bkt_bits))
+ BIT((hs)->hs_cur_bits - (hs)->hs_bkt_bits)
/** total number of buckets in @hs while rehashing */
#define CFS_HASH_RH_NBKT(hs) \
- (1U << ((hs)->hs_rehash_bits - (hs)->hs_bkt_bits))
+ BIT((hs)->hs_rehash_bits - (hs)->hs_bkt_bits)
/** number of hlist for in bucket */
-#define CFS_HASH_BKT_NHLIST(hs) (1U << (hs)->hs_bkt_bits)
+#define CFS_HASH_BKT_NHLIST(hs) BIT((hs)->hs_bkt_bits)
/** total number of hlist in @hs */
-#define CFS_HASH_NHLIST(hs) (1U << (hs)->hs_cur_bits)
+#define CFS_HASH_NHLIST(hs) BIT((hs)->hs_cur_bits)
/** total number of hlist in @hs while rehashing */
-#define CFS_HASH_RH_NHLIST(hs) (1U << (hs)->hs_rehash_bits)
+#define CFS_HASH_RH_NHLIST(hs) BIT((hs)->hs_rehash_bits)
static inline int
cfs_hash_with_no_lock(struct cfs_hash *hs)
{
/* caller will serialize all operations for this hash-table */
- return (hs->hs_flags & CFS_HASH_NO_LOCK) != 0;
+ return hs->hs_flags & CFS_HASH_NO_LOCK;
}
static inline int
cfs_hash_with_no_bktlock(struct cfs_hash *hs)
{
/* no bucket lock, one single lock to protect the hash-table */
- return (hs->hs_flags & CFS_HASH_NO_BKTLOCK) != 0;
+ return hs->hs_flags & CFS_HASH_NO_BKTLOCK;
}
static inline int
cfs_hash_with_rw_bktlock(struct cfs_hash *hs)
{
/* rwlock to protect hash bucket */
- return (hs->hs_flags & CFS_HASH_RW_BKTLOCK) != 0;
+ return hs->hs_flags & CFS_HASH_RW_BKTLOCK;
}
static inline int
cfs_hash_with_spin_bktlock(struct cfs_hash *hs)
{
/* spinlock to protect hash bucket */
- return (hs->hs_flags & CFS_HASH_SPIN_BKTLOCK) != 0;
+ return hs->hs_flags & CFS_HASH_SPIN_BKTLOCK;
}
static inline int
cfs_hash_with_add_tail(struct cfs_hash *hs)
{
- return (hs->hs_flags & CFS_HASH_ADD_TAIL) != 0;
+ return hs->hs_flags & CFS_HASH_ADD_TAIL;
}
static inline int
cfs_hash_with_no_itemref(struct cfs_hash *hs)
{
- /* hash-table doesn't keep refcount on item,
+ /*
+ * hash-table doesn't keep refcount on item,
* item can't be removed from hash unless it's
- * ZERO refcount */
- return (hs->hs_flags & CFS_HASH_NO_ITEMREF) != 0;
+ * ZERO refcount
+ */
+ return hs->hs_flags & CFS_HASH_NO_ITEMREF;
}
static inline int
cfs_hash_with_bigname(struct cfs_hash *hs)
{
- return (hs->hs_flags & CFS_HASH_BIGNAME) != 0;
+ return hs->hs_flags & CFS_HASH_BIGNAME;
}
static inline int
cfs_hash_with_counter(struct cfs_hash *hs)
{
- return (hs->hs_flags & CFS_HASH_COUNTER) != 0;
+ return hs->hs_flags & CFS_HASH_COUNTER;
}
static inline int
cfs_hash_with_rehash(struct cfs_hash *hs)
{
- return (hs->hs_flags & CFS_HASH_REHASH) != 0;
+ return hs->hs_flags & CFS_HASH_REHASH;
}
static inline int
cfs_hash_with_rehash_key(struct cfs_hash *hs)
{
- return (hs->hs_flags & CFS_HASH_REHASH_KEY) != 0;
+ return hs->hs_flags & CFS_HASH_REHASH_KEY;
}
static inline int
cfs_hash_with_shrink(struct cfs_hash *hs)
{
- return (hs->hs_flags & CFS_HASH_SHRINK) != 0;
+ return hs->hs_flags & CFS_HASH_SHRINK;
}
static inline int
cfs_hash_with_assert_empty(struct cfs_hash *hs)
{
- return (hs->hs_flags & CFS_HASH_ASSERT_EMPTY) != 0;
+ return hs->hs_flags & CFS_HASH_ASSERT_EMPTY;
}
static inline int
cfs_hash_with_depth(struct cfs_hash *hs)
{
- return (hs->hs_flags & CFS_HASH_DEPTH) != 0;
+ return hs->hs_flags & CFS_HASH_DEPTH;
}
static inline int
cfs_hash_with_nblk_change(struct cfs_hash *hs)
{
- return (hs->hs_flags & CFS_HASH_NBLK_CHANGE) != 0;
+ return hs->hs_flags & CFS_HASH_NBLK_CHANGE;
}
static inline int
@@ -434,14 +442,14 @@ static inline int
cfs_hash_is_rehashing(struct cfs_hash *hs)
{
/* rehash is launched */
- return hs->hs_rehash_bits != 0;
+ return !!hs->hs_rehash_bits;
}
static inline int
cfs_hash_is_iterating(struct cfs_hash *hs)
{
/* someone is calling cfs_hash_for_each_* */
- return hs->hs_iterating || hs->hs_iterators != 0;
+ return hs->hs_iterating || hs->hs_iterators;
}
static inline int
@@ -453,7 +461,7 @@ cfs_hash_bkt_size(struct cfs_hash *hs)
}
static inline unsigned
-cfs_hash_id(struct cfs_hash *hs, const void *key, unsigned mask)
+cfs_hash_id(struct cfs_hash *hs, const void *key, unsigned int mask)
{
return hs->hs_ops->hs_hash(hs, key, mask);
}
@@ -562,7 +570,7 @@ cfs_hash_bd_index_get(struct cfs_hash *hs, struct cfs_hash_bd *bd)
}
static inline void
-cfs_hash_bd_index_set(struct cfs_hash *hs, unsigned index,
+cfs_hash_bd_index_set(struct cfs_hash *hs, unsigned int index,
struct cfs_hash_bd *bd)
{
bd->bd_bucket = hs->hs_buckets[index >> hs->hs_bkt_bits];
@@ -576,14 +584,14 @@ cfs_hash_bd_extra_get(struct cfs_hash *hs, struct cfs_hash_bd *bd)
cfs_hash_bkt_size(hs) - hs->hs_extra_bytes;
}
-static inline __u32
+static inline u32
cfs_hash_bd_version_get(struct cfs_hash_bd *bd)
{
/* need hold cfs_hash_bd_lock */
return bd->bd_bucket->hsb_version;
}
-static inline __u32
+static inline u32
cfs_hash_bd_count_get(struct cfs_hash_bd *bd)
{
/* need hold cfs_hash_bd_lock */
@@ -669,10 +677,10 @@ cfs_hash_dual_bd_finddel_locked(struct cfs_hash *hs, struct cfs_hash_bd *bds,
/* Hash init/cleanup functions */
struct cfs_hash *
-cfs_hash_create(char *name, unsigned cur_bits, unsigned max_bits,
- unsigned bkt_bits, unsigned extra_bytes,
- unsigned min_theta, unsigned max_theta,
- struct cfs_hash_ops *ops, unsigned flags);
+cfs_hash_create(char *name, unsigned int cur_bits, unsigned int max_bits,
+ unsigned int bkt_bits, unsigned int extra_bytes,
+ unsigned int min_theta, unsigned int max_theta,
+ struct cfs_hash_ops *ops, unsigned int flags);
struct cfs_hash *cfs_hash_getref(struct cfs_hash *hs);
void cfs_hash_putref(struct cfs_hash *hs);
@@ -700,27 +708,28 @@ typedef int (*cfs_hash_for_each_cb_t)(struct cfs_hash *hs,
void *
cfs_hash_lookup(struct cfs_hash *hs, const void *key);
void
-cfs_hash_for_each(struct cfs_hash *hs, cfs_hash_for_each_cb_t, void *data);
+cfs_hash_for_each(struct cfs_hash *hs, cfs_hash_for_each_cb_t cb, void *data);
void
-cfs_hash_for_each_safe(struct cfs_hash *hs, cfs_hash_for_each_cb_t, void *data);
+cfs_hash_for_each_safe(struct cfs_hash *hs, cfs_hash_for_each_cb_t cb,
+ void *data);
int
-cfs_hash_for_each_nolock(struct cfs_hash *hs, cfs_hash_for_each_cb_t,
- void *data);
+cfs_hash_for_each_nolock(struct cfs_hash *hs, cfs_hash_for_each_cb_t cb,
+ void *data, int start);
int
-cfs_hash_for_each_empty(struct cfs_hash *hs, cfs_hash_for_each_cb_t,
+cfs_hash_for_each_empty(struct cfs_hash *hs, cfs_hash_for_each_cb_t cb,
void *data);
void
cfs_hash_for_each_key(struct cfs_hash *hs, const void *key,
- cfs_hash_for_each_cb_t, void *data);
+ cfs_hash_for_each_cb_t cb, void *data);
typedef int (*cfs_hash_cond_opt_cb_t)(void *obj, void *data);
void
-cfs_hash_cond_del(struct cfs_hash *hs, cfs_hash_cond_opt_cb_t, void *data);
+cfs_hash_cond_del(struct cfs_hash *hs, cfs_hash_cond_opt_cb_t cb, void *data);
void
-cfs_hash_hlist_for_each(struct cfs_hash *hs, unsigned hindex,
- cfs_hash_for_each_cb_t, void *data);
+cfs_hash_hlist_for_each(struct cfs_hash *hs, unsigned int hindex,
+ cfs_hash_for_each_cb_t cb, void *data);
int cfs_hash_is_empty(struct cfs_hash *hs);
-__u64 cfs_hash_size_get(struct cfs_hash *hs);
+u64 cfs_hash_size_get(struct cfs_hash *hs);
/*
* Rehash - Theta is calculated to be the average chained
@@ -766,8 +775,8 @@ cfs_hash_bucket_validate(struct cfs_hash *hs, struct cfs_hash_bd *bd,
#endif /* CFS_HASH_DEBUG_LEVEL */
#define CFS_HASH_THETA_BITS 10
-#define CFS_HASH_MIN_THETA (1U << (CFS_HASH_THETA_BITS - 1))
-#define CFS_HASH_MAX_THETA (1U << (CFS_HASH_THETA_BITS + 1))
+#define CFS_HASH_MIN_THETA BIT(CFS_HASH_THETA_BITS - 1)
+#define CFS_HASH_MAX_THETA BIT(CFS_HASH_THETA_BITS + 1)
/* Return integer component of theta */
static inline int __cfs_hash_theta_int(int theta)
@@ -792,8 +801,8 @@ static inline void
__cfs_hash_set_theta(struct cfs_hash *hs, int min, int max)
{
LASSERT(min < max);
- hs->hs_min_theta = (__u16)min;
- hs->hs_max_theta = (__u16)max;
+ hs->hs_min_theta = (u16)min;
+ hs->hs_max_theta = (u16)max;
}
/* Generic debug formatting routines mainly for proc handler */
@@ -805,11 +814,11 @@ void cfs_hash_debug_str(struct cfs_hash *hs, struct seq_file *m);
* Generic djb2 hash algorithm for character arrays.
*/
static inline unsigned
-cfs_hash_djb2_hash(const void *key, size_t size, unsigned mask)
+cfs_hash_djb2_hash(const void *key, size_t size, unsigned int mask)
{
- unsigned i, hash = 5381;
+ unsigned int i, hash = 5381;
- LASSERT(key != NULL);
+ LASSERT(key);
for (i = 0; i < size; i++)
hash = hash * 33 + ((char *)key)[i];
@@ -821,7 +830,7 @@ cfs_hash_djb2_hash(const void *key, size_t size, unsigned mask)
* Generic u32 hash algorithm.
*/
static inline unsigned
-cfs_hash_u32_hash(const __u32 key, unsigned mask)
+cfs_hash_u32_hash(const u32 key, unsigned int mask)
{
return ((key * CFS_GOLDEN_RATIO_PRIME_32) & mask);
}
@@ -830,9 +839,9 @@ cfs_hash_u32_hash(const __u32 key, unsigned mask)
* Generic u64 hash algorithm.
*/
static inline unsigned
-cfs_hash_u64_hash(const __u64 key, unsigned mask)
+cfs_hash_u64_hash(const u64 key, unsigned int mask)
{
- return ((unsigned)(key * CFS_GOLDEN_RATIO_PRIME_64) & mask);
+ return ((unsigned int)(key * CFS_GOLDEN_RATIO_PRIME_64) & mask);
}
/** iterate over all buckets in @bds (array of struct cfs_hash_bd) */
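[Editor's note: cfs_hash_djb2_hash() above is the classic djb2 string hash (hash = hash * 33 + byte), masked to the bucket count; the mask substitutes for a modulo because the table sizes are powers of two (BIT(hs_cur_bits) and friends). A user-space rendering under those assumptions:

#include <stddef.h>
#include <stdio.h>

/* djb2: hash = hash * 33 + c, masked to a power-of-two table size */
static unsigned int djb2_hash(const void *key, size_t size, unsigned int mask)
{
	const char *p = key;
	unsigned int i, hash = 5381;

	for (i = 0; i < size; i++)
		hash = hash * 33 + p[i];
	return hash & mask;
}

int main(void)
{
	/* 16 buckets, so mask = 16 - 1; mirrors CFS_HASH_NBKT-style sizing */
	printf("bucket %u\n", djb2_hash("lustre", 6, 15));
	return 0;
}
]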
diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs_private.h b/drivers/staging/lustre/include/linux/libcfs/libcfs_private.h
index e0e1a5d0949d..aab15d8112a4 100644
--- a/drivers/staging/lustre/include/linux/libcfs/libcfs_private.h
+++ b/drivers/staging/lustre/include/linux/libcfs/libcfs_private.h
@@ -75,7 +75,7 @@ do { \
#define KLASSERT(e) LASSERT(e)
-void __noreturn lbug_with_loc(struct libcfs_debug_msg_data *);
+void __noreturn lbug_with_loc(struct libcfs_debug_msg_data *msg);
#define LBUG() \
do { \
@@ -96,7 +96,7 @@ do { \
#define LIBCFS_ALLOC_POST(ptr, size) \
do { \
- if (unlikely((ptr) == NULL)) { \
+ if (unlikely(!(ptr))) { \
CERROR("LNET: out of memory at %s:%d (tried to alloc '" \
#ptr "' = %d)\n", __FILE__, __LINE__, (int)(size)); \
} else { \
@@ -147,7 +147,7 @@ do { \
#define LIBCFS_FREE(ptr, size) \
do { \
- if (unlikely((ptr) == NULL)) { \
+ if (unlikely(!(ptr))) { \
CERROR("LIBCFS: free NULL '" #ptr "' (%d bytes) at " \
"%s:%d\n", (int)(size), __FILE__, __LINE__); \
break; \
@@ -169,8 +169,6 @@ do { \
#define ntohs(x) ___ntohs(x)
#endif
-void libcfs_run_upcall(char **argv);
-void libcfs_run_lbug_upcall(struct libcfs_debug_msg_data *);
void libcfs_debug_dumplog(void);
int libcfs_debug_init(unsigned long bufsize);
int libcfs_debug_cleanup(void);
@@ -280,7 +278,7 @@ do { \
#define CFS_FREE_PTR(ptr) LIBCFS_FREE(ptr, sizeof(*(ptr)))
/** Compile-time assertion.
-
+ *
* Check an invariant described by a constant expression at compile time by
* forcing a compiler error if it does not hold. \a cond must be a constant
* expression as defined by the ISO C Standard:
@@ -306,7 +304,8 @@ do { \
/* --------------------------------------------------------------------
* Light-weight trace
* Support for temporary event tracing with minimal Heisenberg effect.
- * -------------------------------------------------------------------- */
+ * --------------------------------------------------------------------
+ */
#define MKSTR(ptr) ((ptr)) ? (ptr) : ""
diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs_string.h b/drivers/staging/lustre/include/linux/libcfs/libcfs_string.h
index 0ee60ff336f2..41795d9b3b9b 100644
--- a/drivers/staging/lustre/include/linux/libcfs/libcfs_string.h
+++ b/drivers/staging/lustre/include/linux/libcfs/libcfs_string.h
@@ -62,9 +62,9 @@ struct cfs_range_expr {
* Link to cfs_expr_list::el_exprs.
*/
struct list_head re_link;
- __u32 re_lo;
- __u32 re_hi;
- __u32 re_stride;
+ u32 re_lo;
+ u32 re_hi;
+ u32 re_stride;
};
struct cfs_expr_list {
@@ -74,24 +74,26 @@ struct cfs_expr_list {
char *cfs_trimwhite(char *str);
int cfs_gettok(struct cfs_lstr *next, char delim, struct cfs_lstr *res);
-int cfs_str2num_check(char *str, int nob, unsigned *num,
- unsigned min, unsigned max);
-int cfs_expr_list_match(__u32 value, struct cfs_expr_list *expr_list);
+int cfs_str2num_check(char *str, int nob, unsigned int *num,
+ unsigned int min, unsigned int max);
+int cfs_expr_list_match(u32 value, struct cfs_expr_list *expr_list);
int cfs_expr_list_print(char *buffer, int count,
struct cfs_expr_list *expr_list);
int cfs_expr_list_values(struct cfs_expr_list *expr_list,
- int max, __u32 **values);
+ int max, u32 **values);
static inline void
-cfs_expr_list_values_free(__u32 *values, int num)
+cfs_expr_list_values_free(u32 *values, int num)
{
- /* This array is allocated by LIBCFS_ALLOC(), so it shouldn't be freed
+ /*
+ * This array is allocated by LIBCFS_ALLOC(), so it shouldn't be freed
* by OBD_FREE() if it's called by module other than libcfs & LNet,
- * otherwise we will see fake memory leak */
+ * otherwise we will see fake memory leak
+ */
LIBCFS_FREE(values, num * sizeof(values[0]));
}
void cfs_expr_list_free(struct cfs_expr_list *expr_list);
-int cfs_expr_list_parse(char *str, int len, unsigned min, unsigned max,
+int cfs_expr_list_parse(char *str, int len, unsigned int min, unsigned int max,
struct cfs_expr_list **elpp);
void cfs_expr_list_free_list(struct list_head *list);
diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs_workitem.h b/drivers/staging/lustre/include/linux/libcfs/libcfs_workitem.h
index a7e1340e69a1..2accd9a85472 100644
--- a/drivers/staging/lustre/include/linux/libcfs/libcfs_workitem.h
+++ b/drivers/staging/lustre/include/linux/libcfs/libcfs_workitem.h
@@ -62,9 +62,9 @@
struct cfs_wi_sched;
-void cfs_wi_sched_destroy(struct cfs_wi_sched *);
+void cfs_wi_sched_destroy(struct cfs_wi_sched *sched);
int cfs_wi_sched_create(char *name, struct cfs_cpt_table *cptab, int cpt,
- int nthrs, struct cfs_wi_sched **);
+ int nthrs, struct cfs_wi_sched **sched_pp);
struct cfs_workitem;
diff --git a/drivers/staging/lustre/include/linux/libcfs/linux/linux-cpu.h b/drivers/staging/lustre/include/linux/libcfs/linux/linux-cpu.h
index f63cb47bc309..dd0cd0442b86 100644
--- a/drivers/staging/lustre/include/linux/libcfs/linux/linux-cpu.h
+++ b/drivers/staging/lustre/include/linux/libcfs/linux/linux-cpu.h
@@ -52,17 +52,17 @@ struct cfs_cpu_partition {
/* nodes mask for this partition */
nodemask_t *cpt_nodemask;
/* spread rotor for NUMA allocator */
- unsigned cpt_spread_rotor;
+ unsigned int cpt_spread_rotor;
};
/** descriptor for CPU partitions */
struct cfs_cpt_table {
/* version, reserved for hotplug */
- unsigned ctb_version;
+ unsigned int ctb_version;
/* spread rotor for NUMA allocator */
- unsigned ctb_spread_rotor;
+ unsigned int ctb_spread_rotor;
/* # of CPU partitions */
- unsigned ctb_nparts;
+ unsigned int ctb_nparts;
/* partitions tables */
struct cfs_cpu_partition *ctb_parts;
/* shadow HW CPU to CPU partition ID */
diff --git a/drivers/staging/lustre/include/linux/libcfs/linux/linux-time.h b/drivers/staging/lustre/include/linux/libcfs/linux/linux-time.h
index b646acd1f7e7..709e1ce98d8d 100644
--- a/drivers/staging/lustre/include/linux/libcfs/linux/linux-time.h
+++ b/drivers/staging/lustre/include/linux/libcfs/linux/linux-time.h
@@ -76,23 +76,23 @@ static inline long cfs_duration_sec(long d)
#define cfs_time_current_64 get_jiffies_64
-static inline __u64 cfs_time_add_64(__u64 t, __u64 d)
+static inline u64 cfs_time_add_64(u64 t, u64 d)
{
return t + d;
}
-static inline __u64 cfs_time_shift_64(int seconds)
+static inline u64 cfs_time_shift_64(int seconds)
{
return cfs_time_add_64(cfs_time_current_64(),
cfs_time_seconds(seconds));
}
-static inline int cfs_time_before_64(__u64 t1, __u64 t2)
+static inline int cfs_time_before_64(u64 t1, u64 t2)
{
return (__s64)t2 - (__s64)t1 > 0;
}
-static inline int cfs_time_beforeq_64(__u64 t1, __u64 t2)
+static inline int cfs_time_beforeq_64(u64 t1, u64 t2)
{
return (__s64)t2 - (__s64)t1 >= 0;
}
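[Editor's note: cfs_time_before_64() and cfs_time_beforeq_64() compare free-running 64-bit tick counters by signed subtraction, which stays correct across wrap-around as long as the two stamps are within 2^63 ticks of each other; this is the same trick as the kernel's time_before64(). A small stand-alone demonstration:

#include <stdint.h>
#include <stdio.h>

/* "t1 is before t2": the signed difference survives counter wrap */
static int before64(uint64_t t1, uint64_t t2)
{
	return (int64_t)(t2 - t1) > 0;
}

int main(void)
{
	uint64_t near_wrap = UINT64_MAX - 5;

	/* near_wrap + 10 wraps around to 4 but still compares as later */
	printf("%d\n", before64(near_wrap, near_wrap + 10));	/* 1 */
	printf("%d\n", before64(near_wrap + 10, near_wrap));	/* 0 */
	return 0;
}
]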
diff --git a/drivers/staging/lustre/include/linux/lnet/lnetst.h b/drivers/staging/lustre/include/linux/lnet/lnetst.h
index 417044552d3f..8a84888635ff 100644
--- a/drivers/staging/lustre/include/linux/lnet/lnetst.h
+++ b/drivers/staging/lustre/include/linux/lnet/lnetst.h
@@ -244,7 +244,7 @@ typedef struct {
int lstio_ses_timeout; /* IN: session timeout */
int lstio_ses_force; /* IN: force create ? */
/** IN: session features */
- unsigned lstio_ses_feats;
+ unsigned int lstio_ses_feats;
lst_sid_t __user *lstio_ses_idp; /* OUT: session id */
int lstio_ses_nmlen; /* IN: name length */
char __user *lstio_ses_namep; /* IN: session name */
@@ -255,7 +255,7 @@ typedef struct {
lst_sid_t __user *lstio_ses_idp; /* OUT: session id */
int __user *lstio_ses_keyp; /* OUT: local key */
/** OUT: session features */
- unsigned __user *lstio_ses_featp;
+ unsigned int __user *lstio_ses_featp;
lstcon_ndlist_ent_t __user *lstio_ses_ndinfo; /* OUT: */
int lstio_ses_nmlen; /* IN: name length */
char __user *lstio_ses_namep; /* OUT: session name */
@@ -328,7 +328,7 @@ typedef struct {
char __user *lstio_grp_namep; /* IN: group name */
int lstio_grp_count; /* IN: # of nodes */
/** OUT: session features */
- unsigned __user *lstio_grp_featp;
+ unsigned int __user *lstio_grp_featp;
lnet_process_id_t __user *lstio_grp_idsp; /* IN: nodes */
struct list_head __user *lstio_grp_resultp; /* OUT: list head of
result buffer */
@@ -490,6 +490,8 @@ typedef struct {
int blk_size; /* size (bytes) */
int blk_time; /* time of running the test*/
int blk_flags; /* reserved flags */
+ int blk_cli_off; /* bulk offset on client */
+ int blk_srv_off; /* reserved: bulk offset on server */
} lst_test_bulk_param_t;
typedef struct {
diff --git a/drivers/staging/lustre/include/linux/lnet/types.h b/drivers/staging/lustre/include/linux/lnet/types.h
index f8be0e2f7bf7..8ca1e9d0cfe2 100644
--- a/drivers/staging/lustre/include/linux/lnet/types.h
+++ b/drivers/staging/lustre/include/linux/lnet/types.h
@@ -34,6 +34,7 @@
#define __LNET_TYPES_H__
#include <linux/types.h>
+#include <linux/bvec.h>
/** \addtogroup lnet
* @{
diff --git a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c
index 9e8802181452..7f761b327166 100644
--- a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c
+++ b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c
@@ -1489,7 +1489,7 @@ out_fpo:
static void kiblnd_fail_fmr_poolset(struct kib_fmr_poolset *fps,
struct list_head *zombies)
{
- if (!fps->fps_net) /* intialized? */
+ if (!fps->fps_net) /* initialized? */
return;
spin_lock(&fps->fps_lock);
@@ -1637,7 +1637,7 @@ int kiblnd_fmr_pool_map(struct kib_fmr_poolset *fps, struct kib_tx *tx,
{
__u64 *pages = tx->tx_pages;
bool is_rx = (rd != tx->tx_rd);
- bool tx_pages_mapped = 0;
+ bool tx_pages_mapped = false;
struct kib_fmr_pool *fpo;
int npages = 0;
__u64 version;
@@ -1812,7 +1812,7 @@ static void kiblnd_destroy_pool_list(struct list_head *head)
static void kiblnd_fail_poolset(struct kib_poolset *ps, struct list_head *zombies)
{
- if (!ps->ps_net) /* intialized? */
+ if (!ps->ps_net) /* initialized? */
return;
spin_lock(&ps->ps_lock);
diff --git a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c
index b27de8888149..c7917abf9944 100644
--- a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c
+++ b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c
@@ -1912,12 +1912,12 @@ kiblnd_close_conn_locked(struct kib_conn *conn, int error)
libcfs_nid2str(peer->ibp_nid));
} else {
CNETERR("Closing conn to %s: error %d%s%s%s%s%s\n",
- libcfs_nid2str(peer->ibp_nid), error,
- list_empty(&conn->ibc_tx_queue) ? "" : "(sending)",
- list_empty(&conn->ibc_tx_noops) ? "" : "(sending_noops)",
- list_empty(&conn->ibc_tx_queue_rsrvd) ? "" : "(sending_rsrvd)",
- list_empty(&conn->ibc_tx_queue_nocred) ? "" : "(sending_nocred)",
- list_empty(&conn->ibc_active_txs) ? "" : "(waiting)");
+ libcfs_nid2str(peer->ibp_nid), error,
+ list_empty(&conn->ibc_tx_queue) ? "" : "(sending)",
+ list_empty(&conn->ibc_tx_noops) ? "" : "(sending_noops)",
+ list_empty(&conn->ibc_tx_queue_rsrvd) ? "" : "(sending_rsrvd)",
+ list_empty(&conn->ibc_tx_queue_nocred) ? "" : "(sending_nocred)",
+ list_empty(&conn->ibc_active_txs) ? "" : "(waiting)");
}
dev = ((struct kib_net *)peer->ibp_ni->ni_data)->ibn_dev;
@@ -2643,7 +2643,7 @@ kiblnd_check_reconnect(struct kib_conn *conn, int version,
if (incarnation)
peer->ibp_incarnation = incarnation;
out:
- write_unlock_irqrestore(glock, flags);
+ write_unlock_irqrestore(glock, flags);
CNETERR("%s: %s (%s), %x, %x, msg_size: %d, queue_depth: %d/%d, max_frags: %d/%d\n",
libcfs_nid2str(peer->ibp_nid),
@@ -2651,7 +2651,7 @@ out:
reason, IBLND_MSG_VERSION, version, msg_size,
conn->ibc_queue_depth, queue_dep,
conn->ibc_max_frags, frag_num);
- /**
+ /**
* if conn::ibc_reconnect is TRUE, connd will reconnect to the peer
* while destroying the zombie
*/
@@ -2976,7 +2976,7 @@ kiblnd_cm_callback(struct rdma_cm_id *cmid, struct rdma_cm_event *event)
case RDMA_CM_EVENT_ADDR_ERROR:
peer = (struct kib_peer *)cmid->context;
CNETERR("%s: ADDR ERROR %d\n",
- libcfs_nid2str(peer->ibp_nid), event->status);
+ libcfs_nid2str(peer->ibp_nid), event->status);
kiblnd_peer_connect_failed(peer, 1, -EHOSTUNREACH);
kiblnd_peer_decref(peer);
return -EHOSTUNREACH; /* rc destroys cmid */
@@ -3021,7 +3021,7 @@ kiblnd_cm_callback(struct rdma_cm_id *cmid, struct rdma_cm_event *event)
return kiblnd_active_connect(cmid);
CNETERR("Can't resolve route for %s: %d\n",
- libcfs_nid2str(peer->ibp_nid), event->status);
+ libcfs_nid2str(peer->ibp_nid), event->status);
kiblnd_peer_connect_failed(peer, 1, event->status);
kiblnd_peer_decref(peer);
return event->status; /* rc destroys cmid */
@@ -3031,7 +3031,7 @@ kiblnd_cm_callback(struct rdma_cm_id *cmid, struct rdma_cm_event *event)
LASSERT(conn->ibc_state == IBLND_CONN_ACTIVE_CONNECT ||
conn->ibc_state == IBLND_CONN_PASSIVE_WAIT);
CNETERR("%s: UNREACHABLE %d\n",
- libcfs_nid2str(conn->ibc_peer->ibp_nid), event->status);
+ libcfs_nid2str(conn->ibc_peer->ibp_nid), event->status);
kiblnd_connreq_done(conn, -ENETDOWN);
kiblnd_conn_decref(conn);
return 0;
@@ -3269,14 +3269,14 @@ kiblnd_disconnect_conn(struct kib_conn *conn)
#define KIB_RECONN_HIGH_RACE 10
/**
* Allow connd to take a break and handle other things after consecutive
- * reconnection attemps.
+ * reconnection attempts.
*/
#define KIB_RECONN_BREAK 100
int
kiblnd_connd(void *arg)
{
- spinlock_t *lock= &kiblnd_data.kib_connd_lock;
+ spinlock_t *lock = &kiblnd_data.kib_connd_lock;
wait_queue_t wait;
unsigned long flags;
struct kib_conn *conn;
diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.c b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.c
index cbc9a9c5385f..b74cf635faee 100644
--- a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.c
+++ b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.c
@@ -96,7 +96,8 @@ ksocknal_destroy_route(struct ksock_route *route)
}
static int
-ksocknal_create_peer(struct ksock_peer **peerp, lnet_ni_t *ni, lnet_process_id_t id)
+ksocknal_create_peer(struct ksock_peer **peerp, lnet_ni_t *ni,
+ lnet_process_id_t id)
{
int cpt = lnet_cpt_of_nid(id.nid);
struct ksock_net *net = ni->ni_data;
@@ -319,7 +320,8 @@ ksocknal_get_peer_info(lnet_ni_t *ni, int index,
}
static void
-ksocknal_associate_route_conn_locked(struct ksock_route *route, struct ksock_conn *conn)
+ksocknal_associate_route_conn_locked(struct ksock_route *route,
+ struct ksock_conn *conn)
{
struct ksock_peer *peer = route->ksnr_peer;
int type = conn->ksnc_type;
@@ -821,7 +823,8 @@ ksocknal_select_ips(struct ksock_peer *peer, __u32 *peerips, int n_peerips)
if (k < peer->ksnp_n_passive_ips) /* using it already */
continue;
- k = ksocknal_match_peerip(iface, peerips, n_peerips);
+ k = ksocknal_match_peerip(iface, peerips,
+ n_peerips);
xor = ip ^ peerips[k];
this_netmatch = !(xor & iface->ksni_netmask) ? 1 : 0;
@@ -1302,8 +1305,11 @@ ksocknal_create_conn(lnet_ni_t *ni, struct ksock_route *route,
/* Take packets blocking for this connection. */
list_for_each_entry_safe(tx, txtmp, &peer->ksnp_tx_queue, tx_list) {
- if (conn->ksnc_proto->pro_match_tx(conn, tx, tx->tx_nonblk) == SOCKNAL_MATCH_NO)
- continue;
+ int match = conn->ksnc_proto->pro_match_tx(conn, tx,
+ tx->tx_nonblk);
+
+ if (match == SOCKNAL_MATCH_NO)
+ continue;
list_del(&tx->tx_list);
ksocknal_queue_tx_locked(tx, conn);
@@ -1493,8 +1499,8 @@ ksocknal_close_conn_locked(struct ksock_conn *conn, int error)
spin_unlock_bh(&conn->ksnc_scheduler->kss_lock);
}
- peer->ksnp_proto = NULL; /* renegotiate protocol version */
- peer->ksnp_error = error; /* stash last conn close reason */
+ peer->ksnp_proto = NULL; /* renegotiate protocol version */
+ peer->ksnp_error = error; /* stash last conn close reason */
if (list_empty(&peer->ksnp_routes)) {
/*
@@ -1786,7 +1792,8 @@ ksocknal_close_matching_conns(lnet_process_id_t id, __u32 ipaddr)
(id.pid == LNET_PID_ANY || id.pid == peer->ksnp_id.pid)))
continue;
- count += ksocknal_close_peer_conns_locked(peer, ipaddr, 0);
+ count += ksocknal_close_peer_conns_locked(peer, ipaddr,
+ 0);
}
}
@@ -2026,7 +2033,10 @@ ksocknal_add_interface(lnet_ni_t *ni, __u32 ipaddress, __u32 netmask)
}
rc = 0;
- /* NB only new connections will pay attention to the new interface! */
+ /*
+ * NB only new connections will pay attention to the
+ * new interface!
+ */
}
write_unlock_bh(&ksocknal_data.ksnd_global_lock);
@@ -2200,8 +2210,9 @@ ksocknal_ctl(lnet_ni_t *ni, unsigned int cmd, void *arg)
int txmem;
int rxmem;
int nagle;
- struct ksock_conn *conn = ksocknal_get_conn_by_idx(ni, data->ioc_count);
+ struct ksock_conn *conn;
+ conn = ksocknal_get_conn_by_idx(ni, data->ioc_count);
if (!conn)
return -ENOENT;
diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.h b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.h
index e6ca0cf52691..842c45393b38 100644
--- a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.h
+++ b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.h
@@ -84,7 +84,8 @@ struct ksock_sched { /* per scheduler state */
struct list_head kss_zombie_noop_txs; /* zombie noop tx list */
wait_queue_head_t kss_waitq; /* where scheduler sleeps */
int kss_nconns; /* # connections assigned to
- * this scheduler */
+ * this scheduler
+ */
struct ksock_sched_info *kss_info; /* owner of it */
};
@@ -110,15 +111,19 @@ struct ksock_interface { /* in-use interface */
struct ksock_tunables {
int *ksnd_timeout; /* "stuck" socket timeout
- * (seconds) */
+ * (seconds)
+ */
int *ksnd_nscheds; /* # scheduler threads in each
- * pool while starting */
+ * pool while starting
+ */
int *ksnd_nconnds; /* # connection daemons */
int *ksnd_nconnds_max; /* max # connection daemons */
int *ksnd_min_reconnectms; /* first connection retry after
- * (ms)... */
+ * (ms)...
+ */
int *ksnd_max_reconnectms; /* ...exponentially increasing to
- * this */
+ * this
+ */
int *ksnd_eager_ack; /* make TCP ack eagerly? */
int *ksnd_typed_conns; /* drive sockets by type? */
int *ksnd_min_bulk; /* smallest "large" message */
@@ -126,9 +131,11 @@ struct ksock_tunables {
int *ksnd_rx_buffer_size; /* socket rx buffer size */
int *ksnd_nagle; /* enable NAGLE? */
int *ksnd_round_robin; /* round robin for multiple
- * interfaces */
+ * interfaces
+ */
int *ksnd_keepalive; /* # secs for sending keepalive
- * NOOP */
+ * NOOP
+ */
int *ksnd_keepalive_idle; /* # idle secs before 1st probe
*/
int *ksnd_keepalive_count; /* # probes */
@@ -137,20 +144,26 @@ struct ksock_tunables {
int *ksnd_peertxcredits; /* # concurrent sends to 1 peer
*/
int *ksnd_peerrtrcredits; /* # per-peer router buffer
- * credits */
+ * credits
+ */
int *ksnd_peertimeout; /* seconds to consider peer dead
*/
int *ksnd_enable_csum; /* enable check sum */
int *ksnd_inject_csum_error; /* set non-zero to inject
- * checksum error */
+ * checksum error
+ */
int *ksnd_nonblk_zcack; /* always send zc-ack on
- * non-blocking connection */
+ * non-blocking connection
+ */
unsigned int *ksnd_zc_min_payload; /* minimum zero copy payload
- * size */
+ * size
+ */
int *ksnd_zc_recv; /* enable ZC receive (for
- * Chelsio TOE) */
+ * Chelsio TOE)
+ */
int *ksnd_zc_recv_min_nfrags; /* minimum # of fragments to
- * enable ZC receive */
+ * enable ZC receive
+ */
};
struct ksock_net {
@@ -174,9 +187,11 @@ struct ksock_nal_data {
int ksnd_nnets; /* # networks set up */
struct list_head ksnd_nets; /* list of nets */
rwlock_t ksnd_global_lock; /* stabilize peer/conn
- * ops */
+ * ops
+ */
struct list_head *ksnd_peers; /* hash table of all my
- * known peers */
+ * known peers
+ */
int ksnd_peer_hash_size; /* size of ksnd_peers */
int ksnd_nthreads; /* # live threads */
@@ -187,11 +202,14 @@ struct ksock_nal_data {
atomic_t ksnd_nactive_txs; /* #active txs */
struct list_head ksnd_deathrow_conns; /* conns to close:
- * reaper_lock*/
+ * reaper_lock
+ */
struct list_head ksnd_zombie_conns; /* conns to free:
- * reaper_lock */
+ * reaper_lock
+ */
struct list_head ksnd_enomem_conns; /* conns to retry:
- * reaper_lock*/
+ * reaper_lock
+ */
wait_queue_head_t ksnd_reaper_waitq; /* reaper sleeps here */
unsigned long ksnd_reaper_waketime; /* when reaper will wake
*/
@@ -201,30 +219,34 @@ struct ksock_nal_data {
int ksnd_stall_tx; /* test sluggish sender
*/
int ksnd_stall_rx; /* test sluggish
- * receiver */
-
+ * receiver
+ */
struct list_head ksnd_connd_connreqs; /* incoming connection
- * requests */
+ * requests
+ */
struct list_head ksnd_connd_routes; /* routes waiting to be
- * connected */
+ * connected
+ */
wait_queue_head_t ksnd_connd_waitq; /* connds sleep here */
int ksnd_connd_connecting; /* # connds connecting
*/
time64_t ksnd_connd_failed_stamp;/* time stamp of the
* last failed
- * connecting attempt */
+ * connecting attempt
+ */
time64_t ksnd_connd_starting_stamp;/* time stamp of the
* last starting connd
*/
- unsigned ksnd_connd_starting; /* # starting connd */
- unsigned ksnd_connd_running; /* # running connd */
+ unsigned int ksnd_connd_starting; /* # starting connd */
+ unsigned int ksnd_connd_running; /* # running connd */
spinlock_t ksnd_connd_lock; /* serialise */
struct list_head ksnd_idle_noop_txs; /* list head for freed
- * noop tx */
+ * noop tx
+ */
spinlock_t ksnd_tx_lock; /* serialise, g_lock
- * unsafe */
-
+ * unsafe
+ */
};
#define SOCKNAL_INIT_NOTHING 0
@@ -304,18 +326,21 @@ struct ksock_conn {
struct list_head ksnc_list; /* stash on peer's conn list */
struct socket *ksnc_sock; /* actual socket */
void *ksnc_saved_data_ready; /* socket's original
- * data_ready() callback */
+ * data_ready() callback
+ */
void *ksnc_saved_write_space; /* socket's original
- * write_space() callback */
+ * write_space() callback
+ */
atomic_t ksnc_conn_refcount;/* conn refcount */
atomic_t ksnc_sock_refcount;/* sock refcount */
struct ksock_sched *ksnc_scheduler; /* who schedules this connection
- */
+ */
__u32 ksnc_myipaddr; /* my IP */
__u32 ksnc_ipaddr; /* peer's IP */
int ksnc_port; /* peer's port */
signed int ksnc_type:3; /* type of connection, should be
- * signed value */
+ * signed value
+ */
unsigned int ksnc_closing:1; /* being shut down */
unsigned int ksnc_flip:1; /* flip or not, only for V2.x */
unsigned int ksnc_zc_capable:1; /* enable to ZC */
@@ -323,9 +348,11 @@ struct ksock_conn {
/* reader */
struct list_head ksnc_rx_list; /* where I enq waiting input or a
- * forwarding descriptor */
+ * forwarding descriptor
+ */
unsigned long ksnc_rx_deadline; /* when (in jiffies) receive times
- * out */
+ * out
+ */
__u8 ksnc_rx_started; /* started receiving a message */
__u8 ksnc_rx_ready; /* data ready to read */
__u8 ksnc_rx_scheduled; /* being progressed */
@@ -338,7 +365,8 @@ struct ksock_conn {
lnet_kiov_t *ksnc_rx_kiov; /* the page frags */
union ksock_rxiovspace ksnc_rx_iov_space; /* space for frag descriptors */
__u32 ksnc_rx_csum; /* partial checksum for incoming
- * data */
+ * data
+ */
void *ksnc_cookie; /* rx lnet_finalize passthru arg
*/
ksock_msg_t ksnc_msg; /* incoming message buffer:
@@ -346,14 +374,16 @@ struct ksock_conn {
* whole struct
* V1.x message is a bare
* lnet_hdr_t, it's stored in
- * ksnc_msg.ksm_u.lnetmsg */
-
+ * ksnc_msg.ksm_u.lnetmsg
+ */
/* WRITER */
struct list_head ksnc_tx_list; /* where I enq waiting for output
- * space */
+ * space
+ */
struct list_head ksnc_tx_queue; /* packets waiting to be sent */
- struct ksock_tx *ksnc_tx_carrier; /* next TX that can carry a LNet
- * message or ZC-ACK */
+ struct ksock_tx *ksnc_tx_carrier; /* next TX that can carry a LNet
+ * message or ZC-ACK
+ */
unsigned long ksnc_tx_deadline; /* when (in jiffies) tx times out
*/
int ksnc_tx_bufnob; /* send buffer marker */
@@ -361,7 +391,8 @@ struct ksock_conn {
int ksnc_tx_ready; /* write space */
int ksnc_tx_scheduled; /* being progressed */
unsigned long ksnc_tx_last_post; /* time stamp of the last posted
- * TX */
+ * TX
+ */
};
struct ksock_route {
@@ -370,20 +401,24 @@ struct ksock_route {
struct ksock_peer *ksnr_peer; /* owning peer */
atomic_t ksnr_refcount; /* # users */
unsigned long ksnr_timeout; /* when (in jiffies) reconnection
- * can happen next */
+ * can happen next
+ */
long ksnr_retry_interval; /* how long between retries */
__u32 ksnr_myipaddr; /* my IP */
__u32 ksnr_ipaddr; /* IP address to connect to */
int ksnr_port; /* port to connect to */
unsigned int ksnr_scheduled:1; /* scheduled for attention */
unsigned int ksnr_connecting:1; /* connection establishment in
- * progress */
+ * progress
+ */
unsigned int ksnr_connected:4; /* connections established by
- * type */
+ * type
+ */
unsigned int ksnr_deleted:1; /* been removed from peer? */
unsigned int ksnr_share_count; /* created explicitly? */
int ksnr_conn_count; /* # conns established by this
- * route */
+ * route
+ */
};
#define SOCKNAL_KEEPALIVE_PING 1 /* cookie for keepalive ping */
@@ -391,7 +426,8 @@ struct ksock_route {
struct ksock_peer {
struct list_head ksnp_list; /* stash on global peer list */
unsigned long ksnp_last_alive; /* when (in jiffies) I was last
- * alive */
+ * alive
+ */
lnet_process_id_t ksnp_id; /* who's on the other end(s) */
atomic_t ksnp_refcount; /* # users */
int ksnp_sharecount; /* lconf usage counter */
@@ -408,7 +444,8 @@ struct ksock_peer {
struct list_head ksnp_tx_queue; /* waiting packets */
spinlock_t ksnp_lock; /* serialize, g_lock unsafe */
struct list_head ksnp_zc_req_list; /* zero copy requests wait for
- * ACK */
+ * ACK
+ */
unsigned long ksnp_send_keepalive; /* time to send keepalive */
lnet_ni_t *ksnp_ni; /* which network */
int ksnp_n_passive_ips; /* # of... */
@@ -429,7 +466,8 @@ extern struct ksock_tunables ksocknal_tunables;
#define SOCKNAL_MATCH_NO 0 /* TX can't match type of connection */
#define SOCKNAL_MATCH_YES 1 /* TX matches type of connection */
#define SOCKNAL_MATCH_MAY 2 /* TX can be sent on the connection, but not
- * preferred */
+ * preferred
+ */
struct ksock_proto {
/* version number of protocol */
diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c
index c1c6f604e6ad..972f6094be75 100644
--- a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c
+++ b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c
@@ -620,7 +620,8 @@ ksocknal_launch_all_connections_locked(struct ksock_peer *peer)
}
struct ksock_conn *
-ksocknal_find_conn_locked(struct ksock_peer *peer, struct ksock_tx *tx, int nonblk)
+ksocknal_find_conn_locked(struct ksock_peer *peer, struct ksock_tx *tx,
+ int nonblk)
{
struct list_head *tmp;
struct ksock_conn *conn;
@@ -630,10 +631,12 @@ ksocknal_find_conn_locked(struct ksock_peer *peer, struct ksock_tx *tx, int nonb
int fnob = 0;
list_for_each(tmp, &peer->ksnp_conns) {
- struct ksock_conn *c = list_entry(tmp, struct ksock_conn, ksnc_list);
- int nob = atomic_read(&c->ksnc_tx_nob) +
- c->ksnc_sock->sk->sk_wmem_queued;
- int rc;
+ struct ksock_conn *c;
+ int nob, rc;
+
+ c = list_entry(tmp, struct ksock_conn, ksnc_list);
+ nob = atomic_read(&c->ksnc_tx_nob) +
+ c->ksnc_sock->sk->sk_wmem_queued;
LASSERT(!c->ksnc_closing);
LASSERT(c->ksnc_proto &&
@@ -752,9 +755,9 @@ ksocknal_queue_tx_locked(struct ksock_tx *tx, struct ksock_conn *conn)
LASSERT(msg->ksm_zc_cookies[1]);
LASSERT(conn->ksnc_proto->pro_queue_tx_zcack);
+ /* ZC ACK piggybacked on ztx release tx later */
if (conn->ksnc_proto->pro_queue_tx_zcack(conn, tx, 0))
- ztx = tx; /* ZC ACK piggybacked on ztx release tx later */
-
+ ztx = tx;
} else {
/*
* It's a normal packet - can it piggback a noop zc-ack that
@@ -796,7 +799,8 @@ ksocknal_find_connectable_route_locked(struct ksock_peer *peer)
LASSERT(!route->ksnr_connecting || route->ksnr_scheduled);
- if (route->ksnr_scheduled) /* connections being established */
+ /* connections being established */
+ if (route->ksnr_scheduled)
continue;
/* all route types connected ? */
@@ -1514,7 +1518,10 @@ int ksocknal_scheduler(void *arg)
rc = ksocknal_process_transmit(conn, tx);
if (rc == -ENOMEM || rc == -EAGAIN) {
- /* Incomplete send: replace tx on HEAD of tx_queue */
+ /*
+ * Incomplete send: replace tx on HEAD of
+ * tx_queue
+ */
spin_lock_bh(&sched->kss_lock);
list_add(&tx->tx_list, &conn->ksnc_tx_queue);
} else {
@@ -1724,7 +1731,8 @@ ksocknal_recv_hello(lnet_ni_t *ni, struct ksock_conn *conn,
timeout = active ? *ksocknal_tunables.ksnd_timeout :
lnet_acceptor_timeout();
- rc = lnet_sock_read(sock, &hello->kshm_magic, sizeof(hello->kshm_magic), timeout);
+ rc = lnet_sock_read(sock, &hello->kshm_magic,
+ sizeof(hello->kshm_magic), timeout);
if (rc) {
CERROR("Error %d reading HELLO from %pI4h\n",
rc, &conn->ksnc_ipaddr);
@@ -1798,7 +1806,8 @@ ksocknal_recv_hello(lnet_ni_t *ni, struct ksock_conn *conn,
conn->ksnc_port > LNET_ACCEPTOR_MAX_RESERVED_PORT) {
/* Userspace NAL assigns peer process ID from socket */
recv_id.pid = conn->ksnc_port | LNET_PID_USERFLAG;
- recv_id.nid = LNET_MKNID(LNET_NIDNET(ni->ni_nid), conn->ksnc_ipaddr);
+ recv_id.nid = LNET_MKNID(LNET_NIDNET(ni->ni_nid),
+ conn->ksnc_ipaddr);
} else {
recv_id.nid = hello->kshm_src_nid;
recv_id.pid = hello->kshm_src_pid;
@@ -1882,7 +1891,8 @@ ksocknal_connect(struct ksock_route *route)
if (peer->ksnp_accepting > 0) {
CDEBUG(D_NET,
"peer %s(%d) already connecting to me, retry later.\n",
- libcfs_nid2str(peer->ksnp_id.nid), peer->ksnp_accepting);
+ libcfs_nid2str(peer->ksnp_id.nid),
+ peer->ksnp_accepting);
retry_later = 1;
}
@@ -2241,7 +2251,8 @@ ksocknal_connd(void *arg)
/* Nothing to do for 'timeout' */
set_current_state(TASK_INTERRUPTIBLE);
- add_wait_queue_exclusive(&ksocknal_data.ksnd_connd_waitq, &wait);
+ add_wait_queue_exclusive(&ksocknal_data.ksnd_connd_waitq,
+ &wait);
spin_unlock_bh(connd_lock);
nloops = 0;
@@ -2371,7 +2382,8 @@ ksocknal_send_keepalive_locked(struct ksock_peer *peer)
struct ksock_conn *conn;
struct ksock_tx *tx;
- if (list_empty(&peer->ksnp_conns)) /* last_alive will be updated by create_conn */
+ /* last_alive will be updated by create_conn */
+ if (list_empty(&peer->ksnp_conns))
return 0;
if (peer->ksnp_proto != &ksocknal_protocol_v3x)
@@ -2473,8 +2485,8 @@ ksocknal_check_peer_timeouts(int idx)
* holding only shared lock
*/
if (!list_empty(&peer->ksnp_tx_queue)) {
- struct ksock_tx *tx = list_entry(peer->ksnp_tx_queue.next,
- struct ksock_tx, tx_list);
+ tx = list_entry(peer->ksnp_tx_queue.next,
+ struct ksock_tx, tx_list);
if (cfs_time_aftereq(cfs_time_current(),
tx->tx_deadline)) {
diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib.c b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib.c
index 6c95e989ca12..4bcab4bcc2de 100644
--- a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib.c
+++ b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib.c
@@ -202,7 +202,8 @@ ksocknal_lib_recv_iov(struct ksock_conn *conn)
fragnob = sum;
conn->ksnc_rx_csum = ksocknal_csum(conn->ksnc_rx_csum,
- iov[i].iov_base, fragnob);
+ iov[i].iov_base,
+ fragnob);
}
conn->ksnc_msg.ksm_csum = saved_csum;
}
@@ -291,7 +292,8 @@ ksocknal_lib_csum_tx(struct ksock_tx *tx)
}
int
-ksocknal_lib_get_conn_tunables(struct ksock_conn *conn, int *txmem, int *rxmem, int *nagle)
+ksocknal_lib_get_conn_tunables(struct ksock_conn *conn, int *txmem,
+ int *rxmem, int *nagle)
{
struct socket *sock = conn->ksnc_sock;
int len;
diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_proto.c b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_proto.c
index 82e174f6d9fe..8f0ff6ca1f39 100644
--- a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_proto.c
+++ b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_proto.c
@@ -194,7 +194,10 @@ ksocknal_queue_tx_zcack_v3(struct ksock_conn *conn,
}
if (!tx->tx_msg.ksm_zc_cookies[0]) {
- /* NOOP tx has only one ZC-ACK cookie, can carry at least one more */
+ /*
+ * NOOP tx has only one ZC-ACK cookie,
+ * can carry at least one more
+ */
if (tx->tx_msg.ksm_zc_cookies[1] > cookie) {
tx->tx_msg.ksm_zc_cookies[0] = tx->tx_msg.ksm_zc_cookies[1];
tx->tx_msg.ksm_zc_cookies[1] = cookie;
@@ -203,7 +206,10 @@ ksocknal_queue_tx_zcack_v3(struct ksock_conn *conn,
}
if (tx->tx_msg.ksm_zc_cookies[0] - tx->tx_msg.ksm_zc_cookies[1] > 2) {
- /* not likely to carry more ACKs, skip it to simplify logic */
+ /*
+ * not likely to carry more ACKs, skip it
+ * to simplify logic
+ */
ksocknal_next_tx_carrier(conn);
}
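
These hunks reflow comments around the two-slot cookie encoding: a NOOP tx carries up to two ZC-ACK cookies, ordered so that slot0 > slot1 means two discrete cookies, slot0 < slot1 means the inclusive range slot0..slot1, and slot0 == 0 means a single cookie in slot1. A simplified standalone model of that convention (not the driver's full state machine):

#include <stdint.h>
#include <stdio.h>

/* Add a second cookie to a tx that currently holds one in slot[1]. */
static void add_second_cookie(uint64_t slot[2], uint64_t c)
{
	/* order the pair so slot0 > slot1 marks "two discrete cookies" */
	if (slot[1] > c) {
		slot[0] = slot[1];
		slot[1] = c;
	} else {
		slot[0] = c;
	}
}

static int cookie_in_range(const uint64_t slot[2], uint64_t c)
{
	/* slot0 < slot1 marks the inclusive range slot0..slot1 */
	return slot[0] < slot[1] && c >= slot[0] && c <= slot[1];
}

int main(void)
{
	uint64_t pair[2] = { 0, 7 };	/* single cookie 7 */

	add_second_cookie(pair, 5);	/* now the discrete pair {7, 5} */
	printf("%llu %llu in_range=%d\n",
	       (unsigned long long)pair[0], (unsigned long long)pair[1],
	       cookie_in_range(pair, 6));
	return 0;
}
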
@@ -237,7 +243,10 @@ ksocknal_queue_tx_zcack_v3(struct ksock_conn *conn,
}
} else {
- /* ksm_zc_cookies[0] < ksm_zc_cookies[1], it is range of cookies */
+ /*
+ * ksm_zc_cookies[0] < ksm_zc_cookies[1],
+ * it is a range of cookies
+ */
if (cookie >= tx->tx_msg.ksm_zc_cookies[0] &&
cookie <= tx->tx_msg.ksm_zc_cookies[1]) {
CWARN("%s: duplicated ZC cookie: %llu\n",
@@ -425,7 +434,8 @@ ksocknal_handle_zcack(struct ksock_conn *conn, __u64 cookie1, __u64 cookie2)
tx_zc_list) {
__u64 c = tx->tx_msg.ksm_zc_cookies[0];
- if (c == cookie1 || c == cookie2 || (cookie1 < c && c < cookie2)) {
+ if (c == cookie1 || c == cookie2 ||
+ (cookie1 < c && c < cookie2)) {
tx->tx_msg.ksm_zc_cookies[0] = 0;
list_del(&tx->tx_zc_list);
list_add(&tx->tx_zc_list, &zlist);
@@ -639,7 +649,8 @@ out:
}
static int
-ksocknal_recv_hello_v2(struct ksock_conn *conn, ksock_hello_msg_t *hello, int timeout)
+ksocknal_recv_hello_v2(struct ksock_conn *conn, ksock_hello_msg_t *hello,
+ int timeout)
{
struct socket *sock = conn->ksnc_sock;
int rc;
@@ -737,7 +748,10 @@ ksocknal_pack_msg_v2(struct ksock_tx *tx)
tx->tx_nob = offsetof(ksock_msg_t, ksm_u.lnetmsg.ksnm_hdr);
tx->tx_resid = offsetof(ksock_msg_t, ksm_u.lnetmsg.ksnm_hdr);
}
- /* Don't checksum before start sending, because packet can be piggybacked with ACK */
+ /*
+ * Don't checksum before start sending, because packet can be
+ * piggybacked with ACK
+ */
}
static void
diff --git a/drivers/staging/lustre/lnet/libcfs/debug.c b/drivers/staging/lustre/lnet/libcfs/debug.c
index 23b36b890964..a38db2322225 100644
--- a/drivers/staging/lustre/lnet/libcfs/debug.c
+++ b/drivers/staging/lustre/lnet/libcfs/debug.c
@@ -57,7 +57,7 @@ static int libcfs_param_debug_mb_set(const char *val,
const struct kernel_param *kp)
{
int rc;
- unsigned num;
+ unsigned int num;
rc = kstrtouint(val, 0, &num);
if (rc < 0)
@@ -228,7 +228,8 @@ int libcfs_panic_in_progress;
static const char *
libcfs_debug_subsys2str(int subsys)
{
- static const char *libcfs_debug_subsystems[] = LIBCFS_DEBUG_SUBSYS_NAMES;
+ static const char * const libcfs_debug_subsystems[] =
+ LIBCFS_DEBUG_SUBSYS_NAMES;
if (subsys >= ARRAY_SIZE(libcfs_debug_subsystems))
return NULL;
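
The extra const added here makes the pointer array itself immutable as well as the strings, letting the compiler place the whole table in read-only data. A minimal illustration:

#include <stdio.h>

/* Pointers and pointees both const: the whole table can go in .rodata. */
static const char * const subsys_names[] = { "undefined", "mdc", "mds" };

int main(void)
{
	/* subsys_names[0] = "other";  would now fail to compile */
	printf("%s\n", subsys_names[1]);
	return 0;
}
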
@@ -240,7 +241,8 @@ libcfs_debug_subsys2str(int subsys)
static const char *
libcfs_debug_dbg2str(int debug)
{
- static const char *libcfs_debug_masks[] = LIBCFS_DEBUG_MASKS_NAMES;
+ static const char * const libcfs_debug_masks[] =
+ LIBCFS_DEBUG_MASKS_NAMES;
if (debug >= ARRAY_SIZE(libcfs_debug_masks))
return NULL;
@@ -253,17 +255,17 @@ libcfs_debug_mask2str(char *str, int size, int mask, int is_subsys)
{
const char *(*fn)(int bit) = is_subsys ? libcfs_debug_subsys2str :
libcfs_debug_dbg2str;
- int len = 0;
- const char *token;
- int i;
+ int len = 0;
+ const char *token;
+ int i;
- if (mask == 0) { /* "0" */
+ if (!mask) { /* "0" */
if (size > 0)
str[0] = '0';
len = 1;
} else { /* space-separated tokens */
for (i = 0; i < 32; i++) {
- if ((mask & (1 << i)) == 0)
+ if (!(mask & (1 << i)))
continue;
token = fn(i);
@@ -276,7 +278,7 @@ libcfs_debug_mask2str(char *str, int size, int mask, int is_subsys)
len++;
}
- while (*token != 0) {
+ while (*token) {
if (len < size)
str[len] = *token;
token++;
@@ -299,10 +301,10 @@ libcfs_debug_str2mask(int *mask, const char *str, int is_subsys)
{
const char *(*fn)(int bit) = is_subsys ? libcfs_debug_subsys2str :
libcfs_debug_dbg2str;
- int m = 0;
- int matched;
- int n;
- int t;
+ int m = 0;
+ int matched;
+ int n;
+ int t;
/* Allow a number for backwards compatibility */
@@ -313,7 +315,7 @@ libcfs_debug_str2mask(int *mask, const char *str, int is_subsys)
t = sscanf(str, "%i%n", &m, &matched);
if (t >= 1 && matched == n) {
/* don't print warning for lctl set_param debug=0 or -1 */
- if (m != 0 && m != -1)
+ if (m && m != -1)
CWARN("You are trying to use a numerical value for the mask - this will be deprecated in a future release.\n");
*mask = m;
return 0;
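
The numeric path kept above relies on the sscanf "%i%n" idiom: the input is accepted only when the parsed integer consumes the entire string, so "12abc" is rejected. A standalone sketch of that check:

#include <stdio.h>
#include <string.h>

/* Accept str only if it is entirely one C-style integer (hex/oct/dec). */
static int parse_whole_int(const char *str, int *out)
{
	int n = strlen(str);
	int matched = 0;

	if (sscanf(str, "%i%n", out, &matched) >= 1 && matched == n)
		return 0;
	return -1;
}

int main(void)
{
	int v;

	printf("%d\n", parse_whole_int("0x1f", &v));  /* 0: all consumed */
	printf("%d\n", parse_whole_int("12abc", &v)); /* -1: trailing junk */
	return 0;
}
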
@@ -387,8 +389,8 @@ EXPORT_SYMBOL(libcfs_debug_dumplog);
int libcfs_debug_init(unsigned long bufsize)
{
- int rc = 0;
unsigned int max = libcfs_debug_mb;
+ int rc = 0;
init_waitqueue_head(&debug_ctlwq);
@@ -414,9 +416,9 @@ int libcfs_debug_init(unsigned long bufsize)
max = max / num_possible_cpus();
max <<= (20 - PAGE_SHIFT);
}
- rc = cfs_tracefile_init(max);
- if (rc == 0) {
+ rc = cfs_tracefile_init(max);
+ if (!rc) {
libcfs_register_panic_notifier();
libcfs_debug_mb = cfs_trace_get_debug_mb();
}
diff --git a/drivers/staging/lustre/lnet/libcfs/fail.c b/drivers/staging/lustre/lnet/libcfs/fail.c
index e4b1a0a86eae..12dd50ad4efb 100644
--- a/drivers/staging/lustre/lnet/libcfs/fail.c
+++ b/drivers/staging/lustre/lnet/libcfs/fail.c
@@ -46,7 +46,7 @@ EXPORT_SYMBOL(cfs_race_waitq);
int cfs_race_state;
EXPORT_SYMBOL(cfs_race_state);
-int __cfs_fail_check_set(__u32 id, __u32 value, int set)
+int __cfs_fail_check_set(u32 id, u32 value, int set)
{
static atomic_t cfs_fail_count = ATOMIC_INIT(0);
@@ -113,6 +113,7 @@ int __cfs_fail_check_set(__u32 id, __u32 value, int set)
break;
case CFS_FAIL_LOC_RESET:
cfs_fail_loc = value;
+ atomic_set(&cfs_fail_count, 0);
break;
default:
LASSERTF(0, "called with bad set %u\n", set);
@@ -123,7 +124,7 @@ int __cfs_fail_check_set(__u32 id, __u32 value, int set)
}
EXPORT_SYMBOL(__cfs_fail_check_set);
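
The one functional change in this file is that resetting a fail location now also zeroes the hit counter, so a re-armed site starts counting from scratch. A toy model of the check/set pair, with illustrative names:

#include <stdio.h>

/* Toy fault-injection site: fire the Nth time the location is checked. */
static unsigned long fail_loc;
static int fail_count;

static void fail_reset(unsigned long loc)
{
	fail_loc = loc;
	fail_count = 0;		/* the fix: re-arming restarts the count */
}

static int fail_check(unsigned long loc, int fire_on)
{
	if (fail_loc != loc)
		return 0;
	return ++fail_count == fire_on;
}

int main(void)
{
	fail_reset(0x123);
	for (int i = 0; i < 4; i++)
		printf("check %d -> %d\n", i, fail_check(0x123, 3));
	fail_reset(0x123);	/* without the reset this would keep counting */
	printf("after reset -> %d\n", fail_check(0x123, 3));
	return 0;
}
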
-int __cfs_fail_timeout_set(__u32 id, __u32 value, int ms, int set)
+int __cfs_fail_timeout_set(u32 id, u32 value, int ms, int set)
{
int ret;
diff --git a/drivers/staging/lustre/lnet/libcfs/hash.c b/drivers/staging/lustre/lnet/libcfs/hash.c
index 23283b6e09ab..c93c59d8fe6c 100644
--- a/drivers/staging/lustre/lnet/libcfs/hash.c
+++ b/drivers/staging/lustre/lnet/libcfs/hash.c
@@ -289,7 +289,7 @@ cfs_hash_hd_hhead_size(struct cfs_hash *hs)
static struct hlist_head *
cfs_hash_hd_hhead(struct cfs_hash *hs, struct cfs_hash_bd *bd)
{
- struct cfs_hash_head_dep *head;
+ struct cfs_hash_head_dep *head;
head = (struct cfs_hash_head_dep *)&bd->bd_bucket->hsb_head[0];
return &head[bd->bd_offset].hd_head;
@@ -492,7 +492,7 @@ cfs_hash_bd_get(struct cfs_hash *hs, const void *key, struct cfs_hash_bd *bd)
cfs_hash_bd_from_key(hs, hs->hs_buckets,
hs->hs_cur_bits, key, bd);
} else {
- LASSERT(hs->hs_rehash_bits != 0);
+ LASSERT(hs->hs_rehash_bits);
cfs_hash_bd_from_key(hs, hs->hs_rehash_buckets,
hs->hs_rehash_bits, key, bd);
}
@@ -507,14 +507,14 @@ cfs_hash_bd_dep_record(struct cfs_hash *hs, struct cfs_hash_bd *bd, int dep_cur)
bd->bd_bucket->hsb_depmax = dep_cur;
# if CFS_HASH_DEBUG_LEVEL >= CFS_HASH_DEBUG_1
- if (likely(warn_on_depth == 0 ||
+ if (likely(!warn_on_depth ||
max(warn_on_depth, hs->hs_dep_max) >= dep_cur))
return;
spin_lock(&hs->hs_dep_lock);
- hs->hs_dep_max = dep_cur;
- hs->hs_dep_bkt = bd->bd_bucket->hsb_index;
- hs->hs_dep_off = bd->bd_offset;
+ hs->hs_dep_max = dep_cur;
+ hs->hs_dep_bkt = bd->bd_bucket->hsb_index;
+ hs->hs_dep_off = bd->bd_offset;
hs->hs_dep_bits = hs->hs_cur_bits;
spin_unlock(&hs->hs_dep_lock);
@@ -531,7 +531,7 @@ cfs_hash_bd_add_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd,
rc = hs->hs_hops->hop_hnode_add(hs, bd, hnode);
cfs_hash_bd_dep_record(hs, bd, rc);
bd->bd_bucket->hsb_version++;
- if (unlikely(bd->bd_bucket->hsb_version == 0))
+ if (unlikely(!bd->bd_bucket->hsb_version))
bd->bd_bucket->hsb_version++;
bd->bd_bucket->hsb_count++;
@@ -551,7 +551,7 @@ cfs_hash_bd_del_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd,
LASSERT(bd->bd_bucket->hsb_count > 0);
bd->bd_bucket->hsb_count--;
bd->bd_bucket->hsb_version++;
- if (unlikely(bd->bd_bucket->hsb_version == 0))
+ if (unlikely(!bd->bd_bucket->hsb_version))
bd->bd_bucket->hsb_version++;
if (cfs_hash_with_counter(hs)) {
@@ -571,7 +571,7 @@ cfs_hash_bd_move_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd_old,
struct cfs_hash_bucket *nbkt = bd_new->bd_bucket;
int rc;
- if (cfs_hash_bd_compare(bd_old, bd_new) == 0)
+ if (!cfs_hash_bd_compare(bd_old, bd_new))
return;
/* use cfs_hash_bd_hnode_add/del, to avoid atomic & refcount ops
@@ -584,11 +584,11 @@ cfs_hash_bd_move_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd_old,
LASSERT(obkt->hsb_count > 0);
obkt->hsb_count--;
obkt->hsb_version++;
- if (unlikely(obkt->hsb_version == 0))
+ if (unlikely(!obkt->hsb_version))
obkt->hsb_version++;
nbkt->hsb_count++;
nbkt->hsb_version++;
- if (unlikely(nbkt->hsb_version == 0))
+ if (unlikely(!nbkt->hsb_version))
nbkt->hsb_version++;
}
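
The hsb_version updates above implement a version counter that skips zero on wraparound, since zero is reserved to mean "unset" when readers compare sampled versions. A minimal sketch:

#include <stdint.h>
#include <stdio.h>

/* Bump a version counter, skipping 0 so it can mean "unset" elsewhere. */
static uint32_t bump_version(uint32_t v)
{
	if (!++v)	/* wrapped to 0: step once more */
		v++;
	return v;
}

int main(void)
{
	printf("%u\n", bump_version(1));          /* 2 */
	printf("%u\n", bump_version(UINT32_MAX)); /* 1, never 0 */
	return 0;
}
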
@@ -629,7 +629,7 @@ cfs_hash_bd_lookup_intent(struct cfs_hash *hs, struct cfs_hash_bd *bd,
struct hlist_head *hhead = cfs_hash_bd_hhead(hs, bd);
struct hlist_node *ehnode;
struct hlist_node *match;
- int intent_add = (intent & CFS_HS_LOOKUP_MASK_ADD) != 0;
+ int intent_add = intent & CFS_HS_LOOKUP_MASK_ADD;
/* with this function, we can avoid a lot of useless refcount ops,
* which are expensive atomic operations most time.
@@ -643,13 +643,13 @@ cfs_hash_bd_lookup_intent(struct cfs_hash *hs, struct cfs_hash_bd *bd,
continue;
/* match and ... */
- if ((intent & CFS_HS_LOOKUP_MASK_DEL) != 0) {
+ if (intent & CFS_HS_LOOKUP_MASK_DEL) {
cfs_hash_bd_del_locked(hs, bd, ehnode);
return ehnode;
}
/* caller wants refcount? */
- if ((intent & CFS_HS_LOOKUP_MASK_REF) != 0)
+ if (intent & CFS_HS_LOOKUP_MASK_REF)
cfs_hash_get(hs, ehnode);
return ehnode;
}
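
For context, this lookup folds find, add, delete, and reference-taking into a single bucket walk driven by an intent bitmask, which is why the "!= 0" comparisons above reduce to plain bit tests. A compact toy model of dispatching on such flags:

#include <stdio.h>

#define LOOKUP_REF 0x1	/* take a reference on a hit */
#define LOOKUP_DEL 0x2	/* unlink on a hit */

/* Toy "bucket": one slot. Returns 1 on hit, applying the intent. */
static int lookup_intent(int *slot, int key, unsigned int intent, int *refs)
{
	if (*slot != key)
		return 0;
	if (intent & LOOKUP_DEL) {	/* delete wins: no ref taken */
		*slot = -1;
		return 1;
	}
	if (intent & LOOKUP_REF)
		(*refs)++;
	return 1;
}

int main(void)
{
	int slot = 42, refs = 0;

	printf("hit=%d refs=%d\n",
	       lookup_intent(&slot, 42, LOOKUP_REF, &refs), refs);
	printf("hit=%d slot=%d\n",
	       lookup_intent(&slot, 42, LOOKUP_DEL, &refs), slot);
	return 0;
}
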
@@ -682,7 +682,7 @@ EXPORT_SYMBOL(cfs_hash_bd_peek_locked);
static void
cfs_hash_multi_bd_lock(struct cfs_hash *hs, struct cfs_hash_bd *bds,
- unsigned n, int excl)
+ unsigned int n, int excl)
{
struct cfs_hash_bucket *prev = NULL;
int i;
@@ -704,7 +704,7 @@ cfs_hash_multi_bd_lock(struct cfs_hash *hs, struct cfs_hash_bd *bds,
static void
cfs_hash_multi_bd_unlock(struct cfs_hash *hs, struct cfs_hash_bd *bds,
- unsigned n, int excl)
+ unsigned int n, int excl)
{
struct cfs_hash_bucket *prev = NULL;
int i;
@@ -719,10 +719,10 @@ cfs_hash_multi_bd_unlock(struct cfs_hash *hs, struct cfs_hash_bd *bds,
static struct hlist_node *
cfs_hash_multi_bd_lookup_locked(struct cfs_hash *hs, struct cfs_hash_bd *bds,
- unsigned n, const void *key)
+ unsigned int n, const void *key)
{
struct hlist_node *ehnode;
- unsigned i;
+ unsigned int i;
cfs_hash_for_each_bd(bds, n, i) {
ehnode = cfs_hash_bd_lookup_intent(hs, &bds[i], key, NULL,
@@ -735,12 +735,12 @@ cfs_hash_multi_bd_lookup_locked(struct cfs_hash *hs, struct cfs_hash_bd *bds,
static struct hlist_node *
cfs_hash_multi_bd_findadd_locked(struct cfs_hash *hs, struct cfs_hash_bd *bds,
- unsigned n, const void *key,
+ unsigned int n, const void *key,
struct hlist_node *hnode, int noref)
{
struct hlist_node *ehnode;
int intent;
- unsigned i;
+ unsigned int i;
LASSERT(hnode);
intent = (!noref * CFS_HS_LOOKUP_MASK_REF) | CFS_HS_LOOKUP_IT_PEEK;
@@ -766,7 +766,7 @@ cfs_hash_multi_bd_findadd_locked(struct cfs_hash *hs, struct cfs_hash_bd *bds,
static struct hlist_node *
cfs_hash_multi_bd_finddel_locked(struct cfs_hash *hs, struct cfs_hash_bd *bds,
- unsigned n, const void *key,
+ unsigned int n, const void *key,
struct hlist_node *hnode)
{
struct hlist_node *ehnode;
@@ -815,7 +815,7 @@ cfs_hash_dual_bd_get(struct cfs_hash *hs, const void *key,
return;
}
- LASSERT(hs->hs_rehash_bits != 0);
+ LASSERT(hs->hs_rehash_bits);
cfs_hash_bd_from_key(hs, hs->hs_rehash_buckets,
hs->hs_rehash_bits, key, &bds[1]);
@@ -883,7 +883,7 @@ cfs_hash_buckets_realloc(struct cfs_hash *hs, struct cfs_hash_bucket **old_bkts,
struct cfs_hash_bucket **new_bkts;
int i;
- LASSERT(old_size == 0 || old_bkts);
+ LASSERT(!old_size || old_bkts);
if (old_bkts && old_size == new_size)
return old_bkts;
@@ -908,9 +908,9 @@ cfs_hash_buckets_realloc(struct cfs_hash *hs, struct cfs_hash_bucket **old_bkts,
return NULL;
}
- new_bkts[i]->hsb_index = i;
- new_bkts[i]->hsb_version = 1; /* shouldn't be zero */
- new_bkts[i]->hsb_depmax = -1; /* unknown */
+ new_bkts[i]->hsb_index = i;
+ new_bkts[i]->hsb_version = 1; /* shouldn't be zero */
+ new_bkts[i]->hsb_depmax = -1; /* unknown */
bd.bd_bucket = new_bkts[i];
cfs_hash_bd_for_each_hlist(hs, &bd, hhead)
INIT_HLIST_HEAD(hhead);
@@ -950,9 +950,9 @@ static int cfs_hash_dep_print(struct cfs_workitem *wi)
int bits;
spin_lock(&hs->hs_dep_lock);
- dep = hs->hs_dep_max;
- bkt = hs->hs_dep_bkt;
- off = hs->hs_dep_off;
+ dep = hs->hs_dep_max;
+ bkt = hs->hs_dep_bkt;
+ off = hs->hs_dep_off;
bits = hs->hs_dep_bits;
spin_unlock(&hs->hs_dep_lock);
@@ -976,7 +976,7 @@ static void cfs_hash_depth_wi_cancel(struct cfs_hash *hs)
return;
spin_lock(&hs->hs_dep_lock);
- while (hs->hs_dep_bits != 0) {
+ while (hs->hs_dep_bits) {
spin_unlock(&hs->hs_dep_lock);
cond_resched();
spin_lock(&hs->hs_dep_lock);
@@ -992,10 +992,10 @@ static inline void cfs_hash_depth_wi_cancel(struct cfs_hash *hs) {}
#endif /* CFS_HASH_DEBUG_LEVEL >= CFS_HASH_DEBUG_1 */
struct cfs_hash *
-cfs_hash_create(char *name, unsigned cur_bits, unsigned max_bits,
- unsigned bkt_bits, unsigned extra_bytes,
- unsigned min_theta, unsigned max_theta,
- struct cfs_hash_ops *ops, unsigned flags)
+cfs_hash_create(char *name, unsigned int cur_bits, unsigned int max_bits,
+ unsigned int bkt_bits, unsigned int extra_bytes,
+ unsigned int min_theta, unsigned int max_theta,
+ struct cfs_hash_ops *ops, unsigned int flags)
{
struct cfs_hash *hs;
int len;
@@ -1010,18 +1010,17 @@ cfs_hash_create(char *name, unsigned cur_bits, unsigned max_bits,
LASSERT(ops->hs_get);
LASSERT(ops->hs_put_locked);
- if ((flags & CFS_HASH_REHASH) != 0)
+ if (flags & CFS_HASH_REHASH)
flags |= CFS_HASH_COUNTER; /* must have counter */
LASSERT(cur_bits > 0);
LASSERT(cur_bits >= bkt_bits);
LASSERT(max_bits >= cur_bits && max_bits < 31);
- LASSERT(ergo((flags & CFS_HASH_REHASH) == 0, cur_bits == max_bits));
- LASSERT(ergo((flags & CFS_HASH_REHASH) != 0,
- (flags & CFS_HASH_NO_LOCK) == 0));
- LASSERT(ergo((flags & CFS_HASH_REHASH_KEY) != 0, ops->hs_keycpy));
+ LASSERT(ergo(!(flags & CFS_HASH_REHASH), cur_bits == max_bits));
+ LASSERT(ergo(flags & CFS_HASH_REHASH, !(flags & CFS_HASH_NO_LOCK)));
+ LASSERT(ergo(flags & CFS_HASH_REHASH_KEY, ops->hs_keycpy));
- len = (flags & CFS_HASH_BIGNAME) == 0 ?
+ len = !(flags & CFS_HASH_BIGNAME) ?
CFS_HASH_NAME_LEN : CFS_HASH_BIGNAME_LEN;
LIBCFS_ALLOC(hs, offsetof(struct cfs_hash, hs_name[len]));
if (!hs)
@@ -1036,12 +1035,12 @@ cfs_hash_create(char *name, unsigned cur_bits, unsigned max_bits,
cfs_hash_lock_setup(hs);
cfs_hash_hlist_setup(hs);
- hs->hs_cur_bits = (__u8)cur_bits;
- hs->hs_min_bits = (__u8)cur_bits;
- hs->hs_max_bits = (__u8)max_bits;
- hs->hs_bkt_bits = (__u8)bkt_bits;
+ hs->hs_cur_bits = (u8)cur_bits;
+ hs->hs_min_bits = (u8)cur_bits;
+ hs->hs_max_bits = (u8)max_bits;
+ hs->hs_bkt_bits = (u8)bkt_bits;
- hs->hs_ops = ops;
+ hs->hs_ops = ops;
hs->hs_extra_bytes = extra_bytes;
hs->hs_rehash_bits = 0;
cfs_wi_init(&hs->hs_rehash_wi, hs, cfs_hash_rehash_worker);
@@ -1107,12 +1106,12 @@ cfs_hash_destroy(struct cfs_hash *hs)
cfs_hash_exit(hs, hnode);
}
}
- LASSERT(bd.bd_bucket->hsb_count == 0);
+ LASSERT(!bd.bd_bucket->hsb_count);
cfs_hash_bd_unlock(hs, &bd, 1);
cond_resched();
}
- LASSERT(atomic_read(&hs->hs_count) == 0);
+ LASSERT(!atomic_read(&hs->hs_count));
cfs_hash_buckets_free(hs->hs_buckets, cfs_hash_bkt_size(hs),
0, CFS_HASH_NBKT(hs));
@@ -1216,7 +1215,7 @@ cfs_hash_find_or_add(struct cfs_hash *hs, const void *key,
struct cfs_hash_bd bds[2];
int bits = 0;
- LASSERT(hlist_unhashed(hnode));
+ LASSERTF(hlist_unhashed(hnode), "hnode = %p\n", hnode);
cfs_hash_lock(hs, 0);
cfs_hash_dual_bd_get_and_lock(hs, key, bds, 1);
@@ -1293,7 +1292,7 @@ cfs_hash_del(struct cfs_hash *hs, const void *key, struct hlist_node *hnode)
}
if (hnode) {
- obj = cfs_hash_object(hs, hnode);
+ obj = cfs_hash_object(hs, hnode);
bits = cfs_hash_rehash_bits(hs);
}
@@ -1388,7 +1387,7 @@ cfs_hash_for_each_exit(struct cfs_hash *hs)
bits = cfs_hash_rehash_bits(hs);
cfs_hash_unlock(hs, 1);
/* NB: it's race on cfs_has_t::hs_iterating, see above */
- if (remained == 0)
+ if (!remained)
hs->hs_iterating = 0;
if (bits > 0) {
cfs_hash_rehash(hs, atomic_read(&hs->hs_count) <
@@ -1406,14 +1405,14 @@ cfs_hash_for_each_exit(struct cfs_hash *hs)
* . if @removal_safe is true, use can remove current item by
* cfs_hash_bd_del_locked
*/
-static __u64
+static u64
cfs_hash_for_each_tight(struct cfs_hash *hs, cfs_hash_for_each_cb_t func,
void *data, int remove_safe)
{
struct hlist_node *hnode;
struct hlist_node *pos;
struct cfs_hash_bd bd;
- __u64 count = 0;
+ u64 count = 0;
int excl = !!remove_safe;
int loop = 0;
int i;
@@ -1526,7 +1525,7 @@ cfs_hash_is_empty(struct cfs_hash *hs)
}
EXPORT_SYMBOL(cfs_hash_is_empty);
-__u64
+u64
cfs_hash_size_get(struct cfs_hash *hs)
{
return cfs_hash_with_counter(hs) ?
@@ -1552,26 +1551,33 @@ EXPORT_SYMBOL(cfs_hash_size_get);
*/
static int
cfs_hash_for_each_relax(struct cfs_hash *hs, cfs_hash_for_each_cb_t func,
- void *data)
+ void *data, int start)
{
struct hlist_node *hnode;
struct hlist_node *tmp;
struct cfs_hash_bd bd;
- __u32 version;
+ u32 version;
int count = 0;
int stop_on_change;
- int rc;
+ int end = -1;
+ int rc = 0;
int i;
stop_on_change = cfs_hash_with_rehash_key(hs) ||
!cfs_hash_with_no_itemref(hs) ||
!hs->hs_ops->hs_put_locked;
cfs_hash_lock(hs, 0);
+again:
LASSERT(!cfs_hash_is_rehashing(hs));
cfs_hash_for_each_bucket(hs, &bd, i) {
struct hlist_head *hhead;
+ if (i < start)
+ continue;
+ else if (end > 0 && i >= end)
+ break;
+
cfs_hash_bd_lock(hs, &bd, 0);
version = cfs_hash_bd_version_get(&bd);
@@ -1611,14 +1617,19 @@ cfs_hash_for_each_relax(struct cfs_hash *hs, cfs_hash_for_each_cb_t func,
if (rc) /* callback wants to break iteration */
break;
}
- cfs_hash_unlock(hs, 0);
+ if (start > 0 && !rc) {
+ end = start;
+ start = 0;
+ goto again;
+ }
+ cfs_hash_unlock(hs, 0);
return count;
}
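
The new start parameter turns the relaxed walk into a wrap-around scan: buckets before start are skipped on the first pass, then the loop re-enters from bucket 0 and stops where it began. The same two-pass shape over a plain array:

#include <stdio.h>

/* Visit all n slots beginning at 'start', wrapping to cover 0..start. */
static void walk_from(const int *vals, int n, int start)
{
	int end = -1;

again:
	for (int i = 0; i < n; i++) {
		if (i < start)
			continue;		/* first pass: skip the head */
		else if (end > 0 && i >= end)
			break;			/* second pass: stop at start */
		printf("%d ", vals[i]);
	}
	if (start > 0) {
		end = start;			/* now cover the skipped head */
		start = 0;
		goto again;
	}
	printf("\n");
}

int main(void)
{
	int vals[] = { 0, 1, 2, 3, 4 };

	walk_from(vals, 5, 3);	/* prints 3 4 0 1 2 */
	return 0;
}
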
int
cfs_hash_for_each_nolock(struct cfs_hash *hs, cfs_hash_for_each_cb_t func,
- void *data)
+ void *data, int start)
{
if (cfs_hash_with_no_lock(hs) ||
cfs_hash_with_rehash_key(hs) ||
@@ -1630,7 +1641,7 @@ cfs_hash_for_each_nolock(struct cfs_hash *hs, cfs_hash_for_each_cb_t func,
return -EOPNOTSUPP;
cfs_hash_for_each_enter(hs);
- cfs_hash_for_each_relax(hs, func, data);
+ cfs_hash_for_each_relax(hs, func, data, start);
cfs_hash_for_each_exit(hs);
return 0;
@@ -1652,7 +1663,7 @@ int
cfs_hash_for_each_empty(struct cfs_hash *hs, cfs_hash_for_each_cb_t func,
void *data)
{
- unsigned i = 0;
+ unsigned int i = 0;
if (cfs_hash_with_no_lock(hs))
return -EOPNOTSUPP;
@@ -1662,7 +1673,7 @@ cfs_hash_for_each_empty(struct cfs_hash *hs, cfs_hash_for_each_cb_t func,
return -EOPNOTSUPP;
cfs_hash_for_each_enter(hs);
- while (cfs_hash_for_each_relax(hs, func, data)) {
+ while (cfs_hash_for_each_relax(hs, func, data, 0)) {
CDEBUG(D_INFO, "Try to empty hash: %s, loop: %u\n",
hs->hs_name, i++);
}
@@ -1672,7 +1683,7 @@ cfs_hash_for_each_empty(struct cfs_hash *hs, cfs_hash_for_each_cb_t func,
EXPORT_SYMBOL(cfs_hash_for_each_empty);
void
-cfs_hash_hlist_for_each(struct cfs_hash *hs, unsigned hindex,
+cfs_hash_hlist_for_each(struct cfs_hash *hs, unsigned int hindex,
cfs_hash_for_each_cb_t func, void *data)
{
struct hlist_head *hhead;
@@ -1704,7 +1715,7 @@ EXPORT_SYMBOL(cfs_hash_hlist_for_each);
* the passed callback @func and pass to it as an argument each hash
* item and the private @data. During the callback the bucket lock
* is held so the callback must never sleep.
- */
+ */
void
cfs_hash_for_each_key(struct cfs_hash *hs, const void *key,
cfs_hash_for_each_cb_t func, void *data)
@@ -1936,7 +1947,7 @@ out:
/* can't refer to @hs anymore because it could be destroyed */
if (bkts)
cfs_hash_buckets_free(bkts, bsize, new_size, old_size);
- if (rc != 0)
+ if (rc)
CDEBUG(D_INFO, "early quit of rehashing: %d\n", rc);
/* return 1 only if cfs_wi_exit is called */
return rc == -ESRCH;
@@ -2005,7 +2016,7 @@ cfs_hash_full_bkts(struct cfs_hash *hs)
if (!hs->hs_rehash_buckets)
return hs->hs_buckets;
- LASSERT(hs->hs_rehash_bits != 0);
+ LASSERT(hs->hs_rehash_bits);
return hs->hs_rehash_bits > hs->hs_cur_bits ?
hs->hs_rehash_buckets : hs->hs_buckets;
}
@@ -2017,7 +2028,7 @@ cfs_hash_full_nbkt(struct cfs_hash *hs)
if (!hs->hs_rehash_buckets)
return CFS_HASH_NBKT(hs);
- LASSERT(hs->hs_rehash_bits != 0);
+ LASSERT(hs->hs_rehash_bits);
return hs->hs_rehash_bits > hs->hs_cur_bits ?
CFS_HASH_RH_NBKT(hs) : CFS_HASH_NBKT(hs);
}
diff --git a/drivers/staging/lustre/lnet/libcfs/libcfs_cpu.c b/drivers/staging/lustre/lnet/libcfs/libcfs_cpu.c
index 33352af6c27f..55caa19def51 100644
--- a/drivers/staging/lustre/lnet/libcfs/libcfs_cpu.c
+++ b/drivers/staging/lustre/lnet/libcfs/libcfs_cpu.c
@@ -74,7 +74,7 @@ EXPORT_SYMBOL(cfs_cpt_table_free);
int
cfs_cpt_table_print(struct cfs_cpt_table *cptab, char *buf, int len)
{
- int rc;
+ int rc;
rc = snprintf(buf, len, "%d\t: %d\n", 0, 0);
len -= rc;
diff --git a/drivers/staging/lustre/lnet/libcfs/libcfs_lock.c b/drivers/staging/lustre/lnet/libcfs/libcfs_lock.c
index 83543f928279..1967b97c4afc 100644
--- a/drivers/staging/lustre/lnet/libcfs/libcfs_lock.c
+++ b/drivers/staging/lustre/lnet/libcfs/libcfs_lock.c
@@ -52,9 +52,9 @@ struct cfs_percpt_lock *
cfs_percpt_lock_create(struct cfs_cpt_table *cptab,
struct lock_class_key *keys)
{
- struct cfs_percpt_lock *pcl;
- spinlock_t *lock;
- int i;
+ struct cfs_percpt_lock *pcl;
+ spinlock_t *lock;
+ int i;
/* NB: cptab can be NULL, pcl will be for HW CPUs on that case */
LIBCFS_ALLOC(pcl, sizeof(*pcl));
@@ -73,7 +73,7 @@ cfs_percpt_lock_create(struct cfs_cpt_table *cptab,
cfs_percpt_for_each(lock, i, pcl->pcl_locks) {
spin_lock_init(lock);
- if (keys != NULL)
+ if (keys)
lockdep_set_class(lock, &keys[i]);
}
@@ -94,8 +94,8 @@ void
cfs_percpt_lock(struct cfs_percpt_lock *pcl, int index)
__acquires(pcl->pcl_locks)
{
- int ncpt = cfs_cpt_number(pcl->pcl_cptab);
- int i;
+ int ncpt = cfs_cpt_number(pcl->pcl_cptab);
+ int i;
LASSERT(index >= CFS_PERCPT_LOCK_EX && index < ncpt);
@@ -114,7 +114,7 @@ cfs_percpt_lock(struct cfs_percpt_lock *pcl, int index)
/* exclusive lock request */
for (i = 0; i < ncpt; i++) {
spin_lock(pcl->pcl_locks[i]);
- if (i == 0) {
+ if (!i) {
LASSERT(!pcl->pcl_locked);
/* nobody should take private lock after this
* so I wouldn't starve for too long time
@@ -130,8 +130,8 @@ void
cfs_percpt_unlock(struct cfs_percpt_lock *pcl, int index)
__releases(pcl->pcl_locks)
{
- int ncpt = cfs_cpt_number(pcl->pcl_cptab);
- int i;
+ int ncpt = cfs_cpt_number(pcl->pcl_cptab);
+ int i;
index = ncpt == 1 ? 0 : index;
@@ -141,7 +141,7 @@ cfs_percpt_unlock(struct cfs_percpt_lock *pcl, int index)
}
for (i = ncpt - 1; i >= 0; i--) {
- if (i == 0) {
+ if (!i) {
LASSERT(pcl->pcl_locked);
pcl->pcl_locked = 0;
}
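
For context, a percpt lock is an array of spinlocks, one per CPU partition: a private user locks only its own slot, while an exclusive user (index CFS_PERCPT_LOCK_EX) takes every slot in ascending order, so slot 0 serialises exclusive lockers. A userspace analog with pthread mutexes:

#include <pthread.h>
#include <stdio.h>

#define NCPT 4

/* One lock per partition; exclusive users take all of them in order. */
static pthread_mutex_t locks[NCPT];

static void lock_one(int i)   { pthread_mutex_lock(&locks[i]); }
static void unlock_one(int i) { pthread_mutex_unlock(&locks[i]); }

static void lock_all(void)
{
	/* ascending order avoids deadlock against other exclusive lockers */
	for (int i = 0; i < NCPT; i++)
		pthread_mutex_lock(&locks[i]);
}

static void unlock_all(void)
{
	for (int i = NCPT - 1; i >= 0; i--)
		pthread_mutex_unlock(&locks[i]);
}

int main(void)
{
	for (int i = 0; i < NCPT; i++)
		pthread_mutex_init(&locks[i], NULL);
	lock_one(2); unlock_one(2);	/* private: one slot */
	lock_all(); unlock_all();	/* exclusive: all slots */
	printf("ok\n");
	return 0;
}
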
diff --git a/drivers/staging/lustre/lnet/libcfs/libcfs_mem.c b/drivers/staging/lustre/lnet/libcfs/libcfs_mem.c
index d0e81bb41cdc..ef085ba23194 100644
--- a/drivers/staging/lustre/lnet/libcfs/libcfs_mem.c
+++ b/drivers/staging/lustre/lnet/libcfs/libcfs_mem.c
@@ -43,8 +43,8 @@ struct cfs_var_array {
void
cfs_percpt_free(void *vars)
{
- struct cfs_var_array *arr;
- int i;
+ struct cfs_var_array *arr;
+ int i;
arr = container_of(vars, struct cfs_var_array, va_ptrs[0]);
@@ -72,9 +72,9 @@ EXPORT_SYMBOL(cfs_percpt_free);
void *
cfs_percpt_alloc(struct cfs_cpt_table *cptab, unsigned int size)
{
- struct cfs_var_array *arr;
- int count;
- int i;
+ struct cfs_var_array *arr;
+ int count;
+ int i;
count = cfs_cpt_number(cptab);
@@ -120,8 +120,8 @@ EXPORT_SYMBOL(cfs_percpt_number);
void
cfs_array_free(void *vars)
{
- struct cfs_var_array *arr;
- int i;
+ struct cfs_var_array *arr;
+ int i;
arr = container_of(vars, struct cfs_var_array, va_ptrs[0]);
@@ -144,15 +144,15 @@ EXPORT_SYMBOL(cfs_array_free);
void *
cfs_array_alloc(int count, unsigned int size)
{
- struct cfs_var_array *arr;
- int i;
+ struct cfs_var_array *arr;
+ int i;
LIBCFS_ALLOC(arr, offsetof(struct cfs_var_array, va_ptrs[count]));
if (!arr)
return NULL;
- arr->va_count = count;
- arr->va_size = size;
+ arr->va_count = count;
+ arr->va_size = size;
for (i = 0; i < count; i++) {
LIBCFS_ALLOC(arr->va_ptrs[i], size);
diff --git a/drivers/staging/lustre/lnet/libcfs/libcfs_string.c b/drivers/staging/lustre/lnet/libcfs/libcfs_string.c
index 56a614d7713b..02de1ee720fd 100644
--- a/drivers/staging/lustre/lnet/libcfs/libcfs_string.c
+++ b/drivers/staging/lustre/lnet/libcfs/libcfs_string.c
@@ -79,7 +79,7 @@ int cfs_str2mask(const char *str, const char *(*bit2str)(int bit),
for (i = 0; i < 32; i++) {
debugstr = bit2str(i);
if (debugstr && strlen(debugstr) == len &&
- strncasecmp(str, debugstr, len) == 0) {
+ !strncasecmp(str, debugstr, len)) {
if (op == '-')
newmask &= ~(1 << i);
else
@@ -89,7 +89,7 @@ int cfs_str2mask(const char *str, const char *(*bit2str)(int bit),
}
}
if (!found && len == 3 &&
- (strncasecmp(str, "ALL", len) == 0)) {
+ !strncasecmp(str, "ALL", len)) {
if (op == '-')
newmask = minmask;
else
@@ -112,7 +112,7 @@ int cfs_str2mask(const char *str, const char *(*bit2str)(int bit),
char *cfs_firststr(char *str, size_t size)
{
size_t i = 0;
- char *end;
+ char *end;
/* trim leading spaces */
while (i < size && *str && isspace(*str)) {
@@ -182,7 +182,7 @@ cfs_gettok(struct cfs_lstr *next, char delim, struct cfs_lstr *res)
next->ls_len--;
}
- if (next->ls_len == 0) /* whitespaces only */
+ if (!next->ls_len) /* whitespaces only */
return 0;
if (*next->ls_str == delim) {
@@ -222,8 +222,8 @@ EXPORT_SYMBOL(cfs_gettok);
* \retval 0 otherwise
*/
int
-cfs_str2num_check(char *str, int nob, unsigned *num,
- unsigned min, unsigned max)
+cfs_str2num_check(char *str, int nob, unsigned int *num,
+ unsigned int min, unsigned int max)
{
bool all_numbers = true;
char *endp, cache;
@@ -273,11 +273,11 @@ EXPORT_SYMBOL(cfs_str2num_check);
* -ENOMEM will be returned.
*/
static int
-cfs_range_expr_parse(struct cfs_lstr *src, unsigned min, unsigned max,
+cfs_range_expr_parse(struct cfs_lstr *src, unsigned int min, unsigned int max,
int bracketed, struct cfs_range_expr **expr)
{
- struct cfs_range_expr *re;
- struct cfs_lstr tok;
+ struct cfs_range_expr *re;
+ struct cfs_lstr tok;
LIBCFS_ALLOC(re, sizeof(*re));
if (!re)
@@ -391,7 +391,7 @@ cfs_expr_list_print(char *buffer, int count, struct cfs_expr_list *expr_list)
i += scnprintf(buffer + i, count - i, "[");
list_for_each_entry(expr, &expr_list->el_exprs, re_link) {
- if (j++ != 0)
+ if (j++)
i += scnprintf(buffer + i, count - i, ",");
i += cfs_range_expr_print(buffer + i, count - i, expr,
numexprs > 1);
@@ -411,13 +411,13 @@ EXPORT_SYMBOL(cfs_expr_list_print);
* \retval 0 otherwise
*/
int
-cfs_expr_list_match(__u32 value, struct cfs_expr_list *expr_list)
+cfs_expr_list_match(u32 value, struct cfs_expr_list *expr_list)
{
- struct cfs_range_expr *expr;
+ struct cfs_range_expr *expr;
list_for_each_entry(expr, &expr_list->el_exprs, re_link) {
if (value >= expr->re_lo && value <= expr->re_hi &&
- ((value - expr->re_lo) % expr->re_stride) == 0)
+ !((value - expr->re_lo) % expr->re_stride))
return 1;
}
@@ -433,21 +433,21 @@ EXPORT_SYMBOL(cfs_expr_list_match);
* \retval < 0 for failure
*/
int
-cfs_expr_list_values(struct cfs_expr_list *expr_list, int max, __u32 **valpp)
+cfs_expr_list_values(struct cfs_expr_list *expr_list, int max, u32 **valpp)
{
- struct cfs_range_expr *expr;
- __u32 *val;
- int count = 0;
- int i;
+ struct cfs_range_expr *expr;
+ u32 *val;
+ int count = 0;
+ int i;
list_for_each_entry(expr, &expr_list->el_exprs, re_link) {
for (i = expr->re_lo; i <= expr->re_hi; i++) {
- if (((i - expr->re_lo) % expr->re_stride) == 0)
+ if (!((i - expr->re_lo) % expr->re_stride))
count++;
}
}
- if (count == 0) /* empty expression list */
+ if (!count) /* empty expression list */
return 0;
if (count > max) {
@@ -463,7 +463,7 @@ cfs_expr_list_values(struct cfs_expr_list *expr_list, int max, __u32 **valpp)
count = 0;
list_for_each_entry(expr, &expr_list->el_exprs, re_link) {
for (i = expr->re_lo; i <= expr->re_hi; i++) {
- if (((i - expr->re_lo) % expr->re_stride) == 0)
+ if (!((i - expr->re_lo) % expr->re_stride))
val[count++] = i;
}
}
@@ -501,13 +501,13 @@ EXPORT_SYMBOL(cfs_expr_list_free);
* \retval -errno otherwise
*/
int
-cfs_expr_list_parse(char *str, int len, unsigned min, unsigned max,
+cfs_expr_list_parse(char *str, int len, unsigned int min, unsigned int max,
struct cfs_expr_list **elpp)
{
- struct cfs_expr_list *expr_list;
- struct cfs_range_expr *expr;
- struct cfs_lstr src;
- int rc;
+ struct cfs_expr_list *expr_list;
+ struct cfs_range_expr *expr;
+ struct cfs_lstr src;
+ int rc;
LIBCFS_ALLOC(expr_list, sizeof(*expr_list));
if (!expr_list)
@@ -533,18 +533,18 @@ cfs_expr_list_parse(char *str, int len, unsigned min, unsigned max,
}
rc = cfs_range_expr_parse(&tok, min, max, 1, &expr);
- if (rc != 0)
+ if (rc)
break;
list_add_tail(&expr->re_link, &expr_list->el_exprs);
}
} else {
rc = cfs_range_expr_parse(&src, min, max, 0, &expr);
- if (rc == 0)
+ if (!rc)
list_add_tail(&expr->re_link, &expr_list->el_exprs);
}
- if (rc != 0)
+ if (rc)
cfs_expr_list_free(expr_list);
else
*elpp = expr_list;
diff --git a/drivers/staging/lustre/lnet/libcfs/linux/linux-cpu.c b/drivers/staging/lustre/lnet/libcfs/linux/linux-cpu.c
index e8b1a61420de..6b9cf06e8df2 100644
--- a/drivers/staging/lustre/lnet/libcfs/linux/linux-cpu.c
+++ b/drivers/staging/lustre/lnet/libcfs/linux/linux-cpu.c
@@ -55,6 +55,8 @@ MODULE_PARM_DESC(cpu_npartitions, "# of CPU partitions");
* i.e: "N 0[0,1] 1[2,3]" the first character 'N' means numbers in bracket
* are NUMA node ID, number before bracket is CPU partition ID.
*
+ * i.e: "N", shortcut expression to create CPT from NUMA & CPU topology
+ *
* NB: If user specified cpu_pattern, cpu_npartitions will be ignored
*/
static char *cpu_pattern = "";
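
With this change cpu_pattern accepts a bare "N"/"n" as well as the bracketed form, and the partition count for the bracketed form is found by scanning for '[' markers. A standalone sketch of that bracket count:

#include <stdio.h>
#include <string.h>

/* Count '[' groups in a pattern such as "N 0[0,1] 1[2,3]" (two here). */
static int count_partitions(const char *pattern)
{
	int ncpt = 0;

	for (const char *s = pattern; (s = strchr(s, '[')) != NULL; s++)
		ncpt++;
	return ncpt;
}

int main(void)
{
	printf("%d\n", count_partitions("N 0[0,1] 1[2,3]")); /* 2 */
	printf("%d\n", count_partitions("N"));               /* 0: shortcut */
	return 0;
}
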
@@ -88,7 +90,7 @@ cfs_node_to_cpumask(int node, cpumask_t *mask)
void
cfs_cpt_table_free(struct cfs_cpt_table *cptab)
{
- int i;
+ int i;
if (cptab->ctb_cpu2cpt) {
LIBCFS_FREE(cptab->ctb_cpu2cpt,
@@ -126,7 +128,7 @@ struct cfs_cpt_table *
cfs_cpt_table_alloc(unsigned int ncpt)
{
struct cfs_cpt_table *cptab;
- int i;
+ int i;
LIBCFS_ALLOC(cptab, sizeof(*cptab));
if (!cptab)
@@ -177,10 +179,10 @@ EXPORT_SYMBOL(cfs_cpt_table_alloc);
int
cfs_cpt_table_print(struct cfs_cpt_table *cptab, char *buf, int len)
{
- char *tmp = buf;
- int rc = 0;
- int i;
- int j;
+ char *tmp = buf;
+ int rc = 0;
+ int i;
+ int j;
for (i = 0; i < cptab->ctb_nparts; i++) {
if (len > 0) {
@@ -271,7 +273,7 @@ EXPORT_SYMBOL(cfs_cpt_nodemask);
int
cfs_cpt_set_cpu(struct cfs_cpt_table *cptab, int cpt, int cpu)
{
- int node;
+ int node;
LASSERT(cpt >= 0 && cpt < cptab->ctb_nparts);
@@ -311,8 +313,8 @@ EXPORT_SYMBOL(cfs_cpt_set_cpu);
void
cfs_cpt_unset_cpu(struct cfs_cpt_table *cptab, int cpt, int cpu)
{
- int node;
- int i;
+ int node;
+ int i;
LASSERT(cpt == CFS_CPT_ANY || (cpt >= 0 && cpt < cptab->ctb_nparts));
@@ -371,9 +373,9 @@ EXPORT_SYMBOL(cfs_cpt_unset_cpu);
int
cfs_cpt_set_cpumask(struct cfs_cpt_table *cptab, int cpt, cpumask_t *mask)
{
- int i;
+ int i;
- if (cpumask_weight(mask) == 0 ||
+ if (!cpumask_weight(mask) ||
cpumask_any_and(mask, cpu_online_mask) >= nr_cpu_ids) {
CDEBUG(D_INFO, "No online CPU is found in the CPU mask for CPU partition %d\n",
cpt);
@@ -392,7 +394,7 @@ EXPORT_SYMBOL(cfs_cpt_set_cpumask);
void
cfs_cpt_unset_cpumask(struct cfs_cpt_table *cptab, int cpt, cpumask_t *mask)
{
- int i;
+ int i;
for_each_cpu(i, mask)
cfs_cpt_unset_cpu(cptab, cpt, i);
@@ -402,8 +404,8 @@ EXPORT_SYMBOL(cfs_cpt_unset_cpumask);
int
cfs_cpt_set_node(struct cfs_cpt_table *cptab, int cpt, int node)
{
- cpumask_t *mask;
- int rc;
+ cpumask_t *mask;
+ int rc;
if (node < 0 || node >= MAX_NUMNODES) {
CDEBUG(D_INFO,
@@ -449,7 +451,7 @@ EXPORT_SYMBOL(cfs_cpt_unset_node);
int
cfs_cpt_set_nodemask(struct cfs_cpt_table *cptab, int cpt, nodemask_t *mask)
{
- int i;
+ int i;
for_each_node_mask(i, *mask) {
if (!cfs_cpt_set_node(cptab, cpt, i))
@@ -463,7 +465,7 @@ EXPORT_SYMBOL(cfs_cpt_set_nodemask);
void
cfs_cpt_unset_nodemask(struct cfs_cpt_table *cptab, int cpt, nodemask_t *mask)
{
- int i;
+ int i;
for_each_node_mask(i, *mask)
cfs_cpt_unset_node(cptab, cpt, i);
@@ -473,8 +475,8 @@ EXPORT_SYMBOL(cfs_cpt_unset_nodemask);
void
cfs_cpt_clear(struct cfs_cpt_table *cptab, int cpt)
{
- int last;
- int i;
+ int last;
+ int i;
if (cpt == CFS_CPT_ANY) {
last = cptab->ctb_nparts - 1;
@@ -493,10 +495,10 @@ EXPORT_SYMBOL(cfs_cpt_clear);
int
cfs_cpt_spread_node(struct cfs_cpt_table *cptab, int cpt)
{
- nodemask_t *mask;
- int weight;
- int rotor;
- int node;
+ nodemask_t *mask;
+ int weight;
+ int rotor;
+ int node;
/* convert CPU partition ID to HW node id */
@@ -514,7 +516,7 @@ cfs_cpt_spread_node(struct cfs_cpt_table *cptab, int cpt)
rotor %= weight;
for_each_node_mask(node, *mask) {
- if (rotor-- == 0)
+ if (!rotor--)
return node;
}
@@ -526,8 +528,8 @@ EXPORT_SYMBOL(cfs_cpt_spread_node);
int
cfs_cpt_current(struct cfs_cpt_table *cptab, int remap)
{
- int cpu = smp_processor_id();
- int cpt = cptab->ctb_cpu2cpt[cpu];
+ int cpu = smp_processor_id();
+ int cpt = cptab->ctb_cpu2cpt[cpu];
if (cpt < 0) {
if (!remap)
@@ -555,10 +557,10 @@ EXPORT_SYMBOL(cfs_cpt_of_cpu);
int
cfs_cpt_bind(struct cfs_cpt_table *cptab, int cpt)
{
- cpumask_t *cpumask;
- nodemask_t *nodemask;
- int rc;
- int i;
+ cpumask_t *cpumask;
+ nodemask_t *nodemask;
+ int rc;
+ int i;
LASSERT(cpt == CFS_CPT_ANY || (cpt >= 0 && cpt < cptab->ctb_nparts));
@@ -582,7 +584,7 @@ cfs_cpt_bind(struct cfs_cpt_table *cptab, int cpt)
rc = set_cpus_allowed_ptr(current, cpumask);
set_mems_allowed(*nodemask);
- if (rc == 0)
+ if (!rc)
schedule(); /* switch to allowed CPU */
return rc;
@@ -601,10 +603,10 @@ static int
cfs_cpt_choose_ncpus(struct cfs_cpt_table *cptab, int cpt,
cpumask_t *node, int number)
{
- cpumask_t *socket = NULL;
- cpumask_t *core = NULL;
- int rc = 0;
- int cpu;
+ cpumask_t *socket = NULL;
+ cpumask_t *core = NULL;
+ int rc = 0;
+ int cpu;
LASSERT(number > 0);
@@ -638,7 +640,7 @@ cfs_cpt_choose_ncpus(struct cfs_cpt_table *cptab, int cpt,
LASSERT(!cpumask_empty(socket));
while (!cpumask_empty(socket)) {
- int i;
+ int i;
/* get cpumask for hts in the same core */
cpumask_copy(core, topology_sibling_cpumask(cpu));
@@ -656,14 +658,14 @@ cfs_cpt_choose_ncpus(struct cfs_cpt_table *cptab, int cpt,
goto out;
}
- if (--number == 0)
+ if (!--number)
goto out;
}
cpu = cpumask_first(socket);
}
}
- out:
+out:
if (socket)
LIBCFS_FREE(socket, cpumask_size());
if (core)
@@ -676,9 +678,9 @@ cfs_cpt_choose_ncpus(struct cfs_cpt_table *cptab, int cpt,
static unsigned int
cfs_cpt_num_estimate(void)
{
- unsigned nnode = num_online_nodes();
- unsigned ncpu = num_online_cpus();
- unsigned ncpt;
+ unsigned int nnode = num_online_nodes();
+ unsigned int ncpu = num_online_cpus();
+ unsigned int ncpt;
if (ncpu <= CPT_WEIGHT_MIN) {
ncpt = 1;
@@ -703,14 +705,14 @@ cfs_cpt_num_estimate(void)
ncpt = nnode;
- out:
+out:
#if (BITS_PER_LONG == 32)
/* config many CPU partitions on 32-bit system could consume
* too much memory
*/
ncpt = min(2U, ncpt);
#endif
- while (ncpu % ncpt != 0)
+ while (ncpu % ncpt)
ncpt--; /* worst case is 1 */
return ncpt;
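
The estimate is then rounded down until it divides the online CPU count evenly, so every partition receives the same number of CPUs, degrading to a single partition in the worst case:

#include <stdio.h>

/* Round a partition-count estimate down to a divisor of ncpu. */
static unsigned int fit_ncpt(unsigned int ncpu, unsigned int ncpt)
{
	while (ncpu % ncpt)
		ncpt--;			/* worst case ends at 1 */
	return ncpt;
}

int main(void)
{
	printf("%u\n", fit_ncpt(12, 5));	/* 4 */
	printf("%u\n", fit_ncpt(7, 3));		/* 1 */
	return 0;
}
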
@@ -720,11 +722,11 @@ static struct cfs_cpt_table *
cfs_cpt_table_create(int ncpt)
{
struct cfs_cpt_table *cptab = NULL;
- cpumask_t *mask = NULL;
- int cpt = 0;
- int num;
- int rc;
- int i;
+ cpumask_t *mask = NULL;
+ int cpt = 0;
+ int num;
+ int rc;
+ int i;
rc = cfs_cpt_num_estimate();
if (ncpt <= 0)
@@ -735,7 +737,7 @@ cfs_cpt_table_create(int ncpt)
ncpt, rc);
}
- if (num_online_cpus() % ncpt != 0) {
+ if (num_online_cpus() % ncpt) {
CERROR("CPU number %d is not multiple of cpu_npartition %d, please try different cpu_npartitions value or set pattern string by cpu_pattern=STRING\n",
(int)num_online_cpus(), ncpt);
goto failed;
@@ -748,7 +750,7 @@ cfs_cpt_table_create(int ncpt)
}
num = num_online_cpus() / ncpt;
- if (num == 0) {
+ if (!num) {
CERROR("CPU changed while setting CPU partition\n");
goto failed;
}
@@ -764,7 +766,7 @@ cfs_cpt_table_create(int ncpt)
while (!cpumask_empty(mask)) {
struct cfs_cpu_partition *part;
- int n;
+ int n;
/*
* Each emulated NUMA node has all allowed CPUs in
@@ -817,27 +819,36 @@ cfs_cpt_table_create(int ncpt)
static struct cfs_cpt_table *
cfs_cpt_table_create_pattern(char *pattern)
{
- struct cfs_cpt_table *cptab;
- char *str = pattern;
- int node = 0;
- int high;
- int ncpt;
- int c;
-
- for (ncpt = 0;; ncpt++) { /* quick scan bracket */
- str = strchr(str, '[');
- if (!str)
- break;
- str++;
- }
+ struct cfs_cpt_table *cptab;
+ char *str;
+ int node = 0;
+ int high;
+ int ncpt = 0;
+ int cpt;
+ int rc;
+ int c;
+ int i;
str = cfs_trimwhite(pattern);
if (*str == 'n' || *str == 'N') {
pattern = str + 1;
- node = 1;
+ if (*pattern != '\0') {
+ node = 1;
+ } else { /* shortcut to create CPT from NUMA & CPU topology */
+ node = -1;
+ ncpt = num_online_nodes();
+ }
+ }
+
+ if (!ncpt) { /* scan for brackets, which mark partitions */
+ for (str = pattern;; str++, ncpt++) {
+ str = strchr(str, '[');
+ if (!str)
+ break;
+ }
}
- if (ncpt == 0 ||
+ if (!ncpt ||
(node && ncpt > num_online_nodes()) ||
(!node && ncpt > num_online_cpus())) {
CERROR("Invalid pattern %s, or too many partitions %d\n",
@@ -845,25 +856,39 @@ cfs_cpt_table_create_pattern(char *pattern)
return NULL;
}
- high = node ? MAX_NUMNODES - 1 : nr_cpu_ids - 1;
-
cptab = cfs_cpt_table_alloc(ncpt);
if (!cptab) {
CERROR("Failed to allocate cpu partition table\n");
return NULL;
}
+ if (node < 0) { /* shortcut to create CPT from NUMA & CPU topology */
+ cpt = 0;
+
+ for_each_online_node(i) {
+ if (cpt >= ncpt) {
+ CERROR("CPU changed while setting CPU partition table, %d/%d\n",
+ cpt, ncpt);
+ goto failed;
+ }
+
+ rc = cfs_cpt_set_node(cptab, cpt++, i);
+ if (!rc)
+ goto failed;
+ }
+ return cptab;
+ }
+
+ high = node ? MAX_NUMNODES - 1 : nr_cpu_ids - 1;
+
for (str = cfs_trimwhite(pattern), c = 0;; c++) {
- struct cfs_range_expr *range;
- struct cfs_expr_list *el;
- char *bracket = strchr(str, '[');
- int cpt;
- int rc;
- int i;
- int n;
+ struct cfs_range_expr *range;
+ struct cfs_expr_list *el;
+ char *bracket = strchr(str, '[');
+ int n;
if (!bracket) {
- if (*str != 0) {
+ if (*str) {
CERROR("Invalid pattern %s\n", str);
goto failed;
}
@@ -886,7 +911,7 @@ cfs_cpt_table_create_pattern(char *pattern)
goto failed;
}
- if (cfs_cpt_weight(cptab, cpt) != 0) {
+ if (cfs_cpt_weight(cptab, cpt)) {
CERROR("Partition %d has already been set.\n", cpt);
goto failed;
}
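
The new shortcut path maps each online NUMA node to its own partition, in node order, bailing out if the topology changed underneath it. A userspace sketch of the same assignment (the helper and its arguments are made up for illustration):

#include <stdio.h>

/* Assign one partition per "online node", in node order. */
static int build_node_cpts(const int *nodes, int nnodes, int *cpt_of_node)
{
	int cpt = 0;

	for (int i = 0; i < nnodes; i++) {
		if (cpt >= nnodes)
			return -1;	/* topology changed under us */
		cpt_of_node[nodes[i]] = cpt++;
	}
	return cpt;			/* number of partitions built */
}

int main(void)
{
	int nodes[] = { 0, 1 }, cpt_of_node[2];
	int ncpt = build_node_cpts(nodes, 2, cpt_of_node);

	printf("ncpt=%d node1->cpt%d\n", ncpt, cpt_of_node[1]);
	return 0;
}
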
@@ -905,14 +930,14 @@ cfs_cpt_table_create_pattern(char *pattern)
}
if (cfs_expr_list_parse(str, (bracket - str) + 1,
- 0, high, &el) != 0) {
+ 0, high, &el)) {
CERROR("Can't parse number range: %s\n", str);
goto failed;
}
list_for_each_entry(range, &el->el_exprs, re_link) {
for (i = range->re_lo; i <= range->re_hi; i++) {
- if ((i - range->re_lo) % range->re_stride != 0)
+ if ((i - range->re_lo) % range->re_stride)
continue;
rc = node ? cfs_cpt_set_node(cptab, cpt, i) :
@@ -945,8 +970,8 @@ cfs_cpt_table_create_pattern(char *pattern)
static int
cfs_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu)
{
- unsigned int cpu = (unsigned long)hcpu;
- bool warn;
+ unsigned int cpu = (unsigned long)hcpu;
+ bool warn;
switch (action) {
case CPU_DEAD:
@@ -1019,7 +1044,7 @@ cfs_cpu_init(void)
register_hotcpu_notifier(&cfs_cpu_notifier);
#endif
- if (*cpu_pattern != 0) {
+ if (*cpu_pattern) {
cfs_cpt_table = cfs_cpt_table_create_pattern(cpu_pattern);
if (!cfs_cpt_table) {
CERROR("Failed to create cptab from pattern %s\n",
diff --git a/drivers/staging/lustre/lnet/libcfs/linux/linux-crypto.c b/drivers/staging/lustre/lnet/libcfs/linux/linux-crypto.c
index 7f56d2c9dd00..68e34b4a76c9 100644
--- a/drivers/staging/lustre/lnet/libcfs/linux/linux-crypto.c
+++ b/drivers/staging/lustre/lnet/libcfs/linux/linux-crypto.c
@@ -64,7 +64,7 @@ static int cfs_crypto_hash_alloc(enum cfs_crypto_hash_alg hash_alg,
unsigned int key_len)
{
struct crypto_ahash *tfm;
- int err = 0;
+ int err = 0;
*type = cfs_crypto_hash_type(hash_alg);
@@ -93,12 +93,12 @@ static int cfs_crypto_hash_alloc(enum cfs_crypto_hash_alg hash_alg,
if (key)
err = crypto_ahash_setkey(tfm, key, key_len);
- else if ((*type)->cht_key != 0)
+ else if ((*type)->cht_key)
err = crypto_ahash_setkey(tfm,
(unsigned char *)&((*type)->cht_key),
(*type)->cht_size);
- if (err != 0) {
+ if (err) {
ahash_request_free(*req);
crypto_free_ahash(tfm);
return err;
@@ -147,16 +147,16 @@ int cfs_crypto_hash_digest(enum cfs_crypto_hash_alg hash_alg,
unsigned char *key, unsigned int key_len,
unsigned char *hash, unsigned int *hash_len)
{
- struct scatterlist sl;
+ struct scatterlist sl;
struct ahash_request *req;
- int err;
- const struct cfs_crypto_hash_type *type;
+ int err;
+ const struct cfs_crypto_hash_type *type;
- if (!buf || buf_len == 0 || !hash_len)
+ if (!buf || !buf_len || !hash_len)
return -EINVAL;
err = cfs_crypto_hash_alloc(hash_alg, &type, &req, key, key_len);
- if (err != 0)
+ if (err)
return err;
if (!hash || *hash_len < type->cht_size) {
@@ -177,7 +177,7 @@ int cfs_crypto_hash_digest(enum cfs_crypto_hash_alg hash_alg,
EXPORT_SYMBOL(cfs_crypto_hash_digest);
/**
- * Allocate and initialize desriptor for hash algorithm.
+ * Allocate and initialize descriptor for hash algorithm.
*
* This should be used to initialize a hash descriptor for multiple calls
* to a single hash function when computing the hash across multiple
@@ -198,8 +198,8 @@ cfs_crypto_hash_init(enum cfs_crypto_hash_alg hash_alg,
unsigned char *key, unsigned int key_len)
{
struct ahash_request *req;
- int err;
- const struct cfs_crypto_hash_type *type;
+ int err;
+ const struct cfs_crypto_hash_type *type;
err = cfs_crypto_hash_alloc(hash_alg, &type, &req, key, key_len);
@@ -273,7 +273,7 @@ EXPORT_SYMBOL(cfs_crypto_hash_update);
int cfs_crypto_hash_final(struct cfs_crypto_hash_desc *hdesc,
unsigned char *hash, unsigned int *hash_len)
{
- int err;
+ int err;
struct ahash_request *req = (void *)hdesc;
int size = crypto_ahash_digestsize(crypto_ahash_reqtfm(req));
@@ -312,8 +312,8 @@ static void cfs_crypto_performance_test(enum cfs_crypto_hash_alg hash_alg)
{
int buf_len = max(PAGE_SIZE, 1048576UL);
void *buf;
- unsigned long start, end;
- int bcount, err = 0;
+ unsigned long start, end;
+ int bcount, err = 0;
struct page *page;
unsigned char hash[CFS_CRYPTO_HASH_DIGESTSIZE_MAX];
unsigned int hash_len = sizeof(hash);
@@ -358,7 +358,7 @@ out_err:
CDEBUG(D_INFO, "Crypto hash algorithm %s test error: rc = %d\n",
cfs_crypto_hash_name(hash_alg), err);
} else {
- unsigned long tmp;
+ unsigned long tmp;
tmp = ((bcount * buf_len / jiffies_to_msecs(end - start)) *
1000) / (1024 * 1024);
@@ -440,6 +440,6 @@ int cfs_crypto_register(void)
*/
void cfs_crypto_unregister(void)
{
- if (adler32 == 0)
+ if (!adler32)
cfs_crypto_adler32_unregister();
}
diff --git a/drivers/staging/lustre/lnet/libcfs/linux/linux-crypto.h b/drivers/staging/lustre/lnet/libcfs/linux/linux-crypto.h
index 18e8cd4d8758..d0b3aa80cfa6 100644
--- a/drivers/staging/lustre/lnet/libcfs/linux/linux-crypto.h
+++ b/drivers/staging/lustre/lnet/libcfs/linux/linux-crypto.h
@@ -1,4 +1,4 @@
- /*
+/*
* GPL HEADER START
*
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
diff --git a/drivers/staging/lustre/lnet/libcfs/linux/linux-debug.c b/drivers/staging/lustre/lnet/libcfs/linux/linux-debug.c
index 435b784c52f8..39a72e3f0c18 100644
--- a/drivers/staging/lustre/lnet/libcfs/linux/linux-debug.c
+++ b/drivers/staging/lustre/lnet/libcfs/linux/linux-debug.c
@@ -57,7 +57,6 @@
#include <linux/kallsyms.h>
-char lnet_upcall[1024] = "/usr/lib/lustre/lnet_upcall";
char lnet_debug_log_upcall[1024] = "/usr/lib/lustre/lnet_debug_log_upcall";
/**
@@ -68,11 +67,12 @@ char lnet_debug_log_upcall[1024] = "/usr/lib/lustre/lnet_debug_log_upcall";
void libcfs_run_debug_log_upcall(char *file)
{
char *argv[3];
- int rc;
- char *envp[] = {
+ int rc;
+ static const char * const envp[] = {
"HOME=/",
"PATH=/sbin:/bin:/usr/sbin:/usr/bin",
- NULL};
+ NULL
+ };
argv[0] = lnet_debug_log_upcall;
@@ -81,7 +81,7 @@ void libcfs_run_debug_log_upcall(char *file)
argv[2] = NULL;
- rc = call_usermodehelper(argv[0], argv, envp, 1);
+ rc = call_usermodehelper(argv[0], argv, (char **)envp, 1);
if (rc < 0 && rc != -ENOENT) {
CERROR("Error %d invoking LNET debug log upcall %s %s; check /sys/kernel/debug/lnet/debug_log_upcall\n",
rc, argv[0], argv[1]);
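
The surviving debug-log upcall hands a fixed argv/envp to call_usermodehelper; the cast is needed because that kernel API takes non-const pointers. A hedged userspace analog of firing such an upcall via fork/execve:

#include <stdio.h>
#include <unistd.h>
#include <sys/wait.h>

/* Userspace analog of an upcall: fixed environment, short argv. */
static int run_upcall(const char *path, const char *arg)
{
	/* the same "static const char * const" pattern as the patch */
	static const char * const envp[] = {
		"HOME=/",
		"PATH=/sbin:/bin:/usr/sbin:/usr/bin",
		NULL
	};
	char * const argv[] = { (char *)path, (char *)arg, NULL };
	pid_t pid = fork();
	int status;

	if (pid == 0) {
		execve(path, argv, (char * const *)envp);
		_exit(127);	/* exec failed */
	}
	if (pid < 0)
		return -1;
	waitpid(pid, &status, 0);
	return status;
}

int main(void)
{
	run_upcall("/bin/echo", "upcall fired");
	return 0;
}
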
@@ -91,57 +91,6 @@ void libcfs_run_debug_log_upcall(char *file)
}
}
-void libcfs_run_upcall(char **argv)
-{
- int rc;
- int argc;
- char *envp[] = {
- "HOME=/",
- "PATH=/sbin:/bin:/usr/sbin:/usr/bin",
- NULL};
-
- argv[0] = lnet_upcall;
- argc = 1;
- while (argv[argc])
- argc++;
-
- LASSERT(argc >= 2);
-
- rc = call_usermodehelper(argv[0], argv, envp, 1);
- if (rc < 0 && rc != -ENOENT) {
- CERROR("Error %d invoking LNET upcall %s %s%s%s%s%s%s%s%s; check /sys/kernel/debug/lnet/upcall\n",
- rc, argv[0], argv[1],
- argc < 3 ? "" : ",", argc < 3 ? "" : argv[2],
- argc < 4 ? "" : ",", argc < 4 ? "" : argv[3],
- argc < 5 ? "" : ",", argc < 5 ? "" : argv[4],
- argc < 6 ? "" : ",...");
- } else {
- CDEBUG(D_HA, "Invoked LNET upcall %s %s%s%s%s%s%s%s%s\n",
- argv[0], argv[1],
- argc < 3 ? "" : ",", argc < 3 ? "" : argv[2],
- argc < 4 ? "" : ",", argc < 4 ? "" : argv[3],
- argc < 5 ? "" : ",", argc < 5 ? "" : argv[4],
- argc < 6 ? "" : ",...");
- }
-}
-
-void libcfs_run_lbug_upcall(struct libcfs_debug_msg_data *msgdata)
-{
- char *argv[6];
- char buf[32];
-
- snprintf(buf, sizeof(buf), "%d", msgdata->msg_line);
-
- argv[1] = "LBUG";
- argv[2] = (char *)msgdata->msg_file;
- argv[3] = (char *)msgdata->msg_fn;
- argv[4] = buf;
- argv[5] = NULL;
-
- libcfs_run_upcall(argv);
-}
-EXPORT_SYMBOL(libcfs_run_lbug_upcall);
-
/* coverity[+kill] */
void __noreturn lbug_with_loc(struct libcfs_debug_msg_data *msgdata)
{
@@ -156,7 +105,6 @@ void __noreturn lbug_with_loc(struct libcfs_debug_msg_data *msgdata)
dump_stack();
if (!libcfs_panic_on_lbug)
libcfs_debug_dumplog();
- libcfs_run_lbug_upcall(msgdata);
if (libcfs_panic_on_lbug)
panic("LBUG");
set_task_state(current, TASK_UNINTERRUPTIBLE);
diff --git a/drivers/staging/lustre/lnet/libcfs/linux/linux-module.c b/drivers/staging/lustre/lnet/libcfs/linux/linux-module.c
index 38308f8b6aae..3f5d58babc2f 100644
--- a/drivers/staging/lustre/lnet/libcfs/linux/linux-module.c
+++ b/drivers/staging/lustre/lnet/libcfs/linux/linux-module.c
@@ -83,7 +83,7 @@ static inline bool libcfs_ioctl_is_invalid(struct libcfs_ioctl_data *data)
CERROR("LIBCFS ioctl: plen2 nonzero but no pbuf2 pointer\n");
return true;
}
- if ((__u32)libcfs_ioctl_packlen(data) != data->ioc_hdr.ioc_len) {
+ if ((u32)libcfs_ioctl_packlen(data) != data->ioc_hdr.ioc_len) {
CERROR("LIBCFS ioctl: packlen != ioc_len\n");
return true;
}
diff --git a/drivers/staging/lustre/lnet/libcfs/linux/linux-prim.c b/drivers/staging/lustre/lnet/libcfs/linux/linux-prim.c
index 291d286eab48..cf902154f0aa 100644
--- a/drivers/staging/lustre/lnet/libcfs/linux/linux-prim.c
+++ b/drivers/staging/lustre/lnet/libcfs/linux/linux-prim.c
@@ -45,8 +45,8 @@
sigset_t
cfs_block_allsigs(void)
{
- unsigned long flags;
- sigset_t old;
+ unsigned long flags;
+ sigset_t old;
spin_lock_irqsave(&current->sighand->siglock, flags);
old = current->blocked;
@@ -60,8 +60,8 @@ EXPORT_SYMBOL(cfs_block_allsigs);
sigset_t cfs_block_sigs(unsigned long sigs)
{
- unsigned long flags;
- sigset_t old;
+ unsigned long flags;
+ sigset_t old;
spin_lock_irqsave(&current->sighand->siglock, flags);
old = current->blocked;
@@ -91,7 +91,7 @@ EXPORT_SYMBOL(cfs_block_sigsinv);
void
cfs_restore_sigs(sigset_t old)
{
- unsigned long flags;
+ unsigned long flags;
spin_lock_irqsave(&current->sighand->siglock, flags);
current->blocked = old;
diff --git a/drivers/staging/lustre/lnet/libcfs/linux/linux-tracefile.c b/drivers/staging/lustre/lnet/libcfs/linux/linux-tracefile.c
index 8b551d2708ba..75eb84e7f0f8 100644
--- a/drivers/staging/lustre/lnet/libcfs/linux/linux-tracefile.c
+++ b/drivers/staging/lustre/lnet/libcfs/linux/linux-tracefile.c
@@ -49,8 +49,8 @@ static DECLARE_RWSEM(cfs_tracefile_sem);
int cfs_tracefile_init_arch(void)
{
- int i;
- int j;
+ int i;
+ int j;
struct cfs_trace_cpu_data *tcd;
/* initialize trace_data */
@@ -85,14 +85,14 @@ int cfs_tracefile_init_arch(void)
out:
cfs_tracefile_fini_arch();
- printk(KERN_ERR "lnet: Not enough memory\n");
+ pr_err("lnet: Not enough memory\n");
return -ENOMEM;
}
void cfs_tracefile_fini_arch(void)
{
- int i;
- int j;
+ int i;
+ int j;
for (i = 0; i < num_possible_cpus(); i++)
for (j = 0; j < 3; j++) {
@@ -224,26 +224,26 @@ void cfs_print_to_console(struct ptldebug_header *hdr, int mask,
{
char *prefix = "Lustre", *ptype = NULL;
- if ((mask & D_EMERG) != 0) {
+ if (mask & D_EMERG) {
prefix = dbghdr_to_err_string(hdr);
ptype = KERN_EMERG;
- } else if ((mask & D_ERROR) != 0) {
+ } else if (mask & D_ERROR) {
prefix = dbghdr_to_err_string(hdr);
ptype = KERN_ERR;
- } else if ((mask & D_WARNING) != 0) {
+ } else if (mask & D_WARNING) {
prefix = dbghdr_to_info_string(hdr);
ptype = KERN_WARNING;
- } else if ((mask & (D_CONSOLE | libcfs_printk)) != 0) {
+ } else if (mask & (D_CONSOLE | libcfs_printk)) {
prefix = dbghdr_to_info_string(hdr);
ptype = KERN_INFO;
}
- if ((mask & D_CONSOLE) != 0) {
- printk("%s%s: %.*s", ptype, prefix, len, buf);
+ if (mask & D_CONSOLE) {
+ pr_info("%s%s: %.*s", ptype, prefix, len, buf);
} else {
- printk("%s%s: %d:%d:(%s:%d:%s()) %.*s", ptype, prefix,
- hdr->ph_pid, hdr->ph_extern_pid, file, hdr->ph_line_num,
- fn, len, buf);
+ pr_info("%s%s: %d:%d:(%s:%d:%s()) %.*s", ptype, prefix,
+ hdr->ph_pid, hdr->ph_extern_pid, file,
+ hdr->ph_line_num, fn, len, buf);
}
}
diff --git a/drivers/staging/lustre/lnet/libcfs/module.c b/drivers/staging/lustre/lnet/libcfs/module.c
index 86b4d25cad46..161e04226521 100644
--- a/drivers/staging/lustre/lnet/libcfs/module.c
+++ b/drivers/staging/lustre/lnet/libcfs/module.c
@@ -183,12 +183,12 @@ EXPORT_SYMBOL(lprocfs_call_handler);
static int __proc_dobitmasks(void *data, int write,
loff_t pos, void __user *buffer, int nob)
{
- const int tmpstrlen = 512;
- char *tmpstr;
- int rc;
+ const int tmpstrlen = 512;
+ char *tmpstr;
+ int rc;
unsigned int *mask = data;
- int is_subsys = (mask == &libcfs_subsystem_debug) ? 1 : 0;
- int is_printk = (mask == &libcfs_printk) ? 1 : 0;
+ int is_subsys = (mask == &libcfs_subsystem_debug) ? 1 : 0;
+ int is_printk = (mask == &libcfs_printk) ? 1 : 0;
rc = cfs_trace_allocate_string_buffer(&tmpstr, tmpstrlen);
if (rc < 0)
@@ -293,8 +293,8 @@ static int __proc_cpt_table(void *data, int write,
loff_t pos, void __user *buffer, int nob)
{
char *buf = NULL;
- int len = 4096;
- int rc = 0;
+ int len = 4096;
+ int rc = 0;
if (write)
return -EPERM;
@@ -365,14 +365,6 @@ static struct ctl_table lnet_table[] = {
.mode = 0444,
.proc_handler = &proc_cpt_table,
},
-
- {
- .procname = "upcall",
- .data = lnet_upcall,
- .maxlen = sizeof(lnet_upcall),
- .mode = 0644,
- .proc_handler = &proc_dostring,
- },
{
.procname = "debug_log_upcall",
.data = lnet_debug_log_upcall,
@@ -547,7 +539,7 @@ static int libcfs_init(void)
}
rc = cfs_cpu_init();
- if (rc != 0)
+ if (rc)
goto cleanup_debug;
rc = misc_register(&libcfs_dev);
@@ -566,7 +558,7 @@ static int libcfs_init(void)
rc = min(cfs_cpt_weight(cfs_cpt_table, CFS_CPT_ANY), 4);
rc = cfs_wi_sched_create("cfs_rh", cfs_cpt_table, CFS_CPT_ANY,
rc, &cfs_sched_rehash);
- if (rc != 0) {
+ if (rc) {
CERROR("Startup workitem scheduler: error: %d\n", rc);
goto cleanup_deregister;
}
diff --git a/drivers/staging/lustre/lnet/libcfs/prng.c b/drivers/staging/lustre/lnet/libcfs/prng.c
index a9bdb284fd15..21d5a3912c5f 100644
--- a/drivers/staging/lustre/lnet/libcfs/prng.c
+++ b/drivers/staging/lustre/lnet/libcfs/prng.c
@@ -33,7 +33,7 @@
* x(n)=a*x(n-1)+carry mod 2^16 and y(n)=b*y(n-1)+carry mod 2^16,
* number and carry packed within the same 32 bit integer.
* algorithm recommended by Marsaglia
-*/
+ */
#include "../../include/linux/libcfs/libcfs.h"
diff --git a/drivers/staging/lustre/lnet/libcfs/tracefile.c b/drivers/staging/lustre/lnet/libcfs/tracefile.c
index 1c7efdfaffcf..d7b29f8997c0 100644
--- a/drivers/staging/lustre/lnet/libcfs/tracefile.c
+++ b/drivers/staging/lustre/lnet/libcfs/tracefile.c
@@ -59,13 +59,13 @@ struct page_collection {
* ->tcd_daemon_pages and ->tcd_pages to the ->pc_pages. Otherwise,
* only ->tcd_pages are spilled.
*/
- int pc_want_daemon_pages;
+ int pc_want_daemon_pages;
};
struct tracefiled_ctl {
struct completion tctl_start;
struct completion tctl_stop;
- wait_queue_head_t tctl_waitq;
+ wait_queue_head_t tctl_waitq;
pid_t tctl_pid;
atomic_t tctl_shutdown;
};
@@ -77,24 +77,24 @@ struct cfs_trace_page {
/*
* page itself
*/
- struct page *page;
+ struct page *page;
/*
* linkage into one of the lists in trace_data_union or
* page_collection
*/
- struct list_head linkage;
+ struct list_head linkage;
/*
* number of bytes used within this page
*/
- unsigned int used;
+ unsigned int used;
/*
* cpu that owns this page
*/
- unsigned short cpu;
+ unsigned short cpu;
/*
* type(context) of this page
*/
- unsigned short type;
+ unsigned short type;
};
static void put_pages_on_tcd_daemon_list(struct page_collection *pc,
@@ -108,7 +108,7 @@ cfs_tage_from_list(struct list_head *list)
static struct cfs_trace_page *cfs_tage_alloc(gfp_t gfp)
{
- struct page *page;
+ struct page *page;
struct cfs_trace_page *tage;
/* My caller is trying to free memory */
@@ -236,7 +236,7 @@ static void cfs_tcd_shrink(struct cfs_trace_cpu_data *tcd)
INIT_LIST_HEAD(&pc.pc_pages);
list_for_each_entry_safe(tage, tmp, &tcd->tcd_pages, linkage) {
- if (pgcount-- == 0)
+ if (!pgcount--)
break;
list_move_tail(&tage->linkage, &pc.pc_pages);
@@ -278,7 +278,7 @@ int libcfs_debug_msg(struct libcfs_debug_msg_data *msgdata,
const char *format, ...)
{
va_list args;
- int rc;
+ int rc;
va_start(args, format);
rc = libcfs_debug_vmsg2(msgdata, format, args, NULL);
@@ -293,21 +293,21 @@ int libcfs_debug_vmsg2(struct libcfs_debug_msg_data *msgdata,
const char *format2, ...)
{
struct cfs_trace_cpu_data *tcd = NULL;
- struct ptldebug_header header = {0};
- struct cfs_trace_page *tage;
+ struct ptldebug_header header = { 0 };
+ struct cfs_trace_page *tage;
/* string_buf is used only if tcd != NULL, and is always set then */
- char *string_buf = NULL;
- char *debug_buf;
- int known_size;
- int needed = 85; /* average message length */
- int max_nob;
- va_list ap;
- int depth;
- int i;
- int remain;
- int mask = msgdata->msg_mask;
- const char *file = kbasename(msgdata->msg_file);
- struct cfs_debug_limit_state *cdls = msgdata->msg_cdls;
+ char *string_buf = NULL;
+ char *debug_buf;
+ int known_size;
+ int needed = 85; /* average message length */
+ int max_nob;
+ va_list ap;
+ int depth;
+ int i;
+ int remain;
+ int mask = msgdata->msg_mask;
+ const char *file = kbasename(msgdata->msg_file);
+ struct cfs_debug_limit_state *cdls = msgdata->msg_cdls;
tcd = cfs_trace_get_tcd();
@@ -320,7 +320,7 @@ int libcfs_debug_vmsg2(struct libcfs_debug_msg_data *msgdata,
if (!tcd) /* arch may not log in IRQ context */
goto console;
- if (tcd->tcd_cur_pages == 0)
+ if (!tcd->tcd_cur_pages)
header.ph_flags |= PH_FLAG_FIRST_RECORD;
if (tcd->tcd_shutting_down) {
@@ -423,7 +423,7 @@ int libcfs_debug_vmsg2(struct libcfs_debug_msg_data *msgdata,
__LASSERT(tage->used <= PAGE_SIZE);
console:
- if ((mask & libcfs_printk) == 0) {
+ if (!(mask & libcfs_printk)) {
/* no console output requested */
if (tcd)
cfs_trace_put_tcd(tcd);
@@ -432,7 +432,7 @@ console:
if (cdls) {
if (libcfs_console_ratelimit &&
- cdls->cdls_next != 0 && /* not first time ever */
+ cdls->cdls_next && /* not first time ever */
!cfs_time_after(cfs_time_current(), cdls->cdls_next)) {
/* skipping a console message */
cdls->cdls_count++;
@@ -489,7 +489,7 @@ console:
put_cpu();
}
- if (cdls && cdls->cdls_count != 0) {
+ if (cdls && cdls->cdls_count) {
string_buf = cfs_trace_get_console_buffer();
needed = snprintf(string_buf, CFS_TRACE_CONSOLE_BUFFER_SIZE,
@@ -535,9 +535,9 @@ panic_collect_pages(struct page_collection *pc)
* CPUs have been stopped during a panic. If this isn't true for some
* arch, this will have to be implemented separately in each arch.
*/
- int i;
- int j;
struct cfs_trace_cpu_data *tcd;
+ int i;
+ int j;
INIT_LIST_HEAD(&pc->pc_pages);
@@ -698,11 +698,11 @@ void cfs_trace_debug_print(void)
int cfs_tracefile_dump_all_pages(char *filename)
{
- struct page_collection pc;
- struct file *filp;
- struct cfs_trace_page *tage;
- struct cfs_trace_page *tmp;
- char *buf;
+ struct page_collection pc;
+ struct file *filp;
+ struct cfs_trace_page *tage;
+ struct cfs_trace_page *tmp;
+ char *buf;
mm_segment_t __oldfs;
int rc;
@@ -778,7 +778,7 @@ void cfs_trace_flush_pages(void)
int cfs_trace_copyin_string(char *knl_buffer, int knl_buffer_nob,
const char __user *usr_buffer, int usr_buffer_nob)
{
- int nob;
+ int nob;
if (usr_buffer_nob > knl_buffer_nob)
return -EOVERFLOW;
@@ -810,7 +810,7 @@ int cfs_trace_copyout_string(char __user *usr_buffer, int usr_buffer_nob,
* NB if 'append' != NULL, it's a single character to append to the
* copied out string - usually "\n" or "" (i.e. a terminating zero byte)
*/
- int nob = strlen(knl_buffer);
+ int nob = strlen(knl_buffer);
if (nob > usr_buffer_nob)
nob = usr_buffer_nob;
@@ -843,16 +843,16 @@ int cfs_trace_allocate_string_buffer(char **str, int nob)
int cfs_trace_dump_debug_buffer_usrstr(void __user *usr_str, int usr_str_nob)
{
- char *str;
- int rc;
+ char *str;
+ int rc;
rc = cfs_trace_allocate_string_buffer(&str, usr_str_nob + 1);
- if (rc != 0)
+ if (rc)
return rc;
rc = cfs_trace_copyin_string(str, usr_str_nob + 1,
usr_str, usr_str_nob);
- if (rc != 0)
+ if (rc)
goto out;
if (str[0] != '/') {
@@ -867,17 +867,17 @@ out:
int cfs_trace_daemon_command(char *str)
{
- int rc = 0;
+ int rc = 0;
cfs_tracefile_write_lock();
- if (strcmp(str, "stop") == 0) {
+ if (!strcmp(str, "stop")) {
cfs_tracefile_write_unlock();
cfs_trace_stop_thread();
cfs_tracefile_write_lock();
memset(cfs_tracefile, 0, sizeof(cfs_tracefile));
- } else if (strncmp(str, "size=", 5) == 0) {
+ } else if (!strncmp(str, "size=", 5)) {
unsigned long tmp;
rc = kstrtoul(str + 5, 10, &tmp);
@@ -909,15 +909,15 @@ int cfs_trace_daemon_command(char *str)
int cfs_trace_daemon_command_usrstr(void __user *usr_str, int usr_str_nob)
{
char *str;
- int rc;
+ int rc;
rc = cfs_trace_allocate_string_buffer(&str, usr_str_nob + 1);
- if (rc != 0)
+ if (rc)
return rc;
rc = cfs_trace_copyin_string(str, usr_str_nob + 1,
usr_str, usr_str_nob);
- if (rc == 0)
+ if (!rc)
rc = cfs_trace_daemon_command(str);
kfree(str);
@@ -1003,7 +1003,7 @@ static int tracefiled(void *arg)
filp = NULL;
cfs_tracefile_read_lock();
- if (cfs_tracefile[0] != 0) {
+ if (cfs_tracefile[0]) {
filp = filp_open(cfs_tracefile,
O_CREAT | O_RDWR | O_LARGEFILE,
0600);
@@ -1072,7 +1072,7 @@ static int tracefiled(void *arg)
__LASSERT(list_empty(&pc.pc_pages));
end_loop:
if (atomic_read(&tctl->tctl_shutdown)) {
- if (last_loop == 0) {
+ if (!last_loop) {
last_loop = 1;
continue;
} else {
@@ -1135,13 +1135,13 @@ void cfs_trace_stop_thread(void)
int cfs_tracefile_init(int max_pages)
{
struct cfs_trace_cpu_data *tcd;
- int i;
- int j;
- int rc;
- int factor;
+ int i;
+ int j;
+ int rc;
+ int factor;
rc = cfs_tracefile_init_arch();
- if (rc != 0)
+ if (rc)
return rc;
cfs_tcd_for_each(tcd, i, j) {
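
The tracefile.c hunks above are mechanical checkpatch cleanups: integer tests against zero become boolean form (rc != 0 -> rc, x == 0 -> !x) and declaration blocks lose their column-alignment padding. The two test forms are exactly equivalent for any integer; a minimal sketch confirming that (nothing here is from the patch itself):

    #include <assert.h>

    int main(void)
    {
        int rc = -22; /* e.g. -EINVAL from a failed call */

        /* checkpatch prefers the boolean form; the tests are identical */
        assert((rc != 0) == !!rc);
        assert((rc == 0) == !rc);
        return 0;
    }
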
diff --git a/drivers/staging/lustre/lnet/libcfs/tracefile.h b/drivers/staging/lustre/lnet/libcfs/tracefile.h
index d878676bc375..f644cbc5a277 100644
--- a/drivers/staging/lustre/lnet/libcfs/tracefile.h
+++ b/drivers/staging/lustre/lnet/libcfs/tracefile.h
@@ -45,7 +45,7 @@ enum cfs_trace_buf_type {
/* trace file lock routines */
#define TRACEFILE_NAME_SIZE 1024
-extern char cfs_tracefile[TRACEFILE_NAME_SIZE];
+extern char cfs_tracefile[TRACEFILE_NAME_SIZE];
extern long long cfs_tracefile_size;
void libcfs_run_debug_log_upcall(char *file);
@@ -80,7 +80,7 @@ int cfs_trace_get_debug_mb(void);
void libcfs_debug_dumplog_internal(void *arg);
void libcfs_register_panic_notifier(void);
void libcfs_unregister_panic_notifier(void);
-extern int libcfs_panic_in_progress;
+extern int libcfs_panic_in_progress;
int cfs_trace_max_debug_mb(void);
#define TCD_MAX_PAGES (5 << (20 - PAGE_SHIFT))
@@ -113,14 +113,14 @@ union cfs_trace_data_union {
* tcd_for_each_type_lock
*/
spinlock_t tcd_lock;
- unsigned long tcd_lock_flags;
+ unsigned long tcd_lock_flags;
/*
* pages with trace records not yet processed by tracefiled.
*/
- struct list_head tcd_pages;
+ struct list_head tcd_pages;
/* number of pages on ->tcd_pages */
- unsigned long tcd_cur_pages;
+ unsigned long tcd_cur_pages;
/*
* pages with trace records already processed by
@@ -132,9 +132,9 @@ union cfs_trace_data_union {
* (put_pages_on_daemon_list()). LRU pages from this list are
* discarded when list grows too large.
*/
- struct list_head tcd_daemon_pages;
+ struct list_head tcd_daemon_pages;
/* number of pages on ->tcd_daemon_pages */
- unsigned long tcd_cur_daemon_pages;
+ unsigned long tcd_cur_daemon_pages;
/*
* Maximal number of pages allowed on ->tcd_pages and
@@ -142,7 +142,7 @@ union cfs_trace_data_union {
* Always TCD_MAX_PAGES * tcd_pages_factor / 100 in current
* implementation.
*/
- unsigned long tcd_max_pages;
+ unsigned long tcd_max_pages;
/*
* preallocated pages to write trace records into. Pages from
@@ -166,15 +166,15 @@ union cfs_trace_data_union {
* TCD_STOCK_PAGES pagesful are consumed by trace records all
* emitted in non-blocking contexts. Which is quite unlikely.
*/
- struct list_head tcd_stock_pages;
+ struct list_head tcd_stock_pages;
/* number of pages on ->tcd_stock_pages */
- unsigned long tcd_cur_stock_pages;
+ unsigned long tcd_cur_stock_pages;
- unsigned short tcd_shutting_down;
- unsigned short tcd_cpu;
- unsigned short tcd_type;
+ unsigned short tcd_shutting_down;
+ unsigned short tcd_cpu;
+ unsigned short tcd_type;
/* The factors to share debug memory. */
- unsigned short tcd_pages_factor;
+ unsigned short tcd_pages_factor;
} tcd;
char __pad[L1_CACHE_ALIGN(sizeof(struct cfs_trace_cpu_data))];
};
diff --git a/drivers/staging/lustre/lnet/libcfs/workitem.c b/drivers/staging/lustre/lnet/libcfs/workitem.c
index e98c818a14fb..d0512da6bcde 100644
--- a/drivers/staging/lustre/lnet/libcfs/workitem.c
+++ b/drivers/staging/lustre/lnet/libcfs/workitem.c
@@ -45,7 +45,7 @@ struct cfs_wi_sched {
/* chain on global list */
struct list_head ws_list;
/** serialised workitems */
- spinlock_t ws_lock;
+ spinlock_t ws_lock;
/** where schedulers sleep */
wait_queue_head_t ws_waitq;
/** concurrent workitems */
@@ -59,26 +59,26 @@ struct cfs_wi_sched {
*/
struct list_head ws_rerunq;
/** CPT-table for this scheduler */
- struct cfs_cpt_table *ws_cptab;
+ struct cfs_cpt_table *ws_cptab;
/** CPT id for affinity */
- int ws_cpt;
+ int ws_cpt;
/** number of scheduled workitems */
- int ws_nscheduled;
+ int ws_nscheduled;
/** started scheduler thread, protected by cfs_wi_data::wi_glock */
- unsigned int ws_nthreads:30;
+ unsigned int ws_nthreads:30;
/** shutting down, protected by cfs_wi_data::wi_glock */
- unsigned int ws_stopping:1;
+ unsigned int ws_stopping:1;
/** serialize starting thread, protected by cfs_wi_data::wi_glock */
- unsigned int ws_starting:1;
+ unsigned int ws_starting:1;
/** scheduler name */
- char ws_name[CFS_WS_NAME_LEN];
+ char ws_name[CFS_WS_NAME_LEN];
};
static struct cfs_workitem_data {
/** serialize */
spinlock_t wi_glock;
/** list of all schedulers */
- struct list_head wi_scheds;
+ struct list_head wi_scheds;
/** WI module is initialized */
int wi_init;
/** shutting down the whole WI module */
@@ -136,7 +136,7 @@ EXPORT_SYMBOL(cfs_wi_exit);
int
cfs_wi_deschedule(struct cfs_wi_sched *sched, struct cfs_workitem *wi)
{
- int rc;
+ int rc;
LASSERT(!in_interrupt()); /* because we use plain spinlock */
LASSERT(!sched->ws_stopping);
@@ -202,13 +202,13 @@ EXPORT_SYMBOL(cfs_wi_schedule);
static int cfs_wi_scheduler(void *arg)
{
- struct cfs_wi_sched *sched = (struct cfs_wi_sched *)arg;
+ struct cfs_wi_sched *sched = (struct cfs_wi_sched *)arg;
cfs_block_allsigs();
/* CPT affinity scheduler? */
if (sched->ws_cptab)
- if (cfs_cpt_bind(sched->ws_cptab, sched->ws_cpt) != 0)
+ if (cfs_cpt_bind(sched->ws_cptab, sched->ws_cpt))
CWARN("Failed to bind %s on CPT %d\n",
sched->ws_name, sched->ws_cpt);
@@ -223,8 +223,8 @@ static int cfs_wi_scheduler(void *arg)
spin_lock(&sched->ws_lock);
while (!sched->ws_stopping) {
- int nloops = 0;
- int rc;
+ int nloops = 0;
+ int rc;
struct cfs_workitem *wi;
while (!list_empty(&sched->ws_runq) &&
@@ -238,16 +238,16 @@ static int cfs_wi_scheduler(void *arg)
LASSERT(sched->ws_nscheduled > 0);
sched->ws_nscheduled--;
- wi->wi_running = 1;
+ wi->wi_running = 1;
wi->wi_scheduled = 0;
spin_unlock(&sched->ws_lock);
nloops++;
- rc = (*wi->wi_action) (wi);
+ rc = (*wi->wi_action)(wi);
spin_lock(&sched->ws_lock);
- if (rc != 0) /* WI should be dead, even be freed! */
+ if (rc) /* WI should be dead, even be freed! */
continue;
wi->wi_running = 0;
@@ -273,7 +273,7 @@ static int cfs_wi_scheduler(void *arg)
spin_unlock(&sched->ws_lock);
rc = wait_event_interruptible_exclusive(sched->ws_waitq,
- !cfs_wi_sched_cansleep(sched));
+ !cfs_wi_sched_cansleep(sched));
spin_lock(&sched->ws_lock);
}
@@ -289,7 +289,7 @@ static int cfs_wi_scheduler(void *arg)
void
cfs_wi_sched_destroy(struct cfs_wi_sched *sched)
{
- int i;
+ int i;
LASSERT(cfs_wi_data.wi_init);
LASSERT(!cfs_wi_data.wi_stopping);
@@ -325,7 +325,7 @@ cfs_wi_sched_destroy(struct cfs_wi_sched *sched)
list_del(&sched->ws_list);
spin_unlock(&cfs_wi_data.wi_glock);
- LASSERT(sched->ws_nscheduled == 0);
+ LASSERT(!sched->ws_nscheduled);
LIBCFS_FREE(sched, sizeof(*sched));
}
@@ -335,8 +335,8 @@ int
cfs_wi_sched_create(char *name, struct cfs_cpt_table *cptab,
int cpt, int nthrs, struct cfs_wi_sched **sched_pp)
{
- struct cfs_wi_sched *sched;
- int rc;
+ struct cfs_wi_sched *sched;
+ int rc;
LASSERT(cfs_wi_data.wi_init);
LASSERT(!cfs_wi_data.wi_stopping);
@@ -364,7 +364,7 @@ cfs_wi_sched_create(char *name, struct cfs_cpt_table *cptab,
rc = 0;
while (nthrs > 0) {
- char name[16];
+ char name[16];
struct task_struct *task;
spin_lock(&cfs_wi_data.wi_glock);
@@ -431,7 +431,7 @@ cfs_wi_startup(void)
void
cfs_wi_shutdown(void)
{
- struct cfs_wi_sched *sched;
+ struct cfs_wi_sched *sched;
struct cfs_wi_sched *temp;
spin_lock(&cfs_wi_data.wi_glock);
@@ -447,7 +447,7 @@ cfs_wi_shutdown(void)
list_for_each_entry(sched, &cfs_wi_data.wi_scheds, ws_list) {
spin_lock(&cfs_wi_data.wi_glock);
- while (sched->ws_nthreads != 0) {
+ while (sched->ws_nthreads) {
spin_unlock(&cfs_wi_data.wi_glock);
set_current_state(TASK_UNINTERRUPTIBLE);
schedule_timeout(cfs_time_seconds(1) / 20);
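
In the workitem.c hunk, struct cfs_wi_sched packs a 30-bit thread count and two 1-bit flags into a single unsigned int, and the comments note that all three are protected by cfs_wi_data::wi_glock -- necessarily so, since a bitfield update is a read-modify-write of the whole containing word. A standalone sketch of the same packing, with hypothetical names:

    #include <stdio.h>

    /* Hypothetical mirror of the ws_nthreads/ws_stopping/ws_starting packing:
     * all three fields share one 32-bit word, so concurrent writers must hold
     * the same lock -- the compiler emits read-modify-write for each field.
     */
    struct sched_flags {
        unsigned int nthreads:30;
        unsigned int stopping:1;
        unsigned int starting:1;
    };

    int main(void)
    {
        struct sched_flags f = { .nthreads = 5 };

        f.starting = 1;          /* touches the same word as nthreads */
        f.nthreads++;
        printf("%u %u %u, sizeof=%zu\n",
               f.nthreads, f.stopping, f.starting, sizeof(f));
        return 0;
    }
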
diff --git a/drivers/staging/lustre/lnet/lnet/api-ni.c b/drivers/staging/lustre/lnet/lnet/api-ni.c
index 4daf828198c3..b2ba10d59f84 100644
--- a/drivers/staging/lustre/lnet/lnet/api-ni.c
+++ b/drivers/staging/lustre/lnet/lnet/api-ni.c
@@ -1551,16 +1551,16 @@ LNetNIInit(lnet_pid_t requested_pid)
rc = lnet_check_routes();
if (rc)
- goto err_destory_routes;
+ goto err_destroy_routes;
rc = lnet_rtrpools_alloc(im_a_router);
if (rc)
- goto err_destory_routes;
+ goto err_destroy_routes;
}
rc = lnet_acceptor_start();
if (rc)
- goto err_destory_routes;
+ goto err_destroy_routes;
the_lnet.ln_refcount = 1;
/* Now I may use my own API functions... */
@@ -1587,7 +1587,7 @@ err_stop_ping:
err_acceptor_stop:
the_lnet.ln_refcount = 0;
lnet_acceptor_stop();
-err_destory_routes:
+err_destroy_routes:
if (!the_lnet.ln_nis_from_mod_params)
lnet_destroy_routes();
err_shutdown_lndnis:
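
The api-ni.c change only renames a misspelled label (err_destory_routes -> err_destroy_routes); the structure it touches is the standard kernel unwind ladder, where each failure jumps to the label that tears down exactly what was initialized so far. A minimal sketch of the idiom, with hypothetical alloc_a/alloc_b helpers standing in for the routes/acceptor setup steps:

    #include <stdlib.h>

    static int alloc_a(void **p) { *p = malloc(16); return *p ? 0 : -1; }
    static int alloc_b(void **p) { *p = malloc(16); return *p ? 0 : -1; }

    int setup(void)
    {
        void *a, *b;
        int rc;

        rc = alloc_a(&a);
        if (rc)
            goto err;
        rc = alloc_b(&b);
        if (rc)
            goto err_free_a;      /* unwind only what was set up */
        free(b);                  /* real code would keep both and return */
        free(a);
        return 0;

    err_free_a:
        free(a);
    err:
        return rc;
    }

    int main(void) { return setup(); }
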
diff --git a/drivers/staging/lustre/lnet/lnet/lib-me.c b/drivers/staging/lustre/lnet/lnet/lib-me.c
index b430046dc294..eb796a86e6ab 100644
--- a/drivers/staging/lustre/lnet/lnet/lib-me.c
+++ b/drivers/staging/lustre/lnet/lnet/lib-me.c
@@ -271,21 +271,3 @@ lnet_me_unlink(lnet_me_t *me)
lnet_res_lh_invalidate(&me->me_lh);
lnet_me_free(me);
}
-
-#if 0
-static void
-lib_me_dump(lnet_me_t *me)
-{
- CWARN("Match Entry %p (%#llx)\n", me,
- me->me_lh.lh_cookie);
-
- CWARN("\tMatch/Ignore\t= %016lx / %016lx\n",
- me->me_match_bits, me->me_ignore_bits);
-
- CWARN("\tMD\t= %p\n", me->md);
- CWARN("\tprev\t= %p\n",
- list_entry(me->me_list.prev, lnet_me_t, me_list));
- CWARN("\tnext\t= %p\n",
- list_entry(me->me_list.next, lnet_me_t, me_list));
-}
-#endif
diff --git a/drivers/staging/lustre/lnet/lnet/lib-move.c b/drivers/staging/lustre/lnet/lnet/lib-move.c
index 48e6f8f2392f..f3dd6e42f4d4 100644
--- a/drivers/staging/lustre/lnet/lnet/lib-move.c
+++ b/drivers/staging/lustre/lnet/lnet/lib-move.c
@@ -192,6 +192,7 @@ lnet_copy_iov2iter(struct iov_iter *to,
left = siov->iov_len - soffset;
do {
size_t n, copy = left;
+
LASSERT(nsiov > 0);
if (copy > nob)
diff --git a/drivers/staging/lustre/lnet/lnet/nidstrings.c b/drivers/staging/lustre/lnet/lnet/nidstrings.c
index a6d7a6159b8f..a9fe3e69daae 100644
--- a/drivers/staging/lustre/lnet/lnet/nidstrings.c
+++ b/drivers/staging/lustre/lnet/lnet/nidstrings.c
@@ -193,7 +193,7 @@ add_nidrange(const struct cfs_lstr *src,
struct netstrfns *nf;
struct nidrange *nr;
int endlen;
- unsigned netnum;
+ unsigned int netnum;
if (src->ls_len >= LNET_NIDSTR_SIZE)
return NULL;
@@ -247,10 +247,8 @@ parse_nidrange(struct cfs_lstr *src, struct list_head *nidlist)
{
struct cfs_lstr addrrange;
struct cfs_lstr net;
- struct cfs_lstr tmp;
struct nidrange *nr;
- tmp = *src;
if (!cfs_gettok(src, '@', &addrrange))
goto failed;
@@ -1156,7 +1154,7 @@ EXPORT_SYMBOL(libcfs_nid2str_r);
static struct netstrfns *
libcfs_str2net_internal(const char *str, __u32 *net)
{
- struct netstrfns *uninitialized_var(nf);
+ struct netstrfns *nf = NULL;
int nob;
unsigned int netnum;
int i;
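
libcfs_str2net_internal() drops uninitialized_var() in favour of an explicit = NULL. The macro merely silenced -Wmaybe-uninitialized; the explicit initializer turns any path that misses the assignment into a reproducible NULL result instead of a read of indeterminate memory. A sketch of the lookup shape, with a hypothetical table:

    #include <stddef.h>
    #include <string.h>

    struct netstrfns { const char *nf_name; };

    static struct netstrfns tbl[] = { { "tcp" }, { "o2ib" } };

    static struct netstrfns *lookup(const char *s)
    {
        struct netstrfns *nf = NULL;   /* explicit: no indeterminate reads */
        size_t i;

        for (i = 0; i < sizeof(tbl) / sizeof(tbl[0]); i++)
            if (!strcmp(tbl[i].nf_name, s))
                nf = &tbl[i];
        return nf;                     /* NULL means "not found", never garbage */
    }

    int main(void) { return lookup("tcp") ? 0 : 1; }
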
diff --git a/drivers/staging/lustre/lnet/lnet/router.c b/drivers/staging/lustre/lnet/lnet/router.c
index 063ad55ec950..8afa0abf15cd 100644
--- a/drivers/staging/lustre/lnet/lnet/router.c
+++ b/drivers/staging/lustre/lnet/lnet/router.c
@@ -903,6 +903,7 @@ lnet_create_rc_data_locked(lnet_peer_t *gateway)
{
lnet_rc_data_t *rcd = NULL;
lnet_ping_info_t *pi;
+ lnet_md_t md;
int rc;
int i;
@@ -925,15 +926,15 @@ lnet_create_rc_data_locked(lnet_peer_t *gateway)
}
rcd->rcd_pinginfo = pi;
+ md.start = pi;
+ md.user_ptr = rcd;
+ md.length = LNET_PINGINFO_SIZE;
+ md.threshold = LNET_MD_THRESH_INF;
+ md.options = LNET_MD_TRUNCATE;
+ md.eq_handle = the_lnet.ln_rc_eqh;
+
LASSERT(!LNetHandleIsInvalid(the_lnet.ln_rc_eqh));
- rc = LNetMDBind((lnet_md_t){.start = pi,
- .user_ptr = rcd,
- .length = LNET_PINGINFO_SIZE,
- .threshold = LNET_MD_THRESH_INF,
- .options = LNET_MD_TRUNCATE,
- .eq_handle = the_lnet.ln_rc_eqh},
- LNET_UNLINK,
- &rcd->rcd_mdh);
+ rc = LNetMDBind(md, LNET_UNLINK, &rcd->rcd_mdh);
if (rc < 0) {
CERROR("Can't bind MD: %d\n", rc);
goto out;
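
The router.c hunk replaces a compound-literal argument to LNetMDBind() with a named on-stack lnet_md_t. One subtlety worth noting: a compound literal with designated initializers zero-fills its unnamed members, while a plain automatic struct leaves unset members indeterminate, so the rewrite is only equivalent if every member the callee reads is assigned (or the struct is zeroed first). A sketch of the difference, with hypothetical struct md and bind() standing in for lnet_md_t and LNetMDBind:

    #include <string.h>

    struct md { void *start; int length; int options; int max_size; };

    static int bind(struct md m) { return m.max_size; }

    int main(void)
    {
        /* Compound literal: unnamed members (max_size) are zero-initialized. */
        int a = bind((struct md){ .start = 0, .length = 4, .options = 1 });

        /* Named automatic struct: memset (or = {0}) is needed to get the
         * same guarantee before the field-by-field assignments.
         */
        struct md m;

        memset(&m, 0, sizeof(m));
        m.start = 0;
        m.length = 4;
        m.options = 1;
        return a == bind(m) ? 0 : 1;
    }
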
diff --git a/drivers/staging/lustre/lnet/selftest/brw_test.c b/drivers/staging/lustre/lnet/selftest/brw_test.c
index b20c5d394e3b..67b460f41d6e 100644
--- a/drivers/staging/lustre/lnet/selftest/brw_test.c
+++ b/drivers/staging/lustre/lnet/selftest/brw_test.c
@@ -44,6 +44,10 @@ static int brw_inject_errors;
module_param(brw_inject_errors, int, 0644);
MODULE_PARM_DESC(brw_inject_errors, "# data errors to inject randomly, zero by default");
+#define BRW_POISON 0xbeefbeefbeefbeefULL
+#define BRW_MAGIC 0xeeb0eeb1eeb2eeb3ULL
+#define BRW_MSIZE sizeof(u64)
+
static void
brw_client_fini(struct sfw_test_instance *tsi)
{
@@ -67,6 +71,7 @@ brw_client_init(struct sfw_test_instance *tsi)
{
struct sfw_session *sn = tsi->tsi_batch->bat_session;
int flags;
+ int off;
int npg;
int len;
int opc;
@@ -87,6 +92,7 @@ brw_client_init(struct sfw_test_instance *tsi)
* but we have to keep it for compatibility
*/
len = npg * PAGE_SIZE;
+ off = 0;
} else {
struct test_bulk_req_v1 *breq = &tsi->tsi_u.bulk_v1;
@@ -99,9 +105,13 @@ brw_client_init(struct sfw_test_instance *tsi)
opc = breq->blk_opc;
flags = breq->blk_flags;
len = breq->blk_len;
- npg = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;
+ off = breq->blk_offset & ~PAGE_MASK;
+ npg = (off + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
}
+ if (off % BRW_MSIZE)
+ return -EINVAL;
+
if (npg > LNET_MAX_IOV || npg <= 0)
return -EINVAL;
@@ -114,7 +124,7 @@ brw_client_init(struct sfw_test_instance *tsi)
list_for_each_entry(tsu, &tsi->tsi_units, tsu_list) {
bulk = srpc_alloc_bulk(lnet_cpt_of_nid(tsu->tsu_dest.nid),
- npg, len, opc == LST_BRW_READ);
+ off, npg, len, opc == LST_BRW_READ);
if (!bulk) {
brw_client_fini(tsi);
return -ENOMEM;
@@ -126,12 +136,7 @@ brw_client_init(struct sfw_test_instance *tsi)
return 0;
}
-#define BRW_POISON 0xbeefbeefbeefbeefULL
-#define BRW_MAGIC 0xeeb0eeb1eeb2eeb3ULL
-#define BRW_MSIZE sizeof(__u64)
-
-static int
-brw_inject_one_error(void)
+int brw_inject_one_error(void)
{
struct timespec64 ts;
@@ -147,12 +152,13 @@ brw_inject_one_error(void)
}
static void
-brw_fill_page(struct page *pg, int pattern, __u64 magic)
+brw_fill_page(struct page *pg, int off, int len, int pattern, __u64 magic)
{
- char *addr = page_address(pg);
+ char *addr = page_address(pg) + off;
int i;
LASSERT(addr);
+ LASSERT(!(off % BRW_MSIZE) && !(len % BRW_MSIZE));
if (pattern == LST_BRW_CHECK_NONE)
return;
@@ -162,14 +168,16 @@ brw_fill_page(struct page *pg, int pattern, __u64 magic)
if (pattern == LST_BRW_CHECK_SIMPLE) {
memcpy(addr, &magic, BRW_MSIZE);
- addr += PAGE_SIZE - BRW_MSIZE;
- memcpy(addr, &magic, BRW_MSIZE);
+ if (len > BRW_MSIZE) {
+ addr += PAGE_SIZE - BRW_MSIZE;
+ memcpy(addr, &magic, BRW_MSIZE);
+ }
return;
}
if (pattern == LST_BRW_CHECK_FULL) {
- for (i = 0; i < PAGE_SIZE / BRW_MSIZE; i++)
- memcpy(addr + i * BRW_MSIZE, &magic, BRW_MSIZE);
+ for (i = 0; i < len; i += BRW_MSIZE)
+ memcpy(addr + i, &magic, BRW_MSIZE);
return;
}
@@ -177,13 +185,14 @@ brw_fill_page(struct page *pg, int pattern, __u64 magic)
}
static int
-brw_check_page(struct page *pg, int pattern, __u64 magic)
+brw_check_page(struct page *pg, int off, int len, int pattern, __u64 magic)
{
- char *addr = page_address(pg);
+ char *addr = page_address(pg) + off;
__u64 data = 0; /* make compiler happy */
int i;
LASSERT(addr);
+ LASSERT(!(off % BRW_MSIZE) && !(len % BRW_MSIZE));
if (pattern == LST_BRW_CHECK_NONE)
return 0;
@@ -193,21 +202,21 @@ brw_check_page(struct page *pg, int pattern, __u64 magic)
if (data != magic)
goto bad_data;
- addr += PAGE_SIZE - BRW_MSIZE;
- data = *((__u64 *)addr);
- if (data != magic)
- goto bad_data;
-
+ if (len > BRW_MSIZE) {
+ addr += PAGE_SIZE - BRW_MSIZE;
+ data = *((__u64 *)addr);
+ if (data != magic)
+ goto bad_data;
+ }
return 0;
}
if (pattern == LST_BRW_CHECK_FULL) {
- for (i = 0; i < PAGE_SIZE / BRW_MSIZE; i++) {
- data = *(((__u64 *)addr) + i);
+ for (i = 0; i < len; i += BRW_MSIZE) {
+ data = *(u64 *)(addr + i);
if (data != magic)
goto bad_data;
}
-
return 0;
}
@@ -226,8 +235,12 @@ brw_fill_bulk(struct srpc_bulk *bk, int pattern, __u64 magic)
struct page *pg;
for (i = 0; i < bk->bk_niov; i++) {
+ int off, len;
+
pg = bk->bk_iovs[i].bv_page;
- brw_fill_page(pg, pattern, magic);
+ off = bk->bk_iovs[i].bv_offset;
+ len = bk->bk_iovs[i].bv_len;
+ brw_fill_page(pg, off, len, pattern, magic);
}
}
@@ -238,8 +251,12 @@ brw_check_bulk(struct srpc_bulk *bk, int pattern, __u64 magic)
struct page *pg;
for (i = 0; i < bk->bk_niov; i++) {
+ int off, len;
+
pg = bk->bk_iovs[i].bv_page;
- if (brw_check_page(pg, pattern, magic)) {
+ off = bk->bk_iovs[i].bv_offset;
+ len = bk->bk_iovs[i].bv_len;
+ if (brw_check_page(pg, off, len, pattern, magic)) {
CERROR("Bulk page %p (%d/%d) is corrupted!\n",
pg, i, bk->bk_niov);
return 1;
@@ -276,6 +293,7 @@ brw_client_prep_rpc(struct sfw_test_unit *tsu,
len = npg * PAGE_SIZE;
} else {
struct test_bulk_req_v1 *breq = &tsi->tsi_u.bulk_v1;
+ int off;
/*
* I should never get this step if it's unknown feature
@@ -286,7 +304,8 @@ brw_client_prep_rpc(struct sfw_test_unit *tsu,
opc = breq->blk_opc;
flags = breq->blk_flags;
len = breq->blk_len;
- npg = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;
+ off = breq->blk_offset;
+ npg = (off + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
}
rc = sfw_create_test_rpc(tsu, dest, sn->sn_features, npg, len, &rpc);
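
Once a bulk transfer may start at a non-zero offset inside its first page, brw_test.c must count pages over off+len rounded up, pass (off, len) to the per-page fill/verify helpers, and require off to be BRW_MSIZE-aligned so the magic words stay aligned. Worked example with 4 KiB pages: off = 1024 and len = 8192 span bytes 1024..9215, i.e. three pages, and (1024 + 8192 + 4095) >> 12 is indeed 3. A sketch of the arithmetic:

    #include <assert.h>

    #define PAGE_SHIFT 12
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)

    static unsigned long npages(unsigned long off, unsigned long len)
    {
        return (off + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
    }

    int main(void)
    {
        assert(npages(0, 4096) == 1);      /* exactly one page */
        assert(npages(0, 8192) == 2);
        assert(npages(1024, 8192) == 3);   /* offset pushes it into a 3rd page */
        assert(npages(1024, 3072) == 1);   /* still fits in the first page */
        return 0;
    }
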
diff --git a/drivers/staging/lustre/lnet/selftest/conctl.c b/drivers/staging/lustre/lnet/selftest/conctl.c
index b786f8b4a73d..94383023c1be 100644
--- a/drivers/staging/lustre/lnet/selftest/conctl.c
+++ b/drivers/staging/lustre/lnet/selftest/conctl.c
@@ -315,7 +315,7 @@ lst_group_update_ioctl(lstio_group_update_args_t *args)
static int
lst_nodes_add_ioctl(lstio_group_nodes_args_t *args)
{
- unsigned feats;
+ unsigned int feats;
int rc;
char *name;
@@ -742,6 +742,10 @@ static int lst_test_add_ioctl(lstio_test_args_t *args)
PAGE_SIZE - sizeof(struct lstcon_test)))
return -EINVAL;
+ /* Enforce zero parameter length if there's no parameter */
+ if (!args->lstio_tes_param && args->lstio_tes_param_len)
+ return -EINVAL;
+
LIBCFS_ALLOC(batch_name, args->lstio_tes_bat_nmlen + 1);
if (!batch_name)
return rc;
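
The conctl.c addition rejects a nonzero parameter length paired with a NULL parameter pointer before anything is allocated; validating the (pointer, length) pair as a unit keeps the later copy-from-user path simple. A sketch of that shape -- the helper and the upper-bound check are illustrative, only the NULL-with-length test comes from the patch:

    #include <stddef.h>

    static int validate_param(const void *param, int param_len, int max_len)
    {
        if (param_len < 0 || param_len > max_len)
            return -22;          /* -EINVAL; mirrors the PAGE_SIZE bound above */
        if (!param && param_len)
            return -22;          /* the new check: a length without a buffer */
        return 0;
    }

    int main(void)
    {
        char buf[8];

        return validate_param(buf, sizeof(buf), 16) ||
               !validate_param(NULL, 8, 16);   /* must be rejected */
    }
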
diff --git a/drivers/staging/lustre/lnet/selftest/conrpc.c b/drivers/staging/lustre/lnet/selftest/conrpc.c
index 55afb53b0743..994422c62487 100644
--- a/drivers/staging/lustre/lnet/selftest/conrpc.c
+++ b/drivers/staging/lustre/lnet/selftest/conrpc.c
@@ -86,8 +86,9 @@ lstcon_rpc_done(struct srpc_client_rpc *rpc)
}
static int
-lstcon_rpc_init(struct lstcon_node *nd, int service, unsigned feats,
- int bulk_npg, int bulk_len, int embedded, struct lstcon_rpc *crpc)
+lstcon_rpc_init(struct lstcon_node *nd, int service, unsigned int feats,
+ int bulk_npg, int bulk_len, int embedded,
+ struct lstcon_rpc *crpc)
{
crpc->crp_rpc = sfw_create_rpc(nd->nd_id, service,
feats, bulk_npg, bulk_len,
@@ -111,7 +112,7 @@ lstcon_rpc_init(struct lstcon_node *nd, int service, unsigned feats,
}
static int
-lstcon_rpc_prep(struct lstcon_node *nd, int service, unsigned feats,
+lstcon_rpc_prep(struct lstcon_node *nd, int service, unsigned int feats,
int bulk_npg, int bulk_len, struct lstcon_rpc **crpcpp)
{
struct lstcon_rpc *crpc = NULL;
@@ -292,8 +293,8 @@ lstcon_rpc_trans_abort(struct lstcon_rpc_trans *trans, int error)
spin_lock(&rpc->crpc_lock);
- if (!crpc->crp_posted || /* not posted */
- crpc->crp_stamp) { /* rpc done or aborted already */
+ if (!crpc->crp_posted || /* not posted */
+ crpc->crp_stamp) { /* rpc done or aborted already */
if (!crpc->crp_stamp) {
crpc->crp_stamp = cfs_time_current();
crpc->crp_status = -EINTR;
@@ -589,7 +590,7 @@ lstcon_rpc_trans_destroy(struct lstcon_rpc_trans *trans)
int
lstcon_sesrpc_prep(struct lstcon_node *nd, int transop,
- unsigned feats, struct lstcon_rpc **crpc)
+ unsigned int feats, struct lstcon_rpc **crpc)
{
struct srpc_mksn_reqst *msrq;
struct srpc_rmsn_reqst *rsrq;
@@ -627,7 +628,8 @@ lstcon_sesrpc_prep(struct lstcon_node *nd, int transop,
}
int
-lstcon_dbgrpc_prep(struct lstcon_node *nd, unsigned feats, struct lstcon_rpc **crpc)
+lstcon_dbgrpc_prep(struct lstcon_node *nd, unsigned int feats,
+ struct lstcon_rpc **crpc)
{
struct srpc_debug_reqst *drq;
int rc;
@@ -645,7 +647,7 @@ lstcon_dbgrpc_prep(struct lstcon_node *nd, unsigned feats, struct lstcon_rpc **c
}
int
-lstcon_batrpc_prep(struct lstcon_node *nd, int transop, unsigned feats,
+lstcon_batrpc_prep(struct lstcon_node *nd, int transop, unsigned int feats,
struct lstcon_tsb_hdr *tsb, struct lstcon_rpc **crpc)
{
struct lstcon_batch *batch;
@@ -678,7 +680,8 @@ lstcon_batrpc_prep(struct lstcon_node *nd, int transop, unsigned feats,
}
int
-lstcon_statrpc_prep(struct lstcon_node *nd, unsigned feats, struct lstcon_rpc **crpc)
+lstcon_statrpc_prep(struct lstcon_node *nd, unsigned int feats,
+ struct lstcon_rpc **crpc)
{
struct srpc_stat_reqst *srq;
int rc;
@@ -776,7 +779,8 @@ lstcon_pingrpc_prep(lst_test_ping_param_t *param, struct srpc_test_reqst *req)
}
static int
-lstcon_bulkrpc_v0_prep(lst_test_bulk_param_t *param, struct srpc_test_reqst *req)
+lstcon_bulkrpc_v0_prep(lst_test_bulk_param_t *param,
+ struct srpc_test_reqst *req)
{
struct test_bulk_req *brq = &req->tsr_u.bulk_v0;
@@ -789,20 +793,21 @@ lstcon_bulkrpc_v0_prep(lst_test_bulk_param_t *param, struct srpc_test_reqst *req
}
static int
-lstcon_bulkrpc_v1_prep(lst_test_bulk_param_t *param, struct srpc_test_reqst *req)
+lstcon_bulkrpc_v1_prep(lst_test_bulk_param_t *param, bool is_client,
+ struct srpc_test_reqst *req)
{
struct test_bulk_req_v1 *brq = &req->tsr_u.bulk_v1;
brq->blk_opc = param->blk_opc;
brq->blk_flags = param->blk_flags;
brq->blk_len = param->blk_size;
- brq->blk_offset = 0; /* reserved */
+ brq->blk_offset = is_client ? param->blk_cli_off : param->blk_srv_off;
return 0;
}
int
-lstcon_testrpc_prep(struct lstcon_node *nd, int transop, unsigned feats,
+lstcon_testrpc_prep(struct lstcon_node *nd, int transop, unsigned int feats,
struct lstcon_test *test, struct lstcon_rpc **crpc)
{
struct lstcon_group *sgrp = test->tes_src_grp;
@@ -897,7 +902,8 @@ lstcon_testrpc_prep(struct lstcon_node *nd, int transop, unsigned feats,
&test->tes_param[0], trq);
} else {
rc = lstcon_bulkrpc_v1_prep((lst_test_bulk_param_t *)
- &test->tes_param[0], trq);
+ &test->tes_param[0],
+ trq->tsr_is_client, trq);
}
break;
@@ -1084,7 +1090,7 @@ lstcon_rpc_trans_ndlist(struct list_head *ndlist,
struct lstcon_ndlink *ndl;
struct lstcon_node *nd;
struct lstcon_rpc *rpc;
- unsigned feats;
+ unsigned int feats;
int rc;
/* Creating session RPG for list of nodes */
diff --git a/drivers/staging/lustre/lnet/selftest/conrpc.h b/drivers/staging/lustre/lnet/selftest/conrpc.h
index 7ec6fc96959e..e629e87c461c 100644
--- a/drivers/staging/lustre/lnet/selftest/conrpc.h
+++ b/drivers/staging/lustre/lnet/selftest/conrpc.h
@@ -78,8 +78,8 @@ struct lstcon_rpc_trans {
struct list_head tas_olink; /* link chain on owner list */
struct list_head tas_link; /* link chain on global list */
int tas_opc; /* operation code of transaction */
- unsigned tas_feats_updated; /* features mask is uptodate */
- unsigned tas_features; /* test features mask */
+ unsigned int tas_feats_updated; /* features mask is uptodate */
+ unsigned int tas_features; /* test features mask */
wait_queue_head_t tas_waitq; /* wait queue head */
atomic_t tas_remaining; /* # of un-scheduled rpcs */
struct list_head tas_rpcs_list; /* queued requests */
@@ -106,14 +106,16 @@ typedef int (*lstcon_rpc_readent_func_t)(int, struct srpc_msg *,
lstcon_rpc_ent_t __user *);
int lstcon_sesrpc_prep(struct lstcon_node *nd, int transop,
- unsigned version, struct lstcon_rpc **crpc);
+ unsigned int version, struct lstcon_rpc **crpc);
int lstcon_dbgrpc_prep(struct lstcon_node *nd,
- unsigned version, struct lstcon_rpc **crpc);
-int lstcon_batrpc_prep(struct lstcon_node *nd, int transop, unsigned version,
- struct lstcon_tsb_hdr *tsb, struct lstcon_rpc **crpc);
-int lstcon_testrpc_prep(struct lstcon_node *nd, int transop, unsigned version,
- struct lstcon_test *test, struct lstcon_rpc **crpc);
-int lstcon_statrpc_prep(struct lstcon_node *nd, unsigned version,
+ unsigned int version, struct lstcon_rpc **crpc);
+int lstcon_batrpc_prep(struct lstcon_node *nd, int transop,
+ unsigned int version, struct lstcon_tsb_hdr *tsb,
+ struct lstcon_rpc **crpc);
+int lstcon_testrpc_prep(struct lstcon_node *nd, int transop,
+ unsigned int version, struct lstcon_test *test,
+ struct lstcon_rpc **crpc);
+int lstcon_statrpc_prep(struct lstcon_node *nd, unsigned int version,
struct lstcon_rpc **crpc);
void lstcon_rpc_put(struct lstcon_rpc *crpc);
int lstcon_rpc_trans_prep(struct list_head *translist,
@@ -129,7 +131,8 @@ int lstcon_rpc_trans_interpreter(struct lstcon_rpc_trans *trans,
lstcon_rpc_readent_func_t readent);
void lstcon_rpc_trans_abort(struct lstcon_rpc_trans *trans, int error);
void lstcon_rpc_trans_destroy(struct lstcon_rpc_trans *trans);
-void lstcon_rpc_trans_addreq(struct lstcon_rpc_trans *trans, struct lstcon_rpc *req);
+void lstcon_rpc_trans_addreq(struct lstcon_rpc_trans *trans,
+ struct lstcon_rpc *req);
int lstcon_rpc_trans_postwait(struct lstcon_rpc_trans *trans, int timeout);
int lstcon_rpc_pinger_start(void);
void lstcon_rpc_pinger_stop(void);
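
The conrpc.c/conrpc.h churn is pure checkpatch hygiene: bare "unsigned" becomes "unsigned int" and long prototypes are re-wrapped at 80 columns. The two spellings name the same type, so nothing changes behaviorally; a trivial confirmation:

    #include <stdio.h>

    int main(void)
    {
        /* "unsigned" and "unsigned int" declare the same type; checkpatch
         * merely prefers the explicit spelling.
         */
        unsigned a = 1;
        unsigned int b = 1;

        printf("%d %zu %zu\n", a == b, sizeof(a), sizeof(b));
        return 0;
    }
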
diff --git a/drivers/staging/lustre/lnet/selftest/console.c b/drivers/staging/lustre/lnet/selftest/console.c
index a0fcbf3bcc95..1456d2395cc9 100644
--- a/drivers/staging/lustre/lnet/selftest/console.c
+++ b/drivers/staging/lustre/lnet/selftest/console.c
@@ -86,7 +86,7 @@ lstcon_node_find(lnet_process_id_t id, struct lstcon_node **ndpp, int create)
if (!create)
return -ENOENT;
- LIBCFS_ALLOC(*ndpp, sizeof(struct lstcon_node) + sizeof(struct lstcon_ndlink));
+ LIBCFS_ALLOC(*ndpp, sizeof(**ndpp) + sizeof(*ndl));
if (!*ndpp)
return -ENOMEM;
@@ -131,12 +131,12 @@ lstcon_node_put(struct lstcon_node *nd)
list_del(&ndl->ndl_link);
list_del(&ndl->ndl_hlink);
- LIBCFS_FREE(nd, sizeof(struct lstcon_node) + sizeof(struct lstcon_ndlink));
+ LIBCFS_FREE(nd, sizeof(*nd) + sizeof(*ndl));
}
static int
-lstcon_ndlink_find(struct list_head *hash,
- lnet_process_id_t id, struct lstcon_ndlink **ndlpp, int create)
+lstcon_ndlink_find(struct list_head *hash, lnet_process_id_t id,
+ struct lstcon_ndlink **ndlpp, int create)
{
unsigned int idx = LNET_NIDADDR(id.nid) % LST_NODE_HASHSIZE;
struct lstcon_ndlink *ndl;
@@ -230,7 +230,8 @@ lstcon_group_addref(struct lstcon_group *grp)
grp->grp_ref++;
}
-static void lstcon_group_ndlink_release(struct lstcon_group *, struct lstcon_ndlink *);
+static void lstcon_group_ndlink_release(struct lstcon_group *,
+ struct lstcon_ndlink *);
static void
lstcon_group_drain(struct lstcon_group *grp, int keep)
@@ -397,7 +398,8 @@ lstcon_sesrpc_readent(int transop, struct srpc_msg *msg,
static int
lstcon_group_nodes_add(struct lstcon_group *grp,
int count, lnet_process_id_t __user *ids_up,
- unsigned *featp, struct list_head __user *result_up)
+ unsigned int *featp,
+ struct list_head __user *result_up)
{
struct lstcon_rpc_trans *trans;
struct lstcon_ndlink *ndl;
@@ -542,7 +544,8 @@ lstcon_group_add(char *name)
int
lstcon_nodes_add(char *name, int count, lnet_process_id_t __user *ids_up,
- unsigned *featp, struct list_head __user *result_up)
+ unsigned int *featp,
+ struct list_head __user *result_up)
{
struct lstcon_group *grp;
int rc;
@@ -820,7 +823,7 @@ lstcon_group_info(char *name, lstcon_ndlist_ent_t __user *gents_p,
lstcon_group_decref(grp);
- return 0;
+ return rc;
}
static int
@@ -1181,7 +1184,8 @@ lstcon_testrpc_condition(int transop, struct lstcon_node *nd, void *arg)
}
static int
-lstcon_test_nodes_add(struct lstcon_test *test, struct list_head __user *result_up)
+lstcon_test_nodes_add(struct lstcon_test *test,
+ struct list_head __user *result_up)
{
struct lstcon_rpc_trans *trans;
struct lstcon_group *grp;
@@ -1364,7 +1368,8 @@ out:
}
static int
-lstcon_test_find(struct lstcon_batch *batch, int idx, struct lstcon_test **testpp)
+lstcon_test_find(struct lstcon_batch *batch, int idx,
+ struct lstcon_test **testpp)
{
struct lstcon_test *test;
@@ -1702,7 +1707,7 @@ lstcon_new_session_id(lst_sid_t *sid)
}
int
-lstcon_session_new(char *name, int key, unsigned feats,
+lstcon_session_new(char *name, int key, unsigned int feats,
int timeout, int force, lst_sid_t __user *sid_up)
{
int rc = 0;
@@ -1868,7 +1873,7 @@ lstcon_session_end(void)
}
int
-lstcon_session_feats_check(unsigned feats)
+lstcon_session_feats_check(unsigned int feats)
{
int rc = 0;
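
Two distinct improvements sit in the console.c hunks: allocation sizes are rewritten as sizeof(**ndpp) and sizeof(*nd) so they track the pointer's type automatically, and lstcon_group_info() now returns rc instead of an unconditional 0, so a failure from the node-list copy is no longer silently swallowed. A sketch of the sizeof idiom, with plain malloc standing in for LIBCFS_ALLOC:

    #include <stdlib.h>

    struct node  { int id; };
    struct nlink { struct node *n; };

    int main(void)
    {
        struct node *nd;

        /* sizeof(*nd) stays correct even if nd's type changes later;
         * sizeof(struct node) would silently go stale.
         */
        nd = malloc(sizeof(*nd) + sizeof(struct nlink));
        if (!nd)
            return 1;
        free(nd);
        return 0;
    }
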
diff --git a/drivers/staging/lustre/lnet/selftest/console.h b/drivers/staging/lustre/lnet/selftest/console.h
index 78388a611c22..5dc1de48a10e 100644
--- a/drivers/staging/lustre/lnet/selftest/console.h
+++ b/drivers/staging/lustre/lnet/selftest/console.h
@@ -92,14 +92,16 @@ struct lstcon_batch {
int bat_ntest; /* # of test */
int bat_state; /* state of the batch */
int bat_arg; /* parameter for run|stop, timeout
- * for run, force for stop */
+ * for run, force for stop
+ */
char bat_name[LST_NAME_SIZE];/* name of batch */
struct list_head bat_test_list; /* list head of tests (struct lstcon_test)
*/
struct list_head bat_trans_list; /* list head of transaction */
struct list_head bat_cli_list; /* list head of client nodes
- * (struct lstcon_node) */
+ * (struct lstcon_node)
+ */
struct list_head *bat_cli_hash; /* hash table of client nodes */
struct list_head bat_srv_list; /* list head of server nodes */
struct list_head *bat_srv_hash; /* hash table of server nodes */
@@ -144,13 +146,14 @@ struct lstcon_session {
int ses_timeout; /* timeout in seconds */
time64_t ses_laststamp; /* last operation stamp (seconds)
*/
- unsigned ses_features; /* tests features of the session
+ unsigned int ses_features; /* tests features of the session
*/
- unsigned ses_feats_updated:1; /* features are synced with
- * remote test nodes */
- unsigned ses_force:1; /* force creating */
- unsigned ses_shutdown:1; /* session is shutting down */
- unsigned ses_expired:1; /* console is timedout */
+ unsigned int ses_feats_updated:1; /* features are synced with
+ * remote test nodes
+ */
+ unsigned int ses_force:1; /* force creating */
+ unsigned int ses_shutdown:1; /* session is shutting down */
+ unsigned int ses_expired:1; /* console is timedout */
__u64 ses_id_cookie; /* batch id cookie */
char ses_name[LST_NAME_SIZE];/* session name */
struct lstcon_rpc_trans *ses_ping; /* session pinger */
@@ -188,14 +191,14 @@ int lstcon_ioctl_entry(unsigned int cmd, struct libcfs_ioctl_hdr *hdr);
int lstcon_console_init(void);
int lstcon_console_fini(void);
int lstcon_session_match(lst_sid_t sid);
-int lstcon_session_new(char *name, int key, unsigned version,
+int lstcon_session_new(char *name, int key, unsigned int version,
int timeout, int flags, lst_sid_t __user *sid_up);
int lstcon_session_info(lst_sid_t __user *sid_up, int __user *key,
unsigned __user *verp, lstcon_ndlist_ent_t __user *entp,
char __user *name_up, int len);
int lstcon_session_end(void);
int lstcon_session_debug(int timeout, struct list_head __user *result_up);
-int lstcon_session_feats_check(unsigned feats);
+int lstcon_session_feats_check(unsigned int feats);
int lstcon_batch_debug(int timeout, char *name,
int client, struct list_head __user *result_up);
int lstcon_group_debug(int timeout, char *name,
@@ -207,7 +210,7 @@ int lstcon_group_del(char *name);
int lstcon_group_clean(char *name, int args);
int lstcon_group_refresh(char *name, struct list_head __user *result_up);
int lstcon_nodes_add(char *name, int nnd, lnet_process_id_t __user *nds_up,
- unsigned *featp, struct list_head __user *result_up);
+ unsigned int *featp, struct list_head __user *result_up);
int lstcon_nodes_remove(char *name, int nnd, lnet_process_id_t __user *nds_up,
struct list_head __user *result_up);
int lstcon_group_info(char *name, lstcon_ndlist_ent_t __user *gent_up,
diff --git a/drivers/staging/lustre/lnet/selftest/framework.c b/drivers/staging/lustre/lnet/selftest/framework.c
index abbd6287b4bd..48dcc330dc9b 100644
--- a/drivers/staging/lustre/lnet/selftest/framework.c
+++ b/drivers/staging/lustre/lnet/selftest/framework.c
@@ -131,7 +131,8 @@ sfw_find_test_case(int id)
}
static int
-sfw_register_test(struct srpc_service *service, struct sfw_test_client_ops *cliops)
+sfw_register_test(struct srpc_service *service,
+ struct sfw_test_client_ops *cliops)
{
struct sfw_test_case *tsc;
@@ -254,7 +255,7 @@ sfw_session_expired(void *data)
static inline void
sfw_init_session(struct sfw_session *sn, lst_sid_t sid,
- unsigned features, const char *name)
+ unsigned int features, const char *name)
{
struct stt_timer *timer = &sn->sn_timer;
@@ -469,7 +470,8 @@ sfw_make_session(struct srpc_mksn_reqst *request, struct srpc_mksn_reply *reply)
}
static int
-sfw_remove_session(struct srpc_rmsn_reqst *request, struct srpc_rmsn_reply *reply)
+sfw_remove_session(struct srpc_rmsn_reqst *request,
+ struct srpc_rmsn_reply *reply)
{
struct sfw_session *sn = sfw_data.fw_session;
@@ -501,7 +503,8 @@ sfw_remove_session(struct srpc_rmsn_reqst *request, struct srpc_rmsn_reply *repl
}
static int
-sfw_debug_session(struct srpc_debug_reqst *request, struct srpc_debug_reply *reply)
+sfw_debug_session(struct srpc_debug_reqst *request,
+ struct srpc_debug_reply *reply)
{
struct sfw_session *sn = sfw_data.fw_session;
@@ -897,7 +900,7 @@ sfw_test_rpc_done(struct srpc_client_rpc *rpc)
int
sfw_create_test_rpc(struct sfw_test_unit *tsu, lnet_process_id_t peer,
- unsigned features, int nblk, int blklen,
+ unsigned int features, int nblk, int blklen,
struct srpc_client_rpc **rpcpp)
{
struct srpc_client_rpc *rpc = NULL;
@@ -1064,7 +1067,8 @@ sfw_stop_batch(struct sfw_batch *tsb, int force)
}
static int
-sfw_query_batch(struct sfw_batch *tsb, int testidx, struct srpc_batch_reply *reply)
+sfw_query_batch(struct sfw_batch *tsb, int testidx,
+ struct srpc_batch_reply *reply)
{
struct sfw_test_instance *tsi;
@@ -1101,7 +1105,7 @@ sfw_alloc_pages(struct srpc_server_rpc *rpc, int cpt, int npages, int len,
LASSERT(!rpc->srpc_bulk);
LASSERT(npages > 0 && npages <= LNET_MAX_IOV);
- rpc->srpc_bulk = srpc_alloc_bulk(cpt, npages, len, sink);
+ rpc->srpc_bulk = srpc_alloc_bulk(cpt, 0, npages, len, sink);
if (!rpc->srpc_bulk)
return -ENOMEM;
@@ -1179,7 +1183,8 @@ sfw_add_test(struct srpc_server_rpc *rpc)
}
static int
-sfw_control_batch(struct srpc_batch_reqst *request, struct srpc_batch_reply *reply)
+sfw_control_batch(struct srpc_batch_reqst *request,
+ struct srpc_batch_reply *reply)
{
struct sfw_session *sn = sfw_data.fw_session;
int rc = 0;
@@ -1225,7 +1230,7 @@ sfw_handle_server_rpc(struct srpc_server_rpc *rpc)
struct srpc_service *sv = rpc->srpc_scd->scd_svc;
struct srpc_msg *reply = &rpc->srpc_replymsg;
struct srpc_msg *request = &rpc->srpc_reqstbuf->buf_msg;
- unsigned features = LST_FEATS_MASK;
+ unsigned int features = LST_FEATS_MASK;
int rc = 0;
LASSERT(!sfw_data.fw_active_srpc);
@@ -1375,7 +1380,7 @@ sfw_bulk_ready(struct srpc_server_rpc *rpc, int status)
struct srpc_client_rpc *
sfw_create_rpc(lnet_process_id_t peer, int service,
- unsigned features, int nbulkiov, int bulklen,
+ unsigned int features, int nbulkiov, int bulklen,
void (*done)(struct srpc_client_rpc *), void *priv)
{
struct srpc_client_rpc *rpc = NULL;
diff --git a/drivers/staging/lustre/lnet/selftest/ping_test.c b/drivers/staging/lustre/lnet/selftest/ping_test.c
index 9331ca4e3606..b9601b00a273 100644
--- a/drivers/staging/lustre/lnet/selftest/ping_test.c
+++ b/drivers/staging/lustre/lnet/selftest/ping_test.c
@@ -159,8 +159,8 @@ ping_client_done_rpc(struct sfw_test_unit *tsu, struct srpc_client_rpc *rpc)
ktime_get_real_ts64(&ts);
CDEBUG(D_NET, "%d reply in %u usec\n", reply->pnr_seq,
- (unsigned)((ts.tv_sec - reqst->pnr_time_sec) * 1000000 +
- (ts.tv_nsec / NSEC_PER_USEC - reqst->pnr_time_usec)));
+ (unsigned int)((ts.tv_sec - reqst->pnr_time_sec) * 1000000 +
+ (ts.tv_nsec / NSEC_PER_USEC - reqst->pnr_time_usec)));
}
static int
diff --git a/drivers/staging/lustre/lnet/selftest/rpc.c b/drivers/staging/lustre/lnet/selftest/rpc.c
index f5619d8744ef..ce9de8c9be57 100644
--- a/drivers/staging/lustre/lnet/selftest/rpc.c
+++ b/drivers/staging/lustre/lnet/selftest/rpc.c
@@ -84,14 +84,13 @@ void srpc_set_counters(const srpc_counters_t *cnt)
}
static int
-srpc_add_bulk_page(struct srpc_bulk *bk, struct page *pg, int i, int nob)
+srpc_add_bulk_page(struct srpc_bulk *bk, struct page *pg, int i, int off,
+ int nob)
{
- nob = min_t(int, nob, PAGE_SIZE);
+ LASSERT(off < PAGE_SIZE);
+ LASSERT(nob > 0 && nob <= PAGE_SIZE);
- LASSERT(nob > 0);
- LASSERT(i >= 0 && i < bk->bk_niov);
-
- bk->bk_iovs[i].bv_offset = 0;
+ bk->bk_iovs[i].bv_offset = off;
bk->bk_iovs[i].bv_page = pg;
bk->bk_iovs[i].bv_len = nob;
return nob;
@@ -117,7 +116,8 @@ srpc_free_bulk(struct srpc_bulk *bk)
}
struct srpc_bulk *
-srpc_alloc_bulk(int cpt, unsigned bulk_npg, unsigned bulk_len, int sink)
+srpc_alloc_bulk(int cpt, unsigned int bulk_off, unsigned int bulk_npg,
+ unsigned int bulk_len, int sink)
{
struct srpc_bulk *bk;
int i;
@@ -148,8 +148,11 @@ srpc_alloc_bulk(int cpt, unsigned bulk_npg, unsigned bulk_len, int sink)
return NULL;
}
- nob = srpc_add_bulk_page(bk, pg, i, bulk_len);
+ nob = min_t(unsigned int, bulk_off + bulk_len, PAGE_SIZE) -
+ bulk_off;
+ srpc_add_bulk_page(bk, pg, i, bulk_off, nob);
bulk_len -= nob;
+ bulk_off = 0;
}
return bk;
@@ -693,7 +696,8 @@ srpc_finish_service(struct srpc_service *sv)
/* called with sv->sv_lock held */
static void
-srpc_service_recycle_buffer(struct srpc_service_cd *scd, struct srpc_buffer *buf)
+srpc_service_recycle_buffer(struct srpc_service_cd *scd,
+ struct srpc_buffer *buf)
__must_hold(&scd->scd_lock)
{
if (!scd->scd_svc->sv_shuttingdown && scd->scd_buf_adjust >= 0) {
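
In rpc.c, srpc_alloc_bulk() now threads the offset down to srpc_add_bulk_page(): only the first page carries a non-zero bv_offset, and its byte count is clamped so offset + nob never crosses the page -- nob = min(bulk_off + bulk_len, PAGE_SIZE) - bulk_off -- after which bulk_off resets to 0 for the remaining pages. Worked example with 4 KiB pages, off = 1024, len = 8192: page 0 gets 3072 bytes at offset 1024, page 1 gets 4096 at 0, page 2 gets 1024 at 0. A sketch:

    #include <assert.h>

    #define PAGE_SIZE 4096u

    /* First-page clamp used when filling a bulk vector page by page. */
    static unsigned int first_nob(unsigned int off, unsigned int len)
    {
        unsigned int end = off + len < PAGE_SIZE ? off + len : PAGE_SIZE;

        return end - off;
    }

    int main(void)
    {
        unsigned int off = 1024, len = 8192, nob;

        nob = first_nob(off, len);        /* 3072 bytes in page 0 */
        assert(nob == 3072);
        len -= nob;
        off = 0;                          /* later pages start at offset 0 */
        assert(first_nob(off, len) == 4096);
        len -= 4096;
        assert(first_nob(off, len) == 1024);
        return 0;
    }
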
diff --git a/drivers/staging/lustre/lnet/selftest/rpc.h b/drivers/staging/lustre/lnet/selftest/rpc.h
index 4ab2ee264004..f353a634cc8e 100644
--- a/drivers/staging/lustre/lnet/selftest/rpc.h
+++ b/drivers/staging/lustre/lnet/selftest/rpc.h
@@ -113,7 +113,8 @@ struct srpc_join_reply {
__u32 join_status; /* returned status */
lst_sid_t join_sid; /* session id */
__u32 join_timeout; /* # seconds' inactivity to
- * expire */
+ * expire
+ */
char join_session[LST_NAME_SIZE]; /* session name */
} WIRE_ATTR;
@@ -175,7 +176,7 @@ struct test_bulk_req_v1 {
__u16 blk_opc; /* bulk operation code */
__u16 blk_flags; /* data check flags */
__u32 blk_len; /* data length */
- __u32 blk_offset; /* reserved: offset */
+ __u32 blk_offset; /* offset */
} WIRE_ATTR;
struct test_ping_req {
@@ -190,7 +191,8 @@ struct srpc_test_reqst {
lst_bid_t tsr_bid; /* batch id */
__u32 tsr_service; /* test type: bulk|ping|... */
__u32 tsr_loop; /* test client loop count or
- * # server buffers needed */
+ * # server buffers needed
+ */
__u32 tsr_concur; /* concurrency of test */
__u8 tsr_is_client; /* is test client or not */
__u8 tsr_stop_onerr; /* stop on error */
diff --git a/drivers/staging/lustre/lnet/selftest/selftest.h b/drivers/staging/lustre/lnet/selftest/selftest.h
index d033ac03d953..c8833a016b6d 100644
--- a/drivers/staging/lustre/lnet/selftest/selftest.h
+++ b/drivers/staging/lustre/lnet/selftest/selftest.h
@@ -131,7 +131,8 @@ srpc_service2reply(int service)
enum srpc_event_type {
SRPC_BULK_REQ_RCVD = 1, /* passive bulk request(PUT sink/GET source)
- * received */
+ * received
+ */
SRPC_BULK_PUT_SENT = 2, /* active bulk PUT sent (source) */
SRPC_BULK_GET_RPLD = 3, /* active bulk GET replied (sink) */
SRPC_REPLY_RCVD = 4, /* incoming reply received */
@@ -295,7 +296,8 @@ struct srpc_service_cd {
#define SFW_TEST_WI_MIN 256
#define SFW_TEST_WI_MAX 2048
/* extra buffers for tolerating buggy peers, or unbalanced number
- * of peers between partitions */
+ * of peers between partitions
+ */
#define SFW_TEST_WI_EXTRA 64
/* number of server workitems (mini-thread) for framework service */
@@ -347,9 +349,11 @@ struct sfw_batch {
struct sfw_test_client_ops {
int (*tso_init)(struct sfw_test_instance *tsi); /* initialize test
- * client */
+ * client
+ */
void (*tso_fini)(struct sfw_test_instance *tsi); /* finalize test
- * client */
+ * client
+ */
int (*tso_prep_rpc)(struct sfw_test_unit *tsu,
lnet_process_id_t dest,
struct srpc_client_rpc **rpc); /* prep a tests rpc */
@@ -374,7 +378,8 @@ struct sfw_test_instance {
spinlock_t tsi_lock; /* serialize */
unsigned int tsi_stopping:1; /* test is stopping */
atomic_t tsi_nactive; /* # of active test
- * unit */
+ * unit
+ */
struct list_head tsi_units; /* test units */
struct list_head tsi_free_rpcs; /* free rpcs */
struct list_head tsi_active_rpcs; /* active rpcs */
@@ -386,8 +391,10 @@ struct sfw_test_instance {
} tsi_u;
};
-/* XXX: trailing (PAGE_SIZE % sizeof(lnet_process_id_t)) bytes at the end of
- * pages are not used */
+/*
+ * XXX: trailing (PAGE_SIZE % sizeof(lnet_process_id_t)) bytes at the end of
+ * pages are not used
+ */
#define SFW_MAX_CONCUR LST_MAX_CONCUR
#define SFW_ID_PER_PAGE (PAGE_SIZE / sizeof(lnet_process_id_packed_t))
#define SFW_MAX_NDESTS (LNET_MAX_IOV * SFW_ID_PER_PAGE)
@@ -410,10 +417,10 @@ struct sfw_test_case {
struct srpc_client_rpc *
sfw_create_rpc(lnet_process_id_t peer, int service,
- unsigned features, int nbulkiov, int bulklen,
+ unsigned int features, int nbulkiov, int bulklen,
void (*done)(struct srpc_client_rpc *), void *priv);
int sfw_create_test_rpc(struct sfw_test_unit *tsu,
- lnet_process_id_t peer, unsigned features,
+ lnet_process_id_t peer, unsigned int features,
int nblk, int blklen, struct srpc_client_rpc **rpc);
void sfw_abort_rpc(struct srpc_client_rpc *rpc);
void sfw_post_rpc(struct srpc_client_rpc *rpc);
@@ -434,8 +441,9 @@ srpc_create_client_rpc(lnet_process_id_t peer, int service,
void srpc_post_rpc(struct srpc_client_rpc *rpc);
void srpc_abort_rpc(struct srpc_client_rpc *rpc, int why);
void srpc_free_bulk(struct srpc_bulk *bk);
-struct srpc_bulk *srpc_alloc_bulk(int cpt, unsigned bulk_npg,
- unsigned bulk_len, int sink);
+struct srpc_bulk *srpc_alloc_bulk(int cpt, unsigned int off,
+ unsigned int bulk_npg, unsigned int bulk_len,
+ int sink);
int srpc_send_rpc(struct swi_workitem *wi);
int srpc_send_reply(struct srpc_server_rpc *rpc);
int srpc_add_service(struct srpc_service *sv);
diff --git a/drivers/staging/lustre/lnet/selftest/timer.c b/drivers/staging/lustre/lnet/selftest/timer.c
index dcd22580b1f0..2fe692df19d0 100644
--- a/drivers/staging/lustre/lnet/selftest/timer.c
+++ b/drivers/staging/lustre/lnet/selftest/timer.c
@@ -46,16 +46,17 @@
* to cover a time period of 1024 seconds into the future before wrapping.
*/
#define STTIMER_MINPOLL 3 /* log2 min poll interval (8 s) */
-#define STTIMER_SLOTTIME (1 << STTIMER_MINPOLL)
+#define STTIMER_SLOTTIME BIT(STTIMER_MINPOLL)
#define STTIMER_SLOTTIMEMASK (~(STTIMER_SLOTTIME - 1))
-#define STTIMER_NSLOTS (1 << 7)
+#define STTIMER_NSLOTS BIT(7)
#define STTIMER_SLOT(t) (&stt_data.stt_hash[(((t) >> STTIMER_MINPOLL) & \
(STTIMER_NSLOTS - 1))])
static struct st_timer_data {
spinlock_t stt_lock;
unsigned long stt_prev_slot; /* start time of the slot processed
- * previously */
+ * previously
+ */
struct list_head stt_hash[STTIMER_NSLOTS];
int stt_shuttingdown;
wait_queue_head_t stt_waitq;
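
The timer.c constants switch from open-coded shifts to the kernel's BIT() macro. With STTIMER_MINPOLL = 3 the slot time is BIT(3) = 8 seconds, and 128 slots give the 1024-second horizon the comment above promises. A standalone sketch of the slot arithmetic (BIT() defined locally, as in linux/bits.h):

    #include <assert.h>

    #define BIT(nr)   (1UL << (nr))
    #define MINPOLL   3
    #define SLOTTIME  BIT(MINPOLL)               /* 8 s */
    #define NSLOTS    BIT(7)                     /* 128 slots */
    #define SLOT(t)   (((t) >> MINPOLL) & (NSLOTS - 1))

    int main(void)
    {
        assert(SLOTTIME == 8 && NSLOTS == 128);
        assert(SLOTTIME * NSLOTS == 1024);   /* seconds covered before wrap */
        assert(SLOT(0) == SLOT(7));          /* same 8 s bucket */
        assert(SLOT(8) == 1);
        assert(SLOT(1024) == SLOT(0));       /* wraps after 1024 s */
        return 0;
    }
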
diff --git a/drivers/staging/lustre/lustre/fid/fid_request.c b/drivers/staging/lustre/lustre/fid/fid_request.c
index edd72b926f81..999f250ceed0 100644
--- a/drivers/staging/lustre/lustre/fid/fid_request.c
+++ b/drivers/staging/lustre/lustre/fid/fid_request.c
@@ -74,7 +74,7 @@ static int seq_client_rpc(struct lu_client_seq *seq,
/* Zero out input range, this is not recovery yet. */
in = req_capsule_client_get(&req->rq_pill, &RMF_SEQ_RANGE);
- range_init(in);
+ lu_seq_range_init(in);
ptlrpc_request_set_replen(req);
@@ -112,25 +112,21 @@ static int seq_client_rpc(struct lu_client_seq *seq,
ptlrpc_at_set_req_timeout(req);
- if (opc != SEQ_ALLOC_SUPER && seq->lcs_type == LUSTRE_SEQ_METADATA)
- mdc_get_rpc_lock(exp->exp_obd->u.cli.cl_rpc_lock, NULL);
rc = ptlrpc_queue_wait(req);
- if (opc != SEQ_ALLOC_SUPER && seq->lcs_type == LUSTRE_SEQ_METADATA)
- mdc_put_rpc_lock(exp->exp_obd->u.cli.cl_rpc_lock, NULL);
if (rc)
goto out_req;
out = req_capsule_server_get(&req->rq_pill, &RMF_SEQ_RANGE);
*output = *out;
- if (!range_is_sane(output)) {
+ if (!lu_seq_range_is_sane(output)) {
CERROR("%s: Invalid range received from server: "
DRANGE "\n", seq->lcs_name, PRANGE(output));
rc = -EINVAL;
goto out_req;
}
- if (range_is_exhausted(output)) {
+ if (lu_seq_range_is_exhausted(output)) {
CERROR("%s: Range received from server is exhausted: "
DRANGE "]\n", seq->lcs_name, PRANGE(output));
rc = -EINVAL;
@@ -170,9 +166,9 @@ static int seq_client_alloc_seq(const struct lu_env *env,
{
int rc;
- LASSERT(range_is_sane(&seq->lcs_space));
+ LASSERT(lu_seq_range_is_sane(&seq->lcs_space));
- if (range_is_exhausted(&seq->lcs_space)) {
+ if (lu_seq_range_is_exhausted(&seq->lcs_space)) {
rc = seq_client_alloc_meta(env, seq);
if (rc) {
CERROR("%s: Can't allocate new meta-sequence, rc %d\n",
@@ -185,7 +181,7 @@ static int seq_client_alloc_seq(const struct lu_env *env,
rc = 0;
}
- LASSERT(!range_is_exhausted(&seq->lcs_space));
+ LASSERT(!lu_seq_range_is_exhausted(&seq->lcs_space));
*seqnr = seq->lcs_space.lsr_start;
seq->lcs_space.lsr_start += 1;
@@ -320,7 +316,7 @@ void seq_client_flush(struct lu_client_seq *seq)
seq->lcs_space.lsr_index = -1;
- range_init(&seq->lcs_space);
+ lu_seq_range_init(&seq->lcs_space);
mutex_unlock(&seq->lcs_mutex);
}
EXPORT_SYMBOL(seq_client_flush);
diff --git a/drivers/staging/lustre/lustre/fid/lproc_fid.c b/drivers/staging/lustre/lustre/fid/lproc_fid.c
index 3ed32d77f38b..97d4849c7199 100644
--- a/drivers/staging/lustre/lustre/fid/lproc_fid.c
+++ b/drivers/staging/lustre/lustre/fid/lproc_fid.c
@@ -83,7 +83,7 @@ ldebugfs_fid_write_common(const char __user *buffer, size_t count,
(unsigned long long *)&tmp.lsr_end);
if (rc != 2)
return -EINVAL;
- if (!range_is_sane(&tmp) || range_is_zero(&tmp) ||
+ if (!lu_seq_range_is_sane(&tmp) || lu_seq_range_is_zero(&tmp) ||
tmp.lsr_start < range->lsr_start || tmp.lsr_end > range->lsr_end)
return -EINVAL;
*range = tmp;
diff --git a/drivers/staging/lustre/lustre/fld/fld_cache.c b/drivers/staging/lustre/lustre/fld/fld_cache.c
index 0100a935f4ff..11f697496180 100644
--- a/drivers/staging/lustre/lustre/fld/fld_cache.c
+++ b/drivers/staging/lustre/lustre/fld/fld_cache.c
@@ -143,7 +143,7 @@ restart_fixup:
c_range = &f_curr->fce_range;
n_range = &f_next->fce_range;
- LASSERT(range_is_sane(c_range));
+ LASSERT(lu_seq_range_is_sane(c_range));
if (&f_next->fce_list == head)
break;
@@ -358,7 +358,7 @@ struct fld_cache_entry
{
struct fld_cache_entry *f_new;
- LASSERT(range_is_sane(range));
+ LASSERT(lu_seq_range_is_sane(range));
f_new = kzalloc(sizeof(*f_new), GFP_NOFS);
if (!f_new)
@@ -503,7 +503,7 @@ int fld_cache_lookup(struct fld_cache *cache,
}
prev = flde;
- if (range_within(&flde->fce_range, seq)) {
+ if (lu_seq_range_within(&flde->fce_range, seq)) {
*range = flde->fce_range;
cache->fci_stat.fst_cache++;
diff --git a/drivers/staging/lustre/lustre/fld/fld_internal.h b/drivers/staging/lustre/lustre/fld/fld_internal.h
index 08eaec735d6f..4a7f0b71c48d 100644
--- a/drivers/staging/lustre/lustre/fld/fld_internal.h
+++ b/drivers/staging/lustre/lustre/fld/fld_internal.h
@@ -62,11 +62,6 @@
#include "../include/lustre_req_layout.h"
#include "../include/lustre_fld.h"
-enum {
- LUSTRE_FLD_INIT = 1 << 0,
- LUSTRE_FLD_RUN = 1 << 1
-};
-
struct fld_stats {
__u64 fst_count;
__u64 fst_cache;
diff --git a/drivers/staging/lustre/lustre/fld/fld_request.c b/drivers/staging/lustre/lustre/fld/fld_request.c
index 0de72b717ce5..4cade7a16800 100644
--- a/drivers/staging/lustre/lustre/fld/fld_request.c
+++ b/drivers/staging/lustre/lustre/fld/fld_request.c
@@ -159,11 +159,6 @@ int fld_client_add_target(struct lu_client_fld *fld,
LASSERT(name);
LASSERT(tar->ft_srv || tar->ft_exp);
- if (fld->lcf_flags != LUSTRE_FLD_INIT) {
- CERROR("%s: Attempt to add target %s (idx %llu) on fly - skip it\n",
- fld->lcf_name, name, tar->ft_idx);
- return 0;
- }
CDEBUG(D_INFO, "%s: Adding target %s (idx %llu)\n",
fld->lcf_name, name, tar->ft_idx);
@@ -282,7 +277,6 @@ int fld_client_init(struct lu_client_fld *fld,
fld->lcf_count = 0;
spin_lock_init(&fld->lcf_lock);
fld->lcf_hash = &fld_hash[hash];
- fld->lcf_flags = LUSTRE_FLD_INIT;
INIT_LIST_HEAD(&fld->lcf_targets);
cache_size = FLD_CLIENT_CACHE_SIZE /
@@ -421,8 +415,6 @@ int fld_client_lookup(struct lu_client_fld *fld, u64 seq, u32 *mds,
struct lu_fld_target *target;
int rc;
- fld->lcf_flags |= LUSTRE_FLD_RUN;
-
rc = fld_cache_lookup(fld->lcf_cache, seq, &res);
if (rc == 0) {
*mds = res.lsr_index;
diff --git a/drivers/staging/lustre/lustre/include/cl_object.h b/drivers/staging/lustre/lustre/include/cl_object.h
index 89292c93dcd5..dc685610c4c4 100644
--- a/drivers/staging/lustre/lustre/include/cl_object.h
+++ b/drivers/staging/lustre/lustre/include/cl_object.h
@@ -59,10 +59,6 @@
* read/write system call it is associated with the single user
* thread, that issued the system call).
*
- * - cl_req represents a collection of pages for a transfer. cl_req is
- * constructed by req-forming engine that tries to saturate
- * transport with large and continuous transfers.
- *
* Terminology
*
* - to avoid confusion high-level I/O operation like read or write system
@@ -103,11 +99,8 @@
struct inode;
struct cl_device;
-struct cl_device_operations;
struct cl_object;
-struct cl_object_page_operations;
-struct cl_object_lock_operations;
struct cl_page;
struct cl_page_slice;
@@ -120,27 +113,7 @@ struct cl_page_operations;
struct cl_io;
struct cl_io_slice;
-struct cl_req;
-struct cl_req_slice;
-
-/**
- * Operations for each data device in the client stack.
- *
- * \see vvp_cl_ops, lov_cl_ops, lovsub_cl_ops, osc_cl_ops
- */
-struct cl_device_operations {
- /**
- * Initialize cl_req. This method is called top-to-bottom on all
- * devices in the stack to get them a chance to allocate layer-private
- * data, and to attach them to the cl_req by calling
- * cl_req_slice_add().
- *
- * \see osc_req_init(), lov_req_init(), lovsub_req_init()
- * \see vvp_req_init()
- */
- int (*cdo_req_init)(const struct lu_env *env, struct cl_device *dev,
- struct cl_req *req);
-};
+struct cl_req_attr;
/**
* Device in the client stack.
@@ -150,8 +123,6 @@ struct cl_device_operations {
struct cl_device {
/** Super-class. */
struct lu_device cd_lu_dev;
- /** Per-layer operation vector. */
- const struct cl_device_operations *cd_ops;
};
/** \addtogroup cl_object cl_object
@@ -267,7 +238,7 @@ struct cl_object_conf {
/**
* Object layout. This is consumed by lov.
*/
- struct lustre_md *coc_md;
+ struct lu_buf coc_layout;
/**
* Description of particular stripe location in the
* cluster. This is consumed by osc.
@@ -301,6 +272,26 @@ enum {
OBJECT_CONF_WAIT = 2
};
+enum {
+ CL_LAYOUT_GEN_NONE = (u32)-2, /* layout lock was cancelled */
+ CL_LAYOUT_GEN_EMPTY = (u32)-1, /* for empty layout */
+};
+
+struct cl_layout {
+ /** the buffer to return the layout in lov_mds_md format. */
+ struct lu_buf cl_buf;
+ /** size of layout in lov_mds_md format. */
+ size_t cl_size;
+ /** Layout generation. */
+ u32 cl_layout_gen;
+ /**
+ * True if this is a released file.
+ * Temporarily added for released file truncate in ll_setattr_raw().
+ * It will be removed later. -Jinshan
+ */
+ bool cl_is_released;
+};
+
/**
* Operations implemented for each cl object layer.
*
@@ -400,6 +391,27 @@ struct cl_object_operations {
*/
int (*coo_getstripe)(const struct lu_env *env, struct cl_object *obj,
struct lov_user_md __user *lum);
+ /**
+ * Get FIEMAP mapping from the object.
+ */
+ int (*coo_fiemap)(const struct lu_env *env, struct cl_object *obj,
+ struct ll_fiemap_info_key *fmkey,
+ struct fiemap *fiemap, size_t *buflen);
+ /**
+ * Get layout and generation of the object.
+ */
+ int (*coo_layout_get)(const struct lu_env *env, struct cl_object *obj,
+ struct cl_layout *layout);
+ /**
+ * Get maximum size of the object.
+ */
+ loff_t (*coo_maxbytes)(struct cl_object *obj);
+ /**
+ * Set request attributes.
+ */
+ void (*coo_req_attr_set)(const struct lu_env *env,
+ struct cl_object *obj,
+ struct cl_req_attr *attr);
};
/**
@@ -591,7 +603,7 @@ enum cl_page_state {
*
* - [cl_page_state::CPS_PAGEOUT] page is dirty, the
* req-formation engine decides that it wants to include this page
- * into an cl_req being constructed, and yanks it from the cache;
+ * into an RPC being constructed, and yanks it from the cache;
*
* - [cl_page_state::CPS_FREEING] VM callback is executed to
* evict the page form the memory;
@@ -660,7 +672,7 @@ enum cl_page_state {
* Page is being read in, as a part of a transfer. This is quite
* similar to the cl_page_state::CPS_PAGEOUT state, except that
* read-in is always "immediate"---there is no such thing a sudden
- * construction of read cl_req from cached, presumably not up to date,
+ * construction of read request from cached, presumably not up to date,
* pages.
*
* Underlying VM page is locked for the duration of transfer.
@@ -714,8 +726,6 @@ struct cl_page {
struct list_head cp_batch;
/** List of slices. Immutable after creation. */
struct list_head cp_layers;
- /** Linkage of pages within cl_req. */
- struct list_head cp_flight;
/**
* Page state. This field is const to avoid accidental update, it is
* modified only internally within cl_page.c. Protected by a VM lock.
@@ -732,12 +742,6 @@ struct cl_page {
* by sub-io. Protected by a VM lock.
*/
struct cl_io *cp_owner;
- /**
- * Owning IO request in cl_page_state::CPS_PAGEOUT and
- * cl_page_state::CPS_PAGEIN states. This field is maintained only in
- * the top-level pages. Protected by a VM lock.
- */
- struct cl_req *cp_req;
/** List of references to this page, for debugging. */
struct lu_ref cp_reference;
/** Link to an object, for debugging. */
@@ -779,7 +783,6 @@ enum cl_lock_mode {
/**
* Requested transfer type.
- * \ingroup cl_req
*/
enum cl_req_type {
CRT_READ,
@@ -884,26 +887,6 @@ struct cl_page_operations {
/** Destructor. Frees resources and slice itself. */
void (*cpo_fini)(const struct lu_env *env,
struct cl_page_slice *slice);
-
- /**
- * Checks whether the page is protected by a cl_lock. This is a
- * per-layer method, because certain layers have ways to check for the
- * lock much more efficiently than through the generic locks scan, or
- * implement locking mechanisms separate from cl_lock, e.g.,
- * LL_FILE_GROUP_LOCKED in vvp. If \a pending is true, check for locks
- * being canceled, or scheduled for cancellation as soon as the last
- * user goes away, too.
- *
- * \retval -EBUSY: page is protected by a lock of a given mode;
- * \retval -ENODATA: page is not protected by a lock;
- * \retval 0: this layer cannot decide.
- *
- * \see cl_page_is_under_lock()
- */
- int (*cpo_is_under_lock)(const struct lu_env *env,
- const struct cl_page_slice *slice,
- struct cl_io *io, pgoff_t *max);
-
/**
* Optional debugging helper. Prints given page slice.
*
@@ -915,8 +898,7 @@ struct cl_page_operations {
/**
* \name transfer
*
- * Transfer methods. See comment on cl_req for a description of
- * transfer formation and life-cycle.
+ * Transfer methods.
*
* @{
*/
@@ -962,7 +944,7 @@ struct cl_page_operations {
int ioret);
/**
* Called when cached page is about to be added to the
- * cl_req as a part of req formation.
+ * ptlrpc request as a part of req formation.
*
* \return 0 : proceed with this page;
* \return -EAGAIN : skip this page;
@@ -1365,7 +1347,6 @@ struct cl_2queue {
* (3) sort all locks to avoid dead-locks, and acquire them
*
* (4) process the chunk: call per-page methods
- * (cl_io_operations::cio_read_page() for read,
* cl_io_operations::cio_prepare_write(),
* cl_io_operations::cio_commit_write() for write)
*
@@ -1388,6 +1369,8 @@ enum cl_io_type {
CIT_WRITE,
/** truncate, utime system calls */
CIT_SETATTR,
+ /** get data version */
+ CIT_DATA_VERSION,
/**
* page fault handling
*/
@@ -1467,6 +1450,31 @@ struct cl_io_slice {
typedef void (*cl_commit_cbt)(const struct lu_env *, struct cl_io *,
struct cl_page *);
+
+struct cl_read_ahead {
+ /*
+	 * Maximum page index at which the readahead window ends.
+	 * This is determined by DLM lock coverage, RPC size and stripe
+	 * boundaries. cra_end is inclusive.
+ */
+ pgoff_t cra_end;
+ /*
+	 * Release routine. If readahead holds resources underneath, this
+	 * function should be called to release them.
+ */
+ void (*cra_release)(const struct lu_env *env, void *cbdata);
+ /* Callback data for cra_release routine */
+ void *cra_cbdata;
+};
+
+static inline void cl_read_ahead_release(const struct lu_env *env,
+ struct cl_read_ahead *ra)
+{
+ if (ra->cra_release)
+ ra->cra_release(env, ra->cra_cbdata);
+ memset(ra, 0, sizeof(*ra));
+}
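A hypothetical implementation of the matching cio_read_ahead() hook (introduced further below) shows the intended contract: set an inclusive window end and, if resources were pinned for the window, hand them to cra_release. Everything here except the cl_* types is invented for illustration:

	static void demo_ra_release(const struct lu_env *env, void *cbdata)
	{
		kfree(cbdata);	/* drop state pinned for the window */
	}

	static int demo_io_read_ahead(const struct lu_env *env,
				      const struct cl_io_slice *ios,
				      pgoff_t start, struct cl_read_ahead *ra)
	{
		void *state = kzalloc(16, GFP_NOFS);

		if (!state)
			return -ENOMEM;

		ra->cra_end = start + 255;	/* window end, inclusive */
		ra->cra_release = demo_ra_release;
		ra->cra_cbdata = state;
		return 0;
	}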
+
/**
* Per-layer io operations.
* \see vvp_io_ops, lov_io_ops, lovsub_io_ops, osc_io_ops
@@ -1573,16 +1581,13 @@ struct cl_io_operations {
struct cl_page_list *queue, int from, int to,
cl_commit_cbt cb);
/**
- * Read missing page.
- *
- * Called by a top-level cl_io_operations::op[CIT_READ]::cio_start()
- * method, when it hits not-up-to-date page in the range. Optional.
+	 * Decide the maximum extent of read-ahead.
*
* \pre io->ci_type == CIT_READ
*/
- int (*cio_read_page)(const struct lu_env *env,
- const struct cl_io_slice *slice,
- const struct cl_page_slice *page);
+ int (*cio_read_ahead)(const struct lu_env *env,
+ const struct cl_io_slice *slice,
+ pgoff_t start, struct cl_read_ahead *ra);
/**
* Optional debugging helper. Print given io slice.
*/
@@ -1765,10 +1770,15 @@ struct cl_io {
struct cl_io_rw_common ci_rw;
struct cl_setattr_io {
struct ost_lvb sa_attr;
+ unsigned int sa_attr_flags;
unsigned int sa_valid;
int sa_stripe_index;
- struct lu_fid *sa_parent_fid;
+ const struct lu_fid *sa_parent_fid;
} ci_setattr;
+ struct cl_data_version_io {
+ u64 dv_data_version;
+ int dv_flags;
+ } ci_data_version;
struct cl_fault_io {
/** page index within file. */
pgoff_t ft_index;
@@ -1836,179 +1846,20 @@ struct cl_io {
/** @} cl_io */
-/** \addtogroup cl_req cl_req
- * @{
- */
-/** \struct cl_req
- * Transfer.
- *
- * There are two possible modes of transfer initiation on the client:
- *
- * - immediate transfer: this is started when a high level io wants a page
- * or a collection of pages to be transferred right away. Examples:
- * read-ahead, synchronous read in the case of non-page aligned write,
- * page write-out as a part of extent lock cancellation, page write-out
- * as a part of memory cleansing. Immediate transfer can be both
- * cl_req_type::CRT_READ and cl_req_type::CRT_WRITE;
- *
- * - opportunistic transfer (cl_req_type::CRT_WRITE only), that happens
- * when io wants to transfer a page to the server some time later, when
- * it can be done efficiently. Example: pages dirtied by the write(2)
- * path.
- *
- * In any case, transfer takes place in the form of a cl_req, which is a
- * representation for a network RPC.
- *
- * Pages queued for an opportunistic transfer are cached until it is decided
- * that efficient RPC can be composed of them. This decision is made by "a
- * req-formation engine", currently implemented as a part of osc
- * layer. Req-formation depends on many factors: the size of the resulting
- * RPC, whether or not multi-object RPCs are supported by the server,
- * max-rpc-in-flight limitations, size of the dirty cache, etc.
- *
- * For the immediate transfer io submits a cl_page_list, that req-formation
- * engine slices into cl_req's, possibly adding cached pages to some of
- * the resulting req's.
- *
- * Whenever a page from cl_page_list is added to a newly constructed req, its
- * cl_page_operations::cpo_prep() layer methods are called. At that moment,
- * page state is atomically changed from cl_page_state::CPS_OWNED to
- * cl_page_state::CPS_PAGEOUT or cl_page_state::CPS_PAGEIN, cl_page::cp_owner
- * is zeroed, and cl_page::cp_req is set to the
- * req. cl_page_operations::cpo_prep() method at the particular layer might
- * return -EALREADY to indicate that it does not need to submit this page
- * at all. This is possible, for example, if page, submitted for read,
- * became up-to-date in the meantime; and for write, the page don't have
- * dirty bit marked. \see cl_io_submit_rw()
- *
- * Whenever a cached page is added to a newly constructed req, its
- * cl_page_operations::cpo_make_ready() layer methods are called. At that
- * moment, page state is atomically changed from cl_page_state::CPS_CACHED to
- * cl_page_state::CPS_PAGEOUT, and cl_page::cp_req is set to
- * req. cl_page_operations::cpo_make_ready() method at the particular layer
- * might return -EAGAIN to indicate that this page is not eligible for the
- * transfer right now.
- *
- * FUTURE
- *
- * Plan is to divide transfers into "priority bands" (indicated when
- * submitting cl_page_list, and queuing a page for the opportunistic transfer)
- * and allow glueing of cached pages to immediate transfers only within single
- * band. This would make high priority transfers (like lock cancellation or
- * memory pressure induced write-out) really high priority.
- *
- */
-
/**
* Per-transfer attributes.
*/
struct cl_req_attr {
+ enum cl_req_type cra_type;
+ u64 cra_flags;
+ struct cl_page *cra_page;
+
/** Generic attributes for the server consumption. */
struct obdo *cra_oa;
/** Jobid */
char cra_jobid[LUSTRE_JOBID_SIZE];
};
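A sketch of the reshaped attribute flow: instead of walking per-request cl_req slices, the transfer engine now asks the object directly through cl_req_attr_set() (new prototype later in this header). The flag value below is illustrative only:

	static void demo_set_attr(const struct lu_env *env,
				  struct cl_object *obj,
				  struct cl_page *page, struct obdo *oa)
	{
		struct cl_req_attr attr = {
			.cra_type  = CRT_WRITE,
			.cra_flags = ~0ULL,	/* ask for every attribute */
			.cra_page  = page,
			.cra_oa    = oa,
		};

		cl_req_attr_set(env, obj, &attr);
	}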
-/**
- * Transfer request operations definable at every layer.
- *
- * Concurrency: transfer formation engine synchronizes calls to all transfer
- * methods.
- */
-struct cl_req_operations {
- /**
- * Invoked top-to-bottom by cl_req_prep() when transfer formation is
- * complete (all pages are added).
- *
- * \see osc_req_prep()
- */
- int (*cro_prep)(const struct lu_env *env,
- const struct cl_req_slice *slice);
- /**
- * Called top-to-bottom to fill in \a oa fields. This is called twice
- * with different flags, see bug 10150 and osc_build_req().
- *
- * \param obj an object from cl_req which attributes are to be set in
- * \a oa.
- *
- * \param oa struct obdo where attributes are placed
- *
- * \param flags \a oa fields to be filled.
- */
- void (*cro_attr_set)(const struct lu_env *env,
- const struct cl_req_slice *slice,
- const struct cl_object *obj,
- struct cl_req_attr *attr, u64 flags);
- /**
- * Called top-to-bottom from cl_req_completion() to notify layers that
- * transfer completed. Has to free all state allocated by
- * cl_device_operations::cdo_req_init().
- */
- void (*cro_completion)(const struct lu_env *env,
- const struct cl_req_slice *slice, int ioret);
-};
-
-/**
- * A per-object state that (potentially multi-object) transfer request keeps.
- */
-struct cl_req_obj {
- /** object itself */
- struct cl_object *ro_obj;
- /** reference to cl_req_obj::ro_obj. For debugging. */
- struct lu_ref_link ro_obj_ref;
- /* something else? Number of pages for a given object? */
-};
-
-/**
- * Transfer request.
- *
- * Transfer requests are not reference counted, because IO sub-system owns
- * them exclusively and knows when to free them.
- *
- * Life cycle.
- *
- * cl_req is created by cl_req_alloc() that calls
- * cl_device_operations::cdo_req_init() device methods to allocate per-req
- * state in every layer.
- *
- * Then pages are added (cl_req_page_add()), req keeps track of all objects it
- * contains pages for.
- *
- * Once all pages were collected, cl_page_operations::cpo_prep() method is
- * called top-to-bottom. At that point layers can modify req, let it pass, or
- * deny it completely. This is to support things like SNS that have transfer
- * ordering requirements invisible to the individual req-formation engine.
- *
- * On transfer completion (or transfer timeout, or failure to initiate the
- * transfer of an allocated req), cl_req_operations::cro_completion() method
- * is called, after execution of cl_page_operations::cpo_completion() of all
- * req's pages.
- */
-struct cl_req {
- enum cl_req_type crq_type;
- /** A list of pages being transferred */
- struct list_head crq_pages;
- /** Number of pages in cl_req::crq_pages */
- unsigned crq_nrpages;
- /** An array of objects which pages are in ->crq_pages */
- struct cl_req_obj *crq_o;
- /** Number of elements in cl_req::crq_objs[] */
- unsigned crq_nrobjs;
- struct list_head crq_layers;
-};
-
-/**
- * Per-layer state for request.
- */
-struct cl_req_slice {
- struct cl_req *crs_req;
- struct cl_device *crs_dev;
- struct list_head crs_linkage;
- const struct cl_req_operations *crs_ops;
-};
-
-/* @} cl_req */
-
enum cache_stats_item {
/** how many cache lookups were performed */
CS_lookup = 0,
@@ -2153,9 +2004,6 @@ void cl_lock_slice_add(struct cl_lock *lock, struct cl_lock_slice *slice,
const struct cl_lock_operations *ops);
void cl_io_slice_add(struct cl_io *io, struct cl_io_slice *slice,
struct cl_object *obj, const struct cl_io_operations *ops);
-void cl_req_slice_add(struct cl_req *req, struct cl_req_slice *slice,
- struct cl_device *dev,
- const struct cl_req_operations *ops);
/** @} helpers */
/** \defgroup cl_object cl_object
@@ -2183,6 +2031,12 @@ int cl_object_prune(const struct lu_env *env, struct cl_object *obj);
void cl_object_kill(const struct lu_env *env, struct cl_object *obj);
int cl_object_getstripe(const struct lu_env *env, struct cl_object *obj,
struct lov_user_md __user *lum);
+int cl_object_fiemap(const struct lu_env *env, struct cl_object *obj,
+ struct ll_fiemap_info_key *fmkey, struct fiemap *fiemap,
+ size_t *buflen);
+int cl_object_layout_get(const struct lu_env *env, struct cl_object *obj,
+ struct cl_layout *cl);
+loff_t cl_object_maxbytes(struct cl_object *obj);
/**
* Returns true, iff \a o0 and \a o1 are slices of the same object.
@@ -2302,8 +2156,6 @@ void cl_page_discard(const struct lu_env *env, struct cl_io *io,
void cl_page_delete(const struct lu_env *env, struct cl_page *pg);
int cl_page_is_vmlocked(const struct lu_env *env, const struct cl_page *pg);
void cl_page_export(const struct lu_env *env, struct cl_page *pg, int uptodate);
-int cl_page_is_under_lock(const struct lu_env *env, struct cl_io *io,
- struct cl_page *page, pgoff_t *max_index);
loff_t cl_offset(const struct cl_object *obj, pgoff_t idx);
pgoff_t cl_index(const struct cl_object *obj, loff_t offset);
size_t cl_page_size(const struct cl_object *obj);
@@ -2414,8 +2266,6 @@ int cl_io_lock_add(const struct lu_env *env, struct cl_io *io,
struct cl_io_lock_link *link);
int cl_io_lock_alloc_add(const struct lu_env *env, struct cl_io *io,
struct cl_lock_descr *descr);
-int cl_io_read_page(const struct lu_env *env, struct cl_io *io,
- struct cl_page *page);
int cl_io_submit_rw(const struct lu_env *env, struct cl_io *io,
enum cl_req_type iot, struct cl_2queue *queue);
int cl_io_submit_sync(const struct lu_env *env, struct cl_io *io,
@@ -2424,6 +2274,8 @@ int cl_io_submit_sync(const struct lu_env *env, struct cl_io *io,
int cl_io_commit_async(const struct lu_env *env, struct cl_io *io,
struct cl_page_list *queue, int from, int to,
cl_commit_cbt cb);
+int cl_io_read_ahead(const struct lu_env *env, struct cl_io *io,
+ pgoff_t start, struct cl_read_ahead *ra);
int cl_io_is_going(const struct lu_env *env);
/**
@@ -2520,19 +2372,8 @@ void cl_2queue_init_page(struct cl_2queue *queue, struct cl_page *page);
/** @} cl_page_list */
-/** \defgroup cl_req cl_req
- * @{
- */
-struct cl_req *cl_req_alloc(const struct lu_env *env, struct cl_page *page,
- enum cl_req_type crt, int nr_objects);
-
-void cl_req_page_add(const struct lu_env *env, struct cl_req *req,
- struct cl_page *page);
-void cl_req_page_done(const struct lu_env *env, struct cl_page *page);
-int cl_req_prep(const struct lu_env *env, struct cl_req *req);
-void cl_req_attr_set(const struct lu_env *env, struct cl_req *req,
- struct cl_req_attr *attr, u64 flags);
-void cl_req_completion(const struct lu_env *env, struct cl_req *req, int ioret);
+void cl_req_attr_set(const struct lu_env *env, struct cl_object *obj,
+ struct cl_req_attr *attr);
/** \defgroup cl_sync_io cl_sync_io
* @{
@@ -2568,8 +2409,6 @@ void cl_sync_io_end(const struct lu_env *env, struct cl_sync_io *anchor);
/** @} cl_sync_io */
-/** @} cl_req */
-
/** \defgroup cl_env cl_env
*
* lu_env handling for a client.
@@ -2593,35 +2432,13 @@ void cl_sync_io_end(const struct lu_env *env, struct cl_sync_io *anchor);
* - allocation and destruction of environment is amortized by caching no
* longer used environments instead of destroying them;
*
- * - there is a notion of "current" environment, attached to the kernel
- * data structure representing current thread Top-level lustre code
- * allocates an environment and makes it current, then calls into
- * non-lustre code, that in turn calls lustre back. Low-level lustre
- * code thus called can fetch environment created by the top-level code
- * and reuse it, avoiding additional environment allocation.
- * Right now, three interfaces can attach the cl_env to running thread:
- * - cl_env_get
- * - cl_env_implant
- * - cl_env_reexit(cl_env_reenter had to be called priorly)
- *
* \see lu_env, lu_context, lu_context_key
* @{
*/
-struct cl_env_nest {
- int cen_refcheck;
- void *cen_cookie;
-};
-
struct lu_env *cl_env_get(int *refcheck);
struct lu_env *cl_env_alloc(int *refcheck, __u32 tags);
-struct lu_env *cl_env_nested_get(struct cl_env_nest *nest);
void cl_env_put(struct lu_env *env, int *refcheck);
-void cl_env_nested_put(struct cl_env_nest *nest, struct lu_env *env);
-void *cl_env_reenter(void);
-void cl_env_reexit(void *cookie);
-void cl_env_implant(struct lu_env *env, int *refcheck);
-void cl_env_unplant(struct lu_env *env, int *refcheck);
unsigned int cl_env_cache_purge(unsigned int nr);
struct lu_env *cl_env_percpu_get(void);
void cl_env_percpu_put(struct lu_env *env);
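With the nesting interfaces gone, the surviving API reduces to a get/put pair keyed by a reference check. A typical usage pattern, sketched from how callers elsewhere in the tree use it:

	static int demo_with_env(void)
	{
		struct lu_env *env;
		int refcheck;

		env = cl_env_get(&refcheck);
		if (IS_ERR(env))
			return PTR_ERR(env);

		/* ... perform cl_* calls with env ... */

		cl_env_put(env, &refcheck);
		return 0;
	}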
diff --git a/drivers/staging/lustre/lustre/include/llog_swab.h b/drivers/staging/lustre/lustre/include/llog_swab.h
new file mode 100644
index 000000000000..fd7ffb154ad1
--- /dev/null
+++ b/drivers/staging/lustre/lustre/include/llog_swab.h
@@ -0,0 +1,65 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.gnu.org/licenses/gpl-2.0.html
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2011, 2014, Intel Corporation.
+ *
+ * Copyright 2015 Cray Inc, all rights reserved.
+ * Author: Ben Evans.
+ *
+ * We assume all nodes are either little-endian or big-endian, and we
+ * always send messages in the sender's native format. The receiver
+ * detects the message format by checking the 'magic' field of the message
+ * (see lustre_msg_swabbed()).
+ *
+ * Each type has a corresponding 'lustre_swab_xxxtypexxx()' routine,
+ * implemented in ptlrpc/pack_generic.c. These 'swabbers' convert the
+ * type from "other" endian, in-place in the message buffer.
+ *
+ * A swabber takes a single pointer argument. The caller must already have
+ * verified that the length of the message buffer >= sizeof (type).
+ *
+ * For variable length types, a second 'lustre_swab_v_xxxtypexxx()' routine
+ * may be defined that swabs just the variable part, after the caller has
+ * verified that the message buffer is large enough.
+ */
+
+#ifndef _LLOG_SWAB_H_
+#define _LLOG_SWAB_H_
+
+#include "lustre/lustre_idl.h"
+struct lustre_cfg;
+
+void lustre_swab_lu_fid(struct lu_fid *fid);
+void lustre_swab_ost_id(struct ost_id *oid);
+void lustre_swab_llogd_body(struct llogd_body *d);
+void lustre_swab_llog_hdr(struct llog_log_hdr *h);
+void lustre_swab_llogd_conn_body(struct llogd_conn_body *d);
+void lustre_swab_llog_rec(struct llog_rec_hdr *rec);
+void lustre_swab_lu_seq_range(struct lu_seq_range *range);
+void lustre_swab_lustre_cfg(struct lustre_cfg *lcfg);
+void lustre_swab_cfg_marker(struct cfg_marker *marker,
+ int swab, int size);
+
+#endif
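For illustration, the general shape of a fixed-size swabber as the header comment describes; 'demo_rec' is a made-up type, and the real routines declared above live in ptlrpc/pack_generic.c and obdclass/llog_swab.c:

	struct demo_rec {
		__u32 dr_magic;
		__u64 dr_index;
	};

	static void lustre_swab_demo_rec(struct demo_rec *r)
	{
		/* caller has already checked buffer length >= sizeof(*r) */
		__swab32s(&r->dr_magic);
		__swab64s(&r->dr_index);
	}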
diff --git a/drivers/staging/lustre/lustre/include/lprocfs_status.h b/drivers/staging/lustre/lustre/include/lprocfs_status.h
index cc0713ef8ae5..62753dae0bfa 100644
--- a/drivers/staging/lustre/lustre/include/lprocfs_status.h
+++ b/drivers/staging/lustre/lustre/include/lprocfs_status.h
@@ -43,6 +43,8 @@
#include <linux/spinlock.h>
#include <linux/types.h>
+#include "../../include/linux/libcfs/libcfs.h"
+#include "lustre_cfg.h"
#include "lustre/lustre_idl.h"
struct lprocfs_vars {
@@ -540,7 +542,8 @@ lprocfs_alloc_stats(unsigned int num, enum lprocfs_stats_flags flags);
void lprocfs_clear_stats(struct lprocfs_stats *stats);
void lprocfs_free_stats(struct lprocfs_stats **stats);
void lprocfs_counter_init(struct lprocfs_stats *stats, int index,
- unsigned conf, const char *name, const char *units);
+ unsigned int conf, const char *name,
+ const char *units);
struct obd_export;
int lprocfs_exp_cleanup(struct obd_export *exp);
struct dentry *ldebugfs_add_simple(struct dentry *root,
@@ -701,9 +704,9 @@ static struct lustre_attr lustre_attr_##name = __ATTR(name, mode, show, store)
extern const struct sysfs_ops lustre_sysfs_ops;
struct root_squash_info;
-int lprocfs_wr_root_squash(const char *buffer, unsigned long count,
+int lprocfs_wr_root_squash(const char __user *buffer, unsigned long count,
struct root_squash_info *squash, char *name);
-int lprocfs_wr_nosquash_nids(const char *buffer, unsigned long count,
+int lprocfs_wr_nosquash_nids(const char __user *buffer, unsigned long count,
struct root_squash_info *squash, char *name);
/* all quota proc functions */
diff --git a/drivers/staging/lustre/lustre/include/lustre/ll_fiemap.h b/drivers/staging/lustre/lustre/include/lustre/ll_fiemap.h
index c2340d643e84..b8ad5559a3b9 100644
--- a/drivers/staging/lustre/lustre/include/lustre/ll_fiemap.h
+++ b/drivers/staging/lustre/lustre/include/lustre/ll_fiemap.h
@@ -41,79 +41,24 @@
#ifndef _LUSTRE_FIEMAP_H
#define _LUSTRE_FIEMAP_H
-struct ll_fiemap_extent {
- __u64 fe_logical; /* logical offset in bytes for the start of
- * the extent from the beginning of the file
- */
- __u64 fe_physical; /* physical offset in bytes for the start
- * of the extent from the beginning of the disk
- */
- __u64 fe_length; /* length in bytes for this extent */
- __u64 fe_reserved64[2];
- __u32 fe_flags; /* FIEMAP_EXTENT_* flags for this extent */
- __u32 fe_device; /* device number for this extent */
- __u32 fe_reserved[2];
-};
-
-struct ll_user_fiemap {
- __u64 fm_start; /* logical offset (inclusive) at
- * which to start mapping (in)
- */
- __u64 fm_length; /* logical length of mapping which
- * userspace wants (in)
- */
- __u32 fm_flags; /* FIEMAP_FLAG_* flags for request (in/out) */
- __u32 fm_mapped_extents;/* number of extents that were mapped (out) */
- __u32 fm_extent_count; /* size of fm_extents array (in) */
- __u32 fm_reserved;
- struct ll_fiemap_extent fm_extents[0]; /* array of mapped extents (out) */
-};
-
-#define FIEMAP_MAX_OFFSET (~0ULL)
+#ifndef __KERNEL__
+#include <stddef.h>
+#include <fiemap.h>
+#endif
-#define FIEMAP_FLAG_SYNC 0x00000001 /* sync file data before
- * map
- */
-#define FIEMAP_FLAG_XATTR 0x00000002 /* map extended attribute
- * tree
- */
-#define FIEMAP_EXTENT_LAST 0x00000001 /* Last extent in file. */
-#define FIEMAP_EXTENT_UNKNOWN 0x00000002 /* Data location unknown. */
-#define FIEMAP_EXTENT_DELALLOC 0x00000004 /* Location still pending.
- * Sets EXTENT_UNKNOWN.
- */
-#define FIEMAP_EXTENT_ENCODED 0x00000008 /* Data can not be read
- * while fs is unmounted
- */
-#define FIEMAP_EXTENT_DATA_ENCRYPTED 0x00000080 /* Data is encrypted by fs.
- * Sets EXTENT_NO_DIRECT.
- */
-#define FIEMAP_EXTENT_NOT_ALIGNED 0x00000100 /* Extent offsets may not be
- * block aligned.
- */
-#define FIEMAP_EXTENT_DATA_INLINE 0x00000200 /* Data mixed with metadata.
- * Sets EXTENT_NOT_ALIGNED.*/
-#define FIEMAP_EXTENT_DATA_TAIL 0x00000400 /* Multiple files in block.
- * Sets EXTENT_NOT_ALIGNED.
- */
-#define FIEMAP_EXTENT_UNWRITTEN 0x00000800 /* Space allocated, but
- * no data (i.e. zero).
- */
-#define FIEMAP_EXTENT_MERGED 0x00001000 /* File does not natively
- * support extents. Result
- * merged for efficiency.
- */
+/* XXX: We use fiemap_extent::fe_reserved[0] */
+#define fe_device fe_reserved[0]
static inline size_t fiemap_count_to_size(size_t extent_count)
{
- return (sizeof(struct ll_user_fiemap) + extent_count *
- sizeof(struct ll_fiemap_extent));
+ return sizeof(struct fiemap) + extent_count *
+ sizeof(struct fiemap_extent);
}
static inline unsigned fiemap_size_to_count(size_t array_size)
{
- return ((array_size - sizeof(struct ll_user_fiemap)) /
- sizeof(struct ll_fiemap_extent));
+ return (array_size - sizeof(struct fiemap)) /
+ sizeof(struct fiemap_extent);
}
#define FIEMAP_FLAG_DEVICE_ORDER 0x40000000 /* return device ordered mapping */
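A worked example of the sizing helpers above: a request for four extents needs the fixed fiemap header plus four fiemap_extent slots, i.e. fiemap_count_to_size(4) == sizeof(struct fiemap) + 4 * sizeof(struct fiemap_extent) = 32 + 4 * 56 = 256 bytes, and fiemap_size_to_count(256) recovers 4.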
diff --git a/drivers/staging/lustre/lustre/include/lustre/lustre_idl.h b/drivers/staging/lustre/lustre/include/lustre/lustre_idl.h
index 72eaee95c6b8..65ce503ad595 100644
--- a/drivers/staging/lustre/lustre/include/lustre/lustre_idl.h
+++ b/drivers/staging/lustre/lustre/include/lustre/lustre_idl.h
@@ -48,8 +48,7 @@
* that the Lustre wire protocol is not influenced by external dependencies.
*
* The only other acceptable items in this file are VERY SIMPLE accessor
- * functions to avoid callers grubbing inside the structures, and the
- * prototypes of the swabber functions for each struct. Nothing that
+ * functions to avoid callers grubbing inside the structures. Nothing that
* depends on external functions or definitions should be in here.
*
* Structs must be properly aligned to put 64-bit values on an 8-byte
@@ -64,23 +63,6 @@
* in the code to ensure that new/old clients that see this larger struct
* do not fail, otherwise you need to implement protocol compatibility).
*
- * We assume all nodes are either little-endian or big-endian, and we
- * always send messages in the sender's native format. The receiver
- * detects the message format by checking the 'magic' field of the message
- * (see lustre_msg_swabbed() below).
- *
- * Each wire type has corresponding 'lustre_swab_xxxtypexxx()' routines,
- * implemented either here, inline (trivial implementations) or in
- * ptlrpc/pack_generic.c. These 'swabbers' convert the type from "other"
- * endian, in-place in the message buffer.
- *
- * A swabber takes a single pointer argument. The caller must already have
- * verified that the length of the message buffer >= sizeof (type).
- *
- * For variable length types, a second 'lustre_swab_v_xxxtypexxx()' routine
- * may be defined that swabs just the variable part, after the caller has
- * verified that the message buffer is large enough.
- *
* @{
*/
@@ -192,113 +174,6 @@ struct lu_seq_range_array {
#define LU_SEQ_RANGE_MASK 0x3
-static inline unsigned fld_range_type(const struct lu_seq_range *range)
-{
- return range->lsr_flags & LU_SEQ_RANGE_MASK;
-}
-
-static inline bool fld_range_is_ost(const struct lu_seq_range *range)
-{
- return fld_range_type(range) == LU_SEQ_RANGE_OST;
-}
-
-static inline bool fld_range_is_mdt(const struct lu_seq_range *range)
-{
- return fld_range_type(range) == LU_SEQ_RANGE_MDT;
-}
-
-/**
- * This all range is only being used when fld client sends fld query request,
- * but it does not know whether the seq is MDT or OST, so it will send req
- * with ALL type, which means either seq type gotten from lookup can be
- * expected.
- */
-static inline unsigned fld_range_is_any(const struct lu_seq_range *range)
-{
- return fld_range_type(range) == LU_SEQ_RANGE_ANY;
-}
-
-static inline void fld_range_set_type(struct lu_seq_range *range,
- unsigned flags)
-{
- range->lsr_flags |= flags;
-}
-
-static inline void fld_range_set_mdt(struct lu_seq_range *range)
-{
- fld_range_set_type(range, LU_SEQ_RANGE_MDT);
-}
-
-static inline void fld_range_set_ost(struct lu_seq_range *range)
-{
- fld_range_set_type(range, LU_SEQ_RANGE_OST);
-}
-
-static inline void fld_range_set_any(struct lu_seq_range *range)
-{
- fld_range_set_type(range, LU_SEQ_RANGE_ANY);
-}
-
-/**
- * returns width of given range \a r
- */
-
-static inline __u64 range_space(const struct lu_seq_range *range)
-{
- return range->lsr_end - range->lsr_start;
-}
-
-/**
- * initialize range to zero
- */
-
-static inline void range_init(struct lu_seq_range *range)
-{
- memset(range, 0, sizeof(*range));
-}
-
-/**
- * check if given seq id \a s is within given range \a r
- */
-
-static inline bool range_within(const struct lu_seq_range *range,
- __u64 s)
-{
- return s >= range->lsr_start && s < range->lsr_end;
-}
-
-static inline bool range_is_sane(const struct lu_seq_range *range)
-{
- return (range->lsr_end >= range->lsr_start);
-}
-
-static inline bool range_is_zero(const struct lu_seq_range *range)
-{
- return (range->lsr_start == 0 && range->lsr_end == 0);
-}
-
-static inline bool range_is_exhausted(const struct lu_seq_range *range)
-
-{
- return range_space(range) == 0;
-}
-
-/* return 0 if two range have the same location */
-static inline int range_compare_loc(const struct lu_seq_range *r1,
- const struct lu_seq_range *r2)
-{
- return r1->lsr_index != r2->lsr_index ||
- r1->lsr_flags != r2->lsr_flags;
-}
-
-#define DRANGE "[%#16.16Lx-%#16.16Lx):%x:%s"
-
-#define PRANGE(range) \
- (range)->lsr_start, \
- (range)->lsr_end, \
- (range)->lsr_index, \
- fld_range_is_mdt(range) ? "mdt" : "ost"
-
/** \defgroup lu_fid lu_fid
* @{
*/
@@ -310,7 +185,7 @@ static inline int range_compare_loc(const struct lu_seq_range *r1,
*/
enum lma_compat {
LMAC_HSM = 0x00000001,
- LMAC_SOM = 0x00000002,
+/* LMAC_SOM = 0x00000002, obsolete since 2.8.0 */
LMAC_NOT_IN_OI = 0x00000004, /* the object does NOT need OI mapping */
LMAC_FID_ON_OST = 0x00000008, /* For OST-object, its OI mapping is
* under /O/<seq>/d<x>.
@@ -644,13 +519,14 @@ static inline void ostid_set_id(struct ost_id *oi, __u64 oid)
{
if (fid_seq_is_mdt0(oi->oi.oi_seq)) {
if (oid >= IDIF_MAX_OID) {
- CERROR("Bad %llu to set " DOSTID "\n", oid, POSTID(oi));
+ CERROR("Too large OID %#llx to set MDT0 " DOSTID "\n",
+ oid, POSTID(oi));
return;
}
oi->oi.oi_id = oid;
} else if (fid_is_idif(&oi->oi_fid)) {
if (oid >= IDIF_MAX_OID) {
- CERROR("Bad %llu to set "DOSTID"\n",
+ CERROR("Too large OID %#llx to set IDIF " DOSTID "\n",
oid, POSTID(oi));
return;
}
@@ -676,7 +552,7 @@ static inline int fid_set_id(struct lu_fid *fid, __u64 oid)
if (fid_is_idif(fid)) {
if (oid >= IDIF_MAX_OID) {
- CERROR("Too large OID %#llx to set IDIF "DFID"\n",
+ CERROR("Too large OID %#llx to set IDIF " DFID "\n",
(unsigned long long)oid, PFID(fid));
return -EBADF;
}
@@ -685,7 +561,7 @@ static inline int fid_set_id(struct lu_fid *fid, __u64 oid)
fid->f_ver = oid >> 48;
} else {
if (oid >= OBIF_MAX_OID) {
- CERROR("Too large OID %#llx to set REG "DFID"\n",
+ CERROR("Too large OID %#llx to set REG " DFID "\n",
(unsigned long long)oid, PFID(fid));
return -EBADF;
}
@@ -785,8 +661,6 @@ static inline ino_t lu_igif_ino(const struct lu_fid *fid)
return fid_seq(fid);
}
-void lustre_swab_ost_id(struct ost_id *oid);
-
/**
* Get inode generation from a igif.
* \param fid a igif to get inode generation from.
@@ -847,9 +721,6 @@ static inline bool fid_is_sane(const struct lu_fid *fid)
fid_seq_is_rsvd(fid_seq(fid)));
}
-void lustre_swab_lu_fid(struct lu_fid *fid);
-void lustre_swab_lu_seq_range(struct lu_seq_range *range);
-
static inline bool lu_fid_eq(const struct lu_fid *f0, const struct lu_fid *f1)
{
return memcmp(f0, f1, sizeof(*f0)) == 0;
@@ -1099,8 +970,10 @@ struct ptlrpc_body_v3 {
__u32 pb_version;
__u32 pb_opc;
__u32 pb_status;
- __u64 pb_last_xid;
- __u64 pb_last_seen;
+ __u64 pb_last_xid; /* highest replied XID without lower unreplied XID */
+ __u16 pb_tag; /* virtual slot idx for multiple modifying RPCs */
+ __u16 pb_padding0;
+ __u32 pb_padding1;
__u64 pb_last_committed;
__u64 pb_transno;
__u32 pb_flags;
@@ -1112,8 +985,11 @@ struct ptlrpc_body_v3 {
__u64 pb_slv;
/* VBR: pre-versions */
__u64 pb_pre_versions[PTLRPC_NUM_VERSIONS];
+ __u64 pb_mbits; /**< match bits for bulk request */
/* padding for future needs */
- __u64 pb_padding[4];
+ __u64 pb_padding64_0;
+ __u64 pb_padding64_1;
+ __u64 pb_padding64_2;
char pb_jobid[LUSTRE_JOBID_SIZE];
};
@@ -1125,8 +1001,10 @@ struct ptlrpc_body_v2 {
__u32 pb_version;
__u32 pb_opc;
__u32 pb_status;
- __u64 pb_last_xid;
- __u64 pb_last_seen;
+ __u64 pb_last_xid; /* highest replied XID without lower unreplied XID */
+ __u16 pb_tag; /* virtual slot idx for multiple modifying RPCs */
+ __u16 pb_padding0;
+ __u32 pb_padding1;
__u64 pb_last_committed;
__u64 pb_transno;
__u32 pb_flags;
@@ -1140,12 +1018,13 @@ struct ptlrpc_body_v2 {
__u64 pb_slv;
/* VBR: pre-versions */
__u64 pb_pre_versions[PTLRPC_NUM_VERSIONS];
+ __u64 pb_mbits; /**< unused in V2 */
/* padding for future needs */
- __u64 pb_padding[4];
+ __u64 pb_padding64_0;
+ __u64 pb_padding64_1;
+ __u64 pb_padding64_2;
};
-void lustre_swab_ptlrpc_body(struct ptlrpc_body *pb);
-
/* message body offset for lustre_msg_v2 */
/* ptlrpc body offset in all request/reply messages */
#define MSG_PTLRPC_BODY_OFF 0
@@ -1282,7 +1161,16 @@ void lustre_swab_ptlrpc_body(struct ptlrpc_body *pb);
*/
#define OBD_CONNECT_LFSCK 0x40000000000000ULL/* support online LFSCK */
#define OBD_CONNECT_UNLINK_CLOSE 0x100000000000000ULL/* close file in unlink */
+#define OBD_CONNECT_MULTIMODRPCS 0x200000000000000ULL /* support multiple modify
+ * RPCs in parallel
+ */
#define OBD_CONNECT_DIR_STRIPE 0x400000000000000ULL/* striped DNE dir */
+#define OBD_CONNECT_SUBTREE 0x800000000000000ULL /* fileset mount */
+#define OBD_CONNECT_LOCK_AHEAD 0x1000000000000000ULL /* lock ahead */
+/** bulk matchbits is sent within ptlrpc_body */
+#define OBD_CONNECT_BULK_MBITS 0x2000000000000000ULL
+#define OBD_CONNECT_OBDOPACK 0x4000000000000000ULL /* compact OUT obdo */
+#define OBD_CONNECT_FLAGS2 0x8000000000000000ULL /* second flags word */
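The new flag bits follow the existing negotiation pattern: a feature is only used if the peer advertised it at connect time. An illustrative check, not code from this patch:

	static inline bool demo_use_bulk_mbits(const struct obd_connect_data *ocd)
	{
		return !!(ocd->ocd_connect_flags & OBD_CONNECT_BULK_MBITS);
	}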
/* XXX README XXX:
* Please DO NOT add flag values here before first ensuring that this same
@@ -1313,25 +1201,6 @@ void lustre_swab_ptlrpc_body(struct ptlrpc_body *pb);
* If we eventually have separate connect data for different types, which we
* almost certainly will, then perhaps we stick a union in here.
*/
-struct obd_connect_data_v1 {
- __u64 ocd_connect_flags; /* OBD_CONNECT_* per above */
- __u32 ocd_version; /* lustre release version number */
- __u32 ocd_grant; /* initial cache grant amount (bytes) */
- __u32 ocd_index; /* LOV index to connect to */
- __u32 ocd_brw_size; /* Maximum BRW size in bytes, must be 2^n */
- __u64 ocd_ibits_known; /* inode bits this client understands */
- __u8 ocd_blocksize; /* log2 of the backend filesystem blocksize */
- __u8 ocd_inodespace; /* log2 of the per-inode space consumption */
- __u16 ocd_grant_extent; /* per-extent grant overhead, in 1K blocks */
- __u32 ocd_unused; /* also fix lustre_swab_connect */
- __u64 ocd_transno; /* first transno from client to be replayed */
- __u32 ocd_group; /* MDS group on OST */
- __u32 ocd_cksum_types; /* supported checksum algorithms */
- __u32 ocd_max_easize; /* How big LOV EA can be on MDS */
- __u32 ocd_instance; /* also fix lustre_swab_connect */
- __u64 ocd_maxbytes; /* Maximum stripe size in bytes */
-};
-
struct obd_connect_data {
__u64 ocd_connect_flags; /* OBD_CONNECT_* per above */
__u32 ocd_version; /* lustre release version number */
@@ -1354,8 +1223,10 @@ struct obd_connect_data {
* any field after ocd_maxbytes on the receiver without a valid flag
* may result in out-of-bound memory access and kernel oops.
*/
- __u64 padding1; /* added 2.1.0. also fix lustre_swab_connect */
- __u64 padding2; /* added 2.1.0. also fix lustre_swab_connect */
+ __u16 ocd_maxmodrpcs; /* Maximum modify RPCs in parallel */
+ __u16 padding0; /* added 2.1.0. also fix lustre_swab_connect */
+ __u32 padding1; /* added 2.1.0. also fix lustre_swab_connect */
+ __u64 ocd_connect_flags2;
__u64 padding3; /* added 2.1.0. also fix lustre_swab_connect */
__u64 padding4; /* added 2.1.0. also fix lustre_swab_connect */
__u64 padding5; /* added 2.1.0. also fix lustre_swab_connect */
@@ -1380,8 +1251,6 @@ struct obd_connect_data {
* reserve the flag for future use.
*/
-void lustre_swab_connect(struct obd_connect_data *ocd);
-
/*
* Supported checksum algorithms. Up to 32 checksum types are supported.
* (32-bit mask stored in obd_connect_data::ocd_cksum_types)
@@ -1416,7 +1285,7 @@ enum ost_cmd {
OST_STATFS = 13,
OST_SYNC = 16,
OST_SET_INFO = 17,
- OST_QUOTACHECK = 18,
+ OST_QUOTACHECK = 18, /* not used since 2.4 */
OST_QUOTACTL = 19,
OST_QUOTA_ADJUST_QUNIT = 20, /* not used since 2.4 */
OST_LAST_OPC
@@ -1580,8 +1449,6 @@ static inline void lmm_oi_cpu_to_le(struct ost_id *dst_oi,
dst_oi->oi.oi_seq = cpu_to_le64(src_oi->oi.oi_seq);
}
-/* extern void lustre_swab_lov_mds_md(struct lov_mds_md *llm); */
-
#define MAX_MD_SIZE \
(sizeof(struct lov_mds_md) + 4 * sizeof(struct lov_ost_data))
#define MIN_MD_SIZE \
@@ -1674,7 +1541,7 @@ lov_mds_md_max_stripe_count(size_t buf_size, __u32 lmm_magic)
#define OBD_MD_FLCKSUM (0x00100000ULL) /* bulk data checksum */
#define OBD_MD_FLQOS (0x00200000ULL) /* quality of service stats */
/*#define OBD_MD_FLOSCOPQ (0x00400000ULL) osc opaque data, never used */
-#define OBD_MD_FLCOOKIE (0x00800000ULL) /* log cancellation cookie */
+/* OBD_MD_FLCOOKIE (0x00800000ULL) obsolete in 2.8 */
#define OBD_MD_FLGROUP (0x01000000ULL) /* group */
#define OBD_MD_FLFID (0x02000000ULL) /* ->ost write inline fid */
#define OBD_MD_FLEPOCH (0x04000000ULL) /* ->ost write with ioepoch */
@@ -1713,7 +1580,9 @@ lov_mds_md_max_stripe_count(size_t buf_size, __u32 lmm_magic)
/* OBD_MD_FLRMTRGETFACL (0x0008000000000000ULL) lfs rgetfacl, obsolete */
#define OBD_MD_FLDATAVERSION (0x0010000000000000ULL) /* iversion sum */
-#define OBD_MD_FLRELEASED (0x0020000000000000ULL) /* file released */
+#define OBD_MD_CLOSE_INTENT_EXECED (0x0020000000000000ULL) /* close intent
+ * executed
+ */
#define OBD_MD_DEFAULT_MEA (0x0040000000000000ULL) /* default MEA */
@@ -1742,11 +1611,6 @@ struct hsm_state_set {
__u64 hss_clearmask;
};
-void lustre_swab_hsm_user_state(struct hsm_user_state *hus);
-void lustre_swab_hsm_state_set(struct hsm_state_set *hss);
-
-void lustre_swab_obd_statfs(struct obd_statfs *os);
-
/* ost_body.data values for OST_BRW */
#define OBD_BRW_READ 0x01
@@ -1786,14 +1650,16 @@ struct obd_ioobj {
__u32 ioo_bufcnt; /* number of niobufs for this object */
};
+/*
+ * NOTE: IOOBJ_MAX_BRW_BITS defines the _offset_ of the max_brw field in
+ * ioo_max_brw, NOT the maximum number of bits in PTLRPC_BULK_OPS_BITS.
+ * That said, ioo_max_brw is a 32-bit field so the limit is also 16 bits.
+ */
#define IOOBJ_MAX_BRW_BITS 16
-#define IOOBJ_TYPE_MASK ((1U << IOOBJ_MAX_BRW_BITS) - 1)
#define ioobj_max_brw_get(ioo) (((ioo)->ioo_max_brw >> IOOBJ_MAX_BRW_BITS) + 1)
#define ioobj_max_brw_set(ioo, num) \
do { (ioo)->ioo_max_brw = ((num) - 1) << IOOBJ_MAX_BRW_BITS; } while (0)
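Worked example of the accessors above: ioobj_max_brw_set(ioo, 16) stores (16 - 1) << 16 in ioo_max_brw, and ioobj_max_brw_get(ioo) shifts it back down and re-adds 1, returning 16. Since the count lives in the upper 16 bits of a 32-bit field, the largest representable value is 65536.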
-void lustre_swab_obd_ioobj(struct obd_ioobj *ioo);
-
/* multiple of 8 bytes => can array */
struct niobuf_remote {
__u64 rnb_offset;
@@ -1801,8 +1667,6 @@ struct niobuf_remote {
__u32 rnb_flags;
};
-void lustre_swab_niobuf_remote(struct niobuf_remote *nbr);
-
/* lock value block communicated between the filter and llite */
/* OST_LVB_ERR_INIT is needed because the return code in rc is
@@ -1824,8 +1688,6 @@ struct ost_lvb_v1 {
__u64 lvb_blocks;
};
-void lustre_swab_ost_lvb_v1(struct ost_lvb_v1 *lvb);
-
struct ost_lvb {
__u64 lvb_size;
__s64 lvb_mtime;
@@ -1838,8 +1700,6 @@ struct ost_lvb {
__u32 lvb_padding;
};
-void lustre_swab_ost_lvb(struct ost_lvb *lvb);
-
/*
* lquota data structures
*/
@@ -1866,8 +1726,6 @@ struct obd_quotactl {
struct obd_dqblk qc_dqblk;
};
-void lustre_swab_obd_quotactl(struct obd_quotactl *q);
-
#define Q_COPY(out, in, member) (out)->member = (in)->member
#define QCTL_COPY(out, in) \
@@ -1905,8 +1763,6 @@ struct lquota_lvb {
__u64 lvb_pad1;
};
-void lustre_swab_lquota_lvb(struct lquota_lvb *lvb);
-
/* op codes */
enum quota_cmd {
QUOTA_DQACQ = 601,
@@ -1933,9 +1789,9 @@ enum mds_cmd {
MDS_PIN = 42, /* obsolete, never used in a release */
MDS_UNPIN = 43, /* obsolete, never used in a release */
MDS_SYNC = 44,
- MDS_DONE_WRITING = 45,
+ MDS_DONE_WRITING = 45, /* obsolete since 2.8.0 */
MDS_SET_INFO = 46,
- MDS_QUOTACHECK = 47,
+ MDS_QUOTACHECK = 47, /* not used since 2.4 */
MDS_QUOTACTL = 48,
MDS_GETXATTR = 49,
MDS_SETXATTR = 50, /* obsolete, now it's MDS_REINT op */
@@ -1972,8 +1828,6 @@ enum mdt_reint_cmd {
REINT_MAX
};
-void lustre_swab_generic_32s(__u32 *val);
-
/* the disposition of the intent outlines what was executed */
#define DISP_IT_EXECD 0x00000001
#define DISP_LOOKUP_EXECD 0x00000002
@@ -2031,36 +1885,19 @@ enum {
#define MDS_STATUS_CONN 1
#define MDS_STATUS_LOV 2
-/* mdt_thread_info.mti_flags. */
-enum md_op_flags {
- /* The flag indicates Size-on-MDS attributes are changed. */
- MF_SOM_CHANGE = (1 << 0),
- /* Flags indicates an epoch opens or closes. */
- MF_EPOCH_OPEN = (1 << 1),
- MF_EPOCH_CLOSE = (1 << 2),
- MF_MDC_CANCEL_FID1 = (1 << 3),
- MF_MDC_CANCEL_FID2 = (1 << 4),
- MF_MDC_CANCEL_FID3 = (1 << 5),
- MF_MDC_CANCEL_FID4 = (1 << 6),
- /* There is a pending attribute update. */
- MF_SOM_AU = (1 << 7),
- /* Cancel OST locks while getattr OST attributes. */
- MF_GETATTR_LOCK = (1 << 8),
- MF_GET_MDT_IDX = (1 << 9),
-};
-
-#define MF_SOM_LOCAL_FLAGS (MF_SOM_CHANGE | MF_EPOCH_OPEN | MF_EPOCH_CLOSE)
-
-#define LUSTRE_BFLAG_UNCOMMITTED_WRITES 0x1
-
/* these should be identical to their EXT4_*_FL counterparts, they are
* redefined here only to avoid dragging in fs/ext4/ext4.h
*/
#define LUSTRE_SYNC_FL 0x00000008 /* Synchronous updates */
#define LUSTRE_IMMUTABLE_FL 0x00000010 /* Immutable file */
#define LUSTRE_APPEND_FL 0x00000020 /* writes to file may only append */
+#define LUSTRE_NODUMP_FL 0x00000040 /* do not dump file */
#define LUSTRE_NOATIME_FL 0x00000080 /* do not update atime */
+#define LUSTRE_INDEX_FL 0x00001000 /* hash-indexed directory */
#define LUSTRE_DIRSYNC_FL 0x00010000 /* dirsync behaviour (dir only) */
+#define LUSTRE_TOPDIR_FL 0x00020000 /* Top of directory hierarchies */
+#define LUSTRE_DIRECTIO_FL 0x00100000 /* Use direct i/o */
+#define LUSTRE_INLINE_DATA_FL 0x10000000 /* Inode has inline data. */
/* Convert wire LUSTRE_*_FL to corresponding client local VFS S_* values
* for the client inode i_flags. The LUSTRE_*_FL are the Lustre wire
@@ -2113,7 +1950,7 @@ struct mdt_body {
__u32 mbo_mode;
__u32 mbo_uid;
__u32 mbo_gid;
- __u32 mbo_flags;
+ __u32 mbo_flags; /* LUSTRE_*_FL file attributes */
__u32 mbo_rdev;
__u32 mbo_nlink; /* #bytes to read in the case of MDS_READPAGE */
__u32 mbo_unused2; /* was "generation" until 2.4.0 */
@@ -2121,7 +1958,7 @@ struct mdt_body {
__u32 mbo_eadatasize;
__u32 mbo_aclsize;
__u32 mbo_max_mdsize;
- __u32 mbo_max_cookiesize;
+ __u32 mbo_unused3; /* was max_cookiesize until 2.8 */
__u32 mbo_uid_h; /* high 32-bits of uid, for FUID */
__u32 mbo_gid_h; /* high 32-bits of gid, for FUID */
__u32 mbo_padding_5; /* also fix lustre_swab_mdt_body */
@@ -2132,17 +1969,13 @@ struct mdt_body {
__u64 mbo_padding_10;
}; /* 216 */
-void lustre_swab_mdt_body(struct mdt_body *b);
-
struct mdt_ioepoch {
- struct lustre_handle handle;
- __u64 ioepoch;
- __u32 flags;
- __u32 padding;
+ struct lustre_handle mio_handle;
+ __u64 mio_unused1; /* was ioepoch */
+ __u32 mio_unused2; /* was flags */
+ __u32 mio_padding;
};
-void lustre_swab_mdt_ioepoch(struct mdt_ioepoch *b);
-
/* permissions for md_perm.mp_perm */
enum {
CFS_SETUID_PERM = 0x01,
@@ -2178,8 +2011,6 @@ struct mdt_rec_setattr {
__u32 sa_padding_5;
};
-void lustre_swab_mdt_rec_setattr(struct mdt_rec_setattr *sa);
-
/*
* Attribute flags used in mdt_rec_setattr::sa_valid.
* The kernel's #defines for ATTR_* should not be used over the network
@@ -2207,12 +2038,9 @@ void lustre_swab_mdt_rec_setattr(struct mdt_rec_setattr *sa);
#define MDS_FMODE_CLOSED 00000000
#define MDS_FMODE_EXEC 00000004
-/* IO Epoch is opened on a closed file. */
-#define MDS_FMODE_EPOCH 01000000
-/* IO Epoch is opened on a file truncate. */
-#define MDS_FMODE_TRUNC 02000000
-/* Size-on-MDS Attribute Update is pending. */
-#define MDS_FMODE_SOM 04000000
+/* MDS_FMODE_EPOCH 01000000 obsolete since 2.8.0 */
+/* MDS_FMODE_TRUNC 02000000 obsolete since 2.8.0 */
+/* MDS_FMODE_SOM 04000000 obsolete since 2.8.0 */
#define MDS_OPEN_CREATED 00000010
#define MDS_OPEN_CROSS 00000020
@@ -2258,7 +2086,7 @@ enum mds_op_bias {
MDS_CROSS_REF = 1 << 1,
MDS_VTX_BYPASS = 1 << 2,
MDS_PERM_BYPASS = 1 << 3,
- MDS_SOM = 1 << 4,
+/* MDS_SOM = 1 << 4, obsolete since 2.8.0 */
MDS_QUOTA_IGNORE = 1 << 5,
MDS_CLOSE_CLEANUP = 1 << 6,
MDS_KEEP_ORPHAN = 1 << 7,
@@ -2268,6 +2096,7 @@ enum mds_op_bias {
MDS_OWNEROVERRIDE = 1 << 11,
MDS_HSM_RELEASE = 1 << 12,
MDS_RENAME_MIGRATE = BIT(13),
+ MDS_CLOSE_LAYOUT_SWAP = BIT(14),
};
/* instance of mdt_reint_rec */
@@ -2456,8 +2285,6 @@ struct mdt_rec_reint {
__u32 rr_padding_4; /* also fix lustre_swab_mdt_rec_reint */
};
-void lustre_swab_mdt_rec_reint(struct mdt_rec_reint *rr);
-
/* lmv structures */
struct lmv_desc {
__u32 ld_tgt_count; /* how many MDS's */
@@ -2547,8 +2374,6 @@ union lmv_mds_md {
struct lmv_user_md lmv_user_md;
};
-void lustre_swab_lmv_mds_md(union lmv_mds_md *lmm);
-
static inline ssize_t lmv_mds_md_size(int stripe_count, unsigned int lmm_magic)
{
ssize_t len = -EINVAL;
@@ -2652,8 +2477,6 @@ struct lov_desc {
#define ld_magic ld_active_tgt_count /* for swabbing from llogs */
-void lustre_swab_lov_desc(struct lov_desc *ld);
-
/*
* LDLM requests:
*/
@@ -2749,24 +2572,38 @@ struct ldlm_flock_wire {
* on the resource type.
*/
-typedef union {
+union ldlm_wire_policy_data {
struct ldlm_extent l_extent;
struct ldlm_flock_wire l_flock;
struct ldlm_inodebits l_inodebits;
-} ldlm_wire_policy_data_t;
+};
union ldlm_gl_desc {
struct ldlm_gl_lquota_desc lquota_desc;
};
-void lustre_swab_gl_desc(union ldlm_gl_desc *);
+enum ldlm_intent_flags {
+ IT_OPEN = BIT(0),
+ IT_CREAT = BIT(1),
+ IT_OPEN_CREAT = BIT(1) | BIT(0),
+ IT_READDIR = BIT(2),
+ IT_GETATTR = BIT(3),
+ IT_LOOKUP = BIT(4),
+ IT_UNLINK = BIT(5),
+ IT_TRUNC = BIT(6),
+ IT_GETXATTR = BIT(7),
+ IT_EXEC = BIT(8),
+ IT_PIN = BIT(9),
+ IT_LAYOUT = BIT(10),
+ IT_QUOTA_DQACQ = BIT(11),
+ IT_QUOTA_CONN = BIT(12),
+ IT_SETXATTR = BIT(13),
+};
struct ldlm_intent {
__u64 opc;
};
-void lustre_swab_ldlm_intent(struct ldlm_intent *i);
-
struct ldlm_resource_desc {
enum ldlm_type lr_type;
__u32 lr_padding; /* also fix lustre_swab_ldlm_resource_desc */
@@ -2777,7 +2614,7 @@ struct ldlm_lock_desc {
struct ldlm_resource_desc l_resource;
enum ldlm_mode l_req_mode;
enum ldlm_mode l_granted_mode;
- ldlm_wire_policy_data_t l_policy_data;
+ union ldlm_wire_policy_data l_policy_data;
};
#define LDLM_LOCKREQ_HANDLES 2
@@ -2790,8 +2627,6 @@ struct ldlm_request {
struct lustre_handle lock_handle[LDLM_LOCKREQ_HANDLES];
};
-void lustre_swab_ldlm_request(struct ldlm_request *rq);
-
/* If LDLM_ENQUEUE, 1 slot is already occupied, 1 is available.
* Otherwise, 2 are available.
*/
@@ -2813,8 +2648,6 @@ struct ldlm_reply {
__u64 lock_policy_res2;
};
-void lustre_swab_ldlm_reply(struct ldlm_reply *r);
-
#define ldlm_flags_to_wire(flags) ((__u32)(flags))
#define ldlm_flags_from_wire(flags) ((__u64)(flags))
@@ -2858,8 +2691,6 @@ struct mgs_target_info {
char mti_params[MTI_PARAM_MAXLEN];
};
-void lustre_swab_mgs_target_info(struct mgs_target_info *oinfo);
-
struct mgs_nidtbl_entry {
__u64 mne_version; /* table version of this entry */
__u32 mne_instance; /* target instance # */
@@ -2874,8 +2705,6 @@ struct mgs_nidtbl_entry {
} u;
};
-void lustre_swab_mgs_nidtbl_entry(struct mgs_nidtbl_entry *oinfo);
-
struct mgs_config_body {
char mcb_name[MTI_NAME_MAXLEN]; /* logname */
__u64 mcb_offset; /* next index of config log to request */
@@ -2885,15 +2714,11 @@ struct mgs_config_body {
__u32 mcb_units; /* # of units for bulk transfer */
};
-void lustre_swab_mgs_config_body(struct mgs_config_body *body);
-
struct mgs_config_res {
__u64 mcr_offset; /* index of last config log */
__u64 mcr_size; /* size of the log */
};
-void lustre_swab_mgs_config_res(struct mgs_config_res *body);
-
/* Config marker flags (in config log) */
#define CM_START 0x01
#define CM_END 0x02
@@ -2913,8 +2738,6 @@ struct cfg_marker {
char cm_comment[MTI_NAME_MAXLEN];
};
-void lustre_swab_cfg_marker(struct cfg_marker *marker, int swab, int size);
-
/*
* Opcodes for multiple servers.
*/
@@ -2922,7 +2745,7 @@ void lustre_swab_cfg_marker(struct cfg_marker *marker, int swab, int size);
enum obd_cmd {
OBD_PING = 400,
OBD_LOG_CANCEL,
- OBD_QC_CALLBACK,
+ OBD_QC_CALLBACK, /* not used since 2.4 */
OBD_IDX_READ,
OBD_LAST_OPC
};
@@ -3155,23 +2978,32 @@ struct llog_gen_rec {
struct llog_rec_tail lgr_tail;
};
-/* On-disk header structure of each log object, stored in little endian order */
-#define LLOG_CHUNK_SIZE 8192
-#define LLOG_HEADER_SIZE (96)
-#define LLOG_BITMAP_BYTES (LLOG_CHUNK_SIZE - LLOG_HEADER_SIZE)
-
-#define LLOG_MIN_REC_SIZE (24) /* round(llog_rec_hdr + llog_rec_tail) */
-
/* flags for the logs */
enum llog_flag {
LLOG_F_ZAP_WHEN_EMPTY = 0x1,
LLOG_F_IS_CAT = 0x2,
LLOG_F_IS_PLAIN = 0x4,
LLOG_F_EXT_JOBID = BIT(3),
+ LLOG_F_IS_FIXSIZE = BIT(4),
+ /*
+ * Note: Flags covered by LLOG_F_EXT_MASK will be inherited from
+ * catlog to plain log, so do not add LLOG_F_IS_FIXSIZE here,
+ * because the catlog record is usually fixed size, but its plain
+ * log record can be variable
+ */
LLOG_F_EXT_MASK = LLOG_F_EXT_JOBID,
};
+/* On-disk header structure of each log object, stored in little endian order */
+#define LLOG_MIN_CHUNK_SIZE 8192
+#define LLOG_HEADER_SIZE (96) /* sizeof (llog_log_hdr) +
+ * sizeof(llh_tail) - sizeof(llh_bitmap)
+ */
+#define LLOG_BITMAP_BYTES (LLOG_MIN_CHUNK_SIZE - LLOG_HEADER_SIZE)
+#define LLOG_MIN_REC_SIZE (24) /* round(llog_rec_hdr + llog_rec_tail) */
+
struct llog_log_hdr {
struct llog_rec_hdr llh_hdr;
__s64 llh_timestamp;
@@ -3183,13 +3015,30 @@ struct llog_log_hdr {
/* for a catalog the first plain slot is next to it */
struct obd_uuid llh_tgtuuid;
__u32 llh_reserved[LLOG_HEADER_SIZE / sizeof(__u32) - 23];
+	/* These fields must always be at the end of the llog_log_hdr.
+	 * Note: llh_bitmap size is variable because the llog chunk size may
+	 * be bigger than LLOG_MIN_CHUNK_SIZE, i.e. sizeof(llog_log_hdr) can
+	 * exceed 8192 bytes. The real size is stored in llh_hdr.lrh_len,
+	 * which means llh_tail must only be referred to via LLOG_HDR_TAIL().
+	 * But since this structure is also used by the client/server llog
+	 * interface (see llog_client.c), it is kept in its original form to
+	 * avoid compatibility issues.
+	 */
__u32 llh_bitmap[LLOG_BITMAP_BYTES / sizeof(__u32)];
struct llog_rec_tail llh_tail;
} __packed;
-#define LLOG_BITMAP_SIZE(llh) (__u32)((llh->llh_hdr.lrh_len - \
- llh->llh_bitmap_offset - \
- sizeof(llh->llh_tail)) * 8)
+#undef LLOG_HEADER_SIZE
+#undef LLOG_BITMAP_BYTES
+
+#define LLOG_HDR_BITMAP_SIZE(llh) (__u32)((llh->llh_hdr.lrh_len - \
+ llh->llh_bitmap_offset - \
+ sizeof(llh->llh_tail)) * 8)
+#define LLOG_HDR_BITMAP(llh) (__u32 *)((char *)(llh) + \
+ (llh)->llh_bitmap_offset)
+#define LLOG_HDR_TAIL(llh) ((struct llog_rec_tail *)((char *)llh + \
+ llh->llh_hdr.lrh_len - \
+ sizeof(llh->llh_tail)))
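Why the accessors matter, sketched: once the chunk size exceeds LLOG_MIN_CHUNK_SIZE the bitmap grows past its declared bound, so the tail must be located from the on-disk record length rather than the fixed llh_tail member:

	static inline __u32 demo_llog_last_idx(struct llog_log_hdr *llh)
	{
		/* real header length lives in llh_hdr.lrh_len */
		return LLOG_HDR_TAIL(llh)->lrt_index;
	}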
/** log cookies are used to reference a specific log file and a record
* therein
@@ -3259,7 +3108,8 @@ struct obdo {
__u32 o_parent_ver;
struct lustre_handle o_handle; /* brw: lock handle to prolong locks
*/
- struct llog_cookie o_lcookie; /* destroy: unlink cookie from MDS
+ struct llog_cookie o_lcookie; /* destroy: unlink cookie from MDS,
+ * obsolete in 2.8, reused in OSP
*/
__u32 o_uid_h;
__u32 o_gid_h;
@@ -3333,30 +3183,11 @@ struct ost_body {
/* Key for FIEMAP to be used in get_info calls */
struct ll_fiemap_info_key {
- char name[8];
- struct obdo oa;
- struct ll_user_fiemap fiemap;
+ char lfik_name[8];
+ struct obdo lfik_oa;
+ struct fiemap lfik_fiemap;
};
-void lustre_swab_ost_body(struct ost_body *b);
-void lustre_swab_ost_last_id(__u64 *id);
-void lustre_swab_fiemap(struct ll_user_fiemap *fiemap);
-
-void lustre_swab_lov_user_md_v1(struct lov_user_md_v1 *lum);
-void lustre_swab_lov_user_md_v3(struct lov_user_md_v3 *lum);
-void lustre_swab_lov_user_md_objects(struct lov_user_ost_data *lod,
- int stripe_count);
-void lustre_swab_lov_mds_md(struct lov_mds_md *lmm);
-
-/* llog_swab.c */
-void lustre_swab_llogd_body(struct llogd_body *d);
-void lustre_swab_llog_hdr(struct llog_log_hdr *h);
-void lustre_swab_llogd_conn_body(struct llogd_conn_body *d);
-void lustre_swab_llog_rec(struct llog_rec_hdr *rec);
-
-struct lustre_cfg;
-void lustre_swab_lustre_cfg(struct lustre_cfg *lcfg);
-
/* Functions for dumping PTLRPC fields */
void dump_rniobuf(struct niobuf_remote *rnb);
void dump_ioo(struct obd_ioobj *nb);
@@ -3394,8 +3225,6 @@ struct lustre_capa {
__u8 lc_hmac[CAPA_HMAC_MAX_LEN]; /** HMAC */
} __packed;
-void lustre_swab_lustre_capa(struct lustre_capa *c);
-
/** lustre_capa::lc_opc */
enum {
CAPA_OPC_BODY_WRITE = 1 << 0, /**< write object data */
@@ -3458,8 +3287,6 @@ struct getinfo_fid2path {
char gf_path[0];
} __packed;
-void lustre_swab_fid2path(struct getinfo_fid2path *gf);
-
/** path2parent request/reply structures */
struct getparent {
struct lu_fid gp_fid; /**< parent FID */
@@ -3486,8 +3313,6 @@ struct layout_intent {
__u64 li_end;
};
-void lustre_swab_layout_intent(struct layout_intent *li);
-
/**
* On the wire version of hsm_progress structure.
*
@@ -3506,13 +3331,6 @@ struct hsm_progress_kernel {
__u64 hpk_padding2;
} __packed;
-void lustre_swab_hsm_user_state(struct hsm_user_state *hus);
-void lustre_swab_hsm_current_action(struct hsm_current_action *action);
-void lustre_swab_hsm_progress_kernel(struct hsm_progress_kernel *hpk);
-void lustre_swab_hsm_user_state(struct hsm_user_state *hus);
-void lustre_swab_hsm_user_item(struct hsm_user_item *hui);
-void lustre_swab_hsm_request(struct hsm_request *hr);
-
/** layout swap request structure
* fid1 and fid2 are in mdt_body
*/
@@ -3520,8 +3338,6 @@ struct mdc_swap_layouts {
__u64 msl_flags;
} __packed;
-void lustre_swab_swap_layouts(struct mdc_swap_layouts *msl);
-
struct close_data {
struct lustre_handle cd_handle;
struct lu_fid cd_fid;
@@ -3529,7 +3345,5 @@ struct close_data {
__u64 cd_reserved[8];
};
-void lustre_swab_close_data(struct close_data *data);
-
#endif
/** @} lustreidl */
diff --git a/drivers/staging/lustre/lustre/include/lustre/lustre_ioctl.h b/drivers/staging/lustre/lustre/include/lustre/lustre_ioctl.h
index f3d7c94c3b50..eb08df33b2db 100644
--- a/drivers/staging/lustre/lustre/include/lustre/lustre_ioctl.h
+++ b/drivers/staging/lustre/lustre/include/lustre/lustre_ioctl.h
@@ -363,8 +363,8 @@ obd_ioctl_unpack(struct obd_ioctl_data *data, char *pbuf, int max_len)
/* OBD_IOC_LOV_GETSTRIPE 155 LL_IOC_LOV_GETSTRIPE */
/* OBD_IOC_LOV_SETEA 156 LL_IOC_LOV_SETEA */
/* lustre/lustre_user.h 157-159 */
-#define OBD_IOC_QUOTACHECK _IOW('f', 160, int)
-#define OBD_IOC_POLL_QUOTACHECK _IOR('f', 161, struct if_quotacheck *)
+/* OBD_IOC_QUOTACHECK _IOW('f', 160, int) */
+/* OBD_IOC_POLL_QUOTACHECK _IOR('f', 161, struct if_quotacheck *) */
#define OBD_IOC_QUOTACTL _IOWR('f', 162, struct if_quotactl)
/* lustre/lustre_user.h 163-176 */
#define OBD_IOC_CHANGELOG_REG _IOW('f', 177, struct obd_ioctl_data)
diff --git a/drivers/staging/lustre/lustre/include/lustre/lustre_user.h b/drivers/staging/lustre/lustre/include/lustre/lustre_user.h
index 6fc985571cba..3301ad652db1 100644
--- a/drivers/staging/lustre/lustre/include/lustre/lustre_user.h
+++ b/drivers/staging/lustre/lustre/include/lustre/lustre_user.h
@@ -63,9 +63,13 @@
#if __BITS_PER_LONG != 64 || defined(__ARCH_WANT_STAT64)
typedef struct stat64 lstat_t;
#define lstat_f lstat64
+#define fstat_f fstat64
+#define fstatat_f fstatat64
#else
typedef struct stat lstat_t;
#define lstat_f lstat
+#define fstat_f fstat
+#define fstatat_f fstatat
#endif
#define HAVE_LOV_USER_MDS_DATA
@@ -82,7 +86,6 @@ typedef struct stat lstat_t;
#define FSFILT_IOC_SETVERSION _IOW('f', 4, long)
#define FSFILT_IOC_GETVERSION_OLD _IOR('v', 1, long)
#define FSFILT_IOC_SETVERSION_OLD _IOW('v', 2, long)
-#define FSFILT_IOC_FIEMAP _IOWR('f', 11, struct ll_user_fiemap)
#endif
/* FIEMAP flags supported by Lustre */
@@ -235,7 +238,7 @@ struct ost_id {
/* #define LL_IOC_POLL_QUOTACHECK 161 OBD_IOC_POLL_QUOTACHECK */
/* #define LL_IOC_QUOTACTL 162 OBD_IOC_QUOTACTL */
#define IOC_OBD_STATFS _IOWR('f', 164, struct obd_statfs *)
-#define IOC_LOV_GETINFO _IOWR('f', 165, struct lov_user_mds_data *)
+/* IOC_LOV_GETINFO 165 obsolete */
#define LL_IOC_FLUSHCTX _IOW('f', 166, long)
/* LL_IOC_RMTACL 167 obsolete */
#define LL_IOC_GETOBDCOUNT _IOR('f', 168, long)
@@ -343,6 +346,9 @@ enum ll_lease_type {
#define LOV_ALL_STRIPES 0xffff /* only valid for directories */
#define LOV_V1_INSANE_STRIPE_COUNT 65532 /* maximum stripe count bz13933 */
+#define XATTR_LUSTRE_PREFIX "lustre."
+#define XATTR_LUSTRE_LOV "lustre.lov"
+
#define lov_user_ost_data lov_user_ost_data_v1
struct lov_user_ost_data_v1 { /* per-stripe data structure */
struct ost_id l_ost_oi; /* OST object ID */
@@ -451,8 +457,6 @@ static inline int lmv_user_md_size(int stripes, int lmm_magic)
stripes * sizeof(struct lmv_user_mds_data);
}
-void lustre_swab_lmv_user_md(struct lmv_user_md *lum);
-
struct ll_recreate_obj {
__u64 lrc_id;
__u32 lrc_ost_idx;
@@ -522,25 +526,20 @@ static inline void obd_uuid2fsname(char *buf, char *uuid, int buflen)
}
/* printf display format
- * e.g. printf("file FID is "DFID"\n", PFID(fid));
+ * usage: printf("file FID is "DFID"\n", PFID(fid));
*/
#define FID_NOBRACE_LEN 40
#define FID_LEN (FID_NOBRACE_LEN + 2)
#define DFID_NOBRACE "%#llx:0x%x:0x%x"
#define DFID "["DFID_NOBRACE"]"
-#define PFID(fid) \
- (fid)->f_seq, \
- (fid)->f_oid, \
- (fid)->f_ver
+#define PFID(fid) (unsigned long long)(fid)->f_seq, (fid)->f_oid, (fid)->f_ver
-/* scanf input parse format -- strip '[' first.
- * e.g. sscanf(fidstr, SFID, RFID(&fid));
+/* scanf input parse format for fids in DFID_NOBRACE format
+ * The caller needs to strip the '[' from DFID first, or use "["SFID"]".
+ * usage: sscanf(fidstr, SFID, RFID(&fid));
*/
#define SFID "0x%llx:0x%x:0x%x"
-#define RFID(fid) \
- &((fid)->f_seq), \
- &((fid)->f_oid), \
- &((fid)->f_ver)
+#define RFID(fid) &((fid)->f_seq), &((fid)->f_oid), &((fid)->f_ver)
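Usage sketch for the helpers above, round-tripping a FID through its string form (names are illustrative):

	static inline void demo_fid_roundtrip(const struct lu_fid *fid,
					      char *buf)
	{
		struct lu_fid parsed;

		sprintf(buf, DFID, PFID(fid));		  /* "[seq:oid:ver]" */
		sscanf(buf, "[" SFID "]", RFID(&parsed)); /* strip brackets */
	}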
/********* Quotas **********/
@@ -551,23 +550,18 @@ static inline void obd_uuid2fsname(char *buf, char *uuid, int buflen)
#define Q_FINVALIDATE 0x800104 /* deprecated as of 2.4 */
/* these must be explicitly translated into linux Q_* in ll_dir_ioctl */
-#define LUSTRE_Q_QUOTAON 0x800002 /* turn quotas on */
-#define LUSTRE_Q_QUOTAOFF 0x800003 /* turn quotas off */
+#define LUSTRE_Q_QUOTAON 0x800002 /* deprecated as of 2.4 */
+#define LUSTRE_Q_QUOTAOFF 0x800003 /* deprecated as of 2.4 */
#define LUSTRE_Q_GETINFO 0x800005 /* get information about quota files */
#define LUSTRE_Q_SETINFO 0x800006 /* set information about quota files */
#define LUSTRE_Q_GETQUOTA 0x800007 /* get user quota structure */
#define LUSTRE_Q_SETQUOTA 0x800008 /* set user quota structure */
/* lustre-specific control commands */
-#define LUSTRE_Q_INVALIDATE 0x80000b /* invalidate quota data */
-#define LUSTRE_Q_FINVALIDATE 0x80000c /* invalidate filter quota data */
+#define LUSTRE_Q_INVALIDATE 0x80000b /* deprecated as of 2.4 */
+#define LUSTRE_Q_FINVALIDATE 0x80000c /* deprecated as of 2.4 */
#define UGQUOTA 2 /* set both USRQUOTA and GRPQUOTA */
-struct if_quotacheck {
- char obd_type[16];
- struct obd_uuid obd_uuid;
-};
-
#define IDENTITY_DOWNCALL_MAGIC 0x6d6dd629
/* permission */
@@ -649,6 +643,7 @@ struct if_quotactl {
#define SWAP_LAYOUTS_CHECK_DV2 (1 << 1)
#define SWAP_LAYOUTS_KEEP_MTIME (1 << 2)
#define SWAP_LAYOUTS_KEEP_ATIME (1 << 3)
+#define SWAP_LAYOUTS_CLOSE BIT(4)
/* Swap XATTR_NAME_HSM as well, only on the MDT so far */
#define SWAP_LAYOUTS_MDS_HSM (1 << 31)
@@ -999,6 +994,7 @@ struct ioc_data_version {
* See HSM_FLAGS below.
*/
enum hsm_states {
+ HS_NONE = 0x00000000,
HS_EXISTS = 0x00000001,
HS_DIRTY = 0x00000002,
HS_RELEASED = 0x00000004,
diff --git a/drivers/staging/lustre/lustre/include/lustre_compat.h b/drivers/staging/lustre/lustre/include/lustre_compat.h
index 567c438e93cb..300e96fb032a 100644
--- a/drivers/staging/lustre/lustre/include/lustre_compat.h
+++ b/drivers/staging/lustre/lustre/include/lustre_compat.h
@@ -74,4 +74,6 @@
# define ext2_find_next_zero_bit find_next_zero_bit_le
#endif
+#define TIMES_SET_FLAGS (ATTR_MTIME_SET | ATTR_ATIME_SET | ATTR_TIMES_SET)
+
#endif /* _LUSTRE_COMPAT_H */
diff --git a/drivers/staging/lustre/lustre/include/lustre_dlm.h b/drivers/staging/lustre/lustre/include/lustre_dlm.h
index d03534432624..b7e61d082e55 100644
--- a/drivers/staging/lustre/lustre/include/lustre_dlm.h
+++ b/drivers/staging/lustre/lustre/include/lustre_dlm.h
@@ -59,7 +59,7 @@ struct obd_device;
#define OBD_LDLM_DEVICENAME "ldlm"
#define LDLM_DEFAULT_LRU_SIZE (100 * num_online_cpus())
-#define LDLM_DEFAULT_MAX_ALIVE (cfs_time_seconds(36000))
+#define LDLM_DEFAULT_MAX_ALIVE (cfs_time_seconds(3900)) /* 65 min */
#define LDLM_DEFAULT_PARALLEL_AST_LIMIT 1024
/**
@@ -86,10 +86,10 @@ enum ldlm_error {
* decisions about lack of conflicts or do any autonomous lock granting without
* first speaking to a server.
*/
-typedef enum {
+enum ldlm_side {
LDLM_NAMESPACE_SERVER = 1 << 0,
LDLM_NAMESPACE_CLIENT = 1 << 1
-} ldlm_side_t;
+};
/**
* The blocking callback is overloaded to perform two functions. These flags
@@ -359,7 +359,7 @@ struct ldlm_namespace {
struct obd_device *ns_obd;
/** Flag indicating if namespace is on client instead of server */
- ldlm_side_t ns_client;
+ enum ldlm_side ns_client;
/** Resource hash table for namespace. */
struct cfs_hash *ns_rs_hash;
@@ -550,20 +550,18 @@ struct ldlm_flock {
__u64 owner;
__u64 blocking_owner;
struct obd_export *blocking_export;
- /* Protected by the hash lock */
- __u32 blocking_refs;
__u32 pid;
};
-typedef union {
+union ldlm_policy_data {
struct ldlm_extent l_extent;
struct ldlm_flock l_flock;
struct ldlm_inodebits l_inodebits;
-} ldlm_policy_data_t;
+};
void ldlm_convert_policy_to_local(struct obd_export *exp, enum ldlm_type type,
- const ldlm_wire_policy_data_t *wpolicy,
- ldlm_policy_data_t *lpolicy);
+ const union ldlm_wire_policy_data *wpolicy,
+ union ldlm_policy_data *lpolicy);
enum lvb_type {
LVB_T_NONE = 0,
@@ -692,7 +690,7 @@ struct ldlm_lock {
* Representation of private data specific for a lock type.
* Examples are: extent range for extent lock or bitmask for ibits locks
*/
- ldlm_policy_data_t l_policy_data;
+ union ldlm_policy_data l_policy_data;
/**
* Lock state flags. Protected by lr_lock.
@@ -967,8 +965,8 @@ struct ldlm_ast_work {
* Common ldlm_enqueue parameters
*/
struct ldlm_enqueue_info {
- __u32 ei_type; /** Type of the lock being enqueued. */
- __u32 ei_mode; /** Mode of the lock being enqueued. */
+ enum ldlm_type ei_type; /** Type of the lock being enqueued. */
+ enum ldlm_mode ei_mode; /** Mode of the lock being enqueued. */
void *ei_cb_bl; /** blocking lock callback */
void *ei_cb_cp; /** lock completion callback */
void *ei_cb_gl; /** lock glimpse callback */
@@ -979,7 +977,7 @@ struct ldlm_enqueue_info {
extern struct obd_ops ldlm_obd_ops;
extern char *ldlm_lockname[];
-char *ldlm_it2str(int it);
+const char *ldlm_it2str(enum ldlm_intent_flags it);
/**
* Just a fancy CDEBUG call with log level preset to LDLM_DEBUG.
@@ -1168,16 +1166,18 @@ do { \
struct ldlm_lock *ldlm_lock_get(struct ldlm_lock *lock);
void ldlm_lock_put(struct ldlm_lock *lock);
void ldlm_lock2desc(struct ldlm_lock *lock, struct ldlm_lock_desc *desc);
-void ldlm_lock_addref(const struct lustre_handle *lockh, __u32 mode);
-int ldlm_lock_addref_try(const struct lustre_handle *lockh, __u32 mode);
-void ldlm_lock_decref(const struct lustre_handle *lockh, __u32 mode);
-void ldlm_lock_decref_and_cancel(const struct lustre_handle *lockh, __u32 mode);
+void ldlm_lock_addref(const struct lustre_handle *lockh, enum ldlm_mode mode);
+int ldlm_lock_addref_try(const struct lustre_handle *lockh,
+ enum ldlm_mode mode);
+void ldlm_lock_decref(const struct lustre_handle *lockh, enum ldlm_mode mode);
+void ldlm_lock_decref_and_cancel(const struct lustre_handle *lockh,
+ enum ldlm_mode mode);
void ldlm_lock_fail_match_locked(struct ldlm_lock *lock);
void ldlm_lock_allow_match(struct ldlm_lock *lock);
void ldlm_lock_allow_match_locked(struct ldlm_lock *lock);
enum ldlm_mode ldlm_lock_match(struct ldlm_namespace *ns, __u64 flags,
const struct ldlm_res_id *,
- enum ldlm_type type, ldlm_policy_data_t *,
+ enum ldlm_type type, union ldlm_policy_data *,
enum ldlm_mode mode, struct lustre_handle *,
int unref);
enum ldlm_mode ldlm_revalidate_lock_handle(const struct lustre_handle *lockh,
@@ -1189,7 +1189,7 @@ void ldlm_unlink_lock_skiplist(struct ldlm_lock *req);
/* resource.c */
struct ldlm_namespace *
ldlm_namespace_new(struct obd_device *obd, char *name,
- ldlm_side_t client, enum ldlm_appetite apt,
+ enum ldlm_side client, enum ldlm_appetite apt,
enum ldlm_ns_type ns_type);
int ldlm_namespace_cleanup(struct ldlm_namespace *ns, __u64 flags);
void ldlm_namespace_get(struct ldlm_namespace *ns);
@@ -1208,7 +1208,7 @@ void ldlm_resource_add_lock(struct ldlm_resource *res,
struct ldlm_lock *lock);
void ldlm_resource_unlink_lock(struct ldlm_lock *lock);
void ldlm_res2desc(struct ldlm_resource *res, struct ldlm_resource_desc *desc);
-void ldlm_dump_all_namespaces(ldlm_side_t client, int level);
+void ldlm_dump_all_namespaces(enum ldlm_side client, int level);
void ldlm_namespace_dump(int level, struct ldlm_namespace *);
void ldlm_resource_dump(int level, struct ldlm_resource *);
int ldlm_lock_change_resource(struct ldlm_namespace *, struct ldlm_lock *,
@@ -1241,7 +1241,7 @@ int ldlm_completion_ast(struct ldlm_lock *lock, __u64 flags, void *data);
int ldlm_cli_enqueue(struct obd_export *exp, struct ptlrpc_request **reqp,
struct ldlm_enqueue_info *einfo,
const struct ldlm_res_id *res_id,
- ldlm_policy_data_t const *policy, __u64 *flags,
+ union ldlm_policy_data const *policy, __u64 *flags,
void *lvb, __u32 lvb_len, enum lvb_type lvb_type,
struct lustre_handle *lockh, int async);
int ldlm_prep_enqueue_req(struct obd_export *exp,
@@ -1265,13 +1265,13 @@ int ldlm_cli_cancel_unused(struct ldlm_namespace *, const struct ldlm_res_id *,
enum ldlm_cancel_flags flags, void *opaque);
int ldlm_cli_cancel_unused_resource(struct ldlm_namespace *ns,
const struct ldlm_res_id *res_id,
- ldlm_policy_data_t *policy,
+ union ldlm_policy_data *policy,
enum ldlm_mode mode,
enum ldlm_cancel_flags flags,
void *opaque);
int ldlm_cancel_resource_local(struct ldlm_resource *res,
struct list_head *cancels,
- ldlm_policy_data_t *policy,
+ union ldlm_policy_data *policy,
enum ldlm_mode mode, __u64 lock_flags,
enum ldlm_cancel_flags cancel_flags,
void *opaque);
@@ -1333,7 +1333,7 @@ int ldlm_pools_init(void);
void ldlm_pools_fini(void);
int ldlm_pool_init(struct ldlm_pool *pl, struct ldlm_namespace *ns,
- int idx, ldlm_side_t client);
+ int idx, enum ldlm_side client);
void ldlm_pool_fini(struct ldlm_pool *pl);
void ldlm_pool_add(struct ldlm_pool *pl, struct ldlm_lock *lock);
void ldlm_pool_del(struct ldlm_pool *pl, struct ldlm_lock *lock);
diff --git a/drivers/staging/lustre/lustre/include/lustre_fid.h b/drivers/staging/lustre/lustre/include/lustre_fid.h
index 316780693193..b5a1aadbcb93 100644
--- a/drivers/staging/lustre/lustre/include/lustre_fid.h
+++ b/drivers/staging/lustre/lustre/include/lustre_fid.h
@@ -150,6 +150,7 @@
#include "../../include/linux/libcfs/libcfs.h"
#include "lustre/lustre_idl.h"
+#include "seq_range.h"
struct lu_env;
struct lu_site;
diff --git a/drivers/staging/lustre/lustre/include/lustre_fld.h b/drivers/staging/lustre/lustre/include/lustre_fld.h
index 932410d3e3cc..6ef1b03cb986 100644
--- a/drivers/staging/lustre/lustre/include/lustre_fld.h
+++ b/drivers/staging/lustre/lustre/include/lustre_fld.h
@@ -103,8 +103,6 @@ struct lu_client_fld {
/** Client fld debugfs entry name. */
char lcf_name[LUSTRE_MDT_MAXNAMELEN];
-
- int lcf_flags;
};
/* Client methods */
diff --git a/drivers/staging/lustre/lustre/include/lustre_ha.h b/drivers/staging/lustre/lustre/include/lustre_ha.h
index cde7ed702c86..dec1e99d594d 100644
--- a/drivers/staging/lustre/lustre/include/lustre_ha.h
+++ b/drivers/staging/lustre/lustre/include/lustre_ha.h
@@ -53,6 +53,7 @@ void ptlrpc_activate_import(struct obd_import *imp);
void ptlrpc_deactivate_import(struct obd_import *imp);
void ptlrpc_invalidate_import(struct obd_import *imp);
void ptlrpc_fail_import(struct obd_import *imp, __u32 conn_cnt);
+void ptlrpc_pinger_force(struct obd_import *imp);
/** @} ha */
diff --git a/drivers/staging/lustre/lustre/include/lustre_import.h b/drivers/staging/lustre/lustre/include/lustre_import.h
index 5461ba33d90c..f0c931ce1a67 100644
--- a/drivers/staging/lustre/lustre/include/lustre_import.h
+++ b/drivers/staging/lustre/lustre/include/lustre_import.h
@@ -185,6 +185,11 @@ struct obd_import {
struct list_head *imp_replay_cursor;
/** @} */
+ /** List of not replied requests */
+ struct list_head imp_unreplied_list;
+ /** Known maximal replied XID */
+ __u64 imp_known_replied_xid;
+
/** obd device for this import */
struct obd_device *imp_obd;
@@ -294,7 +299,9 @@ struct obd_import {
*/
imp_force_reconnect:1,
/* import has tried to connect with server */
- imp_connect_tried:1;
+ imp_connect_tried:1,
+ /* connected but not FULL yet */
+ imp_connected:1;
__u32 imp_connect_op;
struct obd_connect_data imp_connect_data;
__u64 imp_connect_flags_orig;
diff --git a/drivers/staging/lustre/lustre/include/lustre_lib.h b/drivers/staging/lustre/lustre/include/lustre_lib.h
index 6b231913ba2e..27f3148c4344 100644
--- a/drivers/staging/lustre/lustre/include/lustre_lib.h
+++ b/drivers/staging/lustre/lustre/include/lustre_lib.h
@@ -350,8 +350,6 @@ do { \
l_wait_event_exclusive_head(wq, condition, &lwi); \
})
-#define LIBLUSTRE_CLIENT (0)
-
/** @} lib */
#endif /* _LUSTRE_LIB_H */
diff --git a/drivers/staging/lustre/lustre/include/lustre_lmv.h b/drivers/staging/lustre/lustre/include/lustre_lmv.h
index d7f7afa8dfa7..5aa3645e64dc 100644
--- a/drivers/staging/lustre/lustre/include/lustre_lmv.h
+++ b/drivers/staging/lustre/lustre/include/lustre_lmv.h
@@ -76,18 +76,7 @@ lsm_md_eq(const struct lmv_stripe_md *lsm1, const struct lmv_stripe_md *lsm2)
union lmv_mds_md;
-int lmv_unpack_md(struct obd_export *exp, struct lmv_stripe_md **lsmp,
- const union lmv_mds_md *lmm, int stripe_count);
-
-static inline int lmv_alloc_memmd(struct lmv_stripe_md **lsmp, int stripe_count)
-{
- return lmv_unpack_md(NULL, lsmp, NULL, stripe_count);
-}
-
-static inline void lmv_free_memmd(struct lmv_stripe_md *lsm)
-{
- lmv_unpack_md(NULL, &lsm, NULL, 0);
-}
+void lmv_free_memmd(struct lmv_stripe_md *lsm);
static inline void lmv1_le_to_cpu(struct lmv_mds_md_v1 *lmv_dst,
const struct lmv_mds_md_v1 *lmv_src)
diff --git a/drivers/staging/lustre/lustre/include/lustre_log.h b/drivers/staging/lustre/lustre/include/lustre_log.h
index 995b266932e3..35e37eb1bc2c 100644
--- a/drivers/staging/lustre/lustre/include/lustre_log.h
+++ b/drivers/staging/lustre/lustre/include/lustre_log.h
@@ -214,6 +214,7 @@ struct llog_handle {
spinlock_t lgh_hdr_lock; /* protect lgh_hdr data */
struct llog_logid lgh_id; /* id of this log */
struct llog_log_hdr *lgh_hdr;
+ size_t lgh_hdr_size;
int lgh_last_idx;
int lgh_cur_idx; /* used during llog_process */
__u64 lgh_cur_offset; /* used during llog_process */
@@ -244,6 +245,11 @@ struct llog_ctxt {
struct mutex loc_mutex; /* protect loc_imp */
atomic_t loc_refcount;
long loc_flags; /* flags, see above defines */
+ /*
+ * llog chunk size, and llog record size can not be bigger than
+ * loc_chunk_size
+ */
+ __u32 loc_chunk_size;
};
#define LLOG_PROC_BREAK 0x0001
diff --git a/drivers/staging/lustre/lustre/include/lustre_mdc.h b/drivers/staging/lustre/lustre/include/lustre_mdc.h
index 8fc2d3f2dfd6..198ceb0c66f9 100644
--- a/drivers/staging/lustre/lustre/include/lustre_mdc.h
+++ b/drivers/staging/lustre/lustre/include/lustre_mdc.h
@@ -156,16 +156,39 @@ static inline void mdc_put_rpc_lock(struct mdc_rpc_lock *lck,
mutex_unlock(&lck->rpcl_mutex);
}
+static inline void mdc_get_mod_rpc_slot(struct ptlrpc_request *req,
+ struct lookup_intent *it)
+{
+ struct client_obd *cli = &req->rq_import->imp_obd->u.cli;
+ u32 opc;
+ u16 tag;
+
+ opc = lustre_msg_get_opc(req->rq_reqmsg);
+ tag = obd_get_mod_rpc_slot(cli, opc, it);
+ lustre_msg_set_tag(req->rq_reqmsg, tag);
+}
+
+static inline void mdc_put_mod_rpc_slot(struct ptlrpc_request *req,
+ struct lookup_intent *it)
+{
+ struct client_obd *cli = &req->rq_import->imp_obd->u.cli;
+ u32 opc;
+ u16 tag;
+
+ opc = lustre_msg_get_opc(req->rq_reqmsg);
+ tag = lustre_msg_get_tag(req->rq_reqmsg);
+ obd_put_mod_rpc_slot(cli, opc, it, tag);
+}
+
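A rough sketch of how the new helpers bracket a modifying RPC; the ptlrpc_queue_wait() send path is shown only for illustration and is not part of this patch:

	mdc_get_mod_rpc_slot(req, it);	/* reserve a slot and tag rq_reqmsg */
	rc = ptlrpc_queue_wait(req);
	mdc_put_mod_rpc_slot(req, it);	/* read the tag back and free the slot */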
/**
- * Update the maximum possible easize and cookiesize.
+ * Update the maximum possible easize.
*
- * The values are learned from ptlrpc replies sent by the MDT. The
- * default easize and cookiesize is initialized to the minimum value but
- * allowed to grow up to a single page in size if required to handle the
+ * This value is learned from ptlrpc replies sent by the MDT. The
+ * default easize is initialized to the minimum value but allowed
+ * to grow up to a single page in size if required to handle the
* common case.
*
- * \see client_obd::cl_default_mds_easize and
- * client_obd::cl_default_mds_cookiesize
+ * \see client_obd::cl_default_mds_easize
*
* \param[in] exp export for MDC device
* \param[in] body body of ptlrpc reply from MDT
@@ -176,7 +199,7 @@ static inline void mdc_update_max_ea_from_body(struct obd_export *exp,
{
if (body->mbo_valid & OBD_MD_FLMODEASIZE) {
struct client_obd *cli = &exp->exp_obd->u.cli;
- u32 def_cookiesize, def_easize;
+ u32 def_easize;
if (cli->cl_max_mds_easize < body->mbo_max_mdsize)
cli->cl_max_mds_easize = body->mbo_max_mdsize;
@@ -184,13 +207,6 @@ static inline void mdc_update_max_ea_from_body(struct obd_export *exp,
def_easize = min_t(__u32, body->mbo_max_mdsize,
OBD_MAX_DEFAULT_EA_SIZE);
cli->cl_default_mds_easize = def_easize;
-
- if (cli->cl_max_mds_cookiesize < body->mbo_max_cookiesize)
- cli->cl_max_mds_cookiesize = body->mbo_max_cookiesize;
-
- def_cookiesize = min_t(__u32, body->mbo_max_cookiesize,
- OBD_MAX_DEFAULT_COOKIE_SIZE);
- cli->cl_default_mds_cookiesize = def_cookiesize;
}
}
diff --git a/drivers/staging/lustre/lustre/include/lustre_net.h b/drivers/staging/lustre/lustre/include/lustre_net.h
index e9aba99ee52a..411eb0dc7f38 100644
--- a/drivers/staging/lustre/lustre/include/lustre_net.h
+++ b/drivers/staging/lustre/lustre/include/lustre_net.h
@@ -50,6 +50,7 @@
* @{
*/
+#include <linux/uio.h>
#include "../../include/linux/libcfs/libcfs.h"
#include "../../include/linux/lnet/nidstr.h"
#include "../../include/linux/lnet/api.h"
@@ -68,13 +69,17 @@
#define PTLRPC_MD_OPTIONS 0
/**
- * Max # of bulk operations in one request.
+ * log2 max # of bulk operations in one request: 2=4MB/RPC, 5=32MB/RPC, ...
* In order for the client and server to properly negotiate the maximum
* possible transfer size, PTLRPC_BULK_OPS_COUNT must be a power-of-two
* value. The client is free to limit the actual RPC size for any bulk
* transfer via cl_max_pages_per_rpc to some non-power-of-two value.
+ * NOTE: This is limited to 16 (=64GB RPCs) by IOOBJ_MAX_BRW_BITS.
*/
-#define PTLRPC_BULK_OPS_BITS 2
+#define PTLRPC_BULK_OPS_BITS 4
+#if PTLRPC_BULK_OPS_BITS > 16
+#error "More than 65536 BRW RPCs not allowed by IOOBJ_MAX_BRW_BITS."
+#endif
#define PTLRPC_BULK_OPS_COUNT (1U << PTLRPC_BULK_OPS_BITS)
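Following the scale given in the comment above (one 1MB bulk MD per operation), the new value works out as:

	/* PTLRPC_BULK_OPS_BITS = 4 -> PTLRPC_BULK_OPS_COUNT = 1U << 4 = 16,
	 * i.e. up to 16 x 1MB = 16MB per bulk RPC (previously 2 -> 4MB).
	 */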
/**
* PTLRPC_BULK_OPS_MASK is for the convenience of the client only, and
@@ -437,6 +442,10 @@ struct ptlrpc_reply_state {
unsigned long rs_committed:1;/* the transaction was committed
* and the rs was dispatched
*/
+ atomic_t rs_refcount; /* number of users */
+ /** Number of locks awaiting client ACK */
+ int rs_nlocks;
+
/** Size of the state */
int rs_size;
/** opcode */
@@ -449,7 +458,6 @@ struct ptlrpc_reply_state {
struct ptlrpc_service_part *rs_svcpt;
/** Lnet metadata handle for the reply */
lnet_handle_md_t rs_md_h;
- atomic_t rs_refcount;
/** Context for the service thread */
struct ptlrpc_svc_ctx *rs_svc_ctx;
@@ -466,8 +474,6 @@ struct ptlrpc_reply_state {
*/
struct lustre_msg *rs_msg; /* reply message */
- /** Number of locks awaiting client ACK */
- int rs_nlocks;
/** Handles of locks awaiting client reply ACK */
struct lustre_handle rs_locks[RS_MAX_LOCKS];
/** Lock modes of locks in \a rs_locks */
@@ -515,717 +521,7 @@ struct lu_env;
struct ldlm_lock;
-/**
- * \defgroup nrs Network Request Scheduler
- * @{
- */
-struct ptlrpc_nrs_policy;
-struct ptlrpc_nrs_resource;
-struct ptlrpc_nrs_request;
-
-/**
- * NRS control operations.
- *
- * These are common for all policies.
- */
-enum ptlrpc_nrs_ctl {
- /**
- * Not a valid opcode.
- */
- PTLRPC_NRS_CTL_INVALID,
- /**
- * Activate the policy.
- */
- PTLRPC_NRS_CTL_START,
- /**
- * Reserved for multiple primary policies, which may be a possibility
- * in the future.
- */
- PTLRPC_NRS_CTL_STOP,
- /**
- * Policies can start using opcodes from this value and onwards for
- * their own purposes; the assigned value itself is arbitrary.
- */
- PTLRPC_NRS_CTL_1ST_POL_SPEC = 0x20,
-};
-
-/**
- * ORR policy operations
- */
-enum nrs_ctl_orr {
- NRS_CTL_ORR_RD_QUANTUM = PTLRPC_NRS_CTL_1ST_POL_SPEC,
- NRS_CTL_ORR_WR_QUANTUM,
- NRS_CTL_ORR_RD_OFF_TYPE,
- NRS_CTL_ORR_WR_OFF_TYPE,
- NRS_CTL_ORR_RD_SUPP_REQ,
- NRS_CTL_ORR_WR_SUPP_REQ,
-};
-
-/**
- * NRS policy operations.
- *
- * These determine the behaviour of a policy, and are called in response to
- * NRS core events.
- */
-struct ptlrpc_nrs_pol_ops {
- /**
- * Called during policy registration; this operation is optional.
- *
- * \param[in,out] policy The policy being initialized
- */
- int (*op_policy_init)(struct ptlrpc_nrs_policy *policy);
- /**
- * Called during policy unregistration; this operation is optional.
- *
- * \param[in,out] policy The policy being unregistered/finalized
- */
- void (*op_policy_fini)(struct ptlrpc_nrs_policy *policy);
- /**
- * Called when activating a policy via lprocfs; policies allocate and
- * initialize their resources here; this operation is optional.
- *
- * \param[in,out] policy The policy being started
- *
- * \see nrs_policy_start_locked()
- */
- int (*op_policy_start)(struct ptlrpc_nrs_policy *policy);
- /**
- * Called when deactivating a policy via lprocfs; policies deallocate
- * their resources here; this operation is optional
- *
- * \param[in,out] policy The policy being stopped
- *
- * \see nrs_policy_stop0()
- */
- void (*op_policy_stop)(struct ptlrpc_nrs_policy *policy);
- /**
- * Used for policy-specific operations; i.e. not generic ones like
- * \e PTLRPC_NRS_CTL_START and \e PTLRPC_NRS_CTL_GET_INFO; analogous
- * to an ioctl; this operation is optional.
- *
- * \param[in,out] policy The policy carrying out operation \a opc
- * \param[in] opc The command operation being carried out
- * \param[in,out] arg An generic buffer for communication between the
- * user and the control operation
- *
- * \retval -ve error
- * \retval 0 success
- *
- * \see ptlrpc_nrs_policy_control()
- */
- int (*op_policy_ctl)(struct ptlrpc_nrs_policy *policy,
- enum ptlrpc_nrs_ctl opc, void *arg);
-
- /**
- * Called when obtaining references to the resources of the resource
- * hierarchy for a request that has arrived for handling at the PTLRPC
- * service. Policies should return -ve for requests they do not wish
- * to handle. This operation is mandatory.
- *
- * \param[in,out] policy The policy we're getting resources for.
- * \param[in,out] nrq The request we are getting resources for.
- * \param[in] parent The parent resource of the resource being
- * requested; set to NULL if none.
- * \param[out] resp The resource is to be returned here; the
- * fallback policy in an NRS head should
- * \e always return a non-NULL pointer value.
- * \param[in] moving_req When set, signifies that this is an attempt
- * to obtain resources for a request being moved
- * to the high-priority NRS head by
- * ldlm_lock_reorder_req().
- * This implies two things:
- * 1. We are under obd_export::exp_rpc_lock and
- * so should not sleep.
- * 2. We should not perform non-idempotent or can
- * skip performing idempotent operations that
- * were carried out when resources were first
- * taken for the request when it was initialized
- * in ptlrpc_nrs_req_initialize().
- *
- * \retval 0, +ve The level of the returned resource in the resource
- * hierarchy; currently only 0 (for a non-leaf resource)
- * and 1 (for a leaf resource) are supported by the
- * framework.
- * \retval -ve error
- *
- * \see ptlrpc_nrs_req_initialize()
- * \see ptlrpc_nrs_hpreq_add_nolock()
- */
- int (*op_res_get)(struct ptlrpc_nrs_policy *policy,
- struct ptlrpc_nrs_request *nrq,
- const struct ptlrpc_nrs_resource *parent,
- struct ptlrpc_nrs_resource **resp,
- bool moving_req);
- /**
- * Called when releasing references taken for resources in the resource
- * hierarchy for the request; this operation is optional.
- *
- * \param[in,out] policy The policy the resource belongs to
- * \param[in] res The resource to be freed
- *
- * \see ptlrpc_nrs_req_finalize()
- * \see ptlrpc_nrs_hpreq_add_nolock()
- */
- void (*op_res_put)(struct ptlrpc_nrs_policy *policy,
- const struct ptlrpc_nrs_resource *res);
-
- /**
- * Obtains a request for handling from the policy, and optionally
- * removes the request from the policy; this operation is mandatory.
- *
- * \param[in,out] policy The policy to poll
- * \param[in] peek When set, signifies that we just want to
- * examine the request, and not handle it, so the
- * request is not removed from the policy.
- * \param[in] force When set, it will force a policy to return a
- * request if it has one queued.
- *
- * \retval NULL No request available for handling
- * \retval valid-pointer The request polled for handling
- *
- * \see ptlrpc_nrs_req_get_nolock()
- */
- struct ptlrpc_nrs_request *
- (*op_req_get)(struct ptlrpc_nrs_policy *policy, bool peek,
- bool force);
- /**
- * Called when attempting to add a request to a policy for later
- * handling; this operation is mandatory.
- *
- * \param[in,out] policy The policy on which to enqueue \a nrq
- * \param[in,out] nrq The request to enqueue
- *
- * \retval 0 success
- * \retval != 0 error
- *
- * \see ptlrpc_nrs_req_add_nolock()
- */
- int (*op_req_enqueue)(struct ptlrpc_nrs_policy *policy,
- struct ptlrpc_nrs_request *nrq);
- /**
- * Removes a request from the policy's set of pending requests. Normally
- * called after a request has been polled successfully from the policy
- * for handling; this operation is mandatory.
- *
- * \param[in,out] policy The policy the request \a nrq belongs to
- * \param[in,out] nrq The request to dequeue
- */
- void (*op_req_dequeue)(struct ptlrpc_nrs_policy *policy,
- struct ptlrpc_nrs_request *nrq);
- /**
- * Called after the request being carried out. Could be used for
- * job/resource control; this operation is optional.
- *
- * \param[in,out] policy The policy which is stopping to handle request
- * \a nrq
- * \param[in,out] nrq The request
- *
- * \pre assert_spin_locked(&svcpt->scp_req_lock)
- *
- * \see ptlrpc_nrs_req_stop_nolock()
- */
- void (*op_req_stop)(struct ptlrpc_nrs_policy *policy,
- struct ptlrpc_nrs_request *nrq);
- /**
- * Registers the policy's lprocfs interface with a PTLRPC service.
- *
- * \param[in] svc The service
- *
- * \retval 0 success
- * \retval != 0 error
- */
- int (*op_lprocfs_init)(struct ptlrpc_service *svc);
- /**
- * Unegisters the policy's lprocfs interface with a PTLRPC service.
- *
- * In cases of failed policy registration in
- * \e ptlrpc_nrs_policy_register(), this function may be called for a
- * service which has not registered the policy successfully, so
- * implementations of this method should make sure their operations are
- * safe in such cases.
- *
- * \param[in] svc The service
- */
- void (*op_lprocfs_fini)(struct ptlrpc_service *svc);
-};
-
-/**
- * Policy flags
- */
-enum nrs_policy_flags {
- /**
- * Fallback policy, use this flag only on a single supported policy per
- * service. The flag cannot be used on policies that use
- * \e PTLRPC_NRS_FL_REG_EXTERN
- */
- PTLRPC_NRS_FL_FALLBACK = (1 << 0),
- /**
- * Start policy immediately after registering.
- */
- PTLRPC_NRS_FL_REG_START = (1 << 1),
- /**
- * This is a policy registering from a module different to the one NRS
- * core ships in (currently ptlrpc).
- */
- PTLRPC_NRS_FL_REG_EXTERN = (1 << 2),
-};
-
-/**
- * NRS queue type.
- *
- * Denotes whether an NRS instance is for handling normal or high-priority
- * RPCs, or whether an operation pertains to one or both of the NRS instances
- * in a service.
- */
-enum ptlrpc_nrs_queue_type {
- PTLRPC_NRS_QUEUE_REG = (1 << 0),
- PTLRPC_NRS_QUEUE_HP = (1 << 1),
- PTLRPC_NRS_QUEUE_BOTH = (PTLRPC_NRS_QUEUE_REG | PTLRPC_NRS_QUEUE_HP)
-};
-
-/**
- * NRS head
- *
- * A PTLRPC service has at least one NRS head instance for handling normal
- * priority RPCs, and may optionally have a second NRS head instance for
- * handling high-priority RPCs. Each NRS head maintains a list of available
- * policies, of which one and only one policy is acting as the fallback policy,
- * and optionally a different policy may be acting as the primary policy. For
- * all RPCs handled by this NRS head instance, NRS core will first attempt to
- * enqueue the RPC using the primary policy (if any). The fallback policy is
- * used in the following cases:
- * - when there was no primary policy in the
- * ptlrpc_nrs_pol_state::NRS_POL_STATE_STARTED state at the time the request
- * was initialized.
- * - when the primary policy that was at the
- * ptlrpc_nrs_pol_state::PTLRPC_NRS_POL_STATE_STARTED state at the time the
- * RPC was initialized, denoted it did not wish, or for some other reason was
- * not able to handle the request, by returning a non-valid NRS resource
- * reference.
- * - when the primary policy that was at the
- * ptlrpc_nrs_pol_state::PTLRPC_NRS_POL_STATE_STARTED state at the time the
- * RPC was initialized, fails later during the request enqueueing stage.
- *
- * \see nrs_resource_get_safe()
- * \see nrs_request_enqueue()
- */
-struct ptlrpc_nrs {
- spinlock_t nrs_lock;
- /** XXX Possibly replace svcpt->scp_req_lock with another lock here. */
- /**
- * List of registered policies
- */
- struct list_head nrs_policy_list;
- /**
- * List of policies with queued requests. Policies that have any
- * outstanding requests are queued here, and this list is queried
- * in a round-robin manner from NRS core when obtaining a request
- * for handling. This ensures that requests from policies that at some
- * point transition away from the
- * ptlrpc_nrs_pol_state::NRS_POL_STATE_STARTED state are drained.
- */
- struct list_head nrs_policy_queued;
- /**
- * Service partition for this NRS head
- */
- struct ptlrpc_service_part *nrs_svcpt;
- /**
- * Primary policy, which is the preferred policy for handling RPCs
- */
- struct ptlrpc_nrs_policy *nrs_policy_primary;
- /**
- * Fallback policy, which is the backup policy for handling RPCs
- */
- struct ptlrpc_nrs_policy *nrs_policy_fallback;
- /**
- * This NRS head handles either HP or regular requests
- */
- enum ptlrpc_nrs_queue_type nrs_queue_type;
- /**
- * # queued requests from all policies in this NRS head
- */
- unsigned long nrs_req_queued;
- /**
- * # scheduled requests from all policies in this NRS head
- */
- unsigned long nrs_req_started;
- /**
- * # policies on this NRS
- */
- unsigned nrs_num_pols;
- /**
- * This NRS head is in progress of starting a policy
- */
- unsigned nrs_policy_starting:1;
- /**
- * In progress of shutting down the whole NRS head; used during
- * unregistration
- */
- unsigned nrs_stopping:1;
-};
-
-#define NRS_POL_NAME_MAX 16
-
-struct ptlrpc_nrs_pol_desc;
-
-/**
- * Service compatibility predicate; this determines whether a policy is adequate
- * for handling RPCs of a particular PTLRPC service.
- *
- * XXX:This should give the same result during policy registration and
- * unregistration, and for all partitions of a service; so the result should not
- * depend on temporal service or other properties, that may influence the
- * result.
- */
-typedef bool (*nrs_pol_desc_compat_t) (const struct ptlrpc_service *svc,
- const struct ptlrpc_nrs_pol_desc *desc);
-
-struct ptlrpc_nrs_pol_conf {
- /**
- * Human-readable policy name
- */
- char nc_name[NRS_POL_NAME_MAX];
- /**
- * NRS operations for this policy
- */
- const struct ptlrpc_nrs_pol_ops *nc_ops;
- /**
- * Service compatibility predicate
- */
- nrs_pol_desc_compat_t nc_compat;
- /**
- * Set for policies that support a single ptlrpc service, i.e. ones that
- * have \a pd_compat set to nrs_policy_compat_one(). The variable value
- * depicts the name of the single service that such policies are
- * compatible with.
- */
- const char *nc_compat_svc_name;
- /**
- * Owner module for this policy descriptor; policies registering from a
- * different module to the one the NRS framework is held within
- * (currently ptlrpc), should set this field to THIS_MODULE.
- */
- struct module *nc_owner;
- /**
- * Policy registration flags; a bitmask of \e nrs_policy_flags
- */
- unsigned nc_flags;
-};
-
-/**
- * NRS policy registering descriptor
- *
- * Is used to hold a description of a policy that can be passed to NRS core in
- * order to register the policy with NRS heads in different PTLRPC services.
- */
-struct ptlrpc_nrs_pol_desc {
- /**
- * Human-readable policy name
- */
- char pd_name[NRS_POL_NAME_MAX];
- /**
- * Link into nrs_core::nrs_policies
- */
- struct list_head pd_list;
- /**
- * NRS operations for this policy
- */
- const struct ptlrpc_nrs_pol_ops *pd_ops;
- /**
- * Service compatibility predicate
- */
- nrs_pol_desc_compat_t pd_compat;
- /**
- * Set for policies that are compatible with only one PTLRPC service.
- *
- * \see ptlrpc_nrs_pol_conf::nc_compat_svc_name
- */
- const char *pd_compat_svc_name;
- /**
- * Owner module for this policy descriptor.
- *
- * We need to hold a reference to the module whenever we might make use
- * of any of the module's contents, i.e.
- * - If one or more instances of the policy are at a state where they
- * might be handling a request, i.e.
- * ptlrpc_nrs_pol_state::NRS_POL_STATE_STARTED or
- * ptlrpc_nrs_pol_state::NRS_POL_STATE_STOPPING as we will have to
- * call into the policy's ptlrpc_nrs_pol_ops() handlers. A reference
- * is taken on the module when
- * \e ptlrpc_nrs_pol_desc::pd_refs becomes 1, and released when it
- * becomes 0, so that we hold only one reference to the module maximum
- * at any time.
- *
- * We do not need to hold a reference to the module, even though we
- * might use code and data from the module, in the following cases:
- * - During external policy registration, because this should happen in
- * the module's init() function, in which case the module is safe from
- * removal because a reference is being held on the module by the
- * kernel, and iirc kmod (and I guess module-init-tools also) will
- * serialize any racing processes properly anyway.
- * - During external policy unregistration, because this should happen
- * in a module's exit() function, and any attempts to start a policy
- * instance would need to take a reference on the module, and this is
- * not possible once we have reached the point where the exit()
- * handler is called.
- * - During service registration and unregistration, as service setup
- * and cleanup, and policy registration, unregistration and policy
- * instance starting, are serialized by \e nrs_core::nrs_mutex, so
- * as long as users adhere to the convention of registering policies
- * in init() and unregistering them in module exit() functions, there
- * should not be a race between these operations.
- * - During any policy-specific lprocfs operations, because a reference
- * is held by the kernel on a proc entry that has been entered by a
- * syscall, so as long as proc entries are removed during unregistration time,
- * then unregistration and lprocfs operations will be properly
- * serialized.
- */
- struct module *pd_owner;
- /**
- * Bitmask of \e nrs_policy_flags
- */
- unsigned pd_flags;
- /**
- * # of references on this descriptor
- */
- atomic_t pd_refs;
-};
-
-/**
- * NRS policy state
- *
- * Policies transition from one state to the other during their lifetime
- */
-enum ptlrpc_nrs_pol_state {
- /**
- * Not a valid policy state.
- */
- NRS_POL_STATE_INVALID,
- /**
- * Policies are at this state either at the start of their life, or
- * transition here when the user selects a different policy to act
- * as the primary one.
- */
- NRS_POL_STATE_STOPPED,
- /**
- * Policy is progress of stopping
- */
- NRS_POL_STATE_STOPPING,
- /**
- * Policy is in progress of starting
- */
- NRS_POL_STATE_STARTING,
- /**
- * A policy is in this state in two cases:
- * - it is the fallback policy, which is always in this state.
- * - it has been activated by the user; i.e. it is the primary policy,
- */
- NRS_POL_STATE_STARTED,
-};
-
-/**
- * NRS policy information
- *
- * Used for obtaining information for the status of a policy via lprocfs
- */
-struct ptlrpc_nrs_pol_info {
- /**
- * Policy name
- */
- char pi_name[NRS_POL_NAME_MAX];
- /**
- * Current policy state
- */
- enum ptlrpc_nrs_pol_state pi_state;
- /**
- * # RPCs enqueued for later dispatching by the policy
- */
- long pi_req_queued;
- /**
- * # RPCs started for dispatch by the policy
- */
- long pi_req_started;
- /**
- * Is this a fallback policy?
- */
- unsigned pi_fallback:1;
-};
-
-/**
- * NRS policy
- *
- * There is one instance of this for each policy in each NRS head of each
- * PTLRPC service partition.
- */
-struct ptlrpc_nrs_policy {
- /**
- * Linkage into the NRS head's list of policies,
- * ptlrpc_nrs:nrs_policy_list
- */
- struct list_head pol_list;
- /**
- * Linkage into the NRS head's list of policies with enqueued
- * requests ptlrpc_nrs:nrs_policy_queued
- */
- struct list_head pol_list_queued;
- /**
- * Current state of this policy
- */
- enum ptlrpc_nrs_pol_state pol_state;
- /**
- * Bitmask of nrs_policy_flags
- */
- unsigned pol_flags;
- /**
- * # RPCs enqueued for later dispatching by the policy
- */
- long pol_req_queued;
- /**
- * # RPCs started for dispatch by the policy
- */
- long pol_req_started;
- /**
- * Usage Reference count taken on the policy instance
- */
- long pol_ref;
- /**
- * The NRS head this policy has been created at
- */
- struct ptlrpc_nrs *pol_nrs;
- /**
- * Private policy data; varies by policy type
- */
- void *pol_private;
- /**
- * Policy descriptor for this policy instance.
- */
- struct ptlrpc_nrs_pol_desc *pol_desc;
-};
-
-/**
- * NRS resource
- *
- * Resources are embedded into two types of NRS entities:
- * - Inside NRS policies, in the policy's private data in
- * ptlrpc_nrs_policy::pol_private
- * - In objects that act as prime-level scheduling entities in different NRS
- * policies; e.g. on a policy that performs round robin or similar order
- * scheduling across client NIDs, there would be one NRS resource per unique
- * client NID. On a policy which performs round robin scheduling across
- * backend filesystem objects, there would be one resource associated with
- * each of the backend filesystem objects partaking in the scheduling
- * performed by the policy.
- *
- * NRS resources share a parent-child relationship, in which resources embedded
- * in policy instances are the parent entities, with all scheduling entities
- * a policy schedules across being the children, thus forming a simple resource
- * hierarchy. This hierarchy may be extended with one or more levels in the
- * future if the ability to have more than one primary policy is added.
- *
- * Upon request initialization, references to the then active NRS policies are
- * taken and used to later handle the dispatching of the request with one of
- * these policies.
- *
- * \see nrs_resource_get_safe()
- * \see ptlrpc_nrs_req_add()
- */
-struct ptlrpc_nrs_resource {
- /**
- * This NRS resource's parent; is NULL for resources embedded in NRS
- * policy instances; i.e. those are top-level ones.
- */
- struct ptlrpc_nrs_resource *res_parent;
- /**
- * The policy associated with this resource.
- */
- struct ptlrpc_nrs_policy *res_policy;
-};
-
-enum {
- NRS_RES_FALLBACK,
- NRS_RES_PRIMARY,
- NRS_RES_MAX
-};
-
-/* \name fifo
- *
- * FIFO policy
- *
- * This policy is a logical wrapper around previous, non-NRS functionality.
- * It dispatches RPCs in the same order as they arrive from the network. This
- * policy is currently used as the fallback policy, and the only enabled policy
- * on all NRS heads of all PTLRPC service partitions.
- * @{
- */
-
-/**
- * Private data structure for the FIFO policy
- */
-struct nrs_fifo_head {
- /**
- * Resource object for policy instance.
- */
- struct ptlrpc_nrs_resource fh_res;
- /**
- * List of queued requests.
- */
- struct list_head fh_list;
- /**
- * For debugging purposes.
- */
- __u64 fh_sequence;
-};
-
-struct nrs_fifo_req {
- struct list_head fr_list;
- __u64 fr_sequence;
-};
-
-/** @} fifo */
-
-/**
- * NRS request
- *
- * Instances of this object exist embedded within ptlrpc_request; the main
- * purpose of this object is to hold references to the request's resources
- * for the lifetime of the request, and to hold properties that policies use
- * use for determining the request's scheduling priority.
- */
-struct ptlrpc_nrs_request {
- /**
- * The request's resource hierarchy.
- */
- struct ptlrpc_nrs_resource *nr_res_ptrs[NRS_RES_MAX];
- /**
- * Index into ptlrpc_nrs_request::nr_res_ptrs of the resource of the
- * policy that was used to enqueue the request.
- *
- * \see nrs_request_enqueue()
- */
- unsigned nr_res_idx;
- unsigned nr_initialized:1;
- unsigned nr_enqueued:1;
- unsigned nr_started:1;
- unsigned nr_finalized:1;
-
- /**
- * Policy-specific fields, used for determining a request's scheduling
- * priority, and other supporting functionality.
- */
- union {
- /**
- * Fields for the FIFO policy
- */
- struct nrs_fifo_req fifo;
- } nr_u;
- /**
- * Externally-registering policies may want to use this to allocate
- * their own request properties.
- */
- void *ext;
-};
-
-/** @} nrs */
+#include "lustre_nrs.h"
/**
* Basic request prioritization operations structure.
@@ -1304,6 +600,8 @@ struct ptlrpc_cli_req {
union ptlrpc_async_args cr_async_args;
/** Opaq data for replay and commit callbacks. */
void *cr_cb_data;
+ /** Link to the imp->imp_unreplied_list */
+ struct list_head cr_unreplied_list;
/**
* Commit callback, called when request is committed and about to be
* freed.
@@ -1343,6 +641,7 @@ struct ptlrpc_cli_req {
#define rq_interpret_reply rq_cli.cr_reply_interp
#define rq_async_args rq_cli.cr_async_args
#define rq_cb_data rq_cli.cr_cb_data
+#define rq_unreplied_list rq_cli.cr_unreplied_list
#define rq_commit_cb rq_cli.cr_commit_cb
#define rq_replay_cb rq_cli.cr_replay_cb
@@ -1505,6 +804,8 @@ struct ptlrpc_request {
__u64 rq_transno;
/** xid */
__u64 rq_xid;
+ /** bulk match bits */
+ u64 rq_mbits;
/**
* List item to for replay list. Not yet committed requests get linked
* there.
@@ -1793,10 +1094,93 @@ struct ptlrpc_bulk_page {
struct page *bp_page;
};
-#define BULK_GET_SOURCE 0
-#define BULK_PUT_SINK 1
-#define BULK_GET_SINK 2
-#define BULK_PUT_SOURCE 3
+enum ptlrpc_bulk_op_type {
+ PTLRPC_BULK_OP_ACTIVE = 0x00000001,
+ PTLRPC_BULK_OP_PASSIVE = 0x00000002,
+ PTLRPC_BULK_OP_PUT = 0x00000004,
+ PTLRPC_BULK_OP_GET = 0x00000008,
+ PTLRPC_BULK_BUF_KVEC = 0x00000010,
+ PTLRPC_BULK_BUF_KIOV = 0x00000020,
+ PTLRPC_BULK_GET_SOURCE = PTLRPC_BULK_OP_PASSIVE | PTLRPC_BULK_OP_GET,
+ PTLRPC_BULK_PUT_SINK = PTLRPC_BULK_OP_PASSIVE | PTLRPC_BULK_OP_PUT,
+ PTLRPC_BULK_GET_SINK = PTLRPC_BULK_OP_ACTIVE | PTLRPC_BULK_OP_GET,
+ PTLRPC_BULK_PUT_SOURCE = PTLRPC_BULK_OP_ACTIVE | PTLRPC_BULK_OP_PUT,
+};
+
+static inline bool ptlrpc_is_bulk_op_get(enum ptlrpc_bulk_op_type type)
+{
+ return (type & PTLRPC_BULK_OP_GET) == PTLRPC_BULK_OP_GET;
+}
+
+static inline bool ptlrpc_is_bulk_get_source(enum ptlrpc_bulk_op_type type)
+{
+ return (type & PTLRPC_BULK_GET_SOURCE) == PTLRPC_BULK_GET_SOURCE;
+}
+
+static inline bool ptlrpc_is_bulk_put_sink(enum ptlrpc_bulk_op_type type)
+{
+ return (type & PTLRPC_BULK_PUT_SINK) == PTLRPC_BULK_PUT_SINK;
+}
+
+static inline bool ptlrpc_is_bulk_get_sink(enum ptlrpc_bulk_op_type type)
+{
+ return (type & PTLRPC_BULK_GET_SINK) == PTLRPC_BULK_GET_SINK;
+}
+
+static inline bool ptlrpc_is_bulk_put_source(enum ptlrpc_bulk_op_type type)
+{
+ return (type & PTLRPC_BULK_PUT_SOURCE) == PTLRPC_BULK_PUT_SOURCE;
+}
+
+static inline bool ptlrpc_is_bulk_desc_kvec(enum ptlrpc_bulk_op_type type)
+{
+ return ((type & PTLRPC_BULK_BUF_KVEC) | (type & PTLRPC_BULK_BUF_KIOV))
+ == PTLRPC_BULK_BUF_KVEC;
+}
+
+static inline bool ptlrpc_is_bulk_desc_kiov(enum ptlrpc_bulk_op_type type)
+{
+ return ((type & PTLRPC_BULK_BUF_KVEC) | (type & PTLRPC_BULK_BUF_KIOV))
+ == PTLRPC_BULK_BUF_KIOV;
+}
+
+static inline bool ptlrpc_is_bulk_op_active(enum ptlrpc_bulk_op_type type)
+{
+ return ((type & PTLRPC_BULK_OP_ACTIVE) |
+ (type & PTLRPC_BULK_OP_PASSIVE)) == PTLRPC_BULK_OP_ACTIVE;
+}
+
+static inline bool ptlrpc_is_bulk_op_passive(enum ptlrpc_bulk_op_type type)
+{
+ return ((type & PTLRPC_BULK_OP_ACTIVE) |
+ (type & PTLRPC_BULK_OP_PASSIVE)) == PTLRPC_BULK_OP_PASSIVE;
+}
+
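To make the composite values concrete, an illustrative helper (not part of the patch) that decodes a descriptor type using the predicates above:

static inline const char *ptlrpc_bulk_op_name(enum ptlrpc_bulk_op_type type)
{
	/* every valid type pairs one role bit (active/passive)
	 * with one direction bit (get/put)
	 */
	if (ptlrpc_is_bulk_get_source(type))
		return "GET source (passive: peer reads from us)";
	if (ptlrpc_is_bulk_put_sink(type))
		return "PUT sink (passive: peer writes to us)";
	if (ptlrpc_is_bulk_get_sink(type))
		return "GET sink (active: we read from peer)";
	if (ptlrpc_is_bulk_put_source(type))
		return "PUT source (active: we write to peer)";
	return "unknown";
}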
+struct ptlrpc_bulk_frag_ops {
+ /**
+ * Add a page \a page to the bulk descriptor \a desc
+ * Data to transfer in the page starts at offset \a pageoffset and
+ * amount of data to transfer from the page is \a len
+ */
+ void (*add_kiov_frag)(struct ptlrpc_bulk_desc *desc,
+ struct page *page, int pageoffset, int len);
+
+ /*
+ * Add a \a fragment to the bulk descriptor \a desc.
+ * Data to transfer in the fragment is pointed to by \a frag
+ * The size of the fragment is \a len
+ */
+ int (*add_iov_frag)(struct ptlrpc_bulk_desc *desc, void *frag, int len);
+
+ /**
+ * Uninitialize and free bulk descriptor \a desc.
+ * Works on bulk descriptors both from server and client side.
+ */
+ void (*release_frags)(struct ptlrpc_bulk_desc *desc);
+};
+
+extern const struct ptlrpc_bulk_frag_ops ptlrpc_bulk_kiov_pin_ops;
+extern const struct ptlrpc_bulk_frag_ops ptlrpc_bulk_kiov_nopin_ops;
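A sketch of the client-side allocation path under the reworked API; npages, pages[] and the portal value are illustrative, while the flag/ops pairing follows the kiov definitions above:

	struct ptlrpc_bulk_desc *desc;
	int i;

	desc = ptlrpc_prep_bulk_imp(req, npages, 1,
				    PTLRPC_BULK_PUT_SINK | PTLRPC_BULK_BUF_KIOV,
				    OST_BULK_PORTAL,
				    &ptlrpc_bulk_kiov_pin_ops);
	if (!desc)
		return -ENOMEM;
	for (i = 0; i < npages; i++)
		desc->bd_frag_ops->add_kiov_frag(desc, pages[i], 0, PAGE_SIZE);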
/**
* Definition of bulk descriptor.
@@ -1811,14 +1195,14 @@ struct ptlrpc_bulk_page {
struct ptlrpc_bulk_desc {
/** completed with failure */
unsigned long bd_failure:1;
- /** {put,get}{source,sink} */
- unsigned long bd_type:2;
/** client side */
unsigned long bd_registered:1;
/** For serialization with callback */
spinlock_t bd_lock;
/** Import generation when request for this bulk was sent */
int bd_import_generation;
+ /** {put,get}{source,sink}{kvec,kiov} */
+ enum ptlrpc_bulk_op_type bd_type;
/** LNet portal for this bulk */
__u32 bd_portal;
/** Server side - export this bulk created for */
@@ -1827,13 +1211,14 @@ struct ptlrpc_bulk_desc {
struct obd_import *bd_import;
/** Back pointer to the request */
struct ptlrpc_request *bd_req;
+ struct ptlrpc_bulk_frag_ops *bd_frag_ops;
wait_queue_head_t bd_waitq; /* server side only WQ */
int bd_iov_count; /* # entries in bd_iov */
int bd_max_iov; /* allocated size of bd_iov */
int bd_nob; /* # bytes covered */
int bd_nob_transferred; /* # bytes GOT/PUT */
- __u64 bd_last_xid;
+ u64 bd_last_mbits;
struct ptlrpc_cb_id bd_cbid; /* network callback info */
lnet_nid_t bd_sender; /* stash event::sender */
@@ -1842,14 +1227,31 @@ struct ptlrpc_bulk_desc {
/** array of associated MDs */
lnet_handle_md_t bd_mds[PTLRPC_BULK_OPS_COUNT];
- /*
- * encrypt iov, size is either 0 or bd_iov_count.
- */
- lnet_kiov_t *bd_enc_iov;
-
- lnet_kiov_t bd_iov[0];
+ union {
+ struct {
+ /*
+ * encrypt iov, size is either 0 or bd_iov_count.
+ */
+ struct bio_vec *bd_enc_vec;
+ struct bio_vec *bd_vec; /* Array of bio_vecs */
+ } bd_kiov;
+
+ struct {
+ struct kvec *bd_enc_kvec;
+ struct kvec *bd_kvec; /* Array of kvecs */
+ } bd_kvec;
+ } bd_u;
};
+#define GET_KIOV(desc) ((desc)->bd_u.bd_kiov.bd_vec)
+#define BD_GET_KIOV(desc, i) ((desc)->bd_u.bd_kiov.bd_vec[i])
+#define GET_ENC_KIOV(desc) ((desc)->bd_u.bd_kiov.bd_enc_vec)
+#define BD_GET_ENC_KIOV(desc, i) ((desc)->bd_u.bd_kiov.bd_enc_vec[i])
+#define GET_KVEC(desc) ((desc)->bd_u.bd_kvec.bd_kvec)
+#define BD_GET_KVEC(desc, i) ((desc)->bd_u.bd_kvec.bd_kvec[i])
+#define GET_ENC_KVEC(desc) ((desc)->bd_u.bd_kvec.bd_enc_kvec)
+#define BD_GET_ENC_KVEC(desc, i) ((desc)->bd_u.bd_kvec.bd_enc_kvec[i])
+
enum {
SVC_STOPPED = 1 << 0,
SVC_STOPPING = 1 << 1,
@@ -2464,21 +1866,17 @@ int ptlrpc_request_bufs_pack(struct ptlrpc_request *request,
void ptlrpc_req_finished(struct ptlrpc_request *request);
struct ptlrpc_request *ptlrpc_request_addref(struct ptlrpc_request *req);
struct ptlrpc_bulk_desc *ptlrpc_prep_bulk_imp(struct ptlrpc_request *req,
- unsigned npages, unsigned max_brw,
- unsigned type, unsigned portal);
-void __ptlrpc_free_bulk(struct ptlrpc_bulk_desc *bulk, int pin);
-static inline void ptlrpc_free_bulk_pin(struct ptlrpc_bulk_desc *bulk)
-{
- __ptlrpc_free_bulk(bulk, 1);
-}
-
-static inline void ptlrpc_free_bulk_nopin(struct ptlrpc_bulk_desc *bulk)
-{
- __ptlrpc_free_bulk(bulk, 0);
-}
-
+ unsigned int nfrags,
+ unsigned int max_brw,
+ unsigned int type,
+ unsigned int portal,
+ const struct ptlrpc_bulk_frag_ops *ops);
+
+int ptlrpc_prep_bulk_frag(struct ptlrpc_bulk_desc *desc,
+ void *frag, int len);
void __ptlrpc_prep_bulk_page(struct ptlrpc_bulk_desc *desc,
- struct page *page, int pageoffset, int len, int);
+ struct page *page, int pageoffset, int len,
+ int pin);
static inline void ptlrpc_prep_bulk_page_pin(struct ptlrpc_bulk_desc *desc,
struct page *page, int pageoffset,
int len)
@@ -2493,6 +1891,16 @@ static inline void ptlrpc_prep_bulk_page_nopin(struct ptlrpc_bulk_desc *desc,
__ptlrpc_prep_bulk_page(desc, page, pageoffset, len, 0);
}
+void ptlrpc_free_bulk(struct ptlrpc_bulk_desc *bulk);
+
+static inline void ptlrpc_release_bulk_page_pin(struct ptlrpc_bulk_desc *desc)
+{
+ int i;
+
+	for (i = 0; i < desc->bd_iov_count; i++)
+ put_page(BD_GET_KIOV(desc, i).bv_page);
+}
+
void ptlrpc_retain_replayable_request(struct ptlrpc_request *req,
struct obd_import *imp);
__u64 ptlrpc_next_xid(void);
@@ -2652,6 +2060,7 @@ struct lustre_handle *lustre_msg_get_handle(struct lustre_msg *msg);
__u32 lustre_msg_get_type(struct lustre_msg *msg);
void lustre_msg_add_version(struct lustre_msg *msg, u32 version);
__u32 lustre_msg_get_opc(struct lustre_msg *msg);
+__u16 lustre_msg_get_tag(struct lustre_msg *msg);
__u64 lustre_msg_get_last_committed(struct lustre_msg *msg);
__u64 *lustre_msg_get_versions(struct lustre_msg *msg);
__u64 lustre_msg_get_transno(struct lustre_msg *msg);
@@ -2670,6 +2079,8 @@ void lustre_msg_set_handle(struct lustre_msg *msg,
struct lustre_handle *handle);
void lustre_msg_set_type(struct lustre_msg *msg, __u32 type);
void lustre_msg_set_opc(struct lustre_msg *msg, __u32 opc);
+void lustre_msg_set_last_xid(struct lustre_msg *msg, u64 last_xid);
+void lustre_msg_set_tag(struct lustre_msg *msg, __u16 tag);
void lustre_msg_set_versions(struct lustre_msg *msg, __u64 *versions);
void lustre_msg_set_transno(struct lustre_msg *msg, __u64 transno);
void lustre_msg_set_status(struct lustre_msg *msg, __u32 status);
@@ -2679,6 +2090,7 @@ void lustre_msg_set_timeout(struct lustre_msg *msg, __u32 timeout);
void lustre_msg_set_service_time(struct lustre_msg *msg, __u32 service_time);
void lustre_msg_set_jobid(struct lustre_msg *msg, char *jobid);
void lustre_msg_set_cksum(struct lustre_msg *msg, __u32 cksum);
+void lustre_msg_set_mbits(struct lustre_msg *msg, u64 mbits);
static inline void
lustre_shrink_reply(struct ptlrpc_request *req, int segment,
diff --git a/drivers/staging/lustre/lustre/include/lustre_nrs.h b/drivers/staging/lustre/lustre/include/lustre_nrs.h
new file mode 100644
index 000000000000..a5028aaa19cd
--- /dev/null
+++ b/drivers/staging/lustre/lustre/include/lustre_nrs.h
@@ -0,0 +1,717 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License version 2 for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.gnu.org/licenses/gpl-2.0.html
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2014, Intel Corporation.
+ *
+ * Copyright 2012 Xyratex Technology Limited
+ */
+/*
+ *
+ * Network Request Scheduler (NRS)
+ *
+ */
+
+#ifndef _LUSTRE_NRS_H
+#define _LUSTRE_NRS_H
+
+/**
+ * \defgroup nrs Network Request Scheduler
+ * @{
+ */
+struct ptlrpc_nrs_policy;
+struct ptlrpc_nrs_resource;
+struct ptlrpc_nrs_request;
+
+/**
+ * NRS control operations.
+ *
+ * These are common for all policies.
+ */
+enum ptlrpc_nrs_ctl {
+ /**
+ * Not a valid opcode.
+ */
+ PTLRPC_NRS_CTL_INVALID,
+ /**
+ * Activate the policy.
+ */
+ PTLRPC_NRS_CTL_START,
+ /**
+ * Reserved for multiple primary policies, which may be a possibility
+ * in the future.
+ */
+ PTLRPC_NRS_CTL_STOP,
+ /**
+ * Policies can start using opcodes from this value and onwards for
+ * their own purposes; the assigned value itself is arbitrary.
+ */
+ PTLRPC_NRS_CTL_1ST_POL_SPEC = 0x20,
+};
+
+/**
+ * NRS policy operations.
+ *
+ * These determine the behaviour of a policy, and are called in response to
+ * NRS core events.
+ */
+struct ptlrpc_nrs_pol_ops {
+ /**
+ * Called during policy registration; this operation is optional.
+ *
+ * \param[in,out] policy The policy being initialized
+ */
+ int (*op_policy_init)(struct ptlrpc_nrs_policy *policy);
+ /**
+ * Called during policy unregistration; this operation is optional.
+ *
+ * \param[in,out] policy The policy being unregistered/finalized
+ */
+ void (*op_policy_fini)(struct ptlrpc_nrs_policy *policy);
+ /**
+ * Called when activating a policy via lprocfs; policies allocate and
+ * initialize their resources here; this operation is optional.
+ *
+ * \param[in,out] policy The policy being started
+ *
+ * \see nrs_policy_start_locked()
+ */
+ int (*op_policy_start)(struct ptlrpc_nrs_policy *policy);
+ /**
+ * Called when deactivating a policy via lprocfs; policies deallocate
+ * their resources here; this operation is optional
+ *
+ * \param[in,out] policy The policy being stopped
+ *
+ * \see nrs_policy_stop0()
+ */
+ void (*op_policy_stop)(struct ptlrpc_nrs_policy *policy);
+ /**
+ * Used for policy-specific operations; i.e. not generic ones like
+ * \e PTLRPC_NRS_CTL_START and \e PTLRPC_NRS_CTL_GET_INFO; analogous
+ * to an ioctl; this operation is optional.
+ *
+ * \param[in,out] policy The policy carrying out operation \a opc
+ * \param[in] opc The command operation being carried out
+	 * \param[in,out] arg	 A generic buffer for communication between the
+ * user and the control operation
+ *
+ * \retval -ve error
+ * \retval 0 success
+ *
+ * \see ptlrpc_nrs_policy_control()
+ */
+ int (*op_policy_ctl)(struct ptlrpc_nrs_policy *policy,
+ enum ptlrpc_nrs_ctl opc, void *arg);
+
+ /**
+ * Called when obtaining references to the resources of the resource
+ * hierarchy for a request that has arrived for handling at the PTLRPC
+ * service. Policies should return -ve for requests they do not wish
+ * to handle. This operation is mandatory.
+ *
+ * \param[in,out] policy The policy we're getting resources for.
+ * \param[in,out] nrq The request we are getting resources for.
+ * \param[in] parent The parent resource of the resource being
+ * requested; set to NULL if none.
+ * \param[out] resp The resource is to be returned here; the
+ * fallback policy in an NRS head should
+ * \e always return a non-NULL pointer value.
+ * \param[in] moving_req When set, signifies that this is an attempt
+ * to obtain resources for a request being moved
+ * to the high-priority NRS head by
+ * ldlm_lock_reorder_req().
+ * This implies two things:
+ * 1. We are under obd_export::exp_rpc_lock and
+ * so should not sleep.
+	 *			2. We should not perform non-idempotent
+	 *			operations, and may skip idempotent operations
+	 *			that were already carried out when resources
+	 *			were first taken for the request as it was
+	 *			initialized in ptlrpc_nrs_req_initialize().
+ *
+ * \retval 0, +ve The level of the returned resource in the resource
+ * hierarchy; currently only 0 (for a non-leaf resource)
+ * and 1 (for a leaf resource) are supported by the
+ * framework.
+ * \retval -ve error
+ *
+ * \see ptlrpc_nrs_req_initialize()
+ * \see ptlrpc_nrs_hpreq_add_nolock()
+ * \see ptlrpc_nrs_req_hp_move()
+ */
+ int (*op_res_get)(struct ptlrpc_nrs_policy *policy,
+ struct ptlrpc_nrs_request *nrq,
+ const struct ptlrpc_nrs_resource *parent,
+ struct ptlrpc_nrs_resource **resp,
+ bool moving_req);
+ /**
+ * Called when releasing references taken for resources in the resource
+ * hierarchy for the request; this operation is optional.
+ *
+ * \param[in,out] policy The policy the resource belongs to
+ * \param[in] res The resource to be freed
+ *
+ * \see ptlrpc_nrs_req_finalize()
+ * \see ptlrpc_nrs_hpreq_add_nolock()
+ * \see ptlrpc_nrs_req_hp_move()
+ */
+ void (*op_res_put)(struct ptlrpc_nrs_policy *policy,
+ const struct ptlrpc_nrs_resource *res);
+
+ /**
+ * Obtains a request for handling from the policy, and optionally
+ * removes the request from the policy; this operation is mandatory.
+ *
+ * \param[in,out] policy The policy to poll
+ * \param[in] peek When set, signifies that we just want to
+ * examine the request, and not handle it, so the
+ * request is not removed from the policy.
+ * \param[in] force When set, it will force a policy to return a
+ * request if it has one queued.
+ *
+ * \retval NULL No request available for handling
+ * \retval valid-pointer The request polled for handling
+ *
+ * \see ptlrpc_nrs_req_get_nolock()
+ */
+ struct ptlrpc_nrs_request *
+ (*op_req_get)(struct ptlrpc_nrs_policy *policy, bool peek,
+ bool force);
+ /**
+ * Called when attempting to add a request to a policy for later
+ * handling; this operation is mandatory.
+ *
+ * \param[in,out] policy The policy on which to enqueue \a nrq
+ * \param[in,out] nrq The request to enqueue
+ *
+ * \retval 0 success
+ * \retval != 0 error
+ *
+ * \see ptlrpc_nrs_req_add_nolock()
+ */
+ int (*op_req_enqueue)(struct ptlrpc_nrs_policy *policy,
+ struct ptlrpc_nrs_request *nrq);
+ /**
+ * Removes a request from the policy's set of pending requests. Normally
+ * called after a request has been polled successfully from the policy
+ * for handling; this operation is mandatory.
+ *
+ * \param[in,out] policy The policy the request \a nrq belongs to
+ * \param[in,out] nrq The request to dequeue
+ *
+ * \see ptlrpc_nrs_req_del_nolock()
+ */
+ void (*op_req_dequeue)(struct ptlrpc_nrs_policy *policy,
+ struct ptlrpc_nrs_request *nrq);
+ /**
+	 * Called after the request has been carried out. Can be used for
+ * job/resource control; this operation is optional.
+ *
+ * \param[in,out] policy The policy which is stopping to handle request
+ * \a nrq
+ * \param[in,out] nrq The request
+ *
+ * \pre assert_spin_locked(&svcpt->scp_req_lock)
+ *
+ * \see ptlrpc_nrs_req_stop_nolock()
+ */
+ void (*op_req_stop)(struct ptlrpc_nrs_policy *policy,
+ struct ptlrpc_nrs_request *nrq);
+ /**
+ * Registers the policy's lprocfs interface with a PTLRPC service.
+ *
+ * \param[in] svc The service
+ *
+ * \retval 0 success
+ * \retval != 0 error
+ */
+ int (*op_lprocfs_init)(struct ptlrpc_service *svc);
+ /**
+ * Unregisters the policy's lprocfs interface from a PTLRPC service.
+ *
+ * In cases of failed policy registration in
+ * \e ptlrpc_nrs_policy_register(), this function may be called for a
+ * service which has not registered the policy successfully, so
+ * implementations of this method should make sure their operations are
+ * safe in such cases.
+ *
+ * \param[in] svc The service
+ */
+ void (*op_lprocfs_fini)(struct ptlrpc_service *svc);
+};
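+
+/*
+ * Example: a minimal op_res_get() implementation for a policy that
+ * schedules across a single top-level resource; a hedged sketch for
+ * illustration only.  struct nrs_example_head and its eh_res member are
+ * hypothetical and not part of this patch.
+ *
+ *	static int nrs_example_res_get(struct ptlrpc_nrs_policy *policy,
+ *				       struct ptlrpc_nrs_request *nrq,
+ *				       const struct ptlrpc_nrs_resource *parent,
+ *				       struct ptlrpc_nrs_resource **resp,
+ *				       bool moving_req)
+ *	{
+ *		struct nrs_example_head *head = policy->pol_private;
+ *
+ *		*resp = &head->eh_res;
+ *		return 1;
+ *	}
+ *
+ * Returning 1 marks the returned resource as a leaf; no op_res_put() is
+ * needed here since no reference counting is done on eh_res.
+ */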
+
+/**
+ * Policy flags
+ */
+enum nrs_policy_flags {
+ /**
+ * Fallback policy; this flag may be set on only one policy per
+ * service. It cannot be combined with
+ * \e PTLRPC_NRS_FL_REG_EXTERN.
+ */
+ PTLRPC_NRS_FL_FALLBACK = BIT(0),
+ /**
+ * Start policy immediately after registering.
+ */
+ PTLRPC_NRS_FL_REG_START = BIT(1),
+ /**
+ * The policy is registered from a module different from the one the
+ * NRS core ships in (currently ptlrpc).
+ */
+ PTLRPC_NRS_FL_REG_EXTERN = BIT(2),
+};
+
+/**
+ * NRS queue type.
+ *
+ * Denotes whether an NRS instance is for handling normal or high-priority
+ * RPCs, or whether an operation pertains to one or both of the NRS instances
+ * in a service.
+ */
+enum ptlrpc_nrs_queue_type {
+ PTLRPC_NRS_QUEUE_REG = BIT(0),
+ PTLRPC_NRS_QUEUE_HP = BIT(1),
+ PTLRPC_NRS_QUEUE_BOTH = (PTLRPC_NRS_QUEUE_REG | PTLRPC_NRS_QUEUE_HP)
+};
+
+/**
+ * NRS head
+ *
+ * A PTLRPC service has at least one NRS head instance for handling normal
+ * priority RPCs, and may optionally have a second NRS head instance for
+ * handling high-priority RPCs. Each NRS head maintains a list of available
+ * policies, of which one and only one policy is acting as the fallback policy,
+ * and optionally a different policy may be acting as the primary policy. For
+ * all RPCs handled by this NRS head instance, NRS core will first attempt to
+ * enqueue the RPC using the primary policy (if any). The fallback policy is
+ * used in the following cases:
+ * - when there was no primary policy in the
+ *   ptlrpc_nrs_pol_state::NRS_POL_STATE_STARTED state at the time the request
+ *   was initialized.
+ * - when the primary policy that was in the
+ *   ptlrpc_nrs_pol_state::NRS_POL_STATE_STARTED state at the time the RPC was
+ *   initialized signified, by returning an invalid NRS resource reference,
+ *   that it did not wish or was unable to handle the request.
+ * - when the primary policy that was in the
+ *   ptlrpc_nrs_pol_state::NRS_POL_STATE_STARTED state at the time the RPC was
+ *   initialized fails later, during the request enqueueing stage.
+ *
+ * \see nrs_resource_get_safe()
+ * \see nrs_request_enqueue()
+ */
+struct ptlrpc_nrs {
+ spinlock_t nrs_lock;
+ /** XXX Possibly replace svcpt->scp_req_lock with another lock here. */
+ /**
+ * List of registered policies
+ */
+ struct list_head nrs_policy_list;
+ /**
+ * List of policies with queued requests. Policies that have any
+ * outstanding requests are queued here, and this list is queried
+ * in a round-robin manner from NRS core when obtaining a request
+ * for handling. This ensures that requests from policies that at some
+ * point transition away from the
+ * ptlrpc_nrs_pol_state::NRS_POL_STATE_STARTED state are drained.
+ */
+ struct list_head nrs_policy_queued;
+ /**
+ * Service partition for this NRS head
+ */
+ struct ptlrpc_service_part *nrs_svcpt;
+ /**
+ * Primary policy, which is the preferred policy for handling RPCs
+ */
+ struct ptlrpc_nrs_policy *nrs_policy_primary;
+ /**
+ * Fallback policy, which is the backup policy for handling RPCs
+ */
+ struct ptlrpc_nrs_policy *nrs_policy_fallback;
+ /**
+ * This NRS head handles either HP or regular requests
+ */
+ enum ptlrpc_nrs_queue_type nrs_queue_type;
+ /**
+ * # queued requests from all policies in this NRS head
+ */
+ unsigned long nrs_req_queued;
+ /**
+ * # scheduled requests from all policies in this NRS head
+ */
+ unsigned long nrs_req_started;
+ /**
+ * # policies on this NRS
+ */
+ unsigned int nrs_num_pols;
+ /**
+ * This NRS head is in the process of starting a policy
+ */
+ unsigned int nrs_policy_starting:1;
+ /**
+ * The whole NRS head is in the process of shutting down; used during
+ * unregistration
+ */
+ unsigned int nrs_stopping:1;
+ /**
+ * A policy on this NRS head is throttling requests
+ */
+ unsigned int nrs_throttling:1;
+};
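+
+/*
+ * The primary/fallback selection described above boils down to logic of
+ * roughly the following shape; a simplified sketch, where
+ * nrs_policy_enqueue() is a hypothetical helper standing in for the real
+ * nrs_resource_get_safe()/nrs_request_enqueue() code paths:
+ *
+ *	int rc = -1;
+ *
+ *	if (nrs->nrs_policy_primary &&
+ *	    nrs->nrs_policy_primary->pol_state == NRS_POL_STATE_STARTED)
+ *		rc = nrs_policy_enqueue(nrs->nrs_policy_primary, nrq);
+ *	if (rc != 0)
+ *		rc = nrs_policy_enqueue(nrs->nrs_policy_fallback, nrq);
+ */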
+
+#define NRS_POL_NAME_MAX 16
+#define NRS_POL_ARG_MAX 16
+
+struct ptlrpc_nrs_pol_desc;
+
+/**
+ * Service compatibility predicate; this determines whether a policy is adequate
+ * for handling RPCs of a particular PTLRPC service.
+ *
+ * XXX: This should give the same result during policy registration and
+ * unregistration, and for all partitions of a service; the result should
+ * therefore not depend on transient service state or other properties
+ * that may change over time.
+ */
+typedef bool (*nrs_pol_desc_compat_t)(const struct ptlrpc_service *svc,
+ const struct ptlrpc_nrs_pol_desc *desc);
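+
+/*
+ * Example predicates; hedged sketches only.  nrs_policy_compat_one() is
+ * referenced below, and the svc->srv_name field is assumed to hold the
+ * service name:
+ *
+ *	static bool nrs_policy_compat_all(const struct ptlrpc_service *svc,
+ *					  const struct ptlrpc_nrs_pol_desc *desc)
+ *	{
+ *		return true;
+ *	}
+ *
+ *	static bool nrs_policy_compat_one(const struct ptlrpc_service *svc,
+ *					  const struct ptlrpc_nrs_pol_desc *desc)
+ *	{
+ *		return !strcmp(svc->srv_name, desc->pd_compat_svc_name);
+ *	}
+ */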
+
+struct ptlrpc_nrs_pol_conf {
+ /**
+ * Human-readable policy name
+ */
+ char nc_name[NRS_POL_NAME_MAX];
+ /**
+ * NRS operations for this policy
+ */
+ const struct ptlrpc_nrs_pol_ops *nc_ops;
+ /**
+ * Service compatibility predicate
+ */
+ nrs_pol_desc_compat_t nc_compat;
+ /**
+ * Set for policies that support a single PTLRPC service, i.e. ones that
+ * have \a nc_compat set to nrs_policy_compat_one(). The value holds
+ * the name of the single service that such policies are compatible
+ * with.
+ */
+ const char *nc_compat_svc_name;
+ /**
+ * Owner module for this policy descriptor; policies registering from a
+ * module different from the one the NRS framework is held within
+ * (currently ptlrpc) should set this field to THIS_MODULE.
+ */
+ struct module *nc_owner;
+ /**
+ * Policy registration flags; a bitmask of \e nrs_policy_flags
+ */
+ unsigned int nc_flags;
+};
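+
+/*
+ * Example: registering an external policy from its module init function;
+ * a hedged sketch, where "example" and nrs_example_ops are illustrative
+ * placeholders:
+ *
+ *	static struct ptlrpc_nrs_pol_conf nrs_conf_example = {
+ *		.nc_name	= "example",
+ *		.nc_ops		= &nrs_example_ops,
+ *		.nc_compat	= nrs_policy_compat_all,
+ *		.nc_owner	= THIS_MODULE,
+ *		.nc_flags	= PTLRPC_NRS_FL_REG_EXTERN,
+ *	};
+ *
+ *	rc = ptlrpc_nrs_policy_register(&nrs_conf_example);
+ */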
+
+/**
+ * NRS policy registering descriptor
+ *
+ * Holds a description of a policy that can be passed to NRS core in
+ * order to register the policy with NRS heads in different PTLRPC
+ * services.
+ */
+struct ptlrpc_nrs_pol_desc {
+ /**
+ * Human-readable policy name
+ */
+ char pd_name[NRS_POL_NAME_MAX];
+ /**
+ * Link into nrs_core::nrs_policies
+ */
+ struct list_head pd_list;
+ /**
+ * NRS operations for this policy
+ */
+ const struct ptlrpc_nrs_pol_ops *pd_ops;
+ /**
+ * Service compatibility predicate
+ */
+ nrs_pol_desc_compat_t pd_compat;
+ /**
+ * Set for policies that are compatible with only one PTLRPC service.
+ *
+ * \see ptlrpc_nrs_pol_conf::nc_compat_svc_name
+ */
+ const char *pd_compat_svc_name;
+ /**
+ * Owner module for this policy descriptor.
+ *
+ * We need to hold a reference to the module whenever we might make use
+ * of any of the module's contents, i.e.
+ * - If one or more instances of the policy are in a state where they
+ *   might be handling a request, i.e.
+ *   ptlrpc_nrs_pol_state::NRS_POL_STATE_STARTED or
+ *   ptlrpc_nrs_pol_state::NRS_POL_STATE_STOPPING, as we will have to
+ *   call into the policy's ptlrpc_nrs_pol_ops handlers. A reference
+ *   is taken on the module when
+ *   \e ptlrpc_nrs_pol_desc::pd_refs becomes 1, and released when it
+ *   becomes 0, so that at most one reference to the module is held
+ *   at any time.
+ *
+ * We do not need to hold a reference to the module, even though we
+ * might use code and data from the module, in the following cases:
+ * - During external policy registration, because this should happen in
+ * the module's init() function, in which case the module is safe from
+ * removal because a reference is being held on the module by the
+ * kernel, and kmod (and module-init-tools) will serialize any racing
+ * processes properly anyway.
+ * - During external policy unregistration, because this should happen
+ * in a module's exit() function, and any attempts to start a policy
+ * instance would need to take a reference on the module, and this is
+ * not possible once we have reached the point where the exit()
+ * handler is called.
+ * - During service registration and unregistration, as service setup
+ * and cleanup, and policy registration, unregistration and policy
+ * instance starting, are serialized by \e nrs_core::nrs_mutex, so
+ * as long as users adhere to the convention of registering policies
+ * in init() and unregistering them in module exit() functions, there
+ * should not be a race between these operations.
+ * - During any policy-specific lprocfs operations, because a reference
+ * is held by the kernel on a proc entry that has been entered by a
+ * syscall, so as long as proc entries are removed during
+ * unregistration time, then unregistration and lprocfs operations
+ * will be properly serialized.
+ */
+ struct module *pd_owner;
+ /**
+ * Bitmask of \e nrs_policy_flags
+ */
+ unsigned int pd_flags;
+ /**
+ * # of references on this descriptor
+ */
+ atomic_t pd_refs;
+};
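+
+/*
+ * The pd_refs/pd_owner rule above amounts to roughly the following on
+ * the acquire and release sides (a simplified sketch, not the exact
+ * code):
+ *
+ *	if (atomic_inc_return(&desc->pd_refs) == 1 &&
+ *	    !try_module_get(desc->pd_owner))
+ *		rc = -ENODEV;
+ *
+ *	...
+ *
+ *	if (atomic_dec_and_test(&desc->pd_refs))
+ *		module_put(desc->pd_owner);
+ */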
+
+/**
+ * NRS policy state
+ *
+ * Policies transition from one state to the other during their lifetime
+ */
+enum ptlrpc_nrs_pol_state {
+ /**
+ * Not a valid policy state.
+ */
+ NRS_POL_STATE_INVALID,
+ /**
+ * Policies are at this state either at the start of their life, or
+ * transition here when the user selects a different policy to act
+ * as the primary one.
+ */
+ NRS_POL_STATE_STOPPED,
+ /**
+ * Policy is in the process of stopping
+ */
+ NRS_POL_STATE_STOPPING,
+ /**
+ * Policy is in the process of starting
+ */
+ NRS_POL_STATE_STARTING,
+ /**
+ * A policy is in this state in two cases:
+ * - it is the fallback policy, which is always in this state.
+ * - it has been activated by the user, i.e. it is the primary policy.
+ */
+ NRS_POL_STATE_STARTED,
+};
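+
+/*
+ * The usual lifecycle is therefore (sketch):
+ *
+ *	STOPPED -> STARTING -> STARTED -> STOPPING -> STOPPED
+ *
+ * with NRS_POL_STATE_INVALID serving only as a sentinel value.
+ */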
+
+/**
+ * NRS policy information
+ *
+ * Used for obtaining information for the status of a policy via lprocfs
+ */
+struct ptlrpc_nrs_pol_info {
+ /**
+ * Policy name
+ */
+ char pi_name[NRS_POL_NAME_MAX];
+ /**
+ * Policy argument
+ */
+ char pi_arg[NRS_POL_ARG_MAX];
+ /**
+ * Current policy state
+ */
+ enum ptlrpc_nrs_pol_state pi_state;
+ /**
+ * # RPCs enqueued for later dispatching by the policy
+ */
+ long pi_req_queued;
+ /**
+ * # RPCs started for dispatch by the policy
+ */
+ long pi_req_started;
+ /**
+ * Is this a fallback policy?
+ */
+ unsigned pi_fallback:1;
+};
+
+/**
+ * NRS policy
+ *
+ * There is one instance of this for each policy in each NRS head of each
+ * PTLRPC service partition.
+ */
+struct ptlrpc_nrs_policy {
+ /**
+ * Linkage into the NRS head's list of policies,
+ * ptlrpc_nrs::nrs_policy_list
+ */
+ struct list_head pol_list;
+ /**
+ * Linkage into the NRS head's list of policies with enqueued
+ * requests, ptlrpc_nrs::nrs_policy_queued
+ */
+ struct list_head pol_list_queued;
+ /**
+ * Current state of this policy
+ */
+ enum ptlrpc_nrs_pol_state pol_state;
+ /**
+ * Bitmask of nrs_policy_flags
+ */
+ unsigned int pol_flags;
+ /**
+ * # RPCs enqueued for later dispatching by the policy
+ */
+ long pol_req_queued;
+ /**
+ * # RPCs started for dispatch by the policy
+ */
+ long pol_req_started;
+ /**
+ * Usage reference count taken on the policy instance
+ */
+ long pol_ref;
+ /**
+ * Human-readable policy argument
+ */
+ char pol_arg[NRS_POL_ARG_MAX];
+ /**
+ * The NRS head this policy was created on
+ */
+ struct ptlrpc_nrs *pol_nrs;
+ /**
+ * Private policy data; varies by policy type
+ */
+ void *pol_private;
+ /**
+ * Policy descriptor for this policy instance.
+ */
+ struct ptlrpc_nrs_pol_desc *pol_desc;
+};
+
+/**
+ * NRS resource
+ *
+ * Resources are embedded into two types of NRS entities:
+ * - Inside NRS policies, in the policy's private data in
+ * ptlrpc_nrs_policy::pol_private
+ * - In objects that act as prime-level scheduling entities in different NRS
+ *   policies; e.g. in a policy that performs round-robin or similar ordered
+ *   scheduling across client NIDs, there would be one NRS resource per unique
+ *   client NID. In a policy that performs round-robin scheduling across
+ *   backend filesystem objects, there would be one resource associated with
+ *   each of the backend filesystem objects partaking in the scheduling
+ *   performed by the policy.
+ *
+ * NRS resources share a parent-child relationship, in which resources embedded
+ * in policy instances are the parent entities, with all scheduling entities
+ * a policy schedules across being the children, thus forming a simple resource
+ * hierarchy. This hierarchy may be extended with one or more levels in the
+ * future if the ability to have more than one primary policy is added.
+ *
+ * Upon request initialization, references to the then active NRS policies are
+ * taken and used to later handle the dispatching of the request with one of
+ * these policies.
+ *
+ * \see nrs_resource_get_safe()
+ * \see ptlrpc_nrs_req_add()
+ */
+struct ptlrpc_nrs_resource {
+ /**
+ * This NRS resource's parent; NULL for resources embedded in NRS
+ * policy instances, i.e. top-level ones.
+ */
+ struct ptlrpc_nrs_resource *res_parent;
+ /**
+ * The policy associated with this resource.
+ */
+ struct ptlrpc_nrs_policy *res_policy;
+};
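+
+/*
+ * Example: a policy scheduling across client NIDs might embed one parent
+ * resource in its private data and one child resource per unique NID; a
+ * hedged sketch, both structures are hypothetical:
+ *
+ *	struct nrs_nid_head {
+ *		struct ptlrpc_nrs_resource nh_res;	(parent, level 0)
+ *		...
+ *	};
+ *
+ *	struct nrs_nid_client {
+ *		struct ptlrpc_nrs_resource nc_res;	(child, level 1)
+ *		lnet_nid_t		   nc_nid;
+ *		...
+ *	};
+ *
+ * op_res_get() would set nc_res.res_parent = &head->nh_res and return 1.
+ */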
+
+enum {
+ NRS_RES_FALLBACK,
+ NRS_RES_PRIMARY,
+ NRS_RES_MAX
+};
+
+#include "lustre_nrs_fifo.h"
+
+/**
+ * NRS request
+ *
+ * Instances of this object exist embedded within ptlrpc_request; the main
+ * purpose of this object is to hold references to the request's resources
+ * for the lifetime of the request, and to hold properties that policies
+ * use for determining the request's scheduling priority.
+ */
+struct ptlrpc_nrs_request {
+ /**
+ * The request's resource hierarchy.
+ */
+ struct ptlrpc_nrs_resource *nr_res_ptrs[NRS_RES_MAX];
+ /**
+ * Index into ptlrpc_nrs_request::nr_res_ptrs of the resource of the
+ * policy that was used to enqueue the request.
+ *
+ * \see nrs_request_enqueue()
+ */
+ unsigned int nr_res_idx;
+	/** Request has been initialized via ptlrpc_nrs_req_initialize() */
+	unsigned int			nr_initialized:1;
+	/** Request has been enqueued on a policy */
+	unsigned int			nr_enqueued:1;
+	/** Request has been started (polled for handling) */
+	unsigned int			nr_started:1;
+	/** Request has been finalized and its resources released */
+	unsigned int			nr_finalized:1;
+
+ /**
+ * Policy-specific fields, used for determining a request's scheduling
+ * priority, and other supporting functionality.
+ */
+ union {
+ /**
+ * Fields for the FIFO policy
+ */
+ struct nrs_fifo_req fifo;
+ } nr_u;
+ /**
+ * Externally-registering policies may want to use this to allocate
+ * their own request properties.
+ */
+ void *ext;
+};
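+
+/*
+ * Since this object is embedded within ptlrpc_request, a policy can
+ * recover the enclosing request with container_of(); a sketch, assuming
+ * the embedding member is named rq_nrq:
+ *
+ *	struct ptlrpc_request *req =
+ *		container_of(nrq, struct ptlrpc_request, rq_nrq);
+ */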
+
+/** @} nrs */
+#endif
diff --git a/drivers/staging/lustre/lustre/include/lustre_nrs_fifo.h b/drivers/staging/lustre/lustre/include/lustre_nrs_fifo.h
new file mode 100644
index 000000000000..3b5418eac6c4
--- /dev/null
+++ b/drivers/staging/lustre/lustre/include/lustre_nrs_fifo.h
@@ -0,0 +1,70 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License version 2 for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.gnu.org/licenses/gpl-2.0.html
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2014, Intel Corporation.
+ *
+ * Copyright 2012 Xyratex Technology Limited
+ */
+/*
+ *
+ * Network Request Scheduler (NRS) First-in First-out (FIFO) policy
+ *
+ */
+
+#ifndef _LUSTRE_NRS_FIFO_H
+#define _LUSTRE_NRS_FIFO_H
+
+/** \name fifo
+ *
+ * FIFO policy
+ *
+ * This policy is a logical wrapper around previous, non-NRS functionality.
+ * It dispatches RPCs in the same order as they arrive from the network. This
+ * policy is currently used as the fallback policy, and the only enabled policy
+ * on all NRS heads of all PTLRPC service partitions.
+ * @{
+ */
+
+/**
+ * Private data structure for the FIFO policy
+ */
+struct nrs_fifo_head {
+ /**
+ * Resource object for policy instance.
+ */
+ struct ptlrpc_nrs_resource fh_res;
+ /**
+ * List of queued requests.
+ */
+ struct list_head fh_list;
+ /**
+ * Sequence counter assigned to incoming requests; for debugging.
+ */
+ __u64 fh_sequence;
+};
+
+struct nrs_fifo_req {
+	/** Linkage into nrs_fifo_head::fh_list */
+	struct list_head	fr_list;
+	/** Sequence number assigned when the request is enqueued */
+	__u64			fr_sequence;
+};
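+
+/*
+ * With these two structures, the FIFO policy's enqueue and poll
+ * operations reduce to plain list manipulation; a hedged sketch of the
+ * expected implementation, not a verbatim copy of the nrs_fifo.c code:
+ *
+ *	static int nrs_fifo_req_add(struct ptlrpc_nrs_policy *policy,
+ *				    struct ptlrpc_nrs_request *nrq)
+ *	{
+ *		struct nrs_fifo_head *head = policy->pol_private;
+ *
+ *		nrq->nr_u.fifo.fr_sequence = head->fh_sequence++;
+ *		list_add_tail(&nrq->nr_u.fifo.fr_list, &head->fh_list);
+ *		return 0;
+ *	}
+ *
+ *	static struct ptlrpc_nrs_request *
+ *	nrs_fifo_req_get(struct ptlrpc_nrs_policy *policy, bool peek,
+ *			 bool force)
+ *	{
+ *		struct nrs_fifo_head *head = policy->pol_private;
+ *		struct ptlrpc_nrs_request *nrq;
+ *
+ *		nrq = list_first_entry_or_null(&head->fh_list,
+ *					       struct ptlrpc_nrs_request,
+ *					       nr_u.fifo.fr_list);
+ *		if (nrq && !peek)
+ *			list_del_init(&nrq->nr_u.fifo.fr_list);
+ *		return nrq;
+ *	}
+ */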
+
+/** @} fifo */
+#endif
diff --git a/drivers/staging/lustre/lustre/include/lustre_req_layout.h b/drivers/staging/lustre/lustre/include/lustre_req_layout.h
index a13558e53274..fbcd39572cd0 100644
--- a/drivers/staging/lustre/lustre/include/lustre_req_layout.h
+++ b/drivers/staging/lustre/lustre/include/lustre_req_layout.h
@@ -148,13 +148,12 @@ extern struct req_format RQF_MDS_GETATTR;
*/
extern struct req_format RQF_MDS_GETATTR_NAME;
extern struct req_format RQF_MDS_CLOSE;
-extern struct req_format RQF_MDS_RELEASE_CLOSE;
+extern struct req_format RQF_MDS_INTENT_CLOSE;
extern struct req_format RQF_MDS_CONNECT;
extern struct req_format RQF_MDS_DISCONNECT;
extern struct req_format RQF_MDS_GET_INFO;
extern struct req_format RQF_MDS_READPAGE;
extern struct req_format RQF_MDS_WRITEPAGE;
-extern struct req_format RQF_MDS_DONE_WRITING;
extern struct req_format RQF_MDS_REINT;
extern struct req_format RQF_MDS_REINT_CREATE;
extern struct req_format RQF_MDS_REINT_CREATE_ACL;
@@ -166,10 +165,9 @@ extern struct req_format RQF_MDS_REINT_LINK;
extern struct req_format RQF_MDS_REINT_RENAME;
extern struct req_format RQF_MDS_REINT_SETATTR;
extern struct req_format RQF_MDS_REINT_SETXATTR;
-extern struct req_format RQF_MDS_QUOTACHECK;
extern struct req_format RQF_MDS_QUOTACTL;
-extern struct req_format RQF_QC_CALLBACK;
extern struct req_format RQF_MDS_SWAP_LAYOUTS;
+extern struct req_format RQF_MDS_REINT_MIGRATE;
/* MDS hsm formats */
extern struct req_format RQF_MDS_HSM_STATE_GET;
extern struct req_format RQF_MDS_HSM_STATE_SET;
@@ -181,7 +179,6 @@ extern struct req_format RQF_MDS_HSM_REQUEST;
/* OST req_format */
extern struct req_format RQF_OST_CONNECT;
extern struct req_format RQF_OST_DISCONNECT;
-extern struct req_format RQF_OST_QUOTACHECK;
extern struct req_format RQF_OST_QUOTACTL;
extern struct req_format RQF_OST_GETATTR;
extern struct req_format RQF_OST_SETATTR;
diff --git a/drivers/staging/lustre/lustre/include/lustre_sec.h b/drivers/staging/lustre/lustre/include/lustre_sec.h
index 90c183424802..03a970bcac55 100644
--- a/drivers/staging/lustre/lustre/include/lustre_sec.h
+++ b/drivers/staging/lustre/lustre/include/lustre_sec.h
@@ -50,6 +50,7 @@ struct brw_page;
/* Linux specific */
struct key;
struct seq_file;
+struct lustre_cfg;
/*
* forward declaration
@@ -1029,6 +1030,8 @@ int sptlrpc_target_export_check(struct obd_export *exp,
/* bulk security api */
void sptlrpc_enc_pool_put_pages(struct ptlrpc_bulk_desc *desc);
+int get_free_pages_in_pool(void);
+int pool_is_at_full_capacity(void);
int sptlrpc_cli_wrap_bulk(struct ptlrpc_request *req,
struct ptlrpc_bulk_desc *desc);
diff --git a/drivers/staging/lustre/lustre/include/lustre_swab.h b/drivers/staging/lustre/lustre/include/lustre_swab.h
new file mode 100644
index 000000000000..26d01c2d6633
--- /dev/null
+++ b/drivers/staging/lustre/lustre/include/lustre_swab.h
@@ -0,0 +1,102 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.gnu.org/licenses/gpl-2.0.html
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2011, 2014, Intel Corporation.
+ *
+ * Copyright 2015 Cray Inc, all rights reserved.
+ * Author: Ben Evans.
+ *
+ * We assume all nodes are either little-endian or big-endian, and we
+ * always send messages in the sender's native format. The receiver
+ * detects the message format by checking the 'magic' field of the message
+ * (see lustre_msg_swabbed()).
+ *
+ * Each wire type has a corresponding 'lustre_swab_xxxtypexxx()' routine,
+ * implemented in ptlrpc/lustre_swab.c. These 'swabbers' convert the
+ * type from "other" endianness, in place in the message buffer.
+ *
+ * A swabber takes a single pointer argument. The caller must already have
+ * verified that the length of the message buffer is >= sizeof(type).
+ *
+ * For variable length types, a second 'lustre_swab_v_xxxtypexxx()' routine
+ * may be defined that swabs just the variable part, after the caller has
+ * verified that the message buffer is large enough.
+ */
+
+#ifndef _LUSTRE_SWAB_H_
+#define _LUSTRE_SWAB_H_
+
+#include "lustre/lustre_idl.h"
+
+void lustre_swab_ptlrpc_body(struct ptlrpc_body *pb);
+void lustre_swab_connect(struct obd_connect_data *ocd);
+void lustre_swab_hsm_user_state(struct hsm_user_state *hus);
+void lustre_swab_hsm_state_set(struct hsm_state_set *hss);
+void lustre_swab_obd_statfs(struct obd_statfs *os);
+void lustre_swab_obd_ioobj(struct obd_ioobj *ioo);
+void lustre_swab_niobuf_remote(struct niobuf_remote *nbr);
+void lustre_swab_ost_lvb_v1(struct ost_lvb_v1 *lvb);
+void lustre_swab_ost_lvb(struct ost_lvb *lvb);
+void lustre_swab_obd_quotactl(struct obd_quotactl *q);
+void lustre_swab_lquota_lvb(struct lquota_lvb *lvb);
+void lustre_swab_generic_32s(__u32 *val);
+void lustre_swab_mdt_body(struct mdt_body *b);
+void lustre_swab_mdt_ioepoch(struct mdt_ioepoch *b);
+void lustre_swab_mdt_rec_setattr(struct mdt_rec_setattr *sa);
+void lustre_swab_mdt_rec_reint(struct mdt_rec_reint *rr);
+void lustre_swab_lmv_desc(struct lmv_desc *ld);
+void lustre_swab_lmv_mds_md(union lmv_mds_md *lmm);
+void lustre_swab_lov_desc(struct lov_desc *ld);
+void lustre_swab_gl_desc(union ldlm_gl_desc *desc);
+void lustre_swab_ldlm_intent(struct ldlm_intent *i);
+void lustre_swab_ldlm_request(struct ldlm_request *rq);
+void lustre_swab_ldlm_reply(struct ldlm_reply *r);
+void lustre_swab_mgs_target_info(struct mgs_target_info *oinfo);
+void lustre_swab_mgs_nidtbl_entry(struct mgs_nidtbl_entry *oinfo);
+void lustre_swab_mgs_config_body(struct mgs_config_body *body);
+void lustre_swab_mgs_config_res(struct mgs_config_res *body);
+void lustre_swab_ost_body(struct ost_body *b);
+void lustre_swab_ost_last_id(__u64 *id);
+void lustre_swab_fiemap(struct fiemap *fiemap);
+void lustre_swab_lov_user_md_v1(struct lov_user_md_v1 *lum);
+void lustre_swab_lov_user_md_v3(struct lov_user_md_v3 *lum);
+void lustre_swab_lov_user_md_objects(struct lov_user_ost_data *lod,
+ int stripe_count);
+void lustre_swab_lov_mds_md(struct lov_mds_md *lmm);
+void lustre_swab_lustre_capa(struct lustre_capa *c);
+void lustre_swab_lustre_capa_key(struct lustre_capa_key *k);
+void lustre_swab_fid2path(struct getinfo_fid2path *gf);
+void lustre_swab_layout_intent(struct layout_intent *li);
+void lustre_swab_hsm_current_action(struct hsm_current_action *action);
+void lustre_swab_hsm_progress_kernel(struct hsm_progress_kernel *hpk);
+void lustre_swab_hsm_user_item(struct hsm_user_item *hui);
+void lustre_swab_hsm_request(struct hsm_request *hr);
+void lustre_swab_swap_layouts(struct mdc_swap_layouts *msl);
+void lustre_swab_close_data(struct close_data *data);
+void lustre_swab_lmv_user_md(struct lmv_user_md *lum);
+
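+/*
+ * A typical fixed-size swabber simply byte-swaps each field in place; a
+ * hedged sketch, the actual ptlrpc/lustre_swab.c code may differ:
+ *
+ *	void lustre_swab_ost_last_id(__u64 *id)
+ *	{
+ *		__swab64s(id);
+ *	}
+ */
+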
+#endif
diff --git a/drivers/staging/lustre/lustre/include/obd.h b/drivers/staging/lustre/lustre/include/obd.h
index f6fc4dd05bd6..0f48e9c3d9e3 100644
--- a/drivers/staging/lustre/lustre/include/obd.h
+++ b/drivers/staging/lustre/lustre/include/obd.h
@@ -73,70 +73,17 @@ static inline void loi_init(struct lov_oinfo *loi)
{
}
-/*
- * If we are unable to get the maximum object size from the OST in
- * ocd_maxbytes using OBD_CONNECT_MAXBYTES, then we fall back to using
- * the old maximum object size from ext3.
- */
-#define LUSTRE_EXT3_STRIPE_MAXBYTES 0x1fffffff000ULL
-
-struct lov_stripe_md {
- atomic_t lsm_refc;
- spinlock_t lsm_lock;
- pid_t lsm_lock_owner; /* debugging */
-
- /* maximum possible file size, might change as OSTs status changes,
- * e.g. disconnected, deactivated
- */
- __u64 lsm_maxbytes;
- struct ost_id lsm_oi;
- __u32 lsm_magic;
- __u32 lsm_stripe_size;
- __u32 lsm_pattern; /* striping pattern (RAID0, RAID1) */
- __u16 lsm_stripe_count;
- __u16 lsm_layout_gen;
- char lsm_pool_name[LOV_MAXPOOLNAME + 1];
- struct lov_oinfo *lsm_oinfo[0];
-};
-
-static inline bool lsm_is_released(struct lov_stripe_md *lsm)
-{
- return !!(lsm->lsm_pattern & LOV_PATTERN_F_RELEASED);
-}
-
-static inline bool lsm_has_objects(struct lov_stripe_md *lsm)
-{
- if (!lsm)
- return false;
- if (lsm_is_released(lsm))
- return false;
- return true;
-}
-
-static inline int lov_stripe_md_size(unsigned int stripe_count)
-{
- struct lov_stripe_md lsm;
-
- return sizeof(lsm) + stripe_count * sizeof(lsm.lsm_oinfo[0]);
-}
-
+struct lov_stripe_md;
struct obd_info;
typedef int (*obd_enqueue_update_f)(void *cookie, int rc);
/* obd info for a particular level (lov, osc). */
struct obd_info {
- /* Flags used for set request specific flags:
- - while lock handling, the flags obtained on the enqueue
- request are set here.
- - while stats, the flags used for control delay/resend.
- - while setattr, the flags used for distinguish punch operation
- */
+ /* OBD_STATFS_* flags */
__u64 oi_flags;
/* lsm data specific for every OSC. */
struct lov_stripe_md *oi_md;
- /* obdo data specific for every OSC, if needed at all. */
- struct obdo *oi_oa;
/* statfs data specific for every OSC, if needed at all. */
struct obd_statfs *oi_osfs;
/* An update callback which is called to update some data on upper
@@ -204,7 +151,6 @@ enum obd_cl_sem_lock_class {
* on the MDS.
*/
#define OBD_MAX_DEFAULT_EA_SIZE 4096
-#define OBD_MAX_DEFAULT_COOKIE_SIZE 4096
struct mdc_rpc_lock;
struct obd_import;
@@ -214,7 +160,7 @@ struct client_obd {
struct obd_import *cl_import; /* ptlrpc connection state */
size_t cl_conn_count;
/*
- * Cache maximum and default values for easize and cookiesize. This is
+ * Cache maximum and default values for easize. This is
* strictly a performance optimization to minimize calls to
* obd_size_diskmd(). The default values are used to calculate the
* initial size of a request buffer. The ptlrpc layer will resize the
@@ -235,18 +181,6 @@ struct client_obd {
* run-time if a larger observed size is advertised by the MDT.
*/
u32 cl_max_mds_easize;
- /* Default cookie size for llog cookies (see struct llog_cookie). It is
- * initialized to zero at mount-time, then it tracks the largest
- * observed cookie size advertised by the MDT, up to a maximum value of
- * OBD_MAX_DEFAULT_COOKIE_SIZE. Note that llog_cookies are not
- * used by clients communicating with MDS versions 2.4.0 and later.
- */
- u32 cl_default_mds_cookiesize;
- /* Maximum possible cookie size computed at mount-time based on
- * the number of OSTs in the filesystem. May be increased at
- * run-time if a larger observed size is advertised by the MDT.
- */
- u32 cl_max_mds_cookiesize;
enum lustre_sec_part cl_sp_me;
enum lustre_sec_part cl_sp_to;
@@ -313,15 +247,42 @@ struct client_obd {
struct obd_histogram cl_read_offset_hist;
struct obd_histogram cl_write_offset_hist;
- /* lru for osc caching pages */
+ /* LRU for osc caching pages */
struct cl_client_cache *cl_cache;
- struct list_head cl_lru_osc; /* member of cl_cache->ccc_lru */
+ /** member of cl_cache->ccc_lru */
+ struct list_head cl_lru_osc;
+ /** # of available LRU slots left in the per-OSC cache.
+ * Available LRU slots are shared by all OSCs of the same file system,
+ * therefore this is a pointer to cl_client_cache::ccc_lru_left.
+ */
atomic_long_t *cl_lru_left;
+ /** # of busy LRU pages. A page is considered busy if it's in writeback
+ * queue, or in transfer. Busy pages can't be discarded so they are not
+ * in LRU cache.
+ */
atomic_long_t cl_lru_busy;
+ /** # of LRU pages in the cache for this client_obd */
atomic_long_t cl_lru_in_list;
+	/** # of threads shrinking the LRU cache. To avoid contention, only
+	 * one thread is allowed to shrink the LRU cache at a time.
+	 */
atomic_t cl_lru_shrinkers;
- struct list_head cl_lru_list; /* lru page list */
- spinlock_t cl_lru_list_lock; /* page list protector */
+ /** The time when this LRU cache was last used. */
+ time64_t cl_lru_last_used;
+	/** stats: how many reclaims have happened for this client_obd.
+	 * Reclaim vs. shrink: shrink is async, voluntary rebalancing;
+	 * reclaim is sync, initiated by an IO thread when LRU slots are
+	 * in short supply.
+	 */
+ u64 cl_lru_reclaim;
+ /** List of LRU pages for this client_obd */
+ struct list_head cl_lru_list;
+ /** Lock for LRU page list */
+ spinlock_t cl_lru_list_lock;
+ /** # of unstable pages in this client_obd.
+	 * An unstable page is one whose WRITE RPC has finished but whose
+	 * transaction has not yet committed.
+ */
atomic_long_t cl_unstable_count;
/* number of in flight destroy rpcs is limited to max_rpcs_in_flight */
@@ -329,7 +290,17 @@ struct client_obd {
wait_queue_head_t cl_destroy_waitq;
struct mdc_rpc_lock *cl_rpc_lock;
- struct mdc_rpc_lock *cl_close_lock;
+
+ /* modify rpcs in flight
+ * currently used for metadata only
+ */
+ spinlock_t cl_mod_rpcs_lock;
+ u16 cl_max_mod_rpcs_in_flight;
+ u16 cl_mod_rpcs_in_flight;
+ u16 cl_close_rpcs_in_flight;
+ wait_queue_head_t cl_mod_rpcs_waitq;
+ unsigned long *cl_mod_tag_bitmap;
+ struct obd_histogram cl_mod_rpcs_hist;
/* mgc datastruct */
atomic_t cl_mgc_refcount;
@@ -345,13 +316,6 @@ struct client_obd {
/* also protected by the poorly named _loi_list_lock lock above */
struct osc_async_rc cl_ar;
- /* used by quotacheck when the servers are older than 2.4 */
- int cl_qchk_stat; /* quotacheck stat of the peer */
-#define CL_NOT_QUOTACHECKED 1 /* client->cl_qchk_stat init value */
-#if OBD_OCD_VERSION(2, 7, 53, 0) < LUSTRE_VERSION_CODE
-#warning "please consider removing quotacheck compatibility code"
-#endif
-
/* sequence manager */
struct lu_client_seq *cl_seq;
@@ -454,8 +418,6 @@ struct lmv_obd {
int connected;
int max_easize;
int max_def_easize;
- int max_cookiesize;
- int max_def_cookiesize;
u32 tgts_size; /* size of tgts array */
struct lmv_tgt_desc **tgts;
@@ -469,9 +431,9 @@ struct niobuf_local {
__u32 lnb_page_offset;
__u32 lnb_len;
__u32 lnb_flags;
+ int lnb_rc;
struct page *lnb_page;
void *lnb_data;
- int lnb_rc;
};
#define LUSTRE_FLD_NAME "fld"
@@ -512,21 +474,6 @@ struct niobuf_local {
/* Don't conflict with on-wire flags OBD_BRW_WRITE, etc */
#define N_LOCAL_TEMP_PAGE 0x10000000
-struct obd_trans_info {
- __u64 oti_xid;
- /* Only used on the server side for tracking acks. */
- struct oti_req_ack_lock {
- struct lustre_handle lock;
- __u32 mode;
- } oti_ack_locks[4];
- void *oti_handle;
- struct llog_cookie oti_onecookie;
- struct llog_cookie *oti_logcookies;
-
- /** VBR: versions */
- __u64 oti_pre_version;
-};
-
/*
* Events signalled through obd_notify() upcall-chain.
*/
@@ -587,15 +534,14 @@ struct lvfs_run_ctxt {
struct obd_device {
struct obd_type *obd_type;
- __u32 obd_magic;
+ u32 obd_magic; /* OBD_DEVICE_MAGIC */
+ int obd_minor; /* device number: lctl dl */
+ struct lu_device *obd_lu_dev;
/* common and UUID name of this device */
- char obd_name[MAX_OBD_NAME];
- struct obd_uuid obd_uuid;
-
- struct lu_device *obd_lu_dev;
+ struct obd_uuid obd_uuid;
+ char obd_name[MAX_OBD_NAME];
- int obd_minor;
/* bitfield modification is protected by obd_dev_lock */
unsigned long obd_attached:1, /* finished attach */
obd_set_up:1, /* finished setup */
@@ -619,22 +565,22 @@ struct obd_device {
unsigned long obd_recovery_expired:1;
/* uuid-export hash body */
struct cfs_hash *obd_uuid_hash;
- atomic_t obd_refcount;
wait_queue_head_t obd_refcount_waitq;
struct list_head obd_exports;
struct list_head obd_unlinked_exports;
struct list_head obd_delayed_exports;
+ atomic_t obd_refcount;
int obd_num_exports;
spinlock_t obd_nid_lock;
struct ldlm_namespace *obd_namespace;
struct ptlrpc_client obd_ldlm_client; /* XXX OST/MDS only */
/* a spinlock is OK for what we do now, may need a semaphore later */
spinlock_t obd_dev_lock; /* protect OBD bitfield above */
- struct mutex obd_dev_mutex;
- __u64 obd_last_committed;
spinlock_t obd_osfs_lock;
struct obd_statfs obd_osfs; /* locked by obd_osfs_lock */
__u64 obd_osfs_age;
+ u64 obd_last_committed;
+ struct mutex obd_dev_mutex;
struct lvfs_run_ctxt obd_lvfs_ctxt;
struct obd_llog_group obd_olg; /* default llog group */
struct obd_device *obd_observer;
@@ -648,12 +594,13 @@ struct obd_device {
struct lov_obd lov;
struct lmv_obd lmv;
} u;
+
/* Fields used by LProcFS */
- unsigned int obd_cntr_base;
- struct lprocfs_stats *obd_stats;
+ struct lprocfs_stats *obd_stats;
+ unsigned int obd_cntr_base;
- unsigned int md_cntr_base;
- struct lprocfs_stats *md_stats;
+ struct lprocfs_stats *md_stats;
+ unsigned int md_cntr_base;
struct dentry *obd_debugfs_entry;
struct dentry *obd_svc_debugfs_entry;
@@ -665,9 +612,11 @@ struct obd_device {
/**
* Ldlm pool part. Save last calculated SLV and Limit.
*/
- rwlock_t obd_pool_lock;
- int obd_pool_limit;
- __u64 obd_pool_slv;
+ rwlock_t obd_pool_lock;
+ u64 obd_pool_slv;
+ int obd_pool_limit;
+
+ int obd_conn_inprogress;
/**
* A list of outstanding class_incref()'s against this obd. For
@@ -675,19 +624,10 @@ struct obd_device {
*/
struct lu_ref obd_reference;
- int obd_conn_inprogress;
-
struct kobject obd_kobj; /* sysfs object */
struct completion obd_kobj_unregister;
};
-enum obd_cleanup_stage {
-/* Special case hack for MDS LOVs */
- OBD_CLEANUP_EARLY,
-/* can be directly mapped to .ldto_device_fini() */
- OBD_CLEANUP_EXPORTS,
-};
-
/* get/set_info keys */
#define KEY_ASYNC "async"
#define KEY_CHANGELOG_CLEAR "changelog_clear"
@@ -704,7 +644,6 @@ enum obd_cleanup_stage {
#define KEY_INTERMDS "inter_mds"
#define KEY_LAST_ID "last_id"
#define KEY_LAST_FID "last_fid"
-#define KEY_LOVDESC "lovdesc"
#define KEY_MAX_EASIZE "max_easize"
#define KEY_DEFAULT_EASIZE "default_easize"
#define KEY_MGSSEC "mgssec"
@@ -720,22 +659,6 @@ enum obd_cleanup_stage {
struct lu_context;
-/* /!\ must be coherent with include/linux/namei.h on patched kernel */
-#define IT_OPEN (1 << 0)
-#define IT_CREAT (1 << 1)
-#define IT_READDIR (1 << 2)
-#define IT_GETATTR (1 << 3)
-#define IT_LOOKUP (1 << 4)
-#define IT_UNLINK (1 << 5)
-#define IT_TRUNC (1 << 6)
-#define IT_GETXATTR (1 << 7)
-#define IT_EXEC (1 << 8)
-#define IT_PIN (1 << 9)
-#define IT_LAYOUT (1 << 10)
-#define IT_QUOTA_DQACQ (1 << 11)
-#define IT_QUOTA_CONN (1 << 12)
-#define IT_SETXATTR (1 << 13)
-
static inline int it_to_lock_mode(struct lookup_intent *it)
{
/* CREAT needs to be tested before open (both could be set) */
@@ -755,6 +678,14 @@ static inline int it_to_lock_mode(struct lookup_intent *it)
return -EINVAL;
}
+enum md_op_flags {
+ MF_MDC_CANCEL_FID1 = BIT(0),
+ MF_MDC_CANCEL_FID2 = BIT(1),
+ MF_MDC_CANCEL_FID3 = BIT(2),
+ MF_MDC_CANCEL_FID4 = BIT(3),
+ MF_GET_MDT_IDX = BIT(4),
+};
+
enum md_cli_flags {
CLI_SET_MEA = BIT(0),
CLI_RM_ENTRY = BIT(1),
@@ -789,8 +720,6 @@ struct md_op_data {
__u64 op_valid;
loff_t op_attr_blocks;
- /* Size-on-MDS epoch and flags. */
- __u64 op_ioepoch;
__u32 op_flags;
/* Various operation flags. */
@@ -839,15 +768,13 @@ struct obd_ops {
int (*iocontrol)(unsigned int cmd, struct obd_export *exp, int len,
void *karg, void __user *uarg);
int (*get_info)(const struct lu_env *env, struct obd_export *,
- __u32 keylen, void *key, __u32 *vallen, void *val,
- struct lov_stripe_md *lsm);
+ __u32 keylen, void *key, __u32 *vallen, void *val);
int (*set_info_async)(const struct lu_env *, struct obd_export *,
__u32 keylen, void *key,
__u32 vallen, void *val,
struct ptlrpc_request_set *set);
int (*setup)(struct obd_device *dev, struct lustre_cfg *cfg);
- int (*precleanup)(struct obd_device *dev,
- enum obd_cleanup_stage cleanup_stage);
+ int (*precleanup)(struct obd_device *dev);
int (*cleanup)(struct obd_device *dev);
int (*process_config)(struct obd_device *dev, u32 len, void *data);
int (*postrecov)(struct obd_device *dev);
@@ -887,35 +814,23 @@ struct obd_ops {
struct obd_statfs *osfs, __u64 max_age, __u32 flags);
int (*statfs_async)(struct obd_export *exp, struct obd_info *oinfo,
__u64 max_age, struct ptlrpc_request_set *set);
- int (*packmd)(struct obd_export *exp, struct lov_mds_md **disk_tgt,
- struct lov_stripe_md *mem_src);
- int (*unpackmd)(struct obd_export *exp,
- struct lov_stripe_md **mem_tgt,
- struct lov_mds_md *disk_src, int disk_len);
int (*create)(const struct lu_env *env, struct obd_export *exp,
- struct obdo *oa, struct obd_trans_info *oti);
+ struct obdo *oa);
int (*destroy)(const struct lu_env *env, struct obd_export *exp,
- struct obdo *oa, struct obd_trans_info *oti);
+ struct obdo *oa);
int (*setattr)(const struct lu_env *, struct obd_export *exp,
- struct obd_info *oinfo, struct obd_trans_info *oti);
- int (*setattr_async)(struct obd_export *exp, struct obd_info *oinfo,
- struct obd_trans_info *oti,
- struct ptlrpc_request_set *rqset);
+ struct obdo *oa);
int (*getattr)(const struct lu_env *env, struct obd_export *exp,
- struct obd_info *oinfo);
- int (*getattr_async)(struct obd_export *exp, struct obd_info *oinfo,
- struct ptlrpc_request_set *set);
+ struct obdo *oa);
int (*preprw)(const struct lu_env *env, int cmd,
struct obd_export *exp, struct obdo *oa, int objcount,
struct obd_ioobj *obj, struct niobuf_remote *remote,
- int *nr_pages, struct niobuf_local *local,
- struct obd_trans_info *oti);
+ int *nr_pages, struct niobuf_local *local);
int (*commitrw)(const struct lu_env *env, int cmd,
struct obd_export *exp, struct obdo *oa,
int objcount, struct obd_ioobj *obj,
struct niobuf_remote *remote, int pages,
- struct niobuf_local *local,
- struct obd_trans_info *oti, int rc);
+ struct niobuf_local *local, int rc);
int (*init_export)(struct obd_export *exp);
int (*destroy_export)(struct obd_export *exp);
@@ -930,8 +845,6 @@ struct obd_ops {
struct obd_uuid *(*get_uuid)(struct obd_export *exp);
/* quota methods */
- int (*quotacheck)(struct obd_device *, struct obd_export *,
- struct obd_quotactl *);
int (*quotactl)(struct obd_device *, struct obd_export *,
struct obd_quotactl *);
@@ -954,7 +867,7 @@ struct obd_ops {
/* lmv structures */
struct lustre_md {
struct mdt_body *body;
- struct lov_stripe_md *lsm;
+ struct lu_buf layout;
struct lmv_stripe_md *lmv;
#ifdef CONFIG_FS_POSIX_ACL
struct posix_acl *posix_acl;
@@ -992,10 +905,8 @@ struct md_ops {
int (*create)(struct obd_export *, struct md_op_data *,
const void *, size_t, umode_t, uid_t, gid_t,
cfs_cap_t, __u64, struct ptlrpc_request **);
- int (*done_writing)(struct obd_export *, struct md_op_data *,
- struct md_open_data *);
int (*enqueue)(struct obd_export *, struct ldlm_enqueue_info *,
- const ldlm_policy_data_t *,
+ const union ldlm_policy_data *,
struct lookup_intent *, struct md_op_data *,
struct lustre_handle *, __u64);
int (*getattr)(struct obd_export *, struct md_op_data *,
@@ -1012,8 +923,7 @@ struct md_ops {
const char *, size_t, const char *, size_t,
struct ptlrpc_request **);
int (*setattr)(struct obd_export *, struct md_op_data *, void *,
- size_t, void *, size_t, struct ptlrpc_request **,
- struct md_open_data **mod);
+ size_t, struct ptlrpc_request **);
int (*sync)(struct obd_export *, const struct lu_fid *,
struct ptlrpc_request **);
int (*read_page)(struct obd_export *, struct md_op_data *,
@@ -1030,7 +940,7 @@ struct md_ops {
u64, const char *, const char *, int, int, int,
struct ptlrpc_request **);
- int (*init_ea_size)(struct obd_export *, u32, u32, u32, u32);
+ int (*init_ea_size)(struct obd_export *, u32, u32);
int (*get_lustre_md)(struct obd_export *, struct ptlrpc_request *,
struct obd_export *, struct obd_export *,
@@ -1052,11 +962,11 @@ struct md_ops {
enum ldlm_mode (*lock_match)(struct obd_export *, __u64,
const struct lu_fid *, enum ldlm_type,
- ldlm_policy_data_t *, enum ldlm_mode,
+ union ldlm_policy_data *, enum ldlm_mode,
struct lustre_handle *);
int (*cancel_unused)(struct obd_export *, const struct lu_fid *,
- ldlm_policy_data_t *, enum ldlm_mode,
+ union ldlm_policy_data *, enum ldlm_mode,
enum ldlm_cancel_flags flags, void *opaque);
int (*get_fid_from_lsm)(struct obd_export *,
@@ -1071,6 +981,8 @@ struct md_ops {
int (*revalidate_lock)(struct obd_export *, struct lookup_intent *,
struct lu_fid *, __u64 *bits);
+ int (*unpackmd)(struct obd_export *exp, struct lmv_stripe_md **plsm,
+ const union lmv_mds_md *lmv, size_t lmv_size);
/*
* NOTE: If adding ops, add another LPROCFS_MD_OP_INIT() line to
* lprocfs_alloc_md_stats() in obdclass/lprocfs_status.c. Also, add a
@@ -1078,33 +990,6 @@ struct md_ops {
*/
};
-struct lsm_operations {
- void (*lsm_free)(struct lov_stripe_md *);
- void (*lsm_stripe_by_index)(struct lov_stripe_md *, int *, u64 *,
- u64 *);
- void (*lsm_stripe_by_offset)(struct lov_stripe_md *, int *, u64 *,
- u64 *);
- int (*lsm_lmm_verify)(struct lov_mds_md *lmm, int lmm_bytes,
- __u16 *stripe_count);
- int (*lsm_unpackmd)(struct lov_obd *lov, struct lov_stripe_md *lsm,
- struct lov_mds_md *lmm);
-};
-
-extern const struct lsm_operations lsm_v1_ops;
-extern const struct lsm_operations lsm_v3_ops;
-static inline const struct lsm_operations *lsm_op_find(int magic)
-{
- switch (magic) {
- case LOV_MAGIC_V1:
- return &lsm_v1_ops;
- case LOV_MAGIC_V3:
- return &lsm_v3_ops;
- default:
- CERROR("Cannot recognize lsm_magic %08x\n", magic);
- return NULL;
- }
-}
-
static inline struct md_open_data *obd_mod_alloc(void)
{
struct md_open_data *mod;
diff --git a/drivers/staging/lustre/lustre/include/obd_class.h b/drivers/staging/lustre/lustre/include/obd_class.h
index 16094dbec08b..7ec25202cd22 100644
--- a/drivers/staging/lustre/lustre/include/obd_class.h
+++ b/drivers/staging/lustre/lustre/include/obd_class.h
@@ -100,6 +100,13 @@ int obd_get_request_slot(struct client_obd *cli);
void obd_put_request_slot(struct client_obd *cli);
__u32 obd_get_max_rpcs_in_flight(struct client_obd *cli);
int obd_set_max_rpcs_in_flight(struct client_obd *cli, __u32 max);
+int obd_set_max_mod_rpcs_in_flight(struct client_obd *cli, u16 max);
+int obd_mod_rpc_stats_seq_show(struct client_obd *cli, struct seq_file *seq);
+
+u16 obd_get_mod_rpc_slot(struct client_obd *cli, u32 opc,
+ struct lookup_intent *it);
+void obd_put_mod_rpc_slot(struct client_obd *cli, u32 opc,
+ struct lookup_intent *it, u16 tag);
struct llog_handle;
struct llog_rec_hdr;
@@ -175,10 +182,13 @@ struct lustre_profile {
char *lp_profile;
char *lp_dt;
char *lp_md;
+ int lp_refs;
+ bool lp_list_deleted;
};
struct lustre_profile *class_get_profile(const char *prof);
void class_del_profile(const char *prof);
+void class_put_profile(struct lustre_profile *lprof);
void class_del_profiles(void);
#if LUSTRE_TRACKS_LOCK_EXP_REFS
@@ -269,10 +279,8 @@ static inline int lprocfs_climp_check(struct obd_device *obd)
struct inode;
struct lu_attr;
struct obdo;
-void obdo_refresh_inode(struct inode *dst, const struct obdo *src, u32 valid);
void obdo_to_ioobj(const struct obdo *oa, struct obd_ioobj *ioobj);
-void md_from_obdo(struct md_op_data *op_data, const struct obdo *oa, u32 valid);
#define OBT(dev) (dev)->obd_type
#define OBP(dev, op) (dev)->obd_type->typ_dt_ops->op
@@ -417,16 +425,14 @@ static inline int class_devno_max(void)
static inline int obd_get_info(const struct lu_env *env,
struct obd_export *exp, __u32 keylen,
- void *key, __u32 *vallen, void *val,
- struct lov_stripe_md *lsm)
+ void *key, __u32 *vallen, void *val)
{
int rc;
EXP_CHECK_DT_OP(exp, get_info);
EXP_COUNTER_INCREMENT(exp, get_info);
- rc = OBP(exp->exp_obd, get_info)(env, exp, keylen, key, vallen, val,
- lsm);
+ rc = OBP(exp->exp_obd, get_info)(env, exp, keylen, key, vallen, val);
return rc;
}
@@ -505,8 +511,7 @@ static inline int obd_setup(struct obd_device *obd, struct lustre_cfg *cfg)
return rc;
}
-static inline int obd_precleanup(struct obd_device *obd,
- enum obd_cleanup_stage cleanup_stage)
+static inline int obd_precleanup(struct obd_device *obd)
{
int rc;
DECLARE_LU_VARS(ldt, d);
@@ -517,20 +522,18 @@ static inline int obd_precleanup(struct obd_device *obd,
ldt = obd->obd_type->typ_lu;
d = obd->obd_lu_dev;
if (ldt && d) {
- if (cleanup_stage == OBD_CLEANUP_EXPORTS) {
- struct lu_env env;
+ struct lu_env env;
- rc = lu_env_init(&env, ldt->ldt_ctx_tags);
- if (rc == 0) {
- ldt->ldt_ops->ldto_device_fini(&env, d);
- lu_env_fini(&env);
- }
+ rc = lu_env_init(&env, ldt->ldt_ctx_tags);
+ if (!rc) {
+ ldt->ldt_ops->ldto_device_fini(&env, d);
+ lu_env_fini(&env);
}
}
OBD_CHECK_DT_OP(obd, precleanup, 0);
OBD_COUNTER_INCREMENT(obd, precleanup);
- rc = OBP(obd, precleanup)(obd, cleanup_stage);
+ rc = OBP(obd, precleanup)(obd);
return rc;
}
@@ -612,181 +615,51 @@ obd_process_config(struct obd_device *obd, int datalen, void *data)
return rc;
}
-/* Pack an in-memory MD struct for storage on disk.
- * Returns +ve size of packed MD (0 for free), or -ve error.
- *
- * If @disk_tgt == NULL, MD size is returned (max size if @mem_src == NULL).
- * If @*disk_tgt != NULL and @mem_src == NULL, @*disk_tgt will be freed.
- * If @*disk_tgt == NULL, it will be allocated
- */
-static inline int obd_packmd(struct obd_export *exp,
- struct lov_mds_md **disk_tgt,
- struct lov_stripe_md *mem_src)
-{
- int rc;
-
- EXP_CHECK_DT_OP(exp, packmd);
- EXP_COUNTER_INCREMENT(exp, packmd);
-
- rc = OBP(exp->exp_obd, packmd)(exp, disk_tgt, mem_src);
- return rc;
-}
-
-static inline int obd_size_diskmd(struct obd_export *exp,
- struct lov_stripe_md *mem_src)
-{
- return obd_packmd(exp, NULL, mem_src);
-}
-
-static inline int obd_free_diskmd(struct obd_export *exp,
- struct lov_mds_md **disk_tgt)
-{
- LASSERT(disk_tgt);
- LASSERT(*disk_tgt);
- /*
- * LU-2590, for caller's convenience, *disk_tgt could be host
- * endianness, it needs swab to LE if necessary, while just
- * lov_mds_md header needs it for figuring out how much memory
- * needs to be freed.
- */
- if ((cpu_to_le32(LOV_MAGIC) != LOV_MAGIC) &&
- (((*disk_tgt)->lmm_magic == LOV_MAGIC_V1) ||
- ((*disk_tgt)->lmm_magic == LOV_MAGIC_V3)))
- lustre_swab_lov_mds_md(*disk_tgt);
- return obd_packmd(exp, disk_tgt, NULL);
-}
-
-/* Unpack an MD struct from disk to in-memory format.
- * Returns +ve size of unpacked MD (0 for free), or -ve error.
- *
- * If @mem_tgt == NULL, MD size is returned (max size if @disk_src == NULL).
- * If @*mem_tgt != NULL and @disk_src == NULL, @*mem_tgt will be freed.
- * If @*mem_tgt == NULL, it will be allocated
- */
-static inline int obd_unpackmd(struct obd_export *exp,
- struct lov_stripe_md **mem_tgt,
- struct lov_mds_md *disk_src,
- int disk_len)
-{
- int rc;
-
- EXP_CHECK_DT_OP(exp, unpackmd);
- EXP_COUNTER_INCREMENT(exp, unpackmd);
-
- rc = OBP(exp->exp_obd, unpackmd)(exp, mem_tgt, disk_src, disk_len);
- return rc;
-}
-
-static inline int obd_free_memmd(struct obd_export *exp,
- struct lov_stripe_md **mem_tgt)
-{
- int rc;
-
- LASSERT(mem_tgt);
- LASSERT(*mem_tgt);
- rc = obd_unpackmd(exp, mem_tgt, NULL, 0);
- *mem_tgt = NULL;
- return rc;
-}
-
static inline int obd_create(const struct lu_env *env, struct obd_export *exp,
- struct obdo *obdo, struct obd_trans_info *oti)
+ struct obdo *obdo)
{
int rc;
EXP_CHECK_DT_OP(exp, create);
EXP_COUNTER_INCREMENT(exp, create);
- rc = OBP(exp->exp_obd, create)(env, exp, obdo, oti);
+ rc = OBP(exp->exp_obd, create)(env, exp, obdo);
return rc;
}
static inline int obd_destroy(const struct lu_env *env, struct obd_export *exp,
- struct obdo *obdo, struct obd_trans_info *oti)
+ struct obdo *obdo)
{
int rc;
EXP_CHECK_DT_OP(exp, destroy);
EXP_COUNTER_INCREMENT(exp, destroy);
- rc = OBP(exp->exp_obd, destroy)(env, exp, obdo, oti);
+ rc = OBP(exp->exp_obd, destroy)(env, exp, obdo);
return rc;
}
static inline int obd_getattr(const struct lu_env *env, struct obd_export *exp,
- struct obd_info *oinfo)
+ struct obdo *oa)
{
int rc;
EXP_CHECK_DT_OP(exp, getattr);
EXP_COUNTER_INCREMENT(exp, getattr);
- rc = OBP(exp->exp_obd, getattr)(env, exp, oinfo);
- return rc;
-}
-
-static inline int obd_getattr_async(struct obd_export *exp,
- struct obd_info *oinfo,
- struct ptlrpc_request_set *set)
-{
- int rc;
-
- EXP_CHECK_DT_OP(exp, getattr_async);
- EXP_COUNTER_INCREMENT(exp, getattr_async);
-
- rc = OBP(exp->exp_obd, getattr_async)(exp, oinfo, set);
+ rc = OBP(exp->exp_obd, getattr)(env, exp, oa);
return rc;
}
static inline int obd_setattr(const struct lu_env *env, struct obd_export *exp,
- struct obd_info *oinfo,
- struct obd_trans_info *oti)
+ struct obdo *oa)
{
int rc;
EXP_CHECK_DT_OP(exp, setattr);
EXP_COUNTER_INCREMENT(exp, setattr);
- rc = OBP(exp->exp_obd, setattr)(env, exp, oinfo, oti);
- return rc;
-}
-
-/* This performs all the requests set init/wait/destroy actions. */
-static inline int obd_setattr_rqset(struct obd_export *exp,
- struct obd_info *oinfo,
- struct obd_trans_info *oti)
-{
- struct ptlrpc_request_set *set = NULL;
- int rc;
-
- EXP_CHECK_DT_OP(exp, setattr_async);
- EXP_COUNTER_INCREMENT(exp, setattr_async);
-
- set = ptlrpc_prep_set();
- if (!set)
- return -ENOMEM;
-
- rc = OBP(exp->exp_obd, setattr_async)(exp, oinfo, oti, set);
- if (rc == 0)
- rc = ptlrpc_set_wait(set);
- ptlrpc_set_destroy(set);
- return rc;
-}
-
-/* This adds all the requests into @set if @set != NULL, otherwise
- * all requests are sent asynchronously without waiting for response.
- */
-static inline int obd_setattr_async(struct obd_export *exp,
- struct obd_info *oinfo,
- struct obd_trans_info *oti,
- struct ptlrpc_request_set *set)
-{
- int rc;
-
- EXP_CHECK_DT_OP(exp, setattr_async);
- EXP_COUNTER_INCREMENT(exp, setattr_async);
-
- rc = OBP(exp->exp_obd, setattr_async)(exp, oinfo, oti, set);
+ rc = OBP(exp->exp_obd, setattr)(env, exp, oa);
return rc;
}
@@ -1053,15 +926,16 @@ static inline int obd_statfs_rqset(struct obd_export *exp,
__u32 flags)
{
struct ptlrpc_request_set *set = NULL;
- struct obd_info oinfo = { };
+ struct obd_info oinfo = {
+ .oi_osfs = osfs,
+ .oi_flags = flags,
+ };
int rc = 0;
- set = ptlrpc_prep_set();
+ set = ptlrpc_prep_set();
if (!set)
return -ENOMEM;
- oinfo.oi_osfs = osfs;
- oinfo.oi_flags = flags;
rc = obd_statfs_async(exp, &oinfo, max_age, set);
if (rc == 0)
rc = ptlrpc_set_wait(set);
@@ -1112,8 +986,7 @@ static inline int obd_preprw(const struct lu_env *env, int cmd,
struct obd_export *exp, struct obdo *oa,
int objcount, struct obd_ioobj *obj,
struct niobuf_remote *remote, int *pages,
- struct niobuf_local *local,
- struct obd_trans_info *oti)
+ struct niobuf_local *local)
{
int rc;
@@ -1121,7 +994,7 @@ static inline int obd_preprw(const struct lu_env *env, int cmd,
EXP_COUNTER_INCREMENT(exp, preprw);
rc = OBP(exp->exp_obd, preprw)(env, cmd, exp, oa, objcount, obj, remote,
- pages, local, oti);
+ pages, local);
return rc;
}
@@ -1129,14 +1002,13 @@ static inline int obd_commitrw(const struct lu_env *env, int cmd,
struct obd_export *exp, struct obdo *oa,
int objcount, struct obd_ioobj *obj,
struct niobuf_remote *rnb, int pages,
- struct niobuf_local *local,
- struct obd_trans_info *oti, int rc)
+ struct niobuf_local *local, int rc)
{
EXP_CHECK_DT_OP(exp, commitrw);
EXP_COUNTER_INCREMENT(exp, commitrw);
rc = OBP(exp->exp_obd, commitrw)(env, cmd, exp, oa, objcount, obj,
- rnb, pages, local, oti, rc);
+ rnb, pages, local, rc);
return rc;
}
@@ -1219,18 +1091,6 @@ static inline int obd_notify_observer(struct obd_device *observer,
return rc1 ? rc1 : rc2;
}
-static inline int obd_quotacheck(struct obd_export *exp,
- struct obd_quotactl *oqctl)
-{
- int rc;
-
- EXP_CHECK_DT_OP(exp, quotacheck);
- EXP_COUNTER_INCREMENT(exp, quotacheck);
-
- rc = OBP(exp->exp_obd, quotacheck)(exp->exp_obd, exp, oqctl);
- return rc;
-}
-
static inline int obd_quotactl(struct obd_export *exp,
struct obd_quotactl *oqctl)
{
@@ -1346,21 +1206,9 @@ static inline int md_create(struct obd_export *exp, struct md_op_data *op_data,
return rc;
}
-static inline int md_done_writing(struct obd_export *exp,
- struct md_op_data *op_data,
- struct md_open_data *mod)
-{
- int rc;
-
- EXP_CHECK_MD_OP(exp, done_writing);
- EXP_MD_COUNTER_INCREMENT(exp, done_writing);
- rc = MDP(exp->exp_obd, done_writing)(exp, op_data, mod);
- return rc;
-}
-
static inline int md_enqueue(struct obd_export *exp,
struct ldlm_enqueue_info *einfo,
- const ldlm_policy_data_t *policy,
+ const union ldlm_policy_data *policy,
struct lookup_intent *it,
struct md_op_data *op_data,
struct lustre_handle *lockh,
@@ -1428,16 +1276,14 @@ static inline int md_rename(struct obd_export *exp, struct md_op_data *op_data,
}
static inline int md_setattr(struct obd_export *exp, struct md_op_data *op_data,
- void *ea, size_t ealen, void *ea2, size_t ea2len,
- struct ptlrpc_request **request,
- struct md_open_data **mod)
+ void *ea, size_t ealen,
+ struct ptlrpc_request **request)
{
int rc;
EXP_CHECK_MD_OP(exp, setattr);
EXP_MD_COUNTER_INCREMENT(exp, setattr);
- rc = MDP(exp->exp_obd, setattr)(exp, op_data, ea, ealen,
- ea2, ea2len, request, mod);
+ rc = MDP(exp->exp_obd, setattr)(exp, op_data, ea, ealen, request);
return rc;
}
@@ -1561,7 +1407,7 @@ static inline int md_set_lock_data(struct obd_export *exp,
static inline int md_cancel_unused(struct obd_export *exp,
const struct lu_fid *fid,
- ldlm_policy_data_t *policy,
+ union ldlm_policy_data *policy,
enum ldlm_mode mode,
enum ldlm_cancel_flags flags,
void *opaque)
@@ -1579,7 +1425,7 @@ static inline int md_cancel_unused(struct obd_export *exp,
static inline enum ldlm_mode md_lock_match(struct obd_export *exp, __u64 flags,
const struct lu_fid *fid,
enum ldlm_type type,
- ldlm_policy_data_t *policy,
+ union ldlm_policy_data *policy,
enum ldlm_mode mode,
struct lustre_handle *lockh)
{
@@ -1589,14 +1435,12 @@ static inline enum ldlm_mode md_lock_match(struct obd_export *exp, __u64 flags,
policy, mode, lockh);
}
-static inline int md_init_ea_size(struct obd_export *exp, int easize,
- int def_asize, int cookiesize,
- int def_cookiesize)
+static inline int md_init_ea_size(struct obd_export *exp, u32 easize,
+ u32 def_asize)
{
EXP_CHECK_MD_OP(exp, init_ea_size);
EXP_MD_COUNTER_INCREMENT(exp, init_ea_size);
- return MDP(exp->exp_obd, init_ea_size)(exp, easize, def_asize,
- cookiesize, def_cookiesize);
+ return MDP(exp->exp_obd, init_ea_size)(exp, easize, def_asize);
}
static inline int md_intent_getattr_async(struct obd_export *exp,
@@ -1636,6 +1480,24 @@ static inline int md_get_fid_from_lsm(struct obd_export *exp,
return rc;
}
+/* Unpack an MD struct from disk to in-memory format.
+ * Returns +ve size of unpacked MD (0 for free), or -ve error.
+ *
+ * If *plsm != NULL and lmm == NULL then *plsm will be freed.
+ * If *plsm == NULL then it will be allocated.
+ */
+static inline int md_unpackmd(struct obd_export *exp,
+ struct lmv_stripe_md **plsm,
+ const union lmv_mds_md *lmm, size_t lmm_size)
+{
+ int rc;
+
+ EXP_CHECK_MD_OP(exp, unpackmd);
+ EXP_MD_COUNTER_INCREMENT(exp, unpackmd);
+ rc = MDP(exp->exp_obd, unpackmd)(exp, plsm, lmm, lmm_size);
+ return rc;
+}
+
/* OBD Metadata Support */
int obd_init_caches(void);
diff --git a/drivers/staging/lustre/lustre/include/obd_support.h b/drivers/staging/lustre/lustre/include/obd_support.h
index b346a7f10aa4..aaedec7d793c 100644
--- a/drivers/staging/lustre/lustre/include/obd_support.h
+++ b/drivers/staging/lustre/lustre/include/obd_support.h
@@ -172,14 +172,14 @@ extern char obd_jobid_var[];
#define OBD_FAIL_MDS_ALL_REQUEST_NET 0x123
#define OBD_FAIL_MDS_SYNC_NET 0x124
#define OBD_FAIL_MDS_SYNC_PACK 0x125
-#define OBD_FAIL_MDS_DONE_WRITING_NET 0x126
-#define OBD_FAIL_MDS_DONE_WRITING_PACK 0x127
+/* OBD_FAIL_MDS_DONE_WRITING_NET 0x126 obsolete since 2.8.0 */
+/* OBD_FAIL_MDS_DONE_WRITING_PACK 0x127 obsolete since 2.8.0 */
#define OBD_FAIL_MDS_ALLOC_OBDO 0x128
#define OBD_FAIL_MDS_PAUSE_OPEN 0x129
#define OBD_FAIL_MDS_STATFS_LCW_SLEEP 0x12a
#define OBD_FAIL_MDS_OPEN_CREATE 0x12b
#define OBD_FAIL_MDS_OST_SETATTR 0x12c
-#define OBD_FAIL_MDS_QUOTACHECK_NET 0x12d
+/* OBD_FAIL_MDS_QUOTACHECK_NET 0x12d obsolete since 2.4 */
#define OBD_FAIL_MDS_QUOTACTL_NET 0x12e
#define OBD_FAIL_MDS_CLIENT_ADD 0x12f
#define OBD_FAIL_MDS_GETXATTR_NET 0x130
@@ -264,7 +264,7 @@ extern char obd_jobid_var[];
#define OBD_FAIL_OST_ENOSPC 0x215
#define OBD_FAIL_OST_EROFS 0x216
#define OBD_FAIL_OST_ENOENT 0x217
-#define OBD_FAIL_OST_QUOTACHECK_NET 0x218
+/* OBD_FAIL_OST_QUOTACHECK_NET 0x218 obsolete since 2.4 */
#define OBD_FAIL_OST_QUOTACTL_NET 0x219
#define OBD_FAIL_OST_CHECKSUM_RECEIVE 0x21a
#define OBD_FAIL_OST_CHECKSUM_SEND 0x21b
@@ -321,6 +321,8 @@ extern char obd_jobid_var[];
#define OBD_FAIL_LDLM_CP_CB_WAIT4 0x322
#define OBD_FAIL_LDLM_CP_CB_WAIT5 0x323
+#define OBD_FAIL_LDLM_GRANT_CHECK 0x32a
+
/* LOCKLESS IO */
#define OBD_FAIL_LDLM_SET_CONTENTION 0x385
@@ -343,6 +345,7 @@ extern char obd_jobid_var[];
#define OBD_FAIL_OSC_CP_ENQ_RACE 0x410
#define OBD_FAIL_OSC_NO_GRANT 0x411
#define OBD_FAIL_OSC_DELAY_SETTIME 0x412
+#define OBD_FAIL_OSC_DELAY_IO 0x414
#define OBD_FAIL_PTLRPC 0x500
#define OBD_FAIL_PTLRPC_ACK 0x501
@@ -373,7 +376,7 @@ extern char obd_jobid_var[];
#define OBD_FAIL_OBD_PING_NET 0x600
#define OBD_FAIL_OBD_LOG_CANCEL_NET 0x601
#define OBD_FAIL_OBD_LOGD_NET 0x602
-#define OBD_FAIL_OBD_QC_CALLBACK_NET 0x603
+/* OBD_FAIL_OBD_QC_CALLBACK_NET 0x603 obsolete since 2.4 */
#define OBD_FAIL_OBD_DQACQ 0x604
#define OBD_FAIL_OBD_LLOG_SETUP 0x605
#define OBD_FAIL_OBD_LOG_CANCEL_REP 0x606
@@ -458,6 +461,8 @@ extern char obd_jobid_var[];
#define OBD_FAIL_LOV_INIT 0x1403
#define OBD_FAIL_GLIMPSE_DELAY 0x1404
#define OBD_FAIL_LLITE_XATTR_ENOMEM 0x1405
+#define OBD_FAIL_MAKE_LOVEA_HOLE 0x1406
+#define OBD_FAIL_LLITE_LOST_LAYOUT 0x1407
#define OBD_FAIL_GETATTR_DELAY 0x1409
#define OBD_FAIL_FID_INDIR 0x1501
diff --git a/drivers/staging/lustre/lustre/include/seq_range.h b/drivers/staging/lustre/lustre/include/seq_range.h
new file mode 100644
index 000000000000..30c4dd66d5c4
--- /dev/null
+++ b/drivers/staging/lustre/lustre/include/seq_range.h
@@ -0,0 +1,199 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.gnu.org/licenses/gpl-2.0.html
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2011, 2014, Intel Corporation.
+ *
+ * Copyright 2015 Cray Inc, all rights reserved.
+ * Author: Ben Evans.
+ *
+ * Define lu_seq_range associated functions
+ */
+
+#ifndef _SEQ_RANGE_H_
+#define _SEQ_RANGE_H_
+
+#include "lustre/lustre_idl.h"
+
+/**
+ * computes the sequence range type \a range
+ */
+
+static inline unsigned int fld_range_type(const struct lu_seq_range *range)
+{
+ return range->lsr_flags & LU_SEQ_RANGE_MASK;
+}
+
+/**
+ * Is this sequence range an OST? \a range
+ */
+
+static inline bool fld_range_is_ost(const struct lu_seq_range *range)
+{
+ return fld_range_type(range) == LU_SEQ_RANGE_OST;
+}
+
+/**
+ * Is this sequence range an MDT? \a range
+ */
+
+static inline bool fld_range_is_mdt(const struct lu_seq_range *range)
+{
+ return fld_range_type(range) == LU_SEQ_RANGE_MDT;
+}
+
+/**
+ * ANY range is only used when the fld client sends a fld query request,
+ * but it does not know whether the seq is an MDT or OST, so it will send the
+ * request with ANY type, which means any seq type from the lookup can be
+ * expected. \a range
+ */
+static inline unsigned int fld_range_is_any(const struct lu_seq_range *range)
+{
+ return fld_range_type(range) == LU_SEQ_RANGE_ANY;
+}
+
+/**
+ * Apply flags to range \a range \a flags
+ */
+
+static inline void fld_range_set_type(struct lu_seq_range *range,
+ unsigned int flags)
+{
+ range->lsr_flags |= flags;
+}
+
+/**
+ * Add MDT to range type \a range
+ */
+
+static inline void fld_range_set_mdt(struct lu_seq_range *range)
+{
+ fld_range_set_type(range, LU_SEQ_RANGE_MDT);
+}
+
+/**
+ * Add OST to range type \a range
+ */
+
+static inline void fld_range_set_ost(struct lu_seq_range *range)
+{
+ fld_range_set_type(range, LU_SEQ_RANGE_OST);
+}
+
+/**
+ * Add ANY to range type \a range
+ */
+
+static inline void fld_range_set_any(struct lu_seq_range *range)
+{
+ fld_range_set_type(range, LU_SEQ_RANGE_ANY);
+}
+
+/**
+ * computes width of given sequence range \a range
+ */
+
+static inline u64 lu_seq_range_space(const struct lu_seq_range *range)
+{
+ return range->lsr_end - range->lsr_start;
+}
+
+/**
+ * initialize range to zero \a range
+ */
+
+static inline void lu_seq_range_init(struct lu_seq_range *range)
+{
+ memset(range, 0, sizeof(*range));
+}
+
+/**
+ * check if given seq id \a s is within given range \a range
+ */
+
+static inline bool lu_seq_range_within(const struct lu_seq_range *range,
+ u64 seq)
+{
+ return seq >= range->lsr_start && seq < range->lsr_end;
+}
+
+/**
+ * Is the range sane? Is the end after the beginning? \a range
+ */
+
+static inline bool lu_seq_range_is_sane(const struct lu_seq_range *range)
+{
+ return range->lsr_end >= range->lsr_start;
+}
+
+/**
+ * Is the range 0? \a range
+ */
+
+static inline bool lu_seq_range_is_zero(const struct lu_seq_range *range)
+{
+ return range->lsr_start == 0 && range->lsr_end == 0;
+}
+
+/**
+ * Is the range out of space? \a range
+ */
+
+static inline bool lu_seq_range_is_exhausted(const struct lu_seq_range *range)
+{
+ return lu_seq_range_space(range) == 0;
+}
+
+/**
+ * return 0 if two ranges have the same location, nonzero if they are
+ * different \a r1 \a r2
+ */
+
+static inline int lu_seq_range_compare_loc(const struct lu_seq_range *r1,
+ const struct lu_seq_range *r2)
+{
+ return r1->lsr_index != r2->lsr_index ||
+ r1->lsr_flags != r2->lsr_flags;
+}
+
+#if !defined(__REQ_LAYOUT_USER__)
+/**
+ * byte swap range structure \a range
+ */
+
+void lustre_swab_lu_seq_range(struct lu_seq_range *range);
+#endif
+/**
+ * printf string and argument list for sequence range
+ */
+#define DRANGE "[%#16.16llx-%#16.16llx]:%x:%s"
+
+#define PRANGE(range) \
+ (range)->lsr_start, \
+ (range)->lsr_end, \
+ (range)->lsr_index, \
+ fld_range_is_mdt(range) ? "mdt" : "ost"
+
+#endif
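A usage sketch for the helpers and the DRANGE/PRANGE printf pair introduced above (illustrative only; assumes this header and the libcfs debug macros are available, and the example sequence values are arbitrary):

static void example_range_usage(void)
{
	struct lu_seq_range range;

	lu_seq_range_init(&range);		/* zero the whole range */
	range.lsr_start = 0x400ULL;
	range.lsr_end = 0x800ULL;		/* half-open: [start, end) */
	fld_range_set_mdt(&range);		/* tag it as an MDT range */

	LASSERT(lu_seq_range_is_sane(&range));
	if (lu_seq_range_within(&range, 0x401ULL))
		CDEBUG(D_INFO, "seq is in " DRANGE "\n", PRANGE(&range));
}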
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_extent.c b/drivers/staging/lustre/lustre/ldlm/ldlm_extent.c
index ecf472e4813d..32b73ee62639 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_extent.c
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_extent.c
@@ -193,6 +193,26 @@ void ldlm_extent_add_lock(struct ldlm_resource *res,
* add the locks into grant list, for debug purpose, ..
*/
ldlm_resource_add_lock(res, &res->lr_granted, lock);
+
+ if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_GRANT_CHECK)) {
+ struct ldlm_lock *lck;
+
+ list_for_each_entry_reverse(lck, &res->lr_granted,
+ l_res_link) {
+ if (lck == lock)
+ continue;
+ if (lockmode_compat(lck->l_granted_mode,
+ lock->l_granted_mode))
+ continue;
+ if (ldlm_extent_overlap(&lck->l_req_extent,
+ &lock->l_req_extent)) {
+ CDEBUG(D_ERROR, "granting conflicting lock %p %p\n",
+ lck, lock);
+ ldlm_resource_dump(D_ERROR, res);
+ LBUG();
+ }
+ }
+ }
}
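The new OBD_FAIL_LDLM_GRANT_CHECK path relies on lockmode_compat() and ldlm_extent_overlap() from the surrounding code. Conceptually those reduce to a symmetric mode-compatibility matrix plus an inclusive interval test; a self-contained sketch of that idea (table and names are illustrative, not Lustre's actual definitions):

#include <stdbool.h>
#include <stdint.h>

enum mode { NL, CR, CW, PR, PW, EX, MODE_NR };

/* 1 where two granted modes may coexist on overlapping extents */
static const bool compat[MODE_NR][MODE_NR] = {
	/*          NL CR CW PR PW EX */
	/* NL */ { 1, 1, 1, 1, 1, 1 },
	/* CR */ { 1, 1, 1, 1, 1, 0 },
	/* CW */ { 1, 1, 1, 0, 0, 0 },
	/* PR */ { 1, 1, 0, 1, 0, 0 },
	/* PW */ { 1, 1, 0, 0, 0, 0 },
	/* EX */ { 1, 0, 0, 0, 0, 0 },
};

struct extent { uint64_t start, end; };	/* inclusive on both ends */

static bool extents_overlap(const struct extent *a, const struct extent *b)
{
	return a->start <= b->end && b->start <= a->end;
}

/* a grant is buggy if the modes conflict and the extents intersect */
static bool would_conflict(enum mode m1, const struct extent *e1,
			   enum mode m2, const struct extent *e2)
{
	return !compat[m1][m2] && extents_overlap(e1, e2);
}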
/** Remove cancelled lock from resource interval tree. */
@@ -220,8 +240,8 @@ void ldlm_extent_unlink_lock(struct ldlm_lock *lock)
}
}
-void ldlm_extent_policy_wire_to_local(const ldlm_wire_policy_data_t *wpolicy,
- ldlm_policy_data_t *lpolicy)
+void ldlm_extent_policy_wire_to_local(const union ldlm_wire_policy_data *wpolicy,
+ union ldlm_policy_data *lpolicy)
{
memset(lpolicy, 0, sizeof(*lpolicy));
lpolicy->l_extent.start = wpolicy->l_extent.start;
@@ -229,8 +249,8 @@ void ldlm_extent_policy_wire_to_local(const ldlm_wire_policy_data_t *wpolicy,
lpolicy->l_extent.gid = wpolicy->l_extent.gid;
}
-void ldlm_extent_policy_local_to_wire(const ldlm_policy_data_t *lpolicy,
- ldlm_wire_policy_data_t *wpolicy)
+void ldlm_extent_policy_local_to_wire(const union ldlm_policy_data *lpolicy,
+ union ldlm_wire_policy_data *wpolicy)
{
memset(wpolicy, 0, sizeof(*wpolicy));
wpolicy->l_extent.start = lpolicy->l_extent.start;
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c b/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c
index 861f36f039b5..722160784f83 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c
@@ -612,22 +612,8 @@ granted:
}
EXPORT_SYMBOL(ldlm_flock_completion_ast);
-void ldlm_flock_policy_wire18_to_local(const ldlm_wire_policy_data_t *wpolicy,
- ldlm_policy_data_t *lpolicy)
-{
- memset(lpolicy, 0, sizeof(*lpolicy));
- lpolicy->l_flock.start = wpolicy->l_flock.lfw_start;
- lpolicy->l_flock.end = wpolicy->l_flock.lfw_end;
- lpolicy->l_flock.pid = wpolicy->l_flock.lfw_pid;
- /* Compat code, old clients had no idea about owner field and
- * relied solely on pid for ownership. Introduced in LU-104, 2.1,
- * April 2011
- */
- lpolicy->l_flock.owner = wpolicy->l_flock.lfw_pid;
-}
-
-void ldlm_flock_policy_wire21_to_local(const ldlm_wire_policy_data_t *wpolicy,
- ldlm_policy_data_t *lpolicy)
+void ldlm_flock_policy_wire_to_local(const union ldlm_wire_policy_data *wpolicy,
+ union ldlm_policy_data *lpolicy)
{
memset(lpolicy, 0, sizeof(*lpolicy));
lpolicy->l_flock.start = wpolicy->l_flock.lfw_start;
@@ -636,8 +622,8 @@ void ldlm_flock_policy_wire21_to_local(const ldlm_wire_policy_data_t *wpolicy,
lpolicy->l_flock.owner = wpolicy->l_flock.lfw_owner;
}
-void ldlm_flock_policy_local_to_wire(const ldlm_policy_data_t *lpolicy,
- ldlm_wire_policy_data_t *wpolicy)
+void ldlm_flock_policy_local_to_wire(const union ldlm_policy_data *lpolicy,
+ union ldlm_wire_policy_data *wpolicy)
{
memset(wpolicy, 0, sizeof(*wpolicy));
wpolicy->l_flock.lfw_start = lpolicy->l_flock.start;
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_inodebits.c b/drivers/staging/lustre/lustre/ldlm/ldlm_inodebits.c
index 79f4e6fa193e..8e1709dc073c 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_inodebits.c
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_inodebits.c
@@ -54,15 +54,15 @@
#include "../include/lustre_lib.h"
#include "ldlm_internal.h"
-void ldlm_ibits_policy_wire_to_local(const ldlm_wire_policy_data_t *wpolicy,
- ldlm_policy_data_t *lpolicy)
+void ldlm_ibits_policy_wire_to_local(const union ldlm_wire_policy_data *wpolicy,
+ union ldlm_policy_data *lpolicy)
{
memset(lpolicy, 0, sizeof(*lpolicy));
lpolicy->l_inodebits.bits = wpolicy->l_inodebits.bits;
}
-void ldlm_ibits_policy_local_to_wire(const ldlm_policy_data_t *lpolicy,
- ldlm_wire_policy_data_t *wpolicy)
+void ldlm_ibits_policy_local_to_wire(const union ldlm_policy_data *lpolicy,
+ union ldlm_wire_policy_data *wpolicy)
{
memset(wpolicy, 0, sizeof(*wpolicy));
wpolicy->l_inodebits.bits = lpolicy->l_inodebits.bits;
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_internal.h b/drivers/staging/lustre/lustre/ldlm/ldlm_internal.h
index 5e82cfc245b2..5c02501d0560 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_internal.h
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_internal.h
@@ -39,13 +39,13 @@ extern struct list_head ldlm_srv_namespace_list;
extern struct mutex ldlm_cli_namespace_lock;
extern struct list_head ldlm_cli_active_namespace_list;
-static inline int ldlm_namespace_nr_read(ldlm_side_t client)
+static inline int ldlm_namespace_nr_read(enum ldlm_side client)
{
return client == LDLM_NAMESPACE_SERVER ?
ldlm_srv_namespace_nr : ldlm_cli_namespace_nr;
}
-static inline void ldlm_namespace_nr_inc(ldlm_side_t client)
+static inline void ldlm_namespace_nr_inc(enum ldlm_side client)
{
if (client == LDLM_NAMESPACE_SERVER)
ldlm_srv_namespace_nr++;
@@ -53,7 +53,7 @@ static inline void ldlm_namespace_nr_inc(ldlm_side_t client)
ldlm_cli_namespace_nr++;
}
-static inline void ldlm_namespace_nr_dec(ldlm_side_t client)
+static inline void ldlm_namespace_nr_dec(enum ldlm_side client)
{
if (client == LDLM_NAMESPACE_SERVER)
ldlm_srv_namespace_nr--;
@@ -61,13 +61,13 @@ static inline void ldlm_namespace_nr_dec(ldlm_side_t client)
ldlm_cli_namespace_nr--;
}
-static inline struct list_head *ldlm_namespace_list(ldlm_side_t client)
+static inline struct list_head *ldlm_namespace_list(enum ldlm_side client)
{
return client == LDLM_NAMESPACE_SERVER ?
&ldlm_srv_namespace_list : &ldlm_cli_active_namespace_list;
}
-static inline struct mutex *ldlm_namespace_lock(ldlm_side_t client)
+static inline struct mutex *ldlm_namespace_lock(enum ldlm_side client)
{
return client == LDLM_NAMESPACE_SERVER ?
&ldlm_srv_namespace_lock : &ldlm_cli_namespace_lock;
@@ -79,22 +79,23 @@ static inline int ldlm_ns_empty(struct ldlm_namespace *ns)
return atomic_read(&ns->ns_bref) == 0;
}
-void ldlm_namespace_move_to_active_locked(struct ldlm_namespace *, ldlm_side_t);
+void ldlm_namespace_move_to_active_locked(struct ldlm_namespace *,
+ enum ldlm_side);
void ldlm_namespace_move_to_inactive_locked(struct ldlm_namespace *,
- ldlm_side_t);
-struct ldlm_namespace *ldlm_namespace_first_locked(ldlm_side_t);
+ enum ldlm_side);
+struct ldlm_namespace *ldlm_namespace_first_locked(enum ldlm_side);
/* ldlm_request.c */
/* Cancel lru flag, it indicates we cancel aged locks. */
enum {
- LDLM_CANCEL_AGED = 1 << 0, /* Cancel aged locks (non lru resize). */
- LDLM_CANCEL_PASSED = 1 << 1, /* Cancel passed number of locks. */
- LDLM_CANCEL_SHRINK = 1 << 2, /* Cancel locks from shrinker. */
- LDLM_CANCEL_LRUR = 1 << 3, /* Cancel locks from lru resize. */
- LDLM_CANCEL_NO_WAIT = 1 << 4, /* Cancel locks w/o blocking (neither
- * sending nor waiting for any rpcs)
- */
- LDLM_CANCEL_LRUR_NO_WAIT = 1 << 5, /* LRUR + NO_WAIT */
+ LDLM_LRU_FLAG_AGED = BIT(0), /* Cancel aged locks (non lru resize). */
+ LDLM_LRU_FLAG_PASSED = BIT(1), /* Cancel passed number of locks. */
+ LDLM_LRU_FLAG_SHRINK = BIT(2), /* Cancel locks from shrinker. */
+ LDLM_LRU_FLAG_LRUR = BIT(3), /* Cancel locks from lru resize. */
+ LDLM_LRU_FLAG_NO_WAIT = BIT(4), /* Cancel locks w/o blocking (neither
+ * sending nor waiting for any rpcs)
+ */
+ LDLM_LRU_FLAG_LRUR_NO_WAIT = BIT(5), /* LRUR + NO_WAIT */
};
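The renamed LDLM_LRU_FLAG_* values are ordinary bit flags and are passed as the last argument of the cancel helpers. A sketch combining two calls that appear verbatim at call sites later in this patch:

static void example_trim_lru(struct ldlm_namespace *ns)
{
	LIST_HEAD(cancels);

	/* asynchronously cancel unused locks via the LRU-resize policy */
	ldlm_cancel_lru(ns, 0, LCF_ASYNC, LDLM_LRU_FLAG_LRUR);

	/* before replay: cancel what can go without sending or waiting
	 * for RPCs; handling of the prepared 'cancels' list is elided
	 */
	ldlm_cancel_lru_local(ns, &cancels, ns->ns_nr_unused, 0,
			      LCF_LOCAL, LDLM_LRU_FLAG_NO_WAIT);
}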
int ldlm_cancel_lru(struct ldlm_namespace *ns, int nr,
@@ -137,10 +138,10 @@ ldlm_lock_create(struct ldlm_namespace *ns, const struct ldlm_res_id *,
void *data, __u32 lvb_len, enum lvb_type lvb_type);
enum ldlm_error ldlm_lock_enqueue(struct ldlm_namespace *, struct ldlm_lock **,
void *cookie, __u64 *flags);
-void ldlm_lock_addref_internal(struct ldlm_lock *, __u32 mode);
-void ldlm_lock_addref_internal_nolock(struct ldlm_lock *, __u32 mode);
-void ldlm_lock_decref_internal(struct ldlm_lock *, __u32 mode);
-void ldlm_lock_decref_internal_nolock(struct ldlm_lock *, __u32 mode);
+void ldlm_lock_addref_internal(struct ldlm_lock *, enum ldlm_mode mode);
+void ldlm_lock_addref_internal_nolock(struct ldlm_lock *, enum ldlm_mode mode);
+void ldlm_lock_decref_internal(struct ldlm_lock *, enum ldlm_mode mode);
+void ldlm_lock_decref_internal_nolock(struct ldlm_lock *, enum ldlm_mode mode);
int ldlm_run_ast_work(struct ldlm_namespace *ns, struct list_head *rpc_list,
enum ldlm_desc_ast_t ast_type);
int ldlm_lock_remove_from_lru_check(struct ldlm_lock *lock, time_t last_use);
@@ -311,28 +312,25 @@ static inline int is_granted_or_cancelled(struct ldlm_lock *lock)
return ret;
}
-typedef void (*ldlm_policy_wire_to_local_t)(const ldlm_wire_policy_data_t *,
- ldlm_policy_data_t *);
-
-typedef void (*ldlm_policy_local_to_wire_t)(const ldlm_policy_data_t *,
- ldlm_wire_policy_data_t *);
-
-void ldlm_plain_policy_wire_to_local(const ldlm_wire_policy_data_t *wpolicy,
- ldlm_policy_data_t *lpolicy);
-void ldlm_plain_policy_local_to_wire(const ldlm_policy_data_t *lpolicy,
- ldlm_wire_policy_data_t *wpolicy);
-void ldlm_ibits_policy_wire_to_local(const ldlm_wire_policy_data_t *wpolicy,
- ldlm_policy_data_t *lpolicy);
-void ldlm_ibits_policy_local_to_wire(const ldlm_policy_data_t *lpolicy,
- ldlm_wire_policy_data_t *wpolicy);
-void ldlm_extent_policy_wire_to_local(const ldlm_wire_policy_data_t *wpolicy,
- ldlm_policy_data_t *lpolicy);
-void ldlm_extent_policy_local_to_wire(const ldlm_policy_data_t *lpolicy,
- ldlm_wire_policy_data_t *wpolicy);
-void ldlm_flock_policy_wire18_to_local(const ldlm_wire_policy_data_t *wpolicy,
- ldlm_policy_data_t *lpolicy);
-void ldlm_flock_policy_wire21_to_local(const ldlm_wire_policy_data_t *wpolicy,
- ldlm_policy_data_t *lpolicy);
-
-void ldlm_flock_policy_local_to_wire(const ldlm_policy_data_t *lpolicy,
- ldlm_wire_policy_data_t *wpolicy);
+typedef void (*ldlm_policy_wire_to_local_t)(const union ldlm_wire_policy_data *,
+ union ldlm_policy_data *);
+
+typedef void (*ldlm_policy_local_to_wire_t)(const union ldlm_policy_data *,
+ union ldlm_wire_policy_data *);
+
+void ldlm_plain_policy_wire_to_local(const union ldlm_wire_policy_data *wpolicy,
+ union ldlm_policy_data *lpolicy);
+void ldlm_plain_policy_local_to_wire(const union ldlm_policy_data *lpolicy,
+ union ldlm_wire_policy_data *wpolicy);
+void ldlm_ibits_policy_wire_to_local(const union ldlm_wire_policy_data *wpolicy,
+ union ldlm_policy_data *lpolicy);
+void ldlm_ibits_policy_local_to_wire(const union ldlm_policy_data *lpolicy,
+ union ldlm_wire_policy_data *wpolicy);
+void ldlm_extent_policy_wire_to_local(const union ldlm_wire_policy_data *wpolicy,
+ union ldlm_policy_data *lpolicy);
+void ldlm_extent_policy_local_to_wire(const union ldlm_policy_data *lpolicy,
+ union ldlm_wire_policy_data *wpolicy);
+void ldlm_flock_policy_wire_to_local(const union ldlm_wire_policy_data *wpolicy,
+ union ldlm_policy_data *lpolicy);
+void ldlm_flock_policy_local_to_wire(const union ldlm_policy_data *lpolicy,
+ union ldlm_wire_policy_data *wpolicy);
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_lib.c b/drivers/staging/lustre/lustre/ldlm/ldlm_lib.c
index 153e990c494e..9be01426c955 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_lib.c
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_lib.c
@@ -170,6 +170,9 @@ int client_import_del_conn(struct obd_import *imp, struct obd_uuid *uuid)
ptlrpc_connection_put(dlmexp->exp_connection);
dlmexp->exp_connection = NULL;
}
+
+ if (dlmexp)
+ class_export_put(dlmexp);
}
list_del(&imp_conn->oic_item);
@@ -372,6 +375,25 @@ int client_obd_setup(struct obd_device *obddev, struct lustre_cfg *lcfg)
} else {
cli->cl_max_rpcs_in_flight = OBD_MAX_RIF_DEFAULT;
}
+
+ spin_lock_init(&cli->cl_mod_rpcs_lock);
+ spin_lock_init(&cli->cl_mod_rpcs_hist.oh_lock);
+ cli->cl_max_mod_rpcs_in_flight = 0;
+ cli->cl_mod_rpcs_in_flight = 0;
+ cli->cl_close_rpcs_in_flight = 0;
+ init_waitqueue_head(&cli->cl_mod_rpcs_waitq);
+ cli->cl_mod_tag_bitmap = NULL;
+
+ if (connect_op == MDS_CONNECT) {
+ cli->cl_max_mod_rpcs_in_flight = cli->cl_max_rpcs_in_flight - 1;
+ cli->cl_mod_tag_bitmap = kcalloc(BITS_TO_LONGS(OBD_MAX_RIF_MAX),
+ sizeof(long), GFP_NOFS);
+ if (!cli->cl_mod_tag_bitmap) {
+ rc = -ENOMEM;
+ goto err;
+ }
+ }
+
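cl_mod_tag_bitmap is sized for OBD_MAX_RIF_MAX tags and guarded by cl_mod_rpcs_lock; it is evidently meant for handing out per-RPC tags to modifying RPCs. A hedged sketch of the kernel-bitmap pattern this enables (not the actual mdc code):

static int example_get_mod_tag(struct client_obd *cli)
{
	int tag;

	spin_lock(&cli->cl_mod_rpcs_lock);
	tag = find_first_zero_bit(cli->cl_mod_tag_bitmap, OBD_MAX_RIF_MAX);
	if (tag < OBD_MAX_RIF_MAX)
		set_bit(tag, cli->cl_mod_tag_bitmap);
	else
		tag = -EBUSY;	/* all tags currently in flight */
	spin_unlock(&cli->cl_mod_rpcs_lock);
	return tag;
}

static void example_put_mod_tag(struct client_obd *cli, int tag)
{
	spin_lock(&cli->cl_mod_rpcs_lock);
	clear_bit(tag, cli->cl_mod_tag_bitmap);
	spin_unlock(&cli->cl_mod_rpcs_lock);
	wake_up(&cli->cl_mod_rpcs_waitq);	/* a waiter may want the tag */
}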
rc = ldlm_get_ref();
if (rc) {
CERROR("ldlm_get_ref failed: %d\n", rc);
@@ -399,9 +421,8 @@ int client_obd_setup(struct obd_device *obddev, struct lustre_cfg *lcfg)
}
cli->cl_import = imp;
- /* cli->cl_max_mds_{easize,cookiesize} updated by mdc_init_ea_size() */
+ /* cli->cl_max_mds_easize updated by mdc_init_ea_size() */
cli->cl_max_mds_easize = sizeof(struct lov_mds_md_v3);
- cli->cl_max_mds_cookiesize = sizeof(struct llog_cookie);
if (LUSTRE_CFG_BUFLEN(lcfg, 3) > 0) {
if (!strcmp(lustre_cfg_string(lcfg, 3), "inactive")) {
@@ -425,8 +446,6 @@ int client_obd_setup(struct obd_device *obddev, struct lustre_cfg *lcfg)
goto err_import;
}
- cli->cl_qchk_stat = CL_NOT_QUOTACHECKED;
-
return rc;
err_import:
@@ -434,12 +453,16 @@ err_import:
err_ldlm:
ldlm_put_ref();
err:
+ kfree(cli->cl_mod_tag_bitmap);
+ cli->cl_mod_tag_bitmap = NULL;
return rc;
}
EXPORT_SYMBOL(client_obd_setup);
int client_obd_cleanup(struct obd_device *obddev)
{
+ struct client_obd *cli = &obddev->u.cli;
+
ldlm_namespace_free_post(obddev->obd_namespace);
obddev->obd_namespace = NULL;
@@ -447,6 +470,10 @@ int client_obd_cleanup(struct obd_device *obddev)
LASSERT(!obddev->u.cli.cl_import);
ldlm_put_ref();
+
+ kfree(cli->cl_mod_tag_bitmap);
+ cli->cl_mod_tag_bitmap = NULL;
+
return 0;
}
EXPORT_SYMBOL(client_obd_cleanup);
@@ -461,6 +488,7 @@ int client_connect_import(const struct lu_env *env,
struct obd_import *imp = cli->cl_import;
struct obd_connect_data *ocd;
struct lustre_handle conn = { 0 };
+ bool is_mdc = false;
int rc;
*exp = NULL;
@@ -487,6 +515,10 @@ int client_connect_import(const struct lu_env *env,
ocd = &imp->imp_connect_data;
if (data) {
*ocd = *data;
+ is_mdc = !strncmp(imp->imp_obd->obd_type->typ_name,
+ LUSTRE_MDC_NAME, 3);
+ if (is_mdc)
+ data->ocd_connect_flags |= OBD_CONNECT_MULTIMODRPCS;
imp->imp_connect_flags_orig = data->ocd_connect_flags;
}
@@ -502,6 +534,11 @@ int client_connect_import(const struct lu_env *env,
ocd->ocd_connect_flags, "old %#llx, new %#llx\n",
data->ocd_connect_flags, ocd->ocd_connect_flags);
data->ocd_connect_flags = ocd->ocd_connect_flags;
+ /* clear the flag as it was not set and is not known
+ * by upper layers
+ */
+ if (is_mdc)
+ data->ocd_connect_flags &= ~OBD_CONNECT_MULTIMODRPCS;
}
ptlrpc_pinger_add_import(imp);
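The two MULTIMODRPCS touch points above follow the standard connect-flag negotiation shape: advertise a capability in the request, then clear it locally when the server's reply does not carry it, so upper layers never see an unsupported flag. Reduced to a skeleton (everything except the flag name is hypothetical):

static void example_negotiate(struct obd_connect_data *data,
			      const struct obd_connect_data *ocd)
{
	/* before connecting: advertise the capability */
	data->ocd_connect_flags |= OBD_CONNECT_MULTIMODRPCS;

	/* ... connect RPC runs; the server's view comes back in ocd ... */

	/* after connecting: hide the flag if the server refused it */
	if (!(ocd->ocd_connect_flags & OBD_CONNECT_MULTIMODRPCS))
		data->ocd_connect_flags &= ~OBD_CONNECT_MULTIMODRPCS;
}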
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_lock.c b/drivers/staging/lustre/lustre/ldlm/ldlm_lock.c
index 3c48b4fb96f1..a4a291acb659 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_lock.c
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_lock.c
@@ -39,6 +39,7 @@
#include "../../include/linux/libcfs/libcfs.h"
#include "../include/lustre_intent.h"
+#include "../include/lustre_swab.h"
#include "../include/obd_class.h"
#include "ldlm_internal.h"
@@ -63,17 +64,10 @@ static char *ldlm_typename[] = {
[LDLM_IBITS] = "IBT",
};
-static ldlm_policy_wire_to_local_t ldlm_policy_wire18_to_local[] = {
+static ldlm_policy_wire_to_local_t ldlm_policy_wire_to_local[] = {
[LDLM_PLAIN - LDLM_MIN_TYPE] = ldlm_plain_policy_wire_to_local,
[LDLM_EXTENT - LDLM_MIN_TYPE] = ldlm_extent_policy_wire_to_local,
- [LDLM_FLOCK - LDLM_MIN_TYPE] = ldlm_flock_policy_wire18_to_local,
- [LDLM_IBITS - LDLM_MIN_TYPE] = ldlm_ibits_policy_wire_to_local,
-};
-
-static ldlm_policy_wire_to_local_t ldlm_policy_wire21_to_local[] = {
- [LDLM_PLAIN - LDLM_MIN_TYPE] = ldlm_plain_policy_wire_to_local,
- [LDLM_EXTENT - LDLM_MIN_TYPE] = ldlm_extent_policy_wire_to_local,
- [LDLM_FLOCK - LDLM_MIN_TYPE] = ldlm_flock_policy_wire21_to_local,
+ [LDLM_FLOCK - LDLM_MIN_TYPE] = ldlm_flock_policy_wire_to_local,
[LDLM_IBITS - LDLM_MIN_TYPE] = ldlm_ibits_policy_wire_to_local,
};
@@ -88,8 +82,8 @@ static ldlm_policy_local_to_wire_t ldlm_policy_local_to_wire[] = {
* Converts lock policy from local format to on the wire lock_desc format
*/
static void ldlm_convert_policy_to_wire(enum ldlm_type type,
- const ldlm_policy_data_t *lpolicy,
- ldlm_wire_policy_data_t *wpolicy)
+ const union ldlm_policy_data *lpolicy,
+ union ldlm_wire_policy_data *wpolicy)
{
ldlm_policy_local_to_wire_t convert;
@@ -102,23 +96,17 @@ static void ldlm_convert_policy_to_wire(enum ldlm_type type,
* Converts lock policy from on the wire lock_desc format to local format
*/
void ldlm_convert_policy_to_local(struct obd_export *exp, enum ldlm_type type,
- const ldlm_wire_policy_data_t *wpolicy,
- ldlm_policy_data_t *lpolicy)
+ const union ldlm_wire_policy_data *wpolicy,
+ union ldlm_policy_data *lpolicy)
{
ldlm_policy_wire_to_local_t convert;
- int new_client;
- /** some badness for 2.0.0 clients, but 2.0.0 isn't supported */
- new_client = (exp_connect_flags(exp) & OBD_CONNECT_FULL20) != 0;
- if (new_client)
- convert = ldlm_policy_wire21_to_local[type - LDLM_MIN_TYPE];
- else
- convert = ldlm_policy_wire18_to_local[type - LDLM_MIN_TYPE];
+ convert = ldlm_policy_wire_to_local[type - LDLM_MIN_TYPE];
convert(wpolicy, lpolicy);
}
-char *ldlm_it2str(int it)
+const char *ldlm_it2str(enum ldlm_intent_flags it)
{
switch (it) {
case IT_OPEN:
@@ -140,7 +128,7 @@ char *ldlm_it2str(int it)
case IT_LAYOUT:
return "layout";
default:
- CERROR("Unknown intent %d\n", it);
+ CERROR("Unknown intent 0x%08x\n", it);
return "UNKNOWN";
}
}
@@ -512,7 +500,6 @@ int ldlm_lock_change_resource(struct ldlm_namespace *ns, struct ldlm_lock *lock,
return 0;
}
-EXPORT_SYMBOL(ldlm_lock_change_resource);
/** \defgroup ldlm_handles LDLM HANDLES
* Ways to get hold of locks without any addresses.
@@ -595,7 +582,6 @@ void ldlm_lock2desc(struct ldlm_lock *lock, struct ldlm_lock_desc *desc)
&lock->l_policy_data,
&desc->l_policy_data);
}
-EXPORT_SYMBOL(ldlm_lock2desc);
/**
* Add a lock to list of conflicting locks to send AST to.
@@ -658,7 +644,7 @@ static void ldlm_add_ast_work_item(struct ldlm_lock *lock,
* r/w reference type is determined by \a mode
* Calls ldlm_lock_addref_internal.
*/
-void ldlm_lock_addref(const struct lustre_handle *lockh, __u32 mode)
+void ldlm_lock_addref(const struct lustre_handle *lockh, enum ldlm_mode mode)
{
struct ldlm_lock *lock;
@@ -676,7 +662,8 @@ EXPORT_SYMBOL(ldlm_lock_addref);
* Removes lock from LRU if it is there.
* Assumes the LDLM lock is already locked.
*/
-void ldlm_lock_addref_internal_nolock(struct ldlm_lock *lock, __u32 mode)
+void ldlm_lock_addref_internal_nolock(struct ldlm_lock *lock,
+ enum ldlm_mode mode)
{
ldlm_lock_remove_from_lru(lock);
if (mode & (LCK_NL | LCK_CR | LCK_PR)) {
@@ -700,7 +687,7 @@ void ldlm_lock_addref_internal_nolock(struct ldlm_lock *lock, __u32 mode)
*
* \retval -EAGAIN lock is being canceled.
*/
-int ldlm_lock_addref_try(const struct lustre_handle *lockh, __u32 mode)
+int ldlm_lock_addref_try(const struct lustre_handle *lockh, enum ldlm_mode mode)
{
struct ldlm_lock *lock;
int result;
@@ -726,7 +713,7 @@ EXPORT_SYMBOL(ldlm_lock_addref_try);
* Locks LDLM lock and calls ldlm_lock_addref_internal_nolock to do the work.
* Only called for local locks.
*/
-void ldlm_lock_addref_internal(struct ldlm_lock *lock, __u32 mode)
+void ldlm_lock_addref_internal(struct ldlm_lock *lock, enum ldlm_mode mode)
{
lock_res_and_lock(lock);
ldlm_lock_addref_internal_nolock(lock, mode);
@@ -740,7 +727,8 @@ void ldlm_lock_addref_internal(struct ldlm_lock *lock, __u32 mode)
* Does NOT add lock to LRU if no r/w references left to accommodate flock locks
* that cannot be placed in LRU.
*/
-void ldlm_lock_decref_internal_nolock(struct ldlm_lock *lock, __u32 mode)
+void ldlm_lock_decref_internal_nolock(struct ldlm_lock *lock,
+ enum ldlm_mode mode)
{
LDLM_DEBUG(lock, "ldlm_lock_decref(%s)", ldlm_lockname[mode]);
if (mode & (LCK_NL | LCK_CR | LCK_PR)) {
@@ -766,7 +754,7 @@ void ldlm_lock_decref_internal_nolock(struct ldlm_lock *lock, __u32 mode)
* on the namespace.
* For blocked LDLM locks if r/w count drops to zero, blocking_ast is called.
*/
-void ldlm_lock_decref_internal(struct ldlm_lock *lock, __u32 mode)
+void ldlm_lock_decref_internal(struct ldlm_lock *lock, enum ldlm_mode mode)
{
struct ldlm_namespace *ns;
@@ -786,11 +774,16 @@ void ldlm_lock_decref_internal(struct ldlm_lock *lock, __u32 mode)
}
if (!lock->l_readers && !lock->l_writers &&
- ldlm_is_cbpending(lock)) {
+ (ldlm_is_cbpending(lock) || lock->l_req_mode == LCK_GROUP)) {
/* If we received a blocked AST and this was the last reference,
* run the callback.
+ * Group locks are special:
+ * They must not go in LRU, but they are not called back
+ * like non-group locks, instead they are manually released.
+ * They have an l_writers reference which they keep until
+ * they are manually released, so we remove them when they have
+ * no more reader or writer references. - LU-6368
*/
-
LDLM_DEBUG(lock, "final decref done on cbpending lock");
LDLM_LOCK_GET(lock); /* dropped by bl thread */
@@ -832,7 +825,7 @@ void ldlm_lock_decref_internal(struct ldlm_lock *lock, __u32 mode)
/**
* Decrease reader/writer refcount for LDLM lock with handle \a lockh
*/
-void ldlm_lock_decref(const struct lustre_handle *lockh, __u32 mode)
+void ldlm_lock_decref(const struct lustre_handle *lockh, enum ldlm_mode mode)
{
struct ldlm_lock *lock = __ldlm_handle2lock(lockh, 0);
@@ -846,10 +839,9 @@ EXPORT_SYMBOL(ldlm_lock_decref);
* Decrease reader/writer refcount for LDLM lock with handle
* \a lockh and mark it for subsequent cancellation once r/w refcount
* drops to zero instead of putting into LRU.
- *
- * Typical usage is for GROUP locks which we cannot allow to be cached.
*/
-void ldlm_lock_decref_and_cancel(const struct lustre_handle *lockh, __u32 mode)
+void ldlm_lock_decref_and_cancel(const struct lustre_handle *lockh,
+ enum ldlm_mode mode)
{
struct ldlm_lock *lock = __ldlm_handle2lock(lockh, 0);
@@ -1055,88 +1047,173 @@ void ldlm_grant_lock(struct ldlm_lock *lock, struct list_head *work_list)
}
/**
- * Search for a lock with given properties in a queue.
+ * Matching criteria and result, passed to the lock walkers as
+ * itree_overlap_cb data.
+ */
+struct lock_match_data {
+ struct ldlm_lock *lmd_old;
+ struct ldlm_lock *lmd_lock;
+ enum ldlm_mode *lmd_mode;
+ union ldlm_policy_data *lmd_policy;
+ __u64 lmd_flags;
+ int lmd_unref;
+};
+
+/**
+ * Check if the given @lock meets the criteria for a match.
+ * A reference on the lock is taken if matched.
*
- * \retval a referenced lock or NULL. See the flag descriptions below, in the
- * comment above ldlm_lock_match
+ * \param lock test-against this lock
+ * \param data parameters
*/
-static struct ldlm_lock *search_queue(struct list_head *queue,
- enum ldlm_mode *mode,
- ldlm_policy_data_t *policy,
- struct ldlm_lock *old_lock,
- __u64 flags, int unref)
+static int lock_matches(struct ldlm_lock *lock, struct lock_match_data *data)
{
- struct ldlm_lock *lock;
- struct list_head *tmp;
+ union ldlm_policy_data *lpol = &lock->l_policy_data;
+ enum ldlm_mode match;
- list_for_each(tmp, queue) {
- enum ldlm_mode match;
+ if (lock == data->lmd_old)
+ return INTERVAL_ITER_STOP;
- lock = list_entry(tmp, struct ldlm_lock, l_res_link);
-
- if (lock == old_lock)
- break;
+ /*
+ * Check if this lock can be matched.
+ * Used by LU-2919 (exclusive open) for open lease locks
+ */
+ if (ldlm_is_excl(lock))
+ return INTERVAL_ITER_CONT;
- /* Check if this lock can be matched.
- * Used by LU-2919(exclusive open) for open lease lock
- */
- if (ldlm_is_excl(lock))
- continue;
+ /*
+ * llite sometimes wants to match locks that will be
+ * canceled when their users drop, but we allow it to match
+ * if it passes in CBPENDING and the lock still has users.
+ * this is generally only going to be used by children
+ * whose parents already hold a lock so forward progress
+ * can still happen.
+ */
+ if (ldlm_is_cbpending(lock) &&
+ !(data->lmd_flags & LDLM_FL_CBPENDING))
+ return INTERVAL_ITER_CONT;
- /* llite sometimes wants to match locks that will be
- * canceled when their users drop, but we allow it to match
- * if it passes in CBPENDING and the lock still has users.
- * this is generally only going to be used by children
- * whose parents already hold a lock so forward progress
- * can still happen.
- */
- if (ldlm_is_cbpending(lock) && !(flags & LDLM_FL_CBPENDING))
- continue;
- if (!unref && ldlm_is_cbpending(lock) &&
- lock->l_readers == 0 && lock->l_writers == 0)
- continue;
+ if (!data->lmd_unref && ldlm_is_cbpending(lock) &&
+ !lock->l_readers && !lock->l_writers)
+ return INTERVAL_ITER_CONT;
- if (!(lock->l_req_mode & *mode))
- continue;
- match = lock->l_req_mode;
+ if (!(lock->l_req_mode & *data->lmd_mode))
+ return INTERVAL_ITER_CONT;
+ match = lock->l_req_mode;
- if (lock->l_resource->lr_type == LDLM_EXTENT &&
- (lock->l_policy_data.l_extent.start >
- policy->l_extent.start ||
- lock->l_policy_data.l_extent.end < policy->l_extent.end))
- continue;
+ switch (lock->l_resource->lr_type) {
+ case LDLM_EXTENT:
+ if (lpol->l_extent.start > data->lmd_policy->l_extent.start ||
+ lpol->l_extent.end < data->lmd_policy->l_extent.end)
+ return INTERVAL_ITER_CONT;
if (unlikely(match == LCK_GROUP) &&
- lock->l_resource->lr_type == LDLM_EXTENT &&
- policy->l_extent.gid != LDLM_GID_ANY &&
- lock->l_policy_data.l_extent.gid != policy->l_extent.gid)
- continue;
-
- /* We match if we have existing lock with same or wider set
+ data->lmd_policy->l_extent.gid != LDLM_GID_ANY &&
+ lpol->l_extent.gid != data->lmd_policy->l_extent.gid)
+ return INTERVAL_ITER_CONT;
+ break;
+ case LDLM_IBITS:
+ /*
+ * We match if we have existing lock with same or wider set
* of bits.
*/
- if (lock->l_resource->lr_type == LDLM_IBITS &&
- ((lock->l_policy_data.l_inodebits.bits &
- policy->l_inodebits.bits) !=
- policy->l_inodebits.bits))
- continue;
+ if ((lpol->l_inodebits.bits &
+ data->lmd_policy->l_inodebits.bits) !=
+ data->lmd_policy->l_inodebits.bits)
+ return INTERVAL_ITER_CONT;
+ break;
+ default:
+ break;
+ }
+ /*
+ * Locks that are already gone (destroyed or failed) do not
+ * match unless unreferenced matching was requested.
+ */
+ if (!data->lmd_unref && LDLM_HAVE_MASK(lock, GONE))
+ return INTERVAL_ITER_CONT;
+
+ if ((data->lmd_flags & LDLM_FL_LOCAL_ONLY) &&
+ !ldlm_is_local(lock))
+ return INTERVAL_ITER_CONT;
+
+ if (data->lmd_flags & LDLM_FL_TEST_LOCK) {
+ LDLM_LOCK_GET(lock);
+ ldlm_lock_touch_in_lru(lock);
+ } else {
+ ldlm_lock_addref_internal_nolock(lock, match);
+ }
+
+ *data->lmd_mode = match;
+ data->lmd_lock = lock;
+
+ return INTERVAL_ITER_STOP;
+}
+
+static unsigned int itree_overlap_cb(struct interval_node *in, void *args)
+{
+ struct ldlm_interval *node = to_ldlm_interval(in);
+ struct lock_match_data *data = args;
+ struct ldlm_lock *lock;
+ int rc;
+
+ list_for_each_entry(lock, &node->li_group, l_sl_policy) {
+ rc = lock_matches(lock, data);
+ if (rc == INTERVAL_ITER_STOP)
+ return INTERVAL_ITER_STOP;
+ }
+ return INTERVAL_ITER_CONT;
+}
+
+/**
+ * Search for a lock with given parameters in interval trees.
+ *
+ * \param res search for a lock in this resource
+ * \param data parameters
+ *
+ * \retval a referenced lock or NULL.
+ */
+static struct ldlm_lock *search_itree(struct ldlm_resource *res,
+ struct lock_match_data *data)
+{
+ struct interval_node_extent ext = {
+ .start = data->lmd_policy->l_extent.start,
+ .end = data->lmd_policy->l_extent.end
+ };
+ int idx;
+
+ for (idx = 0; idx < LCK_MODE_NUM; idx++) {
+ struct ldlm_interval_tree *tree = &res->lr_itree[idx];
- if (!unref && LDLM_HAVE_MASK(lock, GONE))
+ if (!tree->lit_root)
continue;
- if ((flags & LDLM_FL_LOCAL_ONLY) && !ldlm_is_local(lock))
+ if (!(tree->lit_mode & *data->lmd_mode))
continue;
- if (flags & LDLM_FL_TEST_LOCK) {
- LDLM_LOCK_GET(lock);
- ldlm_lock_touch_in_lru(lock);
- } else {
- ldlm_lock_addref_internal_nolock(lock, match);
- }
- *mode = match;
- return lock;
+ interval_search(tree->lit_root, &ext,
+ itree_overlap_cb, data);
}
+ return data->lmd_lock;
+}
+/**
+ * Search for a lock with given properties in a queue.
+ *
+ * \param queue search for a lock in this queue
+ * \param data parameters
+ *
+ * \retval a referenced lock or NULL.
+ */
+static struct ldlm_lock *search_queue(struct list_head *queue,
+ struct lock_match_data *data)
+{
+ struct ldlm_lock *lock;
+ int rc;
+
+ list_for_each_entry(lock, queue, l_res_link) {
+ rc = lock_matches(lock, data);
+ if (rc == INTERVAL_ITER_STOP)
+ return data->lmd_lock;
+ }
return NULL;
}
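Both walkers funnel into lock_matches(), which drives the iteration through its return value: INTERVAL_ITER_CONT keeps the traversal going, INTERVAL_ITER_STOP ends it with the winner recorded in the callback data. A minimal callback following the same interval_search() protocol (illustrative only):

struct count_arg {
	unsigned int count;
};

static unsigned int count_cb(struct interval_node *in, void *args)
{
	struct count_arg *arg = args;

	arg->count++;			/* visit every overlapping node */
	return INTERVAL_ITER_CONT;	/* never stop early */
}

/* count the nodes in 'root' overlapping the inclusive range [start, end] */
static unsigned int count_overlaps(struct interval_node *root,
				   u64 start, u64 end)
{
	struct interval_node_extent ext = { .start = start, .end = end };
	struct count_arg arg = { .count = 0 };

	interval_search(root, &ext, count_cb, &arg);
	return arg.count;
}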
@@ -1147,7 +1224,6 @@ void ldlm_lock_fail_match_locked(struct ldlm_lock *lock)
wake_up_all(&lock->l_waitq);
}
}
-EXPORT_SYMBOL(ldlm_lock_fail_match_locked);
/**
* Mark lock as "matchable" by OST.
@@ -1208,35 +1284,45 @@ EXPORT_SYMBOL(ldlm_lock_allow_match);
enum ldlm_mode ldlm_lock_match(struct ldlm_namespace *ns, __u64 flags,
const struct ldlm_res_id *res_id,
enum ldlm_type type,
- ldlm_policy_data_t *policy,
+ union ldlm_policy_data *policy,
enum ldlm_mode mode,
struct lustre_handle *lockh, int unref)
{
+ struct lock_match_data data = {
+ .lmd_old = NULL,
+ .lmd_lock = NULL,
+ .lmd_mode = &mode,
+ .lmd_policy = policy,
+ .lmd_flags = flags,
+ .lmd_unref = unref,
+ };
struct ldlm_resource *res;
- struct ldlm_lock *lock, *old_lock = NULL;
+ struct ldlm_lock *lock;
int rc = 0;
if (!ns) {
- old_lock = ldlm_handle2lock(lockh);
- LASSERT(old_lock);
+ data.lmd_old = ldlm_handle2lock(lockh);
+ LASSERT(data.lmd_old);
- ns = ldlm_lock_to_ns(old_lock);
- res_id = &old_lock->l_resource->lr_name;
- type = old_lock->l_resource->lr_type;
- mode = old_lock->l_req_mode;
+ ns = ldlm_lock_to_ns(data.lmd_old);
+ res_id = &data.lmd_old->l_resource->lr_name;
+ type = data.lmd_old->l_resource->lr_type;
+ *data.lmd_mode = data.lmd_old->l_req_mode;
}
res = ldlm_resource_get(ns, NULL, res_id, type, 0);
if (IS_ERR(res)) {
- LASSERT(!old_lock);
+ LASSERT(!data.lmd_old);
return 0;
}
LDLM_RESOURCE_ADDREF(res);
lock_res(res);
- lock = search_queue(&res->lr_granted, &mode, policy, old_lock,
- flags, unref);
+ if (res->lr_type == LDLM_EXTENT)
+ lock = search_itree(res, &data);
+ else
+ lock = search_queue(&res->lr_granted, &data);
if (lock) {
rc = 1;
goto out;
@@ -1245,14 +1331,12 @@ enum ldlm_mode ldlm_lock_match(struct ldlm_namespace *ns, __u64 flags,
rc = 0;
goto out;
}
- lock = search_queue(&res->lr_waiting, &mode, policy, old_lock,
- flags, unref);
+ lock = search_queue(&res->lr_waiting, &data);
if (lock) {
rc = 1;
goto out;
}
-
- out:
+out:
unlock_res(res);
LDLM_RESOURCE_DELREF(res);
ldlm_resource_putref(res);
@@ -1324,8 +1408,8 @@ enum ldlm_mode ldlm_lock_match(struct ldlm_namespace *ns, __u64 flags,
(type == LDLM_PLAIN || type == LDLM_IBITS) ?
res_id->name[3] : policy->l_extent.end);
}
- if (old_lock)
- LDLM_LOCK_PUT(old_lock);
+ if (data.lmd_old)
+ LDLM_LOCK_PUT(data.lmd_old);
return rc ? mode : 0;
}
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_lockd.c b/drivers/staging/lustre/lustre/ldlm/ldlm_lockd.c
index fde697ebaadc..12647af5a336 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_lockd.c
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_lockd.c
@@ -511,23 +511,6 @@ static inline void ldlm_callback_errmsg(struct ptlrpc_request *req,
CWARN("Send reply failed, maybe cause bug 21636.\n");
}
-static int ldlm_handle_qc_callback(struct ptlrpc_request *req)
-{
- struct obd_quotactl *oqctl;
- struct client_obd *cli = &req->rq_export->exp_obd->u.cli;
-
- oqctl = req_capsule_client_get(&req->rq_pill, &RMF_OBD_QUOTACTL);
- if (!oqctl) {
- CERROR("Can't unpack obd_quotactl\n");
- return -EPROTO;
- }
-
- oqctl->qc_stat = ptlrpc_status_ntoh(oqctl->qc_stat);
-
- cli->cl_qchk_stat = oqctl->qc_stat;
- return 0;
-}
-
/* TODO: handle requests in a similar way as MDT: see mdt_handle_common() */
static int ldlm_callback_handler(struct ptlrpc_request *req)
{
@@ -577,13 +560,6 @@ static int ldlm_callback_handler(struct ptlrpc_request *req)
rc = ldlm_handle_setinfo(req);
ldlm_callback_reply(req, rc);
return 0;
- case OBD_QC_CALLBACK:
- req_capsule_set(&req->rq_pill, &RQF_QC_CALLBACK);
- if (OBD_FAIL_CHECK(OBD_FAIL_OBD_QC_CALLBACK_NET))
- return 0;
- rc = ldlm_handle_qc_callback(req);
- ldlm_callback_reply(req, rc);
- return 0;
default:
CERROR("unknown opcode %u\n",
lustre_msg_get_opc(req->rq_reqmsg));
@@ -858,7 +834,6 @@ int ldlm_get_ref(void)
return rc;
}
-EXPORT_SYMBOL(ldlm_get_ref);
void ldlm_put_ref(void)
{
@@ -875,7 +850,6 @@ void ldlm_put_ref(void)
}
mutex_unlock(&ldlm_ref_mutex);
}
-EXPORT_SYMBOL(ldlm_put_ref);
static ssize_t cancel_unused_locks_before_replay_show(struct kobject *kobj,
struct attribute *attr,
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_plain.c b/drivers/staging/lustre/lustre/ldlm/ldlm_plain.c
index 0aed39c46154..862ea0a1dc97 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_plain.c
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_plain.c
@@ -54,14 +54,14 @@
#include "ldlm_internal.h"
-void ldlm_plain_policy_wire_to_local(const ldlm_wire_policy_data_t *wpolicy,
- ldlm_policy_data_t *lpolicy)
+void ldlm_plain_policy_wire_to_local(const union ldlm_wire_policy_data *wpolicy,
+ union ldlm_policy_data *lpolicy)
{
/* No policy for plain locks */
}
-void ldlm_plain_policy_local_to_wire(const ldlm_policy_data_t *lpolicy,
- ldlm_wire_policy_data_t *wpolicy)
+void ldlm_plain_policy_local_to_wire(const union ldlm_policy_data *lpolicy,
+ union ldlm_wire_policy_data *wpolicy)
{
/* No policy for plain locks */
}
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_pool.c b/drivers/staging/lustre/lustre/ldlm/ldlm_pool.c
index 9a1136e32dfc..8dfb3c8e6b7a 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_pool.c
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_pool.c
@@ -293,7 +293,7 @@ static int ldlm_cli_pool_recalc(struct ldlm_pool *pl)
* take into account pl->pl_recalc_time here.
*/
ret = ldlm_cancel_lru(container_of(pl, struct ldlm_namespace, ns_pool),
- 0, LCF_ASYNC, LDLM_CANCEL_LRUR);
+ 0, LCF_ASYNC, LDLM_LRU_FLAG_LRUR);
out:
spin_lock(&pl->pl_lock);
@@ -339,7 +339,7 @@ static int ldlm_cli_pool_shrink(struct ldlm_pool *pl,
if (nr == 0)
return (unused / 100) * sysctl_vfs_cache_pressure;
else
- return ldlm_cancel_lru(ns, nr, LCF_ASYNC, LDLM_CANCEL_SHRINK);
+ return ldlm_cancel_lru(ns, nr, LCF_ASYNC, LDLM_LRU_FLAG_SHRINK);
}
static const struct ldlm_pool_ops ldlm_cli_pool_ops = {
@@ -356,10 +356,10 @@ static int ldlm_pool_recalc(struct ldlm_pool *pl)
u32 recalc_interval_sec;
int count;
- recalc_interval_sec = ktime_get_seconds() - pl->pl_recalc_time;
+ recalc_interval_sec = ktime_get_real_seconds() - pl->pl_recalc_time;
if (recalc_interval_sec > 0) {
spin_lock(&pl->pl_lock);
- recalc_interval_sec = ktime_get_seconds() - pl->pl_recalc_time;
+ recalc_interval_sec = ktime_get_real_seconds() - pl->pl_recalc_time;
if (recalc_interval_sec > 0) {
/*
@@ -382,7 +382,7 @@ static int ldlm_pool_recalc(struct ldlm_pool *pl)
count);
}
- recalc_interval_sec = pl->pl_recalc_time - ktime_get_seconds() +
+ recalc_interval_sec = pl->pl_recalc_time - ktime_get_real_seconds() +
pl->pl_recalc_period;
if (recalc_interval_sec <= 0) {
/* DEBUG: should be re-removed after LU-4536 is fixed */
@@ -651,13 +651,13 @@ static void ldlm_pool_debugfs_fini(struct ldlm_pool *pl)
}
int ldlm_pool_init(struct ldlm_pool *pl, struct ldlm_namespace *ns,
- int idx, ldlm_side_t client)
+ int idx, enum ldlm_side client)
{
int rc;
spin_lock_init(&pl->pl_lock);
atomic_set(&pl->pl_granted, 0);
- pl->pl_recalc_time = ktime_get_seconds();
+ pl->pl_recalc_time = ktime_get_real_seconds();
atomic_set(&pl->pl_lock_volume_factor, 1);
atomic_set(&pl->pl_grant_rate, 0);
@@ -684,7 +684,6 @@ int ldlm_pool_init(struct ldlm_pool *pl, struct ldlm_namespace *ns,
return rc;
}
-EXPORT_SYMBOL(ldlm_pool_init);
void ldlm_pool_fini(struct ldlm_pool *pl)
{
@@ -698,7 +697,6 @@ void ldlm_pool_fini(struct ldlm_pool *pl)
*/
POISON(pl, 0x5a, sizeof(*pl));
}
-EXPORT_SYMBOL(ldlm_pool_fini);
/**
* Add new taken ldlm lock \a lock into pool \a pl accounting.
@@ -724,7 +722,6 @@ void ldlm_pool_add(struct ldlm_pool *pl, struct ldlm_lock *lock)
* with too long call paths.
*/
}
-EXPORT_SYMBOL(ldlm_pool_add);
/**
* Remove ldlm lock \a lock from pool \a pl accounting.
@@ -743,7 +740,6 @@ void ldlm_pool_del(struct ldlm_pool *pl, struct ldlm_lock *lock)
lprocfs_counter_incr(pl->pl_stats, LDLM_POOL_CANCEL_STAT);
}
-EXPORT_SYMBOL(ldlm_pool_del);
/**
* Returns current \a pl SLV.
@@ -792,13 +788,12 @@ static struct completion ldlm_pools_comp;
* count locks from all namespaces (if possible). Returns number of
* cached locks.
*/
-static unsigned long ldlm_pools_count(ldlm_side_t client, gfp_t gfp_mask)
+static unsigned long ldlm_pools_count(enum ldlm_side client, gfp_t gfp_mask)
{
unsigned long total = 0;
int nr_ns;
struct ldlm_namespace *ns;
struct ldlm_namespace *ns_old = NULL; /* loop detection */
- void *cookie;
if (client == LDLM_NAMESPACE_CLIENT && !(gfp_mask & __GFP_FS))
return 0;
@@ -806,8 +801,6 @@ static unsigned long ldlm_pools_count(ldlm_side_t client, gfp_t gfp_mask)
CDEBUG(D_DLMTRACE, "Request to count %s locks from all pools\n",
client == LDLM_NAMESPACE_CLIENT ? "client" : "server");
- cookie = cl_env_reenter();
-
/*
* Find out how many resources we may release.
*/
@@ -816,7 +809,6 @@ static unsigned long ldlm_pools_count(ldlm_side_t client, gfp_t gfp_mask)
mutex_lock(ldlm_namespace_lock(client));
if (list_empty(ldlm_namespace_list(client))) {
mutex_unlock(ldlm_namespace_lock(client));
- cl_env_reexit(cookie);
return 0;
}
ns = ldlm_namespace_first_locked(client);
@@ -842,22 +834,19 @@ static unsigned long ldlm_pools_count(ldlm_side_t client, gfp_t gfp_mask)
ldlm_namespace_put(ns);
}
- cl_env_reexit(cookie);
return total;
}
-static unsigned long ldlm_pools_scan(ldlm_side_t client, int nr, gfp_t gfp_mask)
+static unsigned long ldlm_pools_scan(enum ldlm_side client, int nr,
+ gfp_t gfp_mask)
{
unsigned long freed = 0;
int tmp, nr_ns;
struct ldlm_namespace *ns;
- void *cookie;
if (client == LDLM_NAMESPACE_CLIENT && !(gfp_mask & __GFP_FS))
return -1;
- cookie = cl_env_reenter();
-
/*
* Shrink at least ldlm_namespace_nr_read(client) namespaces.
*/
@@ -887,7 +876,6 @@ static unsigned long ldlm_pools_scan(ldlm_side_t client, int nr, gfp_t gfp_mask)
freed += ldlm_pool_shrink(&ns->ns_pool, cancel, gfp_mask);
ldlm_namespace_put(ns);
}
- cl_env_reexit(cookie);
/*
* we only decrease the SLV in server pools shrinker, return
* SHRINK_STOP to kernel to avoid needless loop. LU-1128
@@ -908,7 +896,7 @@ static unsigned long ldlm_pools_cli_scan(struct shrinker *s,
sc->gfp_mask);
}
-static int ldlm_pools_recalc(ldlm_side_t client)
+static int ldlm_pools_recalc(enum ldlm_side client)
{
struct ldlm_namespace *ns;
struct ldlm_namespace *ns_old = NULL;
@@ -1095,7 +1083,6 @@ int ldlm_pools_init(void)
return rc;
}
-EXPORT_SYMBOL(ldlm_pools_init);
void ldlm_pools_fini(void)
{
@@ -1104,4 +1091,3 @@ void ldlm_pools_fini(void)
ldlm_pools_thread_stop();
}
-EXPORT_SYMBOL(ldlm_pools_fini);
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_request.c b/drivers/staging/lustre/lustre/ldlm/ldlm_request.c
index 35ba6f14d95f..c1f8693f94a5 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_request.c
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_request.c
@@ -93,11 +93,7 @@ static int ldlm_expired_completion_wait(void *data)
if (!lock->l_conn_export) {
static unsigned long next_dump, last_dump;
- LCONSOLE_WARN("lock timed out (enqueued at %lld, %llds ago)\n",
- (s64)lock->l_last_activity,
- (s64)(ktime_get_real_seconds() -
- lock->l_last_activity));
- LDLM_DEBUG(lock, "lock timed out (enqueued at %lld, %llds ago); not entering recovery in server code, just going back to sleep",
+ LDLM_ERROR(lock, "lock timed out (enqueued at %lld, %llds ago); not entering recovery in server code, just going back to sleep",
(s64)lock->l_last_activity,
(s64)(ktime_get_real_seconds() -
lock->l_last_activity));
@@ -475,12 +471,7 @@ int ldlm_cli_enqueue_fini(struct obd_export *exp, struct ptlrpc_request *req,
"client-side enqueue, new policy data");
}
- if ((*flags) & LDLM_FL_AST_SENT ||
- /* Cancel extent locks as soon as possible on a liblustre client,
- * because it cannot handle asynchronous ASTs robustly (see
- * bug 7311).
- */
- (LIBLUSTRE_CLIENT && type == LDLM_EXTENT)) {
+ if ((*flags) & LDLM_FL_AST_SENT) {
lock_res_and_lock(lock);
lock->l_flags |= LDLM_FL_CBPENDING | LDLM_FL_BL_AST;
unlock_res_and_lock(lock);
@@ -602,7 +593,7 @@ int ldlm_prep_elc_req(struct obd_export *exp, struct ptlrpc_request *req,
avail = ldlm_capsule_handles_avail(pill, RCL_CLIENT, canceloff);
flags = ns_connect_lru_resize(ns) ?
- LDLM_CANCEL_LRUR_NO_WAIT : LDLM_CANCEL_AGED;
+ LDLM_LRU_FLAG_LRUR_NO_WAIT : LDLM_LRU_FLAG_AGED;
to_free = !ns_connect_lru_resize(ns) &&
opc == LDLM_ENQUEUE ? 1 : 0;
@@ -657,6 +648,27 @@ int ldlm_prep_enqueue_req(struct obd_export *exp, struct ptlrpc_request *req,
}
EXPORT_SYMBOL(ldlm_prep_enqueue_req);
+static struct ptlrpc_request *ldlm_enqueue_pack(struct obd_export *exp,
+ int lvb_len)
+{
+ struct ptlrpc_request *req;
+ int rc;
+
+ req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_LDLM_ENQUEUE);
+ if (!req)
+ return ERR_PTR(-ENOMEM);
+
+ rc = ldlm_prep_enqueue_req(exp, req, NULL, 0);
+ if (rc) {
+ ptlrpc_request_free(req);
+ return ERR_PTR(rc);
+ }
+
+ req_capsule_set_size(&req->rq_pill, &RMF_DLM_LVB, RCL_SERVER, lvb_len);
+ ptlrpc_request_set_replen(req);
+ return req;
+}
+
/**
* Client-side lock enqueue.
*
@@ -670,7 +682,7 @@ EXPORT_SYMBOL(ldlm_prep_enqueue_req);
int ldlm_cli_enqueue(struct obd_export *exp, struct ptlrpc_request **reqp,
struct ldlm_enqueue_info *einfo,
const struct ldlm_res_id *res_id,
- ldlm_policy_data_t const *policy, __u64 *flags,
+ union ldlm_policy_data const *policy, __u64 *flags,
void *lvb, __u32 lvb_len, enum lvb_type lvb_type,
struct lustre_handle *lockh, int async)
{
@@ -727,17 +739,14 @@ int ldlm_cli_enqueue(struct obd_export *exp, struct ptlrpc_request **reqp,
lock->l_last_activity = ktime_get_real_seconds();
/* lock not sent to server yet */
-
if (!reqp || !*reqp) {
- req = ptlrpc_request_alloc_pack(class_exp2cliimp(exp),
- &RQF_LDLM_ENQUEUE,
- LUSTRE_DLM_VERSION,
- LDLM_ENQUEUE);
- if (!req) {
+ req = ldlm_enqueue_pack(exp, lvb_len);
+ if (IS_ERR(req)) {
failed_lock_cleanup(ns, lock, einfo->ei_mode);
LDLM_LOCK_RELEASE(lock);
- return -ENOMEM;
+ return PTR_ERR(req);
}
+
req_passed_in = 0;
if (reqp)
*reqp = req;
@@ -757,24 +766,6 @@ int ldlm_cli_enqueue(struct obd_export *exp, struct ptlrpc_request **reqp,
body->lock_flags = ldlm_flags_to_wire(*flags);
body->lock_handle[0] = *lockh;
- /* Continue as normal. */
- if (!req_passed_in) {
- if (lvb_len > 0)
- req_capsule_extend(&req->rq_pill,
- &RQF_LDLM_ENQUEUE_LVB);
- req_capsule_set_size(&req->rq_pill, &RMF_DLM_LVB, RCL_SERVER,
- lvb_len);
- ptlrpc_request_set_replen(req);
- }
-
- /*
- * Liblustre client doesn't get extent locks, except for O_APPEND case
- * where [0, OBD_OBJECT_EOF] lock is taken, or truncate, where
- * [i_size, OBD_OBJECT_EOF] lock is taken.
- */
- LASSERT(ergo(LIBLUSTRE_CLIENT, einfo->ei_type != LDLM_EXTENT ||
- policy->l_extent.end == OBD_OBJECT_EOF));
-
if (async) {
LASSERT(reqp);
return 0;
@@ -1022,7 +1013,6 @@ int ldlm_cli_update_pool(struct ptlrpc_request *req)
return 0;
}
-EXPORT_SYMBOL(ldlm_cli_update_pool);
/**
* Client side lock cancel.
@@ -1067,7 +1057,7 @@ int ldlm_cli_cancel(const struct lustre_handle *lockh,
ns = ldlm_lock_to_ns(lock);
flags = ns_connect_lru_resize(ns) ?
- LDLM_CANCEL_LRUR : LDLM_CANCEL_AGED;
+ LDLM_LRU_FLAG_LRUR : LDLM_LRU_FLAG_AGED;
count += ldlm_cancel_lru_local(ns, &cancels, 0, avail - 1,
LCF_BL_AST, flags);
}
@@ -1125,7 +1115,6 @@ int ldlm_cli_cancel_list_local(struct list_head *cancels, int count,
return count;
}
-EXPORT_SYMBOL(ldlm_cli_cancel_list_local);
/**
* Cancel as many locks as possible w/o sending any RPCs (e.g. to write back
@@ -1184,6 +1173,14 @@ static enum ldlm_policy_res ldlm_cancel_lrur_policy(struct ldlm_namespace *ns,
if (count && added >= count)
return LDLM_POLICY_KEEP_LOCK;
+ /*
+ * Regardless of the LV, it makes no sense to keep a lock that
+ * has been unused for ns_max_age time.
+ */
+ if (cfs_time_after(cfs_time_current(),
+ cfs_time_add(lock->l_last_used, ns->ns_max_age)))
+ return LDLM_POLICY_CANCEL_LOCK;
+
slv = ldlm_pool_get_slv(pl);
lvf = ldlm_pool_get_lvf(pl);
la = cfs_duration_sec(cfs_time_sub(cur, lock->l_last_used));
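The new age check uses the jiffies-based libcfs time helpers; assuming cfs_time_current(), cfs_time_add() and cfs_time_after() reduce to plain jiffies arithmetic, as in the Linux libcfs implementation, it is equivalent to:

static bool example_lock_aged_out(const struct ldlm_lock *lock,
				  const struct ldlm_namespace *ns)
{
	/* true once the lock has sat unused for more than ns_max_age */
	return time_after(jiffies, lock->l_last_used + ns->ns_max_age);
}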
@@ -1287,21 +1284,21 @@ typedef enum ldlm_policy_res (*ldlm_cancel_lru_policy_t)(
static ldlm_cancel_lru_policy_t
ldlm_cancel_lru_policy(struct ldlm_namespace *ns, int flags)
{
- if (flags & LDLM_CANCEL_NO_WAIT)
+ if (flags & LDLM_LRU_FLAG_NO_WAIT)
return ldlm_cancel_no_wait_policy;
if (ns_connect_lru_resize(ns)) {
- if (flags & LDLM_CANCEL_SHRINK)
+ if (flags & LDLM_LRU_FLAG_SHRINK)
/* We kill passed number of old locks. */
return ldlm_cancel_passed_policy;
- else if (flags & LDLM_CANCEL_LRUR)
+ else if (flags & LDLM_LRU_FLAG_LRUR)
return ldlm_cancel_lrur_policy;
- else if (flags & LDLM_CANCEL_PASSED)
+ else if (flags & LDLM_LRU_FLAG_PASSED)
return ldlm_cancel_passed_policy;
- else if (flags & LDLM_CANCEL_LRUR_NO_WAIT)
+ else if (flags & LDLM_LRU_FLAG_LRUR_NO_WAIT)
return ldlm_cancel_lrur_no_wait_policy;
} else {
- if (flags & LDLM_CANCEL_AGED)
+ if (flags & LDLM_LRU_FLAG_AGED)
return ldlm_cancel_aged_policy;
}
@@ -1325,21 +1322,21 @@ ldlm_cancel_lru_policy(struct ldlm_namespace *ns, int flags)
*
* Calling policies for enabled LRU resize:
* ----------------------------------------
- * flags & LDLM_CANCEL_LRUR - use LRU resize policy (SLV from server) to
- * cancel not more than \a count locks;
+ * flags & LDLM_LRU_FLAG_LRUR - use LRU resize policy (SLV from server) to
+ * cancel not more than \a count locks;
*
- * flags & LDLM_CANCEL_PASSED - cancel \a count number of old locks (located at
- * the beginning of LRU list);
+ * flags & LDLM_LRU_FLAG_PASSED - cancel \a count number of old locks (located at
+ * the beginning of LRU list);
*
- * flags & LDLM_CANCEL_SHRINK - cancel not more than \a count locks according to
- * memory pressure policy function;
+ * flags & LDLM_LRU_FLAG_SHRINK - cancel not more than \a count locks according to
+ * memory pressure policy function;
*
- * flags & LDLM_CANCEL_AGED - cancel \a count locks according to "aged policy".
+ * flags & LDLM_LRU_FLAG_AGED - cancel \a count locks according to "aged policy".
*
- * flags & LDLM_CANCEL_NO_WAIT - cancel as many unused locks as possible
- * (typically before replaying locks) w/o
- * sending any RPCs or waiting for any
- * outstanding RPC to complete.
+ * flags & LDLM_LRU_FLAG_NO_WAIT - cancel as many unused locks as possible
+ * (typically before replaying locks) w/o
+ * sending any RPCs or waiting for any
+ * outstanding RPC to complete.
*/
static int ldlm_prepare_lru_list(struct ldlm_namespace *ns,
struct list_head *cancels, int count, int max,
@@ -1348,7 +1345,7 @@ static int ldlm_prepare_lru_list(struct ldlm_namespace *ns,
ldlm_cancel_lru_policy_t pf;
struct ldlm_lock *lock, *next;
int added = 0, unused, remained;
- int no_wait = flags & (LDLM_CANCEL_NO_WAIT | LDLM_CANCEL_LRUR_NO_WAIT);
+ int no_wait = flags & (LDLM_LRU_FLAG_NO_WAIT | LDLM_LRU_FLAG_LRUR_NO_WAIT);
spin_lock(&ns->ns_lock);
unused = ns->ns_nr_unused;
@@ -1531,7 +1528,7 @@ int ldlm_cancel_lru(struct ldlm_namespace *ns, int nr,
*/
int ldlm_cancel_resource_local(struct ldlm_resource *res,
struct list_head *cancels,
- ldlm_policy_data_t *policy,
+ union ldlm_policy_data *policy,
enum ldlm_mode mode, __u64 lock_flags,
enum ldlm_cancel_flags cancel_flags,
void *opaque)
@@ -1648,7 +1645,7 @@ EXPORT_SYMBOL(ldlm_cli_cancel_list);
*/
int ldlm_cli_cancel_unused_resource(struct ldlm_namespace *ns,
const struct ldlm_res_id *res_id,
- ldlm_policy_data_t *policy,
+ union ldlm_policy_data *policy,
enum ldlm_mode mode,
enum ldlm_cancel_flags flags,
void *opaque)
@@ -1723,7 +1720,7 @@ int ldlm_cli_cancel_unused(struct ldlm_namespace *ns,
opaque);
} else {
cfs_hash_for_each_nolock(ns->ns_rs_hash,
- ldlm_cli_hash_cancel_unused, &arg);
+ ldlm_cli_hash_cancel_unused, &arg, 0);
return ELDLM_OK;
}
}
@@ -1796,7 +1793,7 @@ static void ldlm_namespace_foreach(struct ldlm_namespace *ns,
};
cfs_hash_for_each_nolock(ns->ns_rs_hash,
- ldlm_res_iter_helper, &helper);
+ ldlm_res_iter_helper, &helper, 0);
}
/* non-blocking function to manipulate a lock whose cb_data is being put away.
@@ -1840,7 +1837,7 @@ static int ldlm_chain_lock_for_replay(struct ldlm_lock *lock, void *closure)
* bug 17614: locks being actively cancelled. Get a reference
* on a lock so that it does not disappear under us (e.g. due to cancel)
*/
- if (!(lock->l_flags & (LDLM_FL_FAILED | LDLM_FL_CANCELING))) {
+ if (!(lock->l_flags & (LDLM_FL_FAILED | LDLM_FL_BL_DONE))) {
list_add(&lock->l_pending_chain, list);
LDLM_LOCK_GET(lock);
}
@@ -1909,7 +1906,7 @@ static int replay_one_lock(struct obd_import *imp, struct ldlm_lock *lock)
int flags;
/* Bug 11974: Do not replay a lock which is actively being canceled */
- if (ldlm_is_canceling(lock)) {
+ if (ldlm_is_bl_done(lock)) {
LDLM_DEBUG(lock, "Not replaying canceled lock:");
return 0;
}
@@ -2003,11 +2000,11 @@ static void ldlm_cancel_unused_locks_for_replay(struct ldlm_namespace *ns)
ldlm_ns_name(ns), ns->ns_nr_unused);
/* We don't need to care whether or not LRU resize is enabled
- * because the LDLM_CANCEL_NO_WAIT policy doesn't use the
+ * because the LDLM_LRU_FLAG_NO_WAIT policy doesn't use the
* count parameter
*/
canceled = ldlm_cancel_lru_local(ns, &cancels, ns->ns_nr_unused, 0,
- LCF_LOCAL, LDLM_CANCEL_NO_WAIT);
+ LCF_LOCAL, LDLM_LRU_FLAG_NO_WAIT);
CDEBUG(D_DLMTRACE, "Canceled %d unused locks from namespace %s\n",
canceled, ldlm_ns_name(ns));
@@ -2048,4 +2045,3 @@ int ldlm_replay_locks(struct obd_import *imp)
return rc;
}
-EXPORT_SYMBOL(ldlm_replay_locks);
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_resource.c b/drivers/staging/lustre/lustre/ldlm/ldlm_resource.c
index a09c25aea698..b22f5bae7201 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_resource.c
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_resource.c
@@ -226,7 +226,7 @@ static ssize_t lru_size_store(struct kobject *kobj, struct attribute *attr,
/* Try to cancel all @ns_nr_unused locks. */
canceled = ldlm_cancel_lru(ns, unused, 0,
- LDLM_CANCEL_PASSED);
+ LDLM_LRU_FLAG_PASSED);
if (canceled < unused) {
CDEBUG(D_DLMTRACE,
"not all requested locks are canceled, requested: %d, canceled: %d\n",
@@ -237,7 +237,7 @@ static ssize_t lru_size_store(struct kobject *kobj, struct attribute *attr,
} else {
tmp = ns->ns_max_unused;
ns->ns_max_unused = 0;
- ldlm_cancel_lru(ns, 0, 0, LDLM_CANCEL_PASSED);
+ ldlm_cancel_lru(ns, 0, 0, LDLM_LRU_FLAG_PASSED);
ns->ns_max_unused = tmp;
}
return count;
@@ -262,7 +262,7 @@ static ssize_t lru_size_store(struct kobject *kobj, struct attribute *attr,
"changing namespace %s unused locks from %u to %u\n",
ldlm_ns_name(ns), ns->ns_nr_unused,
(unsigned int)tmp);
- ldlm_cancel_lru(ns, tmp, LCF_ASYNC, LDLM_CANCEL_PASSED);
+ ldlm_cancel_lru(ns, tmp, LCF_ASYNC, LDLM_LRU_FLAG_PASSED);
if (!lru_resize) {
CDEBUG(D_DLMTRACE,
@@ -276,7 +276,7 @@ static ssize_t lru_size_store(struct kobject *kobj, struct attribute *attr,
ldlm_ns_name(ns), ns->ns_max_unused,
(unsigned int)tmp);
ns->ns_max_unused = (unsigned int)tmp;
- ldlm_cancel_lru(ns, 0, LCF_ASYNC, LDLM_CANCEL_PASSED);
+ ldlm_cancel_lru(ns, 0, LCF_ASYNC, LDLM_LRU_FLAG_PASSED);
/* Make sure that LRU resize was originally supported before
* turning it on here.
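The LDLM_CANCEL_* names are renamed wholesale to LDLM_LRU_FLAG_*. Only the NO_WAIT, LRUR_NO_WAIT and PASSED members appear in these hunks; the rest of the set below is an assumption filled in for context:

/* Assumed renamed LRU-cancel flag set (was LDLM_CANCEL_*). */
enum ldlm_lru_flags {
	LDLM_LRU_FLAG_AGED	= BIT(0),	/* cancel aged locks */
	LDLM_LRU_FLAG_PASSED	= BIT(1),	/* cancel the passed count */
	LDLM_LRU_FLAG_SHRINK	= BIT(2),	/* cancel from the shrinker */
	LDLM_LRU_FLAG_LRUR	= BIT(3),	/* cancel via LRU resize */
	LDLM_LRU_FLAG_NO_WAIT	= BIT(4),	/* never block on a lock */
	LDLM_LRU_FLAG_LRUR_NO_WAIT = BIT(5),	/* LRU resize, non-blocking */
};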
@@ -445,8 +445,8 @@ static struct ldlm_resource *ldlm_resource_getref(struct ldlm_resource *res)
return res;
}
-static unsigned ldlm_res_hop_hash(struct cfs_hash *hs,
- const void *key, unsigned mask)
+static unsigned int ldlm_res_hop_hash(struct cfs_hash *hs,
+ const void *key, unsigned int mask)
{
const struct ldlm_res_id *id = key;
unsigned int val = 0;
@@ -457,8 +457,8 @@ static unsigned ldlm_res_hop_hash(struct cfs_hash *hs,
return val & mask;
}
-static unsigned ldlm_res_hop_fid_hash(struct cfs_hash *hs,
- const void *key, unsigned mask)
+static unsigned int ldlm_res_hop_fid_hash(struct cfs_hash *hs,
+ const void *key, unsigned int mask)
{
const struct ldlm_res_id *id = key;
struct lu_fid fid;
@@ -612,7 +612,7 @@ static struct ldlm_ns_hash_def ldlm_ns_hash_defs[] = {
/** Register \a ns in the list of namespaces */
static void ldlm_namespace_register(struct ldlm_namespace *ns,
- ldlm_side_t client)
+ enum ldlm_side client)
{
mutex_lock(ldlm_namespace_lock(client));
LASSERT(list_empty(&ns->ns_list_chain));
@@ -625,7 +625,7 @@ static void ldlm_namespace_register(struct ldlm_namespace *ns,
* Create and initialize new empty namespace.
*/
struct ldlm_namespace *ldlm_namespace_new(struct obd_device *obd, char *name,
- ldlm_side_t client,
+ enum ldlm_side client,
enum ldlm_appetite apt,
enum ldlm_ns_type ns_type)
{
@@ -855,8 +855,10 @@ int ldlm_namespace_cleanup(struct ldlm_namespace *ns, __u64 flags)
return ELDLM_OK;
}
- cfs_hash_for_each_nolock(ns->ns_rs_hash, ldlm_resource_clean, &flags);
- cfs_hash_for_each_nolock(ns->ns_rs_hash, ldlm_resource_complain, NULL);
+ cfs_hash_for_each_nolock(ns->ns_rs_hash, ldlm_resource_clean,
+ &flags, 0);
+ cfs_hash_for_each_nolock(ns->ns_rs_hash, ldlm_resource_complain,
+ NULL, 0);
return ELDLM_OK;
}
EXPORT_SYMBOL(ldlm_namespace_cleanup);
@@ -952,7 +954,7 @@ void ldlm_namespace_free_prior(struct ldlm_namespace *ns,
/** Unregister \a ns from the list of namespaces. */
static void ldlm_namespace_unregister(struct ldlm_namespace *ns,
- ldlm_side_t client)
+ enum ldlm_side client)
{
mutex_lock(ldlm_namespace_lock(client));
LASSERT(!list_empty(&ns->ns_list_chain));
@@ -999,7 +1001,6 @@ void ldlm_namespace_get(struct ldlm_namespace *ns)
{
atomic_inc(&ns->ns_bref);
}
-EXPORT_SYMBOL(ldlm_namespace_get);
/* This is only for callers that care about refcount */
static int ldlm_namespace_get_return(struct ldlm_namespace *ns)
@@ -1014,11 +1015,10 @@ void ldlm_namespace_put(struct ldlm_namespace *ns)
spin_unlock(&ns->ns_lock);
}
}
-EXPORT_SYMBOL(ldlm_namespace_put);
/** Should be called with ldlm_namespace_lock(client) taken. */
void ldlm_namespace_move_to_active_locked(struct ldlm_namespace *ns,
- ldlm_side_t client)
+ enum ldlm_side client)
{
LASSERT(!list_empty(&ns->ns_list_chain));
LASSERT(mutex_is_locked(ldlm_namespace_lock(client)));
@@ -1027,7 +1027,7 @@ void ldlm_namespace_move_to_active_locked(struct ldlm_namespace *ns,
/** Should be called with ldlm_namespace_lock(client) taken. */
void ldlm_namespace_move_to_inactive_locked(struct ldlm_namespace *ns,
- ldlm_side_t client)
+ enum ldlm_side client)
{
LASSERT(!list_empty(&ns->ns_list_chain));
LASSERT(mutex_is_locked(ldlm_namespace_lock(client)));
@@ -1035,7 +1035,7 @@ void ldlm_namespace_move_to_inactive_locked(struct ldlm_namespace *ns,
}
/** Should be called with ldlm_namespace_lock(client) taken. */
-struct ldlm_namespace *ldlm_namespace_first_locked(ldlm_side_t client)
+struct ldlm_namespace *ldlm_namespace_first_locked(enum ldlm_side client)
{
LASSERT(mutex_is_locked(ldlm_namespace_lock(client)));
LASSERT(!list_empty(ldlm_namespace_list(client)));
@@ -1305,7 +1305,7 @@ void ldlm_res2desc(struct ldlm_resource *res, struct ldlm_resource_desc *desc)
* Print information about all locks in all namespaces on this node to debug
* log.
*/
-void ldlm_dump_all_namespaces(ldlm_side_t client, int level)
+void ldlm_dump_all_namespaces(enum ldlm_side client, int level)
{
struct list_head *tmp;
@@ -1323,7 +1323,6 @@ void ldlm_dump_all_namespaces(ldlm_side_t client, int level)
mutex_unlock(ldlm_namespace_lock(client));
}
-EXPORT_SYMBOL(ldlm_dump_all_namespaces);
static int ldlm_res_hash_dump(struct cfs_hash *hs, struct cfs_hash_bd *bd,
struct hlist_node *hnode, void *arg)
@@ -1355,12 +1354,11 @@ void ldlm_namespace_dump(int level, struct ldlm_namespace *ns)
cfs_hash_for_each_nolock(ns->ns_rs_hash,
ldlm_res_hash_dump,
- (void *)(unsigned long)level);
+ (void *)(unsigned long)level, 0);
spin_lock(&ns->ns_lock);
ns->ns_next_dump = cfs_time_shift(10);
spin_unlock(&ns->ns_lock);
}
-EXPORT_SYMBOL(ldlm_namespace_dump);
/**
* Print information about all locks in this resource to debug log.
diff --git a/drivers/staging/lustre/lustre/llite/Makefile b/drivers/staging/lustre/lustre/llite/Makefile
index 1ac0940bd8df..322d4fa63f5d 100644
--- a/drivers/staging/lustre/lustre/llite/Makefile
+++ b/drivers/staging/lustre/lustre/llite/Makefile
@@ -1,7 +1,7 @@
obj-$(CONFIG_LUSTRE_FS) += lustre.o
-lustre-y := dcache.o dir.o file.o llite_close.o llite_lib.o llite_nfs.o \
- rw.o namei.o symlink.o llite_mmap.o range_lock.o \
- xattr.o xattr_cache.o rw26.o super25.o statahead.o \
- glimpse.o lcommon_cl.o lcommon_misc.o \
- vvp_dev.o vvp_page.o vvp_lock.o vvp_io.o vvp_object.o vvp_req.o \
+lustre-y := dcache.o dir.o file.o llite_lib.o llite_nfs.o \
+ rw.o rw26.o namei.o symlink.o llite_mmap.o range_lock.o \
+ xattr.o xattr_cache.o xattr_security.o \
+ super25.o statahead.o glimpse.o lcommon_cl.o lcommon_misc.o \
+ vvp_dev.o vvp_page.o vvp_lock.o vvp_io.o vvp_object.o \
lproc_llite.o
diff --git a/drivers/staging/lustre/lustre/llite/dcache.c b/drivers/staging/lustre/lustre/llite/dcache.c
index 0e45d8fc4d7c..65bf0c401b44 100644
--- a/drivers/staging/lustre/lustre/llite/dcache.c
+++ b/drivers/staging/lustre/lustre/llite/dcache.c
@@ -57,9 +57,6 @@ static void ll_release(struct dentry *de)
LASSERT(de);
lld = ll_d2d(de);
- if (!lld) /* NFS copies the de->d_op methods (bug 4655) */
- return;
-
if (lld->lld_it) {
ll_intent_release(lld->lld_it);
kfree(lld->lld_it);
@@ -126,30 +123,13 @@ static int ll_ddelete(const struct dentry *de)
return 0;
}
-int ll_d_init(struct dentry *de)
+static int ll_d_init(struct dentry *de)
{
- CDEBUG(D_DENTRY, "ldd on dentry %pd (%p) parent %p inode %p refc %d\n",
- de, de, de->d_parent, d_inode(de), d_count(de));
-
- if (!de->d_fsdata) {
- struct ll_dentry_data *lld;
-
- lld = kzalloc(sizeof(*lld), GFP_NOFS);
- if (likely(lld)) {
- spin_lock(&de->d_lock);
- if (likely(!de->d_fsdata)) {
- de->d_fsdata = lld;
- __d_lustre_invalidate(de);
- } else {
- kfree(lld);
- }
- spin_unlock(&de->d_lock);
- } else {
- return -ENOMEM;
- }
- }
- LASSERT(de->d_op == &ll_d_ops);
-
+ struct ll_dentry_data *lld = kzalloc(sizeof(*lld), GFP_KERNEL);
+ if (unlikely(!lld))
+ return -ENOMEM;
+ lld->lld_invalid = 1;
+ de->d_fsdata = lld;
return 0;
}
@@ -300,6 +280,7 @@ static int ll_revalidate_nd(struct dentry *dentry, unsigned int flags)
}
const struct dentry_operations ll_d_ops = {
+ .d_init = ll_d_init,
.d_revalidate = ll_revalidate_nd,
.d_release = ll_release,
.d_delete = ll_ddelete,
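Moving the allocation into a ->d_init() hook lets the VFS guarantee d_fsdata exists before the dentry is ever visible, which is why the NFS guard in ll_release() and the racy d_lock dance in the old ll_d_init() can both go. The pairing, as a generic sketch (the ex_* names are illustrative, not from the patch):

struct ex_dentry_data {
	int valid;
};

static int ex_d_init(struct dentry *de)
{
	struct ex_dentry_data *dd = kzalloc(sizeof(*dd), GFP_KERNEL);

	if (unlikely(!dd))
		return -ENOMEM;		/* fails the dentry allocation */
	de->d_fsdata = dd;
	return 0;
}

static void ex_d_release(struct dentry *de)
{
	kfree(de->d_fsdata);		/* never NULL once d_init succeeded */
}

static const struct dentry_operations ex_d_ops = {
	.d_init		= ex_d_init,
	.d_release	= ex_d_release,
};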
diff --git a/drivers/staging/lustre/lustre/llite/dir.c b/drivers/staging/lustre/lustre/llite/dir.c
index 7f32a539d260..ea5d247a3f70 100644
--- a/drivers/staging/lustre/lustre/llite/dir.c
+++ b/drivers/staging/lustre/lustre/llite/dir.c
@@ -51,6 +51,8 @@
#include "../include/lustre_dlm.h"
#include "../include/lustre_fid.h"
#include "../include/lustre_kernelcomm.h"
+#include "../include/lustre_swab.h"
+
#include "llite_internal.h"
/*
@@ -410,6 +412,8 @@ static int ll_dir_setdirstripe(struct inode *parent, struct lmv_user_md *lump,
struct ptlrpc_request *request = NULL;
struct md_op_data *op_data;
struct ll_sb_info *sbi = ll_i2sbi(parent);
+ struct inode *inode = NULL;
+ struct dentry dentry;
int err;
if (unlikely(lump->lum_magic != LMV_USER_MAGIC))
@@ -419,6 +423,10 @@ static int ll_dir_setdirstripe(struct inode *parent, struct lmv_user_md *lump,
PFID(ll_inode2fid(parent)), parent, dirname,
(int)lump->lum_stripe_offset, lump->lum_stripe_count);
+ if (lump->lum_stripe_count > 1 &&
+ !(exp_connect_flags(sbi->ll_md_exp) & OBD_CONNECT_DIR_STRIPE))
+ return -EINVAL;
+
if (lump->lum_magic != cpu_to_le32(LMV_USER_MAGIC))
lustre_swab_lmv_user_md(lump);
@@ -439,8 +447,17 @@ static int ll_dir_setdirstripe(struct inode *parent, struct lmv_user_md *lump,
from_kgid(&init_user_ns, current_fsgid()),
cfs_curproc_cap_pack(), 0, &request);
ll_finish_md_op_data(op_data);
+
+ err = ll_prep_inode(&inode, request, parent->i_sb, NULL);
if (err)
goto err_exit;
+
+ memset(&dentry, 0, sizeof(dentry));
+ dentry.d_inode = inode;
+
+ err = ll_init_security(&dentry, inode, parent);
+ iput(inode);
+
err_exit:
ptlrpc_req_finished(request);
return err;
@@ -501,8 +518,7 @@ int ll_dir_setstripe(struct inode *inode, struct lov_user_md *lump,
return PTR_ERR(op_data);
/* swabbing is done in lov_setstripe() on server side */
- rc = md_setattr(sbi->ll_md_exp, op_data, lump, lum_size,
- NULL, 0, &req, NULL);
+ rc = md_setattr(sbi->ll_md_exp, op_data, lump, lum_size, &req);
ll_finish_md_op_data(op_data);
ptlrpc_req_finished(req);
if (rc) {
@@ -682,7 +698,7 @@ static int ll_ioc_copy_start(struct super_block *sb, struct hsm_copy *copy)
{
struct ll_sb_info *sbi = ll_s2sbi(sb);
struct hsm_progress_kernel hpk;
- int rc;
+ int rc2, rc = 0;
/* Forge a hsm_progress based on data from copy. */
hpk.hpk_fid = copy->hc_hai.hai_fid;
@@ -732,10 +748,10 @@ progress:
/* On error, the request should be considered as completed */
if (hpk.hpk_errval > 0)
hpk.hpk_flags |= HP_FLAG_COMPLETED;
- rc = obd_iocontrol(LL_IOC_HSM_PROGRESS, sbi->ll_md_exp, sizeof(hpk),
- &hpk, NULL);
+ rc2 = obd_iocontrol(LL_IOC_HSM_PROGRESS, sbi->ll_md_exp, sizeof(hpk),
+ &hpk, NULL);
- return rc;
+ return rc ? rc : rc2;
}
/**
@@ -757,7 +773,7 @@ static int ll_ioc_copy_end(struct super_block *sb, struct hsm_copy *copy)
{
struct ll_sb_info *sbi = ll_s2sbi(sb);
struct hsm_progress_kernel hpk;
- int rc;
+ int rc2, rc = 0;
/* If you modify the logic here, also check llapi_hsm_copy_end(). */
/* Take care: copy->hc_hai.hai_action, len, gid and data are not
@@ -823,18 +839,18 @@ static int ll_ioc_copy_end(struct super_block *sb, struct hsm_copy *copy)
* when the file will not be modified for some tunable
* time
*/
- /* we do not notify caller */
hpk.hpk_flags &= ~HP_FLAG_RETRY;
+ rc = -EBUSY;
/* hpk_errval must be >= 0 */
- hpk.hpk_errval = EBUSY;
+ hpk.hpk_errval = -rc;
}
}
progress:
- rc = obd_iocontrol(LL_IOC_HSM_PROGRESS, sbi->ll_md_exp, sizeof(hpk),
- &hpk, NULL);
+ rc2 = obd_iocontrol(LL_IOC_HSM_PROGRESS, sbi->ll_md_exp, sizeof(hpk),
+ &hpk, NULL);
- return rc;
+ return rc ? rc : rc2;
}
static int copy_and_ioctl(int cmd, struct obd_export *exp,
@@ -862,10 +878,6 @@ static int quotactl_ioctl(struct ll_sb_info *sbi, struct if_quotactl *qctl)
int rc = 0;
switch (cmd) {
- case LUSTRE_Q_INVALIDATE:
- case LUSTRE_Q_FINVALIDATE:
- case Q_QUOTAON:
- case Q_QUOTAOFF:
case Q_SETQUOTA:
case Q_SETINFO:
if (!capable(CFS_CAP_SYS_ADMIN))
@@ -930,10 +942,6 @@ static int quotactl_ioctl(struct ll_sb_info *sbi, struct if_quotactl *qctl)
QCTL_COPY(oqctl, qctl);
rc = obd_quotactl(sbi->ll_md_exp, oqctl);
if (rc) {
- if (rc != -EALREADY && cmd == Q_QUOTAON) {
- oqctl->qc_cmd = Q_QUOTAOFF;
- obd_quotactl(sbi->ll_md_exp, oqctl);
- }
kfree(oqctl);
return rc;
}
@@ -1077,7 +1085,7 @@ static long ll_dir_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
goto out_free;
}
- rc = ll_get_fid_by_name(inode, filename, namelen, NULL);
+ rc = ll_get_fid_by_name(inode, filename, namelen, NULL, NULL);
if (rc < 0) {
CERROR("%s: lookup %.*s failed: rc = %d\n",
ll_get_fsname(inode->i_sb, NULL, 0), namelen,
@@ -1189,6 +1197,7 @@ lmv_out_free:
struct lmv_user_md *tmp = NULL;
union lmv_mds_md *lmm = NULL;
u64 valid = 0;
+ int max_stripe_count;
int stripe_count;
int mdt_index;
int lum_size;
@@ -1200,6 +1209,7 @@ lmv_out_free:
if (copy_from_user(&lum, ulmv, sizeof(*ulmv)))
return -EFAULT;
+ max_stripe_count = lum.lum_stripe_count;
/*
* lum_magic will indicate which stripe the ioctl would like
* to get, LMV_MAGIC_V1 is for normal LMV stripe, LMV_USER_MAGIC
@@ -1219,9 +1229,6 @@ lmv_out_free:
/* Get default LMV EA */
if (lum.lum_magic == LMV_USER_MAGIC) {
- if (rc)
- goto finish_req;
-
if (lmmsize > sizeof(*ulmv)) {
rc = -EINVAL;
goto finish_req;
@@ -1234,6 +1241,16 @@ lmv_out_free:
}
stripe_count = lmv_mds_md_stripe_count_get(lmm);
+ if (max_stripe_count < stripe_count) {
+ lum.lum_stripe_count = stripe_count;
+ if (copy_to_user(ulmv, &lum, sizeof(lum))) {
+ rc = -EFAULT;
+ goto finish_req;
+ }
+ rc = -E2BIG;
+ goto finish_req;
+ }
+
lum_size = lmv_user_md_size(stripe_count, LMV_MAGIC_V1);
tmp = kzalloc(lum_size, GFP_NOFS);
if (!tmp) {
@@ -1370,134 +1387,6 @@ out_req:
ll_putname(filename);
return rc;
}
- case IOC_LOV_GETINFO: {
- struct lov_user_mds_data __user *lumd;
- struct lov_stripe_md *lsm;
- struct lov_user_md __user *lum;
- struct lov_mds_md *lmm;
- int lmmsize;
- lstat_t st;
-
- lumd = (struct lov_user_mds_data __user *)arg;
- lum = &lumd->lmd_lmm;
-
- rc = ll_get_max_mdsize(sbi, &lmmsize);
- if (rc)
- return rc;
-
- lmm = libcfs_kvzalloc(lmmsize, GFP_NOFS);
- if (!lmm)
- return -ENOMEM;
- if (copy_from_user(lmm, lum, lmmsize)) {
- rc = -EFAULT;
- goto free_lmm;
- }
-
- switch (lmm->lmm_magic) {
- case LOV_USER_MAGIC_V1:
- if (cpu_to_le32(LOV_USER_MAGIC_V1) == LOV_USER_MAGIC_V1)
- break;
- /* swab objects first so that stripes num will be sane */
- lustre_swab_lov_user_md_objects(
- ((struct lov_user_md_v1 *)lmm)->lmm_objects,
- ((struct lov_user_md_v1 *)lmm)->lmm_stripe_count);
- lustre_swab_lov_user_md_v1((struct lov_user_md_v1 *)lmm);
- break;
- case LOV_USER_MAGIC_V3:
- if (cpu_to_le32(LOV_USER_MAGIC_V3) == LOV_USER_MAGIC_V3)
- break;
- /* swab objects first so that stripes num will be sane */
- lustre_swab_lov_user_md_objects(
- ((struct lov_user_md_v3 *)lmm)->lmm_objects,
- ((struct lov_user_md_v3 *)lmm)->lmm_stripe_count);
- lustre_swab_lov_user_md_v3((struct lov_user_md_v3 *)lmm);
- break;
- default:
- rc = -EINVAL;
- goto free_lmm;
- }
-
- rc = obd_unpackmd(sbi->ll_dt_exp, &lsm, lmm, lmmsize);
- if (rc < 0) {
- rc = -ENOMEM;
- goto free_lmm;
- }
-
- /* Perform glimpse_size operation. */
- memset(&st, 0, sizeof(st));
-
- rc = ll_glimpse_ioctl(sbi, lsm, &st);
- if (rc)
- goto free_lsm;
-
- if (copy_to_user(&lumd->lmd_st, &st, sizeof(st))) {
- rc = -EFAULT;
- goto free_lsm;
- }
-
-free_lsm:
- obd_free_memmd(sbi->ll_dt_exp, &lsm);
-free_lmm:
- kvfree(lmm);
- return rc;
- }
- case OBD_IOC_QUOTACHECK: {
- struct obd_quotactl *oqctl;
- int error = 0;
-
- if (!capable(CFS_CAP_SYS_ADMIN))
- return -EPERM;
-
- oqctl = kzalloc(sizeof(*oqctl), GFP_NOFS);
- if (!oqctl)
- return -ENOMEM;
- oqctl->qc_type = arg;
- rc = obd_quotacheck(sbi->ll_md_exp, oqctl);
- if (rc < 0) {
- CDEBUG(D_INFO, "md_quotacheck failed: rc %d\n", rc);
- error = rc;
- }
-
- rc = obd_quotacheck(sbi->ll_dt_exp, oqctl);
- if (rc < 0)
- CDEBUG(D_INFO, "obd_quotacheck failed: rc %d\n", rc);
-
- kfree(oqctl);
- return error ?: rc;
- }
- case OBD_IOC_POLL_QUOTACHECK: {
- struct if_quotacheck *check;
-
- if (!capable(CFS_CAP_SYS_ADMIN))
- return -EPERM;
-
- check = kzalloc(sizeof(*check), GFP_NOFS);
- if (!check)
- return -ENOMEM;
-
- rc = obd_iocontrol(cmd, sbi->ll_md_exp, 0, (void *)check,
- NULL);
- if (rc) {
- CDEBUG(D_QUOTA, "mdc ioctl %d failed: %d\n", cmd, rc);
- if (copy_to_user((void __user *)arg, check,
- sizeof(*check)))
- CDEBUG(D_QUOTA, "copy_to_user failed\n");
- goto out_poll;
- }
-
- rc = obd_iocontrol(cmd, sbi->ll_dt_exp, 0, (void *)check,
- NULL);
- if (rc) {
- CDEBUG(D_QUOTA, "osc ioctl %d failed: %d\n", cmd, rc);
- if (copy_to_user((void __user *)arg, check,
- sizeof(*check)))
- CDEBUG(D_QUOTA, "copy_to_user failed\n");
- goto out_poll;
- }
-out_poll:
- kfree(check);
- return rc;
- }
case OBD_IOC_QUOTACTL: {
struct if_quotactl *qctl;
@@ -1536,7 +1425,7 @@ out_quotactl:
exp = count ? sbi->ll_md_exp : sbi->ll_dt_exp;
vallen = sizeof(count);
rc = obd_get_info(NULL, exp, sizeof(KEY_TGT_COUNT),
- KEY_TGT_COUNT, &vallen, &count, NULL);
+ KEY_TGT_COUNT, &vallen, &count);
if (rc) {
CERROR("get target count failed: %d\n", rc);
return rc;
diff --git a/drivers/staging/lustre/lustre/llite/file.c b/drivers/staging/lustre/lustre/llite/file.c
index e1d784bae064..f634c11216e6 100644
--- a/drivers/staging/lustre/lustre/llite/file.c
+++ b/drivers/staging/lustre/lustre/llite/file.c
@@ -44,6 +44,7 @@
#include <linux/mount.h>
#include "../include/lustre/ll_fiemap.h"
#include "../include/lustre/lustre_ioctl.h"
+#include "../include/lustre_swab.h"
#include "../include/cl_object.h"
#include "llite_internal.h"
@@ -75,60 +76,56 @@ static void ll_file_data_put(struct ll_file_data *fd)
kmem_cache_free(ll_file_data_slab, fd);
}
-void ll_pack_inode2opdata(struct inode *inode, struct md_op_data *op_data,
- struct lustre_handle *fh)
+/**
+ * Packs all the attributes into @op_data for the CLOSE rpc.
+ */
+static void ll_prepare_close(struct inode *inode, struct md_op_data *op_data,
+ struct obd_client_handle *och)
{
- op_data->op_fid1 = ll_i2info(inode)->lli_fid;
+ struct ll_inode_info *lli = ll_i2info(inode);
+
+ ll_prep_md_op_data(op_data, inode, NULL, NULL,
+ 0, 0, LUSTRE_OPC_ANY, NULL);
+
op_data->op_attr.ia_mode = inode->i_mode;
op_data->op_attr.ia_atime = inode->i_atime;
op_data->op_attr.ia_mtime = inode->i_mtime;
op_data->op_attr.ia_ctime = inode->i_ctime;
op_data->op_attr.ia_size = i_size_read(inode);
+ op_data->op_attr.ia_valid |= ATTR_MODE | ATTR_ATIME | ATTR_ATIME_SET |
+ ATTR_MTIME | ATTR_MTIME_SET |
+ ATTR_CTIME | ATTR_CTIME_SET;
op_data->op_attr_blocks = inode->i_blocks;
op_data->op_attr_flags = ll_inode_to_ext_flags(inode->i_flags);
- op_data->op_ioepoch = ll_i2info(inode)->lli_ioepoch;
- if (fh)
- op_data->op_handle = *fh;
+ op_data->op_handle = och->och_fh;
- if (ll_i2info(inode)->lli_flags & LLIF_DATA_MODIFIED)
+ /*
+ * For HSM: if inode data has been modified, pack it so that
+ * MDT can set data dirty flag in the archive.
+ */
+ if (och->och_flags & FMODE_WRITE &&
+ test_and_clear_bit(LLIF_DATA_MODIFIED, &lli->lli_flags))
op_data->op_bias |= MDS_DATA_MODIFIED;
}
/**
- * Closes the IO epoch and packs all the attributes into @op_data for
- * the CLOSE rpc.
+ * Perform a close, possibly with a bias.
+ * The meaning of "data" depends on the value of "bias".
+ *
+ * If \a bias is MDS_HSM_RELEASE then \a data is a pointer to the data version.
+ * If \a bias is MDS_CLOSE_LAYOUT_SWAP then \a data is a pointer to the inode to
+ * swap layouts with.
*/
-static void ll_prepare_close(struct inode *inode, struct md_op_data *op_data,
- struct obd_client_handle *och)
-{
- op_data->op_attr.ia_valid = ATTR_MODE | ATTR_ATIME | ATTR_ATIME_SET |
- ATTR_MTIME | ATTR_MTIME_SET |
- ATTR_CTIME | ATTR_CTIME_SET;
-
- if (!(och->och_flags & FMODE_WRITE))
- goto out;
-
- if (!exp_connect_som(ll_i2mdexp(inode)) || !S_ISREG(inode->i_mode))
- op_data->op_attr.ia_valid |= ATTR_SIZE | ATTR_BLOCKS;
- else
- ll_ioepoch_close(inode, op_data, &och, 0);
-
-out:
- ll_pack_inode2opdata(inode, op_data, &och->och_fh);
- ll_prep_md_op_data(op_data, inode, NULL, NULL,
- 0, 0, LUSTRE_OPC_ANY, NULL);
-}
-
static int ll_close_inode_openhandle(struct obd_export *md_exp,
- struct inode *inode,
struct obd_client_handle *och,
- const __u64 *data_version)
+ struct inode *inode,
+ enum mds_op_bias bias,
+ void *data)
{
struct obd_export *exp = ll_i2mdexp(inode);
struct md_op_data *op_data;
struct ptlrpc_request *req = NULL;
struct obd_device *obd = class_exp2obd(exp);
- int epoch_close = 1;
int rc;
if (!obd) {
@@ -150,65 +147,51 @@ static int ll_close_inode_openhandle(struct obd_export *md_exp,
}
ll_prepare_close(inode, op_data, och);
- if (data_version) {
- /* Pass in data_version implies release. */
+ switch (bias) {
+ case MDS_CLOSE_LAYOUT_SWAP:
+ LASSERT(data);
+ op_data->op_bias |= MDS_CLOSE_LAYOUT_SWAP;
+ op_data->op_data_version = 0;
+ op_data->op_lease_handle = och->och_lease_handle;
+ op_data->op_fid2 = *ll_inode2fid(data);
+ break;
+
+ case MDS_HSM_RELEASE:
+ LASSERT(data);
op_data->op_bias |= MDS_HSM_RELEASE;
- op_data->op_data_version = *data_version;
+ op_data->op_data_version = *(__u64 *)data;
op_data->op_lease_handle = och->och_lease_handle;
op_data->op_attr.ia_valid |= ATTR_SIZE | ATTR_BLOCKS;
+ break;
+
+ default:
+ LASSERT(!data);
+ break;
}
- epoch_close = op_data->op_flags & MF_EPOCH_CLOSE;
+
rc = md_close(md_exp, op_data, och->och_mod, &req);
- if (rc == -EAGAIN) {
- /* This close must have the epoch closed. */
- LASSERT(epoch_close);
- /* MDS has instructed us to obtain Size-on-MDS attribute from
- * OSTs and send setattr to back to MDS.
- */
- rc = ll_som_update(inode, op_data);
- if (rc) {
- CERROR("%s: inode "DFID" mdc Size-on-MDS update failed: rc = %d\n",
- ll_i2mdexp(inode)->exp_obd->obd_name,
- PFID(ll_inode2fid(inode)), rc);
- rc = 0;
- }
- } else if (rc) {
+ if (rc) {
CERROR("%s: inode "DFID" mdc close failed: rc = %d\n",
ll_i2mdexp(inode)->exp_obd->obd_name,
PFID(ll_inode2fid(inode)), rc);
}
- /* DATA_MODIFIED flag was successfully sent on close, cancel data
- * modification flag.
- */
- if (rc == 0 && (op_data->op_bias & MDS_DATA_MODIFIED)) {
- struct ll_inode_info *lli = ll_i2info(inode);
-
- spin_lock(&lli->lli_lock);
- lli->lli_flags &= ~LLIF_DATA_MODIFIED;
- spin_unlock(&lli->lli_lock);
- }
-
- if (rc == 0 && op_data->op_bias & MDS_HSM_RELEASE) {
+ if (op_data->op_bias & (MDS_HSM_RELEASE | MDS_CLOSE_LAYOUT_SWAP) &&
+ !rc) {
struct mdt_body *body;
body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
- if (!(body->mbo_valid & OBD_MD_FLRELEASED))
+ if (!(body->mbo_valid & OBD_MD_CLOSE_INTENT_EXECED))
rc = -EBUSY;
}
ll_finish_md_op_data(op_data);
out:
- if (exp_connect_som(exp) && !epoch_close &&
- S_ISREG(inode->i_mode) && (och->och_flags & FMODE_WRITE)) {
- ll_queue_done_writing(inode, LLIF_DONE_WRITING);
- } else {
- md_clear_open_replay_data(md_exp, och);
- /* Free @och if it is not waiting for DONE_WRITING. */
- och->och_fh.cookie = DEAD_HANDLE_MAGIC;
- kfree(och);
- }
+ md_clear_open_replay_data(md_exp, och);
+ och->och_fh.cookie = DEAD_HANDLE_MAGIC;
+ kfree(och);
+
if (req) /* This is close request */
ptlrpc_req_finished(req);
return rc;
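With the bias/data pair replacing the bare data_version pointer, the close intent is selected explicitly at every call site. The three shapes used in this patch:

/* Plain close: no intent, data must be NULL. */
rc = ll_close_inode_openhandle(md_exp, och, inode, 0, NULL);

/* HSM release: data points at the data version to record. */
rc = ll_close_inode_openhandle(md_exp, och, inode,
			       MDS_HSM_RELEASE, &data_version);

/* Layout swap on close: data is the inode to swap layouts with. */
rc = ll_close_inode_openhandle(md_exp, och, inode,
			       MDS_CLOSE_LAYOUT_SWAP, inode2);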
@@ -252,7 +235,7 @@ int ll_md_real_close(struct inode *inode, fmode_t fmode)
* be closed.
*/
rc = ll_close_inode_openhandle(ll_i2sbi(inode)->ll_md_exp,
- inode, och, NULL);
+ och, inode, 0, NULL);
}
return rc;
@@ -266,7 +249,9 @@ static int ll_md_close(struct obd_export *md_exp, struct inode *inode,
int lockmode;
__u64 flags = LDLM_FL_BLOCK_GRANTED | LDLM_FL_TEST_LOCK;
struct lustre_handle lockh;
- ldlm_policy_data_t policy = {.l_inodebits = {MDS_INODELOCK_OPEN} };
+ union ldlm_policy_data policy = {
+ .l_inodebits = { MDS_INODELOCK_OPEN }
+ };
int rc = 0;
/* clear group lock, if present */
@@ -288,7 +273,8 @@ static int ll_md_close(struct obd_export *md_exp, struct inode *inode,
}
if (fd->fd_och) {
- rc = ll_close_inode_openhandle(md_exp, inode, fd->fd_och, NULL);
+ rc = ll_close_inode_openhandle(md_exp, fd->fd_och, inode, 0,
+ NULL);
fd->fd_och = NULL;
goto out;
}
@@ -437,20 +423,6 @@ out:
return rc;
}
-/**
- * Assign an obtained @ioepoch to client's inode. No lock is needed, MDS does
- * not believe attributes if a few ioepoch holders exist. Attributes for
- * previous ioepoch if new one is opened are also skipped by MDS.
- */
-void ll_ioepoch_open(struct ll_inode_info *lli, __u64 ioepoch)
-{
- if (ioepoch && lli->lli_ioepoch != ioepoch) {
- lli->lli_ioepoch = ioepoch;
- CDEBUG(D_INODE, "Epoch %llu opened on "DFID"\n",
- ioepoch, PFID(&lli->lli_fid));
- }
-}
-
static int ll_och_fill(struct obd_export *md_exp, struct lookup_intent *it,
struct obd_client_handle *och)
{
@@ -470,23 +442,17 @@ static int ll_local_open(struct file *file, struct lookup_intent *it,
struct ll_file_data *fd, struct obd_client_handle *och)
{
struct inode *inode = file_inode(file);
- struct ll_inode_info *lli = ll_i2info(inode);
LASSERT(!LUSTRE_FPRIVATE(file));
LASSERT(fd);
if (och) {
- struct mdt_body *body;
int rc;
rc = ll_och_fill(ll_i2sbi(inode)->ll_md_exp, it, och);
if (rc != 0)
return rc;
-
- body = req_capsule_server_get(&it->it_request->rq_pill,
- &RMF_MDT_BODY);
- ll_ioepoch_open(lli, body->mbo_ioepoch);
}
LUSTRE_FPRIVATE(file) = fd;
@@ -677,12 +643,6 @@ restart:
if (!S_ISREG(inode->i_mode))
goto out_och_free;
- if (!lli->lli_has_smd &&
- (cl_is_lov_delay_create(file->f_flags) ||
- (file->f_mode & FMODE_WRITE) == 0)) {
- CDEBUG(D_INODE, "object creation was delayed\n");
- goto out_och_free;
- }
cl_lov_delay_create_clear(&file->f_flags);
goto out_och_free;
@@ -867,7 +827,7 @@ out_close:
it.it_lock_mode = 0;
och->och_lease_handle.cookie = 0ULL;
}
- rc2 = ll_close_inode_openhandle(sbi->ll_md_exp, inode, och, NULL);
+ rc2 = ll_close_inode_openhandle(sbi->ll_md_exp, och, inode, 0, NULL);
if (rc2 < 0)
CERROR("%s: error closing file "DFID": %d\n",
ll_get_fsname(inode->i_sb, NULL, 0),
@@ -881,6 +841,69 @@ out:
}
/**
+ * Check whether a layout swap can be done between two inodes.
+ *
+ * \param[in] inode1 First inode to check
+ * \param[in] inode2 Second inode to check
+ *
+ * \retval 0 on success, layout swap can be performed between both inodes
+ * \retval negative error code if requirements are not met
+ */
+static int ll_check_swap_layouts_validity(struct inode *inode1,
+ struct inode *inode2)
+{
+ if (!S_ISREG(inode1->i_mode) || !S_ISREG(inode2->i_mode))
+ return -EINVAL;
+
+ if (inode_permission(inode1, MAY_WRITE) ||
+ inode_permission(inode2, MAY_WRITE))
+ return -EPERM;
+
+ if (inode1->i_sb != inode2->i_sb)
+ return -EXDEV;
+
+ return 0;
+}
+
+static int ll_swap_layouts_close(struct obd_client_handle *och,
+ struct inode *inode, struct inode *inode2)
+{
+ const struct lu_fid *fid1 = ll_inode2fid(inode);
+ const struct lu_fid *fid2;
+ int rc;
+
+ CDEBUG(D_INODE, "%s: biased close of file " DFID "\n",
+ ll_get_fsname(inode->i_sb, NULL, 0), PFID(fid1));
+
+ rc = ll_check_swap_layouts_validity(inode, inode2);
+ if (rc < 0)
+ goto out_free_och;
+
+ /* We now know that inode2 is a lustre inode */
+ fid2 = ll_inode2fid(inode2);
+
+ rc = lu_fid_cmp(fid1, fid2);
+ if (!rc) {
+ rc = -EINVAL;
+ goto out_free_och;
+ }
+
+ /*
+ * Close the file and swap layouts between inode & inode2.
+ * NB: lease lock handle is released in mdc_close_layout_swap_pack()
+ * because we still need it to pack l_remote_handle to MDT.
+ */
+ rc = ll_close_inode_openhandle(ll_i2sbi(inode)->ll_md_exp, och, inode,
+ MDS_CLOSE_LAYOUT_SWAP, inode2);
+
+ och = NULL; /* freed in ll_close_inode_openhandle() */
+
+out_free_och:
+ kfree(och);
+ return rc;
+}
+
+/**
* Release lease and close the file.
* It will check if the lease has ever broken.
*/
@@ -907,84 +930,7 @@ static int ll_lease_close(struct obd_client_handle *och, struct inode *inode,
*lease_broken = cancelled;
return ll_close_inode_openhandle(ll_i2sbi(inode)->ll_md_exp,
- inode, och, NULL);
-}
-
-/* Fills the obdo with the attributes for the lsm */
-static int ll_lsm_getattr(struct lov_stripe_md *lsm, struct obd_export *exp,
- struct obdo *obdo, __u64 ioepoch, int dv_flags)
-{
- struct ptlrpc_request_set *set;
- struct obd_info oinfo = { };
- int rc;
-
- LASSERT(lsm);
-
- oinfo.oi_md = lsm;
- oinfo.oi_oa = obdo;
- oinfo.oi_oa->o_oi = lsm->lsm_oi;
- oinfo.oi_oa->o_mode = S_IFREG;
- oinfo.oi_oa->o_ioepoch = ioepoch;
- oinfo.oi_oa->o_valid = OBD_MD_FLID | OBD_MD_FLTYPE |
- OBD_MD_FLSIZE | OBD_MD_FLBLOCKS |
- OBD_MD_FLBLKSZ | OBD_MD_FLATIME |
- OBD_MD_FLMTIME | OBD_MD_FLCTIME |
- OBD_MD_FLGROUP | OBD_MD_FLEPOCH |
- OBD_MD_FLDATAVERSION;
- if (dv_flags & (LL_DV_WR_FLUSH | LL_DV_RD_FLUSH)) {
- oinfo.oi_oa->o_valid |= OBD_MD_FLFLAGS;
- oinfo.oi_oa->o_flags |= OBD_FL_SRVLOCK;
- if (dv_flags & LL_DV_WR_FLUSH)
- oinfo.oi_oa->o_flags |= OBD_FL_FLUSH;
- }
-
- set = ptlrpc_prep_set();
- if (!set) {
- CERROR("cannot allocate ptlrpc set: rc = %d\n", -ENOMEM);
- rc = -ENOMEM;
- } else {
- rc = obd_getattr_async(exp, &oinfo, set);
- if (rc == 0)
- rc = ptlrpc_set_wait(set);
- ptlrpc_set_destroy(set);
- }
- if (rc == 0) {
- oinfo.oi_oa->o_valid &= (OBD_MD_FLBLOCKS | OBD_MD_FLBLKSZ |
- OBD_MD_FLATIME | OBD_MD_FLMTIME |
- OBD_MD_FLCTIME | OBD_MD_FLSIZE |
- OBD_MD_FLDATAVERSION | OBD_MD_FLFLAGS);
- if (dv_flags & LL_DV_WR_FLUSH &&
- !(oinfo.oi_oa->o_valid & OBD_MD_FLFLAGS &&
- oinfo.oi_oa->o_flags & OBD_FL_FLUSH))
- return -ENOTSUPP;
- }
- return rc;
-}
-
-/**
- * Performs the getattr on the inode and updates its fields.
- * If @sync != 0, perform the getattr under the server-side lock.
- */
-int ll_inode_getattr(struct inode *inode, struct obdo *obdo,
- __u64 ioepoch, int sync)
-{
- struct lov_stripe_md *lsm;
- int rc;
-
- lsm = ccc_inode_lsm_get(inode);
- rc = ll_lsm_getattr(lsm, ll_i2dtexp(inode),
- obdo, ioepoch, sync ? LL_DV_RD_FLUSH : 0);
- if (rc == 0) {
- struct ost_id *oi = lsm ? &lsm->lsm_oi : &obdo->o_oi;
-
- obdo_refresh_inode(inode, obdo, obdo->o_valid);
- CDEBUG(D_INODE, "objid " DOSTID " size %llu, blocks %llu, blksize %lu\n",
- POSTID(oi), i_size_read(inode),
- (unsigned long long)inode->i_blocks,
- 1UL << inode->i_blkbits);
- }
- ccc_inode_lsm_put(inode, lsm);
- return rc;
+ och, inode, 0, NULL);
}
int ll_merge_attr(const struct lu_env *env, struct inode *inode)
@@ -1043,23 +989,6 @@ out_size_unlock:
return rc;
}
-int ll_glimpse_ioctl(struct ll_sb_info *sbi, struct lov_stripe_md *lsm,
- lstat_t *st)
-{
- struct obdo obdo = { 0 };
- int rc;
-
- rc = ll_lsm_getattr(lsm, sbi->ll_dt_exp, &obdo, 0, 0);
- if (rc == 0) {
- st->st_size = obdo.o_size;
- st->st_blocks = obdo.o_blocks;
- st->st_mtime = obdo.o_mtime;
- st->st_atime = obdo.o_atime;
- st->st_ctime = obdo.o_ctime;
- }
- return rc;
-}
-
static bool file_is_noatime(const struct file *file)
{
const struct vfsmount *mnt = file->f_path.mnt;
@@ -1117,9 +1046,11 @@ ll_file_io_generic(const struct lu_env *env, struct vvp_io_args *args,
{
struct ll_inode_info *lli = ll_i2info(file_inode(file));
struct ll_file_data *fd = LUSTRE_FPRIVATE(file);
+ struct vvp_io *vio = vvp_env_io(env);
struct range_lock range;
struct cl_io *io;
- ssize_t result;
+ ssize_t result = 0;
+ int rc = 0;
CDEBUG(D_VFSTRACE, "file: %pD, type: %d ppos: %llu, count: %zu\n",
file, iot, *ppos, count);
@@ -1151,18 +1082,15 @@ restart:
CDEBUG(D_VFSTRACE, "Range lock [%llu, %llu]\n",
range.rl_node.in_extent.start,
range.rl_node.in_extent.end);
- result = range_lock(&lli->lli_write_tree,
- &range);
- if (result < 0)
+ rc = range_lock(&lli->lli_write_tree, &range);
+ if (rc < 0)
goto out;
range_locked = true;
}
- down_read(&lli->lli_trunc_sem);
ll_cl_add(file, env, io);
- result = cl_io_loop(env, io);
+ rc = cl_io_loop(env, io);
ll_cl_remove(file, env);
- up_read(&lli->lli_trunc_sem);
if (range_locked) {
CDEBUG(D_VFSTRACE, "Range unlock [%llu, %llu]\n",
range.rl_node.in_extent.start,
@@ -1171,24 +1099,26 @@ restart:
}
} else {
/* cl_io_rw_init() handled IO */
- result = io->ci_result;
+ rc = io->ci_result;
}
if (io->ci_nob > 0) {
result = io->ci_nob;
+ count -= io->ci_nob;
*ppos = io->u.ci_wr.wr.crw_pos;
+
+ /* prepare IO restart */
+ if (count > 0)
+ args->u.normal.via_iter = vio->vui_iter;
}
- goto out;
out:
cl_io_fini(env, io);
- /* If any bit been read/written (result != 0), we just return
- * short read/write instead of restart io.
- */
- if ((result == 0 || result == -ENODATA) && io->ci_need_restart) {
- CDEBUG(D_VFSTRACE, "Restart %s on %pD from %lld, count:%zu\n",
+
+ if ((!rc || rc == -ENODATA) && count > 0 && io->ci_need_restart) {
+ CDEBUG(D_VFSTRACE, "%s: restart %s from %lld, count:%zu, result: %zd\n",
+ file_dentry(file)->d_name.name,
iot == CIT_READ ? "read" : "write",
- file, *ppos, count);
- LASSERTF(io->ci_nob == 0, "%zd\n", io->ci_nob);
+ *ppos, count, result);
goto restart;
}
@@ -1201,13 +1131,19 @@ out:
ll_stats_ops_tally(ll_i2sbi(file_inode(file)),
LPROC_LL_WRITE_BYTES, result);
fd->fd_write_failed = false;
- } else if (result != -ERESTARTSYS) {
+ } else if (!result && !rc) {
+ rc = io->ci_result;
+ if (rc < 0)
+ fd->fd_write_failed = true;
+ else
+ fd->fd_write_failed = false;
+ } else if (rc != -ERESTARTSYS) {
fd->fd_write_failed = true;
}
}
CDEBUG(D_VFSTRACE, "iot: %d, result: %zd\n", iot, result);
- return result;
+ return result > 0 ? result : rc;
}
static ssize_t ll_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
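The rewrite above separates bytes transferred (result, taken from io->ci_nob) from the status code (rc), so a short transfer restarts from the preserved iterator instead of redoing the whole request. Condensed to its skeleton:

restart:
	rc = cl_io_loop(env, io);	/* may stop short of count */
	if (io->ci_nob > 0) {
		result = io->ci_nob;	/* bytes completed so far */
		count -= io->ci_nob;	/* bytes still outstanding */
		args->u.normal.via_iter = vio->vui_iter; /* resume point */
	}
	cl_io_fini(env, io);
	if ((!rc || rc == -ENODATA) && count > 0 && io->ci_need_restart)
		goto restart;		/* continue, do not rewind */
	return result > 0 ? result : rc;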
@@ -1259,37 +1195,22 @@ int ll_lov_setstripe_ea_info(struct inode *inode, struct dentry *dentry,
__u64 flags, struct lov_user_md *lum,
int lum_size)
{
- struct lov_stripe_md *lsm = NULL;
struct lookup_intent oit = {
.it_op = IT_OPEN,
.it_flags = flags | MDS_OPEN_BY_FID,
};
int rc = 0;
- lsm = ccc_inode_lsm_get(inode);
- if (lsm) {
- ccc_inode_lsm_put(inode, lsm);
- CDEBUG(D_IOCTL, "stripe already exists for inode "DFID"\n",
- PFID(ll_inode2fid(inode)));
- rc = -EEXIST;
- goto out;
- }
-
ll_inode_size_lock(inode);
rc = ll_intent_file_open(dentry, lum, lum_size, &oit);
if (rc < 0)
goto out_unlock;
- rc = oit.it_status;
- if (rc < 0)
- goto out_unlock;
ll_release_openhandle(inode, &oit);
out_unlock:
ll_inode_size_unlock(inode);
ll_intent_release(&oit);
- ccc_inode_lsm_put(inode, lsm);
-out:
return rc;
}
@@ -1566,7 +1487,7 @@ int ll_release_openhandle(struct inode *inode, struct lookup_intent *it)
ll_och_fill(ll_i2sbi(inode)->ll_md_exp, it, och);
rc = ll_close_inode_openhandle(ll_i2sbi(inode)->ll_md_exp,
- inode, och, NULL);
+ och, inode, 0, NULL);
out:
/* this one is in place of ll_file_open */
if (it_disposition(it, DISP_ENQ_OPEN_REF)) {
@@ -1579,15 +1500,17 @@ out:
/**
* Get size for inode for which FIEMAP mapping is requested.
* Make the FIEMAP get_info call and returns the result.
+ *
+ * \param fiemap kernel buffer to hold extents
+ * \param num_bytes kernel buffer size
*/
-static int ll_do_fiemap(struct inode *inode, struct ll_user_fiemap *fiemap,
+static int ll_do_fiemap(struct inode *inode, struct fiemap *fiemap,
size_t num_bytes)
{
- struct obd_export *exp = ll_i2dtexp(inode);
- struct lov_stripe_md *lsm = NULL;
- struct ll_fiemap_info_key fm_key = { .name = KEY_FIEMAP, };
- __u32 vallen = num_bytes;
- int rc;
+ struct ll_fiemap_info_key fmkey = { .lfik_name = KEY_FIEMAP, };
+ struct lu_env *env;
+ int refcheck;
+ int rc = 0;
/* Checks for fiemap flags */
if (fiemap->fm_flags & ~LUSTRE_FIEMAP_FLAGS_COMPAT) {
@@ -1602,21 +1525,9 @@ static int ll_do_fiemap(struct inode *inode, struct ll_user_fiemap *fiemap,
return rc;
}
- lsm = ccc_inode_lsm_get(inode);
- if (!lsm)
- return -ENOENT;
-
- /* If the stripe_count > 1 and the application does not understand
- * DEVICE_ORDER flag, then it cannot interpret the extents correctly.
- */
- if (lsm->lsm_stripe_count > 1 &&
- !(fiemap->fm_flags & FIEMAP_FLAG_DEVICE_ORDER)) {
- rc = -EOPNOTSUPP;
- goto out;
- }
-
- fm_key.oa.o_oi = lsm->lsm_oi;
- fm_key.oa.o_valid = OBD_MD_FLID | OBD_MD_FLGROUP;
+ env = cl_env_get(&refcheck);
+ if (IS_ERR(env))
+ return PTR_ERR(env);
if (i_size_read(inode) == 0) {
rc = ll_glimpse_size(inode);
@@ -1624,24 +1535,23 @@ static int ll_do_fiemap(struct inode *inode, struct ll_user_fiemap *fiemap,
goto out;
}
- obdo_from_inode(&fm_key.oa, inode, OBD_MD_FLSIZE);
- obdo_set_parent_fid(&fm_key.oa, &ll_i2info(inode)->lli_fid);
+ fmkey.lfik_oa.o_valid = OBD_MD_FLID | OBD_MD_FLGROUP;
+ obdo_from_inode(&fmkey.lfik_oa, inode, OBD_MD_FLSIZE);
+ obdo_set_parent_fid(&fmkey.lfik_oa, &ll_i2info(inode)->lli_fid);
+
/* If filesize is 0, then there would be no objects for mapping */
- if (fm_key.oa.o_size == 0) {
+ if (fmkey.lfik_oa.o_size == 0) {
fiemap->fm_mapped_extents = 0;
rc = 0;
goto out;
}
- memcpy(&fm_key.fiemap, fiemap, sizeof(*fiemap));
-
- rc = obd_get_info(NULL, exp, sizeof(fm_key), &fm_key, &vallen,
- fiemap, lsm);
- if (rc)
- CERROR("obd_get_info failed: rc = %d\n", rc);
+ memcpy(&fmkey.lfik_fiemap, fiemap, sizeof(*fiemap));
+ rc = cl_object_fiemap(env, ll_i2info(inode)->lli_clob,
+ &fmkey, fiemap, &num_bytes);
out:
- ccc_inode_lsm_put(inode, lsm);
+ cl_env_put(env, &refcheck);
return rc;
}
@@ -1689,113 +1599,56 @@ gf_free:
return rc;
}
-static int ll_ioctl_fiemap(struct inode *inode, unsigned long arg)
-{
- struct ll_user_fiemap *fiemap_s;
- size_t num_bytes, ret_bytes;
- unsigned int extent_count;
- int rc = 0;
-
- /* Get the extent count so we can calculate the size of
- * required fiemap buffer
- */
- if (get_user(extent_count,
- &((struct ll_user_fiemap __user *)arg)->fm_extent_count))
- return -EFAULT;
-
- if (extent_count >=
- (SIZE_MAX - sizeof(*fiemap_s)) / sizeof(struct ll_fiemap_extent))
- return -EINVAL;
- num_bytes = sizeof(*fiemap_s) + (extent_count *
- sizeof(struct ll_fiemap_extent));
-
- fiemap_s = libcfs_kvzalloc(num_bytes, GFP_NOFS);
- if (!fiemap_s)
- return -ENOMEM;
-
- /* get the fiemap value */
- if (copy_from_user(fiemap_s, (struct ll_user_fiemap __user *)arg,
- sizeof(*fiemap_s))) {
- rc = -EFAULT;
- goto error;
- }
-
- /* If fm_extent_count is non-zero, read the first extent since
- * it is used to calculate end_offset and device from previous
- * fiemap call.
- */
- if (extent_count) {
- if (copy_from_user(&fiemap_s->fm_extents[0],
- (char __user *)arg + sizeof(*fiemap_s),
- sizeof(struct ll_fiemap_extent))) {
- rc = -EFAULT;
- goto error;
- }
- }
-
- rc = ll_do_fiemap(inode, fiemap_s, num_bytes);
- if (rc)
- goto error;
-
- ret_bytes = sizeof(struct ll_user_fiemap);
-
- if (extent_count != 0)
- ret_bytes += (fiemap_s->fm_mapped_extents *
- sizeof(struct ll_fiemap_extent));
-
- if (copy_to_user((void __user *)arg, fiemap_s, ret_bytes))
- rc = -EFAULT;
-
-error:
- kvfree(fiemap_s);
- return rc;
-}
-
/*
* Read the data_version for inode.
*
* This value is computed using stripe object version on OST.
* Version is computed using server side locking.
*
- * @param sync if do sync on the OST side;
+ * @param flags whether to sync on the OST side;
* 0: no sync
* LL_DV_RD_FLUSH: flush dirty pages, LCK_PR on OSTs
* LL_DV_WR_FLUSH: drop all caching pages, LCK_PW on OSTs
*/
int ll_data_version(struct inode *inode, __u64 *data_version, int flags)
{
- struct lov_stripe_md *lsm = NULL;
- struct ll_sb_info *sbi = ll_i2sbi(inode);
- struct obdo *obdo = NULL;
- int rc;
+ struct cl_object *obj = ll_i2info(inode)->lli_clob;
+ struct lu_env *env;
+ struct cl_io *io;
+ int refcheck;
+ int result;
- /* If no stripe, we consider version is 0. */
- lsm = ccc_inode_lsm_get(inode);
- if (!lsm_has_objects(lsm)) {
+ /* If no file object initialized, we consider its version is 0. */
+ if (!obj) {
*data_version = 0;
- CDEBUG(D_INODE, "No object for inode\n");
- rc = 0;
- goto out;
+ return 0;
}
- obdo = kzalloc(sizeof(*obdo), GFP_NOFS);
- if (!obdo) {
- rc = -ENOMEM;
- goto out;
- }
+ env = cl_env_get(&refcheck);
+ if (IS_ERR(env))
+ return PTR_ERR(env);
- rc = ll_lsm_getattr(lsm, sbi->ll_dt_exp, obdo, 0, flags);
- if (rc == 0) {
- if (!(obdo->o_valid & OBD_MD_FLDATAVERSION))
- rc = -EOPNOTSUPP;
- else
- *data_version = obdo->o_data_version;
- }
+ io = vvp_env_thread_io(env);
+ io->ci_obj = obj;
+ io->u.ci_data_version.dv_data_version = 0;
+ io->u.ci_data_version.dv_flags = flags;
- kfree(obdo);
-out:
- ccc_inode_lsm_put(inode, lsm);
- return rc;
+restart:
+ if (!cl_io_init(env, io, CIT_DATA_VERSION, io->ci_obj))
+ result = cl_io_loop(env, io);
+ else
+ result = io->ci_result;
+
+ *data_version = io->u.ci_data_version.dv_data_version;
+
+ cl_io_fini(env, io);
+
+ if (unlikely(io->ci_need_restart))
+ goto restart;
+
+ cl_env_put(env, &refcheck);
+
+ return result;
}
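data_version is now computed by a dedicated cl_io type rather than an OST getattr, so the stripe-level work runs under the usual cl_io state machine and survives layout changes via ci_need_restart. The same one-shot pattern, reduced to its steps:

/* One-shot cl_io: init, loop, harvest, fini; retry on layout change. */
io = vvp_env_thread_io(env);
io->ci_obj = obj;
io->u.ci_data_version.dv_flags = flags;
do {
	if (!cl_io_init(env, io, CIT_DATA_VERSION, io->ci_obj))
		result = cl_io_loop(env, io);
	else
		result = io->ci_result;	/* init decided the outcome */
	*data_version = io->u.ci_data_version.dv_data_version;
	cl_io_fini(env, io);
} while (unlikely(io->ci_need_restart));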
/*
@@ -1803,11 +1656,11 @@ out:
*/
int ll_hsm_release(struct inode *inode)
{
- struct cl_env_nest nest;
struct lu_env *env;
struct obd_client_handle *och = NULL;
__u64 data_version = 0;
int rc;
+ int refcheck;
CDEBUG(D_INODE, "%s: Releasing file "DFID".\n",
ll_get_fsname(inode->i_sb, NULL, 0),
@@ -1824,21 +1677,21 @@ int ll_hsm_release(struct inode *inode)
if (rc != 0)
goto out;
- env = cl_env_nested_get(&nest);
+ env = cl_env_get(&refcheck);
if (IS_ERR(env)) {
rc = PTR_ERR(env);
goto out;
}
ll_merge_attr(env, inode);
- cl_env_nested_put(&nest, env);
+ cl_env_put(env, &refcheck);
/* Release the file.
* NB: lease lock handle is released in mdc_hsm_release_pack() because
* we still need it to pack l_remote_handle to MDT.
*/
- rc = ll_close_inode_openhandle(ll_i2sbi(inode)->ll_md_exp, inode, och,
- &data_version);
+ rc = ll_close_inode_openhandle(ll_i2sbi(inode)->ll_md_exp, och, inode,
+ MDS_HSM_RELEASE, &data_version);
och = NULL;
out:
@@ -1849,10 +1702,12 @@ out:
}
struct ll_swap_stack {
- struct iattr ia1, ia2;
- __u64 dv1, dv2;
- struct inode *inode1, *inode2;
- bool check_dv1, check_dv2;
+ u64 dv1;
+ u64 dv2;
+ struct inode *inode1;
+ struct inode *inode2;
+ bool check_dv1;
+ bool check_dv2;
};
static int ll_swap_layouts(struct file *file1, struct file *file2,
@@ -1872,21 +1727,9 @@ static int ll_swap_layouts(struct file *file1, struct file *file2,
llss->inode1 = file_inode(file1);
llss->inode2 = file_inode(file2);
- if (!S_ISREG(llss->inode2->i_mode)) {
- rc = -EINVAL;
- goto free;
- }
-
- if (inode_permission(llss->inode1, MAY_WRITE) ||
- inode_permission(llss->inode2, MAY_WRITE)) {
- rc = -EPERM;
+ rc = ll_check_swap_layouts_validity(llss->inode1, llss->inode2);
+ if (rc < 0)
goto free;
- }
-
- if (llss->inode2->i_sb != llss->inode1->i_sb) {
- rc = -EXDEV;
- goto free;
- }
/* we use 2 bool because it is easier to swap than 2 bits */
if (lsl->sl_flags & SWAP_LAYOUTS_CHECK_DV1)
@@ -1900,10 +1743,8 @@ static int ll_swap_layouts(struct file *file1, struct file *file2,
llss->dv2 = lsl->sl_dv2;
rc = lu_fid_cmp(ll_inode2fid(llss->inode1), ll_inode2fid(llss->inode2));
- if (rc == 0) /* same file, done! */ {
- rc = 0;
+ if (!rc) /* same file, done! */
goto free;
- }
if (rc < 0) { /* sequentialize it */
swap(llss->inode1, llss->inode2);
@@ -1925,19 +1766,6 @@ static int ll_swap_layouts(struct file *file1, struct file *file2,
}
}
- /* to be able to restore mtime and atime after swap
- * we need to first save them
- */
- if (lsl->sl_flags &
- (SWAP_LAYOUTS_KEEP_MTIME | SWAP_LAYOUTS_KEEP_ATIME)) {
- llss->ia1.ia_mtime = llss->inode1->i_mtime;
- llss->ia1.ia_atime = llss->inode1->i_atime;
- llss->ia1.ia_valid = ATTR_MTIME | ATTR_ATIME;
- llss->ia2.ia_mtime = llss->inode2->i_mtime;
- llss->ia2.ia_atime = llss->inode2->i_atime;
- llss->ia2.ia_valid = ATTR_MTIME | ATTR_ATIME;
- }
-
/* ultimate check, before swapping the layouts we check if
* dataversion has changed (if requested)
*/
@@ -1987,39 +1815,6 @@ putgl:
ll_put_grouplock(llss->inode1, file1, gid);
}
- /* rc can be set from obd_iocontrol() or from a GOTO(putgl, ...) */
- if (rc != 0)
- goto free;
-
- /* clear useless flags */
- if (!(lsl->sl_flags & SWAP_LAYOUTS_KEEP_MTIME)) {
- llss->ia1.ia_valid &= ~ATTR_MTIME;
- llss->ia2.ia_valid &= ~ATTR_MTIME;
- }
-
- if (!(lsl->sl_flags & SWAP_LAYOUTS_KEEP_ATIME)) {
- llss->ia1.ia_valid &= ~ATTR_ATIME;
- llss->ia2.ia_valid &= ~ATTR_ATIME;
- }
-
- /* update time if requested */
- rc = 0;
- if (llss->ia2.ia_valid != 0) {
- inode_lock(llss->inode1);
- rc = ll_setattr(file1->f_path.dentry, &llss->ia2);
- inode_unlock(llss->inode1);
- }
-
- if (llss->ia1.ia_valid != 0) {
- int rc1;
-
- inode_lock(llss->inode2);
- rc1 = ll_setattr(file2->f_path.dentry, &llss->ia1);
- inode_unlock(llss->inode2);
- if (rc == 0)
- rc = rc1;
- }
-
free:
kfree(llss);
@@ -2176,24 +1971,52 @@ ll_file_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
sizeof(struct lustre_swap_layouts)))
return -EFAULT;
- if ((file->f_flags & O_ACCMODE) == 0) /* O_RDONLY */
+ if ((file->f_flags & O_ACCMODE) == O_RDONLY)
return -EPERM;
file2 = fget(lsl.sl_fd);
if (!file2)
return -EBADF;
- rc = -EPERM;
- if ((file2->f_flags & O_ACCMODE) != 0) /* O_WRONLY or O_RDWR */
+ /* O_WRONLY or O_RDWR */
+ if ((file2->f_flags & O_ACCMODE) == O_RDONLY) {
+ rc = -EPERM;
+ goto out;
+ }
+
+ if (lsl.sl_flags & SWAP_LAYOUTS_CLOSE) {
+ struct obd_client_handle *och = NULL;
+ struct ll_inode_info *lli;
+ struct inode *inode2;
+
+ if (lsl.sl_flags != SWAP_LAYOUTS_CLOSE) {
+ rc = -EINVAL;
+ goto out;
+ }
+
+ lli = ll_i2info(inode);
+ mutex_lock(&lli->lli_och_mutex);
+ if (fd->fd_lease_och) {
+ och = fd->fd_lease_och;
+ fd->fd_lease_och = NULL;
+ }
+ mutex_unlock(&lli->lli_och_mutex);
+ if (!och) {
+ rc = -ENOLCK;
+ goto out;
+ }
+ inode2 = file_inode(file2);
+ rc = ll_swap_layouts_close(och, inode, inode2);
+ } else {
rc = ll_swap_layouts(file, file2, &lsl);
+ }
+out:
fput(file2);
return rc;
}
case LL_IOC_LOV_GETSTRIPE:
return ll_file_getstripe(inode,
(struct lov_user_md __user *)arg);
- case FSFILT_IOC_FIEMAP:
- return ll_ioctl_fiemap(inode, arg);
case FSFILT_IOC_GETFLAGS:
case FSFILT_IOC_SETFLAGS:
return ll_iocontrol(inode, file, cmd, arg);
@@ -2489,17 +2312,17 @@ static int ll_flush(struct file *file, fl_owner_t id)
int cl_sync_file_range(struct inode *inode, loff_t start, loff_t end,
enum cl_fsync_mode mode, int ignore_layout)
{
- struct cl_env_nest nest;
struct lu_env *env;
struct cl_io *io;
struct cl_fsync_io *fio;
int result;
+ int refcheck;
if (mode != CL_FSYNC_NONE && mode != CL_FSYNC_LOCAL &&
mode != CL_FSYNC_DISCARD && mode != CL_FSYNC_ALL)
return -EINVAL;
- env = cl_env_nested_get(&nest);
+ env = cl_env_get(&refcheck);
if (IS_ERR(env))
return PTR_ERR(env);
@@ -2522,7 +2345,7 @@ int cl_sync_file_range(struct inode *inode, loff_t start, loff_t end,
if (result == 0)
result = fio->fi_nr_written;
cl_io_fini(env, io);
- cl_env_nested_put(&nest, env);
+ cl_env_put(env, &refcheck);
return result;
}
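cl_env_nest is on its way out: every converted site swaps the nest cookie for the refcheck integer. The conversion is mechanical:

/* Old (removed):                      New (used throughout):
 *   struct cl_env_nest nest;            int refcheck;
 *   env = cl_env_nested_get(&nest);     env = cl_env_get(&refcheck);
 *   ...                                 ...
 *   cl_env_nested_put(&nest, env);      cl_env_put(env, &refcheck);
 */
int refcheck;
struct lu_env *env = cl_env_get(&refcheck);

if (IS_ERR(env))
	return PTR_ERR(env);
/* ... use env ... */
cl_env_put(env, &refcheck);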
@@ -2549,9 +2372,11 @@ int ll_fsync(struct file *file, loff_t start, loff_t end, int datasync)
lli->lli_async_rc = 0;
if (rc == 0)
rc = err;
- err = lov_read_and_clear_async_rc(lli->lli_clob);
- if (rc == 0)
- rc = err;
+ if (lli->lli_clob) {
+ err = lov_read_and_clear_async_rc(lli->lli_clob);
+ if (rc == 0)
+ rc = err;
+ }
}
err = md_sync(ll_i2sbi(inode)->ll_md_exp, ll_inode2fid(inode), &req);
@@ -2588,7 +2413,7 @@ ll_file_flock(struct file *file, int cmd, struct file_lock *file_lock)
};
struct md_op_data *op_data;
struct lustre_handle lockh = {0};
- ldlm_policy_data_t flock = { {0} };
+ union ldlm_policy_data flock = { { 0 } };
int fl_type = file_lock->fl_type;
__u64 flags = 0;
int rc;
@@ -2707,7 +2532,8 @@ ll_file_flock(struct file *file, int cmd, struct file_lock *file_lock)
}
int ll_get_fid_by_name(struct inode *parent, const char *name,
- int namelen, struct lu_fid *fid)
+ int namelen, struct lu_fid *fid,
+ struct inode **inode)
{
struct md_op_data *op_data = NULL;
struct ptlrpc_request *req;
@@ -2719,7 +2545,7 @@ int ll_get_fid_by_name(struct inode *parent, const char *name,
if (IS_ERR(op_data))
return PTR_ERR(op_data);
- op_data->op_valid = OBD_MD_FLID;
+ op_data->op_valid = OBD_MD_FLID | OBD_MD_FLTYPE;
rc = md_getattr_name(ll_i2sbi(parent)->ll_md_exp, op_data, &req);
ll_finish_md_op_data(op_data);
if (rc < 0)
@@ -2732,6 +2558,9 @@ int ll_get_fid_by_name(struct inode *parent, const char *name,
}
if (fid)
*fid = body->mbo_fid1;
+
+ if (inode)
+ rc = ll_prep_inode(inode, req, parent->i_sb, NULL);
out_req:
ptlrpc_req_finished(req);
return rc;
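The new inode out-parameter lets a caller both resolve the fid and instantiate the child inode from the same getattr reply; passing NULL keeps the old fid-only behaviour:

/* Fid only, as the MDT-index ioctl above does: */
rc = ll_get_fid_by_name(inode, filename, namelen, NULL, NULL);

/* Fid plus instantiated inode, as ll_migrate() now does: */
rc = ll_get_fid_by_name(parent, name, namelen,
			&op_data->op_fid3, &child_inode);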
@@ -2741,9 +2570,12 @@ int ll_migrate(struct inode *parent, struct file *file, int mdtidx,
const char *name, int namelen)
{
struct ptlrpc_request *request = NULL;
+ struct obd_client_handle *och = NULL;
struct inode *child_inode = NULL;
struct dentry *dchild = NULL;
struct md_op_data *op_data;
+ struct mdt_body *body;
+ u64 data_version = 0;
struct qstr qstr;
int rc;
@@ -2762,22 +2594,25 @@ int ll_migrate(struct inode *parent, struct file *file, int mdtidx,
dchild = d_lookup(file_dentry(file), &qstr);
if (dchild) {
op_data->op_fid3 = *ll_inode2fid(dchild->d_inode);
- if (dchild->d_inode) {
+ if (dchild->d_inode)
child_inode = igrab(dchild->d_inode);
- if (child_inode) {
- inode_lock(child_inode);
- op_data->op_fid3 = *ll_inode2fid(child_inode);
- ll_invalidate_aliases(child_inode);
- }
- }
dput(dchild);
- } else {
+ }
+
+ if (!child_inode) {
rc = ll_get_fid_by_name(parent, name, namelen,
- &op_data->op_fid3);
+ &op_data->op_fid3, &child_inode);
if (rc)
goto out_free;
}
+ if (!child_inode) {
+ rc = -EINVAL;
+ goto out_free;
+ }
+
+ inode_lock(child_inode);
+ op_data->op_fid3 = *ll_inode2fid(child_inode);
if (!fid_is_sane(&op_data->op_fid3)) {
CERROR("%s: migrate %s, but fid "DFID" is insane\n",
ll_get_fsname(parent->i_sb, NULL, 0), name,
@@ -2796,6 +2631,26 @@ int ll_migrate(struct inode *parent, struct file *file, int mdtidx,
rc = 0;
goto out_free;
}
+again:
+ if (S_ISREG(child_inode->i_mode)) {
+ och = ll_lease_open(child_inode, NULL, FMODE_WRITE, 0);
+ if (IS_ERR(och)) {
+ rc = PTR_ERR(och);
+ och = NULL;
+ goto out_free;
+ }
+
+ rc = ll_data_version(child_inode, &data_version,
+ LL_DV_WR_FLUSH);
+ if (rc)
+ goto out_free;
+
+ op_data->op_handle = och->och_fh;
+ op_data->op_data = och->och_mod;
+ op_data->op_data_version = data_version;
+ op_data->op_lease_handle = och->och_lease_handle;
+ op_data->op_bias |= MDS_RENAME_MIGRATE;
+ }
op_data->op_mds = mdtidx;
op_data->op_cli_flags = CLI_MIGRATE;
@@ -2804,10 +2659,32 @@ int ll_migrate(struct inode *parent, struct file *file, int mdtidx,
if (!rc)
ll_update_times(request, parent);
- ptlrpc_req_finished(request);
+ body = req_capsule_server_get(&request->rq_pill, &RMF_MDT_BODY);
+ if (!body) {
+ rc = -EPROTO;
+ goto out_free;
+ }
+
+ /*
+ * If the server released the layout lock, we clean up the
+ * client och here; otherwise it is released in out_free:
+ */
+ if (och && body->mbo_valid & OBD_MD_CLOSE_INTENT_EXECED) {
+ obd_mod_put(och->och_mod);
+ md_clear_open_replay_data(ll_i2sbi(parent)->ll_md_exp, och);
+ och->och_fh.cookie = DEAD_HANDLE_MAGIC;
+ kfree(och);
+ och = NULL;
+ }
+ ptlrpc_req_finished(request);
+ /* Try again if the file layout has changed. */
+ if (rc == -EAGAIN && S_ISREG(child_inode->i_mode))
+ goto again;
out_free:
if (child_inode) {
+ if (och) /* close the file */
+ ll_lease_close(och, child_inode, NULL);
clear_nlink(child_inode);
inode_unlock(child_inode);
iput(child_inode);
@@ -2837,7 +2714,7 @@ int ll_have_md_lock(struct inode *inode, __u64 *bits,
enum ldlm_mode l_req_mode)
{
struct lustre_handle lockh;
- ldlm_policy_data_t policy;
+ union ldlm_policy_data policy;
enum ldlm_mode mode = (l_req_mode == LCK_MINMODE) ?
(LCK_CR | LCK_CW | LCK_PR | LCK_PW) : l_req_mode;
struct lu_fid *fid;
@@ -2878,7 +2755,7 @@ enum ldlm_mode ll_take_md_lock(struct inode *inode, __u64 bits,
struct lustre_handle *lockh, __u64 flags,
enum ldlm_mode mode)
{
- ldlm_policy_data_t policy = { .l_inodebits = {bits} };
+ union ldlm_policy_data policy = { .l_inodebits = { bits } };
struct lu_fid *fid;
fid = &ll_i2info(inode)->lli_fid;
@@ -2893,6 +2770,13 @@ static int ll_inode_revalidate_fini(struct inode *inode, int rc)
/* Already unlinked. Just update nlink and return success */
if (rc == -ENOENT) {
clear_nlink(inode);
+ /* If it is a striped directory and there is a bad stripe,
+ * revalidate the dentry again instead of returning an
+ * error.
+ */
+ if (S_ISDIR(inode->i_mode) && ll_i2info(inode)->lli_lsm_md)
+ return 0;
+
/* This path cannot be hit for regular files unless in
* case of obscure races, so no need to validate size.
*/
@@ -3040,6 +2924,8 @@ static int ll_inode_revalidate(struct dentry *dentry, __u64 ibits)
LTIME_S(inode->i_mtime) = ll_i2info(inode)->lli_mtime;
LTIME_S(inode->i_ctime) = ll_i2info(inode)->lli_ctime;
} else {
+ struct ll_inode_info *lli = ll_i2info(inode);
+
/* In case of restore, the MDT has the right size and has
* already send it back without granting the layout lock,
* inode is up-to-date so glimpse is useless.
@@ -3047,7 +2933,7 @@ static int ll_inode_revalidate(struct dentry *dentry, __u64 ibits)
* restore the MDT holds the layout lock so the glimpse will
* block up to the end of restore (getattr will block)
*/
- if (!(ll_i2info(inode)->lli_flags & LLIF_FILE_RESTORING))
+ if (!test_bit(LLIF_FILE_RESTORING, &lli->lli_flags))
rc = ll_glimpse_size(inode);
}
return rc;
@@ -3095,13 +2981,12 @@ static int ll_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
{
int rc;
size_t num_bytes;
- struct ll_user_fiemap *fiemap;
+ struct fiemap *fiemap;
unsigned int extent_count = fieinfo->fi_extents_max;
num_bytes = sizeof(*fiemap) + (extent_count *
- sizeof(struct ll_fiemap_extent));
+ sizeof(struct fiemap_extent));
fiemap = libcfs_kvzalloc(num_bytes, GFP_NOFS);
-
if (!fiemap)
return -ENOMEM;
@@ -3109,9 +2994,10 @@ static int ll_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
fiemap->fm_extent_count = fieinfo->fi_extents_max;
fiemap->fm_start = start;
fiemap->fm_length = len;
+
if (extent_count > 0 &&
copy_from_user(&fiemap->fm_extents[0], fieinfo->fi_extents_start,
- sizeof(struct ll_fiemap_extent)) != 0) {
+ sizeof(struct fiemap_extent))) {
rc = -EFAULT;
goto out;
}
@@ -3123,11 +3009,10 @@ static int ll_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
if (extent_count > 0 &&
copy_to_user(fieinfo->fi_extents_start, &fiemap->fm_extents[0],
fiemap->fm_mapped_extents *
- sizeof(struct ll_fiemap_extent)) != 0) {
+ sizeof(struct fiemap_extent))) {
rc = -EFAULT;
goto out;
}
-
out:
kvfree(fiemap);
return rc;
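The buffer layout is the standard fiemap one: a fixed header followed by fm_extent_count struct fiemap_extent records. The removed ll_ioctl_fiemap() guarded the size computation against overflow; the same guard, for reference:

/* Reject extent counts that would overflow the allocation size. */
if (extent_count >
    (SIZE_MAX - sizeof(struct fiemap)) / sizeof(struct fiemap_extent))
	return -EINVAL;
num_bytes = sizeof(struct fiemap) +
	    extent_count * sizeof(struct fiemap_extent);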
@@ -3370,35 +3255,50 @@ ll_iocontrol_call(struct inode *inode, struct file *file,
int ll_layout_conf(struct inode *inode, const struct cl_object_conf *conf)
{
struct ll_inode_info *lli = ll_i2info(inode);
- struct cl_env_nest nest;
+ struct cl_object *obj = lli->lli_clob;
struct lu_env *env;
- int result;
+ int rc;
+ int refcheck;
- if (!lli->lli_clob)
+ if (!obj)
return 0;
- env = cl_env_nested_get(&nest);
+ env = cl_env_get(&refcheck);
if (IS_ERR(env))
return PTR_ERR(env);
- result = cl_conf_set(env, lli->lli_clob, conf);
- cl_env_nested_put(&nest, env);
+ rc = cl_conf_set(env, obj, conf);
+ if (rc < 0)
+ goto out;
if (conf->coc_opc == OBJECT_CONF_SET) {
struct ldlm_lock *lock = conf->coc_lock;
+ struct cl_layout cl = {
+ .cl_layout_gen = 0,
+ };
LASSERT(lock);
LASSERT(ldlm_has_layout(lock));
- if (result == 0) {
- /* it can only be allowed to match after layout is
- * applied to inode otherwise false layout would be
- * seen. Applying layout should happen before dropping
- * the intent lock.
- */
- ldlm_lock_allow_match(lock);
- }
+
+ /* The lock can only be allowed to match after the layout is
+ * applied to the inode; otherwise a false layout would be
+ * seen. Applying the layout must happen before dropping
+ * the intent lock.
+ */
+ ldlm_lock_allow_match(lock);
+
+ rc = cl_object_layout_get(env, obj, &cl);
+ if (rc < 0)
+ goto out;
+
+ CDEBUG(D_VFSTRACE, DFID ": layout version change: %u -> %u\n",
+ PFID(&lli->lli_fid), ll_layout_version_get(lli),
+ cl.cl_layout_gen);
+ ll_layout_version_set(lli, cl.cl_layout_gen);
}
- return result;
+out:
+ cl_env_put(env, &refcheck);
+ return rc;
}
/* Fetch layout from MDT with getxattr request, if it's not ready yet */
@@ -3477,12 +3377,11 @@ out:
* in this function.
*/
static int ll_layout_lock_set(struct lustre_handle *lockh, enum ldlm_mode mode,
- struct inode *inode, __u32 *gen, bool reconf)
+ struct inode *inode)
{
struct ll_inode_info *lli = ll_i2info(inode);
struct ll_sb_info *sbi = ll_i2sbi(inode);
struct ldlm_lock *lock;
- struct lustre_md md = { NULL };
struct cl_object_conf conf;
int rc = 0;
bool lvb_ready;
@@ -3494,8 +3393,8 @@ static int ll_layout_lock_set(struct lustre_handle *lockh, enum ldlm_mode mode,
LASSERT(lock);
LASSERT(ldlm_has_layout(lock));
- LDLM_DEBUG(lock, "File "DFID"(%p) being reconfigured: %d",
- PFID(&lli->lli_fid), inode, reconf);
+ LDLM_DEBUG(lock, "File " DFID "(%p) being reconfigured",
+ PFID(&lli->lli_fid), inode);
/* in case this is a caching lock and reinstate with new inode */
md_set_lock_data(sbi->ll_md_exp, lockh, inode, NULL);
@@ -3506,15 +3405,8 @@ static int ll_layout_lock_set(struct lustre_handle *lockh, enum ldlm_mode mode,
/* checking lvb_ready is racy but this is okay. The worst case is
* that multiple processes may configure the file at the same time.
*/
- if (lvb_ready || !reconf) {
- rc = -ENODATA;
- if (lvb_ready) {
- /* layout_gen must be valid if layout lock is not
- * cancelled and stripe has already set
- */
- *gen = ll_layout_version_get(lli);
- rc = 0;
- }
+ if (lvb_ready) {
+ rc = 0;
goto out;
}
@@ -3524,39 +3416,19 @@ static int ll_layout_lock_set(struct lustre_handle *lockh, enum ldlm_mode mode,
/* for layout lock, lmm is returned in lock's lvb.
* lvb_data is immutable if the lock is held so it's safe to access it
- * without res lock. See the description in ldlm_lock_decref_internal()
- * for the condition to free lvb_data of layout lock
- */
- if (lock->l_lvb_data) {
- rc = obd_unpackmd(sbi->ll_dt_exp, &md.lsm,
- lock->l_lvb_data, lock->l_lvb_len);
- if (rc >= 0) {
- *gen = LL_LAYOUT_GEN_EMPTY;
- if (md.lsm)
- *gen = md.lsm->lsm_layout_gen;
- rc = 0;
- } else {
- CERROR("%s: file " DFID " unpackmd error: %d\n",
- ll_get_fsname(inode->i_sb, NULL, 0),
- PFID(&lli->lli_fid), rc);
- }
- }
- if (rc < 0)
- goto out;
-
- /* set layout to file. Unlikely this will fail as old layout was
+ * without res lock.
+ *
+ * Set the layout for the file. This is unlikely to fail as the
+ * old layout was surely eliminated.
*/
memset(&conf, 0, sizeof(conf));
conf.coc_opc = OBJECT_CONF_SET;
conf.coc_inode = inode;
conf.coc_lock = lock;
- conf.u.coc_md = &md;
+ conf.u.coc_layout.lb_buf = lock->l_lvb_data;
+ conf.u.coc_layout.lb_len = lock->l_lvb_len;
rc = ll_layout_conf(inode, &conf);
- if (md.lsm)
- obd_free_memmd(sbi->ll_dt_exp, &md.lsm);
-
/* refresh layout failed, need to wait */
wait_layout = rc == -EBUSY;
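
With obd_unpackmd() gone, the layout is handed down as an opaque (buffer, length) pair taken straight from the lock's LVB, and the receiving layer does its own unpacking. The configuration call site, reassembled from the hunk above (field names are only those visible in the diff):

/* The LVB is immutable while the lock is held, so it can be
 * passed by reference without copying or unpacking in llite.
 */
memset(&conf, 0, sizeof(conf));
conf.coc_opc = OBJECT_CONF_SET;
conf.coc_inode = inode;
conf.coc_lock = lock;
conf.u.coc_layout.lb_buf = lock->l_lvb_data;	/* opaque layout blob */
conf.u.coc_layout.lb_len = lock->l_lvb_len;	/* blob length in bytes */
rc = ll_layout_conf(inode, &conf);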
@@ -3584,20 +3456,7 @@ out:
return rc;
}
-/**
- * This function checks if there exists a LAYOUT lock on the client side,
- * or enqueues it if it doesn't have one in cache.
- *
- * This function will not hold layout lock so it may be revoked any time after
- * this function returns. Any operations depend on layout should be redone
- * in that case.
- *
- * This function should be called before lov_io_init() to get an uptodate
- * layout version, the caller should save the version number and after IO
- * is finished, this function should be called again to verify that layout
- * is not changed during IO time.
- */
-int ll_layout_refresh(struct inode *inode, __u32 *gen)
+static int ll_layout_refresh_locked(struct inode *inode)
{
struct ll_inode_info *lli = ll_i2info(inode);
struct ll_sb_info *sbi = ll_i2sbi(inode);
@@ -3613,17 +3472,6 @@ int ll_layout_refresh(struct inode *inode, __u32 *gen)
};
int rc;
- *gen = ll_layout_version_get(lli);
- if (!(sbi->ll_flags & LL_SBI_LAYOUT_LOCK) || *gen != LL_LAYOUT_GEN_NONE)
- return 0;
-
- /* sanity checks */
- LASSERT(fid_is_sane(ll_inode2fid(inode)));
- LASSERT(S_ISREG(inode->i_mode));
-
- /* take layout lock mutex to enqueue layout lock exclusively. */
- mutex_lock(&lli->lli_layout_mutex);
-
again:
/* mostly layout lock is caching on the local side, so try to match
* it before grabbing layout lock mutex.
@@ -3631,20 +3479,16 @@ again:
mode = ll_take_md_lock(inode, MDS_INODELOCK_LAYOUT, &lockh, 0,
LCK_CR | LCK_CW | LCK_PR | LCK_PW);
if (mode != 0) { /* hit cached lock */
- rc = ll_layout_lock_set(&lockh, mode, inode, gen, true);
+ rc = ll_layout_lock_set(&lockh, mode, inode);
if (rc == -EAGAIN)
goto again;
-
- mutex_unlock(&lli->lli_layout_mutex);
return rc;
}
op_data = ll_prep_md_op_data(NULL, inode, inode, NULL,
0, 0, LUSTRE_OPC_ANY, NULL);
- if (IS_ERR(op_data)) {
- mutex_unlock(&lli->lli_layout_mutex);
+ if (IS_ERR(op_data))
return PTR_ERR(op_data);
- }
/* have to enqueue one */
memset(&it, 0, sizeof(it));
@@ -3668,10 +3512,50 @@ again:
if (rc == 0) {
/* set lock data in case this is a new lock */
ll_set_lock_data(sbi->ll_md_exp, inode, &it, NULL);
- rc = ll_layout_lock_set(&lockh, mode, inode, gen, true);
+ rc = ll_layout_lock_set(&lockh, mode, inode);
if (rc == -EAGAIN)
goto again;
}
+
+ return rc;
+}
+
+/**
+ * This function checks if there exists a LAYOUT lock on the client side,
+ * or enqueues it if it doesn't have one in cache.
+ *
+ * This function does not hold the layout lock, so the lock may be revoked
+ * any time after this function returns. Any operation that depends on the
+ * layout should be redone in that case.
+ *
+ * This function should be called before lov_io_init() to get an up-to-date
+ * layout version; the caller should save the version number, and after IO
+ * is finished, call this function again to verify that the layout was not
+ * changed during the IO.
+ */
+int ll_layout_refresh(struct inode *inode, __u32 *gen)
+{
+ struct ll_inode_info *lli = ll_i2info(inode);
+ struct ll_sb_info *sbi = ll_i2sbi(inode);
+ int rc;
+
+ *gen = ll_layout_version_get(lli);
+ if (!(sbi->ll_flags & LL_SBI_LAYOUT_LOCK) || *gen != CL_LAYOUT_GEN_NONE)
+ return 0;
+
+ /* sanity checks */
+ LASSERT(fid_is_sane(ll_inode2fid(inode)));
+ LASSERT(S_ISREG(inode->i_mode));
+
+ /* take layout lock mutex to enqueue layout lock exclusively. */
+ mutex_lock(&lli->lli_layout_mutex);
+
+ rc = ll_layout_refresh_locked(inode);
+ if (rc < 0)
+ goto out;
+
+ *gen = ll_layout_version_get(lli);
+out:
mutex_unlock(&lli->lli_layout_mutex);
return rc;
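
ll_layout_refresh() is split into a thin wrapper that owns lli_layout_mutex and a _locked worker that does the matching and enqueueing; the generation is sampled again under the mutex only after a successful refresh. A condensed sketch of the wrapper added above:

/* Sketch: the lock/worker split. The wrapper takes the mutex,
 * delegates to the _locked worker (which loops on -EAGAIN
 * internally), and rereads the generation only on success so
 * callers never see a stale value.
 */
int refresh(struct inode *inode, __u32 *gen)
{
	struct ll_inode_info *lli = ll_i2info(inode);
	int rc;

	mutex_lock(&lli->lli_layout_mutex);
	rc = ll_layout_refresh_locked(inode);
	if (rc >= 0)
		*gen = ll_layout_version_get(lli);
	mutex_unlock(&lli->lli_layout_mutex);
	return rc;
}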
diff --git a/drivers/staging/lustre/lustre/llite/glimpse.c b/drivers/staging/lustre/lustre/llite/glimpse.c
index 22507b9c6d69..504498de536e 100644
--- a/drivers/staging/lustre/lustre/llite/glimpse.c
+++ b/drivers/staging/lustre/lustre/llite/glimpse.c
@@ -80,69 +80,60 @@ blkcnt_t dirty_cnt(struct inode *inode)
int cl_glimpse_lock(const struct lu_env *env, struct cl_io *io,
struct inode *inode, struct cl_object *clob, int agl)
{
- struct ll_inode_info *lli = ll_i2info(inode);
const struct lu_fid *fid = lu_object_fid(&clob->co_lu);
- int result;
+ struct cl_lock *lock = vvp_env_lock(env);
+ struct cl_lock_descr *descr = &lock->cll_descr;
+ int result = 0;
+
+ CDEBUG(D_DLMTRACE, "Glimpsing inode " DFID "\n", PFID(fid));
+
+ /* NOTE: this looks like a DLM lock request, but it may
+ * not be one. Due to the CEF_ASYNC flag (translated
+ * to LDLM_FL_HAS_INTENT by osc), this is a
+ * glimpse request that won't revoke any
+ * conflicting DLM locks held. Instead,
+ * ll_glimpse_callback() will be called on each
+ * client holding a DLM lock against this file,
+ * and the resulting size will be returned for each
+ * stripe. A DLM lock on [0, EOF] is acquired only
+ * if there were no conflicting locks. If there
+ * were conflicting locks, enqueuing or waiting
+ * fails with -ENAVAIL, but valid inode
+ * attributes are returned anyway.
+ */
+ *descr = whole_file;
+ descr->cld_obj = clob;
+ descr->cld_mode = CLM_READ;
+ descr->cld_enq_flags = CEF_ASYNC | CEF_MUST;
+ if (agl)
+ descr->cld_enq_flags |= CEF_AGL;
+ /*
+ * CEF_ASYNC is used because glimpse sub-locks cannot
+ * deadlock (because they never conflict with other
+ * locks) and, hence, can be enqueued out-of-order.
+ *
+ * CEF_MUST protects glimpse lock from conversion into
+ * a lockless mode.
+ */
+ result = cl_lock_request(env, io, lock);
+ if (result < 0)
+ return result;
- result = 0;
- if (!(lli->lli_flags & LLIF_MDS_SIZE_LOCK)) {
- CDEBUG(D_DLMTRACE, "Glimpsing inode " DFID "\n", PFID(fid));
- if (lli->lli_has_smd) {
- struct cl_lock *lock = vvp_env_lock(env);
- struct cl_lock_descr *descr = &lock->cll_descr;
-
- /* NOTE: this looks like DLM lock request, but it may
- * not be one. Due to CEF_ASYNC flag (translated
- * to LDLM_FL_HAS_INTENT by osc), this is
- * glimpse request, that won't revoke any
- * conflicting DLM locks held. Instead,
- * ll_glimpse_callback() will be called on each
- * client holding a DLM lock against this file,
- * and resulting size will be returned for each
- * stripe. DLM lock on [0, EOF] is acquired only
- * if there were no conflicting locks. If there
- * were conflicting locks, enqueuing or waiting
- * fails with -ENAVAIL, but valid inode
- * attributes are returned anyway.
- */
- *descr = whole_file;
- descr->cld_obj = clob;
- descr->cld_mode = CLM_READ;
- descr->cld_enq_flags = CEF_ASYNC | CEF_MUST;
- if (agl)
- descr->cld_enq_flags |= CEF_AGL;
+ if (!agl) {
+ ll_merge_attr(env, inode);
+ if (i_size_read(inode) > 0 && !inode->i_blocks) {
/*
- * CEF_ASYNC is used because glimpse sub-locks cannot
- * deadlock (because they never conflict with other
- * locks) and, hence, can be enqueued out-of-order.
- *
- * CEF_MUST protects glimpse lock from conversion into
- * a lockless mode.
+ * LU-417: add the dirty pages block count,
+ * lest i_blocks report 0 and some "cp" or
+ * "tar" think it's a completely
+ * sparse file and skip it.
*/
- result = cl_lock_request(env, io, lock);
- if (result < 0)
- return result;
-
- if (!agl) {
- ll_merge_attr(env, inode);
- if (i_size_read(inode) > 0 &&
- inode->i_blocks == 0) {
- /*
- * LU-417: Add dirty pages block count
- * lest i_blocks reports 0, some "cp" or
- * "tar" may think it's a completely
- * sparse file and skip it.
- */
- inode->i_blocks = dirty_cnt(inode);
- }
- }
- cl_lock_release(env, lock);
- } else {
- CDEBUG(D_DLMTRACE, "No objects for inode\n");
- ll_merge_attr(env, inode);
+ inode->i_blocks = dirty_cnt(inode);
}
}
+ cl_lock_release(env, lock);
+
return result;
}
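
The rewrite flattens cl_glimpse_lock() into a single request/merge/release sequence instead of nesting everything under the removed lli_has_smd check. Its skeleton, omitting the AGL and i_blocks details (a sketch, not the full body):

/* Enqueue the glimpse lock; a failed request grants nothing and
 * must not be paired with a release.
 */
result = cl_lock_request(env, io, lock);
if (result < 0)
	return result;

ll_merge_attr(env, inode);	/* fold per-stripe sizes into the inode */

cl_lock_release(env, lock);	/* matches the successful request */
return result;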
@@ -212,39 +203,3 @@ again:
}
return result;
}
-
-int cl_local_size(struct inode *inode)
-{
- struct lu_env *env = NULL;
- struct cl_io *io = NULL;
- struct cl_object *clob;
- int result;
- int refcheck;
-
- if (!ll_i2info(inode)->lli_has_smd)
- return 0;
-
- result = cl_io_get(inode, &env, &io, &refcheck);
- if (result <= 0)
- return result;
-
- clob = io->ci_obj;
- result = cl_io_init(env, io, CIT_MISC, clob);
- if (result > 0) {
- result = io->ci_result;
- } else if (result == 0) {
- struct cl_lock *lock = vvp_env_lock(env);
-
- lock->cll_descr = whole_file;
- lock->cll_descr.cld_enq_flags = CEF_PEEK;
- lock->cll_descr.cld_obj = clob;
- result = cl_lock_request(env, io, lock);
- if (result == 0) {
- ll_merge_attr(env, inode);
- cl_lock_release(env, lock);
- }
- }
- cl_io_fini(env, io);
- cl_env_put(env, &refcheck);
- return result;
-}
diff --git a/drivers/staging/lustre/lustre/llite/lcommon_cl.c b/drivers/staging/lustre/lustre/llite/lcommon_cl.c
index 084330d08f7a..dd1cfd8f5213 100644
--- a/drivers/staging/lustre/lustre/llite/lcommon_cl.c
+++ b/drivers/staging/lustre/lustre/llite/lcommon_cl.c
@@ -80,7 +80,8 @@ int cl_inode_fini_refcheck;
*/
static DEFINE_MUTEX(cl_inode_fini_guard);
-int cl_setattr_ost(struct inode *inode, const struct iattr *attr)
+int cl_setattr_ost(struct cl_object *obj, const struct iattr *attr,
+ unsigned int attr_flags)
{
struct lu_env *env;
struct cl_io *io;
@@ -92,14 +93,15 @@ int cl_setattr_ost(struct inode *inode, const struct iattr *attr)
return PTR_ERR(env);
io = vvp_env_thread_io(env);
- io->ci_obj = ll_i2info(inode)->lli_clob;
+ io->ci_obj = obj;
io->u.ci_setattr.sa_attr.lvb_atime = LTIME_S(attr->ia_atime);
io->u.ci_setattr.sa_attr.lvb_mtime = LTIME_S(attr->ia_mtime);
io->u.ci_setattr.sa_attr.lvb_ctime = LTIME_S(attr->ia_ctime);
io->u.ci_setattr.sa_attr.lvb_size = attr->ia_size;
+ io->u.ci_setattr.sa_attr_flags = attr_flags;
io->u.ci_setattr.sa_valid = attr->ia_valid;
- io->u.ci_setattr.sa_parent_fid = ll_inode2fid(inode);
+ io->u.ci_setattr.sa_parent_fid = lu_object_fid(&obj->co_lu);
again:
if (cl_io_init(env, io, CIT_SETATTR, io->ci_obj) == 0) {
@@ -148,7 +150,7 @@ int cl_file_inode_init(struct inode *inode, struct lustre_md *md)
struct cl_object_conf conf = {
.coc_inode = inode,
.u = {
- .coc_md = md
+ .coc_layout = md->layout,
}
};
int result = 0;
@@ -182,7 +184,6 @@ int cl_file_inode_init(struct inode *inode, struct lustre_md *md)
* locked by I_NEW bit.
*/
lli->lli_clob = clob;
- lli->lli_has_smd = lsm_has_objects(md->lsm);
lu_object_ref_add(&clob->co_lu, "inode", inode);
} else {
result = PTR_ERR(clob);
@@ -245,15 +246,11 @@ void cl_inode_fini(struct inode *inode)
int emergency;
if (clob) {
- void *cookie;
-
- cookie = cl_env_reenter();
env = cl_env_get(&refcheck);
emergency = IS_ERR(env);
if (emergency) {
mutex_lock(&cl_inode_fini_guard);
LASSERT(cl_inode_fini_env);
- cl_env_implant(cl_inode_fini_env, &refcheck);
env = cl_inode_fini_env;
}
/*
@@ -265,13 +262,10 @@ void cl_inode_fini(struct inode *inode)
lu_object_ref_del(&clob->co_lu, "inode", inode);
cl_object_put_last(env, clob);
lli->lli_clob = NULL;
- if (emergency) {
- cl_env_unplant(cl_inode_fini_env, &refcheck);
+ if (emergency)
mutex_unlock(&cl_inode_fini_guard);
- } else {
+ else
cl_env_put(env, &refcheck);
- }
- cl_env_reexit(cookie);
}
}
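
With the implant/reenter machinery gone, cl_inode_fini() falls back to the pre-allocated cl_inode_fini_env, serialized by cl_inode_fini_guard, whenever cl_env_get() fails. The fallback shape, using only the names from the hunk:

/* Sketch: emergency-environment fallback. If no per-thread env
 * can be obtained, borrow the single pre-allocated one under a
 * mutex; the unlock/put on the way out must match the branch
 * taken on the way in.
 */
env = cl_env_get(&refcheck);
emergency = IS_ERR(env);
if (emergency) {
	mutex_lock(&cl_inode_fini_guard);
	env = cl_inode_fini_env;	/* shared, mutex-protected */
}

/* ... tear down the cl_object with env ... */

if (emergency)
	mutex_unlock(&cl_inode_fini_guard);
else
	cl_env_put(env, &refcheck);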
@@ -302,22 +296,3 @@ __u32 cl_fid_build_gen(const struct lu_fid *fid)
gen = fid_flatten(fid) >> 32;
return gen;
}
-
-/* lsm is unreliable after hsm implementation as layout can be changed at
- * any time. This is only to support old, non-clio-ized interfaces. It will
- * cause deadlock if clio operations are called with this extra layout refcount
- * because in case the layout changed during the IO, ll_layout_refresh() will
- * have to wait for the refcount to become zero to destroy the older layout.
- *
- * Notice that the lsm returned by this function may not be valid unless called
- * inside layout lock - MDS_INODELOCK_LAYOUT.
- */
-struct lov_stripe_md *ccc_inode_lsm_get(struct inode *inode)
-{
- return lov_lsm_get(ll_i2info(inode)->lli_clob);
-}
-
-inline void ccc_inode_lsm_put(struct inode *inode, struct lov_stripe_md *lsm)
-{
- lov_lsm_put(ll_i2info(inode)->lli_clob, lsm);
-}
diff --git a/drivers/staging/lustre/lustre/llite/lcommon_misc.c b/drivers/staging/lustre/lustre/llite/lcommon_misc.c
index fb346c12dad2..f48660ed350f 100644
--- a/drivers/staging/lustre/lustre/llite/lcommon_misc.c
+++ b/drivers/staging/lustre/lustre/llite/lcommon_misc.c
@@ -47,36 +47,29 @@
*/
int cl_init_ea_size(struct obd_export *md_exp, struct obd_export *dt_exp)
{
- struct lov_stripe_md lsm = { .lsm_magic = LOV_MAGIC_V3 };
- __u32 valsize = sizeof(struct lov_desc);
- int rc, easize, def_easize, cookiesize;
- struct lov_desc desc;
- __u16 stripes, def_stripes;
-
- rc = obd_get_info(NULL, dt_exp, sizeof(KEY_LOVDESC), KEY_LOVDESC,
- &valsize, &desc, NULL);
+ u32 val_size, max_easize, def_easize;
+ int rc;
+
+ val_size = sizeof(max_easize);
+ rc = obd_get_info(NULL, dt_exp, sizeof(KEY_MAX_EASIZE), KEY_MAX_EASIZE,
+ &val_size, &max_easize);
if (rc)
return rc;
- stripes = min_t(__u32, desc.ld_tgt_count, LOV_MAX_STRIPE_COUNT);
- lsm.lsm_stripe_count = stripes;
- easize = obd_size_diskmd(dt_exp, &lsm);
-
- def_stripes = min_t(__u32, desc.ld_default_stripe_count,
- LOV_MAX_STRIPE_COUNT);
- lsm.lsm_stripe_count = def_stripes;
- def_easize = obd_size_diskmd(dt_exp, &lsm);
-
- cookiesize = stripes * sizeof(struct llog_cookie);
+ val_size = sizeof(def_easize);
+ rc = obd_get_info(NULL, dt_exp, sizeof(KEY_DEFAULT_EASIZE),
+ KEY_DEFAULT_EASIZE, &val_size, &def_easize);
+ if (rc)
+ return rc;
- /* default cookiesize is 0 because from 2.4 server doesn't send
+ /*
+ * default cookiesize is 0 because, from 2.4 on, the server doesn't send
* llog cookies to client.
*/
- CDEBUG(D_HA,
- "updating def/max_easize: %d/%d def/max_cookiesize: 0/%d\n",
- def_easize, easize, cookiesize);
+ CDEBUG(D_HA, "updating def/max_easize: %d/%d\n",
+ def_easize, max_easize);
- rc = md_init_ea_size(md_exp, easize, def_easize, cookiesize, 0);
+ rc = md_init_ea_size(md_exp, max_easize, def_easize);
return rc;
}
@@ -169,13 +162,11 @@ int cl_get_grouplock(struct cl_object *obj, unsigned long gid, int nonblock,
return rc;
}
- cg->lg_env = cl_env_get(&refcheck);
+ cg->lg_env = env;
cg->lg_io = io;
cg->lg_lock = lock;
cg->lg_gid = gid;
- LASSERT(cg->lg_env == env);
- cl_env_unplant(env, &refcheck);
return 0;
}
@@ -184,14 +175,10 @@ void cl_put_grouplock(struct ll_grouplock *cg)
struct lu_env *env = cg->lg_env;
struct cl_io *io = cg->lg_io;
struct cl_lock *lock = cg->lg_lock;
- int refcheck;
LASSERT(cg->lg_env);
LASSERT(cg->lg_gid);
- cl_env_implant(env, &refcheck);
- cl_env_put(env, &refcheck);
-
cl_lock_release(env, lock);
cl_io_fini(env, io);
cl_env_put(env, NULL);
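
Both group-lock paths simplify for the same reason: cl_env_get()/cl_env_put() no longer require the implant/unplant dance, so the env reference can simply be stored in the grouplock and dropped when it is released. The ownership handoff, condensed from the two hunks above:

/* In cl_get_grouplock(): keep the env reference inside the
 * grouplock instead of returning it to the per-thread cache.
 */
cg->lg_env = env;
cg->lg_io = io;
cg->lg_lock = lock;

/* In cl_put_grouplock(): the stored reference is dropped with a
 * NULL refcheck, since it was never re-implanted in this thread.
 */
cl_lock_release(cg->lg_env, cg->lg_lock);
cl_io_fini(cg->lg_env, cg->lg_io);
cl_env_put(cg->lg_env, NULL);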
diff --git a/drivers/staging/lustre/lustre/llite/llite_close.c b/drivers/staging/lustre/lustre/llite/llite_close.c
deleted file mode 100644
index 8644631bf2ba..000000000000
--- a/drivers/staging/lustre/lustre/llite/llite_close.c
+++ /dev/null
@@ -1,395 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * lustre/llite/llite_close.c
- *
- * Lustre Lite routines to issue a secondary close after writeback
- */
-
-#include <linux/module.h>
-
-#define DEBUG_SUBSYSTEM S_LLITE
-
-#include "llite_internal.h"
-
-/** records that a write is in flight */
-void vvp_write_pending(struct vvp_object *club, struct vvp_page *page)
-{
- struct ll_inode_info *lli = ll_i2info(club->vob_inode);
-
- spin_lock(&lli->lli_lock);
- lli->lli_flags |= LLIF_SOM_DIRTY;
- if (page && list_empty(&page->vpg_pending_linkage))
- list_add(&page->vpg_pending_linkage, &club->vob_pending_list);
- spin_unlock(&lli->lli_lock);
-}
-
-/** records that a write has completed */
-void vvp_write_complete(struct vvp_object *club, struct vvp_page *page)
-{
- struct ll_inode_info *lli = ll_i2info(club->vob_inode);
- int rc = 0;
-
- spin_lock(&lli->lli_lock);
- if (page && !list_empty(&page->vpg_pending_linkage)) {
- list_del_init(&page->vpg_pending_linkage);
- rc = 1;
- }
- spin_unlock(&lli->lli_lock);
- if (rc)
- ll_queue_done_writing(club->vob_inode, 0);
-}
-
-/** Queues DONE_WRITING if
- * - done writing is allowed;
- * - inode has no dirty pages;
- */
-void ll_queue_done_writing(struct inode *inode, unsigned long flags)
-{
- struct ll_inode_info *lli = ll_i2info(inode);
- struct vvp_object *club = cl2vvp(ll_i2info(inode)->lli_clob);
-
- spin_lock(&lli->lli_lock);
- lli->lli_flags |= flags;
-
- if ((lli->lli_flags & LLIF_DONE_WRITING) &&
- list_empty(&club->vob_pending_list)) {
- struct ll_close_queue *lcq = ll_i2sbi(inode)->ll_lcq;
-
- if (lli->lli_flags & LLIF_MDS_SIZE_LOCK)
- CWARN("%s: file "DFID"(flags %u) Size-on-MDS valid, done writing allowed and no dirty pages\n",
- ll_get_fsname(inode->i_sb, NULL, 0),
- PFID(ll_inode2fid(inode)), lli->lli_flags);
- /* DONE_WRITING is allowed and inode has no dirty page. */
- spin_lock(&lcq->lcq_lock);
-
- LASSERT(list_empty(&lli->lli_close_list));
- CDEBUG(D_INODE, "adding inode "DFID" to close list\n",
- PFID(ll_inode2fid(inode)));
- list_add_tail(&lli->lli_close_list, &lcq->lcq_head);
-
- /* Avoid a concurrent insertion into the close thread queue:
- * the inode is already in the close thread queue; open(), write()
- * and close() happen; the epoch is closed as the inode is marked
- * LLIF_EPOCH_PENDING. When pages are written, the inode should
- * not be inserted into the queue again; clear this flag to
- * avoid it.
- */
- lli->lli_flags &= ~LLIF_DONE_WRITING;
-
- wake_up(&lcq->lcq_waitq);
- spin_unlock(&lcq->lcq_lock);
- }
- spin_unlock(&lli->lli_lock);
-}
-
-/** Pack SOM attributes into @op_data for CLOSE, DONE_WRITING rpc. */
-void ll_done_writing_attr(struct inode *inode, struct md_op_data *op_data)
-{
- struct ll_inode_info *lli = ll_i2info(inode);
-
- op_data->op_flags |= MF_SOM_CHANGE;
- /* Check if Size-on-MDS attributes are valid. */
- if (lli->lli_flags & LLIF_MDS_SIZE_LOCK)
- CERROR("%s: inode "DFID"(flags %u) MDS holds lock on Size-on-MDS attributes\n",
- ll_get_fsname(inode->i_sb, NULL, 0),
- PFID(ll_inode2fid(inode)), lli->lli_flags);
-
- if (!cl_local_size(inode)) {
- /* Send Size-on-MDS Attributes if valid. */
- op_data->op_attr.ia_valid |= ATTR_MTIME_SET | ATTR_CTIME_SET |
- ATTR_ATIME_SET | ATTR_SIZE | ATTR_BLOCKS;
- }
-}
-
-/** Closes ioepoch and packs Size-on-MDS attribute if needed into @op_data. */
-void ll_ioepoch_close(struct inode *inode, struct md_op_data *op_data,
- struct obd_client_handle **och, unsigned long flags)
-{
- struct ll_inode_info *lli = ll_i2info(inode);
- struct vvp_object *club = cl2vvp(ll_i2info(inode)->lli_clob);
-
- spin_lock(&lli->lli_lock);
- if (!(list_empty(&club->vob_pending_list))) {
- if (!(lli->lli_flags & LLIF_EPOCH_PENDING)) {
- LASSERT(*och);
- LASSERT(!lli->lli_pending_och);
- /* Inode is dirty and there is no pending write done
- * request yet, DONE_WRITE is to be sent later.
- */
- lli->lli_flags |= LLIF_EPOCH_PENDING;
- lli->lli_pending_och = *och;
- spin_unlock(&lli->lli_lock);
-
- inode = igrab(inode);
- LASSERT(inode);
- goto out;
- }
- if (flags & LLIF_DONE_WRITING) {
- /* Some pages are still dirty, it is early to send
- * DONE_WRITE. Wait until all pages will be flushed
- * and try DONE_WRITE again later.
- */
- LASSERT(!(lli->lli_flags & LLIF_DONE_WRITING));
- lli->lli_flags |= LLIF_DONE_WRITING;
- spin_unlock(&lli->lli_lock);
-
- inode = igrab(inode);
- LASSERT(inode);
- goto out;
- }
- }
- CDEBUG(D_INODE, "Epoch %llu closed on "DFID"\n",
- ll_i2info(inode)->lli_ioepoch, PFID(&lli->lli_fid));
- op_data->op_flags |= MF_EPOCH_CLOSE;
-
- if (flags & LLIF_DONE_WRITING) {
- LASSERT(lli->lli_flags & LLIF_SOM_DIRTY);
- LASSERT(!(lli->lli_flags & LLIF_DONE_WRITING));
- *och = lli->lli_pending_och;
- lli->lli_pending_och = NULL;
- lli->lli_flags &= ~LLIF_EPOCH_PENDING;
- } else {
- /* Pack Size-on-MDS inode attributes only if they have changed */
- if (!(lli->lli_flags & LLIF_SOM_DIRTY)) {
- spin_unlock(&lli->lli_lock);
- goto out;
- }
-
- /* There is a pending DONE_WRITE -- close epoch with no
- * attribute change.
- */
- if (lli->lli_flags & LLIF_EPOCH_PENDING) {
- spin_unlock(&lli->lli_lock);
- goto out;
- }
- }
-
- LASSERT(list_empty(&club->vob_pending_list));
- lli->lli_flags &= ~LLIF_SOM_DIRTY;
- spin_unlock(&lli->lli_lock);
- ll_done_writing_attr(inode, op_data);
-
-out:
- return;
-}
-
-/**
- * Client updates SOM attributes on MDS (including llog cookies):
- * obd_getattr with no lock and md_setattr.
- */
-int ll_som_update(struct inode *inode, struct md_op_data *op_data)
-{
- struct ll_inode_info *lli = ll_i2info(inode);
- struct ptlrpc_request *request = NULL;
- __u32 old_flags;
- struct obdo *oa;
- int rc;
-
- LASSERT(op_data);
- if (lli->lli_flags & LLIF_MDS_SIZE_LOCK)
- CERROR("%s: inode "DFID"(flags %u) MDS holds lock on Size-on-MDS attributes\n",
- ll_get_fsname(inode->i_sb, NULL, 0),
- PFID(ll_inode2fid(inode)), lli->lli_flags);
-
- oa = kmem_cache_zalloc(obdo_cachep, GFP_NOFS);
- if (!oa) {
- CERROR("can't allocate memory for Size-on-MDS update.\n");
- return -ENOMEM;
- }
-
- old_flags = op_data->op_flags;
- op_data->op_flags = MF_SOM_CHANGE;
-
- /* If inode is already in another epoch, skip getattr from OSTs. */
- if (lli->lli_ioepoch == op_data->op_ioepoch) {
- rc = ll_inode_getattr(inode, oa, op_data->op_ioepoch,
- old_flags & MF_GETATTR_LOCK);
- if (rc) {
- oa->o_valid = 0;
- if (rc != -ENOENT)
- CERROR("%s: inode_getattr failed - unable to send a Size-on-MDS attribute update for inode "DFID": rc = %d\n",
- ll_get_fsname(inode->i_sb, NULL, 0),
- PFID(ll_inode2fid(inode)), rc);
- } else {
- CDEBUG(D_INODE, "Size-on-MDS update on "DFID"\n",
- PFID(&lli->lli_fid));
- }
- /* Install attributes into op_data. */
- md_from_obdo(op_data, oa, oa->o_valid);
- }
-
- rc = md_setattr(ll_i2sbi(inode)->ll_md_exp, op_data,
- NULL, 0, NULL, 0, &request, NULL);
- ptlrpc_req_finished(request);
-
- kmem_cache_free(obdo_cachep, oa);
- return rc;
-}
-
-/**
- * Closes the ioepoch and packs all the attributes into @op_data for
- * DONE_WRITING rpc.
- */
-static void ll_prepare_done_writing(struct inode *inode,
- struct md_op_data *op_data,
- struct obd_client_handle **och)
-{
- ll_ioepoch_close(inode, op_data, och, LLIF_DONE_WRITING);
- /* If there is no @och, we do not do D_W yet. */
- if (!*och)
- return;
-
- ll_pack_inode2opdata(inode, op_data, &(*och)->och_fh);
- ll_prep_md_op_data(op_data, inode, NULL, NULL,
- 0, 0, LUSTRE_OPC_ANY, NULL);
-}
-
-/** Send a DONE_WRITING rpc. */
-static void ll_done_writing(struct inode *inode)
-{
- struct obd_client_handle *och = NULL;
- struct md_op_data *op_data;
- int rc;
-
- LASSERT(exp_connect_som(ll_i2mdexp(inode)));
-
- op_data = kzalloc(sizeof(*op_data), GFP_NOFS);
- if (!op_data)
- return;
-
- ll_prepare_done_writing(inode, op_data, &och);
- /* If there is no @och, we do not do D_W yet. */
- if (!och)
- goto out;
-
- rc = md_done_writing(ll_i2sbi(inode)->ll_md_exp, op_data, NULL);
- if (rc == -EAGAIN)
- /* MDS has instructed us to obtain Size-on-MDS attribute from
- * OSTs and send setattr back to MDS.
- */
- rc = ll_som_update(inode, op_data);
- else if (rc) {
- CERROR("%s: inode "DFID" mdc done_writing failed: rc = %d\n",
- ll_get_fsname(inode->i_sb, NULL, 0),
- PFID(ll_inode2fid(inode)), rc);
- }
-out:
- ll_finish_md_op_data(op_data);
- if (och) {
- md_clear_open_replay_data(ll_i2sbi(inode)->ll_md_exp, och);
- kfree(och);
- }
-}
-
-static struct ll_inode_info *ll_close_next_lli(struct ll_close_queue *lcq)
-{
- struct ll_inode_info *lli = NULL;
-
- spin_lock(&lcq->lcq_lock);
-
- if (!list_empty(&lcq->lcq_head)) {
- lli = list_entry(lcq->lcq_head.next, struct ll_inode_info,
- lli_close_list);
- list_del_init(&lli->lli_close_list);
- } else if (atomic_read(&lcq->lcq_stop)) {
- lli = ERR_PTR(-EALREADY);
- }
-
- spin_unlock(&lcq->lcq_lock);
- return lli;
-}
-
-static int ll_close_thread(void *arg)
-{
- struct ll_close_queue *lcq = arg;
-
- complete(&lcq->lcq_comp);
-
- while (1) {
- struct l_wait_info lwi = { 0 };
- struct ll_inode_info *lli;
- struct inode *inode;
-
- l_wait_event_exclusive(lcq->lcq_waitq,
- (lli = ll_close_next_lli(lcq)) != NULL,
- &lwi);
- if (IS_ERR(lli))
- break;
-
- inode = ll_info2i(lli);
- CDEBUG(D_INFO, "done_writing for inode "DFID"\n",
- PFID(ll_inode2fid(inode)));
- ll_done_writing(inode);
- iput(inode);
- }
-
- CDEBUG(D_INFO, "ll_close exiting\n");
- complete(&lcq->lcq_comp);
- return 0;
-}
-
-int ll_close_thread_start(struct ll_close_queue **lcq_ret)
-{
- struct ll_close_queue *lcq;
- struct task_struct *task;
-
- if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_CLOSE_THREAD))
- return -EINTR;
-
- lcq = kzalloc(sizeof(*lcq), GFP_NOFS);
- if (!lcq)
- return -ENOMEM;
-
- spin_lock_init(&lcq->lcq_lock);
- INIT_LIST_HEAD(&lcq->lcq_head);
- init_waitqueue_head(&lcq->lcq_waitq);
- init_completion(&lcq->lcq_comp);
-
- task = kthread_run(ll_close_thread, lcq, "ll_close");
- if (IS_ERR(task)) {
- kfree(lcq);
- return PTR_ERR(task);
- }
-
- wait_for_completion(&lcq->lcq_comp);
- *lcq_ret = lcq;
- return 0;
-}
-
-void ll_close_thread_shutdown(struct ll_close_queue *lcq)
-{
- init_completion(&lcq->lcq_comp);
- atomic_inc(&lcq->lcq_stop);
- wake_up(&lcq->lcq_waitq);
- wait_for_completion(&lcq->lcq_comp);
- kfree(lcq);
-}
diff --git a/drivers/staging/lustre/lustre/llite/llite_internal.h b/drivers/staging/lustre/lustre/llite/llite_internal.h
index 4bc551279aa4..065a9a7e120a 100644
--- a/drivers/staging/lustre/lustre/llite/llite_internal.h
+++ b/drivers/staging/lustre/lustre/llite/llite_internal.h
@@ -97,31 +97,20 @@ struct ll_grouplock {
unsigned long lg_gid;
};
-enum lli_flags {
- /* MDS has an authority for the Size-on-MDS attributes. */
- LLIF_MDS_SIZE_LOCK = (1 << 0),
- /* Epoch close is postponed. */
- LLIF_EPOCH_PENDING = (1 << 1),
- /* DONE WRITING is allowed. */
- LLIF_DONE_WRITING = (1 << 2),
- /* Size-on-MDS attributes are changed. An attribute update needs to
- * be sent to MDS.
- */
- LLIF_SOM_DIRTY = (1 << 3),
+enum ll_file_flags {
/* File data is modified. */
- LLIF_DATA_MODIFIED = (1 << 4),
+ LLIF_DATA_MODIFIED = 0,
/* File is being restored */
- LLIF_FILE_RESTORING = (1 << 5),
+ LLIF_FILE_RESTORING = 1,
/* Xattr cache is attached to the file */
- LLIF_XATTR_CACHE = (1 << 6),
+ LLIF_XATTR_CACHE = 2,
};
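
The flag enum switches from mask values (1 << n) to bit numbers because lli_flags becomes an unsigned long manipulated with atomic bitops, removing the need to take lli_lock around flag read-modify-write. A sketch of the resulting usage, matching the set_bit() call later in this patch (the test_bit/clear_bit lines and the CDEBUG are illustrative only):

/* Atomic flag handling on the new unsigned long field; bit
 * numbers, not masks, are what the kernel bitops expect.
 */
set_bit(LLIF_FILE_RESTORING, &lli->lli_flags);	/* no lli_lock needed */

if (test_bit(LLIF_FILE_RESTORING, &lli->lli_flags))
	CDEBUG(D_INODE, "file is being restored\n");

clear_bit(LLIF_FILE_RESTORING, &lli->lli_flags);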
struct ll_inode_info {
__u32 lli_inode_magic;
- __u32 lli_flags;
- __u64 lli_ioepoch;
spinlock_t lli_lock;
+ unsigned long lli_flags;
struct posix_acl *lli_posix_acl;
/* identifying fields for both metadata and data stacks. */
@@ -129,14 +118,6 @@ struct ll_inode_info {
/* master inode fid for stripe directory */
struct lu_fid lli_pfid;
- struct list_head lli_close_list;
-
- /* handle is to be sent to MDS later on done_writing and setattr.
- * Open handle data are needed for the recovery to reconstruct
- * the inode state on the MDS. XXX: recovery is not ready yet.
- */
- struct obd_client_handle *lli_pending_och;
-
/* We need all three because every inode may be opened in different
* modes
*/
@@ -204,7 +185,6 @@ struct ll_inode_info {
struct {
struct mutex lli_size_mutex;
char *lli_symlink_name;
- __u64 lli_maxbytes;
/*
* struct rw_semaphore {
* signed long count; // align d.d_def_acl
@@ -245,7 +225,6 @@ struct ll_inode_info {
* In the future, if more members are added only for directory,
* some of the following members can be moved into u.f.
*/
- bool lli_has_smd;
struct cl_object *lli_clob;
/* mutex to request for layout lock exclusively. */
@@ -282,6 +261,9 @@ int ll_xattr_cache_destroy(struct inode *inode);
int ll_xattr_cache_get(struct inode *inode, const char *name,
char *buffer, size_t size, __u64 valid);
+int ll_init_security(struct dentry *dentry, struct inode *inode,
+ struct inode *dir);
+
/*
* Locking to guarantee consistency of non-atomic updates to long long i_size,
* consistency between file size and KMS.
@@ -400,7 +382,7 @@ enum stats_track_type {
#define LL_SBI_LOCALFLOCK 0x200 /* Local flocks support by kernel */
#define LL_SBI_LRU_RESIZE 0x400 /* lru resize support */
#define LL_SBI_LAZYSTATFS 0x800 /* lazystatfs mount option */
-#define LL_SBI_SOM_PREVIEW 0x1000 /* SOM preview mount option */
+/* LL_SBI_SOM_PREVIEW 0x1000 SOM preview mount option, obsolete */
#define LL_SBI_32BIT_API 0x2000 /* generate 32 bit inodes. */
#define LL_SBI_64BIT_HASH 0x4000 /* support 64-bits dir hash/offset */
#define LL_SBI_AGL_ENABLED 0x8000 /* enable agl */
@@ -409,6 +391,8 @@ enum stats_track_type {
#define LL_SBI_USER_FID2PATH 0x40000 /* allow fid2path by unprivileged users */
#define LL_SBI_XATTR_CACHE 0x80000 /* support for xattr cache */
#define LL_SBI_NOROOTSQUASH 0x100000 /* do not apply root squash */
+#define LL_SBI_ALWAYS_PING 0x200000 /* always ping even if server
+ * suppress_pings */
#define LL_SBI_FLAGS { \
"nolck", \
@@ -432,6 +416,7 @@ enum stats_track_type {
"user_fid2path",\
"xattr_cache", \
"norootsquash", \
+ "always_ping", \
}
/*
@@ -466,10 +451,10 @@ struct ll_sb_info {
int ll_flags;
unsigned int ll_umounting:1,
- ll_xattr_cache_enabled:1;
- struct lustre_client_ocd ll_lco;
+ ll_xattr_cache_enabled:1,
+ ll_client_common_fill_super_succeeded:1;
- struct ll_close_queue *ll_lcq;
+ struct lustre_client_ocd ll_lco;
struct lprocfs_stats *ll_stats; /* lprocfs stats counter */
@@ -630,8 +615,6 @@ struct ll_file_data {
struct list_head fd_lccs; /* list of ll_cl_context */
};
-struct lov_stripe_md;
-
extern struct dentry *llite_root;
extern struct kset *llite_kset;
@@ -682,8 +665,6 @@ enum {
LPROC_LL_WRITE_BYTES,
LPROC_LL_BRW_READ,
LPROC_LL_BRW_WRITE,
- LPROC_LL_OSC_READ,
- LPROC_LL_OSC_WRITE,
LPROC_LL_IOCTL,
LPROC_LL_OPEN,
LPROC_LL_RELEASE,
@@ -741,9 +722,7 @@ int ll_writepage(struct page *page, struct writeback_control *wbc);
int ll_writepages(struct address_space *, struct writeback_control *wbc);
int ll_readpage(struct file *file, struct page *page);
void ll_readahead_init(struct inode *inode, struct ll_readahead_state *ras);
-int ll_readahead(const struct lu_env *env, struct cl_io *io,
- struct cl_page_list *queue, struct ll_readahead_state *ras,
- bool hit);
+int vvp_io_write_commit(const struct lu_env *env, struct cl_io *io);
struct ll_cl_context *ll_cl_find(struct file *file);
void ll_cl_add(struct file *file, const struct lu_env *env, struct cl_io *io);
void ll_cl_remove(struct file *file, const struct lu_env *env);
@@ -762,25 +741,14 @@ enum ldlm_mode ll_take_md_lock(struct inode *inode, __u64 bits,
enum ldlm_mode mode);
int ll_file_open(struct inode *inode, struct file *file);
int ll_file_release(struct inode *inode, struct file *file);
-int ll_glimpse_ioctl(struct ll_sb_info *sbi,
- struct lov_stripe_md *lsm, lstat_t *st);
-void ll_ioepoch_open(struct ll_inode_info *lli, __u64 ioepoch);
int ll_release_openhandle(struct inode *, struct lookup_intent *);
int ll_md_real_close(struct inode *inode, fmode_t fmode);
-void ll_ioepoch_close(struct inode *inode, struct md_op_data *op_data,
- struct obd_client_handle **och, unsigned long flags);
-void ll_done_writing_attr(struct inode *inode, struct md_op_data *op_data);
-int ll_som_update(struct inode *inode, struct md_op_data *op_data);
-int ll_inode_getattr(struct inode *inode, struct obdo *obdo,
- __u64 ioepoch, int sync);
-void ll_pack_inode2opdata(struct inode *inode, struct md_op_data *op_data,
- struct lustre_handle *fh);
int ll_getattr(struct vfsmount *mnt, struct dentry *de, struct kstat *stat);
struct posix_acl *ll_get_acl(struct inode *inode, int type);
int ll_migrate(struct inode *parent, struct file *file, int mdtidx,
const char *name, int namelen);
int ll_get_fid_by_name(struct inode *parent, const char *name,
- int namelen, struct lu_fid *fid);
+ int namelen, struct lu_fid *fid, struct inode **inode);
int ll_inode_permission(struct inode *inode, int mask);
int ll_lov_setstripe_ea_info(struct inode *inode, struct dentry *dentry,
@@ -801,7 +769,6 @@ int ll_hsm_release(struct inode *inode);
/* llite/dcache.c */
-int ll_d_init(struct dentry *de);
extern const struct dentry_operations ll_d_ops;
void ll_intent_drop_lock(struct lookup_intent *);
void ll_intent_release(struct lookup_intent *);
@@ -818,6 +785,7 @@ int ll_fill_super(struct super_block *sb, struct vfsmount *mnt);
void ll_put_super(struct super_block *sb);
void ll_kill_super(struct super_block *sb);
struct inode *ll_inode_from_resource_lock(struct ldlm_lock *lock);
+void ll_dir_clear_lsm_md(struct inode *inode);
void ll_clear_inode(struct inode *inode);
int ll_setattr_raw(struct dentry *dentry, struct iattr *attr, bool hsm_import);
int ll_setattr(struct dentry *de, struct iattr *attr);
@@ -891,18 +859,6 @@ int ll_dir_get_parent_fid(struct inode *dir, struct lu_fid *parent_fid);
/* llite/symlink.c */
extern const struct inode_operations ll_fast_symlink_inode_operations;
-/* llite/llite_close.c */
-struct ll_close_queue {
- spinlock_t lcq_lock;
- struct list_head lcq_head;
- wait_queue_head_t lcq_waitq;
- struct completion lcq_comp;
- atomic_t lcq_stop;
-};
-
-void vvp_write_pending(struct vvp_object *club, struct vvp_page *page);
-void vvp_write_complete(struct vvp_object *club, struct vvp_page *page);
-
/**
* IO arguments for various VFS I/O interfaces.
*/
@@ -945,15 +901,11 @@ static inline struct vvp_io_args *ll_env_args(const struct lu_env *env)
return &ll_env_info(env)->lti_args;
}
-void ll_queue_done_writing(struct inode *inode, unsigned long flags);
-void ll_close_thread_shutdown(struct ll_close_queue *lcq);
-int ll_close_thread_start(struct ll_close_queue **lcq_ret);
-
/* llite/llite_mmap.c */
int ll_teardown_mmaps(struct address_space *mapping, __u64 first, __u64 last);
int ll_file_mmap(struct file *file, struct vm_area_struct *vma);
-void policy_from_vma(ldlm_policy_data_t *policy, struct vm_area_struct *vma,
+void policy_from_vma(union ldlm_policy_data *policy, struct vm_area_struct *vma,
unsigned long addr, size_t count);
struct vm_area_struct *our_vma(struct mm_struct *mm, unsigned long addr,
size_t count);
@@ -1024,9 +976,14 @@ static inline struct lu_fid *ll_inode2fid(struct inode *inode)
return fid;
}
-static inline __u64 ll_file_maxbytes(struct inode *inode)
+static inline loff_t ll_file_maxbytes(struct inode *inode)
{
- return ll_i2info(inode)->lli_maxbytes;
+ struct cl_object *obj = ll_i2info(inode)->lli_clob;
+
+ if (!obj)
+ return MAX_LFS_FILESIZE;
+
+ return min_t(loff_t, cl_object_maxbytes(obj), MAX_LFS_FILESIZE);
}
/* llite/xattr.c */
@@ -1043,17 +1000,18 @@ extern const struct xattr_handler *ll_xattr_handlers[];
ssize_t ll_listxattr(struct dentry *dentry, char *buffer, size_t size);
int ll_xattr_list(struct inode *inode, const char *name, int type,
void *buffer, size_t size, __u64 valid);
+const struct xattr_handler *get_xattr_type(const char *name);
/**
* Common IO arguments for various VFS I/O interfaces.
*/
int cl_sb_init(struct super_block *sb);
int cl_sb_fini(struct super_block *sb);
-void ll_io_init(struct cl_io *io, const struct file *file, int write);
-void ras_update(struct ll_sb_info *sbi, struct inode *inode,
- struct ll_readahead_state *ras, unsigned long index,
- unsigned hit);
+enum ras_update_flags {
+ LL_RAS_HIT = 0x1,
+ LL_RAS_MMAP = 0x2
+};
void ll_ra_count_put(struct ll_sb_info *sbi, unsigned long len);
void ll_ra_stats_inc(struct inode *inode, enum ra_stat which);
@@ -1189,7 +1147,7 @@ dentry_may_statahead(struct inode *dir, struct dentry *dentry)
* 'lld_sa_generation == lli->lli_sa_generation'.
*/
ldd = ll_d2d(dentry);
- if (ldd && ldd->lld_sa_generation == lli->lli_sa_generation)
+ if (ldd->lld_sa_generation == lli->lli_sa_generation)
return false;
return true;
@@ -1258,15 +1216,6 @@ struct ll_dio_pages {
int ldp_nr;
};
-static inline void cl_stats_tally(struct cl_device *dev, enum cl_req_type crt,
- int rc)
-{
- int opc = (crt == CRT_READ) ? LPROC_LL_OSC_READ :
- LPROC_LL_OSC_WRITE;
-
- ll_stats_ops_tally(ll_s2sbi(cl2vvp_dev(dev)->vdv_sb), opc, rc);
-}
-
ssize_t ll_direct_rw_pages(const struct lu_env *env, struct cl_io *io,
int rw, struct inode *inode,
struct ll_dio_pages *pv);
@@ -1317,17 +1266,7 @@ static inline void ll_set_lock_data(struct obd_export *exp, struct inode *inode,
static inline int d_lustre_invalid(const struct dentry *dentry)
{
- struct ll_dentry_data *lld = ll_d2d(dentry);
-
- return !lld || lld->lld_invalid;
-}
-
-static inline void __d_lustre_invalidate(struct dentry *dentry)
-{
- struct ll_dentry_data *lld = ll_d2d(dentry);
-
- if (lld)
- lld->lld_invalid = 1;
+ return ll_d2d(dentry)->lld_invalid;
}
/*
@@ -1343,7 +1282,7 @@ static inline void d_lustre_invalidate(struct dentry *dentry, int nested)
spin_lock_nested(&dentry->d_lock,
nested ? DENTRY_D_LOCK_NESTED : DENTRY_D_LOCK_NORMAL);
- __d_lustre_invalidate(dentry);
+ ll_d2d(dentry)->lld_invalid = 1;
/*
* We should be careful about dentries created by d_obtain_alias().
* These dentries are not put in the dentry tree, instead they are
@@ -1365,11 +1304,6 @@ static inline void d_lustre_revalidate(struct dentry *dentry)
spin_unlock(&dentry->d_lock);
}
-enum {
- LL_LAYOUT_GEN_NONE = ((__u32)-2), /* layout lock was cancelled */
- LL_LAYOUT_GEN_EMPTY = ((__u32)-1) /* for empty layout */
-};
-
int ll_layout_conf(struct inode *inode, const struct cl_object_conf *conf);
int ll_layout_refresh(struct inode *inode, __u32 *gen);
int ll_layout_restore(struct inode *inode, loff_t start, __u64 length);
@@ -1383,14 +1317,14 @@ int ll_page_sync_io(const struct lu_env *env, struct cl_io *io,
int ll_getparent(struct file *file, struct getparent __user *arg);
/* lcommon_cl.c */
-int cl_setattr_ost(struct inode *inode, const struct iattr *attr);
+int cl_setattr_ost(struct cl_object *obj, const struct iattr *attr,
+ unsigned int attr_flags);
extern struct lu_env *cl_inode_fini_env;
extern int cl_inode_fini_refcheck;
int cl_file_inode_init(struct inode *inode, struct lustre_md *md);
void cl_inode_fini(struct inode *inode);
-int cl_local_size(struct inode *inode);
__u64 cl_fid_build_ino(const struct lu_fid *fid, int api32);
__u32 cl_fid_build_gen(const struct lu_fid *fid);
diff --git a/drivers/staging/lustre/lustre/llite/llite_lib.c b/drivers/staging/lustre/lustre/llite/llite_lib.c
index e5c62f4ce3d8..25f5aed97f63 100644
--- a/drivers/staging/lustre/lustre/llite/llite_lib.c
+++ b/drivers/staging/lustre/lustre/llite/llite_lib.c
@@ -191,10 +191,8 @@ static int client_common_fill_super(struct super_block *sb, char *md, char *dt,
OBD_CONNECT_FLOCK_DEAD |
OBD_CONNECT_DISP_STRIPE | OBD_CONNECT_LFSCK |
OBD_CONNECT_OPEN_BY_FID |
- OBD_CONNECT_DIR_STRIPE;
-
- if (sbi->ll_flags & LL_SBI_SOM_PREVIEW)
- data->ocd_connect_flags |= OBD_CONNECT_SOM;
+ OBD_CONNECT_DIR_STRIPE |
+ OBD_CONNECT_BULK_MBITS;
if (sbi->ll_flags & LL_SBI_LRU_RESIZE)
data->ocd_connect_flags |= OBD_CONNECT_LRU_RESIZE;
@@ -226,6 +224,10 @@ static int client_common_fill_super(struct super_block *sb, char *md, char *dt,
/* real client */
data->ocd_connect_flags |= OBD_CONNECT_REAL;
+ /* always ping even if server suppress_pings */
+ if (sbi->ll_flags & LL_SBI_ALWAYS_PING)
+ data->ocd_connect_flags &= ~OBD_CONNECT_PINGLESS;
+
data->ocd_brw_size = MD_MAX_BRW_SIZE;
err = obd_connect(NULL, &sbi->ll_md_exp, obd, &sbi->ll_sb_uuid,
@@ -288,7 +290,7 @@ static int client_common_fill_super(struct super_block *sb, char *md, char *dt,
size = sizeof(*data);
err = obd_get_info(NULL, sbi->ll_md_exp, sizeof(KEY_CONN_DATA),
- KEY_CONN_DATA, &size, data, NULL);
+ KEY_CONN_DATA, &size, data);
if (err) {
CERROR("%s: Get connect data failed: rc = %d\n",
sbi->ll_md_exp->exp_obd->obd_name, err);
@@ -355,10 +357,9 @@ static int client_common_fill_super(struct super_block *sb, char *md, char *dt,
OBD_CONNECT_64BITHASH | OBD_CONNECT_MAXBYTES |
OBD_CONNECT_EINPROGRESS |
OBD_CONNECT_JOBSTATS | OBD_CONNECT_LVB_TYPE |
- OBD_CONNECT_LAYOUTLOCK | OBD_CONNECT_PINGLESS;
-
- if (sbi->ll_flags & LL_SBI_SOM_PREVIEW)
- data->ocd_connect_flags |= OBD_CONNECT_SOM;
+ OBD_CONNECT_LAYOUTLOCK |
+ OBD_CONNECT_PINGLESS | OBD_CONNECT_LFSCK |
+ OBD_CONNECT_BULK_MBITS;
if (!OBD_FAIL_CHECK(OBD_FAIL_OSC_CONNECT_CKSUM)) {
/* OBD_CONNECT_CKSUM should always be set, even if checksums are
@@ -376,6 +377,10 @@ static int client_common_fill_super(struct super_block *sb, char *md, char *dt,
data->ocd_connect_flags |= OBD_CONNECT_LRU_RESIZE;
+ /* always ping even if server suppress_pings */
+ if (sbi->ll_flags & LL_SBI_ALWAYS_PING)
+ data->ocd_connect_flags &= ~OBD_CONNECT_PINGLESS;
+
CDEBUG(D_RPCTRACE, "ocd_connect_flags: %#llx ocd_version: %d ocd_grant: %d\n",
data->ocd_connect_flags,
data->ocd_version, data->ocd_grant);
@@ -475,8 +480,6 @@ static int client_common_fill_super(struct super_block *sb, char *md, char *dt,
ptlrpc_req_finished(request);
if (IS_ERR(root)) {
- if (lmd.lsm)
- obd_free_memmd(sbi->ll_dt_exp, &lmd.lsm);
#ifdef CONFIG_FS_POSIX_ACL
if (lmd.posix_acl) {
posix_acl_release(lmd.posix_acl);
@@ -488,12 +491,6 @@ static int client_common_fill_super(struct super_block *sb, char *md, char *dt,
goto out_root;
}
- err = ll_close_thread_start(&sbi->ll_lcq);
- if (err) {
- CERROR("cannot start close thread: rc %d\n", err);
- goto out_root;
- }
-
checksum = sbi->ll_flags & LL_SBI_CHECKSUM;
err = obd_set_info_async(NULL, sbi->ll_dt_exp, sizeof(KEY_CHECKSUM),
KEY_CHECKSUM, sizeof(checksum), &checksum,
@@ -572,10 +569,18 @@ int ll_get_max_mdsize(struct ll_sb_info *sbi, int *lmmsize)
{
int size, rc;
- *lmmsize = obd_size_diskmd(sbi->ll_dt_exp, NULL);
+ size = sizeof(*lmmsize);
+ rc = obd_get_info(NULL, sbi->ll_dt_exp, sizeof(KEY_MAX_EASIZE),
+ KEY_MAX_EASIZE, &size, lmmsize);
+ if (rc) {
+ CERROR("%s: cannot get max LOV EA size: rc = %d\n",
+ sbi->ll_dt_exp->exp_obd->obd_name, rc);
+ return rc;
+ }
+
size = sizeof(int);
rc = obd_get_info(NULL, sbi->ll_md_exp, sizeof(KEY_MAX_EASIZE),
- KEY_MAX_EASIZE, &size, lmmsize, NULL);
+ KEY_MAX_EASIZE, &size, lmmsize);
if (rc)
CERROR("Get max mdsize error rc %d\n", rc);
@@ -599,7 +604,7 @@ int ll_get_default_mdsize(struct ll_sb_info *sbi, int *lmmsize)
size = sizeof(int);
rc = obd_get_info(NULL, sbi->ll_md_exp, sizeof(KEY_DEFAULT_EASIZE),
- KEY_DEFAULT_EASIZE, &size, lmmsize, NULL);
+ KEY_DEFAULT_EASIZE, &size, lmmsize);
if (rc)
CERROR("Get default mdsize error rc %d\n", rc);
@@ -633,8 +638,6 @@ static void client_common_put_super(struct super_block *sb)
{
struct ll_sb_info *sbi = ll_s2sbi(sb);
- ll_close_thread_shutdown(sbi->ll_lcq);
-
cl_sb_fini(sb);
obd_fid_fini(sbi->ll_dt_exp->exp_obd);
@@ -725,6 +728,18 @@ static int ll_options(char *options, int *flags)
*flags &= ~tmp;
goto next;
}
+ tmp = ll_set_opt("context", s1, 1);
+ if (tmp)
+ goto next;
+ tmp = ll_set_opt("fscontext", s1, 1);
+ if (tmp)
+ goto next;
+ tmp = ll_set_opt("defcontext", s1, 1);
+ if (tmp)
+ goto next;
+ tmp = ll_set_opt("rootcontext", s1, 1);
+ if (tmp)
+ goto next;
tmp = ll_set_opt("user_fid2path", s1, LL_SBI_USER_FID2PATH);
if (tmp) {
*flags |= tmp;
@@ -766,11 +781,6 @@ static int ll_options(char *options, int *flags)
*flags &= ~tmp;
goto next;
}
- tmp = ll_set_opt("som_preview", s1, LL_SBI_SOM_PREVIEW);
- if (tmp) {
- *flags |= tmp;
- goto next;
- }
tmp = ll_set_opt("32bitapi", s1, LL_SBI_32BIT_API);
if (tmp) {
*flags |= tmp;
@@ -786,6 +796,11 @@ static int ll_options(char *options, int *flags)
*flags &= ~tmp;
goto next;
}
+ tmp = ll_set_opt("always_ping", s1, LL_SBI_ALWAYS_PING);
+ if (tmp) {
+ *flags |= tmp;
+ goto next;
+ }
LCONSOLE_ERROR_MSG(0x152, "Unknown option '%s', won't mount.\n",
s1);
return -EINVAL;
@@ -804,14 +819,10 @@ void ll_lli_init(struct ll_inode_info *lli)
{
lli->lli_inode_magic = LLI_INODE_MAGIC;
lli->lli_flags = 0;
- lli->lli_ioepoch = 0;
- lli->lli_maxbytes = MAX_LFS_FILESIZE;
spin_lock_init(&lli->lli_lock);
lli->lli_posix_acl = NULL;
/* Do not set lli_fid, it has been initialized already. */
fid_zero(&lli->lli_pfid);
- INIT_LIST_HEAD(&lli->lli_close_list);
- lli->lli_pending_och = NULL;
lli->lli_mds_read_och = NULL;
lli->lli_mds_write_och = NULL;
lli->lli_mds_exec_och = NULL;
@@ -820,9 +831,8 @@ void ll_lli_init(struct ll_inode_info *lli)
lli->lli_open_fd_exec_count = 0;
mutex_init(&lli->lli_och_mutex);
spin_lock_init(&lli->lli_agl_lock);
- lli->lli_has_smd = false;
spin_lock_init(&lli->lli_layout_lock);
- ll_layout_version_set(lli, LL_LAYOUT_GEN_NONE);
+ ll_layout_version_set(lli, CL_LAYOUT_GEN_NONE);
lli->lli_clob = NULL;
init_rwsem(&lli->lli_xattrs_list_rwsem);
@@ -941,10 +951,14 @@ int ll_fill_super(struct super_block *sb, struct vfsmount *mnt)
/* connections, registrations, sb setup */
err = client_common_fill_super(sb, md, dt, mnt);
+ if (!err)
+ sbi->ll_client_common_fill_super_succeeded = 1;
out_free:
kfree(md);
kfree(dt);
+ if (lprof)
+ class_put_profile(lprof);
if (err)
ll_put_super(sb);
else if (sbi->ll_flags & LL_SBI_VERBOSE)
@@ -1002,7 +1016,7 @@ void ll_put_super(struct super_block *sb)
}
}
- if (sbi->ll_lcq) {
+ if (sbi->ll_client_common_fill_super_succeeded) {
/* Only if client_common_fill_super succeeded */
client_common_put_super(sb);
}
@@ -1057,7 +1071,7 @@ struct inode *ll_inode_from_resource_lock(struct ldlm_lock *lock)
return inode;
}
-static void ll_dir_clear_lsm_md(struct inode *inode)
+void ll_dir_clear_lsm_md(struct inode *inode)
{
struct ll_inode_info *lli = ll_i2info(inode);
@@ -1205,16 +1219,44 @@ static int ll_update_lsm_md(struct inode *inode, struct lustre_md *md)
/* set the directory layout */
if (!lli->lli_lsm_md) {
+ struct cl_attr *attr;
+
rc = ll_init_lsm_md(inode, md);
if (rc)
return rc;
- lli->lli_lsm_md = lsm;
/*
* set lsm_md to NULL, so the following free lustre_md
* will not free this lsm
*/
md->lmv = NULL;
+ lli->lli_lsm_md = lsm;
+
+ attr = kzalloc(sizeof(*attr), GFP_NOFS);
+ if (!attr)
+ return -ENOMEM;
+
+ /* validate the lsm */
+ rc = md_merge_attr(ll_i2mdexp(inode), lsm, attr,
+ ll_md_blocking_ast);
+ if (rc) {
+ kfree(attr);
+ return rc;
+ }
+
+ if (md->body->mbo_valid & OBD_MD_FLNLINK)
+ md->body->mbo_nlink = attr->cat_nlink;
+ if (md->body->mbo_valid & OBD_MD_FLSIZE)
+ md->body->mbo_size = attr->cat_size;
+ if (md->body->mbo_valid & OBD_MD_FLATIME)
+ md->body->mbo_atime = attr->cat_atime;
+ if (md->body->mbo_valid & OBD_MD_FLCTIME)
+ md->body->mbo_ctime = attr->cat_ctime;
+ if (md->body->mbo_valid & OBD_MD_FLMTIME)
+ md->body->mbo_mtime = attr->cat_mtime;
+
+ kfree(attr);
+
CDEBUG(D_INODE, "Set lsm %p magic %x to "DFID"\n", lsm,
lsm->lsm_md_magic, PFID(ll_inode2fid(inode)));
return 0;
@@ -1272,9 +1314,6 @@ void ll_clear_inode(struct inode *inode)
LASSERT(lli->lli_opendir_pid == 0);
}
- spin_lock(&lli->lli_lock);
- ll_i2info(inode)->lli_flags &= ~LLIF_MDS_SIZE_LOCK;
- spin_unlock(&lli->lli_lock);
md_null_inode(sbi->ll_md_exp, ll_inode2fid(inode));
LASSERT(!lli->lli_open_fd_write_count);
@@ -1313,13 +1352,11 @@ void ll_clear_inode(struct inode *inode)
* cl_object still uses inode lsm.
*/
cl_inode_fini(inode);
- lli->lli_has_smd = false;
}
#define TIMES_SET_FLAGS (ATTR_MTIME_SET | ATTR_ATIME_SET | ATTR_TIMES_SET)
-static int ll_md_setattr(struct dentry *dentry, struct md_op_data *op_data,
- struct md_open_data **mod)
+static int ll_md_setattr(struct dentry *dentry, struct md_op_data *op_data)
{
struct lustre_md md;
struct inode *inode = d_inode(dentry);
@@ -1332,8 +1369,7 @@ static int ll_md_setattr(struct dentry *dentry, struct md_op_data *op_data,
if (IS_ERR(op_data))
return PTR_ERR(op_data);
- rc = md_setattr(sbi->ll_md_exp, op_data, NULL, 0, NULL, 0,
- &request, mod);
+ rc = md_setattr(sbi->ll_md_exp, op_data, NULL, 0, &request);
if (rc) {
ptlrpc_req_finished(request);
if (rc == -ENOENT) {
@@ -1369,48 +1405,12 @@ static int ll_md_setattr(struct dentry *dentry, struct md_op_data *op_data,
rc = simple_setattr(dentry, &op_data->op_attr);
op_data->op_attr.ia_valid = ia_valid;
- /* Extract epoch data if obtained. */
- op_data->op_handle = md.body->mbo_handle;
- op_data->op_ioepoch = md.body->mbo_ioepoch;
-
rc = ll_update_inode(inode, &md);
ptlrpc_req_finished(request);
return rc;
}
-/* Close IO epoch and send Size-on-MDS attribute update. */
-static int ll_setattr_done_writing(struct inode *inode,
- struct md_op_data *op_data,
- struct md_open_data *mod)
-{
- struct ll_inode_info *lli = ll_i2info(inode);
- int rc = 0;
-
- if (!S_ISREG(inode->i_mode))
- return 0;
-
- CDEBUG(D_INODE, "Epoch %llu closed on "DFID" for truncate\n",
- op_data->op_ioepoch, PFID(&lli->lli_fid));
-
- op_data->op_flags = MF_EPOCH_CLOSE;
- ll_done_writing_attr(inode, op_data);
- ll_pack_inode2opdata(inode, op_data, NULL);
-
- rc = md_done_writing(ll_i2sbi(inode)->ll_md_exp, op_data, mod);
- if (rc == -EAGAIN)
- /* MDS has instructed us to obtain Size-on-MDS attribute
- * from OSTs and send setattr back to MDS.
- */
- rc = ll_som_update(inode, op_data);
- else if (rc) {
- CERROR("%s: inode "DFID" mdc truncate failed: rc = %d\n",
- ll_i2sbi(inode)->ll_md_exp->exp_obd->obd_name,
- PFID(ll_inode2fid(inode)), rc);
- }
- return rc;
-}
-
/* If this inode has objects allocated to it (lsm != NULL), then the OST
* object(s) determine the file size and mtime. Otherwise, the MDS will
 * keep these values until such time as objects are allocated for it.
@@ -1431,9 +1431,8 @@ int ll_setattr_raw(struct dentry *dentry, struct iattr *attr, bool hsm_import)
struct inode *inode = d_inode(dentry);
struct ll_inode_info *lli = ll_i2info(inode);
struct md_op_data *op_data = NULL;
- struct md_open_data *mod = NULL;
bool file_is_released = false;
- int rc = 0, rc1 = 0;
+ int rc = 0;
CDEBUG(D_VFSTRACE, "%s: setattr inode "DFID"(%p) from %llu to %llu, valid %x, hsm_import %d\n",
ll_get_fsname(inode->i_sb, NULL, 0), PFID(&lli->lli_fid), inode,
@@ -1503,14 +1502,33 @@ int ll_setattr_raw(struct dentry *dentry, struct iattr *attr, bool hsm_import)
* but other attributes must be set
*/
if (S_ISREG(inode->i_mode)) {
- struct lov_stripe_md *lsm;
+ struct cl_layout cl = {
+ .cl_is_released = false,
+ };
+ struct lu_env *env;
+ int refcheck;
__u32 gen;
- ll_layout_refresh(inode, &gen);
- lsm = ccc_inode_lsm_get(inode);
- if (lsm && lsm->lsm_pattern & LOV_PATTERN_F_RELEASED)
- file_is_released = true;
- ccc_inode_lsm_put(inode, lsm);
+ rc = ll_layout_refresh(inode, &gen);
+ if (rc < 0)
+ goto out;
+
+ /*
+ * XXX: the only place we need to know the layout type,
+ * this will be removed by a later patch. -Jinshan
+ */
+ env = cl_env_get(&refcheck);
+ if (IS_ERR(env)) {
+ rc = PTR_ERR(env);
+ goto out;
+ }
+
+ rc = cl_object_layout_get(env, lli->lli_clob, &cl);
+ cl_env_put(env, &refcheck);
+ if (rc < 0)
+ goto out;
+
+ file_is_released = cl.cl_is_released;
if (!hsm_import && attr->ia_valid & ATTR_SIZE) {
if (file_is_released) {
@@ -1527,32 +1545,16 @@ int ll_setattr_raw(struct dentry *dentry, struct iattr *attr, bool hsm_import)
* modified, flag it.
*/
attr->ia_valid |= MDS_OPEN_OWNEROVERRIDE;
- spin_lock(&lli->lli_lock);
- lli->lli_flags |= LLIF_DATA_MODIFIED;
- spin_unlock(&lli->lli_lock);
op_data->op_bias |= MDS_DATA_MODIFIED;
}
}
memcpy(&op_data->op_attr, attr, sizeof(*attr));
- /* Open epoch for truncate. */
- if (exp_connect_som(ll_i2mdexp(inode)) && !hsm_import &&
- (attr->ia_valid & (ATTR_SIZE | ATTR_MTIME | ATTR_MTIME_SET)))
- op_data->op_flags = MF_EPOCH_OPEN;
-
- rc = ll_md_setattr(dentry, op_data, &mod);
+ rc = ll_md_setattr(dentry, op_data);
if (rc)
goto out;
- /* RPC to MDT is sent, cancel data modification flag */
- if (op_data->op_bias & MDS_DATA_MODIFIED) {
- spin_lock(&lli->lli_lock);
- lli->lli_flags &= ~LLIF_DATA_MODIFIED;
- spin_unlock(&lli->lli_lock);
- }
-
- ll_ioepoch_open(lli, op_data->op_ioepoch);
if (!S_ISREG(inode->i_mode) || file_is_released) {
rc = 0;
goto out;
@@ -1568,19 +1570,11 @@ int ll_setattr_raw(struct dentry *dentry, struct iattr *attr, bool hsm_import)
* setting times to past, but it is necessary due to possible
* time de-synchronization between MDT inode and OST objects
*/
- if (attr->ia_valid & ATTR_SIZE)
- down_write(&lli->lli_trunc_sem);
- rc = cl_setattr_ost(inode, attr);
- if (attr->ia_valid & ATTR_SIZE)
- up_write(&lli->lli_trunc_sem);
+ rc = cl_setattr_ost(ll_i2info(inode)->lli_clob, attr, 0);
}
out:
- if (op_data->op_ioepoch) {
- rc1 = ll_setattr_done_writing(inode, op_data, mod);
- if (!rc)
- rc = rc1;
- }
- ll_finish_md_op_data(op_data);
+ if (op_data)
+ ll_finish_md_op_data(op_data);
if (!S_ISDIR(inode->i_mode)) {
inode_lock(inode);
@@ -1736,19 +1730,10 @@ int ll_update_inode(struct inode *inode, struct lustre_md *md)
{
struct ll_inode_info *lli = ll_i2info(inode);
struct mdt_body *body = md->body;
- struct lov_stripe_md *lsm = md->lsm;
struct ll_sb_info *sbi = ll_i2sbi(inode);
- LASSERT((lsm != NULL) == ((body->mbo_valid & OBD_MD_FLEASIZE) != 0));
- if (lsm) {
- if (!lli->lli_has_smd &&
- !(sbi->ll_flags & LL_SBI_LAYOUT_LOCK))
- cl_file_inode_init(inode, md);
-
- lli->lli_maxbytes = lsm->lsm_maxbytes;
- if (lli->lli_maxbytes > MAX_LFS_FILESIZE)
- lli->lli_maxbytes = MAX_LFS_FILESIZE;
- }
+ if (body->mbo_valid & OBD_MD_FLEASIZE)
+ cl_file_inode_init(inode, md);
if (S_ISDIR(inode->i_mode)) {
int rc;
@@ -1828,48 +1813,11 @@ int ll_update_inode(struct inode *inode, struct lustre_md *md)
LASSERT(fid_seq(&lli->lli_fid) != 0);
if (body->mbo_valid & OBD_MD_FLSIZE) {
- if (exp_connect_som(ll_i2mdexp(inode)) &&
- S_ISREG(inode->i_mode)) {
- struct lustre_handle lockh;
- enum ldlm_mode mode;
-
- /* As it is possible a blocking ast has been processed
- * by this time, we need to check there is an UPDATE
- * lock on the client and set LLIF_MDS_SIZE_LOCK holding
- * it.
- */
- mode = ll_take_md_lock(inode, MDS_INODELOCK_UPDATE,
- &lockh, LDLM_FL_CBPENDING,
- LCK_CR | LCK_CW |
- LCK_PR | LCK_PW);
- if (mode) {
- if (lli->lli_flags & (LLIF_DONE_WRITING |
- LLIF_EPOCH_PENDING |
- LLIF_SOM_DIRTY)) {
- CERROR("%s: inode "DFID" flags %u still has size authority! do not trust the size obtained from MDS\n",
- sbi->ll_md_exp->exp_obd->obd_name,
- PFID(ll_inode2fid(inode)),
- lli->lli_flags);
- } else {
- /* Use old size assignment to avoid
- * deadlock bz14138 & bz14326
- */
- i_size_write(inode, body->mbo_size);
- spin_lock(&lli->lli_lock);
- lli->lli_flags |= LLIF_MDS_SIZE_LOCK;
- spin_unlock(&lli->lli_lock);
- }
- ldlm_lock_decref(&lockh, mode);
- }
- } else {
- /* Use old size assignment to avoid
- * deadlock bz14138 & bz14326
- */
- i_size_write(inode, body->mbo_size);
+ i_size_write(inode, body->mbo_size);
- CDEBUG(D_VFSTRACE, "inode=%lu, updating i_size %llu\n",
- inode->i_ino, (unsigned long long)body->mbo_size);
- }
+ CDEBUG(D_VFSTRACE, "inode=" DFID ", updating i_size %llu\n",
+ PFID(ll_inode2fid(inode)),
+ (unsigned long long)body->mbo_size);
if (body->mbo_valid & OBD_MD_FLBLOCKS)
inode->i_blocks = body->mbo_blocks;
@@ -1877,7 +1825,7 @@ int ll_update_inode(struct inode *inode, struct lustre_md *md)
if (body->mbo_valid & OBD_MD_TSTATE) {
if (body->mbo_t_state & MS_RESTORE)
- lli->lli_flags |= LLIF_FILE_RESTORING;
+ set_bit(LLIF_FILE_RESTORING, &lli->lli_flags);
}
return 0;
@@ -1892,8 +1840,6 @@ int ll_read_inode2(struct inode *inode, void *opaque)
CDEBUG(D_VFSTRACE, "VFS Op:inode="DFID"(%p)\n",
PFID(&lli->lli_fid), inode);
- LASSERT(!lli->lli_has_smd);
-
/* Core attributes from the MDS first. This is a new inode, and
* the VFS doesn't zero times in the core inode so we have to do
* it ourselves. They will be overwritten by either MDS or OST
@@ -1988,9 +1934,9 @@ int ll_iocontrol(struct inode *inode, struct file *file,
return put_user(flags, (int __user *)arg);
}
case FSFILT_IOC_SETFLAGS: {
- struct lov_stripe_md *lsm;
- struct obd_info oinfo = { };
struct md_op_data *op_data;
+ struct cl_object *obj;
+ struct iattr *attr;
if (get_user(flags, (int __user *)arg))
return -EFAULT;
@@ -2002,8 +1948,7 @@ int ll_iocontrol(struct inode *inode, struct file *file,
op_data->op_attr_flags = flags;
op_data->op_attr.ia_valid |= ATTR_ATTR_FLAG;
- rc = md_setattr(sbi->ll_md_exp, op_data,
- NULL, 0, NULL, 0, &req, NULL);
+ rc = md_setattr(sbi->ll_md_exp, op_data, NULL, 0, &req);
ll_finish_md_op_data(op_data);
ptlrpc_req_finished(req);
if (rc)
@@ -2011,30 +1956,17 @@ int ll_iocontrol(struct inode *inode, struct file *file,
inode->i_flags = ll_ext_to_inode_flags(flags);
- lsm = ccc_inode_lsm_get(inode);
- if (!lsm_has_objects(lsm)) {
- ccc_inode_lsm_put(inode, lsm);
+ obj = ll_i2info(inode)->lli_clob;
+ if (!obj)
return 0;
- }
- oinfo.oi_oa = kmem_cache_zalloc(obdo_cachep, GFP_NOFS);
- if (!oinfo.oi_oa) {
- ccc_inode_lsm_put(inode, lsm);
+ attr = kzalloc(sizeof(*attr), GFP_NOFS);
+ if (!attr)
return -ENOMEM;
- }
- oinfo.oi_md = lsm;
- oinfo.oi_oa->o_oi = lsm->lsm_oi;
- oinfo.oi_oa->o_flags = flags;
- oinfo.oi_oa->o_valid = OBD_MD_FLID | OBD_MD_FLFLAGS |
- OBD_MD_FLGROUP;
- obdo_set_parent_fid(oinfo.oi_oa, &ll_i2info(inode)->lli_fid);
- rc = obd_setattr_rqset(sbi->ll_dt_exp, &oinfo, NULL);
- kmem_cache_free(obdo_cachep, oinfo.oi_oa);
- ccc_inode_lsm_put(inode, lsm);
-
- if (rc && rc != -EPERM && rc != -EACCES)
- CERROR("osc_setattr_async fails: rc = %d\n", rc);
+ attr->ia_valid = ATTR_ATTR_FLAG;
+ rc = cl_setattr_ost(obj, attr, flags);
+ kfree(attr);
return rc;
}
default:
@@ -2164,7 +2096,6 @@ void ll_open_cleanup(struct super_block *sb, struct ptlrpc_request *open_req)
return;
op_data->op_fid1 = body->mbo_fid1;
- op_data->op_ioepoch = body->mbo_ioepoch;
op_data->op_handle = body->mbo_handle;
op_data->op_mod_time = get_seconds();
md_close(exp, op_data, NULL, &close_req);
@@ -2244,17 +2175,14 @@ int ll_prep_inode(struct inode **inode, struct ptlrpc_request *req,
conf.coc_opc = OBJECT_CONF_SET;
conf.coc_inode = *inode;
conf.coc_lock = lock;
- conf.u.coc_md = &md;
+ conf.u.coc_layout = md.layout;
(void)ll_layout_conf(*inode, &conf);
}
LDLM_LOCK_PUT(lock);
}
out:
- if (md.lsm)
- obd_free_memmd(sbi->ll_dt_exp, &md.lsm);
md_free_lustre_md(sbi->ll_md_exp, &md);
-
cleanup:
if (rc != 0 && it && it->it_op & IT_OPEN)
ll_open_cleanup(sb ? sb : (*inode)->i_sb, req);
@@ -2380,8 +2308,9 @@ struct md_op_data *ll_prep_md_op_data(struct md_op_data *op_data,
op_data->op_default_stripe_offset = -1;
if (S_ISDIR(i1->i_mode)) {
op_data->op_mea1 = ll_i2info(i1)->lli_lsm_md;
- op_data->op_default_stripe_offset =
- ll_i2info(i1)->lli_def_stripe_offset;
+ if (opc == LUSTRE_OPC_MKDIR)
+ op_data->op_default_stripe_offset =
+ ll_i2info(i1)->lli_def_stripe_offset;
}
if (i2) {
@@ -2405,8 +2334,6 @@ struct md_op_data *ll_prep_md_op_data(struct md_op_data *op_data,
op_data->op_fsuid = from_kuid(&init_user_ns, current_fsuid());
op_data->op_fsgid = from_kgid(&init_user_ns, current_fsgid());
op_data->op_cap = cfs_curproc_cap_pack();
- op_data->op_bias = 0;
- op_data->op_cli_flags = 0;
if ((opc == LUSTRE_OPC_CREATE) && name &&
filename_is_volatile(name, namelen, &op_data->op_mds))
op_data->op_bias |= MDS_CREATE_VOLATILE;
@@ -2414,10 +2341,6 @@ struct md_op_data *ll_prep_md_op_data(struct md_op_data *op_data,
op_data->op_mds = 0;
op_data->op_data = data;
- /* When called by ll_setattr_raw, file is i1. */
- if (ll_i2info(i1)->lli_flags & LLIF_DATA_MODIFIED)
- op_data->op_bias |= MDS_DATA_MODIFIED;
-
return op_data;
}
@@ -2451,6 +2374,9 @@ int ll_show_options(struct seq_file *seq, struct dentry *dentry)
if (sbi->ll_flags & LL_SBI_USER_FID2PATH)
seq_puts(seq, ",user_fid2path");
+ if (sbi->ll_flags & LL_SBI_ALWAYS_PING)
+ seq_puts(seq, ",always_ping");
+
return 0;
}
diff --git a/drivers/staging/lustre/lustre/llite/llite_mmap.c b/drivers/staging/lustre/lustre/llite/llite_mmap.c
index 436691814a5e..ee01f20d8b11 100644
--- a/drivers/staging/lustre/lustre/llite/llite_mmap.c
+++ b/drivers/staging/lustre/lustre/llite/llite_mmap.c
@@ -47,7 +47,7 @@
static const struct vm_operations_struct ll_file_vm_ops;
-void policy_from_vma(ldlm_policy_data_t *policy,
+void policy_from_vma(union ldlm_policy_data *policy,
struct vm_area_struct *vma, unsigned long addr,
size_t count)
{
@@ -80,43 +80,24 @@ struct vm_area_struct *our_vma(struct mm_struct *mm, unsigned long addr,
* API independent part for page fault initialization.
* \param vma - virtual memory area addressed by the page fault
* \param env - corresponding lu_env for processing
- * \param nest - nested level
* \param index - page index corresponding to the fault.
* \param ra_flags - vma readahead flags.
*
- * \return allocated and initialized env for fault operation.
- * \retval EINVAL if env can't allocated
- * \return other error codes from cl_io_init.
+ * \return error codes from cl_io_init.
*/
static struct cl_io *
-ll_fault_io_init(struct vm_area_struct *vma, struct lu_env **env_ret,
- struct cl_env_nest *nest, pgoff_t index,
- unsigned long *ra_flags)
+ll_fault_io_init(struct lu_env *env, struct vm_area_struct *vma,
+ pgoff_t index, unsigned long *ra_flags)
{
struct file *file = vma->vm_file;
struct inode *inode = file_inode(file);
struct cl_io *io;
struct cl_fault_io *fio;
- struct lu_env *env;
int rc;
- *env_ret = NULL;
if (ll_file_nolock(file))
return ERR_PTR(-EOPNOTSUPP);
- /*
- * page fault can be called when lustre IO is
- * already active for the current thread, e.g., when doing read/write
- * against user level buffer mapped from Lustre buffer. To avoid
- * stomping on existing context, optionally force an allocation of a new
- * one.
- */
- env = cl_env_nested_get(nest);
- if (IS_ERR(env))
- return ERR_PTR(-EINVAL);
-
- *env_ret = env;
-
restart:
io = vvp_env_thread_io(env);
io->ci_obj = ll_i2info(inode)->lli_clob;
@@ -155,7 +136,6 @@ restart:
if (io->ci_need_restart)
goto restart;
- cl_env_nested_put(nest, env);
io = ERR_PTR(rc);
}
@@ -169,13 +149,17 @@ static int ll_page_mkwrite0(struct vm_area_struct *vma, struct page *vmpage,
struct lu_env *env;
struct cl_io *io;
struct vvp_io *vio;
- struct cl_env_nest nest;
int result;
+ int refcheck;
sigset_t set;
struct inode *inode;
struct ll_inode_info *lli;
- io = ll_fault_io_init(vma, &env, &nest, vmpage->index, NULL);
+ env = cl_env_get(&refcheck);
+ if (IS_ERR(env))
+ return PTR_ERR(env);
+
+ io = ll_fault_io_init(env, vma, vmpage->index, NULL);
if (IS_ERR(io)) {
result = PTR_ERR(io);
goto out;
@@ -231,17 +215,14 @@ static int ll_page_mkwrite0(struct vm_area_struct *vma, struct page *vmpage,
result = -EAGAIN;
}
- if (result == 0) {
- spin_lock(&lli->lli_lock);
- lli->lli_flags |= LLIF_DATA_MODIFIED;
- spin_unlock(&lli->lli_lock);
- }
+ if (!result)
+ set_bit(LLIF_DATA_MODIFIED, &lli->lli_flags);
}
out_io:
cl_io_fini(env, io);
- cl_env_nested_put(&nest, env);
out:
+ cl_env_put(env, &refcheck);
CDEBUG(D_MMAP, "%s mkwrite with %d\n", current->comm, result);
LASSERT(ergo(result == 0, PageLocked(vmpage)));
@@ -285,13 +266,19 @@ static int ll_fault0(struct vm_area_struct *vma, struct vm_fault *vmf)
struct vvp_io *vio = NULL;
struct page *vmpage;
unsigned long ra_flags;
- struct cl_env_nest nest;
- int result;
+ int result = 0;
int fault_ret = 0;
+ int refcheck;
+
+ env = cl_env_get(&refcheck);
+ if (IS_ERR(env))
+ return PTR_ERR(env);
- io = ll_fault_io_init(vma, &env, &nest, vmf->pgoff, &ra_flags);
- if (IS_ERR(io))
- return to_fault_error(PTR_ERR(io));
+ io = ll_fault_io_init(env, vma, vmf->pgoff, &ra_flags);
+ if (IS_ERR(io)) {
+ result = to_fault_error(PTR_ERR(io));
+ goto out;
+ }
result = io->ci_result;
if (result == 0) {
@@ -322,14 +309,15 @@ static int ll_fault0(struct vm_area_struct *vma, struct vm_fault *vmf)
}
}
cl_io_fini(env, io);
- cl_env_nested_put(&nest, env);
vma->vm_flags |= ra_flags;
+
+out:
+ cl_env_put(env, &refcheck);
if (result != 0 && !(fault_ret & VM_FAULT_RETRY))
fault_ret |= to_fault_error(result);
- CDEBUG(D_MMAP, "%s fault %d/%d\n",
- current->comm, fault_ret, result);
+ CDEBUG(D_MMAP, "%s fault %d/%d\n", current->comm, fault_ret, result);
return fault_ret;
}
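The two fault paths above now acquire the cl_env with plain cl_env_get()/cl_env_put() and funnel every failure through a single out: label. A compile-checkable userspace skeleton of that get/use/put shape -- stub types and names only, not the real cl_env API:

#include <errno.h>
#include <stdlib.h>

struct env { int dummy; };			/* stand-in for struct lu_env */

static struct env *env_get(int *refcheck)
{
	*refcheck = 1;				/* the real API hands back a refcount cookie */
	return malloc(sizeof(struct env));
}

static void env_put(struct env *env, int *refcheck)
{
	(void)refcheck;
	free(env);
}

static int do_fault(struct env *env)		/* stand-in for the io setup and loop */
{
	(void)env;
	return 0;
}

static int fault0(void)
{
	int refcheck;
	int result;
	struct env *env = env_get(&refcheck);

	if (!env)
		return -ENOMEM;

	result = do_fault(env);			/* errors just fall through to the put */
	env_put(env, &refcheck);		/* single exit releases the env */
	return result;
}

int main(void)
{
	return fault0() ? 1 : 0;
}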
@@ -381,6 +369,7 @@ static int ll_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
bool retry;
int result;
+ file_update_time(vma->vm_file);
do {
retry = false;
result = ll_page_mkwrite0(vma, vmf->page, &retry);
diff --git a/drivers/staging/lustre/lustre/llite/llite_nfs.c b/drivers/staging/lustre/lustre/llite/llite_nfs.c
index 709230571b4b..49a930f0fc5d 100644
--- a/drivers/staging/lustre/lustre/llite/llite_nfs.c
+++ b/drivers/staging/lustre/lustre/llite/llite_nfs.c
@@ -169,22 +169,12 @@ ll_iget_for_nfs(struct super_block *sb, struct lu_fid *fid, struct lu_fid *paren
/* N.B. d_obtain_alias() drops inode ref on error */
result = d_obtain_alias(inode);
if (!IS_ERR(result)) {
- int rc;
-
- rc = ll_d_init(result);
- if (rc < 0) {
- dput(result);
- result = ERR_PTR(rc);
- } else {
- struct ll_dentry_data *ldd = ll_d2d(result);
-
- /*
- * Need to signal to the ll_intent_file_open that
- * we came from NFS and so opencache needs to be
- * enabled for this one
- */
- ldd->lld_nfs_dentry = 1;
- }
+ /*
+ * Need to signal to the ll_intent_file_open that
+ * we came from NFS and so opencache needs to be
+ * enabled for this one
+ */
+ ll_d2d(result)->lld_nfs_dentry = 1;
}
return result;
@@ -226,7 +216,7 @@ static int ll_encode_fh(struct inode *inode, __u32 *fh, int *plen,
static int ll_nfs_get_name_filldir(struct dir_context *ctx, const char *name,
int namelen, loff_t hash, u64 ino,
- unsigned type)
+ unsigned int type)
{
/* It is a hack to access lde_fid for comparison with lgd_fid.
* So the input 'name' must be part of the 'lu_dirent'.
diff --git a/drivers/staging/lustre/lustre/llite/lproc_llite.c b/drivers/staging/lustre/lustre/llite/lproc_llite.c
index 23fda9d98bff..03682c10fc9e 100644
--- a/drivers/staging/lustre/lustre/llite/lproc_llite.c
+++ b/drivers/staging/lustre/lustre/llite/lproc_llite.c
@@ -1060,10 +1060,6 @@ static const struct llite_file_opcode {
"brw_read" },
{ LPROC_LL_BRW_WRITE, LPROCFS_CNTR_AVGMINMAX | LPROCFS_TYPE_PAGES,
"brw_write" },
- { LPROC_LL_OSC_READ, LPROCFS_CNTR_AVGMINMAX | LPROCFS_TYPE_BYTES,
- "osc_read" },
- { LPROC_LL_OSC_WRITE, LPROCFS_CNTR_AVGMINMAX | LPROCFS_TYPE_BYTES,
- "osc_write" },
{ LPROC_LL_IOCTL, LPROCFS_TYPE_REGS, "ioctl" },
{ LPROC_LL_OPEN, LPROCFS_TYPE_REGS, "open" },
{ LPROC_LL_RELEASE, LPROCFS_TYPE_REGS, "close" },
diff --git a/drivers/staging/lustre/lustre/llite/namei.c b/drivers/staging/lustre/lustre/llite/namei.c
index 180f35e3afd9..a8f4e7fb0a46 100644
--- a/drivers/staging/lustre/lustre/llite/namei.c
+++ b/drivers/staging/lustre/lustre/llite/namei.c
@@ -113,13 +113,18 @@ struct inode *ll_iget(struct super_block *sb, ino_t hash,
if (inode->i_state & I_NEW) {
rc = ll_read_inode2(inode, md);
if (!rc && S_ISREG(inode->i_mode) &&
- !ll_i2info(inode)->lli_clob) {
- CDEBUG(D_INODE, "%s: apply lsm %p to inode "DFID"\n",
- ll_get_fsname(sb, NULL, 0), md->lsm,
- PFID(ll_inode2fid(inode)));
+ !ll_i2info(inode)->lli_clob)
rc = cl_file_inode_init(inode, md);
- }
+
if (rc) {
+ /*
+ * Clear the directory lsm here; otherwise
+ * make_bad_inode() will reset the inode mode
+ * to regular, and ll_clear_inode() will then
+ * not be able to clear lsm_md.
+ */
+ if (S_ISDIR(inode->i_mode))
+ ll_dir_clear_lsm_md(inode);
make_bad_inode(inode);
unlock_new_inode(inode);
iput(inode);
@@ -132,6 +137,8 @@ struct inode *ll_iget(struct super_block *sb, ino_t hash,
CDEBUG(D_VFSTRACE, "got inode: "DFID"(%p): rc = %d\n",
PFID(&md->body->mbo_fid1), inode, rc);
if (rc) {
+ if (S_ISDIR(inode->i_mode))
+ ll_dir_clear_lsm_md(inode);
iput(inode);
inode = ERR_PTR(rc);
}
@@ -258,7 +265,9 @@ int ll_md_blocking_ast(struct ldlm_lock *lock, struct ldlm_lock_desc *desc,
struct ll_inode_info *lli = ll_i2info(inode);
spin_lock(&lli->lli_lock);
- lli->lli_flags &= ~LLIF_MDS_SIZE_LOCK;
+ LTIME_S(inode->i_mtime) = 0;
+ LTIME_S(inode->i_atime) = 0;
+ LTIME_S(inode->i_ctime) = 0;
spin_unlock(&lli->lli_lock);
}
@@ -287,11 +296,39 @@ int ll_md_blocking_ast(struct ldlm_lock *lock, struct ldlm_lock_desc *desc,
hash = cl_fid_build_ino(&lli->lli_pfid,
ll_need_32bit_api(ll_i2sbi(inode)));
-
- master_inode = ilookup5(inode->i_sb, hash,
- ll_test_inode_by_fid,
- (void *)&lli->lli_pfid);
- if (master_inode && !IS_ERR(master_inode)) {
+ /*
+ * Do not look up the inode with ilookup5;
+ * otherwise it will cause a deadlock:
+ *
+ * 1. Client1 sends a chmod req to MDT0; on
+ * MDT0 it enqueues the master and all of its
+ * slaves' locks (mdt_attr_set() ->
+ * mdt_lock_slaves()). After getting the
+ * master and stripe0 locks, it sends the
+ * enqueue req (for stripe1) to MDT1, which
+ * finds the lock has been granted to client2.
+ * MDT1 then sends a blocking ast to client2.
+ *
+ * 2. At the same time, client2 tries to unlink
+ * the striped dir (rm -rf striped_dir); during
+ * lookup it holds the master inode of the
+ * striped directory, whose inode state is
+ * NEW, and tries to revalidate all of its
+ * slaves (ll_prep_inode() -> ll_iget() ->
+ * ll_read_inode2() -> ll_update_inode()),
+ * which blocks on the server side because
+ * of 1.
+ *
+ * 3. Client2 then gets the blocking ast req
+ * and cancels the lock, but it would block
+ * in ilookup5() because the master inode
+ * state is NEW.
+ */
+ master_inode = ilookup5_nowait(inode->i_sb,
+ hash,
+ ll_test_inode_by_fid,
+ (void *)&lli->lli_pfid);
+ if (master_inode) {
ll_invalidate_negative_children(master_inode);
iput(master_inode);
}
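The switch to ilookup5_nowait() matters only for inodes still marked I_NEW: ilookup5() sleeps until such an inode is initialized or dropped, while ilookup5_nowait() simply returns NULL, which is what breaks the three-step cycle described above. A small userspace analogue of that "refuse to wait" behavior (stub types, illustrative names):

#include <stdio.h>

struct inode { int i_new; };			/* stand-in for the I_NEW state flag */

/* A blocking lookup would sleep here until i_new clears -- the deadlock. */
static struct inode *lookup_nowait(struct inode *cached)
{
	if (cached && cached->i_new)
		return NULL;			/* refuse to wait, as ilookup5_nowait() does */
	return cached;
}

int main(void)
{
	struct inode master = { .i_new = 1 };	/* another thread holds it in NEW state */

	if (!lookup_nowait(&master))
		puts("inode still I_NEW: skip invalidation instead of blocking");
	return 0;
}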
@@ -395,17 +432,9 @@ static struct dentry *ll_find_alias(struct inode *inode, struct dentry *dentry)
*/
struct dentry *ll_splice_alias(struct inode *inode, struct dentry *de)
{
- struct dentry *new;
- int rc;
-
if (inode) {
- new = ll_find_alias(inode, de);
+ struct dentry *new = ll_find_alias(inode, de);
if (new) {
- rc = ll_d_init(new);
- if (rc < 0) {
- dput(new);
- return ERR_PTR(rc);
- }
d_move(new, de);
iput(inode);
CDEBUG(D_DENTRY,
@@ -414,9 +443,6 @@ struct dentry *ll_splice_alias(struct inode *inode, struct dentry *de)
return new;
}
}
- rc = ll_d_init(de);
- if (rc < 0)
- return ERR_PTR(rc);
d_add(de, inode);
CDEBUG(D_DENTRY, "Add dentry %p inode %p refc %d flags %#x\n",
de, d_inode(de), d_count(de), de->d_flags);
@@ -535,6 +561,10 @@ static struct dentry *ll_lookup_it(struct inode *parent, struct dentry *dentry,
}
}
+ if (it->it_op & IT_OPEN && it->it_flags & FMODE_WRITE &&
+ dentry->d_sb->s_flags & MS_RDONLY)
+ return ERR_PTR(-EROFS);
+
if (it->it_op & IT_CREAT)
opc = LUSTRE_OPC_CREATE;
else
@@ -801,7 +831,8 @@ static int ll_create_it(struct inode *dir, struct dentry *dentry,
return PTR_ERR(inode);
d_instantiate(dentry, inode);
- return 0;
+
+ return ll_init_security(dentry, inode, dir);
}
void ll_update_times(struct ptlrpc_request *request, struct inode *inode)
@@ -896,6 +927,8 @@ again:
goto err_exit;
d_instantiate(dentry, inode);
+
+ err = ll_init_security(dentry, inode, dir);
err_exit:
if (request)
ptlrpc_req_finished(request);
diff --git a/drivers/staging/lustre/lustre/llite/rw.c b/drivers/staging/lustre/lustre/llite/rw.c
index 50c0152ba022..f10e092979fe 100644
--- a/drivers/staging/lustre/lustre/llite/rw.c
+++ b/drivers/staging/lustre/lustre/llite/rw.c
@@ -47,6 +47,7 @@
#include <linux/pagemap.h>
/* current_is_kswapd() */
#include <linux/swap.h>
+#include <linux/bvec.h>
#define DEBUG_SUBSYSTEM S_LLITE
@@ -180,90 +181,73 @@ void ll_ras_enter(struct file *f)
spin_unlock(&ras->ras_lock);
}
-static int cl_read_ahead_page(const struct lu_env *env, struct cl_io *io,
- struct cl_page_list *queue, struct cl_page *page,
- struct cl_object *clob, pgoff_t *max_index)
+/**
+ * Initiates read-ahead of a page with given index.
+ *
+ * \retval +ve: page was already uptodate so it will be skipped
+ * from being added;
+ * \retval -ve: page wasn't added to \a queue due to an error;
+ * \retval 0: page was added into \a queue for read-ahead.
+ */
+static int ll_read_ahead_page(const struct lu_env *env, struct cl_io *io,
+ struct cl_page_list *queue, pgoff_t index)
{
- struct page *vmpage = page->cp_vmpage;
+ enum ra_stat which = _NR_RA_STAT; /* keep gcc happy */
+ struct cl_object *clob = io->ci_obj;
+ struct inode *inode = vvp_object_inode(clob);
+ const char *msg = NULL;
+ struct cl_page *page;
struct vvp_page *vpg;
- int rc;
+ struct page *vmpage;
+ int rc = 0;
+
+ vmpage = grab_cache_page_nowait(inode->i_mapping, index);
+ if (!vmpage) {
+ which = RA_STAT_FAILED_GRAB_PAGE;
+ msg = "g_c_p_n failed";
+ rc = -EBUSY;
+ goto out;
+ }
+
+ /* Check if vmpage was truncated or reclaimed */
+ if (vmpage->mapping != inode->i_mapping) {
+ which = RA_STAT_WRONG_GRAB_PAGE;
+ msg = "g_c_p_n returned invalid page";
+ rc = -EBUSY;
+ goto out;
+ }
+
+ page = cl_page_find(env, clob, vmpage->index, vmpage, CPT_CACHEABLE);
+ if (IS_ERR(page)) {
+ which = RA_STAT_FAILED_GRAB_PAGE;
+ msg = "cl_page_find failed";
+ rc = PTR_ERR(page);
+ goto out;
+ }
- rc = 0;
- cl_page_assume(env, io, page);
lu_ref_add(&page->cp_reference, "ra", current);
+ cl_page_assume(env, io, page);
vpg = cl2vvp_page(cl_object_page_slice(clob, page));
if (!vpg->vpg_defer_uptodate && !PageUptodate(vmpage)) {
- CDEBUG(D_READA, "page index %lu, max_index: %lu\n",
- vvp_index(vpg), *max_index);
- if (*max_index == 0 || vvp_index(vpg) > *max_index)
- rc = cl_page_is_under_lock(env, io, page, max_index);
- if (rc == 0) {
- vpg->vpg_defer_uptodate = 1;
- vpg->vpg_ra_used = 0;
- cl_page_list_add(queue, page);
- rc = 1;
- } else {
- cl_page_discard(env, io, page);
- rc = -ENOLCK;
- }
+ vpg->vpg_defer_uptodate = 1;
+ vpg->vpg_ra_used = 0;
+ cl_page_list_add(queue, page);
} else {
/* skip completed pages */
cl_page_unassume(env, io, page);
+ /* This page is already uptodate; return a positive number
+ * to tell the caller about this
+ */
+ rc = 1;
}
+
lu_ref_del(&page->cp_reference, "ra", current);
cl_page_put(env, page);
- return rc;
-}
-
-/**
- * Initiates read-ahead of a page with given index.
- *
- * \retval +ve: page was added to \a queue.
- *
- * \retval -ENOLCK: there is no extent lock for this part of a file, stop
- * read-ahead.
- *
- * \retval -ve, 0: page wasn't added to \a queue for other reason.
- */
-static int ll_read_ahead_page(const struct lu_env *env, struct cl_io *io,
- struct cl_page_list *queue,
- pgoff_t index, pgoff_t *max_index)
-{
- struct cl_object *clob = io->ci_obj;
- struct inode *inode = vvp_object_inode(clob);
- struct page *vmpage;
- struct cl_page *page;
- enum ra_stat which = _NR_RA_STAT; /* keep gcc happy */
- int rc = 0;
- const char *msg = NULL;
-
- vmpage = grab_cache_page_nowait(inode->i_mapping, index);
+out:
if (vmpage) {
- /* Check if vmpage was truncated or reclaimed */
- if (vmpage->mapping == inode->i_mapping) {
- page = cl_page_find(env, clob, vmpage->index,
- vmpage, CPT_CACHEABLE);
- if (!IS_ERR(page)) {
- rc = cl_read_ahead_page(env, io, queue,
- page, clob, max_index);
- if (rc == -ENOLCK) {
- which = RA_STAT_FAILED_MATCH;
- msg = "lock match failed";
- }
- } else {
- which = RA_STAT_FAILED_GRAB_PAGE;
- msg = "cl_page_find failed";
- }
- } else {
- which = RA_STAT_WRONG_GRAB_PAGE;
- msg = "g_c_p_n returned invalid page";
- }
- if (rc != 1)
+ if (rc)
unlock_page(vmpage);
put_page(vmpage);
- } else {
- which = RA_STAT_FAILED_GRAB_PAGE;
- msg = "g_c_p_n failed";
}
if (msg) {
ll_ra_stats_inc(inode, which);
@@ -378,12 +362,12 @@ static int ll_read_ahead_pages(const struct lu_env *env,
struct cl_io *io, struct cl_page_list *queue,
struct ra_io_arg *ria,
unsigned long *reserved_pages,
- unsigned long *ra_end)
+ pgoff_t *ra_end)
{
+ struct cl_read_ahead ra = { 0 };
int rc, count = 0;
bool stride_ria;
pgoff_t page_idx;
- pgoff_t max_index = 0;
LASSERT(ria);
RIA_DEBUG(ria);
@@ -392,14 +376,23 @@ static int ll_read_ahead_pages(const struct lu_env *env,
for (page_idx = ria->ria_start;
page_idx <= ria->ria_end && *reserved_pages > 0; page_idx++) {
if (ras_inside_ra_window(page_idx, ria)) {
+ if (!ra.cra_end || ra.cra_end < page_idx) {
+ cl_read_ahead_release(env, &ra);
+
+ rc = cl_io_read_ahead(env, io, page_idx, &ra);
+ if (rc < 0)
+ break;
+
+ LASSERTF(ra.cra_end >= page_idx,
+ "object: %p, indcies %lu / %lu\n",
+ io->ci_obj, ra.cra_end, page_idx);
+ }
+
/* If the page is inside the read-ahead window*/
- rc = ll_read_ahead_page(env, io, queue,
- page_idx, &max_index);
- if (rc == 1) {
+ rc = ll_read_ahead_page(env, io, queue, page_idx);
+ if (!rc) {
(*reserved_pages)--;
count++;
- } else if (rc == -ENOLCK) {
- break;
}
} else if (stride_ria) {
/* If it is not in the read-ahead window, and it is
@@ -425,19 +418,21 @@ static int ll_read_ahead_pages(const struct lu_env *env,
}
}
}
+ cl_read_ahead_release(env, &ra);
+
*ra_end = page_idx;
return count;
}
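The rewritten loop above replaces the per-page cl_page_is_under_lock() probe with one cl_io_read_ahead() query per covered extent: the returned cra_end is cached and refreshed only once page_idx walks past it. A small userspace sketch of that query-once-per-extent loop, with a stubbed query that pretends each extent covers 16 pages (hypothetical names throughout):

#include <stdio.h>

struct read_ahead { unsigned long cra_end; };	/* stand-in for struct cl_read_ahead */

/* Pretend every lock/layout extent covers 16 pages from the queried index. */
static int read_ahead_query(unsigned long idx, struct read_ahead *ra)
{
	ra->cra_end = idx + 15;
	return 0;
}

int main(void)
{
	struct read_ahead ra = { 0 };
	unsigned long idx;

	for (idx = 0; idx <= 40; idx++) {
		/* Re-query only when the cached extent no longer covers idx. */
		if (!ra.cra_end || ra.cra_end < idx) {
			if (read_ahead_query(idx, &ra) < 0)
				break;
			printf("queried at %lu, covered up to %lu\n",
			       idx, ra.cra_end);
		}
		/* ...page idx would be queued for read-ahead here... */
	}
	return 0;
}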
-int ll_readahead(const struct lu_env *env, struct cl_io *io,
- struct cl_page_list *queue, struct ll_readahead_state *ras,
- bool hit)
+static int ll_readahead(const struct lu_env *env, struct cl_io *io,
+ struct cl_page_list *queue,
+ struct ll_readahead_state *ras, bool hit)
{
struct vvp_io *vio = vvp_env_io(env);
struct ll_thread_info *lti = ll_env_info(env);
struct cl_attr *attr = vvp_env_thread_attr(env);
- unsigned long start = 0, end = 0, reserved;
- unsigned long ra_end, len, mlen = 0;
+ unsigned long len, mlen = 0, reserved;
+ pgoff_t ra_end, start = 0, end = 0;
struct inode *inode;
struct ra_io_arg *ria = &lti->lti_ria;
struct cl_object *clob;
@@ -463,30 +458,25 @@ int ll_readahead(const struct lu_env *env, struct cl_io *io,
spin_lock(&ras->ras_lock);
- /* Enlarge the RA window to encompass the full read */
- if (vio->vui_ra_valid &&
- ras->ras_window_start + ras->ras_window_len <
- vio->vui_ra_start + vio->vui_ra_count) {
- ras->ras_window_len = vio->vui_ra_start + vio->vui_ra_count -
- ras->ras_window_start;
- }
+ /*
+ * Note: another thread might roll back ras_next_readahead
+ * if it cannot get the full size of prepared pages; see the
+ * end of this function. For stride read-ahead it needs to
+ * make sure the offset is no less than ras_stride_offset,
+ * so that stride read-ahead can work correctly.
+ */
+ if (stride_io_mode(ras))
+ start = max(ras->ras_next_readahead, ras->ras_stride_offset);
+ else
+ start = ras->ras_next_readahead;
- /* Reserve a part of the read-ahead window that we'll be issuing */
- if (ras->ras_window_len > 0) {
- /*
- * Note: other thread might rollback the ras_next_readahead,
- * if it can not get the full size of prepared pages, see the
- * end of this function. For stride read ahead, it needs to
- * make sure the offset is no less than ras_stride_offset,
- * so that stride read ahead can work correctly.
- */
- if (stride_io_mode(ras))
- start = max(ras->ras_next_readahead,
- ras->ras_stride_offset);
- else
- start = ras->ras_next_readahead;
+ if (ras->ras_window_len > 0)
end = ras->ras_window_start + ras->ras_window_len - 1;
- }
+
+ /* Enlarge the RA window to encompass the full read */
+ if (vio->vui_ra_valid &&
+ end < vio->vui_ra_start + vio->vui_ra_count - 1)
+ end = vio->vui_ra_start + vio->vui_ra_count - 1;
if (end != 0) {
unsigned long rpc_boundary;
@@ -575,8 +565,8 @@ int ll_readahead(const struct lu_env *env, struct cl_io *io,
* if the region we failed to issue read-ahead on is still ahead
* of the app and behind the next index to start read-ahead from
*/
- CDEBUG(D_READA, "ra_end %lu end %lu stride end %lu\n",
- ra_end, end, ria->ria_end);
+ CDEBUG(D_READA, "ra_end = %lu end = %lu stride end = %lu pages = %d\n",
+ ra_end, end, ria->ria_end, ret);
if (ra_end != end + 1) {
ll_ra_stats_inc(inode, RA_STAT_FAILED_REACH_END);
@@ -608,7 +598,7 @@ static void ras_reset(struct inode *inode, struct ll_readahead_state *ras,
ras->ras_consecutive_pages = 0;
ras->ras_window_len = 0;
ras_set_start(inode, ras, index);
- ras->ras_next_readahead = max(ras->ras_window_start, index);
+ ras->ras_next_readahead = max(ras->ras_window_start, index + 1);
RAS_CDEBUG(ras);
}
@@ -737,12 +727,13 @@ static void ras_increase_window(struct inode *inode,
ra->ra_max_pages_per_file);
}
-void ras_update(struct ll_sb_info *sbi, struct inode *inode,
- struct ll_readahead_state *ras, unsigned long index,
- unsigned hit)
+static void ras_update(struct ll_sb_info *sbi, struct inode *inode,
+ struct ll_readahead_state *ras, unsigned long index,
+ enum ras_update_flags flags)
{
struct ll_ra_info *ra = &sbi->ll_ra_info;
int zero = 0, stride_detect = 0, ra_miss = 0;
+ bool hit = flags & LL_RAS_HIT;
spin_lock(&ras->ras_lock);
@@ -772,7 +763,7 @@ void ras_update(struct ll_sb_info *sbi, struct inode *inode,
* to for subsequent IO. The mmap case does not increment
* ras_requests and thus can never trigger this behavior.
*/
- if (ras->ras_requests == 2 && !ras->ras_request_index) {
+ if (ras->ras_requests >= 2 && !ras->ras_request_index) {
__u64 kms_pages;
kms_pages = (i_size_read(inode) + PAGE_SIZE - 1) >>
@@ -784,8 +775,7 @@ void ras_update(struct ll_sb_info *sbi, struct inode *inode,
if (kms_pages &&
kms_pages <= ra->ra_max_read_ahead_whole_pages) {
ras->ras_window_start = 0;
- ras->ras_last_readpage = 0;
- ras->ras_next_readahead = 0;
+ ras->ras_next_readahead = index + 1;
ras->ras_window_len = min(ra->ra_max_pages_per_file,
ra->ra_max_read_ahead_whole_pages);
goto out_unlock;
@@ -815,13 +805,20 @@ void ras_update(struct ll_sb_info *sbi, struct inode *inode,
if (ra_miss) {
if (index_in_stride_window(ras, index) &&
stride_io_mode(ras)) {
- /*If stride-RA hit cache miss, the stride dector
- *will not be reset to avoid the overhead of
- *redetecting read-ahead mode
- */
if (index != ras->ras_last_readpage + 1)
ras->ras_consecutive_pages = 0;
ras_reset(inode, ras, index);
+
+ /* If stride-RA hits a cache miss, the stride
+ * detector is not reset, to avoid the
+ * overhead of redetecting the read-ahead
+ * mode, on the condition that the stride
+ * window still intersects the normal
+ * sequential read-ahead window.
+ */
+ if (ras->ras_window_start <
+ ras->ras_stride_offset)
+ ras_stride_reset(ras);
RAS_CDEBUG(ras);
} else {
/* Reset both stride window and normal RA
@@ -866,8 +863,13 @@ void ras_update(struct ll_sb_info *sbi, struct inode *inode,
/* Trigger RA in the mmap case where ras_consecutive_requests
* is not incremented and thus can't be used to trigger RA
*/
- if (!ras->ras_window_len && ras->ras_consecutive_pages == 4) {
- ras->ras_window_len = RAS_INCREASE_STEP(inode);
+ if (ras->ras_consecutive_pages >= 4 && flags & LL_RAS_MMAP) {
+ ras_increase_window(inode, ras, ra);
+ /*
+ * reset consecutive pages so that the readahead window can
+ * grow gradually.
+ */
+ ras->ras_consecutive_pages = 0;
goto out_unlock;
}
@@ -902,17 +904,17 @@ int ll_writepage(struct page *vmpage, struct writeback_control *wbc)
struct cl_io *io;
struct cl_page *page;
struct cl_object *clob;
- struct cl_env_nest nest;
bool redirtied = false;
bool unlocked = false;
int result;
+ int refcheck;
LASSERT(PageLocked(vmpage));
LASSERT(!PageWriteback(vmpage));
LASSERT(ll_i2dtexp(inode));
- env = cl_env_nested_get(&nest);
+ env = cl_env_get(&refcheck);
if (IS_ERR(env)) {
result = PTR_ERR(env);
goto out;
@@ -977,7 +979,7 @@ int ll_writepage(struct page *vmpage, struct writeback_control *wbc)
}
}
- cl_env_nested_put(&nest, env);
+ cl_env_put(env, &refcheck);
goto out;
out:
@@ -1087,6 +1089,63 @@ void ll_cl_remove(struct file *file, const struct lu_env *env)
write_unlock(&fd->fd_lock);
}
+static int ll_io_read_page(const struct lu_env *env, struct cl_io *io,
+ struct cl_page *page)
+{
+ struct inode *inode = vvp_object_inode(page->cp_obj);
+ struct ll_file_data *fd = vvp_env_io(env)->vui_fd;
+ struct ll_readahead_state *ras = &fd->fd_ras;
+ struct cl_2queue *queue = &io->ci_queue;
+ struct ll_sb_info *sbi = ll_i2sbi(inode);
+ struct vvp_page *vpg;
+ int rc = 0;
+
+ vpg = cl2vvp_page(cl_object_page_slice(page->cp_obj, page));
+ if (sbi->ll_ra_info.ra_max_pages_per_file > 0 &&
+ sbi->ll_ra_info.ra_max_pages > 0) {
+ struct vvp_io *vio = vvp_env_io(env);
+ enum ras_update_flags flags = 0;
+
+ if (vpg->vpg_defer_uptodate)
+ flags |= LL_RAS_HIT;
+ if (!vio->vui_ra_valid)
+ flags |= LL_RAS_MMAP;
+ ras_update(sbi, inode, ras, vvp_index(vpg), flags);
+ }
+
+ if (vpg->vpg_defer_uptodate) {
+ vpg->vpg_ra_used = 1;
+ cl_page_export(env, page, 1);
+ }
+
+ cl_2queue_init(queue);
+ /*
+ * Add the page into the queue even when it is marked uptodate above;
+ * this will unlock it automatically as part of cl_page_list_disown().
+ */
+ cl_page_list_add(&queue->c2_qin, page);
+ if (sbi->ll_ra_info.ra_max_pages_per_file > 0 &&
+ sbi->ll_ra_info.ra_max_pages > 0) {
+ int rc2;
+
+ rc2 = ll_readahead(env, io, &queue->c2_qin, ras,
+ vpg->vpg_defer_uptodate);
+ CDEBUG(D_READA, DFID "%d pages read ahead at %lu\n",
+ PFID(ll_inode2fid(inode)), rc2, vvp_index(vpg));
+ }
+
+ if (queue->c2_qin.pl_nr > 0)
+ rc = cl_io_submit_rw(env, io, CRT_READ, queue);
+
+ /*
+ * Unlock unsent pages in case of error.
+ */
+ cl_page_list_disown(env, io, &queue->c2_qin);
+ cl_2queue_fini(env, queue);
+
+ return rc;
+}
+
int ll_readpage(struct file *file, struct page *vmpage)
{
struct cl_object *clob = ll_i2info(file_inode(file))->lli_clob;
@@ -1110,7 +1169,7 @@ int ll_readpage(struct file *file, struct page *vmpage)
LASSERT(page->cp_type == CPT_CACHEABLE);
if (likely(!PageUptodate(vmpage))) {
cl_page_assume(env, io, page);
- result = cl_io_read_page(env, io, page);
+ result = ll_io_read_page(env, io, page);
} else {
/* Page from a non-object file. */
unlock_page(vmpage);
diff --git a/drivers/staging/lustre/lustre/llite/rw26.c b/drivers/staging/lustre/lustre/llite/rw26.c
index 26f3a37873a7..21e06e5b514e 100644
--- a/drivers/staging/lustre/lustre/llite/rw26.c
+++ b/drivers/staging/lustre/lustre/llite/rw26.c
@@ -71,8 +71,6 @@ static void ll_invalidatepage(struct page *vmpage, unsigned int offset,
struct cl_page *page;
struct cl_object *obj;
- int refcheck;
-
LASSERT(PageLocked(vmpage));
LASSERT(!PageWriteback(vmpage));
@@ -82,28 +80,27 @@ static void ll_invalidatepage(struct page *vmpage, unsigned int offset,
* happening with locked page too
*/
if (offset == 0 && length == PAGE_SIZE) {
- env = cl_env_get(&refcheck);
- if (!IS_ERR(env)) {
- inode = vmpage->mapping->host;
- obj = ll_i2info(inode)->lli_clob;
- if (obj) {
- page = cl_vmpage_page(vmpage, obj);
- if (page) {
- cl_page_delete(env, page);
- cl_page_put(env, page);
- }
- } else {
- LASSERT(vmpage->private == 0);
+ /* See the comment in ll_releasepage() */
+ env = cl_env_percpu_get();
+ LASSERT(!IS_ERR(env));
+ inode = vmpage->mapping->host;
+ obj = ll_i2info(inode)->lli_clob;
+ if (obj) {
+ page = cl_vmpage_page(vmpage, obj);
+ if (page) {
+ cl_page_delete(env, page);
+ cl_page_put(env, page);
}
- cl_env_put(env, &refcheck);
+ } else {
+ LASSERT(vmpage->private == 0);
}
+ cl_env_percpu_put(env);
}
}
static int ll_releasepage(struct page *vmpage, gfp_t gfp_mask)
{
struct lu_env *env;
- void *cookie;
struct cl_object *obj;
struct cl_page *page;
struct address_space *mapping;
@@ -129,7 +126,6 @@ static int ll_releasepage(struct page *vmpage, gfp_t gfp_mask)
if (!page)
return 1;
- cookie = cl_env_reenter();
env = cl_env_percpu_get();
LASSERT(!IS_ERR(env));
@@ -155,7 +151,6 @@ static int ll_releasepage(struct page *vmpage, gfp_t gfp_mask)
cl_page_put(env, page);
cl_env_percpu_put(env);
- cl_env_reexit(cookie);
return result;
}
@@ -340,19 +335,15 @@ static ssize_t ll_direct_IO_26_seg(const struct lu_env *env, struct cl_io *io,
PAGE_SIZE) & ~(DT_MAX_BRW_SIZE - 1))
static ssize_t ll_direct_IO_26(struct kiocb *iocb, struct iov_iter *iter)
{
- struct lu_env *env;
+ struct ll_cl_context *lcc;
+ const struct lu_env *env;
struct cl_io *io;
struct file *file = iocb->ki_filp;
struct inode *inode = file->f_mapping->host;
loff_t file_offset = iocb->ki_pos;
ssize_t count = iov_iter_count(iter);
ssize_t tot_bytes = 0, result = 0;
- struct ll_inode_info *lli = ll_i2info(inode);
long size = MAX_DIO_SIZE;
- int refcheck;
-
- if (!lli->lli_has_smd)
- return -EBADF;
/* FIXME: io smaller than PAGE_SIZE is broken on ia64 ??? */
if ((file_offset & ~PAGE_MASK) || (count & ~PAGE_MASK))
@@ -367,9 +358,13 @@ static ssize_t ll_direct_IO_26(struct kiocb *iocb, struct iov_iter *iter)
if (iov_iter_alignment(iter) & ~PAGE_MASK)
return -EINVAL;
- env = cl_env_get(&refcheck);
+ lcc = ll_cl_find(file);
+ if (!lcc)
+ return -EIO;
+
+ env = lcc->lcc_env;
LASSERT(!IS_ERR(env));
- io = vvp_env_io(env)->vui_cl.cis_io;
+ io = lcc->lcc_io;
LASSERT(io);
while (iov_iter_count(iter)) {
@@ -426,7 +421,6 @@ out:
vio->u.write.vui_written += tot_bytes;
}
- cl_env_put(env, &refcheck);
return tot_bytes ? tot_bytes : result;
}
@@ -466,13 +460,13 @@ static int ll_prepare_partial_page(const struct lu_env *env, struct cl_io *io,
}
static int ll_write_begin(struct file *file, struct address_space *mapping,
- loff_t pos, unsigned len, unsigned flags,
+ loff_t pos, unsigned int len, unsigned int flags,
struct page **pagep, void **fsdata)
{
struct ll_cl_context *lcc;
- const struct lu_env *env;
+ const struct lu_env *env = NULL;
struct cl_io *io;
- struct cl_page *page;
+ struct cl_page *page = NULL;
struct cl_object *clob = ll_i2info(mapping->host)->lli_clob;
pgoff_t index = pos >> PAGE_SHIFT;
struct page *vmpage = NULL;
@@ -484,6 +478,7 @@ static int ll_write_begin(struct file *file, struct address_space *mapping,
lcc = ll_cl_find(file);
if (!lcc) {
+ io = NULL;
result = -EIO;
goto out;
}
@@ -560,6 +555,12 @@ out:
unlock_page(vmpage);
put_page(vmpage);
}
+ if (!IS_ERR_OR_NULL(page)) {
+ lu_ref_del(&page->cp_reference, "cl_io", io);
+ cl_page_put(env, page);
+ }
+ if (io)
+ io->ci_result = result;
} else {
*pagep = vmpage;
*fsdata = lcc;
@@ -576,7 +577,7 @@ static int ll_write_end(struct file *file, struct address_space *mapping,
struct cl_io *io;
struct vvp_io *vio;
struct cl_page *page;
- unsigned from = pos & (PAGE_SIZE - 1);
+ unsigned int from = pos & (PAGE_SIZE - 1);
bool unplug = false;
int result = 0;
@@ -629,6 +630,8 @@ static int ll_write_end(struct file *file, struct address_space *mapping,
file->f_flags & O_SYNC || IS_SYNC(file_inode(file)))
result = vvp_io_write_commit(env, io);
+ if (result < 0)
+ io->ci_result = result;
return result >= 0 ? copied : result;
}
diff --git a/drivers/staging/lustre/lustre/llite/statahead.c b/drivers/staging/lustre/lustre/llite/statahead.c
index 0677513476ec..f1ee17f9ec0d 100644
--- a/drivers/staging/lustre/lustre/llite/statahead.c
+++ b/drivers/staging/lustre/lustre/llite/statahead.c
@@ -659,8 +659,8 @@ static int ll_statahead_interpret(struct ptlrpc_request *req,
struct ll_inode_info *lli = ll_i2info(dir);
struct ll_statahead_info *sai = lli->lli_sai;
struct sa_entry *entry = (struct sa_entry *)minfo->mi_cbdata;
+ wait_queue_head_t *waitq = NULL;
__u64 handle = 0;
- bool wakeup;
if (it_disposition(it, DISP_LOOKUP_NEG))
rc = -ENOENT;
@@ -693,7 +693,8 @@ static int ll_statahead_interpret(struct ptlrpc_request *req,
spin_lock(&lli->lli_sa_lock);
if (rc) {
- wakeup = __sa_make_ready(sai, entry, rc);
+ if (__sa_make_ready(sai, entry, rc))
+ waitq = &sai->sai_waitq;
} else {
entry->se_minfo = minfo;
entry->se_req = ptlrpc_request_addref(req);
@@ -704,13 +705,15 @@ static int ll_statahead_interpret(struct ptlrpc_request *req,
* with parent's lock held, for example: unlink.
*/
entry->se_handle = handle;
- wakeup = !sa_has_callback(sai);
+ if (!sa_has_callback(sai))
+ waitq = &sai->sai_thread.t_ctl_waitq;
+
list_add_tail(&entry->se_list, &sai->sai_interim_entries);
}
sai->sai_replied++;
- if (wakeup)
- wake_up(&sai->sai_thread.t_ctl_waitq);
+ if (waitq)
+ wake_up(waitq);
spin_unlock(&lli->lli_sa_lock);
return rc;
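Instead of a boolean plus a hard-coded wake target, the interpret callback above records which wait queue needs waking -- the stat()er's sai_waitq on error, the agent thread's queue when a callback is pending -- and issues one wake_up() under the same lock. The selection pattern, reduced to a userspace sketch with illustrative names:

#include <stdio.h>

struct waitq { const char *name; };

static void wake_up(struct waitq *wq)
{
	printf("wake %s\n", wq->name);
}

int main(void)
{
	struct waitq sai_waitq = { "sai_waitq" };
	struct waitq agent_waitq = { "agent thread" };
	struct waitq *waitq = NULL;
	int rc = -2;				/* pretend the RPC failed */

	if (rc)
		waitq = &sai_waitq;		/* entry ready now: wake the waiter */
	else
		waitq = &agent_waitq;		/* defer work to the statahead agent */

	if (waitq)
		wake_up(waitq);			/* single wake-up site for both cases */
	return 0;
}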
@@ -1397,10 +1400,10 @@ static int revalidate_statahead_dentry(struct inode *dir,
struct dentry **dentryp,
bool unplug)
{
+ struct ll_inode_info *lli = ll_i2info(dir);
struct sa_entry *entry = NULL;
struct l_wait_info lwi = { 0 };
struct ll_dentry_data *ldd;
- struct ll_inode_info *lli;
int rc = 0;
if ((*dentryp)->d_name.name[0] == '.') {
@@ -1446,7 +1449,9 @@ static int revalidate_statahead_dentry(struct inode *dir,
sa_handle_callback(sai);
if (!sa_ready(entry)) {
+ spin_lock(&lli->lli_sa_lock);
sai->sai_index_wait = entry->se_index;
+ spin_unlock(&lli->lli_sa_lock);
lwi = LWI_TIMEOUT_INTR(cfs_time_seconds(30), NULL,
LWI_ON_SIGNAL_NOOP, NULL);
rc = l_wait_event(sai->sai_waitq, sa_ready(entry), &lwi);
@@ -1475,6 +1480,7 @@ static int revalidate_statahead_dentry(struct inode *dir,
alias = ll_splice_alias(inode, *dentryp);
if (IS_ERR(alias)) {
+ ll_intent_release(&it);
rc = PTR_ERR(alias);
goto out_unplug;
}
@@ -1493,6 +1499,7 @@ static int revalidate_statahead_dentry(struct inode *dir,
*dentryp,
PFID(ll_inode2fid((*dentryp)->d_inode)),
PFID(ll_inode2fid(inode)));
+ ll_intent_release(&it);
rc = -ESTALE;
goto out_unplug;
}
@@ -1512,10 +1519,7 @@ out_unplug:
* dentry_may_statahead().
*/
ldd = ll_d2d(*dentryp);
- lli = ll_i2info(dir);
- /* ldd can be NULL if llite lookup failed. */
- if (ldd)
- ldd->lld_sa_generation = lli->lli_sa_generation;
+ ldd->lld_sa_generation = lli->lli_sa_generation;
sa_put(sai, entry);
return rc;
}
diff --git a/drivers/staging/lustre/lustre/llite/symlink.c b/drivers/staging/lustre/lustre/llite/symlink.c
index 82c7c48aa619..cd77b55d3895 100644
--- a/drivers/staging/lustre/lustre/llite/symlink.c
+++ b/drivers/staging/lustre/lustre/llite/symlink.c
@@ -149,7 +149,6 @@ static const char *ll_get_link(struct dentry *dentry,
}
const struct inode_operations ll_fast_symlink_inode_operations = {
- .readlink = generic_readlink,
.setattr = ll_setattr,
.get_link = ll_get_link,
.getattr = ll_getattr,
diff --git a/drivers/staging/lustre/lustre/llite/vvp_dev.c b/drivers/staging/lustre/lustre/llite/vvp_dev.c
index 8aa8ecc09a48..12c129f7e4ad 100644
--- a/drivers/staging/lustre/lustre/llite/vvp_dev.c
+++ b/drivers/staging/lustre/lustre/llite/vvp_dev.c
@@ -55,7 +55,6 @@
static struct kmem_cache *ll_thread_kmem;
struct kmem_cache *vvp_lock_kmem;
struct kmem_cache *vvp_object_kmem;
-struct kmem_cache *vvp_req_kmem;
static struct kmem_cache *vvp_session_kmem;
static struct kmem_cache *vvp_thread_kmem;
@@ -76,11 +75,6 @@ static struct lu_kmem_descr vvp_caches[] = {
.ckd_size = sizeof(struct vvp_object),
},
{
- .ckd_cache = &vvp_req_kmem,
- .ckd_name = "vvp_req_kmem",
- .ckd_size = sizeof(struct vvp_req),
- },
- {
.ckd_cache = &vvp_session_kmem,
.ckd_name = "vvp_session_kmem",
.ckd_size = sizeof(struct vvp_session)
@@ -177,10 +171,6 @@ static const struct lu_device_operations vvp_lu_ops = {
.ldo_object_alloc = vvp_object_alloc
};
-static const struct cl_device_operations vvp_cl_ops = {
- .cdo_req_init = vvp_req_init
-};
-
static struct lu_device *vvp_device_free(const struct lu_env *env,
struct lu_device *d)
{
@@ -213,7 +203,6 @@ static struct lu_device *vvp_device_alloc(const struct lu_env *env,
lud = &vdv->vdv_cl.cd_lu_dev;
cl_device_init(&vdv->vdv_cl, t);
vvp2lu_dev(vdv)->ld_ops = &vvp_lu_ops;
- vdv->vdv_cl.cd_ops = &vvp_cl_ops;
site = kzalloc(sizeof(*site), GFP_NOFS);
if (site) {
@@ -332,7 +321,6 @@ int cl_sb_init(struct super_block *sb)
cl = cl_type_setup(env, NULL, &vvp_device_type,
sbi->ll_dt_exp->exp_obd->obd_lu_dev);
if (!IS_ERR(cl)) {
- cl2vvp_dev(cl)->vdv_sb = sb;
sbi->ll_cl = cl;
sbi->ll_site = cl2lu_dev(cl)->ld_site;
}
@@ -521,11 +509,10 @@ static void vvp_pgcache_page_show(const struct lu_env *env,
vpg = cl2vvp_page(cl_page_at(page, &vvp_device_type));
vmpage = vpg->vpg_page;
- seq_printf(seq, " %5i | %p %p %s %s %s %s | %p "DFID"(%p) %lu %u [",
+ seq_printf(seq, " %5i | %p %p %s %s %s | %p " DFID "(%p) %lu %u [",
0 /* gen */,
vpg, page,
"none",
- vpg->vpg_write_queued ? "wq" : "- ",
vpg->vpg_defer_uptodate ? "du" : "- ",
PageWriteback(vmpage) ? "wb" : "-",
vmpage, PFID(ll_inode2fid(vmpage->mapping->host)),
diff --git a/drivers/staging/lustre/lustre/llite/vvp_internal.h b/drivers/staging/lustre/lustre/llite/vvp_internal.h
index 4464ad258387..c60d0414ac25 100644
--- a/drivers/staging/lustre/lustre/llite/vvp_internal.h
+++ b/drivers/staging/lustre/lustre/llite/vvp_internal.h
@@ -42,9 +42,7 @@
enum obd_notify_event;
struct inode;
-struct lov_stripe_md;
struct lustre_md;
-struct obd_capa;
struct obd_device;
struct obd_export;
struct page;
@@ -122,7 +120,6 @@ extern struct lu_context_key vvp_thread_key;
extern struct kmem_cache *vvp_lock_kmem;
extern struct kmem_cache *vvp_object_kmem;
-extern struct kmem_cache *vvp_req_kmem;
struct vvp_thread_info {
struct cl_lock vti_lock;
@@ -195,14 +192,6 @@ struct vvp_object {
struct inode *vob_inode;
/**
- * A list of dirty pages pending IO in the cache. Used by
- * SOM. Protected by ll_inode_info::lli_lock.
- *
- * \see vvp_page::vpg_pending_linkage
- */
- struct list_head vob_pending_list;
-
- /**
* Number of transient pages. This is no longer protected by i_sem,
* and needs to be atomic. This is not actually used for anything,
* and can probably be removed.
@@ -235,15 +224,7 @@ struct vvp_object {
struct vvp_page {
struct cl_page_slice vpg_cl;
unsigned int vpg_defer_uptodate:1,
- vpg_ra_used:1,
- vpg_write_queued:1;
- /**
- * Non-empty iff this page is already counted in
- * vvp_object::vob_pending_list. This list is only used as a flag,
- * that is, never iterated through, only checked for list_empty(), but
- * having a list is useful for debugging.
- */
- struct list_head vpg_pending_linkage;
+ vpg_ra_used:1;
/** VM page */
struct page *vpg_page;
};
@@ -260,7 +241,6 @@ static inline pgoff_t vvp_index(struct vvp_page *vvp)
struct vvp_device {
struct cl_device vdv_cl;
- struct super_block *vdv_sb;
struct cl_device *vdv_next;
};
@@ -268,10 +248,6 @@ struct vvp_lock {
struct cl_lock_slice vlk_cl;
};
-struct vvp_req {
- struct cl_req_slice vrq_cl;
-};
-
void *ccc_key_init(const struct lu_context *ctx,
struct lu_context_key *key);
void ccc_key_fini(const struct lu_context *ctx,
@@ -325,21 +301,8 @@ static inline struct vvp_lock *cl2vvp_lock(const struct cl_lock_slice *slice)
# define CLOBINVRNT(env, clob, expr) \
((void)sizeof(env), (void)sizeof(clob), (void)sizeof(!!(expr)))
-/**
- * New interfaces to get and put lov_stripe_md from lov layer. This violates
- * layering because lov_stripe_md is supposed to be a private data in lov.
- *
- * NB: If you find you have to use these interfaces for your new code, please
- * think about it again. These interfaces may be removed in the future for
- * better layering.
- */
-struct lov_stripe_md *lov_lsm_get(struct cl_object *clobj);
-void lov_lsm_put(struct cl_object *clobj, struct lov_stripe_md *lsm);
int lov_read_and_clear_async_rc(struct cl_object *clob);
-struct lov_stripe_md *ccc_inode_lsm_get(struct inode *inode);
-void ccc_inode_lsm_put(struct inode *inode, struct lov_stripe_md *lsm);
-
int vvp_io_init(const struct lu_env *env, struct cl_object *obj,
struct cl_io *io);
int vvp_io_write_commit(const struct lu_env *env, struct cl_io *io);
@@ -347,8 +310,6 @@ int vvp_lock_init(const struct lu_env *env, struct cl_object *obj,
struct cl_lock *lock, const struct cl_io *io);
int vvp_page_init(const struct lu_env *env, struct cl_object *obj,
struct cl_page *page, pgoff_t index);
-int vvp_req_init(const struct lu_env *env, struct cl_device *dev,
- struct cl_req *req);
struct lu_object *vvp_object_alloc(const struct lu_env *env,
const struct lu_object_header *hdr,
struct lu_device *dev);
diff --git a/drivers/staging/lustre/lustre/llite/vvp_io.c b/drivers/staging/lustre/lustre/llite/vvp_io.c
index 2b7f182a15e2..697cbfbe9374 100644
--- a/drivers/staging/lustre/lustre/llite/vvp_io.c
+++ b/drivers/staging/lustre/lustre/llite/vvp_io.c
@@ -72,9 +72,10 @@ static bool can_populate_pages(const struct lu_env *env, struct cl_io *io,
/* don't need lock here to check lli_layout_gen as we have held
* extent lock and GROUP lock has to hold to swap layout
*/
- if (ll_layout_version_get(lli) != vio->vui_layout_gen) {
+ if (ll_layout_version_get(lli) != vio->vui_layout_gen ||
+ OBD_FAIL_CHECK_RESET(OBD_FAIL_LLITE_LOST_LAYOUT, 0)) {
io->ci_need_restart = 1;
- /* this will return application a short read/write */
+ /* this will cause a short read/write */
io->ci_continue = 0;
rc = false;
}
@@ -328,8 +329,8 @@ static void vvp_io_fini(const struct lu_env *env, const struct cl_io_slice *ios)
vio->vui_layout_gen, gen);
/* today successful restore is the only possible case */
/* restore was done, clear restoring state */
- ll_i2info(vvp_object_inode(obj))->lli_flags &=
- ~LLIF_FILE_RESTORING;
+ clear_bit(LLIF_FILE_RESTORING,
+ &ll_i2info(inode)->lli_flags);
}
}
}
@@ -369,7 +370,7 @@ static int vvp_mmap_locks(const struct lu_env *env,
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma;
struct cl_lock_descr *descr = &cti->vti_descr;
- ldlm_policy_data_t policy;
+ union ldlm_policy_data policy;
unsigned long addr;
ssize_t count;
int result = 0;
@@ -450,7 +451,8 @@ static void vvp_io_advance(const struct lu_env *env,
struct vvp_io *vio = cl2vvp_io(env, ios);
CLOBINVRNT(env, obj, vvp_object_invariant(obj));
- iov_iter_reexpand(vio->vui_iter, vio->vui_tot_count -= nob);
+ vio->vui_tot_count -= nob;
+ iov_iter_reexpand(vio->vui_iter, vio->vui_tot_count);
}
static void vvp_io_update_iov(const struct lu_env *env,
@@ -551,9 +553,16 @@ static int vvp_io_setattr_lock(const struct lu_env *env,
if (new_size == 0)
enqflags = CEF_DISCARD_DATA;
} else {
- if ((io->u.ci_setattr.sa_attr.lvb_mtime >=
- io->u.ci_setattr.sa_attr.lvb_ctime) ||
- (io->u.ci_setattr.sa_attr.lvb_atime >=
+ unsigned int valid = io->u.ci_setattr.sa_valid;
+
+ if (!(valid & TIMES_SET_FLAGS))
+ return 0;
+
+ if ((!(valid & ATTR_MTIME) ||
+ io->u.ci_setattr.sa_attr.lvb_mtime >=
+ io->u.ci_setattr.sa_attr.lvb_ctime) &&
+ (!(valid & ATTR_ATIME) ||
+ io->u.ci_setattr.sa_attr.lvb_atime >=
io->u.ci_setattr.sa_attr.lvb_ctime))
return 0;
new_size = 0;
@@ -580,14 +589,6 @@ static int vvp_do_vmtruncate(struct inode *inode, size_t size)
return result;
}
-static int vvp_io_setattr_trunc(const struct lu_env *env,
- const struct cl_io_slice *ios,
- struct inode *inode, loff_t size)
-{
- inode_dio_wait(inode);
- return 0;
-}
-
static int vvp_io_setattr_time(const struct lu_env *env,
const struct cl_io_slice *ios)
{
@@ -618,15 +619,20 @@ static int vvp_io_setattr_start(const struct lu_env *env,
{
struct cl_io *io = ios->cis_io;
struct inode *inode = vvp_object_inode(io->ci_obj);
- int result = 0;
+ struct ll_inode_info *lli = ll_i2info(inode);
- inode_lock(inode);
- if (cl_io_is_trunc(io))
- result = vvp_io_setattr_trunc(env, ios, inode,
- io->u.ci_setattr.sa_attr.lvb_size);
- if (result == 0)
- result = vvp_io_setattr_time(env, ios);
- return result;
+ if (cl_io_is_trunc(io)) {
+ down_write(&lli->lli_trunc_sem);
+ inode_lock(inode);
+ inode_dio_wait(inode);
+ } else {
+ inode_lock(inode);
+ }
+
+ if (io->u.ci_setattr.sa_valid & TIMES_SET_FLAGS)
+ return vvp_io_setattr_time(env, ios);
+
+ return 0;
}
static void vvp_io_setattr_end(const struct lu_env *env,
@@ -634,14 +640,18 @@ static void vvp_io_setattr_end(const struct lu_env *env,
{
struct cl_io *io = ios->cis_io;
struct inode *inode = vvp_object_inode(io->ci_obj);
+ struct ll_inode_info *lli = ll_i2info(inode);
- if (cl_io_is_trunc(io))
+ if (cl_io_is_trunc(io)) {
/* Truncate in memory pages - they must be clean pages
* because osc has already notified to destroy osc_extents.
*/
vvp_do_vmtruncate(inode, io->u.ci_setattr.sa_attr.lvb_size);
-
- inode_unlock(inode);
+ inode_unlock(inode);
+ up_write(&lli->lli_trunc_sem);
+ } else {
+ inode_unlock(inode);
+ }
}
static void vvp_io_setattr_fini(const struct lu_env *env,
@@ -657,6 +667,7 @@ static int vvp_io_read_start(const struct lu_env *env,
struct cl_io *io = ios->cis_io;
struct cl_object *obj = io->ci_obj;
struct inode *inode = vvp_object_inode(obj);
+ struct ll_inode_info *lli = ll_i2info(inode);
struct file *file = vio->vui_fd->fd_file;
int result;
@@ -669,6 +680,8 @@ static int vvp_io_read_start(const struct lu_env *env,
CDEBUG(D_VFSTRACE, "read: -> [%lli, %lli)\n", pos, pos + cnt);
+ down_read(&lli->lli_trunc_sem);
+
if (!can_populate_pages(env, io, inode))
return 0;
@@ -770,16 +783,11 @@ static int vvp_io_commit_sync(const struct lu_env *env, struct cl_io *io,
static void write_commit_callback(const struct lu_env *env, struct cl_io *io,
struct cl_page *page)
{
- struct vvp_page *vpg;
struct page *vmpage = page->cp_vmpage;
- struct cl_object *clob = cl_io_top(io)->ci_obj;
SetPageUptodate(vmpage);
set_page_dirty(vmpage);
- vpg = cl2vvp_page(cl_object_page_slice(clob, page));
- vvp_write_pending(cl2vvp(clob), vpg);
-
cl_page_disown(env, io, page);
/* held in ll_cl_init() */
@@ -899,10 +907,13 @@ static int vvp_io_write_start(const struct lu_env *env,
struct cl_io *io = ios->cis_io;
struct cl_object *obj = io->ci_obj;
struct inode *inode = vvp_object_inode(obj);
+ struct ll_inode_info *lli = ll_i2info(inode);
ssize_t result = 0;
loff_t pos = io->u.ci_wr.wr.crw_pos;
size_t cnt = io->u.ci_wr.wr.crw_count;
+ down_read(&lli->lli_trunc_sem);
+
if (!can_populate_pages(env, io, inode))
return 0;
@@ -921,6 +932,20 @@ static int vvp_io_write_start(const struct lu_env *env,
CDEBUG(D_VFSTRACE, "write: [%lli, %lli)\n", pos, pos + (long long)cnt);
+ /*
+ * The maximum Lustre file size is variable, based on the OST maximum
+ * object size and number of stripes. This needs another check in
+ * addition to the VFS checks earlier.
+ */
+ if (pos + cnt > ll_file_maxbytes(inode)) {
+ CDEBUG(D_INODE,
+ "%s: file " DFID " offset %llu > maxbytes %llu\n",
+ ll_get_fsname(inode->i_sb, NULL, 0),
+ PFID(ll_inode2fid(inode)), pos + cnt,
+ ll_file_maxbytes(inode));
+ return -EFBIG;
+ }
+
if (!vio->vui_iter) {
/* from a temp io in ll_cl_init(). */
result = 0;
@@ -957,11 +982,7 @@ static int vvp_io_write_start(const struct lu_env *env,
}
}
if (result > 0) {
- struct ll_inode_info *lli = ll_i2info(inode);
-
- spin_lock(&lli->lli_lock);
- lli->lli_flags |= LLIF_DATA_MODIFIED;
- spin_unlock(&lli->lli_lock);
+ set_bit(LLIF_DATA_MODIFIED, &(ll_i2info(inode))->lli_flags);
if (result < cnt)
io->ci_continue = 0;
@@ -972,6 +993,15 @@ static int vvp_io_write_start(const struct lu_env *env,
return result;
}
+static void vvp_io_rw_end(const struct lu_env *env,
+ const struct cl_io_slice *ios)
+{
+ struct inode *inode = vvp_object_inode(ios->cis_obj);
+ struct ll_inode_info *lli = ll_i2info(inode);
+
+ up_read(&lli->lli_trunc_sem);
+}
+
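The lli_trunc_sem choreography introduced above is a plain reader/writer exclusion: truncate takes the semaphore for write in vvp_io_setattr_start(), every read/write/fault start hook takes it for read, and the new cio_end hooks release it. A compilable userspace analogue with a pthread rwlock (the function names map onto the patched hooks only loosely):

#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t trunc_sem = PTHREAD_RWLOCK_INITIALIZER;

static void io_start(void)		/* cf. vvp_io_{read,write,fault}_start() */
{
	pthread_rwlock_rdlock(&trunc_sem);	/* many I/Os may run concurrently */
}

static void io_end(void)		/* cf. vvp_io_rw_end() / vvp_io_fault_end() */
{
	pthread_rwlock_unlock(&trunc_sem);
}

static void truncate_start(void)	/* cf. vvp_io_setattr_start(), truncate case */
{
	pthread_rwlock_wrlock(&trunc_sem);	/* blocks until all I/O has ended */
}

static void truncate_end(void)		/* cf. vvp_io_setattr_end() */
{
	pthread_rwlock_unlock(&trunc_sem);
}

int main(void)
{
	io_start();
	io_end();
	truncate_start();
	puts("truncate now excludes all reads, writes and faults");
	truncate_end();
	return 0;
}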
static int vvp_io_kernel_fault(struct vvp_fault_io *cfio)
{
struct vm_fault *vmf = cfio->ft_vmf;
@@ -984,7 +1014,7 @@ static int vvp_io_kernel_fault(struct vvp_fault_io *cfio)
"page %p map %p index %lu flags %lx count %u priv %0lx: got addr %p type NOPAGE\n",
vmf->page, vmf->page->mapping, vmf->page->index,
(long)vmf->page->flags, page_count(vmf->page),
- page_private(vmf->page), vmf->virtual_address);
+ page_private(vmf->page), (void *)vmf->address);
if (unlikely(!(cfio->ft_flags & VM_FAULT_LOCKED))) {
lock_page(vmf->page);
cfio->ft_flags |= VM_FAULT_LOCKED;
@@ -995,12 +1025,12 @@ static int vvp_io_kernel_fault(struct vvp_fault_io *cfio)
}
if (cfio->ft_flags & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV)) {
- CDEBUG(D_PAGE, "got addr %p - SIGBUS\n", vmf->virtual_address);
+ CDEBUG(D_PAGE, "got addr %p - SIGBUS\n", (void *)vmf->address);
return -EFAULT;
}
if (cfio->ft_flags & VM_FAULT_OOM) {
- CDEBUG(D_PAGE, "got addr %p - OOM\n", vmf->virtual_address);
+ CDEBUG(D_PAGE, "got addr %p - OOM\n", (void *)vmf->address);
return -ENOMEM;
}
@@ -1014,13 +1044,7 @@ static int vvp_io_kernel_fault(struct vvp_fault_io *cfio)
static void mkwrite_commit_callback(const struct lu_env *env, struct cl_io *io,
struct cl_page *page)
{
- struct vvp_page *vpg;
- struct cl_object *clob = cl_io_top(io)->ci_obj;
-
set_page_dirty(page->cp_vmpage);
-
- vpg = cl2vvp_page(cl_object_page_slice(clob, page));
- vvp_write_pending(cl2vvp(clob), vpg);
}
static int vvp_io_fault_start(const struct lu_env *env,
@@ -1030,6 +1054,7 @@ static int vvp_io_fault_start(const struct lu_env *env,
struct cl_io *io = ios->cis_io;
struct cl_object *obj = io->ci_obj;
struct inode *inode = vvp_object_inode(obj);
+ struct ll_inode_info *lli = ll_i2info(inode);
struct cl_fault_io *fio = &io->u.ci_fault;
struct vvp_fault_io *cfio = &vio->u.fault;
loff_t offset;
@@ -1039,11 +1064,7 @@ static int vvp_io_fault_start(const struct lu_env *env,
loff_t size;
pgoff_t last_index;
- if (fio->ft_executable &&
- inode->i_mtime.tv_sec != vio->u.fault.ft_mtime)
- CWARN("binary "DFID
- " changed while waiting for the page fault lock\n",
- PFID(lu_object_fid(&obj->co_lu)));
+ down_read(&lli->lli_trunc_sem);
/* offset of the last byte on the page */
offset = cl_offset(obj, fio->ft_index + 1) - 1;
@@ -1192,6 +1213,17 @@ out:
return result;
}
+static void vvp_io_fault_end(const struct lu_env *env,
+ const struct cl_io_slice *ios)
+{
+ struct inode *inode = vvp_object_inode(ios->cis_obj);
+ struct ll_inode_info *lli = ll_i2info(inode);
+
+ CLOBINVRNT(env, ios->cis_io->ci_obj,
+ vvp_object_invariant(ios->cis_io->ci_obj));
+ up_read(&lli->lli_trunc_sem);
+}
+
static int vvp_io_fsync_start(const struct lu_env *env,
const struct cl_io_slice *ios)
{
@@ -1202,46 +1234,23 @@ static int vvp_io_fsync_start(const struct lu_env *env,
return 0;
}
-static int vvp_io_read_page(const struct lu_env *env,
- const struct cl_io_slice *ios,
- const struct cl_page_slice *slice)
+static int vvp_io_read_ahead(const struct lu_env *env,
+ const struct cl_io_slice *ios,
+ pgoff_t start, struct cl_read_ahead *ra)
{
- struct cl_io *io = ios->cis_io;
- struct vvp_page *vpg = cl2vvp_page(slice);
- struct cl_page *page = slice->cpl_page;
- struct inode *inode = vvp_object_inode(slice->cpl_obj);
- struct ll_sb_info *sbi = ll_i2sbi(inode);
- struct ll_file_data *fd = cl2vvp_io(env, ios)->vui_fd;
- struct ll_readahead_state *ras = &fd->fd_ras;
- struct cl_2queue *queue = &io->ci_queue;
-
- if (sbi->ll_ra_info.ra_max_pages_per_file &&
- sbi->ll_ra_info.ra_max_pages)
- ras_update(sbi, inode, ras, vvp_index(vpg),
- vpg->vpg_defer_uptodate);
-
- if (vpg->vpg_defer_uptodate) {
- vpg->vpg_ra_used = 1;
- cl_page_export(env, page, 1);
- }
- /*
- * Add page into the queue even when it is marked uptodate above.
- * this will unlock it automatically as part of cl_page_list_disown().
- */
+ int result = 0;
- cl_page_list_add(&queue->c2_qin, page);
- if (sbi->ll_ra_info.ra_max_pages_per_file &&
- sbi->ll_ra_info.ra_max_pages)
- ll_readahead(env, io, &queue->c2_qin, ras,
- vpg->vpg_defer_uptodate);
+ if (ios->cis_io->ci_type == CIT_READ ||
+ ios->cis_io->ci_type == CIT_FAULT) {
+ struct vvp_io *vio = cl2vvp_io(env, ios);
- return 0;
-}
+ if (unlikely(vio->vui_fd->fd_flags & LL_FILE_GROUP_LOCKED)) {
+ ra->cra_end = CL_PAGE_EOF;
+ result = 1; /* no need to call down */
+ }
+ }
-static void vvp_io_end(const struct lu_env *env, const struct cl_io_slice *ios)
-{
- CLOBINVRNT(env, ios->cis_io->ci_obj,
- vvp_object_invariant(ios->cis_io->ci_obj));
+ return result;
}
static const struct cl_io_operations vvp_io_ops = {
@@ -1250,6 +1259,7 @@ static const struct cl_io_operations vvp_io_ops = {
.cio_fini = vvp_io_fini,
.cio_lock = vvp_io_read_lock,
.cio_start = vvp_io_read_start,
+ .cio_end = vvp_io_rw_end,
.cio_advance = vvp_io_advance,
},
[CIT_WRITE] = {
@@ -1258,6 +1268,7 @@ static const struct cl_io_operations vvp_io_ops = {
.cio_iter_fini = vvp_io_write_iter_fini,
.cio_lock = vvp_io_write_lock,
.cio_start = vvp_io_write_start,
+ .cio_end = vvp_io_rw_end,
.cio_advance = vvp_io_advance,
},
[CIT_SETATTR] = {
@@ -1272,7 +1283,7 @@ static const struct cl_io_operations vvp_io_ops = {
.cio_iter_init = vvp_io_fault_iter_init,
.cio_lock = vvp_io_fault_lock,
.cio_start = vvp_io_fault_start,
- .cio_end = vvp_io_end,
+ .cio_end = vvp_io_fault_end,
},
[CIT_FSYNC] = {
.cio_start = vvp_io_fsync_start,
@@ -1282,7 +1293,7 @@ static const struct cl_io_operations vvp_io_ops = {
.cio_fini = vvp_io_fini
}
},
- .cio_read_page = vvp_io_read_page,
+ .cio_read_ahead = vvp_io_read_ahead,
};
int vvp_io_init(const struct lu_env *env, struct cl_object *obj,
diff --git a/drivers/staging/lustre/lustre/llite/vvp_object.c b/drivers/staging/lustre/lustre/llite/vvp_object.c
index b57195d15674..8e18cf86cefc 100644
--- a/drivers/staging/lustre/lustre/llite/vvp_object.c
+++ b/drivers/staging/lustre/lustre/llite/vvp_object.c
@@ -65,8 +65,7 @@ static int vvp_object_print(const struct lu_env *env, void *cookie,
struct inode *inode = obj->vob_inode;
struct ll_inode_info *lli;
- (*p)(env, cookie, "(%s %d %d) inode: %p ",
- list_empty(&obj->vob_pending_list) ? "-" : "+",
+ (*p)(env, cookie, "(%d %d) inode: %p ",
atomic_read(&obj->vob_transient_pages),
atomic_read(&obj->vob_mmap_cnt), inode);
if (inode) {
@@ -133,7 +132,7 @@ static int vvp_conf_set(const struct lu_env *env, struct cl_object *obj,
CDEBUG(D_VFSTRACE, DFID ": losing layout lock\n",
PFID(&lli->lli_fid));
- ll_layout_version_set(lli, LL_LAYOUT_GEN_NONE);
+ ll_layout_version_set(lli, CL_LAYOUT_GEN_NONE);
/* Clean up page mmap for this inode.
* The reason for us to do this is that if the page has
@@ -146,27 +145,8 @@ static int vvp_conf_set(const struct lu_env *env, struct cl_object *obj,
*/
unmap_mapping_range(conf->coc_inode->i_mapping,
0, OBD_OBJECT_EOF, 0);
-
- return 0;
}
- if (conf->coc_opc != OBJECT_CONF_SET)
- return 0;
-
- if (conf->u.coc_md && conf->u.coc_md->lsm) {
- CDEBUG(D_VFSTRACE, DFID ": layout version change: %u -> %u\n",
- PFID(&lli->lli_fid), lli->lli_layout_gen,
- conf->u.coc_md->lsm->lsm_layout_gen);
-
- lli->lli_has_smd = lsm_has_objects(conf->u.coc_md->lsm);
- ll_layout_version_set(lli, conf->u.coc_md->lsm->lsm_layout_gen);
- } else {
- CDEBUG(D_VFSTRACE, DFID ": layout nuked: %u.\n",
- PFID(&lli->lli_fid), lli->lli_layout_gen);
-
- lli->lli_has_smd = false;
- ll_layout_version_set(lli, LL_LAYOUT_GEN_EMPTY);
- }
return 0;
}
@@ -204,6 +184,26 @@ static int vvp_object_glimpse(const struct lu_env *env,
return 0;
}
+static void vvp_req_attr_set(const struct lu_env *env, struct cl_object *obj,
+ struct cl_req_attr *attr)
+{
+ u64 valid_flags = OBD_MD_FLTYPE;
+ struct inode *inode;
+ struct obdo *oa;
+
+ oa = attr->cra_oa;
+ inode = vvp_object_inode(obj);
+
+ if (attr->cra_type == CRT_WRITE)
+ valid_flags |= OBD_MD_FLMTIME | OBD_MD_FLCTIME |
+ OBD_MD_FLUID | OBD_MD_FLGID;
+ obdo_from_inode(oa, inode, valid_flags & attr->cra_flags);
+ obdo_set_parent_fid(oa, &ll_i2info(inode)->lli_fid);
+ if (OBD_FAIL_CHECK(OBD_FAIL_LFSCK_INVALID_PFID))
+ oa->o_parent_oid++;
+ memcpy(attr->cra_jobid, ll_i2info(inode)->lli_jobid, LUSTRE_JOBID_SIZE);
+}
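/*
 * Illustrative sketch (not part of the patch): how vvp_req_attr_set()
 * above builds its valid mask — a base mask, widened for writes, then
 * intersected with what the caller requested. All names and flag values
 * below are invented for the demo.
 */
#include <stdint.h>
#include <stdio.h>

#define MD_FLTYPE  0x01u
#define MD_FLMTIME 0x02u
#define MD_FLCTIME 0x04u
#define MD_FLUID   0x08u
#define MD_FLGID   0x10u

enum req_type { REQ_READ, REQ_WRITE };

static uint32_t attr_valid(enum req_type type, uint32_t requested)
{
	uint32_t valid = MD_FLTYPE;

	if (type == REQ_WRITE)
		valid |= MD_FLMTIME | MD_FLCTIME | MD_FLUID | MD_FLGID;
	return valid & requested;	/* only what the caller asked for */
}

int main(void)
{
	/* a write request asking for type and mtime gets both */
	printf("0x%x\n", attr_valid(REQ_WRITE, MD_FLTYPE | MD_FLMTIME));
	return 0;
}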
+
static const struct cl_object_operations vvp_ops = {
.coo_page_init = vvp_page_init,
.coo_lock_init = vvp_lock_init,
@@ -212,7 +212,8 @@ static const struct cl_object_operations vvp_ops = {
.coo_attr_update = vvp_attr_update,
.coo_conf_set = vvp_conf_set,
.coo_prune = vvp_prune,
- .coo_glimpse = vvp_object_glimpse
+ .coo_glimpse = vvp_object_glimpse,
+ .coo_req_attr_set = vvp_req_attr_set
};
static int vvp_object_init0(const struct lu_env *env,
@@ -240,7 +241,6 @@ static int vvp_object_init(const struct lu_env *env, struct lu_object *obj,
const struct cl_object_conf *cconf;
cconf = lu2cl_conf(conf);
- INIT_LIST_HEAD(&vob->vob_pending_list);
lu_object_add(obj, below);
result = vvp_object_init0(env, vob, cconf);
} else {
diff --git a/drivers/staging/lustre/lustre/llite/vvp_page.c b/drivers/staging/lustre/lustre/llite/vvp_page.c
index 046e84d7a158..23d66308ff20 100644
--- a/drivers/staging/lustre/lustre/llite/vvp_page.c
+++ b/drivers/staging/lustre/lustre/llite/vvp_page.c
@@ -162,13 +162,10 @@ static void vvp_page_delete(const struct lu_env *env,
LASSERT((struct cl_page *)vmpage->private == page);
LASSERT(inode == vvp_object_inode(obj));
- vvp_write_complete(cl2vvp(obj), cl2vvp_page(slice));
-
/* Drop the reference count held in vvp_page_init */
refc = atomic_dec_return(&page->cp_ref);
LASSERTF(refc >= 1, "page = %p, refc = %d\n", page, refc);
- ClearPageUptodate(vmpage);
ClearPagePrivate(vmpage);
vmpage->private = 0;
/*
@@ -221,8 +218,6 @@ static int vvp_page_prep_write(const struct lu_env *env,
if (!pg->cp_sync_io)
set_page_writeback(vmpage);
- vvp_write_pending(cl2vvp(slice->cpl_obj), cl2vvp_page(slice));
-
return 0;
}
@@ -287,19 +282,6 @@ static void vvp_page_completion_write(const struct lu_env *env,
CL_PAGE_HEADER(D_PAGE, env, pg, "completing WRITE with %d\n", ioret);
- /*
- * TODO: Actually it makes sense to add the page into oap pending
- * list again and so that we don't need to take the page out from
- * SoM write pending list, if we just meet a recoverable error,
- * -ENOMEM, etc.
- * To implement this, we just need to return a non zero value in
- * ->cpo_completion method. The underlying transfer should be notified
- * and then re-add the page into pending transfer queue. -jay
- */
-
- vpg->vpg_write_queued = 0;
- vvp_write_complete(cl2vvp(slice->cpl_obj), vpg);
-
if (pg->cp_sync_io) {
LASSERT(PageLocked(vmpage));
LASSERT(!PageWriteback(vmpage));
@@ -341,7 +323,6 @@ static int vvp_page_make_ready(const struct lu_env *env,
LASSERT(pg->cp_state == CPS_CACHED);
/* This actually clears the dirty bit in the radix tree. */
set_page_writeback(vmpage);
- vvp_write_pending(cl2vvp(slice->cpl_obj), cl2vvp_page(slice));
CL_PAGE_HEADER(D_PAGE, env, pg, "readied\n");
} else if (pg->cp_state == CPS_PAGEOUT) {
/* is it possible for osc_flush_async_page() to already
@@ -357,20 +338,6 @@ static int vvp_page_make_ready(const struct lu_env *env,
return result;
}
-static int vvp_page_is_under_lock(const struct lu_env *env,
- const struct cl_page_slice *slice,
- struct cl_io *io, pgoff_t *max_index)
-{
- if (io->ci_type == CIT_READ || io->ci_type == CIT_WRITE ||
- io->ci_type == CIT_FAULT) {
- struct vvp_io *vio = vvp_env_io(env);
-
- if (unlikely(vio->vui_fd->fd_flags & LL_FILE_GROUP_LOCKED))
- *max_index = CL_PAGE_EOF;
- }
- return 0;
-}
-
static int vvp_page_print(const struct lu_env *env,
const struct cl_page_slice *slice,
void *cookie, lu_printer_t printer)
@@ -378,9 +345,8 @@ static int vvp_page_print(const struct lu_env *env,
struct vvp_page *vpg = cl2vvp_page(slice);
struct page *vmpage = vpg->vpg_page;
- (*printer)(env, cookie, LUSTRE_VVP_NAME "-page@%p(%d:%d:%d) vm@%p ",
- vpg, vpg->vpg_defer_uptodate, vpg->vpg_ra_used,
- vpg->vpg_write_queued, vmpage);
+ (*printer)(env, cookie, LUSTRE_VVP_NAME "-page@%p(%d:%d) vm@%p ",
+ vpg, vpg->vpg_defer_uptodate, vpg->vpg_ra_used, vmpage);
if (vmpage) {
(*printer)(env, cookie, "%lx %d:%d %lx %lu %slru",
(long)vmpage->flags, page_count(vmpage),
@@ -416,7 +382,6 @@ static const struct cl_page_operations vvp_page_ops = {
.cpo_is_vmlocked = vvp_page_is_vmlocked,
.cpo_fini = vvp_page_fini,
.cpo_print = vvp_page_print,
- .cpo_is_under_lock = vvp_page_is_under_lock,
.io = {
[CRT_READ] = {
.cpo_prep = vvp_page_prep_read,
@@ -515,7 +480,6 @@ static const struct cl_page_operations vvp_transient_page_ops = {
.cpo_fini = vvp_transient_page_fini,
.cpo_is_vmlocked = vvp_transient_page_is_vmlocked,
.cpo_print = vvp_page_print,
- .cpo_is_under_lock = vvp_page_is_under_lock,
.io = {
[CRT_READ] = {
.cpo_prep = vvp_transient_page_prep,
@@ -539,7 +503,6 @@ int vvp_page_init(const struct lu_env *env, struct cl_object *obj,
vpg->vpg_page = vmpage;
get_page(vmpage);
- INIT_LIST_HEAD(&vpg->vpg_pending_linkage);
if (page->cp_type == CPT_CACHEABLE) {
/* in cache, decref in vvp_page_delete */
atomic_inc(&page->cp_ref);
diff --git a/drivers/staging/lustre/lustre/llite/vvp_req.c b/drivers/staging/lustre/lustre/llite/vvp_req.c
deleted file mode 100644
index e3f4c790d646..000000000000
--- a/drivers/staging/lustre/lustre/llite/vvp_req.c
+++ /dev/null
@@ -1,122 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2014, Intel Corporation.
- */
-
-#define DEBUG_SUBSYSTEM S_LLITE
-
-#include "../include/lustre/lustre_idl.h"
-#include "../include/cl_object.h"
-#include "../include/obd.h"
-#include "../include/obd_support.h"
-#include "llite_internal.h"
-#include "vvp_internal.h"
-
-static inline struct vvp_req *cl2vvp_req(const struct cl_req_slice *slice)
-{
- return container_of0(slice, struct vvp_req, vrq_cl);
-}
-
-/**
- * Implementation of struct cl_req_operations::cro_attr_set() for VVP
- * layer. VVP is responsible for
- *
- * - o_[mac]time
- *
- * - o_mode
- *
- * - o_parent_seq
- *
- * - o_[ug]id
- *
- * - o_parent_oid
- *
- * - o_parent_ver
- *
- * - o_ioepoch,
- *
- */
-static void vvp_req_attr_set(const struct lu_env *env,
- const struct cl_req_slice *slice,
- const struct cl_object *obj,
- struct cl_req_attr *attr, u64 flags)
-{
- struct inode *inode;
- struct obdo *oa;
- u32 valid_flags;
-
- oa = attr->cra_oa;
- inode = vvp_object_inode(obj);
- valid_flags = OBD_MD_FLTYPE;
-
- if (slice->crs_req->crq_type == CRT_WRITE) {
- if (flags & OBD_MD_FLEPOCH) {
- oa->o_valid |= OBD_MD_FLEPOCH;
- oa->o_ioepoch = ll_i2info(inode)->lli_ioepoch;
- valid_flags |= OBD_MD_FLMTIME | OBD_MD_FLCTIME |
- OBD_MD_FLUID | OBD_MD_FLGID;
- }
- }
- obdo_from_inode(oa, inode, valid_flags & flags);
- obdo_set_parent_fid(oa, &ll_i2info(inode)->lli_fid);
- if (OBD_FAIL_CHECK(OBD_FAIL_LFSCK_INVALID_PFID))
- oa->o_parent_oid++;
- memcpy(attr->cra_jobid, ll_i2info(inode)->lli_jobid,
- LUSTRE_JOBID_SIZE);
-}
-
-static void vvp_req_completion(const struct lu_env *env,
- const struct cl_req_slice *slice, int ioret)
-{
- struct vvp_req *vrq;
-
- if (ioret > 0)
- cl_stats_tally(slice->crs_dev, slice->crs_req->crq_type, ioret);
-
- vrq = cl2vvp_req(slice);
- kmem_cache_free(vvp_req_kmem, vrq);
-}
-
-static const struct cl_req_operations vvp_req_ops = {
- .cro_attr_set = vvp_req_attr_set,
- .cro_completion = vvp_req_completion
-};
-
-int vvp_req_init(const struct lu_env *env, struct cl_device *dev,
- struct cl_req *req)
-{
- struct vvp_req *vrq;
- int result;
-
- vrq = kmem_cache_zalloc(vvp_req_kmem, GFP_NOFS);
- if (vrq) {
- cl_req_slice_add(req, &vrq->vrq_cl, dev, &vvp_req_ops);
- result = 0;
- } else {
- result = -ENOMEM;
- }
- return result;
-}
diff --git a/drivers/staging/lustre/lustre/llite/xattr.c b/drivers/staging/lustre/lustre/llite/xattr.c
index e070adb7a3cc..7a848ebc57c1 100644
--- a/drivers/staging/lustre/lustre/llite/xattr.c
+++ b/drivers/staging/lustre/lustre/llite/xattr.c
@@ -44,48 +44,39 @@
#include "llite_internal.h"
-static
-int get_xattr_type(const char *name)
+const struct xattr_handler *get_xattr_type(const char *name)
{
- if (!strcmp(name, XATTR_NAME_POSIX_ACL_ACCESS))
- return XATTR_ACL_ACCESS_T;
+ int i = 0;
- if (!strcmp(name, XATTR_NAME_POSIX_ACL_DEFAULT))
- return XATTR_ACL_DEFAULT_T;
+ while (ll_xattr_handlers[i]) {
+ size_t len = strlen(ll_xattr_handlers[i]->prefix);
- if (!strncmp(name, XATTR_USER_PREFIX,
- sizeof(XATTR_USER_PREFIX) - 1))
- return XATTR_USER_T;
-
- if (!strncmp(name, XATTR_TRUSTED_PREFIX,
- sizeof(XATTR_TRUSTED_PREFIX) - 1))
- return XATTR_TRUSTED_T;
-
- if (!strncmp(name, XATTR_SECURITY_PREFIX,
- sizeof(XATTR_SECURITY_PREFIX) - 1))
- return XATTR_SECURITY_T;
-
- if (!strncmp(name, XATTR_LUSTRE_PREFIX,
- sizeof(XATTR_LUSTRE_PREFIX) - 1))
- return XATTR_LUSTRE_T;
-
- return XATTR_OTHER_T;
+ if (!strncmp(ll_xattr_handlers[i]->prefix, name, len))
+ return ll_xattr_handlers[i];
+ i++;
+ }
+ return NULL;
}
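/*
 * Illustrative sketch (not part of the patch): the prefix-table lookup
 * that the new get_xattr_type() performs, with an invented handler table.
 * The first handler whose prefix matches the start of the name wins;
 * NULL plays the role of the old XATTR_OTHER_T return.
 */
#include <stdio.h>
#include <string.h>

struct handler_sketch { const char *prefix; int flags; };

static const struct handler_sketch user_h = { "user.", 1 };
static const struct handler_sketch trusted_h = { "trusted.", 2 };
static const struct handler_sketch security_h = { "security.", 3 };

static const struct handler_sketch *handlers[] = {
	&user_h, &trusted_h, &security_h, NULL
};

static const struct handler_sketch *lookup(const char *name)
{
	int i;

	for (i = 0; handlers[i]; i++) {
		size_t len = strlen(handlers[i]->prefix);

		if (!strncmp(handlers[i]->prefix, name, len))
			return handlers[i];
	}
	return NULL;	/* unknown namespace */
}

int main(void)
{
	const struct handler_sketch *h = lookup("user.foo");

	printf("%d\n", h ? h->flags : -1);	/* prints 1 */
	return 0;
}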
-static
-int xattr_type_filter(struct ll_sb_info *sbi, int xattr_type)
+static int xattr_type_filter(struct ll_sb_info *sbi,
+ const struct xattr_handler *handler)
{
- if ((xattr_type == XATTR_ACL_ACCESS_T ||
- xattr_type == XATTR_ACL_DEFAULT_T) &&
+ /* No handler means XATTR_OTHER_T */
+ if (!handler)
+ return -EOPNOTSUPP;
+
+ if ((handler->flags == XATTR_ACL_ACCESS_T ||
+ handler->flags == XATTR_ACL_DEFAULT_T) &&
!(sbi->ll_flags & LL_SBI_ACL))
return -EOPNOTSUPP;
- if (xattr_type == XATTR_USER_T && !(sbi->ll_flags & LL_SBI_USER_XATTR))
+ if (handler->flags == XATTR_USER_T &&
+ !(sbi->ll_flags & LL_SBI_USER_XATTR))
return -EOPNOTSUPP;
- if (xattr_type == XATTR_TRUSTED_T && !capable(CFS_CAP_SYS_ADMIN))
+
+ if (handler->flags == XATTR_TRUSTED_T &&
+ !capable(CFS_CAP_SYS_ADMIN))
return -EPERM;
- if (xattr_type == XATTR_OTHER_T)
- return -EOPNOTSUPP;
return 0;
}
@@ -111,7 +102,7 @@ ll_xattr_set_common(const struct xattr_handler *handler,
valid = OBD_MD_FLXATTR;
}
- rc = xattr_type_filter(sbi, handler->flags);
+ rc = xattr_type_filter(sbi, handler);
if (rc)
return rc;
@@ -121,8 +112,9 @@ ll_xattr_set_common(const struct xattr_handler *handler,
return -EPERM;
/* b10667: ignore lustre special xattr for now */
- if ((handler->flags == XATTR_TRUSTED_T && !strcmp(name, "lov")) ||
- (handler->flags == XATTR_LUSTRE_T && !strcmp(name, "lov")))
+ if (!strcmp(name, "hsm") ||
+ ((handler->flags == XATTR_TRUSTED_T && !strcmp(name, "lov")) ||
+ (handler->flags == XATTR_LUSTRE_T && !strcmp(name, "lov"))))
return 0;
/* b15587: ignore security.capability xattr for now */
@@ -135,6 +127,11 @@ ll_xattr_set_common(const struct xattr_handler *handler,
strcmp(name, "selinux") == 0)
return -EOPNOTSUPP;
+ /* FIXME: enable IMA when the conditions are ready */
+ if (handler->flags == XATTR_SECURITY_T &&
+ (!strcmp(name, "ima") || !strcmp(name, "evm")))
+ return -EOPNOTSUPP;
+
sprintf(fullname, "%s%s\n", handler->prefix, name);
rc = md_setxattr(sbi->ll_md_exp, ll_inode2fid(inode),
valid, fullname, pv, size, 0, flags,
@@ -151,6 +148,37 @@ ll_xattr_set_common(const struct xattr_handler *handler,
return 0;
}
+static int get_hsm_state(struct inode *inode, u32 *hus_states)
+{
+ struct md_op_data *op_data;
+ struct hsm_user_state *hus;
+ int rc;
+
+ hus = kzalloc(sizeof(*hus), GFP_NOFS);
+ if (!hus)
+ return -ENOMEM;
+
+ op_data = ll_prep_md_op_data(NULL, inode, NULL, NULL, 0, 0,
+ LUSTRE_OPC_ANY, hus);
+ if (!IS_ERR(op_data)) {
+ rc = obd_iocontrol(LL_IOC_HSM_STATE_GET, ll_i2mdexp(inode),
+ sizeof(*op_data), op_data, NULL);
+ if (!rc)
+ *hus_states = hus->hus_states;
+ else
+ CDEBUG(D_VFSTRACE, "obd_iocontrol failed. rc = %d\n",
+ rc);
+
+ ll_finish_md_op_data(op_data);
+ } else {
+ rc = PTR_ERR(op_data);
+ CDEBUG(D_VFSTRACE, "Could not prepare the opdata. rc = %d\n",
+ rc);
+ }
+ kfree(hus);
+ return rc;
+}
+
static int ll_xattr_set(const struct xattr_handler *handler,
struct dentry *dentry, struct inode *inode,
const char *name, const void *value, size_t size,
@@ -187,6 +215,31 @@ static int ll_xattr_set(const struct xattr_handler *handler,
if (lump && lump->lmm_stripe_offset == 0)
lump->lmm_stripe_offset = -1;
+ /* Avoid anyone directly setting the RELEASED flag. */
+ if (lump && (lump->lmm_pattern & LOV_PATTERN_F_RELEASED)) {
+ /* Only if the released flag is set do we check
+ * whether the file was indeed archived.
+ */
+ u32 state = HS_NONE;
+
+ rc = get_hsm_state(inode, &state);
+ if (rc)
+ return rc;
+
+ if (!(state & HS_ARCHIVED)) {
+ CDEBUG(D_VFSTRACE,
+ "hus_states state = %x, pattern = %x\n",
+ state, lump->lmm_pattern);
+ /*
+ * The real file is not archived, but the
+ * user is requesting to set the RELEASED
+ * flag, so mask the released flag off
+ * the request.
+ */
+ lump->lmm_pattern ^= LOV_PATTERN_F_RELEASED;
+ }
+ }
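/*
 * Illustrative sketch (not part of the patch): stripping the RELEASED
 * bit from a user-supplied pattern, as the block above does. The XOR is
 * safe only because the surrounding test guarantees the bit is set;
 * clearing with &= ~FLAG would be equivalent and also safe when the bit
 * is already clear. The flag value below is invented.
 */
#include <assert.h>
#include <stdint.h>

#define PATTERN_F_RELEASED 0x80000000u	/* invented stand-in */

static uint32_t mask_released(uint32_t pattern)
{
	if (pattern & PATTERN_F_RELEASED)
		pattern ^= PATTERN_F_RELEASED;	/* known set: XOR clears it */
	return pattern;
}

int main(void)
{
	assert(mask_released(PATTERN_F_RELEASED | 0x1u) == 0x1u);
	assert(mask_released(0x1u) == 0x1u);
	return 0;
}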
+
if (lump && S_ISREG(inode->i_mode)) {
__u64 it_flags = FMODE_WRITE;
int lum_size;
@@ -225,7 +278,8 @@ ll_xattr_list(struct inode *inode, const char *name, int type, void *buffer,
void *xdata;
int rc;
- if (sbi->ll_xattr_cache_enabled && type != XATTR_ACL_ACCESS_T) {
+ if (sbi->ll_xattr_cache_enabled && type != XATTR_ACL_ACCESS_T &&
+ (type != XATTR_SECURITY_T || strcmp(name, "security.selinux"))) {
rc = ll_xattr_cache_get(inode, name, buffer, size, valid);
if (rc == -EAGAIN)
goto getxattr_nocache;
@@ -313,7 +367,7 @@ static int ll_xattr_get_common(const struct xattr_handler *handler,
ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_GETXATTR, 1);
- rc = xattr_type_filter(sbi, handler->flags);
+ rc = xattr_type_filter(sbi, handler);
if (rc)
return rc;
@@ -353,80 +407,99 @@ static int ll_xattr_get_common(const struct xattr_handler *handler,
OBD_MD_FLXATTR);
}
-static int ll_xattr_get(const struct xattr_handler *handler,
- struct dentry *dentry, struct inode *inode,
- const char *name, void *buffer, size_t size)
+static ssize_t ll_getxattr_lov(struct inode *inode, void *buf, size_t buf_size)
{
- LASSERT(inode);
- LASSERT(name);
+ ssize_t rc;
- CDEBUG(D_VFSTRACE, "VFS Op:inode="DFID"(%p), xattr %s\n",
- PFID(ll_inode2fid(inode)), inode, name);
+ if (S_ISREG(inode->i_mode)) {
+ struct cl_object *obj = ll_i2info(inode)->lli_clob;
+ struct cl_layout cl = {
+ .cl_buf.lb_buf = buf,
+ .cl_buf.lb_len = buf_size,
+ };
+ struct lu_env *env;
+ int refcheck;
+
+ if (!obj)
+ return -ENODATA;
- if (!strcmp(name, "lov")) {
- struct lov_stripe_md *lsm;
- struct lov_user_md *lump;
- struct lov_mds_md *lmm = NULL;
- struct ptlrpc_request *request = NULL;
- int rc = 0, lmmsize = 0;
+ env = cl_env_get(&refcheck);
+ if (IS_ERR(env))
+ return PTR_ERR(env);
- ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_GETXATTR, 1);
-
- if (!S_ISREG(inode->i_mode) && !S_ISDIR(inode->i_mode))
- return -ENODATA;
+ rc = cl_object_layout_get(env, obj, &cl);
+ if (rc < 0)
+ goto out_env;
- lsm = ccc_inode_lsm_get(inode);
- if (!lsm) {
- if (S_ISDIR(inode->i_mode)) {
- rc = ll_dir_getstripe(inode, (void **)&lmm,
- &lmmsize, &request, 0);
- } else {
- rc = -ENODATA;
- }
- } else {
- /* LSM is present already after lookup/getattr call.
- * we need to grab layout lock once it is implemented
- */
- rc = obd_packmd(ll_i2dtexp(inode), &lmm, lsm);
- lmmsize = rc;
+ if (!cl.cl_size) {
+ rc = -ENODATA;
+ goto out_env;
}
- ccc_inode_lsm_put(inode, lsm);
+ rc = cl.cl_size;
+
+ if (!buf_size)
+ goto out_env;
+
+ LASSERT(buf && rc <= buf_size);
+
+ /*
+ * Do not return layout gen for getxattr() since
+ * otherwise it would confuse tar --xattr by
+ * recognizing layout gen as stripe offset when the
+ * file is restored. See LU-2809.
+ */
+ ((struct lov_mds_md *)buf)->lmm_layout_gen = 0;
+out_env:
+ cl_env_put(env, &refcheck);
+
+ return rc;
+ } else if (S_ISDIR(inode->i_mode)) {
+ struct ptlrpc_request *req = NULL;
+ struct lov_mds_md *lmm = NULL;
+ int lmm_size = 0;
+
+ rc = ll_dir_getstripe(inode, (void **)&lmm, &lmm_size,
+ &req, 0);
if (rc < 0)
- goto out;
+ goto out_req;
- if (size == 0) {
- /* used to call ll_get_max_mdsize() forward to get
- * the maximum buffer size, while some apps (such as
- * rsync 3.0.x) care much about the exact xattr value
- * size
- */
- rc = lmmsize;
- goto out;
+ if (!buf_size) {
+ rc = lmm_size;
+ goto out_req;
}
- if (size < lmmsize) {
- CERROR("server bug: replied size %d > %d for %pd (%s)\n",
- lmmsize, (int)size, dentry, name);
+ if (buf_size < lmm_size) {
rc = -ERANGE;
- goto out;
+ goto out_req;
}
- lump = buffer;
- memcpy(lump, lmm, lmmsize);
- /* do not return layout gen for getxattr otherwise it would
- * confuse tar --xattr by recognizing layout gen as stripe
- * offset when the file is restored. See LU-2809.
- */
- lump->lmm_layout_gen = 0;
+ memcpy(buf, lmm, lmm_size);
+ rc = lmm_size;
+out_req:
+ if (req)
+ ptlrpc_req_finished(req);
- rc = lmmsize;
-out:
- if (request)
- ptlrpc_req_finished(request);
- else if (lmm)
- obd_free_diskmd(ll_i2dtexp(inode), &lmm);
return rc;
+ } else {
+ return -ENODATA;
+ }
+}
+
+static int ll_xattr_get(const struct xattr_handler *handler,
+ struct dentry *dentry, struct inode *inode,
+ const char *name, void *buffer, size_t size)
+{
+ LASSERT(inode);
+ LASSERT(name);
+
+ CDEBUG(D_VFSTRACE, "VFS Op:inode=" DFID "(%p), xattr %s\n",
+ PFID(ll_inode2fid(inode)), inode, name);
+
+ if (!strcmp(name, "lov")) {
+ ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_GETXATTR, 1);
+
+ return ll_getxattr_lov(inode, buffer, size);
}
return ll_xattr_get_common(handler, dentry, inode, name, buffer, size);
@@ -435,10 +508,10 @@ out:
ssize_t ll_listxattr(struct dentry *dentry, char *buffer, size_t size)
{
struct inode *inode = d_inode(dentry);
- int rc = 0, rc2 = 0;
- struct lov_mds_md *lmm = NULL;
- struct ptlrpc_request *request = NULL;
- int lmmsize;
+ struct ll_sb_info *sbi = ll_i2sbi(inode);
+ char *xattr_name;
+ ssize_t rc, rc2;
+ size_t len, rem;
LASSERT(inode);
@@ -450,65 +523,48 @@ ssize_t ll_listxattr(struct dentry *dentry, char *buffer, size_t size)
rc = ll_xattr_list(inode, NULL, XATTR_OTHER_T, buffer, size,
OBD_MD_FLXATTRLS);
if (rc < 0)
- goto out;
-
- if (buffer) {
- struct ll_sb_info *sbi = ll_i2sbi(inode);
- char *xattr_name = buffer;
- int xlen, rem = rc;
-
- while (rem > 0) {
- xlen = strnlen(xattr_name, rem - 1) + 1;
- rem -= xlen;
- if (xattr_type_filter(sbi,
- get_xattr_type(xattr_name)) == 0) {
- /* skip OK xattr type
- * leave it in buffer
- */
- xattr_name += xlen;
- continue;
- }
- /* move up remaining xattrs in buffer
- * removing the xattr that is not OK
- */
- memmove(xattr_name, xattr_name + xlen, rem);
- rc -= xlen;
+ return rc;
+ /*
+ * If we're being called to get the size of the xattr list
+ * (size == 0), just assume that a lustre.lov xattr
+ * exists.
+ */
+ if (!size)
+ return rc + sizeof(XATTR_LUSTRE_LOV);
+
+ xattr_name = buffer;
+ rem = rc;
+
+ while (rem > 0) {
+ len = strnlen(xattr_name, rem - 1) + 1;
+ rem -= len;
+ if (!xattr_type_filter(sbi, get_xattr_type(xattr_name))) {
+ /* Skip OK xattr type; leave it in the buffer */
+ xattr_name += len;
+ continue;
}
- }
- if (S_ISREG(inode->i_mode)) {
- if (!ll_i2info(inode)->lli_has_smd)
- rc2 = -1;
- } else if (S_ISDIR(inode->i_mode)) {
- rc2 = ll_dir_getstripe(inode, (void **)&lmm, &lmmsize,
- &request, 0);
+
+ /*
+ * Move the remaining xattrs up in the buffer,
+ * removing the xattr that is not OK.
+ */
+ memmove(xattr_name, xattr_name + len, rem);
+ rc -= len;
}
- if (rc2 < 0) {
- rc2 = 0;
- goto out;
- } else if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode)) {
- const int prefix_len = sizeof(XATTR_LUSTRE_PREFIX) - 1;
- const size_t name_len = sizeof("lov") - 1;
- const size_t total_len = prefix_len + name_len + 1;
-
- if (((rc + total_len) > size) && buffer) {
- ptlrpc_req_finished(request);
- return -ERANGE;
- }
+ rc2 = ll_getxattr_lov(inode, NULL, 0);
+ if (rc2 == -ENODATA)
+ return rc;
- if (buffer) {
- buffer += rc;
- memcpy(buffer, XATTR_LUSTRE_PREFIX, prefix_len);
- memcpy(buffer + prefix_len, "lov", name_len);
- buffer[prefix_len + name_len] = '\0';
- }
- rc2 = total_len;
- }
-out:
- ptlrpc_req_finished(request);
- rc = rc + rc2;
+ if (rc2 < 0)
+ return rc2;
- return rc;
+ if (size < rc + sizeof(XATTR_LUSTRE_LOV))
+ return -ERANGE;
+
+ memcpy(buffer + rc, XATTR_LUSTRE_LOV, sizeof(XATTR_LUSTRE_LOV));
+
+ return rc + sizeof(XATTR_LUSTRE_LOV);
}
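/*
 * Illustrative sketch (not part of the patch): filtering a \0-separated
 * xattr name list in place with memmove(), as the loop in ll_listxattr()
 * above does. Rejected entries are overwritten by the remainder of the
 * buffer and the total length shrinks accordingly. The filter below is
 * invented for the demo.
 */
#include <stdio.h>
#include <string.h>

static int keep(const char *name)
{
	return strncmp(name, "trusted.", 8) != 0;	/* drop trusted.* */
}

static size_t filter_names(char *buf, size_t len)
{
	char *name = buf;
	size_t rem = len;

	while (rem > 0) {
		size_t nlen = strnlen(name, rem - 1) + 1;

		rem -= nlen;
		if (keep(name)) {
			name += nlen;	/* leave it in the buffer */
			continue;
		}
		memmove(name, name + nlen, rem);	/* squeeze it out */
		len -= nlen;
	}
	return len;
}

int main(void)
{
	char buf[] = "user.a\0trusted.b\0user.c\0";
	size_t len = filter_names(buf, sizeof(buf) - 1);

	fwrite(buf, 1, len, stdout);	/* "user.a\0user.c\0" */
	return 0;
}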
static const struct xattr_handler ll_user_xattr_handler = {
diff --git a/drivers/staging/lustre/lustre/llite/xattr_cache.c b/drivers/staging/lustre/lustre/llite/xattr_cache.c
index 50a19a40bd4e..38f75f6aa887 100644
--- a/drivers/staging/lustre/lustre/llite/xattr_cache.c
+++ b/drivers/staging/lustre/lustre/llite/xattr_cache.c
@@ -26,8 +26,8 @@ struct ll_xattr_entry {
*/
char *xe_name; /* xattr name, \0-terminated */
char *xe_value; /* xattr value */
- unsigned xe_namelen; /* strlen(xe_name) + 1 */
- unsigned xe_vallen; /* xattr value length */
+ unsigned int xe_namelen; /* strlen(xe_name) + 1 */
+ unsigned int xe_vallen; /* xattr value length */
};
static struct kmem_cache *xattr_kmem;
@@ -60,7 +60,7 @@ void ll_xattr_fini(void)
static void ll_xattr_cache_init(struct ll_inode_info *lli)
{
INIT_LIST_HEAD(&lli->lli_xattrs);
- lli->lli_flags |= LLIF_XATTR_CACHE;
+ set_bit(LLIF_XATTR_CACHE, &lli->lli_flags);
}
/**
@@ -104,7 +104,7 @@ static int ll_xattr_cache_find(struct list_head *cache,
static int ll_xattr_cache_add(struct list_head *cache,
const char *xattr_name,
const char *xattr_val,
- unsigned xattr_val_len)
+ unsigned int xattr_val_len)
{
struct ll_xattr_entry *xattr;
@@ -216,7 +216,7 @@ static int ll_xattr_cache_list(struct list_head *cache,
*/
static int ll_xattr_cache_valid(struct ll_inode_info *lli)
{
- return !!(lli->lli_flags & LLIF_XATTR_CACHE);
+ return test_bit(LLIF_XATTR_CACHE, &lli->lli_flags);
}
/**
@@ -233,7 +233,8 @@ static int ll_xattr_cache_destroy_locked(struct ll_inode_info *lli)
while (ll_xattr_cache_del(&lli->lli_xattrs, NULL) == 0)
; /* empty loop */
- lli->lli_flags &= ~LLIF_XATTR_CACHE;
+
+ clear_bit(LLIF_XATTR_CACHE, &lli->lli_flags);
return 0;
}
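/*
 * Illustrative sketch (not part of the patch): why the cache-valid flag
 * moved from plain "lli_flags |= ..." to set_bit()/test_bit()/clear_bit().
 * A non-atomic read-modify-write on a shared flags word can lose a
 * concurrent update; an atomic fetch-or/fetch-and cannot. C11 atomics
 * stand in for the kernel bitops here.
 */
#include <stdatomic.h>
#include <stdio.h>

#define XATTR_CACHE_BIT (1u << 0)

static atomic_uint flags;

static void cache_init(void)  { atomic_fetch_or(&flags, XATTR_CACHE_BIT); }
static int  cache_valid(void) { return atomic_load(&flags) & XATTR_CACHE_BIT; }
static void cache_drop(void)  { atomic_fetch_and(&flags, ~XATTR_CACHE_BIT); }

int main(void)
{
	cache_init();
	printf("%d\n", cache_valid());	/* 1 */
	cache_drop();
	printf("%d\n", cache_valid());	/* 0 */
	return 0;
}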
@@ -415,6 +416,10 @@ static int ll_xattr_cache_refill(struct inode *inode, struct lookup_intent *oit)
CDEBUG(D_CACHE, "not caching %s\n",
XATTR_NAME_ACL_ACCESS);
rc = 0;
+ } else if (!strcmp(xdata, "security.selinux")) {
+ /* Filter out security.selinux; it is cached in slab */
+ CDEBUG(D_CACHE, "not caching security.selinux\n");
+ rc = 0;
} else {
rc = ll_xattr_cache_add(&lli->lli_xattrs, xdata, xval,
*xsizes);
diff --git a/drivers/staging/lustre/lustre/llite/xattr_security.c b/drivers/staging/lustre/lustre/llite/xattr_security.c
new file mode 100644
index 000000000000..d61d8018001a
--- /dev/null
+++ b/drivers/staging/lustre/lustre/llite/xattr_security.c
@@ -0,0 +1,88 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see http://www.gnu.org/licenses
+ *
+ * GPL HEADER END
+ */
+
+/*
+ * Copyright (c) 2014 Bull SAS
+ * Author: Sebastien Buisson sebastien.buisson@bull.net
+ */
+
+/*
+ * lustre/llite/xattr_security.c
+ * Handler for storing security labels as extended attributes.
+ */
+#include <linux/security.h>
+#include <linux/xattr.h>
+#include "llite_internal.h"
+
+/**
+ * A helper function for ll_security_inode_init_security()
+ * that takes care of setting xattrs
+ *
+ * Get security context of @inode from @xattr_array,
+ * and put it in 'security.xxx' xattr of dentry
+ * stored in @fs_info.
+ *
+ * \retval 0 success
+ * \retval -ENOMEM if no memory could be allocated for xattr name
+ * \retval < 0 failure to set xattr
+ */
+static int
+ll_initxattrs(struct inode *inode, const struct xattr *xattr_array,
+ void *fs_info)
+{
+ const struct xattr_handler *handler;
+ struct dentry *dentry = fs_info;
+ const struct xattr *xattr;
+ int err = 0;
+
+ handler = get_xattr_type(XATTR_SECURITY_PREFIX);
+ if (!handler)
+ return -ENXIO;
+
+ for (xattr = xattr_array; xattr->name; xattr++) {
+ err = handler->set(handler, dentry, inode, xattr->name,
+ xattr->value, xattr->value_len,
+ XATTR_CREATE);
+ if (err < 0)
+ break;
+ }
+ return err;
+}
+
+/**
+ * Initializes security context
+ *
+ * Get security context of @inode in @dir,
+ * and put it in 'security.xxx' xattr of @dentry.
+ *
+ * \retval 0 success, or SELinux is disabled
+ * \retval -ENOMEM if no memory could be allocated for xattr name
+ * \retval < 0 failure to get security context or set xattr
+ */
+int
+ll_init_security(struct dentry *dentry, struct inode *inode, struct inode *dir)
+{
+ if (!selinux_is_enabled())
+ return 0;
+
+ return security_inode_init_security(inode, dir, NULL,
+ &ll_initxattrs, dentry);
+}
diff --git a/drivers/staging/lustre/lustre/lmv/lmv_intent.c b/drivers/staging/lustre/lustre/lmv/lmv_intent.c
index 9f4e826bb0af..b1071cf5a70c 100644
--- a/drivers/staging/lustre/lustre/lmv/lmv_intent.c
+++ b/drivers/staging/lustre/lustre/lmv/lmv_intent.c
@@ -223,7 +223,14 @@ int lmv_revalidate_slaves(struct obd_export *exp,
LASSERT(body);
if (unlikely(body->mbo_nlink < 2)) {
- CERROR("%s: nlink %d < 2 corrupt stripe %d "DFID":" DFID"\n",
+ /*
+ * If this is a bad stripe, most likely due
+ * to a race between close(unlink) and
+ * getattr, return -ENOENT so that llite
+ * will revalidate the dentry; see
+ * ll_inode_revalidate_fini()
+ */
+ CDEBUG(D_INODE, "%s: nlink %d < 2 corrupt stripe %d "DFID":" DFID"\n",
obd->obd_name, body->mbo_nlink, i,
PFID(&lsm->lsm_md_oinfo[i].lmo_fid),
PFID(&lsm->lsm_md_oinfo[0].lmo_fid));
@@ -233,7 +240,7 @@ int lmv_revalidate_slaves(struct obd_export *exp,
it.it_lock_mode = 0;
}
- rc = -EIO;
+ rc = -ENOENT;
goto cleanup;
}
diff --git a/drivers/staging/lustre/lustre/lmv/lmv_internal.h b/drivers/staging/lustre/lustre/lmv/lmv_internal.h
index 52b03745ac19..12731a17e263 100644
--- a/drivers/staging/lustre/lustre/lmv/lmv_internal.h
+++ b/drivers/staging/lustre/lustre/lmv/lmv_internal.h
@@ -54,9 +54,6 @@ int __lmv_fid_alloc(struct lmv_obd *lmv, struct lu_fid *fid, u32 mds);
int lmv_fid_alloc(const struct lu_env *env, struct obd_export *exp,
struct lu_fid *fid, struct md_op_data *op_data);
-int lmv_unpack_md(struct obd_export *exp, struct lmv_stripe_md **lsmp,
- const union lmv_mds_md *lmm, int stripe_count);
-
int lmv_revalidate_slaves(struct obd_export *exp,
const struct lmv_stripe_md *lsm,
ldlm_blocking_callback cb_blocking,
diff --git a/drivers/staging/lustre/lustre/lmv/lmv_obd.c b/drivers/staging/lustre/lustre/lmv/lmv_obd.c
index 7dbb2b946acf..f124f6c05ea4 100644
--- a/drivers/staging/lustre/lustre/lmv/lmv_obd.c
+++ b/drivers/staging/lustre/lustre/lmv/lmv_obd.c
@@ -62,6 +62,7 @@ static void lmv_activate_target(struct lmv_obd *lmv,
tgt->ltd_active = activate;
lmv->desc.ld_active_tgt_count += (activate ? 1 : -1);
+ tgt->ltd_exp->exp_obd->obd_inactive = !activate;
}
/**
@@ -245,8 +246,7 @@ static int lmv_connect(const struct lu_env *env,
return rc;
}
-static int lmv_init_ea_size(struct obd_export *exp, u32 easize, u32 def_easize,
- u32 cookiesize, u32 def_cookiesize)
+static int lmv_init_ea_size(struct obd_export *exp, u32 easize, u32 def_easize)
{
struct obd_device *obd = exp->exp_obd;
struct lmv_obd *lmv = &obd->u.lmv;
@@ -262,14 +262,7 @@ static int lmv_init_ea_size(struct obd_export *exp, u32 easize, u32 def_easize,
lmv->max_def_easize = def_easize;
change = 1;
}
- if (lmv->max_cookiesize < cookiesize) {
- lmv->max_cookiesize = cookiesize;
- change = 1;
- }
- if (lmv->max_def_cookiesize < def_cookiesize) {
- lmv->max_def_cookiesize = def_cookiesize;
- change = 1;
- }
+
if (change == 0)
return 0;
@@ -284,8 +277,7 @@ static int lmv_init_ea_size(struct obd_export *exp, u32 easize, u32 def_easize,
continue;
}
- rc = md_init_ea_size(tgt->ltd_exp, easize, def_easize,
- cookiesize, def_cookiesize);
+ rc = md_init_ea_size(tgt->ltd_exp, easize, def_easize);
if (rc) {
CERROR("%s: obd_init_ea_size() failed on MDT target %d: rc = %d\n",
obd->obd_name, i, rc);
@@ -368,8 +360,7 @@ static int lmv_connect_mdc(struct obd_device *obd, struct lmv_tgt_desc *tgt)
tgt->ltd_exp = mdc_exp;
lmv->desc.ld_active_tgt_count++;
- md_init_ea_size(tgt->ltd_exp, lmv->max_easize, lmv->max_def_easize,
- lmv->max_cookiesize, lmv->max_def_cookiesize);
+ md_init_ea_size(tgt->ltd_exp, lmv->max_easize, lmv->max_def_easize);
CDEBUG(D_CONFIG, "Connected to %s(%s) successfully (%d)\n",
mdc_obd->obd_name, mdc_obd->obd_uuid.uuid,
@@ -396,27 +387,23 @@ static int lmv_add_target(struct obd_device *obd, struct obd_uuid *uuidp,
__u32 index, int gen)
{
struct lmv_obd *lmv = &obd->u.lmv;
+ struct obd_device *mdc_obd;
struct lmv_tgt_desc *tgt;
int orig_tgt_count = 0;
int rc = 0;
CDEBUG(D_CONFIG, "Target uuid: %s. index %d\n", uuidp->uuid, index);
- mutex_lock(&lmv->lmv_init_mutex);
-
- if (lmv->desc.ld_tgt_count == 0) {
- struct obd_device *mdc_obd;
-
- mdc_obd = class_find_client_obd(uuidp, LUSTRE_MDC_NAME,
- &obd->obd_uuid);
- if (!mdc_obd) {
- mutex_unlock(&lmv->lmv_init_mutex);
- CERROR("%s: Target %s not attached: rc = %d\n",
- obd->obd_name, uuidp->uuid, -EINVAL);
- return -EINVAL;
- }
+ mdc_obd = class_find_client_obd(uuidp, LUSTRE_MDC_NAME,
+ &obd->obd_uuid);
+ if (!mdc_obd) {
+ CERROR("%s: Target %s not attached: rc = %d\n",
+ obd->obd_name, uuidp->uuid, -EINVAL);
+ return -EINVAL;
}
+ mutex_lock(&lmv->lmv_init_mutex);
+
if ((index < lmv->tgts_size) && lmv->tgts[index]) {
tgt = lmv->tgts[index];
CERROR("%s: UUID %s already assigned at LOV target index %d: rc = %d\n",
@@ -472,22 +459,27 @@ static int lmv_add_target(struct obd_device *obd, struct obd_uuid *uuidp,
lmv->desc.ld_tgt_count = index + 1;
}
- if (lmv->connected) {
- rc = lmv_connect_mdc(obd, tgt);
- if (rc) {
- spin_lock(&lmv->lmv_lock);
- if (lmv->desc.ld_tgt_count == index + 1)
- lmv->desc.ld_tgt_count = orig_tgt_count;
- memset(tgt, 0, sizeof(*tgt));
- spin_unlock(&lmv->lmv_lock);
- } else {
- int easize = sizeof(struct lmv_stripe_md) +
- lmv->desc.ld_tgt_count * sizeof(struct lu_fid);
- lmv_init_ea_size(obd->obd_self_export, easize, 0, 0, 0);
- }
+ if (!lmv->connected) {
+ /* lmv_check_connect() will connect this target. */
+ mutex_unlock(&lmv->lmv_init_mutex);
+ return rc;
}
+ /* Otherwise let's connect it ourselves */
mutex_unlock(&lmv->lmv_init_mutex);
+ rc = lmv_connect_mdc(obd, tgt);
+ if (rc) {
+ spin_lock(&lmv->lmv_lock);
+ if (lmv->desc.ld_tgt_count == index + 1)
+ lmv->desc.ld_tgt_count = orig_tgt_count;
+ memset(tgt, 0, sizeof(*tgt));
+ spin_unlock(&lmv->lmv_lock);
+ } else {
+ int easize = sizeof(struct lmv_stripe_md) +
+ lmv->desc.ld_tgt_count * sizeof(struct lu_fid);
+ lmv_init_ea_size(obd->obd_self_export, easize, 0);
+ }
+
return rc;
}
@@ -538,7 +530,7 @@ int lmv_check_connect(struct obd_device *obd)
class_export_put(lmv->exp);
lmv->connected = 1;
easize = lmv_mds_md_size(lmv->desc.ld_tgt_count, LMV_MAGIC);
- lmv_init_ea_size(obd->obd_self_export, easize, 0, 0, 0);
+ lmv_init_ea_size(obd->obd_self_export, easize, 0);
mutex_unlock(&lmv->lmv_init_mutex);
return 0;
@@ -1128,9 +1120,7 @@ static int lmv_iocontrol(unsigned int cmd, struct obd_export *exp,
mdc_obd = class_exp2obd(tgt->ltd_exp);
mdc_obd->obd_force = obddev->obd_force;
err = obd_iocontrol(cmd, tgt->ltd_exp, len, karg, uarg);
- if (err == -ENODATA && cmd == OBD_IOC_POLL_QUOTACHECK) {
- return err;
- } else if (err) {
+ if (err) {
if (tgt->ltd_active) {
CERROR("error: iocontrol MDC %s on MDTidx %d cmd %x: err = %d\n",
tgt->ltd_uuid.uuid, i, cmd, err);
@@ -1284,7 +1274,6 @@ static int lmv_setup(struct obd_device *obd, struct lustre_cfg *lcfg)
obd_str2uuid(&lmv->desc.ld_uuid, desc->ld_uuid.uuid);
lmv->desc.ld_tgt_count = 0;
lmv->desc.ld_active_tgt_count = 0;
- lmv->max_cookiesize = 0;
lmv->max_def_easize = 0;
lmv->max_easize = 0;
lmv->lmv_placement = PLACEMENT_CHAR_POLICY;
@@ -1630,27 +1619,28 @@ lmv_locate_mds(struct lmv_obd *lmv, struct md_op_data *op_data,
* ct_restore().
*/
if (op_data->op_bias & MDS_CREATE_VOLATILE &&
- (int)op_data->op_mds != -1 && lsm) {
+ (int)op_data->op_mds != -1) {
int i;
tgt = lmv_get_target(lmv, op_data->op_mds, NULL);
if (IS_ERR(tgt))
return tgt;
- /* refill the right parent fid */
- for (i = 0; i < lsm->lsm_md_stripe_count; i++) {
- struct lmv_oinfo *oinfo;
+ if (lsm) {
+ /* refill the right parent fid */
+ for (i = 0; i < lsm->lsm_md_stripe_count; i++) {
+ struct lmv_oinfo *oinfo;
- oinfo = &lsm->lsm_md_oinfo[i];
- if (oinfo->lmo_mds == op_data->op_mds) {
- *fid = oinfo->lmo_fid;
- break;
+ oinfo = &lsm->lsm_md_oinfo[i];
+ if (oinfo->lmo_mds == op_data->op_mds) {
+ *fid = oinfo->lmo_fid;
+ break;
+ }
}
- }
- /* Hmm, can not find the stripe by mdt_index(op_mds) */
- if (i == lsm->lsm_md_stripe_count)
- tgt = ERR_PTR(-EINVAL);
+ if (i == lsm->lsm_md_stripe_count)
+ *fid = lsm->lsm_md_oinfo[0].lmo_fid;
+ }
return tgt;
}
@@ -1728,30 +1718,9 @@ static int lmv_create(struct obd_export *exp, struct md_op_data *op_data,
return rc;
}
-static int lmv_done_writing(struct obd_export *exp,
- struct md_op_data *op_data,
- struct md_open_data *mod)
-{
- struct obd_device *obd = exp->exp_obd;
- struct lmv_obd *lmv = &obd->u.lmv;
- struct lmv_tgt_desc *tgt;
- int rc;
-
- rc = lmv_check_connect(obd);
- if (rc)
- return rc;
-
- tgt = lmv_find_target(lmv, &op_data->op_fid1);
- if (IS_ERR(tgt))
- return PTR_ERR(tgt);
-
- rc = md_done_writing(tgt->ltd_exp, op_data, mod);
- return rc;
-}
-
static int
lmv_enqueue(struct obd_export *exp, struct ldlm_enqueue_info *einfo,
- const ldlm_policy_data_t *policy,
+ const union ldlm_policy_data *policy,
struct lookup_intent *it, struct md_op_data *op_data,
struct lustre_handle *lockh, __u64 extra_lock_flags)
{
@@ -1847,7 +1816,7 @@ static int lmv_early_cancel(struct obd_export *exp, struct lmv_tgt_desc *tgt,
struct lu_fid *fid = md_op_data_fid(op_data, flag);
struct obd_device *obd = exp->exp_obd;
struct lmv_obd *lmv = &obd->u.lmv;
- ldlm_policy_data_t policy = { {0} };
+ union ldlm_policy_data policy = { { 0 } };
int rc = 0;
if (!fid_is_sane(fid))
@@ -1937,7 +1906,10 @@ static int lmv_rename(struct obd_export *exp, struct md_op_data *op_data,
{
struct obd_device *obd = exp->exp_obd;
struct lmv_obd *lmv = &obd->u.lmv;
+ struct obd_export *target_exp;
struct lmv_tgt_desc *src_tgt;
+ struct lmv_tgt_desc *tgt_tgt;
+ struct mdt_body *body;
int rc;
LASSERT(oldlen != 0);
@@ -1977,6 +1949,10 @@ static int lmv_rename(struct obd_export *exp, struct md_op_data *op_data,
if (rc)
return rc;
src_tgt = lmv_find_target(lmv, &op_data->op_fid3);
+ if (IS_ERR(src_tgt))
+ return PTR_ERR(src_tgt);
+
+ target_exp = src_tgt->ltd_exp;
} else {
if (op_data->op_mea1) {
struct lmv_stripe_md *lsm = op_data->op_mea1;
@@ -1985,29 +1961,27 @@ static int lmv_rename(struct obd_export *exp, struct md_op_data *op_data,
oldlen,
&op_data->op_fid1,
&op_data->op_mds);
- if (IS_ERR(src_tgt))
- return PTR_ERR(src_tgt);
} else {
src_tgt = lmv_find_target(lmv, &op_data->op_fid1);
- if (IS_ERR(src_tgt))
- return PTR_ERR(src_tgt);
-
- op_data->op_mds = src_tgt->ltd_idx;
}
+ if (IS_ERR(src_tgt))
+ return PTR_ERR(src_tgt);
if (op_data->op_mea2) {
struct lmv_stripe_md *lsm = op_data->op_mea2;
- const struct lmv_oinfo *oinfo;
- oinfo = lsm_name_to_stripe_info(lsm, new, newlen);
- if (IS_ERR(oinfo))
- return PTR_ERR(oinfo);
-
- op_data->op_fid2 = oinfo->lmo_fid;
+ tgt_tgt = lmv_locate_target_for_name(lmv, lsm, new,
+ newlen,
+ &op_data->op_fid2,
+ &op_data->op_mds);
+ } else {
+ tgt_tgt = lmv_find_target(lmv, &op_data->op_fid2);
}
+ if (IS_ERR(tgt_tgt))
+ return PTR_ERR(tgt_tgt);
+
+ target_exp = tgt_tgt->ltd_exp;
}
- if (IS_ERR(src_tgt))
- return PTR_ERR(src_tgt);
/*
* LOOKUP lock on src child (fid3) should also be cancelled for
@@ -2048,26 +2022,56 @@ static int lmv_rename(struct obd_export *exp, struct md_op_data *op_data,
return rc;
}
+retry_rename:
/*
* Cancel all the locks on tgt child (fid4).
*/
- if (fid_is_sane(&op_data->op_fid4))
+ if (fid_is_sane(&op_data->op_fid4)) {
+ struct lmv_tgt_desc *tgt;
+
rc = lmv_early_cancel(exp, NULL, op_data, src_tgt->ltd_idx,
LCK_EX, MDS_INODELOCK_FULL,
MF_MDC_CANCEL_FID4);
+ if (rc)
+ return rc;
+
+ tgt = lmv_find_target(lmv, &op_data->op_fid4);
+ if (IS_ERR(tgt))
+ return PTR_ERR(tgt);
- CDEBUG(D_INODE, DFID":m%d to "DFID"\n", PFID(&op_data->op_fid1),
- op_data->op_mds, PFID(&op_data->op_fid2));
+ /*
+ * Since the target child might be destroyed and become
+ * an orphan, and orphans can currently only be checked
+ * on the local MDT, send the rename request to the MDT
+ * where the target child is located. If the target child
+ * does not exist, the request is sent to the target
+ * parent instead.
+ */
+ target_exp = tgt->ltd_exp;
+ }
- rc = md_rename(src_tgt->ltd_exp, op_data, old, oldlen,
- new, newlen, request);
- return rc;
+ rc = md_rename(target_exp, op_data, old, oldlen, new, newlen, request);
+ if (rc && rc != -EREMOTE)
+ return rc;
+
+ body = req_capsule_server_get(&(*request)->rq_pill, &RMF_MDT_BODY);
+ if (!body)
+ return -EPROTO;
+
+ /* Not cross-ref case, just get out of here. */
+ if (likely(!(body->mbo_valid & OBD_MD_MDS)))
+ return rc;
+
+ CDEBUG(D_INODE, "%s: try rename to another MDT for " DFID "\n",
+ exp->exp_obd->obd_name, PFID(&body->mbo_fid1));
+
+ op_data->op_fid4 = body->mbo_fid1;
+ ptlrpc_req_finished(*request);
+ *request = NULL;
+ goto retry_rename;
}
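/*
 * Illustrative sketch (not part of the patch): the retry_rename pattern
 * above, reduced to a generic "follow the -EREMOTE referral" loop with
 * invented types. The operation is retried against the server named in
 * the reply until one answers without a referral.
 */
#include <stdio.h>

#define EREMOTE 66	/* Linux errno value, defined here to stay self-contained */

struct reply { int rc; int next_server; };

/* pretend servers 0 and 1 refer us onward; server 2 completes the op */
static struct reply do_op(int server)
{
	struct reply r = { 0, -1 };

	if (server < 2) {
		r.rc = -EREMOTE;
		r.next_server = server + 1;
	}
	return r;
}

int main(void)
{
	int server = 0;

	for (;;) {
		struct reply r = do_op(server);

		if (r.rc != -EREMOTE) {
			printf("done on server %d, rc=%d\n", server, r.rc);
			return 0;
		}
		server = r.next_server;	/* retry where the reply points */
	}
}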
static int lmv_setattr(struct obd_export *exp, struct md_op_data *op_data,
- void *ea, size_t ealen, void *ea2, size_t ea2len,
- struct ptlrpc_request **request,
- struct md_open_data **mod)
+ void *ea, size_t ealen, struct ptlrpc_request **request)
{
struct obd_device *obd = exp->exp_obd;
struct lmv_obd *lmv = &obd->u.lmv;
@@ -2086,10 +2090,7 @@ static int lmv_setattr(struct obd_export *exp, struct md_op_data *op_data,
if (IS_ERR(tgt))
return PTR_ERR(tgt);
- rc = md_setattr(tgt->ltd_exp, op_data, ea, ealen, ea2,
- ea2len, request, mod);
-
- return rc;
+ return md_setattr(tgt->ltd_exp, op_data, ea, ealen, request);
}
static int lmv_sync(struct obd_export *exp, const struct lu_fid *fid,
@@ -2623,23 +2624,10 @@ try_next_stripe:
goto retry_unlink;
}
-static int lmv_precleanup(struct obd_device *obd, enum obd_cleanup_stage stage)
+static int lmv_precleanup(struct obd_device *obd)
{
- struct lmv_obd *lmv = &obd->u.lmv;
-
- switch (stage) {
- case OBD_CLEANUP_EARLY:
- /* XXX: here should be calling obd_precleanup() down to
- * stack.
- */
- break;
- case OBD_CLEANUP_EXPORTS:
- fld_client_debugfs_fini(&lmv->lmv_fld);
- lprocfs_obd_cleanup(obd);
- break;
- default:
- break;
- }
+ fld_client_debugfs_fini(&obd->u.lmv.lmv_fld);
+ lprocfs_obd_cleanup(obd);
return 0;
}
@@ -2654,14 +2642,12 @@ static int lmv_precleanup(struct obd_device *obd, enum obd_cleanup_stage stage)
* \param[in] key identifier of key to get value for
* \param[in] vallen size of \a val
* \param[out] val pointer to storage location for value
- * \param[in] lsm optional striping metadata of object
*
* \retval 0 on success
* \retval negative negated errno on failure
*/
static int lmv_get_info(const struct lu_env *env, struct obd_export *exp,
- __u32 keylen, void *key, __u32 *vallen, void *val,
- struct lov_stripe_md *lsm)
+ __u32 keylen, void *key, __u32 *vallen, void *val)
{
struct obd_device *obd;
struct lmv_obd *lmv;
@@ -2693,7 +2679,7 @@ static int lmv_get_info(const struct lu_env *env, struct obd_export *exp,
continue;
if (!obd_get_info(env, tgt->ltd_exp, keylen, key,
- vallen, val, NULL))
+ vallen, val))
return 0;
}
return -EINVAL;
@@ -2709,7 +2695,7 @@ static int lmv_get_info(const struct lu_env *env, struct obd_export *exp,
* desc.
*/
rc = obd_get_info(env, lmv->tgts[0]->ltd_exp, keylen, key,
- vallen, val, NULL);
+ vallen, val);
if (!rc && KEY_IS(KEY_CONN_DATA))
exp->exp_connect_data = *(struct obd_connect_data *)val;
return rc;
@@ -2777,90 +2763,6 @@ static int lmv_set_info_async(const struct lu_env *env, struct obd_export *exp,
return -EINVAL;
}
-static int lmv_pack_md_v1(const struct lmv_stripe_md *lsm,
- struct lmv_mds_md_v1 *lmm1)
-{
- int cplen;
- int i;
-
- lmm1->lmv_magic = cpu_to_le32(lsm->lsm_md_magic);
- lmm1->lmv_stripe_count = cpu_to_le32(lsm->lsm_md_stripe_count);
- lmm1->lmv_master_mdt_index = cpu_to_le32(lsm->lsm_md_master_mdt_index);
- lmm1->lmv_hash_type = cpu_to_le32(lsm->lsm_md_hash_type);
- cplen = strlcpy(lmm1->lmv_pool_name, lsm->lsm_md_pool_name,
- sizeof(lmm1->lmv_pool_name));
- if (cplen >= sizeof(lmm1->lmv_pool_name))
- return -E2BIG;
-
- for (i = 0; i < lsm->lsm_md_stripe_count; i++)
- fid_cpu_to_le(&lmm1->lmv_stripe_fids[i],
- &lsm->lsm_md_oinfo[i].lmo_fid);
- return 0;
-}
-
-static int
-lmv_pack_md(union lmv_mds_md **lmmp, const struct lmv_stripe_md *lsm,
- int stripe_count)
-{
- int lmm_size = 0, rc = 0;
- bool allocated = false;
-
- LASSERT(lmmp);
-
- /* Free lmm */
- if (*lmmp && !lsm) {
- int stripe_cnt;
-
- stripe_cnt = lmv_mds_md_stripe_count_get(*lmmp);
- lmm_size = lmv_mds_md_size(stripe_cnt,
- le32_to_cpu((*lmmp)->lmv_magic));
- if (!lmm_size)
- return -EINVAL;
- kvfree(*lmmp);
- *lmmp = NULL;
- return 0;
- }
-
- /* Alloc lmm */
- if (!*lmmp && !lsm) {
- lmm_size = lmv_mds_md_size(stripe_count, LMV_MAGIC);
- LASSERT(lmm_size > 0);
- *lmmp = libcfs_kvzalloc(lmm_size, GFP_NOFS);
- if (!*lmmp)
- return -ENOMEM;
- lmv_mds_md_stripe_count_set(*lmmp, stripe_count);
- (*lmmp)->lmv_magic = cpu_to_le32(LMV_MAGIC);
- return lmm_size;
- }
-
- /* pack lmm */
- LASSERT(lsm);
- lmm_size = lmv_mds_md_size(lsm->lsm_md_stripe_count,
- lsm->lsm_md_magic);
- if (!*lmmp) {
- *lmmp = libcfs_kvzalloc(lmm_size, GFP_NOFS);
- if (!*lmmp)
- return -ENOMEM;
- allocated = true;
- }
-
- switch (lsm->lsm_md_magic) {
- case LMV_MAGIC_V1:
- rc = lmv_pack_md_v1(lsm, &(*lmmp)->lmv_md_v1);
- break;
- default:
- rc = -EINVAL;
- break;
- }
-
- if (rc && allocated) {
- kvfree(*lmmp);
- *lmmp = NULL;
- }
-
- return lmm_size;
-}
-
static int lmv_unpack_md_v1(struct obd_export *exp, struct lmv_stripe_md *lsm,
const struct lmv_mds_md_v1 *lmm1)
{
@@ -2903,8 +2805,8 @@ static int lmv_unpack_md_v1(struct obd_export *exp, struct lmv_stripe_md *lsm,
return rc;
}
-int lmv_unpack_md(struct obd_export *exp, struct lmv_stripe_md **lsmp,
- const union lmv_mds_md *lmm, int stripe_count)
+static int lmv_unpackmd(struct obd_export *exp, struct lmv_stripe_md **lsmp,
+ const union lmv_mds_md *lmm, size_t lmm_size)
{
struct lmv_stripe_md *lsm;
bool allocated = false;
@@ -2933,17 +2835,6 @@ int lmv_unpack_md(struct obd_export *exp, struct lmv_stripe_md **lsmp,
return 0;
}
- /* Alloc memmd */
- if (!lsm && !lmm) {
- lsm_size = lmv_stripe_md_size(stripe_count);
- lsm = libcfs_kvzalloc(lsm_size, GFP_NOFS);
- if (!lsm)
- return -ENOMEM;
- lsm->lsm_md_stripe_count = stripe_count;
- *lsmp = lsm;
- return 0;
- }
-
if (le32_to_cpu(lmm->lmv_magic) == LMV_MAGIC_STRIPE)
return -EPERM;
@@ -2991,38 +2882,17 @@ int lmv_unpack_md(struct obd_export *exp, struct lmv_stripe_md **lsmp,
}
return lsm_size;
}
-EXPORT_SYMBOL(lmv_unpack_md);
-static int lmv_unpackmd(struct obd_export *exp, struct lov_stripe_md **lsmp,
- struct lov_mds_md *lmm, int disk_len)
+void lmv_free_memmd(struct lmv_stripe_md *lsm)
{
- return lmv_unpack_md(exp, (struct lmv_stripe_md **)lsmp,
- (union lmv_mds_md *)lmm, disk_len);
-}
-
-static int lmv_packmd(struct obd_export *exp, struct lov_mds_md **lmmp,
- struct lov_stripe_md *lsm)
-{
- const struct lmv_stripe_md *lmv = (struct lmv_stripe_md *)lsm;
- struct obd_device *obd = exp->exp_obd;
- struct lmv_obd *lmv_obd = &obd->u.lmv;
- int stripe_count;
-
- if (!lmmp) {
- if (lsm)
- stripe_count = lmv->lsm_md_stripe_count;
- else
- stripe_count = lmv_obd->desc.ld_tgt_count;
-
- return lmv_mds_md_size(stripe_count, LMV_MAGIC_V1);
- }
-
- return lmv_pack_md((union lmv_mds_md **)lmmp, lmv, 0);
+ lmv_unpackmd(NULL, &lsm, NULL, 0);
}
+EXPORT_SYMBOL(lmv_free_memmd);
static int lmv_cancel_unused(struct obd_export *exp, const struct lu_fid *fid,
- ldlm_policy_data_t *policy, enum ldlm_mode mode,
- enum ldlm_cancel_flags flags, void *opaque)
+ union ldlm_policy_data *policy,
+ enum ldlm_mode mode, enum ldlm_cancel_flags flags,
+ void *opaque)
{
struct obd_device *obd = exp->exp_obd;
struct lmv_obd *lmv = &obd->u.lmv;
@@ -3064,7 +2934,7 @@ static int lmv_set_lock_data(struct obd_export *exp,
static enum ldlm_mode lmv_lock_match(struct obd_export *exp, __u64 flags,
const struct lu_fid *fid,
enum ldlm_type type,
- ldlm_policy_data_t *policy,
+ union ldlm_policy_data *policy,
enum ldlm_mode mode,
struct lustre_handle *lockh)
{
@@ -3271,32 +3141,6 @@ static int lmv_quotactl(struct obd_device *unused, struct obd_export *exp,
return rc;
}
-static int lmv_quotacheck(struct obd_device *unused, struct obd_export *exp,
- struct obd_quotactl *oqctl)
-{
- struct obd_device *obd = class_exp2obd(exp);
- struct lmv_obd *lmv = &obd->u.lmv;
- struct lmv_tgt_desc *tgt;
- int rc = 0;
- u32 i;
-
- for (i = 0; i < lmv->desc.ld_tgt_count; i++) {
- int err;
-
- tgt = lmv->tgts[i];
- if (!tgt || !tgt->ltd_exp || !tgt->ltd_active) {
- CERROR("lmv idx %d inactive\n", i);
- return -EIO;
- }
-
- err = obd_quotacheck(tgt->ltd_exp, oqctl);
- if (err && !rc)
- rc = err;
- }
-
- return rc;
-}
-
static int lmv_merge_attr(struct obd_export *exp,
const struct lmv_stripe_md *lsm,
struct cl_attr *attr,
@@ -3349,12 +3193,9 @@ static struct obd_ops lmv_obd_ops = {
.statfs = lmv_statfs,
.get_info = lmv_get_info,
.set_info_async = lmv_set_info_async,
- .packmd = lmv_packmd,
- .unpackmd = lmv_unpackmd,
.notify = lmv_notify,
.get_uuid = lmv_get_uuid,
.iocontrol = lmv_iocontrol,
- .quotacheck = lmv_quotacheck,
.quotactl = lmv_quotactl
};
@@ -3363,7 +3204,6 @@ static struct md_ops lmv_md_ops = {
.null_inode = lmv_null_inode,
.close = lmv_close,
.create = lmv_create,
- .done_writing = lmv_done_writing,
.enqueue = lmv_enqueue,
.getattr = lmv_getattr,
.getxattr = lmv_getxattr,
@@ -3388,6 +3228,7 @@ static struct md_ops lmv_md_ops = {
.intent_getattr_async = lmv_intent_getattr_async,
.revalidate_lock = lmv_revalidate_lock,
.get_fid_from_lsm = lmv_get_fid_from_lsm,
+ .unpackmd = lmv_unpackmd,
};
static int __init lmv_init(void)
diff --git a/drivers/staging/lustre/lustre/lov/lov_cl_internal.h b/drivers/staging/lustre/lustre/lov/lov_cl_internal.h
index 4d2b7d303fea..c49a34bf10e5 100644
--- a/drivers/staging/lustre/lustre/lov/lov_cl_internal.h
+++ b/drivers/staging/lustre/lustre/lov/lov_cl_internal.h
@@ -217,7 +217,7 @@ struct lov_object {
union lov_layout_state {
struct lov_layout_raid0 {
- unsigned lo_nr;
+ unsigned int lo_nr;
/**
* When this is true, lov_object::lo_attr contains
* valid up to date attributes for a top-level
@@ -412,7 +412,6 @@ struct lov_io_sub {
int sub_refcheck;
int sub_refcheck2;
int sub_reenter;
- void *sub_cookie;
};
/**
@@ -473,20 +472,6 @@ struct lov_session {
struct lov_sublock_env ls_subenv;
};
-/**
- * State of transfer for lov.
- */
-struct lov_req {
- struct cl_req_slice lr_cl;
-};
-
-/**
- * State of transfer for lovsub.
- */
-struct lovsub_req {
- struct cl_req_slice lsrq_cl;
-};
-
extern struct lu_device_type lov_device_type;
extern struct lu_device_type lovsub_device_type;
@@ -497,11 +482,9 @@ extern struct kmem_cache *lov_lock_kmem;
extern struct kmem_cache *lov_object_kmem;
extern struct kmem_cache *lov_thread_kmem;
extern struct kmem_cache *lov_session_kmem;
-extern struct kmem_cache *lov_req_kmem;
extern struct kmem_cache *lovsub_lock_kmem;
extern struct kmem_cache *lovsub_object_kmem;
-extern struct kmem_cache *lovsub_req_kmem;
extern struct kmem_cache *lov_lock_link_kmem;
@@ -700,11 +683,6 @@ static inline struct lov_page *cl2lov_page(const struct cl_page_slice *slice)
return container_of0(slice, struct lov_page, lps_cl);
}
-static inline struct lov_req *cl2lov_req(const struct cl_req_slice *slice)
-{
- return container_of0(slice, struct lov_req, lr_cl);
-}
-
static inline struct lovsub_page *
cl2lovsub_page(const struct cl_page_slice *slice)
{
@@ -712,11 +690,6 @@ cl2lovsub_page(const struct cl_page_slice *slice)
return container_of0(slice, struct lovsub_page, lsb_cl);
}
-static inline struct lovsub_req *cl2lovsub_req(const struct cl_req_slice *slice)
-{
- return container_of0(slice, struct lovsub_req, lsrq_cl);
-}
-
static inline struct lov_io *cl2lov_io(const struct lu_env *env,
const struct cl_io_slice *ios)
{
diff --git a/drivers/staging/lustre/lustre/lov/lov_dev.c b/drivers/staging/lustre/lustre/lov/lov_dev.c
index 056ae2ed88e8..7301f6e579a1 100644
--- a/drivers/staging/lustre/lustre/lov/lov_dev.c
+++ b/drivers/staging/lustre/lustre/lov/lov_dev.c
@@ -46,11 +46,9 @@ struct kmem_cache *lov_lock_kmem;
struct kmem_cache *lov_object_kmem;
struct kmem_cache *lov_thread_kmem;
struct kmem_cache *lov_session_kmem;
-struct kmem_cache *lov_req_kmem;
struct kmem_cache *lovsub_lock_kmem;
struct kmem_cache *lovsub_object_kmem;
-struct kmem_cache *lovsub_req_kmem;
struct kmem_cache *lov_lock_link_kmem;
@@ -79,11 +77,6 @@ struct lu_kmem_descr lov_caches[] = {
.ckd_size = sizeof(struct lov_session)
},
{
- .ckd_cache = &lov_req_kmem,
- .ckd_name = "lov_req_kmem",
- .ckd_size = sizeof(struct lov_req)
- },
- {
.ckd_cache = &lovsub_lock_kmem,
.ckd_name = "lovsub_lock_kmem",
.ckd_size = sizeof(struct lovsub_lock)
@@ -94,11 +87,6 @@ struct lu_kmem_descr lov_caches[] = {
.ckd_size = sizeof(struct lovsub_object)
},
{
- .ckd_cache = &lovsub_req_kmem,
- .ckd_name = "lovsub_req_kmem",
- .ckd_size = sizeof(struct lovsub_req)
- },
- {
.ckd_cache = &lov_lock_link_kmem,
.ckd_name = "lov_lock_link_kmem",
.ckd_size = sizeof(struct lov_lock_link)
@@ -110,25 +98,6 @@ struct lu_kmem_descr lov_caches[] = {
/*****************************************************************************
*
- * Lov transfer operations.
- *
- */
-
-static void lov_req_completion(const struct lu_env *env,
- const struct cl_req_slice *slice, int ioret)
-{
- struct lov_req *lr;
-
- lr = cl2lov_req(slice);
- kmem_cache_free(lov_req_kmem, lr);
-}
-
-static const struct cl_req_operations lov_req_ops = {
- .cro_completion = lov_req_completion
-};
-
-/*****************************************************************************
- *
* Lov device and device type functions.
*
*/
@@ -248,26 +217,6 @@ static int lov_device_init(const struct lu_env *env, struct lu_device *d,
return rc;
}
-static int lov_req_init(const struct lu_env *env, struct cl_device *dev,
- struct cl_req *req)
-{
- struct lov_req *lr;
- int result;
-
- lr = kmem_cache_zalloc(lov_req_kmem, GFP_NOFS);
- if (lr) {
- cl_req_slice_add(req, &lr->lr_cl, dev, &lov_req_ops);
- result = 0;
- } else {
- result = -ENOMEM;
- }
- return result;
-}
-
-static const struct cl_device_operations lov_cl_ops = {
- .cdo_req_init = lov_req_init
-};
-
static void lov_emerg_free(struct lov_device_emerg **emrg, int nr)
{
int i;
@@ -478,7 +427,6 @@ static struct lu_device *lov_device_alloc(const struct lu_env *env,
cl_device_init(&ld->ld_cl, t);
d = lov2lu_dev(ld);
d->ld_ops = &lov_lu_ops;
- ld->ld_cl.cd_ops = &lov_cl_ops;
mutex_init(&ld->ld_mutex);
lockdep_set_class(&ld->ld_mutex, &cl_lov_device_mutex_class);
diff --git a/drivers/staging/lustre/lustre/lov/lov_ea.c b/drivers/staging/lustre/lustre/lov/lov_ea.c
index 214c561767e0..ac0bf64c08c1 100644
--- a/drivers/staging/lustre/lustre/lov/lov_ea.c
+++ b/drivers/staging/lustre/lustre/lov/lov_ea.c
@@ -76,18 +76,19 @@ static int lsm_lmm_verify_common(struct lov_mds_md *lmm, int lmm_bytes,
return 0;
}
-struct lov_stripe_md *lsm_alloc_plain(__u16 stripe_count, int *size)
+struct lov_stripe_md *lsm_alloc_plain(u16 stripe_count)
{
+ size_t oinfo_ptrs_size, lsm_size;
struct lov_stripe_md *lsm;
struct lov_oinfo *loi;
- int i, oinfo_ptrs_size;
+ int i;
LASSERT(stripe_count <= LOV_MAX_STRIPE_COUNT);
oinfo_ptrs_size = sizeof(struct lov_oinfo *) * stripe_count;
- *size = sizeof(struct lov_stripe_md) + oinfo_ptrs_size;
+ lsm_size = sizeof(*lsm) + oinfo_ptrs_size;
- lsm = libcfs_kvzalloc(*size, GFP_NOFS);
+ lsm = libcfs_kvzalloc(lsm_size, GFP_NOFS);
if (!lsm)
return NULL;
@@ -117,9 +118,43 @@ void lsm_free_plain(struct lov_stripe_md *lsm)
kvfree(lsm);
}
-static void lsm_unpackmd_common(struct lov_stripe_md *lsm,
- struct lov_mds_md *lmm)
+/*
+ * Find minimum stripe maxbytes value. For inactive or
+ * reconnecting targets use LUSTRE_EXT3_STRIPE_MAXBYTES.
+ */
+static loff_t lov_tgt_maxbytes(struct lov_tgt_desc *tgt)
+{
+ loff_t maxbytes = LUSTRE_EXT3_STRIPE_MAXBYTES;
+ struct obd_import *imp;
+
+ if (!tgt->ltd_active)
+ return maxbytes;
+
+ imp = tgt->ltd_obd->u.cli.cl_import;
+ if (!imp)
+ return maxbytes;
+
+ spin_lock(&imp->imp_lock);
+ if (imp->imp_state == LUSTRE_IMP_FULL &&
+ (imp->imp_connect_data.ocd_connect_flags & OBD_CONNECT_MAXBYTES) &&
+ imp->imp_connect_data.ocd_maxbytes > 0)
+ maxbytes = imp->imp_connect_data.ocd_maxbytes;
+
+ spin_unlock(&imp->imp_lock);
+
+ return maxbytes;
+}
+
+static int lsm_unpackmd_common(struct lov_obd *lov,
+ struct lov_stripe_md *lsm,
+ struct lov_mds_md *lmm,
+ struct lov_ost_data_v1 *objects)
{
+ loff_t stripe_maxbytes = LLONG_MAX;
+ unsigned int stripe_count;
+ struct lov_oinfo *loi;
+ unsigned int i;
+
/*
* This supposes lov_mds_md_v1/v3 first fields
* are the same
@@ -129,11 +164,54 @@ static void lsm_unpackmd_common(struct lov_stripe_md *lsm,
lsm->lsm_pattern = le32_to_cpu(lmm->lmm_pattern);
lsm->lsm_layout_gen = le16_to_cpu(lmm->lmm_layout_gen);
lsm->lsm_pool_name[0] = '\0';
+
+ stripe_count = lsm_is_released(lsm) ? 0 : lsm->lsm_stripe_count;
+
+ for (i = 0; i < stripe_count; i++) {
+ loff_t tgt_bytes;
+
+ loi = lsm->lsm_oinfo[i];
+ ostid_le_to_cpu(&objects[i].l_ost_oi, &loi->loi_oi);
+ loi->loi_ost_idx = le32_to_cpu(objects[i].l_ost_idx);
+ loi->loi_ost_gen = le32_to_cpu(objects[i].l_ost_gen);
+ if (lov_oinfo_is_dummy(loi))
+ continue;
+
+ if (loi->loi_ost_idx >= lov->desc.ld_tgt_count &&
+ !lov2obd(lov)->obd_process_conf) {
+ CERROR("%s: OST index %d more than OST count %d\n",
+ (char *)lov->desc.ld_uuid.uuid,
+ loi->loi_ost_idx, lov->desc.ld_tgt_count);
+ lov_dump_lmm_v1(D_WARNING, lmm);
+ return -EINVAL;
+ }
+
+ if (!lov->lov_tgts[loi->loi_ost_idx]) {
+ CERROR("%s: OST index %d missing\n",
+ (char *)lov->desc.ld_uuid.uuid,
+ loi->loi_ost_idx);
+ lov_dump_lmm_v1(D_WARNING, lmm);
+ continue;
+ }
+
+ tgt_bytes = lov_tgt_maxbytes(lov->lov_tgts[loi->loi_ost_idx]);
+ stripe_maxbytes = min_t(loff_t, stripe_maxbytes, tgt_bytes);
+ }
+
+ if (stripe_maxbytes == LLONG_MAX)
+ stripe_maxbytes = LUSTRE_EXT3_STRIPE_MAXBYTES;
+
+ if (!lsm->lsm_stripe_count)
+ lsm->lsm_maxbytes = stripe_maxbytes * lov->desc.ld_tgt_count;
+ else
+ lsm->lsm_maxbytes = stripe_maxbytes * lsm->lsm_stripe_count;
+
+ return 0;
}
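/*
 * Illustrative sketch (not part of the patch): the maxbytes computation
 * that lsm_unpackmd_common() above now performs. Each active stripe
 * target contributes its limit, the minimum wins, and a conservative
 * default applies when no target contributed. Names and the default
 * value below are invented.
 */
#include <limits.h>
#include <stdio.h>

#define DEFAULT_STRIPE_MAXBYTES (1LL << 41)	/* invented stand-in */

static long long file_maxbytes(const long long *tgt_limits, int ntgts,
			       int stripe_count)
{
	long long min = LLONG_MAX;
	int i;

	for (i = 0; i < ntgts; i++)
		if (tgt_limits[i] > 0 && tgt_limits[i] < min)
			min = tgt_limits[i];

	if (min == LLONG_MAX)
		min = DEFAULT_STRIPE_MAXBYTES;	/* nobody answered */

	/* a released file (stripe_count == 0) still gets a sane limit */
	return min * (stripe_count ? stripe_count : ntgts);
}

int main(void)
{
	long long limits[] = { 1LL << 42, 1LL << 40 };

	printf("%lld\n", file_maxbytes(limits, 2, 2));	/* 2 * 2^40 */
	return 0;
}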
static void
lsm_stripe_by_index_plain(struct lov_stripe_md *lsm, int *stripeno,
- u64 *lov_off, u64 *swidth)
+ loff_t *lov_off, loff_t *swidth)
{
if (swidth)
*swidth = (u64)lsm->lsm_stripe_size * lsm->lsm_stripe_count;
@@ -141,36 +219,12 @@ lsm_stripe_by_index_plain(struct lov_stripe_md *lsm, int *stripeno,
static void
lsm_stripe_by_offset_plain(struct lov_stripe_md *lsm, int *stripeno,
- u64 *lov_off, u64 *swidth)
+ loff_t *lov_off, loff_t *swidth)
{
if (swidth)
*swidth = (u64)lsm->lsm_stripe_size * lsm->lsm_stripe_count;
}
-/* Find minimum stripe maxbytes value. For inactive or
- * reconnecting targets use LUSTRE_EXT3_STRIPE_MAXBYTES.
- */
-static void lov_tgt_maxbytes(struct lov_tgt_desc *tgt, __u64 *stripe_maxbytes)
-{
- struct obd_import *imp = tgt->ltd_obd->u.cli.cl_import;
-
- if (!imp || !tgt->ltd_active) {
- *stripe_maxbytes = LUSTRE_EXT3_STRIPE_MAXBYTES;
- return;
- }
-
- spin_lock(&imp->imp_lock);
- if (imp->imp_state == LUSTRE_IMP_FULL &&
- (imp->imp_connect_data.ocd_connect_flags & OBD_CONNECT_MAXBYTES) &&
- imp->imp_connect_data.ocd_maxbytes > 0) {
- if (*stripe_maxbytes > imp->imp_connect_data.ocd_maxbytes)
- *stripe_maxbytes = imp->imp_connect_data.ocd_maxbytes;
- } else {
- *stripe_maxbytes = LUSTRE_EXT3_STRIPE_MAXBYTES;
- }
- spin_unlock(&imp->imp_lock);
-}
-
static int lsm_lmm_verify_v1(struct lov_mds_md_v1 *lmm, int lmm_bytes,
__u16 *stripe_count)
{
@@ -197,45 +251,7 @@ static int lsm_lmm_verify_v1(struct lov_mds_md_v1 *lmm, int lmm_bytes,
static int lsm_unpackmd_v1(struct lov_obd *lov, struct lov_stripe_md *lsm,
struct lov_mds_md_v1 *lmm)
{
- struct lov_oinfo *loi;
- int i;
- int stripe_count;
- __u64 stripe_maxbytes = OBD_OBJECT_EOF;
-
- lsm_unpackmd_common(lsm, lmm);
-
- stripe_count = lsm_is_released(lsm) ? 0 : lsm->lsm_stripe_count;
-
- for (i = 0; i < stripe_count; i++) {
- /* XXX LOV STACKING call down to osc_unpackmd() */
- loi = lsm->lsm_oinfo[i];
- ostid_le_to_cpu(&lmm->lmm_objects[i].l_ost_oi, &loi->loi_oi);
- loi->loi_ost_idx = le32_to_cpu(lmm->lmm_objects[i].l_ost_idx);
- loi->loi_ost_gen = le32_to_cpu(lmm->lmm_objects[i].l_ost_gen);
- if (lov_oinfo_is_dummy(loi))
- continue;
-
- if (loi->loi_ost_idx >= lov->desc.ld_tgt_count) {
- CERROR("OST index %d more than OST count %d\n",
- loi->loi_ost_idx, lov->desc.ld_tgt_count);
- lov_dump_lmm_v1(D_WARNING, lmm);
- return -EINVAL;
- }
- if (!lov->lov_tgts[loi->loi_ost_idx]) {
- CERROR("OST index %d missing\n", loi->loi_ost_idx);
- lov_dump_lmm_v1(D_WARNING, lmm);
- return -EINVAL;
- }
- /* calculate the minimum stripe max bytes */
- lov_tgt_maxbytes(lov->lov_tgts[loi->loi_ost_idx],
- &stripe_maxbytes);
- }
-
- lsm->lsm_maxbytes = stripe_maxbytes * lsm->lsm_stripe_count;
- if (lsm->lsm_stripe_count == 0)
- lsm->lsm_maxbytes = stripe_maxbytes * lov->desc.ld_tgt_count;
-
- return 0;
+ return lsm_unpackmd_common(lov, lsm, lmm, lmm->lmm_objects);
}
const struct lsm_operations lsm_v1_ops = {
@@ -275,55 +291,21 @@ static int lsm_lmm_verify_v3(struct lov_mds_md *lmmv1, int lmm_bytes,
}
static int lsm_unpackmd_v3(struct lov_obd *lov, struct lov_stripe_md *lsm,
- struct lov_mds_md *lmmv1)
+ struct lov_mds_md *lmm)
{
- struct lov_mds_md_v3 *lmm;
- struct lov_oinfo *loi;
- int i;
- int stripe_count;
- __u64 stripe_maxbytes = OBD_OBJECT_EOF;
- int cplen = 0;
+ struct lov_mds_md_v3 *lmm_v3 = (struct lov_mds_md_v3 *)lmm;
+ size_t cplen = 0;
+ int rc;
- lmm = (struct lov_mds_md_v3 *)lmmv1;
+ rc = lsm_unpackmd_common(lov, lsm, lmm, lmm_v3->lmm_objects);
+ if (rc)
+ return rc;
- lsm_unpackmd_common(lsm, (struct lov_mds_md_v1 *)lmm);
-
- stripe_count = lsm_is_released(lsm) ? 0 : lsm->lsm_stripe_count;
-
- cplen = strlcpy(lsm->lsm_pool_name, lmm->lmm_pool_name,
+ cplen = strlcpy(lsm->lsm_pool_name, lmm_v3->lmm_pool_name,
sizeof(lsm->lsm_pool_name));
if (cplen >= sizeof(lsm->lsm_pool_name))
return -E2BIG;
- for (i = 0; i < stripe_count; i++) {
- /* XXX LOV STACKING call down to osc_unpackmd() */
- loi = lsm->lsm_oinfo[i];
- ostid_le_to_cpu(&lmm->lmm_objects[i].l_ost_oi, &loi->loi_oi);
- loi->loi_ost_idx = le32_to_cpu(lmm->lmm_objects[i].l_ost_idx);
- loi->loi_ost_gen = le32_to_cpu(lmm->lmm_objects[i].l_ost_gen);
- if (lov_oinfo_is_dummy(loi))
- continue;
-
- if (loi->loi_ost_idx >= lov->desc.ld_tgt_count) {
- CERROR("OST index %d more than OST count %d\n",
- loi->loi_ost_idx, lov->desc.ld_tgt_count);
- lov_dump_lmm_v3(D_WARNING, lmm);
- return -EINVAL;
- }
- if (!lov->lov_tgts[loi->loi_ost_idx]) {
- CERROR("OST index %d missing\n", loi->loi_ost_idx);
- lov_dump_lmm_v3(D_WARNING, lmm);
- return -EINVAL;
- }
- /* calculate the minimum stripe max bytes */
- lov_tgt_maxbytes(lov->lov_tgts[loi->loi_ost_idx],
- &stripe_maxbytes);
- }
-
- lsm->lsm_maxbytes = stripe_maxbytes * lsm->lsm_stripe_count;
- if (lsm->lsm_stripe_count == 0)
- lsm->lsm_maxbytes = stripe_maxbytes * lov->desc.ld_tgt_count;
-
return 0;
}
diff --git a/drivers/staging/lustre/lustre/lov/lov_internal.h b/drivers/staging/lustre/lustre/lov/lov_internal.h
index 07e5ede3e952..774499c74daa 100644
--- a/drivers/staging/lustre/lustre/lov/lov_internal.h
+++ b/drivers/staging/lustre/lustre/lov/lov_internal.h
@@ -36,6 +36,77 @@
#include "../include/obd_class.h"
#include "../include/lustre/lustre_user.h"
+/*
+ * If we are unable to get the maximum object size from the OST in
+ * ocd_maxbytes using OBD_CONNECT_MAXBYTES, then we fall back to using
+ * the old maximum object size from ext3.
+ */
+#define LUSTRE_EXT3_STRIPE_MAXBYTES 0x1fffffff000ULL
+
+struct lov_stripe_md {
+ atomic_t lsm_refc;
+ spinlock_t lsm_lock;
+ pid_t lsm_lock_owner; /* debugging */
+
+ /*
+ * maximum possible file size, might change as OSTs status changes,
+ * e.g. disconnected, deactivated
+ */
+ loff_t lsm_maxbytes;
+ struct ost_id lsm_oi;
+ u32 lsm_magic;
+ u32 lsm_stripe_size;
+ u32 lsm_pattern; /* RAID0, RAID1, released, ... */
+ u16 lsm_stripe_count;
+ u16 lsm_layout_gen;
+ char lsm_pool_name[LOV_MAXPOOLNAME + 1];
+ struct lov_oinfo *lsm_oinfo[0];
+};
+
+static inline bool lsm_is_released(struct lov_stripe_md *lsm)
+{
+ return !!(lsm->lsm_pattern & LOV_PATTERN_F_RELEASED);
+}
+
+static inline bool lsm_has_objects(struct lov_stripe_md *lsm)
+{
+ if (!lsm)
+ return false;
+
+ if (lsm_is_released(lsm))
+ return false;
+
+ return true;
+}
+
+struct lsm_operations {
+ void (*lsm_free)(struct lov_stripe_md *);
+ void (*lsm_stripe_by_index)(struct lov_stripe_md *, int *, loff_t *,
+ loff_t *);
+ void (*lsm_stripe_by_offset)(struct lov_stripe_md *, int *, loff_t *,
+ loff_t *);
+ int (*lsm_lmm_verify)(struct lov_mds_md *lmm, int lmm_bytes,
+ u16 *stripe_count);
+ int (*lsm_unpackmd)(struct lov_obd *lov, struct lov_stripe_md *lsm,
+ struct lov_mds_md *lmm);
+};
+
+extern const struct lsm_operations lsm_v1_ops;
+extern const struct lsm_operations lsm_v3_ops;
+
+static inline const struct lsm_operations *lsm_op_find(int magic)
+{
+ switch (magic) {
+ case LOV_MAGIC_V1:
+ return &lsm_v1_ops;
+ case LOV_MAGIC_V3:
+ return &lsm_v3_ops;
+ default:
+ CERROR("unrecognized lsm_magic %08x\n", magic);
+ return NULL;
+ }
+}
+
/* lov_do_div64(a, b) returns a % b, and a = a / b.
* The 32-bit code is LOV-specific due to knowing about stripe limits in
* order to reduce the divisor to a 32-bit number. If the divisor is
@@ -110,8 +181,6 @@ struct lov_request_set {
atomic_t set_completes;
atomic_t set_success;
atomic_t set_finish_checked;
- struct llog_cookie *set_cookies;
- int set_cookie_sent;
struct list_head set_list;
wait_queue_head_t set_waitq;
};
@@ -132,8 +201,6 @@ static inline void lov_put_reqset(struct lov_request_set *set)
(char *)((lv)->lov_tgts[index]->ltd_uuid.uuid)
/* lov_merge.c */
-void lov_merge_attrs(struct obdo *tgt, struct obdo *src, u64 valid,
- struct lov_stripe_md *lsm, int stripeno, int *set);
int lov_merge_lvb_kms(struct lov_stripe_md *lsm,
struct ost_lvb *lvb, __u64 *kms_place);
@@ -150,17 +217,9 @@ pgoff_t lov_stripe_pgoff(struct lov_stripe_md *lsm, pgoff_t stripe_index,
int stripe);
/* lov_request.c */
-int lov_update_common_set(struct lov_request_set *set,
- struct lov_request *req, int rc);
int lov_prep_getattr_set(struct obd_export *exp, struct obd_info *oinfo,
struct lov_request_set **reqset);
int lov_fini_getattr_set(struct lov_request_set *set);
-int lov_prep_setattr_set(struct obd_export *exp, struct obd_info *oinfo,
- struct obd_trans_info *oti,
- struct lov_request_set **reqset);
-int lov_update_setattr_set(struct lov_request_set *set,
- struct lov_request *req, int rc);
-int lov_fini_setattr_set(struct lov_request_set *set);
int lov_prep_statfs_set(struct obd_device *obd, struct obd_info *oinfo,
struct lov_request_set **reqset);
int lov_fini_statfs(struct obd_device *obd, struct obd_statfs *osfs,
@@ -186,12 +245,10 @@ int lov_del_target(struct obd_device *obd, __u32 index,
struct obd_uuid *uuidp, int gen);
/* lov_pack.c */
-int lov_packmd(struct obd_export *exp, struct lov_mds_md **lmm,
- struct lov_stripe_md *lsm);
-int lov_unpackmd(struct obd_export *exp, struct lov_stripe_md **lsmp,
- struct lov_mds_md *lmm, int lmm_bytes);
-int lov_alloc_memmd(struct lov_stripe_md **lsmp, __u16 stripe_count,
- int pattern, int magic);
+ssize_t lov_lsm_pack(const struct lov_stripe_md *lsm, void *buf,
+ size_t buf_size);
+struct lov_stripe_md *lov_unpackmd(struct lov_obd *lov, struct lov_mds_md *lmm,
+ size_t lmm_size);
int lov_free_memmd(struct lov_stripe_md **lsmp);
void lov_dump_lmm_v1(int level, struct lov_mds_md_v1 *lmm);
@@ -199,7 +256,7 @@ void lov_dump_lmm_v3(int level, struct lov_mds_md_v3 *lmm);
void lov_dump_lmm_common(int level, void *lmmp);
/* lov_ea.c */
-struct lov_stripe_md *lsm_alloc_plain(__u16 stripe_count, int *size);
+struct lov_stripe_md *lsm_alloc_plain(u16 stripe_count);
void lsm_free_plain(struct lov_stripe_md *lsm);
void dump_lsm(unsigned int level, const struct lov_stripe_md *lsm);
@@ -244,4 +301,9 @@ static inline bool lov_oinfo_is_dummy(const struct lov_oinfo *loi)
return false;
}
+static inline struct obd_device *lov2obd(const struct lov_obd *lov)
+{
+ return container_of0(lov, struct obd_device, u.lov);
+}
+
#endif
diff --git a/drivers/staging/lustre/lustre/lov/lov_io.c b/drivers/staging/lustre/lustre/lov/lov_io.c
index d10157985ed9..002326c282a7 100644
--- a/drivers/staging/lustre/lustre/lov/lov_io.c
+++ b/drivers/staging/lustre/lustre/lov/lov_io.c
@@ -86,6 +86,8 @@ static void lov_io_sub_inherit(struct cl_io *io, struct lov_io *lio,
switch (io->ci_type) {
case CIT_SETATTR: {
io->u.ci_setattr.sa_attr = parent->u.ci_setattr.sa_attr;
+ io->u.ci_setattr.sa_attr_flags =
+ parent->u.ci_setattr.sa_attr_flags;
io->u.ci_setattr.sa_valid = parent->u.ci_setattr.sa_valid;
io->u.ci_setattr.sa_stripe_index = stripe;
io->u.ci_setattr.sa_parent_fid =
@@ -98,6 +100,12 @@ static void lov_io_sub_inherit(struct cl_io *io, struct lov_io *lio,
}
break;
}
+ case CIT_DATA_VERSION: {
+ io->u.ci_data_version.dv_data_version = 0;
+ io->u.ci_data_version.dv_flags =
+ parent->u.ci_data_version.dv_flags;
+ break;
+ }
case CIT_FAULT: {
struct cl_object *obj = parent->ci_obj;
loff_t off = cl_offset(obj, parent->u.ci_fault.ft_index);
@@ -159,12 +167,7 @@ static int lov_io_sub_init(const struct lu_env *env, struct lov_io *lio,
sub->sub_env = ld->ld_emrg[stripe]->emrg_env;
sub->sub_borrowed = 1;
} else {
- void *cookie;
-
- /* obtain new environment */
- cookie = cl_env_reenter();
sub->sub_env = cl_env_get(&sub->sub_refcheck);
- cl_env_reexit(cookie);
if (IS_ERR(sub->sub_env))
result = PTR_ERR(sub->sub_env);
@@ -337,6 +340,11 @@ static int lov_io_slice_init(struct lov_io *lio, struct lov_object *obj,
lio->lis_endpos = OBD_OBJECT_EOF;
break;
+ case CIT_DATA_VERSION:
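+		/* A data version request covers the whole file. */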
+ lio->lis_pos = 0;
+ lio->lis_endpos = OBD_OBJECT_EOF;
+ break;
+
case CIT_FAULT: {
pgoff_t index = io->u.ci_fault.ft_index;
@@ -514,6 +522,24 @@ static int lov_io_end_wrapper(const struct lu_env *env, struct cl_io *io)
return 0;
}
+static void
+lov_io_data_version_end(const struct lu_env *env, const struct cl_io_slice *ios)
+{
+ struct lov_io *lio = cl2lov_io(env, ios);
+ struct cl_io *parent = lio->lis_cl.cis_io;
+ struct lov_io_sub *sub;
+
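+	/*
+	 * The file-level data version is the sum of the per-stripe
+	 * versions; the first sub-io error becomes the parent's result.
+	 */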
+ list_for_each_entry(sub, &lio->lis_active, sub_linkage) {
+ lov_io_end_wrapper(env, sub->sub_io);
+
+ parent->u.ci_data_version.dv_data_version +=
+ sub->sub_io->u.ci_data_version.dv_data_version;
+
+ if (!parent->ci_result)
+ parent->ci_result = sub->sub_io->ci_result;
+ }
+}
+
static int lov_io_iter_fini_wrapper(const struct lu_env *env, struct cl_io *io)
{
cl_io_iter_fini(env, io);
@@ -555,6 +581,65 @@ static void lov_io_unlock(const struct lu_env *env,
LASSERT(rc == 0);
}
+static int lov_io_read_ahead(const struct lu_env *env,
+ const struct cl_io_slice *ios,
+ pgoff_t start, struct cl_read_ahead *ra)
+{
+ struct lov_io *lio = cl2lov_io(env, ios);
+ struct lov_object *loo = lio->lis_object;
+ struct cl_object *obj = lov2cl(loo);
+ struct lov_layout_raid0 *r0 = lov_r0(loo);
+ unsigned int pps; /* pages per stripe */
+ struct lov_io_sub *sub;
+ pgoff_t ra_end;
+ loff_t suboff;
+ int stripe;
+ int rc;
+
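+	/* Find the stripe covering @start and delegate read-ahead to it. */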
+ stripe = lov_stripe_number(loo->lo_lsm, cl_offset(obj, start));
+ if (unlikely(!r0->lo_sub[stripe]))
+ return -EIO;
+
+ sub = lov_sub_get(env, lio, stripe);
+ if (IS_ERR(sub))
+ return PTR_ERR(sub);
+
+ lov_stripe_offset(loo->lo_lsm, cl_offset(obj, start), stripe, &suboff);
+ rc = cl_io_read_ahead(sub->sub_env, sub->sub_io,
+ cl_index(lovsub2cl(r0->lo_sub[stripe]), suboff),
+ ra);
+ lov_sub_put(sub);
+
+ CDEBUG(D_READA, DFID " cra_end = %lu, stripes = %d, rc = %d\n",
+ PFID(lu_object_fid(lov2lu(loo))), ra->cra_end, r0->lo_nr, rc);
+ if (rc)
+ return rc;
+
+ /**
+	 * Adjust the read-ahead window for the raid0 layout. ra->cra_end is
+	 * the maximum page index covered by an underlying DLM lock.
+	 * This converts cra_end from the stripe level to the file
+	 * level and makes sure it does not go beyond the stripe boundary.
+ */
+ if (r0->lo_nr == 1) /* single stripe file */
+ return 0;
+
+ /* cra_end is stripe level, convert it into file level */
+ ra_end = ra->cra_end;
+ if (ra_end != CL_PAGE_EOF)
+ ra_end = lov_stripe_pgoff(loo->lo_lsm, ra_end, stripe);
+
+ pps = loo->lo_lsm->lsm_stripe_size >> PAGE_SHIFT;
+
+ CDEBUG(D_READA, DFID " max_index = %lu, pps = %u, stripe_size = %u, stripe no = %u, start index = %lu\n",
+ PFID(lu_object_fid(lov2lu(loo))), ra_end, pps,
+ loo->lo_lsm->lsm_stripe_size, stripe, start);
+
+ /* never exceed the end of the stripe */
+ ra->cra_end = min_t(pgoff_t, ra_end, start + pps - start % pps - 1);
+ return 0;
+}
+
/**
* lov implementation of cl_operations::cio_submit() method. It takes a list
* of pages in \a queue, splits it into per-stripe sub-lists, invokes
@@ -779,6 +864,15 @@ static const struct cl_io_operations lov_io_ops = {
.cio_start = lov_io_start,
.cio_end = lov_io_end
},
+ [CIT_DATA_VERSION] = {
+ .cio_fini = lov_io_fini,
+ .cio_iter_init = lov_io_iter_init,
+ .cio_iter_fini = lov_io_iter_fini,
+ .cio_lock = lov_io_lock,
+ .cio_unlock = lov_io_unlock,
+ .cio_start = lov_io_start,
+ .cio_end = lov_io_data_version_end,
+ },
[CIT_FAULT] = {
.cio_fini = lov_io_fini,
.cio_iter_init = lov_io_iter_init,
@@ -801,6 +895,7 @@ static const struct cl_io_operations lov_io_ops = {
.cio_fini = lov_io_fini
}
},
+ .cio_read_ahead = lov_io_read_ahead,
.cio_submit = lov_io_submit,
.cio_commit_async = lov_io_commit_async,
};
@@ -820,6 +915,13 @@ static void lov_empty_io_fini(const struct lu_env *env,
wake_up_all(&lov->lo_waitq);
}
+static int lov_empty_io_submit(const struct lu_env *env,
+ const struct cl_io_slice *ios,
+ enum cl_req_type crt, struct cl_2queue *queue)
+{
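+	/* An empty layout has no stripe objects to submit pages to. */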
+ return -EBADF;
+}
+
static void lov_empty_impossible(const struct lu_env *env,
struct cl_io_slice *ios)
{
@@ -870,7 +972,7 @@ static const struct cl_io_operations lov_empty_io_ops = {
.cio_fini = lov_empty_io_fini
}
},
- .cio_submit = LOV_EMPTY_IMPOSSIBLE,
+ .cio_submit = lov_empty_io_submit,
.cio_commit_async = LOV_EMPTY_IMPOSSIBLE
};
@@ -909,6 +1011,7 @@ int lov_io_init_empty(const struct lu_env *env, struct cl_object *obj,
break;
case CIT_FSYNC:
case CIT_SETATTR:
+ case CIT_DATA_VERSION:
result = 1;
break;
case CIT_WRITE:
@@ -944,6 +1047,7 @@ int lov_io_init_released(const struct lu_env *env, struct cl_object *obj,
LASSERTF(0, "invalid type %d\n", io->ci_type);
case CIT_MISC:
case CIT_FSYNC:
+ case CIT_DATA_VERSION:
result = 1;
break;
case CIT_SETATTR:
diff --git a/drivers/staging/lustre/lustre/lov/lov_merge.c b/drivers/staging/lustre/lustre/lov/lov_merge.c
index 674af106b50b..391dfd207177 100644
--- a/drivers/staging/lustre/lustre/lov/lov_merge.c
+++ b/drivers/staging/lustre/lustre/lov/lov_merge.c
@@ -104,53 +104,3 @@ int lov_merge_lvb_kms(struct lov_stripe_md *lsm,
lvb->lvb_ctime = current_ctime;
return rc;
}
-
-void lov_merge_attrs(struct obdo *tgt, struct obdo *src, u64 valid,
- struct lov_stripe_md *lsm, int stripeno, int *set)
-{
- valid &= src->o_valid;
-
- if (*set) {
- tgt->o_valid &= valid;
- if (valid & OBD_MD_FLSIZE) {
- /* this handles sparse files properly */
- u64 lov_size;
-
- lov_size = lov_stripe_size(lsm, src->o_size, stripeno);
- if (lov_size > tgt->o_size)
- tgt->o_size = lov_size;
- }
- if (valid & OBD_MD_FLBLOCKS)
- tgt->o_blocks += src->o_blocks;
- if (valid & OBD_MD_FLBLKSZ)
- tgt->o_blksize += src->o_blksize;
- if (valid & OBD_MD_FLCTIME && tgt->o_ctime < src->o_ctime)
- tgt->o_ctime = src->o_ctime;
- if (valid & OBD_MD_FLMTIME && tgt->o_mtime < src->o_mtime)
- tgt->o_mtime = src->o_mtime;
- if (valid & OBD_MD_FLDATAVERSION)
- tgt->o_data_version += src->o_data_version;
-
- /* handle flags */
- if (valid & OBD_MD_FLFLAGS)
- tgt->o_flags &= src->o_flags;
- else
- tgt->o_flags = 0;
- } else {
- memcpy(tgt, src, sizeof(*tgt));
- tgt->o_oi = lsm->lsm_oi;
- tgt->o_valid = valid;
- if (valid & OBD_MD_FLSIZE)
- tgt->o_size = lov_stripe_size(lsm, src->o_size,
- stripeno);
- tgt->o_flags = 0;
- if (valid & OBD_MD_FLFLAGS)
- tgt->o_flags = src->o_flags;
- }
-
- /* data_version needs to be valid on all stripes to be correct! */
- if (!(valid & OBD_MD_FLDATAVERSION))
- tgt->o_valid &= ~OBD_MD_FLDATAVERSION;
-
- *set += 1;
-}
diff --git a/drivers/staging/lustre/lustre/lov/lov_obd.c b/drivers/staging/lustre/lustre/lov/lov_obd.c
index b23016f7ec26..63b064523c6a 100644
--- a/drivers/staging/lustre/lustre/lov/lov_obd.c
+++ b/drivers/staging/lustre/lustre/lov/lov_obd.c
@@ -40,19 +40,20 @@
#define DEBUG_SUBSYSTEM S_LOV
#include "../../include/linux/libcfs/libcfs.h"
-#include "../include/obd_support.h"
-#include "../include/lustre/lustre_ioctl.h"
-#include "../include/lustre_lib.h"
-#include "../include/lustre_net.h"
#include "../include/lustre/lustre_idl.h"
+#include "../include/lustre/lustre_ioctl.h"
+
+#include "../include/cl_object.h"
#include "../include/lustre_dlm.h"
+#include "../include/lustre_fid.h"
+#include "../include/lustre_lib.h"
#include "../include/lustre_mds.h"
-#include "../include/obd_class.h"
-#include "../include/lprocfs_status.h"
+#include "../include/lustre_net.h"
#include "../include/lustre_param.h"
-#include "../include/cl_object.h"
-#include "../include/lustre/ll_fiemap.h"
-#include "../include/lustre_fid.h"
+#include "../include/lustre_swab.h"
+#include "../include/lprocfs_status.h"
+#include "../include/obd_class.h"
+#include "../include/obd_support.h"
#include "lov_internal.h"
@@ -826,29 +827,6 @@ out:
return rc;
}
-static int lov_precleanup(struct obd_device *obd, enum obd_cleanup_stage stage)
-{
- struct lov_obd *lov = &obd->u.lov;
-
- switch (stage) {
- case OBD_CLEANUP_EARLY: {
- int i;
-
- for (i = 0; i < lov->desc.ld_tgt_count; i++) {
- if (!lov->lov_tgts[i] || !lov->lov_tgts[i]->ltd_active)
- continue;
- obd_precleanup(class_exp2obd(lov->lov_tgts[i]->ltd_exp),
- OBD_CLEANUP_EARLY);
- }
- break;
- }
- default:
- break;
- }
-
- return 0;
-}
-
static int lov_cleanup(struct obd_device *obd)
{
struct lov_obd *lov = &obd->u.lov;
@@ -972,163 +950,6 @@ out:
return rc;
}
-#define ASSERT_LSM_MAGIC(lsmp) \
-do { \
- LASSERT((lsmp)); \
- LASSERTF(((lsmp)->lsm_magic == LOV_MAGIC_V1 || \
- (lsmp)->lsm_magic == LOV_MAGIC_V3), \
- "%p->lsm_magic=%x\n", (lsmp), (lsmp)->lsm_magic); \
-} while (0)
-
-static int lov_getattr_interpret(struct ptlrpc_request_set *rqset,
- void *data, int rc)
-{
- struct lov_request_set *lovset = (struct lov_request_set *)data;
- int err;
-
- /* don't do attribute merge if this async op failed */
- if (rc)
- atomic_set(&lovset->set_completes, 0);
- err = lov_fini_getattr_set(lovset);
- return rc ? rc : err;
-}
-
-static int lov_getattr_async(struct obd_export *exp, struct obd_info *oinfo,
- struct ptlrpc_request_set *rqset)
-{
- struct lov_request_set *lovset;
- struct lov_obd *lov;
- struct lov_request *req;
- int rc = 0, err;
-
- LASSERT(oinfo);
- ASSERT_LSM_MAGIC(oinfo->oi_md);
-
- if (!exp || !exp->exp_obd)
- return -ENODEV;
-
- lov = &exp->exp_obd->u.lov;
-
- rc = lov_prep_getattr_set(exp, oinfo, &lovset);
- if (rc)
- return rc;
-
- CDEBUG(D_INFO, "objid "DOSTID": %ux%u byte stripes\n",
- POSTID(&oinfo->oi_md->lsm_oi), oinfo->oi_md->lsm_stripe_count,
- oinfo->oi_md->lsm_stripe_size);
-
- list_for_each_entry(req, &lovset->set_list, rq_link) {
- CDEBUG(D_INFO, "objid " DOSTID "[%d] has subobj " DOSTID " at idx%u\n",
- POSTID(&oinfo->oi_oa->o_oi), req->rq_stripe,
- POSTID(&req->rq_oi.oi_oa->o_oi), req->rq_idx);
- rc = obd_getattr_async(lov->lov_tgts[req->rq_idx]->ltd_exp,
- &req->rq_oi, rqset);
- if (rc) {
- CERROR("%s: getattr objid "DOSTID" subobj"
- DOSTID" on OST idx %d: rc = %d\n",
- exp->exp_obd->obd_name,
- POSTID(&oinfo->oi_oa->o_oi),
- POSTID(&req->rq_oi.oi_oa->o_oi),
- req->rq_idx, rc);
- goto out;
- }
- }
-
- if (!list_empty(&rqset->set_requests)) {
- LASSERT(rc == 0);
- LASSERT(!rqset->set_interpret);
- rqset->set_interpret = lov_getattr_interpret;
- rqset->set_arg = (void *)lovset;
- return rc;
- }
-out:
- if (rc)
- atomic_set(&lovset->set_completes, 0);
- err = lov_fini_getattr_set(lovset);
- return rc ? rc : err;
-}
-
-static int lov_setattr_interpret(struct ptlrpc_request_set *rqset,
- void *data, int rc)
-{
- struct lov_request_set *lovset = (struct lov_request_set *)data;
- int err;
-
- if (rc)
- atomic_set(&lovset->set_completes, 0);
- err = lov_fini_setattr_set(lovset);
- return rc ? rc : err;
-}
-
-/* If @oti is given, the request goes from MDS and responses from OSTs are not
- * needed. Otherwise, a client is waiting for responses.
- */
-static int lov_setattr_async(struct obd_export *exp, struct obd_info *oinfo,
- struct obd_trans_info *oti,
- struct ptlrpc_request_set *rqset)
-{
- struct lov_request_set *set;
- struct lov_request *req;
- struct lov_obd *lov;
- int rc = 0;
-
- LASSERT(oinfo);
- ASSERT_LSM_MAGIC(oinfo->oi_md);
- if (oinfo->oi_oa->o_valid & OBD_MD_FLCOOKIE) {
- LASSERT(oti);
- LASSERT(oti->oti_logcookies);
- }
-
- if (!exp || !exp->exp_obd)
- return -ENODEV;
-
- lov = &exp->exp_obd->u.lov;
- rc = lov_prep_setattr_set(exp, oinfo, oti, &set);
- if (rc)
- return rc;
-
- CDEBUG(D_INFO, "objid "DOSTID": %ux%u byte stripes\n",
- POSTID(&oinfo->oi_md->lsm_oi),
- oinfo->oi_md->lsm_stripe_count,
- oinfo->oi_md->lsm_stripe_size);
-
- list_for_each_entry(req, &set->set_list, rq_link) {
- if (oinfo->oi_oa->o_valid & OBD_MD_FLCOOKIE)
- oti->oti_logcookies = set->set_cookies + req->rq_stripe;
-
- CDEBUG(D_INFO, "objid " DOSTID "[%d] has subobj " DOSTID " at idx%u\n",
- POSTID(&oinfo->oi_oa->o_oi), req->rq_stripe,
- POSTID(&req->rq_oi.oi_oa->o_oi), req->rq_idx);
-
- rc = obd_setattr_async(lov->lov_tgts[req->rq_idx]->ltd_exp,
- &req->rq_oi, oti, rqset);
- if (rc) {
- CERROR("error: setattr objid "DOSTID" subobj"
- DOSTID" on OST idx %d: rc = %d\n",
- POSTID(&set->set_oi->oi_oa->o_oi),
- POSTID(&req->rq_oi.oi_oa->o_oi),
- req->rq_idx, rc);
- break;
- }
- }
-
- /* If we are not waiting for responses on async requests, return. */
- if (rc || !rqset || list_empty(&rqset->set_requests)) {
- int err;
-
- if (rc)
- atomic_set(&set->set_completes, 0);
- err = lov_fini_setattr_set(set);
- return rc ? rc : err;
- }
-
- LASSERT(!rqset->set_interpret);
- rqset->set_interpret = lov_setattr_interpret;
- rqset->set_arg = (void *)set;
-
- return 0;
-}
-
int lov_statfs_interpret(struct ptlrpc_request_set *rqset, void *data, int rc)
{
struct lov_request_set *lovset = (struct lov_request_set *)data;
@@ -1183,7 +1004,10 @@ static int lov_statfs(const struct lu_env *env, struct obd_export *exp,
struct obd_statfs *osfs, __u64 max_age, __u32 flags)
{
struct ptlrpc_request_set *set = NULL;
- struct obd_info oinfo = { };
+ struct obd_info oinfo = {
+ .oi_osfs = osfs,
+ .oi_flags = flags,
+ };
int rc = 0;
/* for obdclass we forbid using obd_statfs_rqset, but prefer using async
@@ -1193,8 +1017,6 @@ static int lov_statfs(const struct lu_env *env, struct obd_export *exp,
if (!set)
return -ENOMEM;
- oinfo.oi_osfs = osfs;
- oinfo.oi_flags = flags;
rc = lov_statfs_async(exp, &oinfo, max_age, set);
if (rc == 0)
rc = ptlrpc_set_wait(set);
@@ -1235,8 +1057,8 @@ static int lov_iocontrol(unsigned int cmd, struct obd_export *exp, int len,
/* copy UUID */
if (copy_to_user(data->ioc_pbuf2, obd2cli_tgt(osc_obd),
- min((int)data->ioc_plen2,
- (int)sizeof(struct obd_uuid))))
+ min_t(unsigned long, data->ioc_plen2,
+ sizeof(struct obd_uuid))))
return -EFAULT;
memcpy(&flags, data->ioc_inlbuf1, sizeof(__u32));
@@ -1249,8 +1071,8 @@ static int lov_iocontrol(unsigned int cmd, struct obd_export *exp, int len,
if (rc)
return rc;
if (copy_to_user(data->ioc_pbuf1, &stat_buf,
- min((int)data->ioc_plen1,
- (int)sizeof(stat_buf))))
+ min_t(unsigned long, data->ioc_plen1,
+ sizeof(stat_buf))))
return -EFAULT;
break;
}
@@ -1367,8 +1189,6 @@ static int lov_iocontrol(unsigned int cmd, struct obd_export *exp, int len,
osc_obd->obd_force = obddev->obd_force;
err = obd_iocontrol(cmd, lov->lov_tgts[i]->ltd_exp,
len, karg, uarg);
- if (err == -ENODATA && cmd == OBD_IOC_POLL_QUOTACHECK)
- return err;
if (err) {
if (lov->lov_tgts[i]->ltd_active) {
CDEBUG(err == -ENOTTY ?
@@ -1391,454 +1211,35 @@ static int lov_iocontrol(unsigned int cmd, struct obd_export *exp, int len,
return rc;
}
-#define FIEMAP_BUFFER_SIZE 4096
-
-/**
- * Non-zero fe_logical indicates that this is a continuation FIEMAP
- * call. The local end offset and the device are sent in the first
- * fm_extent. This function calculates the stripe number from the index.
- * This function returns a stripe_no on which mapping is to be restarted.
- *
- * This function returns fm_end_offset which is the in-OST offset at which
- * mapping should be restarted. If fm_end_offset=0 is returned then caller
- * will re-calculate proper offset in next stripe.
- * Note that the first extent is passed to lov_get_info via the value field.
- *
- * \param fiemap fiemap request header
- * \param lsm striping information for the file
- * \param fm_start logical start of mapping
- * \param fm_end logical end of mapping
- * \param start_stripe starting stripe will be returned in this
- */
-static u64 fiemap_calc_fm_end_offset(struct ll_user_fiemap *fiemap,
- struct lov_stripe_md *lsm, u64 fm_start,
- u64 fm_end, int *start_stripe)
-{
- u64 local_end = fiemap->fm_extents[0].fe_logical;
- u64 lun_start, lun_end;
- u64 fm_end_offset;
- int stripe_no = -1, i;
-
- if (fiemap->fm_extent_count == 0 ||
- fiemap->fm_extents[0].fe_logical == 0)
- return 0;
-
- /* Find out stripe_no from ost_index saved in the fe_device */
- for (i = 0; i < lsm->lsm_stripe_count; i++) {
- struct lov_oinfo *oinfo = lsm->lsm_oinfo[i];
-
- if (lov_oinfo_is_dummy(oinfo))
- continue;
-
- if (oinfo->loi_ost_idx == fiemap->fm_extents[0].fe_device) {
- stripe_no = i;
- break;
- }
- }
- if (stripe_no == -1)
- return -EINVAL;
-
- /* If we have finished mapping on previous device, shift logical
- * offset to start of next device
- */
- if ((lov_stripe_intersects(lsm, stripe_no, fm_start, fm_end,
- &lun_start, &lun_end)) != 0 &&
- local_end < lun_end) {
- fm_end_offset = local_end;
- *start_stripe = stripe_no;
- } else {
- /* This is a special value to indicate that caller should
- * calculate offset in next stripe.
- */
- fm_end_offset = 0;
- *start_stripe = (stripe_no + 1) % lsm->lsm_stripe_count;
- }
-
- return fm_end_offset;
-}
-
-/**
- * We calculate on which OST the mapping will end. If the length of mapping
- * is greater than (stripe_size * stripe_count) then the last_stripe will
- * will be one just before start_stripe. Else we check if the mapping
- * intersects each OST and find last_stripe.
- * This function returns the last_stripe and also sets the stripe_count
- * over which the mapping is spread
- *
- * \param lsm striping information for the file
- * \param fm_start logical start of mapping
- * \param fm_end logical end of mapping
- * \param start_stripe starting stripe of the mapping
- * \param stripe_count the number of stripes across which to map is returned
- *
- * \retval last_stripe return the last stripe of the mapping
- */
-static int fiemap_calc_last_stripe(struct lov_stripe_md *lsm, u64 fm_start,
- u64 fm_end, int start_stripe,
- int *stripe_count)
-{
- int last_stripe;
- u64 obd_start, obd_end;
- int i, j;
-
- if (fm_end - fm_start > lsm->lsm_stripe_size * lsm->lsm_stripe_count) {
- last_stripe = start_stripe < 1 ? lsm->lsm_stripe_count - 1 :
- start_stripe - 1;
- *stripe_count = lsm->lsm_stripe_count;
- } else {
- for (j = 0, i = start_stripe; j < lsm->lsm_stripe_count;
- i = (i + 1) % lsm->lsm_stripe_count, j++) {
- if ((lov_stripe_intersects(lsm, i, fm_start, fm_end,
- &obd_start, &obd_end)) == 0)
- break;
- }
- *stripe_count = j;
- last_stripe = (start_stripe + j - 1) % lsm->lsm_stripe_count;
- }
-
- return last_stripe;
-}
-
-/**
- * Set fe_device and copy extents from local buffer into main return buffer.
- *
- * \param fiemap fiemap request header
- * \param lcl_fm_ext array of local fiemap extents to be copied
- * \param ost_index OST index to be written into the fm_device field for each
- extent
- * \param ext_count number of extents to be copied
- * \param current_extent where to start copying in main extent array
- */
-static void fiemap_prepare_and_copy_exts(struct ll_user_fiemap *fiemap,
- struct ll_fiemap_extent *lcl_fm_ext,
- int ost_index, unsigned int ext_count,
- int current_extent)
-{
- char *to;
- int ext;
-
- for (ext = 0; ext < ext_count; ext++) {
- lcl_fm_ext[ext].fe_device = ost_index;
- lcl_fm_ext[ext].fe_flags |= FIEMAP_EXTENT_NET;
- }
-
- /* Copy fm_extent's from fm_local to return buffer */
- to = (char *)fiemap + fiemap_count_to_size(current_extent);
- memcpy(to, lcl_fm_ext, ext_count * sizeof(struct ll_fiemap_extent));
-}
-
-/**
- * Break down the FIEMAP request and send appropriate calls to individual OSTs.
- * This also handles the restarting of FIEMAP calls in case mapping overflows
- * the available number of extents in single call.
- */
-static int lov_fiemap(struct lov_obd *lov, __u32 keylen, void *key,
- __u32 *vallen, void *val, struct lov_stripe_md *lsm)
-{
- struct ll_fiemap_info_key *fm_key = key;
- struct ll_user_fiemap *fiemap = val;
- struct ll_user_fiemap *fm_local = NULL;
- struct ll_fiemap_extent *lcl_fm_ext;
- int count_local;
- unsigned int get_num_extents = 0;
- int ost_index = 0, actual_start_stripe, start_stripe;
- u64 fm_start, fm_end, fm_length, fm_end_offset;
- u64 curr_loc;
- int current_extent = 0, rc = 0, i;
- /* Whether have we collected enough extents */
- bool enough = false;
- int ost_eof = 0; /* EOF for object */
- int ost_done = 0; /* done with required mapping for this OST? */
- int last_stripe;
- int cur_stripe = 0, cur_stripe_wrap = 0, stripe_count;
- unsigned int buffer_size = FIEMAP_BUFFER_SIZE;
-
- if (!lsm_has_objects(lsm)) {
- if (lsm && lsm_is_released(lsm) && (fm_key->fiemap.fm_start <
- fm_key->oa.o_size)) {
- /*
- * released file, return a minimal FIEMAP if
- * request fits in file-size.
- */
- fiemap->fm_mapped_extents = 1;
- fiemap->fm_extents[0].fe_logical =
- fm_key->fiemap.fm_start;
- if (fm_key->fiemap.fm_start + fm_key->fiemap.fm_length <
- fm_key->oa.o_size) {
- fiemap->fm_extents[0].fe_length =
- fm_key->fiemap.fm_length;
- } else {
- fiemap->fm_extents[0].fe_length =
- fm_key->oa.o_size - fm_key->fiemap.fm_start;
- fiemap->fm_extents[0].fe_flags |=
- (FIEMAP_EXTENT_UNKNOWN |
- FIEMAP_EXTENT_LAST);
- }
- }
- rc = 0;
- goto out;
- }
-
- if (fiemap_count_to_size(fm_key->fiemap.fm_extent_count) < buffer_size)
- buffer_size = fiemap_count_to_size(fm_key->fiemap.fm_extent_count);
-
- fm_local = libcfs_kvzalloc(buffer_size, GFP_NOFS);
- if (!fm_local) {
- rc = -ENOMEM;
- goto out;
- }
- lcl_fm_ext = &fm_local->fm_extents[0];
-
- count_local = fiemap_size_to_count(buffer_size);
-
- memcpy(fiemap, &fm_key->fiemap, sizeof(*fiemap));
- fm_start = fiemap->fm_start;
- fm_length = fiemap->fm_length;
- /* Calculate start stripe, last stripe and length of mapping */
- start_stripe = lov_stripe_number(lsm, fm_start);
- actual_start_stripe = start_stripe;
- fm_end = (fm_length == ~0ULL ? fm_key->oa.o_size :
- fm_start + fm_length - 1);
- /* If fm_length != ~0ULL but fm_start+fm_length-1 exceeds file size */
- if (fm_end > fm_key->oa.o_size)
- fm_end = fm_key->oa.o_size;
-
- last_stripe = fiemap_calc_last_stripe(lsm, fm_start, fm_end,
- actual_start_stripe,
- &stripe_count);
-
- fm_end_offset = fiemap_calc_fm_end_offset(fiemap, lsm, fm_start,
- fm_end, &start_stripe);
- if (fm_end_offset == -EINVAL) {
- rc = -EINVAL;
- goto out;
- }
-
- if (fiemap_count_to_size(fiemap->fm_extent_count) > *vallen)
- fiemap->fm_extent_count = fiemap_size_to_count(*vallen);
- if (fiemap->fm_extent_count == 0) {
- get_num_extents = 1;
- count_local = 0;
- }
- /* Check each stripe */
- for (cur_stripe = start_stripe, i = 0; i < stripe_count;
- i++, cur_stripe = (cur_stripe + 1) % lsm->lsm_stripe_count) {
- u64 req_fm_len; /* Stores length of required mapping */
- u64 len_mapped_single_call;
- u64 lun_start, lun_end, obd_object_end;
- unsigned int ext_count;
-
- cur_stripe_wrap = cur_stripe;
-
- /* Find out range of mapping on this stripe */
- if ((lov_stripe_intersects(lsm, cur_stripe, fm_start, fm_end,
- &lun_start, &obd_object_end)) == 0)
- continue;
-
- if (lov_oinfo_is_dummy(lsm->lsm_oinfo[cur_stripe])) {
- rc = -EIO;
- goto out;
- }
-
- /* If this is a continuation FIEMAP call and we are on
- * starting stripe then lun_start needs to be set to
- * fm_end_offset
- */
- if (fm_end_offset != 0 && cur_stripe == start_stripe)
- lun_start = fm_end_offset;
-
- if (fm_length != ~0ULL) {
- /* Handle fm_start + fm_length overflow */
- if (fm_start + fm_length < fm_start)
- fm_length = ~0ULL - fm_start;
- lun_end = lov_size_to_stripe(lsm, fm_start + fm_length,
- cur_stripe);
- } else {
- lun_end = ~0ULL;
- }
-
- if (lun_start == lun_end)
- continue;
-
- req_fm_len = obd_object_end - lun_start;
- fm_local->fm_length = 0;
- len_mapped_single_call = 0;
-
- /* If the output buffer is very large and the objects have many
- * extents we may need to loop on a single OST repeatedly
- */
- ost_eof = 0;
- ost_done = 0;
- do {
- if (get_num_extents == 0) {
- /* Don't get too many extents. */
- if (current_extent + count_local >
- fiemap->fm_extent_count)
- count_local = fiemap->fm_extent_count -
- current_extent;
- }
-
- lun_start += len_mapped_single_call;
- fm_local->fm_length = req_fm_len - len_mapped_single_call;
- req_fm_len = fm_local->fm_length;
- fm_local->fm_extent_count = enough ? 1 : count_local;
- fm_local->fm_mapped_extents = 0;
- fm_local->fm_flags = fiemap->fm_flags;
-
- fm_key->oa.o_oi = lsm->lsm_oinfo[cur_stripe]->loi_oi;
- ost_index = lsm->lsm_oinfo[cur_stripe]->loi_ost_idx;
-
- if (ost_index < 0 ||
- ost_index >= lov->desc.ld_tgt_count) {
- rc = -EINVAL;
- goto out;
- }
-
- /* If OST is inactive, return extent with UNKNOWN flag */
- if (!lov->lov_tgts[ost_index]->ltd_active) {
- fm_local->fm_flags |= FIEMAP_EXTENT_LAST;
- fm_local->fm_mapped_extents = 1;
-
- lcl_fm_ext[0].fe_logical = lun_start;
- lcl_fm_ext[0].fe_length = obd_object_end -
- lun_start;
- lcl_fm_ext[0].fe_flags |= FIEMAP_EXTENT_UNKNOWN;
-
- goto inactive_tgt;
- }
-
- fm_local->fm_start = lun_start;
- fm_local->fm_flags &= ~FIEMAP_FLAG_DEVICE_ORDER;
- memcpy(&fm_key->fiemap, fm_local, sizeof(*fm_local));
- *vallen = fiemap_count_to_size(fm_local->fm_extent_count);
- rc = obd_get_info(NULL,
- lov->lov_tgts[ost_index]->ltd_exp,
- keylen, key, vallen, fm_local, lsm);
- if (rc != 0)
- goto out;
-
-inactive_tgt:
- ext_count = fm_local->fm_mapped_extents;
- if (ext_count == 0) {
- ost_done = 1;
- /* If last stripe has hole at the end,
- * then we need to return
- */
- if (cur_stripe_wrap == last_stripe) {
- fiemap->fm_mapped_extents = 0;
- goto finish;
- }
- break;
- } else if (enough) {
- /*
- * We've collected enough extents and there are
- * more extents after it.
- */
- goto finish;
- }
-
- /* If we just need num of extents then go to next device */
- if (get_num_extents) {
- current_extent += ext_count;
- break;
- }
-
- len_mapped_single_call =
- lcl_fm_ext[ext_count - 1].fe_logical -
- lun_start + lcl_fm_ext[ext_count - 1].fe_length;
-
- /* Have we finished mapping on this device? */
- if (req_fm_len <= len_mapped_single_call)
- ost_done = 1;
-
- /* Clear the EXTENT_LAST flag which can be present on
- * last extent
- */
- if (lcl_fm_ext[ext_count - 1].fe_flags &
- FIEMAP_EXTENT_LAST)
- lcl_fm_ext[ext_count - 1].fe_flags &=
- ~FIEMAP_EXTENT_LAST;
-
- curr_loc = lov_stripe_size(lsm,
- lcl_fm_ext[ext_count - 1].fe_logical +
- lcl_fm_ext[ext_count - 1].fe_length,
- cur_stripe);
- if (curr_loc >= fm_key->oa.o_size)
- ost_eof = 1;
-
- fiemap_prepare_and_copy_exts(fiemap, lcl_fm_ext,
- ost_index, ext_count,
- current_extent);
-
- current_extent += ext_count;
-
- /* Ran out of available extents? */
- if (current_extent >= fiemap->fm_extent_count)
- enough = true;
- } while (ost_done == 0 && ost_eof == 0);
-
- if (cur_stripe_wrap == last_stripe)
- goto finish;
- }
-
-finish:
- /* Indicate that we are returning device offsets unless file just has
- * single stripe
- */
- if (lsm->lsm_stripe_count > 1)
- fiemap->fm_flags |= FIEMAP_FLAG_DEVICE_ORDER;
-
- if (get_num_extents)
- goto skip_last_device_calc;
-
- /* Check if we have reached the last stripe and whether mapping for that
- * stripe is done.
- */
- if (cur_stripe_wrap == last_stripe) {
- if (ost_done || ost_eof)
- fiemap->fm_extents[current_extent - 1].fe_flags |=
- FIEMAP_EXTENT_LAST;
- }
-
-skip_last_device_calc:
- fiemap->fm_mapped_extents = current_extent;
-
-out:
- kvfree(fm_local);
- return rc;
-}
-
static int lov_get_info(const struct lu_env *env, struct obd_export *exp,
- __u32 keylen, void *key, __u32 *vallen, void *val,
- struct lov_stripe_md *lsm)
+ __u32 keylen, void *key, __u32 *vallen, void *val)
{
struct obd_device *obddev = class_exp2obd(exp);
struct lov_obd *lov = &obddev->u.lov;
- int rc;
+ struct lov_desc *ld = &lov->desc;
+ int rc = 0;
if (!vallen || !val)
return -EFAULT;
obd_getref(obddev);
- if (KEY_IS(KEY_LOVDESC)) {
- struct lov_desc *desc_ret = val;
- *desc_ret = lov->desc;
+ if (KEY_IS(KEY_MAX_EASIZE)) {
+ u32 max_stripe_count = min_t(u32, ld->ld_active_tgt_count,
+ LOV_MAX_STRIPE_COUNT);
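+
+		/* EA size of a V3 LMM wide enough for the capped stripe count. */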
- rc = 0;
- goto out;
- } else if (KEY_IS(KEY_FIEMAP)) {
- rc = lov_fiemap(lov, keylen, key, vallen, val, lsm);
- goto out;
+ *((u32 *)val) = lov_mds_md_size(max_stripe_count, LOV_MAGIC_V3);
+ } else if (KEY_IS(KEY_DEFAULT_EASIZE)) {
+ u32 def_stripe_count = min_t(u32, ld->ld_default_stripe_count,
+ LOV_MAX_STRIPE_COUNT);
+
+ *((u32 *)val) = lov_mds_md_size(def_stripe_count, LOV_MAGIC_V3);
} else if (KEY_IS(KEY_TGT_COUNT)) {
*((int *)val) = lov->desc.ld_tgt_count;
- rc = 0;
- goto out;
+ } else {
+ rc = -EINVAL;
}
- rc = -EINVAL;
-
-out:
obd_putref(obddev);
return rc;
}
@@ -1926,12 +1327,8 @@ static int lov_quotactl(struct obd_device *obd, struct obd_export *exp,
__u64 bhardlimit = 0;
int i, rc = 0;
- if (oqctl->qc_cmd != LUSTRE_Q_QUOTAON &&
- oqctl->qc_cmd != LUSTRE_Q_QUOTAOFF &&
- oqctl->qc_cmd != Q_GETOQUOTA &&
- oqctl->qc_cmd != Q_INITQUOTA &&
- oqctl->qc_cmd != LUSTRE_Q_SETQUOTA &&
- oqctl->qc_cmd != Q_FINVALIDATE) {
+ if (oqctl->qc_cmd != Q_GETOQUOTA &&
+ oqctl->qc_cmd != LUSTRE_Q_SETQUOTA) {
CERROR("bad quota opc %x for lov obd\n", oqctl->qc_cmd);
return -EFAULT;
}
@@ -1978,63 +1375,15 @@ static int lov_quotactl(struct obd_device *obd, struct obd_export *exp,
return rc;
}
-static int lov_quotacheck(struct obd_device *obd, struct obd_export *exp,
- struct obd_quotactl *oqctl)
-{
- struct lov_obd *lov = &obd->u.lov;
- int i, rc = 0;
-
- obd_getref(obd);
-
- for (i = 0; i < lov->desc.ld_tgt_count; i++) {
- if (!lov->lov_tgts[i])
- continue;
-
- /* Skip quota check on the administratively disabled OSTs. */
- if (!lov->lov_tgts[i]->ltd_activate) {
- CWARN("lov idx %d was administratively disabled, skip quotacheck on it.\n",
- i);
- continue;
- }
-
- if (!lov->lov_tgts[i]->ltd_active) {
- CERROR("lov idx %d inactive\n", i);
- rc = -EIO;
- goto out;
- }
- }
-
- for (i = 0; i < lov->desc.ld_tgt_count; i++) {
- int err;
-
- if (!lov->lov_tgts[i] || !lov->lov_tgts[i]->ltd_activate)
- continue;
-
- err = obd_quotacheck(lov->lov_tgts[i]->ltd_exp, oqctl);
- if (err && !rc)
- rc = err;
- }
-
-out:
- obd_putref(obd);
-
- return rc;
-}
-
static struct obd_ops lov_obd_ops = {
.owner = THIS_MODULE,
.setup = lov_setup,
- .precleanup = lov_precleanup,
.cleanup = lov_cleanup,
/*.process_config = lov_process_config,*/
.connect = lov_connect,
.disconnect = lov_disconnect,
.statfs = lov_statfs,
.statfs_async = lov_statfs_async,
- .packmd = lov_packmd,
- .unpackmd = lov_unpackmd,
- .getattr_async = lov_getattr_async,
- .setattr_async = lov_setattr_async,
.iocontrol = lov_iocontrol,
.get_info = lov_get_info,
.set_info_async = lov_set_info_async,
@@ -2046,7 +1395,6 @@ static struct obd_ops lov_obd_ops = {
.getref = lov_getref,
.putref = lov_putref,
.quotactl = lov_quotactl,
- .quotacheck = lov_quotacheck,
};
struct kmem_cache *lov_oinfo_slab;
diff --git a/drivers/staging/lustre/lustre/lov/lov_object.c b/drivers/staging/lustre/lustre/lov/lov_object.c
index 52f736338887..76d4256fa828 100644
--- a/drivers/staging/lustre/lustre/lov/lov_object.c
+++ b/drivers/staging/lustre/lustre/lov/lov_object.c
@@ -39,6 +39,11 @@
#include "lov_cl_internal.h"
+static inline struct lov_device *lov_object_dev(struct lov_object *obj)
+{
+ return lu2lov_dev(obj->lo_cl.co_lu.lo_dev);
+}
+
/** \addtogroup lov
* @{
*/
@@ -51,7 +56,7 @@
struct lov_layout_operations {
int (*llo_init)(const struct lu_env *env, struct lov_device *dev,
- struct lov_object *lov,
+ struct lov_object *lov, struct lov_stripe_md *lsm,
const struct cl_object_conf *conf,
union lov_layout_state *state);
int (*llo_delete)(const struct lu_env *env, struct lov_object *lov,
@@ -75,12 +80,11 @@ struct lov_layout_operations {
static int lov_layout_wait(const struct lu_env *env, struct lov_object *lov);
-void lov_lsm_put(struct cl_object *unused, struct lov_stripe_md *lsm)
+static void lov_lsm_put(struct lov_stripe_md *lsm)
{
if (lsm)
lov_free_memmd(&lsm);
}
-EXPORT_SYMBOL(lov_lsm_put);
/*****************************************************************************
*
@@ -97,17 +101,17 @@ static void lov_install_empty(const struct lu_env *env,
*/
}
-static int lov_init_empty(const struct lu_env *env,
- struct lov_device *dev, struct lov_object *lov,
+static int lov_init_empty(const struct lu_env *env, struct lov_device *dev,
+ struct lov_object *lov, struct lov_stripe_md *lsm,
const struct cl_object_conf *conf,
- union lov_layout_state *state)
+ union lov_layout_state *state)
{
return 0;
}
static void lov_install_raid0(const struct lu_env *env,
struct lov_object *lov,
- union lov_layout_state *state)
+ union lov_layout_state *state)
{
}
@@ -212,8 +216,8 @@ static int lov_page_slice_fixup(struct lov_object *lov,
return cl_object_header(stripe)->coh_page_bufsize;
}
-static int lov_init_raid0(const struct lu_env *env,
- struct lov_device *dev, struct lov_object *lov,
+static int lov_init_raid0(const struct lu_env *env, struct lov_device *dev,
+ struct lov_object *lov, struct lov_stripe_md *lsm,
const struct cl_object_conf *conf,
union lov_layout_state *state)
{
@@ -223,7 +227,6 @@ static int lov_init_raid0(const struct lu_env *env,
struct cl_object *stripe;
struct lov_thread_info *lti = lov_env_info(env);
struct cl_object_conf *subconf = &lti->lti_stripe_conf;
- struct lov_stripe_md *lsm = conf->u.coc_md->lsm;
struct lu_fid *ofid = &lti->lti_fid;
struct lov_layout_raid0 *r0 = &state->raid0;
@@ -298,13 +301,11 @@ out:
return result;
}
-static int lov_init_released(const struct lu_env *env,
- struct lov_device *dev, struct lov_object *lov,
+static int lov_init_released(const struct lu_env *env, struct lov_device *dev,
+ struct lov_object *lov, struct lov_stripe_md *lsm,
const struct cl_object_conf *conf,
union lov_layout_state *state)
{
- struct lov_stripe_md *lsm = conf->u.coc_md->lsm;
-
LASSERT(lsm);
LASSERT(lsm_is_released(lsm));
LASSERT(!lov->lo_lsm);
@@ -313,6 +314,40 @@ static int lov_init_released(const struct lu_env *env,
return 0;
}
+static struct cl_object *lov_find_subobj(const struct lu_env *env,
+ struct lov_object *lov,
+ struct lov_stripe_md *lsm,
+ int stripe_idx)
+{
+ struct lov_device *dev = lu2lov_dev(lov2lu(lov)->lo_dev);
+ struct lov_oinfo *oinfo = lsm->lsm_oinfo[stripe_idx];
+ struct lov_thread_info *lti = lov_env_info(env);
+ struct lu_fid *ofid = &lti->lti_fid;
+ struct cl_device *subdev;
+ struct cl_object *result;
+ int ost_idx;
+ int rc;
+
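+	/* Only a RAID0 layout has per-stripe sub-objects to look up. */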
+ if (lov->lo_type != LLT_RAID0) {
+ result = NULL;
+ goto out;
+ }
+
+ ost_idx = oinfo->loi_ost_idx;
+ rc = ostid_to_fid(ofid, &oinfo->loi_oi, ost_idx);
+ if (rc) {
+ result = NULL;
+ goto out;
+ }
+
+ subdev = lovsub2cl_dev(dev->ld_target[ost_idx]);
+ result = lov_sub_find(env, subdev, ofid, NULL);
+out:
+ if (!result)
+ result = ERR_PTR(-EINVAL);
+ return result;
+}
+
static int lov_delete_empty(const struct lu_env *env, struct lov_object *lov,
union lov_layout_state *state)
{
@@ -687,31 +722,24 @@ static int lov_layout_wait(const struct lu_env *env, struct lov_object *lov)
}
static int lov_layout_change(const struct lu_env *unused,
- struct lov_object *lov,
+ struct lov_object *lov, struct lov_stripe_md *lsm,
const struct cl_object_conf *conf)
{
- int result;
- enum lov_layout_type llt = LLT_EMPTY;
+ enum lov_layout_type llt = lov_type(lsm);
union lov_layout_state *state = &lov->u;
const struct lov_layout_operations *old_ops;
const struct lov_layout_operations *new_ops;
-
- void *cookie;
struct lu_env *env;
int refcheck;
+ int rc;
LASSERT(0 <= lov->lo_type && lov->lo_type < ARRAY_SIZE(lov_dispatch));
- if (conf->u.coc_md)
- llt = lov_type(conf->u.coc_md->lsm);
- LASSERT(0 <= llt && llt < ARRAY_SIZE(lov_dispatch));
-
- cookie = cl_env_reenter();
env = cl_env_get(&refcheck);
- if (IS_ERR(env)) {
- cl_env_reexit(cookie);
+ if (IS_ERR(env))
return PTR_ERR(env);
- }
+
+ LASSERT(0 <= llt && llt < ARRAY_SIZE(lov_dispatch));
CDEBUG(D_INODE, DFID" from %s to %s\n",
PFID(lu_object_fid(lov2lu(lov))),
@@ -720,38 +748,37 @@ static int lov_layout_change(const struct lu_env *unused,
old_ops = &lov_dispatch[lov->lo_type];
new_ops = &lov_dispatch[llt];
- result = cl_object_prune(env, &lov->lo_cl);
- if (result != 0)
+ rc = cl_object_prune(env, &lov->lo_cl);
+ if (rc)
+ goto out;
+
+ rc = old_ops->llo_delete(env, lov, &lov->u);
+ if (rc)
goto out;
- result = old_ops->llo_delete(env, lov, &lov->u);
- if (result == 0) {
- old_ops->llo_fini(env, lov, &lov->u);
+ old_ops->llo_fini(env, lov, &lov->u);
- LASSERT(atomic_read(&lov->lo_active_ios) == 0);
+ LASSERT(!atomic_read(&lov->lo_active_ios));
- lov->lo_type = LLT_EMPTY;
- /* page bufsize fixup */
- cl_object_header(&lov->lo_cl)->coh_page_bufsize -=
+ lov->lo_type = LLT_EMPTY;
+
+ /* page bufsize fixup */
+ cl_object_header(&lov->lo_cl)->coh_page_bufsize -=
lov_page_slice_fixup(lov, NULL);
- result = new_ops->llo_init(env,
- lu2lov_dev(lov->lo_cl.co_lu.lo_dev),
- lov, conf, state);
- if (result == 0) {
- new_ops->llo_install(env, lov, state);
- lov->lo_type = llt;
- } else {
- new_ops->llo_delete(env, lov, state);
- new_ops->llo_fini(env, lov, state);
- /* this file becomes an EMPTY file. */
- }
+ rc = new_ops->llo_init(env, lov_object_dev(lov), lov, lsm, conf, state);
+ if (rc) {
+ new_ops->llo_delete(env, lov, state);
+ new_ops->llo_fini(env, lov, state);
+ /* this file becomes an EMPTY file. */
+ goto out;
}
+ new_ops->llo_install(env, lov, state);
+ lov->lo_type = llt;
out:
cl_env_put(env, &refcheck);
- cl_env_reexit(cookie);
- return result;
+ return rc;
}
/*****************************************************************************
@@ -762,26 +789,38 @@ out:
int lov_object_init(const struct lu_env *env, struct lu_object *obj,
const struct lu_object_conf *conf)
{
- struct lov_device *dev = lu2lov_dev(obj->lo_dev);
struct lov_object *lov = lu2lov(obj);
+ struct lov_device *dev = lov_object_dev(lov);
const struct cl_object_conf *cconf = lu2cl_conf(conf);
union lov_layout_state *set = &lov->u;
const struct lov_layout_operations *ops;
- int result;
+ struct lov_stripe_md *lsm = NULL;
+ int rc;
init_rwsem(&lov->lo_type_guard);
atomic_set(&lov->lo_active_ios, 0);
init_waitqueue_head(&lov->lo_waitq);
-
cl_object_page_init(lu2cl(obj), sizeof(struct lov_page));
+ lov->lo_type = LLT_EMPTY;
+ if (cconf->u.coc_layout.lb_buf) {
+ lsm = lov_unpackmd(dev->ld_lov,
+ cconf->u.coc_layout.lb_buf,
+ cconf->u.coc_layout.lb_len);
+ if (IS_ERR(lsm))
+ return PTR_ERR(lsm);
+ }
+
/* no locking is necessary, as object is being created */
- lov->lo_type = lov_type(cconf->u.coc_md->lsm);
+ lov->lo_type = lov_type(lsm);
ops = &lov_dispatch[lov->lo_type];
- result = ops->llo_init(env, dev, lov, cconf, set);
- if (result == 0)
+ rc = ops->llo_init(env, dev, lov, lsm, cconf, set);
+ if (!rc)
ops->llo_install(env, lov, set);
- return result;
+
+ lov_lsm_put(lsm);
+
+ return rc;
}
static int lov_conf_set(const struct lu_env *env, struct cl_object *obj,
@@ -791,6 +830,15 @@ static int lov_conf_set(const struct lu_env *env, struct cl_object *obj,
struct lov_object *lov = cl2lov(obj);
int result = 0;
+ if (conf->coc_opc == OBJECT_CONF_SET &&
+ conf->u.coc_layout.lb_buf) {
+ lsm = lov_unpackmd(lov_object_dev(lov)->ld_lov,
+ conf->u.coc_layout.lb_buf,
+ conf->u.coc_layout.lb_len);
+ if (IS_ERR(lsm))
+ return PTR_ERR(lsm);
+ }
+
lov_conf_lock(lov);
if (conf->coc_opc == OBJECT_CONF_INVALIDATE) {
lov->lo_layout_invalid = true;
@@ -810,8 +858,6 @@ static int lov_conf_set(const struct lu_env *env, struct cl_object *obj,
LASSERT(conf->coc_opc == OBJECT_CONF_SET);
- if (conf->u.coc_md)
- lsm = conf->u.coc_md->lsm;
if ((!lsm && !lov->lo_lsm) ||
((lsm && lov->lo_lsm) &&
(lov->lo_lsm->lsm_layout_gen == lsm->lsm_layout_gen) &&
@@ -829,11 +875,12 @@ static int lov_conf_set(const struct lu_env *env, struct cl_object *obj,
goto out;
}
- result = lov_layout_change(env, lov, conf);
+ result = lov_layout_change(env, lov, lsm, conf);
lov->lo_layout_invalid = result != 0;
out:
lov_conf_unlock(lov);
+ lov_lsm_put(lsm);
CDEBUG(D_INODE, DFID" lo_layout_invalid=%d\n",
PFID(lu_object_fid(lov2lu(lov))), lov->lo_layout_invalid);
return result;
@@ -911,6 +958,473 @@ int lov_lock_init(const struct lu_env *env, struct cl_object *obj,
io);
}
+/**
+ * We calculate on which OST the mapping will end. If the length of the
+ * mapping is greater than (stripe_size * stripe_count), then the
+ * last_stripe will be the one just before start_stripe. Otherwise we
+ * check whether the mapping intersects each OST and find last_stripe.
+ * This function returns the last_stripe and also sets the stripe_count
+ * over which the mapping is spread
+ *
+ * \param lsm [in] striping information for the file
+ * \param fm_start [in] logical start of mapping
+ * \param fm_end [in] logical end of mapping
+ * \param start_stripe [in] starting stripe of the mapping
+ * \param stripe_count [out] the number of stripes across which to map is
+ * returned
+ *
+ * \retval last_stripe return the last stripe of the mapping
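+ *
+ * For example, with stripe_size = 1MiB and stripe_count = 4, a 6MiB
+ * mapping exceeds stripe_size * stripe_count, so it wraps around all
+ * stripes and last_stripe is the stripe just before start_stripe.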
+ */
+static int fiemap_calc_last_stripe(struct lov_stripe_md *lsm,
+ loff_t fm_start, loff_t fm_end,
+ int start_stripe, int *stripe_count)
+{
+ int last_stripe;
+ loff_t obd_start;
+ loff_t obd_end;
+ int i, j;
+
+ if (fm_end - fm_start > lsm->lsm_stripe_size * lsm->lsm_stripe_count) {
+ last_stripe = (start_stripe < 1 ? lsm->lsm_stripe_count - 1 :
+ start_stripe - 1);
+ *stripe_count = lsm->lsm_stripe_count;
+ } else {
+ for (j = 0, i = start_stripe; j < lsm->lsm_stripe_count;
+ i = (i + 1) % lsm->lsm_stripe_count, j++) {
+ if (!(lov_stripe_intersects(lsm, i, fm_start, fm_end,
+ &obd_start, &obd_end)))
+ break;
+ }
+ *stripe_count = j;
+ last_stripe = (start_stripe + j - 1) % lsm->lsm_stripe_count;
+ }
+
+ return last_stripe;
+}
+
+/**
+ * Set fe_device and copy extents from local buffer into main return buffer.
+ *
+ * \param fiemap [out] fiemap to hold all extents
+ * \param lcl_fm_ext [in] array of fiemap extents get from OSC layer
+ * \param ost_index [in] OST index to be written into the fm_device
+ * field for each extent
+ * \param ext_count [in] number of extents to be copied
+ * \param current_extent [in] where to start copying in the extent array
+ */
+static void fiemap_prepare_and_copy_exts(struct fiemap *fiemap,
+ struct fiemap_extent *lcl_fm_ext,
+ int ost_index, unsigned int ext_count,
+ int current_extent)
+{
+ unsigned int ext;
+ char *to;
+
+ for (ext = 0; ext < ext_count; ext++) {
+ lcl_fm_ext[ext].fe_device = ost_index;
+ lcl_fm_ext[ext].fe_flags |= FIEMAP_EXTENT_NET;
+ }
+
+ /* Copy fm_extent's from fm_local to return buffer */
+ to = (char *)fiemap + fiemap_count_to_size(current_extent);
+ memcpy(to, lcl_fm_ext, ext_count * sizeof(struct fiemap_extent));
+}
+
+#define FIEMAP_BUFFER_SIZE 4096
+
+/**
+ * Non-zero fe_logical indicates that this is a continuation FIEMAP
+ * call. The local end offset and the device are sent in the first
+ * fm_extent. This function calculates the stripe number from the index.
+ * This function returns a stripe_no on which mapping is to be restarted.
+ *
+ * This function returns fm_end_offset which is the in-OST offset at which
+ * mapping should be restarted. If fm_end_offset=0 is returned, the caller
+ * will re-calculate the proper offset in the next stripe.
+ * Note that the first extent is passed to lov_get_info via the value field.
+ *
+ * \param fiemap [in] fiemap request header
+ * \param lsm [in] striping information for the file
+ * \param fm_start [in] logical start of mapping
+ * \param fm_end [in] logical end of mapping
+ * \param start_stripe [out] the starting stripe is returned in this parameter
+ */
+static loff_t fiemap_calc_fm_end_offset(struct fiemap *fiemap,
+ struct lov_stripe_md *lsm,
+ loff_t fm_start, loff_t fm_end,
+ int *start_stripe)
+{
+ loff_t local_end = fiemap->fm_extents[0].fe_logical;
+ loff_t lun_start, lun_end;
+ loff_t fm_end_offset;
+ int stripe_no = -1;
+ int i;
+
+ if (!fiemap->fm_extent_count || !fiemap->fm_extents[0].fe_logical)
+ return 0;
+
+ /* Find out stripe_no from ost_index saved in the fe_device */
+ for (i = 0; i < lsm->lsm_stripe_count; i++) {
+ struct lov_oinfo *oinfo = lsm->lsm_oinfo[i];
+
+ if (lov_oinfo_is_dummy(oinfo))
+ continue;
+
+ if (oinfo->loi_ost_idx == fiemap->fm_extents[0].fe_device) {
+ stripe_no = i;
+ break;
+ }
+ }
+
+ if (stripe_no == -1)
+ return -EINVAL;
+
+ /*
+	 * If we have finished mapping on the previous device, shift the
+	 * logical offset to the start of the next device
+ */
+ if (lov_stripe_intersects(lsm, stripe_no, fm_start, fm_end,
+ &lun_start, &lun_end) &&
+ local_end < lun_end) {
+ fm_end_offset = local_end;
+ *start_stripe = stripe_no;
+ } else {
+ /* This is a special value to indicate that caller should
+ * calculate offset in next stripe.
+ */
+ fm_end_offset = 0;
+ *start_stripe = (stripe_no + 1) % lsm->lsm_stripe_count;
+ }
+
+ return fm_end_offset;
+}
+
+/**
+ * Break down the FIEMAP request and send appropriate calls to individual OSTs.
+ * This also handles the restarting of FIEMAP calls in case mapping overflows
+ * the available number of extents in single call.
+ *
+ * \param env [in] lustre environment
+ * \param obj [in] file object
+ * \param fmkey [in] fiemap request header and other info
+ * \param fiemap [out] fiemap buffer holding the retrieved map extents
+ * \param buflen [in/out] max buffer length of @fiemap; while iterating
+ *                        over each OST it is used to limit the max map
+ *                        size needed
+ * \retval 0 success
+ * \retval < 0 error
+ */
+static int lov_object_fiemap(const struct lu_env *env, struct cl_object *obj,
+ struct ll_fiemap_info_key *fmkey,
+ struct fiemap *fiemap, size_t *buflen)
+{
+ struct lov_obd *lov = lu2lov_dev(obj->co_lu.lo_dev)->ld_lov;
+ unsigned int buffer_size = FIEMAP_BUFFER_SIZE;
+ struct fiemap_extent *lcl_fm_ext;
+ struct cl_object *subobj = NULL;
+ struct fiemap *fm_local = NULL;
+ struct lov_stripe_md *lsm;
+ loff_t fm_start;
+ loff_t fm_end;
+ loff_t fm_length;
+ loff_t fm_end_offset;
+ int count_local;
+ int ost_index = 0;
+ int start_stripe;
+ int current_extent = 0;
+ int rc = 0;
+ int last_stripe;
+ int cur_stripe = 0;
+ int cur_stripe_wrap = 0;
+ int stripe_count;
+	/* Whether we have collected enough extents */
+ bool enough = false;
+ /* EOF for object */
+ bool ost_eof = false;
+ /* done with required mapping for this OST? */
+ bool ost_done = false;
+
+ lsm = lov_lsm_addref(cl2lov(obj));
+ if (!lsm)
+ return -ENODATA;
+
+ /**
+	 * If stripe_count > 1 and the application does not understand
+	 * the DEVICE_ORDER flag, it cannot interpret the extents correctly.
+ */
+ if (lsm->lsm_stripe_count > 1 &&
+ !(fiemap->fm_flags & FIEMAP_FLAG_DEVICE_ORDER)) {
+ rc = -ENOTSUPP;
+ goto out;
+ }
+
+ if (lsm_is_released(lsm)) {
+ if (fiemap->fm_start < fmkey->lfik_oa.o_size) {
+ /**
+ * released file, return a minimal FIEMAP if
+ * request fits in file-size.
+ */
+ fiemap->fm_mapped_extents = 1;
+ fiemap->fm_extents[0].fe_logical = fiemap->fm_start;
+ if (fiemap->fm_start + fiemap->fm_length <
+ fmkey->lfik_oa.o_size)
+ fiemap->fm_extents[0].fe_length =
+ fiemap->fm_length;
+ else
+ fiemap->fm_extents[0].fe_length =
+ fmkey->lfik_oa.o_size -
+ fiemap->fm_start;
+ fiemap->fm_extents[0].fe_flags |=
+ FIEMAP_EXTENT_UNKNOWN | FIEMAP_EXTENT_LAST;
+ }
+ rc = 0;
+ goto out;
+ }
+
+ if (fiemap_count_to_size(fiemap->fm_extent_count) < buffer_size)
+ buffer_size = fiemap_count_to_size(fiemap->fm_extent_count);
+
+ fm_local = libcfs_kvzalloc(buffer_size, GFP_NOFS);
+ if (!fm_local) {
+ rc = -ENOMEM;
+ goto out;
+ }
+ lcl_fm_ext = &fm_local->fm_extents[0];
+ count_local = fiemap_size_to_count(buffer_size);
+
+ fm_start = fiemap->fm_start;
+ fm_length = fiemap->fm_length;
+ /* Calculate start stripe, last stripe and length of mapping */
+ start_stripe = lov_stripe_number(lsm, fm_start);
+ fm_end = (fm_length == ~0ULL) ? fmkey->lfik_oa.o_size :
+ fm_start + fm_length - 1;
+ /* If fm_length != ~0ULL but fm_start + fm_length - 1 exceeds file size */
+ if (fm_end > fmkey->lfik_oa.o_size)
+ fm_end = fmkey->lfik_oa.o_size;
+
+ last_stripe = fiemap_calc_last_stripe(lsm, fm_start, fm_end,
+ start_stripe, &stripe_count);
+ fm_end_offset = fiemap_calc_fm_end_offset(fiemap, lsm, fm_start, fm_end,
+ &start_stripe);
+ if (fm_end_offset == -EINVAL) {
+ rc = -EINVAL;
+ goto out;
+ }
+
+ /*
+ * If the requested extent count exceeds the fiemap buffer size,
+ * shrink our ambition.
+ */
+ if (fiemap_count_to_size(fiemap->fm_extent_count) > *buflen)
+ fiemap->fm_extent_count = fiemap_size_to_count(*buflen);
+ if (!fiemap->fm_extent_count)
+ count_local = 0;
+
+ /* Check each stripe */
+ for (cur_stripe = start_stripe; stripe_count > 0;
+ --stripe_count,
+ cur_stripe = (cur_stripe + 1) % lsm->lsm_stripe_count) {
+ loff_t req_fm_len; /* Stores length of required mapping */
+ loff_t len_mapped_single_call;
+ loff_t lun_start;
+ loff_t lun_end;
+ loff_t obd_object_end;
+ unsigned int ext_count;
+
+ cur_stripe_wrap = cur_stripe;
+
+ /* Find out range of mapping on this stripe */
+ if (!(lov_stripe_intersects(lsm, cur_stripe, fm_start, fm_end,
+ &lun_start, &obd_object_end)))
+ continue;
+
+ if (lov_oinfo_is_dummy(lsm->lsm_oinfo[cur_stripe])) {
+ rc = -EIO;
+ goto out;
+ }
+
+ /*
+ * If this is a continuation FIEMAP call and we are on
+ * the starting stripe, then lun_start needs to be set to
+ * fm_end_offset
+ */
+ if (fm_end_offset && cur_stripe == start_stripe)
+ lun_start = fm_end_offset;
+
+ if (fm_length != ~0ULL) {
+ /* Handle fm_start + fm_length overflow */
+ if (fm_start + fm_length < fm_start)
+ fm_length = ~0ULL - fm_start;
+ lun_end = lov_size_to_stripe(lsm, fm_start + fm_length,
+ cur_stripe);
+ } else {
+ lun_end = ~0ULL;
+ }
+
+ if (lun_start == lun_end)
+ continue;
+
+ req_fm_len = obd_object_end - lun_start;
+ fm_local->fm_length = 0;
+ len_mapped_single_call = 0;
+
+ /* find the lovsub object */
+ subobj = lov_find_subobj(env, cl2lov(obj), lsm,
+ cur_stripe);
+ if (IS_ERR(subobj)) {
+ rc = PTR_ERR(subobj);
+ goto out;
+ }
+ /*
+ * If the output buffer is very large and the objects have many
+ * extents, we may need to loop on a single OST repeatedly
+ */
+ ost_eof = false;
+ ost_done = false;
+ do {
+ if (fiemap->fm_extent_count > 0) {
+ /* Don't get too many extents. */
+ if (current_extent + count_local >
+ fiemap->fm_extent_count)
+ count_local = fiemap->fm_extent_count -
+ current_extent;
+ }
+
+ lun_start += len_mapped_single_call;
+ fm_local->fm_length = req_fm_len -
+ len_mapped_single_call;
+ req_fm_len = fm_local->fm_length;
+ fm_local->fm_extent_count = enough ? 1 : count_local;
+ fm_local->fm_mapped_extents = 0;
+ fm_local->fm_flags = fiemap->fm_flags;
+
+ ost_index = lsm->lsm_oinfo[cur_stripe]->loi_ost_idx;
+
+ if (ost_index < 0 ||
+ ost_index >= lov->desc.ld_tgt_count) {
+ rc = -EINVAL;
+ goto obj_put;
+ }
+ /*
+ * If OST is inactive, return extent with UNKNOWN
+ * flag.
+ */
+ if (!lov->lov_tgts[ost_index]->ltd_active) {
+ fm_local->fm_flags |= FIEMAP_EXTENT_LAST;
+ fm_local->fm_mapped_extents = 1;
+
+ lcl_fm_ext[0].fe_logical = lun_start;
+ lcl_fm_ext[0].fe_length = obd_object_end -
+ lun_start;
+ lcl_fm_ext[0].fe_flags |= FIEMAP_EXTENT_UNKNOWN;
+
+ goto inactive_tgt;
+ }
+
+ fm_local->fm_start = lun_start;
+ fm_local->fm_flags &= ~FIEMAP_FLAG_DEVICE_ORDER;
+ memcpy(&fmkey->lfik_fiemap, fm_local, sizeof(*fm_local));
+ *buflen = fiemap_count_to_size(fm_local->fm_extent_count);
+
+ rc = cl_object_fiemap(env, subobj, fmkey, fm_local,
+ buflen);
+ if (rc)
+ goto obj_put;
+inactive_tgt:
+ ext_count = fm_local->fm_mapped_extents;
+ if (!ext_count) {
+ ost_done = true;
+ /*
+ * If the last stripe has a hole at the end,
+ * we need to return
+ */
+ if (cur_stripe_wrap == last_stripe) {
+ fiemap->fm_mapped_extents = 0;
+ goto finish;
+ }
+ break;
+ } else if (enough) {
+ /*
+ * We've collected enough extents and there are
+ * more extents after this one.
+ */
+ goto finish;
+ }
+
+ /* If we just need the number of extents, go to the next device */
+ if (!fiemap->fm_extent_count) {
+ current_extent += ext_count;
+ break;
+ }
+
+ /* prepare to copy the retrieved map extents */
+ len_mapped_single_call =
+ lcl_fm_ext[ext_count - 1].fe_logical -
+ lun_start + lcl_fm_ext[ext_count - 1].fe_length;
+
+ /* Have we finished mapping on this device? */
+ if (req_fm_len <= len_mapped_single_call)
+ ost_done = true;
+
+ /*
+ * Clear the EXTENT_LAST flag which can be present on
+ * the last extent
+ */
+ if (lcl_fm_ext[ext_count - 1].fe_flags &
+ FIEMAP_EXTENT_LAST)
+ lcl_fm_ext[ext_count - 1].fe_flags &=
+ ~FIEMAP_EXTENT_LAST;
+
+ if (lov_stripe_size(lsm,
+ lcl_fm_ext[ext_count - 1].fe_logical +
+ lcl_fm_ext[ext_count - 1].fe_length,
+ cur_stripe) >= fmkey->lfik_oa.o_size)
+ ost_eof = true;
+
+ fiemap_prepare_and_copy_exts(fiemap, lcl_fm_ext,
+ ost_index, ext_count,
+ current_extent);
+ current_extent += ext_count;
+
+ /* Ran out of available extents? */
+ if (current_extent >= fiemap->fm_extent_count)
+ enough = true;
+ } while (!ost_done && !ost_eof);
+
+ cl_object_put(env, subobj);
+ subobj = NULL;
+
+ if (cur_stripe_wrap == last_stripe)
+ goto finish;
+ } /* for each stripe */
+finish:
+ /*
+ * Indicate that we are returning device offsets unless the file has
+ * just a single stripe
+ */
+ if (lsm->lsm_stripe_count > 1)
+ fiemap->fm_flags |= FIEMAP_FLAG_DEVICE_ORDER;
+
+ if (!fiemap->fm_extent_count)
+ goto skip_last_device_calc;
+
+ /*
+ * Check if we have reached the last stripe and whether mapping for that
+ * stripe is done.
+ */
+ if ((cur_stripe_wrap == last_stripe) && (ost_done || ost_eof))
+ fiemap->fm_extents[current_extent - 1].fe_flags |=
+ FIEMAP_EXTENT_LAST;
+skip_last_device_calc:
+ fiemap->fm_mapped_extents = current_extent;
+obj_put:
+ if (subobj)
+ cl_object_put(env, subobj);
+out:
+ kvfree(fm_local);
+ lov_lsm_put(lsm);
+ return rc;
+}
+
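For reference, a hedged userspace sketch of driving this path (error handling trimmed; `fd` is an assumed open descriptor on a Lustre file, and FIEMAP_FLAG_DEVICE_ORDER comes from the Lustre user headers rather than vanilla linux/fiemap.h). Without that flag, the function above returns -ENOTSUPP for files with stripe_count > 1:

	#include <stdlib.h>
	#include <sys/ioctl.h>
	#include <linux/fs.h>		/* FS_IOC_FIEMAP */
	#include <linux/fiemap.h>	/* struct fiemap, FIEMAP_MAX_OFFSET */

	static int map_whole_file(int fd, unsigned int nr)
	{
		struct fiemap *fm;
		int rc;

		fm = calloc(1, sizeof(*fm) + nr * sizeof(struct fiemap_extent));
		if (!fm)
			return -1;
		fm->fm_length = FIEMAP_MAX_OFFSET;		/* ~0ULL: map to EOF */
		fm->fm_flags = FIEMAP_FLAG_DEVICE_ORDER;	/* extents grouped per OST */
		fm->fm_extent_count = nr;
		rc = ioctl(fd, FS_IOC_FIEMAP, fm);		/* fm_start is 0 from calloc */
		free(fm);
		return rc;
	}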
static int lov_object_getstripe(const struct lu_env *env, struct cl_object *obj,
struct lov_user_md __user *lum)
{
@@ -923,10 +1437,53 @@ static int lov_object_getstripe(const struct lu_env *env, struct cl_object *obj,
return -ENODATA;
rc = lov_getstripe(cl2lov(obj), lsm, lum);
- lov_lsm_put(obj, lsm);
+ lov_lsm_put(lsm);
return rc;
}
+static int lov_object_layout_get(const struct lu_env *env,
+ struct cl_object *obj,
+ struct cl_layout *cl)
+{
+ struct lov_object *lov = cl2lov(obj);
+ struct lov_stripe_md *lsm = lov_lsm_addref(lov);
+ struct lu_buf *buf = &cl->cl_buf;
+ ssize_t rc;
+
+ if (!lsm) {
+ cl->cl_size = 0;
+ cl->cl_layout_gen = CL_LAYOUT_GEN_EMPTY;
+ cl->cl_is_released = false;
+
+ return 0;
+ }
+
+ cl->cl_size = lov_mds_md_size(lsm->lsm_stripe_count, lsm->lsm_magic);
+ cl->cl_layout_gen = lsm->lsm_layout_gen;
+ cl->cl_is_released = lsm_is_released(lsm);
+
+ rc = lov_lsm_pack(lsm, buf->lb_buf, buf->lb_len);
+ lov_lsm_put(lsm);
+
+ return rc < 0 ? rc : 0;
+}
+
+static loff_t lov_object_maxbytes(struct cl_object *obj)
+{
+ struct lov_object *lov = cl2lov(obj);
+ struct lov_stripe_md *lsm = lov_lsm_addref(lov);
+ loff_t maxbytes;
+
+ if (!lsm)
+ return LLONG_MAX;
+
+ maxbytes = lsm->lsm_maxbytes;
+
+ lov_lsm_put(lsm);
+
+ return maxbytes;
+}
+
static const struct cl_object_operations lov_ops = {
.coo_page_init = lov_page_init,
.coo_lock_init = lov_lock_init,
@@ -934,7 +1491,10 @@ static const struct cl_object_operations lov_ops = {
.coo_attr_get = lov_attr_get,
.coo_attr_update = lov_attr_update,
.coo_conf_set = lov_conf_set,
- .coo_getstripe = lov_object_getstripe
+ .coo_getstripe = lov_object_getstripe,
+ .coo_layout_get = lov_object_layout_get,
+ .coo_maxbytes = lov_object_maxbytes,
+ .coo_fiemap = lov_object_fiemap,
};
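The three new entries above are reached through the generic cl_object dispatch; a hedged sketch of how coo_fiemap fans out, assuming it follows the same layer-walk pattern as the other cl_object_*() helpers (which the cl_object_fiemap() call inside lov_object_fiemap() relies on):

	/* sketch of the generic layer walk; 'o', 'top' etc. as in cl_object.c */
	struct cl_object *o;
	int rc = 0;

	list_for_each_entry(o, &top->loh_layers, co_lu.lo_linkage) {
		if (o->co_ops->coo_fiemap) {
			rc = o->co_ops->coo_fiemap(env, o, fmkey, fiemap, buflen);
			if (rc)
				break;
		}
	}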
static const struct lu_object_operations lov_lu_obj_ops = {
@@ -986,22 +1546,6 @@ struct lov_stripe_md *lov_lsm_addref(struct lov_object *lov)
return lsm;
}
-struct lov_stripe_md *lov_lsm_get(struct cl_object *clobj)
-{
- struct lu_object *luobj;
- struct lov_stripe_md *lsm = NULL;
-
- if (!clobj)
- return NULL;
-
- luobj = lu_object_locate(&cl_object_header(clobj)->coh_lu,
- &lov_device_type);
- if (luobj)
- lsm = lov_lsm_addref(lu2lov(luobj));
- return lsm;
-}
-EXPORT_SYMBOL(lov_lsm_get);
-
int lov_read_and_clear_async_rc(struct cl_object *clob)
{
struct lu_object *luobj;
diff --git a/drivers/staging/lustre/lustre/lov/lov_pack.c b/drivers/staging/lustre/lustre/lov/lov_pack.c
index be6e9857ce2a..6c93d180aef7 100644
--- a/drivers/staging/lustre/lustre/lov/lov_pack.c
+++ b/drivers/staging/lustre/lustre/lov/lov_pack.c
@@ -38,14 +38,17 @@
#define DEBUG_SUBSYSTEM S_LOV
+#include "../include/lustre/lustre_idl.h"
+#include "../include/lustre/lustre_user.h"
+
#include "../include/lustre_net.h"
+#include "../include/lustre_swab.h"
#include "../include/obd.h"
#include "../include/obd_class.h"
#include "../include/obd_support.h"
-#include "../include/lustre/lustre_user.h"
-#include "lov_internal.h"
#include "lov_cl_internal.h"
+#include "lov_internal.h"
void lov_dump_lmm_common(int level, void *lmmp)
{
@@ -97,120 +100,54 @@ void lov_dump_lmm_v3(int level, struct lov_mds_md_v3 *lmm)
le16_to_cpu(lmm->lmm_stripe_count));
}
-/* Pack LOV object metadata for disk storage. It is packed in LE byte
- * order and is opaque to the networking layer.
+/**
+ * Pack LOV striping metadata into the disk storage format (in
+ * little-endian byte order).
*
- * XXX In the future, this will be enhanced to get the EA size from the
- * underlying OSC device(s) to get their EA sizes so we can stack
- * LOVs properly. For now lov_mds_md_size() just assumes one u64
- * per stripe.
+ * This follows the getxattr() conventions. If \a buf_size is zero
+ * then return the size needed. If \a buf_size is too small then
+ * return -ERANGE. Otherwise return the size of the result.
*/
-int lov_obd_packmd(struct lov_obd *lov, struct lov_mds_md **lmmp,
- struct lov_stripe_md *lsm)
+ssize_t lov_lsm_pack(const struct lov_stripe_md *lsm, void *buf,
+ size_t buf_size)
{
- struct lov_mds_md_v1 *lmmv1;
- struct lov_mds_md_v3 *lmmv3;
- __u16 stripe_count;
struct lov_ost_data_v1 *lmm_objects;
- int lmm_size, lmm_magic;
- int i;
- int cplen = 0;
-
- if (lsm) {
- lmm_magic = lsm->lsm_magic;
- } else {
- if (lmmp && *lmmp)
- lmm_magic = le32_to_cpu((*lmmp)->lmm_magic);
- else
- /* lsm == NULL and lmmp == NULL */
- lmm_magic = LOV_MAGIC;
- }
-
- if ((lmm_magic != LOV_MAGIC_V1) &&
- (lmm_magic != LOV_MAGIC_V3)) {
- CERROR("bad mem LOV MAGIC: 0x%08X != 0x%08X nor 0x%08X\n",
- lmm_magic, LOV_MAGIC_V1, LOV_MAGIC_V3);
- return -EINVAL;
- }
-
- if (lsm) {
- /* If we are just sizing the EA, limit the stripe count
- * to the actual number of OSTs in this filesystem.
- */
- if (!lmmp) {
- stripe_count = lov_get_stripecnt(lov, lmm_magic,
- lsm->lsm_stripe_count);
- lsm->lsm_stripe_count = stripe_count;
- } else if (!lsm_is_released(lsm)) {
- stripe_count = lsm->lsm_stripe_count;
- } else {
- stripe_count = 0;
- }
- } else {
- /*
- * To calculate maximum easize by active targets at present,
- * which is exactly the maximum easize to be seen by LOV
- */
- stripe_count = lov->desc.ld_active_tgt_count;
- }
+ struct lov_mds_md_v1 *lmmv1 = buf;
+ struct lov_mds_md_v3 *lmmv3 = buf;
+ size_t lmm_size;
+ unsigned int i;
- /* XXX LOV STACKING call into osc for sizes */
- lmm_size = lov_mds_md_size(stripe_count, lmm_magic);
-
- if (!lmmp)
+ lmm_size = lov_mds_md_size(lsm->lsm_stripe_count, lsm->lsm_magic);
+ if (!buf_size)
return lmm_size;
- if (*lmmp && !lsm) {
- stripe_count = le16_to_cpu((*lmmp)->lmm_stripe_count);
- lmm_size = lov_mds_md_size(stripe_count, lmm_magic);
- kvfree(*lmmp);
- *lmmp = NULL;
- return 0;
- }
-
- if (!*lmmp) {
- *lmmp = libcfs_kvzalloc(lmm_size, GFP_NOFS);
- if (!*lmmp)
- return -ENOMEM;
- }
-
- CDEBUG(D_INFO, "lov_packmd: LOV_MAGIC 0x%08X, lmm_size = %d\n",
- lmm_magic, lmm_size);
-
- lmmv1 = *lmmp;
- lmmv3 = (struct lov_mds_md_v3 *)*lmmp;
- if (lmm_magic == LOV_MAGIC_V3)
- lmmv3->lmm_magic = cpu_to_le32(LOV_MAGIC_V3);
- else
- lmmv1->lmm_magic = cpu_to_le32(LOV_MAGIC_V1);
-
- if (!lsm)
- return lmm_size;
+ if (buf_size < lmm_size)
+ return -ERANGE;
- /* lmmv1 and lmmv3 point to the same struct and have the
+ /*
+ * lmmv1 and lmmv3 point to the same struct and have the
* same first fields
*/
+ lmmv1->lmm_magic = cpu_to_le32(lsm->lsm_magic);
lmm_oi_cpu_to_le(&lmmv1->lmm_oi, &lsm->lsm_oi);
lmmv1->lmm_stripe_size = cpu_to_le32(lsm->lsm_stripe_size);
- lmmv1->lmm_stripe_count = cpu_to_le16(stripe_count);
+ lmmv1->lmm_stripe_count = cpu_to_le16(lsm->lsm_stripe_count);
lmmv1->lmm_pattern = cpu_to_le32(lsm->lsm_pattern);
lmmv1->lmm_layout_gen = cpu_to_le16(lsm->lsm_layout_gen);
+
if (lsm->lsm_magic == LOV_MAGIC_V3) {
- cplen = strlcpy(lmmv3->lmm_pool_name, lsm->lsm_pool_name,
- sizeof(lmmv3->lmm_pool_name));
- if (cplen >= sizeof(lmmv3->lmm_pool_name))
- return -E2BIG;
+ CLASSERT(sizeof(lsm->lsm_pool_name) ==
+ sizeof(lmmv3->lmm_pool_name));
+ strlcpy(lmmv3->lmm_pool_name, lsm->lsm_pool_name,
+ sizeof(lmmv3->lmm_pool_name));
lmm_objects = lmmv3->lmm_objects;
} else {
lmm_objects = lmmv1->lmm_objects;
}
- for (i = 0; i < stripe_count; i++) {
+ for (i = 0; i < lsm->lsm_stripe_count; i++) {
struct lov_oinfo *loi = lsm->lsm_oinfo[i];
- /* XXX LOV STACKING call down to osc_packmd() to do packing */
- LASSERTF(ostid_id(&loi->loi_oi) != 0, "lmm_oi "DOSTID
- " stripe %u/%u idx %u\n", POSTID(&lmmv1->lmm_oi),
- i, stripe_count, loi->loi_ost_idx);
+
ostid_cpu_to_le(&loi->loi_oi, &lmm_objects[i].l_ost_oi);
lmm_objects[i].l_ost_gen = cpu_to_le32(loi->loi_ost_gen);
lmm_objects[i].l_ost_idx = cpu_to_le32(loi->loi_ost_idx);
@@ -219,15 +156,6 @@ int lov_obd_packmd(struct lov_obd *lov, struct lov_mds_md **lmmp,
return lmm_size;
}
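The convention above is exercised with the usual two-call pattern; a hedged sketch of a hypothetical caller (this is essentially what lov_getstripe() does further down in this file):

	ssize_t size;
	void *buf;
	ssize_t rc;

	size = lov_lsm_pack(lsm, NULL, 0);	/* probe: bytes needed */
	buf = libcfs_kvzalloc(size, GFP_NOFS);
	if (!buf)
		return -ENOMEM;
	rc = lov_lsm_pack(lsm, buf, size);	/* == size on success, -ERANGE if short */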
-int lov_packmd(struct obd_export *exp, struct lov_mds_md **lmmp,
- struct lov_stripe_md *lsm)
-{
- struct obd_device *obd = class_exp2obd(exp);
- struct lov_obd *lov = &obd->u.lov;
-
- return lov_obd_packmd(lov, lmmp, lsm);
-}
-
/* Find the max stripecount we should use */
__u16 lov_get_stripecnt(struct lov_obd *lov, __u32 magic, __u16 stripe_count)
{
@@ -270,34 +198,34 @@ static int lov_verify_lmm(void *lmm, int lmm_bytes, __u16 *stripe_count)
return rc;
}
-int lov_alloc_memmd(struct lov_stripe_md **lsmp, __u16 stripe_count,
- int pattern, int magic)
+struct lov_stripe_md *lov_lsm_alloc(u16 stripe_count, u32 pattern, u32 magic)
{
- int i, lsm_size;
+ struct lov_stripe_md *lsm;
+ unsigned int i;
- CDEBUG(D_INFO, "alloc lsm, stripe_count %d\n", stripe_count);
+ CDEBUG(D_INFO, "alloc lsm, stripe_count %u\n", stripe_count);
- *lsmp = lsm_alloc_plain(stripe_count, &lsm_size);
- if (!*lsmp) {
- CERROR("can't allocate lsmp stripe_count %d\n", stripe_count);
- return -ENOMEM;
+ lsm = lsm_alloc_plain(stripe_count);
+ if (!lsm) {
+ CERROR("cannot allocate LSM stripe_count %u\n", stripe_count);
+ return ERR_PTR(-ENOMEM);
}
- atomic_set(&(*lsmp)->lsm_refc, 1);
- spin_lock_init(&(*lsmp)->lsm_lock);
- (*lsmp)->lsm_magic = magic;
- (*lsmp)->lsm_stripe_count = stripe_count;
- (*lsmp)->lsm_maxbytes = LUSTRE_EXT3_STRIPE_MAXBYTES * stripe_count;
- (*lsmp)->lsm_pattern = pattern;
- (*lsmp)->lsm_pool_name[0] = '\0';
- (*lsmp)->lsm_layout_gen = 0;
+ atomic_set(&lsm->lsm_refc, 1);
+ spin_lock_init(&lsm->lsm_lock);
+ lsm->lsm_magic = magic;
+ lsm->lsm_stripe_count = stripe_count;
+ lsm->lsm_maxbytes = LUSTRE_EXT3_STRIPE_MAXBYTES * stripe_count;
+ lsm->lsm_pattern = pattern;
+ lsm->lsm_pool_name[0] = '\0';
+ lsm->lsm_layout_gen = 0;
if (stripe_count > 0)
- (*lsmp)->lsm_oinfo[0]->loi_ost_idx = ~0;
+ lsm->lsm_oinfo[0]->loi_ost_idx = ~0;
for (i = 0; i < stripe_count; i++)
- loi_init((*lsmp)->lsm_oinfo[i]);
+ loi_init(lsm->lsm_oinfo[i]);
- return lsm_size;
+ return lsm;
}
int lov_free_memmd(struct lov_stripe_md **lsmp)
@@ -317,56 +245,34 @@ int lov_free_memmd(struct lov_stripe_md **lsmp)
/* Unpack LOV object metadata from disk storage. It is packed in LE byte
* order and is opaque to the networking layer.
*/
-int lov_unpackmd(struct obd_export *exp, struct lov_stripe_md **lsmp,
- struct lov_mds_md *lmm, int lmm_bytes)
+struct lov_stripe_md *lov_unpackmd(struct lov_obd *lov, struct lov_mds_md *lmm,
+ size_t lmm_size)
{
- struct obd_device *obd = class_exp2obd(exp);
- struct lov_obd *lov = &obd->u.lov;
- int rc = 0, lsm_size;
- __u16 stripe_count;
- __u32 magic;
- __u32 pattern;
-
- /* If passed an MDS struct use values from there, otherwise defaults */
- if (lmm) {
- rc = lov_verify_lmm(lmm, lmm_bytes, &stripe_count);
- if (rc)
- return rc;
- magic = le32_to_cpu(lmm->lmm_magic);
- pattern = le32_to_cpu(lmm->lmm_pattern);
- } else {
- magic = LOV_MAGIC;
- stripe_count = lov_get_stripecnt(lov, magic, 0);
- pattern = LOV_PATTERN_RAID0;
- }
+ struct lov_stripe_md *lsm;
+ u16 stripe_count;
+ u32 pattern;
+ u32 magic;
+ int rc;
- /* If we aren't passed an lsmp struct, we just want the size */
- if (!lsmp) {
- /* XXX LOV STACKING call into osc for sizes */
- LBUG();
- return lov_stripe_md_size(stripe_count);
- }
- /* If we are passed an allocated struct but nothing to unpack, free */
- if (*lsmp && !lmm) {
- lov_free_memmd(lsmp);
- return 0;
- }
+ rc = lov_verify_lmm(lmm, lmm_size, &stripe_count);
+ if (rc)
+ return ERR_PTR(rc);
- lsm_size = lov_alloc_memmd(lsmp, stripe_count, pattern, magic);
- if (lsm_size < 0)
- return lsm_size;
+ magic = le32_to_cpu(lmm->lmm_magic);
+ pattern = le32_to_cpu(lmm->lmm_pattern);
- /* If we are passed a pointer but nothing to unpack, we only alloc */
- if (!lmm)
- return lsm_size;
+ lsm = lov_lsm_alloc(stripe_count, pattern, magic);
+ if (IS_ERR(lsm))
+ return lsm;
- rc = lsm_op_find(magic)->lsm_unpackmd(lov, *lsmp, lmm);
+ LASSERT(lsm_op_find(magic));
+ rc = lsm_op_find(magic)->lsm_unpackmd(lov, lsm, lmm);
if (rc) {
- lov_free_memmd(lsmp);
- return rc;
+ lov_free_memmd(&lsm);
+ return ERR_PTR(rc);
}
- return lsm_size;
+ return lsm;
}
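With the signature change, callers move from a status-plus-out-parameter style to the standard ERR_PTR() idiom; a minimal sketch:

	struct lov_stripe_md *lsm = lov_unpackmd(lov, lmm, lmm_size);

	if (IS_ERR(lsm))
		return PTR_ERR(lsm);	/* -EINVAL from lov_verify_lmm(), -ENOMEM, ... */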
/* Retrieve object striping information.
@@ -378,15 +284,14 @@ int lov_unpackmd(struct obd_export *exp, struct lov_stripe_md **lsmp,
int lov_getstripe(struct lov_object *obj, struct lov_stripe_md *lsm,
struct lov_user_md __user *lump)
{
- /*
- * XXX huge struct allocated on stack.
- */
/* we use lov_user_md_v3 because it is larger than lov_user_md_v1 */
- struct lov_obd *lov;
struct lov_user_md_v3 lum;
- struct lov_mds_md *lmmk = NULL;
- int rc, lmmk_size, lmm_size;
- int lum_size;
+ struct lov_mds_md *lmmk;
+ u32 stripe_count;
+ ssize_t lmm_size;
+ size_t lmmk_size;
+ size_t lum_size;
+ int rc;
mm_segment_t seg;
if (!lsm)
@@ -399,6 +304,18 @@ int lov_getstripe(struct lov_object *obj, struct lov_stripe_md *lsm,
seg = get_fs();
set_fs(KERNEL_DS);
+ if (lsm->lsm_magic != LOV_MAGIC_V1 && lsm->lsm_magic != LOV_MAGIC_V3) {
+ CERROR("bad LSM MAGIC: 0x%08X != 0x%08X nor 0x%08X\n",
+ lsm->lsm_magic, LOV_MAGIC_V1, LOV_MAGIC_V3);
+ rc = -EIO;
+ goto out;
+ }
+
+ if (!lsm_is_released(lsm))
+ stripe_count = lsm->lsm_stripe_count;
+ else
+ stripe_count = 0;
+
/* we only need the header part from user space to get lmm_magic and
* lmm_stripe_count, (the header part is common to v1 and v3)
*/
@@ -417,32 +334,40 @@ int lov_getstripe(struct lov_object *obj, struct lov_stripe_md *lsm,
if (lum.lmm_stripe_count &&
(lum.lmm_stripe_count < lsm->lsm_stripe_count)) {
/* Return right size of stripe to user */
- lum.lmm_stripe_count = lsm->lsm_stripe_count;
+ lum.lmm_stripe_count = stripe_count;
rc = copy_to_user(lump, &lum, lum_size);
rc = -EOVERFLOW;
goto out;
}
- lov = lu2lov_dev(obj->lo_cl.co_lu.lo_dev)->ld_lov;
- rc = lov_obd_packmd(lov, &lmmk, lsm);
- if (rc < 0)
+ lmmk_size = lov_mds_md_size(stripe_count, lsm->lsm_magic);
+
+ lmmk = libcfs_kvzalloc(lmmk_size, GFP_NOFS);
+ if (!lmmk) {
+ rc = -ENOMEM;
goto out;
- lmmk_size = rc;
- lmm_size = rc;
- rc = 0;
+ }
+
+ lmm_size = lov_lsm_pack(lsm, lmmk, lmmk_size);
+ if (lmm_size < 0) {
+ rc = lmm_size;
+ goto out_free;
+ }
/* FIXME: Bug 1185 - copy fields properly when structs change */
/* struct lov_user_md_v3 and struct lov_mds_md_v3 must be the same */
CLASSERT(sizeof(lum) == sizeof(struct lov_mds_md_v3));
CLASSERT(sizeof(lum.lmm_objects[0]) == sizeof(lmmk->lmm_objects[0]));
- if ((cpu_to_le32(LOV_MAGIC) != LOV_MAGIC) &&
- ((lmmk->lmm_magic == cpu_to_le32(LOV_MAGIC_V1)) ||
- (lmmk->lmm_magic == cpu_to_le32(LOV_MAGIC_V3)))) {
+ if (cpu_to_le32(LOV_MAGIC) != LOV_MAGIC &&
+ (lmmk->lmm_magic == cpu_to_le32(LOV_MAGIC_V1) ||
+ lmmk->lmm_magic == cpu_to_le32(LOV_MAGIC_V3))) {
lustre_swab_lov_mds_md(lmmk);
lustre_swab_lov_user_md_objects(
(struct lov_user_ost_data *)lmmk->lmm_objects,
lmmk->lmm_stripe_count);
}
+
if (lum.lmm_magic == LOV_USER_MAGIC) {
/* User request for v1, we need skip lmm_pool_name */
if (lmmk->lmm_magic == LOV_MAGIC_V3) {
@@ -474,9 +399,11 @@ int lov_getstripe(struct lov_object *obj, struct lov_stripe_md *lsm,
((struct lov_user_md *)lmmk)->lmm_stripe_count = lum.lmm_stripe_count;
if (copy_to_user(lump, lmmk, lmm_size))
rc = -EFAULT;
+ else
+ rc = 0;
out_free:
- kfree(lmmk);
+ kvfree(lmmk);
out:
set_fs(seg);
return rc;
diff --git a/drivers/staging/lustre/lustre/lov/lov_page.c b/drivers/staging/lustre/lustre/lov/lov_page.c
index 00bfabad78eb..62ceb6dfdfdf 100644
--- a/drivers/staging/lustre/lustre/lov/lov_page.c
+++ b/drivers/staging/lustre/lustre/lov/lov_page.c
@@ -49,51 +49,6 @@
*
*/
-/**
- * Adjust the stripe index by layout of raid0. @max_index is the maximum
- * page index covered by an underlying DLM lock.
- * This function converts max_index from stripe level to file level, and make
- * sure it's not beyond one stripe.
- */
-static int lov_raid0_page_is_under_lock(const struct lu_env *env,
- const struct cl_page_slice *slice,
- struct cl_io *unused,
- pgoff_t *max_index)
-{
- struct lov_object *loo = cl2lov(slice->cpl_obj);
- struct lov_layout_raid0 *r0 = lov_r0(loo);
- pgoff_t index = *max_index;
- unsigned int pps; /* pages per stripe */
-
- CDEBUG(D_READA, DFID "*max_index = %lu, nr = %d\n",
- PFID(lu_object_fid(lov2lu(loo))), index, r0->lo_nr);
-
- if (index == 0) /* the page is not covered by any lock */
- return 0;
-
- if (r0->lo_nr == 1) /* single stripe file */
- return 0;
-
- /* max_index is stripe level, convert it into file level */
- if (index != CL_PAGE_EOF) {
- int stripeno = lov_page_stripe(slice->cpl_page);
- *max_index = lov_stripe_pgoff(loo->lo_lsm, index, stripeno);
- }
-
- /* calculate the end of current stripe */
- pps = loo->lo_lsm->lsm_stripe_size >> PAGE_SHIFT;
- index = slice->cpl_index + pps - slice->cpl_index % pps - 1;
-
- CDEBUG(D_READA, DFID "*max_index = %lu, index = %lu, pps = %u, stripe_size = %u, stripe no = %u, page index = %lu\n",
- PFID(lu_object_fid(lov2lu(loo))), *max_index, index, pps,
- loo->lo_lsm->lsm_stripe_size, lov_page_stripe(slice->cpl_page),
- slice->cpl_index);
-
- /* never exceed the end of the stripe */
- *max_index = min_t(pgoff_t, *max_index, index);
- return 0;
-}
-
static int lov_raid0_page_print(const struct lu_env *env,
const struct cl_page_slice *slice,
void *cookie, lu_printer_t printer)
@@ -104,7 +59,6 @@ static int lov_raid0_page_print(const struct lu_env *env,
}
static const struct cl_page_operations lov_raid0_page_ops = {
- .cpo_is_under_lock = lov_raid0_page_is_under_lock,
.cpo_print = lov_raid0_page_print
};
diff --git a/drivers/staging/lustre/lustre/lov/lov_pool.c b/drivers/staging/lustre/lustre/lov/lov_pool.c
index f8c8a361ef79..7daa8671fdc3 100644
--- a/drivers/staging/lustre/lustre/lov/lov_pool.c
+++ b/drivers/staging/lustre/lustre/lov/lov_pool.c
@@ -81,7 +81,8 @@ static void lov_pool_putref_locked(struct pool_desc *pool)
* Chapter 6.4.
* Addison Wesley, 1973
*/
-static __u32 pool_hashfn(struct cfs_hash *hash_body, const void *key, unsigned mask)
+static __u32 pool_hashfn(struct cfs_hash *hash_body, const void *key,
+ unsigned int mask)
{
int i;
__u32 result;
diff --git a/drivers/staging/lustre/lustre/lov/lov_request.c b/drivers/staging/lustre/lustre/lov/lov_request.c
index 09dcaf484c89..d43cc88ae641 100644
--- a/drivers/staging/lustre/lustre/lov/lov_request.c
+++ b/drivers/staging/lustre/lustre/lov/lov_request.c
@@ -44,7 +44,6 @@ static void lov_init_set(struct lov_request_set *set)
atomic_set(&set->set_completes, 0);
atomic_set(&set->set_success, 0);
atomic_set(&set->set_finish_checked, 0);
- set->set_cookies = NULL;
INIT_LIST_HEAD(&set->set_list);
atomic_set(&set->set_refcount, 1);
init_waitqueue_head(&set->set_waitq);
@@ -61,8 +60,6 @@ void lov_finish_set(struct lov_request_set *set)
rq_link);
list_del_init(&req->rq_link);
- if (req->rq_oi.oi_oa)
- kmem_cache_free(obdo_cachep, req->rq_oi.oi_oa);
kfree(req->rq_oi.oi_osfs);
kfree(req);
}
@@ -97,22 +94,6 @@ static void lov_update_set(struct lov_request_set *set,
wake_up(&set->set_waitq);
}
-int lov_update_common_set(struct lov_request_set *set,
- struct lov_request *req, int rc)
-{
- struct lov_obd *lov = &set->set_exp->exp_obd->u.lov;
-
- lov_update_set(set, req, rc);
-
- /* grace error on inactive ost */
- if (rc && !(lov->lov_tgts[req->rq_idx] &&
- lov->lov_tgts[req->rq_idx]->ltd_active))
- rc = 0;
-
- /* FIXME in raid1 regime, should return 0 */
- return rc;
-}
-
static void lov_set_add_req(struct lov_request *req,
struct lov_request_set *set)
{
@@ -183,279 +164,6 @@ out:
return rc;
}
-static int common_attr_done(struct lov_request_set *set)
-{
- struct lov_request *req;
- struct obdo *tmp_oa;
- int rc = 0, attrset = 0;
-
- if (!set->set_oi->oi_oa)
- return 0;
-
- if (!atomic_read(&set->set_success))
- return -EIO;
-
- tmp_oa = kmem_cache_zalloc(obdo_cachep, GFP_NOFS);
- if (!tmp_oa) {
- rc = -ENOMEM;
- goto out;
- }
-
- list_for_each_entry(req, &set->set_list, rq_link) {
- if (!req->rq_complete || req->rq_rc)
- continue;
- if (req->rq_oi.oi_oa->o_valid == 0) /* inactive stripe */
- continue;
- lov_merge_attrs(tmp_oa, req->rq_oi.oi_oa,
- req->rq_oi.oi_oa->o_valid,
- set->set_oi->oi_md, req->rq_stripe, &attrset);
- }
- if (!attrset) {
- CERROR("No stripes had valid attrs\n");
- rc = -EIO;
- }
- if ((set->set_oi->oi_oa->o_valid & OBD_MD_FLEPOCH) &&
- (set->set_oi->oi_md->lsm_stripe_count != attrset)) {
- /* When we take attributes of some epoch, we require all the
- * ost to be active.
- */
- CERROR("Not all the stripes had valid attrs\n");
- rc = -EIO;
- goto out;
- }
-
- tmp_oa->o_oi = set->set_oi->oi_oa->o_oi;
- memcpy(set->set_oi->oi_oa, tmp_oa, sizeof(*set->set_oi->oi_oa));
-out:
- if (tmp_oa)
- kmem_cache_free(obdo_cachep, tmp_oa);
- return rc;
-}
-
-int lov_fini_getattr_set(struct lov_request_set *set)
-{
- int rc = 0;
-
- if (!set)
- return 0;
- LASSERT(set->set_exp);
- if (atomic_read(&set->set_completes))
- rc = common_attr_done(set);
-
- lov_put_reqset(set);
-
- return rc;
-}
-
-/* The callback for osc_getattr_async that finalizes a request info when a
- * response is received.
- */
-static int cb_getattr_update(void *cookie, int rc)
-{
- struct obd_info *oinfo = cookie;
- struct lov_request *lovreq;
-
- lovreq = container_of(oinfo, struct lov_request, rq_oi);
- return lov_update_common_set(lovreq->rq_rqset, lovreq, rc);
-}
-
-int lov_prep_getattr_set(struct obd_export *exp, struct obd_info *oinfo,
- struct lov_request_set **reqset)
-{
- struct lov_request_set *set;
- struct lov_obd *lov = &exp->exp_obd->u.lov;
- int rc = 0, i;
-
- set = kzalloc(sizeof(*set), GFP_NOFS);
- if (!set)
- return -ENOMEM;
- lov_init_set(set);
-
- set->set_exp = exp;
- set->set_oi = oinfo;
-
- for (i = 0; i < oinfo->oi_md->lsm_stripe_count; i++) {
- struct lov_oinfo *loi;
- struct lov_request *req;
-
- loi = oinfo->oi_md->lsm_oinfo[i];
- if (lov_oinfo_is_dummy(loi))
- continue;
-
- if (!lov_check_and_wait_active(lov, loi->loi_ost_idx)) {
- CDEBUG(D_HA, "lov idx %d inactive\n", loi->loi_ost_idx);
- if (oinfo->oi_oa->o_valid & OBD_MD_FLEPOCH) {
- /* SOM requires all the OSTs to be active. */
- rc = -EIO;
- goto out_set;
- }
- continue;
- }
-
- req = kzalloc(sizeof(*req), GFP_NOFS);
- if (!req) {
- rc = -ENOMEM;
- goto out_set;
- }
-
- req->rq_stripe = i;
- req->rq_idx = loi->loi_ost_idx;
-
- req->rq_oi.oi_oa = kmem_cache_zalloc(obdo_cachep, GFP_NOFS);
- if (!req->rq_oi.oi_oa) {
- kfree(req);
- rc = -ENOMEM;
- goto out_set;
- }
- memcpy(req->rq_oi.oi_oa, oinfo->oi_oa,
- sizeof(*req->rq_oi.oi_oa));
- req->rq_oi.oi_oa->o_oi = loi->loi_oi;
- req->rq_oi.oi_cb_up = cb_getattr_update;
-
- lov_set_add_req(req, set);
- }
- if (!set->set_count) {
- rc = -EIO;
- goto out_set;
- }
- *reqset = set;
- return rc;
-out_set:
- lov_fini_getattr_set(set);
- return rc;
-}
-
-int lov_fini_setattr_set(struct lov_request_set *set)
-{
- int rc = 0;
-
- if (!set)
- return 0;
- LASSERT(set->set_exp);
- if (atomic_read(&set->set_completes)) {
- rc = common_attr_done(set);
- /* FIXME update qos data here */
- }
-
- lov_put_reqset(set);
- return rc;
-}
-
-int lov_update_setattr_set(struct lov_request_set *set,
- struct lov_request *req, int rc)
-{
- struct lov_obd *lov = &req->rq_rqset->set_exp->exp_obd->u.lov;
- struct lov_stripe_md *lsm = req->rq_rqset->set_oi->oi_md;
-
- lov_update_set(set, req, rc);
-
- /* grace error on inactive ost */
- if (rc && !(lov->lov_tgts[req->rq_idx] &&
- lov->lov_tgts[req->rq_idx]->ltd_active))
- rc = 0;
-
- if (rc == 0) {
- if (req->rq_oi.oi_oa->o_valid & OBD_MD_FLCTIME)
- lsm->lsm_oinfo[req->rq_stripe]->loi_lvb.lvb_ctime =
- req->rq_oi.oi_oa->o_ctime;
- if (req->rq_oi.oi_oa->o_valid & OBD_MD_FLMTIME)
- lsm->lsm_oinfo[req->rq_stripe]->loi_lvb.lvb_mtime =
- req->rq_oi.oi_oa->o_mtime;
- if (req->rq_oi.oi_oa->o_valid & OBD_MD_FLATIME)
- lsm->lsm_oinfo[req->rq_stripe]->loi_lvb.lvb_atime =
- req->rq_oi.oi_oa->o_atime;
- }
-
- return rc;
-}
-
-/* The callback for osc_setattr_async that finalizes a request info when a
- * response is received.
- */
-static int cb_setattr_update(void *cookie, int rc)
-{
- struct obd_info *oinfo = cookie;
- struct lov_request *lovreq;
-
- lovreq = container_of(oinfo, struct lov_request, rq_oi);
- return lov_update_setattr_set(lovreq->rq_rqset, lovreq, rc);
-}
-
-int lov_prep_setattr_set(struct obd_export *exp, struct obd_info *oinfo,
- struct obd_trans_info *oti,
- struct lov_request_set **reqset)
-{
- struct lov_request_set *set;
- struct lov_obd *lov = &exp->exp_obd->u.lov;
- int rc = 0, i;
-
- set = kzalloc(sizeof(*set), GFP_NOFS);
- if (!set)
- return -ENOMEM;
- lov_init_set(set);
-
- set->set_exp = exp;
- set->set_oi = oinfo;
- if (oti && oinfo->oi_oa->o_valid & OBD_MD_FLCOOKIE)
- set->set_cookies = oti->oti_logcookies;
-
- for (i = 0; i < oinfo->oi_md->lsm_stripe_count; i++) {
- struct lov_oinfo *loi = oinfo->oi_md->lsm_oinfo[i];
- struct lov_request *req;
-
- if (lov_oinfo_is_dummy(loi))
- continue;
-
- if (!lov_check_and_wait_active(lov, loi->loi_ost_idx)) {
- CDEBUG(D_HA, "lov idx %d inactive\n", loi->loi_ost_idx);
- continue;
- }
-
- req = kzalloc(sizeof(*req), GFP_NOFS);
- if (!req) {
- rc = -ENOMEM;
- goto out_set;
- }
- req->rq_stripe = i;
- req->rq_idx = loi->loi_ost_idx;
-
- req->rq_oi.oi_oa = kmem_cache_zalloc(obdo_cachep, GFP_NOFS);
- if (!req->rq_oi.oi_oa) {
- kfree(req);
- rc = -ENOMEM;
- goto out_set;
- }
- memcpy(req->rq_oi.oi_oa, oinfo->oi_oa,
- sizeof(*req->rq_oi.oi_oa));
- req->rq_oi.oi_oa->o_oi = loi->loi_oi;
- req->rq_oi.oi_oa->o_stripe_idx = i;
- req->rq_oi.oi_cb_up = cb_setattr_update;
-
- if (oinfo->oi_oa->o_valid & OBD_MD_FLSIZE) {
- int off = lov_stripe_offset(oinfo->oi_md,
- oinfo->oi_oa->o_size, i,
- &req->rq_oi.oi_oa->o_size);
-
- if (off < 0 && req->rq_oi.oi_oa->o_size)
- req->rq_oi.oi_oa->o_size--;
-
- CDEBUG(D_INODE, "stripe %d has size %llu/%llu\n",
- i, req->rq_oi.oi_oa->o_size,
- oinfo->oi_oa->o_size);
- }
- lov_set_add_req(req, set);
- }
- if (!set->set_count) {
- rc = -EIO;
- goto out_set;
- }
- *reqset = set;
- return rc;
-out_set:
- lov_fini_setattr_set(set);
- return rc;
-}
-
#define LOV_U64_MAX ((__u64)~0ULL)
#define LOV_SUM_MAX(tot, add) \
do { \
diff --git a/drivers/staging/lustre/lustre/lov/lovsub_dev.c b/drivers/staging/lustre/lustre/lov/lovsub_dev.c
index b519a1940e1e..5d6536f8a4f7 100644
--- a/drivers/staging/lustre/lustre/lov/lovsub_dev.c
+++ b/drivers/staging/lustre/lustre/lov/lovsub_dev.c
@@ -44,46 +44,6 @@
/*****************************************************************************
*
- * Lovsub transfer operations.
- *
- */
-
-static void lovsub_req_completion(const struct lu_env *env,
- const struct cl_req_slice *slice, int ioret)
-{
- struct lovsub_req *lsr;
-
- lsr = cl2lovsub_req(slice);
- kmem_cache_free(lovsub_req_kmem, lsr);
-}
-
-/**
- * Implementation of struct cl_req_operations::cro_attr_set() for lovsub
- * layer. Lov and lovsub are responsible only for struct obdo::o_stripe_idx
- * field, which is filled there.
- */
-static void lovsub_req_attr_set(const struct lu_env *env,
- const struct cl_req_slice *slice,
- const struct cl_object *obj,
- struct cl_req_attr *attr, u64 flags)
-{
- struct lovsub_object *subobj;
-
- subobj = cl2lovsub(obj);
- /*
- * There is no OBD_MD_* flag for obdo::o_stripe_idx, so set it
- * unconditionally. It never changes anyway.
- */
- attr->cra_oa->o_stripe_idx = subobj->lso_index;
-}
-
-static const struct cl_req_operations lovsub_req_ops = {
- .cro_attr_set = lovsub_req_attr_set,
- .cro_completion = lovsub_req_completion
-};
-
-/*****************************************************************************
- *
* Lov-sub device and device type functions.
*
*/
@@ -137,32 +97,12 @@ static struct lu_device *lovsub_device_free(const struct lu_env *env,
return next;
}
-static int lovsub_req_init(const struct lu_env *env, struct cl_device *dev,
- struct cl_req *req)
-{
- struct lovsub_req *lsr;
- int result;
-
- lsr = kmem_cache_zalloc(lovsub_req_kmem, GFP_NOFS);
- if (lsr) {
- cl_req_slice_add(req, &lsr->lsrq_cl, dev, &lovsub_req_ops);
- result = 0;
- } else {
- result = -ENOMEM;
- }
- return result;
-}
-
static const struct lu_device_operations lovsub_lu_ops = {
.ldo_object_alloc = lovsub_object_alloc,
.ldo_process_config = NULL,
.ldo_recovery_complete = NULL
};
-static const struct cl_device_operations lovsub_cl_ops = {
- .cdo_req_init = lovsub_req_init
-};
-
static struct lu_device *lovsub_device_alloc(const struct lu_env *env,
struct lu_device_type *t,
struct lustre_cfg *cfg)
@@ -178,7 +118,6 @@ static struct lu_device *lovsub_device_alloc(const struct lu_env *env,
if (result == 0) {
d = lovsub2lu_dev(lsd);
d->ld_ops = &lovsub_lu_ops;
- lsd->acid_cl.cd_ops = &lovsub_cl_ops;
} else {
d = ERR_PTR(result);
}
diff --git a/drivers/staging/lustre/lustre/lov/lovsub_object.c b/drivers/staging/lustre/lustre/lov/lovsub_object.c
index a2bac7a3b71b..011296ee16e6 100644
--- a/drivers/staging/lustre/lustre/lov/lovsub_object.c
+++ b/drivers/staging/lustre/lustre/lov/lovsub_object.c
@@ -116,11 +116,31 @@ static int lovsub_object_glimpse(const struct lu_env *env,
return cl_object_glimpse(env, &los->lso_super->lo_cl, lvb);
}
+/**
+ * Implementation of struct cl_object_operations::coo_req_attr_set() for lovsub
+ * layer. Lov and lovsub are responsible only for struct obdo::o_stripe_idx
+ * field, which is filled there.
+ */
+static void lovsub_req_attr_set(const struct lu_env *env, struct cl_object *obj,
+ struct cl_req_attr *attr)
+{
+ struct lovsub_object *subobj = cl2lovsub(obj);
+
+ cl_req_attr_set(env, &subobj->lso_super->lo_cl, attr);
+
+ /*
+ * There is no OBD_MD_* flag for obdo::o_stripe_idx, so set it
+ * unconditionally. It never changes anyway.
+ */
+ attr->cra_oa->o_stripe_idx = subobj->lso_index;
+}
+
static const struct cl_object_operations lovsub_ops = {
.coo_page_init = lovsub_page_init,
.coo_lock_init = lovsub_lock_init,
.coo_attr_update = lovsub_attr_update,
- .coo_glimpse = lovsub_object_glimpse
+ .coo_glimpse = lovsub_object_glimpse,
+ .coo_req_attr_set = lovsub_req_attr_set
};
static const struct lu_object_operations lovsub_lu_obj_ops = {
diff --git a/drivers/staging/lustre/lustre/mdc/lproc_mdc.c b/drivers/staging/lustre/lustre/mdc/lproc_mdc.c
index fca9450de57c..9021c465c044 100644
--- a/drivers/staging/lustre/lustre/mdc/lproc_mdc.c
+++ b/drivers/staging/lustre/lustre/mdc/lproc_mdc.c
@@ -36,6 +36,42 @@
#include "../include/lprocfs_status.h"
#include "mdc_internal.h"
+static ssize_t active_show(struct kobject *kobj, struct attribute *attr,
+ char *buf)
+{
+ struct obd_device *dev = container_of(kobj, struct obd_device,
+ obd_kobj);
+
+ return sprintf(buf, "%u\n", !dev->u.cli.cl_import->imp_deactive);
+}
+
+static ssize_t active_store(struct kobject *kobj, struct attribute *attr,
+ const char *buffer, size_t count)
+{
+ struct obd_device *dev = container_of(kobj, struct obd_device,
+ obd_kobj);
+ unsigned long val;
+ int rc;
+
+ rc = kstrtoul(buffer, 10, &val);
+ if (rc)
+ return rc;
+
+ if (val > 1)
+ return -ERANGE;
+
+ /* opposite senses: imp_deactive == 1 means the import is inactive */
+ if (dev->u.cli.cl_import->imp_deactive == val) {
+ rc = ptlrpc_set_import_active(dev->u.cli.cl_import, val);
+ if (rc)
+ count = rc;
+ } else {
+ CDEBUG(D_CONFIG, "activate %lu: ignoring repeat request\n", val);
+ }
+ return count;
+}
+LUSTRE_RW_ATTR(active);
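A hedged usage sketch (the sysfs path layout is assumed, not taken from this patch): the attribute lets userspace activate or deactivate the MDC import with a plain write.

	#include <fcntl.h>
	#include <unistd.h>

	int fd = open("/sys/fs/lustre/mdc/<obd-name>/active", O_WRONLY);

	if (fd >= 0) {
		write(fd, "0", 1);	/* '0' deactivates, '1' reactivates the import */
		close(fd);
	}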
+
static ssize_t max_rpcs_in_flight_show(struct kobject *kobj,
struct attribute *attr,
char *buf)
@@ -73,6 +109,64 @@ static ssize_t max_rpcs_in_flight_store(struct kobject *kobj,
}
LUSTRE_RW_ATTR(max_rpcs_in_flight);
+static ssize_t max_mod_rpcs_in_flight_show(struct kobject *kobj,
+ struct attribute *attr,
+ char *buf)
+{
+ struct obd_device *dev = container_of(kobj, struct obd_device,
+ obd_kobj);
+ u16 max;
+ int len;
+
+ max = dev->u.cli.cl_max_mod_rpcs_in_flight;
+ len = sprintf(buf, "%hu\n", max);
+
+ return len;
+}
+
+static ssize_t max_mod_rpcs_in_flight_store(struct kobject *kobj,
+ struct attribute *attr,
+ const char *buffer,
+ size_t count)
+{
+ struct obd_device *dev = container_of(kobj, struct obd_device,
+ obd_kobj);
+ u16 val;
+ int rc;
+
+ rc = kstrtou16(buffer, 10, &val);
+ if (rc)
+ return rc;
+
+ rc = obd_set_max_mod_rpcs_in_flight(&dev->u.cli, val);
+ if (rc)
+ count = rc;
+
+ return count;
+}
+LUSTRE_RW_ATTR(max_mod_rpcs_in_flight);
+
+static int mdc_rpc_stats_seq_show(struct seq_file *seq, void *v)
+{
+ struct obd_device *dev = seq->private;
+
+ return obd_mod_rpc_stats_seq_show(&dev->u.cli, seq);
+}
+
+static ssize_t mdc_rpc_stats_seq_write(struct file *file,
+ const char __user *buf,
+ size_t len, loff_t *off)
+{
+ struct seq_file *seq = file->private_data;
+ struct obd_device *dev = seq->private;
+ struct client_obd *cli = &dev->u.cli;
+
+ lprocfs_oh_clear(&cli->cl_mod_rpcs_hist);
+
+ return len;
+}
+LPROC_SEQ_FOPS(mdc_rpc_stats);
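Similarly, a hedged sketch (path assumed): reading rpc_stats dumps the modify-RPC histogram through obd_mod_rpc_stats_seq_show(), and writing anything resets it, since the write handler above ignores the buffer contents.

	#include <fcntl.h>
	#include <unistd.h>

	int fd = open("/proc/fs/lustre/mdc/<obd-name>/rpc_stats", O_WRONLY);

	if (fd >= 0) {
		write(fd, "clear", 5);	/* value ignored; lprocfs_oh_clear() resets the histogram */
		close(fd);
	}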
+
LPROC_SEQ_FOPS_WR_ONLY(mdc, ping);
LPROC_SEQ_FOPS_RO_TYPE(mdc, connect_flags);
@@ -112,11 +206,15 @@ static struct lprocfs_vars lprocfs_mdc_obd_vars[] = {
{ "import", &mdc_import_fops, NULL, 0 },
{ "state", &mdc_state_fops, NULL, 0 },
{ "pinger_recov", &mdc_pinger_recov_fops, NULL, 0 },
+ { .name = "rpc_stats",
+ .fops = &mdc_rpc_stats_fops },
{ NULL }
};
static struct attribute *mdc_attrs[] = {
+ &lustre_attr_active.attr,
&lustre_attr_max_rpcs_in_flight.attr,
+ &lustre_attr_max_mod_rpcs_in_flight.attr,
&lustre_attr_max_pages_per_rpc.attr,
NULL,
};
diff --git a/drivers/staging/lustre/lustre/mdc/mdc_internal.h b/drivers/staging/lustre/lustre/mdc/mdc_internal.h
index f446c1c2584b..881c6a0676a6 100644
--- a/drivers/staging/lustre/lustre/mdc/mdc_internal.h
+++ b/drivers/staging/lustre/lustre/mdc/mdc_internal.h
@@ -46,7 +46,7 @@ void mdc_readdir_pack(struct ptlrpc_request *req, __u64 pgoff, size_t size,
void mdc_getattr_pack(struct ptlrpc_request *req, __u64 valid, u32 flags,
struct md_op_data *data, size_t ea_size);
void mdc_setattr_pack(struct ptlrpc_request *req, struct md_op_data *op_data,
- void *ea, size_t ealen, void *ea2, size_t ea2len);
+ void *ea, size_t ealen);
void mdc_create_pack(struct ptlrpc_request *req, struct md_op_data *op_data,
const void *data, size_t datalen, umode_t mode, uid_t uid,
gid_t gid, cfs_cap_t capability, __u64 rdev);
@@ -75,7 +75,7 @@ int mdc_intent_lock(struct obd_export *exp,
__u64 extra_lock_flags);
int mdc_enqueue(struct obd_export *exp, struct ldlm_enqueue_info *einfo,
- const ldlm_policy_data_t *policy,
+ const union ldlm_policy_data *policy,
struct lookup_intent *it, struct md_op_data *op_data,
struct lustre_handle *lockh, __u64 extra_lock_flags);
@@ -105,12 +105,11 @@ int mdc_rename(struct obd_export *exp, struct md_op_data *op_data,
const char *new, size_t newlen,
struct ptlrpc_request **request);
int mdc_setattr(struct obd_export *exp, struct md_op_data *op_data,
- void *ea, size_t ealen, void *ea2, size_t ea2len,
- struct ptlrpc_request **request, struct md_open_data **mod);
+ void *ea, size_t ealen, struct ptlrpc_request **request);
int mdc_unlink(struct obd_export *exp, struct md_op_data *op_data,
struct ptlrpc_request **request);
int mdc_cancel_unused(struct obd_export *exp, const struct lu_fid *fid,
- ldlm_policy_data_t *policy, enum ldlm_mode mode,
+ union ldlm_policy_data *policy, enum ldlm_mode mode,
enum ldlm_cancel_flags flags, void *opaque);
int mdc_revalidate_lock(struct obd_export *exp, struct lookup_intent *it,
@@ -122,7 +121,8 @@ int mdc_intent_getattr_async(struct obd_export *exp,
enum ldlm_mode mdc_lock_match(struct obd_export *exp, __u64 flags,
const struct lu_fid *fid, enum ldlm_type type,
- ldlm_policy_data_t *policy, enum ldlm_mode mode,
+ union ldlm_policy_data *policy,
+ enum ldlm_mode mode,
struct lustre_handle *lockh);
static inline int mdc_prep_elc_req(struct obd_export *exp,
diff --git a/drivers/staging/lustre/lustre/mdc/mdc_lib.c b/drivers/staging/lustre/lustre/mdc/mdc_lib.c
index aac7e04873e2..f35e1f9afdef 100644
--- a/drivers/staging/lustre/lustre/mdc/mdc_lib.c
+++ b/drivers/staging/lustre/lustre/mdc/mdc_lib.c
@@ -139,7 +139,7 @@ void mdc_create_pack(struct ptlrpc_request *req, struct md_op_data *op_data,
rec->cr_time = op_data->op_mod_time;
rec->cr_suppgid1 = op_data->op_suppgids[0];
rec->cr_suppgid2 = op_data->op_suppgids[1];
- flags = op_data->op_flags & MF_SOM_LOCAL_FLAGS;
+ flags = 0;
if (op_data->op_bias & MDS_CREATE_VOLATILE)
flags |= MDS_OPEN_VOLATILE;
set_mrc_cr_flags(rec, flags);
@@ -301,16 +301,16 @@ static void mdc_setattr_pack_rec(struct mdt_rec_setattr *rec,
static void mdc_ioepoch_pack(struct mdt_ioepoch *epoch,
struct md_op_data *op_data)
{
- memcpy(&epoch->handle, &op_data->op_handle, sizeof(epoch->handle));
- epoch->ioepoch = op_data->op_ioepoch;
- epoch->flags = op_data->op_flags & MF_SOM_LOCAL_FLAGS;
+ epoch->mio_handle = op_data->op_handle;
+ epoch->mio_unused1 = 0;
+ epoch->mio_unused2 = 0;
+ epoch->mio_padding = 0;
}
void mdc_setattr_pack(struct ptlrpc_request *req, struct md_op_data *op_data,
- void *ea, size_t ealen, void *ea2, size_t ea2len)
+ void *ea, size_t ealen)
{
struct mdt_rec_setattr *rec;
- struct mdt_ioepoch *epoch;
struct lov_user_md *lum = NULL;
CLASSERT(sizeof(struct mdt_rec_reint) ==
@@ -318,11 +318,6 @@ void mdc_setattr_pack(struct ptlrpc_request *req, struct md_op_data *op_data,
rec = req_capsule_client_get(&req->rq_pill, &RMF_REC_REINT);
mdc_setattr_pack_rec(rec, op_data);
- if (op_data->op_flags & (MF_SOM_CHANGE | MF_EPOCH_OPEN)) {
- epoch = req_capsule_client_get(&req->rq_pill, &RMF_MDT_EPOCH);
- mdc_ioepoch_pack(epoch, op_data);
- }
-
if (ealen == 0)
return;
@@ -335,12 +330,6 @@ void mdc_setattr_pack(struct ptlrpc_request *req, struct md_op_data *op_data,
} else {
memcpy(lum, ea, ealen);
}
-
- if (ea2len == 0)
- return;
-
- memcpy(req_capsule_client_get(&req->rq_pill, &RMF_LOGCOOKIES), ea2,
- ea2len);
}
void mdc_unlink_pack(struct ptlrpc_request *req, struct md_op_data *op_data)
@@ -387,6 +376,31 @@ void mdc_link_pack(struct ptlrpc_request *req, struct md_op_data *op_data)
mdc_pack_name(req, &RMF_NAME, op_data->op_name, op_data->op_namelen);
}
+static void mdc_intent_close_pack(struct ptlrpc_request *req,
+ struct md_op_data *op_data)
+{
+ enum mds_op_bias bias = op_data->op_bias;
+ struct close_data *data;
+ struct ldlm_lock *lock;
+
+ if (!(bias & (MDS_HSM_RELEASE | MDS_CLOSE_LAYOUT_SWAP |
+ MDS_RENAME_MIGRATE)))
+ return;
+
+ data = req_capsule_client_get(&req->rq_pill, &RMF_CLOSE_DATA);
+ LASSERT(data);
+
+ lock = ldlm_handle2lock(&op_data->op_lease_handle);
+ if (lock) {
+ data->cd_handle = lock->l_remote_handle;
+ LDLM_LOCK_PUT(lock);
+ }
+ ldlm_cli_cancel(&op_data->op_lease_handle, LCF_LOCAL);
+
+ data->cd_data_version = op_data->op_data_version;
+ data->cd_fid = op_data->op_fid2;
+}
+
void mdc_rename_pack(struct ptlrpc_request *req, struct md_op_data *op_data,
const char *old, size_t oldlen,
const char *new, size_t newlen)
@@ -415,6 +429,15 @@ void mdc_rename_pack(struct ptlrpc_request *req, struct md_op_data *op_data,
if (new)
mdc_pack_name(req, &RMF_SYMTGT, new, newlen);
+
+ if (op_data->op_cli_flags & CLI_MIGRATE &&
+ op_data->op_bias & MDS_RENAME_MIGRATE) {
+ struct mdt_ioepoch *epoch;
+
+ mdc_intent_close_pack(req, op_data);
+ epoch = req_capsule_client_get(&req->rq_pill, &RMF_MDT_EPOCH);
+ mdc_ioepoch_pack(epoch, op_data);
+ }
}
void mdc_getattr_pack(struct ptlrpc_request *req, __u64 valid, u32 flags,
@@ -441,27 +464,6 @@ void mdc_getattr_pack(struct ptlrpc_request *req, __u64 valid, u32 flags,
op_data->op_namelen);
}
-static void mdc_hsm_release_pack(struct ptlrpc_request *req,
- struct md_op_data *op_data)
-{
- if (op_data->op_bias & MDS_HSM_RELEASE) {
- struct close_data *data;
- struct ldlm_lock *lock;
-
- data = req_capsule_client_get(&req->rq_pill, &RMF_CLOSE_DATA);
-
- lock = ldlm_handle2lock(&op_data->op_lease_handle);
- if (lock) {
- data->cd_handle = lock->l_remote_handle;
- LDLM_LOCK_PUT(lock);
- }
- ldlm_cli_cancel(&op_data->op_lease_handle, LCF_LOCAL);
-
- data->cd_data_version = op_data->op_data_version;
- data->cd_fid = op_data->op_fid2;
- }
-}
-
void mdc_close_pack(struct ptlrpc_request *req, struct md_op_data *op_data)
{
struct mdt_ioepoch *epoch;
@@ -484,5 +486,5 @@ void mdc_close_pack(struct ptlrpc_request *req, struct md_op_data *op_data)
rec->sa_valid &= ~MDS_ATTR_ATIME;
mdc_ioepoch_pack(epoch, op_data);
- mdc_hsm_release_pack(req, op_data);
+ mdc_intent_close_pack(req, op_data);
}
diff --git a/drivers/staging/lustre/lustre/mdc/mdc_locks.c b/drivers/staging/lustre/lustre/mdc/mdc_locks.c
index f1f6c082fa42..54ebb9952d66 100644
--- a/drivers/staging/lustre/lustre/mdc/mdc_locks.c
+++ b/drivers/staging/lustre/lustre/mdc/mdc_locks.c
@@ -38,10 +38,12 @@
#include "../include/obd.h"
#include "../include/obd_class.h"
#include "../include/lustre_dlm.h"
-#include "../include/lustre_fid.h" /* fid_res_name_eq() */
+#include "../include/lustre_fid.h"
#include "../include/lustre_mdc.h"
#include "../include/lustre_net.h"
#include "../include/lustre_req_layout.h"
+#include "../include/lustre_swab.h"
+
#include "mdc_internal.h"
struct mdc_getattr_args {
@@ -131,7 +133,8 @@ int mdc_set_lock_data(struct obd_export *exp, const struct lustre_handle *lockh,
enum ldlm_mode mdc_lock_match(struct obd_export *exp, __u64 flags,
const struct lu_fid *fid, enum ldlm_type type,
- ldlm_policy_data_t *policy, enum ldlm_mode mode,
+ union ldlm_policy_data *policy,
+ enum ldlm_mode mode,
struct lustre_handle *lockh)
{
struct ldlm_res_id res_id;
@@ -147,7 +150,7 @@ enum ldlm_mode mdc_lock_match(struct obd_export *exp, __u64 flags,
int mdc_cancel_unused(struct obd_export *exp,
const struct lu_fid *fid,
- ldlm_policy_data_t *policy,
+ union ldlm_policy_data *policy,
enum ldlm_mode mode,
enum ldlm_cancel_flags flags,
void *opaque)
@@ -386,8 +389,6 @@ static struct ptlrpc_request *mdc_intent_unlink_pack(struct obd_export *exp,
req_capsule_set_size(&req->rq_pill, &RMF_MDT_MD, RCL_SERVER,
obddev->u.cli.cl_default_mds_easize);
- req_capsule_set_size(&req->rq_pill, &RMF_ACL, RCL_SERVER,
- obddev->u.cli.cl_default_mds_cookiesize);
ptlrpc_request_set_replen(req);
return req;
}
@@ -688,20 +689,20 @@ static int mdc_finish_enqueue(struct obd_export *exp,
* we don't know in advance the file type.
*/
int mdc_enqueue(struct obd_export *exp, struct ldlm_enqueue_info *einfo,
- const ldlm_policy_data_t *policy,
+ const union ldlm_policy_data *policy,
struct lookup_intent *it, struct md_op_data *op_data,
struct lustre_handle *lockh, u64 extra_lock_flags)
{
- static const ldlm_policy_data_t lookup_policy = {
+ static const union ldlm_policy_data lookup_policy = {
.l_inodebits = { MDS_INODELOCK_LOOKUP }
};
- static const ldlm_policy_data_t update_policy = {
+ static const union ldlm_policy_data update_policy = {
.l_inodebits = { MDS_INODELOCK_UPDATE }
};
- static const ldlm_policy_data_t layout_policy = {
+ static const union ldlm_policy_data layout_policy = {
.l_inodebits = { MDS_INODELOCK_LAYOUT }
};
- static const ldlm_policy_data_t getxattr_policy = {
+ static const union ldlm_policy_data getxattr_policy = {
.l_inodebits = { MDS_INODELOCK_XATTR }
};
struct obd_device *obddev = class_exp2obd(exp);
@@ -762,27 +763,22 @@ resend:
if (IS_ERR(req))
return PTR_ERR(req);
- if (req && it && it->it_op & IT_CREAT)
- /* ask ptlrpc not to resend on EINPROGRESS since we have our own
- * retry logic
- */
- req->rq_no_retry_einprogress = 1;
-
if (resends) {
req->rq_generation_set = 1;
req->rq_import_generation = generation;
req->rq_sent = ktime_get_real_seconds() + resends;
}
- /* It is important to obtain rpc_lock first (if applicable), so that
- * threads that are serialised with rpc_lock are not polluting our
- * rpcs in flight counter. We do not do flock request limiting, though
+ /* It is important to obtain a modify RPC slot first (if applicable), so
+ * that threads that are waiting for a modify RPC slot are not polluting
+ * our rpcs in flight counter.
+ * We do not do flock request limiting, though.
*/
if (it) {
- mdc_get_rpc_lock(obddev->u.cli.cl_rpc_lock, it);
+ mdc_get_mod_rpc_slot(req, it);
rc = obd_get_request_slot(&obddev->u.cli);
if (rc != 0) {
- mdc_put_rpc_lock(obddev->u.cli.cl_rpc_lock, it);
+ mdc_put_mod_rpc_slot(req, it);
mdc_clear_replay_flag(req, 0);
ptlrpc_req_finished(req);
return rc;
@@ -809,7 +805,7 @@ resend:
}
obd_put_request_slot(&obddev->u.cli);
- mdc_put_rpc_lock(obddev->u.cli.cl_rpc_lock, it);
+ mdc_put_mod_rpc_slot(req, it);
if (rc < 0) {
CDEBUG(D_INFO, "%s: ldlm_cli_enqueue failed: rc = %d\n",
@@ -825,11 +821,12 @@ resend:
lockrep->lock_policy_res2 =
ptlrpc_status_ntoh(lockrep->lock_policy_res2);
- /* Retry the create infinitely when we get -EINPROGRESS from
- * server. This is required by the new quota design.
+ /*
+ * Retry infinitely when the server returns -EINPROGRESS for the
+ * intent operation; when the server returns -EINPROGRESS for
+ * acquiring the intent lock, we will retry in after_reply().
*/
- if (it->it_op & IT_CREAT &&
- (int)lockrep->lock_policy_res2 == -EINPROGRESS) {
+ if (it->it_op && (int)lockrep->lock_policy_res2 == -EINPROGRESS) {
mdc_clear_replay_flag(req, rc);
ptlrpc_req_finished(req);
resends++;
@@ -931,7 +928,7 @@ static int mdc_finish_intent_lock(struct obd_export *exp,
*/
lock = ldlm_handle2lock(lockh);
if (lock) {
- ldlm_policy_data_t policy = lock->l_policy_data;
+ union ldlm_policy_data policy = lock->l_policy_data;
LDLM_DEBUG(lock, "matching against this");
@@ -967,7 +964,7 @@ int mdc_revalidate_lock(struct obd_export *exp, struct lookup_intent *it,
*/
struct ldlm_res_id res_id;
struct lustre_handle lockh;
- ldlm_policy_data_t policy;
+ union ldlm_policy_data policy;
enum ldlm_mode mode;
if (it->it_lock_handle) {
@@ -1169,10 +1166,9 @@ int mdc_intent_getattr_async(struct obd_export *exp,
* for statahead currently. Consider CMD in future, such two bits
* maybe managed by different MDS, should be adjusted then.
*/
- ldlm_policy_data_t policy = {
- .l_inodebits = { MDS_INODELOCK_LOOKUP |
- MDS_INODELOCK_UPDATE }
- };
+ union ldlm_policy_data policy = {
+ .l_inodebits = { MDS_INODELOCK_LOOKUP | MDS_INODELOCK_UPDATE }
+ };
int rc = 0;
__u64 flags = LDLM_FL_HAS_INTENT;
diff --git a/drivers/staging/lustre/lustre/mdc/mdc_reint.c b/drivers/staging/lustre/lustre/mdc/mdc_reint.c
index c921e471fa27..07b168490f09 100644
--- a/drivers/staging/lustre/lustre/mdc/mdc_reint.c
+++ b/drivers/staging/lustre/lustre/mdc/mdc_reint.c
@@ -40,17 +40,15 @@
#include "../include/lustre_fid.h"
/* mdc_setattr does its own semaphore handling */
-static int mdc_reint(struct ptlrpc_request *request,
- struct mdc_rpc_lock *rpc_lock,
- int level)
+static int mdc_reint(struct ptlrpc_request *request, int level)
{
int rc;
request->rq_send_state = level;
- mdc_get_rpc_lock(rpc_lock, NULL);
+ mdc_get_mod_rpc_slot(request, NULL);
rc = ptlrpc_queue_wait(request);
- mdc_put_rpc_lock(rpc_lock, NULL);
+ mdc_put_mod_rpc_slot(request, NULL);
if (rc)
CDEBUG(D_INFO, "error in handling %d\n", rc);
else if (!req_capsule_server_get(&request->rq_pill, &RMF_MDT_BODY))
@@ -68,7 +66,7 @@ int mdc_resource_get_unused(struct obd_export *exp, const struct lu_fid *fid,
__u64 bits)
{
struct ldlm_namespace *ns = exp->exp_obd->obd_namespace;
- ldlm_policy_data_t policy = {};
+ union ldlm_policy_data policy = {};
struct ldlm_res_id res_id;
struct ldlm_resource *res;
int count;
@@ -99,13 +97,10 @@ int mdc_resource_get_unused(struct obd_export *exp, const struct lu_fid *fid,
}
int mdc_setattr(struct obd_export *exp, struct md_op_data *op_data,
- void *ea, size_t ealen, void *ea2, size_t ea2len,
- struct ptlrpc_request **request, struct md_open_data **mod)
+ void *ea, size_t ealen, struct ptlrpc_request **request)
{
LIST_HEAD(cancels);
struct ptlrpc_request *req;
- struct mdc_rpc_lock *rpc_lock;
- struct obd_device *obd = exp->exp_obd;
int count = 0, rc;
__u64 bits;
@@ -122,12 +117,9 @@ int mdc_setattr(struct obd_export *exp, struct md_op_data *op_data,
ldlm_lock_list_put(&cancels, l_bl_ast, count);
return -ENOMEM;
}
- if ((op_data->op_flags & (MF_SOM_CHANGE | MF_EPOCH_OPEN)) == 0)
- req_capsule_set_size(&req->rq_pill, &RMF_MDT_EPOCH, RCL_CLIENT,
- 0);
+ req_capsule_set_size(&req->rq_pill, &RMF_MDT_EPOCH, RCL_CLIENT, 0);
req_capsule_set_size(&req->rq_pill, &RMF_EADATA, RCL_CLIENT, ealen);
- req_capsule_set_size(&req->rq_pill, &RMF_LOGCOOKIES, RCL_CLIENT,
- ea2len);
+ req_capsule_set_size(&req->rq_pill, &RMF_LOGCOOKIES, RCL_CLIENT, 0);
rc = mdc_prep_elc_req(exp, req, MDS_REINT, &cancels, count);
if (rc) {
@@ -135,63 +127,21 @@ int mdc_setattr(struct obd_export *exp, struct md_op_data *op_data,
return rc;
}
- rpc_lock = obd->u.cli.cl_rpc_lock;
-
if (op_data->op_attr.ia_valid & (ATTR_MTIME | ATTR_CTIME))
CDEBUG(D_INODE, "setting mtime %ld, ctime %ld\n",
LTIME_S(op_data->op_attr.ia_mtime),
LTIME_S(op_data->op_attr.ia_ctime));
- mdc_setattr_pack(req, op_data, ea, ealen, ea2, ea2len);
+ mdc_setattr_pack(req, op_data, ea, ealen);
ptlrpc_request_set_replen(req);
- if (mod && (op_data->op_flags & MF_EPOCH_OPEN) &&
- req->rq_import->imp_replayable) {
- LASSERT(!*mod);
-
- *mod = obd_mod_alloc();
- if (!*mod) {
- DEBUG_REQ(D_ERROR, req, "Can't allocate md_open_data");
- } else {
- req->rq_replay = 1;
- req->rq_cb_data = *mod;
- (*mod)->mod_open_req = req;
- req->rq_commit_cb = mdc_commit_open;
- (*mod)->mod_is_create = true;
- /**
- * Take an extra reference on \var mod, it protects \var
- * mod from being freed on eviction (commit callback is
- * called despite rq_replay flag).
- * Will be put on mdc_done_writing().
- */
- obd_mod_get(*mod);
- }
- }
-
- rc = mdc_reint(req, rpc_lock, LUSTRE_IMP_FULL);
- /* Save the obtained info in the original RPC for the replay case. */
- if (rc == 0 && (op_data->op_flags & MF_EPOCH_OPEN)) {
- struct mdt_ioepoch *epoch;
- struct mdt_body *body;
+ rc = mdc_reint(req, LUSTRE_IMP_FULL);
- epoch = req_capsule_client_get(&req->rq_pill, &RMF_MDT_EPOCH);
- body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
- epoch->handle = body->mbo_handle;
- epoch->ioepoch = body->mbo_ioepoch;
- req->rq_replay_cb = mdc_replay_open;
- /** bug 3633, open may be committed and estale answer is not error */
- } else if (rc == -ESTALE && (op_data->op_flags & MF_SOM_CHANGE)) {
- rc = 0;
- } else if (rc == -ERESTARTSYS) {
+ if (rc == -ERESTARTSYS)
rc = 0;
- }
+
*request = req;
- if (rc && req->rq_commit_cb) {
- /* Put an extra reference on \var mod on error case. */
- if (mod && *mod)
- obd_mod_put(*mod);
- req->rq_commit_cb(req);
- }
+
return rc;
}
@@ -264,7 +214,7 @@ rebuild:
}
level = LUSTRE_IMP_FULL;
resend:
- rc = mdc_reint(req, exp->exp_obd->u.cli.cl_rpc_lock, level);
+ rc = mdc_reint(req, level);
/* Resend if we were told to. */
if (rc == -ERESTARTSYS) {
@@ -332,13 +282,11 @@ int mdc_unlink(struct obd_export *exp, struct md_op_data *op_data,
req_capsule_set_size(&req->rq_pill, &RMF_MDT_MD, RCL_SERVER,
obd->u.cli.cl_default_mds_easize);
- req_capsule_set_size(&req->rq_pill, &RMF_LOGCOOKIES, RCL_SERVER,
- obd->u.cli.cl_default_mds_cookiesize);
ptlrpc_request_set_replen(req);
*request = req;
- rc = mdc_reint(req, obd->u.cli.cl_rpc_lock, LUSTRE_IMP_FULL);
+ rc = mdc_reint(req, LUSTRE_IMP_FULL);
if (rc == -ERESTARTSYS)
rc = 0;
return rc;
@@ -348,7 +296,6 @@ int mdc_link(struct obd_export *exp, struct md_op_data *op_data,
struct ptlrpc_request **request)
{
LIST_HEAD(cancels);
- struct obd_device *obd = exp->exp_obd;
struct ptlrpc_request *req;
int count = 0, rc;
@@ -380,7 +327,7 @@ int mdc_link(struct obd_export *exp, struct md_op_data *op_data,
mdc_link_pack(req, op_data);
ptlrpc_request_set_replen(req);
- rc = mdc_reint(req, obd->u.cli.cl_rpc_lock, LUSTRE_IMP_FULL);
+ rc = mdc_reint(req, LUSTRE_IMP_FULL);
*request = req;
if (rc == -ERESTARTSYS)
rc = 0;
@@ -419,7 +366,8 @@ int mdc_rename(struct obd_export *exp, struct md_op_data *op_data,
MDS_INODELOCK_FULL);
req = ptlrpc_request_alloc(class_exp2cliimp(exp),
- &RQF_MDS_REINT_RENAME);
+ op_data->op_cli_flags & CLI_MIGRATE ?
+ &RQF_MDS_REINT_MIGRATE : &RQF_MDS_REINT_RENAME);
if (!req) {
ldlm_lock_list_put(&cancels, l_bl_ast, count);
return -ENOMEM;
@@ -435,6 +383,23 @@ int mdc_rename(struct obd_export *exp, struct md_op_data *op_data,
return rc;
}
+ if (op_data->op_cli_flags & CLI_MIGRATE && op_data->op_data) {
+ struct md_open_data *mod = op_data->op_data;
+
+ LASSERTF(mod->mod_open_req &&
+ mod->mod_open_req->rq_type != LI_POISON,
+ "POISONED open %p!\n", mod->mod_open_req);
+
+ DEBUG_REQ(D_HA, mod->mod_open_req, "matched open");
+ /*
+ * We no longer want to preserve this open for replay even
+ * though the open was committed. b=3632, b=3633
+ */
+ spin_lock(&mod->mod_open_req->rq_lock);
+ mod->mod_open_req->rq_replay = 0;
+ spin_unlock(&mod->mod_open_req->rq_lock);
+ }
+
if (exp_connect_cancelset(exp) && req)
ldlm_cli_cancel_list(&cancels, count, req, 0);
@@ -442,11 +407,9 @@ int mdc_rename(struct obd_export *exp, struct md_op_data *op_data,
req_capsule_set_size(&req->rq_pill, &RMF_MDT_MD, RCL_SERVER,
obd->u.cli.cl_default_mds_easize);
- req_capsule_set_size(&req->rq_pill, &RMF_LOGCOOKIES, RCL_SERVER,
- obd->u.cli.cl_default_mds_cookiesize);
ptlrpc_request_set_replen(req);
- rc = mdc_reint(req, obd->u.cli.cl_rpc_lock, LUSTRE_IMP_FULL);
+ rc = mdc_reint(req, LUSTRE_IMP_FULL);
*request = req;
if (rc == -ERESTARTSYS)
rc = 0;
diff --git a/drivers/staging/lustre/lustre/mdc/mdc_request.c b/drivers/staging/lustre/lustre/mdc/mdc_request.c
index f56ea643f9bf..2cfd913f9bc5 100644
--- a/drivers/staging/lustre/lustre/mdc/mdc_request.c
+++ b/drivers/staging/lustre/lustre/mdc/mdc_request.c
@@ -38,15 +38,18 @@
# include <linux/init.h>
# include <linux/utsname.h>
+#include "../include/cl_object.h"
+#include "../include/llog_swab.h"
+#include "../include/lprocfs_status.h"
#include "../include/lustre_acl.h"
+#include "../include/lustre_fid.h"
#include "../include/lustre/lustre_ioctl.h"
-#include "../include/obd_class.h"
+#include "../include/lustre_kernelcomm.h"
#include "../include/lustre_lmv.h"
-#include "../include/lustre_fid.h"
-#include "../include/lprocfs_status.h"
-#include "../include/lustre_param.h"
#include "../include/lustre_log.h"
-#include "../include/lustre_kernelcomm.h"
+#include "../include/lustre_param.h"
+#include "../include/lustre_swab.h"
+#include "../include/obd_class.h"
#include "mdc_internal.h"
@@ -327,12 +330,12 @@ static int mdc_xattr_common(struct obd_export *exp,
/* make rpc */
if (opcode == MDS_REINT)
- mdc_get_rpc_lock(exp->exp_obd->u.cli.cl_rpc_lock, NULL);
+ mdc_get_mod_rpc_slot(req, NULL);
rc = ptlrpc_queue_wait(req);
if (opcode == MDS_REINT)
- mdc_put_rpc_lock(exp->exp_obd->u.cli.cl_rpc_lock, NULL);
+ mdc_put_mod_rpc_slot(req, NULL);
if (rc)
ptlrpc_req_finished(req);
@@ -420,9 +423,6 @@ static int mdc_get_lustre_md(struct obd_export *exp,
md->body = req_capsule_server_get(pill, &RMF_MDT_BODY);
if (md->body->mbo_valid & OBD_MD_FLEASIZE) {
- int lmmsize;
- struct lov_mds_md *lmm;
-
if (!S_ISREG(md->body->mbo_mode)) {
CDEBUG(D_INFO,
"OBD_MD_FLEASIZE set, should be a regular file, but is not\n");
@@ -436,28 +436,18 @@ static int mdc_get_lustre_md(struct obd_export *exp,
rc = -EPROTO;
goto out;
}
- lmmsize = md->body->mbo_eadatasize;
- lmm = req_capsule_server_sized_get(pill, &RMF_MDT_MD, lmmsize);
- if (!lmm) {
- rc = -EPROTO;
- goto out;
- }
-
- rc = obd_unpackmd(dt_exp, &md->lsm, lmm, lmmsize);
- if (rc < 0)
- goto out;
- if (rc < (typeof(rc))sizeof(*md->lsm)) {
- CDEBUG(D_INFO,
- "lsm size too small: rc < sizeof (*md->lsm) (%d < %d)\n",
- rc, (int)sizeof(*md->lsm));
+ md->layout.lb_len = md->body->mbo_eadatasize;
+ md->layout.lb_buf = req_capsule_server_sized_get(pill,
+ &RMF_MDT_MD,
+ md->layout.lb_len);
+ if (!md->layout.lb_buf) {
rc = -EPROTO;
goto out;
}
-
} else if (md->body->mbo_valid & OBD_MD_FLDIREA) {
- int lmvsize;
- struct lov_mds_md *lmv;
+ const union lmv_mds_md *lmv;
+ size_t lmv_size;
if (!S_ISDIR(md->body->mbo_mode)) {
CDEBUG(D_INFO,
@@ -466,22 +456,21 @@ static int mdc_get_lustre_md(struct obd_export *exp,
goto out;
}
- if (md->body->mbo_eadatasize == 0) {
+ lmv_size = md->body->mbo_eadatasize;
+ if (!lmv_size) {
CDEBUG(D_INFO,
"OBD_MD_FLDIREA is set, but eadatasize 0\n");
return -EPROTO;
}
if (md->body->mbo_valid & OBD_MD_MEA) {
- lmvsize = md->body->mbo_eadatasize;
lmv = req_capsule_server_sized_get(pill, &RMF_MDT_MD,
- lmvsize);
+ lmv_size);
if (!lmv) {
rc = -EPROTO;
goto out;
}
- rc = obd_unpackmd(md_exp, (void *)&md->lmv, lmv,
- lmvsize);
+ rc = md_unpackmd(md_exp, &md->lmv, lmv, lmv_size);
if (rc < 0)
goto out;
@@ -517,8 +506,6 @@ out:
#ifdef CONFIG_FS_POSIX_ACL
posix_acl_release(md->posix_acl);
#endif
- if (md->lsm)
- obd_free_memmd(dt_exp, &md->lsm);
}
return rc;
}
@@ -528,10 +515,6 @@ static int mdc_free_lustre_md(struct obd_export *exp, struct lustre_md *md)
return 0;
}
-/**
- * Handles both OPEN and SETATTR RPCs for OPEN-CLOSE and SETATTR-DONE_WRITING
- * RPC chains.
- */
void mdc_replay_open(struct ptlrpc_request *req)
{
struct md_open_data *mod = req->rq_cb_data;
@@ -565,15 +548,15 @@ void mdc_replay_open(struct ptlrpc_request *req)
__u32 opc = lustre_msg_get_opc(close_req->rq_reqmsg);
struct mdt_ioepoch *epoch;
- LASSERT(opc == MDS_CLOSE || opc == MDS_DONE_WRITING);
+ LASSERT(opc == MDS_CLOSE);
epoch = req_capsule_client_get(&close_req->rq_pill,
&RMF_MDT_EPOCH);
LASSERT(epoch);
if (och)
- LASSERT(!memcmp(&old, &epoch->handle, sizeof(old)));
+ LASSERT(!memcmp(&old, &epoch->mio_handle, sizeof(old)));
DEBUG_REQ(D_HA, close_req, "updating close body with new fh");
- epoch->handle = body->mbo_handle;
+ epoch->mio_handle = body->mbo_handle;
}
}
@@ -715,22 +698,6 @@ static int mdc_clear_open_replay_data(struct obd_export *exp,
return 0;
}
-/* Prepares the request for the replay by the given reply */
-static void mdc_close_handle_reply(struct ptlrpc_request *req,
- struct md_op_data *op_data, int rc) {
- struct mdt_body *repbody;
- struct mdt_ioepoch *epoch;
-
- if (req && rc == -EAGAIN) {
- repbody = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
- epoch = req_capsule_client_get(&req->rq_pill, &RMF_MDT_EPOCH);
-
- epoch->flags |= MF_SOM_AU;
- if (repbody->mbo_valid & OBD_MD_FLGETATTRLOCK)
- op_data->op_flags |= MF_GETATTR_LOCK;
- }
-}
-
static int mdc_close(struct obd_export *exp, struct md_op_data *op_data,
struct md_open_data *mod, struct ptlrpc_request **request)
{
@@ -740,9 +707,8 @@ static int mdc_close(struct obd_export *exp, struct md_op_data *op_data,
int rc;
int saved_rc = 0;
- req_fmt = &RQF_MDS_CLOSE;
if (op_data->op_bias & MDS_HSM_RELEASE) {
- req_fmt = &RQF_MDS_RELEASE_CLOSE;
+ req_fmt = &RQF_MDS_INTENT_CLOSE;
/* allocate a FID for volatile file */
rc = mdc_fid_alloc(NULL, exp, &op_data->op_fid2, op_data);
@@ -752,6 +718,10 @@ static int mdc_close(struct obd_export *exp, struct md_op_data *op_data,
/* save the errcode and proceed to close */
saved_rc = rc;
}
+ } else if (op_data->op_bias & MDS_CLOSE_LAYOUT_SWAP) {
+ req_fmt = &RQF_MDS_INTENT_CLOSE;
+ } else {
+ req_fmt = &RQF_MDS_CLOSE;
}
*request = NULL;
@@ -807,14 +777,12 @@ static int mdc_close(struct obd_export *exp, struct md_op_data *op_data,
req_capsule_set_size(&req->rq_pill, &RMF_MDT_MD, RCL_SERVER,
obd->u.cli.cl_default_mds_easize);
- req_capsule_set_size(&req->rq_pill, &RMF_LOGCOOKIES, RCL_SERVER,
- obd->u.cli.cl_default_mds_cookiesize);
ptlrpc_request_set_replen(req);
- mdc_get_rpc_lock(obd->u.cli.cl_close_lock, NULL);
+ mdc_get_mod_rpc_slot(req, NULL);
rc = ptlrpc_queue_wait(req);
- mdc_put_rpc_lock(obd->u.cli.cl_close_lock, NULL);
+ mdc_put_mod_rpc_slot(req, NULL);
if (!req->rq_repmsg) {
CDEBUG(D_RPCTRACE, "request failed to send: %p, %d\n", req,
@@ -857,79 +825,9 @@ out:
obd_mod_put(mod);
}
*request = req;
- mdc_close_handle_reply(req, op_data, rc);
return rc < 0 ? rc : saved_rc;
}
-static int mdc_done_writing(struct obd_export *exp, struct md_op_data *op_data,
- struct md_open_data *mod)
-{
- struct obd_device *obd = class_exp2obd(exp);
- struct ptlrpc_request *req;
- int rc;
-
- req = ptlrpc_request_alloc(class_exp2cliimp(exp),
- &RQF_MDS_DONE_WRITING);
- if (!req)
- return -ENOMEM;
-
- rc = ptlrpc_request_pack(req, LUSTRE_MDS_VERSION, MDS_DONE_WRITING);
- if (rc) {
- ptlrpc_request_free(req);
- return rc;
- }
-
- if (mod) {
- LASSERTF(mod->mod_open_req &&
- mod->mod_open_req->rq_type != LI_POISON,
- "POISONED setattr %p!\n", mod->mod_open_req);
-
- mod->mod_close_req = req;
- DEBUG_REQ(D_HA, mod->mod_open_req, "matched setattr");
- /* We no longer want to preserve this setattr for replay even
- * though the open was committed. b=3632, b=3633
- */
- spin_lock(&mod->mod_open_req->rq_lock);
- mod->mod_open_req->rq_replay = 0;
- spin_unlock(&mod->mod_open_req->rq_lock);
- }
-
- mdc_close_pack(req, op_data);
- ptlrpc_request_set_replen(req);
-
- mdc_get_rpc_lock(obd->u.cli.cl_close_lock, NULL);
- rc = ptlrpc_queue_wait(req);
- mdc_put_rpc_lock(obd->u.cli.cl_close_lock, NULL);
-
- if (rc == -ESTALE) {
- /**
- * it can be allowed error after 3633 if open or setattr were
- * committed and server failed before close was sent.
- * Let's check if mod exists and return no error in that case
- */
- if (mod) {
- if (mod->mod_open_req->rq_committed)
- rc = 0;
- }
- }
-
- if (mod) {
- if (rc != 0)
- mod->mod_close_req = NULL;
- LASSERT(mod->mod_open_req);
- mdc_free_open(mod);
-
- /* Since now, mod is accessed through setattr req only,
- * thus DW req does not keep a reference on mod anymore.
- */
- obd_mod_put(mod);
- }
-
- mdc_close_handle_reply(req, op_data, rc);
- ptlrpc_req_finished(req);
- return rc;
-}
-
static int mdc_getpage(struct obd_export *exp, const struct lu_fid *fid,
u64 offset, struct page **pages, int npages,
struct ptlrpc_request **request)
@@ -959,8 +857,10 @@ restart_bulk:
req->rq_request_portal = MDS_READPAGE_PORTAL;
ptlrpc_at_set_req_timeout(req);
- desc = ptlrpc_prep_bulk_imp(req, npages, 1, BULK_PUT_SINK,
- MDS_BULK_PORTAL);
+ desc = ptlrpc_prep_bulk_imp(req, npages, 1,
+ PTLRPC_BULK_PUT_SINK | PTLRPC_BULK_BUF_KIOV,
+ MDS_BULK_PORTAL,
+ &ptlrpc_bulk_kiov_pin_ops);
if (!desc) {
ptlrpc_request_free(req);
return -ENOMEM;
@@ -968,7 +868,7 @@ restart_bulk:
/* NB req now owns desc and will free it when it gets freed */
for (i = 0; i < npages; i++)
- ptlrpc_prep_bulk_page_pin(desc, pages[i], 0, PAGE_SIZE);
+ desc->bd_frag_ops->add_kiov_frag(desc, pages[i], 0, PAGE_SIZE);
mdc_readdir_pack(req, offset, PAGE_SIZE * npages, fid);
@@ -1546,7 +1446,7 @@ static int mdc_ioc_fid2path(struct obd_export *exp, struct getinfo_fid2path *gf)
/* Val is struct getinfo_fid2path result plus path */
vallen = sizeof(*gf) + gf->gf_pathlen;
- rc = obd_get_info(NULL, exp, keylen, key, &vallen, gf, NULL);
+ rc = obd_get_info(NULL, exp, keylen, key, &vallen, gf);
if (rc != 0 && rc != -EREMOTE)
goto out;
@@ -1558,8 +1458,11 @@ static int mdc_ioc_fid2path(struct obd_export *exp, struct getinfo_fid2path *gf)
goto out;
}
- CDEBUG(D_IOCTL, "path get "DFID" from %llu #%d\n%s\n",
- PFID(&gf->gf_fid), gf->gf_recno, gf->gf_linkno, gf->gf_path);
+ CDEBUG(D_IOCTL, "path got " DFID " from %llu #%d: %s\n",
+ PFID(&gf->gf_fid), gf->gf_recno, gf->gf_linkno,
+ gf->gf_pathlen < 512 ? gf->gf_path :
+ /* only log the last 512 characters of the path */
+ gf->gf_path + gf->gf_pathlen - 512);
out:
kfree(key);
@@ -1595,7 +1498,9 @@ static int mdc_ioc_hsm_progress(struct obd_export *exp,
ptlrpc_request_set_replen(req);
- rc = mdc_queue_wait(req);
+ mdc_get_mod_rpc_slot(req, NULL);
+ rc = ptlrpc_queue_wait(req);
+ mdc_put_mod_rpc_slot(req, NULL);
out:
ptlrpc_req_finished(req);
return rc;
@@ -1773,7 +1678,9 @@ static int mdc_ioc_hsm_state_set(struct obd_export *exp,
ptlrpc_request_set_replen(req);
- rc = mdc_queue_wait(req);
+ mdc_get_mod_rpc_slot(req, NULL);
+ rc = ptlrpc_queue_wait(req);
+ mdc_put_mod_rpc_slot(req, NULL);
out:
ptlrpc_req_finished(req);
return rc;
@@ -1836,7 +1743,9 @@ static int mdc_ioc_hsm_request(struct obd_export *exp,
ptlrpc_request_set_replen(req);
- rc = mdc_queue_wait(req);
+ mdc_get_mod_rpc_slot(req, NULL);
+ rc = ptlrpc_queue_wait(req);
+ mdc_put_mod_rpc_slot(req, NULL);
out:
ptlrpc_req_finished(req);
return rc;
@@ -1957,10 +1866,8 @@ static int mdc_changelog_send_thread(void *csdata)
/* Send EOF no matter what our result */
kuch = changelog_kuc_hdr(cs->cs_buf, sizeof(*kuch), cs->cs_flags);
- if (kuch) {
- kuch->kuc_msgtype = CL_EOF;
- libcfs_kkuc_msg_put(cs->cs_fp, kuch);
- }
+ kuch->kuc_msgtype = CL_EOF;
+ libcfs_kkuc_msg_put(cs->cs_fp, kuch);
out:
fput(cs->cs_fp);
@@ -2015,52 +1922,6 @@ static int mdc_ioc_changelog_send(struct obd_device *obd,
static int mdc_ioc_hsm_ct_start(struct obd_export *exp,
struct lustre_kernelcomm *lk);
-static int mdc_quotacheck(struct obd_device *unused, struct obd_export *exp,
- struct obd_quotactl *oqctl)
-{
- struct client_obd *cli = &exp->exp_obd->u.cli;
- struct ptlrpc_request *req;
- struct obd_quotactl *body;
- int rc;
-
- req = ptlrpc_request_alloc_pack(class_exp2cliimp(exp),
- &RQF_MDS_QUOTACHECK, LUSTRE_MDS_VERSION,
- MDS_QUOTACHECK);
- if (!req)
- return -ENOMEM;
-
- body = req_capsule_client_get(&req->rq_pill, &RMF_OBD_QUOTACTL);
- *body = *oqctl;
-
- ptlrpc_request_set_replen(req);
-
- /* the next poll will find -ENODATA, that means quotacheck is
- * going on
- */
- cli->cl_qchk_stat = -ENODATA;
- rc = ptlrpc_queue_wait(req);
- if (rc)
- cli->cl_qchk_stat = rc;
- ptlrpc_req_finished(req);
- return rc;
-}
-
-static int mdc_quota_poll_check(struct obd_export *exp,
- struct if_quotacheck *qchk)
-{
- struct client_obd *cli = &exp->exp_obd->u.cli;
- int rc;
-
- qchk->obd_uuid = cli->cl_target_uuid;
- memcpy(qchk->obd_type, LUSTRE_MDS_NAME, strlen(LUSTRE_MDS_NAME));
-
- rc = cli->cl_qchk_stat;
- /* the client is not the previous one */
- if (rc == CL_NOT_QUOTACHECKED)
- rc = -EINTR;
- return rc;
-}
-
static int mdc_quotactl(struct obd_device *unused, struct obd_export *exp,
struct obd_quotactl *oqctl)
{
@@ -2215,9 +2076,6 @@ static int mdc_iocontrol(unsigned int cmd, struct obd_export *exp, int len,
case IOC_OSC_SET_ACTIVE:
rc = ptlrpc_set_import_active(imp, data->ioc_offset);
goto out;
- case OBD_IOC_POLL_QUOTACHECK:
- rc = mdc_quota_poll_check(exp, (struct if_quotacheck *)karg);
- goto out;
case OBD_IOC_PING_TARGET:
rc = ptlrpc_obd_ping(obd);
goto out;
@@ -2528,8 +2386,7 @@ static int mdc_set_info_async(const struct lu_env *env,
}
static int mdc_get_info(const struct lu_env *env, struct obd_export *exp,
- __u32 keylen, void *key, __u32 *vallen, void *val,
- struct lov_stripe_md *lsm)
+ __u32 keylen, void *key, __u32 *vallen, void *val)
{
int rc = -EINVAL;
@@ -2733,29 +2590,17 @@ static void mdc_llog_finish(struct obd_device *obd)
static int mdc_setup(struct obd_device *obd, struct lustre_cfg *cfg)
{
- struct client_obd *cli = &obd->u.cli;
struct lprocfs_static_vars lvars = { NULL };
int rc;
- cli->cl_rpc_lock = kzalloc(sizeof(*cli->cl_rpc_lock), GFP_NOFS);
- if (!cli->cl_rpc_lock)
- return -ENOMEM;
- mdc_init_rpc_lock(cli->cl_rpc_lock);
-
rc = ptlrpcd_addref();
if (rc < 0)
- goto err_rpc_lock;
-
- cli->cl_close_lock = kzalloc(sizeof(*cli->cl_close_lock), GFP_NOFS);
- if (!cli->cl_close_lock) {
- rc = -ENOMEM;
- goto err_ptlrpcd_decref;
- }
- mdc_init_rpc_lock(cli->cl_close_lock);
+ return rc;
rc = client_obd_setup(obd, cfg);
if (rc)
- goto err_close_lock;
+ goto err_ptlrpcd_decref;
+
lprocfs_mdc_init_vars(&lvars);
lprocfs_obd_setup(obd, lvars.obd_vars, lvars.sysfs_vars);
sptlrpc_lprocfs_cliobd_attach(obd);
@@ -2769,29 +2614,25 @@ static int mdc_setup(struct obd_device *obd, struct lustre_cfg *cfg)
if (rc) {
mdc_cleanup(obd);
CERROR("failed to setup llogging subsystems\n");
+ return rc;
}
return rc;
-err_close_lock:
- kfree(cli->cl_close_lock);
err_ptlrpcd_decref:
ptlrpcd_decref();
-err_rpc_lock:
- kfree(cli->cl_rpc_lock);
return rc;
}
-/* Initialize the default and maximum LOV EA and cookie sizes. This allows
+/* Initialize the default and maximum LOV EA sizes. This allows
* us to make MDS RPCs with large enough reply buffers to hold a default
- * sized EA and cookie without having to calculate this (via a call into the
+ * sized EA without having to calculate this (via a call into the
* LOV + OSCs) each time we make an RPC. The maximum size is also tracked
* but not used to avoid wastefully vmalloc()'ing large reply buffers when
* a large number of stripes is possible. If a larger reply buffer is
* required it will be reallocated in the ptlrpc layer due to overflow.
*/
-static int mdc_init_ea_size(struct obd_export *exp, u32 easize, u32 def_easize,
- u32 cookiesize, u32 def_cookiesize)
+static int mdc_init_ea_size(struct obd_export *exp, u32 easize, u32 def_easize)
{
struct obd_device *obd = exp->exp_obd;
struct client_obd *cli = &obd->u.cli;
@@ -2802,42 +2643,24 @@ static int mdc_init_ea_size(struct obd_export *exp, u32 easize, u32 def_easize,
if (cli->cl_default_mds_easize < def_easize)
cli->cl_default_mds_easize = def_easize;
- if (cli->cl_max_mds_cookiesize < cookiesize)
- cli->cl_max_mds_cookiesize = cookiesize;
-
- if (cli->cl_default_mds_cookiesize < def_cookiesize)
- cli->cl_default_mds_cookiesize = def_cookiesize;
-
return 0;
}
-static int mdc_precleanup(struct obd_device *obd, enum obd_cleanup_stage stage)
+static int mdc_precleanup(struct obd_device *obd)
{
- switch (stage) {
- case OBD_CLEANUP_EARLY:
- break;
- case OBD_CLEANUP_EXPORTS:
- /* Failsafe, ok if racy */
- if (obd->obd_type->typ_refcnt <= 1)
- libcfs_kkuc_group_rem(0, KUC_GRP_HSM);
+ /* Failsafe, ok if racy */
+ if (obd->obd_type->typ_refcnt <= 1)
+ libcfs_kkuc_group_rem(0, KUC_GRP_HSM);
- obd_cleanup_client_import(obd);
- ptlrpc_lprocfs_unregister_obd(obd);
- lprocfs_obd_cleanup(obd);
-
- mdc_llog_finish(obd);
- break;
- }
+ obd_cleanup_client_import(obd);
+ ptlrpc_lprocfs_unregister_obd(obd);
+ lprocfs_obd_cleanup(obd);
+ mdc_llog_finish(obd);
return 0;
}
static int mdc_cleanup(struct obd_device *obd)
{
- struct client_obd *cli = &obd->u.cli;
-
- kfree(cli->cl_rpc_lock);
- kfree(cli->cl_close_lock);
-
ptlrpcd_decref();
return client_obd_cleanup(obd);
@@ -2881,7 +2704,6 @@ static struct obd_ops mdc_obd_ops = {
.process_config = mdc_process_config,
.get_uuid = mdc_get_uuid,
.quotactl = mdc_quotactl,
- .quotacheck = mdc_quotacheck
};
static struct md_ops mdc_md_ops = {
@@ -2889,7 +2711,6 @@ static struct md_ops mdc_md_ops = {
.null_inode = mdc_null_inode,
.close = mdc_close,
.create = mdc_create,
- .done_writing = mdc_done_writing,
.enqueue = mdc_enqueue,
.getattr = mdc_getattr,
.getattr_name = mdc_getattr_name,
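
The conversions above all follow one pattern: the global cl_rpc_lock/cl_close_lock serialization is replaced by a pool of modify-RPC slots claimed around each modifying request. A minimal sketch of that pattern, assuming mdc_get_mod_rpc_slot()/mdc_put_mod_rpc_slot() take the request and an optional lookup intent as introduced elsewhere in this series:

	static int mdc_modify_rpc_example(struct ptlrpc_request *req)
	{
		int rc;

		/* Claim one of cl_max_mod_rpcs_in_flight slots; may sleep
		 * until a slot is free.
		 */
		mdc_get_mod_rpc_slot(req, NULL);
		rc = ptlrpc_queue_wait(req);
		/* Release the slot so other modify RPCs can proceed. */
		mdc_put_mod_rpc_slot(req, NULL);
		return rc;
	}

Unlike the old single rpc_lock, several modifying RPCs may now be in flight concurrently, bounded by cl_max_mod_rpcs_in_flight.
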
diff --git a/drivers/staging/lustre/lustre/mgc/mgc_request.c b/drivers/staging/lustre/lustre/mgc/mgc_request.c
index 23374cae5133..b9c522a3c7a4 100644
--- a/drivers/staging/lustre/lustre/mgc/mgc_request.c
+++ b/drivers/staging/lustre/lustre/mgc/mgc_request.c
@@ -38,11 +38,13 @@
#define D_MGC D_CONFIG /*|D_WARNING*/
#include <linux/module.h>
-#include "../include/obd_class.h"
-#include "../include/lustre_dlm.h"
+
#include "../include/lprocfs_status.h"
-#include "../include/lustre_log.h"
+#include "../include/lustre_dlm.h"
#include "../include/lustre_disk.h"
+#include "../include/lustre_log.h"
+#include "../include/lustre_swab.h"
+#include "../include/obd_class.h"
#include "mgc_internal.h"
@@ -373,7 +375,7 @@ out_err:
return rc;
}
-DEFINE_MUTEX(llog_process_lock);
+static DEFINE_MUTEX(llog_process_lock);
/** Stop watching for updates on this log.
*/
@@ -684,35 +686,33 @@ static int mgc_llog_fini(const struct lu_env *env, struct obd_device *obd)
}
static atomic_t mgc_count = ATOMIC_INIT(0);
-static int mgc_precleanup(struct obd_device *obd, enum obd_cleanup_stage stage)
+static int mgc_precleanup(struct obd_device *obd)
{
int rc = 0;
int temp;
- switch (stage) {
- case OBD_CLEANUP_EARLY:
- break;
- case OBD_CLEANUP_EXPORTS:
- if (atomic_dec_and_test(&mgc_count)) {
- LASSERT(rq_state & RQ_RUNNING);
- /* stop requeue thread */
- temp = RQ_STOP;
- } else {
- /* wakeup requeue thread to clean our cld */
- temp = RQ_NOW | RQ_PRECLEANUP;
- }
- spin_lock(&config_list_lock);
- rq_state |= temp;
- spin_unlock(&config_list_lock);
- wake_up(&rq_waitq);
- if (temp & RQ_STOP)
- wait_for_completion(&rq_exit);
- obd_cleanup_client_import(obd);
- rc = mgc_llog_fini(NULL, obd);
- if (rc != 0)
- CERROR("failed to cleanup llogging subsystems\n");
- break;
+ if (atomic_dec_and_test(&mgc_count)) {
+ LASSERT(rq_state & RQ_RUNNING);
+ /* stop requeue thread */
+ temp = RQ_STOP;
+ } else {
+ /* wakeup requeue thread to clean our cld */
+ temp = RQ_NOW | RQ_PRECLEANUP;
}
+
+ spin_lock(&config_list_lock);
+ rq_state |= temp;
+ spin_unlock(&config_list_lock);
+ wake_up(&rq_waitq);
+
+ if (temp & RQ_STOP)
+ wait_for_completion(&rq_exit);
+ obd_cleanup_client_import(obd);
+
+ rc = mgc_llog_fini(NULL, obd);
+ if (rc)
+ CERROR("failed to cleanup llogging subsystems\n");
+
return rc;
}
@@ -887,8 +887,8 @@ static int mgc_set_mgs_param(struct obd_export *exp,
}
/* Take a config lock so we can get cancel notifications */
-static int mgc_enqueue(struct obd_export *exp, struct lov_stripe_md *lsm,
- __u32 type, ldlm_policy_data_t *policy, __u32 mode,
+static int mgc_enqueue(struct obd_export *exp, __u32 type,
+ union ldlm_policy_data *policy, __u32 mode,
__u64 *flags, void *bl_cb, void *cp_cb, void *gl_cb,
void *data, __u32 lvb_len, void *lvb_swabber,
struct lustre_handle *lockh)
@@ -1059,8 +1059,7 @@ static int mgc_set_info_async(const struct lu_env *env, struct obd_export *exp,
}
static int mgc_get_info(const struct lu_env *env, struct obd_export *exp,
- __u32 keylen, void *key, __u32 *vallen, void *val,
- struct lov_stripe_md *unused)
+ __u32 keylen, void *key, __u32 *vallen, void *val)
{
int rc = -EINVAL;
@@ -1387,15 +1386,17 @@ again:
body->mcb_units = nrpages;
/* allocate bulk transfer descriptor */
- desc = ptlrpc_prep_bulk_imp(req, nrpages, 1, BULK_PUT_SINK,
- MGS_BULK_PORTAL);
+ desc = ptlrpc_prep_bulk_imp(req, nrpages, 1,
+ PTLRPC_BULK_PUT_SINK | PTLRPC_BULK_BUF_KIOV,
+ MGS_BULK_PORTAL,
+ &ptlrpc_bulk_kiov_pin_ops);
if (!desc) {
rc = -ENOMEM;
goto out;
}
for (i = 0; i < nrpages; i++)
- ptlrpc_prep_bulk_page_pin(desc, pages[i], 0, PAGE_SIZE);
+ desc->bd_frag_ops->add_kiov_frag(desc, pages[i], 0, PAGE_SIZE);
ptlrpc_request_set_replen(req);
rc = ptlrpc_queue_wait(req);
@@ -1553,14 +1554,52 @@ out_free:
return rc;
}
-/** Get a config log from the MGS and process it.
- * This func is called for both clients and servers.
- * Copy the log locally before parsing it if appropriate (non-MGS server)
+static bool mgc_import_in_recovery(struct obd_import *imp)
+{
+ bool in_recovery = true;
+
+ spin_lock(&imp->imp_lock);
+ if (imp->imp_state == LUSTRE_IMP_FULL ||
+ imp->imp_state == LUSTRE_IMP_CLOSED)
+ in_recovery = false;
+ spin_unlock(&imp->imp_lock);
+
+ return in_recovery;
+}
+
+/**
+ * Get a configuration log from the MGS and process it.
+ *
+ * This function is called for both clients and servers to process the
+ * configuration log from the MGS. The MGC enqueues a DLM lock on the
+ * log from the MGS; if the lock is revoked, the lock cancellation
+ * callback notifies the MGC that the config log has changed. The MGC
+ * then enqueues another MGS lock on it and continues processing the
+ * new additions at the end of the log.
+ *
+ * Since the MGC import is not replayable, if the import is being evicted
+ * (rcl == -ESHUTDOWN, \see ptlrpc_import_delay_req()), keep retrying to
+ * process the log until recovery is finished or the import is closed.
+ *
+ * Make a local copy of the log before parsing it if appropriate (non-MGS
+ * server) so that the server can start even when the MGS is down.
+ *
+ * There shouldn't be multiple processes running process_log at once --
+ * sounds like badness. It actually might be fine, as long as they're not
+ * trying to update from the same log simultaneously, in which case we
+ * should use a per-log semaphore instead of cld_lock.
+ *
+ * \param[in] mgc MGC device by which to fetch the configuration log
+ * \param[in] cld log processing state (stored in lock callback data)
+ *
+ * \retval 0 on success
+ * \retval negative errno on failure
*/
int mgc_process_log(struct obd_device *mgc, struct config_llog_data *cld)
{
struct lustre_handle lockh = { 0 };
__u64 flags = LDLM_FL_NO_LRU;
+ bool retry = false;
int rc = 0, rcl;
LASSERT(cld);
@@ -1570,6 +1609,7 @@ int mgc_process_log(struct obd_device *mgc, struct config_llog_data *cld)
* we're not trying to update from the same log
* simultaneously (in which case we should use a per-log sem.)
*/
+restart:
mutex_lock(&cld->cld_lock);
if (cld->cld_stopping) {
mutex_unlock(&cld->cld_lock);
@@ -1582,7 +1622,7 @@ int mgc_process_log(struct obd_device *mgc, struct config_llog_data *cld)
cld->cld_cfg.cfg_instance, cld->cld_cfg.cfg_last_idx + 1);
/* Get the cfg lock on the llog */
- rcl = mgc_enqueue(mgc->u.cli.cl_mgc_mgsexp, NULL, LDLM_PLAIN, NULL,
+ rcl = mgc_enqueue(mgc->u.cli.cl_mgc_mgsexp, LDLM_PLAIN, NULL,
LCK_CR, &flags, NULL, NULL, NULL,
cld, 0, NULL, &lockh);
if (rcl == 0) {
@@ -1593,18 +1633,57 @@ int mgc_process_log(struct obd_device *mgc, struct config_llog_data *cld)
} else {
CDEBUG(D_MGC, "Can't get cfg lock: %d\n", rcl);
- /* mark cld_lostlock so that it will requeue
- * after MGC becomes available.
- */
- cld->cld_lostlock = 1;
+ if (rcl == -ESHUTDOWN &&
+ atomic_read(&mgc->u.cli.cl_mgc_refcount) > 0 && !retry) {
+ int secs = cfs_time_seconds(obd_timeout);
+ struct obd_import *imp;
+ struct l_wait_info lwi;
+
+ mutex_unlock(&cld->cld_lock);
+ imp = class_exp2cliimp(mgc->u.cli.cl_mgc_mgsexp);
+
+ /*
+ * Force the pinger and wait for the import to be
+ * connected. Note: since the MGC import is
+ * non-replayable, even a disconnected import state
+ * does not mean "recovery" has stopped, so we keep
+ * waiting until timeout or until the import state
+ * is FULL or CLOSED.
+ */
+ ptlrpc_pinger_force(imp);
+
+ lwi = LWI_TIMEOUT(secs, NULL, NULL);
+ l_wait_event(imp->imp_recovery_waitq,
+ !mgc_import_in_recovery(imp), &lwi);
+
+ if (imp->imp_state == LUSTRE_IMP_FULL) {
+ retry = true;
+ goto restart;
+ } else {
+ mutex_lock(&cld->cld_lock);
+ cld->cld_lostlock = 1;
+ }
+ } else {
+ /* mark cld_lostlock so that it will requeue
+ * after MGC becomes available.
+ */
+ cld->cld_lostlock = 1;
+ }
/* Get extra reference, it will be put in requeue thread */
config_log_get(cld);
}
if (cld_is_recover(cld)) {
rc = 0; /* this is not a fatal error for recover log */
- if (rcl == 0)
+ if (!rcl) {
rc = mgc_process_recover_log(mgc, cld);
+ if (rc) {
+ CERROR("%s: recover log %s failed: rc = %d (not fatal)\n",
+ mgc->obd_name, cld->cld_logname, rc);
+ rc = 0;
+ cld->cld_lostlock = 1;
+ }
+ }
} else {
rc = mgc_process_cfg_log(mgc, cld, rcl != 0);
}
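
Condensed, the new retry path in mgc_process_log() amounts to: kick the pinger, wait for the import to leave recovery, and restart the enqueue once if the import came back FULL. A sketch of that step as a standalone helper, illustrative only; the patch open-codes it:

	static int mgc_wait_import_ready(struct obd_import *imp)
	{
		struct l_wait_info lwi;

		/* Force an immediate reconnect attempt. */
		ptlrpc_pinger_force(imp);

		/* Sleep until the import is FULL or CLOSED, or we time out. */
		lwi = LWI_TIMEOUT(cfs_time_seconds(obd_timeout), NULL, NULL);
		l_wait_event(imp->imp_recovery_waitq,
			     !mgc_import_in_recovery(imp), &lwi);

		return imp->imp_state == LUSTRE_IMP_FULL ? 0 : -ESHUTDOWN;
	}

The retry flag ensures this happens at most once per mgc_process_log() call, so an unreachable MGS still fails the enqueue after one timeout instead of looping.
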
diff --git a/drivers/staging/lustre/lustre/obdclass/Makefile b/drivers/staging/lustre/lustre/obdclass/Makefile
index b42e109b30e0..af570c0db15b 100644
--- a/drivers/staging/lustre/lustre/obdclass/Makefile
+++ b/drivers/staging/lustre/lustre/obdclass/Makefile
@@ -1,6 +1,6 @@
obj-$(CONFIG_LUSTRE_FS) += obdclass.o
-obdclass-y := linux/linux-module.o linux/linux-obdo.o linux/linux-sysctl.o \
+obdclass-y := linux/linux-module.o linux/linux-sysctl.o \
llog.o llog_cat.o llog_obd.o llog_swab.o class_obd.o debug.o \
genops.o uuid.o lprocfs_status.o lprocfs_counters.o \
lustre_handles.o lustre_peer.o statfs_pack.o linkea.o \
diff --git a/drivers/staging/lustre/lustre/obdclass/cl_internal.h b/drivers/staging/lustre/lustre/obdclass/cl_internal.h
index e866754a42d5..7b403fbd5f94 100644
--- a/drivers/staging/lustre/lustre/obdclass/cl_internal.h
+++ b/drivers/staging/lustre/lustre/obdclass/cl_internal.h
@@ -50,25 +50,6 @@ enum clt_nesting_level {
};
/**
- * Counters used to check correctness of cl_lock interface usage.
- */
-struct cl_thread_counters {
- /**
- * Number of outstanding calls to cl_lock_mutex_get() made by the
- * current thread. For debugging.
- */
- int ctc_nr_locks_locked;
- /** List of locked locks. */
- struct lu_ref ctc_locks_locked;
- /** Number of outstanding holds on locks. */
- int ctc_nr_held;
- /** Number of outstanding uses on locks. */
- int ctc_nr_used;
- /** Number of held extent locks. */
- int ctc_nr_locks_acquired;
-};
-
-/**
* Thread local state internal for generic cl-code.
*/
struct cl_thread_info {
@@ -83,10 +64,6 @@ struct cl_thread_info {
*/
struct cl_lock_descr clt_descr;
struct cl_page_list clt_list;
- /**
- * Counters for every level of lock nesting.
- */
- struct cl_thread_counters clt_counters[CNL_NR];
/** @} debugging */
/*
diff --git a/drivers/staging/lustre/lustre/obdclass/cl_io.c b/drivers/staging/lustre/lustre/obdclass/cl_io.c
index bc4b7b6b9a20..3f42457b0d7d 100644
--- a/drivers/staging/lustre/lustre/obdclass/cl_io.c
+++ b/drivers/staging/lustre/lustre/obdclass/cl_io.c
@@ -126,6 +126,7 @@ void cl_io_fini(const struct lu_env *env, struct cl_io *io)
switch (io->ci_type) {
case CIT_READ:
case CIT_WRITE:
+ case CIT_DATA_VERSION:
break;
case CIT_FAULT:
break;
@@ -411,7 +412,6 @@ void cl_io_unlock(const struct lu_env *env, struct cl_io *io)
scan->cis_iop->op[io->ci_type].cio_unlock(env, scan);
}
io->ci_state = CIS_UNLOCKED;
- LASSERT(!cl_env_info(env)->clt_counters[CNL_TOP].ctc_nr_locks_acquired);
}
EXPORT_SYMBOL(cl_io_unlock);
@@ -586,67 +586,32 @@ void cl_io_end(const struct lu_env *env, struct cl_io *io)
}
EXPORT_SYMBOL(cl_io_end);
-static const struct cl_page_slice *
-cl_io_slice_page(const struct cl_io_slice *ios, struct cl_page *page)
-{
- const struct cl_page_slice *slice;
-
- slice = cl_page_at(page, ios->cis_obj->co_lu.lo_dev->ld_type);
- LINVRNT(slice);
- return slice;
-}
-
/**
- * Called by read io, when page has to be read from the server.
+ * Called by read io to decide the readahead extent
*
- * \see cl_io_operations::cio_read_page()
+ * \see cl_io_operations::cio_read_ahead()
*/
-int cl_io_read_page(const struct lu_env *env, struct cl_io *io,
- struct cl_page *page)
+int cl_io_read_ahead(const struct lu_env *env, struct cl_io *io,
+ pgoff_t start, struct cl_read_ahead *ra)
{
const struct cl_io_slice *scan;
- struct cl_2queue *queue;
int result = 0;
LINVRNT(io->ci_type == CIT_READ || io->ci_type == CIT_FAULT);
- LINVRNT(cl_page_is_owned(page, io));
LINVRNT(io->ci_state == CIS_IO_GOING || io->ci_state == CIS_LOCKED);
LINVRNT(cl_io_invariant(io));
- queue = &io->ci_queue;
-
- cl_2queue_init(queue);
- /*
- * ->cio_read_page() methods called in the loop below are supposed to
- * never block waiting for network (the only subtle point is the
- * creation of new pages for read-ahead that might result in cache
- * shrinking, but currently only clean pages are shrunk and this
- * requires no network io).
- *
- * Should this ever starts blocking, retry loop would be needed for
- * "parallel io" (see CLO_REPEAT loops in cl_lock.c).
- */
cl_io_for_each(scan, io) {
- if (scan->cis_iop->cio_read_page) {
- const struct cl_page_slice *slice;
+ if (!scan->cis_iop->cio_read_ahead)
+ continue;
- slice = cl_io_slice_page(scan, page);
- LINVRNT(slice);
- result = scan->cis_iop->cio_read_page(env, scan, slice);
- if (result != 0)
- break;
- }
+ result = scan->cis_iop->cio_read_ahead(env, scan, start, ra);
+ if (result)
+ break;
}
- if (result == 0 && queue->c2_qin.pl_nr > 0)
- result = cl_io_submit_rw(env, io, CRT_READ, queue);
- /*
- * Unlock unsent pages in case of error.
- */
- cl_page_list_disown(env, io, &queue->c2_qin);
- cl_2queue_fini(env, queue);
- return result;
+ return result > 0 ? 0 : result;
}
-EXPORT_SYMBOL(cl_io_read_page);
+EXPORT_SYMBOL(cl_io_read_ahead);
/**
* Commit a list of contiguous pages into writeback cache.
@@ -1080,235 +1045,18 @@ struct cl_io *cl_io_top(struct cl_io *io)
EXPORT_SYMBOL(cl_io_top);
/**
- * Adds request slice to the compound request.
- *
- * This is called by cl_device_operations::cdo_req_init() methods to add a
- * per-layer state to the request. New state is added at the end of
- * cl_req::crq_layers list, that is, it is at the bottom of the stack.
- *
- * \see cl_lock_slice_add(), cl_page_slice_add(), cl_io_slice_add()
- */
-void cl_req_slice_add(struct cl_req *req, struct cl_req_slice *slice,
- struct cl_device *dev,
- const struct cl_req_operations *ops)
-{
- list_add_tail(&slice->crs_linkage, &req->crq_layers);
- slice->crs_dev = dev;
- slice->crs_ops = ops;
- slice->crs_req = req;
-}
-EXPORT_SYMBOL(cl_req_slice_add);
-
-static void cl_req_free(const struct lu_env *env, struct cl_req *req)
-{
- unsigned i;
-
- LASSERT(list_empty(&req->crq_pages));
- LASSERT(req->crq_nrpages == 0);
- LINVRNT(list_empty(&req->crq_layers));
- LINVRNT(equi(req->crq_nrobjs > 0, req->crq_o));
-
- if (req->crq_o) {
- for (i = 0; i < req->crq_nrobjs; ++i) {
- struct cl_object *obj = req->crq_o[i].ro_obj;
-
- if (obj) {
- lu_object_ref_del_at(&obj->co_lu,
- &req->crq_o[i].ro_obj_ref,
- "cl_req", req);
- cl_object_put(env, obj);
- }
- }
- kfree(req->crq_o);
- }
- kfree(req);
-}
-
-static int cl_req_init(const struct lu_env *env, struct cl_req *req,
- struct cl_page *page)
-{
- struct cl_device *dev;
- struct cl_page_slice *slice;
- int result;
-
- result = 0;
- list_for_each_entry(slice, &page->cp_layers, cpl_linkage) {
- dev = lu2cl_dev(slice->cpl_obj->co_lu.lo_dev);
- if (dev->cd_ops->cdo_req_init) {
- result = dev->cd_ops->cdo_req_init(env, dev, req);
- if (result != 0)
- break;
- }
- }
- return result;
-}
-
-/**
- * Invokes per-request transfer completion call-backs
- * (cl_req_operations::cro_completion()) bottom-to-top.
- */
-void cl_req_completion(const struct lu_env *env, struct cl_req *req, int rc)
-{
- struct cl_req_slice *slice;
-
- /*
- * for the lack of list_for_each_entry_reverse_safe()...
- */
- while (!list_empty(&req->crq_layers)) {
- slice = list_entry(req->crq_layers.prev,
- struct cl_req_slice, crs_linkage);
- list_del_init(&slice->crs_linkage);
- if (slice->crs_ops->cro_completion)
- slice->crs_ops->cro_completion(env, slice, rc);
- }
- cl_req_free(env, req);
-}
-EXPORT_SYMBOL(cl_req_completion);
-
-/**
- * Allocates new transfer request.
- */
-struct cl_req *cl_req_alloc(const struct lu_env *env, struct cl_page *page,
- enum cl_req_type crt, int nr_objects)
-{
- struct cl_req *req;
-
- LINVRNT(nr_objects > 0);
-
- req = kzalloc(sizeof(*req), GFP_NOFS);
- if (req) {
- int result;
-
- req->crq_type = crt;
- INIT_LIST_HEAD(&req->crq_pages);
- INIT_LIST_HEAD(&req->crq_layers);
-
- req->crq_o = kcalloc(nr_objects, sizeof(req->crq_o[0]),
- GFP_NOFS);
- if (req->crq_o) {
- req->crq_nrobjs = nr_objects;
- result = cl_req_init(env, req, page);
- } else {
- result = -ENOMEM;
- }
- if (result != 0) {
- cl_req_completion(env, req, result);
- req = ERR_PTR(result);
- }
- } else {
- req = ERR_PTR(-ENOMEM);
- }
- return req;
-}
-EXPORT_SYMBOL(cl_req_alloc);
-
-/**
- * Adds a page to a request.
- */
-void cl_req_page_add(const struct lu_env *env,
- struct cl_req *req, struct cl_page *page)
-{
- struct cl_object *obj;
- struct cl_req_obj *rqo;
- unsigned int i;
-
- LASSERT(list_empty(&page->cp_flight));
- LASSERT(!page->cp_req);
-
- CL_PAGE_DEBUG(D_PAGE, env, page, "req %p, %d, %u\n",
- req, req->crq_type, req->crq_nrpages);
-
- list_add_tail(&page->cp_flight, &req->crq_pages);
- ++req->crq_nrpages;
- page->cp_req = req;
- obj = cl_object_top(page->cp_obj);
- for (i = 0, rqo = req->crq_o; obj != rqo->ro_obj; ++i, ++rqo) {
- if (!rqo->ro_obj) {
- rqo->ro_obj = obj;
- cl_object_get(obj);
- lu_object_ref_add_at(&obj->co_lu, &rqo->ro_obj_ref,
- "cl_req", req);
- break;
- }
- }
- LASSERT(i < req->crq_nrobjs);
-}
-EXPORT_SYMBOL(cl_req_page_add);
-
-/**
- * Removes a page from a request.
- */
-void cl_req_page_done(const struct lu_env *env, struct cl_page *page)
-{
- struct cl_req *req = page->cp_req;
-
- LASSERT(!list_empty(&page->cp_flight));
- LASSERT(req->crq_nrpages > 0);
-
- list_del_init(&page->cp_flight);
- --req->crq_nrpages;
- page->cp_req = NULL;
-}
-EXPORT_SYMBOL(cl_req_page_done);
-
-/**
- * Notifies layers that request is about to depart by calling
- * cl_req_operations::cro_prep() top-to-bottom.
- */
-int cl_req_prep(const struct lu_env *env, struct cl_req *req)
-{
- unsigned int i;
- int result;
- const struct cl_req_slice *slice;
-
- /*
- * Check that the caller of cl_req_alloc() didn't lie about the number
- * of objects.
- */
- for (i = 0; i < req->crq_nrobjs; ++i)
- LASSERT(req->crq_o[i].ro_obj);
-
- result = 0;
- list_for_each_entry(slice, &req->crq_layers, crs_linkage) {
- if (slice->crs_ops->cro_prep) {
- result = slice->crs_ops->cro_prep(env, slice);
- if (result != 0)
- break;
- }
- }
- return result;
-}
-EXPORT_SYMBOL(cl_req_prep);
-
-/**
* Fills in attributes that are passed to server together with transfer. Only
* attributes from \a flags may be touched. This can be called multiple times
* for the same request.
*/
-void cl_req_attr_set(const struct lu_env *env, struct cl_req *req,
- struct cl_req_attr *attr, u64 flags)
+void cl_req_attr_set(const struct lu_env *env, struct cl_object *obj,
+ struct cl_req_attr *attr)
{
- const struct cl_req_slice *slice;
- struct cl_page *page;
- unsigned int i;
-
- LASSERT(!list_empty(&req->crq_pages));
-
- /* Take any page to use as a model. */
- page = list_entry(req->crq_pages.next, struct cl_page, cp_flight);
-
- for (i = 0; i < req->crq_nrobjs; ++i) {
- list_for_each_entry(slice, &req->crq_layers, crs_linkage) {
- const struct cl_page_slice *scan;
- const struct cl_object *obj;
-
- scan = cl_page_at(page,
- slice->crs_dev->cd_lu_dev.ld_type);
- obj = scan->cpl_obj;
- if (slice->crs_ops->cro_attr_set)
- slice->crs_ops->cro_attr_set(env, slice, obj,
- attr + i, flags);
- }
+ struct cl_object *scan;
+
+ cl_object_for_each(scan, obj) {
+ if (scan->co_ops->coo_req_attr_set)
+ scan->co_ops->coo_req_attr_set(env, scan, attr);
}
}
EXPORT_SYMBOL(cl_req_attr_set);
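
With struct cl_req gone, transfer attributes are gathered by walking the layers of a single cl_object, and a layer that wants to contribute implements coo_req_attr_set(). A hedged sketch of such a hook, where the cl_req_attr member name (cra_oa) is an assumption for illustration:

	static void example_req_attr_set(const struct lu_env *env,
					 struct cl_object *obj,
					 struct cl_req_attr *attr)
	{
		/* Fill only the attributes this layer owns; the other
		 * layers visited by cl_object_for_each() fill the rest.
		 */
		attr->cra_oa->o_valid |= OBD_MD_FLID;
	}

Compared with the old cl_req machinery, no per-request slice list is allocated; cl_req_attr_set() simply iterates the object stack top to bottom.
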
diff --git a/drivers/staging/lustre/lustre/obdclass/cl_object.c b/drivers/staging/lustre/lustre/obdclass/cl_object.c
index 3199dd4a3b72..f5d4e23c64b7 100644
--- a/drivers/staging/lustre/lustre/obdclass/cl_object.c
+++ b/drivers/staging/lustre/lustre/obdclass/cl_object.c
@@ -335,7 +335,7 @@ int cl_object_getstripe(const struct lu_env *env, struct cl_object *obj,
if (obj->co_ops->coo_getstripe) {
result = obj->co_ops->coo_getstripe(env, obj, uarg);
if (result)
- break;
+ break;
}
}
return result;
@@ -343,6 +343,67 @@ int cl_object_getstripe(const struct lu_env *env, struct cl_object *obj,
EXPORT_SYMBOL(cl_object_getstripe);
/**
+ * Get fiemap extents from file object.
+ *
+ * \param env [in] lustre environment
+ * \param obj [in] file object
+ * \param key [in] fiemap request argument
+ * \param fiemap [out] fiemap extents mapping retrieved
+ * \param buflen [in] max buffer length of @fiemap
+ *
+ * \retval 0 success
+ * \retval < 0 error
+ */
+int cl_object_fiemap(const struct lu_env *env, struct cl_object *obj,
+ struct ll_fiemap_info_key *key,
+ struct fiemap *fiemap, size_t *buflen)
+{
+ struct lu_object_header *top;
+ int result = 0;
+
+ top = obj->co_lu.lo_header;
+ list_for_each_entry(obj, &top->loh_layers, co_lu.lo_linkage) {
+ if (obj->co_ops->coo_fiemap) {
+ result = obj->co_ops->coo_fiemap(env, obj, key, fiemap,
+ buflen);
+ if (result)
+ break;
+ }
+ }
+ return result;
+}
+EXPORT_SYMBOL(cl_object_fiemap);
+
+int cl_object_layout_get(const struct lu_env *env, struct cl_object *obj,
+ struct cl_layout *cl)
+{
+ struct lu_object_header *top = obj->co_lu.lo_header;
+
+ list_for_each_entry(obj, &top->loh_layers, co_lu.lo_linkage) {
+ if (obj->co_ops->coo_layout_get)
+ return obj->co_ops->coo_layout_get(env, obj, cl);
+ }
+
+ return -EOPNOTSUPP;
+}
+EXPORT_SYMBOL(cl_object_layout_get);
+
+loff_t cl_object_maxbytes(struct cl_object *obj)
+{
+ struct lu_object_header *top = obj->co_lu.lo_header;
+ loff_t maxbytes = LLONG_MAX;
+
+ list_for_each_entry(obj, &top->loh_layers, co_lu.lo_linkage) {
+ if (obj->co_ops->coo_maxbytes)
+ maxbytes = min_t(loff_t, obj->co_ops->coo_maxbytes(obj),
+ maxbytes);
+ }
+
+ return maxbytes;
+}
+EXPORT_SYMBOL(cl_object_maxbytes);
+
+/**
* Helper function removing all object locks, and marking object for
* deletion. All object pages must have been deleted at this point.
*
@@ -483,36 +544,20 @@ EXPORT_SYMBOL(cl_site_stats_print);
* bz20044, bz22683.
*/
-static LIST_HEAD(cl_envs);
-static unsigned int cl_envs_cached_nr;
-static unsigned int cl_envs_cached_max = 128; /* XXX: prototype: arbitrary limit
- * for now.
- */
-static DEFINE_SPINLOCK(cl_envs_guard);
+static unsigned int cl_envs_cached_max = 32; /* XXX: prototype: arbitrary limit
+ * for now.
+ */
+static struct cl_env_cache {
+ rwlock_t cec_guard;
+ unsigned int cec_count;
+ struct list_head cec_envs;
+} *cl_envs = NULL;
struct cl_env {
void *ce_magic;
struct lu_env ce_lu;
struct lu_context ce_ses;
- /**
- * This allows cl_env to be entered into cl_env_hash which implements
- * the current thread -> client environment lookup.
- */
- struct hlist_node ce_node;
- /**
- * Owner for the current cl_env.
- *
- * If LL_TASK_CL_ENV is defined, this point to the owning current,
- * only for debugging purpose ;
- * Otherwise hash is used, and this is the key for cfs_hash.
- * Now current thread pid is stored. Note using thread pointer would
- * lead to unbalanced hash because of its specific allocation locality
- * and could be varied for different platforms and OSes, even different
- * OS versions.
- */
- void *ce_owner;
-
/*
* Linkage into global list of all client environments. Used for
* garbage collection.
@@ -536,122 +581,13 @@ static void cl_env_init0(struct cl_env *cle, void *debug)
{
LASSERT(cle->ce_ref == 0);
LASSERT(cle->ce_magic == &cl_env_init0);
- LASSERT(!cle->ce_debug && !cle->ce_owner);
+ LASSERT(!cle->ce_debug);
cle->ce_ref = 1;
cle->ce_debug = debug;
CL_ENV_INC(busy);
}
-/*
- * The implementation of using hash table to connect cl_env and thread
- */
-
-static struct cfs_hash *cl_env_hash;
-
-static unsigned cl_env_hops_hash(struct cfs_hash *lh,
- const void *key, unsigned mask)
-{
-#if BITS_PER_LONG == 64
- return cfs_hash_u64_hash((__u64)key, mask);
-#else
- return cfs_hash_u32_hash((__u32)key, mask);
-#endif
-}
-
-static void *cl_env_hops_obj(struct hlist_node *hn)
-{
- struct cl_env *cle = hlist_entry(hn, struct cl_env, ce_node);
-
- LASSERT(cle->ce_magic == &cl_env_init0);
- return (void *)cle;
-}
-
-static int cl_env_hops_keycmp(const void *key, struct hlist_node *hn)
-{
- struct cl_env *cle = cl_env_hops_obj(hn);
-
- LASSERT(cle->ce_owner);
- return (key == cle->ce_owner);
-}
-
-static void cl_env_hops_noop(struct cfs_hash *hs, struct hlist_node *hn)
-{
- struct cl_env *cle = hlist_entry(hn, struct cl_env, ce_node);
-
- LASSERT(cle->ce_magic == &cl_env_init0);
-}
-
-static struct cfs_hash_ops cl_env_hops = {
- .hs_hash = cl_env_hops_hash,
- .hs_key = cl_env_hops_obj,
- .hs_keycmp = cl_env_hops_keycmp,
- .hs_object = cl_env_hops_obj,
- .hs_get = cl_env_hops_noop,
- .hs_put_locked = cl_env_hops_noop,
-};
-
-static inline struct cl_env *cl_env_fetch(void)
-{
- struct cl_env *cle;
-
- cle = cfs_hash_lookup(cl_env_hash, (void *)(long)current->pid);
- LASSERT(ergo(cle, cle->ce_magic == &cl_env_init0));
- return cle;
-}
-
-static inline void cl_env_attach(struct cl_env *cle)
-{
- if (cle) {
- int rc;
-
- LASSERT(!cle->ce_owner);
- cle->ce_owner = (void *)(long)current->pid;
- rc = cfs_hash_add_unique(cl_env_hash, cle->ce_owner,
- &cle->ce_node);
- LASSERT(rc == 0);
- }
-}
-
-static inline void cl_env_do_detach(struct cl_env *cle)
-{
- void *cookie;
-
- LASSERT(cle->ce_owner == (void *)(long)current->pid);
- cookie = cfs_hash_del(cl_env_hash, cle->ce_owner,
- &cle->ce_node);
- LASSERT(cookie == cle);
- cle->ce_owner = NULL;
-}
-
-static int cl_env_store_init(void)
-{
- cl_env_hash = cfs_hash_create("cl_env",
- HASH_CL_ENV_BITS, HASH_CL_ENV_BITS,
- HASH_CL_ENV_BKT_BITS, 0,
- CFS_HASH_MIN_THETA,
- CFS_HASH_MAX_THETA,
- &cl_env_hops,
- CFS_HASH_RW_BKTLOCK);
- return cl_env_hash ? 0 : -ENOMEM;
-}
-
-static void cl_env_store_fini(void)
-{
- cfs_hash_putref(cl_env_hash);
-}
-
-static inline struct cl_env *cl_env_detach(struct cl_env *cle)
-{
- if (!cle)
- cle = cl_env_fetch();
-
- if (cle && cle->ce_owner)
- cl_env_do_detach(cle);
-
- return cle;
-}
-
static struct lu_env *cl_env_new(__u32 ctx_tags, __u32 ses_tags, void *debug)
{
struct lu_env *env;
@@ -701,16 +637,20 @@ static struct lu_env *cl_env_obtain(void *debug)
{
struct cl_env *cle;
struct lu_env *env;
+ int cpu = get_cpu();
- spin_lock(&cl_envs_guard);
- LASSERT(equi(cl_envs_cached_nr == 0, list_empty(&cl_envs)));
- if (cl_envs_cached_nr > 0) {
+ read_lock(&cl_envs[cpu].cec_guard);
+ LASSERT(equi(cl_envs[cpu].cec_count == 0,
+ list_empty(&cl_envs[cpu].cec_envs)));
+ if (cl_envs[cpu].cec_count > 0) {
int rc;
- cle = container_of(cl_envs.next, struct cl_env, ce_linkage);
+ cle = container_of(cl_envs[cpu].cec_envs.next, struct cl_env,
+ ce_linkage);
list_del_init(&cle->ce_linkage);
- cl_envs_cached_nr--;
- spin_unlock(&cl_envs_guard);
+ cl_envs[cpu].cec_count--;
+ read_unlock(&cl_envs[cpu].cec_guard);
+ put_cpu();
env = &cle->ce_lu;
rc = lu_env_refill(env);
@@ -723,7 +663,8 @@ static struct lu_env *cl_env_obtain(void *debug)
env = ERR_PTR(rc);
}
} else {
- spin_unlock(&cl_envs_guard);
+ read_unlock(&cl_envs[cpu].cec_guard);
+ put_cpu();
env = cl_env_new(lu_context_tags_default,
lu_session_tags_default, debug);
}
@@ -735,27 +676,6 @@ static inline struct cl_env *cl_env_container(struct lu_env *env)
return container_of(env, struct cl_env, ce_lu);
}
-static struct lu_env *cl_env_peek(int *refcheck)
-{
- struct lu_env *env;
- struct cl_env *cle;
-
- CL_ENV_INC(lookup);
-
- /* check that we don't go far from untrusted pointer */
- CLASSERT(offsetof(struct cl_env, ce_magic) == 0);
-
- env = NULL;
- cle = cl_env_fetch();
- if (cle) {
- CL_ENV_INC(hit);
- env = &cle->ce_lu;
- *refcheck = ++cle->ce_ref;
- }
- CDEBUG(D_OTHER, "%d@%p\n", cle ? cle->ce_ref : 0, cle);
- return env;
-}
-
/**
* Returns lu_env: if there already is an environment associated with the
* current thread, it is returned, otherwise, new environment is allocated.
@@ -773,17 +693,13 @@ struct lu_env *cl_env_get(int *refcheck)
{
struct lu_env *env;
- env = cl_env_peek(refcheck);
- if (!env) {
- env = cl_env_obtain(__builtin_return_address(0));
- if (!IS_ERR(env)) {
- struct cl_env *cle;
+ env = cl_env_obtain(__builtin_return_address(0));
+ if (!IS_ERR(env)) {
+ struct cl_env *cle;
- cle = cl_env_container(env);
- cl_env_attach(cle);
- *refcheck = cle->ce_ref;
- CDEBUG(D_OTHER, "%d@%p\n", cle->ce_ref, cle);
- }
+ cle = cl_env_container(env);
+ *refcheck = cle->ce_ref;
+ CDEBUG(D_OTHER, "%d@%p\n", cle->ce_ref, cle);
}
return env;
}
@@ -798,7 +714,6 @@ struct lu_env *cl_env_alloc(int *refcheck, __u32 tags)
{
struct lu_env *env;
- LASSERT(!cl_env_peek(refcheck));
env = cl_env_new(tags, tags, __builtin_return_address(0));
if (!IS_ERR(env)) {
struct cl_env *cle;
@@ -813,7 +728,6 @@ EXPORT_SYMBOL(cl_env_alloc);
static void cl_env_exit(struct cl_env *cle)
{
- LASSERT(!cle->ce_owner);
lu_context_exit(&cle->ce_lu.le_ctx);
lu_context_exit(&cle->ce_ses);
}
@@ -826,20 +740,25 @@ static void cl_env_exit(struct cl_env *cle)
unsigned int cl_env_cache_purge(unsigned int nr)
{
struct cl_env *cle;
+ unsigned int i;
- spin_lock(&cl_envs_guard);
- for (; !list_empty(&cl_envs) && nr > 0; --nr) {
- cle = container_of(cl_envs.next, struct cl_env, ce_linkage);
- list_del_init(&cle->ce_linkage);
- LASSERT(cl_envs_cached_nr > 0);
- cl_envs_cached_nr--;
- spin_unlock(&cl_envs_guard);
+ for_each_possible_cpu(i) {
+ write_lock(&cl_envs[i].cec_guard);
+ for (; !list_empty(&cl_envs[i].cec_envs) && nr > 0; --nr) {
+ cle = container_of(cl_envs[i].cec_envs.next,
+ struct cl_env, ce_linkage);
+ list_del_init(&cle->ce_linkage);
+ LASSERT(cl_envs[i].cec_count > 0);
+ cl_envs[i].cec_count--;
+ write_unlock(&cl_envs[i].cec_guard);
- cl_env_fini(cle);
- spin_lock(&cl_envs_guard);
+ cl_env_fini(cle);
+ write_lock(&cl_envs[i].cec_guard);
+ }
+ LASSERT(equi(cl_envs[i].cec_count == 0,
+ list_empty(&cl_envs[i].cec_envs)));
+ write_unlock(&cl_envs[i].cec_guard);
}
- LASSERT(equi(cl_envs_cached_nr == 0, list_empty(&cl_envs)));
- spin_unlock(&cl_envs_guard);
return nr;
}
EXPORT_SYMBOL(cl_env_cache_purge);
@@ -862,8 +781,9 @@ void cl_env_put(struct lu_env *env, int *refcheck)
CDEBUG(D_OTHER, "%d@%p\n", cle->ce_ref, cle);
if (--cle->ce_ref == 0) {
+ int cpu = get_cpu();
+
CL_ENV_DEC(busy);
- cl_env_detach(cle);
cle->ce_debug = NULL;
cl_env_exit(cle);
/*
@@ -872,107 +792,22 @@ void cl_env_put(struct lu_env *env, int *refcheck)
* Return environment to the cache only when it was allocated
* with the standard tags.
*/
- if (cl_envs_cached_nr < cl_envs_cached_max &&
+ if (cl_envs[cpu].cec_count < cl_envs_cached_max &&
(env->le_ctx.lc_tags & ~LCT_HAS_EXIT) == LCT_CL_THREAD &&
(env->le_ses->lc_tags & ~LCT_HAS_EXIT) == LCT_SESSION) {
- spin_lock(&cl_envs_guard);
- list_add(&cle->ce_linkage, &cl_envs);
- cl_envs_cached_nr++;
- spin_unlock(&cl_envs_guard);
+ read_lock(&cl_envs[cpu].cec_guard);
+ list_add(&cle->ce_linkage, &cl_envs[cpu].cec_envs);
+ cl_envs[cpu].cec_count++;
+ read_unlock(&cl_envs[cpu].cec_guard);
} else {
cl_env_fini(cle);
}
+ put_cpu();
}
}
EXPORT_SYMBOL(cl_env_put);
/**
- * Declares a point of re-entrancy.
- *
- * \see cl_env_reexit()
- */
-void *cl_env_reenter(void)
-{
- return cl_env_detach(NULL);
-}
-EXPORT_SYMBOL(cl_env_reenter);
-
-/**
- * Exits re-entrancy.
- */
-void cl_env_reexit(void *cookie)
-{
- cl_env_detach(NULL);
- cl_env_attach(cookie);
-}
-EXPORT_SYMBOL(cl_env_reexit);
-
-/**
- * Setup user-supplied \a env as a current environment. This is to be used to
- * guaranteed that environment exists even when cl_env_get() fails. It is up
- * to user to ensure proper concurrency control.
- *
- * \see cl_env_unplant()
- */
-void cl_env_implant(struct lu_env *env, int *refcheck)
-{
- struct cl_env *cle = cl_env_container(env);
-
- LASSERT(cle->ce_ref > 0);
-
- cl_env_attach(cle);
- cl_env_get(refcheck);
- CDEBUG(D_OTHER, "%d@%p\n", cle->ce_ref, cle);
-}
-EXPORT_SYMBOL(cl_env_implant);
-
-/**
- * Detach environment installed earlier by cl_env_implant().
- */
-void cl_env_unplant(struct lu_env *env, int *refcheck)
-{
- struct cl_env *cle = cl_env_container(env);
-
- LASSERT(cle->ce_ref > 1);
-
- CDEBUG(D_OTHER, "%d@%p\n", cle->ce_ref, cle);
-
- cl_env_detach(cle);
- cl_env_put(env, refcheck);
-}
-EXPORT_SYMBOL(cl_env_unplant);
-
-struct lu_env *cl_env_nested_get(struct cl_env_nest *nest)
-{
- struct lu_env *env;
-
- nest->cen_cookie = NULL;
- env = cl_env_peek(&nest->cen_refcheck);
- if (env) {
- if (!cl_io_is_going(env))
- return env;
- cl_env_put(env, &nest->cen_refcheck);
- nest->cen_cookie = cl_env_reenter();
- }
- env = cl_env_get(&nest->cen_refcheck);
- if (IS_ERR(env)) {
- cl_env_reexit(nest->cen_cookie);
- return env;
- }
-
- LASSERT(!cl_io_is_going(env));
- return env;
-}
-EXPORT_SYMBOL(cl_env_nested_get);
-
-void cl_env_nested_put(struct cl_env_nest *nest, struct lu_env *env)
-{
- cl_env_put(env, &nest->cen_refcheck);
- cl_env_reexit(nest->cen_cookie);
-}
-EXPORT_SYMBOL(cl_env_nested_put);
-
-/**
* Converts struct ost_lvb to struct cl_attr.
*
* \see cl_attr2lvb
@@ -999,6 +834,10 @@ static int cl_env_percpu_init(void)
for_each_possible_cpu(i) {
struct lu_env *env;
+ rwlock_init(&cl_envs[i].cec_guard);
+ INIT_LIST_HEAD(&cl_envs[i].cec_envs);
+ cl_envs[i].cec_count = 0;
+
cle = &cl_env_percpu[i];
env = &cle->ce_lu;
@@ -1066,7 +905,6 @@ void cl_env_percpu_put(struct lu_env *env)
LASSERT(cle->ce_ref == 0);
CL_ENV_DEC(busy);
- cl_env_detach(cle);
cle->ce_debug = NULL;
put_cpu();
@@ -1080,7 +918,6 @@ struct lu_env *cl_env_percpu_get(void)
cle = &cl_env_percpu[get_cpu()];
cl_env_init0(cle, __builtin_return_address(0));
- cl_env_attach(cle);
return &cle->ce_lu;
}
EXPORT_SYMBOL(cl_env_percpu_get);
@@ -1144,51 +981,19 @@ LU_KEY_INIT_FINI(cl0, struct cl_thread_info);
static void *cl_key_init(const struct lu_context *ctx,
struct lu_context_key *key)
{
- struct cl_thread_info *info;
-
- info = cl0_key_init(ctx, key);
- if (!IS_ERR(info)) {
- size_t i;
-
- for (i = 0; i < ARRAY_SIZE(info->clt_counters); ++i)
- lu_ref_init(&info->clt_counters[i].ctc_locks_locked);
- }
- return info;
+ return cl0_key_init(ctx, key);
}
static void cl_key_fini(const struct lu_context *ctx,
struct lu_context_key *key, void *data)
{
- struct cl_thread_info *info;
- size_t i;
-
- info = data;
- for (i = 0; i < ARRAY_SIZE(info->clt_counters); ++i)
- lu_ref_fini(&info->clt_counters[i].ctc_locks_locked);
cl0_key_fini(ctx, key, data);
}
-static void cl_key_exit(const struct lu_context *ctx,
- struct lu_context_key *key, void *data)
-{
- struct cl_thread_info *info = data;
- size_t i;
-
- for (i = 0; i < ARRAY_SIZE(info->clt_counters); ++i) {
- LASSERT(info->clt_counters[i].ctc_nr_held == 0);
- LASSERT(info->clt_counters[i].ctc_nr_used == 0);
- LASSERT(info->clt_counters[i].ctc_nr_locks_acquired == 0);
- LASSERT(info->clt_counters[i].ctc_nr_locks_locked == 0);
- lu_ref_fini(&info->clt_counters[i].ctc_locks_locked);
- lu_ref_init(&info->clt_counters[i].ctc_locks_locked);
- }
-}
-
static struct lu_context_key cl_key = {
.lct_tags = LCT_CL_THREAD,
.lct_init = cl_key_init,
.lct_fini = cl_key_fini,
- .lct_exit = cl_key_exit
};
static struct lu_kmem_descr cl_object_caches[] = {
@@ -1212,13 +1017,15 @@ int cl_global_init(void)
{
int result;
- result = cl_env_store_init();
- if (result)
- return result;
+ cl_envs = kzalloc(sizeof(*cl_envs) * num_possible_cpus(), GFP_KERNEL);
+ if (!cl_envs) {
+ result = -ENOMEM;
+ goto out;
+ }
result = lu_kmem_init(cl_object_caches);
if (result)
- goto out_store;
+ goto out_envs;
LU_CONTEXT_KEY_INIT(&cl_key);
result = lu_context_key_register(&cl_key);
@@ -1228,16 +1035,17 @@ int cl_global_init(void)
result = cl_env_percpu_init();
if (result)
/* no cl_env_percpu_fini on error */
- goto out_context;
+ goto out_keys;
return 0;
-out_context:
+out_keys:
lu_context_key_degister(&cl_key);
out_kmem:
lu_kmem_fini(cl_object_caches);
-out_store:
- cl_env_store_fini();
+out_envs:
+ kfree(cl_envs);
+out:
return result;
}
@@ -1249,5 +1057,5 @@ void cl_global_fini(void)
cl_env_percpu_fini();
lu_context_key_degister(&cl_key);
lu_kmem_fini(cl_object_caches);
- cl_env_store_fini();
+ kfree(cl_envs);
}
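
The hash table that mapped threads to environments is gone; cl_env_get() now always allocates or pulls from the per-CPU cache, so callers must keep the returned pointer rather than re-looking it up. The long-standing usage pattern, which the per-CPU cache makes cheap on the hot path (a minimal sketch, not taken from the patch):

	static int example_with_cl_env(void)
	{
		struct lu_env *env;
		int refcheck;

		env = cl_env_get(&refcheck);	/* warm: per-CPU cache hit */
		if (IS_ERR(env))
			return PTR_ERR(env);

		/* ... use env for cl_object/cl_io calls ... */

		cl_env_put(env, &refcheck);	/* returned to this CPU's list */
		return 0;
	}

Only cl_env_cache_purge() touches another CPU's list, which is why cec_guard is a rwlock: the CPU-local get/put fast path takes it for read, and only the cross-CPU purge takes it for write.
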
diff --git a/drivers/staging/lustre/lustre/obdclass/cl_page.c b/drivers/staging/lustre/lustre/obdclass/cl_page.c
index 63973ba096da..cd9a40ca4448 100644
--- a/drivers/staging/lustre/lustre/obdclass/cl_page.c
+++ b/drivers/staging/lustre/lustre/obdclass/cl_page.c
@@ -99,7 +99,6 @@ static void cl_page_free(const struct lu_env *env, struct cl_page *page)
PASSERT(env, page, list_empty(&page->cp_batch));
PASSERT(env, page, !page->cp_owner);
- PASSERT(env, page, !page->cp_req);
PASSERT(env, page, page->cp_state == CPS_FREEING);
while (!list_empty(&page->cp_layers)) {
@@ -150,7 +149,6 @@ struct cl_page *cl_page_alloc(const struct lu_env *env,
page->cp_type = type;
INIT_LIST_HEAD(&page->cp_layers);
INIT_LIST_HEAD(&page->cp_batch);
- INIT_LIST_HEAD(&page->cp_flight);
lu_ref_init(&page->cp_reference);
head = o->co_lu.lo_header;
list_for_each_entry(o, &head->loh_layers, co_lu.lo_linkage) {
@@ -390,30 +388,6 @@ EXPORT_SYMBOL(cl_page_at);
__result; \
})
-#define CL_PAGE_INVOKE_REVERSE(_env, _page, _op, _proto, ...) \
-({ \
- const struct lu_env *__env = (_env); \
- struct cl_page *__page = (_page); \
- const struct cl_page_slice *__scan; \
- int __result; \
- ptrdiff_t __op = (_op); \
- int (*__method)_proto; \
- \
- __result = 0; \
- list_for_each_entry_reverse(__scan, &__page->cp_layers, \
- cpl_linkage) { \
- __method = *(void **)((char *)__scan->cpl_ops + __op); \
- if (__method) { \
- __result = (*__method)(__env, __scan, ## __VA_ARGS__); \
- if (__result != 0) \
- break; \
- } \
- } \
- if (__result > 0) \
- __result = 0; \
- __result; \
-})
-
#define CL_PAGE_INVOID(_env, _page, _op, _proto, ...) \
do { \
const struct lu_env *__env = (_env); \
@@ -552,7 +526,6 @@ static int cl_page_own0(const struct lu_env *env, struct cl_io *io,
io, nonblock);
if (result == 0) {
PASSERT(env, pg, !pg->cp_owner);
- PASSERT(env, pg, !pg->cp_req);
pg->cp_owner = cl_io_top(io);
cl_page_owner_set(pg);
if (pg->cp_state != CPS_FREEING) {
@@ -694,7 +667,7 @@ static void cl_page_delete0(const struct lu_env *env, struct cl_page *pg)
PASSERT(env, pg, pg->cp_state != CPS_FREEING);
/*
- * Severe all ways to obtain new pointers to @pg.
+ * Sever all ways to obtain new pointers to @pg.
*/
cl_page_owner_clear(pg);
@@ -845,8 +818,6 @@ void cl_page_completion(const struct lu_env *env,
struct cl_sync_io *anchor = pg->cp_sync_io;
PASSERT(env, pg, crt < CRT_NR);
- /* cl_page::cp_req already cleared by the caller (osc_completion()) */
- PASSERT(env, pg, !pg->cp_req);
PASSERT(env, pg, pg->cp_state == cl_req_type_state(crt));
CL_PAGE_HEADER(D_TRACE, env, pg, "%d %d\n", crt, ioret);
@@ -860,16 +831,8 @@ void cl_page_completion(const struct lu_env *env,
if (anchor) {
LASSERT(pg->cp_sync_io == anchor);
pg->cp_sync_io = NULL;
- }
- /*
- * As page->cp_obj is pinned by a reference from page->cp_req, it is
- * safe to call cl_page_put() without risking object destruction in a
- * non-blocking context.
- */
- cl_page_put(env, pg);
-
- if (anchor)
cl_sync_io_note(env, anchor, ioret);
+ }
}
EXPORT_SYMBOL(cl_page_completion);
@@ -927,29 +890,6 @@ int cl_page_flush(const struct lu_env *env, struct cl_io *io,
EXPORT_SYMBOL(cl_page_flush);
/**
- * Checks whether page is protected by any extent lock is at least required
- * mode.
- *
- * \return the same as in cl_page_operations::cpo_is_under_lock() method.
- * \see cl_page_operations::cpo_is_under_lock()
- */
-int cl_page_is_under_lock(const struct lu_env *env, struct cl_io *io,
- struct cl_page *page, pgoff_t *max_index)
-{
- int rc;
-
- PINVRNT(env, page, cl_page_invariant(page));
-
- rc = CL_PAGE_INVOKE_REVERSE(env, page, CL_PAGE_OP(cpo_is_under_lock),
- (const struct lu_env *,
- const struct cl_page_slice *,
- struct cl_io *, pgoff_t *),
- io, max_index);
- return rc;
-}
-EXPORT_SYMBOL(cl_page_is_under_lock);
-
-/**
* Tells transfer engine that only part of a page is to be transmitted.
*
* \see cl_page_operations::cpo_clip()
@@ -974,10 +914,10 @@ void cl_page_header_print(const struct lu_env *env, void *cookie,
lu_printer_t printer, const struct cl_page *pg)
{
(*printer)(env, cookie,
- "page@%p[%d %p %d %d %p %p]\n",
+ "page@%p[%d %p %d %d %p]\n",
pg, atomic_read(&pg->cp_ref), pg->cp_obj,
pg->cp_state, pg->cp_type,
- pg->cp_owner, pg->cp_req);
+ pg->cp_owner);
}
EXPORT_SYMBOL(cl_page_header_print);
diff --git a/drivers/staging/lustre/lustre/obdclass/genops.c b/drivers/staging/lustre/lustre/obdclass/genops.c
index cf8bb2a2f40b..fa0d38ddccb2 100644
--- a/drivers/staging/lustre/lustre/obdclass/genops.c
+++ b/drivers/staging/lustre/lustre/obdclass/genops.c
@@ -907,6 +907,8 @@ struct obd_import *class_new_import(struct obd_device *obd)
INIT_LIST_HEAD(&imp->imp_sending_list);
INIT_LIST_HEAD(&imp->imp_delayed_list);
INIT_LIST_HEAD(&imp->imp_committed_list);
+ INIT_LIST_HEAD(&imp->imp_unreplied_list);
+ imp->imp_known_replied_xid = 0;
imp->imp_replay_cursor = &imp->imp_committed_list;
spin_lock_init(&imp->imp_lock);
imp->imp_last_success_conn = 0;
@@ -1408,13 +1410,33 @@ EXPORT_SYMBOL(obd_get_max_rpcs_in_flight);
int obd_set_max_rpcs_in_flight(struct client_obd *cli, __u32 max)
{
struct obd_request_slot_waiter *orsw;
+ const char *typ_name;
__u32 old;
int diff;
+ int rc;
int i;
if (max > OBD_MAX_RIF_MAX || max < 1)
return -ERANGE;
+ typ_name = cli->cl_import->imp_obd->obd_type->typ_name;
+ if (!strcmp(typ_name, LUSTRE_MDC_NAME)) {
+ /*
+ * adjust max_mod_rpcs_in_flight to ensure it is always
+ * strictly lower than max_rpcs_in_flight
+ */
+ if (max < 2) {
+ CERROR("%s: cannot set max_rpcs_in_flight to 1 because it must be higher than max_mod_rpcs_in_flight value\n",
+ cli->cl_import->imp_obd->obd_name);
+ return -ERANGE;
+ }
+ if (max <= cli->cl_max_mod_rpcs_in_flight) {
+ rc = obd_set_max_mod_rpcs_in_flight(cli, max - 1);
+ if (rc)
+ return rc;
+ }
+ }
+
spin_lock(&cli->cl_loi_list_lock);
old = cli->cl_max_rpcs_in_flight;
cli->cl_max_rpcs_in_flight = max;
@@ -1436,3 +1458,209 @@ int obd_set_max_rpcs_in_flight(struct client_obd *cli, __u32 max)
return 0;
}
EXPORT_SYMBOL(obd_set_max_rpcs_in_flight);
+
+int obd_set_max_mod_rpcs_in_flight(struct client_obd *cli, __u16 max)
+{
+ struct obd_connect_data *ocd;
+ u16 maxmodrpcs;
+ u16 prev;
+
+ if (max > OBD_MAX_RIF_MAX || max < 1)
+ return -ERANGE;
+
+ /* cannot exceed or equal max_rpcs_in_flight */
+ if (max >= cli->cl_max_rpcs_in_flight) {
+ CERROR("%s: can't set max_mod_rpcs_in_flight to a value (%hu) higher or equal to max_rpcs_in_flight value (%u)\n",
+ cli->cl_import->imp_obd->obd_name,
+ max, cli->cl_max_rpcs_in_flight);
+ return -ERANGE;
+ }
+
+ /* cannot exceed max modify RPCs in flight supported by the server */
+ ocd = &cli->cl_import->imp_connect_data;
+ if (ocd->ocd_connect_flags & OBD_CONNECT_MULTIMODRPCS)
+ maxmodrpcs = ocd->ocd_maxmodrpcs;
+ else
+ maxmodrpcs = 1;
+ if (max > maxmodrpcs) {
+ CERROR("%s: can't set max_mod_rpcs_in_flight to a value (%hu) higher than max_mod_rpcs_per_client value (%hu) returned by the server at connection\n",
+ cli->cl_import->imp_obd->obd_name,
+ max, maxmodrpcs);
+ return -ERANGE;
+ }
+
+ spin_lock(&cli->cl_mod_rpcs_lock);
+
+ prev = cli->cl_max_mod_rpcs_in_flight;
+ cli->cl_max_mod_rpcs_in_flight = max;
+
+ /* wakeup waiters if limit has been increased */
+ if (cli->cl_max_mod_rpcs_in_flight > prev)
+ wake_up(&cli->cl_mod_rpcs_waitq);
+
+ spin_unlock(&cli->cl_mod_rpcs_lock);
+
+ return 0;
+}
+EXPORT_SYMBOL(obd_set_max_mod_rpcs_in_flight);
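
Together with obd_set_max_rpcs_in_flight() above, this setter maintains the invariant cl_max_mod_rpcs_in_flight < cl_max_rpcs_in_flight on MDC clients. A hedged usage sketch (the limit values are arbitrary):

	/* Sketch: lowering max_rpcs_in_flight on an MDC first drags
	 * max_mod_rpcs_in_flight below it, so one call keeps mod < max.
	 */
	static int tune_mdc_limits(struct client_obd *cli)
	{
		int rc;

		rc = obd_set_max_rpcs_in_flight(cli, 8);
		if (rc)
			return rc;
		/* must stay strictly below 8 */
		return obd_set_max_mod_rpcs_in_flight(cli, 4);
	}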
+
+#define pct(a, b) (b ? (a * 100) / b : 0)
+
+int obd_mod_rpc_stats_seq_show(struct client_obd *cli, struct seq_file *seq)
+{
+ unsigned long mod_tot = 0, mod_cum;
+ struct timespec64 now;
+ int i;
+
+ ktime_get_real_ts64(&now);
+
+ spin_lock(&cli->cl_mod_rpcs_lock);
+
+ seq_printf(seq, "snapshot_time: %llu.%9lu (secs.nsecs)\n",
+ (s64)now.tv_sec, (unsigned long)now.tv_nsec);
+ seq_printf(seq, "modify_RPCs_in_flight: %hu\n",
+ cli->cl_mod_rpcs_in_flight);
+
+ seq_puts(seq, "\n\t\t\tmodify\n");
+ seq_puts(seq, "rpcs in flight rpcs %% cum %%\n");
+
+ mod_tot = lprocfs_oh_sum(&cli->cl_mod_rpcs_hist);
+
+ mod_cum = 0;
+ for (i = 0; i < OBD_HIST_MAX; i++) {
+ unsigned long mod = cli->cl_mod_rpcs_hist.oh_buckets[i];
+
+ mod_cum += mod;
+ seq_printf(seq, "%d:\t\t%10lu %3lu %3lu\n",
+ i, mod, pct(mod, mod_tot),
+ pct(mod_cum, mod_tot));
+ if (mod_cum == mod_tot)
+ break;
+ }
+
+ spin_unlock(&cli->cl_mod_rpcs_lock);
+
+ return 0;
+}
+EXPORT_SYMBOL(obd_mod_rpc_stats_seq_show);
+#undef pct
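
Note that pct() expands its arguments unparenthesised; the callers above only pass plain variables, so this is safe here. A fully hygienic variant, as a sketch:

	/* Sketch: parenthesised arguments so expressions such as
	 * pct(a + b, tot) evaluate as intended.
	 */
	#define pct(a, b) ((b) ? ((a) * 100) / (b) : 0)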
+
+/*
+ * The number of modify RPCs sent in parallel is limited
+ * because the server has a finite number of slots per client to
+ * store request results and ensure reply reconstruction when needed.
+ * On the client, this limit is stored in cl_max_mod_rpcs_in_flight,
+ * which takes both the server limit and the cl_max_rpcs_in_flight
+ * value into account.
+ * On the MDC client, to avoid a potential deadlock (see Bugzilla 3462),
+ * one close request is allowed above the maximum.
+ */
+static inline bool obd_mod_rpc_slot_avail_locked(struct client_obd *cli,
+ bool close_req)
+{
+ bool avail;
+
+ /* A slot is available if
+ * - number of modify RPCs in flight is less than the max
+ * - it's a close RPC and no other close request is in flight
+ */
+ avail = cli->cl_mod_rpcs_in_flight < cli->cl_max_mod_rpcs_in_flight ||
+ (close_req && !cli->cl_close_rpcs_in_flight);
+
+ return avail;
+}
+
+static inline bool obd_mod_rpc_slot_avail(struct client_obd *cli,
+ bool close_req)
+{
+ bool avail;
+
+ spin_lock(&cli->cl_mod_rpcs_lock);
+ avail = obd_mod_rpc_slot_avail_locked(cli, close_req);
+ spin_unlock(&cli->cl_mod_rpcs_lock);
+ return avail;
+}
+
+/* Get a modify RPC slot from the obd client @cli according
+ * to the kind of operation @opc that is going to be sent
+ * and the intent @it of the operation if it applies.
+ * If the maximum number of modify RPCs in flight is reached
+ * the thread is put to sleep.
+ * Returns the tag to be set in the request message. Tag 0
+ * is reserved for non-modifying requests.
+ */
+u16 obd_get_mod_rpc_slot(struct client_obd *cli, __u32 opc,
+ struct lookup_intent *it)
+{
+ struct l_wait_info lwi = LWI_INTR(NULL, NULL);
+ bool close_req = false;
+ u16 i, max;
+
+ /* read-only metadata RPCs don't consume a slot on MDT
+ * for reply reconstruction
+ */
+ if (it && (it->it_op == IT_GETATTR || it->it_op == IT_LOOKUP ||
+ it->it_op == IT_LAYOUT || it->it_op == IT_READDIR))
+ return 0;
+
+ if (opc == MDS_CLOSE)
+ close_req = true;
+
+ do {
+ spin_lock(&cli->cl_mod_rpcs_lock);
+ max = cli->cl_max_mod_rpcs_in_flight;
+ if (obd_mod_rpc_slot_avail_locked(cli, close_req)) {
+ /* there is a slot available */
+ cli->cl_mod_rpcs_in_flight++;
+ if (close_req)
+ cli->cl_close_rpcs_in_flight++;
+ lprocfs_oh_tally(&cli->cl_mod_rpcs_hist,
+ cli->cl_mod_rpcs_in_flight);
+ /* find a free tag */
+ i = find_first_zero_bit(cli->cl_mod_tag_bitmap,
+ max + 1);
+ LASSERT(i < OBD_MAX_RIF_MAX);
+ LASSERT(!test_and_set_bit(i, cli->cl_mod_tag_bitmap));
+ spin_unlock(&cli->cl_mod_rpcs_lock);
+ /* tag 0 is reserved for non-modify RPCs */
+ return i + 1;
+ }
+ spin_unlock(&cli->cl_mod_rpcs_lock);
+
+ CDEBUG(D_RPCTRACE, "%s: sleeping for a modify RPC slot opc %u, max %hu\n",
+ cli->cl_import->imp_obd->obd_name, opc, max);
+
+ l_wait_event(cli->cl_mod_rpcs_waitq,
+ obd_mod_rpc_slot_avail(cli, close_req), &lwi);
+ } while (true);
+}
+EXPORT_SYMBOL(obd_get_mod_rpc_slot);
+
+/*
+ * Release a modify RPC slot of the obd client @cli according
+ * to the kind of operation @opc that has been sent and the
+ * intent @it of the operation, if it applies.
+ */
+void obd_put_mod_rpc_slot(struct client_obd *cli, u32 opc,
+ struct lookup_intent *it, u16 tag)
+{
+ bool close_req = false;
+
+ if (it && (it->it_op == IT_GETATTR || it->it_op == IT_LOOKUP ||
+ it->it_op == IT_LAYOUT || it->it_op == IT_READDIR))
+ return;
+
+ if (opc == MDS_CLOSE)
+ close_req = true;
+
+ spin_lock(&cli->cl_mod_rpcs_lock);
+ cli->cl_mod_rpcs_in_flight--;
+ if (close_req)
+ cli->cl_close_rpcs_in_flight--;
+ /* release the tag in the bitmap */
+ LASSERT(tag - 1 < OBD_MAX_RIF_MAX);
+ LASSERT(test_and_clear_bit(tag - 1, cli->cl_mod_tag_bitmap) != 0);
+ spin_unlock(&cli->cl_mod_rpcs_lock);
+ wake_up(&cli->cl_mod_rpcs_waitq);
+}
+EXPORT_SYMBOL(obd_put_mod_rpc_slot);
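
These two helpers are meant to bracket each modify RPC: the tag returned by obd_get_mod_rpc_slot() travels in the request message and is handed back on completion. A sketch of the calling pattern, where send_request() stands in for the real transport call:

	/* Sketch: slot get/put around a modify RPC; tag 0 is reserved
	 * for non-modify RPCs, so a granted slot tag is always >= 1.
	 */
	static int send_modify_rpc(struct client_obd *cli, u32 opc,
				   struct lookup_intent *it)
	{
		u16 tag = obd_get_mod_rpc_slot(cli, opc, it); /* may sleep */
		int rc;

		rc = send_request(cli, opc, tag);	/* hypothetical */
		obd_put_mod_rpc_slot(cli, opc, it, tag);
		return rc;
	}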
diff --git a/drivers/staging/lustre/lustre/obdclass/linux/linux-module.c b/drivers/staging/lustre/lustre/obdclass/linux/linux-module.c
index be09e04b042f..9f5e8299d7e4 100644
--- a/drivers/staging/lustre/lustre/obdclass/linux/linux-module.c
+++ b/drivers/staging/lustre/lustre/obdclass/linux/linux-module.c
@@ -217,8 +217,8 @@ static ssize_t pinger_show(struct kobject *kobj, struct attribute *attr,
return sprintf(buf, "%s\n", "on");
}
-static ssize_t health_show(struct kobject *kobj, struct attribute *attr,
- char *buf)
+static ssize_t
+health_check_show(struct kobject *kobj, struct attribute *attr, char *buf)
{
bool healthy = true;
int i;
@@ -311,14 +311,14 @@ EXPORT_SYMBOL_GPL(debugfs_lustre_root);
LUSTRE_RO_ATTR(version);
LUSTRE_RO_ATTR(pinger);
-LUSTRE_RO_ATTR(health);
+LUSTRE_RO_ATTR(health_check);
LUSTRE_RW_ATTR(jobid_var);
LUSTRE_RW_ATTR(jobid_name);
static struct attribute *lustre_attrs[] = {
&lustre_attr_version.attr,
&lustre_attr_pinger.attr,
- &lustre_attr_health.attr,
+ &lustre_attr_health_check.attr,
&lustre_attr_jobid_name.attr,
&lustre_attr_jobid_var.attr,
NULL,
diff --git a/drivers/staging/lustre/lustre/obdclass/linux/linux-obdo.c b/drivers/staging/lustre/lustre/obdclass/linux/linux-obdo.c
deleted file mode 100644
index 41b77a30feb3..000000000000
--- a/drivers/staging/lustre/lustre/obdclass/linux/linux-obdo.c
+++ /dev/null
@@ -1,80 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * lustre/obdclass/linux/linux-obdo.c
- *
- * Object Devices Class Driver
- * These are the only exported functions, they provide some generic
- * infrastructure for managing object devices
- */
-
-#define DEBUG_SUBSYSTEM S_CLASS
-
-#include <linux/module.h>
-#include "../../include/obd_class.h"
-#include "../../include/lustre/lustre_idl.h"
-
-#include <linux/fs.h>
-
-void obdo_refresh_inode(struct inode *dst, const struct obdo *src, u32 valid)
-{
- valid &= src->o_valid;
-
- if (valid & (OBD_MD_FLCTIME | OBD_MD_FLMTIME))
- CDEBUG(D_INODE,
- "valid %#llx, cur time %lu/%lu, new %llu/%llu\n",
- src->o_valid, LTIME_S(dst->i_mtime),
- LTIME_S(dst->i_ctime), src->o_mtime, src->o_ctime);
-
- if (valid & OBD_MD_FLATIME && src->o_atime > LTIME_S(dst->i_atime))
- LTIME_S(dst->i_atime) = src->o_atime;
- if (valid & OBD_MD_FLMTIME && src->o_mtime > LTIME_S(dst->i_mtime))
- LTIME_S(dst->i_mtime) = src->o_mtime;
- if (valid & OBD_MD_FLCTIME && src->o_ctime > LTIME_S(dst->i_ctime))
- LTIME_S(dst->i_ctime) = src->o_ctime;
- if (valid & OBD_MD_FLSIZE)
- i_size_write(dst, src->o_size);
- /* optimum IO size */
- if (valid & OBD_MD_FLBLKSZ && src->o_blksize > (1 << dst->i_blkbits))
- dst->i_blkbits = ffs(src->o_blksize) - 1;
-
- if (dst->i_blkbits < PAGE_SHIFT)
- dst->i_blkbits = PAGE_SHIFT;
-
- /* allocation of space */
- if (valid & OBD_MD_FLBLOCKS && src->o_blocks > dst->i_blocks)
- /*
- * XXX shouldn't overflow be checked here like in
- * obdo_to_inode().
- */
- dst->i_blocks = src->o_blocks;
-}
-EXPORT_SYMBOL(obdo_refresh_inode);
diff --git a/drivers/staging/lustre/lustre/obdclass/llog.c b/drivers/staging/lustre/lustre/obdclass/llog.c
index 43797f106745..736ea1067c93 100644
--- a/drivers/staging/lustre/lustre/obdclass/llog.c
+++ b/drivers/staging/lustre/lustre/obdclass/llog.c
@@ -43,8 +43,9 @@
#define DEBUG_SUBSYSTEM S_LOG
-#include "../include/obd_class.h"
+#include "../include/llog_swab.h"
#include "../include/lustre_log.h"
+#include "../include/obd_class.h"
#include "llog_internal.h"
/*
@@ -80,8 +81,7 @@ static void llog_free_handle(struct llog_handle *loghandle)
LASSERT(list_empty(&loghandle->u.phd.phd_entry));
else if (loghandle->lgh_hdr->llh_flags & LLOG_F_IS_CAT)
LASSERT(list_empty(&loghandle->u.chd.chd_head));
- LASSERT(sizeof(*loghandle->lgh_hdr) == LLOG_CHUNK_SIZE);
- kfree(loghandle->lgh_hdr);
+ kvfree(loghandle->lgh_hdr);
out:
kfree(loghandle);
}
@@ -115,20 +115,29 @@ static int llog_read_header(const struct lu_env *env,
rc = lop->lop_read_header(env, handle);
if (rc == LLOG_EEMPTY) {
struct llog_log_hdr *llh = handle->lgh_hdr;
+ size_t len;
+ /* lrh_len should be initialized in llog_init_handle */
handle->lgh_last_idx = 0; /* header is record with index 0 */
llh->llh_count = 1; /* for the header record */
llh->llh_hdr.lrh_type = LLOG_HDR_MAGIC;
- llh->llh_hdr.lrh_len = LLOG_CHUNK_SIZE;
- llh->llh_tail.lrt_len = LLOG_CHUNK_SIZE;
+ LASSERT(handle->lgh_ctxt->loc_chunk_size >= LLOG_MIN_CHUNK_SIZE);
+ llh->llh_hdr.lrh_len = handle->lgh_ctxt->loc_chunk_size;
llh->llh_hdr.lrh_index = 0;
- llh->llh_tail.lrt_index = 0;
llh->llh_timestamp = ktime_get_real_seconds();
if (uuid)
memcpy(&llh->llh_tgtuuid, uuid,
sizeof(llh->llh_tgtuuid));
llh->llh_bitmap_offset = offsetof(typeof(*llh), llh_bitmap);
- ext2_set_bit(0, llh->llh_bitmap);
+ /*
+ * Since an update of the llog header might also go through this
+ * function, reset the bitmap to 0 here.
+ */
+ len = llh->llh_hdr.lrh_len - llh->llh_bitmap_offset;
+ memset(LLOG_HDR_BITMAP(llh), 0, len - sizeof(llh->llh_tail));
+ ext2_set_bit(0, LLOG_HDR_BITMAP(llh));
+ LLOG_HDR_TAIL(llh)->lrt_len = llh->llh_hdr.lrh_len;
+ LLOG_HDR_TAIL(llh)->lrt_index = llh->llh_hdr.lrh_index;
rc = 0;
}
return rc;
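
The length computed above follows from the on-disk chunk layout: fixed header fields up to llh_bitmap_offset, then the bitmap, then a tail record at the very end of the chunk. A worked sketch of the arithmetic:

	/* Sketch: for chunk size C (= lrh_len) and bitmap offset B,
	 *
	 *   bitmap bytes   = C - B - sizeof(llh_tail)
	 *   trackable recs = (C - B - sizeof(llh_tail)) * 8
	 *
	 * and LLOG_HDR_TAIL(llh) lives at offset C - sizeof(llh_tail),
	 * which is why the memset stops sizeof(llh_tail) short of the end.
	 */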
@@ -137,16 +146,19 @@ static int llog_read_header(const struct lu_env *env,
int llog_init_handle(const struct lu_env *env, struct llog_handle *handle,
int flags, struct obd_uuid *uuid)
{
+ int chunk_size = handle->lgh_ctxt->loc_chunk_size;
enum llog_flag fmt = flags & LLOG_F_EXT_MASK;
struct llog_log_hdr *llh;
int rc;
LASSERT(!handle->lgh_hdr);
- llh = kzalloc(sizeof(*llh), GFP_NOFS);
+ LASSERT(chunk_size >= LLOG_MIN_CHUNK_SIZE);
+ llh = libcfs_kvzalloc(sizeof(*llh), GFP_NOFS);
if (!llh)
return -ENOMEM;
handle->lgh_hdr = llh;
+ handle->lgh_hdr_size = chunk_size;
/* first assign flags to use llog_client_ops */
llh->llh_flags = flags;
rc = llog_read_header(env, handle, uuid);
@@ -189,6 +201,7 @@ int llog_init_handle(const struct lu_env *env, struct llog_handle *handle,
LASSERT(list_empty(&handle->u.chd.chd_head));
INIT_LIST_HEAD(&handle->u.chd.chd_head);
llh->llh_size = sizeof(struct llog_logid_rec);
+ llh->llh_flags |= LLOG_F_IS_FIXSIZE;
} else if (!(flags & LLOG_F_IS_PLAIN)) {
CERROR("%s: unknown flags: %#x (expected %#x or %#x)\n",
handle->lgh_ctxt->loc_obd->obd_name,
@@ -198,7 +211,7 @@ int llog_init_handle(const struct lu_env *env, struct llog_handle *handle,
llh->llh_flags |= fmt;
out:
if (rc) {
- kfree(llh);
+ kvfree(llh);
handle->lgh_hdr = NULL;
}
return rc;
@@ -212,15 +225,21 @@ static int llog_process_thread(void *arg)
struct llog_log_hdr *llh = loghandle->lgh_hdr;
struct llog_process_cat_data *cd = lpi->lpi_catdata;
char *buf;
- __u64 cur_offset = LLOG_CHUNK_SIZE;
- __u64 last_offset;
+ u64 cur_offset, tmp_offset;
+ int chunk_size;
int rc = 0, index = 1, last_index;
int saved_index = 0;
int last_called_index = 0;
- LASSERT(llh);
+ if (!llh)
+ return -EINVAL;
+
+ cur_offset = llh->llh_hdr.lrh_len;
+ chunk_size = llh->llh_hdr.lrh_len;
+ /* expect chunk_size to be power of two */
+ LASSERT(is_power_of_2(chunk_size));
- buf = kzalloc(LLOG_CHUNK_SIZE, GFP_NOFS);
+ buf = libcfs_kvzalloc(chunk_size, GFP_NOFS);
if (!buf) {
lpi->lpi_rc = -ENOMEM;
return 0;
@@ -233,41 +252,53 @@ static int llog_process_thread(void *arg)
if (cd && cd->lpcd_last_idx)
last_index = cd->lpcd_last_idx;
else
- last_index = LLOG_BITMAP_BYTES * 8 - 1;
-
- /* Record is not in this buffer. */
- if (index > last_index)
- goto out;
+ last_index = LLOG_HDR_BITMAP_SIZE(llh) - 1;
while (rc == 0) {
+ unsigned int buf_offset = 0;
struct llog_rec_hdr *rec;
+ bool partial_chunk;
+ off_t chunk_offset;
/* skip records not set in bitmap */
while (index <= last_index &&
- !ext2_test_bit(index, llh->llh_bitmap))
+ !ext2_test_bit(index, LLOG_HDR_BITMAP(llh)))
++index;
- LASSERT(index <= last_index + 1);
- if (index == last_index + 1)
+ if (index > last_index)
break;
-repeat:
+
CDEBUG(D_OTHER, "index: %d last_index %d\n",
index, last_index);
-
+repeat:
/* get the buf with our target record; avoid old garbage */
- memset(buf, 0, LLOG_CHUNK_SIZE);
- last_offset = cur_offset;
+ memset(buf, 0, chunk_size);
rc = llog_next_block(lpi->lpi_env, loghandle, &saved_index,
- index, &cur_offset, buf, LLOG_CHUNK_SIZE);
+ index, &cur_offset, buf, chunk_size);
if (rc)
goto out;
+ /*
+ * NB: after llog_next_block() call the cur_offset is the
+ * offset of the next block after read one.
+ * The absolute offset of the current chunk is calculated
+ * from cur_offset value and stored in chunk_offset variable.
+ */
+ tmp_offset = cur_offset;
+ if (do_div(tmp_offset, chunk_size)) {
+ partial_chunk = true;
+ chunk_offset = cur_offset & ~(chunk_size - 1);
+ } else {
+ partial_chunk = false;
+ chunk_offset = cur_offset - chunk_size;
+ }
+
/* NB: when rec->lrh_len is accessed it is already swabbed
* since it is used at the "end" of the loop and the rec
* swabbing is done at the beginning of the loop.
*/
- for (rec = (struct llog_rec_hdr *)buf;
- (char *)rec < buf + LLOG_CHUNK_SIZE;
+ for (rec = (struct llog_rec_hdr *)(buf + buf_offset);
+ (char *)rec < buf + chunk_size;
rec = llog_rec_hdr_next(rec)) {
CDEBUG(D_OTHER, "processing rec 0x%p type %#x\n",
rec, rec->lrh_type);
@@ -278,15 +309,29 @@ repeat:
CDEBUG(D_OTHER, "after swabbing, type=%#x idx=%d\n",
rec->lrh_type, rec->lrh_index);
- if (rec->lrh_index == 0) {
- /* probably another rec just got added? */
- rc = 0;
- if (index <= loghandle->lgh_last_idx)
- goto repeat;
- goto out; /* no more records */
+ /*
+ * For a partial chunk the tail is zeroed, so check for
+ * index 0 to detect it.
+ */
+ if (partial_chunk && !rec->lrh_index) {
+ /* a concurrent llog_add() might append new records
+ * while we are processing; if the index is still
+ * within the log, re-read the current chunk to pick
+ * them up.
+ */
+ if (index > loghandle->lgh_last_idx) {
+ rc = 0;
+ goto out;
+ }
+ CDEBUG(D_OTHER, "Re-read last llog buffer for new records, index %u, last %u\n",
+ index, loghandle->lgh_last_idx);
+ /* save offset inside buffer for the re-read */
+ buf_offset = (char *)rec - (char *)buf;
+ cur_offset = chunk_offset;
+ goto repeat;
}
- if (rec->lrh_len == 0 ||
- rec->lrh_len > LLOG_CHUNK_SIZE) {
+
+ if (!rec->lrh_len || rec->lrh_len > chunk_size) {
CWARN("invalid length %d in llog record for index %d/%d\n",
rec->lrh_len,
rec->lrh_index, index);
@@ -300,32 +345,38 @@ repeat:
continue;
}
+ if (rec->lrh_index != index) {
+ CERROR("%s: Invalid record: index %u but expected %u\n",
+ loghandle->lgh_ctxt->loc_obd->obd_name,
+ rec->lrh_index, index);
+ rc = -ERANGE;
+ goto out;
+ }
+
CDEBUG(D_OTHER,
"lrh_index: %d lrh_len: %d (%d remains)\n",
rec->lrh_index, rec->lrh_len,
- (int)(buf + LLOG_CHUNK_SIZE - (char *)rec));
+ (int)(buf + chunk_size - (char *)rec));
loghandle->lgh_cur_idx = rec->lrh_index;
loghandle->lgh_cur_offset = (char *)rec - (char *)buf +
- last_offset;
+ chunk_offset;
/* if set, process the callback on this record */
- if (ext2_test_bit(index, llh->llh_bitmap)) {
+ if (ext2_test_bit(index, LLOG_HDR_BITMAP(llh))) {
rc = lpi->lpi_cb(lpi->lpi_env, loghandle, rec,
lpi->lpi_cbdata);
last_called_index = index;
if (rc)
goto out;
- } else {
- CDEBUG(D_OTHER, "Skipped index %d\n", index);
}
- /* next record, still in buffer? */
- ++index;
- if (index > last_index) {
+ /* exit if the last index is reached */
+ if (index >= last_index) {
rc = 0;
goto out;
}
+ index++;
}
}
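
After llog_next_block(), cur_offset points just past the block that was read, so the chunk's absolute start has to be recovered from it; do_div() yields the remainder in place. A worked example, assuming chunk_size = 4096:

	/* Sketch: cur_offset = 10240 -> do_div(tmp, 4096) leaves
	 * remainder 2048 (non-zero): a partial chunk starting at
	 * 10240 & ~4095 = 8192.
	 * cur_offset = 8192 -> remainder 0: a full chunk starting at
	 * 8192 - 4096 = 4096.
	 */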
diff --git a/drivers/staging/lustre/lustre/obdclass/llog_obd.c b/drivers/staging/lustre/lustre/obdclass/llog_obd.c
index a4277d684614..8574ad401f66 100644
--- a/drivers/staging/lustre/lustre/obdclass/llog_obd.c
+++ b/drivers/staging/lustre/lustre/obdclass/llog_obd.c
@@ -158,6 +158,7 @@ int llog_setup(const struct lu_env *env, struct obd_device *obd,
mutex_init(&ctxt->loc_mutex);
ctxt->loc_exp = class_export_get(disk_obd->obd_self_export);
ctxt->loc_flags = LLOG_CTXT_FLAG_UNINITIALIZED;
+ ctxt->loc_chunk_size = LLOG_MIN_CHUNK_SIZE;
rc = llog_group_set_ctxt(olg, ctxt, index);
if (rc) {
diff --git a/drivers/staging/lustre/lustre/obdclass/llog_swab.c b/drivers/staging/lustre/lustre/obdclass/llog_swab.c
index 8c4c1b3f1b45..723c212c6747 100644
--- a/drivers/staging/lustre/lustre/obdclass/llog_swab.c
+++ b/drivers/staging/lustre/lustre/obdclass/llog_swab.c
@@ -38,6 +38,7 @@
#define DEBUG_SUBSYSTEM S_LOG
+#include "../include/llog_swab.h"
#include "../include/lustre_log.h"
static void print_llogd_body(struct llogd_body *d)
@@ -244,7 +245,7 @@ void lustre_swab_llog_rec(struct llog_rec_hdr *rec)
__swab32s(&llh->llh_flags);
__swab32s(&llh->llh_size);
__swab32s(&llh->llh_cat_idx);
- tail = &llh->llh_tail;
+ tail = LLOG_HDR_TAIL(llh);
break;
}
case LLOG_LOGID_MAGIC:
@@ -290,8 +291,10 @@ static void print_llog_hdr(struct llog_log_hdr *h)
CDEBUG(D_OTHER, "\tllh_flags: %#x\n", h->llh_flags);
CDEBUG(D_OTHER, "\tllh_size: %#x\n", h->llh_size);
CDEBUG(D_OTHER, "\tllh_cat_idx: %#x\n", h->llh_cat_idx);
- CDEBUG(D_OTHER, "\tllh_tail.lrt_index: %#x\n", h->llh_tail.lrt_index);
- CDEBUG(D_OTHER, "\tllh_tail.lrt_len: %#x\n", h->llh_tail.lrt_len);
+ CDEBUG(D_OTHER, "\tllh_tail.lrt_index: %#x\n",
+ LLOG_HDR_TAIL(h)->lrt_index);
+ CDEBUG(D_OTHER, "\tllh_tail.lrt_len: %#x\n",
+ LLOG_HDR_TAIL(h)->lrt_len);
}
void lustre_swab_llog_hdr(struct llog_log_hdr *h)
diff --git a/drivers/staging/lustre/lustre/obdclass/lprocfs_status.c b/drivers/staging/lustre/lustre/obdclass/lprocfs_status.c
index 852a5acfefab..2c99717b0aba 100644
--- a/drivers/staging/lustre/lustre/obdclass/lprocfs_status.c
+++ b/drivers/staging/lustre/lustre/obdclass/lprocfs_status.c
@@ -100,9 +100,13 @@ static const char * const obd_connect_names[] = {
"lfsck",
"unknown",
"unlink_close",
- "unknown",
+ "multi_mod_rpcs",
"dir_stripe",
- "unknown",
+ "subtree",
+ "lock_ahead",
+ "bulk_mbits",
+ "compact_obdo",
+ "second_flags",
NULL
};
@@ -127,7 +131,7 @@ EXPORT_SYMBOL(obd_connect_flags2str);
static void obd_connect_data_seqprint(struct seq_file *m,
struct obd_connect_data *ocd)
{
- int flags;
+ u64 flags;
LASSERT(ocd);
flags = ocd->ocd_connect_flags;
@@ -172,6 +176,9 @@ static void obd_connect_data_seqprint(struct seq_file *m,
if (flags & OBD_CONNECT_MAXBYTES)
seq_printf(m, " max_object_bytes: %llx\n",
ocd->ocd_maxbytes);
+ if (flags & OBD_CONNECT_MULTIMODRPCS)
+ seq_printf(m, " max_mod_rpcs: %hu\n",
+ ocd->ocd_maxmodrpcs);
}
int lprocfs_read_frac_helper(char *buffer, unsigned long count, long val,
@@ -396,10 +403,17 @@ int lprocfs_wr_uint(struct file *file, const char __user *buffer,
char dummy[MAX_STRING_SIZE + 1], *end;
unsigned long tmp;
- dummy[MAX_STRING_SIZE] = '\0';
- if (copy_from_user(dummy, buffer, MAX_STRING_SIZE))
+ if (count >= sizeof(dummy))
+ return -EINVAL;
+
+ if (count == 0)
+ return 0;
+
+ if (copy_from_user(dummy, buffer, count))
return -EFAULT;
+ dummy[count] = '\0';
+
tmp = simple_strtoul(dummy, &end, 0);
if (dummy == end)
return -EINVAL;
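
The rewritten helper bounds count before copying and NUL-terminates at exactly count bytes, the standard pattern for proc/sysfs write handlers. simple_strtoul() has long been superseded by the kstrto*() family; a hedged sketch of the same parse:

	/* Sketch: kstrtoul() rejects overflow and trailing garbage,
	 * replacing the manual "dummy == end" check.
	 */
	unsigned long tmp;
	int rc;

	rc = kstrtoul(dummy, 0, &tmp);
	if (rc)
		return rc;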
@@ -1275,7 +1289,8 @@ int ldebugfs_register_stats(struct dentry *parent, const char *name,
EXPORT_SYMBOL_GPL(ldebugfs_register_stats);
void lprocfs_counter_init(struct lprocfs_stats *stats, int index,
- unsigned conf, const char *name, const char *units)
+ unsigned int conf, const char *name,
+ const char *units)
{
struct lprocfs_counter_header *header;
struct lprocfs_counter *percpu_cntr;
diff --git a/drivers/staging/lustre/lustre/obdclass/lu_object.c b/drivers/staging/lustre/lustre/obdclass/lu_object.c
index 054e567e6c8d..7971562a3efd 100644
--- a/drivers/staging/lustre/lustre/obdclass/lu_object.c
+++ b/drivers/staging/lustre/lustre/obdclass/lu_object.c
@@ -68,6 +68,7 @@ enum {
#define LU_SITE_BITS_MIN 12
#define LU_SITE_BITS_MAX 24
+#define LU_SITE_BITS_MAX_CL 19
/**
* total 256 buckets, we don't want too many buckets because:
* - consume too much memory
@@ -338,7 +339,7 @@ int lu_site_purge(const struct lu_env *env, struct lu_site *s, int nr)
struct cfs_hash_bd bd2;
struct list_head dispose;
int did_sth;
- unsigned int start;
+ unsigned int start = 0;
int count;
int bnr;
unsigned int i;
@@ -351,7 +352,8 @@ int lu_site_purge(const struct lu_env *env, struct lu_site *s, int nr)
* Under LRU list lock, scan LRU list and move unreferenced objects to
* the dispose list, removing them from LRU and hash table.
*/
- start = s->ls_purge_start;
+ if (nr != ~0)
+ start = s->ls_purge_start;
bnr = (nr == ~0) ? -1 : nr / (int)CFS_HASH_NBKT(s->ls_obj_hash) + 1;
again:
/*
@@ -877,6 +879,9 @@ static unsigned long lu_htable_order(struct lu_device *top)
unsigned long cache_size;
unsigned long bits;
+ if (!strcmp(top->ld_type->ldt_name, LUSTRE_VVP_NAME))
+ bits_max = LU_SITE_BITS_MAX_CL;
+
/*
* Calculate hash table size, assuming that we want reasonable
* performance when 20% of total memory is occupied by cache of
@@ -909,8 +914,8 @@ static unsigned long lu_htable_order(struct lu_device *top)
return clamp_t(typeof(bits), bits, LU_SITE_BITS_MIN, bits_max);
}
-static unsigned lu_obj_hop_hash(struct cfs_hash *hs,
- const void *key, unsigned mask)
+static unsigned int lu_obj_hop_hash(struct cfs_hash *hs,
+ const void *key, unsigned int mask)
{
struct lu_fid *fid = (struct lu_fid *)key;
__u32 hash;
@@ -1311,6 +1316,7 @@ enum {
static struct lu_context_key *lu_keys[LU_CONTEXT_KEY_NR] = { NULL, };
static DEFINE_SPINLOCK(lu_keys_guard);
+static atomic_t lu_key_initing_cnt = ATOMIC_INIT(0);
/**
* Global counter incremented whenever key is registered, unregistered,
@@ -1318,7 +1324,7 @@ static DEFINE_SPINLOCK(lu_keys_guard);
* lu_context_refill(). No locking is provided, as initialization and shutdown
* are supposed to be externally serialized.
*/
-static unsigned key_set_version;
+static unsigned int key_set_version;
/**
* Register new key.
@@ -1385,6 +1391,19 @@ void lu_context_key_degister(struct lu_context_key *key)
++key_set_version;
spin_lock(&lu_keys_guard);
key_fini(&lu_shrink_env.le_ctx, key->lct_index);
+
+ /**
+ * Wait until all transient contexts referencing this key have
+ * run the lu_context_key::lct_fini() method.
+ */
+ while (atomic_read(&key->lct_used) > 1) {
+ spin_unlock(&lu_keys_guard);
+ CDEBUG(D_INFO, "lu_context_key_degister: \"%s\" %p, %d\n",
+ key->lct_owner ? key->lct_owner->name : "", key,
+ atomic_read(&key->lct_used));
+ schedule();
+ spin_lock(&lu_keys_guard);
+ }
if (lu_keys[key->lct_index]) {
lu_keys[key->lct_index] = NULL;
lu_ref_fini(&key->lct_reference);
@@ -1507,14 +1526,25 @@ void lu_context_key_quiesce(struct lu_context_key *key)
if (!(key->lct_tags & LCT_QUIESCENT)) {
/*
- * XXX layering violation.
- */
- cl_env_cache_purge(~0);
- key->lct_tags |= LCT_QUIESCENT;
- /*
* XXX memory barrier has to go here.
*/
spin_lock(&lu_keys_guard);
+ key->lct_tags |= LCT_QUIESCENT;
+
+ /**
+ * Wait until all lu_context_key::lct_init() methods
+ * have completed.
+ */
+ while (atomic_read(&lu_key_initing_cnt) > 0) {
+ spin_unlock(&lu_keys_guard);
+ CDEBUG(D_INFO, "lu_context_key_quiesce: \"%s\" %p, %d (%d)\n",
+ key->lct_owner ? key->lct_owner->name : "",
+ key, atomic_read(&key->lct_used),
+ atomic_read(&lu_key_initing_cnt));
+ schedule();
+ spin_lock(&lu_keys_guard);
+ }
+
list_for_each_entry(ctx, &lu_context_remembered, lc_remember)
key_fini(ctx, key->lct_index);
spin_unlock(&lu_keys_guard);
@@ -1546,6 +1576,19 @@ static int keys_fill(struct lu_context *ctx)
{
unsigned int i;
+ /*
+ * Serialisation with lu_context_key_quiesce() is needed, but some
+ * key->lct_init() methods call kernel memory allocation routines
+ * and so cannot run while holding a spin_lock.
+ * "lu_keys_guard" is held while incrementing "lu_key_initing_cnt"
+ * to mark the start of the serialised section.
+ * An atomic_t is still used so the lock need not be reacquired
+ * when decrementing the counter.
+ */
+ spin_lock(&lu_keys_guard);
+ atomic_inc(&lu_key_initing_cnt);
+ spin_unlock(&lu_keys_guard);
+
LINVRNT(ctx->lc_value);
for (i = 0; i < ARRAY_SIZE(lu_keys); ++i) {
struct lu_context_key *key;
@@ -1563,12 +1606,19 @@ static int keys_fill(struct lu_context *ctx)
LINVRNT(key->lct_init);
LINVRNT(key->lct_index == i);
+ LASSERT(key->lct_owner);
+ if (!(ctx->lc_tags & LCT_NOREF) &&
+ !try_module_get(key->lct_owner)) {
+ /* module is unloading, skip this key */
+ continue;
+ }
+
value = key->lct_init(ctx, key);
- if (IS_ERR(value))
+ if (unlikely(IS_ERR(value))) {
+ atomic_dec(&lu_key_initing_cnt);
return PTR_ERR(value);
+ }
- if (!(ctx->lc_tags & LCT_NOREF))
- try_module_get(key->lct_owner);
lu_ref_add_atomic(&key->lct_reference, "ctx", ctx);
atomic_inc(&key->lct_used);
/*
@@ -1582,6 +1632,7 @@ static int keys_fill(struct lu_context *ctx)
}
ctx->lc_version = key_set_version;
}
+ atomic_dec(&lu_key_initing_cnt);
return 0;
}
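
lu_key_initing_cnt forms a simple gate: keys_fill() raises it under lu_keys_guard before running the (possibly sleeping) lct_init() methods, and lu_context_key_quiesce() spins until it drains to zero. A minimal model of the pattern, with do_sleeping_init() hypothetical:

	/* Sketch: publish "init in progress" under the lock, do the
	 * sleeping work unlocked, retire with a lock-free decrement.
	 */
	spin_lock(&guard);
	atomic_inc(&initing_cnt);
	spin_unlock(&guard);

	do_sleeping_init();		/* may allocate memory */

	atomic_dec(&initing_cnt);	/* waiters observe the drain */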
@@ -1663,6 +1714,9 @@ void lu_context_exit(struct lu_context *ctx)
ctx->lc_state = LCS_LEFT;
if (ctx->lc_tags & LCT_HAS_EXIT && ctx->lc_value) {
for (i = 0; i < ARRAY_SIZE(lu_keys); ++i) {
+ /* could race with key quiescency */
+ if (ctx->lc_tags & LCT_REMEMBER)
+ spin_lock(&lu_keys_guard);
if (ctx->lc_value[i]) {
struct lu_context_key *key;
@@ -1671,6 +1725,8 @@ void lu_context_exit(struct lu_context *ctx)
key->lct_exit(ctx,
key, ctx->lc_value[i]);
}
+ if (ctx->lc_tags & LCT_REMEMBER)
+ spin_unlock(&lu_keys_guard);
}
}
}
@@ -1930,7 +1986,7 @@ int lu_site_stats_print(const struct lu_site *s, struct seq_file *m)
memset(&stats, 0, sizeof(stats));
lu_site_stats_get(s->ls_obj_hash, &stats, 1);
- seq_printf(m, "%d/%d %d/%d %d %d %d %d %d %d %d %d\n",
+ seq_printf(m, "%d/%d %d/%ld %d %d %d %d %d %d %d %d\n",
stats.lss_busy,
stats.lss_total,
stats.lss_populated,
diff --git a/drivers/staging/lustre/lustre/obdclass/obd_config.c b/drivers/staging/lustre/lustre/obdclass/obd_config.c
index bbed1b72d52e..9ca84c7d49de 100644
--- a/drivers/staging/lustre/lustre/obdclass/obd_config.c
+++ b/drivers/staging/lustre/lustre/obdclass/obd_config.c
@@ -35,12 +35,15 @@
*/
#define DEBUG_SUBSYSTEM S_CLASS
-#include "../include/obd_class.h"
+
#include <linux/string.h>
+
#include "../include/lustre/lustre_ioctl.h"
-#include "../include/lustre_log.h"
+#include "../include/llog_swab.h"
#include "../include/lprocfs_status.h"
+#include "../include/lustre_log.h"
#include "../include/lustre_param.h"
+#include "../include/obd_class.h"
#include "llog_internal.h"
@@ -446,7 +449,7 @@ static int class_cleanup(struct obd_device *obd, struct lustre_cfg *lcfg)
LASSERT(obd->obd_self_export);
/* Precleanup, we must make sure all exports get destroyed. */
- err = obd_precleanup(obd, OBD_CLEANUP_EXPORTS);
+ err = obd_precleanup(obd);
if (err)
CERROR("Precleanup %s returned %d\n",
obd->obd_name, err);
@@ -585,16 +588,21 @@ static int class_del_conn(struct obd_device *obd, struct lustre_cfg *lcfg)
}
static LIST_HEAD(lustre_profile_list);
+static DEFINE_SPINLOCK(lustre_profile_list_lock);
struct lustre_profile *class_get_profile(const char *prof)
{
struct lustre_profile *lprof;
+ spin_lock(&lustre_profile_list_lock);
list_for_each_entry(lprof, &lustre_profile_list, lp_list) {
if (!strcmp(lprof->lp_profile, prof)) {
+ lprof->lp_refs++;
+ spin_unlock(&lustre_profile_list_lock);
return lprof;
}
}
+ spin_unlock(&lustre_profile_list_lock);
return NULL;
}
EXPORT_SYMBOL(class_get_profile);
@@ -639,7 +647,11 @@ static int class_add_profile(int proflen, char *prof, int osclen, char *osc,
}
}
+ spin_lock(&lustre_profile_list_lock);
+ lprof->lp_refs = 1;
+ lprof->lp_list_deleted = false;
list_add(&lprof->lp_list, &lustre_profile_list);
+ spin_unlock(&lustre_profile_list_lock);
return err;
free_lp_dt:
@@ -659,27 +671,59 @@ void class_del_profile(const char *prof)
lprof = class_get_profile(prof);
if (lprof) {
+ spin_lock(&lustre_profile_list_lock);
+ /* because get profile increments the ref counter */
+ lprof->lp_refs--;
list_del(&lprof->lp_list);
- kfree(lprof->lp_profile);
- kfree(lprof->lp_dt);
- kfree(lprof->lp_md);
- kfree(lprof);
+ lprof->lp_list_deleted = true;
+ spin_unlock(&lustre_profile_list_lock);
+
+ class_put_profile(lprof);
}
}
EXPORT_SYMBOL(class_del_profile);
+void class_put_profile(struct lustre_profile *lprof)
+{
+ spin_lock(&lustre_profile_list_lock);
+ if (--lprof->lp_refs > 0) {
+ LASSERT(lprof->lp_refs > 0);
+ spin_unlock(&lustre_profile_list_lock);
+ return;
+ }
+ spin_unlock(&lustre_profile_list_lock);
+
+ /* confirm not a negative number */
+ LASSERT(!lprof->lp_refs);
+
+ /*
+ * At least one class_del_profile/profiles must be called
+ * on the target profile or lustre_profile_list will be corrupted.
+ */
+ LASSERT(lprof->lp_list_deleted);
+ kfree(lprof->lp_profile);
+ kfree(lprof->lp_dt);
+ kfree(lprof->lp_md);
+ kfree(lprof);
+}
+EXPORT_SYMBOL(class_put_profile);
+
/* COMPAT_146 */
void class_del_profiles(void)
{
struct lustre_profile *lprof, *n;
+ spin_lock(&lustre_profile_list_lock);
list_for_each_entry_safe(lprof, n, &lustre_profile_list, lp_list) {
list_del(&lprof->lp_list);
- kfree(lprof->lp_profile);
- kfree(lprof->lp_dt);
- kfree(lprof->lp_md);
- kfree(lprof);
+ lprof->lp_list_deleted = true;
+ spin_unlock(&lustre_profile_list_lock);
+
+ class_put_profile(lprof);
+
+ spin_lock(&lustre_profile_list_lock);
}
+ spin_unlock(&lustre_profile_list_lock);
}
EXPORT_SYMBOL(class_del_profiles);
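
With the refcounting above, class_get_profile() returns a referenced profile that the caller must drop with class_put_profile(); class_del_profile() only unlinks the entry, and the memory is freed by the last put. A usage sketch, with use_profile() hypothetical:

	/* Sketch: lookup takes a reference; freeing is deferred to the
	 * final class_put_profile() after the profile is unlinked.
	 */
	struct lustre_profile *lprof;

	lprof = class_get_profile("lustre-client");
	if (lprof) {
		use_profile(lprof);
		class_put_profile(lprof);
	}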
@@ -1406,8 +1450,8 @@ EXPORT_SYMBOL(class_manual_cleanup);
* uuid<->export lustre hash operations
*/
-static unsigned
-uuid_hash(struct cfs_hash *hs, const void *key, unsigned mask)
+static unsigned int
+uuid_hash(struct cfs_hash *hs, const void *key, unsigned int mask)
{
return cfs_hash_djb2_hash(((struct obd_uuid *)key)->uuid,
sizeof(((struct obd_uuid *)key)->uuid), mask);
diff --git a/drivers/staging/lustre/lustre/obdclass/obd_mount.c b/drivers/staging/lustre/lustre/obdclass/obd_mount.c
index 0d3a3b05a637..2283e920d839 100644
--- a/drivers/staging/lustre/lustre/obdclass/obd_mount.c
+++ b/drivers/staging/lustre/lustre/obdclass/obd_mount.c
@@ -261,7 +261,7 @@ int lustre_start_mgc(struct super_block *sb)
rc = obd_get_info(NULL, obd->obd_self_export,
strlen(KEY_CONN_DATA), KEY_CONN_DATA,
- &vallen, data, NULL);
+ &vallen, data);
LASSERT(rc == 0);
has_ir = OCD_HAS_FLAG(data, IMP_RECOV);
if (has_ir ^ !(*flags & LMD_FLG_NOIR)) {
@@ -382,7 +382,7 @@ int lustre_start_mgc(struct super_block *sb)
/* We connect to the MGS at setup, and don't disconnect until cleanup */
data->ocd_connect_flags = OBD_CONNECT_VERSION | OBD_CONNECT_AT |
OBD_CONNECT_FULL20 | OBD_CONNECT_IMP_RECOV |
- OBD_CONNECT_LVB_TYPE;
+ OBD_CONNECT_LVB_TYPE | OBD_CONNECT_BULK_MBITS;
#if OBD_OCD_VERSION(3, 0, 53, 0) > LUSTRE_VERSION_CODE
data->ocd_connect_flags |= OBD_CONNECT_MNE_SWAB;
@@ -1216,8 +1216,7 @@ static struct file_system_type lustre_fs_type = {
.name = "lustre",
.mount = lustre_mount,
.kill_sb = lustre_kill_super,
- .fs_flags = FS_BINARY_MOUNTDATA | FS_REQUIRES_DEV |
- FS_RENAME_DOES_D_MOVE,
+ .fs_flags = FS_REQUIRES_DEV | FS_RENAME_DOES_D_MOVE,
};
MODULE_ALIAS_FS("lustre");
diff --git a/drivers/staging/lustre/lustre/obdclass/obdo.c b/drivers/staging/lustre/lustre/obdclass/obdo.c
index 79104a66da96..c52b9e07d7dd 100644
--- a/drivers/staging/lustre/lustre/obdclass/obdo.c
+++ b/drivers/staging/lustre/lustre/obdclass/obdo.c
@@ -124,68 +124,3 @@ void obdo_to_ioobj(const struct obdo *oa, struct obd_ioobj *ioobj)
ioobj->ioo_max_brw = 0;
}
EXPORT_SYMBOL(obdo_to_ioobj);
-
-static void iattr_from_obdo(struct iattr *attr, const struct obdo *oa,
- u32 valid)
-{
- valid &= oa->o_valid;
-
- if (valid & (OBD_MD_FLCTIME | OBD_MD_FLMTIME))
- CDEBUG(D_INODE, "valid %#llx, new time %llu/%llu\n",
- oa->o_valid, oa->o_mtime, oa->o_ctime);
-
- attr->ia_valid = 0;
- if (valid & OBD_MD_FLATIME) {
- LTIME_S(attr->ia_atime) = oa->o_atime;
- attr->ia_valid |= ATTR_ATIME;
- }
- if (valid & OBD_MD_FLMTIME) {
- LTIME_S(attr->ia_mtime) = oa->o_mtime;
- attr->ia_valid |= ATTR_MTIME;
- }
- if (valid & OBD_MD_FLCTIME) {
- LTIME_S(attr->ia_ctime) = oa->o_ctime;
- attr->ia_valid |= ATTR_CTIME;
- }
- if (valid & OBD_MD_FLSIZE) {
- attr->ia_size = oa->o_size;
- attr->ia_valid |= ATTR_SIZE;
- }
-#if 0 /* you shouldn't be able to change a file's type with setattr */
- if (valid & OBD_MD_FLTYPE) {
- attr->ia_mode = (attr->ia_mode & ~S_IFMT) |
- (oa->o_mode & S_IFMT);
- attr->ia_valid |= ATTR_MODE;
- }
-#endif
- if (valid & OBD_MD_FLMODE) {
- attr->ia_mode = (attr->ia_mode & S_IFMT) |
- (oa->o_mode & ~S_IFMT);
- attr->ia_valid |= ATTR_MODE;
- if (!in_group_p(make_kgid(&init_user_ns, oa->o_gid)) &&
- !capable(CFS_CAP_FSETID))
- attr->ia_mode &= ~S_ISGID;
- }
- if (valid & OBD_MD_FLUID) {
- attr->ia_uid = make_kuid(&init_user_ns, oa->o_uid);
- attr->ia_valid |= ATTR_UID;
- }
- if (valid & OBD_MD_FLGID) {
- attr->ia_gid = make_kgid(&init_user_ns, oa->o_gid);
- attr->ia_valid |= ATTR_GID;
- }
-}
-
-void md_from_obdo(struct md_op_data *op_data, const struct obdo *oa, u32 valid)
-{
- iattr_from_obdo(&op_data->op_attr, oa, valid);
- if (valid & OBD_MD_FLBLOCKS) {
- op_data->op_attr_blocks = oa->o_blocks;
- op_data->op_attr.ia_valid |= ATTR_BLOCKS;
- }
- if (valid & OBD_MD_FLFLAGS) {
- op_data->op_attr_flags = oa->o_flags;
- op_data->op_attr.ia_valid |= ATTR_ATTR_FLAG;
- }
-}
-EXPORT_SYMBOL(md_from_obdo);
diff --git a/drivers/staging/lustre/lustre/obdecho/echo_client.c b/drivers/staging/lustre/lustre/obdecho/echo_client.c
index 505582ff4d1e..549076193bde 100644
--- a/drivers/staging/lustre/lustre/obdecho/echo_client.c
+++ b/drivers/staging/lustre/lustre/obdecho/echo_client.c
@@ -55,7 +55,7 @@ struct echo_device {
struct echo_client_obd *ed_ec;
struct cl_site ed_site_myself;
- struct cl_site *ed_site;
+ struct lu_site *ed_site;
struct lu_device *ed_next;
};
@@ -505,9 +505,6 @@ static const struct lu_device_operations echo_device_lu_ops = {
/** @} echo_lu_dev_ops */
-static const struct cl_device_operations echo_device_cl_ops = {
-};
-
/** \defgroup echo_init Setup and teardown
*
* Init and fini functions for echo client.
@@ -527,17 +524,19 @@ static int echo_site_init(const struct lu_env *env, struct echo_device *ed)
}
rc = lu_site_init_finish(&site->cs_lu);
- if (rc)
+ if (rc) {
+ cl_site_fini(site);
return rc;
+ }
- ed->ed_site = site;
+ ed->ed_site = &site->cs_lu;
return 0;
}
static void echo_site_fini(const struct lu_env *env, struct echo_device *ed)
{
if (ed->ed_site) {
- cl_site_fini(ed->ed_site);
+ lu_site_fini(ed->ed_site);
ed->ed_site = NULL;
}
}
@@ -561,16 +560,10 @@ static void echo_thread_key_fini(const struct lu_context *ctx,
kmem_cache_free(echo_thread_kmem, info);
}
-static void echo_thread_key_exit(const struct lu_context *ctx,
- struct lu_context_key *key, void *data)
-{
-}
-
static struct lu_context_key echo_thread_key = {
.lct_tags = LCT_CL_THREAD,
.lct_init = echo_thread_key_init,
.lct_fini = echo_thread_key_fini,
- .lct_exit = echo_thread_key_exit
};
static void *echo_session_key_init(const struct lu_context *ctx,
@@ -592,16 +585,10 @@ static void echo_session_key_fini(const struct lu_context *ctx,
kmem_cache_free(echo_session_kmem, session);
}
-static void echo_session_key_exit(const struct lu_context *ctx,
- struct lu_context_key *key, void *data)
-{
-}
-
static struct lu_context_key echo_session_key = {
.lct_tags = LCT_SESSION,
.lct_init = echo_session_key_init,
.lct_fini = echo_session_key_fini,
- .lct_exit = echo_session_key_exit
};
LU_TYPE_INIT_FINI(echo, &echo_thread_key, &echo_session_key);
@@ -630,7 +617,6 @@ static struct lu_device *echo_device_alloc(const struct lu_env *env,
goto out_free;
cd->cd_lu_dev.ld_ops = &echo_device_lu_ops;
- cd->cd_ops = &echo_device_cl_ops;
obd = class_name2obd(lustre_cfg_string(cfg, 0));
LASSERT(obd);
@@ -674,7 +660,7 @@ static struct lu_device *echo_device_alloc(const struct lu_env *env,
goto out_cleanup;
}
- next->ld_site = &ed->ed_site->cs_lu;
+ next->ld_site = ed->ed_site;
rc = next->ld_type->ldt_ops->ldto_device_init(env, next,
next->ld_type->ldt_name,
NULL);
@@ -741,7 +727,7 @@ static struct lu_device *echo_device_free(const struct lu_env *env,
CDEBUG(D_INFO, "echo device:%p is going to be freed, next = %p\n",
ed, next);
- lu_site_purge(env, &ed->ed_site->cs_lu, -1);
+ lu_site_purge(env, ed->ed_site, -1);
/* check if there are objects still alive.
* It shouldn't have any object because lu_site_purge would cleanup
@@ -754,7 +740,7 @@ static struct lu_device *echo_device_free(const struct lu_env *env,
spin_unlock(&ec->ec_lock);
/* purge again */
- lu_site_purge(env, &ed->ed_site->cs_lu, -1);
+ lu_site_purge(env, ed->ed_site, -1);
CDEBUG(D_INFO,
"Waiting for the reference of echo object to be dropped\n");
@@ -766,7 +752,7 @@ static struct lu_device *echo_device_free(const struct lu_env *env,
CERROR("echo_client still has objects at cleanup time, wait for 1 second\n");
set_current_state(TASK_UNINTERRUPTIBLE);
schedule_timeout(cfs_time_seconds(1));
- lu_site_purge(env, &ed->ed_site->cs_lu, -1);
+ lu_site_purge(env, ed->ed_site, -1);
spin_lock(&ec->ec_lock);
}
spin_unlock(&ec->ec_lock);
@@ -780,11 +766,13 @@ static struct lu_device *echo_device_free(const struct lu_env *env,
while (next)
next = next->ld_type->ldt_ops->ldto_device_free(env, next);
- LASSERT(ed->ed_site == lu2cl_site(d->ld_site));
+ LASSERT(ed->ed_site == d->ld_site);
echo_site_fini(env, ed);
cl_device_fini(&ed->ed_cl);
kfree(ed);
+ cl_env_cache_purge(~0);
+
return NULL;
}
@@ -1100,7 +1088,7 @@ out:
static u64 last_object_id;
static int echo_create_object(const struct lu_env *env, struct echo_device *ed,
- struct obdo *oa, struct obd_trans_info *oti)
+ struct obdo *oa)
{
struct echo_object *eco;
struct echo_client_obd *ec = ed->ed_ec;
@@ -1117,7 +1105,7 @@ static int echo_create_object(const struct lu_env *env, struct echo_device *ed,
if (!ostid_id(&oa->o_oi))
ostid_set_id(&oa->o_oi, ++last_object_id);
- rc = obd_create(env, ec->ec_exp, oa, oti);
+ rc = obd_create(env, ec->ec_exp, oa);
if (rc != 0) {
CERROR("Cannot create objects: rc = %d\n", rc);
goto failed;
@@ -1137,7 +1125,7 @@ static int echo_create_object(const struct lu_env *env, struct echo_device *ed,
failed:
if (created && rc)
- obd_destroy(env, ec->ec_exp, oa, oti);
+ obd_destroy(env, ec->ec_exp, oa);
if (rc)
CERROR("create object failed with: rc = %d\n", rc);
return rc;
@@ -1237,8 +1225,7 @@ static int echo_client_page_debug_check(struct page *page, u64 id,
static int echo_client_kbrw(struct echo_device *ed, int rw, struct obdo *oa,
struct echo_object *eco, u64 offset,
- u64 count, int async,
- struct obd_trans_info *oti)
+ u64 count, int async)
{
u32 npages;
struct brw_page *pga;
@@ -1332,12 +1319,11 @@ static int echo_client_prep_commit(const struct lu_env *env,
struct obd_export *exp, int rw,
struct obdo *oa, struct echo_object *eco,
u64 offset, u64 count,
- u64 batch, struct obd_trans_info *oti,
- int async)
+ u64 batch, int async)
{
struct obd_ioobj ioo;
struct niobuf_local *lnb;
- struct niobuf_remote *rnb;
+ struct niobuf_remote rnb;
u64 off;
u64 npages, tot_pages;
int i, ret = 0, brw_flags = 0;
@@ -1349,9 +1335,7 @@ static int echo_client_prep_commit(const struct lu_env *env,
tot_pages = count >> PAGE_SHIFT;
lnb = kcalloc(npages, sizeof(struct niobuf_local), GFP_NOFS);
- rnb = kcalloc(npages, sizeof(struct niobuf_remote), GFP_NOFS);
-
- if (!lnb || !rnb) {
+ if (!lnb) {
ret = -ENOMEM;
goto out;
}
@@ -1363,26 +1347,22 @@ static int echo_client_prep_commit(const struct lu_env *env,
off = offset;
- for (; tot_pages; tot_pages -= npages) {
+ for (; tot_pages > 0; tot_pages -= npages) {
int lpages;
if (tot_pages < npages)
npages = tot_pages;
- for (i = 0; i < npages; i++, off += PAGE_SIZE) {
- rnb[i].rnb_offset = off;
- rnb[i].rnb_len = PAGE_SIZE;
- rnb[i].rnb_flags = brw_flags;
- }
-
- ioo.ioo_bufcnt = npages;
+ rnb.rnb_offset = off;
+ rnb.rnb_len = npages * PAGE_SIZE;
+ rnb.rnb_flags = brw_flags;
+ ioo.ioo_bufcnt = 1;
+ off += npages * PAGE_SIZE;
lpages = npages;
- ret = obd_preprw(env, rw, exp, oa, 1, &ioo, rnb, &lpages,
- lnb, oti);
+ ret = obd_preprw(env, rw, exp, oa, 1, &ioo, &rnb, &lpages, lnb);
if (ret != 0)
goto out;
- LASSERT(lpages == npages);
for (i = 0; i < lpages; i++) {
struct page *page = lnb[i].lnb_page;
@@ -1401,24 +1381,21 @@ static int echo_client_prep_commit(const struct lu_env *env,
if (rw == OBD_BRW_WRITE)
echo_client_page_debug_setup(page, rw,
- ostid_id(&oa->o_oi),
- rnb[i].rnb_offset,
- rnb[i].rnb_len);
+ ostid_id(&oa->o_oi),
+ lnb[i].lnb_file_offset,
+ lnb[i].lnb_len);
else
echo_client_page_debug_check(page,
- ostid_id(&oa->o_oi),
- rnb[i].rnb_offset,
- rnb[i].rnb_len);
+ ostid_id(&oa->o_oi),
+ lnb[i].lnb_file_offset,
+ lnb[i].lnb_len);
}
- ret = obd_commitrw(env, rw, exp, oa, 1, &ioo,
- rnb, npages, lnb, oti, ret);
+ ret = obd_commitrw(env, rw, exp, oa, 1, &ioo, &rnb, npages, lnb,
+ ret);
if (ret != 0)
goto out;
- /* Reset oti otherwise it would confuse ldiskfs. */
- memset(oti, 0, sizeof(*oti));
-
/* Reuse env context. */
lu_context_exit((struct lu_context *)&env->le_ctx);
lu_context_enter((struct lu_context *)&env->le_ctx);
@@ -1426,14 +1403,12 @@ static int echo_client_prep_commit(const struct lu_env *env,
out:
kfree(lnb);
- kfree(rnb);
return ret;
}
static int echo_client_brw_ioctl(const struct lu_env *env, int rw,
struct obd_export *exp,
- struct obd_ioctl_data *data,
- struct obd_trans_info *dummy_oti)
+ struct obd_ioctl_data *data)
{
struct obd_device *obd = class_exp2obd(exp);
struct echo_device *ed = obd2echo_dev(obd);
@@ -1470,15 +1445,13 @@ static int echo_client_brw_ioctl(const struct lu_env *env, int rw,
case 1:
/* fall through */
case 2:
- rc = echo_client_kbrw(ed, rw, oa,
- eco, data->ioc_offset,
- data->ioc_count, async, dummy_oti);
+ rc = echo_client_kbrw(ed, rw, oa, eco, data->ioc_offset,
+ data->ioc_count, async);
break;
case 3:
- rc = echo_client_prep_commit(env, ec->ec_exp, rw, oa,
- eco, data->ioc_offset,
- data->ioc_count, data->ioc_plen1,
- dummy_oti, async);
+ rc = echo_client_prep_commit(env, ec->ec_exp, rw, oa, eco,
+ data->ioc_offset, data->ioc_count,
+ data->ioc_plen1, async);
break;
default:
rc = -EINVAL;
@@ -1496,16 +1469,11 @@ echo_client_iocontrol(unsigned int cmd, struct obd_export *exp, int len,
struct echo_client_obd *ec = ed->ed_ec;
struct echo_object *eco;
struct obd_ioctl_data *data = karg;
- struct obd_trans_info dummy_oti;
struct lu_env *env;
- struct oti_req_ack_lock *ack_lock;
struct obdo *oa;
struct lu_fid fid;
int rw = OBD_BRW_READ;
int rc = 0;
- int i;
-
- memset(&dummy_oti, 0, sizeof(dummy_oti));
oa = &data->ioc_obdo1;
if (!(oa->o_valid & OBD_MD_FLGROUP)) {
@@ -1535,7 +1503,7 @@ echo_client_iocontrol(unsigned int cmd, struct obd_export *exp, int len,
goto out;
}
- rc = echo_create_object(env, ed, oa, &dummy_oti);
+ rc = echo_create_object(env, ed, oa);
goto out;
case OBD_IOC_DESTROY:
@@ -1546,7 +1514,7 @@ echo_client_iocontrol(unsigned int cmd, struct obd_export *exp, int len,
rc = echo_get_object(&eco, ed, oa);
if (rc == 0) {
- rc = obd_destroy(env, ec->ec_exp, oa, &dummy_oti);
+ rc = obd_destroy(env, ec->ec_exp, oa);
if (rc == 0)
eco->eo_deleted = 1;
echo_put_object(eco);
@@ -1556,11 +1524,7 @@ echo_client_iocontrol(unsigned int cmd, struct obd_export *exp, int len,
case OBD_IOC_GETATTR:
rc = echo_get_object(&eco, ed, oa);
if (rc == 0) {
- struct obd_info oinfo = {
- .oi_oa = oa,
- };
-
- rc = obd_getattr(env, ec->ec_exp, &oinfo);
+ rc = obd_getattr(env, ec->ec_exp, oa);
echo_put_object(eco);
}
goto out;
@@ -1573,11 +1537,7 @@ echo_client_iocontrol(unsigned int cmd, struct obd_export *exp, int len,
rc = echo_get_object(&eco, ed, oa);
if (rc == 0) {
- struct obd_info oinfo = {
- .oi_oa = oa,
- };
-
- rc = obd_setattr(env, ec->ec_exp, &oinfo, NULL);
+ rc = obd_setattr(env, ec->ec_exp, oa);
echo_put_object(eco);
}
goto out;
@@ -1591,7 +1551,7 @@ echo_client_iocontrol(unsigned int cmd, struct obd_export *exp, int len,
rw = OBD_BRW_WRITE;
/* fall through */
case OBD_IOC_BRW_READ:
- rc = echo_client_brw_ioctl(env, rw, exp, data, &dummy_oti);
+ rc = echo_client_brw_ioctl(env, rw, exp, data);
goto out;
default:
@@ -1604,14 +1564,6 @@ out:
lu_env_fini(env);
kfree(env);
- /* XXX this should be in a helper also called by target_send_reply */
- for (ack_lock = dummy_oti.oti_ack_locks, i = 0; i < 4;
- i++, ack_lock++) {
- if (!ack_lock->mode)
- break;
- ldlm_lock_decref(&ack_lock->lock, ack_lock->mode);
- }
-
return rc;
}
diff --git a/drivers/staging/lustre/lustre/osc/lproc_osc.c b/drivers/staging/lustre/lustre/osc/lproc_osc.c
index f0062d44ee03..575b2969ad83 100644
--- a/drivers/staging/lustre/lustre/osc/lproc_osc.c
+++ b/drivers/staging/lustre/lustre/osc/lproc_osc.c
@@ -162,7 +162,7 @@ static ssize_t max_dirty_mb_store(struct kobject *kobj,
pages_number *= 1 << (20 - PAGE_SHIFT); /* MB -> pages */
if (pages_number <= 0 ||
- pages_number > OSC_MAX_DIRTY_MB_MAX << (20 - PAGE_SHIFT) ||
+ pages_number >= OSC_MAX_DIRTY_MB_MAX << (20 - PAGE_SHIFT) ||
pages_number > totalram_pages / 4) /* 1/4 of RAM */
return -ERANGE;
@@ -183,10 +183,12 @@ static int osc_cached_mb_seq_show(struct seq_file *m, void *v)
seq_printf(m,
"used_mb: %ld\n"
- "busy_cnt: %ld\n",
+ "busy_cnt: %ld\n"
+ "reclaim: %llu\n",
(atomic_long_read(&cli->cl_lru_in_list) +
atomic_long_read(&cli->cl_lru_busy)) >> shift,
- atomic_long_read(&cli->cl_lru_busy));
+ atomic_long_read(&cli->cl_lru_busy),
+ cli->cl_lru_reclaim);
return 0;
}
@@ -585,7 +587,8 @@ static ssize_t max_pages_per_rpc_store(struct kobject *kobj,
chunk_mask = ~((1 << (cli->cl_chunkbits - PAGE_SHIFT)) - 1);
/* max_pages_per_rpc must be chunk aligned */
val = (val + ~chunk_mask) & chunk_mask;
- if (val == 0 || val > ocd->ocd_brw_size >> PAGE_SHIFT) {
+ if (!val || (ocd->ocd_brw_size &&
+ val > ocd->ocd_brw_size >> PAGE_SHIFT)) {
return -ERANGE;
}
spin_lock(&cli->cl_loi_list_lock);
diff --git a/drivers/staging/lustre/lustre/osc/osc_cache.c b/drivers/staging/lustre/lustre/osc/osc_cache.c
index 4bbe219add98..b0f030c6c9c9 100644
--- a/drivers/staging/lustre/lustre/osc/osc_cache.c
+++ b/drivers/staging/lustre/lustre/osc/osc_cache.c
@@ -360,6 +360,7 @@ static struct osc_extent *osc_extent_alloc(struct osc_object *obj)
RB_CLEAR_NODE(&ext->oe_node);
ext->oe_obj = obj;
+ cl_object_get(osc2cl(obj));
atomic_set(&ext->oe_refc, 1);
atomic_set(&ext->oe_users, 0);
INIT_LIST_HEAD(&ext->oe_link);
@@ -398,6 +399,7 @@ static void osc_extent_put(const struct lu_env *env, struct osc_extent *ext)
LDLM_LOCK_PUT(ext->oe_dlmlock);
ext->oe_dlmlock = NULL;
}
+ cl_object_put(env, osc2cl(ext->oe_obj));
osc_extent_free(ext);
}
}
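
The alloc/put hunks above pin the cl_object for each extent's lifetime: a reference is taken in osc_extent_alloc() and dropped on the final osc_extent_put(), so the object cannot vanish while extents still reference it. The paired discipline, as a sketch:

	/* Sketch: one cl_object reference per extent, from birth to
	 * final put.
	 */
	ext = osc_extent_alloc(obj);	/* cl_object_get(osc2cl(obj)) */
	if (ext) {
		/* ... use the extent ... */
		osc_extent_put(env, ext);	/* last put drops the ref */
	}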
@@ -959,7 +961,7 @@ static int osc_extent_wait(const struct lu_env *env, struct osc_extent *ext,
if (rc == -ETIMEDOUT) {
OSC_EXTENT_DUMP(D_ERROR, ext,
"%s: wait ext to %u timedout, recovery in progress?\n",
- osc_export(obj)->exp_obd->obd_name, state);
+ cli_name(osc_cli(obj)), state);
lwi = LWI_INTR(NULL, NULL);
rc = l_wait_event(ext->oe_waitq, extent_wait_cb(ext, state),
@@ -977,7 +979,6 @@ static int osc_extent_wait(const struct lu_env *env, struct osc_extent *ext,
static int osc_extent_truncate(struct osc_extent *ext, pgoff_t trunc_index,
bool partial)
{
- struct cl_env_nest nest;
struct lu_env *env;
struct cl_io *io;
struct osc_object *obj = ext->oe_obj;
@@ -990,6 +991,7 @@ static int osc_extent_truncate(struct osc_extent *ext, pgoff_t trunc_index,
int grants = 0;
int nr_pages = 0;
int rc = 0;
+ int refcheck;
LASSERT(sanity_check(ext) == 0);
EASSERT(ext->oe_state == OES_TRUNC, ext);
@@ -999,7 +1001,7 @@ static int osc_extent_truncate(struct osc_extent *ext, pgoff_t trunc_index,
* We can't use that env from osc_cache_truncate_start() because
* it's from lov_io_sub and not fully initialized.
*/
- env = cl_env_nested_get(&nest);
+ env = cl_env_get(&refcheck);
io = &osc_env_info(env)->oti_io;
io->ci_obj = cl_object_top(osc2cl(obj));
rc = cl_io_init(env, io, CIT_MISC, io->ci_obj);
@@ -1085,7 +1087,7 @@ static int osc_extent_truncate(struct osc_extent *ext, pgoff_t trunc_index,
out:
cl_io_fini(env, io);
- cl_env_nested_put(&nest, env);
+ cl_env_put(env, &refcheck);
return rc;
}
@@ -1327,7 +1329,6 @@ static int osc_completion(const struct lu_env *env, struct osc_async_page *oap,
{
struct osc_page *opg = oap2osc_page(oap);
struct cl_page *page = oap2cl_page(oap);
- struct osc_object *obj = cl2osc(opg->ops_cl.cpl_obj);
enum cl_req_type crt;
int srvlock;
@@ -1338,25 +1339,10 @@ static int osc_completion(const struct lu_env *env, struct osc_async_page *oap,
"cp_state:%u, cmd:%d\n", page->cp_state, cmd);
LASSERT(opg->ops_transfer_pinned);
- /*
- * page->cp_req can be NULL if io submission failed before
- * cl_req was allocated.
- */
- if (page->cp_req)
- cl_req_page_done(env, page);
- LASSERT(!page->cp_req);
-
crt = cmd == OBD_BRW_READ ? CRT_READ : CRT_WRITE;
/* Clear opg->ops_transfer_pinned before VM lock is released. */
opg->ops_transfer_pinned = 0;
- spin_lock(&obj->oo_seatbelt);
- LASSERT(opg->ops_submitter);
- LASSERT(!list_empty(&opg->ops_inflight));
- list_del_init(&opg->ops_inflight);
- opg->ops_submitter = NULL;
- spin_unlock(&obj->oo_seatbelt);
-
opg->ops_submit_time = 0;
srvlock = oap->oap_brw_flags & OBD_BRW_SRVLOCK;
@@ -1380,16 +1366,17 @@ static int osc_completion(const struct lu_env *env, struct osc_async_page *oap,
lu_ref_del(&page->cp_reference, "transfer", page);
cl_page_completion(env, page, crt, rc);
+ cl_page_put(env, page);
return 0;
}
#define OSC_DUMP_GRANT(lvl, cli, fmt, args...) do { \
struct client_obd *__tmp = (cli); \
- CDEBUG(lvl, "%s: grant { dirty: %ld/%ld dirty_pages: %ld/%lu " \
+ CDEBUG(lvl, "%s: grant { dirty: %lu/%lu dirty_pages: %ld/%lu " \
"dropped: %ld avail: %ld, reserved: %ld, flight: %d }" \
"lru {in list: %ld, left: %ld, waiters: %d }" fmt "\n", \
- __tmp->cl_import->imp_obd->obd_name, \
+ cli_name(__tmp), \
__tmp->cl_dirty_pages, __tmp->cl_dirty_max_pages, \
atomic_long_read(&obd_dirty_pages), obd_max_dirty_pages, \
__tmp->cl_lost_grant, __tmp->cl_avail_grant, \
@@ -1627,7 +1614,7 @@ static int osc_enter_cache(const struct lu_env *env, struct client_obd *cli,
osc_io_unplug_async(env, cli, NULL);
CDEBUG(D_CACHE, "%s: sleeping for cache space @ %p for %p\n",
- cli->cl_import->imp_obd->obd_name, &ocw, oap);
+ cli_name(cli), &ocw, oap);
rc = l_wait_event(ocw.ocw_waitq, ocw_granted(cli, &ocw), &lwi);
@@ -1671,7 +1658,7 @@ static int osc_enter_cache(const struct lu_env *env, struct client_obd *cli,
break;
default:
CDEBUG(D_CACHE, "%s: event for cache space @ %p never arrived due to %d, fall back to sync i/o\n",
- cli->cl_import->imp_obd->obd_name, &ocw, rc);
+ cli_name(cli), &ocw, rc);
break;
}
out:
@@ -1931,7 +1918,8 @@ static int try_to_add_extent_for_io(struct client_obd *cli,
}
if (tmp->oe_srvlock != ext->oe_srvlock ||
- !tmp->oe_grants != !ext->oe_grants)
+ !tmp->oe_grants != !ext->oe_grants ||
+ tmp->oe_no_merge || ext->oe_no_merge)
return 0;
/* remove break for strict check */
@@ -2250,14 +2238,9 @@ static int osc_io_unplug0(const struct lu_env *env, struct client_obd *cli,
return 0;
if (!async) {
- /* disable osc_lru_shrink() temporarily to avoid
- * potential stack overrun problem. LU-2859
- */
- atomic_inc(&cli->cl_lru_shrinkers);
spin_lock(&cli->cl_loi_list_lock);
osc_check_rpcs(env, cli);
spin_unlock(&cli->cl_loi_list_lock);
- atomic_dec(&cli->cl_lru_shrinkers);
} else {
CDEBUG(D_CACHE, "Queue writeback work for client %p.\n", cli);
LASSERT(cli->cl_writeback_work);
@@ -2479,7 +2462,6 @@ int osc_teardown_async_page(const struct lu_env *env,
struct osc_object *obj, struct osc_page *ops)
{
struct osc_async_page *oap = &ops->ops_oap;
- struct osc_extent *ext = NULL;
int rc = 0;
LASSERT(oap->oap_magic == OAP_MAGIC);
@@ -2487,12 +2469,15 @@ int osc_teardown_async_page(const struct lu_env *env,
CDEBUG(D_INFO, "teardown oap %p page %p at index %lu.\n",
oap, ops, osc_index(oap2osc(oap)));
- osc_object_lock(obj);
if (!list_empty(&oap->oap_rpc_item)) {
CDEBUG(D_CACHE, "oap %p is not in cache.\n", oap);
rc = -EBUSY;
} else if (!list_empty(&oap->oap_pending_item)) {
+ struct osc_extent *ext = NULL;
+
+ osc_object_lock(obj);
ext = osc_extent_lookup(obj, osc_index(oap2osc(oap)));
+ osc_object_unlock(obj);
/* only truncated pages are allowed to be taken out.
* See osc_extent_truncate() and osc_cache_truncate_start()
* for details.
@@ -2502,10 +2487,9 @@ int osc_teardown_async_page(const struct lu_env *env,
osc_index(oap2osc(oap)));
rc = -EBUSY;
}
+ if (ext)
+ osc_extent_put(env, ext);
}
- osc_object_unlock(obj);
- if (ext)
- osc_extent_put(env, ext);
return rc;
}
@@ -2666,11 +2650,13 @@ int osc_queue_sync_pages(const struct lu_env *env, struct osc_object *obj,
struct osc_async_page *oap, *tmp;
int page_count = 0;
int mppr = cli->cl_max_pages_per_rpc;
+ bool can_merge = true;
pgoff_t start = CL_PAGE_EOF;
pgoff_t end = 0;
list_for_each_entry(oap, list, oap_pending_item) {
- pgoff_t index = osc_index(oap2osc(oap));
+ struct osc_page *opg = oap2osc_page(oap);
+ pgoff_t index = osc_index(opg);
if (index > end)
end = index;
@@ -2678,6 +2664,9 @@ int osc_queue_sync_pages(const struct lu_env *env, struct osc_object *obj,
start = index;
++page_count;
mppr <<= (page_count > mppr);
+
+ if (unlikely(opg->ops_from > 0 || opg->ops_to < PAGE_SIZE))
+ can_merge = false;
}
ext = osc_extent_alloc(obj);
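
In the loop above, besides detecting partial pages for can_merge, the line `mppr <<= (page_count > mppr)` is a branchless way of growing the max-pages-per-RPC cap in powers of two as pages accumulate; since page_count rises by one per iteration, at most one doubling per step is ever needed. An equivalent, more explicit form:

/* Equivalent of the shift trick above: double the cap until it
 * covers the pages gathered so far. */
while (page_count > mppr)
	mppr <<= 1;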
@@ -2691,6 +2680,7 @@ int osc_queue_sync_pages(const struct lu_env *env, struct osc_object *obj,
ext->oe_rw = !!(cmd & OBD_BRW_READ);
ext->oe_sync = 1;
+ ext->oe_no_merge = !can_merge;
ext->oe_urgent = 1;
ext->oe_start = start;
ext->oe_end = end;
@@ -3158,7 +3148,8 @@ static int check_and_discard_cb(const struct lu_env *env, struct cl_io *io,
struct cl_page *page = ops->ops_cl.cpl_page;
/* refresh non-overlapped index */
- tmp = osc_dlmlock_at_pgoff(env, osc, index, 0, 0);
+ tmp = osc_dlmlock_at_pgoff(env, osc, index,
+ OSC_DAP_FL_TEST_LOCK);
if (tmp) {
__u64 end = tmp->l_policy_data.l_extent.end;
/* Cache the first-non-overlapped index so as to skip
diff --git a/drivers/staging/lustre/lustre/osc/osc_cl_internal.h b/drivers/staging/lustre/lustre/osc/osc_cl_internal.h
index 9c8de15c309c..cce55a9689f0 100644
--- a/drivers/staging/lustre/lustre/osc/osc_cl_internal.h
+++ b/drivers/staging/lustre/lustre/osc/osc_cl_internal.h
@@ -77,7 +77,6 @@ struct osc_io {
/** write osc_lock for this IO, used by osc_extent_find(). */
struct osc_lock *oi_write_osclock;
- struct obd_info oi_info;
struct obdo oi_oa;
struct osc_async_cbargs {
bool opc_rpc_sent;
@@ -87,13 +86,6 @@ struct osc_io {
};
/**
- * State of transfer for osc.
- */
-struct osc_req {
- struct cl_req_slice or_cl;
-};
-
-/**
* State maintained by osc layer for the duration of a system call.
*/
struct osc_session {
@@ -103,7 +95,7 @@ struct osc_session {
#define OTI_PVEC_SIZE 256
struct osc_thread_info {
struct ldlm_res_id oti_resname;
- ldlm_policy_data_t oti_policy;
+ union ldlm_policy_data oti_policy;
struct cl_lock_descr oti_descr;
struct cl_attr oti_attr;
struct lustre_handle oti_handle;
@@ -116,6 +108,7 @@ struct osc_thread_info {
pgoff_t oti_next_index;
pgoff_t oti_fn_index; /* first non-overlapped index */
struct cl_sync_io oti_anchor;
+ struct cl_req_attr oti_req_attr;
};
struct osc_object {
@@ -127,16 +120,6 @@ struct osc_object {
int oo_contended;
unsigned long oo_contention_time;
/**
- * List of pages in transfer.
- */
- struct list_head oo_inflight[CRT_NR];
- /**
- * Lock, protecting osc_page::ops_inflight, because a seat-belt is
- * locked during take-off and landing.
- */
- spinlock_t oo_seatbelt;
-
- /**
* used by the osc to keep track of what objects to build into rpcs.
* Protected by client_obd->cli_loi_list_lock.
*/
@@ -364,15 +347,6 @@ struct osc_page {
*/
struct list_head ops_lru;
/**
- * Linkage into a per-osc_object list of pages in flight. For
- * debugging.
- */
- struct list_head ops_inflight;
- /**
- * Thread that submitted this page for transfer. For debugging.
- */
- struct task_struct *ops_submitter;
- /**
* Submit time - the time when the page is starting RPC. For debugging.
*/
unsigned long ops_submit_time;
@@ -382,7 +356,6 @@ extern struct kmem_cache *osc_lock_kmem;
extern struct kmem_cache *osc_object_kmem;
extern struct kmem_cache *osc_thread_kmem;
extern struct kmem_cache *osc_session_kmem;
-extern struct kmem_cache *osc_req_kmem;
extern struct kmem_cache *osc_extent_kmem;
extern struct lu_device_type osc_device_type;
@@ -396,15 +369,14 @@ int osc_lock_init(const struct lu_env *env,
const struct cl_io *io);
int osc_io_init(const struct lu_env *env,
struct cl_object *obj, struct cl_io *io);
-int osc_req_init(const struct lu_env *env, struct cl_device *dev,
- struct cl_req *req);
struct lu_object *osc_object_alloc(const struct lu_env *env,
const struct lu_object_header *hdr,
struct lu_device *dev);
int osc_page_init(const struct lu_env *env, struct cl_object *obj,
struct cl_page *page, pgoff_t ind);
-void osc_index2policy(ldlm_policy_data_t *policy, const struct cl_object *obj,
+void osc_index2policy(union ldlm_policy_data *policy,
+ const struct cl_object *obj,
pgoff_t start, pgoff_t end);
int osc_lvb_print(const struct lu_env *env, void *cookie,
lu_printer_t p, const struct ost_lvb *lvb);
@@ -554,6 +526,16 @@ static inline struct osc_page *oap2osc_page(struct osc_async_page *oap)
return (struct osc_page *)container_of(oap, struct osc_page, ops_oap);
}
+static inline struct osc_page *
+osc_cl_page_osc(struct cl_page *page, struct osc_object *osc)
+{
+ const struct cl_page_slice *slice;
+
+ LASSERT(osc);
+ slice = cl_object_page_slice(&osc->oo_cl, page);
+ return cl2osc_page(slice);
+}
+
static inline struct osc_lock *cl2osc_lock(const struct cl_lock_slice *slice)
{
LINVRNT(osc_is_object(&slice->cls_obj->co_lu));
@@ -615,6 +597,10 @@ struct osc_extent {
oe_rw:1,
/** sync extent, queued by osc_queue_sync_pages() */
oe_sync:1,
+	/** set if this extent has partial, sync pages.
+	 * Extents with partial pages can't be merged with others in one RPC
+	 */
+ oe_no_merge:1,
oe_srvlock:1,
oe_memalloc:1,
/** an ACTIVE extent is going to be truncated, so when this extent
diff --git a/drivers/staging/lustre/lustre/osc/osc_dev.c b/drivers/staging/lustre/lustre/osc/osc_dev.c
index 83d30c135ba4..c5d62aeaeab5 100644
--- a/drivers/staging/lustre/lustre/osc/osc_dev.c
+++ b/drivers/staging/lustre/lustre/osc/osc_dev.c
@@ -29,7 +29,7 @@
* This file is part of Lustre, http://www.lustre.org/
* Lustre is a trademark of Sun Microsystems, Inc.
*
- * Implementation of cl_device, cl_req for OSC layer.
+ * Implementation of cl_device for the OSC layer.
*
* Author: Nikita Danilov <nikita.danilov@sun.com>
*/
@@ -49,7 +49,6 @@ struct kmem_cache *osc_lock_kmem;
struct kmem_cache *osc_object_kmem;
struct kmem_cache *osc_thread_kmem;
struct kmem_cache *osc_session_kmem;
-struct kmem_cache *osc_req_kmem;
struct kmem_cache *osc_extent_kmem;
struct kmem_cache *osc_quota_kmem;
@@ -75,11 +74,6 @@ struct lu_kmem_descr osc_caches[] = {
.ckd_size = sizeof(struct osc_session)
},
{
- .ckd_cache = &osc_req_kmem,
- .ckd_name = "osc_req_kmem",
- .ckd_size = sizeof(struct osc_req)
- },
- {
.ckd_cache = &osc_extent_kmem,
.ckd_name = "osc_extent_kmem",
.ckd_size = sizeof(struct osc_extent)
@@ -94,8 +88,6 @@ struct lu_kmem_descr osc_caches[] = {
}
};
-struct lock_class_key osc_ast_guard_class;
-
/*****************************************************************************
*
* Type conversions.
@@ -178,10 +170,6 @@ static const struct lu_device_operations osc_lu_ops = {
.ldo_recovery_complete = NULL
};
-static const struct cl_device_operations osc_cl_ops = {
- .cdo_req_init = osc_req_init
-};
-
static int osc_device_init(const struct lu_env *env, struct lu_device *d,
const char *name, struct lu_device *next)
{
@@ -220,7 +208,6 @@ static struct lu_device *osc_device_alloc(const struct lu_env *env,
cl_device_init(&od->od_cl, t);
d = osc2lu_dev(od);
d->ld_ops = &osc_lu_ops;
- od->od_cl.cd_ops = &osc_cl_ops;
/* Setup OSC OBD */
obd = class_name2obd(lustre_cfg_string(cfg, 0));
diff --git a/drivers/staging/lustre/lustre/osc/osc_internal.h b/drivers/staging/lustre/lustre/osc/osc_internal.h
index 67fe0a254991..688783dcc1e4 100644
--- a/drivers/staging/lustre/lustre/osc/osc_internal.h
+++ b/drivers/staging/lustre/lustre/osc/osc_internal.h
@@ -107,26 +107,24 @@ typedef int (*osc_enqueue_upcall_f)(void *cookie, struct lustre_handle *lockh,
int rc);
int osc_enqueue_base(struct obd_export *exp, struct ldlm_res_id *res_id,
- __u64 *flags, ldlm_policy_data_t *policy,
+ __u64 *flags, union ldlm_policy_data *policy,
struct ost_lvb *lvb, int kms_valid,
osc_enqueue_upcall_f upcall,
void *cookie, struct ldlm_enqueue_info *einfo,
struct ptlrpc_request_set *rqset, int async, int agl);
-int osc_cancel_base(struct lustre_handle *lockh, __u32 mode);
int osc_match_base(struct obd_export *exp, struct ldlm_res_id *res_id,
- __u32 type, ldlm_policy_data_t *policy, __u32 mode,
+ __u32 type, union ldlm_policy_data *policy, __u32 mode,
__u64 *flags, void *data, struct lustre_handle *lockh,
int unref);
-int osc_setattr_async_base(struct obd_export *exp, struct obd_info *oinfo,
- struct obd_trans_info *oti,
- obd_enqueue_update_f upcall, void *cookie,
- struct ptlrpc_request_set *rqset);
-int osc_punch_base(struct obd_export *exp, struct obd_info *oinfo,
+int osc_setattr_async(struct obd_export *exp, struct obdo *oa,
+ obd_enqueue_update_f upcall, void *cookie,
+ struct ptlrpc_request_set *rqset);
+int osc_punch_base(struct obd_export *exp, struct obdo *oa,
obd_enqueue_update_f upcall, void *cookie,
struct ptlrpc_request_set *rqset);
-int osc_sync_base(struct obd_export *exp, struct obd_info *oinfo,
+int osc_sync_base(struct osc_object *obj, struct obdo *oa,
obd_enqueue_update_f upcall, void *cookie,
struct ptlrpc_request_set *rqset);
@@ -135,7 +133,7 @@ int osc_build_rpc(const struct lu_env *env, struct client_obd *cli,
struct list_head *ext_list, int cmd);
long osc_lru_shrink(const struct lu_env *env, struct client_obd *cli,
long target, bool force);
-long osc_lru_reclaim(struct client_obd *cli);
+long osc_lru_reclaim(struct client_obd *cli, unsigned long npages);
unsigned long osc_ldlm_weigh_ast(struct ldlm_lock *dlmlock);
@@ -157,6 +155,11 @@ static inline unsigned long rpcs_in_flight(struct client_obd *cli)
return cli->cl_r_in_flight + cli->cl_w_in_flight;
}
+static inline char *cli_name(struct client_obd *cli)
+{
+ return cli->cl_import->imp_obd->obd_name;
+}
+
struct osc_device {
struct cl_device od_cl;
struct obd_export *od_exp;
@@ -192,15 +195,27 @@ int osc_quota_setdq(struct client_obd *cli, const unsigned int qid[],
int osc_quota_chkdq(struct client_obd *cli, const unsigned int qid[]);
int osc_quotactl(struct obd_device *unused, struct obd_export *exp,
struct obd_quotactl *oqctl);
-int osc_quotacheck(struct obd_device *unused, struct obd_export *exp,
- struct obd_quotactl *oqctl);
-int osc_quota_poll_check(struct obd_export *exp, struct if_quotacheck *qchk);
void osc_inc_unstable_pages(struct ptlrpc_request *req);
void osc_dec_unstable_pages(struct ptlrpc_request *req);
bool osc_over_unstable_soft_limit(struct client_obd *cli);
+/**
+ * Bit flags for osc_dlm_lock_at_pageoff().
+ */
+enum osc_dap_flags {
+ /**
+	 * Just check whether the desired lock exists; this won't hold a
+	 * reference count on the lock.
+ */
+ OSC_DAP_FL_TEST_LOCK = BIT(0),
+ /**
+ * Return the lock even if it is being canceled.
+ */
+ OSC_DAP_FL_CANCELING = BIT(1),
+};
+
struct ldlm_lock *osc_dlmlock_at_pgoff(const struct lu_env *env,
struct osc_object *obj, pgoff_t index,
- int pending, int canceling);
+ enum osc_dap_flags flags);
#endif /* OSC_INTERNAL_H */
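
The two flag call sites in this series, for reference: check_and_discard_cb() passes OSC_DAP_FL_TEST_LOCK for a look-only probe, and osc_req_attr_set() (added in osc_object.c below) combines both bits so a lock that is mid-cancel still matches:

/* Look-only probe: no reference is held on a returned lock. */
tmp = osc_dlmlock_at_pgoff(env, osc, index, OSC_DAP_FL_TEST_LOCK);

/* Match even a canceling lock, e.g. while packing RPC attributes. */
lock = osc_dlmlock_at_pgoff(env, cl2osc(obj), osc_index(opg),
			    OSC_DAP_FL_TEST_LOCK | OSC_DAP_FL_CANCELING);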
diff --git a/drivers/staging/lustre/lustre/osc/osc_io.c b/drivers/staging/lustre/lustre/osc/osc_io.c
index 8a559cbcdd0c..228a97c098fe 100644
--- a/drivers/staging/lustre/lustre/osc/osc_io.c
+++ b/drivers/staging/lustre/lustre/osc/osc_io.c
@@ -49,12 +49,6 @@
*
*/
-static struct osc_req *cl2osc_req(const struct cl_req_slice *slice)
-{
- LINVRNT(slice->crs_dev->cd_lu_dev.ld_type == &osc_device_type);
- return container_of0(slice, struct osc_req, or_cl);
-}
-
static struct osc_io *cl2osc_io(const struct lu_env *env,
const struct cl_io_slice *slice)
{
@@ -64,20 +58,6 @@ static struct osc_io *cl2osc_io(const struct lu_env *env,
return oio;
}
-static struct osc_page *osc_cl_page_osc(struct cl_page *page,
- struct osc_object *osc)
-{
- const struct cl_page_slice *slice;
-
- if (osc)
- slice = cl_object_page_slice(&osc->oo_cl, page);
- else
- slice = cl_page_at(page, &osc_device_type);
- LASSERT(slice);
-
- return cl2osc_page(slice);
-}
-
/*****************************************************************************
*
* io operations.
@@ -88,6 +68,45 @@ static void osc_io_fini(const struct lu_env *env, const struct cl_io_slice *io)
{
}
+static void osc_read_ahead_release(const struct lu_env *env, void *cbdata)
+{
+ struct ldlm_lock *dlmlock = cbdata;
+ struct lustre_handle lockh;
+
+ ldlm_lock2handle(dlmlock, &lockh);
+ ldlm_lock_decref(&lockh, LCK_PR);
+ LDLM_LOCK_PUT(dlmlock);
+}
+
+static int osc_io_read_ahead(const struct lu_env *env,
+ const struct cl_io_slice *ios,
+ pgoff_t start, struct cl_read_ahead *ra)
+{
+ struct osc_object *osc = cl2osc(ios->cis_obj);
+ struct ldlm_lock *dlmlock;
+ int result = -ENODATA;
+
+ dlmlock = osc_dlmlock_at_pgoff(env, osc, start, 0);
+ if (dlmlock) {
+ LASSERT(dlmlock->l_ast_data == osc);
+ if (dlmlock->l_req_mode != LCK_PR) {
+ struct lustre_handle lockh;
+
+ ldlm_lock2handle(dlmlock, &lockh);
+ ldlm_lock_addref(&lockh, LCK_PR);
+ ldlm_lock_decref(&lockh, dlmlock->l_req_mode);
+ }
+
+ ra->cra_end = cl_index(osc2cl(osc),
+ dlmlock->l_policy_data.l_extent.end);
+ ra->cra_release = osc_read_ahead_release;
+ ra->cra_cbdata = dlmlock;
+ result = 0;
+ }
+
+ return result;
+}
+
/**
* An implementation of cl_io_operations::cio_io_submit() method for osc
* layer. Iterates over pages in the in-queue, prepares each for io by calling
@@ -334,7 +353,7 @@ static int osc_io_rw_iter_init(const struct lu_env *env,
npages = max_pages;
c = atomic_long_read(cli->cl_lru_left);
- if (c < npages && osc_lru_reclaim(cli) > 0)
+ if (c < npages && osc_lru_reclaim(cli, npages) > 0)
c = atomic_long_read(cli->cl_lru_left);
while (c >= npages) {
if (c == atomic_long_cmpxchg(cli->cl_lru_left, c, c - npages)) {
@@ -343,6 +362,17 @@ static int osc_io_rw_iter_init(const struct lu_env *env,
}
c = atomic_long_read(cli->cl_lru_left);
}
+ if (atomic_long_read(cli->cl_lru_left) < max_pages) {
+ /*
+ * If there aren't enough pages in the per-OSC LRU then
+ * wake up the LRU thread to try and clear out space, so
+ * we don't block if pages are being dirtied quickly.
+ */
+ CDEBUG(D_CACHE, "%s: queue LRU, left: %lu/%ld.\n",
+ cli_name(cli), atomic_long_read(cli->cl_lru_left),
+ max_pages);
+ (void)ptlrpcd_queue_work(cli->cl_lru_work);
+ }
return 0;
}
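
The reservation loop in osc_io_rw_iter_init() above is a lock-free claim on the shared LRU budget: read the counter, try to atomically swap in `c - npages`, and retry while enough slots remain. A self-contained userspace model of the same loop using C11 atomics:

#include <stdatomic.h>
#include <stdbool.h>

/* Model of the cmpxchg loop above: claim `want` slots from a shared
 * budget, failing once fewer than `want` remain. */
static bool reserve_slots(atomic_long *left, long want)
{
	long c = atomic_load(left);

	while (c >= want) {
		/* On success the budget shrank by `want`; on failure
		 * `c` is refreshed with the current value and we retry. */
		if (atomic_compare_exchange_weak(left, &c, c - want))
			return true;
	}
	return false;
}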
@@ -446,7 +476,6 @@ static int osc_io_setattr_start(const struct lu_env *env,
__u64 size = io->u.ci_setattr.sa_attr.lvb_size;
unsigned int ia_valid = io->u.ci_setattr.sa_valid;
int result = 0;
- struct obd_info oinfo = { };
/* truncate cache dirty pages first */
if (cl_io_is_trunc(io))
@@ -486,11 +515,19 @@ static int osc_io_setattr_start(const struct lu_env *env,
oa->o_oi = loi->loi_oi;
obdo_set_parent_fid(oa, io->u.ci_setattr.sa_parent_fid);
oa->o_stripe_idx = io->u.ci_setattr.sa_stripe_index;
- oa->o_mtime = attr->cat_mtime;
- oa->o_atime = attr->cat_atime;
- oa->o_ctime = attr->cat_ctime;
- oa->o_valid |= OBD_MD_FLID | OBD_MD_FLGROUP | OBD_MD_FLATIME |
- OBD_MD_FLCTIME | OBD_MD_FLMTIME;
+ oa->o_valid |= OBD_MD_FLID | OBD_MD_FLGROUP;
+ if (ia_valid & ATTR_CTIME) {
+ oa->o_valid |= OBD_MD_FLCTIME;
+ oa->o_ctime = attr->cat_ctime;
+ }
+ if (ia_valid & ATTR_ATIME) {
+ oa->o_valid |= OBD_MD_FLATIME;
+ oa->o_atime = attr->cat_atime;
+ }
+ if (ia_valid & ATTR_MTIME) {
+ oa->o_valid |= OBD_MD_FLMTIME;
+ oa->o_mtime = attr->cat_mtime;
+ }
if (ia_valid & ATTR_SIZE) {
oa->o_size = size;
oa->o_blocks = OBD_OBJECT_EOF;
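
The setattr path above now forwards only the timestamps the VFS marked valid rather than stamping all three unconditionally, so a size-only truncate no longer rewrites atime/mtime/ctime on the OST. The translation, isolated as a hypothetical helper:

/* Hypothetical restatement of the ia_valid translation above. */
static u64 setattr_wire_valid(unsigned int ia_valid)
{
	u64 valid = OBD_MD_FLID | OBD_MD_FLGROUP;

	if (ia_valid & ATTR_CTIME)
		valid |= OBD_MD_FLCTIME;
	if (ia_valid & ATTR_ATIME)
		valid |= OBD_MD_FLATIME;
	if (ia_valid & ATTR_MTIME)
		valid |= OBD_MD_FLMTIME;
	return valid;
}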
@@ -503,19 +540,21 @@ static int osc_io_setattr_start(const struct lu_env *env,
} else {
LASSERT(oio->oi_lockless == 0);
}
+ if (ia_valid & ATTR_ATTR_FLAG) {
+ oa->o_flags = io->u.ci_setattr.sa_attr_flags;
+ oa->o_valid |= OBD_MD_FLFLAGS;
+ }
- oinfo.oi_oa = oa;
init_completion(&cbargs->opc_sync);
if (ia_valid & ATTR_SIZE)
result = osc_punch_base(osc_export(cl2osc(obj)),
- &oinfo, osc_async_upcall,
+ oa, osc_async_upcall,
cbargs, PTLRPCD_SET);
else
- result = osc_setattr_async_base(osc_export(cl2osc(obj)),
- &oinfo, NULL,
- osc_async_upcall,
- cbargs, PTLRPCD_SET);
+ result = osc_setattr_async(osc_export(cl2osc(obj)),
+ oa, osc_async_upcall,
+ cbargs, PTLRPCD_SET);
cbargs->opc_rpc_sent = result == 0;
}
return result;
@@ -557,6 +596,107 @@ static void osc_io_setattr_end(const struct lu_env *env,
}
}
+struct osc_data_version_args {
+ struct osc_io *dva_oio;
+};
+
+static int
+osc_data_version_interpret(const struct lu_env *env, struct ptlrpc_request *req,
+ void *arg, int rc)
+{
+ struct osc_data_version_args *dva = arg;
+ struct osc_io *oio = dva->dva_oio;
+ const struct ost_body *body;
+
+ if (rc < 0)
+ goto out;
+
+ body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
+ if (!body) {
+ rc = -EPROTO;
+ goto out;
+ }
+
+ lustre_get_wire_obdo(&req->rq_import->imp_connect_data, &oio->oi_oa,
+ &body->oa);
+out:
+ oio->oi_cbarg.opc_rc = rc;
+ complete(&oio->oi_cbarg.opc_sync);
+
+ return 0;
+}
+
+static int osc_io_data_version_start(const struct lu_env *env,
+ const struct cl_io_slice *slice)
+{
+ struct cl_data_version_io *dv = &slice->cis_io->u.ci_data_version;
+ struct osc_io *oio = cl2osc_io(env, slice);
+ struct osc_async_cbargs *cbargs = &oio->oi_cbarg;
+ struct osc_object *obj = cl2osc(slice->cis_obj);
+ struct obd_export *exp = osc_export(obj);
+ struct lov_oinfo *loi = obj->oo_oinfo;
+ struct osc_data_version_args *dva;
+ struct obdo *oa = &oio->oi_oa;
+ struct ptlrpc_request *req;
+ struct ost_body *body;
+ int rc;
+
+ memset(oa, 0, sizeof(*oa));
+ oa->o_oi = loi->loi_oi;
+ oa->o_valid = OBD_MD_FLID | OBD_MD_FLGROUP;
+
+ if (dv->dv_flags & (LL_DV_RD_FLUSH | LL_DV_WR_FLUSH)) {
+ oa->o_valid |= OBD_MD_FLFLAGS;
+ oa->o_flags |= OBD_FL_SRVLOCK;
+ if (dv->dv_flags & LL_DV_WR_FLUSH)
+ oa->o_flags |= OBD_FL_FLUSH;
+ }
+
+ init_completion(&cbargs->opc_sync);
+
+ req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_GETATTR);
+ if (!req)
+ return -ENOMEM;
+
+ rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_GETATTR);
+ if (rc < 0) {
+ ptlrpc_request_free(req);
+ return rc;
+ }
+
+ body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
+ lustre_set_wire_obdo(&req->rq_import->imp_connect_data, &body->oa, oa);
+
+ ptlrpc_request_set_replen(req);
+ req->rq_interpret_reply = osc_data_version_interpret;
+ CLASSERT(sizeof(*dva) <= sizeof(req->rq_async_args));
+ dva = ptlrpc_req_async_args(req);
+ dva->dva_oio = oio;
+
+ ptlrpcd_add_req(req);
+
+ return 0;
+}
+
+static void osc_io_data_version_end(const struct lu_env *env,
+ const struct cl_io_slice *slice)
+{
+ struct cl_data_version_io *dv = &slice->cis_io->u.ci_data_version;
+ struct osc_io *oio = cl2osc_io(env, slice);
+ struct osc_async_cbargs *cbargs = &oio->oi_cbarg;
+
+ wait_for_completion(&cbargs->opc_sync);
+
+ if (cbargs->opc_rc) {
+ slice->cis_io->ci_result = cbargs->opc_rc;
+ } else if (!(oio->oi_oa.o_valid & OBD_MD_FLDATAVERSION)) {
+ slice->cis_io->ci_result = -EOPNOTSUPP;
+ } else {
+ dv->dv_data_version = oio->oi_oa.o_data_version;
+ slice->cis_io->ci_result = 0;
+ }
+}
+
static int osc_io_read_start(const struct lu_env *env,
const struct cl_io_slice *slice)
{
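
The new CIT_DATA_VERSION handlers above split one RPC across three contexts: _start packs an OST_GETATTR and queues it on ptlrpcd, the interpret callback stores the reply status and completes opc_sync, and _end blocks on that completion before reading the result. A userspace model of the completion handshake, with the kernel's struct completion replaced by a pthread condvar:

#include <pthread.h>

/* Fields must be set up with PTHREAD_*_INITIALIZER or *_init(). */
struct cbargs {
	pthread_mutex_t lock;
	pthread_cond_t done;
	int completed;
	int rc;
};

/* Runs in the RPC engine thread, like osc_data_version_interpret(). */
static void interpret_cb(struct cbargs *cb, int rc)
{
	pthread_mutex_lock(&cb->lock);
	cb->rc = rc;			/* stash the reply status */
	cb->completed = 1;
	pthread_cond_signal(&cb->done);	/* complete(&opc_sync) */
	pthread_mutex_unlock(&cb->lock);
}

/* Runs in the issuing thread, like osc_io_data_version_end(). */
static int end_wait(struct cbargs *cb)
{
	pthread_mutex_lock(&cb->lock);
	while (!cb->completed)		/* wait_for_completion() */
		pthread_cond_wait(&cb->done, &cb->lock);
	pthread_mutex_unlock(&cb->lock);
	return cb->rc;
}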
@@ -595,7 +735,6 @@ static int osc_fsync_ost(const struct lu_env *env, struct osc_object *obj,
{
struct osc_io *oio = osc_env_io(env);
struct obdo *oa = &oio->oi_oa;
- struct obd_info *oinfo = &oio->oi_info;
struct lov_oinfo *loi = obj->oo_oinfo;
struct osc_async_cbargs *cbargs = &oio->oi_cbarg;
int rc = 0;
@@ -611,12 +750,9 @@ static int osc_fsync_ost(const struct lu_env *env, struct osc_object *obj,
obdo_set_parent_fid(oa, fio->fi_fid);
- memset(oinfo, 0, sizeof(*oinfo));
- oinfo->oi_oa = oa;
init_completion(&cbargs->opc_sync);
- rc = osc_sync_base(osc_export(obj), oinfo, osc_async_upcall, cbargs,
- PTLRPCD_SET);
+ rc = osc_sync_base(obj, oa, osc_async_upcall, cbargs, PTLRPCD_SET);
return rc;
}
@@ -710,6 +846,10 @@ static const struct cl_io_operations osc_io_ops = {
.cio_start = osc_io_setattr_start,
.cio_end = osc_io_setattr_end
},
+ [CIT_DATA_VERSION] = {
+ .cio_start = osc_io_data_version_start,
+ .cio_end = osc_io_data_version_end,
+ },
[CIT_FAULT] = {
.cio_start = osc_io_fault_start,
.cio_end = osc_io_end,
@@ -724,6 +864,7 @@ static const struct cl_io_operations osc_io_ops = {
.cio_fini = osc_io_fini
}
},
+ .cio_read_ahead = osc_io_read_ahead,
.cio_submit = osc_io_submit,
.cio_commit_async = osc_io_commit_async
};
@@ -734,103 +875,6 @@ static const struct cl_io_operations osc_io_ops = {
*
*/
-static int osc_req_prep(const struct lu_env *env,
- const struct cl_req_slice *slice)
-{
- return 0;
-}
-
-static void osc_req_completion(const struct lu_env *env,
- const struct cl_req_slice *slice, int ioret)
-{
- struct osc_req *or;
-
- or = cl2osc_req(slice);
- kmem_cache_free(osc_req_kmem, or);
-}
-
-/**
- * Implementation of struct cl_req_operations::cro_attr_set() for osc
- * layer. osc is responsible for struct obdo::o_id and struct obdo::o_seq
- * fields.
- */
-static void osc_req_attr_set(const struct lu_env *env,
- const struct cl_req_slice *slice,
- const struct cl_object *obj,
- struct cl_req_attr *attr, u64 flags)
-{
- struct lov_oinfo *oinfo;
- struct cl_req *clerq;
- struct cl_page *apage; /* _some_ page in @clerq */
- struct ldlm_lock *lock; /* _some_ lock protecting @apage */
- struct osc_page *opg;
- struct obdo *oa;
- struct ost_lvb *lvb;
-
- oinfo = cl2osc(obj)->oo_oinfo;
- lvb = &oinfo->loi_lvb;
- oa = attr->cra_oa;
-
- if ((flags & OBD_MD_FLMTIME) != 0) {
- oa->o_mtime = lvb->lvb_mtime;
- oa->o_valid |= OBD_MD_FLMTIME;
- }
- if ((flags & OBD_MD_FLATIME) != 0) {
- oa->o_atime = lvb->lvb_atime;
- oa->o_valid |= OBD_MD_FLATIME;
- }
- if ((flags & OBD_MD_FLCTIME) != 0) {
- oa->o_ctime = lvb->lvb_ctime;
- oa->o_valid |= OBD_MD_FLCTIME;
- }
- if (flags & OBD_MD_FLGROUP) {
- ostid_set_seq(&oa->o_oi, ostid_seq(&oinfo->loi_oi));
- oa->o_valid |= OBD_MD_FLGROUP;
- }
- if (flags & OBD_MD_FLID) {
- ostid_set_id(&oa->o_oi, ostid_id(&oinfo->loi_oi));
- oa->o_valid |= OBD_MD_FLID;
- }
- if (flags & OBD_MD_FLHANDLE) {
- clerq = slice->crs_req;
- LASSERT(!list_empty(&clerq->crq_pages));
- apage = container_of(clerq->crq_pages.next,
- struct cl_page, cp_flight);
- opg = osc_cl_page_osc(apage, NULL);
- lock = osc_dlmlock_at_pgoff(env, cl2osc(obj), osc_index(opg),
- 1, 1);
- if (!lock && !opg->ops_srvlock) {
- struct ldlm_resource *res;
- struct ldlm_res_id *resname;
-
- CL_PAGE_DEBUG(D_ERROR, env, apage, "uncovered page!\n");
-
- resname = &osc_env_info(env)->oti_resname;
- ostid_build_res_name(&oinfo->loi_oi, resname);
- res = ldlm_resource_get(
- osc_export(cl2osc(obj))->exp_obd->obd_namespace,
- NULL, resname, LDLM_EXTENT, 0);
- ldlm_resource_dump(D_ERROR, res);
-
- dump_stack();
- LBUG();
- }
-
- /* check for lockless io. */
- if (lock) {
- oa->o_handle = lock->l_remote_handle;
- oa->o_valid |= OBD_MD_FLHANDLE;
- LDLM_LOCK_PUT(lock);
- }
- }
-}
-
-static const struct cl_req_operations osc_req_ops = {
- .cro_prep = osc_req_prep,
- .cro_attr_set = osc_req_attr_set,
- .cro_completion = osc_req_completion
-};
-
int osc_io_init(const struct lu_env *env,
struct cl_object *obj, struct cl_io *io)
{
@@ -841,20 +885,4 @@ int osc_io_init(const struct lu_env *env,
return 0;
}
-int osc_req_init(const struct lu_env *env, struct cl_device *dev,
- struct cl_req *req)
-{
- struct osc_req *or;
- int result;
-
- or = kmem_cache_zalloc(osc_req_kmem, GFP_NOFS);
- if (or) {
- cl_req_slice_add(req, &or->or_cl, dev, &osc_req_ops);
- result = 0;
- } else {
- result = -ENOMEM;
- }
- return result;
-}
-
/** @} osc */
diff --git a/drivers/staging/lustre/lustre/osc/osc_lock.c b/drivers/staging/lustre/lustre/osc/osc_lock.c
index 39a8a5851603..5f799a4c78f9 100644
--- a/drivers/staging/lustre/lustre/osc/osc_lock.c
+++ b/drivers/staging/lustre/lustre/osc/osc_lock.c
@@ -145,7 +145,7 @@ static void osc_lock_fini(const struct lu_env *env,
static void osc_lock_build_policy(const struct lu_env *env,
const struct cl_lock *lock,
- ldlm_policy_data_t *policy)
+ union ldlm_policy_data *policy)
{
const struct cl_lock_descr *d = &lock->cll_descr;
@@ -188,7 +188,7 @@ static void osc_lock_lvb_update(const struct lu_env *env,
struct cl_object *obj = osc2cl(osc);
struct lov_oinfo *oinfo = osc->oo_oinfo;
struct cl_attr *attr = &osc_env_info(env)->oti_attr;
- unsigned valid;
+ unsigned int valid;
valid = CAT_BLOCKS | CAT_ATIME | CAT_CTIME | CAT_MTIME | CAT_SIZE;
if (!lvb)
@@ -294,10 +294,10 @@ static int osc_lock_upcall(void *cookie, struct lustre_handle *lockh,
struct osc_lock *oscl = cookie;
struct cl_lock_slice *slice = &oscl->ols_cl;
struct lu_env *env;
- struct cl_env_nest nest;
int rc;
+ int refcheck;
- env = cl_env_nested_get(&nest);
+ env = cl_env_get(&refcheck);
/* should never happen, similar to osc_ldlm_blocking_ast(). */
LASSERT(!IS_ERR(env));
@@ -336,7 +336,7 @@ static int osc_lock_upcall(void *cookie, struct lustre_handle *lockh,
if (oscl->ols_owner)
cl_sync_io_note(env, oscl->ols_owner, rc);
- cl_env_nested_put(&nest, env);
+ cl_env_put(env, &refcheck);
return rc;
}
@@ -347,9 +347,9 @@ static int osc_lock_upcall_agl(void *cookie, struct lustre_handle *lockh,
struct osc_object *osc = cookie;
struct ldlm_lock *dlmlock;
struct lu_env *env;
- struct cl_env_nest nest;
+ int refcheck;
- env = cl_env_nested_get(&nest);
+ env = cl_env_get(&refcheck);
LASSERT(!IS_ERR(env));
if (errcode == ELDLM_LOCK_MATCHED) {
@@ -374,7 +374,7 @@ static int osc_lock_upcall_agl(void *cookie, struct lustre_handle *lockh,
out:
cl_object_put(env, osc2cl(osc));
- cl_env_nested_put(&nest, env);
+ cl_env_put(env, &refcheck);
return ldlm_error2errno(errcode);
}
@@ -382,11 +382,11 @@ static int osc_lock_flush(struct osc_object *obj, pgoff_t start, pgoff_t end,
enum cl_lock_mode mode, int discard)
{
struct lu_env *env;
- struct cl_env_nest nest;
+ int refcheck;
int rc = 0;
int rc2 = 0;
- env = cl_env_nested_get(&nest);
+ env = cl_env_get(&refcheck);
if (IS_ERR(env))
return PTR_ERR(env);
@@ -404,7 +404,7 @@ static int osc_lock_flush(struct osc_object *obj, pgoff_t start, pgoff_t end,
if (rc == 0 && rc2 < 0)
rc = rc2;
- cl_env_nested_put(&nest, env);
+ cl_env_put(env, &refcheck);
return rc;
}
@@ -536,7 +536,7 @@ static int osc_ldlm_blocking_ast(struct ldlm_lock *dlmlock,
}
case LDLM_CB_CANCELING: {
struct lu_env *env;
- struct cl_env_nest nest;
+ int refcheck;
/*
* This can be called in the context of outer IO, e.g.,
@@ -549,14 +549,14 @@ static int osc_ldlm_blocking_ast(struct ldlm_lock *dlmlock,
* new environment has to be created to not corrupt outer
* context.
*/
- env = cl_env_nested_get(&nest);
+ env = cl_env_get(&refcheck);
if (IS_ERR(env)) {
result = PTR_ERR(env);
break;
}
result = osc_dlm_blocking_ast0(env, dlmlock, data, flag);
- cl_env_nested_put(&nest, env);
+ cl_env_put(env, &refcheck);
break;
}
default:
@@ -568,61 +568,63 @@ static int osc_ldlm_blocking_ast(struct ldlm_lock *dlmlock,
static int osc_ldlm_glimpse_ast(struct ldlm_lock *dlmlock, void *data)
{
struct ptlrpc_request *req = data;
- struct cl_env_nest nest;
struct lu_env *env;
struct ost_lvb *lvb;
struct req_capsule *cap;
+ struct cl_object *obj = NULL;
int result;
+ int refcheck;
LASSERT(lustre_msg_get_opc(req->rq_reqmsg) == LDLM_GL_CALLBACK);
- env = cl_env_nested_get(&nest);
- if (!IS_ERR(env)) {
- struct cl_object *obj = NULL;
+ env = cl_env_get(&refcheck);
+ if (IS_ERR(env)) {
+ result = PTR_ERR(env);
+ goto out;
+ }
- lock_res_and_lock(dlmlock);
- if (dlmlock->l_ast_data) {
- obj = osc2cl(dlmlock->l_ast_data);
- cl_object_get(obj);
- }
- unlock_res_and_lock(dlmlock);
+ lock_res_and_lock(dlmlock);
+ if (dlmlock->l_ast_data) {
+ obj = osc2cl(dlmlock->l_ast_data);
+ cl_object_get(obj);
+ }
+ unlock_res_and_lock(dlmlock);
- if (obj) {
- /* Do not grab the mutex of cl_lock for glimpse.
- * See LU-1274 for details.
- * BTW, it's okay for cl_lock to be cancelled during
- * this period because server can handle this race.
- * See ldlm_server_glimpse_ast() for details.
- * cl_lock_mutex_get(env, lock);
- */
- cap = &req->rq_pill;
- req_capsule_extend(cap, &RQF_LDLM_GL_CALLBACK);
- req_capsule_set_size(cap, &RMF_DLM_LVB, RCL_SERVER,
- sizeof(*lvb));
- result = req_capsule_server_pack(cap);
- if (result == 0) {
- lvb = req_capsule_server_get(cap, &RMF_DLM_LVB);
- result = cl_object_glimpse(env, obj, lvb);
- }
- if (!exp_connect_lvb_type(req->rq_export))
- req_capsule_shrink(&req->rq_pill,
- &RMF_DLM_LVB,
- sizeof(struct ost_lvb_v1),
- RCL_SERVER);
- cl_object_put(env, obj);
- } else {
- /*
- * These errors are normal races, so we don't want to
- * fill the console with messages by calling
- * ptlrpc_error()
- */
- lustre_pack_reply(req, 1, NULL, NULL);
- result = -ELDLM_NO_LOCK_DATA;
+ if (obj) {
+ /* Do not grab the mutex of cl_lock for glimpse.
+ * See LU-1274 for details.
+ * BTW, it's okay for cl_lock to be cancelled during
+ * this period because server can handle this race.
+ * See ldlm_server_glimpse_ast() for details.
+ * cl_lock_mutex_get(env, lock);
+ */
+ cap = &req->rq_pill;
+ req_capsule_extend(cap, &RQF_LDLM_GL_CALLBACK);
+ req_capsule_set_size(cap, &RMF_DLM_LVB, RCL_SERVER,
+ sizeof(*lvb));
+ result = req_capsule_server_pack(cap);
+ if (result == 0) {
+ lvb = req_capsule_server_get(cap, &RMF_DLM_LVB);
+ result = cl_object_glimpse(env, obj, lvb);
+ }
+ if (!exp_connect_lvb_type(req->rq_export)) {
+ req_capsule_shrink(&req->rq_pill, &RMF_DLM_LVB,
+ sizeof(struct ost_lvb_v1),
+ RCL_SERVER);
}
- cl_env_nested_put(&nest, env);
+ cl_object_put(env, obj);
} else {
- result = PTR_ERR(env);
+ /*
+ * These errors are normal races, so we don't want to
+ * fill the console with messages by calling
+ * ptlrpc_error()
+ */
+ lustre_pack_reply(req, 1, NULL, NULL);
+ result = -ELDLM_NO_LOCK_DATA;
}
+ cl_env_put(env, &refcheck);
+
+out:
req->rq_status = result;
return result;
}
@@ -677,12 +679,12 @@ static unsigned long osc_lock_weight(const struct lu_env *env,
*/
unsigned long osc_ldlm_weigh_ast(struct ldlm_lock *dlmlock)
{
- struct cl_env_nest nest;
struct lu_env *env;
struct osc_object *obj;
struct osc_lock *oscl;
unsigned long weight;
bool found = false;
+ int refcheck;
might_sleep();
/*
@@ -692,7 +694,7 @@ unsigned long osc_ldlm_weigh_ast(struct ldlm_lock *dlmlock)
* the upper context because cl_lock_put don't modify environment
* variables. But just in case ..
*/
- env = cl_env_nested_get(&nest);
+ env = cl_env_get(&refcheck);
if (IS_ERR(env))
/* Mostly because lack of memory, do not eliminate this lock */
return 1;
@@ -722,7 +724,7 @@ unsigned long osc_ldlm_weigh_ast(struct ldlm_lock *dlmlock)
weight = osc_lock_weight(env, obj, &dlmlock->l_policy_data.l_extent);
out:
- cl_env_nested_put(&nest, env);
+ cl_env_put(env, &refcheck);
return weight;
}
@@ -912,7 +914,7 @@ static int osc_lock_enqueue(const struct lu_env *env,
struct osc_lock *oscl = cl2osc_lock(slice);
struct cl_lock *lock = slice->cls_lock;
struct ldlm_res_id *resname = &info->oti_resname;
- ldlm_policy_data_t *policy = &info->oti_policy;
+ union ldlm_policy_data *policy = &info->oti_policy;
osc_enqueue_upcall_f upcall = osc_lock_upcall;
void *cookie = oscl;
bool async = false;
@@ -1009,7 +1011,7 @@ static void osc_lock_detach(const struct lu_env *env, struct osc_lock *olck)
if (olck->ols_hold) {
olck->ols_hold = 0;
- osc_cancel_base(&olck->ols_handle, olck->ols_einfo.ei_mode);
+ ldlm_lock_decref(&olck->ols_handle, olck->ols_einfo.ei_mode);
olck->ols_handle.cookie = 0ULL;
}
@@ -1180,11 +1182,11 @@ int osc_lock_init(const struct lu_env *env,
*/
struct ldlm_lock *osc_dlmlock_at_pgoff(const struct lu_env *env,
struct osc_object *obj, pgoff_t index,
- int pending, int canceling)
+ enum osc_dap_flags dap_flags)
{
struct osc_thread_info *info = osc_env_info(env);
struct ldlm_res_id *resname = &info->oti_resname;
- ldlm_policy_data_t *policy = &info->oti_policy;
+ union ldlm_policy_data *policy = &info->oti_policy;
struct lustre_handle lockh;
struct ldlm_lock *lock = NULL;
enum ldlm_mode mode;
@@ -1194,17 +1196,18 @@ struct ldlm_lock *osc_dlmlock_at_pgoff(const struct lu_env *env,
osc_index2policy(policy, osc2cl(obj), index, index);
policy->l_extent.gid = LDLM_GID_ANY;
- flags = LDLM_FL_BLOCK_GRANTED | LDLM_FL_TEST_LOCK;
- if (pending)
- flags |= LDLM_FL_CBPENDING;
+ flags = LDLM_FL_BLOCK_GRANTED | LDLM_FL_CBPENDING;
+ if (dap_flags & OSC_DAP_FL_TEST_LOCK)
+ flags |= LDLM_FL_TEST_LOCK;
+
/*
* It is fine to match any group lock since there could be only one
* with a uniq gid and it conflicts with all other lock modes too
*/
again:
- mode = ldlm_lock_match(osc_export(obj)->exp_obd->obd_namespace,
- flags, resname, LDLM_EXTENT, policy,
- LCK_PR | LCK_PW | LCK_GROUP, &lockh, canceling);
+ mode = osc_match_base(osc_export(obj), resname, LDLM_EXTENT, policy,
+ LCK_PR | LCK_PW | LCK_GROUP, &flags, obj, &lockh,
+ dap_flags & OSC_DAP_FL_CANCELING);
if (mode != 0) {
lock = ldlm_handle2lock(&lockh);
/* RACE: the lock is cancelled so let's try again */
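
osc_dlmlock_at_pgoff() now funnels through osc_match_base() and keeps the `again:` loop: a match can race with cancellation, so after resolving the handle the caller revalidates and, if the lock died meanwhile, drops it and matches afresh. The retry shape, with hypothetical stand-ins for the match and validity checks:

/* Generic match/validate/retry shape; try_match(), lock_is_dying()
 * and put_lock() are hypothetical stand-ins. */
static struct ldlm_lock *lookup_valid_lock(void)
{
	struct ldlm_lock *lock;

again:
	lock = try_match();		/* may return a dying lock */
	if (lock && lock_is_dying(lock)) {
		put_lock(lock);		/* drop the raced reference */
		goto again;
	}
	return lock;
}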
diff --git a/drivers/staging/lustre/lustre/osc/osc_object.c b/drivers/staging/lustre/lustre/osc/osc_object.c
index aae3a2d4243f..e0c3324857dd 100644
--- a/drivers/staging/lustre/lustre/osc/osc_object.c
+++ b/drivers/staging/lustre/lustre/osc/osc_object.c
@@ -71,13 +71,8 @@ static int osc_object_init(const struct lu_env *env, struct lu_object *obj,
{
struct osc_object *osc = lu2osc(obj);
const struct cl_object_conf *cconf = lu2cl_conf(conf);
- int i;
osc->oo_oinfo = cconf->u.coc_oinfo;
- spin_lock_init(&osc->oo_seatbelt);
- for (i = 0; i < CRT_NR; ++i)
- INIT_LIST_HEAD(&osc->oo_inflight[i]);
-
INIT_LIST_HEAD(&osc->oo_ready_item);
INIT_LIST_HEAD(&osc->oo_hp_ready_item);
INIT_LIST_HEAD(&osc->oo_write_item);
@@ -103,10 +98,6 @@ static int osc_object_init(const struct lu_env *env, struct lu_object *obj,
static void osc_object_free(const struct lu_env *env, struct lu_object *obj)
{
struct osc_object *osc = lu2osc(obj);
- int i;
-
- for (i = 0; i < CRT_NR; ++i)
- LASSERT(list_empty(&osc->oo_inflight[i]));
LASSERT(list_empty(&osc->oo_ready_item));
LASSERT(list_empty(&osc->oo_hp_ready_item));
@@ -218,6 +209,94 @@ static int osc_object_prune(const struct lu_env *env, struct cl_object *obj)
return 0;
}
+static int osc_object_fiemap(const struct lu_env *env, struct cl_object *obj,
+ struct ll_fiemap_info_key *fmkey,
+ struct fiemap *fiemap, size_t *buflen)
+{
+ struct obd_export *exp = osc_export(cl2osc(obj));
+ union ldlm_policy_data policy;
+ struct ptlrpc_request *req;
+ struct lustre_handle lockh;
+ struct ldlm_res_id resid;
+ enum ldlm_mode mode = 0;
+ struct fiemap *reply;
+ char *tmp;
+ int rc;
+
+ fmkey->lfik_oa.o_oi = cl2osc(obj)->oo_oinfo->loi_oi;
+ if (!(fmkey->lfik_fiemap.fm_flags & FIEMAP_FLAG_SYNC))
+ goto skip_locking;
+
+ policy.l_extent.start = fmkey->lfik_fiemap.fm_start & PAGE_MASK;
+
+ if (OBD_OBJECT_EOF - fmkey->lfik_fiemap.fm_length <=
+ fmkey->lfik_fiemap.fm_start + PAGE_SIZE - 1)
+ policy.l_extent.end = OBD_OBJECT_EOF;
+ else
+ policy.l_extent.end = (fmkey->lfik_fiemap.fm_start +
+ fmkey->lfik_fiemap.fm_length +
+ PAGE_SIZE - 1) & PAGE_MASK;
+
+ ostid_build_res_name(&fmkey->lfik_oa.o_oi, &resid);
+ mode = ldlm_lock_match(exp->exp_obd->obd_namespace,
+ LDLM_FL_BLOCK_GRANTED | LDLM_FL_LVB_READY,
+ &resid, LDLM_EXTENT, &policy,
+ LCK_PR | LCK_PW, &lockh, 0);
+ if (mode) { /* lock is cached on client */
+ if (mode != LCK_PR) {
+ ldlm_lock_addref(&lockh, LCK_PR);
+ ldlm_lock_decref(&lockh, LCK_PW);
+ }
+ } else { /* no cached lock, needs acquire lock on server side */
+ fmkey->lfik_oa.o_valid |= OBD_MD_FLFLAGS;
+ fmkey->lfik_oa.o_flags |= OBD_FL_SRVLOCK;
+ }
+
+skip_locking:
+ req = ptlrpc_request_alloc(class_exp2cliimp(exp),
+ &RQF_OST_GET_INFO_FIEMAP);
+ if (!req) {
+ rc = -ENOMEM;
+ goto drop_lock;
+ }
+
+ req_capsule_set_size(&req->rq_pill, &RMF_FIEMAP_KEY, RCL_CLIENT,
+ sizeof(*fmkey));
+ req_capsule_set_size(&req->rq_pill, &RMF_FIEMAP_VAL, RCL_CLIENT,
+ *buflen);
+ req_capsule_set_size(&req->rq_pill, &RMF_FIEMAP_VAL, RCL_SERVER,
+ *buflen);
+
+ rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_GET_INFO);
+ if (rc) {
+ ptlrpc_request_free(req);
+ goto drop_lock;
+ }
+ tmp = req_capsule_client_get(&req->rq_pill, &RMF_FIEMAP_KEY);
+ memcpy(tmp, fmkey, sizeof(*fmkey));
+ tmp = req_capsule_client_get(&req->rq_pill, &RMF_FIEMAP_VAL);
+ memcpy(tmp, fiemap, *buflen);
+ ptlrpc_request_set_replen(req);
+
+ rc = ptlrpc_queue_wait(req);
+ if (rc)
+ goto fini_req;
+
+ reply = req_capsule_server_get(&req->rq_pill, &RMF_FIEMAP_VAL);
+ if (!reply) {
+ rc = -EPROTO;
+ goto fini_req;
+ }
+
+ memcpy(fiemap, reply, *buflen);
+fini_req:
+ ptlrpc_req_finished(req);
+drop_lock:
+ if (mode)
+ ldlm_lock_decref(&lockh, LCK_PR);
+ return rc;
+}
+
void osc_object_set_contended(struct osc_object *obj)
{
obj->oo_contention_time = cfs_time_current();
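
In osc_object_fiemap() above, the lock extent must cover the requested byte range rounded out to page boundaries, and the end computation has to survive fm_start + fm_length wrapping u64 when the length effectively means "to EOF". The arithmetic, extracted into a standalone form:

#include <stdint.h>

#define PAGE_SZ   4096ULL		/* stand-in for PAGE_SIZE */
#define PAGE_MSK  (~(PAGE_SZ - 1))	/* stand-in for PAGE_MASK */
#define OBJ_EOF   UINT64_MAX		/* stand-in for OBD_OBJECT_EOF */

/* Round [start, start + length) out to page granularity, clamping to
 * EOF instead of letting the end computation wrap. */
static void fiemap_lock_extent(uint64_t start, uint64_t length,
			       uint64_t *lstart, uint64_t *lend)
{
	*lstart = start & PAGE_MSK;
	if (OBJ_EOF - length <= start + PAGE_SZ - 1)
		*lend = OBJ_EOF;	/* start + length would wrap */
	else
		*lend = (start + length + PAGE_SZ - 1) & PAGE_MSK;
}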
@@ -256,6 +335,76 @@ int osc_object_is_contended(struct osc_object *obj)
return 1;
}
+/**
+ * Implementation of struct cl_object_operations::coo_req_attr_set() for osc
+ * layer. osc is responsible for struct obdo::o_id and struct obdo::o_seq
+ * fields.
+ */
+static void osc_req_attr_set(const struct lu_env *env, struct cl_object *obj,
+ struct cl_req_attr *attr)
+{
+ u64 flags = attr->cra_flags;
+ struct lov_oinfo *oinfo;
+ struct ost_lvb *lvb;
+ struct obdo *oa;
+
+ oinfo = cl2osc(obj)->oo_oinfo;
+ lvb = &oinfo->loi_lvb;
+ oa = attr->cra_oa;
+
+ if (flags & OBD_MD_FLMTIME) {
+ oa->o_mtime = lvb->lvb_mtime;
+ oa->o_valid |= OBD_MD_FLMTIME;
+ }
+ if (flags & OBD_MD_FLATIME) {
+ oa->o_atime = lvb->lvb_atime;
+ oa->o_valid |= OBD_MD_FLATIME;
+ }
+ if (flags & OBD_MD_FLCTIME) {
+ oa->o_ctime = lvb->lvb_ctime;
+ oa->o_valid |= OBD_MD_FLCTIME;
+ }
+ if (flags & OBD_MD_FLGROUP) {
+ ostid_set_seq(&oa->o_oi, ostid_seq(&oinfo->loi_oi));
+ oa->o_valid |= OBD_MD_FLGROUP;
+ }
+ if (flags & OBD_MD_FLID) {
+ ostid_set_id(&oa->o_oi, ostid_id(&oinfo->loi_oi));
+ oa->o_valid |= OBD_MD_FLID;
+ }
+ if (flags & OBD_MD_FLHANDLE) {
+ struct ldlm_lock *lock;
+ struct osc_page *opg;
+
+ opg = osc_cl_page_osc(attr->cra_page, cl2osc(obj));
+ lock = osc_dlmlock_at_pgoff(env, cl2osc(obj), osc_index(opg),
+ OSC_DAP_FL_TEST_LOCK | OSC_DAP_FL_CANCELING);
+ if (!lock && !opg->ops_srvlock) {
+ struct ldlm_resource *res;
+ struct ldlm_res_id *resname;
+
+ CL_PAGE_DEBUG(D_ERROR, env, attr->cra_page,
+ "uncovered page!\n");
+
+ resname = &osc_env_info(env)->oti_resname;
+ ostid_build_res_name(&oinfo->loi_oi, resname);
+ res = ldlm_resource_get(
+ osc_export(cl2osc(obj))->exp_obd->obd_namespace,
+ NULL, resname, LDLM_EXTENT, 0);
+ ldlm_resource_dump(D_ERROR, res);
+
+ LBUG();
+ }
+
+ /* check for lockless io. */
+ if (lock) {
+ oa->o_handle = lock->l_remote_handle;
+ oa->o_valid |= OBD_MD_FLHANDLE;
+ LDLM_LOCK_PUT(lock);
+ }
+ }
+}
+
static const struct cl_object_operations osc_ops = {
.coo_page_init = osc_page_init,
.coo_lock_init = osc_lock_init,
@@ -263,7 +412,9 @@ static const struct cl_object_operations osc_ops = {
.coo_attr_get = osc_attr_get,
.coo_attr_update = osc_attr_update,
.coo_glimpse = osc_object_glimpse,
- .coo_prune = osc_object_prune
+ .coo_prune = osc_object_prune,
+ .coo_fiemap = osc_object_fiemap,
+ .coo_req_attr_set = osc_req_attr_set
};
static const struct lu_object_operations osc_lu_obj_ops = {
diff --git a/drivers/staging/lustre/lustre/osc/osc_page.c b/drivers/staging/lustre/lustre/osc/osc_page.c
index 2a7a70aa9e80..e356e4af08e1 100644
--- a/drivers/staging/lustre/lustre/osc/osc_page.c
+++ b/drivers/staging/lustre/lustre/osc/osc_page.c
@@ -37,6 +37,7 @@
#define DEBUG_SUBSYSTEM S_OSC
+#include <linux/math64.h>
#include "osc_cl_internal.h"
static void osc_lru_del(struct client_obd *cli, struct osc_page *opg);
@@ -86,11 +87,6 @@ static void osc_page_transfer_add(const struct lu_env *env,
struct osc_object *obj = cl2osc(opg->ops_cl.cpl_obj);
osc_lru_use(osc_cli(obj), opg);
-
- spin_lock(&obj->oo_seatbelt);
- list_add(&opg->ops_inflight, &obj->oo_inflight[crt]);
- opg->ops_submitter = current;
- spin_unlock(&obj->oo_seatbelt);
}
int osc_page_cache_add(const struct lu_env *env,
@@ -109,7 +105,8 @@ int osc_page_cache_add(const struct lu_env *env,
return result;
}
-void osc_index2policy(ldlm_policy_data_t *policy, const struct cl_object *obj,
+void osc_index2policy(union ldlm_policy_data *policy,
+ const struct cl_object *obj,
pgoff_t start, pgoff_t end)
{
memset(policy, 0, sizeof(*policy));
@@ -117,25 +114,6 @@ void osc_index2policy(ldlm_policy_data_t *policy, const struct cl_object *obj,
policy->l_extent.end = cl_offset(obj, end + 1) - 1;
}
-static int osc_page_is_under_lock(const struct lu_env *env,
- const struct cl_page_slice *slice,
- struct cl_io *unused, pgoff_t *max_index)
-{
- struct osc_page *opg = cl2osc_page(slice);
- struct ldlm_lock *dlmlock;
- int result = -ENODATA;
-
- dlmlock = osc_dlmlock_at_pgoff(env, cl2osc(slice->cpl_obj),
- osc_index(opg), 1, 0);
- if (dlmlock) {
- *max_index = cl_index(slice->cpl_obj,
- dlmlock->l_policy_data.l_extent.end);
- LDLM_LOCK_PUT(dlmlock);
- result = 0;
- }
- return result;
-}
-
static const char *osc_list(struct list_head *head)
{
return list_empty(head) ? "-" : "+";
@@ -158,7 +136,7 @@ static int osc_page_print(const struct lu_env *env,
struct osc_object *obj = cl2osc(slice->cpl_obj);
struct client_obd *cli = &osc_export(obj)->exp_obd->u.cli;
- return (*printer)(env, cookie, LUSTRE_OSC_NAME "-page@%p %lu: 1< %#x %d %u %s %s > 2< %llu %u %u %#x %#x | %p %p %p > 3< %s %p %d %lu %d > 4< %d %d %d %lu %s | %s %s %s %s > 5< %s %s %s %s | %d %s | %d %s %s>\n",
+ return (*printer)(env, cookie, LUSTRE_OSC_NAME "-page@%p %lu: 1< %#x %d %u %s %s > 2< %llu %u %u %#x %#x | %p %p %p > 3< %d %lu %d > 4< %d %d %d %lu %s | %s %s %s %s > 5< %s %s %s %s | %d %s | %d %s %s>\n",
opg, osc_index(opg),
/* 1 */
oap->oap_magic, oap->oap_cmd,
@@ -170,8 +148,7 @@ static int osc_page_print(const struct lu_env *env,
oap->oap_async_flags, oap->oap_brw_flags,
oap->oap_request, oap->oap_cli, obj,
/* 3 */
- osc_list(&opg->ops_inflight),
- opg->ops_submitter, opg->ops_transfer_pinned,
+ opg->ops_transfer_pinned,
osc_submit_duration(opg), opg->ops_srvlock,
/* 4 */
cli->cl_r_in_flight, cli->cl_w_in_flight,
@@ -210,14 +187,6 @@ static void osc_page_delete(const struct lu_env *env,
LASSERT(0);
}
- spin_lock(&obj->oo_seatbelt);
- if (opg->ops_submitter) {
- LASSERT(!list_empty(&opg->ops_inflight));
- list_del_init(&opg->ops_inflight);
- opg->ops_submitter = NULL;
- }
- spin_unlock(&obj->oo_seatbelt);
-
osc_lru_del(osc_cli(obj), opg);
if (slice->cpl_page->cp_type == CPT_CACHEABLE) {
@@ -276,7 +245,6 @@ static int osc_page_flush(const struct lu_env *env,
static const struct cl_page_operations osc_page_ops = {
.cpo_print = osc_page_print,
.cpo_delete = osc_page_delete,
- .cpo_is_under_lock = osc_page_is_under_lock,
.cpo_clip = osc_page_clip,
.cpo_cancel = osc_page_cancel,
.cpo_flush = osc_page_flush
@@ -301,10 +269,6 @@ int osc_page_init(const struct lu_env *env, struct cl_object *obj,
cl_page_slice_add(page, &opg->ops_cl, obj, index,
&osc_page_ops);
}
- /* ops_inflight and ops_lru are the same field, but it doesn't
- * hurt to initialize it twice :-)
- */
- INIT_LIST_HEAD(&opg->ops_inflight);
INIT_LIST_HEAD(&opg->ops_lru);
/* reserve an LRU space for this page */
@@ -362,16 +326,27 @@ void osc_page_submit(const struct lu_env *env, struct osc_page *opg,
* OSC to free slots voluntarily to maintain a reasonable number of free slots
* at any time.
*/
-
static DECLARE_WAIT_QUEUE_HEAD(osc_lru_waitq);
-/* LRU pages are freed in batch mode. OSC should at least free this
- * number of pages to avoid running out of LRU budget, and..
+
+/**
+ * LRU pages are freed in batch mode. OSC should at least free this
+ * number of pages to avoid running out of LRU slots.
+ */
+static inline int lru_shrink_min(struct client_obd *cli)
+{
+ return cli->cl_max_pages_per_rpc * 2;
+}
+
+/**
+ * free at most this number of pages, otherwise it will take too long to finish.
*/
-static const int lru_shrink_min = 2 << (20 - PAGE_SHIFT); /* 2M */
-/* free this number at most otherwise it will take too long time to finish. */
-static const int lru_shrink_max = 8 << (20 - PAGE_SHIFT); /* 8M */
+static inline int lru_shrink_max(struct client_obd *cli)
+{
+ return cli->cl_max_pages_per_rpc * cli->cl_max_rpcs_in_flight;
+}
-/* Check if we can free LRU slots from this OSC. If there exists LRU waiters,
+/**
+ * Check if we can free LRU slots from this OSC. If there are LRU waiters,
* we should free slots aggressively. In this way, slots are freed in a steady
* step to maintain fairness among OSCs.
*
@@ -388,13 +363,20 @@ static int osc_cache_too_much(struct client_obd *cli)
/* if it's going to run out LRU slots, we should free some, but not
* too much to maintain fairness among OSCs.
*/
- if (atomic_long_read(cli->cl_lru_left) < cache->ccc_lru_max >> 4) {
+ if (atomic_long_read(cli->cl_lru_left) < cache->ccc_lru_max >> 2) {
if (pages >= budget)
- return lru_shrink_max;
+ return lru_shrink_max(cli);
else if (pages >= budget / 2)
- return lru_shrink_min;
- } else if (pages >= budget * 2) {
- return lru_shrink_min;
+ return lru_shrink_min(cli);
+ } else {
+ time64_t duration = ktime_get_real_seconds();
+
+ /* knock out pages by duration of no IO activity */
+ duration -= cli->cl_lru_last_used;
+ duration >>= 6; /* approximately 1 minute */
+ if (duration > 0 &&
+ pages >= div64_s64((s64)budget, duration))
+ return lru_shrink_min(cli);
}
return 0;
}
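
osc_cache_too_much() above now has two regimes: when the global LRU budget drops below a quarter (the threshold tightened from 1/16), shrink hard or moderately depending on this client's share of the budget; otherwise only trim caches that have sat idle, with the page threshold falling as idle time (duration >> 6, roughly minutes) grows. A compressed, non-runnable restatement using the names from the hunk:

/* Sketch of the policy above; all identifiers from the hunk. */
if (atomic_long_read(cli->cl_lru_left) < cache->ccc_lru_max >> 2) {
	if (pages >= budget)
		return lru_shrink_max(cli);	/* severe pressure */
	if (pages >= budget / 2)
		return lru_shrink_min(cli);	/* moderate pressure */
} else {
	s64 idle = (now - cli->cl_lru_last_used) >> 6;

	if (idle > 0 && pages >= div64_s64((s64)budget, idle))
		return lru_shrink_min(cli);	/* decay idle caches */
}
return 0;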
@@ -402,11 +384,21 @@ static int osc_cache_too_much(struct client_obd *cli)
int lru_queue_work(const struct lu_env *env, void *data)
{
struct client_obd *cli = data;
+ int count;
- CDEBUG(D_CACHE, "Run LRU work for client obd %p.\n", cli);
+ CDEBUG(D_CACHE, "%s: run LRU work for client obd\n", cli_name(cli));
- if (osc_cache_too_much(cli))
- osc_lru_shrink(env, cli, lru_shrink_max, true);
+ count = osc_cache_too_much(cli);
+ if (count > 0) {
+ int rc = osc_lru_shrink(env, cli, count, false);
+
+ CDEBUG(D_CACHE, "%s: shrank %d/%d pages from client obd\n",
+ cli_name(cli), rc, count);
+ if (rc >= count) {
+ CDEBUG(D_CACHE, "%s: queue again\n", cli_name(cli));
+ ptlrpcd_queue_work(cli->cl_lru_work);
+ }
+ }
return 0;
}
@@ -433,10 +425,10 @@ void osc_lru_add_batch(struct client_obd *cli, struct list_head *plist)
list_splice_tail(&lru, &cli->cl_lru_list);
atomic_long_sub(npages, &cli->cl_lru_busy);
atomic_long_add(npages, &cli->cl_lru_in_list);
+ cli->cl_lru_last_used = ktime_get_real_seconds();
spin_unlock(&cli->cl_lru_list_lock);
- /* XXX: May set force to be true for better performance */
- if (osc_cache_too_much(cli))
+ if (waitqueue_active(&osc_lru_waitq))
(void)ptlrpcd_queue_work(cli->cl_lru_work);
}
}
@@ -469,8 +461,10 @@ static void osc_lru_del(struct client_obd *cli, struct osc_page *opg)
* this osc occupies too many LRU pages and kernel is
* stealing one of them.
*/
- if (!memory_pressure_get())
+ if (osc_cache_too_much(cli)) {
+ CDEBUG(D_CACHE, "%s: queue LRU work\n", cli_name(cli));
(void)ptlrpcd_queue_work(cli->cl_lru_work);
+ }
wake_up(&osc_lru_waitq);
} else {
LASSERT(list_empty(&opg->ops_lru));
@@ -502,6 +496,7 @@ static void discard_pagevec(const struct lu_env *env, struct cl_io *io,
struct cl_page *page = pvec[i];
LASSERT(cl_page_is_owned(page, io));
+ cl_page_delete(env, page);
cl_page_discard(env, io, page);
cl_page_disown(env, io, page);
cl_page_put(env, page);
@@ -542,7 +537,6 @@ long osc_lru_shrink(const struct lu_env *env, struct client_obd *cli,
struct cl_object *clobj = NULL;
struct cl_page **pvec;
struct osc_page *opg;
- struct osc_page *temp;
int maxscan = 0;
long count = 0;
int index = 0;
@@ -552,6 +546,8 @@ long osc_lru_shrink(const struct lu_env *env, struct client_obd *cli,
if (atomic_long_read(&cli->cl_lru_in_list) == 0 || target <= 0)
return 0;
+ CDEBUG(D_CACHE, "%s: shrinkers: %d, force: %d\n",
+ cli_name(cli), atomic_read(&cli->cl_lru_shrinkers), force);
if (!force) {
if (atomic_read(&cli->cl_lru_shrinkers) > 0)
return -EBUSY;
@@ -568,14 +564,21 @@ long osc_lru_shrink(const struct lu_env *env, struct client_obd *cli,
io = &osc_env_info(env)->oti_io;
spin_lock(&cli->cl_lru_list_lock);
+ if (force)
+ cli->cl_lru_reclaim++;
maxscan = min(target << 1, atomic_long_read(&cli->cl_lru_in_list));
- list_for_each_entry_safe(opg, temp, &cli->cl_lru_list, ops_lru) {
+ while (!list_empty(&cli->cl_lru_list)) {
struct cl_page *page;
bool will_free = false;
+ if (!force && atomic_read(&cli->cl_lru_shrinkers) > 1)
+ break;
+
if (--maxscan < 0)
break;
+ opg = list_entry(cli->cl_lru_list.next, struct osc_page,
+ ops_lru);
page = opg->ops_cl.cpl_page;
if (lru_page_busy(cli, page)) {
list_move_tail(&opg->ops_lru, &cli->cl_lru_list);
@@ -662,34 +665,43 @@ long osc_lru_shrink(const struct lu_env *env, struct client_obd *cli,
return count > 0 ? count : rc;
}
-long osc_lru_reclaim(struct client_obd *cli)
+/**
+ * Reclaim LRU pages by an IO thread. The caller wants to reclaim at least
+ * \@npages of LRU slots. For performance considerations, it's better to drop
+ * LRU pages in batches, so the actual number is rounded up to at least
+ * max_pages_per_rpc.
+ */
+long osc_lru_reclaim(struct client_obd *cli, unsigned long npages)
{
- struct cl_env_nest nest;
struct lu_env *env;
struct cl_client_cache *cache = cli->cl_cache;
int max_scans;
+ int refcheck;
long rc = 0;
LASSERT(cache);
- env = cl_env_nested_get(&nest);
+ env = cl_env_get(&refcheck);
if (IS_ERR(env))
return 0;
- rc = osc_lru_shrink(env, cli, osc_cache_too_much(cli), false);
- if (rc != 0) {
- if (rc == -EBUSY)
- rc = 0;
-
- CDEBUG(D_CACHE, "%s: Free %ld pages from own LRU: %p.\n",
- cli->cl_import->imp_obd->obd_name, rc, cli);
+ npages = max_t(int, npages, cli->cl_max_pages_per_rpc);
+ CDEBUG(D_CACHE, "%s: start to reclaim %ld pages from LRU\n",
+ cli_name(cli), npages);
+ rc = osc_lru_shrink(env, cli, npages, true);
+ if (rc >= npages) {
+ CDEBUG(D_CACHE, "%s: reclaimed %ld/%ld pages from LRU\n",
+ cli_name(cli), rc, npages);
+ if (osc_cache_too_much(cli) > 0)
+ ptlrpcd_queue_work(cli->cl_lru_work);
goto out;
+ } else if (rc > 0) {
+ npages -= rc;
}
- CDEBUG(D_CACHE, "%s: cli %p no free slots, pages: %ld, busy: %ld.\n",
- cli->cl_import->imp_obd->obd_name, cli,
- atomic_long_read(&cli->cl_lru_in_list),
- atomic_long_read(&cli->cl_lru_busy));
+ CDEBUG(D_CACHE, "%s: cli %p no free slots, pages: %ld/%ld, want: %ld\n",
+ cli_name(cli), cli, atomic_long_read(&cli->cl_lru_in_list),
+ atomic_long_read(&cli->cl_lru_busy), npages);
/* Reclaim LRU slots from other client_obd as it can't free enough
* from its own. This should rarely happen.
@@ -706,7 +718,7 @@ long osc_lru_reclaim(struct client_obd *cli)
cl_lru_osc);
CDEBUG(D_CACHE, "%s: cli %p LRU pages: %ld, busy: %ld.\n",
- cli->cl_import->imp_obd->obd_name, cli,
+ cli_name(cli), cli,
atomic_long_read(&cli->cl_lru_in_list),
atomic_long_read(&cli->cl_lru_busy));
@@ -714,19 +726,20 @@ long osc_lru_reclaim(struct client_obd *cli)
if (osc_cache_too_much(cli) > 0) {
spin_unlock(&cache->ccc_lru_lock);
- rc = osc_lru_shrink(env, cli, osc_cache_too_much(cli),
- true);
+ rc = osc_lru_shrink(env, cli, npages, true);
spin_lock(&cache->ccc_lru_lock);
- if (rc != 0)
+ if (rc >= npages)
break;
+ if (rc > 0)
+ npages -= rc;
}
}
spin_unlock(&cache->ccc_lru_lock);
out:
- cl_env_nested_put(&nest, env);
+ cl_env_put(env, &refcheck);
CDEBUG(D_CACHE, "%s: cli %p freed %ld pages.\n",
- cli->cl_import->imp_obd->obd_name, cli, rc);
+ cli_name(cli), cli, rc);
return rc;
}
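
osc_lru_reclaim() now reclaims in two phases: shrink the caller's own LRU toward the requested target first and, only if that comes up short, walk the siblings on cache->ccc_lru and shrink them until the remainder is covered. An outline, with shrink_lru() as a hypothetical stand-in for osc_lru_shrink() and the ccc_lru_lock handling elided:

/* Two-phase outline of the function above; shrink_lru() is a
 * hypothetical stand-in and list locking is elided. */
static long reclaim_lru(struct client_obd *self, long npages)
{
	struct client_obd *victim;
	long freed = shrink_lru(self, npages);	/* phase 1: own LRU */

	if (freed >= npages)
		return freed;

	/* phase 2: steal slots from sibling OSCs on the same cache */
	list_for_each_entry(victim, &self->cl_cache->ccc_lru, cl_lru_osc) {
		freed += shrink_lru(victim, npages - freed);
		if (freed >= npages)
			break;
	}
	return freed;
}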
@@ -756,7 +769,7 @@ static int osc_lru_reserve(const struct lu_env *env, struct osc_object *obj,
LASSERT(atomic_long_read(cli->cl_lru_left) >= 0);
while (!atomic_long_add_unless(cli->cl_lru_left, -1, 0)) {
/* run out of LRU spaces, try to drop some by itself */
- rc = osc_lru_reclaim(cli);
+ rc = osc_lru_reclaim(cli, 1);
if (rc < 0)
break;
if (rc > 0)
@@ -796,8 +809,10 @@ static inline void unstable_page_accounting(struct ptlrpc_bulk_desc *desc,
int count = 0;
int i;
+ LASSERT(ptlrpc_is_bulk_desc_kiov(desc->bd_type));
+
for (i = 0; i < page_count; i++) {
- pg_data_t *pgdat = page_pgdat(desc->bd_iov[i].bv_page);
+ pg_data_t *pgdat = page_pgdat(BD_GET_KIOV(desc, i).bv_page);
if (likely(pgdat == last)) {
++count;
@@ -857,7 +872,7 @@ void osc_dec_unstable_pages(struct ptlrpc_request *req)
if (!unstable_count)
wake_up_all(&cli->cl_cache->ccc_unstable_waitq);
- if (osc_cache_too_much(cli))
+ if (waitqueue_active(&osc_lru_waitq))
(void)ptlrpcd_queue_work(cli->cl_lru_work);
}
@@ -913,8 +928,7 @@ bool osc_over_unstable_soft_limit(struct client_obd *cli)
CDEBUG(D_CACHE,
"%s: cli: %p unstable pages: %lu, osc unstable pages: %lu\n",
- cli->cl_import->imp_obd->obd_name, cli,
- unstable_nr, osc_unstable_count);
+ cli_name(cli), cli, unstable_nr, osc_unstable_count);
/*
* If the LRU slots are in shortage - 25% remaining AND this OSC
diff --git a/drivers/staging/lustre/lustre/osc/osc_quota.c b/drivers/staging/lustre/lustre/osc/osc_quota.c
index 194d8ede40a2..fed4da63ee45 100644
--- a/drivers/staging/lustre/lustre/osc/osc_quota.c
+++ b/drivers/staging/lustre/lustre/osc/osc_quota.c
@@ -106,7 +106,7 @@ int osc_quota_setdq(struct client_obd *cli, const unsigned int qid[],
}
CDEBUG(D_QUOTA, "%s: setdq to insert for %s %d (%d)\n",
- cli->cl_import->imp_obd->obd_name,
+ cli_name(cli),
type == USRQUOTA ? "user" : "group",
qid[type], rc);
} else {
@@ -122,7 +122,7 @@ int osc_quota_setdq(struct client_obd *cli, const unsigned int qid[],
kmem_cache_free(osc_quota_kmem, oqi);
CDEBUG(D_QUOTA, "%s: setdq to remove for %s %d (%p)\n",
- cli->cl_import->imp_obd->obd_name,
+ cli_name(cli),
type == USRQUOTA ? "user" : "group",
qid[type], oqi);
}
@@ -134,8 +134,8 @@ int osc_quota_setdq(struct client_obd *cli, const unsigned int qid[],
/*
* Hash operations for uid/gid <-> osc_quota_info
*/
-static unsigned
-oqi_hashfn(struct cfs_hash *hs, const void *key, unsigned mask)
+static unsigned int
+oqi_hashfn(struct cfs_hash *hs, const void *key, unsigned int mask)
{
return cfs_hash_u32_hash(*((__u32 *)key), mask);
}
@@ -281,47 +281,3 @@ int osc_quotactl(struct obd_device *unused, struct obd_export *exp,
return rc;
}
-
-int osc_quotacheck(struct obd_device *unused, struct obd_export *exp,
- struct obd_quotactl *oqctl)
-{
- struct client_obd *cli = &exp->exp_obd->u.cli;
- struct ptlrpc_request *req;
- struct obd_quotactl *body;
- int rc;
-
- req = ptlrpc_request_alloc_pack(class_exp2cliimp(exp),
- &RQF_OST_QUOTACHECK, LUSTRE_OST_VERSION,
- OST_QUOTACHECK);
- if (!req)
- return -ENOMEM;
-
- body = req_capsule_client_get(&req->rq_pill, &RMF_OBD_QUOTACTL);
- *body = *oqctl;
-
- ptlrpc_request_set_replen(req);
-
- /* the next poll will find -ENODATA, that means quotacheck is going on
- */
- cli->cl_qchk_stat = -ENODATA;
- rc = ptlrpc_queue_wait(req);
- if (rc)
- cli->cl_qchk_stat = rc;
- ptlrpc_req_finished(req);
- return rc;
-}
-
-int osc_quota_poll_check(struct obd_export *exp, struct if_quotacheck *qchk)
-{
- struct client_obd *cli = &exp->exp_obd->u.cli;
- int rc;
-
- qchk->obd_uuid = cli->cl_target_uuid;
- memcpy(qchk->obd_type, LUSTRE_OST_NAME, strlen(LUSTRE_OST_NAME));
-
- rc = cli->cl_qchk_stat;
- /* the client is not the previous one */
- if (rc == CL_NOT_QUOTACHECKED)
- rc = -EINTR;
- return rc;
-}
diff --git a/drivers/staging/lustre/lustre/osc/osc_request.c b/drivers/staging/lustre/lustre/osc/osc_request.c
index 749781f022e2..7143564ae7e7 100644
--- a/drivers/staging/lustre/lustre/osc/osc_request.c
+++ b/drivers/staging/lustre/lustre/osc/osc_request.c
@@ -68,7 +68,6 @@ struct osc_brw_async_args {
struct client_obd *aa_cli;
struct list_head aa_oaps;
struct list_head aa_exts;
- struct cl_req *aa_clerq;
};
struct osc_async_args {
@@ -82,7 +81,8 @@ struct osc_setattr_args {
};
struct osc_fsync_args {
- struct obd_info *fa_oi;
+ struct osc_object *fa_obj;
+ struct obdo *fa_oa;
obd_enqueue_update_f fa_upcall;
void *fa_cookie;
};
@@ -103,140 +103,19 @@ static void osc_release_ppga(struct brw_page **ppga, u32 count);
static int brw_interpret(const struct lu_env *env,
struct ptlrpc_request *req, void *data, int rc);
-/* Unpack OSC object metadata from disk storage (LE byte order). */
-static int osc_unpackmd(struct obd_export *exp, struct lov_stripe_md **lsmp,
- struct lov_mds_md *lmm, int lmm_bytes)
-{
- int lsm_size;
- struct obd_import *imp = class_exp2cliimp(exp);
-
- if (lmm) {
- if (lmm_bytes < sizeof(*lmm)) {
- CERROR("%s: lov_mds_md too small: %d, need %d\n",
- exp->exp_obd->obd_name, lmm_bytes,
- (int)sizeof(*lmm));
- return -EINVAL;
- }
- /* XXX LOV_MAGIC etc check? */
-
- if (unlikely(ostid_id(&lmm->lmm_oi) == 0)) {
- CERROR("%s: zero lmm_object_id: rc = %d\n",
- exp->exp_obd->obd_name, -EINVAL);
- return -EINVAL;
- }
- }
-
- lsm_size = lov_stripe_md_size(1);
- if (!lsmp)
- return lsm_size;
-
- if (*lsmp && !lmm) {
- kfree((*lsmp)->lsm_oinfo[0]);
- kfree(*lsmp);
- *lsmp = NULL;
- return 0;
- }
-
- if (!*lsmp) {
- *lsmp = kzalloc(lsm_size, GFP_NOFS);
- if (unlikely(!*lsmp))
- return -ENOMEM;
- (*lsmp)->lsm_oinfo[0] = kzalloc(sizeof(struct lov_oinfo),
- GFP_NOFS);
- if (unlikely(!(*lsmp)->lsm_oinfo[0])) {
- kfree(*lsmp);
- return -ENOMEM;
- }
- loi_init((*lsmp)->lsm_oinfo[0]);
- } else if (unlikely(ostid_id(&(*lsmp)->lsm_oi) == 0)) {
- return -EBADF;
- }
-
- if (lmm)
- /* XXX zero *lsmp? */
- ostid_le_to_cpu(&lmm->lmm_oi, &(*lsmp)->lsm_oi);
-
- if (imp &&
- (imp->imp_connect_data.ocd_connect_flags & OBD_CONNECT_MAXBYTES))
- (*lsmp)->lsm_maxbytes = imp->imp_connect_data.ocd_maxbytes;
- else
- (*lsmp)->lsm_maxbytes = LUSTRE_EXT3_STRIPE_MAXBYTES;
-
- return lsm_size;
-}
-
static inline void osc_pack_req_body(struct ptlrpc_request *req,
- struct obd_info *oinfo)
+ struct obdo *oa)
{
struct ost_body *body;
body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
LASSERT(body);
- lustre_set_wire_obdo(&req->rq_import->imp_connect_data, &body->oa,
- oinfo->oi_oa);
-}
-
-static int osc_getattr_interpret(const struct lu_env *env,
- struct ptlrpc_request *req,
- struct osc_async_args *aa, int rc)
-{
- struct ost_body *body;
-
- if (rc != 0)
- goto out;
-
- body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
- if (body) {
- CDEBUG(D_INODE, "mode: %o\n", body->oa.o_mode);
- lustre_get_wire_obdo(&req->rq_import->imp_connect_data,
- aa->aa_oi->oi_oa, &body->oa);
-
- /* This should really be sent by the OST */
- aa->aa_oi->oi_oa->o_blksize = DT_MAX_BRW_SIZE;
- aa->aa_oi->oi_oa->o_valid |= OBD_MD_FLBLKSZ;
- } else {
- CDEBUG(D_INFO, "can't unpack ost_body\n");
- rc = -EPROTO;
- aa->aa_oi->oi_oa->o_valid = 0;
- }
-out:
- rc = aa->aa_oi->oi_cb_up(aa->aa_oi, rc);
- return rc;
-}
-
-static int osc_getattr_async(struct obd_export *exp, struct obd_info *oinfo,
- struct ptlrpc_request_set *set)
-{
- struct ptlrpc_request *req;
- struct osc_async_args *aa;
- int rc;
-
- req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_GETATTR);
- if (!req)
- return -ENOMEM;
-
- rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_GETATTR);
- if (rc) {
- ptlrpc_request_free(req);
- return rc;
- }
-
- osc_pack_req_body(req, oinfo);
-
- ptlrpc_request_set_replen(req);
- req->rq_interpret_reply = (ptlrpc_interpterer_t)osc_getattr_interpret;
-
- CLASSERT(sizeof(*aa) <= sizeof(req->rq_async_args));
- aa = ptlrpc_req_async_args(req);
- aa->aa_oi = oinfo;
-
- ptlrpc_set_add_req(set, req);
- return 0;
+ lustre_set_wire_obdo(&req->rq_import->imp_connect_data, &body->oa, oa);
}
static int osc_getattr(const struct lu_env *env, struct obd_export *exp,
- struct obd_info *oinfo)
+ struct obdo *oa)
{
struct ptlrpc_request *req;
struct ost_body *body;
@@ -252,7 +131,7 @@ static int osc_getattr(const struct lu_env *env, struct obd_export *exp,
return rc;
}
- osc_pack_req_body(req, oinfo);
+ osc_pack_req_body(req, oa);
ptlrpc_request_set_replen(req);
@@ -267,11 +146,11 @@ static int osc_getattr(const struct lu_env *env, struct obd_export *exp,
}
CDEBUG(D_INODE, "mode: %o\n", body->oa.o_mode);
- lustre_get_wire_obdo(&req->rq_import->imp_connect_data, oinfo->oi_oa,
+ lustre_get_wire_obdo(&req->rq_import->imp_connect_data, oa,
&body->oa);
- oinfo->oi_oa->o_blksize = cli_brw_size(exp->exp_obd);
- oinfo->oi_oa->o_valid |= OBD_MD_FLBLKSZ;
+ oa->o_blksize = cli_brw_size(exp->exp_obd);
+ oa->o_valid |= OBD_MD_FLBLKSZ;
out:
ptlrpc_req_finished(req);
@@ -279,13 +158,13 @@ static int osc_getattr(const struct lu_env *env, struct obd_export *exp,
}
static int osc_setattr(const struct lu_env *env, struct obd_export *exp,
- struct obd_info *oinfo, struct obd_trans_info *oti)
+ struct obdo *oa)
{
struct ptlrpc_request *req;
struct ost_body *body;
int rc;
- LASSERT(oinfo->oi_oa->o_valid & OBD_MD_FLGROUP);
+ LASSERT(oa->o_valid & OBD_MD_FLGROUP);
req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_SETATTR);
if (!req)
@@ -297,7 +176,7 @@ static int osc_setattr(const struct lu_env *env, struct obd_export *exp,
return rc;
}
- osc_pack_req_body(req, oinfo);
+ osc_pack_req_body(req, oa);
ptlrpc_request_set_replen(req);
@@ -311,7 +190,7 @@ static int osc_setattr(const struct lu_env *env, struct obd_export *exp,
goto out;
}
- lustre_get_wire_obdo(&req->rq_import->imp_connect_data, oinfo->oi_oa,
+ lustre_get_wire_obdo(&req->rq_import->imp_connect_data, oa,
&body->oa);
out:
@@ -341,10 +220,9 @@ out:
return rc;
}
-int osc_setattr_async_base(struct obd_export *exp, struct obd_info *oinfo,
- struct obd_trans_info *oti,
- obd_enqueue_update_f upcall, void *cookie,
- struct ptlrpc_request_set *rqset)
+int osc_setattr_async(struct obd_export *exp, struct obdo *oa,
+ obd_enqueue_update_f upcall, void *cookie,
+ struct ptlrpc_request_set *rqset)
{
struct ptlrpc_request *req;
struct osc_setattr_args *sa;
@@ -360,10 +238,7 @@ int osc_setattr_async_base(struct obd_export *exp, struct obd_info *oinfo,
return rc;
}
- if (oti && oinfo->oi_oa->o_valid & OBD_MD_FLCOOKIE)
- oinfo->oi_oa->o_lcookie = *oti->oti_logcookies;
-
- osc_pack_req_body(req, oinfo);
+ osc_pack_req_body(req, oa);
ptlrpc_request_set_replen(req);
@@ -377,7 +252,7 @@ int osc_setattr_async_base(struct obd_export *exp, struct obd_info *oinfo,
CLASSERT(sizeof(*sa) <= sizeof(req->rq_async_args));
sa = ptlrpc_req_async_args(req);
- sa->sa_oa = oinfo->oi_oa;
+ sa->sa_oa = oa;
sa->sa_upcall = upcall;
sa->sa_cookie = cookie;
@@ -390,16 +265,8 @@ int osc_setattr_async_base(struct obd_export *exp, struct obd_info *oinfo,
return 0;
}
-static int osc_setattr_async(struct obd_export *exp, struct obd_info *oinfo,
- struct obd_trans_info *oti,
- struct ptlrpc_request_set *rqset)
-{
- return osc_setattr_async_base(exp, oinfo, oti,
- oinfo->oi_cb_up, oinfo, rqset);
-}
-
static int osc_create(const struct lu_env *env, struct obd_export *exp,
- struct obdo *oa, struct obd_trans_info *oti)
+ struct obdo *oa)
{
struct ptlrpc_request *req;
struct ost_body *body;
@@ -428,15 +295,6 @@ static int osc_create(const struct lu_env *env, struct obd_export *exp,
ptlrpc_request_set_replen(req);
- if ((oa->o_valid & OBD_MD_FLFLAGS) &&
- oa->o_flags == OBD_FL_DELORPHAN) {
- DEBUG_REQ(D_HA, req,
- "delorphan from OST integration");
- /* Don't resend the delorphan req */
- req->rq_no_resend = 1;
- req->rq_no_delay = 1;
- }
-
rc = ptlrpc_queue_wait(req);
if (rc)
goto out_req;
@@ -453,12 +311,6 @@ static int osc_create(const struct lu_env *env, struct obd_export *exp,
oa->o_blksize = cli_brw_size(exp->exp_obd);
oa->o_valid |= OBD_MD_FLBLKSZ;
- if (oti && oa->o_valid & OBD_MD_FLCOOKIE) {
- if (!oti->oti_logcookies)
- oti->oti_logcookies = &oti->oti_onecookie;
- *oti->oti_logcookies = oa->o_lcookie;
- }
-
CDEBUG(D_HA, "transno: %lld\n",
lustre_msg_get_transno(req->rq_repmsg));
out_req:
@@ -467,7 +319,7 @@ out:
return rc;
}
-int osc_punch_base(struct obd_export *exp, struct obd_info *oinfo,
+int osc_punch_base(struct obd_export *exp, struct obdo *oa,
obd_enqueue_update_f upcall, void *cookie,
struct ptlrpc_request_set *rqset)
{
@@ -491,14 +343,14 @@ int osc_punch_base(struct obd_export *exp, struct obd_info *oinfo,
body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
LASSERT(body);
lustre_set_wire_obdo(&req->rq_import->imp_connect_data, &body->oa,
- oinfo->oi_oa);
+ oa);
ptlrpc_request_set_replen(req);
req->rq_interpret_reply = (ptlrpc_interpterer_t)osc_setattr_interpret;
CLASSERT(sizeof(*sa) <= sizeof(req->rq_async_args));
sa = ptlrpc_req_async_args(req);
- sa->sa_oa = oinfo->oi_oa;
+ sa->sa_oa = oa;
sa->sa_upcall = upcall;
sa->sa_cookie = cookie;
if (rqset == PTLRPCD_SET)
@@ -513,8 +365,11 @@ static int osc_sync_interpret(const struct lu_env *env,
struct ptlrpc_request *req,
void *arg, int rc)
{
+ struct cl_attr *attr = &osc_env_info(env)->oti_attr;
struct osc_fsync_args *fa = arg;
+ unsigned long valid = 0;
struct ost_body *body;
+ struct cl_object *obj;
if (rc)
goto out;
@@ -526,16 +381,30 @@ static int osc_sync_interpret(const struct lu_env *env,
goto out;
}
- *fa->fa_oi->oi_oa = body->oa;
+ *fa->fa_oa = body->oa;
+ obj = osc2cl(fa->fa_obj);
+
+ /* Update osc object's blocks attribute */
+ cl_object_attr_lock(obj);
+ if (body->oa.o_valid & OBD_MD_FLBLOCKS) {
+ attr->cat_blocks = body->oa.o_blocks;
+ valid |= CAT_BLOCKS;
+ }
+
+ if (valid)
+ cl_object_attr_update(env, obj, attr, valid);
+ cl_object_attr_unlock(obj);
+
out:
rc = fa->fa_upcall(fa->fa_cookie, rc);
return rc;
}
-int osc_sync_base(struct obd_export *exp, struct obd_info *oinfo,
+int osc_sync_base(struct osc_object *obj, struct obdo *oa,
obd_enqueue_update_f upcall, void *cookie,
struct ptlrpc_request_set *rqset)
{
+ struct obd_export *exp = osc_export(obj);
struct ptlrpc_request *req;
struct ost_body *body;
struct osc_fsync_args *fa;
@@ -555,14 +424,15 @@ int osc_sync_base(struct obd_export *exp, struct obd_info *oinfo,
body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
LASSERT(body);
lustre_set_wire_obdo(&req->rq_import->imp_connect_data, &body->oa,
- oinfo->oi_oa);
+ oa);
ptlrpc_request_set_replen(req);
req->rq_interpret_reply = osc_sync_interpret;
CLASSERT(sizeof(*fa) <= sizeof(req->rq_async_args));
fa = ptlrpc_req_async_args(req);
- fa->fa_oi = oinfo;
+ fa->fa_obj = obj;
+ fa->fa_oa = oa;
fa->fa_upcall = upcall;
fa->fa_cookie = cookie;
@@ -639,19 +509,8 @@ static int osc_can_send_destroy(struct client_obd *cli)
return 0;
}
-/* Destroy requests can be async always on the client, and we don't even really
- * care about the return code since the client cannot do anything at all about
- * a destroy failure.
- * When the MDS is unlinking a filename, it saves the file objects into a
- * recovery llog, and these object records are cancelled when the OST reports
- * they were destroyed and sync'd to disk (i.e. transaction committed).
- * If the client dies, or the OST is down when the object should be destroyed,
- * the records are not cancelled, and when the OST reconnects to the MDS next,
- * it will retrieve the llog unlink logs and then sends the log cancellation
- * cookies to the MDS after committing destroy transactions.
- */
static int osc_destroy(const struct lu_env *env, struct obd_export *exp,
- struct obdo *oa, struct obd_trans_info *oti)
+ struct obdo *oa)
{
struct client_obd *cli = &exp->exp_obd->u.cli;
struct ptlrpc_request *req;
@@ -683,32 +542,22 @@ static int osc_destroy(const struct lu_env *env, struct obd_export *exp,
req->rq_request_portal = OST_IO_PORTAL; /* bug 7198 */
ptlrpc_at_set_req_timeout(req);
- if (oti && oa->o_valid & OBD_MD_FLCOOKIE)
- oa->o_lcookie = *oti->oti_logcookies;
body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
LASSERT(body);
lustre_set_wire_obdo(&req->rq_import->imp_connect_data, &body->oa, oa);
ptlrpc_request_set_replen(req);
- /* If osc_destroy is for destroying the unlink orphan,
- * sent from MDT to OST, which should not be blocked here,
- * because the process might be triggered by ptlrpcd, and
- * it is not good to block ptlrpcd thread (b=16006
- **/
- if (!(oa->o_flags & OBD_FL_DELORPHAN)) {
- req->rq_interpret_reply = osc_destroy_interpret;
- if (!osc_can_send_destroy(cli)) {
- struct l_wait_info lwi = LWI_INTR(LWI_ON_SIGNAL_NOOP,
- NULL);
-
- /*
- * Wait until the number of on-going destroy RPCs drops
- * under max_rpc_in_flight
- */
- l_wait_event_exclusive(cli->cl_destroy_waitq,
- osc_can_send_destroy(cli), &lwi);
- }
+ req->rq_interpret_reply = osc_destroy_interpret;
+ if (!osc_can_send_destroy(cli)) {
+ struct l_wait_info lwi = LWI_INTR(LWI_ON_SIGNAL_NOOP, NULL);
+
+ /*
+ * Wait until the number of on-going destroy RPCs drops
+ * under max_rpc_in_flight
+ */
+ l_wait_event_exclusive(cli->cl_destroy_waitq,
+ osc_can_send_destroy(cli), &lwi);
}
/* Do not wait for response */
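With the DELORPHAN special case dropped, osc_destroy() now always throttles: once in-flight destroy RPCs reach max_rpc_in_flight, the caller sleeps on cl_destroy_waitq until osc_can_send_destroy() passes, and each completion wakes one exclusive waiter. A condensed user-space model of that throttle, with POSIX primitives standing in for l_wait_event_exclusive() (names and structure are hypothetical):

#include <pthread.h>

/* Hypothetical model of the destroy-RPC throttle. */
struct destroy_throttle {
	pthread_mutex_t lock;
	pthread_cond_t waitq;		/* plays cl_destroy_waitq */
	int in_flight;
	int max_in_flight;		/* plays cl_max_rpcs_in_flight */
};

static void throttle_acquire(struct destroy_throttle *t)
{
	pthread_mutex_lock(&t->lock);
	while (t->in_flight >= t->max_in_flight)	/* !osc_can_send_destroy() */
		pthread_cond_wait(&t->waitq, &t->lock);
	t->in_flight++;
	pthread_mutex_unlock(&t->lock);
}

static void throttle_release(struct destroy_throttle *t)
{
	pthread_mutex_lock(&t->lock);
	t->in_flight--;
	pthread_cond_signal(&t->waitq);	/* wake one waiter, like an exclusive wait */
	pthread_mutex_unlock(&t->lock);
}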
@@ -734,14 +583,13 @@ static void osc_announce_cached(struct client_obd *cli, struct obdo *oa,
oa->o_undirty = 0;
} else if (unlikely(atomic_long_read(&obd_dirty_pages) -
atomic_long_read(&obd_dirty_transit_pages) >
- (obd_max_dirty_pages + 1))) {
+ (long)(obd_max_dirty_pages + 1))) {
/* The atomic_read() allowing the atomic_inc() are
* not covered by a lock thus they may safely race and trip
* this CERROR() unless we add in a small fudge factor (+1).
*/
- CERROR("%s: dirty %ld + %ld > system dirty_max %lu\n",
- cli->cl_import->imp_obd->obd_name,
- atomic_long_read(&obd_dirty_pages),
+ CERROR("%s: dirty %ld + %ld > system dirty_max %ld\n",
+ cli_name(cli), atomic_long_read(&obd_dirty_pages),
atomic_long_read(&obd_dirty_transit_pages),
obd_max_dirty_pages);
oa->o_undirty = 0;
@@ -936,12 +784,10 @@ static int osc_add_shrink_grant(struct client_obd *client)
osc_grant_shrink_grant_cb, NULL,
&client->cl_grant_shrink_list);
if (rc) {
- CERROR("add grant client %s error %d\n",
- client->cl_import->imp_obd->obd_name, rc);
+ CERROR("add grant client %s error %d\n", cli_name(client), rc);
return rc;
}
- CDEBUG(D_CACHE, "add grant client %s\n",
- client->cl_import->imp_obd->obd_name);
+ CDEBUG(D_CACHE, "add grant client %s\n", cli_name(client));
osc_update_next_shrink(client);
return 0;
}
@@ -970,23 +816,13 @@ static void osc_init_grant(struct client_obd *cli, struct obd_connect_data *ocd)
cli->cl_avail_grant = ocd->ocd_grant -
(cli->cl_dirty_pages << PAGE_SHIFT);
- if (cli->cl_avail_grant < 0) {
- CWARN("%s: available grant < 0: avail/ocd/dirty %ld/%u/%ld\n",
- cli->cl_import->imp_obd->obd_name, cli->cl_avail_grant,
- ocd->ocd_grant, cli->cl_dirty_pages << PAGE_SHIFT);
- /* workaround for servers which do not have the patch from
- * LU-2679
- */
- cli->cl_avail_grant = ocd->ocd_grant;
- }
-
/* determine the appropriate chunk size used by osc_extent. */
cli->cl_chunkbits = max_t(int, PAGE_SHIFT, ocd->ocd_blocksize);
spin_unlock(&cli->cl_loi_list_lock);
CDEBUG(D_CACHE, "%s, setting cl_avail_grant: %ld cl_lost_grant: %ld chunk bits: %d\n",
- cli->cl_import->imp_obd->obd_name,
- cli->cl_avail_grant, cli->cl_lost_grant, cli->cl_chunkbits);
+ cli_name(cli), cli->cl_avail_grant, cli->cl_lost_grant,
+ cli->cl_chunkbits);
if (ocd->ocd_connect_flags & OBD_CONNECT_GRANT_SHRINK &&
list_empty(&cli->cl_grant_shrink_list))
@@ -1072,9 +908,9 @@ static int check_write_rcs(struct ptlrpc_request *req,
static inline int can_merge_pages(struct brw_page *p1, struct brw_page *p2)
{
if (p1->flag != p2->flag) {
- unsigned mask = ~(OBD_BRW_FROM_GRANT | OBD_BRW_NOCACHE |
- OBD_BRW_SYNC | OBD_BRW_ASYNC |
- OBD_BRW_NOQUOTA | OBD_BRW_SOFT_SYNC);
+ unsigned int mask = ~(OBD_BRW_FROM_GRANT | OBD_BRW_NOCACHE |
+ OBD_BRW_SYNC | OBD_BRW_ASYNC |
+ OBD_BRW_NOQUOTA | OBD_BRW_SOFT_SYNC);
/* warn if we try to combine flags that we don't know to be
* safe to combine
@@ -1097,7 +933,6 @@ static u32 osc_checksum_bulk(int nob, u32 pg_count,
int i = 0;
struct cfs_crypto_hash_desc *hdesc;
unsigned int bufsize;
- int err;
unsigned char cfs_alg = cksum_obd2cfs(cksum_type);
LASSERT(pg_count > 0);
@@ -1139,7 +974,7 @@ static u32 osc_checksum_bulk(int nob, u32 pg_count,
}
bufsize = sizeof(cksum);
- err = cfs_crypto_hash_final(hdesc, (unsigned char *)&cksum, &bufsize);
+ cfs_crypto_hash_final(hdesc, (unsigned char *)&cksum, &bufsize);
/* For sending we only compute the wrong checksum instead
* of corrupting the data so it is still correct on a redo
@@ -1151,8 +986,7 @@ static u32 osc_checksum_bulk(int nob, u32 pg_count,
}
static int osc_brw_prep_request(int cmd, struct client_obd *cli,
- struct obdo *oa,
- struct lov_stripe_md *lsm, u32 page_count,
+ struct obdo *oa, u32 page_count,
struct brw_page **pga,
struct ptlrpc_request **reqp,
int reserve,
@@ -1210,8 +1044,9 @@ static int osc_brw_prep_request(int cmd, struct client_obd *cli,
desc = ptlrpc_prep_bulk_imp(req, page_count,
cli->cl_import->imp_connect_data.ocd_brw_size >> LNET_MTU_BITS,
- opc == OST_WRITE ? BULK_GET_SOURCE : BULK_PUT_SINK,
- OST_BULK_PORTAL);
+ (opc == OST_WRITE ? PTLRPC_BULK_GET_SOURCE :
+ PTLRPC_BULK_PUT_SINK) | PTLRPC_BULK_BUF_KIOV, OST_BULK_PORTAL,
+ &ptlrpc_bulk_kiov_pin_ops);
if (!desc) {
rc = -ENOMEM;
@@ -1259,7 +1094,7 @@ static int osc_brw_prep_request(int cmd, struct client_obd *cli,
LASSERT((pga[0]->flag & OBD_BRW_SRVLOCK) ==
(pg->flag & OBD_BRW_SRVLOCK));
- ptlrpc_prep_bulk_page_pin(desc, pg->pg, poff, pg->count);
+ desc->bd_frag_ops->add_kiov_frag(desc, pg->pg, poff, pg->count);
requested_nob += pg->count;
if (i > 0 && can_merge_pages(pg_prev, pg)) {
@@ -1569,7 +1404,6 @@ static int osc_brw_redo_request(struct ptlrpc_request *request,
rc = osc_brw_prep_request(lustre_msg_get_opc(request->rq_reqmsg) ==
OST_WRITE ? OBD_BRW_WRITE : OBD_BRW_READ,
aa->aa_cli, aa->aa_oa,
- NULL /* lsm unused by osc currently */,
aa->aa_page_count, aa->aa_ppga,
&new_req, 0, 1);
if (rc)
@@ -1764,8 +1598,6 @@ static int brw_interpret(const struct lu_env *env,
LASSERT(list_empty(&aa->aa_exts));
LASSERT(list_empty(&aa->aa_oaps));
- cl_req_completion(env, aa->aa_clerq, rc < 0 ? rc :
- req->rq_bulk->bd_nob_transferred);
osc_release_ppga(aa->aa_ppga, aa->aa_page_count);
ptlrpc_lprocfs_brw(req, req->rq_bulk->bd_nob_transferred);
@@ -1818,9 +1650,7 @@ int osc_build_rpc(const struct lu_env *env, struct client_obd *cli,
struct osc_brw_async_args *aa = NULL;
struct obdo *oa = NULL;
struct osc_async_page *oap;
- struct osc_async_page *tmp;
- struct cl_req *clerq = NULL;
- enum cl_req_type crt = (cmd & OBD_BRW_WRITE) ? CRT_WRITE : CRT_READ;
+ struct osc_object *obj = NULL;
struct cl_req_attr *crattr = NULL;
u64 starting_offset = OBD_OBJECT_EOF;
u64 ending_offset = 0;
@@ -1828,6 +1658,7 @@ int osc_build_rpc(const struct lu_env *env, struct client_obd *cli,
int mem_tight = 0;
int page_count = 0;
bool soft_sync = false;
+ bool interrupted = false;
int i;
int rc;
struct ost_body *body;
@@ -1839,32 +1670,15 @@ int osc_build_rpc(const struct lu_env *env, struct client_obd *cli,
list_for_each_entry(ext, ext_list, oe_link) {
LASSERT(ext->oe_state == OES_RPC);
mem_tight |= ext->oe_memalloc;
- list_for_each_entry(oap, &ext->oe_pages, oap_pending_item) {
- ++page_count;
- list_add_tail(&oap->oap_rpc_item, &rpc_list);
- if (starting_offset > oap->oap_obj_off)
- starting_offset = oap->oap_obj_off;
- else
- LASSERT(oap->oap_page_off == 0);
- if (ending_offset < oap->oap_obj_off + oap->oap_count)
- ending_offset = oap->oap_obj_off +
- oap->oap_count;
- else
- LASSERT(oap->oap_page_off + oap->oap_count ==
- PAGE_SIZE);
- }
+ page_count += ext->oe_nr_pages;
+ if (!obj)
+ obj = ext->oe_obj;
}
soft_sync = osc_over_unstable_soft_limit(cli);
if (mem_tight)
mpflag = cfs_memory_pressure_get_and_set();
- crattr = kzalloc(sizeof(*crattr), GFP_NOFS);
- if (!crattr) {
- rc = -ENOMEM;
- goto out;
- }
-
pga = kcalloc(page_count, sizeof(*pga), GFP_NOFS);
if (!pga) {
rc = -ENOMEM;
@@ -1878,44 +1692,46 @@ int osc_build_rpc(const struct lu_env *env, struct client_obd *cli,
}
i = 0;
- list_for_each_entry(oap, &rpc_list, oap_rpc_item) {
- struct cl_page *page = oap2cl_page(oap);
-
- if (!clerq) {
- clerq = cl_req_alloc(env, page, crt,
- 1 /* only 1-object rpcs for now */);
- if (IS_ERR(clerq)) {
- rc = PTR_ERR(clerq);
- goto out;
- }
+ list_for_each_entry(ext, ext_list, oe_link) {
+ list_for_each_entry(oap, &ext->oe_pages, oap_pending_item) {
+ if (mem_tight)
+ oap->oap_brw_flags |= OBD_BRW_MEMALLOC;
+ if (soft_sync)
+ oap->oap_brw_flags |= OBD_BRW_SOFT_SYNC;
+ pga[i] = &oap->oap_brw_page;
+ pga[i]->off = oap->oap_obj_off + oap->oap_page_off;
+ i++;
+
+ list_add_tail(&oap->oap_rpc_item, &rpc_list);
+ if (starting_offset == OBD_OBJECT_EOF ||
+ starting_offset > oap->oap_obj_off)
+ starting_offset = oap->oap_obj_off;
+ else
+ LASSERT(!oap->oap_page_off);
+ if (ending_offset < oap->oap_obj_off + oap->oap_count)
+ ending_offset = oap->oap_obj_off +
+ oap->oap_count;
+ else
+ LASSERT(oap->oap_page_off + oap->oap_count ==
+ PAGE_SIZE);
+ if (oap->oap_interrupted)
+ interrupted = true;
}
- if (mem_tight)
- oap->oap_brw_flags |= OBD_BRW_MEMALLOC;
- if (soft_sync)
- oap->oap_brw_flags |= OBD_BRW_SOFT_SYNC;
- pga[i] = &oap->oap_brw_page;
- pga[i]->off = oap->oap_obj_off + oap->oap_page_off;
- CDEBUG(0, "put page %p index %lu oap %p flg %x to pga\n",
- pga[i]->pg, oap->oap_page->index, oap,
- pga[i]->flag);
- i++;
- cl_req_page_add(env, clerq, page);
}
- /* always get the data for the obdo for the rpc */
- LASSERT(clerq);
- crattr->cra_oa = oa;
- cl_req_attr_set(env, clerq, crattr, ~0ULL);
+ /* first page in the list */
+ oap = list_entry(rpc_list.next, typeof(*oap), oap_rpc_item);
- rc = cl_req_prep(env, clerq);
- if (rc != 0) {
- CERROR("cl_req_prep failed: %d\n", rc);
- goto out;
- }
+ crattr = &osc_env_info(env)->oti_req_attr;
+ memset(crattr, 0, sizeof(*crattr));
+ crattr->cra_type = (cmd & OBD_BRW_WRITE) ? CRT_WRITE : CRT_READ;
+ crattr->cra_flags = ~0ULL;
+ crattr->cra_page = oap2cl_page(oap);
+ crattr->cra_oa = oa;
+ cl_req_attr_set(env, osc2cl(obj), crattr);
sort_brw_pages(pga, page_count);
- rc = osc_brw_prep_request(cmd, cli, oa, NULL, page_count,
- pga, &req, 1, 0);
+ rc = osc_brw_prep_request(cmd, cli, oa, page_count, pga, &req, 1, 0);
if (rc != 0) {
CERROR("prep_req failed: %d\n", rc);
goto out;
@@ -1924,8 +1740,10 @@ int osc_build_rpc(const struct lu_env *env, struct client_obd *cli,
req->rq_commit_cb = brw_commit;
req->rq_interpret_reply = brw_interpret;
- if (mem_tight != 0)
- req->rq_memalloc = 1;
+ req->rq_memalloc = mem_tight != 0;
+ oap->oap_request = ptlrpc_request_addref(req);
+ if (interrupted && !req->rq_intr)
+ ptlrpc_mark_interrupted(req);
/* Need to update the timestamps after the request is built in case
* we race with setattr (locally or in queue at OST). If OST gets
@@ -1935,9 +1753,8 @@ int osc_build_rpc(const struct lu_env *env, struct client_obd *cli,
*/
body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
crattr->cra_oa = &body->oa;
- cl_req_attr_set(env, clerq, crattr,
- OBD_MD_FLMTIME | OBD_MD_FLCTIME | OBD_MD_FLATIME);
-
+ crattr->cra_flags = OBD_MD_FLMTIME | OBD_MD_FLCTIME | OBD_MD_FLATIME;
+ cl_req_attr_set(env, osc2cl(obj), crattr);
lustre_msg_set_jobid(req->rq_reqmsg, crattr->cra_jobid);
CLASSERT(sizeof(*aa) <= sizeof(req->rq_async_args));
@@ -1946,24 +1763,6 @@ int osc_build_rpc(const struct lu_env *env, struct client_obd *cli,
list_splice_init(&rpc_list, &aa->aa_oaps);
INIT_LIST_HEAD(&aa->aa_exts);
list_splice_init(ext_list, &aa->aa_exts);
- aa->aa_clerq = clerq;
-
- /* queued sync pages can be torn down while the pages
- * were between the pending list and the rpc
- */
- tmp = NULL;
- list_for_each_entry(oap, &aa->aa_oaps, oap_rpc_item) {
- /* only one oap gets a request reference */
- if (!tmp)
- tmp = oap;
- if (oap->oap_interrupted && !req->rq_intr) {
- CDEBUG(D_INODE, "oap %p in req %p interrupted\n",
- oap, req);
- ptlrpc_mark_interrupted(req);
- }
- }
- if (tmp)
- tmp->oap_request = ptlrpc_request_addref(req);
spin_lock(&cli->cl_loi_list_lock);
starting_offset >>= PAGE_SHIFT;
@@ -1985,6 +1784,7 @@ int osc_build_rpc(const struct lu_env *env, struct client_obd *cli,
DEBUG_REQ(D_INODE, req, "%d pages, aa %p. now %ur/%dw in flight",
page_count, aa, cli->cl_r_in_flight,
cli->cl_w_in_flight);
+ OBD_FAIL_TIMEOUT(OBD_FAIL_OSC_DELAY_IO, cfs_fail_val);
ptlrpcd_add_req(req);
rc = 0;
@@ -1993,8 +1793,6 @@ out:
if (mem_tight != 0)
cfs_memory_pressure_restore(mpflag);
- kfree(crattr);
-
if (rc != 0) {
LASSERT(!req);
@@ -2010,22 +1808,15 @@ out:
list_del_init(&ext->oe_link);
osc_extent_finish(env, ext, 0, rc);
}
- if (clerq && !IS_ERR(clerq))
- cl_req_completion(env, clerq, rc);
}
return rc;
}
-static int osc_set_lock_data_with_check(struct ldlm_lock *lock,
- struct ldlm_enqueue_info *einfo)
+static int osc_set_lock_data(struct ldlm_lock *lock, void *data)
{
- void *data = einfo->ei_cbdata;
int set = 0;
- LASSERT(lock->l_blocking_ast == einfo->ei_cb_bl);
- LASSERT(lock->l_resource->lr_type == einfo->ei_type);
- LASSERT(lock->l_completion_ast == einfo->ei_cb_cp);
- LASSERT(lock->l_glimpse_ast == einfo->ei_cb_gl);
+ LASSERT(lock);
lock_res_and_lock(lock);
@@ -2039,21 +1830,6 @@ static int osc_set_lock_data_with_check(struct ldlm_lock *lock,
return set;
}
-static int osc_set_data_with_check(struct lustre_handle *lockh,
- struct ldlm_enqueue_info *einfo)
-{
- struct ldlm_lock *lock = ldlm_handle2lock(lockh);
- int set = 0;
-
- if (lock) {
- set = osc_set_lock_data_with_check(lock, einfo);
- LDLM_LOCK_PUT(lock);
- } else
- CERROR("lockh %p, data %p - client evicted?\n",
- lockh, einfo->ei_cbdata);
- return set;
-}
-
static int osc_enqueue_fini(struct ptlrpc_request *req,
osc_enqueue_upcall_f upcall, void *cookie,
struct lustre_handle *lockh, enum ldlm_mode mode,
@@ -2153,7 +1929,7 @@ struct ptlrpc_request_set *PTLRPCD_SET = (void *)1;
* release locks just after they are obtained.
*/
int osc_enqueue_base(struct obd_export *exp, struct ldlm_res_id *res_id,
- __u64 *flags, ldlm_policy_data_t *policy,
+ __u64 *flags, union ldlm_policy_data *policy,
struct ost_lvb *lvb, int kms_valid,
osc_enqueue_upcall_f upcall, void *cookie,
struct ldlm_enqueue_info *einfo,
@@ -2219,7 +1995,7 @@ int osc_enqueue_base(struct obd_export *exp, struct ldlm_res_id *res_id,
ldlm_lock_decref(&lockh, mode);
LDLM_LOCK_PUT(matched);
return -ECANCELED;
- } else if (osc_set_lock_data_with_check(matched, einfo)) {
+ } else if (osc_set_lock_data(matched, einfo->ei_cbdata)) {
*flags |= LDLM_FL_LVB_READY;
/* We already have a lock, and it's referenced. */
(*upcall)(cookie, &lockh, ELDLM_LOCK_MATCHED);
@@ -2304,7 +2080,7 @@ no_match:
}
int osc_match_base(struct obd_export *exp, struct ldlm_res_id *res_id,
- __u32 type, ldlm_policy_data_t *policy, __u32 mode,
+ __u32 type, union ldlm_policy_data *policy, __u32 mode,
__u64 *flags, void *data, struct lustre_handle *lockh,
int unref)
{
@@ -2331,31 +2107,20 @@ int osc_match_base(struct obd_export *exp, struct ldlm_res_id *res_id,
rc |= LCK_PW;
rc = ldlm_lock_match(obd->obd_namespace, lflags,
res_id, type, policy, rc, lockh, unref);
- if (rc) {
- if (data) {
- if (!osc_set_data_with_check(lockh, data)) {
- if (!(lflags & LDLM_FL_TEST_LOCK))
- ldlm_lock_decref(lockh, rc);
- return 0;
- }
- }
- if (!(lflags & LDLM_FL_TEST_LOCK) && mode != rc) {
- ldlm_lock_addref(lockh, LCK_PR);
- ldlm_lock_decref(lockh, LCK_PW);
- }
+ if (!rc || lflags & LDLM_FL_TEST_LOCK)
return rc;
- }
- return rc;
-}
-int osc_cancel_base(struct lustre_handle *lockh, __u32 mode)
-{
- if (unlikely(mode == LCK_GROUP))
- ldlm_lock_decref_and_cancel(lockh, mode);
- else
- ldlm_lock_decref(lockh, mode);
+ if (data) {
+ struct ldlm_lock *lock = ldlm_handle2lock(lockh);
- return 0;
+ LASSERT(lock);
+ if (!osc_set_lock_data(lock, data)) {
+ ldlm_lock_decref(lockh, rc);
+ rc = 0;
+ }
+ LDLM_LOCK_PUT(lock);
+ }
+ return rc;
}
static int osc_statfs_interpret(const struct lu_env *env,
@@ -2526,9 +2291,6 @@ static int osc_iocontrol(unsigned int cmd, struct obd_export *exp, int len,
err = ptlrpc_set_import_active(obd->u.cli.cl_import,
data->ioc_offset);
goto out;
- case OBD_IOC_POLL_QUOTACHECK:
- err = osc_quota_poll_check(exp, karg);
- goto out;
case OBD_IOC_PING_TARGET:
err = ptlrpc_obd_ping(obd);
goto out;
@@ -2543,103 +2305,6 @@ out:
return err;
}
-static int osc_get_info(const struct lu_env *env, struct obd_export *exp,
- u32 keylen, void *key, __u32 *vallen, void *val,
- struct lov_stripe_md *lsm)
-{
- if (!vallen || !val)
- return -EFAULT;
-
- if (KEY_IS(KEY_FIEMAP)) {
- struct ll_fiemap_info_key *fm_key = key;
- struct ldlm_res_id res_id;
- ldlm_policy_data_t policy;
- struct lustre_handle lockh;
- enum ldlm_mode mode = 0;
- struct ptlrpc_request *req;
- struct ll_user_fiemap *reply;
- char *tmp;
- int rc;
-
- if (!(fm_key->fiemap.fm_flags & FIEMAP_FLAG_SYNC))
- goto skip_locking;
-
- policy.l_extent.start = fm_key->fiemap.fm_start &
- PAGE_MASK;
-
- if (OBD_OBJECT_EOF - fm_key->fiemap.fm_length <=
- fm_key->fiemap.fm_start + PAGE_SIZE - 1)
- policy.l_extent.end = OBD_OBJECT_EOF;
- else
- policy.l_extent.end = (fm_key->fiemap.fm_start +
- fm_key->fiemap.fm_length +
- PAGE_SIZE - 1) & PAGE_MASK;
-
- ostid_build_res_name(&fm_key->oa.o_oi, &res_id);
- mode = ldlm_lock_match(exp->exp_obd->obd_namespace,
- LDLM_FL_BLOCK_GRANTED |
- LDLM_FL_LVB_READY,
- &res_id, LDLM_EXTENT, &policy,
- LCK_PR | LCK_PW, &lockh, 0);
- if (mode) { /* lock is cached on client */
- if (mode != LCK_PR) {
- ldlm_lock_addref(&lockh, LCK_PR);
- ldlm_lock_decref(&lockh, LCK_PW);
- }
- } else { /* no cached lock, needs acquire lock on server side */
- fm_key->oa.o_valid |= OBD_MD_FLFLAGS;
- fm_key->oa.o_flags |= OBD_FL_SRVLOCK;
- }
-
-skip_locking:
- req = ptlrpc_request_alloc(class_exp2cliimp(exp),
- &RQF_OST_GET_INFO_FIEMAP);
- if (!req) {
- rc = -ENOMEM;
- goto drop_lock;
- }
-
- req_capsule_set_size(&req->rq_pill, &RMF_FIEMAP_KEY,
- RCL_CLIENT, keylen);
- req_capsule_set_size(&req->rq_pill, &RMF_FIEMAP_VAL,
- RCL_CLIENT, *vallen);
- req_capsule_set_size(&req->rq_pill, &RMF_FIEMAP_VAL,
- RCL_SERVER, *vallen);
-
- rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_GET_INFO);
- if (rc) {
- ptlrpc_request_free(req);
- goto drop_lock;
- }
-
- tmp = req_capsule_client_get(&req->rq_pill, &RMF_FIEMAP_KEY);
- memcpy(tmp, key, keylen);
- tmp = req_capsule_client_get(&req->rq_pill, &RMF_FIEMAP_VAL);
- memcpy(tmp, val, *vallen);
-
- ptlrpc_request_set_replen(req);
- rc = ptlrpc_queue_wait(req);
- if (rc)
- goto fini_req;
-
- reply = req_capsule_server_get(&req->rq_pill, &RMF_FIEMAP_VAL);
- if (!reply) {
- rc = -EPROTO;
- goto fini_req;
- }
-
- memcpy(val, reply, *vallen);
-fini_req:
- ptlrpc_req_finished(req);
-drop_lock:
- if (mode)
- ldlm_lock_decref(&lockh, LCK_PR);
- return rc;
- }
-
- return -EINVAL;
-}
-
static int osc_set_info_async(const struct lu_env *env, struct obd_export *exp,
u32 keylen, void *key, u32 vallen,
void *val, struct ptlrpc_request_set *set)
@@ -2999,47 +2664,33 @@ out_ptlrpcd:
return rc;
}
-static int osc_precleanup(struct obd_device *obd, enum obd_cleanup_stage stage)
+static int osc_precleanup(struct obd_device *obd)
{
- switch (stage) {
- case OBD_CLEANUP_EARLY: {
- struct obd_import *imp;
-
- imp = obd->u.cli.cl_import;
- CDEBUG(D_HA, "Deactivating import %s\n", obd->obd_name);
- /* ptlrpc_abort_inflight to stop an mds_lov_synchronize */
- ptlrpc_deactivate_import(imp);
- spin_lock(&imp->imp_lock);
- imp->imp_pingable = 0;
- spin_unlock(&imp->imp_lock);
- break;
+ struct client_obd *cli = &obd->u.cli;
+
+ /* LU-464
+ * for echo client, export may be on zombie list, wait for
+ * zombie thread to cull it, because cli.cl_import will be
+ * cleared in client_disconnect_export():
+ * class_export_destroy() -> obd_cleanup() ->
+ * echo_device_free() -> echo_client_cleanup() ->
+ * obd_disconnect() -> osc_disconnect() ->
+ * client_disconnect_export()
+ */
+ obd_zombie_barrier();
+ if (cli->cl_writeback_work) {
+ ptlrpcd_destroy_work(cli->cl_writeback_work);
+ cli->cl_writeback_work = NULL;
}
- case OBD_CLEANUP_EXPORTS: {
- struct client_obd *cli = &obd->u.cli;
- /* LU-464
- * for echo client, export may be on zombie list, wait for
- * zombie thread to cull it, because cli.cl_import will be
- * cleared in client_disconnect_export():
- * class_export_destroy() -> obd_cleanup() ->
- * echo_device_free() -> echo_client_cleanup() ->
- * obd_disconnect() -> osc_disconnect() ->
- * client_disconnect_export()
- */
- obd_zombie_barrier();
- if (cli->cl_writeback_work) {
- ptlrpcd_destroy_work(cli->cl_writeback_work);
- cli->cl_writeback_work = NULL;
- }
- if (cli->cl_lru_work) {
- ptlrpcd_destroy_work(cli->cl_lru_work);
- cli->cl_lru_work = NULL;
- }
- obd_cleanup_client_import(obd);
- ptlrpc_lprocfs_unregister_obd(obd);
- lprocfs_obd_cleanup(obd);
- break;
- }
+
+ if (cli->cl_lru_work) {
+ ptlrpcd_destroy_work(cli->cl_lru_work);
+ cli->cl_lru_work = NULL;
}
+
+ obd_cleanup_client_import(obd);
+ ptlrpc_lprocfs_unregister_obd(obd);
+ lprocfs_obd_cleanup(obd);
return 0;
}
@@ -3104,24 +2755,18 @@ static struct obd_ops osc_obd_ops = {
.disconnect = osc_disconnect,
.statfs = osc_statfs,
.statfs_async = osc_statfs_async,
- .unpackmd = osc_unpackmd,
.create = osc_create,
.destroy = osc_destroy,
.getattr = osc_getattr,
- .getattr_async = osc_getattr_async,
.setattr = osc_setattr,
- .setattr_async = osc_setattr_async,
.iocontrol = osc_iocontrol,
- .get_info = osc_get_info,
.set_info_async = osc_set_info_async,
.import_event = osc_import_event,
.process_config = osc_process_config,
.quotactl = osc_quotactl,
- .quotacheck = osc_quotacheck,
};
extern struct lu_kmem_descr osc_caches[];
-extern struct lock_class_key osc_ast_guard_class;
static int __init osc_init(void)
{
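A theme running through this file is replacing the hard-wired ptlrpc_prep_bulk_page_pin() call with the bd_frag_ops indirection, so one BRW path serves pinned kiov buffers and other buffer kinds alike. A minimal sketch of that ops-table pattern; the types and bodies here are simplified placeholders, and the real ops appear in the ptlrpc/client.c hunk below:

#include <stdio.h>

struct page;				/* opaque here */

/* Simplified mirror of struct ptlrpc_bulk_frag_ops. */
struct bulk_frag_ops {
	void (*add_kiov_frag)(void *desc, struct page *pg, int off, int len);
	void (*release_frags)(void *desc);
};

static void add_pinned_frag(void *desc, struct page *pg, int off, int len)
{
	/* real code: get_page(pg), then append a bio_vec to the kiov array */
	printf("pin frag off=%d len=%d\n", off, len);
}

static void put_pinned_frags(void *desc)
{
	/* real code: put_page() on every pinned page */
}

static const struct bulk_frag_ops kiov_pin_ops = {
	.add_kiov_frag = add_pinned_frag,
	.release_frags = put_pinned_frags,
};

/* Callers no longer name the pin policy; the descriptor carries it:
 *	desc->bd_frag_ops->add_kiov_frag(desc, pg, poff, pg->count);
 */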
diff --git a/drivers/staging/lustre/lustre/ptlrpc/client.c b/drivers/staging/lustre/lustre/ptlrpc/client.c
index 8c51d51a678b..804741362bc0 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/client.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/client.c
@@ -43,6 +43,18 @@
#include "ptlrpc_internal.h"
+const struct ptlrpc_bulk_frag_ops ptlrpc_bulk_kiov_pin_ops = {
+ .add_kiov_frag = ptlrpc_prep_bulk_page_pin,
+ .release_frags = ptlrpc_release_bulk_page_pin,
+};
+EXPORT_SYMBOL(ptlrpc_bulk_kiov_pin_ops);
+
+const struct ptlrpc_bulk_frag_ops ptlrpc_bulk_kiov_nopin_ops = {
+ .add_kiov_frag = ptlrpc_prep_bulk_page_nopin,
+ .release_frags = NULL,
+};
+EXPORT_SYMBOL(ptlrpc_bulk_kiov_nopin_ops);
+
static int ptlrpc_send_new_req(struct ptlrpc_request *req);
static int ptlrpcd_check_work(struct ptlrpc_request *req);
static int ptlrpc_unregister_reply(struct ptlrpc_request *request, int async);
@@ -95,24 +107,43 @@ struct ptlrpc_connection *ptlrpc_uuid_to_connection(struct obd_uuid *uuid)
* Allocate and initialize new bulk descriptor on the sender.
* Returns pointer to the descriptor or NULL on error.
*/
-struct ptlrpc_bulk_desc *ptlrpc_new_bulk(unsigned npages, unsigned max_brw,
- unsigned type, unsigned portal)
+struct ptlrpc_bulk_desc *ptlrpc_new_bulk(unsigned int nfrags,
+ unsigned int max_brw,
+ enum ptlrpc_bulk_op_type type,
+ unsigned int portal,
+ const struct ptlrpc_bulk_frag_ops *ops)
{
struct ptlrpc_bulk_desc *desc;
int i;
- desc = kzalloc(offsetof(struct ptlrpc_bulk_desc, bd_iov[npages]),
- GFP_NOFS);
+ /* ensure that exactly one of KIOV or IOVEC is set, with matching frag ops */
+ LASSERT((ptlrpc_is_bulk_desc_kiov(type) && ops->add_kiov_frag) ||
+ (ptlrpc_is_bulk_desc_kvec(type) && ops->add_iov_frag));
+
+ desc = kzalloc(sizeof(*desc), GFP_NOFS);
if (!desc)
return NULL;
+ if (type & PTLRPC_BULK_BUF_KIOV) {
+ GET_KIOV(desc) = kcalloc(nfrags, sizeof(*GET_KIOV(desc)),
+ GFP_NOFS);
+ if (!GET_KIOV(desc))
+ goto free_desc;
+ } else {
+ GET_KVEC(desc) = kcalloc(nfrags, sizeof(*GET_KVEC(desc)),
+ GFP_NOFS);
+ if (!GET_KVEC(desc))
+ goto free_desc;
+ }
+
spin_lock_init(&desc->bd_lock);
init_waitqueue_head(&desc->bd_waitq);
- desc->bd_max_iov = npages;
+ desc->bd_max_iov = nfrags;
desc->bd_iov_count = 0;
desc->bd_portal = portal;
desc->bd_type = type;
desc->bd_md_count = 0;
+ desc->bd_frag_ops = (struct ptlrpc_bulk_frag_ops *)ops;
LASSERT(max_brw > 0);
desc->bd_md_max_brw = min(max_brw, PTLRPC_BULK_OPS_COUNT);
/*
@@ -123,24 +154,31 @@ struct ptlrpc_bulk_desc *ptlrpc_new_bulk(unsigned npages, unsigned max_brw,
LNetInvalidateHandle(&desc->bd_mds[i]);
return desc;
+free_desc:
+ kfree(desc);
+ return NULL;
}
/**
* Prepare bulk descriptor for specified outgoing request \a req that
- * can fit \a npages * pages. \a type is bulk type. \a portal is where
+ * can fit \a nfrags fragments. \a type is bulk type. \a portal is where
* the bulk to be sent. Used on client-side.
* Returns pointer to newly allocated initialized bulk descriptor or NULL on
* error.
*/
struct ptlrpc_bulk_desc *ptlrpc_prep_bulk_imp(struct ptlrpc_request *req,
- unsigned npages, unsigned max_brw,
- unsigned type, unsigned portal)
+ unsigned int nfrags,
+ unsigned int max_brw,
+ unsigned int type,
+ unsigned int portal,
+ const struct ptlrpc_bulk_frag_ops *ops)
{
struct obd_import *imp = req->rq_import;
struct ptlrpc_bulk_desc *desc;
- LASSERT(type == BULK_PUT_SINK || type == BULK_GET_SOURCE);
- desc = ptlrpc_new_bulk(npages, max_brw, type, portal);
+ LASSERT(ptlrpc_is_bulk_op_passive(type));
+
+ desc = ptlrpc_new_bulk(nfrags, max_brw, type, portal, ops);
if (!desc)
return NULL;
@@ -158,56 +196,82 @@ struct ptlrpc_bulk_desc *ptlrpc_prep_bulk_imp(struct ptlrpc_request *req,
}
EXPORT_SYMBOL(ptlrpc_prep_bulk_imp);
-/**
- * Add a page \a page to the bulk descriptor \a desc.
- * Data to transfer in the page starts at offset \a pageoffset and
- * amount of data to transfer from the page is \a len
- */
void __ptlrpc_prep_bulk_page(struct ptlrpc_bulk_desc *desc,
struct page *page, int pageoffset, int len, int pin)
{
+ struct bio_vec *kiov;
+
LASSERT(desc->bd_iov_count < desc->bd_max_iov);
LASSERT(page);
LASSERT(pageoffset >= 0);
LASSERT(len > 0);
LASSERT(pageoffset + len <= PAGE_SIZE);
+ LASSERT(ptlrpc_is_bulk_desc_kiov(desc->bd_type));
+
+ kiov = &BD_GET_KIOV(desc, desc->bd_iov_count);
desc->bd_nob += len;
if (pin)
get_page(page);
- ptlrpc_add_bulk_page(desc, page, pageoffset, len);
+ kiov->bv_page = page;
+ kiov->bv_offset = pageoffset;
+ kiov->bv_len = len;
+
+ desc->bd_iov_count++;
}
EXPORT_SYMBOL(__ptlrpc_prep_bulk_page);
-/**
- * Uninitialize and free bulk descriptor \a desc.
- * Works on bulk descriptors both from server and client side.
- */
-void __ptlrpc_free_bulk(struct ptlrpc_bulk_desc *desc, int unpin)
+int ptlrpc_prep_bulk_frag(struct ptlrpc_bulk_desc *desc,
+ void *frag, int len)
{
- int i;
+ struct kvec *iovec;
+
+ LASSERT(desc->bd_iov_count < desc->bd_max_iov);
+ LASSERT(frag);
+ LASSERT(len > 0);
+ LASSERT(ptlrpc_is_bulk_desc_kvec(desc->bd_type));
+ iovec = &BD_GET_KVEC(desc, desc->bd_iov_count);
+
+ desc->bd_nob += len;
+
+ iovec->iov_base = frag;
+ iovec->iov_len = len;
+
+ desc->bd_iov_count++;
+
+ return desc->bd_nob;
+}
+EXPORT_SYMBOL(ptlrpc_prep_bulk_frag);
+
+void ptlrpc_free_bulk(struct ptlrpc_bulk_desc *desc)
+{
LASSERT(desc->bd_iov_count != LI_POISON); /* not freed already */
LASSERT(desc->bd_md_count == 0); /* network hands off */
LASSERT((desc->bd_export != NULL) ^ (desc->bd_import != NULL));
+ LASSERT(desc->bd_frag_ops);
- sptlrpc_enc_pool_put_pages(desc);
+ if (ptlrpc_is_bulk_desc_kiov(desc->bd_type))
+ sptlrpc_enc_pool_put_pages(desc);
if (desc->bd_export)
class_export_put(desc->bd_export);
else
class_import_put(desc->bd_import);
- if (unpin) {
- for (i = 0; i < desc->bd_iov_count; i++)
- put_page(desc->bd_iov[i].bv_page);
- }
+ if (desc->bd_frag_ops->release_frags)
+ desc->bd_frag_ops->release_frags(desc);
+
+ if (ptlrpc_is_bulk_desc_kiov(desc->bd_type))
+ kfree(GET_KIOV(desc));
+ else
+ kfree(GET_KVEC(desc));
kfree(desc);
}
-EXPORT_SYMBOL(__ptlrpc_free_bulk);
+EXPORT_SYMBOL(ptlrpc_free_bulk);
/**
* Set server timelimit for this req, i.e. how long are we willing to wait
@@ -589,6 +653,42 @@ static void __ptlrpc_free_req_to_pool(struct ptlrpc_request *request)
spin_unlock(&pool->prp_lock);
}
+void ptlrpc_add_unreplied(struct ptlrpc_request *req)
+{
+ struct obd_import *imp = req->rq_import;
+ struct list_head *tmp;
+ struct ptlrpc_request *iter;
+
+ assert_spin_locked(&imp->imp_lock);
+ LASSERT(list_empty(&req->rq_unreplied_list));
+
+ /* unreplied list is sorted by xid in ascending order */
+ list_for_each_prev(tmp, &imp->imp_unreplied_list) {
+ iter = list_entry(tmp, struct ptlrpc_request,
+ rq_unreplied_list);
+
+ LASSERT(req->rq_xid != iter->rq_xid);
+ if (req->rq_xid < iter->rq_xid)
+ continue;
+ list_add(&req->rq_unreplied_list, &iter->rq_unreplied_list);
+ return;
+ }
+ list_add(&req->rq_unreplied_list, &imp->imp_unreplied_list);
+}
+
+void ptlrpc_assign_next_xid_nolock(struct ptlrpc_request *req)
+{
+ req->rq_xid = ptlrpc_next_xid();
+ ptlrpc_add_unreplied(req);
+}
+
+static inline void ptlrpc_assign_next_xid(struct ptlrpc_request *req)
+{
+ spin_lock(&req->rq_import->imp_lock);
+ ptlrpc_assign_next_xid_nolock(req);
+ spin_unlock(&req->rq_import->imp_lock);
+}
+
int ptlrpc_request_bufs_pack(struct ptlrpc_request *request,
__u32 version, int opcode, char **bufs,
struct ptlrpc_cli_ctx *ctx)
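ptlrpc_add_unreplied() above keeps the unreplied list sorted by XID by scanning backwards from the tail; since fresh requests get ever-increasing XIDs, the common case inserts after a single probe. The same invariant on a plain sentinel-based circular list — a self-contained model, not the kernel's list_head API:

#include <stdint.h>

struct node {
	uint64_t xid;
	struct node *prev, *next;
};

/* Insert req into the ascending-XID list headed by the sentinel
 * 'head', walking from the tail as the real code does. */
static void add_sorted(struct node *head, struct node *req)
{
	struct node *it;

	for (it = head->prev; it != head; it = it->prev)
		if (req->xid > it->xid)
			break;		/* insert right after 'it' */

	req->next = it->next;
	req->prev = it;
	it->next->prev = req;
	it->next = req;
}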
@@ -637,8 +737,8 @@ int ptlrpc_request_bufs_pack(struct ptlrpc_request *request,
ptlrpc_at_set_req_timeout(request);
- request->rq_xid = ptlrpc_next_xid();
lustre_msg_set_opc(request->rq_reqmsg, opcode);
+ ptlrpc_assign_next_xid(request);
/* Let's setup deadline for req/reply/bulk unlink for opcode. */
if (cfs_fail_val == opcode) {
@@ -1129,7 +1229,9 @@ static int ptlrpc_check_status(struct ptlrpc_request *req)
lnet_nid_t nid = imp->imp_connection->c_peer.nid;
__u32 opc = lustre_msg_get_opc(req->rq_reqmsg);
- if (ptlrpc_console_allow(req))
+ /* -EAGAIN is normal when using POSIX flocks */
+ if (ptlrpc_console_allow(req) &&
+ !(opc == LDLM_ENQUEUE && err == -EAGAIN))
LCONSOLE_ERROR_MSG(0x011, "%s: operation %s to node %s failed: rc = %d\n",
imp->imp_obd->obd_name,
ll_opcode2str(opc),
@@ -1166,6 +1268,24 @@ static void ptlrpc_save_versions(struct ptlrpc_request *req)
versions[0], versions[1]);
}
+__u64 ptlrpc_known_replied_xid(struct obd_import *imp)
+{
+ struct ptlrpc_request *req;
+
+ assert_spin_locked(&imp->imp_lock);
+ if (list_empty(&imp->imp_unreplied_list))
+ return 0;
+
+ req = list_entry(imp->imp_unreplied_list.next, struct ptlrpc_request,
+ rq_unreplied_list);
+ LASSERTF(req->rq_xid >= 1, "XID:%llu\n", req->rq_xid);
+
+ if (imp->imp_known_replied_xid < req->rq_xid - 1)
+ imp->imp_known_replied_xid = req->rq_xid - 1;
+
+ return req->rq_xid - 1;
+}
+
/**
* Callback function called when client receives RPC reply for \a req.
* Returns 0 on success or error code.
@@ -1180,6 +1300,7 @@ static int after_reply(struct ptlrpc_request *req)
int rc;
struct timespec64 work_start;
long timediff;
+ u64 committed;
LASSERT(obd);
/* repbuf must be unlinked */
@@ -1206,6 +1327,10 @@ static int after_reply(struct ptlrpc_request *req)
return 0;
}
+ ktime_get_real_ts64(&work_start);
+ timediff = (work_start.tv_sec - req->rq_sent_tv.tv_sec) * USEC_PER_SEC +
+ (work_start.tv_nsec - req->rq_sent_tv.tv_nsec) /
+ NSEC_PER_USEC;
/*
* NB Until this point, the whole of the incoming message,
* including buflens, status etc is in the sender's byte order.
@@ -1235,13 +1360,6 @@ static int after_reply(struct ptlrpc_request *req)
spin_unlock(&req->rq_lock);
req->rq_nr_resend++;
- /* allocate new xid to avoid reply reconstruction */
- if (!req->rq_bulk) {
- /* new xid is already allocated for bulk in ptlrpc_check_set() */
- req->rq_xid = ptlrpc_next_xid();
- DEBUG_REQ(D_RPCTRACE, req, "Allocating new xid for resend on EINPROGRESS");
- }
-
/* Readjust the timeout for current conditions */
ptlrpc_at_set_req_timeout(req);
/*
@@ -1255,13 +1373,14 @@ static int after_reply(struct ptlrpc_request *req)
else
req->rq_sent = now + req->rq_nr_resend;
+ /* Resend for EINPROGRESS will use a new XID */
+ spin_lock(&imp->imp_lock);
+ list_del_init(&req->rq_unreplied_list);
+ spin_unlock(&imp->imp_lock);
+
return 0;
}
- ktime_get_real_ts64(&work_start);
- timediff = (work_start.tv_sec - req->rq_sent_tv.tv_sec) * USEC_PER_SEC +
- (work_start.tv_nsec - req->rq_sent_tv.tv_nsec) /
- NSEC_PER_USEC;
if (obd->obd_svc_stats) {
lprocfs_counter_add(obd->obd_svc_stats, PTLRPC_REQWAIT_CNTR,
timediff);
@@ -1338,10 +1457,9 @@ static int after_reply(struct ptlrpc_request *req)
}
/* Replay-enabled imports return commit-status information. */
- if (lustre_msg_get_last_committed(req->rq_repmsg)) {
- imp->imp_peer_committed_transno =
- lustre_msg_get_last_committed(req->rq_repmsg);
- }
+ committed = lustre_msg_get_last_committed(req->rq_repmsg);
+ if (likely(committed > imp->imp_peer_committed_transno))
+ imp->imp_peer_committed_transno = committed;
ptlrpc_free_committed(imp);
@@ -1373,9 +1491,17 @@ static int after_reply(struct ptlrpc_request *req)
static int ptlrpc_send_new_req(struct ptlrpc_request *req)
{
struct obd_import *imp = req->rq_import;
+ u64 min_xid = 0;
int rc;
LASSERT(req->rq_phase == RQ_PHASE_NEW);
+
+ /* do not try to go further if there is not enough memory in enc_pool */
+ if (req->rq_sent && req->rq_bulk)
+ if (req->rq_bulk->bd_iov_count > get_free_pages_in_pool() &&
+ pool_is_at_full_capacity())
+ return -ENOMEM;
+
if (req->rq_sent && (req->rq_sent > ktime_get_real_seconds()) &&
(!req->rq_generation_set ||
req->rq_import_generation == imp->imp_generation))
@@ -1385,6 +1511,9 @@ static int ptlrpc_send_new_req(struct ptlrpc_request *req)
spin_lock(&imp->imp_lock);
+ LASSERT(req->rq_xid);
+ LASSERT(!list_empty(&req->rq_unreplied_list));
+
if (!req->rq_generation_set)
req->rq_import_generation = imp->imp_generation;
@@ -1414,8 +1543,25 @@ static int ptlrpc_send_new_req(struct ptlrpc_request *req)
LASSERT(list_empty(&req->rq_list));
list_add_tail(&req->rq_list, &imp->imp_sending_list);
atomic_inc(&req->rq_import->imp_inflight);
+
+ /* Find the known replied XID from the unreplied list; CONNECT
+ * and DISCONNECT requests are skipped to keep the sanity check
+ * on the server side happy, see process_req_last_xid().
+ *
+ * For CONNECT: because replay requests have lower XIDs, the
+ * sanity check would break if CONNECT bumped the exp_last_xid
+ * on the server.
+ *
+ * For DISCONNECT: since the client aborts inflight RPCs before
+ * sending DISCONNECT, DISCONNECT may carry an XID higher than
+ * those of the inflight RPCs.
+ */
+ if (!ptlrpc_req_is_connect(req) && !ptlrpc_req_is_disconnect(req))
+ min_xid = ptlrpc_known_replied_xid(imp);
spin_unlock(&imp->imp_lock);
+ lustre_msg_set_last_xid(req->rq_reqmsg, min_xid);
+
lustre_msg_set_status(req->rq_reqmsg, current_pid());
rc = sptlrpc_req_refresh_ctx(req, -1);
@@ -1438,6 +1584,16 @@ static int ptlrpc_send_new_req(struct ptlrpc_request *req)
lustre_msg_get_opc(req->rq_reqmsg));
rc = ptl_send_rpc(req, 0);
+ if (rc == -ENOMEM) {
+ spin_lock(&imp->imp_lock);
+ if (!list_empty(&req->rq_list)) {
+ list_del_init(&req->rq_list);
+ atomic_dec(&req->rq_import->imp_inflight);
+ }
+ spin_unlock(&imp->imp_lock);
+ ptlrpc_rqphase_move(req, RQ_PHASE_NEW);
+ return rc;
+ }
if (rc) {
DEBUG_REQ(D_HA, req, "send failed (%d); expect timeout", rc);
spin_lock(&req->rq_lock);
@@ -1688,18 +1844,9 @@ int ptlrpc_check_set(const struct lu_env *env, struct ptlrpc_request_set *set)
spin_lock(&req->rq_lock);
req->rq_resend = 1;
spin_unlock(&req->rq_lock);
- if (req->rq_bulk) {
- __u64 old_xid;
-
- if (!ptlrpc_unregister_bulk(req, 1))
- continue;
-
- /* ensure previous bulk fails */
- old_xid = req->rq_xid;
- req->rq_xid = ptlrpc_next_xid();
- CDEBUG(D_HA, "resend bulk old x%llu new x%llu\n",
- old_xid, req->rq_xid);
- }
+ if (req->rq_bulk &&
+ !ptlrpc_unregister_bulk(req, 1))
+ continue;
}
/*
* rq_wait_ctx is only touched by ptlrpcd,
@@ -1727,6 +1874,14 @@ int ptlrpc_check_set(const struct lu_env *env, struct ptlrpc_request_set *set)
}
rc = ptl_send_rpc(req, 0);
+ if (rc == -ENOMEM) {
+ spin_lock(&imp->imp_lock);
+ if (!list_empty(&req->rq_list))
+ list_del_init(&req->rq_list);
+ spin_unlock(&imp->imp_lock);
+ ptlrpc_rqphase_move(req, RQ_PHASE_NEW);
+ continue;
+ }
if (rc) {
DEBUG_REQ(D_HA, req,
"send failed: rc = %d", rc);
@@ -1850,6 +2005,7 @@ interpret:
list_del_init(&req->rq_list);
atomic_dec(&imp->imp_inflight);
}
+ list_del_init(&req->rq_unreplied_list);
spin_unlock(&imp->imp_lock);
atomic_dec(&set->set_remaining);
@@ -2247,6 +2403,7 @@ static void __ptlrpc_free_req(struct ptlrpc_request *request, int locked)
if (!locked)
spin_lock(&request->rq_import->imp_lock);
list_del_init(&request->rq_replay_list);
+ list_del_init(&request->rq_unreplied_list);
if (!locked)
spin_unlock(&request->rq_import->imp_lock);
}
@@ -2266,7 +2423,7 @@ static void __ptlrpc_free_req(struct ptlrpc_request *request, int locked)
request->rq_import = NULL;
}
if (request->rq_bulk)
- ptlrpc_free_bulk_pin(request->rq_bulk);
+ ptlrpc_free_bulk(request->rq_bulk);
if (request->rq_reqbuf || request->rq_clrbuf)
sptlrpc_cli_free_reqbuf(request);
@@ -2542,14 +2699,6 @@ void ptlrpc_resend_req(struct ptlrpc_request *req)
req->rq_resend = 1;
req->rq_net_err = 0;
req->rq_timedout = 0;
- if (req->rq_bulk) {
- __u64 old_xid = req->rq_xid;
-
- /* ensure previous bulk fails */
- req->rq_xid = ptlrpc_next_xid();
- CDEBUG(D_HA, "resend bulk old x%llu new x%llu\n",
- old_xid, req->rq_xid);
- }
ptlrpc_client_wake_req(req);
spin_unlock(&req->rq_lock);
}
@@ -2592,6 +2741,10 @@ void ptlrpc_retain_replayable_request(struct ptlrpc_request *req,
lustre_msg_add_flags(req->rq_reqmsg, MSG_REPLAY);
+ spin_lock(&req->rq_lock);
+ req->rq_resend = 0;
+ spin_unlock(&req->rq_lock);
+
LASSERT(imp->imp_replayable);
/* Balanced in ptlrpc_free_committed, usually. */
ptlrpc_request_addref(req);
@@ -2667,8 +2820,15 @@ static int ptlrpc_replay_interpret(const struct lu_env *env,
atomic_dec(&imp->imp_replay_inflight);
- if (!ptlrpc_client_replied(req)) {
- CERROR("request replay timed out, restarting recovery\n");
+ /*
+ * Note: if this is a bulk replay (MDS-MDS replay), then even if
+ * the server got the request but the bulk transfer timed out,
+ * replay the bulk request again
+ */
+ if (!ptlrpc_client_replied(req) ||
+ (req->rq_bulk &&
+ lustre_msg_get_status(req->rq_repmsg) == -ETIMEDOUT)) {
+ DEBUG_REQ(D_ERROR, req, "request replay timed out.\n");
rc = -ETIMEDOUT;
goto out;
}
@@ -2939,6 +3099,48 @@ __u64 ptlrpc_next_xid(void)
}
/**
+ * If the request has a newly allocated XID (new request or EINPROGRESS
+ * resend), use this XID as the bulk matchbits; otherwise allocate new
+ * matchbits for the request, to ensure the previous bulk fails and to
+ * avoid problems with lost replies and hence several transfers landing
+ * in the same buffer from different sending attempts.
+ */
+void ptlrpc_set_bulk_mbits(struct ptlrpc_request *req)
+{
+ struct ptlrpc_bulk_desc *bd = req->rq_bulk;
+
+ LASSERT(bd);
+
+ if (!req->rq_resend) {
+ /* this request has a new xid, just use it as bulk matchbits */
+ req->rq_mbits = req->rq_xid;
+
+ } else { /* needs to generate a new matchbits for resend */
+ u64 old_mbits = req->rq_mbits;
+
+ if ((bd->bd_import->imp_connect_data.ocd_connect_flags &
+ OBD_CONNECT_BULK_MBITS)) {
+ req->rq_mbits = ptlrpc_next_xid();
+ } else {
+ /* old version transfers rq_xid to peer as matchbits */
+ req->rq_mbits = ptlrpc_next_xid();
+ req->rq_xid = req->rq_mbits;
+ }
+
+ CDEBUG(D_HA, "resend bulk old x%llu new x%llu\n",
+ old_mbits, req->rq_mbits);
+ }
+
+ /*
+ * For multi-bulk RPCs, rq_mbits is the last mbits needed for the bulks
+ * so that the server can infer the number of bulks that were prepared;
+ * see LU-1431
+ */
+ req->rq_mbits += ((bd->bd_iov_count + LNET_MAX_IOV - 1) /
+ LNET_MAX_IOV) - 1;
+}
+
+/**
* Get a glimpse at what next xid value might have been.
* Returns possible next xid.
*/
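ptlrpc_set_bulk_mbits() above reserves one matchbits value per bulk of LNET_MAX_IOV fragments and stores the last one in rq_mbits, so the server can infer how many bulks were prepared (LU-1431). The arithmetic is a ceiling division; a tiny worked example, with an illustrative LNET_MAX_IOV value:

#include <stdint.h>
#include <stdio.h>

#define LNET_MAX_IOV 256	/* illustrative; the real value is in the LNet headers */

/* rq_mbits after ptlrpc_set_bulk_mbits(): the first mbits plus one
 * for each extra bulk needed to carry iov_count fragments. */
static uint64_t last_bulk_mbits(uint64_t first_mbits, int iov_count)
{
	int nbulks = (iov_count + LNET_MAX_IOV - 1) / LNET_MAX_IOV;

	return first_mbits + nbulks - 1;
}

int main(void)
{
	/* 600 fragments -> 3 bulks -> mbits x, x+1, x+2; rq_mbits = x+2 */
	printf("%llu\n", (unsigned long long)last_bulk_mbits(1000, 600));
	return 0;
}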
diff --git a/drivers/staging/lustre/lustre/ptlrpc/connection.c b/drivers/staging/lustre/lustre/ptlrpc/connection.c
index 7b020d60c9e5..6c7c8b68a909 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/connection.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/connection.c
@@ -152,8 +152,8 @@ void ptlrpc_connection_fini(void)
/*
* Hash operations for net_peer<->connection
*/
-static unsigned
-conn_hashfn(struct cfs_hash *hs, const void *key, unsigned mask)
+static unsigned int
+conn_hashfn(struct cfs_hash *hs, const void *key, unsigned int mask)
{
return cfs_hash_djb2_hash(key, sizeof(lnet_process_id_t), mask);
}
diff --git a/drivers/staging/lustre/lustre/ptlrpc/events.c b/drivers/staging/lustre/lustre/ptlrpc/events.c
index 283dfb296d35..49f3e6368415 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/events.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/events.c
@@ -182,9 +182,9 @@ void client_bulk_callback(lnet_event_t *ev)
struct ptlrpc_bulk_desc *desc = cbid->cbid_arg;
struct ptlrpc_request *req;
- LASSERT((desc->bd_type == BULK_PUT_SINK &&
+ LASSERT((ptlrpc_is_bulk_put_sink(desc->bd_type) &&
ev->type == LNET_EVENT_PUT) ||
- (desc->bd_type == BULK_GET_SOURCE &&
+ (ptlrpc_is_bulk_get_source(desc->bd_type) &&
ev->type == LNET_EVENT_GET) ||
ev->type == LNET_EVENT_UNLINK);
LASSERT(ev->unlinked);
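The reworked assertion above pairs bulk direction with the LNet event that completes it: a PUT sink (the peer pushes data to us) finishes with LNET_EVENT_PUT, a GET source (the peer pulls from us) with LNET_EVENT_GET, and the new buffer-kind bit must not disturb the direction predicates. A sketch with hypothetical flag values (the real ones live in lustre_net.h):

#include <assert.h>
#include <stdbool.h>

enum {
	BULK_PUT_SINK	= 1 << 0,	/* peer PUTs into our buffer */
	BULK_GET_SOURCE	= 1 << 1,	/* peer GETs from our buffer */
	BULK_BUF_KIOV	= 1 << 2,	/* buffer kind, orthogonal bit */
};

static bool is_put_sink(unsigned int type)   { return type & BULK_PUT_SINK; }
static bool is_get_source(unsigned int type) { return type & BULK_GET_SOURCE; }

int main(void)
{
	unsigned int read_bulk = BULK_PUT_SINK | BULK_BUF_KIOV;

	/* the buffer-kind bit leaves the direction predicates intact */
	assert(is_put_sink(read_bulk) && !is_get_source(read_bulk));
	return 0;
}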
diff --git a/drivers/staging/lustre/lustre/ptlrpc/import.c b/drivers/staging/lustre/lustre/ptlrpc/import.c
index a23d0a05b574..e8280194001c 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/import.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/import.c
@@ -396,7 +396,7 @@ void ptlrpc_activate_import(struct obd_import *imp)
}
EXPORT_SYMBOL(ptlrpc_activate_import);
-static void ptlrpc_pinger_force(struct obd_import *imp)
+void ptlrpc_pinger_force(struct obd_import *imp)
{
CDEBUG(D_HA, "%s: waking up pinger s:%s\n", obd2cli_tgt(imp->imp_obd),
ptlrpc_import_state_name(imp->imp_state));
@@ -408,6 +408,7 @@ static void ptlrpc_pinger_force(struct obd_import *imp)
if (imp->imp_state != LUSTRE_IMP_CONNECTING)
ptlrpc_pinger_wake_up();
}
+EXPORT_SYMBOL(ptlrpc_pinger_force);
void ptlrpc_fail_import(struct obd_import *imp, __u32 conn_cnt)
{
@@ -621,7 +622,8 @@ int ptlrpc_connect_import(struct obd_import *imp)
spin_unlock(&imp->imp_lock);
CERROR("already connected\n");
return 0;
- } else if (imp->imp_state == LUSTRE_IMP_CONNECTING) {
+ } else if (imp->imp_state == LUSTRE_IMP_CONNECTING ||
+ imp->imp_connected) {
spin_unlock(&imp->imp_lock);
CERROR("already connecting\n");
return -EALREADY;
@@ -691,8 +693,6 @@ int ptlrpc_connect_import(struct obd_import *imp)
request->rq_timeout = INITIAL_CONNECT_TIMEOUT;
lustre_msg_set_timeout(request->rq_reqmsg, request->rq_timeout);
- lustre_msg_add_op_flags(request->rq_reqmsg, MSG_CONNECT_NEXT_VER);
-
request->rq_no_resend = 1;
request->rq_no_delay = 1;
request->rq_send_state = LUSTRE_IMP_CONNECTING;
@@ -859,6 +859,17 @@ static int ptlrpc_connect_set_flags(struct obd_import *imp,
client_adjust_max_dirty(cli);
/*
+ * Update client max modify RPCs in flight with value returned
+ * by the server
+ */
+ if (ocd->ocd_connect_flags & OBD_CONNECT_MULTIMODRPCS)
+ cli->cl_max_mod_rpcs_in_flight = min(
+ cli->cl_max_mod_rpcs_in_flight,
+ ocd->ocd_maxmodrpcs);
+ else
+ cli->cl_max_mod_rpcs_in_flight = 1;
+
+ /*
* Reset ns_connect_flags only for initial connect. It might be
* changed in while using FS and if we reset it in reconnect
* this leads to losing user settings done before such as
@@ -873,8 +884,7 @@ static int ptlrpc_connect_set_flags(struct obd_import *imp,
ocd->ocd_connect_flags;
}
- if ((ocd->ocd_connect_flags & OBD_CONNECT_AT) &&
- (imp->imp_msg_magic == LUSTRE_MSG_MAGIC_V2))
+ if (ocd->ocd_connect_flags & OBD_CONNECT_AT)
/*
* We need a per-message support flag, because
* a. we don't know if the incoming connect reply
@@ -889,16 +899,45 @@ static int ptlrpc_connect_set_flags(struct obd_import *imp,
else
imp->imp_msghdr_flags &= ~MSGHDR_AT_SUPPORT;
- if ((ocd->ocd_connect_flags & OBD_CONNECT_FULL20) &&
- (imp->imp_msg_magic == LUSTRE_MSG_MAGIC_V2))
- imp->imp_msghdr_flags |= MSGHDR_CKSUM_INCOMPAT18;
- else
- imp->imp_msghdr_flags &= ~MSGHDR_CKSUM_INCOMPAT18;
+ imp->imp_msghdr_flags |= MSGHDR_CKSUM_INCOMPAT18;
return 0;
}
/**
+ * Add all replay requests back to the unreplied list before starting
+ * replay, so that the known replied XID only ever increases, even
+ * while requests are being replayed.
+ */
+static void ptlrpc_prepare_replay(struct obd_import *imp)
+{
+ struct ptlrpc_request *req;
+
+ if (imp->imp_state != LUSTRE_IMP_REPLAY ||
+ imp->imp_resend_replay)
+ return;
+
+ /*
+ * If the server was restarted during replay, the requests may
+ * have been added to the unreplied list in a former replay.
+ */
+ spin_lock(&imp->imp_lock);
+
+ list_for_each_entry(req, &imp->imp_committed_list, rq_replay_list) {
+ if (list_empty(&req->rq_unreplied_list))
+ ptlrpc_add_unreplied(req);
+ }
+
+ list_for_each_entry(req, &imp->imp_replay_list, rq_replay_list) {
+ if (list_empty(&req->rq_unreplied_list))
+ ptlrpc_add_unreplied(req);
+ }
+
+ imp->imp_known_replied_xid = ptlrpc_known_replied_xid(imp);
+ spin_unlock(&imp->imp_lock);
+}
+
+/**
* interpret_reply callback for connect RPCs.
* Looks into returned status of connect operation and decides
* what to do with the import - i.e enter recovery, promote it to
@@ -933,6 +972,13 @@ static int ptlrpc_connect_interpret(const struct lu_env *env,
ptlrpc_maybe_ping_import_soon(imp);
goto out;
}
+
+ /*
+ * LU-7558: indicate that we are interpreting the connect reply;
+ * ptlrpc_connect_import() will not try to reconnect until this
+ * interpretation finishes.
+ */
+ imp->imp_connected = 1;
spin_unlock(&imp->imp_lock);
LASSERT(imp->imp_conn_current);
@@ -967,6 +1013,16 @@ static int ptlrpc_connect_interpret(const struct lu_env *env,
spin_unlock(&imp->imp_lock);
+ if (!exp) {
+ /* This could happen if export is cleaned during the
+ * connect attempt
+ */
+ CERROR("%s: missing export after connect\n",
+ imp->imp_obd->obd_name);
+ rc = -ENODEV;
+ goto out;
+ }
+
/* check that server granted subset of flags we asked for. */
if ((ocd->ocd_connect_flags & imp->imp_connect_flags_orig) !=
ocd->ocd_connect_flags) {
@@ -977,15 +1033,6 @@ static int ptlrpc_connect_interpret(const struct lu_env *env,
goto out;
}
- if (!exp) {
- /* This could happen if export is cleaned during the
- * connect attempt
- */
- CERROR("%s: missing export after connect\n",
- imp->imp_obd->obd_name);
- rc = -ENODEV;
- goto out;
- }
old_connect_flags = exp_connect_flags(exp);
exp->exp_connect_data = *ocd;
imp->imp_obd->obd_self_export->exp_connect_data = *ocd;
@@ -1124,6 +1171,7 @@ static int ptlrpc_connect_interpret(const struct lu_env *env,
imp->imp_remote_handle =
*lustre_msg_get_handle(request->rq_repmsg);
imp->imp_last_replay_transno = 0;
+ imp->imp_replay_cursor = &imp->imp_committed_list;
IMPORT_SET_STATE(imp, LUSTRE_IMP_REPLAY);
} else {
DEBUG_REQ(D_HA, request, "%s: evicting (reconnect/recover flags not set: %x)",
@@ -1147,18 +1195,25 @@ static int ptlrpc_connect_interpret(const struct lu_env *env,
}
finish:
+ ptlrpc_prepare_replay(imp);
rc = ptlrpc_import_recovery_state_machine(imp);
if (rc == -ENOTCONN) {
CDEBUG(D_HA, "evicted/aborted by %s@%s during recovery; invalidating and reconnecting\n",
obd2cli_tgt(imp->imp_obd),
imp->imp_connection->c_remote_uuid.uuid);
ptlrpc_connect_import(imp);
+ spin_lock(&imp->imp_lock);
+ imp->imp_connected = 0;
imp->imp_connect_tried = 1;
+ spin_unlock(&imp->imp_lock);
return 0;
}
out:
+ spin_lock(&imp->imp_lock);
+ imp->imp_connected = 0;
imp->imp_connect_tried = 1;
+ spin_unlock(&imp->imp_lock);
if (rc != 0) {
IMPORT_SET_STATE(imp, LUSTRE_IMP_DISCON);
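
Taken together, the imp_connected flag set in ptlrpc_connect_interpret() and the -EALREADY check at the top of ptlrpc_connect_import() form a simple mutual-exclusion handshake. A user-space sketch with a pthread mutex standing in for imp_lock (toy names throughout; like the hunks above, the lock is taken and dropped around each flag access):

    #include <errno.h>
    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    struct toy_import {
        pthread_mutex_t imp_lock;
        bool imp_connected;   /* set while the reply is interpreted */
    };

    static int toy_connect(struct toy_import *imp)
    {
        pthread_mutex_lock(&imp->imp_lock);
        if (imp->imp_connected) {
            pthread_mutex_unlock(&imp->imp_lock);
            return -EALREADY;             /* reply being interpreted */
        }
        pthread_mutex_unlock(&imp->imp_lock);
        /* ... issue the connect RPC here ... */
        return 0;
    }

    static void toy_interpret(struct toy_import *imp)
    {
        pthread_mutex_lock(&imp->imp_lock);
        imp->imp_connected = true;        /* block re-entry */
        pthread_mutex_unlock(&imp->imp_lock);

        /* ... process the reply; a nested toy_connect() now
         * fails fast with -EALREADY instead of racing ... */

        pthread_mutex_lock(&imp->imp_lock);
        imp->imp_connected = false;       /* both finish and out paths */
        pthread_mutex_unlock(&imp->imp_lock);
    }

    int main(void)
    {
        struct toy_import imp = { PTHREAD_MUTEX_INITIALIZER, false };

        toy_interpret(&imp);
        printf("connect rc = %d\n", toy_connect(&imp));  /* rc = 0 */
        return 0;
    }
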
diff --git a/drivers/staging/lustre/lustre/ptlrpc/layout.c b/drivers/staging/lustre/lustre/ptlrpc/layout.c
index 839ef3e80c1a..99d7c667df28 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/layout.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/layout.c
@@ -48,14 +48,14 @@
#include <linux/module.h>
-/* LUSTRE_VERSION_CODE */
-#include "../include/lustre_ver.h"
-
-#include "../include/obd_support.h"
-/* lustre_swab_mdt_body */
#include "../include/lustre/lustre_idl.h"
-/* obd2cli_tgt() (required by DEBUG_REQ()) */
+
+#include "../include/llog_swab.h"
+#include "../include/lustre_debug.h"
+#include "../include/lustre_swab.h"
+#include "../include/lustre_ver.h"
#include "../include/obd.h"
+#include "../include/obd_support.h"
/* __REQ_LAYOUT_USER__ */
#endif
@@ -121,7 +121,7 @@ static const struct req_msg_field *mdt_close_client[] = {
&RMF_CAPA1
};
-static const struct req_msg_field *mdt_release_close_client[] = {
+static const struct req_msg_field *mdt_intent_close_client[] = {
&RMF_PTLRPC_BODY,
&RMF_MDT_EPOCH,
&RMF_REC_REINT,
@@ -257,6 +257,18 @@ static const struct req_msg_field *mds_reint_rename_client[] = {
&RMF_DLM_REQ
};
+static const struct req_msg_field *mds_reint_migrate_client[] = {
+ &RMF_PTLRPC_BODY,
+ &RMF_REC_REINT,
+ &RMF_CAPA1,
+ &RMF_CAPA2,
+ &RMF_NAME,
+ &RMF_SYMTGT,
+ &RMF_DLM_REQ,
+ &RMF_MDT_EPOCH,
+ &RMF_CLOSE_DATA
+};
+
static const struct req_msg_field *mds_last_unlink_server[] = {
&RMF_PTLRPC_BODY,
&RMF_MDT_BODY,
@@ -666,10 +678,9 @@ static struct req_format *req_formats[] = {
&RQF_MDS_GETXATTR,
&RQF_MDS_SYNC,
&RQF_MDS_CLOSE,
- &RQF_MDS_RELEASE_CLOSE,
+ &RQF_MDS_INTENT_CLOSE,
&RQF_MDS_READPAGE,
&RQF_MDS_WRITEPAGE,
- &RQF_MDS_DONE_WRITING,
&RQF_MDS_REINT,
&RQF_MDS_REINT_CREATE,
&RQF_MDS_REINT_CREATE_ACL,
@@ -679,9 +690,9 @@ static struct req_format *req_formats[] = {
&RQF_MDS_REINT_UNLINK,
&RQF_MDS_REINT_LINK,
&RQF_MDS_REINT_RENAME,
+ &RQF_MDS_REINT_MIGRATE,
&RQF_MDS_REINT_SETATTR,
&RQF_MDS_REINT_SETXATTR,
- &RQF_MDS_QUOTACHECK,
&RQF_MDS_QUOTACTL,
&RQF_MDS_HSM_PROGRESS,
&RQF_MDS_HSM_CT_REGISTER,
@@ -691,10 +702,8 @@ static struct req_format *req_formats[] = {
&RQF_MDS_HSM_ACTION,
&RQF_MDS_HSM_REQUEST,
&RQF_MDS_SWAP_LAYOUTS,
- &RQF_QC_CALLBACK,
&RQF_OST_CONNECT,
&RQF_OST_DISCONNECT,
- &RQF_OST_QUOTACHECK,
&RQF_OST_QUOTACTL,
&RQF_OST_GETATTR,
&RQF_OST_SETATTR,
@@ -1180,14 +1189,6 @@ struct req_format RQF_LOG_CANCEL =
DEFINE_REQ_FMT0("OBD_LOG_CANCEL", log_cancel_client, empty);
EXPORT_SYMBOL(RQF_LOG_CANCEL);
-struct req_format RQF_MDS_QUOTACHECK =
- DEFINE_REQ_FMT0("MDS_QUOTACHECK", quotactl_only, empty);
-EXPORT_SYMBOL(RQF_MDS_QUOTACHECK);
-
-struct req_format RQF_OST_QUOTACHECK =
- DEFINE_REQ_FMT0("OST_QUOTACHECK", quotactl_only, empty);
-EXPORT_SYMBOL(RQF_OST_QUOTACHECK);
-
struct req_format RQF_MDS_QUOTACTL =
DEFINE_REQ_FMT0("MDS_QUOTACTL", quotactl_only, quotactl_only);
EXPORT_SYMBOL(RQF_MDS_QUOTACTL);
@@ -1196,10 +1197,6 @@ struct req_format RQF_OST_QUOTACTL =
DEFINE_REQ_FMT0("OST_QUOTACTL", quotactl_only, quotactl_only);
EXPORT_SYMBOL(RQF_OST_QUOTACTL);
-struct req_format RQF_QC_CALLBACK =
- DEFINE_REQ_FMT0("QC_CALLBACK", quotactl_only, empty);
-EXPORT_SYMBOL(RQF_QC_CALLBACK);
-
struct req_format RQF_MDS_GETSTATUS =
DEFINE_REQ_FMT0("MDS_GETSTATUS", mdt_body_only, mdt_body_capa);
EXPORT_SYMBOL(RQF_MDS_GETSTATUS);
@@ -1270,6 +1267,11 @@ struct req_format RQF_MDS_REINT_RENAME =
mds_last_unlink_server);
EXPORT_SYMBOL(RQF_MDS_REINT_RENAME);
+struct req_format RQF_MDS_REINT_MIGRATE =
+ DEFINE_REQ_FMT0("MDS_REINT_MIGRATE", mds_reint_migrate_client,
+ mds_last_unlink_server);
+EXPORT_SYMBOL(RQF_MDS_REINT_MIGRATE);
+
struct req_format RQF_MDS_REINT_SETATTR =
DEFINE_REQ_FMT0("MDS_REINT_SETATTR",
mds_reint_setattr_client, mds_setattr_server);
@@ -1381,15 +1383,10 @@ struct req_format RQF_MDS_CLOSE =
mdt_close_client, mds_last_unlink_server);
EXPORT_SYMBOL(RQF_MDS_CLOSE);
-struct req_format RQF_MDS_RELEASE_CLOSE =
+struct req_format RQF_MDS_INTENT_CLOSE =
DEFINE_REQ_FMT0("MDS_CLOSE",
- mdt_release_close_client, mds_last_unlink_server);
-EXPORT_SYMBOL(RQF_MDS_RELEASE_CLOSE);
-
-struct req_format RQF_MDS_DONE_WRITING =
- DEFINE_REQ_FMT0("MDS_DONE_WRITING",
- mdt_close_client, mdt_body_only);
-EXPORT_SYMBOL(RQF_MDS_DONE_WRITING);
+ mdt_intent_close_client, mds_last_unlink_server);
+EXPORT_SYMBOL(RQF_MDS_INTENT_CLOSE);
struct req_format RQF_MDS_READPAGE =
DEFINE_REQ_FMT0("MDS_READPAGE",
@@ -1874,13 +1871,14 @@ static void *__req_capsule_get(struct req_capsule *pill,
getter = (field->rmf_flags & RMF_F_STRING) ?
(typeof(getter))lustre_msg_string : lustre_msg_buf;
- if (field->rmf_flags & RMF_F_STRUCT_ARRAY) {
+ if (field->rmf_flags & (RMF_F_STRUCT_ARRAY | RMF_F_NO_SIZE_CHECK)) {
/*
* We've already asserted that field->rmf_size > 0 in
* req_layout_init().
*/
len = lustre_msg_buflen(msg, offset);
- if ((len % field->rmf_size) != 0) {
+ if (!(field->rmf_flags & RMF_F_NO_SIZE_CHECK) &&
+ (len % field->rmf_size)) {
CERROR("%s: array field size mismatch %d modulo %u != 0 (%d)\n",
field->rmf_name, len, field->rmf_size, loc);
return NULL;
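
The new RMF_F_NO_SIZE_CHECK handling in __req_capsule_get() reduces to a small predicate: an array field must be a whole multiple of the element size unless size checking is explicitly waived. A sketch with illustrative flag values (the real RMF_F_* constants differ):

    #include <stdbool.h>
    #include <stdio.h>

    #define RMF_F_STRUCT_ARRAY  0x01   /* illustrative values */
    #define RMF_F_NO_SIZE_CHECK 0x02

    static bool field_len_ok(unsigned flags, unsigned len, unsigned elem)
    {
        if (!(flags & (RMF_F_STRUCT_ARRAY | RMF_F_NO_SIZE_CHECK)))
            return true;               /* fixed-size field path */
        if (flags & RMF_F_NO_SIZE_CHECK)
            return true;               /* variable-size field */
        return (len % elem) == 0;      /* struct array */
    }

    int main(void)
    {
        printf("%d\n", field_len_ok(RMF_F_STRUCT_ARRAY, 96, 32));   /* 1 */
        printf("%d\n", field_len_ok(RMF_F_STRUCT_ARRAY, 100, 32));  /* 0 */
        printf("%d\n", field_len_ok(RMF_F_NO_SIZE_CHECK, 100, 32)); /* 1 */
        return 0;
    }
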
diff --git a/drivers/staging/lustre/lustre/ptlrpc/llog_client.c b/drivers/staging/lustre/lustre/ptlrpc/llog_client.c
index 0f55c01feba8..110d9f505787 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/llog_client.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/llog_client.c
@@ -287,8 +287,13 @@ static int llog_client_read_header(const struct lu_env *env,
goto out;
}
- memcpy(handle->lgh_hdr, hdr, sizeof(*hdr));
- handle->lgh_last_idx = handle->lgh_hdr->llh_tail.lrt_index;
+ if (handle->lgh_hdr_size < hdr->llh_hdr.lrh_len) {
+ rc = -EFAULT;
+ goto out;
+ }
+
+ memcpy(handle->lgh_hdr, hdr, hdr->llh_hdr.lrh_len);
+ handle->lgh_last_idx = LLOG_HDR_TAIL(handle->lgh_hdr)->lrt_index;
/* sanity checks */
llh_hdr = &handle->lgh_hdr->llh_hdr;
@@ -296,9 +301,14 @@ static int llog_client_read_header(const struct lu_env *env,
CERROR("bad log header magic: %#x (expecting %#x)\n",
llh_hdr->lrh_type, LLOG_HDR_MAGIC);
rc = -EIO;
- } else if (llh_hdr->lrh_len != LLOG_CHUNK_SIZE) {
- CERROR("incorrectly sized log header: %#x (expecting %#x)\n",
- llh_hdr->lrh_len, LLOG_CHUNK_SIZE);
+ } else if (llh_hdr->lrh_len !=
+ LLOG_HDR_TAIL(handle->lgh_hdr)->lrt_len ||
+ (llh_hdr->lrh_len & (llh_hdr->lrh_len - 1)) ||
+ llh_hdr->lrh_len < LLOG_MIN_CHUNK_SIZE ||
+ llh_hdr->lrh_len > handle->lgh_hdr_size) {
+ CERROR("incorrectly sized log header: %#x (expecting %#x) (power of two > 8192)\n",
+ llh_hdr->lrh_len,
+ LLOG_HDR_TAIL(handle->lgh_hdr)->lrt_len);
CERROR("you may need to re-run lconf --write_conf.\n");
rc = -EIO;
}
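
The replacement header checks in llog_client_read_header() amount to four conditions on lrh_len: it must match the tail's record length, be a power of two, be at least the minimum chunk size, and fit in the buffer allocated for the header. A standalone sketch, assuming LLOG_MIN_CHUNK_SIZE is 8192 per the error message above:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define LLOG_MIN_CHUNK_SIZE 8192U   /* assumed from the message text */

    static bool llog_hdr_len_ok(uint32_t len, uint32_t tail_len,
                                uint32_t bufsize)
    {
        return len == tail_len &&
               (len & (len - 1)) == 0 &&    /* power-of-two test */
               len >= LLOG_MIN_CHUNK_SIZE &&
               len <= bufsize;
    }

    int main(void)
    {
        printf("%d\n", llog_hdr_len_ok(32768, 32768, 65536)); /* 1 */
        printf("%d\n", llog_hdr_len_ok(12288, 12288, 65536)); /* 0: not 2^n */
        return 0;
    }
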
diff --git a/drivers/staging/lustre/lustre/ptlrpc/lproc_ptlrpc.c b/drivers/staging/lustre/lustre/ptlrpc/lproc_ptlrpc.c
index 9bad57d65db4..f87478180013 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/lproc_ptlrpc.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/lproc_ptlrpc.c
@@ -479,8 +479,8 @@ static int ptlrpc_lprocfs_nrs_seq_show(struct seq_file *m, void *n)
struct ptlrpc_nrs_policy *policy;
struct ptlrpc_nrs_pol_info *infos;
struct ptlrpc_nrs_pol_info tmp;
- unsigned num_pols;
- unsigned pol_idx = 0;
+ unsigned int num_pols;
+ unsigned int pol_idx = 0;
bool hp = false;
int i;
int rc = 0;
diff --git a/drivers/staging/lustre/lustre/ptlrpc/niobuf.c b/drivers/staging/lustre/lustre/ptlrpc/niobuf.c
index 9c937398a085..da1209e40f03 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/niobuf.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/niobuf.c
@@ -114,7 +114,7 @@ static int ptlrpc_register_bulk(struct ptlrpc_request *req)
int rc2;
int posted_md;
int total_md;
- __u64 xid;
+ u64 mbits;
lnet_handle_me_t me_h;
lnet_md_t md;
@@ -127,8 +127,7 @@ static int ptlrpc_register_bulk(struct ptlrpc_request *req)
LASSERT(desc->bd_md_max_brw <= PTLRPC_BULK_OPS_COUNT);
LASSERT(desc->bd_iov_count <= PTLRPC_MAX_BRW_PAGES);
LASSERT(desc->bd_req);
- LASSERT(desc->bd_type == BULK_PUT_SINK ||
- desc->bd_type == BULK_GET_SOURCE);
+ LASSERT(ptlrpc_is_bulk_op_passive(desc->bd_type));
/* cleanup the state of the bulk for it will be reused */
if (req->rq_resend || req->rq_send_state == LUSTRE_IMP_REPLAY)
@@ -143,40 +142,37 @@ static int ptlrpc_register_bulk(struct ptlrpc_request *req)
LASSERT(desc->bd_cbid.cbid_fn == client_bulk_callback);
LASSERT(desc->bd_cbid.cbid_arg == desc);
- /* An XID is only used for a single request from the client.
- * For retried bulk transfers, a new XID will be allocated in
- * in ptlrpc_check_set() if it needs to be resent, so it is not
- * using the same RDMA match bits after an error.
- *
- * For multi-bulk RPCs, rq_xid is the last XID needed for bulks. The
- * first bulk XID is power-of-two aligned before rq_xid. LU-1431
- */
- xid = req->rq_xid & ~((__u64)desc->bd_md_max_brw - 1);
+ total_md = (desc->bd_iov_count + LNET_MAX_IOV - 1) / LNET_MAX_IOV;
+ /* rq_mbits is matchbits of the final bulk */
+ mbits = req->rq_mbits - total_md + 1;
+
+ LASSERTF(mbits == (req->rq_mbits & PTLRPC_BULK_OPS_MASK),
+ "first mbits = x%llu, last mbits = x%llu\n",
+ mbits, req->rq_mbits);
LASSERTF(!(desc->bd_registered &&
req->rq_send_state != LUSTRE_IMP_REPLAY) ||
- xid != desc->bd_last_xid,
- "registered: %d rq_xid: %llu bd_last_xid: %llu\n",
- desc->bd_registered, xid, desc->bd_last_xid);
+ mbits != desc->bd_last_mbits,
+ "registered: %d rq_mbits: %llu bd_last_mbits: %llu\n",
+ desc->bd_registered, mbits, desc->bd_last_mbits);
- total_md = (desc->bd_iov_count + LNET_MAX_IOV - 1) / LNET_MAX_IOV;
desc->bd_registered = 1;
- desc->bd_last_xid = xid;
+ desc->bd_last_mbits = mbits;
desc->bd_md_count = total_md;
md.user_ptr = &desc->bd_cbid;
md.eq_handle = ptlrpc_eq_h;
md.threshold = 1; /* PUT or GET */
- for (posted_md = 0; posted_md < total_md; posted_md++, xid++) {
+ for (posted_md = 0; posted_md < total_md; posted_md++, mbits++) {
md.options = PTLRPC_MD_OPTIONS |
- ((desc->bd_type == BULK_GET_SOURCE) ?
+ (ptlrpc_is_bulk_op_get(desc->bd_type) ?
LNET_MD_OP_GET : LNET_MD_OP_PUT);
ptlrpc_fill_bulk_md(&md, desc, posted_md);
- rc = LNetMEAttach(desc->bd_portal, peer, xid, 0,
+ rc = LNetMEAttach(desc->bd_portal, peer, mbits, 0,
LNET_UNLINK, LNET_INS_AFTER, &me_h);
if (rc != 0) {
CERROR("%s: LNetMEAttach failed x%llu/%d: rc = %d\n",
- desc->bd_import->imp_obd->obd_name, xid,
+ desc->bd_import->imp_obd->obd_name, mbits,
posted_md, rc);
break;
}
@@ -186,7 +182,7 @@ static int ptlrpc_register_bulk(struct ptlrpc_request *req)
&desc->bd_mds[posted_md]);
if (rc != 0) {
CERROR("%s: LNetMDAttach failed x%llu/%d: rc = %d\n",
- desc->bd_import->imp_obd->obd_name, xid,
+ desc->bd_import->imp_obd->obd_name, mbits,
posted_md, rc);
rc2 = LNetMEUnlink(me_h);
LASSERT(rc2 == 0);
@@ -205,27 +201,19 @@ static int ptlrpc_register_bulk(struct ptlrpc_request *req)
return -ENOMEM;
}
- /* Set rq_xid to matchbits of the final bulk so that server can
- * infer the number of bulks that were prepared
- */
- req->rq_xid = --xid;
- LASSERTF(desc->bd_last_xid == (req->rq_xid & PTLRPC_BULK_OPS_MASK),
- "bd_last_xid = x%llu, rq_xid = x%llu\n",
- desc->bd_last_xid, req->rq_xid);
-
spin_lock(&desc->bd_lock);
- /* Holler if peer manages to touch buffers before he knows the xid */
+	/* Holler if the peer manages to touch buffers before it knows the mbits */
if (desc->bd_md_count != total_md)
CWARN("%s: Peer %s touched %d buffers while I registered\n",
desc->bd_import->imp_obd->obd_name, libcfs_id2str(peer),
total_md - desc->bd_md_count);
spin_unlock(&desc->bd_lock);
- CDEBUG(D_NET, "Setup %u bulk %s buffers: %u pages %u bytes, xid x%#llx-%#llx, portal %u\n",
+ CDEBUG(D_NET, "Setup %u bulk %s buffers: %u pages %u bytes, mbits x%#llx-%#llx, portal %u\n",
desc->bd_md_count,
- desc->bd_type == BULK_GET_SOURCE ? "get-source" : "put-sink",
+ ptlrpc_is_bulk_op_get(desc->bd_type) ? "get-source" : "put-sink",
desc->bd_iov_count, desc->bd_nob,
- desc->bd_last_xid, req->rq_xid, desc->bd_portal);
+ desc->bd_last_mbits, req->rq_mbits, desc->bd_portal);
return 0;
}
@@ -521,6 +509,39 @@ int ptl_send_rpc(struct ptlrpc_request *request, int noreply)
lustre_msg_set_conn_cnt(request->rq_reqmsg, imp->imp_conn_cnt);
lustre_msghdr_set_flags(request->rq_reqmsg, imp->imp_msghdr_flags);
+ /*
+	 * If this is the first resend of the request for EINPROGRESS,
+	 * we need to allocate a new XID (see after_reply()); this is
+	 * different from a resend after a reply timeout.
+ */
+ if (request->rq_nr_resend && list_empty(&request->rq_unreplied_list)) {
+ __u64 min_xid = 0;
+ /*
+		 * On a resend for EINPROGRESS, allocate a new XID to avoid
+		 * reply reconstruction.
+ */
+ spin_lock(&imp->imp_lock);
+ ptlrpc_assign_next_xid_nolock(request);
+ request->rq_mbits = request->rq_xid;
+ min_xid = ptlrpc_known_replied_xid(imp);
+ spin_unlock(&imp->imp_lock);
+
+ lustre_msg_set_last_xid(request->rq_reqmsg, min_xid);
+ DEBUG_REQ(D_RPCTRACE, request, "Allocating new xid for resend on EINPROGRESS");
+ } else if (request->rq_bulk) {
+ ptlrpc_set_bulk_mbits(request);
+ lustre_msg_set_mbits(request->rq_reqmsg, request->rq_mbits);
+ }
+
+ if (list_empty(&request->rq_unreplied_list) ||
+ request->rq_xid <= imp->imp_known_replied_xid) {
+ DEBUG_REQ(D_ERROR, request,
+ "xid: %llu, replied: %llu, list_empty:%d\n",
+ request->rq_xid, imp->imp_known_replied_xid,
+ list_empty(&request->rq_unreplied_list));
+ LBUG();
+ }
+
/**
* For enabled AT all request should have AT_SUPPORT in the
* FULL import state when OBD_CONNECT_AT is set
@@ -537,8 +558,15 @@ int ptl_send_rpc(struct ptlrpc_request *request, int noreply)
mpflag = cfs_memory_pressure_get_and_set();
rc = sptlrpc_cli_wrap_request(request);
- if (rc)
+ if (rc) {
+ /*
+ * set rq_sent so that this request is treated
+ * as a delayed send in the upper layers
+ */
+ if (rc == -ENOMEM)
+ request->rq_sent = ktime_get_seconds();
goto out;
+ }
/* bulk register should be done after wrap_request() */
if (request->rq_bulk) {
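
The match-bits arithmetic in ptlrpc_register_bulk() above is worth a worked example: rq_mbits names the last bulk, and the first is recovered by subtracting total_md - 1, where total_md = ceil(bd_iov_count / LNET_MAX_IOV). A sketch of just that computation (LNET_MAX_IOV taken as 256 for illustration):

    #include <stdint.h>
    #include <stdio.h>

    #define LNET_MAX_IOV 256   /* fragments per memory descriptor */

    /* Given the match bits of the *last* bulk (rq_mbits) and the total
     * fragment count, recover the match bits of the first bulk. */
    static uint64_t first_bulk_mbits(uint64_t rq_mbits, int iov_count,
                                     int *total_md)
    {
        *total_md = (iov_count + LNET_MAX_IOV - 1) / LNET_MAX_IOV;
        return rq_mbits - *total_md + 1;
    }

    int main(void)
    {
        int total_md;
        /* 600 fragments -> ceil(600/256) = 3 descriptors; if the last
         * bulk's match bits are 0x1002, the first one's are 0x1000. */
        uint64_t first = first_bulk_mbits(0x1002, 600, &total_md);

        printf("total_md=%d first=%#llx\n", total_md,
               (unsigned long long)first);
        return 0;
    }
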
diff --git a/drivers/staging/lustre/lustre/ptlrpc/nrs.c b/drivers/staging/lustre/lustre/ptlrpc/nrs.c
index d88faf61e740..7b6ffb195834 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/nrs.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/nrs.c
@@ -82,16 +82,9 @@ static int nrs_policy_ctl_locked(struct ptlrpc_nrs_policy *policy,
static void nrs_policy_stop0(struct ptlrpc_nrs_policy *policy)
{
- struct ptlrpc_nrs *nrs = policy->pol_nrs;
-
- if (policy->pol_desc->pd_ops->op_policy_stop) {
- spin_unlock(&nrs->nrs_lock);
-
+ if (policy->pol_desc->pd_ops->op_policy_stop)
policy->pol_desc->pd_ops->op_policy_stop(policy);
- spin_lock(&nrs->nrs_lock);
- }
-
LASSERT(list_empty(&policy->pol_list_queued));
LASSERT(policy->pol_req_queued == 0 &&
policy->pol_req_started == 0);
@@ -619,6 +612,12 @@ static int nrs_policy_ctl(struct ptlrpc_nrs *nrs, char *name,
goto out;
}
+ if (policy->pol_state != NRS_POL_STATE_STARTED &&
+ policy->pol_state != NRS_POL_STATE_STOPPED) {
+ rc = -EAGAIN;
+ goto out;
+ }
+
switch (opc) {
/**
* Unknown opcode, pass it down to the policy-specific control
diff --git a/drivers/staging/lustre/lustre/ptlrpc/pack_generic.c b/drivers/staging/lustre/lustre/ptlrpc/pack_generic.c
index 871768511e8c..13f00b7cbbe5 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/pack_generic.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/pack_generic.c
@@ -42,11 +42,14 @@
#include "../../include/linux/libcfs/libcfs.h"
-#include "../include/obd_support.h"
-#include "../include/obd_class.h"
+#include "../include/lustre/ll_fiemap.h"
+
+#include "../include/llog_swab.h"
#include "../include/lustre_net.h"
+#include "../include/lustre_swab.h"
#include "../include/obd_cksum.h"
-#include "../include/lustre/ll_fiemap.h"
+#include "../include/obd_support.h"
+#include "../include/obd_class.h"
#include "ptlrpc_internal.h"
@@ -942,6 +945,25 @@ __u32 lustre_msg_get_opc(struct lustre_msg *msg)
}
EXPORT_SYMBOL(lustre_msg_get_opc);
+__u16 lustre_msg_get_tag(struct lustre_msg *msg)
+{
+ switch (msg->lm_magic) {
+ case LUSTRE_MSG_MAGIC_V2: {
+ struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
+
+ if (!pb) {
+ CERROR("invalid msg %p: no ptlrpc body!\n", msg);
+ return 0;
+ }
+ return pb->pb_tag;
+ }
+ default:
+ CERROR("incorrect message magic: %08x\n", msg->lm_magic);
+ return 0;
+ }
+}
+EXPORT_SYMBOL(lustre_msg_get_tag);
+
__u64 lustre_msg_get_last_committed(struct lustre_msg *msg)
{
switch (msg->lm_magic) {
@@ -1236,6 +1258,37 @@ void lustre_msg_set_opc(struct lustre_msg *msg, __u32 opc)
}
}
+void lustre_msg_set_last_xid(struct lustre_msg *msg, u64 last_xid)
+{
+ switch (msg->lm_magic) {
+ case LUSTRE_MSG_MAGIC_V2: {
+ struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
+
+ LASSERTF(pb, "invalid msg %p: no ptlrpc body!\n", msg);
+ pb->pb_last_xid = last_xid;
+ return;
+ }
+ default:
+ LASSERTF(0, "incorrect message magic: %08x\n", msg->lm_magic);
+ }
+}
+
+void lustre_msg_set_tag(struct lustre_msg *msg, __u16 tag)
+{
+ switch (msg->lm_magic) {
+ case LUSTRE_MSG_MAGIC_V2: {
+ struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
+
+ LASSERTF(pb, "invalid msg %p: no ptlrpc body!\n", msg);
+ pb->pb_tag = tag;
+ return;
+ }
+ default:
+ LASSERTF(0, "incorrect message magic: %08x\n", msg->lm_magic);
+ }
+}
+EXPORT_SYMBOL(lustre_msg_set_tag);
+
void lustre_msg_set_versions(struct lustre_msg *msg, __u64 *versions)
{
switch (msg->lm_magic) {
@@ -1373,6 +1426,21 @@ void lustre_msg_set_cksum(struct lustre_msg *msg, __u32 cksum)
}
}
+void lustre_msg_set_mbits(struct lustre_msg *msg, __u64 mbits)
+{
+ switch (msg->lm_magic) {
+ case LUSTRE_MSG_MAGIC_V2: {
+ struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
+
+ LASSERTF(pb, "invalid msg %p: no ptlrpc body!\n", msg);
+ pb->pb_mbits = mbits;
+ return;
+ }
+ default:
+ LASSERTF(0, "incorrect message magic: %08x\n", msg->lm_magic);
+ }
+}
+
void ptlrpc_request_set_replen(struct ptlrpc_request *req)
{
int count = req_capsule_filled_sizes(&req->rq_pill, RCL_SERVER);
@@ -1442,7 +1510,7 @@ void lustre_swab_ptlrpc_body(struct ptlrpc_body *b)
__swab32s(&b->pb_opc);
__swab32s(&b->pb_status);
__swab64s(&b->pb_last_xid);
- __swab64s(&b->pb_last_seen);
+ __swab16s(&b->pb_tag);
__swab64s(&b->pb_last_committed);
__swab64s(&b->pb_transno);
__swab32s(&b->pb_flags);
@@ -1456,7 +1524,12 @@ void lustre_swab_ptlrpc_body(struct ptlrpc_body *b)
__swab64s(&b->pb_pre_versions[1]);
__swab64s(&b->pb_pre_versions[2]);
__swab64s(&b->pb_pre_versions[3]);
- CLASSERT(offsetof(typeof(*b), pb_padding) != 0);
+ __swab64s(&b->pb_mbits);
+ CLASSERT(offsetof(typeof(*b), pb_padding0) != 0);
+ CLASSERT(offsetof(typeof(*b), pb_padding1) != 0);
+ CLASSERT(offsetof(typeof(*b), pb_padding64_0) != 0);
+ CLASSERT(offsetof(typeof(*b), pb_padding64_1) != 0);
+ CLASSERT(offsetof(typeof(*b), pb_padding64_2) != 0);
/* While we need to maintain compatibility between
* clients and servers without ptlrpc_body_v2 (< 2.3)
* do not swab any fields beyond pb_jobid, as we are
@@ -1492,8 +1565,12 @@ void lustre_swab_connect(struct obd_connect_data *ocd)
__swab32s(&ocd->ocd_max_easize);
if (ocd->ocd_connect_flags & OBD_CONNECT_MAXBYTES)
__swab64s(&ocd->ocd_maxbytes);
+ if (ocd->ocd_connect_flags & OBD_CONNECT_MULTIMODRPCS)
+ __swab16s(&ocd->ocd_maxmodrpcs);
+ CLASSERT(offsetof(typeof(*ocd), padding0));
CLASSERT(offsetof(typeof(*ocd), padding1) != 0);
- CLASSERT(offsetof(typeof(*ocd), padding2) != 0);
+ if (ocd->ocd_connect_flags & OBD_CONNECT_FLAGS2)
+ __swab64s(&ocd->ocd_connect_flags2);
CLASSERT(offsetof(typeof(*ocd), padding3) != 0);
CLASSERT(offsetof(typeof(*ocd), padding4) != 0);
CLASSERT(offsetof(typeof(*ocd), padding5) != 0);
@@ -1666,7 +1743,7 @@ void lustre_swab_mdt_body(struct mdt_body *b)
__swab32s(&b->mbo_eadatasize);
__swab32s(&b->mbo_aclsize);
__swab32s(&b->mbo_max_mdsize);
- __swab32s(&b->mbo_max_cookiesize);
+ CLASSERT(offsetof(typeof(*b), mbo_unused3));
__swab32s(&b->mbo_uid_h);
__swab32s(&b->mbo_gid_h);
CLASSERT(offsetof(typeof(*b), mbo_padding_5) != 0);
@@ -1675,9 +1752,10 @@ void lustre_swab_mdt_body(struct mdt_body *b)
void lustre_swab_mdt_ioepoch(struct mdt_ioepoch *b)
{
/* handle is opaque */
- __swab64s(&b->ioepoch);
- __swab32s(&b->flags);
- CLASSERT(offsetof(typeof(*b), padding) != 0);
+ /* mio_handle is opaque */
+ CLASSERT(offsetof(typeof(*b), mio_unused1));
+ CLASSERT(offsetof(typeof(*b), mio_unused2));
+ CLASSERT(offsetof(typeof(*b), mio_padding));
}
void lustre_swab_mgs_target_info(struct mgs_target_info *mti)
@@ -1772,7 +1850,7 @@ void lustre_swab_fid2path(struct getinfo_fid2path *gf)
}
EXPORT_SYMBOL(lustre_swab_fid2path);
-static void lustre_swab_fiemap_extent(struct ll_fiemap_extent *fm_extent)
+static void lustre_swab_fiemap_extent(struct fiemap_extent *fm_extent)
{
__swab64s(&fm_extent->fe_logical);
__swab64s(&fm_extent->fe_physical);
@@ -1781,7 +1859,7 @@ static void lustre_swab_fiemap_extent(struct ll_fiemap_extent *fm_extent)
__swab32s(&fm_extent->fe_device);
}
-void lustre_swab_fiemap(struct ll_user_fiemap *fiemap)
+void lustre_swab_fiemap(struct fiemap *fiemap)
{
__u32 i;
@@ -1938,7 +2016,7 @@ static void lustre_swab_ldlm_res_id(struct ldlm_res_id *id)
__swab64s(&id->name[i]);
}
-static void lustre_swab_ldlm_policy_data(ldlm_wire_policy_data_t *d)
+static void lustre_swab_ldlm_policy_data(union ldlm_wire_policy_data *d)
{
/* the lock data is a union and the first two fields are always an
* extent so it's ok to process an LDLM_EXTENT and LDLM_FLOCK lock
@@ -2062,8 +2140,6 @@ static void dump_obdo(struct obdo *oa)
if (valid & OBD_MD_FLHANDLE)
CDEBUG(D_RPCTRACE, "obdo: o_handle = %lld\n",
oa->o_handle.cookie);
- if (valid & OBD_MD_FLCOOKIE)
- CDEBUG(D_RPCTRACE, "obdo: o_lcookie = (llog_cookie dumping not yet implemented)\n");
}
void dump_ost_body(struct ost_body *ob)
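
The new lustre_msg_get_tag()/lustre_msg_set_tag() pair follows the same switch-on-magic accessor pattern as the other lustre_msg helpers: unknown or corrupt messages degrade to a harmless default instead of a bad read. A cut-down sketch of the pattern with toy types (the magic value and struct layout here are illustrative):

    #include <stdint.h>
    #include <stdio.h>

    #define TOY_MSG_MAGIC_V2 0x0BD00002u  /* illustrative magic */

    struct toy_body { uint16_t pb_tag; };
    struct toy_msg  { uint32_t lm_magic; struct toy_body body; };

    static uint16_t toy_msg_get_tag(const struct toy_msg *msg)
    {
        switch (msg->lm_magic) {
        case TOY_MSG_MAGIC_V2:
            return msg->body.pb_tag;
        default:
            fprintf(stderr, "incorrect magic: %08x\n", msg->lm_magic);
            return 0;                 /* fail soft, like the kernel code */
        }
    }

    int main(void)
    {
        struct toy_msg m = { TOY_MSG_MAGIC_V2, { 42 } };

        printf("tag=%u\n", toy_msg_get_tag(&m));   /* 42 */
        m.lm_magic = 0xdeadbeef;
        printf("tag=%u\n", toy_msg_get_tag(&m));   /* 0, with an error */
        return 0;
    }
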
diff --git a/drivers/staging/lustre/lustre/ptlrpc/pers.c b/drivers/staging/lustre/lustre/ptlrpc/pers.c
index 5b9fb11c0b6b..94e9fa85d774 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/pers.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/pers.c
@@ -43,6 +43,8 @@
void ptlrpc_fill_bulk_md(lnet_md_t *md, struct ptlrpc_bulk_desc *desc,
int mdidx)
{
+ int offset = mdidx * LNET_MAX_IOV;
+
CLASSERT(PTLRPC_MAX_BRW_PAGES < LI_POISON);
LASSERT(mdidx < desc->bd_md_max_brw);
@@ -50,23 +52,20 @@ void ptlrpc_fill_bulk_md(lnet_md_t *md, struct ptlrpc_bulk_desc *desc,
LASSERT(!(md->options & (LNET_MD_IOVEC | LNET_MD_KIOV |
LNET_MD_PHYS)));
- md->options |= LNET_MD_KIOV;
md->length = max(0, desc->bd_iov_count - mdidx * LNET_MAX_IOV);
md->length = min_t(unsigned int, LNET_MAX_IOV, md->length);
- if (desc->bd_enc_iov)
- md->start = &desc->bd_enc_iov[mdidx * LNET_MAX_IOV];
- else
- md->start = &desc->bd_iov[mdidx * LNET_MAX_IOV];
-}
-
-void ptlrpc_add_bulk_page(struct ptlrpc_bulk_desc *desc, struct page *page,
- int pageoffset, int len)
-{
- lnet_kiov_t *kiov = &desc->bd_iov[desc->bd_iov_count];
-
- kiov->bv_page = page;
- kiov->bv_offset = pageoffset;
- kiov->bv_len = len;
- desc->bd_iov_count++;
+ if (ptlrpc_is_bulk_desc_kiov(desc->bd_type)) {
+ md->options |= LNET_MD_KIOV;
+ if (GET_ENC_KIOV(desc))
+ md->start = &BD_GET_ENC_KIOV(desc, offset);
+ else
+ md->start = &BD_GET_KIOV(desc, offset);
+ } else {
+ md->options |= LNET_MD_IOVEC;
+ if (GET_ENC_KVEC(desc))
+ md->start = &BD_GET_ENC_KVEC(desc, offset);
+ else
+ md->start = &BD_GET_KVEC(desc, offset);
+ }
}
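
The length computation in ptlrpc_fill_bulk_md() slices the fragment array LNET_MAX_IOV entries at a time, with the last descriptor taking the remainder. A sketch of just the arithmetic (LNET_MAX_IOV taken as 256 for illustration):

    #include <stdio.h>

    #define LNET_MAX_IOV 256  /* max fragments one MD can carry */

    /* Fragment count carried by memory descriptor `mdidx` when a bulk
     * of `iov_count` fragments is split LNET_MAX_IOV at a time. */
    static int md_frag_count(int iov_count, int mdidx)
    {
        int len = iov_count - mdidx * LNET_MAX_IOV;

        if (len < 0)
            len = 0;
        return len > LNET_MAX_IOV ? LNET_MAX_IOV : len;
    }

    int main(void)
    {
        /* 600 fragments -> MDs carry 256, 256, 88. */
        for (int i = 0; i < 3; i++)
            printf("md %d: %d frags (offset %d)\n",
                   i, md_frag_count(600, i), i * LNET_MAX_IOV);
        return 0;
    }
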
diff --git a/drivers/staging/lustre/lustre/ptlrpc/ptlrpc_internal.h b/drivers/staging/lustre/lustre/ptlrpc/ptlrpc_internal.h
index f14d193287da..e0f859ca6223 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/ptlrpc_internal.h
+++ b/drivers/staging/lustre/lustre/ptlrpc/ptlrpc_internal.h
@@ -55,8 +55,11 @@ int ptlrpcd_start(struct ptlrpcd_ctl *pc);
/* client.c */
void ptlrpc_at_adj_net_latency(struct ptlrpc_request *req,
unsigned int service_time);
-struct ptlrpc_bulk_desc *ptlrpc_new_bulk(unsigned npages, unsigned max_brw,
- unsigned type, unsigned portal);
+struct ptlrpc_bulk_desc *ptlrpc_new_bulk(unsigned int nfrags,
+ unsigned int max_brw,
+ enum ptlrpc_bulk_op_type type,
+ unsigned int portal,
+ const struct ptlrpc_bulk_frag_ops *ops);
int ptlrpc_request_cache_init(void);
void ptlrpc_request_cache_fini(void);
struct ptlrpc_request *ptlrpc_request_cache_alloc(gfp_t flags);
@@ -67,6 +70,10 @@ void ptlrpc_set_add_new_req(struct ptlrpcd_ctl *pc,
int ptlrpc_expired_set(void *data);
int ptlrpc_set_next_timeout(struct ptlrpc_request_set *);
void ptlrpc_resend_req(struct ptlrpc_request *request);
+void ptlrpc_set_bulk_mbits(struct ptlrpc_request *req);
+void ptlrpc_assign_next_xid_nolock(struct ptlrpc_request *req);
+__u64 ptlrpc_known_replied_xid(struct obd_import *imp);
+void ptlrpc_add_unreplied(struct ptlrpc_request *req);
/* events.c */
int ptlrpc_init_portals(void);
@@ -226,8 +233,6 @@ int ptlrpc_expire_one_request(struct ptlrpc_request *req, int async_unlink);
/* pers.c */
void ptlrpc_fill_bulk_md(lnet_md_t *md, struct ptlrpc_bulk_desc *desc,
int mdcnt);
-void ptlrpc_add_bulk_page(struct ptlrpc_bulk_desc *desc, struct page *page,
- int pageoffset, int len);
/* pack_generic.c */
struct ptlrpc_reply_state *
@@ -322,6 +327,7 @@ static inline void ptlrpc_cli_req_init(struct ptlrpc_request *req)
INIT_LIST_HEAD(&cr->cr_set_chain);
INIT_LIST_HEAD(&cr->cr_ctx_chain);
+ INIT_LIST_HEAD(&cr->cr_unreplied_list);
init_waitqueue_head(&cr->cr_reply_waitq);
init_waitqueue_head(&cr->cr_set_waitq);
}
@@ -338,4 +344,24 @@ static inline void ptlrpc_srv_req_init(struct ptlrpc_request *req)
INIT_LIST_HEAD(&sr->sr_hist_list);
}
+static inline bool ptlrpc_req_is_connect(struct ptlrpc_request *req)
+{
+	return lustre_msg_get_opc(req->rq_reqmsg) == MDS_CONNECT ||
+	       lustre_msg_get_opc(req->rq_reqmsg) == OST_CONNECT ||
+	       lustre_msg_get_opc(req->rq_reqmsg) == MGS_CONNECT;
+}
+
+static inline bool ptlrpc_req_is_disconnect(struct ptlrpc_request *req)
+{
+	return lustre_msg_get_opc(req->rq_reqmsg) == MDS_DISCONNECT ||
+	       lustre_msg_get_opc(req->rq_reqmsg) == OST_DISCONNECT ||
+	       lustre_msg_get_opc(req->rq_reqmsg) == MGS_DISCONNECT;
+}
+
#endif /* PTLRPC_INTERNAL_H */
diff --git a/drivers/staging/lustre/lustre/ptlrpc/recover.c b/drivers/staging/lustre/lustre/ptlrpc/recover.c
index 405faf0dc9fc..c00449036884 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/recover.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/recover.c
@@ -111,7 +111,9 @@ int ptlrpc_replay_next(struct obd_import *imp, int *inflight)
 * all of its requests being replayed, it's safe to
* use a cursor to accelerate the search
*/
- imp->imp_replay_cursor = imp->imp_replay_cursor->next;
+ if (!imp->imp_resend_replay ||
+ imp->imp_replay_cursor == &imp->imp_committed_list)
+ imp->imp_replay_cursor = imp->imp_replay_cursor->next;
while (imp->imp_replay_cursor !=
&imp->imp_committed_list) {
@@ -155,10 +157,24 @@ int ptlrpc_replay_next(struct obd_import *imp, int *inflight)
lustre_msg_add_flags(req->rq_reqmsg, MSG_RESENT);
spin_lock(&imp->imp_lock);
+ /* The resend replay request may have been removed from the
+ * unreplied list.
+ */
+ if (req && imp->imp_resend_replay &&
+ list_empty(&req->rq_unreplied_list)) {
+ ptlrpc_add_unreplied(req);
+ imp->imp_known_replied_xid = ptlrpc_known_replied_xid(imp);
+ }
+
imp->imp_resend_replay = 0;
spin_unlock(&imp->imp_lock);
if (req) {
+		/* The request should have been added back to the unreplied
+		 * list by ptlrpc_prepare_replay().
+ */
+ LASSERT(!list_empty(&req->rq_unreplied_list));
+
rc = ptlrpc_replay_req(req);
if (rc) {
CERROR("recovery replay error %d for req %llu\n",
@@ -194,7 +210,13 @@ int ptlrpc_resend(struct obd_import *imp)
LASSERTF((long)req > PAGE_SIZE && req != LP_POISON,
"req %p bad\n", req);
LASSERTF(req->rq_type != LI_POISON, "req %p freed\n", req);
- if (!ptlrpc_no_resend(req))
+
+ /*
+	 * If the request is allowed to be sent during replay and it
+	 * has not timed out yet, then it does not need to be resent.
+ */
+ if (!ptlrpc_no_resend(req) &&
+ (req->rq_timedout || !req->rq_allow_replay))
ptlrpc_resend_req(req);
}
spin_unlock(&imp->imp_lock);
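
The new resend condition above is compact enough to tabulate: resend unless the request is marked no-resend, and skip replay-allowed requests that have not yet timed out. A sketch of the predicate:

    #include <stdbool.h>
    #include <stdio.h>

    /* Mirrors: !ptlrpc_no_resend(req) &&
     *          (req->rq_timedout || !req->rq_allow_replay) */
    static bool must_resend(bool no_resend, bool timedout, bool allow_replay)
    {
        return !no_resend && (timedout || !allow_replay);
    }

    int main(void)
    {
        printf("%d\n", must_resend(false, false, true));  /* 0: let it ride */
        printf("%d\n", must_resend(false, true,  true));  /* 1: timed out */
        printf("%d\n", must_resend(false, false, false)); /* 1: not replayable */
        return 0;
    }
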
diff --git a/drivers/staging/lustre/lustre/ptlrpc/sec.c b/drivers/staging/lustre/lustre/ptlrpc/sec.c
index a7416cd9ac71..e860df7c45a2 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/sec.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/sec.c
@@ -379,7 +379,7 @@ int sptlrpc_req_get_ctx(struct ptlrpc_request *req)
if (!req->rq_cli_ctx) {
CERROR("req %p: fail to get context\n", req);
- return -ENOMEM;
+ return -ECONNREFUSED;
}
return 0;
@@ -515,6 +515,13 @@ static int sptlrpc_req_replace_dead_ctx(struct ptlrpc_request *req)
set_current_state(TASK_INTERRUPTIBLE);
schedule_timeout(msecs_to_jiffies(MSEC_PER_SEC));
+ } else if (unlikely(!test_bit(PTLRPC_CTX_UPTODATE_BIT, &newctx->cc_flags))) {
+ /*
+ * new ctx not up to date yet
+ */
+ CDEBUG(D_SEC,
+ "ctx (%p, fl %lx) doesn't switch, not up to date yet\n",
+ newctx, newctx->cc_flags);
} else {
/*
* it's possible newctx == oldctx if we're switching
diff --git a/drivers/staging/lustre/lustre/ptlrpc/sec_bulk.c b/drivers/staging/lustre/lustre/ptlrpc/sec_bulk.c
index b2cc5ea6cb93..2fe9085e2034 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/sec_bulk.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/sec_bulk.c
@@ -108,6 +108,7 @@ static struct ptlrpc_enc_page_pool {
unsigned long epp_st_lowfree; /* lowest free pages reached */
unsigned int epp_st_max_wqlen; /* highest waitqueue length */
unsigned long epp_st_max_wait; /* in jiffies */
+ unsigned long epp_st_outofmem; /* # of out of mem requests */
/*
* pointers to pools
*/
@@ -139,7 +140,8 @@ int sptlrpc_proc_enc_pool_seq_show(struct seq_file *m, void *v)
"cache missing: %lu\n"
"low free mark: %lu\n"
"max waitqueue depth: %u\n"
- "max wait time: %ld/%lu\n",
+ "max wait time: %ld/%lu\n"
+ "out of mem: %lu\n",
totalram_pages,
PAGES_PER_POOL,
page_pools.epp_max_pages,
@@ -158,7 +160,8 @@ int sptlrpc_proc_enc_pool_seq_show(struct seq_file *m, void *v)
page_pools.epp_st_lowfree,
page_pools.epp_st_max_wqlen,
page_pools.epp_st_max_wait,
- msecs_to_jiffies(MSEC_PER_SEC));
+ msecs_to_jiffies(MSEC_PER_SEC),
+ page_pools.epp_st_outofmem);
spin_unlock(&page_pools.epp_lock);
@@ -306,12 +309,30 @@ static inline void enc_pools_wakeup(void)
}
}
+/*
+ * Export the number of free pages in the pool
+ */
+int get_free_pages_in_pool(void)
+{
+ return page_pools.epp_free_pages;
+}
+
+/*
+ * Let the outside world know whether the enc_pool has reached full capacity
+ */
+int pool_is_at_full_capacity(void)
+{
+ return (page_pools.epp_total_pages == page_pools.epp_max_pages);
+}
+
void sptlrpc_enc_pool_put_pages(struct ptlrpc_bulk_desc *desc)
{
int p_idx, g_idx;
int i;
- if (!desc->bd_enc_iov)
+ LASSERT(ptlrpc_is_bulk_desc_kiov(desc->bd_type));
+
+ if (!GET_ENC_KIOV(desc))
return;
LASSERT(desc->bd_iov_count > 0);
@@ -326,12 +347,12 @@ void sptlrpc_enc_pool_put_pages(struct ptlrpc_bulk_desc *desc)
LASSERT(page_pools.epp_pools[p_idx]);
for (i = 0; i < desc->bd_iov_count; i++) {
- LASSERT(desc->bd_enc_iov[i].bv_page);
+ LASSERT(BD_GET_ENC_KIOV(desc, i).bv_page);
LASSERT(g_idx != 0 || page_pools.epp_pools[p_idx]);
LASSERT(!page_pools.epp_pools[p_idx][g_idx]);
page_pools.epp_pools[p_idx][g_idx] =
- desc->bd_enc_iov[i].bv_page;
+ BD_GET_ENC_KIOV(desc, i).bv_page;
if (++g_idx == PAGES_PER_POOL) {
p_idx++;
@@ -345,8 +366,8 @@ void sptlrpc_enc_pool_put_pages(struct ptlrpc_bulk_desc *desc)
spin_unlock(&page_pools.epp_lock);
- kfree(desc->bd_enc_iov);
- desc->bd_enc_iov = NULL;
+ kfree(GET_ENC_KIOV(desc));
+ GET_ENC_KIOV(desc) = NULL;
}
static inline void enc_pools_alloc(void)
@@ -404,6 +425,7 @@ int sptlrpc_enc_pool_init(void)
page_pools.epp_st_lowfree = 0;
page_pools.epp_st_max_wqlen = 0;
page_pools.epp_st_max_wait = 0;
+ page_pools.epp_st_outofmem = 0;
enc_pools_alloc();
if (!page_pools.epp_pools)
@@ -431,13 +453,14 @@ void sptlrpc_enc_pool_fini(void)
if (page_pools.epp_st_access > 0) {
CDEBUG(D_SEC,
- "max pages %lu, grows %u, grow fails %u, shrinks %u, access %lu, missing %lu, max qlen %u, max wait %ld/%ld\n",
+ "max pages %lu, grows %u, grow fails %u, shrinks %u, access %lu, missing %lu, max qlen %u, max wait %ld/%ld, out of mem %lu\n",
page_pools.epp_st_max_pages, page_pools.epp_st_grows,
page_pools.epp_st_grow_fails,
page_pools.epp_st_shrinks, page_pools.epp_st_access,
page_pools.epp_st_missings, page_pools.epp_st_max_wqlen,
page_pools.epp_st_max_wait,
- msecs_to_jiffies(MSEC_PER_SEC));
+ msecs_to_jiffies(MSEC_PER_SEC),
+ page_pools.epp_st_outofmem);
}
}
@@ -520,10 +543,11 @@ int sptlrpc_get_bulk_checksum(struct ptlrpc_bulk_desc *desc, __u8 alg,
hashsize = cfs_crypto_hash_digestsize(cfs_hash_alg_id[alg]);
for (i = 0; i < desc->bd_iov_count; i++) {
- cfs_crypto_hash_update_page(hdesc, desc->bd_iov[i].bv_page,
- desc->bd_iov[i].bv_offset &
+ cfs_crypto_hash_update_page(hdesc,
+ BD_GET_KIOV(desc, i).bv_page,
+ BD_GET_KIOV(desc, i).bv_offset &
~PAGE_MASK,
- desc->bd_iov[i].bv_len);
+ BD_GET_KIOV(desc, i).bv_len);
}
if (hashsize > buflen) {
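
sptlrpc_enc_pool_put_pages() walks the two-level pool with a (p_idx, g_idx) pair that advances in step with the flat fragment index: page i lives in pool i / PAGES_PER_POOL at slot i % PAGES_PER_POOL. A sketch of the mapping (the PAGES_PER_POOL value here is illustrative):

    #include <stdio.h>

    #define PAGES_PER_POOL 512  /* illustrative pool width */

    static void flat_to_pool(int i, int *p_idx, int *g_idx)
    {
        *p_idx = i / PAGES_PER_POOL;   /* which pool */
        *g_idx = i % PAGES_PER_POOL;   /* slot within the pool */
    }

    int main(void)
    {
        int p, g;

        flat_to_pool(1300, &p, &g);
        printf("page 1300 -> pool %d, slot %d\n", p, g); /* pool 2, slot 276 */
        return 0;
    }
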
diff --git a/drivers/staging/lustre/lustre/ptlrpc/sec_plain.c b/drivers/staging/lustre/lustre/ptlrpc/sec_plain.c
index cd305bcb334a..c5e7a2309fce 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/sec_plain.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/sec_plain.c
@@ -153,14 +153,16 @@ static void corrupt_bulk_data(struct ptlrpc_bulk_desc *desc)
char *ptr;
unsigned int off, i;
+ LASSERT(ptlrpc_is_bulk_desc_kiov(desc->bd_type));
+
for (i = 0; i < desc->bd_iov_count; i++) {
- if (desc->bd_iov[i].bv_len == 0)
+ if (!BD_GET_KIOV(desc, i).bv_len)
continue;
- ptr = kmap(desc->bd_iov[i].bv_page);
- off = desc->bd_iov[i].bv_offset & ~PAGE_MASK;
+ ptr = kmap(BD_GET_KIOV(desc, i).bv_page);
+ off = BD_GET_KIOV(desc, i).bv_offset & ~PAGE_MASK;
ptr[off] ^= 0x1;
- kunmap(desc->bd_iov[i].bv_page);
+ kunmap(BD_GET_KIOV(desc, i).bv_page);
return;
}
}
@@ -352,11 +354,11 @@ int plain_cli_unwrap_bulk(struct ptlrpc_cli_ctx *ctx,
/* fix the actual data size */
for (i = 0, nob = 0; i < desc->bd_iov_count; i++) {
- if (desc->bd_iov[i].bv_len + nob > desc->bd_nob_transferred) {
- desc->bd_iov[i].bv_len =
- desc->bd_nob_transferred - nob;
- }
- nob += desc->bd_iov[i].bv_len;
+		struct bio_vec *bv = &BD_GET_KIOV(desc, i);
+
+		if (bv->bv_len + nob > desc->bd_nob_transferred)
+			bv->bv_len = desc->bd_nob_transferred - nob;
+		nob += bv->bv_len;
}
rc = plain_verify_bulk_csum(desc, req->rq_flvr.u_bulk.hash.hash_alg,
diff --git a/drivers/staging/lustre/lustre/ptlrpc/service.c b/drivers/staging/lustre/lustre/ptlrpc/service.c
index 72f39308eebb..70c70558e177 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/service.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/service.c
@@ -343,9 +343,9 @@ ptlrpc_server_nthreads_check(struct ptlrpc_service *svc,
struct ptlrpc_service_conf *conf)
{
struct ptlrpc_service_thr_conf *tc = &conf->psc_thr;
- unsigned init;
- unsigned total;
- unsigned nthrs;
+ unsigned int init;
+ unsigned int total;
+ unsigned int nthrs;
int weight;
/*
@@ -2541,8 +2541,9 @@ int ptlrpc_hr_init(void)
hrp->hrp_nthrs = cfs_cpt_weight(ptlrpc_hr.hr_cpt_table, i);
hrp->hrp_nthrs /= weight;
+ if (hrp->hrp_nthrs == 0)
+ hrp->hrp_nthrs = 1;
- LASSERT(hrp->hrp_nthrs > 0);
hrp->hrp_thrs =
kzalloc_node(hrp->hrp_nthrs * sizeof(*hrt), GFP_NOFS,
cfs_cpt_spread_node(ptlrpc_hr.hr_cpt_table,
diff --git a/drivers/staging/lustre/lustre/ptlrpc/wiretest.c b/drivers/staging/lustre/lustre/ptlrpc/wiretest.c
index b05b1f935e4c..a04e36cf6dd4 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/wiretest.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/wiretest.c
@@ -195,49 +195,29 @@ void lustre_assert_wire_constants(void)
LASSERTF(REINT_MAX == 10, "found %lld\n",
(long long)REINT_MAX);
LASSERTF(DISP_IT_EXECD == 0x00000001UL, "found 0x%.8xUL\n",
- (unsigned)DISP_IT_EXECD);
+ (unsigned int)DISP_IT_EXECD);
LASSERTF(DISP_LOOKUP_EXECD == 0x00000002UL, "found 0x%.8xUL\n",
- (unsigned)DISP_LOOKUP_EXECD);
+ (unsigned int)DISP_LOOKUP_EXECD);
LASSERTF(DISP_LOOKUP_NEG == 0x00000004UL, "found 0x%.8xUL\n",
- (unsigned)DISP_LOOKUP_NEG);
+ (unsigned int)DISP_LOOKUP_NEG);
LASSERTF(DISP_LOOKUP_POS == 0x00000008UL, "found 0x%.8xUL\n",
- (unsigned)DISP_LOOKUP_POS);
+ (unsigned int)DISP_LOOKUP_POS);
LASSERTF(DISP_OPEN_CREATE == 0x00000010UL, "found 0x%.8xUL\n",
- (unsigned)DISP_OPEN_CREATE);
+ (unsigned int)DISP_OPEN_CREATE);
LASSERTF(DISP_OPEN_OPEN == 0x00000020UL, "found 0x%.8xUL\n",
- (unsigned)DISP_OPEN_OPEN);
+ (unsigned int)DISP_OPEN_OPEN);
LASSERTF(DISP_ENQ_COMPLETE == 0x00400000UL, "found 0x%.8xUL\n",
- (unsigned)DISP_ENQ_COMPLETE);
+ (unsigned int)DISP_ENQ_COMPLETE);
LASSERTF(DISP_ENQ_OPEN_REF == 0x00800000UL, "found 0x%.8xUL\n",
- (unsigned)DISP_ENQ_OPEN_REF);
+ (unsigned int)DISP_ENQ_OPEN_REF);
LASSERTF(DISP_ENQ_CREATE_REF == 0x01000000UL, "found 0x%.8xUL\n",
- (unsigned)DISP_ENQ_CREATE_REF);
+ (unsigned int)DISP_ENQ_CREATE_REF);
LASSERTF(DISP_OPEN_LOCK == 0x02000000UL, "found 0x%.8xUL\n",
- (unsigned)DISP_OPEN_LOCK);
+ (unsigned int)DISP_OPEN_LOCK);
LASSERTF(MDS_STATUS_CONN == 1, "found %lld\n",
(long long)MDS_STATUS_CONN);
LASSERTF(MDS_STATUS_LOV == 2, "found %lld\n",
(long long)MDS_STATUS_LOV);
- LASSERTF(LUSTRE_BFLAG_UNCOMMITTED_WRITES == 1, "found %lld\n",
- (long long)LUSTRE_BFLAG_UNCOMMITTED_WRITES);
- LASSERTF(MF_SOM_CHANGE == 0x00000001UL, "found 0x%.8xUL\n",
- (unsigned)MF_SOM_CHANGE);
- LASSERTF(MF_EPOCH_OPEN == 0x00000002UL, "found 0x%.8xUL\n",
- (unsigned)MF_EPOCH_OPEN);
- LASSERTF(MF_EPOCH_CLOSE == 0x00000004UL, "found 0x%.8xUL\n",
- (unsigned)MF_EPOCH_CLOSE);
- LASSERTF(MF_MDC_CANCEL_FID1 == 0x00000008UL, "found 0x%.8xUL\n",
- (unsigned)MF_MDC_CANCEL_FID1);
- LASSERTF(MF_MDC_CANCEL_FID2 == 0x00000010UL, "found 0x%.8xUL\n",
- (unsigned)MF_MDC_CANCEL_FID2);
- LASSERTF(MF_MDC_CANCEL_FID3 == 0x00000020UL, "found 0x%.8xUL\n",
- (unsigned)MF_MDC_CANCEL_FID3);
- LASSERTF(MF_MDC_CANCEL_FID4 == 0x00000040UL, "found 0x%.8xUL\n",
- (unsigned)MF_MDC_CANCEL_FID4);
- LASSERTF(MF_SOM_AU == 0x00000080UL, "found 0x%.8xUL\n",
- (unsigned)MF_SOM_AU);
- LASSERTF(MF_GETATTR_LOCK == 0x00000100UL, "found 0x%.8xUL\n",
- (unsigned)MF_GETATTR_LOCK);
LASSERTF(MDS_ATTR_MODE == 0x0000000000000001ULL, "found 0x%.16llxULL\n",
(long long)MDS_ATTR_MODE);
LASSERTF(MDS_ATTR_UID == 0x0000000000000002ULL, "found 0x%.16llxULL\n",
@@ -420,15 +400,13 @@ void lustre_assert_wire_constants(void)
LASSERTF((int)sizeof(((struct lustre_mdt_attrs *)0)->lma_self_fid) == 16, "found %lld\n",
(long long)(int)sizeof(((struct lustre_mdt_attrs *)0)->lma_self_fid));
LASSERTF(LMAI_RELEASED == 0x00000001UL, "found 0x%.8xUL\n",
- (unsigned)LMAI_RELEASED);
+ (unsigned int)LMAI_RELEASED);
LASSERTF(LMAC_HSM == 0x00000001UL, "found 0x%.8xUL\n",
- (unsigned)LMAC_HSM);
- LASSERTF(LMAC_SOM == 0x00000002UL, "found 0x%.8xUL\n",
- (unsigned)LMAC_SOM);
+ (unsigned int)LMAC_HSM);
LASSERTF(LMAC_NOT_IN_OI == 0x00000004UL, "found 0x%.8xUL\n",
- (unsigned)LMAC_NOT_IN_OI);
+ (unsigned int)LMAC_NOT_IN_OI);
LASSERTF(LMAC_FID_ON_OST == 0x00000008UL, "found 0x%.8xUL\n",
- (unsigned)LMAC_FID_ON_OST);
+ (unsigned int)LMAC_FID_ON_OST);
/* Checks for struct ost_id */
LASSERTF((int)sizeof(struct ost_id) == 16, "found %lld\n",
@@ -478,11 +456,11 @@ void lustre_assert_wire_constants(void)
LASSERTF(FID_SEQ_LOV_DEFAULT == 0xffffffffffffffffULL, "found 0x%.16llxULL\n",
(long long)FID_SEQ_LOV_DEFAULT);
LASSERTF(FID_OID_SPECIAL_BFL == 0x00000001UL, "found 0x%.8xUL\n",
- (unsigned)FID_OID_SPECIAL_BFL);
+ (unsigned int)FID_OID_SPECIAL_BFL);
LASSERTF(FID_OID_DOT_LUSTRE == 0x00000001UL, "found 0x%.8xUL\n",
- (unsigned)FID_OID_DOT_LUSTRE);
+ (unsigned int)FID_OID_DOT_LUSTRE);
LASSERTF(FID_OID_DOT_LUSTRE_OBF == 0x00000002UL, "found 0x%.8xUL\n",
- (unsigned)FID_OID_DOT_LUSTRE_OBF);
+ (unsigned int)FID_OID_DOT_LUSTRE_OBF);
/* Checks for struct lu_dirent */
LASSERTF((int)sizeof(struct lu_dirent) == 32, "found %lld\n",
@@ -512,11 +490,11 @@ void lustre_assert_wire_constants(void)
LASSERTF((int)sizeof(((struct lu_dirent *)0)->lde_name[0]) == 1, "found %lld\n",
(long long)(int)sizeof(((struct lu_dirent *)0)->lde_name[0]));
LASSERTF(LUDA_FID == 0x00000001UL, "found 0x%.8xUL\n",
- (unsigned)LUDA_FID);
+ (unsigned int)LUDA_FID);
LASSERTF(LUDA_TYPE == 0x00000002UL, "found 0x%.8xUL\n",
- (unsigned)LUDA_TYPE);
+ (unsigned int)LUDA_TYPE);
LASSERTF(LUDA_64BITHASH == 0x00000004UL, "found 0x%.8xUL\n",
- (unsigned)LUDA_64BITHASH);
+ (unsigned int)LUDA_64BITHASH);
/* Checks for struct luda_type */
LASSERTF((int)sizeof(struct luda_type) == 2, "found %lld\n",
@@ -635,10 +613,18 @@ void lustre_assert_wire_constants(void)
(long long)(int)offsetof(struct ptlrpc_body_v3, pb_last_xid));
LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_last_xid) == 8, "found %lld\n",
(long long)(int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_last_xid));
- LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_last_seen) == 32, "found %lld\n",
- (long long)(int)offsetof(struct ptlrpc_body_v3, pb_last_seen));
- LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_last_seen) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_last_seen));
+ LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_tag) == 32, "found %lld\n",
+ (long long)(int)offsetof(struct ptlrpc_body_v3, pb_tag));
+ LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_tag) == 2, "found %lld\n",
+ (long long)(int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_tag));
+ LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_padding0) == 34, "found %lld\n",
+ (long long)(int)offsetof(struct ptlrpc_body_v3, pb_padding0));
+ LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_padding0) == 2, "found %lld\n",
+ (long long)(int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_padding0));
+ LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_padding1) == 36, "found %lld\n",
+ (long long)(int)offsetof(struct ptlrpc_body_v3, pb_padding1));
+ LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_padding1) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_padding1));
LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_last_committed) == 40, "found %lld\n",
(long long)(int)offsetof(struct ptlrpc_body_v3, pb_last_committed));
LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_last_committed) == 8, "found %lld\n",
@@ -680,10 +666,22 @@ void lustre_assert_wire_constants(void)
(long long)(int)offsetof(struct ptlrpc_body_v3, pb_pre_versions));
LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_pre_versions) == 32, "found %lld\n",
(long long)(int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_pre_versions));
- LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_padding) == 120, "found %lld\n",
- (long long)(int)offsetof(struct ptlrpc_body_v3, pb_padding));
- LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_padding) == 32, "found %lld\n",
- (long long)(int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_padding));
+ LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_mbits) == 120, "found %lld\n",
+ (long long)(int)offsetof(struct ptlrpc_body_v3, pb_mbits));
+ LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_mbits) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_mbits));
+ LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_padding64_0) == 128, "found %lld\n",
+ (long long)(int)offsetof(struct ptlrpc_body_v3, pb_padding64_0));
+ LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_padding64_0) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_padding64_0));
+ LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_padding64_1) == 136, "found %lld\n",
+ (long long)(int)offsetof(struct ptlrpc_body_v3, pb_padding64_1));
+ LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_padding64_1) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_padding64_1));
+ LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_padding64_2) == 144, "found %lld\n",
+ (long long)(int)offsetof(struct ptlrpc_body_v3, pb_padding64_2));
+ LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_padding64_2) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_padding64_2));
CLASSERT(LUSTRE_JOBID_SIZE == 32);
LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_jobid) == 152, "found %lld\n",
(long long)(int)offsetof(struct ptlrpc_body_v3, pb_jobid));
@@ -713,10 +711,18 @@ void lustre_assert_wire_constants(void)
(int)offsetof(struct ptlrpc_body_v3, pb_last_xid), (int)offsetof(struct ptlrpc_body_v2, pb_last_xid));
LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_last_xid) == (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_last_xid), "%d != %d\n",
(int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_last_xid), (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_last_xid));
- LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_last_seen) == (int)offsetof(struct ptlrpc_body_v2, pb_last_seen), "%d != %d\n",
- (int)offsetof(struct ptlrpc_body_v3, pb_last_seen), (int)offsetof(struct ptlrpc_body_v2, pb_last_seen));
- LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_last_seen) == (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_last_seen), "%d != %d\n",
- (int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_last_seen), (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_last_seen));
+ LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_tag) == (int)offsetof(struct ptlrpc_body_v2, pb_tag), "%d != %d\n",
+ (int)offsetof(struct ptlrpc_body_v3, pb_tag), (int)offsetof(struct ptlrpc_body_v2, pb_tag));
+ LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_tag) == (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_tag), "%d != %d\n",
+ (int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_tag), (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_tag));
+ LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_padding0) == (int)offsetof(struct ptlrpc_body_v2, pb_padding0), "%d != %d\n",
+ (int)offsetof(struct ptlrpc_body_v3, pb_padding0), (int)offsetof(struct ptlrpc_body_v2, pb_padding0));
+ LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_padding0) == (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_padding0), "%d != %d\n",
+ (int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_padding0), (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_padding0));
+ LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_padding1) == (int)offsetof(struct ptlrpc_body_v2, pb_padding1), "%d != %d\n",
+ (int)offsetof(struct ptlrpc_body_v3, pb_padding1), (int)offsetof(struct ptlrpc_body_v2, pb_padding1));
+ LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_padding1) == (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_padding1), "%d != %d\n",
+ (int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_padding1), (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_padding1));
LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_last_committed) == (int)offsetof(struct ptlrpc_body_v2, pb_last_committed), "%d != %d\n",
(int)offsetof(struct ptlrpc_body_v3, pb_last_committed), (int)offsetof(struct ptlrpc_body_v2, pb_last_committed));
LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_last_committed) == (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_last_committed), "%d != %d\n",
@@ -757,10 +763,22 @@ void lustre_assert_wire_constants(void)
(int)offsetof(struct ptlrpc_body_v3, pb_pre_versions), (int)offsetof(struct ptlrpc_body_v2, pb_pre_versions));
LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_pre_versions) == (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_pre_versions), "%d != %d\n",
(int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_pre_versions), (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_pre_versions));
- LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_padding) == (int)offsetof(struct ptlrpc_body_v2, pb_padding), "%d != %d\n",
- (int)offsetof(struct ptlrpc_body_v3, pb_padding), (int)offsetof(struct ptlrpc_body_v2, pb_padding));
- LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_padding) == (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_padding), "%d != %d\n",
- (int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_padding), (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_padding));
+ LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_mbits) == (int)offsetof(struct ptlrpc_body_v2, pb_mbits), "%d != %d\n",
+ (int)offsetof(struct ptlrpc_body_v3, pb_mbits), (int)offsetof(struct ptlrpc_body_v2, pb_mbits));
+ LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_mbits) == (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_mbits), "%d != %d\n",
+ (int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_mbits), (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_mbits));
+ LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_padding64_0) == (int)offsetof(struct ptlrpc_body_v2, pb_padding64_0), "%d != %d\n",
+ (int)offsetof(struct ptlrpc_body_v3, pb_padding64_0), (int)offsetof(struct ptlrpc_body_v2, pb_padding64_0));
+ LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_padding64_0) == (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_padding64_0), "%d != %d\n",
+ (int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_padding64_0), (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_padding64_0));
+ LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_padding64_1) == (int)offsetof(struct ptlrpc_body_v2, pb_padding64_1), "%d != %d\n",
+ (int)offsetof(struct ptlrpc_body_v3, pb_padding64_1), (int)offsetof(struct ptlrpc_body_v2, pb_padding64_1));
+ LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_padding64_1) == (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_padding64_1), "%d != %d\n",
+ (int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_padding64_1), (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_padding64_1));
+ LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_padding64_2) == (int)offsetof(struct ptlrpc_body_v2, pb_padding64_2), "%d != %d\n",
+ (int)offsetof(struct ptlrpc_body_v3, pb_padding64_2), (int)offsetof(struct ptlrpc_body_v2, pb_padding64_2));
+ LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_padding64_2) == (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_padding64_2), "%d != %d\n",
+ (int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_padding64_2), (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_padding64_2));
LASSERTF(MSG_PTLRPC_BODY_OFF == 0, "found %lld\n",
(long long)MSG_PTLRPC_BODY_OFF);
LASSERTF(REQ_REC_OFF == 1, "found %lld\n",
@@ -802,41 +820,41 @@ void lustre_assert_wire_constants(void)
LASSERTF(MSGHDR_CKSUM_INCOMPAT18 == 2, "found %lld\n",
(long long)MSGHDR_CKSUM_INCOMPAT18);
LASSERTF(MSG_OP_FLAG_MASK == 0xffff0000UL, "found 0x%.8xUL\n",
- (unsigned)MSG_OP_FLAG_MASK);
+ (unsigned int)MSG_OP_FLAG_MASK);
LASSERTF(MSG_OP_FLAG_SHIFT == 16, "found %lld\n",
(long long)MSG_OP_FLAG_SHIFT);
LASSERTF(MSG_GEN_FLAG_MASK == 0x0000ffffUL, "found 0x%.8xUL\n",
- (unsigned)MSG_GEN_FLAG_MASK);
+ (unsigned int)MSG_GEN_FLAG_MASK);
LASSERTF(MSG_LAST_REPLAY == 0x00000001UL, "found 0x%.8xUL\n",
- (unsigned)MSG_LAST_REPLAY);
+ (unsigned int)MSG_LAST_REPLAY);
LASSERTF(MSG_RESENT == 0x00000002UL, "found 0x%.8xUL\n",
- (unsigned)MSG_RESENT);
+ (unsigned int)MSG_RESENT);
LASSERTF(MSG_REPLAY == 0x00000004UL, "found 0x%.8xUL\n",
- (unsigned)MSG_REPLAY);
+ (unsigned int)MSG_REPLAY);
LASSERTF(MSG_DELAY_REPLAY == 0x00000010UL, "found 0x%.8xUL\n",
- (unsigned)MSG_DELAY_REPLAY);
+ (unsigned int)MSG_DELAY_REPLAY);
LASSERTF(MSG_VERSION_REPLAY == 0x00000020UL, "found 0x%.8xUL\n",
- (unsigned)MSG_VERSION_REPLAY);
+ (unsigned int)MSG_VERSION_REPLAY);
LASSERTF(MSG_REQ_REPLAY_DONE == 0x00000040UL, "found 0x%.8xUL\n",
- (unsigned)MSG_REQ_REPLAY_DONE);
+ (unsigned int)MSG_REQ_REPLAY_DONE);
LASSERTF(MSG_LOCK_REPLAY_DONE == 0x00000080UL, "found 0x%.8xUL\n",
- (unsigned)MSG_LOCK_REPLAY_DONE);
+ (unsigned int)MSG_LOCK_REPLAY_DONE);
LASSERTF(MSG_CONNECT_RECOVERING == 0x00000001UL, "found 0x%.8xUL\n",
- (unsigned)MSG_CONNECT_RECOVERING);
+ (unsigned int)MSG_CONNECT_RECOVERING);
LASSERTF(MSG_CONNECT_RECONNECT == 0x00000002UL, "found 0x%.8xUL\n",
- (unsigned)MSG_CONNECT_RECONNECT);
+ (unsigned int)MSG_CONNECT_RECONNECT);
LASSERTF(MSG_CONNECT_REPLAYABLE == 0x00000004UL, "found 0x%.8xUL\n",
- (unsigned)MSG_CONNECT_REPLAYABLE);
+ (unsigned int)MSG_CONNECT_REPLAYABLE);
LASSERTF(MSG_CONNECT_LIBCLIENT == 0x00000010UL, "found 0x%.8xUL\n",
- (unsigned)MSG_CONNECT_LIBCLIENT);
+ (unsigned int)MSG_CONNECT_LIBCLIENT);
LASSERTF(MSG_CONNECT_INITIAL == 0x00000020UL, "found 0x%.8xUL\n",
- (unsigned)MSG_CONNECT_INITIAL);
+ (unsigned int)MSG_CONNECT_INITIAL);
LASSERTF(MSG_CONNECT_ASYNC == 0x00000040UL, "found 0x%.8xUL\n",
- (unsigned)MSG_CONNECT_ASYNC);
+ (unsigned int)MSG_CONNECT_ASYNC);
LASSERTF(MSG_CONNECT_NEXT_VER == 0x00000080UL, "found 0x%.8xUL\n",
- (unsigned)MSG_CONNECT_NEXT_VER);
+ (unsigned int)MSG_CONNECT_NEXT_VER);
LASSERTF(MSG_CONNECT_TRANSNO == 0x00000100UL, "found 0x%.8xUL\n",
- (unsigned)MSG_CONNECT_TRANSNO);
+ (unsigned int)MSG_CONNECT_TRANSNO);
/* Checks for struct obd_connect_data */
LASSERTF((int)sizeof(struct obd_connect_data) == 192, "found %lld\n",
@@ -905,14 +923,22 @@ void lustre_assert_wire_constants(void)
(long long)(int)offsetof(struct obd_connect_data, ocd_maxbytes));
LASSERTF((int)sizeof(((struct obd_connect_data *)0)->ocd_maxbytes) == 8, "found %lld\n",
(long long)(int)sizeof(((struct obd_connect_data *)0)->ocd_maxbytes));
- LASSERTF((int)offsetof(struct obd_connect_data, padding1) == 72, "found %lld\n",
+ LASSERTF((int)offsetof(struct obd_connect_data, ocd_maxmodrpcs) == 72, "found %lld\n",
+ (long long)(int)offsetof(struct obd_connect_data, ocd_maxmodrpcs));
+ LASSERTF((int)sizeof(((struct obd_connect_data *)0)->ocd_maxmodrpcs) == 2, "found %lld\n",
+ (long long)(int)sizeof(((struct obd_connect_data *)0)->ocd_maxmodrpcs));
+ LASSERTF((int)offsetof(struct obd_connect_data, padding0) == 74, "found %lld\n",
+ (long long)(int)offsetof(struct obd_connect_data, padding0));
+ LASSERTF((int)sizeof(((struct obd_connect_data *)0)->padding0) == 2, "found %lld\n",
+ (long long)(int)sizeof(((struct obd_connect_data *)0)->padding0));
+ LASSERTF((int)offsetof(struct obd_connect_data, padding1) == 76, "found %lld\n",
(long long)(int)offsetof(struct obd_connect_data, padding1));
- LASSERTF((int)sizeof(((struct obd_connect_data *)0)->padding1) == 8, "found %lld\n",
+ LASSERTF((int)sizeof(((struct obd_connect_data *)0)->padding1) == 4, "found %lld\n",
(long long)(int)sizeof(((struct obd_connect_data *)0)->padding1));
- LASSERTF((int)offsetof(struct obd_connect_data, padding2) == 80, "found %lld\n",
- (long long)(int)offsetof(struct obd_connect_data, padding2));
- LASSERTF((int)sizeof(((struct obd_connect_data *)0)->padding2) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct obd_connect_data *)0)->padding2));
+ LASSERTF((int)offsetof(struct obd_connect_data, ocd_connect_flags2) == 80, "found %lld\n",
+ (long long)(int)offsetof(struct obd_connect_data, ocd_connect_flags2));
+ LASSERTF((int)sizeof(((struct obd_connect_data *)0)->ocd_connect_flags2) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct obd_connect_data *)0)->ocd_connect_flags2));
LASSERTF((int)offsetof(struct obd_connect_data, padding3) == 88, "found %lld\n",
(long long)(int)offsetof(struct obd_connect_data, padding3));
LASSERTF((int)sizeof(((struct obd_connect_data *)0)->padding3) == 8, "found %lld\n",
@@ -1075,14 +1101,24 @@ void lustre_assert_wire_constants(void)
OBD_CONNECT_LFSCK);
LASSERTF(OBD_CONNECT_UNLINK_CLOSE == 0x100000000000000ULL, "found 0x%.16llxULL\n",
OBD_CONNECT_UNLINK_CLOSE);
+ LASSERTF(OBD_CONNECT_MULTIMODRPCS == 0x200000000000000ULL, "found 0x%.16llxULL\n",
+ OBD_CONNECT_MULTIMODRPCS);
LASSERTF(OBD_CONNECT_DIR_STRIPE == 0x400000000000000ULL, "found 0x%.16llxULL\n",
OBD_CONNECT_DIR_STRIPE);
+ LASSERTF(OBD_CONNECT_SUBTREE == 0x800000000000000ULL, "found 0x%.16llxULL\n",
+ OBD_CONNECT_SUBTREE);
+ LASSERTF(OBD_CONNECT_LOCK_AHEAD == 0x1000000000000000ULL, "found 0x%.16llxULL\n",
+ OBD_CONNECT_LOCK_AHEAD);
+ LASSERTF(OBD_CONNECT_OBDOPACK == 0x4000000000000000ULL, "found 0x%.16llxULL\n",
+ OBD_CONNECT_OBDOPACK);
+ LASSERTF(OBD_CONNECT_FLAGS2 == 0x8000000000000000ULL, "found 0x%.16llxULL\n",
+ OBD_CONNECT_FLAGS2);
LASSERTF(OBD_CKSUM_CRC32 == 0x00000001UL, "found 0x%.8xUL\n",
- (unsigned)OBD_CKSUM_CRC32);
+ (unsigned int)OBD_CKSUM_CRC32);
LASSERTF(OBD_CKSUM_ADLER == 0x00000002UL, "found 0x%.8xUL\n",
- (unsigned)OBD_CKSUM_ADLER);
+ (unsigned int)OBD_CKSUM_ADLER);
LASSERTF(OBD_CKSUM_CRC32C == 0x00000004UL, "found 0x%.8xUL\n",
- (unsigned)OBD_CKSUM_CRC32C);
+ (unsigned int)OBD_CKSUM_CRC32C);
/* Checks for struct obdo */
LASSERTF((int)sizeof(struct obdo) == 208, "found %lld\n",
@@ -1239,8 +1275,6 @@ void lustre_assert_wire_constants(void)
OBD_MD_FLCKSUM);
LASSERTF(OBD_MD_FLQOS == (0x00200000ULL), "found 0x%.16llxULL\n",
OBD_MD_FLQOS);
- LASSERTF(OBD_MD_FLCOOKIE == (0x00800000ULL), "found 0x%.16llxULL\n",
- OBD_MD_FLCOOKIE);
LASSERTF(OBD_MD_FLGROUP == (0x01000000ULL), "found 0x%.16llxULL\n",
OBD_MD_FLGROUP);
LASSERTF(OBD_MD_FLFID == (0x02000000ULL), "found 0x%.16llxULL\n",
@@ -1394,13 +1428,13 @@ void lustre_assert_wire_constants(void)
(long long)(int)sizeof(((struct lov_mds_md_v3 *)0)->lmm_objects[0]));
CLASSERT(LOV_MAGIC_V3 == (0x0BD30000 | 0x0BD0));
LASSERTF(LOV_PATTERN_RAID0 == 0x00000001UL, "found 0x%.8xUL\n",
- (unsigned)LOV_PATTERN_RAID0);
+ (unsigned int)LOV_PATTERN_RAID0);
LASSERTF(LOV_PATTERN_RAID1 == 0x00000002UL, "found 0x%.8xUL\n",
- (unsigned)LOV_PATTERN_RAID1);
+ (unsigned int)LOV_PATTERN_RAID1);
LASSERTF(LOV_PATTERN_FIRST == 0x00000100UL, "found 0x%.8xUL\n",
- (unsigned)LOV_PATTERN_FIRST);
+ (unsigned int)LOV_PATTERN_FIRST);
LASSERTF(LOV_PATTERN_CMOBD == 0x00000200UL, "found 0x%.8xUL\n",
- (unsigned)LOV_PATTERN_CMOBD);
+ (unsigned int)LOV_PATTERN_CMOBD);
/* Checks for struct lmv_mds_md_v1 */
LASSERTF((int)sizeof(struct lmv_mds_md_v1) == 56, "found %lld\n",
@@ -1542,6 +1576,8 @@ void lustre_assert_wire_constants(void)
(long long)(int)offsetof(struct obd_ioobj, ioo_bufcnt));
LASSERTF((int)sizeof(((struct obd_ioobj *)0)->ioo_bufcnt) == 4, "found %lld\n",
(long long)(int)sizeof(((struct obd_ioobj *)0)->ioo_bufcnt));
+ LASSERTF(IOOBJ_MAX_BRW_BITS == 16, "found %lld\n",
+ (long long)IOOBJ_MAX_BRW_BITS);
/* Checks for union lquota_id */
LASSERTF((int)sizeof(union lquota_id) == 16, "found %lld\n",
@@ -1817,10 +1853,10 @@ void lustre_assert_wire_constants(void)
(long long)(int)offsetof(struct mdt_body, mbo_max_mdsize));
LASSERTF((int)sizeof(((struct mdt_body *)0)->mbo_max_mdsize) == 4, "found %lld\n",
(long long)(int)sizeof(((struct mdt_body *)0)->mbo_max_mdsize));
- LASSERTF((int)offsetof(struct mdt_body, mbo_max_cookiesize) == 160, "found %lld\n",
- (long long)(int)offsetof(struct mdt_body, mbo_max_cookiesize));
- LASSERTF((int)sizeof(((struct mdt_body *)0)->mbo_max_cookiesize) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_body *)0)->mbo_max_cookiesize));
+ LASSERTF((int)offsetof(struct mdt_body, mbo_unused3) == 160, "found %lld\n",
+ (long long)(int)offsetof(struct mdt_body, mbo_unused3));
+ LASSERTF((int)sizeof(((struct mdt_body *)0)->mbo_unused3) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct mdt_body *)0)->mbo_unused3));
LASSERTF((int)offsetof(struct mdt_body, mbo_uid_h) == 164, "found %lld\n",
(long long)(int)offsetof(struct mdt_body, mbo_uid_h));
LASSERTF((int)sizeof(((struct mdt_body *)0)->mbo_uid_h) == 4, "found %lld\n",
@@ -1857,12 +1893,6 @@ void lustre_assert_wire_constants(void)
MDS_FMODE_CLOSED);
LASSERTF(MDS_FMODE_EXEC == 000000000004UL, "found 0%.11oUL\n",
MDS_FMODE_EXEC);
- LASSERTF(MDS_FMODE_EPOCH == 000001000000UL, "found 0%.11oUL\n",
- MDS_FMODE_EPOCH);
- LASSERTF(MDS_FMODE_TRUNC == 000002000000UL, "found 0%.11oUL\n",
- MDS_FMODE_TRUNC);
- LASSERTF(MDS_FMODE_SOM == 000004000000UL, "found 0%.11oUL\n",
- MDS_FMODE_SOM);
LASSERTF(MDS_OPEN_CREATED == 000000000010UL, "found 0%.11oUL\n",
MDS_OPEN_CREATED);
LASSERTF(MDS_OPEN_CROSS == 000000000020UL, "found 0%.11oUL\n",
@@ -1905,10 +1935,20 @@ void lustre_assert_wire_constants(void)
LUSTRE_IMMUTABLE_FL);
LASSERTF(LUSTRE_APPEND_FL == 0x00000020, "found 0x%.8x\n",
LUSTRE_APPEND_FL);
+ LASSERTF(LUSTRE_NODUMP_FL == 0x00000040, "found 0x%.8x\n",
+ LUSTRE_NODUMP_FL);
LASSERTF(LUSTRE_NOATIME_FL == 0x00000080, "found 0x%.8x\n",
LUSTRE_NOATIME_FL);
+ LASSERTF(LUSTRE_INDEX_FL == 0x00001000, "found 0x%.8x\n",
+ LUSTRE_INDEX_FL);
LASSERTF(LUSTRE_DIRSYNC_FL == 0x00010000, "found 0x%.8x\n",
LUSTRE_DIRSYNC_FL);
+ LASSERTF(LUSTRE_TOPDIR_FL == 0x00020000, "found 0x%.8x\n",
+ LUSTRE_TOPDIR_FL);
+ LASSERTF(LUSTRE_DIRECTIO_FL == 0x00100000, "found 0x%.8x\n",
+ LUSTRE_DIRECTIO_FL);
+ LASSERTF(LUSTRE_INLINE_DATA_FL == 0x10000000, "found 0x%.8x\n",
+ LUSTRE_INLINE_DATA_FL);
LASSERTF(MDS_INODELOCK_LOOKUP == 0x000001, "found 0x%.8x\n",
MDS_INODELOCK_LOOKUP);
LASSERTF(MDS_INODELOCK_UPDATE == 0x000002, "found 0x%.8x\n",
@@ -1921,22 +1961,22 @@ void lustre_assert_wire_constants(void)
/* Checks for struct mdt_ioepoch */
LASSERTF((int)sizeof(struct mdt_ioepoch) == 24, "found %lld\n",
(long long)(int)sizeof(struct mdt_ioepoch));
- LASSERTF((int)offsetof(struct mdt_ioepoch, handle) == 0, "found %lld\n",
- (long long)(int)offsetof(struct mdt_ioepoch, handle));
- LASSERTF((int)sizeof(((struct mdt_ioepoch *)0)->handle) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_ioepoch *)0)->handle));
- LASSERTF((int)offsetof(struct mdt_ioepoch, ioepoch) == 8, "found %lld\n",
- (long long)(int)offsetof(struct mdt_ioepoch, ioepoch));
- LASSERTF((int)sizeof(((struct mdt_ioepoch *)0)->ioepoch) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_ioepoch *)0)->ioepoch));
- LASSERTF((int)offsetof(struct mdt_ioepoch, flags) == 16, "found %lld\n",
- (long long)(int)offsetof(struct mdt_ioepoch, flags));
- LASSERTF((int)sizeof(((struct mdt_ioepoch *)0)->flags) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_ioepoch *)0)->flags));
- LASSERTF((int)offsetof(struct mdt_ioepoch, padding) == 20, "found %lld\n",
- (long long)(int)offsetof(struct mdt_ioepoch, padding));
- LASSERTF((int)sizeof(((struct mdt_ioepoch *)0)->padding) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_ioepoch *)0)->padding));
+ LASSERTF((int)offsetof(struct mdt_ioepoch, mio_handle) == 0, "found %lld\n",
+ (long long)(int)offsetof(struct mdt_ioepoch, mio_handle));
+ LASSERTF((int)sizeof(((struct mdt_ioepoch *)0)->mio_handle) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct mdt_ioepoch *)0)->mio_handle));
+ LASSERTF((int)offsetof(struct mdt_ioepoch, mio_unused1) == 8, "found %lld\n",
+ (long long)(int)offsetof(struct mdt_ioepoch, mio_unused1));
+ LASSERTF((int)sizeof(((struct mdt_ioepoch *)0)->mio_unused1) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct mdt_ioepoch *)0)->mio_unused1));
+ LASSERTF((int)offsetof(struct mdt_ioepoch, mio_unused2) == 16, "found %lld\n",
+ (long long)(int)offsetof(struct mdt_ioepoch, mio_unused2));
+ LASSERTF((int)sizeof(((struct mdt_ioepoch *)0)->mio_unused2) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct mdt_ioepoch *)0)->mio_unused2));
+ LASSERTF((int)offsetof(struct mdt_ioepoch, mio_padding) == 20, "found %lld\n",
+ (long long)(int)offsetof(struct mdt_ioepoch, mio_padding));
+ LASSERTF((int)sizeof(((struct mdt_ioepoch *)0)->mio_padding) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct mdt_ioepoch *)0)->mio_padding));
/* Checks for struct mdt_rec_setattr */
LASSERTF((int)sizeof(struct mdt_rec_setattr) == 136, "found %lld\n",
@@ -3520,21 +3560,21 @@ void lustre_assert_wire_constants(void)
LASSERTF((int)sizeof(((struct llogd_conn_body *)0)->lgdc_ctxt_idx) == 4, "found %lld\n",
(long long)(int)sizeof(((struct llogd_conn_body *)0)->lgdc_ctxt_idx));
- /* Checks for struct ll_fiemap_info_key */
+ /* Checks for struct fiemap_info_key */
LASSERTF((int)sizeof(struct ll_fiemap_info_key) == 248, "found %lld\n",
(long long)(int)sizeof(struct ll_fiemap_info_key));
- LASSERTF((int)offsetof(struct ll_fiemap_info_key, name[8]) == 8, "found %lld\n",
- (long long)(int)offsetof(struct ll_fiemap_info_key, name[8]));
- LASSERTF((int)sizeof(((struct ll_fiemap_info_key *)0)->name[8]) == 1, "found %lld\n",
- (long long)(int)sizeof(((struct ll_fiemap_info_key *)0)->name[8]));
- LASSERTF((int)offsetof(struct ll_fiemap_info_key, oa) == 8, "found %lld\n",
- (long long)(int)offsetof(struct ll_fiemap_info_key, oa));
- LASSERTF((int)sizeof(((struct ll_fiemap_info_key *)0)->oa) == 208, "found %lld\n",
- (long long)(int)sizeof(((struct ll_fiemap_info_key *)0)->oa));
- LASSERTF((int)offsetof(struct ll_fiemap_info_key, fiemap) == 216, "found %lld\n",
- (long long)(int)offsetof(struct ll_fiemap_info_key, fiemap));
- LASSERTF((int)sizeof(((struct ll_fiemap_info_key *)0)->fiemap) == 32, "found %lld\n",
- (long long)(int)sizeof(((struct ll_fiemap_info_key *)0)->fiemap));
+ LASSERTF((int)offsetof(struct ll_fiemap_info_key, lfik_name[8]) == 8, "found %lld\n",
+ (long long)(int)offsetof(struct ll_fiemap_info_key, lfik_name[8]));
+ LASSERTF((int)sizeof(((struct ll_fiemap_info_key *)0)->lfik_name[8]) == 1, "found %lld\n",
+ (long long)(int)sizeof(((struct ll_fiemap_info_key *)0)->lfik_name[8]));
+ LASSERTF((int)offsetof(struct ll_fiemap_info_key, lfik_oa) == 8, "found %lld\n",
+ (long long)(int)offsetof(struct ll_fiemap_info_key, lfik_oa));
+ LASSERTF((int)sizeof(((struct ll_fiemap_info_key *)0)->lfik_oa) == 208, "found %lld\n",
+ (long long)(int)sizeof(((struct ll_fiemap_info_key *)0)->lfik_oa));
+ LASSERTF((int)offsetof(struct ll_fiemap_info_key, lfik_fiemap) == 216, "found %lld\n",
+ (long long)(int)offsetof(struct ll_fiemap_info_key, lfik_fiemap));
+ LASSERTF((int)sizeof(((struct ll_fiemap_info_key *)0)->lfik_fiemap) == 32, "found %lld\n",
+ (long long)(int)sizeof(((struct ll_fiemap_info_key *)0)->lfik_fiemap));
/* Checks for struct mgs_target_info */
LASSERTF((int)sizeof(struct mgs_target_info) == 4544, "found %lld\n",
@@ -3670,64 +3710,64 @@ void lustre_assert_wire_constants(void)
LASSERTF((int)sizeof(((struct getinfo_fid2path *)0)->gf_path[0]) == 1, "found %lld\n",
(long long)(int)sizeof(((struct getinfo_fid2path *)0)->gf_path[0]));
- /* Checks for struct ll_user_fiemap */
- LASSERTF((int)sizeof(struct ll_user_fiemap) == 32, "found %lld\n",
- (long long)(int)sizeof(struct ll_user_fiemap));
- LASSERTF((int)offsetof(struct ll_user_fiemap, fm_start) == 0, "found %lld\n",
- (long long)(int)offsetof(struct ll_user_fiemap, fm_start));
- LASSERTF((int)sizeof(((struct ll_user_fiemap *)0)->fm_start) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct ll_user_fiemap *)0)->fm_start));
- LASSERTF((int)offsetof(struct ll_user_fiemap, fm_length) == 8, "found %lld\n",
- (long long)(int)offsetof(struct ll_user_fiemap, fm_length));
- LASSERTF((int)sizeof(((struct ll_user_fiemap *)0)->fm_length) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct ll_user_fiemap *)0)->fm_length));
- LASSERTF((int)offsetof(struct ll_user_fiemap, fm_flags) == 16, "found %lld\n",
- (long long)(int)offsetof(struct ll_user_fiemap, fm_flags));
- LASSERTF((int)sizeof(((struct ll_user_fiemap *)0)->fm_flags) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct ll_user_fiemap *)0)->fm_flags));
- LASSERTF((int)offsetof(struct ll_user_fiemap, fm_mapped_extents) == 20, "found %lld\n",
- (long long)(int)offsetof(struct ll_user_fiemap, fm_mapped_extents));
- LASSERTF((int)sizeof(((struct ll_user_fiemap *)0)->fm_mapped_extents) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct ll_user_fiemap *)0)->fm_mapped_extents));
- LASSERTF((int)offsetof(struct ll_user_fiemap, fm_extent_count) == 24, "found %lld\n",
- (long long)(int)offsetof(struct ll_user_fiemap, fm_extent_count));
- LASSERTF((int)sizeof(((struct ll_user_fiemap *)0)->fm_extent_count) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct ll_user_fiemap *)0)->fm_extent_count));
- LASSERTF((int)offsetof(struct ll_user_fiemap, fm_reserved) == 28, "found %lld\n",
- (long long)(int)offsetof(struct ll_user_fiemap, fm_reserved));
- LASSERTF((int)sizeof(((struct ll_user_fiemap *)0)->fm_reserved) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct ll_user_fiemap *)0)->fm_reserved));
- LASSERTF((int)offsetof(struct ll_user_fiemap, fm_extents) == 32, "found %lld\n",
- (long long)(int)offsetof(struct ll_user_fiemap, fm_extents));
- LASSERTF((int)sizeof(((struct ll_user_fiemap *)0)->fm_extents) == 0, "found %lld\n",
- (long long)(int)sizeof(((struct ll_user_fiemap *)0)->fm_extents));
+ /* Checks for struct fiemap */
+ LASSERTF((int)sizeof(struct fiemap) == 32, "found %lld\n",
+ (long long)(int)sizeof(struct fiemap));
+ LASSERTF((int)offsetof(struct fiemap, fm_start) == 0, "found %lld\n",
+ (long long)(int)offsetof(struct fiemap, fm_start));
+ LASSERTF((int)sizeof(((struct fiemap *)0)->fm_start) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct fiemap *)0)->fm_start));
+ LASSERTF((int)offsetof(struct fiemap, fm_length) == 8, "found %lld\n",
+ (long long)(int)offsetof(struct fiemap, fm_length));
+ LASSERTF((int)sizeof(((struct fiemap *)0)->fm_length) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct fiemap *)0)->fm_length));
+ LASSERTF((int)offsetof(struct fiemap, fm_flags) == 16, "found %lld\n",
+ (long long)(int)offsetof(struct fiemap, fm_flags));
+ LASSERTF((int)sizeof(((struct fiemap *)0)->fm_flags) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct fiemap *)0)->fm_flags));
+ LASSERTF((int)offsetof(struct fiemap, fm_mapped_extents) == 20, "found %lld\n",
+ (long long)(int)offsetof(struct fiemap, fm_mapped_extents));
+ LASSERTF((int)sizeof(((struct fiemap *)0)->fm_mapped_extents) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct fiemap *)0)->fm_mapped_extents));
+ LASSERTF((int)offsetof(struct fiemap, fm_extent_count) == 24, "found %lld\n",
+ (long long)(int)offsetof(struct fiemap, fm_extent_count));
+ LASSERTF((int)sizeof(((struct fiemap *)0)->fm_extent_count) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct fiemap *)0)->fm_extent_count));
+ LASSERTF((int)offsetof(struct fiemap, fm_reserved) == 28, "found %lld\n",
+ (long long)(int)offsetof(struct fiemap, fm_reserved));
+ LASSERTF((int)sizeof(((struct fiemap *)0)->fm_reserved) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct fiemap *)0)->fm_reserved));
+ LASSERTF((int)offsetof(struct fiemap, fm_extents) == 32, "found %lld\n",
+ (long long)(int)offsetof(struct fiemap, fm_extents));
+ LASSERTF((int)sizeof(((struct fiemap *)0)->fm_extents) == 0, "found %lld\n",
+ (long long)(int)sizeof(((struct fiemap *)0)->fm_extents));
CLASSERT(FIEMAP_FLAG_SYNC == 0x00000001);
CLASSERT(FIEMAP_FLAG_XATTR == 0x00000002);
CLASSERT(FIEMAP_FLAG_DEVICE_ORDER == 0x40000000);
- /* Checks for struct ll_fiemap_extent */
- LASSERTF((int)sizeof(struct ll_fiemap_extent) == 56, "found %lld\n",
- (long long)(int)sizeof(struct ll_fiemap_extent));
- LASSERTF((int)offsetof(struct ll_fiemap_extent, fe_logical) == 0, "found %lld\n",
- (long long)(int)offsetof(struct ll_fiemap_extent, fe_logical));
- LASSERTF((int)sizeof(((struct ll_fiemap_extent *)0)->fe_logical) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct ll_fiemap_extent *)0)->fe_logical));
- LASSERTF((int)offsetof(struct ll_fiemap_extent, fe_physical) == 8, "found %lld\n",
- (long long)(int)offsetof(struct ll_fiemap_extent, fe_physical));
- LASSERTF((int)sizeof(((struct ll_fiemap_extent *)0)->fe_physical) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct ll_fiemap_extent *)0)->fe_physical));
- LASSERTF((int)offsetof(struct ll_fiemap_extent, fe_length) == 16, "found %lld\n",
- (long long)(int)offsetof(struct ll_fiemap_extent, fe_length));
- LASSERTF((int)sizeof(((struct ll_fiemap_extent *)0)->fe_length) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct ll_fiemap_extent *)0)->fe_length));
- LASSERTF((int)offsetof(struct ll_fiemap_extent, fe_flags) == 40, "found %lld\n",
- (long long)(int)offsetof(struct ll_fiemap_extent, fe_flags));
- LASSERTF((int)sizeof(((struct ll_fiemap_extent *)0)->fe_flags) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct ll_fiemap_extent *)0)->fe_flags));
- LASSERTF((int)offsetof(struct ll_fiemap_extent, fe_device) == 44, "found %lld\n",
- (long long)(int)offsetof(struct ll_fiemap_extent, fe_device));
- LASSERTF((int)sizeof(((struct ll_fiemap_extent *)0)->fe_device) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct ll_fiemap_extent *)0)->fe_device));
+ /* Checks for struct fiemap_extent */
+ LASSERTF((int)sizeof(struct fiemap_extent) == 56, "found %lld\n",
+ (long long)(int)sizeof(struct fiemap_extent));
+ LASSERTF((int)offsetof(struct fiemap_extent, fe_logical) == 0, "found %lld\n",
+ (long long)(int)offsetof(struct fiemap_extent, fe_logical));
+ LASSERTF((int)sizeof(((struct fiemap_extent *)0)->fe_logical) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct fiemap_extent *)0)->fe_logical));
+ LASSERTF((int)offsetof(struct fiemap_extent, fe_physical) == 8, "found %lld\n",
+ (long long)(int)offsetof(struct fiemap_extent, fe_physical));
+ LASSERTF((int)sizeof(((struct fiemap_extent *)0)->fe_physical) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct fiemap_extent *)0)->fe_physical));
+ LASSERTF((int)offsetof(struct fiemap_extent, fe_length) == 16, "found %lld\n",
+ (long long)(int)offsetof(struct fiemap_extent, fe_length));
+ LASSERTF((int)sizeof(((struct fiemap_extent *)0)->fe_length) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct fiemap_extent *)0)->fe_length));
+ LASSERTF((int)offsetof(struct fiemap_extent, fe_flags) == 40, "found %lld\n",
+ (long long)(int)offsetof(struct fiemap_extent, fe_flags));
+ LASSERTF((int)sizeof(((struct fiemap_extent *)0)->fe_flags) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct fiemap_extent *)0)->fe_flags));
+ LASSERTF((int)offsetof(struct fiemap_extent, fe_reserved[0]) == 44, "found %lld\n",
+ (long long)(int)offsetof(struct fiemap_extent, fe_reserved[0]));
+ LASSERTF((int)sizeof(((struct fiemap_extent *)0)->fe_reserved[0]) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct fiemap_extent *)0)->fe_reserved[0]));
CLASSERT(FIEMAP_EXTENT_LAST == 0x00000001);
CLASSERT(FIEMAP_EXTENT_UNKNOWN == 0x00000002);
CLASSERT(FIEMAP_EXTENT_DELALLOC == 0x00000004);
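
CLASSERT, unlike LASSERTF, fails at compile time, so constant checks like the FIEMAP flags above cost nothing at runtime. A rough standalone equivalent, assuming a C11 toolchain (the _SKETCH constants are illustrative copies, not the kernel's):

    /* Compile-time wire-constant checks; _Static_assert needs C11. */
    #define FIEMAP_FLAG_SYNC_SKETCH  0x00000001
    #define FIEMAP_FLAG_XATTR_SKETCH 0x00000002

    _Static_assert(FIEMAP_FLAG_SYNC_SKETCH == 0x00000001,
                   "wire constant changed");
    _Static_assert(FIEMAP_FLAG_XATTR_SKETCH == 0x00000002,
                   "wire constant changed");

    /* Pre-C11 spelling of the same idea: the array size goes negative,
     * and compilation stops, when the condition is false. */
    typedef char classert_sketch[(FIEMAP_FLAG_SYNC_SKETCH == 1) ? 1 : -1];

    int main(void) { return 0; }
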
@@ -4093,9 +4133,9 @@ void lustre_assert_wire_constants(void)
LASSERTF((int)sizeof(((struct hsm_request *)0)->hr_data_len) == 4, "found %lld\n",
(long long)(int)sizeof(((struct hsm_request *)0)->hr_data_len));
LASSERTF(HSM_FORCE_ACTION == 0x00000001UL, "found 0x%.8xUL\n",
- (unsigned)HSM_FORCE_ACTION);
+ (unsigned int)HSM_FORCE_ACTION);
LASSERTF(HSM_GHOST_COPY == 0x00000002UL, "found 0x%.8xUL\n",
- (unsigned)HSM_GHOST_COPY);
+ (unsigned int)HSM_GHOST_COPY);
/* Checks for struct hsm_user_request */
LASSERTF((int)sizeof(struct hsm_user_request) == 24, "found %lld\n",
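
The recurring (unsigned) to (unsigned int) churn throughout this file is checkpatch style, but the cast itself is load-bearing: the flag constants are UL, and %x consumes an unsigned int from the variadic argument list, so on LP64 targets an uncast unsigned long would be a mismatched conversion. A small demonstration with a hypothetical copy of one constant:

    #include <stdio.h>

    #define HSM_FORCE_ACTION_SKETCH 0x00000001UL  /* illustrative copy */

    int main(void)
    {
            /* %x expects unsigned int; the UL constant must be cast down
             * explicitly or the variadic call is mismatched on LP64. */
            printf("found 0x%.8xUL\n", (unsigned int)HSM_FORCE_ACTION_SKETCH);
            return 0;
    }
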
diff --git a/drivers/staging/lustre/sysfs-fs-lustre b/drivers/staging/lustre/sysfs-fs-lustre
index 20206ba965af..8691c6543a9c 100644
--- a/drivers/staging/lustre/sysfs-fs-lustre
+++ b/drivers/staging/lustre/sysfs-fs-lustre
@@ -11,7 +11,7 @@ Description:
Shows if the lustre module has pinger support.
"on" means yes and "off" means no.
-What: /sys/fs/lustre/health
+What: /sys/fs/lustre/health_check
Date: May 2015
Contact: "Oleg Drokin" <oleg.drokin@intel.com>
Description:
diff --git a/drivers/staging/media/Kconfig b/drivers/staging/media/Kconfig
index 6620d96ee44d..ffb8fa72c3da 100644
--- a/drivers/staging/media/Kconfig
+++ b/drivers/staging/media/Kconfig
@@ -21,16 +21,12 @@ if STAGING_MEDIA && MEDIA_SUPPORT
# Please keep them in alphabetic order
source "drivers/staging/media/bcm2048/Kconfig"
-source "drivers/staging/media/cec/Kconfig"
-
source "drivers/staging/media/cxd2099/Kconfig"
source "drivers/staging/media/davinci_vpfe/Kconfig"
source "drivers/staging/media/omap4iss/Kconfig"
-source "drivers/staging/media/pulse8-cec/Kconfig"
-
source "drivers/staging/media/s5p-cec/Kconfig"
# Keep LIRC at the end, as it has sub-menus
diff --git a/drivers/staging/media/Makefile b/drivers/staging/media/Makefile
index 906257e94dda..a28e82cf6447 100644
--- a/drivers/staging/media/Makefile
+++ b/drivers/staging/media/Makefile
@@ -1,9 +1,7 @@
obj-$(CONFIG_I2C_BCM2048) += bcm2048/
-obj-$(CONFIG_MEDIA_CEC) += cec/
obj-$(CONFIG_VIDEO_SAMSUNG_S5P_CEC) += s5p-cec/
obj-$(CONFIG_DVB_CXD2099) += cxd2099/
obj-$(CONFIG_LIRC_STAGING) += lirc/
obj-$(CONFIG_VIDEO_DM365_VPFE) += davinci_vpfe/
obj-$(CONFIG_VIDEO_OMAP4) += omap4iss/
-obj-$(CONFIG_USB_PULSE8_CEC) += pulse8-cec/
obj-$(CONFIG_VIDEO_STI_HDMI_CEC) += st-cec/
diff --git a/drivers/staging/media/bcm2048/radio-bcm2048.c b/drivers/staging/media/bcm2048/radio-bcm2048.c
index 4d9bd02ede47..37bd439ee08b 100644
--- a/drivers/staging/media/bcm2048/radio-bcm2048.c
+++ b/drivers/staging/media/bcm2048/radio-bcm2048.c
@@ -17,10 +17,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
- * 02110-1301 USA
*/
/*
@@ -999,7 +995,7 @@ static int bcm2048_set_fm_search_tune_mode(struct bcm2048_device *bdev,
timeout = BCM2048_AUTO_SEARCH_TIMEOUT;
if (!wait_for_completion_timeout(&bdev->compl,
- msecs_to_jiffies(timeout)))
+ msecs_to_jiffies(timeout)))
dev_err(&bdev->client->dev, "IRQ timeout.\n");
if (value)
@@ -2059,67 +2055,67 @@ property_signed_read(fm_rssi, int, "%d")
DEFINE_SYSFS_PROPERTY(region, unsigned, int, "%u", 0)
static struct device_attribute attrs[] = {
- __ATTR(power_state, S_IRUGO | S_IWUSR, bcm2048_power_state_read,
+ __ATTR(power_state, 0644, bcm2048_power_state_read,
bcm2048_power_state_write),
- __ATTR(mute, S_IRUGO | S_IWUSR, bcm2048_mute_read,
+ __ATTR(mute, 0644, bcm2048_mute_read,
bcm2048_mute_write),
- __ATTR(audio_route, S_IRUGO | S_IWUSR, bcm2048_audio_route_read,
+ __ATTR(audio_route, 0644, bcm2048_audio_route_read,
bcm2048_audio_route_write),
- __ATTR(dac_output, S_IRUGO | S_IWUSR, bcm2048_dac_output_read,
+ __ATTR(dac_output, 0644, bcm2048_dac_output_read,
bcm2048_dac_output_write),
- __ATTR(fm_hi_lo_injection, S_IRUGO | S_IWUSR,
+ __ATTR(fm_hi_lo_injection, 0644,
bcm2048_fm_hi_lo_injection_read,
bcm2048_fm_hi_lo_injection_write),
- __ATTR(fm_frequency, S_IRUGO | S_IWUSR, bcm2048_fm_frequency_read,
+ __ATTR(fm_frequency, 0644, bcm2048_fm_frequency_read,
bcm2048_fm_frequency_write),
- __ATTR(fm_af_frequency, S_IRUGO | S_IWUSR,
+ __ATTR(fm_af_frequency, 0644,
bcm2048_fm_af_frequency_read,
bcm2048_fm_af_frequency_write),
- __ATTR(fm_deemphasis, S_IRUGO | S_IWUSR, bcm2048_fm_deemphasis_read,
+ __ATTR(fm_deemphasis, 0644, bcm2048_fm_deemphasis_read,
bcm2048_fm_deemphasis_write),
- __ATTR(fm_rds_mask, S_IRUGO | S_IWUSR, bcm2048_fm_rds_mask_read,
+ __ATTR(fm_rds_mask, 0644, bcm2048_fm_rds_mask_read,
bcm2048_fm_rds_mask_write),
- __ATTR(fm_best_tune_mode, S_IRUGO | S_IWUSR,
+ __ATTR(fm_best_tune_mode, 0644,
bcm2048_fm_best_tune_mode_read,
bcm2048_fm_best_tune_mode_write),
- __ATTR(fm_search_rssi_threshold, S_IRUGO | S_IWUSR,
+ __ATTR(fm_search_rssi_threshold, 0644,
bcm2048_fm_search_rssi_threshold_read,
bcm2048_fm_search_rssi_threshold_write),
- __ATTR(fm_search_mode_direction, S_IRUGO | S_IWUSR,
+ __ATTR(fm_search_mode_direction, 0644,
bcm2048_fm_search_mode_direction_read,
bcm2048_fm_search_mode_direction_write),
- __ATTR(fm_search_tune_mode, S_IRUGO | S_IWUSR,
+ __ATTR(fm_search_tune_mode, 0644,
bcm2048_fm_search_tune_mode_read,
bcm2048_fm_search_tune_mode_write),
- __ATTR(rds, S_IRUGO | S_IWUSR, bcm2048_rds_read,
+ __ATTR(rds, 0644, bcm2048_rds_read,
bcm2048_rds_write),
- __ATTR(rds_b_block_mask, S_IRUGO | S_IWUSR,
+ __ATTR(rds_b_block_mask, 0644,
bcm2048_rds_b_block_mask_read,
bcm2048_rds_b_block_mask_write),
- __ATTR(rds_b_block_match, S_IRUGO | S_IWUSR,
+ __ATTR(rds_b_block_match, 0644,
bcm2048_rds_b_block_match_read,
bcm2048_rds_b_block_match_write),
- __ATTR(rds_pi_mask, S_IRUGO | S_IWUSR, bcm2048_rds_pi_mask_read,
+ __ATTR(rds_pi_mask, 0644, bcm2048_rds_pi_mask_read,
bcm2048_rds_pi_mask_write),
- __ATTR(rds_pi_match, S_IRUGO | S_IWUSR, bcm2048_rds_pi_match_read,
+ __ATTR(rds_pi_match, 0644, bcm2048_rds_pi_match_read,
bcm2048_rds_pi_match_write),
- __ATTR(rds_wline, S_IRUGO | S_IWUSR, bcm2048_rds_wline_read,
+ __ATTR(rds_wline, 0644, bcm2048_rds_wline_read,
bcm2048_rds_wline_write),
- __ATTR(rds_pi, S_IRUGO, bcm2048_rds_pi_read, NULL),
- __ATTR(rds_rt, S_IRUGO, bcm2048_rds_rt_read, NULL),
- __ATTR(rds_ps, S_IRUGO, bcm2048_rds_ps_read, NULL),
- __ATTR(fm_rds_flags, S_IRUGO, bcm2048_fm_rds_flags_read, NULL),
- __ATTR(region_bottom_frequency, S_IRUGO,
+ __ATTR(rds_pi, 0444, bcm2048_rds_pi_read, NULL),
+ __ATTR(rds_rt, 0444, bcm2048_rds_rt_read, NULL),
+ __ATTR(rds_ps, 0444, bcm2048_rds_ps_read, NULL),
+ __ATTR(fm_rds_flags, 0444, bcm2048_fm_rds_flags_read, NULL),
+ __ATTR(region_bottom_frequency, 0444,
bcm2048_region_bottom_frequency_read, NULL),
- __ATTR(region_top_frequency, S_IRUGO,
+ __ATTR(region_top_frequency, 0444,
bcm2048_region_top_frequency_read, NULL),
- __ATTR(fm_carrier_error, S_IRUGO,
+ __ATTR(fm_carrier_error, 0444,
bcm2048_fm_carrier_error_read, NULL),
- __ATTR(fm_rssi, S_IRUGO,
+ __ATTR(fm_rssi, 0444,
bcm2048_fm_rssi_read, NULL),
- __ATTR(region, S_IRUGO | S_IWUSR, bcm2048_region_read,
+ __ATTR(region, 0644, bcm2048_region_read,
bcm2048_region_write),
- __ATTR(rds_data, S_IRUGO, bcm2048_rds_data_read, NULL),
+ __ATTR(rds_data, 0444, bcm2048_rds_data_read, NULL),
};
static int bcm2048_sysfs_unregister_properties(struct bcm2048_device *bdev,
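
The permission churn in the attrs[] hunk above is mechanical: checkpatch.pl prefers literal octal over symbolic mode macros, and the substitutions are exact identities, S_IRUGO | S_IWUSR == 0644 (rw-r--r--) and S_IRUGO == 0444 (r--r--r--). A small userspace check of those identities (S_IRUGO is kernel-only, so it is reconstructed here from the POSIX per-class bits):

    #include <sys/stat.h>

    /* The kernel's S_IRUGO is read permission for user, group and other. */
    #define S_IRUGO_SKETCH (S_IRUSR | S_IRGRP | S_IROTH)

    _Static_assert((S_IRUGO_SKETCH | S_IWUSR) == 0644, "rw-r--r--");
    _Static_assert(S_IRUGO_SKETCH == 0444, "r--r--r--");

    int main(void) { return 0; }
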
@@ -2204,7 +2200,7 @@ static ssize_t bcm2048_fops_read(struct file *file, char __user *buf,
}
/* interruptible_sleep_on(&bdev->read_queue); */
if (wait_event_interruptible(bdev->read_queue,
- bdev->rds_data_available) < 0) {
+ bdev->rds_data_available) < 0) {
retval = -EINTR;
goto done;
}
@@ -2542,7 +2538,7 @@ static int bcm2048_vidioc_s_hw_freq_seek(struct file *file, void *priv,
return err;
}
-static struct v4l2_ioctl_ops bcm2048_ioctl_ops = {
+static const struct v4l2_ioctl_ops bcm2048_ioctl_ops = {
.vidioc_querycap = bcm2048_vidioc_querycap,
.vidioc_g_input = bcm2048_vidioc_g_input,
.vidioc_s_input = bcm2048_vidioc_s_input,
diff --git a/drivers/staging/media/bcm2048/radio-bcm2048.h b/drivers/staging/media/bcm2048/radio-bcm2048.h
index 4c90a32db795..4d950c1e2e8b 100644
--- a/drivers/staging/media/bcm2048/radio-bcm2048.h
+++ b/drivers/staging/media/bcm2048/radio-bcm2048.h
@@ -14,11 +14,6 @@
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
- * 02110-1301 USA
*/
#ifndef BCM2048_H
diff --git a/drivers/staging/media/cec/Kconfig b/drivers/staging/media/cec/Kconfig
deleted file mode 100644
index 6e12d41b1f86..000000000000
--- a/drivers/staging/media/cec/Kconfig
+++ /dev/null
@@ -1,12 +0,0 @@
-config MEDIA_CEC
- bool "CEC API (EXPERIMENTAL)"
- depends on MEDIA_SUPPORT
- select MEDIA_CEC_EDID
- ---help---
- Enable the CEC API.
-
-config MEDIA_CEC_DEBUG
- bool "CEC debugfs interface (EXPERIMENTAL)"
- depends on MEDIA_CEC && DEBUG_FS
- ---help---
- Turns on the DebugFS interface for CEC devices.
diff --git a/drivers/staging/media/cec/TODO b/drivers/staging/media/cec/TODO
deleted file mode 100644
index 13224694a8ae..000000000000
--- a/drivers/staging/media/cec/TODO
+++ /dev/null
@@ -1,32 +0,0 @@
-The reason why cec.c is still in staging is that I would like
-to have a bit more confidence in the uABI. The kABI is fine,
-no problem there, but I would like to let the public API mature
-a bit.
-
-Once I'm confident that I didn't miss anything then the cec.c source
-can move to drivers/media and the linux/cec.h and linux/cec-funcs.h
-headers can move to uapi/linux and added to uapi/linux/Kbuild to make
-them public.
-
-Hopefully this will happen later in 2016.
-
-Other TODOs:
-
-- There are two possible replies to CEC_MSG_INITIATE_ARC. How to handle that?
-- Add a flag to inhibit passing CEC RC messages to the rc subsystem.
- Applications should be able to choose this when calling S_LOG_ADDRS.
-- If the reply field of cec_msg is set then when the reply arrives it
- is only sent to the filehandle that transmitted the original message
- and not to any followers. Should this behavior change or perhaps
- controlled through a cec_msg flag?
-- Should CEC_LOG_ADDR_TYPE_SPECIFIC be replaced by TYPE_2ND_TV and TYPE_PROCESSOR?
- And also TYPE_SWITCH and TYPE_CDC_ONLY in addition to the TYPE_UNREGISTERED?
- This should give the framework more information about the device type
- since SPECIFIC and UNREGISTERED give no useful information.
-- Once this is out of staging this should no longer be a separate
- config option, instead it should be selected by drivers that want it.
-- Revisit the IS_REACHABLE(RC_CORE): perhaps the RC_CORE support should
- be enabled through a separate config option in drivers/media/Kconfig
- or rc/Kconfig?
-
-Hans Verkuil <hans.verkuil@cisco.com>
diff --git a/drivers/staging/media/cxd2099/cxd2099.c b/drivers/staging/media/cxd2099/cxd2099.c
index fedeb3c3549e..c72c3f09f175 100644
--- a/drivers/staging/media/cxd2099/cxd2099.c
+++ b/drivers/staging/media/cxd2099/cxd2099.c
@@ -336,7 +336,8 @@ static int init(struct cxd *ci)
break;
#endif
/* TOSTRT = 8, Mode B (gated clock), falling Edge,
- * Serial, POL=HIGH, MSB */
+ * Serial, POL=HIGH, MSB
+ */
status = write_reg(ci, 0x0A, 0xA7);
if (status < 0)
break;
diff --git a/drivers/staging/media/davinci_vpfe/Makefile b/drivers/staging/media/davinci_vpfe/Makefile
index c64515c644cd..3019c9ecd548 100644
--- a/drivers/staging/media/davinci_vpfe/Makefile
+++ b/drivers/staging/media/davinci_vpfe/Makefile
@@ -1,3 +1,5 @@
-obj-$(CONFIG_VIDEO_DM365_VPFE) += \
+obj-$(CONFIG_VIDEO_DM365_VPFE) += davinci-vfpe.o
+
+davinci-vfpe-objs := \
dm365_isif.o dm365_ipipe_hw.o dm365_ipipe.o \
dm365_resizer.o dm365_ipipeif.o vpfe_mc_capture.o vpfe_video.o
diff --git a/drivers/staging/media/davinci_vpfe/dm365_resizer.c b/drivers/staging/media/davinci_vpfe/dm365_resizer.c
index 128662623ea8..5fbc2d447ff2 100644
--- a/drivers/staging/media/davinci_vpfe/dm365_resizer.c
+++ b/drivers/staging/media/davinci_vpfe/dm365_resizer.c
@@ -237,9 +237,8 @@ resizer_calculate_resize_ratios(struct vpfe_resizer_device *resizer, int index)
((informat->width) * 256) / (outformat->width);
}
-void
-static resizer_enable_422_420_conversion(struct resizer_params *param,
- int index, bool en)
+static void resizer_enable_422_420_conversion(struct resizer_params *param,
+ int index, bool en)
{
param->rsz_rsc_param[index].cen = en;
param->rsz_rsc_param[index].yen = en;
@@ -490,7 +489,7 @@ resizer_configure_in_continious_mode(struct vpfe_resizer_device *resizer)
int line_len;
int ret;
- if (resizer->resizer_a.output != RESIZER_OUPUT_MEMORY) {
+ if (resizer->resizer_a.output != RESIZER_OUTPUT_MEMORY) {
dev_err(dev, "enable resizer - Resizer-A\n");
return -EINVAL;
}
@@ -502,7 +501,7 @@ resizer_configure_in_continious_mode(struct vpfe_resizer_device *resizer)
param->rsz_en[RSZ_B] = DISABLE;
param->oper_mode = RESIZER_MODE_CONTINIOUS;
- if (resizer->resizer_b.output == RESIZER_OUPUT_MEMORY) {
+ if (resizer->resizer_b.output == RESIZER_OUTPUT_MEMORY) {
struct v4l2_mbus_framefmt *outformat2;
param->rsz_en[RSZ_B] = ENABLE;
@@ -825,7 +824,7 @@ resizer_set_defualt_configuration(struct vpfe_resizer_device *resizer)
.o_hsz = WIDTH_O - 1,
.v_dif = 256,
.v_typ_y = VPFE_RSZ_INTP_CUBIC,
- .h_typ_c = VPFE_RSZ_INTP_CUBIC,
+ .v_typ_c = VPFE_RSZ_INTP_CUBIC,
.h_dif = 256,
.h_typ_y = VPFE_RSZ_INTP_CUBIC,
.h_typ_c = VPFE_RSZ_INTP_CUBIC,
@@ -843,7 +842,7 @@ resizer_set_defualt_configuration(struct vpfe_resizer_device *resizer)
.o_hsz = WIDTH_O - 1,
.v_dif = 256,
.v_typ_y = VPFE_RSZ_INTP_CUBIC,
- .h_typ_c = VPFE_RSZ_INTP_CUBIC,
+ .v_typ_c = VPFE_RSZ_INTP_CUBIC,
.h_dif = 256,
.h_typ_y = VPFE_RSZ_INTP_CUBIC,
.h_typ_c = VPFE_RSZ_INTP_CUBIC,
@@ -1043,13 +1042,13 @@ static void resizer_ss_isr(struct vpfe_resizer_device *resizer)
if (ipipeif_sink != IPIPEIF_INPUT_MEMORY)
return;
- if (resizer->resizer_a.output == RESIZER_OUPUT_MEMORY) {
+ if (resizer->resizer_a.output == RESIZER_OUTPUT_MEMORY) {
val = vpss_dma_complete_interrupt();
if (val != 0 && val != 2)
return;
}
- if (resizer->resizer_a.output == RESIZER_OUPUT_MEMORY) {
+ if (resizer->resizer_a.output == RESIZER_OUTPUT_MEMORY) {
spin_lock(&video_out->dma_queue_lock);
vpfe_video_process_buffer_complete(video_out);
video_out->state = VPFE_VIDEO_BUFFER_NOT_QUEUED;
@@ -1059,7 +1058,7 @@ static void resizer_ss_isr(struct vpfe_resizer_device *resizer)
/* If resizer B is enabled */
if (pipe->output_num > 1 && resizer->resizer_b.output ==
- RESIZER_OUPUT_MEMORY) {
+ RESIZER_OUTPUT_MEMORY) {
spin_lock(&video_out->dma_queue_lock);
vpfe_video_process_buffer_complete(video_out2);
video_out2->state = VPFE_VIDEO_BUFFER_NOT_QUEUED;
@@ -1069,7 +1068,7 @@ static void resizer_ss_isr(struct vpfe_resizer_device *resizer)
/* start HW if buffers are queued */
if (vpfe_video_is_pipe_ready(pipe) &&
- resizer->resizer_a.output == RESIZER_OUPUT_MEMORY) {
+ resizer->resizer_a.output == RESIZER_OUTPUT_MEMORY) {
resizer_enable(resizer, 1);
vpfe_ipipe_enable(vpfe_dev, 1);
vpfe_ipipeif_enable(vpfe_dev);
@@ -1237,8 +1236,8 @@ static int resizer_do_hw_setup(struct vpfe_resizer_device *resizer)
struct resizer_params *param = &resizer->config;
int ret = 0;
- if (resizer->resizer_a.output == RESIZER_OUPUT_MEMORY ||
- resizer->resizer_b.output == RESIZER_OUPUT_MEMORY) {
+ if (resizer->resizer_a.output == RESIZER_OUTPUT_MEMORY ||
+ resizer->resizer_b.output == RESIZER_OUTPUT_MEMORY) {
if (ipipeif_sink == IPIPEIF_INPUT_MEMORY &&
ipipeif_source == IPIPEIF_OUTPUT_RESIZER)
ret = resizer_configure_in_single_shot_mode(resizer);
@@ -1263,7 +1262,7 @@ static int resizer_set_stream(struct v4l2_subdev *sd, int enable)
if (&resizer->crop_resizer.subdev != sd)
return 0;
- if (resizer->resizer_a.output != RESIZER_OUPUT_MEMORY)
+ if (resizer->resizer_a.output != RESIZER_OUTPUT_MEMORY)
return 0;
switch (enable) {
@@ -1724,7 +1723,7 @@ static int resizer_link_setup(struct media_entity *entity,
}
if (resizer->resizer_a.output != RESIZER_OUTPUT_NONE)
return -EBUSY;
- resizer->resizer_a.output = RESIZER_OUPUT_MEMORY;
+ resizer->resizer_a.output = RESIZER_OUTPUT_MEMORY;
break;
default:
@@ -1749,7 +1748,7 @@ static int resizer_link_setup(struct media_entity *entity,
}
if (resizer->resizer_b.output != RESIZER_OUTPUT_NONE)
return -EBUSY;
- resizer->resizer_b.output = RESIZER_OUPUT_MEMORY;
+ resizer->resizer_b.output = RESIZER_OUTPUT_MEMORY;
break;
default:
diff --git a/drivers/staging/media/davinci_vpfe/dm365_resizer.h b/drivers/staging/media/davinci_vpfe/dm365_resizer.h
index 93b0f44030aa..00e64b0d0295 100644
--- a/drivers/staging/media/davinci_vpfe/dm365_resizer.h
+++ b/drivers/staging/media/davinci_vpfe/dm365_resizer.h
@@ -210,7 +210,7 @@ enum resizer_input_entity {
enum resizer_output_entity {
RESIZER_OUTPUT_NONE = 0,
- RESIZER_OUPUT_MEMORY = 1,
+ RESIZER_OUTPUT_MEMORY = 1,
};
struct dm365_resizer_device {
diff --git a/drivers/staging/media/davinci_vpfe/vpfe_video.c b/drivers/staging/media/davinci_vpfe/vpfe_video.c
index 8be9f854510f..c27d7e9a1bdb 100644
--- a/drivers/staging/media/davinci_vpfe/vpfe_video.c
+++ b/drivers/staging/media/davinci_vpfe/vpfe_video.c
@@ -198,7 +198,7 @@ static int vpfe_update_pipe_state(struct vpfe_video_device *video)
return 0;
}
-/* checks wether pipeline is ready for enabling */
+/* checks whether pipeline is ready for enabling */
int vpfe_video_is_pipe_ready(struct vpfe_pipeline *pipe)
{
int i;
@@ -1143,8 +1143,8 @@ static int vpfe_buffer_prepare(struct vb2_buffer *vb)
/* Initialize buffer */
vb2_set_plane_payload(vb, 0, video->fmt.fmt.pix.sizeimage);
if (vb2_plane_vaddr(vb, 0) &&
- vb2_get_plane_payload(vb, 0) > vb2_plane_size(vb, 0))
- return -EINVAL;
+ vb2_get_plane_payload(vb, 0) > vb2_plane_size(vb, 0))
+ return -EINVAL;
addr = vb2_dma_contig_plane_dma_addr(vb, 0);
/* Make sure user addresses are aligned to 32 bytes */
@@ -1362,7 +1362,7 @@ static int vpfe_reqbufs(struct file *file, void *priv,
ret = vb2_queue_init(q);
if (ret) {
v4l2_err(&vpfe_dev->v4l2_dev, "vb2_queue_init() failed\n");
- return ret;
+ goto unlock_out;
}
fh->io_allowed = 1;
diff --git a/drivers/staging/media/lirc/Kconfig b/drivers/staging/media/lirc/Kconfig
index 6879c4651b46..25b7e7ccf554 100644
--- a/drivers/staging/media/lirc/Kconfig
+++ b/drivers/staging/media/lirc/Kconfig
@@ -38,19 +38,6 @@ config LIRC_SASEM
help
Driver for the Sasem OnAir Remocon-V or Dign HV5 HTPC IR/VFD Module
-config LIRC_SERIAL
- tristate "Homebrew Serial Port Receiver"
- depends on LIRC
- help
- Driver for Homebrew Serial Port Receivers
-
-config LIRC_SERIAL_TRANSMITTER
- bool "Serial Port Transmitter"
- default y
- depends on LIRC_SERIAL
- help
- Serial Port Transmitter support
-
config LIRC_SIR
tristate "Built-in SIR IrDA port"
depends on LIRC
diff --git a/drivers/staging/media/lirc/Makefile b/drivers/staging/media/lirc/Makefile
index 5430adf0475d..7f919eab1989 100644
--- a/drivers/staging/media/lirc/Makefile
+++ b/drivers/staging/media/lirc/Makefile
@@ -7,6 +7,5 @@ obj-$(CONFIG_LIRC_BT829) += lirc_bt829.o
obj-$(CONFIG_LIRC_IMON) += lirc_imon.o
obj-$(CONFIG_LIRC_PARALLEL) += lirc_parallel.o
obj-$(CONFIG_LIRC_SASEM) += lirc_sasem.o
-obj-$(CONFIG_LIRC_SERIAL) += lirc_serial.o
obj-$(CONFIG_LIRC_SIR) += lirc_sir.o
obj-$(CONFIG_LIRC_ZILOG) += lirc_zilog.o
diff --git a/drivers/staging/media/lirc/lirc_imon.c b/drivers/staging/media/lirc/lirc_imon.c
index 198a8057f2f1..1e650fba4a92 100644
--- a/drivers/staging/media/lirc/lirc_imon.c
+++ b/drivers/staging/media/lirc/lirc_imon.c
@@ -334,7 +334,7 @@ static int send_packet(struct imon_context *context)
context->tx_urb->actual_length = 0;
- init_completion(&context->tx.finished);
+ reinit_completion(&context->tx.finished);
atomic_set(&context->tx.busy, 1);
retval = usb_submit_urb(context->tx_urb, GFP_KERNEL);
@@ -408,9 +408,8 @@ static ssize_t vfd_write(struct file *file, const char __user *buf,
data_buf = memdup_user(buf, n_bytes);
if (IS_ERR(data_buf)) {
- retval = PTR_ERR(data_buf);
- data_buf = NULL;
- goto exit;
+ mutex_unlock(&context->ctx_lock);
+ return PTR_ERR(data_buf);
}
memcpy(context->tx.data_buf, data_buf, n_bytes);
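
The memdup_user() fixups above replace a goto into a shared exit path with a direct unlock-and-return, which is the safer shape when the only cleanup owed at that point is the mutex. A minimal userspace sketch of the pattern with hypothetical names (ctx_lock, tx_buf and vfd_write_sketch are stand-ins, not the driver's symbols):

    #include <errno.h>
    #include <pthread.h>
    #include <stdlib.h>
    #include <string.h>

    static pthread_mutex_t ctx_lock = PTHREAD_MUTEX_INITIALIZER;
    static char tx_buf[64];

    /* Copy caller data under the lock; on failure, drop the lock and
     * return the error directly instead of jumping to a common exit. */
    static int vfd_write_sketch(const char *buf, size_t n)
    {
            char *copy;

            if (n > sizeof(tx_buf))
                    return -EINVAL;

            pthread_mutex_lock(&ctx_lock);

            copy = malloc(n);            /* stand-in for memdup_user() */
            if (!copy) {
                    pthread_mutex_unlock(&ctx_lock);
                    return -ENOMEM;      /* error path owes only the lock */
            }
            memcpy(copy, buf, n);

            memcpy(tx_buf, copy, n);
            free(copy);
            pthread_mutex_unlock(&ctx_lock);
            return 0;
    }

    int main(void) { return vfd_write_sketch("hello", 5) ? 1 : 0; }
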
@@ -497,6 +496,8 @@ static int ir_open(void *data)
context->rx.initial_space = 1;
context->rx.prev_bit = 0;
+ init_completion(&context->tx.finished);
+
context->ir_isopen = 1;
dev_info(context->driver->dev, "IR port opened\n");
@@ -930,7 +931,7 @@ static void imon_disconnect(struct usb_interface *interface)
/* Abort ongoing write */
if (atomic_read(&context->tx.busy)) {
usb_kill_urb(context->tx_urb);
- complete_all(&context->tx.finished);
+ complete(&context->tx.finished);
}
context->dev_present = 0;
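
The completion changes in lirc_imon follow the documented lifecycle of struct completion: init_completion() runs once when the object is set up (here, at IR-port open), reinit_completion() rearms it before every reuse in send_packet(), and the disconnect path uses complete() because at most one writer can be waiting. Re-running init_completion() on a live object would also reinitialize its internal wait-queue state, which is why reinit_completion() exists. A userspace sketch of the same lifecycle (completion_sketch is an illustrative model, not the kernel type):

    #include <pthread.h>
    #include <stdbool.h>

    struct completion_sketch {
            pthread_mutex_t lock;
            pthread_cond_t  cond;
            bool            done;
    };

    static void init_completion_sketch(struct completion_sketch *c)
    {
            pthread_mutex_init(&c->lock, NULL);  /* once per object lifetime */
            pthread_cond_init(&c->cond, NULL);
            c->done = false;
    }

    /* Rearm without touching the lock/cond themselves - the point of
     * reinit_completion() vs. a second init_completion(). */
    static void reinit_completion_sketch(struct completion_sketch *c)
    {
            pthread_mutex_lock(&c->lock);
            c->done = false;
            pthread_mutex_unlock(&c->lock);
    }

    static void complete_sketch(struct completion_sketch *c)
    {
            pthread_mutex_lock(&c->lock);
            c->done = true;
            pthread_cond_signal(&c->cond);       /* wake one waiter */
            pthread_mutex_unlock(&c->lock);
    }

    static void wait_for_completion_sketch(struct completion_sketch *c)
    {
            pthread_mutex_lock(&c->lock);
            while (!c->done)
                    pthread_cond_wait(&c->cond, &c->lock);
            pthread_mutex_unlock(&c->lock);
    }

    int main(void)
    {
            struct completion_sketch tx;

            init_completion_sketch(&tx);         /* open: once */
            reinit_completion_sketch(&tx);       /* each send: rearm */
            complete_sketch(&tx);                /* urb callback / disconnect */
            wait_for_completion_sketch(&tx);
            return 0;
    }
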
diff --git a/drivers/staging/media/lirc/lirc_sasem.c b/drivers/staging/media/lirc/lirc_sasem.c
index 4678ae10b030..b0c176e14b6b 100644
--- a/drivers/staging/media/lirc/lirc_sasem.c
+++ b/drivers/staging/media/lirc/lirc_sasem.c
@@ -103,7 +103,8 @@ struct sasem_context {
struct tx_t {
unsigned char data_buf[SASEM_DATA_BUF_SZ]; /* user data
- * buffer */
+ * buffer
+ */
struct completion finished; /* wait for write to finish */
atomic_t busy; /* write in progress */
int status; /* status of tx completion */
@@ -295,7 +296,8 @@ static int vfd_close(struct inode *inode, struct file *file)
if (!context->dev_present && !context->ir_isopen) {
/* Device disconnected before close and IR port is
* not open. If IR port is open, context will be
- * deleted by ir_close. */
+ * deleted by ir_close.
+ */
mutex_unlock(&context->ctx_lock);
delete_context(context);
return retval;
@@ -384,9 +386,8 @@ static ssize_t vfd_write(struct file *file, const char __user *buf,
data_buf = memdup_user(buf, n_bytes);
if (IS_ERR(data_buf)) {
- retval = PTR_ERR(data_buf);
- data_buf = NULL;
- goto exit;
+ mutex_unlock(&context->ctx_lock);
+ return PTR_ERR(data_buf);
}
memcpy(context->tx.data_buf, data_buf, n_bytes);
@@ -397,7 +398,8 @@ static ssize_t vfd_write(struct file *file, const char __user *buf,
/* Nine 8 byte packets to be sent */
/* NOTE: "\x07\x01\0\0\0\0\0\0" or "\x0c\0\0\0\0\0\0\0"
- * will clear the VFD */
+ * will clear the VFD
+ */
for (i = 0; i < 9; i++) {
switch (i) {
case 0:
diff --git a/drivers/staging/media/lirc/lirc_serial.c b/drivers/staging/media/lirc/lirc_serial.c
deleted file mode 100644
index b798b311d32c..000000000000
--- a/drivers/staging/media/lirc/lirc_serial.c
+++ /dev/null
@@ -1,1130 +0,0 @@
-/*
- * lirc_serial.c
- *
- * lirc_serial - Device driver that records pulse- and pause-lengths
- * (space-lengths) between DDCD events on a serial port.
- *
- * Copyright (C) 1996,97 Ralph Metzler <rjkm@thp.uni-koeln.de>
- * Copyright (C) 1998 Trent Piepho <xyzzy@u.washington.edu>
- * Copyright (C) 1998 Ben Pfaff <blp@gnu.org>
- * Copyright (C) 1999 Christoph Bartelmus <lirc@bartelmus.de>
- * Copyright (C) 2007 Andrei Tanas <andrei@tanas.ca> (suspend/resume support)
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- */
-
-/*
- * Steve's changes to improve transmission fidelity:
- * - for systems with the rdtsc instruction and the clock counter, a
- * send_pulse that times the pulses directly using the counter.
- * This means that the LIRC_SERIAL_TRANSMITTER_LATENCY fudge is
- * not needed. Measurement shows very stable waveform, even where
- * PCI activity slows the access to the UART, which trips up other
- * versions.
- * - For other systems, non-integer-microsecond pulse/space lengths,
- * done using fixed-point binary. So, much more accurate carrier
- * frequency.
- * - fine tuned transmitter latency, taking advantage of fractional
- * microseconds in previous change
- * - Fixed bug in the way transmitter latency was accounted for by
- * tuning the pulse lengths down - the send_pulse routine ignored
- * this overhead as it timed the overall pulse length - so the
- * pulse frequency was right but overall pulse length was too
- * long. Fixed by accounting for latency on each pulse/space
- * iteration.
- *
- * Steve Davies <steve@daviesfam.org> July 2001
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/module.h>
-#include <linux/errno.h>
-#include <linux/signal.h>
-#include <linux/sched.h>
-#include <linux/fs.h>
-#include <linux/interrupt.h>
-#include <linux/ioport.h>
-#include <linux/kernel.h>
-#include <linux/serial_reg.h>
-#include <linux/ktime.h>
-#include <linux/string.h>
-#include <linux/types.h>
-#include <linux/wait.h>
-#include <linux/mm.h>
-#include <linux/delay.h>
-#include <linux/poll.h>
-#include <linux/platform_device.h>
-#include <linux/gpio.h>
-#include <linux/io.h>
-#include <linux/irq.h>
-#include <linux/fcntl.h>
-#include <linux/spinlock.h>
-
-/* From Intel IXP42X Developer's Manual (#252480-005): */
-/* ftp://download.intel.com/design/network/manuals/25248005.pdf */
-#define UART_IE_IXP42X_UUE 0x40 /* IXP42X UART Unit enable */
-#define UART_IE_IXP42X_RTOIE 0x10 /* IXP42X Receiver Data Timeout int.enable */
-
-#include <media/lirc.h>
-#include <media/lirc_dev.h>
-
-#define LIRC_DRIVER_NAME "lirc_serial"
-
-struct lirc_serial {
- int signal_pin;
- int signal_pin_change;
- u8 on;
- u8 off;
- long (*send_pulse)(unsigned long length);
- void (*send_space)(long length);
- int features;
- spinlock_t lock;
-};
-
-#define LIRC_HOMEBREW 0
-#define LIRC_IRDEO 1
-#define LIRC_IRDEO_REMOTE 2
-#define LIRC_ANIMAX 3
-#define LIRC_IGOR 4
-#define LIRC_NSLU2 5
-
-/*** module parameters ***/
-static int type;
-static int io;
-static int irq;
-static bool iommap;
-static int ioshift;
-static bool softcarrier = true;
-static bool share_irq;
-static int sense = -1; /* -1 = auto, 0 = active high, 1 = active low */
-static bool txsense; /* 0 = active high, 1 = active low */
-
-/* forward declarations */
-static long send_pulse_irdeo(unsigned long length);
-static long send_pulse_homebrew(unsigned long length);
-static void send_space_irdeo(long length);
-static void send_space_homebrew(long length);
-
-static struct lirc_serial hardware[] = {
- [LIRC_HOMEBREW] = {
- .lock = __SPIN_LOCK_UNLOCKED(hardware[LIRC_HOMEBREW].lock),
- .signal_pin = UART_MSR_DCD,
- .signal_pin_change = UART_MSR_DDCD,
- .on = (UART_MCR_RTS | UART_MCR_OUT2 | UART_MCR_DTR),
- .off = (UART_MCR_RTS | UART_MCR_OUT2),
- .send_pulse = send_pulse_homebrew,
- .send_space = send_space_homebrew,
-#ifdef CONFIG_LIRC_SERIAL_TRANSMITTER
- .features = (LIRC_CAN_SET_SEND_DUTY_CYCLE |
- LIRC_CAN_SET_SEND_CARRIER |
- LIRC_CAN_SEND_PULSE | LIRC_CAN_REC_MODE2)
-#else
- .features = LIRC_CAN_REC_MODE2
-#endif
- },
-
- [LIRC_IRDEO] = {
- .lock = __SPIN_LOCK_UNLOCKED(hardware[LIRC_IRDEO].lock),
- .signal_pin = UART_MSR_DSR,
- .signal_pin_change = UART_MSR_DDSR,
- .on = UART_MCR_OUT2,
- .off = (UART_MCR_RTS | UART_MCR_DTR | UART_MCR_OUT2),
- .send_pulse = send_pulse_irdeo,
- .send_space = send_space_irdeo,
- .features = (LIRC_CAN_SET_SEND_DUTY_CYCLE |
- LIRC_CAN_SEND_PULSE | LIRC_CAN_REC_MODE2)
- },
-
- [LIRC_IRDEO_REMOTE] = {
- .lock = __SPIN_LOCK_UNLOCKED(hardware[LIRC_IRDEO_REMOTE].lock),
- .signal_pin = UART_MSR_DSR,
- .signal_pin_change = UART_MSR_DDSR,
- .on = (UART_MCR_RTS | UART_MCR_DTR | UART_MCR_OUT2),
- .off = (UART_MCR_RTS | UART_MCR_DTR | UART_MCR_OUT2),
- .send_pulse = send_pulse_irdeo,
- .send_space = send_space_irdeo,
- .features = (LIRC_CAN_SET_SEND_DUTY_CYCLE |
- LIRC_CAN_SEND_PULSE | LIRC_CAN_REC_MODE2)
- },
-
- [LIRC_ANIMAX] = {
- .lock = __SPIN_LOCK_UNLOCKED(hardware[LIRC_ANIMAX].lock),
- .signal_pin = UART_MSR_DCD,
- .signal_pin_change = UART_MSR_DDCD,
- .on = 0,
- .off = (UART_MCR_RTS | UART_MCR_DTR | UART_MCR_OUT2),
- .send_pulse = NULL,
- .send_space = NULL,
- .features = LIRC_CAN_REC_MODE2
- },
-
- [LIRC_IGOR] = {
- .lock = __SPIN_LOCK_UNLOCKED(hardware[LIRC_IGOR].lock),
- .signal_pin = UART_MSR_DSR,
- .signal_pin_change = UART_MSR_DDSR,
- .on = (UART_MCR_RTS | UART_MCR_OUT2 | UART_MCR_DTR),
- .off = (UART_MCR_RTS | UART_MCR_OUT2),
- .send_pulse = send_pulse_homebrew,
- .send_space = send_space_homebrew,
-#ifdef CONFIG_LIRC_SERIAL_TRANSMITTER
- .features = (LIRC_CAN_SET_SEND_DUTY_CYCLE |
- LIRC_CAN_SET_SEND_CARRIER |
- LIRC_CAN_SEND_PULSE | LIRC_CAN_REC_MODE2)
-#else
- .features = LIRC_CAN_REC_MODE2
-#endif
- },
-};
-
-#define RS_ISR_PASS_LIMIT 256
-
-/*
- * A long pulse code from a remote might take up to 300 bytes. The
- * daemon should read the bytes as soon as they are generated, so take
- * the number of keys you think you can push before the daemon runs
- * and multiply by 300. The driver will warn you if you overrun this
- * buffer. If you have a slow computer or non-busmastering IDE disks,
- * maybe you will need to increase this.
- */
-
-/* This MUST be a power of two! It has to be larger than 1 as well. */
-
-#define RBUF_LEN 256
-
-static ktime_t lastkt;
-
-static struct lirc_buffer rbuf;
-
-static unsigned int freq = 38000;
-static unsigned int duty_cycle = 50;
-
-/* Initialized in init_timing_params() */
-static unsigned long period;
-static unsigned long pulse_width;
-static unsigned long space_width;
-
-#if defined(__i386__)
-/*
- * From:
- * Linux I/O port programming mini-HOWTO
- * Author: Riku Saikkonen <Riku.Saikkonen@hut.fi>
- * v, 28 December 1997
- *
- * [...]
- * Actually, a port I/O instruction on most ports in the 0-0x3ff range
- * takes almost exactly 1 microsecond, so if you're, for example, using
- * the parallel port directly, just do additional inb()s from that port
- * to delay.
- * [...]
- */
-/* transmitter latency 1.5625us 0x1.90 - this figure was arrived at from
- * the comment above plus trimming to match actual measured frequency.
- * This will be sensitive to cpu speed, though hopefully most of the 1.5us
- * is spent in the uart access. Still - for reference test machine was a
- * 1.13GHz Athlon system - Steve
- */
-
-/*
- * changed from 400 to 450 as this works better on slower machines;
- * faster machines will use the rdtsc code anyway
- */
-#define LIRC_SERIAL_TRANSMITTER_LATENCY 450
-
-#else
-
-/* does anybody have information on other platforms? */
-/* 256 = 1<<8 */
-#define LIRC_SERIAL_TRANSMITTER_LATENCY 256
-
-#endif /* __i386__ */
-/*
- * FIXME: should we be using hrtimers instead of this
- * LIRC_SERIAL_TRANSMITTER_LATENCY nonsense?
- */
-
-/* fetch serial input packet (1 byte) from register offset */
-static u8 sinp(int offset)
-{
- if (iommap)
- /* the register is memory-mapped */
- offset <<= ioshift;
-
- return inb(io + offset);
-}
-
-/* write serial output packet (1 byte) of value to register offset */
-static void soutp(int offset, u8 value)
-{
- if (iommap)
- /* the register is memory-mapped */
- offset <<= ioshift;
-
- outb(value, io + offset);
-}
-
-static void on(void)
-{
- if (txsense)
- soutp(UART_MCR, hardware[type].off);
- else
- soutp(UART_MCR, hardware[type].on);
-}
-
-static void off(void)
-{
- if (txsense)
- soutp(UART_MCR, hardware[type].on);
- else
- soutp(UART_MCR, hardware[type].off);
-}
-
-#ifndef MAX_UDELAY_MS
-#define MAX_UDELAY_US 5000
-#else
-#define MAX_UDELAY_US (MAX_UDELAY_MS*1000)
-#endif
-
-static void safe_udelay(unsigned long usecs)
-{
- while (usecs > MAX_UDELAY_US) {
- udelay(MAX_UDELAY_US);
- usecs -= MAX_UDELAY_US;
- }
- udelay(usecs);
-}
-
-#ifdef USE_RDTSC
-/*
- * This is an overflow/precision juggle, complicated in that we can't
- * do long long divide in the kernel
- */
-
-/*
- * When we use the rdtsc instruction to measure clocks, we keep the
- * pulse and space widths as clock cycles. As this is CPU speed
- * dependent, the widths must be calculated in init_port and ioctl
- * time
- */
-
-static int init_timing_params(unsigned int new_duty_cycle,
- unsigned int new_freq)
-{
- __u64 loops_per_sec, work;
-
- duty_cycle = new_duty_cycle;
- freq = new_freq;
-
- loops_per_sec = __this_cpu_read(cpu_info.loops_per_jiffy);
- loops_per_sec *= HZ;
-
- /* How many clocks in a microsecond?, avoiding long long divide */
- work = loops_per_sec;
- work *= 4295; /* 4295 = 2^32 / 1e6 */
-
- /*
- * Carrier period in clocks, approach good up to 32GHz clock,
- * gets carrier frequency within 8Hz
- */
- period = loops_per_sec >> 3;
- period /= (freq >> 3);
-
- /* Derive pulse and space from the period */
- pulse_width = period * duty_cycle / 100;
- space_width = period - pulse_width;
- pr_debug("in init_timing_params, freq=%d, duty_cycle=%d, clk/jiffy=%ld, pulse=%ld, space=%ld, conv_us_to_clocks=%ld\n",
- freq, duty_cycle, __this_cpu_read(cpu_info.loops_per_jiffy),
- pulse_width, space_width, conv_us_to_clocks);
- return 0;
-}
-#else /* ! USE_RDTSC */
-static int init_timing_params(unsigned int new_duty_cycle,
- unsigned int new_freq)
-{
-/*
- * period, pulse/space width are kept with 8 binary places -
- * IE multiplied by 256.
- */
- if (256 * 1000000L / new_freq * new_duty_cycle / 100 <=
- LIRC_SERIAL_TRANSMITTER_LATENCY)
- return -EINVAL;
- if (256 * 1000000L / new_freq * (100 - new_duty_cycle) / 100 <=
- LIRC_SERIAL_TRANSMITTER_LATENCY)
- return -EINVAL;
- duty_cycle = new_duty_cycle;
- freq = new_freq;
- period = 256 * 1000000L / freq;
- pulse_width = period * duty_cycle / 100;
- space_width = period - pulse_width;
- pr_debug("in init_timing_params, freq=%d pulse=%ld, space=%ld\n",
- freq, pulse_width, space_width);
- return 0;
-}
-#endif /* USE_RDTSC */
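
Worked through for the defaults (freq = 38000, duty_cycle = 50), the non-rdtsc init_timing_params() above yields period = 256 * 1000000 / 38000 = 6736, i.e. 26.3125 us in 1/256-us fixed point against an ideal 38 kHz period of 26.316 us, and pulse_width = space_width = 3368. A sketch of the same arithmetic, checked against the driver's latency guard:

    #include <stdio.h>

    #define LATENCY 450  /* LIRC_SERIAL_TRANSMITTER_LATENCY, i386 value */

    int main(void)
    {
            unsigned int freq = 38000, duty_cycle = 50;
            unsigned long period, pulse_width, space_width;

            /* Widths carry 8 fractional bits (multiplied by 256). */
            period      = 256 * 1000000L / freq;
            pulse_width = period * duty_cycle / 100;
            space_width = period - pulse_width;

            /* Same rejection test as the driver: both halves must exceed
             * the fixed transmitter latency or the delay would underflow. */
            if (pulse_width <= LATENCY || space_width <= LATENCY)
                    return 1;

            printf("period=%lu (%.4f us) pulse=%lu space=%lu\n",
                   period, period / 256.0, pulse_width, space_width);
            return 0;
    }
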
-
-
-/* return value: space length delta */
-
-static long send_pulse_irdeo(unsigned long length)
-{
- long rawbits, ret;
- int i;
- unsigned char output;
- unsigned char chunk, shifted;
-
- /* how many bits have to be sent ? */
- rawbits = length * 1152 / 10000;
- if (duty_cycle > 50)
- chunk = 3;
- else
- chunk = 1;
- for (i = 0, output = 0x7f; rawbits > 0; rawbits -= 3) {
- shifted = chunk << (i * 3);
- shifted >>= 1;
- output &= (~shifted);
- i++;
- if (i == 3) {
- soutp(UART_TX, output);
- while (!(sinp(UART_LSR) & UART_LSR_THRE))
- ;
- output = 0x7f;
- i = 0;
- }
- }
- if (i != 0) {
- soutp(UART_TX, output);
- while (!(sinp(UART_LSR) & UART_LSR_TEMT))
- ;
- }
-
- if (i == 0)
- ret = (-rawbits) * 10000 / 1152;
- else
- ret = (3 - i) * 3 * 10000 / 1152 + (-rawbits) * 10000 / 1152;
-
- return ret;
-}
-
-/* Version using udelay() */
-
-/*
- * here we use fixed point arithmetic, with 8
- * fractional bits. that gets us within 0.1% or so of the right average
- * frequency, albeit with some jitter in pulse length - Steve
- *
- * This should use ndelay instead.
- */
-
-/* To match 8 fractional bits used for pulse/space length */
-
-static long send_pulse_homebrew_softcarrier(unsigned long length)
-{
- int flag;
- unsigned long actual, target, d;
-
- length <<= 8;
-
- actual = 0; target = 0; flag = 0;
- while (actual < length) {
- if (flag) {
- off();
- target += space_width;
- } else {
- on();
- target += pulse_width;
- }
- d = (target - actual -
- LIRC_SERIAL_TRANSMITTER_LATENCY + 128) >> 8;
- /*
- * Note - we've checked in ioctl that the pulse/space
- * widths are big enough so that d is > 0
- */
- udelay(d);
- actual += (d << 8) + LIRC_SERIAL_TRANSMITTER_LATENCY;
- flag = !flag;
- }
- return (actual-length) >> 8;
-}
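
send_pulse_homebrew_softcarrier() keeps two running sums: target (ideal edge times, in 1/256-us units) and actual (time really spent, including the fixed latency per toggle), and sizes each delay as the difference. Individual half-periods jitter, but the long-run carrier frequency stays accurate because the error never accumulates. A condensed, userspace model of that loop (on_sketch/off_sketch are placeholders for the MCR writes, and the delay is treated as exact):

    #include <stdio.h>

    #define LATENCY 450  /* fixed per-toggle overhead, in 1/256 us */

    static void on_sketch(void)  { /* would raise the transmit line */ }
    static void off_sketch(void) { /* would drop the transmit line */ }

    int main(void)
    {
            unsigned long pulse_width = 3368, space_width = 3368; /* 38 kHz */
            unsigned long length = 600UL << 8;  /* 600 us pulse, Q8 */
            unsigned long actual = 0, target = 0, d;
            int flag = 0;

            while (actual < length) {
                    if (flag) {
                            off_sketch();
                            target += space_width;
                    } else {
                            on_sketch();
                            target += pulse_width;
                    }
                    /* Delay = ideal minus already-elapsed minus latency,
                     * rounded to whole microseconds (+128 is 0.5 in Q8). */
                    d = (target - actual - LATENCY + 128) >> 8;
                    /* udelay(d) in the driver; modeled as exact here. */
                    actual += (d << 8) + LATENCY;
                    flag = !flag;
            }
            printf("overshoot = %lu/256 us\n", actual - length);
            return 0;
    }
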
-
-static long send_pulse_homebrew(unsigned long length)
-{
- if (length <= 0)
- return 0;
-
- if (softcarrier)
- return send_pulse_homebrew_softcarrier(length);
-
- on();
- safe_udelay(length);
- return 0;
-}
-
-static void send_space_irdeo(long length)
-{
- if (length <= 0)
- return;
-
- safe_udelay(length);
-}
-
-static void send_space_homebrew(long length)
-{
- off();
- if (length <= 0)
- return;
- safe_udelay(length);
-}
-
-static void rbwrite(int l)
-{
- if (lirc_buffer_full(&rbuf)) {
- /* no new signals will be accepted */
- pr_debug("Buffer overrun\n");
- return;
- }
- lirc_buffer_write(&rbuf, (void *)&l);
-}
-
-static void frbwrite(int l)
-{
- /* simple noise filter */
- static int pulse, space;
- static unsigned int ptr;
-
- if (ptr > 0 && (l & PULSE_BIT)) {
- pulse += l & PULSE_MASK;
- if (pulse > 250) {
- rbwrite(space);
- rbwrite(pulse | PULSE_BIT);
- ptr = 0;
- pulse = 0;
- }
- return;
- }
- if (!(l & PULSE_BIT)) {
- if (ptr == 0) {
- if (l > 20000) {
- space = l;
- ptr++;
- return;
- }
- } else {
- if (l > 20000) {
- space += pulse;
- if (space > PULSE_MASK)
- space = PULSE_MASK;
- space += l;
- if (space > PULSE_MASK)
- space = PULSE_MASK;
- pulse = 0;
- return;
- }
- rbwrite(space);
- rbwrite(pulse | PULSE_BIT);
- ptr = 0;
- pulse = 0;
- }
- }
- rbwrite(l);
-}
-
-static irqreturn_t lirc_irq_handler(int i, void *blah)
-{
- ktime_t kt;
- int counter, dcd;
- u8 status;
- ktime_t delkt;
- int data;
- static int last_dcd = -1;
-
- if ((sinp(UART_IIR) & UART_IIR_NO_INT)) {
- /* not our interrupt */
- return IRQ_NONE;
- }
-
- counter = 0;
- do {
- counter++;
- status = sinp(UART_MSR);
- if (counter > RS_ISR_PASS_LIMIT) {
- pr_warn("AIEEEE: We're caught!\n");
- break;
- }
- if ((status & hardware[type].signal_pin_change)
- && sense != -1) {
- /* get current time */
- kt = ktime_get();
-
- /* New mode, written by Trent Piepho
- <xyzzy@u.washington.edu>. */
-
- /*
- * The old format was not very portable.
- * We now use an int to pass pulses
- * and spaces to user space.
- *
- * If PULSE_BIT is set a pulse has been
- * received, otherwise a space has been
- * received. The driver needs to know if your
- * receiver is active high or active low, or
- * the space/pulse sense could be
- * inverted. The bits denoted by PULSE_MASK are
- * the length in microseconds. Lengths greater
- * than or equal to 16 seconds are clamped to
- * PULSE_MASK. All other bits are unused.
- * This is a much simpler interface for user
- * programs, as well as eliminating "out of
- * phase" errors with space/pulse
- * autodetection.
- */
-
- /* calc time since last interrupt in microseconds */
- dcd = (status & hardware[type].signal_pin) ? 1 : 0;
-
- if (dcd == last_dcd) {
- pr_warn("ignoring spike: %d %d %llx %llx\n",
- dcd, sense, ktime_to_us(kt),
- ktime_to_us(lastkt));
- continue;
- }
-
- delkt = ktime_sub(kt, lastkt);
- if (ktime_compare(delkt, ktime_set(15, 0)) > 0) {
- data = PULSE_MASK; /* really long time */
- if (!(dcd^sense)) {
- /* sanity check */
- pr_warn("AIEEEE: %d %d %llx %llx\n",
- dcd, sense, ktime_to_us(kt),
- ktime_to_us(lastkt));
- /*
- * detecting pulse while this
- * MUST be a space!
- */
- sense = sense ? 0 : 1;
- }
- } else
- data = (int) ktime_to_us(delkt);
- frbwrite(dcd^sense ? data : (data|PULSE_BIT));
- lastkt = kt;
- last_dcd = dcd;
- wake_up_interruptible(&rbuf.wait_poll);
- }
- } while (!(sinp(UART_IIR) & UART_IIR_NO_INT)); /* still pending ? */
- return IRQ_HANDLED;
-}
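
Condensed, the receive side timestamps each level change with the monotonic clock and clamps anything longer than 15 s, since the lirc word only carries 24 bits of microseconds (PULSE_MASK, about 16.7 s). A minimal sketch, assuming PULSE_MASK from media/lirc.h:

    #include <linux/ktime.h>

    /* edge-to-edge interval, clamped to what a lirc word can carry */
    static int interval_us(ktime_t now, ktime_t last)
    {
        ktime_t delta = ktime_sub(now, last);

        if (ktime_compare(delta, ktime_set(15, 0)) > 0)
            return PULSE_MASK;      /* "really long time" */
        return (int)ktime_to_us(delta);
    }

The 15 s overflow case doubles as a sanity check in the handler above: a gap that long must be a space, so if the pin sense disagrees the driver flips sense on the spot.
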
-
-static int hardware_init_port(void)
-{
- u8 scratch, scratch2, scratch3;
-
- /*
- * This is a simple port existence test, borrowed from the autoconfig
- * function in drivers/serial/8250.c
- */
- scratch = sinp(UART_IER);
- soutp(UART_IER, 0);
-#ifdef __i386__
- outb(0xff, 0x080);
-#endif
- scratch2 = sinp(UART_IER) & 0x0f;
- soutp(UART_IER, 0x0f);
-#ifdef __i386__
- outb(0x00, 0x080);
-#endif
- scratch3 = sinp(UART_IER) & 0x0f;
- soutp(UART_IER, scratch);
- if (scratch2 != 0 || scratch3 != 0x0f) {
- /* we fail, there's nothing here */
- pr_err("port existence test failed, cannot continue\n");
- return -ENODEV;
- }
-
- /* Set DLAB 0. */
- soutp(UART_LCR, sinp(UART_LCR) & (~UART_LCR_DLAB));
-
- /* First of all, disable all interrupts */
- soutp(UART_IER, sinp(UART_IER) &
- (~(UART_IER_MSI|UART_IER_RLSI|UART_IER_THRI|UART_IER_RDI)));
-
- /* Clear registers. */
- sinp(UART_LSR);
- sinp(UART_RX);
- sinp(UART_IIR);
- sinp(UART_MSR);
-
- /* Set line for power source */
- off();
-
- /* Clear registers again to be sure. */
- sinp(UART_LSR);
- sinp(UART_RX);
- sinp(UART_IIR);
- sinp(UART_MSR);
-
- switch (type) {
- case LIRC_IRDEO:
- case LIRC_IRDEO_REMOTE:
- /* setup port to 7N1 @ 115200 Baud */
- /* 7N1 + start bit = 9 bit times at 115200 baud ~ 78 us,
- * about 3 periods of a 38 kHz carrier */
-
- /* Set DLAB 1. */
- soutp(UART_LCR, sinp(UART_LCR) | UART_LCR_DLAB);
- /* Set divisor to 1 => 115200 Baud */
- soutp(UART_DLM, 0);
- soutp(UART_DLL, 1);
- /* Set DLAB 0 + 7N1 */
- soutp(UART_LCR, UART_LCR_WLEN7);
- /* THR interrupt already disabled at this point */
- break;
- default:
- break;
- }
-
- return 0;
-}
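
The probe at the top of this function is the classic 8250 autoconfig trick: IER implements only its low four bits, so writing 0x00 and 0x0f and reading each back tells a real UART apart from floating bus lines (the outb() to port 0x080 is just an I/O settling delay on i386). Reduced to its essence, a sketch using the driver's sinp()/soutp() accessors:

    /* port existence test, minus the i386 settling delay */
    static bool uart_present(void)
    {
        u8 saved = sinp(UART_IER);
        u8 zeros, ones;

        soutp(UART_IER, 0x00);
        zeros = sinp(UART_IER) & 0x0f;
        soutp(UART_IER, 0x0f);
        ones = sinp(UART_IER) & 0x0f;
        soutp(UART_IER, saved);     /* restore whatever was there */

        return zeros == 0x00 && ones == 0x0f;
    }
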
-
-static int lirc_serial_probe(struct platform_device *dev)
-{
- int i, nlow, nhigh, result;
-
- result = devm_request_irq(&dev->dev, irq, lirc_irq_handler,
- (share_irq ? IRQF_SHARED : 0),
- LIRC_DRIVER_NAME, &hardware);
- if (result < 0) {
- if (result == -EBUSY)
- dev_err(&dev->dev, "IRQ %d busy\n", irq);
- else if (result == -EINVAL)
- dev_err(&dev->dev, "Bad irq number or handler\n");
- return result;
- }
-
- /* Reserve io region. */
- /*
- * Future MMAP-Developers: Attention!
- * For memory mapped I/O you *might* need to use ioremap() first,
- * for the NSLU2 it's done in boot code.
- */
- if ((iommap &&
- !devm_request_mem_region(&dev->dev, iommap, 8 << ioshift,
- LIRC_DRIVER_NAME)) ||
- (!iommap &&
- !devm_request_region(&dev->dev, io, 8, LIRC_DRIVER_NAME))) {
- dev_err(&dev->dev, "port %04x already in use\n", io);
- dev_warn(&dev->dev, "use 'setserial /dev/ttySX uart none'\n");
- dev_warn(&dev->dev,
- "or compile the serial port driver as module and\n");
- dev_warn(&dev->dev, "make sure this module is loaded first\n");
- return -EBUSY;
- }
-
- result = hardware_init_port();
- if (result < 0)
- return result;
-
- /* Initialize pulse/space widths */
- init_timing_params(duty_cycle, freq);
-
- /* If pin is high, then this must be an active low receiver. */
- if (sense == -1) {
- /* wait 1/2 sec for the power supply */
- msleep(500);
-
- /*
- * probe 9 times every 0.04s, collect "votes" for
- * active high/low
- */
- nlow = 0;
- nhigh = 0;
- for (i = 0; i < 9; i++) {
- if (sinp(UART_MSR) & hardware[type].signal_pin)
- nlow++;
- else
- nhigh++;
- msleep(40);
- }
- sense = nlow >= nhigh ? 1 : 0;
- dev_info(&dev->dev, "auto-detected active %s receiver\n",
- sense ? "low" : "high");
- } else
- dev_info(&dev->dev, "Manually using active %s receiver\n",
- sense ? "low" : "high");
-
- dev_dbg(&dev->dev, "Interrupt %d, port %04x obtained\n", irq, io);
- return 0;
-}
-
-static int set_use_inc(void *data)
-{
- unsigned long flags;
-
- /* initialize timestamp */
- lastkt = ktime_get();
-
- spin_lock_irqsave(&hardware[type].lock, flags);
-
- /* Set DLAB 0. */
- soutp(UART_LCR, sinp(UART_LCR) & (~UART_LCR_DLAB));
-
- soutp(UART_IER, sinp(UART_IER)|UART_IER_MSI);
-
- spin_unlock_irqrestore(&hardware[type].lock, flags);
-
- return 0;
-}
-
-static void set_use_dec(void *data)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&hardware[type].lock, flags);
-
- /* Set DLAB 0. */
- soutp(UART_LCR, sinp(UART_LCR) & (~UART_LCR_DLAB));
-
- /* First of all, disable all interrupts */
- soutp(UART_IER, sinp(UART_IER) &
- (~(UART_IER_MSI|UART_IER_RLSI|UART_IER_THRI|UART_IER_RDI)));
- spin_unlock_irqrestore(&hardware[type].lock, flags);
-}
-
-static ssize_t lirc_write(struct file *file, const char __user *buf,
- size_t n, loff_t *ppos)
-{
- int i, count;
- unsigned long flags;
- long delta = 0;
- int *wbuf;
-
- if (!(hardware[type].features & LIRC_CAN_SEND_PULSE))
- return -EPERM;
-
- count = n / sizeof(int);
- if (n % sizeof(int) || count % 2 == 0)
- return -EINVAL;
- wbuf = memdup_user(buf, n);
- if (IS_ERR(wbuf))
- return PTR_ERR(wbuf);
- spin_lock_irqsave(&hardware[type].lock, flags);
- if (type == LIRC_IRDEO) {
- /* DTR, RTS down */
- on();
- }
- for (i = 0; i < count; i++) {
- if (i%2)
- hardware[type].send_space(wbuf[i] - delta);
- else
- delta = hardware[type].send_pulse(wbuf[i]);
- }
- off();
- spin_unlock_irqrestore(&hardware[type].lock, flags);
- kfree(wbuf);
- return n;
-}
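
The count % 2 == 0 rejection in lirc_write() means a valid buffer holds an odd number of samples, so a frame always starts and ends with a pulse. The delta bookkeeping is the subtle part: with a software carrier a pulse can only end on a half-period boundary, so send_pulse() reports its overshoot and the loop shortens the following space by that amount. As a comment-block sketch of the invariant, writing o_i for the overshoot of pulse i:

    /*
     * sent(pulse_i) = wbuf[i]   + o_i
     * sent(space_i) = wbuf[i+1] - o_i
     * => sent(pulse_i) + sent(space_i) == wbuf[i] + wbuf[i+1]
     *
     * Each pulse/space pair keeps its requested total duration even
     * though individual edges snap to carrier half-periods.
     */
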
-
-static long lirc_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
-{
- int result;
- u32 __user *uptr = (u32 __user *)arg;
- u32 value;
-
- switch (cmd) {
- case LIRC_GET_SEND_MODE:
- if (!(hardware[type].features&LIRC_CAN_SEND_MASK))
- return -ENOIOCTLCMD;
-
- result = put_user(LIRC_SEND2MODE
- (hardware[type].features&LIRC_CAN_SEND_MASK),
- uptr);
- if (result)
- return result;
- break;
-
- case LIRC_SET_SEND_MODE:
- if (!(hardware[type].features&LIRC_CAN_SEND_MASK))
- return -ENOIOCTLCMD;
-
- result = get_user(value, uptr);
- if (result)
- return result;
- /* only LIRC_MODE_PULSE supported */
- if (value != LIRC_MODE_PULSE)
- return -EINVAL;
- break;
-
- case LIRC_GET_LENGTH:
- return -ENOIOCTLCMD;
-
- case LIRC_SET_SEND_DUTY_CYCLE:
- pr_debug("SET_SEND_DUTY_CYCLE\n");
- if (!(hardware[type].features&LIRC_CAN_SET_SEND_DUTY_CYCLE))
- return -ENOIOCTLCMD;
-
- result = get_user(value, uptr);
- if (result)
- return result;
- if (value <= 0 || value > 100)
- return -EINVAL;
- return init_timing_params(value, freq);
-
- case LIRC_SET_SEND_CARRIER:
- pr_debug("SET_SEND_CARRIER\n");
- if (!(hardware[type].features&LIRC_CAN_SET_SEND_CARRIER))
- return -ENOIOCTLCMD;
-
- result = get_user(value, uptr);
- if (result)
- return result;
- if (value > 500000 || value < 20000)
- return -EINVAL;
- return init_timing_params(duty_cycle, value);
-
- default:
- return lirc_dev_fop_ioctl(filep, cmd, arg);
- }
- return 0;
-}
-
-static const struct file_operations lirc_fops = {
- .owner = THIS_MODULE,
- .write = lirc_write,
- .unlocked_ioctl = lirc_ioctl,
-#ifdef CONFIG_COMPAT
- .compat_ioctl = lirc_ioctl,
-#endif
- .read = lirc_dev_fop_read,
- .poll = lirc_dev_fop_poll,
- .open = lirc_dev_fop_open,
- .release = lirc_dev_fop_close,
- .llseek = no_llseek,
-};
-
-static struct lirc_driver driver = {
- .name = LIRC_DRIVER_NAME,
- .minor = -1,
- .code_length = 1,
- .sample_rate = 0,
- .data = NULL,
- .add_to_buf = NULL,
- .rbuf = &rbuf,
- .set_use_inc = set_use_inc,
- .set_use_dec = set_use_dec,
- .fops = &lirc_fops,
- .dev = NULL,
- .owner = THIS_MODULE,
-};
-
-static struct platform_device *lirc_serial_dev;
-
-static int lirc_serial_suspend(struct platform_device *dev,
- pm_message_t state)
-{
- /* Set DLAB 0. */
- soutp(UART_LCR, sinp(UART_LCR) & (~UART_LCR_DLAB));
-
- /* Disable all interrupts */
- soutp(UART_IER, sinp(UART_IER) &
- (~(UART_IER_MSI|UART_IER_RLSI|UART_IER_THRI|UART_IER_RDI)));
-
- /* Clear registers. */
- sinp(UART_LSR);
- sinp(UART_RX);
- sinp(UART_IIR);
- sinp(UART_MSR);
-
- return 0;
-}
-
-/* twisty maze... need a forward-declaration here... */
-static void lirc_serial_exit(void);
-
-static int lirc_serial_resume(struct platform_device *dev)
-{
- unsigned long flags;
- int result;
-
- result = hardware_init_port();
- if (result < 0)
- return result;
-
- spin_lock_irqsave(&hardware[type].lock, flags);
- /* Enable Interrupt */
- lastkt = ktime_get();
- soutp(UART_IER, sinp(UART_IER)|UART_IER_MSI);
- off();
-
- lirc_buffer_clear(&rbuf);
-
- spin_unlock_irqrestore(&hardware[type].lock, flags);
-
- return 0;
-}
-
-static struct platform_driver lirc_serial_driver = {
- .probe = lirc_serial_probe,
- .suspend = lirc_serial_suspend,
- .resume = lirc_serial_resume,
- .driver = {
- .name = "lirc_serial",
- },
-};
-
-static int __init lirc_serial_init(void)
-{
- int result;
-
- /* Init read buffer. */
- result = lirc_buffer_init(&rbuf, sizeof(int), RBUF_LEN);
- if (result < 0)
- return result;
-
- result = platform_driver_register(&lirc_serial_driver);
- if (result) {
- pr_err("lirc register returned %d\n", result);
- goto exit_buffer_free;
- }
-
- lirc_serial_dev = platform_device_alloc("lirc_serial", 0);
- if (!lirc_serial_dev) {
- result = -ENOMEM;
- goto exit_driver_unregister;
- }
-
- result = platform_device_add(lirc_serial_dev);
- if (result)
- goto exit_device_put;
-
- return 0;
-
-exit_device_put:
- platform_device_put(lirc_serial_dev);
-exit_driver_unregister:
- platform_driver_unregister(&lirc_serial_driver);
-exit_buffer_free:
- lirc_buffer_free(&rbuf);
- return result;
-}
-
-static void lirc_serial_exit(void)
-{
- platform_device_unregister(lirc_serial_dev);
- platform_driver_unregister(&lirc_serial_driver);
- lirc_buffer_free(&rbuf);
-}
-
-static int __init lirc_serial_init_module(void)
-{
- int result;
-
- switch (type) {
- case LIRC_HOMEBREW:
- case LIRC_IRDEO:
- case LIRC_IRDEO_REMOTE:
- case LIRC_ANIMAX:
- case LIRC_IGOR:
- /* if nothing specified, use ttyS0/com1 and irq 4 */
- io = io ? io : 0x3f8;
- irq = irq ? irq : 4;
- break;
- default:
- return -EINVAL;
- }
- if (!softcarrier) {
- switch (type) {
- case LIRC_HOMEBREW:
- case LIRC_IGOR:
- hardware[type].features &=
- ~(LIRC_CAN_SET_SEND_DUTY_CYCLE|
- LIRC_CAN_SET_SEND_CARRIER);
- break;
- }
- }
-
- /* make sure sense is either -1, 0, or 1 */
- if (sense != -1)
- sense = !!sense;
-
- result = lirc_serial_init();
- if (result)
- return result;
-
- driver.features = hardware[type].features;
- driver.dev = &lirc_serial_dev->dev;
- driver.minor = lirc_register_driver(&driver);
- if (driver.minor < 0) {
- pr_err("register_chrdev failed!\n");
- lirc_serial_exit();
- return driver.minor;
- }
- return 0;
-}
-
-static void __exit lirc_serial_exit_module(void)
-{
- lirc_unregister_driver(driver.minor);
- lirc_serial_exit();
- pr_debug("cleaned up module\n");
-}
-
-module_init(lirc_serial_init_module);
-module_exit(lirc_serial_exit_module);
-
-MODULE_DESCRIPTION("Infra-red receiver driver for serial ports.");
-MODULE_AUTHOR("Ralph Metzler, Trent Piepho, Ben Pfaff, "
- "Christoph Bartelmus, Andrei Tanas");
-MODULE_LICENSE("GPL");
-
-module_param(type, int, S_IRUGO);
-MODULE_PARM_DESC(type, "Hardware type (0 = home-brew, 1 = IRdeo,"
- " 2 = IRdeo Remote, 3 = AnimaX, 4 = IgorPlug,"
- " 5 = NSLU2 RX:CTS2/TX:GreenLED)");
-
-module_param(io, int, S_IRUGO);
-MODULE_PARM_DESC(io, "I/O address base (0x3f8 or 0x2f8)");
-
-/* some architectures (e.g. intel xscale) have memory mapped registers */
-module_param(iommap, bool, S_IRUGO);
-MODULE_PARM_DESC(iommap, "physical base for memory mapped I/O"
- " (0 = no memory mapped io)");
-
-/*
- * some architectures (e.g. intel xscale) align the 8bit serial registers
- * on 32bit word boundaries.
- * See linux-kernel/drivers/tty/serial/8250/8250.c serial_in()/out()
- */
-module_param(ioshift, int, S_IRUGO);
-MODULE_PARM_DESC(ioshift, "shift I/O register offset (0 = no shift)");
-
-module_param(irq, int, S_IRUGO);
-MODULE_PARM_DESC(irq, "Interrupt (4 or 3)");
-
-module_param(share_irq, bool, S_IRUGO);
-MODULE_PARM_DESC(share_irq, "Share interrupts (0 = off, 1 = on)");
-
-module_param(sense, int, S_IRUGO);
-MODULE_PARM_DESC(sense, "Override autodetection of IR receiver circuit"
- " (0 = active high, 1 = active low)");
-
-#ifdef CONFIG_LIRC_SERIAL_TRANSMITTER
-module_param(txsense, bool, S_IRUGO);
-MODULE_PARM_DESC(txsense, "Sense of transmitter circuit"
- " (0 = active high, 1 = active low)");
-#endif
-
-module_param(softcarrier, bool, S_IRUGO);
-MODULE_PARM_DESC(softcarrier, "Software carrier (0 = off, 1 = on, default on)");
diff --git a/drivers/staging/media/lirc/lirc_zilog.c b/drivers/staging/media/lirc/lirc_zilog.c
index 3551aed589c0..34aac3e2eb87 100644
--- a/drivers/staging/media/lirc/lirc_zilog.c
+++ b/drivers/staging/media/lirc/lirc_zilog.c
@@ -1157,8 +1157,8 @@ static ssize_t write(struct file *filep, const char __user *buf, size_t n,
/* Send the code */
if (ret == 0) {
- ret = send_code(tx, (unsigned)command >> 16,
- (unsigned)command & 0xFFFF);
+ ret = send_code(tx, (unsigned int)command >> 16,
+ (unsigned int)command & 0xFFFF);
if (ret == -EPROTO) {
mutex_unlock(&ir->ir_lock);
mutex_unlock(&tx->client_lock);
diff --git a/drivers/staging/media/omap4iss/iss_csi2.c b/drivers/staging/media/omap4iss/iss_csi2.c
index aaca39d751a5..f71d5f2f179f 100644
--- a/drivers/staging/media/omap4iss/iss_csi2.c
+++ b/drivers/staging/media/omap4iss/iss_csi2.c
@@ -224,7 +224,7 @@ static u16 csi2_ctx_map_format(struct iss_csi2_device *csi2)
fmtidx = 3;
break;
default:
- WARN(1, KERN_ERR "CSI2: pixel format %08x unsupported!\n",
+ WARN(1, "CSI2: pixel format %08x unsupported!\n",
fmt->code);
return 0;
}
diff --git a/drivers/staging/media/pulse8-cec/TODO b/drivers/staging/media/pulse8-cec/TODO
deleted file mode 100644
index fa6660245e5f..000000000000
--- a/drivers/staging/media/pulse8-cec/TODO
+++ /dev/null
@@ -1,52 +0,0 @@
-This driver needs to mature a bit more and another round of
-code cleanups.
-
-Otherwise it looks to be in good shape. And of course the fact
-that the CEC framework is in staging at the moment also prevents
-this driver from being mainlined.
-
-Some notes:
-
-1) Regarding the "autonomous" mode of the Pulse-Eight: currently this
-is disabled, but the idea is that this allows basic functionality
-when the PC is off, and it can wake up the PC through USB.
-
-To prevent the device from going into autonomous mode, the driver would
-have to send MSGCODE_SET_CONTROLLED 1 and then send a ping every
-30 seconds (in practice once every 15 seconds would be good). When
-powering off or going to standby send MSGCODE_SET_CONTROLLED 0 to
-turn the autonomous mode back on.
-
-This needs to be implemented in the driver. Autonomous mode was
-added in firmware v2.
-
-2) Writing to the EEPROM can only be done once every 10 seconds.
-
-3) To use this driver you also need to patch the inputattach utility;
-this patch will be submitted once this driver is moved out of staging.
-
-diff -urN linuxconsoletools-1.4.9/utils/inputattach.c linuxconsoletools-1.4.9.new/utils/inputattach.c
---- linuxconsoletools-1.4.9/utils/inputattach.c 2016-01-09 16:27:02.000000000 +0100
-+++ linuxconsoletools-1.4.9.new/utils/inputattach.c 2016-03-20 11:35:31.707788967 +0100
-@@ -861,6 +861,9 @@
- { "--wacom_iv", "-wacom_iv", "Wacom protocol IV tablet",
- B9600, CS8 | CRTSCTS,
- SERIO_WACOM_IV, 0x00, 0x00, 0, wacom_iv_init },
-+{ "--pulse8-cec", "-pulse8-cec", "Pulse Eight HDMI CEC dongle",
-+ B9600, CS8,
-+ SERIO_PULSE8_CEC, 0x00, 0x00, 0, NULL },
- { NULL, NULL, NULL, 0, 0, 0, 0, 0, 0, NULL }
- };
-
-diff -urN linuxconsoletools-1.4.9/utils/serio-ids.h linuxconsoletools-1.4.9.new/utils/serio-ids.h
---- linuxconsoletools-1.4.9/utils/serio-ids.h 2015-04-26 18:29:42.000000000 +0200
-+++ linuxconsoletools-1.4.9.new/utils/serio-ids.h 2016-03-20 11:41:00.153558539 +0100
-@@ -131,5 +131,8 @@
- #ifndef SERIO_EASYPEN
- # define SERIO_EASYPEN 0x3f
- #endif
-+#ifndef SERIO_PULSE8_CEC
-+# define SERIO_PULSE8_CEC 0x40
-+#endif
-
- #endif
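
For note 1, the keep-alive could take the shape below. This is a hypothetical sketch only: p8_send() and p8_send_arg() stand in for whatever transmit helper the driver grows, the 15 s period follows the TODO's advice, and only the MSGCODE_* opcodes come from the Pulse-Eight protocol.

    #include <linux/workqueue.h>

    #define P8_PING_PERIOD  (15 * HZ)   /* TODO suggests <= 30 s */

    static struct delayed_work p8_ping_work;

    static void p8_ping_fn(struct work_struct *work)
    {
        p8_send(MSGCODE_PING);          /* keeps autonomous mode off */
        schedule_delayed_work(&p8_ping_work, P8_PING_PERIOD);
    }

    static void p8_take_control(void)   /* at probe and resume */
    {
        p8_send_arg(MSGCODE_SET_CONTROLLED, 1);
        INIT_DELAYED_WORK(&p8_ping_work, p8_ping_fn);
        schedule_delayed_work(&p8_ping_work, P8_PING_PERIOD);
    }

    static void p8_release_control(void) /* at standby and poweroff */
    {
        cancel_delayed_work_sync(&p8_ping_work);
        p8_send_arg(MSGCODE_SET_CONTROLLED, 0);
    }
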
diff --git a/drivers/staging/media/s5p-cec/Kconfig b/drivers/staging/media/s5p-cec/Kconfig
index 0315fd7ad0f1..ddfd955da0d4 100644
--- a/drivers/staging/media/s5p-cec/Kconfig
+++ b/drivers/staging/media/s5p-cec/Kconfig
@@ -1,6 +1,6 @@
config VIDEO_SAMSUNG_S5P_CEC
tristate "Samsung S5P CEC driver"
- depends on VIDEO_DEV && MEDIA_CEC && (PLAT_S5P || ARCH_EXYNOS || COMPILE_TEST)
+ depends on VIDEO_DEV && MEDIA_CEC_SUPPORT && (PLAT_S5P || ARCH_EXYNOS || COMPILE_TEST)
---help---
This is a driver for Samsung S5P HDMI CEC interface. It uses the
generic CEC framework interface.
diff --git a/drivers/staging/media/s5p-cec/TODO b/drivers/staging/media/s5p-cec/TODO
index f51d5268ac40..64f21bab38f5 100644
--- a/drivers/staging/media/s5p-cec/TODO
+++ b/drivers/staging/media/s5p-cec/TODO
@@ -1,7 +1,7 @@
-This driver depends on the CEC framework, which is currently in
-staging, so therefor this driver is in staging as well.
+This driver requires that userspace sets the physical address.
+However, this should be passed on from the corresponding
+Samsung HDMI driver.
-In addition, this driver requires that userspace sets the physical
-address. However, this should be passed on from the corresponding
-samsung HDMI driver. It is very annoying if userspace has to do this,
-and other than USB CEC adapters this must be handled automatically.
+We have to wait until the HDMI notifier framework has been merged
+in order to handle this gracefully; until that time this driver
+has to remain in staging.
diff --git a/drivers/staging/media/s5p-cec/s5p_cec.c b/drivers/staging/media/s5p-cec/s5p_cec.c
index 1780a08b73c9..2a07968b5ac6 100644
--- a/drivers/staging/media/s5p-cec/s5p_cec.c
+++ b/drivers/staging/media/s5p-cec/s5p_cec.c
@@ -22,7 +22,6 @@
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/timer.h>
-#include <linux/version.h>
#include <linux/workqueue.h>
#include <media/cec.h>
@@ -204,12 +203,11 @@ static int s5p_cec_probe(struct platform_device *pdev)
cec->adap = cec_allocate_adapter(&s5p_cec_adap_ops, cec,
CEC_NAME,
CEC_CAP_PHYS_ADDR | CEC_CAP_LOG_ADDRS | CEC_CAP_TRANSMIT |
- CEC_CAP_PASSTHROUGH | CEC_CAP_RC,
- 1, &pdev->dev);
+ CEC_CAP_PASSTHROUGH | CEC_CAP_RC, 1);
ret = PTR_ERR_OR_ZERO(cec->adap);
if (ret)
return ret;
- ret = cec_register_adapter(cec->adap);
+ ret = cec_register_adapter(cec->adap, &pdev->dev);
if (ret) {
cec_delete_adapter(cec->adap);
return ret;
@@ -231,7 +229,7 @@ static int s5p_cec_remove(struct platform_device *pdev)
return 0;
}
-static int s5p_cec_runtime_suspend(struct device *dev)
+static int __maybe_unused s5p_cec_runtime_suspend(struct device *dev)
{
struct s5p_cec_dev *cec = dev_get_drvdata(dev);
@@ -239,7 +237,7 @@ static int s5p_cec_runtime_suspend(struct device *dev)
return 0;
}
-static int s5p_cec_runtime_resume(struct device *dev)
+static int __maybe_unused s5p_cec_runtime_resume(struct device *dev)
{
struct s5p_cec_dev *cec = dev_get_drvdata(dev);
int ret;
@@ -263,6 +261,7 @@ static const struct of_device_id s5p_cec_match[] = {
},
{},
};
+MODULE_DEVICE_TABLE(of, s5p_cec_match);
static struct platform_driver s5p_cec_pdrv = {
.probe = s5p_cec_probe,
diff --git a/drivers/staging/media/st-cec/Kconfig b/drivers/staging/media/st-cec/Kconfig
index 784d2c600aca..c04283db58d6 100644
--- a/drivers/staging/media/st-cec/Kconfig
+++ b/drivers/staging/media/st-cec/Kconfig
@@ -1,6 +1,6 @@
config VIDEO_STI_HDMI_CEC
tristate "STMicroelectronics STiH4xx HDMI CEC driver"
- depends on VIDEO_DEV && MEDIA_CEC && (ARCH_STI || COMPILE_TEST)
+ depends on VIDEO_DEV && MEDIA_CEC_SUPPORT && (ARCH_STI || COMPILE_TEST)
---help---
This is a driver for STIH4xx HDMI CEC interface. It uses the
generic CEC framework interface.
diff --git a/drivers/staging/media/st-cec/TODO b/drivers/staging/media/st-cec/TODO
new file mode 100644
index 000000000000..c61289742c5c
--- /dev/null
+++ b/drivers/staging/media/st-cec/TODO
@@ -0,0 +1,7 @@
+This driver requires that userspace sets the physical address.
+However, this should be passed on from the corresponding
+ST HDMI driver.
+
+We have to wait until the HDMI notifier framework has been merged
+in order to handle this gracefully; until that time this driver
+has to remain in staging.
diff --git a/drivers/staging/media/st-cec/stih-cec.c b/drivers/staging/media/st-cec/stih-cec.c
index 214344866a6b..3c25638a9610 100644
--- a/drivers/staging/media/st-cec/stih-cec.c
+++ b/drivers/staging/media/st-cec/stih-cec.c
@@ -16,7 +16,6 @@
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
-#include <linux/version.h>
#include <media/cec.h>
@@ -108,11 +107,11 @@
/* Constants for CEC_BIT_TOUT_THRESH register */
#define CEC_SBIT_TOUT_47MS BIT(1)
-#define CEC_SBIT_TOUT_48MS BIT(0) | BIT(1)
+#define CEC_SBIT_TOUT_48MS (BIT(0) | BIT(1))
#define CEC_SBIT_TOUT_50MS BIT(2)
#define CEC_DBIT_TOUT_27MS BIT(0)
#define CEC_DBIT_TOUT_28MS BIT(1)
-#define CEC_DBIT_TOUT_29MS BIT(0) | BIT(1)
+#define CEC_DBIT_TOUT_29MS (BIT(0) | BIT(1))
/* Constants for CEC_BIT_PULSE_THRESH register */
#define CEC_BIT_LPULSE_03MS BIT(1)
@@ -336,13 +335,12 @@ static int stih_cec_probe(struct platform_device *pdev)
cec->adap = cec_allocate_adapter(&sti_cec_adap_ops, cec,
CEC_NAME,
CEC_CAP_LOG_ADDRS | CEC_CAP_PASSTHROUGH |
- CEC_CAP_PHYS_ADDR | CEC_CAP_TRANSMIT,
- 1, &pdev->dev);
+ CEC_CAP_PHYS_ADDR | CEC_CAP_TRANSMIT, 1);
ret = PTR_ERR_OR_ZERO(cec->adap);
if (ret)
return ret;
- ret = cec_register_adapter(cec->adap);
+ ret = cec_register_adapter(cec->adap, &pdev->dev);
if (ret) {
cec_delete_adapter(cec->adap);
return ret;
@@ -363,6 +361,7 @@ static const struct of_device_id stih_cec_match[] = {
},
{},
};
+MODULE_DEVICE_TABLE(of, stih_cec_match);
static struct platform_driver stih_cec_pdrv = {
.probe = stih_cec_probe,
diff --git a/drivers/staging/most/aim-network/networking.c b/drivers/staging/most/aim-network/networking.c
index 4659a6450c04..ce1764cba5f0 100644
--- a/drivers/staging/most/aim-network/networking.c
+++ b/drivers/staging/most/aim-network/networking.c
@@ -67,10 +67,10 @@ struct net_dev_context {
struct most_interface *iface;
bool channels_opened;
bool is_mamac;
- unsigned char link_stat;
struct net_device *dev;
struct net_dev_channel rx;
struct net_dev_channel tx;
+ struct completion mac_compl;
struct list_head list;
};
@@ -181,6 +181,7 @@ static int most_nd_set_mac_address(struct net_device *dev, void *p)
static int most_nd_open(struct net_device *dev)
{
struct net_dev_context *nd = dev->ml_priv;
+ long ret;
netdev_info(dev, "open net device\n");
@@ -202,16 +203,30 @@ static int most_nd_open(struct net_device *dev)
return -EBUSY;
}
- nd->channels_opened = true;
-
- if (nd->is_mamac) {
- nd->link_stat = 1;
- netif_wake_queue(dev);
- } else {
+ if (!is_valid_ether_addr(dev->dev_addr)) {
nd->iface->request_netinfo(nd->iface, nd->tx.ch_id);
+ ret = wait_for_completion_interruptible_timeout(
+ &nd->mac_compl, msecs_to_jiffies(5000));
+ if (!ret) {
+ netdev_err(dev, "mac timeout\n");
+ ret = -EBUSY;
+ goto err;
+ }
+
+ if (ret < 0) {
+ netdev_warn(dev, "mac waiting interrupted\n");
+ goto err;
+ }
}
+ nd->channels_opened = true;
+ netif_wake_queue(dev);
return 0;
+
+err:
+ most_stop_channel(nd->iface, nd->tx.ch_id, &aim);
+ most_stop_channel(nd->iface, nd->rx.ch_id, &aim);
+ return ret;
}
static int most_nd_stop(struct net_device *dev)
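
The open() path above keys off wait_for_completion_interruptible_timeout(), whose return convention is easy to misread: positive means the completion fired (remaining jiffies), zero means timeout, negative means it was interrupted (-ERESTARTSYS). A sketch of the same wait factored out, with the timeout mapped to a conventional error code (the hunk maps it to -EBUSY instead, which is just as defensible for open()):

    #include <linux/completion.h>
    #include <linux/errno.h>
    #include <linux/jiffies.h>

    /* 0 on success, -ETIMEDOUT or -ERESTARTSYS otherwise */
    static int wait_for_mac(struct completion *done, unsigned int ms)
    {
        long ret = wait_for_completion_interruptible_timeout(
                        done, msecs_to_jiffies(ms));

        if (ret == 0)
            return -ETIMEDOUT;
        return ret < 0 ? ret : 0;
    }
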
@@ -277,7 +292,6 @@ static const struct net_device_ops most_nd_ops = {
static void most_nd_setup(struct net_device *dev)
{
- netdev_info(dev, "setup net device\n");
ether_setup(dev);
dev->netdev_ops = &most_nd_ops;
}
@@ -332,6 +346,7 @@ static int aim_probe_channel(struct most_interface *iface, int channel_idx,
if (!nd)
return -ENOMEM;
+ init_completion(&nd->mac_compl);
nd->iface = iface;
spin_lock_irqsave(&list_lock, flags);
@@ -548,8 +563,7 @@ void most_deliver_netinfo(struct most_interface *iface,
{
struct net_dev_context *nd;
struct net_device *dev;
-
- pr_info("Received netinfo from %s\n", iface->description);
+ const u8 *m = mac_addr;
nd = get_net_dev_context(iface);
if (!nd)
@@ -559,15 +573,16 @@ void most_deliver_netinfo(struct most_interface *iface,
if (!dev)
return;
- if (mac_addr)
- ether_addr_copy(dev->dev_addr, mac_addr);
-
- if (nd->link_stat != link_stat) {
- nd->link_stat = link_stat;
- if (nd->link_stat)
- netif_wake_queue(dev);
- else
- netif_stop_queue(dev);
+ if (m && is_valid_ether_addr(m)) {
+ if (!is_valid_ether_addr(dev->dev_addr)) {
+ netdev_info(dev, "set mac %02x-%02x-%02x-%02x-%02x-%02x\n",
+ m[0], m[1], m[2], m[3], m[4], m[5]);
+ ether_addr_copy(dev->dev_addr, m);
+ complete(&nd->mac_compl);
+ } else if (!ether_addr_equal(dev->dev_addr, m)) {
+ netdev_warn(dev, "reject mac %02x-%02x-%02x-%02x-%02x-%02x\n",
+ m[0], m[1], m[2], m[3], m[4], m[5]);
+ }
}
}
EXPORT_SYMBOL(most_deliver_netinfo);
diff --git a/drivers/staging/most/hdm-dim2/dim2_hdm.c b/drivers/staging/most/hdm-dim2/dim2_hdm.c
index 78b2c3dd9bb3..35aee9fbbf02 100644
--- a/drivers/staging/most/hdm-dim2/dim2_hdm.c
+++ b/drivers/staging/most/hdm-dim2/dim2_hdm.c
@@ -306,14 +306,11 @@ static int deliver_netinfo_thread(void *data)
static void retrieve_netinfo(struct dim2_hdm *dev, struct mbo *mbo)
{
u8 *data = mbo->virt_address;
- u8 *mac = dev->mac_addrs;
pr_info("Node Address: 0x%03x\n", (u16)data[16] << 8 | data[17]);
dev->link_state = data[18];
pr_info("NIState: %d\n", dev->link_state);
- memcpy(mac, data + 19, 6);
- pr_info("MAC address: %02X:%02X:%02X:%02X:%02X:%02X\n",
- mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
+ memcpy(dev->mac_addrs, data + 19, 6);
dev->deliver_netinfo++;
wake_up_interruptible(&dev->netinfo_waitq);
}
diff --git a/drivers/staging/most/hdm-usb/hdm_usb.c b/drivers/staging/most/hdm-usb/hdm_usb.c
index 26c9adb29308..d6db0bd65be0 100644
--- a/drivers/staging/most/hdm-usb/hdm_usb.c
+++ b/drivers/staging/most/hdm-usb/hdm_usb.c
@@ -97,9 +97,7 @@ struct clear_hold_work {
* @cap: channel capabilities
* @conf: channel configuration
* @dci: direct communication interface of hardware
- * @hw_addr: MAC address of hardware
* @ep_address: endpoint address table
- * @link_stat: link status of hardware
* @description: device description
* @suffix: suffix for channel name
* @channel_lock: synchronize channel access
@@ -117,9 +115,7 @@ struct most_dev {
struct most_channel_capability *cap;
struct most_channel_config *conf;
struct most_dci_obj *dci;
- u8 hw_addr[6];
u8 *ep_address;
- u16 link_stat;
char description[MAX_STRING_LEN];
char suffix[MAX_NUM_ENDPOINTS][MAX_SUFFIX_LEN];
spinlock_t channel_lock[MAX_NUM_ENDPOINTS]; /* sync channel access */
@@ -186,28 +182,9 @@ static inline int drci_wr_reg(struct usb_device *dev, u16 reg, u16 data)
5 * HZ);
}
-/**
- * free_anchored_buffers - free device's anchored items
- * @mdev: the device
- * @channel: channel ID
- * @status: status of MBO termination
- */
-static void free_anchored_buffers(struct most_dev *mdev, unsigned int channel,
- enum mbo_status_flags status)
+static inline int start_sync_ep(struct usb_device *usb_dev, u16 ep)
{
- struct mbo *mbo;
- struct urb *urb;
-
- while ((urb = usb_get_from_anchor(&mdev->busy_urbs[channel]))) {
- mbo = urb->context;
- usb_kill_urb(urb);
- if (mbo && mbo->complete) {
- mbo->status = status;
- mbo->processed_length = 0;
- mbo->complete(mbo);
- }
- usb_free_urb(urb);
- }
+ return drci_wr_reg(usb_dev, DRCI_REG_BASE + DRCI_COMMAND + ep * 16, 1);
}
/**
@@ -278,7 +255,7 @@ static int hdm_poison_channel(struct most_interface *iface, int channel)
cancel_work_sync(&mdev->clear_work[channel].ws);
mutex_lock(&mdev->io_mutex);
- free_anchored_buffers(mdev, channel, MBO_E_CLOSE);
+ usb_kill_anchored_urbs(&mdev->busy_urbs[channel]);
if (mdev->padding_active[channel])
mdev->padding_active[channel] = false;
@@ -377,33 +354,27 @@ static void hdm_write_completion(struct urb *urb)
unsigned long flags;
spin_lock_irqsave(lock, flags);
- if (urb->status == -ENOENT || urb->status == -ECONNRESET ||
- !mdev->is_channel_healthy[channel]) {
- spin_unlock_irqrestore(lock, flags);
- return;
- }
- if (unlikely(urb->status && urb->status != -ESHUTDOWN)) {
- mbo->processed_length = 0;
+ mbo->processed_length = 0;
+ mbo->status = MBO_E_INVAL;
+ if (likely(mdev->is_channel_healthy[channel])) {
switch (urb->status) {
+ case 0:
+ case -ESHUTDOWN:
+ mbo->processed_length = urb->actual_length;
+ mbo->status = MBO_SUCCESS;
+ break;
case -EPIPE:
dev_warn(dev, "Broken OUT pipe detected\n");
mdev->is_channel_healthy[channel] = false;
- spin_unlock_irqrestore(lock, flags);
mdev->clear_work[channel].pipe = urb->pipe;
schedule_work(&mdev->clear_work[channel].ws);
- return;
+ break;
case -ENODEV:
case -EPROTO:
mbo->status = MBO_E_CLOSE;
break;
- default:
- mbo->status = MBO_E_INVAL;
- break;
}
- } else {
- mbo->status = MBO_SUCCESS;
- mbo->processed_length = urb->actual_length;
}
spin_unlock_irqrestore(lock, flags);
@@ -531,40 +502,35 @@ static void hdm_read_completion(struct urb *urb)
unsigned long flags;
spin_lock_irqsave(lock, flags);
- if (urb->status == -ENOENT || urb->status == -ECONNRESET ||
- !mdev->is_channel_healthy[channel]) {
- spin_unlock_irqrestore(lock, flags);
- return;
- }
- if (unlikely(urb->status && urb->status != -ESHUTDOWN)) {
- mbo->processed_length = 0;
+ mbo->processed_length = 0;
+ mbo->status = MBO_E_INVAL;
+ if (likely(mdev->is_channel_healthy[channel])) {
switch (urb->status) {
+ case 0:
+ case -ESHUTDOWN:
+ mbo->processed_length = urb->actual_length;
+ mbo->status = MBO_SUCCESS;
+ if (mdev->padding_active[channel] &&
+ hdm_remove_padding(mdev, channel, mbo)) {
+ mbo->processed_length = 0;
+ mbo->status = MBO_E_INVAL;
+ }
+ break;
case -EPIPE:
dev_warn(dev, "Broken IN pipe detected\n");
mdev->is_channel_healthy[channel] = false;
- spin_unlock_irqrestore(lock, flags);
mdev->clear_work[channel].pipe = urb->pipe;
schedule_work(&mdev->clear_work[channel].ws);
- return;
+ break;
case -ENODEV:
case -EPROTO:
mbo->status = MBO_E_CLOSE;
break;
case -EOVERFLOW:
dev_warn(dev, "Babble on IN pipe detected\n");
- default:
- mbo->status = MBO_E_INVAL;
break;
}
- } else {
- mbo->processed_length = urb->actual_length;
- mbo->status = MBO_SUCCESS;
- if (mdev->padding_active[channel] &&
- hdm_remove_padding(mdev, channel, mbo)) {
- mbo->processed_length = 0;
- mbo->status = MBO_E_INVAL;
- }
}
spin_unlock_irqrestore(lock, flags);
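
Both URB completion handlers now share one shape: take the channel lock, assume failure (length 0, MBO_E_INVAL), and upgrade only for the statuses that really carry data. That removes the duplicated unlock-and-return exits of the old code and funnels every branch through a single unlock. The skeleton, stripped of the per-direction details:

    spin_lock_irqsave(lock, flags);
    mbo->processed_length = 0;
    mbo->status = MBO_E_INVAL;          /* pessimistic default */
    if (likely(mdev->is_channel_healthy[channel])) {
        switch (urb->status) {
        case 0:
        case -ESHUTDOWN:
            mbo->processed_length = urb->actual_length;
            mbo->status = MBO_SUCCESS;
            break;
        /* error cases only adjust mbo->status or schedule work */
        }
    }
    spin_unlock_irqrestore(lock, flags);
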
@@ -668,6 +634,15 @@ _error:
* @iface: interface
* @channel: channel ID
* @conf: structure that holds the configuration information
+ *
+ * The attached network interface controller (NIC) supports a padding mode
+ * to avoid short packets on USB, hence increasing the performance due to a
+ * lower interrupt load. This mode is default for synchronous data and can
+ * be switched on for isochronous data. In case padding is active the
+ * driver needs to know the frame size of the payload in order to calculate
+ * the number of bytes it needs to pad when transmitting or to cut off when
+ * receiving data.
+ *
*/
static int hdm_configure_channel(struct most_interface *iface, int channel,
struct most_channel_config *conf)
@@ -701,6 +676,11 @@ static int hdm_configure_channel(struct most_interface *iface, int channel,
!(conf->data_type == MOST_CH_ISOC &&
conf->packets_per_xact != 0xFF)) {
mdev->padding_active[channel] = false;
+ /*
+ * Since the NIC's padding mode is not going to be
+ * used, we can skip the frame size calculations and
+ * move directly on to exit.
+ */
goto exit;
}
@@ -734,56 +714,12 @@ static int hdm_configure_channel(struct most_interface *iface, int channel,
- conf->buffer_size;
exit:
mdev->conf[channel] = *conf;
- return 0;
-}
+ if (conf->data_type == MOST_CH_ASYNC) {
+ u16 ep = mdev->ep_address[channel];
-/**
- * hdm_update_netinfo - retrieve latest networking information
- * @mdev: device interface
- *
- * This triggers the USB vendor requests to read the hardware address and
- * the current link status of the attached device.
- */
-static int hdm_update_netinfo(struct most_dev *mdev)
-{
- struct usb_device *usb_device = mdev->usb_device;
- struct device *dev = &usb_device->dev;
- u16 hi, mi, lo, link;
-
- if (!is_valid_ether_addr(mdev->hw_addr)) {
- if (drci_rd_reg(usb_device, DRCI_REG_HW_ADDR_HI, &hi) < 0) {
- dev_err(dev, "Vendor request \"hw_addr_hi\" failed\n");
- return -EFAULT;
- }
-
- if (drci_rd_reg(usb_device, DRCI_REG_HW_ADDR_MI, &mi) < 0) {
- dev_err(dev, "Vendor request \"hw_addr_mid\" failed\n");
- return -EFAULT;
- }
-
- if (drci_rd_reg(usb_device, DRCI_REG_HW_ADDR_LO, &lo) < 0) {
- dev_err(dev, "Vendor request \"hw_addr_low\" failed\n");
- return -EFAULT;
- }
-
- mutex_lock(&mdev->io_mutex);
- mdev->hw_addr[0] = hi >> 8;
- mdev->hw_addr[1] = hi;
- mdev->hw_addr[2] = mi >> 8;
- mdev->hw_addr[3] = mi;
- mdev->hw_addr[4] = lo >> 8;
- mdev->hw_addr[5] = lo;
- mutex_unlock(&mdev->io_mutex);
- }
-
- if (drci_rd_reg(usb_device, DRCI_REG_NI_STATE, &link) < 0) {
- dev_err(dev, "Vendor request \"link status\" failed\n");
- return -EFAULT;
+ if (start_sync_ep(mdev->usb_device, ep) < 0)
+ dev_warn(dev, "sync for ep%02x failed", ep);
}
-
- mutex_lock(&mdev->io_mutex);
- mdev->link_stat = link;
- mutex_unlock(&mdev->io_mutex);
return 0;
}
@@ -807,7 +743,7 @@ static void hdm_request_netinfo(struct most_interface *iface, int channel)
}
/**
- * link_stat_timer_handler - add work to link_stat work queue
+ * link_stat_timer_handler - schedule work obtaining mac address and link status
* @data: pointer to USB device instance
*
* The handler runs in interrupt context. That's why we need to defer the
@@ -823,33 +759,47 @@ static void link_stat_timer_handler(unsigned long data)
}
/**
- * wq_netinfo - work queue function
+ * wq_netinfo - work queue function to deliver latest networking information
* @wq_obj: object that holds data for our deferred work to do
*
* This retrieves the network interface status of the USB INIC
- * and compares it with the current status. If the status has
- * changed, it updates the status of the core.
*/
static void wq_netinfo(struct work_struct *wq_obj)
{
struct most_dev *mdev = to_mdev_from_work(wq_obj);
- int i, prev_link_stat = mdev->link_stat;
- u8 prev_hw_addr[6];
+ struct usb_device *usb_device = mdev->usb_device;
+ struct device *dev = &usb_device->dev;
+ u16 hi, mi, lo, link;
+ u8 hw_addr[6];
- for (i = 0; i < 6; i++)
- prev_hw_addr[i] = mdev->hw_addr[i];
+ if (drci_rd_reg(usb_device, DRCI_REG_HW_ADDR_HI, &hi) < 0) {
+ dev_err(dev, "Vendor request 'hw_addr_hi' failed\n");
+ return;
+ }
+
+ if (drci_rd_reg(usb_device, DRCI_REG_HW_ADDR_MI, &mi) < 0) {
+ dev_err(dev, "Vendor request 'hw_addr_mid' failed\n");
+ return;
+ }
- if (hdm_update_netinfo(mdev) < 0)
+ if (drci_rd_reg(usb_device, DRCI_REG_HW_ADDR_LO, &lo) < 0) {
+ dev_err(dev, "Vendor request 'hw_addr_low' failed\n");
+ return;
+ }
+
+ if (drci_rd_reg(usb_device, DRCI_REG_NI_STATE, &link) < 0) {
+ dev_err(dev, "Vendor request 'link status' failed\n");
return;
- if (prev_link_stat != mdev->link_stat ||
- prev_hw_addr[0] != mdev->hw_addr[0] ||
- prev_hw_addr[1] != mdev->hw_addr[1] ||
- prev_hw_addr[2] != mdev->hw_addr[2] ||
- prev_hw_addr[3] != mdev->hw_addr[3] ||
- prev_hw_addr[4] != mdev->hw_addr[4] ||
- prev_hw_addr[5] != mdev->hw_addr[5])
- most_deliver_netinfo(&mdev->iface, mdev->link_stat,
- &mdev->hw_addr[0]);
+ }
+
+ hw_addr[0] = hi >> 8;
+ hw_addr[1] = hi;
+ hw_addr[2] = mi >> 8;
+ hw_addr[3] = mi;
+ hw_addr[4] = lo >> 8;
+ hw_addr[5] = lo;
+
+ most_deliver_netinfo(&mdev->iface, link, hw_addr);
}
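
With hdm_update_netinfo() gone, wq_netinfo() reads the three address registers and the link state directly and reports to the core every time, leaving change detection to most_deliver_netinfo(). The byte assembly assumes the DRCI registers hold the EUI-48 as three big-endian 16-bit halves; factored out it reads:

    /* hi/mi/lo are the DRCI_REG_HW_ADDR_{HI,MI,LO} register values */
    static void mac_from_regs(u8 addr[6], u16 hi, u16 mi, u16 lo)
    {
        addr[0] = hi >> 8;
        addr[1] = hi & 0xff;
        addr[2] = mi >> 8;
        addr[3] = mi & 0xff;
        addr[4] = lo >> 8;
        addr[5] = lo & 0xff;
    }
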
/**
@@ -867,7 +817,7 @@ static void wq_clear_halt(struct work_struct *wq_obj)
mutex_lock(&mdev->io_mutex);
most_stop_enqueue(&mdev->iface, channel);
- free_anchored_buffers(mdev, channel, MBO_E_INVAL);
+ usb_kill_anchored_urbs(&mdev->busy_urbs[channel]);
if (usb_clear_halt(mdev->usb_device, pipe))
dev_warn(&mdev->usb_device->dev, "Failed to reset endpoint.\n");
@@ -1053,6 +1003,7 @@ static ssize_t store_value(struct most_dci_obj *dci_obj,
u16 val;
u16 reg_addr;
const char *name = attr->attr.name;
+ struct usb_device *usb_dev = dci_obj->usb_device;
int err = kstrtou16(buf, 16, &val);
if (err)
@@ -1063,18 +1014,15 @@ static ssize_t store_value(struct most_dci_obj *dci_obj,
return count;
}
- if (!strcmp(name, "arb_value")) {
- reg_addr = dci_obj->reg_addr;
- } else if (!strcmp(name, "sync_ep")) {
- u16 ep = val;
-
- reg_addr = DRCI_REG_BASE + DRCI_COMMAND + ep * 16;
- val = 1;
- } else if (get_static_reg_addr(ro_regs, name, &reg_addr)) {
+ if (!strcmp(name, "arb_value"))
+ err = drci_wr_reg(usb_dev, dci_obj->reg_addr, val);
+ else if (!strcmp(name, "sync_ep"))
+ err = start_sync_ep(usb_dev, val);
+ else if (!get_static_reg_addr(ro_regs, name, &reg_addr))
+ err = drci_wr_reg(usb_dev, reg_addr, val);
+ else
return -EFAULT;
- }
- err = drci_wr_reg(dci_obj->usb_device, reg_addr, val);
if (err < 0)
return err;
@@ -1186,7 +1134,6 @@ hdm_probe(struct usb_interface *interface, const struct usb_device_id *id)
struct most_channel_capability *tmp_cap;
struct usb_endpoint_descriptor *ep_desc;
int ret = 0;
- int err;
if (!mdev)
goto exit_ENOMEM;
@@ -1262,13 +1209,6 @@ hdm_probe(struct usb_interface *interface, const struct usb_device_id *id)
tmp_cap++;
init_usb_anchor(&mdev->busy_urbs[i]);
spin_lock_init(&mdev->channel_lock[i]);
- err = drci_wr_reg(usb_dev,
- DRCI_REG_BASE + DRCI_COMMAND +
- ep_desc->bEndpointAddress * 16,
- 1);
- if (err < 0)
- dev_warn(dev, "DCI Sync for EP %02x failed",
- ep_desc->bEndpointAddress);
}
dev_notice(dev, "claimed gadget: Vendor=%4.4x ProdID=%4.4x Bus=%02x Device=%02x\n",
le16_to_cpu(usb_dev->descriptor.idVendor),
diff --git a/drivers/staging/most/mostcore/core.c b/drivers/staging/most/mostcore/core.c
index 329109c0024f..191404bc5906 100644
--- a/drivers/staging/most/mostcore/core.c
+++ b/drivers/staging/most/mostcore/core.c
@@ -342,7 +342,7 @@ static ssize_t show_channel_starving(struct most_c_obj *c,
}
#define create_show_channel_attribute(val) \
- static MOST_CHNL_ATTR(val, S_IRUGO, show_##val, NULL)
+ static MOST_CHNL_ATTR(val, 0444, show_##val, NULL)
create_show_channel_attribute(available_directions);
create_show_channel_attribute(available_datatypes);
@@ -494,9 +494,7 @@ static ssize_t store_set_packets_per_xact(struct most_c_obj *c,
}
#define create_channel_attribute(value) \
- static MOST_CHNL_ATTR(value, S_IRUGO | S_IWUSR, \
- show_##value, \
- store_##value)
+ static MOST_CHNL_ATTR(value, 0644, show_##value, store_##value)
create_channel_attribute(set_buffer_size);
create_channel_attribute(set_number_of_buffers);
@@ -690,7 +688,7 @@ static ssize_t show_interface(struct most_inst_obj *instance_obj,
}
#define create_inst_attribute(value) \
- static MOST_INST_ATTR(value, S_IRUGO, show_##value, NULL)
+ static MOST_INST_ATTR(value, 0444, show_##value, NULL)
create_inst_attribute(description);
create_inst_attribute(interface);
@@ -763,8 +761,6 @@ struct most_aim_obj {
struct kobject kobj;
struct list_head list;
struct most_aim *driver;
- char add_link[STRING_SIZE];
- char remove_link[STRING_SIZE];
};
#define to_aim_obj(d) container_of(d, struct most_aim_obj, kobj)
@@ -851,7 +847,7 @@ static void most_aim_release(struct kobject *kobj)
kfree(aim_obj);
}
-static ssize_t show_add_link(struct most_aim_obj *aim_obj,
+static ssize_t add_link_show(struct most_aim_obj *aim_obj,
struct most_aim_attribute *attr,
char *buf)
{
@@ -885,16 +881,16 @@ static ssize_t show_add_link(struct most_aim_obj *aim_obj,
*
* Examples:
*
- * Input: "mdev0:ch0@ep_81:my_channel\n" or
- * "mdev0:ch0@ep_81:my_channel"
+ * Input: "mdev0:ch6:my_channel\n" or
+ * "mdev0:ch6:my_channel"
*
- * Output: *a -> "mdev0", *b -> "ch0@ep_81", *c -> "my_channel"
+ * Output: *a -> "mdev0", *b -> "ch6", *c -> "my_channel"
*
- * Input: "mdev0:ch0@ep_81\n"
- * Output: *a -> "mdev0", *b -> "ch0@ep_81", *c -> ""
+ * Input: "mdev1:ep81\n"
+ * Output: *a -> "mdev1", *b -> "ep81", *c -> ""
*
- * Input: "mdev0:ch0@ep_81"
- * Output: *a -> "mdev0", *b -> "ch0@ep_81", *c == NULL
+ * Input: "mdev1:ep81"
+ * Output: *a -> "mdev1", *b -> "ep81", *c == NULL
*/
static int split_string(char *buf, char **a, char **b, char **c)
{
@@ -962,13 +958,13 @@ most_c_obj *get_channel_by_name(char *mdev, char *mdev_ch)
* Searches for a pair of device and channel and probes the AIM
*
* Example:
- * (1) echo -n -e "mdev0:ch0@ep_81:my_rxchannel\n" >add_link
- * (2) echo -n -e "mdev0:ch0@ep_81\n" >add_link
+ * (1) echo "mdev0:ch6:my_rxchannel" >add_link
+ * (2) echo "mdev1:ep81" >add_link
*
* (1) would create the device node /dev/my_rxchannel
- * (2) would create the device node /dev/mdev0-ch0@ep_81
+ * (2) would create the device node /dev/mdev1-ep81
*/
-static ssize_t store_add_link(struct most_aim_obj *aim_obj,
+static ssize_t add_link_store(struct most_aim_obj *aim_obj,
struct most_aim_attribute *attr,
const char *buf,
size_t len)
@@ -984,7 +980,6 @@ static ssize_t store_add_link(struct most_aim_obj *aim_obj,
size_t max_len = min_t(size_t, len + 1, STRING_SIZE);
strlcpy(buffer, buf, max_len);
- strlcpy(aim_obj->add_link, buf, max_len);
ret = split_string(buffer, &mdev, &mdev_ch, &mdev_devnod);
if (ret)
@@ -1019,14 +1014,7 @@ static ssize_t store_add_link(struct most_aim_obj *aim_obj,
}
static struct most_aim_attribute most_aim_attr_add_link =
- __ATTR(add_link, S_IRUGO | S_IWUSR, show_add_link, store_add_link);
-
-static ssize_t show_remove_link(struct most_aim_obj *aim_obj,
- struct most_aim_attribute *attr,
- char *buf)
-{
- return snprintf(buf, PAGE_SIZE, "%s\n", aim_obj->remove_link);
-}
+ __ATTR_RW(add_link);
/**
* store_remove_link - store function for remove_link attribute
@@ -1036,9 +1024,9 @@ static ssize_t show_remove_link(struct most_aim_obj *aim_obj,
* @len: buffer length
*
* Example:
- * echo -n -e "mdev0:ch0@ep_81\n" >remove_link
+ * echo "mdev0:ep81" >remove_link
*/
-static ssize_t store_remove_link(struct most_aim_obj *aim_obj,
+static ssize_t remove_link_store(struct most_aim_obj *aim_obj,
struct most_aim_attribute *attr,
const char *buf,
size_t len)
@@ -1051,7 +1039,6 @@ static ssize_t store_remove_link(struct most_aim_obj *aim_obj,
size_t max_len = min_t(size_t, len + 1, STRING_SIZE);
strlcpy(buffer, buf, max_len);
- strlcpy(aim_obj->remove_link, buf, max_len);
ret = split_string(buffer, &mdev, &mdev_ch, NULL);
if (ret)
return ret;
@@ -1070,8 +1057,7 @@ static ssize_t store_remove_link(struct most_aim_obj *aim_obj,
}
static struct most_aim_attribute most_aim_attr_remove_link =
- __ATTR(remove_link, S_IRUGO | S_IWUSR, show_remove_link,
- store_remove_link);
+ __ATTR_WO(remove_link);
static struct attribute *most_aim_def_attrs[] = {
&most_aim_attr_add_link.attr,
@@ -1761,9 +1747,6 @@ struct kobject *most_register_interface(struct most_interface *iface)
if (!name_suffix)
snprintf(channel_name, STRING_SIZE, "ch%d", i);
- else if (name_suffix[0] == '@')
- snprintf(channel_name, STRING_SIZE, "ch%d%s", i,
- name_suffix);
else
snprintf(channel_name, STRING_SIZE, "%s", name_suffix);
diff --git a/drivers/staging/netlogic/xlr_net.c b/drivers/staging/netlogic/xlr_net.c
index 552a7dcbf50b..fb0928a4fb97 100644
--- a/drivers/staging/netlogic/xlr_net.c
+++ b/drivers/staging/netlogic/xlr_net.c
@@ -172,29 +172,31 @@ static struct phy_device *xlr_get_phydev(struct xlr_net_priv *priv)
/*
* Ethtool operation
*/
-static int xlr_get_settings(struct net_device *ndev, struct ethtool_cmd *ecmd)
+static int xlr_get_link_ksettings(struct net_device *ndev,
+ struct ethtool_link_ksettings *ecmd)
{
struct xlr_net_priv *priv = netdev_priv(ndev);
struct phy_device *phydev = xlr_get_phydev(priv);
if (!phydev)
return -ENODEV;
- return phy_ethtool_gset(phydev, ecmd);
+ return phy_ethtool_ksettings_get(phydev, ecmd);
}
-static int xlr_set_settings(struct net_device *ndev, struct ethtool_cmd *ecmd)
+static int xlr_set_link_ksettings(struct net_device *ndev,
+ const struct ethtool_link_ksettings *ecmd)
{
struct xlr_net_priv *priv = netdev_priv(ndev);
struct phy_device *phydev = xlr_get_phydev(priv);
if (!phydev)
return -ENODEV;
- return phy_ethtool_sset(phydev, ecmd);
+ return phy_ethtool_ksettings_set(phydev, ecmd);
}
static const struct ethtool_ops xlr_ethtool_ops = {
- .get_settings = xlr_get_settings,
- .set_settings = xlr_set_settings,
+ .get_link_ksettings = xlr_get_link_ksettings,
+ .set_link_ksettings = xlr_set_link_ksettings,
};
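
The gset/sset ethtool ops are deprecated in favour of link_ksettings, which carries link modes as arbitrary-width bitmaps instead of fixed u32 masks. xlr needs the wrappers above only because it keeps its phy_device in driver-private data; a driver that attaches the PHY to netdev->phydev can, assuming the phylib helpers are available in its tree, wire the ops up directly:

    static const struct ethtool_ops ops = {
        .get_link_ksettings = phy_ethtool_get_link_ksettings,
        .set_link_ksettings = phy_ethtool_set_link_ksettings,
    };
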
/*
@@ -1005,10 +1007,8 @@ static int xlr_net_probe(struct platform_device *pdev)
*/
adapter = (struct xlr_adapter *)
devm_kzalloc(&pdev->dev, sizeof(*adapter), GFP_KERNEL);
- if (!adapter) {
- err = -ENOMEM;
- return err;
- }
+ if (!adapter)
+ return -ENOMEM;
/*
* XLR and XLS have 1 and 2 NAE controllers, respectively
diff --git a/drivers/staging/octeon/ethernet.c b/drivers/staging/octeon/ethernet.c
index d02e3e31ed29..8130dfe89745 100644
--- a/drivers/staging/octeon/ethernet.c
+++ b/drivers/staging/octeon/ethernet.c
@@ -259,17 +259,6 @@ static int cvm_oct_common_change_mtu(struct net_device *dev, int new_mtu)
#endif
int mtu_overhead = ETH_HLEN + ETH_FCS_LEN + vlan_bytes;
- /*
- * Limit the MTU to make sure the ethernet packets are between
- * 64 bytes and 65535 bytes.
- */
- if ((new_mtu + mtu_overhead < VLAN_ETH_ZLEN) ||
- (new_mtu + mtu_overhead > OCTEON_MAX_MTU)) {
- pr_err("MTU must be between %d and %d.\n",
- VLAN_ETH_ZLEN - mtu_overhead,
- OCTEON_MAX_MTU - mtu_overhead);
- return -EINVAL;
- }
dev->mtu = new_mtu;
if ((interface < 2) &&
@@ -457,7 +446,7 @@ int cvm_oct_common_init(struct net_device *dev)
dev->ethtool_ops = &cvm_oct_ethtool_ops;
cvm_oct_set_mac_filter(dev);
- dev->netdev_ops->ndo_change_mtu(dev, dev->mtu);
+ dev_set_mtu(dev, dev->mtu);
/*
* Zero out stats for port so we won't mistakenly show
@@ -685,6 +674,11 @@ static int cvm_oct_probe(struct platform_device *pdev)
int fau = FAU_NUM_PACKET_BUFFERS_TO_FREE;
int qos;
struct device_node *pip;
+ int mtu_overhead = ETH_HLEN + ETH_FCS_LEN;
+
+#if IS_ENABLED(CONFIG_VLAN_8021Q)
+ mtu_overhead += VLAN_HLEN;
+#endif
octeon_mdiobus_force_mod_depencency();
@@ -783,6 +777,8 @@ static int cvm_oct_probe(struct platform_device *pdev)
strcpy(dev->name, "pow%d");
for (qos = 0; qos < 16; qos++)
skb_queue_head_init(&priv->tx_free_list[qos]);
+ dev->min_mtu = VLAN_ETH_ZLEN - mtu_overhead;
+ dev->max_mtu = OCTEON_MAX_MTU - mtu_overhead;
if (register_netdev(dev) < 0) {
pr_err("Failed to register ethernet device for POW\n");
@@ -836,6 +832,8 @@ static int cvm_oct_probe(struct platform_device *pdev)
for (qos = 0; qos < cvmx_pko_get_num_queues(port);
qos++)
cvmx_fau_atomic_write32(priv->fau + qos * 4, 0);
+ dev->min_mtu = VLAN_ETH_ZLEN - mtu_overhead;
+ dev->max_mtu = OCTEON_MAX_MTU - mtu_overhead;
switch (priv->imode) {
/* These types don't support ports to IPD/PKO */
diff --git a/drivers/staging/rtl8188eu/Makefile b/drivers/staging/rtl8188eu/Makefile
index 29b9834870fd..27af86e05098 100644
--- a/drivers/staging/rtl8188eu/Makefile
+++ b/drivers/staging/rtl8188eu/Makefile
@@ -53,4 +53,4 @@ r8188eu-y := \
obj-$(CONFIG_R8188EU) := r8188eu.o
-ccflags-y += -D__CHECK_ENDIAN__ -I$(srctree)/$(src)/include
+ccflags-y += -I$(srctree)/$(src)/include
diff --git a/drivers/staging/rtl8188eu/core/rtw_cmd.c b/drivers/staging/rtl8188eu/core/rtw_cmd.c
index f1f4788dbd86..36109cec8706 100644
--- a/drivers/staging/rtl8188eu/core/rtw_cmd.c
+++ b/drivers/staging/rtl8188eu/core/rtw_cmd.c
@@ -308,7 +308,7 @@ u8 rtw_sitesurvey_cmd(struct adapter *padapter, struct ndis_802_11_ssid *ssid,
mod_timer(&pmlmepriv->scan_to_timer,
jiffies + msecs_to_jiffies(SCANNING_TIMEOUT));
- rtw_led_control(padapter, LED_CTL_SITE_SURVEY);
+ LedControl8188eu(padapter, LED_CTL_SITE_SURVEY);
pmlmepriv->scan_interval = SCAN_INTERVAL;/* 30*2 sec = 60sec */
} else {
@@ -335,7 +335,7 @@ u8 rtw_createbss_cmd(struct adapter *padapter)
u8 res = _SUCCESS;
- rtw_led_control(padapter, LED_CTL_START_TO_LINK);
+ LedControl8188eu(padapter, LED_CTL_START_TO_LINK);
if (pmlmepriv->assoc_ssid.SsidLength == 0)
RT_TRACE(_module_rtl871x_cmd_c_, _drv_info_, (" createbss for Any SSid:%s\n", pmlmepriv->assoc_ssid.Ssid));
@@ -379,7 +379,7 @@ u8 rtw_joinbss_cmd(struct adapter *padapter, struct wlan_network *pnetwork)
struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
- rtw_led_control(padapter, LED_CTL_START_TO_LINK);
+ LedControl8188eu(padapter, LED_CTL_START_TO_LINK);
if (pmlmepriv->assoc_ssid.SsidLength == 0)
RT_TRACE(_module_rtl871x_cmd_c_, _drv_info_, ("+Join cmd: Any SSid\n"));
diff --git a/drivers/staging/rtl8188eu/core/rtw_led.c b/drivers/staging/rtl8188eu/core/rtw_led.c
index 14461cf34037..c1478cff5854 100644
--- a/drivers/staging/rtl8188eu/core/rtw_led.c
+++ b/drivers/staging/rtl8188eu/core/rtw_led.c
@@ -30,7 +30,7 @@ void BlinkTimerCallback(unsigned long data)
if ((padapter->bSurpriseRemoved) || (padapter->bDriverStopped))
return;
- schedule_work(&(pLed->BlinkWorkItem));
+ schedule_work(&pLed->BlinkWorkItem);
}
/* */
@@ -60,7 +60,6 @@ void ResetLedStatus(struct LED_871x *pLed)
pLed->bLedNoLinkBlinkInProgress = false;
pLed->bLedLinkBlinkInProgress = false;
- pLed->bLedStartToLinkBlinkInProgress = false;
pLed->bLedScanBlinkInProgress = false;
}
@@ -72,10 +71,10 @@ void InitLed871x(struct adapter *padapter, struct LED_871x *pLed)
ResetLedStatus(pLed);
- setup_timer(&(pLed->BlinkTimer), BlinkTimerCallback,
+ setup_timer(&pLed->BlinkTimer, BlinkTimerCallback,
(unsigned long)pLed);
- INIT_WORK(&(pLed->BlinkWorkItem), BlinkWorkItemCallback);
+ INIT_WORK(&pLed->BlinkWorkItem, BlinkWorkItemCallback);
}
@@ -85,8 +84,8 @@ void InitLed871x(struct adapter *padapter, struct LED_871x *pLed)
/* */
void DeInitLed871x(struct LED_871x *pLed)
{
- cancel_work_sync(&(pLed->BlinkWorkItem));
- del_timer_sync(&(pLed->BlinkTimer));
+ cancel_work_sync(&pLed->BlinkWorkItem);
+ del_timer_sync(&pLed->BlinkTimer);
ResetLedStatus(pLed);
}
@@ -99,7 +98,7 @@ void DeInitLed871x(struct LED_871x *pLed)
static void SwLedBlink1(struct LED_871x *pLed)
{
struct adapter *padapter = pLed->padapter;
- struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
u8 bStopBlinking = false;
/* Change LED according to BlinkingLedState specified. */
@@ -247,9 +246,9 @@ static void SwLedBlink1(struct LED_871x *pLed)
/* ALPHA, added by chiyoko, 20090106 */
static void SwLedControlMode1(struct adapter *padapter, enum LED_CTL_MODE LedAction)
{
- struct led_priv *ledpriv = &(padapter->ledpriv);
- struct LED_871x *pLed = &(ledpriv->SwLed0);
- struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+ struct led_priv *ledpriv = &padapter->ledpriv;
+ struct LED_871x *pLed = &ledpriv->SwLed0;
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
switch (LedAction) {
case LED_CTL_POWER_ON:
@@ -259,11 +258,11 @@ static void SwLedControlMode1(struct adapter *padapter, enum LED_CTL_MODE LedAct
if (pLed->CurrLedState == LED_BLINK_SCAN || IS_LED_WPS_BLINKING(pLed))
return;
if (pLed->bLedLinkBlinkInProgress) {
- del_timer_sync(&(pLed->BlinkTimer));
+ del_timer_sync(&pLed->BlinkTimer);
pLed->bLedLinkBlinkInProgress = false;
}
if (pLed->bLedBlinkInProgress) {
- del_timer_sync(&(pLed->BlinkTimer));
+ del_timer_sync(&pLed->BlinkTimer);
pLed->bLedBlinkInProgress = false;
}
@@ -282,11 +281,11 @@ static void SwLedControlMode1(struct adapter *padapter, enum LED_CTL_MODE LedAct
if (pLed->CurrLedState == LED_BLINK_SCAN || IS_LED_WPS_BLINKING(pLed))
return;
if (pLed->bLedNoLinkBlinkInProgress) {
- del_timer_sync(&(pLed->BlinkTimer));
+ del_timer_sync(&pLed->BlinkTimer);
pLed->bLedNoLinkBlinkInProgress = false;
}
if (pLed->bLedBlinkInProgress) {
- del_timer_sync(&(pLed->BlinkTimer));
+ del_timer_sync(&pLed->BlinkTimer);
pLed->bLedBlinkInProgress = false;
}
pLed->bLedLinkBlinkInProgress = true;
@@ -306,15 +305,15 @@ static void SwLedControlMode1(struct adapter *padapter, enum LED_CTL_MODE LedAct
if (IS_LED_WPS_BLINKING(pLed))
return;
if (pLed->bLedNoLinkBlinkInProgress) {
- del_timer_sync(&(pLed->BlinkTimer));
+ del_timer_sync(&pLed->BlinkTimer);
pLed->bLedNoLinkBlinkInProgress = false;
}
if (pLed->bLedLinkBlinkInProgress) {
- del_timer_sync(&(pLed->BlinkTimer));
+ del_timer_sync(&pLed->BlinkTimer);
pLed->bLedLinkBlinkInProgress = false;
}
if (pLed->bLedBlinkInProgress) {
- del_timer_sync(&(pLed->BlinkTimer));
+ del_timer_sync(&pLed->BlinkTimer);
pLed->bLedBlinkInProgress = false;
}
pLed->bLedScanBlinkInProgress = true;
@@ -326,7 +325,7 @@ static void SwLedControlMode1(struct adapter *padapter, enum LED_CTL_MODE LedAct
pLed->BlinkingLedState = RTW_LED_ON;
mod_timer(&pLed->BlinkTimer, jiffies +
msecs_to_jiffies(LED_BLINK_SCAN_INTERVAL_ALPHA));
- }
+ }
break;
case LED_CTL_TX:
case LED_CTL_RX:
@@ -334,11 +333,11 @@ static void SwLedControlMode1(struct adapter *padapter, enum LED_CTL_MODE LedAct
if (pLed->CurrLedState == LED_BLINK_SCAN || IS_LED_WPS_BLINKING(pLed))
return;
if (pLed->bLedNoLinkBlinkInProgress) {
- del_timer_sync(&(pLed->BlinkTimer));
+ del_timer_sync(&pLed->BlinkTimer);
pLed->bLedNoLinkBlinkInProgress = false;
}
if (pLed->bLedLinkBlinkInProgress) {
- del_timer_sync(&(pLed->BlinkTimer));
+ del_timer_sync(&pLed->BlinkTimer);
pLed->bLedLinkBlinkInProgress = false;
}
pLed->bLedBlinkInProgress = true;
@@ -354,21 +353,21 @@ static void SwLedControlMode1(struct adapter *padapter, enum LED_CTL_MODE LedAct
break;
case LED_CTL_START_WPS: /* wait until xinpin finish */
case LED_CTL_START_WPS_BOTTON:
- if (!pLed->bLedWPSBlinkInProgress) {
+ if (!pLed->bLedWPSBlinkInProgress) {
if (pLed->bLedNoLinkBlinkInProgress) {
- del_timer_sync(&(pLed->BlinkTimer));
+ del_timer_sync(&pLed->BlinkTimer);
pLed->bLedNoLinkBlinkInProgress = false;
}
if (pLed->bLedLinkBlinkInProgress) {
- del_timer_sync(&(pLed->BlinkTimer));
+ del_timer_sync(&pLed->BlinkTimer);
pLed->bLedLinkBlinkInProgress = false;
}
if (pLed->bLedBlinkInProgress) {
- del_timer_sync(&(pLed->BlinkTimer));
+ del_timer_sync(&pLed->BlinkTimer);
pLed->bLedBlinkInProgress = false;
}
if (pLed->bLedScanBlinkInProgress) {
- del_timer_sync(&(pLed->BlinkTimer));
+ del_timer_sync(&pLed->BlinkTimer);
pLed->bLedScanBlinkInProgress = false;
}
pLed->bLedWPSBlinkInProgress = true;
@@ -379,27 +378,27 @@ static void SwLedControlMode1(struct adapter *padapter, enum LED_CTL_MODE LedAct
pLed->BlinkingLedState = RTW_LED_ON;
mod_timer(&pLed->BlinkTimer, jiffies +
msecs_to_jiffies(LED_BLINK_SCAN_INTERVAL_ALPHA));
- }
+ }
break;
case LED_CTL_STOP_WPS:
if (pLed->bLedNoLinkBlinkInProgress) {
- del_timer_sync(&(pLed->BlinkTimer));
+ del_timer_sync(&pLed->BlinkTimer);
pLed->bLedNoLinkBlinkInProgress = false;
}
if (pLed->bLedLinkBlinkInProgress) {
- del_timer_sync(&(pLed->BlinkTimer));
+ del_timer_sync(&pLed->BlinkTimer);
pLed->bLedLinkBlinkInProgress = false;
}
if (pLed->bLedBlinkInProgress) {
- del_timer_sync(&(pLed->BlinkTimer));
+ del_timer_sync(&pLed->BlinkTimer);
pLed->bLedBlinkInProgress = false;
}
if (pLed->bLedScanBlinkInProgress) {
- del_timer_sync(&(pLed->BlinkTimer));
+ del_timer_sync(&pLed->BlinkTimer);
pLed->bLedScanBlinkInProgress = false;
}
if (pLed->bLedWPSBlinkInProgress)
- del_timer_sync(&(pLed->BlinkTimer));
+ del_timer_sync(&pLed->BlinkTimer);
else
pLed->bLedWPSBlinkInProgress = true;
pLed->CurrLedState = LED_BLINK_WPS_STOP;
@@ -415,7 +414,7 @@ static void SwLedControlMode1(struct adapter *padapter, enum LED_CTL_MODE LedAct
break;
case LED_CTL_STOP_WPS_FAIL:
if (pLed->bLedWPSBlinkInProgress) {
- del_timer_sync(&(pLed->BlinkTimer));
+ del_timer_sync(&pLed->BlinkTimer);
pLed->bLedWPSBlinkInProgress = false;
}
pLed->bLedNoLinkBlinkInProgress = true;
@@ -431,23 +430,23 @@ static void SwLedControlMode1(struct adapter *padapter, enum LED_CTL_MODE LedAct
pLed->CurrLedState = RTW_LED_OFF;
pLed->BlinkingLedState = RTW_LED_OFF;
if (pLed->bLedNoLinkBlinkInProgress) {
- del_timer_sync(&(pLed->BlinkTimer));
+ del_timer_sync(&pLed->BlinkTimer);
pLed->bLedNoLinkBlinkInProgress = false;
}
if (pLed->bLedLinkBlinkInProgress) {
- del_timer_sync(&(pLed->BlinkTimer));
+ del_timer_sync(&pLed->BlinkTimer);
pLed->bLedLinkBlinkInProgress = false;
}
if (pLed->bLedBlinkInProgress) {
- del_timer_sync(&(pLed->BlinkTimer));
+ del_timer_sync(&pLed->BlinkTimer);
pLed->bLedBlinkInProgress = false;
}
if (pLed->bLedWPSBlinkInProgress) {
- del_timer_sync(&(pLed->BlinkTimer));
+ del_timer_sync(&pLed->BlinkTimer);
pLed->bLedWPSBlinkInProgress = false;
}
if (pLed->bLedScanBlinkInProgress) {
- del_timer_sync(&(pLed->BlinkTimer));
+ del_timer_sync(&pLed->BlinkTimer);
pLed->bLedScanBlinkInProgress = false;
}
SwLedOff(padapter, pLed);
@@ -475,15 +474,10 @@ void BlinkHandler(struct LED_871x *pLed)
void LedControl8188eu(struct adapter *padapter, enum LED_CTL_MODE LedAction)
{
- struct led_priv *ledpriv = &(padapter->ledpriv);
-
if ((padapter->bSurpriseRemoved) || (padapter->bDriverStopped) ||
(!padapter->hw_init_completed))
return;
- if (!ledpriv->bRegUseLed)
- return;
-
if ((padapter->pwrctrlpriv.rf_pwrstate != rf_on &&
padapter->pwrctrlpriv.rfoff_reason > RF_CHANGE_BY_PS) &&
(LedAction == LED_CTL_TX || LedAction == LED_CTL_RX ||
diff --git a/drivers/staging/rtl8188eu/core/rtw_mlme.c b/drivers/staging/rtl8188eu/core/rtw_mlme.c
index ee2dcd05010f..032f783b0d83 100644
--- a/drivers/staging/rtl8188eu/core/rtw_mlme.c
+++ b/drivers/staging/rtl8188eu/core/rtw_mlme.c
@@ -801,7 +801,7 @@ void rtw_indicate_connect(struct adapter *padapter)
if (!check_fwstate(&padapter->mlmepriv, _FW_LINKED)) {
set_fwstate(pmlmepriv, _FW_LINKED);
- rtw_led_control(padapter, LED_CTL_LINK);
+ LedControl8188eu(padapter, LED_CTL_LINK);
rtw_os_indicate_connect(padapter);
}
@@ -833,7 +833,7 @@ void rtw_indicate_disconnect(struct adapter *padapter)
rtw_os_indicate_disconnect(padapter);
_clr_fwstate_(pmlmepriv, _FW_LINKED);
- rtw_led_control(padapter, LED_CTL_NO_LINK);
+ LedControl8188eu(padapter, LED_CTL_NO_LINK);
rtw_clear_scan_deny(padapter);
}
diff --git a/drivers/staging/rtl8188eu/core/rtw_mlme_ext.c b/drivers/staging/rtl8188eu/core/rtw_mlme_ext.c
index fb13df586441..d9c114776cab 100644
--- a/drivers/staging/rtl8188eu/core/rtw_mlme_ext.c
+++ b/drivers/staging/rtl8188eu/core/rtw_mlme_ext.c
@@ -133,7 +133,9 @@ static struct rt_channel_plan_map RTW_ChannelPlanMap[RT_CHANNEL_DOMAIN_MAX] = {
{0x03}, /* 0x41, RT_CHANNEL_DOMAIN_GLOBAL_DOAMIN_2G */
};
-static struct rt_channel_plan_map RTW_CHANNEL_PLAN_MAP_REALTEK_DEFINE = {0x03}; /* use the combination for max channel numbers */
+static const struct rt_channel_plan_map RTW_CHANNEL_PLAN_MAP_REALTEK_DEFINE = {
+ 0x03
+}; /* use the combination for max channel numbers */
/*
* Search the @param channel_num in given @param channel_set
@@ -667,10 +669,10 @@ static int issue_probereq(struct adapter *padapter, struct ndis_802_11_ssid *pss
get_rate_set(padapter, bssrate, &bssrate_len);
if (bssrate_len > 8) {
- pframe = rtw_set_ie(pframe, _SUPPORTEDRATES_IE_ , 8, bssrate, &(pattrib->pktlen));
- pframe = rtw_set_ie(pframe, _EXT_SUPPORTEDRATES_IE_ , (bssrate_len - 8), (bssrate + 8), &(pattrib->pktlen));
+ pframe = rtw_set_ie(pframe, _SUPPORTEDRATES_IE_, 8, bssrate, &(pattrib->pktlen));
+ pframe = rtw_set_ie(pframe, _EXT_SUPPORTEDRATES_IE_, (bssrate_len - 8), (bssrate + 8), &(pattrib->pktlen));
} else {
- pframe = rtw_set_ie(pframe, _SUPPORTEDRATES_IE_ , bssrate_len , bssrate, &(pattrib->pktlen));
+ pframe = rtw_set_ie(pframe, _SUPPORTEDRATES_IE_, bssrate_len, bssrate, &(pattrib->pktlen));
}
/* add wps_ie for wps2.0 */
@@ -999,7 +1001,7 @@ static void issue_asocrsp(struct adapter *padapter, unsigned short status,
}
if (pmlmeinfo->assoc_AP_vendor == HT_IOT_PEER_REALTEK)
- pframe = rtw_set_ie(pframe, _VENDOR_SPECIFIC_IE_, 6 , REALTEK_96B_IE, &(pattrib->pktlen));
+ pframe = rtw_set_ie(pframe, _VENDOR_SPECIFIC_IE_, 6, REALTEK_96B_IE, &(pattrib->pktlen));
/* add WPS IE ie for wps 2.0 */
if (pmlmepriv->wps_assoc_resp_ie && pmlmepriv->wps_assoc_resp_ie_len > 0) {
@@ -1120,10 +1122,10 @@ static void issue_assocreq(struct adapter *padapter)
if (bssrate_len > 8) {
- pframe = rtw_set_ie(pframe, _SUPPORTEDRATES_IE_ , 8, bssrate, &(pattrib->pktlen));
- pframe = rtw_set_ie(pframe, _EXT_SUPPORTEDRATES_IE_ , (bssrate_len - 8), (bssrate + 8), &(pattrib->pktlen));
+ pframe = rtw_set_ie(pframe, _SUPPORTEDRATES_IE_, 8, bssrate, &(pattrib->pktlen));
+ pframe = rtw_set_ie(pframe, _EXT_SUPPORTEDRATES_IE_, (bssrate_len - 8), (bssrate + 8), &(pattrib->pktlen));
} else {
- pframe = rtw_set_ie(pframe, _SUPPORTEDRATES_IE_ , bssrate_len , bssrate, &(pattrib->pktlen));
+ pframe = rtw_set_ie(pframe, _SUPPORTEDRATES_IE_, bssrate_len, bssrate, &(pattrib->pktlen));
}
/* RSN */
@@ -1165,7 +1167,7 @@ static void issue_assocreq(struct adapter *padapter)
memcpy(&pmlmeinfo->HT_caps.mcs, MCS_rate_2R, 16);
break;
}
- pframe = rtw_set_ie(pframe, _HT_CAPABILITY_IE_, ie_len , (u8 *)(&(pmlmeinfo->HT_caps)), &(pattrib->pktlen));
+ pframe = rtw_set_ie(pframe, _HT_CAPABILITY_IE_, ie_len, (u8 *)(&(pmlmeinfo->HT_caps)), &(pattrib->pktlen));
}
}
@@ -1194,7 +1196,7 @@ static void issue_assocreq(struct adapter *padapter)
}
if (pmlmeinfo->assoc_AP_vendor == HT_IOT_PEER_REALTEK)
- pframe = rtw_set_ie(pframe, _VENDOR_SPECIFIC_IE_, 6 , REALTEK_96B_IE, &(pattrib->pktlen));
+ pframe = rtw_set_ie(pframe, _VENDOR_SPECIFIC_IE_, 6, REALTEK_96B_IE, &(pattrib->pktlen));
pattrib->last_txcmdsz = pattrib->pktlen;
dump_mgntframe(padapter, pmgntframe);
@@ -2644,7 +2646,7 @@ static unsigned int OnBeacon(struct adapter *padapter,
ret = rtw_check_bcn_info(padapter, pframe, len);
if (!ret) {
DBG_88E_LEVEL(_drv_info_, "ap has changed, disconnect now\n ");
- receive_disconnect(padapter, pmlmeinfo->network.MacAddress , 65535);
+ receive_disconnect(padapter, pmlmeinfo->network.MacAddress, 65535);
return _SUCCESS;
}
/* update WMM, ERP in the beacon */
@@ -2802,7 +2804,7 @@ static unsigned int OnAuth(struct adapter *padapter,
/* checking for challenging txt... */
DBG_88E("checking for challenging txt...\n");
- p = rtw_get_ie(pframe + WLAN_HDR_A3_LEN + 4 + _AUTH_IE_OFFSET_ , _CHLGETXT_IE_, (int *)&ie_len,
+ p = rtw_get_ie(pframe + WLAN_HDR_A3_LEN + 4 + _AUTH_IE_OFFSET_, _CHLGETXT_IE_, (int *)&ie_len,
len - WLAN_HDR_A3_LEN - _AUTH_IE_OFFSET_ - 4);
if ((p == NULL) || (ie_len <= 0)) {
@@ -3046,7 +3048,7 @@ static unsigned int OnAssocReq(struct adapter *padapter,
memcpy(supportRate, p+2, ie_len);
supportRateNum = ie_len;
- p = rtw_get_ie(pframe + WLAN_HDR_A3_LEN + ie_offset, _EXT_SUPPORTEDRATES_IE_ , &ie_len,
+ p = rtw_get_ie(pframe + WLAN_HDR_A3_LEN + ie_offset, _EXT_SUPPORTEDRATES_IE_, &ie_len,
pkt_len - WLAN_HDR_A3_LEN - ie_offset);
if (p != NULL) {
if (supportRateNum <= sizeof(supportRate)) {
@@ -3146,7 +3148,7 @@ static unsigned int OnAssocReq(struct adapter *padapter,
if (pmlmepriv->wps_beacon_ie) {
u8 selected_registrar = 0;
- rtw_get_wps_attr_content(pmlmepriv->wps_beacon_ie, pmlmepriv->wps_beacon_ie_len, WPS_ATTR_SELECTED_REGISTRAR , &selected_registrar, NULL);
+ rtw_get_wps_attr_content(pmlmepriv->wps_beacon_ie, pmlmepriv->wps_beacon_ie_len, WPS_ATTR_SELECTED_REGISTRAR, &selected_registrar, NULL);
if (!selected_registrar) {
DBG_88E("selected_registrar is false , or AP is not ready to do WPS\n");
@@ -3511,7 +3513,7 @@ static unsigned int OnDeAuth(struct adapter *padapter,
DBG_88E_LEVEL(_drv_always_, "sta recv deauth reason code(%d) sta:%pM\n",
reason, GetAddr3Ptr(pframe));
- receive_disconnect(padapter, GetAddr3Ptr(pframe) , reason);
+ receive_disconnect(padapter, GetAddr3Ptr(pframe), reason);
}
pmlmepriv->LinkDetectInfo.bBusyTraffic = false;
return _SUCCESS;
diff --git a/drivers/staging/rtl8188eu/core/rtw_pwrctrl.c b/drivers/staging/rtl8188eu/core/rtw_pwrctrl.c
index 0b70fe7d3b72..4032121a06f3 100644
--- a/drivers/staging/rtl8188eu/core/rtw_pwrctrl.c
+++ b/drivers/staging/rtl8188eu/core/rtw_pwrctrl.c
@@ -56,7 +56,7 @@ static int rtw_hw_suspend(struct adapter *padapter)
if (check_fwstate(pmlmepriv, _FW_LINKED)) {
_clr_fwstate_(pmlmepriv, _FW_LINKED);
- rtw_led_control(padapter, LED_CTL_NO_LINK);
+ LedControl8188eu(padapter, LED_CTL_NO_LINK);
rtw_os_indicate_disconnect(padapter);
@@ -94,7 +94,7 @@ static int rtw_hw_resume(struct adapter *padapter)
pwrpriv->bips_processing = true;
rtw_reset_drv_sw(padapter);
- if (pm_netdev_open(pnetdev, false) != 0) {
+ if (ips_netdrv_open((struct adapter *)rtw_netdev_priv(pnetdev)) != _SUCCESS) {
mutex_unlock(&pwrpriv->mutex_lock);
goto error_exit;
}
diff --git a/drivers/staging/rtl8188eu/core/rtw_recv.c b/drivers/staging/rtl8188eu/core/rtw_recv.c
index b87cbbbee054..3e6edb63d36b 100644
--- a/drivers/staging/rtl8188eu/core/rtw_recv.c
+++ b/drivers/staging/rtl8188eu/core/rtw_recv.c
@@ -66,16 +66,12 @@ int _rtw_init_recv_priv(struct recv_priv *precvpriv, struct adapter *padapter)
precvpriv->adapter = padapter;
- precvpriv->free_recvframe_cnt = NR_RECVFRAME;
-
precvpriv->pallocated_frame_buf = vzalloc(NR_RECVFRAME * sizeof(struct recv_frame) + RXFRAME_ALIGN_SZ);
if (!precvpriv->pallocated_frame_buf)
return _FAIL;
- precvpriv->precv_frame_buf = PTR_ALIGN(precvpriv->pallocated_frame_buf, RXFRAME_ALIGN_SZ);
-
- precvframe = (struct recv_frame *)precvpriv->precv_frame_buf;
+ precvframe = PTR_ALIGN(precvpriv->pallocated_frame_buf, RXFRAME_ALIGN_SZ);
for (i = 0; i < NR_RECVFRAME; i++) {
INIT_LIST_HEAD(&(precvframe->list));
@@ -83,15 +79,12 @@ int _rtw_init_recv_priv(struct recv_priv *precvpriv, struct adapter *padapter)
list_add_tail(&(precvframe->list),
&(precvpriv->free_recv_queue.queue));
- rtw_os_recv_resource_alloc(precvframe);
-
+ precvframe->pkt = NULL;
precvframe->len = 0;
precvframe->adapter = padapter;
precvframe++;
}
- precvpriv->rx_pending_cnt = 1;
-
res = rtw_hal_init_recv_priv(padapter);
setup_timer(&precvpriv->signal_stat_timer,
@@ -120,20 +113,11 @@ void _rtw_free_recv_priv(struct recv_priv *precvpriv)
struct recv_frame *_rtw_alloc_recvframe(struct __queue *pfree_recv_queue)
{
struct recv_frame *hdr;
- struct adapter *padapter;
- struct recv_priv *precvpriv;
hdr = list_first_entry_or_null(&pfree_recv_queue->queue,
struct recv_frame, list);
- if (hdr) {
+ if (hdr)
list_del_init(&hdr->list);
- padapter = hdr->adapter;
- if (padapter) {
- precvpriv = &padapter->recvpriv;
- if (pfree_recv_queue == &precvpriv->free_recv_queue)
- precvpriv->free_recvframe_cnt--;
- }
- }
return hdr;
}
@@ -154,13 +138,8 @@ struct recv_frame *rtw_alloc_recvframe(struct __queue *pfree_recv_queue)
int rtw_free_recvframe(struct recv_frame *precvframe,
struct __queue *pfree_recv_queue)
{
- struct adapter *padapter;
- struct recv_priv *precvpriv;
-
if (!precvframe)
return _FAIL;
- padapter = precvframe->adapter;
- precvpriv = &padapter->recvpriv;
if (precvframe->pkt) {
dev_kfree_skb_any(precvframe->pkt);/* free skb by driver */
precvframe->pkt = NULL;
@@ -174,29 +153,16 @@ int rtw_free_recvframe(struct recv_frame *precvframe,
list_add_tail(&(precvframe->list), get_list_head(pfree_recv_queue));
- if (padapter != NULL) {
- if (pfree_recv_queue == &precvpriv->free_recv_queue)
- precvpriv->free_recvframe_cnt++;
- }
-
- spin_unlock_bh(&pfree_recv_queue->lock);
+ spin_unlock_bh(&pfree_recv_queue->lock);
return _SUCCESS;
}
int _rtw_enqueue_recvframe(struct recv_frame *precvframe, struct __queue *queue)
{
- struct adapter *padapter = precvframe->adapter;
- struct recv_priv *precvpriv = &padapter->recvpriv;
-
list_del_init(&(precvframe->list));
list_add_tail(&(precvframe->list), get_list_head(queue));
- if (padapter != NULL) {
- if (queue == &precvpriv->free_recv_queue)
- precvpriv->free_recvframe_cnt++;
- }
-
return _SUCCESS;
}
@@ -1294,7 +1260,7 @@ static int validate_recv_frame(struct adapter *adapter,
retval = _FAIL; /* only data frame return _SUCCESS */
break;
case WIFI_DATA_TYPE: /* data */
- rtw_led_control(adapter, LED_CTL_RX);
+ LedControl8188eu(adapter, LED_CTL_RX);
pattrib->qos = (subtype & BIT(7)) ? 1 : 0;
retval = validate_recv_data_frame(adapter, precv_frame);
if (retval == _FAIL) {
@@ -1989,7 +1955,7 @@ static int recv_func_posthandle(struct adapter *padapter,
struct __queue *pfree_recv_queue = &padapter->recvpriv.free_recv_queue;
/* DATA FRAME */
- rtw_led_control(padapter, LED_CTL_RX);
+ LedControl8188eu(padapter, LED_CTL_RX);
prframe = decryptor(padapter, prframe);
if (prframe == NULL) {
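/*
 * Aside, not from the patch: PTR_ALIGN(p, a) from <linux/kernel.h>
 * rounds p up to the next a-byte boundary (a must be a power of two),
 * which is exactly what the removed open-coded arithmetic did.
 * Over-allocating by RXFRAME_ALIGN_SZ, as _rtw_init_recv_priv() does
 * above, keeps the aligned array inside the buffer. Minimal sketch:
 */
void *raw = vzalloc(NR_RECVFRAME * sizeof(struct recv_frame) + RXFRAME_ALIGN_SZ);
if (!raw)
	return _FAIL;
/* first >= raw, RXFRAME_ALIGN_SZ-aligned, first - raw < RXFRAME_ALIGN_SZ */
struct recv_frame *first = PTR_ALIGN(raw, RXFRAME_ALIGN_SZ);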
diff --git a/drivers/staging/rtl8188eu/core/rtw_sta_mgt.c b/drivers/staging/rtl8188eu/core/rtw_sta_mgt.c
index a71e25294add..941d1a069d20 100644
--- a/drivers/staging/rtl8188eu/core/rtw_sta_mgt.c
+++ b/drivers/staging/rtl8188eu/core/rtw_sta_mgt.c
@@ -310,7 +310,6 @@ u32 rtw_free_stainfo(struct adapter *padapter, struct sta_info *psta)
/* for A-MPDU Rx reordering buffer control, cancel reordering_ctrl_timer */
for (i = 0; i < 16; i++) {
struct list_head *phead, *plist;
- struct recv_frame *prhdr;
struct recv_frame *prframe;
struct __queue *ppending_recvframe_queue;
struct __queue *pfree_recv_queue = &padapter->recvpriv.free_recv_queue;
@@ -327,8 +326,7 @@ u32 rtw_free_stainfo(struct adapter *padapter, struct sta_info *psta)
plist = phead->next;
while (!list_empty(phead)) {
- prhdr = container_of(plist, struct recv_frame, list);
- prframe = (struct recv_frame *)prhdr;
+ prframe = container_of(plist, struct recv_frame, list);
plist = plist->next;
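/*
 * Aside, not from the patch: container_of(ptr, type, member) converts a
 * pointer to 'member' back into a pointer to its enclosing 'type', so
 * the removed two-step cast through prhdr was redundant. The usual
 * read-only traversal idiom looks like this (illustrative; the loop
 * above deletes entries, which is why it walks with list_empty()
 * instead):
 */
struct list_head *pos;

list_for_each(pos, phead) {
	struct recv_frame *f = container_of(pos, struct recv_frame, list);
	/* ... operate on f ... */
}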
diff --git a/drivers/staging/rtl8188eu/core/rtw_xmit.c b/drivers/staging/rtl8188eu/core/rtw_xmit.c
index 0f8b8e0bffdf..b60b126b860e 100644
--- a/drivers/staging/rtl8188eu/core/rtw_xmit.c
+++ b/drivers/staging/rtl8188eu/core/rtw_xmit.c
@@ -220,7 +220,6 @@ void _rtw_free_xmit_priv(struct xmit_priv *pxmitpriv)
struct adapter *padapter = pxmitpriv->adapter;
struct xmit_frame *pxmitframe = (struct xmit_frame *)pxmitpriv->pxmit_frame_buf;
struct xmit_buf *pxmitbuf = (struct xmit_buf *)pxmitpriv->pxmitbuf;
- u32 max_xmit_extbuf_size = MAX_XMIT_EXTBUF_SZ;
u32 num_xmit_extbuf = NR_XMIT_EXTBUFF;
if (pxmitpriv->pxmit_frame_buf == NULL)
@@ -233,7 +232,7 @@ void _rtw_free_xmit_priv(struct xmit_priv *pxmitpriv)
}
for (i = 0; i < NR_XMITBUFF; i++) {
- rtw_os_xmit_resource_free(padapter, pxmitbuf, (MAX_XMITBUF_SZ + XMITBUF_ALIGN_SZ));
+ rtw_os_xmit_resource_free(pxmitbuf);
pxmitbuf++;
}
@@ -243,7 +242,7 @@ void _rtw_free_xmit_priv(struct xmit_priv *pxmitpriv)
/* free xmit extension buff */
pxmitbuf = (struct xmit_buf *)pxmitpriv->pxmit_extbuf;
for (i = 0; i < num_xmit_extbuf; i++) {
- rtw_os_xmit_resource_free(padapter, pxmitbuf, (max_xmit_extbuf_size + XMITBUF_ALIGN_SZ));
+ rtw_os_xmit_resource_free(pxmitbuf);
pxmitbuf++;
}
@@ -1064,7 +1063,7 @@ s32 rtw_xmitframe_coalesce(struct adapter *padapter, struct sk_buff *pkt, struct
frg_inx++;
- if (bmcst || rtw_endofpktfile(&pktfile)) {
+ if (bmcst || pktfile.pkt_len == 0) {
pattrib->nr_frags = frg_inx;
pattrib->last_txcmdsz = pattrib->hdrlen + pattrib->iv_len + ((pattrib->nr_frags == 1) ? llc_sz : 0) +
@@ -1677,7 +1676,7 @@ s32 rtw_xmit(struct adapter *padapter, struct sk_buff **ppkt)
}
pxmitframe->pkt = *ppkt;
- rtw_led_control(padapter, LED_CTL_TX);
+ LedControl8188eu(padapter, LED_CTL_TX);
pxmitframe->attrib.qsel = pxmitframe->attrib.priority;
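/*
 * Context, not part of the patch: the open-coded pkt_len test in
 * rtw_xmitframe_coalesce() above matches the helper this series removes
 * from xmit_linux.c (see the rtw_endofpktfile() deletion further down);
 * the wrapper reduced to this one-line test (helper name here is ours):
 */
static inline int pktfile_is_done(const struct pkt_file *pfile)
{
	return pfile->pkt_len == 0;	/* no payload bytes left to read */
}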
diff --git a/drivers/staging/rtl8188eu/hal/odm.c b/drivers/staging/rtl8188eu/hal/odm.c
index d983a8029f4c..16476e735011 100644
--- a/drivers/staging/rtl8188eu/hal/odm.c
+++ b/drivers/staging/rtl8188eu/hal/odm.c
@@ -991,7 +991,6 @@ void odm_TXPowerTrackingThermalMeterInit(struct odm_dm_struct *pDM_Odm)
{
pDM_Odm->RFCalibrateInfo.bTXPowerTracking = true;
pDM_Odm->RFCalibrateInfo.TXPowercount = 0;
- pDM_Odm->RFCalibrateInfo.bTXPowerTrackingInit = false;
if (*(pDM_Odm->mp_mode) != 1)
pDM_Odm->RFCalibrateInfo.TxPowerTrackControl = true;
MSG_88E("pDM_Odm TxPowerTrackControl = %d\n", pDM_Odm->RFCalibrateInfo.TxPowerTrackControl);
diff --git a/drivers/staging/rtl8188eu/hal/phy.c b/drivers/staging/rtl8188eu/hal/phy.c
index 5192ef70bcfc..35c91e06cc47 100644
--- a/drivers/staging/rtl8188eu/hal/phy.c
+++ b/drivers/staging/rtl8188eu/hal/phy.c
@@ -40,12 +40,11 @@ static u32 cal_bit_shift(u32 bitmask)
u32 phy_query_bb_reg(struct adapter *adapt, u32 regaddr, u32 bitmask)
{
- u32 return_value = 0, original_value, bit_shift;
+ u32 original_value, bit_shift;
original_value = usb_read32(adapt, regaddr);
bit_shift = cal_bit_shift(bitmask);
- return_value = (original_value & bitmask) >> bit_shift;
- return return_value;
+ return (original_value & bitmask) >> bit_shift;
}
void phy_set_bb_reg(struct adapter *adapt, u32 regaddr, u32 bitmask, u32 data)
@@ -119,12 +118,11 @@ static void rf_serial_write(struct adapter *adapt,
u32 rtw_hal_read_rfreg(struct adapter *adapt, enum rf_radio_path rf_path,
u32 reg_addr, u32 bit_mask)
{
- u32 original_value, readback_value, bit_shift;
+ u32 original_value, bit_shift;
original_value = rf_serial_read(adapt, rf_path, reg_addr);
bit_shift = cal_bit_shift(bit_mask);
- readback_value = (original_value & bit_mask) >> bit_shift;
- return readback_value;
+ return (original_value & bit_mask) >> bit_shift;
}
void phy_set_rf_reg(struct adapter *adapt, enum rf_radio_path rf_path,
@@ -210,13 +208,6 @@ static void phy_set_bw_mode_callback(struct adapter *adapt)
u8 reg_bw_opmode;
u8 reg_prsr_rsc;
- if (hal_data->rf_chip == RF_PSEUDO_11N)
- return;
-
- /* There is no 40MHz mode in RF_8225. */
- if (hal_data->rf_chip == RF_8225)
- return;
-
if (adapt->bDriverStopped)
return;
@@ -265,8 +256,7 @@ static void phy_set_bw_mode_callback(struct adapter *adapt)
}
/* Set RF related register */
- if (hal_data->rf_chip == RF_6052)
- rtl88eu_phy_rf6052_set_bandwidth(adapt, hal_data->CurrentChannelBW);
+ rtl88eu_phy_rf6052_set_bandwidth(adapt, hal_data->CurrentChannelBW);
}
void rtw_hal_set_bwmode(struct adapter *adapt, enum ht_channel_width bandwidth,
@@ -286,7 +276,6 @@ void rtw_hal_set_bwmode(struct adapter *adapt, enum ht_channel_width bandwidth,
static void phy_sw_chnl_callback(struct adapter *adapt, u8 channel)
{
- u8 rf_path;
u32 param1, param2;
struct hal_data_8188e *hal_data = adapt->HalData;
@@ -294,12 +283,10 @@ static void phy_sw_chnl_callback(struct adapter *adapt, u8 channel)
param1 = RF_CHNLBW;
param2 = channel;
- for (rf_path = 0; rf_path < hal_data->NumTotalRFPath; rf_path++) {
- hal_data->RfRegChnlVal[rf_path] = (hal_data->RfRegChnlVal[rf_path] &
- 0xfffffc00) | param2;
- phy_set_rf_reg(adapt, (enum rf_radio_path)rf_path, param1,
- bRFRegOffsetMask, hal_data->RfRegChnlVal[rf_path]);
- }
+ hal_data->RfRegChnlVal[0] = (hal_data->RfRegChnlVal[0] &
+ 0xfffffc00) | param2;
+ phy_set_rf_reg(adapt, 0, param1,
+ bRFRegOffsetMask, hal_data->RfRegChnlVal[0]);
}
void rtw_hal_set_chan(struct adapter *adapt, u8 channel)
@@ -307,9 +294,6 @@ void rtw_hal_set_chan(struct adapter *adapt, u8 channel)
struct hal_data_8188e *hal_data = adapt->HalData;
u8 tmpchannel = hal_data->CurrentChannel;
- if (hal_data->rf_chip == RF_PSEUDO_11N)
- return;
-
if (channel == 0)
channel = 1;
@@ -407,9 +391,8 @@ void rtl88eu_dm_txpower_tracking_callback_thermalmeter(struct adapter *adapt)
s8 ofdm_index[2], cck_index = 0;
s8 ofdm_index_old[2] = {0, 0}, cck_index_old = 0;
u32 i = 0, j = 0;
- bool is2t = false;
- u8 ofdm_min_index = 6, rf; /* OFDM BB Swing should be less than +3.0dB */
+ u8 ofdm_min_index = 6; /* OFDM BB Swing should be less than +3.0dB */
s8 ofdm_index_mapping[2][index_mapping_NUM_88E] = {
/* 2.4G, decrease power */
{0, 0, 2, 3, 4, 4, 5, 6, 7, 7, 8, 9, 10, 10, 11},
@@ -427,18 +410,12 @@ void rtl88eu_dm_txpower_tracking_callback_thermalmeter(struct adapter *adapt)
dm_txpwr_track_setpwr(dm_odm);
dm_odm->RFCalibrateInfo.TXPowerTrackingCallbackCnt++;
- dm_odm->RFCalibrateInfo.bTXPowerTrackingInit = true;
dm_odm->RFCalibrateInfo.RegA24 = 0x090e1317;
thermal_val = (u8)rtw_hal_read_rfreg(adapt, RF_PATH_A,
RF_T_METER_88E, 0xfc00);
- if (is2t)
- rf = 2;
- else
- rf = 1;
-
if (thermal_val) {
/* Query OFDM path A default setting */
ele_d = phy_query_bb_reg(adapt, rOFDM0_XATxIQImbalance, bMaskDWord)&bMaskOFDM_D;
@@ -450,17 +427,6 @@ void rtl88eu_dm_txpower_tracking_callback_thermalmeter(struct adapter *adapt)
}
}
- /* Query OFDM path B default setting */
- if (is2t) {
- ele_d = phy_query_bb_reg(adapt, rOFDM0_XBTxIQImbalance, bMaskDWord)&bMaskOFDM_D;
- for (i = 0; i < OFDM_TABLE_SIZE_92D; i++) {
- if (ele_d == (OFDMSwingTable[i]&bMaskOFDM_D)) {
- ofdm_index_old[1] = (u8)i;
- break;
- }
- }
- }
-
/* Query CCK default setting From 0xa24 */
temp_cck = dm_odm->RFCalibrateInfo.RegA24;
@@ -479,8 +445,7 @@ void rtl88eu_dm_txpower_tracking_callback_thermalmeter(struct adapter *adapt)
dm_odm->RFCalibrateInfo.ThermalValue_LCK = thermal_val;
dm_odm->RFCalibrateInfo.ThermalValue_IQK = thermal_val;
- for (i = 0; i < rf; i++)
- dm_odm->RFCalibrateInfo.OFDM_index[i] = ofdm_index_old[i];
+ dm_odm->RFCalibrateInfo.OFDM_index[0] = ofdm_index_old[0];
dm_odm->RFCalibrateInfo.CCK_index = cck_index_old;
}
@@ -539,13 +504,11 @@ void rtl88eu_dm_txpower_tracking_callback_thermalmeter(struct adapter *adapt)
offset = index_mapping_NUM_88E-1;
/* Updating ofdm_index values with new OFDM / CCK offset */
- for (i = 0; i < rf; i++) {
- ofdm_index[i] = dm_odm->RFCalibrateInfo.OFDM_index[i] + ofdm_index_mapping[j][offset];
- if (ofdm_index[i] > OFDM_TABLE_SIZE_92D-1)
- ofdm_index[i] = OFDM_TABLE_SIZE_92D-1;
- else if (ofdm_index[i] < ofdm_min_index)
- ofdm_index[i] = ofdm_min_index;
- }
+ ofdm_index[0] = dm_odm->RFCalibrateInfo.OFDM_index[0] + ofdm_index_mapping[j][offset];
+ if (ofdm_index[0] > OFDM_TABLE_SIZE_92D-1)
+ ofdm_index[0] = OFDM_TABLE_SIZE_92D-1;
+ else if (ofdm_index[0] < ofdm_min_index)
+ ofdm_index[0] = ofdm_min_index;
cck_index = dm_odm->RFCalibrateInfo.CCK_index + ofdm_index_mapping[j][offset];
if (cck_index > CCK_TABLE_SIZE-1)
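/*
 * Aside, illustrative only: phy_query_bb_reg() and rtw_hal_read_rfreg()
 * now both reduce to "read, mask, shift by the mask's lowest set bit".
 * cal_bit_shift() in phy.c performs the bit scan; a stand-alone
 * equivalent of the whole field extraction, for reference (not the
 * driver's code):
 */
static u32 reg_field_get(u32 reg_val, u32 bitmask)
{
	u32 shift = 0;

	if (!bitmask)			/* no field selected */
		return 0;
	while (!(bitmask & BIT(shift)))
		shift++;
	return (reg_val & bitmask) >> shift;
}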
diff --git a/drivers/staging/rtl8188eu/hal/rf.c b/drivers/staging/rtl8188eu/hal/rf.c
index 2f3edf0f850a..8f8c9de6a9bc 100644
--- a/drivers/staging/rtl8188eu/hal/rf.c
+++ b/drivers/staging/rtl8188eu/hal/rf.c
@@ -61,8 +61,6 @@ void rtl88eu_phy_rf6052_set_cck_txpower(struct adapter *adapt, u8 *powerlevel)
(powerlevel[idx1]<<8) |
(powerlevel[idx1]<<16) |
(powerlevel[idx1]<<24);
- if (tx_agc[idx1] > 0x20 && hal_data->ExternalPA)
- tx_agc[idx1] = 0x20;
}
} else {
if (pdmpriv->DynamicTxHighPowerLvl == TxHighPwrLevel_Level1) {
@@ -139,17 +137,15 @@ static void getpowerbase88e(struct adapter *adapt, u8 *pwr_level_ofdm,
(powerbase0<<8) | powerbase0;
*(ofdmbase+i) = powerbase0;
}
- for (i = 0; i < adapt->HalData->NumTotalRFPath; i++) {
- /* Check HT20 to HT40 diff */
- if (adapt->HalData->CurrentChannelBW == HT_CHANNEL_WIDTH_20)
- powerlevel[i] = pwr_level_bw20[i];
- else
- powerlevel[i] = pwr_level_bw40[i];
- powerbase1 = powerlevel[i];
- powerbase1 = (powerbase1<<24) | (powerbase1<<16) |
- (powerbase1<<8) | powerbase1;
- *(mcs_base+i) = powerbase1;
- }
+ /* Check HT20 to HT40 diff */
+ if (adapt->HalData->CurrentChannelBW == HT_CHANNEL_WIDTH_20)
+ powerlevel[0] = pwr_level_bw20[0];
+ else
+ powerlevel[0] = pwr_level_bw40[0];
+ powerbase1 = powerlevel[0];
+ powerbase1 = (powerbase1<<24) | (powerbase1<<16) |
+ (powerbase1<<8) | powerbase1;
+ *mcs_base = powerbase1;
}
static void get_rx_power_val_by_reg(struct adapter *adapt, u8 channel,
u8 index, u32 *powerbase0, u32 *powerbase1,
diff --git a/drivers/staging/rtl8188eu/hal/rf_cfg.c b/drivers/staging/rtl8188eu/hal/rf_cfg.c
index dde64417e66a..9712d7b74345 100644
--- a/drivers/staging/rtl8188eu/hal/rf_cfg.c
+++ b/drivers/staging/rtl8188eu/hal/rf_cfg.c
@@ -230,79 +230,33 @@ static bool rf6052_conf_para(struct adapter *adapt)
{
struct hal_data_8188e *hal_data = adapt->HalData;
u32 u4val = 0;
- u8 rfpath;
bool rtstatus = true;
struct bb_reg_def *pphyreg;
- for (rfpath = 0; rfpath < hal_data->NumTotalRFPath; rfpath++) {
- pphyreg = &hal_data->PHYRegDef[rfpath];
+ pphyreg = &hal_data->PHYRegDef[RF90_PATH_A];
+ u4val = phy_query_bb_reg(adapt, pphyreg->rfintfs, BRFSI_RFENV);
- switch (rfpath) {
- case RF90_PATH_A:
- case RF90_PATH_C:
- u4val = phy_query_bb_reg(adapt, pphyreg->rfintfs,
- BRFSI_RFENV);
- break;
- case RF90_PATH_B:
- case RF90_PATH_D:
- u4val = phy_query_bb_reg(adapt, pphyreg->rfintfs,
- BRFSI_RFENV << 16);
- break;
- }
+ phy_set_bb_reg(adapt, pphyreg->rfintfe, BRFSI_RFENV << 16, 0x1);
+ udelay(1);
- phy_set_bb_reg(adapt, pphyreg->rfintfe, BRFSI_RFENV << 16, 0x1);
- udelay(1);
+ phy_set_bb_reg(adapt, pphyreg->rfintfo, BRFSI_RFENV, 0x1);
+ udelay(1);
- phy_set_bb_reg(adapt, pphyreg->rfintfo, BRFSI_RFENV, 0x1);
- udelay(1);
+ phy_set_bb_reg(adapt, pphyreg->rfHSSIPara2, B3WIREADDREAALENGTH, 0x0);
+ udelay(1);
- phy_set_bb_reg(adapt, pphyreg->rfHSSIPara2,
- B3WIREADDREAALENGTH, 0x0);
- udelay(1);
+ phy_set_bb_reg(adapt, pphyreg->rfHSSIPara2, B3WIREDATALENGTH, 0x0);
+ udelay(1);
- phy_set_bb_reg(adapt, pphyreg->rfHSSIPara2,
- B3WIREDATALENGTH, 0x0);
- udelay(1);
+ rtstatus = rtl88e_phy_config_rf_with_headerfile(adapt);
- switch (rfpath) {
- case RF90_PATH_A:
- rtstatus = rtl88e_phy_config_rf_with_headerfile(adapt);
- break;
- case RF90_PATH_B:
- rtstatus = rtl88e_phy_config_rf_with_headerfile(adapt);
- break;
- case RF90_PATH_C:
- break;
- case RF90_PATH_D:
- break;
- }
-
- switch (rfpath) {
- case RF90_PATH_A:
- case RF90_PATH_C:
- phy_set_bb_reg(adapt, pphyreg->rfintfs,
- BRFSI_RFENV, u4val);
- break;
- case RF90_PATH_B:
- case RF90_PATH_D:
- phy_set_bb_reg(adapt, pphyreg->rfintfs,
- BRFSI_RFENV << 16, u4val);
- break;
- }
-
- if (!rtstatus)
- return false;
- }
+ phy_set_bb_reg(adapt, pphyreg->rfintfs, BRFSI_RFENV, u4val);
return rtstatus;
}
static bool rtl88e_phy_rf6052_config(struct adapter *adapt)
{
- struct hal_data_8188e *hal_data = adapt->HalData;
-
- hal_data->NumTotalRFPath = 1;
-
return rf6052_conf_para(adapt);
}
diff --git a/drivers/staging/rtl8188eu/hal/rtl8188e_hal_init.c b/drivers/staging/rtl8188eu/hal/rtl8188e_hal_init.c
index 385bc2f56f2f..0ce7db723a5d 100644
--- a/drivers/staging/rtl8188eu/hal/rtl8188e_hal_init.c
+++ b/drivers/staging/rtl8188eu/hal/rtl8188e_hal_init.c
@@ -135,7 +135,6 @@ void rtw_hal_read_chip_version(struct adapter *padapter)
dump_chip_info(ChipVersion);
pHalData->VersionID = ChipVersion;
- pHalData->NumTotalRFPath = 1;
}
void rtw_hal_set_odm_var(struct adapter *Adapter, enum hal_odm_variable eVariable, void *pValue1, bool bSet)
@@ -470,7 +469,7 @@ void Hal_ReadTxPowerInfo88E(struct adapter *padapter, u8 *PROMContent, bool Auto
{
struct hal_data_8188e *pHalData = padapter->HalData;
struct txpowerinfo24g pwrInfo24G;
- u8 rfPath, ch, group;
+ u8 ch, group;
u8 bIn24G, TxCount;
Hal_ReadPowerValueFromPROM_8188E(&pwrInfo24G, PROMContent, AutoLoadFail);
@@ -478,34 +477,32 @@ void Hal_ReadTxPowerInfo88E(struct adapter *padapter, u8 *PROMContent, bool Auto
if (!AutoLoadFail)
pHalData->bTXPowerDataReadFromEEPORM = true;
- for (rfPath = 0; rfPath < pHalData->NumTotalRFPath; rfPath++) {
- for (ch = 0; ch < CHANNEL_MAX_NUMBER; ch++) {
- bIn24G = Hal_GetChnlGroup88E(ch, &group);
- if (bIn24G) {
- pHalData->Index24G_CCK_Base[rfPath][ch] = pwrInfo24G.IndexCCK_Base[rfPath][group];
- if (ch == 14)
- pHalData->Index24G_BW40_Base[rfPath][ch] = pwrInfo24G.IndexBW40_Base[rfPath][4];
- else
- pHalData->Index24G_BW40_Base[rfPath][ch] = pwrInfo24G.IndexBW40_Base[rfPath][group];
- }
- if (bIn24G) {
- DBG_88E("======= Path %d, Channel %d =======\n", rfPath, ch);
- DBG_88E("Index24G_CCK_Base[%d][%d] = 0x%x\n", rfPath, ch , pHalData->Index24G_CCK_Base[rfPath][ch]);
- DBG_88E("Index24G_BW40_Base[%d][%d] = 0x%x\n", rfPath, ch , pHalData->Index24G_BW40_Base[rfPath][ch]);
- }
+ for (ch = 0; ch < CHANNEL_MAX_NUMBER; ch++) {
+ bIn24G = Hal_GetChnlGroup88E(ch, &group);
+ if (bIn24G) {
+ pHalData->Index24G_CCK_Base[0][ch] = pwrInfo24G.IndexCCK_Base[0][group];
+ if (ch == 14)
+ pHalData->Index24G_BW40_Base[0][ch] = pwrInfo24G.IndexBW40_Base[0][4];
+ else
+ pHalData->Index24G_BW40_Base[0][ch] = pwrInfo24G.IndexBW40_Base[0][group];
}
- for (TxCount = 0; TxCount < MAX_TX_COUNT; TxCount++) {
- pHalData->CCK_24G_Diff[rfPath][TxCount] = pwrInfo24G.CCK_Diff[rfPath][TxCount];
- pHalData->OFDM_24G_Diff[rfPath][TxCount] = pwrInfo24G.OFDM_Diff[rfPath][TxCount];
- pHalData->BW20_24G_Diff[rfPath][TxCount] = pwrInfo24G.BW20_Diff[rfPath][TxCount];
- pHalData->BW40_24G_Diff[rfPath][TxCount] = pwrInfo24G.BW40_Diff[rfPath][TxCount];
- DBG_88E("======= TxCount %d =======\n", TxCount);
- DBG_88E("CCK_24G_Diff[%d][%d] = %d\n", rfPath, TxCount, pHalData->CCK_24G_Diff[rfPath][TxCount]);
- DBG_88E("OFDM_24G_Diff[%d][%d] = %d\n", rfPath, TxCount, pHalData->OFDM_24G_Diff[rfPath][TxCount]);
- DBG_88E("BW20_24G_Diff[%d][%d] = %d\n", rfPath, TxCount, pHalData->BW20_24G_Diff[rfPath][TxCount]);
- DBG_88E("BW40_24G_Diff[%d][%d] = %d\n", rfPath, TxCount, pHalData->BW40_24G_Diff[rfPath][TxCount]);
+ if (bIn24G) {
+ DBG_88E("======= Path %d, Channel %d =======\n", 0, ch);
+ DBG_88E("Index24G_CCK_Base[%d][%d] = 0x%x\n", 0, ch, pHalData->Index24G_CCK_Base[0][ch]);
+ DBG_88E("Index24G_BW40_Base[%d][%d] = 0x%x\n", 0, ch, pHalData->Index24G_BW40_Base[0][ch]);
}
}
+ for (TxCount = 0; TxCount < MAX_TX_COUNT; TxCount++) {
+ pHalData->CCK_24G_Diff[0][TxCount] = pwrInfo24G.CCK_Diff[0][TxCount];
+ pHalData->OFDM_24G_Diff[0][TxCount] = pwrInfo24G.OFDM_Diff[0][TxCount];
+ pHalData->BW20_24G_Diff[0][TxCount] = pwrInfo24G.BW20_Diff[0][TxCount];
+ pHalData->BW40_24G_Diff[0][TxCount] = pwrInfo24G.BW40_Diff[0][TxCount];
+ DBG_88E("======= TxCount %d =======\n", TxCount);
+ DBG_88E("CCK_24G_Diff[%d][%d] = %d\n", 0, TxCount, pHalData->CCK_24G_Diff[0][TxCount]);
+ DBG_88E("OFDM_24G_Diff[%d][%d] = %d\n", 0, TxCount, pHalData->OFDM_24G_Diff[0][TxCount]);
+ DBG_88E("BW20_24G_Diff[%d][%d] = %d\n", 0, TxCount, pHalData->BW20_24G_Diff[0][TxCount]);
+ DBG_88E("BW40_24G_Diff[%d][%d] = %d\n", 0, TxCount, pHalData->BW40_24G_Diff[0][TxCount]);
+ }
/* 2010/10/19 MH Add Regulator recognize for CU. */
if (!AutoLoadFail) {
diff --git a/drivers/staging/rtl8188eu/hal/rtl8188eu_led.c b/drivers/staging/rtl8188eu/hal/rtl8188eu_led.c
index 780666a755ee..12879afb992e 100644
--- a/drivers/staging/rtl8188eu/hal/rtl8188eu_led.c
+++ b/drivers/staging/rtl8188eu/hal/rtl8188eu_led.c
@@ -46,16 +46,12 @@ void SwLedOff(struct adapter *padapter, struct LED_871x *pLed)
LedCfg = usb_read8(padapter, REG_LEDCFG2);/* 0x4E */
- if (padapter->HalData->bLedOpenDrain) {
- /* Open-drain arrangement for controlling the LED) */
- LedCfg &= 0x90; /* Set to software control. */
- usb_write8(padapter, REG_LEDCFG2, (LedCfg | BIT(3)));
- LedCfg = usb_read8(padapter, REG_MAC_PINMUX_CFG);
- LedCfg &= 0xFE;
- usb_write8(padapter, REG_MAC_PINMUX_CFG, LedCfg);
- } else {
- usb_write8(padapter, REG_LEDCFG2, (LedCfg | BIT(3) | BIT(5) | BIT(6)));
- }
+ /* Open-drain arrangement for controlling the LED */
+ LedCfg &= 0x90; /* Set to software control. */
+ usb_write8(padapter, REG_LEDCFG2, (LedCfg | BIT(3)));
+ LedCfg = usb_read8(padapter, REG_MAC_PINMUX_CFG);
+ LedCfg &= 0xFE;
+ usb_write8(padapter, REG_MAC_PINMUX_CFG, LedCfg);
exit:
pLed->bLedOn = false;
}
@@ -69,10 +65,6 @@ void rtw_hal_sw_led_init(struct adapter *padapter)
{
struct led_priv *pledpriv = &(padapter->ledpriv);
- pledpriv->bRegUseLed = true;
- pledpriv->LedControlHandler = LedControl8188eu;
- padapter->HalData->bLedOpenDrain = true;
-
InitLed871x(padapter, &(pledpriv->SwLed0));
}
diff --git a/drivers/staging/rtl8188eu/hal/rtl8188eu_recv.c b/drivers/staging/rtl8188eu/hal/rtl8188eu_recv.c
index d0495a16ff79..0fc093eb7a77 100644
--- a/drivers/staging/rtl8188eu/hal/rtl8188eu_recv.c
+++ b/drivers/staging/rtl8188eu/hal/rtl8188eu_recv.c
@@ -37,19 +37,15 @@ int rtw_hal_init_recv_priv(struct adapter *padapter)
/* init recv_buf */
_rtw_init_queue(&precvpriv->free_recv_buf_queue);
- precvpriv->pallocated_recv_buf =
+ precvpriv->precv_buf =
kcalloc(NR_RECVBUFF, sizeof(struct recv_buf), GFP_KERNEL);
- if (!precvpriv->pallocated_recv_buf) {
+ if (!precvpriv->precv_buf) {
res = _FAIL;
RT_TRACE(_module_rtl871x_recv_c_, _drv_err_,
("alloc recv_buf fail!\n"));
goto exit;
}
-
- precvpriv->precv_buf = precvpriv->pallocated_recv_buf;
-
-
- precvbuf = (struct recv_buf *)precvpriv->precv_buf;
+ precvbuf = precvpriv->precv_buf;
for (i = 0; i < NR_RECVBUFF; i++) {
res = rtw_os_recvbuf_resource_alloc(padapter, precvbuf);
@@ -58,27 +54,18 @@ int rtw_hal_init_recv_priv(struct adapter *padapter)
precvbuf->adapter = padapter;
precvbuf++;
}
- precvpriv->free_recv_buf_queue_cnt = NR_RECVBUFF;
skb_queue_head_init(&precvpriv->rx_skb_queue);
{
int i;
- size_t tmpaddr = 0;
- size_t alignm = 0;
struct sk_buff *pskb = NULL;
skb_queue_head_init(&precvpriv->free_recv_skb_queue);
for (i = 0; i < NR_PREALLOC_RECV_SKB; i++) {
pskb = __netdev_alloc_skb(padapter->pnetdev,
- MAX_RECVBUF_SZ + RECVBUFF_ALIGN_SZ,
- GFP_KERNEL);
+ MAX_RECVBUF_SZ, GFP_KERNEL);
if (pskb) {
kmemleak_not_leak(pskb);
- pskb->dev = padapter->pnetdev;
- tmpaddr = (size_t)pskb->data;
- alignm = tmpaddr & (RECVBUFF_ALIGN_SZ-1);
- skb_reserve(pskb, (RECVBUFF_ALIGN_SZ - alignm));
-
skb_queue_tail(&precvpriv->free_recv_skb_queue,
pskb);
}
@@ -95,14 +82,14 @@ void rtw_hal_free_recv_priv(struct adapter *padapter)
struct recv_buf *precvbuf;
struct recv_priv *precvpriv = &padapter->recvpriv;
- precvbuf = (struct recv_buf *)precvpriv->precv_buf;
+ precvbuf = precvpriv->precv_buf;
for (i = 0; i < NR_RECVBUFF; i++) {
usb_free_urb(precvbuf->purb);
precvbuf++;
}
- kfree(precvpriv->pallocated_recv_buf);
+ kfree(precvpriv->precv_buf);
if (skb_queue_len(&precvpriv->rx_skb_queue))
DBG_88E(KERN_WARNING "rx_skb_queue not empty\n");
diff --git a/drivers/staging/rtl8188eu/hal/usb_halinit.c b/drivers/staging/rtl8188eu/hal/usb_halinit.c
index 7692ca495ee5..3675edb61942 100644
--- a/drivers/staging/rtl8188eu/hal/usb_halinit.c
+++ b/drivers/staging/rtl8188eu/hal/usb_halinit.c
@@ -562,9 +562,6 @@ static void InitUsbAggregationSetting(struct adapter *Adapter)
/* Rx aggregation setting */
usb_AggSettingRxUpdate(Adapter);
-
- /* 201/12/10 MH Add for USB agg mode dynamic switch. */
- Adapter->HalData->UsbRxHighSpeedMode = false;
}
static void _InitBeaconParameters(struct adapter *Adapter)
@@ -604,11 +601,6 @@ static void _BBTurnOnBlock(struct adapter *Adapter)
phy_set_bb_reg(Adapter, rFPGA0_RFMOD, bOFDMEn, 0x1);
}
-enum {
- Antenna_Lfet = 1,
- Antenna_Right = 2,
-};
-
static void _InitAntenna_Selection(struct adapter *Adapter)
{
struct hal_data_8188e *haldata = Adapter->HalData;
@@ -994,19 +986,16 @@ u32 rtw_hal_inirp_init(struct adapter *Adapter)
RT_TRACE(_module_hci_hal_init_c_, _drv_info_,
("===> usb_inirp_init\n"));
- precvpriv->ff_hwaddr = RECV_BULK_IN_ADDR;
-
/* issue Rx irp to receive data */
- precvbuf = (struct recv_buf *)precvpriv->precv_buf;
+ precvbuf = precvpriv->precv_buf;
for (i = 0; i < NR_RECVBUFF; i++) {
- if (usb_read_port(Adapter, precvpriv->ff_hwaddr, 0, (unsigned char *)precvbuf) == false) {
+ if (usb_read_port(Adapter, RECV_BULK_IN_ADDR, precvbuf) == false) {
RT_TRACE(_module_hci_hal_init_c_, _drv_err_, ("usb_rx_init: usb_read_port error\n"));
status = _FAIL;
goto exit;
}
precvbuf++;
- precvpriv->free_recv_buf_queue_cnt--;
}
exit:
@@ -1107,18 +1096,12 @@ static void _ReadPROMContent(
readAdapterInfo_8188EU(Adapter);
}
-static void _ReadRFType(struct adapter *Adapter)
-{
- Adapter->HalData->rf_chip = RF_6052;
-}
-
void rtw_hal_read_chip_info(struct adapter *Adapter)
{
unsigned long start = jiffies;
MSG_88E("====> %s\n", __func__);
- _ReadRFType(Adapter);/* rf_chip -> _InitRFType() */
_ReadPROMContent(Adapter);
MSG_88E("<==== %s in %d ms\n", __func__,
diff --git a/drivers/staging/rtl8188eu/include/Hal8188EPhyCfg.h b/drivers/staging/rtl8188eu/include/Hal8188EPhyCfg.h
index 0976a761b280..550ad62e7064 100644
--- a/drivers/staging/rtl8188eu/include/Hal8188EPhyCfg.h
+++ b/drivers/staging/rtl8188eu/include/Hal8188EPhyCfg.h
@@ -99,17 +99,6 @@ enum phy_rate_tx_offset_area {
RA_OFFSET_HT_CCK,
};
-/* BB/RF related */
-enum RF_TYPE_8190P {
- RF_TYPE_MIN, /* 0 */
- RF_8225 = 1, /* 1 11b/g RF for verification only */
- RF_8256 = 2, /* 2 11b/g/n */
- RF_8258 = 3, /* 3 11a/b/g/n RF */
- RF_6052 = 4, /* 4 11b/g/n RF */
- /* TODO: We should remove this psudo PHY RF after we get new RF. */
- RF_PSEUDO_11N = 5, /* 5, It is a temporality RF. */
-};
-
struct bb_reg_def {
u32 rfintfs; /* set software control: */
/* 0x870~0x877[8 bytes] */
diff --git a/drivers/staging/rtl8188eu/include/drv_types.h b/drivers/staging/rtl8188eu/include/drv_types.h
index 32326fd1dd24..e86419e525d8 100644
--- a/drivers/staging/rtl8188eu/include/drv_types.h
+++ b/drivers/staging/rtl8188eu/include/drv_types.h
@@ -156,8 +156,6 @@ struct adapter {
u8 hw_init_completed;
void *cmdThread;
- void (*intf_start)(struct adapter *adapter);
- void (*intf_stop)(struct adapter *adapter);
struct net_device *pnetdev;
struct net_device *pmondev;
diff --git a/drivers/staging/rtl8188eu/include/hal_intf.h b/drivers/staging/rtl8188eu/include/hal_intf.h
index fa032b0c12ff..e1114a95d442 100644
--- a/drivers/staging/rtl8188eu/include/hal_intf.h
+++ b/drivers/staging/rtl8188eu/include/hal_intf.h
@@ -190,6 +190,7 @@ void rtw_hal_set_odm_var(struct adapter *padapter,
u32 rtw_hal_inirp_init(struct adapter *padapter);
void rtw_hal_inirp_deinit(struct adapter *padapter);
+void usb_intf_stop(struct adapter *padapter);
s32 rtw_hal_xmit(struct adapter *padapter, struct xmit_frame *pxmitframe);
s32 rtw_hal_mgnt_xmit(struct adapter *padapter,
diff --git a/drivers/staging/rtl8188eu/include/odm.h b/drivers/staging/rtl8188eu/include/odm.h
index 805f52e108b2..4fb3bb07ceaa 100644
--- a/drivers/staging/rtl8188eu/include/odm.h
+++ b/drivers/staging/rtl8188eu/include/odm.h
@@ -80,11 +80,6 @@
#define DM_DIG_FA_TH2_LPS 30 /* 30 lps */
#define RSSI_OFFSET_DIG 0x05;
-/* ANT Test */
-#define ANTTESTALL 0x00 /* Ant A or B will be Testing */
-#define ANTTESTA 0x01 /* Ant A will be Testing */
-#define ANTTESTB 0x02 /* Ant B will be testing */
-
struct rtw_dig {
u8 Dig_Enable_Flag;
u8 Dig_Ext_Port_Stage;
@@ -590,7 +585,6 @@ struct odm_rf_cal {
s32 RegEBC;
u8 TXPowercount;
- bool bTXPowerTrackingInit;
bool bTXPowerTracking;
u8 TxPowerTrackControl; /* for mp mode, turn off txpwrtracking
* as default */
diff --git a/drivers/staging/rtl8188eu/include/osdep_intf.h b/drivers/staging/rtl8188eu/include/osdep_intf.h
index dbd7dc4f87dd..97d3d8504184 100644
--- a/drivers/staging/rtl8188eu/include/osdep_intf.h
+++ b/drivers/staging/rtl8188eu/include/osdep_intf.h
@@ -35,7 +35,8 @@ int rtw_init_netdev_name(struct net_device *pnetdev, const char *ifname);
struct net_device *rtw_init_netdev(struct adapter *padapter);
u16 rtw_recv_select_queue(struct sk_buff *skb);
-int pm_netdev_open(struct net_device *pnetdev, u8 bnormal);
+int netdev_open(struct net_device *pnetdev);
+int ips_netdrv_open(struct adapter *padapter);
void rtw_ips_dev_unload(struct adapter *padapter);
int rtw_ips_pwr_up(struct adapter *padapter);
void rtw_ips_pwr_down(struct adapter *padapter);
diff --git a/drivers/staging/rtl8188eu/include/recv_osdep.h b/drivers/staging/rtl8188eu/include/recv_osdep.h
index 7550d58f6b5b..9b43a1314bd5 100644
--- a/drivers/staging/rtl8188eu/include/recv_osdep.h
+++ b/drivers/staging/rtl8188eu/include/recv_osdep.h
@@ -29,8 +29,6 @@ int rtw_recv_indicatepkt(struct adapter *adapter,
void rtw_handle_tkip_mic_err(struct adapter *padapter, u8 bgroup);
-void rtw_os_recv_resource_alloc(struct recv_frame *recvfr);
-
int rtw_os_recvbuf_resource_alloc(struct adapter *adapt, struct recv_buf *buf);
void rtw_init_recv_timer(struct recv_reorder_ctrl *preorder_ctrl);
diff --git a/drivers/staging/rtl8188eu/include/rtl8188e_hal.h b/drivers/staging/rtl8188eu/include/rtl8188e_hal.h
index 7c81e3f3d12e..9330361da4ad 100644
--- a/drivers/staging/rtl8188eu/include/rtl8188e_hal.h
+++ b/drivers/staging/rtl8188eu/include/rtl8188e_hal.h
@@ -200,10 +200,6 @@ struct hal_data_8188e {
u16 BasicRateSet;
- /* rf_ctrl */
- u8 rf_chip;
- u8 NumTotalRFPath;
-
u8 BoardType;
/* EEPROM setting. */
@@ -265,14 +261,6 @@ struct hal_data_8188e {
u32 CCKTxPowerLevelOriginalOffset;
u8 CrystalCap;
- u32 AntennaTxPath; /* Antenna path Tx */
- u32 AntennaRxPath; /* Antenna path Rx */
- u8 BluetoothCoexist;
- u8 ExternalPA;
-
- u8 bLedOpenDrain; /* Open-drain support for controlling the LED.*/
-
- u8 b1x1RecvCombine; /* for 1T1R receive combining */
u32 AcParam_BE; /* Original parameter for BE, use for EDCA turbo. */
@@ -316,14 +304,6 @@ struct hal_data_8188e {
u8 OutEpQueueSel;
u8 OutEpNumber;
- /* Add for USB aggreation mode dynamic shceme. */
- bool UsbRxHighSpeedMode;
-
- /* 2010/11/22 MH Add for slim combo debug mode selective. */
- /* This is used for fix the drawback of CU TSMC-A/UMC-A cut.
- * HW auto suspend ability. Close BT clock. */
- bool SlimComboDbg;
-
u16 EfuseUsedBytes;
/* Auto FSM to Turn On, include clock, isolation, power control
diff --git a/drivers/staging/rtl8188eu/include/rtl8188e_recv.h b/drivers/staging/rtl8188eu/include/rtl8188e_recv.h
index 80832a5f0732..0d8bf51c72a9 100644
--- a/drivers/staging/rtl8188eu/include/rtl8188e_recv.h
+++ b/drivers/staging/rtl8188eu/include/rtl8188e_recv.h
@@ -51,9 +51,7 @@ enum rx_packet_type {
};
#define INTERRUPT_MSG_FORMAT_LEN 60
-void rtl8188eu_recv_hdl(struct adapter *padapter, struct recv_buf *precvbuf);
void rtl8188eu_recv_tasklet(void *priv);
-void rtl8188e_query_rx_phy_status(struct recv_frame *fr, struct phy_stat *phy);
void rtl8188e_process_phy_info(struct adapter *padapter,
struct recv_frame *prframe);
void update_recvframe_phyinfo_88e(struct recv_frame *fra, struct phy_stat *phy);
diff --git a/drivers/staging/rtl8188eu/include/rtw_led.h b/drivers/staging/rtl8188eu/include/rtw_led.h
index f2054ef70358..607d1ba56a46 100644
--- a/drivers/staging/rtl8188eu/include/rtw_led.h
+++ b/drivers/staging/rtl8188eu/include/rtw_led.h
@@ -70,12 +70,9 @@ struct LED_871x {
struct timer_list BlinkTimer; /* Timer object for led blinking. */
- u8 bSWLedCtrl;
-
/* ALPHA, added by chiyoko, 20090106 */
u8 bLedNoLinkBlinkInProgress;
u8 bLedLinkBlinkInProgress;
- u8 bLedStartToLinkBlinkInProgress;
u8 bLedScanBlinkInProgress;
struct work_struct BlinkWorkItem; /* Workitem used by BlinkTimer to
* manipulate H/W to blink LED. */
@@ -91,18 +88,9 @@ void LedControl8188eu(struct adapter *padapter, enum LED_CTL_MODE LedAction);
struct led_priv {
/* add for led control */
struct LED_871x SwLed0;
- u8 bRegUseLed;
- void (*LedControlHandler)(struct adapter *padapter,
- enum LED_CTL_MODE LedAction);
/* add for led control */
};
-#define rtw_led_control(adapt, action) \
- do { \
- if ((adapt)->ledpriv.LedControlHandler) \
- (adapt)->ledpriv.LedControlHandler((adapt), (action)); \
- } while (0)
-
void BlinkTimerCallback(unsigned long data);
void BlinkWorkItemCallback(struct work_struct *work);
diff --git a/drivers/staging/rtl8188eu/include/rtw_mlme.h b/drivers/staging/rtl8188eu/include/rtw_mlme.h
index 9434b869c5e9..18fb7e7b2273 100644
--- a/drivers/staging/rtl8188eu/include/rtw_mlme.h
+++ b/drivers/staging/rtl8188eu/include/rtw_mlme.h
@@ -504,7 +504,7 @@ void rtw_scan_abort(struct adapter *adapter);
int rtw_restruct_sec_ie(struct adapter *adapter, u8 *in_ie, u8 *out_ie,
uint in_len);
int rtw_restruct_wmm_ie(struct adapter *adapter, u8 *in_ie, u8 *out_ie,
- uint in_len, uint initial_out_len);
+ uint in_len, uint initial_out_len);
void rtw_init_registrypriv_dev_network(struct adapter *adapter);
void rtw_update_registrypriv_dev_network(struct adapter *adapter);
diff --git a/drivers/staging/rtl8188eu/include/rtw_recv.h b/drivers/staging/rtl8188eu/include/rtw_recv.h
index 49d973881a04..052af7b891da 100644
--- a/drivers/staging/rtl8188eu/include/rtw_recv.h
+++ b/drivers/staging/rtl8188eu/include/rtw_recv.h
@@ -139,8 +139,6 @@ struct rx_pkt_attrib {
#define SN_EQUAL(a, b) (a == b)
#define REORDER_WAIT_TIME (50) /* (ms) */
-#define RECVBUFF_ALIGN_SZ 8
-
#define RXDESC_SIZE 24
#define RXDESC_OFFSET RXDESC_SIZE
@@ -166,9 +164,7 @@ struct recv_priv {
struct __queue free_recv_queue;
struct __queue recv_pending_queue;
struct __queue uc_swdec_pending_queue;
- u8 *pallocated_frame_buf;
- u8 *precv_frame_buf;
- uint free_recvframe_cnt;
+ void *pallocated_frame_buf;
struct adapter *adapter;
u32 bIsAnyNonBEPkts;
u64 rx_bytes;
@@ -176,17 +172,12 @@ struct recv_priv {
u64 rx_drop;
u64 last_rx_bytes;
- uint ff_hwaddr;
- u8 rx_pending_cnt;
-
struct tasklet_struct irq_prepare_beacon_tasklet;
struct tasklet_struct recv_tasklet;
struct sk_buff_head free_recv_skb_queue;
struct sk_buff_head rx_skb_queue;
- u8 *pallocated_recv_buf;
- u8 *precv_buf; /* 4 alignment */
+ struct recv_buf *precv_buf; /* 4 alignment */
struct __queue free_recv_buf_queue;
- u32 free_recv_buf_queue_cnt;
/* For display the phy informatiom */
u8 is_signal_dbg; /* for debug */
u8 signal_strength_dbg; /* for debug */
diff --git a/drivers/staging/rtl8188eu/include/usb_ops_linux.h b/drivers/staging/rtl8188eu/include/usb_ops_linux.h
index 78d9b6e035bf..fb586365d2e5 100644
--- a/drivers/staging/rtl8188eu/include/usb_ops_linux.h
+++ b/drivers/staging/rtl8188eu/include/usb_ops_linux.h
@@ -53,7 +53,7 @@ u8 usb_read8(struct adapter *adapter, u32 addr);
u16 usb_read16(struct adapter *adapter, u32 addr);
u32 usb_read32(struct adapter *adapter, u32 addr);
-u32 usb_read_port(struct adapter *adapter, u32 addr, u32 cnt, u8 *pmem);
+u32 usb_read_port(struct adapter *adapter, u32 addr, struct recv_buf *precvbuf);
void usb_read_port_cancel(struct adapter *adapter);
int usb_write8(struct adapter *adapter, u32 addr, u8 val);
diff --git a/drivers/staging/rtl8188eu/include/xmit_osdep.h b/drivers/staging/rtl8188eu/include/xmit_osdep.h
index f96ca6af934d..959ef4b3066c 100644
--- a/drivers/staging/rtl8188eu/include/xmit_osdep.h
+++ b/drivers/staging/rtl8188eu/include/xmit_osdep.h
@@ -41,13 +41,11 @@ void rtw_os_xmit_schedule(struct adapter *padapter);
int rtw_os_xmit_resource_alloc(struct adapter *padapter,
struct xmit_buf *pxmitbuf, u32 alloc_sz);
-void rtw_os_xmit_resource_free(struct adapter *padapter,
- struct xmit_buf *pxmitbuf, u32 free_sz);
+void rtw_os_xmit_resource_free(struct xmit_buf *pxmitbuf);
uint rtw_remainder_len(struct pkt_file *pfile);
void _rtw_open_pktfile(struct sk_buff *pkt, struct pkt_file *pfile);
uint _rtw_pktfile_read(struct pkt_file *pfile, u8 *rmem, uint rlen);
-int rtw_endofpktfile(struct pkt_file *pfile);
void rtw_os_pkt_complete(struct adapter *padapter, struct sk_buff *pkt);
void rtw_os_xmit_complete(struct adapter *padapter,
diff --git a/drivers/staging/rtl8188eu/os_dep/mon.c b/drivers/staging/rtl8188eu/os_dep/mon.c
index d976e5e18d50..c9c9821cfc32 100644
--- a/drivers/staging/rtl8188eu/os_dep/mon.c
+++ b/drivers/staging/rtl8188eu/os_dep/mon.c
@@ -145,7 +145,6 @@ static netdev_tx_t mon_xmit(struct sk_buff *skb, struct net_device *dev)
static const struct net_device_ops mon_netdev_ops = {
.ndo_start_xmit = mon_xmit,
- .ndo_change_mtu = eth_change_mtu,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
};
diff --git a/drivers/staging/rtl8188eu/os_dep/os_intfs.c b/drivers/staging/rtl8188eu/os_dep/os_intfs.c
index 40691f1ec507..8fc3fadf065f 100644
--- a/drivers/staging/rtl8188eu/os_dep/os_intfs.c
+++ b/drivers/staging/rtl8188eu/os_dep/os_intfs.c
@@ -144,7 +144,6 @@ static bool rtw_monitor_enable;
module_param_named(monitor_enable, rtw_monitor_enable, bool, 0444);
MODULE_PARM_DESC(monitor_enable, "Enable monitor inferface (default: false)");
-static int netdev_open(struct net_device *pnetdev);
static int netdev_close(struct net_device *pnetdev);
static void loadparam(struct adapter *padapter, struct net_device *pnetdev)
@@ -596,10 +595,9 @@ static int _netdev_open(struct net_device *pnetdev)
pr_info("can't init mlme_ext_priv\n");
goto netdev_open_error;
}
- if (padapter->intf_start)
- padapter->intf_start(padapter);
+ rtw_hal_inirp_init(padapter);
- rtw_led_control(padapter, LED_CTL_NO_LINK);
+ LedControl8188eu(padapter, LED_CTL_NO_LINK);
padapter->bup = true;
}
@@ -630,7 +628,7 @@ netdev_open_error:
return -1;
}
-static int netdev_open(struct net_device *pnetdev)
+int netdev_open(struct net_device *pnetdev)
{
int ret;
struct adapter *padapter = (struct adapter *)rtw_netdev_priv(pnetdev);
@@ -642,7 +640,7 @@ static int netdev_open(struct net_device *pnetdev)
return ret;
}
-static int ips_netdrv_open(struct adapter *padapter)
+int ips_netdrv_open(struct adapter *padapter)
{
int status = _SUCCESS;
@@ -658,8 +656,7 @@ static int ips_netdrv_open(struct adapter *padapter)
goto netdev_open_error;
}
- if (padapter->intf_start)
- padapter->intf_start(padapter);
+ rtw_hal_inirp_init(padapter);
rtw_set_pwr_state_check_timer(&padapter->pwrctrlpriv);
mod_timer(&padapter->mlmepriv.dynamic_chk_timer,
@@ -684,7 +681,7 @@ int rtw_ips_pwr_up(struct adapter *padapter)
result = ips_netdrv_open(padapter);
- rtw_led_control(padapter, LED_CTL_NO_LINK);
+ LedControl8188eu(padapter, LED_CTL_NO_LINK);
DBG_88E("<=== rtw_ips_pwr_up.............. in %dms\n",
jiffies_to_msecs(jiffies - start_time));
@@ -699,7 +696,7 @@ void rtw_ips_pwr_down(struct adapter *padapter)
padapter->net_closed = true;
- rtw_led_control(padapter, LED_CTL_POWER_OFF);
+ LedControl8188eu(padapter, LED_CTL_POWER_OFF);
rtw_ips_dev_unload(padapter);
DBG_88E("<=== rtw_ips_pwr_down..................... in %dms\n",
@@ -712,25 +709,13 @@ void rtw_ips_dev_unload(struct adapter *padapter)
rtw_hal_set_hwreg(padapter, HW_VAR_FIFO_CLEARN_UP, NULL);
- if (padapter->intf_stop)
- padapter->intf_stop(padapter);
+ usb_intf_stop(padapter);
/* s5. */
if (!padapter->bSurpriseRemoved)
rtw_hal_deinit(padapter);
}
-int pm_netdev_open(struct net_device *pnetdev, u8 bnormal)
-{
- int status;
-
- if (bnormal)
- status = netdev_open(pnetdev);
- else
- status = (_SUCCESS == ips_netdrv_open((struct adapter *)rtw_netdev_priv(pnetdev))) ? (0) : (-1);
- return status;
-}
-
static int netdev_close(struct net_device *pnetdev)
{
struct adapter *padapter = (struct adapter *)rtw_netdev_priv(pnetdev);
@@ -763,7 +748,7 @@ static int netdev_close(struct net_device *pnetdev)
/* s2-4. */
rtw_free_network_queue(padapter, true);
/* Close LED */
- rtw_led_control(padapter, LED_CTL_POWER_OFF);
+ LedControl8188eu(padapter, LED_CTL_POWER_OFF);
}
RT_TRACE(_module_os_intfs_c_, _drv_info_, ("-88eu_drv - drv_close\n"));
diff --git a/drivers/staging/rtl8188eu/os_dep/osdep_service.c b/drivers/staging/rtl8188eu/os_dep/osdep_service.c
index 7cd2655f27fe..6ff836f481da 100644
--- a/drivers/staging/rtl8188eu/os_dep/osdep_service.c
+++ b/drivers/staging/rtl8188eu/os_dep/osdep_service.c
@@ -12,8 +12,6 @@
* more details.
*
******************************************************************************/
-
-
#define _OSDEP_SERVICE_C_
#include <osdep_service.h>
@@ -24,9 +22,10 @@
#include <rtw_ioctl_set.h>
/*
-* Translate the OS dependent @param error_code to OS independent RTW_STATUS_CODE
-* @return: one of RTW_STATUS_CODE
-*/
+ * Translate the OS dependent @param error_code to OS independent
+ * RTW_STATUS_CODE
+ * @return: one of RTW_STATUS_CODE
+ */
inline int RTW_STATUS_CODE(int error_code)
{
if (error_code >= 0)
@@ -43,22 +42,20 @@ void *rtw_malloc2d(int h, int w, int size)
{
int j;
- void **a = kzalloc(h*sizeof(void *) + h*w*size, GFP_KERNEL);
- if (!a) {
- pr_info("%s: alloc memory fail!\n", __func__);
- return NULL;
- }
+ void **a = kzalloc(h * sizeof(void *) + h * w * size, GFP_KERNEL);
+ if (!a)
+ goto out;
for (j = 0; j < h; j++)
- a[j] = ((char *)(a+h)) + j*w*size;
-
+ a[j] = ((char *)(a + h)) + j * w * size;
+out:
return a;
}
-void _rtw_init_queue(struct __queue *pqueue)
+void _rtw_init_queue(struct __queue *pqueue)
{
- INIT_LIST_HEAD(&(pqueue->queue));
- spin_lock_init(&(pqueue->lock));
+ INIT_LIST_HEAD(&pqueue->queue);
+ spin_lock_init(&pqueue->lock);
}
struct net_device *rtw_alloc_etherdev_with_old_priv(void *old_priv)
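/*
 * Usage sketch, not from the patch: rtw_malloc2d(h, w, size) makes one
 * allocation whose first h slots are row pointers into the data area
 * behind them, so the result indexes like a 2-D array and a single
 * kfree() of the returned pointer releases rows and data together:
 */
u8 **table = rtw_malloc2d(4, 16, sizeof(u8));	/* 4 rows x 16 columns */

if (table) {
	table[2][5] = 0xab;	/* row 2, column 5 */
	kfree(table);		/* one block: row pointers + data */
}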
diff --git a/drivers/staging/rtl8188eu/os_dep/recv_linux.c b/drivers/staging/rtl8188eu/os_dep/recv_linux.c
index 103cdb4ed073..b85824ec5354 100644
--- a/drivers/staging/rtl8188eu/os_dep/recv_linux.c
+++ b/drivers/staging/rtl8188eu/os_dep/recv_linux.c
@@ -21,12 +21,6 @@
#include <osdep_intf.h>
#include <usb_ops_linux.h>
-/* alloc os related resource in struct recv_frame */
-void rtw_os_recv_resource_alloc(struct recv_frame *precvframe)
-{
- precvframe->pkt = NULL;
-}
-
/* alloc os related resource in struct recv_buf */
int rtw_os_recvbuf_resource_alloc(struct adapter *padapter,
struct recv_buf *precvbuf)
diff --git a/drivers/staging/rtl8188eu/os_dep/usb_intf.c b/drivers/staging/rtl8188eu/os_dep/usb_intf.c
index 68e1e6bbe87f..c6316ffa64d3 100644
--- a/drivers/staging/rtl8188eu/os_dep/usb_intf.c
+++ b/drivers/staging/rtl8188eu/os_dep/usb_intf.c
@@ -141,16 +141,7 @@ static void usb_dvobj_deinit(struct usb_interface *usb_intf)
}
-static void usb_intf_start(struct adapter *padapter)
-{
- RT_TRACE(_module_hci_intfs_c_, _drv_err_, ("+usb_intf_start\n"));
-
- rtw_hal_inirp_init(padapter);
-
- RT_TRACE(_module_hci_intfs_c_, _drv_err_, ("-usb_intf_start\n"));
-}
-
-static void usb_intf_stop(struct adapter *padapter)
+void usb_intf_stop(struct adapter *padapter)
{
RT_TRACE(_module_hci_intfs_c_, _drv_err_, ("+usb_intf_stop\n"));
@@ -183,8 +174,7 @@ static void rtw_dev_unload(struct adapter *padapter)
if (padapter->xmitpriv.ack_tx)
rtw_ack_tx_done(&padapter->xmitpriv, RTW_SCTX_DONE_DRV_STOP);
/* s3. */
- if (padapter->intf_stop)
- padapter->intf_stop(padapter);
+ usb_intf_stop(padapter);
/* s4. */
if (!padapter->pwrctrlpriv.bInternalAutoSuspend)
rtw_stop_drv_threads(padapter);
@@ -294,7 +284,7 @@ static int rtw_resume_process(struct adapter *padapter)
pwrpriv->bkeepfwalive = false;
pr_debug("bkeepfwalive(%x)\n", pwrpriv->bkeepfwalive);
- if (pm_netdev_open(pnetdev, true) != 0) {
+ if (netdev_open(pnetdev) != 0) {
mutex_unlock(&pwrpriv->mutex_lock);
goto exit;
}
@@ -366,9 +356,6 @@ static struct adapter *rtw_usb_if1_init(struct dvobj_priv *dvobj,
if (!padapter->HalData)
DBG_88E("cant not alloc memory for HAL DATA\n");
- padapter->intf_start = &usb_intf_start;
- padapter->intf_stop = &usb_intf_stop;
-
/* step read_chip_version */
rtw_hal_read_chip_version(padapter);
diff --git a/drivers/staging/rtl8188eu/os_dep/usb_ops_linux.c b/drivers/staging/rtl8188eu/os_dep/usb_ops_linux.c
index d0d591501b73..e2dbe1b4afd3 100644
--- a/drivers/staging/rtl8188eu/os_dep/usb_ops_linux.c
+++ b/drivers/staging/rtl8188eu/os_dep/usb_ops_linux.c
@@ -167,27 +167,26 @@ static int recvbuf2recvframe(struct adapter *adapt, struct sk_buff *pskb)
}
if (pattrib->pkt_rpt_type == NORMAL_RX) { /* Normal rx packet */
if (pattrib->physt)
- update_recvframe_phyinfo_88e(precvframe, (struct phy_stat *)pphy_status);
+ update_recvframe_phyinfo_88e(precvframe, pphy_status);
if (rtw_recv_entry(precvframe) != _SUCCESS) {
RT_TRACE(_module_rtl871x_recv_c_, _drv_err_,
("recvbuf2recvframe: rtw_recv_entry(precvframe) != _SUCCESS\n"));
}
- } else {
- /* enqueue recvframe to txrtp queue */
- if (pattrib->pkt_rpt_type == TX_REPORT1) {
- /* CCX-TXRPT ack for xmit mgmt frames. */
- handle_txrpt_ccx_88e(adapt, precvframe->rx_data);
- } else if (pattrib->pkt_rpt_type == TX_REPORT2) {
- ODM_RA_TxRPT2Handle_8188E(
- &haldata->odmpriv,
- precvframe->rx_data,
- pattrib->pkt_len,
- pattrib->MacIDValidEntry[0],
- pattrib->MacIDValidEntry[1]
- );
- } else if (pattrib->pkt_rpt_type == HIS_REPORT) {
- interrupt_handler_8188eu(adapt, pattrib->pkt_len, precvframe->rx_data);
- }
+ } else if (pattrib->pkt_rpt_type == TX_REPORT1) {
+ /* CCX-TXRPT ack for xmit mgmt frames. */
+ handle_txrpt_ccx_88e(adapt, precvframe->rx_data);
+ rtw_free_recvframe(precvframe, pfree_recv_queue);
+ } else if (pattrib->pkt_rpt_type == TX_REPORT2) {
+ ODM_RA_TxRPT2Handle_8188E(
+ &haldata->odmpriv,
+ precvframe->rx_data,
+ pattrib->pkt_len,
+ pattrib->MacIDValidEntry[0],
+ pattrib->MacIDValidEntry[1]
+ );
+ rtw_free_recvframe(precvframe, pfree_recv_queue);
+ } else if (pattrib->pkt_rpt_type == HIS_REPORT) {
+ interrupt_handler_8188eu(adapt, pattrib->pkt_len, precvframe->rx_data);
rtw_free_recvframe(precvframe, pfree_recv_queue);
}
pkt_cnt--;
@@ -253,7 +252,7 @@ static int usbctrl_vendorreq(struct adapter *adapt, u8 request, u16 value, u16 i
/* Acquire IO memory for vendorreq */
pIo_buf = kmalloc(MAX_USB_IO_CTL_SIZE, GFP_ATOMIC);
- if (pIo_buf == NULL) {
+ if (!pIo_buf) {
DBG_88E("[%s] pIo_buf == NULL\n", __func__);
status = -ENOMEM;
goto release_mutex;
@@ -384,8 +383,6 @@ static void usb_read_port_complete(struct urb *purb, struct pt_regs *regs)
RT_TRACE(_module_hci_ops_os_c_, _drv_err_, ("usb_read_port_complete!!!\n"));
- precvpriv->rx_pending_cnt--;
-
if (adapt->bSurpriseRemoved || adapt->bDriverStopped || adapt->bReadPortCancel) {
RT_TRACE(_module_hci_ops_os_c_, _drv_err_,
("usb_read_port_complete:bDriverStopped(%d) OR bSurpriseRemoved(%d)\n",
@@ -403,7 +400,7 @@ static void usb_read_port_complete(struct urb *purb, struct pt_regs *regs)
RT_TRACE(_module_hci_ops_os_c_, _drv_err_,
("usb_read_port_complete: (purb->actual_length > MAX_RECVBUF_SZ) || (purb->actual_length < RXDESC_SIZE)\n"));
precvbuf->reuse = true;
- usb_read_port(adapt, precvpriv->ff_hwaddr, 0, (unsigned char *)precvbuf);
+ usb_read_port(adapt, RECV_BULK_IN_ADDR, precvbuf);
DBG_88E("%s()-%d: RX Warning!\n", __func__, __LINE__);
} else {
skb_put(precvbuf->pskb, purb->actual_length);
@@ -414,7 +411,7 @@ static void usb_read_port_complete(struct urb *purb, struct pt_regs *regs)
precvbuf->pskb = NULL;
precvbuf->reuse = false;
- usb_read_port(adapt, precvpriv->ff_hwaddr, 0, (unsigned char *)precvbuf);
+ usb_read_port(adapt, RECV_BULK_IN_ADDR, precvbuf);
}
} else {
RT_TRACE(_module_hci_ops_os_c_, _drv_err_, ("usb_read_port_complete : purb->status(%d) != 0\n", purb->status));
@@ -437,7 +434,7 @@ static void usb_read_port_complete(struct urb *purb, struct pt_regs *regs)
case -EOVERFLOW:
adapt->HalData->srestpriv.Wifi_Error_Status = USB_READ_PORT_FAIL;
precvbuf->reuse = true;
- usb_read_port(adapt, precvpriv->ff_hwaddr, 0, (unsigned char *)precvbuf);
+ usb_read_port(adapt, RECV_BULK_IN_ADDR, precvbuf);
break;
case -EINPROGRESS:
DBG_88E("ERROR: URB IS IN PROGRESS!\n");
@@ -448,17 +445,14 @@ static void usb_read_port_complete(struct urb *purb, struct pt_regs *regs)
}
}
-u32 usb_read_port(struct adapter *adapter, u32 addr, u32 cnt, u8 *rmem)
+u32 usb_read_port(struct adapter *adapter, u32 addr, struct recv_buf *precvbuf)
{
struct urb *purb = NULL;
- struct recv_buf *precvbuf = (struct recv_buf *)rmem;
struct dvobj_priv *pdvobj = adapter_to_dvobj(adapter);
struct recv_priv *precvpriv = &adapter->recvpriv;
struct usb_device *pusbd = pdvobj->pusbdev;
int err;
unsigned int pipe;
- size_t tmpaddr = 0;
- size_t alignment = 0;
u32 ret = _SUCCESS;
@@ -483,22 +477,16 @@ u32 usb_read_port(struct adapter *adapter, u32 addr, u32 cnt, u8 *rmem)
/* re-assign for linux based on skb */
if ((!precvbuf->reuse) || (precvbuf->pskb == NULL)) {
- precvbuf->pskb = netdev_alloc_skb(adapter->pnetdev, MAX_RECVBUF_SZ + RECVBUFF_ALIGN_SZ);
+ precvbuf->pskb = netdev_alloc_skb(adapter->pnetdev, MAX_RECVBUF_SZ);
if (precvbuf->pskb == NULL) {
RT_TRACE(_module_hci_ops_os_c_, _drv_err_, ("init_recvbuf(): alloc_skb fail!\n"));
DBG_88E("#### usb_read_port() alloc_skb fail!#####\n");
return _FAIL;
}
-
- tmpaddr = (size_t)precvbuf->pskb->data;
- alignment = tmpaddr & (RECVBUFF_ALIGN_SZ-1);
- skb_reserve(precvbuf->pskb, (RECVBUFF_ALIGN_SZ - alignment));
} else { /* reuse skb */
precvbuf->reuse = false;
}
- precvpriv->rx_pending_cnt++;
-
purb = precvbuf->purb;
/* translate DMA FIFO addr to pipehandle */
@@ -528,7 +516,7 @@ void rtw_hal_inirp_deinit(struct adapter *padapter)
int i;
struct recv_buf *precvbuf;
- precvbuf = (struct recv_buf *)padapter->recvpriv.precv_buf;
+ precvbuf = padapter->recvpriv.precv_buf;
DBG_88E("%s\n", __func__);
diff --git a/drivers/staging/rtl8188eu/os_dep/xmit_linux.c b/drivers/staging/rtl8188eu/os_dep/xmit_linux.c
index 4b1b04e00715..e097c619ed1b 100644
--- a/drivers/staging/rtl8188eu/os_dep/xmit_linux.c
+++ b/drivers/staging/rtl8188eu/os_dep/xmit_linux.c
@@ -59,11 +59,6 @@ uint _rtw_pktfile_read(struct pkt_file *pfile, u8 *rmem, uint rlen)
return len;
}
-int rtw_endofpktfile(struct pkt_file *pfile)
-{
- return pfile->pkt_len == 0;
-}
-
int rtw_os_xmit_resource_alloc(struct adapter *padapter, struct xmit_buf *pxmitbuf, u32 alloc_sz)
{
int i;
@@ -85,8 +80,7 @@ int rtw_os_xmit_resource_alloc(struct adapter *padapter, struct xmit_buf *pxmitb
return _SUCCESS;
}
-void rtw_os_xmit_resource_free(struct adapter *padapter,
- struct xmit_buf *pxmitbuf, u32 free_sz)
+void rtw_os_xmit_resource_free(struct xmit_buf *pxmitbuf)
{
int i;
diff --git a/drivers/staging/rtl8192e/Makefile b/drivers/staging/rtl8192e/Makefile
index cb18db74d78c..7101fcc8871b 100644
--- a/drivers/staging/rtl8192e/Makefile
+++ b/drivers/staging/rtl8192e/Makefile
@@ -17,5 +17,3 @@ obj-$(CONFIG_RTLLIB_CRYPTO_TKIP) += rtllib_crypt_tkip.o
obj-$(CONFIG_RTLLIB_CRYPTO_WEP) += rtllib_crypt_wep.o
obj-$(CONFIG_RTL8192E) += rtl8192e/
-
-ccflags-y += -D__CHECK_ENDIAN__
diff --git a/drivers/staging/rtl8192e/dot11d.c b/drivers/staging/rtl8192e/dot11d.c
index 25725b158eca..017fe04ebe2d 100644
--- a/drivers/staging/rtl8192e/dot11d.c
+++ b/drivers/staging/rtl8192e/dot11d.c
@@ -11,7 +11,7 @@
*
* Contact Information:
* wlanfae <wlanfae@realtek.com>
-******************************************************************************/
+ ******************************************************************************/
#include "dot11d.h"
struct channel_list {
diff --git a/drivers/staging/rtl8192e/rtl8192e/Makefile b/drivers/staging/rtl8192e/rtl8192e/Makefile
index a2c4fb4ba1af..176a4a2b8b20 100644
--- a/drivers/staging/rtl8192e/rtl8192e/Makefile
+++ b/drivers/staging/rtl8192e/rtl8192e/Makefile
@@ -16,5 +16,3 @@ r8192e_pci-objs := \
rtl_wx.o \
obj-$(CONFIG_RTL8192E) += r8192e_pci.o
-
-ccflags-y += -D__CHECK_ENDIAN__
diff --git a/drivers/staging/rtl8192e/rtl8192e/r8192E_cmdpkt.c b/drivers/staging/rtl8192e/rtl8192e/r8192E_cmdpkt.c
index f9003a28cae2..757ffd4f2f89 100644
--- a/drivers/staging/rtl8192e/rtl8192e/r8192E_cmdpkt.c
+++ b/drivers/staging/rtl8192e/rtl8192e/r8192E_cmdpkt.c
@@ -49,7 +49,7 @@ bool rtl92e_send_cmd_pkt(struct net_device *dev, u32 type, const void *data,
else
skb = dev_alloc_skb(frag_length + 4);
- if (skb == NULL) {
+ if (!skb) {
rt_status = false;
goto Failed;
}
diff --git a/drivers/staging/rtl8192e/rtl8192e/r8192E_firmware.c b/drivers/staging/rtl8192e/rtl8192e/r8192E_firmware.c
index 9aaa85526eb8..bbe399010be1 100644
--- a/drivers/staging/rtl8192e/rtl8192e/r8192E_firmware.c
+++ b/drivers/staging/rtl8192e/rtl8192e/r8192E_firmware.c
@@ -202,7 +202,5 @@ bool rtl92e_init_fw(struct net_device *dev)
download_firmware_fail:
netdev_err(dev, "%s: Failed to initialize firmware.\n", __func__);
- rt_status = false;
- return rt_status;
-
+ return false;
}
diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_core.c b/drivers/staging/rtl8192e/rtl8192e/rtl_core.c
index 4c30eea45f89..8a9172aa8178 100644
--- a/drivers/staging/rtl8192e/rtl8192e/rtl_core.c
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_core.c
@@ -367,7 +367,7 @@ static void _rtl92e_update_cap(struct net_device *dev, u16 cap)
}
}
-static struct rtllib_qos_parameters def_qos_parameters = {
+static const struct rtllib_qos_parameters def_qos_parameters = {
{cpu_to_le16(3), cpu_to_le16(3), cpu_to_le16(3), cpu_to_le16(3)},
{cpu_to_le16(7), cpu_to_le16(7), cpu_to_le16(7), cpu_to_le16(7)},
{2, 2, 2, 2},
@@ -2545,7 +2545,6 @@ static const struct net_device_ops rtl8192_netdev_ops = {
.ndo_set_rx_mode = _rtl92e_set_multicast,
.ndo_set_mac_address = _rtl92e_set_mac_adr,
.ndo_validate_addr = eth_validate_addr,
- .ndo_change_mtu = eth_change_mtu,
.ndo_start_xmit = rtllib_xmit,
};
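Dropping .ndo_change_mtu = eth_change_mtu here (and in r8192U_core.c below) lines up with the networking core now validating MTU changes against dev->min_mtu / dev->max_mtu in dev_set_mtu(), which made eth_change_mtu's fixed [68, 1500] check redundant. A driver wanting the old ethernet bounds explicitly would set them at init time — a sketch, not this driver's code:

#include <linux/netdevice.h>
#include <linux/if_ether.h>

static void set_mtu_bounds_sketch(struct net_device *dev)
{
	/* dev_set_mtu() rejects values outside [min_mtu, max_mtu] */
	dev->min_mtu = ETH_MIN_MTU;	/* 68 */
	dev->max_mtu = ETH_DATA_LEN;	/* 1500 */
}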
diff --git a/drivers/staging/rtl8192e/rtl819x_BAProc.c b/drivers/staging/rtl8192e/rtl819x_BAProc.c
index c7fd1b1653d6..20260af49ee7 100644
--- a/drivers/staging/rtl8192e/rtl819x_BAProc.c
+++ b/drivers/staging/rtl8192e/rtl819x_BAProc.c
@@ -11,7 +11,7 @@
*
* Contact Information:
* wlanfae <wlanfae@realtek.com>
-******************************************************************************/
+ ******************************************************************************/
#include <asm/byteorder.h>
#include <asm/unaligned.h>
#include <linux/etherdevice.h>
diff --git a/drivers/staging/rtl8192e/rtl819x_HTProc.c b/drivers/staging/rtl8192e/rtl819x_HTProc.c
index dd9c0c868361..cded0f43cd33 100644
--- a/drivers/staging/rtl8192e/rtl819x_HTProc.c
+++ b/drivers/staging/rtl8192e/rtl819x_HTProc.c
@@ -11,7 +11,7 @@
*
* Contact Information:
* wlanfae <wlanfae@realtek.com>
-******************************************************************************/
+ ******************************************************************************/
#include "rtllib.h"
#include "rtl819x_HT.h"
u8 MCS_FILTER_ALL[16] = {
diff --git a/drivers/staging/rtl8192e/rtl819x_TSProc.c b/drivers/staging/rtl8192e/rtl819x_TSProc.c
index a966a8e490ab..48bbd9e8a52f 100644
--- a/drivers/staging/rtl8192e/rtl819x_TSProc.c
+++ b/drivers/staging/rtl8192e/rtl819x_TSProc.c
@@ -11,7 +11,7 @@
*
* Contact Information:
* wlanfae <wlanfae@realtek.com>
-******************************************************************************/
+ ******************************************************************************/
#include "rtllib.h"
#include <linux/etherdevice.h>
#include "rtl819x_TS.h"
diff --git a/drivers/staging/rtl8192e/rtllib_rx.c b/drivers/staging/rtl8192e/rtllib_rx.c
index c743182b933e..e5ba7d1a809f 100644
--- a/drivers/staging/rtl8192e/rtllib_rx.c
+++ b/drivers/staging/rtl8192e/rtllib_rx.c
@@ -130,7 +130,7 @@ rtllib_frag_cache_get(struct rtllib_device *ieee,
ETH_ALEN /* WDS */ +
/* QOS Control */
(RTLLIB_QOS_HAS_SEQ(fc) ? 2 : 0));
- if (skb == NULL)
+ if (!skb)
return NULL;
entry = &ieee->frag_cache[tid][ieee->frag_next_idx[tid]];
@@ -986,7 +986,7 @@ static void rtllib_rx_extract_addr(struct rtllib_device *ieee,
ether_addr_copy(src, hdr->addr4);
ether_addr_copy(bssid, ieee->current_network.bssid);
break;
- case 0:
+ default:
ether_addr_copy(dst, hdr->addr1);
ether_addr_copy(src, hdr->addr2);
ether_addr_copy(bssid, hdr->addr3);
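The `case 0:` → `default:` switches here, and the matching ones in the rtl8192u copies below, make the ToDS/FromDS dispatch exhaustive rather than depending on the remaining combination being literally zero. The standard 802.11 address mapping, using mainline constants in place of the rtllib ones (a sketch, not this driver's function):

#include <linux/ieee80211.h>
#include <linux/etherdevice.h>

static void extract_addrs_sketch(__le16 frame_control,
				 const struct ieee80211_hdr *hdr,
				 u8 *dst, u8 *src)
{
	switch (le16_to_cpu(frame_control) &
		(IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS)) {
	case IEEE80211_FCTL_FROMDS:		/* frame from the AP */
		ether_addr_copy(dst, hdr->addr1);
		ether_addr_copy(src, hdr->addr3);
		break;
	case IEEE80211_FCTL_TODS:		/* frame to the AP */
		ether_addr_copy(dst, hdr->addr3);
		ether_addr_copy(src, hdr->addr2);
		break;
	case IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS:
		/* 4-address (WDS) frame */
		ether_addr_copy(dst, hdr->addr3);
		ether_addr_copy(src, hdr->addr4);
		break;
	default:				/* neither bit set: IBSS */
		ether_addr_copy(dst, hdr->addr1);
		ether_addr_copy(src, hdr->addr2);
		break;
	}
}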
@@ -1201,6 +1201,7 @@ static int rtllib_rx_decrypt(struct rtllib_device *ieee, struct sk_buff *skb,
if (crypt && !(fc & RTLLIB_FCTL_WEP) &&
rtllib_is_eapol_frame(ieee, skb, hdrlen)) {
struct eapol *eap = (struct eapol *)(skb->data + 24);
+
netdev_dbg(ieee->dev, "RX: IEEE 802.1X EAPOL frame: %s\n",
eap_get_type(eap->type));
}
@@ -1430,7 +1431,7 @@ static int rtllib_rx_InfraAdhoc(struct rtllib_device *ieee, struct sk_buff *skb,
/* skb: hdr + (possible reassembled) full plaintext payload */
payload = skb->data + hdrlen;
rxb = kmalloc(sizeof(struct rtllib_rxb), GFP_ATOMIC);
- if (rxb == NULL)
+ if (!rxb)
goto rx_dropped;
/* to parse amsdu packets */
diff --git a/drivers/staging/rtl8192e/rtllib_softmac.c b/drivers/staging/rtl8192e/rtllib_softmac.c
index da74dc49b95e..1430ba27b049 100644
--- a/drivers/staging/rtl8192e/rtllib_softmac.c
+++ b/drivers/staging/rtl8192e/rtllib_softmac.c
@@ -1524,6 +1524,7 @@ static void rtllib_associate_complete_wq(void *data)
struct rtllib_device,
associate_complete_wq);
struct rt_pwr_save_ctrl *pPSC = &(ieee->PowerSaveControl);
+
netdev_info(ieee->dev, "Associated successfully\n");
if (!ieee->is_silent_reset) {
netdev_info(ieee->dev, "normal associate\n");
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_tkip.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_tkip.c
index 6fa96d57d316..e68850897adf 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_tkip.c
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_tkip.c
@@ -553,7 +553,7 @@ static void michael_mic_hdr(struct sk_buff *skb, u8 *hdr)
memcpy(hdr, hdr11->addr3, ETH_ALEN); /* DA */
memcpy(hdr + ETH_ALEN, hdr11->addr4, ETH_ALEN); /* SA */
break;
- case 0:
+ default:
memcpy(hdr, hdr11->addr1, ETH_ALEN); /* DA */
memcpy(hdr + ETH_ALEN, hdr11->addr2, ETH_ALEN); /* SA */
break;
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c
index 89cbc077a48d..82f654305414 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c
@@ -129,7 +129,7 @@ ieee80211_frag_cache_get(struct ieee80211_device *ieee,
8 /* WEP */ +
ETH_ALEN /* WDS */ +
(IEEE80211_QOS_HAS_SEQ(fc)?2:0) /* QOS Control */);
- if (skb == NULL)
+ if (!skb)
return NULL;
entry = &ieee->frag_cache[tid][ieee->frag_next_idx[tid]];
@@ -1079,7 +1079,7 @@ int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb,
memcpy(src, hdr->addr4, ETH_ALEN);
memcpy(bssid, ieee->current_network.bssid, ETH_ALEN);
break;
- case 0:
+ default:
memcpy(dst, hdr->addr1, ETH_ALEN);
memcpy(src, hdr->addr2, ETH_ALEN);
memcpy(bssid, hdr->addr3, ETH_ALEN);
diff --git a/drivers/staging/rtl8192u/r8192U_core.c b/drivers/staging/rtl8192u/r8192U_core.c
index 457eeb5f5239..fdb03dccb449 100644
--- a/drivers/staging/rtl8192u/r8192U_core.c
+++ b/drivers/staging/rtl8192u/r8192U_core.c
@@ -4930,7 +4930,6 @@ static const struct net_device_ops rtl8192_netdev_ops = {
.ndo_set_rx_mode = r8192_set_multicast,
.ndo_set_mac_address = r8192_set_mac_adr,
.ndo_validate_addr = eth_validate_addr,
- .ndo_change_mtu = eth_change_mtu,
.ndo_start_xmit = ieee80211_xmit,
};
diff --git a/drivers/staging/rtl8712/osdep_service.h b/drivers/staging/rtl8712/osdep_service.h
index c9ea50daffff..b8a170978434 100644
--- a/drivers/staging/rtl8712/osdep_service.h
+++ b/drivers/staging/rtl8712/osdep_service.h
@@ -63,15 +63,6 @@ static inline u32 end_of_queue_search(struct list_head *head,
return (head == plist);
}
-static inline void sleep_schedulable(int ms)
-{
- u32 delta;
-
- delta = msecs_to_jiffies(ms);/*(ms)*/
- set_current_state(TASK_INTERRUPTIBLE);
- schedule_timeout(delta);
-}
-
static inline void flush_signals_thread(void)
{
if (signal_pending(current))
diff --git a/drivers/staging/rtl8712/rtl8712_hal.h b/drivers/staging/rtl8712/rtl8712_hal.h
index 57d5d2db3c77..84456bb560ef 100644
--- a/drivers/staging/rtl8712/rtl8712_hal.h
+++ b/drivers/staging/rtl8712/rtl8712_hal.h
@@ -68,14 +68,14 @@ struct fw_priv { /*8-bytes alignment required*/
unsigned char signature_0; /*0x12: CE product, 0x92: IT product*/
unsigned char signature_1; /*0x87: CE product, 0x81: IT product*/
unsigned char hci_sel; /*0x81: PCI-AP, 01:PCIe, 02: 92S-U, 0x82: USB-AP,
- * 0x12: 72S-U, 03:SDIO
- */
+ * 0x12: 72S-U, 03:SDIO
+ */
unsigned char chip_version; /*the same value as register value*/
unsigned char customer_ID_0; /*customer ID low byte*/
unsigned char customer_ID_1; /*customer ID high byte*/
unsigned char rf_config; /*0x11: 1T1R, 0x12: 1T2R, 0x92: 1T2R turbo,
- * 0x22: 2T2R
- */
+ * 0x22: 2T2R
+ */
unsigned char usb_ep_num; /* 4: 4EP, 6: 6EP, 11: 11EP*/
/*--- long word 1 ----*/
unsigned char regulatory_class_0; /*regulatory class bit map 0*/
@@ -99,8 +99,8 @@ struct fw_priv { /*8-bytes alignment required*/
unsigned char qos_en; /*1: QoS enable*/
unsigned char bw_40MHz_en; /*1: 40MHz BW enable*/
unsigned char AMSDU2AMPDU_en; /*1: 4181 convert AMSDU to AMPDU,
- * 0: disable
- */
+ * 0: disable
+ */
unsigned char AMPDU_en; /*1: 11n AMPDU enable*/
unsigned char rate_control_offload; /*1: FW offloads,0: driver handles*/
unsigned char aggregation_offload; /*1: FW offloads,0: driver handles*/
diff --git a/drivers/staging/rtl8712/rtl8712_led.c b/drivers/staging/rtl8712/rtl8712_led.c
index a8e237e480c9..317aeeed38e8 100644
--- a/drivers/staging/rtl8712/rtl8712_led.c
+++ b/drivers/staging/rtl8712/rtl8712_led.c
@@ -355,7 +355,7 @@ static void SwLedBlink1(struct LED_871x *pLed)
}
pLed->bLedScanBlinkInProgress = false;
} else {
- if (pLed->bLedOn)
+ if (pLed->bLedOn)
pLed->BlinkingLedState = LED_STATE_OFF;
else
pLed->BlinkingLedState = LED_STATE_ON;
@@ -390,7 +390,7 @@ static void SwLedBlink1(struct LED_871x *pLed)
pLed->BlinkTimes = 0;
pLed->bLedBlinkInProgress = false;
} else {
- if (pLed->bLedOn)
+ if (pLed->bLedOn)
pLed->BlinkingLedState = LED_STATE_OFF;
else
pLed->BlinkingLedState = LED_STATE_ON;
@@ -460,7 +460,7 @@ static void SwLedBlink2(struct LED_871x *pLed)
}
pLed->bLedScanBlinkInProgress = false;
} else {
- if (pLed->bLedOn)
+ if (pLed->bLedOn)
pLed->BlinkingLedState = LED_STATE_OFF;
else
pLed->BlinkingLedState = LED_STATE_ON;
@@ -667,7 +667,7 @@ static void SwLedBlink4(struct LED_871x *pLed)
msecs_to_jiffies(LED_BLINK_NO_LINK_INTERVAL_ALPHA));
pLed->bLedBlinkInProgress = false;
} else {
- if (pLed->bLedOn)
+ if (pLed->bLedOn)
pLed->BlinkingLedState = LED_STATE_OFF;
else
pLed->BlinkingLedState = LED_STATE_ON;
@@ -764,7 +764,7 @@ static void SwLedBlink5(struct LED_871x *pLed)
msecs_to_jiffies(LED_BLINK_FASTER_INTERVAL_ALPHA));
pLed->bLedBlinkInProgress = false;
} else {
- if (pLed->bLedOn)
+ if (pLed->bLedOn)
pLed->BlinkingLedState = LED_STATE_OFF;
else
pLed->BlinkingLedState = LED_STATE_ON;
@@ -946,7 +946,7 @@ static void SwLedControlMode1(struct _adapter *padapter,
if (psitesurveyctrl->traffic_busy &&
check_fwstate(pmlmepriv, _FW_LINKED))
; /* dummy branch */
- else if (!pLed->bLedScanBlinkInProgress) {
+ else if (!pLed->bLedScanBlinkInProgress) {
if (IS_LED_WPS_BLINKING(pLed))
return;
if (pLed->bLedNoLinkBlinkInProgress) {
@@ -970,7 +970,7 @@ static void SwLedControlMode1(struct _adapter *padapter,
pLed->BlinkingLedState = LED_STATE_ON;
mod_timer(&pLed->BlinkTimer, jiffies +
msecs_to_jiffies(LED_BLINK_SCAN_INTERVAL_ALPHA));
- }
+ }
break;
case LED_CTL_TX:
case LED_CTL_RX:
@@ -1000,7 +1000,7 @@ static void SwLedControlMode1(struct _adapter *padapter,
case LED_CTL_START_WPS: /*wait until xinpin finish */
case LED_CTL_START_WPS_BOTTON:
- if (!pLed->bLedWPSBlinkInProgress) {
+ if (!pLed->bLedWPSBlinkInProgress) {
if (pLed->bLedNoLinkBlinkInProgress) {
del_timer(&pLed->BlinkTimer);
pLed->bLedNoLinkBlinkInProgress = false;
@@ -1113,9 +1113,9 @@ static void SwLedControlMode2(struct _adapter *padapter,
switch (LedAction) {
case LED_CTL_SITE_SURVEY:
- if (pmlmepriv->sitesurveyctrl.traffic_busy)
+ if (pmlmepriv->sitesurveyctrl.traffic_busy)
; /* dummy branch */
- else if (!pLed->bLedScanBlinkInProgress) {
+ else if (!pLed->bLedScanBlinkInProgress) {
if (IS_LED_WPS_BLINKING(pLed))
return;
@@ -1132,7 +1132,7 @@ static void SwLedControlMode2(struct _adapter *padapter,
pLed->BlinkingLedState = LED_STATE_ON;
mod_timer(&pLed->BlinkTimer, jiffies +
msecs_to_jiffies(LED_BLINK_SCAN_INTERVAL_ALPHA));
- }
+ }
break;
case LED_CTL_TX:
@@ -1186,7 +1186,7 @@ static void SwLedControlMode2(struct _adapter *padapter,
pLed->BlinkingLedState = LED_STATE_ON;
mod_timer(&pLed->BlinkTimer,
jiffies + msecs_to_jiffies(0));
- }
+ }
break;
case LED_CTL_STOP_WPS:
diff --git a/drivers/staging/rtl8712/rtl871x_cmd.c b/drivers/staging/rtl8712/rtl871x_cmd.c
index b7ee5e63af33..04638f1e4e88 100644
--- a/drivers/staging/rtl8712/rtl871x_cmd.c
+++ b/drivers/staging/rtl8712/rtl871x_cmd.c
@@ -72,8 +72,11 @@ static sint _init_cmd_priv(struct cmd_priv *pcmdpriv)
((addr_t)(pcmdpriv->cmd_allocated_buf) &
(CMDBUFF_ALIGN_SZ - 1));
pcmdpriv->rsp_allocated_buf = kmalloc(MAX_RSPSZ + 4, GFP_ATOMIC);
- if (!pcmdpriv->rsp_allocated_buf)
+ if (!pcmdpriv->rsp_allocated_buf) {
+ kfree(pcmdpriv->cmd_allocated_buf);
+ pcmdpriv->cmd_allocated_buf = NULL;
return _FAIL;
+ }
pcmdpriv->rsp_buf = pcmdpriv->rsp_allocated_buf + 4 -
((addr_t)(pcmdpriv->rsp_allocated_buf) & 3);
pcmdpriv->cmd_issued_cnt = 0;
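This hunk is a genuine leak fix: when the rsp buffer allocation fails, the cmd buffer allocated just above it must be released before returning. Reduced to its essentials (sizes as in the driver; the same pattern recurs in the rtl871x_xmit.c fix further down):

static sint init_cmd_bufs_sketch(struct cmd_priv *pcmdpriv)
{
	pcmdpriv->cmd_allocated_buf = kmalloc(MAX_CMDSZ + CMDBUFF_ALIGN_SZ,
					      GFP_ATOMIC);
	if (!pcmdpriv->cmd_allocated_buf)
		return _FAIL;

	pcmdpriv->rsp_allocated_buf = kmalloc(MAX_RSPSZ + 4, GFP_ATOMIC);
	if (!pcmdpriv->rsp_allocated_buf) {
		kfree(pcmdpriv->cmd_allocated_buf);	/* unwind, don't leak */
		pcmdpriv->cmd_allocated_buf = NULL;
		return _FAIL;
	}
	return _SUCCESS;
}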
diff --git a/drivers/staging/rtl8712/rtl871x_ioctl_linux.c b/drivers/staging/rtl8712/rtl871x_ioctl_linux.c
index 475e7904fe45..590acb5aea3d 100644
--- a/drivers/staging/rtl8712/rtl871x_ioctl_linux.c
+++ b/drivers/staging/rtl8712/rtl871x_ioctl_linux.c
@@ -588,9 +588,9 @@ static int r871x_set_wpa_ie(struct _adapter *padapter, char *pie,
netdev_info(padapter->pnetdev, "r8712u: SET WPS_IE, wps_phase==true\n");
cnt += buf[cnt + 1] + 2;
break;
- } else {
- cnt += buf[cnt + 1] + 2;
}
+
+ cnt += buf[cnt + 1] + 2;
}
}
}
diff --git a/drivers/staging/rtl8712/rtl871x_ioctl_set.c b/drivers/staging/rtl8712/rtl871x_ioctl_set.c
index 0aaf2aab6dd0..01a150446f5a 100644
--- a/drivers/staging/rtl8712/rtl871x_ioctl_set.c
+++ b/drivers/staging/rtl8712/rtl871x_ioctl_set.c
@@ -139,9 +139,10 @@ u8 r8712_set_802_11_bssid(struct _adapter *padapter, u8 *bssid)
if (!memcmp(&pmlmepriv->cur_network.network.MacAddress, bssid,
ETH_ALEN)) {
if (!check_fwstate(pmlmepriv, WIFI_STATION_STATE))
- goto _Abort_Set_BSSID; /* driver is in
- * WIFI_ADHOC_MASTER_STATE
- */
+ /* driver is in
+ * WIFI_ADHOC_MASTER_STATE
+ */
+ goto _Abort_Set_BSSID;
} else {
r8712_disassoc_cmd(padapter);
if (check_fwstate(pmlmepriv, _FW_LINKED))
@@ -203,9 +204,10 @@ void r8712_set_802_11_ssid(struct _adapter *padapter,
WIFI_ADHOC_STATE);
}
} else {
- goto _Abort_Set_SSID; /* driver is in
- * WIFI_ADHOC_MASTER_STATE
- */
+ /* driver is in
+ * WIFI_ADHOC_MASTER_STATE
+ */
+ goto _Abort_Set_SSID;
}
}
} else {
diff --git a/drivers/staging/rtl8712/rtl871x_mlme.c b/drivers/staging/rtl8712/rtl871x_mlme.c
index c1feef3da26c..35cbdc71cad4 100644
--- a/drivers/staging/rtl8712/rtl871x_mlme.c
+++ b/drivers/staging/rtl8712/rtl871x_mlme.c
@@ -137,11 +137,10 @@ static void free_network_nolock(struct mlme_priv *pmlmepriv,
}
-/*
- return the wlan_network with the matching addr
- Shall be called under atomic context...
- to avoid possible racing condition...
-*/
+/* return the wlan_network with the matching addr
+ * Shall be called under atomic context...
+ * to avoid possible racing condition...
+ */
static struct wlan_network *_r8712_find_network(struct __queue *scanned_queue,
u8 *addr)
{
@@ -239,11 +238,10 @@ void r8712_free_network_queue(struct _adapter *dev)
}
/*
- return the wlan_network with the matching addr
-
- Shall be called under atomic context...
- to avoid possible racing condition...
-*/
+ * return the wlan_network with the matching addr
+ * Shall be called under atomic context...
+ * to avoid possible racing condition...
+ */
static struct wlan_network *r8712_find_network(struct __queue *scanned_queue,
u8 *addr)
{
@@ -369,9 +367,7 @@ static void update_current_network(struct _adapter *adapter,
}
}
-/*
-Caller must hold pmlmepriv->lock first.
-*/
+/* Caller must hold pmlmepriv->lock first */
static void update_scanned_network(struct _adapter *adapter,
struct wlan_bssid_ex *target)
{
@@ -651,8 +647,8 @@ void r8712_free_assoc_resources(struct _adapter *adapter)
}
/*
-*r8712_indicate_connect: the caller has to lock pmlmepriv->lock
-*/
+ * r8712_indicate_connect: the caller has to lock pmlmepriv->lock
+ */
void r8712_indicate_connect(struct _adapter *padapter)
{
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
@@ -668,8 +664,8 @@ void r8712_indicate_connect(struct _adapter *padapter)
/*
-*r8712_ind_disconnect: the caller has to lock pmlmepriv->lock
-*/
+ * r8712_ind_disconnect: the caller has to lock pmlmepriv->lock
+ */
void r8712_ind_disconnect(struct _adapter *padapter)
{
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
@@ -1347,8 +1343,8 @@ static int SecIsInPMKIDList(struct _adapter *Adapter, u8 *bssid)
(!memcmp(psecuritypriv->PMKIDList[i].Bssid,
bssid, ETH_ALEN)))
break;
- else
- i++;
+ i++;
+
} while (i < NUM_PMKID_CACHE);
if (i == NUM_PMKID_CACHE) {
diff --git a/drivers/staging/rtl8712/rtl871x_mlme.h b/drivers/staging/rtl8712/rtl871x_mlme.h
index ddaaab058b2f..53a23234c598 100644
--- a/drivers/staging/rtl8712/rtl871x_mlme.h
+++ b/drivers/staging/rtl8712/rtl871x_mlme.h
@@ -162,24 +162,6 @@ static inline void clr_fwstate(struct mlme_priv *pmlmepriv, sint state)
spin_unlock_irqrestore(&pmlmepriv->lock, irqL);
}
-static inline void up_scanned_network(struct mlme_priv *pmlmepriv)
-{
- unsigned long irqL;
-
- spin_lock_irqsave(&pmlmepriv->lock, irqL);
- pmlmepriv->num_of_scanned++;
- spin_unlock_irqrestore(&pmlmepriv->lock, irqL);
-}
-
-static inline void down_scanned_network(struct mlme_priv *pmlmepriv)
-{
- unsigned long irqL;
-
- spin_lock_irqsave(&pmlmepriv->lock, irqL);
- pmlmepriv->num_of_scanned--;
- spin_unlock_irqrestore(&pmlmepriv->lock, irqL);
-}
-
static inline void set_scanned_network_val(struct mlme_priv *pmlmepriv,
sint val)
{
diff --git a/drivers/staging/rtl8712/rtl871x_pwrctrl.c b/drivers/staging/rtl8712/rtl871x_pwrctrl.c
index d464c136dd98..e42fc1404c35 100644
--- a/drivers/staging/rtl8712/rtl871x_pwrctrl.c
+++ b/drivers/staging/rtl8712/rtl871x_pwrctrl.c
@@ -190,19 +190,15 @@ void r8712_init_pwrctrl_priv(struct _adapter *padapter)
}
/*
-Caller: r8712_cmd_thread
-
-Check if the fw_pwrstate is okay for issuing cmd.
-If not (cpwm should be is less than P2 state), then the sub-routine
-will raise the cpwm to be greater than or equal to P2.
-
-Calling Context: Passive
-
-Return Value:
-
-_SUCCESS: r8712_cmd_thread can issue cmds to firmware afterwards.
-_FAIL: r8712_cmd_thread can not do anything.
-*/
+ * Caller: r8712_cmd_thread
+ * Check if the fw_pwrstate is okay for issuing cmd.
+ * If not (cpwm should be is less than P2 state), then the sub-routine
+ * will raise the cpwm to be greater than or equal to P2.
+ * Calling Context: Passive
+ * Return Value:
+ * _SUCCESS: r8712_cmd_thread can issue cmds to firmware afterwards.
+ * _FAIL: r8712_cmd_thread can not do anything.
+ */
sint r8712_register_cmd_alive(struct _adapter *padapter)
{
uint res = _SUCCESS;
@@ -219,13 +215,11 @@ sint r8712_register_cmd_alive(struct _adapter *padapter)
}
/*
-Caller: ISR
-
-If ISR's txdone,
-No more pkts for TX,
-Then driver shall call this fun. to power down firmware again.
-*/
-
+ * Caller: ISR
+ * If ISR's txdone,
+ * No more pkts for TX,
+ * Then driver shall call this fun. to power down firmware again.
+ */
void r8712_unregister_cmd_alive(struct _adapter *padapter)
{
struct pwrctrl_priv *pwrctrl = &padapter->pwrctrlpriv;
diff --git a/drivers/staging/rtl8712/rtl871x_recv.c b/drivers/staging/rtl8712/rtl871x_recv.c
index cbd2e51ba42b..35c721a50598 100644
--- a/drivers/staging/rtl8712/rtl871x_recv.c
+++ b/drivers/staging/rtl8712/rtl871x_recv.c
@@ -125,13 +125,10 @@ union recv_frame *r8712_alloc_recvframe(struct __queue *pfree_recv_queue)
}
/*
-caller : defrag; recvframe_chk_defrag in recv_thread (passive)
-pframequeue: defrag_queue : will be accessed in recv_thread (passive)
-
-using spin_lock to protect
-
-*/
-
+ * caller : defrag; recvframe_chk_defrag in recv_thread (passive)
+ * pframequeue: defrag_queue : will be accessed in recv_thread (passive)
+ * using spin_lock to protect
+ */
void r8712_free_recvframe_queue(struct __queue *pframequeue,
struct __queue *pfree_recv_queue)
{
@@ -405,7 +402,7 @@ static sint ap2sta_data_frame(struct _adapter *adapter,
}
/* filter packets that SA is myself or multicast or broadcast */
- if (!memcmp(myhwaddr, pattrib->src, ETH_ALEN))
+ if (!memcmp(myhwaddr, pattrib->src, ETH_ALEN))
return _FAIL;
/* da should be for me */
diff --git a/drivers/staging/rtl8712/rtl871x_security.c b/drivers/staging/rtl8712/rtl871x_security.c
index 09242425dad4..a7f04a4b089d 100644
--- a/drivers/staging/rtl8712/rtl871x_security.c
+++ b/drivers/staging/rtl8712/rtl871x_security.c
@@ -159,8 +159,8 @@ static u32 getcrc32(u8 *buf, u32 len)
}
/*
- Need to consider the fragment situation
-*/
+ * Need to consider the fragment situation
+ */
void r8712_wep_encrypt(struct _adapter *padapter, u8 *pxmitframe)
{ /* exclude ICV */
unsigned char crc[4];
@@ -467,22 +467,22 @@ static const unsigned short Sbox1[2][256] = {/* Sbox for hash (can be in ROM) */
};
/*
-**********************************************************************
-* Routine: Phase 1 -- generate P1K, given TA, TK, IV32
-*
-* Inputs:
-* tk[] = temporal key [128 bits]
-* ta[] = transmitter's MAC address [ 48 bits]
-* iv32 = upper 32 bits of IV [ 32 bits]
-* Output:
-* p1k[] = Phase 1 key [ 80 bits]
-*
-* Note:
-* This function only needs to be called every 2**16 packets,
-* although in theory it could be called every packet.
-*
-**********************************************************************
-*/
+ **********************************************************************
+ * Routine: Phase 1 -- generate P1K, given TA, TK, IV32
+ *
+ * Inputs:
+ * tk[] = temporal key [128 bits]
+ * ta[] = transmitter's MAC address [ 48 bits]
+ * iv32 = upper 32 bits of IV [ 32 bits]
+ * Output:
+ * p1k[] = Phase 1 key [ 80 bits]
+ *
+ * Note:
+ * This function only needs to be called every 2**16 packets,
+ * although in theory it could be called every packet.
+ *
+ **********************************************************************
+ */
static void phase1(u16 *p1k, const u8 *tk, const u8 *ta, u32 iv32)
{
sint i;
@@ -506,28 +506,28 @@ static void phase1(u16 *p1k, const u8 *tk, const u8 *ta, u32 iv32)
}
/*
-**********************************************************************
-* Routine: Phase 2 -- generate RC4KEY, given TK, P1K, IV16
-*
-* Inputs:
-* tk[] = Temporal key [128 bits]
-* p1k[] = Phase 1 output key [ 80 bits]
-* iv16 = low 16 bits of IV counter [ 16 bits]
-* Output:
-* rc4key[] = the key used to encrypt the packet [128 bits]
-*
-* Note:
-* The value {TA,IV32,IV16} for Phase1/Phase2 must be unique
-* across all packets using the same key TK value. Then, for a
-* given value of TK[], this TKIP48 construction guarantees that
-* the final RC4KEY value is unique across all packets.
-*
-* Suggested implementation optimization: if PPK[] is "overlaid"
-* appropriately on RC4KEY[], there is no need for the final
-* for loop below that copies the PPK[] result into RC4KEY[].
-*
-**********************************************************************
-*/
+ **********************************************************************
+ * Routine: Phase 2 -- generate RC4KEY, given TK, P1K, IV16
+ *
+ * Inputs:
+ * tk[] = Temporal key [128 bits]
+ * p1k[] = Phase 1 output key [ 80 bits]
+ * iv16 = low 16 bits of IV counter [ 16 bits]
+ * Output:
+ * rc4key[] = the key used to encrypt the packet [128 bits]
+ *
+ * Note:
+ * The value {TA,IV32,IV16} for Phase1/Phase2 must be unique
+ * across all packets using the same key TK value. Then, for a
+ * given value of TK[], this TKIP48 construction guarantees that
+ * the final RC4KEY value is unique across all packets.
+ *
+ * Suggested implementation optimization: if PPK[] is "overlaid"
+ * appropriately on RC4KEY[], there is no need for the final
+ * for loop below that copies the PPK[] result into RC4KEY[].
+ *
+ **********************************************************************
+ */
static void phase2(u8 *rc4key, const u8 *tk, const u16 *p1k, u16 iv16)
{
sint i;
diff --git a/drivers/staging/rtl8712/rtl871x_xmit.c b/drivers/staging/rtl8712/rtl871x_xmit.c
index be38364c8a7c..4ab82ba9bb3f 100644
--- a/drivers/staging/rtl8712/rtl871x_xmit.c
+++ b/drivers/staging/rtl8712/rtl871x_xmit.c
@@ -71,8 +71,8 @@ sint _r8712_init_xmit_priv(struct xmit_priv *pxmitpriv,
memset((unsigned char *)pxmitpriv, 0, sizeof(struct xmit_priv));
spin_lock_init(&pxmitpriv->lock);
/*
- Please insert all the queue initialization using _init_queue below
- */
+ *Please insert all the queue initialization using _init_queue below
+ */
pxmitpriv->adapter = padapter;
_init_queue(&pxmitpriv->be_pending);
_init_queue(&pxmitpriv->bk_pending);
@@ -83,10 +83,10 @@ sint _r8712_init_xmit_priv(struct xmit_priv *pxmitpriv,
_init_queue(&pxmitpriv->apsd_queue);
_init_queue(&pxmitpriv->free_xmit_queue);
/*
- Please allocate memory with the sz = (struct xmit_frame) * NR_XMITFRAME,
- and initialize free_xmit_frame below.
- Please also apply free_txobj to link_up all the xmit_frames...
- */
+ * Please allocate memory with the sz = (struct xmit_frame) * NR_XMITFRAME,
+ * and initialize free_xmit_frame below.
+ * Please also apply free_txobj to link_up all the xmit_frames...
+ */
pxmitpriv->pallocated_frame_buf = kmalloc(NR_XMITFRAME * sizeof(struct xmit_frame) + 4,
GFP_ATOMIC);
if (!pxmitpriv->pallocated_frame_buf) {
@@ -109,8 +109,8 @@ sint _r8712_init_xmit_priv(struct xmit_priv *pxmitpriv,
}
pxmitpriv->free_xmitframe_cnt = NR_XMITFRAME;
/*
- init xmit hw_txqueue
- */
+ * init xmit hw_txqueue
+ */
_r8712_init_hw_txqueue(&pxmitpriv->be_txqueue, BE_QUEUE_INX);
_r8712_init_hw_txqueue(&pxmitpriv->bk_txqueue, BK_QUEUE_INX);
_r8712_init_hw_txqueue(&pxmitpriv->vi_txqueue, VI_QUEUE_INX);
@@ -128,8 +128,11 @@ sint _r8712_init_xmit_priv(struct xmit_priv *pxmitpriv,
_init_queue(&pxmitpriv->pending_xmitbuf_queue);
pxmitpriv->pallocated_xmitbuf = kmalloc(NR_XMITBUFF * sizeof(struct xmit_buf) + 4,
GFP_ATOMIC);
- if (!pxmitpriv->pallocated_xmitbuf)
+ if (!pxmitpriv->pallocated_xmitbuf) {
+ kfree(pxmitpriv->pallocated_frame_buf);
+ pxmitpriv->pallocated_frame_buf = NULL;
return _FAIL;
+ }
pxmitpriv->pxmitbuf = pxmitpriv->pallocated_xmitbuf + 4 -
((addr_t)(pxmitpriv->pallocated_xmitbuf) & 3);
pxmitbuf = (struct xmit_buf *)pxmitpriv->pxmitbuf;
@@ -777,24 +780,23 @@ int r8712_free_xmitbuf(struct xmit_priv *pxmitpriv, struct xmit_buf *pxmitbuf)
}
/*
-Calling context:
-1. OS_TXENTRY
-2. RXENTRY (rx_thread or RX_ISR/RX_CallBack)
-
-If we turn on USE_RXTHREAD, then, no need for critical section.
-Otherwise, we must use _enter/_exit critical to protect free_xmit_queue...
-
-Must be very very cautious...
-
-*/
-
+ * Calling context:
+ * 1. OS_TXENTRY
+ * 2. RXENTRY (rx_thread or RX_ISR/RX_CallBack)
+ *
+ * If we turn on USE_RXTHREAD, then, no need for critical section.
+ * Otherwise, we must use _enter/_exit critical to protect free_xmit_queue...
+ *
+ * Must be very very cautious...
+ *
+ */
struct xmit_frame *r8712_alloc_xmitframe(struct xmit_priv *pxmitpriv)
{
/*
- Please remember to use all the osdep_service api,
- and lock/unlock or _enter/_exit critical to protect
- pfree_xmit_queue
- */
+ * Please remember to use all the osdep_service api,
+ * and lock/unlock or _enter/_exit critical to protect
+ * pfree_xmit_queue
+ */
unsigned long irqL;
struct xmit_frame *pxframe;
struct __queue *pfree_xmit_queue = &pxmitpriv->free_xmit_queue;
diff --git a/drivers/staging/rtl8712/rtl871x_xmit.h b/drivers/staging/rtl8712/rtl871x_xmit.h
index d899d0c6d3a6..40927277f498 100644
--- a/drivers/staging/rtl8712/rtl871x_xmit.h
+++ b/drivers/staging/rtl8712/rtl871x_xmit.h
@@ -261,12 +261,6 @@ struct xmit_priv {
uint free_xmitbuf_cnt;
};
-static inline struct __queue *get_free_xmit_queue(
- struct xmit_priv *pxmitpriv)
-{
- return &(pxmitpriv->free_xmit_queue);
-}
-
int r8712_free_xmitbuf(struct xmit_priv *pxmitpriv,
struct xmit_buf *pxmitbuf);
struct xmit_buf *r8712_alloc_xmitbuf(struct xmit_priv *pxmitpriv);
diff --git a/drivers/staging/rts5208/ms.c b/drivers/staging/rts5208/ms.c
index f27df0b4cb44..28d56c5d1449 100644
--- a/drivers/staging/rts5208/ms.c
+++ b/drivers/staging/rts5208/ms.c
@@ -432,31 +432,36 @@ static int ms_pull_ctl_disable(struct rtsx_chip *chip)
if (CHECK_PID(chip, 0x5208)) {
retval = rtsx_write_register(chip, CARD_PULL_CTL1, 0xFF,
- MS_D1_PD | MS_D2_PD | MS_CLK_PD | MS_D6_PD);
+ MS_D1_PD | MS_D2_PD | MS_CLK_PD |
+ MS_D6_PD);
if (retval) {
rtsx_trace(chip);
return retval;
}
retval = rtsx_write_register(chip, CARD_PULL_CTL2, 0xFF,
- MS_D3_PD | MS_D0_PD | MS_BS_PD | XD_D4_PD);
+ MS_D3_PD | MS_D0_PD | MS_BS_PD |
+ XD_D4_PD);
if (retval) {
rtsx_trace(chip);
return retval;
}
retval = rtsx_write_register(chip, CARD_PULL_CTL3, 0xFF,
- MS_D7_PD | XD_CE_PD | XD_CLE_PD | XD_CD_PU);
+ MS_D7_PD | XD_CE_PD | XD_CLE_PD |
+ XD_CD_PU);
if (retval) {
rtsx_trace(chip);
return retval;
}
retval = rtsx_write_register(chip, CARD_PULL_CTL4, 0xFF,
- XD_RDY_PD | SD_D3_PD | SD_D2_PD | XD_ALE_PD);
+ XD_RDY_PD | SD_D3_PD | SD_D2_PD |
+ XD_ALE_PD);
if (retval) {
rtsx_trace(chip);
return retval;
}
retval = rtsx_write_register(chip, CARD_PULL_CTL5, 0xFF,
- MS_INS_PU | SD_WP_PD | SD_CD_PU | SD_CMD_PD);
+ MS_INS_PU | SD_WP_PD | SD_CD_PU |
+ SD_CMD_PD);
if (retval) {
rtsx_trace(chip);
return retval;
@@ -507,17 +512,17 @@ static int ms_pull_ctl_enable(struct rtsx_chip *chip)
if (CHECK_PID(chip, 0x5208)) {
rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL1, 0xFF,
- MS_D1_PD | MS_D2_PD | MS_CLK_NP | MS_D6_PD);
+ MS_D1_PD | MS_D2_PD | MS_CLK_NP | MS_D6_PD);
rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL2, 0xFF,
- MS_D3_PD | MS_D0_PD | MS_BS_NP | XD_D4_PD);
+ MS_D3_PD | MS_D0_PD | MS_BS_NP | XD_D4_PD);
rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL3, 0xFF,
- MS_D7_PD | XD_CE_PD | XD_CLE_PD | XD_CD_PU);
+ MS_D7_PD | XD_CE_PD | XD_CLE_PD | XD_CD_PU);
rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL4, 0xFF,
- XD_RDY_PD | SD_D3_PD | SD_D2_PD | XD_ALE_PD);
+ XD_RDY_PD | SD_D3_PD | SD_D2_PD | XD_ALE_PD);
rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL5, 0xFF,
- MS_INS_PU | SD_WP_PD | SD_CD_PU | SD_CMD_PD);
+ MS_INS_PU | SD_WP_PD | SD_CD_PU | SD_CMD_PD);
rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL6, 0xFF,
- MS_D5_PD | MS_D4_PD);
+ MS_D5_PD | MS_D4_PD);
} else if (CHECK_PID(chip, 0x5288)) {
if (CHECK_BARO_PKG(chip, QFN)) {
rtsx_add_cmd(chip, WRITE_REG_CMD,
@@ -616,14 +621,20 @@ static int ms_prepare_reset(struct rtsx_chip *chip)
if (chip->asic_code) {
retval = rtsx_write_register(chip, MS_CFG, 0xFF,
- SAMPLE_TIME_RISING | PUSH_TIME_DEFAULT | NO_EXTEND_TOGGLE | MS_BUS_WIDTH_1);
+ SAMPLE_TIME_RISING |
+ PUSH_TIME_DEFAULT |
+ NO_EXTEND_TOGGLE |
+ MS_BUS_WIDTH_1);
if (retval) {
rtsx_trace(chip);
return retval;
}
} else {
retval = rtsx_write_register(chip, MS_CFG, 0xFF,
- SAMPLE_TIME_FALLING | PUSH_TIME_DEFAULT | NO_EXTEND_TOGGLE | MS_BUS_WIDTH_1);
+ SAMPLE_TIME_FALLING |
+ PUSH_TIME_DEFAULT |
+ NO_EXTEND_TOGGLE |
+ MS_BUS_WIDTH_1);
if (retval) {
rtsx_trace(chip);
return retval;
@@ -665,7 +676,7 @@ static int ms_identify_media_type(struct rtsx_chip *chip, int switch_8bit_bus)
for (i = 0; i < MS_MAX_RETRY_COUNT; i++) {
retval = ms_transfer_tpc(chip, MS_TM_READ_BYTES, READ_REG,
- 6, NO_WAIT_INT);
+ 6, NO_WAIT_INT);
if (retval == STATUS_SUCCESS)
break;
}
@@ -765,7 +776,7 @@ static int ms_confirm_cpu_startup(struct rtsx_chip *chip)
for (i = 0; i < MS_MAX_RETRY_COUNT; i++) {
retval = ms_read_bytes(chip, GET_INT, 1,
- NO_WAIT_INT, &val, 1);
+ NO_WAIT_INT, &val, 1);
if (retval == STATUS_SUCCESS)
break;
}
@@ -794,9 +805,9 @@ static int ms_confirm_cpu_startup(struct rtsx_chip *chip)
}
if (val & INT_REG_ERR) {
- if (val & INT_REG_CMDNK)
+ if (val & INT_REG_CMDNK) {
chip->card_wp |= (MS_CARD);
- else {
+ } else {
rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -861,7 +872,7 @@ static int ms_switch_8bit_bus(struct rtsx_chip *chip)
for (i = 0; i < MS_MAX_RETRY_COUNT; i++) {
retval = ms_transfer_tpc(chip, MS_TM_READ_BYTES, GET_INT,
- 1, NO_WAIT_INT);
+ 1, NO_WAIT_INT);
if (retval != STATUS_SUCCESS) {
rtsx_trace(chip);
return STATUS_FAIL;
@@ -1061,8 +1072,8 @@ static int ms_read_attribute_info(struct rtsx_chip *chip)
return STATUS_FAIL;
}
retval = ms_transfer_data(chip, MS_TM_AUTO_READ,
- PRO_READ_LONG_DATA, 0x40, WAIT_INT,
- 0, 0, buf, 64 * 512);
+ PRO_READ_LONG_DATA, 0x40, WAIT_INT,
+ 0, 0, buf, 64 * 512);
if (retval == STATUS_SUCCESS)
break;
@@ -1087,7 +1098,7 @@ static int ms_read_attribute_info(struct rtsx_chip *chip)
break;
retval = ms_transfer_tpc(chip, MS_TM_NORMAL_READ,
- PRO_READ_LONG_DATA, 0, WAIT_INT);
+ PRO_READ_LONG_DATA, 0, WAIT_INT);
if (retval != STATUS_SUCCESS) {
kfree(buf);
rtsx_trace(chip);
@@ -1121,7 +1132,7 @@ static int ms_read_attribute_info(struct rtsx_chip *chip)
#ifdef SUPPORT_MSXC
if ((buf[cur_addr_off + 8] == 0x10) ||
- (buf[cur_addr_off + 8] == 0x13)) {
+ (buf[cur_addr_off + 8] == 0x13)) {
#else
if (buf[cur_addr_off + 8] == 0x10) {
#endif
@@ -1264,7 +1275,7 @@ static int ms_read_attribute_info(struct rtsx_chip *chip)
if (device_type != 0x00) {
if ((device_type == 0x01) || (device_type == 0x02) ||
- (device_type == 0x03)) {
+ (device_type == 0x03)) {
chip->card_wp |= MS_CARD;
} else {
rtsx_trace(chip);
@@ -1298,7 +1309,7 @@ static int ms_read_attribute_info(struct rtsx_chip *chip)
#ifdef SUPPORT_MAGIC_GATE
static int mg_set_tpc_para_sub(struct rtsx_chip *chip,
- int type, u8 mg_entry_num);
+ int type, u8 mg_entry_num);
#endif
static int reset_ms_pro(struct rtsx_chip *chip)
@@ -1317,7 +1328,7 @@ static int reset_ms_pro(struct rtsx_chip *chip)
#endif
#ifdef XC_POWERCLASS
-Retry:
+retry:
#endif
retval = ms_pro_reset_flow(chip, 1);
if (retval != STATUS_SUCCESS) {
@@ -1365,10 +1376,10 @@ Retry:
change_power_class = power_class_mode;
if (change_power_class) {
retval = msxc_change_power(chip,
- change_power_class);
+ change_power_class);
if (retval != STATUS_SUCCESS) {
change_power_class--;
- goto Retry;
+ goto retry;
}
}
}
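The Retry: → retry: rename follows the kernel's lower-case-label convention; the surrounding XC_POWERCLASS logic is a step-down retry, roughly as below (a sketch of the control flow only, not the full reset path):

static int reset_power_fallback_sketch(struct rtsx_chip *chip,
				       u8 change_power_class)
{
retry:
	if (ms_pro_reset_flow(chip, 1) != STATUS_SUCCESS)
		return STATUS_FAIL;

	if (change_power_class &&
	    msxc_change_power(chip, change_power_class) != STATUS_SUCCESS) {
		change_power_class--;	/* try the next lower power class */
		goto retry;
	}
	return STATUS_SUCCESS;
}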
@@ -1418,14 +1429,14 @@ static int ms_read_status_reg(struct rtsx_chip *chip)
}
static int ms_read_extra_data(struct rtsx_chip *chip,
- u16 block_addr, u8 page_num, u8 *buf, int buf_len)
+ u16 block_addr, u8 page_num, u8 *buf, int buf_len)
{
struct ms_info *ms_card = &chip->ms_card;
int retval, i;
u8 val, data[10];
retval = ms_set_rw_reg_addr(chip, OverwriteFlag, MS_EXTRA_SIZE,
- SystemParm, 6);
+ SystemParm, 6);
if (retval != STATUS_SUCCESS) {
rtsx_trace(chip);
return STATUS_FAIL;
@@ -1488,7 +1499,8 @@ static int ms_read_extra_data(struct rtsx_chip *chip,
}
retval = ms_set_rw_reg_addr(chip, OverwriteFlag,
- MS_EXTRA_SIZE, SystemParm, 6);
+ MS_EXTRA_SIZE, SystemParm,
+ 6);
if (retval != STATUS_SUCCESS) {
rtsx_trace(chip);
return STATUS_FAIL;
@@ -1497,7 +1509,7 @@ static int ms_read_extra_data(struct rtsx_chip *chip,
}
retval = ms_read_bytes(chip, READ_REG, MS_EXTRA_SIZE, NO_WAIT_INT,
- data, MS_EXTRA_SIZE);
+ data, MS_EXTRA_SIZE);
if (retval != STATUS_SUCCESS) {
rtsx_trace(chip);
return STATUS_FAIL;
@@ -1512,8 +1524,8 @@ static int ms_read_extra_data(struct rtsx_chip *chip,
return STATUS_SUCCESS;
}
-static int ms_write_extra_data(struct rtsx_chip *chip,
- u16 block_addr, u8 page_num, u8 *buf, int buf_len)
+static int ms_write_extra_data(struct rtsx_chip *chip, u16 block_addr,
+ u8 page_num, u8 *buf, int buf_len)
{
struct ms_info *ms_card = &chip->ms_card;
int retval, i;
@@ -1525,7 +1537,7 @@ static int ms_write_extra_data(struct rtsx_chip *chip,
}
retval = ms_set_rw_reg_addr(chip, OverwriteFlag, MS_EXTRA_SIZE,
- SystemParm, 6 + MS_EXTRA_SIZE);
+ SystemParm, 6 + MS_EXTRA_SIZE);
if (retval != STATUS_SUCCESS) {
rtsx_trace(chip);
return STATUS_FAIL;
@@ -1588,7 +1600,7 @@ static int ms_read_page(struct rtsx_chip *chip, u16 block_addr, u8 page_num)
u8 val, data[6];
retval = ms_set_rw_reg_addr(chip, OverwriteFlag, MS_EXTRA_SIZE,
- SystemParm, 6);
+ SystemParm, 6);
if (retval != STATUS_SUCCESS) {
rtsx_trace(chip);
return STATUS_FAIL;
@@ -1651,7 +1663,7 @@ static int ms_read_page(struct rtsx_chip *chip, u16 block_addr, u8 page_num)
}
retval = ms_transfer_tpc(chip, MS_TM_NORMAL_READ, READ_PAGE_DATA,
- 0, NO_WAIT_INT);
+ 0, NO_WAIT_INT);
if (retval != STATUS_SUCCESS) {
rtsx_trace(chip);
return STATUS_FAIL;
@@ -1678,7 +1690,7 @@ static int ms_set_bad_block(struct rtsx_chip *chip, u16 phy_blk)
}
retval = ms_set_rw_reg_addr(chip, OverwriteFlag, MS_EXTRA_SIZE,
- SystemParm, 7);
+ SystemParm, 7);
if (retval != STATUS_SUCCESS) {
rtsx_trace(chip);
return STATUS_FAIL;
@@ -1742,7 +1754,7 @@ static int ms_erase_block(struct rtsx_chip *chip, u16 phy_blk)
u8 val, data[6];
retval = ms_set_rw_reg_addr(chip, OverwriteFlag, MS_EXTRA_SIZE,
- SystemParm, 6);
+ SystemParm, 6);
if (retval != STATUS_SUCCESS) {
rtsx_trace(chip);
return STATUS_FAIL;
@@ -1844,7 +1856,7 @@ static int ms_init_page(struct rtsx_chip *chip, u16 phy_blk, u16 log_blk,
}
retval = ms_write_extra_data(chip, phy_blk, i,
- extra, MS_EXTRA_SIZE);
+ extra, MS_EXTRA_SIZE);
if (retval != STATUS_SUCCESS) {
rtsx_trace(chip);
return STATUS_FAIL;
@@ -1855,7 +1867,7 @@ static int ms_init_page(struct rtsx_chip *chip, u16 phy_blk, u16 log_blk,
}
static int ms_copy_page(struct rtsx_chip *chip, u16 old_blk, u16 new_blk,
- u16 log_blk, u8 start_page, u8 end_page)
+ u16 log_blk, u8 start_page, u8 end_page)
{
struct ms_info *ms_card = &chip->ms_card;
bool uncorrect_flag = false;
@@ -1915,7 +1927,7 @@ static int ms_copy_page(struct rtsx_chip *chip, u16 old_blk, u16 new_blk,
ms_read_extra_data(chip, old_blk, i, extra, MS_EXTRA_SIZE);
retval = ms_set_rw_reg_addr(chip, OverwriteFlag,
- MS_EXTRA_SIZE, SystemParm, 6);
+ MS_EXTRA_SIZE, SystemParm, 6);
if (retval != STATUS_SUCCESS) {
rtsx_trace(chip);
return STATUS_FAIL;
@@ -1971,9 +1983,9 @@ static int ms_copy_page(struct rtsx_chip *chip, u16 old_blk, u16 new_blk,
}
retval = ms_transfer_tpc(chip,
- MS_TM_NORMAL_READ,
- READ_PAGE_DATA,
- 0, NO_WAIT_INT);
+ MS_TM_NORMAL_READ,
+ READ_PAGE_DATA,
+ 0, NO_WAIT_INT);
if (retval != STATUS_SUCCESS) {
rtsx_trace(chip);
return STATUS_FAIL;
@@ -1981,20 +1993,24 @@ static int ms_copy_page(struct rtsx_chip *chip, u16 old_blk, u16 new_blk,
if (uncorrect_flag) {
ms_set_page_status(log_blk, setPS_NG,
- extra, MS_EXTRA_SIZE);
+ extra,
+ MS_EXTRA_SIZE);
if (i == 0)
extra[0] &= 0xEF;
ms_write_extra_data(chip, old_blk, i,
- extra, MS_EXTRA_SIZE);
+ extra,
+ MS_EXTRA_SIZE);
dev_dbg(rtsx_dev(chip), "page %d : extra[0] = 0x%x\n",
i, extra[0]);
MS_SET_BAD_BLOCK_FLG(ms_card);
ms_set_page_status(log_blk, setPS_Error,
- extra, MS_EXTRA_SIZE);
+ extra,
+ MS_EXTRA_SIZE);
ms_write_extra_data(chip, new_blk, i,
- extra, MS_EXTRA_SIZE);
+ extra,
+ MS_EXTRA_SIZE);
continue;
}
@@ -2021,8 +2037,8 @@ static int ms_copy_page(struct rtsx_chip *chip, u16 old_blk, u16 new_blk,
}
}
- retval = ms_set_rw_reg_addr(chip, OverwriteFlag,
- MS_EXTRA_SIZE, SystemParm, (6 + MS_EXTRA_SIZE));
+ retval = ms_set_rw_reg_addr(chip, OverwriteFlag, MS_EXTRA_SIZE,
+ SystemParm, (6 + MS_EXTRA_SIZE));
ms_set_err_code(chip, MS_NO_ERROR);
@@ -2085,7 +2101,8 @@ static int ms_copy_page(struct rtsx_chip *chip, u16 old_blk, u16 new_blk,
if (i == 0) {
retval = ms_set_rw_reg_addr(chip, OverwriteFlag,
- MS_EXTRA_SIZE, SystemParm, 7);
+ MS_EXTRA_SIZE, SystemParm,
+ 7);
if (retval != STATUS_SUCCESS) {
rtsx_trace(chip);
return STATUS_FAIL;
@@ -2121,7 +2138,7 @@ static int ms_copy_page(struct rtsx_chip *chip, u16 old_blk, u16 new_blk,
ms_set_err_code(chip, MS_NO_ERROR);
retval = ms_read_bytes(chip, GET_INT, 1,
- NO_WAIT_INT, &val, 1);
+ NO_WAIT_INT, &val, 1);
if (retval != STATUS_SUCCESS) {
rtsx_trace(chip);
return STATUS_FAIL;
@@ -2361,7 +2378,7 @@ RE_SEARCH:
}
retval = ms_transfer_tpc(chip, MS_TM_WRITE_BYTES, WRITE_REG, 1,
- NO_WAIT_INT);
+ NO_WAIT_INT);
if (retval != STATUS_SUCCESS) {
rtsx_trace(chip);
return STATUS_FAIL;
@@ -2369,7 +2386,9 @@ RE_SEARCH:
retval = rtsx_write_register(chip, MS_CFG,
0x58 | MS_NO_CHECK_INT,
- MS_BUS_WIDTH_4 | PUSH_TIME_ODD | MS_NO_CHECK_INT);
+ MS_BUS_WIDTH_4 |
+ PUSH_TIME_ODD |
+ MS_NO_CHECK_INT);
if (retval) {
rtsx_trace(chip);
return retval;
@@ -2474,7 +2493,7 @@ static u16 ms_get_l2p_tbl(struct rtsx_chip *chip, int seg_no, u16 log_off)
}
static void ms_set_l2p_tbl(struct rtsx_chip *chip,
- int seg_no, u16 log_off, u16 phy_blk)
+ int seg_no, u16 log_off, u16 phy_blk)
{
struct ms_info *ms_card = &chip->ms_card;
struct zone_entry *segment;
@@ -2530,7 +2549,7 @@ static const unsigned short ms_start_idx[] = {0, 494, 990, 1486, 1982, 2478,
7934};
static int ms_arbitrate_l2p(struct rtsx_chip *chip, u16 phy_blk,
- u16 log_off, u8 us1, u8 us2)
+ u16 log_off, u8 us1, u8 us2)
{
struct ms_info *ms_card = &chip->ms_card;
struct zone_entry *segment;
@@ -2627,7 +2646,8 @@ static int ms_build_l2p_tbl(struct rtsx_chip *chip, int seg_no)
disable_cnt = segment->disable_count;
- segment->get_index = segment->set_index = 0;
+ segment->get_index = 0;
+ segment->set_index = 0;
segment->unused_blk_cnt = 0;
for (phy_blk = start; phy_blk < end; phy_blk++) {
@@ -2646,7 +2666,7 @@ static int ms_build_l2p_tbl(struct rtsx_chip *chip, int seg_no)
}
retval = ms_read_extra_data(chip, phy_blk, 0,
- extra, MS_EXTRA_SIZE);
+ extra, MS_EXTRA_SIZE);
if (retval != STATUS_SUCCESS) {
dev_dbg(rtsx_dev(chip), "read extra data fail\n");
ms_set_bad_block(chip, phy_blk);
@@ -2685,7 +2705,7 @@ static int ms_build_l2p_tbl(struct rtsx_chip *chip, int seg_no)
}
if ((log_blk < ms_start_idx[seg_no]) ||
- (log_blk >= ms_start_idx[seg_no + 1])) {
+ (log_blk >= ms_start_idx[seg_no + 1])) {
if (!(chip->card_wp & MS_CARD)) {
retval = ms_erase_block(chip, phy_blk);
if (retval != STATUS_SUCCESS)
@@ -2705,7 +2725,7 @@ static int ms_build_l2p_tbl(struct rtsx_chip *chip, int seg_no)
us1 = extra[0] & 0x10;
tmp_blk = segment->l2p_table[idx];
retval = ms_read_extra_data(chip, tmp_blk, 0,
- extra, MS_EXTRA_SIZE);
+ extra, MS_EXTRA_SIZE);
if (retval != STATUS_SUCCESS)
continue;
us2 = extra[0] & 0x10;
@@ -2774,7 +2794,8 @@ static int ms_build_l2p_tbl(struct rtsx_chip *chip, int seg_no)
phy_blk = ms_get_unused_block(chip, 0);
retval = ms_copy_page(chip, tmp_blk, phy_blk,
- log_blk, 0, ms_card->page_off + 1);
+ log_blk, 0,
+ ms_card->page_off + 1);
if (retval != STATUS_SUCCESS) {
rtsx_trace(chip);
return STATUS_FAIL;
@@ -2861,7 +2882,7 @@ int reset_ms_card(struct rtsx_chip *chip)
}
static int mspro_set_rw_cmd(struct rtsx_chip *chip,
- u32 start_sec, u16 sec_cnt, u8 cmd)
+ u32 start_sec, u16 sec_cnt, u8 cmd)
{
int retval, i;
u8 data[8];
@@ -2932,8 +2953,8 @@ static inline int ms_auto_tune_clock(struct rtsx_chip *chip)
}
static int mspro_rw_multi_sector(struct scsi_cmnd *srb,
- struct rtsx_chip *chip, u32 start_sector,
- u16 sector_cnt)
+ struct rtsx_chip *chip, u32 start_sector,
+ u16 sector_cnt)
{
struct ms_info *ms_card = &chip->ms_card;
bool mode_2k = false;
@@ -2992,12 +3013,13 @@ static int mspro_rw_multi_sector(struct scsi_cmnd *srb,
}
if (ms_card->seq_mode) {
- if ((ms_card->pre_dir != srb->sc_data_direction)
- || ((ms_card->pre_sec_addr + ms_card->pre_sec_cnt) != start_sector)
- || (mode_2k && (ms_card->seq_mode & MODE_512_SEQ))
- || (!mode_2k && (ms_card->seq_mode & MODE_2K_SEQ))
- || !(val & MS_INT_BREQ)
- || ((ms_card->total_sec_cnt + sector_cnt) > 0xFE00)) {
+ if ((ms_card->pre_dir != srb->sc_data_direction) ||
+ ((ms_card->pre_sec_addr + ms_card->pre_sec_cnt) !=
+ start_sector) ||
+ (mode_2k && (ms_card->seq_mode & MODE_512_SEQ)) ||
+ (!mode_2k && (ms_card->seq_mode & MODE_2K_SEQ)) ||
+ !(val & MS_INT_BREQ) ||
+ ((ms_card->total_sec_cnt + sector_cnt) > 0xFE00)) {
ms_card->seq_mode = 0;
ms_card->total_sec_cnt = 0;
if (val & MS_INT_BREQ) {
@@ -3007,7 +3029,8 @@ static int mspro_rw_multi_sector(struct scsi_cmnd *srb,
return STATUS_FAIL;
}
- rtsx_write_register(chip, RBCTL, RB_FLUSH, RB_FLUSH);
+ rtsx_write_register(chip, RBCTL, RB_FLUSH,
+ RB_FLUSH);
}
}
}
@@ -3038,8 +3061,8 @@ static int mspro_rw_multi_sector(struct scsi_cmnd *srb,
}
retval = ms_transfer_data(chip, trans_mode, rw_tpc, sector_cnt,
- WAIT_INT, mode_2k, scsi_sg_count(srb),
- scsi_sglist(srb), scsi_bufflen(srb));
+ WAIT_INT, mode_2k, scsi_sg_count(srb),
+ scsi_sglist(srb), scsi_bufflen(srb));
if (retval != STATUS_SUCCESS) {
ms_card->seq_mode = 0;
rtsx_read_register(chip, MS_TRANS_CFG, &val);
@@ -3076,7 +3099,7 @@ static int mspro_rw_multi_sector(struct scsi_cmnd *srb,
}
static int mspro_read_format_progress(struct rtsx_chip *chip,
- const int short_data_len)
+ const int short_data_len)
{
struct ms_info *ms_card = &chip->ms_card;
int retval, i;
@@ -3102,7 +3125,8 @@ static int mspro_read_format_progress(struct rtsx_chip *chip,
}
if (!(tmp & MS_INT_BREQ)) {
- if ((tmp & (MS_INT_CED | MS_INT_BREQ | MS_INT_CMDNK | MS_INT_ERR)) == MS_INT_CED) {
+ if ((tmp & (MS_INT_CED | MS_INT_BREQ | MS_INT_CMDNK |
+ MS_INT_ERR)) == MS_INT_CED) {
ms_card->format_status = FORMAT_SUCCESS;
return STATUS_SUCCESS;
}
@@ -3117,7 +3141,7 @@ static int mspro_read_format_progress(struct rtsx_chip *chip,
cnt = (u8)short_data_len;
retval = rtsx_write_register(chip, MS_CFG, MS_NO_CHECK_INT,
- MS_NO_CHECK_INT);
+ MS_NO_CHECK_INT);
if (retval != STATUS_SUCCESS) {
ms_card->format_status = FORMAT_FAIL;
rtsx_trace(chip);
@@ -3125,7 +3149,7 @@ static int mspro_read_format_progress(struct rtsx_chip *chip,
}
retval = ms_read_bytes(chip, PRO_READ_SHORT_DATA, cnt, WAIT_INT,
- data, 8);
+ data, 8);
if (retval != STATUS_SUCCESS) {
ms_card->format_status = FORMAT_FAIL;
rtsx_trace(chip);
@@ -3204,7 +3228,7 @@ void mspro_polling_format_status(struct rtsx_chip *chip)
int i;
if (ms_card->pro_under_formatting &&
- (rtsx_get_stat(chip) != RTSX_STAT_SS)) {
+ (rtsx_get_stat(chip) != RTSX_STAT_SS)) {
rtsx_set_stat(chip, RTSX_STAT_RUN);
for (i = 0; i < 65535; i++) {
@@ -3216,7 +3240,7 @@ void mspro_polling_format_status(struct rtsx_chip *chip)
}
int mspro_format(struct scsi_cmnd *srb, struct rtsx_chip *chip,
- int short_data_len, bool quick_format)
+ int short_data_len, bool quick_format)
{
struct ms_info *ms_card = &chip->ms_card;
int retval, i;
@@ -3305,9 +3329,9 @@ int mspro_format(struct scsi_cmnd *srb, struct rtsx_chip *chip,
}
static int ms_read_multiple_pages(struct rtsx_chip *chip, u16 phy_blk,
- u16 log_blk, u8 start_page, u8 end_page,
- u8 *buf, unsigned int *index,
- unsigned int *offset)
+ u16 log_blk, u8 start_page, u8 end_page,
+ u8 *buf, unsigned int *index,
+ unsigned int *offset)
{
struct ms_info *ms_card = &chip->ms_card;
int retval, i;
@@ -3315,7 +3339,7 @@ static int ms_read_multiple_pages(struct rtsx_chip *chip, u16 phy_blk,
u8 *ptr;
retval = ms_read_extra_data(chip, phy_blk, start_page,
- extra, MS_EXTRA_SIZE);
+ extra, MS_EXTRA_SIZE);
if (retval == STATUS_SUCCESS) {
if ((extra[1] & 0x30) != 0x30) {
ms_set_err_code(chip, MS_FLASH_READ_ERROR);
@@ -3325,7 +3349,7 @@ static int ms_read_multiple_pages(struct rtsx_chip *chip, u16 phy_blk,
}
retval = ms_set_rw_reg_addr(chip, OverwriteFlag, MS_EXTRA_SIZE,
- SystemParm, 6);
+ SystemParm, 6);
if (retval != STATUS_SUCCESS) {
rtsx_trace(chip);
return STATUS_FAIL;
@@ -3389,11 +3413,17 @@ static int ms_read_multiple_pages(struct rtsx_chip *chip, u16 phy_blk,
if (retval != STATUS_SUCCESS) {
if (!(chip->card_wp & MS_CARD)) {
reset_ms(chip);
- ms_set_page_status(log_blk, setPS_NG, extra, MS_EXTRA_SIZE);
- ms_write_extra_data(chip, phy_blk,
- page_addr, extra, MS_EXTRA_SIZE);
+ ms_set_page_status
+ (log_blk, setPS_NG,
+ extra,
+ MS_EXTRA_SIZE);
+ ms_write_extra_data
+ (chip, phy_blk,
+ page_addr, extra,
+ MS_EXTRA_SIZE);
}
- ms_set_err_code(chip, MS_FLASH_READ_ERROR);
+ ms_set_err_code(chip,
+ MS_FLASH_READ_ERROR);
rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -3420,7 +3450,7 @@ static int ms_read_multiple_pages(struct rtsx_chip *chip, u16 phy_blk,
}
retval = ms_read_bytes(chip, GET_INT, 1, NO_WAIT_INT,
- &val, 1);
+ &val, 1);
if (retval != STATUS_SUCCESS) {
rtsx_trace(chip);
return STATUS_FAIL;
@@ -3441,23 +3471,24 @@ static int ms_read_multiple_pages(struct rtsx_chip *chip, u16 phy_blk,
rtsx_add_cmd(chip, WRITE_REG_CMD, MS_TPC, 0xFF, READ_PAGE_DATA);
rtsx_add_cmd(chip, WRITE_REG_CMD, MS_TRANS_CFG,
- 0xFF, trans_cfg);
+ 0xFF, trans_cfg);
rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_DATA_SOURCE,
- 0x01, RING_BUFFER);
+ 0x01, RING_BUFFER);
trans_dma_enable(DMA_FROM_DEVICE, chip, 512, DMA_512);
rtsx_add_cmd(chip, WRITE_REG_CMD, MS_TRANSFER, 0xFF,
- MS_TRANSFER_START | MS_TM_NORMAL_READ);
+ MS_TRANSFER_START | MS_TM_NORMAL_READ);
rtsx_add_cmd(chip, CHECK_REG_CMD, MS_TRANSFER,
- MS_TRANSFER_END, MS_TRANSFER_END);
+ MS_TRANSFER_END, MS_TRANSFER_END);
rtsx_send_cmd_no_wait(chip);
- retval = rtsx_transfer_data_partial(chip, MS_CARD, ptr,
- 512, scsi_sg_count(chip->srb),
- index, offset, DMA_FROM_DEVICE,
- chip->ms_timeout);
+ retval = rtsx_transfer_data_partial(chip, MS_CARD, ptr, 512,
+ scsi_sg_count(chip->srb),
+ index, offset,
+ DMA_FROM_DEVICE,
+ chip->ms_timeout);
if (retval < 0) {
if (retval == -ETIMEDOUT) {
ms_set_err_code(chip, MS_TO_ERROR);
@@ -3489,7 +3520,7 @@ static int ms_read_multiple_pages(struct rtsx_chip *chip, u16 phy_blk,
}
static int ms_write_multiple_pages(struct rtsx_chip *chip, u16 old_blk,
- u16 new_blk, u16 log_blk, u8 start_page,
+ u16 new_blk, u16 log_blk, u8 start_page,
u8 end_page, u8 *buf, unsigned int *index,
unsigned int *offset)
{
@@ -3500,7 +3531,7 @@ static int ms_write_multiple_pages(struct rtsx_chip *chip, u16 old_blk,
if (!start_page) {
retval = ms_set_rw_reg_addr(chip, OverwriteFlag, MS_EXTRA_SIZE,
- SystemParm, 7);
+ SystemParm, 7);
if (retval != STATUS_SUCCESS) {
rtsx_trace(chip);
return STATUS_FAIL;
@@ -3534,7 +3565,7 @@ static int ms_write_multiple_pages(struct rtsx_chip *chip, u16 old_blk,
ms_set_err_code(chip, MS_NO_ERROR);
retval = ms_transfer_tpc(chip, MS_TM_READ_BYTES, GET_INT, 1,
- NO_WAIT_INT);
+ NO_WAIT_INT);
if (retval != STATUS_SUCCESS) {
rtsx_trace(chip);
return STATUS_FAIL;
@@ -3542,7 +3573,7 @@ static int ms_write_multiple_pages(struct rtsx_chip *chip, u16 old_blk,
}
retval = ms_set_rw_reg_addr(chip, OverwriteFlag, MS_EXTRA_SIZE,
- SystemParm, (6 + MS_EXTRA_SIZE));
+ SystemParm, (6 + MS_EXTRA_SIZE));
if (retval != STATUS_SUCCESS) {
rtsx_trace(chip);
return STATUS_FAIL;
@@ -3630,25 +3661,26 @@ static int ms_write_multiple_pages(struct rtsx_chip *chip, u16 old_blk,
rtsx_init_cmd(chip);
rtsx_add_cmd(chip, WRITE_REG_CMD, MS_TPC,
- 0xFF, WRITE_PAGE_DATA);
+ 0xFF, WRITE_PAGE_DATA);
rtsx_add_cmd(chip, WRITE_REG_CMD, MS_TRANS_CFG,
- 0xFF, WAIT_INT);
+ 0xFF, WAIT_INT);
rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_DATA_SOURCE,
- 0x01, RING_BUFFER);
+ 0x01, RING_BUFFER);
trans_dma_enable(DMA_TO_DEVICE, chip, 512, DMA_512);
rtsx_add_cmd(chip, WRITE_REG_CMD, MS_TRANSFER, 0xFF,
- MS_TRANSFER_START | MS_TM_NORMAL_WRITE);
+ MS_TRANSFER_START | MS_TM_NORMAL_WRITE);
rtsx_add_cmd(chip, CHECK_REG_CMD, MS_TRANSFER,
- MS_TRANSFER_END, MS_TRANSFER_END);
+ MS_TRANSFER_END, MS_TRANSFER_END);
rtsx_send_cmd_no_wait(chip);
- retval = rtsx_transfer_data_partial(chip, MS_CARD, ptr,
- 512, scsi_sg_count(chip->srb),
- index, offset, DMA_TO_DEVICE,
- chip->ms_timeout);
+ retval = rtsx_transfer_data_partial(chip, MS_CARD, ptr, 512,
+ scsi_sg_count(chip->srb),
+ index, offset,
+ DMA_TO_DEVICE,
+ chip->ms_timeout);
if (retval < 0) {
ms_set_err_code(chip, MS_TO_ERROR);
rtsx_clear_ms_error(chip);
@@ -3677,7 +3709,7 @@ static int ms_write_multiple_pages(struct rtsx_chip *chip, u16 old_blk,
if (page_addr == (end_page - 1)) {
if (!(val & INT_REG_CED)) {
retval = ms_send_cmd(chip, BLOCK_END,
- WAIT_INT);
+ WAIT_INT);
if (retval != STATUS_SUCCESS) {
rtsx_trace(chip);
return STATUS_FAIL;
@@ -3685,7 +3717,7 @@ static int ms_write_multiple_pages(struct rtsx_chip *chip, u16 old_blk,
}
retval = ms_read_bytes(chip, GET_INT, 1,
- NO_WAIT_INT, &val, 1);
+ NO_WAIT_INT, &val, 1);
if (retval != STATUS_SUCCESS) {
rtsx_trace(chip);
return STATUS_FAIL;
@@ -3693,7 +3725,7 @@ static int ms_write_multiple_pages(struct rtsx_chip *chip, u16 old_blk,
}
if ((page_addr == (end_page - 1)) ||
- (page_addr == ms_card->page_off)) {
+ (page_addr == ms_card->page_off)) {
if (!(val & INT_REG_CED)) {
ms_set_err_code(chip,
MS_FLASH_WRITE_ERROR);
@@ -3711,13 +3743,13 @@ static int ms_write_multiple_pages(struct rtsx_chip *chip, u16 old_blk,
}
static int ms_finish_write(struct rtsx_chip *chip, u16 old_blk, u16 new_blk,
- u16 log_blk, u8 page_off)
+ u16 log_blk, u8 page_off)
{
struct ms_info *ms_card = &chip->ms_card;
int retval, seg_no;
retval = ms_copy_page(chip, old_blk, new_blk, log_blk,
- page_off, ms_card->page_off + 1);
+ page_off, ms_card->page_off + 1);
if (retval != STATUS_SUCCESS) {
rtsx_trace(chip);
return STATUS_FAIL;
@@ -3740,13 +3772,13 @@ static int ms_finish_write(struct rtsx_chip *chip, u16 old_blk, u16 new_blk,
}
static int ms_prepare_write(struct rtsx_chip *chip, u16 old_blk, u16 new_blk,
- u16 log_blk, u8 start_page)
+ u16 log_blk, u8 start_page)
{
int retval;
if (start_page) {
retval = ms_copy_page(chip, old_blk, new_blk, log_blk,
- 0, start_page);
+ 0, start_page);
if (retval != STATUS_SUCCESS) {
rtsx_trace(chip);
return STATUS_FAIL;
@@ -3772,7 +3804,7 @@ int ms_delay_write(struct rtsx_chip *chip)
delay_write->delay_write_flag = 0;
retval = ms_finish_write(chip,
- delay_write->old_phyblock,
+ delay_write->old_phyblock,
delay_write->new_phyblock,
delay_write->logblock,
delay_write->pageoff);
@@ -3790,13 +3822,13 @@ static inline void ms_rw_fail(struct scsi_cmnd *srb, struct rtsx_chip *chip)
{
if (srb->sc_data_direction == DMA_FROM_DEVICE)
set_sense_type(chip, SCSI_LUN(srb),
- SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
+ SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
else
set_sense_type(chip, SCSI_LUN(srb), SENSE_TYPE_MEDIA_WRITE_ERR);
}
static int ms_rw_multi_sector(struct scsi_cmnd *srb, struct rtsx_chip *chip,
- u32 start_sector, u16 sector_cnt)
+ u32 start_sector, u16 sector_cnt)
{
struct ms_info *ms_card = &chip->ms_card;
unsigned int lun = SCSI_LUN(srb);
@@ -3843,16 +3875,17 @@ static int ms_rw_multi_sector(struct scsi_cmnd *srb, struct rtsx_chip *chip,
if (srb->sc_data_direction == DMA_TO_DEVICE) {
#ifdef MS_DELAY_WRITE
if (delay_write->delay_write_flag &&
- (delay_write->logblock == log_blk) &&
- (start_page > delay_write->pageoff)) {
+ (delay_write->logblock == log_blk) &&
+ (start_page > delay_write->pageoff)) {
delay_write->delay_write_flag = 0;
retval = ms_copy_page(chip,
- delay_write->old_phyblock,
- delay_write->new_phyblock, log_blk,
- delay_write->pageoff, start_page);
+ delay_write->old_phyblock,
+ delay_write->new_phyblock,
+ log_blk,
+ delay_write->pageoff, start_page);
if (retval != STATUS_SUCCESS) {
set_sense_type(chip, lun,
- SENSE_TYPE_MEDIA_WRITE_ERR);
+ SENSE_TYPE_MEDIA_WRITE_ERR);
rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -3868,32 +3901,35 @@ static int ms_rw_multi_sector(struct scsi_cmnd *srb, struct rtsx_chip *chip,
retval = ms_delay_write(chip);
if (retval != STATUS_SUCCESS) {
set_sense_type(chip, lun,
- SENSE_TYPE_MEDIA_WRITE_ERR);
+ SENSE_TYPE_MEDIA_WRITE_ERR);
rtsx_trace(chip);
return STATUS_FAIL;
}
#endif
- old_blk = ms_get_l2p_tbl(chip, seg_no,
- log_blk - ms_start_idx[seg_no]);
+ old_blk = ms_get_l2p_tbl
+ (chip, seg_no,
+ log_blk - ms_start_idx[seg_no]);
new_blk = ms_get_unused_block(chip, seg_no);
if ((old_blk == 0xFFFF) || (new_blk == 0xFFFF)) {
set_sense_type(chip, lun,
- SENSE_TYPE_MEDIA_WRITE_ERR);
+ SENSE_TYPE_MEDIA_WRITE_ERR);
rtsx_trace(chip);
return STATUS_FAIL;
}
retval = ms_prepare_write(chip, old_blk, new_blk,
- log_blk, start_page);
+ log_blk, start_page);
if (retval != STATUS_SUCCESS) {
- if (detect_card_cd(chip, MS_CARD) != STATUS_SUCCESS) {
- set_sense_type(chip, lun,
+ if (detect_card_cd(chip, MS_CARD) !=
+ STATUS_SUCCESS) {
+ set_sense_type
+ (chip, lun,
SENSE_TYPE_MEDIA_NOT_PRESENT);
rtsx_trace(chip);
return STATUS_FAIL;
}
set_sense_type(chip, lun,
- SENSE_TYPE_MEDIA_WRITE_ERR);
+ SENSE_TYPE_MEDIA_WRITE_ERR);
rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -3906,21 +3942,21 @@ static int ms_rw_multi_sector(struct scsi_cmnd *srb, struct rtsx_chip *chip,
if (retval != STATUS_SUCCESS) {
if (detect_card_cd(chip, MS_CARD) != STATUS_SUCCESS) {
set_sense_type(chip, lun,
- SENSE_TYPE_MEDIA_NOT_PRESENT);
+ SENSE_TYPE_MEDIA_NOT_PRESENT);
rtsx_trace(chip);
return STATUS_FAIL;
}
set_sense_type(chip, lun,
- SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
+ SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
rtsx_trace(chip);
return STATUS_FAIL;
}
#endif
old_blk = ms_get_l2p_tbl(chip, seg_no,
- log_blk - ms_start_idx[seg_no]);
+ log_blk - ms_start_idx[seg_no]);
if (old_blk == 0xFFFF) {
set_sense_type(chip, lun,
- SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
+ SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -3942,19 +3978,21 @@ static int ms_rw_multi_sector(struct scsi_cmnd *srb, struct rtsx_chip *chip,
if (srb->sc_data_direction == DMA_FROM_DEVICE) {
retval = ms_read_multiple_pages(chip,
- old_blk, log_blk, start_page, end_page,
- ptr, &index, &offset);
+ old_blk, log_blk,
+ start_page, end_page,
+ ptr, &index, &offset);
} else {
- retval = ms_write_multiple_pages(chip, old_blk,
- new_blk, log_blk, start_page, end_page,
- ptr, &index, &offset);
+ retval = ms_write_multiple_pages(chip, old_blk, new_blk,
+ log_blk, start_page,
+ end_page, ptr, &index,
+ &offset);
}
if (retval != STATUS_SUCCESS) {
toggle_gpio(chip, 1);
if (detect_card_cd(chip, MS_CARD) != STATUS_SUCCESS) {
set_sense_type(chip, lun,
- SENSE_TYPE_MEDIA_NOT_PRESENT);
+ SENSE_TYPE_MEDIA_NOT_PRESENT);
rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -3970,8 +4008,8 @@ static int ms_rw_multi_sector(struct scsi_cmnd *srb, struct rtsx_chip *chip,
ms_set_unused_block(chip, old_blk);
ms_set_l2p_tbl(chip, seg_no,
- log_blk - ms_start_idx[seg_no],
- new_blk);
+ log_blk - ms_start_idx[seg_no],
+ new_blk);
}
}
@@ -3995,14 +4033,14 @@ static int ms_rw_multi_sector(struct scsi_cmnd *srb, struct rtsx_chip *chip,
if (retval != STATUS_SUCCESS) {
chip->card_fail |= MS_CARD;
set_sense_type(chip, lun,
- SENSE_TYPE_MEDIA_NOT_PRESENT);
+ SENSE_TYPE_MEDIA_NOT_PRESENT);
rtsx_trace(chip);
return STATUS_FAIL;
}
}
old_blk = ms_get_l2p_tbl(chip, seg_no,
- log_blk - ms_start_idx[seg_no]);
+ log_blk - ms_start_idx[seg_no]);
if (old_blk == 0xFFFF) {
ms_rw_fail(srb, chip);
rtsx_trace(chip);
@@ -4034,10 +4072,12 @@ static int ms_rw_multi_sector(struct scsi_cmnd *srb, struct rtsx_chip *chip,
delay_write->pageoff = end_page;
#else
retval = ms_finish_write(chip, old_blk, new_blk,
- log_blk, end_page);
+ log_blk, end_page);
if (retval != STATUS_SUCCESS) {
- if (detect_card_cd(chip, MS_CARD) != STATUS_SUCCESS) {
- set_sense_type(chip, lun,
+ if (detect_card_cd(chip, MS_CARD) !=
+ STATUS_SUCCESS) {
+ set_sense_type
+ (chip, lun,
SENSE_TYPE_MEDIA_NOT_PRESENT);
rtsx_trace(chip);
return STATUS_FAIL;
@@ -4057,17 +4097,17 @@ static int ms_rw_multi_sector(struct scsi_cmnd *srb, struct rtsx_chip *chip,
}
int ms_rw(struct scsi_cmnd *srb, struct rtsx_chip *chip,
- u32 start_sector, u16 sector_cnt)
+ u32 start_sector, u16 sector_cnt)
{
struct ms_info *ms_card = &chip->ms_card;
int retval;
if (CHK_MSPRO(ms_card))
retval = mspro_rw_multi_sector(srb, chip, start_sector,
- sector_cnt);
+ sector_cnt);
else
retval = ms_rw_multi_sector(srb, chip, start_sector,
- sector_cnt);
+ sector_cnt);
return retval;
}
@@ -4189,7 +4229,7 @@ static int mg_send_ex_cmd(struct rtsx_chip *chip, u8 cmd, u8 entry_num)
}
static int mg_set_tpc_para_sub(struct rtsx_chip *chip, int type,
- u8 mg_entry_num)
+ u8 mg_entry_num)
{
int retval;
u8 buf[6];
@@ -4306,7 +4346,7 @@ int mg_get_local_EKB(struct scsi_cmnd *srb, struct rtsx_chip *chip)
}
retval = ms_transfer_data(chip, MS_TM_AUTO_READ, PRO_READ_LONG_DATA,
- 3, WAIT_INT, 0, 0, buf + 4, 1536);
+ 3, WAIT_INT, 0, 0, buf + 4, 1536);
if (retval != STATUS_SUCCESS) {
set_sense_type(chip, lun, SENSE_TYPE_MG_KEY_FAIL_NOT_AUTHEN);
rtsx_clear_ms_error(chip);
@@ -4354,7 +4394,7 @@ int mg_chg(struct scsi_cmnd *srb, struct rtsx_chip *chip)
}
retval = ms_read_bytes(chip, PRO_READ_SHORT_DATA, 32, WAIT_INT,
- buf, 32);
+ buf, 32);
if (retval != STATUS_SUCCESS) {
set_sense_type(chip, lun, SENSE_TYPE_MG_INCOMPATIBLE_MEDIUM);
rtsx_trace(chip);
@@ -4437,7 +4477,7 @@ int mg_get_rsp_chg(struct scsi_cmnd *srb, struct rtsx_chip *chip)
}
retval = ms_read_bytes(chip, PRO_READ_SHORT_DATA, 32, WAIT_INT,
- buf1, 32);
+ buf1, 32);
if (retval != STATUS_SUCCESS) {
set_sense_type(chip, lun, SENSE_TYPE_MG_KEY_FAIL_NOT_AUTHEN);
rtsx_trace(chip);
@@ -4560,7 +4600,7 @@ int mg_get_ICV(struct scsi_cmnd *srb, struct rtsx_chip *chip)
}
retval = ms_transfer_data(chip, MS_TM_AUTO_READ, PRO_READ_LONG_DATA,
- 2, WAIT_INT, 0, 0, buf + 4, 1024);
+ 2, WAIT_INT, 0, 0, buf + 4, 1024);
if (retval != STATUS_SUCCESS) {
set_sense_type(chip, lun, SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
rtsx_clear_ms_error(chip);
@@ -4615,11 +4655,12 @@ int mg_set_ICV(struct scsi_cmnd *srb, struct rtsx_chip *chip)
if (retval != STATUS_SUCCESS) {
if (ms_card->mg_auth == 0) {
if ((buf[5] & 0xC0) != 0)
- set_sense_type(chip, lun,
+ set_sense_type
+ (chip, lun,
SENSE_TYPE_MG_KEY_FAIL_NOT_ESTAB);
else
set_sense_type(chip, lun,
- SENSE_TYPE_MG_WRITE_ERR);
+ SENSE_TYPE_MG_WRITE_ERR);
} else {
set_sense_type(chip, lun, SENSE_TYPE_MG_WRITE_ERR);
}
@@ -4634,17 +4675,17 @@ int mg_set_ICV(struct scsi_cmnd *srb, struct rtsx_chip *chip)
rtsx_init_cmd(chip);
rtsx_add_cmd(chip, WRITE_REG_CMD, MS_TPC,
- 0xFF, PRO_WRITE_LONG_DATA);
+ 0xFF, PRO_WRITE_LONG_DATA);
rtsx_add_cmd(chip, WRITE_REG_CMD, MS_TRANS_CFG, 0xFF, WAIT_INT);
rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_DATA_SOURCE,
- 0x01, RING_BUFFER);
+ 0x01, RING_BUFFER);
trans_dma_enable(DMA_TO_DEVICE, chip, 512, DMA_512);
rtsx_add_cmd(chip, WRITE_REG_CMD, MS_TRANSFER, 0xFF,
- MS_TRANSFER_START | MS_TM_NORMAL_WRITE);
+ MS_TRANSFER_START | MS_TM_NORMAL_WRITE);
rtsx_add_cmd(chip, CHECK_REG_CMD, MS_TRANSFER,
- MS_TRANSFER_END, MS_TRANSFER_END);
+ MS_TRANSFER_END, MS_TRANSFER_END);
rtsx_send_cmd_no_wait(chip);
@@ -4654,13 +4695,15 @@ int mg_set_ICV(struct scsi_cmnd *srb, struct rtsx_chip *chip)
rtsx_clear_ms_error(chip);
if (ms_card->mg_auth == 0) {
if ((buf[5] & 0xC0) != 0)
- set_sense_type(chip, lun, SENSE_TYPE_MG_KEY_FAIL_NOT_ESTAB);
+ set_sense_type
+ (chip, lun,
+ SENSE_TYPE_MG_KEY_FAIL_NOT_ESTAB);
else
set_sense_type(chip, lun,
- SENSE_TYPE_MG_WRITE_ERR);
+ SENSE_TYPE_MG_WRITE_ERR);
} else {
set_sense_type(chip, lun,
- SENSE_TYPE_MG_WRITE_ERR);
+ SENSE_TYPE_MG_WRITE_ERR);
}
retval = STATUS_FAIL;
rtsx_trace(chip);
@@ -4669,16 +4712,17 @@ int mg_set_ICV(struct scsi_cmnd *srb, struct rtsx_chip *chip)
}
#else
retval = ms_transfer_data(chip, MS_TM_AUTO_WRITE, PRO_WRITE_LONG_DATA,
- 2, WAIT_INT, 0, 0, buf + 4, 1024);
+ 2, WAIT_INT, 0, 0, buf + 4, 1024);
if ((retval != STATUS_SUCCESS) || check_ms_err(chip)) {
rtsx_clear_ms_error(chip);
if (ms_card->mg_auth == 0) {
if ((buf[5] & 0xC0) != 0)
- set_sense_type(chip, lun,
- SENSE_TYPE_MG_KEY_FAIL_NOT_ESTAB);
+ set_sense_type
+ (chip, lun,
+ SENSE_TYPE_MG_KEY_FAIL_NOT_ESTAB);
else
set_sense_type(chip, lun,
- SENSE_TYPE_MG_WRITE_ERR);
+ SENSE_TYPE_MG_WRITE_ERR);
} else {
set_sense_type(chip, lun, SENSE_TYPE_MG_WRITE_ERR);
}
@@ -4706,11 +4750,12 @@ void ms_cleanup_work(struct rtsx_chip *chip)
}
if (CHK_MSHG(ms_card)) {
rtsx_write_register(chip, MS_CFG,
- MS_2K_SECTOR_MODE, 0x00);
+ MS_2K_SECTOR_MODE, 0x00);
}
}
#ifdef MS_DELAY_WRITE
- else if ((!CHK_MSPRO(ms_card)) && ms_card->delay_write.delay_write_flag) {
+ else if ((!CHK_MSPRO(ms_card)) &&
+ ms_card->delay_write.delay_write_flag) {
dev_dbg(rtsx_dev(chip), "MS: delay write\n");
ms_delay_write(chip);
ms_card->cleanup_counter = 0;
diff --git a/drivers/staging/rts5208/ms.h b/drivers/staging/rts5208/ms.h
index d7686399df97..71f98cc03eed 100644
--- a/drivers/staging/rts5208/ms.h
+++ b/drivers/staging/rts5208/ms.h
@@ -202,9 +202,9 @@ void mspro_polling_format_status(struct rtsx_chip *chip);
void mspro_stop_seq_mode(struct rtsx_chip *chip);
int reset_ms_card(struct rtsx_chip *chip);
int ms_rw(struct scsi_cmnd *srb, struct rtsx_chip *chip,
- u32 start_sector, u16 sector_cnt);
+ u32 start_sector, u16 sector_cnt);
int mspro_format(struct scsi_cmnd *srb, struct rtsx_chip *chip,
- int short_data_len, bool quick_format);
+ int short_data_len, bool quick_format);
void ms_free_l2p_tbl(struct rtsx_chip *chip);
void ms_cleanup_work(struct rtsx_chip *chip);
int ms_power_off_card3v3(struct rtsx_chip *chip);
diff --git a/drivers/staging/rts5208/rtsx.c b/drivers/staging/rts5208/rtsx.c
index 5d65a5cdc748..68d75d0d5efd 100644
--- a/drivers/staging/rts5208/rtsx.c
+++ b/drivers/staging/rts5208/rtsx.c
@@ -107,8 +107,10 @@ static int slave_configure(struct scsi_device *sdev)
* the actual value or the modified one, depending on where the
* data comes from.
*/
- if (sdev->scsi_level < SCSI_2)
- sdev->scsi_level = sdev->sdev_target->scsi_level = SCSI_2;
+ if (sdev->scsi_level < SCSI_2) {
+ sdev->scsi_level = SCSI_2;
+ sdev->sdev_target->scsi_level = SCSI_2;
+ }
return 0;
}
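Aside: the slave_configure() hunk above splits the chained assignment sdev->scsi_level = sdev->sdev_target->scsi_level = SCSI_2 into two statements, the form checkpatch prefers over multiple assignments. A toy stand-alone illustration (stand-in variables and value, not the SCSI midlayer's):

#include <stdio.h>

int main(void)
{
        enum { SCSI_2 = 3 };            /* stand-in value */
        int dev_level = 1;
        int target_level = 1;

        if (dev_level < SCSI_2) {
                /* one assignment per statement, same net effect */
                dev_level = SCSI_2;
                target_level = SCSI_2;
        }

        printf("%d %d\n", dev_level, target_level);
        return 0;
}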
@@ -120,12 +122,15 @@ static int slave_configure(struct scsi_device *sdev)
/* we use this macro to help us write into the buffer */
#undef SPRINTF
#define SPRINTF(args...) \
- do { if (pos < buffer+length) pos += sprintf(pos, ## args); } while (0)
+ do { \
+ if (pos < buffer + length) \
+ pos += sprintf(pos, ## args); \
+ } while (0)
/* queue a command */
/* This is always called with scsi_lock(host) held */
static int queuecommand_lck(struct scsi_cmnd *srb,
- void (*done)(struct scsi_cmnd *))
+ void (*done)(struct scsi_cmnd *))
{
struct rtsx_dev *dev = host_to_rtsx(srb->device->host);
struct rtsx_chip *chip = dev->chip;
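Aside: the SPRINTF rewrite above keeps the do { ... } while (0) wrapper when the macro body spans several statements. A minimal sketch of why that wrapper matters (hypothetical LOG macro, not part of the driver):

#include <stdio.h>

/* Two statements with no wrapper: unsafe as a single branch body. */
#define LOG_UNSAFE(msg) printf("log: "); printf("%s\n", msg)

/* The wrapper turns the body into one statement that still takes ';'. */
#define LOG(msg) \
        do { \
                printf("log: "); \
                printf("%s\n", msg); \
        } while (0)

int main(void)
{
        int err = 1;

        if (err)
                LOG("failed"); /* LOG_UNSAFE() here would orphan the else */
        else
                LOG("ok");

        return 0;
}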
@@ -313,7 +318,7 @@ static int rtsx_suspend(struct pci_dev *pci, pm_message_t state)
return 0;
/* lock the device pointers */
- mutex_lock(&(dev->dev_mutex));
+ mutex_lock(&dev->dev_mutex);
chip = dev->chip;
@@ -349,7 +354,7 @@ static int rtsx_resume(struct pci_dev *pci)
chip = dev->chip;
/* lock the device pointers */
- mutex_lock(&(dev->dev_mutex));
+ mutex_lock(&dev->dev_mutex);
pci_set_power_state(pci, PCI_D0);
pci_restore_state(pci);
@@ -418,7 +423,7 @@ static int rtsx_control_thread(void *__dev)
break;
/* lock the device pointers */
- mutex_lock(&(dev->dev_mutex));
+ mutex_lock(&dev->dev_mutex);
/* if the device has disconnected, we are free to exit */
if (rtsx_chk_stat(chip, RTSX_STAT_DISCONNECT)) {
@@ -433,7 +438,7 @@ static int rtsx_control_thread(void *__dev)
/* has the command aborted ? */
if (rtsx_chk_stat(chip, RTSX_STAT_ABORT)) {
chip->srb->result = DID_ABORT << 16;
- goto SkipForAbort;
+ goto skip_for_abort;
}
scsi_unlock(host);
@@ -480,12 +485,12 @@ static int rtsx_control_thread(void *__dev)
else if (chip->srb->result != DID_ABORT << 16) {
chip->srb->scsi_done(chip->srb);
} else {
-SkipForAbort:
+skip_for_abort:
dev_err(&dev->pci->dev, "scsi command aborted\n");
}
if (rtsx_chk_stat(chip, RTSX_STAT_ABORT)) {
- complete(&(dev->notify));
+ complete(&dev->notify);
rtsx_set_stat(chip, RTSX_STAT_IDLE);
}
@@ -519,9 +524,9 @@ static int rtsx_polling_thread(void *__dev)
{
struct rtsx_dev *dev = __dev;
struct rtsx_chip *chip = dev->chip;
- struct sd_info *sd_card = &(chip->sd_card);
- struct xd_info *xd_card = &(chip->xd_card);
- struct ms_info *ms_card = &(chip->ms_card);
+ struct sd_info *sd_card = &chip->sd_card;
+ struct xd_info *xd_card = &chip->xd_card;
+ struct ms_info *ms_card = &chip->ms_card;
sd_card->cleanup_counter = 0;
xd_card->cleanup_counter = 0;
@@ -531,12 +536,11 @@ static int rtsx_polling_thread(void *__dev)
wait_timeout((delay_use + 5) * 1000);
for (;;) {
-
set_current_state(TASK_INTERRUPTIBLE);
schedule_timeout(msecs_to_jiffies(POLLING_INTERVAL));
/* lock the device pointers */
- mutex_lock(&(dev->dev_mutex));
+ mutex_lock(&dev->dev_mutex);
/* if the device has disconnected, we are free to exit */
if (rtsx_chk_stat(chip, RTSX_STAT_DISCONNECT)) {
@@ -550,7 +554,7 @@ static int rtsx_polling_thread(void *__dev)
mspro_polling_format_status(chip);
/* lock the device pointers */
- mutex_lock(&(dev->dev_mutex));
+ mutex_lock(&dev->dev_mutex);
rtsx_polling_func(chip);
@@ -597,7 +601,7 @@ static irqreturn_t rtsx_interrupt(int irq, void *dev_id)
dev->trans_result = TRANS_RESULT_FAIL;
if (dev->done)
complete(dev->done);
- goto Exit;
+ goto exit;
}
}
@@ -619,7 +623,7 @@ static irqreturn_t rtsx_interrupt(int irq, void *dev_id)
}
}
-Exit:
+exit:
spin_unlock(&dev->reg_lock);
return IRQ_HANDLED;
}
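Aside: the SkipForAbort -> skip_for_abort and Exit -> exit renames above follow the kernel convention of lowercase goto labels naming the unwind step. A self-contained sketch of the pattern (made-up allocations, not the driver's resources):

#include <stdlib.h>

static int setup(void)
{
        char *a, *b;

        a = malloc(16);
        if (!a)
                goto err;

        b = malloc(16);
        if (!b)
                goto err_free_a;        /* label names what it undoes */

        free(b);
        free(a);
        return 0;

err_free_a:
        free(a);
err:
        return -1;
}

int main(void)
{
        return setup() ? 1 : 0;
}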
@@ -724,9 +728,10 @@ static int rtsx_scan_thread(void *__dev)
dev_info(&dev->pci->dev,
"%s: waiting for device to settle before scanning\n",
CR_DRIVER_NAME);
- wait_event_interruptible_timeout(dev->delay_wait,
- rtsx_chk_stat(chip, RTSX_STAT_DISCONNECT),
- delay_use * HZ);
+ wait_event_interruptible_timeout
+ (dev->delay_wait,
+ rtsx_chk_stat(chip, RTSX_STAT_DISCONNECT),
+ delay_use * HZ);
}
/* If the device is still connected, perform the scanning */
@@ -844,7 +849,7 @@ static void rtsx_init_options(struct rtsx_chip *chip)
}
static int rtsx_probe(struct pci_dev *pci,
- const struct pci_device_id *pci_id)
+ const struct pci_device_id *pci_id)
{
struct Scsi_Host *host;
struct rtsx_dev *dev;
@@ -879,18 +884,18 @@ static int rtsx_probe(struct pci_dev *pci,
dev = host_to_rtsx(host);
memset(dev, 0, sizeof(struct rtsx_dev));
- dev->chip = kzalloc(sizeof(struct rtsx_chip), GFP_KERNEL);
+ dev->chip = kzalloc(sizeof(*dev->chip), GFP_KERNEL);
if (!dev->chip) {
err = -ENOMEM;
goto errout;
}
spin_lock_init(&dev->reg_lock);
- mutex_init(&(dev->dev_mutex));
+ mutex_init(&dev->dev_mutex);
init_completion(&dev->cmnd_ready);
init_completion(&dev->control_exit);
init_completion(&dev->polling_exit);
- init_completion(&(dev->notify));
+ init_completion(&dev->notify);
init_completion(&dev->scanning_done);
init_waitqueue_head(&dev->delay_wait);
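Aside: the kzalloc() change above passes sizeof(*dev->chip) instead of sizeof(struct rtsx_chip), so the size expression tracks the pointer's type if it ever changes. A userspace sketch with calloc() standing in for kzalloc() (assumption: both return zeroed memory):

#include <stdlib.h>

struct chip {
        int regs[4];
};

int main(void)
{
        /* sizeof(*c) stays correct even if struct chip is reworked */
        struct chip *c = calloc(1, sizeof(*c));

        if (!c)
                return 1;
        free(c);
        return 0;
}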
diff --git a/drivers/staging/rts5208/rtsx.h b/drivers/staging/rts5208/rtsx.h
index e725b10ed087..575e5734f2a5 100644
--- a/drivers/staging/rts5208/rtsx.h
+++ b/drivers/staging/rts5208/rtsx.h
@@ -149,7 +149,7 @@ static inline void get_current_time(u8 *timeval_buf, int buf_len)
getnstimeofday64(&ts64);
- tv_usec = ts64.tv_nsec/NSEC_PER_USEC;
+ tv_usec = ts64.tv_nsec / NSEC_PER_USEC;
timeval_buf[0] = (u8)(ts64.tv_sec >> 24);
timeval_buf[1] = (u8)(ts64.tv_sec >> 16);
diff --git a/drivers/staging/rts5208/rtsx_card.c b/drivers/staging/rts5208/rtsx_card.c
index 97717744962d..a6b7bffc6714 100644
--- a/drivers/staging/rts5208/rtsx_card.c
+++ b/drivers/staging/rts5208/rtsx_card.c
@@ -33,11 +33,11 @@
void do_remaining_work(struct rtsx_chip *chip)
{
- struct sd_info *sd_card = &(chip->sd_card);
+ struct sd_info *sd_card = &chip->sd_card;
#ifdef XD_DELAY_WRITE
- struct xd_info *xd_card = &(chip->xd_card);
+ struct xd_info *xd_card = &chip->xd_card;
#endif
- struct ms_info *ms_card = &(chip->ms_card);
+ struct ms_info *ms_card = &chip->ms_card;
if (chip->card_ready & SD_CARD) {
if (sd_card->seq_mode) {
@@ -100,9 +100,9 @@ void try_to_switch_sdio_ctrl(struct rtsx_chip *chip)
if ((reg1 & 0xC0) && (reg2 & 0xC0)) {
chip->sd_int = 1;
rtsx_write_register(chip, SDIO_CTRL, 0xFF,
- SDIO_BUS_CTRL | SDIO_CD_CTRL);
+ SDIO_BUS_CTRL | SDIO_CD_CTRL);
rtsx_write_register(chip, PWR_GATE_CTRL,
- LDO3318_PWR_MASK, LDO_ON);
+ LDO3318_PWR_MASK, LDO_ON);
}
}
@@ -133,7 +133,7 @@ void dynamic_configure_sdio_aspm(struct rtsx_chip *chip)
if (!chip->sdio_aspm) {
dev_dbg(rtsx_dev(chip), "SDIO enter ASPM!\n");
rtsx_write_register(chip, ASPM_FORCE_CTL, 0xFC,
- 0x30 | (chip->aspm_level[1] << 2));
+ 0x30 | (chip->aspm_level[1] << 2));
chip->sdio_aspm = 1;
}
} else {
@@ -154,7 +154,7 @@ void do_reset_sd_card(struct rtsx_chip *chip)
chip->sd_reset_counter, chip->card2lun[SD_CARD]);
if (chip->card2lun[SD_CARD] >= MAX_ALLOWED_LUN_CNT) {
- clear_bit(SD_NR, &(chip->need_reset));
+ clear_bit(SD_NR, &chip->need_reset);
chip->sd_reset_counter = 0;
chip->sd_show_cnt = 0;
return;
@@ -169,7 +169,7 @@ void do_reset_sd_card(struct rtsx_chip *chip)
if (chip->need_release & SD_CARD)
return;
if (retval == STATUS_SUCCESS) {
- clear_bit(SD_NR, &(chip->need_reset));
+ clear_bit(SD_NR, &chip->need_reset);
chip->sd_reset_counter = 0;
chip->sd_show_cnt = 0;
chip->card_ready |= SD_CARD;
@@ -177,7 +177,7 @@ void do_reset_sd_card(struct rtsx_chip *chip)
chip->rw_card[chip->card2lun[SD_CARD]] = sd_rw;
} else {
if (chip->sd_io || (chip->sd_reset_counter >= MAX_RESET_CNT)) {
- clear_bit(SD_NR, &(chip->need_reset));
+ clear_bit(SD_NR, &chip->need_reset);
chip->sd_reset_counter = 0;
chip->sd_show_cnt = 0;
} else {
@@ -208,7 +208,7 @@ void do_reset_xd_card(struct rtsx_chip *chip)
chip->xd_reset_counter, chip->card2lun[XD_CARD]);
if (chip->card2lun[XD_CARD] >= MAX_ALLOWED_LUN_CNT) {
- clear_bit(XD_NR, &(chip->need_reset));
+ clear_bit(XD_NR, &chip->need_reset);
chip->xd_reset_counter = 0;
chip->xd_show_cnt = 0;
return;
@@ -223,14 +223,14 @@ void do_reset_xd_card(struct rtsx_chip *chip)
if (chip->need_release & XD_CARD)
return;
if (retval == STATUS_SUCCESS) {
- clear_bit(XD_NR, &(chip->need_reset));
+ clear_bit(XD_NR, &chip->need_reset);
chip->xd_reset_counter = 0;
chip->card_ready |= XD_CARD;
chip->card_fail &= ~XD_CARD;
chip->rw_card[chip->card2lun[XD_CARD]] = xd_rw;
} else {
if (chip->xd_reset_counter >= MAX_RESET_CNT) {
- clear_bit(XD_NR, &(chip->need_reset));
+ clear_bit(XD_NR, &chip->need_reset);
chip->xd_reset_counter = 0;
chip->xd_show_cnt = 0;
} else {
@@ -256,7 +256,7 @@ void do_reset_ms_card(struct rtsx_chip *chip)
chip->ms_reset_counter, chip->card2lun[MS_CARD]);
if (chip->card2lun[MS_CARD] >= MAX_ALLOWED_LUN_CNT) {
- clear_bit(MS_NR, &(chip->need_reset));
+ clear_bit(MS_NR, &chip->need_reset);
chip->ms_reset_counter = 0;
chip->ms_show_cnt = 0;
return;
@@ -271,14 +271,14 @@ void do_reset_ms_card(struct rtsx_chip *chip)
if (chip->need_release & MS_CARD)
return;
if (retval == STATUS_SUCCESS) {
- clear_bit(MS_NR, &(chip->need_reset));
+ clear_bit(MS_NR, &chip->need_reset);
chip->ms_reset_counter = 0;
chip->card_ready |= MS_CARD;
chip->card_fail &= ~MS_CARD;
chip->rw_card[chip->card2lun[MS_CARD]] = ms_rw;
} else {
if (chip->ms_reset_counter >= MAX_RESET_CNT) {
- clear_bit(MS_NR, &(chip->need_reset));
+ clear_bit(MS_NR, &chip->need_reset);
chip->ms_reset_counter = 0;
chip->ms_show_cnt = 0;
} else {
@@ -300,7 +300,7 @@ static void release_sdio(struct rtsx_chip *chip)
{
if (chip->sd_io) {
rtsx_write_register(chip, CARD_STOP, SD_STOP | SD_CLR_ERR,
- SD_STOP | SD_CLR_ERR);
+ SD_STOP | SD_CLR_ERR);
if (chip->chip_insert_with_sdio) {
chip->chip_insert_with_sdio = 0;
@@ -369,7 +369,7 @@ void rtsx_reset_cards(struct rtsx_chip *chip)
rtsx_disable_aspm(chip);
if ((chip->need_reset & SD_CARD) && chip->chip_insert_with_sdio)
- clear_bit(SD_NR, &(chip->need_reset));
+ clear_bit(SD_NR, &chip->need_reset);
if (chip->need_reset & XD_CARD) {
chip->card_exist |= XD_CARD;
@@ -381,8 +381,8 @@ void rtsx_reset_cards(struct rtsx_chip *chip)
}
if (CHECK_PID(chip, 0x5288) && CHECK_BARO_PKG(chip, QFN)) {
if (chip->card_exist & XD_CARD) {
- clear_bit(SD_NR, &(chip->need_reset));
- clear_bit(MS_NR, &(chip->need_reset));
+ clear_bit(SD_NR, &chip->need_reset);
+ clear_bit(MS_NR, &chip->need_reset);
}
}
if (chip->need_reset & SD_CARD) {
@@ -449,7 +449,7 @@ void rtsx_reinit_cards(struct rtsx_chip *chip, int reset_chip)
#ifdef DISABLE_CARD_INT
void card_cd_debounce(struct rtsx_chip *chip, unsigned long *need_reset,
- unsigned long *need_release)
+ unsigned long *need_release)
{
u8 release_map = 0, reset_map = 0;
@@ -502,13 +502,13 @@ void card_cd_debounce(struct rtsx_chip *chip, unsigned long *need_reset,
reset_map = 0;
if (!(chip->card_exist & XD_CARD) &&
- (xd_cnt > (DEBOUNCE_CNT-1)))
+ (xd_cnt > (DEBOUNCE_CNT - 1)))
reset_map |= XD_CARD;
if (!(chip->card_exist & SD_CARD) &&
- (sd_cnt > (DEBOUNCE_CNT-1)))
+ (sd_cnt > (DEBOUNCE_CNT - 1)))
reset_map |= SD_CARD;
if (!(chip->card_exist & MS_CARD) &&
- (ms_cnt > (DEBOUNCE_CNT-1)))
+ (ms_cnt > (DEBOUNCE_CNT - 1)))
reset_map |= MS_CARD;
}
@@ -531,23 +531,23 @@ void rtsx_init_cards(struct rtsx_chip *chip)
}
#ifdef DISABLE_CARD_INT
- card_cd_debounce(chip, &(chip->need_reset), &(chip->need_release));
+ card_cd_debounce(chip, &chip->need_reset, &chip->need_release);
#endif
if (chip->need_release) {
if (CHECK_PID(chip, 0x5288) && CHECK_BARO_PKG(chip, QFN)) {
if (chip->int_reg & XD_EXIST) {
- clear_bit(SD_NR, &(chip->need_release));
- clear_bit(MS_NR, &(chip->need_release));
+ clear_bit(SD_NR, &chip->need_release);
+ clear_bit(MS_NR, &chip->need_release);
}
}
if (!(chip->card_exist & SD_CARD) && !chip->sd_io)
- clear_bit(SD_NR, &(chip->need_release));
+ clear_bit(SD_NR, &chip->need_release);
if (!(chip->card_exist & XD_CARD))
- clear_bit(XD_NR, &(chip->need_release));
+ clear_bit(XD_NR, &chip->need_release);
if (!(chip->card_exist & MS_CARD))
- clear_bit(MS_NR, &(chip->need_release));
+ clear_bit(MS_NR, &chip->need_release);
dev_dbg(rtsx_dev(chip), "chip->need_release = 0x%x\n",
(unsigned int)(chip->need_release));
@@ -556,8 +556,10 @@ void rtsx_init_cards(struct rtsx_chip *chip)
if (chip->need_release) {
if (chip->ocp_stat & (CARD_OC_NOW | CARD_OC_EVER))
rtsx_write_register(chip, OCPCLR,
- CARD_OC_INT_CLR | CARD_OC_CLR,
- CARD_OC_INT_CLR | CARD_OC_CLR);
+ CARD_OC_INT_CLR |
+ CARD_OC_CLR,
+ CARD_OC_INT_CLR |
+ CARD_OC_CLR);
chip->ocp_stat = 0;
}
#endif
@@ -567,7 +569,7 @@ void rtsx_init_cards(struct rtsx_chip *chip)
}
if (chip->need_release & SD_CARD) {
- clear_bit(SD_NR, &(chip->need_release));
+ clear_bit(SD_NR, &chip->need_release);
chip->card_exist &= ~SD_CARD;
chip->card_ejected &= ~SD_CARD;
chip->card_fail &= ~SD_CARD;
@@ -580,7 +582,7 @@ void rtsx_init_cards(struct rtsx_chip *chip)
}
if (chip->need_release & XD_CARD) {
- clear_bit(XD_NR, &(chip->need_release));
+ clear_bit(XD_NR, &chip->need_release);
chip->card_exist &= ~XD_CARD;
chip->card_ejected &= ~XD_CARD;
chip->card_fail &= ~XD_CARD;
@@ -590,13 +592,13 @@ void rtsx_init_cards(struct rtsx_chip *chip)
release_xd_card(chip);
if (CHECK_PID(chip, 0x5288) &&
- CHECK_BARO_PKG(chip, QFN))
+ CHECK_BARO_PKG(chip, QFN))
rtsx_write_register(chip, HOST_SLEEP_STATE,
- 0xC0, 0xC0);
+ 0xC0, 0xC0);
}
if (chip->need_release & MS_CARD) {
- clear_bit(MS_NR, &(chip->need_release));
+ clear_bit(MS_NR, &chip->need_release);
chip->card_exist &= ~MS_CARD;
chip->card_ejected &= ~MS_CARD;
chip->card_fail &= ~MS_CARD;
@@ -650,7 +652,7 @@ int switch_ssc_clock(struct rtsx_chip *chip, int clk)
return STATUS_FAIL;
}
- mcu_cnt = (u8)(125/clk + 3);
+ mcu_cnt = (u8)(125 / clk + 3);
if (mcu_cnt > 7)
mcu_cnt = 7;
@@ -681,9 +683,9 @@ int switch_ssc_clock(struct rtsx_chip *chip, int clk)
rtsx_add_cmd(chip, WRITE_REG_CMD, SSC_CTL1, SSC_RSTB, SSC_RSTB);
if (sd_vpclk_phase_reset) {
rtsx_add_cmd(chip, WRITE_REG_CMD, SD_VPCLK0_CTL,
- PHASE_NOT_RESET, 0);
+ PHASE_NOT_RESET, 0);
rtsx_add_cmd(chip, WRITE_REG_CMD, SD_VPCLK0_CTL,
- PHASE_NOT_RESET, PHASE_NOT_RESET);
+ PHASE_NOT_RESET, PHASE_NOT_RESET);
}
retval = rtsx_send_cmd(chip, 0, WAIT_TIME);
@@ -850,7 +852,7 @@ int switch_normal_clock(struct rtsx_chip *chip, int clk)
}
void trans_dma_enable(enum dma_data_direction dir, struct rtsx_chip *chip,
- u32 byte_cnt, u8 pack_size)
+ u32 byte_cnt, u8 pack_size)
{
if (pack_size > DMA_1024)
pack_size = DMA_512;
@@ -864,11 +866,11 @@ void trans_dma_enable(enum dma_data_direction dir, struct rtsx_chip *chip,
if (dir == DMA_FROM_DEVICE) {
rtsx_add_cmd(chip, WRITE_REG_CMD, DMACTL,
- 0x03 | DMA_PACK_SIZE_MASK,
+ 0x03 | DMA_PACK_SIZE_MASK,
DMA_DIR_FROM_CARD | DMA_EN | pack_size);
} else {
rtsx_add_cmd(chip, WRITE_REG_CMD, DMACTL,
- 0x03 | DMA_PACK_SIZE_MASK,
+ 0x03 | DMA_PACK_SIZE_MASK,
DMA_DIR_TO_CARD | DMA_EN | pack_size);
}
@@ -978,13 +980,13 @@ int card_power_off(struct rtsx_chip *chip, u8 card)
}
int card_rw(struct scsi_cmnd *srb, struct rtsx_chip *chip,
- u32 sec_addr, u16 sec_cnt)
+ u32 sec_addr, u16 sec_cnt)
{
int retval;
unsigned int lun = SCSI_LUN(srb);
int i;
- if (chip->rw_card[lun] == NULL) {
+ if (!chip->rw_card[lun]) {
rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -1115,7 +1117,7 @@ void turn_on_led(struct rtsx_chip *chip, u8 gpio)
{
if (CHECK_PID(chip, 0x5288))
rtsx_write_register(chip, CARD_GPIO, (u8)(1 << gpio),
- (u8)(1 << gpio));
+ (u8)(1 << gpio));
else
rtsx_write_register(chip, CARD_GPIO, (u8)(1 << gpio), 0);
}
@@ -1126,7 +1128,7 @@ void turn_off_led(struct rtsx_chip *chip, u8 gpio)
rtsx_write_register(chip, CARD_GPIO, (u8)(1 << gpio), 0);
else
rtsx_write_register(chip, CARD_GPIO, (u8)(1 << gpio),
- (u8)(1 << gpio));
+ (u8)(1 << gpio));
}
int detect_card_cd(struct rtsx_chip *chip, int card)
diff --git a/drivers/staging/rts5208/rtsx_card.h b/drivers/staging/rts5208/rtsx_card.h
index 56df9a431d6d..aa37705bae39 100644
--- a/drivers/staging/rts5208/rtsx_card.h
+++ b/drivers/staging/rts5208/rtsx_card.h
@@ -1011,9 +1011,9 @@ int switch_normal_clock(struct rtsx_chip *chip, int clk);
int enable_card_clock(struct rtsx_chip *chip, u8 card);
int disable_card_clock(struct rtsx_chip *chip, u8 card);
int card_rw(struct scsi_cmnd *srb, struct rtsx_chip *chip,
- u32 sec_addr, u16 sec_cnt);
+ u32 sec_addr, u16 sec_cnt);
void trans_dma_enable(enum dma_data_direction dir,
- struct rtsx_chip *chip, u32 byte_cnt, u8 pack_size);
+ struct rtsx_chip *chip, u32 byte_cnt, u8 pack_size);
void toggle_gpio(struct rtsx_chip *chip, u8 gpio);
void turn_on_led(struct rtsx_chip *chip, u8 gpio);
void turn_off_led(struct rtsx_chip *chip, u8 gpio);
@@ -1030,10 +1030,10 @@ u8 get_lun_card(struct rtsx_chip *chip, unsigned int lun);
static inline u32 get_card_size(struct rtsx_chip *chip, unsigned int lun)
{
#ifdef SUPPORT_SD_LOCK
- struct sd_info *sd_card = &(chip->sd_card);
+ struct sd_info *sd_card = &chip->sd_card;
if ((get_lun_card(chip, lun) == SD_CARD) &&
- (sd_card->sd_lock_status & SD_LOCKED))
+ (sd_card->sd_lock_status & SD_LOCKED))
return 0;
return chip->capacity[lun];
@@ -1073,25 +1073,25 @@ static inline int card_power_off_all(struct rtsx_chip *chip)
static inline void rtsx_clear_xd_error(struct rtsx_chip *chip)
{
rtsx_write_register(chip, CARD_STOP, XD_STOP | XD_CLR_ERR,
- XD_STOP | XD_CLR_ERR);
+ XD_STOP | XD_CLR_ERR);
}
static inline void rtsx_clear_sd_error(struct rtsx_chip *chip)
{
rtsx_write_register(chip, CARD_STOP, SD_STOP | SD_CLR_ERR,
- SD_STOP | SD_CLR_ERR);
+ SD_STOP | SD_CLR_ERR);
}
static inline void rtsx_clear_ms_error(struct rtsx_chip *chip)
{
rtsx_write_register(chip, CARD_STOP, MS_STOP | MS_CLR_ERR,
- MS_STOP | MS_CLR_ERR);
+ MS_STOP | MS_CLR_ERR);
}
static inline void rtsx_clear_spi_error(struct rtsx_chip *chip)
{
rtsx_write_register(chip, CARD_STOP, SPI_STOP | SPI_CLR_ERR,
- SPI_STOP | SPI_CLR_ERR);
+ SPI_STOP | SPI_CLR_ERR);
}
#ifdef SUPPORT_SDIO_ASPM
diff --git a/drivers/staging/rts5208/rtsx_chip.c b/drivers/staging/rts5208/rtsx_chip.c
index a10dd6220a7b..3511157a2c78 100644
--- a/drivers/staging/rts5208/rtsx_chip.c
+++ b/drivers/staging/rts5208/rtsx_chip.c
@@ -114,7 +114,8 @@ static int rtsx_pre_handle_sdio_old(struct rtsx_chip *chip)
if (chip->asic_code) {
retval = rtsx_write_register(chip, CARD_PULL_CTL5,
0xFF,
- MS_INS_PU | SD_WP_PU | SD_CD_PU | SD_CMD_PU);
+ MS_INS_PU | SD_WP_PU |
+ SD_CD_PU | SD_CMD_PU);
if (retval) {
rtsx_trace(chip);
return retval;
@@ -240,10 +241,10 @@ static int rtsx_pre_handle_sdio_new(struct rtsx_chip *chip)
return STATUS_FAIL;
}
} else {
- retval = rtsx_write_register(chip,
- FPGA_PULL_CTL,
- FPGA_SD_PULL_CTL_BIT | 0x20,
- 0);
+ retval = rtsx_write_register
+ (chip, FPGA_PULL_CTL,
+ FPGA_SD_PULL_CTL_BIT | 0x20,
+ 0);
if (retval) {
rtsx_trace(chip);
return retval;
@@ -713,7 +714,8 @@ nextcard:
if (chip->ft2_fast_mode) {
retval = rtsx_write_register(chip, CARD_PWR_CTL, 0xFF,
- MS_PARTIAL_POWER_ON | SD_PARTIAL_POWER_ON);
+ MS_PARTIAL_POWER_ON |
+ SD_PARTIAL_POWER_ON);
if (retval) {
rtsx_trace(chip);
return retval;
@@ -1567,7 +1569,8 @@ int rtsx_write_cfg_dw(struct rtsx_chip *chip, u8 func_no, u16 addr, u32 mask,
}
retval = rtsx_write_register(chip, CFGRWCTL, 0xFF,
- 0x80 | mode | ((func_no & 0x03) << 4));
+ 0x80 | mode |
+ ((func_no & 0x03) << 4));
if (retval) {
rtsx_trace(chip);
return retval;
diff --git a/drivers/staging/rts5208/rtsx_chip.h b/drivers/staging/rts5208/rtsx_chip.h
index f36642817c6e..4f6e3c1c4621 100644
--- a/drivers/staging/rts5208/rtsx_chip.h
+++ b/drivers/staging/rts5208/rtsx_chip.h
@@ -130,16 +130,20 @@
#define PRDCT_REV_LEN 4 /* Product LOT Length */
/* Dynamic flag definitions: used in set_bit() etc. */
-#define RTSX_FLIDX_TRANS_ACTIVE 18 /* 0x00040000 transfer is active */
-#define RTSX_FLIDX_ABORTING 20 /* 0x00100000 abort is in progress */
-#define RTSX_FLIDX_DISCONNECTING 21 /* 0x00200000 disconnect in progress */
+/* 0x00040000 transfer is active */
+#define RTSX_FLIDX_TRANS_ACTIVE 18
+/* 0x00100000 abort is in progress */
+#define RTSX_FLIDX_ABORTING 20
+/* 0x00200000 disconnect in progress */
+#define RTSX_FLIDX_DISCONNECTING 21
#define ABORTING_OR_DISCONNECTING ((1UL << US_FLIDX_ABORTING) | \
(1UL << US_FLIDX_DISCONNECTING))
-#define RTSX_FLIDX_RESETTING 22 /* 0x00400000 device reset in progress */
-#define RTSX_FLIDX_TIMED_OUT 23 /* 0x00800000 SCSI midlayer timed out */
-
+/* 0x00400000 device reset in progress */
+#define RTSX_FLIDX_RESETTING 22
+/* 0x00800000 SCSI midlayer timed out */
+#define RTSX_FLIDX_TIMED_OUT 23
#define DRCT_ACCESS_DEV 0x00 /* Direct Access Device */
#define RMB_DISC 0x80 /* The Device is Removable */
#define ANSI_SCSI2 0x02 /* Based on ANSI-SCSI2 */
@@ -285,23 +289,24 @@ struct sense_data_t {
#define CARD_INT (XD_INT | MS_INT | SD_INT)
#define NEED_COMPLETE_INT (DATA_DONE_INT | TRANS_OK_INT | TRANS_FAIL_INT)
-#define RTSX_INT (CMD_DONE_INT | NEED_COMPLETE_INT | CARD_INT | GPIO0_INT | OC_INT)
+#define RTSX_INT (CMD_DONE_INT | NEED_COMPLETE_INT | CARD_INT | \
+ GPIO0_INT | OC_INT)
#define CARD_EXIST (XD_EXIST | MS_EXIST | SD_EXIST)
/* Bus interrupt enable register */
-#define CMD_DONE_INT_EN (1 << 31)
-#define DATA_DONE_INT_EN (1 << 30)
-#define TRANS_OK_INT_EN (1 << 29)
-#define TRANS_FAIL_INT_EN (1 << 28)
-#define XD_INT_EN (1 << 27)
-#define MS_INT_EN (1 << 26)
-#define SD_INT_EN (1 << 25)
-#define GPIO0_INT_EN (1 << 24)
-#define OC_INT_EN (1 << 23)
+#define CMD_DONE_INT_EN BIT(31)
+#define DATA_DONE_INT_EN BIT(30)
+#define TRANS_OK_INT_EN BIT(29)
+#define TRANS_FAIL_INT_EN BIT(28)
+#define XD_INT_EN BIT(27)
+#define MS_INT_EN BIT(26)
+#define SD_INT_EN BIT(25)
+#define GPIO0_INT_EN BIT(24)
+#define OC_INT_EN BIT(23)
#define DELINK_INT_EN GPIO0_INT_EN
-#define MS_OC_INT_EN (1 << 23)
-#define SD_OC_INT_EN (1 << 22)
+#define MS_OC_INT_EN BIT(23)
+#define SD_OC_INT_EN BIT(22)
#define READ_REG_CMD 0
#define WRITE_REG_CMD 1
@@ -318,10 +323,10 @@ struct sense_data_t {
#define MS_NR 3
#define XD_NR 4
#define SPI_NR 7
-#define SD_CARD (1 << SD_NR)
-#define MS_CARD (1 << MS_NR)
-#define XD_CARD (1 << XD_NR)
-#define SPI_CARD (1 << SPI_NR)
+#define SD_CARD BIT(SD_NR)
+#define MS_CARD BIT(MS_NR)
+#define XD_CARD BIT(XD_NR)
+#define SPI_CARD BIT(SPI_NR)
#define MAX_ALLOWED_LUN_CNT 8
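Aside: the interrupt-enable and card-type defines above replace open-coded (1 << n) with BIT(n). In the kernel, BIT(nr) expands to (1UL << (nr)), which also avoids the undefined behavior of shifting a signed int into the sign bit in the old CMD_DONE_INT_EN. A minimal userspace equivalence check (local BIT() mirrors the kernel definition):

#include <assert.h>

#define BIT(nr) (1UL << (nr))  /* as in <linux/bits.h> */

#define SD_NR   2
#define SD_CARD BIT(SD_NR)

int main(void)
{
        assert(SD_CARD == (1UL << 2));
        assert(BIT(31) == 2147483648UL); /* unsigned, no signed overflow */
        return 0;
}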
@@ -393,14 +398,23 @@ struct zone_entry {
/* SD card */
#define CHK_SD(sd_card) (((sd_card)->sd_type & 0xFF) == TYPE_SD)
-#define CHK_SD_HS(sd_card) (CHK_SD(sd_card) && ((sd_card)->sd_type & SD_HS))
-#define CHK_SD_SDR50(sd_card) (CHK_SD(sd_card) && ((sd_card)->sd_type & SD_SDR50))
-#define CHK_SD_DDR50(sd_card) (CHK_SD(sd_card) && ((sd_card)->sd_type & SD_DDR50))
-#define CHK_SD_SDR104(sd_card) (CHK_SD(sd_card) && ((sd_card)->sd_type & SD_SDR104))
-#define CHK_SD_HCXC(sd_card) (CHK_SD(sd_card) && ((sd_card)->sd_type & SD_HCXC))
-#define CHK_SD_HC(sd_card) (CHK_SD_HCXC(sd_card) && ((sd_card)->capacity <= 0x4000000))
-#define CHK_SD_XC(sd_card) (CHK_SD_HCXC(sd_card) && ((sd_card)->capacity > 0x4000000))
-#define CHK_SD30_SPEED(sd_card) (CHK_SD_SDR50(sd_card) || CHK_SD_DDR50(sd_card) || CHK_SD_SDR104(sd_card))
+#define CHK_SD_HS(sd_card) (CHK_SD(sd_card) && \
+ ((sd_card)->sd_type & SD_HS))
+#define CHK_SD_SDR50(sd_card) (CHK_SD(sd_card) && \
+ ((sd_card)->sd_type & SD_SDR50))
+#define CHK_SD_DDR50(sd_card) (CHK_SD(sd_card) && \
+ ((sd_card)->sd_type & SD_DDR50))
+#define CHK_SD_SDR104(sd_card) (CHK_SD(sd_card) && \
+ ((sd_card)->sd_type & SD_SDR104))
+#define CHK_SD_HCXC(sd_card) (CHK_SD(sd_card) && \
+ ((sd_card)->sd_type & SD_HCXC))
+#define CHK_SD_HC(sd_card) (CHK_SD_HCXC(sd_card) && \
+ ((sd_card)->capacity <= 0x4000000))
+#define CHK_SD_XC(sd_card) (CHK_SD_HCXC(sd_card) && \
+ ((sd_card)->capacity > 0x4000000))
+#define CHK_SD30_SPEED(sd_card) (CHK_SD_SDR50(sd_card) || \
+ CHK_SD_DDR50(sd_card) || \
+ CHK_SD_SDR104(sd_card))
#define SET_SD(sd_card) ((sd_card)->sd_type = TYPE_SD)
#define SET_SD_HS(sd_card) ((sd_card)->sd_type |= SD_HS)
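Aside: the CHK_SD_* rewrites above only re-wrap long macro bodies with backslash continuations to meet the 80-column limit; the expansions are unchanged. A stand-alone sketch (mock type bits, not the driver's values) showing a wrapped macro behaves identically:

#include <assert.h>

#define TYPE_SD 0x01
#define SD_HS   0x0100

#define CHK_SD(t)    (((t) & 0xFF) == TYPE_SD)
#define CHK_SD_HS(t) (CHK_SD(t) && \
                      ((t) & SD_HS))    /* continuation, same expansion */

int main(void)
{
        assert(CHK_SD_HS(TYPE_SD | SD_HS));
        assert(!CHK_SD_HS(TYPE_SD));
        return 0;
}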
@@ -416,13 +430,20 @@ struct zone_entry {
#define CLR_SD_HCXC(sd_card) ((sd_card)->sd_type &= ~SD_HCXC)
/* MMC card */
-#define CHK_MMC(sd_card) (((sd_card)->sd_type & 0xFF) == TYPE_MMC)
-#define CHK_MMC_26M(sd_card) (CHK_MMC(sd_card) && ((sd_card)->sd_type & MMC_26M))
-#define CHK_MMC_52M(sd_card) (CHK_MMC(sd_card) && ((sd_card)->sd_type & MMC_52M))
-#define CHK_MMC_4BIT(sd_card) (CHK_MMC(sd_card) && ((sd_card)->sd_type & MMC_4BIT))
-#define CHK_MMC_8BIT(sd_card) (CHK_MMC(sd_card) && ((sd_card)->sd_type & MMC_8BIT))
-#define CHK_MMC_SECTOR_MODE(sd_card) (CHK_MMC(sd_card) && ((sd_card)->sd_type & MMC_SECTOR_MODE))
-#define CHK_MMC_DDR52(sd_card) (CHK_MMC(sd_card) && ((sd_card)->sd_type & MMC_DDR52))
+#define CHK_MMC(sd_card) (((sd_card)->sd_type & 0xFF) == \
+ TYPE_MMC)
+#define CHK_MMC_26M(sd_card) (CHK_MMC(sd_card) && \
+ ((sd_card)->sd_type & MMC_26M))
+#define CHK_MMC_52M(sd_card) (CHK_MMC(sd_card) && \
+ ((sd_card)->sd_type & MMC_52M))
+#define CHK_MMC_4BIT(sd_card) (CHK_MMC(sd_card) && \
+ ((sd_card)->sd_type & MMC_4BIT))
+#define CHK_MMC_8BIT(sd_card) (CHK_MMC(sd_card) && \
+ ((sd_card)->sd_type & MMC_8BIT))
+#define CHK_MMC_SECTOR_MODE(sd_card) (CHK_MMC(sd_card) && \
+ ((sd_card)->sd_type & MMC_SECTOR_MODE))
+#define CHK_MMC_DDR52(sd_card) (CHK_MMC(sd_card) && \
+ ((sd_card)->sd_type & MMC_DDR52))
#define SET_MMC(sd_card) ((sd_card)->sd_type = TYPE_MMC)
#define SET_MMC_26M(sd_card) ((sd_card)->sd_type |= MMC_26M)
@@ -439,7 +460,8 @@ struct zone_entry {
#define CLR_MMC_SECTOR_MODE(sd_card) ((sd_card)->sd_type &= ~MMC_SECTOR_MODE)
#define CLR_MMC_DDR52(sd_card) ((sd_card)->sd_type &= ~MMC_DDR52)
-#define CHK_MMC_HS(sd_card) (CHK_MMC_52M(sd_card) && CHK_MMC_26M(sd_card))
+#define CHK_MMC_HS(sd_card) (CHK_MMC_52M(sd_card) && \
+ CHK_MMC_26M(sd_card))
#define CLR_MMC_HS(sd_card) \
do { \
CLR_MMC_DDR52(sd_card); \
@@ -450,12 +472,18 @@ do { \
#define SD_SUPPORT_CLASS_TEN 0x01
#define SD_SUPPORT_1V8 0x02
-#define SD_SET_CLASS_TEN(sd_card) ((sd_card)->sd_setting |= SD_SUPPORT_CLASS_TEN)
-#define SD_CHK_CLASS_TEN(sd_card) ((sd_card)->sd_setting & SD_SUPPORT_CLASS_TEN)
-#define SD_CLR_CLASS_TEN(sd_card) ((sd_card)->sd_setting &= ~SD_SUPPORT_CLASS_TEN)
-#define SD_SET_1V8(sd_card) ((sd_card)->sd_setting |= SD_SUPPORT_1V8)
-#define SD_CHK_1V8(sd_card) ((sd_card)->sd_setting & SD_SUPPORT_1V8)
-#define SD_CLR_1V8(sd_card) ((sd_card)->sd_setting &= ~SD_SUPPORT_1V8)
+#define SD_SET_CLASS_TEN(sd_card) ((sd_card)->sd_setting |= \
+ SD_SUPPORT_CLASS_TEN)
+#define SD_CHK_CLASS_TEN(sd_card) ((sd_card)->sd_setting & \
+ SD_SUPPORT_CLASS_TEN)
+#define SD_CLR_CLASS_TEN(sd_card) ((sd_card)->sd_setting &= \
+ ~SD_SUPPORT_CLASS_TEN)
+#define SD_SET_1V8(sd_card) ((sd_card)->sd_setting |= \
+ SD_SUPPORT_1V8)
+#define SD_CHK_1V8(sd_card) ((sd_card)->sd_setting & \
+ SD_SUPPORT_1V8)
+#define SD_CLR_1V8(sd_card) ((sd_card)->sd_setting &= \
+ ~SD_SUPPORT_1V8)
struct sd_info {
u16 sd_type;
@@ -544,9 +572,12 @@ struct xd_info {
#define HG8BIT (MS_HG | MS_8BIT)
#define CHK_MSPRO(ms_card) (((ms_card)->ms_type & 0xFF) == TYPE_MSPRO)
-#define CHK_HG8BIT(ms_card) (CHK_MSPRO(ms_card) && (((ms_card)->ms_type & HG8BIT) == HG8BIT))
-#define CHK_MSXC(ms_card) (CHK_MSPRO(ms_card) && ((ms_card)->ms_type & MS_XC))
-#define CHK_MSHG(ms_card) (CHK_MSPRO(ms_card) && ((ms_card)->ms_type & MS_HG))
+#define CHK_HG8BIT(ms_card) (CHK_MSPRO(ms_card) && \
+ (((ms_card)->ms_type & HG8BIT) == HG8BIT))
+#define CHK_MSXC(ms_card) (CHK_MSPRO(ms_card) && \
+ ((ms_card)->ms_type & MS_XC))
+#define CHK_MSHG(ms_card) (CHK_MSPRO(ms_card) && \
+ ((ms_card)->ms_type & MS_HG))
#define CHK_MS8BIT(ms_card) (((ms_card)->ms_type & MS_8BIT))
#define CHK_MS4BIT(ms_card) (((ms_card)->ms_type & MS_4BIT))
@@ -679,8 +710,10 @@ struct trace_msg_t {
#define CLR_SDIO_EXIST(chip) ((chip)->sdio_func_exist &= ~SDIO_EXIST)
#define CHK_SDIO_IGNORED(chip) ((chip)->sdio_func_exist & SDIO_IGNORED)
-#define SET_SDIO_IGNORED(chip) ((chip)->sdio_func_exist |= SDIO_IGNORED)
-#define CLR_SDIO_IGNORED(chip) ((chip)->sdio_func_exist &= ~SDIO_IGNORED)
+#define SET_SDIO_IGNORED(chip) ((chip)->sdio_func_exist |= \
+ SDIO_IGNORED)
+#define CLR_SDIO_IGNORED(chip) ((chip)->sdio_func_exist &= \
+ ~SDIO_IGNORED)
struct rtsx_chip {
struct rtsx_dev *rtsx;
@@ -957,12 +990,12 @@ void rtsx_stop_cmd(struct rtsx_chip *chip, int card);
int rtsx_write_register(struct rtsx_chip *chip, u16 addr, u8 mask, u8 data);
int rtsx_read_register(struct rtsx_chip *chip, u16 addr, u8 *data);
int rtsx_write_cfg_dw(struct rtsx_chip *chip,
- u8 func_no, u16 addr, u32 mask, u32 val);
+ u8 func_no, u16 addr, u32 mask, u32 val);
int rtsx_read_cfg_dw(struct rtsx_chip *chip, u8 func_no, u16 addr, u32 *val);
int rtsx_write_cfg_seq(struct rtsx_chip *chip,
- u8 func, u16 addr, u8 *buf, int len);
+ u8 func, u16 addr, u8 *buf, int len);
int rtsx_read_cfg_seq(struct rtsx_chip *chip,
- u8 func, u16 addr, u8 *buf, int len);
+ u8 func, u16 addr, u8 *buf, int len);
int rtsx_write_phy_register(struct rtsx_chip *chip, u8 addr, u16 val);
int rtsx_read_phy_register(struct rtsx_chip *chip, u8 addr, u16 *val);
int rtsx_read_efuse(struct rtsx_chip *chip, u8 addr, u8 *val);
diff --git a/drivers/staging/rts5208/rtsx_scsi.c b/drivers/staging/rts5208/rtsx_scsi.c
index becb4bba166c..a95c5de1aa00 100644
--- a/drivers/staging/rts5208/rtsx_scsi.c
+++ b/drivers/staging/rts5208/rtsx_scsi.c
@@ -354,7 +354,7 @@ void set_sense_type(struct rtsx_chip *chip, unsigned int lun, int sense_type)
case SENSE_TYPE_MEDIA_INVALID_CMD_FIELD:
set_sense_data(chip, lun, CUR_ERR, ILGAL_REQ, 0,
- ASC_INVLD_CDB, ASCQ_INVLD_CDB, CDB_ILLEGAL, 1);
+ ASC_INVLD_CDB, ASCQ_INVLD_CDB, CDB_ILLEGAL, 1);
break;
case SENSE_TYPE_FORMAT_IN_PROGRESS:
@@ -397,10 +397,10 @@ void set_sense_type(struct rtsx_chip *chip, unsigned int lun, int sense_type)
}
void set_sense_data(struct rtsx_chip *chip, unsigned int lun, u8 err_code,
- u8 sense_key, u32 info, u8 asc, u8 ascq, u8 sns_key_info0,
+ u8 sense_key, u32 info, u8 asc, u8 ascq, u8 sns_key_info0,
u16 sns_key_info1)
{
- struct sense_data_t *sense = &(chip->sense_buffer[lun]);
+ struct sense_data_t *sense = &chip->sense_buffer[lun];
sense->err_code = err_code;
sense->sense_key = sense_key;
@@ -436,7 +436,7 @@ static int test_unit_ready(struct scsi_cmnd *srb, struct rtsx_chip *chip)
#ifdef SUPPORT_SD_LOCK
if (get_lun_card(chip, SCSI_LUN(srb)) == SD_CARD) {
- struct sd_info *sd_card = &(chip->sd_card);
+ struct sd_info *sd_card = &chip->sd_card;
if (sd_card->sd_lock_notify) {
sd_card->sd_lock_notify = 0;
@@ -444,7 +444,7 @@ static int test_unit_ready(struct scsi_cmnd *srb, struct rtsx_chip *chip)
return TRANSPORT_FAILED;
} else if (sd_card->sd_lock_status & SD_LOCKED) {
set_sense_type(chip, lun,
- SENSE_TYPE_MEDIA_READ_FORBIDDEN);
+ SENSE_TYPE_MEDIA_READ_FORBIDDEN);
return TRANSPORT_FAILED;
}
}
@@ -514,7 +514,7 @@ static int inquiry(struct scsi_cmnd *srb, struct rtsx_chip *chip)
#ifdef SUPPORT_MAGIC_GATE
if ((chip->mspro_formatter_enable) &&
- (chip->lun2card[lun] & MS_CARD))
+ (chip->lun2card[lun] & MS_CARD))
#else
if (chip->mspro_formatter_enable)
#endif
@@ -603,7 +603,7 @@ static int allow_medium_removal(struct scsi_cmnd *srb, struct rtsx_chip *chip)
if (prevent) {
set_sense_type(chip, SCSI_LUN(srb),
- SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
+ SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
rtsx_trace(chip);
return TRANSPORT_FAILED;
}
@@ -615,13 +615,13 @@ static int request_sense(struct scsi_cmnd *srb, struct rtsx_chip *chip)
{
struct sense_data_t *sense;
unsigned int lun = SCSI_LUN(srb);
- struct ms_info *ms_card = &(chip->ms_card);
+ struct ms_info *ms_card = &chip->ms_card;
unsigned char *tmp, *buf;
- sense = &(chip->sense_buffer[lun]);
+ sense = &chip->sense_buffer[lun];
if ((get_lun_card(chip, lun) == MS_CARD) &&
- ms_card->pro_under_formatting) {
+ ms_card->pro_under_formatting) {
if (ms_card->format_status == FORMAT_SUCCESS) {
set_sense_type(chip, lun, SENSE_TYPE_NO_SENSE);
ms_card->pro_under_formatting = 0;
@@ -629,7 +629,7 @@ static int request_sense(struct scsi_cmnd *srb, struct rtsx_chip *chip)
} else if (ms_card->format_status == FORMAT_IN_PROGRESS) {
/* Logical Unit Not Ready Format in Progress */
set_sense_data(chip, lun, CUR_ERR, 0x02, 0, 0x04, 0x04,
- 0, (u16)(ms_card->progress));
+ 0, (u16)(ms_card->progress));
} else {
/* Format Command Failed */
set_sense_type(chip, lun, SENSE_TYPE_FORMAT_CMD_FAILED);
@@ -659,9 +659,9 @@ static int request_sense(struct scsi_cmnd *srb, struct rtsx_chip *chip)
}
static void ms_mode_sense(struct rtsx_chip *chip, u8 cmd,
- int lun, u8 *buf, int buf_len)
+ int lun, u8 *buf, int buf_len)
{
- struct ms_info *ms_card = &(chip->ms_card);
+ struct ms_info *ms_card = &chip->ms_card;
int sys_info_offset;
int data_size = buf_len;
bool support_format = false;
@@ -754,10 +754,10 @@ static void ms_mode_sense(struct rtsx_chip *chip, u8 cmd,
static int mode_sense(struct scsi_cmnd *srb, struct rtsx_chip *chip)
{
unsigned int lun = SCSI_LUN(srb);
- unsigned int dataSize;
+ unsigned int data_size;
int status;
bool pro_formatter_flag;
- unsigned char pageCode, *buf;
+ unsigned char page_code, *buf;
u8 card = get_lun_card(chip, lun);
#ifndef SUPPORT_MAGIC_GATE
@@ -770,11 +770,11 @@ static int mode_sense(struct scsi_cmnd *srb, struct rtsx_chip *chip)
#endif
pro_formatter_flag = false;
- dataSize = 8;
+ data_size = 8;
#ifdef SUPPORT_MAGIC_GATE
if ((chip->lun2card[lun] & MS_CARD)) {
if (!card || (card == MS_CARD)) {
- dataSize = 108;
+ data_size = 108;
if (chip->mspro_formatter_enable)
pro_formatter_flag = true;
}
@@ -783,28 +783,28 @@ static int mode_sense(struct scsi_cmnd *srb, struct rtsx_chip *chip)
if (card == MS_CARD) {
if (chip->mspro_formatter_enable) {
pro_formatter_flag = true;
- dataSize = 108;
+ data_size = 108;
}
}
#endif
- buf = kmalloc(dataSize, GFP_KERNEL);
+ buf = kmalloc(data_size, GFP_KERNEL);
if (!buf) {
rtsx_trace(chip);
return TRANSPORT_ERROR;
}
- pageCode = srb->cmnd[2] & 0x3f;
+ page_code = srb->cmnd[2] & 0x3f;
- if ((pageCode == 0x3F) || (pageCode == 0x1C) ||
- (pageCode == 0x00) ||
- (pro_formatter_flag && (pageCode == 0x20))) {
+ if ((page_code == 0x3F) || (page_code == 0x1C) ||
+ (page_code == 0x00) ||
+ (pro_formatter_flag && (page_code == 0x20))) {
if (srb->cmnd[0] == MODE_SENSE) {
- if ((pageCode == 0x3F) || (pageCode == 0x20)) {
+ if ((page_code == 0x3F) || (page_code == 0x20)) {
ms_mode_sense(chip, srb->cmnd[0],
- lun, buf, dataSize);
+ lun, buf, data_size);
} else {
- dataSize = 4;
+ data_size = 4;
buf[0] = 0x03;
buf[1] = 0x00;
if (check_card_wp(chip, lun))
@@ -815,11 +815,11 @@ static int mode_sense(struct scsi_cmnd *srb, struct rtsx_chip *chip)
buf[3] = 0x00;
}
} else {
- if ((pageCode == 0x3F) || (pageCode == 0x20)) {
+ if ((page_code == 0x3F) || (page_code == 0x20)) {
ms_mode_sense(chip, srb->cmnd[0],
- lun, buf, dataSize);
+ lun, buf, data_size);
} else {
- dataSize = 8;
+ data_size = 8;
buf[0] = 0x00;
buf[1] = 0x06;
buf[2] = 0x00;
@@ -842,7 +842,7 @@ static int mode_sense(struct scsi_cmnd *srb, struct rtsx_chip *chip)
if (status == TRANSPORT_GOOD) {
unsigned int len = min_t(unsigned int, scsi_bufflen(srb),
- dataSize);
+ data_size);
rtsx_stor_set_xfer_buf(buf, len, srb);
scsi_set_resid(srb, scsi_bufflen(srb) - len);
}
@@ -854,7 +854,7 @@ static int mode_sense(struct scsi_cmnd *srb, struct rtsx_chip *chip)
static int read_write(struct scsi_cmnd *srb, struct rtsx_chip *chip)
{
#ifdef SUPPORT_SD_LOCK
- struct sd_info *sd_card = &(chip->sd_card);
+ struct sd_info *sd_card = &chip->sd_card;
#endif
unsigned int lun = SCSI_LUN(srb);
int retval;
@@ -896,7 +896,7 @@ static int read_write(struct scsi_cmnd *srb, struct rtsx_chip *chip)
if (sd_card->sd_lock_status & SD_LOCKED) {
dev_dbg(rtsx_dev(chip), "SD card locked!\n");
set_sense_type(chip, lun,
- SENSE_TYPE_MEDIA_READ_FORBIDDEN);
+ SENSE_TYPE_MEDIA_READ_FORBIDDEN);
rtsx_trace(chip);
return TRANSPORT_FAILED;
}
@@ -932,7 +932,7 @@ static int read_write(struct scsi_cmnd *srb, struct rtsx_chip *chip)
* need to judge start_sec at first
*/
if ((start_sec > get_card_size(chip, lun)) ||
- ((start_sec + sec_cnt) > get_card_size(chip, lun))) {
+ ((start_sec + sec_cnt) > get_card_size(chip, lun))) {
set_sense_type(chip, lun, SENSE_TYPE_MEDIA_LBA_OVER_RANGE);
rtsx_trace(chip);
return TRANSPORT_FAILED;
@@ -947,7 +947,7 @@ static int read_write(struct scsi_cmnd *srb, struct rtsx_chip *chip)
dev_dbg(rtsx_dev(chip), "read/write fail three times in succession\n");
if (srb->sc_data_direction == DMA_FROM_DEVICE)
set_sense_type(chip, lun,
- SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
+ SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
else
set_sense_type(chip, lun, SENSE_TYPE_MEDIA_WRITE_ERR);
@@ -959,7 +959,7 @@ static int read_write(struct scsi_cmnd *srb, struct rtsx_chip *chip)
if (check_card_wp(chip, lun)) {
dev_dbg(rtsx_dev(chip), "Write protected card!\n");
set_sense_type(chip, lun,
- SENSE_TYPE_MEDIA_WRITE_PROTECT);
+ SENSE_TYPE_MEDIA_WRITE_PROTECT);
rtsx_trace(chip);
return TRANSPORT_FAILED;
}
@@ -973,15 +973,16 @@ static int read_write(struct scsi_cmnd *srb, struct rtsx_chip *chip)
} else {
chip->rw_fail_cnt[lun]++;
if (srb->sc_data_direction == DMA_FROM_DEVICE)
- set_sense_type(chip, lun,
- SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
+ set_sense_type
+ (chip, lun,
+ SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
else
set_sense_type(chip, lun,
- SENSE_TYPE_MEDIA_WRITE_ERR);
+ SENSE_TYPE_MEDIA_WRITE_ERR);
}
retval = TRANSPORT_FAILED;
rtsx_trace(chip);
- goto Exit;
+ goto exit;
} else {
chip->rw_fail_cnt[lun] = 0;
retval = TRANSPORT_GOOD;
@@ -989,7 +990,7 @@ static int read_write(struct scsi_cmnd *srb, struct rtsx_chip *chip)
scsi_set_resid(srb, 0);
-Exit:
+exit:
return retval;
}
@@ -1025,8 +1026,8 @@ static int read_format_capacity(struct scsi_cmnd *srb, struct rtsx_chip *chip)
/* Capacity List Length */
if ((buf_len > 12) && chip->mspro_formatter_enable &&
- (chip->lun2card[lun] & MS_CARD) &&
- (!card || (card == MS_CARD))) {
+ (chip->lun2card[lun] & MS_CARD) &&
+ (!card || (card == MS_CARD))) {
buf[i++] = 0x10;
desc_cnt = 2;
} else {
@@ -1143,7 +1144,7 @@ static int read_eeprom(struct scsi_cmnd *srb, struct rtsx_chip *chip)
if (retval != STATUS_SUCCESS) {
vfree(buf);
set_sense_type(chip, SCSI_LUN(srb),
- SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
+ SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
rtsx_trace(chip);
return TRANSPORT_FAILED;
}
@@ -1153,7 +1154,7 @@ static int read_eeprom(struct scsi_cmnd *srb, struct rtsx_chip *chip)
if (retval != STATUS_SUCCESS) {
vfree(buf);
set_sense_type(chip, SCSI_LUN(srb),
- SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
+ SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
rtsx_trace(chip);
return TRANSPORT_FAILED;
}
@@ -1195,7 +1196,7 @@ static int write_eeprom(struct scsi_cmnd *srb, struct rtsx_chip *chip)
retval = spi_erase_eeprom_chip(chip);
if (retval != STATUS_SUCCESS) {
set_sense_type(chip, SCSI_LUN(srb),
- SENSE_TYPE_MEDIA_WRITE_ERR);
+ SENSE_TYPE_MEDIA_WRITE_ERR);
rtsx_trace(chip);
return TRANSPORT_FAILED;
}
@@ -1216,7 +1217,7 @@ static int write_eeprom(struct scsi_cmnd *srb, struct rtsx_chip *chip)
if (retval != STATUS_SUCCESS) {
vfree(buf);
set_sense_type(chip, SCSI_LUN(srb),
- SENSE_TYPE_MEDIA_WRITE_ERR);
+ SENSE_TYPE_MEDIA_WRITE_ERR);
rtsx_trace(chip);
return TRANSPORT_FAILED;
}
@@ -1247,7 +1248,7 @@ static int read_mem(struct scsi_cmnd *srb, struct rtsx_chip *chip)
if (addr < 0xFC00) {
set_sense_type(chip, SCSI_LUN(srb),
- SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
+ SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
rtsx_trace(chip);
return TRANSPORT_FAILED;
}
@@ -1271,7 +1272,7 @@ static int read_mem(struct scsi_cmnd *srb, struct rtsx_chip *chip)
if (retval != STATUS_SUCCESS) {
vfree(buf);
set_sense_type(chip, SCSI_LUN(srb),
- SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
+ SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
rtsx_trace(chip);
return TRANSPORT_FAILED;
}
@@ -1305,7 +1306,7 @@ static int write_mem(struct scsi_cmnd *srb, struct rtsx_chip *chip)
if (addr < 0xFC00) {
set_sense_type(chip, SCSI_LUN(srb),
- SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
+ SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
rtsx_trace(chip);
return TRANSPORT_FAILED;
}
@@ -1333,7 +1334,7 @@ static int write_mem(struct scsi_cmnd *srb, struct rtsx_chip *chip)
if (retval != STATUS_SUCCESS) {
vfree(buf);
set_sense_type(chip, SCSI_LUN(srb),
- SENSE_TYPE_MEDIA_WRITE_ERR);
+ SENSE_TYPE_MEDIA_WRITE_ERR);
rtsx_trace(chip);
return TRANSPORT_FAILED;
}
@@ -1346,7 +1347,7 @@ static int write_mem(struct scsi_cmnd *srb, struct rtsx_chip *chip)
static int get_sd_csd(struct scsi_cmnd *srb, struct rtsx_chip *chip)
{
- struct sd_info *sd_card = &(chip->sd_card);
+ struct sd_info *sd_card = &chip->sd_card;
unsigned int lun = SCSI_LUN(srb);
if (!check_card_ready(chip, lun)) {
@@ -1399,7 +1400,7 @@ static int trace_msg_cmd(struct scsi_cmnd *srb, struct rtsx_chip *chip)
if ((scsi_bufflen(srb) < buf_len) || !scsi_sglist(srb)) {
set_sense_type(chip, SCSI_LUN(srb),
- SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
+ SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
rtsx_trace(chip);
return TRANSPORT_FAILED;
}
@@ -1522,9 +1523,9 @@ static int set_variable(struct scsi_cmnd *srb, struct rtsx_chip *chip)
if (srb->cmnd[3] == 1) {
/* Variable Clock */
- struct xd_info *xd_card = &(chip->xd_card);
- struct sd_info *sd_card = &(chip->sd_card);
- struct ms_info *ms_card = &(chip->ms_card);
+ struct xd_info *xd_card = &chip->xd_card;
+ struct sd_info *sd_card = &chip->sd_card;
+ struct ms_info *ms_card = &chip->ms_card;
switch (srb->cmnd[4]) {
case XD_CARD:
@@ -1541,7 +1542,7 @@ static int set_variable(struct scsi_cmnd *srb, struct rtsx_chip *chip)
default:
set_sense_type(chip, lun,
- SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
+ SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
rtsx_trace(chip);
return TRANSPORT_FAILED;
}
@@ -1556,7 +1557,7 @@ static int set_variable(struct scsi_cmnd *srb, struct rtsx_chip *chip)
rtsx_disable_aspm(chip);
if (chip->ss_en &&
- (rtsx_get_stat(chip) == RTSX_STAT_SS)) {
+ (rtsx_get_stat(chip) == RTSX_STAT_SS)) {
rtsx_exit_ss(chip);
wait_timeout(100);
}
@@ -1565,7 +1566,7 @@ static int set_variable(struct scsi_cmnd *srb, struct rtsx_chip *chip)
retval = rtsx_force_power_on(chip, SSC_PDCTL);
if (retval != STATUS_SUCCESS) {
set_sense_type(chip, SCSI_LUN(srb),
- SENSE_TYPE_MEDIA_WRITE_ERR);
+ SENSE_TYPE_MEDIA_WRITE_ERR);
rtsx_trace(chip);
return TRANSPORT_FAILED;
}
@@ -1586,9 +1587,9 @@ static int get_variable(struct scsi_cmnd *srb, struct rtsx_chip *chip)
unsigned int lun = SCSI_LUN(srb);
if (srb->cmnd[3] == 1) {
- struct xd_info *xd_card = &(chip->xd_card);
- struct sd_info *sd_card = &(chip->sd_card);
- struct ms_info *ms_card = &(chip->ms_card);
+ struct xd_info *xd_card = &chip->xd_card;
+ struct sd_info *sd_card = &chip->sd_card;
+ struct ms_info *ms_card = &chip->ms_card;
u8 tmp;
switch (srb->cmnd[4]) {
@@ -1606,7 +1607,7 @@ static int get_variable(struct scsi_cmnd *srb, struct rtsx_chip *chip)
default:
set_sense_type(chip, lun,
- SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
+ SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
rtsx_trace(chip);
return TRANSPORT_FAILED;
}
@@ -1648,14 +1649,15 @@ static int dma_access_ring_buffer(struct scsi_cmnd *srb, struct rtsx_chip *chip)
dev_dbg(rtsx_dev(chip), "Write to device\n");
retval = rtsx_transfer_data(chip, 0, scsi_sglist(srb), len,
- scsi_sg_count(srb), srb->sc_data_direction, 1000);
+ scsi_sg_count(srb), srb->sc_data_direction,
+ 1000);
if (retval < 0) {
if (srb->sc_data_direction == DMA_FROM_DEVICE)
set_sense_type(chip, lun,
- SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
+ SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
else
set_sense_type(chip, lun,
- SENSE_TYPE_MEDIA_WRITE_ERR);
+ SENSE_TYPE_MEDIA_WRITE_ERR);
rtsx_trace(chip);
return TRANSPORT_FAILED;
@@ -1667,8 +1669,8 @@ static int dma_access_ring_buffer(struct scsi_cmnd *srb, struct rtsx_chip *chip)
static int get_dev_status(struct scsi_cmnd *srb, struct rtsx_chip *chip)
{
- struct sd_info *sd_card = &(chip->sd_card);
- struct ms_info *ms_card = &(chip->ms_card);
+ struct sd_info *sd_card = &chip->sd_card;
+ struct ms_info *ms_card = &chip->ms_card;
int buf_len;
unsigned int lun = SCSI_LUN(srb);
u8 card = get_lun_card(chip, lun);
@@ -1699,8 +1701,8 @@ static int get_dev_status(struct scsi_cmnd *srb, struct rtsx_chip *chip)
#ifdef SUPPORT_OCP
status[8] = 0;
- if (CHECK_LUN_MODE(chip,
- SD_MS_2LUN) && (chip->lun2card[lun] == MS_CARD)) {
+ if (CHECK_LUN_MODE(chip, SD_MS_2LUN) &&
+ (chip->lun2card[lun] == MS_CARD)) {
oc_now_mask = MS_OC_NOW;
oc_ever_mask = MS_OC_EVER;
} else {
@@ -1804,7 +1806,7 @@ static int set_chip_mode(struct scsi_cmnd *srb, struct rtsx_chip *chip)
if (!CHECK_PID(chip, 0x5208)) {
set_sense_type(chip, SCSI_LUN(srb),
- SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
+ SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
rtsx_trace(chip);
return TRANSPORT_FAILED;
}
@@ -1884,7 +1886,7 @@ static int rw_mem_cmd_buf(struct scsi_cmnd *srb, struct rtsx_chip *chip)
cmd_type = srb->cmnd[4];
if (cmd_type > 2) {
set_sense_type(chip, lun,
- SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
+ SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
rtsx_trace(chip);
return TRANSPORT_FAILED;
}
@@ -1903,7 +1905,7 @@ static int rw_mem_cmd_buf(struct scsi_cmnd *srb, struct rtsx_chip *chip)
value = *(rtsx_get_cmd_data(chip) + idx);
if (scsi_bufflen(srb) < 1) {
set_sense_type(chip, lun,
- SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
+ SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
rtsx_trace(chip);
return TRANSPORT_FAILED;
}
@@ -1971,7 +1973,7 @@ static int read_phy_register(struct scsi_cmnd *srb, struct rtsx_chip *chip)
if (retval != STATUS_SUCCESS) {
vfree(buf);
set_sense_type(chip, SCSI_LUN(srb),
- SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
+ SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
rtsx_trace(chip);
return TRANSPORT_FAILED;
}
@@ -1980,8 +1982,9 @@ static int read_phy_register(struct scsi_cmnd *srb, struct rtsx_chip *chip)
retval = rtsx_read_phy_register(chip, addr + i, &val);
if (retval != STATUS_SUCCESS) {
vfree(buf);
- set_sense_type(chip, SCSI_LUN(srb),
- SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
+ set_sense_type
+ (chip, SCSI_LUN(srb),
+ SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
rtsx_trace(chip);
return TRANSPORT_FAILED;
}
@@ -2039,7 +2042,7 @@ static int write_phy_register(struct scsi_cmnd *srb, struct rtsx_chip *chip)
if (retval != STATUS_SUCCESS) {
vfree(buf);
set_sense_type(chip, SCSI_LUN(srb),
- SENSE_TYPE_MEDIA_WRITE_ERR);
+ SENSE_TYPE_MEDIA_WRITE_ERR);
rtsx_trace(chip);
return TRANSPORT_FAILED;
}
@@ -2050,7 +2053,7 @@ static int write_phy_register(struct scsi_cmnd *srb, struct rtsx_chip *chip)
if (retval != STATUS_SUCCESS) {
vfree(buf);
set_sense_type(chip, SCSI_LUN(srb),
- SENSE_TYPE_MEDIA_WRITE_ERR);
+ SENSE_TYPE_MEDIA_WRITE_ERR);
rtsx_trace(chip);
return TRANSPORT_FAILED;
}
@@ -2090,7 +2093,7 @@ static int erase_eeprom2(struct scsi_cmnd *srb, struct rtsx_chip *chip)
retval = spi_erase_eeprom_chip(chip);
if (retval != STATUS_SUCCESS) {
set_sense_type(chip, SCSI_LUN(srb),
- SENSE_TYPE_MEDIA_WRITE_ERR);
+ SENSE_TYPE_MEDIA_WRITE_ERR);
rtsx_trace(chip);
return TRANSPORT_FAILED;
}
@@ -2098,13 +2101,13 @@ static int erase_eeprom2(struct scsi_cmnd *srb, struct rtsx_chip *chip)
retval = spi_erase_eeprom_byte(chip, addr);
if (retval != STATUS_SUCCESS) {
set_sense_type(chip, SCSI_LUN(srb),
- SENSE_TYPE_MEDIA_WRITE_ERR);
+ SENSE_TYPE_MEDIA_WRITE_ERR);
rtsx_trace(chip);
return TRANSPORT_FAILED;
}
} else {
set_sense_type(chip, SCSI_LUN(srb),
- SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
+ SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
rtsx_trace(chip);
return TRANSPORT_FAILED;
}
@@ -2139,7 +2142,7 @@ static int read_eeprom2(struct scsi_cmnd *srb, struct rtsx_chip *chip)
if (retval != STATUS_SUCCESS) {
vfree(buf);
set_sense_type(chip, SCSI_LUN(srb),
- SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
+ SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
rtsx_trace(chip);
return TRANSPORT_FAILED;
}
@@ -2149,7 +2152,7 @@ static int read_eeprom2(struct scsi_cmnd *srb, struct rtsx_chip *chip)
if (retval != STATUS_SUCCESS) {
vfree(buf);
set_sense_type(chip, SCSI_LUN(srb),
- SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
+ SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
rtsx_trace(chip);
return TRANSPORT_FAILED;
}
@@ -2204,7 +2207,7 @@ static int write_eeprom2(struct scsi_cmnd *srb, struct rtsx_chip *chip)
if (retval != STATUS_SUCCESS) {
vfree(buf);
set_sense_type(chip, SCSI_LUN(srb),
- SENSE_TYPE_MEDIA_WRITE_ERR);
+ SENSE_TYPE_MEDIA_WRITE_ERR);
rtsx_trace(chip);
return TRANSPORT_FAILED;
}
@@ -2242,7 +2245,7 @@ static int read_efuse(struct scsi_cmnd *srb, struct rtsx_chip *chip)
if (retval != STATUS_SUCCESS) {
vfree(buf);
set_sense_type(chip, SCSI_LUN(srb),
- SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
+ SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
rtsx_trace(chip);
return TRANSPORT_FAILED;
}
@@ -2252,7 +2255,7 @@ static int read_efuse(struct scsi_cmnd *srb, struct rtsx_chip *chip)
if (retval != STATUS_SUCCESS) {
vfree(buf);
set_sense_type(chip, SCSI_LUN(srb),
- SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
+ SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
rtsx_trace(chip);
return TRANSPORT_FAILED;
}
@@ -2311,7 +2314,7 @@ static int write_efuse(struct scsi_cmnd *srb, struct rtsx_chip *chip)
}
retval = rtsx_write_register(chip, PWR_GATE_CTRL,
- LDO3318_PWR_MASK, LDO_OFF);
+ LDO3318_PWR_MASK, LDO_OFF);
if (retval != STATUS_SUCCESS) {
vfree(buf);
rtsx_trace(chip);
@@ -2321,7 +2324,7 @@ static int write_efuse(struct scsi_cmnd *srb, struct rtsx_chip *chip)
wait_timeout(600);
retval = rtsx_write_phy_register(chip, 0x08,
- 0x4C00 | chip->phy_voltage);
+ 0x4C00 | chip->phy_voltage);
if (retval != STATUS_SUCCESS) {
vfree(buf);
rtsx_trace(chip);
@@ -2329,7 +2332,7 @@ static int write_efuse(struct scsi_cmnd *srb, struct rtsx_chip *chip)
}
retval = rtsx_write_register(chip, PWR_GATE_CTRL,
- LDO3318_PWR_MASK, LDO_ON);
+ LDO3318_PWR_MASK, LDO_ON);
if (retval != STATUS_SUCCESS) {
vfree(buf);
rtsx_trace(chip);
@@ -2352,14 +2355,14 @@ static int write_efuse(struct scsi_cmnd *srb, struct rtsx_chip *chip)
retval = rtsx_write_efuse(chip, addr + i, buf[i]);
if (retval != STATUS_SUCCESS) {
set_sense_type(chip, SCSI_LUN(srb),
- SENSE_TYPE_MEDIA_WRITE_ERR);
+ SENSE_TYPE_MEDIA_WRITE_ERR);
result = TRANSPORT_FAILED;
rtsx_trace(chip);
- goto Exit;
+ goto exit;
}
}
-Exit:
+exit:
vfree(buf);
retval = card_power_off(chip, SPI_CARD);
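Kernel coding style wants goto labels in lowercase, and the usual idiom pairs them with a single cleanup path that releases resources in reverse order of acquisition, as the exit: label above now does with vfree() and card_power_off(). A minimal sketch of the pattern (names and error codes are illustrative):

#include <stdlib.h>

static int do_work(void)
{
	int ret = -1;
	char *buf = malloc(64);

	if (!buf)
		goto exit;

	/* ... work with buf; on failure, fall through to the label ... */
	ret = 0;
exit:
	free(buf);	/* free(NULL) is a harmless no-op */
	return ret;
}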
@@ -2370,7 +2373,7 @@ Exit:
if (chip->asic_code) {
retval = rtsx_write_register(chip, PWR_GATE_CTRL,
- LDO3318_PWR_MASK, LDO_OFF);
+ LDO3318_PWR_MASK, LDO_OFF);
if (retval != STATUS_SUCCESS) {
rtsx_trace(chip);
return TRANSPORT_ERROR;
@@ -2385,7 +2388,7 @@ Exit:
}
retval = rtsx_write_register(chip, PWR_GATE_CTRL,
- LDO3318_PWR_MASK, LDO_ON);
+ LDO3318_PWR_MASK, LDO_ON);
if (retval != STATUS_SUCCESS) {
rtsx_trace(chip);
return TRANSPORT_ERROR;
@@ -2425,7 +2428,7 @@ static int read_cfg_byte(struct scsi_cmnd *srb, struct rtsx_chip *chip)
if (func > func_max) {
set_sense_type(chip, SCSI_LUN(srb),
- SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
+ SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
rtsx_trace(chip);
return TRANSPORT_FAILED;
}
@@ -2439,7 +2442,7 @@ static int read_cfg_byte(struct scsi_cmnd *srb, struct rtsx_chip *chip)
retval = rtsx_read_cfg_seq(chip, func, addr, buf, len);
if (retval != STATUS_SUCCESS) {
set_sense_type(chip, SCSI_LUN(srb),
- SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
+ SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
vfree(buf);
rtsx_trace(chip);
return TRANSPORT_FAILED;
@@ -2484,7 +2487,7 @@ static int write_cfg_byte(struct scsi_cmnd *srb, struct rtsx_chip *chip)
if (func > func_max) {
set_sense_type(chip, SCSI_LUN(srb),
- SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
+ SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
rtsx_trace(chip);
return TRANSPORT_FAILED;
}
@@ -2593,7 +2596,7 @@ static int app_cmd(struct scsi_cmnd *srb, struct rtsx_chip *chip)
default:
set_sense_type(chip, SCSI_LUN(srb),
- SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
+ SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
rtsx_trace(chip);
return TRANSPORT_FAILED;
}
@@ -2670,7 +2673,7 @@ static int read_status(struct scsi_cmnd *srb, struct rtsx_chip *chip)
if (get_lun_card(chip, lun) == XD_CARD) {
rtsx_status[13] = 0x40;
} else if (get_lun_card(chip, lun) == SD_CARD) {
- struct sd_info *sd_card = &(chip->sd_card);
+ struct sd_info *sd_card = &chip->sd_card;
rtsx_status[13] = 0x20;
if (CHK_SD(sd_card)) {
@@ -2686,7 +2689,7 @@ static int read_status(struct scsi_cmnd *srb, struct rtsx_chip *chip)
rtsx_status[13] |= 0x04;
}
} else if (get_lun_card(chip, lun) == MS_CARD) {
- struct ms_info *ms_card = &(chip->ms_card);
+ struct ms_info *ms_card = &chip->ms_card;
if (CHK_MSPRO(ms_card)) {
rtsx_status[13] = 0x38;
@@ -2881,7 +2884,7 @@ static int vendor_cmnd(struct scsi_cmnd *srb, struct rtsx_chip *chip)
default:
set_sense_type(chip, SCSI_LUN(srb),
- SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
+ SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
rtsx_trace(chip);
return TRANSPORT_FAILED;
}
@@ -2895,14 +2898,15 @@ void led_shine(struct scsi_cmnd *srb, struct rtsx_chip *chip)
unsigned int lun = SCSI_LUN(srb);
u16 sec_cnt;
- if ((srb->cmnd[0] == READ_10) || (srb->cmnd[0] == WRITE_10))
+ if ((srb->cmnd[0] == READ_10) || (srb->cmnd[0] == WRITE_10)) {
sec_cnt = ((u16)(srb->cmnd[7]) << 8) | srb->cmnd[8];
- else if ((srb->cmnd[0] == READ_6) || (srb->cmnd[0] == WRITE_6)) {
+ } else if ((srb->cmnd[0] == READ_6) || (srb->cmnd[0] == WRITE_6)) {
sec_cnt = srb->cmnd[4];
if (sec_cnt == 0)
sec_cnt = 256;
- } else
+ } else {
return;
+ }
if (chip->rw_cap[lun] >= GPIO_TOGGLE_THRESHOLD) {
toggle_gpio(chip, LED_GPIO);
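The led_shine() rework follows the CodingStyle rule that once any branch of an if/else chain needs braces, every branch gets them; mixing braced and unbraced arms is where stray statements silently fall out of the conditional during later edits. A minimal sketch:

static int pick(int a, int b)
{
	int x;

	/*
	 * The else-if arm has two statements, so all arms take
	 * braces, matching the led_shine() change above.
	 */
	if (a) {
		x = 1;
	} else if (b) {
		x = 2;
		x *= 2;
	} else {
		x = 0;
	}
	return x;
}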
@@ -2915,7 +2919,7 @@ void led_shine(struct scsi_cmnd *srb, struct rtsx_chip *chip)
static int ms_format_cmnd(struct scsi_cmnd *srb, struct rtsx_chip *chip)
{
- struct ms_info *ms_card = &(chip->ms_card);
+ struct ms_info *ms_card = &chip->ms_card;
unsigned int lun = SCSI_LUN(srb);
bool quick_format;
int retval;
@@ -2927,7 +2931,7 @@ static int ms_format_cmnd(struct scsi_cmnd *srb, struct rtsx_chip *chip)
}
if ((srb->cmnd[3] != 0x4D) || (srb->cmnd[4] != 0x47) ||
- (srb->cmnd[5] != 0x66) || (srb->cmnd[6] != 0x6D) ||
+ (srb->cmnd[5] != 0x66) || (srb->cmnd[6] != 0x6D) ||
(srb->cmnd[7] != 0x74)) {
set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
rtsx_trace(chip);
@@ -2941,7 +2945,7 @@ static int ms_format_cmnd(struct scsi_cmnd *srb, struct rtsx_chip *chip)
wait_timeout(100);
if (!check_card_ready(chip, lun) ||
- (get_card_size(chip, lun) == 0)) {
+ (get_card_size(chip, lun) == 0)) {
set_sense_type(chip, lun, SENSE_TYPE_MEDIA_NOT_PRESENT);
rtsx_trace(chip);
return TRANSPORT_FAILED;
@@ -2986,7 +2990,7 @@ static int ms_format_cmnd(struct scsi_cmnd *srb, struct rtsx_chip *chip)
#ifdef SUPPORT_PCGL_1P18
static int get_ms_information(struct scsi_cmnd *srb, struct rtsx_chip *chip)
{
- struct ms_info *ms_card = &(chip->ms_card);
+ struct ms_info *ms_card = &chip->ms_card;
unsigned int lun = SCSI_LUN(srb);
u8 dev_info_id, data_len;
u8 *buf;
@@ -3005,8 +3009,8 @@ static int get_ms_information(struct scsi_cmnd *srb, struct rtsx_chip *chip)
}
if ((srb->cmnd[2] != 0xB0) || (srb->cmnd[4] != 0x4D) ||
- (srb->cmnd[5] != 0x53) || (srb->cmnd[6] != 0x49) ||
- (srb->cmnd[7] != 0x44)) {
+ (srb->cmnd[5] != 0x53) || (srb->cmnd[6] != 0x49) ||
+ (srb->cmnd[7] != 0x44)) {
set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
rtsx_trace(chip);
return TRANSPORT_FAILED;
@@ -3014,17 +3018,20 @@ static int get_ms_information(struct scsi_cmnd *srb, struct rtsx_chip *chip)
dev_info_id = srb->cmnd[3];
if ((CHK_MSXC(ms_card) && (dev_info_id == 0x10)) ||
- (!CHK_MSXC(ms_card) && (dev_info_id == 0x13)) ||
- !CHK_MSPRO(ms_card)) {
+ (!CHK_MSXC(ms_card) && (dev_info_id == 0x13)) ||
+ !CHK_MSPRO(ms_card)) {
set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
rtsx_trace(chip);
return TRANSPORT_FAILED;
}
- if (dev_info_id == 0x15)
- buf_len = data_len = 0x3A;
- else
- buf_len = data_len = 0x6A;
+ if (dev_info_id == 0x15) {
+ buf_len = 0x3A;
+ data_len = 0x3A;
+ } else {
+ buf_len = 0x6A;
+ data_len = 0x6A;
+ }
buf = kmalloc(buf_len, GFP_KERNEL);
if (!buf) {
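The split of buf_len = data_len = 0x3A into two statements addresses checkpatch's warning about multiple assignments per statement; the generated code is the same, but each variable's role now reads explicitly. The preferred shape, as a self-contained sketch (names are illustrative):

static void pick_lengths(int short_form, unsigned int *buf_len,
			 unsigned int *data_len)
{
	/* One assignment per statement, per checkpatch. */
	unsigned int len = short_form ? 0x3A : 0x6A;

	*buf_len = len;
	*data_len = len;
}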
@@ -3100,7 +3107,7 @@ static int ms_sp_cmnd(struct scsi_cmnd *srb, struct rtsx_chip *chip)
}
#ifdef SUPPORT_CPRM
-static int sd_extention_cmnd(struct scsi_cmnd *srb, struct rtsx_chip *chip)
+static int sd_extension_cmnd(struct scsi_cmnd *srb, struct rtsx_chip *chip)
{
unsigned int lun = SCSI_LUN(srb);
int result;
@@ -3164,7 +3171,7 @@ static int sd_extention_cmnd(struct scsi_cmnd *srb, struct rtsx_chip *chip)
#ifdef SUPPORT_MAGIC_GATE
static int mg_report_key(struct scsi_cmnd *srb, struct rtsx_chip *chip)
{
- struct ms_info *ms_card = &(chip->ms_card);
+ struct ms_info *ms_card = &chip->ms_card;
unsigned int lun = SCSI_LUN(srb);
int retval;
u8 key_format;
@@ -3208,8 +3215,8 @@ static int mg_report_key(struct scsi_cmnd *srb, struct rtsx_chip *chip)
switch (key_format) {
case KF_GET_LOC_EKB:
if ((scsi_bufflen(srb) == 0x41C) &&
- (srb->cmnd[8] == 0x04) &&
- (srb->cmnd[9] == 0x1C)) {
+ (srb->cmnd[8] == 0x04) &&
+ (srb->cmnd[9] == 0x1C)) {
retval = mg_get_local_EKB(srb, chip);
if (retval != STATUS_SUCCESS) {
rtsx_trace(chip);
@@ -3218,7 +3225,7 @@ static int mg_report_key(struct scsi_cmnd *srb, struct rtsx_chip *chip)
} else {
set_sense_type(chip, lun,
- SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
+ SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
rtsx_trace(chip);
return TRANSPORT_FAILED;
}
@@ -3226,8 +3233,8 @@ static int mg_report_key(struct scsi_cmnd *srb, struct rtsx_chip *chip)
case KF_RSP_CHG:
if ((scsi_bufflen(srb) == 0x24) &&
- (srb->cmnd[8] == 0x00) &&
- (srb->cmnd[9] == 0x24)) {
+ (srb->cmnd[8] == 0x00) &&
+ (srb->cmnd[9] == 0x24)) {
retval = mg_get_rsp_chg(srb, chip);
if (retval != STATUS_SUCCESS) {
rtsx_trace(chip);
@@ -3236,7 +3243,7 @@ static int mg_report_key(struct scsi_cmnd *srb, struct rtsx_chip *chip)
} else {
set_sense_type(chip, lun,
- SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
+ SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
rtsx_trace(chip);
return TRANSPORT_FAILED;
}
@@ -3245,12 +3252,12 @@ static int mg_report_key(struct scsi_cmnd *srb, struct rtsx_chip *chip)
case KF_GET_ICV:
ms_card->mg_entry_num = srb->cmnd[5];
if ((scsi_bufflen(srb) == 0x404) &&
- (srb->cmnd[8] == 0x04) &&
- (srb->cmnd[9] == 0x04) &&
- (srb->cmnd[2] == 0x00) &&
- (srb->cmnd[3] == 0x00) &&
- (srb->cmnd[4] == 0x00) &&
- (srb->cmnd[5] < 32)) {
+ (srb->cmnd[8] == 0x04) &&
+ (srb->cmnd[9] == 0x04) &&
+ (srb->cmnd[2] == 0x00) &&
+ (srb->cmnd[3] == 0x00) &&
+ (srb->cmnd[4] == 0x00) &&
+ (srb->cmnd[5] < 32)) {
retval = mg_get_ICV(srb, chip);
if (retval != STATUS_SUCCESS) {
rtsx_trace(chip);
@@ -3259,7 +3266,7 @@ static int mg_report_key(struct scsi_cmnd *srb, struct rtsx_chip *chip)
} else {
set_sense_type(chip, lun,
- SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
+ SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
rtsx_trace(chip);
return TRANSPORT_FAILED;
}
@@ -3277,7 +3284,7 @@ static int mg_report_key(struct scsi_cmnd *srb, struct rtsx_chip *chip)
static int mg_send_key(struct scsi_cmnd *srb, struct rtsx_chip *chip)
{
- struct ms_info *ms_card = &(chip->ms_card);
+ struct ms_info *ms_card = &chip->ms_card;
unsigned int lun = SCSI_LUN(srb);
int retval;
u8 key_format;
@@ -3326,8 +3333,8 @@ static int mg_send_key(struct scsi_cmnd *srb, struct rtsx_chip *chip)
switch (key_format) {
case KF_SET_LEAF_ID:
if ((scsi_bufflen(srb) == 0x0C) &&
- (srb->cmnd[8] == 0x00) &&
- (srb->cmnd[9] == 0x0C)) {
+ (srb->cmnd[8] == 0x00) &&
+ (srb->cmnd[9] == 0x0C)) {
retval = mg_set_leaf_id(srb, chip);
if (retval != STATUS_SUCCESS) {
rtsx_trace(chip);
@@ -3336,7 +3343,7 @@ static int mg_send_key(struct scsi_cmnd *srb, struct rtsx_chip *chip)
} else {
set_sense_type(chip, lun,
- SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
+ SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
rtsx_trace(chip);
return TRANSPORT_FAILED;
}
@@ -3344,8 +3351,8 @@ static int mg_send_key(struct scsi_cmnd *srb, struct rtsx_chip *chip)
case KF_CHG_HOST:
if ((scsi_bufflen(srb) == 0x0C) &&
- (srb->cmnd[8] == 0x00) &&
- (srb->cmnd[9] == 0x0C)) {
+ (srb->cmnd[8] == 0x00) &&
+ (srb->cmnd[9] == 0x0C)) {
retval = mg_chg(srb, chip);
if (retval != STATUS_SUCCESS) {
rtsx_trace(chip);
@@ -3354,7 +3361,7 @@ static int mg_send_key(struct scsi_cmnd *srb, struct rtsx_chip *chip)
} else {
set_sense_type(chip, lun,
- SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
+ SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
rtsx_trace(chip);
return TRANSPORT_FAILED;
}
@@ -3362,8 +3369,8 @@ static int mg_send_key(struct scsi_cmnd *srb, struct rtsx_chip *chip)
case KF_RSP_HOST:
if ((scsi_bufflen(srb) == 0x0C) &&
- (srb->cmnd[8] == 0x00) &&
- (srb->cmnd[9] == 0x0C)) {
+ (srb->cmnd[8] == 0x00) &&
+ (srb->cmnd[9] == 0x0C)) {
retval = mg_rsp(srb, chip);
if (retval != STATUS_SUCCESS) {
rtsx_trace(chip);
@@ -3372,7 +3379,7 @@ static int mg_send_key(struct scsi_cmnd *srb, struct rtsx_chip *chip)
} else {
set_sense_type(chip, lun,
- SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
+ SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
rtsx_trace(chip);
return TRANSPORT_FAILED;
}
@@ -3381,12 +3388,12 @@ static int mg_send_key(struct scsi_cmnd *srb, struct rtsx_chip *chip)
case KF_SET_ICV:
ms_card->mg_entry_num = srb->cmnd[5];
if ((scsi_bufflen(srb) == 0x404) &&
- (srb->cmnd[8] == 0x04) &&
- (srb->cmnd[9] == 0x04) &&
- (srb->cmnd[2] == 0x00) &&
- (srb->cmnd[3] == 0x00) &&
- (srb->cmnd[4] == 0x00) &&
- (srb->cmnd[5] < 32)) {
+ (srb->cmnd[8] == 0x04) &&
+ (srb->cmnd[9] == 0x04) &&
+ (srb->cmnd[2] == 0x00) &&
+ (srb->cmnd[3] == 0x00) &&
+ (srb->cmnd[4] == 0x00) &&
+ (srb->cmnd[5] < 32)) {
retval = mg_set_ICV(srb, chip);
if (retval != STATUS_SUCCESS) {
rtsx_trace(chip);
@@ -3395,7 +3402,7 @@ static int mg_send_key(struct scsi_cmnd *srb, struct rtsx_chip *chip)
} else {
set_sense_type(chip, lun,
- SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
+ SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
rtsx_trace(chip);
return TRANSPORT_FAILED;
}
@@ -3415,9 +3422,9 @@ static int mg_send_key(struct scsi_cmnd *srb, struct rtsx_chip *chip)
int rtsx_scsi_handler(struct scsi_cmnd *srb, struct rtsx_chip *chip)
{
#ifdef SUPPORT_SD_LOCK
- struct sd_info *sd_card = &(chip->sd_card);
+ struct sd_info *sd_card = &chip->sd_card;
#endif
- struct ms_info *ms_card = &(chip->ms_card);
+ struct ms_info *ms_card = &chip->ms_card;
unsigned int lun = SCSI_LUN(srb);
int result;
@@ -3427,9 +3434,9 @@ int rtsx_scsi_handler(struct scsi_cmnd *srb, struct rtsx_chip *chip)
* REQUEST_SENSE and rs_ppstatus
*/
if (!((srb->cmnd[0] == VENDOR_CMND) &&
- (srb->cmnd[1] == SCSI_APP_CMD) &&
- (srb->cmnd[2] == GET_DEV_STATUS)) &&
- (srb->cmnd[0] != REQUEST_SENSE)) {
+ (srb->cmnd[1] == SCSI_APP_CMD) &&
+ (srb->cmnd[2] == GET_DEV_STATUS)) &&
+ (srb->cmnd[0] != REQUEST_SENSE)) {
/* Logical Unit Not Ready Format in Progress */
set_sense_data(chip, lun, CUR_ERR,
0x02, 0, 0x04, 0x04, 0, 0);
@@ -3440,12 +3447,12 @@ int rtsx_scsi_handler(struct scsi_cmnd *srb, struct rtsx_chip *chip)
#endif
if ((get_lun_card(chip, lun) == MS_CARD) &&
- (ms_card->format_status == FORMAT_IN_PROGRESS)) {
+ (ms_card->format_status == FORMAT_IN_PROGRESS)) {
if ((srb->cmnd[0] != REQUEST_SENSE) &&
- (srb->cmnd[0] != INQUIRY)) {
+ (srb->cmnd[0] != INQUIRY)) {
/* Logical Unit Not Ready Format in Progress */
set_sense_data(chip, lun, CUR_ERR, 0x02, 0, 0x04, 0x04,
- 0, (u16)(ms_card->progress));
+ 0, (u16)(ms_card->progress));
rtsx_trace(chip);
return TRANSPORT_FAILED;
}
@@ -3510,7 +3517,7 @@ int rtsx_scsi_handler(struct scsi_cmnd *srb, struct rtsx_chip *chip)
case SD_EXECUTE_WRITE:
case SD_GET_RSP:
case SD_HW_RST:
- result = sd_extention_cmnd(srb, chip);
+ result = sd_extension_cmnd(srb, chip);
break;
#endif
diff --git a/drivers/staging/rts5208/rtsx_scsi.h b/drivers/staging/rts5208/rtsx_scsi.h
index 03dd76d6c859..30f3724848fe 100644
--- a/drivers/staging/rts5208/rtsx_scsi.h
+++ b/drivers/staging/rts5208/rtsx_scsi.h
@@ -136,8 +136,8 @@
void scsi_show_command(struct rtsx_chip *chip);
void set_sense_type(struct rtsx_chip *chip, unsigned int lun, int sense_type);
void set_sense_data(struct rtsx_chip *chip, unsigned int lun, u8 err_code,
- u8 sense_key, u32 info, u8 asc, u8 ascq,
- u8 sns_key_info0, u16 sns_key_info1);
+ u8 sense_key, u32 info, u8 asc, u8 ascq,
+ u8 sns_key_info0, u16 sns_key_info1);
int rtsx_scsi_handler(struct scsi_cmnd *srb, struct rtsx_chip *chip);
#endif /* __REALTEK_RTSX_SCSI_H */
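The prototype rewraps in this header follow checkpatch's alignment check: continuation lines of a wrapped declaration line up with the first character after the opening parenthesis. Sketched with a hypothetical declaration (the name and parameters are illustrative):

void report_sense_example(unsigned int lun, unsigned char err_code,
			  unsigned char sense_key, unsigned int info,
			  unsigned char asc, unsigned char ascq);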
diff --git a/drivers/staging/rts5208/rtsx_sys.h b/drivers/staging/rts5208/rtsx_sys.h
index f49bed9ec76a..817700c0d794 100644
--- a/drivers/staging/rts5208/rtsx_sys.h
+++ b/drivers/staging/rts5208/rtsx_sys.h
@@ -32,9 +32,9 @@ static inline void rtsx_exclusive_enter_ss(struct rtsx_chip *chip)
{
struct rtsx_dev *dev = chip->rtsx;
- spin_lock(&(dev->reg_lock));
+ spin_lock(&dev->reg_lock);
rtsx_enter_ss(chip);
- spin_unlock(&(dev->reg_lock));
+ spin_unlock(&dev->reg_lock);
}
static inline void rtsx_reset_detected_cards(struct rtsx_chip *chip, int flag)
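rtsx_exclusive_enter_ss() is a thin wrapper that holds dev->reg_lock across rtsx_enter_ss(), serializing register access during the suspend-state transition. The shape of the pattern, sketched with an illustrative type (the real lock must be spin_lock_init()'d by its owner):

#include <linux/spinlock.h>

struct demo_dev {
	spinlock_t reg_lock;	/* initialized with spin_lock_init() */
	int state;
};

static inline void demo_enter_ss(struct demo_dev *dev)
{
	spin_lock(&dev->reg_lock);
	dev->state = 1;		/* stands in for rtsx_enter_ss(chip) */
	spin_unlock(&dev->reg_lock);
}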
diff --git a/drivers/staging/rts5208/rtsx_transport.h b/drivers/staging/rts5208/rtsx_transport.h
index 479137398c3d..99740c33f2fb 100644
--- a/drivers/staging/rts5208/rtsx_transport.h
+++ b/drivers/staging/rts5208/rtsx_transport.h
@@ -30,18 +30,21 @@
#define WAIT_TIME 2000
unsigned int rtsx_stor_access_xfer_buf(unsigned char *buffer,
- unsigned int buflen, struct scsi_cmnd *srb, unsigned int *index,
- unsigned int *offset, enum xfer_buf_dir dir);
-void rtsx_stor_set_xfer_buf(unsigned char *buffer,
- unsigned int buflen, struct scsi_cmnd *srb);
-void rtsx_stor_get_xfer_buf(unsigned char *buffer,
- unsigned int buflen, struct scsi_cmnd *srb);
+ unsigned int buflen,
+ struct scsi_cmnd *srb,
+ unsigned int *index,
+ unsigned int *offset,
+ enum xfer_buf_dir dir);
+void rtsx_stor_set_xfer_buf(unsigned char *buffer, unsigned int buflen,
+ struct scsi_cmnd *srb);
+void rtsx_stor_get_xfer_buf(unsigned char *buffer, unsigned int buflen,
+ struct scsi_cmnd *srb);
void rtsx_invoke_transport(struct scsi_cmnd *srb, struct rtsx_chip *chip);
#define rtsx_init_cmd(chip) ((chip)->ci = 0)
-void rtsx_add_cmd(struct rtsx_chip *chip,
- u8 cmd_type, u16 reg_addr, u8 mask, u8 data);
+void rtsx_add_cmd(struct rtsx_chip *chip, u8 cmd_type, u16 reg_addr, u8 mask,
+ u8 data);
void rtsx_send_cmd_no_wait(struct rtsx_chip *chip);
int rtsx_send_cmd(struct rtsx_chip *chip, u8 card, int timeout);
@@ -55,11 +58,12 @@ static inline u8 *rtsx_get_cmd_data(struct rtsx_chip *chip)
}
int rtsx_transfer_data(struct rtsx_chip *chip, u8 card, void *buf, size_t len,
- int use_sg, enum dma_data_direction dma_dir, int timeout);
+ int use_sg, enum dma_data_direction dma_dir,
+ int timeout);
-int rtsx_transfer_data_partial(struct rtsx_chip *chip, u8 card,
- void *buf, size_t len,
- int use_sg, unsigned int *index, unsigned int *offset,
- enum dma_data_direction dma_dir, int timeout);
+int rtsx_transfer_data_partial(struct rtsx_chip *chip, u8 card, void *buf,
+ size_t len, int use_sg, unsigned int *index,
+ unsigned int *offset,
+ enum dma_data_direction dma_dir, int timeout);
#endif /* __REALTEK_RTSX_TRANSPORT_H */
diff --git a/drivers/staging/rts5208/sd.c b/drivers/staging/rts5208/sd.c
index b0bbb36f8988..bdd35b611f27 100644
--- a/drivers/staging/rts5208/sd.c
+++ b/drivers/staging/rts5208/sd.c
@@ -56,21 +56,21 @@ static u16 REG_SD_DCMPS1_CTL;
static inline void sd_set_err_code(struct rtsx_chip *chip, u8 err_code)
{
- struct sd_info *sd_card = &(chip->sd_card);
+ struct sd_info *sd_card = &chip->sd_card;
sd_card->err_code |= err_code;
}
static inline void sd_clr_err_code(struct rtsx_chip *chip)
{
- struct sd_info *sd_card = &(chip->sd_card);
+ struct sd_info *sd_card = &chip->sd_card;
sd_card->err_code = 0;
}
static inline int sd_check_err_code(struct rtsx_chip *chip, u8 err_code)
{
- struct sd_info *sd_card = &(chip->sd_card);
+ struct sd_info *sd_card = &chip->sd_card;
return sd_card->err_code & err_code;
}
@@ -124,9 +124,9 @@ static int sd_check_data0_status(struct rtsx_chip *chip)
}
static int sd_send_cmd_get_rsp(struct rtsx_chip *chip, u8 cmd_idx,
- u32 arg, u8 rsp_type, u8 *rsp, int rsp_len)
+ u32 arg, u8 rsp_type, u8 *rsp, int rsp_len)
{
- struct sd_info *sd_card = &(chip->sd_card);
+ struct sd_info *sd_card = &chip->sd_card;
int retval;
int timeout = 100;
u16 reg_addr;
@@ -153,11 +153,12 @@ RTY_SEND_CMD:
rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CFG2, 0xFF, rsp_type);
rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_DATA_SOURCE,
- 0x01, PINGPONG_BUFFER);
+ 0x01, PINGPONG_BUFFER);
rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_TRANSFER,
- 0xFF, SD_TM_CMD_RSP | SD_TRANSFER_START);
+ 0xFF, SD_TM_CMD_RSP | SD_TRANSFER_START);
rtsx_add_cmd(chip, CHECK_REG_CMD, REG_SD_TRANSFER,
- SD_TRANSFER_END | SD_STAT_IDLE, SD_TRANSFER_END | SD_STAT_IDLE);
+ SD_TRANSFER_END | SD_STAT_IDLE, SD_TRANSFER_END |
+ SD_STAT_IDLE);
if (rsp_type == SD_RSP_TYPE_R2) {
for (reg_addr = PPBUF_BASE2; reg_addr < PPBUF_BASE2 + 16;
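sd_send_cmd_get_rsp() shows the driver's batched register-access pattern: rtsx_init_cmd() resets the queue, each rtsx_add_cmd() appends a WRITE_REG_CMD or CHECK_REG_CMD entry, and rtsx_send_cmd() ships the whole batch to the reader in one transfer. A minimal user-space sketch of that queue shape (entry layout and capacity are illustrative, not the driver's):

#include <stdint.h>
#include <stddef.h>

enum cmd_type { WRITE_REG, CHECK_REG };

struct cmd_entry {
	enum cmd_type type;
	uint16_t addr;
	uint8_t mask, data;
};

struct cmd_queue {
	struct cmd_entry entries[64];
	size_t ci;		/* current index, cf. rtsx_init_cmd() */
};

static int queue_add(struct cmd_queue *q, enum cmd_type type,
		     uint16_t addr, uint8_t mask, uint8_t data)
{
	if (q->ci >= 64)
		return -1;	/* queue full */
	q->entries[q->ci++] = (struct cmd_entry){ type, addr, mask, data };
	return 0;
}
/* A real send step would hand entries[0..ci) to the device in one go. */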
@@ -238,7 +239,7 @@ RTY_SEND_CMD:
if ((rsp_type == SD_RSP_TYPE_R1) || (rsp_type == SD_RSP_TYPE_R1b)) {
if ((cmd_idx != SEND_RELATIVE_ADDR) &&
- (cmd_idx != SEND_IF_COND)) {
+ (cmd_idx != SEND_IF_COND)) {
if (cmd_idx != STOP_TRANSMISSION) {
if (ptr[1] & 0x80) {
rtsx_trace(chip);
@@ -285,7 +286,7 @@ static int sd_read_data(struct rtsx_chip *chip,
u16 blk_cnt, u8 bus_width, u8 *buf, int buf_len,
int timeout)
{
- struct sd_info *sd_card = &(chip->sd_card);
+ struct sd_info *sd_card = &chip->sd_card;
int retval;
int i;
@@ -308,27 +309,27 @@ static int sd_read_data(struct rtsx_chip *chip,
0xFF, cmd[i]);
}
rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BYTE_CNT_L, 0xFF,
- (u8)byte_cnt);
+ (u8)byte_cnt);
rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BYTE_CNT_H, 0xFF,
- (u8)(byte_cnt >> 8));
+ (u8)(byte_cnt >> 8));
rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BLOCK_CNT_L, 0xFF,
- (u8)blk_cnt);
+ (u8)blk_cnt);
rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BLOCK_CNT_H, 0xFF,
- (u8)(blk_cnt >> 8));
+ (u8)(blk_cnt >> 8));
rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CFG1, 0x03, bus_width);
rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CFG2, 0xFF,
- SD_CALCULATE_CRC7 | SD_CHECK_CRC16 | SD_NO_WAIT_BUSY_END|
- SD_CHECK_CRC7 | SD_RSP_LEN_6);
+ SD_CALCULATE_CRC7 | SD_CHECK_CRC16 | SD_NO_WAIT_BUSY_END |
+ SD_CHECK_CRC7 | SD_RSP_LEN_6);
if (trans_mode != SD_TM_AUTO_TUNING)
rtsx_add_cmd(chip, WRITE_REG_CMD,
- CARD_DATA_SOURCE, 0x01, PINGPONG_BUFFER);
+ CARD_DATA_SOURCE, 0x01, PINGPONG_BUFFER);
rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_TRANSFER, 0xFF,
- trans_mode | SD_TRANSFER_START);
+ trans_mode | SD_TRANSFER_START);
rtsx_add_cmd(chip, CHECK_REG_CMD, REG_SD_TRANSFER, SD_TRANSFER_END,
- SD_TRANSFER_END);
+ SD_TRANSFER_END);
retval = rtsx_send_cmd(chip, SD_CARD, timeout);
if (retval < 0) {
@@ -353,10 +354,10 @@ static int sd_read_data(struct rtsx_chip *chip,
}
static int sd_write_data(struct rtsx_chip *chip, u8 trans_mode,
- u8 *cmd, int cmd_len, u16 byte_cnt, u16 blk_cnt, u8 bus_width,
- u8 *buf, int buf_len, int timeout)
+ u8 *cmd, int cmd_len, u16 byte_cnt, u16 blk_cnt,
+ u8 bus_width, u8 *buf, int buf_len, int timeout)
{
- struct sd_info *sd_card = &(chip->sd_card);
+ struct sd_info *sd_card = &chip->sd_card;
int retval;
int i;
@@ -389,30 +390,30 @@ static int sd_write_data(struct rtsx_chip *chip, u8 trans_mode,
}
}
rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BYTE_CNT_L, 0xFF,
- (u8)byte_cnt);
+ (u8)byte_cnt);
rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BYTE_CNT_H, 0xFF,
- (u8)(byte_cnt >> 8));
+ (u8)(byte_cnt >> 8));
rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BLOCK_CNT_L, 0xFF,
- (u8)blk_cnt);
+ (u8)blk_cnt);
rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BLOCK_CNT_H, 0xFF,
- (u8)(blk_cnt >> 8));
+ (u8)(blk_cnt >> 8));
rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CFG1, 0x03, bus_width);
rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CFG2, 0xFF,
- SD_CALCULATE_CRC7 | SD_CHECK_CRC16 | SD_NO_WAIT_BUSY_END |
- SD_CHECK_CRC7 | SD_RSP_LEN_6);
+ SD_CALCULATE_CRC7 | SD_CHECK_CRC16 | SD_NO_WAIT_BUSY_END |
+ SD_CHECK_CRC7 | SD_RSP_LEN_6);
rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_TRANSFER, 0xFF,
- trans_mode | SD_TRANSFER_START);
+ trans_mode | SD_TRANSFER_START);
rtsx_add_cmd(chip, CHECK_REG_CMD, REG_SD_TRANSFER, SD_TRANSFER_END,
- SD_TRANSFER_END);
+ SD_TRANSFER_END);
retval = rtsx_send_cmd(chip, SD_CARD, timeout);
if (retval < 0) {
if (retval == -ETIMEDOUT) {
- sd_send_cmd_get_rsp(chip, SEND_STATUS,
- sd_card->sd_addr, SD_RSP_TYPE_R1, NULL, 0);
+ sd_send_cmd_get_rsp(chip, SEND_STATUS, sd_card->sd_addr,
+ SD_RSP_TYPE_R1, NULL, 0);
}
rtsx_trace(chip);
@@ -424,7 +425,7 @@ static int sd_write_data(struct rtsx_chip *chip, u8 trans_mode,
static int sd_check_csd(struct rtsx_chip *chip, char check_wp)
{
- struct sd_info *sd_card = &(chip->sd_card);
+ struct sd_info *sd_card = &chip->sd_card;
int retval;
int i;
u8 csd_ver, trans_speed;
@@ -438,7 +439,7 @@ static int sd_check_csd(struct rtsx_chip *chip, char check_wp)
}
retval = sd_send_cmd_get_rsp(chip, SEND_CSD, sd_card->sd_addr,
- SD_RSP_TYPE_R2, rsp, 16);
+ SD_RSP_TYPE_R2, rsp, 16);
if (retval == STATUS_SUCCESS)
break;
}
@@ -534,7 +535,7 @@ static int sd_check_csd(struct rtsx_chip *chip, char check_wp)
static int sd_set_sample_push_timing(struct rtsx_chip *chip)
{
int retval;
- struct sd_info *sd_card = &(chip->sd_card);
+ struct sd_info *sd_card = &chip->sd_card;
u8 val = 0;
if ((chip->sd_ctl & SD_PUSH_POINT_CTL_MASK) == SD_PUSH_POINT_DELAY)
@@ -573,7 +574,7 @@ static int sd_set_sample_push_timing(struct rtsx_chip *chip)
static void sd_choose_proper_clock(struct rtsx_chip *chip)
{
- struct sd_info *sd_card = &(chip->sd_card);
+ struct sd_info *sd_card = &chip->sd_card;
if (CHK_SD_SDR104(sd_card)) {
if (chip->asic_code)
@@ -637,7 +638,7 @@ static int sd_set_clock_divider(struct rtsx_chip *chip, u8 clk_div)
static int sd_set_init_para(struct rtsx_chip *chip)
{
- struct sd_info *sd_card = &(chip->sd_card);
+ struct sd_info *sd_card = &chip->sd_card;
int retval;
retval = sd_set_sample_push_timing(chip);
@@ -659,7 +660,7 @@ static int sd_set_init_para(struct rtsx_chip *chip)
int sd_select_card(struct rtsx_chip *chip, int select)
{
- struct sd_info *sd_card = &(chip->sd_card);
+ struct sd_info *sd_card = &chip->sd_card;
int retval;
u8 cmd_idx, cmd_type;
u32 addr;
@@ -686,12 +687,12 @@ int sd_select_card(struct rtsx_chip *chip, int select)
#ifdef SUPPORT_SD_LOCK
static int sd_update_lock_status(struct rtsx_chip *chip)
{
- struct sd_info *sd_card = &(chip->sd_card);
+ struct sd_info *sd_card = &chip->sd_card;
int retval;
u8 rsp[5];
retval = sd_send_cmd_get_rsp(chip, SEND_STATUS, sd_card->sd_addr,
- SD_RSP_TYPE_R1, rsp, 5);
+ SD_RSP_TYPE_R1, rsp, 5);
if (retval != STATUS_SUCCESS) {
rtsx_trace(chip);
return STATUS_FAIL;
@@ -715,23 +716,23 @@ static int sd_update_lock_status(struct rtsx_chip *chip)
#endif
static int sd_wait_state_data_ready(struct rtsx_chip *chip, u8 state,
- u8 data_ready, int polling_cnt)
+ u8 data_ready, int polling_cnt)
{
- struct sd_info *sd_card = &(chip->sd_card);
+ struct sd_info *sd_card = &chip->sd_card;
int retval, i;
u8 rsp[5];
for (i = 0; i < polling_cnt; i++) {
retval = sd_send_cmd_get_rsp(chip, SEND_STATUS,
- sd_card->sd_addr, SD_RSP_TYPE_R1, rsp,
- 5);
+ sd_card->sd_addr, SD_RSP_TYPE_R1,
+ rsp, 5);
if (retval != STATUS_SUCCESS) {
rtsx_trace(chip);
return STATUS_FAIL;
}
if (((rsp[3] & 0x1E) == state) &&
- ((rsp[3] & 0x01) == data_ready))
+ ((rsp[3] & 0x01) == data_ready))
return STATUS_SUCCESS;
}
@@ -746,8 +747,8 @@ static int sd_change_bank_voltage(struct rtsx_chip *chip, u8 voltage)
if (voltage == SD_IO_3V3) {
if (chip->asic_code) {
retval = rtsx_write_phy_register(chip, 0x08,
- 0x4FC0 |
- chip->phy_voltage);
+ 0x4FC0 |
+ chip->phy_voltage);
if (retval != STATUS_SUCCESS) {
rtsx_trace(chip);
return STATUS_FAIL;
@@ -763,8 +764,8 @@ static int sd_change_bank_voltage(struct rtsx_chip *chip, u8 voltage)
} else if (voltage == SD_IO_1V8) {
if (chip->asic_code) {
retval = rtsx_write_phy_register(chip, 0x08,
- 0x4C40 |
- chip->phy_voltage);
+ 0x4C40 |
+ chip->phy_voltage);
if (retval != STATUS_SUCCESS) {
rtsx_trace(chip);
return STATUS_FAIL;
@@ -800,7 +801,7 @@ static int sd_voltage_switch(struct rtsx_chip *chip)
}
retval = sd_send_cmd_get_rsp(chip, VOLTAGE_SWITCH, 0, SD_RSP_TYPE_R1,
- NULL, 0);
+ NULL, 0);
if (retval != STATUS_SUCCESS) {
rtsx_trace(chip);
return STATUS_FAIL;
@@ -851,8 +852,8 @@ static int sd_voltage_switch(struct rtsx_chip *chip)
(SD_CMD_STATUS | SD_DAT3_STATUS | SD_DAT2_STATUS |
SD_DAT1_STATUS | SD_DAT0_STATUS)) {
dev_dbg(rtsx_dev(chip), "SD_BUS_STAT: 0x%x\n", stat);
- rtsx_write_register(chip, SD_BUS_STAT,
- SD_CLK_TOGGLE_EN | SD_CLK_FORCE_STOP, 0);
+ rtsx_write_register(chip, SD_BUS_STAT, SD_CLK_TOGGLE_EN |
+ SD_CLK_FORCE_STOP, 0);
rtsx_write_register(chip, CARD_CLK_EN, 0xFF, 0);
rtsx_trace(chip);
return STATUS_FAIL;
@@ -903,7 +904,7 @@ static int sd_reset_dcm(struct rtsx_chip *chip, u8 tune_dir)
static int sd_change_phase(struct rtsx_chip *chip, u8 sample_point, u8 tune_dir)
{
- struct sd_info *sd_card = &(chip->sd_card);
+ struct sd_info *sd_card = &chip->sd_card;
u16 SD_VP_CTL, SD_DCMPS_CTL;
u8 val;
int retval;
@@ -968,7 +969,9 @@ static int sd_change_phase(struct rtsx_chip *chip, u8 sample_point, u8 tune_dir)
}
udelay(50);
retval = rtsx_write_register(chip, SD_VP_CTL, 0xFF,
- PHASE_CHANGE | PHASE_NOT_RESET | sample_point);
+ PHASE_CHANGE |
+ PHASE_NOT_RESET |
+ sample_point);
if (retval) {
rtsx_trace(chip);
return retval;
@@ -982,7 +985,8 @@ static int sd_change_phase(struct rtsx_chip *chip, u8 sample_point, u8 tune_dir)
}
udelay(50);
retval = rtsx_write_register(chip, SD_VP_CTL, 0xFF,
- PHASE_NOT_RESET | sample_point);
+ PHASE_NOT_RESET |
+ sample_point);
if (retval) {
rtsx_trace(chip);
return retval;
@@ -992,24 +996,24 @@ static int sd_change_phase(struct rtsx_chip *chip, u8 sample_point, u8 tune_dir)
rtsx_init_cmd(chip);
rtsx_add_cmd(chip, WRITE_REG_CMD, SD_DCMPS_CTL, DCMPS_CHANGE,
- DCMPS_CHANGE);
+ DCMPS_CHANGE);
rtsx_add_cmd(chip, CHECK_REG_CMD, SD_DCMPS_CTL,
- DCMPS_CHANGE_DONE, DCMPS_CHANGE_DONE);
+ DCMPS_CHANGE_DONE, DCMPS_CHANGE_DONE);
retval = rtsx_send_cmd(chip, SD_CARD, 100);
if (retval != STATUS_SUCCESS) {
rtsx_trace(chip);
- goto Fail;
+ goto fail;
}
val = *rtsx_get_cmd_data(chip);
if (val & DCMPS_ERROR) {
rtsx_trace(chip);
- goto Fail;
+ goto fail;
}
if ((val & DCMPS_CURRENT_PHASE) != sample_point) {
rtsx_trace(chip);
- goto Fail;
+ goto fail;
}
retval = rtsx_write_register(chip, SD_DCMPS_CTL,
@@ -1045,7 +1049,7 @@ static int sd_change_phase(struct rtsx_chip *chip, u8 sample_point, u8 tune_dir)
return STATUS_SUCCESS;
-Fail:
+fail:
rtsx_read_register(chip, SD_VP_CTL, &val);
dev_dbg(rtsx_dev(chip), "SD_VP_CTL: 0x%x\n", val);
rtsx_read_register(chip, SD_DCMPS_CTL, &val);
@@ -1060,12 +1064,12 @@ Fail:
static int sd_check_spec(struct rtsx_chip *chip, u8 bus_width)
{
- struct sd_info *sd_card = &(chip->sd_card);
+ struct sd_info *sd_card = &chip->sd_card;
int retval;
u8 cmd[5], buf[8];
retval = sd_send_cmd_get_rsp(chip, APP_CMD, sd_card->sd_addr,
- SD_RSP_TYPE_R1, NULL, 0);
+ SD_RSP_TYPE_R1, NULL, 0);
if (retval != STATUS_SUCCESS) {
rtsx_trace(chip);
return STATUS_FAIL;
@@ -1078,7 +1082,7 @@ static int sd_check_spec(struct rtsx_chip *chip, u8 bus_width)
cmd[4] = 0;
retval = sd_read_data(chip, SD_TM_NORMAL_READ, cmd, 5, 8, 1, bus_width,
- buf, 8, 250);
+ buf, 8, 250);
if (retval != STATUS_SUCCESS) {
rtsx_clear_sd_error(chip);
rtsx_trace(chip);
@@ -1096,7 +1100,7 @@ static int sd_check_spec(struct rtsx_chip *chip, u8 bus_width)
}
static int sd_query_switch_result(struct rtsx_chip *chip, u8 func_group,
- u8 func_to_switch, u8 *buf, int buf_len)
+ u8 func_to_switch, u8 *buf, int buf_len)
{
u8 support_mask = 0, query_switch = 0, switch_busy = 0;
int support_offset = 0, query_switch_offset = 0, check_busy_offset = 0;
@@ -1198,7 +1202,7 @@ static int sd_query_switch_result(struct rtsx_chip *chip, u8 func_group,
if (func_group == SD_FUNC_GROUP_1) {
if (!(buf[support_offset] & support_mask) ||
- ((buf[query_switch_offset] & 0x0F) != query_switch)) {
+ ((buf[query_switch_offset] & 0x0F) != query_switch)) {
rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -1206,7 +1210,7 @@ static int sd_query_switch_result(struct rtsx_chip *chip, u8 func_group,
/* Check 'Busy Status' */
if ((buf[DATA_STRUCTURE_VER_OFFSET] == 0x01) &&
- ((buf[check_busy_offset] & switch_busy) == switch_busy)) {
+ ((buf[check_busy_offset] & switch_busy) == switch_busy)) {
rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -1214,10 +1218,10 @@ static int sd_query_switch_result(struct rtsx_chip *chip, u8 func_group,
return STATUS_SUCCESS;
}
-static int sd_check_switch_mode(struct rtsx_chip *chip, u8 mode,
- u8 func_group, u8 func_to_switch, u8 bus_width)
+static int sd_check_switch_mode(struct rtsx_chip *chip, u8 mode, u8 func_group,
+ u8 func_to_switch, u8 bus_width)
{
- struct sd_info *sd_card = &(chip->sd_card);
+ struct sd_info *sd_card = &chip->sd_card;
int retval;
u8 cmd[5], buf[64];
@@ -1247,7 +1251,7 @@ static int sd_check_switch_mode(struct rtsx_chip *chip, u8 mode,
}
retval = sd_read_data(chip, SD_TM_NORMAL_READ, cmd, 5, 64, 1, bus_width,
- buf, 64, 250);
+ buf, 64, 250);
if (retval != STATUS_SUCCESS) {
rtsx_clear_sd_error(chip);
rtsx_trace(chip);
@@ -1326,7 +1330,7 @@ static u8 downgrade_switch_mode(u8 func_group, u8 func_to_switch)
}
static int sd_check_switch(struct rtsx_chip *chip,
- u8 func_group, u8 func_to_switch, u8 bus_width)
+ u8 func_group, u8 func_to_switch, u8 bus_width)
{
int retval;
int i;
@@ -1340,12 +1344,14 @@ static int sd_check_switch(struct rtsx_chip *chip,
}
retval = sd_check_switch_mode(chip, SD_CHECK_MODE, func_group,
- func_to_switch, bus_width);
+ func_to_switch, bus_width);
if (retval == STATUS_SUCCESS) {
u8 stat;
retval = sd_check_switch_mode(chip, SD_SWITCH_MODE,
- func_group, func_to_switch, bus_width);
+ func_group,
+ func_to_switch,
+ bus_width);
if (retval == STATUS_SUCCESS) {
switch_good = true;
break;
@@ -1364,7 +1370,7 @@ static int sd_check_switch(struct rtsx_chip *chip,
}
func_to_switch = downgrade_switch_mode(func_group,
- func_to_switch);
+ func_to_switch);
wait_timeout(20);
}
@@ -1379,14 +1385,14 @@ static int sd_check_switch(struct rtsx_chip *chip,
static int sd_switch_function(struct rtsx_chip *chip, u8 bus_width)
{
- struct sd_info *sd_card = &(chip->sd_card);
+ struct sd_info *sd_card = &chip->sd_card;
int retval;
int i;
u8 func_to_switch = 0;
/* Get supported functions */
- retval = sd_check_switch_mode(chip, SD_CHECK_MODE,
- NO_ARGUMENT, NO_ARGUMENT, bus_width);
+ retval = sd_check_switch_mode(chip, SD_CHECK_MODE, NO_ARGUMENT,
+ NO_ARGUMENT, bus_width);
if (retval != STATUS_SUCCESS) {
rtsx_trace(chip);
return STATUS_FAIL;
@@ -1396,24 +1402,24 @@ static int sd_switch_function(struct rtsx_chip *chip, u8 bus_width)
/* Function Group 1: Access Mode */
for (i = 0; i < 4; i++) {
- switch ((u8)(chip->sd_speed_prior >> (i*8))) {
+ switch ((u8)(chip->sd_speed_prior >> (i * 8))) {
case SDR104_SUPPORT:
- if ((sd_card->func_group1_mask & SDR104_SUPPORT_MASK)
- && chip->sdr104_en) {
+ if ((sd_card->func_group1_mask & SDR104_SUPPORT_MASK) &&
+ chip->sdr104_en) {
func_to_switch = SDR104_SUPPORT;
}
break;
case DDR50_SUPPORT:
- if ((sd_card->func_group1_mask & DDR50_SUPPORT_MASK)
- && chip->ddr50_en) {
+ if ((sd_card->func_group1_mask & DDR50_SUPPORT_MASK) &&
+ chip->ddr50_en) {
func_to_switch = DDR50_SUPPORT;
}
break;
case SDR50_SUPPORT:
- if ((sd_card->func_group1_mask & SDR50_SUPPORT_MASK)
- && chip->sdr50_en) {
+ if ((sd_card->func_group1_mask & SDR50_SUPPORT_MASK) &&
+ chip->sdr50_en) {
func_to_switch = SDR50_SUPPORT;
}
break;
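Two checkpatch rules drive this switch rework: binary operators take a space on each side (i * 8, not i*8), and when a condition wraps, the logical operator stays at the end of the first line instead of opening the next. A minimal sketch (valid for i in 0..3):

static int func_supported(unsigned int mask, int i, int enabled)
{
	/* '&&' ends the line; binary '*' and '&' take spaces. */
	if ((mask & (1u << (i * 8))) &&
	    enabled)
		return 1;
	return 0;
}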
@@ -1430,7 +1436,6 @@ static int sd_switch_function(struct rtsx_chip *chip, u8 bus_width)
if (func_to_switch)
break;
-
}
dev_dbg(rtsx_dev(chip), "SD_FUNC_GROUP_1: func_to_switch = 0x%02x",
func_to_switch);
@@ -1446,7 +1451,7 @@ static int sd_switch_function(struct rtsx_chip *chip, u8 bus_width)
if (func_to_switch) {
retval = sd_check_switch(chip, SD_FUNC_GROUP_1, func_to_switch,
- bus_width);
+ bus_width);
if (retval != STATUS_SUCCESS) {
if (func_to_switch == SDR104_SUPPORT) {
sd_card->sd_switch_fail = SDR104_SUPPORT_MASK;
@@ -1496,7 +1501,7 @@ static int sd_switch_function(struct rtsx_chip *chip, u8 bus_width)
func_to_switch = 0xFF;
for (i = 0; i < 4; i++) {
- switch ((u8)(chip->sd_current_prior >> (i*8))) {
+ switch ((u8)(chip->sd_current_prior >> (i * 8))) {
case CURRENT_LIMIT_800:
if (sd_card->func_group4_mask & CURRENT_LIMIT_800_MASK)
func_to_switch = CURRENT_LIMIT_800;
@@ -1534,7 +1539,7 @@ static int sd_switch_function(struct rtsx_chip *chip, u8 bus_width)
if (func_to_switch <= CURRENT_LIMIT_800) {
retval = sd_check_switch(chip, SD_FUNC_GROUP_4, func_to_switch,
- bus_width);
+ bus_width);
if (retval != STATUS_SUCCESS) {
if (sd_check_err_code(chip, SD_NO_CARD)) {
rtsx_trace(chip);
@@ -1596,8 +1601,8 @@ static int sd_sdr_tuning_rx_cmd(struct rtsx_chip *chip, u8 sample_point)
cmd[3] = 0;
cmd[4] = 0;
- retval = sd_read_data(chip, SD_TM_AUTO_TUNING,
- cmd, 5, 0x40, 1, SD_BUS_WIDTH_4, NULL, 0, 100);
+ retval = sd_read_data(chip, SD_TM_AUTO_TUNING, cmd, 5, 0x40, 1,
+ SD_BUS_WIDTH_4, NULL, 0, 100);
if (retval != STATUS_SUCCESS) {
(void)sd_wait_data_idle(chip);
@@ -1611,7 +1616,7 @@ static int sd_sdr_tuning_rx_cmd(struct rtsx_chip *chip, u8 sample_point)
static int sd_ddr_tuning_rx_cmd(struct rtsx_chip *chip, u8 sample_point)
{
- struct sd_info *sd_card = &(chip->sd_card);
+ struct sd_info *sd_card = &chip->sd_card;
int retval;
u8 cmd[5];
@@ -1624,7 +1629,7 @@ static int sd_ddr_tuning_rx_cmd(struct rtsx_chip *chip, u8 sample_point)
dev_dbg(rtsx_dev(chip), "sd ddr tuning rx\n");
retval = sd_send_cmd_get_rsp(chip, APP_CMD, sd_card->sd_addr,
- SD_RSP_TYPE_R1, NULL, 0);
+ SD_RSP_TYPE_R1, NULL, 0);
if (retval != STATUS_SUCCESS) {
rtsx_trace(chip);
return STATUS_FAIL;
@@ -1636,8 +1641,8 @@ static int sd_ddr_tuning_rx_cmd(struct rtsx_chip *chip, u8 sample_point)
cmd[3] = 0;
cmd[4] = 0;
- retval = sd_read_data(chip, SD_TM_NORMAL_READ,
- cmd, 5, 64, 1, SD_BUS_WIDTH_4, NULL, 0, 100);
+ retval = sd_read_data(chip, SD_TM_NORMAL_READ, cmd, 5, 64, 1,
+ SD_BUS_WIDTH_4, NULL, 0, 100);
if (retval != STATUS_SUCCESS) {
(void)sd_wait_data_idle(chip);
@@ -1651,7 +1656,7 @@ static int sd_ddr_tuning_rx_cmd(struct rtsx_chip *chip, u8 sample_point)
static int mmc_ddr_tunning_rx_cmd(struct rtsx_chip *chip, u8 sample_point)
{
- struct sd_info *sd_card = &(chip->sd_card);
+ struct sd_info *sd_card = &chip->sd_card;
int retval;
u8 cmd[5], bus_width;
@@ -1676,8 +1681,8 @@ static int mmc_ddr_tunning_rx_cmd(struct rtsx_chip *chip, u8 sample_point)
cmd[3] = 0;
cmd[4] = 0;
- retval = sd_read_data(chip, SD_TM_NORMAL_READ,
- cmd, 5, 0x200, 1, bus_width, NULL, 0, 100);
+ retval = sd_read_data(chip, SD_TM_NORMAL_READ, cmd, 5, 0x200, 1,
+ bus_width, NULL, 0, 100);
if (retval != STATUS_SUCCESS) {
(void)sd_wait_data_idle(chip);
@@ -1691,7 +1696,7 @@ static int mmc_ddr_tunning_rx_cmd(struct rtsx_chip *chip, u8 sample_point)
static int sd_sdr_tuning_tx_cmd(struct rtsx_chip *chip, u8 sample_point)
{
- struct sd_info *sd_card = &(chip->sd_card);
+ struct sd_info *sd_card = &chip->sd_card;
int retval;
retval = sd_change_phase(chip, sample_point, TUNE_TX);
@@ -1708,11 +1713,11 @@ static int sd_sdr_tuning_tx_cmd(struct rtsx_chip *chip, u8 sample_point)
}
retval = sd_send_cmd_get_rsp(chip, SEND_STATUS, sd_card->sd_addr,
- SD_RSP_TYPE_R1, NULL, 0);
+ SD_RSP_TYPE_R1, NULL, 0);
if (retval != STATUS_SUCCESS) {
if (sd_check_err_code(chip, SD_RSP_TIMEOUT)) {
rtsx_write_register(chip, SD_CFG3,
- SD_RSP_80CLK_TIMEOUT_EN, 0);
+ SD_RSP_80CLK_TIMEOUT_EN, 0);
rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -1730,7 +1735,7 @@ static int sd_sdr_tuning_tx_cmd(struct rtsx_chip *chip, u8 sample_point)
static int sd_ddr_tuning_tx_cmd(struct rtsx_chip *chip, u8 sample_point)
{
- struct sd_info *sd_card = &(chip->sd_card);
+ struct sd_info *sd_card = &chip->sd_card;
int retval;
u8 cmd[5], bus_width;
@@ -1770,8 +1775,8 @@ static int sd_ddr_tuning_tx_cmd(struct rtsx_chip *chip, u8 sample_point)
cmd[3] = 0;
cmd[4] = 0;
- retval = sd_write_data(chip, SD_TM_AUTO_WRITE_2,
- cmd, 5, 16, 1, bus_width, sd_card->raw_csd, 16, 100);
+ retval = sd_write_data(chip, SD_TM_AUTO_WRITE_2, cmd, 5, 16, 1,
+ bus_width, sd_card->raw_csd, 16, 100);
if (retval != STATUS_SUCCESS) {
rtsx_clear_sd_error(chip);
rtsx_write_register(chip, SD_CFG3, SD_RSP_80CLK_TIMEOUT_EN, 0);
@@ -1787,7 +1792,7 @@ static int sd_ddr_tuning_tx_cmd(struct rtsx_chip *chip, u8 sample_point)
}
sd_send_cmd_get_rsp(chip, SEND_STATUS, sd_card->sd_addr, SD_RSP_TYPE_R1,
- NULL, 0);
+ NULL, 0);
return STATUS_SUCCESS;
}
@@ -1795,7 +1800,7 @@ static int sd_ddr_tuning_tx_cmd(struct rtsx_chip *chip, u8 sample_point)
static u8 sd_search_final_phase(struct rtsx_chip *chip, u32 phase_map,
u8 tune_dir)
{
- struct sd_info *sd_card = &(chip->sd_card);
+ struct sd_info *sd_card = &chip->sd_card;
struct timing_phase_path path[MAX_PHASE + 1];
int i, j, cont_path_cnt;
bool new_block;
@@ -1808,7 +1813,7 @@ static u8 sd_search_final_phase(struct rtsx_chip *chip, u32 phase_map,
else
final_phase = (u8)chip->sd_default_tx_phase;
- goto Search_Finish;
+ goto search_finish;
}
cont_path_cnt = 0;
@@ -1839,7 +1844,7 @@ static u8 sd_search_final_phase(struct rtsx_chip *chip, u32 phase_map,
if (cont_path_cnt == 0) {
dev_dbg(rtsx_dev(chip), "No continuous phase path\n");
- goto Search_Finish;
+ goto search_finish;
} else {
int idx = cont_path_cnt - 1;
@@ -1848,7 +1853,7 @@ static u8 sd_search_final_phase(struct rtsx_chip *chip, u32 phase_map,
}
if ((path[0].start == 0) &&
- (path[cont_path_cnt - 1].end == MAX_PHASE)) {
+ (path[cont_path_cnt - 1].end == MAX_PHASE)) {
path[0].start = path[cont_path_cnt - 1].start - MAX_PHASE - 1;
path[0].len += path[cont_path_cnt - 1].len;
path[0].mid = path[0].start + path[0].len / 2;
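The wrap-around handling above reflects that the sample points form a ring: a good-phase window ending at MAX_PHASE and another starting at phase 0 are really one window, so they are merged before the longest window's midpoint is chosen. A minimal sketch of that selection rule on a 32-phase bitmap (this mirrors the idea, not the driver's timing_phase_path bookkeeping):

#include <stdint.h>

/* Midpoint of the longest run of set bits in a 32-bit ring. */
static int best_phase(uint32_t map)
{
	int best_len = 0, best_mid = -1;
	int i, j;

	for (i = 0; i < 32; i++) {
		if (!(map & (1u << i)))
			continue;
		/* Length of the run starting at i, walking the ring. */
		for (j = 0; j < 32; j++)
			if (!(map & (1u << ((i + j) % 32))))
				break;
		if (j > best_len) {
			best_len = j;
			best_mid = (i + j / 2) % 32;
		}
	}
	return best_mid;	/* -1 if no phase passed tuning */
}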
@@ -1906,14 +1911,14 @@ static u8 sd_search_final_phase(struct rtsx_chip *chip, u32 phase_map,
}
}
-Search_Finish:
+search_finish:
dev_dbg(rtsx_dev(chip), "Final chosen phase: %d\n", final_phase);
return final_phase;
}
static int sd_tuning_rx(struct rtsx_chip *chip)
{
- struct sd_info *sd_card = &(chip->sd_card);
+ struct sd_info *sd_card = &chip->sd_card;
int retval;
int i, j;
u32 raw_phase_map[3], phase_map;
@@ -1974,7 +1979,7 @@ static int sd_tuning_rx(struct rtsx_chip *chip)
static int sd_ddr_pre_tuning_tx(struct rtsx_chip *chip)
{
- struct sd_info *sd_card = &(chip->sd_card);
+ struct sd_info *sd_card = &chip->sd_card;
int retval;
int i;
u32 phase_map;
@@ -1992,7 +1997,7 @@ static int sd_ddr_pre_tuning_tx(struct rtsx_chip *chip)
if (detect_card_cd(chip, SD_CARD) != STATUS_SUCCESS) {
sd_set_err_code(chip, SD_NO_CARD);
rtsx_write_register(chip, SD_CFG3,
- SD_RSP_80CLK_TIMEOUT_EN, 0);
+ SD_RSP_80CLK_TIMEOUT_EN, 0);
rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -2002,10 +2007,10 @@ static int sd_ddr_pre_tuning_tx(struct rtsx_chip *chip)
continue;
retval = sd_send_cmd_get_rsp(chip, SEND_STATUS,
- sd_card->sd_addr, SD_RSP_TYPE_R1, NULL,
- 0);
+ sd_card->sd_addr, SD_RSP_TYPE_R1,
+ NULL, 0);
if ((retval == STATUS_SUCCESS) ||
- !sd_check_err_code(chip, SD_RSP_TIMEOUT))
+ !sd_check_err_code(chip, SD_RSP_TIMEOUT))
phase_map |= 1 << i;
}
@@ -2039,7 +2044,7 @@ static int sd_ddr_pre_tuning_tx(struct rtsx_chip *chip)
static int sd_tuning_tx(struct rtsx_chip *chip)
{
- struct sd_info *sd_card = &(chip->sd_card);
+ struct sd_info *sd_card = &chip->sd_card;
int retval;
int i, j;
u32 raw_phase_map[3], phase_map;
@@ -2131,7 +2136,7 @@ static int sd_ddr_tuning(struct rtsx_chip *chip)
}
} else {
retval = sd_change_phase(chip, (u8)chip->sd_ddr_tx_phase,
- TUNE_TX);
+ TUNE_TX);
if (retval != STATUS_SUCCESS) {
rtsx_trace(chip);
return STATUS_FAIL;
@@ -2167,7 +2172,7 @@ static int mmc_ddr_tuning(struct rtsx_chip *chip)
}
} else {
retval = sd_change_phase(chip, (u8)chip->mmc_ddr_tx_phase,
- TUNE_TX);
+ TUNE_TX);
if (retval != STATUS_SUCCESS) {
rtsx_trace(chip);
return STATUS_FAIL;
@@ -2193,7 +2198,7 @@ static int mmc_ddr_tuning(struct rtsx_chip *chip)
int sd_switch_clock(struct rtsx_chip *chip)
{
- struct sd_info *sd_card = &(chip->sd_card);
+ struct sd_info *sd_card = &chip->sd_card;
int retval;
int re_tuning = 0;
@@ -2231,7 +2236,7 @@ int sd_switch_clock(struct rtsx_chip *chip)
static int sd_prepare_reset(struct rtsx_chip *chip)
{
- struct sd_info *sd_card = &(chip->sd_card);
+ struct sd_info *sd_card = &chip->sd_card;
int retval;
if (chip->asic_code)
@@ -2286,31 +2291,36 @@ static int sd_pull_ctl_disable(struct rtsx_chip *chip)
if (CHECK_PID(chip, 0x5208)) {
retval = rtsx_write_register(chip, CARD_PULL_CTL1, 0xFF,
- XD_D3_PD | SD_D7_PD | SD_CLK_PD | SD_D5_PD);
+ XD_D3_PD | SD_D7_PD | SD_CLK_PD |
+ SD_D5_PD);
if (retval) {
rtsx_trace(chip);
return retval;
}
retval = rtsx_write_register(chip, CARD_PULL_CTL2, 0xFF,
- SD_D6_PD | SD_D0_PD | SD_D1_PD | XD_D5_PD);
+ SD_D6_PD | SD_D0_PD | SD_D1_PD |
+ XD_D5_PD);
if (retval) {
rtsx_trace(chip);
return retval;
}
retval = rtsx_write_register(chip, CARD_PULL_CTL3, 0xFF,
- SD_D4_PD | XD_CE_PD | XD_CLE_PD | XD_CD_PU);
+ SD_D4_PD | XD_CE_PD | XD_CLE_PD |
+ XD_CD_PU);
if (retval) {
rtsx_trace(chip);
return retval;
}
retval = rtsx_write_register(chip, CARD_PULL_CTL4, 0xFF,
- XD_RDY_PD | SD_D3_PD | SD_D2_PD | XD_ALE_PD);
+ XD_RDY_PD | SD_D3_PD | SD_D2_PD |
+ XD_ALE_PD);
if (retval) {
rtsx_trace(chip);
return retval;
}
retval = rtsx_write_register(chip, CARD_PULL_CTL5, 0xFF,
- MS_INS_PU | SD_WP_PD | SD_CD_PU | SD_CMD_PD);
+ MS_INS_PU | SD_WP_PD | SD_CD_PU |
+ SD_CMD_PD);
if (retval) {
rtsx_trace(chip);
return retval;
@@ -2361,27 +2371,27 @@ int sd_pull_ctl_enable(struct rtsx_chip *chip)
if (CHECK_PID(chip, 0x5208)) {
rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL1, 0xFF,
- XD_D3_PD | SD_DAT7_PU | SD_CLK_NP | SD_D5_PU);
+ XD_D3_PD | SD_DAT7_PU | SD_CLK_NP | SD_D5_PU);
rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL2, 0xFF,
- SD_D6_PU | SD_D0_PU | SD_D1_PU | XD_D5_PD);
+ SD_D6_PU | SD_D0_PU | SD_D1_PU | XD_D5_PD);
rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL3, 0xFF,
- SD_D4_PU | XD_CE_PD | XD_CLE_PD | XD_CD_PU);
+ SD_D4_PU | XD_CE_PD | XD_CLE_PD | XD_CD_PU);
rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL4, 0xFF,
- XD_RDY_PD | SD_D3_PU | SD_D2_PU | XD_ALE_PD);
+ XD_RDY_PD | SD_D3_PU | SD_D2_PU | XD_ALE_PD);
rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL5, 0xFF,
- MS_INS_PU | SD_WP_PU | SD_CD_PU | SD_CMD_PU);
+ MS_INS_PU | SD_WP_PU | SD_CD_PU | SD_CMD_PU);
rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL6, 0xFF,
- MS_D5_PD | MS_D4_PD);
+ MS_D5_PD | MS_D4_PD);
} else if (CHECK_PID(chip, 0x5288)) {
if (CHECK_BARO_PKG(chip, QFN)) {
rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL1, 0xFF,
- 0xA8);
+ 0xA8);
rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL2, 0xFF,
- 0x5A);
+ 0x5A);
rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL3, 0xFF,
- 0x95);
+ 0x95);
rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL4, 0xFF,
- 0xAA);
+ 0xAA);
}
}
@@ -2478,7 +2488,7 @@ static int sd_dummy_clock(struct rtsx_chip *chip)
static int sd_read_lba0(struct rtsx_chip *chip)
{
- struct sd_info *sd_card = &(chip->sd_card);
+ struct sd_info *sd_card = &chip->sd_card;
int retval;
u8 cmd[5], bus_width;
@@ -2499,8 +2509,8 @@ static int sd_read_lba0(struct rtsx_chip *chip)
bus_width = SD_BUS_WIDTH_1;
}
- retval = sd_read_data(chip, SD_TM_NORMAL_READ, cmd,
- 5, 512, 1, bus_width, NULL, 0, 100);
+ retval = sd_read_data(chip, SD_TM_NORMAL_READ, cmd, 5, 512, 1,
+ bus_width, NULL, 0, 100);
if (retval != STATUS_SUCCESS) {
rtsx_clear_sd_error(chip);
rtsx_trace(chip);
@@ -2512,14 +2522,14 @@ static int sd_read_lba0(struct rtsx_chip *chip)
static int sd_check_wp_state(struct rtsx_chip *chip)
{
- struct sd_info *sd_card = &(chip->sd_card);
+ struct sd_info *sd_card = &chip->sd_card;
int retval;
u32 val;
u16 sd_card_type;
u8 cmd[5], buf[64];
- retval = sd_send_cmd_get_rsp(chip, APP_CMD,
- sd_card->sd_addr, SD_RSP_TYPE_R1, NULL, 0);
+ retval = sd_send_cmd_get_rsp(chip, APP_CMD, sd_card->sd_addr,
+ SD_RSP_TYPE_R1, NULL, 0);
if (retval != STATUS_SUCCESS) {
rtsx_trace(chip);
return STATUS_FAIL;
@@ -2532,12 +2542,12 @@ static int sd_check_wp_state(struct rtsx_chip *chip)
cmd[4] = 0;
retval = sd_read_data(chip, SD_TM_NORMAL_READ, cmd, 5, 64, 1,
- SD_BUS_WIDTH_4, buf, 64, 250);
+ SD_BUS_WIDTH_4, buf, 64, 250);
if (retval != STATUS_SUCCESS) {
rtsx_clear_sd_error(chip);
sd_send_cmd_get_rsp(chip, SEND_STATUS, sd_card->sd_addr,
- SD_RSP_TYPE_R1, NULL, 0);
+ SD_RSP_TYPE_R1, NULL, 0);
rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -2562,7 +2572,7 @@ static int sd_check_wp_state(struct rtsx_chip *chip)
static int reset_sd(struct rtsx_chip *chip)
{
- struct sd_info *sd_card = &(chip->sd_card);
+ struct sd_info *sd_card = &chip->sd_card;
bool hi_cap_flow = false;
int retval, i = 0, j = 0, k = 0;
bool sd_dont_switch = false;
@@ -2575,7 +2585,7 @@ static int reset_sd(struct rtsx_chip *chip)
SET_SD(sd_card);
-Switch_Fail:
+switch_fail:
i = 0;
j = 0;
@@ -2589,11 +2599,11 @@ Switch_Fail:
retval = sd_prepare_reset(chip);
if (retval != STATUS_SUCCESS)
- goto Status_Fail;
+ goto status_fail;
retval = sd_dummy_clock(chip);
if (retval != STATUS_SUCCESS)
- goto Status_Fail;
+ goto status_fail;
if (CHK_SDIO_EXIST(chip) && !CHK_SDIO_IGNORED(chip) && try_sdio) {
int rty_cnt = 0;
@@ -2601,11 +2611,11 @@ Switch_Fail:
for (; rty_cnt < chip->sdio_retry_cnt; rty_cnt++) {
if (detect_card_cd(chip, SD_CARD) != STATUS_SUCCESS) {
sd_set_err_code(chip, SD_NO_CARD);
- goto Status_Fail;
+ goto status_fail;
}
retval = sd_send_cmd_get_rsp(chip, IO_SEND_OP_COND, 0,
- SD_RSP_TYPE_R4, rsp, 5);
+ SD_RSP_TYPE_R4, rsp, 5);
if (retval == STATUS_SUCCESS) {
int func_num = (rsp[1] >> 4) & 0x07;
@@ -2613,7 +2623,7 @@ Switch_Fail:
dev_dbg(rtsx_dev(chip), "SD_IO card (Function number: %d)!\n",
func_num);
chip->sd_io = 1;
- goto Status_Fail;
+ goto status_fail;
}
break;
@@ -2630,14 +2640,14 @@ Switch_Fail:
/* Start Initialization Process of SD Card */
RTY_SD_RST:
retval = sd_send_cmd_get_rsp(chip, GO_IDLE_STATE, 0, SD_RSP_TYPE_R0,
- NULL, 0);
+ NULL, 0);
if (retval != STATUS_SUCCESS)
- goto Status_Fail;
+ goto status_fail;
wait_timeout(20);
retval = sd_send_cmd_get_rsp(chip, SEND_IF_COND, 0x000001AA,
- SD_RSP_TYPE_R7, rsp, 5);
+ SD_RSP_TYPE_R7, rsp, 5);
if (retval == STATUS_SUCCESS) {
if ((rsp[4] == 0xAA) && ((rsp[3] & 0x0f) == 0x01)) {
hi_cap_flow = true;
@@ -2649,37 +2659,37 @@ RTY_SD_RST:
voltage = SUPPORT_VOLTAGE;
retval = sd_send_cmd_get_rsp(chip, GO_IDLE_STATE, 0,
- SD_RSP_TYPE_R0, NULL, 0);
+ SD_RSP_TYPE_R0, NULL, 0);
if (retval != STATUS_SUCCESS)
- goto Status_Fail;
+ goto status_fail;
wait_timeout(20);
}
do {
retval = sd_send_cmd_get_rsp(chip, APP_CMD, 0, SD_RSP_TYPE_R1,
- NULL, 0);
+ NULL, 0);
if (retval != STATUS_SUCCESS) {
if (detect_card_cd(chip, SD_CARD) != STATUS_SUCCESS) {
sd_set_err_code(chip, SD_NO_CARD);
- goto Status_Fail;
+ goto status_fail;
}
j++;
if (j < 3)
goto RTY_SD_RST;
else
- goto Status_Fail;
+ goto status_fail;
}
retval = sd_send_cmd_get_rsp(chip, SD_APP_OP_COND, voltage,
- SD_RSP_TYPE_R3, rsp, 5);
+ SD_RSP_TYPE_R3, rsp, 5);
if (retval != STATUS_SUCCESS) {
k++;
if (k < 3)
goto RTY_SD_RST;
else
- goto Status_Fail;
+ goto status_fail;
}
i++;
@@ -2687,7 +2697,7 @@ RTY_SD_RST:
} while (!(rsp[1] & 0x80) && (i < 255));
if (i == 255)
- goto Status_Fail;
+ goto status_fail;
if (hi_cap_flow) {
if (rsp[1] & 0x40)
@@ -2705,19 +2715,19 @@ RTY_SD_RST:
if (support_1v8) {
retval = sd_voltage_switch(chip);
if (retval != STATUS_SUCCESS)
- goto Status_Fail;
+ goto status_fail;
}
retval = sd_send_cmd_get_rsp(chip, ALL_SEND_CID, 0, SD_RSP_TYPE_R2,
- NULL, 0);
+ NULL, 0);
if (retval != STATUS_SUCCESS)
- goto Status_Fail;
+ goto status_fail;
for (i = 0; i < 3; i++) {
retval = sd_send_cmd_get_rsp(chip, SEND_RELATIVE_ADDR, 0,
- SD_RSP_TYPE_R6, rsp, 5);
+ SD_RSP_TYPE_R6, rsp, 5);
if (retval != STATUS_SUCCESS)
- goto Status_Fail;
+ goto status_fail;
sd_card->sd_addr = (u32)rsp[1] << 24;
sd_card->sd_addr += (u32)rsp[2] << 16;
@@ -2728,17 +2738,17 @@ RTY_SD_RST:
retval = sd_check_csd(chip, 1);
if (retval != STATUS_SUCCESS)
- goto Status_Fail;
+ goto status_fail;
retval = sd_select_card(chip, 1);
if (retval != STATUS_SUCCESS)
- goto Status_Fail;
+ goto status_fail;
#ifdef SUPPORT_SD_LOCK
SD_UNLOCK_ENTRY:
retval = sd_update_lock_status(chip);
if (retval != STATUS_SUCCESS)
- goto Status_Fail;
+ goto status_fail;
if (sd_card->sd_lock_status & SD_LOCKED) {
sd_card->sd_lock_status |= (SD_LOCK_1BIT_MODE | SD_PWD_EXIST);
@@ -2749,25 +2759,25 @@ SD_UNLOCK_ENTRY:
#endif
retval = sd_send_cmd_get_rsp(chip, APP_CMD, sd_card->sd_addr,
- SD_RSP_TYPE_R1, NULL, 0);
+ SD_RSP_TYPE_R1, NULL, 0);
if (retval != STATUS_SUCCESS)
- goto Status_Fail;
+ goto status_fail;
retval = sd_send_cmd_get_rsp(chip, SET_CLR_CARD_DETECT, 0,
- SD_RSP_TYPE_R1, NULL, 0);
+ SD_RSP_TYPE_R1, NULL, 0);
if (retval != STATUS_SUCCESS)
- goto Status_Fail;
+ goto status_fail;
if (support_1v8) {
retval = sd_send_cmd_get_rsp(chip, APP_CMD, sd_card->sd_addr,
- SD_RSP_TYPE_R1, NULL, 0);
+ SD_RSP_TYPE_R1, NULL, 0);
if (retval != STATUS_SUCCESS)
- goto Status_Fail;
+ goto status_fail;
retval = sd_send_cmd_get_rsp(chip, SET_BUS_WIDTH, 2,
- SD_RSP_TYPE_R1, NULL, 0);
+ SD_RSP_TYPE_R1, NULL, 0);
if (retval != STATUS_SUCCESS)
- goto Status_Fail;
+ goto status_fail;
switch_bus_width = SD_BUS_WIDTH_4;
} else {
@@ -2775,13 +2785,13 @@ SD_UNLOCK_ENTRY:
}
retval = sd_send_cmd_get_rsp(chip, SET_BLOCKLEN, 0x200, SD_RSP_TYPE_R1,
- NULL, 0);
+ NULL, 0);
if (retval != STATUS_SUCCESS)
- goto Status_Fail;
+ goto status_fail;
retval = sd_set_clock_divider(chip, SD_CLK_DIVIDE_0);
if (retval != STATUS_SUCCESS)
- goto Status_Fail;
+ goto status_fail;
if (!(sd_card->raw_csd[4] & 0x40))
sd_dont_switch = true;
@@ -2804,7 +2814,7 @@ SD_UNLOCK_ENTRY:
sd_dont_switch = true;
try_sdio = false;
- goto Switch_Fail;
+ goto switch_fail;
}
} else {
if (support_1v8) {
@@ -2812,21 +2822,21 @@ SD_UNLOCK_ENTRY:
sd_dont_switch = true;
try_sdio = false;
- goto Switch_Fail;
+ goto switch_fail;
}
}
}
if (!support_1v8) {
retval = sd_send_cmd_get_rsp(chip, APP_CMD, sd_card->sd_addr,
- SD_RSP_TYPE_R1, NULL, 0);
+ SD_RSP_TYPE_R1, NULL, 0);
if (retval != STATUS_SUCCESS)
- goto Status_Fail;
+ goto status_fail;
retval = sd_send_cmd_get_rsp(chip, SET_BUS_WIDTH, 2,
- SD_RSP_TYPE_R1, NULL, 0);
+ SD_RSP_TYPE_R1, NULL, 0);
if (retval != STATUS_SUCCESS)
- goto Status_Fail;
+ goto status_fail;
}
#ifdef SUPPORT_SD_LOCK
@@ -2845,7 +2855,7 @@ SD_UNLOCK_ENTRY:
retval = sd_set_init_para(chip);
if (retval != STATUS_SUCCESS)
- goto Status_Fail;
+ goto status_fail;
if (CHK_SD_DDR50(sd_card))
retval = sd_ddr_tuning(chip);
@@ -2854,20 +2864,20 @@ SD_UNLOCK_ENTRY:
if (retval != STATUS_SUCCESS) {
if (sd20_mode) {
- goto Status_Fail;
+ goto status_fail;
} else {
retval = sd_init_power(chip);
if (retval != STATUS_SUCCESS)
- goto Status_Fail;
+ goto status_fail;
try_sdio = false;
sd20_mode = true;
- goto Switch_Fail;
+ goto switch_fail;
}
}
sd_send_cmd_get_rsp(chip, SEND_STATUS, sd_card->sd_addr,
- SD_RSP_TYPE_R1, NULL, 0);
+ SD_RSP_TYPE_R1, NULL, 0);
if (CHK_SD_DDR50(sd_card)) {
retval = sd_wait_state_data_ready(chip, 0x08, 1, 1000);
@@ -2879,15 +2889,15 @@ SD_UNLOCK_ENTRY:
retval = sd_read_lba0(chip);
if (retval != STATUS_SUCCESS) {
if (sd20_mode) {
- goto Status_Fail;
+ goto status_fail;
} else {
retval = sd_init_power(chip);
if (retval != STATUS_SUCCESS)
- goto Status_Fail;
+ goto status_fail;
try_sdio = false;
sd20_mode = true;
- goto Switch_Fail;
+ goto switch_fail;
}
}
}
@@ -2895,7 +2905,7 @@ SD_UNLOCK_ENTRY:
retval = sd_check_wp_state(chip);
if (retval != STATUS_SUCCESS)
- goto Status_Fail;
+ goto status_fail;
chip->card_bus_width[chip->card2lun[SD_CARD]] = 4;
@@ -2918,21 +2928,21 @@ SD_UNLOCK_ENTRY:
return STATUS_SUCCESS;
-Status_Fail:
+status_fail:
rtsx_trace(chip);
return STATUS_FAIL;
}
static int mmc_test_switch_bus(struct rtsx_chip *chip, u8 width)
{
- struct sd_info *sd_card = &(chip->sd_card);
+ struct sd_info *sd_card = &chip->sd_card;
int retval;
u8 buf[8] = {0}, bus_width, *ptr;
u16 byte_cnt;
int len;
retval = sd_send_cmd_get_rsp(chip, BUSTEST_W, 0, SD_RSP_TYPE_R1, NULL,
- 0);
+ 0);
if (retval != STATUS_SUCCESS) {
rtsx_trace(chip);
return SWITCH_FAIL;
@@ -2957,8 +2967,8 @@ static int mmc_test_switch_bus(struct rtsx_chip *chip, u8 width)
return SWITCH_ERR;
}
- retval = sd_write_data(chip, SD_TM_AUTO_WRITE_3,
- NULL, 0, byte_cnt, 1, bus_width, buf, len, 100);
+ retval = sd_write_data(chip, SD_TM_AUTO_WRITE_3, NULL, 0, byte_cnt, 1,
+ bus_width, buf, len, 100);
if (retval != STATUS_SUCCESS) {
rtsx_clear_sd_error(chip);
rtsx_write_register(chip, REG_SD_CFG3, 0x02, 0);
@@ -2980,23 +2990,23 @@ static int mmc_test_switch_bus(struct rtsx_chip *chip, u8 width)
if (width == MMC_8BIT_BUS)
rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BYTE_CNT_L,
- 0xFF, 0x08);
+ 0xFF, 0x08);
else
rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BYTE_CNT_L,
- 0xFF, 0x04);
+ 0xFF, 0x04);
rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BLOCK_CNT_L, 0xFF, 1);
rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BLOCK_CNT_H, 0xFF, 0);
- rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CFG2, 0xFF,
- SD_CALCULATE_CRC7 | SD_NO_CHECK_CRC16 | SD_NO_WAIT_BUSY_END|
- SD_CHECK_CRC7 | SD_RSP_LEN_6);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CFG2, 0xFF, SD_CALCULATE_CRC7 |
+ SD_NO_CHECK_CRC16 | SD_NO_WAIT_BUSY_END |
+ SD_CHECK_CRC7 | SD_RSP_LEN_6);
rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_DATA_SOURCE, 0x01,
- PINGPONG_BUFFER);
+ PINGPONG_BUFFER);
rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_TRANSFER, 0xFF,
- SD_TM_NORMAL_READ | SD_TRANSFER_START);
+ SD_TM_NORMAL_READ | SD_TRANSFER_START);
rtsx_add_cmd(chip, CHECK_REG_CMD, REG_SD_TRANSFER, SD_TRANSFER_END,
- SD_TRANSFER_END);
+ SD_TRANSFER_END);
rtsx_add_cmd(chip, READ_REG_CMD, PPBUF_BASE2, 0, 0);
if (width == MMC_8BIT_BUS)
@@ -3024,9 +3034,9 @@ static int mmc_test_switch_bus(struct rtsx_chip *chip, u8 width)
arg = 0x03B70200;
retval = sd_send_cmd_get_rsp(chip, SWITCH, arg,
- SD_RSP_TYPE_R1b, rsp, 5);
+ SD_RSP_TYPE_R1b, rsp, 5);
if ((retval == STATUS_SUCCESS) &&
- !(rsp[4] & MMC_SWITCH_ERR))
+ !(rsp[4] & MMC_SWITCH_ERR))
return SWITCH_SUCCESS;
}
} else {
@@ -3041,9 +3051,9 @@ static int mmc_test_switch_bus(struct rtsx_chip *chip, u8 width)
arg = 0x03B70100;
retval = sd_send_cmd_get_rsp(chip, SWITCH, arg,
- SD_RSP_TYPE_R1b, rsp, 5);
+ SD_RSP_TYPE_R1b, rsp, 5);
if ((retval == STATUS_SUCCESS) &&
- !(rsp[4] & MMC_SWITCH_ERR))
+ !(rsp[4] & MMC_SWITCH_ERR))
return SWITCH_SUCCESS;
}
}
@@ -3054,7 +3064,7 @@ static int mmc_test_switch_bus(struct rtsx_chip *chip, u8 width)
static int mmc_switch_timing_bus(struct rtsx_chip *chip, bool switch_ddr)
{
- struct sd_info *sd_card = &(chip->sd_card);
+ struct sd_info *sd_card = &chip->sd_card;
int retval;
u8 *ptr, card_type, card_type_mask = 0;
@@ -3065,7 +3075,7 @@ static int mmc_switch_timing_bus(struct rtsx_chip *chip, bool switch_ddr)
rtsx_init_cmd(chip);
rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CMD0, 0xFF,
- 0x40 | SEND_EXT_CSD);
+ 0x40 | SEND_EXT_CSD);
rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CMD1, 0xFF, 0);
rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CMD2, 0xFF, 0);
rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CMD3, 0xFF, 0);
@@ -3077,14 +3087,14 @@ static int mmc_switch_timing_bus(struct rtsx_chip *chip, bool switch_ddr)
rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BLOCK_CNT_H, 0xFF, 0);
rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CFG2, 0xFF,
- SD_CALCULATE_CRC7 | SD_CHECK_CRC16 | SD_NO_WAIT_BUSY_END|
- SD_CHECK_CRC7 | SD_RSP_LEN_6);
+ SD_CALCULATE_CRC7 | SD_CHECK_CRC16 | SD_NO_WAIT_BUSY_END |
+ SD_CHECK_CRC7 | SD_RSP_LEN_6);
rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_DATA_SOURCE, 0x01,
- PINGPONG_BUFFER);
+ PINGPONG_BUFFER);
rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_TRANSFER, 0xFF,
- SD_TM_NORMAL_READ | SD_TRANSFER_START);
+ SD_TM_NORMAL_READ | SD_TRANSFER_START);
rtsx_add_cmd(chip, CHECK_REG_CMD, REG_SD_TRANSFER, SD_TRANSFER_END,
- SD_TRANSFER_END);
+ SD_TRANSFER_END);
rtsx_add_cmd(chip, READ_REG_CMD, PPBUF_BASE2 + 196, 0xFF, 0);
rtsx_add_cmd(chip, READ_REG_CMD, PPBUF_BASE2 + 212, 0xFF, 0);
@@ -3097,7 +3107,7 @@ static int mmc_switch_timing_bus(struct rtsx_chip *chip, bool switch_ddr)
if (retval == -ETIMEDOUT) {
rtsx_clear_sd_error(chip);
sd_send_cmd_get_rsp(chip, SEND_STATUS, sd_card->sd_addr,
- SD_RSP_TYPE_R1, NULL, 0);
+ SD_RSP_TYPE_R1, NULL, 0);
}
rtsx_trace(chip);
return STATUS_FAIL;
@@ -3106,7 +3116,7 @@ static int mmc_switch_timing_bus(struct rtsx_chip *chip, bool switch_ddr)
ptr = rtsx_get_cmd_data(chip);
if (ptr[0] & SD_TRANSFER_ERR) {
sd_send_cmd_get_rsp(chip, SEND_STATUS, sd_card->sd_addr,
- SD_RSP_TYPE_R1, NULL, 0);
+ SD_RSP_TYPE_R1, NULL, 0);
rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -3132,8 +3142,8 @@ static int mmc_switch_timing_bus(struct rtsx_chip *chip, bool switch_ddr)
SET_MMC_26M(sd_card);
}
- retval = sd_send_cmd_get_rsp(chip, SWITCH,
- 0x03B90100, SD_RSP_TYPE_R1b, rsp, 5);
+ retval = sd_send_cmd_get_rsp(chip, SWITCH, 0x03B90100,
+ SD_RSP_TYPE_R1b, rsp, 5);
if ((retval != STATUS_SUCCESS) || (rsp[4] & MMC_SWITCH_ERR))
CLR_MMC_HS(sd_card);
}
@@ -3178,7 +3188,7 @@ static int mmc_switch_timing_bus(struct rtsx_chip *chip, bool switch_ddr)
static int reset_mmc(struct rtsx_chip *chip)
{
- struct sd_info *sd_card = &(chip->sd_card);
+ struct sd_info *sd_card = &chip->sd_card;
int retval, i = 0, j = 0, k = 0;
bool switch_ddr = true;
u8 rsp[16];
@@ -3190,7 +3200,7 @@ static int reset_mmc(struct rtsx_chip *chip)
goto MMC_UNLOCK_ENTRY;
#endif
-Switch_Fail:
+switch_fail:
retval = sd_prepare_reset(chip);
if (retval != STATUS_SUCCESS) {
rtsx_trace(chip);
@@ -3201,7 +3211,7 @@ Switch_Fail:
RTY_MMC_RST:
retval = sd_send_cmd_get_rsp(chip, GO_IDLE_STATE, 0, SD_RSP_TYPE_R0,
- NULL, 0);
+ NULL, 0);
if (retval != STATUS_SUCCESS) {
rtsx_trace(chip);
return STATUS_FAIL;
@@ -3215,11 +3225,11 @@ RTY_MMC_RST:
}
retval = sd_send_cmd_get_rsp(chip, SEND_OP_COND,
- (SUPPORT_VOLTAGE | 0x40000000),
- SD_RSP_TYPE_R3, rsp, 5);
+ (SUPPORT_VOLTAGE | 0x40000000),
+ SD_RSP_TYPE_R3, rsp, 5);
if (retval != STATUS_SUCCESS) {
if (sd_check_err_code(chip, SD_BUSY) ||
- sd_check_err_code(chip, SD_TO_ERR)) {
+ sd_check_err_code(chip, SD_TO_ERR)) {
k++;
if (k < 20) {
sd_clr_err_code(chip);
@@ -3255,7 +3265,7 @@ RTY_MMC_RST:
CLR_MMC_SECTOR_MODE(sd_card);
retval = sd_send_cmd_get_rsp(chip, ALL_SEND_CID, 0, SD_RSP_TYPE_R2,
- NULL, 0);
+ NULL, 0);
if (retval != STATUS_SUCCESS) {
rtsx_trace(chip);
return STATUS_FAIL;
@@ -3263,7 +3273,7 @@ RTY_MMC_RST:
sd_card->sd_addr = 0x00100000;
retval = sd_send_cmd_get_rsp(chip, SET_RELATIVE_ADDR, sd_card->sd_addr,
- SD_RSP_TYPE_R6, rsp, 5);
+ SD_RSP_TYPE_R6, rsp, 5);
if (retval != STATUS_SUCCESS) {
rtsx_trace(chip);
return STATUS_FAIL;
@@ -3284,7 +3294,7 @@ RTY_MMC_RST:
}
retval = sd_send_cmd_get_rsp(chip, SET_BLOCKLEN, 0x200, SD_RSP_TYPE_R1,
- NULL, 0);
+ NULL, 0);
if (retval != STATUS_SUCCESS) {
rtsx_trace(chip);
return STATUS_FAIL;
@@ -3319,7 +3329,7 @@ MMC_UNLOCK_ENTRY:
}
sd_card->mmc_dont_switch_bus = 1;
rtsx_trace(chip);
- goto Switch_Fail;
+ goto switch_fail;
}
}
@@ -3345,7 +3355,7 @@ MMC_UNLOCK_ENTRY:
switch_ddr = false;
rtsx_trace(chip);
- goto Switch_Fail;
+ goto switch_fail;
}
retval = sd_wait_state_data_ready(chip, 0x08, 1, 1000);
@@ -3360,7 +3370,7 @@ MMC_UNLOCK_ENTRY:
switch_ddr = false;
rtsx_trace(chip);
- goto Switch_Fail;
+ goto switch_fail;
}
}
}
@@ -3392,7 +3402,7 @@ MMC_UNLOCK_ENTRY:
int reset_sd_card(struct rtsx_chip *chip)
{
- struct sd_info *sd_card = &(chip->sd_card);
+ struct sd_info *sd_card = &chip->sd_card;
int retval;
sd_init_reg_addr(chip);
@@ -3407,7 +3417,7 @@ int reset_sd_card(struct rtsx_chip *chip)
}
if (chip->ignore_sd && CHK_SDIO_EXIST(chip) &&
- !CHK_SDIO_IGNORED(chip)) {
+ !CHK_SDIO_IGNORED(chip)) {
if (chip->asic_code) {
retval = sd_pull_ctl_enable(chip);
if (retval != STATUS_SUCCESS) {
@@ -3416,7 +3426,8 @@ int reset_sd_card(struct rtsx_chip *chip)
}
} else {
retval = rtsx_write_register(chip, FPGA_PULL_CTL,
- FPGA_SD_PULL_CTL_BIT | 0x20, 0);
+ FPGA_SD_PULL_CTL_BIT |
+ 0x20, 0);
if (retval != STATUS_SUCCESS) {
rtsx_trace(chip);
return STATUS_FAIL;
@@ -3505,7 +3516,7 @@ int reset_sd_card(struct rtsx_chip *chip)
static int reset_mmc_only(struct rtsx_chip *chip)
{
- struct sd_info *sd_card = &(chip->sd_card);
+ struct sd_info *sd_card = &chip->sd_card;
int retval;
sd_card->sd_type = 0;
@@ -3574,7 +3585,7 @@ static int reset_mmc_only(struct rtsx_chip *chip)
static int wait_data_buf_ready(struct rtsx_chip *chip)
{
- struct sd_info *sd_card = &(chip->sd_card);
+ struct sd_info *sd_card = &chip->sd_card;
int i, retval;
for (i = 0; i < WAIT_DATA_READY_RTY_CNT; i++) {
@@ -3587,7 +3598,8 @@ static int wait_data_buf_ready(struct rtsx_chip *chip)
sd_card->sd_data_buf_ready = 0;
retval = sd_send_cmd_get_rsp(chip, SEND_STATUS,
- sd_card->sd_addr, SD_RSP_TYPE_R1, NULL, 0);
+ sd_card->sd_addr, SD_RSP_TYPE_R1,
+ NULL, 0);
if (retval != STATUS_SUCCESS) {
rtsx_trace(chip);
return STATUS_FAIL;
@@ -3607,7 +3619,7 @@ static int wait_data_buf_ready(struct rtsx_chip *chip)
void sd_stop_seq_mode(struct rtsx_chip *chip)
{
- struct sd_info *sd_card = &(chip->sd_card);
+ struct sd_info *sd_card = &chip->sd_card;
int retval;
if (sd_card->seq_mode) {
@@ -3616,7 +3628,7 @@ void sd_stop_seq_mode(struct rtsx_chip *chip)
return;
retval = sd_send_cmd_get_rsp(chip, STOP_TRANSMISSION, 0,
- SD_RSP_TYPE_R1b, NULL, 0);
+ SD_RSP_TYPE_R1b, NULL, 0);
if (retval != STATUS_SUCCESS)
sd_set_err_code(chip, SD_STS_ERR);
@@ -3632,7 +3644,7 @@ void sd_stop_seq_mode(struct rtsx_chip *chip)
static inline int sd_auto_tune_clock(struct rtsx_chip *chip)
{
- struct sd_info *sd_card = &(chip->sd_card);
+ struct sd_info *sd_card = &chip->sd_card;
int retval;
if (chip->asic_code) {
@@ -3679,9 +3691,9 @@ static inline int sd_auto_tune_clock(struct rtsx_chip *chip)
}
int sd_rw(struct scsi_cmnd *srb, struct rtsx_chip *chip, u32 start_sector,
- u16 sector_cnt)
+ u16 sector_cnt)
{
- struct sd_info *sd_card = &(chip->sd_card);
+ struct sd_info *sd_card = &chip->sd_card;
u32 data_addr;
u8 cfg2;
int retval;
@@ -3730,20 +3742,20 @@ int sd_rw(struct scsi_cmnd *srb, struct rtsx_chip *chip, u32 start_sector,
}
if (sd_card->seq_mode &&
- ((sd_card->pre_dir != srb->sc_data_direction) ||
- ((sd_card->pre_sec_addr + sd_card->pre_sec_cnt) !=
- start_sector))) {
- if ((sd_card->pre_sec_cnt < 0x80)
- && (sd_card->pre_dir == DMA_FROM_DEVICE)
- && !CHK_SD30_SPEED(sd_card)
- && !CHK_SD_HS(sd_card)
- && !CHK_MMC_HS(sd_card)) {
+ ((sd_card->pre_dir != srb->sc_data_direction) ||
+ ((sd_card->pre_sec_addr + sd_card->pre_sec_cnt) !=
+ start_sector))) {
+ if ((sd_card->pre_sec_cnt < 0x80) &&
+ (sd_card->pre_dir == DMA_FROM_DEVICE) &&
+ !CHK_SD30_SPEED(sd_card) &&
+ !CHK_SD_HS(sd_card) &&
+ !CHK_MMC_HS(sd_card)) {
sd_send_cmd_get_rsp(chip, SEND_STATUS, sd_card->sd_addr,
- SD_RSP_TYPE_R1, NULL, 0);
+ SD_RSP_TYPE_R1, NULL, 0);
}
- retval = sd_send_cmd_get_rsp(chip, STOP_TRANSMISSION,
- 0, SD_RSP_TYPE_R1b, NULL, 0);
+ retval = sd_send_cmd_get_rsp(chip, STOP_TRANSMISSION, 0,
+ SD_RSP_TYPE_R1b, NULL, 0);
if (retval != STATUS_SUCCESS) {
chip->rw_need_retry = 1;
sd_set_err_code(chip, SD_STS_ERR);
@@ -3760,12 +3772,12 @@ int sd_rw(struct scsi_cmnd *srb, struct rtsx_chip *chip, u32 start_sector,
goto RW_FAIL;
}
- if ((sd_card->pre_sec_cnt < 0x80)
- && !CHK_SD30_SPEED(sd_card)
- && !CHK_SD_HS(sd_card)
- && !CHK_MMC_HS(sd_card)) {
+ if ((sd_card->pre_sec_cnt < 0x80) &&
+ !CHK_SD30_SPEED(sd_card) &&
+ !CHK_SD_HS(sd_card) &&
+ !CHK_MMC_HS(sd_card)) {
sd_send_cmd_get_rsp(chip, SEND_STATUS, sd_card->sd_addr,
- SD_RSP_TYPE_R1, NULL, 0);
+ SD_RSP_TYPE_R1, NULL, 0);
}
}
@@ -3774,30 +3786,30 @@ int sd_rw(struct scsi_cmnd *srb, struct rtsx_chip *chip, u32 start_sector,
rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BYTE_CNT_L, 0xFF, 0x00);
rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BYTE_CNT_H, 0xFF, 0x02);
rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BLOCK_CNT_L, 0xFF,
- (u8)sector_cnt);
+ (u8)sector_cnt);
rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BLOCK_CNT_H, 0xFF,
- (u8)(sector_cnt >> 8));
+ (u8)(sector_cnt >> 8));
rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_DATA_SOURCE, 0x01, RING_BUFFER);
if (CHK_MMC_8BIT(sd_card))
rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CFG1,
- 0x03, SD_BUS_WIDTH_8);
+ 0x03, SD_BUS_WIDTH_8);
else if (CHK_MMC_4BIT(sd_card) || CHK_SD(sd_card))
rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CFG1,
- 0x03, SD_BUS_WIDTH_4);
+ 0x03, SD_BUS_WIDTH_4);
else
rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CFG1,
- 0x03, SD_BUS_WIDTH_1);
+ 0x03, SD_BUS_WIDTH_1);
if (sd_card->seq_mode) {
- cfg2 = SD_NO_CALCULATE_CRC7 | SD_CHECK_CRC16|
+ cfg2 = SD_NO_CALCULATE_CRC7 | SD_CHECK_CRC16 |
SD_NO_WAIT_BUSY_END | SD_NO_CHECK_CRC7 |
SD_RSP_LEN_0;
rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CFG2, 0xFF, cfg2);
trans_dma_enable(srb->sc_data_direction, chip, sector_cnt * 512,
- DMA_512);
+ DMA_512);
if (srb->sc_data_direction == DMA_FROM_DEVICE) {
rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_TRANSFER, 0xFF,
@@ -3808,7 +3820,7 @@ int sd_rw(struct scsi_cmnd *srb, struct rtsx_chip *chip, u32 start_sector,
}
rtsx_add_cmd(chip, CHECK_REG_CMD, REG_SD_TRANSFER,
- SD_TRANSFER_END, SD_TRANSFER_END);
+ SD_TRANSFER_END, SD_TRANSFER_END);
rtsx_send_cmd_no_wait(chip);
} else {
@@ -3818,22 +3830,22 @@ int sd_rw(struct scsi_cmnd *srb, struct rtsx_chip *chip, u32 start_sector,
rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CMD0, 0xFF,
0x40 | READ_MULTIPLE_BLOCK);
rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CMD1, 0xFF,
- (u8)(data_addr >> 24));
+ (u8)(data_addr >> 24));
rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CMD2, 0xFF,
- (u8)(data_addr >> 16));
+ (u8)(data_addr >> 16));
rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CMD3, 0xFF,
- (u8)(data_addr >> 8));
+ (u8)(data_addr >> 8));
rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CMD4, 0xFF,
- (u8)data_addr);
+ (u8)data_addr);
cfg2 = SD_CALCULATE_CRC7 | SD_CHECK_CRC16 |
SD_NO_WAIT_BUSY_END | SD_CHECK_CRC7 |
SD_RSP_LEN_6;
rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CFG2, 0xFF,
- cfg2);
+ cfg2);
trans_dma_enable(srb->sc_data_direction, chip,
- sector_cnt * 512, DMA_512);
+ sector_cnt * 512, DMA_512);
rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_TRANSFER, 0xFF,
SD_TM_AUTO_READ_2 | SD_TRANSFER_START);
@@ -3861,7 +3873,8 @@ int sd_rw(struct scsi_cmnd *srb, struct rtsx_chip *chip, u32 start_sector,
}
retval = sd_send_cmd_get_rsp(chip, WRITE_MULTIPLE_BLOCK,
- data_addr, SD_RSP_TYPE_R1, NULL, 0);
+ data_addr, SD_RSP_TYPE_R1,
+ NULL, 0);
if (retval != STATUS_SUCCESS) {
chip->rw_need_retry = 1;
rtsx_trace(chip);
@@ -3874,10 +3887,10 @@ int sd_rw(struct scsi_cmnd *srb, struct rtsx_chip *chip, u32 start_sector,
SD_NO_WAIT_BUSY_END |
SD_NO_CHECK_CRC7 | SD_RSP_LEN_0;
rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CFG2, 0xFF,
- cfg2);
+ cfg2);
trans_dma_enable(srb->sc_data_direction, chip,
- sector_cnt * 512, DMA_512);
+ sector_cnt * 512, DMA_512);
rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_TRANSFER, 0xFF,
SD_TM_AUTO_WRITE_3 | SD_TRANSFER_START);
@@ -3891,7 +3904,7 @@ int sd_rw(struct scsi_cmnd *srb, struct rtsx_chip *chip, u32 start_sector,
}
retval = rtsx_transfer_data(chip, SD_CARD, scsi_sglist(srb),
- scsi_bufflen(srb), scsi_sg_count(srb),
+ scsi_bufflen(srb), scsi_sg_count(srb),
srb->sc_data_direction, chip->sd_timeout);
if (retval < 0) {
u8 stat = 0;
@@ -3916,7 +3929,7 @@ int sd_rw(struct scsi_cmnd *srb, struct rtsx_chip *chip, u32 start_sector,
chip->rw_need_retry = 1;
retval = sd_send_cmd_get_rsp(chip, STOP_TRANSMISSION, 0,
- SD_RSP_TYPE_R1b, NULL, 0);
+ SD_RSP_TYPE_R1b, NULL, 0);
if (retval != STATUS_SUCCESS) {
sd_set_err_code(chip, SD_STS_ERR);
rtsx_trace(chip);
@@ -3984,8 +3997,9 @@ int soft_reset_sd_card(struct rtsx_chip *chip)
return reset_sd(chip);
}
-int ext_sd_send_cmd_get_rsp(struct rtsx_chip *chip, u8 cmd_idx,
- u32 arg, u8 rsp_type, u8 *rsp, int rsp_len, bool special_check)
+int ext_sd_send_cmd_get_rsp(struct rtsx_chip *chip, u8 cmd_idx, u32 arg,
+ u8 rsp_type, u8 *rsp, int rsp_len,
+ bool special_check)
{
int retval;
int timeout = 100;
@@ -4011,11 +4025,11 @@ RTY_SEND_CMD:
rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CFG2, 0xFF, rsp_type);
rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_DATA_SOURCE,
- 0x01, PINGPONG_BUFFER);
+ 0x01, PINGPONG_BUFFER);
rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_TRANSFER,
- 0xFF, SD_TM_CMD_RSP | SD_TRANSFER_START);
+ 0xFF, SD_TM_CMD_RSP | SD_TRANSFER_START);
rtsx_add_cmd(chip, CHECK_REG_CMD, REG_SD_TRANSFER, SD_TRANSFER_END,
- SD_TRANSFER_END);
+ SD_TRANSFER_END);
if (rsp_type == SD_RSP_TYPE_R2) {
for (reg_addr = PPBUF_BASE2; reg_addr < PPBUF_BASE2 + 16;
@@ -4084,7 +4098,7 @@ RTY_SEND_CMD:
}
if ((cmd_idx == SELECT_CARD) || (cmd_idx == APP_CMD) ||
- (cmd_idx == SEND_STATUS) || (cmd_idx == STOP_TRANSMISSION)) {
+ (cmd_idx == SEND_STATUS) || (cmd_idx == STOP_TRANSMISSION)) {
if ((cmd_idx != STOP_TRANSMISSION) && !special_check) {
if (ptr[1] & 0x80) {
rtsx_trace(chip);
@@ -4172,7 +4186,7 @@ int ext_sd_get_rsp(struct rtsx_chip *chip, int len, u8 *rsp, u8 rsp_type)
int sd_pass_thru_mode(struct scsi_cmnd *srb, struct rtsx_chip *chip)
{
- struct sd_info *sd_card = &(chip->sd_card);
+ struct sd_info *sd_card = &chip->sd_card;
unsigned int lun = SCSI_LUN(srb);
int len;
u8 buf[18] = {
@@ -4206,9 +4220,9 @@ int sd_pass_thru_mode(struct scsi_cmnd *srb, struct rtsx_chip *chip)
}
if ((srb->cmnd[2] != 0x53) || (srb->cmnd[3] != 0x44) ||
- (srb->cmnd[4] != 0x20) || (srb->cmnd[5] != 0x43) ||
- (srb->cmnd[6] != 0x61) || (srb->cmnd[7] != 0x72) ||
- (srb->cmnd[8] != 0x64)) {
+ (srb->cmnd[4] != 0x20) || (srb->cmnd[5] != 0x43) ||
+ (srb->cmnd[6] != 0x61) || (srb->cmnd[7] != 0x72) ||
+ (srb->cmnd[8] != 0x64)) {
set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
rtsx_trace(chip);
return TRANSPORT_FAILED;
@@ -4245,7 +4259,7 @@ int sd_pass_thru_mode(struct scsi_cmnd *srb, struct rtsx_chip *chip)
}
static inline int get_rsp_type(struct scsi_cmnd *srb, u8 *rsp_type,
- int *rsp_len)
+ int *rsp_len)
{
if (!rsp_type || !rsp_len)
return STATUS_FAIL;
@@ -4285,7 +4299,7 @@ static inline int get_rsp_type(struct scsi_cmnd *srb, u8 *rsp_type,
int sd_execute_no_data(struct scsi_cmnd *srb, struct rtsx_chip *chip)
{
- struct sd_info *sd_card = &(chip->sd_card);
+ struct sd_info *sd_card = &chip->sd_card;
unsigned int lun = SCSI_LUN(srb);
int retval, rsp_len;
u8 cmd_idx, rsp_type;
@@ -4339,7 +4353,7 @@ int sd_execute_no_data(struct scsi_cmnd *srb, struct rtsx_chip *chip)
if ((sd_card->sd_lock_status & SD_LOCK_1BIT_MODE) == 0) {
if (CHK_MMC_8BIT(sd_card)) {
retval = rtsx_write_register(chip, REG_SD_CFG1, 0x03,
- SD_BUS_WIDTH_8);
+ SD_BUS_WIDTH_8);
if (retval != STATUS_SUCCESS) {
rtsx_trace(chip);
return TRANSPORT_FAILED;
@@ -4347,7 +4361,7 @@ int sd_execute_no_data(struct scsi_cmnd *srb, struct rtsx_chip *chip)
} else if (CHK_SD(sd_card) || CHK_MMC_4BIT(sd_card)) {
retval = rtsx_write_register(chip, REG_SD_CFG1, 0x03,
- SD_BUS_WIDTH_4);
+ SD_BUS_WIDTH_4);
if (retval != STATUS_SUCCESS) {
rtsx_trace(chip);
return TRANSPORT_FAILED;
@@ -4366,32 +4380,33 @@ int sd_execute_no_data(struct scsi_cmnd *srb, struct rtsx_chip *chip)
retval = sd_select_card(chip, 0);
if (retval != STATUS_SUCCESS) {
rtsx_trace(chip);
- goto SD_Execute_Cmd_Failed;
+ goto sd_execute_cmd_failed;
}
}
if (acmd) {
retval = ext_sd_send_cmd_get_rsp(chip, APP_CMD,
- sd_card->sd_addr,
- SD_RSP_TYPE_R1, NULL, 0, false);
+ sd_card->sd_addr,
+ SD_RSP_TYPE_R1, NULL, 0,
+ false);
if (retval != STATUS_SUCCESS) {
rtsx_trace(chip);
- goto SD_Execute_Cmd_Failed;
+ goto sd_execute_cmd_failed;
}
}
retval = ext_sd_send_cmd_get_rsp(chip, cmd_idx, arg, rsp_type,
- sd_card->rsp, rsp_len, false);
+ sd_card->rsp, rsp_len, false);
if (retval != STATUS_SUCCESS) {
rtsx_trace(chip);
- goto SD_Execute_Cmd_Failed;
+ goto sd_execute_cmd_failed;
}
if (standby) {
retval = sd_select_card(chip, 1);
if (retval != STATUS_SUCCESS) {
rtsx_trace(chip);
- goto SD_Execute_Cmd_Failed;
+ goto sd_execute_cmd_failed;
}
}
@@ -4399,14 +4414,14 @@ int sd_execute_no_data(struct scsi_cmnd *srb, struct rtsx_chip *chip)
retval = sd_update_lock_status(chip);
if (retval != STATUS_SUCCESS) {
rtsx_trace(chip);
- goto SD_Execute_Cmd_Failed;
+ goto sd_execute_cmd_failed;
}
#endif
scsi_set_resid(srb, 0);
return TRANSPORT_GOOD;
-SD_Execute_Cmd_Failed:
+sd_execute_cmd_failed:
sd_card->pre_cmd_err = 1;
set_sense_type(chip, lun, SENSE_TYPE_NO_SENSE);
release_sd_card(chip);
@@ -4420,7 +4435,7 @@ SD_Execute_Cmd_Failed:
int sd_execute_read_data(struct scsi_cmnd *srb, struct rtsx_chip *chip)
{
- struct sd_info *sd_card = &(chip->sd_card);
+ struct sd_info *sd_card = &chip->sd_card;
unsigned int lun = SCSI_LUN(srb);
int retval, rsp_len, i;
bool read_err = false, cmd13_checkbit = false;
@@ -4492,10 +4507,11 @@ int sd_execute_read_data(struct scsi_cmnd *srb, struct rtsx_chip *chip)
if (data_len < 512) {
retval = ext_sd_send_cmd_get_rsp(chip, SET_BLOCKLEN, data_len,
- SD_RSP_TYPE_R1, NULL, 0, false);
+ SD_RSP_TYPE_R1, NULL, 0,
+ false);
if (retval != STATUS_SUCCESS) {
rtsx_trace(chip);
- goto SD_Execute_Read_Cmd_Failed;
+ goto sd_execute_read_cmd_failed;
}
}
@@ -4503,17 +4519,18 @@ int sd_execute_read_data(struct scsi_cmnd *srb, struct rtsx_chip *chip)
retval = sd_select_card(chip, 0);
if (retval != STATUS_SUCCESS) {
rtsx_trace(chip);
- goto SD_Execute_Read_Cmd_Failed;
+ goto sd_execute_read_cmd_failed;
}
}
if (acmd) {
retval = ext_sd_send_cmd_get_rsp(chip, APP_CMD,
- sd_card->sd_addr,
- SD_RSP_TYPE_R1, NULL, 0, false);
+ sd_card->sd_addr,
+ SD_RSP_TYPE_R1, NULL, 0,
+ false);
if (retval != STATUS_SUCCESS) {
rtsx_trace(chip);
- goto SD_Execute_Read_Cmd_Failed;
+ goto sd_execute_read_cmd_failed;
}
}
@@ -4539,13 +4556,13 @@ int sd_execute_read_data(struct scsi_cmnd *srb, struct rtsx_chip *chip)
}
retval = sd_read_data(chip, SD_TM_NORMAL_READ, cmd, 5, byte_cnt,
- blk_cnt, bus_width, buf, data_len, 2000);
+ blk_cnt, bus_width, buf, data_len, 2000);
if (retval != STATUS_SUCCESS) {
read_err = true;
kfree(buf);
rtsx_clear_sd_error(chip);
rtsx_trace(chip);
- goto SD_Execute_Read_Cmd_Failed;
+ goto sd_execute_read_cmd_failed;
}
min_len = min(data_len, scsi_bufflen(srb));
@@ -4558,24 +4575,24 @@ int sd_execute_read_data(struct scsi_cmnd *srb, struct rtsx_chip *chip)
trans_dma_enable(DMA_FROM_DEVICE, chip, data_len, DMA_512);
rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BYTE_CNT_H, 0xFF,
- 0x02);
+ 0x02);
rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BYTE_CNT_L, 0xFF,
- 0x00);
+ 0x00);
rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BLOCK_CNT_H,
- 0xFF, (srb->cmnd[7] & 0xFE) >> 1);
+ 0xFF, (srb->cmnd[7] & 0xFE) >> 1);
rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BLOCK_CNT_L,
- 0xFF, (u8)((data_len & 0x0001FE00) >> 9));
+ 0xFF, (u8)((data_len & 0x0001FE00) >> 9));
rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CMD0, 0xFF,
- 0x40 | cmd_idx);
+ 0x40 | cmd_idx);
rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CMD1, 0xFF,
- srb->cmnd[3]);
+ srb->cmnd[3]);
rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CMD2, 0xFF,
- srb->cmnd[4]);
+ srb->cmnd[4]);
rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CMD3, 0xFF,
- srb->cmnd[5]);
+ srb->cmnd[5]);
rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CMD4, 0xFF,
- srb->cmnd[6]);
+ srb->cmnd[6]);
rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CFG1, 0x03, bus_width);
rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CFG2, 0xFF, rsp_type);
@@ -4583,66 +4600,69 @@ int sd_execute_read_data(struct scsi_cmnd *srb, struct rtsx_chip *chip)
rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_TRANSFER,
0xFF, SD_TM_AUTO_READ_2 | SD_TRANSFER_START);
rtsx_add_cmd(chip, CHECK_REG_CMD, REG_SD_TRANSFER,
- SD_TRANSFER_END, SD_TRANSFER_END);
+ SD_TRANSFER_END, SD_TRANSFER_END);
rtsx_send_cmd_no_wait(chip);
retval = rtsx_transfer_data(chip, SD_CARD, scsi_sglist(srb),
- scsi_bufflen(srb), scsi_sg_count(srb),
- DMA_FROM_DEVICE, 10000);
+ scsi_bufflen(srb),
+ scsi_sg_count(srb),
+ DMA_FROM_DEVICE, 10000);
if (retval < 0) {
read_err = true;
rtsx_clear_sd_error(chip);
rtsx_trace(chip);
- goto SD_Execute_Read_Cmd_Failed;
+ goto sd_execute_read_cmd_failed;
}
} else {
rtsx_trace(chip);
- goto SD_Execute_Read_Cmd_Failed;
+ goto sd_execute_read_cmd_failed;
}
retval = ext_sd_get_rsp(chip, rsp_len, sd_card->rsp, rsp_type);
if (retval != STATUS_SUCCESS) {
rtsx_trace(chip);
- goto SD_Execute_Read_Cmd_Failed;
+ goto sd_execute_read_cmd_failed;
}
if (standby) {
retval = sd_select_card(chip, 1);
if (retval != STATUS_SUCCESS) {
rtsx_trace(chip);
- goto SD_Execute_Read_Cmd_Failed;
+ goto sd_execute_read_cmd_failed;
}
}
if (send_cmd12) {
- retval = ext_sd_send_cmd_get_rsp(chip, STOP_TRANSMISSION,
- 0, SD_RSP_TYPE_R1b, NULL, 0, false);
+ retval = ext_sd_send_cmd_get_rsp(chip, STOP_TRANSMISSION, 0,
+ SD_RSP_TYPE_R1b, NULL, 0,
+ false);
if (retval != STATUS_SUCCESS) {
rtsx_trace(chip);
- goto SD_Execute_Read_Cmd_Failed;
+ goto sd_execute_read_cmd_failed;
}
}
if (data_len < 512) {
retval = ext_sd_send_cmd_get_rsp(chip, SET_BLOCKLEN, 0x200,
- SD_RSP_TYPE_R1, NULL, 0, false);
+ SD_RSP_TYPE_R1, NULL, 0,
+ false);
if (retval != STATUS_SUCCESS) {
rtsx_trace(chip);
- goto SD_Execute_Read_Cmd_Failed;
+ goto sd_execute_read_cmd_failed;
}
retval = rtsx_write_register(chip, SD_BYTE_CNT_H, 0xFF, 0x02);
if (retval != STATUS_SUCCESS) {
rtsx_trace(chip);
- goto SD_Execute_Read_Cmd_Failed;
+ goto sd_execute_read_cmd_failed;
}
retval = rtsx_write_register(chip, SD_BYTE_CNT_L, 0xFF, 0x00);
if (retval != STATUS_SUCCESS) {
rtsx_trace(chip);
- goto SD_Execute_Read_Cmd_Failed;
+ goto sd_execute_read_cmd_failed;
}
}
@@ -4651,7 +4671,7 @@ int sd_execute_read_data(struct scsi_cmnd *srb, struct rtsx_chip *chip)
for (i = 0; i < 3; i++) {
retval = ext_sd_send_cmd_get_rsp(chip, SEND_STATUS,
- sd_card->sd_addr,
+ sd_card->sd_addr,
SD_RSP_TYPE_R1, NULL, 0,
cmd13_checkbit);
if (retval == STATUS_SUCCESS)
@@ -4659,13 +4679,13 @@ int sd_execute_read_data(struct scsi_cmnd *srb, struct rtsx_chip *chip)
}
if (retval != STATUS_SUCCESS) {
rtsx_trace(chip);
- goto SD_Execute_Read_Cmd_Failed;
+ goto sd_execute_read_cmd_failed;
}
scsi_set_resid(srb, 0);
return TRANSPORT_GOOD;
-SD_Execute_Read_Cmd_Failed:
+sd_execute_read_cmd_failed:
sd_card->pre_cmd_err = 1;
set_sense_type(chip, lun, SENSE_TYPE_NO_SENSE);
if (read_err)
@@ -4682,7 +4702,7 @@ SD_Execute_Read_Cmd_Failed:
int sd_execute_write_data(struct scsi_cmnd *srb, struct rtsx_chip *chip)
{
- struct sd_info *sd_card = &(chip->sd_card);
+ struct sd_info *sd_card = &chip->sd_card;
unsigned int lun = SCSI_LUN(srb);
int retval, rsp_len, i;
bool write_err = false, cmd13_checkbit = false;
@@ -4754,7 +4774,7 @@ int sd_execute_write_data(struct scsi_cmnd *srb, struct rtsx_chip *chip)
if ((sd_card->sd_lock_status & SD_LOCK_1BIT_MODE) == 0) {
if (CHK_MMC_8BIT(sd_card)) {
retval = rtsx_write_register(chip, REG_SD_CFG1, 0x03,
- SD_BUS_WIDTH_8);
+ SD_BUS_WIDTH_8);
if (retval != STATUS_SUCCESS) {
rtsx_trace(chip);
return TRANSPORT_FAILED;
@@ -4762,7 +4782,7 @@ int sd_execute_write_data(struct scsi_cmnd *srb, struct rtsx_chip *chip)
} else if (CHK_SD(sd_card) || CHK_MMC_4BIT(sd_card)) {
retval = rtsx_write_register(chip, REG_SD_CFG1, 0x03,
- SD_BUS_WIDTH_4);
+ SD_BUS_WIDTH_4);
if (retval != STATUS_SUCCESS) {
rtsx_trace(chip);
return TRANSPORT_FAILED;
@@ -4779,10 +4799,11 @@ int sd_execute_write_data(struct scsi_cmnd *srb, struct rtsx_chip *chip)
if (data_len < 512) {
retval = ext_sd_send_cmd_get_rsp(chip, SET_BLOCKLEN, data_len,
- SD_RSP_TYPE_R1, NULL, 0, false);
+ SD_RSP_TYPE_R1, NULL, 0,
+ false);
if (retval != STATUS_SUCCESS) {
rtsx_trace(chip);
- goto SD_Execute_Write_Cmd_Failed;
+ goto sd_execute_write_cmd_failed;
}
}
@@ -4790,25 +4811,26 @@ int sd_execute_write_data(struct scsi_cmnd *srb, struct rtsx_chip *chip)
retval = sd_select_card(chip, 0);
if (retval != STATUS_SUCCESS) {
rtsx_trace(chip);
- goto SD_Execute_Write_Cmd_Failed;
+ goto sd_execute_write_cmd_failed;
}
}
if (acmd) {
retval = ext_sd_send_cmd_get_rsp(chip, APP_CMD,
- sd_card->sd_addr,
- SD_RSP_TYPE_R1, NULL, 0, false);
+ sd_card->sd_addr,
+ SD_RSP_TYPE_R1, NULL, 0,
+ false);
if (retval != STATUS_SUCCESS) {
rtsx_trace(chip);
- goto SD_Execute_Write_Cmd_Failed;
+ goto sd_execute_write_cmd_failed;
}
}
retval = ext_sd_send_cmd_get_rsp(chip, cmd_idx, arg, rsp_type,
- sd_card->rsp, rsp_len, false);
+ sd_card->rsp, rsp_len, false);
if (retval != STATUS_SUCCESS) {
rtsx_trace(chip);
- goto SD_Execute_Write_Cmd_Failed;
+ goto sd_execute_write_cmd_failed;
}
if (data_len <= 512) {
@@ -4832,37 +4854,37 @@ int sd_execute_write_data(struct scsi_cmnd *srb, struct rtsx_chip *chip)
rtsx_init_cmd(chip);
for (i = 0; i < 256; i++) {
rtsx_add_cmd(chip, WRITE_REG_CMD,
- PPBUF_BASE2 + i, 0xFF, buf[i]);
+ PPBUF_BASE2 + i, 0xFF, buf[i]);
}
retval = rtsx_send_cmd(chip, 0, 250);
if (retval != STATUS_SUCCESS) {
kfree(buf);
rtsx_trace(chip);
- goto SD_Execute_Write_Cmd_Failed;
+ goto sd_execute_write_cmd_failed;
}
rtsx_init_cmd(chip);
for (i = 256; i < data_len; i++) {
rtsx_add_cmd(chip, WRITE_REG_CMD,
- PPBUF_BASE2 + i, 0xFF, buf[i]);
+ PPBUF_BASE2 + i, 0xFF, buf[i]);
}
retval = rtsx_send_cmd(chip, 0, 250);
if (retval != STATUS_SUCCESS) {
kfree(buf);
rtsx_trace(chip);
- goto SD_Execute_Write_Cmd_Failed;
+ goto sd_execute_write_cmd_failed;
}
} else {
rtsx_init_cmd(chip);
for (i = 0; i < data_len; i++) {
rtsx_add_cmd(chip, WRITE_REG_CMD,
- PPBUF_BASE2 + i, 0xFF, buf[i]);
+ PPBUF_BASE2 + i, 0xFF, buf[i]);
}
retval = rtsx_send_cmd(chip, 0, 250);
if (retval != STATUS_SUCCESS) {
kfree(buf);
rtsx_trace(chip);
- goto SD_Execute_Write_Cmd_Failed;
+ goto sd_execute_write_cmd_failed;
}
}
@@ -4871,20 +4893,20 @@ int sd_execute_write_data(struct scsi_cmnd *srb, struct rtsx_chip *chip)
rtsx_init_cmd(chip);
rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BYTE_CNT_H, 0xFF,
- srb->cmnd[8] & 0x03);
+ srb->cmnd[8] & 0x03);
rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BYTE_CNT_L, 0xFF,
- srb->cmnd[9]);
+ srb->cmnd[9]);
rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BLOCK_CNT_H, 0xFF,
- 0x00);
+ 0x00);
rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BLOCK_CNT_L, 0xFF,
- 0x01);
+ 0x01);
rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_DATA_SOURCE, 0x01,
- PINGPONG_BUFFER);
+ PINGPONG_BUFFER);
rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_TRANSFER, 0xFF,
SD_TM_AUTO_WRITE_3 | SD_TRANSFER_START);
rtsx_add_cmd(chip, CHECK_REG_CMD, REG_SD_TRANSFER,
- SD_TRANSFER_END, SD_TRANSFER_END);
+ SD_TRANSFER_END, SD_TRANSFER_END);
retval = rtsx_send_cmd(chip, SD_CARD, 250);
} else if (!(data_len & 0x1FF)) {
@@ -4893,35 +4915,36 @@ int sd_execute_write_data(struct scsi_cmnd *srb, struct rtsx_chip *chip)
trans_dma_enable(DMA_TO_DEVICE, chip, data_len, DMA_512);
rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BYTE_CNT_H, 0xFF,
- 0x02);
+ 0x02);
rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BYTE_CNT_L, 0xFF,
- 0x00);
+ 0x00);
rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BLOCK_CNT_H,
- 0xFF, (srb->cmnd[7] & 0xFE) >> 1);
+ 0xFF, (srb->cmnd[7] & 0xFE) >> 1);
rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BLOCK_CNT_L,
- 0xFF, (u8)((data_len & 0x0001FE00) >> 9));
+ 0xFF, (u8)((data_len & 0x0001FE00) >> 9));
rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_TRANSFER, 0xFF,
- SD_TM_AUTO_WRITE_3 | SD_TRANSFER_START);
+ SD_TM_AUTO_WRITE_3 | SD_TRANSFER_START);
rtsx_add_cmd(chip, CHECK_REG_CMD, REG_SD_TRANSFER,
- SD_TRANSFER_END, SD_TRANSFER_END);
+ SD_TRANSFER_END, SD_TRANSFER_END);
rtsx_send_cmd_no_wait(chip);
retval = rtsx_transfer_data(chip, SD_CARD, scsi_sglist(srb),
- scsi_bufflen(srb), scsi_sg_count(srb),
- DMA_TO_DEVICE, 10000);
+ scsi_bufflen(srb),
+ scsi_sg_count(srb),
+ DMA_TO_DEVICE, 10000);
} else {
rtsx_trace(chip);
- goto SD_Execute_Write_Cmd_Failed;
+ goto sd_execute_write_cmd_failed;
}
if (retval < 0) {
write_err = true;
rtsx_clear_sd_error(chip);
rtsx_trace(chip);
- goto SD_Execute_Write_Cmd_Failed;
+ goto sd_execute_write_cmd_failed;
}
#ifdef SUPPORT_SD_LOCK
@@ -4949,37 +4972,39 @@ int sd_execute_write_data(struct scsi_cmnd *srb, struct rtsx_chip *chip)
retval = sd_select_card(chip, 1);
if (retval != STATUS_SUCCESS) {
rtsx_trace(chip);
- goto SD_Execute_Write_Cmd_Failed;
+ goto sd_execute_write_cmd_failed;
}
}
if (send_cmd12) {
- retval = ext_sd_send_cmd_get_rsp(chip, STOP_TRANSMISSION,
- 0, SD_RSP_TYPE_R1b, NULL, 0, false);
+ retval = ext_sd_send_cmd_get_rsp(chip, STOP_TRANSMISSION, 0,
+ SD_RSP_TYPE_R1b, NULL, 0,
+ false);
if (retval != STATUS_SUCCESS) {
rtsx_trace(chip);
- goto SD_Execute_Write_Cmd_Failed;
+ goto sd_execute_write_cmd_failed;
}
}
if (data_len < 512) {
retval = ext_sd_send_cmd_get_rsp(chip, SET_BLOCKLEN, 0x200,
- SD_RSP_TYPE_R1, NULL, 0, false);
+ SD_RSP_TYPE_R1, NULL, 0,
+ false);
if (retval != STATUS_SUCCESS) {
rtsx_trace(chip);
- goto SD_Execute_Write_Cmd_Failed;
+ goto sd_execute_write_cmd_failed;
}
retval = rtsx_write_register(chip, SD_BYTE_CNT_H, 0xFF, 0x02);
if (retval != STATUS_SUCCESS) {
rtsx_trace(chip);
- goto SD_Execute_Write_Cmd_Failed;
+ goto sd_execute_write_cmd_failed;
}
rtsx_write_register(chip, SD_BYTE_CNT_L, 0xFF, 0x00);
if (retval != STATUS_SUCCESS) {
rtsx_trace(chip);
- goto SD_Execute_Write_Cmd_Failed;
+ goto sd_execute_write_cmd_failed;
}
}
@@ -4988,15 +5013,15 @@ int sd_execute_write_data(struct scsi_cmnd *srb, struct rtsx_chip *chip)
for (i = 0; i < 3; i++) {
retval = ext_sd_send_cmd_get_rsp(chip, SEND_STATUS,
- sd_card->sd_addr,
- SD_RSP_TYPE_R1, NULL, 0,
- cmd13_checkbit);
+ sd_card->sd_addr,
+ SD_RSP_TYPE_R1, NULL, 0,
+ cmd13_checkbit);
if (retval == STATUS_SUCCESS)
break;
}
if (retval != STATUS_SUCCESS) {
rtsx_trace(chip);
- goto SD_Execute_Write_Cmd_Failed;
+ goto sd_execute_write_cmd_failed;
}
#ifdef SUPPORT_SD_LOCK
@@ -5024,7 +5049,7 @@ int sd_execute_write_data(struct scsi_cmnd *srb, struct rtsx_chip *chip)
if (retval != STATUS_SUCCESS) {
sd_card->sd_lock_status &= ~(SD_UNLOCK_POW_ON | SD_SDR_RST);
rtsx_trace(chip);
- goto SD_Execute_Write_Cmd_Failed;
+ goto sd_execute_write_cmd_failed;
}
}
@@ -5045,7 +5070,7 @@ int sd_execute_write_data(struct scsi_cmnd *srb, struct rtsx_chip *chip)
scsi_set_resid(srb, 0);
return TRANSPORT_GOOD;
-SD_Execute_Write_Cmd_Failed:
+sd_execute_write_cmd_failed:
sd_card->pre_cmd_err = 1;
set_sense_type(chip, lun, SENSE_TYPE_NO_SENSE);
if (write_err)
@@ -5062,7 +5087,7 @@ SD_Execute_Write_Cmd_Failed:
int sd_get_cmd_rsp(struct scsi_cmnd *srb, struct rtsx_chip *chip)
{
- struct sd_info *sd_card = &(chip->sd_card);
+ struct sd_info *sd_card = &chip->sd_card;
unsigned int lun = SCSI_LUN(srb);
int count;
u16 data_len;
@@ -5104,7 +5129,7 @@ int sd_get_cmd_rsp(struct scsi_cmnd *srb, struct rtsx_chip *chip)
int sd_hw_rst(struct scsi_cmnd *srb, struct rtsx_chip *chip)
{
- struct sd_info *sd_card = &(chip->sd_card);
+ struct sd_info *sd_card = &chip->sd_card;
unsigned int lun = SCSI_LUN(srb);
int retval;
@@ -5122,9 +5147,9 @@ int sd_hw_rst(struct scsi_cmnd *srb, struct rtsx_chip *chip)
}
if ((srb->cmnd[2] != 0x53) || (srb->cmnd[3] != 0x44) ||
- (srb->cmnd[4] != 0x20) || (srb->cmnd[5] != 0x43) ||
- (srb->cmnd[6] != 0x61) || (srb->cmnd[7] != 0x72) ||
- (srb->cmnd[8] != 0x64)) {
+ (srb->cmnd[4] != 0x20) || (srb->cmnd[5] != 0x43) ||
+ (srb->cmnd[6] != 0x61) || (srb->cmnd[7] != 0x72) ||
+ (srb->cmnd[8] != 0x64)) {
set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
rtsx_trace(chip);
return TRANSPORT_FAILED;
@@ -5174,7 +5199,7 @@ int sd_hw_rst(struct scsi_cmnd *srb, struct rtsx_chip *chip)
void sd_cleanup_work(struct rtsx_chip *chip)
{
- struct sd_info *sd_card = &(chip->sd_card);
+ struct sd_info *sd_card = &chip->sd_card;
if (sd_card->seq_mode) {
dev_dbg(rtsx_dev(chip), "SD: stop transmission\n");
@@ -5230,7 +5255,7 @@ int sd_power_off_card3v3(struct rtsx_chip *chip)
int release_sd_card(struct rtsx_chip *chip)
{
- struct sd_info *sd_card = &(chip->sd_card);
+ struct sd_info *sd_card = &chip->sd_card;
int retval;
chip->card_ready &= ~SD_CARD;
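
The sd.c hunks above are mechanical checkpatch cleanups: CamelCase goto labels (Switch_Fail, Status_Fail, SD_Execute_Cmd_Failed and friends) become lowercase, wrapped arguments are re-aligned under the opening parenthesis, and &(chip->sd_card) loses its redundant parentheses. Only the failure-path labels are renamed; the entry labels SD_UNLOCK_ENTRY, MMC_UNLOCK_ENTRY, RTY_MMC_RST and RTY_SEND_CMD keep their original case. One context line in sd_execute_write_data — rtsx_write_register(chip, SD_BYTE_CNT_L, 0xFF, 0x00); with no retval = before the following if (retval != STATUS_SUCCESS) check — appears to be a pre-existing bug the patch leaves alone, as a naming/whitespace patch must not change behavior. Below is a minimal compilable sketch of the error-path idiom the renames converge on; STATUS_SUCCESS/STATUS_FAIL mirror the driver's constants, while probe_card() and trace() are stand-ins, not rts5208 functions:

	/*
	 * Sketch only: lowercase goto label with a single
	 * trace-and-return exit, the shape reset_sd()/reset_mmc()
	 * end up with after this patch.
	 */
	#include <stdio.h>

	#define STATUS_SUCCESS	0
	#define STATUS_FAIL	1

	static int probe_card(int step)
	{
		return step < 2 ? STATUS_SUCCESS : STATUS_FAIL; /* fake hw */
	}

	static void trace(const char *where)
	{
		fprintf(stderr, "trace: %s\n", where); /* stands in for rtsx_trace() */
	}

	static int reset_card(void)
	{
		int step;

		for (step = 0; step < 3; step++) {
			if (probe_card(step) != STATUS_SUCCESS)
				goto status_fail;	/* lowercase label */
		}
		return STATUS_SUCCESS;

	status_fail:
		trace(__func__);
		return STATUS_FAIL;
	}

	int main(void)
	{
		return reset_card();
	}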
diff --git a/drivers/staging/rts5208/sd.h b/drivers/staging/rts5208/sd.h
index 60b79280fb5f..55764e16b93a 100644
--- a/drivers/staging/rts5208/sd.h
+++ b/drivers/staging/rts5208/sd.h
@@ -280,14 +280,15 @@ int reset_sd_card(struct rtsx_chip *chip);
int sd_switch_clock(struct rtsx_chip *chip);
void sd_stop_seq_mode(struct rtsx_chip *chip);
int sd_rw(struct scsi_cmnd *srb, struct rtsx_chip *chip,
- u32 start_sector, u16 sector_cnt);
+ u32 start_sector, u16 sector_cnt);
void sd_cleanup_work(struct rtsx_chip *chip);
int sd_power_off_card3v3(struct rtsx_chip *chip);
int release_sd_card(struct rtsx_chip *chip);
#ifdef SUPPORT_CPRM
int soft_reset_sd_card(struct rtsx_chip *chip);
int ext_sd_send_cmd_get_rsp(struct rtsx_chip *chip, u8 cmd_idx,
- u32 arg, u8 rsp_type, u8 *rsp, int rsp_len, bool special_check);
+ u32 arg, u8 rsp_type, u8 *rsp, int rsp_len,
+ bool special_check);
int ext_sd_get_rsp(struct rtsx_chip *chip, int len, u8 *rsp, u8 rsp_type);
int sd_pass_thru_mode(struct scsi_cmnd *srb, struct rtsx_chip *chip);
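
The sd.h hunk re-wraps two prototypes to match the pattern that dominates the whole series: checkpatch's "Alignment should match open parenthesis". When a call or declaration spills onto continuation lines, the spilled arguments sit directly under the opening parenthesis. A small sketch of that convention follows; send_cmd() is a hypothetical stand-in shaped like sd_send_cmd_get_rsp(), and the constant is a placeholder:

	/* Sketch only: continuation arguments aligned to the open paren. */
	#include <stddef.h>

	#define SD_RSP_TYPE_R1	0x05	/* placeholder value */

	static int send_cmd(void *chip, unsigned char idx, unsigned int arg,
			    unsigned char rsp_type, unsigned char *rsp, int len)
	{
		(void)chip; (void)idx; (void)arg;
		(void)rsp_type; (void)rsp; (void)len;
		return 0;
	}

	int example(void *chip, unsigned int addr)
	{
		/* The wrapped arguments line up under send_cmd's '('. */
		return send_cmd(chip, 55, addr,
				SD_RSP_TYPE_R1, NULL, 0);
	}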
diff --git a/drivers/staging/rts5208/spi.c b/drivers/staging/rts5208/spi.c
index 13c539c83838..8b8cd955dfeb 100644
--- a/drivers/staging/rts5208/spi.c
+++ b/drivers/staging/rts5208/spi.c
@@ -29,7 +29,7 @@
static inline void spi_set_err_code(struct rtsx_chip *chip, u8 err_code)
{
- struct spi_info *spi = &(chip->spi);
+ struct spi_info *spi = &chip->spi;
spi->err_code = err_code;
}
@@ -57,7 +57,7 @@ static int spi_init(struct rtsx_chip *chip)
static int spi_set_init_para(struct rtsx_chip *chip)
{
- struct spi_info *spi = &(chip->spi);
+ struct spi_info *spi = &chip->spi;
int retval;
retval = rtsx_write_register(chip, SPI_CLK_DIVIDER1, 0xFF,
@@ -117,9 +117,9 @@ static int sf_polling_status(struct rtsx_chip *chip, int msec)
rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_COMMAND, 0xFF, SPI_RDSR);
rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_TRANSFER0, 0xFF,
- SPI_TRANSFER0_START | SPI_POLLING_MODE0);
+ SPI_TRANSFER0_START | SPI_POLLING_MODE0);
rtsx_add_cmd(chip, CHECK_REG_CMD, SPI_TRANSFER0, SPI_TRANSFER0_END,
- SPI_TRANSFER0_END);
+ SPI_TRANSFER0_END);
retval = rtsx_send_cmd(chip, 0, msec);
if (retval < 0) {
@@ -134,7 +134,7 @@ static int sf_polling_status(struct rtsx_chip *chip, int msec)
static int sf_enable_write(struct rtsx_chip *chip, u8 ins)
{
- struct spi_info *spi = &(chip->spi);
+ struct spi_info *spi = &chip->spi;
int retval;
if (!spi->write_en)
@@ -144,11 +144,11 @@ static int sf_enable_write(struct rtsx_chip *chip, u8 ins)
rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_COMMAND, 0xFF, ins);
rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_CA_NUMBER, 0xFF,
- SPI_COMMAND_BIT_8 | SPI_ADDRESS_BIT_24);
+ SPI_COMMAND_BIT_8 | SPI_ADDRESS_BIT_24);
rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_TRANSFER0, 0xFF,
- SPI_TRANSFER0_START | SPI_C_MODE0);
+ SPI_TRANSFER0_START | SPI_C_MODE0);
rtsx_add_cmd(chip, CHECK_REG_CMD, SPI_TRANSFER0, SPI_TRANSFER0_END,
- SPI_TRANSFER0_END);
+ SPI_TRANSFER0_END);
retval = rtsx_send_cmd(chip, 0, 100);
if (retval < 0) {
@@ -163,7 +163,7 @@ static int sf_enable_write(struct rtsx_chip *chip, u8 ins)
static int sf_disable_write(struct rtsx_chip *chip, u8 ins)
{
- struct spi_info *spi = &(chip->spi);
+ struct spi_info *spi = &chip->spi;
int retval;
if (!spi->write_en)
@@ -173,11 +173,11 @@ static int sf_disable_write(struct rtsx_chip *chip, u8 ins)
rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_COMMAND, 0xFF, ins);
rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_CA_NUMBER, 0xFF,
- SPI_COMMAND_BIT_8 | SPI_ADDRESS_BIT_24);
+ SPI_COMMAND_BIT_8 | SPI_ADDRESS_BIT_24);
rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_TRANSFER0, 0xFF,
- SPI_TRANSFER0_START | SPI_C_MODE0);
+ SPI_TRANSFER0_START | SPI_C_MODE0);
rtsx_add_cmd(chip, CHECK_REG_CMD, SPI_TRANSFER0, SPI_TRANSFER0_END,
- SPI_TRANSFER0_END);
+ SPI_TRANSFER0_END);
retval = rtsx_send_cmd(chip, 0, 100);
if (retval < 0) {
@@ -191,27 +191,27 @@ static int sf_disable_write(struct rtsx_chip *chip, u8 ins)
}
static void sf_program(struct rtsx_chip *chip, u8 ins, u8 addr_mode, u32 addr,
- u16 len)
+ u16 len)
{
rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_COMMAND, 0xFF, ins);
rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_CA_NUMBER, 0xFF,
- SPI_COMMAND_BIT_8 | SPI_ADDRESS_BIT_24);
+ SPI_COMMAND_BIT_8 | SPI_ADDRESS_BIT_24);
rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_LENGTH0, 0xFF, (u8)len);
rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_LENGTH1, 0xFF, (u8)(len >> 8));
if (addr_mode) {
rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_ADDR0, 0xFF, (u8)addr);
rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_ADDR1, 0xFF,
- (u8)(addr >> 8));
+ (u8)(addr >> 8));
rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_ADDR2, 0xFF,
- (u8)(addr >> 16));
+ (u8)(addr >> 16));
rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_TRANSFER0, 0xFF,
- SPI_TRANSFER0_START | SPI_CADO_MODE0);
+ SPI_TRANSFER0_START | SPI_CADO_MODE0);
} else {
rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_TRANSFER0, 0xFF,
- SPI_TRANSFER0_START | SPI_CDO_MODE0);
+ SPI_TRANSFER0_START | SPI_CDO_MODE0);
}
rtsx_add_cmd(chip, CHECK_REG_CMD, SPI_TRANSFER0, SPI_TRANSFER0_END,
- SPI_TRANSFER0_END);
+ SPI_TRANSFER0_END);
}
static int sf_erase(struct rtsx_chip *chip, u8 ins, u8 addr_mode, u32 addr)
@@ -222,21 +222,21 @@ static int sf_erase(struct rtsx_chip *chip, u8 ins, u8 addr_mode, u32 addr)
rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_COMMAND, 0xFF, ins);
rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_CA_NUMBER, 0xFF,
- SPI_COMMAND_BIT_8 | SPI_ADDRESS_BIT_24);
+ SPI_COMMAND_BIT_8 | SPI_ADDRESS_BIT_24);
if (addr_mode) {
rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_ADDR0, 0xFF, (u8)addr);
rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_ADDR1, 0xFF,
- (u8)(addr >> 8));
+ (u8)(addr >> 8));
rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_ADDR2, 0xFF,
- (u8)(addr >> 16));
+ (u8)(addr >> 16));
rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_TRANSFER0, 0xFF,
- SPI_TRANSFER0_START | SPI_CA_MODE0);
+ SPI_TRANSFER0_START | SPI_CA_MODE0);
} else {
rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_TRANSFER0, 0xFF,
- SPI_TRANSFER0_START | SPI_C_MODE0);
+ SPI_TRANSFER0_START | SPI_C_MODE0);
}
rtsx_add_cmd(chip, CHECK_REG_CMD, SPI_TRANSFER0, SPI_TRANSFER0_END,
- SPI_TRANSFER0_END);
+ SPI_TRANSFER0_END);
retval = rtsx_send_cmd(chip, 0, 100);
if (retval < 0) {
@@ -322,9 +322,9 @@ static int spi_eeprom_program_enable(struct rtsx_chip *chip)
rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_CA_NUMBER, 0xFF, 0x86);
rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_COMMAND, 0xFF, 0x13);
rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_TRANSFER0, 0xFF,
- SPI_TRANSFER0_START | SPI_CA_MODE0);
+ SPI_TRANSFER0_START | SPI_CA_MODE0);
rtsx_add_cmd(chip, CHECK_REG_CMD, SPI_TRANSFER0, SPI_TRANSFER0_END,
- SPI_TRANSFER0_END);
+ SPI_TRANSFER0_END);
retval = rtsx_send_cmd(chip, 0, 100);
if (retval < 0) {
@@ -358,9 +358,9 @@ int spi_erase_eeprom_chip(struct rtsx_chip *chip)
rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_COMMAND, 0xFF, 0x12);
rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_CA_NUMBER, 0xFF, 0x84);
rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_TRANSFER0, 0xFF,
- SPI_TRANSFER0_START | SPI_CA_MODE0);
+ SPI_TRANSFER0_START | SPI_CA_MODE0);
rtsx_add_cmd(chip, CHECK_REG_CMD, SPI_TRANSFER0, SPI_TRANSFER0_END,
- SPI_TRANSFER0_END);
+ SPI_TRANSFER0_END);
retval = rtsx_send_cmd(chip, 0, 100);
if (retval < 0) {
@@ -402,9 +402,9 @@ int spi_erase_eeprom_byte(struct rtsx_chip *chip, u16 addr)
rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_ADDR1, 0xFF, (u8)(addr >> 8));
rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_CA_NUMBER, 0xFF, 0x46);
rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_TRANSFER0, 0xFF,
- SPI_TRANSFER0_START | SPI_CA_MODE0);
+ SPI_TRANSFER0_START | SPI_CA_MODE0);
rtsx_add_cmd(chip, CHECK_REG_CMD, SPI_TRANSFER0, SPI_TRANSFER0_END,
- SPI_TRANSFER0_END);
+ SPI_TRANSFER0_END);
retval = rtsx_send_cmd(chip, 0, 100);
if (retval < 0) {
@@ -442,9 +442,9 @@ int spi_read_eeprom(struct rtsx_chip *chip, u16 addr, u8 *val)
rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_CA_NUMBER, 0xFF, 0x46);
rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_LENGTH0, 0xFF, 1);
rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_TRANSFER0, 0xFF,
- SPI_TRANSFER0_START | SPI_CADI_MODE0);
+ SPI_TRANSFER0_START | SPI_CADI_MODE0);
rtsx_add_cmd(chip, CHECK_REG_CMD, SPI_TRANSFER0, SPI_TRANSFER0_END,
- SPI_TRANSFER0_END);
+ SPI_TRANSFER0_END);
retval = rtsx_send_cmd(chip, 0, 100);
if (retval < 0) {
@@ -497,9 +497,9 @@ int spi_write_eeprom(struct rtsx_chip *chip, u16 addr, u8 val)
rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_ADDR2, 0xFF, (u8)(addr >> 8));
rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_CA_NUMBER, 0xFF, 0x4E);
rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_TRANSFER0, 0xFF,
- SPI_TRANSFER0_START | SPI_CA_MODE0);
+ SPI_TRANSFER0_START | SPI_CA_MODE0);
rtsx_add_cmd(chip, CHECK_REG_CMD, SPI_TRANSFER0, SPI_TRANSFER0_END,
- SPI_TRANSFER0_END);
+ SPI_TRANSFER0_END);
retval = rtsx_send_cmd(chip, 0, 100);
if (retval < 0) {
@@ -518,12 +518,12 @@ int spi_write_eeprom(struct rtsx_chip *chip, u16 addr, u8 val)
int spi_get_status(struct scsi_cmnd *srb, struct rtsx_chip *chip)
{
- struct spi_info *spi = &(chip->spi);
+ struct spi_info *spi = &chip->spi;
dev_dbg(rtsx_dev(chip), "spi_get_status: err_code = 0x%x\n",
spi->err_code);
- rtsx_stor_set_xfer_buf(&(spi->err_code),
- min_t(int, scsi_bufflen(srb), 1), srb);
+ rtsx_stor_set_xfer_buf(&spi->err_code,
+ min_t(int, scsi_bufflen(srb), 1), srb);
scsi_set_resid(srb, scsi_bufflen(srb) - 1);
return STATUS_SUCCESS;
@@ -531,7 +531,7 @@ int spi_get_status(struct scsi_cmnd *srb, struct rtsx_chip *chip)
int spi_set_parameter(struct scsi_cmnd *srb, struct rtsx_chip *chip)
{
- struct spi_info *spi = &(chip->spi);
+ struct spi_info *spi = &chip->spi;
spi_set_err_code(chip, SPI_NO_ERR);
@@ -574,37 +574,37 @@ int spi_read_flash_id(struct scsi_cmnd *srb, struct rtsx_chip *chip)
rtsx_init_cmd(chip);
rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_DATA_SOURCE, 0x01,
- PINGPONG_BUFFER);
+ PINGPONG_BUFFER);
rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_COMMAND, 0xFF, srb->cmnd[3]);
rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_ADDR2, 0xFF, srb->cmnd[4]);
rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_ADDR1, 0xFF, srb->cmnd[5]);
rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_ADDR0, 0xFF, srb->cmnd[6]);
rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_CA_NUMBER, 0xFF,
- SPI_COMMAND_BIT_8 | SPI_ADDRESS_BIT_24);
+ SPI_COMMAND_BIT_8 | SPI_ADDRESS_BIT_24);
rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_LENGTH1, 0xFF, srb->cmnd[7]);
rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_LENGTH0, 0xFF, srb->cmnd[8]);
if (len == 0) {
if (srb->cmnd[9]) {
rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_TRANSFER0,
- 0xFF, SPI_TRANSFER0_START | SPI_CA_MODE0);
+ 0xFF, SPI_TRANSFER0_START | SPI_CA_MODE0);
} else {
rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_TRANSFER0,
- 0xFF, SPI_TRANSFER0_START | SPI_C_MODE0);
+ 0xFF, SPI_TRANSFER0_START | SPI_C_MODE0);
}
} else {
if (srb->cmnd[9]) {
rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_TRANSFER0, 0xFF,
- SPI_TRANSFER0_START | SPI_CADI_MODE0);
+ SPI_TRANSFER0_START | SPI_CADI_MODE0);
} else {
rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_TRANSFER0, 0xFF,
- SPI_TRANSFER0_START | SPI_CDI_MODE0);
+ SPI_TRANSFER0_START | SPI_CDI_MODE0);
}
}
rtsx_add_cmd(chip, CHECK_REG_CMD, SPI_TRANSFER0, SPI_TRANSFER0_END,
- SPI_TRANSFER0_END);
+ SPI_TRANSFER0_END);
retval = rtsx_send_cmd(chip, 0, 100);
if (retval < 0) {
@@ -682,38 +682,38 @@ int spi_read_flash(struct scsi_cmnd *srb, struct rtsx_chip *chip)
if (slow_read) {
rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_ADDR0, 0xFF,
- (u8)addr);
+ (u8)addr);
rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_ADDR1, 0xFF,
- (u8)(addr >> 8));
+ (u8)(addr >> 8));
rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_ADDR2, 0xFF,
- (u8)(addr >> 16));
+ (u8)(addr >> 16));
rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_CA_NUMBER, 0xFF,
- SPI_COMMAND_BIT_8 | SPI_ADDRESS_BIT_24);
+ SPI_COMMAND_BIT_8 | SPI_ADDRESS_BIT_24);
} else {
rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_ADDR1, 0xFF,
- (u8)addr);
+ (u8)addr);
rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_ADDR2, 0xFF,
- (u8)(addr >> 8));
+ (u8)(addr >> 8));
rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_ADDR3, 0xFF,
- (u8)(addr >> 16));
+ (u8)(addr >> 16));
rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_CA_NUMBER, 0xFF,
- SPI_COMMAND_BIT_8 | SPI_ADDRESS_BIT_32);
+ SPI_COMMAND_BIT_8 | SPI_ADDRESS_BIT_32);
}
rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_LENGTH1, 0xFF,
- (u8)(pagelen >> 8));
+ (u8)(pagelen >> 8));
rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_LENGTH0, 0xFF,
- (u8)pagelen);
+ (u8)pagelen);
rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_TRANSFER0, 0xFF,
- SPI_TRANSFER0_START | SPI_CADI_MODE0);
+ SPI_TRANSFER0_START | SPI_CADI_MODE0);
rtsx_add_cmd(chip, CHECK_REG_CMD, SPI_TRANSFER0,
- SPI_TRANSFER0_END, SPI_TRANSFER0_END);
+ SPI_TRANSFER0_END, SPI_TRANSFER0_END);
rtsx_send_cmd_no_wait(chip);
retval = rtsx_transfer_data(chip, 0, buf, pagelen, 0,
- DMA_FROM_DEVICE, 10000);
+ DMA_FROM_DEVICE, 10000);
if (retval < 0) {
kfree(buf);
rtsx_clear_spi_error(chip);
@@ -723,7 +723,7 @@ int spi_read_flash(struct scsi_cmnd *srb, struct rtsx_chip *chip)
}
rtsx_stor_access_xfer_buf(buf, pagelen, srb, &index, &offset,
- TO_XFER_BUF);
+ TO_XFER_BUF);
addr += pagelen;
len -= pagelen;
@@ -775,14 +775,14 @@ int spi_write_flash(struct scsi_cmnd *srb, struct rtsx_chip *chip)
}
rtsx_stor_access_xfer_buf(buf, 1, srb, &index, &offset,
- FROM_XFER_BUF);
+ FROM_XFER_BUF);
rtsx_init_cmd(chip);
rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_DATA_SOURCE,
- 0x01, PINGPONG_BUFFER);
+ 0x01, PINGPONG_BUFFER);
rtsx_add_cmd(chip, WRITE_REG_CMD, PPBUF_BASE2, 0xFF,
- buf[0]);
+ buf[0]);
sf_program(chip, ins, 1, addr, 1);
retval = rtsx_send_cmd(chip, 0, 100);
@@ -824,14 +824,14 @@ int spi_write_flash(struct scsi_cmnd *srb, struct rtsx_chip *chip)
while (len) {
rtsx_stor_access_xfer_buf(buf, 1, srb, &index, &offset,
- FROM_XFER_BUF);
+ FROM_XFER_BUF);
rtsx_init_cmd(chip);
rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_DATA_SOURCE,
- 0x01, PINGPONG_BUFFER);
+ 0x01, PINGPONG_BUFFER);
rtsx_add_cmd(chip, WRITE_REG_CMD, PPBUF_BASE2, 0xFF,
- buf[0]);
+ buf[0]);
if (first_byte) {
sf_program(chip, ins, 1, addr, 1);
first_byte = 0;
@@ -899,10 +899,10 @@ int spi_write_flash(struct scsi_cmnd *srb, struct rtsx_chip *chip)
rtsx_send_cmd_no_wait(chip);
rtsx_stor_access_xfer_buf(buf, pagelen, srb, &index,
- &offset, FROM_XFER_BUF);
+ &offset, FROM_XFER_BUF);
retval = rtsx_transfer_data(chip, 0, buf, pagelen, 0,
- DMA_TO_DEVICE, 100);
+ DMA_TO_DEVICE, 100);
if (retval < 0) {
kfree(buf);
rtsx_clear_spi_error(chip);
@@ -1010,18 +1010,18 @@ int spi_write_flash_status(struct scsi_cmnd *srb, struct rtsx_chip *chip)
rtsx_init_cmd(chip);
rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_DATA_SOURCE, 0x01,
- PINGPONG_BUFFER);
+ PINGPONG_BUFFER);
rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_COMMAND, 0xFF, ins);
rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_CA_NUMBER, 0xFF,
- SPI_COMMAND_BIT_8 | SPI_ADDRESS_BIT_24);
+ SPI_COMMAND_BIT_8 | SPI_ADDRESS_BIT_24);
rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_LENGTH1, 0xFF, 0);
rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_LENGTH0, 0xFF, 1);
rtsx_add_cmd(chip, WRITE_REG_CMD, PPBUF_BASE2, 0xFF, status);
rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_TRANSFER0, 0xFF,
- SPI_TRANSFER0_START | SPI_CDO_MODE0);
+ SPI_TRANSFER0_START | SPI_CDO_MODE0);
rtsx_add_cmd(chip, CHECK_REG_CMD, SPI_TRANSFER0, SPI_TRANSFER0_END,
- SPI_TRANSFER0_END);
+ SPI_TRANSFER0_END);
retval = rtsx_send_cmd(chip, 0, 100);
if (retval != STATUS_SUCCESS) {
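
spi.c repeats two of the same cleanups: continuation-line re-alignment and &(chip->spi) becoming &chip->spi. The latter is safe because postfix -> binds tighter than unary &, so the parentheses never affected evaluation; checkpatch flags them as unneeded. A compilable sketch, with struct chip as a cut-down stand-in for rtsx_chip:

	/* Sketch only: both forms take the address of the same member. */
	#include <assert.h>

	struct spi_info { int err_code; };
	struct chip { struct spi_info spi; };

	int main(void)
	{
		struct chip c = { .spi = { .err_code = 0 } };
		struct chip *chip = &c;
		struct spi_info *a = &(chip->spi);	/* old form */
		struct spi_info *b = &chip->spi;	/* new form, same address */

		assert(a == b);
		return 0;
	}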
diff --git a/drivers/staging/rts5208/xd.c b/drivers/staging/rts5208/xd.c
index 1de02bb98839..85aba05acbc1 100644
--- a/drivers/staging/rts5208/xd.c
+++ b/drivers/staging/rts5208/xd.c
@@ -37,21 +37,21 @@ static int xd_init_page(struct rtsx_chip *chip, u32 phy_blk, u16 logoff,
static inline void xd_set_err_code(struct rtsx_chip *chip, u8 err_code)
{
- struct xd_info *xd_card = &(chip->xd_card);
+ struct xd_info *xd_card = &chip->xd_card;
xd_card->err_code = err_code;
}
static inline int xd_check_err_code(struct rtsx_chip *chip, u8 err_code)
{
- struct xd_info *xd_card = &(chip->xd_card);
+ struct xd_info *xd_card = &chip->xd_card;
return (xd_card->err_code == err_code);
}
static int xd_set_init_para(struct rtsx_chip *chip)
{
- struct xd_info *xd_card = &(chip->xd_card);
+ struct xd_info *xd_card = &chip->xd_card;
int retval;
if (chip->asic_code)
@@ -70,7 +70,7 @@ static int xd_set_init_para(struct rtsx_chip *chip)
static int xd_switch_clock(struct rtsx_chip *chip)
{
- struct xd_info *xd_card = &(chip->xd_card);
+ struct xd_info *xd_card = &chip->xd_card;
int retval;
retval = select_card(chip, XD_CARD);
@@ -97,9 +97,9 @@ static int xd_read_id(struct rtsx_chip *chip, u8 id_cmd, u8 *id_buf, u8 buf_len)
rtsx_add_cmd(chip, WRITE_REG_CMD, XD_DAT, 0xFF, id_cmd);
rtsx_add_cmd(chip, WRITE_REG_CMD, XD_TRANSFER, 0xFF,
- XD_TRANSFER_START | XD_READ_ID);
+ XD_TRANSFER_START | XD_READ_ID);
rtsx_add_cmd(chip, CHECK_REG_CMD, XD_TRANSFER, XD_TRANSFER_END,
- XD_TRANSFER_END);
+ XD_TRANSFER_END);
for (i = 0; i < 4; i++)
rtsx_add_cmd(chip, READ_REG_CMD, (u16)(XD_ADDRESS1 + i), 0, 0);
@@ -122,28 +122,30 @@ static int xd_read_id(struct rtsx_chip *chip, u8 id_cmd, u8 *id_buf, u8 buf_len)
static void xd_assign_phy_addr(struct rtsx_chip *chip, u32 addr, u8 mode)
{
- struct xd_info *xd_card = &(chip->xd_card);
+ struct xd_info *xd_card = &chip->xd_card;
switch (mode) {
case XD_RW_ADDR:
rtsx_add_cmd(chip, WRITE_REG_CMD, XD_ADDRESS0, 0xFF, 0);
rtsx_add_cmd(chip, WRITE_REG_CMD, XD_ADDRESS1, 0xFF, (u8)addr);
rtsx_add_cmd(chip, WRITE_REG_CMD, XD_ADDRESS2,
- 0xFF, (u8)(addr >> 8));
+ 0xFF, (u8)(addr >> 8));
rtsx_add_cmd(chip, WRITE_REG_CMD, XD_ADDRESS3,
- 0xFF, (u8)(addr >> 16));
+ 0xFF, (u8)(addr >> 16));
rtsx_add_cmd(chip, WRITE_REG_CMD, XD_CFG, 0xFF,
- xd_card->addr_cycle | XD_CALC_ECC | XD_BA_NO_TRANSFORM);
+ xd_card->addr_cycle |
+ XD_CALC_ECC |
+ XD_BA_NO_TRANSFORM);
break;
case XD_ERASE_ADDR:
rtsx_add_cmd(chip, WRITE_REG_CMD, XD_ADDRESS0, 0xFF, (u8)addr);
rtsx_add_cmd(chip, WRITE_REG_CMD, XD_ADDRESS1,
- 0xFF, (u8)(addr >> 8));
+ 0xFF, (u8)(addr >> 8));
rtsx_add_cmd(chip, WRITE_REG_CMD, XD_ADDRESS2,
- 0xFF, (u8)(addr >> 16));
+ 0xFF, (u8)(addr >> 16));
rtsx_add_cmd(chip, WRITE_REG_CMD, XD_CFG, 0xFF,
- (xd_card->addr_cycle - 1) | XD_CALC_ECC |
+ (xd_card->addr_cycle - 1) | XD_CALC_ECC |
XD_BA_NO_TRANSFORM);
break;
@@ -153,7 +155,7 @@ static void xd_assign_phy_addr(struct rtsx_chip *chip, u32 addr, u8 mode)
}
static int xd_read_redundant(struct rtsx_chip *chip, u32 page_addr,
- u8 *buf, int buf_len)
+ u8 *buf, int buf_len)
{
int retval, i;
@@ -162,16 +164,16 @@ static int xd_read_redundant(struct rtsx_chip *chip, u32 page_addr,
xd_assign_phy_addr(chip, page_addr, XD_RW_ADDR);
rtsx_add_cmd(chip, WRITE_REG_CMD, XD_TRANSFER,
- 0xFF, XD_TRANSFER_START | XD_READ_REDUNDANT);
+ 0xFF, XD_TRANSFER_START | XD_READ_REDUNDANT);
rtsx_add_cmd(chip, CHECK_REG_CMD, XD_TRANSFER,
- XD_TRANSFER_END, XD_TRANSFER_END);
+ XD_TRANSFER_END, XD_TRANSFER_END);
for (i = 0; i < 6; i++)
rtsx_add_cmd(chip, READ_REG_CMD, (u16)(XD_PAGE_STATUS + i),
- 0, 0);
+ 0, 0);
for (i = 0; i < 4; i++)
rtsx_add_cmd(chip, READ_REG_CMD, (u16)(XD_RESERVED0 + i),
- 0, 0);
+ 0, 0);
rtsx_add_cmd(chip, READ_REG_CMD, XD_PARITY, 0, 0);
retval = rtsx_send_cmd(chip, XD_CARD, 500);
@@ -192,7 +194,7 @@ static int xd_read_redundant(struct rtsx_chip *chip, u32 page_addr,
}
static int xd_read_data_from_ppb(struct rtsx_chip *chip, int offset,
- u8 *buf, int buf_len)
+ u8 *buf, int buf_len)
{
int retval, i;
@@ -205,7 +207,7 @@ static int xd_read_data_from_ppb(struct rtsx_chip *chip, int offset,
for (i = 0; i < buf_len; i++)
rtsx_add_cmd(chip, READ_REG_CMD, PPBUF_BASE2 + offset + i,
- 0, 0);
+ 0, 0);
retval = rtsx_send_cmd(chip, 0, 250);
if (retval < 0) {
@@ -220,7 +222,7 @@ static int xd_read_data_from_ppb(struct rtsx_chip *chip, int offset,
}
static int xd_read_cis(struct rtsx_chip *chip, u32 page_addr, u8 *buf,
- int buf_len)
+ int buf_len)
{
int retval;
u8 reg;
@@ -235,15 +237,15 @@ static int xd_read_cis(struct rtsx_chip *chip, u32 page_addr, u8 *buf,
xd_assign_phy_addr(chip, page_addr, XD_RW_ADDR);
rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_DATA_SOURCE,
- 0x01, PINGPONG_BUFFER);
+ 0x01, PINGPONG_BUFFER);
rtsx_add_cmd(chip, WRITE_REG_CMD, XD_PAGE_CNT, 0xFF, 1);
rtsx_add_cmd(chip, WRITE_REG_CMD, XD_CHK_DATA_STATUS,
- XD_AUTO_CHK_DATA_STATUS, XD_AUTO_CHK_DATA_STATUS);
+ XD_AUTO_CHK_DATA_STATUS, XD_AUTO_CHK_DATA_STATUS);
rtsx_add_cmd(chip, WRITE_REG_CMD, XD_TRANSFER, 0xFF,
- XD_TRANSFER_START | XD_READ_PAGES);
+ XD_TRANSFER_START | XD_READ_PAGES);
rtsx_add_cmd(chip, CHECK_REG_CMD, XD_TRANSFER, XD_TRANSFER_END,
- XD_TRANSFER_END);
+ XD_TRANSFER_END);
retval = rtsx_send_cmd(chip, XD_CARD, 250);
if (retval == -ETIMEDOUT) {
@@ -347,27 +349,27 @@ static void xd_fill_pull_ctl_disable(struct rtsx_chip *chip)
{
if (CHECK_PID(chip, 0x5208)) {
rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL1, 0xFF,
- XD_D3_PD | XD_D2_PD | XD_D1_PD | XD_D0_PD);
+ XD_D3_PD | XD_D2_PD | XD_D1_PD | XD_D0_PD);
rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL2, 0xFF,
- XD_D7_PD | XD_D6_PD | XD_D5_PD | XD_D4_PD);
+ XD_D7_PD | XD_D6_PD | XD_D5_PD | XD_D4_PD);
rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL3, 0xFF,
- XD_WP_PD | XD_CE_PD | XD_CLE_PD | XD_CD_PU);
+ XD_WP_PD | XD_CE_PD | XD_CLE_PD | XD_CD_PU);
rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL4, 0xFF,
- XD_RDY_PD | XD_WE_PD | XD_RE_PD | XD_ALE_PD);
+ XD_RDY_PD | XD_WE_PD | XD_RE_PD | XD_ALE_PD);
rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL5, 0xFF,
- MS_INS_PU | SD_WP_PD | SD_CD_PU | SD_CMD_PD);
+ MS_INS_PU | SD_WP_PD | SD_CD_PU | SD_CMD_PD);
rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL6, 0xFF,
- MS_D5_PD | MS_D4_PD);
+ MS_D5_PD | MS_D4_PD);
} else if (CHECK_PID(chip, 0x5288)) {
if (CHECK_BARO_PKG(chip, QFN)) {
rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL1,
- 0xFF, 0x55);
+ 0xFF, 0x55);
rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL2,
- 0xFF, 0x55);
+ 0xFF, 0x55);
rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL3,
- 0xFF, 0x4B);
+ 0xFF, 0x4B);
rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL4,
- 0xFF, 0x69);
+ 0xFF, 0x69);
}
}
}
@@ -386,27 +388,27 @@ static void xd_fill_pull_ctl_enable(struct rtsx_chip *chip)
{
if (CHECK_PID(chip, 0x5208)) {
rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL1, 0xFF,
- XD_D3_PD | XD_D2_PD | XD_D1_PD | XD_D0_PD);
+ XD_D3_PD | XD_D2_PD | XD_D1_PD | XD_D0_PD);
rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL2, 0xFF,
- XD_D7_PD | XD_D6_PD | XD_D5_PD | XD_D4_PD);
+ XD_D7_PD | XD_D6_PD | XD_D5_PD | XD_D4_PD);
rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL3, 0xFF,
- XD_WP_PD | XD_CE_PU | XD_CLE_PD | XD_CD_PU);
+ XD_WP_PD | XD_CE_PU | XD_CLE_PD | XD_CD_PU);
rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL4, 0xFF,
- XD_RDY_PU | XD_WE_PU | XD_RE_PU | XD_ALE_PD);
+ XD_RDY_PU | XD_WE_PU | XD_RE_PU | XD_ALE_PD);
rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL5, 0xFF,
- MS_INS_PU | SD_WP_PD | SD_CD_PU | SD_CMD_PD);
+ MS_INS_PU | SD_WP_PD | SD_CD_PU | SD_CMD_PD);
rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL6, 0xFF,
- MS_D5_PD | MS_D4_PD);
+ MS_D5_PD | MS_D4_PD);
} else if (CHECK_PID(chip, 0x5288)) {
if (CHECK_BARO_PKG(chip, QFN)) {
rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL1,
- 0xFF, 0x55);
+ 0xFF, 0x55);
rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL2,
- 0xFF, 0x55);
+ 0xFF, 0x55);
rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL3,
- 0xFF, 0x53);
+ 0xFF, 0x53);
rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL4,
- 0xFF, 0xA9);
+ 0xFF, 0xA9);
}
}
}
@@ -417,31 +419,46 @@ static int xd_pull_ctl_disable(struct rtsx_chip *chip)
if (CHECK_PID(chip, 0x5208)) {
retval = rtsx_write_register(chip, CARD_PULL_CTL1, 0xFF,
- XD_D3_PD | XD_D2_PD | XD_D1_PD | XD_D0_PD);
+ XD_D3_PD |
+ XD_D2_PD |
+ XD_D1_PD |
+ XD_D0_PD);
if (retval) {
rtsx_trace(chip);
return retval;
}
retval = rtsx_write_register(chip, CARD_PULL_CTL2, 0xFF,
- XD_D7_PD | XD_D6_PD | XD_D5_PD | XD_D4_PD);
+ XD_D7_PD |
+ XD_D6_PD |
+ XD_D5_PD |
+ XD_D4_PD);
if (retval) {
rtsx_trace(chip);
return retval;
}
retval = rtsx_write_register(chip, CARD_PULL_CTL3, 0xFF,
- XD_WP_PD | XD_CE_PD | XD_CLE_PD | XD_CD_PU);
+ XD_WP_PD |
+ XD_CE_PD |
+ XD_CLE_PD |
+ XD_CD_PU);
if (retval) {
rtsx_trace(chip);
return retval;
}
retval = rtsx_write_register(chip, CARD_PULL_CTL4, 0xFF,
- XD_RDY_PD | XD_WE_PD | XD_RE_PD | XD_ALE_PD);
+ XD_RDY_PD |
+ XD_WE_PD |
+ XD_RE_PD |
+ XD_ALE_PD);
if (retval) {
rtsx_trace(chip);
return retval;
}
retval = rtsx_write_register(chip, CARD_PULL_CTL5, 0xFF,
- MS_INS_PU | SD_WP_PD | SD_CD_PU | SD_CMD_PD);
+ MS_INS_PU |
+ SD_WP_PD |
+ SD_CD_PU |
+ SD_CMD_PD);
if (retval) {
rtsx_trace(chip);
return retval;
@@ -486,7 +503,7 @@ static int xd_pull_ctl_disable(struct rtsx_chip *chip)
static int reset_xd(struct rtsx_chip *chip)
{
- struct xd_info *xd_card = &(chip->xd_card);
+ struct xd_info *xd_card = &chip->xd_card;
int retval, i, j;
u8 *ptr, id_buf[4], redunt[11];
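
The series also strips the redundant parentheses from expressions like &(chip->xd_card): member access binds tighter than unary '&', so the parentheses change nothing and checkpatch flags them as unnecessary. A self-contained illustration (the struct layout here is invented, not the driver's):

#include <stdio.h>

struct info { int id; };
struct chip { struct info xd_card; };

int main(void)
{
        struct chip c = { .xd_card = { .id = 42 } };

        /* '.' and '->' bind tighter than unary '&', so both forms
         * take the address of the same member.
         */
        struct info *a = &(c.xd_card);
        struct info *b = &c.xd_card;

        printf("%d %d\n", a == b, b->id);       /* prints "1 42" */
        return 0;
}
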
@@ -499,7 +516,7 @@ static int reset_xd(struct rtsx_chip *chip)
rtsx_init_cmd(chip);
rtsx_add_cmd(chip, WRITE_REG_CMD, XD_CHK_DATA_STATUS, 0xFF,
- XD_PGSTS_NOT_FF);
+ XD_PGSTS_NOT_FF);
if (chip->asic_code) {
if (!CHECK_PID(chip, 0x5288))
xd_fill_pull_ctl_disable(chip);
@@ -507,12 +524,13 @@ static int reset_xd(struct rtsx_chip *chip)
xd_fill_pull_ctl_stage1_barossa(chip);
} else {
rtsx_add_cmd(chip, WRITE_REG_CMD, FPGA_PULL_CTL, 0xFF,
- (FPGA_XD_PULL_CTL_EN1 & FPGA_XD_PULL_CTL_EN3) | 0x20);
+ (FPGA_XD_PULL_CTL_EN1 & FPGA_XD_PULL_CTL_EN3) |
+ 0x20);
}
if (!chip->ft2_fast_mode)
rtsx_add_cmd(chip, WRITE_REG_CMD, XD_INIT,
- XD_NO_AUTO_PWR_OFF, 0);
+ XD_NO_AUTO_PWR_OFF, 0);
rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_OE, XD_OUTPUT_EN, 0);
@@ -537,8 +555,9 @@ static int reset_xd(struct rtsx_chip *chip)
xd_fill_pull_ctl_enable(chip);
} else {
rtsx_add_cmd(chip, WRITE_REG_CMD, FPGA_PULL_CTL, 0xFF,
- (FPGA_XD_PULL_CTL_EN1 & FPGA_XD_PULL_CTL_EN2) |
- 0x20);
+ (FPGA_XD_PULL_CTL_EN1 &
+ FPGA_XD_PULL_CTL_EN2) |
+ 0x20);
}
retval = rtsx_send_cmd(chip, XD_CARD, 100);
@@ -571,8 +590,9 @@ static int reset_xd(struct rtsx_chip *chip)
xd_fill_pull_ctl_enable(chip);
} else {
rtsx_add_cmd(chip, WRITE_REG_CMD, FPGA_PULL_CTL, 0xFF,
- (FPGA_XD_PULL_CTL_EN1 & FPGA_XD_PULL_CTL_EN2) |
- 0x20);
+ (FPGA_XD_PULL_CTL_EN1 &
+ FPGA_XD_PULL_CTL_EN2) |
+ 0x20);
}
}
@@ -599,16 +619,17 @@ static int reset_xd(struct rtsx_chip *chip)
rtsx_init_cmd(chip);
rtsx_add_cmd(chip, WRITE_REG_CMD, XD_DTCTL, 0xFF,
- XD_TIME_SETUP_STEP * 3 +
- XD_TIME_RW_STEP * (2 + i) + XD_TIME_RWN_STEP * i);
+ XD_TIME_SETUP_STEP * 3 +
+ XD_TIME_RW_STEP * (2 + i) + XD_TIME_RWN_STEP * i);
rtsx_add_cmd(chip, WRITE_REG_CMD, XD_CATCTL, 0xFF,
- XD_TIME_SETUP_STEP * 3 + XD_TIME_RW_STEP * (4 + i) +
- XD_TIME_RWN_STEP * (3 + i));
+ XD_TIME_SETUP_STEP * 3 +
+ XD_TIME_RW_STEP * (4 + i) +
+ XD_TIME_RWN_STEP * (3 + i));
rtsx_add_cmd(chip, WRITE_REG_CMD, XD_TRANSFER, 0xFF,
- XD_TRANSFER_START | XD_RESET);
+ XD_TRANSFER_START | XD_RESET);
rtsx_add_cmd(chip, CHECK_REG_CMD, XD_TRANSFER,
- XD_TRANSFER_END, XD_TRANSFER_END);
+ XD_TRANSFER_END, XD_TRANSFER_END);
rtsx_add_cmd(chip, READ_REG_CMD, XD_DAT, 0, 0);
rtsx_add_cmd(chip, READ_REG_CMD, XD_CTL, 0, 0);
@@ -625,7 +646,7 @@ static int reset_xd(struct rtsx_chip *chip)
ptr[0], ptr[1]);
if (((ptr[0] & READY_FLAG) != READY_STATE) ||
- !(ptr[1] & XD_RDY))
+ !(ptr[1] & XD_RDY))
continue;
retval = xd_read_id(chip, READ_ID, id_buf, 4);
@@ -773,7 +794,7 @@ static int reset_xd(struct rtsx_chip *chip)
if (redunt[PAGE_STATUS] != XD_GPG) {
for (j = 1; j <= 8; j++) {
retval = xd_read_redundant(chip, page_addr + j,
- redunt, 11);
+ redunt, 11);
if (retval == STATUS_SUCCESS) {
if (redunt[PAGE_STATUS] == XD_GPG)
break;
@@ -786,7 +807,7 @@ static int reset_xd(struct rtsx_chip *chip)
/* Check CIS data */
if ((redunt[BLOCK_STATUS] == XD_GBLK) &&
- (redunt[PARITY] & XD_BA1_ALL0)) {
+ (redunt[PARITY] & XD_BA1_ALL0)) {
u8 buf[10];
page_addr += j;
@@ -798,11 +819,11 @@ static int reset_xd(struct rtsx_chip *chip)
}
if ((buf[0] == 0x01) && (buf[1] == 0x03) &&
- (buf[2] == 0xD9)
- && (buf[3] == 0x01) && (buf[4] == 0xFF)
- && (buf[5] == 0x18) && (buf[6] == 0x02)
- && (buf[7] == 0xDF) && (buf[8] == 0x01)
- && (buf[9] == 0x20)) {
+ (buf[2] == 0xD9) &&
+ (buf[3] == 0x01) && (buf[4] == 0xFF) &&
+ (buf[5] == 0x18) && (buf[6] == 0x02) &&
+ (buf[7] == 0xDF) && (buf[8] == 0x01) &&
+ (buf[9] == 0x20)) {
xd_card->cis_block = (u16)i;
}
}
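
This hunk additionally moves the && operators from the start of each continuation line to the end of the previous one, matching checkpatch's "Logical continuations should be on the previous line". Schematically, using the CIS byte values from the code above in an invented helper:

int cis_matches(const unsigned char *buf)
{
        /* Preferred form: '&&' trails each line, so every line reads
         * as "this condition, and ...".  The old form instead began
         * the continuation lines with "&& buf[3] == ...".
         */
        return buf[0] == 0x01 && buf[1] == 0x03 &&
               buf[2] == 0xD9 && buf[3] == 0x01;
}
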
@@ -861,7 +882,7 @@ static u16 xd_load_log_block_addr(u8 *redunt)
static int xd_init_l2p_tbl(struct rtsx_chip *chip)
{
- struct xd_info *xd_card = &(chip->xd_card);
+ struct xd_info *xd_card = &chip->xd_card;
int size, i;
dev_dbg(rtsx_dev(chip), "xd_init_l2p_tbl: zone_cnt = %d\n",
@@ -910,7 +931,7 @@ static inline void free_zone(struct zone_entry *zone)
static void xd_set_unused_block(struct rtsx_chip *chip, u32 phy_blk)
{
- struct xd_info *xd_card = &(chip->xd_card);
+ struct xd_info *xd_card = &chip->xd_card;
struct zone_entry *zone;
int zone_no;
@@ -920,15 +941,15 @@ static void xd_set_unused_block(struct rtsx_chip *chip, u32 phy_blk)
zone_no, xd_card->zone_cnt);
return;
}
- zone = &(xd_card->zone[zone_no]);
+ zone = &xd_card->zone[zone_no];
- if (zone->free_table == NULL) {
+ if (!zone->free_table) {
if (xd_build_l2p_tbl(chip, zone_no) != STATUS_SUCCESS)
return;
}
- if ((zone->set_index >= XD_FREE_TABLE_CNT)
- || (zone->set_index < 0)) {
+ if ((zone->set_index >= XD_FREE_TABLE_CNT) ||
+ (zone->set_index < 0)) {
free_zone(zone);
dev_dbg(rtsx_dev(chip), "Set unused block fail, invalid set_index\n");
return;
@@ -945,7 +966,7 @@ static void xd_set_unused_block(struct rtsx_chip *chip, u32 phy_blk)
static u32 xd_get_unused_block(struct rtsx_chip *chip, int zone_no)
{
- struct xd_info *xd_card = &(chip->xd_card);
+ struct xd_info *xd_card = &chip->xd_card;
struct zone_entry *zone;
u32 phy_blk;
@@ -954,10 +975,10 @@ static u32 xd_get_unused_block(struct rtsx_chip *chip, int zone_no)
zone_no, xd_card->zone_cnt);
return BLK_NOT_FOUND;
}
- zone = &(xd_card->zone[zone_no]);
+ zone = &xd_card->zone[zone_no];
if ((zone->unused_blk_cnt == 0) ||
- (zone->set_index == zone->get_index)) {
+ (zone->set_index == zone->get_index)) {
free_zone(zone);
dev_dbg(rtsx_dev(chip), "Get unused block fail, no unused block available\n");
return BLK_NOT_FOUND;
@@ -982,22 +1003,22 @@ static u32 xd_get_unused_block(struct rtsx_chip *chip, int zone_no)
}
static void xd_set_l2p_tbl(struct rtsx_chip *chip,
- int zone_no, u16 log_off, u16 phy_off)
+ int zone_no, u16 log_off, u16 phy_off)
{
- struct xd_info *xd_card = &(chip->xd_card);
+ struct xd_info *xd_card = &chip->xd_card;
struct zone_entry *zone;
- zone = &(xd_card->zone[zone_no]);
+ zone = &xd_card->zone[zone_no];
zone->l2p_table[log_off] = phy_off;
}
static u32 xd_get_l2p_tbl(struct rtsx_chip *chip, int zone_no, u16 log_off)
{
- struct xd_info *xd_card = &(chip->xd_card);
+ struct xd_info *xd_card = &chip->xd_card;
struct zone_entry *zone;
int retval;
- zone = &(xd_card->zone[zone_no]);
+ zone = &xd_card->zone[zone_no];
if (zone->l2p_table[log_off] == 0xFFFF) {
u32 phy_blk = 0;
int i;
@@ -1023,7 +1044,7 @@ static u32 xd_get_l2p_tbl(struct rtsx_chip *chip, int zone_no, u16 log_off)
}
retval = xd_init_page(chip, phy_blk, log_off,
- 0, xd_card->page_off + 1);
+ 0, xd_card->page_off + 1);
if (retval == STATUS_SUCCESS)
break;
}
@@ -1041,7 +1062,7 @@ static u32 xd_get_l2p_tbl(struct rtsx_chip *chip, int zone_no, u16 log_off)
int reset_xd_card(struct rtsx_chip *chip)
{
- struct xd_info *xd_card = &(chip->xd_card);
+ struct xd_info *xd_card = &chip->xd_card;
int retval;
memset(xd_card, 0, sizeof(struct xd_info));
@@ -1077,7 +1098,7 @@ int reset_xd_card(struct rtsx_chip *chip)
static int xd_mark_bad_block(struct rtsx_chip *chip, u32 phy_blk)
{
- struct xd_info *xd_card = &(chip->xd_card);
+ struct xd_info *xd_card = &chip->xd_card;
int retval;
u32 page_addr;
u8 reg = 0;
@@ -1107,12 +1128,12 @@ static int xd_mark_bad_block(struct rtsx_chip *chip, u32 phy_blk)
xd_assign_phy_addr(chip, page_addr, XD_RW_ADDR);
rtsx_add_cmd(chip, WRITE_REG_CMD, XD_PAGE_CNT, 0xFF,
- xd_card->page_off + 1);
+ xd_card->page_off + 1);
rtsx_add_cmd(chip, WRITE_REG_CMD, XD_TRANSFER, 0xFF,
- XD_TRANSFER_START | XD_WRITE_REDUNDANT);
+ XD_TRANSFER_START | XD_WRITE_REDUNDANT);
rtsx_add_cmd(chip, CHECK_REG_CMD, XD_TRANSFER,
- XD_TRANSFER_END, XD_TRANSFER_END);
+ XD_TRANSFER_END, XD_TRANSFER_END);
retval = rtsx_send_cmd(chip, XD_CARD, 500);
if (retval < 0) {
@@ -1132,7 +1153,7 @@ static int xd_mark_bad_block(struct rtsx_chip *chip, u32 phy_blk)
static int xd_init_page(struct rtsx_chip *chip, u32 phy_blk,
u16 logoff, u8 start_page, u8 end_page)
{
- struct xd_info *xd_card = &(chip->xd_card);
+ struct xd_info *xd_card = &chip->xd_card;
int retval;
u32 page_addr;
u8 reg = 0;
@@ -1153,7 +1174,7 @@ static int xd_init_page(struct rtsx_chip *chip, u32 phy_blk,
rtsx_add_cmd(chip, WRITE_REG_CMD, XD_PAGE_STATUS, 0xFF, 0xFF);
rtsx_add_cmd(chip, WRITE_REG_CMD, XD_BLOCK_STATUS, 0xFF, 0xFF);
rtsx_add_cmd(chip, WRITE_REG_CMD, XD_BLOCK_ADDR1_H,
- 0xFF, (u8)(logoff >> 8));
+ 0xFF, (u8)(logoff >> 8));
rtsx_add_cmd(chip, WRITE_REG_CMD, XD_BLOCK_ADDR1_L, 0xFF, (u8)logoff);
page_addr = (phy_blk << xd_card->block_shift) + start_page;
@@ -1161,15 +1182,15 @@ static int xd_init_page(struct rtsx_chip *chip, u32 phy_blk,
xd_assign_phy_addr(chip, page_addr, XD_RW_ADDR);
rtsx_add_cmd(chip, WRITE_REG_CMD, XD_CFG,
- XD_BA_TRANSFORM, XD_BA_TRANSFORM);
+ XD_BA_TRANSFORM, XD_BA_TRANSFORM);
rtsx_add_cmd(chip, WRITE_REG_CMD, XD_PAGE_CNT,
- 0xFF, (end_page - start_page));
+ 0xFF, (end_page - start_page));
rtsx_add_cmd(chip, WRITE_REG_CMD, XD_TRANSFER,
- 0xFF, XD_TRANSFER_START | XD_WRITE_REDUNDANT);
+ 0xFF, XD_TRANSFER_START | XD_WRITE_REDUNDANT);
rtsx_add_cmd(chip, CHECK_REG_CMD, XD_TRANSFER,
- XD_TRANSFER_END, XD_TRANSFER_END);
+ XD_TRANSFER_END, XD_TRANSFER_END);
retval = rtsx_send_cmd(chip, XD_CARD, 500);
if (retval < 0) {
@@ -1191,7 +1212,7 @@ static int xd_init_page(struct rtsx_chip *chip, u32 phy_blk,
static int xd_copy_page(struct rtsx_chip *chip, u32 old_blk, u32 new_blk,
u8 start_page, u8 end_page)
{
- struct xd_info *xd_card = &(chip->xd_card);
+ struct xd_info *xd_card = &chip->xd_card;
u32 old_page, new_page;
u8 i, reg = 0;
int retval;
@@ -1235,11 +1256,11 @@ static int xd_copy_page(struct rtsx_chip *chip, u32 old_blk, u32 new_blk,
rtsx_add_cmd(chip, WRITE_REG_CMD, XD_PAGE_CNT, 0xFF, 1);
rtsx_add_cmd(chip, WRITE_REG_CMD, XD_CHK_DATA_STATUS,
- XD_AUTO_CHK_DATA_STATUS, 0);
+ XD_AUTO_CHK_DATA_STATUS, 0);
rtsx_add_cmd(chip, WRITE_REG_CMD, XD_TRANSFER, 0xFF,
- XD_TRANSFER_START | XD_READ_PAGES);
+ XD_TRANSFER_START | XD_READ_PAGES);
rtsx_add_cmd(chip, CHECK_REG_CMD, XD_TRANSFER,
- XD_TRANSFER_END, XD_TRANSFER_END);
+ XD_TRANSFER_END, XD_TRANSFER_END);
retval = rtsx_send_cmd(chip, XD_CARD, 500);
if (retval < 0) {
@@ -1250,22 +1271,24 @@ static int xd_copy_page(struct rtsx_chip *chip, u32 old_blk, u32 new_blk,
wait_timeout(100);
if (detect_card_cd(chip,
- XD_CARD) != STATUS_SUCCESS) {
+ XD_CARD) != STATUS_SUCCESS) {
xd_set_err_code(chip, XD_NO_CARD);
rtsx_trace(chip);
return STATUS_FAIL;
}
if (((reg & (XD_ECC1_ERROR | XD_ECC1_UNCORRECTABLE)) ==
- (XD_ECC1_ERROR | XD_ECC1_UNCORRECTABLE))
- || ((reg & (XD_ECC2_ERROR | XD_ECC2_UNCORRECTABLE)) ==
+ (XD_ECC1_ERROR | XD_ECC1_UNCORRECTABLE)) ||
+ ((reg & (XD_ECC2_ERROR | XD_ECC2_UNCORRECTABLE)) ==
(XD_ECC2_ERROR | XD_ECC2_UNCORRECTABLE))) {
rtsx_write_register(chip,
- XD_PAGE_STATUS, 0xFF,
- XD_BPG);
+ XD_PAGE_STATUS,
+ 0xFF,
+ XD_BPG);
rtsx_write_register(chip,
- XD_BLOCK_STATUS, 0xFF,
- XD_GBLK);
+ XD_BLOCK_STATUS,
+ 0xFF,
+ XD_GBLK);
XD_SET_BAD_OLDBLK(xd_card);
dev_dbg(rtsx_dev(chip), "old block 0x%x ecc error\n",
old_blk);
@@ -1287,7 +1310,7 @@ static int xd_copy_page(struct rtsx_chip *chip, u32 old_blk, u32 new_blk,
rtsx_add_cmd(chip, WRITE_REG_CMD, XD_TRANSFER, 0xFF,
XD_TRANSFER_START | XD_WRITE_PAGES);
rtsx_add_cmd(chip, CHECK_REG_CMD, XD_TRANSFER,
- XD_TRANSFER_END, XD_TRANSFER_END);
+ XD_TRANSFER_END, XD_TRANSFER_END);
retval = rtsx_send_cmd(chip, XD_CARD, 300);
if (retval < 0) {
@@ -1320,9 +1343,9 @@ static int xd_reset_cmd(struct rtsx_chip *chip)
rtsx_init_cmd(chip);
rtsx_add_cmd(chip, WRITE_REG_CMD, XD_TRANSFER,
- 0xFF, XD_TRANSFER_START | XD_RESET);
+ 0xFF, XD_TRANSFER_START | XD_RESET);
rtsx_add_cmd(chip, CHECK_REG_CMD, XD_TRANSFER,
- XD_TRANSFER_END, XD_TRANSFER_END);
+ XD_TRANSFER_END, XD_TRANSFER_END);
rtsx_add_cmd(chip, READ_REG_CMD, XD_DAT, 0, 0);
rtsx_add_cmd(chip, READ_REG_CMD, XD_CTL, 0, 0);
@@ -1342,7 +1365,7 @@ static int xd_reset_cmd(struct rtsx_chip *chip)
static int xd_erase_block(struct rtsx_chip *chip, u32 phy_blk)
{
- struct xd_info *xd_card = &(chip->xd_card);
+ struct xd_info *xd_card = &chip->xd_card;
u32 page_addr;
u8 reg = 0, *ptr;
int i, retval;
@@ -1360,9 +1383,9 @@ static int xd_erase_block(struct rtsx_chip *chip, u32 phy_blk)
xd_assign_phy_addr(chip, page_addr, XD_ERASE_ADDR);
rtsx_add_cmd(chip, WRITE_REG_CMD, XD_TRANSFER, 0xFF,
- XD_TRANSFER_START | XD_ERASE);
+ XD_TRANSFER_START | XD_ERASE);
rtsx_add_cmd(chip, CHECK_REG_CMD, XD_TRANSFER,
- XD_TRANSFER_END, XD_TRANSFER_END);
+ XD_TRANSFER_END, XD_TRANSFER_END);
rtsx_add_cmd(chip, READ_REG_CMD, XD_DAT, 0, 0);
retval = rtsx_send_cmd(chip, XD_CARD, 250);
@@ -1403,7 +1426,7 @@ static int xd_erase_block(struct rtsx_chip *chip, u32 phy_blk)
static int xd_build_l2p_tbl(struct rtsx_chip *chip, int zone_no)
{
- struct xd_info *xd_card = &(chip->xd_card);
+ struct xd_info *xd_card = &chip->xd_card;
struct zone_entry *zone;
int retval;
u32 start, end, i;
@@ -1413,7 +1436,7 @@ static int xd_build_l2p_tbl(struct rtsx_chip *chip, int zone_no)
dev_dbg(rtsx_dev(chip), "xd_build_l2p_tbl: %d\n", zone_no);
- if (xd_card->zone == NULL) {
+ if (!xd_card->zone) {
retval = xd_init_l2p_tbl(chip);
if (retval != STATUS_SUCCESS)
return retval;
@@ -1425,22 +1448,22 @@ static int xd_build_l2p_tbl(struct rtsx_chip *chip, int zone_no)
return STATUS_SUCCESS;
}
- zone = &(xd_card->zone[zone_no]);
+ zone = &xd_card->zone[zone_no];
- if (zone->l2p_table == NULL) {
+ if (!zone->l2p_table) {
zone->l2p_table = vmalloc(2000);
if (!zone->l2p_table) {
rtsx_trace(chip);
- goto Build_Fail;
+ goto build_fail;
}
}
memset((u8 *)(zone->l2p_table), 0xff, 2000);
- if (zone->free_table == NULL) {
+ if (!zone->free_table) {
zone->free_table = vmalloc(XD_FREE_TABLE_CNT * 2);
if (!zone->free_table) {
rtsx_trace(chip);
- goto Build_Fail;
+ goto build_fail;
}
}
memset((u8 *)(zone->free_table), 0xff, XD_FREE_TABLE_CNT * 2);
@@ -1466,7 +1489,8 @@ static int xd_build_l2p_tbl(struct rtsx_chip *chip, int zone_no)
dev_dbg(rtsx_dev(chip), "start block 0x%x, end block 0x%x\n",
start, end);
- zone->set_index = zone->get_index = 0;
+ zone->set_index = 0;
+ zone->get_index = 0;
zone->unused_blk_cnt = 0;
for (i = start; i < end; i++) {
@@ -1490,7 +1514,7 @@ static int xd_build_l2p_tbl(struct rtsx_chip *chip, int zone_no)
cur_fst_page_logoff = xd_load_log_block_addr(redunt);
if ((cur_fst_page_logoff == 0xFFFF) ||
- (cur_fst_page_logoff > max_logoff)) {
+ (cur_fst_page_logoff > max_logoff)) {
retval = xd_erase_block(chip, i);
if (retval == STATUS_SUCCESS)
xd_set_unused_block(chip, i);
@@ -1498,7 +1522,7 @@ static int xd_build_l2p_tbl(struct rtsx_chip *chip, int zone_no)
}
if ((zone_no == 0) && (cur_fst_page_logoff == 0) &&
- (redunt[PAGE_STATUS] != XD_GPG))
+ (redunt[PAGE_STATUS] != XD_GPG))
XD_SET_MBR_FAIL(xd_card);
if (zone->l2p_table[cur_fst_page_logoff] == 0xFFFF) {
@@ -1524,7 +1548,7 @@ static int xd_build_l2p_tbl(struct rtsx_chip *chip, int zone_no)
for (m = 0; m < 3; m++) {
retval = xd_read_redundant(chip, page_addr,
- redunt, 11);
+ redunt, 11);
if (retval == STATUS_SUCCESS)
break;
}
@@ -1581,7 +1605,7 @@ static int xd_build_l2p_tbl(struct rtsx_chip *chip, int zone_no)
return STATUS_SUCCESS;
-Build_Fail:
+build_fail:
vfree(zone->l2p_table);
zone->l2p_table = NULL;
vfree(zone->free_table);
@@ -1598,9 +1622,9 @@ static int xd_send_cmd(struct rtsx_chip *chip, u8 cmd)
rtsx_add_cmd(chip, WRITE_REG_CMD, XD_DAT, 0xFF, cmd);
rtsx_add_cmd(chip, WRITE_REG_CMD, XD_TRANSFER, 0xFF,
- XD_TRANSFER_START | XD_SET_CMD);
+ XD_TRANSFER_START | XD_SET_CMD);
rtsx_add_cmd(chip, CHECK_REG_CMD, XD_TRANSFER,
- XD_TRANSFER_END, XD_TRANSFER_END);
+ XD_TRANSFER_END, XD_TRANSFER_END);
retval = rtsx_send_cmd(chip, XD_CARD, 200);
if (retval < 0) {
@@ -1612,18 +1636,18 @@ static int xd_send_cmd(struct rtsx_chip *chip, u8 cmd)
}
static int xd_read_multiple_pages(struct rtsx_chip *chip, u32 phy_blk,
- u32 log_blk, u8 start_page, u8 end_page,
- u8 *buf, unsigned int *index,
- unsigned int *offset)
+ u32 log_blk, u8 start_page, u8 end_page,
+ u8 *buf, unsigned int *index,
+ unsigned int *offset)
{
- struct xd_info *xd_card = &(chip->xd_card);
+ struct xd_info *xd_card = &chip->xd_card;
u32 page_addr, new_blk;
u16 log_off;
u8 reg_val, page_cnt;
int zone_no, retval, i;
if (start_page > end_page)
- goto Status_Fail;
+ goto status_fail;
page_cnt = end_page - start_page;
zone_no = (int)(log_blk / 1000);
@@ -1639,7 +1663,7 @@ static int xd_read_multiple_pages(struct rtsx_chip *chip, u32 phy_blk,
if (detect_card_cd(chip, XD_CARD) != STATUS_SUCCESS) {
xd_set_err_code(chip, XD_NO_CARD);
- goto Status_Fail;
+ goto status_fail;
}
}
}
@@ -1653,37 +1677,38 @@ static int xd_read_multiple_pages(struct rtsx_chip *chip, u32 phy_blk,
rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_DATA_SOURCE, 0x01, RING_BUFFER);
rtsx_add_cmd(chip, WRITE_REG_CMD, XD_PAGE_CNT, 0xFF, page_cnt);
rtsx_add_cmd(chip, WRITE_REG_CMD, XD_CHK_DATA_STATUS,
- XD_AUTO_CHK_DATA_STATUS, XD_AUTO_CHK_DATA_STATUS);
+ XD_AUTO_CHK_DATA_STATUS, XD_AUTO_CHK_DATA_STATUS);
trans_dma_enable(chip->srb->sc_data_direction, chip,
- page_cnt * 512, DMA_512);
+ page_cnt * 512, DMA_512);
rtsx_add_cmd(chip, WRITE_REG_CMD, XD_TRANSFER, 0xFF,
- XD_TRANSFER_START | XD_READ_PAGES);
+ XD_TRANSFER_START | XD_READ_PAGES);
rtsx_add_cmd(chip, CHECK_REG_CMD, XD_TRANSFER,
- XD_TRANSFER_END | XD_PPB_EMPTY, XD_TRANSFER_END | XD_PPB_EMPTY);
+ XD_TRANSFER_END | XD_PPB_EMPTY,
+ XD_TRANSFER_END | XD_PPB_EMPTY);
rtsx_send_cmd_no_wait(chip);
retval = rtsx_transfer_data_partial(chip, XD_CARD, buf, page_cnt * 512,
- scsi_sg_count(chip->srb),
- index, offset, DMA_FROM_DEVICE,
- chip->xd_timeout);
+ scsi_sg_count(chip->srb),
+ index, offset, DMA_FROM_DEVICE,
+ chip->xd_timeout);
if (retval < 0) {
rtsx_clear_xd_error(chip);
if (retval == -ETIMEDOUT) {
xd_set_err_code(chip, XD_TO_ERROR);
- goto Status_Fail;
+ goto status_fail;
} else {
rtsx_trace(chip);
- goto Fail;
+ goto fail;
}
}
return STATUS_SUCCESS;
-Fail:
+fail:
retval = rtsx_read_register(chip, XD_PAGE_STATUS, &reg_val);
if (retval) {
rtsx_trace(chip);
@@ -1699,15 +1724,15 @@ Fail:
return retval;
}
- if (((reg_val & (XD_ECC1_ERROR | XD_ECC1_UNCORRECTABLE))
- == (XD_ECC1_ERROR | XD_ECC1_UNCORRECTABLE))
- || ((reg_val & (XD_ECC2_ERROR | XD_ECC2_UNCORRECTABLE))
- == (XD_ECC2_ERROR | XD_ECC2_UNCORRECTABLE))) {
+ if (((reg_val & (XD_ECC1_ERROR | XD_ECC1_UNCORRECTABLE)) ==
+ (XD_ECC1_ERROR | XD_ECC1_UNCORRECTABLE)) ||
+ ((reg_val & (XD_ECC2_ERROR | XD_ECC2_UNCORRECTABLE)) ==
+ (XD_ECC2_ERROR | XD_ECC2_UNCORRECTABLE))) {
wait_timeout(100);
if (detect_card_cd(chip, XD_CARD) != STATUS_SUCCESS) {
xd_set_err_code(chip, XD_NO_CARD);
- goto Status_Fail;
+ goto status_fail;
}
xd_set_err_code(chip, XD_ECC_ERROR);
@@ -1715,11 +1740,11 @@ Fail:
new_blk = xd_get_unused_block(chip, zone_no);
if (new_blk == NO_NEW_BLK) {
XD_CLR_BAD_OLDBLK(xd_card);
- goto Status_Fail;
+ goto status_fail;
}
retval = xd_copy_page(chip, phy_blk, new_blk, 0,
- xd_card->page_off + 1);
+ xd_card->page_off + 1);
if (retval != STATUS_SUCCESS) {
if (!XD_CHK_BAD_NEWBLK(xd_card)) {
retval = xd_erase_block(chip, new_blk);
@@ -1729,7 +1754,7 @@ Fail:
XD_CLR_BAD_NEWBLK(xd_card);
}
XD_CLR_BAD_OLDBLK(xd_card);
- goto Status_Fail;
+ goto status_fail;
}
xd_set_l2p_tbl(chip, zone_no, log_off, (u16)(new_blk & 0x3FF));
xd_erase_block(chip, phy_blk);
@@ -1737,15 +1762,15 @@ Fail:
XD_CLR_BAD_OLDBLK(xd_card);
}
-Status_Fail:
+status_fail:
rtsx_trace(chip);
return STATUS_FAIL;
}
static int xd_finish_write(struct rtsx_chip *chip,
- u32 old_blk, u32 new_blk, u32 log_blk, u8 page_off)
+ u32 old_blk, u32 new_blk, u32 log_blk, u8 page_off)
{
- struct xd_info *xd_card = &(chip->xd_card);
+ struct xd_info *xd_card = &chip->xd_card;
int retval, zone_no;
u16 log_off;
@@ -1762,7 +1787,7 @@ static int xd_finish_write(struct rtsx_chip *chip,
if (old_blk == BLK_NOT_FOUND) {
retval = xd_init_page(chip, new_blk, log_off,
- page_off, xd_card->page_off + 1);
+ page_off, xd_card->page_off + 1);
if (retval != STATUS_SUCCESS) {
retval = xd_erase_block(chip, new_blk);
if (retval == STATUS_SUCCESS)
@@ -1772,7 +1797,7 @@ static int xd_finish_write(struct rtsx_chip *chip,
}
} else {
retval = xd_copy_page(chip, old_blk, new_blk,
- page_off, xd_card->page_off + 1);
+ page_off, xd_card->page_off + 1);
if (retval != STATUS_SUCCESS) {
if (!XD_CHK_BAD_NEWBLK(xd_card)) {
retval = xd_erase_block(chip, new_blk);
@@ -1804,7 +1829,7 @@ static int xd_finish_write(struct rtsx_chip *chip,
}
static int xd_prepare_write(struct rtsx_chip *chip,
- u32 old_blk, u32 new_blk, u32 log_blk, u8 page_off)
+ u32 old_blk, u32 new_blk, u32 log_blk, u8 page_off)
{
int retval;
@@ -1823,11 +1848,11 @@ static int xd_prepare_write(struct rtsx_chip *chip,
}
static int xd_write_multiple_pages(struct rtsx_chip *chip, u32 old_blk,
- u32 new_blk, u32 log_blk, u8 start_page,
- u8 end_page, u8 *buf, unsigned int *index,
- unsigned int *offset)
+ u32 new_blk, u32 log_blk, u8 start_page,
+ u8 end_page, u8 *buf, unsigned int *index,
+ unsigned int *offset)
{
- struct xd_info *xd_card = &(chip->xd_card);
+ struct xd_info *xd_card = &chip->xd_card;
u32 page_addr;
int zone_no, retval;
u16 log_off;
@@ -1837,7 +1862,7 @@ static int xd_write_multiple_pages(struct rtsx_chip *chip, u32 old_blk,
__func__, old_blk, new_blk, log_blk);
if (start_page > end_page)
- goto Status_Fail;
+ goto status_fail;
page_cnt = end_page - start_page;
zone_no = (int)(log_blk / 1000);
@@ -1847,12 +1872,12 @@ static int xd_write_multiple_pages(struct rtsx_chip *chip, u32 old_blk,
retval = xd_send_cmd(chip, READ1_1);
if (retval != STATUS_SUCCESS)
- goto Status_Fail;
+ goto status_fail;
rtsx_init_cmd(chip);
rtsx_add_cmd(chip, WRITE_REG_CMD, XD_BLOCK_ADDR1_H,
- 0xFF, (u8)(log_off >> 8));
+ 0xFF, (u8)(log_off >> 8));
rtsx_add_cmd(chip, WRITE_REG_CMD, XD_BLOCK_ADDR1_L, 0xFF, (u8)log_off);
rtsx_add_cmd(chip, WRITE_REG_CMD, XD_BLOCK_STATUS, 0xFF, XD_GBLK);
rtsx_add_cmd(chip, WRITE_REG_CMD, XD_PAGE_STATUS, 0xFF, XD_GPG);
@@ -1860,32 +1885,32 @@ static int xd_write_multiple_pages(struct rtsx_chip *chip, u32 old_blk,
xd_assign_phy_addr(chip, page_addr, XD_RW_ADDR);
rtsx_add_cmd(chip, WRITE_REG_CMD, XD_CFG, XD_BA_TRANSFORM,
- XD_BA_TRANSFORM);
+ XD_BA_TRANSFORM);
rtsx_add_cmd(chip, WRITE_REG_CMD, XD_PAGE_CNT, 0xFF, page_cnt);
rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_DATA_SOURCE, 0x01, RING_BUFFER);
trans_dma_enable(chip->srb->sc_data_direction, chip,
- page_cnt * 512, DMA_512);
+ page_cnt * 512, DMA_512);
rtsx_add_cmd(chip, WRITE_REG_CMD, XD_TRANSFER,
- 0xFF, XD_TRANSFER_START | XD_WRITE_PAGES);
+ 0xFF, XD_TRANSFER_START | XD_WRITE_PAGES);
rtsx_add_cmd(chip, CHECK_REG_CMD, XD_TRANSFER,
- XD_TRANSFER_END, XD_TRANSFER_END);
+ XD_TRANSFER_END, XD_TRANSFER_END);
rtsx_send_cmd_no_wait(chip);
retval = rtsx_transfer_data_partial(chip, XD_CARD, buf, page_cnt * 512,
- scsi_sg_count(chip->srb),
- index, offset, DMA_TO_DEVICE, chip->xd_timeout);
+ scsi_sg_count(chip->srb),
+ index, offset, DMA_TO_DEVICE, chip->xd_timeout);
if (retval < 0) {
rtsx_clear_xd_error(chip);
if (retval == -ETIMEDOUT) {
xd_set_err_code(chip, XD_TO_ERROR);
- goto Status_Fail;
+ goto status_fail;
} else {
rtsx_trace(chip);
- goto Fail;
+ goto fail;
}
}
@@ -1911,7 +1936,7 @@ static int xd_write_multiple_pages(struct rtsx_chip *chip, u32 old_blk,
return STATUS_SUCCESS;
-Fail:
+fail:
retval = rtsx_read_register(chip, XD_DAT, &reg_val);
if (retval) {
rtsx_trace(chip);
@@ -1922,7 +1947,7 @@ Fail:
xd_mark_bad_block(chip, new_blk);
}
-Status_Fail:
+status_fail:
rtsx_trace(chip);
return STATUS_FAIL;
}
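
The Fail:/Status_Fail: labels in the two functions above become fail:/status_fail:, per kernel style (checkpatch's "Avoid CamelCase" applies to goto labels too); the unwind logic itself is untouched. A compact sketch of the two-exit pattern these functions use — do_transfer() and try_recover() are invented stand-ins:

#include <stdio.h>

static int do_transfer(void) { return -1; }     /* pretend the DMA failed */
static int try_recover(void) { return -1; }     /* and recovery failed too */

static int xfer_example(int hard_error)
{
        if (hard_error)
                goto status_fail;               /* unrecoverable path */

        if (do_transfer() < 0)
                goto fail;                      /* maybe recoverable */
        return 0;

fail:
        if (try_recover() == 0)
                return 0;
        /* recovery failed: fall through to the common failure exit */
status_fail:
        return -1;
}

int main(void)
{
        printf("%d %d\n", xfer_example(0), xfer_example(1));    /* -1 -1 */
        return 0;
}
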
@@ -1930,8 +1955,8 @@ Status_Fail:
#ifdef XD_DELAY_WRITE
int xd_delay_write(struct rtsx_chip *chip)
{
- struct xd_info *xd_card = &(chip->xd_card);
- struct xd_delay_write_tag *delay_write = &(xd_card->delay_write);
+ struct xd_info *xd_card = &chip->xd_card;
+ struct xd_delay_write_tag *delay_write = &xd_card->delay_write;
int retval;
if (delay_write->delay_write_flag) {
@@ -1944,9 +1969,10 @@ int xd_delay_write(struct rtsx_chip *chip)
delay_write->delay_write_flag = 0;
retval = xd_finish_write(chip,
- delay_write->old_phyblock,
- delay_write->new_phyblock,
- delay_write->logblock, delay_write->pageoff);
+ delay_write->old_phyblock,
+ delay_write->new_phyblock,
+ delay_write->logblock,
+ delay_write->pageoff);
if (retval != STATUS_SUCCESS) {
rtsx_trace(chip);
return STATUS_FAIL;
@@ -1958,12 +1984,12 @@ int xd_delay_write(struct rtsx_chip *chip)
#endif
int xd_rw(struct scsi_cmnd *srb, struct rtsx_chip *chip,
- u32 start_sector, u16 sector_cnt)
+ u32 start_sector, u16 sector_cnt)
{
- struct xd_info *xd_card = &(chip->xd_card);
+ struct xd_info *xd_card = &chip->xd_card;
unsigned int lun = SCSI_LUN(srb);
#ifdef XD_DELAY_WRITE
- struct xd_delay_write_tag *delay_write = &(xd_card->delay_write);
+ struct xd_delay_write_tag *delay_write = &xd_card->delay_write;
#endif
int retval, zone_no;
unsigned int index = 0, offset = 0;
@@ -2012,17 +2038,18 @@ int xd_rw(struct scsi_cmnd *srb, struct rtsx_chip *chip,
if (srb->sc_data_direction == DMA_TO_DEVICE) {
#ifdef XD_DELAY_WRITE
if (delay_write->delay_write_flag &&
- (delay_write->logblock == log_blk) &&
- (start_page > delay_write->pageoff)) {
+ (delay_write->logblock == log_blk) &&
+ (start_page > delay_write->pageoff)) {
delay_write->delay_write_flag = 0;
if (delay_write->old_phyblock != BLK_NOT_FOUND) {
retval = xd_copy_page(chip,
- delay_write->old_phyblock,
- delay_write->new_phyblock,
- delay_write->pageoff, start_page);
+ delay_write->old_phyblock,
+ delay_write->new_phyblock,
+ delay_write->pageoff,
+ start_page);
if (retval != STATUS_SUCCESS) {
set_sense_type(chip, lun,
- SENSE_TYPE_MEDIA_WRITE_ERR);
+ SENSE_TYPE_MEDIA_WRITE_ERR);
rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -2039,7 +2066,7 @@ int xd_rw(struct scsi_cmnd *srb, struct rtsx_chip *chip,
retval = xd_delay_write(chip);
if (retval != STATUS_SUCCESS) {
set_sense_type(chip, lun,
- SENSE_TYPE_MEDIA_WRITE_ERR);
+ SENSE_TYPE_MEDIA_WRITE_ERR);
rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -2047,25 +2074,25 @@ int xd_rw(struct scsi_cmnd *srb, struct rtsx_chip *chip,
old_blk = xd_get_l2p_tbl(chip, zone_no, log_off);
new_blk = xd_get_unused_block(chip, zone_no);
if ((old_blk == BLK_NOT_FOUND) ||
- (new_blk == BLK_NOT_FOUND)) {
+ (new_blk == BLK_NOT_FOUND)) {
set_sense_type(chip, lun,
- SENSE_TYPE_MEDIA_WRITE_ERR);
+ SENSE_TYPE_MEDIA_WRITE_ERR);
rtsx_trace(chip);
return STATUS_FAIL;
}
retval = xd_prepare_write(chip, old_blk, new_blk,
- log_blk, start_page);
+ log_blk, start_page);
if (retval != STATUS_SUCCESS) {
if (detect_card_cd(chip, XD_CARD) !=
STATUS_SUCCESS) {
set_sense_type(chip, lun,
- SENSE_TYPE_MEDIA_NOT_PRESENT);
+ SENSE_TYPE_MEDIA_NOT_PRESENT);
rtsx_trace(chip);
return STATUS_FAIL;
}
set_sense_type(chip, lun,
- SENSE_TYPE_MEDIA_WRITE_ERR);
+ SENSE_TYPE_MEDIA_WRITE_ERR);
rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -2078,12 +2105,12 @@ int xd_rw(struct scsi_cmnd *srb, struct rtsx_chip *chip,
if (retval != STATUS_SUCCESS) {
if (detect_card_cd(chip, XD_CARD) != STATUS_SUCCESS) {
set_sense_type(chip, lun,
- SENSE_TYPE_MEDIA_NOT_PRESENT);
+ SENSE_TYPE_MEDIA_NOT_PRESENT);
rtsx_trace(chip);
return STATUS_FAIL;
}
set_sense_type(chip, lun,
- SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
+ SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -2092,7 +2119,7 @@ int xd_rw(struct scsi_cmnd *srb, struct rtsx_chip *chip,
old_blk = xd_get_l2p_tbl(chip, zone_no, log_off);
if (old_blk == BLK_NOT_FOUND) {
set_sense_type(chip, lun,
- SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
+ SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -2116,22 +2143,22 @@ int xd_rw(struct scsi_cmnd *srb, struct rtsx_chip *chip,
page_cnt = end_page - start_page;
if (srb->sc_data_direction == DMA_FROM_DEVICE) {
retval = xd_read_multiple_pages(chip, old_blk, log_blk,
- start_page, end_page, ptr,
- &index, &offset);
+ start_page, end_page,
+ ptr, &index, &offset);
if (retval != STATUS_SUCCESS) {
set_sense_type(chip, lun,
- SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
+ SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
rtsx_trace(chip);
return STATUS_FAIL;
}
} else {
retval = xd_write_multiple_pages(chip, old_blk,
- new_blk, log_blk,
- start_page, end_page, ptr,
- &index, &offset);
+ new_blk, log_blk,
+ start_page, end_page,
+ ptr, &index, &offset);
if (retval != STATUS_SUCCESS) {
set_sense_type(chip, lun,
- SENSE_TYPE_MEDIA_WRITE_ERR);
+ SENSE_TYPE_MEDIA_WRITE_ERR);
rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -2153,7 +2180,7 @@ int xd_rw(struct scsi_cmnd *srb, struct rtsx_chip *chip,
if (retval != STATUS_SUCCESS) {
chip->card_fail |= XD_CARD;
set_sense_type(chip, lun,
- SENSE_TYPE_MEDIA_NOT_PRESENT);
+ SENSE_TYPE_MEDIA_NOT_PRESENT);
rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -2163,10 +2190,10 @@ int xd_rw(struct scsi_cmnd *srb, struct rtsx_chip *chip,
if (old_blk == BLK_NOT_FOUND) {
if (srb->sc_data_direction == DMA_FROM_DEVICE)
set_sense_type(chip, lun,
- SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
+ SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
else
set_sense_type(chip, lun,
- SENSE_TYPE_MEDIA_WRITE_ERR);
+ SENSE_TYPE_MEDIA_WRITE_ERR);
rtsx_trace(chip);
return STATUS_FAIL;
@@ -2176,7 +2203,7 @@ int xd_rw(struct scsi_cmnd *srb, struct rtsx_chip *chip,
new_blk = xd_get_unused_block(chip, zone_no);
if (new_blk == BLK_NOT_FOUND) {
set_sense_type(chip, lun,
- SENSE_TYPE_MEDIA_WRITE_ERR);
+ SENSE_TYPE_MEDIA_WRITE_ERR);
rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -2186,7 +2213,7 @@ int xd_rw(struct scsi_cmnd *srb, struct rtsx_chip *chip,
}
if ((srb->sc_data_direction == DMA_TO_DEVICE) &&
- (end_page != (xd_card->page_off + 1))) {
+ (end_page != (xd_card->page_off + 1))) {
#ifdef XD_DELAY_WRITE
delay_write->delay_write_flag = 1;
delay_write->old_phyblock = old_blk;
@@ -2202,11 +2229,11 @@ int xd_rw(struct scsi_cmnd *srb, struct rtsx_chip *chip,
}
retval = xd_finish_write(chip, old_blk, new_blk,
- log_blk, end_page);
+ log_blk, end_page);
if (retval != STATUS_SUCCESS) {
if (detect_card_cd(chip, XD_CARD) != STATUS_SUCCESS) {
set_sense_type(chip, lun,
- SENSE_TYPE_MEDIA_NOT_PRESENT);
+ SENSE_TYPE_MEDIA_NOT_PRESENT);
rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -2224,10 +2251,10 @@ int xd_rw(struct scsi_cmnd *srb, struct rtsx_chip *chip,
void xd_free_l2p_tbl(struct rtsx_chip *chip)
{
- struct xd_info *xd_card = &(chip->xd_card);
+ struct xd_info *xd_card = &chip->xd_card;
int i = 0;
- if (xd_card->zone != NULL) {
+ if (xd_card->zone) {
for (i = 0; i < xd_card->zone_cnt; i++) {
vfree(xd_card->zone[i].l2p_table);
xd_card->zone[i].l2p_table = NULL;
@@ -2242,7 +2269,7 @@ void xd_free_l2p_tbl(struct rtsx_chip *chip)
void xd_cleanup_work(struct rtsx_chip *chip)
{
#ifdef XD_DELAY_WRITE
- struct xd_info *xd_card = &(chip->xd_card);
+ struct xd_info *xd_card = &chip->xd_card;
if (xd_card->delay_write.delay_write_flag) {
dev_dbg(rtsx_dev(chip), "xD: delay write\n");
@@ -2297,7 +2324,7 @@ int xd_power_off_card3v3(struct rtsx_chip *chip)
int release_xd_card(struct rtsx_chip *chip)
{
- struct xd_info *xd_card = &(chip->xd_card);
+ struct xd_info *xd_card = &chip->xd_card;
int retval;
chip->card_ready &= ~XD_CARD;
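
Two more mechanical rewrites run through the whole file: pointer checks like "zone->free_table == NULL" become "!zone->free_table" (checkpatch's "Comparison to NULL could be written ..."), and the chained "zone->set_index = zone->get_index = 0;" is split into two assignments. The NULL-test rewrite is purely stylistic; both tests are equivalent:

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
        int *p = malloc(sizeof(*p));

        /* A pointer in a boolean context is compared against NULL,
         * so these two tests are identical; kernel style prefers
         * the short form.
         */
        if (p != NULL)
                puts("verbose: non-NULL");
        if (p)
                puts("short:   non-NULL");

        free(p);
        p = NULL;
        if (!p)
                puts("short:   NULL");
        return 0;
}
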
diff --git a/drivers/staging/rts5208/xd.h b/drivers/staging/rts5208/xd.h
index 938138c50bb5..d5f10880efb7 100644
--- a/drivers/staging/rts5208/xd.h
+++ b/drivers/staging/rts5208/xd.h
@@ -179,7 +179,7 @@ int reset_xd_card(struct rtsx_chip *chip);
int xd_delay_write(struct rtsx_chip *chip);
#endif
int xd_rw(struct scsi_cmnd *srb, struct rtsx_chip *chip,
- u32 start_sector, u16 sector_cnt);
+ u32 start_sector, u16 sector_cnt);
void xd_free_l2p_tbl(struct rtsx_chip *chip);
void xd_cleanup_work(struct rtsx_chip *chip);
int xd_power_off_card3v3(struct rtsx_chip *chip);
diff --git a/drivers/staging/skein/skein_api.c b/drivers/staging/skein/skein_api.c
index cab26e736111..c6526b6fbfb4 100644
--- a/drivers/staging/skein/skein_api.c
+++ b/drivers/staging/skein/skein_api.c
@@ -98,19 +98,16 @@ int skein_mac_init(struct skein_ctx *ctx, const u8 *key, size_t key_len,
switch (ctx->skein_size) {
case SKEIN_256:
ret = skein_256_init_ext(&ctx->m.s256, hash_bit_len,
- tree_info,
- (const u8 *)key, key_len);
+ tree_info, key, key_len);
break;
case SKEIN_512:
ret = skein_512_init_ext(&ctx->m.s512, hash_bit_len,
- tree_info,
- (const u8 *)key, key_len);
+ tree_info, key, key_len);
break;
case SKEIN_1024:
ret = skein_1024_init_ext(&ctx->m.s1024, hash_bit_len,
- tree_info,
- (const u8 *)key, key_len);
+ tree_info, key, key_len);
break;
}
@@ -152,16 +149,13 @@ int skein_update(struct skein_ctx *ctx, const u8 *msg,
switch (ctx->skein_size) {
case SKEIN_256:
- ret = skein_256_update(&ctx->m.s256, (const u8 *)msg,
- msg_byte_cnt);
+ ret = skein_256_update(&ctx->m.s256, msg, msg_byte_cnt);
break;
case SKEIN_512:
- ret = skein_512_update(&ctx->m.s512, (const u8 *)msg,
- msg_byte_cnt);
+ ret = skein_512_update(&ctx->m.s512, msg, msg_byte_cnt);
break;
case SKEIN_1024:
- ret = skein_1024_update(&ctx->m.s1024, (const u8 *)msg,
- msg_byte_cnt);
+ ret = skein_1024_update(&ctx->m.s1024, msg, msg_byte_cnt);
break;
}
return ret;
@@ -211,7 +205,7 @@ int skein_update_bits(struct skein_ctx *ctx, const u8 *msg,
/* partial byte bit mask */
mask = (u8)(1u << (7 - (msg_bit_cnt & 7)));
/* apply bit padding on final byte (in the buffer) */
- up[length - 1] = (u8)((up[length - 1] & (0 - mask)) | mask);
+ up[length - 1] = (up[length - 1] & (0 - mask)) | mask;
return SKEIN_SUCCESS;
}
@@ -224,13 +218,13 @@ int skein_final(struct skein_ctx *ctx, u8 *hash)
switch (ctx->skein_size) {
case SKEIN_256:
- ret = skein_256_final(&ctx->m.s256, (u8 *)hash);
+ ret = skein_256_final(&ctx->m.s256, hash);
break;
case SKEIN_512:
- ret = skein_512_final(&ctx->m.s512, (u8 *)hash);
+ ret = skein_512_final(&ctx->m.s512, hash);
break;
case SKEIN_1024:
- ret = skein_1024_final(&ctx->m.s1024, (u8 *)hash);
+ ret = skein_1024_final(&ctx->m.s1024, hash);
break;
}
return ret;
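
The skein_api.c hunks drop casts such as (const u8 *)key where the argument already has exactly that type; a no-op cast only hides future type mismatches from the compiler. A minimal illustration with an invented consumer function:

#include <stddef.h>

typedef unsigned char u8;

static int consume(const u8 *buf, size_t len)
{
        return (buf && len) ? 0 : -1;
}

int caller(const u8 *key, size_t key_len)
{
        /*
         * 'key' is already const u8 *, so the old form
         *
         *         return consume((const u8 *)key, key_len);
         *
         * told the compiler nothing it didn't know; the cast goes.
         */
        return consume(key, key_len);
}
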
diff --git a/drivers/staging/skein/threefish_block.c b/drivers/staging/skein/threefish_block.c
index a95563fad071..50640656c10d 100644
--- a/drivers/staging/skein/threefish_block.c
+++ b/drivers/staging/skein/threefish_block.c
@@ -64,7 +64,6 @@ void threefish_encrypt_256(struct threefish_key *key_ctx, u64 *input,
b2 += b1;
b1 = rol64(b1, 32) ^ b2;
-
b1 += k3 + t2;
b0 += b1 + k2;
b1 = rol64(b1, 14) ^ b0;
@@ -117,7 +116,6 @@ void threefish_encrypt_256(struct threefish_key *key_ctx, u64 *input,
b2 += b1;
b1 = rol64(b1, 32) ^ b2;
-
b1 += k0 + t1;
b0 += b1 + k4;
b1 = rol64(b1, 14) ^ b0;
@@ -170,7 +168,6 @@ void threefish_encrypt_256(struct threefish_key *key_ctx, u64 *input,
b2 += b1;
b1 = rol64(b1, 32) ^ b2;
-
b1 += k2 + t0;
b0 += b1 + k1;
b1 = rol64(b1, 14) ^ b0;
@@ -223,7 +220,6 @@ void threefish_encrypt_256(struct threefish_key *key_ctx, u64 *input,
b2 += b1;
b1 = rol64(b1, 32) ^ b2;
-
b1 += k4 + t2;
b0 += b1 + k3;
b1 = rol64(b1, 14) ^ b0;
@@ -276,7 +272,6 @@ void threefish_encrypt_256(struct threefish_key *key_ctx, u64 *input,
b2 += b1;
b1 = rol64(b1, 32) ^ b2;
-
b1 += k1 + t1;
b0 += b1 + k0;
b1 = rol64(b1, 14) ^ b0;
@@ -329,7 +324,6 @@ void threefish_encrypt_256(struct threefish_key *key_ctx, u64 *input,
b2 += b1;
b1 = rol64(b1, 32) ^ b2;
-
b1 += k3 + t0;
b0 += b1 + k2;
b1 = rol64(b1, 14) ^ b0;
@@ -382,7 +376,6 @@ void threefish_encrypt_256(struct threefish_key *key_ctx, u64 *input,
b2 += b1;
b1 = rol64(b1, 32) ^ b2;
-
b1 += k0 + t2;
b0 += b1 + k4;
b1 = rol64(b1, 14) ^ b0;
@@ -435,7 +428,6 @@ void threefish_encrypt_256(struct threefish_key *key_ctx, u64 *input,
b2 += b1;
b1 = rol64(b1, 32) ^ b2;
-
b1 += k2 + t1;
b0 += b1 + k1;
b1 = rol64(b1, 14) ^ b0;
@@ -579,7 +571,6 @@ void threefish_decrypt_256(struct threefish_key *key_ctx, u64 *input,
b2 -= b3 + k3 + t2;
b3 -= k4 + 16;
-
tmp = b3 ^ b0;
b3 = ror64(tmp, 32);
b0 -= b3;
@@ -648,7 +639,6 @@ void threefish_decrypt_256(struct threefish_key *key_ctx, u64 *input,
b2 -= b3 + k1 + t0;
b3 -= k2 + 14;
-
tmp = b3 ^ b0;
b3 = ror64(tmp, 32);
b0 -= b3;
@@ -717,7 +707,6 @@ void threefish_decrypt_256(struct threefish_key *key_ctx, u64 *input,
b2 -= b3 + k4 + t1;
b3 -= k0 + 12;
-
tmp = b3 ^ b0;
b3 = ror64(tmp, 32);
b0 -= b3;
@@ -786,7 +775,6 @@ void threefish_decrypt_256(struct threefish_key *key_ctx, u64 *input,
b2 -= b3 + k2 + t2;
b3 -= k3 + 10;
-
tmp = b3 ^ b0;
b3 = ror64(tmp, 32);
b0 -= b3;
@@ -855,7 +843,6 @@ void threefish_decrypt_256(struct threefish_key *key_ctx, u64 *input,
b2 -= b3 + k0 + t0;
b3 -= k1 + 8;
-
tmp = b3 ^ b0;
b3 = ror64(tmp, 32);
b0 -= b3;
@@ -924,7 +911,6 @@ void threefish_decrypt_256(struct threefish_key *key_ctx, u64 *input,
b2 -= b3 + k3 + t1;
b3 -= k4 + 6;
-
tmp = b3 ^ b0;
b3 = ror64(tmp, 32);
b0 -= b3;
@@ -993,7 +979,6 @@ void threefish_decrypt_256(struct threefish_key *key_ctx, u64 *input,
b2 -= b3 + k1 + t2;
b3 -= k2 + 4;
-
tmp = b3 ^ b0;
b3 = ror64(tmp, 32);
b0 -= b3;
@@ -1062,7 +1047,6 @@ void threefish_decrypt_256(struct threefish_key *key_ctx, u64 *input,
b2 -= b3 + k4 + t0;
b3 -= k0 + 2;
-
tmp = b3 ^ b0;
b3 = ror64(tmp, 32);
b0 -= b3;
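
The threefish_block.c change only drops a stray blank line inside each unrolled round, but the pattern being tidied is the Threefish MIX step: add one word into its partner, rotate the partner, xor. A standalone sketch of one MIX on a word pair, using the same rol64 shape as the code above (14 and 32 both appear as rotation constants in the rounds shown); the full cipher also injects subkeys every four rounds and permutes the words, which is omitted here:

#include <stdint.h>
#include <stdio.h>

/* Rotate-left on a 64-bit word, as the kernel's rol64() does. */
static inline uint64_t rol64(uint64_t w, unsigned int r)
{
        return (w << r) | (w >> (64 - r));
}

/* One MIX step: the unrolled rounds above are long chains of this
 * with per-position rotation constants.
 */
static void mix(uint64_t *x0, uint64_t *x1, unsigned int r)
{
        *x0 += *x1;
        *x1 = rol64(*x1, r) ^ *x0;
}

int main(void)
{
        uint64_t b0 = 0x0123456789abcdefULL;
        uint64_t b1 = 0xfedcba9876543210ULL;

        mix(&b0, &b1, 14);
        printf("%016llx %016llx\n",
               (unsigned long long)b0, (unsigned long long)b1);
        return 0;
}
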
diff --git a/drivers/staging/slicoss/Kconfig b/drivers/staging/slicoss/Kconfig
deleted file mode 100644
index 5c2a15b42dfe..000000000000
--- a/drivers/staging/slicoss/Kconfig
+++ /dev/null
@@ -1,14 +0,0 @@
-config SLICOSS
- tristate "Alacritech Gigabit IS-NIC support"
- depends on PCI && X86 && NET
- default n
- help
- This driver supports Alacritech's IS-NIC gigabit ethernet cards.
-
- This includes the following devices:
- Mojave cards (single port PCI Gigabit) both copper and fiber
- Oasis cards (single and dual port PCI-x Gigabit) copper and fiber
- Kalahari cards (dual and quad port PCI-e Gigabit) copper and fiber
-
- To compile this driver as a module, choose M here: the module
- will be called slicoss.
diff --git a/drivers/staging/slicoss/Makefile b/drivers/staging/slicoss/Makefile
deleted file mode 100644
index 7bc9e9b9d3ab..000000000000
--- a/drivers/staging/slicoss/Makefile
+++ /dev/null
@@ -1 +0,0 @@
-obj-$(CONFIG_SLICOSS) += slicoss.o
diff --git a/drivers/staging/slicoss/README b/drivers/staging/slicoss/README
deleted file mode 100644
index 4fa50e73ce86..000000000000
--- a/drivers/staging/slicoss/README
+++ /dev/null
@@ -1,7 +0,0 @@
-This driver is supposed to support:
-
- Mojave cards (single port PCI Gigabit) both copper and fiber
- Oasis cards (single and dual port PCI-x Gigabit) copper and fiber
- Kalahari cards (dual and quad port PCI-e Gigabit) copper and fiber
-
-The driver was actually tested on Oasis and Kalahari cards.
diff --git a/drivers/staging/slicoss/TODO b/drivers/staging/slicoss/TODO
deleted file mode 100644
index 9019729b7be6..000000000000
--- a/drivers/staging/slicoss/TODO
+++ /dev/null
@@ -1,36 +0,0 @@
-TODO:
- - move firmware loading to request_firmware()
- - remove direct memory access of structures
- - any remaining sparse and checkpatch.pl warnings
-
- - use net_device_ops
- - use dev->stats rather than adapter->stats
- - don't cast netdev_priv it is already void
- - GET RID OF MACROS
- - work on all architectures
- - without CONFIG_X86_64 confusion
- - do 64 bit correctly
- - don't depend on order of union
- - get rid of ASSERT(), use BUG() instead but only where necessary
- looks like most aren't really useful
- - no new SIOCDEVPRIVATE ioctl allowed
- - don't use module_param for configuring interrupt mitigation
- use ethtool instead
 - reorder code to eliminate use of forward declarations
- - don't keep private linked list of drivers.
- - use PCI_DEVICE()
- - do ethtool correctly using ethtool_ops
- - NAPI?
- - wasted overhead of extra stats
- - state variables for things that are
- easily available and shouldn't be kept in card structure, cardnum, ...
- slotnumber, events, ...
- - volatile == bad design => bad code
- - locking too fine grained, not designed just throw more locks
- at problem
-
-Please send patches to:
- Greg Kroah-Hartman <gregkh@linuxfoundation.org>
-and Cc: Lior Dotan <liodot@gmail.com> and Christopher Harrer
-<charrer@alacritech.com> as well as they are also able to test out any
-changes.
diff --git a/drivers/staging/slicoss/slic.h b/drivers/staging/slicoss/slic.h
deleted file mode 100644
index 420546d43002..000000000000
--- a/drivers/staging/slicoss/slic.h
+++ /dev/null
@@ -1,573 +0,0 @@
-/**************************************************************************
- *
- * Copyright (c) 2000-2002 Alacritech, Inc. All rights reserved.
- *
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials provided
- * with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY ALACRITECH, INC. ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL ALACRITECH, INC. OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
- * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
- * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
- * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- *
- * The views and conclusions contained in the software and documentation
- * are those of the authors and should not be interpreted as representing
- * official policies, either expressed or implied, of Alacritech, Inc.
- *
- **************************************************************************/
-
-/*
- * FILENAME: slic.h
- *
- * This is the base set of header definitions for the SLICOSS driver.
- */
-#ifndef __SLIC_DRIVER_H__
-#define __SLIC_DRIVER_H__
-
-/* firmware stuff */
-#define OASIS_UCODE_VERS_STRING "1.2"
-#define OASIS_UCODE_VERS_DATE "2006/03/27 15:10:37"
-#define OASIS_UCODE_HOSTIF_ID 3
-
-#define MOJAVE_UCODE_VERS_STRING "1.2"
-#define MOJAVE_UCODE_VERS_DATE "2006/03/27 15:12:22"
-#define MOJAVE_UCODE_HOSTIF_ID 3
-
-#define GB_RCVUCODE_VERS_STRING "1.2"
-#define GB_RCVUCODE_VERS_DATE "2006/03/27 15:12:15"
-static u32 OasisRcvUCodeLen = 512;
-static u32 GBRcvUCodeLen = 512;
-#define SECTION_SIZE 65536
-
-#define SLIC_RSPQ_PAGES_GB 10
-#define SLIC_RSPQ_BUFSINPAGE (PAGE_SIZE / SLIC_RSPBUF_SIZE)
-
-struct slic_rspqueue {
- u32 offset;
- u32 pageindex;
- u32 num_pages;
- struct slic_rspbuf *rspbuf;
- u32 *vaddr[SLIC_RSPQ_PAGES_GB];
- dma_addr_t paddr[SLIC_RSPQ_PAGES_GB];
-};
-
-#define SLIC_RCVQ_EXPANSION 1
-#define SLIC_RCVQ_ENTRIES (256 * SLIC_RCVQ_EXPANSION)
-#define SLIC_RCVQ_MINENTRIES (SLIC_RCVQ_ENTRIES / 2)
-#define SLIC_RCVQ_MAX_PROCESS_ISR ((SLIC_RCVQ_ENTRIES * 4))
-#define SLIC_RCVQ_RCVBUFSIZE 2048
-#define SLIC_RCVQ_FILLENTRIES (16 * SLIC_RCVQ_EXPANSION)
-#define SLIC_RCVQ_FILLTHRESH (SLIC_RCVQ_ENTRIES - SLIC_RCVQ_FILLENTRIES)
-
-struct slic_rcvqueue {
- struct sk_buff *head;
- struct sk_buff *tail;
- u32 count;
- u32 size;
- u32 errors;
-};
-
-struct slic_rcvbuf_info {
- u32 id;
- u32 starttime;
- u32 stoptime;
- u32 slicworld;
- u32 lasttime;
- u32 lastid;
-};
-
-/*
- * SLIC Handle structure. Used to restrict handle values to
- * 32 bits by using an index rather than an address.
- * Simplifies ucode in 64-bit systems
- */
-struct slic_handle_word {
- union {
- struct {
- ushort index;
- ushort bottombits; /* to denote num bufs to card */
- } parts;
- u32 whole;
- } handle;
-};
-
-struct slic_handle {
- struct slic_handle_word token; /* token passed between host and card*/
- ushort type;
- void *address; /* actual address of the object*/
- ushort offset;
- struct slic_handle *other_handle;
- struct slic_handle *next;
-};
-
-#define SLIC_HANDLE_FREE 0x0000
-#define SLIC_HANDLE_DATA 0x0001
-#define SLIC_HANDLE_CMD 0x0002
-#define SLIC_HANDLE_CONTEXT 0x0003
-#define SLIC_HANDLE_TEAM 0x0004
-
-#define handle_index handle.parts.index
-#define handle_bottom handle.parts.bottombits
-#define handle_token handle.whole
-
-#define SLIC_HOSTCMD_SIZE 512
-
-struct slic_hostcmd {
- struct slic_host64_cmd cmd64;
- u32 type;
- struct sk_buff *skb;
- u32 paddrl;
- u32 paddrh;
- u32 busy;
- u32 cmdsize;
- ushort numbufs;
- struct slic_handle *pslic_handle;/* handle associated with command */
- struct slic_hostcmd *next;
- struct slic_hostcmd *next_all;
-};
-
-#define SLIC_CMDQ_CMDSINPAGE (PAGE_SIZE / SLIC_HOSTCMD_SIZE)
-#define SLIC_CMD_DUMB 3
-#define SLIC_CMDQ_INITCMDS 256
-#define SLIC_CMDQ_MAXCMDS 256
-#define SLIC_CMDQ_MAXOUTSTAND SLIC_CMDQ_MAXCMDS
-#define SLIC_CMDQ_MAXPAGES (SLIC_CMDQ_MAXCMDS / SLIC_CMDQ_CMDSINPAGE)
-#define SLIC_CMDQ_INITPAGES (SLIC_CMDQ_INITCMDS / SLIC_CMDQ_CMDSINPAGE)
-
-struct slic_cmdqmem {
- int pagecnt;
- u32 *pages[SLIC_CMDQ_MAXPAGES];
- dma_addr_t dma_pages[SLIC_CMDQ_MAXPAGES];
-};
-
-struct slic_cmdqueue {
- struct slic_hostcmd *head;
- struct slic_hostcmd *tail;
- int count;
- spinlock_t lock;
-};
-
-#define SLIC_MAX_CARDS 32
-#define SLIC_MAX_PORTS 4 /* Max # of ports per card */
-
-struct mcast_address {
- unsigned char address[6];
- struct mcast_address *next;
-};
-
-#define CARD_DOWN 0x00000000
-#define CARD_UP 0x00000001
-#define CARD_FAIL 0x00000002
-#define CARD_DIAG 0x00000003
-#define CARD_SLEEP 0x00000004
-
-#define ADAPT_DOWN 0x00
-#define ADAPT_UP 0x01
-#define ADAPT_FAIL 0x02
-#define ADAPT_RESET 0x03
-#define ADAPT_SLEEP 0x04
-
-#define ADAPT_FLAGS_BOOTTIME 0x0001
-#define ADAPT_FLAGS_IS64BIT 0x0002
-#define ADAPT_FLAGS_PENDINGLINKDOWN 0x0004
-#define ADAPT_FLAGS_FIBERMEDIA 0x0008
-#define ADAPT_FLAGS_LOCKS_ALLOCED 0x0010
-#define ADAPT_FLAGS_INT_REGISTERED 0x0020
-#define ADAPT_FLAGS_LOAD_TIMER_SET 0x0040
-#define ADAPT_FLAGS_STATS_TIMER_SET 0x0080
-#define ADAPT_FLAGS_RESET_TIMER_SET 0x0100
-
-#define LINK_DOWN 0x00
-#define LINK_CONFIG 0x01
-#define LINK_UP 0x02
-
-#define LINK_10MB 0x00
-#define LINK_100MB 0x01
-#define LINK_AUTOSPEED 0x02
-#define LINK_1000MB 0x03
-#define LINK_10000MB 0x04
-
-#define LINK_HALFD 0x00
-#define LINK_FULLD 0x01
-#define LINK_AUTOD 0x02
-
-#define MAC_DIRECTED 0x00000001
-#define MAC_BCAST 0x00000002
-#define MAC_MCAST 0x00000004
-#define MAC_PROMISC 0x00000008
-#define MAC_LOOPBACK 0x00000010
-#define MAC_ALLMCAST 0x00000020
-
-#define SLIC_DUPLEX(x) ((x == LINK_FULLD) ? "FDX" : "HDX")
-#define SLIC_SPEED(x) ((x == LINK_100MB) ? "100Mb" : ((x == LINK_1000MB) ?\
- "1000Mb" : " 10Mb"))
-#define SLIC_LINKSTATE(x) ((x == LINK_DOWN) ? "Down" : "Up ")
-#define SLIC_ADAPTER_STATE(x) ((x == ADAPT_UP) ? "UP" : "Down")
-#define SLIC_CARD_STATE(x) ((x == CARD_UP) ? "UP" : "Down")
-
-struct slic_iface_stats {
- /*
- * Stats
- */
- u64 xmt_bytes;
- u64 xmt_ucast;
- u64 xmt_mcast;
- u64 xmt_bcast;
- u64 xmt_errors;
- u64 xmt_discards;
- u64 xmit_collisions;
- u64 xmit_excess_xmit_collisions;
- u64 rcv_bytes;
- u64 rcv_ucast;
- u64 rcv_mcast;
- u64 rcv_bcast;
- u64 rcv_errors;
- u64 rcv_discards;
-};
-
-struct sliccp_stats {
- u64 xmit_tcp_segs;
- u64 xmit_tcp_bytes;
- u64 rcv_tcp_segs;
- u64 rcv_tcp_bytes;
-};
-
-struct slicnet_stats {
- struct sliccp_stats tcp;
- struct slic_iface_stats iface;
-};
-
-#define SLIC_LOADTIMER_PERIOD 1
-#define SLIC_INTAGG_DEFAULT 200
-#define SLIC_LOAD_0 0
-#define SLIC_INTAGG_0 0
-#define SLIC_LOAD_1 8000
-#define SLIC_LOAD_2 10000
-#define SLIC_LOAD_3 12000
-#define SLIC_LOAD_4 14000
-#define SLIC_LOAD_5 16000
-#define SLIC_INTAGG_1 50
-#define SLIC_INTAGG_2 100
-#define SLIC_INTAGG_3 150
-#define SLIC_INTAGG_4 200
-#define SLIC_INTAGG_5 250
-#define SLIC_LOAD_1GB 3000
-#define SLIC_LOAD_2GB 6000
-#define SLIC_LOAD_3GB 12000
-#define SLIC_LOAD_4GB 24000
-#define SLIC_LOAD_5GB 48000
-#define SLIC_INTAGG_1GB 50
-#define SLIC_INTAGG_2GB 75
-#define SLIC_INTAGG_3GB 100
-#define SLIC_INTAGG_4GB 100
-#define SLIC_INTAGG_5GB 100
-
-struct ether_header {
- unsigned char ether_dhost[6];
- unsigned char ether_shost[6];
- ushort ether_type;
-};
-
-struct sliccard {
- uint busnumber;
- uint slotnumber;
- uint state;
- uint cardnum;
- uint card_size;
- uint adapters_activated;
- uint adapters_allocated;
- uint adapters_sleeping;
- uint gennumber;
- u32 events;
- u32 loadlevel_current;
- u32 load;
- uint reset_in_progress;
- u32 pingstatus;
- u32 bad_pingstatus;
- struct timer_list loadtimer;
- u32 loadtimerset;
- uint config_set;
- struct slic_config config;
- struct adapter *master;
- struct adapter *adapter[SLIC_MAX_PORTS];
- struct sliccard *next;
- u32 error_interrupts;
- u32 error_rmiss_interrupts;
- u32 rcv_interrupts;
- u32 xmit_interrupts;
- u32 num_isrs;
- u32 false_interrupts;
- u32 max_isr_rcvs;
- u32 max_isr_xmits;
- u32 rcv_interrupt_yields;
- u32 tx_packets;
- u32 debug_ix;
- ushort reg_type[32];
- ushort reg_offset[32];
- u32 reg_value[32];
- u32 reg_valueh[32];
-};
-
-#define NUM_CFG_SPACES 2
-#define NUM_CFG_REGS 64
-#define NUM_CFG_REG_ULONGS (NUM_CFG_REGS / sizeof(u32))
-
-struct physcard {
- struct adapter *adapter[SLIC_MAX_PORTS];
- struct physcard *next;
- uint adapters_allocd;
-
-/*
- * the following is not currently needed
- * u32 bridge_busnum;
- * u32 bridge_cfg[NUM_CFG_SPACES][NUM_CFG_REG_ULONGS];
- */
-};
-
-struct base_driver {
- spinlock_t driver_lock;
- u32 num_slic_cards;
- u32 num_slic_ports;
- u32 num_slic_ports_active;
- u32 dynamic_intagg;
- struct sliccard *slic_card;
- struct physcard *phys_card;
- uint cardnuminuse[SLIC_MAX_CARDS];
-};
-
-struct slic_stats {
- /* xmit stats */
- u64 xmit_tcp_bytes;
- u64 xmit_tcp_segs;
- u64 xmit_bytes;
- u64 xmit_collisions;
- u64 xmit_unicasts;
- u64 xmit_other_error;
- u64 xmit_excess_collisions;
- /* rcv stats */
- u64 rcv_tcp_bytes;
- u64 rcv_tcp_segs;
- u64 rcv_bytes;
- u64 rcv_unicasts;
- u64 rcv_other_error;
- u64 rcv_drops;
-};
-
-struct slic_shmem_data {
- u32 isr;
- u32 lnkstatus;
- struct slic_stats stats;
-};
-
-struct slic_shmemory {
- dma_addr_t isr_phaddr;
- dma_addr_t lnkstatus_phaddr;
- dma_addr_t stats_phaddr;
- struct slic_shmem_data __iomem *shmem_data;
-};
-
-struct slic_upr {
- uint adapter;
- u32 upr_request;
- u32 upr_data;
- u32 upr_data_h;
- u32 upr_buffer;
- u32 upr_buffer_h;
- struct slic_upr *next;
-};
-
-struct slic_ifevents {
- uint oflow802;
- uint uflow802;
- uint Tprtoflow;
- uint rcvearly;
- uint Bufov;
- uint Carre;
- uint Longe;
- uint Invp;
- uint Crc;
- uint Drbl;
- uint Code;
- uint IpHlen;
- uint IpLen;
- uint IpCsum;
- uint TpCsum;
- uint TpHlen;
-};
-
-struct adapter {
- void *ifp;
- struct sliccard *card;
- uint port;
- struct physcard *physcard;
- uint physport;
- uint cardindex;
- uint card_size;
- uint chipid;
- struct net_device *netdev;
- spinlock_t adapter_lock;
- spinlock_t reset_lock;
- struct pci_dev *pcidev;
- uint busnumber;
- uint slotnumber;
- uint functionnumber;
- ushort vendid;
- ushort devid;
- ushort subsysid;
- u32 irq;
- u32 drambase;
- u32 dramlength;
- uint queues_initialized;
- uint allocated;
- uint activated;
- u32 intrregistered;
- uint isp_initialized;
- uint gennumber;
- struct slic_shmemory shmem;
- dma_addr_t phys_shmem;
- void __iomem *regs;
- unsigned char state;
- unsigned char linkstate;
- unsigned char linkspeed;
- unsigned char linkduplex;
- uint flags;
- unsigned char macaddr[6];
- unsigned char currmacaddr[6];
- u32 macopts;
- ushort devflags_prev;
- u64 mcastmask;
- struct mcast_address *mcastaddrs;
- struct slic_upr *upr_list;
- uint upr_busy;
- struct timer_list pingtimer;
- u32 pingtimerset;
- struct timer_list loadtimer;
- u32 loadtimerset;
- spinlock_t upr_lock;
- spinlock_t bit64reglock;
- struct slic_rspqueue rspqueue;
- struct slic_rcvqueue rcvqueue;
- struct slic_cmdqueue cmdq_free;
- struct slic_cmdqueue cmdq_done;
- struct slic_cmdqueue cmdq_all;
- struct slic_cmdqmem cmdqmem;
- /*
- * SLIC Handles
- */
- /* Object handles*/
- struct slic_handle slic_handles[SLIC_CMDQ_MAXCMDS + 1];
- /* Free object handles*/
- struct slic_handle *pfree_slic_handles;
- /* Object handle list lock*/
- spinlock_t handle_lock;
- ushort slic_handle_ix;
-
- u32 xmitq_full;
- u32 all_reg_writes;
- u32 icr_reg_writes;
- u32 isr_reg_writes;
- u32 error_interrupts;
- u32 error_rmiss_interrupts;
- u32 rx_errors;
- u32 rcv_drops;
- u32 rcv_interrupts;
- u32 xmit_interrupts;
- u32 linkevent_interrupts;
- u32 upr_interrupts;
- u32 num_isrs;
- u32 false_interrupts;
- u32 tx_packets;
- u32 xmit_completes;
- u32 tx_drops;
- u32 rcv_broadcasts;
- u32 rcv_multicasts;
- u32 rcv_unicasts;
- u32 max_isr_rcvs;
- u32 max_isr_xmits;
- u32 rcv_interrupt_yields;
- u32 intagg_period;
- u32 intagg_delay;
- u32 dynamic_intagg;
- struct inicpm_state *inicpm_info;
- void *pinicpm_info;
- struct slic_ifevents if_events;
- struct slic_stats inicstats_prev;
- struct slicnet_stats slic_stats;
-};
-
-static inline u32 slic_read32(struct adapter *adapter, unsigned int reg)
-{
- return ioread32(adapter->regs + reg);
-}
-
-static inline void slic_write32(struct adapter *adapter, unsigned int reg,
- u32 val)
-{
- iowrite32(val, adapter->regs + reg);
-}
-
-static inline void slic_write64(struct adapter *adapter, unsigned int reg,
- u32 val, u32 hiaddr)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&adapter->bit64reglock, flags);
- slic_write32(adapter, SLIC_REG_ADDR_UPPER, hiaddr);
- slic_write32(adapter, reg, val);
- mmiowb();
- spin_unlock_irqrestore(&adapter->bit64reglock, flags);
-}
-
-static inline void slic_flush_write(struct adapter *adapter)
-{
- ioread32(adapter->regs + SLIC_REG_HOSTID);
-}
-
-#define UPDATE_STATS(largestat, newstat, oldstat) \
-{ \
- if ((newstat) < (oldstat)) \
- (largestat) += ((newstat) + (0xFFFFFFFF - oldstat + 1)); \
- else \
- (largestat) += ((newstat) - (oldstat)); \
-}
-
-#define UPDATE_STATS_GB(largestat, newstat, oldstat) \
-{ \
- (largestat) += ((newstat) - (oldstat)); \
-}
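
The UPDATE_STATS macro deleted here handles free-running 32-bit hardware counters that wrap: when the new sample is below the old one, the counter rolled over, so the span up to 0xFFFFFFFF is added back. The same idea as a plain function, easier to read than the macro (names are mine, not the driver's):

#include <stdint.h>
#include <stdio.h>

/* Accumulate a free-running 32-bit hardware counter into a 64-bit
 * total, accounting for at most one wraparound between samples.
 */
static void update_stat(uint64_t *total, uint32_t new, uint32_t old)
{
        if (new < old)
                *total += (uint64_t)new + (0xFFFFFFFFu - old + 1);
        else
                *total += new - old;
}

int main(void)
{
        uint64_t total = 0;

        update_stat(&total, 10, 0xFFFFFFF0u);   /* wrapped: +26 */
        update_stat(&total, 40, 10);            /* normal:  +30 */
        printf("%llu\n", (unsigned long long)total);    /* 56 */
        return 0;
}
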
-
-#if BITS_PER_LONG == 64
-#define SLIC_GET_ADDR_LOW(_addr) (u32)((u64)(_addr) & \
- 0x00000000FFFFFFFF)
-#define SLIC_GET_ADDR_HIGH(_addr) (u32)(((u64)(_addr) >> 32) & \
- 0x00000000FFFFFFFF)
-#elif BITS_PER_LONG == 32
-#define SLIC_GET_ADDR_LOW(_addr) (u32)(_addr)
-#define SLIC_GET_ADDR_HIGH(_addr) (u32)0
-#else
-#error BITS_PER_LONG must be 32 or 64
-#endif
-
-#define FLUSH true
-#define DONT_FLUSH false
-
-#define SIOCSLICSETINTAGG (SIOCDEVPRIVATE + 10)
-
-#endif /* __SLIC_DRIVER_H__ */
diff --git a/drivers/staging/slicoss/slichw.h b/drivers/staging/slicoss/slichw.h
deleted file mode 100644
index 49cb91aa02bb..000000000000
--- a/drivers/staging/slicoss/slichw.h
+++ /dev/null
@@ -1,652 +0,0 @@
-/**************************************************************************
- *
- * Copyright (c) 2000-2002 Alacritech, Inc. All rights reserved.
- *
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials provided
- * with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY ALACRITECH, INC. ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL ALACRITECH, INC. OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
- * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
- * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
- * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- *
- * The views and conclusions contained in the software and documentation
- * are those of the authors and should not be interpreted as representing
- * official policies, either expressed or implied, of Alacritech, Inc.
- *
- **************************************************************************/
-
-/*
- * FILENAME: slichw.h
- *
- * This header file contains definitions that are common to our hardware.
- */
-#ifndef __SLICHW_H__
-#define __SLICHW_H__
-
-#define PCI_VENDOR_ID_ALACRITECH 0x139A
-#define SLIC_1GB_DEVICE_ID 0x0005
-#define SLIC_2GB_DEVICE_ID 0x0007 /* Oasis Device ID */
-
-#define SLIC_1GB_CICADA_SUBSYS_ID 0x0008
-
-#define SLIC_NBR_MACS 4
-
-#define SLIC_RCVBUF_SIZE 2048
-#define SLIC_RCVBUF_HEADSIZE 34
-#define SLIC_RCVBUF_TAILSIZE 0
-#define SLIC_RCVBUF_DATASIZE (SLIC_RCVBUF_SIZE - \
- (SLIC_RCVBUF_HEADSIZE + \
- SLIC_RCVBUF_TAILSIZE))
-
-#define VGBSTAT_XPERR 0x40000000
-#define VGBSTAT_XERRSHFT 25
-#define VGBSTAT_XCSERR 0x23
-#define VGBSTAT_XUFLOW 0x22
-#define VGBSTAT_XHLEN 0x20
-#define VGBSTAT_NETERR 0x01000000
-#define VGBSTAT_NERRSHFT 16
-#define VGBSTAT_NERRMSK 0x1ff
-#define VGBSTAT_NCSERR 0x103
-#define VGBSTAT_NUFLOW 0x102
-#define VGBSTAT_NHLEN 0x100
-#define VGBSTAT_LNKERR 0x00000080
-#define VGBSTAT_LERRMSK 0xff
-#define VGBSTAT_LDEARLY 0x86
-#define VGBSTAT_LBOFLO 0x85
-#define VGBSTAT_LCODERR 0x84
-#define VGBSTAT_LDBLNBL 0x83
-#define VGBSTAT_LCRCERR 0x82
-#define VGBSTAT_LOFLO 0x81
-#define VGBSTAT_LUFLO 0x80
-#define IRHDDR_FLEN_MSK 0x0000ffff
-#define IRHDDR_SVALID 0x80000000
-#define IRHDDR_ERR 0x10000000
-#define VRHSTAT_802OE 0x80000000
-#define VRHSTAT_TPOFLO 0x10000000
-#define VRHSTATB_802UE 0x80000000
-#define VRHSTATB_RCVE 0x40000000
-#define VRHSTATB_BUFF 0x20000000
-#define VRHSTATB_CARRE 0x08000000
-#define VRHSTATB_LONGE 0x02000000
-#define VRHSTATB_PREA 0x01000000
-#define VRHSTATB_CRC 0x00800000
-#define VRHSTATB_DRBL 0x00400000
-#define VRHSTATB_CODE 0x00200000
-#define VRHSTATB_TPCSUM 0x00100000
-#define VRHSTATB_TPHLEN 0x00080000
-#define VRHSTATB_IPCSUM 0x00040000
-#define VRHSTATB_IPLERR 0x00020000
-#define VRHSTATB_IPHERR 0x00010000
-#define SLIC_MAX64_BCNT 23
-#define SLIC_MAX32_BCNT 26
-#define IHCMD_XMT_REQ 0x01
-#define IHFLG_IFSHFT 2
-#define SLIC_RSPBUF_SIZE 32
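
The VGBSTAT_* values above encode multi-bit error fields, not independent
flags. A small decode sketch for the network-error field, using only the
shift and mask constants defined above (the helper itself is illustrative):

	static u32 vgbstat_net_error(u32 vgbstat)
	{
		/*
		 * meaningful only when VGBSTAT_NETERR is set; the result
		 * compares against codes such as VGBSTAT_NCSERR
		 */
		return (vgbstat >> VGBSTAT_NERRSHFT) & VGBSTAT_NERRMSK;
	}
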
-
-#define SLIC_RESET_MAGIC 0xDEAD
-#define ICR_INT_OFF 0
-#define ICR_INT_ON 1
-#define ICR_INT_MASK 2
-
-#define ISR_ERR 0x80000000
-#define ISR_RCV 0x40000000
-#define ISR_CMD 0x20000000
-#define ISR_IO 0x60000000
-#define ISR_UPC 0x10000000
-#define ISR_LEVENT 0x08000000
-#define ISR_RMISS 0x02000000
-#define ISR_UPCERR 0x01000000
-#define ISR_XDROP 0x00800000
-#define ISR_UPCBSY 0x00020000
-#define ISR_EVMSK 0xffff0000
-#define ISR_PINGMASK 0x00700000
-#define ISR_PINGDSMASK 0x00710000
-#define ISR_UPCMASK 0x11000000
-#define SLIC_WCS_START 0x80000000
-#define SLIC_WCS_COMPARE 0x40000000
-#define SLIC_RCVWCS_BEGIN 0x40000000
-#define SLIC_RCVWCS_FINISH 0x80000000
-#define SLIC_PM_MAXPATTERNS 6
-#define SLIC_PM_PATTERNSIZE 128
-#define SLIC_PMCAPS_WAKEONLAN 0x00000001
-#define MIICR_REG_PCR 0x00000000
-#define MIICR_REG_4 0x00040000
-#define MIICR_REG_9 0x00090000
-#define MIICR_REG_16 0x00100000
-#define PCR_RESET 0x8000
-#define PCR_POWERDOWN 0x0800
-#define PCR_SPEED_100 0x2000
-#define PCR_SPEED_1000 0x0040
-#define PCR_AUTONEG 0x1000
-#define PCR_AUTONEG_RST 0x0200
-#define PCR_DUPLEX_FULL 0x0100
-#define PSR_LINKUP 0x0004
-
-#define PAR_ADV100FD 0x0100
-#define PAR_ADV100HD 0x0080
-#define PAR_ADV10FD 0x0040
-#define PAR_ADV10HD 0x0020
-#define PAR_ASYMPAUSE 0x0C00
-#define PAR_802_3 0x0001
-
-#define PAR_ADV1000XFD 0x0020
-#define PAR_ADV1000XHD 0x0040
-#define PAR_ASYMPAUSE_FIBER 0x0180
-
-#define PGC_ADV1000FD 0x0200
-#define PGC_ADV1000HD 0x0100
-#define SEEQ_LINKFAIL 0x4000
-#define SEEQ_SPEED 0x0080
-#define SEEQ_DUPLEX 0x0040
-#define TDK_DUPLEX 0x0800
-#define TDK_SPEED 0x0400
-#define MRV_REG16_XOVERON 0x0068
-#define MRV_REG16_XOVEROFF 0x0008
-#define MRV_SPEED_1000 0x8000
-#define MRV_SPEED_100 0x4000
-#define MRV_SPEED_10 0x0000
-#define MRV_FULLDUPLEX 0x2000
-#define MRV_LINKUP 0x0400
-
-#define GIG_LINKUP 0x0001
-#define GIG_FULLDUPLEX 0x0002
-#define GIG_SPEED_MASK 0x000C
-#define GIG_SPEED_1000 0x0008
-#define GIG_SPEED_100 0x0004
-#define GIG_SPEED_10 0x0000
-
-#define MCR_RESET 0x80000000
-#define MCR_CRCEN 0x40000000
-#define MCR_FULLD 0x10000000
-#define MCR_PAD 0x02000000
-#define MCR_RETRYLATE 0x01000000
-#define MCR_BOL_SHIFT 21
-#define MCR_IPG1_SHIFT 14
-#define MCR_IPG2_SHIFT 7
-#define MCR_IPG3_SHIFT 0
-#define GMCR_RESET 0x80000000
-#define GMCR_GBIT 0x20000000
-#define GMCR_FULLD 0x10000000
-#define GMCR_GAPBB_SHIFT 14
-#define GMCR_GAPR1_SHIFT 7
-#define GMCR_GAPR2_SHIFT 0
-#define GMCR_GAPBB_1000 0x60
-#define GMCR_GAPR1_1000 0x2C
-#define GMCR_GAPR2_1000 0x40
-#define GMCR_GAPBB_100 0x70
-#define GMCR_GAPR1_100 0x2C
-#define GMCR_GAPR2_100 0x40
-#define XCR_RESET 0x80000000
-#define XCR_XMTEN 0x40000000
-#define XCR_PAUSEEN 0x20000000
-#define XCR_LOADRNG 0x10000000
-#define RCR_RESET 0x80000000
-#define RCR_RCVEN 0x40000000
-#define RCR_RCVALL 0x20000000
-#define RCR_RCVBAD 0x10000000
-#define RCR_CTLEN 0x08000000
-#define RCR_ADDRAEN 0x02000000
-#define GXCR_RESET 0x80000000
-#define GXCR_XMTEN 0x40000000
-#define GXCR_PAUSEEN 0x20000000
-#define GRCR_RESET 0x80000000
-#define GRCR_RCVEN 0x40000000
-#define GRCR_RCVALL 0x20000000
-#define GRCR_RCVBAD 0x10000000
-#define GRCR_CTLEN 0x08000000
-#define GRCR_ADDRAEN 0x02000000
-#define GRCR_HASHSIZE_SHIFT 17
-#define GRCR_HASHSIZE 14
-
-#define SLIC_EEPROM_ID 0xA5A5
-#define SLIC_SRAM_SIZE2GB (64 * 1024)
-#define SLIC_SRAM_SIZE1GB (32 * 1024)
-#define SLIC_HOSTID_DEFAULT 0xFFFF /* uninitialized hostid */
-#define SLIC_NBR_MACS 4
-
-struct slic_rcvbuf {
- u8 pad1[6];
- u16 pad2;
- u32 pad3;
- u32 pad4;
- u32 buffer;
- u32 length;
- u32 status;
- u32 pad5;
- u16 pad6;
- u8 data[SLIC_RCVBUF_DATASIZE];
-};
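
A sanity sketch relating the structure above to the SLIC_RCVBUF_* constants:
the pad and descriptor fields account for exactly SLIC_RCVBUF_HEADSIZE (34)
bytes before data[], so the whole structure fills one SLIC_RCVBUF_SIZE
buffer. Assuming the layout comes out as written here, that could be asserted
at compile time:

	static inline void slic_rcvbuf_layout_check(void)
	{
		BUILD_BUG_ON(offsetof(struct slic_rcvbuf, data) !=
			     SLIC_RCVBUF_HEADSIZE);
		BUILD_BUG_ON(sizeof(struct slic_rcvbuf) != SLIC_RCVBUF_SIZE);
	}
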
-
-struct slic_hddr_wds {
- union {
- struct {
- u32 frame_status;
- u32 frame_status_b;
- u32 time_stamp;
- u32 checksum;
- } hdrs_14port;
- struct {
- u32 frame_status;
- u16 ByteCnt;
- u16 TpChksum;
- u16 CtxHash;
- u16 MacHash;
- u32 BufLnk;
- } hdrs_gbit;
- } u0;
-};
-
-#define frame_status14 u0.hdrs_14port.frame_status
-#define frame_status_b14 u0.hdrs_14port.frame_status_b
-#define frame_statusGB u0.hdrs_gbit.frame_status
-
-struct slic_host64sg {
- u32 paddrl;
- u32 paddrh;
- u32 length;
-};
-
-struct slic_host64_cmd {
- u32 hosthandle;
- u32 RSVD;
- u8 command;
- u8 flags;
- union {
- u16 rsv1;
- u16 rsv2;
- } u0;
- union {
- struct {
- u32 totlen;
- struct slic_host64sg bufs[SLIC_MAX64_BCNT];
- } slic_buffers;
- } u;
-};
-
-struct slic_rspbuf {
- u32 hosthandle;
- u32 pad0;
- u32 pad1;
- u32 status;
- u32 pad2[4];
-};
-
-/* Reset Register */
-#define SLIC_REG_RESET 0x0000
-/* Interrupt Control Register */
-#define SLIC_REG_ICR 0x0008
-/* Interrupt status pointer */
-#define SLIC_REG_ISP 0x0010
-/* Interrupt status */
-#define SLIC_REG_ISR 0x0018
-/*
- * Header buffer address reg
- * 31-8 - phy addr of set of contiguous hdr buffers
- * 7-0 - number of buffers passed
- * Buffers are 256 bytes long on 256-byte boundaries.
- */
-#define SLIC_REG_HBAR 0x0020
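
Sketch of the packing the comment above describes (helper name invented): a
256-byte-aligned physical address occupies bits 31-8, leaving the low byte to
carry the number of buffers handed to the card:

	static u32 example_hbar_value(u32 paddr, u8 nbufs)
	{
		/* paddr is assumed 256-byte aligned, so its low byte is 0 */
		return paddr | nbufs;
	}
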
-/*
- * Data buffer handle & address reg
- * 4 sets of registers; Buffers are 2K bytes long, 2 per 4K page.
- */
-#define SLIC_REG_DBAR 0x0028
-/*
- * Xmt Cmd buf addr regs.
- * 1 per XMT interface
- * 31-5 - phy addr of host command buffer
- * 4-0 - length of cmd in multiples of 32 bytes
- * Buffers are 32 bytes up to 512 bytes long
- */
-#define SLIC_REG_CBAR 0x0030
-/* Write control store */
-#define SLIC_REG_WCS 0x0034
-/*
- * Response buffer address reg.
- * 31-8 - phy addr of set of contiguous response buffers
- * 7-0 - number of buffers passed
- * Buffers are 32 bytes long on 32-byte boundaries.
- */
-#define SLIC_REG_RBAR 0x0038
-/* Read statistics (UPR) */
-#define SLIC_REG_RSTAT 0x0040
-/* Read link status */
-#define SLIC_REG_LSTAT 0x0048
-/* Write Mac Config */
-#define SLIC_REG_WMCFG 0x0050
-/* Write phy register */
-#define SLIC_REG_WPHY 0x0058
-/* Rcv Cmd buf addr reg */
-#define SLIC_REG_RCBAR 0x0060
-/* Read SLIC Config */
-#define SLIC_REG_RCONFIG 0x0068
-/* Interrupt aggregation time */
-#define SLIC_REG_INTAGG 0x0070
-/* Write XMIT config reg */
-#define SLIC_REG_WXCFG 0x0078
-/* Write RCV config reg */
-#define SLIC_REG_WRCFG 0x0080
-/* Write rcv addr a low */
-#define SLIC_REG_WRADDRAL 0x0088
-/* Write rcv addr a high */
-#define SLIC_REG_WRADDRAH 0x0090
-/* Write rcv addr b low */
-#define SLIC_REG_WRADDRBL 0x0098
-/* Write rcv addr b high */
-#define SLIC_REG_WRADDRBH 0x00a0
-/* Low bits of mcast mask */
-#define SLIC_REG_MCASTLOW 0x00a8
-/* High bits of mcast mask */
-#define SLIC_REG_MCASTHIGH 0x00b0
-/* Ping the card */
-#define SLIC_REG_PING 0x00b8
-/* Dump command */
-#define SLIC_REG_DUMP_CMD 0x00c0
-/* Dump data pointer */
-#define SLIC_REG_DUMP_DATA 0x00c8
-/* Read card's pci_status register */
-#define SLIC_REG_PCISTATUS 0x00d0
-/* Write hostid field */
-#define SLIC_REG_WRHOSTID 0x00d8
-/* Put card in a low power state */
-#define SLIC_REG_LOW_POWER 0x00e0
-/* Force slic into quiescent state before soft reset */
-#define SLIC_REG_QUIESCE 0x00e8
-/* Reset interface queues */
-#define SLIC_REG_RESET_IFACE 0x00f0
-/*
- * Register is only written when it has changed.
- * Bits 63-32 for host i/f addrs.
- */
-#define SLIC_REG_ADDR_UPPER 0x00f8
-/* 64 bit Header buffer address reg */
-#define SLIC_REG_HBAR64 0x0100
-/* 64 bit Data buffer handle & address reg */
-#define SLIC_REG_DBAR64 0x0108
-/* 64 bit Xmt Cmd buf addr regs. */
-#define SLIC_REG_CBAR64 0x0110
-/* 64 bit Response buffer address reg.*/
-#define SLIC_REG_RBAR64 0x0118
-/* 64 bit Rcv Cmd buf addr reg*/
-#define SLIC_REG_RCBAR64 0x0120
-/* Read statistics (64 bit UPR) */
-#define SLIC_REG_RSTAT64 0x0128
-/* Download Gigabit RCV sequencer ucode */
-#define SLIC_REG_RCV_WCS 0x0130
-/* Write VlanId field */
-#define SLIC_REG_WRVLANID 0x0138
-/* Read Transformer info */
-#define SLIC_REG_READ_XF_INFO 0x0140
-/* Write Transformer info */
-#define SLIC_REG_WRITE_XF_INFO 0x0148
-/* Write card ticks per second */
-#define SLIC_REG_TICKS_PER_SEC 0x0170
-
-#define SLIC_REG_HOSTID 0x1554
-
-enum UPR_REQUEST {
- SLIC_UPR_STATS,
- SLIC_UPR_RLSR,
- SLIC_UPR_WCFG,
- SLIC_UPR_RCONFIG,
- SLIC_UPR_RPHY,
- SLIC_UPR_ENLB,
- SLIC_UPR_ENCT,
- SLIC_UPR_PDWN,
- SLIC_UPR_PING,
- SLIC_UPR_DUMP,
-};
-
-struct inicpm_wakepattern {
- u32 patternlength;
- u8 pattern[SLIC_PM_PATTERNSIZE];
- u8 mask[SLIC_PM_PATTERNSIZE];
-};
-
-struct inicpm_state {
- u32 powercaps;
- u32 powerstate;
- u32 wake_linkstatus;
- u32 wake_magicpacket;
- u32 wake_framepattern;
- struct inicpm_wakepattern wakepattern[SLIC_PM_MAXPATTERNS];
-};
-
-struct slicpm_packet_pattern {
- u32 priority;
- u32 reserved;
- u32 masksize;
- u32 patternoffset;
- u32 patternsize;
- u32 patternflags;
-};
-
-enum slicpm_power_state {
- slicpm_state_unspecified = 0,
- slicpm_state_d0,
- slicpm_state_d1,
- slicpm_state_d2,
- slicpm_state_d3,
- slicpm_state_maximum
-};
-
-struct slicpm_wakeup_capabilities {
- enum slicpm_power_state min_magic_packet_wakeup;
- enum slicpm_power_state min_pattern_wakeup;
- enum slicpm_power_state min_link_change_wakeup;
-};
-
-struct slic_pnp_capabilities {
- u32 flags;
- struct slicpm_wakeup_capabilities wakeup_capabilities;
-};
-
-struct slic_config_mac {
- u8 macaddrA[6];
-};
-
-#define ATK_FRU_FORMAT 0x00
-#define VENDOR1_FRU_FORMAT 0x01
-#define VENDOR2_FRU_FORMAT 0x02
-#define VENDOR3_FRU_FORMAT 0x03
-#define VENDOR4_FRU_FORMAT 0x04
-#define NO_FRU_FORMAT 0xFF
-
-struct atk_fru {
- u8 assembly[6];
- u8 revision[2];
- u8 serial[14];
- u8 pad[3];
-};
-
-struct vendor1_fru {
- u8 commodity;
- u8 assembly[4];
- u8 revision[2];
- u8 supplier[2];
- u8 date[2];
- u8 sequence[3];
- u8 pad[13];
-};
-
-struct vendor2_fru {
- u8 part[8];
- u8 supplier[5];
- u8 date[3];
- u8 sequence[4];
- u8 pad[7];
-};
-
-struct vendor3_fru {
- u8 assembly[6];
- u8 revision[2];
- u8 serial[14];
- u8 pad[3];
-};
-
-struct vendor4_fru {
- u8 number[8];
- u8 part[8];
- u8 version[8];
- u8 pad[3];
-};
-
-union oemfru {
- struct vendor1_fru vendor1_fru;
- struct vendor2_fru vendor2_fru;
- struct vendor3_fru vendor3_fru;
- struct vendor4_fru vendor4_fru;
-};
-
-/*
- * SLIC EEPROM structure for Mojave
- */
-struct slic_eeprom {
- u16 Id; /* 00 EEPROM/FLASH Magic code 'A5A5'*/
- u16 EecodeSize; /* 01 Size of EEPROM Codes (bytes * 4)*/
- u16 FlashSize; /* 02 Flash size */
- u16 EepromSize; /* 03 EEPROM Size */
- u16 VendorId; /* 04 Vendor ID */
- u16 DeviceId; /* 05 Device ID */
- u8 RevisionId; /* 06 Revision ID */
- u8 ClassCode[3]; /* 07 Class Code */
- u8 DbgIntPin; /* 08 Debug Interrupt pin */
- u8 NetIntPin0; /* Network Interrupt Pin */
- u8 MinGrant; /* 09 Minimum grant */
- u8 MaxLat; /* Maximum Latency */
- u16 PciStatus; /* 10 PCI Status */
- u16 SubSysVId; /* 11 Subsystem Vendor Id */
- u16 SubSysId; /* 12 Subsystem ID */
- u16 DbgDevId; /* 13 Debug Device Id */
- u16 DramRomFn; /* 14 Dram/Rom function */
- u16 DSize2Pci; /* 15 DRAM size to PCI (bytes * 64K) */
- u16 RSize2Pci; /* 16 ROM extension size to PCI (bytes * 4k) */
- u8 NetIntPin1; /* 17 Network Interface Pin 1
- * (simba/leone only)
- */
- u8 NetIntPin2; /* Network Interface Pin 2 (simba/leone only)*/
- union {
- u8 NetIntPin3; /* 18 Network Interface Pin 3 (simba only) */
- u8 FreeTime; /* FreeTime setting (leone/mojave only) */
- } u1;
- u8 TBIctl; /* 10-bit interface control (Mojave only) */
- u16 DramSize; /* 19 DRAM size (bytes * 64k) */
- union {
- struct {
- /* Mac Interface Specific portions */
- struct slic_config_mac MacInfo[SLIC_NBR_MACS];
- } mac; /* MAC access for all boards */
- struct {
- /* use above struct for MAC access */
- struct slic_config_mac pad[SLIC_NBR_MACS - 1];
- u16 DeviceId2; /* Device ID for 2nd PCI function */
- u8 IntPin2; /* Interrupt pin for 2nd PCI function */
- u8 ClassCode2[3]; /* Class Code for 2nd PCI function */
- } mojave; /* 2nd function access for gigabit board */
- } u2;
- u16 CfgByte6; /* Config Byte 6 */
- u16 PMECapab; /* Power Mgment capabilities */
- u16 NwClkCtrls; /* NetworkClockControls */
- u8 FruFormat; /* Alacritech FRU format type */
- struct atk_fru AtkFru; /* Alacritech FRU information */
- u8 OemFruFormat; /* optional OEM FRU format type */
- union oemfru OemFru; /* optional OEM FRU information */
- u8 Pad[4]; /* Pad to 128 bytes - includes 2 cksum bytes
- * (if OEM FRU info exists) and two unusable
- * bytes at the end
- */
-};
-
-/* SLIC EEPROM structure for Oasis */
-struct oslic_eeprom {
- u16 Id; /* 00 EEPROM/FLASH Magic code 'A5A5' */
- u16 EecodeSize; /* 01 Size of EEPROM Codes (bytes * 4)*/
- u16 FlashConfig0; /* 02 Flash Config for SPI device 0 */
- u16 FlashConfig1; /* 03 Flash Config for SPI device 1 */
- u16 VendorId; /* 04 Vendor ID */
- u16 DeviceId; /* 05 Device ID (function 0) */
- u8 RevisionId; /* 06 Revision ID */
- u8 ClassCode[3]; /* 07 Class Code for PCI function 0 */
- u8 IntPin1; /* 08 Interrupt pin for PCI function 1 */
- u8 ClassCode2[3]; /* 09 Class Code for PCI function 1 */
- u8 IntPin2; /* 10 Interrupt pin for PCI function 2 */
- u8 IntPin0; /* Interrupt pin for PCI function 0 */
- u8 MinGrant; /* 11 Minimum grant */
- u8 MaxLat; /* Maximum Latency */
- u16 SubSysVId; /* 12 Subsystem Vendor Id */
- u16 SubSysId; /* 13 Subsystem ID */
- u16 FlashSize; /* 14 Flash size (bytes / 4K) */
- u16 DSize2Pci; /* 15 DRAM size to PCI (bytes / 64K) */
- u16 RSize2Pci; /* 16 Flash (ROM extension) size to PCI
- * (bytes / 4K)
- */
- u16 DeviceId1; /* 17 Device Id (function 1) */
- u16 DeviceId2; /* 18 Device Id (function 2) */
- u16 CfgByte6; /* 19 Device Status Config Bytes 6-7 */
- u16 PMECapab; /* 20 Power Mgment capabilities */
- u8 MSICapab; /* 21 MSI capabilities */
- u8 ClockDivider; /* Clock divider */
- u16 PciStatusLow; /* 22 PCI Status bits 15:0 */
- u16 PciStatusHigh; /* 23 PCI Status bits 31:16 */
- u16 DramConfigLow; /* 24 DRAM Configuration bits 15:0 */
- u16 DramConfigHigh; /* 25 DRAM Configuration bits 31:16 */
- u16 DramSize; /* 26 DRAM size (bytes / 64K) */
- u16 GpioTbiCtl; /* 27 GPIO/TBI controls for functions 1/0 */
- u16 EepromSize; /* 28 EEPROM Size */
- struct slic_config_mac MacInfo[2]; /* 29 MAC addresses (2 ports) */
- u8 FruFormat; /* 35 Alacritech FRU format type */
- struct atk_fru AtkFru; /* Alacritech FRU information */
- u8 OemFruFormat; /* optional OEM FRU format type */
- union oemfru OemFru; /* optional OEM FRU information */
- u8 Pad[4]; /* Pad to 128 bytes - includes 2 checksum bytes
- * (if OEM FRU info exists) and two unusable
- * bytes at the end
- */
-};
-
-#define MAX_EECODE_SIZE sizeof(struct slic_eeprom)
-#define MIN_EECODE_SIZE 0x62 /* code size without optional OEM FRU stuff */
-
-/*
- * SLIC CONFIG structure
- *
- * This structure lives in the CARD structure and is valid for all board types.
- * It is filled in from the appropriate EEPROM structure by
- * SlicGetConfigData()
- */
-struct slic_config {
- bool EepromValid; /* Valid EEPROM flag (checksum good?) */
- u16 DramSize; /* DRAM size (bytes / 64K) */
- struct slic_config_mac MacInfo[SLIC_NBR_MACS]; /* MAC addresses */
- u8 FruFormat; /* Alacritech FRU format type */
- struct atk_fru AtkFru; /* Alacritech FRU information */
- u8 OemFruFormat; /* optional OEM FRU format type */
- union {
- struct vendor1_fru vendor1_fru;
- struct vendor2_fru vendor2_fru;
- struct vendor3_fru vendor3_fru;
- struct vendor4_fru vendor4_fru;
- } OemFru;
-};
-
-#pragma pack()
-
-#endif
diff --git a/drivers/staging/slicoss/slicoss.c b/drivers/staging/slicoss/slicoss.c
deleted file mode 100644
index 062307ad7fed..000000000000
--- a/drivers/staging/slicoss/slicoss.c
+++ /dev/null
@@ -1,3132 +0,0 @@
-/**************************************************************************
- *
- * Copyright 2000-2006 Alacritech, Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials provided
- * with the distribution.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY ALACRITECH, INC. ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL ALACRITECH, INC. OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
- * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
- * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
- * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- *
- * The views and conclusions contained in the software and documentation
- * are those of the authors and should not be interpreted as representing
- * official policies, either expressed or implied, of Alacritech, Inc.
- *
- **************************************************************************/
-
-/*
- * FILENAME: slicoss.c
- *
- * The SLICOSS driver for Alacritech's IS-NIC products.
- *
- * This driver is supposed to support:
- *
- * Mojave cards (single port PCI Gigabit) both copper and fiber
- * Oasis cards (single and dual port PCI-x Gigabit) copper and fiber
- * Kalahari cards (dual and quad port PCI-e Gigabit) copper and fiber
- *
- * The driver was actually tested on Oasis and Kalahari cards.
- *
- *
- * NOTE: This is the standard, non-accelerated version of Alacritech's
- * IS-NIC driver.
- */
-
-#define KLUDGE_FOR_4GB_BOUNDARY 1
-#define DEBUG_MICROCODE 1
-#define DBG 1
-#define SLIC_INTERRUPT_PROCESS_LIMIT 1
-#define SLIC_OFFLOAD_IP_CHECKSUM 1
-#define STATS_TIMER_INTERVAL 2
-#define PING_TIMER_INTERVAL 1
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/kernel.h>
-#include <linux/string.h>
-#include <linux/errno.h>
-#include <linux/ioport.h>
-#include <linux/slab.h>
-#include <linux/interrupt.h>
-#include <linux/timer.h>
-#include <linux/pci.h>
-#include <linux/spinlock.h>
-#include <linux/init.h>
-#include <linux/bitops.h>
-#include <linux/io.h>
-#include <linux/netdevice.h>
-#include <linux/crc32.h>
-#include <linux/etherdevice.h>
-#include <linux/skbuff.h>
-#include <linux/delay.h>
-#include <linux/seq_file.h>
-#include <linux/kthread.h>
-#include <linux/module.h>
-
-#include <linux/firmware.h>
-#include <linux/types.h>
-#include <linux/dma-mapping.h>
-#include <linux/mii.h>
-#include <linux/if_vlan.h>
-#include <asm/unaligned.h>
-
-#include <linux/ethtool.h>
-#include <linux/uaccess.h>
-#include "slichw.h"
-#include "slic.h"
-
-static uint slic_first_init = 1;
-static char *slic_banner = "Alacritech SLIC Technology(tm) Server and Storage Accelerator (Non-Accelerated)";
-
-static char *slic_proc_version = "2.0.351 2006/07/14 12:26:00";
-
-static struct base_driver slic_global = { {}, 0, 0, 0, 1, NULL, NULL };
-#define DEFAULT_INTAGG_DELAY 100
-static unsigned int rcv_count;
-
-#define DRV_NAME "slicoss"
-#define DRV_VERSION "2.0.1"
-#define DRV_AUTHOR "Alacritech, Inc. Engineering"
-#define DRV_DESCRIPTION "Alacritech SLIC Technology(tm) "\
- "Non-Accelerated Driver"
-#define DRV_COPYRIGHT "Copyright 2000-2006 Alacritech, Inc. "\
- "All rights reserved."
-#define PFX DRV_NAME " "
-
-MODULE_AUTHOR(DRV_AUTHOR);
-MODULE_DESCRIPTION(DRV_DESCRIPTION);
-MODULE_LICENSE("Dual BSD/GPL");
-
-static const struct pci_device_id slic_pci_tbl[] = {
- { PCI_DEVICE(PCI_VENDOR_ID_ALACRITECH, SLIC_1GB_DEVICE_ID) },
- { PCI_DEVICE(PCI_VENDOR_ID_ALACRITECH, SLIC_2GB_DEVICE_ID) },
- { 0 }
-};
-
-static const struct ethtool_ops slic_ethtool_ops;
-
-MODULE_DEVICE_TABLE(pci, slic_pci_tbl);
-
-static void slic_mcast_set_bit(struct adapter *adapter, char *address)
-{
- unsigned char crcpoly;
-
- /* Get the CRC polynomial for the mac address */
- /*
- * we use bits 1-8 (lsb), bitwise reversed,
- * msb (= lsb bit 0 before bitrev) is automatically discarded
- */
- crcpoly = ether_crc(ETH_ALEN, address) >> 23;
-
- /*
- * We only have space on the SLIC for 64 entries. Lop
- * off the top two bits. (2^6 = 64)
- */
- crcpoly &= 0x3F;
-
- /* OR in the new bit into our 64 bit mask. */
- adapter->mcastmask |= (u64)1 << crcpoly;
-}
-
-static void slic_mcast_set_mask(struct adapter *adapter)
-{
- if (adapter->macopts & (MAC_ALLMCAST | MAC_PROMISC)) {
- /*
- * Turn on all multicast addresses. We have to do this for
- * promiscuous mode as well as ALLMCAST mode. It saves the
- * Microcode from having to keep state about the MAC
- * configuration.
- */
- slic_write32(adapter, SLIC_REG_MCASTLOW, 0xFFFFFFFF);
- slic_write32(adapter, SLIC_REG_MCASTHIGH, 0xFFFFFFFF);
- } else {
- /*
- * Commit our multicast mask to the SLIC by writing to the
- * multicast address mask registers
- */
- slic_write32(adapter, SLIC_REG_MCASTLOW,
- (u32)(adapter->mcastmask & 0xFFFFFFFF));
- slic_write32(adapter, SLIC_REG_MCASTHIGH,
- (u32)((adapter->mcastmask >> 32) & 0xFFFFFFFF));
- }
-}
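
Illustrative use of the two helpers above, with a hypothetical multicast
address (01:00:5e:00:00:01, the all-hosts group): each address sets one bit of
the 64-bit hash mask, and slic_mcast_set_mask() then commits the mask to the
card in two 32-bit writes:

	static void example_join_all_hosts(struct adapter *adapter)
	{
		char addr[ETH_ALEN] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };

		slic_mcast_set_bit(adapter, addr);
		slic_mcast_set_mask(adapter);
	}
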
-
-static void slic_timer_ping(ulong dev)
-{
- struct adapter *adapter;
- struct sliccard *card;
-
- adapter = netdev_priv((struct net_device *)dev);
- card = adapter->card;
-
- adapter->pingtimer.expires = jiffies + (PING_TIMER_INTERVAL * HZ);
- add_timer(&adapter->pingtimer);
-}
-
-/*
- * slic_link_config
- *
- * Write phy control to configure link duplex/speed
- *
- */
-static void slic_link_config(struct adapter *adapter,
- u32 linkspeed, u32 linkduplex)
-{
- u32 speed;
- u32 duplex;
- u32 phy_config;
- u32 phy_advreg;
- u32 phy_gctlreg;
-
- if (adapter->state != ADAPT_UP)
- return;
-
- if (linkspeed > LINK_1000MB)
- linkspeed = LINK_AUTOSPEED;
- if (linkduplex > LINK_AUTOD)
- linkduplex = LINK_AUTOD;
-
- if ((linkspeed == LINK_AUTOSPEED) || (linkspeed == LINK_1000MB)) {
- if (adapter->flags & ADAPT_FLAGS_FIBERMEDIA) {
- /*
- * We've got a fiber gigabit interface, and register
- * 4 is different in fiber mode than in copper mode
- */
-
- /* advertise FD only @1000 Mb */
- phy_advreg = (MIICR_REG_4 | (PAR_ADV1000XFD));
- /* enable PAUSE frames */
- phy_advreg |= PAR_ASYMPAUSE_FIBER;
- slic_write32(adapter, SLIC_REG_WPHY, phy_advreg);
-
- if (linkspeed == LINK_AUTOSPEED) {
- /* reset phy, enable auto-neg */
- phy_config =
- (MIICR_REG_PCR |
- (PCR_RESET | PCR_AUTONEG |
- PCR_AUTONEG_RST));
- slic_write32(adapter, SLIC_REG_WPHY,
- phy_config);
- } else { /* forced 1000 Mb FD*/
- /*
- * power down phy to break link
- * (this may not work)
- */
- phy_config = (MIICR_REG_PCR | PCR_POWERDOWN);
- slic_write32(adapter, SLIC_REG_WPHY,
- phy_config);
- slic_flush_write(adapter);
- /*
- * wait, Marvell says 1 sec,
- * try to get away with 10 ms
- */
- mdelay(10);
-
- /*
- * disable auto-neg, set speed/duplex,
- * soft reset phy, powerup
- */
- phy_config =
- (MIICR_REG_PCR |
- (PCR_RESET | PCR_SPEED_1000 |
- PCR_DUPLEX_FULL));
- slic_write32(adapter, SLIC_REG_WPHY,
- phy_config);
- }
- } else { /* copper gigabit */
-
- /*
- * Auto-negotiate (1000 Mb must be auto-negotiated).
- * We've got a copper gigabit interface, and register 4
- * is different in copper mode than in fiber mode.
- */
- if (linkspeed == LINK_AUTOSPEED) {
- /* advertise 10/100 Mb modes */
- phy_advreg =
- (MIICR_REG_4 |
- (PAR_ADV100FD | PAR_ADV100HD | PAR_ADV10FD
- | PAR_ADV10HD));
- } else {
- /*
- * linkspeed == LINK_1000MB -
- * don't advertise 10/100 Mb modes
- */
- phy_advreg = MIICR_REG_4;
- }
- /* enable PAUSE frames */
- phy_advreg |= PAR_ASYMPAUSE;
- /* required by the Cicada PHY */
- phy_advreg |= PAR_802_3;
- slic_write32(adapter, SLIC_REG_WPHY, phy_advreg);
- /* advertise FD only @1000 Mb */
- phy_gctlreg = (MIICR_REG_9 | (PGC_ADV1000FD));
- slic_write32(adapter, SLIC_REG_WPHY, phy_gctlreg);
-
- if (adapter->subsysid != SLIC_1GB_CICADA_SUBSYS_ID) {
- /*
- * if a Marvell PHY
- * enable auto crossover
- */
- phy_config =
- (MIICR_REG_16 | (MRV_REG16_XOVERON));
- slic_write32(adapter, SLIC_REG_WPHY,
- phy_config);
-
- /* reset phy, enable auto-neg */
- phy_config =
- (MIICR_REG_PCR |
- (PCR_RESET | PCR_AUTONEG |
- PCR_AUTONEG_RST));
- slic_write32(adapter, SLIC_REG_WPHY,
- phy_config);
- } else { /* it's a Cicada PHY */
- /* enable and restart auto-neg (don't reset) */
- phy_config =
- (MIICR_REG_PCR |
- (PCR_AUTONEG | PCR_AUTONEG_RST));
- slic_write32(adapter, SLIC_REG_WPHY,
- phy_config);
- }
- }
- } else {
- /* Forced 10/100 */
- if (linkspeed == LINK_10MB)
- speed = 0;
- else
- speed = PCR_SPEED_100;
- if (linkduplex == LINK_HALFD)
- duplex = 0;
- else
- duplex = PCR_DUPLEX_FULL;
-
- if (adapter->subsysid != SLIC_1GB_CICADA_SUBSYS_ID) {
- /*
- * if a Marvell PHY
- * disable auto crossover
- */
- phy_config = (MIICR_REG_16 | (MRV_REG16_XOVEROFF));
- slic_write32(adapter, SLIC_REG_WPHY, phy_config);
- }
-
- /* power down phy to break link (this may not work) */
- phy_config = (MIICR_REG_PCR | (PCR_POWERDOWN | speed | duplex));
- slic_write32(adapter, SLIC_REG_WPHY, phy_config);
- slic_flush_write(adapter);
- /* wait, Marvell says 1 sec, try to get away with 10 ms */
- mdelay(10);
-
- if (adapter->subsysid != SLIC_1GB_CICADA_SUBSYS_ID) {
- /*
- * if a Marvell PHY
- * disable auto-neg, set speed,
- * soft reset phy, powerup
- */
- phy_config =
- (MIICR_REG_PCR | (PCR_RESET | speed | duplex));
- slic_write32(adapter, SLIC_REG_WPHY, phy_config);
- } else { /* it's a Cicada PHY */
- /* disable auto-neg, set speed, powerup */
- phy_config = (MIICR_REG_PCR | (speed | duplex));
- slic_write32(adapter, SLIC_REG_WPHY, phy_config);
- }
- }
-}
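
The forced 10/100 path above assembles its PHY control word piecewise; a
compact sketch of the Marvell-style composition (helper name invented, the
Cicada branch omits PCR_RESET):

	static u32 example_forced_pcr(bool speed_100, bool full_duplex)
	{
		return MIICR_REG_PCR | PCR_RESET |
		       (speed_100 ? PCR_SPEED_100 : 0) |
		       (full_duplex ? PCR_DUPLEX_FULL : 0);
	}
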
-
-static int slic_card_download_gbrcv(struct adapter *adapter)
-{
- const struct firmware *fw;
- const char *file = "";
- int ret;
- u32 codeaddr;
- u32 instruction;
- int index = 0;
- u32 rcvucodelen = 0;
-
- switch (adapter->devid) {
- case SLIC_2GB_DEVICE_ID:
- file = "slicoss/oasisrcvucode.sys";
- break;
- case SLIC_1GB_DEVICE_ID:
- file = "slicoss/gbrcvucode.sys";
- break;
- default:
- return -ENOENT;
- }
-
- ret = request_firmware(&fw, file, &adapter->pcidev->dev);
- if (ret) {
- dev_err(&adapter->pcidev->dev,
- "Failed to load firmware %s\n", file);
- return ret;
- }
-
- rcvucodelen = *(u32 *)(fw->data + index);
- index += 4;
- switch (adapter->devid) {
- case SLIC_2GB_DEVICE_ID:
- if (rcvucodelen != OasisRcvUCodeLen) {
- release_firmware(fw);
- return -EINVAL;
- }
- break;
- case SLIC_1GB_DEVICE_ID:
- if (rcvucodelen != GBRcvUCodeLen) {
- release_firmware(fw);
- return -EINVAL;
- }
- break;
- }
- /* start download */
- slic_write32(adapter, SLIC_REG_RCV_WCS, SLIC_RCVWCS_BEGIN);
- /* download the rcv sequencer ucode */
- for (codeaddr = 0; codeaddr < rcvucodelen; codeaddr++) {
- /* write out instruction address */
- slic_write32(adapter, SLIC_REG_RCV_WCS, codeaddr);
-
- instruction = *(u32 *)(fw->data + index);
- index += 4;
- /* write out the instruction data low addr */
- slic_write32(adapter, SLIC_REG_RCV_WCS, instruction);
-
- instruction = *(u8 *)(fw->data + index);
- index++;
- /* write out the instruction data high addr */
- slic_write32(adapter, SLIC_REG_RCV_WCS, instruction);
- }
-
- /* download finished */
- release_firmware(fw);
- slic_write32(adapter, SLIC_REG_RCV_WCS, SLIC_RCVWCS_FINISH);
- slic_flush_write(adapter);
-
- return 0;
-}
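
The blob format consumed above, as the code implies: a u32 instruction count,
then one record per instruction made of a 32-bit low word followed by a single
high byte. A minimal cursor sketch of that record layout (names invented; the
unaligned u32 read mirrors what the function itself does):

	struct gbrcv_cursor {
		const u8 *data;
		int off;
	};

	static void example_read_record(struct gbrcv_cursor *c,
					u32 *lo, u8 *hi)
	{
		*lo = *(const u32 *)(c->data + c->off);
		c->off += 4;
		*hi = c->data[c->off++];
	}
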
-
-MODULE_FIRMWARE("slicoss/oasisrcvucode.sys");
-MODULE_FIRMWARE("slicoss/gbrcvucode.sys");
-
-static int slic_card_download(struct adapter *adapter)
-{
- const struct firmware *fw;
- const char *file = "";
- int ret;
- u32 section;
- int thissectionsize;
- int codeaddr;
- u32 instruction;
- u32 baseaddress;
- u32 i;
- u32 numsects = 0;
- u32 sectsize[3];
- u32 sectstart[3];
- int ucode_start, index = 0;
-
- switch (adapter->devid) {
- case SLIC_2GB_DEVICE_ID:
- file = "slicoss/oasisdownload.sys";
- break;
- case SLIC_1GB_DEVICE_ID:
- file = "slicoss/gbdownload.sys";
- break;
- default:
- return -ENOENT;
- }
- ret = request_firmware(&fw, file, &adapter->pcidev->dev);
- if (ret) {
- dev_err(&adapter->pcidev->dev,
- "Failed to load firmware %s\n", file);
- return ret;
- }
- numsects = *(u32 *)(fw->data + index);
- index += 4;
- for (i = 0; i < numsects; i++) {
- sectsize[i] = *(u32 *)(fw->data + index);
- index += 4;
- }
- for (i = 0; i < numsects; i++) {
- sectstart[i] = *(u32 *)(fw->data + index);
- index += 4;
- }
- ucode_start = index;
- instruction = *(u32 *)(fw->data + index);
- index += 4;
- for (section = 0; section < numsects; section++) {
- baseaddress = sectstart[section];
- thissectionsize = sectsize[section] >> 3;
-
- for (codeaddr = 0; codeaddr < thissectionsize; codeaddr++) {
- /* Write out instruction address */
- slic_write32(adapter, SLIC_REG_WCS,
- baseaddress + codeaddr);
- /* Write out instruction to low addr */
- slic_write32(adapter, SLIC_REG_WCS,
- instruction);
- instruction = *(u32 *)(fw->data + index);
- index += 4;
-
- /* Write out instruction to high addr */
- slic_write32(adapter, SLIC_REG_WCS,
- instruction);
- instruction = *(u32 *)(fw->data + index);
- index += 4;
- }
- }
- index = ucode_start;
- for (section = 0; section < numsects; section++) {
- instruction = *(u32 *)(fw->data + index);
- baseaddress = sectstart[section];
- if (baseaddress < 0x8000)
- continue;
- thissectionsize = sectsize[section] >> 3;
-
- for (codeaddr = 0; codeaddr < thissectionsize; codeaddr++) {
- /* Write out instruction address */
- slic_write32(adapter, SLIC_REG_WCS,
- SLIC_WCS_COMPARE | (baseaddress +
- codeaddr));
- /* Write out instruction to low addr */
- slic_write32(adapter, SLIC_REG_WCS, instruction);
- instruction = *(u32 *)(fw->data + index);
- index += 4;
- /* Write out instruction to high addr */
- slic_write32(adapter, SLIC_REG_WCS, instruction);
- instruction = *(u32 *)(fw->data + index);
- index += 4;
- }
- }
- release_firmware(fw);
- /* Everything OK, kick off the card */
- mdelay(10);
-
- slic_write32(adapter, SLIC_REG_WCS, SLIC_WCS_START);
- slic_flush_write(adapter);
- /*
- * stall for 20 ms, long enough for ucode to init card
- * and reach mainloop
- */
- mdelay(20);
-
- return 0;
-}
-
-MODULE_FIRMWARE("slicoss/oasisdownload.sys");
-MODULE_FIRMWARE("slicoss/gbdownload.sys");
-
-static void slic_adapter_set_hwaddr(struct adapter *adapter)
-{
- struct sliccard *card = adapter->card;
-
- if ((adapter->card) && (card->config_set)) {
- memcpy(adapter->macaddr,
- card->config.MacInfo[adapter->functionnumber].macaddrA,
- sizeof(struct slic_config_mac));
- if (is_zero_ether_addr(adapter->currmacaddr))
- memcpy(adapter->currmacaddr, adapter->macaddr,
- ETH_ALEN);
- if (adapter->netdev)
- memcpy(adapter->netdev->dev_addr, adapter->currmacaddr,
- ETH_ALEN);
- }
-}
-
-static void slic_intagg_set(struct adapter *adapter, u32 value)
-{
- slic_write32(adapter, SLIC_REG_INTAGG, value);
- adapter->card->loadlevel_current = value;
-}
-
-static void slic_soft_reset(struct adapter *adapter)
-{
- if (adapter->card->state == CARD_UP) {
- slic_write32(adapter, SLIC_REG_QUIESCE, 0);
- slic_flush_write(adapter);
- mdelay(1);
- }
-
- slic_write32(adapter, SLIC_REG_RESET, SLIC_RESET_MAGIC);
- slic_flush_write(adapter);
-
- mdelay(1);
-}
-
-static void slic_mac_address_config(struct adapter *adapter)
-{
- u32 value;
- u32 value2;
-
- value = ntohl(*(__be32 *)&adapter->currmacaddr[2]);
- slic_write32(adapter, SLIC_REG_WRADDRAL, value);
- slic_write32(adapter, SLIC_REG_WRADDRBL, value);
-
- value2 = (u32)((adapter->currmacaddr[0] << 8 |
- adapter->currmacaddr[1]) & 0xFFFF);
-
- slic_write32(adapter, SLIC_REG_WRADDRAH, value2);
- slic_write32(adapter, SLIC_REG_WRADDRBH, value2);
-
- /*
- * Write our multicast mask out to the card. This is done
- * here in addition to the slic_mcast_addr_set routine
- * because ALL_MCAST may have been enabled or disabled
- */
- slic_mcast_set_mask(adapter);
-}
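
Worked example of the packing above, assuming a hypothetical address
00:11:22:33:44:55: ntohl() over bytes 2-5 yields 0x22334455 for the low
registers, and bytes 0-1 form 0x00000011 for the high ones. The same low word
built without the cast:

	static u32 example_mac_low_word(const u8 *mac)
	{
		return (u32)mac[2] << 24 | (u32)mac[3] << 16 |
		       (u32)mac[4] << 8 | mac[5];
	}
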
-
-static void slic_mac_config(struct adapter *adapter)
-{
- u32 value;
-
- /* Setup GMAC gaps */
- if (adapter->linkspeed == LINK_1000MB) {
- value = ((GMCR_GAPBB_1000 << GMCR_GAPBB_SHIFT) |
- (GMCR_GAPR1_1000 << GMCR_GAPR1_SHIFT) |
- (GMCR_GAPR2_1000 << GMCR_GAPR2_SHIFT));
- } else {
- value = ((GMCR_GAPBB_100 << GMCR_GAPBB_SHIFT) |
- (GMCR_GAPR1_100 << GMCR_GAPR1_SHIFT) |
- (GMCR_GAPR2_100 << GMCR_GAPR2_SHIFT));
- }
-
- /* enable GMII */
- if (adapter->linkspeed == LINK_1000MB)
- value |= GMCR_GBIT;
-
- /* enable fullduplex */
- if ((adapter->linkduplex == LINK_FULLD)
- || (adapter->macopts & MAC_LOOPBACK)) {
- value |= GMCR_FULLD;
- }
-
- /* write mac config */
- slic_write32(adapter, SLIC_REG_WMCFG, value);
-
- /* setup mac addresses */
- slic_mac_address_config(adapter);
-}
-
-static void slic_config_set(struct adapter *adapter, bool linkchange)
-{
- u32 value;
- u32 RcrReset;
-
- if (linkchange) {
- /* Setup MAC */
- slic_mac_config(adapter);
- RcrReset = GRCR_RESET;
- } else {
- slic_mac_address_config(adapter);
- RcrReset = 0;
- }
-
- if (adapter->linkduplex == LINK_FULLD) {
- /* setup xmtcfg */
- value = (GXCR_RESET | /* Always reset */
- GXCR_XMTEN | /* Enable transmit */
- GXCR_PAUSEEN); /* Enable pause */
-
- slic_write32(adapter, SLIC_REG_WXCFG, value);
-
- /* Setup rcvcfg last */
- value = (RcrReset | /* Reset, if linkchange */
- GRCR_CTLEN | /* Enable CTL frames */
- GRCR_ADDRAEN | /* Address A enable */
- GRCR_RCVBAD | /* Rcv bad frames */
- (GRCR_HASHSIZE << GRCR_HASHSIZE_SHIFT));
- } else {
- /* setup xmtcfg */
- value = (GXCR_RESET | /* Always reset */
- GXCR_XMTEN); /* Enable transmit */
-
- slic_write32(adapter, SLIC_REG_WXCFG, value);
-
- /* Setup rcvcfg last */
- value = (RcrReset | /* Reset, if linkchange */
- GRCR_ADDRAEN | /* Address A enable */
- GRCR_RCVBAD | /* Rcv bad frames */
- (GRCR_HASHSIZE << GRCR_HASHSIZE_SHIFT));
- }
-
- if (adapter->state != ADAPT_DOWN) {
- /* Only enable receive if we are restarting or running */
- value |= GRCR_RCVEN;
- }
-
- if (adapter->macopts & MAC_PROMISC)
- value |= GRCR_RCVALL;
-
- slic_write32(adapter, SLIC_REG_WRCFG, value);
-}
-
-/*
- * Turn off RCV and XMT, power down PHY
- */
-static void slic_config_clear(struct adapter *adapter)
-{
- u32 value;
- u32 phy_config;
-
- /* Setup xmtcfg */
- value = (GXCR_RESET | /* Always reset */
- GXCR_PAUSEEN); /* Enable pause */
-
- slic_write32(adapter, SLIC_REG_WXCFG, value);
-
- value = (GRCR_RESET | /* Always reset */
- GRCR_CTLEN | /* Enable CTL frames */
- GRCR_ADDRAEN | /* Address A enable */
- (GRCR_HASHSIZE << GRCR_HASHSIZE_SHIFT));
-
- slic_write32(adapter, SLIC_REG_WRCFG, value);
-
- /* power down phy */
- phy_config = (MIICR_REG_PCR | (PCR_POWERDOWN));
- slic_write32(adapter, SLIC_REG_WPHY, phy_config);
-}
-
-static bool slic_mac_filter(struct adapter *adapter,
- struct ether_header *ether_frame)
-{
- struct net_device *netdev = adapter->netdev;
- u32 opts = adapter->macopts;
-
- if (opts & MAC_PROMISC)
- return true;
-
- if (is_broadcast_ether_addr(ether_frame->ether_dhost)) {
- if (opts & MAC_BCAST) {
- adapter->rcv_broadcasts++;
- return true;
- }
-
- return false;
- }
-
- if (is_multicast_ether_addr(ether_frame->ether_dhost)) {
- if (opts & MAC_ALLMCAST) {
- adapter->rcv_multicasts++;
- netdev->stats.multicast++;
- return true;
- }
- if (opts & MAC_MCAST) {
- struct mcast_address *mcaddr = adapter->mcastaddrs;
-
- while (mcaddr) {
- if (ether_addr_equal(mcaddr->address,
- ether_frame->ether_dhost)) {
- adapter->rcv_multicasts++;
- netdev->stats.multicast++;
- return true;
- }
- mcaddr = mcaddr->next;
- }
-
- return false;
- }
-
- return false;
- }
- if (opts & MAC_DIRECTED) {
- adapter->rcv_unicasts++;
- return true;
- }
- return false;
-}
-
-static int slic_mac_set_address(struct net_device *dev, void *ptr)
-{
- struct adapter *adapter = netdev_priv(dev);
- struct sockaddr *addr = ptr;
-
- if (netif_running(dev))
- return -EBUSY;
- if (!adapter)
- return -EBUSY;
-
- if (!is_valid_ether_addr(addr->sa_data))
- return -EINVAL;
-
- memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
- memcpy(adapter->currmacaddr, addr->sa_data, dev->addr_len);
-
- slic_config_set(adapter, true);
- return 0;
-}
-
-static void slic_timer_load_check(ulong cardaddr)
-{
- struct sliccard *card = (struct sliccard *)cardaddr;
- struct adapter *adapter = card->master;
- u32 load = card->events;
- u32 level = 0;
-
- if ((adapter) && (adapter->state == ADAPT_UP) &&
- (card->state == CARD_UP) && (slic_global.dynamic_intagg)) {
- if (adapter->devid == SLIC_1GB_DEVICE_ID) {
- if (adapter->linkspeed == LINK_1000MB)
- level = 100;
- else {
- if (load > SLIC_LOAD_5)
- level = SLIC_INTAGG_5;
- else if (load > SLIC_LOAD_4)
- level = SLIC_INTAGG_4;
- else if (load > SLIC_LOAD_3)
- level = SLIC_INTAGG_3;
- else if (load > SLIC_LOAD_2)
- level = SLIC_INTAGG_2;
- else if (load > SLIC_LOAD_1)
- level = SLIC_INTAGG_1;
- else
- level = SLIC_INTAGG_0;
- }
- if (card->loadlevel_current != level) {
- card->loadlevel_current = level;
- slic_write32(adapter, SLIC_REG_INTAGG, level);
- }
- } else {
- if (load > SLIC_LOAD_5)
- level = SLIC_INTAGG_5;
- else if (load > SLIC_LOAD_4)
- level = SLIC_INTAGG_4;
- else if (load > SLIC_LOAD_3)
- level = SLIC_INTAGG_3;
- else if (load > SLIC_LOAD_2)
- level = SLIC_INTAGG_2;
- else if (load > SLIC_LOAD_1)
- level = SLIC_INTAGG_1;
- else
- level = SLIC_INTAGG_0;
- if (card->loadlevel_current != level) {
- card->loadlevel_current = level;
- slic_write32(adapter, SLIC_REG_INTAGG, level);
- }
- }
- }
- card->events = 0;
- card->loadtimer.expires = jiffies + (SLIC_LOADTIMER_PERIOD * HZ);
- add_timer(&card->loadtimer);
-}
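
The load-to-level ladder above is duplicated across the two device branches; a
sketch of the shared mapping (SLIC_LOAD_n and SLIC_INTAGG_n are defined
elsewhere in the driver; the helper name is invented):

	static u32 example_intagg_level(u32 load)
	{
		if (load > SLIC_LOAD_5)
			return SLIC_INTAGG_5;
		if (load > SLIC_LOAD_4)
			return SLIC_INTAGG_4;
		if (load > SLIC_LOAD_3)
			return SLIC_INTAGG_3;
		if (load > SLIC_LOAD_2)
			return SLIC_INTAGG_2;
		if (load > SLIC_LOAD_1)
			return SLIC_INTAGG_1;
		return SLIC_INTAGG_0;
	}
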
-
-static int slic_upr_queue_request(struct adapter *adapter,
- u32 upr_request,
- u32 upr_data,
- u32 upr_data_h,
- u32 upr_buffer, u32 upr_buffer_h)
-{
- struct slic_upr *upr;
- struct slic_upr *uprqueue;
-
- upr = kmalloc(sizeof(*upr), GFP_ATOMIC);
- if (!upr)
- return -ENOMEM;
-
- upr->adapter = adapter->port;
- upr->upr_request = upr_request;
- upr->upr_data = upr_data;
- upr->upr_buffer = upr_buffer;
- upr->upr_data_h = upr_data_h;
- upr->upr_buffer_h = upr_buffer_h;
- upr->next = NULL;
- if (adapter->upr_list) {
- uprqueue = adapter->upr_list;
-
- while (uprqueue->next)
- uprqueue = uprqueue->next;
- uprqueue->next = upr;
- } else {
- adapter->upr_list = upr;
- }
- return 0;
-}
-
-static void slic_upr_start(struct adapter *adapter)
-{
- struct slic_upr *upr;
-
- upr = adapter->upr_list;
- if (!upr)
- return;
- if (adapter->upr_busy)
- return;
- adapter->upr_busy = 1;
-
- switch (upr->upr_request) {
- case SLIC_UPR_STATS:
- if (upr->upr_data_h == 0) {
- slic_write32(adapter, SLIC_REG_RSTAT, upr->upr_data);
- } else {
- slic_write64(adapter, SLIC_REG_RSTAT64, upr->upr_data,
- upr->upr_data_h);
- }
- break;
-
- case SLIC_UPR_RLSR:
- slic_write64(adapter, SLIC_REG_LSTAT, upr->upr_data,
- upr->upr_data_h);
- break;
-
- case SLIC_UPR_RCONFIG:
- slic_write64(adapter, SLIC_REG_RCONFIG, upr->upr_data,
- upr->upr_data_h);
- break;
- case SLIC_UPR_PING:
- slic_write32(adapter, SLIC_REG_PING, 1);
- break;
- }
- slic_flush_write(adapter);
-}
-
-static int slic_upr_request(struct adapter *adapter,
- u32 upr_request,
- u32 upr_data,
- u32 upr_data_h,
- u32 upr_buffer, u32 upr_buffer_h)
-{
- unsigned long flags;
- int rc;
-
- spin_lock_irqsave(&adapter->upr_lock, flags);
- rc = slic_upr_queue_request(adapter,
- upr_request,
- upr_data,
- upr_data_h, upr_buffer, upr_buffer_h);
- if (rc)
- goto err_unlock_irq;
-
- slic_upr_start(adapter);
-err_unlock_irq:
- spin_unlock_irqrestore(&adapter->upr_lock, flags);
- return rc;
-}
-
-static void slic_link_upr_complete(struct adapter *adapter, u32 isr)
-{
- struct slic_shmemory *sm = &adapter->shmem;
- struct slic_shmem_data *sm_data = sm->shmem_data;
- u32 lst = sm_data->lnkstatus;
- uint linkup;
- unsigned char linkspeed;
- unsigned char linkduplex;
-
- if ((isr & ISR_UPCERR) || (isr & ISR_UPCBSY)) {
- dma_addr_t phaddr = sm->lnkstatus_phaddr;
-
- slic_upr_queue_request(adapter, SLIC_UPR_RLSR,
- cpu_to_le32(lower_32_bits(phaddr)),
- cpu_to_le32(upper_32_bits(phaddr)),
- 0, 0);
- return;
- }
- if (adapter->state != ADAPT_UP)
- return;
-
- linkup = lst & GIG_LINKUP ? LINK_UP : LINK_DOWN;
- if (lst & GIG_SPEED_1000)
- linkspeed = LINK_1000MB;
- else if (lst & GIG_SPEED_100)
- linkspeed = LINK_100MB;
- else
- linkspeed = LINK_10MB;
-
- if (lst & GIG_FULLDUPLEX)
- linkduplex = LINK_FULLD;
- else
- linkduplex = LINK_HALFD;
-
- if ((adapter->linkstate == LINK_DOWN) && (linkup == LINK_DOWN))
- return;
-
- /* link up event, but nothing has changed */
- if ((adapter->linkstate == LINK_UP) &&
- (linkup == LINK_UP) &&
- (adapter->linkspeed == linkspeed) &&
- (adapter->linkduplex == linkduplex))
- return;
-
- /* link has changed at this point */
-
- /* link has gone from up to down */
- if (linkup == LINK_DOWN) {
- adapter->linkstate = LINK_DOWN;
- netif_carrier_off(adapter->netdev);
- return;
- }
-
- /* link has gone from down to up */
- adapter->linkspeed = linkspeed;
- adapter->linkduplex = linkduplex;
-
- if (adapter->linkstate != LINK_UP) {
- /* setup the mac */
- slic_config_set(adapter, true);
- adapter->linkstate = LINK_UP;
- netif_carrier_on(adapter->netdev);
- }
-}
-
-static void slic_upr_request_complete(struct adapter *adapter, u32 isr)
-{
- struct sliccard *card = adapter->card;
- struct slic_upr *upr;
- unsigned long flags;
-
- spin_lock_irqsave(&adapter->upr_lock, flags);
- upr = adapter->upr_list;
- if (!upr) {
- spin_unlock_irqrestore(&adapter->upr_lock, flags);
- return;
- }
- adapter->upr_list = upr->next;
- upr->next = NULL;
- adapter->upr_busy = 0;
- switch (upr->upr_request) {
- case SLIC_UPR_STATS: {
- struct slic_shmemory *sm = &adapter->shmem;
- struct slic_shmem_data *sm_data = sm->shmem_data;
- struct slic_stats *stats = &sm_data->stats;
- struct slic_stats *old = &adapter->inicstats_prev;
- struct slicnet_stats *stst = &adapter->slic_stats;
-
- if (isr & ISR_UPCERR) {
- dev_err(&adapter->netdev->dev,
- "SLIC_UPR_STATS command failed isr[%x]\n", isr);
- break;
- }
-
- UPDATE_STATS_GB(stst->tcp.xmit_tcp_segs, stats->xmit_tcp_segs,
- old->xmit_tcp_segs);
-
- UPDATE_STATS_GB(stst->tcp.xmit_tcp_bytes, stats->xmit_tcp_bytes,
- old->xmit_tcp_bytes);
-
- UPDATE_STATS_GB(stst->tcp.rcv_tcp_segs, stats->rcv_tcp_segs,
- old->rcv_tcp_segs);
-
- UPDATE_STATS_GB(stst->tcp.rcv_tcp_bytes, stats->rcv_tcp_bytes,
- old->rcv_tcp_bytes);
-
- UPDATE_STATS_GB(stst->iface.xmt_bytes, stats->xmit_bytes,
- old->xmit_bytes);
-
- UPDATE_STATS_GB(stst->iface.xmt_ucast, stats->xmit_unicasts,
- old->xmit_unicasts);
-
- UPDATE_STATS_GB(stst->iface.rcv_bytes, stats->rcv_bytes,
- old->rcv_bytes);
-
- UPDATE_STATS_GB(stst->iface.rcv_ucast, stats->rcv_unicasts,
- old->rcv_unicasts);
-
- UPDATE_STATS_GB(stst->iface.xmt_errors, stats->xmit_collisions,
- old->xmit_collisions);
-
- UPDATE_STATS_GB(stst->iface.xmt_errors,
- stats->xmit_excess_collisions,
- old->xmit_excess_collisions);
-
- UPDATE_STATS_GB(stst->iface.xmt_errors, stats->xmit_other_error,
- old->xmit_other_error);
-
- UPDATE_STATS_GB(stst->iface.rcv_errors, stats->rcv_other_error,
- old->rcv_other_error);
-
- UPDATE_STATS_GB(stst->iface.rcv_discards, stats->rcv_drops,
- old->rcv_drops);
-
- if (stats->rcv_drops > old->rcv_drops)
- adapter->rcv_drops += (stats->rcv_drops -
- old->rcv_drops);
- memcpy_fromio(old, stats, sizeof(*stats));
- break;
- }
- case SLIC_UPR_RLSR:
- slic_link_upr_complete(adapter, isr);
- break;
- case SLIC_UPR_RCONFIG:
- break;
- case SLIC_UPR_PING:
- card->pingstatus |= (isr & ISR_PINGDSMASK);
- break;
- }
- kfree(upr);
- slic_upr_start(adapter);
- spin_unlock_irqrestore(&adapter->upr_lock, flags);
-}
-
-static int slic_config_get(struct adapter *adapter, u32 config, u32 config_h)
-{
- return slic_upr_request(adapter, SLIC_UPR_RCONFIG, config, config_h,
- 0, 0);
-}
-
-/*
- * Compute a checksum of the EEPROM according to RFC 1071.
- */
-static u16 slic_eeprom_cksum(void *eeprom, unsigned int len)
-{
- u16 *wp = eeprom;
- u32 checksum = 0;
-
- while (len > 1) {
- checksum += *(wp++);
- len -= 2;
- }
-
- if (len > 0)
- checksum += *(u8 *)wp;
-
- while (checksum >> 16)
- checksum = (checksum & 0xFFFF) + ((checksum >> 16) & 0xFFFF);
-
- return ~checksum;
-}
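
A verification sketch exploiting the RFC 1071 property: when the stored
checksum word is included in the summed range, a valid image folds to zero
(helper name invented; the driver's actual call sites live elsewhere):

	static bool example_eeprom_valid(void *eeprom, unsigned int len)
	{
		/* len covers the data together with its checksum word */
		return slic_eeprom_cksum(eeprom, len) == 0;
	}
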
-
-static void slic_rspqueue_free(struct adapter *adapter)
-{
- int i;
- struct slic_rspqueue *rspq = &adapter->rspqueue;
-
- for (i = 0; i < rspq->num_pages; i++) {
- if (rspq->vaddr[i]) {
- pci_free_consistent(adapter->pcidev, PAGE_SIZE,
- rspq->vaddr[i], rspq->paddr[i]);
- }
- rspq->vaddr[i] = NULL;
- rspq->paddr[i] = 0;
- }
- rspq->offset = 0;
- rspq->pageindex = 0;
- rspq->rspbuf = NULL;
-}
-
-static int slic_rspqueue_init(struct adapter *adapter)
-{
- int i;
- struct slic_rspqueue *rspq = &adapter->rspqueue;
- u32 paddrh = 0;
-
- memset(rspq, 0, sizeof(struct slic_rspqueue));
-
- rspq->num_pages = SLIC_RSPQ_PAGES_GB;
-
- for (i = 0; i < rspq->num_pages; i++) {
- rspq->vaddr[i] = pci_zalloc_consistent(adapter->pcidev,
- PAGE_SIZE,
- &rspq->paddr[i]);
- if (!rspq->vaddr[i]) {
- dev_err(&adapter->pcidev->dev,
- "pci_alloc_consistent failed\n");
- slic_rspqueue_free(adapter);
- return -ENOMEM;
- }
-
- if (paddrh == 0) {
- slic_write32(adapter, SLIC_REG_RBAR,
- rspq->paddr[i] | SLIC_RSPQ_BUFSINPAGE);
- } else {
- slic_write64(adapter, SLIC_REG_RBAR64,
- rspq->paddr[i] | SLIC_RSPQ_BUFSINPAGE,
- paddrh);
- }
- }
- rspq->offset = 0;
- rspq->pageindex = 0;
- rspq->rspbuf = (struct slic_rspbuf *)rspq->vaddr[0];
- return 0;
-}
-
-static struct slic_rspbuf *slic_rspqueue_getnext(struct adapter *adapter)
-{
- struct slic_rspqueue *rspq = &adapter->rspqueue;
- struct slic_rspbuf *buf;
-
- if (!(rspq->rspbuf->status))
- return NULL;
-
- buf = rspq->rspbuf;
- if (++rspq->offset < SLIC_RSPQ_BUFSINPAGE) {
- rspq->rspbuf++;
- } else {
- slic_write64(adapter, SLIC_REG_RBAR64,
- rspq->paddr[rspq->pageindex] |
- SLIC_RSPQ_BUFSINPAGE, 0);
- rspq->pageindex = (rspq->pageindex + 1) % rspq->num_pages;
- rspq->offset = 0;
- rspq->rspbuf = (struct slic_rspbuf *)
- rspq->vaddr[rspq->pageindex];
- }
-
- return buf;
-}
-
-static void slic_cmdqmem_free(struct adapter *adapter)
-{
- struct slic_cmdqmem *cmdqmem = &adapter->cmdqmem;
- int i;
-
- for (i = 0; i < SLIC_CMDQ_MAXPAGES; i++) {
- if (cmdqmem->pages[i]) {
- pci_free_consistent(adapter->pcidev,
- PAGE_SIZE,
- (void *)cmdqmem->pages[i],
- cmdqmem->dma_pages[i]);
- }
- }
- memset(cmdqmem, 0, sizeof(struct slic_cmdqmem));
-}
-
-static u32 *slic_cmdqmem_addpage(struct adapter *adapter)
-{
- struct slic_cmdqmem *cmdqmem = &adapter->cmdqmem;
- u32 *pageaddr;
-
- if (cmdqmem->pagecnt >= SLIC_CMDQ_MAXPAGES)
- return NULL;
- pageaddr = pci_alloc_consistent(adapter->pcidev,
- PAGE_SIZE,
- &cmdqmem->dma_pages[cmdqmem->pagecnt]);
- if (!pageaddr)
- return NULL;
-
- cmdqmem->pages[cmdqmem->pagecnt] = pageaddr;
- cmdqmem->pagecnt++;
- return pageaddr;
-}
-
-static void slic_cmdq_free(struct adapter *adapter)
-{
- struct slic_hostcmd *cmd;
-
- cmd = adapter->cmdq_all.head;
- while (cmd) {
- if (cmd->busy) {
- struct sk_buff *tempskb;
-
- tempskb = cmd->skb;
- if (tempskb) {
- cmd->skb = NULL;
- dev_kfree_skb_irq(tempskb);
- }
- }
- cmd = cmd->next_all;
- }
- memset(&adapter->cmdq_all, 0, sizeof(struct slic_cmdqueue));
- memset(&adapter->cmdq_free, 0, sizeof(struct slic_cmdqueue));
- memset(&adapter->cmdq_done, 0, sizeof(struct slic_cmdqueue));
- slic_cmdqmem_free(adapter);
-}
-
-static void slic_cmdq_addcmdpage(struct adapter *adapter, u32 *page)
-{
- struct slic_hostcmd *cmd;
- struct slic_hostcmd *prev;
- struct slic_hostcmd *tail;
- struct slic_cmdqueue *cmdq;
- int cmdcnt;
- void *cmdaddr;
- ulong phys_addr;
- u32 phys_addrl;
- u32 phys_addrh;
- struct slic_handle *pslic_handle;
- unsigned long flags;
-
- cmdaddr = page;
- cmd = cmdaddr;
- cmdcnt = 0;
-
- phys_addr = virt_to_bus((void *)page);
- phys_addrl = SLIC_GET_ADDR_LOW(phys_addr);
- phys_addrh = SLIC_GET_ADDR_HIGH(phys_addr);
-
- prev = NULL;
- tail = cmd;
- while ((cmdcnt < SLIC_CMDQ_CMDSINPAGE) &&
- (adapter->slic_handle_ix < 256)) {
- /* Allocate and initialize a SLIC_HANDLE for this command */
- spin_lock_irqsave(&adapter->handle_lock, flags);
- pslic_handle = adapter->pfree_slic_handles;
- adapter->pfree_slic_handles = pslic_handle->next;
- spin_unlock_irqrestore(&adapter->handle_lock, flags);
- pslic_handle->type = SLIC_HANDLE_CMD;
- pslic_handle->address = (void *)cmd;
- pslic_handle->offset = (ushort)adapter->slic_handle_ix++;
- pslic_handle->other_handle = NULL;
- pslic_handle->next = NULL;
-
- cmd->pslic_handle = pslic_handle;
- cmd->cmd64.hosthandle = pslic_handle->token.handle_token;
- cmd->busy = false;
- cmd->paddrl = phys_addrl;
- cmd->paddrh = phys_addrh;
- cmd->next_all = prev;
- cmd->next = prev;
- prev = cmd;
- phys_addrl += SLIC_HOSTCMD_SIZE;
- cmdaddr += SLIC_HOSTCMD_SIZE;
-
- cmd = cmdaddr;
- cmdcnt++;
- }
-
- cmdq = &adapter->cmdq_all;
- cmdq->count += cmdcnt; /* SLIC_CMDQ_CMDSINPAGE; mooktodo */
- tail->next_all = cmdq->head;
- cmdq->head = prev;
- cmdq = &adapter->cmdq_free;
- spin_lock_irqsave(&cmdq->lock, flags);
- cmdq->count += cmdcnt; /* SLIC_CMDQ_CMDSINPAGE; mooktodo */
- tail->next = cmdq->head;
- cmdq->head = prev;
- spin_unlock_irqrestore(&cmdq->lock, flags);
-}
-
-static int slic_cmdq_init(struct adapter *adapter)
-{
- int i;
- u32 *pageaddr;
-
- memset(&adapter->cmdq_all, 0, sizeof(struct slic_cmdqueue));
- memset(&adapter->cmdq_free, 0, sizeof(struct slic_cmdqueue));
- memset(&adapter->cmdq_done, 0, sizeof(struct slic_cmdqueue));
- spin_lock_init(&adapter->cmdq_all.lock);
- spin_lock_init(&adapter->cmdq_free.lock);
- spin_lock_init(&adapter->cmdq_done.lock);
- memset(&adapter->cmdqmem, 0, sizeof(struct slic_cmdqmem));
- adapter->slic_handle_ix = 1;
- for (i = 0; i < SLIC_CMDQ_INITPAGES; i++) {
- pageaddr = slic_cmdqmem_addpage(adapter);
- if (!pageaddr) {
- slic_cmdq_free(adapter);
- return -ENOMEM;
- }
- slic_cmdq_addcmdpage(adapter, pageaddr);
- }
- adapter->slic_handle_ix = 1;
-
- return 0;
-}
-
-static void slic_cmdq_reset(struct adapter *adapter)
-{
- struct slic_hostcmd *hcmd;
- struct sk_buff *skb;
- u32 outstanding;
- unsigned long flags;
-
- spin_lock_irqsave(&adapter->cmdq_free.lock, flags);
- spin_lock(&adapter->cmdq_done.lock);
- outstanding = adapter->cmdq_all.count - adapter->cmdq_done.count;
- outstanding -= adapter->cmdq_free.count;
- hcmd = adapter->cmdq_all.head;
- while (hcmd) {
- if (hcmd->busy) {
- skb = hcmd->skb;
- hcmd->busy = 0;
- hcmd->skb = NULL;
- dev_kfree_skb_irq(skb);
- }
- hcmd = hcmd->next_all;
- }
- adapter->cmdq_free.count = 0;
- adapter->cmdq_free.head = NULL;
- adapter->cmdq_free.tail = NULL;
- adapter->cmdq_done.count = 0;
- adapter->cmdq_done.head = NULL;
- adapter->cmdq_done.tail = NULL;
- adapter->cmdq_free.head = adapter->cmdq_all.head;
- hcmd = adapter->cmdq_all.head;
- while (hcmd) {
- adapter->cmdq_free.count++;
- hcmd->next = hcmd->next_all;
- hcmd = hcmd->next_all;
- }
- if (adapter->cmdq_free.count != adapter->cmdq_all.count) {
- dev_err(&adapter->netdev->dev,
- "free_count %d != all count %d\n",
- adapter->cmdq_free.count, adapter->cmdq_all.count);
- }
- spin_unlock(&adapter->cmdq_done.lock);
- spin_unlock_irqrestore(&adapter->cmdq_free.lock, flags);
-}
-
-static void slic_cmdq_getdone(struct adapter *adapter)
-{
- struct slic_cmdqueue *done_cmdq = &adapter->cmdq_done;
- struct slic_cmdqueue *free_cmdq = &adapter->cmdq_free;
- unsigned long flags;
-
- spin_lock_irqsave(&done_cmdq->lock, flags);
-
- free_cmdq->head = done_cmdq->head;
- free_cmdq->count = done_cmdq->count;
- done_cmdq->head = NULL;
- done_cmdq->tail = NULL;
- done_cmdq->count = 0;
- spin_unlock_irqrestore(&done_cmdq->lock, flags);
-}
-
-static struct slic_hostcmd *slic_cmdq_getfree(struct adapter *adapter)
-{
- struct slic_cmdqueue *cmdq = &adapter->cmdq_free;
- struct slic_hostcmd *cmd = NULL;
- unsigned long flags;
-
-lock_and_retry:
- spin_lock_irqsave(&cmdq->lock, flags);
-retry:
- cmd = cmdq->head;
- if (cmd) {
- cmdq->head = cmd->next;
- cmdq->count--;
- spin_unlock_irqrestore(&cmdq->lock, flags);
- } else {
- slic_cmdq_getdone(adapter);
- cmd = cmdq->head;
- if (cmd) {
- goto retry;
- } else {
- u32 *pageaddr;
-
- spin_unlock_irqrestore(&cmdq->lock, flags);
- pageaddr = slic_cmdqmem_addpage(adapter);
- if (pageaddr) {
- slic_cmdq_addcmdpage(adapter, pageaddr);
- goto lock_and_retry;
- }
- }
- }
- return cmd;
-}
-
-static void slic_cmdq_putdone_irq(struct adapter *adapter,
- struct slic_hostcmd *cmd)
-{
- struct slic_cmdqueue *cmdq = &adapter->cmdq_done;
-
- spin_lock(&cmdq->lock);
- cmd->busy = 0;
- cmd->next = cmdq->head;
- cmdq->head = cmd;
- cmdq->count++;
- if ((adapter->xmitq_full) && (cmdq->count > 10))
- netif_wake_queue(adapter->netdev);
- spin_unlock(&cmdq->lock);
-}
-
-static int slic_rcvqueue_fill(struct adapter *adapter)
-{
- void *paddr;
- u32 paddrl;
- u32 paddrh;
- struct slic_rcvqueue *rcvq = &adapter->rcvqueue;
- int i = 0;
- struct device *dev = &adapter->netdev->dev;
-
- while (i < SLIC_RCVQ_FILLENTRIES) {
- struct slic_rcvbuf *rcvbuf;
- struct sk_buff *skb;
-#ifdef KLUDGE_FOR_4GB_BOUNDARY
-retry_rcvqfill:
-#endif
- skb = alloc_skb(SLIC_RCVQ_RCVBUFSIZE, GFP_ATOMIC);
- if (skb) {
- paddr = (void *)(unsigned long)
- pci_map_single(adapter->pcidev,
- skb->data,
- SLIC_RCVQ_RCVBUFSIZE,
- PCI_DMA_FROMDEVICE);
- paddrl = SLIC_GET_ADDR_LOW(paddr);
- paddrh = SLIC_GET_ADDR_HIGH(paddr);
-
- skb->len = SLIC_RCVBUF_HEADSIZE;
- rcvbuf = (struct slic_rcvbuf *)skb->head;
- rcvbuf->status = 0;
- skb->next = NULL;
-#ifdef KLUDGE_FOR_4GB_BOUNDARY
- if (paddrl == 0) {
- dev_err(dev, "%s: LOW 32bits PHYSICAL ADDRESS == 0\n",
- __func__);
- dev_err(dev, "skb[%p] PROBLEM\n", skb);
- dev_err(dev, " skbdata[%p]\n",
- skb->data);
- dev_err(dev, " skblen[%x]\n", skb->len);
- dev_err(dev, " paddr[%p]\n", paddr);
- dev_err(dev, " paddrl[%x]\n", paddrl);
- dev_err(dev, " paddrh[%x]\n", paddrh);
- dev_err(dev, " rcvq->head[%p]\n",
- rcvq->head);
- dev_err(dev, " rcvq->tail[%p]\n",
- rcvq->tail);
- dev_err(dev, " rcvq->count[%x]\n",
- rcvq->count);
- dev_err(dev, "SKIP THIS SKB!!!!!!!!\n");
- goto retry_rcvqfill;
- }
-#else
- if (paddrl == 0) {
- dev_err(dev, "%s: LOW 32bits PHYSICAL ADDRESS == 0\n",
- __func__);
- dev_err(dev, "skb[%p] PROBLEM\n", skb);
- dev_err(dev, " skbdata[%p]\n",
- skb->data);
- dev_err(dev, " skblen[%x]\n", skb->len);
- dev_err(dev, " paddr[%p]\n", paddr);
- dev_err(dev, " paddrl[%x]\n", paddrl);
- dev_err(dev, " paddrh[%x]\n", paddrh);
- dev_err(dev, " rcvq->head[%p]\n",
- rcvq->head);
- dev_err(dev, " rcvq->tail[%p]\n",
- rcvq->tail);
- dev_err(dev, " rcvq->count[%x]\n",
- rcvq->count);
- dev_err(dev, "GIVE TO CARD ANYWAY\n");
- }
-#endif
- if (paddrh == 0) {
- slic_write32(adapter, SLIC_REG_HBAR,
- (u32)paddrl);
- } else {
- slic_write64(adapter, SLIC_REG_HBAR64, paddrl,
- paddrh);
- }
- if (rcvq->head)
- rcvq->tail->next = skb;
- else
- rcvq->head = skb;
- rcvq->tail = skb;
- rcvq->count++;
- i++;
- } else {
- dev_err(&adapter->netdev->dev,
- "slic_rcvqueue_fill could only get [%d] skbuffs\n",
- i);
- break;
- }
- }
- return i;
-}
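
Each receive buffer's bus address is handed to the card in two 32-bit halves, and the cheaper 32-bit HBAR doorbell is used whenever the upper half is zero. A standalone sketch of that split; ADDR_LOW/ADDR_HIGH are stand-ins for the driver's SLIC_GET_ADDR_* macros, whose exact definitions live in headers not shown here.

#include <stdint.h>
#include <stdio.h>

/* Stand-ins for SLIC_GET_ADDR_LOW/HIGH (assumed semantics). */
#define ADDR_LOW(a)  ((uint32_t)((uint64_t)(a) & 0xffffffffu))
#define ADDR_HIGH(a) ((uint32_t)((uint64_t)(a) >> 32))

int main(void)
{
	uint64_t dma = 0x123456789abcull;
	uint32_t lo = ADDR_LOW(dma);
	uint32_t hi = ADDR_HIGH(dma);

	/* 32-bit doorbell when the address fits, 64-bit pair otherwise. */
	if (hi == 0)
		printf("write32(HBAR, %#x)\n", (unsigned)lo);
	else
		printf("write64(HBAR64, %#x, %#x)\n",
		       (unsigned)lo, (unsigned)hi);
	return 0;
}
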
-
-static void slic_rcvqueue_free(struct adapter *adapter)
-{
- struct slic_rcvqueue *rcvq = &adapter->rcvqueue;
- struct sk_buff *skb;
-
- while (rcvq->head) {
- skb = rcvq->head;
- rcvq->head = rcvq->head->next;
- dev_kfree_skb(skb);
- }
- rcvq->tail = NULL;
- rcvq->head = NULL;
- rcvq->count = 0;
-}
-
-static int slic_rcvqueue_init(struct adapter *adapter)
-{
- int i, count;
- struct slic_rcvqueue *rcvq = &adapter->rcvqueue;
-
- rcvq->tail = NULL;
- rcvq->head = NULL;
- rcvq->size = SLIC_RCVQ_ENTRIES;
- rcvq->errors = 0;
- rcvq->count = 0;
- i = SLIC_RCVQ_ENTRIES / SLIC_RCVQ_FILLENTRIES;
- count = 0;
- while (i) {
- count += slic_rcvqueue_fill(adapter);
- i--;
- }
- if (rcvq->count < SLIC_RCVQ_MINENTRIES) {
- slic_rcvqueue_free(adapter);
- return -ENOMEM;
- }
- return 0;
-}
-
-static struct sk_buff *slic_rcvqueue_getnext(struct adapter *adapter)
-{
- struct slic_rcvqueue *rcvq = &adapter->rcvqueue;
- struct sk_buff *skb;
- struct slic_rcvbuf *rcvbuf;
- int count;
-
- if (rcvq->count) {
- skb = rcvq->head;
- rcvbuf = (struct slic_rcvbuf *)skb->head;
-
- if (rcvbuf->status & IRHDDR_SVALID) {
- rcvq->head = rcvq->head->next;
- skb->next = NULL;
- rcvq->count--;
- } else {
- skb = NULL;
- }
- } else {
- dev_err(&adapter->netdev->dev,
- "RcvQ Empty!! rcvq[%p] count[%x]\n", rcvq, rcvq->count);
- skb = NULL;
- }
- while (rcvq->count < SLIC_RCVQ_FILLTHRESH) {
- count = slic_rcvqueue_fill(adapter);
- if (!count)
- break;
- }
- if (skb)
- rcvq->errors = 0;
- return skb;
-}
-
-static u32 slic_rcvqueue_reinsert(struct adapter *adapter, struct sk_buff *skb)
-{
- struct slic_rcvqueue *rcvq = &adapter->rcvqueue;
- void *paddr;
- u32 paddrl;
- u32 paddrh;
- struct slic_rcvbuf *rcvbuf = (struct slic_rcvbuf *)skb->head;
- struct device *dev;
-
- paddr = (void *)(unsigned long)
- pci_map_single(adapter->pcidev, skb->head,
- SLIC_RCVQ_RCVBUFSIZE, PCI_DMA_FROMDEVICE);
- rcvbuf->status = 0;
- skb->next = NULL;
-
- paddrl = SLIC_GET_ADDR_LOW(paddr);
- paddrh = SLIC_GET_ADDR_HIGH(paddr);
-
- if (paddrl == 0) {
- dev = &adapter->netdev->dev;
- dev_err(dev, "%s: LOW 32bits PHYSICAL ADDRESS == 0\n",
- __func__);
- dev_err(dev, "skb[%p] PROBLEM\n", skb);
- dev_err(dev, " skbdata[%p]\n", skb->data);
- dev_err(dev, " skblen[%x]\n", skb->len);
- dev_err(dev, " paddr[%p]\n", paddr);
- dev_err(dev, " paddrl[%x]\n", paddrl);
- dev_err(dev, " paddrh[%x]\n", paddrh);
- dev_err(dev, " rcvq->head[%p]\n", rcvq->head);
- dev_err(dev, " rcvq->tail[%p]\n", rcvq->tail);
- dev_err(dev, " rcvq->count[%x]\n", rcvq->count);
- }
- if (paddrh == 0)
- slic_write32(adapter, SLIC_REG_HBAR, (u32)paddrl);
- else
- slic_write64(adapter, SLIC_REG_HBAR64, paddrl, paddrh);
- if (rcvq->head)
- rcvq->tail->next = skb;
- else
- rcvq->head = skb;
- rcvq->tail = skb;
- rcvq->count++;
- return rcvq->count;
-}
-
-/*
- * slic_link_event_handler -
- *
- * Initiate a link configuration sequence. The link configuration begins
- * by issuing a READ_LINK_STATUS command to the Utility Processor on the
- * SLIC. Since the command finishes asynchronously, the slic_upr_complete
- * routine will follow it up with a UP configuration write command, which
- * will also complete asynchronously.
- *
- */
-static int slic_link_event_handler(struct adapter *adapter)
-{
- int status;
- struct slic_shmemory *sm = &adapter->shmem;
- dma_addr_t phaddr = sm->lnkstatus_phaddr;
-
- if (adapter->state != ADAPT_UP) {
- /* Adapter is not operational. Ignore. */
- return -ENODEV;
- }
- /* no 4GB wrap guaranteed */
- status = slic_upr_request(adapter, SLIC_UPR_RLSR,
- cpu_to_le32(lower_32_bits(phaddr)),
- cpu_to_le32(upper_32_bits(phaddr)), 0, 0);
- return status;
-}
-
-static void slic_init_cleanup(struct adapter *adapter)
-{
- if (adapter->intrregistered) {
- adapter->intrregistered = 0;
- free_irq(adapter->netdev->irq, adapter->netdev);
- }
-
- if (adapter->shmem.shmem_data) {
- struct slic_shmemory *sm = &adapter->shmem;
- struct slic_shmem_data *sm_data = sm->shmem_data;
-
- pci_free_consistent(adapter->pcidev, sizeof(*sm_data), sm_data,
- sm->isr_phaddr);
- }
-
- if (adapter->pingtimerset) {
- adapter->pingtimerset = 0;
- del_timer(&adapter->pingtimer);
- }
-
- slic_rspqueue_free(adapter);
- slic_cmdq_free(adapter);
- slic_rcvqueue_free(adapter);
-}
-
-/*
- * Allocate a mcast_address structure to hold the multicast address.
- * Link it in.
- */
-static int slic_mcast_add_list(struct adapter *adapter, char *address)
-{
- struct mcast_address *mcaddr, *mlist;
-
- /* Check to see if it already exists */
- mlist = adapter->mcastaddrs;
- while (mlist) {
- if (ether_addr_equal(mlist->address, address))
- return 0;
- mlist = mlist->next;
- }
-
- /* Doesn't already exist. Allocate a structure to hold it */
- mcaddr = kmalloc(sizeof(*mcaddr), GFP_ATOMIC);
- if (!mcaddr)
- return 1;
-
- ether_addr_copy(mcaddr->address, address);
-
- mcaddr->next = adapter->mcastaddrs;
- adapter->mcastaddrs = mcaddr;
-
- return 0;
-}
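
slic_mcast_add_list() is a plain dedup-then-prepend on a singly linked list, O(n) per add but fine for the handful of multicast addresses a NIC tracks. The same pattern in standalone form, with hypothetical names:

#include <stdlib.h>
#include <string.h>

#define ETH_ALEN 6

struct mc_addr {
	unsigned char addr[ETH_ALEN];
	struct mc_addr *next;
};

/* Prepend addr to *list unless it is already present; 0 on success. */
static int mc_add(struct mc_addr **list, const unsigned char *addr)
{
	struct mc_addr *m;

	for (m = *list; m; m = m->next)
		if (!memcmp(m->addr, addr, ETH_ALEN))
			return 0;	/* already present */

	m = malloc(sizeof(*m));
	if (!m)
		return -1;
	memcpy(m->addr, addr, ETH_ALEN);
	m->next = *list;
	*list = m;
	return 0;
}
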
-
-static void slic_mcast_set_list(struct net_device *dev)
-{
- struct adapter *adapter = netdev_priv(dev);
- int status = 0;
- char *addresses;
- struct netdev_hw_addr *ha;
-
- netdev_for_each_mc_addr(ha, dev) {
- addresses = (char *)&ha->addr;
- status = slic_mcast_add_list(adapter, addresses);
- if (status != 0)
- break;
- slic_mcast_set_bit(adapter, addresses);
- }
-
- if (adapter->devflags_prev != dev->flags) {
- adapter->macopts = MAC_DIRECTED;
- if (dev->flags) {
- if (dev->flags & IFF_BROADCAST)
- adapter->macopts |= MAC_BCAST;
- if (dev->flags & IFF_PROMISC)
- adapter->macopts |= MAC_PROMISC;
- if (dev->flags & IFF_ALLMULTI)
- adapter->macopts |= MAC_ALLMCAST;
- if (dev->flags & IFF_MULTICAST)
- adapter->macopts |= MAC_MCAST;
- }
- adapter->devflags_prev = dev->flags;
- slic_config_set(adapter, true);
- } else {
- if (status == 0)
- slic_mcast_set_mask(adapter);
- }
-}
-
-#define XMIT_FAIL_LINK_STATE 1
-#define XMIT_FAIL_ZERO_LENGTH 2
-#define XMIT_FAIL_HOSTCMD_FAIL 3
-
-static void slic_xmit_build_request(struct adapter *adapter,
- struct slic_hostcmd *hcmd, struct sk_buff *skb)
-{
- struct slic_host64_cmd *ihcmd;
- ulong phys_addr;
-
- ihcmd = &hcmd->cmd64;
-
- ihcmd->flags = adapter->port << IHFLG_IFSHFT;
- ihcmd->command = IHCMD_XMT_REQ;
- ihcmd->u.slic_buffers.totlen = skb->len;
- phys_addr = pci_map_single(adapter->pcidev, skb->data, skb->len,
- PCI_DMA_TODEVICE);
- if (pci_dma_mapping_error(adapter->pcidev, phys_addr)) {
- kfree_skb(skb);
- dev_err(&adapter->pcidev->dev, "DMA mapping error\n");
- return;
- }
- ihcmd->u.slic_buffers.bufs[0].paddrl = SLIC_GET_ADDR_LOW(phys_addr);
- ihcmd->u.slic_buffers.bufs[0].paddrh = SLIC_GET_ADDR_HIGH(phys_addr);
- ihcmd->u.slic_buffers.bufs[0].length = skb->len;
-#if BITS_PER_LONG == 64
- hcmd->cmdsize = (u32)((((u64)&ihcmd->u.slic_buffers.bufs[1] -
- (u64)hcmd) + 31) >> 5);
-#else
- hcmd->cmdsize = (((u32)&ihcmd->u.slic_buffers.bufs[1] -
- (u32)hcmd) + 31) >> 5;
-#endif
-}
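
hcmd->cmdsize measures the span from the start of the host command to the end of the first buffer descriptor, rounded up to 32-byte units; the BITS_PER_LONG split above only changes the integer type used for the pointer arithmetic. A tiny standalone check of that rounding:

#include <assert.h>
#include <stdint.h>

/* Round a byte span up to 32-byte units, as in hcmd->cmdsize. */
static uint32_t units32(uintptr_t start, uintptr_t end)
{
	return (uint32_t)((end - start + 31) >> 5);
}

int main(void)
{
	assert(units32(0, 1) == 1);	/* one byte still costs one unit */
	assert(units32(0, 32) == 1);
	assert(units32(0, 33) == 2);	/* crossing a boundary adds a unit */
	return 0;
}
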
-
-static void slic_xmit_fail(struct adapter *adapter,
- struct sk_buff *skb,
- void *cmd, u32 skbtype, u32 status)
-{
- if (adapter->xmitq_full)
- netif_stop_queue(adapter->netdev);
- if ((!cmd) && (status <= XMIT_FAIL_HOSTCMD_FAIL)) {
- switch (status) {
- case XMIT_FAIL_LINK_STATE:
- dev_err(&adapter->netdev->dev,
- "reject xmit skb[%p: %x] linkstate[%s] adapter[%s:%d] card[%s:%d]\n",
- skb, skb->pkt_type,
- SLIC_LINKSTATE(adapter->linkstate),
- SLIC_ADAPTER_STATE(adapter->state),
- adapter->state,
- SLIC_CARD_STATE(adapter->card->state),
- adapter->card->state);
- break;
- case XMIT_FAIL_ZERO_LENGTH:
- dev_err(&adapter->netdev->dev,
- "xmit_start skb->len == 0 skb[%p] type[%x]\n",
- skb, skb->pkt_type);
- break;
- case XMIT_FAIL_HOSTCMD_FAIL:
- dev_err(&adapter->netdev->dev,
- "xmit_start skb[%p] type[%x] No host commands available\n",
- skb, skb->pkt_type);
- break;
- }
- }
- dev_kfree_skb(skb);
- adapter->netdev->stats.tx_dropped++;
-}
-
-static void slic_rcv_handle_error(struct adapter *adapter,
- struct slic_rcvbuf *rcvbuf)
-{
- struct slic_hddr_wds *hdr = (struct slic_hddr_wds *)rcvbuf->data;
- struct net_device *netdev = adapter->netdev;
-
- if (adapter->devid != SLIC_1GB_DEVICE_ID) {
- if (hdr->frame_status14 & VRHSTAT_802OE)
- adapter->if_events.oflow802++;
- if (hdr->frame_status14 & VRHSTAT_TPOFLO)
- adapter->if_events.Tprtoflow++;
- if (hdr->frame_status_b14 & VRHSTATB_802UE)
- adapter->if_events.uflow802++;
- if (hdr->frame_status_b14 & VRHSTATB_RCVE) {
- adapter->if_events.rcvearly++;
- netdev->stats.rx_fifo_errors++;
- }
- if (hdr->frame_status_b14 & VRHSTATB_BUFF) {
- adapter->if_events.Bufov++;
- netdev->stats.rx_over_errors++;
- }
- if (hdr->frame_status_b14 & VRHSTATB_CARRE) {
- adapter->if_events.Carre++;
- netdev->stats.tx_carrier_errors++;
- }
- if (hdr->frame_status_b14 & VRHSTATB_LONGE)
- adapter->if_events.Longe++;
- if (hdr->frame_status_b14 & VRHSTATB_PREA)
- adapter->if_events.Invp++;
- if (hdr->frame_status_b14 & VRHSTATB_CRC) {
- adapter->if_events.Crc++;
- netdev->stats.rx_crc_errors++;
- }
- if (hdr->frame_status_b14 & VRHSTATB_DRBL)
- adapter->if_events.Drbl++;
- if (hdr->frame_status_b14 & VRHSTATB_CODE)
- adapter->if_events.Code++;
- if (hdr->frame_status_b14 & VRHSTATB_TPCSUM)
- adapter->if_events.TpCsum++;
- if (hdr->frame_status_b14 & VRHSTATB_TPHLEN)
- adapter->if_events.TpHlen++;
- if (hdr->frame_status_b14 & VRHSTATB_IPCSUM)
- adapter->if_events.IpCsum++;
- if (hdr->frame_status_b14 & VRHSTATB_IPLERR)
- adapter->if_events.IpLen++;
- if (hdr->frame_status_b14 & VRHSTATB_IPHERR)
- adapter->if_events.IpHlen++;
- } else {
- if (hdr->frame_statusGB & VGBSTAT_XPERR) {
- u32 xerr = hdr->frame_statusGB >> VGBSTAT_XERRSHFT;
-
- if (xerr == VGBSTAT_XCSERR)
- adapter->if_events.TpCsum++;
- if (xerr == VGBSTAT_XUFLOW)
- adapter->if_events.Tprtoflow++;
- if (xerr == VGBSTAT_XHLEN)
- adapter->if_events.TpHlen++;
- }
- if (hdr->frame_statusGB & VGBSTAT_NETERR) {
- u32 nerr = (hdr->frame_statusGB >> VGBSTAT_NERRSHFT) &
- VGBSTAT_NERRMSK;
-
- if (nerr == VGBSTAT_NCSERR)
- adapter->if_events.IpCsum++;
- if (nerr == VGBSTAT_NUFLOW)
- adapter->if_events.IpLen++;
- if (nerr == VGBSTAT_NHLEN)
- adapter->if_events.IpHlen++;
- }
- if (hdr->frame_statusGB & VGBSTAT_LNKERR) {
- u32 lerr = hdr->frame_statusGB & VGBSTAT_LERRMSK;
-
- if (lerr == VGBSTAT_LDEARLY)
- adapter->if_events.rcvearly++;
- if (lerr == VGBSTAT_LBOFLO)
- adapter->if_events.Bufov++;
- if (lerr == VGBSTAT_LCODERR)
- adapter->if_events.Code++;
- if (lerr == VGBSTAT_LDBLNBL)
- adapter->if_events.Drbl++;
- if (lerr == VGBSTAT_LCRCERR)
- adapter->if_events.Crc++;
- if (lerr == VGBSTAT_LOFLO)
- adapter->if_events.oflow802++;
- if (lerr == VGBSTAT_LUFLO)
- adapter->if_events.uflow802++;
- }
- }
-}
-
-#define TCP_OFFLOAD_FRAME_PUSHFLAG 0x10000000
-#define M_FAST_PATH 0x0040
-
-static void slic_rcv_handler(struct adapter *adapter)
-{
- struct net_device *netdev = adapter->netdev;
- struct sk_buff *skb;
- struct slic_rcvbuf *rcvbuf;
- u32 frames = 0;
-
- while ((skb = slic_rcvqueue_getnext(adapter))) {
- u32 rx_bytes;
-
- rcvbuf = (struct slic_rcvbuf *)skb->head;
- adapter->card->events++;
- if (rcvbuf->status & IRHDDR_ERR) {
- adapter->rx_errors++;
- slic_rcv_handle_error(adapter, rcvbuf);
- slic_rcvqueue_reinsert(adapter, skb);
- continue;
- }
-
- if (!slic_mac_filter(adapter, (struct ether_header *)
- rcvbuf->data)) {
- slic_rcvqueue_reinsert(adapter, skb);
- continue;
- }
- skb_pull(skb, SLIC_RCVBUF_HEADSIZE);
- rx_bytes = (rcvbuf->length & IRHDDR_FLEN_MSK);
- skb_put(skb, rx_bytes);
- netdev->stats.rx_packets++;
- netdev->stats.rx_bytes += rx_bytes;
-#if SLIC_OFFLOAD_IP_CHECKSUM
- skb->ip_summed = CHECKSUM_UNNECESSARY;
-#endif
-
- skb->dev = adapter->netdev;
- skb->protocol = eth_type_trans(skb, skb->dev);
- netif_rx(skb);
-
- ++frames;
-#if SLIC_INTERRUPT_PROCESS_LIMIT
- if (frames >= SLIC_RCVQ_MAX_PROCESS_ISR) {
- adapter->rcv_interrupt_yields++;
- break;
- }
-#endif
- }
- adapter->max_isr_rcvs = max(adapter->max_isr_rcvs, frames);
-}
-
-static void slic_xmit_complete(struct adapter *adapter)
-{
- struct slic_hostcmd *hcmd;
- struct slic_rspbuf *rspbuf;
- u32 frames = 0;
- struct slic_handle_word slic_handle_word;
-
- do {
- rspbuf = slic_rspqueue_getnext(adapter);
- if (!rspbuf)
- break;
- adapter->xmit_completes++;
- adapter->card->events++;
- /*
- * Get the complete host command buffer
- */
- slic_handle_word.handle_token = rspbuf->hosthandle;
- hcmd = adapter->slic_handles[slic_handle_word.handle_index].address;
-/* hcmd = (struct slic_hostcmd *) rspbuf->hosthandle; */
- if (hcmd->type == SLIC_CMD_DUMB) {
- if (hcmd->skb)
- dev_kfree_skb_irq(hcmd->skb);
- slic_cmdq_putdone_irq(adapter, hcmd);
- }
- rspbuf->status = 0;
- rspbuf->hosthandle = 0;
- frames++;
- } while (1);
- adapter->max_isr_xmits = max(adapter->max_isr_xmits, frames);
-}
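
The response buffer echoes back the opaque hosthandle the driver gave the card, and slic_handle_word splits that 32-bit token so one half indexes adapter->slic_handles[], letting the host recover its command buffer without trusting a raw pointer from hardware. A sketch of that token/index trick; which half carries the index, and the little-endian layout, are assumptions here, not taken from the driver headers.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical layout mirroring struct slic_handle_word. */
union handle_word {
	struct {
		uint16_t index;		/* assumed low half, little-endian */
		uint16_t bottom;
	} parts;
	uint32_t token;
};

int main(void)
{
	union handle_word h;

	h.token = 0xbeef0007;		/* as received from hardware */
	printf("lookup slot %u\n", (unsigned)h.parts.index);
	return 0;
}
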
-
-static void slic_interrupt_card_up(u32 isr, struct adapter *adapter,
- struct net_device *dev)
-{
- if (isr & ~ISR_IO) {
- if (isr & ISR_ERR) {
- adapter->error_interrupts++;
- if (isr & ISR_RMISS) {
- int count;
- int pre_count;
- int errors;
-
- struct slic_rcvqueue *rcvq =
- &adapter->rcvqueue;
-
- adapter->error_rmiss_interrupts++;
-
- if (!rcvq->errors)
- rcv_count = rcvq->count;
- pre_count = rcvq->count;
- errors = rcvq->errors;
-
- while (rcvq->count < SLIC_RCVQ_FILLTHRESH) {
- count = slic_rcvqueue_fill(adapter);
- if (!count)
- break;
- }
- } else if (isr & ISR_XDROP) {
- dev_err(&dev->dev,
- "isr & ISR_ERR [%x] ISR_XDROP\n",
- isr);
- } else {
- dev_err(&dev->dev,
- "isr & ISR_ERR [%x]\n",
- isr);
- }
- }
-
- if (isr & ISR_LEVENT) {
- adapter->linkevent_interrupts++;
- if (slic_link_event_handler(adapter))
- adapter->linkevent_interrupts--;
- }
-
- if ((isr & ISR_UPC) || (isr & ISR_UPCERR) ||
- (isr & ISR_UPCBSY)) {
- adapter->upr_interrupts++;
- slic_upr_request_complete(adapter, isr);
- }
- }
-
- if (isr & ISR_RCV) {
- adapter->rcv_interrupts++;
- slic_rcv_handler(adapter);
- }
-
- if (isr & ISR_CMD) {
- adapter->xmit_interrupts++;
- slic_xmit_complete(adapter);
- }
-}
-
-static irqreturn_t slic_interrupt(int irq, void *dev_id)
-{
- struct net_device *dev = dev_id;
- struct adapter *adapter = netdev_priv(dev);
- struct slic_shmemory *sm = &adapter->shmem;
- struct slic_shmem_data *sm_data = sm->shmem_data;
- u32 isr;
-
- if (sm_data->isr) {
- slic_write32(adapter, SLIC_REG_ICR, ICR_INT_MASK);
- slic_flush_write(adapter);
-
- isr = sm_data->isr;
- sm_data->isr = 0;
- adapter->num_isrs++;
- switch (adapter->card->state) {
- case CARD_UP:
- slic_interrupt_card_up(isr, adapter, dev);
- break;
-
- case CARD_DOWN:
- if ((isr & ISR_UPC) ||
- (isr & ISR_UPCERR) || (isr & ISR_UPCBSY)) {
- adapter->upr_interrupts++;
- slic_upr_request_complete(adapter, isr);
- }
- break;
- }
-
- adapter->all_reg_writes += 2;
- adapter->isr_reg_writes++;
- slic_write32(adapter, SLIC_REG_ISR, 0);
- } else {
- adapter->false_interrupts++;
- }
- return IRQ_HANDLED;
-}
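
The handler above relies on the card DMA-ing its interrupt status into host shared memory: a nonzero word means the interrupt is ours, and the handler masks, snapshots, clears, dispatches, then acks. A compact model of that handshake, with stub register writes standing in for the real ICR/ISR MMIO accesses:

#include <stdint.h>

/* Stubs standing in for the MMIO doorbell writes (hypothetical). */
static void write_icr(uint32_t v) { (void)v; }
static void write_isr(uint32_t v) { (void)v; }

#define ICR_INT_MASK 1u

/*
 * Shared-memory interrupt handshake: snapshot and clear the status word
 * the card DMAs into host memory instead of doing a slow MMIO read.
 */
static void isr_bottom(volatile uint32_t *shmem_isr)
{
	uint32_t isr;

	if (!*shmem_isr)
		return;			/* not ours / spurious */

	write_icr(ICR_INT_MASK);	/* mask further interrupts */
	isr = *shmem_isr;		/* snapshot the status word */
	*shmem_isr = 0;			/* hand the slot back to the card */

	/* ... dispatch on isr bits (receive, xmit complete, link, ...) ... */
	(void)isr;

	write_isr(0);			/* ack; the card may post again */
}
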
-
-#define NORMAL_ETHFRAME 0
-
-static netdev_tx_t slic_xmit_start(struct sk_buff *skb, struct net_device *dev)
-{
- struct sliccard *card;
- struct adapter *adapter = netdev_priv(dev);
- struct slic_hostcmd *hcmd = NULL;
- u32 status = 0;
- void *offloadcmd = NULL;
-
- card = adapter->card;
- if ((adapter->linkstate != LINK_UP) ||
- (adapter->state != ADAPT_UP) || (card->state != CARD_UP)) {
- status = XMIT_FAIL_LINK_STATE;
- goto xmit_fail;
-
- } else if (skb->len == 0) {
- status = XMIT_FAIL_ZERO_LENGTH;
- goto xmit_fail;
- }
-
- hcmd = slic_cmdq_getfree(adapter);
- if (!hcmd) {
- adapter->xmitq_full = 1;
- status = XMIT_FAIL_HOSTCMD_FAIL;
- goto xmit_fail;
- }
- hcmd->skb = skb;
- hcmd->busy = 1;
- hcmd->type = SLIC_CMD_DUMB;
- slic_xmit_build_request(adapter, hcmd, skb);
- dev->stats.tx_packets++;
- dev->stats.tx_bytes += skb->len;
-
-#ifdef DEBUG_DUMP
- if (adapter->kill_card) {
- struct slic_host64_cmd *ihcmd;
-
- ihcmd = &hcmd->cmd64;
-
- ihcmd->flags |= 0x40;
- adapter->kill_card = 0; /* only do this once */
- }
-#endif
- if (hcmd->paddrh == 0) {
- slic_write32(adapter, SLIC_REG_CBAR, (hcmd->paddrl |
- hcmd->cmdsize));
- } else {
- slic_write64(adapter, SLIC_REG_CBAR64,
- hcmd->paddrl | hcmd->cmdsize, hcmd->paddrh);
- }
-xmit_done:
- return NETDEV_TX_OK;
-xmit_fail:
- slic_xmit_fail(adapter, skb, offloadcmd, NORMAL_ETHFRAME, status);
- goto xmit_done;
-}
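
The doorbell write at the end of slic_xmit_start() ORs cmdsize directly into the low bits of the command's physical address. That packing only works if commands are 32-byte aligned (so the low five address bits are zero) and the size in 32-byte units fits in those five bits; this is inferred from the code, not stated in it. A standalone illustration:

#include <assert.h>
#include <stdint.h>

/* Pack a 32-byte-aligned address with a 5-bit size field (assumed layout). */
static uint32_t pack_cbar(uint32_t paddrl, uint32_t cmdsize)
{
	assert((paddrl & 0x1f) == 0);	/* 32-byte aligned buffer */
	assert(cmdsize < 0x20);		/* size in 32-byte units, 5 bits */
	return paddrl | cmdsize;
}

int main(void)
{
	assert(pack_cbar(0x1000, 3) == 0x1003);
	return 0;
}
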
-
-static void slic_adapter_freeresources(struct adapter *adapter)
-{
- slic_init_cleanup(adapter);
- adapter->error_interrupts = 0;
- adapter->rcv_interrupts = 0;
- adapter->xmit_interrupts = 0;
- adapter->linkevent_interrupts = 0;
- adapter->upr_interrupts = 0;
- adapter->num_isrs = 0;
- adapter->xmit_completes = 0;
- adapter->rcv_broadcasts = 0;
- adapter->rcv_multicasts = 0;
- adapter->rcv_unicasts = 0;
-}
-
-static int slic_adapter_allocresources(struct adapter *adapter,
- unsigned long *flags)
-{
- if (!adapter->intrregistered) {
- int retval;
-
- spin_unlock_irqrestore(&slic_global.driver_lock, *flags);
-
- retval = request_irq(adapter->netdev->irq,
- &slic_interrupt,
- IRQF_SHARED,
- adapter->netdev->name, adapter->netdev);
-
- spin_lock_irqsave(&slic_global.driver_lock, *flags);
-
- if (retval) {
- dev_err(&adapter->netdev->dev,
- "request_irq (%s) FAILED [%x]\n",
- adapter->netdev->name, retval);
- return retval;
- }
- adapter->intrregistered = 1;
- }
- return 0;
-}
-
-/*
- * slic_if_init
- *
- * Perform initialization of our slic interface.
- *
- */
-static int slic_if_init(struct adapter *adapter, unsigned long *flags)
-{
- struct sliccard *card = adapter->card;
- struct net_device *dev = adapter->netdev;
- struct slic_shmemory *sm = &adapter->shmem;
- struct slic_shmem_data *sm_data = sm->shmem_data;
- int rc;
-
- /* adapter should be down at this point */
- if (adapter->state != ADAPT_DOWN) {
- dev_err(&dev->dev, "%s: adapter->state != ADAPT_DOWN\n",
- __func__);
- rc = -EIO;
- goto err;
- }
-
- adapter->devflags_prev = dev->flags;
- adapter->macopts = MAC_DIRECTED;
- if (dev->flags) {
- if (dev->flags & IFF_BROADCAST)
- adapter->macopts |= MAC_BCAST;
- if (dev->flags & IFF_PROMISC)
- adapter->macopts |= MAC_PROMISC;
- if (dev->flags & IFF_ALLMULTI)
- adapter->macopts |= MAC_ALLMCAST;
- if (dev->flags & IFF_MULTICAST)
- adapter->macopts |= MAC_MCAST;
- }
- rc = slic_adapter_allocresources(adapter, flags);
- if (rc) {
- dev_err(&dev->dev, "slic_adapter_allocresources FAILED %x\n",
- rc);
- slic_adapter_freeresources(adapter);
- goto err;
- }
-
- if (!adapter->queues_initialized) {
- rc = slic_rspqueue_init(adapter);
- if (rc)
- goto err;
- rc = slic_cmdq_init(adapter);
- if (rc)
- goto err;
- rc = slic_rcvqueue_init(adapter);
- if (rc)
- goto err;
- adapter->queues_initialized = 1;
- }
-
- slic_write32(adapter, SLIC_REG_ICR, ICR_INT_OFF);
- slic_flush_write(adapter);
- mdelay(1);
-
- if (!adapter->isp_initialized) {
- unsigned long flags;
-
- spin_lock_irqsave(&adapter->bit64reglock, flags);
- slic_write32(adapter, SLIC_REG_ADDR_UPPER,
- cpu_to_le32(upper_32_bits(sm->isr_phaddr)));
- slic_write32(adapter, SLIC_REG_ISP,
- cpu_to_le32(lower_32_bits(sm->isr_phaddr)));
- spin_unlock_irqrestore(&adapter->bit64reglock, flags);
-
- adapter->isp_initialized = 1;
- }
-
- adapter->state = ADAPT_UP;
- if (!card->loadtimerset) {
- setup_timer(&card->loadtimer, &slic_timer_load_check,
- (ulong)card);
- card->loadtimer.expires =
- jiffies + (SLIC_LOADTIMER_PERIOD * HZ);
- add_timer(&card->loadtimer);
-
- card->loadtimerset = 1;
- }
-
- if (!adapter->pingtimerset) {
- setup_timer(&adapter->pingtimer, &slic_timer_ping, (ulong)dev);
- adapter->pingtimer.expires =
- jiffies + (PING_TIMER_INTERVAL * HZ);
- add_timer(&adapter->pingtimer);
- adapter->pingtimerset = 1;
- adapter->card->pingstatus = ISR_PINGMASK;
- }
-
- /*
- * clear any pending events, then enable interrupts
- */
- sm_data->isr = 0;
- slic_write32(adapter, SLIC_REG_ISR, 0);
- slic_write32(adapter, SLIC_REG_ICR, ICR_INT_ON);
-
- slic_link_config(adapter, LINK_AUTOSPEED, LINK_AUTOD);
- slic_flush_write(adapter);
-
- rc = slic_link_event_handler(adapter);
- if (rc) {
- /* disable interrupts then clear pending events */
- slic_write32(adapter, SLIC_REG_ICR, ICR_INT_OFF);
- slic_write32(adapter, SLIC_REG_ISR, 0);
- slic_flush_write(adapter);
-
- if (adapter->pingtimerset) {
- del_timer(&adapter->pingtimer);
- adapter->pingtimerset = 0;
- }
- if (card->loadtimerset) {
- del_timer(&card->loadtimer);
- card->loadtimerset = 0;
- }
- adapter->state = ADAPT_DOWN;
- slic_adapter_freeresources(adapter);
- }
-
-err:
- return rc;
-}
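
slic_if_init() arms its load and ping timers through the pre-4.15 timer API, where the callback receives an opaque unsigned long that the driver uses to smuggle the device pointer. A minimal sketch of that idiom; the one-second period is arbitrary, and later kernels replaced setup_timer() with timer_setup():

#include <linux/jiffies.h>
#include <linux/netdevice.h>
#include <linux/timer.h>

static struct timer_list ping_timer;

/* Pre-4.15 callback signature: the cookie carries the net_device. */
static void ping_cb(unsigned long data)
{
	struct net_device *dev = (struct net_device *)data;

	/* ... periodic link check would go here ... */
	netdev_dbg(dev, "ping\n");
	mod_timer(&ping_timer, jiffies + HZ);	/* re-arm */
}

static void ping_start(struct net_device *dev)
{
	setup_timer(&ping_timer, ping_cb, (unsigned long)dev);
	ping_timer.expires = jiffies + HZ;
	add_timer(&ping_timer);
}
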
-
-static int slic_entry_open(struct net_device *dev)
-{
- struct adapter *adapter = netdev_priv(dev);
- struct sliccard *card = adapter->card;
- unsigned long flags;
- int status;
-
- netif_carrier_off(dev);
-
- spin_lock_irqsave(&slic_global.driver_lock, flags);
- if (!adapter->activated) {
- card->adapters_activated++;
- slic_global.num_slic_ports_active++;
- adapter->activated = 1;
- }
- status = slic_if_init(adapter, &flags);
-
- if (status != 0) {
- if (adapter->activated) {
- card->adapters_activated--;
- slic_global.num_slic_ports_active--;
- adapter->activated = 0;
- }
- goto spin_unlock;
- }
- if (!card->master)
- card->master = adapter;
-
-spin_unlock:
- spin_unlock_irqrestore(&slic_global.driver_lock, flags);
-
- netif_start_queue(adapter->netdev);
-
- return status;
-}
-
-static void slic_card_cleanup(struct sliccard *card)
-{
- if (card->loadtimerset) {
- card->loadtimerset = 0;
- del_timer_sync(&card->loadtimer);
- }
-
- kfree(card);
-}
-
-static void slic_entry_remove(struct pci_dev *pcidev)
-{
- struct net_device *dev = pci_get_drvdata(pcidev);
- struct adapter *adapter = netdev_priv(dev);
- struct sliccard *card;
- struct mcast_address *mcaddr, *mlist;
-
- unregister_netdev(dev);
-
- slic_adapter_freeresources(adapter);
- iounmap(adapter->regs);
-
- /* free multicast addresses */
- mlist = adapter->mcastaddrs;
- while (mlist) {
- mcaddr = mlist;
- mlist = mlist->next;
- kfree(mcaddr);
- }
- card = adapter->card;
- card->adapters_allocated--;
- adapter->allocated = 0;
- if (!card->adapters_allocated) {
- struct sliccard *curr_card = slic_global.slic_card;
-
- if (curr_card == card) {
- slic_global.slic_card = card->next;
- } else {
- while (curr_card->next != card)
- curr_card = curr_card->next;
- curr_card->next = card->next;
- }
- slic_global.num_slic_cards--;
- slic_card_cleanup(card);
- }
- free_netdev(dev);
- pci_release_regions(pcidev);
- pci_disable_device(pcidev);
-}
-
-static int slic_entry_halt(struct net_device *dev)
-{
- struct adapter *adapter = netdev_priv(dev);
- struct sliccard *card = adapter->card;
- unsigned long flags;
-
- spin_lock_irqsave(&slic_global.driver_lock, flags);
- netif_stop_queue(adapter->netdev);
- adapter->state = ADAPT_DOWN;
- adapter->linkstate = LINK_DOWN;
- adapter->upr_list = NULL;
- adapter->upr_busy = 0;
- adapter->devflags_prev = 0;
- slic_write32(adapter, SLIC_REG_ICR, ICR_INT_OFF);
- adapter->all_reg_writes++;
- adapter->icr_reg_writes++;
- slic_config_clear(adapter);
- if (adapter->activated) {
- card->adapters_activated--;
- slic_global.num_slic_ports_active--;
- adapter->activated = 0;
- }
-#ifdef AUTOMATIC_RESET
- slic_write32(adapter, SLIC_REG_RESET_IFACE, 0);
-#endif
- slic_flush_write(adapter);
-
- /*
- * Reset the adapter's cmd queues
- */
- slic_cmdq_reset(adapter);
-
-#ifdef AUTOMATIC_RESET
- if (!card->adapters_activated)
- slic_card_init(card, adapter);
-#endif
-
- spin_unlock_irqrestore(&slic_global.driver_lock, flags);
-
- netif_carrier_off(dev);
-
- return 0;
-}
-
-static struct net_device_stats *slic_get_stats(struct net_device *dev)
-{
- struct adapter *adapter = netdev_priv(dev);
-
- dev->stats.collisions = adapter->slic_stats.iface.xmit_collisions;
- dev->stats.rx_errors = adapter->slic_stats.iface.rcv_errors;
- dev->stats.tx_errors = adapter->slic_stats.iface.xmt_errors;
- dev->stats.rx_missed_errors = adapter->slic_stats.iface.rcv_discards;
- dev->stats.tx_heartbeat_errors = 0;
- dev->stats.tx_aborted_errors = 0;
- dev->stats.tx_window_errors = 0;
- dev->stats.tx_fifo_errors = 0;
- dev->stats.rx_frame_errors = 0;
- dev->stats.rx_length_errors = 0;
-
- return &dev->stats;
-}
-
-static int slic_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
-{
- struct adapter *adapter = netdev_priv(dev);
- struct ethtool_cmd edata;
- struct ethtool_cmd ecmd;
- u32 data[7];
- u32 intagg;
-
- switch (cmd) {
- case SIOCSLICSETINTAGG:
- if (copy_from_user(data, rq->ifr_data, sizeof(data)))
- return -EFAULT;
- intagg = data[0];
- dev_err(&dev->dev, "set interrupt aggregation to %d\n",
- intagg);
- slic_intagg_set(adapter, intagg);
- return 0;
-
- case SIOCETHTOOL:
- if (copy_from_user(&ecmd, rq->ifr_data, sizeof(ecmd)))
- return -EFAULT;
-
- if (ecmd.cmd == ETHTOOL_GSET) {
- memset(&edata, 0, sizeof(edata));
- edata.supported = (SUPPORTED_10baseT_Half |
- SUPPORTED_10baseT_Full |
- SUPPORTED_100baseT_Half |
- SUPPORTED_100baseT_Full |
- SUPPORTED_Autoneg | SUPPORTED_MII);
- edata.port = PORT_MII;
- edata.transceiver = XCVR_INTERNAL;
- edata.phy_address = 0;
- if (adapter->linkspeed == LINK_100MB)
- edata.speed = SPEED_100;
- else if (adapter->linkspeed == LINK_10MB)
- edata.speed = SPEED_10;
- else
- edata.speed = 0;
-
- if (adapter->linkduplex == LINK_FULLD)
- edata.duplex = DUPLEX_FULL;
- else
- edata.duplex = DUPLEX_HALF;
-
- edata.autoneg = AUTONEG_ENABLE;
- edata.maxtxpkt = 1;
- edata.maxrxpkt = 1;
- if (copy_to_user(rq->ifr_data, &edata, sizeof(edata)))
- return -EFAULT;
-
- } else if (ecmd.cmd == ETHTOOL_SSET) {
- if (!capable(CAP_NET_ADMIN))
- return -EPERM;
-
- if (adapter->linkspeed == LINK_100MB)
- edata.speed = SPEED_100;
- else if (adapter->linkspeed == LINK_10MB)
- edata.speed = SPEED_10;
- else
- edata.speed = 0;
-
- if (adapter->linkduplex == LINK_FULLD)
- edata.duplex = DUPLEX_FULL;
- else
- edata.duplex = DUPLEX_HALF;
-
- edata.autoneg = AUTONEG_ENABLE;
- edata.maxtxpkt = 1;
- edata.maxrxpkt = 1;
- if ((ecmd.speed != edata.speed) ||
- (ecmd.duplex != edata.duplex)) {
- u32 speed;
- u32 duplex;
-
- if (ecmd.speed == SPEED_10)
- speed = 0;
- else
- speed = PCR_SPEED_100;
- if (ecmd.duplex == DUPLEX_FULL)
- duplex = PCR_DUPLEX_FULL;
- else
- duplex = 0;
- slic_link_config(adapter, speed, duplex);
- if (slic_link_event_handler(adapter))
- return -EFAULT;
- }
- }
- return 0;
- default:
- return -EOPNOTSUPP;
- }
-}
-
-static void slic_config_pci(struct pci_dev *pcidev)
-{
- u16 pci_command;
- u16 new_command;
-
- pci_read_config_word(pcidev, PCI_COMMAND, &pci_command);
-
- new_command = pci_command | PCI_COMMAND_MASTER
- | PCI_COMMAND_MEMORY
- | PCI_COMMAND_INVALIDATE
- | PCI_COMMAND_PARITY | PCI_COMMAND_SERR | PCI_COMMAND_FAST_BACK;
- if (pci_command != new_command)
- pci_write_config_word(pcidev, PCI_COMMAND, new_command);
-}
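
slic_config_pci() is the classic read-modify-write of the PCI command register, skipping the write when nothing changes, since config-space writes are slow and can have side effects. The same pattern in standalone form, modeled against a fake config word (the bit values match include/uapi/linux/pci_regs.h):

#include <stdint.h>

#define PCI_COMMAND_MEMORY 0x0002
#define PCI_COMMAND_MASTER 0x0004

/* Fake config space for the sketch. */
static uint16_t cfg_command = PCI_COMMAND_MEMORY;

static uint16_t cfg_read16(void)        { return cfg_command; }
static void     cfg_write16(uint16_t v) { cfg_command = v; }

/* Read-modify-write, writing back only when a bit actually changes. */
static void enable_bus_master(void)
{
	uint16_t cmd = cfg_read16();
	uint16_t want = cmd | PCI_COMMAND_MASTER | PCI_COMMAND_MEMORY;

	if (want != cmd)
		cfg_write16(want);
}

int main(void)
{
	enable_bus_master();
	return 0;
}
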
-
-static int slic_card_init(struct sliccard *card, struct adapter *adapter)
-{
- struct slic_shmemory *sm = &adapter->shmem;
- struct slic_shmem_data *sm_data = sm->shmem_data;
- struct slic_eeprom *peeprom;
- struct oslic_eeprom *pOeeprom;
- dma_addr_t phys_config;
- u32 phys_configh;
- u32 phys_configl;
- u32 i = 0;
- int status;
- uint macaddrs = card->card_size;
- ushort eecodesize;
- ushort dramsize;
- ushort ee_chksum;
- ushort calc_chksum;
- struct slic_config_mac *pmac;
- unsigned char fruformat;
- unsigned char oemfruformat;
- struct atk_fru *patkfru;
- union oemfru *poemfru;
- unsigned long flags;
-
- /* Reset everything except PCI configuration space */
- slic_soft_reset(adapter);
-
- /* Download the microcode */
- status = slic_card_download(adapter);
- if (status)
- return status;
-
- if (!card->config_set) {
- peeprom = pci_alloc_consistent(adapter->pcidev,
- sizeof(struct slic_eeprom),
- &phys_config);
-
- if (!peeprom) {
- dev_err(&adapter->pcidev->dev,
- "Failed to allocate DMA memory for EEPROM.\n");
- return -ENOMEM;
- }
-
- phys_configl = SLIC_GET_ADDR_LOW(phys_config);
- phys_configh = SLIC_GET_ADDR_HIGH(phys_config);
-
- memset(peeprom, 0, sizeof(struct slic_eeprom));
-
- slic_write32(adapter, SLIC_REG_ICR, ICR_INT_OFF);
- slic_flush_write(adapter);
- mdelay(1);
-
- spin_lock_irqsave(&adapter->bit64reglock, flags);
- slic_write32(adapter, SLIC_REG_ADDR_UPPER,
- cpu_to_le32(upper_32_bits(sm->isr_phaddr)));
- slic_write32(adapter, SLIC_REG_ISP,
- cpu_to_le32(lower_32_bits(sm->isr_phaddr)));
- spin_unlock_irqrestore(&adapter->bit64reglock, flags);
-
- status = slic_config_get(adapter, phys_configl, phys_configh);
- if (status) {
- dev_err(&adapter->pcidev->dev,
- "Failed to fetch config data from device.\n");
- goto card_init_err;
- }
-
- for (;;) {
- if (sm_data->isr) {
- if (sm_data->isr & ISR_UPC) {
- sm_data->isr = 0;
- slic_write64(adapter, SLIC_REG_ISP, 0,
- 0);
- slic_write32(adapter, SLIC_REG_ISR, 0);
- slic_flush_write(adapter);
-
- slic_upr_request_complete(adapter, 0);
- break;
- }
-
- sm_data->isr = 0;
- slic_write32(adapter, SLIC_REG_ISR, 0);
- slic_flush_write(adapter);
- } else {
- mdelay(1);
- i++;
- if (i > 5000) {
- dev_err(&adapter->pcidev->dev,
- "Fetch of config data timed out.\n");
- slic_write64(adapter, SLIC_REG_ISP,
- 0, 0);
- slic_flush_write(adapter);
-
- status = -EINVAL;
- goto card_init_err;
- }
- }
- }
-
- switch (adapter->devid) {
- /* Oasis card */
- case SLIC_2GB_DEVICE_ID:
- /* extract EEPROM data and pointers to EEPROM data */
- pOeeprom = (struct oslic_eeprom *)peeprom;
- eecodesize = pOeeprom->EecodeSize;
- dramsize = pOeeprom->DramSize;
- pmac = pOeeprom->MacInfo;
- fruformat = pOeeprom->FruFormat;
- patkfru = &pOeeprom->AtkFru;
- oemfruformat = pOeeprom->OemFruFormat;
- poemfru = &pOeeprom->OemFru;
- macaddrs = 2;
- /*
- * Minor kludge for Oasis card
- * get 2 MAC addresses from the
- * EEPROM to ensure that function 1
- * gets the Port 1 MAC address
- */
- break;
- default:
- /* extract EEPROM data and pointers to EEPROM data */
- eecodesize = peeprom->EecodeSize;
- dramsize = peeprom->DramSize;
- pmac = peeprom->u2.mac.MacInfo;
- fruformat = peeprom->FruFormat;
- patkfru = &peeprom->AtkFru;
- oemfruformat = peeprom->OemFruFormat;
- poemfru = &peeprom->OemFru;
- break;
- }
-
- card->config.EepromValid = false;
-
- /* see if the EEPROM is valid by checking its checksum */
- if ((eecodesize <= MAX_EECODE_SIZE) &&
- (eecodesize >= MIN_EECODE_SIZE)) {
- ee_chksum =
- *(u16 *)((char *)peeprom + (eecodesize - 2));
- /*
- * calculate the EEPROM checksum
- */
- calc_chksum = slic_eeprom_cksum(peeprom,
- eecodesize - 2);
- /*
- * if the ucode chksum flag bit worked,
- * we wouldn't need this
- */
- if (ee_chksum == calc_chksum)
- card->config.EepromValid = true;
- }
- /* copy in the DRAM size */
- card->config.DramSize = dramsize;
-
- /* copy in the MAC address(es) */
- for (i = 0; i < macaddrs; i++) {
- memcpy(&card->config.MacInfo[i],
- &pmac[i], sizeof(struct slic_config_mac));
- }
-
- /* copy the Alacritech FRU information */
- card->config.FruFormat = fruformat;
- memcpy(&card->config.AtkFru, patkfru,
- sizeof(struct atk_fru));
-
- pci_free_consistent(adapter->pcidev,
- sizeof(struct slic_eeprom),
- peeprom, phys_config);
-
- if (!card->config.EepromValid) {
- slic_write64(adapter, SLIC_REG_ISP, 0, 0);
- slic_flush_write(adapter);
- dev_err(&adapter->pcidev->dev, "EEPROM invalid.\n");
- return -EINVAL;
- }
-
- card->config_set = 1;
- }
-
- status = slic_card_download_gbrcv(adapter);
- if (status)
- return status;
-
- if (slic_global.dynamic_intagg)
- slic_intagg_set(adapter, 0);
- else
- slic_intagg_set(adapter, adapter->intagg_delay);
-
- /*
- * Initialize ping status to "ok"
- */
- card->pingstatus = ISR_PINGMASK;
-
- /*
- * Lastly, mark our card state as up and return success
- */
- card->state = CARD_UP;
- card->reset_in_progress = 0;
-
- return 0;
-
-card_init_err:
- pci_free_consistent(adapter->pcidev, sizeof(struct slic_eeprom),
- peeprom, phys_config);
- return status;
-}
-
-static int slic_get_coalesce(struct net_device *dev,
- struct ethtool_coalesce *coalesce)
-{
- struct adapter *adapter = netdev_priv(dev);
-
- coalesce->rx_coalesce_usecs = adapter->intagg_delay;
- coalesce->use_adaptive_rx_coalesce = adapter->dynamic_intagg;
- return 0;
-}
-
-static int slic_set_coalesce(struct net_device *dev,
- struct ethtool_coalesce *coalesce)
-{
- struct adapter *adapter = netdev_priv(dev);
-
- adapter->intagg_delay = coalesce->rx_coalesce_usecs;
- adapter->dynamic_intagg = coalesce->use_adaptive_rx_coalesce;
- return 0;
-}
-
-static void slic_init_driver(void)
-{
- if (slic_first_init) {
- slic_first_init = 0;
- spin_lock_init(&slic_global.driver_lock);
- }
-}
-
-static int slic_init_adapter(struct net_device *netdev,
- struct pci_dev *pcidev,
- const struct pci_device_id *pci_tbl_entry,
- void __iomem *memaddr, int chip_idx)
-{
- ushort index;
- struct slic_handle *pslic_handle;
- struct adapter *adapter = netdev_priv(netdev);
- struct slic_shmemory *sm = &adapter->shmem;
- struct slic_shmem_data *sm_data;
- dma_addr_t phaddr;
-
-/* adapter->pcidev = pcidev;*/
- adapter->vendid = pci_tbl_entry->vendor;
- adapter->devid = pci_tbl_entry->device;
- adapter->subsysid = pci_tbl_entry->subdevice;
- adapter->busnumber = pcidev->bus->number;
- adapter->slotnumber = ((pcidev->devfn >> 3) & 0x1F);
- adapter->functionnumber = (pcidev->devfn & 0x7);
- adapter->regs = memaddr;
- adapter->irq = pcidev->irq;
- adapter->chipid = chip_idx;
- adapter->port = 0;
- adapter->cardindex = adapter->port;
- spin_lock_init(&adapter->upr_lock);
- spin_lock_init(&adapter->bit64reglock);
- spin_lock_init(&adapter->adapter_lock);
- spin_lock_init(&adapter->reset_lock);
- spin_lock_init(&adapter->handle_lock);
-
- adapter->card_size = 1;
- /*
- * Initialize the slic_handle array, starting at 1
- * because 0 is an invalid host handle.
- */
- for (index = 1, pslic_handle = &adapter->slic_handles[1];
- index < SLIC_CMDQ_MAXCMDS; index++, pslic_handle++) {
- pslic_handle->token.handle_index = index;
- pslic_handle->type = SLIC_HANDLE_FREE;
- pslic_handle->next = adapter->pfree_slic_handles;
- adapter->pfree_slic_handles = pslic_handle;
- }
- sm_data = pci_zalloc_consistent(adapter->pcidev, sizeof(*sm_data),
- &phaddr);
- if (!sm_data)
- return -ENOMEM;
-
- sm->shmem_data = sm_data;
- sm->isr_phaddr = phaddr;
- sm->lnkstatus_phaddr = phaddr + offsetof(struct slic_shmem_data,
- lnkstatus);
- sm->stats_phaddr = phaddr + offsetof(struct slic_shmem_data, stats);
-
- return 0;
-}
-
-static const struct net_device_ops slic_netdev_ops = {
- .ndo_open = slic_entry_open,
- .ndo_stop = slic_entry_halt,
- .ndo_start_xmit = slic_xmit_start,
- .ndo_do_ioctl = slic_ioctl,
- .ndo_set_mac_address = slic_mac_set_address,
- .ndo_get_stats = slic_get_stats,
- .ndo_set_rx_mode = slic_mcast_set_list,
- .ndo_validate_addr = eth_validate_addr,
- .ndo_change_mtu = eth_change_mtu,
-};
-
-static u32 slic_card_locate(struct adapter *adapter)
-{
- struct sliccard *card = slic_global.slic_card;
- struct physcard *physcard = slic_global.phys_card;
- ushort card_hostid;
- uint i;
-
- card_hostid = slic_read32(adapter, SLIC_REG_HOSTID);
-
- /* Initialize a new card structure if need be */
- if (card_hostid == SLIC_HOSTID_DEFAULT) {
- card = kzalloc(sizeof(*card), GFP_KERNEL);
- if (!card)
- return -ENOMEM;
-
- card->next = slic_global.slic_card;
- slic_global.slic_card = card;
- card->busnumber = adapter->busnumber;
- card->slotnumber = adapter->slotnumber;
-
- /* Find an available cardnum */
- for (i = 0; i < SLIC_MAX_CARDS; i++) {
- if (slic_global.cardnuminuse[i] == 0) {
- slic_global.cardnuminuse[i] = 1;
- card->cardnum = i;
- break;
- }
- }
- slic_global.num_slic_cards++;
- } else {
- /* Card exists, find the card this adapter belongs to */
- while (card) {
- if (card->cardnum == card_hostid)
- break;
- card = card->next;
- }
- }
-
- if (!card)
- return -ENXIO;
- /* Put the adapter in the card's adapter list */
- if (!card->adapter[adapter->port]) {
- card->adapter[adapter->port] = adapter;
- adapter->card = card;
- }
-
- card->card_size = 1; /* one port per *logical* card */
-
- while (physcard) {
- for (i = 0; i < SLIC_MAX_PORTS; i++) {
- if (physcard->adapter[i])
- break;
- }
- if (i == SLIC_MAX_PORTS)
- break;
-
- if (physcard->adapter[i]->slotnumber == adapter->slotnumber)
- break;
- physcard = physcard->next;
- }
- if (!physcard) {
- /* no structure allocated for this physical card yet */
- physcard = kzalloc(sizeof(*physcard), GFP_ATOMIC);
- if (!physcard) {
- if (card_hostid == SLIC_HOSTID_DEFAULT)
- kfree(card);
- return -ENOMEM;
- }
-
- physcard->next = slic_global.phys_card;
- slic_global.phys_card = physcard;
- physcard->adapters_allocd = 1;
- } else {
- physcard->adapters_allocd++;
- }
- /* Note - this is ZERO relative */
- adapter->physport = physcard->adapters_allocd - 1;
-
- physcard->adapter[adapter->physport] = adapter;
- adapter->physcard = physcard;
-
- return 0;
-}
-
-static int slic_entry_probe(struct pci_dev *pcidev,
- const struct pci_device_id *pci_tbl_entry)
-{
- static int cards_found;
- static int did_version;
- int err = -ENODEV;
- struct net_device *netdev;
- struct adapter *adapter;
- void __iomem *memmapped_ioaddr = NULL;
- ulong mmio_start = 0;
- ulong mmio_len = 0;
- struct sliccard *card = NULL;
- int pci_using_dac = 0;
-
- err = pci_enable_device(pcidev);
-
- if (err)
- return err;
-
- if (did_version++ == 0) {
- dev_info(&pcidev->dev, "%s\n", slic_banner);
- dev_info(&pcidev->dev, "%s\n", slic_proc_version);
- }
-
- if (!pci_set_dma_mask(pcidev, DMA_BIT_MASK(64))) {
- pci_using_dac = 1;
- err = pci_set_consistent_dma_mask(pcidev, DMA_BIT_MASK(64));
- if (err) {
- dev_err(&pcidev->dev, "unable to obtain 64-bit DMA for consistent allocations\n");
- goto err_out_disable_pci;
- }
- } else {
- err = pci_set_dma_mask(pcidev, DMA_BIT_MASK(32));
- if (err) {
- dev_err(&pcidev->dev, "no usable DMA configuration\n");
- goto err_out_disable_pci;
- }
- pci_using_dac = 0;
- pci_set_consistent_dma_mask(pcidev, DMA_BIT_MASK(32));
- }
-
- err = pci_request_regions(pcidev, DRV_NAME);
- if (err) {
- dev_err(&pcidev->dev, "can't obtain PCI resources\n");
- goto err_out_disable_pci;
- }
-
- pci_set_master(pcidev);
-
- netdev = alloc_etherdev(sizeof(struct adapter));
- if (!netdev) {
- err = -ENOMEM;
- goto err_out_exit_slic_probe;
- }
-
- netdev->ethtool_ops = &slic_ethtool_ops;
- SET_NETDEV_DEV(netdev, &pcidev->dev);
-
- pci_set_drvdata(pcidev, netdev);
- adapter = netdev_priv(netdev);
- adapter->netdev = netdev;
- adapter->pcidev = pcidev;
- slic_global.dynamic_intagg = adapter->dynamic_intagg;
- if (pci_using_dac)
- netdev->features |= NETIF_F_HIGHDMA;
-
- mmio_start = pci_resource_start(pcidev, 0);
- mmio_len = pci_resource_len(pcidev, 0);
-
- memmapped_ioaddr = ioremap_nocache(mmio_start, mmio_len);
- if (!memmapped_ioaddr) {
- dev_err(&pcidev->dev, "cannot remap MMIO region %lx @ %lx\n",
- mmio_len, mmio_start);
- err = -ENOMEM;
- goto err_out_free_netdev;
- }
-
- slic_config_pci(pcidev);
-
- slic_init_driver();
-
- err = slic_init_adapter(netdev, pcidev, pci_tbl_entry, memmapped_ioaddr,
- cards_found);
- if (err) {
- dev_err(&pcidev->dev, "failed to init adapter: %i\n", err);
- goto err_out_unmap;
- }
-
- err = slic_card_locate(adapter);
- if (err) {
- dev_err(&pcidev->dev, "cannot locate card\n");
- goto err_clean_init;
- }
-
- card = adapter->card;
-
- if (!adapter->allocated) {
- card->adapters_allocated++;
- adapter->allocated = 1;
- }
-
- err = slic_card_init(card, adapter);
- if (err)
- goto err_clean_init;
-
- slic_adapter_set_hwaddr(adapter);
-
- netdev->base_addr = (unsigned long)memmapped_ioaddr;
- netdev->irq = adapter->irq;
- netdev->netdev_ops = &slic_netdev_ops;
-
- netif_carrier_off(netdev);
-
- strcpy(netdev->name, "eth%d");
- err = register_netdev(netdev);
- if (err) {
- dev_err(&pcidev->dev, "Cannot register net device, aborting.\n");
- goto err_clean_init;
- }
-
- cards_found++;
-
- return 0;
-
-err_clean_init:
- slic_init_cleanup(adapter);
-err_out_unmap:
- iounmap(memmapped_ioaddr);
-err_out_free_netdev:
- free_netdev(netdev);
-err_out_exit_slic_probe:
- pci_release_regions(pcidev);
-err_out_disable_pci:
- pci_disable_device(pcidev);
- return err;
-}
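
The probe's mask negotiation (64-bit DMA with a 32-bit fallback, plus separately set coherent masks) is the standard dance of its era; current kernels fold both masks into a single call. A sketch of the modern equivalent:

#include <linux/dma-mapping.h>
#include <linux/pci.h>

/* Try 64-bit DMA first, fall back to 32-bit; both masks set together. */
static int set_dma_masks(struct pci_dev *pdev)
{
	int err;

	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (err)
		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
	return err;
}
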
-
-static struct pci_driver slic_driver = {
- .name = DRV_NAME,
- .id_table = slic_pci_tbl,
- .probe = slic_entry_probe,
- .remove = slic_entry_remove,
-};
-
-static int __init slic_module_init(void)
-{
- slic_init_driver();
-
- return pci_register_driver(&slic_driver);
-}
-
-static void __exit slic_module_cleanup(void)
-{
- pci_unregister_driver(&slic_driver);
-}
-
-static const struct ethtool_ops slic_ethtool_ops = {
- .get_coalesce = slic_get_coalesce,
- .set_coalesce = slic_set_coalesce
-};
-
-module_init(slic_module_init);
-module_exit(slic_module_cleanup);
diff --git a/drivers/staging/sm750fb/Makefile b/drivers/staging/sm750fb/Makefile
index dcce3f487ed5..4d781f78b95c 100644
--- a/drivers/staging/sm750fb/Makefile
+++ b/drivers/staging/sm750fb/Makefile
@@ -1,4 +1,4 @@
obj-$(CONFIG_FB_SM750) += sm750fb.o
sm750fb-objs := sm750.o sm750_hw.o sm750_accel.o sm750_cursor.o ddk750_chip.o ddk750_power.o ddk750_mode.o
-sm750fb-objs += ddk750_display.o ddk750_help.o ddk750_swi2c.o ddk750_sii164.o ddk750_dvi.o ddk750_hwi2c.o
+sm750fb-objs += ddk750_display.o ddk750_swi2c.o ddk750_sii164.o ddk750_dvi.o ddk750_hwi2c.o
diff --git a/drivers/staging/sm750fb/ddk750.h b/drivers/staging/sm750fb/ddk750.h
index 2c10a08ed964..734010324a8f 100644
--- a/drivers/staging/sm750fb/ddk750.h
+++ b/drivers/staging/sm750fb/ddk750.h
@@ -1,22 +1,21 @@
+/*
+ * Copyright (c) 2007 by Silicon Motion, Inc. (SMI)
+ *
+ * All rights are reserved. Reproduction or in part is prohibited
+ * without the written consent of the copyright owner.
+ *
+ * RegSC.h --- SM718 SDK
+ * This file contains the definitions for the System Configuration registers.
+ */
+
#ifndef DDK750_H__
#define DDK750_H__
-/*******************************************************************
-*
-* Copyright (c) 2007 by Silicon Motion, Inc. (SMI)
-*
-* All rights are reserved. Reproduction or in part is prohibited
-* without the written consent of the copyright owner.
-*
-* RegSC.h --- SM718 SDK
-* This file contains the definitions for the System Configuration registers.
-*
-*******************************************************************/
+
#include "ddk750_reg.h"
#include "ddk750_mode.h"
#include "ddk750_chip.h"
#include "ddk750_display.h"
#include "ddk750_power.h"
-#include "ddk750_help.h"
#ifdef USE_HW_I2C
#include "ddk750_hwi2c.h"
#endif
diff --git a/drivers/staging/sm750fb/ddk750_chip.c b/drivers/staging/sm750fb/ddk750_chip.c
index 839d6730bde9..f59ce5c0867d 100644
--- a/drivers/staging/sm750fb/ddk750_chip.c
+++ b/drivers/staging/sm750fb/ddk750_chip.c
@@ -1,33 +1,32 @@
#include <linux/kernel.h>
#include <linux/sizes.h>
-#include "ddk750_help.h"
#include "ddk750_reg.h"
#include "ddk750_chip.h"
#include "ddk750_power.h"
#define MHz(x) ((x) * 1000000)
+static logical_chip_type_t chip;
+
logical_chip_type_t sm750_get_chip_type(void)
{
- unsigned short physicalID;
- char physicalRev;
- logical_chip_type_t chip;
-
- physicalID = devId750; /* either 0x718 or 0x750 */
- physicalRev = revId750;
+ return chip;
+}
- if (physicalID == 0x718)
+void sm750_set_chip_type(unsigned short devId, u8 revId)
+{
+ if (devId == 0x718)
chip = SM718;
- else if (physicalID == 0x750) {
+ else if (devId == 0x750) {
chip = SM750;
/* SM750 and SM750LE are different in their revision ID only. */
- if (physicalRev == SM750LE_REVISION_ID)
+ if (revId == SM750LE_REVISION_ID) {
chip = SM750LE;
+ pr_info("found sm750le\n");
+ }
} else
chip = SM_UNKNOWN;
-
- return chip;
}
static unsigned int get_mxclk_freq(void)
@@ -52,9 +51,9 @@ static unsigned int get_mxclk_freq(void)
*
* Input: Frequency to be set.
*/
-static void setChipClock(unsigned int frequency)
+static void set_chip_clock(unsigned int frequency)
{
- pll_value_t pll;
+ struct pll_value pll;
unsigned int ulActualMxClk;
/* Cheok_0509: For SM750LE, the chip clock is fixed. Nothing to set. */
@@ -63,29 +62,31 @@ static void setChipClock(unsigned int frequency)
if (frequency) {
/*
- * Set up PLL, a structure to hold the value to be set in clocks.
- */
+ * Set up PLL structure to hold the value to be set in clocks.
+ */
pll.inputFreq = DEFAULT_INPUT_CLOCK; /* Defined in CLOCK.H */
pll.clockType = MXCLK_PLL;
/*
- * Call calcPllValue() to fill the other fields of PLL structure.
- * Sometime, the chip cannot set up the exact clock
- * required by the User.
- * Return value of calcPllValue gives the actual possible clock.
- */
- ulActualMxClk = calcPllValue(frequency, &pll);
+ * Call sm750_calc_pll_value() to fill the other fields of the PLL
+ * structure. Sometimes, the chip cannot set up the exact
+ * clock required by the User.
+ * Return value of sm750_calc_pll_value gives the actual possible
+ * clock.
+ */
+ ulActualMxClk = sm750_calc_pll_value(frequency, &pll);
/* Master Clock Control: MXCLK_PLL */
- POKE32(MXCLK_PLL_CTRL, formatPllReg(&pll));
+ POKE32(MXCLK_PLL_CTRL, sm750_format_pll_reg(&pll));
}
}
-static void setMemoryClock(unsigned int frequency)
+static void set_memory_clock(unsigned int frequency)
{
unsigned int reg, divisor;
- /* Cheok_0509: For SM750LE, the memory clock is fixed.
+ /*
+ * Cheok_0509: For SM750LE, the memory clock is fixed.
* Nothing to set.
*/
if (sm750_get_chip_type() == SM750LE)
@@ -120,7 +121,7 @@ static void setMemoryClock(unsigned int frequency)
break;
}
- setCurrentGate(reg);
+ sm750_set_current_gate(reg);
}
}
@@ -132,18 +133,20 @@ static void setMemoryClock(unsigned int frequency)
* NOTE:
* The maximum frequency the engine can run is 168MHz.
*/
-static void setMasterClock(unsigned int frequency)
+static void set_master_clock(unsigned int frequency)
{
unsigned int reg, divisor;
- /* Cheok_0509: For SM750LE, the memory clock is fixed.
+ /*
+ * Cheok_0509: For SM750LE, the memory clock is fixed.
* Nothing to set.
*/
if (sm750_get_chip_type() == SM750LE)
return;
if (frequency) {
- /* Set the frequency to the maximum frequency
+ /*
+ * Set the frequency to the maximum frequency
* that the SM750 engine can run, which is about 190 MHz.
*/
if (frequency > MHz(190))
@@ -170,11 +173,11 @@ static void setMasterClock(unsigned int frequency)
break;
}
- setCurrentGate(reg);
+ sm750_set_current_gate(reg);
}
}
-unsigned int ddk750_getVMSize(void)
+unsigned int ddk750_get_vm_size(void)
{
unsigned int reg;
unsigned int data;
@@ -206,18 +209,18 @@ unsigned int ddk750_getVMSize(void)
return data;
}
-int ddk750_initHw(initchip_param_t *pInitParam)
+int ddk750_init_hw(struct initchip_param *pInitParam)
{
unsigned int reg;
if (pInitParam->powerMode != 0)
pInitParam->powerMode = 0;
- setPowerMode(pInitParam->powerMode);
+ sm750_set_power_mode(pInitParam->powerMode);
/* Enable display power gate & LOCALMEM power gate*/
reg = PEEK32(CURRENT_GATE);
reg |= (CURRENT_GATE_DISPLAY | CURRENT_GATE_LOCALMEM);
- setCurrentGate(reg);
+ sm750_set_current_gate(reg);
if (sm750_get_chip_type() != SM750LE) {
/* set panel pll and graphic mode via mmio_88 */
@@ -233,16 +236,17 @@ int ddk750_initHw(initchip_param_t *pInitParam)
}
/* Set the Main Chip Clock */
- setChipClock(MHz((unsigned int)pInitParam->chipClock));
+ set_chip_clock(MHz((unsigned int)pInitParam->chipClock));
/* Set up memory clock. */
- setMemoryClock(MHz(pInitParam->memClock));
+ set_memory_clock(MHz(pInitParam->memClock));
/* Set up master clock */
- setMasterClock(MHz(pInitParam->masterClock));
+ set_master_clock(MHz(pInitParam->masterClock));
- /* Reset the memory controller.
+ /*
+ * Reset the memory controller.
* If the memory controller is not reset in SM750,
* the system might hang when sw accesses the memory.
 * The memory should be reset after changing the MXCLK.
@@ -257,7 +261,7 @@ int ddk750_initHw(initchip_param_t *pInitParam)
}
if (pInitParam->setAllEngOff == 1) {
- enable2DEngine(0);
+ sm750_enable_2d_engine(0);
/* Disable Overlay, if a former application left it on */
reg = PEEK32(VIDEO_DISPLAY_CTRL);
@@ -280,7 +284,7 @@ int ddk750_initHw(initchip_param_t *pInitParam)
POKE32(DMA_ABORT_INTERRUPT, reg);
/* Disable DMA Power, if a former application left it on */
- enableDMA(0);
+ sm750_enable_dma(0);
}
/* We can add more initialization as needed. */
@@ -305,9 +309,10 @@ int ddk750_initHw(initchip_param_t *pInitParam)
* M = {1,...,255}
* N = {2,...,15}
*/
-unsigned int calcPllValue(unsigned int request_orig, pll_value_t *pll)
+unsigned int sm750_calc_pll_value(unsigned int request_orig, struct pll_value *pll)
{
- /* as sm750 register definition,
+ /*
+ * per the sm750 register definition,
 * N lies in [2, 15] and M in [1, 255]
*/
int N, M, X, d;
@@ -319,7 +324,8 @@ unsigned int calcPllValue(unsigned int request_orig, pll_value_t *pll)
int max_d = 6;
if (sm750_get_chip_type() == SM750LE) {
- /* SM750LE don't have
+ /*
+ * The SM750LE doesn't have a
+ * programmable PLL or M/N values to work on.
* Just return the requested clock.
*/
@@ -331,14 +337,16 @@ unsigned int calcPllValue(unsigned int request_orig, pll_value_t *pll)
request = request_orig / 1000;
input = pll->inputFreq / 1000;
- /* for MXCLK register,
+ /*
+ * for MXCLK register,
 * no POD is provided, so it needs to be treated differently
*/
if (pll->clockType == MXCLK_PLL)
max_d = 3;
for (N = 15; N > 1; N--) {
- /* RN will not exceed maximum long
+ /*
+ * RN will not exceed maximum long
* if @request <= 285 MHZ (for 32bit cpu)
*/
RN = N * request;
@@ -373,7 +381,7 @@ unsigned int calcPllValue(unsigned int request_orig, pll_value_t *pll)
return ret;
}
-unsigned int formatPllReg(pll_value_t *pPLL)
+unsigned int sm750_format_pll_reg(struct pll_value *pPLL)
{
#ifndef VALIDATION_CHIP
unsigned int POD = pPLL->POD;
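
The ranges quoted above (M in 1..255, N in 2..15) describe the brute-force search sm750_calc_pll_value() performs. A rough standalone model of that search follows; the formula out = in * M / (N << d) and the post-divider range are assumptions read off the quoted comments, not taken from the datasheet.

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	const long in = 14318;		/* kHz, typical reference clock */
	const long req = 74250;		/* kHz, requested pixel clock */
	long best = 0, bdiff = -1;
	int bm = 0, bn = 0, bd = 0;

	/* Exhaustive scan of the small parameter space. */
	for (int n = 2; n <= 15; n++)
		for (int m = 1; m <= 255; m++)
			for (int d = 0; d <= 3; d++) {
				long out = in * m / (n << d);
				long diff = labs(out - req);

				if (bdiff < 0 || diff < bdiff) {
					bdiff = diff;
					best = out;
					bm = m;
					bn = n;
					bd = d;
				}
			}

	printf("M=%d N=%d 2^d=%d -> %ld kHz (want %ld)\n",
	       bm, bn, 1 << bd, best, req);
	return 0;
}
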
diff --git a/drivers/staging/sm750fb/ddk750_chip.h b/drivers/staging/sm750fb/ddk750_chip.h
index 14357fd1cc6b..e63b8b293816 100644
--- a/drivers/staging/sm750fb/ddk750_chip.h
+++ b/drivers/staging/sm750fb/ddk750_chip.h
@@ -6,6 +6,14 @@
#endif
#include <linux/io.h>
+#include <linux/ioport.h>
+#include <linux/uaccess.h>
+
+/* software control endianness */
+#define PEEK32(addr) readl(addr + mmio750)
+#define POKE32(addr, data) writel(data, addr + mmio750)
+
+extern void __iomem *mmio750;
/* This is all the chips recognized by this library */
typedef enum _logical_chip_type_t {
@@ -25,7 +33,7 @@ typedef enum _clock_type_t {
}
clock_type_t;
-typedef struct _pll_value_t {
+struct pll_value {
clock_type_t clockType;
unsigned long inputFreq; /* Input clock frequency to the PLL */
@@ -34,46 +42,55 @@ typedef struct _pll_value_t {
unsigned long N;
unsigned long OD;
unsigned long POD;
-}
-pll_value_t;
+};
/* input struct to initChipParam() function */
-typedef struct _initchip_param_t {
- unsigned short powerMode; /* Use power mode 0 or 1 */
- unsigned short chipClock; /**
- * Speed of main chip clock in MHz unit
- * 0 = keep the current clock setting
- * Others = the new main chip clock
- */
- unsigned short memClock; /**
- * Speed of memory clock in MHz unit
- * 0 = keep the current clock setting
- * Others = the new memory clock
- */
- unsigned short masterClock; /**
- * Speed of master clock in MHz unit
- * 0 = keep the current clock setting
- * Others = the new master clock
- */
- unsigned short setAllEngOff; /**
- * 0 = leave all engine state untouched.
- * 1 = make sure they are off: 2D, Overlay,
- * video alpha, alpha, hardware cursors
- */
- unsigned char resetMemory; /**
- * 0 = Do not reset the memory controller
- * 1 = Reset the memory controller
- */
+struct initchip_param {
+ /* Use power mode 0 or 1 */
+ unsigned short powerMode;
+
+ /*
+ * Speed of main chip clock in MHz unit
+ * 0 = keep the current clock setting
+ * Others = the new main chip clock
+ */
+ unsigned short chipClock;
+
+ /*
+ * Speed of memory clock in MHz unit
+ * 0 = keep the current clock setting
+ * Others = the new memory clock
+ */
+ unsigned short memClock;
+
+ /*
+ * Speed of master clock in MHz unit
+ * 0 = keep the current clock setting
+ * Others = the new master clock
+ */
+ unsigned short masterClock;
+
+ /*
+ * 0 = leave all engine state untouched.
+ * 1 = make sure they are off: 2D, Overlay,
+ * video alpha, alpha, hardware cursors
+ */
+ unsigned short setAllEngOff;
+
+ /*
+ * 0 = Do not reset the memory controller
+ * 1 = Reset the memory controller
+ */
+ unsigned char resetMemory;
/* More initialization parameter can be added if needed */
-}
-initchip_param_t;
+};
logical_chip_type_t sm750_get_chip_type(void);
-unsigned int calcPllValue(unsigned int request, pll_value_t *pll);
-unsigned int formatPllReg(pll_value_t *pPLL);
-void ddk750_set_mmio(void __iomem *, unsigned short, char);
-unsigned int ddk750_getVMSize(void);
-int ddk750_initHw(initchip_param_t *);
+void sm750_set_chip_type(unsigned short devId, u8 revId);
+unsigned int sm750_calc_pll_value(unsigned int request, struct pll_value *pll);
+unsigned int sm750_format_pll_reg(struct pll_value *pPLL);
+unsigned int ddk750_get_vm_size(void);
+int ddk750_init_hw(struct initchip_param *);
#endif
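
The PEEK32/POKE32 macros moved into this header now key off the exported mmio750 base instead of the removed ddk750_help globals. A hedged sketch of how a probe path could wire that up; the use of pci_iomap() and the BAR number are illustrative, not copied from sm750fb:

#include <linux/io.h>
#include <linux/pci.h>

void __iomem *mmio750;

static int map_regs(struct pci_dev *pdev)
{
	/* BAR 1 as the register aperture is an assumption for this sketch. */
	mmio750 = pci_iomap(pdev, 1, 0);
	if (!mmio750)
		return -ENOMEM;

	/*
	 * From here on, PEEK32(reg)/POKE32(reg, val) resolve to
	 * readl/writel against the mapped aperture.
	 */
	return 0;
}
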
diff --git a/drivers/staging/sm750fb/ddk750_display.c b/drivers/staging/sm750fb/ddk750_display.c
index 4023c476b9e4..c347803f7e19 100644
--- a/drivers/staging/sm750fb/ddk750_display.c
+++ b/drivers/staging/sm750fb/ddk750_display.c
@@ -1,11 +1,9 @@
#include "ddk750_reg.h"
-#include "ddk750_help.h"
+#include "ddk750_chip.h"
#include "ddk750_display.h"
#include "ddk750_power.h"
#include "ddk750_dvi.h"
-#define primaryWaitVerticalSync(delay) waitNextVerticalSync(0, delay)
-
static void setDisplayControl(int ctrl, int disp_state)
{
/* state != 0 means turn on both timing & plane en_bit */
@@ -61,55 +59,28 @@ static void setDisplayControl(int ctrl, int disp_state)
}
}
-static void waitNextVerticalSync(int ctrl, int delay)
+static void primary_wait_vertical_sync(int delay)
{
unsigned int status;
- if (!ctrl) {
- /* primary controller */
+ /*
+ * Do not wait when the Primary PLL is off or display control is
+ * already off. This prevents the software from waiting forever.
+ */
+ if (!(PEEK32(PANEL_PLL_CTRL) & PLL_CTRL_POWER) ||
+ !(PEEK32(PANEL_DISPLAY_CTRL) & DISPLAY_CTRL_TIMING))
+ return;
- /*
- * Do not wait when the Primary PLL is off or display control is
- * already off. This will prevent the software to wait forever.
- */
- if (!(PEEK32(PANEL_PLL_CTRL) & PLL_CTRL_POWER) ||
- !(PEEK32(PANEL_DISPLAY_CTRL) & DISPLAY_CTRL_TIMING)) {
- return;
- }
-
- while (delay-- > 0) {
- /* Wait for end of vsync. */
- do {
- status = PEEK32(SYSTEM_CTRL);
- } while (status & SYSTEM_CTRL_PANEL_VSYNC_ACTIVE);
-
- /* Wait for start of vsync. */
- do {
- status = PEEK32(SYSTEM_CTRL);
- } while (!(status & SYSTEM_CTRL_PANEL_VSYNC_ACTIVE));
- }
+ while (delay-- > 0) {
+ /* Wait for end of vsync. */
+ do {
+ status = PEEK32(SYSTEM_CTRL);
+ } while (status & SYSTEM_CTRL_PANEL_VSYNC_ACTIVE);
- } else {
- /*
- * Do not wait when the Primary PLL is off or display control is
- * already off. This will prevent the software to wait forever.
- */
- if (!(PEEK32(CRT_PLL_CTRL) & PLL_CTRL_POWER) ||
- !(PEEK32(CRT_DISPLAY_CTRL) & DISPLAY_CTRL_TIMING)) {
- return;
- }
-
- while (delay-- > 0) {
- /* Wait for end of vsync. */
- do {
- status = PEEK32(SYSTEM_CTRL);
- } while (status & SYSTEM_CTRL_PANEL_VSYNC_ACTIVE);
-
- /* Wait for start of vsync. */
- do {
- status = PEEK32(SYSTEM_CTRL);
- } while (!(status & SYSTEM_CTRL_PANEL_VSYNC_ACTIVE));
- }
+ /* Wait for start of vsync. */
+ do {
+ status = PEEK32(SYSTEM_CTRL);
+ } while (!(status & SYSTEM_CTRL_PANEL_VSYNC_ACTIVE));
}
}
@@ -121,22 +92,22 @@ static void swPanelPowerSequence(int disp, int delay)
reg = PEEK32(PANEL_DISPLAY_CTRL);
reg |= (disp ? PANEL_DISPLAY_CTRL_FPEN : 0);
POKE32(PANEL_DISPLAY_CTRL, reg);
- primaryWaitVerticalSync(delay);
+ primary_wait_vertical_sync(delay);
reg = PEEK32(PANEL_DISPLAY_CTRL);
reg |= (disp ? PANEL_DISPLAY_CTRL_DATA : 0);
POKE32(PANEL_DISPLAY_CTRL, reg);
- primaryWaitVerticalSync(delay);
+ primary_wait_vertical_sync(delay);
reg = PEEK32(PANEL_DISPLAY_CTRL);
reg |= (disp ? PANEL_DISPLAY_CTRL_VBIASEN : 0);
POKE32(PANEL_DISPLAY_CTRL, reg);
- primaryWaitVerticalSync(delay);
+ primary_wait_vertical_sync(delay);
reg = PEEK32(PANEL_DISPLAY_CTRL);
reg |= (disp ? PANEL_DISPLAY_CTRL_FPEN : 0);
POKE32(PANEL_DISPLAY_CTRL, reg);
- primaryWaitVerticalSync(delay);
+ primary_wait_vertical_sync(delay);
}
void ddk750_setLogicalDispOut(disp_output_t output)
@@ -182,5 +153,5 @@ void ddk750_setLogicalDispOut(disp_output_t output)
setDAC((output & DAC_MASK) >> DAC_OFFSET);
if (output & DPMS_USAGE)
- ddk750_setDPMS((output & DPMS_MASK) >> DPMS_OFFSET);
+ ddk750_set_dpms((output & DPMS_MASK) >> DPMS_OFFSET);
}
diff --git a/drivers/staging/sm750fb/ddk750_display.h b/drivers/staging/sm750fb/ddk750_display.h
index e3fde428c52b..8abca88f089e 100644
--- a/drivers/staging/sm750fb/ddk750_display.h
+++ b/drivers/staging/sm750fb/ddk750_display.h
@@ -1,7 +1,8 @@
#ifndef DDK750_DISPLAY_H__
#define DDK750_DISPLAY_H__
-/* panel path select
+/*
+ * panel path select
* 80000[29:28]
*/
@@ -12,7 +13,8 @@
#define PNL_2_SEC ((2 << PNL_2_OFFSET) | PNL_2_USAGE)
-/* primary timing & plane enable bit
+/*
+ * primary timing & plane enable bit
* 1: 80000[8] & 80000[2] on
* 0: both off
*/
@@ -23,7 +25,8 @@
#define PRI_TP_OFF ((0x0 << PRI_TP_OFFSET) | PRI_TP_USAGE)
-/* panel sequency status
+/*
+ * panel sequence status
* 80000[27:24]
*/
#define PNL_SEQ_OFFSET 6
@@ -32,7 +35,8 @@
#define PNL_SEQ_ON (BIT(PNL_SEQ_OFFSET) | PNL_SEQ_USAGE)
#define PNL_SEQ_OFF ((0 << PNL_SEQ_OFFSET) | PNL_SEQ_USAGE)
-/* dual digital output
+/*
+ * dual digital output
* 80000[19]
*/
#define DUAL_TFT_OFFSET 8
@@ -41,7 +45,8 @@
#define DUAL_TFT_ON (BIT(DUAL_TFT_OFFSET) | DUAL_TFT_USAGE)
#define DUAL_TFT_OFF ((0 << DUAL_TFT_OFFSET) | DUAL_TFT_USAGE)
-/* secondary timing & plane enable bit
+/*
+ * secondary timing & plane enable bit
* 1:80200[8] & 80200[2] on
* 0: both off
*/
@@ -51,7 +56,8 @@
#define SEC_TP_ON ((0x1 << SEC_TP_OFFSET) | SEC_TP_USAGE)
#define SEC_TP_OFF ((0x0 << SEC_TP_OFFSET) | SEC_TP_USAGE)
-/* crt path select
+/*
+ * crt path select
* 80200[19:18]
*/
#define CRT_2_OFFSET 2
@@ -61,7 +67,8 @@
#define CRT_2_SEC ((0x2 << CRT_2_OFFSET) | CRT_2_USAGE)
-/* DAC affect both DVI and DSUB
+/*
+ * DAC affects both DVI and DSUB
* 4[20]
*/
#define DAC_OFFSET 7
@@ -70,7 +77,8 @@
#define DAC_ON ((0x0 << DAC_OFFSET) | DAC_USAGE)
#define DAC_OFF ((0x1 << DAC_OFFSET) | DAC_USAGE)
-/* DPMS only affect D-SUB head
+/*
+ * DPMS only affects the D-SUB head
* 0[31:30]
*/
#define DPMS_OFFSET 9
@@ -81,7 +89,8 @@
-/* LCD1 means panel path TFT1 & panel path DVI (so enable DAC)
+/*
+ * LCD1 means panel path TFT1 & panel path DVI (so enable DAC)
* CRT means crt path DSUB
*/
typedef enum _disp_output_t {
@@ -89,7 +98,8 @@ typedef enum _disp_output_t {
do_LCD1_SEC = PNL_2_SEC | SEC_TP_ON | PNL_SEQ_ON | DAC_ON,
do_LCD2_PRI = CRT_2_PRI | PRI_TP_ON | DUAL_TFT_ON,
do_LCD2_SEC = CRT_2_SEC | SEC_TP_ON | DUAL_TFT_ON,
- /* do_DSUB_PRI = CRT_2_PRI | PRI_TP_ON | DPMS_ON|DAC_ON,
+ /*
+ * do_DSUB_PRI = CRT_2_PRI | PRI_TP_ON | DPMS_ON|DAC_ON,
* do_DSUB_SEC = CRT_2_SEC | SEC_TP_ON | DPMS_ON|DAC_ON,
*/
do_CRT_PRI = CRT_2_PRI | PRI_TP_ON | DPMS_ON | DAC_ON,
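
To make the packed encoding concrete, a small decode sketch using the masks and offsets from this header (pr_info is used only for illustration):

	static void describe_output(disp_output_t output)
	{
		if (output & DAC_USAGE)
			pr_info("DAC field: %d\n",
				(int)((output & DAC_MASK) >> DAC_OFFSET));
		if (output & DPMS_USAGE)
			pr_info("DPMS field: %d\n",
				(int)((output & DPMS_MASK) >> DPMS_OFFSET));
	}
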
diff --git a/drivers/staging/sm750fb/ddk750_dvi.c b/drivers/staging/sm750fb/ddk750_dvi.c
index 8252f771ef9e..250c2f478778 100644
--- a/drivers/staging/sm750fb/ddk750_dvi.c
+++ b/drivers/staging/sm750fb/ddk750_dvi.c
@@ -1,6 +1,6 @@
#define USE_DVICHIP
#ifdef USE_DVICHIP
-#include "ddk750_help.h"
+#include "ddk750_chip.h"
#include "ddk750_reg.h"
#include "ddk750_dvi.h"
#include "ddk750_sii164.h"
diff --git a/drivers/staging/sm750fb/ddk750_help.c b/drivers/staging/sm750fb/ddk750_help.c
deleted file mode 100644
index 9637dd30d037..000000000000
--- a/drivers/staging/sm750fb/ddk750_help.c
+++ /dev/null
@@ -1,17 +0,0 @@
-#include "ddk750_help.h"
-
-void __iomem *mmio750;
-char revId750;
-unsigned short devId750;
-
-/* after driver mapped io registers, use this function first */
-void ddk750_set_mmio(void __iomem *addr, unsigned short devId, char revId)
-{
- mmio750 = addr;
- devId750 = devId;
- revId750 = revId;
- if (revId == 0xfe)
- printk("found sm750le\n");
-}
-
-
diff --git a/drivers/staging/sm750fb/ddk750_help.h b/drivers/staging/sm750fb/ddk750_help.h
deleted file mode 100644
index 009db9213a73..000000000000
--- a/drivers/staging/sm750fb/ddk750_help.h
+++ /dev/null
@@ -1,21 +0,0 @@
-#ifndef DDK750_HELP_H__
-#define DDK750_HELP_H__
-#include "ddk750_chip.h"
-#ifndef USE_INTERNAL_REGISTER_ACCESS
-
-#include <linux/ioport.h>
-#include <linux/io.h>
-#include <linux/uaccess.h>
-
-/* software control endianness */
-#define PEEK32(addr) readl(addr + mmio750)
-#define POKE32(addr, data) writel(data, addr + mmio750)
-
-extern void __iomem *mmio750;
-extern char revId750;
-extern unsigned short devId750;
-#else
-/* implement if you want use it*/
-#endif
-
-#endif
diff --git a/drivers/staging/sm750fb/ddk750_hwi2c.c b/drivers/staging/sm750fb/ddk750_hwi2c.c
index d391c127ead7..05d4a73aa1d4 100644
--- a/drivers/staging/sm750fb/ddk750_hwi2c.c
+++ b/drivers/staging/sm750fb/ddk750_hwi2c.c
@@ -1,6 +1,6 @@
#define USE_HW_I2C
#ifdef USE_HW_I2C
-#include "ddk750_help.h"
+#include "ddk750_chip.h"
#include "ddk750_reg.h"
#include "ddk750_hwi2c.h"
#include "ddk750_power.h"
@@ -20,10 +20,11 @@ unsigned char bus_speed_mode
value |= (GPIO_MUX_30 | GPIO_MUX_31);
POKE32(GPIO_MUX, value);
- /* Enable Hardware I2C power.
+ /*
+ * Enable Hardware I2C power.
* TODO: Check if we need to enable GPIO power?
*/
- enableI2C(1);
+ sm750_enable_i2c(1);
/* Enable the I2C Controller and set the bus speed mode */
value = PEEK32(I2C_CTRL) & ~(I2C_CTRL_MODE | I2C_CTRL_EN);
@@ -44,7 +45,7 @@ void sm750_hw_i2c_close(void)
POKE32(I2C_CTRL, value);
/* Disable I2C Power */
- enableI2C(0);
+ sm750_enable_i2c(0);
/* Set GPIO 30 & 31 back as GPIO pins */
value = PEEK32(GPIO_MUX);
@@ -92,7 +93,8 @@ static unsigned int hw_i2c_write_data(
/* Set the Device Address */
POKE32(I2C_SLAVE_ADDRESS, addr & ~0x01);
- /* Write data.
+ /*
+ * Write data.
* Note:
* Only 16 bytes can be accessed per I2C start instruction.
*/
@@ -158,7 +160,8 @@ static unsigned int hw_i2c_read_data(
/* Set the Device Address */
POKE32(I2C_SLAVE_ADDRESS, addr | 0x01);
- /* Read data and save them to the buffer.
+ /*
+ * Read data and save them to the buffer.
* Note:
* Only 16 bytes can be accessed per I2C start instruction.
*/
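
Given the 16-byte-per-start limit noted in both comments, a caller has to chunk longer transfers. A sketch only; the (addr, buf, len) parameter list for hw_i2c_read_data() is assumed for illustration and may differ from the driver's real signature:

	static unsigned int read_all(unsigned char addr, unsigned char *buf,
				     unsigned int len)
	{
		unsigned int done = 0, n, got;

		while (done < len) {
			n = (len - done > 16) ? 16 : (len - done);
			got = hw_i2c_read_data(addr, buf + done, n); /* assumed */
			if (got == 0)	/* bus error: stop rather than spin */
				break;
			done += got;
		}
		return done;
	}
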
diff --git a/drivers/staging/sm750fb/ddk750_mode.c b/drivers/staging/sm750fb/ddk750_mode.c
index 05b83646c2d5..4a4b1de97a87 100644
--- a/drivers/staging/sm750fb/ddk750_mode.c
+++ b/drivers/staging/sm750fb/ddk750_mode.c
@@ -1,10 +1,10 @@
-#include "ddk750_help.h"
#include "ddk750_reg.h"
#include "ddk750_mode.h"
#include "ddk750_chip.h"
-/* SM750LE only:
+/*
+ * SM750LE only:
* This function takes care of the extra registers and bit fields required
* to set up a mode in SM750LE
*
@@ -19,7 +19,8 @@ static unsigned long displayControlAdjust_SM750LE(mode_parameter_t *pModeParam,
x = pModeParam->horizontal_display_end;
y = pModeParam->vertical_display_end;
- /* SM750LE has to set up the top-left and bottom-right
+ /*
+ * SM750LE has to set up the top-left and bottom-right
* registers as well.
* Note that normal SM750/SM718 only use those two registers for
* auto-centering mode.
@@ -31,7 +32,8 @@ static unsigned long displayControlAdjust_SM750LE(mode_parameter_t *pModeParam,
CRT_AUTO_CENTERING_BR_BOTTOM_MASK) |
((x - 1) & CRT_AUTO_CENTERING_BR_RIGHT_MASK));
- /* Assume common fields in dispControl have been properly set before
+ /*
+ * Assume common fields in dispControl have been properly set before
* calling this function.
* This function only sets the extra fields in dispControl.
*/
@@ -72,7 +74,8 @@ static unsigned long displayControlAdjust_SM750LE(mode_parameter_t *pModeParam,
/* only timing related registers will be programed */
-static int programModeRegisters(mode_parameter_t *pModeParam, pll_value_t *pll)
+static int programModeRegisters(mode_parameter_t *pModeParam,
+ struct pll_value *pll)
{
int ret = 0;
int cnt = 0;
@@ -80,7 +83,7 @@ static int programModeRegisters(mode_parameter_t *pModeParam, pll_value_t *pll)
if (pll->clockType == SECONDARY_PLL) {
/* programe secondary pixel clock */
- POKE32(CRT_PLL_CTRL, formatPllReg(pll));
+ POKE32(CRT_PLL_CTRL, sm750_format_pll_reg(pll));
POKE32(CRT_HORIZONTAL_TOTAL,
(((pModeParam->horizontal_total - 1) <<
CRT_HORIZONTAL_TOTAL_TOTAL_SHIFT) &
@@ -130,7 +133,7 @@ static int programModeRegisters(mode_parameter_t *pModeParam, pll_value_t *pll)
} else if (pll->clockType == PRIMARY_PLL) {
unsigned int reserved;
- POKE32(PANEL_PLL_CTRL, formatPllReg(pll));
+ POKE32(PANEL_PLL_CTRL, sm750_format_pll_reg(pll));
reg = ((pModeParam->horizontal_total - 1) <<
PANEL_HORIZONTAL_TOTAL_TOTAL_SHIFT) &
@@ -176,14 +179,14 @@ static int programModeRegisters(mode_parameter_t *pModeParam, pll_value_t *pll)
DISPLAY_CTRL_HSYNC_PHASE | DISPLAY_CTRL_TIMING |
DISPLAY_CTRL_PLANE);
- /* May a hardware bug or just my test chip (not confirmed).
- * PANEL_DISPLAY_CTRL register seems requiring few writes
- * before a value can be successfully written in.
- * Added some masks to mask out the reserved bits.
- * Note: This problem happens by design. The hardware will wait for the
- * next vertical sync to turn on/off the plane.
- */
-
+ /*
+ * Maybe a hardware bug, or just my test chip (not confirmed).
+ * The PANEL_DISPLAY_CTRL register seems to require a few writes
+ * before a value can be successfully written in.
+ * Added some masks to mask out the reserved bits.
+ * Note: This problem happens by design. The hardware will wait
+ * for the next vertical sync to turn on/off the plane.
+ */
POKE32(PANEL_DISPLAY_CTRL, tmp | reg);
while ((PEEK32(PANEL_DISPLAY_CTRL) & ~reserved) !=
@@ -201,13 +204,13 @@ static int programModeRegisters(mode_parameter_t *pModeParam, pll_value_t *pll)
int ddk750_setModeTiming(mode_parameter_t *parm, clock_type_t clock)
{
- pll_value_t pll;
+ struct pll_value pll;
unsigned int uiActualPixelClk;
pll.inputFreq = DEFAULT_INPUT_CLOCK;
pll.clockType = clock;
- uiActualPixelClk = calcPllValue(parm->pixel_clock, &pll);
+ uiActualPixelClk = sm750_calc_pll_value(parm->pixel_clock, &pll);
if (sm750_get_chip_type() == SM750LE) {
/* set graphic mode via IO method */
outb_p(0x88, 0x3d4);
diff --git a/drivers/staging/sm750fb/ddk750_power.c b/drivers/staging/sm750fb/ddk750_power.c
index 7cc6169f884e..6167e30e8e01 100644
--- a/drivers/staging/sm750fb/ddk750_power.c
+++ b/drivers/staging/sm750fb/ddk750_power.c
@@ -1,8 +1,8 @@
-#include "ddk750_help.h"
+#include "ddk750_chip.h"
#include "ddk750_reg.h"
#include "ddk750_power.h"
-void ddk750_setDPMS(DPMS_t state)
+void ddk750_set_dpms(DPMS_t state)
{
unsigned int value;
@@ -17,7 +17,7 @@ void ddk750_setDPMS(DPMS_t state)
}
}
-static unsigned int getPowerMode(void)
+static unsigned int get_power_mode(void)
{
if (sm750_get_chip_type() == SM750LE)
return 0;
@@ -29,26 +29,26 @@ static unsigned int getPowerMode(void)
* SM50x can operate in one of three modes: 0, 1 or Sleep.
* On hardware reset, power mode 0 is default.
*/
-void setPowerMode(unsigned int powerMode)
+void sm750_set_power_mode(unsigned int mode)
{
- unsigned int control_value = 0;
+ unsigned int ctrl = 0;
- control_value = PEEK32(POWER_MODE_CTRL) & ~POWER_MODE_CTRL_MODE_MASK;
+ ctrl = PEEK32(POWER_MODE_CTRL) & ~POWER_MODE_CTRL_MODE_MASK;
if (sm750_get_chip_type() == SM750LE)
return;
- switch (powerMode) {
+ switch (mode) {
case POWER_MODE_CTRL_MODE_MODE0:
- control_value |= POWER_MODE_CTRL_MODE_MODE0;
+ ctrl |= POWER_MODE_CTRL_MODE_MODE0;
break;
case POWER_MODE_CTRL_MODE_MODE1:
- control_value |= POWER_MODE_CTRL_MODE_MODE1;
+ ctrl |= POWER_MODE_CTRL_MODE_MODE1;
break;
case POWER_MODE_CTRL_MODE_SLEEP:
- control_value |= POWER_MODE_CTRL_MODE_SLEEP;
+ ctrl |= POWER_MODE_CTRL_MODE_SLEEP;
break;
default:
@@ -56,44 +56,28 @@ void setPowerMode(unsigned int powerMode)
}
/* Set up other fields in Power Control Register */
- if (powerMode == POWER_MODE_CTRL_MODE_SLEEP) {
- control_value &= ~POWER_MODE_CTRL_OSC_INPUT;
+ if (mode == POWER_MODE_CTRL_MODE_SLEEP) {
+ ctrl &= ~POWER_MODE_CTRL_OSC_INPUT;
#ifdef VALIDATION_CHIP
- control_value &= ~POWER_MODE_CTRL_336CLK;
+ ctrl &= ~POWER_MODE_CTRL_336CLK;
#endif
} else {
- control_value |= POWER_MODE_CTRL_OSC_INPUT;
+ ctrl |= POWER_MODE_CTRL_OSC_INPUT;
#ifdef VALIDATION_CHIP
- control_value |= POWER_MODE_CTRL_336CLK;
+ ctrl |= POWER_MODE_CTRL_336CLK;
#endif
}
/* Program new power mode. */
- POKE32(POWER_MODE_CTRL, control_value);
+ POKE32(POWER_MODE_CTRL, ctrl);
}
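
As context for the rename, a caller picks one of the three documented modes, e.g. (illustrative):

	/* enter the low-power state; mode 0 is the hardware-reset default */
	sm750_set_power_mode(POWER_MODE_CTRL_MODE_SLEEP);
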
-void setCurrentGate(unsigned int gate)
+void sm750_set_current_gate(unsigned int gate)
{
- unsigned int gate_reg;
- unsigned int mode;
-
- /* Get current power mode. */
- mode = getPowerMode();
-
- switch (mode) {
- case POWER_MODE_CTRL_MODE_MODE0:
- gate_reg = MODE0_GATE;
- break;
-
- case POWER_MODE_CTRL_MODE_MODE1:
- gate_reg = MODE1_GATE;
- break;
-
- default:
- gate_reg = MODE0_GATE;
- break;
- }
- POKE32(gate_reg, gate);
+ if (get_power_mode() == POWER_MODE_CTRL_MODE_MODE1)
+ POKE32(MODE1_GATE, gate);
+ else
+ POKE32(MODE0_GATE, gate);
}
@@ -101,7 +85,7 @@ void setCurrentGate(unsigned int gate)
/*
* This function enable/disable the 2D engine.
*/
-void enable2DEngine(unsigned int enable)
+void sm750_enable_2d_engine(unsigned int enable)
{
u32 gate;
@@ -111,10 +95,10 @@ void enable2DEngine(unsigned int enable)
else
gate &= ~(CURRENT_GATE_DE | CURRENT_GATE_CSC);
- setCurrentGate(gate);
+ sm750_set_current_gate(gate);
}
-void enableDMA(unsigned int enable)
+void sm750_enable_dma(unsigned int enable)
{
u32 gate;
@@ -125,13 +109,13 @@ void enableDMA(unsigned int enable)
else
gate &= ~CURRENT_GATE_DMA;
- setCurrentGate(gate);
+ sm750_set_current_gate(gate);
}
/*
* This function enable/disable the GPIO Engine
*/
-void enableGPIO(unsigned int enable)
+void sm750_enable_gpio(unsigned int enable)
{
u32 gate;
@@ -142,13 +126,13 @@ void enableGPIO(unsigned int enable)
else
gate &= ~CURRENT_GATE_GPIO;
- setCurrentGate(gate);
+ sm750_set_current_gate(gate);
}
/*
* This function enable/disable the I2C Engine
*/
-void enableI2C(unsigned int enable)
+void sm750_enable_i2c(unsigned int enable)
{
u32 gate;
@@ -159,7 +143,7 @@ void enableI2C(unsigned int enable)
else
gate &= ~CURRENT_GATE_I2C;
- setCurrentGate(gate);
+ sm750_set_current_gate(gate);
}
diff --git a/drivers/staging/sm750fb/ddk750_power.h b/drivers/staging/sm750fb/ddk750_power.h
index 5963691f9a68..eb088b0d805f 100644
--- a/drivers/staging/sm750fb/ddk750_power.h
+++ b/drivers/staging/sm750fb/ddk750_power.h
@@ -14,37 +14,29 @@ DPMS_t;
(PEEK32(MISC_CTRL) & ~MISC_CTRL_DAC_POWER_OFF) | (off)); \
}
-void ddk750_setDPMS(DPMS_t);
-
-/*
- * This function sets the current power mode
- */
-void setPowerMode(unsigned int powerMode);
-
-/*
- * This function sets current gate
- */
-void setCurrentGate(unsigned int gate);
+void ddk750_set_dpms(DPMS_t);
+void sm750_set_power_mode(unsigned int powerMode);
+void sm750_set_current_gate(unsigned int gate);
/*
* This function enable/disable the 2D engine.
*/
-void enable2DEngine(unsigned int enable);
+void sm750_enable_2d_engine(unsigned int enable);
/*
* This function enable/disable the DMA Engine
*/
-void enableDMA(unsigned int enable);
+void sm750_enable_dma(unsigned int enable);
/*
* This function enable/disable the GPIO Engine
*/
-void enableGPIO(unsigned int enable);
+void sm750_enable_gpio(unsigned int enable);
/*
* This function enable/disable the I2C Engine
*/
-void enableI2C(unsigned int enable);
+void sm750_enable_i2c(unsigned int enable);
#endif
diff --git a/drivers/staging/sm750fb/ddk750_sii164.c b/drivers/staging/sm750fb/ddk750_sii164.c
index 99a8683e6383..259006ace219 100644
--- a/drivers/staging/sm750fb/ddk750_sii164.c
+++ b/drivers/staging/sm750fb/ddk750_sii164.c
@@ -173,7 +173,8 @@ long sii164InitChip(
i2cWriteReg(SII164_I2C_ADDRESS, SII164_CONFIGURATION, config);
- /* De-skew enabled with default 111b value.
+ /*
+ * De-skew enabled with default 111b value.
* This fixes some artifact problems in some modes on board 2.2.
* Somehow this fix does not affect board 2.1.
*/
diff --git a/drivers/staging/sm750fb/ddk750_swi2c.c b/drivers/staging/sm750fb/ddk750_swi2c.c
index 72a42330e7a1..b8a4e44359af 100644
--- a/drivers/staging/sm750fb/ddk750_swi2c.c
+++ b/drivers/staging/sm750fb/ddk750_swi2c.c
@@ -1,21 +1,20 @@
-/*******************************************************************
-*
-* Copyright (c) 2007 by Silicon Motion, Inc. (SMI)
-*
-* All rights are reserved. Reproduction or in part is prohibited
-* without the written consent of the copyright owner.
-*
-* swi2c.c --- SM750/SM718 DDK
-* This file contains the source code for I2C using software
-* implementation.
-*
-*******************************************************************/
-#include "ddk750_help.h"
+/*
+ * Copyright (c) 2007 by Silicon Motion, Inc. (SMI)
+ *
+ * All rights are reserved. Reproduction or in part is prohibited
+ * without the written consent of the copyright owner.
+ *
+ * swi2c.c --- SM750/SM718 DDK
+ * This file contains the source code for I2C using software
+ * implementation.
+ */
+
+#include "ddk750_chip.h"
#include "ddk750_reg.h"
#include "ddk750_swi2c.h"
#include "ddk750_power.h"
-/*******************************************************************
+/*
* I2C Software Master Driver:
* ===========================
* Each i2c cycle is split into 4 sections. Each of these section marks
@@ -51,7 +50,7 @@
* SCL | L | | H | |
* ---------------+---+---+---+---+
*
- ******************************************************************/
+ */
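
Read as code, the four-phase scheme above clocks out one data bit per cycle. A minimal sketch; set_scl()/set_sda() are hypothetical stand-ins for the driver's real GPIO accessors, and the 5 us timing is assumed:

	static void sw_i2c_write_bit(int bit)
	{
		set_sda(bit);	/* phase 1: SCL low, present the data bit */
		udelay(5);
		set_scl(1);	/* phases 2-3: SCL high, slave samples SDA */
		udelay(5);
		set_scl(0);	/* phase 4: SCL low again, ready for next bit */
		udelay(5);
	}
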
/* GPIO pins used for this I2C. It ranges from 0 to 63. */
static unsigned char sw_i2c_clk_gpio = DEFAULT_I2C_SCL;
@@ -429,7 +428,7 @@ long sm750_sw_i2c_init(
PEEK32(sw_i2c_data_gpio_mux_reg) & ~(1 << sw_i2c_data_gpio));
/* Enable GPIO power */
- enableGPIO(1);
+ sm750_enable_gpio(1);
/* Clear the i2c lines. */
for (i = 0; i < 9; i++)
diff --git a/drivers/staging/sm750fb/ddk750_swi2c.h b/drivers/staging/sm750fb/ddk750_swi2c.h
index b53629cda095..5a9466efc7bd 100644
--- a/drivers/staging/sm750fb/ddk750_swi2c.h
+++ b/drivers/staging/sm750fb/ddk750_swi2c.h
@@ -1,15 +1,15 @@
-/*******************************************************************
-*
-* Copyright (c) 2007 by Silicon Motion, Inc. (SMI)
-*
-* All rights are reserved. Reproduction or in part is prohibited
-* without the written consent of the copyright owner.
-*
-* swi2c.h --- SM750/SM718 DDK
-* This file contains the definitions for i2c using software
-* implementation.
-*
-*******************************************************************/
+/*
+ * Copyright (c) 2007 by Silicon Motion, Inc. (SMI)
+ *
+ * All rights are reserved. Reproduction or in part is prohibited
+ * without the written consent of the copyright owner.
+ *
+ * swi2c.h --- SM750/SM718 DDK
+ * This file contains the definitions for i2c using software
+ * implementation.
+ *
+ */
+
#ifndef _SWI2C_H_
#define _SWI2C_H_
diff --git a/drivers/staging/sm750fb/sm750.c b/drivers/staging/sm750fb/sm750.c
index 7d90e250142c..e9632f162f99 100644
--- a/drivers/staging/sm750fb/sm750.c
+++ b/drivers/staging/sm750fb/sm750.c
@@ -118,14 +118,14 @@ static int lynxfb_ops_cursor(struct fb_info *info, struct fb_cursor *fbcursor)
return -ENXIO;
}
- hw_cursor_disable(cursor);
+ sm750_hw_cursor_disable(cursor);
if (fbcursor->set & FB_CUR_SETSIZE)
- hw_cursor_setSize(cursor,
+ sm750_hw_cursor_setSize(cursor,
fbcursor->image.width,
fbcursor->image.height);
if (fbcursor->set & FB_CUR_SETPOS)
- hw_cursor_setPos(cursor,
+ sm750_hw_cursor_setPos(cursor,
fbcursor->image.dx - info->var.xoffset,
fbcursor->image.dy - info->var.yoffset);
@@ -141,18 +141,18 @@ static int lynxfb_ops_cursor(struct fb_info *info, struct fb_cursor *fbcursor)
((info->cmap.green[fbcursor->image.bg_color] & 0xfc00) >> 5) |
((info->cmap.blue[fbcursor->image.bg_color] & 0xf800) >> 11);
- hw_cursor_setColor(cursor, fg, bg);
+ sm750_hw_cursor_setColor(cursor, fg, bg);
}
if (fbcursor->set & (FB_CUR_SETSHAPE | FB_CUR_SETIMAGE)) {
- hw_cursor_setData(cursor,
+ sm750_hw_cursor_setData(cursor,
fbcursor->rop,
fbcursor->image.data,
fbcursor->mask);
}
if (fbcursor->enable)
- hw_cursor_enable(cursor);
+ sm750_hw_cursor_enable(cursor);
return 0;
}
@@ -575,11 +575,11 @@ static int lynxfb_ops_check_var(struct fb_var_screeninfo *var,
return hw_sm750_crtc_checkMode(crtc, var);
}
-static int lynxfb_ops_setcolreg(unsigned regno,
- unsigned red,
- unsigned green,
- unsigned blue,
- unsigned transp,
+static int lynxfb_ops_setcolreg(unsigned int regno,
+ unsigned int red,
+ unsigned int green,
+ unsigned int blue,
+ unsigned int transp,
struct fb_info *info)
{
struct lynxfb_par *par;
@@ -788,7 +788,7 @@ static int lynxfb_set_fbinfo(struct fb_info *info, int index)
memset_io(crtc->cursor.vstart, 0, crtc->cursor.size);
if (!g_hwcursor) {
lynxfb_ops.fb_cursor = NULL;
- hw_cursor_disable(&crtc->cursor);
+ sm750_hw_cursor_disable(&crtc->cursor);
}
/* set info->fbops, must be set before fb_find_mode */
@@ -947,13 +947,13 @@ static void sm750fb_setup(struct sm750_dev *sm750_dev, char *src)
g_hwcursor = 3;
if (!src || !*src) {
- pr_warn("no specific g_option.\n");
+ dev_warn(&sm750_dev->pdev->dev, "no specific g_option.\n");
goto NO_PARAM;
}
while ((opt = strsep(&src, ":")) != NULL && *opt != 0) {
- pr_info("opt=%s\n", opt);
- pr_info("src=%s\n", src);
+ dev_info(&sm750_dev->pdev->dev, "opt=%s\n", opt);
+ dev_info(&sm750_dev->pdev->dev, "src=%s\n", src);
if (!strncmp(opt, "swap", strlen("swap")))
swap = 1;
@@ -974,12 +974,12 @@ static void sm750fb_setup(struct sm750_dev *sm750_dev, char *src)
else {
if (!g_fbmode[0]) {
g_fbmode[0] = opt;
- pr_info("find fbmode0 : %s\n", g_fbmode[0]);
+ dev_info(&sm750_dev->pdev->dev, "find fbmode0 : %s\n", g_fbmode[0]);
} else if (!g_fbmode[1]) {
g_fbmode[1] = opt;
- pr_info("find fbmode1 : %s\n", g_fbmode[1]);
+ dev_info(&sm750_dev->pdev->dev, "find fbmode1 : %s\n", g_fbmode[1]);
} else {
- pr_warn("How many view you wann set?\n");
+ dev_warn(&sm750_dev->pdev->dev, "How many views do you want to set?\n");
}
}
}
@@ -1083,10 +1083,10 @@ static int lynxfb_pci_probe(struct pci_dev *pdev,
* if some chip need specific function,
* please hook it in smXXX_set_drv routine
*/
- sm750_dev->accel.de_init = hw_de_init;
- sm750_dev->accel.de_fillrect = hw_fillrect;
- sm750_dev->accel.de_copyarea = hw_copyarea;
- sm750_dev->accel.de_imageblit = hw_imageblit;
+ sm750_dev->accel.de_init = sm750_hw_de_init;
+ sm750_dev->accel.de_fillrect = sm750_hw_fillrect;
+ sm750_dev->accel.de_copyarea = sm750_hw_copyarea;
+ sm750_dev->accel.de_imageblit = sm750_hw_imageblit;
}
/* call chip specific setup routine */
@@ -1188,7 +1188,7 @@ static int __init lynxfb_setup(char *options)
return 0;
}
-static struct pci_device_id smi_pci_table[] = {
+static const struct pci_device_id smi_pci_table[] = {
{ PCI_DEVICE(0x126f, 0x0750), },
{0,}
};
@@ -1209,7 +1209,6 @@ static struct pci_driver lynxfb_driver = {
static int __init lynxfb_init(void)
{
char *option;
- int ret;
#ifdef MODULE
option = g_option;
@@ -1219,8 +1218,7 @@ static int __init lynxfb_init(void)
#endif
lynxfb_setup(option);
- ret = pci_register_driver(&lynxfb_driver);
- return ret;
+ return pci_register_driver(&lynxfb_driver);
}
module_init(lynxfb_init);
@@ -1245,4 +1243,4 @@ MODULE_PARM_DESC(g_option,
MODULE_AUTHOR("monk liu <monk.liu@siliconmotion.com>");
MODULE_AUTHOR("Sudip Mukherjee <sudip@vectorindia.org>");
MODULE_DESCRIPTION("Frame buffer driver for SM750 chipset");
-MODULE_LICENSE("GPL v2");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/staging/sm750fb/sm750.h b/drivers/staging/sm750fb/sm750.h
index ff31c5c9cc6f..28f4b9b4f95f 100644
--- a/drivers/staging/sm750fb/sm750.h
+++ b/drivers/staging/sm750fb/sm750.h
@@ -146,14 +146,16 @@ struct lynxfb_crtc {
struct lynxfb_output {
int dpms;
int paths;
- /* which paths(s) this output stands for,for sm750:
+ /*
+ * which path(s) this output stands for; for sm750:
* paths=1: means output for the panel path
* paths=2: means output for the crt path
* paths=3: means output for both panel and crt paths
*/
int *channel;
- /* which channel these outputs linked with,for sm750:
+ /*
+ * which channel these outputs are linked with; for sm750:
* *channel=0 means primary channel
* *channel=1 means secondary channel
* output->channel ==> &crtc->channel
diff --git a/drivers/staging/sm750fb/sm750_accel.c b/drivers/staging/sm750fb/sm750_accel.c
index 38adae6b5d83..af0db5789c53 100644
--- a/drivers/staging/sm750fb/sm750_accel.c
+++ b/drivers/staging/sm750fb/sm750_accel.c
@@ -32,7 +32,7 @@ static inline void write_dpPort(struct lynx_accel *accel, u32 data)
writel(data, accel->dpPortBase);
}
-void hw_de_init(struct lynx_accel *accel)
+void sm750_hw_de_init(struct lynx_accel *accel)
{
/* setup 2d engine registers */
u32 reg, clr;
@@ -65,12 +65,13 @@ void hw_de_init(struct lynx_accel *accel)
write_dpr(accel, DE_CONTROL, read_dpr(accel, DE_CONTROL) & ~clr);
}
-/* set2dformat only be called from setmode functions
+/*
+ * set2dformat should only be called from the setmode functions,
* but if you need a dual framebuffer driver, call set2dformat
* every time you use a 2D function
*/
-void hw_set2dformat(struct lynx_accel *accel, int fmt)
+void sm750_hw_set2dformat(struct lynx_accel *accel, int fmt)
{
u32 reg;
@@ -82,7 +83,7 @@ void hw_set2dformat(struct lynx_accel *accel, int fmt)
write_dpr(accel, DE_STRETCH_FORMAT, reg);
}
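
Since the comment above requires reselecting the format for every 2D operation in a dual-framebuffer setup, a caller would pair the two calls, e.g. (fmt and the geometry arguments here are illustrative):

	sm750_hw_set2dformat(&sm750_dev->accel, fmt);
	sm750_hw_fillrect(&sm750_dev->accel, base, pitch, Bpp,
			  x, y, width, height, color, rop);
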
-int hw_fillrect(struct lynx_accel *accel,
+int sm750_hw_fillrect(struct lynx_accel *accel,
u32 base, u32 pitch, u32 Bpp,
u32 x, u32 y, u32 width, u32 height,
u32 color, u32 rop)
@@ -90,7 +91,8 @@ int hw_fillrect(struct lynx_accel *accel,
u32 deCtrl;
if (accel->de_wait() != 0) {
- /* int time wait and always busy,seems hardware
+ /*
+ * timed out waiting and the engine is still busy; the hardware
* seems to have hit an error
*/
pr_debug("De engine always busy\n");
@@ -126,7 +128,7 @@ int hw_fillrect(struct lynx_accel *accel,
return 0;
}
-int hw_copyarea(
+int sm750_hw_copyarea(
struct lynx_accel *accel,
unsigned int sBase, /* Address of source: offset in frame buffer */
unsigned int sPitch, /* Pitch value of source surface in BYTE */
@@ -213,25 +215,29 @@ unsigned int rop2) /* ROP value */
opSign = (-1);
}
- /* Note:
+ /*
+ * Note:
* DE_FOREGROUND and DE_BACKGROUND are don't-care.
* DE_COLOR_COMPARE and DE_COLOR_COMPARE_MASK
* are set by deSetTransparency().
*/
- /* 2D Source Base.
+ /*
+ * 2D Source Base.
* It is an address offset (128 bit aligned)
* from the beginning of frame buffer.
*/
write_dpr(accel, DE_WINDOW_SOURCE_BASE, sBase); /* dpr40 */
- /* 2D Destination Base.
+ /*
+ * 2D Destination Base.
* It is an address offset (128 bit aligned)
* from the beginning of frame buffer.
*/
write_dpr(accel, DE_WINDOW_DESTINATION_BASE, dBase); /* dpr44 */
- /* Program pitch (distance between the 1st points of two adjacent lines).
+ /*
+ * Program pitch (distance between the 1st points of two adjacent lines).
* Note that the input pitch is a BYTE value, but the 2D Pitch register uses
* pixel values, so a byte-to-pixel conversion is needed.
*/
@@ -240,7 +246,8 @@ unsigned int rop2) /* ROP value */
DE_PITCH_DESTINATION_MASK) |
(sPitch / Bpp & DE_PITCH_SOURCE_MASK)); /* dpr10 */
- /* Screen Window width in Pixels.
+ /*
+ * Screen Window width in Pixels.
* 2D engine uses this value to calculate the linear address in frame buffer
* for a given point.
*/
@@ -286,7 +293,7 @@ static unsigned int deGetTransparency(struct lynx_accel *accel)
return de_ctrl;
}
-int hw_imageblit(struct lynx_accel *accel,
+int sm750_hw_imageblit(struct lynx_accel *accel,
const char *pSrcbuf, /* pointer to start of source buffer in system memory */
u32 srcDelta, /* Pitch value (in bytes) of the source buffer, +ive means top down and -ive means bottom up */
u32 startBit, /* Mono data can start at any bit in a byte, this value should be 0 to 7 */
@@ -316,7 +323,8 @@ int hw_imageblit(struct lynx_accel *accel,
if (accel->de_wait() != 0)
return -1;
- /* 2D Source Base.
+ /*
+ * 2D Source Base.
* Use 0 for HOST Blt.
*/
write_dpr(accel, DE_WINDOW_SOURCE_BASE, 0);
@@ -326,16 +334,19 @@ int hw_imageblit(struct lynx_accel *accel,
* from the beginning of frame buffer.
*/
write_dpr(accel, DE_WINDOW_DESTINATION_BASE, dBase);
- /* Program pitch (distance between the 1st points of two adjacent lines).
- * Note that input pitch is BYTE value, but the 2D Pitch register uses
- * pixel values. Need Byte to pixel conversion.
- */
+
+ /*
+ * Program pitch (distance between the 1st points of two adjacent
+ * lines). Note that the input pitch is a BYTE value, but the 2D Pitch
+ * register uses pixel values, so a byte-to-pixel conversion is needed.
+ */
write_dpr(accel, DE_PITCH,
((dPitch / bytePerPixel << DE_PITCH_DESTINATION_SHIFT) &
DE_PITCH_DESTINATION_MASK) |
(dPitch / bytePerPixel & DE_PITCH_SOURCE_MASK)); /* dpr10 */
- /* Screen Window width in Pixels.
+ /*
+ * Screen Window width in Pixels.
* 2D engine uses this value to calculate the linear address
* in frame buffer for a given point.
*/
@@ -344,7 +355,8 @@ int hw_imageblit(struct lynx_accel *accel,
DE_WINDOW_WIDTH_DST_MASK) |
(dPitch / bytePerPixel & DE_WINDOW_WIDTH_SRC_MASK));
- /* Note: For 2D Source in Host Write, only X_K1_MONO field is needed,
+ /*
+ * Note: For 2D Source in Host Write, only X_K1_MONO field is needed,
* and Y_K2 field is not used.
* For mono bitmap, use startBit for X_K1.
*/
@@ -383,6 +395,6 @@ int hw_imageblit(struct lynx_accel *accel,
pSrcbuf += srcDelta;
}
- return 0;
+ return 0;
}
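
The byte-to-pixel pitch conversion called out in the comments above reduces to dividing the byte pitch by bytes-per-pixel before programming DE_PITCH. Worked numbers, purely illustrative:

	/* a 1024-pixel-wide surface at 16 bpp: byte pitch is 2048, but the
	 * DE_PITCH fields take pixels, so 2048 / 2 = 1024 is programmed */
	u32 byte_pitch  = 1024 * 2;		/* xres * bytes per pixel */
	u32 pixel_pitch = byte_pitch / 2;	/* value the 2D engine expects */
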
diff --git a/drivers/staging/sm750fb/sm750_accel.h b/drivers/staging/sm750fb/sm750_accel.h
index d59d005e0add..4b0ff8feb9a0 100644
--- a/drivers/staging/sm750fb/sm750_accel.h
+++ b/drivers/staging/sm750fb/sm750_accel.h
@@ -184,16 +184,16 @@
#define BOTTOM_TO_TOP 1
#define RIGHT_TO_LEFT 1
-void hw_set2dformat(struct lynx_accel *accel, int fmt);
+void sm750_hw_set2dformat(struct lynx_accel *accel, int fmt);
-void hw_de_init(struct lynx_accel *accel);
+void sm750_hw_de_init(struct lynx_accel *accel);
-int hw_fillrect(struct lynx_accel *accel,
+int sm750_hw_fillrect(struct lynx_accel *accel,
u32 base, u32 pitch, u32 Bpp,
u32 x, u32 y, u32 width, u32 height,
u32 color, u32 rop);
-int hw_copyarea(
+int sm750_hw_copyarea(
struct lynx_accel *accel,
unsigned int sBase, /* Address of source: offset in frame buffer */
unsigned int sPitch, /* Pitch value of source surface in BYTE */
@@ -208,7 +208,7 @@ unsigned int width,
unsigned int height, /* width and height of rectangle in pixel value */
unsigned int rop2);
-int hw_imageblit(struct lynx_accel *accel,
+int sm750_hw_imageblit(struct lynx_accel *accel,
const char *pSrcbuf, /* pointer to start of source buffer in system memory */
u32 srcDelta, /* Pitch value (in bytes) of the source buffer, +ive means top down and -ive means bottom up */
u32 startBit, /* Mono data can start at any bit in a byte, this value should be 0 to 7 */
diff --git a/drivers/staging/sm750fb/sm750_cursor.c b/drivers/staging/sm750fb/sm750_cursor.c
index d622d65b6cee..2a13353fc492 100644
--- a/drivers/staging/sm750fb/sm750_cursor.c
+++ b/drivers/staging/sm750fb/sm750_cursor.c
@@ -47,25 +47,25 @@ writel((data), cursor->mmio + (addr))
/* hw_cursor_xxx works for voyager,718 and 750 */
-void hw_cursor_enable(struct lynx_cursor *cursor)
+void sm750_hw_cursor_enable(struct lynx_cursor *cursor)
{
u32 reg;
reg = (cursor->offset & HWC_ADDRESS_ADDRESS_MASK) | HWC_ADDRESS_ENABLE;
POKE32(HWC_ADDRESS, reg);
}
-void hw_cursor_disable(struct lynx_cursor *cursor)
+void sm750_hw_cursor_disable(struct lynx_cursor *cursor)
{
POKE32(HWC_ADDRESS, 0);
}
-void hw_cursor_setSize(struct lynx_cursor *cursor,
+void sm750_hw_cursor_setSize(struct lynx_cursor *cursor,
int w, int h)
{
cursor->w = w;
cursor->h = h;
}
-void hw_cursor_setPos(struct lynx_cursor *cursor,
+void sm750_hw_cursor_setPos(struct lynx_cursor *cursor,
int x, int y)
{
u32 reg;
@@ -74,7 +74,7 @@ void hw_cursor_setPos(struct lynx_cursor *cursor,
(x & HWC_LOCATION_X_MASK));
POKE32(HWC_LOCATION, reg);
}
-void hw_cursor_setColor(struct lynx_cursor *cursor,
+void sm750_hw_cursor_setColor(struct lynx_cursor *cursor,
u32 fg, u32 bg)
{
u32 reg = (fg << HWC_COLOR_12_2_RGB565_SHIFT) &
@@ -84,7 +84,7 @@ void hw_cursor_setColor(struct lynx_cursor *cursor,
POKE32(HWC_COLOR_3, 0xffe0);
}
-void hw_cursor_setData(struct lynx_cursor *cursor,
+void sm750_hw_cursor_setData(struct lynx_cursor *cursor,
u16 rop, const u8 *pcol, const u8 *pmsk)
{
int i, j, count, pitch, offset;
@@ -138,7 +138,7 @@ void hw_cursor_setData(struct lynx_cursor *cursor,
}
-void hw_cursor_setData2(struct lynx_cursor *cursor,
+void sm750_hw_cursor_setData2(struct lynx_cursor *cursor,
u16 rop, const u8 *pcol, const u8 *pmsk)
{
int i, j, count, pitch, offset;
diff --git a/drivers/staging/sm750fb/sm750_cursor.h b/drivers/staging/sm750fb/sm750_cursor.h
index 6c4fc9b73489..c7b86ae235b4 100644
--- a/drivers/staging/sm750fb/sm750_cursor.h
+++ b/drivers/staging/sm750fb/sm750_cursor.h
@@ -2,16 +2,16 @@
#define LYNX_CURSOR_H__
/* hw_cursor_xxx works for voyager,718 and 750 */
-void hw_cursor_enable(struct lynx_cursor *cursor);
-void hw_cursor_disable(struct lynx_cursor *cursor);
-void hw_cursor_setSize(struct lynx_cursor *cursor,
+void sm750_hw_cursor_enable(struct lynx_cursor *cursor);
+void sm750_hw_cursor_disable(struct lynx_cursor *cursor);
+void sm750_hw_cursor_setSize(struct lynx_cursor *cursor,
int w, int h);
-void hw_cursor_setPos(struct lynx_cursor *cursor,
+void sm750_hw_cursor_setPos(struct lynx_cursor *cursor,
int x, int y);
-void hw_cursor_setColor(struct lynx_cursor *cursor,
+void sm750_hw_cursor_setColor(struct lynx_cursor *cursor,
u32 fg, u32 bg);
-void hw_cursor_setData(struct lynx_cursor *cursor,
+void sm750_hw_cursor_setData(struct lynx_cursor *cursor,
u16 rop, const u8 *data, const u8 *mask);
-void hw_cursor_setData2(struct lynx_cursor *cursor,
+void sm750_hw_cursor_setData2(struct lynx_cursor *cursor,
u16 rop, const u8 *data, const u8 *mask);
#endif
diff --git a/drivers/staging/sm750fb/sm750_hw.c b/drivers/staging/sm750fb/sm750_hw.c
index 7dd208caa5eb..b6af3b53076b 100644
--- a/drivers/staging/sm750fb/sm750_hw.c
+++ b/drivers/staging/sm750fb/sm750_hw.c
@@ -23,6 +23,8 @@
#include "ddk750.h"
#include "sm750_accel.h"
+void __iomem *mmio750;
+
int hw_sm750_map(struct sm750_dev *sm750_dev, struct pci_dev *pdev)
{
int ret;
@@ -34,7 +36,8 @@ int hw_sm750_map(struct sm750_dev *sm750_dev, struct pci_dev *pdev)
pr_info("mmio phyAddr = %lx\n", sm750_dev->vidreg_start);
- /* reserve the vidreg space of smi adaptor
+ /*
+ * reserve the vidreg space of smi adaptor
* if you do this, you need to add release region code
* in lynxfb_remove, or memory will not be mapped again
* successfully
@@ -59,15 +62,17 @@ int hw_sm750_map(struct sm750_dev *sm750_dev, struct pci_dev *pdev)
sm750_dev->accel.dprBase = sm750_dev->pvReg + DE_BASE_ADDR_TYPE1;
sm750_dev->accel.dpPortBase = sm750_dev->pvReg + DE_PORT_ADDR_TYPE1;
- ddk750_set_mmio(sm750_dev->pvReg, sm750_dev->devid, sm750_dev->revid);
+ mmio750 = sm750_dev->pvReg;
+ sm750_set_chip_type(sm750_dev->devid, sm750_dev->revid);
sm750_dev->vidmem_start = pci_resource_start(pdev, 0);
- /* don't use pdev_resource[x].end - resource[x].start to
+ /*
+ * don't use pdev_resource[x].end - resource[x].start to
* calculate the resource size; it's only the maximum available
* size, not the actual size, so using
- * @ddk750_getVMSize function can be safe.
+ * the @ddk750_get_vm_size function is safer.
*/
- sm750_dev->vidmem_size = ddk750_getVMSize();
+ sm750_dev->vidmem_size = ddk750_get_vm_size();
pr_info("video memory phyAddr = %lx, size = %u bytes\n",
sm750_dev->vidmem_start, sm750_dev->vidmem_size);
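
The comment above distinguishes the BAR window from the fitted memory; with assumed sizes:

	/* the BAR length is only an upper bound (e.g. a 64 MiB aperture);
	 * the chip reports what is actually fitted (e.g. 16 MiB) */
	resource_size_t window = pci_resource_len(pdev, 0);
	unsigned int fitted = ddk750_get_vm_size();
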
@@ -100,7 +105,7 @@ int hw_sm750_inithw(struct sm750_dev *sm750_dev, struct pci_dev *pdev)
if (parm->master_clk == 0)
parm->master_clk = parm->chip_clk / 3;
- ddk750_initHw((initchip_param_t *)&sm750_dev->initParm);
+ ddk750_init_hw((struct initchip_param *)&sm750_dev->initParm);
/* for sm718, open pci burst */
if (sm750_dev->devid == 0x718) {
POKE32(SYSTEM_CTRL,
@@ -141,7 +146,8 @@ int hw_sm750_inithw(struct sm750_dev *sm750_dev, struct pci_dev *pdev)
}
POKE32(PANEL_DISPLAY_CTRL, val);
} else {
- /* for 750LE, no DVI chip initialization
+ /*
+ * for the 750LE, skipping DVI chip initialization
* leaves the monitor with no signal
*
* Set up GPIO for software I2C to program DVI chip in the
@@ -149,13 +155,15 @@ int hw_sm750_inithw(struct sm750_dev *sm750_dev, struct pci_dev *pdev)
*/
sm750_sw_i2c_init(0, 1);
- /* Customer may NOT use CH7301 DVI chip, which has to be
+ /*
+ * Customer may NOT use CH7301 DVI chip, which has to be
* initialized differently.
*/
if (sm750_sw_i2c_read_reg(0xec, 0x4a) == 0x95) {
- /* The following register values for CH7301 are from
- * Chrontel app note and our experiment.
- */
+ /*
+ * The following register values for CH7301 are from
+ * Chrontel app note and our experiment.
+ */
pr_info("yes,CH7301 DVI chip found\n");
sm750_sw_i2c_write_reg(0xec, 0x1d, 0x16);
sm750_sw_i2c_write_reg(0xec, 0x21, 0x9);
@@ -267,7 +275,7 @@ int hw_sm750_crtc_setMode(struct lynxfb_crtc *crtc,
fmt = 2;
break;
}
- hw_set2dformat(&sm750_dev->accel, fmt);
+ sm750_hw_set2dformat(&sm750_dev->accel, fmt);
}
/* set timing */
@@ -308,7 +316,8 @@ int hw_sm750_crtc_setMode(struct lynxfb_crtc *crtc,
crtc->oScreen & PANEL_FB_ADDRESS_ADDRESS_MASK);
reg = var->xres * (var->bits_per_pixel >> 3);
- /* crtc->channel is not equal to par->index on numeric,
+ /*
+ * crtc->channel is not numerically equal to par->index;
* be aware of that
*/
reg = ALIGN(reg, crtc->line_pad);
@@ -342,7 +351,8 @@ int hw_sm750_crtc_setMode(struct lynxfb_crtc *crtc,
/* not implemented now */
POKE32(CRT_FB_ADDRESS, crtc->oScreen);
reg = var->xres * (var->bits_per_pixel >> 3);
- /* crtc->channel is not equal to par->index on numeric,
+ /*
+ * crtc->channel is not numerically equal to par->index;
* be aware of that
*/
reg = ALIGN(reg, crtc->line_pad) << CRT_FB_WIDTH_WIDTH_SHIFT;
@@ -469,7 +479,7 @@ void hw_sm750_initAccel(struct sm750_dev *sm750_dev)
{
u32 reg;
- enable2DEngine(1);
+ sm750_enable_2d_engine(1);
if (sm750_get_chip_type() == SM750LE) {
reg = PEEK32(DE_STATE1);
diff --git a/drivers/staging/speakup/TODO b/drivers/staging/speakup/TODO
index 3094799cf6a0..993410c3e531 100644
--- a/drivers/staging/speakup/TODO
+++ b/drivers/staging/speakup/TODO
@@ -42,6 +42,6 @@ We prefer that you contact us on the mailing list; however, if you do
not want to subscribe to a mailing list, send your email to all of the
following:
-w.d.hubbs@gmail.com, chris@the-brannons.com, kirk@braille.uwo.ca and
+w.d.hubbs@gmail.com, chris@the-brannons.com, kirk@reisers.ca and
samuel.thibault@ens-lyon.org.
diff --git a/drivers/staging/speakup/main.c b/drivers/staging/speakup/main.c
index 97ca4ecca8a9..5c192042eeac 100644
--- a/drivers/staging/speakup/main.c
+++ b/drivers/staging/speakup/main.c
@@ -351,14 +351,14 @@ static void speakup_cut(struct vc_data *vc)
if (!mark_cut_flag) {
mark_cut_flag = 1;
- spk_xs = (u_short) spk_x;
- spk_ys = (u_short) spk_y;
+ spk_xs = (u_short)spk_x;
+ spk_ys = (u_short)spk_y;
spk_sel_cons = vc;
synth_printf("%s\n", spk_msg_get(MSG_MARK));
return;
}
- spk_xe = (u_short) spk_x;
- spk_ye = (u_short) spk_y;
+ spk_xe = (u_short)spk_x;
+ spk_ye = (u_short)spk_y;
mark_cut_flag = 0;
synth_printf("%s\n", spk_msg_get(MSG_CUT));
@@ -489,7 +489,7 @@ static void say_char(struct vc_data *vc)
u_short ch;
spk_old_attr = spk_attr;
- ch = get_char(vc, (u_short *) spk_pos, &spk_attr);
+ ch = get_char(vc, (u_short *)spk_pos, &spk_attr);
if (spk_attr != spk_old_attr) {
if (spk_attrib_bleep & 1)
bleep(spk_y);
@@ -504,7 +504,7 @@ static void say_phonetic_char(struct vc_data *vc)
u_short ch;
spk_old_attr = spk_attr;
- ch = get_char(vc, (u_short *) spk_pos, &spk_attr);
+ ch = get_char(vc, (u_short *)spk_pos, &spk_attr);
if (isascii(ch) && isalpha(ch)) {
ch &= 0x1f;
synth_printf("%s\n", phonetic[--ch]);
@@ -556,7 +556,7 @@ static u_long get_word(struct vc_data *vc)
u_char temp;
spk_old_attr = spk_attr;
- ch = (char)get_char(vc, (u_short *) tmp_pos, &temp);
+ ch = (char)get_char(vc, (u_short *)tmp_pos, &temp);
/* decided to take out the sayword if on a space (mis-information) */
if (spk_say_word_ctl && ch == SPACE) {
@@ -565,26 +565,26 @@ static u_long get_word(struct vc_data *vc)
return 0;
} else if ((tmpx < vc->vc_cols - 2)
&& (ch == SPACE || ch == 0 || IS_WDLM(ch))
- && ((char)get_char(vc, (u_short *) &tmp_pos + 1, &temp) >
+ && ((char)get_char(vc, (u_short *)&tmp_pos + 1, &temp) >
SPACE)) {
tmp_pos += 2;
tmpx++;
} else
while (tmpx > 0) {
- ch = (char)get_char(vc, (u_short *) tmp_pos - 1, &temp);
+ ch = (char)get_char(vc, (u_short *)tmp_pos - 1, &temp);
if ((ch == SPACE || ch == 0 || IS_WDLM(ch))
- && ((char)get_char(vc, (u_short *) tmp_pos, &temp) >
+ && ((char)get_char(vc, (u_short *)tmp_pos, &temp) >
SPACE))
break;
tmp_pos -= 2;
tmpx--;
}
- attr_ch = get_char(vc, (u_short *) tmp_pos, &spk_attr);
+ attr_ch = get_char(vc, (u_short *)tmp_pos, &spk_attr);
buf[cnt++] = attr_ch & 0xff;
while (tmpx < vc->vc_cols - 1) {
tmp_pos += 2;
tmpx++;
- ch = (char)get_char(vc, (u_short *) tmp_pos, &temp);
+ ch = (char)get_char(vc, (u_short *)tmp_pos, &temp);
if ((ch == SPACE) || ch == 0
|| (IS_WDLM(buf[cnt - 1]) && (ch > SPACE)))
break;
@@ -639,7 +639,7 @@ static void say_prev_word(struct vc_data *vc)
} else
spk_x--;
spk_pos -= 2;
- ch = (char)get_char(vc, (u_short *) spk_pos, &temp);
+ ch = (char)get_char(vc, (u_short *)spk_pos, &temp);
if (ch == SPACE || ch == 0)
state = 0;
else if (IS_WDLM(ch))
@@ -672,7 +672,7 @@ static void say_next_word(struct vc_data *vc)
return;
}
while (1) {
- ch = (char)get_char(vc, (u_short *) spk_pos, &temp);
+ ch = (char)get_char(vc, (u_short *)spk_pos, &temp);
if (ch == SPACE || ch == 0)
state = 0;
else if (IS_WDLM(ch))
@@ -709,7 +709,7 @@ static void spell_word(struct vc_data *vc)
if (!get_word(vc))
return;
- while ((ch = (u_char) *cp)) {
+ while ((ch = (u_char)*cp)) {
if (cp != buf)
synth_printf(" %s ", delay_str[spk_spell_delay]);
if (IS_CHAR(ch, B_CAP)) {
@@ -751,7 +751,7 @@ static int get_line(struct vc_data *vc)
spk_old_attr = spk_attr;
spk_attr = get_attributes(vc, (u_short *)spk_pos);
for (i = 0; i < vc->vc_cols; i++) {
- buf[i] = (u_char) get_char(vc, (u_short *) tmp, &tmp2);
+ buf[i] = (u_char)get_char(vc, (u_short *)tmp, &tmp2);
tmp += 2;
}
for (--i; i >= 0; i--)
@@ -816,7 +816,7 @@ static int say_from_to(struct vc_data *vc, u_long from, u_long to,
spk_old_attr = spk_attr;
spk_attr = get_attributes(vc, (u_short *)from);
while (from < to) {
- buf[i++] = (char)get_char(vc, (u_short *) from, &tmp);
+ buf[i++] = (char)get_char(vc, (u_short *)from, &tmp);
from += 2;
if (i >= vc->vc_size_row)
break;
@@ -892,7 +892,7 @@ static int get_sentence_buf(struct vc_data *vc, int read_punc)
spk_attr = get_attributes(vc, (u_short *)start);
while (start < end) {
- sentbuf[bn][i] = (char)get_char(vc, (u_short *) start, &tmp);
+ sentbuf[bn][i] = (char)get_char(vc, (u_short *)start, &tmp);
if (i > 0) {
if (sentbuf[bn][i] == SPACE && sentbuf[bn][i - 1] == '.'
&& numsentences[bn] < 9) {
@@ -1040,7 +1040,7 @@ static void say_position(struct vc_data *vc)
static void say_char_num(struct vc_data *vc)
{
u_char tmp;
- u_short ch = get_char(vc, (u_short *) spk_pos, &tmp);
+ u_short ch = get_char(vc, (u_short *)spk_pos, &tmp);
ch &= 0xff;
synth_printf(spk_msg_get(MSG_CHAR_INFO), ch, ch);
@@ -1085,7 +1085,7 @@ static void spkup_write(const char *in_buf, int count)
(currsentence <= numsentences[bn]))
synth_insert_next_index(currsentence++);
}
- ch = (u_char) *in_buf++;
+ ch = (u_char)*in_buf++;
char_type = spk_chartab[ch];
if (ch == old_ch && !(char_type & B_NUM)) {
if (++rep_count > 2)
@@ -1579,7 +1579,7 @@ static int count_highlight_color(struct vc_data *vc)
int cc;
int vc_num = vc->vc_num;
u16 ch;
- u16 *start = (u16 *) vc->vc_origin;
+ u16 *start = (u16 *)vc->vc_origin;
for (i = 0; i < 8; i++)
speakup_console[vc_num]->ht.bgcount[i] = 0;
diff --git a/drivers/staging/speakup/selection.c b/drivers/staging/speakup/selection.c
index 0149edc1e0ae..aeb2b865615a 100644
--- a/drivers/staging/speakup/selection.c
+++ b/drivers/staging/speakup/selection.c
@@ -137,7 +137,7 @@ static void __speakup_paste_selection(struct work_struct *work)
struct speakup_paste_work *spw =
container_of(work, struct speakup_paste_work, work);
struct tty_struct *tty = xchg(&spw->tty, NULL);
- struct vc_data *vc = (struct vc_data *) tty->driver_data;
+ struct vc_data *vc = (struct vc_data *)tty->driver_data;
int pasted = 0, count;
struct tty_ldisc *ld;
DECLARE_WAITQUEUE(wait, current);
diff --git a/drivers/staging/speakup/serialio.c b/drivers/staging/speakup/serialio.c
index c2c435cc3d63..ef89dc1c21c8 100644
--- a/drivers/staging/speakup/serialio.c
+++ b/drivers/staging/speakup/serialio.c
@@ -99,7 +99,7 @@ static irqreturn_t synth_readbuf_handler(int irq, void *dev_id)
while (inb_p(speakup_info.port_tts + UART_LSR) & UART_LSR_DR) {
c = inb_p(speakup_info.port_tts+UART_RX);
- synth->read_buff_add((u_char) c);
+ synth->read_buff_add((u_char)c);
}
spin_unlock_irqrestore(&speakup_info.spinlock, flags);
return IRQ_HANDLED;
@@ -113,7 +113,7 @@ static void start_serial_interrupt(int irq)
return;
rv = request_irq(irq, synth_readbuf_handler, IRQF_SHARED,
- "serial", (void *) synth_readbuf_handler);
+ "serial", (void *)synth_readbuf_handler);
if (rv)
pr_err("Unable to request Speakup serial I R Q\n");
@@ -141,7 +141,7 @@ void spk_stop_serial_interrupt(void)
/* Turn off interrupts */
outb(0, speakup_info.port_tts+UART_IER);
/* Free IRQ */
- free_irq(serstate->irq, (void *) synth_readbuf_handler);
+ free_irq(serstate->irq, (void *)synth_readbuf_handler);
}
int spk_wait_for_xmitr(void)
diff --git a/drivers/staging/speakup/speakup_soft.c b/drivers/staging/speakup/speakup_soft.c
index 6b1d0f538bbd..ed3e4282f41c 100644
--- a/drivers/staging/speakup/speakup_soft.c
+++ b/drivers/staging/speakup/speakup_soft.c
@@ -20,8 +20,8 @@
*/
#include <linux/unistd.h>
-#include <linux/miscdevice.h> /* for misc_register, and SYNTH_MINOR */
-#include <linux/poll.h> /* for poll_wait() */
+#include <linux/miscdevice.h> /* for misc_register, and SYNTH_MINOR */
+#include <linux/poll.h> /* for poll_wait() */
#include <linux/sched.h> /* schedule(), signal_pending(), TASK_INTERRUPTIBLE */
#include "spk_priv.h"
@@ -55,27 +55,26 @@ static struct var_t vars[] = {
V_LAST_VAR
};
-/*
- * These attributes will appear in /sys/accessibility/speakup/soft.
- */
+/* These attributes will appear in /sys/accessibility/speakup/soft. */
+
static struct kobj_attribute caps_start_attribute =
- __ATTR(caps_start, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store);
+ __ATTR(caps_start, S_IWUSR | S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute caps_stop_attribute =
- __ATTR(caps_stop, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store);
+ __ATTR(caps_stop, S_IWUSR | S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute freq_attribute =
- __ATTR(freq, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store);
+ __ATTR(freq, S_IWUSR | S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute pitch_attribute =
- __ATTR(pitch, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store);
+ __ATTR(pitch, S_IWUSR | S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute punct_attribute =
- __ATTR(punct, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store);
+ __ATTR(punct, S_IWUSR | S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute rate_attribute =
- __ATTR(rate, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store);
+ __ATTR(rate, S_IWUSR | S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute tone_attribute =
- __ATTR(tone, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store);
+ __ATTR(tone, S_IWUSR | S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute voice_attribute =
- __ATTR(voice, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store);
+ __ATTR(voice, S_IWUSR | S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute vol_attribute =
- __ATTR(vol, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store);
+ __ATTR(vol, S_IWUSR | S_IRUGO, spk_var_show, spk_var_store);
/*
* We should uncomment the following definition, when we agree on a
@@ -85,15 +84,15 @@ static struct kobj_attribute vol_attribute =
*/
static struct kobj_attribute delay_time_attribute =
- __ATTR(delay_time, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store);
+ __ATTR(delay_time, S_IWUSR | S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute direct_attribute =
- __ATTR(direct, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store);
+ __ATTR(direct, S_IWUSR | S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute full_time_attribute =
- __ATTR(full_time, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store);
+ __ATTR(full_time, S_IWUSR | S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute jiffy_delta_attribute =
- __ATTR(jiffy_delta, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store);
+ __ATTR(jiffy_delta, S_IWUSR | S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute trigger_time_attribute =
- __ATTR(trigger_time, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store);
+ __ATTR(trigger_time, S_IWUSR | S_IRUGO, spk_var_show, spk_var_store);
/*
* Create a group of attributes so that we can create and destroy them all
@@ -162,8 +161,8 @@ static char *get_initstring(void)
cp = buf;
var = synth_soft.vars;
while (var->var_id != MAXVARS) {
- if (var->var_id != CAPS_START && var->var_id != CAPS_STOP
- && var->var_id != DIRECT)
+ if (var->var_id != CAPS_START && var->var_id != CAPS_STOP &&
+ var->var_id != DIRECT)
cp = cp + sprintf(cp, var->u.n.synth_fmt,
var->u.n.value);
var++;
@@ -277,8 +276,7 @@ static ssize_t softsynth_write(struct file *fp, const char __user *buf,
return count;
}
-static unsigned int softsynth_poll(struct file *fp,
- struct poll_table_struct *wait)
+static unsigned int softsynth_poll(struct file *fp, struct poll_table_struct *wait)
{
unsigned long flags;
int ret = 0;
@@ -310,10 +308,8 @@ static const struct file_operations softsynth_fops = {
.release = softsynth_close,
};
-
static int softsynth_probe(struct spk_synth *synth)
{
-
if (misc_registered != 0)
return 0;
memset(&synth_device, 0, sizeof(synth_device));
diff --git a/drivers/staging/speakup/speakup_spkout.c b/drivers/staging/speakup/speakup_spkout.c
index e449f2770c1f..586890908826 100644
--- a/drivers/staging/speakup/speakup_spkout.c
+++ b/drivers/staging/speakup/speakup_spkout.c
@@ -1,6 +1,6 @@
/*
* originally written by: Kirk Reiser <kirk@braille.uwo.ca>
-* this version considerably modified by David Borowski, david575@rogers.com
+ * this version considerably modified by David Borowski, david575@rogers.com
*
* Copyright (C) 1998-99 Kirk Reiser.
* Copyright (C) 2003 David Borowski.
@@ -40,34 +40,33 @@ static struct var_t vars[] = {
V_LAST_VAR
};
-/*
- * These attributes will appear in /sys/accessibility/speakup/spkout.
- */
+/* These attributes will appear in /sys/accessibility/speakup/spkout. */
+
static struct kobj_attribute caps_start_attribute =
- __ATTR(caps_start, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store);
+ __ATTR(caps_start, S_IWUSR | S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute caps_stop_attribute =
- __ATTR(caps_stop, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store);
+ __ATTR(caps_stop, S_IWUSR | S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute pitch_attribute =
- __ATTR(pitch, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store);
+ __ATTR(pitch, S_IWUSR | S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute punct_attribute =
- __ATTR(punct, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store);
+ __ATTR(punct, S_IWUSR | S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute rate_attribute =
- __ATTR(rate, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store);
+ __ATTR(rate, S_IWUSR | S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute tone_attribute =
- __ATTR(tone, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store);
+ __ATTR(tone, S_IWUSR | S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute vol_attribute =
- __ATTR(vol, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store);
+ __ATTR(vol, S_IWUSR | S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute delay_time_attribute =
- __ATTR(delay_time, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store);
+ __ATTR(delay_time, S_IWUSR | S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute direct_attribute =
- __ATTR(direct, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store);
+ __ATTR(direct, S_IWUSR | S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute full_time_attribute =
- __ATTR(full_time, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store);
+ __ATTR(full_time, S_IWUSR | S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute jiffy_delta_attribute =
- __ATTR(jiffy_delta, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store);
+ __ATTR(jiffy_delta, S_IWUSR | S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute trigger_time_attribute =
- __ATTR(trigger_time, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store);
+ __ATTR(trigger_time, S_IWUSR | S_IRUGO, spk_var_show, spk_var_store);
/*
* Create a group of attributes so that we can create and destroy them all
diff --git a/drivers/staging/speakup/speakup_txprt.c b/drivers/staging/speakup/speakup_txprt.c
index fd98d4ffcb3e..b3d2cfd20ac8 100644
--- a/drivers/staging/speakup/speakup_txprt.c
+++ b/drivers/staging/speakup/speakup_txprt.c
@@ -1,6 +1,6 @@
/*
* originally written by: Kirk Reiser <kirk@braille.uwo.ca>
-* this version considerably modified by David Borowski, david575@rogers.com
+ * this version considerably modified by David Borowski, david575@rogers.com
*
* Copyright (C) 1998-99 Kirk Reiser.
* Copyright (C) 2003 David Borowski.
@@ -36,32 +36,31 @@ static struct var_t vars[] = {
V_LAST_VAR
};
-/*
- * These attributes will appear in /sys/accessibility/speakup/txprt.
- */
+/* These attributes will appear in /sys/accessibility/speakup/txprt. */
+
static struct kobj_attribute caps_start_attribute =
- __ATTR(caps_start, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store);
+ __ATTR(caps_start, S_IWUSR | S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute caps_stop_attribute =
- __ATTR(caps_stop, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store);
+ __ATTR(caps_stop, S_IWUSR | S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute pitch_attribute =
- __ATTR(pitch, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store);
+ __ATTR(pitch, S_IWUSR | S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute rate_attribute =
- __ATTR(rate, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store);
+ __ATTR(rate, S_IWUSR | S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute tone_attribute =
- __ATTR(tone, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store);
+ __ATTR(tone, S_IWUSR | S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute vol_attribute =
- __ATTR(vol, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store);
+ __ATTR(vol, S_IWUSR | S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute delay_time_attribute =
- __ATTR(delay_time, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store);
+ __ATTR(delay_time, S_IWUSR | S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute direct_attribute =
- __ATTR(direct, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store);
+ __ATTR(direct, S_IWUSR | S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute full_time_attribute =
- __ATTR(full_time, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store);
+ __ATTR(full_time, S_IWUSR | S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute jiffy_delta_attribute =
- __ATTR(jiffy_delta, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store);
+ __ATTR(jiffy_delta, S_IWUSR | S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute trigger_time_attribute =
- __ATTR(trigger_time, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store);
+ __ATTR(trigger_time, S_IWUSR | S_IRUGO, spk_var_show, spk_var_store);
/*
* Create a group of attributes so that we can create and destroy them all
diff --git a/drivers/staging/speakup/spk_priv_keyinfo.h b/drivers/staging/speakup/spk_priv_keyinfo.h
index 130e9cb0118b..c95b68ebd8e7 100644
--- a/drivers/staging/speakup/spk_priv_keyinfo.h
+++ b/drivers/staging/speakup/spk_priv_keyinfo.h
@@ -23,84 +23,82 @@
#define FIRST_SYNTH_VAR RATE
/* 0 is reserved for no remap */
-#define SPEAKUP_GOTO 0x01
-#define SPEECH_KILL 0x02
-#define SPEAKUP_QUIET 0x03
-#define SPEAKUP_CUT 0x04
-#define SPEAKUP_PASTE 0x05
-#define SAY_FIRST_CHAR 0x06
-#define SAY_LAST_CHAR 0x07
-#define SAY_CHAR 0x08
-#define SAY_PREV_CHAR 0x09
-#define SAY_NEXT_CHAR 0x0a
-#define SAY_WORD 0x0b
-#define SAY_PREV_WORD 0x0c
-#define SAY_NEXT_WORD 0x0d
-#define SAY_LINE 0x0e
-#define SAY_PREV_LINE 0x0f
-#define SAY_NEXT_LINE 0x10
-#define TOP_EDGE 0x11
-#define BOTTOM_EDGE 0x12
-#define LEFT_EDGE 0x13
-#define RIGHT_EDGE 0x14
-#define SPELL_PHONETIC 0x15
-#define SPELL_WORD 0x16
-#define SAY_SCREEN 0x17
-#define SAY_POSITION 0x18
-#define SAY_ATTRIBUTES 0x19
-#define SPEAKUP_OFF 0x1a
-#define SPEAKUP_PARKED 0x1b
-#define SAY_LINE_INDENT 0x1c
-#define SAY_FROM_TOP 0x1d
-#define SAY_TO_BOTTOM 0x1e
-#define SAY_FROM_LEFT 0x1f
-#define SAY_TO_RIGHT 0x20
-#define SAY_CHAR_NUM 0x21
-#define EDIT_SOME 0x22
-#define EDIT_MOST 0x23
-#define SAY_PHONETIC_CHAR 0x24
-#define EDIT_DELIM 0x25
-#define EDIT_REPEAT 0x26
-#define EDIT_EXNUM 0x27
-#define SET_WIN 0x28
-#define CLEAR_WIN 0x29
-#define ENABLE_WIN 0x2a
-#define SAY_WIN 0x2b
-#define SPK_LOCK 0x2c
-#define SPEAKUP_HELP 0x2d
-#define TOGGLE_CURSORING 0x2e
-#define READ_ALL_DOC 0x2f
-#define SPKUP_MAX_FUNC 0x30 /* one greater than the last func handler */
-
-#define SPK_KEY 0x80
-#define FIRST_EDIT_BITS 0x22
-
+#define SPEAKUP_GOTO 0x01
+#define SPEECH_KILL 0x02
+#define SPEAKUP_QUIET 0x03
+#define SPEAKUP_CUT 0x04
+#define SPEAKUP_PASTE 0x05
+#define SAY_FIRST_CHAR 0x06
+#define SAY_LAST_CHAR 0x07
+#define SAY_CHAR 0x08
+#define SAY_PREV_CHAR 0x09
+#define SAY_NEXT_CHAR 0x0a
+#define SAY_WORD 0x0b
+#define SAY_PREV_WORD 0x0c
+#define SAY_NEXT_WORD 0x0d
+#define SAY_LINE 0x0e
+#define SAY_PREV_LINE 0x0f
+#define SAY_NEXT_LINE 0x10
+#define TOP_EDGE 0x11
+#define BOTTOM_EDGE 0x12
+#define LEFT_EDGE 0x13
+#define RIGHT_EDGE 0x14
+#define SPELL_PHONETIC 0x15
+#define SPELL_WORD 0x16
+#define SAY_SCREEN 0x17
+#define SAY_POSITION 0x18
+#define SAY_ATTRIBUTES 0x19
+#define SPEAKUP_OFF 0x1a
+#define SPEAKUP_PARKED 0x1b
+#define SAY_LINE_INDENT 0x1c
+#define SAY_FROM_TOP 0x1d
+#define SAY_TO_BOTTOM 0x1e
+#define SAY_FROM_LEFT 0x1f
+#define SAY_TO_RIGHT 0x20
+#define SAY_CHAR_NUM 0x21
+#define EDIT_SOME 0x22
+#define EDIT_MOST 0x23
+#define SAY_PHONETIC_CHAR 0x24
+#define EDIT_DELIM 0x25
+#define EDIT_REPEAT 0x26
+#define EDIT_EXNUM 0x27
+#define SET_WIN 0x28
+#define CLEAR_WIN 0x29
+#define ENABLE_WIN 0x2a
+#define SAY_WIN 0x2b
+#define SPK_LOCK 0x2c
+#define SPEAKUP_HELP 0x2d
+#define TOGGLE_CURSORING 0x2e
+#define READ_ALL_DOC 0x2f
+#define SPKUP_MAX_FUNC 0x30 /* one greater than the last func handler */
+#define SPK_KEY 0x80
+#define FIRST_EDIT_BITS 0x22
#define FIRST_SET_VAR SPELL_DELAY
-#define VAR_START 0x40 /* increase if adding more than 0x3f functions */
+#define VAR_START 0x40 /* increase if adding more than 0x3f functions */
/* keys for setting variables, must be ordered same as the enum for var_ids */
/* with dec being even and inc being 1 greater */
-#define SPELL_DELAY_DEC (VAR_START+0)
-#define SPELL_DELAY_INC (SPELL_DELAY_DEC+1)
-#define PUNC_LEVEL_DEC (SPELL_DELAY_DEC+2)
-#define PUNC_LEVEL_INC (PUNC_LEVEL_DEC+1)
-#define READING_PUNC_DEC (PUNC_LEVEL_DEC+2)
-#define READING_PUNC_INC (READING_PUNC_DEC+1)
-#define ATTRIB_BLEEP_DEC (READING_PUNC_DEC+2)
-#define ATTRIB_BLEEP_INC (ATTRIB_BLEEP_DEC+1)
-#define BLEEPS_DEC (ATTRIB_BLEEP_DEC+2)
-#define BLEEPS_INC (BLEEPS_DEC+1)
-#define RATE_DEC (BLEEPS_DEC+2)
-#define RATE_INC (RATE_DEC+1)
-#define PITCH_DEC (RATE_DEC+2)
-#define PITCH_INC (PITCH_DEC+1)
-#define VOL_DEC (PITCH_DEC+2)
-#define VOL_INC (VOL_DEC+1)
-#define TONE_DEC (VOL_DEC+2)
-#define TONE_INC (TONE_DEC+1)
-#define PUNCT_DEC (TONE_DEC+2)
-#define PUNCT_INC (PUNCT_DEC+1)
-#define VOICE_DEC (PUNCT_DEC+2)
-#define VOICE_INC (VOICE_DEC+1)
+#define SPELL_DELAY_DEC (VAR_START + 0)
+#define SPELL_DELAY_INC (SPELL_DELAY_DEC + 1)
+#define PUNC_LEVEL_DEC (SPELL_DELAY_DEC + 2)
+#define PUNC_LEVEL_INC (PUNC_LEVEL_DEC + 1)
+#define READING_PUNC_DEC (PUNC_LEVEL_DEC + 2)
+#define READING_PUNC_INC (READING_PUNC_DEC + 1)
+#define ATTRIB_BLEEP_DEC (READING_PUNC_DEC + 2)
+#define ATTRIB_BLEEP_INC (ATTRIB_BLEEP_DEC + 1)
+#define BLEEPS_DEC (ATTRIB_BLEEP_DEC + 2)
+#define BLEEPS_INC (BLEEPS_DEC + 1)
+#define RATE_DEC (BLEEPS_DEC + 2)
+#define RATE_INC (RATE_DEC + 1)
+#define PITCH_DEC (RATE_DEC + 2)
+#define PITCH_INC (PITCH_DEC + 1)
+#define VOL_DEC (PITCH_DEC + 2)
+#define VOL_INC (VOL_DEC + 1)
+#define TONE_DEC (VOL_DEC + 2)
+#define TONE_INC (TONE_DEC + 1)
+#define PUNCT_DEC (TONE_DEC + 2)
+#define PUNCT_INC (PUNCT_DEC + 1)
+#define VOICE_DEC (PUNCT_DEC + 2)
+#define VOICE_INC (VOICE_DEC + 1)
#endif
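The retabbed DEC/INC block above encodes an invariant that the comment relies on: every variable gets two consecutive key codes starting at VAR_START, with the decrement code at an even offset and the increment code exactly one higher. Assuming that invariant, a key handler can recover both pieces cheaply; a hypothetical sketch (decode_var_key is not a real speakup function):

```c
#define VAR_START 0x40	/* mirrors the definition above */

/* Hypothetical decoder: recover the variable index and the direction
 * from a raw key code in the VAR_START range.
 */
static void decode_var_key(int key, int *var_index, int *delta)
{
	int off = key - VAR_START;

	*var_index = off / 2;		/* 0 = SPELL_DELAY, 1 = PUNC_LEVEL, ... */
	*delta = (off & 1) ? +1 : -1;	/* odd offset is the INC key */
}
```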
diff --git a/drivers/staging/speakup/spk_types.h b/drivers/staging/speakup/spk_types.h
index e8ff5d7d6419..b07f6cc4f284 100644
--- a/drivers/staging/speakup/spk_types.h
+++ b/drivers/staging/speakup/spk_types.h
@@ -1,16 +1,14 @@
#ifndef SPEAKUP_TYPES_H
#define SPEAKUP_TYPES_H
-/*
- * This file includes all of the typedefs and structs used in speakup.
- */
+/* This file includes all of the typedefs and structs used in speakup. */
#include <linux/types.h>
#include <linux/fs.h>
#include <linux/errno.h>
#include <linux/delay.h>
#include <linux/wait.h> /* for wait_queue */
-#include <linux/init.h> /* for __init */
+#include <linux/init.h> /* for __init */
#include <linux/module.h>
#include <linux/vt_kern.h>
#include <linux/spinlock.h>
@@ -105,7 +103,7 @@ struct st_var_header {
enum var_id_t var_id;
enum var_type_t var_type;
void *p_val; /* ptr to programs variable to store value */
- void *data; /* ptr to the vars data */
+ void *data; /* ptr to the vars data */
};
struct num_var_t {
@@ -114,8 +112,8 @@ struct num_var_t {
int low;
int high;
short offset, multiplier; /* for fiddling rates etc. */
- char *out_str; /* if synth needs char representation of number */
- int value; /* current value */
+ char *out_str; /* if synth needs char representation of number */
+ int value; /* current value */
};
struct punc_var_t {
@@ -169,7 +167,7 @@ struct spk_synth {
int (*probe)(struct spk_synth *synth);
void (*release)(void);
const char *(*synth_immediate)(struct spk_synth *synth,
- const char *buff);
+ const char *buff);
void (*catch_up)(struct spk_synth *synth);
void (*flush)(struct spk_synth *synth);
int (*is_alive)(struct spk_synth *synth);
@@ -181,7 +179,7 @@ struct spk_synth {
struct attribute_group attributes;
};
-/**
+/*
* module_spk_synth() - Helper macro for registering a speakup driver
* @__spk_synth: spk_synth struct
* Helper macro for speakup drivers which do not do anything special in module
diff --git a/drivers/staging/speakup/synth.c b/drivers/staging/speakup/synth.c
index 54b2f3918628..a61c02ba06da 100644
--- a/drivers/staging/speakup/synth.c
+++ b/drivers/staging/speakup/synth.c
@@ -8,7 +8,7 @@
#include <linux/delay.h> /* for loops_per_sec */
#include <linux/kmod.h>
#include <linux/jiffies.h>
-#include <linux/uaccess.h> /* for copy_from_user */
+#include <linux/uaccess.h> /* for copy_from_user */
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/kthread.h>
@@ -67,13 +67,14 @@ int spk_serial_synth_probe(struct spk_synth *synth)
return -ENODEV;
}
pr_info("%s: ttyS%i, Driver Version %s\n",
- synth->long_name, synth->ser, synth->version);
+ synth->long_name, synth->ser, synth->version);
synth->alive = 1;
return 0;
}
EXPORT_SYMBOL_GPL(spk_serial_synth_probe);
-/* Main loop of the progression thread: keep eating from the buffer
+/*
+ * Main loop of the progression thread: keep eating from the buffer
* and push to the serial port, waiting as needed
*
* For devices that have a "full" notification mechanism, the driver can
@@ -303,12 +304,11 @@ void spk_get_index_count(int *linecount, int *sentcount)
sentence_count = ind % 10;
if ((ind / 10) <= synth->indexing.currindex)
- index_count = synth->indexing.currindex-(ind/10);
+ index_count = synth->indexing.currindex - (ind / 10);
else
index_count = synth->indexing.currindex
- -synth->indexing.lowindex
- + synth->indexing.highindex-(ind/10)+1;
-
+ - synth->indexing.lowindex
+ + synth->indexing.highindex - (ind / 10) + 1;
}
*sentcount = sentence_count;
*linecount = index_count;
@@ -406,8 +406,8 @@ static int do_synth_init(struct spk_synth *in_synth)
speakup_register_var(var);
if (!spk_quiet_boot)
synth_printf("%s found\n", synth->long_name);
- if (synth->attributes.name
- && sysfs_create_group(speakup_kobj, &synth->attributes) < 0)
+ if (synth->attributes.name && sysfs_create_group(speakup_kobj,
+ &synth->attributes) < 0)
return -ENOMEM;
synth_flags = synth->flags;
wake_up_interruptible_all(&speakup_event);
@@ -476,10 +476,10 @@ void synth_remove(struct spk_synth *in_synth)
break;
}
for ( ; synths[i] != NULL; i++) /* compress table */
- synths[i] = synths[i+1];
+ synths[i] = synths[i + 1];
module_status = 0;
mutex_unlock(&spk_mutex);
}
EXPORT_SYMBOL_GPL(synth_remove);
-short spk_punc_masks[] = { 0, SOME, MOST, PUNC, PUNC|B_SYM };
+short spk_punc_masks[] = { 0, SOME, MOST, PUNC, PUNC | B_SYM };
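The arithmetic cleaned up in spk_get_index_count() packs a line index and a sentence count into one value (ind = line * 10 + sentence) and must cope with the synth's index counter wrapping from highindex back around to lowindex. A standalone sketch of that wraparound distance computation, using field names assumed from the hunk above:

```c
/* Sketch of the wraparound arithmetic in spk_get_index_count():
 * currindex cycles through [lowindex, highindex] and then wraps.
 */
struct demo_indexing {
	int currindex;
	int lowindex;
	int highindex;
};

static int index_distance(const struct demo_indexing *ix, int reported)
{
	if (reported <= ix->currindex)
		return ix->currindex - reported;

	/* reported is ahead of currindex, so the counter has wrapped */
	return ix->currindex - ix->lowindex
		+ ix->highindex - reported + 1;
}
```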
diff --git a/drivers/staging/speakup/thread.c b/drivers/staging/speakup/thread.c
index 90c383ee7c3f..8c64f1ada6e0 100644
--- a/drivers/staging/speakup/thread.c
+++ b/drivers/staging/speakup/thread.c
@@ -27,7 +27,7 @@ int speakup_thread(void *data)
our_sound = spk_unprocessed_sound;
spk_unprocessed_sound.active = 0;
prepare_to_wait(&speakup_event, &wait,
- TASK_INTERRUPTIBLE);
+ TASK_INTERRUPTIBLE);
should_break = kthread_should_stop() ||
our_sound.active ||
(synth && synth->catch_up && synth->alive &&
@@ -47,7 +47,8 @@ int speakup_thread(void *data)
if (our_sound.active)
kd_mksound(our_sound.freq, our_sound.jiffies);
if (synth && synth->catch_up && synth->alive) {
- /* It is up to the callee to take the lock, so that it
+ /*
+ * It is up to the callee to take the lock, so that it
* can sleep whenever it likes
*/
synth->catch_up(synth);
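The loop around this hunk is the textbook prepare_to_wait()/schedule()/finish_wait() idiom: the thread queues itself on the waitqueue before testing the wake condition, so a wakeup that races with the test is never lost. A minimal sketch of the idiom with placeholder names (demo_event and work_pending are illustrative):

```c
#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/wait.h>

static DECLARE_WAIT_QUEUE_HEAD(demo_event);
static int work_pending;

static int demo_thread(void *data)
{
	DEFINE_WAIT(wait);

	while (!kthread_should_stop()) {
		/* Queue first, then test: a wakeup arriving between the
		 * test and schedule() just makes schedule() return at once.
		 */
		prepare_to_wait(&demo_event, &wait, TASK_INTERRUPTIBLE);
		if (!work_pending && !kthread_should_stop())
			schedule();
		finish_wait(&demo_event, &wait);

		/* ... consume work_pending here ... */
	}
	return 0;
}
```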
diff --git a/drivers/staging/speakup/varhandlers.c b/drivers/staging/speakup/varhandlers.c
index 21186e3dc7ad..cc984196020f 100644
--- a/drivers/staging/speakup/varhandlers.c
+++ b/drivers/staging/speakup/varhandlers.c
@@ -237,8 +237,7 @@ int spk_set_num_var(int input, struct st_var_header *var, int how)
if (!var_data->u.n.out_str)
l = sprintf(cp, var_data->u.n.synth_fmt, (int)val);
else
- l = sprintf(cp,
- var_data->u.n.synth_fmt, var_data->u.n.out_str[val]);
+ l = sprintf(cp, var_data->u.n.synth_fmt, var_data->u.n.out_str[val]);
synth_printf("%s", cp);
return 0;
}
@@ -266,7 +265,8 @@ int spk_set_string_var(const char *page, struct st_var_header *var, int len)
return 0;
}
-/* spk_set_mask_bits sets or clears the punc/delim/repeat bits,
+/*
+ * spk_set_mask_bits sets or clears the punc/delim/repeat bits,
 * if input is NULL, uses the defaults.
 * values for how: 0 clears bits of chars supplied,
 * 1 clears all, 2 sets bits for chars
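Those three "how" modes map naturally onto per-character bitmask updates. A hedged sketch of the semantics against a hypothetical 256-entry mask table (apply_mask_bits and char_mask are illustrative, not the speakup implementation):

```c
/* Illustrative semantics for spk_set_mask_bits()'s "how" argument,
 * applied to a hypothetical per-character mask table.
 */
static unsigned short char_mask[256];

static void apply_mask_bits(const char *input, unsigned short bits, int how)
{
	int i;

	switch (how) {
	case 1:				/* clear the bits for ALL chars */
		for (i = 0; i < 256; i++)
			char_mask[i] &= ~bits;
		break;
	case 0:				/* clear bits only for chars supplied */
		for (; *input; input++)
			char_mask[(unsigned char)*input] &= ~bits;
		break;
	case 2:				/* set bits for chars supplied */
		for (; *input; input++)
			char_mask[(unsigned char)*input] |= bits;
		break;
	}
}
```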
diff --git a/drivers/staging/unisys/include/iochannel.h b/drivers/staging/unisys/include/iochannel.h
index cba4433bcd51..54f490090a59 100644
--- a/drivers/staging/unisys/include/iochannel.h
+++ b/drivers/staging/unisys/include/iochannel.h
@@ -1,20 +1,20 @@
-/* Copyright (C) 2010 - 2013 UNISYS CORPORATION */
+/* Copyright (C) 2010 - 2016 UNISYS CORPORATION */
/* All rights reserved. */
#ifndef __IOCHANNEL_H__
#define __IOCHANNEL_H__
/*
 * Everything needed for IOPart-GuestPart communication is defined in
- * this file. Note: Everything is OS-independent because this file is
+ * this file. Note: Everything is OS-independent because this file is
* used by Windows, Linux and possible EFI drivers.
*/
/*
 * Communication flow between the IOPart and GuestPart uses the channel header's
- * channel state. The following states are currently being used:
+ * channel state. The following states are currently being used:
* UNINIT(All Zeroes), CHANNEL_ATTACHING, CHANNEL_ATTACHED, CHANNEL_OPENED
*
- * additional states will be used later. No locking is needed to switch between
+ * Additional states will be used later. No locking is needed to switch between
* states due to the following rules:
*
 * 1. IOPart is the only partition allowed to change from UNINIT
@@ -39,10 +39,11 @@
#define ULTRA_VSWITCH_CHANNEL_PROTOCOL_SIGNATURE \
ULTRA_CHANNEL_PROTOCOL_SIGNATURE
-/* Must increment these whenever you insert or delete fields within this channel
- * struct. Also increment whenever you change the meaning of fields within this
- * channel struct so as to break pre-existing software. Note that you can
- * usually add fields to the END of the channel struct withOUT needing to
+/*
+ * Must increment these whenever you insert or delete fields within this channel
+ * struct. Also increment whenever you change the meaning of fields within this
+ * channel struct so as to break pre-existing software. Note that you can
+ * usually add fields to the END of the channel struct without needing to
* increment this.
*/
#define ULTRA_VHBA_CHANNEL_PROTOCOL_VERSIONID 2
@@ -70,61 +71,62 @@
#define MINNUM(a, b) (((a) < (b)) ? (a) : (b))
#define MAXNUM(a, b) (((a) > (b)) ? (a) : (b))
-/* define the two queues per data channel between iopart and ioguestparts */
-/* used by ioguestpart to 'insert' signals to iopart */
+/* Define the two queues per data channel between iopart and ioguestparts. */
+/* Used by ioguestpart to 'insert' signals to iopart. */
#define IOCHAN_TO_IOPART 0
-/* used by ioguestpart to 'remove' signals from iopart, same previous queue */
+/* Used by ioguestpart to 'remove' signals from iopart, same queue as above. */
#define IOCHAN_FROM_IOPART 1
-/* size of cdb - i.e., scsi cmnd */
+/* Size of cdb - i.e., SCSI cmnd */
#define MAX_CMND_SIZE 16
#define MAX_SENSE_SIZE 64
#define MAX_PHYS_INFO 64
-/* various types of network packets that can be sent in cmdrsp */
+/* Various types of network packets that can be sent in cmdrsp. */
enum net_types {
- NET_RCV_POST = 0, /* submit buffer to hold receiving
+ NET_RCV_POST = 0, /*
+ * Submit buffer to hold an
* incoming packet
*/
- /* virtnic -> uisnic */
+ /* visornic -> uisnic */
NET_RCV, /* incoming packet received */
/* uisnic -> virtpci */
NET_XMIT, /* for outgoing net packets */
- /* virtnic -> uisnic */
+ /* visornic -> uisnic */
NET_XMIT_DONE, /* outgoing packet xmitted */
/* uisnic -> virtpci */
NET_RCV_ENBDIS, /* enable/disable packet reception */
- /* virtnic -> uisnic */
+ /* visornic -> uisnic */
NET_RCV_ENBDIS_ACK, /* acknowledge enable/disable packet */
/* reception */
- /* uisnic -> virtnic */
+ /* uisnic -> visornic */
NET_RCV_PROMISC, /* enable/disable promiscuous mode */
- /* virtnic -> uisnic */
- NET_CONNECT_STATUS, /* indicate the loss or restoration of a network
+ /* visornic -> uisnic */
+ NET_CONNECT_STATUS, /*
+ * Indicates the loss or restoration of a network
* connection
*/
- /* uisnic -> virtnic */
- NET_MACADDR, /* indicates the client has requested to update
- * its MAC addr
+ /* uisnic -> visornic */
+ NET_MACADDR, /*
+ * Indicates the client has requested to update
+ * its MAC address
*/
- NET_MACADDR_ACK, /* MAC address */
+ NET_MACADDR_ACK, /* MAC address acknowledge */
};
-#define ETH_HEADER_SIZE 14 /* size of ethernet header */
-
-#define ETH_MIN_DATA_SIZE 46 /* minimum eth data size */
-#define ETH_MIN_PACKET_SIZE (ETH_HEADER_SIZE + ETH_MIN_DATA_SIZE)
+#define ETH_MIN_DATA_SIZE 46 /* minimum eth data size */
+#define ETH_MIN_PACKET_SIZE (ETH_HLEN + ETH_MIN_DATA_SIZE)
-#define ETH_MAX_MTU 16384 /* maximum data size */
+#define VISOR_ETH_MAX_MTU 16384 /* maximum data size */
#ifndef MAX_MACADDR_LEN
#define MAX_MACADDR_LEN 6 /* number of bytes in MAC address */
-#endif /* MAX_MACADDR_LEN */
+#endif
-/* various types of scsi task mgmt commands */
+/* Various types of scsi task mgmt commands. */
enum task_mgmt_types {
TASK_MGMT_ABORT_TASK = 1,
TASK_MGMT_BUS_RESET,
@@ -132,7 +134,7 @@ enum task_mgmt_types {
TASK_MGMT_TARGET_RESET,
};
-/* various types of vdisk mgmt commands */
+/* Various types of vdisk mgmt commands. */
enum vdisk_mgmt_types {
VDISK_MGMT_ACQUIRE = 1,
VDISK_MGMT_RELEASE,
@@ -146,7 +148,7 @@ struct phys_info {
#define MIN_NUMSIGNALS 64
-/* structs with pragma pack */
+/* Structs with pragma pack. */
struct guest_phys_info {
u64 address;
@@ -156,9 +158,9 @@ struct guest_phys_info {
#define GPI_ENTRIES_PER_PAGE (PAGE_SIZE / sizeof(struct guest_phys_info))
struct uisscsi_dest {
- u32 channel; /* channel == bus number */
- u32 id; /* id == target number */
- u32 lun; /* lun == logical unit number */
+ u32 channel; /* channel == bus number */
+ u32 id; /* id == target number */
+ u32 lun; /* lun == logical unit number */
} __packed;
struct vhba_wwnn {
@@ -166,7 +168,8 @@ struct vhba_wwnn {
u32 wwnn2;
} __packed;
-/* WARNING: Values stired in this structure must contain maximum counts (not
+/*
+ * WARNING: Values stored in this structure must contain maximum counts (not
* maximum values).
*/
struct vhba_config_max {/* 20 bytes */
@@ -189,23 +192,24 @@ struct uiscmdrsp_scsi {
* information for each
* fragment
*/
- enum dma_data_direction data_dir; /* direction of the data, if any */
+ enum dma_data_direction data_dir; /* direction of the data, if any */
struct uisscsi_dest vdest; /* identifies the virtual hba, id, */
/* channel, lun to which cmd was sent */
- /* Needed to queue the rsp back to cmd originator */
- int linuxstat; /* original Linux status used by linux vdisk */
+ /* Needed to queue the rsp back to cmd originator. */
+ int linuxstat; /* original Linux status used by Linux vdisk */
u8 scsistat; /* the scsi status */
u8 addlstat; /* non-scsi status */
#define ADDL_SEL_TIMEOUT 4
- /* the following fields are need to determine the result of command */
+ /* The following fields are needed to determine the result of the command. */
u8 sensebuf[MAX_SENSE_SIZE]; /* sense info in case cmd failed; */
- /* it holds the sense_data struct; */
- /* see that struct for details. */
- void *vdisk; /* pointer to the vdisk to clean up when IO completes. */
+ /* sensebuf holds the sense_data struct; */
+ /* See sense_data struct for more details. */
+ void *vdisk; /* Pointer to the vdisk to clean up when IO completes. */
int no_disk_result;
- /* used to return no disk inquiry result
+ /*
+ * Used to return no disk inquiry result
* when no_disk_result is set to 1,
* scsi.scsistat is SAM_STAT_GOOD
* scsi.addlstat is 0
@@ -214,35 +218,44 @@ struct uiscmdrsp_scsi {
*/
} __packed;
-/* Defines to support sending correct inquiry result when no disk is
+/*
+ * Defines to support sending correct inquiry result when no disk is
* configured.
*/
-/* From SCSI SPC2 -
+/*
+ * From SCSI SPC2 -
*
* If the target is not capable of supporting a device on this logical unit, the
* device server shall set this field to 7Fh (PERIPHERAL QUALIFIER set to 011b
* and PERIPHERAL DEVICE TYPE set to 1Fh).
*
- *The device server is capable of supporting the specified peripheral device
- *type on this logical unit. However, the physical device is not currently
- *connected to this logical unit.
+ * The device server is capable of supporting the specified peripheral device
+ * type on this logical unit. However, the physical device is not currently
+ * connected to this logical unit.
*/
-#define DEV_NOT_CAPABLE 0x7f /* peripheral qualifier of 0x3 */
- /* peripheral type of 0x1f */
- /* specifies no device but target present */
+#define DEV_NOT_CAPABLE 0x7f /*
+ * peripheral qualifier of 0x3
+ * peripheral type of 0x1f
+ * specifies no device but target present
+ */
-#define DEV_DISK_CAPABLE_NOT_PRESENT 0x20 /* peripheral qualifier of 0x1 */
- /* peripheral type of 0 - disk */
- /* specifies device capable, but not present */
+#define DEV_DISK_CAPABLE_NOT_PRESENT 0x20 /* peripheral qualifier of 0x1
+ * peripheral type of 0 - disk
+ * Specifies device capable, but
+ * not present
+ */
-#define DEV_HISUPPORT 0x10 /* HiSup = 1; shows support for report luns */
- /* must be returned for lun 0. */
+#define DEV_HISUPPORT 0x10 /*
+ * HiSup = 1; shows support for report luns
+ * must be returned for lun 0.
+ */
-/* NOTE: Linux code assumes inquiry contains 36 bytes. Without checking length
- * in buf[4] some linux code accesses bytes beyond 5 to retrieve vendor, product
- * & revision. Yikes! So let us always send back 36 bytes, the minimum for
+/*
+ * NOTE: Linux code assumes inquiry contains 36 bytes. Without checking length
+ * in buf[4] some Linux code accesses bytes beyond 5 to retrieve vendor, product
+ * and revision. Yikes! So let us always send back 36 bytes, the minimum for
* inquiry result.
*/
#define NO_DISK_INQUIRY_RESULT_LEN 36
@@ -250,11 +263,12 @@ struct uiscmdrsp_scsi {
#define MIN_INQUIRY_RESULT_LEN 5 /* 5 bytes minimum for inquiry result */
/* SCSI device version for no disk inquiry result */
-#define SCSI_SPC2_VER 4 /* indicates SCSI SPC2 (SPC3 is 5) */
+#define SCSI_SPC2_VER 4 /* indicates SCSI SPC2 (SPC3 is 5) */
-/* Struct & Defines to support sense information. */
+/* Struct and Defines to support sense information. */
-/* The following struct is returned in sensebuf field in uiscmdrsp_scsi. It is
+/*
+ * The following struct is returned in sensebuf field in uiscmdrsp_scsi. It is
* initialized in exactly the manner that is recommended in Windows (hence the
* odd values).
* When set, these fields will have the following values:
@@ -288,9 +302,9 @@ struct net_pkt_xmt {
int len; /* full length of data in the packet */
int num_frags; /* number of fragments in frags containing data */
struct phys_info frags[MAX_PHYS_INFO]; /* physical page information */
- char ethhdr[ETH_HEADER_SIZE]; /* the ethernet header */
+ char ethhdr[ETH_HLEN]; /* the ethernet header */
struct {
- /* these are needed for csum at uisnic end */
+ /* These are needed for csum at uisnic end */
u8 valid; /* 1 = struct is valid - else ignore */
u8 hrawoffv; /* 1 = hwrafoff is valid */
u8 nhrawoffv; /* 1 = nhwrafoff is valid */
@@ -302,7 +316,8 @@ struct net_pkt_xmt {
/* nhrawoff points to the start of the NETWORK LAYER HEADER */
} lincsum;
- /* **** NOTE ****
+ /*
+ * NOTE:
* The full packet is described in frags but the ethernet header is
 * separately kept in ethhdr so that uisnic doesn't have to "MAP" the
* guest memory to get to the header. uisnic needs ethhdr to
@@ -311,41 +326,54 @@ struct net_pkt_xmt {
} __packed;
struct net_pkt_xmtdone {
- u32 xmt_done_result; /* result of NET_XMIT */
+ u32 xmt_done_result; /* result of NET_XMIT */
} __packed;
-/* RCVPOST_BUF_SIZe must be at most page_size(4096) - cache_line_size (64) The
+/*
+ * RCVPOST_BUF_SIZE must be at most page_size(4096) - cache_line_size (64). The
* reason is because dev_skb_alloc which is used to generate RCV_POST skbs in
- * virtnic requires that there is "overhead" in the buffer, and pads 16 bytes. I
- * prefer to use 1 full cache line size for "overhead" so that transfers are
- * better. IOVM requires that a buffer be represented by 1 phys_info structure
+ * visornic requires that there is "overhead" in the buffer, and pads 16 bytes.
+ * Use 1 full cache line size for "overhead" so that transfers are optimized.
+ * IOVM requires that a buffer be represented by 1 phys_info structure
* which can only cover page_size.
*/
#define RCVPOST_BUF_SIZE 4032
#define MAX_NET_RCV_CHAIN \
- ((ETH_MAX_MTU + ETH_HEADER_SIZE + RCVPOST_BUF_SIZE - 1) \
+ ((VISOR_ETH_MAX_MTU + ETH_HLEN + RCVPOST_BUF_SIZE - 1) \
/ RCVPOST_BUF_SIZE)
+/*
+ * rcv buf size must be large enough to include ethernet data len + ethernet
+ * header len - we are choosing 2K because it is guaranteed to be describable.
+ */
struct net_pkt_rcvpost {
- /* rcv buf size must be large enough to include ethernet data len +
- * ethernet header len - we are choosing 2K because it is guaranteed
- * to be describable
- */
- struct phys_info frag; /* physical page information for the */
- /* single fragment 2K rcv buf */
- u64 unique_num;
- /* unique_num ensure that receive posts are returned to */
- /* the Adapter which we sent them originally. */
+ /* Physical page information for the single fragment 2K rcv buf */
+ struct phys_info frag;
+
+ /*
+ * Ensures that receive posts are returned to the adapter which we sent
+ * them from originally.
+ */
+ u64 unique_num;
+
} __packed;
+/*
+ * The number of rcvbuf that can be chained is based on max mtu and size of each
+ * rcvbuf.
+ */
struct net_pkt_rcv {
- /* the number of receive buffers that can be chained */
- /* is based on max mtu and size of each rcv buf */
- u32 rcv_done_len; /* length of received data */
- u8 numrcvbufs; /* number of receive buffers that contain the */
- /* incoming data; guest end MUST chain these together. */
- void *rcvbuf[MAX_NET_RCV_CHAIN]; /* list of chained rcvbufs */
- /* each entry is a receive buffer provided by NET_RCV_POST. */
+ u32 rcv_done_len; /* length of received data */
+
+ /*
+ * numrcvbufs: number of rcvbufs that contain the incoming data; guest
+ * side MUST chain these together.
+ */
+ u8 numrcvbufs;
+
+ void *rcvbuf[MAX_NET_RCV_CHAIN]; /* list of chained rcvbufs */
+
+ /* Each entry is a receive buffer provided by NET_RCV_POST. */
/* NOTE: first rcvbuf in the chain will also be provided in net.buf. */
u64 unique_num;
u32 rcvs_dropped_delta;
@@ -353,12 +381,12 @@ struct net_pkt_rcv {
struct net_pkt_enbdis {
void *context;
- u16 enable; /* 1 = enable, 0 = disable */
+ u16 enable; /* 1 = enable, 0 = disable */
} __packed;
struct net_pkt_macaddr {
void *context;
- u8 macaddr[MAX_MACADDR_LEN]; /* 6 bytes */
+ u8 macaddr[MAX_MACADDR_LEN]; /* 6 bytes */
} __packed;
/* cmd rsp packet used for VNIC network traffic */
@@ -379,41 +407,44 @@ struct uiscmdrsp_net {
} __packed;
struct uiscmdrsp_scsitaskmgmt {
+ /* The type of task. */
enum task_mgmt_types tasktype;
- /* the type of task */
+ /* The vdisk for which this task mgmt is generated. */
struct uisscsi_dest vdest;
- /* the vdisk for which this task mgmt is generated */
+ /*
+ * This is a handle that the guest has saved off for its own use.
+ * The handle value is preserved by iopart and returned as in task
+ * mgmt rsp.
+ */
u64 handle;
- /* This is a handle that the guest has saved off for its own use.
- * Its value is preserved by iopart & returned as is in the task
- * mgmt rsp.
- */
+ /*
+ * For Linux guests, this is a pointer to wait_queue_head that a
+ * thread is waiting on to see if the taskmgmt command has completed.
+ * When the rsp is received by guest, the thread receiving the
+ * response uses this to notify the thread waiting for taskmgmt
+ * command completion. Its value is preserved by iopart and returned
+ * as is in the task mgmt rsp.
+ */
u64 notify_handle;
- /* For linux guests, this is a pointer to wait_queue_head that a
- * thread is waiting on to see if the taskmgmt command has completed.
- * When the rsp is received by guest, the thread receiving the
- * response uses this to notify the thread waiting for taskmgmt
- * command completion. Its value is preserved by iopart & returned
- * as is in the task mgmt rsp.
- */
+ /*
+ * This is a handle to the location in the guest where the result of
+ * the taskmgmt command (result field) is saved to when the response
+ * is handled. Its value is preserved by iopart and returned as is in
+ * the task mgmt rsp.
+ */
u64 notifyresult_handle;
- /* this is a handle to location in guest where the result of the
- * taskmgmt command (result field) is to saved off when the response
- * is handled. Its value is preserved by iopart & returned as is in
- * the task mgmt rsp.
- */
+ /* Result of taskmgmt command - set by IOPart - values are: */
char result;
- /* result of taskmgmt command - set by IOPart - values are: */
#define TASK_MGMT_FAILED 0
} __packed;
-/* Used by uissd to send disk add/remove notifications to Guest */
+/* Used by uissd to send disk add/remove notifications to Guest. */
/* Note that the vHba pointer is not used by the Client/Guest side. */
struct uiscmdrsp_disknotify {
u8 add; /* 0-remove, 1-add */
@@ -421,49 +452,50 @@ struct uiscmdrsp_disknotify {
u32 channel, id, lun; /* SCSI Path of Disk to be added or removed */
} __packed;
-/* The following is used by virthba/vSCSI to send the Acquire/Release commands
+/*
+ * The following is used by virthba/vSCSI to send the Acquire/Release commands
* to the IOVM.
*/
struct uiscmdrsp_vdiskmgmt {
+ /* The type of task */
enum vdisk_mgmt_types vdisktype;
- /* the type of task */
+ /* The vdisk for which this task mgmt is generated */
struct uisscsi_dest vdest;
- /* the vdisk for which this task mgmt is generated */
+ /*
+ * This is a handle that the guest has saved off for its own use. Its
+ * value is preserved by iopart and returned as is in the task mgmt rsp.
+ */
u64 handle;
- /* This is a handle that the guest has saved off for its own use.
- * Its value is preserved by iopart & returned as is in the task
- * mgmt rsp.
- */
+ /*
+ * For Linux guests, this is a pointer to wait_queue_head that a
+ * thread is waiting on to see if the tskmgmt command has completed.
+ * When the rsp is received by guest, the thread receiving the
+ * response uses this to notify the thread waiting for taskmgmt
+ * command completion. Its value is preserved by iopart and returned
+ * as is in the task mgmt rsp.
+ */
u64 notify_handle;
- /* For linux guests, this is a pointer to wait_queue_head that a
- * thread is waiting on to see if the tskmgmt command has completed.
- * When the rsp is received by guest, the thread receiving the
- * response uses this to notify the thread waiting for taskmgmt
- * command completion. Its value is preserved by iopart & returned
- * as is in the task mgmt rsp.
- */
+ /*
+ * Handle to the location in guest where the result of the
+ * taskmgmt command (result field) is saved to when the response
+ * is handled. Its value is preserved by iopart and returned as is in
+ * the task mgmt rsp.
+ */
u64 notifyresult_handle;
- /* this is a handle to location in guest where the result of the
- * taskmgmt command (result field) is to saved off when the response
- * is handled. Its value is preserved by iopart & returned as is in
- * the task mgmt rsp.
- */
+ /* Result of taskmgmt command - set by IOPart - values are: */
char result;
-
- /* result of taskmgmt command - set by IOPart - values are: */
-#define VDISK_MGMT_FAILED 0
} __packed;
-/* keeping cmd & rsp info in one structure for now cmd rsp packet for scsi */
+/* Keeping cmd and rsp info in one structure for now; cmd/rsp packet for SCSI. */
struct uiscmdrsp {
char cmdtype;
-/* describes what type of information is in the struct */
+/* Describes what type of information is in the struct */
#define CMD_SCSI_TYPE 1
#define CMD_NET_TYPE 2
#define CMD_SCSITASKMGMT_TYPE 3
@@ -476,11 +508,11 @@ struct uiscmdrsp {
struct uiscmdrsp_disknotify disknotify;
struct uiscmdrsp_vdiskmgmt vdiskmgmt;
};
- void *private_data; /* send the response when the cmd is */
- /* done (scsi & scsittaskmgmt). */
+ /* Send the response when the cmd is done (scsi and scsittaskmgmt). */
+ void *private_data;
struct uiscmdrsp *next; /* General Purpose Queue Link */
- struct uiscmdrsp *activeQ_next; /* Used to track active commands */
- struct uiscmdrsp *activeQ_prev; /* Used to track active commands */
+ struct uiscmdrsp *activeQ_next; /* Pointer to the next active command */
+ struct uiscmdrsp *activeQ_prev; /* Pointer to the prev active command */
} __packed;
struct iochannel_vhba {
@@ -493,7 +525,8 @@ struct iochannel_vnic {
u32 mtu; /* 4 bytes */
uuid_le zone_uuid; /* 16 bytes */
} __packed;
-/* This is just the header of the IO channel. It is assumed that directly after
+/*
+ * This is just the header of the IO channel. It is assumed that directly after
* this header there is a large region of memory which contains the command and
* response queues as specified in cmd_q and rsp_q SIGNAL_QUEUE_HEADERS.
*/
@@ -507,31 +540,19 @@ struct spar_io_channel_protocol {
} __packed;
#define MAX_CLIENTSTRING_LEN 1024
- /* client_string is NULL termimated so holds max -1 bytes */
+ /* client_string is NULL terminated so holds max-1 bytes */
u8 client_string[MAX_CLIENTSTRING_LEN];
} __packed;
-/* INLINE functions for initializing and accessing I/O data channels */
-#define SIZEOF_PROTOCOL (COVER(sizeof(struct spar_io_channel_protocol), 64))
+/* INLINE functions for initializing and accessing I/O data channels. */
#define SIZEOF_CMDRSP (COVER(sizeof(struct uiscmdrsp), 64))
-#define MIN_IO_CHANNEL_SIZE COVER(SIZEOF_PROTOCOL + \
- 2 * MIN_NUMSIGNALS * SIZEOF_CMDRSP, 4096)
-
-/*
- * INLINE function for expanding a guest's pfn-off-size into multiple 4K page
- * pfn-off-size entires.
- */
-
-/* use 4K page sizes when we it comes to passing page information between */
-/* Guest and IOPartition. */
+/* Use 4K page sizes when passing page info between Guest and IOPartition. */
#define PI_PAGE_SIZE 0x1000
#define PI_PAGE_MASK 0x0FFF
-/* returns next non-zero index on success or zero on failure (i.e. out of
- * room)
- */
-static inline u16
+/* Returns next non-zero index on success or 0 on failure (i.e. out of room). */
+static inline u16
add_physinfo_entries(u64 inp_pfn, u16 inp_off, u32 inp_len, u16 index,
u16 max_pi_arr_entries, struct phys_info pi_arr[])
{
@@ -540,7 +561,7 @@ add_physinfo_entries(u64 inp_pfn, u16 inp_off, u32 inp_len, u16 index,
firstlen = PI_PAGE_SIZE - inp_off;
if (inp_len <= firstlen) {
- /* the input entry spans only one page - add as is */
+ /* The input entry spans only one page - add as is. */
if (index >= max_pi_arr_entries)
return 0;
pi_arr[index].pi_pfn = inp_pfn;
@@ -549,7 +570,7 @@ add_physinfo_entries(u64 inp_pfn, u16 inp_off, u32 inp_len, u16 index,
return index + 1;
}
- /* this entry spans multiple pages */
+ /* This entry spans multiple pages. */
for (len = inp_len, i = 0; len;
len -= pi_arr[index + i].pi_len, i++) {
if (index + i >= max_pi_arr_entries)
@@ -567,4 +588,4 @@ add_physinfo_entries(u64 inp_pfn, u16 inp_off, u32 inp_len, u16 index,
return index + i;
}
-#endif /* __IOCHANNEL_H__ */
+#endif /* __IOCHANNEL_H__ */
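Two computations in this header deserve a note. MAX_NET_RCV_CHAIN is a ceiling division: a maximum-size frame (VISOR_ETH_MAX_MTU + ETH_HLEN bytes) is spread across RCVPOST_BUF_SIZE-byte buffers, and adding RCVPOST_BUF_SIZE - 1 before dividing rounds the quotient up. And add_physinfo_entries() is meant to be called once per guest fragment, accumulating 4K-page entries until it returns 0 when the array is full. A hedged usage sketch (expand_frags and the pi_off field name are assumptions):

```c
#include <linux/errno.h>
#include <linux/types.h>

/* The ceiling-division idiom behind MAX_NET_RCV_CHAIN:
 * (16384 + 14 + 4032 - 1) / 4032 == 5 receive buffers per max frame.
 */
#define CEIL_DIV(n, d)	(((n) + (d) - 1) / (d))

/* Hypothetical caller of add_physinfo_entries(): expand each guest
 * fragment into 4K-page phys_info entries; a return of 0 signals
 * that the destination array ran out of room.
 */
static int expand_frags(const struct phys_info *src, int num_frags,
			struct phys_info pi_arr[], u16 max_entries)
{
	u16 index = 0;
	int i;

	for (i = 0; i < num_frags; i++) {
		index = add_physinfo_entries(src[i].pi_pfn, src[i].pi_off,
					     src[i].pi_len, index,
					     max_entries, pi_arr);
		if (index == 0)
			return -ENOSPC;
	}
	return index;
}
```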
diff --git a/drivers/staging/unisys/include/visorbus.h b/drivers/staging/unisys/include/visorbus.h
index 677627c72c4c..03d56f818a86 100644
--- a/drivers/staging/unisys/include/visorbus.h
+++ b/drivers/staging/unisys/include/visorbus.h
@@ -166,6 +166,8 @@ struct visor_device {
struct controlvm_message_header *pending_msg_hdr;
void *vbus_hdr_info;
uuid_le partition_uuid;
+ struct dentry *debugfs_dir;
+ struct dentry *debugfs_client_bus_info;
};
#define to_visor_device(x) container_of(x, struct visor_device, device)
diff --git a/drivers/staging/unisys/visorbus/vbuschannel.h b/drivers/staging/unisys/visorbus/vbuschannel.h
index e97917522f6a..b0df26155d02 100644
--- a/drivers/staging/unisys/visorbus/vbuschannel.h
+++ b/drivers/staging/unisys/visorbus/vbuschannel.h
@@ -23,6 +23,7 @@
* the client devices and client drivers for the server end to see.
*/
#include <linux/uuid.h>
+#include <linux/ctype.h>
#include "channel.h"
/* {193b331b-c58f-11da-95a9-00e08161165f} */
@@ -50,12 +51,6 @@ static const uuid_le spar_vbus_channel_protocol_uuid =
SPAR_VBUS_CHANNEL_PROTOCOL_VERSIONID, \
SPAR_VBUS_CHANNEL_PROTOCOL_SIGNATURE)
-#define SPAR_VBUS_CHANNEL_OK_SERVER(actual_bytes) \
- (spar_check_channel_server(spar_vbus_channel_protocol_uuid, \
- "vbus", \
- sizeof(struct spar_vbus_channel_protocol),\
- actual_bytes))
-
#pragma pack(push, 1) /* both GCC and VC now allow this pragma */
/*
@@ -72,199 +67,38 @@ struct ultra_vbus_deviceinfo {
};
/**
- * vbuschannel_sanitize_buffer() - remove non-printable chars from buffer
- * @p: destination buffer where chars are written to
- * @remain: number of bytes that can be written starting at #p
- * @src: pointer to source buffer
- * @srcmax: number of valid characters at #src
- *
- * Reads chars from the buffer at @src for @srcmax bytes, and writes to
- * the buffer at @p, which is @remain bytes long, ensuring never to
- * overflow the buffer at @p, using the following rules:
- * - printable characters are simply copied from the buffer at @src to the
- * buffer at @p
- * - intervening streaks of non-printable characters in the buffer at @src
- * are replaced with a single space in the buffer at @p
- * Note that we pay no attention to '\0'-termination.
- *
- * Pass @p == NULL and @remain == 0 for this special behavior -- In this
- * case, we simply return the number of bytes that WOULD HAVE been written
- * to a buffer at @p, had it been infinitely big.
- *
- * Return: the number of bytes written to @p (or WOULD HAVE been written to
- * @p, as described in the previous paragraph)
- */
-static inline int
-vbuschannel_sanitize_buffer(char *p, int remain, char *src, int srcmax)
-{
- int chars = 0;
- int nonprintable_streak = 0;
-
- while (srcmax > 0) {
- if ((*src >= ' ') && (*src < 0x7f)) {
- if (nonprintable_streak) {
- if (remain > 0) {
- *p = ' ';
- p++;
- remain--;
- chars++;
- } else if (!p) {
- chars++;
- }
- nonprintable_streak = 0;
- }
- if (remain > 0) {
- *p = *src;
- p++;
- remain--;
- chars++;
- } else if (!p) {
- chars++;
- }
- } else {
- nonprintable_streak = 1;
- }
- src++;
- srcmax--;
- }
- return chars;
-}
-
-#define VBUSCHANNEL_ADDACHAR(ch, p, remain, chars) \
- do { \
- if (remain <= 0) \
- break; \
- *p = ch; \
- p++; chars++; remain--; \
- } while (0)
-
-/**
- * vbuschannel_itoa() - convert non-negative int to string
- * @p: destination string
- * @remain: max number of bytes that can be written to @p
- * @num: input int to convert
- *
- * Converts the non-negative value at @num to an ascii decimal string
- * at @p, writing at most @remain bytes. Note there is NO '\0' termination
- * written to @p.
- *
- * Return: number of bytes written to @p
- *
- */
-static inline int
-vbuschannel_itoa(char *p, int remain, int num)
-{
- int digits = 0;
- char s[32];
- int i;
-
- if (num == 0) {
- /* '0' is a special case */
- if (remain <= 0)
- return 0;
- *p = '0';
- return 1;
- }
- /* form a backwards decimal ascii string in <s> */
- while (num > 0) {
- if (digits >= (int)sizeof(s))
- return 0;
- s[digits++] = (num % 10) + '0';
- num = num / 10;
- }
- if (remain < digits) {
- /* not enough room left at <p> to hold number, so fill with
- * '?'
- */
- for (i = 0; i < remain; i++, p++)
- *p = '?';
- return remain;
- }
- /* plug in the decimal ascii string representing the number, by */
- /* reversing the string we just built in <s> */
- i = digits;
- while (i > 0) {
- i--;
- *p = s[i];
- p++;
- }
- return digits;
-}
-
-/**
- * vbuschannel_devinfo_to_string() - format a struct ultra_vbus_deviceinfo
- * to a printable string
+ * vbuschannel_print_devinfo() - format a struct ultra_vbus_deviceinfo
+ * and write it to a seq_file
* @devinfo: the struct ultra_vbus_deviceinfo to format
- * @p: destination string area
- * @remain: size of destination string area in bytes
+ * @seq: seq_file to write to
* @devix: the device index to be included in the output data, or -1 if no
* device index is to be included
*
- * Reads @devInfo, and converts its contents to a printable string at @p,
- * writing at most @remain bytes. Note there is NO '\0' termination
- * written to @p.
- *
- * Return: number of bytes written to @p
+ * Reads @devInfo, and writes it in human-readable notation to @seq.
*/
-static inline int
-vbuschannel_devinfo_to_string(struct ultra_vbus_deviceinfo *devinfo,
- char *p, int remain, int devix)
+static inline void
+vbuschannel_print_devinfo(struct ultra_vbus_deviceinfo *devinfo,
+ struct seq_file *seq, int devix)
{
- char *psrc;
- int nsrc, x, i, pad;
- int chars = 0;
-
- psrc = &devinfo->devtype[0];
- nsrc = sizeof(devinfo->devtype);
- if (vbuschannel_sanitize_buffer(NULL, 0, psrc, nsrc) <= 0)
- return 0;
-
- /* emit device index */
- if (devix >= 0) {
- VBUSCHANNEL_ADDACHAR('[', p, remain, chars);
- x = vbuschannel_itoa(p, remain, devix);
- p += x;
- remain -= x;
- chars += x;
- VBUSCHANNEL_ADDACHAR(']', p, remain, chars);
- } else {
- VBUSCHANNEL_ADDACHAR(' ', p, remain, chars);
- VBUSCHANNEL_ADDACHAR(' ', p, remain, chars);
- VBUSCHANNEL_ADDACHAR(' ', p, remain, chars);
- }
-
- /* emit device type */
- x = vbuschannel_sanitize_buffer(p, remain, psrc, nsrc);
- p += x;
- remain -= x;
- chars += x;
- pad = 15 - x; /* pad device type to be exactly 15 chars */
- for (i = 0; i < pad; i++)
- VBUSCHANNEL_ADDACHAR(' ', p, remain, chars);
- VBUSCHANNEL_ADDACHAR(' ', p, remain, chars);
-
- /* emit driver name */
- psrc = &devinfo->drvname[0];
- nsrc = sizeof(devinfo->drvname);
- x = vbuschannel_sanitize_buffer(p, remain, psrc, nsrc);
- p += x;
- remain -= x;
- chars += x;
- pad = 15 - x; /* pad driver name to be exactly 15 chars */
- for (i = 0; i < pad; i++)
- VBUSCHANNEL_ADDACHAR(' ', p, remain, chars);
- VBUSCHANNEL_ADDACHAR(' ', p, remain, chars);
-
- /* emit strings */
- psrc = &devinfo->infostrs[0];
- nsrc = sizeof(devinfo->infostrs);
- x = vbuschannel_sanitize_buffer(p, remain, psrc, nsrc);
- p += x;
- remain -= x;
- chars += x;
- VBUSCHANNEL_ADDACHAR('\n', p, remain, chars);
-
- return chars;
+ if (!isprint(devinfo->devtype[0]))
+ return; /* uninitialized vbus device entry */
+
+ if (devix >= 0)
+ seq_printf(seq, "[%d]", devix);
+ else
+ /* vbus device entry is for bus or chipset */
+ seq_puts(seq, " ");
+
+ /*
+ * Note: because the s-Par back-end is free to scribble in this area,
+ * we never assume '\0'-termination.
+ */
+ seq_printf(seq, "%-*.*s ", (int)sizeof(devinfo->devtype),
+ (int)sizeof(devinfo->devtype), devinfo->devtype);
+ seq_printf(seq, "%-*.*s ", (int)sizeof(devinfo->drvname),
+ (int)sizeof(devinfo->drvname), devinfo->drvname);
+ seq_printf(seq, "%.*s\n", (int)sizeof(devinfo->infostrs),
+ devinfo->infostrs);
}
struct spar_vbus_headerinfo {
@@ -293,11 +127,6 @@ struct spar_vbus_channel_protocol {
/* describes client device and driver for each device on the bus */
};
-#define VBUS_CH_SIZE_EXACT(MAXDEVICES) \
- (sizeof(ULTRA_VBUS_CHANNEL_PROTOCOL) + ((MAXDEVICES) * \
- sizeof(ULTRA_VBUS_DEVICEINFO)))
-#define VBUS_CH_SIZE(MAXDEVICES) COVER(VBUS_CH_SIZE_EXACT(MAXDEVICES), 4096)
-
#pragma pack(pop)
#endif
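The seq_printf() calls in the new vbuschannel_print_devinfo() lean on a printf feature worth noting: in "%-*.*s", both the field width and the precision come from arguments, and the precision caps how many bytes are read, so a fixed-size channel buffer that may lack '\0' termination can be printed safely. A small userspace illustration of the same conversion:

```c
#include <stdio.h>

int main(void)
{
	/* A fixed-size buffer with no terminating '\0', like channel
	 * memory may be; ".*" limits the bytes read, "-*" pads to width.
	 */
	char devtype[8] = { 'v', 'h', 'b', 'a', 'X', 'Y', 'Z', 'W' };

	printf("%-*.*s|\n", (int)sizeof(devtype), 4, devtype);
	/* prints "vhba    |": 4 bytes read, left-justified in 8 columns */
	return 0;
}
```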
diff --git a/drivers/staging/unisys/visorbus/visorbus_main.c b/drivers/staging/unisys/visorbus/visorbus_main.c
index fec0a54916fe..3457ef338e1e 100644
--- a/drivers/staging/unisys/visorbus/visorbus_main.c
+++ b/drivers/staging/unisys/visorbus/visorbus_main.c
@@ -14,6 +14,7 @@
* details.
*/
+#include <linux/debugfs.h>
#include <linux/uuid.h>
#include "visorbus.h"
@@ -33,6 +34,7 @@ static int visorbus_forcenomatch;
#define POLLJIFFIES_NORMALCHANNEL 10
static int busreg_rc = -ENODEV; /* stores the result from bus registration */
+static struct dentry *visorbus_debugfs_dir;
/*
* DEVICE type attributes
@@ -151,6 +153,8 @@ visorbus_release_busdevice(struct device *xdev)
{
struct visor_device *dev = dev_get_drvdata(xdev);
+ debugfs_remove(dev->debugfs_client_bus_info);
+ debugfs_remove_recursive(dev->debugfs_dir);
kfree(dev);
}
@@ -186,6 +190,7 @@ static ssize_t physaddr_show(struct device *dev, struct device_attribute *attr,
return snprintf(buf, PAGE_SIZE, "0x%llx\n",
visorchannel_get_physaddr(vdev->visorchannel));
}
+static DEVICE_ATTR_RO(physaddr);
static ssize_t nbytes_show(struct device *dev, struct device_attribute *attr,
char *buf)
@@ -197,6 +202,7 @@ static ssize_t nbytes_show(struct device *dev, struct device_attribute *attr,
return snprintf(buf, PAGE_SIZE, "0x%lx\n",
visorchannel_get_nbytes(vdev->visorchannel));
}
+static DEVICE_ATTR_RO(nbytes);
static ssize_t clientpartition_show(struct device *dev,
struct device_attribute *attr, char *buf)
@@ -208,6 +214,7 @@ static ssize_t clientpartition_show(struct device *dev,
return snprintf(buf, PAGE_SIZE, "0x%llx\n",
visorchannel_get_clientpartition(vdev->visorchannel));
}
+static DEVICE_ATTR_RO(clientpartition);
static ssize_t typeguid_show(struct device *dev, struct device_attribute *attr,
char *buf)
@@ -220,6 +227,7 @@ static ssize_t typeguid_show(struct device *dev, struct device_attribute *attr,
return snprintf(buf, PAGE_SIZE, "%s\n",
visorchannel_id(vdev->visorchannel, typeid));
}
+static DEVICE_ATTR_RO(typeguid);
static ssize_t zoneguid_show(struct device *dev, struct device_attribute *attr,
char *buf)
@@ -232,6 +240,7 @@ static ssize_t zoneguid_show(struct device *dev, struct device_attribute *attr,
return snprintf(buf, PAGE_SIZE, "%s\n",
visorchannel_zoneid(vdev->visorchannel, zoneid));
}
+static DEVICE_ATTR_RO(zoneguid);
static ssize_t typename_show(struct device *dev, struct device_attribute *attr,
char *buf)
@@ -250,12 +259,6 @@ static ssize_t typename_show(struct device *dev, struct device_attribute *attr,
drv = to_visor_driver(xdrv);
return snprintf(buf, PAGE_SIZE, "%s\n", drv->channel_types[i - 1].name);
}
-
-static DEVICE_ATTR_RO(physaddr);
-static DEVICE_ATTR_RO(nbytes);
-static DEVICE_ATTR_RO(clientpartition);
-static DEVICE_ATTR_RO(typeguid);
-static DEVICE_ATTR_RO(zoneguid);
static DEVICE_ATTR_RO(typename);
static struct attribute *channel_attrs[] = {
@@ -295,6 +298,7 @@ static ssize_t partition_handle_show(struct device *dev,
return snprintf(buf, PAGE_SIZE, "0x%llx\n", handle);
}
+static DEVICE_ATTR_RO(partition_handle);
static ssize_t partition_guid_show(struct device *dev,
struct device_attribute *attr,
@@ -303,6 +307,7 @@ static ssize_t partition_guid_show(struct device *dev,
return snprintf(buf, PAGE_SIZE, "{%pUb}\n", &vdev->partition_uuid);
}
+static DEVICE_ATTR_RO(partition_guid);
static ssize_t partition_name_show(struct device *dev,
struct device_attribute *attr,
@@ -311,6 +316,7 @@ static ssize_t partition_name_show(struct device *dev,
return snprintf(buf, PAGE_SIZE, "%s\n", vdev->name);
}
+static DEVICE_ATTR_RO(partition_name);
static ssize_t channel_addr_show(struct device *dev,
struct device_attribute *attr,
@@ -320,6 +326,7 @@ static ssize_t channel_addr_show(struct device *dev,
return snprintf(buf, PAGE_SIZE, "0x%llx\n", addr);
}
+static DEVICE_ATTR_RO(channel_addr);
static ssize_t channel_bytes_show(struct device *dev,
struct device_attribute *attr,
@@ -329,6 +336,7 @@ static ssize_t channel_bytes_show(struct device *dev,
return snprintf(buf, PAGE_SIZE, "0x%llx\n", nbytes);
}
+static DEVICE_ATTR_RO(channel_bytes);
static ssize_t channel_id_show(struct device *dev,
struct device_attribute *attr,
@@ -343,77 +351,7 @@ static ssize_t channel_id_show(struct device *dev,
}
return len;
}
-
-static ssize_t client_bus_info_show(struct device *dev,
- struct device_attribute *attr,
- char *buf) {
- struct visor_device *vdev = to_visor_device(dev);
- struct visorchannel *channel = vdev->visorchannel;
-
- int i, shift, remain = PAGE_SIZE;
- unsigned long off;
- char *pos = buf;
- u8 *partition_name;
- struct ultra_vbus_deviceinfo dev_info;
-
- partition_name = "";
- if (channel) {
- if (vdev->name)
- partition_name = vdev->name;
- shift = snprintf(pos, remain,
- "Client device / client driver info for %s partition (vbus #%u):\n",
- partition_name, vdev->chipset_bus_no);
- pos += shift;
- remain -= shift;
- shift = visorchannel_read(channel,
- offsetof(struct
- spar_vbus_channel_protocol,
- chp_info),
- &dev_info, sizeof(dev_info));
- if (shift >= 0) {
- shift = vbuschannel_devinfo_to_string(&dev_info, pos,
- remain, -1);
- pos += shift;
- remain -= shift;
- }
- shift = visorchannel_read(channel,
- offsetof(struct
- spar_vbus_channel_protocol,
- bus_info),
- &dev_info, sizeof(dev_info));
- if (shift >= 0) {
- shift = vbuschannel_devinfo_to_string(&dev_info, pos,
- remain, -1);
- pos += shift;
- remain -= shift;
- }
- off = offsetof(struct spar_vbus_channel_protocol, dev_info);
- i = 0;
- while (off + sizeof(dev_info) <=
- visorchannel_get_nbytes(channel)) {
- shift = visorchannel_read(channel,
- off, &dev_info,
- sizeof(dev_info));
- if (shift >= 0) {
- shift = vbuschannel_devinfo_to_string
- (&dev_info, pos, remain, i);
- pos += shift;
- remain -= shift;
- }
- off += sizeof(dev_info);
- i++;
- }
- }
- return PAGE_SIZE - remain;
-}
-
-static DEVICE_ATTR_RO(partition_handle);
-static DEVICE_ATTR_RO(partition_guid);
-static DEVICE_ATTR_RO(partition_name);
-static DEVICE_ATTR_RO(channel_addr);
-static DEVICE_ATTR_RO(channel_bytes);
static DEVICE_ATTR_RO(channel_id);
-static DEVICE_ATTR_RO(client_bus_info);
static struct attribute *dev_attrs[] = {
&dev_attr_partition_handle.attr,
@@ -422,7 +360,6 @@ static struct attribute *dev_attrs[] = {
&dev_attr_channel_addr.attr,
&dev_attr_channel_bytes.attr,
&dev_attr_channel_id.attr,
- &dev_attr_client_bus_info.attr,
NULL
};
@@ -435,6 +372,66 @@ static const struct attribute_group *visorbus_groups[] = {
NULL
};
+/*
+ * BUS debugfs entries
+ *
+ * define & implement display of debugfs attributes under
+ * /sys/kernel/debug/visorbus/visorbus<n>.
+ */
+
+static int client_bus_info_debugfs_show(struct seq_file *seq, void *v)
+{
+ struct visor_device *vdev = seq->private;
+ struct visorchannel *channel = vdev->visorchannel;
+
+ int i;
+ unsigned long off;
+ struct ultra_vbus_deviceinfo dev_info;
+
+ if (!channel)
+ return 0;
+
+ seq_printf(seq,
+ "Client device / client driver info for %s partition (vbus #%u):\n",
+ ((vdev->name) ? (char *)(vdev->name) : ""),
+ vdev->chipset_bus_no);
+ if (visorchannel_read(channel,
+ offsetof(struct spar_vbus_channel_protocol,
+ chp_info),
+ &dev_info, sizeof(dev_info)) >= 0)
+ vbuschannel_print_devinfo(&dev_info, seq, -1);
+ if (visorchannel_read(channel,
+ offsetof(struct spar_vbus_channel_protocol,
+ bus_info),
+ &dev_info, sizeof(dev_info)) >= 0)
+ vbuschannel_print_devinfo(&dev_info, seq, -1);
+ off = offsetof(struct spar_vbus_channel_protocol, dev_info);
+ i = 0;
+ while (off + sizeof(dev_info) <= visorchannel_get_nbytes(channel)) {
+ if (visorchannel_read(channel, off, &dev_info,
+ sizeof(dev_info)) >= 0)
+ vbuschannel_print_devinfo(&dev_info, seq, i);
+ off += sizeof(dev_info);
+ i++;
+ }
+
+ return 0;
+}
+
+static int client_bus_info_debugfs_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, client_bus_info_debugfs_show,
+ inode->i_private);
+}
+
+static const struct file_operations client_bus_info_debugfs_fops = {
+ .owner = THIS_MODULE,
+ .open = client_bus_info_debugfs_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
static void
dev_periodic_work(unsigned long __opaque)
{
@@ -610,8 +607,8 @@ create_visor_device(struct visor_device *dev)
u32 chipset_bus_no = dev->chipset_bus_no;
u32 chipset_dev_no = dev->chipset_dev_no;
- POSTCODE_LINUX_4(DEVICE_CREATE_ENTRY_PC, chipset_dev_no, chipset_bus_no,
- POSTCODE_SEVERITY_INFO);
+ POSTCODE_LINUX(DEVICE_CREATE_ENTRY_PC, chipset_dev_no, chipset_bus_no,
+ DIAG_SEVERITY_PRINT);
mutex_init(&dev->visordriver_callback_lock);
dev->device.bus = &visorbus_type;
@@ -651,8 +648,8 @@ create_visor_device(struct visor_device *dev)
*/
err = device_add(&dev->device);
if (err < 0) {
- POSTCODE_LINUX_3(DEVICE_ADD_PC, chipset_bus_no,
- DIAG_SEVERITY_ERR);
+ POSTCODE_LINUX(DEVICE_ADD_PC, 0, chipset_bus_no,
+ DIAG_SEVERITY_ERR);
goto err_put;
}
@@ -966,9 +963,10 @@ static int
create_bus_instance(struct visor_device *dev)
{
int id = dev->chipset_bus_no;
+ int err;
struct spar_vbus_headerinfo *hdr_info;
- POSTCODE_LINUX_2(BUS_CREATE_ENTRY_PC, POSTCODE_SEVERITY_INFO);
+ POSTCODE_LINUX(BUS_CREATE_ENTRY_PC, 0, 0, DIAG_SEVERITY_PRINT);
hdr_info = kzalloc(sizeof(*hdr_info), GFP_KERNEL);
if (!hdr_info)
@@ -979,11 +977,26 @@ create_bus_instance(struct visor_device *dev)
dev->device.groups = visorbus_groups;
dev->device.release = visorbus_release_busdevice;
+ dev->debugfs_dir = debugfs_create_dir(dev_name(&dev->device),
+ visorbus_debugfs_dir);
+ if (!dev->debugfs_dir) {
+ err = -ENOMEM;
+ goto err_hdr_info;
+ }
+ dev->debugfs_client_bus_info =
+ debugfs_create_file("client_bus_info", S_IRUSR | S_IRGRP,
+ dev->debugfs_dir, dev,
+ &client_bus_info_debugfs_fops);
+ if (!dev->debugfs_client_bus_info) {
+ err = -ENOMEM;
+ goto err_debugfs_dir;
+ }
+
if (device_register(&dev->device) < 0) {
- POSTCODE_LINUX_3(DEVICE_CREATE_FAILURE_PC, id,
- POSTCODE_SEVERITY_ERR);
- kfree(hdr_info);
- return -ENODEV;
+ POSTCODE_LINUX(DEVICE_CREATE_FAILURE_PC, 0, id,
+ DIAG_SEVERITY_ERR);
+ err = -ENODEV;
+ goto err_debugfs_created;
}
if (get_vbus_header_info(dev->visorchannel, hdr_info) >= 0) {
@@ -998,6 +1011,16 @@ create_bus_instance(struct visor_device *dev)
list_add_tail(&dev->list_all, &list_all_bus_instances);
dev_set_drvdata(&dev->device, dev);
return 0;
+
+err_debugfs_created:
+ debugfs_remove(dev->debugfs_client_bus_info);
+
+err_debugfs_dir:
+ debugfs_remove_recursive(dev->debugfs_dir);
+
+err_hdr_info:
+ kfree(hdr_info);
+ return err;
}
/**
@@ -1069,16 +1092,16 @@ chipset_bus_create(struct visor_device *dev)
int rc;
u32 bus_no = dev->chipset_bus_no;
- POSTCODE_LINUX_3(BUS_CREATE_ENTRY_PC, bus_no, POSTCODE_SEVERITY_INFO);
+ POSTCODE_LINUX(BUS_CREATE_ENTRY_PC, 0, bus_no, DIAG_SEVERITY_PRINT);
rc = create_bus_instance(dev);
- POSTCODE_LINUX_3(BUS_CREATE_EXIT_PC, bus_no, POSTCODE_SEVERITY_INFO);
+ POSTCODE_LINUX(BUS_CREATE_EXIT_PC, 0, bus_no, DIAG_SEVERITY_PRINT);
if (rc < 0)
- POSTCODE_LINUX_3(BUS_CREATE_FAILURE_PC, bus_no,
- POSTCODE_SEVERITY_ERR);
+ POSTCODE_LINUX(BUS_CREATE_FAILURE_PC, 0, bus_no,
+ DIAG_SEVERITY_ERR);
else
- POSTCODE_LINUX_3(CHIPSET_INIT_SUCCESS_PC, bus_no,
- POSTCODE_SEVERITY_INFO);
+ POSTCODE_LINUX(CHIPSET_INIT_SUCCESS_PC, 0, bus_no,
+ DIAG_SEVERITY_PRINT);
bus_create_response(dev, rc);
}
@@ -1097,18 +1120,18 @@ chipset_device_create(struct visor_device *dev_info)
u32 bus_no = dev_info->chipset_bus_no;
u32 dev_no = dev_info->chipset_dev_no;
- POSTCODE_LINUX_4(DEVICE_CREATE_ENTRY_PC, dev_no, bus_no,
- POSTCODE_SEVERITY_INFO);
+ POSTCODE_LINUX(DEVICE_CREATE_ENTRY_PC, dev_no, bus_no,
+ DIAG_SEVERITY_PRINT);
rc = create_visor_device(dev_info);
device_create_response(dev_info, rc);
if (rc < 0)
- POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
- POSTCODE_SEVERITY_ERR);
+ POSTCODE_LINUX(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
+ DIAG_SEVERITY_ERR);
else
- POSTCODE_LINUX_4(DEVICE_CREATE_SUCCESS_PC, dev_no, bus_no,
- POSTCODE_SEVERITY_INFO);
+ POSTCODE_LINUX(DEVICE_CREATE_SUCCESS_PC, dev_no, bus_no,
+ DIAG_SEVERITY_PRINT);
}
void
@@ -1274,12 +1297,17 @@ visorbus_init(void)
{
int err;
- POSTCODE_LINUX_3(DRIVER_ENTRY_PC, 0, POSTCODE_SEVERITY_INFO);
+ POSTCODE_LINUX(DRIVER_ENTRY_PC, 0, 0, DIAG_SEVERITY_PRINT);
+
+ visorbus_debugfs_dir = debugfs_create_dir("visorbus", NULL);
+ if (!visorbus_debugfs_dir)
+ return -ENOMEM;
+
bus_device_info_init(&clientbus_driverinfo, "clientbus", "visorbus");
err = create_bus_type();
if (err < 0) {
- POSTCODE_LINUX_2(BUS_CREATE_ENTRY_PC, DIAG_SEVERITY_ERR);
+ POSTCODE_LINUX(BUS_CREATE_ENTRY_PC, 0, 0, DIAG_SEVERITY_ERR);
goto error;
}
@@ -1288,7 +1316,7 @@ visorbus_init(void)
return 0;
error:
- POSTCODE_LINUX_3(CHIPSET_INIT_FAILURE_PC, err, POSTCODE_SEVERITY_ERR);
+ POSTCODE_LINUX(CHIPSET_INIT_FAILURE_PC, 0, err, DIAG_SEVERITY_ERR);
return err;
}
@@ -1306,6 +1334,7 @@ visorbus_exit(void)
remove_bus_instance(dev);
}
remove_bus_type();
+ debugfs_remove_recursive(visorbus_debugfs_dir);
}
module_param_named(forcematch, visorbus_forcematch, int, S_IRUGO);
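The error handling added to create_bus_instance() follows the kernel's reverse-order unwind convention: each goto label undoes exactly what succeeded before the failing step, and the same create/remove pairing shows up in visorbus_release_busdevice(). A reduced sketch of the debugfs shape used here (demo_* names are placeholders; note that in this kernel generation debugfs creation returns NULL on failure):

```c
#include <linux/debugfs.h>
#include <linux/stat.h>

static struct dentry *demo_dir;
static struct dentry *demo_file;

static int demo_debugfs_setup(const struct file_operations *fops, void *priv)
{
	demo_dir = debugfs_create_dir("demo", NULL);
	if (!demo_dir)
		return -ENOMEM;

	demo_file = debugfs_create_file("info", S_IRUSR | S_IRGRP,
					demo_dir, priv, fops);
	if (!demo_file) {
		/* unwind in reverse order of creation */
		debugfs_remove_recursive(demo_dir);
		return -ENOMEM;
	}
	return 0;
}

static void demo_debugfs_teardown(void)
{
	debugfs_remove(demo_file);
	debugfs_remove_recursive(demo_dir);
}
```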
diff --git a/drivers/staging/unisys/visorbus/visorbus_private.h b/drivers/staging/unisys/visorbus/visorbus_private.h
index 15403fb52847..49bec1763e33 100644
--- a/drivers/staging/unisys/visorbus/visorbus_private.h
+++ b/drivers/staging/unisys/visorbus/visorbus_private.h
@@ -70,9 +70,9 @@ struct visorchannel *visorchannel_create_with_lock(u64 physaddr,
gfp_t gfp, uuid_le guid);
void visorchannel_destroy(struct visorchannel *channel);
int visorchannel_read(struct visorchannel *channel, ulong offset,
- void *local, ulong nbytes);
+ void *dest, ulong nbytes);
int visorchannel_write(struct visorchannel *channel, ulong offset,
- void *local, ulong nbytes);
+ void *dest, ulong nbytes);
u64 visorchannel_get_physaddr(struct visorchannel *channel);
ulong visorchannel_get_nbytes(struct visorchannel *channel);
char *visorchannel_id(struct visorchannel *channel, char *s);
diff --git a/drivers/staging/unisys/visorbus/visorchannel.c b/drivers/staging/unisys/visorbus/visorchannel.c
index 300a65dc5c6c..f51a7258bef0 100644
--- a/drivers/staging/unisys/visorbus/visorchannel.c
+++ b/drivers/staging/unisys/visorbus/visorchannel.c
@@ -23,6 +23,7 @@
#include <linux/io.h>
#include "visorbus.h"
+#include "visorbus_private.h"
#include "controlvmchannel.h"
#define MYDRVNAME "visorchannel"
@@ -127,19 +128,19 @@ EXPORT_SYMBOL_GPL(visorchannel_get_uuid);
int
visorchannel_read(struct visorchannel *channel, ulong offset,
- void *local, ulong nbytes)
+ void *dest, ulong nbytes)
{
if (offset + nbytes > channel->nbytes)
return -EIO;
- memcpy(local, channel->mapped + offset, nbytes);
+ memcpy(dest, channel->mapped + offset, nbytes);
return 0;
}
int
visorchannel_write(struct visorchannel *channel, ulong offset,
- void *local, ulong nbytes)
+ void *dest, ulong nbytes)
{
size_t chdr_size = sizeof(struct channel_header);
size_t copy_size;
@@ -150,10 +151,10 @@ visorchannel_write(struct visorchannel *channel, ulong offset,
if (offset < chdr_size) {
copy_size = min(chdr_size - offset, nbytes);
memcpy(((char *)(&channel->chan_hdr)) + offset,
- local, copy_size);
+ dest, copy_size);
}
- memcpy(channel->mapped + offset, local, nbytes);
+ memcpy(channel->mapped + offset, dest, nbytes);
return 0;
}
@@ -236,8 +237,9 @@ signalremove_inner(struct visorchannel *channel, u32 queue, void *msg)
if (error)
return error;
+ /* No signals to remove; have caller try again. */
if (sig_hdr.head == sig_hdr.tail)
- return -EIO; /* no signals to remove */
+ return -EAGAIN;
sig_hdr.tail = (sig_hdr.tail + 1) % sig_hdr.max_slots;
@@ -299,22 +301,30 @@ EXPORT_SYMBOL_GPL(visorchannel_signalremove);
* Return: boolean indicating whether any messages in the designated
* channel/queue are present
*/
+
+static bool
+queue_empty(struct visorchannel *channel, u32 queue)
+{
+ struct signal_queue_header sig_hdr;
+
+ if (sig_read_header(channel, queue, &sig_hdr))
+ return true;
+
+ return (sig_hdr.head == sig_hdr.tail);
+}
+
bool
visorchannel_signalempty(struct visorchannel *channel, u32 queue)
{
- unsigned long flags = 0;
- struct signal_queue_header sig_hdr;
- bool rc = false;
+ bool rc;
+ unsigned long flags;
- if (channel->needs_lock)
- spin_lock_irqsave(&channel->remove_lock, flags);
+ if (!channel->needs_lock)
+ return queue_empty(channel, queue);
- if (sig_read_header(channel, queue, &sig_hdr))
- rc = true;
- if (sig_hdr.head == sig_hdr.tail)
- rc = true;
- if (channel->needs_lock)
- spin_unlock_irqrestore(&channel->remove_lock, flags);
+ spin_lock_irqsave(&channel->remove_lock, flags);
+ rc = queue_empty(channel, queue);
+ spin_unlock_irqrestore(&channel->remove_lock, flags);
return rc;
}
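The signalempty rework above is a small but instructive refactor: the head == tail test moves into a lock-free helper, the locked path becomes a thin wrapper, and a failed header read now reports the queue as empty instead of testing uninitialized data. The shape generalizes to any optionally-locked ring; a minimal sketch with placeholder names:

```c
#include <linux/spinlock.h>
#include <linux/types.h>

struct demo_ring {
	unsigned int head, tail;
	bool needs_lock;
	spinlock_t lock;
};

/* Lock-free core; callers that need the lock wrap this. */
static bool demo_ring_empty(const struct demo_ring *r)
{
	return r->head == r->tail;
}

static bool demo_ring_empty_locked(struct demo_ring *r)
{
	unsigned long flags;
	bool rc;

	if (!r->needs_lock)
		return demo_ring_empty(r);

	spin_lock_irqsave(&r->lock, flags);
	rc = demo_ring_empty(r);
	spin_unlock_irqrestore(&r->lock, flags);
	return rc;
}
```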
diff --git a/drivers/staging/unisys/visorbus/visorchipset.c b/drivers/staging/unisys/visorbus/visorchipset.c
index 59871495ea85..d7148c351d3f 100644
--- a/drivers/staging/unisys/visorbus/visorchipset.c
+++ b/drivers/staging/unisys/visorbus/visorchipset.c
@@ -29,7 +29,7 @@
#include "visorbus_private.h"
#include "vmcallinterface.h"
-#define CURRENT_FILE_PC VISOR_CHIPSET_PC_visorchipset_main_c
+#define CURRENT_FILE_PC VISOR_BUS_PC_visorchipset_c
#define POLLJIFFIES_CONTROLVMCHANNEL_FAST 1
#define POLLJIFFIES_CONTROLVMCHANNEL_SLOW 100
@@ -57,7 +57,6 @@ visorchipset_open(struct inode *inode, struct file *file)
if (minor_number)
return -ENODEV;
- file->private_data = NULL;
return 0;
}
@@ -499,7 +498,7 @@ controlvm_init_response(struct controlvm_message *msg,
}
}
-static void
+static int
controlvm_respond_chipset_init(struct controlvm_message_header *msg_hdr,
int response,
enum ultra_chipset_feature features)
@@ -508,34 +507,33 @@ controlvm_respond_chipset_init(struct controlvm_message_header *msg_hdr,
controlvm_init_response(&outmsg, msg_hdr, response);
outmsg.cmd.init_chipset.features = features;
- if (visorchannel_signalinsert(controlvm_channel,
- CONTROLVM_QUEUE_REQUEST, &outmsg)) {
- return;
- }
+ return visorchannel_signalinsert(controlvm_channel,
+ CONTROLVM_QUEUE_REQUEST, &outmsg);
}
-static void
+static int
chipset_init(struct controlvm_message *inmsg)
{
static int chipset_inited;
enum ultra_chipset_feature features = 0;
int rc = CONTROLVM_RESP_SUCCESS;
+ int res = 0;
- POSTCODE_LINUX_2(CHIPSET_INIT_ENTRY_PC, POSTCODE_SEVERITY_INFO);
+ POSTCODE_LINUX(CHIPSET_INIT_ENTRY_PC, 0, 0, DIAG_SEVERITY_PRINT);
if (chipset_inited) {
rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;
+ res = -EIO;
goto out_respond;
}
chipset_inited = 1;
- POSTCODE_LINUX_2(CHIPSET_INIT_EXIT_PC, POSTCODE_SEVERITY_INFO);
+ POSTCODE_LINUX(CHIPSET_INIT_EXIT_PC, 0, 0, DIAG_SEVERITY_PRINT);
/*
* Set features to indicate we support parahotplug (if Command
* also supports it).
*/
- features =
- inmsg->cmd.init_chipset.
- features & ULTRA_CHIPSET_FEATURE_PARA_HOTPLUG;
+ features = inmsg->cmd.init_chipset.features &
+ ULTRA_CHIPSET_FEATURE_PARA_HOTPLUG;
/*
* Set the "reply" bit so Command knows this is a
@@ -545,25 +543,25 @@ chipset_init(struct controlvm_message *inmsg)
out_respond:
if (inmsg->hdr.flags.response_expected)
- controlvm_respond_chipset_init(&inmsg->hdr, rc, features);
+ res = controlvm_respond_chipset_init(&inmsg->hdr, rc, features);
+
+ return res;
}
-static void
+static int
controlvm_respond(struct controlvm_message_header *msg_hdr, int response)
{
struct controlvm_message outmsg;
controlvm_init_response(&outmsg, msg_hdr, response);
if (outmsg.hdr.flags.test_message == 1)
- return;
+ return -EINVAL;
- if (visorchannel_signalinsert(controlvm_channel,
- CONTROLVM_QUEUE_REQUEST, &outmsg)) {
- return;
- }
+ return visorchannel_signalinsert(controlvm_channel,
+ CONTROLVM_QUEUE_REQUEST, &outmsg);
}
-static void controlvm_respond_physdev_changestate(
+static int controlvm_respond_physdev_changestate(
struct controlvm_message_header *msg_hdr, int response,
struct spar_segment_state state)
{
@@ -572,10 +570,8 @@ static void controlvm_respond_physdev_changestate(
controlvm_init_response(&outmsg, msg_hdr, response);
outmsg.cmd.device_change_state.state = state;
outmsg.cmd.device_change_state.flags.phys_device = 1;
- if (visorchannel_signalinsert(controlvm_channel,
- CONTROLVM_QUEUE_REQUEST, &outmsg)) {
- return;
- }
+ return visorchannel_signalinsert(controlvm_channel,
+ CONTROLVM_QUEUE_REQUEST, &outmsg);
}
enum crash_obj_type {
@@ -583,74 +579,80 @@ enum crash_obj_type {
CRASH_BUS,
};
-static void
+static int
save_crash_message(struct controlvm_message *msg, enum crash_obj_type typ)
{
u32 local_crash_msg_offset;
u16 local_crash_msg_count;
+ int err;
- if (visorchannel_read(controlvm_channel,
- offsetof(struct spar_controlvm_channel_protocol,
- saved_crash_message_count),
- &local_crash_msg_count, sizeof(u16)) < 0) {
- POSTCODE_LINUX_2(CRASH_DEV_CTRL_RD_FAILURE_PC,
- POSTCODE_SEVERITY_ERR);
- return;
+ err = visorchannel_read(controlvm_channel,
+ offsetof(struct spar_controlvm_channel_protocol,
+ saved_crash_message_count),
+ &local_crash_msg_count, sizeof(u16));
+ if (err) {
+ POSTCODE_LINUX(CRASH_DEV_CTRL_RD_FAILURE_PC, 0, 0,
+ DIAG_SEVERITY_ERR);
+ return err;
}
if (local_crash_msg_count != CONTROLVM_CRASHMSG_MAX) {
- POSTCODE_LINUX_3(CRASH_DEV_COUNT_FAILURE_PC,
- local_crash_msg_count,
- POSTCODE_SEVERITY_ERR);
- return;
+ POSTCODE_LINUX(CRASH_DEV_COUNT_FAILURE_PC, 0,
+ local_crash_msg_count,
+ DIAG_SEVERITY_ERR);
+ return -EIO;
}
- if (visorchannel_read(controlvm_channel,
- offsetof(struct spar_controlvm_channel_protocol,
- saved_crash_message_offset),
- &local_crash_msg_offset, sizeof(u32)) < 0) {
- POSTCODE_LINUX_2(CRASH_DEV_CTRL_RD_FAILURE_PC,
- POSTCODE_SEVERITY_ERR);
- return;
+ err = visorchannel_read(controlvm_channel,
+ offsetof(struct spar_controlvm_channel_protocol,
+ saved_crash_message_offset),
+ &local_crash_msg_offset, sizeof(u32));
+ if (err) {
+ POSTCODE_LINUX(CRASH_DEV_CTRL_RD_FAILURE_PC, 0, 0,
+ DIAG_SEVERITY_ERR);
+ return err;
}
if (typ == CRASH_BUS) {
- if (visorchannel_write(controlvm_channel,
- local_crash_msg_offset,
- msg,
- sizeof(struct controlvm_message)) < 0) {
- POSTCODE_LINUX_2(SAVE_MSG_BUS_FAILURE_PC,
- POSTCODE_SEVERITY_ERR);
- return;
+ err = visorchannel_write(controlvm_channel,
+ local_crash_msg_offset,
+ msg,
+ sizeof(struct controlvm_message));
+ if (err) {
+ POSTCODE_LINUX(SAVE_MSG_BUS_FAILURE_PC, 0, 0,
+ DIAG_SEVERITY_ERR);
+ return err;
}
} else {
local_crash_msg_offset += sizeof(struct controlvm_message);
- if (visorchannel_write(controlvm_channel,
- local_crash_msg_offset,
- msg,
- sizeof(struct controlvm_message)) < 0) {
- POSTCODE_LINUX_2(SAVE_MSG_DEV_FAILURE_PC,
- POSTCODE_SEVERITY_ERR);
- return;
+ err = visorchannel_write(controlvm_channel,
+ local_crash_msg_offset,
+ msg,
+ sizeof(struct controlvm_message));
+ if (err) {
+ POSTCODE_LINUX(SAVE_MSG_DEV_FAILURE_PC, 0, 0,
+ DIAG_SEVERITY_ERR);
+ return err;
}
}
+ return 0;
}
-static void
+static int
bus_responder(enum controlvm_id cmd_id,
struct controlvm_message_header *pending_msg_hdr,
int response)
{
if (!pending_msg_hdr)
- return; /* no controlvm response needed */
+ return -EIO;
if (pending_msg_hdr->id != (u32)cmd_id)
- return;
+ return -EINVAL;
- controlvm_respond(pending_msg_hdr, response);
+ return controlvm_respond(pending_msg_hdr, response);
}
-static void
+static int
device_changestate_responder(enum controlvm_id cmd_id,
struct visor_device *p, int response,
struct spar_segment_state response_state)
@@ -660,9 +662,9 @@ device_changestate_responder(enum controlvm_id cmd_id,
u32 dev_no = p->chipset_dev_no;
if (!p->pending_msg_hdr)
- return; /* no controlvm response needed */
+ return -EIO;
if (p->pending_msg_hdr->id != cmd_id)
- return;
+ return -EINVAL;
controlvm_init_response(&outmsg, p->pending_msg_hdr, response);
@@ -670,175 +672,74 @@ device_changestate_responder(enum controlvm_id cmd_id,
outmsg.cmd.device_change_state.dev_no = dev_no;
outmsg.cmd.device_change_state.state = response_state;
- if (visorchannel_signalinsert(controlvm_channel,
- CONTROLVM_QUEUE_REQUEST, &outmsg))
- return;
+ return visorchannel_signalinsert(controlvm_channel,
+ CONTROLVM_QUEUE_REQUEST, &outmsg);
}
-static void
+static int
device_responder(enum controlvm_id cmd_id,
struct controlvm_message_header *pending_msg_hdr,
int response)
{
if (!pending_msg_hdr)
- return; /* no controlvm response needed */
+ return -EIO;
if (pending_msg_hdr->id != (u32)cmd_id)
- return;
-
- controlvm_respond(pending_msg_hdr, response);
-}
-
-static void
-bus_epilog(struct visor_device *bus_info,
- u32 cmd, struct controlvm_message_header *msg_hdr,
- int response, bool need_response)
-{
- struct controlvm_message_header *pmsg_hdr = NULL;
-
- if (!bus_info) {
- /*
- * relying on a valid passed in response code
- * be lazy and re-use msg_hdr for this failure, is this ok??
- */
- pmsg_hdr = msg_hdr;
- goto out_respond;
- }
-
- if (bus_info->pending_msg_hdr) {
- /* only non-NULL if dev is still waiting on a response */
- response = -CONTROLVM_RESP_ERROR_MESSAGE_ID_INVALID_FOR_CLIENT;
- pmsg_hdr = bus_info->pending_msg_hdr;
- goto out_respond;
- }
-
- if (need_response) {
- pmsg_hdr = kzalloc(sizeof(*pmsg_hdr), GFP_KERNEL);
- if (!pmsg_hdr) {
- POSTCODE_LINUX_4(MALLOC_FAILURE_PC, cmd,
- bus_info->chipset_bus_no,
- POSTCODE_SEVERITY_ERR);
- return;
- }
-
- memcpy(pmsg_hdr, msg_hdr,
- sizeof(struct controlvm_message_header));
- bus_info->pending_msg_hdr = pmsg_hdr;
- }
-
- if (response == CONTROLVM_RESP_SUCCESS) {
- switch (cmd) {
- case CONTROLVM_BUS_CREATE:
- chipset_bus_create(bus_info);
- break;
- case CONTROLVM_BUS_DESTROY:
- chipset_bus_destroy(bus_info);
- break;
- }
- }
-
-out_respond:
- bus_responder(cmd, pmsg_hdr, response);
-}
-
-static void
-device_epilog(struct visor_device *dev_info,
- struct spar_segment_state state, u32 cmd,
- struct controlvm_message_header *msg_hdr, int response,
- bool need_response, bool for_visorbus)
-{
- struct controlvm_message_header *pmsg_hdr = NULL;
-
- if (!dev_info) {
- /*
- * relying on a valid passed in response code
- * be lazy and re-use msg_hdr for this failure, is this ok??
- */
- pmsg_hdr = msg_hdr;
- goto out_respond;
- }
-
- if (dev_info->pending_msg_hdr) {
- /* only non-NULL if dev is still waiting on a response */
- response = -CONTROLVM_RESP_ERROR_MESSAGE_ID_INVALID_FOR_CLIENT;
- pmsg_hdr = dev_info->pending_msg_hdr;
- goto out_respond;
- }
-
- if (need_response) {
- pmsg_hdr = kzalloc(sizeof(*pmsg_hdr), GFP_KERNEL);
- if (!pmsg_hdr) {
- response = -CONTROLVM_RESP_ERROR_KMALLOC_FAILED;
- goto out_respond;
- }
-
- memcpy(pmsg_hdr, msg_hdr,
- sizeof(struct controlvm_message_header));
- dev_info->pending_msg_hdr = pmsg_hdr;
- }
-
- if (response >= 0) {
- switch (cmd) {
- case CONTROLVM_DEVICE_CREATE:
- chipset_device_create(dev_info);
- break;
- case CONTROLVM_DEVICE_CHANGESTATE:
- /* ServerReady / ServerRunning / SegmentStateRunning */
- if (state.alive == segment_state_running.alive &&
- state.operating ==
- segment_state_running.operating) {
- chipset_device_resume(dev_info);
- }
- /* ServerNotReady / ServerLost / SegmentStateStandby */
- else if (state.alive == segment_state_standby.alive &&
- state.operating ==
- segment_state_standby.operating) {
- /*
- * technically this is standby case
- * where server is lost
- */
- chipset_device_pause(dev_info);
- }
- break;
- case CONTROLVM_DEVICE_DESTROY:
- chipset_device_destroy(dev_info);
- break;
- }
- }
+ return -EINVAL;
-out_respond:
- device_responder(cmd, pmsg_hdr, response);
+ return controlvm_respond(pending_msg_hdr, response);
}
-static void
+static int
bus_create(struct controlvm_message *inmsg)
{
struct controlvm_message_packet *cmd = &inmsg->cmd;
+ struct controlvm_message_header *pmsg_hdr = NULL;
u32 bus_no = cmd->create_bus.bus_no;
- int rc = CONTROLVM_RESP_SUCCESS;
struct visor_device *bus_info;
struct visorchannel *visorchannel;
+ int err;
bus_info = visorbus_get_device_by_id(bus_no, BUS_ROOT_DEVICE, NULL);
if (bus_info && (bus_info->state.created == 1)) {
- POSTCODE_LINUX_3(BUS_CREATE_FAILURE_PC, bus_no,
- POSTCODE_SEVERITY_ERR);
- rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;
- goto out_bus_epilog;
+ POSTCODE_LINUX(BUS_CREATE_FAILURE_PC, 0, bus_no,
+ DIAG_SEVERITY_ERR);
+ err = -EEXIST;
+ goto err_respond;
}
+
bus_info = kzalloc(sizeof(*bus_info), GFP_KERNEL);
if (!bus_info) {
- POSTCODE_LINUX_3(BUS_CREATE_FAILURE_PC, bus_no,
- POSTCODE_SEVERITY_ERR);
- rc = -CONTROLVM_RESP_ERROR_KMALLOC_FAILED;
- goto out_bus_epilog;
+ POSTCODE_LINUX(BUS_CREATE_FAILURE_PC, 0, bus_no,
+ DIAG_SEVERITY_ERR);
+ err = -ENOMEM;
+ goto err_respond;
}
INIT_LIST_HEAD(&bus_info->list_all);
bus_info->chipset_bus_no = bus_no;
bus_info->chipset_dev_no = BUS_ROOT_DEVICE;
- POSTCODE_LINUX_3(BUS_CREATE_ENTRY_PC, bus_no, POSTCODE_SEVERITY_INFO);
+ POSTCODE_LINUX(BUS_CREATE_ENTRY_PC, 0, bus_no, DIAG_SEVERITY_PRINT);
+
+ if (uuid_le_cmp(cmd->create_bus.bus_inst_uuid, spar_siovm_uuid) == 0)
+ save_crash_message(inmsg, CRASH_BUS);
+
+ if (inmsg->hdr.flags.response_expected == 1) {
+ pmsg_hdr = kzalloc(sizeof(*pmsg_hdr),
+ GFP_KERNEL);
+ if (!pmsg_hdr) {
+ POSTCODE_LINUX(MALLOC_FAILURE_PC, cmd,
+ bus_info->chipset_bus_no,
+ DIAG_SEVERITY_ERR);
+ err = -ENOMEM;
+ goto err_free_bus_info;
+ }
+
+ memcpy(pmsg_hdr, &inmsg->hdr,
+ sizeof(struct controlvm_message_header));
+ bus_info->pending_msg_hdr = pmsg_hdr;
+ }
visorchannel = visorchannel_create(cmd->create_bus.channel_addr,
cmd->create_bus.channel_bytes,
@@ -846,89 +747,138 @@ bus_create(struct controlvm_message *inmsg)
cmd->create_bus.bus_data_type_uuid);
if (!visorchannel) {
- POSTCODE_LINUX_3(BUS_CREATE_FAILURE_PC, bus_no,
- POSTCODE_SEVERITY_ERR);
- rc = -CONTROLVM_RESP_ERROR_KMALLOC_FAILED;
- kfree(bus_info);
- bus_info = NULL;
- goto out_bus_epilog;
+ POSTCODE_LINUX(BUS_CREATE_FAILURE_PC, 0, bus_no,
+ DIAG_SEVERITY_ERR);
+ err = -ENOMEM;
+ goto err_free_pending_msg;
}
bus_info->visorchannel = visorchannel;
- if (uuid_le_cmp(cmd->create_bus.bus_inst_uuid, spar_siovm_uuid) == 0)
- save_crash_message(inmsg, CRASH_BUS);
- POSTCODE_LINUX_3(BUS_CREATE_EXIT_PC, bus_no, POSTCODE_SEVERITY_INFO);
+ /* Response will be handled by chipset_bus_create */
+ chipset_bus_create(bus_info);
+
+ POSTCODE_LINUX(BUS_CREATE_EXIT_PC, 0, bus_no, DIAG_SEVERITY_PRINT);
+ return 0;
+
+err_free_pending_msg:
+ kfree(bus_info->pending_msg_hdr);
+
+err_free_bus_info:
+ kfree(bus_info);
-out_bus_epilog:
- bus_epilog(bus_info, CONTROLVM_BUS_CREATE, &inmsg->hdr,
- rc, inmsg->hdr.flags.response_expected == 1);
+err_respond:
+ if (inmsg->hdr.flags.response_expected == 1)
+ bus_responder(inmsg->hdr.id, &inmsg->hdr, err);
+ return err;
}
-static void
+static int
bus_destroy(struct controlvm_message *inmsg)
{
struct controlvm_message_packet *cmd = &inmsg->cmd;
+ struct controlvm_message_header *pmsg_hdr = NULL;
u32 bus_no = cmd->destroy_bus.bus_no;
struct visor_device *bus_info;
- int rc = CONTROLVM_RESP_SUCCESS;
+ int err;
bus_info = visorbus_get_device_by_id(bus_no, BUS_ROOT_DEVICE, NULL);
- if (!bus_info)
- rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
- else if (bus_info->state.created == 0)
- rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;
+ if (!bus_info) {
+ err = -ENODEV;
+ goto err_respond;
+ }
+ if (bus_info->state.created == 0) {
+ err = -ENOENT;
+ goto err_respond;
+ }
+ if (bus_info->pending_msg_hdr) {
+ /* only non-NULL if dev is still waiting on a response */
+ err = -EEXIST;
+ goto err_respond;
+ }
+ if (inmsg->hdr.flags.response_expected == 1) {
+ pmsg_hdr = kzalloc(sizeof(*pmsg_hdr), GFP_KERNEL);
+ if (!pmsg_hdr) {
+ POSTCODE_LINUX(MALLOC_FAILURE_PC, cmd,
+ bus_info->chipset_bus_no,
+ DIAG_SEVERITY_ERR);
+ err = -ENOMEM;
+ goto err_respond;
+ }
- bus_epilog(bus_info, CONTROLVM_BUS_DESTROY, &inmsg->hdr,
- rc, inmsg->hdr.flags.response_expected == 1);
+ memcpy(pmsg_hdr, &inmsg->hdr,
+ sizeof(struct controlvm_message_header));
+ bus_info->pending_msg_hdr = pmsg_hdr;
+ }
- /* bus_info is freed as part of the busdevice_release function */
+ /* Response will be handled by chipset_bus_destroy */
+ chipset_bus_destroy(bus_info);
+ return 0;
+
+err_respond:
+ if (inmsg->hdr.flags.response_expected == 1)
+ bus_responder(inmsg->hdr.id, &inmsg->hdr, err);
+ return err;
}
-static void
+static int
bus_configure(struct controlvm_message *inmsg,
struct parser_context *parser_ctx)
{
struct controlvm_message_packet *cmd = &inmsg->cmd;
u32 bus_no;
struct visor_device *bus_info;
- int rc = CONTROLVM_RESP_SUCCESS;
+ int err = 0;
bus_no = cmd->configure_bus.bus_no;
- POSTCODE_LINUX_3(BUS_CONFIGURE_ENTRY_PC, bus_no,
- POSTCODE_SEVERITY_INFO);
+ POSTCODE_LINUX(BUS_CONFIGURE_ENTRY_PC, 0, bus_no,
+ DIAG_SEVERITY_PRINT);
bus_info = visorbus_get_device_by_id(bus_no, BUS_ROOT_DEVICE, NULL);
if (!bus_info) {
- POSTCODE_LINUX_3(BUS_CONFIGURE_FAILURE_PC, bus_no,
- POSTCODE_SEVERITY_ERR);
- rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
+ POSTCODE_LINUX(BUS_CONFIGURE_FAILURE_PC, 0, bus_no,
+ DIAG_SEVERITY_ERR);
+ err = -EINVAL;
+ goto err_respond;
} else if (bus_info->state.created == 0) {
- POSTCODE_LINUX_3(BUS_CONFIGURE_FAILURE_PC, bus_no,
- POSTCODE_SEVERITY_ERR);
- rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
+ POSTCODE_LINUX(BUS_CONFIGURE_FAILURE_PC, 0, bus_no,
+ DIAG_SEVERITY_ERR);
+ err = -EINVAL;
+ goto err_respond;
} else if (bus_info->pending_msg_hdr) {
- POSTCODE_LINUX_3(BUS_CONFIGURE_FAILURE_PC, bus_no,
- POSTCODE_SEVERITY_ERR);
- rc = -CONTROLVM_RESP_ERROR_MESSAGE_ID_INVALID_FOR_CLIENT;
- } else {
- visorchannel_set_clientpartition
- (bus_info->visorchannel,
- cmd->configure_bus.guest_handle);
- bus_info->partition_uuid = parser_id_get(parser_ctx);
- parser_param_start(parser_ctx, PARSERSTRING_NAME);
- bus_info->name = parser_string_get(parser_ctx);
-
- POSTCODE_LINUX_3(BUS_CONFIGURE_EXIT_PC, bus_no,
- POSTCODE_SEVERITY_INFO);
+ POSTCODE_LINUX(BUS_CONFIGURE_FAILURE_PC, 0, bus_no,
+ DIAG_SEVERITY_ERR);
+ err = -EIO;
+ goto err_respond;
}
- bus_epilog(bus_info, CONTROLVM_BUS_CONFIGURE, &inmsg->hdr,
- rc, inmsg->hdr.flags.response_expected == 1);
+
+ err = visorchannel_set_clientpartition
+ (bus_info->visorchannel,
+ cmd->configure_bus.guest_handle);
+ if (err)
+ goto err_respond;
+
+ bus_info->partition_uuid = parser_id_get(parser_ctx);
+ parser_param_start(parser_ctx, PARSERSTRING_NAME);
+ bus_info->name = parser_string_get(parser_ctx);
+
+ POSTCODE_LINUX(BUS_CONFIGURE_EXIT_PC, 0, bus_no,
+ DIAG_SEVERITY_PRINT);
+
+ if (inmsg->hdr.flags.response_expected == 1)
+ bus_responder(inmsg->hdr.id, &inmsg->hdr, err);
+ return 0;
+
+err_respond:
+ if (inmsg->hdr.flags.response_expected == 1)
+ bus_responder(inmsg->hdr.id, &inmsg->hdr, err);
+ return err;
}
static void
my_device_create(struct controlvm_message *inmsg)
{
struct controlvm_message_packet *cmd = &inmsg->cmd;
+ struct controlvm_message_header *pmsg_hdr = NULL;
u32 bus_no = cmd->create_device.bus_no;
u32 dev_no = cmd->create_device.dev_no;
struct visor_device *dev_info = NULL;
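
Each handler in this file now repeats the same deferral idiom: reject the request if a response is already pending, copy the request header, and park it on the device so the eventual chipset_*() callback can answer. Condensed as a sketch (the helper name is hypothetical; the patch open-codes this in every handler):

	/* Hypothetical helper capturing the recurring idiom: stash a copy
	 * of the request header for the later chipset_*() response.
	 */
	static int save_pending_msg(struct visor_device *dev,
				    struct controlvm_message_header *hdr)
	{
		struct controlvm_message_header *copy;

		/* Only non-NULL if the device still owes a response. */
		if (dev->pending_msg_hdr)
			return -EEXIST;

		copy = kmemdup(hdr, sizeof(*hdr), GFP_KERNEL);
		if (!copy)
			return -ENOMEM;

		dev->pending_msg_hdr = copy;
		return 0;
	}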
@@ -938,31 +888,31 @@ my_device_create(struct controlvm_message *inmsg)
bus_info = visorbus_get_device_by_id(bus_no, BUS_ROOT_DEVICE, NULL);
if (!bus_info) {
- POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
- POSTCODE_SEVERITY_ERR);
+ POSTCODE_LINUX(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
+ DIAG_SEVERITY_ERR);
rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
goto out_respond;
}
if (bus_info->state.created == 0) {
- POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
- POSTCODE_SEVERITY_ERR);
+ POSTCODE_LINUX(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
+ DIAG_SEVERITY_ERR);
rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
goto out_respond;
}
dev_info = visorbus_get_device_by_id(bus_no, dev_no, NULL);
if (dev_info && (dev_info->state.created == 1)) {
- POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
- POSTCODE_SEVERITY_ERR);
+ POSTCODE_LINUX(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
+ DIAG_SEVERITY_ERR);
rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;
goto out_respond;
}
dev_info = kzalloc(sizeof(*dev_info), GFP_KERNEL);
if (!dev_info) {
- POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
- POSTCODE_SEVERITY_ERR);
+ POSTCODE_LINUX(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
+ DIAG_SEVERITY_ERR);
rc = -CONTROLVM_RESP_ERROR_KMALLOC_FAILED;
goto out_respond;
}
@@ -974,8 +924,8 @@ my_device_create(struct controlvm_message *inmsg)
/* not sure where the best place to set the 'parent' */
dev_info->device.parent = &bus_info->device;
- POSTCODE_LINUX_4(DEVICE_CREATE_ENTRY_PC, dev_no, bus_no,
- POSTCODE_SEVERITY_INFO);
+ POSTCODE_LINUX(DEVICE_CREATE_ENTRY_PC, dev_no, bus_no,
+ DIAG_SEVERITY_PRINT);
visorchannel =
visorchannel_create_with_lock(cmd->create_device.channel_addr,
@@ -984,12 +934,10 @@ my_device_create(struct controlvm_message *inmsg)
cmd->create_device.data_type_uuid);
if (!visorchannel) {
- POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
- POSTCODE_SEVERITY_ERR);
+ POSTCODE_LINUX(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
+ DIAG_SEVERITY_ERR);
rc = -CONTROLVM_RESP_ERROR_KMALLOC_FAILED;
- kfree(dev_info);
- dev_info = NULL;
- goto out_respond;
+ goto out_free_dev_info;
}
dev_info->visorchannel = visorchannel;
dev_info->channel_type_guid = cmd->create_device.data_type_uuid;
@@ -997,18 +945,36 @@ my_device_create(struct controlvm_message *inmsg)
spar_vhba_channel_protocol_uuid) == 0)
save_crash_message(inmsg, CRASH_DEV);
- POSTCODE_LINUX_4(DEVICE_CREATE_EXIT_PC, dev_no, bus_no,
- POSTCODE_SEVERITY_INFO);
+ if (inmsg->hdr.flags.response_expected == 1) {
+ pmsg_hdr = kzalloc(sizeof(*pmsg_hdr), GFP_KERNEL);
+ if (!pmsg_hdr) {
+ rc = -CONTROLVM_RESP_ERROR_KMALLOC_FAILED;
+ goto out_free_dev_info;
+ }
+
+ memcpy(pmsg_hdr, &inmsg->hdr,
+ sizeof(struct controlvm_message_header));
+ dev_info->pending_msg_hdr = pmsg_hdr;
+ }
+ /* Response will be sent by chipset_device_create */
+ chipset_device_create(dev_info);
+ POSTCODE_LINUX(DEVICE_CREATE_EXIT_PC, dev_no, bus_no,
+ DIAG_SEVERITY_PRINT);
+ return;
+
+out_free_dev_info:
+ kfree(dev_info);
+
out_respond:
- device_epilog(dev_info, segment_state_running,
- CONTROLVM_DEVICE_CREATE, &inmsg->hdr, rc,
- inmsg->hdr.flags.response_expected == 1, 1);
+ if (inmsg->hdr.flags.response_expected == 1)
+ device_responder(inmsg->hdr.id, &inmsg->hdr, rc);
}
static void
my_device_changestate(struct controlvm_message *inmsg)
{
struct controlvm_message_packet *cmd = &inmsg->cmd;
+ struct controlvm_message_header *pmsg_hdr = NULL;
u32 bus_no = cmd->device_change_state.bus_no;
u32 dev_no = cmd->device_change_state.dev_no;
struct spar_segment_state state = cmd->device_change_state.state;
@@ -1017,39 +983,97 @@ my_device_changestate(struct controlvm_message *inmsg)
dev_info = visorbus_get_device_by_id(bus_no, dev_no, NULL);
if (!dev_info) {
- POSTCODE_LINUX_4(DEVICE_CHANGESTATE_FAILURE_PC, dev_no, bus_no,
- POSTCODE_SEVERITY_ERR);
+ POSTCODE_LINUX(DEVICE_CHANGESTATE_FAILURE_PC, dev_no, bus_no,
+ DIAG_SEVERITY_ERR);
rc = -CONTROLVM_RESP_ERROR_DEVICE_INVALID;
- } else if (dev_info->state.created == 0) {
- POSTCODE_LINUX_4(DEVICE_CHANGESTATE_FAILURE_PC, dev_no, bus_no,
- POSTCODE_SEVERITY_ERR);
+ goto err_respond;
+ }
+ if (dev_info->state.created == 0) {
+ POSTCODE_LINUX(DEVICE_CHANGESTATE_FAILURE_PC, dev_no, bus_no,
+ DIAG_SEVERITY_ERR);
rc = -CONTROLVM_RESP_ERROR_DEVICE_INVALID;
+ goto err_respond;
+ }
+ if (dev_info->pending_msg_hdr) {
+ /* only non-NULL if dev is still waiting on a response */
+ rc = -CONTROLVM_RESP_ERROR_MESSAGE_ID_INVALID_FOR_CLIENT;
+ goto err_respond;
}
- if ((rc >= CONTROLVM_RESP_SUCCESS) && dev_info)
- device_epilog(dev_info, state,
- CONTROLVM_DEVICE_CHANGESTATE, &inmsg->hdr, rc,
- inmsg->hdr.flags.response_expected == 1, 1);
+ if (inmsg->hdr.flags.response_expected == 1) {
+ pmsg_hdr = kzalloc(sizeof(*pmsg_hdr), GFP_KERNEL);
+ if (!pmsg_hdr) {
+ rc = -CONTROLVM_RESP_ERROR_KMALLOC_FAILED;
+ goto err_respond;
+ }
+
+ memcpy(pmsg_hdr, &inmsg->hdr,
+ sizeof(struct controlvm_message_header));
+ dev_info->pending_msg_hdr = pmsg_hdr;
+ }
+
+ if (state.alive == segment_state_running.alive &&
+ state.operating == segment_state_running.operating)
+ /* Response will be sent from chipset_device_resume */
+ chipset_device_resume(dev_info);
+ /* ServerNotReady / ServerLost / SegmentStateStandby */
+ else if (state.alive == segment_state_standby.alive &&
+ state.operating == segment_state_standby.operating)
+ /*
+ * Technically this is the standby case, where the server is lost.
+ * Response will be sent from chipset_device_pause.
+ */
+ chipset_device_pause(dev_info);
+
+ return;
+
+err_respond:
+ if (inmsg->hdr.flags.response_expected == 1)
+ device_responder(inmsg->hdr.id, &inmsg->hdr, rc);
}
static void
my_device_destroy(struct controlvm_message *inmsg)
{
struct controlvm_message_packet *cmd = &inmsg->cmd;
+ struct controlvm_message_header *pmsg_hdr = NULL;
u32 bus_no = cmd->destroy_device.bus_no;
u32 dev_no = cmd->destroy_device.dev_no;
struct visor_device *dev_info;
int rc = CONTROLVM_RESP_SUCCESS;
dev_info = visorbus_get_device_by_id(bus_no, dev_no, NULL);
- if (!dev_info)
+ if (!dev_info) {
rc = -CONTROLVM_RESP_ERROR_DEVICE_INVALID;
- else if (dev_info->state.created == 0)
+ goto err_respond;
+ }
+ if (dev_info->state.created == 0) {
rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;
+ goto err_respond;
+ }
- if ((rc >= CONTROLVM_RESP_SUCCESS) && dev_info)
- device_epilog(dev_info, segment_state_running,
- CONTROLVM_DEVICE_DESTROY, &inmsg->hdr, rc,
- inmsg->hdr.flags.response_expected == 1, 1);
+ if (dev_info->pending_msg_hdr) {
+ /* only non-NULL if dev is still waiting on a response */
+ rc = -CONTROLVM_RESP_ERROR_MESSAGE_ID_INVALID_FOR_CLIENT;
+ goto err_respond;
+ }
+ if (inmsg->hdr.flags.response_expected == 1) {
+ pmsg_hdr = kzalloc(sizeof(*pmsg_hdr), GFP_KERNEL);
+ if (!pmsg_hdr) {
+ rc = -CONTROLVM_RESP_ERROR_KMALLOC_FAILED;
+ goto err_respond;
+ }
+
+ memcpy(pmsg_hdr, &inmsg->hdr,
+ sizeof(struct controlvm_message_header));
+ dev_info->pending_msg_hdr = pmsg_hdr;
+ }
+
+ chipset_device_destroy(dev_info);
+ return;
+
+err_respond:
+ if (inmsg->hdr.flags.response_expected == 1)
+ device_responder(inmsg->hdr.id, &inmsg->hdr, rc);
}
/**
@@ -1075,7 +1099,6 @@ initialize_controlvm_payload_info(u64 phys_addr, u64 offset, u32 bytes,
if (!info)
return -CONTROLVM_RESP_ERROR_PAYLOAD_INVALID;
- memset(info, 0, sizeof(struct visor_controlvm_payload_info));
if ((offset == 0) || (bytes == 0))
return -CONTROLVM_RESP_ERROR_PAYLOAD_INVALID;
@@ -1083,6 +1106,7 @@ initialize_controlvm_payload_info(u64 phys_addr, u64 offset, u32 bytes,
if (!payload)
return -CONTROLVM_RESP_ERROR_IOREMAP_FAILED;
+ memset(info, 0, sizeof(struct visor_controlvm_payload_info));
info->offset = offset;
info->bytes = bytes;
info->ptr = payload;
@@ -1111,16 +1135,16 @@ initialize_controlvm_payload(void)
offsetof(struct spar_controlvm_channel_protocol,
request_payload_offset),
&payload_offset, sizeof(payload_offset)) < 0) {
- POSTCODE_LINUX_2(CONTROLVM_INIT_FAILURE_PC,
- POSTCODE_SEVERITY_ERR);
+ POSTCODE_LINUX(CONTROLVM_INIT_FAILURE_PC, 0, 0,
+ DIAG_SEVERITY_ERR);
return;
}
if (visorchannel_read(controlvm_channel,
offsetof(struct spar_controlvm_channel_protocol,
request_payload_bytes),
&payload_bytes, sizeof(payload_bytes)) < 0) {
- POSTCODE_LINUX_2(CONTROLVM_INIT_FAILURE_PC,
- POSTCODE_SEVERITY_ERR);
+ POSTCODE_LINUX(CONTROLVM_INIT_FAILURE_PC, 0, 0,
+ DIAG_SEVERITY_ERR);
return;
}
initialize_controlvm_payload_info(phys_addr,
@@ -1317,7 +1341,7 @@ static struct attribute *visorchipset_install_attrs[] = {
NULL
};
-static struct attribute_group visorchipset_install_group = {
+static const struct attribute_group visorchipset_install_group = {
.name = "install",
.attrs = visorchipset_install_attrs
};
@@ -1540,7 +1564,7 @@ setup_crash_devices_work_queue(struct work_struct *work)
u32 local_crash_msg_offset;
u16 local_crash_msg_count;
- POSTCODE_LINUX_2(CRASH_DEV_ENTRY_PC, POSTCODE_SEVERITY_INFO);
+ POSTCODE_LINUX(CRASH_DEV_ENTRY_PC, 0, 0, DIAG_SEVERITY_PRINT);
/* send init chipset msg */
msg.hdr.id = CONTROLVM_CHIPSET_INIT;
@@ -1554,15 +1578,15 @@ setup_crash_devices_work_queue(struct work_struct *work)
offsetof(struct spar_controlvm_channel_protocol,
saved_crash_message_count),
&local_crash_msg_count, sizeof(u16)) < 0) {
- POSTCODE_LINUX_2(CRASH_DEV_CTRL_RD_FAILURE_PC,
- POSTCODE_SEVERITY_ERR);
+ POSTCODE_LINUX(CRASH_DEV_CTRL_RD_FAILURE_PC, 0, 0,
+ DIAG_SEVERITY_ERR);
return;
}
if (local_crash_msg_count != CONTROLVM_CRASHMSG_MAX) {
- POSTCODE_LINUX_3(CRASH_DEV_COUNT_FAILURE_PC,
- local_crash_msg_count,
- POSTCODE_SEVERITY_ERR);
+ POSTCODE_LINUX(CRASH_DEV_COUNT_FAILURE_PC, 0,
+ local_crash_msg_count,
+ DIAG_SEVERITY_ERR);
return;
}
@@ -1571,8 +1595,8 @@ setup_crash_devices_work_queue(struct work_struct *work)
offsetof(struct spar_controlvm_channel_protocol,
saved_crash_message_offset),
&local_crash_msg_offset, sizeof(u32)) < 0) {
- POSTCODE_LINUX_2(CRASH_DEV_CTRL_RD_FAILURE_PC,
- POSTCODE_SEVERITY_ERR);
+ POSTCODE_LINUX(CRASH_DEV_CTRL_RD_FAILURE_PC, 0, 0,
+ DIAG_SEVERITY_ERR);
return;
}
@@ -1581,8 +1605,8 @@ setup_crash_devices_work_queue(struct work_struct *work)
local_crash_msg_offset,
&local_crash_bus_msg,
sizeof(struct controlvm_message)) < 0) {
- POSTCODE_LINUX_2(CRASH_DEV_RD_BUS_FAIULRE_PC,
- POSTCODE_SEVERITY_ERR);
+ POSTCODE_LINUX(CRASH_DEV_RD_BUS_FAILURE_PC, 0, 0,
+ DIAG_SEVERITY_ERR);
return;
}
@@ -1592,8 +1616,8 @@ setup_crash_devices_work_queue(struct work_struct *work)
sizeof(struct controlvm_message),
&local_crash_dev_msg,
sizeof(struct controlvm_message)) < 0) {
- POSTCODE_LINUX_2(CRASH_DEV_RD_DEV_FAIULRE_PC,
- POSTCODE_SEVERITY_ERR);
+ POSTCODE_LINUX(CRASH_DEV_RD_DEV_FAILURE_PC, 0, 0,
+ DIAG_SEVERITY_ERR);
return;
}
@@ -1601,8 +1625,8 @@ setup_crash_devices_work_queue(struct work_struct *work)
if (local_crash_bus_msg.cmd.create_bus.channel_addr) {
bus_create(&local_crash_bus_msg);
} else {
- POSTCODE_LINUX_2(CRASH_DEV_BUS_NULL_FAILURE_PC,
- POSTCODE_SEVERITY_ERR);
+ POSTCODE_LINUX(CRASH_DEV_BUS_NULL_FAILURE_PC, 0, 0,
+ DIAG_SEVERITY_ERR);
return;
}
@@ -1610,11 +1634,11 @@ setup_crash_devices_work_queue(struct work_struct *work)
if (local_crash_dev_msg.cmd.create_device.channel_addr) {
my_device_create(&local_crash_dev_msg);
} else {
- POSTCODE_LINUX_2(CRASH_DEV_DEV_NULL_FAILURE_PC,
- POSTCODE_SEVERITY_ERR);
+ POSTCODE_LINUX(CRASH_DEV_DEV_NULL_FAILURE_PC, 0, 0,
+ DIAG_SEVERITY_ERR);
return;
}
- POSTCODE_LINUX_2(CRASH_DEV_EXIT_PC, POSTCODE_SEVERITY_INFO);
+ POSTCODE_LINUX(CRASH_DEV_EXIT_PC, 0, 0, DIAG_SEVERITY_PRINT);
}
void
@@ -2119,8 +2143,6 @@ visorchipset_init(struct acpi_device *acpi_device)
if (!addr)
goto error;
- memset(&controlvm_payload_info, 0, sizeof(controlvm_payload_info));
-
controlvm_channel = visorchannel_create_with_lock(addr, 0,
GFP_KERNEL, uuid);
if (!controlvm_channel)
@@ -2152,11 +2174,12 @@ visorchipset_init(struct acpi_device *acpi_device)
visorchipset_platform_device.dev.devt = major_dev;
if (platform_device_register(&visorchipset_platform_device) < 0) {
- POSTCODE_LINUX_2(DEVICE_REGISTER_FAILURE_PC, DIAG_SEVERITY_ERR);
+ POSTCODE_LINUX(DEVICE_REGISTER_FAILURE_PC, 0, 0,
+ DIAG_SEVERITY_ERR);
err = -ENODEV;
goto error_cancel_work;
}
- POSTCODE_LINUX_2(CHIPSET_INIT_SUCCESS_PC, POSTCODE_SEVERITY_INFO);
+ POSTCODE_LINUX(CHIPSET_INIT_SUCCESS_PC, 0, 0, DIAG_SEVERITY_PRINT);
err = visorbus_init();
if (err < 0)
@@ -2178,14 +2201,14 @@ error_destroy_channel:
visorchannel_destroy(controlvm_channel);
error:
- POSTCODE_LINUX_3(CHIPSET_INIT_FAILURE_PC, err, POSTCODE_SEVERITY_ERR);
+ POSTCODE_LINUX(CHIPSET_INIT_FAILURE_PC, 0, err, DIAG_SEVERITY_ERR);
return err;
}
static int
visorchipset_exit(struct acpi_device *acpi_device)
{
- POSTCODE_LINUX_2(DRIVER_EXIT_PC, POSTCODE_SEVERITY_INFO);
+ POSTCODE_LINUX(DRIVER_EXIT_PC, 0, 0, DIAG_SEVERITY_PRINT);
visorbus_exit();
@@ -2196,7 +2219,7 @@ visorchipset_exit(struct acpi_device *acpi_device)
visorchipset_file_cleanup(visorchipset_platform_device.dev.devt);
platform_device_unregister(&visorchipset_platform_device);
- POSTCODE_LINUX_2(DRIVER_EXIT_PC, POSTCODE_SEVERITY_INFO);
+ POSTCODE_LINUX(DRIVER_EXIT_PC, 0, 0, DIAG_SEVERITY_PRINT);
return 0;
}
diff --git a/drivers/staging/unisys/visorbus/vmcallinterface.h b/drivers/staging/unisys/visorbus/vmcallinterface.h
index 86e695d5a441..674a88b657d3 100644
--- a/drivers/staging/unisys/visorbus/vmcallinterface.h
+++ b/drivers/staging/unisys/visorbus/vmcallinterface.h
@@ -92,15 +92,6 @@ enum vmcall_monitor_interface_method_tuple { /* VMCALL identification tuples */
#define ISSUE_IO_VMCALL(method, param, result) \
(result = unisys_vmcall(method, (param) & 0xFFFFFFFF, \
(param) >> 32))
-#define ISSUE_IO_EXTENDED_VMCALL(method, param1, param2, param3) \
- unisys_extended_vmcall(method, param1, param2, param3)
-
- /* The following uses VMCALL_POST_CODE_LOGEVENT interface but is currently
- * not used much
- */
-#define ISSUE_IO_VMCALL_POSTCODE_SEVERITY(postcode, severity) \
- ISSUE_IO_EXTENDED_VMCALL(VMCALL_POST_CODE_LOGEVENT, severity, \
- MDS_APPOS, postcode)
/* Structures for IO VMCALLs */
@@ -117,118 +108,53 @@ struct vmcall_io_controlvm_addr_params {
/******* INFO ON ISSUE_POSTCODE_LINUX() BELOW *******/
enum driver_pc { /* POSTCODE driver identifier tuples */
- /* visorchipset driver files */
- VISOR_CHIPSET_PC = 0xA0,
- VISOR_CHIPSET_PC_controlvm_c = 0xA1,
- VISOR_CHIPSET_PC_controlvm_cm2 = 0xA2,
- VISOR_CHIPSET_PC_controlvm_direct_c = 0xA3,
- VISOR_CHIPSET_PC_file_c = 0xA4,
- VISOR_CHIPSET_PC_parser_c = 0xA5,
- VISOR_CHIPSET_PC_testing_c = 0xA6,
- VISOR_CHIPSET_PC_visorchipset_main_c = 0xA7,
- VISOR_CHIPSET_PC_visorswitchbus_c = 0xA8,
/* visorbus driver files */
- VISOR_BUS_PC = 0xB0,
- VISOR_BUS_PC_businst_attr_c = 0xB1,
- VISOR_BUS_PC_channel_attr_c = 0xB2,
- VISOR_BUS_PC_devmajorminor_attr_c = 0xB3,
- VISOR_BUS_PC_visorbus_main_c = 0xB4,
- /* visorclientbus driver files */
- VISOR_CLIENT_BUS_PC = 0xC0,
- VISOR_CLIENT_BUS_PC_visorclientbus_main_c = 0xC1,
- /* virt hba driver files */
- VIRT_HBA_PC = 0xC2,
- VIRT_HBA_PC_virthba_c = 0xC3,
- /* virtpci driver files */
- VIRT_PCI_PC = 0xC4,
- VIRT_PCI_PC_virtpci_c = 0xC5,
- /* virtnic driver files */
- VIRT_NIC_PC = 0xC6,
- VIRT_NIC_P_virtnic_c = 0xC7,
- /* uislib driver files */
- UISLIB_PC = 0xD0,
- UISLIB_PC_uislib_c = 0xD1,
- UISLIB_PC_uisqueue_c = 0xD2,
- /* 0xD3 RESERVED */
- UISLIB_PC_uisutils_c = 0xD4,
+ VISOR_BUS_PC = 0xF0,
+ VISOR_BUS_PC_visorbus_main_c = 0xFF,
+ VISOR_BUS_PC_visorchipset_c = 0xFE,
};
enum event_pc { /* POSTCODE event identifier tuples */
- ATTACH_PORT_ENTRY_PC = 0x001,
- ATTACH_PORT_FAILURE_PC = 0x002,
- ATTACH_PORT_SUCCESS_PC = 0x003,
- BUS_FAILURE_PC = 0x004,
- BUS_CREATE_ENTRY_PC = 0x005,
- BUS_CREATE_FAILURE_PC = 0x006,
- BUS_CREATE_EXIT_PC = 0x007,
- BUS_CONFIGURE_ENTRY_PC = 0x008,
- BUS_CONFIGURE_FAILURE_PC = 0x009,
- BUS_CONFIGURE_EXIT_PC = 0x00A,
- CHIPSET_INIT_ENTRY_PC = 0x00B,
- CHIPSET_INIT_SUCCESS_PC = 0x00C,
- CHIPSET_INIT_FAILURE_PC = 0x00D,
- CHIPSET_INIT_EXIT_PC = 0x00E,
- CREATE_WORKQUEUE_PC = 0x00F,
- CREATE_WORKQUEUE_FAILED_PC = 0x0A0,
- CONTROLVM_INIT_FAILURE_PC = 0x0A1,
- DEVICE_CREATE_ENTRY_PC = 0x0A2,
- DEVICE_CREATE_FAILURE_PC = 0x0A3,
- DEVICE_CREATE_SUCCESS_PC = 0x0A4,
- DEVICE_CREATE_EXIT_PC = 0x0A5,
- DEVICE_ADD_PC = 0x0A6,
- DEVICE_REGISTER_FAILURE_PC = 0x0A7,
- DEVICE_CHANGESTATE_ENTRY_PC = 0x0A8,
- DEVICE_CHANGESTATE_FAILURE_PC = 0x0A9,
- DEVICE_CHANGESTATE_EXIT_PC = 0x0AA,
- DRIVER_ENTRY_PC = 0x0AB,
- DRIVER_EXIT_PC = 0x0AC,
- MALLOC_FAILURE_PC = 0x0AD,
- QUEUE_DELAYED_WORK_PC = 0x0AE,
- /* 0x0B7 RESERVED */
- VBUS_CHANNEL_ENTRY_PC = 0x0B8,
- VBUS_CHANNEL_FAILURE_PC = 0x0B9,
- VBUS_CHANNEL_EXIT_PC = 0x0BA,
- VHBA_CREATE_ENTRY_PC = 0x0BB,
- VHBA_CREATE_FAILURE_PC = 0x0BC,
- VHBA_CREATE_EXIT_PC = 0x0BD,
- VHBA_CREATE_SUCCESS_PC = 0x0BE,
- VHBA_COMMAND_HANDLER_PC = 0x0BF,
- VHBA_PROBE_ENTRY_PC = 0x0C0,
- VHBA_PROBE_FAILURE_PC = 0x0C1,
- VHBA_PROBE_EXIT_PC = 0x0C2,
- VNIC_CREATE_ENTRY_PC = 0x0C3,
- VNIC_CREATE_FAILURE_PC = 0x0C4,
- VNIC_CREATE_SUCCESS_PC = 0x0C5,
- VNIC_PROBE_ENTRY_PC = 0x0C6,
- VNIC_PROBE_FAILURE_PC = 0x0C7,
- VNIC_PROBE_EXIT_PC = 0x0C8,
- VPCI_CREATE_ENTRY_PC = 0x0C9,
- VPCI_CREATE_FAILURE_PC = 0x0CA,
- VPCI_CREATE_EXIT_PC = 0x0CB,
- VPCI_PROBE_ENTRY_PC = 0x0CC,
- VPCI_PROBE_FAILURE_PC = 0x0CD,
- VPCI_PROBE_EXIT_PC = 0x0CE,
- CRASH_DEV_ENTRY_PC = 0x0CF,
- CRASH_DEV_EXIT_PC = 0x0D0,
- CRASH_DEV_HADDR_NULL = 0x0D1,
- CRASH_DEV_CONTROLVM_NULL = 0x0D2,
- CRASH_DEV_RD_BUS_FAIULRE_PC = 0x0D3,
- CRASH_DEV_RD_DEV_FAIULRE_PC = 0x0D4,
- CRASH_DEV_BUS_NULL_FAILURE_PC = 0x0D5,
- CRASH_DEV_DEV_NULL_FAILURE_PC = 0x0D6,
- CRASH_DEV_CTRL_RD_FAILURE_PC = 0x0D7,
- CRASH_DEV_COUNT_FAILURE_PC = 0x0D8,
- SAVE_MSG_BUS_FAILURE_PC = 0x0D9,
- SAVE_MSG_DEV_FAILURE_PC = 0x0DA,
- CALLHOME_INIT_FAILURE_PC = 0x0DB
+ BUS_CREATE_ENTRY_PC = 0x001,
+ BUS_CREATE_FAILURE_PC = 0x002,
+ BUS_CREATE_EXIT_PC = 0x003,
+ BUS_CONFIGURE_ENTRY_PC = 0x004,
+ BUS_CONFIGURE_FAILURE_PC = 0x005,
+ BUS_CONFIGURE_EXIT_PC = 0x006,
+ CHIPSET_INIT_ENTRY_PC = 0x007,
+ CHIPSET_INIT_SUCCESS_PC = 0x008,
+ CHIPSET_INIT_FAILURE_PC = 0x009,
+ CHIPSET_INIT_EXIT_PC = 0x00A,
+ CONTROLVM_INIT_FAILURE_PC = 0x00B,
+ DEVICE_CREATE_ENTRY_PC = 0x00C,
+ DEVICE_CREATE_FAILURE_PC = 0x00D,
+ DEVICE_CREATE_SUCCESS_PC = 0x00E,
+ DEVICE_CREATE_EXIT_PC = 0x00F,
+ DEVICE_ADD_PC = 0x010,
+ DEVICE_REGISTER_FAILURE_PC = 0x011,
+ DEVICE_CHANGESTATE_FAILURE_PC = 0x012,
+ DRIVER_ENTRY_PC = 0x013,
+ DRIVER_EXIT_PC = 0x014,
+ MALLOC_FAILURE_PC = 0x015,
+ CRASH_DEV_ENTRY_PC = 0x016,
+ CRASH_DEV_EXIT_PC = 0x017,
+ CRASH_DEV_RD_BUS_FAILURE_PC = 0x018,
+ CRASH_DEV_RD_DEV_FAILURE_PC = 0x019,
+ CRASH_DEV_BUS_NULL_FAILURE_PC = 0x01A,
+ CRASH_DEV_DEV_NULL_FAILURE_PC = 0x01B,
+ CRASH_DEV_CTRL_RD_FAILURE_PC = 0x01C,
+ CRASH_DEV_COUNT_FAILURE_PC = 0x01D,
+ SAVE_MSG_BUS_FAILURE_PC = 0x01E,
+ SAVE_MSG_DEV_FAILURE_PC = 0x01F,
};
-#define POSTCODE_SEVERITY_ERR DIAG_SEVERITY_ERR
-#define POSTCODE_SEVERITY_WARNING DIAG_SEVERITY_WARNING
-/* TODO-> Info currently doesn't show, so we set info=warning */
-#define POSTCODE_SEVERITY_INFO DIAG_SEVERITY_PRINT
-
-/* example call of POSTCODE_LINUX_2(VISOR_CHIPSET_PC, POSTCODE_SEVERITY_ERR);
+/* Write a 64-bit value to the hypervisor's log file
+ * POSTCODE_LINUX generates a value in the form 0xAABBBCCCDDDDEEEE where
+ * A is an identifier for the file logging the postcode
+ * B is an identifier for the event logging the postcode
+ * C is the line logging the postcode
+ * D is additional information the caller wants to log
+ * E is additional information the caller wants to log
* Please also note that the resulting postcode is in hex, so if you are
* searching for the __LINE__ number, convert it first to decimal. The line
* number combined with driver and type of call, will allow you to track down
@@ -236,35 +162,16 @@ enum event_pc { /* POSTCODE event identifier tuples */
* entered/exited from.
*/
-/* BASE FUNCTIONS */
-#define POSTCODE_LINUX_A(DRIVER_PC, EVENT_PC, pc32bit, severity) \
-do { \
- unsigned long long post_code_temp; \
- post_code_temp = (((u64)DRIVER_PC) << 56) | (((u64)EVENT_PC) << 44) | \
- ((((u64)__LINE__) & 0xFFF) << 32) | \
- (((u64)pc32bit) & 0xFFFFFFFF); \
- ISSUE_IO_VMCALL_POSTCODE_SEVERITY(post_code_temp, severity); \
-} while (0)
-
-#define POSTCODE_LINUX_B(DRIVER_PC, EVENT_PC, pc16bit1, pc16bit2, severity) \
+#define POSTCODE_LINUX(EVENT_PC, pc16bit1, pc16bit2, severity) \
do { \
unsigned long long post_code_temp; \
- post_code_temp = (((u64)DRIVER_PC) << 56) | (((u64)EVENT_PC) << 44) | \
+ post_code_temp = (((u64)CURRENT_FILE_PC) << 56) | \
+ (((u64)EVENT_PC) << 44) | \
((((u64)__LINE__) & 0xFFF) << 32) | \
((((u64)pc16bit1) & 0xFFFF) << 16) | \
(((u64)pc16bit2) & 0xFFFF); \
- ISSUE_IO_VMCALL_POSTCODE_SEVERITY(post_code_temp, severity); \
+ unisys_extended_vmcall(VMCALL_POST_CODE_LOGEVENT, severity, \
+ MDS_APPOS, post_code_temp); \
} while (0)
-/* MOST COMMON */
-#define POSTCODE_LINUX_2(EVENT_PC, severity) \
- POSTCODE_LINUX_A(CURRENT_FILE_PC, EVENT_PC, 0x0000, severity)
-
-#define POSTCODE_LINUX_3(EVENT_PC, pc32bit, severity) \
- POSTCODE_LINUX_A(CURRENT_FILE_PC, EVENT_PC, pc32bit, severity)
-
-#define POSTCODE_LINUX_4(EVENT_PC, pc16bit1, pc16bit2, severity) \
- POSTCODE_LINUX_B(CURRENT_FILE_PC, EVENT_PC, pc16bit1, \
- pc16bit2, severity)
-
#endif /* __IOMONINTF_H__ */
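
As a worked example of the resulting layout (all values illustrative): a POSTCODE_LINUX call on source line 291 (0x123) with pc16bit1 == 0 and pc16bit2 == 0xFFEE composes the fields like this:

	/* 0xAABBBCCCDDDDEEEE:
	 *   AA   = 0xFE   CURRENT_FILE_PC (VISOR_BUS_PC_visorchipset_c)
	 *   BBB  = 0x009  EVENT_PC (CHIPSET_INIT_FAILURE_PC)
	 *   CCC  = 0x123  __LINE__ & 0xFFF (line 291 in decimal)
	 *   DDDD = 0x0000 pc16bit1
	 *   EEEE = 0xFFEE pc16bit2
	 * post_code_temp == 0xFE0091230000FFEE
	 */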
diff --git a/drivers/staging/unisys/visorinput/visorinput.c b/drivers/staging/unisys/visorinput/visorinput.c
index 6f94b646f7c5..949cce680b29 100644
--- a/drivers/staging/unisys/visorinput/visorinput.c
+++ b/drivers/staging/unisys/visorinput/visorinput.c
@@ -409,6 +409,9 @@ devdata_create(struct visor_device *dev, enum visorinput_device_type devtype)
if (!devdata->visorinput_dev)
goto cleanups_register;
break;
+ default:
+ /* No other input devices supported */
+ break;
}
dev_set_drvdata(&dev->device, devdata);
@@ -653,6 +656,9 @@ visorinput_channel_interrupt(struct visor_device *dev)
input_report_rel(visorinput_dev, REL_WHEEL, -1);
input_sync(visorinput_dev);
break;
+ default:
+ /* Unsupported input action */
+ break;
}
}
}
diff --git a/drivers/staging/unisys/visornic/visornic_main.c b/drivers/staging/unisys/visornic/visornic_main.c
index 136700756485..c1f674f5268c 100644
--- a/drivers/staging/unisys/visornic/visornic_main.c
+++ b/drivers/staging/unisys/visornic/visornic_main.c
@@ -791,7 +791,7 @@ visornic_xmit(struct sk_buff *skb, struct net_device *netdev)
* pointing to
*/
firstfraglen = skb->len - skb->data_len;
- if (firstfraglen < ETH_HEADER_SIZE) {
+ if (firstfraglen < ETH_HLEN) {
spin_unlock_irqrestore(&devdata->priv_lock, flags);
devdata->busy_cnt++;
dev_err(&netdev->dev,
@@ -864,7 +864,7 @@ visornic_xmit(struct sk_buff *skb, struct net_device *netdev)
/* copy ethernet header from first frag into ocmdrsp
* - everything else will be pass in frags & DMA'ed
*/
- memcpy(cmdrsp->net.xmt.ethhdr, skb->data, ETH_HEADER_SIZE);
+ memcpy(cmdrsp->net.xmt.ethhdr, skb->data, ETH_HLEN);
/* copy frags info - from skb->data we need to only provide access
* beyond eth header
*/
@@ -1371,7 +1371,7 @@ static ssize_t info_debugfs_read(struct file *file, char __user *buf,
" num_rcv_bufs = %d\n",
devdata->num_rcv_bufs);
str_pos += scnprintf(vbuf + str_pos, len - str_pos,
- " max_oustanding_next_xmits = %lu\n",
+ " max_outstanding_next_xmits = %lu\n",
devdata->max_outstanding_net_xmits);
str_pos += scnprintf(vbuf + str_pos, len - str_pos,
" upper_threshold_net_xmits = %lu\n",
diff --git a/drivers/staging/vc04_services/Kconfig b/drivers/staging/vc04_services/Kconfig
index 9676fb29075a..e61e4ca064a8 100644
--- a/drivers/staging/vc04_services/Kconfig
+++ b/drivers/staging/vc04_services/Kconfig
@@ -1,9 +1,10 @@
-config BCM2708_VCHIQ
+config BCM2835_VCHIQ
tristate "Videocore VCHIQ"
- depends on RASPBERRYPI_FIRMWARE && BROKEN
+ depends on HAS_DMA
+ depends on RASPBERRYPI_FIRMWARE || (COMPILE_TEST && !RASPBERRYPI_FIRMWARE)
default y
help
Kernel to VideoCore communication interface for the
- BCM2708 family of products.
+ BCM2835 family of products.
Defaults to Y when the Broadcom Videocore services
are included in the build, N otherwise.
diff --git a/drivers/staging/vc04_services/Makefile b/drivers/staging/vc04_services/Makefile
index 90ab4781df2c..1a9e742ee40d 100644
--- a/drivers/staging/vc04_services/Makefile
+++ b/drivers/staging/vc04_services/Makefile
@@ -1,4 +1,4 @@
-obj-$(CONFIG_BCM2708_VCHIQ) += vchiq.o
+obj-$(CONFIG_BCM2835_VCHIQ) += vchiq.o
vchiq-objs := \
interface/vchiq_arm/vchiq_core.o \
diff --git a/drivers/staging/vc04_services/interface/vchi/TODO b/drivers/staging/vc04_services/interface/vchi/TODO
new file mode 100644
index 000000000000..03aa65183b25
--- /dev/null
+++ b/drivers/staging/vc04_services/interface/vchi/TODO
@@ -0,0 +1,50 @@
+1) Port to aarch64
+
+This driver won't be very useful unless we also have it working on
+Raspberry Pi 3. This requires, at least:
+
+ - Figure out an alternative to the dmac_map_area() hack.
+
+ - Decide what to use instead of dsb().
+
+ - Do something about (int) cast of bulk->data in
+ vchiq_bulk_transfer().
+
+ bulk->data is a bus address going across to the firmware. We know
+ our bus addresses are <32bit.
+
+2) Write a DT binding doc and get the corresponding DT node merged to
+ bcm2835.
+
+This will let the driver probe when enabled.
+
+3) Import drivers using VCHI.
+
+VCHI is just a tool to let drivers talk to the firmware. Here are
+some of the ones we want:
+
+ - vc_mem (https://github.com/raspberrypi/linux/blob/rpi-4.4.y/drivers/char/broadcom/vc_mem.c)
+
+ This driver is what the vcdbg userspace program uses to set up its
+ requests to the firmware, which are transmitted across VCHIQ. vcdbg
+ is really useful for debugging firmware interactions.
+
+ - bcm2835-camera (https://github.com/raspberrypi/linux/tree/rpi-4.4.y/drivers/media/platform/bcm2835)
+
+ This driver will let us get images from the camera using the MMAL
+ protocol over VCHI.
+
+ - VCSM (https://github.com/raspberrypi/linux/tree/rpi-4.4.y/drivers/char/broadcom/vc_sm)
+
+ This driver is used for talking about regions of VC memory across
+ firmware protocols including VCHI. We'll want to extend this driver
+ to manage these buffers as dmabufs so that we can zero-copy import
+ camera images into vc4 for rendering/display.
+
+4) Garbage-collect unused code
+
+One of the reasons this driver wasn't upstreamed previously was that
+there's a lot of code that got built that's probably unnecessary these
+days. Once we have the set of VCHI-using drivers we want in tree, we
+should be able to do a sweep of the code to see what's left that's
+unused.
diff --git a/drivers/staging/vc04_services/interface/vchi/vchi.h b/drivers/staging/vc04_services/interface/vchi/vchi.h
index 1b17e98f7379..d6937288210c 100644
--- a/drivers/staging/vc04_services/interface/vchi/vchi.h
+++ b/drivers/staging/vc04_services/interface/vchi/vchi.h
@@ -226,25 +226,12 @@ extern int32_t vchi_service_set_option( const VCHI_SERVICE_HANDLE_T handle,
int value);
// Routine to send a message across a service
-extern int32_t vchi_msg_queue( VCHI_SERVICE_HANDLE_T handle,
- const void *data,
- uint32_t data_size,
- VCHI_FLAGS_T flags,
- void *msg_handle );
-
-// scatter-gather (vector) and send message
-int32_t vchi_msg_queuev_ex( VCHI_SERVICE_HANDLE_T handle,
- VCHI_MSG_VECTOR_EX_T *vector,
- uint32_t count,
- VCHI_FLAGS_T flags,
- void *msg_handle );
-
-// legacy scatter-gather (vector) and send message, only handles pointers
-int32_t vchi_msg_queuev( VCHI_SERVICE_HANDLE_T handle,
- VCHI_MSG_VECTOR_T *vector,
- uint32_t count,
- VCHI_FLAGS_T flags,
- void *msg_handle );
+extern int32_t
+ vchi_msg_queue(VCHI_SERVICE_HANDLE_T handle,
+ ssize_t (*copy_callback)(void *context, void *dest,
+ size_t offset, size_t maxsize),
+ void *context,
+ uint32_t data_size);
// Routine to receive a msg from a service
// Dequeue is equivalent to hold, copy into client buffer, release
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq.h b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq.h
index ad398bae6ee4..21adf89a9065 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq.h
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq.h
@@ -37,4 +37,15 @@
#include "vchiq_if.h"
#include "vchiq_util.h"
+/* Do this so that we can test-build the code on non-rpi systems */
+#if IS_ENABLED(CONFIG_RASPBERRYPI_FIRMWARE)
+
+#else
+
+#ifndef dsb
+#define dsb(a)
+#endif
+
+#endif /* IS_ENABLED(CONFIG_RASPBERRYPI_FIRMWARE) */
+
#endif
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c
index 1091b9f1dd07..2b500d85cebc 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c
@@ -45,16 +45,8 @@
#include <asm/pgtable.h>
#include <soc/bcm2835/raspberrypi-firmware.h>
-#define dmac_map_area __glue(_CACHE,_dma_map_area)
-#define dmac_unmap_area __glue(_CACHE,_dma_unmap_area)
-
-extern void dmac_map_area(const void *, size_t, int);
-extern void dmac_unmap_area(const void *, size_t, int);
-
#define TOTAL_SLOTS (VCHIQ_SLOT_ZERO_SLOTS + 2 * 32)
-#define VCHIQ_ARM_ADDRESS(x) ((void *)((char *)x + g_virt_to_bus_offset))
-
#include "vchiq_arm.h"
#include "vchiq_2835.h"
#include "vchiq_connected.h"
@@ -70,13 +62,25 @@ typedef struct vchiq_2835_state_struct {
VCHIQ_ARM_STATE_T arm_state;
} VCHIQ_2835_ARM_STATE_T;
+struct vchiq_pagelist_info {
+ PAGELIST_T *pagelist;
+ size_t pagelist_buffer_size;
+ dma_addr_t dma_addr;
+ enum dma_data_direction dma_dir;
+ unsigned int num_pages;
+ unsigned int pages_need_release;
+ struct page **pages;
+ struct scatterlist *scatterlist;
+ unsigned int scatterlist_mapped;
+};
+
static void __iomem *g_regs;
static unsigned int g_cache_line_size = sizeof(CACHE_LINE_SIZE);
static unsigned int g_fragments_size;
static char *g_fragments_base;
static char *g_free_fragments;
static struct semaphore g_free_fragments_sema;
-static unsigned long g_virt_to_bus_offset;
+static struct device *g_dev;
extern int vchiq_arm_log_level;
@@ -85,12 +89,13 @@ static DEFINE_SEMAPHORE(g_free_fragments_mutex);
static irqreturn_t
vchiq_doorbell_irq(int irq, void *dev_id);
-static int
+static struct vchiq_pagelist_info *
create_pagelist(char __user *buf, size_t count, unsigned short type,
- struct task_struct *task, PAGELIST_T ** ppagelist);
+ struct task_struct *task);
static void
-free_pagelist(PAGELIST_T *pagelist, int actual);
+free_pagelist(struct vchiq_pagelist_info *pagelistinfo,
+ int actual);
int vchiq_platform_init(struct platform_device *pdev, VCHIQ_STATE_T *state)
{
@@ -104,7 +109,14 @@ int vchiq_platform_init(struct platform_device *pdev, VCHIQ_STATE_T *state)
int slot_mem_size, frag_mem_size;
int err, irq, i;
- g_virt_to_bus_offset = virt_to_dma(dev, (void *)0);
+ /*
+ * VCHI messages between the CPU and firmware use
+ * 32-bit bus addresses.
+ */
+ err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
+
+ if (err < 0)
+ return err;
(void)of_property_read_u32(dev->of_node, "cache-line-size",
&g_cache_line_size);
@@ -121,7 +133,7 @@ int vchiq_platform_init(struct platform_device *pdev, VCHIQ_STATE_T *state)
return -ENOMEM;
}
- WARN_ON(((int)slot_mem & (PAGE_SIZE - 1)) != 0);
+ WARN_ON(((unsigned long)slot_mem & (PAGE_SIZE - 1)) != 0);
vchiq_slot_zero = vchiq_init_slots(slot_mem, slot_mem_size);
if (!vchiq_slot_zero)
@@ -173,9 +185,10 @@ int vchiq_platform_init(struct platform_device *pdev, VCHIQ_STATE_T *state)
return err ? : -ENXIO;
}
+ g_dev = dev;
vchiq_log_info(vchiq_arm_log_level,
- "vchiq_init - done (slots %x, phys %pad)",
- (unsigned int)vchiq_slot_zero, &slot_phys);
+ "vchiq_init - done (slots %pK, phys %pad)",
+ vchiq_slot_zero, &slot_phys);
vchiq_call_connected_callbacks();
@@ -213,47 +226,37 @@ remote_event_signal(REMOTE_EVENT_T *event)
event->fired = 1;
- dsb(); /* data barrier operation */
+ dsb(sy); /* data barrier operation */
if (event->armed)
writel(0, g_regs + BELL2); /* trigger vc interrupt */
}
-int
-vchiq_copy_from_user(void *dst, const void *src, int size)
-{
- if ((uint32_t)src < TASK_SIZE) {
- return copy_from_user(dst, src, size);
- } else {
- memcpy(dst, src, size);
- return 0;
- }
-}
-
VCHIQ_STATUS_T
vchiq_prepare_bulk_data(VCHIQ_BULK_T *bulk, VCHI_MEM_HANDLE_T memhandle,
void *offset, int size, int dir)
{
- PAGELIST_T *pagelist;
- int ret;
+ struct vchiq_pagelist_info *pagelistinfo;
WARN_ON(memhandle != VCHI_MEM_HANDLE_INVALID);
- ret = create_pagelist((char __user *)offset, size,
- (dir == VCHIQ_BULK_RECEIVE)
- ? PAGELIST_READ
- : PAGELIST_WRITE,
- current,
- &pagelist);
- if (ret != 0)
+ pagelistinfo = create_pagelist((char __user *)offset, size,
+ (dir == VCHIQ_BULK_RECEIVE)
+ ? PAGELIST_READ
+ : PAGELIST_WRITE,
+ current);
+
+ if (!pagelistinfo)
return VCHIQ_ERROR;
bulk->handle = memhandle;
- bulk->data = VCHIQ_ARM_ADDRESS(pagelist);
+ bulk->data = (void *)(unsigned long)pagelistinfo->dma_addr;
- /* Store the pagelist address in remote_data, which isn't used by the
- slave. */
- bulk->remote_data = pagelist;
+ /*
+ * Store the pagelistinfo address in remote_data,
+ * which isn't used by the slave.
+ */
+ bulk->remote_data = pagelistinfo;
return VCHIQ_SUCCESS;
}
@@ -262,7 +265,8 @@ void
vchiq_complete_bulk(VCHIQ_BULK_T *bulk)
{
if (bulk && bulk->remote_data && bulk->actual)
- free_pagelist((PAGELIST_T *)bulk->remote_data, bulk->actual);
+ free_pagelist((struct vchiq_pagelist_info *)bulk->remote_data,
+ bulk->actual);
}
void
@@ -350,57 +354,93 @@ vchiq_doorbell_irq(int irq, void *dev_id)
return ret;
}
+static void
+cleanup_pagelistinfo(struct vchiq_pagelist_info *pagelistinfo)
+{
+ if (pagelistinfo->scatterlist_mapped) {
+ dma_unmap_sg(g_dev, pagelistinfo->scatterlist,
+ pagelistinfo->num_pages, pagelistinfo->dma_dir);
+ }
+
+ if (pagelistinfo->pages_need_release) {
+ unsigned int i;
+
+ for (i = 0; i < pagelistinfo->num_pages; i++)
+ put_page(pagelistinfo->pages[i]);
+ }
+
+ dma_free_coherent(g_dev, pagelistinfo->pagelist_buffer_size,
+ pagelistinfo->pagelist, pagelistinfo->dma_addr);
+}
+
/* There is a potential problem with partial cache lines (pages?)
** at the ends of the block when reading. If the CPU accessed anything in
** the same line (page?) then it may have pulled old data into the cache,
** obscuring the new data underneath. We can solve this by transferring the
** partial cache lines separately, and allowing the ARM to copy into the
** cached area.
-
-** N.B. This implementation plays slightly fast and loose with the Linux
-** driver programming rules, e.g. its use of dmac_map_area instead of
-** dma_map_single, but it isn't a multi-platform driver and it benefits
-** from increased speed as a result.
*/
-static int
+static struct vchiq_pagelist_info *
create_pagelist(char __user *buf, size_t count, unsigned short type,
- struct task_struct *task, PAGELIST_T ** ppagelist)
+ struct task_struct *task)
{
PAGELIST_T *pagelist;
+ struct vchiq_pagelist_info *pagelistinfo;
struct page **pages;
- unsigned long *addrs;
- unsigned int num_pages, offset, i;
- char *addr, *base_addr, *next_addr;
- int run, addridx, actual_pages;
- unsigned long *need_release;
-
- offset = (unsigned int)buf & (PAGE_SIZE - 1);
+ u32 *addrs;
+ unsigned int num_pages, offset, i, k;
+ int actual_pages;
+ size_t pagelist_size;
+ struct scatterlist *scatterlist, *sg;
+ int dma_buffers;
+ dma_addr_t dma_addr;
+
+ offset = ((unsigned int)(unsigned long)buf & (PAGE_SIZE - 1));
num_pages = (count + offset + PAGE_SIZE - 1) / PAGE_SIZE;
- *ppagelist = NULL;
+ pagelist_size = sizeof(PAGELIST_T) +
+ (num_pages * sizeof(u32)) +
+ (num_pages * sizeof(pages[0]) +
+ (num_pages * sizeof(struct scatterlist))) +
+ sizeof(struct vchiq_pagelist_info);
/* Allocate enough storage to hold the page pointers and the page
** list
*/
- pagelist = kmalloc(sizeof(PAGELIST_T) +
- (num_pages * sizeof(unsigned long)) +
- sizeof(unsigned long) +
- (num_pages * sizeof(pages[0])),
- GFP_KERNEL);
-
- vchiq_log_trace(vchiq_arm_log_level,
- "create_pagelist - %x", (unsigned int)pagelist);
+ pagelist = dma_zalloc_coherent(g_dev,
+ pagelist_size,
+ &dma_addr,
+ GFP_KERNEL);
+
+ vchiq_log_trace(vchiq_arm_log_level, "create_pagelist - %pK",
+ pagelist);
if (!pagelist)
- return -ENOMEM;
+ return NULL;
- addrs = pagelist->addrs;
- need_release = (unsigned long *)(addrs + num_pages);
- pages = (struct page **)(addrs + num_pages + 1);
+ addrs = pagelist->addrs;
+ pages = (struct page **)(addrs + num_pages);
+ scatterlist = (struct scatterlist *)(pages + num_pages);
+ pagelistinfo = (struct vchiq_pagelist_info *)
+ (scatterlist + num_pages);
+
+ pagelist->length = count;
+ pagelist->type = type;
+ pagelist->offset = offset;
+
+ /* Populate the fields of the pagelistinfo structure */
+ pagelistinfo->pagelist = pagelist;
+ pagelistinfo->pagelist_buffer_size = pagelist_size;
+ pagelistinfo->dma_addr = dma_addr;
+ pagelistinfo->dma_dir = (type == PAGELIST_WRITE) ?
+ DMA_TO_DEVICE : DMA_FROM_DEVICE;
+ pagelistinfo->num_pages = num_pages;
+ pagelistinfo->pages_need_release = 0;
+ pagelistinfo->pages = pages;
+ pagelistinfo->scatterlist = scatterlist;
+ pagelistinfo->scatterlist_mapped = 0;
if (is_vmalloc_addr(buf)) {
- int dir = (type == PAGELIST_WRITE) ?
- DMA_TO_DEVICE : DMA_FROM_DEVICE;
unsigned long length = count;
unsigned int off = offset;
@@ -413,14 +453,13 @@ create_pagelist(char __user *buf, size_t count, unsigned short type,
if (bytes > length)
bytes = length;
pages[actual_pages] = pg;
- dmac_map_area(page_address(pg) + off, bytes, dir);
length -= bytes;
off = 0;
}
- *need_release = 0; /* do not try and release vmalloc pages */
+ /* do not try and release vmalloc pages */
} else {
down_read(&task->mm->mmap_sem);
- actual_pages = get_user_pages(task, task->mm,
+ actual_pages = get_user_pages(
(unsigned long)buf & ~(PAGE_SIZE - 1),
num_pages,
(type == PAGELIST_READ) ? FOLL_WRITE : 0,
@@ -438,44 +477,59 @@ create_pagelist(char __user *buf, size_t count, unsigned short type,
while (actual_pages > 0)
{
actual_pages--;
- page_cache_release(pages[actual_pages]);
+ put_page(pages[actual_pages]);
}
- kfree(pagelist);
- if (actual_pages == 0)
- actual_pages = -ENOMEM;
- return actual_pages;
+ cleanup_pagelistinfo(pagelistinfo);
+ return NULL;
}
- *need_release = 1; /* release user pages */
+ /* release user pages */
+ pagelistinfo->pages_need_release = 1;
}
- pagelist->length = count;
- pagelist->type = type;
- pagelist->offset = offset;
-
- /* Group the pages into runs of contiguous pages */
-
- base_addr = VCHIQ_ARM_ADDRESS(page_address(pages[0]));
- next_addr = base_addr + PAGE_SIZE;
- addridx = 0;
- run = 0;
+ /*
+ * Initialize the scatterlist so that the magic cookie
+ * is filled if debugging is enabled
+ */
+ sg_init_table(scatterlist, num_pages);
+ /* Now set the pages for each scatterlist */
+ for (i = 0; i < num_pages; i++)
+ sg_set_page(scatterlist + i, pages[i], PAGE_SIZE, 0);
+
+ dma_buffers = dma_map_sg(g_dev,
+ scatterlist,
+ num_pages,
+ pagelistinfo->dma_dir);
+
+ if (dma_buffers == 0) {
+ cleanup_pagelistinfo(pagelistinfo);
+ return NULL;
+ }
- for (i = 1; i < num_pages; i++) {
- addr = VCHIQ_ARM_ADDRESS(page_address(pages[i]));
- if ((addr == next_addr) && (run < (PAGE_SIZE - 1))) {
- next_addr += PAGE_SIZE;
- run++;
+ pagelistinfo->scatterlist_mapped = 1;
+
+ /* Combine adjacent blocks for performance */
+ k = 0;
+ for_each_sg(scatterlist, sg, dma_buffers, i) {
+ u32 len = sg_dma_len(sg);
+ u32 addr = sg_dma_address(sg);
+
+ /* Note: addrs is the address + page_count - 1
+ * The firmware expects the block to be page
+ * aligned and a multiple of the page size
+ */
+ WARN_ON(len == 0);
+ WARN_ON(len & ~PAGE_MASK);
+ WARN_ON(addr & ~PAGE_MASK);
+ if (k > 0 &&
+		    ((addrs[k - 1] & PAGE_MASK) +
+		     (((addrs[k - 1] & ~PAGE_MASK) + 1) << PAGE_SHIFT))
+		    == addr) {
+ addrs[k - 1] += (len >> PAGE_SHIFT);
} else {
- addrs[addridx] = (unsigned long)base_addr + run;
- addridx++;
- base_addr = addr;
- next_addr = addr + PAGE_SIZE;
- run = 0;
+ addrs[k++] = addr | ((len >> PAGE_SHIFT) - 1);
}
}
- addrs[addridx] = (unsigned long)base_addr + run;
- addridx++;
-
/* Partial cache lines (fragments) require special measures */
if ((type == PAGELIST_READ) &&
((pagelist->offset & (g_cache_line_size - 1)) ||
@@ -484,8 +538,8 @@ create_pagelist(char __user *buf, size_t count, unsigned short type,
char *fragments;
if (down_interruptible(&g_free_fragments_sema) != 0) {
- kfree(pagelist);
- return -EINTR;
+ cleaup_pagelistinfo(pagelistinfo);
+ return NULL;
}
WARN_ON(g_free_fragments == NULL);
@@ -499,29 +553,28 @@ create_pagelist(char __user *buf, size_t count, unsigned short type,
(fragments - g_fragments_base) / g_fragments_size;
}
- dmac_flush_range(pagelist, addrs + num_pages);
-
- *ppagelist = pagelist;
-
- return 0;
+ return pagelistinfo;
}
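
For reference, not part of the patch itself: the compacted addrs[] format above hands the firmware one 32-bit word per run of physically contiguous pages - the page-aligned bus address OR'd with (page count - 1) in the low bits - and extends the previous word when a new block starts exactly where the last run ends. A minimal stand-alone sketch of that encode-and-merge step, assuming 4 KiB pages and a hypothetical add_block() helper:

/* Hypothetical userspace illustration of the addrs[] packing used in
 * create_pagelist() above; PAGE_SHIFT/PAGE_MASK values are assumptions
 * (4 KiB pages). */
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1u << PAGE_SHIFT)
#define PAGE_MASK  (~(PAGE_SIZE - 1))

/* Append a DMA block, merging with the previous entry when it begins
 * exactly at the end of the previous run. Returns the new entry count. */
static unsigned int add_block(uint32_t *addrs, unsigned int k,
			      uint32_t addr, uint32_t len)
{
	if (k > 0 &&
	    ((addrs[k - 1] & PAGE_MASK) +
	     (((addrs[k - 1] & ~PAGE_MASK) + 1) << PAGE_SHIFT)) == addr) {
		addrs[k - 1] += len >> PAGE_SHIFT;	/* extend the run */
		return k;
	}
	addrs[k] = addr | ((len >> PAGE_SHIFT) - 1);	/* start a new run */
	return k + 1;
}

int main(void)
{
	uint32_t addrs[4];
	unsigned int k = 0;

	k = add_block(addrs, k, 0x10000000, PAGE_SIZE);	    /* run start */
	k = add_block(addrs, k, 0x10001000, 2 * PAGE_SIZE); /* contiguous: merged */
	k = add_block(addrs, k, 0x20000000, PAGE_SIZE);	    /* gap: new run */
	printf("%u runs: 0x%08x 0x%08x\n", k,
	       (unsigned)addrs[0], (unsigned)addrs[1]);
	return 0;
}

With the inputs above this prints two runs, 0x10000002 (three pages from 0x10000000) and 0x20000000 (one page).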
static void
-free_pagelist(PAGELIST_T *pagelist, int actual)
+free_pagelist(struct vchiq_pagelist_info *pagelistinfo,
+ int actual)
{
- unsigned long *need_release;
- struct page **pages;
- unsigned int num_pages, i;
-
- vchiq_log_trace(vchiq_arm_log_level,
- "free_pagelist - %x, %d", (unsigned int)pagelist, actual);
+ unsigned int i;
+ PAGELIST_T *pagelist = pagelistinfo->pagelist;
+ struct page **pages = pagelistinfo->pages;
+ unsigned int num_pages = pagelistinfo->num_pages;
- num_pages =
- (pagelist->length + pagelist->offset + PAGE_SIZE - 1) /
- PAGE_SIZE;
+ vchiq_log_trace(vchiq_arm_log_level, "free_pagelist - %pK, %d",
+ pagelistinfo->pagelist, actual);
- need_release = (unsigned long *)(pagelist->addrs + num_pages);
- pages = (struct page **)(pagelist->addrs + num_pages + 1);
+ /*
+ * NOTE: dma_unmap_sg must be called before the
+ * cpu can touch any of the data/pages.
+ */
+ dma_unmap_sg(g_dev, pagelistinfo->scatterlist,
+ pagelistinfo->num_pages, pagelistinfo->dma_dir);
+ pagelistinfo->scatterlist_mapped = 0;
/* Deal with any partial cache lines (fragments) */
if (pagelist->type >= PAGELIST_READ_WITH_FRAGMENTS) {
@@ -559,27 +612,12 @@ free_pagelist(PAGELIST_T *pagelist, int actual)
up(&g_free_fragments_sema);
}
- if (*need_release) {
- unsigned int length = pagelist->length;
- unsigned int offset = pagelist->offset;
-
- for (i = 0; i < num_pages; i++) {
- struct page *pg = pages[i];
-
- if (pagelist->type != PAGELIST_WRITE) {
- unsigned int bytes = PAGE_SIZE - offset;
-
- if (bytes > length)
- bytes = length;
- dmac_unmap_area(page_address(pg) + offset,
- bytes, DMA_FROM_DEVICE);
- length -= bytes;
- offset = 0;
- set_page_dirty(pg);
- }
- page_cache_release(pg);
- }
+ /* Need to mark all the pages dirty. */
+ if (pagelist->type != PAGELIST_WRITE &&
+ pagelistinfo->pages_need_release) {
+ for (i = 0; i < num_pages; i++)
+ set_page_dirty(pages[i]);
}
- kfree(pagelist);
+ cleaup_pagelistinfo(pagelistinfo);
}
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
index 7b6cd4d80621..0d987898b4f8 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
@@ -190,8 +190,8 @@ static const char *const ioctl_names[] = {
"CLOSE_DELIVERED"
};
-vchiq_static_assert((sizeof(ioctl_names)/sizeof(ioctl_names[0])) ==
- (VCHIQ_IOC_MAX + 1));
+vchiq_static_assert(ARRAY_SIZE(ioctl_names) ==
+ (VCHIQ_IOC_MAX + 1));
static void
dump_phys_mem(void *virt_addr, uint32_t num_bytes);
@@ -402,6 +402,107 @@ static void close_delivered(USER_SERVICE_T *user_service)
}
}
+struct vchiq_io_copy_callback_context {
+ VCHIQ_ELEMENT_T *current_element;
+ size_t current_element_offset;
+ unsigned long elements_to_go;
+ size_t current_offset;
+};
+
+static ssize_t
+vchiq_ioc_copy_element_data(
+ void *context,
+ void *dest,
+ size_t offset,
+ size_t maxsize)
+{
+ long res;
+ size_t bytes_this_round;
+ struct vchiq_io_copy_callback_context *copy_context =
+ (struct vchiq_io_copy_callback_context *)context;
+
+ if (offset != copy_context->current_offset)
+ return 0;
+
+ if (!copy_context->elements_to_go)
+ return 0;
+
+ /*
+ * Complex logic here to handle the case of 0 size elements
+ * in the middle of the array of elements.
+ *
+ * Need to skip over these 0 size elements.
+ */
+ while (1) {
+ bytes_this_round = min(copy_context->current_element->size -
+ copy_context->current_element_offset,
+ maxsize);
+
+ if (bytes_this_round)
+ break;
+
+ copy_context->elements_to_go--;
+ copy_context->current_element++;
+ copy_context->current_element_offset = 0;
+
+ if (!copy_context->elements_to_go)
+ return 0;
+ }
+
+ res = copy_from_user(dest,
+ copy_context->current_element->data +
+ copy_context->current_element_offset,
+ bytes_this_round);
+
+ if (res != 0)
+ return -EFAULT;
+
+ copy_context->current_element_offset += bytes_this_round;
+ copy_context->current_offset += bytes_this_round;
+
+ /*
+ * Check if done with current element, and if so advance to the next.
+ */
+ if (copy_context->current_element_offset ==
+ copy_context->current_element->size) {
+ copy_context->elements_to_go--;
+ copy_context->current_element++;
+ copy_context->current_element_offset = 0;
+ }
+
+ return bytes_this_round;
+}
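
An aside on the element-walking logic above: zero-size elements in the middle of the array advance the cursor without terminating the copy, and the callback returns 0 only once every element is consumed. A hypothetical userspace harness of the same loop, with copy_from_user() stood in by memcpy():

/* Illustration only; struct element mirrors the data/size pair of
 * VCHIQ_ELEMENT_T, and all names here are stand-ins. */
#include <stddef.h>
#include <stdio.h>
#include <string.h>

struct element { const char *data; size_t size; };

struct ctx {
	const struct element *cur;	/* current element */
	size_t cur_off;			/* offset within *cur */
	unsigned long to_go;		/* elements remaining */
	size_t total;			/* bytes produced so far */
};

/* Returns bytes copied, or 0 when the element list is exhausted. */
static long copy_element_data(struct ctx *c, char *dest, size_t maxsize)
{
	size_t n = 0;

	while (c->to_go) {
		n = c->cur->size - c->cur_off;
		if (n)
			break;
		c->to_go--;		/* skip a zero-size element */
		c->cur++;
		c->cur_off = 0;
	}
	if (!c->to_go)
		return 0;
	if (n > maxsize)
		n = maxsize;
	memcpy(dest, c->cur->data + c->cur_off, n);  /* copy_from_user() stand-in */
	c->cur_off += n;
	c->total += n;
	if (c->cur_off == c->cur->size) {	/* element finished: advance */
		c->to_go--;
		c->cur++;
		c->cur_off = 0;
	}
	return (long)n;
}

int main(void)
{
	const struct element elems[] = { { "abcd", 4 }, { "", 0 }, { "ef", 2 } };
	struct ctx c = { elems, 0, 3, 0 };
	char out[16];

	while (copy_element_data(&c, out + c.total, sizeof(out) - c.total) > 0)
		;
	printf("copied %zu bytes: %.6s\n", c.total, out);	/* 6 bytes: abcdef */
	return 0;
}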
+
+/**************************************************************************
+ *
+ * vchiq_ioc_queue_message
+ *
+ **************************************************************************/
+static VCHIQ_STATUS_T
+vchiq_ioc_queue_message(VCHIQ_SERVICE_HANDLE_T handle,
+ VCHIQ_ELEMENT_T *elements,
+ unsigned long count)
+{
+ struct vchiq_io_copy_callback_context context;
+ unsigned long i;
+ size_t total_size = 0;
+
+ context.current_element = elements;
+ context.current_element_offset = 0;
+ context.elements_to_go = count;
+ context.current_offset = 0;
+
+ for (i = 0; i < count; i++) {
+ if (!elements[i].data && elements[i].size != 0)
+			return VCHIQ_ERROR;
+
+ total_size += elements[i].size;
+ }
+
+ return vchiq_queue_message(handle, vchiq_ioc_copy_element_data,
+ &context, total_size);
+}
+
/****************************************************************************
*
* vchiq_ioctl
@@ -418,8 +519,8 @@ vchiq_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
DEBUG_INITIALISE(g_state.local)
vchiq_log_trace(vchiq_arm_log_level,
- "vchiq_ioctl - instance %x, cmd %s, arg %lx",
- (unsigned int)instance,
+ "vchiq_ioctl - instance %pK, cmd %s, arg %lx",
+ instance,
((_IOC_TYPE(cmd) == VCHIQ_IOC_MAGIC) &&
(_IOC_NR(cmd) <= VCHIQ_IOC_MAX)) ?
ioctl_names[_IOC_NR(cmd)] : "<invalid>", arg);
@@ -453,7 +554,7 @@ vchiq_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
ret = -EINVAL;
break;
}
- rc = mutex_lock_interruptible(&instance->state->mutex);
+ rc = mutex_lock_killable(&instance->state->mutex);
if (rc != 0) {
vchiq_log_error(vchiq_arm_log_level,
"vchiq: connect: could not lock mutex for "
@@ -651,7 +752,7 @@ vchiq_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
VCHIQ_ELEMENT_T elements[MAX_ELEMENTS];
if (copy_from_user(elements, args.elements,
args.count * sizeof(VCHIQ_ELEMENT_T)) == 0)
- status = vchiq_queue_message
+ status = vchiq_ioc_queue_message
(args.handle,
elements, args.count);
else
@@ -713,8 +814,8 @@ vchiq_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
break;
}
vchiq_log_info(vchiq_arm_log_level,
- "found bulk_waiter %x for pid %d",
- (unsigned int)waiter, current->pid);
+ "found bulk_waiter %pK for pid %d", waiter,
+ current->pid);
args.userdata = &waiter->bulk_waiter;
}
status = vchiq_bulk_transfer
@@ -743,8 +844,8 @@ vchiq_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
list_add(&waiter->list, &instance->bulk_waiter_list);
mutex_unlock(&instance->bulk_waiter_list_mutex);
vchiq_log_info(vchiq_arm_log_level,
- "saved bulk_waiter %x for pid %d",
- (unsigned int)waiter, current->pid);
+ "saved bulk_waiter %pK for pid %d",
+ waiter, current->pid);
if (copy_to_user((void __user *)
&(((VCHIQ_QUEUE_BULK_TRANSFER_T __user *)
@@ -826,10 +927,8 @@ vchiq_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
if (args.msgbufsize < msglen) {
vchiq_log_error(
vchiq_arm_log_level,
- "header %x: msgbufsize"
- " %x < msglen %x",
- (unsigned int)header,
- args.msgbufsize,
+ "header %pK: msgbufsize %x < msglen %x",
+ header, args.msgbufsize,
msglen);
WARN(1, "invalid message "
"size\n");
@@ -980,9 +1079,8 @@ vchiq_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
ret = -EFAULT;
} else {
vchiq_log_error(vchiq_arm_log_level,
- "header %x: bufsize %x < size %x",
- (unsigned int)header, args.bufsize,
- header->size);
+ "header %pK: bufsize %x < size %x",
+ header, args.bufsize, header->size);
WARN(1, "invalid size\n");
ret = -EMSGSIZE;
}
@@ -1284,9 +1382,8 @@ vchiq_release(struct inode *inode, struct file *file)
list);
list_del(pos);
vchiq_log_info(vchiq_arm_log_level,
- "bulk_waiter - cleaned up %x "
- "for pid %d",
- (unsigned int)waiter, waiter->pid);
+ "bulk_waiter - cleaned up %pK for pid %d",
+ waiter, waiter->pid);
kfree(waiter);
}
}
@@ -1385,9 +1482,8 @@ vchiq_dump_platform_instances(void *dump_context)
instance = service->instance;
if (instance && !instance->mark) {
len = snprintf(buf, sizeof(buf),
- "Instance %x: pid %d,%s completions "
- "%d/%d",
- (unsigned int)instance, instance->pid,
+ "Instance %pK: pid %d,%s completions %d/%d",
+ instance, instance->pid,
instance->connected ? " connected, " :
"",
instance->completion_insert -
@@ -1415,8 +1511,7 @@ vchiq_dump_platform_service_state(void *dump_context, VCHIQ_SERVICE_T *service)
char buf[80];
int len;
- len = snprintf(buf, sizeof(buf), " instance %x",
- (unsigned int)service->instance);
+ len = snprintf(buf, sizeof(buf), " instance %pK", service->instance);
if ((service->base.callback == service_callback) &&
user_service->is_vchi) {
@@ -1473,8 +1568,7 @@ dump_phys_mem(void *virt_addr, uint32_t num_bytes)
}
down_read(&current->mm->mmap_sem);
- rc = get_user_pages(current, /* task */
- current->mm, /* mm */
+ rc = get_user_pages(
(unsigned long)virt_addr, /* start */
num_pages, /* len */
0, /* gup_flags */
@@ -1485,6 +1579,12 @@ dump_phys_mem(void *virt_addr, uint32_t num_bytes)
prev_idx = -1;
page = NULL;
+ if (rc < 0) {
+ vchiq_log_error(vchiq_arm_log_level,
+ "Failed to get user pages: %d\n", rc);
+ goto out;
+ }
+
while (offset < end_offset) {
int page_offset = offset % PAGE_SIZE;
@@ -1508,11 +1608,13 @@ dump_phys_mem(void *virt_addr, uint32_t num_bytes)
offset += 16;
}
+
+out:
if (page != NULL)
kunmap(page);
for (page_idx = 0; page_idx < num_pages; page_idx++)
- page_cache_release(pages[page_idx]);
+ put_page(pages[page_idx]);
kfree(pages);
}
@@ -1683,8 +1785,6 @@ exit:
VCHIQ_STATUS_T
vchiq_arm_init_state(VCHIQ_STATE_T *state, VCHIQ_ARM_STATE_T *arm_state)
{
- VCHIQ_STATUS_T status = VCHIQ_SUCCESS;
-
if (arm_state) {
rwlock_init(&arm_state->susp_res_lock);
@@ -1712,14 +1812,13 @@ vchiq_arm_init_state(VCHIQ_STATE_T *state, VCHIQ_ARM_STATE_T *arm_state)
arm_state->suspend_timer_timeout = SUSPEND_TIMER_TIMEOUT_MS;
arm_state->suspend_timer_running = 0;
- init_timer(&arm_state->suspend_timer);
- arm_state->suspend_timer.data = (unsigned long)(state);
- arm_state->suspend_timer.function = suspend_timer_callback;
+ setup_timer(&arm_state->suspend_timer, suspend_timer_callback,
+ (unsigned long)(state));
arm_state->first_connect = 0;
}
- return status;
+ return VCHIQ_SUCCESS;
}
/*
@@ -2032,20 +2131,20 @@ static void
output_timeout_error(VCHIQ_STATE_T *state)
{
VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
- char service_err[50] = "";
+ char err[50] = "";
int vc_use_count = arm_state->videocore_use_count;
int active_services = state->unused_service;
int i;
if (!arm_state->videocore_use_count) {
- snprintf(service_err, 50, " Videocore usecount is 0");
+ snprintf(err, sizeof(err), " Videocore usecount is 0");
goto output_msg;
}
for (i = 0; i < active_services; i++) {
VCHIQ_SERVICE_T *service_ptr = state->services[i];
if (service_ptr && service_ptr->service_use_count &&
(service_ptr->srvstate != VCHIQ_SRVSTATE_FREE)) {
- snprintf(service_err, 50, " %c%c%c%c(%d) service has "
+ snprintf(err, sizeof(err), " %c%c%c%c(%d) service has "
"use count %d%s", VCHIQ_FOURCC_AS_4CHARS(
service_ptr->base.fourcc),
service_ptr->client_id,
@@ -2059,7 +2158,7 @@ output_timeout_error(VCHIQ_STATE_T *state)
output_msg:
vchiq_log_error(vchiq_susp_log_level,
"timed out waiting for vc suspend (%d).%s",
- arm_state->autosuspend_override, service_err);
+ arm_state->autosuspend_override, err);
}
@@ -2780,7 +2879,7 @@ void vchiq_platform_conn_state_changed(VCHIQ_STATE_T *state,
&vchiq_keepalive_thread_func,
(void *)state,
threadname);
- if (arm_state->ka_thread == NULL) {
+ if (IS_ERR(arm_state->ka_thread)) {
vchiq_log_error(vchiq_susp_log_level,
"vchiq: FATAL: couldn't create thread %s",
threadname);
@@ -2800,28 +2899,27 @@ static int vchiq_probe(struct platform_device *pdev)
void *ptr_err;
fw_node = of_parse_phandle(pdev->dev.of_node, "firmware", 0);
-/* Remove comment when booting without Device Tree is no longer supported
if (!fw_node) {
dev_err(&pdev->dev, "Missing firmware node\n");
return -ENOENT;
}
-*/
+
fw = rpi_firmware_get(fw_node);
+ of_node_put(fw_node);
if (!fw)
return -EPROBE_DEFER;
platform_set_drvdata(pdev, fw);
- /* create debugfs entries */
- err = vchiq_debugfs_init();
+ err = vchiq_platform_init(pdev, &g_state);
if (err != 0)
- goto failed_debugfs_init;
+ goto failed_platform_init;
err = alloc_chrdev_region(&vchiq_devid, VCHIQ_MINOR, 1, DEVICE_NAME);
if (err != 0) {
vchiq_log_error(vchiq_arm_log_level,
"Unable to allocate device number");
- goto failed_alloc_chrdev;
+ goto failed_platform_init;
}
cdev_init(&vchiq_cdev, &vchiq_fops);
vchiq_cdev.owner = THIS_MODULE;
@@ -2844,9 +2942,10 @@ static int vchiq_probe(struct platform_device *pdev)
if (IS_ERR(ptr_err))
goto failed_device_create;
- err = vchiq_platform_init(pdev, &g_state);
+ /* create debugfs entries */
+ err = vchiq_debugfs_init();
if (err != 0)
- goto failed_platform_init;
+ goto failed_debugfs_init;
vchiq_log_info(vchiq_arm_log_level,
"vchiq: initialised - version %d (min %d), device %d.%d",
@@ -2855,7 +2954,7 @@ static int vchiq_probe(struct platform_device *pdev)
return 0;
-failed_platform_init:
+failed_debugfs_init:
device_destroy(vchiq_class, vchiq_devid);
failed_device_create:
class_destroy(vchiq_class);
@@ -2864,15 +2963,14 @@ failed_class_create:
err = PTR_ERR(ptr_err);
failed_cdev_add:
unregister_chrdev_region(vchiq_devid, 1);
-failed_alloc_chrdev:
- vchiq_debugfs_deinit();
-failed_debugfs_init:
+failed_platform_init:
vchiq_log_warning(vchiq_arm_log_level, "could not load vchiq");
return err;
}
static int vchiq_remove(struct platform_device *pdev)
{
+ vchiq_debugfs_deinit();
device_destroy(vchiq_class, vchiq_devid);
class_destroy(vchiq_class);
cdev_del(&vchiq_cdev);
@@ -2890,7 +2988,6 @@ MODULE_DEVICE_TABLE(of, vchiq_of_match);
static struct platform_driver vchiq_driver = {
.driver = {
.name = "bcm2835_vchiq",
- .owner = THIS_MODULE,
.of_match_table = vchiq_of_match,
},
.probe = vchiq_probe,
@@ -2899,4 +2996,5 @@ static struct platform_driver vchiq_driver = {
module_platform_driver(vchiq_driver);
MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Videocore VCHIQ driver");
MODULE_AUTHOR("Broadcom Corporation");
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_connected.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_connected.c
index 5efc62ffb2f5..7ea29665bd0c 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_connected.c
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_connected.c
@@ -72,7 +72,7 @@ void vchiq_add_connected_callback(VCHIQ_CONNECTED_CALLBACK_T callback)
{
connected_init();
- if (mutex_lock_interruptible(&g_connected_mutex) != 0)
+ if (mutex_lock_killable(&g_connected_mutex) != 0)
return;
if (g_connected)
@@ -107,7 +107,7 @@ void vchiq_call_connected_callbacks(void)
connected_init();
- if (mutex_lock_interruptible(&g_connected_mutex) != 0)
+ if (mutex_lock_killable(&g_connected_mutex) != 0)
return;
for (i = 0; i < g_num_deferred_callbacks; i++)
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c
index 2c98da4307df..028e90bc1cdc 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c
@@ -296,12 +296,13 @@ lock_service(VCHIQ_SERVICE_T *service)
void
unlock_service(VCHIQ_SERVICE_T *service)
{
- VCHIQ_STATE_T *state = service->state;
spin_lock(&service_spinlock);
BUG_ON(!service || (service->ref_count == 0));
if (service && service->ref_count) {
service->ref_count--;
if (!service->ref_count) {
+ VCHIQ_STATE_T *state = service->state;
+
BUG_ON(service->srvstate != VCHIQ_SRVSTATE_FREE);
state->services[service->localport] = NULL;
} else
@@ -380,9 +381,9 @@ make_service_callback(VCHIQ_SERVICE_T *service, VCHIQ_REASON_T reason,
VCHIQ_HEADER_T *header, void *bulk_userdata)
{
VCHIQ_STATUS_T status;
- vchiq_log_trace(vchiq_core_log_level, "%d: callback:%d (%s, %x, %x)",
+ vchiq_log_trace(vchiq_core_log_level, "%d: callback:%d (%s, %pK, %pK)",
service->state->id, service->localport, reason_names[reason],
- (unsigned int)header, (unsigned int)bulk_userdata);
+ header, bulk_userdata);
status = service->base.callback(reason, header, service->handle,
bulk_userdata);
if (status == VCHIQ_ERROR) {
@@ -406,28 +407,24 @@ vchiq_set_conn_state(VCHIQ_STATE_T *state, VCHIQ_CONNSTATE_T newstate)
}
static inline void
-remote_event_create(REMOTE_EVENT_T *event)
+remote_event_create(VCHIQ_STATE_T *state, REMOTE_EVENT_T *event)
{
event->armed = 0;
/* Don't clear the 'fired' flag because it may already have been set
** by the other side. */
- sema_init(event->event, 0);
-}
-
-static inline void
-remote_event_destroy(REMOTE_EVENT_T *event)
-{
- (void)event;
+ sema_init((struct semaphore *)((char *)state + event->event), 0);
}
static inline int
-remote_event_wait(REMOTE_EVENT_T *event)
+remote_event_wait(VCHIQ_STATE_T *state, REMOTE_EVENT_T *event)
{
if (!event->fired) {
event->armed = 1;
- dsb();
+ dsb(sy);
if (!event->fired) {
- if (down_interruptible(event->event) != 0) {
+ if (down_interruptible(
+ (struct semaphore *)
+ ((char *)state + event->event)) != 0) {
event->armed = 0;
return 0;
}
@@ -441,34 +438,34 @@ remote_event_wait(REMOTE_EVENT_T *event)
}
static inline void
-remote_event_signal_local(REMOTE_EVENT_T *event)
+remote_event_signal_local(VCHIQ_STATE_T *state, REMOTE_EVENT_T *event)
{
event->armed = 0;
- up(event->event);
+ up((struct semaphore *)((char *)state + event->event));
}
static inline void
-remote_event_poll(REMOTE_EVENT_T *event)
+remote_event_poll(VCHIQ_STATE_T *state, REMOTE_EVENT_T *event)
{
if (event->fired && event->armed)
- remote_event_signal_local(event);
+ remote_event_signal_local(state, event);
}
void
remote_event_pollall(VCHIQ_STATE_T *state)
{
- remote_event_poll(&state->local->sync_trigger);
- remote_event_poll(&state->local->sync_release);
- remote_event_poll(&state->local->trigger);
- remote_event_poll(&state->local->recycle);
+ remote_event_poll(state, &state->local->sync_trigger);
+ remote_event_poll(state, &state->local->sync_release);
+ remote_event_poll(state, &state->local->trigger);
+ remote_event_poll(state, &state->local->recycle);
}
/* Round up message sizes so that any space at the end of a slot is always big
** enough for a header. This relies on header size being a power of two, which
** has been verified earlier by a static assertion. */
-static inline unsigned int
-calc_stride(unsigned int size)
+static inline size_t
+calc_stride(size_t size)
{
/* Allow room for the header */
size += sizeof(VCHIQ_HEADER_T);
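
The diff context cuts calc_stride() off before the rounding itself; as a reminder of the idiom, rounding up to a power-of-two multiple is a single mask operation, which is why the static assertion mentioned above requires sizeof(VCHIQ_HEADER_T) to be a power of two. A generic sketch:

/* Round n up to the next multiple of align, where align is a power of
 * two -- the property guaranteed for sizeof(VCHIQ_HEADER_T). */
#include <assert.h>
#include <stddef.h>

static size_t round_up_pow2(size_t n, size_t align)
{
	return (n + align - 1) & ~(align - 1);
}

int main(void)
{
	assert(round_up_pow2(5, 8) == 8);	/* rounds up */
	assert(round_up_pow2(16, 8) == 16);	/* already a multiple */
	return 0;
}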
@@ -541,13 +538,13 @@ request_poll(VCHIQ_STATE_T *state, VCHIQ_SERVICE_T *service, int poll_type)
wmb();
/* ... and ensure the slot handler runs. */
- remote_event_signal_local(&state->local->trigger);
+ remote_event_signal_local(state, &state->local->trigger);
}
/* Called from queue_message, by the slot handler and application threads,
** with slot_mutex held */
static VCHIQ_HEADER_T *
-reserve_space(VCHIQ_STATE_T *state, int space, int is_blocking)
+reserve_space(VCHIQ_STATE_T *state, size_t space, int is_blocking)
{
VCHIQ_SHARED_STATE_T *local = state->local;
int tx_pos = state->local_tx_pos;
@@ -626,8 +623,8 @@ process_free_queue(VCHIQ_STATE_T *state)
char *data = (char *)SLOT_DATA_FROM_INDEX(state, slot_index);
int data_found = 0;
- vchiq_log_trace(vchiq_core_log_level, "%d: pfq %d=%x %x %x",
- state->id, slot_index, (unsigned int)data,
+ vchiq_log_trace(vchiq_core_log_level, "%d: pfq %d=%pK %x %x",
+ state->id, slot_index, data,
local->slot_queue_recycle, slot_queue_available);
/* Initialise the bitmask for services which have used this
@@ -659,16 +656,10 @@ process_free_queue(VCHIQ_STATE_T *state)
up(&service_quota->quota_event);
else if (count == 0) {
vchiq_log_error(vchiq_core_log_level,
- "service %d "
- "message_use_count=%d "
- "(header %x, msgid %x, "
- "header->msgid %x, "
- "header->size %x)",
+ "service %d message_use_count=%d (header %pK, msgid %x, header->msgid %x, header->size %x)",
port,
- service_quota->
- message_use_count,
- (unsigned int)header, msgid,
- header->msgid,
+ service_quota->message_use_count,
+ header, msgid, header->msgid,
header->size);
WARN(1, "invalid message use count\n");
}
@@ -690,26 +681,16 @@ process_free_queue(VCHIQ_STATE_T *state)
up(&service_quota->quota_event);
vchiq_log_trace(
vchiq_core_log_level,
- "%d: pfq:%d %x@%x - "
- "slot_use->%d",
+ "%d: pfq:%d %x@%pK - slot_use->%d",
state->id, port,
- header->size,
- (unsigned int)header,
+ header->size, header,
count - 1);
} else {
vchiq_log_error(
vchiq_core_log_level,
- "service %d "
- "slot_use_count"
- "=%d (header %x"
- ", msgid %x, "
- "header->msgid"
- " %x, header->"
- "size %x)",
- port, count,
- (unsigned int)header,
- msgid,
- header->msgid,
+ "service %d slot_use_count=%d (header %pK, msgid %x, header->msgid %x, header->size %x)",
+ port, count, header,
+ msgid, header->msgid,
header->size);
WARN(1, "bad slot use count\n");
}
@@ -721,10 +702,9 @@ process_free_queue(VCHIQ_STATE_T *state)
pos += calc_stride(header->size);
if (pos > VCHIQ_SLOT_SIZE) {
vchiq_log_error(vchiq_core_log_level,
- "pfq - pos %x: header %x, msgid %x, "
- "header->msgid %x, header->size %x",
- pos, (unsigned int)header, msgid,
- header->msgid, header->size);
+ "pfq - pos %x: header %pK, msgid %x, header->msgid %x, header->size %x",
+ pos, header, msgid, header->msgid,
+ header->size);
WARN(1, "invalid slot position\n");
}
}
@@ -746,18 +726,66 @@ process_free_queue(VCHIQ_STATE_T *state)
}
}
+static ssize_t
+memcpy_copy_callback(
+ void *context, void *dest,
+ size_t offset, size_t maxsize)
+{
+ void *src = context;
+
+	memcpy(dest, src + offset, maxsize);	/* dest is already advanced by the caller */
+ return maxsize;
+}
+
+static ssize_t
+copy_message_data(
+ ssize_t (*copy_callback)(void *context, void *dest,
+ size_t offset, size_t maxsize),
+ void *context,
+ void *dest,
+ size_t size)
+{
+ size_t pos = 0;
+
+ while (pos < size) {
+ ssize_t callback_result;
+ size_t max_bytes = size - pos;
+
+ callback_result =
+ copy_callback(context, dest + pos,
+ pos, max_bytes);
+
+ if (callback_result < 0)
+ return callback_result;
+
+ if (!callback_result)
+ return -EIO;
+
+ if (callback_result > max_bytes)
+ return -EIO;
+
+ pos += callback_result;
+ }
+
+ return size;
+}
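
For illustration: pairing copy_message_data() with memcpy_copy_callback() degenerates to a single bounds-checked memcpy, which is how the internal queue_message() callers below use it. A hypothetical check, assuming both static functions above are visible in the same translation unit:

/* Hypothetical test of the pairing above; copying a 12-byte payload
 * should return 12 and reproduce the source bytes exactly. */
#include <assert.h>
#include <string.h>
#include <sys/types.h>

int main(void)
{
	char src[12] = "hello vchiq";
	char dst[12] = { 0 };
	ssize_t n = copy_message_data(memcpy_copy_callback, src, dst,
				      sizeof(src));

	assert(n == (ssize_t)sizeof(src));
	assert(memcmp(src, dst, sizeof(src)) == 0);
	return 0;
}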
+
/* Called by the slot handler and application threads */
static VCHIQ_STATUS_T
queue_message(VCHIQ_STATE_T *state, VCHIQ_SERVICE_T *service,
- int msgid, const VCHIQ_ELEMENT_T *elements,
- int count, int size, int flags)
+ int msgid,
+ ssize_t (*copy_callback)(void *context, void *dest,
+ size_t offset, size_t maxsize),
+ void *context,
+ size_t size,
+ int flags)
{
VCHIQ_SHARED_STATE_T *local;
VCHIQ_SERVICE_QUOTA_T *service_quota = NULL;
VCHIQ_HEADER_T *header;
int type = VCHIQ_MSG_TYPE(msgid);
- unsigned int stride;
+ size_t stride;
local = state->local;
@@ -766,7 +794,7 @@ queue_message(VCHIQ_STATE_T *state, VCHIQ_SERVICE_T *service,
WARN_ON(!(stride <= VCHIQ_SLOT_SIZE));
if (!(flags & QMFLAGS_NO_MUTEX_LOCK) &&
- (mutex_lock_interruptible(&state->slot_mutex) != 0))
+ (mutex_lock_killable(&state->slot_mutex) != 0))
return VCHIQ_RETRY;
if (type == VCHIQ_MSG_DATA) {
@@ -822,7 +850,7 @@ queue_message(VCHIQ_STATE_T *state, VCHIQ_SERVICE_T *service,
service_quota->slot_quota))) {
spin_unlock(&quota_spinlock);
vchiq_log_trace(vchiq_core_log_level,
- "%d: qm:%d %s,%x - quota stall "
+ "%d: qm:%d %s,%zx - quota stall "
"(msg %d, slot %d)",
state->id, service->localport,
msg_type_str(type), size,
@@ -835,7 +863,7 @@ queue_message(VCHIQ_STATE_T *state, VCHIQ_SERVICE_T *service,
return VCHIQ_RETRY;
if (service->closing)
return VCHIQ_ERROR;
- if (mutex_lock_interruptible(&state->slot_mutex) != 0)
+ if (mutex_lock_killable(&state->slot_mutex) != 0)
return VCHIQ_RETRY;
if (service->srvstate != VCHIQ_SRVSTATE_OPEN) {
/* The service has been closed */
@@ -863,43 +891,37 @@ queue_message(VCHIQ_STATE_T *state, VCHIQ_SERVICE_T *service,
}
if (type == VCHIQ_MSG_DATA) {
- int i, pos;
+ ssize_t callback_result;
int tx_end_index;
int slot_use_count;
vchiq_log_info(vchiq_core_log_level,
- "%d: qm %s@%x,%x (%d->%d)",
- state->id,
- msg_type_str(VCHIQ_MSG_TYPE(msgid)),
- (unsigned int)header, size,
- VCHIQ_MSG_SRCPORT(msgid),
+ "%d: qm %s@%pK,%zx (%d->%d)",
+ state->id, msg_type_str(VCHIQ_MSG_TYPE(msgid)),
+ header, size, VCHIQ_MSG_SRCPORT(msgid),
VCHIQ_MSG_DSTPORT(msgid));
BUG_ON(!service);
BUG_ON((flags & (QMFLAGS_NO_MUTEX_LOCK |
QMFLAGS_NO_MUTEX_UNLOCK)) != 0);
- for (i = 0, pos = 0; i < (unsigned int)count;
- pos += elements[i++].size)
- if (elements[i].size) {
- if (vchiq_copy_from_user
- (header->data + pos, elements[i].data,
- (size_t) elements[i].size) !=
- VCHIQ_SUCCESS) {
- mutex_unlock(&state->slot_mutex);
- VCHIQ_SERVICE_STATS_INC(service,
+ callback_result =
+ copy_message_data(copy_callback, context,
+ header->data, size);
+
+ if (callback_result < 0) {
+ mutex_unlock(&state->slot_mutex);
+ VCHIQ_SERVICE_STATS_INC(service,
error_count);
- return VCHIQ_ERROR;
- }
- if (i == 0) {
- if (SRVTRACE_ENABLED(service,
- VCHIQ_LOG_INFO))
- vchiq_log_dump_mem("Sent", 0,
- header->data + pos,
- min(64u,
- elements[0].size));
- }
- }
+ return VCHIQ_ERROR;
+ }
+
+ if (SRVTRACE_ENABLED(service,
+ VCHIQ_LOG_INFO))
+ vchiq_log_dump_mem("Sent", 0,
+ header->data,
+ min((size_t)64,
+ (size_t)callback_result));
spin_lock(&quota_spinlock);
service_quota->message_use_count++;
@@ -927,7 +949,7 @@ queue_message(VCHIQ_STATE_T *state, VCHIQ_SERVICE_T *service,
if (slot_use_count)
vchiq_log_trace(vchiq_core_log_level,
- "%d: qm:%d %s,%x - slot_use->%d (hdr %p)",
+ "%d: qm:%d %s,%zx - slot_use->%d (hdr %p)",
state->id, service->localport,
msg_type_str(VCHIQ_MSG_TYPE(msgid)), size,
slot_use_count, header);
@@ -936,15 +958,22 @@ queue_message(VCHIQ_STATE_T *state, VCHIQ_SERVICE_T *service,
VCHIQ_SERVICE_STATS_ADD(service, ctrl_tx_bytes, size);
} else {
vchiq_log_info(vchiq_core_log_level,
- "%d: qm %s@%x,%x (%d->%d)", state->id,
+ "%d: qm %s@%pK,%zx (%d->%d)", state->id,
msg_type_str(VCHIQ_MSG_TYPE(msgid)),
- (unsigned int)header, size,
- VCHIQ_MSG_SRCPORT(msgid),
+ header, size, VCHIQ_MSG_SRCPORT(msgid),
VCHIQ_MSG_DSTPORT(msgid));
if (size != 0) {
- WARN_ON(!((count == 1) && (size == elements[0].size)));
- memcpy(header->data, elements[0].data,
- elements[0].size);
+ /* It is assumed for now that this code path
+ * only happens from calls inside this file.
+ *
+ * External callers are through the vchiq_queue_message
+ * path which always sets the type to be VCHIQ_MSG_DATA
+ *
+ * At first glance this appears to be correct but
+ * more review is needed.
+ */
+ copy_message_data(copy_callback, context,
+ header->data, size);
}
VCHIQ_STATS_INC(state, ctrl_tx_count);
}
@@ -960,7 +989,7 @@ queue_message(VCHIQ_STATE_T *state, VCHIQ_SERVICE_T *service,
: VCHIQ_MAKE_FOURCC('?', '?', '?', '?');
vchiq_log_info(SRVTRACE_LEVEL(service),
- "Sent Msg %s(%u) to %c%c%c%c s:%u d:%d len:%d",
+ "Sent Msg %s(%u) to %c%c%c%c s:%u d:%d len:%zu",
msg_type_str(VCHIQ_MSG_TYPE(msgid)),
VCHIQ_MSG_TYPE(msgid),
VCHIQ_FOURCC_AS_4CHARS(svc_fourcc),
@@ -990,19 +1019,24 @@ queue_message(VCHIQ_STATE_T *state, VCHIQ_SERVICE_T *service,
/* Called by the slot handler and application threads */
static VCHIQ_STATUS_T
queue_message_sync(VCHIQ_STATE_T *state, VCHIQ_SERVICE_T *service,
- int msgid, const VCHIQ_ELEMENT_T *elements,
- int count, int size, int is_blocking)
+ int msgid,
+ ssize_t (*copy_callback)(void *context, void *dest,
+ size_t offset, size_t maxsize),
+ void *context,
+ int size,
+ int is_blocking)
{
VCHIQ_SHARED_STATE_T *local;
VCHIQ_HEADER_T *header;
+ ssize_t callback_result;
local = state->local;
if ((VCHIQ_MSG_TYPE(msgid) != VCHIQ_MSG_RESUME) &&
- (mutex_lock_interruptible(&state->sync_mutex) != 0))
+ (mutex_lock_killable(&state->sync_mutex) != 0))
return VCHIQ_RETRY;
- remote_event_wait(&local->sync_release);
+ remote_event_wait(state, &local->sync_release);
rmb();
@@ -1017,52 +1051,34 @@ queue_message_sync(VCHIQ_STATE_T *state, VCHIQ_SERVICE_T *service,
state->id, oldmsgid);
}
- if (service) {
- int i, pos;
+ vchiq_log_info(vchiq_sync_log_level,
+ "%d: qms %s@%pK,%x (%d->%d)", state->id,
+ msg_type_str(VCHIQ_MSG_TYPE(msgid)),
+ header, size, VCHIQ_MSG_SRCPORT(msgid),
+ VCHIQ_MSG_DSTPORT(msgid));
- vchiq_log_info(vchiq_sync_log_level,
- "%d: qms %s@%x,%x (%d->%d)", state->id,
- msg_type_str(VCHIQ_MSG_TYPE(msgid)),
- (unsigned int)header, size,
- VCHIQ_MSG_SRCPORT(msgid),
- VCHIQ_MSG_DSTPORT(msgid));
+ callback_result =
+ copy_message_data(copy_callback, context,
+ header->data, size);
- for (i = 0, pos = 0; i < (unsigned int)count;
- pos += elements[i++].size)
- if (elements[i].size) {
- if (vchiq_copy_from_user
- (header->data + pos, elements[i].data,
- (size_t) elements[i].size) !=
- VCHIQ_SUCCESS) {
- mutex_unlock(&state->sync_mutex);
- VCHIQ_SERVICE_STATS_INC(service,
- error_count);
- return VCHIQ_ERROR;
- }
- if (i == 0) {
- if (vchiq_sync_log_level >=
- VCHIQ_LOG_TRACE)
- vchiq_log_dump_mem("Sent Sync",
- 0, header->data + pos,
- min(64u,
- elements[0].size));
- }
- }
+	if (callback_result < 0) {
+		mutex_unlock(&state->sync_mutex);
+		if (service)
+			VCHIQ_SERVICE_STATS_INC(service,
+				error_count);
+		return VCHIQ_ERROR;
+	}
+
+ if (service) {
+ if (SRVTRACE_ENABLED(service,
+ VCHIQ_LOG_INFO))
+ vchiq_log_dump_mem("Sent", 0,
+ header->data,
+ min((size_t)64,
+ (size_t)callback_result));
VCHIQ_SERVICE_STATS_INC(service, ctrl_tx_count);
VCHIQ_SERVICE_STATS_ADD(service, ctrl_tx_bytes, size);
} else {
- vchiq_log_info(vchiq_sync_log_level,
- "%d: qms %s@%x,%x (%d->%d)", state->id,
- msg_type_str(VCHIQ_MSG_TYPE(msgid)),
- (unsigned int)header, size,
- VCHIQ_MSG_SRCPORT(msgid),
- VCHIQ_MSG_DSTPORT(msgid));
- if (size != 0) {
- WARN_ON(!((count == 1) && (size == elements[0].size)));
- memcpy(header->data, elements[0].data,
- elements[0].size);
- }
VCHIQ_STATS_INC(state, ctrl_tx_count);
}
@@ -1175,11 +1191,16 @@ notify_bulks(VCHIQ_SERVICE_T *service, VCHIQ_BULK_QUEUE_T *queue,
VCHIQ_MSG_BULK_RX_DONE : VCHIQ_MSG_BULK_TX_DONE;
int msgid = VCHIQ_MAKE_MSG(msgtype, service->localport,
service->remoteport);
- VCHIQ_ELEMENT_T element = { &bulk->actual, 4 };
/* Only reply to non-dummy bulk requests */
if (bulk->remote_data) {
- status = queue_message(service->state, NULL,
- msgid, &element, 1, 4, 0);
+ status = queue_message(
+ service->state,
+ NULL,
+ msgid,
+ memcpy_copy_callback,
+ &bulk->actual,
+ 4,
+ 0);
if (status != VCHIQ_SUCCESS)
break;
}
@@ -1344,7 +1365,7 @@ resolve_bulks(VCHIQ_SERVICE_T *service, VCHIQ_BULK_QUEUE_T *queue)
WARN_ON(!((int)(queue->local_insert - queue->process) > 0));
WARN_ON(!((int)(queue->remote_insert - queue->process) > 0));
- rc = mutex_lock_interruptible(&state->bulk_transfer_mutex);
+ rc = mutex_lock_killable(&state->bulk_transfer_mutex);
if (rc != 0)
break;
@@ -1356,26 +1377,22 @@ resolve_bulks(VCHIQ_SERVICE_T *service, VCHIQ_BULK_QUEUE_T *queue)
"Send Bulk to" : "Recv Bulk from";
if (bulk->actual != VCHIQ_BULK_ACTUAL_ABORTED)
vchiq_log_info(SRVTRACE_LEVEL(service),
- "%s %c%c%c%c d:%d len:%d %x<->%x",
+ "%s %c%c%c%c d:%d len:%d %pK<->%pK",
header,
VCHIQ_FOURCC_AS_4CHARS(
service->base.fourcc),
- service->remoteport,
- bulk->size,
- (unsigned int)bulk->data,
- (unsigned int)bulk->remote_data);
+ service->remoteport, bulk->size,
+ bulk->data, bulk->remote_data);
else
vchiq_log_info(SRVTRACE_LEVEL(service),
"%s %c%c%c%c d:%d ABORTED - tx len:%d,"
- " rx len:%d %x<->%x",
+ " rx len:%d %pK<->%pK",
header,
VCHIQ_FOURCC_AS_4CHARS(
service->base.fourcc),
service->remoteport,
- bulk->size,
- bulk->remote_size,
- (unsigned int)bulk->data,
- (unsigned int)bulk->remote_data);
+ bulk->size, bulk->remote_size,
+ bulk->data, bulk->remote_data);
}
vchiq_complete_bulk(bulk);
@@ -1511,9 +1528,8 @@ parse_open(VCHIQ_STATE_T *state, VCHIQ_HEADER_T *header)
fourcc = payload->fourcc;
vchiq_log_info(vchiq_core_log_level,
- "%d: prs OPEN@%x (%d->'%c%c%c%c')",
- state->id, (unsigned int)header,
- localport,
+ "%d: prs OPEN@%pK (%d->'%c%c%c%c')",
+ state->id, header, localport,
VCHIQ_FOURCC_AS_4CHARS(fourcc));
service = get_listening_service(state, fourcc);
@@ -1544,10 +1560,6 @@ parse_open(VCHIQ_STATE_T *state, VCHIQ_HEADER_T *header)
struct vchiq_openack_payload ack_payload = {
service->version
};
- VCHIQ_ELEMENT_T body = {
- &ack_payload,
- sizeof(ack_payload)
- };
if (state->version_common <
VCHIQ_VERSION_SYNCHRONOUS_MODE)
@@ -1557,21 +1569,28 @@ parse_open(VCHIQ_STATE_T *state, VCHIQ_HEADER_T *header)
if (service->sync &&
(state->version_common >=
VCHIQ_VERSION_SYNCHRONOUS_MODE)) {
- if (queue_message_sync(state, NULL,
+ if (queue_message_sync(
+ state,
+ NULL,
VCHIQ_MAKE_MSG(
VCHIQ_MSG_OPENACK,
service->localport,
remoteport),
- &body, 1, sizeof(ack_payload),
+ memcpy_copy_callback,
+ &ack_payload,
+ sizeof(ack_payload),
0) == VCHIQ_RETRY)
goto bail_not_ready;
} else {
- if (queue_message(state, NULL,
- VCHIQ_MAKE_MSG(
+ if (queue_message(state,
+ NULL,
+ VCHIQ_MAKE_MSG(
VCHIQ_MSG_OPENACK,
service->localport,
remoteport),
- &body, 1, sizeof(ack_payload),
+ memcpy_copy_callback,
+ &ack_payload,
+ sizeof(ack_payload),
0) == VCHIQ_RETRY)
goto bail_not_ready;
}
@@ -1650,7 +1669,7 @@ parse_rx_slots(VCHIQ_STATE_T *state)
header = (VCHIQ_HEADER_T *)(state->rx_data +
(state->rx_pos & VCHIQ_SLOT_MASK));
- DEBUG_VALUE(PARSE_HEADER, (int)header);
+ DEBUG_VALUE(PARSE_HEADER, (int)(long)header);
msgid = header->msgid;
DEBUG_VALUE(PARSE_MSGID, msgid);
size = header->size;
@@ -1684,21 +1703,18 @@ parse_rx_slots(VCHIQ_STATE_T *state)
remoteport);
if (service)
vchiq_log_warning(vchiq_core_log_level,
- "%d: prs %s@%x (%d->%d) - "
- "found connected service %d",
+ "%d: prs %s@%pK (%d->%d) - found connected service %d",
state->id, msg_type_str(type),
- (unsigned int)header,
- remoteport, localport,
+ header, remoteport, localport,
service->localport);
}
if (!service) {
vchiq_log_error(vchiq_core_log_level,
- "%d: prs %s@%x (%d->%d) - "
- "invalid/closed service %d",
+ "%d: prs %s@%pK (%d->%d) - invalid/closed service %d",
state->id, msg_type_str(type),
- (unsigned int)header,
- remoteport, localport, localport);
+ header, remoteport, localport,
+ localport);
goto skip_message;
}
break;
@@ -1723,12 +1739,11 @@ parse_rx_slots(VCHIQ_STATE_T *state)
min(64, size));
}
- if (((unsigned int)header & VCHIQ_SLOT_MASK) + calc_stride(size)
- > VCHIQ_SLOT_SIZE) {
+ if (((unsigned long)header & VCHIQ_SLOT_MASK) +
+ calc_stride(size) > VCHIQ_SLOT_SIZE) {
vchiq_log_error(vchiq_core_log_level,
- "header %x (msgid %x) - size %x too big for "
- "slot",
- (unsigned int)header, (unsigned int)msgid,
+ "header %pK (msgid %x) - size %x too big for slot",
+ header, (unsigned int)msgid,
(unsigned int)size);
WARN(1, "oversized for slot\n");
}
@@ -1747,9 +1762,9 @@ parse_rx_slots(VCHIQ_STATE_T *state)
service->peer_version = payload->version;
}
vchiq_log_info(vchiq_core_log_level,
- "%d: prs OPENACK@%x,%x (%d->%d) v:%d",
- state->id, (unsigned int)header, size,
- remoteport, localport, service->peer_version);
+ "%d: prs OPENACK@%pK,%x (%d->%d) v:%d",
+ state->id, header, size, remoteport, localport,
+ service->peer_version);
if (service->srvstate ==
VCHIQ_SRVSTATE_OPENING) {
service->remoteport = remoteport;
@@ -1765,9 +1780,8 @@ parse_rx_slots(VCHIQ_STATE_T *state)
WARN_ON(size != 0); /* There should be no data */
vchiq_log_info(vchiq_core_log_level,
- "%d: prs CLOSE@%x (%d->%d)",
- state->id, (unsigned int)header,
- remoteport, localport);
+ "%d: prs CLOSE@%pK (%d->%d)",
+ state->id, header, remoteport, localport);
mark_service_closing_internal(service, 1);
@@ -1783,9 +1797,8 @@ parse_rx_slots(VCHIQ_STATE_T *state)
break;
case VCHIQ_MSG_DATA:
vchiq_log_info(vchiq_core_log_level,
- "%d: prs DATA@%x,%x (%d->%d)",
- state->id, (unsigned int)header, size,
- remoteport, localport);
+ "%d: prs DATA@%pK,%x (%d->%d)",
+ state->id, header, size, remoteport, localport);
if ((service->remoteport == remoteport)
&& (service->srvstate ==
@@ -1808,8 +1821,7 @@ parse_rx_slots(VCHIQ_STATE_T *state)
break;
case VCHIQ_MSG_CONNECT:
vchiq_log_info(vchiq_core_log_level,
- "%d: prs CONNECT@%x",
- state->id, (unsigned int)header);
+ "%d: prs CONNECT@%pK", state->id, header);
state->version_common = ((VCHIQ_SLOT_ZERO_T *)
state->slot_data)->version;
up(&state->connect);
@@ -1827,7 +1839,7 @@ parse_rx_slots(VCHIQ_STATE_T *state)
int resolved = 0;
DEBUG_TRACE(PARSE_LINE);
- if (mutex_lock_interruptible(
+ if (mutex_lock_killable(
&service->bulk_mutex) != 0) {
DEBUG_TRACE(PARSE_LINE);
goto bail_not_ready;
@@ -1838,17 +1850,15 @@ parse_rx_slots(VCHIQ_STATE_T *state)
bulk = &queue->bulks[
BULK_INDEX(queue->remote_insert)];
bulk->remote_data =
- (void *)((int *)header->data)[0];
+ (void *)(long)((int *)header->data)[0];
bulk->remote_size = ((int *)header->data)[1];
wmb();
vchiq_log_info(vchiq_core_log_level,
- "%d: prs %s@%x (%d->%d) %x@%x",
+ "%d: prs %s@%pK (%d->%d) %x@%pK",
state->id, msg_type_str(type),
- (unsigned int)header,
- remoteport, localport,
- bulk->remote_size,
- (unsigned int)bulk->remote_data);
+ header, remoteport, localport,
+ bulk->remote_size, bulk->remote_data);
queue->remote_insert++;
@@ -1893,7 +1903,7 @@ parse_rx_slots(VCHIQ_STATE_T *state)
&service->bulk_rx : &service->bulk_tx;
DEBUG_TRACE(PARSE_LINE);
- if (mutex_lock_interruptible(
+ if (mutex_lock_killable(
&service->bulk_mutex) != 0) {
DEBUG_TRACE(PARSE_LINE);
goto bail_not_ready;
@@ -1901,11 +1911,10 @@ parse_rx_slots(VCHIQ_STATE_T *state)
if ((int)(queue->remote_insert -
queue->local_insert) >= 0) {
vchiq_log_error(vchiq_core_log_level,
- "%d: prs %s@%x (%d->%d) "
+ "%d: prs %s@%pK (%d->%d) "
"unexpected (ri=%d,li=%d)",
state->id, msg_type_str(type),
- (unsigned int)header,
- remoteport, localport,
+ header, remoteport, localport,
queue->remote_insert,
queue->local_insert);
mutex_unlock(&service->bulk_mutex);
@@ -1921,11 +1930,10 @@ parse_rx_slots(VCHIQ_STATE_T *state)
queue->remote_insert++;
vchiq_log_info(vchiq_core_log_level,
- "%d: prs %s@%x (%d->%d) %x@%x",
+ "%d: prs %s@%pK (%d->%d) %x@%pK",
state->id, msg_type_str(type),
- (unsigned int)header,
- remoteport, localport,
- bulk->actual, (unsigned int)bulk->data);
+ header, remoteport, localport,
+ bulk->actual, bulk->data);
vchiq_log_trace(vchiq_core_log_level,
"%d: prs:%d %cx li=%x ri=%x p=%x",
@@ -1947,14 +1955,14 @@ parse_rx_slots(VCHIQ_STATE_T *state)
break;
case VCHIQ_MSG_PADDING:
vchiq_log_trace(vchiq_core_log_level,
- "%d: prs PADDING@%x,%x",
- state->id, (unsigned int)header, size);
+ "%d: prs PADDING@%pK,%x",
+ state->id, header, size);
break;
case VCHIQ_MSG_PAUSE:
/* If initiated, signal the application thread */
vchiq_log_trace(vchiq_core_log_level,
- "%d: prs PAUSE@%x,%x",
- state->id, (unsigned int)header, size);
+ "%d: prs PAUSE@%pK,%x",
+ state->id, header, size);
if (state->conn_state == VCHIQ_CONNSTATE_PAUSED) {
vchiq_log_error(vchiq_core_log_level,
"%d: PAUSE received in state PAUSED",
@@ -1977,8 +1985,8 @@ parse_rx_slots(VCHIQ_STATE_T *state)
break;
case VCHIQ_MSG_RESUME:
vchiq_log_trace(vchiq_core_log_level,
- "%d: prs RESUME@%x,%x",
- state->id, (unsigned int)header, size);
+ "%d: prs RESUME@%pK,%x",
+ state->id, header, size);
/* Release the slot mutex */
mutex_unlock(&state->slot_mutex);
if (state->is_master)
@@ -1999,8 +2007,8 @@ parse_rx_slots(VCHIQ_STATE_T *state)
default:
vchiq_log_error(vchiq_core_log_level,
- "%d: prs invalid msgid %x@%x,%x",
- state->id, msgid, (unsigned int)header, size);
+ "%d: prs invalid msgid %x@%pK,%x",
+ state->id, msgid, header, size);
WARN(1, "invalid message\n");
break;
}
@@ -2039,7 +2047,7 @@ slot_handler_func(void *v)
while (1) {
DEBUG_COUNT(SLOT_HANDLER_COUNT);
DEBUG_TRACE(SLOT_HANDLER_LINE);
- remote_event_wait(&local->trigger);
+ remote_event_wait(state, &local->trigger);
rmb();
@@ -2128,7 +2136,7 @@ recycle_func(void *v)
VCHIQ_SHARED_STATE_T *local = state->local;
while (1) {
- remote_event_wait(&local->recycle);
+ remote_event_wait(state, &local->recycle);
process_free_queue(state);
}
@@ -2151,7 +2159,7 @@ sync_func(void *v)
int type;
unsigned int localport, remoteport;
- remote_event_wait(&local->sync_trigger);
+ remote_event_wait(state, &local->sync_trigger);
rmb();
@@ -2165,11 +2173,9 @@ sync_func(void *v)
if (!service) {
vchiq_log_error(vchiq_sync_log_level,
- "%d: sf %s@%x (%d->%d) - "
- "invalid/closed service %d",
+ "%d: sf %s@%pK (%d->%d) - invalid/closed service %d",
state->id, msg_type_str(type),
- (unsigned int)header,
- remoteport, localport, localport);
+ header, remoteport, localport, localport);
release_message_sync(state, header);
continue;
}
@@ -2199,9 +2205,9 @@ sync_func(void *v)
service->peer_version = payload->version;
}
vchiq_log_info(vchiq_sync_log_level,
- "%d: sf OPENACK@%x,%x (%d->%d) v:%d",
- state->id, (unsigned int)header, size,
- remoteport, localport, service->peer_version);
+ "%d: sf OPENACK@%pK,%x (%d->%d) v:%d",
+ state->id, header, size, remoteport, localport,
+ service->peer_version);
if (service->srvstate == VCHIQ_SRVSTATE_OPENING) {
service->remoteport = remoteport;
vchiq_set_service_state(service,
@@ -2214,9 +2220,8 @@ sync_func(void *v)
case VCHIQ_MSG_DATA:
vchiq_log_trace(vchiq_sync_log_level,
- "%d: sf DATA@%x,%x (%d->%d)",
- state->id, (unsigned int)header, size,
- remoteport, localport);
+ "%d: sf DATA@%pK,%x (%d->%d)",
+ state->id, header, size, remoteport, localport);
if ((service->remoteport == remoteport) &&
(service->srvstate ==
@@ -2234,8 +2239,8 @@ sync_func(void *v)
default:
vchiq_log_error(vchiq_sync_log_level,
- "%d: sf unexpected msgid %x@%x,%x",
- state->id, msgid, (unsigned int)header, size);
+ "%d: sf unexpected msgid %x@%pK,%x",
+ state->id, msgid, header, size);
release_message_sync(state, header);
break;
}
@@ -2268,7 +2273,8 @@ get_conn_state_name(VCHIQ_CONNSTATE_T conn_state)
VCHIQ_SLOT_ZERO_T *
vchiq_init_slots(void *mem_base, int mem_size)
{
- int mem_align = (VCHIQ_SLOT_SIZE - (int)mem_base) & VCHIQ_SLOT_MASK;
+ int mem_align =
+ (int)((VCHIQ_SLOT_SIZE - (long)mem_base) & VCHIQ_SLOT_MASK);
VCHIQ_SLOT_ZERO_T *slot_zero =
(VCHIQ_SLOT_ZERO_T *)((char *)mem_base + mem_align);
int num_slots = (mem_size - mem_align)/VCHIQ_SLOT_SIZE;
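
As an aside, mem_align above is just the byte gap from mem_base to the next VCHIQ_SLOT_SIZE boundary, computed with the usual power-of-two trick. A tiny sketch, with SLOT_SIZE standing in for VCHIQ_SLOT_SIZE:

#include <assert.h>
#include <stdint.h>

#define SLOT_SIZE 4096u	/* stand-in for VCHIQ_SLOT_SIZE (power of two) */

/* Bytes needed to advance base to the next SLOT_SIZE boundary. */
static uint32_t align_gap(uintptr_t base)
{
	return (uint32_t)((SLOT_SIZE - base) & (SLOT_SIZE - 1));
}

int main(void)
{
	assert(align_gap(0x1000) == 0);		/* already aligned */
	assert(align_gap(0x1004) == 0xFFC);	/* 4092 bytes to next slot */
	return 0;
}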
@@ -2316,16 +2322,16 @@ vchiq_init_state(VCHIQ_STATE_T *state, VCHIQ_SLOT_ZERO_T *slot_zero,
int i;
vchiq_log_warning(vchiq_core_log_level,
- "%s: slot_zero = 0x%08lx, is_master = %d",
- __func__, (unsigned long)slot_zero, is_master);
+ "%s: slot_zero = %pK, is_master = %d",
+ __func__, slot_zero, is_master);
/* Check the input configuration */
if (slot_zero->magic != VCHIQ_MAGIC) {
vchiq_loud_error_header();
vchiq_loud_error("Invalid VCHIQ magic value found.");
- vchiq_loud_error("slot_zero=%x: magic=%x (expected %x)",
- (unsigned int)slot_zero, slot_zero->magic, VCHIQ_MAGIC);
+ vchiq_loud_error("slot_zero=%pK: magic=%x (expected %x)",
+ slot_zero, slot_zero->magic, VCHIQ_MAGIC);
vchiq_loud_error_footer();
return VCHIQ_ERROR;
}
@@ -2333,10 +2339,8 @@ vchiq_init_state(VCHIQ_STATE_T *state, VCHIQ_SLOT_ZERO_T *slot_zero,
if (slot_zero->version < VCHIQ_VERSION_MIN) {
vchiq_loud_error_header();
vchiq_loud_error("Incompatible VCHIQ versions found.");
- vchiq_loud_error("slot_zero=%x: VideoCore version=%d "
- "(minimum %d)",
- (unsigned int)slot_zero, slot_zero->version,
- VCHIQ_VERSION_MIN);
+ vchiq_loud_error("slot_zero=%pK: VideoCore version=%d (minimum %d)",
+ slot_zero, slot_zero->version, VCHIQ_VERSION_MIN);
vchiq_loud_error("Restart with a newer VideoCore image.");
vchiq_loud_error_footer();
return VCHIQ_ERROR;
@@ -2345,10 +2349,8 @@ vchiq_init_state(VCHIQ_STATE_T *state, VCHIQ_SLOT_ZERO_T *slot_zero,
if (VCHIQ_VERSION < slot_zero->version_min) {
vchiq_loud_error_header();
vchiq_loud_error("Incompatible VCHIQ versions found.");
- vchiq_loud_error("slot_zero=%x: version=%d (VideoCore "
- "minimum %d)",
- (unsigned int)slot_zero, VCHIQ_VERSION,
- slot_zero->version_min);
+ vchiq_loud_error("slot_zero=%pK: version=%d (VideoCore minimum %d)",
+ slot_zero, VCHIQ_VERSION, slot_zero->version_min);
vchiq_loud_error("Restart with a newer kernel.");
vchiq_loud_error_footer();
return VCHIQ_ERROR;
@@ -2360,26 +2362,20 @@ vchiq_init_state(VCHIQ_STATE_T *state, VCHIQ_SLOT_ZERO_T *slot_zero,
(slot_zero->max_slots_per_side != VCHIQ_MAX_SLOTS_PER_SIDE)) {
vchiq_loud_error_header();
if (slot_zero->slot_zero_size != sizeof(VCHIQ_SLOT_ZERO_T))
- vchiq_loud_error("slot_zero=%x: slot_zero_size=%x "
- "(expected %x)",
- (unsigned int)slot_zero,
- slot_zero->slot_zero_size,
- sizeof(VCHIQ_SLOT_ZERO_T));
+ vchiq_loud_error("slot_zero=%pK: slot_zero_size=%d (expected %d)",
+ slot_zero, slot_zero->slot_zero_size,
+ (int)sizeof(VCHIQ_SLOT_ZERO_T));
if (slot_zero->slot_size != VCHIQ_SLOT_SIZE)
- vchiq_loud_error("slot_zero=%x: slot_size=%d "
- "(expected %d",
- (unsigned int)slot_zero, slot_zero->slot_size,
+ vchiq_loud_error("slot_zero=%pK: slot_size=%d (expected %d)",
+ slot_zero, slot_zero->slot_size,
VCHIQ_SLOT_SIZE);
if (slot_zero->max_slots != VCHIQ_MAX_SLOTS)
- vchiq_loud_error("slot_zero=%x: max_slots=%d "
- "(expected %d)",
- (unsigned int)slot_zero, slot_zero->max_slots,
+ vchiq_loud_error("slot_zero=%pK: max_slots=%d (expected %d)",
+ slot_zero, slot_zero->max_slots,
VCHIQ_MAX_SLOTS);
if (slot_zero->max_slots_per_side != VCHIQ_MAX_SLOTS_PER_SIDE)
- vchiq_loud_error("slot_zero=%x: max_slots_per_side=%d "
- "(expected %d)",
- (unsigned int)slot_zero,
- slot_zero->max_slots_per_side,
+ vchiq_loud_error("slot_zero=%pK: max_slots_per_side=%d (expected %d)",
+ slot_zero, slot_zero->max_slots_per_side,
VCHIQ_MAX_SLOTS_PER_SIDE);
vchiq_loud_error_footer();
return VCHIQ_ERROR;
@@ -2463,24 +2459,24 @@ vchiq_init_state(VCHIQ_STATE_T *state, VCHIQ_SLOT_ZERO_T *slot_zero,
state->data_use_count = 0;
state->data_quota = state->slot_queue_available - 1;
- local->trigger.event = &state->trigger_event;
- remote_event_create(&local->trigger);
+ local->trigger.event = offsetof(VCHIQ_STATE_T, trigger_event);
+ remote_event_create(state, &local->trigger);
local->tx_pos = 0;
- local->recycle.event = &state->recycle_event;
- remote_event_create(&local->recycle);
+ local->recycle.event = offsetof(VCHIQ_STATE_T, recycle_event);
+ remote_event_create(state, &local->recycle);
local->slot_queue_recycle = state->slot_queue_available;
- local->sync_trigger.event = &state->sync_trigger_event;
- remote_event_create(&local->sync_trigger);
+ local->sync_trigger.event = offsetof(VCHIQ_STATE_T, sync_trigger_event);
+ remote_event_create(state, &local->sync_trigger);
- local->sync_release.event = &state->sync_release_event;
- remote_event_create(&local->sync_release);
+ local->sync_release.event = offsetof(VCHIQ_STATE_T, sync_release_event);
+ remote_event_create(state, &local->sync_release);
/* At start-of-day, the slot is empty and available */
((VCHIQ_HEADER_T *)SLOT_DATA_FROM_INDEX(state, local->slot_sync))->msgid
= VCHIQ_MSGID_PADDING;
- remote_event_signal_local(&local->sync_release);
+ remote_event_signal_local(state, &local->sync_release);
local->debug[DEBUG_ENTRIES] = DEBUG_MAX;
@@ -2494,7 +2490,7 @@ vchiq_init_state(VCHIQ_STATE_T *state, VCHIQ_SLOT_ZERO_T *slot_zero,
(void *)state,
threadname);
- if (state->slot_handler_thread == NULL) {
+ if (IS_ERR(state->slot_handler_thread)) {
vchiq_loud_error_header();
vchiq_loud_error("couldn't create thread %s", threadname);
vchiq_loud_error_footer();
@@ -2507,7 +2503,7 @@ vchiq_init_state(VCHIQ_STATE_T *state, VCHIQ_SLOT_ZERO_T *slot_zero,
state->recycle_thread = kthread_create(&recycle_func,
(void *)state,
threadname);
- if (state->recycle_thread == NULL) {
+ if (IS_ERR(state->recycle_thread)) {
vchiq_loud_error_header();
vchiq_loud_error("couldn't create thread %s", threadname);
vchiq_loud_error_footer();
@@ -2520,7 +2516,7 @@ vchiq_init_state(VCHIQ_STATE_T *state, VCHIQ_SLOT_ZERO_T *slot_zero,
state->sync_thread = kthread_create(&sync_func,
(void *)state,
threadname);
- if (state->sync_thread == NULL) {
+ if (IS_ERR(state->sync_thread)) {
vchiq_loud_error_header();
vchiq_loud_error("couldn't create thread %s", threadname);
vchiq_loud_error_footer();
@@ -2684,14 +2680,19 @@ vchiq_open_service_internal(VCHIQ_SERVICE_T *service, int client_id)
service->version,
service->version_min
};
- VCHIQ_ELEMENT_T body = { &payload, sizeof(payload) };
VCHIQ_STATUS_T status = VCHIQ_SUCCESS;
service->client_id = client_id;
vchiq_use_service_internal(service);
- status = queue_message(service->state, NULL,
- VCHIQ_MAKE_MSG(VCHIQ_MSG_OPEN, service->localport, 0),
- &body, 1, sizeof(payload), QMFLAGS_IS_BLOCKING);
+ status = queue_message(service->state,
+ NULL,
+ VCHIQ_MAKE_MSG(VCHIQ_MSG_OPEN,
+ service->localport,
+ 0),
+ memcpy_copy_callback,
+ &payload,
+ sizeof(payload),
+ QMFLAGS_IS_BLOCKING);
if (status == VCHIQ_SUCCESS) {
/* Wait for the ACK/NAK */
if (down_interruptible(&service->remove_event) != 0) {
@@ -2756,20 +2757,16 @@ release_service_messages(VCHIQ_SERVICE_T *service)
if ((port == service->localport) &&
(msgid & VCHIQ_MSGID_CLAIMED)) {
vchiq_log_info(vchiq_core_log_level,
- " fsi - hdr %x",
- (unsigned int)header);
+ " fsi - hdr %pK", header);
release_slot(state, slot_info, header,
NULL);
}
pos += calc_stride(header->size);
if (pos > VCHIQ_SLOT_SIZE) {
vchiq_log_error(vchiq_core_log_level,
- "fsi - pos %x: header %x, "
- "msgid %x, header->msgid %x, "
- "header->size %x",
- pos, (unsigned int)header,
- msgid, header->msgid,
- header->size);
+ "fsi - pos %x: header %pK, msgid %x, header->msgid %x, header->size %x",
+ pos, header, msgid,
+ header->msgid, header->size);
WARN(1, "invalid slot position\n");
}
}
@@ -2783,7 +2780,7 @@ do_abort_bulks(VCHIQ_SERVICE_T *service)
VCHIQ_STATUS_T status;
/* Abort any outstanding bulk transfers */
- if (mutex_lock_interruptible(&service->bulk_mutex) != 0)
+ if (mutex_lock_killable(&service->bulk_mutex) != 0)
return 0;
abort_outstanding_bulks(service, &service->bulk_tx);
abort_outstanding_bulks(service, &service->bulk_rx);
@@ -3303,7 +3300,7 @@ vchiq_bulk_transfer(VCHIQ_SERVICE_HANDLE_T handle,
queue = (dir == VCHIQ_BULK_TRANSMIT) ?
&service->bulk_tx : &service->bulk_rx;
- if (mutex_lock_interruptible(&service->bulk_mutex) != 0) {
+ if (mutex_lock_killable(&service->bulk_mutex) != 0) {
status = VCHIQ_RETRY;
goto error_exit;
}
@@ -3317,7 +3314,7 @@ vchiq_bulk_transfer(VCHIQ_SERVICE_HANDLE_T handle,
status = VCHIQ_RETRY;
goto error_exit;
}
- if (mutex_lock_interruptible(&service->bulk_mutex)
+ if (mutex_lock_killable(&service->bulk_mutex)
!= 0) {
status = VCHIQ_RETRY;
goto error_exit;
@@ -3341,14 +3338,13 @@ vchiq_bulk_transfer(VCHIQ_SERVICE_HANDLE_T handle,
wmb();
vchiq_log_info(vchiq_core_log_level,
- "%d: bt (%d->%d) %cx %x@%x %x",
- state->id,
- service->localport, service->remoteport, dir_char,
- size, (unsigned int)bulk->data, (unsigned int)userdata);
+ "%d: bt (%d->%d) %cx %x@%pK %pK",
+ state->id, service->localport, service->remoteport, dir_char,
+ size, bulk->data, userdata);
/* The slot mutex must be held when the service is being closed, so
claim it here to ensure that isn't happening */
- if (mutex_lock_interruptible(&state->slot_mutex) != 0) {
+ if (mutex_lock_killable(&state->slot_mutex) != 0) {
status = VCHIQ_RETRY;
goto cancel_bulk_error_exit;
}
@@ -3363,16 +3359,19 @@ vchiq_bulk_transfer(VCHIQ_SERVICE_HANDLE_T handle,
(dir == VCHIQ_BULK_TRANSMIT) ?
VCHIQ_POLL_TXNOTIFY : VCHIQ_POLL_RXNOTIFY);
} else {
- int payload[2] = { (int)bulk->data, bulk->size };
- VCHIQ_ELEMENT_T element = { payload, sizeof(payload) };
-
- status = queue_message(state, NULL,
- VCHIQ_MAKE_MSG(dir_msgtype,
- service->localport, service->remoteport),
- &element, 1, sizeof(payload),
- QMFLAGS_IS_BLOCKING |
- QMFLAGS_NO_MUTEX_LOCK |
- QMFLAGS_NO_MUTEX_UNLOCK);
+ int payload[2] = { (int)(long)bulk->data, bulk->size };
+
+ status = queue_message(state,
+ NULL,
+ VCHIQ_MAKE_MSG(dir_msgtype,
+ service->localport,
+ service->remoteport),
+ memcpy_copy_callback,
+ &payload,
+ sizeof(payload),
+ QMFLAGS_IS_BLOCKING |
+ QMFLAGS_NO_MUTEX_LOCK |
+ QMFLAGS_NO_MUTEX_UNLOCK);
if (status != VCHIQ_SUCCESS) {
goto unlock_both_error_exit;
}
@@ -3418,26 +3417,22 @@ error_exit:
VCHIQ_STATUS_T
vchiq_queue_message(VCHIQ_SERVICE_HANDLE_T handle,
- const VCHIQ_ELEMENT_T *elements, unsigned int count)
+ ssize_t (*copy_callback)(void *context, void *dest,
+ size_t offset, size_t maxsize),
+ void *context,
+ size_t size)
{
VCHIQ_SERVICE_T *service = find_service_by_handle(handle);
VCHIQ_STATUS_T status = VCHIQ_ERROR;
- unsigned int size = 0;
- unsigned int i;
-
if (!service ||
(vchiq_check_service(service) != VCHIQ_SUCCESS))
goto error_exit;
- for (i = 0; i < (unsigned int)count; i++) {
- if (elements[i].size) {
- if (elements[i].data == NULL) {
- VCHIQ_SERVICE_STATS_INC(service, error_count);
- goto error_exit;
- }
- size += elements[i].size;
- }
+ if (!size) {
+ VCHIQ_SERVICE_STATS_INC(service, error_count);
+ goto error_exit;
}
if (size > VCHIQ_MAX_MSG_SIZE) {
@@ -3451,14 +3446,14 @@ vchiq_queue_message(VCHIQ_SERVICE_HANDLE_T handle,
VCHIQ_MAKE_MSG(VCHIQ_MSG_DATA,
service->localport,
service->remoteport),
- elements, count, size, 1);
+ copy_callback, context, size, 1);
break;
case VCHIQ_SRVSTATE_OPENSYNC:
status = queue_message_sync(service->state, service,
VCHIQ_MAKE_MSG(VCHIQ_MSG_DATA,
service->localport,
service->remoteport),
- elements, count, size, 1);
+ copy_callback, context, size, 1);
break;
default:
status = VCHIQ_ERROR;
@@ -3691,13 +3686,11 @@ vchiq_dump_state(void *dump_context, VCHIQ_STATE_T *state)
vchiq_dump(dump_context, buf, len + 1);
len = snprintf(buf, sizeof(buf),
- " tx_pos=%x(@%x), rx_pos=%x(@%x)",
+ " tx_pos=%x(@%pK), rx_pos=%x(@%pK)",
state->local->tx_pos,
- (uint32_t)state->tx_data +
- (state->local_tx_pos & VCHIQ_SLOT_MASK),
+ state->tx_data + (state->local_tx_pos & VCHIQ_SLOT_MASK),
state->rx_pos,
- (uint32_t)state->rx_data +
- (state->rx_pos & VCHIQ_SLOT_MASK));
+ state->rx_data + (state->rx_pos & VCHIQ_SLOT_MASK));
vchiq_dump(dump_context, buf, len + 1);
len = snprintf(buf, sizeof(buf),
@@ -3747,7 +3740,7 @@ vchiq_dump_service_state(void *dump_context, VCHIQ_SERVICE_T *service)
char buf[80];
int len;
- len = snprintf(buf, sizeof(buf), "Service %d: %s (ref %u)",
+ len = snprintf(buf, sizeof(buf), "Service %u: %s (ref %u)",
service->localport, srvstate_names[service->srvstate],
service->ref_count - 1); /*Don't include the lock just taken*/
@@ -3759,7 +3752,7 @@ vchiq_dump_service_state(void *dump_context, VCHIQ_SERVICE_T *service)
int tx_pending, rx_pending;
if (service->remoteport != VCHIQ_PORT_FREE) {
int len2 = snprintf(remoteport, sizeof(remoteport),
- "%d", service->remoteport);
+ "%u", service->remoteport);
if (service->public_fourcc != VCHIQ_FOURCC_INVALID)
snprintf(remoteport + len2,
sizeof(remoteport) - len2,
@@ -3888,26 +3881,26 @@ VCHIQ_STATUS_T vchiq_send_remote_use_active(VCHIQ_STATE_T *state)
return status;
}
-void vchiq_log_dump_mem(const char *label, uint32_t addr, const void *voidMem,
- size_t numBytes)
+void vchiq_log_dump_mem(const char *label, uint32_t addr, const void *void_mem,
+ size_t num_bytes)
{
- const uint8_t *mem = (const uint8_t *)voidMem;
+ const uint8_t *mem = (const uint8_t *)void_mem;
size_t offset;
- char lineBuf[100];
+ char line_buf[100];
char *s;
- while (numBytes > 0) {
- s = lineBuf;
+ while (num_bytes > 0) {
+ s = line_buf;
for (offset = 0; offset < 16; offset++) {
- if (offset < numBytes)
+ if (offset < num_bytes)
s += snprintf(s, 4, "%02x ", mem[offset]);
else
s += snprintf(s, 4, " ");
}
for (offset = 0; offset < 16; offset++) {
- if (offset < numBytes) {
+ if (offset < num_bytes) {
uint8_t ch = mem[offset];
if ((ch < ' ') || (ch > '~'))
@@ -3919,16 +3912,16 @@ void vchiq_log_dump_mem(const char *label, uint32_t addr, const void *voidMem,
if ((label != NULL) && (*label != '\0'))
vchiq_log_trace(VCHIQ_LOG_TRACE,
- "%s: %08x: %s", label, addr, lineBuf);
+ "%s: %08x: %s", label, addr, line_buf);
else
vchiq_log_trace(VCHIQ_LOG_TRACE,
- "%08x: %s", addr, lineBuf);
+ "%08x: %s", addr, line_buf);
addr += 16;
mem += 16;
- if (numBytes > 16)
- numBytes -= 16;
+ if (num_bytes > 16)
+ num_bytes -= 16;
else
- numBytes = 0;
+ num_bytes = 0;
}
}
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.h b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.h
index 9be484c776d0..9e164652548a 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.h
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.h
@@ -184,11 +184,11 @@ enum {
#define DEBUG_INITIALISE(local) int *debug_ptr = (local)->debug;
#define DEBUG_TRACE(d) \
- do { debug_ptr[DEBUG_ ## d] = __LINE__; dsb(); } while (0)
+ do { debug_ptr[DEBUG_ ## d] = __LINE__; dsb(sy); } while (0)
#define DEBUG_VALUE(d, v) \
- do { debug_ptr[DEBUG_ ## d] = (v); dsb(); } while (0)
+ do { debug_ptr[DEBUG_ ## d] = (v); dsb(sy); } while (0)
#define DEBUG_COUNT(d) \
- do { debug_ptr[DEBUG_ ## d]++; dsb(); } while (0)
+ do { debug_ptr[DEBUG_ ## d]++; dsb(sy); } while (0)
#else /* VCHIQ_ENABLE_DEBUG */
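
Note on the barrier change: newer ARM assemblers reject a bare dsb and require an explicit scope option; dsb(sy) names the full-system data synchronization barrier, which is what the old spelling meant implicitly. As a sketch (assuming the arm64 barrier macros), dsb(sy) expands to roughly:

	/* full-system data synchronization barrier, also a compiler barrier */
	asm volatile("dsb sy" : : : "memory");
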
@@ -264,7 +264,8 @@ typedef struct vchiq_bulk_queue_struct {
typedef struct remote_event_struct {
int armed;
int fired;
- struct semaphore *event;
+ /* Contains the offset from the beginning of the VCHIQ_STATE_T structure */
+ u32 event;
} REMOTE_EVENT_T;
typedef struct opaque_platform_state_t *VCHIQ_PLATFORM_STATE_T;
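
Replacing the struct semaphore pointer with a u32 offset keeps REMOTE_EVENT_T the same size on 32- and 64-bit kernels, which matters for a structure shared with the VideoCore. A hedged sketch of how the offset would be resolved (helper name hypothetical, assuming the offset is relative to the state structure as the field comment says):

	static inline struct semaphore *
	event_to_sem(VCHIQ_STATE_T *state, REMOTE_EVENT_T *event)
	{
		/* event->event holds a byte offset from the start of *state */
		return (struct semaphore *)((char *)state + event->event);
	}
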
@@ -633,9 +634,6 @@ vchiq_transfer_bulk(VCHIQ_BULK_T *bulk);
extern void
vchiq_complete_bulk(VCHIQ_BULK_T *bulk);
-extern VCHIQ_STATUS_T
-vchiq_copy_from_user(void *dst, const void *src, int size);
-
extern void
remote_event_signal(REMOTE_EVENT_T *event);
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_debugfs.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_debugfs.c
index 7e032130d967..f07cd4448ddf 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_debugfs.c
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_debugfs.c
@@ -120,7 +120,7 @@ static int debugfs_log_open(struct inode *inode, struct file *file)
return single_open(file, debugfs_log_show, inode->i_private);
}
-static int debugfs_log_write(struct file *file,
+static ssize_t debugfs_log_write(struct file *file,
const char __user *buffer,
size_t count, loff_t *ppos)
{
@@ -229,7 +229,7 @@ static int debugfs_trace_open(struct inode *inode, struct file *file)
return single_open(file, debugfs_trace_show, inode->i_private);
}
-static int debugfs_trace_write(struct file *file,
+static ssize_t debugfs_trace_write(struct file *file,
const char __user *buffer,
size_t count, loff_t *ppos)
{
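
The return-type change matters because struct file_operations declares .write as returning ssize_t; an int-returning handler has a mismatched prototype and can truncate large counts on 64-bit. The corrected shape, as used above:

	static ssize_t debugfs_log_write(struct file *file,
					 const char __user *buffer,
					 size_t count, loff_t *ppos);
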
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_if.h b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_if.h
index 8067bbe7ce8d..377e8e48bb54 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_if.h
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_if.h
@@ -141,9 +141,12 @@ extern VCHIQ_STATUS_T vchiq_use_service(VCHIQ_SERVICE_HANDLE_T service);
extern VCHIQ_STATUS_T vchiq_use_service_no_resume(
VCHIQ_SERVICE_HANDLE_T service);
extern VCHIQ_STATUS_T vchiq_release_service(VCHIQ_SERVICE_HANDLE_T service);
-
-extern VCHIQ_STATUS_T vchiq_queue_message(VCHIQ_SERVICE_HANDLE_T service,
- const VCHIQ_ELEMENT_T *elements, unsigned int count);
+extern VCHIQ_STATUS_T
+vchiq_queue_message(VCHIQ_SERVICE_HANDLE_T handle,
+ ssize_t (*copy_callback)(void *context, void *dest,
+ size_t offset, size_t maxsize),
+ void *context,
+ size_t size);
extern void vchiq_release_message(VCHIQ_SERVICE_HANDLE_T service,
VCHIQ_HEADER_T *header);
extern VCHIQ_STATUS_T vchiq_queue_bulk_transmit(VCHIQ_SERVICE_HANDLE_T service,
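
With VCHIQ_ELEMENT_T gone from the interface, callers pass a copy callback plus an opaque context and the core pulls the payload in as it builds the message. A minimal sketch of a kernel-side callback, assuming the context points at one contiguous buffer (names hypothetical):

	/* Copy up to maxsize bytes of the message, starting at offset,
	 * into dest; return bytes copied or a negative errno.
	 */
	static ssize_t copy_from_linear_buf(void *context, void *dest,
					    size_t offset, size_t maxsize)
	{
		memcpy(dest, (const char *)context + offset, maxsize);
		return maxsize;
	}

A user-space caller would instead wrap copy_from_user() and return -EFAULT when the copy fails.
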
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_kern_lib.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_kern_lib.c
index 25e7011edc50..e93922a87263 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_kern_lib.c
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_kern_lib.c
@@ -70,7 +70,7 @@ vchiq_blocking_bulk_transfer(VCHIQ_SERVICE_HANDLE_T handle, void *data,
*
***************************************************************************/
#define VCHIQ_INIT_RETRIES 10
-VCHIQ_STATUS_T vchiq_initialise(VCHIQ_INSTANCE_T *instanceOut)
+VCHIQ_STATUS_T vchiq_initialise(VCHIQ_INSTANCE_T *instance_out)
{
VCHIQ_STATUS_T status = VCHIQ_ERROR;
VCHIQ_STATE_T *state;
@@ -108,7 +108,7 @@ VCHIQ_STATUS_T vchiq_initialise(VCHIQ_INSTANCE_T *instanceOut)
mutex_init(&instance->bulk_waiter_list_mutex);
INIT_LIST_HEAD(&instance->bulk_waiter_list);
- *instanceOut = instance;
+ *instance_out = instance;
status = VCHIQ_SUCCESS;
@@ -134,7 +134,7 @@ VCHIQ_STATUS_T vchiq_shutdown(VCHIQ_INSTANCE_T instance)
vchiq_log_trace(vchiq_core_log_level,
"%s(%p) called", __func__, instance);
- if (mutex_lock_interruptible(&state->mutex) != 0)
+ if (mutex_lock_killable(&state->mutex) != 0)
return VCHIQ_RETRY;
/* Remove all services */
@@ -155,9 +155,8 @@ VCHIQ_STATUS_T vchiq_shutdown(VCHIQ_INSTANCE_T instance)
list);
list_del(pos);
vchiq_log_info(vchiq_arm_log_level,
- "bulk_waiter - cleaned up %x "
- "for pid %d",
- (unsigned int)waiter, waiter->pid);
+ "bulk_waiter - cleaned up %pK for pid %d",
+ waiter, waiter->pid);
kfree(waiter);
}
kfree(instance);
@@ -192,7 +191,7 @@ VCHIQ_STATUS_T vchiq_connect(VCHIQ_INSTANCE_T instance)
vchiq_log_trace(vchiq_core_log_level,
"%s(%p) called", __func__, instance);
- if (mutex_lock_interruptible(&state->mutex) != 0) {
+ if (mutex_lock_killable(&state->mutex) != 0) {
vchiq_log_trace(vchiq_core_log_level,
"%s: call to mutex_lock failed", __func__);
status = VCHIQ_RETRY;
@@ -450,8 +449,8 @@ vchiq_blocking_bulk_transfer(VCHIQ_SERVICE_HANDLE_T handle, void *data,
list_add(&waiter->list, &instance->bulk_waiter_list);
mutex_unlock(&instance->bulk_waiter_list_mutex);
vchiq_log_info(vchiq_arm_log_level,
- "saved bulk_waiter %x for pid %d",
- (unsigned int)waiter, current->pid);
+ "saved bulk_waiter %pK for pid %d",
+ waiter, current->pid);
}
return status;
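
The %pK conversions also fix a correctness bug: casting a pointer to unsigned int truncates it on 64-bit kernels, and printing raw addresses leaks kernel layout into the log. %pK defers to the kptr_restrict sysctl, printing zeros to unprivileged readers when it is set. One-line sketch:

	vchiq_log_info(vchiq_arm_log_level, "waiter at %pK", waiter);
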
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_killable.h b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_killable.h
index 335446e05476..778063ba312a 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_killable.h
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_killable.h
@@ -52,18 +52,4 @@ static inline int __must_check down_interruptible_killable(struct semaphore *sem
}
#define down_interruptible down_interruptible_killable
-
-static inline int __must_check mutex_lock_interruptible_killable(struct mutex *lock)
-{
- /* Allow interception of killable signals only. We don't want to be interrupted by harmless signals like SIGALRM */
- int ret;
- sigset_t blocked, oldset;
- siginitsetinv(&blocked, SHUTDOWN_SIGS);
- sigprocmask(SIG_SETMASK, &blocked, &oldset);
- ret = mutex_lock_interruptible(lock);
- sigprocmask(SIG_SETMASK, &oldset, NULL);
- return ret;
-}
-#define mutex_lock_interruptible mutex_lock_interruptible_killable
-
#endif
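
Dropping the homemade mutex_lock_interruptible_killable() wrapper is safe because the kernel already provides the wanted semantic: mutex_lock_killable() sleeps uninterruptibly except for fatal signals, so SIGALRM and friends no longer abort the lock. Callers map the failure to a retry, as above:

	if (mutex_lock_killable(&state->mutex) != 0)
		return VCHIQ_RETRY;	/* woken by a fatal signal */
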
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_memdrv.h b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_memdrv.h
index d02e7764bd0d..dd43458f306f 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_memdrv.h
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_memdrv.h
@@ -42,13 +42,13 @@
/* ---- Constants and Types ---------------------------------------------- */
typedef struct {
- void *armSharedMemVirt;
- dma_addr_t armSharedMemPhys;
- size_t armSharedMemSize;
+ void *arm_shared_mem_virt;
+ dma_addr_t arm_shared_mem_phys;
+ size_t arm_shared_mem_size;
- void *vcSharedMemVirt;
- dma_addr_t vcSharedMemPhys;
- size_t vcSharedMemSize;
+ void *vc_shared_mem_virt;
+ dma_addr_t vc_shared_mem_phys;
+ size_t vc_shared_mem_size;
} VCHIQ_SHARED_MEM_INFO_T;
/* ---- Variable Externs ------------------------------------------------- */
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_pagelist.h b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_pagelist.h
index 54a3ecec69ef..12c304ceb952 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_pagelist.h
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_pagelist.h
@@ -43,11 +43,13 @@
#define PAGELIST_READ_WITH_FRAGMENTS 2
typedef struct pagelist_struct {
- unsigned long length;
- unsigned short type;
- unsigned short offset;
- unsigned long addrs[1]; /* N.B. 12 LSBs hold the number of following
- pages at consecutive addresses. */
+ u32 length;
+ u16 type;
+ u16 offset;
+ u32 addrs[1]; /* N.B. 12 LSBs hold the number
+ * of following pages at consecutive
+ * addresses.
+ */
} PAGELIST_T;
typedef struct fragments_struct {
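
unsigned long and unsigned short change width between 32- and 64-bit builds, but PAGELIST_T is parsed by the VideoCore firmware and must keep one fixed layout, hence the u32/u16 fields. A sketch of the compile-time check this makes possible (assuming C11 static_assert is available):

	/* 4 (length) + 2 (type) + 2 (offset) = 8 bytes before addrs[] */
	static_assert(offsetof(PAGELIST_T, addrs) == 8,
		      "pagelist layout is shared with the firmware");
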
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_shim.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_shim.c
index 8072ff613636..d9771394a041 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_shim.c
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_shim.c
@@ -148,10 +148,10 @@ EXPORT_SYMBOL(vchi_msg_remove);
* Name: vchi_msg_queue
*
* Arguments: VCHI_SERVICE_HANDLE_T handle,
- * const void *data,
- * uint32_t data_size,
- * VCHI_FLAGS_T flags,
- * void *msg_handle,
+ * ssize_t (*copy_callback)(void *context, void *dest,
+ * size_t offset, size_t maxsize),
+ * void *context,
+ * uint32_t data_size
*
* Description: Thin wrapper to queue a message onto a connection
*
@@ -159,28 +159,29 @@ EXPORT_SYMBOL(vchi_msg_remove);
*
***********************************************************/
int32_t vchi_msg_queue(VCHI_SERVICE_HANDLE_T handle,
- const void *data,
- uint32_t data_size,
- VCHI_FLAGS_T flags,
- void *msg_handle)
+ ssize_t (*copy_callback)(void *context, void *dest,
+ size_t offset, size_t maxsize),
+ void *context,
+ uint32_t data_size)
{
SHIM_SERVICE_T *service = (SHIM_SERVICE_T *)handle;
- VCHIQ_ELEMENT_T element = {data, data_size};
VCHIQ_STATUS_T status;
- (void)msg_handle;
-
- WARN_ON(flags != VCHI_FLAGS_BLOCK_UNTIL_QUEUED);
+ while (1) {
+ status = vchiq_queue_message(service->handle,
+ copy_callback,
+ context,
+ data_size);
- status = vchiq_queue_message(service->handle, &element, 1);
+ /*
+ * vchiq_queue_message() may return VCHIQ_RETRY, so we need to
+ * implement a retry mechanism since this function is supposed
+ * to block until queued
+ */
+ if (status != VCHIQ_RETRY)
+ break;
- /* vchiq_queue_message() may return VCHIQ_RETRY, so we need to
- ** implement a retry mechanism since this function is supposed
- ** to block until queued
- */
- while (status == VCHIQ_RETRY) {
msleep(1);
- status = vchiq_queue_message(service->handle, &element, 1);
}
return vchiq_status_to_vchi(status);
@@ -229,17 +230,18 @@ int32_t vchi_bulk_queue_receive(VCHI_SERVICE_HANDLE_T handle,
return vchiq_status_to_vchi(VCHIQ_ERROR);
}
- status = vchiq_bulk_receive(service->handle, data_dst, data_size,
- bulk_handle, mode);
-
- /* vchiq_bulk_receive() may return VCHIQ_RETRY, so we need to
- ** implement a retry mechanism since this function is supposed
- ** to block until queued
- */
- while (status == VCHIQ_RETRY) {
- msleep(1);
+ while (1) {
status = vchiq_bulk_receive(service->handle, data_dst,
data_size, bulk_handle, mode);
+ /*
+ * vchiq_bulk_receive() may return VCHIQ_RETRY, so we need to
+ * implement a retry mechanism since this function is supposed
+ * to block until queued
+ */
+ if (status != VCHIQ_RETRY)
+ break;
+
+ msleep(1);
}
return vchiq_status_to_vchi(status);
@@ -289,17 +291,19 @@ int32_t vchi_bulk_queue_transmit(VCHI_SERVICE_HANDLE_T handle,
return vchiq_status_to_vchi(VCHIQ_ERROR);
}
- status = vchiq_bulk_transmit(service->handle, data_src, data_size,
- bulk_handle, mode);
-
- /* vchiq_bulk_transmit() may return VCHIQ_RETRY, so we need to
- ** implement a retry mechanism since this function is supposed
- ** to block until queued
- */
- while (status == VCHIQ_RETRY) {
- msleep(1);
+ while (1) {
status = vchiq_bulk_transmit(service->handle, data_src,
data_size, bulk_handle, mode);
+
+ /*
+ * vchiq_bulk_transmit() may return VCHIQ_RETRY, so we need to
+ * implement a retry mechanism since this function is supposed
+ * to block until queued
+ */
+ if (status != VCHIQ_RETRY)
+ break;
+
+ msleep(1);
}
return vchiq_status_to_vchi(status);
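
All three shim paths now share one shape: make the call first, leave the loop unless it reported VCHIQ_RETRY, and sleep a millisecond before retrying, so the function still blocks until the request is queued. In general form (sketch):

	do {
		status = op();		/* any of the queue/bulk calls */
		if (status == VCHIQ_RETRY)
			msleep(1);
	} while (status == VCHIQ_RETRY);
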
@@ -350,44 +354,6 @@ int32_t vchi_msg_dequeue(VCHI_SERVICE_HANDLE_T handle,
EXPORT_SYMBOL(vchi_msg_dequeue);
/***********************************************************
- * Name: vchi_msg_queuev
- *
- * Arguments: VCHI_SERVICE_HANDLE_T handle,
- * VCHI_MSG_VECTOR_T *vector,
- * uint32_t count,
- * VCHI_FLAGS_T flags,
- * void *msg_handle
- *
- * Description: Thin wrapper to queue a message onto a connection
- *
- * Returns: int32_t - success == 0
- *
- ***********************************************************/
-
-vchiq_static_assert(sizeof(VCHI_MSG_VECTOR_T) == sizeof(VCHIQ_ELEMENT_T));
-vchiq_static_assert(offsetof(VCHI_MSG_VECTOR_T, vec_base) ==
- offsetof(VCHIQ_ELEMENT_T, data));
-vchiq_static_assert(offsetof(VCHI_MSG_VECTOR_T, vec_len) ==
- offsetof(VCHIQ_ELEMENT_T, size));
-
-int32_t vchi_msg_queuev(VCHI_SERVICE_HANDLE_T handle,
- VCHI_MSG_VECTOR_T *vector,
- uint32_t count,
- VCHI_FLAGS_T flags,
- void *msg_handle)
-{
- SHIM_SERVICE_T *service = (SHIM_SERVICE_T *)handle;
-
- (void)msg_handle;
-
- WARN_ON(flags != VCHI_FLAGS_BLOCK_UNTIL_QUEUED);
-
- return vchiq_status_to_vchi(vchiq_queue_message(service->handle,
- (const VCHIQ_ELEMENT_T *)vector, count));
-}
-EXPORT_SYMBOL(vchi_msg_queuev);
-
-/***********************************************************
* Name: vchi_held_msg_release
*
* Arguments: VCHI_HELD_MSG_T *message
@@ -400,8 +366,16 @@ EXPORT_SYMBOL(vchi_msg_queuev);
***********************************************************/
int32_t vchi_held_msg_release(VCHI_HELD_MSG_T *message)
{
- vchiq_release_message((VCHIQ_SERVICE_HANDLE_T)message->service,
- (VCHIQ_HEADER_T *)message->message);
+ /*
+ * Convert the service field pointer back to a
+ * VCHIQ_SERVICE_HANDLE_T, which is an int.
+ * This pointer is opaque to everything except
+ * vchi_msg_hold, which simply upcast the int
+ * to a pointer.
+ */
+
+ vchiq_release_message((VCHIQ_SERVICE_HANDLE_T)(long)message->service,
+ (VCHIQ_HEADER_T *)message->message);
return 0;
}
@@ -445,8 +419,16 @@ int32_t vchi_msg_hold(VCHI_SERVICE_HANDLE_T handle,
*data = header->data;
*msg_size = header->size;
+ /*
+ * Upcast the VCHIQ_SERVICE_HANDLE_T, which is an int,
+ * to a pointer and stuff it in the held message.
+ * This pointer is opaque to everything except
+ * vchi_held_msg_release, which simply downcasts it back
+ * to an int.
+ */
+
message_handle->service =
- (struct opaque_vchi_service_t *)service->handle;
+ (struct opaque_vchi_service_t *)(long)service->handle;
message_handle->message = header;
return 0;
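
The intermediate (long) cast exists because converting an int directly to a pointer (or back) triggers int-to-pointer-cast warnings on 64-bit builds, where the sizes differ; long matches the pointer width on the kernel's supported ABIs. Sketch of the round trip:

	VCHIQ_SERVICE_HANDLE_T h = service->handle;	/* an int */
	void *opaque = (void *)(long)h;			/* widen, then cast */
	VCHIQ_SERVICE_HANDLE_T back = (VCHIQ_SERVICE_HANDLE_T)(long)opaque;
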
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_util.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_util.c
index 384acb8d2eae..f76f4d790532 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_util.c
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_util.c
@@ -61,8 +61,7 @@ int vchiu_queue_init(VCHIU_QUEUE_T *queue, int size)
void vchiu_queue_delete(VCHIU_QUEUE_T *queue)
{
- if (queue->storage != NULL)
- kfree(queue->storage);
+ kfree(queue->storage);
}
int vchiu_queue_is_empty(VCHIU_QUEUE_T *queue)
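
The NULL check before kfree() was redundant; kfree(NULL) is defined to be a no-op, so the unconditional call is the idiomatic form:

	kfree(queue->storage);	/* safe even if storage was never allocated */
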
diff --git a/drivers/staging/vme/devices/vme_pio2.h b/drivers/staging/vme/devices/vme_pio2.h
index d5d94c43c074..5577df3199e7 100644
--- a/drivers/staging/vme/devices/vme_pio2.h
+++ b/drivers/staging/vme/devices/vme_pio2.h
@@ -48,8 +48,6 @@ static const int PIO2_REGS_INT_MASK[8] = { PIO2_REGS_INT_MASK0,
PIO2_REGS_INT_MASK6,
PIO2_REGS_INT_MASK7 };
-
-
#define PIO2_REGS_CTRL 0x18
#define PIO2_REGS_VME_VECTOR 0x19
#define PIO2_REGS_CNTR0 0x20
@@ -63,7 +61,6 @@ static const int PIO2_REGS_INT_MASK[8] = { PIO2_REGS_INT_MASK0,
#define PIO2_REGS_ID 0x30
-
/* PIO2_REGS_DATAx (0x0 - 0x3) */
static const int PIO2_CHANNEL_BANK[32] = { 0, 0, 0, 0, 0, 0, 0, 0,
@@ -204,8 +201,6 @@ static const int PIO2_CNTR_SC_DEV[6] = { PIO2_CNTR_SC_DEV0, PIO2_CNTR_SC_DEV1,
#define PIO2_CNTR_BCD 1
-
-
enum pio2_bank_config { NOFIT, INPUT, OUTPUT, BOTH };
enum pio2_int_config { NONE = 0, LOW2HIGH = 1, HIGH2LOW = 2, EITHER = 4 };
@@ -240,10 +235,10 @@ struct pio2_card {
struct pio2_cntr cntr[6];
};
-int pio2_cntr_reset(struct pio2_card *);
+int pio2_cntr_reset(struct pio2_card *card);
-int pio2_gpio_reset(struct pio2_card *);
-int pio2_gpio_init(struct pio2_card *);
-void pio2_gpio_exit(struct pio2_card *);
+int pio2_gpio_reset(struct pio2_card *card);
+int pio2_gpio_init(struct pio2_card *card);
+void pio2_gpio_exit(struct pio2_card *card);
#endif /* _VME_PIO2_H_ */
diff --git a/drivers/staging/vme/devices/vme_pio2_core.c b/drivers/staging/vme/devices/vme_pio2_core.c
index 8e66a520266c..20a2d835fdaa 100644
--- a/drivers/staging/vme/devices/vme_pio2_core.c
+++ b/drivers/staging/vme/devices/vme_pio2_core.c
@@ -365,7 +365,7 @@ static int pio2_probe(struct vme_dev *vdev)
vec = card->irq_vector | PIO2_VECTOR_CNTR[i];
retval = vme_irq_request(vdev, card->irq_level, vec,
- &pio2_int, card);
+ &pio2_int, card);
if (retval < 0) {
dev_err(&card->vdev->dev,
"Unable to attach VME interrupt vector0x%x, level 0x%x\n",
diff --git a/drivers/staging/vme/devices/vme_user.c b/drivers/staging/vme/devices/vme_user.c
index 5dd430f8f921..87aa5174df22 100644
--- a/drivers/staging/vme/devices/vme_user.c
+++ b/drivers/staging/vme/devices/vme_user.c
@@ -47,7 +47,7 @@ static const char driver_name[] = "vme_user";
static int bus[VME_USER_BUS_MAX];
static unsigned int bus_num;
-/* Currently Documentation/devices.txt defines the following for VME:
+/* Currently Documentation/admin-guide/devices.rst defines the following for VME:
*
* 221 char VME bus
* 0 = /dev/bus/vme/m0 First master image
@@ -661,7 +661,7 @@ err_sysfs:
}
class_destroy(vme_user_sysfs_class);
- /* Ensure counter set correcty to unalloc all master windows */
+ /* Ensure counter set correctly to unalloc all master windows */
i = MASTER_MAX + 1;
err_master:
while (i > MASTER_MINOR) {
@@ -671,7 +671,7 @@ err_master:
}
/*
- * Ensure counter set correcty to unalloc all slave windows and buffers
+ * Ensure counter set correctly to unalloc all slave windows and buffers
*/
i = SLAVE_MAX + 1;
err_slave:
@@ -716,7 +716,7 @@ static int vme_user_remove(struct vme_dev *dev)
/* Unregister device driver */
cdev_del(vme_user_cdev);
- /* Unregiser the major and minor device numbers */
+ /* Unregister the major and minor device numbers */
unregister_chrdev_region(MKDEV(VME_MAJOR, 0), VME_DEVS);
return 0;
diff --git a/drivers/staging/vt6655/baseband.c b/drivers/staging/vt6655/baseband.c
index de503a316e71..44dfa5421374 100644
--- a/drivers/staging/vt6655/baseband.c
+++ b/drivers/staging/vt6655/baseband.c
@@ -12,11 +12,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- *
* File: baseband.c
*
* Purpose: Implement functions to access baseband
@@ -1916,7 +1911,7 @@ void vnt_get_phy_field(struct vnt_private *priv, u32 frame_length,
*
* Parameters:
* In:
- * dwIoBase - I/O base address
+ * iobase - I/O base address
* byBBAddr - address of register in Baseband
* Out:
* pbyData - data read
@@ -1927,24 +1922,24 @@ void vnt_get_phy_field(struct vnt_private *priv, u32 frame_length,
bool BBbReadEmbedded(struct vnt_private *priv,
unsigned char byBBAddr, unsigned char *pbyData)
{
- void __iomem *dwIoBase = priv->PortOffset;
+ void __iomem *iobase = priv->PortOffset;
unsigned short ww;
unsigned char byValue;
/* BB reg offset */
- VNSvOutPortB(dwIoBase + MAC_REG_BBREGADR, byBBAddr);
+ VNSvOutPortB(iobase + MAC_REG_BBREGADR, byBBAddr);
/* turn on REGR */
- MACvRegBitsOn(dwIoBase, MAC_REG_BBREGCTL, BBREGCTL_REGR);
+ MACvRegBitsOn(iobase, MAC_REG_BBREGCTL, BBREGCTL_REGR);
/* W_MAX_TIMEOUT is the timeout period */
for (ww = 0; ww < W_MAX_TIMEOUT; ww++) {
- VNSvInPortB(dwIoBase + MAC_REG_BBREGCTL, &byValue);
+ VNSvInPortB(iobase + MAC_REG_BBREGCTL, &byValue);
if (byValue & BBREGCTL_DONE)
break;
}
/* get BB data */
- VNSvInPortB(dwIoBase + MAC_REG_BBREGDATA, pbyData);
+ VNSvInPortB(iobase + MAC_REG_BBREGDATA, pbyData);
if (ww == W_MAX_TIMEOUT) {
pr_debug(" DBG_PORT80(0x30)\n");
@@ -1958,7 +1953,7 @@ bool BBbReadEmbedded(struct vnt_private *priv,
*
* Parameters:
* In:
- * dwIoBase - I/O base address
+ * iobase - I/O base address
* byBBAddr - address of register in Baseband
* byData - data to write
* Out:
@@ -1970,20 +1965,20 @@ bool BBbReadEmbedded(struct vnt_private *priv,
bool BBbWriteEmbedded(struct vnt_private *priv,
unsigned char byBBAddr, unsigned char byData)
{
- void __iomem *dwIoBase = priv->PortOffset;
+ void __iomem *iobase = priv->PortOffset;
unsigned short ww;
unsigned char byValue;
/* BB reg offset */
- VNSvOutPortB(dwIoBase + MAC_REG_BBREGADR, byBBAddr);
+ VNSvOutPortB(iobase + MAC_REG_BBREGADR, byBBAddr);
/* set BB data */
- VNSvOutPortB(dwIoBase + MAC_REG_BBREGDATA, byData);
+ VNSvOutPortB(iobase + MAC_REG_BBREGDATA, byData);
/* turn on BBREGCTL_REGW */
- MACvRegBitsOn(dwIoBase, MAC_REG_BBREGCTL, BBREGCTL_REGW);
+ MACvRegBitsOn(iobase, MAC_REG_BBREGCTL, BBREGCTL_REGW);
/* W_MAX_TIMEOUT is the timeout period */
for (ww = 0; ww < W_MAX_TIMEOUT; ww++) {
- VNSvInPortB(dwIoBase + MAC_REG_BBREGCTL, &byValue);
+ VNSvInPortB(iobase + MAC_REG_BBREGCTL, &byValue);
if (byValue & BBREGCTL_DONE)
break;
}
@@ -2000,7 +1995,7 @@ bool BBbWriteEmbedded(struct vnt_private *priv,
*
* Parameters:
* In:
- * dwIoBase - I/O base address
+ * iobase - I/O base address
* byRevId - Revision ID
* byRFType - RF type
* Out:
@@ -2014,7 +2009,7 @@ bool BBbVT3253Init(struct vnt_private *priv)
{
bool bResult = true;
int ii;
- void __iomem *dwIoBase = priv->PortOffset;
+ void __iomem *iobase = priv->PortOffset;
unsigned char byRFType = priv->byRFType;
unsigned char byLocalID = priv->byLocalID;
@@ -2036,8 +2031,8 @@ bool BBbVT3253Init(struct vnt_private *priv)
byVT3253B0_AGC4_RFMD2959[ii][0],
byVT3253B0_AGC4_RFMD2959[ii][1]);
- VNSvOutPortD(dwIoBase + MAC_REG_ITRTMSET, 0x23);
- MACvRegBitsOn(dwIoBase, MAC_REG_PAPEDELAY, BIT(0));
+ VNSvOutPortD(iobase + MAC_REG_ITRTMSET, 0x23);
+ MACvRegBitsOn(iobase, MAC_REG_PAPEDELAY, BIT(0));
}
priv->abyBBVGA[0] = 0x18;
priv->abyBBVGA[1] = 0x0A;
@@ -2076,8 +2071,8 @@ bool BBbVT3253Init(struct vnt_private *priv)
byVT3253B0_AGC[ii][0],
byVT3253B0_AGC[ii][1]);
- VNSvOutPortB(dwIoBase + MAC_REG_ITRTMSET, 0x23);
- MACvRegBitsOn(dwIoBase, MAC_REG_PAPEDELAY, BIT(0));
+ VNSvOutPortB(iobase + MAC_REG_ITRTMSET, 0x23);
+ MACvRegBitsOn(iobase, MAC_REG_PAPEDELAY, BIT(0));
priv->abyBBVGA[0] = 0x14;
priv->abyBBVGA[1] = 0x0A;
@@ -2098,7 +2093,7 @@ bool BBbVT3253Init(struct vnt_private *priv)
* 0x45->0x41(VC1/VC2 define, make the ANT_A, ANT_B inverted)
*/
- /*bResult &= BBbWriteEmbedded(dwIoBase,0x09,0x41);*/
+ /*bResult &= BBbWriteEmbedded(iobase,0x09,0x41);*/
/* Init ANT B select,
* RX Config CR10 = 0x28->0x2A,
@@ -2106,7 +2101,7 @@ bool BBbVT3253Init(struct vnt_private *priv)
* make the ANT_A, ANT_B inverted)
*/
- /*bResult &= BBbWriteEmbedded(dwIoBase,0x0a,0x28);*/
+ /*bResult &= BBbWriteEmbedded(iobase,0x0a,0x28);*/
/* Select VC1/VC2, CR215 = 0x02->0x06 */
bResult &= BBbWriteEmbedded(priv, 0xd7, 0x06);
@@ -2154,7 +2149,7 @@ bool BBbVT3253Init(struct vnt_private *priv)
priv->ldBmThreshold[2] = 0;
priv->ldBmThreshold[3] = 0;
/* Fix VT3226 DFC system timing issue */
- MACvSetRFLE_LatchBase(dwIoBase);
+ MACvSetRFLE_LatchBase(iobase);
/* {{ RobertYu: 20050104 */
} else if (byRFType == RF_AIROHA7230) {
for (ii = 0; ii < CB_VT3253B0_INIT_FOR_AIROHA2230; ii++)
@@ -2162,16 +2157,15 @@ bool BBbVT3253Init(struct vnt_private *priv)
byVT3253B0_AIROHA2230[ii][0],
byVT3253B0_AIROHA2230[ii][1]);
-
/* {{ RobertYu:20050223, request by JerryChung */
/* Init ANT B select,TX Config CR09 = 0x61->0x45,
* 0x45->0x41(VC1/VC2 define, make the ANT_A, ANT_B inverted)
*/
- /*bResult &= BBbWriteEmbedded(dwIoBase,0x09,0x41);*/
+ /*bResult &= BBbWriteEmbedded(iobase,0x09,0x41);*/
/* Init ANT B select,RX Config CR10 = 0x28->0x2A,
* 0x2A->0x28(VC1/VC2 define, make the ANT_A, ANT_B inverted)
*/
- /*bResult &= BBbWriteEmbedded(dwIoBase,0x0a,0x28);*/
+ /*bResult &= BBbWriteEmbedded(iobase,0x0a,0x28);*/
/* Select VC1/VC2, CR215 = 0x02->0x06 */
bResult &= BBbWriteEmbedded(priv, 0xd7, 0x06);
/* }} */
@@ -2259,7 +2253,7 @@ void BBvSetVGAGainOffset(struct vnt_private *priv, unsigned char byData)
*
* Parameters:
* In:
- * dwIoBase - I/O base address
+ * iobase - I/O base address
* Out:
* none
*
@@ -2280,7 +2274,7 @@ BBvSoftwareReset(struct vnt_private *priv)
*
* Parameters:
* In:
- * dwIoBase - I/O base address
+ * iobase - I/O base address
* Out:
* none
*
@@ -2302,7 +2296,7 @@ BBvPowerSaveModeON(struct vnt_private *priv)
*
* Parameters:
* In:
- * dwIoBase - I/O base address
+ * iobase - I/O base address
* Out:
* none
*
diff --git a/drivers/staging/vt6655/baseband.h b/drivers/staging/vt6655/baseband.h
index b4e8c43180ec..8a567c9155b4 100644
--- a/drivers/staging/vt6655/baseband.h
+++ b/drivers/staging/vt6655/baseband.h
@@ -12,11 +12,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- *
* File: baseband.h
*
* Purpose: Implement functions to access baseband
@@ -60,12 +55,6 @@
#define TOP_RATE_2M 0x00200000
#define TOP_RATE_1M 0x00100000
-#define BBvClearFOE(dwIoBase) \
- BBbWriteEmbedded(dwIoBase, 0xB1, 0)
-
-#define BBvSetFOE(dwIoBase) \
- BBbWriteEmbedded(dwIoBase, 0xB1, 0x0C)
-
unsigned int
BBuGetFrameTime(
unsigned char byPreambleType,
diff --git a/drivers/staging/vt6655/card.c b/drivers/staging/vt6655/card.c
index dbcea4434725..e0c92818ed70 100644
--- a/drivers/staging/vt6655/card.c
+++ b/drivers/staging/vt6655/card.c
@@ -12,10 +12,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- *
* File: card.c
* Purpose: Provide functions to setup NIC operation mode
* Functions:
@@ -36,7 +32,7 @@
*
* Revision History:
* 06-10-2003 Bryan YC Fan: Re-write codes to support VT3253 spec.
- * 08-26-2003 Kyle Hsu: Modify the defination type of dwIoBase.
+ * 08-26-2003 Kyle Hsu: Modify the definition type of iobase.
* 09-01-2003 Bryan YC Fan: Add vUpdateIFS().
*
*/
@@ -261,7 +257,7 @@ bool CARDbSetPhyParameter(struct vnt_private *priv, u8 bb_type)
BBbWriteEmbedded(priv, 0x88, 0x02);
bySlot = C_SLOT_LONG;
bySIFS = C_SIFS_BG;
- byDIFS = C_SIFS_BG + 2*C_SLOT_LONG;
+ byDIFS = C_SIFS_BG + 2 * C_SLOT_LONG;
byCWMaxMin = 0xA5;
} else { /* PK_TYPE_11GA & PK_TYPE_11GB */
MACvSetBBType(priv->PortOffset, BB_TYPE_11G);
@@ -289,7 +285,7 @@ bool CARDbSetPhyParameter(struct vnt_private *priv, u8 bb_type)
byDIFS = C_SIFS_BG + 2 * C_SLOT_SHORT;
} else {
bySlot = C_SLOT_LONG;
- byDIFS = C_SIFS_BG + 2*C_SLOT_LONG;
+ byDIFS = C_SIFS_BG + 2 * C_SLOT_LONG;
}
byCWMaxMin = 0xa4;
@@ -528,8 +524,11 @@ CARDvSafeResetTx(
struct vnt_tx_desc *pCurrTD;
/* initialize TD index */
- priv->apTailTD[0] = priv->apCurrTD[0] = &(priv->apTD0Rings[0]);
- priv->apTailTD[1] = priv->apCurrTD[1] = &(priv->apTD1Rings[0]);
+ priv->apTailTD[0] = &(priv->apTD0Rings[0]);
+ priv->apCurrTD[0] = &(priv->apTD0Rings[0]);
+
+ priv->apTailTD[1] = &(priv->apTD1Rings[0]);
+ priv->apCurrTD[1] = &(priv->apTD1Rings[0]);
for (uu = 0; uu < TYPE_MAXTD; uu++)
priv->iTDUsed[uu] = 0;
@@ -938,20 +937,20 @@ u64 CARDqGetTSFOffset(unsigned char byRxRate, u64 qwTSF1, u64 qwTSF2)
*/
bool CARDbGetCurrentTSF(struct vnt_private *priv, u64 *pqwCurrTSF)
{
- void __iomem *dwIoBase = priv->PortOffset;
+ void __iomem *iobase = priv->PortOffset;
unsigned short ww;
unsigned char byData;
- MACvRegBitsOn(dwIoBase, MAC_REG_TFTCTL, TFTCTL_TSFCNTRRD);
+ MACvRegBitsOn(iobase, MAC_REG_TFTCTL, TFTCTL_TSFCNTRRD);
for (ww = 0; ww < W_MAX_TIMEOUT; ww++) {
- VNSvInPortB(dwIoBase + MAC_REG_TFTCTL, &byData);
+ VNSvInPortB(iobase + MAC_REG_TFTCTL, &byData);
if (!(byData & TFTCTL_TSFCNTRRD))
break;
}
if (ww == W_MAX_TIMEOUT)
return false;
- VNSvInPortD(dwIoBase + MAC_REG_TSFCNTR, (u32 *)pqwCurrTSF);
- VNSvInPortD(dwIoBase + MAC_REG_TSFCNTR + 4, (u32 *)pqwCurrTSF + 1);
+ VNSvInPortD(iobase + MAC_REG_TSFCNTR, (u32 *)pqwCurrTSF);
+ VNSvInPortD(iobase + MAC_REG_TSFCNTR + 4, (u32 *)pqwCurrTSF + 1);
return true;
}
@@ -989,7 +988,7 @@ u64 CARDqGetNextTBTT(u64 qwTSF, unsigned short wBeaconInterval)
*
* Parameters:
* In:
- * dwIoBase - IO Base
+ * iobase - IO Base
* wBeaconInterval - Beacon Interval
* Out:
* none
@@ -999,16 +998,16 @@ u64 CARDqGetNextTBTT(u64 qwTSF, unsigned short wBeaconInterval)
void CARDvSetFirstNextTBTT(struct vnt_private *priv,
unsigned short wBeaconInterval)
{
- void __iomem *dwIoBase = priv->PortOffset;
+ void __iomem *iobase = priv->PortOffset;
u64 qwNextTBTT = 0;
CARDbGetCurrentTSF(priv, &qwNextTBTT); /* Get Local TSF counter */
qwNextTBTT = CARDqGetNextTBTT(qwNextTBTT, wBeaconInterval);
/* Set NextTBTT */
- VNSvOutPortD(dwIoBase + MAC_REG_NEXTTBTT, (u32)qwNextTBTT);
- VNSvOutPortD(dwIoBase + MAC_REG_NEXTTBTT + 4, (u32)(qwNextTBTT >> 32));
- MACvRegBitsOn(dwIoBase, MAC_REG_TFTCTL, TFTCTL_TBTTSYNCEN);
+ VNSvOutPortD(iobase + MAC_REG_NEXTTBTT, (u32)qwNextTBTT);
+ VNSvOutPortD(iobase + MAC_REG_NEXTTBTT + 4, (u32)(qwNextTBTT >> 32));
+ MACvRegBitsOn(iobase, MAC_REG_TFTCTL, TFTCTL_TBTTSYNCEN);
}
/*
@@ -1028,12 +1027,12 @@ void CARDvSetFirstNextTBTT(struct vnt_private *priv,
void CARDvUpdateNextTBTT(struct vnt_private *priv, u64 qwTSF,
unsigned short wBeaconInterval)
{
- void __iomem *dwIoBase = priv->PortOffset;
+ void __iomem *iobase = priv->PortOffset;
qwTSF = CARDqGetNextTBTT(qwTSF, wBeaconInterval);
/* Set NextTBTT */
- VNSvOutPortD(dwIoBase + MAC_REG_NEXTTBTT, (u32)qwTSF);
- VNSvOutPortD(dwIoBase + MAC_REG_NEXTTBTT + 4, (u32)(qwTSF >> 32));
- MACvRegBitsOn(dwIoBase, MAC_REG_TFTCTL, TFTCTL_TBTTSYNCEN);
+ VNSvOutPortD(iobase + MAC_REG_NEXTTBTT, (u32)qwTSF);
+ VNSvOutPortD(iobase + MAC_REG_NEXTTBTT + 4, (u32)(qwTSF >> 32));
+ MACvRegBitsOn(iobase, MAC_REG_TFTCTL, TFTCTL_TBTTSYNCEN);
pr_debug("Card:Update Next TBTT[%8llx]\n", qwTSF);
}
diff --git a/drivers/staging/vt6655/card.h b/drivers/staging/vt6655/card.h
index 0203c7fd91a2..44420b5a445f 100644
--- a/drivers/staging/vt6655/card.h
+++ b/drivers/staging/vt6655/card.h
@@ -12,10 +12,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- *
* File: card.h
*
* Purpose: Provide functions to setup NIC operation mode
@@ -50,7 +46,7 @@
#define CB_MAX_CHANNEL_24G 14
#define CB_MAX_CHANNEL_5G 42
-#define CB_MAX_CHANNEL (CB_MAX_CHANNEL_24G+CB_MAX_CHANNEL_5G)
+#define CB_MAX_CHANNEL (CB_MAX_CHANNEL_24G + CB_MAX_CHANNEL_5G)
typedef enum _CARD_PKT_TYPE {
PKT_TYPE_802_11_BCN,
diff --git a/drivers/staging/vt6655/channel.c b/drivers/staging/vt6655/channel.c
index 029a8df4ca1c..ab89956511a0 100644
--- a/drivers/staging/vt6655/channel.c
+++ b/drivers/staging/vt6655/channel.c
@@ -12,10 +12,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- *
* File: channel.c
*
*/
diff --git a/drivers/staging/vt6655/channel.h b/drivers/staging/vt6655/channel.h
index 2d613e7f169c..2621dfabff06 100644
--- a/drivers/staging/vt6655/channel.h
+++ b/drivers/staging/vt6655/channel.h
@@ -12,10 +12,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- *
* File: channel.h
*
*/
diff --git a/drivers/staging/vt6655/desc.h b/drivers/staging/vt6655/desc.h
index 2d7f6ae89164..2fee6e759ad8 100644
--- a/drivers/staging/vt6655/desc.h
+++ b/drivers/staging/vt6655/desc.h
@@ -12,10 +12,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- *
* File: desc.h
*
* Purpose:The header file of descriptor
diff --git a/drivers/staging/vt6655/device.h b/drivers/staging/vt6655/device.h
index 55405e058196..3ae40d846a09 100644
--- a/drivers/staging/vt6655/device.h
+++ b/drivers/staging/vt6655/device.h
@@ -12,10 +12,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- *
* File: device.h
*
* Purpose: MAC Data structure
@@ -283,12 +279,12 @@ struct vnt_private {
unsigned char byOFDMPwrG;
unsigned char byCurPwr;
char byCurPwrdBm;
- unsigned char abyCCKPwrTbl[CB_MAX_CHANNEL_24G+1];
- unsigned char abyOFDMPwrTbl[CB_MAX_CHANNEL+1];
- char abyCCKDefaultPwr[CB_MAX_CHANNEL_24G+1];
- char abyOFDMDefaultPwr[CB_MAX_CHANNEL+1];
- char abyRegPwr[CB_MAX_CHANNEL+1];
- char abyLocalPwr[CB_MAX_CHANNEL+1];
+ unsigned char abyCCKPwrTbl[CB_MAX_CHANNEL_24G + 1];
+ unsigned char abyOFDMPwrTbl[CB_MAX_CHANNEL + 1];
+ char abyCCKDefaultPwr[CB_MAX_CHANNEL_24G + 1];
+ char abyOFDMDefaultPwr[CB_MAX_CHANNEL + 1];
+ char abyRegPwr[CB_MAX_CHANNEL + 1];
+ char abyLocalPwr[CB_MAX_CHANNEL + 1];
/* BaseBand Loopback Use */
unsigned char byBBCR4d;
diff --git a/drivers/staging/vt6655/device_cfg.h b/drivers/staging/vt6655/device_cfg.h
index b4c9547d3138..0298ea923f97 100644
--- a/drivers/staging/vt6655/device_cfg.h
+++ b/drivers/staging/vt6655/device_cfg.h
@@ -12,10 +12,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- *
* File: device_cfg.h
*
* Purpose: Driver configuration header
diff --git a/drivers/staging/vt6655/device_main.c b/drivers/staging/vt6655/device_main.c
index f109eeac358d..da0f71191009 100644
--- a/drivers/staging/vt6655/device_main.c
+++ b/drivers/staging/vt6655/device_main.c
@@ -12,10 +12,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- *
* File: device_main.c
*
* Purpose: driver entry for initial, open, close, tx and rx.
@@ -314,7 +310,7 @@ static void device_init_registers(struct vnt_private *priv)
SROMbyReadEmbedded(priv->PortOffset,
(unsigned char)(ii + EEP_OFS_CCK_PWR_TBL));
if (priv->abyCCKPwrTbl[ii + 1] == 0)
- priv->abyCCKPwrTbl[ii+1] = priv->byCCKPwr;
+ priv->abyCCKPwrTbl[ii + 1] = priv->byCCKPwr;
priv->abyOFDMPwrTbl[ii + 1] =
SROMbyReadEmbedded(priv->PortOffset,
@@ -556,7 +552,7 @@ static void device_init_rd0_ring(struct vnt_private *priv)
if (!device_alloc_rx_buf(priv, desc))
dev_err(&priv->pcid->dev, "can not alloc rx bufs\n");
- desc->next = &(priv->aRD0Ring[(i+1) % priv->opts.rx_descs0]);
+ desc->next = &(priv->aRD0Ring[(i + 1) % priv->opts.rx_descs0]);
desc->next_desc = cpu_to_le32(curr + sizeof(struct vnt_rx_desc));
}
@@ -1272,7 +1268,6 @@ static void vnt_remove_interface(struct ieee80211_hw *hw,
priv->op_mode = NL80211_IFTYPE_UNSPECIFIED;
}
-
static int vnt_config(struct ieee80211_hw *hw, u32 changed)
{
struct vnt_private *priv = hw->priv;
diff --git a/drivers/staging/vt6655/dpc.c b/drivers/staging/vt6655/dpc.c
index 700032e9c477..9b3fa779258a 100644
--- a/drivers/staging/vt6655/dpc.c
+++ b/drivers/staging/vt6655/dpc.c
@@ -12,10 +12,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- *
* File: dpc.c
*
* Purpose: handle dpc rx functions
diff --git a/drivers/staging/vt6655/dpc.h b/drivers/staging/vt6655/dpc.h
index e80b30816968..6e75fa9c5618 100644
--- a/drivers/staging/vt6655/dpc.h
+++ b/drivers/staging/vt6655/dpc.h
@@ -12,10 +12,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- *
* File: dpc.h
*
* Purpose:
diff --git a/drivers/staging/vt6655/key.c b/drivers/staging/vt6655/key.c
index e161d5d9aebb..dad9e292d4da 100644
--- a/drivers/staging/vt6655/key.c
+++ b/drivers/staging/vt6655/key.c
@@ -12,11 +12,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- *
* File: key.c
*
* Purpose: Implement functions for 802.11i Key management
diff --git a/drivers/staging/vt6655/key.h b/drivers/staging/vt6655/key.h
index d72719741a56..a5024611af60 100644
--- a/drivers/staging/vt6655/key.h
+++ b/drivers/staging/vt6655/key.h
@@ -12,11 +12,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- *
* File: key.h
*
* Purpose: Implement functions for 802.11i Key management
diff --git a/drivers/staging/vt6655/mac.c b/drivers/staging/vt6655/mac.c
index 8e13f7f41415..4aaa99bafcda 100644
--- a/drivers/staging/vt6655/mac.c
+++ b/drivers/staging/vt6655/mac.c
@@ -12,11 +12,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- *
* File: mac.c
*
* Purpose: MAC routines
@@ -147,7 +142,6 @@ void MACvSetShortRetryLimit(struct vnt_private *priv,
iowrite8(byRetryLimit, io_base + MAC_REG_SRT);
}
-
/*
* Description:
* Set 802.11 Long Retry Limit
@@ -321,7 +315,7 @@ bool MACbSoftwareReset(struct vnt_private *priv)
*/
bool MACbSafeSoftwareReset(struct vnt_private *priv)
{
- unsigned char abyTmpRegData[MAC_MAX_CONTEXT_SIZE_PAGE0+MAC_MAX_CONTEXT_SIZE_PAGE1];
+ unsigned char abyTmpRegData[MAC_MAX_CONTEXT_SIZE_PAGE0 + MAC_MAX_CONTEXT_SIZE_PAGE1];
bool bRetVal;
/* PATCH....
diff --git a/drivers/staging/vt6655/mac.h b/drivers/staging/vt6655/mac.h
index 030f529c339b..33b758cb79d4 100644
--- a/drivers/staging/vt6655/mac.h
+++ b/drivers/staging/vt6655/mac.h
@@ -12,11 +12,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- *
* File: mac.h
*
* Purpose: MAC routines
@@ -554,341 +549,341 @@
/*--------------------- Export Macros ------------------------------*/
-#define MACvRegBitsOn(dwIoBase, byRegOfs, byBits) \
+#define MACvRegBitsOn(iobase, byRegOfs, byBits) \
do { \
unsigned char byData; \
- VNSvInPortB(dwIoBase + byRegOfs, &byData); \
- VNSvOutPortB(dwIoBase + byRegOfs, byData | (byBits)); \
+ VNSvInPortB(iobase + byRegOfs, &byData); \
+ VNSvOutPortB(iobase + byRegOfs, byData | (byBits)); \
} while (0)
-#define MACvWordRegBitsOn(dwIoBase, byRegOfs, wBits) \
+#define MACvWordRegBitsOn(iobase, byRegOfs, wBits) \
do { \
unsigned short wData; \
- VNSvInPortW(dwIoBase + byRegOfs, &wData); \
- VNSvOutPortW(dwIoBase + byRegOfs, wData | (wBits)); \
+ VNSvInPortW(iobase + byRegOfs, &wData); \
+ VNSvOutPortW(iobase + byRegOfs, wData | (wBits)); \
} while (0)
-#define MACvDWordRegBitsOn(dwIoBase, byRegOfs, dwBits) \
+#define MACvDWordRegBitsOn(iobase, byRegOfs, dwBits) \
do { \
unsigned long dwData; \
- VNSvInPortD(dwIoBase + byRegOfs, &dwData); \
- VNSvOutPortD(dwIoBase + byRegOfs, dwData | (dwBits)); \
+ VNSvInPortD(iobase + byRegOfs, &dwData); \
+ VNSvOutPortD(iobase + byRegOfs, dwData | (dwBits)); \
} while (0)
-#define MACvRegBitsOnEx(dwIoBase, byRegOfs, byMask, byBits) \
+#define MACvRegBitsOnEx(iobase, byRegOfs, byMask, byBits) \
do { \
unsigned char byData; \
- VNSvInPortB(dwIoBase + byRegOfs, &byData); \
+ VNSvInPortB(iobase + byRegOfs, &byData); \
byData &= byMask; \
- VNSvOutPortB(dwIoBase + byRegOfs, byData | (byBits)); \
+ VNSvOutPortB(iobase + byRegOfs, byData | (byBits)); \
} while (0)
-#define MACvRegBitsOff(dwIoBase, byRegOfs, byBits) \
+#define MACvRegBitsOff(iobase, byRegOfs, byBits) \
do { \
unsigned char byData; \
- VNSvInPortB(dwIoBase + byRegOfs, &byData); \
- VNSvOutPortB(dwIoBase + byRegOfs, byData & ~(byBits)); \
+ VNSvInPortB(iobase + byRegOfs, &byData); \
+ VNSvOutPortB(iobase + byRegOfs, byData & ~(byBits)); \
} while (0)
-#define MACvWordRegBitsOff(dwIoBase, byRegOfs, wBits) \
+#define MACvWordRegBitsOff(iobase, byRegOfs, wBits) \
do { \
unsigned short wData; \
- VNSvInPortW(dwIoBase + byRegOfs, &wData); \
- VNSvOutPortW(dwIoBase + byRegOfs, wData & ~(wBits)); \
+ VNSvInPortW(iobase + byRegOfs, &wData); \
+ VNSvOutPortW(iobase + byRegOfs, wData & ~(wBits)); \
} while (0)
-#define MACvDWordRegBitsOff(dwIoBase, byRegOfs, dwBits) \
+#define MACvDWordRegBitsOff(iobase, byRegOfs, dwBits) \
do { \
unsigned long dwData; \
- VNSvInPortD(dwIoBase + byRegOfs, &dwData); \
- VNSvOutPortD(dwIoBase + byRegOfs, dwData & ~(dwBits)); \
+ VNSvInPortD(iobase + byRegOfs, &dwData); \
+ VNSvOutPortD(iobase + byRegOfs, dwData & ~(dwBits)); \
} while (0)
-#define MACvGetCurrRx0DescAddr(dwIoBase, pdwCurrDescAddr) \
- VNSvInPortD(dwIoBase + MAC_REG_RXDMAPTR0, \
+#define MACvGetCurrRx0DescAddr(iobase, pdwCurrDescAddr) \
+ VNSvInPortD(iobase + MAC_REG_RXDMAPTR0, \
(unsigned long *)pdwCurrDescAddr)
-#define MACvGetCurrRx1DescAddr(dwIoBase, pdwCurrDescAddr) \
- VNSvInPortD(dwIoBase + MAC_REG_RXDMAPTR1, \
+#define MACvGetCurrRx1DescAddr(iobase, pdwCurrDescAddr) \
+ VNSvInPortD(iobase + MAC_REG_RXDMAPTR1, \
(unsigned long *)pdwCurrDescAddr)
-#define MACvGetCurrTx0DescAddr(dwIoBase, pdwCurrDescAddr) \
- VNSvInPortD(dwIoBase + MAC_REG_TXDMAPTR0, \
+#define MACvGetCurrTx0DescAddr(iobase, pdwCurrDescAddr) \
+ VNSvInPortD(iobase + MAC_REG_TXDMAPTR0, \
(unsigned long *)pdwCurrDescAddr)
-#define MACvGetCurrAC0DescAddr(dwIoBase, pdwCurrDescAddr) \
- VNSvInPortD(dwIoBase + MAC_REG_AC0DMAPTR, \
+#define MACvGetCurrAC0DescAddr(iobase, pdwCurrDescAddr) \
+ VNSvInPortD(iobase + MAC_REG_AC0DMAPTR, \
(unsigned long *)pdwCurrDescAddr)
-#define MACvGetCurrSyncDescAddr(dwIoBase, pdwCurrDescAddr) \
- VNSvInPortD(dwIoBase + MAC_REG_SYNCDMAPTR, \
+#define MACvGetCurrSyncDescAddr(iobase, pdwCurrDescAddr) \
+ VNSvInPortD(iobase + MAC_REG_SYNCDMAPTR, \
(unsigned long *)pdwCurrDescAddr)
-#define MACvGetCurrATIMDescAddr(dwIoBase, pdwCurrDescAddr) \
- VNSvInPortD(dwIoBase + MAC_REG_ATIMDMAPTR, \
+#define MACvGetCurrATIMDescAddr(iobase, pdwCurrDescAddr) \
+ VNSvInPortD(iobase + MAC_REG_ATIMDMAPTR, \
(unsigned long *)pdwCurrDescAddr)
/* set the chip with current BCN tx descriptor address */
-#define MACvSetCurrBCNTxDescAddr(dwIoBase, dwCurrDescAddr) \
- VNSvOutPortD(dwIoBase + MAC_REG_BCNDMAPTR, \
+#define MACvSetCurrBCNTxDescAddr(iobase, dwCurrDescAddr) \
+ VNSvOutPortD(iobase + MAC_REG_BCNDMAPTR, \
dwCurrDescAddr)
/* set the chip with current BCN length */
-#define MACvSetCurrBCNLength(dwIoBase, wCurrBCNLength) \
- VNSvOutPortW(dwIoBase + MAC_REG_BCNDMACTL+2, \
+#define MACvSetCurrBCNLength(iobase, wCurrBCNLength) \
+ VNSvOutPortW(iobase + MAC_REG_BCNDMACTL+2, \
wCurrBCNLength)
-#define MACvReadBSSIDAddress(dwIoBase, pbyEtherAddr) \
+#define MACvReadBSSIDAddress(iobase, pbyEtherAddr) \
do { \
- VNSvOutPortB(dwIoBase + MAC_REG_PAGE1SEL, 1); \
- VNSvInPortB(dwIoBase + MAC_REG_BSSID0, \
+ VNSvOutPortB(iobase + MAC_REG_PAGE1SEL, 1); \
+ VNSvInPortB(iobase + MAC_REG_BSSID0, \
(unsigned char *)pbyEtherAddr); \
- VNSvInPortB(dwIoBase + MAC_REG_BSSID0 + 1, \
+ VNSvInPortB(iobase + MAC_REG_BSSID0 + 1, \
pbyEtherAddr + 1); \
- VNSvInPortB(dwIoBase + MAC_REG_BSSID0 + 2, \
+ VNSvInPortB(iobase + MAC_REG_BSSID0 + 2, \
pbyEtherAddr + 2); \
- VNSvInPortB(dwIoBase + MAC_REG_BSSID0 + 3, \
+ VNSvInPortB(iobase + MAC_REG_BSSID0 + 3, \
pbyEtherAddr + 3); \
- VNSvInPortB(dwIoBase + MAC_REG_BSSID0 + 4, \
+ VNSvInPortB(iobase + MAC_REG_BSSID0 + 4, \
pbyEtherAddr + 4); \
- VNSvInPortB(dwIoBase + MAC_REG_BSSID0 + 5, \
+ VNSvInPortB(iobase + MAC_REG_BSSID0 + 5, \
pbyEtherAddr + 5); \
- VNSvOutPortB(dwIoBase + MAC_REG_PAGE1SEL, 0); \
+ VNSvOutPortB(iobase + MAC_REG_PAGE1SEL, 0); \
} while (0)
-#define MACvWriteBSSIDAddress(dwIoBase, pbyEtherAddr) \
+#define MACvWriteBSSIDAddress(iobase, pbyEtherAddr) \
do { \
- VNSvOutPortB(dwIoBase + MAC_REG_PAGE1SEL, 1); \
- VNSvOutPortB(dwIoBase + MAC_REG_BSSID0, \
+ VNSvOutPortB(iobase + MAC_REG_PAGE1SEL, 1); \
+ VNSvOutPortB(iobase + MAC_REG_BSSID0, \
*(pbyEtherAddr)); \
- VNSvOutPortB(dwIoBase + MAC_REG_BSSID0 + 1, \
+ VNSvOutPortB(iobase + MAC_REG_BSSID0 + 1, \
*(pbyEtherAddr + 1)); \
- VNSvOutPortB(dwIoBase + MAC_REG_BSSID0 + 2, \
+ VNSvOutPortB(iobase + MAC_REG_BSSID0 + 2, \
*(pbyEtherAddr + 2)); \
- VNSvOutPortB(dwIoBase + MAC_REG_BSSID0 + 3, \
+ VNSvOutPortB(iobase + MAC_REG_BSSID0 + 3, \
*(pbyEtherAddr + 3)); \
- VNSvOutPortB(dwIoBase + MAC_REG_BSSID0 + 4, \
+ VNSvOutPortB(iobase + MAC_REG_BSSID0 + 4, \
*(pbyEtherAddr + 4)); \
- VNSvOutPortB(dwIoBase + MAC_REG_BSSID0 + 5, \
+ VNSvOutPortB(iobase + MAC_REG_BSSID0 + 5, \
*(pbyEtherAddr + 5)); \
- VNSvOutPortB(dwIoBase + MAC_REG_PAGE1SEL, 0); \
+ VNSvOutPortB(iobase + MAC_REG_PAGE1SEL, 0); \
} while (0)
-#define MACvReadEtherAddress(dwIoBase, pbyEtherAddr) \
+#define MACvReadEtherAddress(iobase, pbyEtherAddr) \
do { \
- VNSvOutPortB(dwIoBase + MAC_REG_PAGE1SEL, 1); \
- VNSvInPortB(dwIoBase + MAC_REG_PAR0, \
+ VNSvOutPortB(iobase + MAC_REG_PAGE1SEL, 1); \
+ VNSvInPortB(iobase + MAC_REG_PAR0, \
(unsigned char *)pbyEtherAddr); \
- VNSvInPortB(dwIoBase + MAC_REG_PAR0 + 1, \
+ VNSvInPortB(iobase + MAC_REG_PAR0 + 1, \
pbyEtherAddr + 1); \
- VNSvInPortB(dwIoBase + MAC_REG_PAR0 + 2, \
+ VNSvInPortB(iobase + MAC_REG_PAR0 + 2, \
pbyEtherAddr + 2); \
- VNSvInPortB(dwIoBase + MAC_REG_PAR0 + 3, \
+ VNSvInPortB(iobase + MAC_REG_PAR0 + 3, \
pbyEtherAddr + 3); \
- VNSvInPortB(dwIoBase + MAC_REG_PAR0 + 4, \
+ VNSvInPortB(iobase + MAC_REG_PAR0 + 4, \
pbyEtherAddr + 4); \
- VNSvInPortB(dwIoBase + MAC_REG_PAR0 + 5, \
+ VNSvInPortB(iobase + MAC_REG_PAR0 + 5, \
pbyEtherAddr + 5); \
- VNSvOutPortB(dwIoBase + MAC_REG_PAGE1SEL, 0); \
+ VNSvOutPortB(iobase + MAC_REG_PAGE1SEL, 0); \
} while (0)
-#define MACvWriteEtherAddress(dwIoBase, pbyEtherAddr) \
+#define MACvWriteEtherAddress(iobase, pbyEtherAddr) \
do { \
- VNSvOutPortB(dwIoBase + MAC_REG_PAGE1SEL, 1); \
- VNSvOutPortB(dwIoBase + MAC_REG_PAR0, \
+ VNSvOutPortB(iobase + MAC_REG_PAGE1SEL, 1); \
+ VNSvOutPortB(iobase + MAC_REG_PAR0, \
*pbyEtherAddr); \
- VNSvOutPortB(dwIoBase + MAC_REG_PAR0 + 1, \
+ VNSvOutPortB(iobase + MAC_REG_PAR0 + 1, \
*(pbyEtherAddr + 1)); \
- VNSvOutPortB(dwIoBase + MAC_REG_PAR0 + 2, \
+ VNSvOutPortB(iobase + MAC_REG_PAR0 + 2, \
*(pbyEtherAddr + 2)); \
- VNSvOutPortB(dwIoBase + MAC_REG_PAR0 + 3, \
+ VNSvOutPortB(iobase + MAC_REG_PAR0 + 3, \
*(pbyEtherAddr + 3)); \
- VNSvOutPortB(dwIoBase + MAC_REG_PAR0 + 4, \
+ VNSvOutPortB(iobase + MAC_REG_PAR0 + 4, \
*(pbyEtherAddr + 4)); \
- VNSvOutPortB(dwIoBase + MAC_REG_PAR0 + 5, \
+ VNSvOutPortB(iobase + MAC_REG_PAR0 + 5, \
*(pbyEtherAddr + 5)); \
- VNSvOutPortB(dwIoBase + MAC_REG_PAGE1SEL, 0); \
+ VNSvOutPortB(iobase + MAC_REG_PAGE1SEL, 0); \
} while (0)
-#define MACvClearISR(dwIoBase) \
- VNSvOutPortD(dwIoBase + MAC_REG_ISR, IMR_MASK_VALUE)
+#define MACvClearISR(iobase) \
+ VNSvOutPortD(iobase + MAC_REG_ISR, IMR_MASK_VALUE)
-#define MACvStart(dwIoBase) \
- VNSvOutPortB(dwIoBase + MAC_REG_HOSTCR, \
+#define MACvStart(iobase) \
+ VNSvOutPortB(iobase + MAC_REG_HOSTCR, \
(HOSTCR_MACEN | HOSTCR_RXON | HOSTCR_TXON))
-#define MACvRx0PerPktMode(dwIoBase) \
- VNSvOutPortD(dwIoBase + MAC_REG_RXDMACTL0, RX_PERPKT)
+#define MACvRx0PerPktMode(iobase) \
+ VNSvOutPortD(iobase + MAC_REG_RXDMACTL0, RX_PERPKT)
-#define MACvRx0BufferFillMode(dwIoBase) \
- VNSvOutPortD(dwIoBase + MAC_REG_RXDMACTL0, RX_PERPKTCLR)
+#define MACvRx0BufferFillMode(iobase) \
+ VNSvOutPortD(iobase + MAC_REG_RXDMACTL0, RX_PERPKTCLR)
-#define MACvRx1PerPktMode(dwIoBase) \
- VNSvOutPortD(dwIoBase + MAC_REG_RXDMACTL1, RX_PERPKT)
+#define MACvRx1PerPktMode(iobase) \
+ VNSvOutPortD(iobase + MAC_REG_RXDMACTL1, RX_PERPKT)
-#define MACvRx1BufferFillMode(dwIoBase) \
- VNSvOutPortD(dwIoBase + MAC_REG_RXDMACTL1, RX_PERPKTCLR)
+#define MACvRx1BufferFillMode(iobase) \
+ VNSvOutPortD(iobase + MAC_REG_RXDMACTL1, RX_PERPKTCLR)
-#define MACvRxOn(dwIoBase) \
- MACvRegBitsOn(dwIoBase, MAC_REG_HOSTCR, HOSTCR_RXON)
+#define MACvRxOn(iobase) \
+ MACvRegBitsOn(iobase, MAC_REG_HOSTCR, HOSTCR_RXON)
-#define MACvReceive0(dwIoBase) \
+#define MACvReceive0(iobase) \
do { \
unsigned long dwData; \
- VNSvInPortD(dwIoBase + MAC_REG_RXDMACTL0, &dwData); \
+ VNSvInPortD(iobase + MAC_REG_RXDMACTL0, &dwData); \
if (dwData & DMACTL_RUN) \
- VNSvOutPortD(dwIoBase + MAC_REG_RXDMACTL0, DMACTL_WAKE); \
+ VNSvOutPortD(iobase + MAC_REG_RXDMACTL0, DMACTL_WAKE); \
else \
- VNSvOutPortD(dwIoBase + MAC_REG_RXDMACTL0, DMACTL_RUN); \
+ VNSvOutPortD(iobase + MAC_REG_RXDMACTL0, DMACTL_RUN); \
} while (0)
-#define MACvReceive1(dwIoBase) \
+#define MACvReceive1(iobase) \
do { \
unsigned long dwData; \
- VNSvInPortD(dwIoBase + MAC_REG_RXDMACTL1, &dwData); \
+ VNSvInPortD(iobase + MAC_REG_RXDMACTL1, &dwData); \
if (dwData & DMACTL_RUN) \
- VNSvOutPortD(dwIoBase + MAC_REG_RXDMACTL1, DMACTL_WAKE); \
+ VNSvOutPortD(iobase + MAC_REG_RXDMACTL1, DMACTL_WAKE); \
else \
- VNSvOutPortD(dwIoBase + MAC_REG_RXDMACTL1, DMACTL_RUN); \
+ VNSvOutPortD(iobase + MAC_REG_RXDMACTL1, DMACTL_RUN); \
} while (0)
-#define MACvTxOn(dwIoBase) \
- MACvRegBitsOn(dwIoBase, MAC_REG_HOSTCR, HOSTCR_TXON)
+#define MACvTxOn(iobase) \
+ MACvRegBitsOn(iobase, MAC_REG_HOSTCR, HOSTCR_TXON)
-#define MACvTransmit0(dwIoBase) \
+#define MACvTransmit0(iobase) \
do { \
unsigned long dwData; \
- VNSvInPortD(dwIoBase + MAC_REG_TXDMACTL0, &dwData); \
+ VNSvInPortD(iobase + MAC_REG_TXDMACTL0, &dwData); \
if (dwData & DMACTL_RUN) \
- VNSvOutPortD(dwIoBase + MAC_REG_TXDMACTL0, DMACTL_WAKE); \
+ VNSvOutPortD(iobase + MAC_REG_TXDMACTL0, DMACTL_WAKE); \
else \
- VNSvOutPortD(dwIoBase + MAC_REG_TXDMACTL0, DMACTL_RUN); \
+ VNSvOutPortD(iobase + MAC_REG_TXDMACTL0, DMACTL_RUN); \
} while (0)
-#define MACvTransmitAC0(dwIoBase) \
+#define MACvTransmitAC0(iobase) \
do { \
unsigned long dwData; \
- VNSvInPortD(dwIoBase + MAC_REG_AC0DMACTL, &dwData); \
+ VNSvInPortD(iobase + MAC_REG_AC0DMACTL, &dwData); \
if (dwData & DMACTL_RUN) \
- VNSvOutPortD(dwIoBase + MAC_REG_AC0DMACTL, DMACTL_WAKE); \
+ VNSvOutPortD(iobase + MAC_REG_AC0DMACTL, DMACTL_WAKE); \
else \
- VNSvOutPortD(dwIoBase + MAC_REG_AC0DMACTL, DMACTL_RUN); \
+ VNSvOutPortD(iobase + MAC_REG_AC0DMACTL, DMACTL_RUN); \
} while (0)
-#define MACvTransmitSYNC(dwIoBase) \
+#define MACvTransmitSYNC(iobase) \
do { \
unsigned long dwData; \
- VNSvInPortD(dwIoBase + MAC_REG_SYNCDMACTL, &dwData); \
+ VNSvInPortD(iobase + MAC_REG_SYNCDMACTL, &dwData); \
if (dwData & DMACTL_RUN) \
- VNSvOutPortD(dwIoBase + MAC_REG_SYNCDMACTL, DMACTL_WAKE); \
+ VNSvOutPortD(iobase + MAC_REG_SYNCDMACTL, DMACTL_WAKE); \
else \
- VNSvOutPortD(dwIoBase + MAC_REG_SYNCDMACTL, DMACTL_RUN); \
+ VNSvOutPortD(iobase + MAC_REG_SYNCDMACTL, DMACTL_RUN); \
} while (0)
-#define MACvTransmitATIM(dwIoBase) \
+#define MACvTransmitATIM(iobase) \
do { \
unsigned long dwData; \
- VNSvInPortD(dwIoBase + MAC_REG_ATIMDMACTL, &dwData); \
+ VNSvInPortD(iobase + MAC_REG_ATIMDMACTL, &dwData); \
if (dwData & DMACTL_RUN) \
- VNSvOutPortD(dwIoBase + MAC_REG_ATIMDMACTL, DMACTL_WAKE); \
+ VNSvOutPortD(iobase + MAC_REG_ATIMDMACTL, DMACTL_WAKE); \
else \
- VNSvOutPortD(dwIoBase + MAC_REG_ATIMDMACTL, DMACTL_RUN); \
+ VNSvOutPortD(iobase + MAC_REG_ATIMDMACTL, DMACTL_RUN); \
} while (0)
-#define MACvTransmitBCN(dwIoBase) \
- VNSvOutPortB(dwIoBase + MAC_REG_BCNDMACTL, BEACON_READY)
+#define MACvTransmitBCN(iobase) \
+ VNSvOutPortB(iobase + MAC_REG_BCNDMACTL, BEACON_READY)
-#define MACvClearStckDS(dwIoBase) \
+#define MACvClearStckDS(iobase) \
do { \
unsigned char byOrgValue; \
- VNSvInPortB(dwIoBase + MAC_REG_STICKHW, &byOrgValue); \
+ VNSvInPortB(iobase + MAC_REG_STICKHW, &byOrgValue); \
byOrgValue = byOrgValue & 0xFC; \
- VNSvOutPortB(dwIoBase + MAC_REG_STICKHW, byOrgValue); \
+ VNSvOutPortB(iobase + MAC_REG_STICKHW, byOrgValue); \
} while (0)
-#define MACvReadISR(dwIoBase, pdwValue) \
- VNSvInPortD(dwIoBase + MAC_REG_ISR, pdwValue)
+#define MACvReadISR(iobase, pdwValue) \
+ VNSvInPortD(iobase + MAC_REG_ISR, pdwValue)
-#define MACvWriteISR(dwIoBase, dwValue) \
- VNSvOutPortD(dwIoBase + MAC_REG_ISR, dwValue)
+#define MACvWriteISR(iobase, dwValue) \
+ VNSvOutPortD(iobase + MAC_REG_ISR, dwValue)
-#define MACvIntEnable(dwIoBase, dwMask) \
- VNSvOutPortD(dwIoBase + MAC_REG_IMR, dwMask)
+#define MACvIntEnable(iobase, dwMask) \
+ VNSvOutPortD(iobase + MAC_REG_IMR, dwMask)
-#define MACvIntDisable(dwIoBase) \
- VNSvOutPortD(dwIoBase + MAC_REG_IMR, 0)
+#define MACvIntDisable(iobase) \
+ VNSvOutPortD(iobase + MAC_REG_IMR, 0)
-#define MACvSelectPage0(dwIoBase) \
- VNSvOutPortB(dwIoBase + MAC_REG_PAGE1SEL, 0)
+#define MACvSelectPage0(iobase) \
+ VNSvOutPortB(iobase + MAC_REG_PAGE1SEL, 0)
-#define MACvSelectPage1(dwIoBase) \
- VNSvOutPortB(dwIoBase + MAC_REG_PAGE1SEL, 1)
+#define MACvSelectPage1(iobase) \
+ VNSvOutPortB(iobase + MAC_REG_PAGE1SEL, 1)
-#define MACvReadMIBCounter(dwIoBase, pdwCounter) \
- VNSvInPortD(dwIoBase + MAC_REG_MIBCNTR, pdwCounter)
+#define MACvReadMIBCounter(iobase, pdwCounter) \
+ VNSvInPortD(iobase + MAC_REG_MIBCNTR, pdwCounter)
-#define MACvPwrEvntDisable(dwIoBase) \
- VNSvOutPortW(dwIoBase + MAC_REG_WAKEUPEN0, 0x0000)
+#define MACvPwrEvntDisable(iobase) \
+ VNSvOutPortW(iobase + MAC_REG_WAKEUPEN0, 0x0000)
-#define MACvEnableProtectMD(dwIoBase) \
+#define MACvEnableProtectMD(iobase) \
do { \
unsigned long dwOrgValue; \
- VNSvInPortD(dwIoBase + MAC_REG_ENCFG, &dwOrgValue); \
+ VNSvInPortD(iobase + MAC_REG_ENCFG, &dwOrgValue); \
dwOrgValue = dwOrgValue | EnCFG_ProtectMd; \
- VNSvOutPortD(dwIoBase + MAC_REG_ENCFG, dwOrgValue); \
+ VNSvOutPortD(iobase + MAC_REG_ENCFG, dwOrgValue); \
} while (0)
-#define MACvDisableProtectMD(dwIoBase) \
+#define MACvDisableProtectMD(iobase) \
do { \
unsigned long dwOrgValue; \
- VNSvInPortD(dwIoBase + MAC_REG_ENCFG, &dwOrgValue); \
+ VNSvInPortD(iobase + MAC_REG_ENCFG, &dwOrgValue); \
dwOrgValue = dwOrgValue & ~EnCFG_ProtectMd; \
- VNSvOutPortD(dwIoBase + MAC_REG_ENCFG, dwOrgValue); \
+ VNSvOutPortD(iobase + MAC_REG_ENCFG, dwOrgValue); \
} while (0)
-#define MACvEnableBarkerPreambleMd(dwIoBase) \
+#define MACvEnableBarkerPreambleMd(iobase) \
do { \
unsigned long dwOrgValue; \
- VNSvInPortD(dwIoBase + MAC_REG_ENCFG, &dwOrgValue); \
+ VNSvInPortD(iobase + MAC_REG_ENCFG, &dwOrgValue); \
dwOrgValue = dwOrgValue | EnCFG_BarkerPream; \
- VNSvOutPortD(dwIoBase + MAC_REG_ENCFG, dwOrgValue); \
+ VNSvOutPortD(iobase + MAC_REG_ENCFG, dwOrgValue); \
} while (0)
-#define MACvDisableBarkerPreambleMd(dwIoBase) \
+#define MACvDisableBarkerPreambleMd(iobase) \
do { \
unsigned long dwOrgValue; \
- VNSvInPortD(dwIoBase + MAC_REG_ENCFG, &dwOrgValue); \
+ VNSvInPortD(iobase + MAC_REG_ENCFG, &dwOrgValue); \
dwOrgValue = dwOrgValue & ~EnCFG_BarkerPream; \
- VNSvOutPortD(dwIoBase + MAC_REG_ENCFG, dwOrgValue); \
+ VNSvOutPortD(iobase + MAC_REG_ENCFG, dwOrgValue); \
} while (0)
-#define MACvSetBBType(dwIoBase, byTyp) \
+#define MACvSetBBType(iobase, byTyp) \
do { \
unsigned long dwOrgValue; \
- VNSvInPortD(dwIoBase + MAC_REG_ENCFG, &dwOrgValue); \
+ VNSvInPortD(iobase + MAC_REG_ENCFG, &dwOrgValue); \
dwOrgValue = dwOrgValue & ~EnCFG_BBType_MASK; \
dwOrgValue = dwOrgValue | (unsigned long)byTyp; \
- VNSvOutPortD(dwIoBase + MAC_REG_ENCFG, dwOrgValue); \
+ VNSvOutPortD(iobase + MAC_REG_ENCFG, dwOrgValue); \
} while (0)
-#define MACvReadATIMW(dwIoBase, pwCounter) \
- VNSvInPortW(dwIoBase + MAC_REG_AIDATIM, pwCounter)
+#define MACvReadATIMW(iobase, pwCounter) \
+ VNSvInPortW(iobase + MAC_REG_AIDATIM, pwCounter)
-#define MACvWriteATIMW(dwIoBase, wCounter) \
- VNSvOutPortW(dwIoBase + MAC_REG_AIDATIM, wCounter)
+#define MACvWriteATIMW(iobase, wCounter) \
+ VNSvOutPortW(iobase + MAC_REG_AIDATIM, wCounter)
-#define MACvWriteCRC16_128(dwIoBase, byRegOfs, wCRC) \
+#define MACvWriteCRC16_128(iobase, byRegOfs, wCRC) \
do { \
- VNSvOutPortB(dwIoBase + MAC_REG_PAGE1SEL, 1); \
- VNSvOutPortW(dwIoBase + byRegOfs, wCRC); \
- VNSvOutPortB(dwIoBase + MAC_REG_PAGE1SEL, 0); \
+ VNSvOutPortB(iobase + MAC_REG_PAGE1SEL, 1); \
+ VNSvOutPortW(iobase + byRegOfs, wCRC); \
+ VNSvOutPortB(iobase + MAC_REG_PAGE1SEL, 0); \
} while (0)
-#define MACvGPIOIn(dwIoBase, pbyValue) \
- VNSvInPortB(dwIoBase + MAC_REG_GPIOCTL1, pbyValue)
+#define MACvGPIOIn(iobase, pbyValue) \
+ VNSvInPortB(iobase + MAC_REG_GPIOCTL1, pbyValue)
-#define MACvSetRFLE_LatchBase(dwIoBase) \
- MACvWordRegBitsOn(dwIoBase, MAC_REG_SOFTPWRCTL, SOFTPWRCTL_RFLEOPT)
+#define MACvSetRFLE_LatchBase(iobase) \
+ MACvWordRegBitsOn(iobase, MAC_REG_SOFTPWRCTL, SOFTPWRCTL_RFLEOPT)
bool MACbIsRegBitsOn(struct vnt_private *, unsigned char byRegOfs,
unsigned char byTestBits);
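
The rename above is mechanical (dwIoBase -> iobase), but the macros themselves remain untyped. A minimal sketch of the direction such staging cleanups usually take next, assuming the VNSv* accessors wrap ioread32()/iowrite32() as their names suggest; the helper names here are illustrative, not part of the driver:

#include <linux/io.h>

static inline void vt6655_int_enable(void __iomem *iobase, u32 mask)
{
	iowrite32(mask, iobase + MAC_REG_IMR);	/* same effect as MACvIntEnable() */
}

static inline void vt6655_int_disable(void __iomem *iobase)
{
	iowrite32(0, iobase + MAC_REG_IMR);	/* same effect as MACvIntDisable() */
}

Unlike the macros, these give the compiler a real void __iomem * parameter to type-check.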
diff --git a/drivers/staging/vt6655/power.c b/drivers/staging/vt6655/power.c
index 7d6e7464ae51..716d2a80f840 100644
--- a/drivers/staging/vt6655/power.c
+++ b/drivers/staging/vt6655/power.c
@@ -12,11 +12,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- *
* File: power.c
*
* Purpose: Handles 802.11 power management functions
@@ -133,7 +128,6 @@ PSvDisablePowerSaving(
priv->bPWBitOn = false;
}
-
/*
*
* Routine Description:
diff --git a/drivers/staging/vt6655/power.h b/drivers/staging/vt6655/power.h
index d82dd8d6d68b..dfcb0ca8b448 100644
--- a/drivers/staging/vt6655/power.h
+++ b/drivers/staging/vt6655/power.h
@@ -12,10 +12,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- *
* File: power.h
*
* Purpose: Handles 802.11 power management functions
@@ -46,7 +42,6 @@ PSvEnablePowerSaving(
unsigned short wListenInterval
);
-
bool
PSbIsNextTBTTWakeUp(
struct vnt_private *
diff --git a/drivers/staging/vt6655/rf.c b/drivers/staging/vt6655/rf.c
index 447882c7a6be..edf7db9d53b3 100644
--- a/drivers/staging/vt6655/rf.c
+++ b/drivers/staging/vt6655/rf.c
@@ -12,11 +12,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- *
* File: rf.c
*
* Purpose: rf function code
@@ -50,359 +45,362 @@
#define AL7230_PWR_IDX_LEN 64
static const unsigned long dwAL2230InitTable[CB_AL2230_INIT_SEQ] = {
- 0x03F79000+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
- 0x03333100+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
- 0x01A00200+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
- 0x00FFF300+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
- 0x0005A400+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
- 0x0F4DC500+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
- 0x0805B600+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
- 0x0146C700+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
- 0x00068800+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
- 0x0403B900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
- 0x00DBBA00+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
- 0x00099B00+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
- 0x0BDFFC00+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
- 0x00000D00+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
- 0x00580F00+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW
+ 0x03F79000 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
+ 0x03333100 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
+ 0x01A00200 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
+ 0x00FFF300 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
+ 0x0005A400 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
+ 0x0F4DC500 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
+ 0x0805B600 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
+ 0x0146C700 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
+ 0x00068800 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
+ 0x0403B900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
+ 0x00DBBA00 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
+ 0x00099B00 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
+ 0x0BDFFC00 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
+ 0x00000D00 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
+ 0x00580F00 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW
};
static const unsigned long dwAL2230ChannelTable0[CB_MAX_CHANNEL] = {
- 0x03F79000+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 1, Tf = 2412MHz */
- 0x03F79000+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 2, Tf = 2417MHz */
- 0x03E79000+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 3, Tf = 2422MHz */
- 0x03E79000+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 4, Tf = 2427MHz */
- 0x03F7A000+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 5, Tf = 2432MHz */
- 0x03F7A000+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 6, Tf = 2437MHz */
- 0x03E7A000+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 7, Tf = 2442MHz */
- 0x03E7A000+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 8, Tf = 2447MHz */
- 0x03F7B000+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 9, Tf = 2452MHz */
- 0x03F7B000+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 10, Tf = 2457MHz */
- 0x03E7B000+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 11, Tf = 2462MHz */
- 0x03E7B000+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 12, Tf = 2467MHz */
- 0x03F7C000+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 13, Tf = 2472MHz */
- 0x03E7C000+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW /* channel = 14, Tf = 2412M */
+ 0x03F79000 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 1, Tf = 2412MHz */
+ 0x03F79000 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 2, Tf = 2417MHz */
+ 0x03E79000 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 3, Tf = 2422MHz */
+ 0x03E79000 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 4, Tf = 2427MHz */
+ 0x03F7A000 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 5, Tf = 2432MHz */
+ 0x03F7A000 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 6, Tf = 2437MHz */
+ 0x03E7A000 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 7, Tf = 2442MHz */
+ 0x03E7A000 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 8, Tf = 2447MHz */
+ 0x03F7B000 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 9, Tf = 2452MHz */
+ 0x03F7B000 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 10, Tf = 2457MHz */
+ 0x03E7B000 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 11, Tf = 2462MHz */
+ 0x03E7B000 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 12, Tf = 2467MHz */
+ 0x03F7C000 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 13, Tf = 2472MHz */
+	0x03E7C000 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW	/* channel = 14, Tf = 2484MHz */
};
static const unsigned long dwAL2230ChannelTable1[CB_MAX_CHANNEL] = {
- 0x03333100+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 1, Tf = 2412MHz */
- 0x0B333100+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 2, Tf = 2417MHz */
- 0x03333100+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 3, Tf = 2422MHz */
- 0x0B333100+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 4, Tf = 2427MHz */
- 0x03333100+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 5, Tf = 2432MHz */
- 0x0B333100+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 6, Tf = 2437MHz */
- 0x03333100+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 7, Tf = 2442MHz */
- 0x0B333100+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 8, Tf = 2447MHz */
- 0x03333100+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 9, Tf = 2452MHz */
- 0x0B333100+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 10, Tf = 2457MHz */
- 0x03333100+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 11, Tf = 2462MHz */
- 0x0B333100+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 12, Tf = 2467MHz */
- 0x03333100+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 13, Tf = 2472MHz */
- 0x06666100+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW /* channel = 14, Tf = 2412M */
+ 0x03333100 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 1, Tf = 2412MHz */
+ 0x0B333100 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 2, Tf = 2417MHz */
+ 0x03333100 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 3, Tf = 2422MHz */
+ 0x0B333100 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 4, Tf = 2427MHz */
+ 0x03333100 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 5, Tf = 2432MHz */
+ 0x0B333100 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 6, Tf = 2437MHz */
+ 0x03333100 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 7, Tf = 2442MHz */
+ 0x0B333100 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 8, Tf = 2447MHz */
+ 0x03333100 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 9, Tf = 2452MHz */
+ 0x0B333100 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 10, Tf = 2457MHz */
+ 0x03333100 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 11, Tf = 2462MHz */
+ 0x0B333100 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 12, Tf = 2467MHz */
+ 0x03333100 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 13, Tf = 2472MHz */
+	0x06666100 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW	/* channel = 14, Tf = 2484MHz */
};
static unsigned long dwAL2230PowerTable[AL2230_PWR_IDX_LEN] = {
- 0x04040900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
- 0x04041900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
- 0x04042900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
- 0x04043900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
- 0x04044900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
- 0x04045900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
- 0x04046900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
- 0x04047900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
- 0x04048900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
- 0x04049900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
- 0x0404A900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
- 0x0404B900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
- 0x0404C900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
- 0x0404D900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
- 0x0404E900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
- 0x0404F900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
- 0x04050900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
- 0x04051900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
- 0x04052900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
- 0x04053900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
- 0x04054900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
- 0x04055900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
- 0x04056900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
- 0x04057900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
- 0x04058900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
- 0x04059900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
- 0x0405A900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
- 0x0405B900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
- 0x0405C900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
- 0x0405D900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
- 0x0405E900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
- 0x0405F900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
- 0x04060900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
- 0x04061900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
- 0x04062900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
- 0x04063900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
- 0x04064900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
- 0x04065900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
- 0x04066900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
- 0x04067900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
- 0x04068900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
- 0x04069900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
- 0x0406A900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
- 0x0406B900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
- 0x0406C900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
- 0x0406D900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
- 0x0406E900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
- 0x0406F900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
- 0x04070900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
- 0x04071900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
- 0x04072900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
- 0x04073900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
- 0x04074900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
- 0x04075900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
- 0x04076900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
- 0x04077900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
- 0x04078900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
- 0x04079900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
- 0x0407A900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
- 0x0407B900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
- 0x0407C900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
- 0x0407D900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
- 0x0407E900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
- 0x0407F900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW
+ 0x04040900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
+ 0x04041900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
+ 0x04042900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
+ 0x04043900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
+ 0x04044900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
+ 0x04045900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
+ 0x04046900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
+ 0x04047900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
+ 0x04048900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
+ 0x04049900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
+ 0x0404A900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
+ 0x0404B900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
+ 0x0404C900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
+ 0x0404D900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
+ 0x0404E900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
+ 0x0404F900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
+ 0x04050900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
+ 0x04051900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
+ 0x04052900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
+ 0x04053900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
+ 0x04054900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
+ 0x04055900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
+ 0x04056900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
+ 0x04057900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
+ 0x04058900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
+ 0x04059900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
+ 0x0405A900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
+ 0x0405B900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
+ 0x0405C900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
+ 0x0405D900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
+ 0x0405E900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
+ 0x0405F900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
+ 0x04060900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
+ 0x04061900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
+ 0x04062900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
+ 0x04063900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
+ 0x04064900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
+ 0x04065900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
+ 0x04066900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
+ 0x04067900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
+ 0x04068900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
+ 0x04069900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
+ 0x0406A900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
+ 0x0406B900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
+ 0x0406C900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
+ 0x0406D900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
+ 0x0406E900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
+ 0x0406F900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
+ 0x04070900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
+ 0x04071900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
+ 0x04072900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
+ 0x04073900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
+ 0x04074900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
+ 0x04075900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
+ 0x04076900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
+ 0x04077900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
+ 0x04078900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
+ 0x04079900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
+ 0x0407A900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
+ 0x0407B900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
+ 0x0407C900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
+ 0x0407D900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
+ 0x0407E900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
+ 0x0407F900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW
};
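
Every entry above follows the same packing: the RF payload sits in the upper bits, the register length is shifted into bits 3 and up, and the IFREGCTL write command occupies the low bits. A hedged sketch of that composition (field meanings are inferred from the macro names, not verified against a datasheet; the helper name is illustrative):

/* assumes BY_AL2230_REG_LEN and IFREGCTL_REGW as defined in the driver headers */
static inline unsigned long al2230_rf_word(unsigned long payload)
{
	return payload + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW;
}

/* e.g. al2230_rf_word(0x03F79000) reproduces the channel-1 entry above */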
/* 40MHz reference frequency
 * Need to pull PLLON(PE3) low when writing channel registers through 3-wire.
*/
static const unsigned long dwAL7230InitTable[CB_AL7230_INIT_SEQ] = {
- 0x00379000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* Channel1 // Need modify for 11a */
- 0x13333100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* Channel1 // Need modify for 11a */
- 0x841FF200+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* Need modify for 11a: 451FE2 */
- 0x3FDFA300+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* Need modify for 11a: 5FDFA3 */
- 0x7FD78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* 11b/g // Need modify for 11a */
+ 0x00379000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* Channel1 // Need modify for 11a */
+ 0x13333100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* Channel1 // Need modify for 11a */
+ 0x841FF200 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* Need modify for 11a: 451FE2 */
+ 0x3FDFA300 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* Need modify for 11a: 5FDFA3 */
+ 0x7FD78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* 11b/g // Need modify for 11a */
 /* RoberYu:20050113, Rev0.47 Register Setting Guide */
- 0x802B5500+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* Need modify for 11a: 8D1B55 */
- 0x56AF3600+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW,
- 0xCE020700+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* Need modify for 11a: 860207 */
- 0x6EBC0800+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW,
- 0x221BB900+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW,
- 0xE0000A00+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* Need modify for 11a: E0600A */
- 0x08031B00+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* init 0x080B1B00 => 0x080F1B00 for 3 wire control TxGain(D10) */
+ 0x802B5500 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* Need modify for 11a: 8D1B55 */
+ 0x56AF3600 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW,
+ 0xCE020700 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* Need modify for 11a: 860207 */
+ 0x6EBC0800 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW,
+ 0x221BB900 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW,
+ 0xE0000A00 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* Need modify for 11a: E0600A */
+ 0x08031B00 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* init 0x080B1B00 => 0x080F1B00 for 3 wire control TxGain(D10) */
 /* RoberYu:20050113, Rev0.47 Register Setting Guide */
- 0x000A3C00+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* Need modify for 11a: 00143C */
- 0xFFFFFD00+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW,
- 0x00000E00+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW,
- 0x1ABA8F00+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW /* Need modify for 11a: 12BACF */
+ 0x000A3C00 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* Need modify for 11a: 00143C */
+ 0xFFFFFD00 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW,
+ 0x00000E00 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW,
+ 0x1ABA8F00 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW /* Need modify for 11a: 12BACF */
};
static const unsigned long dwAL7230InitTableAMode[CB_AL7230_INIT_SEQ] = {
- 0x0FF52000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* Channel184 // Need modify for 11b/g */
- 0x00000100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* Channel184 // Need modify for 11b/g */
- 0x451FE200+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* Need modify for 11b/g */
- 0x5FDFA300+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* Need modify for 11b/g */
- 0x67F78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* 11a // Need modify for 11b/g */
- 0x853F5500+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* Need modify for 11b/g, RoberYu:20050113 */
- 0x56AF3600+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW,
- 0xCE020700+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* Need modify for 11b/g */
- 0x6EBC0800+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW,
- 0x221BB900+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW,
- 0xE0600A00+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* Need modify for 11b/g */
- 0x08031B00+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* init 0x080B1B00 => 0x080F1B00 for 3 wire control TxGain(D10) */
- 0x00147C00+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* Need modify for 11b/g */
- 0xFFFFFD00+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW,
- 0x00000E00+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW,
- 0x12BACF00+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW /* Need modify for 11b/g */
+ 0x0FF52000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* Channel184 // Need modify for 11b/g */
+ 0x00000100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* Channel184 // Need modify for 11b/g */
+ 0x451FE200 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* Need modify for 11b/g */
+ 0x5FDFA300 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* Need modify for 11b/g */
+ 0x67F78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* 11a // Need modify for 11b/g */
+ 0x853F5500 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* Need modify for 11b/g, RoberYu:20050113 */
+ 0x56AF3600 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW,
+ 0xCE020700 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* Need modify for 11b/g */
+ 0x6EBC0800 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW,
+ 0x221BB900 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW,
+ 0xE0600A00 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* Need modify for 11b/g */
+ 0x08031B00 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* init 0x080B1B00 => 0x080F1B00 for 3 wire control TxGain(D10) */
+ 0x00147C00 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* Need modify for 11b/g */
+ 0xFFFFFD00 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW,
+ 0x00000E00 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW,
+ 0x12BACF00 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW /* Need modify for 11b/g */
};
static const unsigned long dwAL7230ChannelTable0[CB_MAX_CHANNEL] = {
- 0x00379000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 1, Tf = 2412MHz */
- 0x00379000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 2, Tf = 2417MHz */
- 0x00379000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 3, Tf = 2422MHz */
- 0x00379000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 4, Tf = 2427MHz */
- 0x0037A000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 5, Tf = 2432MHz */
- 0x0037A000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 6, Tf = 2437MHz */
- 0x0037A000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 7, Tf = 2442MHz */
- 0x0037A000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 8, Tf = 2447MHz //RobertYu: 20050218, update for APNode 0.49 */
- 0x0037B000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 9, Tf = 2452MHz //RobertYu: 20050218, update for APNode 0.49 */
- 0x0037B000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 10, Tf = 2457MHz //RobertYu: 20050218, update for APNode 0.49 */
- 0x0037B000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 11, Tf = 2462MHz //RobertYu: 20050218, update for APNode 0.49 */
- 0x0037B000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 12, Tf = 2467MHz //RobertYu: 20050218, update for APNode 0.49 */
- 0x0037C000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 13, Tf = 2472MHz //RobertYu: 20050218, update for APNode 0.49 */
- 0x0037C000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 14, Tf = 2484MHz */
+ 0x00379000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 1, Tf = 2412MHz */
+ 0x00379000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 2, Tf = 2417MHz */
+ 0x00379000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 3, Tf = 2422MHz */
+ 0x00379000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 4, Tf = 2427MHz */
+ 0x0037A000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 5, Tf = 2432MHz */
+ 0x0037A000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 6, Tf = 2437MHz */
+ 0x0037A000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 7, Tf = 2442MHz */
+ 0x0037A000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 8, Tf = 2447MHz //RobertYu: 20050218, update for APNode 0.49 */
+ 0x0037B000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 9, Tf = 2452MHz //RobertYu: 20050218, update for APNode 0.49 */
+ 0x0037B000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 10, Tf = 2457MHz //RobertYu: 20050218, update for APNode 0.49 */
+ 0x0037B000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 11, Tf = 2462MHz //RobertYu: 20050218, update for APNode 0.49 */
+ 0x0037B000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 12, Tf = 2467MHz //RobertYu: 20050218, update for APNode 0.49 */
+ 0x0037C000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 13, Tf = 2472MHz //RobertYu: 20050218, update for APNode 0.49 */
+ 0x0037C000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 14, Tf = 2484MHz */
/* 4.9G => Ch 183, 184, 185, 187, 188, 189, 192, 196 (Value:15 ~ 22) */
- 0x0FF52000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 183, Tf = 4915MHz (15) */
- 0x0FF52000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 184, Tf = 4920MHz (16) */
- 0x0FF52000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 185, Tf = 4925MHz (17) */
- 0x0FF52000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 187, Tf = 4935MHz (18) */
- 0x0FF52000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 188, Tf = 4940MHz (19) */
- 0x0FF52000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 189, Tf = 4945MHz (20) */
- 0x0FF53000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 192, Tf = 4960MHz (21) */
- 0x0FF53000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 196, Tf = 4980MHz (22) */
+ 0x0FF52000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 183, Tf = 4915MHz (15) */
+ 0x0FF52000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 184, Tf = 4920MHz (16) */
+ 0x0FF52000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 185, Tf = 4925MHz (17) */
+ 0x0FF52000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 187, Tf = 4935MHz (18) */
+ 0x0FF52000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 188, Tf = 4940MHz (19) */
+ 0x0FF52000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 189, Tf = 4945MHz (20) */
+ 0x0FF53000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 192, Tf = 4960MHz (21) */
+ 0x0FF53000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 196, Tf = 4980MHz (22) */
/* 5G => Ch 7, 8, 9, 11, 12, 16, 34, 36, 38, 40, 42, 44, 46, 48, 52, 56, 60, 64,
- * 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 149, 153, 157, 161, 165 (Value 23 ~ 56) */
-
- 0x0FF54000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 7, Tf = 5035MHz (23) */
- 0x0FF54000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 8, Tf = 5040MHz (24) */
- 0x0FF54000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 9, Tf = 5045MHz (25) */
- 0x0FF54000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 11, Tf = 5055MHz (26) */
- 0x0FF54000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 12, Tf = 5060MHz (27) */
- 0x0FF55000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 16, Tf = 5080MHz (28) */
- 0x0FF56000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 34, Tf = 5170MHz (29) */
- 0x0FF56000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 36, Tf = 5180MHz (30) */
- 0x0FF57000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 38, Tf = 5190MHz (31) //RobertYu: 20050218, update for APNode 0.49 */
- 0x0FF57000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 40, Tf = 5200MHz (32) */
- 0x0FF57000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 42, Tf = 5210MHz (33) */
- 0x0FF57000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 44, Tf = 5220MHz (34) */
- 0x0FF57000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 46, Tf = 5230MHz (35) */
- 0x0FF57000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 48, Tf = 5240MHz (36) */
- 0x0FF58000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 52, Tf = 5260MHz (37) */
- 0x0FF58000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 56, Tf = 5280MHz (38) */
- 0x0FF58000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 60, Tf = 5300MHz (39) */
- 0x0FF59000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 64, Tf = 5320MHz (40) */
-
- 0x0FF5C000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 100, Tf = 5500MHz (41) */
- 0x0FF5C000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 104, Tf = 5520MHz (42) */
- 0x0FF5C000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 108, Tf = 5540MHz (43) */
- 0x0FF5D000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 112, Tf = 5560MHz (44) */
- 0x0FF5D000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 116, Tf = 5580MHz (45) */
- 0x0FF5D000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 120, Tf = 5600MHz (46) */
- 0x0FF5E000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 124, Tf = 5620MHz (47) */
- 0x0FF5E000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 128, Tf = 5640MHz (48) */
- 0x0FF5E000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 132, Tf = 5660MHz (49) */
- 0x0FF5F000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 136, Tf = 5680MHz (50) */
- 0x0FF5F000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 140, Tf = 5700MHz (51) */
- 0x0FF60000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 149, Tf = 5745MHz (52) */
- 0x0FF60000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 153, Tf = 5765MHz (53) */
- 0x0FF60000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 157, Tf = 5785MHz (54) */
- 0x0FF61000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 161, Tf = 5805MHz (55) */
- 0x0FF61000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW /* channel = 165, Tf = 5825MHz (56) */
+ * 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 149, 153, 157, 161, 165 (Value 23 ~ 56)
+ */
+
+ 0x0FF54000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 7, Tf = 5035MHz (23) */
+ 0x0FF54000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 8, Tf = 5040MHz (24) */
+ 0x0FF54000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 9, Tf = 5045MHz (25) */
+ 0x0FF54000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 11, Tf = 5055MHz (26) */
+ 0x0FF54000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 12, Tf = 5060MHz (27) */
+ 0x0FF55000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 16, Tf = 5080MHz (28) */
+ 0x0FF56000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 34, Tf = 5170MHz (29) */
+ 0x0FF56000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 36, Tf = 5180MHz (30) */
+ 0x0FF57000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 38, Tf = 5190MHz (31) //RobertYu: 20050218, update for APNode 0.49 */
+ 0x0FF57000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 40, Tf = 5200MHz (32) */
+ 0x0FF57000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 42, Tf = 5210MHz (33) */
+ 0x0FF57000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 44, Tf = 5220MHz (34) */
+ 0x0FF57000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 46, Tf = 5230MHz (35) */
+ 0x0FF57000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 48, Tf = 5240MHz (36) */
+ 0x0FF58000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 52, Tf = 5260MHz (37) */
+ 0x0FF58000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 56, Tf = 5280MHz (38) */
+ 0x0FF58000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 60, Tf = 5300MHz (39) */
+ 0x0FF59000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 64, Tf = 5320MHz (40) */
+
+ 0x0FF5C000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 100, Tf = 5500MHz (41) */
+ 0x0FF5C000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 104, Tf = 5520MHz (42) */
+ 0x0FF5C000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 108, Tf = 5540MHz (43) */
+ 0x0FF5D000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 112, Tf = 5560MHz (44) */
+ 0x0FF5D000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 116, Tf = 5580MHz (45) */
+ 0x0FF5D000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 120, Tf = 5600MHz (46) */
+ 0x0FF5E000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 124, Tf = 5620MHz (47) */
+ 0x0FF5E000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 128, Tf = 5640MHz (48) */
+ 0x0FF5E000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 132, Tf = 5660MHz (49) */
+ 0x0FF5F000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 136, Tf = 5680MHz (50) */
+ 0x0FF5F000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 140, Tf = 5700MHz (51) */
+ 0x0FF60000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 149, Tf = 5745MHz (52) */
+ 0x0FF60000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 153, Tf = 5765MHz (53) */
+ 0x0FF60000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 157, Tf = 5785MHz (54) */
+ 0x0FF61000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 161, Tf = 5805MHz (55) */
+ 0x0FF61000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW /* channel = 165, Tf = 5825MHz (56) */
};
static const unsigned long dwAL7230ChannelTable1[CB_MAX_CHANNEL] = {
- 0x13333100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 1, Tf = 2412MHz */
- 0x1B333100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 2, Tf = 2417MHz */
- 0x03333100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 3, Tf = 2422MHz */
- 0x0B333100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 4, Tf = 2427MHz */
- 0x13333100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 5, Tf = 2432MHz */
- 0x1B333100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 6, Tf = 2437MHz */
- 0x03333100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 7, Tf = 2442MHz */
- 0x0B333100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 8, Tf = 2447MHz */
- 0x13333100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 9, Tf = 2452MHz */
- 0x1B333100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 10, Tf = 2457MHz */
- 0x03333100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 11, Tf = 2462MHz */
- 0x0B333100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 12, Tf = 2467MHz */
- 0x13333100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 13, Tf = 2472MHz */
- 0x06666100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 14, Tf = 2484MHz */
+ 0x13333100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 1, Tf = 2412MHz */
+ 0x1B333100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 2, Tf = 2417MHz */
+ 0x03333100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 3, Tf = 2422MHz */
+ 0x0B333100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 4, Tf = 2427MHz */
+ 0x13333100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 5, Tf = 2432MHz */
+ 0x1B333100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 6, Tf = 2437MHz */
+ 0x03333100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 7, Tf = 2442MHz */
+ 0x0B333100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 8, Tf = 2447MHz */
+ 0x13333100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 9, Tf = 2452MHz */
+ 0x1B333100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 10, Tf = 2457MHz */
+ 0x03333100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 11, Tf = 2462MHz */
+ 0x0B333100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 12, Tf = 2467MHz */
+ 0x13333100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 13, Tf = 2472MHz */
+ 0x06666100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 14, Tf = 2484MHz */
/* 4.9G => Ch 183, 184, 185, 187, 188, 189, 192, 196 (Value:15 ~ 22) */
- 0x1D555100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 183, Tf = 4915MHz (15) */
- 0x00000100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 184, Tf = 4920MHz (16) */
- 0x02AAA100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 185, Tf = 4925MHz (17) */
- 0x08000100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 187, Tf = 4935MHz (18) */
- 0x0AAAA100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 188, Tf = 4940MHz (19) */
- 0x0D555100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 189, Tf = 4945MHz (20) */
- 0x15555100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 192, Tf = 4960MHz (21) */
- 0x00000100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 196, Tf = 4980MHz (22) */
+ 0x1D555100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 183, Tf = 4915MHz (15) */
+ 0x00000100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 184, Tf = 4920MHz (16) */
+ 0x02AAA100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 185, Tf = 4925MHz (17) */
+ 0x08000100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 187, Tf = 4935MHz (18) */
+ 0x0AAAA100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 188, Tf = 4940MHz (19) */
+ 0x0D555100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 189, Tf = 4945MHz (20) */
+ 0x15555100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 192, Tf = 4960MHz (21) */
+ 0x00000100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 196, Tf = 4980MHz (22) */
/* 5G => Ch 7, 8, 9, 11, 12, 16, 34, 36, 38, 40, 42, 44, 46, 48, 52, 56, 60, 64,
- * 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 149, 153, 157, 161, 165 (Value 23 ~ 56) */
- 0x1D555100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 7, Tf = 5035MHz (23) */
- 0x00000100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 8, Tf = 5040MHz (24) */
- 0x02AAA100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 9, Tf = 5045MHz (25) */
- 0x08000100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 11, Tf = 5055MHz (26) */
- 0x0AAAA100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 12, Tf = 5060MHz (27) */
- 0x15555100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 16, Tf = 5080MHz (28) */
- 0x05555100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 34, Tf = 5170MHz (29) */
- 0x0AAAA100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 36, Tf = 5180MHz (30) */
- 0x10000100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 38, Tf = 5190MHz (31) */
- 0x15555100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 40, Tf = 5200MHz (32) */
- 0x1AAAA100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 42, Tf = 5210MHz (33) */
- 0x00000100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 44, Tf = 5220MHz (34) */
- 0x05555100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 46, Tf = 5230MHz (35) */
- 0x0AAAA100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 48, Tf = 5240MHz (36) */
- 0x15555100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 52, Tf = 5260MHz (37) */
- 0x00000100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 56, Tf = 5280MHz (38) */
- 0x0AAAA100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 60, Tf = 5300MHz (39) */
- 0x15555100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 64, Tf = 5320MHz (40) */
- 0x15555100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 100, Tf = 5500MHz (41) */
- 0x00000100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 104, Tf = 5520MHz (42) */
- 0x0AAAA100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 108, Tf = 5540MHz (43) */
- 0x15555100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 112, Tf = 5560MHz (44) */
- 0x00000100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 116, Tf = 5580MHz (45) */
- 0x0AAAA100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 120, Tf = 5600MHz (46) */
- 0x15555100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 124, Tf = 5620MHz (47) */
- 0x00000100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 128, Tf = 5640MHz (48) */
- 0x0AAAA100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 132, Tf = 5660MHz (49) */
- 0x15555100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 136, Tf = 5680MHz (50) */
- 0x00000100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 140, Tf = 5700MHz (51) */
- 0x18000100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 149, Tf = 5745MHz (52) */
- 0x02AAA100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 153, Tf = 5765MHz (53) */
- 0x0D555100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 157, Tf = 5785MHz (54) */
- 0x18000100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 161, Tf = 5805MHz (55) */
- 0x02AAA100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW /* channel = 165, Tf = 5825MHz (56) */
+ * 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 149, 153, 157, 161, 165 (Value 23 ~ 56)
+ */
+ 0x1D555100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 7, Tf = 5035MHz (23) */
+ 0x00000100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 8, Tf = 5040MHz (24) */
+ 0x02AAA100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 9, Tf = 5045MHz (25) */
+ 0x08000100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 11, Tf = 5055MHz (26) */
+ 0x0AAAA100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 12, Tf = 5060MHz (27) */
+ 0x15555100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 16, Tf = 5080MHz (28) */
+ 0x05555100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 34, Tf = 5170MHz (29) */
+ 0x0AAAA100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 36, Tf = 5180MHz (30) */
+ 0x10000100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 38, Tf = 5190MHz (31) */
+ 0x15555100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 40, Tf = 5200MHz (32) */
+ 0x1AAAA100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 42, Tf = 5210MHz (33) */
+ 0x00000100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 44, Tf = 5220MHz (34) */
+ 0x05555100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 46, Tf = 5230MHz (35) */
+ 0x0AAAA100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 48, Tf = 5240MHz (36) */
+ 0x15555100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 52, Tf = 5260MHz (37) */
+ 0x00000100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 56, Tf = 5280MHz (38) */
+ 0x0AAAA100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 60, Tf = 5300MHz (39) */
+ 0x15555100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 64, Tf = 5320MHz (40) */
+ 0x15555100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 100, Tf = 5500MHz (41) */
+ 0x00000100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 104, Tf = 5520MHz (42) */
+ 0x0AAAA100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 108, Tf = 5540MHz (43) */
+ 0x15555100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 112, Tf = 5560MHz (44) */
+ 0x00000100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 116, Tf = 5580MHz (45) */
+ 0x0AAAA100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 120, Tf = 5600MHz (46) */
+ 0x15555100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 124, Tf = 5620MHz (47) */
+ 0x00000100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 128, Tf = 5640MHz (48) */
+ 0x0AAAA100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 132, Tf = 5660MHz (49) */
+ 0x15555100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 136, Tf = 5680MHz (50) */
+ 0x00000100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 140, Tf = 5700MHz (51) */
+ 0x18000100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 149, Tf = 5745MHz (52) */
+ 0x02AAA100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 153, Tf = 5765MHz (53) */
+ 0x0D555100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 157, Tf = 5785MHz (54) */
+ 0x18000100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 161, Tf = 5805MHz (55) */
+ 0x02AAA100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW /* channel = 165, Tf = 5825MHz (56) */
};
static const unsigned long dwAL7230ChannelTable2[CB_MAX_CHANNEL] = {
- 0x7FD78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 1, Tf = 2412MHz */
- 0x7FD78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 2, Tf = 2417MHz */
- 0x7FD78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 3, Tf = 2422MHz */
- 0x7FD78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 4, Tf = 2427MHz */
- 0x7FD78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 5, Tf = 2432MHz */
- 0x7FD78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 6, Tf = 2437MHz */
- 0x7FD78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 7, Tf = 2442MHz */
- 0x7FD78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 8, Tf = 2447MHz */
- 0x7FD78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 9, Tf = 2452MHz */
- 0x7FD78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 10, Tf = 2457MHz */
- 0x7FD78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 11, Tf = 2462MHz */
- 0x7FD78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 12, Tf = 2467MHz */
- 0x7FD78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 13, Tf = 2472MHz */
- 0x7FD78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 14, Tf = 2484MHz */
+ 0x7FD78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 1, Tf = 2412MHz */
+ 0x7FD78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 2, Tf = 2417MHz */
+ 0x7FD78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 3, Tf = 2422MHz */
+ 0x7FD78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 4, Tf = 2427MHz */
+ 0x7FD78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 5, Tf = 2432MHz */
+ 0x7FD78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 6, Tf = 2437MHz */
+ 0x7FD78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 7, Tf = 2442MHz */
+ 0x7FD78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 8, Tf = 2447MHz */
+ 0x7FD78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 9, Tf = 2452MHz */
+ 0x7FD78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 10, Tf = 2457MHz */
+ 0x7FD78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 11, Tf = 2462MHz */
+ 0x7FD78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 12, Tf = 2467MHz */
+ 0x7FD78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 13, Tf = 2472MHz */
+ 0x7FD78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 14, Tf = 2484MHz */
/* 4.9G => Ch 183, 184, 185, 187, 188, 189, 192, 196 (Value:15 ~ 22) */
- 0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 183, Tf = 4915MHz (15) */
- 0x67D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 184, Tf = 4920MHz (16) */
- 0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 185, Tf = 4925MHz (17) */
- 0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 187, Tf = 4935MHz (18) */
- 0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 188, Tf = 4940MHz (19) */
- 0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 189, Tf = 4945MHz (20) */
- 0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 192, Tf = 4960MHz (21) */
- 0x67D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 196, Tf = 4980MHz (22) */
+ 0x77D78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 183, Tf = 4915MHz (15) */
+ 0x67D78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 184, Tf = 4920MHz (16) */
+ 0x77D78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 185, Tf = 4925MHz (17) */
+ 0x77D78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 187, Tf = 4935MHz (18) */
+ 0x77D78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 188, Tf = 4940MHz (19) */
+ 0x77D78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 189, Tf = 4945MHz (20) */
+ 0x77D78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 192, Tf = 4960MHz (21) */
+ 0x67D78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 196, Tf = 4980MHz (22) */
/* 5G => Ch 7, 8, 9, 11, 12, 16, 34, 36, 38, 40, 42, 44, 46, 48, 52, 56, 60, 64,
- * 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 149, 153, 157, 161, 165 (Value 23 ~ 56) */
- 0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 7, Tf = 5035MHz (23) */
- 0x67D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 8, Tf = 5040MHz (24) */
- 0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 9, Tf = 5045MHz (25) */
- 0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 11, Tf = 5055MHz (26) */
- 0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 12, Tf = 5060MHz (27) */
- 0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 16, Tf = 5080MHz (28) */
- 0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 34, Tf = 5170MHz (29) */
- 0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 36, Tf = 5180MHz (30) */
- 0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 38, Tf = 5190MHz (31) */
- 0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 40, Tf = 5200MHz (32) */
- 0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 42, Tf = 5210MHz (33) */
- 0x67D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 44, Tf = 5220MHz (34) */
- 0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 46, Tf = 5230MHz (35) */
- 0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 48, Tf = 5240MHz (36) */
- 0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 52, Tf = 5260MHz (37) */
- 0x67D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 56, Tf = 5280MHz (38) */
- 0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 60, Tf = 5300MHz (39) */
- 0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 64, Tf = 5320MHz (40) */
- 0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 100, Tf = 5500MHz (41) */
- 0x67D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 104, Tf = 5520MHz (42) */
- 0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 108, Tf = 5540MHz (43) */
- 0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 112, Tf = 5560MHz (44) */
- 0x67D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 116, Tf = 5580MHz (45) */
- 0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 120, Tf = 5600MHz (46) */
- 0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 124, Tf = 5620MHz (47) */
- 0x67D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 128, Tf = 5640MHz (48) */
- 0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 132, Tf = 5660MHz (49) */
- 0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 136, Tf = 5680MHz (50) */
- 0x67D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 140, Tf = 5700MHz (51) */
- 0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 149, Tf = 5745MHz (52) */
- 0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 153, Tf = 5765MHz (53) */
- 0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 157, Tf = 5785MHz (54) */
- 0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 161, Tf = 5805MHz (55) */
- 0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW /* channel = 165, Tf = 5825MHz (56) */
+ * 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 149, 153, 157, 161, 165 (Value 23 ~ 56)
+ */
+ 0x77D78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 7, Tf = 5035MHz (23) */
+ 0x67D78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 8, Tf = 5040MHz (24) */
+ 0x77D78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 9, Tf = 5045MHz (25) */
+ 0x77D78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 11, Tf = 5055MHz (26) */
+ 0x77D78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 12, Tf = 5060MHz (27) */
+ 0x77D78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 16, Tf = 5080MHz (28) */
+ 0x77D78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 34, Tf = 5170MHz (29) */
+ 0x77D78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 36, Tf = 5180MHz (30) */
+ 0x77D78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 38, Tf = 5190MHz (31) */
+ 0x77D78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 40, Tf = 5200MHz (32) */
+ 0x77D78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 42, Tf = 5210MHz (33) */
+ 0x67D78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 44, Tf = 5220MHz (34) */
+ 0x77D78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 46, Tf = 5230MHz (35) */
+ 0x77D78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 48, Tf = 5240MHz (36) */
+ 0x77D78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 52, Tf = 5260MHz (37) */
+ 0x67D78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 56, Tf = 5280MHz (38) */
+ 0x77D78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 60, Tf = 5300MHz (39) */
+ 0x77D78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 64, Tf = 5320MHz (40) */
+ 0x77D78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 100, Tf = 5500MHz (41) */
+ 0x67D78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 104, Tf = 5520MHz (42) */
+ 0x77D78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 108, Tf = 5540MHz (43) */
+ 0x77D78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 112, Tf = 5560MHz (44) */
+ 0x67D78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 116, Tf = 5580MHz (45) */
+ 0x77D78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 120, Tf = 5600MHz (46) */
+ 0x77D78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 124, Tf = 5620MHz (47) */
+ 0x67D78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 128, Tf = 5640MHz (48) */
+ 0x77D78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 132, Tf = 5660MHz (49) */
+ 0x77D78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 136, Tf = 5680MHz (50) */
+ 0x67D78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 140, Tf = 5700MHz (51) */
+ 0x77D78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 149, Tf = 5745MHz (52) */
+ 0x77D78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 153, Tf = 5765MHz (53) */
+ 0x77D78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 157, Tf = 5785MHz (54) */
+ 0x77D78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 161, Tf = 5805MHz (55) */
+ 0x77D78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW /* channel = 165, Tf = 5825MHz (56) */
};
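
For orientation, the three AL7230 tables are consumed together: a channel switch writes the matching word from each table, indexed by the remapped channel value minus one, as the s_bAL7230SelectChannel() hunk further down shows. In sketch form:

/* ch is the remapped channel value (1..14 for 2.4G, 15..56 per the comments) */
ret &= IFRFbWriteEmbedded(priv, dwAL7230ChannelTable0[ch - 1]);
ret &= IFRFbWriteEmbedded(priv, dwAL7230ChannelTable1[ch - 1]);
ret &= IFRFbWriteEmbedded(priv, dwAL7230ChannelTable2[ch - 1]);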
/*
@@ -410,7 +408,7 @@ static const unsigned long dwAL7230ChannelTable2[CB_MAX_CHANNEL] = {
*
* Parameters:
* In:
- * dwIoBase - I/O base address
+ * iobase - I/O base address
* Out:
* none
*
@@ -419,16 +417,16 @@ static const unsigned long dwAL7230ChannelTable2[CB_MAX_CHANNEL] = {
*/
static bool s_bAL7230Init(struct vnt_private *priv)
{
- void __iomem *dwIoBase = priv->PortOffset;
+ void __iomem *iobase = priv->PortOffset;
int ii;
bool ret;
ret = true;
/* 3-wire control for normal mode */
- VNSvOutPortB(dwIoBase + MAC_REG_SOFTPWRCTL, 0);
+ VNSvOutPortB(iobase + MAC_REG_SOFTPWRCTL, 0);
- MACvWordRegBitsOn(dwIoBase, MAC_REG_SOFTPWRCTL, (SOFTPWRCTL_SWPECTI |
+ MACvWordRegBitsOn(iobase, MAC_REG_SOFTPWRCTL, (SOFTPWRCTL_SWPECTI |
SOFTPWRCTL_TXPEINV));
BBvPowerSaveModeOFF(priv); /* RobertYu:20050106, have DC value for Calibration */
@@ -436,20 +434,20 @@ static bool s_bAL7230Init(struct vnt_private *priv)
ret &= IFRFbWriteEmbedded(priv, dwAL7230InitTable[ii]);
/* PLL On */
- MACvWordRegBitsOn(dwIoBase, MAC_REG_SOFTPWRCTL, SOFTPWRCTL_SWPE3);
+ MACvWordRegBitsOn(iobase, MAC_REG_SOFTPWRCTL, SOFTPWRCTL_SWPE3);
/* Calibration */
MACvTimer0MicroSDelay(priv, 150);/* 150us */
/* TXDCOC:active, RCK:disable */
- ret &= IFRFbWriteEmbedded(priv, (0x9ABA8F00+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW));
+ ret &= IFRFbWriteEmbedded(priv, (0x9ABA8F00 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW));
MACvTimer0MicroSDelay(priv, 30);/* 30us */
/* TXDCOC:disable, RCK:active */
- ret &= IFRFbWriteEmbedded(priv, (0x3ABA8F00+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW));
+ ret &= IFRFbWriteEmbedded(priv, (0x3ABA8F00 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW));
MACvTimer0MicroSDelay(priv, 30);/* 30us */
/* TXDCOC:disable, RCK:disable */
ret &= IFRFbWriteEmbedded(priv, dwAL7230InitTable[CB_AL7230_INIT_SEQ-1]);
- MACvWordRegBitsOn(dwIoBase, MAC_REG_SOFTPWRCTL, (SOFTPWRCTL_SWPE3 |
+ MACvWordRegBitsOn(iobase, MAC_REG_SOFTPWRCTL, (SOFTPWRCTL_SWPE3 |
SOFTPWRCTL_SWPE2 |
SOFTPWRCTL_SWPECTI |
SOFTPWRCTL_TXPEINV));
@@ -458,7 +456,7 @@ static bool s_bAL7230Init(struct vnt_private *priv)
/* PE1: TX_ON, PE2: RX_ON, PE3: PLLON */
/* 3-wire control for power saving mode */
- VNSvOutPortB(dwIoBase + MAC_REG_PSPWRSIG, (PSSIG_WPE3 | PSSIG_WPE2)); /* 1100 0000 */
+ VNSvOutPortB(iobase + MAC_REG_PSPWRSIG, (PSSIG_WPE3 | PSSIG_WPE2)); /* 1100 0000 */
return ret;
}
@@ -468,26 +466,26 @@ static bool s_bAL7230Init(struct vnt_private *priv)
*/
static bool s_bAL7230SelectChannel(struct vnt_private *priv, unsigned char byChannel)
{
- void __iomem *dwIoBase = priv->PortOffset;
+ void __iomem *iobase = priv->PortOffset;
bool ret;
ret = true;
/* PLLON Off */
- MACvWordRegBitsOff(dwIoBase, MAC_REG_SOFTPWRCTL, SOFTPWRCTL_SWPE3);
+ MACvWordRegBitsOff(iobase, MAC_REG_SOFTPWRCTL, SOFTPWRCTL_SWPE3);
ret &= IFRFbWriteEmbedded(priv, dwAL7230ChannelTable0[byChannel - 1]);
ret &= IFRFbWriteEmbedded(priv, dwAL7230ChannelTable1[byChannel - 1]);
ret &= IFRFbWriteEmbedded(priv, dwAL7230ChannelTable2[byChannel - 1]);
/* PLLOn On */
- MACvWordRegBitsOn(dwIoBase, MAC_REG_SOFTPWRCTL, SOFTPWRCTL_SWPE3);
+ MACvWordRegBitsOn(iobase, MAC_REG_SOFTPWRCTL, SOFTPWRCTL_SWPE3);
/* Set Channel[7] = 0 to tell H/W channel is changing now. */
- VNSvOutPortB(dwIoBase + MAC_REG_CHANNEL, (byChannel & 0x7F));
+ VNSvOutPortB(iobase + MAC_REG_CHANNEL, (byChannel & 0x7F));
MACvTimer0MicroSDelay(priv, SWITCH_CHANNEL_DELAY_AL7230);
/* Set Channel[7] = 1 to tell H/W channel change is done. */
- VNSvOutPortB(dwIoBase + MAC_REG_CHANNEL, (byChannel | 0x80));
+ VNSvOutPortB(iobase + MAC_REG_CHANNEL, (byChannel | 0x80));
return ret;
}
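
For reference, the channel handshake above is pure bit work on MAC_REG_CHANNEL: bit 7 acts as a "change done" flag and the low seven bits carry the channel number. A minimal user-space sketch of the two writes, with the register access and the microsecond delay stubbed out (VNSvOutPortB()/MACvTimer0MicroSDelay() in the driver):

/*
 * Sketch only: models the two MAC_REG_CHANNEL writes around the
 * channel-switch delay.  Prints "write 0x2C, delay, write 0xAC"
 * for channel 44.
 */
#include <stdio.h>

static unsigned char channel_changing(unsigned char ch)
{
	return ch & 0x7F;	/* bit 7 = 0: change in progress */
}

static unsigned char channel_done(unsigned char ch)
{
	return ch | 0x80;	/* bit 7 = 1: change complete */
}

int main(void)
{
	unsigned char ch = 44;

	printf("write 0x%02X, delay, write 0x%02X\n",
	       channel_changing(ch), channel_done(ch));
	return 0;
}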
@@ -497,7 +495,7 @@ static bool s_bAL7230SelectChannel(struct vnt_private *priv, unsigned char byCha
*
* Parameters:
* In:
- * dwIoBase - I/O base address
+ * iobase - I/O base address
* dwData - data to write
* Out:
* none
@@ -507,15 +505,15 @@ static bool s_bAL7230SelectChannel(struct vnt_private *priv, unsigned char byCha
*/
bool IFRFbWriteEmbedded(struct vnt_private *priv, unsigned long dwData)
{
- void __iomem *dwIoBase = priv->PortOffset;
+ void __iomem *iobase = priv->PortOffset;
unsigned short ww;
unsigned long dwValue;
- VNSvOutPortD(dwIoBase + MAC_REG_IFREGCTL, dwData);
+ VNSvOutPortD(iobase + MAC_REG_IFREGCTL, dwData);
/* W_MAX_TIMEOUT is the timeout period */
for (ww = 0; ww < W_MAX_TIMEOUT; ww++) {
- VNSvInPortD(dwIoBase + MAC_REG_IFREGCTL, &dwValue);
+ VNSvInPortD(iobase + MAC_REG_IFREGCTL, &dwValue);
if (dwValue & IFREGCTL_DONE)
break;
}
@@ -531,7 +529,7 @@ bool IFRFbWriteEmbedded(struct vnt_private *priv, unsigned long dwData)
*
* Parameters:
* In:
- * dwIoBase - I/O base address
+ * iobase - I/O base address
* Out:
* none
*
@@ -540,51 +538,51 @@ bool IFRFbWriteEmbedded(struct vnt_private *priv, unsigned long dwData)
*/
static bool RFbAL2230Init(struct vnt_private *priv)
{
- void __iomem *dwIoBase = priv->PortOffset;
+ void __iomem *iobase = priv->PortOffset;
int ii;
bool ret;
ret = true;
/* 3-wire control for normal mode */
- VNSvOutPortB(dwIoBase + MAC_REG_SOFTPWRCTL, 0);
+ VNSvOutPortB(iobase + MAC_REG_SOFTPWRCTL, 0);
- MACvWordRegBitsOn(dwIoBase, MAC_REG_SOFTPWRCTL, (SOFTPWRCTL_SWPECTI |
+ MACvWordRegBitsOn(iobase, MAC_REG_SOFTPWRCTL, (SOFTPWRCTL_SWPECTI |
SOFTPWRCTL_TXPEINV));
/* PLL Off */
- MACvWordRegBitsOff(dwIoBase, MAC_REG_SOFTPWRCTL, SOFTPWRCTL_SWPE3);
+ MACvWordRegBitsOff(iobase, MAC_REG_SOFTPWRCTL, SOFTPWRCTL_SWPE3);
/* patch abnormal AL2230 frequency output */
- IFRFbWriteEmbedded(priv, (0x07168700+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW));
+ IFRFbWriteEmbedded(priv, (0x07168700 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW));
for (ii = 0; ii < CB_AL2230_INIT_SEQ; ii++)
ret &= IFRFbWriteEmbedded(priv, dwAL2230InitTable[ii]);
MACvTimer0MicroSDelay(priv, 30); /* delay 30 us */
/* PLL On */
- MACvWordRegBitsOn(dwIoBase, MAC_REG_SOFTPWRCTL, SOFTPWRCTL_SWPE3);
+ MACvWordRegBitsOn(iobase, MAC_REG_SOFTPWRCTL, SOFTPWRCTL_SWPE3);
MACvTimer0MicroSDelay(priv, 150);/* 150us */
- ret &= IFRFbWriteEmbedded(priv, (0x00d80f00+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW));
+ ret &= IFRFbWriteEmbedded(priv, (0x00d80f00 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW));
MACvTimer0MicroSDelay(priv, 30);/* 30us */
- ret &= IFRFbWriteEmbedded(priv, (0x00780f00+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW));
+ ret &= IFRFbWriteEmbedded(priv, (0x00780f00 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW));
MACvTimer0MicroSDelay(priv, 30);/* 30us */
ret &= IFRFbWriteEmbedded(priv, dwAL2230InitTable[CB_AL2230_INIT_SEQ-1]);
- MACvWordRegBitsOn(dwIoBase, MAC_REG_SOFTPWRCTL, (SOFTPWRCTL_SWPE3 |
+ MACvWordRegBitsOn(iobase, MAC_REG_SOFTPWRCTL, (SOFTPWRCTL_SWPE3 |
SOFTPWRCTL_SWPE2 |
SOFTPWRCTL_SWPECTI |
SOFTPWRCTL_TXPEINV));
/* 3-wire control for power saving mode */
- VNSvOutPortB(dwIoBase + MAC_REG_PSPWRSIG, (PSSIG_WPE3 | PSSIG_WPE2)); /* 1100 0000 */
+ VNSvOutPortB(iobase + MAC_REG_PSPWRSIG, (PSSIG_WPE3 | PSSIG_WPE2)); /* 1100 0000 */
return ret;
}
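
Every embedded RF write in these sequences composes its command word the same way: payload in the upper bits, the register bit-length shifted left by three, plus the IFREGCTL write strobe. A hedged helper, with reg_len and regw_cmd standing in for BY_AL2230_REG_LEN/BY_AL7230_REG_LEN and IFREGCTL_REGW, whose values are defined in headers outside this diff:

/* sketch: compose an AL2230/AL7230 command word; reg_len and
 * regw_cmd are stand-ins for driver constants not shown here */
static unsigned long rf_cmd_word(unsigned long payload,
				 unsigned long reg_len,
				 unsigned long regw_cmd)
{
	return payload + (reg_len << 3) + regw_cmd;
}

For example, rf_cmd_word(0x00d80f00, reg_len, regw_cmd) reproduces the first calibration write in RFbAL2230Init() above.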
static bool RFbAL2230SelectChannel(struct vnt_private *priv, unsigned char byChannel)
{
- void __iomem *dwIoBase = priv->PortOffset;
+ void __iomem *iobase = priv->PortOffset;
bool ret;
ret = true;
@@ -593,10 +591,10 @@ static bool RFbAL2230SelectChannel(struct vnt_private *priv, unsigned char byCha
ret &= IFRFbWriteEmbedded(priv, dwAL2230ChannelTable1[byChannel - 1]);
/* Set Channel[7] = 0 to tell H/W channel is changing now. */
- VNSvOutPortB(dwIoBase + MAC_REG_CHANNEL, (byChannel & 0x7F));
+ VNSvOutPortB(iobase + MAC_REG_CHANNEL, (byChannel & 0x7F));
MACvTimer0MicroSDelay(priv, SWITCH_CHANNEL_DELAY_AL2230);
/* Set Channel[7] = 1 to tell H/W channel change is done. */
- VNSvOutPortB(dwIoBase + MAC_REG_CHANNEL, (byChannel | 0x80));
+ VNSvOutPortB(iobase + MAC_REG_CHANNEL, (byChannel | 0x80));
return ret;
}
@@ -681,7 +679,7 @@ bool RFbSelectChannel(struct vnt_private *priv, unsigned char byRFType,
*
* Parameters:
* In:
- * dwIoBase - I/O base address
+ * iobase - I/O base address
* uChannel - channel number
* bySleepCnt - SleepProgSyn count
*
@@ -691,12 +689,12 @@ bool RFbSelectChannel(struct vnt_private *priv, unsigned char byRFType,
bool RFvWriteWakeProgSyn(struct vnt_private *priv, unsigned char byRFType,
u16 uChannel)
{
- void __iomem *dwIoBase = priv->PortOffset;
+ void __iomem *iobase = priv->PortOffset;
int ii;
unsigned char byInitCount = 0;
unsigned char bySleepCount = 0;
- VNSvOutPortW(dwIoBase + MAC_REG_MISCFFNDEX, 0);
+ VNSvOutPortW(iobase + MAC_REG_MISCFFNDEX, 0);
switch (byRFType) {
case RF_AIROHA:
case RF_AL2230S:
@@ -758,7 +756,7 @@ bool RFvWriteWakeProgSyn(struct vnt_private *priv, unsigned char byRFType,
*
* Parameters:
* In:
- * dwIoBase - I/O base address
+ * iobase - I/O base address
* dwRFPowerTable - RF Tx Power Setting
* Out:
* none
@@ -830,7 +828,7 @@ bool RFbSetPower(
*
* Parameters:
* In:
- * dwIoBase - I/O base address
+ * iobase - I/O base address
* dwRFPowerTable - RF Tx Power Setting
* Out:
* none
@@ -855,20 +853,20 @@ bool RFbRawSetPower(
case RF_AIROHA:
ret &= IFRFbWriteEmbedded(priv, dwAL2230PowerTable[byPwr]);
if (rate <= RATE_11M)
- ret &= IFRFbWriteEmbedded(priv, 0x0001B400+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW);
+ ret &= IFRFbWriteEmbedded(priv, 0x0001B400 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW);
else
- ret &= IFRFbWriteEmbedded(priv, 0x0005A400+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW);
+ ret &= IFRFbWriteEmbedded(priv, 0x0005A400 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW);
break;
case RF_AL2230S:
ret &= IFRFbWriteEmbedded(priv, dwAL2230PowerTable[byPwr]);
if (rate <= RATE_11M) {
- ret &= IFRFbWriteEmbedded(priv, 0x040C1400+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW);
- ret &= IFRFbWriteEmbedded(priv, 0x00299B00+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW);
+ ret &= IFRFbWriteEmbedded(priv, 0x040C1400 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW);
+ ret &= IFRFbWriteEmbedded(priv, 0x00299B00 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW);
} else {
- ret &= IFRFbWriteEmbedded(priv, 0x0005A400+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW);
- ret &= IFRFbWriteEmbedded(priv, 0x00099B00+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW);
+ ret &= IFRFbWriteEmbedded(priv, 0x0005A400 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW);
+ ret &= IFRFbWriteEmbedded(priv, 0x00099B00 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW);
}
break;
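
The ret &= idiom used throughout these power tables folds the success of each write into a single flag without aborting early: one failed write leaves ret false, but the remaining writes are still issued. A standalone sketch:

#include <stdbool.h>

/* sketch: accumulate success over a sequence of writes; write_fn
 * plays the role of IFRFbWriteEmbedded() and returns true on success */
static bool write_all(bool (*write_fn)(unsigned long),
		      const unsigned long *words, int n)
{
	bool ret = true;
	int i;

	for (i = 0; i < n; i++)
		ret &= write_fn(words[i]);	/* keep going on failure */
	return ret;
}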
diff --git a/drivers/staging/vt6655/rf.h b/drivers/staging/vt6655/rf.h
index e9c786995506..b6e853784a26 100644
--- a/drivers/staging/vt6655/rf.h
+++ b/drivers/staging/vt6655/rf.h
@@ -12,11 +12,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- *
* File: rf.h
*
* Purpose:
diff --git a/drivers/staging/vt6655/rxtx.c b/drivers/staging/vt6655/rxtx.c
index 7e69bc99d60f..3efe19a1b13f 100644
--- a/drivers/staging/vt6655/rxtx.c
+++ b/drivers/staging/vt6655/rxtx.c
@@ -12,10 +12,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- *
* File: rxtx.c
*
* Purpose: handle WMAC/802.3/802.11 rx & tx functions
@@ -1086,8 +1082,8 @@ s_cbFillTxBufHead(struct vnt_private *pDevice, unsigned char byPktType,
}
/*
- * Use for AUTO FALL BACK
- */
+ * Use for AUTO FALL BACK
+ */
if (fifo_ctl & FIFOCTL_AUTO_FB_0)
byFBOption = AUTO_FB_0;
else if (fifo_ctl & FIFOCTL_AUTO_FB_1)
diff --git a/drivers/staging/vt6655/rxtx.h b/drivers/staging/vt6655/rxtx.h
index 1e30ecb5c63c..89de67115826 100644
--- a/drivers/staging/vt6655/rxtx.h
+++ b/drivers/staging/vt6655/rxtx.h
@@ -12,10 +12,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- *
* File: rxtx.h
*
* Purpose:
diff --git a/drivers/staging/vt6655/srom.c b/drivers/staging/vt6655/srom.c
index ee992772066f..635f271595f6 100644
--- a/drivers/staging/vt6655/srom.c
+++ b/drivers/staging/vt6655/srom.c
@@ -12,10 +12,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- *
* File: srom.c
*
* Purpose: Implement functions to access eeprom
@@ -64,7 +60,7 @@
*
* Parameters:
* In:
- * dwIoBase - I/O base address
+ * iobase - I/O base address
* byContntOffset - address of EEPROM
* Out:
* none
@@ -72,7 +68,7 @@
* Return Value: data read
*
*/
-unsigned char SROMbyReadEmbedded(void __iomem *dwIoBase,
+unsigned char SROMbyReadEmbedded(void __iomem *iobase,
unsigned char byContntOffset)
{
unsigned short wDelay, wNoACK;
@@ -81,18 +77,18 @@ unsigned char SROMbyReadEmbedded(void __iomem *dwIoBase,
unsigned char byOrg;
byData = 0xFF;
- VNSvInPortB(dwIoBase + MAC_REG_I2MCFG, &byOrg);
+ VNSvInPortB(iobase + MAC_REG_I2MCFG, &byOrg);
/* turn off hardware retry for getting NACK */
- VNSvOutPortB(dwIoBase + MAC_REG_I2MCFG, (byOrg & (~I2MCFG_NORETRY)));
+ VNSvOutPortB(iobase + MAC_REG_I2MCFG, (byOrg & (~I2MCFG_NORETRY)));
for (wNoACK = 0; wNoACK < W_MAX_I2CRETRY; wNoACK++) {
- VNSvOutPortB(dwIoBase + MAC_REG_I2MTGID, EEP_I2C_DEV_ID);
- VNSvOutPortB(dwIoBase + MAC_REG_I2MTGAD, byContntOffset);
+ VNSvOutPortB(iobase + MAC_REG_I2MTGID, EEP_I2C_DEV_ID);
+ VNSvOutPortB(iobase + MAC_REG_I2MTGAD, byContntOffset);
/* issue read command */
- VNSvOutPortB(dwIoBase + MAC_REG_I2MCSR, I2MCSR_EEMR);
+ VNSvOutPortB(iobase + MAC_REG_I2MCSR, I2MCSR_EEMR);
/* wait DONE be set */
for (wDelay = 0; wDelay < W_MAX_TIMEOUT; wDelay++) {
- VNSvInPortB(dwIoBase + MAC_REG_I2MCSR, &byWait);
+ VNSvInPortB(iobase + MAC_REG_I2MCSR, &byWait);
if (byWait & (I2MCSR_DONE | I2MCSR_NACK))
break;
PCAvDelayByIO(CB_DELAY_LOOP_WAIT);
@@ -102,8 +98,8 @@ unsigned char SROMbyReadEmbedded(void __iomem *dwIoBase,
break;
}
}
- VNSvInPortB(dwIoBase + MAC_REG_I2MDIPT, &byData);
- VNSvOutPortB(dwIoBase + MAC_REG_I2MCFG, byOrg);
+ VNSvInPortB(iobase + MAC_REG_I2MDIPT, &byData);
+ VNSvOutPortB(iobase + MAC_REG_I2MCFG, byOrg);
return byData;
}
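
SROMbyReadEmbedded() nests two loops: an outer retry loop that reissues the I2C read after a NACK, and an inner loop that polls the status register until DONE or NACK appears. A sketch of that shape; read_status() is a hypothetical stand-in for the VNSvInPortB() status read, and the loop bounds and status bits are placeholders for W_MAX_I2CRETRY, W_MAX_TIMEOUT, I2MCSR_DONE and I2MCSR_NACK:

#include <stdbool.h>

#define MAX_RETRY	64	/* placeholder for W_MAX_I2CRETRY */
#define MAX_POLL	500	/* placeholder for W_MAX_TIMEOUT */
#define ST_DONE		0x01	/* placeholder for I2MCSR_DONE */
#define ST_NACK		0x02	/* placeholder for I2MCSR_NACK */

/* sketch: retry-then-poll structure of the EEPROM byte read */
static bool wait_eeprom_done(unsigned char (*read_status)(void))
{
	unsigned char st = 0;
	int retry, poll;

	for (retry = 0; retry < MAX_RETRY; retry++) {
		/* the driver reissues the read command here */
		for (poll = 0; poll < MAX_POLL; poll++) {
			st = read_status();
			if (st & (ST_DONE | ST_NACK))
				break;
		}
		if ((st & ST_DONE) && !(st & ST_NACK))
			return true;	/* data byte is ready to read */
	}
	return false;
}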
@@ -112,20 +108,20 @@ unsigned char SROMbyReadEmbedded(void __iomem *dwIoBase,
*
* Parameters:
* In:
- * dwIoBase - I/O base address
+ * iobase - I/O base address
* Out:
* pbyEepromRegs - EEPROM content Buffer
*
* Return Value: none
*
*/
-void SROMvReadAllContents(void __iomem *dwIoBase, unsigned char *pbyEepromRegs)
+void SROMvReadAllContents(void __iomem *iobase, unsigned char *pbyEepromRegs)
{
int ii;
/* ii = Rom Address */
for (ii = 0; ii < EEP_MAX_CONTEXT_SIZE; ii++) {
- *pbyEepromRegs = SROMbyReadEmbedded(dwIoBase,
+ *pbyEepromRegs = SROMbyReadEmbedded(iobase,
(unsigned char)ii);
pbyEepromRegs++;
}
@@ -136,21 +132,21 @@ void SROMvReadAllContents(void __iomem *dwIoBase, unsigned char *pbyEepromRegs)
*
* Parameters:
* In:
- * dwIoBase - I/O base address
+ * iobase - I/O base address
* Out:
* pbyEtherAddress - Ethernet Address buffer
*
* Return Value: none
*
*/
-void SROMvReadEtherAddress(void __iomem *dwIoBase,
+void SROMvReadEtherAddress(void __iomem *iobase,
unsigned char *pbyEtherAddress)
{
unsigned char ii;
/* ii = Rom Address */
for (ii = 0; ii < ETH_ALEN; ii++) {
- *pbyEtherAddress = SROMbyReadEmbedded(dwIoBase, ii);
+ *pbyEtherAddress = SROMbyReadEmbedded(iobase, ii);
pbyEtherAddress++;
}
}
diff --git a/drivers/staging/vt6655/srom.h b/drivers/staging/vt6655/srom.h
index 531bf0069373..6e03ab6dfa9d 100644
--- a/drivers/staging/vt6655/srom.h
+++ b/drivers/staging/vt6655/srom.h
@@ -12,11 +12,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- *
* File: srom.h
*
* Purpose: Implement functions to access eeprom
@@ -90,12 +85,12 @@
/*--------------------- Export Functions --------------------------*/
-unsigned char SROMbyReadEmbedded(void __iomem *dwIoBase,
+unsigned char SROMbyReadEmbedded(void __iomem *iobase,
unsigned char byContntOffset);
-void SROMvReadAllContents(void __iomem *dwIoBase, unsigned char *pbyEepromRegs);
+void SROMvReadAllContents(void __iomem *iobase, unsigned char *pbyEepromRegs);
-void SROMvReadEtherAddress(void __iomem *dwIoBase,
+void SROMvReadEtherAddress(void __iomem *iobase,
unsigned char *pbyEtherAddress);
#endif /* __EEPROM_H__*/
diff --git a/drivers/staging/vt6655/tmacro.h b/drivers/staging/vt6655/tmacro.h
index 597efefc017f..d6a0563ad55c 100644
--- a/drivers/staging/vt6655/tmacro.h
+++ b/drivers/staging/vt6655/tmacro.h
@@ -12,10 +12,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- *
* File: tmacro.h
*
* Purpose: define basic common types and macros
diff --git a/drivers/staging/vt6655/upc.h b/drivers/staging/vt6655/upc.h
index 85fe0464cfb3..9806b5989014 100644
--- a/drivers/staging/vt6655/upc.h
+++ b/drivers/staging/vt6655/upc.h
@@ -12,10 +12,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- *
* File: upc.h
*
* Purpose: Macros to access device
diff --git a/drivers/staging/vt6656/baseband.h b/drivers/staging/vt6656/baseband.h
index 7cc13874f8f1..fe1c25c64cca 100644
--- a/drivers/staging/vt6656/baseband.h
+++ b/drivers/staging/vt6656/baseband.h
@@ -86,15 +86,15 @@ struct vnt_phy_field {
unsigned int vnt_get_frame_time(u8 preamble_type, u8 pkt_type,
unsigned int frame_length, u16 tx_rate);
-void vnt_get_phy_field(struct vnt_private *, u32 frame_length,
- u16 tx_rate, u8 pkt_type, struct vnt_phy_field *);
-
-void vnt_set_short_slot_time(struct vnt_private *);
-void vnt_set_vga_gain_offset(struct vnt_private *, u8);
-void vnt_set_antenna_mode(struct vnt_private *, u8);
-int vnt_vt3184_init(struct vnt_private *);
-void vnt_set_deep_sleep(struct vnt_private *);
-void vnt_exit_deep_sleep(struct vnt_private *);
-void vnt_update_pre_ed_threshold(struct vnt_private *, int scanning);
+void vnt_get_phy_field(struct vnt_private *priv, u32 frame_length,
+ u16 tx_rate, u8 pkt_type, struct vnt_phy_field *phy);
+
+void vnt_set_short_slot_time(struct vnt_private *priv);
+void vnt_set_vga_gain_offset(struct vnt_private *priv, u8 data);
+void vnt_set_antenna_mode(struct vnt_private *priv, u8 antenna_mode);
+int vnt_vt3184_init(struct vnt_private *priv);
+void vnt_set_deep_sleep(struct vnt_private *priv);
+void vnt_exit_deep_sleep(struct vnt_private *priv);
+void vnt_update_pre_ed_threshold(struct vnt_private *priv, int scanning);
#endif /* __BASEBAND_H__ */
diff --git a/drivers/staging/vt6656/card.c b/drivers/staging/vt6656/card.c
index 53b469c71dc2..0e5a99375099 100644
--- a/drivers/staging/vt6656/card.c
+++ b/drivers/staging/vt6656/card.c
@@ -501,16 +501,7 @@ u8 vnt_get_pkt_type(struct vnt_private *priv)
*/
u64 vnt_get_tsf_offset(u8 rx_rate, u64 tsf1, u64 tsf2)
{
- u64 tsf_offset = 0;
- u16 rx_bcn_offset;
-
- rx_bcn_offset = cw_rxbcntsf_off[rx_rate % MAX_RATE];
-
- tsf2 += (u64)rx_bcn_offset;
-
- tsf_offset = tsf1 - tsf2;
-
- return tsf_offset;
+ return tsf1 - tsf2 - (u64)cw_rxbcntsf_off[rx_rate % MAX_RATE];
}
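
The rewritten vnt_get_tsf_offset() collapses three statements into one expression; in unsigned 64-bit arithmetic tsf1 - (tsf2 + off) and tsf1 - tsf2 - off are the same value. A runnable check, with placeholder offsets standing in for the driver's cw_rxbcntsf_off[] table and 4 for MAX_RATE:

#include <stdio.h>
#include <stdint.h>

static const uint16_t bcn_off[4] = { 17, 17, 17, 17 };	/* placeholders */

static uint64_t tsf_offset_old(uint8_t rate, uint64_t tsf1, uint64_t tsf2)
{
	uint16_t off = bcn_off[rate % 4];

	tsf2 += (uint64_t)off;		/* old multi-statement form */
	return tsf1 - tsf2;
}

static uint64_t tsf_offset_new(uint8_t rate, uint64_t tsf1, uint64_t tsf2)
{
	return tsf1 - tsf2 - (uint64_t)bcn_off[rate % 4];	/* patched form */
}

int main(void)
{
	uint64_t a = 1000000, b = 999000;

	/* both forms print the same offset (983) */
	printf("%llu %llu\n",
	       (unsigned long long)tsf_offset_old(2, a, b),
	       (unsigned long long)tsf_offset_new(2, a, b));
	return 0;
}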
/*
@@ -610,8 +601,8 @@ u64 vnt_get_next_tbtt(u64 tsf, u16 beacon_interval)
beacon_int = beacon_interval * 1024;
/* Next TBTT =
- * ((local_current_TSF / beacon_interval) + 1) * beacon_interval
- */
+ * ((local_current_TSF / beacon_interval) + 1) * beacon_interval
+ */
if (beacon_int) {
do_div(tsf, beacon_int);
tsf += 1;
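
The next-TBTT comment describes rounding the current TSF up to the next multiple of the beacon interval; do_div() performs the u64 division in place on the kernel side. A plain-C model of the same computation:

#include <stdio.h>
#include <stdint.h>

/* sketch: ((tsf / beacon_int) + 1) * beacon_int, with do_div()
 * modelled by ordinary 64-bit division */
static uint64_t next_tbtt(uint64_t tsf, uint16_t beacon_interval)
{
	uint64_t beacon_int = (uint64_t)beacon_interval * 1024;

	if (!beacon_int)
		return tsf;
	return (tsf / beacon_int + 1) * beacon_int;
}

int main(void)
{
	/* 250000 rounds up to 307200 for a 100 TU interval */
	printf("%llu\n", (unsigned long long)next_tbtt(250000, 100));
	return 0;
}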
diff --git a/drivers/staging/vt6656/mac.c b/drivers/staging/vt6656/mac.c
index eeed16e9124e..611da4929ddc 100644
--- a/drivers/staging/vt6656/mac.c
+++ b/drivers/staging/vt6656/mac.c
@@ -121,7 +121,7 @@ void vnt_mac_set_keyentry(struct vnt_private *priv, u16 key_ctl, u32 entry_idx,
u16 offset;
offset = MISCFIFO_KEYETRY0;
- offset += (entry_idx * MISCFIFO_KEYENTRYSIZE);
+ offset += entry_idx * MISCFIFO_KEYENTRYSIZE;
set_key.u.write.key_ctl = cpu_to_le16(key_ctl);
ether_addr_copy(set_key.u.write.addr, addr);
diff --git a/drivers/staging/vt6656/main_usb.c b/drivers/staging/vt6656/main_usb.c
index 0594828bdabf..50d02d9aa535 100644
--- a/drivers/staging/vt6656/main_usb.c
+++ b/drivers/staging/vt6656/main_usb.c
@@ -85,7 +85,7 @@ MODULE_PARM_DESC(tx_buffers, "Number of receive usb tx buffers");
* Static vars definitions
*/
-static struct usb_device_id vt6656_table[] = {
+static const struct usb_device_id vt6656_table[] = {
{USB_DEVICE(VNT_USB_VENDOR_ID, VNT_USB_PRODUCT_ID)},
{}
};
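
Marking the ID table const lets it live in read-only data, which is why the patch adds the qualifier. The idiom, sketched with placeholder vendor/product IDs (the real VNT_USB_VENDOR_ID/VNT_USB_PRODUCT_ID are defined elsewhere in the driver):

#include <linux/module.h>
#include <linux/usb.h>

static const struct usb_device_id example_table[] = {
	{ USB_DEVICE(0x1234, 0x5678) },	/* placeholder IDs */
	{ }				/* terminating entry */
};
MODULE_DEVICE_TABLE(usb, example_table);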
@@ -326,9 +326,9 @@ static int vnt_init_registers(struct vnt_private *priv)
priv->current_net_addr);
/*
- * set BB and packet type at the same time
- * set Short Slot Time, xIFS, and RSPINF
- */
+ * set BB and packet type at the same time
+ * set Short Slot Time, xIFS, and RSPINF
+ */
if (priv->bb_type == BB_TYPE_11A)
priv->short_slot_time = true;
else
diff --git a/drivers/staging/vt6656/rf.c b/drivers/staging/vt6656/rf.c
index 79a3108719a6..6101a35582b6 100644
--- a/drivers/staging/vt6656/rf.c
+++ b/drivers/staging/vt6656/rf.c
@@ -730,9 +730,9 @@ int vnt_rf_set_txpower(struct vnt_private *priv, u8 power, u32 rate)
return false;
/*
- * 0x080F1B00 for 3 wire control TxGain(D10)
- * and 0x31 as TX Gain value
- */
+ * 0x080F1B00 for 3 wire control TxGain(D10)
+ * and 0x31 as TX Gain value
+ */
power_setting = 0x080c0b00 | (power << 12);
ret &= vnt_rf_write_embedded(priv, power_setting);
@@ -800,8 +800,8 @@ int vnt_rf_set_txpower(struct vnt_private *priv, u8 power, u32 rate)
/* Convert rssi to dbm */
void vnt_rf_rssi_to_dbm(struct vnt_private *priv, u8 rssi, long *dbm)
{
- u8 idx = (((rssi & 0xc0) >> 6) & 0x03);
- long b = (rssi & 0x3f);
+ u8 idx = ((rssi & 0xc0) >> 6) & 0x03;
+ long b = rssi & 0x3f;
long a = 0;
u8 airoharf[4] = {0, 18, 0, 40};
diff --git a/drivers/staging/wilc1000/coreconfigurator.c b/drivers/staging/wilc1000/coreconfigurator.c
index 4b51c0ac27ac..622994795222 100644
--- a/drivers/staging/wilc1000/coreconfigurator.c
+++ b/drivers/staging/wilc1000/coreconfigurator.c
@@ -224,9 +224,7 @@ static inline u16 get_asoc_status(u8 *data)
u16 asoc_status;
asoc_status = data[3];
- asoc_status = (asoc_status << 8) | data[2];
-
- return asoc_status;
+ return (asoc_status << 8) | data[2];
}
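
get_asoc_status() assembles a little-endian 16-bit field from the frame body: the low byte sits at data[2] and the high byte at data[3]. A runnable illustration:

#include <stdio.h>
#include <stdint.h>

/* same byte order as get_asoc_status(): data[3] high, data[2] low */
static uint16_t le16_at(const uint8_t *data)
{
	return (uint16_t)((data[3] << 8) | data[2]);
}

int main(void)
{
	uint8_t frame[4] = { 0x00, 0x00, 0x34, 0x12 };

	printf("0x%04x\n", le16_at(frame));	/* prints 0x1234 */
	return 0;
}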
static inline u16 get_asoc_id(u8 *data)
diff --git a/drivers/staging/wilc1000/host_interface.c b/drivers/staging/wilc1000/host_interface.c
index 6ab7443eabde..b00ea75524e4 100644
--- a/drivers/staging/wilc1000/host_interface.c
+++ b/drivers/staging/wilc1000/host_interface.c
@@ -1722,10 +1722,8 @@ _WPAPtk_end_case_:
case PMKSA:
pu8keybuf = kmalloc((pstrHostIFkeyAttr->attr.pmkid.numpmkid * PMKSA_KEY_LEN) + 1, GFP_KERNEL);
- if (!pu8keybuf) {
- netdev_err(vif->ndev, "No buffer to send PMKSA Key\n");
+ if (!pu8keybuf)
return -ENOMEM;
- }
pu8keybuf[0] = pstrHostIFkeyAttr->attr.pmkid.numpmkid;
@@ -1932,7 +1930,7 @@ static s32 Handle_Get_InActiveTime(struct wilc_vif *vif,
wid.val = kmalloc(wid.size, GFP_KERNEL);
stamac = wid.val;
- memcpy(stamac, strHostIfStaInactiveT->mac, ETH_ALEN);
+ ether_addr_copy(stamac, strHostIfStaInactiveT->mac);
result = wilc_send_config_pkt(vif, SET_CFG, &wid, 1,
wilc_get_vif_idx(vif));
@@ -2168,7 +2166,7 @@ static void Handle_DelStation(struct wilc_vif *vif,
pu8CurrByte = wid.val;
- memcpy(pu8CurrByte, pstrDelStaParam->mac_addr, ETH_ALEN);
+ ether_addr_copy(pu8CurrByte, pstrDelStaParam->mac_addr);
result = wilc_send_config_pkt(vif, SET_CFG, &wid, 1,
wilc_get_vif_idx(vif));
@@ -2322,10 +2320,8 @@ static u32 Handle_ListenStateExpired(struct wilc_vif *vif,
wid.size = 2;
wid.val = kmalloc(wid.size, GFP_KERNEL);
- if (!wid.val) {
- netdev_err(vif->ndev, "Failed to allocate memory\n");
+ if (!wid.val)
return -ENOMEM;
- }
wid.val[0] = u8remain_on_chan_flag;
wid.val[1] = FALSE_FRMWR_CHANNEL;
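
ether_addr_copy() replaces the open-coded memcpy of ETH_ALEN bytes; it documents intent and requires both pointers to be 16-bit aligned, which the kmalloc'ed wid buffers above satisfy. A minimal sketch:

#include <linux/etherdevice.h>

/* sketch: copy one MAC address; dst and src must be 2-byte aligned */
static void copy_sta_mac(u8 *dst, const u8 *src)
{
	ether_addr_copy(dst, src);
}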
diff --git a/drivers/staging/wilc1000/host_interface.h b/drivers/staging/wilc1000/host_interface.h
index ddfea29df2a7..f36d3b5a0370 100644
--- a/drivers/staging/wilc1000/host_interface.h
+++ b/drivers/staging/wilc1000/host_interface.h
@@ -367,7 +367,6 @@ extern u8 wilc_connected_ssid[6];
extern u8 wilc_multicast_mac_addr_list[WILC_MULTICAST_TABLE_SIZE][ETH_ALEN];
extern int wilc_connecting;
-extern u8 wilc_initialized;
extern struct timer_list wilc_during_ip_timer;
#endif
diff --git a/drivers/staging/wilc1000/linux_mon.c b/drivers/staging/wilc1000/linux_mon.c
index 242f82f4d24f..f328d75de0d1 100644
--- a/drivers/staging/wilc1000/linux_mon.c
+++ b/drivers/staging/wilc1000/linux_mon.c
@@ -111,7 +111,7 @@ void WILC_WFI_monitor_rx(u8 *buff, u32 size)
}
skb->dev = wilc_wfi_mon;
- skb_set_mac_header(skb, 0);
+ skb_reset_mac_header(skb);
skb->ip_summed = CHECKSUM_UNNECESSARY;
skb->pkt_type = PACKET_OTHERHOST;
skb->protocol = htons(ETH_P_802_2);
@@ -215,7 +215,7 @@ static netdev_tx_t WILC_WFI_mon_xmit(struct sk_buff *skb,
cb_hdr->tx_flags = 0x0004;
skb2->dev = wilc_wfi_mon;
- skb_set_mac_header(skb2, 0);
+ skb_reset_mac_header(skb2);
skb2->ip_summed = CHECKSUM_UNNECESSARY;
skb2->pkt_type = PACKET_OTHERHOST;
skb2->protocol = htons(ETH_P_802_2);
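
skb_reset_mac_header() records the current skb->data offset as the MAC header start, which is exactly what skb_set_mac_header(skb, 0) spelled with an explicit zero did. A minimal sketch of the replacement call:

#include <linux/skbuff.h>

/* sketch: MAC header begins at the current data pointer */
static void mark_mac_header(struct sk_buff *skb)
{
	skb_reset_mac_header(skb);
}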
diff --git a/drivers/staging/wilc1000/linux_wlan.c b/drivers/staging/wilc1000/linux_wlan.c
index 6370a5efe343..3775706578b2 100644
--- a/drivers/staging/wilc1000/linux_wlan.c
+++ b/drivers/staging/wilc1000/linux_wlan.c
@@ -37,6 +37,8 @@ static void linux_wlan_tx_complete(void *priv, int status);
static int mac_init_fn(struct net_device *ndev);
static struct net_device_stats *mac_stats(struct net_device *dev);
static int mac_ioctl(struct net_device *ndev, struct ifreq *req, int cmd);
+static int wilc_mac_open(struct net_device *ndev);
+static int wilc_mac_close(struct net_device *ndev);
static void wilc_set_multicast_list(struct net_device *dev);
bool wilc_enable_ps = true;
@@ -218,17 +220,6 @@ static void deinit_irq(struct net_device *dev)
}
}
-int wilc_lock_timeout(struct wilc *nic, void *vp, u32 timeout)
-{
- /* FIXME: replace with mutex_lock or wait_for_completion */
- int error = -1;
-
- if (vp)
- error = down_timeout(vp,
- msecs_to_jiffies(timeout));
- return error;
-}
-
void wilc_mac_indicate(struct wilc *wilc, int flag)
{
int status;
@@ -269,23 +260,12 @@ static struct net_device *get_if_handler(struct wilc *wilc, u8 *mac_header)
int wilc_wlan_set_bssid(struct net_device *wilc_netdev, u8 *bssid, u8 mode)
{
- int i = 0;
- int ret = -1;
- struct wilc_vif *vif;
- struct wilc *wilc;
-
- vif = netdev_priv(wilc_netdev);
- wilc = vif->wilc;
+ struct wilc_vif *vif = netdev_priv(wilc_netdev);
- for (i = 0; i < wilc->vif_num; i++)
- if (wilc->vif[i]->ndev == wilc_netdev) {
- memcpy(wilc->vif[i]->bssid, bssid, 6);
- wilc->vif[i]->mode = mode;
- ret = 0;
- break;
- }
+ memcpy(vif->bssid, bssid, 6);
+ vif->mode = mode;
- return ret;
+ return 0;
}
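
The rewrite drops the linear search over wilc->vif[] because the vif is the net_device's private area, so netdev_priv() reaches it directly. A sketch of that assumption (valid as long as the init path allocates the vif as the netdev's priv):

#include <linux/netdevice.h>

struct wilc_vif;

/* sketch: netdev_priv() returns the driver-private area that was
 * allocated together with the net_device, assumed to be the vif */
static struct wilc_vif *vif_from_ndev(struct net_device *ndev)
{
	return netdev_priv(ndev);
}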
int wilc_wlan_get_num_conn_ifcs(struct wilc *wilc)
@@ -847,7 +827,7 @@ static int mac_init_fn(struct net_device *ndev)
return 0;
}
-int wilc_mac_open(struct net_device *ndev)
+static int wilc_mac_open(struct net_device *ndev)
{
struct wilc_vif *vif;
@@ -1038,7 +1018,7 @@ int wilc_mac_xmit(struct sk_buff *skb, struct net_device *ndev)
return 0;
}
-int wilc_mac_close(struct net_device *ndev)
+static int wilc_mac_close(struct net_device *ndev)
{
struct wilc_priv *priv;
struct wilc_vif *vif;
@@ -1212,16 +1192,11 @@ void WILC_WFI_mgmt_rx(struct wilc *wilc, u8 *buff, u32 size)
void wilc_netdev_cleanup(struct wilc *wilc)
{
- int i = 0;
- struct wilc_vif *vif[NUM_CONCURRENT_IFC];
+ int i;
- if (wilc && (wilc->vif[0]->ndev || wilc->vif[1]->ndev)) {
+ if (wilc && (wilc->vif[0]->ndev || wilc->vif[1]->ndev))
unregister_inetaddr_notifier(&g_dev_notifier);
- for (i = 0; i < NUM_CONCURRENT_IFC; i++)
- vif[i] = netdev_priv(wilc->vif[i]->ndev);
- }
-
if (wilc && wilc->firmware) {
release_firmware(wilc->firmware);
wilc->firmware = NULL;
@@ -1230,7 +1205,7 @@ void wilc_netdev_cleanup(struct wilc *wilc)
if (wilc && (wilc->vif[0]->ndev || wilc->vif[1]->ndev)) {
for (i = 0; i < NUM_CONCURRENT_IFC; i++)
if (wilc->vif[i]->ndev)
- if (vif[i]->mac_opened)
+ if (wilc->vif[i]->mac_opened)
wilc_mac_close(wilc->vif[i]->ndev);
for (i = 0; i < NUM_CONCURRENT_IFC; i++) {
@@ -1278,9 +1253,9 @@ int wilc_netdev_init(struct wilc **wilc, struct device *dev, int io_type,
vif->idx = wl->vif_num;
vif->wilc = *wilc;
+ vif->ndev = ndev;
wl->vif[i] = vif;
- wl->vif[wl->vif_num]->ndev = ndev;
- wl->vif_num++;
+ wl->vif_num = i;
ndev->netdev_ops = &wilc_netdev_ops;
{
diff --git a/drivers/staging/wilc1000/wilc_debugfs.c b/drivers/staging/wilc1000/wilc_debugfs.c
index 802bb1d5e207..07260c497db4 100644
--- a/drivers/staging/wilc1000/wilc_debugfs.c
+++ b/drivers/staging/wilc1000/wilc_debugfs.c
@@ -62,16 +62,16 @@ static ssize_t wilc_debug_level_write(struct file *filp, const char __user *buf,
return ret;
if (flag > DBG_LEVEL_ALL) {
- printk("%s, value (0x%08x) is out of range, stay previous flag (0x%08x)\n", __func__, flag, atomic_read(&WILC_DEBUG_LEVEL));
+ pr_info("%s, value (0x%08x) is out of range, keeping previous flag (0x%08x)\n", __func__, flag, atomic_read(&WILC_DEBUG_LEVEL));
return -EINVAL;
}
atomic_set(&WILC_DEBUG_LEVEL, (int)flag);
if (flag == 0)
- printk(KERN_INFO "Debug-level disabled\n");
+ pr_info("Debug-level disabled\n");
else
- printk(KERN_INFO "Debug-level enabled\n");
+ pr_info("Debug-level enabled\n");
return count;
}
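
pr_info() expands to printk(KERN_INFO pr_fmt(fmt) ...), so a file-local pr_fmt definition tags every message consistently; that is why these conversions are preferred over raw printk(). A sketch with an illustrative prefix:

/* sketch: pr_fmt must be defined before printk.h is included */
#define pr_fmt(fmt) "wilc_debugfs: " fmt
#include <linux/printk.h>

static void report(void)
{
	pr_info("Debug-level disabled\n");	/* logged with the prefix */
}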
diff --git a/drivers/staging/wilc1000/wilc_sdio.c b/drivers/staging/wilc1000/wilc_sdio.c
index 39b73fb27398..3ad7cec4662d 100644
--- a/drivers/staging/wilc1000/wilc_sdio.c
+++ b/drivers/staging/wilc1000/wilc_sdio.c
@@ -39,6 +39,7 @@ struct wilc_sdio {
};
static struct wilc_sdio g_sdio;
+static const struct wilc_hif_func wilc_hif_sdio;
static int sdio_write_reg(struct wilc *wilc, u32 addr, u32 data);
static int sdio_read_reg(struct wilc *wilc, u32 addr, u32 *data);
@@ -1100,7 +1101,7 @@ static int sdio_sync_ext(struct wilc *wilc, int nint)
*
********************************************/
-const struct wilc_hif_func wilc_hif_sdio = {
+static const struct wilc_hif_func wilc_hif_sdio = {
.hif_init = sdio_init,
.hif_deinit = sdio_deinit,
.hif_read_reg = sdio_read_reg,
diff --git a/drivers/staging/wilc1000/wilc_spi.c b/drivers/staging/wilc1000/wilc_spi.c
index f08cf6d9e1af..55d53c3a95df 100644
--- a/drivers/staging/wilc1000/wilc_spi.c
+++ b/drivers/staging/wilc1000/wilc_spi.c
@@ -30,6 +30,7 @@ struct wilc_spi {
};
static struct wilc_spi g_spi;
+static const struct wilc_hif_func wilc_hif_spi;
static int wilc_spi_read(struct wilc *wilc, u32, u8 *, u32);
static int wilc_spi_write(struct wilc *wilc, u32, u8 *, u32);
@@ -858,7 +859,8 @@ static int wilc_spi_init(struct wilc *wilc, bool resume)
/* the SPI to its initial value. */
if (!spi_internal_read(wilc, WILC_SPI_PROTOCOL_OFFSET, &reg)) {
/* Read failed. Try with CRC off. This might happen when module
- * is removed but chip isn't reset*/
+ * is removed but chip isn't reset
+ */
g_spi.crc_off = 1;
dev_err(&spi->dev, "Failed internal read protocol with CRC on, retrying with CRC off...\n");
if (!spi_internal_read(wilc, WILC_SPI_PROTOCOL_OFFSET, &reg)) {
@@ -1133,7 +1135,7 @@ static int wilc_spi_sync_ext(struct wilc *wilc, int nint)
* Global spi HIF function table
*
********************************************/
-const struct wilc_hif_func wilc_hif_spi = {
+static const struct wilc_hif_func wilc_hif_spi = {
.hif_init = wilc_spi_init,
.hif_deinit = _wilc_spi_deinit,
.hif_read_reg = wilc_spi_read_reg,
diff --git a/drivers/staging/wilc1000/wilc_wfi_cfgoperations.c b/drivers/staging/wilc1000/wilc_wfi_cfgoperations.c
index 60d8b055bb2f..c1a24f7bc85f 100644
--- a/drivers/staging/wilc1000/wilc_wfi_cfgoperations.c
+++ b/drivers/staging/wilc1000/wilc_wfi_cfgoperations.c
@@ -90,17 +90,12 @@ static const struct wiphy_wowlan_support wowlan_support = {
#define IS_MGMT_STATUS_SUCCES 0x040
#define GET_PKT_OFFSET(a) (((a) >> 22) & 0x1ff)
-extern int wilc_mac_open(struct net_device *ndev);
-extern int wilc_mac_close(struct net_device *ndev);
-
static struct network_info last_scanned_shadow[MAX_NUM_SCANNED_NETWORKS_SHADOW];
static u32 last_scanned_cnt;
struct timer_list wilc_during_ip_timer;
static struct timer_list hAgingTimer;
static u8 op_ifcs;
-u8 wilc_initialized = 1;
-
#define CHAN2G(_channel, _freq, _flags) { \
.band = NL80211_BAND_2GHZ, \
.center_freq = (_freq), \
@@ -1193,6 +1188,7 @@ static int get_station(struct wiphy *wiphy, struct net_device *dev,
u32 i = 0;
u32 associatedsta = ~0;
u32 inactive_time = 0;
+
priv = wiphy_priv(wiphy);
vif = netdev_priv(dev);
@@ -1590,28 +1586,25 @@ static int remain_on_channel(struct wiphy *wiphy,
priv->strRemainOnChanParams.u32ListenDuration = duration;
priv->strRemainOnChanParams.u32ListenSessionID++;
- s32Error = wilc_remain_on_channel(vif,
+ return wilc_remain_on_channel(vif,
priv->strRemainOnChanParams.u32ListenSessionID,
duration, chan->hw_value,
WILC_WFI_RemainOnChannelExpired,
WILC_WFI_RemainOnChannelReady, (void *)priv);
-
- return s32Error;
}
static int cancel_remain_on_channel(struct wiphy *wiphy,
struct wireless_dev *wdev,
u64 cookie)
{
- s32 s32Error = 0;
struct wilc_priv *priv;
struct wilc_vif *vif;
priv = wiphy_priv(wiphy);
vif = netdev_priv(priv->dev);
- s32Error = wilc_listen_state_expired(vif, priv->strRemainOnChanParams.u32ListenSessionID);
- return s32Error;
+ return wilc_listen_state_expired(vif,
+ priv->strRemainOnChanParams.u32ListenSessionID);
}
static int mgmt_tx(struct wiphy *wiphy,
@@ -1935,12 +1928,10 @@ static int start_ap(struct wiphy *wiphy, struct net_device *dev,
wilc_wlan_set_bssid(dev, wl->vif[vif->idx]->src_addr, AP_MODE);
wilc_set_power_mgmt(vif, 0, 0);
- s32Error = wilc_add_beacon(vif, settings->beacon_interval,
+ return wilc_add_beacon(vif, settings->beacon_interval,
settings->dtim_period, beacon->head_len,
(u8 *)beacon->head, beacon->tail_len,
(u8 *)beacon->tail);
-
- return s32Error;
}
static int change_beacon(struct wiphy *wiphy, struct net_device *dev,
@@ -1948,16 +1939,13 @@ static int change_beacon(struct wiphy *wiphy, struct net_device *dev,
{
struct wilc_priv *priv;
struct wilc_vif *vif;
- s32 s32Error = 0;
priv = wiphy_priv(wiphy);
vif = netdev_priv(priv->dev);
- s32Error = wilc_add_beacon(vif, 0, 0, beacon->head_len,
+ return wilc_add_beacon(vif, 0, 0, beacon->head_len,
(u8 *)beacon->head, beacon->tail_len,
(u8 *)beacon->tail);
-
- return s32Error;
}
static int stop_ap(struct wiphy *wiphy, struct net_device *dev)
diff --git a/drivers/staging/wilc1000/wilc_wfi_netdevice.h b/drivers/staging/wilc1000/wilc_wfi_netdevice.h
index ec6b1674cf38..d431673bc46c 100644
--- a/drivers/staging/wilc1000/wilc_wfi_netdevice.h
+++ b/drivers/staging/wilc1000/wilc_wfi_netdevice.h
@@ -225,7 +225,6 @@ int wilc1000_wlan_init(struct net_device *dev, struct wilc_vif *vif);
void wilc_frmw_to_linux(struct wilc *wilc, u8 *buff, u32 size, u32 pkt_offset);
void wilc_mac_indicate(struct wilc *wilc, int flag);
-int wilc_lock_timeout(struct wilc *wilc, void *, u32 timeout);
void wilc_netdev_cleanup(struct wilc *wilc);
int wilc_netdev_init(struct wilc **wilc, struct device *, int io_type, int gpio,
const struct wilc_hif_func *ops);
diff --git a/drivers/staging/wilc1000/wilc_wlan.h b/drivers/staging/wilc1000/wilc_wlan.h
index de6c4ddbf45a..11365efcc5d0 100644
--- a/drivers/staging/wilc1000/wilc_wlan.h
+++ b/drivers/staging/wilc1000/wilc_wlan.h
@@ -248,9 +248,6 @@ struct wilc_hif_func {
void (*disable_interrupt)(struct wilc *nic);
};
-extern const struct wilc_hif_func wilc_hif_spi;
-extern const struct wilc_hif_func wilc_hif_sdio;
-
/********************************************
*
* Configuration Structure
@@ -297,9 +294,6 @@ void wilc_enable_tcp_ack_filter(bool value);
int wilc_wlan_get_num_conn_ifcs(struct wilc *);
int wilc_mac_xmit(struct sk_buff *skb, struct net_device *dev);
-int wilc_mac_open(struct net_device *ndev);
-int wilc_mac_close(struct net_device *ndev);
-
void WILC_WFI_p2p_rx(struct net_device *dev, u8 *buff, u32 size);
void host_wakeup_notify(struct wilc *wilc);
void host_sleep_notify(struct wilc *wilc);
diff --git a/drivers/staging/wlan-ng/cfg80211.c b/drivers/staging/wlan-ng/cfg80211.c
index 182b2d564627..aa0e5a3d4a89 100644
--- a/drivers/staging/wlan-ng/cfg80211.c
+++ b/drivers/staging/wlan-ng/cfg80211.c
@@ -323,7 +323,7 @@ static int prism2_scan(struct wiphy *wiphy,
priv->scan_request = request;
- memset(&msg1, 0x00, sizeof(struct p80211msg_dot11req_scan));
+ memset(&msg1, 0x00, sizeof(msg1));
msg1.msgcode = DIDmsg_dot11req_scan;
msg1.bsstype.data = P80211ENUM_bsstype_any;
@@ -375,13 +375,13 @@ static int prism2_scan(struct wiphy *wiphy,
ie_buf[0] = WLAN_EID_SSID;
ie_buf[1] = msg2.ssid.data.len;
ie_len = ie_buf[1] + 2;
- memcpy(&ie_buf[2], &(msg2.ssid.data.data), msg2.ssid.data.len);
+ memcpy(&ie_buf[2], &msg2.ssid.data.data, msg2.ssid.data.len);
freq = ieee80211_channel_to_frequency(msg2.dschannel.data,
NL80211_BAND_2GHZ);
bss = cfg80211_inform_bss(wiphy,
ieee80211_get_channel(wiphy, freq),
CFG80211_BSS_FTYPE_UNKNOWN,
- (const u8 *)&(msg2.bssid.data.data),
+ (const u8 *)&msg2.bssid.data.data,
msg2.timestamp.data, msg2.capinfo.data,
msg2.beaconperiod.data,
ie_buf,
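
Earlier in this file, memset(&msg1, 0x00, sizeof(msg1)) keys the cleared length to the variable itself rather than a spelled-out type, so the clear stays correct if the variable's type ever changes. A standalone sketch:

#include <string.h>

struct example_msg {
	unsigned int msgcode;
	unsigned int bsstype;
};

/* sketch: the size follows the object, not a hand-written type name */
static void clear_msg(struct example_msg *m)
{
	memset(m, 0, sizeof(*m));
}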
diff --git a/drivers/staging/wlan-ng/hfa384x.h b/drivers/staging/wlan-ng/hfa384x.h
index 43c299c3b631..60caf9c37727 100644
--- a/drivers/staging/wlan-ng/hfa384x.h
+++ b/drivers/staging/wlan-ng/hfa384x.h
@@ -137,21 +137,11 @@
#define HFA384x_DLSTATE_FLASHENABLED 2
/*--- Register Field Masks --------------------------*/
-#define HFA384x_CMD_AINFO ((u16)(BIT(14) | BIT(13) \
- | BIT(12) | BIT(11) \
- | BIT(10) | BIT(9) \
- | BIT(8)))
-#define HFA384x_CMD_MACPORT ((u16)(BIT(10) | BIT(9) | \
- BIT(8)))
-#define HFA384x_CMD_PROGMODE ((u16)(BIT(9) | BIT(8)))
-#define HFA384x_CMD_CMDCODE ((u16)(BIT(5) | BIT(4) | \
- BIT(3) | BIT(2) | \
- BIT(1) | BIT(0)))
-
-#define HFA384x_STATUS_RESULT ((u16)(BIT(14) | BIT(13) \
- | BIT(12) | BIT(11) \
- | BIT(10) | BIT(9) \
- | BIT(8)))
+#define HFA384x_CMD_AINFO ((u16)GENMASK(14, 8))
+#define HFA384x_CMD_MACPORT ((u16)GENMASK(10, 8))
+#define HFA384x_CMD_PROGMODE ((u16)GENMASK(9, 8))
+#define HFA384x_CMD_CMDCODE ((u16)GENMASK(5, 0))
+#define HFA384x_STATUS_RESULT ((u16)GENMASK(14, 8))
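
GENMASK(h, l) states a contiguous mask by its bit range instead of an OR chain of BIT()s; the two spellings produce identical values. A runnable user-space check, with local definitions modelling the kernel macros:

#include <stdio.h>

#define BIT(n)		(1U << (n))
/* user-space model of the kernel's GENMASK(h, l): bits l..h set */
#define GENMASK(h, l)	(((~0U) >> (31 - (h))) & ((~0U) << (l)))

int main(void)
{
	unsigned int ored = BIT(14) | BIT(13) | BIT(12) | BIT(11) |
			    BIT(10) | BIT(9) | BIT(8);

	/* both print 0x7f00 */
	printf("0x%04x 0x%04x\n", ored, GENMASK(14, 8));
	return 0;
}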
/*--- Command Code Constants --------------------------*/
/*--- Controller Commands --------------------------*/
@@ -266,7 +256,7 @@
#define HFA384x_RID_DBMCOMMSQUALITY_LEN \
((u16)sizeof(struct hfa384x_dbmcommsquality))
#define HFA384x_RID_JOINREQUEST_LEN \
- ((u16)sizeof(struct hfa384x_JoinRequest_data))
+ ((u16)sizeof(struct hfa384x_join_request_data))
/*--------------------------------------------------------------------
* Information RIDs: Modem Information
@@ -286,7 +276,7 @@
#define HFA384x_RID_CNFWEPFLAGS ((u16)0xFC28)
#define HFA384x_RID_CNFAUTHENTICATION ((u16)0xFC2A)
#define HFA384x_RID_CNFROAMINGMODE ((u16)0xFC2D)
-#define HFA384x_RID_CNFAPBCNint ((u16)0xFC33)
+#define HFA384x_RID_CNFAPBCNINT ((u16)0xFC33)
#define HFA384x_RID_CNFDBMADJUST ((u16)0xFC46)
#define HFA384x_RID_CNFWPADATA ((u16)0xFC48)
#define HFA384x_RID_CNFBASICRATES ((u16)0xFCB3)
@@ -408,27 +398,27 @@ struct hfa384x_caplevel {
#define HFA384x_CREATEIBSS_JOINCREATEIBSS 0
/*-- Configuration Record: HostScanRequest (data portion only) --*/
-struct hfa384x_HostScanRequest_data {
- u16 channelList;
- u16 txRate;
+struct hfa384x_host_scan_request_data {
+ u16 channel_list;
+ u16 tx_rate;
struct hfa384x_bytestr32 ssid;
} __packed;
/*-- Configuration Record: JoinRequest (data portion only) --*/
-struct hfa384x_JoinRequest_data {
+struct hfa384x_join_request_data {
u8 bssid[WLAN_BSSID_LEN];
u16 channel;
} __packed;
/*-- Configuration Record: authenticateStation (data portion only) --*/
-struct hfa384x_authenticateStation_data {
+struct hfa384x_authenticate_station_data {
u8 address[ETH_ALEN];
u16 status;
u16 algorithm;
} __packed;
/*-- Configuration Record: WPAData (data portion only) --*/
-struct hfa384x_WPAData {
+struct hfa384x_wpa_data {
u16 datalen;
u8 data[0]; /* max 80 */
} __packed;
@@ -455,16 +445,16 @@ struct hfa384x_downloadbuffer {
/*-- Information Record: commsquality --*/
struct hfa384x_commsquality {
- u16 CQ_currBSS;
- u16 ASL_currBSS;
- u16 ANL_currFC;
+ u16 cq_curr_bss;
+ u16 asl_curr_bss;
+ u16 anl_curr_fc;
} __packed;
/*-- Information Record: dmbcommsquality --*/
struct hfa384x_dbmcommsquality {
- u16 CQdbm_currBSS;
- u16 ASLdbm_currBSS;
- u16 ANLdbm_currFC;
+ u16 cq_dbm_curr_bss;
+ u16 asl_dbm_curr_bss;
+ u16 anl_dbm_curr_fc;
} __packed;
/*--------------------------------------------------------------------
@@ -511,9 +501,8 @@ struct hfa384x_tx_frame {
#define HFA384x_TXSTATUS_AGEDERR ((u16)BIT(1))
#define HFA384x_TXSTATUS_RETRYERR ((u16)BIT(0))
/*-- Transmit Control Field --*/
-#define HFA384x_TX_MACPORT ((u16)(BIT(10) | \
- BIT(9) | BIT(8)))
-#define HFA384x_TX_STRUCTYPE ((u16)(BIT(4) | BIT(3)))
+#define HFA384x_TX_MACPORT ((u16)GENMASK(10, 8))
+#define HFA384x_TX_STRUCTYPE ((u16)GENMASK(4, 3))
#define HFA384x_TX_TXEX ((u16)BIT(2))
#define HFA384x_TX_TXOK ((u16)BIT(1))
/*--------------------------------------------------------------------
@@ -571,9 +560,7 @@ struct hfa384x_rx_frame {
*/
/*-- Status Fields --*/
-#define HFA384x_RXSTATUS_MACPORT ((u16)(BIT(10) | \
- BIT(9) | \
- BIT(8)))
+#define HFA384x_RXSTATUS_MACPORT ((u16)GENMASK(10, 8))
#define HFA384x_RXSTATUS_FCSERR ((u16)BIT(0))
/*--------------------------------------------------------------------
* Communication Frames: Test/Get/Set Field Values for Receive Frames
@@ -610,7 +597,7 @@ struct hfa384x_rx_frame {
*/
/*-- Inquiry Frame, Diagnose: Communication Tallies --*/
-struct hfa384x_CommTallies16 {
+struct hfa384x_comm_tallies_16 {
u16 txunicastframes;
u16 txmulticastframes;
u16 txfragments;
@@ -634,7 +621,7 @@ struct hfa384x_CommTallies16 {
u16 rxmsginbadmsgfrag;
} __packed;
-struct hfa384x_CommTallies32 {
+struct hfa384x_comm_tallies_32 {
u32 txunicastframes;
u32 txmulticastframes;
u32 txfragments;
@@ -659,7 +646,7 @@ struct hfa384x_CommTallies32 {
} __packed;
/*-- Inquiry Frame, Diagnose: Scan Results & Subfields--*/
-struct hfa384x_ScanResultSub {
+struct hfa384x_scan_result_sub {
u16 chid;
u16 anl;
u16 sl;
@@ -671,14 +658,14 @@ struct hfa384x_ScanResultSub {
u16 proberesp_rate;
} __packed;
-struct hfa384x_ScanResult {
+struct hfa384x_scan_result {
u16 rsvd;
u16 scanreason;
- struct hfa384x_ScanResultSub result[HFA384x_SCANRESULT_MAX];
+ struct hfa384x_scan_result_sub result[HFA384x_SCANRESULT_MAX];
} __packed;
/*-- Inquiry Frame, Diagnose: ChInfo Results & Subfields--*/
-struct hfa384x_ChInfoResultSub {
+struct hfa384x_ch_info_result_sub {
u16 chid;
u16 anl;
u16 pnl;
@@ -688,13 +675,13 @@ struct hfa384x_ChInfoResultSub {
#define HFA384x_CHINFORESULT_BSSACTIVE BIT(0)
#define HFA384x_CHINFORESULT_PCFACTIVE BIT(1)
-struct hfa384x_ChInfoResult {
+struct hfa384x_ch_info_result {
u16 scanchannels;
- struct hfa384x_ChInfoResultSub result[HFA384x_CHINFORESULT_MAX];
+ struct hfa384x_ch_info_result_sub result[HFA384x_CHINFORESULT_MAX];
} __packed;
/*-- Inquiry Frame, Diagnose: Host Scan Results & Subfields--*/
-struct hfa384x_HScanResultSub {
+struct hfa384x_hscan_result_sub {
u16 chid;
u16 anl;
u16 sl;
@@ -707,10 +694,10 @@ struct hfa384x_HScanResultSub {
u16 atim;
} __packed;
-struct hfa384x_HScanResult {
+struct hfa384x_hscan_result {
u16 nresult;
u16 rsvd;
- struct hfa384x_HScanResultSub result[HFA384x_HSCANRESULT_MAX];
+ struct hfa384x_hscan_result_sub result[HFA384x_HSCANRESULT_MAX];
} __packed;
/*-- Unsolicited Frame, MAC Mgmt: LinkStatus --*/
@@ -723,7 +710,7 @@ struct hfa384x_HScanResult {
#define HFA384x_LINK_AP_INRANGE ((u16)5)
#define HFA384x_LINK_ASSOCFAIL ((u16)6)
-struct hfa384x_LinkStatus {
+struct hfa384x_link_status {
u16 linkstatus;
} __packed;
@@ -733,7 +720,7 @@ struct hfa384x_LinkStatus {
#define HFA384x_ASSOCSTATUS_REASSOC ((u16)2)
#define HFA384x_ASSOCSTATUS_AUTHFAIL ((u16)5)
-struct hfa384x_AssocStatus {
+struct hfa384x_assoc_status {
u16 assocstatus;
u8 sta_addr[ETH_ALEN];
/* old_ap_addr is only valid if assocstatus == 2 */
@@ -744,37 +731,37 @@ struct hfa384x_AssocStatus {
/*-- Unsolicited Frame, MAC Mgmt: AuthRequest (AP Only) --*/
-struct hfa384x_AuthRequest {
+struct hfa384x_auth_request {
u8 sta_addr[ETH_ALEN];
u16 algorithm;
} __packed;
/*-- Unsolicited Frame, MAC Mgmt: PSUserCount (AP Only) --*/
-struct hfa384x_PSUserCount {
+struct hfa384x_ps_user_count {
u16 usercnt;
} __packed;
-struct hfa384x_KeyIDChanged {
+struct hfa384x_key_id_changed {
u8 sta_addr[ETH_ALEN];
u16 keyid;
} __packed;
/*-- Collection of all Inf frames ---------------*/
union hfa384x_infodata {
- struct hfa384x_CommTallies16 commtallies16;
- struct hfa384x_CommTallies32 commtallies32;
- struct hfa384x_ScanResult scanresult;
- struct hfa384x_ChInfoResult chinforesult;
- struct hfa384x_HScanResult hscanresult;
- struct hfa384x_LinkStatus linkstatus;
- struct hfa384x_AssocStatus assocstatus;
- struct hfa384x_AuthRequest authreq;
- struct hfa384x_PSUserCount psusercnt;
- struct hfa384x_KeyIDChanged keyidchanged;
-} __packed;
-
-struct hfa384x_InfFrame {
+ struct hfa384x_comm_tallies_16 commtallies16;
+ struct hfa384x_comm_tallies_32 commtallies32;
+ struct hfa384x_scan_result scanresult;
+ struct hfa384x_ch_info_result chinforesult;
+ struct hfa384x_hscan_result hscanresult;
+ struct hfa384x_link_status linkstatus;
+ struct hfa384x_assoc_status assocstatus;
+ struct hfa384x_auth_request authreq;
+ struct hfa384x_ps_user_count psusercnt;
+ struct hfa384x_key_id_changed keyidchanged;
+} __packed;
+
+struct hfa384x_inf_frame {
u16 framelen;
u16 infotype;
union hfa384x_infodata info;
@@ -862,7 +849,7 @@ struct hfa384x_usb_rxfrm {
struct hfa384x_usb_infofrm {
u16 type;
- struct hfa384x_InfFrame info;
+ struct hfa384x_inf_frame info;
} __packed;
struct hfa384x_usb_statusresp {
@@ -1169,7 +1156,6 @@ enum ctlx_state {
CTLX_REQ_COMPLETE, /* OUT URB complete */
CTLX_RESP_COMPLETE /* IN URB received */
};
-typedef enum ctlx_state CTLX_STATE;
struct hfa384x_usbctlx;
struct hfa384x;
@@ -1186,7 +1172,7 @@ struct hfa384x_usbctlx {
union hfa384x_usbout outbuf; /* pkt buf for OUT */
union hfa384x_usbin inbuf; /* pkt buf for IN(a copy) */
- CTLX_STATE state; /* Tracks running state */
+ enum ctlx_state state; /* Tracks running state */
struct completion done;
volatile int reapable; /* Food for the reaper task */
@@ -1294,7 +1280,7 @@ struct hfa384x {
int scanflag; /* to signal scan complete */
int join_ap; /* are we joined to a specific ap */
int join_retries; /* number of join retries till we fail */
- struct hfa384x_JoinRequest_data joinreq; /* join request saved data */
+ struct hfa384x_join_request_data joinreq;/* join request saved data */
struct wlandevice *wlandev;
/* Timer to allow for the deferred processing of linkstatus messages */
@@ -1360,17 +1346,17 @@ struct hfa384x {
struct hfa384x_caplevel cap_act_ap_mfi; /* ap f/w to modem interface */
u32 psusercount; /* Power save user count. */
- struct hfa384x_CommTallies32 tallies; /* Communication tallies. */
+ struct hfa384x_comm_tallies_32 tallies; /* Communication tallies. */
u8 comment[WLAN_COMMENT_MAX + 1]; /* User comment */
/* Channel Info request results (AP only) */
struct {
atomic_t done;
u8 count;
- struct hfa384x_ChInfoResult results;
+ struct hfa384x_ch_info_result results;
} channel_info;
- struct hfa384x_InfFrame *scanresults;
+ struct hfa384x_inf_frame *scanresults;
struct prism2sta_authlist authlist; /* Authenticated station list. */
unsigned int accessmode; /* Access mode. */
diff --git a/drivers/staging/wlan-ng/hfa384x_usb.c b/drivers/staging/wlan-ng/hfa384x_usb.c
index 6a107f8a06e2..4fe037aeef12 100644
--- a/drivers/staging/wlan-ng/hfa384x_usb.c
+++ b/drivers/staging/wlan-ng/hfa384x_usb.c
@@ -1,114 +1,114 @@
/* src/prism2/driver/hfa384x_usb.c
-*
-* Functions that talk to the USB variantof the Intersil hfa384x MAC
-*
-* Copyright (C) 1999 AbsoluteValue Systems, Inc. All Rights Reserved.
-* --------------------------------------------------------------------
-*
-* linux-wlan
-*
-* The contents of this file are subject to the Mozilla Public
-* License Version 1.1 (the "License"); you may not use this file
-* except in compliance with the License. You may obtain a copy of
-* the License at http://www.mozilla.org/MPL/
-*
-* Software distributed under the License is distributed on an "AS
-* IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
-* implied. See the License for the specific language governing
-* rights and limitations under the License.
-*
-* Alternatively, the contents of this file may be used under the
-* terms of the GNU Public License version 2 (the "GPL"), in which
-* case the provisions of the GPL are applicable instead of the
-* above. If you wish to allow the use of your version of this file
-* only under the terms of the GPL and not to allow others to use
-* your version of this file under the MPL, indicate your decision
-* by deleting the provisions above and replace them with the notice
-* and other provisions required by the GPL. If you do not delete
-* the provisions above, a recipient may use your version of this
-* file under either the MPL or the GPL.
-*
-* --------------------------------------------------------------------
-*
-* Inquiries regarding the linux-wlan Open Source project can be
-* made directly to:
-*
-* AbsoluteValue Systems Inc.
-* info@linux-wlan.com
-* http://www.linux-wlan.com
-*
-* --------------------------------------------------------------------
-*
-* Portions of the development of this software were funded by
-* Intersil Corporation as part of PRISM(R) chipset product development.
-*
-* --------------------------------------------------------------------
-*
-* This file implements functions that correspond to the prism2/hfa384x
-* 802.11 MAC hardware and firmware host interface.
-*
-* The functions can be considered to represent several levels of
-* abstraction. The lowest level functions are simply C-callable wrappers
-* around the register accesses. The next higher level represents C-callable
-* prism2 API functions that match the Intersil documentation as closely
-* as is reasonable. The next higher layer implements common sequences
-* of invocations of the API layer (e.g. write to bap, followed by cmd).
-*
-* Common sequences:
-* hfa384x_drvr_xxx Highest level abstractions provided by the
-* hfa384x code. They are driver defined wrappers
-* for common sequences. These functions generally
-* use the services of the lower levels.
-*
-* hfa384x_drvr_xxxconfig An example of the drvr level abstraction. These
-* functions are wrappers for the RID get/set
-* sequence. They call copy_[to|from]_bap() and
-* cmd_access(). These functions operate on the
-* RIDs and buffers without validation. The caller
-* is responsible for that.
-*
-* API wrapper functions:
-* hfa384x_cmd_xxx functions that provide access to the f/w commands.
-* The function arguments correspond to each command
-* argument, even command arguments that get packed
-* into single registers. These functions _just_
-* issue the command by setting the cmd/parm regs
-* & reading the status/resp regs. Additional
-* activities required to fully use a command
-* (read/write from/to bap, get/set int status etc.)
-* are implemented separately. Think of these as
-* C-callable prism2 commands.
-*
-* Lowest Layer Functions:
-* hfa384x_docmd_xxx These functions implement the sequence required
-* to issue any prism2 command. Primarily used by the
-* hfa384x_cmd_xxx functions.
-*
-* hfa384x_bap_xxx BAP read/write access functions.
-* Note: we usually use BAP0 for non-interrupt context
-* and BAP1 for interrupt context.
-*
-* hfa384x_dl_xxx download related functions.
-*
-* Driver State Issues:
-* Note that there are two pairs of functions that manage the
-* 'initialized' and 'running' states of the hw/MAC combo. The four
-* functions are create(), destroy(), start(), and stop(). create()
-* sets up the data structures required to support the hfa384x_*
-* functions and destroy() cleans them up. The start() function gets
-* the actual hardware running and enables the interrupts. The stop()
-* function shuts the hardware down. The sequence should be:
-* create()
-* start()
-* .
-* . Do interesting things w/ the hardware
-* .
-* stop()
-* destroy()
-*
-* Note that destroy() can be called without calling stop() first.
-* --------------------------------------------------------------------
-*/
+ *
+ * Functions that talk to the USB variant of the Intersil hfa384x MAC
+ *
+ * Copyright (C) 1999 AbsoluteValue Systems, Inc. All Rights Reserved.
+ * --------------------------------------------------------------------
+ *
+ * linux-wlan
+ *
+ * The contents of this file are subject to the Mozilla Public
+ * License Version 1.1 (the "License"); you may not use this file
+ * except in compliance with the License. You may obtain a copy of
+ * the License at http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS
+ * IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
+ * implied. See the License for the specific language governing
+ * rights and limitations under the License.
+ *
+ * Alternatively, the contents of this file may be used under the
+ * terms of the GNU Public License version 2 (the "GPL"), in which
+ * case the provisions of the GPL are applicable instead of the
+ * above. If you wish to allow the use of your version of this file
+ * only under the terms of the GPL and not to allow others to use
+ * your version of this file under the MPL, indicate your decision
+ * by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL. If you do not delete
+ * the provisions above, a recipient may use your version of this
+ * file under either the MPL or the GPL.
+ *
+ * --------------------------------------------------------------------
+ *
+ * Inquiries regarding the linux-wlan Open Source project can be
+ * made directly to:
+ *
+ * AbsoluteValue Systems Inc.
+ * info@linux-wlan.com
+ * http://www.linux-wlan.com
+ *
+ * --------------------------------------------------------------------
+ *
+ * Portions of the development of this software were funded by
+ * Intersil Corporation as part of PRISM(R) chipset product development.
+ *
+ * --------------------------------------------------------------------
+ *
+ * This file implements functions that correspond to the prism2/hfa384x
+ * 802.11 MAC hardware and firmware host interface.
+ *
+ * The functions can be considered to represent several levels of
+ * abstraction. The lowest level functions are simply C-callable wrappers
+ * around the register accesses. The next higher level represents C-callable
+ * prism2 API functions that match the Intersil documentation as closely
+ * as is reasonable. The next higher layer implements common sequences
+ * of invocations of the API layer (e.g. write to bap, followed by cmd).
+ *
+ * Common sequences:
+ * hfa384x_drvr_xxx Highest level abstractions provided by the
+ * hfa384x code. They are driver defined wrappers
+ * for common sequences. These functions generally
+ * use the services of the lower levels.
+ *
+ * hfa384x_drvr_xxxconfig An example of the drvr level abstraction. These
+ * functions are wrappers for the RID get/set
+ * sequence. They call copy_[to|from]_bap() and
+ * cmd_access(). These functions operate on the
+ * RIDs and buffers without validation. The caller
+ * is responsible for that.
+ *
+ * API wrapper functions:
+ * hfa384x_cmd_xxx functions that provide access to the f/w commands.
+ * The function arguments correspond to each command
+ * argument, even command arguments that get packed
+ * into single registers. These functions _just_
+ * issue the command by setting the cmd/parm regs
+ * & reading the status/resp regs. Additional
+ * activities required to fully use a command
+ * (read/write from/to bap, get/set int status etc.)
+ * are implemented separately. Think of these as
+ * C-callable prism2 commands.
+ *
+ * Lowest Layer Functions:
+ * hfa384x_docmd_xxx These functions implement the sequence required
+ * to issue any prism2 command. Primarily used by the
+ * hfa384x_cmd_xxx functions.
+ *
+ * hfa384x_bap_xxx BAP read/write access functions.
+ * Note: we usually use BAP0 for non-interrupt context
+ * and BAP1 for interrupt context.
+ *
+ * hfa384x_dl_xxx download related functions.
+ *
+ * Driver State Issues:
+ * Note that there are two pairs of functions that manage the
+ * 'initialized' and 'running' states of the hw/MAC combo. The four
+ * functions are create(), destroy(), start(), and stop(). create()
+ * sets up the data structures required to support the hfa384x_*
+ * functions and destroy() cleans them up. The start() function gets
+ * the actual hardware running and enables the interrupts. The stop()
+ * function shuts the hardware down. The sequence should be:
+ * create()
+ * start()
+ * .
+ * . Do interesting things w/ the hardware
+ * .
+ * stop()
+ * destroy()
+ *
+ * Note that destroy() can be called without calling stop() first.
+ * --------------------------------------------------------------------
+ */
#include <linux/module.h>
#include <linux/kernel.h>
@@ -153,8 +153,8 @@ enum cmd_mode {
static void dbprint_urb(struct urb *urb);
#endif
-static void
-hfa384x_int_rxmonitor(struct wlandevice *wlandev, struct hfa384x_usb_rxfrm *rxfrm);
+static void hfa384x_int_rxmonitor(struct wlandevice *wlandev,
+ struct hfa384x_usb_rxfrm *rxfrm);
static void hfa384x_usb_defer(struct work_struct *data);
@@ -173,7 +173,8 @@ hfa384x_usbin_txcompl(struct wlandevice *wlandev, union hfa384x_usbin *usbin);
static void hfa384x_usbin_rx(struct wlandevice *wlandev, struct sk_buff *skb);
-static void hfa384x_usbin_info(struct wlandevice *wlandev, union hfa384x_usbin *usbin);
+static void hfa384x_usbin_info(struct wlandevice *wlandev,
+ union hfa384x_usbin *usbin);
static void hfa384x_usbin_ctlx(struct hfa384x *hw, union hfa384x_usbin *usbin,
int urb_status);
@@ -193,9 +194,11 @@ static void hfa384x_usbctlx_completion_task(unsigned long data);
static void hfa384x_usbctlx_reaper_task(unsigned long data);
-static int hfa384x_usbctlx_submit(struct hfa384x *hw, struct hfa384x_usbctlx *ctlx);
+static int hfa384x_usbctlx_submit(struct hfa384x *hw,
+ struct hfa384x_usbctlx *ctlx);
-static void unlocked_usbctlx_complete(struct hfa384x *hw, struct hfa384x_usbctlx *ctlx);
+static void unlocked_usbctlx_complete(struct hfa384x *hw,
+ struct hfa384x_usbctlx *ctlx);
struct usbctlx_completor {
int (*complete)(struct usbctlx_completor *);
@@ -209,7 +212,8 @@ hfa384x_usbctlx_complete_sync(struct hfa384x *hw,
static int
unlocked_usbctlx_cancel_async(struct hfa384x *hw, struct hfa384x_usbctlx *ctlx);
-static void hfa384x_cb_status(struct hfa384x *hw, const struct hfa384x_usbctlx *ctlx);
+static void hfa384x_cb_status(struct hfa384x *hw,
+ const struct hfa384x_usbctlx *ctlx);
static int
usbctlx_get_status(const struct hfa384x_usb_statusresp *cmdresp,
@@ -263,7 +267,7 @@ hfa384x_dowmem(struct hfa384x *hw,
static int hfa384x_isgood_pdrcode(u16 pdrcode);
-static inline const char *ctlxstr(CTLX_STATE s)
+static inline const char *ctlxstr(enum ctlx_state s)
{
static const char * const ctlx_str[] = {
"Initial state",
@@ -307,21 +311,22 @@ void dbprint_urb(struct urb *urb)
#endif
/*----------------------------------------------------------------
-* submit_rx_urb
-*
-* Listen for input data on the BULK-IN pipe. If the pipe has
-* stalled then schedule it to be reset.
-*
-* Arguments:
-* hw device struct
-* memflags memory allocation flags
-*
-* Returns:
-* error code from submission
-*
-* Call context:
-* Any
-----------------------------------------------------------------*/
+ * submit_rx_urb
+ *
+ * Listen for input data on the BULK-IN pipe. If the pipe has
+ * stalled then schedule it to be reset.
+ *
+ * Arguments:
+ * hw device struct
+ * memflags memory allocation flags
+ *
+ * Returns:
+ * error code from submission
+ *
+ * Call context:
+ * Any
+ *----------------------------------------------------------------
+ */
static int submit_rx_urb(struct hfa384x *hw, gfp_t memflags)
{
struct sk_buff *skb;
@@ -367,23 +372,24 @@ done:
}
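The submit-and-defer pattern both submit_* helpers describe looks roughly like this sketch; rx_urb, usb_flags, and the WORK_RX_HALT flag are assumptions based on the surrounding code, not taken verbatim from this patch:

	result = usb_submit_urb(&hw->rx_urb, memflags);
	if (result == -EPIPE) {
		/* BULK-IN pipe stalled: clearing the halt may sleep,
		 * so hand the reset to the process-context worker.
		 */
		netdev_warn(hw->wlandev->netdev, "rx pipe stalled\n");
		set_bit(WORK_RX_HALT, &hw->usb_flags);	/* assumed flag */
		schedule_work(&hw->usb_work);
	}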
/*----------------------------------------------------------------
-* submit_tx_urb
-*
-* Prepares and submits the URB of transmitted data. If the
-* submission fails then it will schedule the output pipe to
-* be reset.
-*
-* Arguments:
-* hw device struct
-* tx_urb URB of data for transmission
-* memflags memory allocation flags
-*
-* Returns:
-* error code from submission
-*
-* Call context:
-* Any
-----------------------------------------------------------------*/
+ * submit_tx_urb
+ *
+ * Prepares and submits the URB of transmitted data. If the
+ * submission fails then it will schedule the output pipe to
+ * be reset.
+ *
+ * Arguments:
+ * hw device struct
+ * tx_urb URB of data for transmission
+ * memflags memory allocation flags
+ *
+ * Returns:
+ * error code from submission
+ *
+ * Call context:
+ * Any
+ *----------------------------------------------------------------
+ */
static int submit_tx_urb(struct hfa384x *hw, struct urb *tx_urb, gfp_t memflags)
{
struct net_device *netdev = hw->wlandev->netdev;
@@ -412,21 +418,22 @@ static int submit_tx_urb(struct hfa384x *hw, struct urb *tx_urb, gfp_t memflags)
}
/*----------------------------------------------------------------
-* hfa394x_usb_defer
-*
-* There are some things that the USB stack cannot do while
-* in interrupt context, so we arrange this function to run
-* in process context.
-*
-* Arguments:
-* hw device structure
-*
-* Returns:
-* nothing
-*
-* Call context:
-* process (by design)
-----------------------------------------------------------------*/
+ * hfa384x_usb_defer
+ *
+ * There are some things that the USB stack cannot do while
+ * in interrupt context, so we arrange this function to run
+ * in process context.
+ *
+ * Arguments:
+ * hw device structure
+ *
+ * Returns:
+ * nothing
+ *
+ * Call context:
+ * process (by design)
+ *----------------------------------------------------------------
+ */
static void hfa384x_usb_defer(struct work_struct *data)
{
struct hfa384x *hw = container_of(data, struct hfa384x, usb_work);
@@ -501,29 +508,30 @@ static void hfa384x_usb_defer(struct work_struct *data)
}
/*----------------------------------------------------------------
-* hfa384x_create
-*
-* Sets up the struct hfa384x data structure for use. Note this
-* does _not_ initialize the actual hardware, just the data structures
-* we use to keep track of its state.
-*
-* Arguments:
-* hw device structure
-* irq device irq number
-* iobase i/o base address for register access
-* membase memory base address for register access
-*
-* Returns:
-* nothing
-*
-* Side effects:
-*
-* Call context:
-* process
-----------------------------------------------------------------*/
+ * hfa384x_create
+ *
+ * Sets up the struct hfa384x data structure for use. Note this
+ * does _not_ initialize the actual hardware, just the data structures
+ * we use to keep track of its state.
+ *
+ * Arguments:
+ * hw device structure
+ * irq device irq number
+ * iobase i/o base address for register access
+ * membase memory base address for register access
+ *
+ * Returns:
+ * nothing
+ *
+ * Side effects:
+ *
+ * Call context:
+ * process
+ *----------------------------------------------------------------
+ */
void hfa384x_create(struct hfa384x *hw, struct usb_device *usb)
{
- memset(hw, 0, sizeof(struct hfa384x));
+ memset(hw, 0, sizeof(*hw));
hw->usb = usb;
/* set up the endpoints */
@@ -571,27 +579,28 @@ void hfa384x_create(struct hfa384x *hw, struct usb_device *usb)
}
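A sketch of the bookkeeping hfa384x_create() is responsible for, per the comment above (usb_work and the two tasklet handlers are visible elsewhere in this file; the tasklet field names are assumptions):

	memset(hw, 0, sizeof(*hw));
	hw->usb = usb;
	INIT_WORK(&hw->usb_work, hfa384x_usb_defer);
	tasklet_init(&hw->reaper_bh,		/* assumed field name */
		     hfa384x_usbctlx_reaper_task, (unsigned long)hw);
	tasklet_init(&hw->completion_bh,	/* assumed field name */
		     hfa384x_usbctlx_completion_task, (unsigned long)hw);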
/*----------------------------------------------------------------
-* hfa384x_destroy
-*
-* Partner to hfa384x_create(). This function cleans up the hw
-* structure so that it can be freed by the caller using a simple
-* kfree. Currently, this function is just a placeholder. If, at some
-* point in the future, an hw in the 'shutdown' state requires a 'deep'
-* kfree, this is where it should be done. Note that if this function
-* is called on a _running_ hw structure, the drvr_stop() function is
-* called.
-*
-* Arguments:
-* hw device structure
-*
-* Returns:
-* nothing, this function is not allowed to fail.
-*
-* Side effects:
-*
-* Call context:
-* process
-----------------------------------------------------------------*/
+ * hfa384x_destroy
+ *
+ * Partner to hfa384x_create(). This function cleans up the hw
+ * structure so that it can be freed by the caller using a simple
+ * kfree. Currently, this function is just a placeholder. If, at some
+ * point in the future, an hw in the 'shutdown' state requires a 'deep'
+ * kfree, this is where it should be done. Note that if this function
+ * is called on a _running_ hw structure, the drvr_stop() function is
+ * called.
+ *
+ * Arguments:
+ * hw device structure
+ *
+ * Returns:
+ * nothing, this function is not allowed to fail.
+ *
+ * Side effects:
+ *
+ * Call context:
+ * process
+ *----------------------------------------------------------------
+ */
void hfa384x_destroy(struct hfa384x *hw)
{
struct sk_buff *skb;
@@ -645,10 +654,11 @@ usbctlx_get_rridresult(const struct hfa384x_usb_rridresp *rridresp,
}
/*----------------------------------------------------------------
-* Completor object:
-* This completor must be passed to hfa384x_usbctlx_complete_sync()
-* when processing a CTLX that returns a struct hfa384x_cmdresult structure.
-----------------------------------------------------------------*/
+ * Completor object:
+ * This completor must be passed to hfa384x_usbctlx_complete_sync()
+ * when processing a CTLX that returns a struct hfa384x_cmdresult structure.
+ *----------------------------------------------------------------
+ */
struct usbctlx_cmd_completor {
struct usbctlx_completor head;
@@ -664,24 +674,23 @@ static inline int usbctlx_cmd_completor_fn(struct usbctlx_completor *head)
return usbctlx_get_status(complete->cmdresp, complete->result);
}
-static inline struct usbctlx_completor *init_cmd_completor(
- struct usbctlx_cmd_completor
- *completor,
- const struct hfa384x_usb_statusresp
- *cmdresp,
- struct hfa384x_cmdresult *result)
+static inline struct usbctlx_completor *
+init_cmd_completor(struct usbctlx_cmd_completor *completor,
+ const struct hfa384x_usb_statusresp *cmdresp,
+ struct hfa384x_cmdresult *result)
{
completor->head.complete = usbctlx_cmd_completor_fn;
completor->cmdresp = cmdresp;
completor->result = result;
- return &(completor->head);
+ return &completor->head;
}
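Putting the completor pattern together, the synchronous command path can be sketched as follows; the inbuf.cmdresp field path is an assumption, the rest appears in this file:

	struct usbctlx_cmd_completor completor;
	struct hfa384x_cmdresult cmdresult;
	int result;

	result = hfa384x_usbctlx_complete_sync(hw, ctlx,
			init_cmd_completor(&completor,
					   &ctlx->inbuf.cmdresp, /* assumed */
					   &cmdresult));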
/*----------------------------------------------------------------
-* Completor object:
-* This completor must be passed to hfa384x_usbctlx_complete_sync()
-* when processing a CTLX that reads a RID.
-----------------------------------------------------------------*/
+ * Completor object:
+ * This completor must be passed to hfa384x_usbctlx_complete_sync()
+ * when processing a CTLX that reads a RID.
+ *----------------------------------------------------------------
+ */
struct usbctlx_rrid_completor {
struct usbctlx_completor head;
@@ -710,37 +719,38 @@ static int usbctlx_rrid_completor_fn(struct usbctlx_completor *head)
return 0;
}
-static inline struct usbctlx_completor *init_rrid_completor(
- struct usbctlx_rrid_completor
- *completor,
- const struct hfa384x_usb_rridresp
- *rridresp,
- void *riddata,
- unsigned int riddatalen)
+static inline struct usbctlx_completor *
+init_rrid_completor(struct usbctlx_rrid_completor *completor,
+ const struct hfa384x_usb_rridresp *rridresp,
+ void *riddata,
+ unsigned int riddatalen)
{
completor->head.complete = usbctlx_rrid_completor_fn;
completor->rridresp = rridresp;
completor->riddata = riddata;
completor->riddatalen = riddatalen;
- return &(completor->head);
+ return &completor->head;
}
/*----------------------------------------------------------------
-* Completor object:
-* Interprets the results of a synchronous RID-write
-----------------------------------------------------------------*/
+ * Completor object:
+ * Interprets the results of a synchronous RID-write
+ *----------------------------------------------------------------
+ */
#define init_wrid_completor init_cmd_completor
/*----------------------------------------------------------------
-* Completor object:
-* Interprets the results of a synchronous memory-write
-----------------------------------------------------------------*/
+ * Completor object:
+ * Interprets the results of a synchronous memory-write
+ *----------------------------------------------------------------
+ */
#define init_wmem_completor init_cmd_completor
/*----------------------------------------------------------------
-* Completor object:
-* Interprets the results of a synchronous memory-read
-----------------------------------------------------------------*/
+ * Completor object:
+ * Interprets the results of a synchronous memory-read
+ *----------------------------------------------------------------
+ */
struct usbctlx_rmem_completor {
struct usbctlx_completor head;
@@ -759,43 +769,43 @@ static int usbctlx_rmem_completor_fn(struct usbctlx_completor *head)
return 0;
}
-static inline struct usbctlx_completor *init_rmem_completor(
- struct usbctlx_rmem_completor
- *completor,
- struct hfa384x_usb_rmemresp
- *rmemresp,
- void *data,
- unsigned int len)
+static inline struct usbctlx_completor *
+init_rmem_completor(struct usbctlx_rmem_completor *completor,
+ struct hfa384x_usb_rmemresp *rmemresp,
+ void *data,
+ unsigned int len)
{
completor->head.complete = usbctlx_rmem_completor_fn;
completor->rmemresp = rmemresp;
completor->data = data;
completor->len = len;
- return &(completor->head);
+ return &completor->head;
}
/*----------------------------------------------------------------
-* hfa384x_cb_status
-*
-* Ctlx_complete handler for async CMD type control exchanges.
-* mark the hw struct as such.
-*
-* Note: If the handling is changed here, it should probably be
-* changed in docmd as well.
-*
-* Arguments:
-* hw hw struct
-* ctlx completed CTLX
-*
-* Returns:
-* nothing
-*
-* Side effects:
-*
-* Call context:
-* interrupt
-----------------------------------------------------------------*/
-static void hfa384x_cb_status(struct hfa384x *hw, const struct hfa384x_usbctlx *ctlx)
+ * hfa384x_cb_status
+ *
+ * Ctlx_complete handler for async CMD type control exchanges:
+ * builds a cmdresult and hands it to the user callback.

+ *
+ * Note: If the handling is changed here, it should probably be
+ * changed in docmd as well.
+ *
+ * Arguments:
+ * hw hw struct
+ * ctlx completed CTLX
+ *
+ * Returns:
+ * nothing
+ *
+ * Side effects:
+ *
+ * Call context:
+ * interrupt
+ *----------------------------------------------------------------
+ */
+static void hfa384x_cb_status(struct hfa384x *hw,
+ const struct hfa384x_usbctlx *ctlx)
{
if (ctlx->usercb) {
struct hfa384x_cmdresult cmdresult;
@@ -812,7 +822,8 @@ static void hfa384x_cb_status(struct hfa384x *hw, const struct hfa384x_usbctlx *
}
}
-static inline int hfa384x_docmd_wait(struct hfa384x *hw, struct hfa384x_metacmd *cmd)
+static inline int hfa384x_docmd_wait(struct hfa384x *hw,
+ struct hfa384x_metacmd *cmd)
{
return hfa384x_docmd(hw, DOWAIT, cmd, NULL, NULL, NULL);
}
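For reference, issuing a command through this wrapper reduces to filling a metacmd and blocking on the CTLX; the field names and the HFA384x_CMDCODE_INIT constant in this sketch are assumptions drawn from the Arguments descriptions in this file:

	struct hfa384x_metacmd cmd;

	cmd.cmd = HFA384x_CMDCODE_INIT;		/* assumed constant */
	cmd.parm0 = 0;				/* assumed field names */
	cmd.parm1 = 0;
	cmd.parm2 = 0;
	result = hfa384x_docmd_wait(hw, &cmd);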
@@ -905,24 +916,25 @@ hfa384x_dowmem_async(struct hfa384x *hw,
}
/*----------------------------------------------------------------
-* hfa384x_cmd_initialize
-*
-* Issues the initialize command and sets the hw->state based
-* on the result.
-*
-* Arguments:
-* hw device structure
-*
-* Returns:
-* 0 success
-* >0 f/w reported error - f/w status code
-* <0 driver reported error
-*
-* Side effects:
-*
-* Call context:
-* process
-----------------------------------------------------------------*/
+ * hfa384x_cmd_initialize
+ *
+ * Issues the initialize command and sets the hw->state based
+ * on the result.
+ *
+ * Arguments:
+ * hw device structure
+ *
+ * Returns:
+ * 0 success
+ * >0 f/w reported error - f/w status code
+ * <0 driver reported error
+ *
+ * Side effects:
+ *
+ * Call context:
+ * process
+ *----------------------------------------------------------------
+ */
int hfa384x_cmd_initialize(struct hfa384x *hw)
{
int result = 0;
@@ -950,25 +962,26 @@ int hfa384x_cmd_initialize(struct hfa384x *hw)
}
/*----------------------------------------------------------------
-* hfa384x_cmd_disable
-*
-* Issues the disable command to stop communications on one of
-* the MACs 'ports'.
-*
-* Arguments:
-* hw device structure
-* macport MAC port number (host order)
-*
-* Returns:
-* 0 success
-* >0 f/w reported failure - f/w status code
-* <0 driver reported error (timeout|bad arg)
-*
-* Side effects:
-*
-* Call context:
-* process
-----------------------------------------------------------------*/
+ * hfa384x_cmd_disable
+ *
+ * Issues the disable command to stop communications on one of
+ * the MAC's 'ports'.
+ *
+ * Arguments:
+ * hw device structure
+ * macport MAC port number (host order)
+ *
+ * Returns:
+ * 0 success
+ * >0 f/w reported failure - f/w status code
+ * <0 driver reported error (timeout|bad arg)
+ *
+ * Side effects:
+ *
+ * Call context:
+ * process
+ *----------------------------------------------------------------
+ */
int hfa384x_cmd_disable(struct hfa384x *hw, u16 macport)
{
struct hfa384x_metacmd cmd;
@@ -983,25 +996,26 @@ int hfa384x_cmd_disable(struct hfa384x *hw, u16 macport)
}
/*----------------------------------------------------------------
-* hfa384x_cmd_enable
-*
-* Issues the enable command to enable communications on one of
-* the MACs 'ports'.
-*
-* Arguments:
-* hw device structure
-* macport MAC port number
-*
-* Returns:
-* 0 success
-* >0 f/w reported failure - f/w status code
-* <0 driver reported error (timeout|bad arg)
-*
-* Side effects:
-*
-* Call context:
-* process
-----------------------------------------------------------------*/
+ * hfa384x_cmd_enable
+ *
+ * Issues the enable command to enable communications on one of
+ * the MAC's 'ports'.
+ *
+ * Arguments:
+ * hw device structure
+ * macport MAC port number
+ *
+ * Returns:
+ * 0 success
+ * >0 f/w reported failure - f/w status code
+ * <0 driver reported error (timeout|bad arg)
+ *
+ * Side effects:
+ *
+ * Call context:
+ * process
+ *----------------------------------------------------------------
+ */
int hfa384x_cmd_enable(struct hfa384x *hw, u16 macport)
{
struct hfa384x_metacmd cmd;
@@ -1016,34 +1030,35 @@ int hfa384x_cmd_enable(struct hfa384x *hw, u16 macport)
}
/*----------------------------------------------------------------
-* hfa384x_cmd_monitor
-*
-* Enables the 'monitor mode' of the MAC. Here's the description of
-* monitor mode that I've received thus far:
-*
-* "The "monitor mode" of operation is that the MAC passes all
-* frames for which the PLCP checks are correct. All received
-* MPDUs are passed to the host with MAC Port = 7, with a
-* receive status of good, FCS error, or undecryptable. Passing
-* certain MPDUs is a violation of the 802.11 standard, but useful
-* for a debugging tool." Normal communication is not possible
-* while monitor mode is enabled.
-*
-* Arguments:
-* hw device structure
-* enable a code (0x0b|0x0f) that enables/disables
-* monitor mode. (host order)
-*
-* Returns:
-* 0 success
-* >0 f/w reported failure - f/w status code
-* <0 driver reported error (timeout|bad arg)
-*
-* Side effects:
-*
-* Call context:
-* process
-----------------------------------------------------------------*/
+ * hfa384x_cmd_monitor
+ *
+ * Enables the 'monitor mode' of the MAC. Here's the description of
+ * monitor mode that I've received thus far:
+ *
+ * "The "monitor mode" of operation is that the MAC passes all
+ * frames for which the PLCP checks are correct. All received
+ * MPDUs are passed to the host with MAC Port = 7, with a
+ * receive status of good, FCS error, or undecryptable. Passing
+ * certain MPDUs is a violation of the 802.11 standard, but useful
+ * for a debugging tool." Normal communication is not possible
+ * while monitor mode is enabled.
+ *
+ * Arguments:
+ * hw device structure
+ * enable a code (0x0b|0x0f) that enables/disables
+ * monitor mode. (host order)
+ *
+ * Returns:
+ * 0 success
+ * >0 f/w reported failure - f/w status code
+ * <0 driver reported error (timeout|bad arg)
+ *
+ * Side effects:
+ *
+ * Call context:
+ * process
+ *----------------------------------------------------------------
+ */
int hfa384x_cmd_monitor(struct hfa384x *hw, u16 enable)
{
struct hfa384x_metacmd cmd;
@@ -1058,43 +1073,44 @@ int hfa384x_cmd_monitor(struct hfa384x *hw, u16 enable)
}
/*----------------------------------------------------------------
-* hfa384x_cmd_download
-*
-* Sets the controls for the MAC controller code/data download
-* process. The arguments set the mode and address associated
-* with a download. Note that the aux registers should be enabled
-* prior to setting one of the download enable modes.
-*
-* Arguments:
-* hw device structure
-* mode 0 - Disable programming and begin code exec
-* 1 - Enable volatile mem programming
-* 2 - Enable non-volatile mem programming
-* 3 - Program non-volatile section from NV download
-* buffer.
-* (host order)
-* lowaddr
-* highaddr For mode 1, sets the high & low order bits of
-* the "destination address". This address will be
-* the execution start address when download is
-* subsequently disabled.
-* For mode 2, sets the high & low order bits of
-* the destination in NV ram.
-* For modes 0 & 3, should be zero. (host order)
-* NOTE: these are CMD format.
-* codelen Length of the data to write in mode 2,
-* zero otherwise. (host order)
-*
-* Returns:
-* 0 success
-* >0 f/w reported failure - f/w status code
-* <0 driver reported error (timeout|bad arg)
-*
-* Side effects:
-*
-* Call context:
-* process
-----------------------------------------------------------------*/
+ * hfa384x_cmd_download
+ *
+ * Sets the controls for the MAC controller code/data download
+ * process. The arguments set the mode and address associated
+ * with a download. Note that the aux registers should be enabled
+ * prior to setting one of the download enable modes.
+ *
+ * Arguments:
+ * hw device structure
+ * mode 0 - Disable programming and begin code exec
+ * 1 - Enable volatile mem programming
+ * 2 - Enable non-volatile mem programming
+ * 3 - Program non-volatile section from NV download
+ * buffer.
+ * (host order)
+ * lowaddr
+ * highaddr For mode 1, sets the high & low order bits of
+ * the "destination address". This address will be
+ * the execution start address when download is
+ * subsequently disabled.
+ * For mode 2, sets the high & low order bits of
+ * the destination in NV ram.
+ * For modes 0 & 3, should be zero. (host order)
+ * NOTE: these are CMD format.
+ * codelen Length of the data to write in mode 2,
+ * zero otherwise. (host order)
+ *
+ * Returns:
+ * 0 success
+ * >0 f/w reported failure - f/w status code
+ * <0 driver reported error (timeout|bad arg)
+ *
+ * Side effects:
+ *
+ * Call context:
+ * process
+ *----------------------------------------------------------------
+ */
int hfa384x_cmd_download(struct hfa384x *hw, u16 mode, u16 lowaddr,
u16 highaddr, u16 codelen)
{
@@ -1114,29 +1130,31 @@ int hfa384x_cmd_download(struct hfa384x *hw, u16 mode, u16 lowaddr,
}
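Per the mode table above, a volatile (RAM) download is bracketed like this sketch: mode 1 opens volatile programming at the given destination/execution address, and mode 0 closes programming and begins execution:

	result = hfa384x_cmd_download(hw, 1, lowaddr, highaddr, 0);
	/* ... write the image into card RAM ... */
	result = hfa384x_cmd_download(hw, 0, 0, 0, 0);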
/*----------------------------------------------------------------
-* hfa384x_corereset
-*
-* Perform a reset of the hfa38xx MAC core. We assume that the hw
-* structure is in its "created" state. That is, it is initialized
-* with proper values. Note that if a reset is done after the
-* device has been active for awhile, the caller might have to clean
-* up some leftover cruft in the hw structure.
-*
-* Arguments:
-* hw device structure
-* holdtime how long (in ms) to hold the reset
-* settletime how long (in ms) to wait after releasing
-* the reset
-*
-* Returns:
-* nothing
-*
-* Side effects:
-*
-* Call context:
-* process
-----------------------------------------------------------------*/
-int hfa384x_corereset(struct hfa384x *hw, int holdtime, int settletime, int genesis)
+ * hfa384x_corereset
+ *
+ * Perform a reset of the hfa38xx MAC core. We assume that the hw
+ * structure is in its "created" state. That is, it is initialized
+ * with proper values. Note that if a reset is done after the
+ * device has been active for a while, the caller might have to clean
+ * up some leftover cruft in the hw structure.
+ *
+ * Arguments:
+ * hw device structure
+ * holdtime how long (in ms) to hold the reset
+ * settletime how long (in ms) to wait after releasing
+ * the reset
+ *
+ * Returns:
+ * nothing
+ *
+ * Side effects:
+ *
+ * Call context:
+ * process
+ *----------------------------------------------------------------
+ */
+int hfa384x_corereset(struct hfa384x *hw, int holdtime,
+ int settletime, int genesis)
{
int result;
@@ -1150,29 +1168,30 @@ int hfa384x_corereset(struct hfa384x *hw, int holdtime, int settletime, int gene
}
/*----------------------------------------------------------------
-* hfa384x_usbctlx_complete_sync
-*
-* Waits for a synchronous CTLX object to complete,
-* and then handles the response.
-*
-* Arguments:
-* hw device structure
-* ctlx CTLX ptr
-* completor functor object to decide what to
-* do with the CTLX's result.
-*
-* Returns:
-* 0 Success
-* -ERESTARTSYS Interrupted by a signal
-* -EIO CTLX failed
-* -ENODEV Adapter was unplugged
-* ??? Result from completor
-*
-* Side effects:
-*
-* Call context:
-* process
-----------------------------------------------------------------*/
+ * hfa384x_usbctlx_complete_sync
+ *
+ * Waits for a synchronous CTLX object to complete,
+ * and then handles the response.
+ *
+ * Arguments:
+ * hw device structure
+ * ctlx CTLX ptr
+ * completor functor object to decide what to
+ * do with the CTLX's result.
+ *
+ * Returns:
+ * 0 Success
+ * -ERESTARTSYS Interrupted by a signal
+ * -EIO CTLX failed
+ * -ENODEV Adapter was unplugged
+ * ??? Result from completor
+ *
+ * Side effects:
+ *
+ * Call context:
+ * process
+ *----------------------------------------------------------------
+ */
static int hfa384x_usbctlx_complete_sync(struct hfa384x *hw,
struct hfa384x_usbctlx *ctlx,
struct usbctlx_completor *completor)
@@ -1257,37 +1276,38 @@ cleanup:
}
/*----------------------------------------------------------------
-* hfa384x_docmd
-*
-* Constructs a command CTLX and submits it.
-*
-* NOTE: Any changes to the 'post-submit' code in this function
-* need to be carried over to hfa384x_cbcmd() since the handling
-* is virtually identical.
-*
-* Arguments:
-* hw device structure
-* mode DOWAIT or DOASYNC
-* cmd cmd structure. Includes all arguments and result
-* data points. All in host order. in host order
-* cmdcb command-specific callback
-* usercb user callback for async calls, NULL for DOWAIT calls
-* usercb_data user supplied data pointer for async calls, NULL
-* for DOASYNC calls
-*
-* Returns:
-* 0 success
-* -EIO CTLX failure
-* -ERESTARTSYS Awakened on signal
-* >0 command indicated error, Status and Resp0-2 are
-* in hw structure.
-*
-* Side effects:
-*
-*
-* Call context:
-* process
-----------------------------------------------------------------*/
+ * hfa384x_docmd
+ *
+ * Constructs a command CTLX and submits it.
+ *
+ * NOTE: Any changes to the 'post-submit' code in this function
+ * need to be carried over to hfa384x_cbcmd() since the handling
+ * is virtually identical.
+ *
+ * Arguments:
+ * hw device structure
+ * mode DOWAIT or DOASYNC
+ * cmd cmd structure. Includes all arguments and result
+ * data points. All in host order.
+ * cmdcb command-specific callback
+ * usercb user callback for async calls, NULL for DOWAIT calls
+ * usercb_data user supplied data pointer for async calls, NULL
+ * for DOWAIT calls
+ *
+ * Returns:
+ * 0 success
+ * -EIO CTLX failure
+ * -ERESTARTSYS Awakened on signal
+ * >0 command indicated error, Status and Resp0-2 are
+ * in hw structure.
+ *
+ * Side effects:
+ *
+ * Call context:
+ * process
+ *----------------------------------------------------------------
+ */
static int
hfa384x_docmd(struct hfa384x *hw,
enum cmd_mode mode,
@@ -1341,41 +1361,42 @@ done:
}
/*----------------------------------------------------------------
-* hfa384x_dorrid
-*
-* Constructs a read rid CTLX and issues it.
-*
-* NOTE: Any changes to the 'post-submit' code in this function
-* need to be carried over to hfa384x_cbrrid() since the handling
-* is virtually identical.
-*
-* Arguments:
-* hw device structure
-* mode DOWAIT or DOASYNC
-* rid Read RID number (host order)
-* riddata Caller supplied buffer that MAC formatted RID.data
-* record will be written to for DOWAIT calls. Should
-* be NULL for DOASYNC calls.
-* riddatalen Buffer length for DOWAIT calls. Zero for DOASYNC calls.
-* cmdcb command callback for async calls, NULL for DOWAIT calls
-* usercb user callback for async calls, NULL for DOWAIT calls
-* usercb_data user supplied data pointer for async calls, NULL
-* for DOWAIT calls
-*
-* Returns:
-* 0 success
-* -EIO CTLX failure
-* -ERESTARTSYS Awakened on signal
-* -ENODATA riddatalen != macdatalen
-* >0 command indicated error, Status and Resp0-2 are
-* in hw structure.
-*
-* Side effects:
-*
-* Call context:
-* interrupt (DOASYNC)
-* process (DOWAIT or DOASYNC)
-----------------------------------------------------------------*/
+ * hfa384x_dorrid
+ *
+ * Constructs a read rid CTLX and issues it.
+ *
+ * NOTE: Any changes to the 'post-submit' code in this function
+ * need to be carried over to hfa384x_cbrrid() since the handling
+ * is virtually identical.
+ *
+ * Arguments:
+ * hw device structure
+ * mode DOWAIT or DOASYNC
+ * rid Read RID number (host order)
+ * riddata Caller-supplied buffer that the MAC-formatted RID.data
+ * record will be written to for DOWAIT calls. Should
+ * be NULL for DOASYNC calls.
+ * riddatalen Buffer length for DOWAIT calls. Zero for DOASYNC calls.
+ * cmdcb command callback for async calls, NULL for DOWAIT calls
+ * usercb user callback for async calls, NULL for DOWAIT calls
+ * usercb_data user supplied data pointer for async calls, NULL
+ * for DOWAIT calls
+ *
+ * Returns:
+ * 0 success
+ * -EIO CTLX failure
+ * -ERESTARTSYS Awakened on signal
+ * -ENODATA riddatalen != macdatalen
+ * >0 command indicated error, Status and Resp0-2 are
+ * in hw structure.
+ *
+ * Side effects:
+ *
+ * Call context:
+ * interrupt (DOASYNC)
+ * process (DOWAIT or DOASYNC)
+ *----------------------------------------------------------------
+ */
static int
hfa384x_dorrid(struct hfa384x *hw,
enum cmd_mode mode,
@@ -1426,37 +1447,38 @@ done:
}
/*----------------------------------------------------------------
-* hfa384x_dowrid
-*
-* Constructs a write rid CTLX and issues it.
-*
-* NOTE: Any changes to the 'post-submit' code in this function
-* need to be carried over to hfa384x_cbwrid() since the handling
-* is virtually identical.
-*
-* Arguments:
-* hw device structure
-* enum cmd_mode DOWAIT or DOASYNC
-* rid RID code
-* riddata Data portion of RID formatted for MAC
-* riddatalen Length of the data portion in bytes
-* cmdcb command callback for async calls, NULL for DOWAIT calls
-* usercb user callback for async calls, NULL for DOWAIT calls
-* usercb_data user supplied data pointer for async calls
-*
-* Returns:
-* 0 success
-* -ETIMEDOUT timed out waiting for register ready or
-* command completion
-* >0 command indicated error, Status and Resp0-2 are
-* in hw structure.
-*
-* Side effects:
-*
-* Call context:
-* interrupt (DOASYNC)
-* process (DOWAIT or DOASYNC)
-----------------------------------------------------------------*/
+ * hfa384x_dowrid
+ *
+ * Constructs a write rid CTLX and issues it.
+ *
+ * NOTE: Any changes to the 'post-submit' code in this function
+ * need to be carried over to hfa384x_cbwrid() since the handling
+ * is virtually identical.
+ *
+ * Arguments:
+ * hw device structure
+ * mode DOWAIT or DOASYNC
+ * rid RID code
+ * riddata Data portion of RID formatted for MAC
+ * riddatalen Length of the data portion in bytes
+ * cmdcb command callback for async calls, NULL for DOWAIT calls
+ * usercb user callback for async calls, NULL for DOWAIT calls
+ * usercb_data user supplied data pointer for async calls
+ *
+ * Returns:
+ * 0 success
+ * -ETIMEDOUT timed out waiting for register ready or
+ * command completion
+ * >0 command indicated error, Status and Resp0-2 are
+ * in hw structure.
+ *
+ * Side effects:
+ *
+ * Call context:
+ * interrupt (DOASYNC)
+ * process (DOWAIT or DOASYNC)
+ *----------------------------------------------------------------
+ */
static int
hfa384x_dowrid(struct hfa384x *hw,
enum cmd_mode mode,
@@ -1512,38 +1534,39 @@ done:
}
/*----------------------------------------------------------------
-* hfa384x_dormem
-*
-* Constructs a readmem CTLX and issues it.
-*
-* NOTE: Any changes to the 'post-submit' code in this function
-* need to be carried over to hfa384x_cbrmem() since the handling
-* is virtually identical.
-*
-* Arguments:
-* hw device structure
-* mode DOWAIT or DOASYNC
-* page MAC address space page (CMD format)
-* offset MAC address space offset
-* data Ptr to data buffer to receive read
-* len Length of the data to read (max == 2048)
-* cmdcb command callback for async calls, NULL for DOWAIT calls
-* usercb user callback for async calls, NULL for DOWAIT calls
-* usercb_data user supplied data pointer for async calls
-*
-* Returns:
-* 0 success
-* -ETIMEDOUT timed out waiting for register ready or
-* command completion
-* >0 command indicated error, Status and Resp0-2 are
-* in hw structure.
-*
-* Side effects:
-*
-* Call context:
-* interrupt (DOASYNC)
-* process (DOWAIT or DOASYNC)
-----------------------------------------------------------------*/
+ * hfa384x_dormem
+ *
+ * Constructs a readmem CTLX and issues it.
+ *
+ * NOTE: Any changes to the 'post-submit' code in this function
+ * need to be carried over to hfa384x_cbrmem() since the handling
+ * is virtually identical.
+ *
+ * Arguments:
+ * hw device structure
+ * mode DOWAIT or DOASYNC
+ * page MAC address space page (CMD format)
+ * offset MAC address space offset
+ * data Ptr to data buffer to receive read
+ * len Length of the data to read (max == 2048)
+ * cmdcb command callback for async calls, NULL for DOWAIT calls
+ * usercb user callback for async calls, NULL for DOWAIT calls
+ * usercb_data user supplied data pointer for async calls
+ *
+ * Returns:
+ * 0 success
+ * -ETIMEDOUT timed out waiting for register ready or
+ * command completion
+ * >0 command indicated error, Status and Resp0-2 are
+ * in hw structure.
+ *
+ * Side effects:
+ *
+ * Call context:
+ * interrupt (DOASYNC)
+ * process (DOWAIT or DOASYNC)
+ *----------------------------------------------------------------
+ */
static int
hfa384x_dormem(struct hfa384x *hw,
enum cmd_mode mode,
@@ -1603,38 +1626,39 @@ done:
}
/*----------------------------------------------------------------
-* hfa384x_dowmem
-*
-* Constructs a writemem CTLX and issues it.
-*
-* NOTE: Any changes to the 'post-submit' code in this function
-* need to be carried over to hfa384x_cbwmem() since the handling
-* is virtually identical.
-*
-* Arguments:
-* hw device structure
-* mode DOWAIT or DOASYNC
-* page MAC address space page (CMD format)
-* offset MAC address space offset
-* data Ptr to data buffer containing write data
-* len Length of the data to read (max == 2048)
-* cmdcb command callback for async calls, NULL for DOWAIT calls
-* usercb user callback for async calls, NULL for DOWAIT calls
-* usercb_data user supplied data pointer for async calls.
-*
-* Returns:
-* 0 success
-* -ETIMEDOUT timed out waiting for register ready or
-* command completion
-* >0 command indicated error, Status and Resp0-2 are
-* in hw structure.
-*
-* Side effects:
-*
-* Call context:
-* interrupt (DOWAIT)
-* process (DOWAIT or DOASYNC)
-----------------------------------------------------------------*/
+ * hfa384x_dowmem
+ *
+ * Constructs a writemem CTLX and issues it.
+ *
+ * NOTE: Any changes to the 'post-submit' code in this function
+ * need to be carried over to hfa384x_cbwmem() since the handling
+ * is virtually identical.
+ *
+ * Arguments:
+ * hw device structure
+ * mode DOWAIT or DOASYNC
+ * page MAC address space page (CMD format)
+ * offset MAC address space offset
+ * data Ptr to data buffer containing write data
+ * len Length of the data to write (max == 2048)
+ * cmdcb command callback for async calls, NULL for DOWAIT calls
+ * usercb user callback for async calls, NULL for DOWAIT calls
+ * usercb_data user supplied data pointer for async calls.
+ *
+ * Returns:
+ * 0 success
+ * -ETIMEDOUT timed out waiting for register ready or
+ * command completion
+ * >0 command indicated error, Status and Resp0-2 are
+ * in hw structure.
+ *
+ * Side effects:
+ *
+ * Call context:
+ * interrupt (DOASYNC)
+ * process (DOWAIT or DOASYNC)
+ *----------------------------------------------------------------
+ */
static int
hfa384x_dowmem(struct hfa384x *hw,
enum cmd_mode mode,
@@ -1694,27 +1718,28 @@ done:
}
/*----------------------------------------------------------------
-* hfa384x_drvr_disable
-*
-* Issues the disable command to stop communications on one of
-* the MACs 'ports'. Only macport 0 is valid for stations.
-* APs may also disable macports 1-6. Only ports that have been
-* previously enabled may be disabled.
-*
-* Arguments:
-* hw device structure
-* macport MAC port number (host order)
-*
-* Returns:
-* 0 success
-* >0 f/w reported failure - f/w status code
-* <0 driver reported error (timeout|bad arg)
-*
-* Side effects:
-*
-* Call context:
-* process
-----------------------------------------------------------------*/
+ * hfa384x_drvr_disable
+ *
+ * Issues the disable command to stop communications on one of
+ * the MAC's 'ports'. Only macport 0 is valid for stations.
+ * APs may also disable macports 1-6. Only ports that have been
+ * previously enabled may be disabled.
+ *
+ * Arguments:
+ * hw device structure
+ * macport MAC port number (host order)
+ *
+ * Returns:
+ * 0 success
+ * >0 f/w reported failure - f/w status code
+ * <0 driver reported error (timeout|bad arg)
+ *
+ * Side effects:
+ *
+ * Call context:
+ * process
+ *----------------------------------------------------------------
+ */
int hfa384x_drvr_disable(struct hfa384x *hw, u16 macport)
{
int result = 0;
@@ -1732,27 +1757,28 @@ int hfa384x_drvr_disable(struct hfa384x *hw, u16 macport)
}
/*----------------------------------------------------------------
-* hfa384x_drvr_enable
-*
-* Issues the enable command to enable communications on one of
-* the MACs 'ports'. Only macport 0 is valid for stations.
-* APs may also enable macports 1-6. Only ports that are currently
-* disabled may be enabled.
-*
-* Arguments:
-* hw device structure
-* macport MAC port number
-*
-* Returns:
-* 0 success
-* >0 f/w reported failure - f/w status code
-* <0 driver reported error (timeout|bad arg)
-*
-* Side effects:
-*
-* Call context:
-* process
-----------------------------------------------------------------*/
+ * hfa384x_drvr_enable
+ *
+ * Issues the enable command to enable communications on one of
+ * the MAC's 'ports'. Only macport 0 is valid for stations.
+ * APs may also enable macports 1-6. Only ports that are currently
+ * disabled may be enabled.
+ *
+ * Arguments:
+ * hw device structure
+ * macport MAC port number
+ *
+ * Returns:
+ * 0 success
+ * >0 f/w reported failure - f/w status code
+ * <0 driver reported error (timeout|bad arg)
+ *
+ * Side effects:
+ *
+ * Call context:
+ * process
+ *----------------------------------------------------------------
+ */
int hfa384x_drvr_enable(struct hfa384x *hw, u16 macport)
{
int result = 0;
@@ -1770,26 +1796,27 @@ int hfa384x_drvr_enable(struct hfa384x *hw, u16 macport)
}
/*----------------------------------------------------------------
-* hfa384x_drvr_flashdl_enable
-*
-* Begins the flash download state. Checks to see that we're not
-* already in a download state and that a port isn't enabled.
-* Sets the download state and retrieves the flash download
-* buffer location, buffer size, and timeout length.
-*
-* Arguments:
-* hw device structure
-*
-* Returns:
-* 0 success
-* >0 f/w reported error - f/w status code
-* <0 driver reported error
-*
-* Side effects:
-*
-* Call context:
-* process
-----------------------------------------------------------------*/
+ * hfa384x_drvr_flashdl_enable
+ *
+ * Begins the flash download state. Checks to see that we're not
+ * already in a download state and that a port isn't enabled.
+ * Sets the download state and retrieves the flash download
+ * buffer location, buffer size, and timeout length.
+ *
+ * Arguments:
+ * hw device structure
+ *
+ * Returns:
+ * 0 success
+ * >0 f/w reported error - f/w status code
+ * <0 driver reported error
+ *
+ * Side effects:
+ *
+ * Call context:
+ * process
+ *----------------------------------------------------------------
+ */
int hfa384x_drvr_flashdl_enable(struct hfa384x *hw)
{
int result = 0;
@@ -1809,7 +1836,7 @@ int hfa384x_drvr_flashdl_enable(struct hfa384x *hw)
/* Retrieve the buffer loc&size and timeout */
result = hfa384x_drvr_getconfig(hw, HFA384x_RID_DOWNLOADBUFFER,
- &(hw->bufinfo), sizeof(hw->bufinfo));
+ &hw->bufinfo, sizeof(hw->bufinfo));
if (result)
return result;
@@ -1817,7 +1844,7 @@ int hfa384x_drvr_flashdl_enable(struct hfa384x *hw)
hw->bufinfo.offset = le16_to_cpu(hw->bufinfo.offset);
hw->bufinfo.len = le16_to_cpu(hw->bufinfo.len);
result = hfa384x_drvr_getconfig16(hw, HFA384x_RID_MAXLOADTIME,
- &(hw->dltimeout));
+ &hw->dltimeout);
if (result)
return result;
@@ -1831,24 +1858,25 @@ int hfa384x_drvr_flashdl_enable(struct hfa384x *hw)
}
/*----------------------------------------------------------------
-* hfa384x_drvr_flashdl_disable
-*
-* Ends the flash download state. Note that this will cause the MAC
-* firmware to restart.
-*
-* Arguments:
-* hw device structure
-*
-* Returns:
-* 0 success
-* >0 f/w reported error - f/w status code
-* <0 driver reported error
-*
-* Side effects:
-*
-* Call context:
-* process
-----------------------------------------------------------------*/
+ * hfa384x_drvr_flashdl_disable
+ *
+ * Ends the flash download state. Note that this will cause the MAC
+ * firmware to restart.
+ *
+ * Arguments:
+ * hw device structure
+ *
+ * Returns:
+ * 0 success
+ * >0 f/w reported error - f/w status code
+ * <0 driver reported error
+ *
+ * Side effects:
+ *
+ * Call context:
+ * process
+ *----------------------------------------------------------------
+ */
int hfa384x_drvr_flashdl_disable(struct hfa384x *hw)
{
/* Check that we're already in the download state */
@@ -1866,35 +1894,37 @@ int hfa384x_drvr_flashdl_disable(struct hfa384x *hw)
}
/*----------------------------------------------------------------
-* hfa384x_drvr_flashdl_write
-*
-* Performs a FLASH download of a chunk of data. First checks to see
-* that we're in the FLASH download state, then sets the download
-* mode, uses the aux functions to 1) copy the data to the flash
-* buffer, 2) sets the download 'write flash' mode, 3) readback and
-* compare. Lather rinse, repeat as many times an necessary to get
-* all the given data into flash.
-* When all data has been written using this function (possibly
-* repeatedly), call drvr_flashdl_disable() to end the download state
-* and restart the MAC.
-*
-* Arguments:
-* hw device structure
-* daddr Card address to write to. (host order)
-* buf Ptr to data to write.
-* len Length of data (host order).
-*
-* Returns:
-* 0 success
-* >0 f/w reported error - f/w status code
-* <0 driver reported error
-*
-* Side effects:
-*
-* Call context:
-* process
-----------------------------------------------------------------*/
-int hfa384x_drvr_flashdl_write(struct hfa384x *hw, u32 daddr, void *buf, u32 len)
+ * hfa384x_drvr_flashdl_write
+ *
+ * Performs a FLASH download of a chunk of data. First checks to see
+ * that we're in the FLASH download state, then sets the download
+ * mode, and uses the aux functions to 1) copy the data to the flash
+ * buffer, 2) set the download 'write flash' mode, and 3) read back and
+ * compare. Lather, rinse, repeat as many times as necessary to get
+ * all the given data into flash.
+ * When all data has been written using this function (possibly
+ * repeatedly), call drvr_flashdl_disable() to end the download state
+ * and restart the MAC.
+ *
+ * Arguments:
+ * hw device structure
+ * daddr Card address to write to. (host order)
+ * buf Ptr to data to write.
+ * len Length of data (host order).
+ *
+ * Returns:
+ * 0 success
+ * >0 f/w reported error - f/w status code
+ * <0 driver reported error
+ *
+ * Side effects:
+ *
+ * Call context:
+ * process
+ *----------------------------------------------------------------
+ */
+int hfa384x_drvr_flashdl_write(struct hfa384x *hw, u32 daddr,
+ void *buf, u32 len)
{
int result = 0;
u32 dlbufaddr;
@@ -2008,30 +2038,31 @@ exit_proc:
}
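The three flashdl_* entry points compose into the sequence the comments describe; chunking and error handling in this sketch are illustrative:

	result = hfa384x_drvr_flashdl_enable(hw);
	if (result == 0) {
		result = hfa384x_drvr_flashdl_write(hw, daddr, buf, len);
		/* ... repeat for the remaining chunks ... */
		hfa384x_drvr_flashdl_disable(hw);  /* restarts the MAC f/w */
	}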
/*----------------------------------------------------------------
-* hfa384x_drvr_getconfig
-*
-* Performs the sequence necessary to read a config/info item.
-*
-* Arguments:
-* hw device structure
-* rid config/info record id (host order)
-* buf host side record buffer. Upon return it will
-* contain the body portion of the record (minus the
-* RID and len).
-* len buffer length (in bytes, should match record length)
-*
-* Returns:
-* 0 success
-* >0 f/w reported error - f/w status code
-* <0 driver reported error
-* -ENODATA length mismatch between argument and retrieved
-* record.
-*
-* Side effects:
-*
-* Call context:
-* process
-----------------------------------------------------------------*/
+ * hfa384x_drvr_getconfig
+ *
+ * Performs the sequence necessary to read a config/info item.
+ *
+ * Arguments:
+ * hw device structure
+ * rid config/info record id (host order)
+ * buf host side record buffer. Upon return it will
+ * contain the body portion of the record (minus the
+ * RID and len).
+ * len buffer length (in bytes, should match record length)
+ *
+ * Returns:
+ * 0 success
+ * >0 f/w reported error - f/w status code
+ * <0 driver reported error
+ * -ENODATA length mismatch between argument and retrieved
+ * record.
+ *
+ * Side effects:
+ *
+ * Call context:
+ * process
+ *----------------------------------------------------------------
+ */
int hfa384x_drvr_getconfig(struct hfa384x *hw, u16 rid, void *buf, u16 len)
{
return hfa384x_dorrid_wait(hw, rid, buf, len);
@@ -2059,7 +2090,8 @@ int hfa384x_drvr_getconfig(struct hfa384x *hw, u16 rid, void *buf, u16 len)
*
* Call context:
* process
- ----------------------------------------------------------------*/
+ *----------------------------------------------------------------
+ */
int
hfa384x_drvr_setconfig_async(struct hfa384x *hw,
u16 rid,
@@ -2071,23 +2103,24 @@ hfa384x_drvr_setconfig_async(struct hfa384x *hw,
}
/*----------------------------------------------------------------
-* hfa384x_drvr_ramdl_disable
-*
-* Ends the ram download state.
-*
-* Arguments:
-* hw device structure
-*
-* Returns:
-* 0 success
-* >0 f/w reported error - f/w status code
-* <0 driver reported error
-*
-* Side effects:
-*
-* Call context:
-* process
-----------------------------------------------------------------*/
+ * hfa384x_drvr_ramdl_disable
+ *
+ * Ends the ram download state.
+ *
+ * Arguments:
+ * hw device structure
+ *
+ * Returns:
+ * 0 success
+ * >0 f/w reported error - f/w status code
+ * <0 driver reported error
+ *
+ * Side effects:
+ *
+ * Call context:
+ * process
+ *----------------------------------------------------------------
+ */
int hfa384x_drvr_ramdl_disable(struct hfa384x *hw)
{
/* Check that we're already in the download state */
@@ -2105,29 +2138,30 @@ int hfa384x_drvr_ramdl_disable(struct hfa384x *hw)
}
/*----------------------------------------------------------------
-* hfa384x_drvr_ramdl_enable
-*
-* Begins the ram download state. Checks to see that we're not
-* already in a download state and that a port isn't enabled.
-* Sets the download state and calls cmd_download with the
-* ENABLE_VOLATILE subcommand and the exeaddr argument.
-*
-* Arguments:
-* hw device structure
-* exeaddr the card execution address that will be
-* jumped to when ramdl_disable() is called
-* (host order).
-*
-* Returns:
-* 0 success
-* >0 f/w reported error - f/w status code
-* <0 driver reported error
-*
-* Side effects:
-*
-* Call context:
-* process
-----------------------------------------------------------------*/
+ * hfa384x_drvr_ramdl_enable
+ *
+ * Begins the ram download state. Checks to see that we're not
+ * already in a download state and that a port isn't enabled.
+ * Sets the download state and calls cmd_download with the
+ * ENABLE_VOLATILE subcommand and the exeaddr argument.
+ *
+ * Arguments:
+ * hw device structure
+ * exeaddr the card execution address that will be
+ * jumped to when ramdl_disable() is called
+ * (host order).
+ *
+ * Returns:
+ * 0 success
+ * >0 f/w reported error - f/w status code
+ * <0 driver reported error
+ *
+ * Side effects:
+ *
+ * Call context:
+ * process
+ *----------------------------------------------------------------
+ */
int hfa384x_drvr_ramdl_enable(struct hfa384x *hw, u32 exeaddr)
{
int result = 0;
@@ -2146,7 +2180,8 @@ int hfa384x_drvr_ramdl_enable(struct hfa384x *hw, u32 exeaddr)
/* Check that we're not already in a download state */
if (hw->dlstate != HFA384x_DLSTATE_DISABLED) {
- netdev_err(hw->wlandev->netdev, "Download state not disabled.\n");
+ netdev_err(hw->wlandev->netdev,
+ "Download state not disabled.\n");
return -EINVAL;
}
@@ -2171,31 +2206,32 @@ int hfa384x_drvr_ramdl_enable(struct hfa384x *hw, u32 exeaddr)
}
/*----------------------------------------------------------------
-* hfa384x_drvr_ramdl_write
-*
-* Performs a RAM download of a chunk of data. First checks to see
-* that we're in the RAM download state, then uses the [read|write]mem USB
-* commands to 1) copy the data, 2) readback and compare. The download
-* state is unaffected. When all data has been written using
-* this function, call drvr_ramdl_disable() to end the download state
-* and restart the MAC.
-*
-* Arguments:
-* hw device structure
-* daddr Card address to write to. (host order)
-* buf Ptr to data to write.
-* len Length of data (host order).
-*
-* Returns:
-* 0 success
-* >0 f/w reported error - f/w status code
-* <0 driver reported error
-*
-* Side effects:
-*
-* Call context:
-* process
-----------------------------------------------------------------*/
+ * hfa384x_drvr_ramdl_write
+ *
+ * Performs a RAM download of a chunk of data. First checks to see
+ * that we're in the RAM download state, then uses the [read|write]mem USB
+ * commands to 1) copy the data, 2) readback and compare. The download
+ * state is unaffected. When all data has been written using
+ * this function, call drvr_ramdl_disable() to end the download state
+ * and restart the MAC.
+ *
+ * Arguments:
+ * hw device structure
+ * daddr Card address to write to. (host order)
+ * buf Ptr to data to write.
+ * len Length of data (host order).
+ *
+ * Returns:
+ * 0 success
+ * >0 f/w reported error - f/w status code
+ * <0 driver reported error
+ *
+ * Side effects:
+ *
+ * Call context:
+ * process
+ *----------------------------------------------------------------
+ */
int hfa384x_drvr_ramdl_write(struct hfa384x *hw, u32 daddr, void *buf, u32 len)
{
int result = 0;
@@ -2246,36 +2282,37 @@ int hfa384x_drvr_ramdl_write(struct hfa384x *hw, u32 daddr, void *buf, u32 len)
}
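The RAM download path mirrors the flash one, with the execution address supplied up front (sketch; variables are illustrative):

	result = hfa384x_drvr_ramdl_enable(hw, exeaddr);
	if (result == 0) {
		result = hfa384x_drvr_ramdl_write(hw, daddr, image, len);
		hfa384x_drvr_ramdl_disable(hw);	/* MAC jumps to exeaddr */
	}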
/*----------------------------------------------------------------
-* hfa384x_drvr_readpda
-*
-* Performs the sequence to read the PDA space. Note there is no
-* drvr_writepda() function. Writing a PDA is
-* generally implemented by a calling component via calls to
-* cmd_download and writing to the flash download buffer via the
-* aux regs.
-*
-* Arguments:
-* hw device structure
-* buf buffer to store PDA in
-* len buffer length
-*
-* Returns:
-* 0 success
-* >0 f/w reported error - f/w status code
-* <0 driver reported error
-* -ETIMEDOUT timeout waiting for the cmd regs to become
-* available, or waiting for the control reg
-* to indicate the Aux port is enabled.
-* -ENODATA the buffer does NOT contain a valid PDA.
-* Either the card PDA is bad, or the auxdata
-* reads are giving us garbage.
-
-*
-* Side effects:
-*
-* Call context:
-* process or non-card interrupt.
-----------------------------------------------------------------*/
+ * hfa384x_drvr_readpda
+ *
+ * Performs the sequence to read the PDA space. Note there is no
+ * drvr_writepda() function. Writing a PDA is
+ * generally implemented by a calling component via calls to
+ * cmd_download and writing to the flash download buffer via the
+ * aux regs.
+ *
+ * Arguments:
+ * hw device structure
+ * buf buffer to store PDA in
+ * len buffer length
+ *
+ * Returns:
+ * 0 success
+ * >0 f/w reported error - f/w status code
+ * <0 driver reported error
+ * -ETIMEDOUT timeout waiting for the cmd regs to become
+ * available, or waiting for the control reg
+ * to indicate the Aux port is enabled.
+ * -ENODATA the buffer does NOT contain a valid PDA.
+ * Either the card PDA is bad, or the auxdata
+ * reads are giving us garbage.
+ *
+ * Side effects:
+ *
+ * Call context:
+ * process or non-card interrupt.
+ *----------------------------------------------------------------
+ */
int hfa384x_drvr_readpda(struct hfa384x *hw, void *buf, unsigned int len)
{
int result = 0;
@@ -2306,7 +2343,7 @@ int hfa384x_drvr_readpda(struct hfa384x *hw, void *buf, unsigned int len)
/* units of bytes */
result = hfa384x_dormem_wait(hw, currpage, curroffset, buf,
- len);
+ len);
if (result) {
netdev_warn(hw->wlandev->netdev,
@@ -2366,51 +2403,52 @@ int hfa384x_drvr_readpda(struct hfa384x *hw, void *buf, unsigned int len)
}
/*----------------------------------------------------------------
-* hfa384x_drvr_setconfig
-*
-* Performs the sequence necessary to write a config/info item.
-*
-* Arguments:
-* hw device structure
-* rid config/info record id (in host order)
-* buf host side record buffer
-* len buffer length (in bytes)
-*
-* Returns:
-* 0 success
-* >0 f/w reported error - f/w status code
-* <0 driver reported error
-*
-* Side effects:
-*
-* Call context:
-* process
-----------------------------------------------------------------*/
+ * hfa384x_drvr_setconfig
+ *
+ * Performs the sequence necessary to write a config/info item.
+ *
+ * Arguments:
+ * hw device structure
+ * rid config/info record id (in host order)
+ * buf host side record buffer
+ * len buffer length (in bytes)
+ *
+ * Returns:
+ * 0 success
+ * >0 f/w reported error - f/w status code
+ * <0 driver reported error
+ *
+ * Side effects:
+ *
+ * Call context:
+ * process
+ *----------------------------------------------------------------
+ */
int hfa384x_drvr_setconfig(struct hfa384x *hw, u16 rid, void *buf, u16 len)
{
return hfa384x_dowrid_wait(hw, rid, buf, len);
}
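Taken together with drvr_getconfig() above, a RID round-trip looks like this sketch; HFA384x_RID_DOWNLOADBUFFER, hw->bufinfo, and the byte-order fixup all appear earlier in this patch, while rid/buf/len here are placeholders:

	result = hfa384x_drvr_getconfig(hw, HFA384x_RID_DOWNLOADBUFFER,
					&hw->bufinfo, sizeof(hw->bufinfo));
	if (result == 0)
		hw->bufinfo.offset = le16_to_cpu(hw->bufinfo.offset);

	/* writes go through the symmetric wrapper */
	result = hfa384x_drvr_setconfig(hw, rid, buf, len);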
/*----------------------------------------------------------------
-* hfa384x_drvr_start
-*
-* Issues the MAC initialize command, sets up some data structures,
-* and enables the interrupts. After this function completes, the
-* low-level stuff should be ready for any/all commands.
-*
-* Arguments:
-* hw device structure
-* Returns:
-* 0 success
-* >0 f/w reported error - f/w status code
-* <0 driver reported error
-*
-* Side effects:
-*
-* Call context:
-* process
-----------------------------------------------------------------*/
-
+ * hfa384x_drvr_start
+ *
+ * Issues the MAC initialize command, sets up some data structures,
+ * and enables the interrupts. After this function completes, the
+ * low-level stuff should be ready for any/all commands.
+ *
+ * Arguments:
+ * hw device structure
+ * Returns:
+ * 0 success
+ * >0 f/w reported error - f/w status code
+ * <0 driver reported error
+ *
+ * Side effects:
+ *
+ * Call context:
+ * process
+ *----------------------------------------------------------------
+ */
int hfa384x_drvr_start(struct hfa384x *hw)
{
int result, result1, result2;
@@ -2494,24 +2532,25 @@ done:
}
/*----------------------------------------------------------------
-* hfa384x_drvr_stop
-*
-* Shuts down the MAC to the point where it is safe to unload the
-* driver. Any subsystem that may be holding a data or function
-* ptr into the driver must be cleared/deinitialized.
-*
-* Arguments:
-* hw device structure
-* Returns:
-* 0 success
-* >0 f/w reported error - f/w status code
-* <0 driver reported error
-*
-* Side effects:
-*
-* Call context:
-* process
-----------------------------------------------------------------*/
+ * hfa384x_drvr_stop
+ *
+ * Shuts down the MAC to the point where it is safe to unload the
+ * driver. Any subsystem that may be holding a data or function
+ * ptr into the driver must be cleared/deinitialized.
+ *
+ * Arguments:
+ * hw device structure
+ * Returns:
+ * 0 success
+ * >0 f/w reported error - f/w status code
+ * <0 driver reported error
+ *
+ * Side effects:
+ *
+ * Call context:
+ * process
+ *----------------------------------------------------------------
+ */
int hfa384x_drvr_stop(struct hfa384x *hw)
{
int i;
@@ -2542,26 +2581,27 @@ int hfa384x_drvr_stop(struct hfa384x *hw)
}
/*----------------------------------------------------------------
-* hfa384x_drvr_txframe
-*
-* Takes a frame from prism2sta and queues it for transmission.
-*
-* Arguments:
-* hw device structure
-* skb packet buffer struct. Contains an 802.11
-* data frame.
-* p80211_hdr points to the 802.11 header for the packet.
-* Returns:
-* 0 Success and more buffs available
-* 1 Success but no more buffs
-* 2 Allocation failure
-* 4 Buffer full or queue busy
-*
-* Side effects:
-*
-* Call context:
-* interrupt
-----------------------------------------------------------------*/
+ * hfa384x_drvr_txframe
+ *
+ * Takes a frame from prism2sta and queues it for transmission.
+ *
+ * Arguments:
+ * hw device structure
+ * skb packet buffer struct. Contains an 802.11
+ * data frame.
+ * p80211_hdr points to the 802.11 header for the packet.
+ * Returns:
+ * 0 Success and more buffs available
+ * 1 Success but no more buffs
+ * 2 Allocation failure
+ * 4 Buffer full or queue busy
+ *
+ * Side effects:
+ *
+ * Call context:
+ * interrupt
+ *----------------------------------------------------------------
+ */
int hfa384x_drvr_txframe(struct hfa384x *hw, struct sk_buff *skb,
union p80211_hdr *p80211_hdr,
struct p80211_metawep *p80211_wep)
@@ -2608,7 +2648,7 @@ int hfa384x_drvr_txframe(struct hfa384x *hw, struct sk_buff *skb,
cpu_to_le16(hw->txbuff.txfrm.desc.tx_control);
/* copy the header over to the txdesc */
- memcpy(&(hw->txbuff.txfrm.desc.frame_control), p80211_hdr,
+ memcpy(&hw->txbuff.txfrm.desc.frame_control, p80211_hdr,
sizeof(union p80211_hdr));
/* if we're using host WEP, increase size by IV+ICV */
@@ -2638,9 +2678,9 @@ int hfa384x_drvr_txframe(struct hfa384x *hw, struct sk_buff *skb,
memcpy(ptr, p80211_wep->icv, sizeof(p80211_wep->icv));
/* Send the USB packet */
- usb_fill_bulk_urb(&(hw->tx_urb), hw->usb,
+ usb_fill_bulk_urb(&hw->tx_urb, hw->usb,
hw->endp_out,
- &(hw->txbuff), ROUNDUP64(usbpktlen),
+ &hw->txbuff, ROUNDUP64(usbpktlen),
hfa384x_usbout_callback, hw->wlandev);
hw->tx_urb.transfer_flags |= USB_QUEUE_BULK;
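
The &(hw->tx_urb) to &hw->tx_urb changes in this hunk (and the similar ones later in the file) are purely cosmetic: member access binds tighter than unary '&', so the parentheses were redundant. In isolation:

	struct demo { int x; };

	static void example_addr_of_member(struct demo *d)
	{
		int *old_style = &(d->x);	/* parenthesized form being removed */
		int *new_style = &d->x;		/* equivalent, checkpatch-clean form */

		WARN_ON(old_style != new_style);	/* never fires */
	}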
@@ -2676,18 +2716,19 @@ void hfa384x_tx_timeout(struct wlandevice *wlandev)
}
/*----------------------------------------------------------------
-* hfa384x_usbctlx_reaper_task
-*
-* Tasklet to delete dead CTLX objects
-*
-* Arguments:
-* data ptr to a struct hfa384x
-*
-* Returns:
-*
-* Call context:
-* Interrupt
-----------------------------------------------------------------*/
+ * hfa384x_usbctlx_reaper_task
+ *
+ * Tasklet to delete dead CTLX objects
+ *
+ * Arguments:
+ * data ptr to a struct hfa384x
+ *
+ * Returns:
+ *
+ * Call context:
+ * Interrupt
+ *----------------------------------------------------------------
+ */
static void hfa384x_usbctlx_reaper_task(unsigned long data)
{
struct hfa384x *hw = (struct hfa384x *)data;
@@ -2708,19 +2749,20 @@ static void hfa384x_usbctlx_reaper_task(unsigned long data)
}
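
This reaper tasklet, like the completion tasklet below, receives its struct hfa384x through the tasklet's unsigned long data argument, hence the cast at the top of each body. A sketch of the usual wiring at create time (the reaper_bh field name is an assumption; the driver registers its tasklets along these lines):

	static void example_wire_reaper(struct hfa384x *hw)
	{
		/* The struct pointer rides in the 'data' word and is cast
		 * back inside hfa384x_usbctlx_reaper_task().
		 */
		tasklet_init(&hw->reaper_bh, hfa384x_usbctlx_reaper_task,
			     (unsigned long)hw);

		tasklet_schedule(&hw->reaper_bh);	/* typically from IRQ context */
	}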
/*----------------------------------------------------------------
-* hfa384x_usbctlx_completion_task
-*
-* Tasklet to call completion handlers for returned CTLXs
-*
-* Arguments:
-* data ptr to struct hfa384x
-*
-* Returns:
-* Nothing
-*
-* Call context:
-* Interrupt
-----------------------------------------------------------------*/
+ * hfa384x_usbctlx_completion_task
+ *
+ * Tasklet to call completion handlers for returned CTLXs
+ *
+ * Arguments:
+ * data ptr to struct hfa384x
+ *
+ * Returns:
+ * Nothing
+ *
+ * Call context:
+ * Interrupt
+ *----------------------------------------------------------------
+ */
static void hfa384x_usbctlx_completion_task(unsigned long data)
{
struct hfa384x *hw = (struct hfa384x *)data;
@@ -2781,22 +2823,23 @@ static void hfa384x_usbctlx_completion_task(unsigned long data)
}
/*----------------------------------------------------------------
-* unlocked_usbctlx_cancel_async
-*
-* Mark the CTLX dead asynchronously, and ensure that the
-* next command on the queue is run afterwards.
-*
-* Arguments:
-* hw ptr to the struct hfa384x structure
-* ctlx ptr to a CTLX structure
-*
-* Returns:
-* 0 the CTLX's URB is inactive
-* -EINPROGRESS the URB is currently being unlinked
-*
-* Call context:
-* Either process or interrupt, but presumably interrupt
-----------------------------------------------------------------*/
+ * unlocked_usbctlx_cancel_async
+ *
+ * Mark the CTLX dead asynchronously, and ensure that the
+ * next command on the queue is run afterwards.
+ *
+ * Arguments:
+ * hw ptr to the struct hfa384x structure
+ * ctlx ptr to a CTLX structure
+ *
+ * Returns:
+ * 0 the CTLX's URB is inactive
+ * -EINPROGRESS the URB is currently being unlinked
+ *
+ * Call context:
+ * Either process or interrupt, but presumably interrupt
+ *----------------------------------------------------------------
+ */
static int unlocked_usbctlx_cancel_async(struct hfa384x *hw,
struct hfa384x_usbctlx *ctlx)
{
@@ -2826,28 +2869,30 @@ static int unlocked_usbctlx_cancel_async(struct hfa384x *hw,
}
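
The 0 / -EINPROGRESS convention above mirrors usb_unlink_urb(): cancellation may complete later in the URB callback, so callers must not assume the CTLX is dead on return. An illustrative caller (not the driver's verbatim call site):

	static void example_cancel(struct hfa384x *hw,
				   struct hfa384x_usbctlx *ctlx)
	{
		if (unlocked_usbctlx_cancel_async(hw, ctlx) == -EINPROGRESS) {
			/* Unlink is pending; the URB completion callback
			 * will finish retiring the CTLX, so don't touch
			 * it here.
			 */
			return;
		}

		/* Returned 0: the URB was already inactive and the CTLX
		 * can move to its terminal state now.
		 */
	}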
/*----------------------------------------------------------------
-* unlocked_usbctlx_complete
-*
-* A CTLX has completed. It may have been successful, it may not
-* have been. At this point, the CTLX should be quiescent. The URBs
-* aren't active and the timers should have been stopped.
-*
-* The CTLX is migrated to the "completing" queue, and the completing
-* tasklet is scheduled.
-*
-* Arguments:
-* hw ptr to a struct hfa384x structure
-* ctlx ptr to a ctlx structure
-*
-* Returns:
-* nothing
-*
-* Side effects:
-*
-* Call context:
-* Either, assume interrupt
-----------------------------------------------------------------*/
-static void unlocked_usbctlx_complete(struct hfa384x *hw, struct hfa384x_usbctlx *ctlx)
+ * unlocked_usbctlx_complete
+ *
+ * A CTLX has completed. It may have been successful, it may not
+ * have been. At this point, the CTLX should be quiescent. The URBs
+ * aren't active and the timers should have been stopped.
+ *
+ * The CTLX is migrated to the "completing" queue, and the completing
+ * tasklet is scheduled.
+ *
+ * Arguments:
+ * hw ptr to a struct hfa384x structure
+ * ctlx ptr to a ctlx structure
+ *
+ * Returns:
+ * nothing
+ *
+ * Side effects:
+ *
+ * Call context:
+ * Either, assume interrupt
+ *----------------------------------------------------------------
+ */
+static void unlocked_usbctlx_complete(struct hfa384x *hw,
+ struct hfa384x_usbctlx *ctlx)
{
/* Timers have been stopped, and ctlx should be in
* a terminal state. Retire it from the "active"
@@ -2871,21 +2916,22 @@ static void unlocked_usbctlx_complete(struct hfa384x *hw, struct hfa384x_usbctlx
}
/*----------------------------------------------------------------
-* hfa384x_usbctlxq_run
-*
-* Checks to see if the head item is running. If not, starts it.
-*
-* Arguments:
-* hw ptr to struct hfa384x
-*
-* Returns:
-* nothing
-*
-* Side effects:
-*
-* Call context:
-* any
-----------------------------------------------------------------*/
+ * hfa384x_usbctlxq_run
+ *
+ * Checks to see if the head item is running. If not, starts it.
+ *
+ * Arguments:
+ * hw ptr to struct hfa384x
+ *
+ * Returns:
+ * nothing
+ *
+ * Side effects:
+ *
+ * Call context:
+ * any
+ *----------------------------------------------------------------
+ */
static void hfa384x_usbctlxq_run(struct hfa384x *hw)
{
unsigned long flags;
@@ -2916,9 +2962,9 @@ static void hfa384x_usbctlxq_run(struct hfa384x *hw)
list_move_tail(&head->list, &hw->ctlxq.active);
/* Fill the out packet */
- usb_fill_bulk_urb(&(hw->ctlx_urb), hw->usb,
+ usb_fill_bulk_urb(&hw->ctlx_urb, hw->usb,
hw->endp_out,
- &(head->outbuf), ROUNDUP64(head->outbufsize),
+ &head->outbuf, ROUNDUP64(head->outbufsize),
hfa384x_ctlxout_callback, hw);
hw->ctlx_urb.transfer_flags |= USB_QUEUE_BULK;
@@ -2971,26 +3017,27 @@ unlock:
}
/*----------------------------------------------------------------
-* hfa384x_usbin_callback
-*
-* Callback for URBs on the BULKIN endpoint.
-*
-* Arguments:
-* urb ptr to the completed urb
-*
-* Returns:
-* nothing
-*
-* Side effects:
-*
-* Call context:
-* interrupt
-----------------------------------------------------------------*/
+ * hfa384x_usbin_callback
+ *
+ * Callback for URBs on the BULKIN endpoint.
+ *
+ * Arguments:
+ * urb ptr to the completed urb
+ *
+ * Returns:
+ * nothing
+ *
+ * Side effects:
+ *
+ * Call context:
+ * interrupt
+ *----------------------------------------------------------------
+ */
static void hfa384x_usbin_callback(struct urb *urb)
{
struct wlandevice *wlandev = urb->context;
struct hfa384x *hw;
- union hfa384x_usbin *usbin = (union hfa384x_usbin *)urb->transfer_buffer;
+ union hfa384x_usbin *usbin;
struct sk_buff *skb = NULL;
int result;
int urb_status;
@@ -3010,7 +3057,10 @@ static void hfa384x_usbin_callback(struct urb *urb)
goto exit;
skb = hw->rx_urb_skb;
- BUG_ON(!skb || (skb->data != urb->transfer_buffer));
+ if (!skb || (skb->data != urb->transfer_buffer)) {
+ WARN_ON(1);
+ return;
+ }
hw->rx_urb_skb = NULL;
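
The hunk above is a behavioral fix rather than a reflow: a mismatched skb/transfer_buffer pair used to panic the machine through BUG_ON(); it now logs a backtrace via WARN_ON(1) and abandons just this callback. The conversion pattern in general form:

	/* Before: any inconsistency kills the kernel. */
	BUG_ON(!skb || skb->data != urb->transfer_buffer);

	/* After: report loudly, then fail the single operation instead. */
	if (!skb || skb->data != urb->transfer_buffer) {
		WARN_ON(1);
		return;		/* drop this URB; the system keeps running */
	}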
@@ -3089,6 +3139,7 @@ static void hfa384x_usbin_callback(struct urb *urb)
/* Note: the check of the sw_support field, the type field doesn't
* have bit 12 set like the docs suggest.
*/
+ usbin = (union hfa384x_usbin *)urb->transfer_buffer;
type = le16_to_cpu(usbin->type);
if (HFA384x_USB_ISRXFRM(type)) {
if (action == HANDLE) {
@@ -3147,25 +3198,26 @@ exit:
}
/*----------------------------------------------------------------
-* hfa384x_usbin_ctlx
-*
-* We've received a URB containing a Prism2 "response" message.
-* This message needs to be matched up with a CTLX on the active
-* queue and our state updated accordingly.
-*
-* Arguments:
-* hw ptr to struct hfa384x
-* usbin ptr to USB IN packet
-* urb_status status of this Bulk-In URB
-*
-* Returns:
-* nothing
-*
-* Side effects:
-*
-* Call context:
-* interrupt
-----------------------------------------------------------------*/
+ * hfa384x_usbin_ctlx
+ *
+ * We've received a URB containing a Prism2 "response" message.
+ * This message needs to be matched up with a CTLX on the active
+ * queue and our state updated accordingly.
+ *
+ * Arguments:
+ * hw ptr to struct hfa384x
+ * usbin ptr to USB IN packet
+ * urb_status status of this Bulk-In URB
+ *
+ * Returns:
+ * nothing
+ *
+ * Side effects:
+ *
+ * Call context:
+ * interrupt
+ *----------------------------------------------------------------
+ */
static void hfa384x_usbin_ctlx(struct hfa384x *hw, union hfa384x_usbin *usbin,
int urb_status)
{
@@ -3269,22 +3321,23 @@ unlock:
}
/*----------------------------------------------------------------
-* hfa384x_usbin_txcompl
-*
-* At this point we have the results of a previous transmit.
-*
-* Arguments:
-* wlandev wlan device
-* usbin ptr to the usb transfer buffer
-*
-* Returns:
-* nothing
-*
-* Side effects:
-*
-* Call context:
-* interrupt
-----------------------------------------------------------------*/
+ * hfa384x_usbin_txcompl
+ *
+ * At this point we have the results of a previous transmit.
+ *
+ * Arguments:
+ * wlandev wlan device
+ * usbin ptr to the usb transfer buffer
+ *
+ * Returns:
+ * nothing
+ *
+ * Side effects:
+ *
+ * Call context:
+ * interrupt
+ *----------------------------------------------------------------
+ */
static void hfa384x_usbin_txcompl(struct wlandevice *wlandev,
union hfa384x_usbin *usbin)
{
@@ -3300,22 +3353,23 @@ static void hfa384x_usbin_txcompl(struct wlandevice *wlandev,
}
/*----------------------------------------------------------------
-* hfa384x_usbin_rx
-*
-* At this point we have a successful received a rx frame packet.
-*
-* Arguments:
-* wlandev wlan device
-* usbin ptr to the usb transfer buffer
-*
-* Returns:
-* nothing
-*
-* Side effects:
-*
-* Call context:
-* interrupt
-----------------------------------------------------------------*/
+ * hfa384x_usbin_rx
+ *
+ * At this point we have successfully received an rx frame packet.
+ *
+ * Arguments:
+ * wlandev wlan device
+ * usbin ptr to the usb transfer buffer
+ *
+ * Returns:
+ * nothing
+ *
+ * Side effects:
+ *
+ * Call context:
+ * interrupt
+ *----------------------------------------------------------------
+ */
static void hfa384x_usbin_rx(struct wlandevice *wlandev, struct sk_buff *skb)
{
union hfa384x_usbin *usbin = (union hfa384x_usbin *)skb->data;
@@ -3396,30 +3450,31 @@ static void hfa384x_usbin_rx(struct wlandevice *wlandev, struct sk_buff *skb)
}
/*----------------------------------------------------------------
-* hfa384x_int_rxmonitor
-*
-* Helper function for int_rx. Handles monitor frames.
-* Note that this function allocates space for the FCS and sets it
-* to 0xffffffff. The hfa384x doesn't give us the FCS value but the
-* higher layers expect it. 0xffffffff is used as a flag to indicate
-* the FCS is bogus.
-*
-* Arguments:
-* wlandev wlan device structure
-* rxfrm rx descriptor read from card in int_rx
-*
-* Returns:
-* nothing
-*
-* Side effects:
-* Allocates an skb and passes it up via the PF_PACKET interface.
-* Call context:
-* interrupt
-----------------------------------------------------------------*/
+ * hfa384x_int_rxmonitor
+ *
+ * Helper function for int_rx. Handles monitor frames.
+ * Note that this function allocates space for the FCS and sets it
+ * to 0xffffffff. The hfa384x doesn't give us the FCS value but the
+ * higher layers expect it. 0xffffffff is used as a flag to indicate
+ * the FCS is bogus.
+ *
+ * Arguments:
+ * wlandev wlan device structure
+ * rxfrm rx descriptor read from card in int_rx
+ *
+ * Returns:
+ * nothing
+ *
+ * Side effects:
+ * Allocates an skb and passes it up via the PF_PACKET interface.
+ * Call context:
+ * interrupt
+ *----------------------------------------------------------------
+ */
static void hfa384x_int_rxmonitor(struct wlandevice *wlandev,
struct hfa384x_usb_rxfrm *rxfrm)
{
- struct hfa384x_rx_frame *rxdesc = &(rxfrm->desc);
+ struct hfa384x_rx_frame *rxdesc = &rxfrm->desc;
unsigned int hdrlen = 0;
unsigned int datalen = 0;
unsigned int skblen = 0;
@@ -3474,9 +3529,10 @@ static void hfa384x_int_rxmonitor(struct wlandevice *wlandev,
}
/* Copy the 802.11 header to the skb
- (ctl frames may be less than a full header) */
+ * (ctl frames may be less than a full header)
+ */
datap = skb_put(skb, hdrlen);
- memcpy(datap, &(rxdesc->frame_control), hdrlen);
+ memcpy(datap, &rxdesc->frame_control, hdrlen);
/* If any, copy the data from the card to the skb */
if (datalen > 0) {
@@ -3501,22 +3557,23 @@ static void hfa384x_int_rxmonitor(struct wlandevice *wlandev,
}
/*----------------------------------------------------------------
-* hfa384x_usbin_info
-*
-* At this point we have a successful received a Prism2 info frame.
-*
-* Arguments:
-* wlandev wlan device
-* usbin ptr to the usb transfer buffer
-*
-* Returns:
-* nothing
-*
-* Side effects:
-*
-* Call context:
-* interrupt
-----------------------------------------------------------------*/
+ * hfa384x_usbin_info
+ *
+ * At this point we have successfully received a Prism2 info frame.
+ *
+ * Arguments:
+ * wlandev wlan device
+ * usbin ptr to the usb transfer buffer
+ *
+ * Returns:
+ * nothing
+ *
+ * Side effects:
+ *
+ * Call context:
+ * interrupt
+ *----------------------------------------------------------------
+ */
static void hfa384x_usbin_info(struct wlandevice *wlandev,
union hfa384x_usbin *usbin)
{
@@ -3526,21 +3583,22 @@ static void hfa384x_usbin_info(struct wlandevice *wlandev,
}
/*----------------------------------------------------------------
-* hfa384x_usbout_callback
-*
-* Callback for URBs on the BULKOUT endpoint.
-*
-* Arguments:
-* urb ptr to the completed urb
-*
-* Returns:
-* nothing
-*
-* Side effects:
-*
-* Call context:
-* interrupt
-----------------------------------------------------------------*/
+ * hfa384x_usbout_callback
+ *
+ * Callback for URBs on the BULKOUT endpoint.
+ *
+ * Arguments:
+ * urb ptr to the completed urb
+ *
+ * Returns:
+ * nothing
+ *
+ * Side effects:
+ *
+ * Call context:
+ * interrupt
+ *----------------------------------------------------------------
+ */
static void hfa384x_usbout_callback(struct urb *urb)
{
struct wlandevice *wlandev = urb->context;
@@ -3601,21 +3659,22 @@ static void hfa384x_usbout_callback(struct urb *urb)
}
/*----------------------------------------------------------------
-* hfa384x_ctlxout_callback
-*
-* Callback for control data on the BULKOUT endpoint.
-*
-* Arguments:
-* urb ptr to the completed urb
-*
-* Returns:
-* nothing
-*
-* Side effects:
-*
-* Call context:
-* interrupt
-----------------------------------------------------------------*/
+ * hfa384x_ctlxout_callback
+ *
+ * Callback for control data on the BULKOUT endpoint.
+ *
+ * Arguments:
+ * urb ptr to the completed urb
+ *
+ * Returns:
+ * nothing
+ *
+ * Side effects:
+ *
+ * Call context:
+ * interrupt
+ *----------------------------------------------------------------
+ */
static void hfa384x_ctlxout_callback(struct urb *urb)
{
struct hfa384x *hw = urb->context;
@@ -3730,23 +3789,24 @@ delresp:
}
/*----------------------------------------------------------------
-* hfa384x_usbctlx_reqtimerfn
-*
-* Timer response function for CTLX request timeouts. If this
-* function is called, it means that the callback for the OUT
-* URB containing a Prism2.x XXX_Request was never called.
-*
-* Arguments:
-* data a ptr to the struct hfa384x
-*
-* Returns:
-* nothing
-*
-* Side effects:
-*
-* Call context:
-* interrupt
-----------------------------------------------------------------*/
+ * hfa384x_usbctlx_reqtimerfn
+ *
+ * Timer response function for CTLX request timeouts. If this
+ * function is called, it means that the callback for the OUT
+ * URB containing a Prism2.x XXX_Request was never called.
+ *
+ * Arguments:
+ * data a ptr to the struct hfa384x
+ *
+ * Returns:
+ * nothing
+ *
+ * Side effects:
+ *
+ * Call context:
+ * interrupt
+ *----------------------------------------------------------------
+ */
static void hfa384x_usbctlx_reqtimerfn(unsigned long data)
{
struct hfa384x *hw = (struct hfa384x *)data;
@@ -3788,23 +3848,24 @@ static void hfa384x_usbctlx_reqtimerfn(unsigned long data)
}
/*----------------------------------------------------------------
-* hfa384x_usbctlx_resptimerfn
-*
-* Timer response function for CTLX response timeouts. If this
-* function is called, it means that the callback for the IN
-* URB containing a Prism2.x XXX_Response was never called.
-*
-* Arguments:
-* data a ptr to the struct hfa384x
-*
-* Returns:
-* nothing
-*
-* Side effects:
-*
-* Call context:
-* interrupt
-----------------------------------------------------------------*/
+ * hfa384x_usbctlx_resptimerfn
+ *
+ * Timer response function for CTLX response timeouts. If this
+ * function is called, it means that the callback for the IN
+ * URB containing a Prism2.x XXX_Response was never called.
+ *
+ * Arguments:
+ * data a ptr to the struct hfa384x
+ *
+ * Returns:
+ * nothing
+ *
+ * Side effects:
+ *
+ * Call context:
+ * interrupt
+ *----------------------------------------------------------------
+ */
static void hfa384x_usbctlx_resptimerfn(unsigned long data)
{
struct hfa384x *hw = (struct hfa384x *)data;
@@ -3830,20 +3891,21 @@ static void hfa384x_usbctlx_resptimerfn(unsigned long data)
}
/*----------------------------------------------------------------
-* hfa384x_usb_throttlefn
-*
-*
-* Arguments:
-* data ptr to hw
-*
-* Returns:
-* Nothing
-*
-* Side effects:
-*
-* Call context:
-* Interrupt
-----------------------------------------------------------------*/
+ * hfa384x_usb_throttlefn
+ *
+ * Timer function for the USB traffic throttle: when the throttle
+ * period expires, it lets postponed rx/tx traffic resume.
+ *
+ * Arguments:
+ * data ptr to hw
+ *
+ * Returns:
+ * Nothing
+ *
+ * Side effects:
+ *
+ * Call context:
+ * Interrupt
+ *----------------------------------------------------------------
+ */
static void hfa384x_usb_throttlefn(unsigned long data)
{
struct hfa384x *hw = (struct hfa384x *)data;
@@ -3869,24 +3931,26 @@ static void hfa384x_usb_throttlefn(unsigned long data)
}
/*----------------------------------------------------------------
-* hfa384x_usbctlx_submit
-*
-* Called from the doxxx functions to submit a CTLX to the queue
-*
-* Arguments:
-* hw ptr to the hw struct
-* ctlx ctlx structure to enqueue
-*
-* Returns:
-* -ENODEV if the adapter is unplugged
-* 0
-*
-* Side effects:
-*
-* Call context:
-* process or interrupt
-----------------------------------------------------------------*/
-static int hfa384x_usbctlx_submit(struct hfa384x *hw, struct hfa384x_usbctlx *ctlx)
+ * hfa384x_usbctlx_submit
+ *
+ * Called from the doxxx functions to submit a CTLX to the queue
+ *
+ * Arguments:
+ * hw ptr to the hw struct
+ * ctlx ctlx structure to enqueue
+ *
+ * Returns:
+ * -ENODEV if the adapter is unplugged
+ * 0
+ *
+ * Side effects:
+ *
+ * Call context:
+ * process or interrupt
+ *----------------------------------------------------------------
+ */
+static int hfa384x_usbctlx_submit(struct hfa384x *hw,
+ struct hfa384x_usbctlx *ctlx)
{
unsigned long flags;
@@ -3906,21 +3970,22 @@ static int hfa384x_usbctlx_submit(struct hfa384x *hw, struct hfa384x_usbctlx *ct
}
/*----------------------------------------------------------------
-* hfa384x_isgood_pdrcore
-*
-* Quick check of PDR codes.
-*
-* Arguments:
-* pdrcode PDR code number (host order)
-*
-* Returns:
-* zero not good.
-* one is good.
-*
-* Side effects:
-*
-* Call context:
-----------------------------------------------------------------*/
+ * hfa384x_isgood_pdrcode
+ *
+ * Quick check of PDR codes.
+ *
+ * Arguments:
+ * pdrcode PDR code number (host order)
+ *
+ * Returns:
+ * zero if not good.
+ * one if good.
+ *
+ * Side effects:
+ *
+ * Call context:
+ *----------------------------------------------------------------
+ */
static int hfa384x_isgood_pdrcode(u16 pdrcode)
{
switch (pdrcode) {
diff --git a/drivers/staging/wlan-ng/p80211conv.c b/drivers/staging/wlan-ng/p80211conv.c
index 0247cbc29145..8387e6a3031a 100644
--- a/drivers/staging/wlan-ng/p80211conv.c
+++ b/drivers/staging/wlan-ng/p80211conv.c
@@ -1,56 +1,56 @@
/* src/p80211/p80211conv.c
-*
-* Ether/802.11 conversions and packet buffer routines
-*
-* Copyright (C) 1999 AbsoluteValue Systems, Inc. All Rights Reserved.
-* --------------------------------------------------------------------
-*
-* linux-wlan
-*
-* The contents of this file are subject to the Mozilla Public
-* License Version 1.1 (the "License"); you may not use this file
-* except in compliance with the License. You may obtain a copy of
-* the License at http://www.mozilla.org/MPL/
-*
-* Software distributed under the License is distributed on an "AS
-* IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
-* implied. See the License for the specific language governing
-* rights and limitations under the License.
-*
-* Alternatively, the contents of this file may be used under the
-* terms of the GNU Public License version 2 (the "GPL"), in which
-* case the provisions of the GPL are applicable instead of the
-* above. If you wish to allow the use of your version of this file
-* only under the terms of the GPL and not to allow others to use
-* your version of this file under the MPL, indicate your decision
-* by deleting the provisions above and replace them with the notice
-* and other provisions required by the GPL. If you do not delete
-* the provisions above, a recipient may use your version of this
-* file under either the MPL or the GPL.
-*
-* --------------------------------------------------------------------
-*
-* Inquiries regarding the linux-wlan Open Source project can be
-* made directly to:
-*
-* AbsoluteValue Systems Inc.
-* info@linux-wlan.com
-* http://www.linux-wlan.com
-*
-* --------------------------------------------------------------------
-*
-* Portions of the development of this software were funded by
-* Intersil Corporation as part of PRISM(R) chipset product development.
-*
-* --------------------------------------------------------------------
-*
-* This file defines the functions that perform Ethernet to/from
-* 802.11 frame conversions.
-*
-* --------------------------------------------------------------------
-*
-*================================================================
-*/
+ *
+ * Ether/802.11 conversions and packet buffer routines
+ *
+ * Copyright (C) 1999 AbsoluteValue Systems, Inc. All Rights Reserved.
+ * --------------------------------------------------------------------
+ *
+ * linux-wlan
+ *
+ * The contents of this file are subject to the Mozilla Public
+ * License Version 1.1 (the "License"); you may not use this file
+ * except in compliance with the License. You may obtain a copy of
+ * the License at http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS
+ * IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
+ * implied. See the License for the specific language governing
+ * rights and limitations under the License.
+ *
+ * Alternatively, the contents of this file may be used under the
+ * terms of the GNU Public License version 2 (the "GPL"), in which
+ * case the provisions of the GPL are applicable instead of the
+ * above. If you wish to allow the use of your version of this file
+ * only under the terms of the GPL and not to allow others to use
+ * your version of this file under the MPL, indicate your decision
+ * by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL. If you do not delete
+ * the provisions above, a recipient may use your version of this
+ * file under either the MPL or the GPL.
+ *
+ * --------------------------------------------------------------------
+ *
+ * Inquiries regarding the linux-wlan Open Source project can be
+ * made directly to:
+ *
+ * AbsoluteValue Systems Inc.
+ * info@linux-wlan.com
+ * http://www.linux-wlan.com
+ *
+ * --------------------------------------------------------------------
+ *
+ * Portions of the development of this software were funded by
+ * Intersil Corporation as part of PRISM(R) chipset product development.
+ *
+ * --------------------------------------------------------------------
+ *
+ * This file defines the functions that perform Ethernet to/from
+ * 802.11 frame conversions.
+ *
+ * --------------------------------------------------------------------
+ *
+ *================================================================
+ */
#include <linux/module.h>
#include <linux/kernel.h>
@@ -79,31 +79,31 @@ static const u8 oui_rfc1042[] = { 0x00, 0x00, 0x00 };
static const u8 oui_8021h[] = { 0x00, 0x00, 0xf8 };
/*----------------------------------------------------------------
-* p80211pb_ether_to_80211
-*
-* Uses the contents of the ether frame and the etherconv setting
-* to build the elements of the 802.11 frame.
-*
-* We don't actually set
-* up the frame header here. That's the MAC's job. We're only handling
-* conversion of DIXII or 802.3+LLC frames to something that works
-* with 802.11.
-*
-* Note -- 802.11 header is NOT part of the skb. Likewise, the 802.11
-* FCS is also not present and will need to be added elsewhere.
-*
-* Arguments:
-* ethconv Conversion type to perform
-* skb skbuff containing the ether frame
-* p80211_hdr 802.11 header
-*
-* Returns:
-* 0 on success, non-zero otherwise
-*
-* Call context:
-* May be called in interrupt or non-interrupt context
-*----------------------------------------------------------------
-*/
+ * p80211pb_ether_to_80211
+ *
+ * Uses the contents of the ether frame and the etherconv setting
+ * to build the elements of the 802.11 frame.
+ *
+ * We don't actually set up the frame header here. That's the MAC's
+ * job. We're only handling conversion of DIXII or 802.3+LLC frames
+ * to something that works with 802.11.
+ *
+ * Note -- 802.11 header is NOT part of the skb. Likewise, the 802.11
+ * FCS is also not present and will need to be added elsewhere.
+ *
+ * Arguments:
+ * ethconv Conversion type to perform
+ * skb skbuff containing the ether frame
+ * p80211_hdr 802.11 header
+ *
+ * Returns:
+ * 0 on success, non-zero otherwise
+ *
+ * Call context:
+ * May be called in interrupt or non-interrupt context
+ *----------------------------------------------------------------
+ */
int skb_ether_to_p80211(struct wlandevice *wlandev, u32 ethconv,
struct sk_buff *skb, union p80211_hdr *p80211_hdr,
struct p80211_metawep *p80211_wep)
@@ -255,25 +255,25 @@ static void orinoco_spy_gather(struct wlandevice *wlandev, char *mac,
}
/*----------------------------------------------------------------
-* p80211pb_80211_to_ether
-*
-* Uses the contents of a received 802.11 frame and the etherconv
-* setting to build an ether frame.
-*
-* This function extracts the src and dest address from the 802.11
-* frame to use in the construction of the eth frame.
-*
-* Arguments:
-* ethconv Conversion type to perform
-* skb Packet buffer containing the 802.11 frame
-*
-* Returns:
-* 0 on success, non-zero otherwise
-*
-* Call context:
-* May be called in interrupt or non-interrupt context
-*----------------------------------------------------------------
-*/
+ * p80211pb_80211_to_ether
+ *
+ * Uses the contents of a received 802.11 frame and the etherconv
+ * setting to build an ether frame.
+ *
+ * This function extracts the src and dest address from the 802.11
+ * frame to use in the construction of the eth frame.
+ *
+ * Arguments:
+ * ethconv Conversion type to perform
+ * skb Packet buffer containing the 802.11 frame
+ *
+ * Returns:
+ * 0 on success, non-zero otherwise
+ *
+ * Call context:
+ * May be called in interrupt or non-interrupt context
+ *----------------------------------------------------------------
+ */
int skb_p80211_to_ether(struct wlandevice *wlandev, u32 ethconv,
struct sk_buff *skb)
{
@@ -508,22 +508,22 @@ int skb_p80211_to_ether(struct wlandevice *wlandev, u32 ethconv,
}
/*----------------------------------------------------------------
-* p80211_stt_findproto
-*
-* Searches the 802.1h Selective Translation Table for a given
-* protocol.
-*
-* Arguments:
-* proto protocol number (in host order) to search for.
-*
-* Returns:
-* 1 - if the table is empty or a match is found.
-* 0 - if the table is non-empty and a match is not found.
-*
-* Call context:
-* May be called in interrupt or non-interrupt context
-*----------------------------------------------------------------
-*/
+ * p80211_stt_findproto
+ *
+ * Searches the 802.1h Selective Translation Table for a given
+ * protocol.
+ *
+ * Arguments:
+ * proto protocol number (in host order) to search for.
+ *
+ * Returns:
+ * 1 - if the table is empty or a match is found.
+ * 0 - if the table is non-empty and a match is not found.
+ *
+ * Call context:
+ * May be called in interrupt or non-interrupt context
+ *----------------------------------------------------------------
+ */
int p80211_stt_findproto(u16 proto)
{
/* Always return found for now. This is the behavior used by the */
@@ -540,21 +540,21 @@ int p80211_stt_findproto(u16 proto)
}
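
Because an empty table counts as a match, callers can treat a hit as "apply 802.1h selective translation". An illustrative consumer, modeled on how the conversion path in this file consults the table (the function name is hypothetical):

	static const u8 *example_pick_oui(u32 ethconv, u16 proto)
	{
		if (ethconv == WLAN_ETHCONV_8021h &&
		    p80211_stt_findproto(proto))
			return oui_8021h;	/* selectively translated protocol */

		return oui_rfc1042;		/* default RFC 1042 encapsulation */
	}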
/*----------------------------------------------------------------
-* p80211skb_rxmeta_detach
-*
-* Disconnects the frmmeta and rxmeta from an skb.
-*
-* Arguments:
-* wlandev The wlandev this skb belongs to.
-* skb The skb we're attaching to.
-*
-* Returns:
-* 0 on success, non-zero otherwise
-*
-* Call context:
-* May be called in interrupt or non-interrupt context
-*----------------------------------------------------------------
-*/
+ * p80211skb_rxmeta_detach
+ *
+ * Disconnects the frmmeta and rxmeta from an skb.
+ *
+ * Arguments:
+ * skb The skb we're detaching the rxmeta from.
+ *
+ * Returns:
+ * nothing
+ *
+ * Call context:
+ * May be called in interrupt or non-interrupt context
+ *----------------------------------------------------------------
+ */
void p80211skb_rxmeta_detach(struct sk_buff *skb)
{
struct p80211_rxmeta *rxmeta;
@@ -584,22 +584,22 @@ void p80211skb_rxmeta_detach(struct sk_buff *skb)
}
/*----------------------------------------------------------------
-* p80211skb_rxmeta_attach
-*
-* Allocates a p80211rxmeta structure, initializes it, and attaches
-* it to an skb.
-*
-* Arguments:
-* wlandev The wlandev this skb belongs to.
-* skb The skb we're attaching to.
-*
-* Returns:
-* 0 on success, non-zero otherwise
-*
-* Call context:
-* May be called in interrupt or non-interrupt context
-*----------------------------------------------------------------
-*/
+ * p80211skb_rxmeta_attach
+ *
+ * Allocates a p80211rxmeta structure, initializes it, and attaches
+ * it to an skb.
+ *
+ * Arguments:
+ * wlandev The wlandev this skb belongs to.
+ * skb The skb we're attaching to.
+ *
+ * Returns:
+ * 0 on success, non-zero otherwise
+ *
+ * Call context:
+ * May be called in interrupt or non-interrupt context
+ *----------------------------------------------------------------
+ */
int p80211skb_rxmeta_attach(struct wlandevice *wlandev, struct sk_buff *skb)
{
int result = 0;
@@ -615,11 +615,9 @@ int p80211skb_rxmeta_attach(struct wlandevice *wlandev, struct sk_buff *skb)
}
/* Allocate the rxmeta */
- rxmeta = kzalloc(sizeof(struct p80211_rxmeta), GFP_ATOMIC);
+ rxmeta = kzalloc(sizeof(*rxmeta), GFP_ATOMIC);
if (!rxmeta) {
- netdev_err(wlandev->netdev,
- "%s: Failed to allocate rxmeta.\n", wlandev->name);
result = 1;
goto exit;
}
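
Two independent cleanups meet in the hunk above: kzalloc(sizeof(*rxmeta), ...) keeps the size expression correct even if the pointer's type changes, and the out-of-memory netdev_err() goes away because the allocator already warns on failure. The idiom in isolation:

	static struct p80211_rxmeta *example_alloc_rxmeta(void)
	{
		struct p80211_rxmeta *rxmeta;

		/* sizeof(*rxmeta) tracks the pointee type automatically;
		 * no failure message is needed, the allocator prints one
		 * itself.
		 */
		rxmeta = kzalloc(sizeof(*rxmeta), GFP_ATOMIC);

		return rxmeta;	/* NULL on allocation failure */
	}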
@@ -638,22 +636,22 @@ exit:
}
/*----------------------------------------------------------------
-* p80211skb_free
-*
-* Frees an entire p80211skb by checking and freeing the meta struct
-* and then freeing the skb.
-*
-* Arguments:
-* wlandev The wlandev this skb belongs to.
-* skb The skb we're attaching to.
-*
-* Returns:
-* 0 on success, non-zero otherwise
-*
-* Call context:
-* May be called in interrupt or non-interrupt context
-*----------------------------------------------------------------
-*/
+ * p80211skb_free
+ *
+ * Frees an entire p80211skb by checking and freeing the meta struct
+ * and then freeing the skb.
+ *
+ * Arguments:
+ * wlandev The wlandev this skb belongs to.
+ * skb The skb we're freeing.
+ *
+ * Returns:
+ * nothing
+ *
+ * Call context:
+ * May be called in interrupt or non-interrupt context
+ *----------------------------------------------------------------
+ */
void p80211skb_free(struct wlandevice *wlandev, struct sk_buff *skb)
{
struct p80211_frmmeta *meta;
diff --git a/drivers/staging/wlan-ng/p80211conv.h b/drivers/staging/wlan-ng/p80211conv.h
index 8c10357bedf0..ed70d98e5cf1 100644
--- a/drivers/staging/wlan-ng/p80211conv.h
+++ b/drivers/staging/wlan-ng/p80211conv.h
@@ -1,54 +1,54 @@
/* p80211conv.h
-*
-* Ether/802.11 conversions and packet buffer routines
-*
-* Copyright (C) 1999 AbsoluteValue Systems, Inc. All Rights Reserved.
-* --------------------------------------------------------------------
-*
-* linux-wlan
-*
-* The contents of this file are subject to the Mozilla Public
-* License Version 1.1 (the "License"); you may not use this file
-* except in compliance with the License. You may obtain a copy of
-* the License at http://www.mozilla.org/MPL/
-*
-* Software distributed under the License is distributed on an "AS
-* IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
-* implied. See the License for the specific language governing
-* rights and limitations under the License.
-*
-* Alternatively, the contents of this file may be used under the
-* terms of the GNU Public License version 2 (the "GPL"), in which
-* case the provisions of the GPL are applicable instead of the
-* above. If you wish to allow the use of your version of this file
-* only under the terms of the GPL and not to allow others to use
-* your version of this file under the MPL, indicate your decision
-* by deleting the provisions above and replace them with the notice
-* and other provisions required by the GPL. If you do not delete
-* the provisions above, a recipient may use your version of this
-* file under either the MPL or the GPL.
-*
-* --------------------------------------------------------------------
-*
-* Inquiries regarding the linux-wlan Open Source project can be
-* made directly to:
-*
-* AbsoluteValue Systems Inc.
-* info@linux-wlan.com
-* http://www.linux-wlan.com
-*
-* --------------------------------------------------------------------
-*
-* Portions of the development of this software were funded by
-* Intersil Corporation as part of PRISM(R) chipset product development.
-*
-* --------------------------------------------------------------------
-*
-* This file declares the functions, types and macros that perform
-* Ethernet to/from 802.11 frame conversions.
-*
-* --------------------------------------------------------------------
-*/
+ *
+ * Ether/802.11 conversions and packet buffer routines
+ *
+ * Copyright (C) 1999 AbsoluteValue Systems, Inc. All Rights Reserved.
+ * --------------------------------------------------------------------
+ *
+ * linux-wlan
+ *
+ * The contents of this file are subject to the Mozilla Public
+ * License Version 1.1 (the "License"); you may not use this file
+ * except in compliance with the License. You may obtain a copy of
+ * the License at http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS
+ * IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
+ * implied. See the License for the specific language governing
+ * rights and limitations under the License.
+ *
+ * Alternatively, the contents of this file may be used under the
+ * terms of the GNU Public License version 2 (the "GPL"), in which
+ * case the provisions of the GPL are applicable instead of the
+ * above. If you wish to allow the use of your version of this file
+ * only under the terms of the GPL and not to allow others to use
+ * your version of this file under the MPL, indicate your decision
+ * by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL. If you do not delete
+ * the provisions above, a recipient may use your version of this
+ * file under either the MPL or the GPL.
+ *
+ * --------------------------------------------------------------------
+ *
+ * Inquiries regarding the linux-wlan Open Source project can be
+ * made directly to:
+ *
+ * AbsoluteValue Systems Inc.
+ * info@linux-wlan.com
+ * http://www.linux-wlan.com
+ *
+ * --------------------------------------------------------------------
+ *
+ * Portions of the development of this software were funded by
+ * Intersil Corporation as part of PRISM(R) chipset product development.
+ *
+ * --------------------------------------------------------------------
+ *
+ * This file declares the functions, types and macros that perform
+ * Ethernet to/from 802.11 frame conversions.
+ *
+ * --------------------------------------------------------------------
+ */
#ifndef _LINUX_P80211CONV_H
#define _LINUX_P80211CONV_H
diff --git a/drivers/staging/wlan-ng/p80211hdr.h b/drivers/staging/wlan-ng/p80211hdr.h
index 79d9b20b364d..2c44c613a586 100644
--- a/drivers/staging/wlan-ng/p80211hdr.h
+++ b/drivers/staging/wlan-ng/p80211hdr.h
@@ -1,61 +1,61 @@
/* p80211hdr.h
-*
-* Macros, types, and functions for handling 802.11 MAC headers
-*
-* Copyright (C) 1999 AbsoluteValue Systems, Inc. All Rights Reserved.
-* --------------------------------------------------------------------
-*
-* linux-wlan
-*
-* The contents of this file are subject to the Mozilla Public
-* License Version 1.1 (the "License"); you may not use this file
-* except in compliance with the License. You may obtain a copy of
-* the License at http://www.mozilla.org/MPL/
-*
-* Software distributed under the License is distributed on an "AS
-* IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
-* implied. See the License for the specific language governing
-* rights and limitations under the License.
-*
-* Alternatively, the contents of this file may be used under the
-* terms of the GNU Public License version 2 (the "GPL"), in which
-* case the provisions of the GPL are applicable instead of the
-* above. If you wish to allow the use of your version of this file
-* only under the terms of the GPL and not to allow others to use
-* your version of this file under the MPL, indicate your decision
-* by deleting the provisions above and replace them with the notice
-* and other provisions required by the GPL. If you do not delete
-* the provisions above, a recipient may use your version of this
-* file under either the MPL or the GPL.
-*
-* --------------------------------------------------------------------
-*
-* Inquiries regarding the linux-wlan Open Source project can be
-* made directly to:
-*
-* AbsoluteValue Systems Inc.
-* info@linux-wlan.com
-* http://www.linux-wlan.com
-*
-* --------------------------------------------------------------------
-*
-* Portions of the development of this software were funded by
-* Intersil Corporation as part of PRISM(R) chipset product development.
-*
-* --------------------------------------------------------------------
-*
-* This file declares the constants and types used in the interface
-* between a wlan driver and the user mode utilities.
-*
-* Note:
-* - Constant values are always in HOST byte order. To assign
-* values to multi-byte fields they _must_ be converted to
-* ieee byte order. To retrieve multi-byte values from incoming
-* frames, they must be converted to host order.
-*
-* All functions declared here are implemented in p80211.c
-* --------------------------------------------------------------------
-*/
+ *
+ * Macros, types, and functions for handling 802.11 MAC headers
+ *
+ * Copyright (C) 1999 AbsoluteValue Systems, Inc. All Rights Reserved.
+ * --------------------------------------------------------------------
+ *
+ * linux-wlan
+ *
+ * The contents of this file are subject to the Mozilla Public
+ * License Version 1.1 (the "License"); you may not use this file
+ * except in compliance with the License. You may obtain a copy of
+ * the License at http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS
+ * IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
+ * implied. See the License for the specific language governing
+ * rights and limitations under the License.
+ *
+ * Alternatively, the contents of this file may be used under the
+ * terms of the GNU Public License version 2 (the "GPL"), in which
+ * case the provisions of the GPL are applicable instead of the
+ * above. If you wish to allow the use of your version of this file
+ * only under the terms of the GPL and not to allow others to use
+ * your version of this file under the MPL, indicate your decision
+ * by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL. If you do not delete
+ * the provisions above, a recipient may use your version of this
+ * file under either the MPL or the GPL.
+ *
+ * --------------------------------------------------------------------
+ *
+ * Inquiries regarding the linux-wlan Open Source project can be
+ * made directly to:
+ *
+ * AbsoluteValue Systems Inc.
+ * info@linux-wlan.com
+ * http://www.linux-wlan.com
+ *
+ * --------------------------------------------------------------------
+ *
+ * Portions of the development of this software were funded by
+ * Intersil Corporation as part of PRISM(R) chipset product development.
+ *
+ * --------------------------------------------------------------------
+ *
+ * This file declares the constants and types used in the interface
+ * between a wlan driver and the user mode utilities.
+ *
+ * Note:
+ * - Constant values are always in HOST byte order. To assign
+ * values to multi-byte fields they _must_ be converted to
+ * ieee byte order. To retrieve multi-byte values from incoming
+ * frames, they must be converted to host order.
+ *
+ * All functions declared here are implemented in p80211.c
+ * --------------------------------------------------------------------
+ */
#ifndef _P80211HDR_H
#define _P80211HDR_H
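
The byte-order note above is why every multi-byte header field in this driver is wrapped in cpu_to_le16()/le16_to_cpu() at the point of use. A sketch with the frame-control helpers from this header (handle_data_frame() is hypothetical; the field spelling hdr->a3.fc follows the union p80211_hdr usage seen elsewhere in this diff):

	static void example_fc_roundtrip(union p80211_hdr *hdr)
	{
		/* Constants are combined in host order... */
		u16 fc = WLAN_SET_FC_FTYPE(WLAN_FTYPE_DATA) |
			 WLAN_SET_FC_FSTYPE(WLAN_FSTYPE_DATAONLY);

		/* ...and only the stored field is in IEEE (LE) order. */
		hdr->a3.fc = cpu_to_le16(fc);

		if (WLAN_GET_FC_FTYPE(le16_to_cpu(hdr->a3.fc)) ==
		    WLAN_FTYPE_DATA)
			handle_data_frame(hdr);	/* hypothetical helper */
	}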
@@ -131,8 +131,8 @@
/* SET_FC_FSTYPE(WLAN_FSTYPE_RTS) ); */
/*------------------------------------------------------------*/
-#define WLAN_GET_FC_FTYPE(n) ((((u16)(n)) & (BIT(2) | BIT(3))) >> 2)
-#define WLAN_GET_FC_FSTYPE(n) ((((u16)(n)) & (BIT(4)|BIT(5)|BIT(6)|BIT(7))) >> 4)
+#define WLAN_GET_FC_FTYPE(n) ((((u16)(n)) & GENMASK(3, 2)) >> 2)
+#define WLAN_GET_FC_FSTYPE(n) ((((u16)(n)) & GENMASK(7, 4)) >> 4)
#define WLAN_GET_FC_TODS(n) ((((u16)(n)) & (BIT(8))) >> 8)
#define WLAN_GET_FC_FROMDS(n) ((((u16)(n)) & (BIT(9))) >> 9)
#define WLAN_GET_FC_ISWEP(n) ((((u16)(n)) & (BIT(14))) >> 14)
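
The GENMASK(h, l) conversions just above are value-preserving: GENMASK(3, 2) is exactly BIT(2) | BIT(3) (0x000c) and GENMASK(7, 4) is BIT(4)..BIT(7) (0x00f0). A compile-time self-check one could drop into the file:

	static inline void check_fc_masks(void)
	{
		BUILD_BUG_ON(GENMASK(3, 2) != (BIT(2) | BIT(3)));	/* 0x000c */
		BUILD_BUG_ON(GENMASK(7, 4) !=
			     (BIT(4) | BIT(5) | BIT(6) | BIT(7)));	/* 0x00f0 */
	}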
diff --git a/drivers/staging/wlan-ng/p80211ioctl.h b/drivers/staging/wlan-ng/p80211ioctl.h
index 06c5e36649a7..ab6067e65050 100644
--- a/drivers/staging/wlan-ng/p80211ioctl.h
+++ b/drivers/staging/wlan-ng/p80211ioctl.h
@@ -1,64 +1,64 @@
/* p80211ioctl.h
-*
-* Declares constants and types for the p80211 ioctls
-*
-* Copyright (C) 1999 AbsoluteValue Systems, Inc. All Rights Reserved.
-* --------------------------------------------------------------------
-*
-* linux-wlan
-*
-* The contents of this file are subject to the Mozilla Public
-* License Version 1.1 (the "License"); you may not use this file
-* except in compliance with the License. You may obtain a copy of
-* the License at http://www.mozilla.org/MPL/
-*
-* Software distributed under the License is distributed on an "AS
-* IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
-* implied. See the License for the specific language governing
-* rights and limitations under the License.
-*
-* Alternatively, the contents of this file may be used under the
-* terms of the GNU Public License version 2 (the "GPL"), in which
-* case the provisions of the GPL are applicable instead of the
-* above. If you wish to allow the use of your version of this file
-* only under the terms of the GPL and not to allow others to use
-* your version of this file under the MPL, indicate your decision
-* by deleting the provisions above and replace them with the notice
-* and other provisions required by the GPL. If you do not delete
-* the provisions above, a recipient may use your version of this
-* file under either the MPL or the GPL.
-*
-* --------------------------------------------------------------------
-*
-* Inquiries regarding the linux-wlan Open Source project can be
-* made directly to:
-*
-* AbsoluteValue Systems Inc.
-* info@linux-wlan.com
-* http://www.linux-wlan.com
-*
-* --------------------------------------------------------------------
-*
-* Portions of the development of this software were funded by
-* Intersil Corporation as part of PRISM(R) chipset product development.
-*
-* --------------------------------------------------------------------
-*
-* While this file is called 'ioctl' is purpose goes a little beyond
-* that. This file defines the types and contants used to implement
-* the p80211 request/confirm/indicate interfaces on Linux. The
-* request/confirm interface is, in fact, normally implemented as an
-* ioctl. The indicate interface on the other hand, is implemented
-* using the Linux 'netlink' interface.
-*
-* The reason I say that request/confirm is 'normally' implemented
-* via ioctl is that we're reserving the right to be able to send
-* request commands via the netlink interface. This will be necessary
-* if we ever need to send request messages when there aren't any
-* wlan network devices present (i.e. sending a message that only p80211
-* cares about.
-* --------------------------------------------------------------------
-*/
+ *
+ * Declares constants and types for the p80211 ioctls
+ *
+ * Copyright (C) 1999 AbsoluteValue Systems, Inc. All Rights Reserved.
+ * --------------------------------------------------------------------
+ *
+ * linux-wlan
+ *
+ * The contents of this file are subject to the Mozilla Public
+ * License Version 1.1 (the "License"); you may not use this file
+ * except in compliance with the License. You may obtain a copy of
+ * the License at http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS
+ * IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
+ * implied. See the License for the specific language governing
+ * rights and limitations under the License.
+ *
+ * Alternatively, the contents of this file may be used under the
+ * terms of the GNU Public License version 2 (the "GPL"), in which
+ * case the provisions of the GPL are applicable instead of the
+ * above. If you wish to allow the use of your version of this file
+ * only under the terms of the GPL and not to allow others to use
+ * your version of this file under the MPL, indicate your decision
+ * by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL. If you do not delete
+ * the provisions above, a recipient may use your version of this
+ * file under either the MPL or the GPL.
+ *
+ * --------------------------------------------------------------------
+ *
+ * Inquiries regarding the linux-wlan Open Source project can be
+ * made directly to:
+ *
+ * AbsoluteValue Systems Inc.
+ * info@linux-wlan.com
+ * http://www.linux-wlan.com
+ *
+ * --------------------------------------------------------------------
+ *
+ * Portions of the development of this software were funded by
+ * Intersil Corporation as part of PRISM(R) chipset product development.
+ *
+ * --------------------------------------------------------------------
+ *
+ * While this file is called 'ioctl', its purpose goes a little beyond
+ * that. This file defines the types and constants used to implement
+ * the p80211 request/confirm/indicate interfaces on Linux. The
+ * request/confirm interface is, in fact, normally implemented as an
+ * ioctl. The indicate interface on the other hand, is implemented
+ * using the Linux 'netlink' interface.
+ *
+ * The reason I say that request/confirm is 'normally' implemented
+ * via ioctl is that we're reserving the right to be able to send
+ * request commands via the netlink interface. This will be necessary
+ * if we ever need to send request messages when there aren't any
+ * wlan network devices present (i.e. sending a message that only p80211
+ * cares about).
+ * --------------------------------------------------------------------
+ */
#ifndef _P80211IOCTL_H
#define _P80211IOCTL_H
diff --git a/drivers/staging/wlan-ng/p80211metadef.h b/drivers/staging/wlan-ng/p80211metadef.h
index b0d3567ca0ad..ea3d9ce222b9 100644
--- a/drivers/staging/wlan-ng/p80211metadef.h
+++ b/drivers/staging/wlan-ng/p80211metadef.h
@@ -1,48 +1,48 @@
/* This file is GENERATED AUTOMATICALLY. DO NOT EDIT OR MODIFY.
-* --------------------------------------------------------------------
-*
-* Copyright (C) 1999 AbsoluteValue Systems, Inc. All Rights Reserved.
-* --------------------------------------------------------------------
-*
-* linux-wlan
-*
-* The contents of this file are subject to the Mozilla Public
-* License Version 1.1 (the "License"); you may not use this file
-* except in compliance with the License. You may obtain a copy of
-* the License at http://www.mozilla.org/MPL/
-*
-* Software distributed under the License is distributed on an "AS
-* IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
-* implied. See the License for the specific language governing
-* rights and limitations under the License.
-*
-* Alternatively, the contents of this file may be used under the
-* terms of the GNU Public License version 2 (the "GPL"), in which
-* case the provisions of the GPL are applicable instead of the
-* above. If you wish to allow the use of your version of this file
-* only under the terms of the GPL and not to allow others to use
-* your version of this file under the MPL, indicate your decision
-* by deleting the provisions above and replace them with the notice
-* and other provisions required by the GPL. If you do not delete
-* the provisions above, a recipient may use your version of this
-* file under either the MPL or the GPL.
-*
-* --------------------------------------------------------------------
-*
-* Inquiries regarding the linux-wlan Open Source project can be
-* made directly to:
-*
-* AbsoluteValue Systems Inc.
-* info@linux-wlan.com
-* http://www.linux-wlan.com
-*
-* --------------------------------------------------------------------
-*
-* Portions of the development of this software were funded by
-* Intersil Corporation as part of PRISM(R) chipset product development.
-*
-* --------------------------------------------------------------------
-*/
+ * --------------------------------------------------------------------
+ *
+ * Copyright (C) 1999 AbsoluteValue Systems, Inc. All Rights Reserved.
+ * --------------------------------------------------------------------
+ *
+ * linux-wlan
+ *
+ * The contents of this file are subject to the Mozilla Public
+ * License Version 1.1 (the "License"); you may not use this file
+ * except in compliance with the License. You may obtain a copy of
+ * the License at http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS
+ * IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
+ * implied. See the License for the specific language governing
+ * rights and limitations under the License.
+ *
+ * Alternatively, the contents of this file may be used under the
+ * terms of the GNU Public License version 2 (the "GPL"), in which
+ * case the provisions of the GPL are applicable instead of the
+ * above. If you wish to allow the use of your version of this file
+ * only under the terms of the GPL and not to allow others to use
+ * your version of this file under the MPL, indicate your decision
+ * by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL. If you do not delete
+ * the provisions above, a recipient may use your version of this
+ * file under either the MPL or the GPL.
+ *
+ * --------------------------------------------------------------------
+ *
+ * Inquiries regarding the linux-wlan Open Source project can be
+ * made directly to:
+ *
+ * AbsoluteValue Systems Inc.
+ * info@linux-wlan.com
+ * http://www.linux-wlan.com
+ *
+ * --------------------------------------------------------------------
+ *
+ * Portions of the development of this software were funded by
+ * Intersil Corporation as part of PRISM(R) chipset product development.
+ *
+ * --------------------------------------------------------------------
+ */
#ifndef _P80211MKMETADEF_H
#define _P80211MKMETADEF_H
diff --git a/drivers/staging/wlan-ng/p80211mgmt.h b/drivers/staging/wlan-ng/p80211mgmt.h
index 3dd066ac034e..653950fd9843 100644
--- a/drivers/staging/wlan-ng/p80211mgmt.h
+++ b/drivers/staging/wlan-ng/p80211mgmt.h
@@ -1,101 +1,101 @@
/* p80211mgmt.h
-*
-* Macros, types, and functions to handle 802.11 mgmt frames
-*
-* Copyright (C) 1999 AbsoluteValue Systems, Inc. All Rights Reserved.
-* --------------------------------------------------------------------
-*
-* linux-wlan
-*
-* The contents of this file are subject to the Mozilla Public
-* License Version 1.1 (the "License"); you may not use this file
-* except in compliance with the License. You may obtain a copy of
-* the License at http://www.mozilla.org/MPL/
-*
-* Software distributed under the License is distributed on an "AS
-* IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
-* implied. See the License for the specific language governing
-* rights and limitations under the License.
-*
-* Alternatively, the contents of this file may be used under the
-* terms of the GNU Public License version 2 (the "GPL"), in which
-* case the provisions of the GPL are applicable instead of the
-* above. If you wish to allow the use of your version of this file
-* only under the terms of the GPL and not to allow others to use
-* your version of this file under the MPL, indicate your decision
-* by deleting the provisions above and replace them with the notice
-* and other provisions required by the GPL. If you do not delete
-* the provisions above, a recipient may use your version of this
-* file under either the MPL or the GPL.
-*
-* --------------------------------------------------------------------
-*
-* Inquiries regarding the linux-wlan Open Source project can be
-* made directly to:
-*
-* AbsoluteValue Systems Inc.
-* info@linux-wlan.com
-* http://www.linux-wlan.com
-*
-* --------------------------------------------------------------------
-*
-* Portions of the development of this software were funded by
-* Intersil Corporation as part of PRISM(R) chipset product development.
-*
-* --------------------------------------------------------------------
-*
-* This file declares the constants and types used in the interface
-* between a wlan driver and the user mode utilities.
-*
-* Notes:
-* - Constant values are always in HOST byte order. To assign
-* values to multi-byte fields they _must_ be converted to
-* ieee byte order. To retrieve multi-byte values from incoming
-* frames, they must be converted to host order.
-*
-* - The len member of the frame structure does NOT!!! include
-* the MAC CRC. Therefore, the len field on rx'd frames should
-* have 4 subtracted from it.
-*
-* All functions declared here are implemented in p80211.c
-*
-* The types, macros, and functions defined here are primarily
-* used for encoding and decoding management frames. They are
-* designed to follow these patterns of use:
-*
-* DECODE:
-* 1) a frame of length len is received into buffer b
-* 2) using the hdr structure and macros, we determine the type
-* 3) an appropriate mgmt frame structure, mf, is allocated and zeroed
-* 4) mf.hdr = b
-* mf.buf = b
-* mf.len = len
-* 5) call mgmt_decode( mf )
-* 6) the frame field pointers in mf are now set. Note that any
-* multi-byte frame field values accessed using the frame field
-* pointers are in ieee byte order and will have to be converted
-* to host order.
-*
-* ENCODE:
-* 1) Library client allocates buffer space for maximum length
-* frame of the desired type
-* 2) Library client allocates a mgmt frame structure, called mf,
-* of the desired type
-* 3) Set the following:
-* mf.type = <desired type>
-* mf.buf = <allocated buffer address>
-* 4) call mgmt_encode( mf )
-* 5) all of the fixed field pointers and fixed length information element
-* pointers in mf are now set to their respective locations in the
-* allocated space (fortunately, all variable length information elements
-* fall at the end of their respective frames).
-* 5a) The length field is set to include the last of the fixed and fixed
-* length fields. It may have to be updated for optional or variable
-* length information elements.
-* 6) Optional and variable length information elements are special cases
-* and must be handled individually by the client code.
-* --------------------------------------------------------------------
-*/
+ *
+ * Macros, types, and functions to handle 802.11 mgmt frames
+ *
+ * Copyright (C) 1999 AbsoluteValue Systems, Inc. All Rights Reserved.
+ * --------------------------------------------------------------------
+ *
+ * linux-wlan
+ *
+ * The contents of this file are subject to the Mozilla Public
+ * License Version 1.1 (the "License"); you may not use this file
+ * except in compliance with the License. You may obtain a copy of
+ * the License at http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS
+ * IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
+ * implied. See the License for the specific language governing
+ * rights and limitations under the License.
+ *
+ * Alternatively, the contents of this file may be used under the
+ * terms of the GNU Public License version 2 (the "GPL"), in which
+ * case the provisions of the GPL are applicable instead of the
+ * above. If you wish to allow the use of your version of this file
+ * only under the terms of the GPL and not to allow others to use
+ * your version of this file under the MPL, indicate your decision
+ * by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL. If you do not delete
+ * the provisions above, a recipient may use your version of this
+ * file under either the MPL or the GPL.
+ *
+ * --------------------------------------------------------------------
+ *
+ * Inquiries regarding the linux-wlan Open Source project can be
+ * made directly to:
+ *
+ * AbsoluteValue Systems Inc.
+ * info@linux-wlan.com
+ * http://www.linux-wlan.com
+ *
+ * --------------------------------------------------------------------
+ *
+ * Portions of the development of this software were funded by
+ * Intersil Corporation as part of PRISM(R) chipset product development.
+ *
+ * --------------------------------------------------------------------
+ *
+ * This file declares the constants and types used in the interface
+ * between a wlan driver and the user mode utilities.
+ *
+ * Notes:
+ * - Constant values are always in HOST byte order. To assign
+ * values to multi-byte fields they _must_ be converted to
+ * ieee byte order. To retrieve multi-byte values from incoming
+ * frames, they must be converted to host order.
+ *
+ * - The len member of the frame structure does NOT!!! include
+ * the MAC CRC. Therefore, the len field on rx'd frames should
+ * have 4 subtracted from it.
+ *
+ * All functions declared here are implemented in p80211.c
+ *
+ * The types, macros, and functions defined here are primarily
+ * used for encoding and decoding management frames. They are
+ * designed to follow these patterns of use:
+ *
+ * DECODE:
+ * 1) a frame of length len is received into buffer b
+ * 2) using the hdr structure and macros, we determine the type
+ * 3) an appropriate mgmt frame structure, mf, is allocated and zeroed
+ * 4) mf.hdr = b
+ * mf.buf = b
+ * mf.len = len
+ * 5) call mgmt_decode( mf )
+ * 6) the frame field pointers in mf are now set. Note that any
+ * multi-byte frame field values accessed using the frame field
+ * pointers are in ieee byte order and will have to be converted
+ * to host order.
+ *
+ * ENCODE:
+ * 1) Library client allocates buffer space for maximum length
+ * frame of the desired type
+ * 2) Library client allocates a mgmt frame structure, called mf,
+ * of the desired type
+ * 3) Set the following:
+ * mf.type = <desired type>
+ * mf.buf = <allocated buffer address>
+ * 4) call mgmt_encode( mf )
+ * 5) all of the fixed field pointers and fixed length information element
+ * pointers in mf are now set to their respective locations in the
+ * allocated space (fortunately, all variable length information elements
+ * fall at the end of their respective frames).
+ * 5a) The length field is set to include the last of the fixed and fixed
+ * length fields. It may have to be updated for optional or variable
+ * length information elements.
+ * 6) Optional and variable length information elements are special cases
+ * and must be handled individually by the client code.
+ * --------------------------------------------------------------------
+ */
#ifndef _P80211MGMT_H
#define _P80211MGMT_H
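For reference, the DECODE pattern described in the header comment above boils down to the following minimal sketch. The struct and helper names (struct wlan_fr_beacon, wlan_mgmt_decode_beacon) follow this header's naming conventions but are shown here purely as an illustration; adapt them to the actual frame type in use.

static void example_decode_beacon(u8 *b, u16 len)
{
	struct wlan_fr_beacon mf;		/* step 3: allocate and zero */

	memset(&mf, 0, sizeof(mf));
	mf.hdr = (union p80211_hdr *)b;		/* step 4: alias the raw buffer */
	mf.buf = b;
	mf.len = len;				/* len excludes the 4-byte MAC CRC */

	wlan_mgmt_decode_beacon(&mf);		/* step 5: sets the field pointers */

	/* step 6: multi-byte fields are still in ieee (little-endian) order */
	if (mf.bcn_int)
		pr_info("beacon interval: %u TU\n", le16_to_cpu(*mf.bcn_int));
}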
diff --git a/drivers/staging/wlan-ng/p80211msg.h b/drivers/staging/wlan-ng/p80211msg.h
index 43d2f971e2cd..40c5cf5997c7 100644
--- a/drivers/staging/wlan-ng/p80211msg.h
+++ b/drivers/staging/wlan-ng/p80211msg.h
@@ -1,49 +1,49 @@
/* p80211msg.h
-*
-* Macros, constants, types, and funcs for req and ind messages
-*
-* Copyright (C) 1999 AbsoluteValue Systems, Inc. All Rights Reserved.
-* --------------------------------------------------------------------
-*
-* linux-wlan
-*
-* The contents of this file are subject to the Mozilla Public
-* License Version 1.1 (the "License"); you may not use this file
-* except in compliance with the License. You may obtain a copy of
-* the License at http://www.mozilla.org/MPL/
-*
-* Software distributed under the License is distributed on an "AS
-* IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
-* implied. See the License for the specific language governing
-* rights and limitations under the License.
-*
-* Alternatively, the contents of this file may be used under the
-* terms of the GNU Public License version 2 (the "GPL"), in which
-* case the provisions of the GPL are applicable instead of the
-* above. If you wish to allow the use of your version of this file
-* only under the terms of the GPL and not to allow others to use
-* your version of this file under the MPL, indicate your decision
-* by deleting the provisions above and replace them with the notice
-* and other provisions required by the GPL. If you do not delete
-* the provisions above, a recipient may use your version of this
-* file under either the MPL or the GPL.
-*
-* --------------------------------------------------------------------
-*
-* Inquiries regarding the linux-wlan Open Source project can be
-* made directly to:
-*
-* AbsoluteValue Systems Inc.
-* info@linux-wlan.com
-* http://www.linux-wlan.com
-*
-* --------------------------------------------------------------------
-*
-* Portions of the development of this software were funded by
-* Intersil Corporation as part of PRISM(R) chipset product development.
-*
-* --------------------------------------------------------------------
-*/
+ *
+ * Macros, constants, types, and funcs for req and ind messages
+ *
+ * Copyright (C) 1999 AbsoluteValue Systems, Inc. All Rights Reserved.
+ * --------------------------------------------------------------------
+ *
+ * linux-wlan
+ *
+ * The contents of this file are subject to the Mozilla Public
+ * License Version 1.1 (the "License"); you may not use this file
+ * except in compliance with the License. You may obtain a copy of
+ * the License at http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS
+ * IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
+ * implied. See the License for the specific language governing
+ * rights and limitations under the License.
+ *
+ * Alternatively, the contents of this file may be used under the
+ * terms of the GNU Public License version 2 (the "GPL"), in which
+ * case the provisions of the GPL are applicable instead of the
+ * above. If you wish to allow the use of your version of this file
+ * only under the terms of the GPL and not to allow others to use
+ * your version of this file under the MPL, indicate your decision
+ * by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL. If you do not delete
+ * the provisions above, a recipient may use your version of this
+ * file under either the MPL or the GPL.
+ *
+ * --------------------------------------------------------------------
+ *
+ * Inquiries regarding the linux-wlan Open Source project can be
+ * made directly to:
+ *
+ * AbsoluteValue Systems Inc.
+ * info@linux-wlan.com
+ * http://www.linux-wlan.com
+ *
+ * --------------------------------------------------------------------
+ *
+ * Portions of the development of this software were funded by
+ * Intersil Corporation as part of PRISM(R) chipset product development.
+ *
+ * --------------------------------------------------------------------
+ */
#ifndef _P80211MSG_H
#define _P80211MSG_H
diff --git a/drivers/staging/wlan-ng/p80211netdev.c b/drivers/staging/wlan-ng/p80211netdev.c
index 825a63a7c0e3..73fcf07254fe 100644
--- a/drivers/staging/wlan-ng/p80211netdev.c
+++ b/drivers/staging/wlan-ng/p80211netdev.c
@@ -1,53 +1,53 @@
/* src/p80211/p80211knetdev.c
-*
-* Linux Kernel net device interface
-*
-* Copyright (C) 1999 AbsoluteValue Systems, Inc. All Rights Reserved.
-* --------------------------------------------------------------------
-*
-* linux-wlan
-*
-* The contents of this file are subject to the Mozilla Public
-* License Version 1.1 (the "License"); you may not use this file
-* except in compliance with the License. You may obtain a copy of
-* the License at http://www.mozilla.org/MPL/
-*
-* Software distributed under the License is distributed on an "AS
-* IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
-* implied. See the License for the specific language governing
-* rights and limitations under the License.
-*
-* Alternatively, the contents of this file may be used under the
-* terms of the GNU Public License version 2 (the "GPL"), in which
-* case the provisions of the GPL are applicable instead of the
-* above. If you wish to allow the use of your version of this file
-* only under the terms of the GPL and not to allow others to use
-* your version of this file under the MPL, indicate your decision
-* by deleting the provisions above and replace them with the notice
-* and other provisions required by the GPL. If you do not delete
-* the provisions above, a recipient may use your version of this
-* file under either the MPL or the GPL.
-*
-* --------------------------------------------------------------------
-*
-* Inquiries regarding the linux-wlan Open Source project can be
-* made directly to:
-*
-* AbsoluteValue Systems Inc.
-* info@linux-wlan.com
-* http://www.linux-wlan.com
-*
-* --------------------------------------------------------------------
-*
-* Portions of the development of this software were funded by
-* Intersil Corporation as part of PRISM(R) chipset product development.
-*
-* --------------------------------------------------------------------
-*
-* The functions required for a Linux network device are defined here.
-*
-* --------------------------------------------------------------------
-*/
+ *
+ * Linux Kernel net device interface
+ *
+ * Copyright (C) 1999 AbsoluteValue Systems, Inc. All Rights Reserved.
+ * --------------------------------------------------------------------
+ *
+ * linux-wlan
+ *
+ * The contents of this file are subject to the Mozilla Public
+ * License Version 1.1 (the "License"); you may not use this file
+ * except in compliance with the License. You may obtain a copy of
+ * the License at http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS
+ * IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
+ * implied. See the License for the specific language governing
+ * rights and limitations under the License.
+ *
+ * Alternatively, the contents of this file may be used under the
+ * terms of the GNU Public License version 2 (the "GPL"), in which
+ * case the provisions of the GPL are applicable instead of the
+ * above. If you wish to allow the use of your version of this file
+ * only under the terms of the GPL and not to allow others to use
+ * your version of this file under the MPL, indicate your decision
+ * by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL. If you do not delete
+ * the provisions above, a recipient may use your version of this
+ * file under either the MPL or the GPL.
+ *
+ * --------------------------------------------------------------------
+ *
+ * Inquiries regarding the linux-wlan Open Source project can be
+ * made directly to:
+ *
+ * AbsoluteValue Systems Inc.
+ * info@linux-wlan.com
+ * http://www.linux-wlan.com
+ *
+ * --------------------------------------------------------------------
+ *
+ * Portions of the development of this software were funded by
+ * Intersil Corporation as part of PRISM(R) chipset product development.
+ *
+ * --------------------------------------------------------------------
+ *
+ * The functions required for a Linux network device are defined here.
+ *
+ * --------------------------------------------------------------------
+ */
#include <linux/module.h>
#include <linux/kernel.h>
@@ -112,17 +112,18 @@ module_param(wlan_wext_write, int, 0644);
MODULE_PARM_DESC(wlan_wext_write, "enable write wireless extensions");
/*----------------------------------------------------------------
-* p80211knetdev_init
-*
-* Init method for a Linux netdevice. Called in response to
-* register_netdev.
-*
-* Arguments:
-* none
-*
-* Returns:
-* nothing
-----------------------------------------------------------------*/
+ * p80211knetdev_init
+ *
+ * Init method for a Linux netdevice. Called in response to
+ * register_netdev.
+ *
+ * Arguments:
+ * none
+ *
+ * Returns:
+ * nothing
+ *----------------------------------------------------------------
+ */
static int p80211knetdev_init(struct net_device *netdev)
{
/* Called in response to register_netdev */
@@ -133,19 +134,20 @@ static int p80211knetdev_init(struct net_device *netdev)
}
/*----------------------------------------------------------------
-* p80211knetdev_open
-*
-* Linux netdevice open method. Following a successful call here,
-* the device is supposed to be ready for tx and rx. In our
-* situation that may not be entirely true due to the state of the
-* MAC below.
-*
-* Arguments:
-* netdev Linux network device structure
-*
-* Returns:
-* zero on success, non-zero otherwise
-----------------------------------------------------------------*/
+ * p80211knetdev_open
+ *
+ * Linux netdevice open method. Following a successful call here,
+ * the device is supposed to be ready for tx and rx. In our
+ * situation that may not be entirely true due to the state of the
+ * MAC below.
+ *
+ * Arguments:
+ * netdev Linux network device structure
+ *
+ * Returns:
+ * zero on success, non-zero otherwise
+ *----------------------------------------------------------------
+ */
static int p80211knetdev_open(struct net_device *netdev)
{
int result = 0; /* success */
@@ -170,17 +172,18 @@ static int p80211knetdev_open(struct net_device *netdev)
}
/*----------------------------------------------------------------
-* p80211knetdev_stop
-*
-* Linux netdevice stop (close) method. Following this call,
-* no frames should go up or down through this interface.
-*
-* Arguments:
-* netdev Linux network device structure
-*
-* Returns:
-* zero on success, non-zero otherwise
-----------------------------------------------------------------*/
+ * p80211knetdev_stop
+ *
+ * Linux netdevice stop (close) method. Following this call,
+ * no frames should go up or down through this interface.
+ *
+ * Arguments:
+ * netdev Linux network device structure
+ *
+ * Returns:
+ * zero on success, non-zero otherwise
+ *----------------------------------------------------------------
+ */
static int p80211knetdev_stop(struct net_device *netdev)
{
int result = 0;
@@ -196,18 +199,19 @@ static int p80211knetdev_stop(struct net_device *netdev)
}
/*----------------------------------------------------------------
-* p80211netdev_rx
-*
-* Frame receive function called by the mac specific driver.
-*
-* Arguments:
-* wlandev WLAN network device structure
-* skb skbuff containing a full 802.11 frame.
-* Returns:
-* nothing
-* Side effects:
-*
-----------------------------------------------------------------*/
+ * p80211netdev_rx
+ *
+ * Frame receive function called by the mac specific driver.
+ *
+ * Arguments:
+ * wlandev WLAN network device structure
+ * skb skbuff containing a full 802.11 frame.
+ * Returns:
+ * nothing
+ * Side effects:
+ *
+ *----------------------------------------------------------------
+ */
void p80211netdev_rx(struct wlandevice *wlandev, struct sk_buff *skb)
{
/* Enqueue for post-irq processing */
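The receive side defers ethernet conversion to a bottom half: p80211netdev_rx() only queues the frame and schedules the tasklet that runs p80211netdev_rx_bh() below. A sketch of the enqueue side (the nsd_rxq queue appears in the next hunk; the rx_bh tasklet member name is an assumption here):

void p80211netdev_rx(struct wlandevice *wlandev, struct sk_buff *skb)
{
	/* Enqueue for post-irq processing */
	skb_queue_tail(&wlandev->nsd_rxq, skb);	/* serialize against the bh */
	tasklet_schedule(&wlandev->rx_bh);	/* run rx_bh outside irq context */
}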
@@ -227,7 +231,8 @@ void p80211netdev_rx(struct wlandevice *wlandev, struct sk_buff *skb)
* CONV_TO_ETHER_FAILED if conversion failed
* CONV_TO_ETHER_SKIPPED if frame is ignored
*/
-static int p80211_convert_to_ether(struct wlandevice *wlandev, struct sk_buff *skb)
+static int p80211_convert_to_ether(struct wlandevice *wlandev,
+ struct sk_buff *skb)
{
struct p80211_hdr_a3 *hdr;
@@ -272,7 +277,6 @@ static void p80211netdev_rx_bh(unsigned long arg)
/* Let's empty our queue */
while ((skb = skb_dequeue(&wlandev->nsd_rxq))) {
if (wlandev->state == WLAN_DEVICE_OPEN) {
-
if (dev->type != ARPHRD_ETHER) {
/* RAW frame; we shouldn't convert it */
/* XXX Append the Prism Header here instead. */
@@ -299,24 +303,25 @@ static void p80211netdev_rx_bh(unsigned long arg)
}
/*----------------------------------------------------------------
-* p80211knetdev_hard_start_xmit
-*
-* Linux netdevice method for transmitting a frame.
-*
-* Arguments:
-* skb Linux sk_buff containing the frame.
-* netdev Linux netdevice.
-*
-* Side effects:
-* If the lower layers report that buffers are full. netdev->tbusy
-* will be set to prevent higher layers from sending more traffic.
-*
-* Note: If this function returns non-zero, higher layers retain
-* ownership of the skb.
-*
-* Returns:
-* zero on success, non-zero on failure.
-----------------------------------------------------------------*/
+ * p80211knetdev_hard_start_xmit
+ *
+ * Linux netdevice method for transmitting a frame.
+ *
+ * Arguments:
+ * skb Linux sk_buff containing the frame.
+ * netdev Linux netdevice.
+ *
+ * Side effects:
+ * If the lower layers report that buffers are full, netdev->tbusy
+ * will be set to prevent higher layers from sending more traffic.
+ *
+ * Note: If this function returns non-zero, higher layers retain
+ * ownership of the skb.
+ *
+ * Returns:
+ * zero on success, non-zero on failure.
+ *----------------------------------------------------------------
+ */
static int p80211knetdev_hard_start_xmit(struct sk_buff *skb,
struct net_device *netdev)
{
@@ -336,8 +341,8 @@ static int p80211knetdev_hard_start_xmit(struct sk_buff *skb,
goto failed;
}
- memset(&p80211_hdr, 0, sizeof(union p80211_hdr));
- memset(&p80211_wep, 0, sizeof(struct p80211_metawep));
+ memset(&p80211_hdr, 0, sizeof(p80211_hdr));
+ memset(&p80211_wep, 0, sizeof(p80211_wep));
if (netif_queue_stopped(netdev)) {
netdev_dbg(netdev, "called when queue stopped.\n");
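The memset changes in this hunk switch from sizeof(type) to sizeof(variable). Both compute the same size today; the sizeof(variable) form is preferred kernel style because it stays correct if the variable's declared type is ever changed:

	union p80211_hdr p80211_hdr;

	memset(&p80211_hdr, 0, sizeof(p80211_hdr));	/* tracks the variable */
	/* vs. sizeof(union p80211_hdr): silently stale if the type changes */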
@@ -375,8 +380,8 @@ static int p80211knetdev_hard_start_xmit(struct sk_buff *skb,
goto failed;
}
/* move the header over */
- memcpy(&p80211_hdr, skb->data, sizeof(union p80211_hdr));
- skb_pull(skb, sizeof(union p80211_hdr));
+ memcpy(&p80211_hdr, skb->data, sizeof(p80211_hdr));
+ skb_pull(skb, sizeof(p80211_hdr));
} else {
if (skb_ether_to_p80211
(wlandev, wlandev->ethconv, skb, &p80211_hdr,
@@ -435,17 +440,18 @@ failed:
}
/*----------------------------------------------------------------
-* p80211knetdev_set_multicast_list
-*
-* Called from higher layers whenever there's a need to set/clear
-* promiscuous mode or rewrite the multicast list.
-*
-* Arguments:
-* none
-*
-* Returns:
-* nothing
-----------------------------------------------------------------*/
+ * p80211knetdev_set_multicast_list
+ *
+ * Called from higher layers whenever there's a need to set/clear
+ * promiscuous mode or rewrite the multicast list.
+ *
+ * Arguments:
+ * none
+ *
+ * Returns:
+ * nothing
+ *----------------------------------------------------------------
+ */
static void p80211knetdev_set_multicast_list(struct net_device *dev)
{
struct wlandevice *wlandev = dev->ml_priv;
@@ -454,12 +460,12 @@ static void p80211knetdev_set_multicast_list(struct net_device *dev)
if (wlandev->set_multicast_list)
wlandev->set_multicast_list(wlandev, dev);
-
}
#ifdef SIOCETHTOOL
-static int p80211netdev_ethtool(struct wlandevice *wlandev, void __user *useraddr)
+static int p80211netdev_ethtool(struct wlandevice *wlandev,
+ void __user *useraddr)
{
u32 ethcmd;
struct ethtool_drvinfo info;
@@ -505,33 +511,35 @@ static int p80211netdev_ethtool(struct wlandevice *wlandev, void __user *useradd
#endif
/*----------------------------------------------------------------
-* p80211knetdev_do_ioctl
-*
-* Handle an ioctl call on one of our devices. Everything Linux
-* ioctl specific is done here. Then we pass the contents of the
-* ifr->data to the request message handler.
-*
-* Arguments:
-* dev Linux kernel netdevice
-* ifr Our private ioctl request structure, typed for the
-* generic struct ifreq so we can use ptr to func
-* w/o cast.
-*
-* Returns:
-* zero on success, a negative errno on failure. Possible values:
-* -ENETDOWN Device isn't up.
-* -EBUSY cmd already in progress
-* -ETIME p80211 cmd timed out (MSD may have its own timers)
-* -EFAULT memory fault copying msg from user buffer
-* -ENOMEM unable to allocate kernel msg buffer
-* -ENOSYS bad magic, it the cmd really for us?
-* -EintR sleeping on cmd, awakened by signal, cmd cancelled.
-*
-* Call Context:
-* Process thread (ioctl caller). TODO: SMP support may require
-* locks.
-----------------------------------------------------------------*/
-static int p80211knetdev_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+ * p80211knetdev_do_ioctl
+ *
+ * Handle an ioctl call on one of our devices. Everything Linux
+ * ioctl specific is done here. Then we pass the contents of the
+ * ifr->data to the request message handler.
+ *
+ * Arguments:
+ * dev Linux kernel netdevice
+ * ifr Our private ioctl request structure, typed for the
+ * generic struct ifreq so we can use ptr to func
+ * w/o cast.
+ *
+ * Returns:
+ * zero on success, a negative errno on failure. Possible values:
+ * -ENETDOWN Device isn't up.
+ * -EBUSY cmd already in progress
+ * -ETIME p80211 cmd timed out (MSD may have its own timers)
+ * -EFAULT memory fault copying msg from user buffer
+ * -ENOMEM unable to allocate kernel msg buffer
+ * -EINVAL bad magic, is the cmd really for us?
+ * -EINTR sleeping on cmd, awakened by signal, cmd cancelled.
+ *
+ * Call Context:
+ * Process thread (ioctl caller). TODO: SMP support may require
+ * locks.
+ *----------------------------------------------------------------
+ */
+static int p80211knetdev_do_ioctl(struct net_device *dev,
+ struct ifreq *ifr, int cmd)
{
int result = 0;
struct p80211ioctl_req *req = (struct p80211ioctl_req *)ifr;
@@ -550,7 +558,7 @@ static int p80211knetdev_do_ioctl(struct net_device *dev, struct ifreq *ifr, int
/* Test the magic, assume ifr is good if it's there */
if (req->magic != P80211_IOCTL_MAGIC) {
- result = -ENOSYS;
+ result = -EINVAL;
goto bail;
}
@@ -558,7 +566,7 @@ static int p80211knetdev_do_ioctl(struct net_device *dev, struct ifreq *ifr, int
result = 0;
goto bail;
} else if (cmd != P80211_IFREQ) {
- result = -ENOSYS;
+ result = -EINVAL;
goto bail;
}
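The two hunks above also swap -ENOSYS for -EINVAL. -ENOSYS is conventionally reserved for "no such system call"; a request that reaches the driver but carries a bad magic number or an unrecognized command is an invalid argument, hence -EINVAL. The check, condensed to a sketch:

	struct p80211ioctl_req *req = (struct p80211ioctl_req *)ifr;

	if (req->magic != P80211_IOCTL_MAGIC)
		return -EINVAL;	/* malformed request, not a missing syscall */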
@@ -586,30 +594,31 @@ bail:
}
/*----------------------------------------------------------------
-* p80211knetdev_set_mac_address
-*
-* Handles the ioctl for changing the MACAddress of a netdevice
-*
-* references: linux/netdevice.h and drivers/net/net_init.c
-*
-* NOTE: [MSM] We only prevent address changes when the netdev is
-* up. We don't control anything based on dot11 state. If the
-* address is changed on a STA that's currently associated, you
-* will probably lose the ability to send and receive data frames.
-* Just be aware. Therefore, this should usually only be done
-* prior to scan/join/auth/assoc.
-*
-* Arguments:
-* dev netdevice struct
-* addr the new MACAddress (a struct)
-*
-* Returns:
-* zero on success, a negative errno on failure. Possible values:
-* -EBUSY device is bussy (cmd not possible)
-* -and errors returned by: p80211req_dorequest(..)
-*
-* by: Collin R. Mulliner <collin@mulliner.org>
-----------------------------------------------------------------*/
+ * p80211knetdev_set_mac_address
+ *
+ * Handles the ioctl for changing the MACAddress of a netdevice
+ *
+ * references: linux/netdevice.h and drivers/net/net_init.c
+ *
+ * NOTE: [MSM] We only prevent address changes when the netdev is
+ * up. We don't control anything based on dot11 state. If the
+ * address is changed on a STA that's currently associated, you
+ * will probably lose the ability to send and receive data frames.
+ * Just be aware. Therefore, this should usually only be done
+ * prior to scan/join/auth/assoc.
+ *
+ * Arguments:
+ * dev netdevice struct
+ * addr the new MACAddress (a struct)
+ *
+ * Returns:
+ * zero on success, a negative errno on failure. Possible values:
+ * -EBUSY device is busy (cmd not possible)
+ * -and errors returned by: p80211req_dorequest(..)
+ *
+ * by: Collin R. Mulliner <collin@mulliner.org>
+ *----------------------------------------------------------------
+ */
static int p80211knetdev_set_mac_address(struct net_device *dev, void *addr)
{
struct sockaddr *new_addr = addr;
@@ -629,9 +638,9 @@ static int p80211knetdev_set_mac_address(struct net_device *dev, void *addr)
resultcode = &dot11req.resultcode;
/* Set up a dot11req_mibset */
- memset(&dot11req, 0, sizeof(struct p80211msg_dot11req_mibset));
+ memset(&dot11req, 0, sizeof(dot11req));
dot11req.msgcode = DIDmsg_dot11req_mibset;
- dot11req.msglen = sizeof(struct p80211msg_dot11req_mibset);
+ dot11req.msglen = sizeof(dot11req);
memcpy(dot11req.devname,
((struct wlandevice *)dev->ml_priv)->name, WLAN_DEVNAMELEN_MAX - 1);
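After the message header is filled in as above, the request is handed down to the MLME layer; a sketch of the tail of the flow, assuming the driver's usual resultcode convention (the -EADDRNOTAVAIL mapping is shown as an assumption):

	result = p80211req_dorequest(wlandev, (u8 *)&dot11req);
	/* result covers transport errors; the request itself reports back
	 * through dot11req.resultcode, which must be checked separately.
	 */
	if (result == 0 && *resultcode != P80211ENUM_resultcode_success)
		result = -EADDRNOTAVAIL;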
@@ -669,18 +678,6 @@ static int p80211knetdev_set_mac_address(struct net_device *dev, void *addr)
return result;
}
-static int wlan_change_mtu(struct net_device *dev, int new_mtu)
-{
- /* 2312 is max 802.11 payload, 20 is overhead, (ether + llc +snap)
- and another 8 for wep. */
- if ((new_mtu < 68) || (new_mtu > (2312 - 20 - 8)))
- return -EINVAL;
-
- dev->mtu = new_mtu;
-
- return 0;
-}
-
static const struct net_device_ops p80211_netdev_ops = {
.ndo_init = p80211knetdev_init,
.ndo_open = p80211knetdev_open,
@@ -690,33 +687,33 @@ static const struct net_device_ops p80211_netdev_ops = {
.ndo_do_ioctl = p80211knetdev_do_ioctl,
.ndo_set_mac_address = p80211knetdev_set_mac_address,
.ndo_tx_timeout = p80211knetdev_tx_timeout,
- .ndo_change_mtu = wlan_change_mtu,
.ndo_validate_addr = eth_validate_addr,
};
/*----------------------------------------------------------------
-* wlan_setup
-*
-* Roughly matches the functionality of ether_setup. Here
-* we set up any members of the wlandevice structure that are common
-* to all devices. Additionally, we allocate a linux 'struct device'
-* and perform the same setup as ether_setup.
-*
-* Note: It's important that the caller have setup the wlandev->name
-* ptr prior to calling this function.
-*
-* Arguments:
-* wlandev ptr to the wlandev structure for the
-* interface.
-* physdev ptr to usb device
-* Returns:
-* zero on success, non-zero otherwise.
-* Call Context:
-* Should be process thread. We'll assume it might be
-* interrupt though. When we add support for statically
-* compiled drivers, this function will be called in the
-* context of the kernel startup code.
-----------------------------------------------------------------*/
+ * wlan_setup
+ *
+ * Roughly matches the functionality of ether_setup. Here
+ * we set up any members of the wlandevice structure that are common
+ * to all devices. Additionally, we allocate a linux 'struct device'
+ * and perform the same setup as ether_setup.
+ *
+ * Note: It's important that the caller has set up the wlandev->name
+ * ptr prior to calling this function.
+ *
+ * Arguments:
+ * wlandev ptr to the wlandev structure for the
+ * interface.
+ * physdev ptr to usb device
+ * Returns:
+ * zero on success, non-zero otherwise.
+ * Call Context:
+ * Should be process thread. We'll assume it might be
+ * interrupt though. When we add support for statically
+ * compiled drivers, this function will be called in the
+ * context of the kernel startup code.
+ *----------------------------------------------------------------
+ */
int wlan_setup(struct wlandevice *wlandev, struct device *physdev)
{
int result = 0;
@@ -756,6 +753,11 @@ int wlan_setup(struct wlandevice *wlandev, struct device *physdev)
wdev->wiphy = wiphy;
wdev->iftype = NL80211_IFTYPE_STATION;
netdev->ieee80211_ptr = wdev;
+ netdev->min_mtu = 68;
+ /* 2312 is max 802.11 payload, 20 is overhead,
+ * (ether + llc + snap) and another 8 for wep.
+ */
+ netdev->max_mtu = (2312 - 20 - 8);
netif_stop_queue(netdev);
netif_carrier_off(netdev);
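This hunk is the flip side of the wlan_change_mtu() removal earlier in the file: now that struct net_device carries min_mtu/max_mtu fields, the net core validates the range in dev_set_mtu(), so per-driver ndo_change_mtu implementations that only check bounds can be dropped. Roughly what the core now does on the driver's behalf, sketched:

	/* generic range check performed by the core (dev_set_mtu) */
	if (new_mtu < dev->min_mtu || new_mtu > dev->max_mtu)
		return -EINVAL;
	dev->mtu = new_mtu;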
@@ -765,24 +767,25 @@ int wlan_setup(struct wlandevice *wlandev, struct device *physdev)
}
/*----------------------------------------------------------------
-* wlan_unsetup
-*
-* This function is paired with the wlan_setup routine. It should
-* be called after unregister_wlandev. Basically, all it does is
-* free the 'struct device' that's associated with the wlandev.
-* We do it here because the 'struct device' isn't allocated
-* explicitly in the driver code, it's done in wlan_setup. To
-* do the free in the driver might seem like 'magic'.
-*
-* Arguments:
-* wlandev ptr to the wlandev structure for the
-* interface.
-* Call Context:
-* Should be process thread. We'll assume it might be
-* interrupt though. When we add support for statically
-* compiled drivers, this function will be called in the
-* context of the kernel startup code.
-----------------------------------------------------------------*/
+ * wlan_unsetup
+ *
+ * This function is paired with the wlan_setup routine. It should
+ * be called after unregister_wlandev. Basically, all it does is
+ * free the 'struct device' that's associated with the wlandev.
+ * We do it here because the 'struct device' isn't allocated
+ * explicitly in the driver code, it's done in wlan_setup. To
+ * do the free in the driver might seem like 'magic'.
+ *
+ * Arguments:
+ * wlandev ptr to the wlandev structure for the
+ * interface.
+ * Call Context:
+ * Should be process thread. We'll assume it might be
+ * interrupt though. When we add support for statically
+ * compiled drivers, this function will be called in the
+ * context of the kernel startup code.
+ *----------------------------------------------------------------
+ */
void wlan_unsetup(struct wlandevice *wlandev)
{
struct wireless_dev *wdev;
@@ -799,46 +802,48 @@ void wlan_unsetup(struct wlandevice *wlandev)
}
/*----------------------------------------------------------------
-* register_wlandev
-*
-* Roughly matches the functionality of register_netdev. This function
-* is called after the driver has successfully probed and set up the
-* resources for the device. It's now ready to become a named device
-* in the Linux system.
-*
-* First we allocate a name for the device (if not already set), then
-* we call the Linux function register_netdevice.
-*
-* Arguments:
-* wlandev ptr to the wlandev structure for the
-* interface.
-* Returns:
-* zero on success, non-zero otherwise.
-* Call Context:
-* Can be either interrupt or not.
-----------------------------------------------------------------*/
+ * register_wlandev
+ *
+ * Roughly matches the functionality of register_netdev. This function
+ * is called after the driver has successfully probed and set up the
+ * resources for the device. It's now ready to become a named device
+ * in the Linux system.
+ *
+ * First we allocate a name for the device (if not already set), then
+ * we call the Linux function register_netdevice.
+ *
+ * Arguments:
+ * wlandev ptr to the wlandev structure for the
+ * interface.
+ * Returns:
+ * zero on success, non-zero otherwise.
+ * Call Context:
+ * Can be either interrupt or not.
+ *----------------------------------------------------------------
+ */
int register_wlandev(struct wlandevice *wlandev)
{
return register_netdev(wlandev->netdev);
}
/*----------------------------------------------------------------
-* unregister_wlandev
-*
-* Roughly matches the functionality of unregister_netdev. This
-* function is called to remove a named device from the system.
-*
-* First we tell linux that the device should no longer exist.
-* Then we remove it from the list of known wlan devices.
-*
-* Arguments:
-* wlandev ptr to the wlandev structure for the
-* interface.
-* Returns:
-* zero on success, non-zero otherwise.
-* Call Context:
-* Can be either interrupt or not.
-----------------------------------------------------------------*/
+ * unregister_wlandev
+ *
+ * Roughly matches the functionality of unregister_netdev. This
+ * function is called to remove a named device from the system.
+ *
+ * First we tell linux that the device should no longer exist.
+ * Then we remove it from the list of known wlan devices.
+ *
+ * Arguments:
+ * wlandev ptr to the wlandev structure for the
+ * interface.
+ * Returns:
+ * zero on success, non-zero otherwise.
+ * Call Context:
+ * Can be either interrupt or not.
+ *----------------------------------------------------------------
+ */
int unregister_wlandev(struct wlandevice *wlandev)
{
struct sk_buff *skb;
@@ -853,35 +858,36 @@ int unregister_wlandev(struct wlandevice *wlandev)
}
/*----------------------------------------------------------------
-* p80211netdev_hwremoved
-*
-* Hardware removed notification. This function should be called
-* immediately after an MSD has detected that the underlying hardware
-* has been yanked out from under us. The primary things we need
-* to do are:
-* - Mark the wlandev
-* - Prevent any further traffic from the knetdev i/f
-* - Prevent any further requests from mgmt i/f
-* - If there are any waitq'd mgmt requests or mgmt-frame exchanges,
-* shut them down.
-* - Call the MSD hwremoved function.
-*
-* The remainder of the cleanup will be handled by unregister().
-* Our primary goal here is to prevent as much tickling of the MSD
-* as possible since the MSD is already in a 'wounded' state.
-*
-* TODO: As new features are added, this function should be
-* updated.
-*
-* Arguments:
-* wlandev WLAN network device structure
-* Returns:
-* nothing
-* Side effects:
-*
-* Call context:
-* Usually interrupt.
-----------------------------------------------------------------*/
+ * p80211netdev_hwremoved
+ *
+ * Hardware removed notification. This function should be called
+ * immediately after an MSD has detected that the underlying hardware
+ * has been yanked out from under us. The primary things we need
+ * to do are:
+ * - Mark the wlandev
+ * - Prevent any further traffic from the knetdev i/f
+ * - Prevent any further requests from mgmt i/f
+ * - If there are any waitq'd mgmt requests or mgmt-frame exchanges,
+ * shut them down.
+ * - Call the MSD hwremoved function.
+ *
+ * The remainder of the cleanup will be handled by unregister().
+ * Our primary goal here is to prevent as much tickling of the MSD
+ * as possible since the MSD is already in a 'wounded' state.
+ *
+ * TODO: As new features are added, this function should be
+ * updated.
+ *
+ * Arguments:
+ * wlandev WLAN network device structure
+ * Returns:
+ * nothing
+ * Side effects:
+ *
+ * Call context:
+ * Usually interrupt.
+ *----------------------------------------------------------------
+ */
void p80211netdev_hwremoved(struct wlandevice *wlandev)
{
wlandev->hwremoved = 1;
@@ -892,26 +898,27 @@ void p80211netdev_hwremoved(struct wlandevice *wlandev)
}
/*----------------------------------------------------------------
-* p80211_rx_typedrop
-*
-* Classifies the frame, increments the appropriate counter, and
-* returns 0|1|2 indicating whether the driver should handle, ignore, or
-* drop the frame
-*
-* Arguments:
-* wlandev wlan device structure
-* fc frame control field
-*
-* Returns:
-* zero if the frame should be handled by the driver,
-* one if the frame should be ignored
-* anything else means we drop it.
-*
-* Side effects:
-*
-* Call context:
-* interrupt
-----------------------------------------------------------------*/
+ * p80211_rx_typedrop
+ *
+ * Classifies the frame, increments the appropriate counter, and
+ * returns 0|1|2 indicating whether the driver should handle, ignore, or
+ * drop the frame
+ *
+ * Arguments:
+ * wlandev wlan device structure
+ * fc frame control field
+ *
+ * Returns:
+ * zero if the frame should be handled by the driver,
+ * one if the frame should be ignored
+ * anything else means we drop it.
+ *
+ * Side effects:
+ *
+ * Call context:
+ * interrupt
+ *----------------------------------------------------------------
+ */
static int p80211_rx_typedrop(struct wlandevice *wlandev, u16 fc)
{
u16 ftype;
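For context on the function this hunk opens: the 802.11 frame control field packs the protocol version in bits 0-1, the frame type in bits 2-3, and the subtype in bits 4-7. A self-contained sketch of the handle/ignore/drop decision described in the comment above (the driver uses its own WLAN_GET_FC_* accessors; plain shifts are shown here):

static int classify_fc(u16 fc)		/* fc already converted to host order */
{
	unsigned int ftype = (fc >> 2) & 0x3;	/* bits 2-3: frame type */

	switch (ftype) {
	case 0:  return 1;	/* management: counted, then ignored here */
	case 1:  return 1;	/* control: also not ours to process */
	case 2:  return 0;	/* data: handled by the driver */
	default: return 2;	/* reserved type: drop */
	}
}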
diff --git a/drivers/staging/wlan-ng/p80211netdev.h b/drivers/staging/wlan-ng/p80211netdev.h
index 1e6a774fc7c5..8e0d08298c8b 100644
--- a/drivers/staging/wlan-ng/p80211netdev.h
+++ b/drivers/staging/wlan-ng/p80211netdev.h
@@ -1,54 +1,54 @@
/* p80211netdev.h
-*
-* WLAN net device structure and functions
-*
-* Copyright (C) 1999 AbsoluteValue Systems, Inc. All Rights Reserved.
-* --------------------------------------------------------------------
-*
-* linux-wlan
-*
-* The contents of this file are subject to the Mozilla Public
-* License Version 1.1 (the "License"); you may not use this file
-* except in compliance with the License. You may obtain a copy of
-* the License at http://www.mozilla.org/MPL/
-*
-* Software distributed under the License is distributed on an "AS
-* IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
-* implied. See the License for the specific language governing
-* rights and limitations under the License.
-*
-* Alternatively, the contents of this file may be used under the
-* terms of the GNU Public License version 2 (the "GPL"), in which
-* case the provisions of the GPL are applicable instead of the
-* above. If you wish to allow the use of your version of this file
-* only under the terms of the GPL and not to allow others to use
-* your version of this file under the MPL, indicate your decision
-* by deleting the provisions above and replace them with the notice
-* and other provisions required by the GPL. If you do not delete
-* the provisions above, a recipient may use your version of this
-* file under either the MPL or the GPL.
-*
-* --------------------------------------------------------------------
-*
-* Inquiries regarding the linux-wlan Open Source project can be
-* made directly to:
-*
-* AbsoluteValue Systems Inc.
-* info@linux-wlan.com
-* http://www.linux-wlan.com
-*
-* --------------------------------------------------------------------
-*
-* Portions of the development of this software were funded by
-* Intersil Corporation as part of PRISM(R) chipset product development.
-*
-* --------------------------------------------------------------------
-*
-* This file declares the structure type that represents each wlan
-* interface.
-*
-* --------------------------------------------------------------------
-*/
+ *
+ * WLAN net device structure and functions
+ *
+ * Copyright (C) 1999 AbsoluteValue Systems, Inc. All Rights Reserved.
+ * --------------------------------------------------------------------
+ *
+ * linux-wlan
+ *
+ * The contents of this file are subject to the Mozilla Public
+ * License Version 1.1 (the "License"); you may not use this file
+ * except in compliance with the License. You may obtain a copy of
+ * the License at http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS
+ * IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
+ * implied. See the License for the specific language governing
+ * rights and limitations under the License.
+ *
+ * Alternatively, the contents of this file may be used under the
+ * terms of the GNU Public License version 2 (the "GPL"), in which
+ * case the provisions of the GPL are applicable instead of the
+ * above. If you wish to allow the use of your version of this file
+ * only under the terms of the GPL and not to allow others to use
+ * your version of this file under the MPL, indicate your decision
+ * by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL. If you do not delete
+ * the provisions above, a recipient may use your version of this
+ * file under either the MPL or the GPL.
+ *
+ * --------------------------------------------------------------------
+ *
+ * Inquiries regarding the linux-wlan Open Source project can be
+ * made directly to:
+ *
+ * AbsoluteValue Systems Inc.
+ * info@linux-wlan.com
+ * http://www.linux-wlan.com
+ *
+ * --------------------------------------------------------------------
+ *
+ * Portions of the development of this software were funded by
+ * Intersil Corporation as part of PRISM(R) chipset product development.
+ *
+ * --------------------------------------------------------------------
+ *
+ * This file declares the structure type that represents each wlan
+ * interface.
+ *
+ * --------------------------------------------------------------------
+ */
#ifndef _LINUX_P80211NETDEV_H
#define _LINUX_P80211NETDEV_H
@@ -143,7 +143,7 @@ extern struct iw_handler_def p80211wext_handler_def;
#define NUM_WEPKEYS 4
#define MAX_KEYLEN 32
-#define HOSTWEP_DEFAULTKEY_MASK (BIT(1)|BIT(0))
+#define HOSTWEP_DEFAULTKEY_MASK GENMASK(1, 0)
#define HOSTWEP_SHAREDKEY BIT(3)
#define HOSTWEP_DECRYPT BIT(4)
#define HOSTWEP_ENCRYPT BIT(5)
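GENMASK(h, l) expands to a contiguous mask covering bits l through h inclusive, so GENMASK(1, 0) is 0x3, identical to BIT(1) | BIT(0) but clearer about intent: the field is a 2-bit key index. Typical use, sketched:

	/* extract the 2-bit default key index (0-3) from the hostwep flags */
	int keynum = wlandev->hostwep & HOSTWEP_DEFAULTKEY_MASK;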
diff --git a/drivers/staging/wlan-ng/p80211req.c b/drivers/staging/wlan-ng/p80211req.c
index d43e85b5d49b..621df98183bf 100644
--- a/drivers/staging/wlan-ng/p80211req.c
+++ b/drivers/staging/wlan-ng/p80211req.c
@@ -1,54 +1,54 @@
/* src/p80211/p80211req.c
-*
-* Request/Indication/MacMgmt interface handling functions
-*
-* Copyright (C) 1999 AbsoluteValue Systems, Inc. All Rights Reserved.
-* --------------------------------------------------------------------
-*
-* linux-wlan
-*
-* The contents of this file are subject to the Mozilla Public
-* License Version 1.1 (the "License"); you may not use this file
-* except in compliance with the License. You may obtain a copy of
-* the License at http://www.mozilla.org/MPL/
-*
-* Software distributed under the License is distributed on an "AS
-* IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
-* implied. See the License for the specific language governing
-* rights and limitations under the License.
-*
-* Alternatively, the contents of this file may be used under the
-* terms of the GNU Public License version 2 (the "GPL"), in which
-* case the provisions of the GPL are applicable instead of the
-* above. If you wish to allow the use of your version of this file
-* only under the terms of the GPL and not to allow others to use
-* your version of this file under the MPL, indicate your decision
-* by deleting the provisions above and replace them with the notice
-* and other provisions required by the GPL. If you do not delete
-* the provisions above, a recipient may use your version of this
-* file under either the MPL or the GPL.
-*
-* --------------------------------------------------------------------
-*
-* Inquiries regarding the linux-wlan Open Source project can be
-* made directly to:
-*
-* AbsoluteValue Systems Inc.
-* info@linux-wlan.com
-* http://www.linux-wlan.com
-*
-* --------------------------------------------------------------------
-*
-* Portions of the development of this software were funded by
-* Intersil Corporation as part of PRISM(R) chipset product development.
-*
-* --------------------------------------------------------------------
-*
-* This file contains the functions, types, and macros to support the
-* MLME request interface that's implemented via the device ioctls.
-*
-* --------------------------------------------------------------------
-*/
+ *
+ * Request/Indication/MacMgmt interface handling functions
+ *
+ * Copyright (C) 1999 AbsoluteValue Systems, Inc. All Rights Reserved.
+ * --------------------------------------------------------------------
+ *
+ * linux-wlan
+ *
+ * The contents of this file are subject to the Mozilla Public
+ * License Version 1.1 (the "License"); you may not use this file
+ * except in compliance with the License. You may obtain a copy of
+ * the License at http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS
+ * IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
+ * implied. See the License for the specific language governing
+ * rights and limitations under the License.
+ *
+ * Alternatively, the contents of this file may be used under the
+ * terms of the GNU Public License version 2 (the "GPL"), in which
+ * case the provisions of the GPL are applicable instead of the
+ * above. If you wish to allow the use of your version of this file
+ * only under the terms of the GPL and not to allow others to use
+ * your version of this file under the MPL, indicate your decision
+ * by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL. If you do not delete
+ * the provisions above, a recipient may use your version of this
+ * file under either the MPL or the GPL.
+ *
+ * --------------------------------------------------------------------
+ *
+ * Inquiries regarding the linux-wlan Open Source project can be
+ * made directly to:
+ *
+ * AbsoluteValue Systems Inc.
+ * info@linux-wlan.com
+ * http://www.linux-wlan.com
+ *
+ * --------------------------------------------------------------------
+ *
+ * Portions of the development of this software were funded by
+ * Intersil Corporation as part of PRISM(R) chipset product development.
+ *
+ * --------------------------------------------------------------------
+ *
+ * This file contains the functions, types, and macros to support the
+ * MLME request interface that's implemented via the device ioctls.
+ *
+ * --------------------------------------------------------------------
+ */
#include <linux/module.h>
#include <linux/kernel.h>
@@ -72,10 +72,11 @@
#include "p80211metastruct.h"
#include "p80211req.h"
-static void p80211req_handlemsg(struct wlandevice *wlandev, struct p80211msg *msg);
+static void p80211req_handlemsg(struct wlandevice *wlandev,
+ struct p80211msg *msg);
static void p80211req_mibset_mibget(struct wlandevice *wlandev,
- struct p80211msg_dot11req_mibget *mib_msg,
- int isget);
+ struct p80211msg_dot11req_mibget *mib_msg,
+ int isget);
static void p80211req_handle_action(struct wlandevice *wlandev, u32 *data,
int isget, u32 flag)
@@ -93,21 +94,22 @@ static void p80211req_handle_action(struct wlandevice *wlandev, u32 *data,
}
/*----------------------------------------------------------------
-* p80211req_dorequest
-*
-* Handles an MLME request/confirm message.
-*
-* Arguments:
-* wlandev WLAN device struct
-* msgbuf Buffer containing a request message
-*
-* Returns:
-* 0 on success, an errno otherwise
-*
-* Call context:
-* Potentially blocks the caller, so it's a good idea to
-* not call this function from an interrupt context.
-----------------------------------------------------------------*/
+ * p80211req_dorequest
+ *
+ * Handles an MLME request/confirm message.
+ *
+ * Arguments:
+ * wlandev WLAN device struct
+ * msgbuf Buffer containing a request message
+ *
+ * Returns:
+ * 0 on success, an errno otherwise
+ *
+ * Call context:
+ * Potentially blocks the caller, so it's a good idea to
+ * not call this function from an interrupt context.
+ *----------------------------------------------------------------
+ */
int p80211req_dorequest(struct wlandevice *wlandev, u8 *msgbuf)
{
struct p80211msg *msg = (struct p80211msg *)msgbuf;
@@ -122,7 +124,7 @@ int p80211req_dorequest(struct wlandevice *wlandev, u8 *msgbuf)
/* Check Permissions */
if (!capable(CAP_NET_ADMIN) &&
- (msg->msgcode != DIDmsg_dot11req_mibget)) {
+ (msg->msgcode != DIDmsg_dot11req_mibget)) {
netdev_err(wlandev->netdev,
"%s: only dot11req_mibget allowed for non-root.\n",
wlandev->name);
@@ -130,7 +132,7 @@ int p80211req_dorequest(struct wlandevice *wlandev, u8 *msgbuf)
}
/* Check for busy status */
- if (test_and_set_bit(1, &(wlandev->request_pending)))
+ if (test_and_set_bit(1, &wlandev->request_pending))
return -EBUSY;
/* Allow p80211 to look at msg and handle if desired. */
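The busy check above uses bit 1 of request_pending as a lightweight lock: test_and_set_bit() atomically sets the bit and returns its previous value, so a concurrent second request observes 1 and bails out with -EBUSY instead of racing; the matching clear_bit() in the next hunk releases it. The pattern in isolation:

	if (test_and_set_bit(1, &wlandev->request_pending))
		return -EBUSY;			/* someone else got here first */

	/* ... single-threaded section: dispatch the MLME request ... */

	clear_bit(1, &wlandev->request_pending);	/* release on every exit path */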
@@ -139,35 +141,36 @@ int p80211req_dorequest(struct wlandevice *wlandev, u8 *msgbuf)
p80211req_handlemsg(wlandev, msg);
/* Pass it down to wlandev via wlandev->mlmerequest */
- if (wlandev->mlmerequest != NULL)
+ if (wlandev->mlmerequest)
wlandev->mlmerequest(wlandev, msg);
- clear_bit(1, &(wlandev->request_pending));
+ clear_bit(1, &wlandev->request_pending);
return 0; /* if result==0, msg->status still may contain an err */
}
/*----------------------------------------------------------------
-* p80211req_handlemsg
-*
-* p80211 message handler. Primarily looks for messages that
-* belong to p80211 and then dispatches the appropriate response.
-* TODO: we don't do anything yet. Once the linuxMIB is better
-* defined we'll need a get/set handler.
-*
-* Arguments:
-* wlandev WLAN device struct
-* msg message structure
-*
-* Returns:
-* nothing (any results are set in the status field of the msg)
-*
-* Call context:
-* Process thread
-----------------------------------------------------------------*/
-static void p80211req_handlemsg(struct wlandevice *wlandev, struct p80211msg *msg)
+ * p80211req_handlemsg
+ *
+ * p80211 message handler. Primarily looks for messages that
+ * belong to p80211 and then dispatches the appropriate response.
+ * TODO: we don't do anything yet. Once the linuxMIB is better
+ * defined we'll need a get/set handler.
+ *
+ * Arguments:
+ * wlandev WLAN device struct
+ * msg message structure
+ *
+ * Returns:
+ * nothing (any results are set in the status field of the msg)
+ *
+ * Call context:
+ * Process thread
+ *----------------------------------------------------------------
+ */
+static void p80211req_handlemsg(struct wlandevice *wlandev,
+ struct p80211msg *msg)
{
switch (msg->msgcode) {
-
case DIDmsg_lnxreq_hostwep:{
struct p80211msg_lnxreq_hostwep *req =
(struct p80211msg_lnxreq_hostwep *)msg;
@@ -192,8 +195,8 @@ static void p80211req_handlemsg(struct wlandevice *wlandev, struct p80211msg *ms
}
static void p80211req_mibset_mibget(struct wlandevice *wlandev,
- struct p80211msg_dot11req_mibget *mib_msg,
- int isget)
+ struct p80211msg_dot11req_mibget *mib_msg,
+ int isget)
{
struct p80211itemd *mibitem = (struct p80211itemd *)mib_msg->mibattribute.data;
struct p80211pstrd *pstr = (struct p80211pstrd *)mibitem->data;
diff --git a/drivers/staging/wlan-ng/p80211req.h b/drivers/staging/wlan-ng/p80211req.h
index 8d3054c22a05..6c72f59993e0 100644
--- a/drivers/staging/wlan-ng/p80211req.h
+++ b/drivers/staging/wlan-ng/p80211req.h
@@ -1,49 +1,49 @@
/* p80211req.h
-*
-* Request handling functions
-*
-* Copyright (C) 1999 AbsoluteValue Systems, Inc. All Rights Reserved.
-* --------------------------------------------------------------------
-*
-* linux-wlan
-*
-* The contents of this file are subject to the Mozilla Public
-* License Version 1.1 (the "License"); you may not use this file
-* except in compliance with the License. You may obtain a copy of
-* the License at http://www.mozilla.org/MPL/
-*
-* Software distributed under the License is distributed on an "AS
-* IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
-* implied. See the License for the specific language governing
-* rights and limitations under the License.
-*
-* Alternatively, the contents of this file may be used under the
-* terms of the GNU Public License version 2 (the "GPL"), in which
-* case the provisions of the GPL are applicable instead of the
-* above. If you wish to allow the use of your version of this file
-* only under the terms of the GPL and not to allow others to use
-* your version of this file under the MPL, indicate your decision
-* by deleting the provisions above and replace them with the notice
-* and other provisions required by the GPL. If you do not delete
-* the provisions above, a recipient may use your version of this
-* file under either the MPL or the GPL.
-*
-* --------------------------------------------------------------------
-*
-* Inquiries regarding the linux-wlan Open Source project can be
-* made directly to:
-*
-* AbsoluteValue Systems Inc.
-* info@linux-wlan.com
-* http://www.linux-wlan.com
-*
-* --------------------------------------------------------------------
-*
-* Portions of the development of this software were funded by
-* Intersil Corporation as part of PRISM(R) chipset product development.
-*
-* --------------------------------------------------------------------
-*/
+ *
+ * Request handling functions
+ *
+ * Copyright (C) 1999 AbsoluteValue Systems, Inc. All Rights Reserved.
+ * --------------------------------------------------------------------
+ *
+ * linux-wlan
+ *
+ * The contents of this file are subject to the Mozilla Public
+ * License Version 1.1 (the "License"); you may not use this file
+ * except in compliance with the License. You may obtain a copy of
+ * the License at http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS
+ * IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
+ * implied. See the License for the specific language governing
+ * rights and limitations under the License.
+ *
+ * Alternatively, the contents of this file may be used under the
+ * terms of the GNU Public License version 2 (the "GPL"), in which
+ * case the provisions of the GPL are applicable instead of the
+ * above. If you wish to allow the use of your version of this file
+ * only under the terms of the GPL and not to allow others to use
+ * your version of this file under the MPL, indicate your decision
+ * by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL. If you do not delete
+ * the provisions above, a recipient may use your version of this
+ * file under either the MPL or the GPL.
+ *
+ * --------------------------------------------------------------------
+ *
+ * Inquiries regarding the linux-wlan Open Source project can be
+ * made directly to:
+ *
+ * AbsoluteValue Systems Inc.
+ * info@linux-wlan.com
+ * http://www.linux-wlan.com
+ *
+ * --------------------------------------------------------------------
+ *
+ * Portions of the development of this software were funded by
+ * Intersil Corporation as part of PRISM(R) chipset product development.
+ *
+ * --------------------------------------------------------------------
+ */
#ifndef _LINUX_P80211REQ_H
#define _LINUX_P80211REQ_H
diff --git a/drivers/staging/wlan-ng/p80211wep.c b/drivers/staging/wlan-ng/p80211wep.c
index 23b183738037..6492ffe59085 100644
--- a/drivers/staging/wlan-ng/p80211wep.c
+++ b/drivers/staging/wlan-ng/p80211wep.c
@@ -1,49 +1,49 @@
/* src/p80211/p80211wep.c
-*
-* WEP encode/decode for P80211.
-*
-* Copyright (C) 2002 AbsoluteValue Systems, Inc. All Rights Reserved.
-* --------------------------------------------------------------------
-*
-* linux-wlan
-*
-* The contents of this file are subject to the Mozilla Public
-* License Version 1.1 (the "License"); you may not use this file
-* except in compliance with the License. You may obtain a copy of
-* the License at http://www.mozilla.org/MPL/
-*
-* Software distributed under the License is distributed on an "AS
-* IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
-* implied. See the License for the specific language governing
-* rights and limitations under the License.
-*
-* Alternatively, the contents of this file may be used under the
-* terms of the GNU Public License version 2 (the "GPL"), in which
-* case the provisions of the GPL are applicable instead of the
-* above. If you wish to allow the use of your version of this file
-* only under the terms of the GPL and not to allow others to use
-* your version of this file under the MPL, indicate your decision
-* by deleting the provisions above and replace them with the notice
-* and other provisions required by the GPL. If you do not delete
-* the provisions above, a recipient may use your version of this
-* file under either the MPL or the GPL.
-*
-* --------------------------------------------------------------------
-*
-* Inquiries regarding the linux-wlan Open Source project can be
-* made directly to:
-*
-* AbsoluteValue Systems Inc.
-* info@linux-wlan.com
-* http://www.linux-wlan.com
-*
-* --------------------------------------------------------------------
-*
-* Portions of the development of this software were funded by
-* Intersil Corporation as part of PRISM(R) chipset product development.
-*
-* --------------------------------------------------------------------
-*/
+ *
+ * WEP encode/decode for P80211.
+ *
+ * Copyright (C) 2002 AbsoluteValue Systems, Inc. All Rights Reserved.
+ * --------------------------------------------------------------------
+ *
+ * linux-wlan
+ *
+ * The contents of this file are subject to the Mozilla Public
+ * License Version 1.1 (the "License"); you may not use this file
+ * except in compliance with the License. You may obtain a copy of
+ * the License at http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS
+ * IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
+ * implied. See the License for the specific language governing
+ * rights and limitations under the License.
+ *
+ * Alternatively, the contents of this file may be used under the
+ * terms of the GNU Public License version 2 (the "GPL"), in which
+ * case the provisions of the GPL are applicable instead of the
+ * above. If you wish to allow the use of your version of this file
+ * only under the terms of the GPL and not to allow others to use
+ * your version of this file under the MPL, indicate your decision
+ * by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL. If you do not delete
+ * the provisions above, a recipient may use your version of this
+ * file under either the MPL or the GPL.
+ *
+ * --------------------------------------------------------------------
+ *
+ * Inquiries regarding the linux-wlan Open Source project can be
+ * made directly to:
+ *
+ * AbsoluteValue Systems Inc.
+ * info@linux-wlan.com
+ * http://www.linux-wlan.com
+ *
+ * --------------------------------------------------------------------
+ *
+ * Portions of the development of this software were funded by
+ * Intersil Corporation as part of PRISM(R) chipset product development.
+ *
+ * --------------------------------------------------------------------
+ */
/*================================================================*/
/* System Includes */
@@ -52,8 +52,6 @@
#include <linux/wireless.h>
#include <linux/random.h>
#include <linux/kernel.h>
-
-
#include "p80211hdr.h"
#include "p80211types.h"
#include "p80211msg.h"
@@ -125,14 +123,13 @@ int wep_change_key(struct wlandevice *wlandev, int keynum, u8 *key, int keylen)
return -1;
if (keylen >= MAX_KEYLEN)
return -1;
- if (key == NULL)
+ if (!key)
return -1;
if (keynum < 0)
return -1;
if (keynum >= NUM_WEPKEYS)
return -1;
-
wlandev->wep_keylens[keynum] = keylen;
memcpy(wlandev->wep_keys[keynum], key, keylen);
@@ -176,7 +173,6 @@ int wep_decrypt(struct wlandevice *wlandev, u8 *buf, u32 len, int key_override,
keylen += 3; /* add in IV bytes */
-
/* set up the RC4 state */
for (i = 0; i < 256; i++)
s[i] = i;
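
The state initialization shown here is only the first half of RC4; the key-scheduling and keystream passes that follow in wep_decrypt()/wep_encrypt() fall outside this hunk. A minimal user-space sketch of the whole keystream operation, assuming the WEP convention of seeding with the 3-byte IV followed by the secret key:

#include <stddef.h>
#include <stdint.h>

/* RC4 over buf in place: key-scheduling (KSA), then keystream XOR
 * (PRGA). For WEP the seed is IV[3] || key, so seedlen = keylen + 3. */
static void rc4_xor(const uint8_t *seed, size_t seedlen,
		    uint8_t *buf, size_t len)
{
	uint8_t s[256], tmp;
	unsigned int i, j;
	size_t n;

	for (i = 0; i < 256; i++)
		s[i] = i;
	for (i = 0, j = 0; i < 256; i++) {	/* KSA */
		j = (j + s[i] + seed[i % seedlen]) & 0xff;
		tmp = s[i]; s[i] = s[j]; s[j] = tmp;
	}
	for (n = 0, i = 0, j = 0; n < len; n++) {	/* PRGA */
		i = (i + 1) & 0xff;
		j = (j + s[i]) & 0xff;
		tmp = s[i]; s[i] = s[j]; s[j] = tmp;
		buf[n] ^= s[(s[i] + s[j]) & 0xff];
	}
}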
@@ -217,8 +213,8 @@ int wep_decrypt(struct wlandevice *wlandev, u8 *buf, u32 len, int key_override,
}
/* encrypts in-place. */
-int wep_encrypt(struct wlandevice *wlandev, u8 *buf, u8 *dst, u32 len, int keynum,
- u8 *iv, u8 *icv)
+int wep_encrypt(struct wlandevice *wlandev, u8 *buf,
+ u8 *dst, u32 len, int keynum, u8 *iv, u8 *icv)
{
u32 i, j, k, crc, keylen;
u8 s[256], key[64];
diff --git a/drivers/staging/wlan-ng/prism2fw.c b/drivers/staging/wlan-ng/prism2fw.c
index 96aa21188669..2e349f87e738 100644
--- a/drivers/staging/wlan-ng/prism2fw.c
+++ b/drivers/staging/wlan-ng/prism2fw.c
@@ -1,49 +1,49 @@
/* from src/prism2/download/prism2dl.c
-*
-* utility for downloading prism2 images moved into kernelspace
-*
-* Copyright (C) 1999 AbsoluteValue Systems, Inc. All Rights Reserved.
-* --------------------------------------------------------------------
-*
-* linux-wlan
-*
-* The contents of this file are subject to the Mozilla Public
-* License Version 1.1 (the "License"); you may not use this file
-* except in compliance with the License. You may obtain a copy of
-* the License at http://www.mozilla.org/MPL/
-*
-* Software distributed under the License is distributed on an "AS
-* IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
-* implied. See the License for the specific language governing
-* rights and limitations under the License.
-*
-* Alternatively, the contents of this file may be used under the
-* terms of the GNU Public License version 2 (the "GPL"), in which
-* case the provisions of the GPL are applicable instead of the
-* above. If you wish to allow the use of your version of this file
-* only under the terms of the GPL and not to allow others to use
-* your version of this file under the MPL, indicate your decision
-* by deleting the provisions above and replace them with the notice
-* and other provisions required by the GPL. If you do not delete
-* the provisions above, a recipient may use your version of this
-* file under either the MPL or the GPL.
-*
-* --------------------------------------------------------------------
-*
-* Inquiries regarding the linux-wlan Open Source project can be
-* made directly to:
-*
-* AbsoluteValue Systems Inc.
-* info@linux-wlan.com
-* http://www.linux-wlan.com
-*
-* --------------------------------------------------------------------
-*
-* Portions of the development of this software were funded by
-* Intersil Corporation as part of PRISM(R) chipset product development.
-*
-* --------------------------------------------------------------------
-*/
+ *
+ * utility for downloading prism2 images moved into kernelspace
+ *
+ * Copyright (C) 1999 AbsoluteValue Systems, Inc. All Rights Reserved.
+ * --------------------------------------------------------------------
+ *
+ * linux-wlan
+ *
+ * The contents of this file are subject to the Mozilla Public
+ * License Version 1.1 (the "License"); you may not use this file
+ * except in compliance with the License. You may obtain a copy of
+ * the License at http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS
+ * IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
+ * implied. See the License for the specific language governing
+ * rights and limitations under the License.
+ *
+ * Alternatively, the contents of this file may be used under the
+ * terms of the GNU Public License version 2 (the "GPL"), in which
+ * case the provisions of the GPL are applicable instead of the
+ * above. If you wish to allow the use of your version of this file
+ * only under the terms of the GPL and not to allow others to use
+ * your version of this file under the MPL, indicate your decision
+ * by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL. If you do not delete
+ * the provisions above, a recipient may use your version of this
+ * file under either the MPL or the GPL.
+ *
+ * --------------------------------------------------------------------
+ *
+ * Inquiries regarding the linux-wlan Open Source project can be
+ * made directly to:
+ *
+ * AbsoluteValue Systems Inc.
+ * info@linux-wlan.com
+ * http://www.linux-wlan.com
+ *
+ * --------------------------------------------------------------------
+ *
+ * Portions of the development of this software were funded by
+ * Intersil Corporation as part of PRISM(R) chipset product development.
+ *
+ * --------------------------------------------------------------------
+ */
/*================================================================*/
/* System Includes */
@@ -124,7 +124,7 @@ struct imgchunk {
/* Data records */
static unsigned int ns3data;
-static struct s3datarec s3data[S3DATA_MAX];
+static struct s3datarec *s3data;
/* Plug records */
static unsigned int ns3plug;
@@ -161,7 +161,7 @@ static struct hfa384x_caplevel priid;
/* Local Function Declarations */
static int prism2_fwapply(const struct ihex_binrec *rfptr,
-struct wlandevice *wlandev);
+ struct wlandevice *wlandev);
static int read_fwfile(const struct ihex_binrec *rfptr);
@@ -172,13 +172,15 @@ static int read_cardpda(struct pda *pda, struct wlandevice *wlandev);
static int mkpdrlist(struct pda *pda);
static int plugimage(struct imgchunk *fchunk, unsigned int nfchunks,
- struct s3plugrec *s3plug, unsigned int ns3plug, struct pda *pda);
+ struct s3plugrec *s3plug, unsigned int ns3plug,
+ struct pda *pda);
static int crcimage(struct imgchunk *fchunk, unsigned int nfchunks,
- struct s3crcrec *s3crc, unsigned int ns3crc);
+ struct s3crcrec *s3crc, unsigned int ns3crc);
static int writeimage(struct wlandevice *wlandev, struct imgchunk *fchunk,
- unsigned int nfchunks);
+ unsigned int nfchunks);
+
static void free_chunks(struct imgchunk *fchunk, unsigned int *nfchunks);
static void free_srecs(void);
@@ -189,30 +191,31 @@ static int validate_identity(void);
/* Function Definitions */
/*----------------------------------------------------------------
-* prism2_fwtry
-*
-* Try and get firmware into memory
-*
-* Arguments:
-* udev usb device structure
-* wlandev wlan device structure
-*
-* Returns:
-* 0 - success
-* ~0 - failure
-----------------------------------------------------------------*/
+ * prism2_fwtry
+ *
+ * Try and get firmware into memory
+ *
+ * Arguments:
+ * udev usb device structure
+ * wlandev wlan device structure
+ *
+ * Returns:
+ * 0 - success
+ * ~0 - failure
+ *----------------------------------------------------------------
+ */
static int prism2_fwtry(struct usb_device *udev, struct wlandevice *wlandev)
{
const struct firmware *fw_entry = NULL;
netdev_info(wlandev->netdev, "prism2_usb: Checking for firmware %s\n",
- PRISM2_USB_FWFILE);
+ PRISM2_USB_FWFILE);
if (request_ihex_firmware(&fw_entry,
PRISM2_USB_FWFILE, &udev->dev) != 0) {
netdev_info(wlandev->netdev,
- "prism2_usb: Firmware not available, but not essential\n");
+ "prism2_usb: Firmware not available, but not essential\n");
netdev_info(wlandev->netdev,
- "prism2_usb: can continue to use card anyway.\n");
+ "prism2_usb: can continue to use card anyway.\n");
return 1;
}
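
For context, the firmware fetch above follows the standard <linux/ihex.h> pattern: request, walk the binrec chain, release. A sketch of that skeleton (fw_walk is a hypothetical name; the record fields really are big endian in this API):

#include <linux/device.h>
#include <linux/firmware.h>
#include <linux/ihex.h>

/* Fetch an ihex-format firmware image and walk its records. */
static int fw_walk(struct device *dev, const char *name)
{
	const struct firmware *fw;
	const struct ihex_binrec *rec;
	int ret;

	ret = request_ihex_firmware(&fw, name, dev);
	if (ret)
		return ret;	/* file missing or malformed */

	/* ihex_next_binrec() returns NULL at the terminating record */
	for (rec = (const struct ihex_binrec *)fw->data; rec;
	     rec = ihex_next_binrec(rec))
		dev_dbg(dev, "record @0x%08x len %u\n",
			be32_to_cpu(rec->addr), be16_to_cpu(rec->len));

	release_firmware(fw);
	return 0;
}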
@@ -226,18 +229,19 @@ static int prism2_fwtry(struct usb_device *udev, struct wlandevice *wlandev)
}
/*----------------------------------------------------------------
-* prism2_fwapply
-*
-* Apply the firmware loaded into memory
-*
-* Arguments:
-* rfptr firmware image in kernel memory
-* wlandev device
-*
-* Returns:
-* 0 - success
-* ~0 - failure
-----------------------------------------------------------------*/
+ * prism2_fwapply
+ *
+ * Apply the firmware loaded into memory
+ *
+ * Arguments:
+ * rfptr firmware image in kernel memory
+ * wlandev device
+ *
+ * Returns:
+ * 0 - success
+ * ~0 - failure
+ *----------------------------------------------------------------
+ */
static int prism2_fwapply(const struct ihex_binrec *rfptr,
struct wlandevice *wlandev)
{
@@ -248,7 +252,12 @@ static int prism2_fwapply(const struct ihex_binrec *rfptr,
/* Initialize the data structures */
ns3data = 0;
- memset(s3data, 0, sizeof(s3data));
+ s3data = kcalloc(S3DATA_MAX, sizeof(*s3data), GFP_KERNEL);
+ if (!s3data) {
+ result = -ENOMEM;
+ goto out;
+ }
+
ns3plug = 0;
memset(s3plug, 0, sizeof(s3plug));
ns3crc = 0;
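
The hunk above replaces the file-scope s3data[S3DATA_MAX] array with a per-run heap allocation; kcalloc() zero-fills, so the old memset() goes away with it. A minimal sketch of the pattern, with a hypothetical record type standing in for s3datarec:

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/types.h>

#define NREC_MAX 1024			/* hypothetical cap, as S3DATA_MAX is */

struct rec { u32 addr; u32 len; };	/* stand-in for s3datarec */

static struct rec *recs;	/* was: static struct rec recs[NREC_MAX]; */

static int recs_init(void)
{
	/* kcalloc() zero-fills and checks NREC_MAX * size for overflow */
	recs = kcalloc(NREC_MAX, sizeof(*recs), GFP_KERNEL);
	return recs ? 0 : -ENOMEM;
}

static void recs_free(void)
{
	kfree(recs);		/* kfree(NULL) would also be a no-op */
	recs = NULL;		/* don't leave a dangling pointer behind */
}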
@@ -372,24 +381,25 @@ out:
}
/*----------------------------------------------------------------
-* crcimage
-*
-* Adds a CRC16 in the two bytes prior to each block identified by
-* an S3 CRC record. Currently, we don't actually do a CRC we just
-* insert the value 0xC0DE in hfa384x order.
-*
-* Arguments:
-* fchunk Array of image chunks
-* nfchunks Number of image chunks
-* s3crc Array of crc records
-* ns3crc Number of crc records
-*
-* Returns:
-* 0 success
-* ~0 failure
-----------------------------------------------------------------*/
+ * crcimage
+ *
+ * Adds a CRC16 in the two bytes prior to each block identified by
+ * an S3 CRC record. Currently, we don't actually do a CRC; we just
+ * insert the value 0xC0DE in hfa384x order.
+ *
+ * Arguments:
+ * fchunk Array of image chunks
+ * nfchunks Number of image chunks
+ * s3crc Array of crc records
+ * ns3crc Number of crc records
+ *
+ * Returns:
+ * 0 success
+ * ~0 failure
+ *----------------------------------------------------------------
+ */
static int crcimage(struct imgchunk *fchunk, unsigned int nfchunks,
- struct s3crcrec *s3crc, unsigned int ns3crc)
+ struct s3crcrec *s3crc, unsigned int ns3crc)
{
int result = 0;
int i;
@@ -433,22 +443,22 @@ static int crcimage(struct imgchunk *fchunk, unsigned int nfchunks,
dest = fchunk[c].data + chunkoff;
*dest = 0xde;
*(dest + 1) = 0xc0;
-
}
return result;
}
/*----------------------------------------------------------------
-* free_chunks
-*
-* Clears the chunklist data structures in preparation for a new file.
-*
-* Arguments:
-* none
-*
-* Returns:
-* nothing
-----------------------------------------------------------------*/
+ * free_chunks
+ *
+ * Clears the chunklist data structures in preparation for a new file.
+ *
+ * Arguments:
+ * none
+ *
+ * Returns:
+ * nothing
+ *----------------------------------------------------------------
+ */
static void free_chunks(struct imgchunk *fchunk, unsigned int *nfchunks)
{
int i;
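
As crcimage() above makes explicit, no CRC is actually computed: the constant 0xC0DE is stored low byte first ("hfa384x order", i.e. little endian) in the two bytes preceding each flagged block. The plug itself reduces to:

#include <stdint.h>

/* Store the 16-bit marker 0xC0DE in little-endian byte order. */
static void plug_crc_marker(uint8_t *dest)
{
	dest[0] = 0xde;		/* low byte first */
	dest[1] = 0xc0;
}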
@@ -458,24 +468,24 @@ static void free_chunks(struct imgchunk *fchunk, unsigned int *nfchunks)
*nfchunks = 0;
memset(fchunk, 0, sizeof(*fchunk));
-
}
/*----------------------------------------------------------------
-* free_srecs
-*
-* Clears the srec data structures in preparation for a new file.
-*
-* Arguments:
-* none
-*
-* Returns:
-* nothing
-----------------------------------------------------------------*/
+ * free_srecs
+ *
+ * Clears the srec data structures in preparation for a new file.
+ *
+ * Arguments:
+ * none
+ *
+ * Returns:
+ * nothing
+ *----------------------------------------------------------------
+ */
static void free_srecs(void)
{
ns3data = 0;
- memset(s3data, 0, sizeof(s3data));
+ kfree(s3data);
ns3plug = 0;
memset(s3plug, 0, sizeof(s3plug));
ns3crc = 0;
@@ -486,19 +496,20 @@ static void free_srecs(void)
}
/*----------------------------------------------------------------
-* mkimage
-*
-* Scans the currently loaded set of S records for data residing
-* in contiguous memory regions. Each contiguous region is then
-* made into a 'chunk'. This function assumes that we're building
-* a new chunk list. Assumes the s3data items are in sorted order.
-*
-* Arguments: none
-*
-* Returns:
-* 0 - success
-* ~0 - failure (probably an errno)
-----------------------------------------------------------------*/
+ * mkimage
+ *
+ * Scans the currently loaded set of S records for data residing
+ * in contiguous memory regions. Each contiguous region is then
+ * made into a 'chunk'. This function assumes that we're building
+ * a new chunk list. Assumes the s3data items are in sorted order.
+ *
+ * Arguments: none
+ *
+ * Returns:
+ * 0 - success
+ * ~0 - failure (probably an errno)
+ *----------------------------------------------------------------
+ */
static int mkimage(struct imgchunk *clist, unsigned int *ccnt)
{
int result = 0;
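
The coalescing mkimage() performs, per the comment above, is a single pass over the address-sorted records: extend the current chunk while records abut, start a new one at each gap. A sketch with hypothetical rec/chunk types standing in for s3datarec/imgchunk:

#include <stddef.h>
#include <stdint.h>

struct rec   { uint32_t addr; uint32_t len; };
struct chunk { uint32_t addr; uint32_t len; };

/* One pass over records sorted by addr: start a new chunk whenever
 * the next record does not begin exactly where the last one ended. */
static size_t coalesce(const struct rec *r, size_t nrec,
		       struct chunk *c, size_t cmax)
{
	size_t i, n = 0;

	for (i = 0; i < nrec; i++) {
		if (n && r[i].addr == c[n - 1].addr + c[n - 1].len) {
			c[n - 1].len += r[i].len;	/* contiguous: extend */
		} else {
			if (n == cmax)
				break;			/* out of chunk slots */
			c[n].addr = r[i].addr;		/* gap: new chunk */
			c[n].len = r[i].len;
			n++;
		}
	}
	return n;
}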
@@ -577,19 +588,20 @@ static int mkimage(struct imgchunk *clist, unsigned int *ccnt)
}
/*----------------------------------------------------------------
-* mkpdrlist
-*
-* Reads a raw PDA and builds an array of pdrec_t structures.
-*
-* Arguments:
-* pda buffer containing raw PDA bytes
-* pdrec ptr to an array of pdrec_t's. Will be filled on exit.
-* nrec ptr to a variable that will contain the count of PDRs
-*
-* Returns:
-* 0 - success
-* ~0 - failure (probably an errno)
-----------------------------------------------------------------*/
+ * mkpdrlist
+ *
+ * Reads a raw PDA and builds an array of pdrec_t structures.
+ *
+ * Arguments:
+ * pda buffer containing raw PDA bytes
+ * pdrec ptr to an array of pdrec_t's. Will be filled on exit.
+ * nrec ptr to a variable that will contain the count of PDRs
+ *
+ * Returns:
+ * 0 - success
+ * ~0 - failure (probably an errno)
+ *----------------------------------------------------------------
+ */
static int mkpdrlist(struct pda *pda)
{
u16 *pda16 = (u16 *)pda->buf;
@@ -599,7 +611,7 @@ static int mkpdrlist(struct pda *pda)
curroff = 0;
while (curroff < (HFA384x_PDA_LEN_MAX / 2 - 1) &&
le16_to_cpu(pda16[curroff + 1]) != HFA384x_PDR_END_OF_PDA) {
- pda->rec[pda->nrec] = (struct hfa384x_pdrec *)&(pda16[curroff]);
+ pda->rec[pda->nrec] = (struct hfa384x_pdrec *)&pda16[curroff];
if (le16_to_cpu(pda->rec[pda->nrec]->code) ==
HFA384x_PDR_NICID) {
@@ -631,37 +643,38 @@ static int mkpdrlist(struct pda *pda)
(pda->nrec)++;
curroff += le16_to_cpu(pda16[curroff]) + 1;
-
}
if (curroff >= (HFA384x_PDA_LEN_MAX / 2 - 1)) {
pr_err("no end record found or invalid lengths in PDR data, exiting. %x %d\n",
curroff, pda->nrec);
return 1;
}
- pda->rec[pda->nrec] = (struct hfa384x_pdrec *)&(pda16[curroff]);
+ pda->rec[pda->nrec] = (struct hfa384x_pdrec *)&pda16[curroff];
(pda->nrec)++;
return 0;
}
/*----------------------------------------------------------------
-* plugimage
-*
-* Plugs the given image using the given plug records from the given
-* PDA and filename.
-*
-* Arguments:
-* fchunk Array of image chunks
-* nfchunks Number of image chunks
-* s3plug Array of plug records
-* ns3plug Number of plug records
-* pda Current pda data
-*
-* Returns:
-* 0 success
-* ~0 failure
-----------------------------------------------------------------*/
+ * plugimage
+ *
+ * Plugs the given image using the given plug records from the given
+ * PDA and filename.
+ *
+ * Arguments:
+ * fchunk Array of image chunks
+ * nfchunks Number of image chunks
+ * s3plug Array of plug records
+ * ns3plug Number of plug records
+ * pda Current pda data
+ *
+ * Returns:
+ * 0 success
+ * ~0 failure
+ *----------------------------------------------------------------
+ */
static int plugimage(struct imgchunk *fchunk, unsigned int nfchunks,
- struct s3plugrec *s3plug, unsigned int ns3plug, struct pda *pda)
+ struct s3plugrec *s3plug, unsigned int ns3plug,
+ struct pda *pda)
{
int result = 0;
int i; /* plug index */
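
The body of plugimage() (next hunk) resolves each plug record in two steps: find the PDA record whose code matches the plug's itemcode, then find the image chunk covering the plug's target address. A sketch of the first lookup, over a hypothetical flattened pdr type:

#include <stddef.h>
#include <stdint.h>

struct pdr { uint16_t code; const void *data; };

/* Return the index of the PDA record matching code, or -1 if absent. */
static int find_pdr(const struct pdr *rec, size_t nrec, uint16_t code)
{
	size_t j;

	for (j = 0; j < nrec; j++)
		if (rec[j].code == code)
			return (int)j;
	return -1;
}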
@@ -741,31 +754,31 @@ static int plugimage(struct imgchunk *fchunk, unsigned int nfchunks,
memset(dest, 0, s3plug[i].len);
strncpy(dest, PRISM2_USB_FWFILE, s3plug[i].len - 1);
} else { /* plug a PDR */
- memcpy(dest, &(pda->rec[j]->data), s3plug[i].len);
+ memcpy(dest, &pda->rec[j]->data, s3plug[i].len);
}
}
return result;
-
}
/*----------------------------------------------------------------
-* read_cardpda
-*
-* Sends the command for the driver to read the pda from the card
-* named in the device variable. Upon success, the card pda is
-* stored in the "cardpda" variables. Note that the pda structure
-* is considered 'well formed' after this function. That means
-* that the nrecs is valid, the rec array has been set up, and there's
-* a valid PDAEND record in the raw PDA data.
-*
-* Arguments:
-* pda pda structure
-* wlandev device
-*
-* Returns:
-* 0 - success
-* ~0 - failure (probably an errno)
-----------------------------------------------------------------*/
+ * read_cardpda
+ *
+ * Sends the command for the driver to read the pda from the card
+ * named in the device variable. Upon success, the card pda is
+ * stored in the "cardpda" variables. Note that the pda structure
+ * is considered 'well formed' after this function. That means
+ * that the nrecs is valid, the rec array has been set up, and there's
+ * a valid PDAEND record in the raw PDA data.
+ *
+ * Arguments:
+ * pda pda structure
+ * wlandev device
+ *
+ * Returns:
+ * 0 - success
+ * ~0 - failure (probably an errno)
+ *----------------------------------------------------------------
+ */
static int read_cardpda(struct pda *pda, struct wlandevice *wlandev)
{
int result = 0;
@@ -802,65 +815,66 @@ static int read_cardpda(struct pda *pda, struct wlandevice *wlandev)
}
/*----------------------------------------------------------------
-* read_fwfile
-*
-* Reads the given fw file which should have been compiled from an srec
-* file. Each record in the fw file will either be a plain data record,
-* a start address record, or other records used for plugging.
-*
-* Note that data records are expected to be sorted into
-* ascending address order in the fw file.
-*
-* Note also that the start address record, originally an S7 record in
-* the srec file, is expected in the fw file to be like a data record but
-* with a certain address to make it identifiable.
-*
-* Here's the SREC format that the fw should have come from:
-* S[37]nnaaaaaaaaddd...dddcc
-*
-* nn - number of bytes starting with the address field
-* aaaaaaaa - address in readable (or big endian) format
-* dd....dd - 0-245 data bytes (two chars per byte)
-* cc - checksum
-*
-* The S7 record's (there should be only one) address value gets
-* converted to an S3 record with address of 0xff400000, with the
-* start address being stored as a 4 byte data word. That address is
-* the start execution address used for RAM downloads.
-*
-* The S3 records have a collection of subformats indicated by the
-* value of aaaaaaaa:
-* 0xff000000 - Plug record, data field format:
-* xxxxxxxxaaaaaaaassssssss
-* x - PDR code number (little endian)
-* a - Address in load image to plug (little endian)
-* s - Length of plug data area (little endian)
-*
-* 0xff100000 - CRC16 generation record, data field format:
-* aaaaaaaassssssssbbbbbbbb
-* a - Start address for CRC calculation (little endian)
-* s - Length of data to calculate over (little endian)
-* b - Boolean, true=write crc, false=don't write
-*
-* 0xff200000 - Info record, data field format:
-* ssssttttdd..dd
-* s - Size in words (little endian)
-* t - Info type (little endian), see #defines and
-* struct s3inforec for details about types.
-* d - (s - 1) little endian words giving the contents of
-* the given info type.
-*
-* 0xff400000 - Start address record, data field format:
-* aaaaaaaa
-* a - Address in load image to plug (little endian)
-*
-* Arguments:
-* record firmware image (ihex record structure) in kernel memory
-*
-* Returns:
-* 0 - success
-* ~0 - failure (probably an errno)
-----------------------------------------------------------------*/
+ * read_fwfile
+ *
 + * Reads the given fw file, which should have been compiled from an srec
+ * file. Each record in the fw file will either be a plain data record,
+ * a start address record, or other records used for plugging.
+ *
+ * Note that data records are expected to be sorted into
+ * ascending address order in the fw file.
+ *
+ * Note also that the start address record, originally an S7 record in
+ * the srec file, is expected in the fw file to be like a data record but
+ * with a certain address to make it identifiable.
+ *
+ * Here's the SREC format that the fw should have come from:
+ * S[37]nnaaaaaaaaddd...dddcc
+ *
+ * nn - number of bytes starting with the address field
+ * aaaaaaaa - address in readable (or big endian) format
+ * dd....dd - 0-245 data bytes (two chars per byte)
+ * cc - checksum
+ *
+ * The S7 record's (there should be only one) address value gets
+ * converted to an S3 record with address of 0xff400000, with the
+ * start address being stored as a 4 byte data word. That address is
+ * the start execution address used for RAM downloads.
+ *
+ * The S3 records have a collection of subformats indicated by the
+ * value of aaaaaaaa:
+ * 0xff000000 - Plug record, data field format:
+ * xxxxxxxxaaaaaaaassssssss
+ * x - PDR code number (little endian)
+ * a - Address in load image to plug (little endian)
+ * s - Length of plug data area (little endian)
+ *
+ * 0xff100000 - CRC16 generation record, data field format:
+ * aaaaaaaassssssssbbbbbbbb
+ * a - Start address for CRC calculation (little endian)
+ * s - Length of data to calculate over (little endian)
+ * b - Boolean, true=write crc, false=don't write
+ *
+ * 0xff200000 - Info record, data field format:
+ * ssssttttdd..dd
+ * s - Size in words (little endian)
+ * t - Info type (little endian), see #defines and
+ * struct s3inforec for details about types.
+ * d - (s - 1) little endian words giving the contents of
+ * the given info type.
+ *
+ * 0xff400000 - Start address record, data field format:
+ * aaaaaaaa
+ * a - Address in load image to plug (little endian)
+ *
+ * Arguments:
+ * record firmware image (ihex record structure) in kernel memory
+ *
+ * Returns:
+ * 0 - success
+ * ~0 - failure (probably an errno)
+ *----------------------------------------------------------------
+ */
static int read_fwfile(const struct ihex_binrec *record)
{
int i;
@@ -872,7 +886,6 @@ static int read_fwfile(const struct ihex_binrec *record)
pr_debug("Reading fw file ...\n");
while (record) {
-
rcnt++;
len = be16_to_cpu(record->len);
@@ -887,8 +900,8 @@ static int read_fwfile(const struct ihex_binrec *record)
case S3ADDR_START:
startaddr = *ptr32;
pr_debug(" S7 start addr, record=%d addr=0x%08x\n",
- rcnt,
- startaddr);
+ rcnt,
+ startaddr);
break;
case S3ADDR_PLUG:
s3plug[ns3plug].itemcode = *ptr32;
@@ -896,10 +909,10 @@ static int read_fwfile(const struct ihex_binrec *record)
s3plug[ns3plug].len = *(ptr32 + 2);
pr_debug(" S3 plugrec, record=%d itemcode=0x%08x addr=0x%08x len=%d\n",
- rcnt,
- s3plug[ns3plug].itemcode,
- s3plug[ns3plug].addr,
- s3plug[ns3plug].len);
+ rcnt,
+ s3plug[ns3plug].itemcode,
+ s3plug[ns3plug].addr,
+ s3plug[ns3plug].len);
ns3plug++;
if (ns3plug == S3PLUG_MAX) {
@@ -913,10 +926,10 @@ static int read_fwfile(const struct ihex_binrec *record)
s3crc[ns3crc].dowrite = *(ptr32 + 2);
pr_debug(" S3 crcrec, record=%d addr=0x%08x len=%d write=0x%08x\n",
- rcnt,
- s3crc[ns3crc].addr,
- s3crc[ns3crc].len,
- s3crc[ns3crc].dowrite);
+ rcnt,
+ s3crc[ns3crc].addr,
+ s3crc[ns3crc].len,
+ s3crc[ns3crc].dowrite);
ns3crc++;
if (ns3crc == S3CRC_MAX) {
pr_err("S3 crcrec limit reached - aborting\n");
@@ -928,16 +941,16 @@ static int read_fwfile(const struct ihex_binrec *record)
s3info[ns3info].type = *(ptr16 + 1);
pr_debug(" S3 inforec, record=%d len=0x%04x type=0x%04x\n",
- rcnt,
- s3info[ns3info].len,
- s3info[ns3info].type);
+ rcnt,
+ s3info[ns3info].len,
+ s3info[ns3info].type);
if (((s3info[ns3info].len - 1) * sizeof(u16)) >
sizeof(s3info[ns3info].info)) {
pr_err("S3 inforec length too long - aborting\n");
return 1;
}
- tmpinfo = (u16 *)&(s3info[ns3info].info.version);
+ tmpinfo = (u16 *)&s3info[ns3info].info.version;
pr_debug(" info=");
for (i = 0; i < s3info[ns3info].len - 1; i++) {
tmpinfo[i] = *(ptr16 + 2 + i);
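
The parsing loop above dispatches on each record's address field to separate plain data from the 0xffX00000 control records documented in the read_fwfile() comment. Condensed to its skeleton, using the address values from that comment:

#include <stdint.h>

#define S3ADDR_PLUG	0xff000000
#define S3ADDR_CRC16	0xff100000
#define S3ADDR_INFO	0xff200000
#define S3ADDR_START	0xff400000

/* Classify one record by its address; anything else is plain data. */
static const char *record_kind(uint32_t addr)
{
	switch (addr) {
	case S3ADDR_PLUG:	return "plug";
	case S3ADDR_CRC16:	return "crc16";
	case S3ADDR_INFO:	return "info";
	case S3ADDR_START:	return "start address";
	default:		return "data";
	}
}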
@@ -968,22 +981,23 @@ static int read_fwfile(const struct ihex_binrec *record)
}
/*----------------------------------------------------------------
-* writeimage
-*
-* Takes the chunks, builds p80211 messages and sends them down
-* to the driver for writing to the card.
-*
-* Arguments:
-* wlandev device
-* fchunk Array of image chunks
-* nfchunks Number of image chunks
-*
-* Returns:
-* 0 success
-* ~0 failure
-----------------------------------------------------------------*/
+ * writeimage
+ *
+ * Takes the chunks, builds p80211 messages and sends them down
+ * to the driver for writing to the card.
+ *
+ * Arguments:
+ * wlandev device
+ * fchunk Array of image chunks
+ * nfchunks Number of image chunks
+ *
+ * Returns:
+ * 0 success
+ * ~0 failure
+ *----------------------------------------------------------------
+ */
static int writeimage(struct wlandevice *wlandev, struct imgchunk *fchunk,
- unsigned int nfchunks)
+ unsigned int nfchunks)
{
int result = 0;
struct p80211msg_p2req_ramdl_state *rstmsg;
@@ -1099,7 +1113,6 @@ static int writeimage(struct wlandevice *wlandev, struct imgchunk *fchunk,
result = 1;
goto free_result;
}
-
}
}
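
writeimage() ultimately pushes each chunk to the card in bounded pieces; stripped of the p80211 message plumbing, the slicing is the usual offset/remainder loop. A sketch with a hypothetical block limit and write callback:

#include <stddef.h>
#include <stdint.h>

#define BLOCK_MAX 4096	/* hypothetical per-message payload limit */

typedef int (*write_fn)(uint32_t addr, const uint8_t *buf, size_t len);

/* Write one chunk in at-most-BLOCK_MAX pieces, stopping on error. */
static int write_chunk(uint32_t addr, const uint8_t *data, size_t len,
		       write_fn wr)
{
	size_t off, n;
	int ret;

	for (off = 0; off < len; off += n) {
		n = len - off;
		if (n > BLOCK_MAX)
			n = BLOCK_MAX;
		ret = wr(addr + off, data + off, n);
		if (ret)
			return ret;
	}
	return 0;
}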
diff --git a/drivers/staging/wlan-ng/prism2mgmt.c b/drivers/staging/wlan-ng/prism2mgmt.c
index 170de1c9eac4..c558ad656c49 100644
--- a/drivers/staging/wlan-ng/prism2mgmt.c
+++ b/drivers/staging/wlan-ng/prism2mgmt.c
@@ -1,61 +1,61 @@
/* src/prism2/driver/prism2mgmt.c
-*
-* Management request handler functions.
-*
-* Copyright (C) 1999 AbsoluteValue Systems, Inc. All Rights Reserved.
-* --------------------------------------------------------------------
-*
-* linux-wlan
-*
-* The contents of this file are subject to the Mozilla Public
-* License Version 1.1 (the "License"); you may not use this file
-* except in compliance with the License. You may obtain a copy of
-* the License at http://www.mozilla.org/MPL/
-*
-* Software distributed under the License is distributed on an "AS
-* IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
-* implied. See the License for the specific language governing
-* rights and limitations under the License.
-*
-* Alternatively, the contents of this file may be used under the
-* terms of the GNU Public License version 2 (the "GPL"), in which
-* case the provisions of the GPL are applicable instead of the
-* above. If you wish to allow the use of your version of this file
-* only under the terms of the GPL and not to allow others to use
-* your version of this file under the MPL, indicate your decision
-* by deleting the provisions above and replace them with the notice
-* and other provisions required by the GPL. If you do not delete
-* the provisions above, a recipient may use your version of this
-* file under either the MPL or the GPL.
-*
-* --------------------------------------------------------------------
-*
-* Inquiries regarding the linux-wlan Open Source project can be
-* made directly to:
-*
-* AbsoluteValue Systems Inc.
-* info@linux-wlan.com
-* http://www.linux-wlan.com
-*
-* --------------------------------------------------------------------
-*
-* Portions of the development of this software were funded by
-* Intersil Corporation as part of PRISM(R) chipset product development.
-*
-* --------------------------------------------------------------------
-*
-* The functions in this file handle management requests sent from
-* user mode.
-*
-* Most of these functions have two separate blocks of code that are
-* conditional on whether this is a station or an AP. This is used
-* to separate out the STA and AP responses to these management primitives.
-* It's a choice (good, bad, indifferent?) to have the code in the same
-* place so it's clear that the same primitive is implemented in both
-* cases but has different behavior.
-*
-* --------------------------------------------------------------------
-*/
+ *
+ * Management request handler functions.
+ *
+ * Copyright (C) 1999 AbsoluteValue Systems, Inc. All Rights Reserved.
+ * --------------------------------------------------------------------
+ *
+ * linux-wlan
+ *
+ * The contents of this file are subject to the Mozilla Public
+ * License Version 1.1 (the "License"); you may not use this file
+ * except in compliance with the License. You may obtain a copy of
+ * the License at http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS
+ * IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
+ * implied. See the License for the specific language governing
+ * rights and limitations under the License.
+ *
+ * Alternatively, the contents of this file may be used under the
+ * terms of the GNU Public License version 2 (the "GPL"), in which
+ * case the provisions of the GPL are applicable instead of the
+ * above. If you wish to allow the use of your version of this file
+ * only under the terms of the GPL and not to allow others to use
+ * your version of this file under the MPL, indicate your decision
+ * by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL. If you do not delete
+ * the provisions above, a recipient may use your version of this
+ * file under either the MPL or the GPL.
+ *
+ * --------------------------------------------------------------------
+ *
+ * Inquiries regarding the linux-wlan Open Source project can be
+ * made directly to:
+ *
+ * AbsoluteValue Systems Inc.
+ * info@linux-wlan.com
+ * http://www.linux-wlan.com
+ *
+ * --------------------------------------------------------------------
+ *
+ * Portions of the development of this software were funded by
+ * Intersil Corporation as part of PRISM(R) chipset product development.
+ *
+ * --------------------------------------------------------------------
+ *
+ * The functions in this file handle management requests sent from
+ * user mode.
+ *
+ * Most of these functions have two separate blocks of code that are
+ * conditional on whether this is a station or an AP. This is used
+ * to separate out the STA and AP responses to these management primitives.
+ * It's a choice (good, bad, indifferent?) to have the code in the same
+ * place so it's clear that the same primitive is implemented in both
+ * cases but has different behavior.
+ *
+ * --------------------------------------------------------------------
+ */
#include <linux/if_arp.h>
#include <linux/module.h>
@@ -84,35 +84,36 @@
#include "prism2mgmt.h"
/* Converts 802.11 format rate specifications to prism2 */
-#define p80211rate_to_p2bit(n) ((((n)&~BIT(7)) == 2) ? BIT(0) : \
- (((n)&~BIT(7)) == 4) ? BIT(1) : \
- (((n)&~BIT(7)) == 11) ? BIT(2) : \
- (((n)&~BIT(7)) == 22) ? BIT(3) : 0)
+#define p80211rate_to_p2bit(n) ((((n) & ~BIT(7)) == 2) ? BIT(0) : \
+ (((n) & ~BIT(7)) == 4) ? BIT(1) : \
+ (((n) & ~BIT(7)) == 11) ? BIT(2) : \
+ (((n) & ~BIT(7)) == 22) ? BIT(3) : 0)
/*----------------------------------------------------------------
-* prism2mgmt_scan
-*
-* Initiate a scan for BSSs.
-*
-* This function corresponds to MLME-scan.request and part of
-* MLME-scan.confirm. As far as I can tell in the standard, there
-* are no restrictions on when a scan.request may be issued. We have
-* to handle in whatever state the driver/MAC happen to be.
-*
-* Arguments:
-* wlandev wlan device structure
-* msgp ptr to msg buffer
-*
-* Returns:
-* 0 success and done
-* <0 success, but we're waiting for something to finish.
-* >0 an error occurred while handling the message.
-* Side effects:
-*
-* Call context:
-* process thread (usually)
-* interrupt
-----------------------------------------------------------------*/
+ * prism2mgmt_scan
+ *
+ * Initiate a scan for BSSs.
+ *
+ * This function corresponds to MLME-scan.request and part of
+ * MLME-scan.confirm. As far as I can tell in the standard, there
+ * are no restrictions on when a scan.request may be issued. We have
+ * to handle in whatever state the driver/MAC happen to be.
+ *
+ * Arguments:
+ * wlandev wlan device structure
+ * msgp ptr to msg buffer
+ *
+ * Returns:
+ * 0 success and done
+ * <0 success, but we're waiting for something to finish.
+ * >0 an error occurred while handling the message.
+ * Side effects:
+ *
+ * Call context:
+ * process thread (usually)
+ * interrupt
+ *----------------------------------------------------------------
+ */
int prism2mgmt_scan(struct wlandevice *wlandev, void *msgp)
{
int result = 0;
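
A few worked values for the reworked macro above, which treats rates as multiples of 500 kb/s with BIT(7) flagging a basic rate (BIT() is redefined here only to make the sketch self-contained):

#include <stdio.h>

#define BIT(n) (1u << (n))
#define p80211rate_to_p2bit(n) ((((n) & ~BIT(7)) == 2) ? BIT(0) : \
				(((n) & ~BIT(7)) == 4) ? BIT(1) : \
				(((n) & ~BIT(7)) == 11) ? BIT(2) : \
				(((n) & ~BIT(7)) == 22) ? BIT(3) : 0)

int main(void)
{
	/* 0x82 = basic 1 Mb/s (2 * 500 kb/s with BIT(7) set) -> BIT(0) */
	printf("0x82 -> 0x%x\n", p80211rate_to_p2bit(0x82));
	/* 0x0b = 5.5 Mb/s (11 * 500 kb/s) -> BIT(2) */
	printf("0x0b -> 0x%x\n", p80211rate_to_p2bit(0x0b));
	/* rates outside the 802.11b set map to 0 */
	printf("0x0c -> 0x%x\n", p80211rate_to_p2bit(0x0c));
	return 0;
}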
@@ -122,7 +123,7 @@ int prism2mgmt_scan(struct wlandevice *wlandev, void *msgp)
int i, timeout;
int istmpenable = 0;
- struct hfa384x_HostScanRequest_data scanreq;
+ struct hfa384x_host_scan_request_data scanreq;
/* gatekeeper check */
if (HFA384x_FIRMWARE_VERSION(hw->ident_sta_fw.major,
@@ -184,7 +185,7 @@ int prism2mgmt_scan(struct wlandevice *wlandev, void *msgp)
/* set up the txrate to be 2MBPS. Should be fastest basicrate... */
word = HFA384x_RATEBIT_2;
- scanreq.txRate = cpu_to_le16(word);
+ scanreq.tx_rate = cpu_to_le16(word);
/* set up the channel list */
word = 0;
@@ -196,7 +197,7 @@ int prism2mgmt_scan(struct wlandevice *wlandev, void *msgp)
/* channel 1 is BIT 0 ... channel 14 is BIT 13 */
word |= (1 << (channel - 1));
}
- scanreq.channelList = cpu_to_le16(word);
+ scanreq.channel_list = cpu_to_le16(word);
/* set up the ssid, if present. */
scanreq.ssid.len = cpu_to_le16(msg->ssid.data.len);
@@ -292,7 +293,7 @@ int prism2mgmt_scan(struct wlandevice *wlandev, void *msgp)
result = hfa384x_drvr_setconfig(hw,
HFA384x_RID_HOSTSCAN, &scanreq,
- sizeof(struct hfa384x_HostScanRequest_data));
+ sizeof(scanreq));
if (result) {
netdev_err(wlandev->netdev,
"setconfig(SCANREQUEST) failed. result=%d\n",
@@ -347,31 +348,32 @@ exit:
}
/*----------------------------------------------------------------
-* prism2mgmt_scan_results
-*
-* Retrieve the BSS description for one of the BSSs identified in
-* a scan.
-*
-* Arguments:
-* wlandev wlan device structure
-* msgp ptr to msg buffer
-*
-* Returns:
-* 0 success and done
-* <0 success, but we're waiting for something to finish.
-* >0 an error occurred while handling the message.
-* Side effects:
-*
-* Call context:
-* process thread (usually)
-* interrupt
-----------------------------------------------------------------*/
+ * prism2mgmt_scan_results
+ *
+ * Retrieve the BSS description for one of the BSSs identified in
+ * a scan.
+ *
+ * Arguments:
+ * wlandev wlan device structure
+ * msgp ptr to msg buffer
+ *
+ * Returns:
+ * 0 success and done
+ * <0 success, but we're waiting for something to finish.
+ * >0 an error occurred while handling the message.
+ * Side effects:
+ *
+ * Call context:
+ * process thread (usually)
+ * interrupt
+ *----------------------------------------------------------------
+ */
int prism2mgmt_scan_results(struct wlandevice *wlandev, void *msgp)
{
int result = 0;
struct p80211msg_dot11req_scan_results *req;
struct hfa384x *hw = wlandev->priv;
- struct hfa384x_HScanResultSub *item = NULL;
+ struct hfa384x_hscan_result_sub *item = NULL;
int count;
@@ -425,8 +427,8 @@ int prism2mgmt_scan_results(struct wlandevice *wlandev, void *msgp)
#define REQBASICRATE(N) \
do { \
if ((count >= N) && DOT11_RATE5_ISBASIC_GET( \
- item->supprates[(N)-1])) { \
- req->basicrate ## N .data = item->supprates[(N)-1]; \
+ item->supprates[(N) - 1])) { \
+ req->basicrate ## N .data = item->supprates[(N) - 1]; \
req->basicrate ## N .status = \
P80211ENUM_msgitem_status_data_ok; \
} \
@@ -444,7 +446,7 @@ int prism2mgmt_scan_results(struct wlandevice *wlandev, void *msgp)
#define REQSUPPRATE(N) \
do { \
if (count >= N) { \
- req->supprate ## N .data = item->supprates[(N)-1]; \
+ req->supprate ## N .data = item->supprates[(N) - 1]; \
req->supprate ## N .status = \
P80211ENUM_msgitem_status_data_ok; \
} \
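
REQBASICRATE()/REQSUPPRATE() lean on token pasting: "## N" splices the literal argument into the member name, so one macro serves basicrate1 through basicrate8. The same trick in isolation, with a hypothetical two-field struct:

#include <stdio.h>

struct msg { int rate1; int rate2; };

/* ## splices the literal N into the member name at expansion time. */
#define SETRATE(m, N, v) ((m).rate ## N = (v))

int main(void)
{
	struct msg m;

	SETRATE(m, 1, 2);	/* expands to m.rate1 = 2 */
	SETRATE(m, 2, 4);	/* expands to m.rate2 = 4 */
	printf("%d %d\n", m.rate1, m.rate2);
	return 0;
}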
@@ -507,24 +509,25 @@ exit:
}
/*----------------------------------------------------------------
-* prism2mgmt_start
-*
-* Start a BSS. Any station can do this for IBSS, only AP for ESS.
-*
-* Arguments:
-* wlandev wlan device structure
-* msgp ptr to msg buffer
-*
-* Returns:
-* 0 success and done
-* <0 success, but we're waiting for something to finish.
-* >0 an error occurred while handling the message.
-* Side effects:
-*
-* Call context:
-* process thread (usually)
-* interrupt
-----------------------------------------------------------------*/
+ * prism2mgmt_start
+ *
+ * Start a BSS. Any station can do this for IBSS, only AP for ESS.
+ *
+ * Arguments:
+ * wlandev wlan device structure
+ * msgp ptr to msg buffer
+ *
+ * Returns:
+ * 0 success and done
+ * <0 success, but we're waiting for something to finish.
+ * >0 an error occurred while handling the message.
+ * Side effects:
+ *
+ * Call context:
+ * process thread (usually)
+ * interrupt
+ *----------------------------------------------------------------
+ */
int prism2mgmt_start(struct wlandevice *wlandev, void *msgp)
{
int result = 0;
@@ -580,7 +583,7 @@ int prism2mgmt_start(struct wlandevice *wlandev, void *msgp)
/* beacon period */
word = msg->beaconperiod.data;
- result = hfa384x_drvr_setconfig16(hw, HFA384x_RID_CNFAPBCNint, word);
+ result = hfa384x_drvr_setconfig16(hw, HFA384x_RID_CNFAPBCNINT, word);
if (result) {
netdev_err(wlandev->netdev,
"Failed to set beacon period=%d.\n", word);
@@ -689,23 +692,24 @@ done:
}
/*----------------------------------------------------------------
-* prism2mgmt_readpda
-*
-* Collect the PDA data and put it in the message.
-*
-* Arguments:
-* wlandev wlan device structure
-* msgp ptr to msg buffer
-*
-* Returns:
-* 0 success and done
-* <0 success, but we're waiting for something to finish.
-* >0 an error occurred while handling the message.
-* Side effects:
-*
-* Call context:
-* process thread (usually)
-----------------------------------------------------------------*/
+ * prism2mgmt_readpda
+ *
+ * Collect the PDA data and put it in the message.
+ *
+ * Arguments:
+ * wlandev wlan device structure
+ * msgp ptr to msg buffer
+ *
+ * Returns:
+ * 0 success and done
+ * <0 success, but we're waiting for something to finish.
+ * >0 an error occurred while handling the message.
+ * Side effects:
+ *
+ * Call context:
+ * process thread (usually)
+ *----------------------------------------------------------------
+ */
int prism2mgmt_readpda(struct wlandevice *wlandev, void *msgp)
{
struct hfa384x *hw = wlandev->priv;
@@ -748,30 +752,31 @@ int prism2mgmt_readpda(struct wlandevice *wlandev, void *msgp)
}
/*----------------------------------------------------------------
-* prism2mgmt_ramdl_state
-*
-* Establishes the beginning/end of a card RAM download session.
-*
-* It is expected that the ramdl_write() function will be called
-* one or more times between the 'enable' and 'disable' calls to
-* this function.
-*
-* Note: This function should not be called when a mac comm port
-* is active.
-*
-* Arguments:
-* wlandev wlan device structure
-* msgp ptr to msg buffer
-*
-* Returns:
-* 0 success and done
-* <0 success, but we're waiting for something to finish.
-* >0 an error occurred while handling the message.
-* Side effects:
-*
-* Call context:
-* process thread (usually)
-----------------------------------------------------------------*/
+ * prism2mgmt_ramdl_state
+ *
+ * Establishes the beginning/end of a card RAM download session.
+ *
+ * It is expected that the ramdl_write() function will be called
+ * one or more times between the 'enable' and 'disable' calls to
+ * this function.
+ *
+ * Note: This function should not be called when a mac comm port
+ * is active.
+ *
+ * Arguments:
+ * wlandev wlan device structure
+ * msgp ptr to msg buffer
+ *
+ * Returns:
+ * 0 success and done
+ * <0 success, but we're waiting for something to finish.
+ * >0 an error occurred while handling the message.
+ * Side effects:
+ *
+ * Call context:
+ * process thread (usually)
+ *----------------------------------------------------------------
+ */
int prism2mgmt_ramdl_state(struct wlandevice *wlandev, void *msgp)
{
struct hfa384x *hw = wlandev->priv;
@@ -808,25 +813,26 @@ int prism2mgmt_ramdl_state(struct wlandevice *wlandev, void *msgp)
}
/*----------------------------------------------------------------
-* prism2mgmt_ramdl_write
-*
-* Writes a buffer to the card RAM using the download state. This
-* is for writing code to card RAM. To just read or write raw data
-* use the aux functions.
-*
-* Arguments:
-* wlandev wlan device structure
-* msgp ptr to msg buffer
-*
-* Returns:
-* 0 success and done
-* <0 success, but we're waiting for something to finish.
-* >0 an error occurred while handling the message.
-* Side effects:
-*
-* Call context:
-* process thread (usually)
-----------------------------------------------------------------*/
+ * prism2mgmt_ramdl_write
+ *
+ * Writes a buffer to the card RAM using the download state. This
+ * is for writing code to card RAM. To just read or write raw data
+ * use the aux functions.
+ *
+ * Arguments:
+ * wlandev wlan device structure
+ * msgp ptr to msg buffer
+ *
+ * Returns:
+ * 0 success and done
+ * <0 success, but we're waiting for something to finish.
+ * >0 an error occurred while handling the message.
+ * Side effects:
+ *
+ * Call context:
+ * process thread (usually)
+ *----------------------------------------------------------------
+ */
int prism2mgmt_ramdl_write(struct wlandevice *wlandev, void *msgp)
{
struct hfa384x *hw = wlandev->priv;
@@ -864,30 +870,31 @@ int prism2mgmt_ramdl_write(struct wlandevice *wlandev, void *msgp)
}
/*----------------------------------------------------------------
-* prism2mgmt_flashdl_state
-*
-* Establishes the beginning/end of a card Flash download session.
-*
-* It is expected that the flashdl_write() function will be called
-* one or more times between the 'enable' and 'disable' calls to
-* this function.
-*
-* Note: This function should not be called when a mac comm port
-* is active.
-*
-* Arguments:
-* wlandev wlan device structure
-* msgp ptr to msg buffer
-*
-* Returns:
-* 0 success and done
-* <0 success, but we're waiting for something to finish.
-* >0 an error occurred while handling the message.
-* Side effects:
-*
-* Call context:
-* process thread (usually)
-----------------------------------------------------------------*/
+ * prism2mgmt_flashdl_state
+ *
+ * Establishes the beginning/end of a card Flash download session.
+ *
+ * It is expected that the flashdl_write() function will be called
+ * one or more times between the 'enable' and 'disable' calls to
+ * this function.
+ *
+ * Note: This function should not be called when a mac comm port
+ * is active.
+ *
+ * Arguments:
+ * wlandev wlan device structure
+ * msgp ptr to msg buffer
+ *
+ * Returns:
+ * 0 success and done
+ * <0 success, but we're waiting for something to finish.
+ * >0 an error occurred while handling the message.
+ * Side effects:
+ *
+ * Call context:
+ * process thread (usually)
+ *----------------------------------------------------------------
+ */
int prism2mgmt_flashdl_state(struct wlandevice *wlandev, void *msgp)
{
int result = 0;
@@ -942,23 +949,24 @@ int prism2mgmt_flashdl_state(struct wlandevice *wlandev, void *msgp)
}
/*----------------------------------------------------------------
-* prism2mgmt_flashdl_write
-*
-*
-*
-* Arguments:
-* wlandev wlan device structure
-* msgp ptr to msg buffer
-*
-* Returns:
-* 0 success and done
-* <0 success, but we're waiting for something to finish.
-* >0 an error occurred while handling the message.
-* Side effects:
-*
-* Call context:
-* process thread (usually)
-----------------------------------------------------------------*/
+ * prism2mgmt_flashdl_write
+ *
+ *
+ *
+ * Arguments:
+ * wlandev wlan device structure
+ * msgp ptr to msg buffer
+ *
+ * Returns:
+ * 0 success and done
+ * <0 success, but we're waiting for something to finish.
+ * >0 an error occurred while handling the message.
+ * Side effects:
+ *
+ * Call context:
+ * process thread (usually)
+ *----------------------------------------------------------------
+ */
int prism2mgmt_flashdl_write(struct wlandevice *wlandev, void *msgp)
{
struct hfa384x *hw = wlandev->priv;
@@ -1001,24 +1009,25 @@ int prism2mgmt_flashdl_write(struct wlandevice *wlandev, void *msgp)
}
/*----------------------------------------------------------------
-* prism2mgmt_autojoin
-*
-* Associate with an ESS.
-*
-* Arguments:
-* wlandev wlan device structure
-* msgp ptr to msg buffer
-*
-* Returns:
-* 0 success and done
-* <0 success, but we're waiting for something to finish.
-* >0 an error occurred while handling the message.
-* Side effects:
-*
-* Call context:
-* process thread (usually)
-* interrupt
-----------------------------------------------------------------*/
+ * prism2mgmt_autojoin
+ *
+ * Associate with an ESS.
+ *
+ * Arguments:
+ * wlandev wlan device structure
+ * msgp ptr to msg buffer
+ *
+ * Returns:
+ * 0 success and done
+ * <0 success, but we're waiting for something to finish.
+ * >0 an error occurred while handling the message.
+ * Side effects:
+ *
+ * Call context:
+ * process thread (usually)
+ * interrupt
+ *----------------------------------------------------------------
+ */
int prism2mgmt_autojoin(struct wlandevice *wlandev, void *msgp)
{
struct hfa384x *hw = wlandev->priv;
@@ -1072,24 +1081,25 @@ int prism2mgmt_autojoin(struct wlandevice *wlandev, void *msgp)
}
/*----------------------------------------------------------------
-* prism2mgmt_wlansniff
-*
-* Start or stop sniffing.
-*
-* Arguments:
-* wlandev wlan device structure
-* msgp ptr to msg buffer
-*
-* Returns:
-* 0 success and done
-* <0 success, but we're waiting for something to finish.
-* >0 an error occurred while handling the message.
-* Side effects:
-*
-* Call context:
-* process thread (usually)
-* interrupt
-----------------------------------------------------------------*/
+ * prism2mgmt_wlansniff
+ *
+ * Start or stop sniffing.
+ *
+ * Arguments:
+ * wlandev wlan device structure
+ * msgp ptr to msg buffer
+ *
+ * Returns:
+ * 0 success and done
+ * <0 success, but we're waiting for something to finish.
+ * >0 an error occurred while handling the message.
+ * Side effects:
+ *
+ * Call context:
+ * process thread (usually)
+ * interrupt
+ *----------------------------------------------------------------
+ */
int prism2mgmt_wlansniff(struct wlandevice *wlandev, void *msgp)
{
int result = 0;
diff --git a/drivers/staging/wlan-ng/prism2mgmt.h b/drivers/staging/wlan-ng/prism2mgmt.h
index cc1ac7a60dfe..88b979ff68b3 100644
--- a/drivers/staging/wlan-ng/prism2mgmt.h
+++ b/drivers/staging/wlan-ng/prism2mgmt.h
@@ -1,61 +1,61 @@
/* prism2mgmt.h
-*
-* Declares the mgmt command handler functions
-*
-* Copyright (C) 1999 AbsoluteValue Systems, Inc. All Rights Reserved.
-* --------------------------------------------------------------------
-*
-* linux-wlan
-*
-* The contents of this file are subject to the Mozilla Public
-* License Version 1.1 (the "License"); you may not use this file
-* except in compliance with the License. You may obtain a copy of
-* the License at http://www.mozilla.org/MPL/
-*
-* Software distributed under the License is distributed on an "AS
-* IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
-* implied. See the License for the specific language governing
-* rights and limitations under the License.
-*
-* Alternatively, the contents of this file may be used under the
-* terms of the GNU Public License version 2 (the "GPL"), in which
-* case the provisions of the GPL are applicable instead of the
-* above. If you wish to allow the use of your version of this file
-* only under the terms of the GPL and not to allow others to use
-* your version of this file under the MPL, indicate your decision
-* by deleting the provisions above and replace them with the notice
-* and other provisions required by the GPL. If you do not delete
-* the provisions above, a recipient may use your version of this
-* file under either the MPL or the GPL.
-*
-* --------------------------------------------------------------------
-*
-* Inquiries regarding the linux-wlan Open Source project can be
-* made directly to:
-*
-* AbsoluteValue Systems Inc.
-* info@linux-wlan.com
-* http://www.linux-wlan.com
-*
-* --------------------------------------------------------------------
-*
-* Portions of the development of this software were funded by
-* Intersil Corporation as part of PRISM(R) chipset product development.
-*
-* --------------------------------------------------------------------
-*
-* This file contains the constants and data structures for interaction
-* with the hfa384x Wireless LAN (WLAN) Media Access Controller (MAC).
-* The hfa384x is a portion of the Harris PRISM(tm) WLAN chipset.
-*
-* [Implementation and usage notes]
-*
-* [References]
-* CW10 Programmer's Manual v1.5
-* IEEE 802.11 D10.0
-*
-* --------------------------------------------------------------------
-*/
+ *
+ * Declares the mgmt command handler functions
+ *
+ * Copyright (C) 1999 AbsoluteValue Systems, Inc. All Rights Reserved.
+ * --------------------------------------------------------------------
+ *
+ * linux-wlan
+ *
+ * The contents of this file are subject to the Mozilla Public
+ * License Version 1.1 (the "License"); you may not use this file
+ * except in compliance with the License. You may obtain a copy of
+ * the License at http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS
+ * IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
+ * implied. See the License for the specific language governing
+ * rights and limitations under the License.
+ *
+ * Alternatively, the contents of this file may be used under the
+ * terms of the GNU Public License version 2 (the "GPL"), in which
+ * case the provisions of the GPL are applicable instead of the
+ * above. If you wish to allow the use of your version of this file
+ * only under the terms of the GPL and not to allow others to use
+ * your version of this file under the MPL, indicate your decision
+ * by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL. If you do not delete
+ * the provisions above, a recipient may use your version of this
+ * file under either the MPL or the GPL.
+ *
+ * --------------------------------------------------------------------
+ *
+ * Inquiries regarding the linux-wlan Open Source project can be
+ * made directly to:
+ *
+ * AbsoluteValue Systems Inc.
+ * info@linux-wlan.com
+ * http://www.linux-wlan.com
+ *
+ * --------------------------------------------------------------------
+ *
+ * Portions of the development of this software were funded by
+ * Intersil Corporation as part of PRISM(R) chipset product development.
+ *
+ * --------------------------------------------------------------------
+ *
+ * This file contains the constants and data structures for interaction
+ * with the hfa384x Wireless LAN (WLAN) Media Access Controller (MAC).
+ * The hfa384x is a portion of the Harris PRISM(tm) WLAN chipset.
+ *
+ * [Implementation and usage notes]
+ *
+ * [References]
+ * CW10 Programmer's Manual v1.5
+ * IEEE 802.11 D10.0
+ *
+ * --------------------------------------------------------------------
+ */
#ifndef _PRISM2MGMT_H
#define _PRISM2MGMT_H
@@ -65,7 +65,8 @@ extern int prism2_reset_settletime;
u32 prism2sta_ifstate(struct wlandevice *wlandev, u32 ifstate);
-void prism2sta_ev_info(struct wlandevice *wlandev, struct hfa384x_InfFrame *inf);
+void prism2sta_ev_info(struct wlandevice *wlandev,
+ struct hfa384x_inf_frame *inf);
void prism2sta_ev_txexc(struct wlandevice *wlandev, u16 status);
void prism2sta_ev_tx(struct wlandevice *wlandev, u16 status);
void prism2sta_ev_alloc(struct wlandevice *wlandev);
@@ -83,9 +84,11 @@ int prism2mgmt_flashdl_write(struct wlandevice *wlandev, void *msgp);
int prism2mgmt_autojoin(struct wlandevice *wlandev, void *msgp);
/*---------------------------------------------------------------
-* conversion functions going between wlan message data types and
-* Prism2 data types
----------------------------------------------------------------*/
+ * conversion functions going between wlan message data types and
+ * Prism2 data types
+ *---------------------------------------------------------------
+ */
+
/* byte area conversion functions*/
void prism2mgmt_bytearea2pstr(u8 *bytearea, struct p80211pstrd *pstr, int len);
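
The byte-area/pstr converters declared here translate between flat byte buffers and the length-prefixed string type used in wlan-ng messages. A sketch of the forward direction, assuming a conventional len-then-data layout for struct p80211pstrd:

#include <stdint.h>
#include <string.h>

/* Length-prefixed string, mirroring the assumed p80211pstrd layout. */
struct pstr { uint8_t len; uint8_t data[255]; };

static void bytearea2pstr(const uint8_t *bytearea, struct pstr *pstr,
			  int len)
{
	if (len > (int)sizeof(pstr->data))	/* clamp defensively */
		len = sizeof(pstr->data);
	pstr->len = (uint8_t)len;
	memcpy(pstr->data, bytearea, len);
}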
diff --git a/drivers/staging/wlan-ng/prism2mib.c b/drivers/staging/wlan-ng/prism2mib.c
index 63ab6bc88654..8ea6a647d037 100644
--- a/drivers/staging/wlan-ng/prism2mib.c
+++ b/drivers/staging/wlan-ng/prism2mib.c
@@ -1,54 +1,54 @@
/* src/prism2/driver/prism2mib.c
-*
-* Management request for mibset/mibget
-*
-* Copyright (C) 1999 AbsoluteValue Systems, Inc. All Rights Reserved.
-* --------------------------------------------------------------------
-*
-* linux-wlan
-*
-* The contents of this file are subject to the Mozilla Public
-* License Version 1.1 (the "License"); you may not use this file
-* except in compliance with the License. You may obtain a copy of
-* the License at http://www.mozilla.org/MPL/
-*
-* Software distributed under the License is distributed on an "AS
-* IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
-* implied. See the License for the specific language governing
-* rights and limitations under the License.
-*
-* Alternatively, the contents of this file may be used under the
-* terms of the GNU Public License version 2 (the "GPL"), in which
-* case the provisions of the GPL are applicable instead of the
-* above. If you wish to allow the use of your version of this file
-* only under the terms of the GPL and not to allow others to use
-* your version of this file under the MPL, indicate your decision
-* by deleting the provisions above and replace them with the notice
-* and other provisions required by the GPL. If you do not delete
-* the provisions above, a recipient may use your version of this
-* file under either the MPL or the GPL.
-*
-* --------------------------------------------------------------------
-*
-* Inquiries regarding the linux-wlan Open Source project can be
-* made directly to:
-*
-* AbsoluteValue Systems Inc.
-* info@linux-wlan.com
-* http://www.linux-wlan.com
-*
-* --------------------------------------------------------------------
-*
-* Portions of the development of this software were funded by
-* Intersil Corporation as part of PRISM(R) chipset product development.
-*
-* --------------------------------------------------------------------
-*
-* The functions in this file handle the mibset/mibget management
-* functions.
-*
-* --------------------------------------------------------------------
-*/
+ *
+ * Management request for mibset/mibget
+ *
+ * Copyright (C) 1999 AbsoluteValue Systems, Inc. All Rights Reserved.
+ * --------------------------------------------------------------------
+ *
+ * linux-wlan
+ *
+ * The contents of this file are subject to the Mozilla Public
+ * License Version 1.1 (the "License"); you may not use this file
+ * except in compliance with the License. You may obtain a copy of
+ * the License at http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS
+ * IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
+ * implied. See the License for the specific language governing
+ * rights and limitations under the License.
+ *
+ * Alternatively, the contents of this file may be used under the
+ * terms of the GNU Public License version 2 (the "GPL"), in which
+ * case the provisions of the GPL are applicable instead of the
+ * above. If you wish to allow the use of your version of this file
+ * only under the terms of the GPL and not to allow others to use
+ * your version of this file under the MPL, indicate your decision
+ * by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL. If you do not delete
+ * the provisions above, a recipient may use your version of this
+ * file under either the MPL or the GPL.
+ *
+ * --------------------------------------------------------------------
+ *
+ * Inquiries regarding the linux-wlan Open Source project can be
+ * made directly to:
+ *
+ * AbsoluteValue Systems Inc.
+ * info@linux-wlan.com
+ * http://www.linux-wlan.com
+ *
+ * --------------------------------------------------------------------
+ *
+ * Portions of the development of this software were funded by
+ * Intersil Corporation as part of PRISM(R) chipset product development.
+ *
+ * --------------------------------------------------------------------
+ *
+ * The functions in this file handle the mibset/mibget management
+ * functions.
+ *
+ * --------------------------------------------------------------------
+ */
#include <linux/module.h>
#include <linux/kernel.h>
@@ -709,7 +709,7 @@ static int prism2mib_priv(struct mibrec *mib,
switch (mib->did) {
case DIDmib_lnx_lnxConfigTable_lnxRSNAIE:{
- struct hfa384x_WPAData wpa;
+ struct hfa384x_wpa_data wpa;
if (isget) {
hfa384x_drvr_getconfig(hw,
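
The hfa384x_WPAData to hfa384x_wpa_data change is one instance of a sweep converting the driver's CamelCase type names to the lowercase_with_underscores form that Documentation/process/coding-style.rst prefers. A sketch of the pattern; the field layout below is invented purely for illustration and is not the real RID layout:

/* before: struct hfa384x_WPAData  ->  after: */
struct hfa384x_wpa_data {
	__le16 datalen;		/* illustrative fields only */
	u8 data[80];
} __packed;
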
diff --git a/drivers/staging/wlan-ng/prism2sta.c b/drivers/staging/wlan-ng/prism2sta.c
index e1b4a94292ff..984804b92e05 100644
--- a/drivers/staging/wlan-ng/prism2sta.c
+++ b/drivers/staging/wlan-ng/prism2sta.c
@@ -104,32 +104,33 @@ static void prism2sta_reset(struct wlandevice *wlandev);
static int prism2sta_txframe(struct wlandevice *wlandev, struct sk_buff *skb,
union p80211_hdr *p80211_hdr,
struct p80211_metawep *p80211_wep);
-static int prism2sta_mlmerequest(struct wlandevice *wlandev, struct p80211msg *msg);
+static int prism2sta_mlmerequest(struct wlandevice *wlandev,
+ struct p80211msg *msg);
static int prism2sta_getcardinfo(struct wlandevice *wlandev);
static int prism2sta_globalsetup(struct wlandevice *wlandev);
static int prism2sta_setmulticast(struct wlandevice *wlandev,
struct net_device *dev);
static void prism2sta_inf_handover(struct wlandevice *wlandev,
- struct hfa384x_InfFrame *inf);
+ struct hfa384x_inf_frame *inf);
static void prism2sta_inf_tallies(struct wlandevice *wlandev,
- struct hfa384x_InfFrame *inf);
+ struct hfa384x_inf_frame *inf);
static void prism2sta_inf_hostscanresults(struct wlandevice *wlandev,
- struct hfa384x_InfFrame *inf);
+ struct hfa384x_inf_frame *inf);
static void prism2sta_inf_scanresults(struct wlandevice *wlandev,
- struct hfa384x_InfFrame *inf);
+ struct hfa384x_inf_frame *inf);
static void prism2sta_inf_chinforesults(struct wlandevice *wlandev,
- struct hfa384x_InfFrame *inf);
+ struct hfa384x_inf_frame *inf);
static void prism2sta_inf_linkstatus(struct wlandevice *wlandev,
- struct hfa384x_InfFrame *inf);
+ struct hfa384x_inf_frame *inf);
static void prism2sta_inf_assocstatus(struct wlandevice *wlandev,
- struct hfa384x_InfFrame *inf);
+ struct hfa384x_inf_frame *inf);
static void prism2sta_inf_authreq(struct wlandevice *wlandev,
- struct hfa384x_InfFrame *inf);
+ struct hfa384x_inf_frame *inf);
static void prism2sta_inf_authreq_defer(struct wlandevice *wlandev,
- struct hfa384x_InfFrame *inf);
+ struct hfa384x_inf_frame *inf);
static void prism2sta_inf_psusercnt(struct wlandevice *wlandev,
- struct hfa384x_InfFrame *inf);
+ struct hfa384x_inf_frame *inf);
/*
* prism2sta_open
@@ -278,7 +279,8 @@ static int prism2sta_txframe(struct wlandevice *wlandev, struct sk_buff *skb,
* Call context:
* process thread
*/
-static int prism2sta_mlmerequest(struct wlandevice *wlandev, struct p80211msg *msg)
+static int prism2sta_mlmerequest(struct wlandevice *wlandev,
+ struct p80211msg *msg)
{
struct hfa384x *hw = wlandev->priv;
@@ -370,9 +372,10 @@ static int prism2sta_mlmerequest(struct wlandevice *wlandev, struct p80211msg *m
qualmsg->noise.status =
P80211ENUM_msgitem_status_data_ok;
- qualmsg->link.data = le16_to_cpu(hw->qual.CQ_currBSS);
- qualmsg->level.data = le16_to_cpu(hw->qual.ASL_currBSS);
- qualmsg->noise.data = le16_to_cpu(hw->qual.ANL_currFC);
+ qualmsg->link.data = le16_to_cpu(hw->qual.cq_curr_bss);
+ qualmsg->level.data =
+ le16_to_cpu(hw->qual.asl_curr_bss);
+ qualmsg->noise.data = le16_to_cpu(hw->qual.anl_curr_fc);
qualmsg->txrate.data = hw->txrate;
break;
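
The renamed qual fields still go through le16_to_cpu() because the PRISM firmware returns its communications-quality record in little-endian byte order; converting at the point of use keeps the values right on big-endian hosts and compiles to a no-op on x86. A sketch with __le16 annotations so sparse could type-check the conversion (the struct layout here is illustrative):

struct hfa384x_comms_quality {	/* illustrative */
	__le16 cq_curr_bss;	/* communications quality */
	__le16 asl_curr_bss;	/* average signal level   */
	__le16 anl_curr_fc;	/* average noise level    */
};

static u16 link_quality(const struct hfa384x_comms_quality *q)
{
	return le16_to_cpu(q->cq_curr_bss);
}
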
@@ -606,8 +609,8 @@ static int prism2sta_getcardinfo(struct wlandevice *wlandev)
hw->ident_nic.minor = le16_to_cpu(hw->ident_nic.minor);
netdev_info(wlandev->netdev, "ident: nic h/w: id=0x%02x %d.%d.%d\n",
- hw->ident_nic.id, hw->ident_nic.major,
- hw->ident_nic.minor, hw->ident_nic.variant);
+ hw->ident_nic.id, hw->ident_nic.major,
+ hw->ident_nic.minor, hw->ident_nic.variant);
/* Primary f/w identity */
result = hfa384x_drvr_getconfig(hw, HFA384x_RID_PRIIDENTITY,
@@ -625,8 +628,8 @@ static int prism2sta_getcardinfo(struct wlandevice *wlandev)
hw->ident_pri_fw.minor = le16_to_cpu(hw->ident_pri_fw.minor);
netdev_info(wlandev->netdev, "ident: pri f/w: id=0x%02x %d.%d.%d\n",
- hw->ident_pri_fw.id, hw->ident_pri_fw.major,
- hw->ident_pri_fw.minor, hw->ident_pri_fw.variant);
+ hw->ident_pri_fw.id, hw->ident_pri_fw.major,
+ hw->ident_pri_fw.minor, hw->ident_pri_fw.variant);
/* Station (Secondary?) f/w identity */
result = hfa384x_drvr_getconfig(hw, HFA384x_RID_STAIDENTITY,
@@ -639,7 +642,7 @@ static int prism2sta_getcardinfo(struct wlandevice *wlandev)
if (hw->ident_nic.id < 0x8000) {
netdev_err(wlandev->netdev,
- "FATAL: Card is not an Intersil Prism2/2.5/3\n");
+ "FATAL: Card is not an Intersil Prism2/2.5/3\n");
result = -1;
goto failed;
}
@@ -651,19 +654,19 @@ static int prism2sta_getcardinfo(struct wlandevice *wlandev)
hw->ident_sta_fw.minor = le16_to_cpu(hw->ident_sta_fw.minor);
/* strip out the 'special' variant bits */
- hw->mm_mods = hw->ident_sta_fw.variant & (BIT(14) | BIT(15));
- hw->ident_sta_fw.variant &= ~((u16)(BIT(14) | BIT(15)));
+ hw->mm_mods = hw->ident_sta_fw.variant & GENMASK(15, 14);
+ hw->ident_sta_fw.variant &= ~((u16)GENMASK(15, 14));
if (hw->ident_sta_fw.id == 0x1f) {
netdev_info(wlandev->netdev,
- "ident: sta f/w: id=0x%02x %d.%d.%d\n",
- hw->ident_sta_fw.id, hw->ident_sta_fw.major,
- hw->ident_sta_fw.minor, hw->ident_sta_fw.variant);
+ "ident: sta f/w: id=0x%02x %d.%d.%d\n",
+ hw->ident_sta_fw.id, hw->ident_sta_fw.major,
+ hw->ident_sta_fw.minor, hw->ident_sta_fw.variant);
} else {
netdev_info(wlandev->netdev,
- "ident: ap f/w: id=0x%02x %d.%d.%d\n",
- hw->ident_sta_fw.id, hw->ident_sta_fw.major,
- hw->ident_sta_fw.minor, hw->ident_sta_fw.variant);
+ "ident: ap f/w: id=0x%02x %d.%d.%d\n",
+ hw->ident_sta_fw.id, hw->ident_sta_fw.major,
+ hw->ident_sta_fw.minor, hw->ident_sta_fw.variant);
netdev_err(wlandev->netdev, "Unsupported Tertiary AP firmware loaded!\n");
goto failed;
}
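
GENMASK(15, 14), available via <linux/bitops.h>, expands to a mask with bits 15 down to 14 set, i.e. 0xC000, so it equals BIT(14) | BIT(15) while stating the intent (a contiguous two-bit field) directly. A sketch; the mask macro name is hypothetical, the driver open-codes the expression:

#define MM_MODS_MASK	GENMASK(15, 14)	/* == 0xC000 */

hw->mm_mods = hw->ident_sta_fw.variant & MM_MODS_MASK;
hw->ident_sta_fw.variant &= ~((u16)MM_MODS_MASK);
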
@@ -687,10 +690,10 @@ static int prism2sta_getcardinfo(struct wlandevice *wlandev)
hw->cap_sup_mfi.top = le16_to_cpu(hw->cap_sup_mfi.top);
netdev_info(wlandev->netdev,
- "MFI:SUP:role=0x%02x:id=0x%02x:var=0x%02x:b/t=%d/%d\n",
- hw->cap_sup_mfi.role, hw->cap_sup_mfi.id,
- hw->cap_sup_mfi.variant, hw->cap_sup_mfi.bottom,
- hw->cap_sup_mfi.top);
+ "MFI:SUP:role=0x%02x:id=0x%02x:var=0x%02x:b/t=%d/%d\n",
+ hw->cap_sup_mfi.role, hw->cap_sup_mfi.id,
+ hw->cap_sup_mfi.variant, hw->cap_sup_mfi.bottom,
+ hw->cap_sup_mfi.top);
/* Compatibility range, Controller supplier */
result = hfa384x_drvr_getconfig(hw, HFA384x_RID_CFISUPRANGE,
@@ -711,10 +714,10 @@ static int prism2sta_getcardinfo(struct wlandevice *wlandev)
hw->cap_sup_cfi.top = le16_to_cpu(hw->cap_sup_cfi.top);
netdev_info(wlandev->netdev,
- "CFI:SUP:role=0x%02x:id=0x%02x:var=0x%02x:b/t=%d/%d\n",
- hw->cap_sup_cfi.role, hw->cap_sup_cfi.id,
- hw->cap_sup_cfi.variant, hw->cap_sup_cfi.bottom,
- hw->cap_sup_cfi.top);
+ "CFI:SUP:role=0x%02x:id=0x%02x:var=0x%02x:b/t=%d/%d\n",
+ hw->cap_sup_cfi.role, hw->cap_sup_cfi.id,
+ hw->cap_sup_cfi.variant, hw->cap_sup_cfi.bottom,
+ hw->cap_sup_cfi.top);
/* Compatibility range, Primary f/w supplier */
result = hfa384x_drvr_getconfig(hw, HFA384x_RID_PRISUPRANGE,
@@ -735,10 +738,10 @@ static int prism2sta_getcardinfo(struct wlandevice *wlandev)
hw->cap_sup_pri.top = le16_to_cpu(hw->cap_sup_pri.top);
netdev_info(wlandev->netdev,
- "PRI:SUP:role=0x%02x:id=0x%02x:var=0x%02x:b/t=%d/%d\n",
- hw->cap_sup_pri.role, hw->cap_sup_pri.id,
- hw->cap_sup_pri.variant, hw->cap_sup_pri.bottom,
- hw->cap_sup_pri.top);
+ "PRI:SUP:role=0x%02x:id=0x%02x:var=0x%02x:b/t=%d/%d\n",
+ hw->cap_sup_pri.role, hw->cap_sup_pri.id,
+ hw->cap_sup_pri.variant, hw->cap_sup_pri.bottom,
+ hw->cap_sup_pri.top);
/* Compatibility range, Station f/w supplier */
result = hfa384x_drvr_getconfig(hw, HFA384x_RID_STASUPRANGE,
@@ -791,10 +794,10 @@ static int prism2sta_getcardinfo(struct wlandevice *wlandev)
hw->cap_act_pri_cfi.top = le16_to_cpu(hw->cap_act_pri_cfi.top);
netdev_info(wlandev->netdev,
- "PRI-CFI:ACT:role=0x%02x:id=0x%02x:var=0x%02x:b/t=%d/%d\n",
- hw->cap_act_pri_cfi.role, hw->cap_act_pri_cfi.id,
- hw->cap_act_pri_cfi.variant, hw->cap_act_pri_cfi.bottom,
- hw->cap_act_pri_cfi.top);
+ "PRI-CFI:ACT:role=0x%02x:id=0x%02x:var=0x%02x:b/t=%d/%d\n",
+ hw->cap_act_pri_cfi.role, hw->cap_act_pri_cfi.id,
+ hw->cap_act_pri_cfi.variant, hw->cap_act_pri_cfi.bottom,
+ hw->cap_act_pri_cfi.top);
/* Compatibility range, sta f/w actor, CFI supplier */
result = hfa384x_drvr_getconfig(hw, HFA384x_RID_STA_CFIACTRANGES,
@@ -815,10 +818,10 @@ static int prism2sta_getcardinfo(struct wlandevice *wlandev)
hw->cap_act_sta_cfi.top = le16_to_cpu(hw->cap_act_sta_cfi.top);
netdev_info(wlandev->netdev,
- "STA-CFI:ACT:role=0x%02x:id=0x%02x:var=0x%02x:b/t=%d/%d\n",
- hw->cap_act_sta_cfi.role, hw->cap_act_sta_cfi.id,
- hw->cap_act_sta_cfi.variant, hw->cap_act_sta_cfi.bottom,
- hw->cap_act_sta_cfi.top);
+ "STA-CFI:ACT:role=0x%02x:id=0x%02x:var=0x%02x:b/t=%d/%d\n",
+ hw->cap_act_sta_cfi.role, hw->cap_act_sta_cfi.id,
+ hw->cap_act_sta_cfi.variant, hw->cap_act_sta_cfi.bottom,
+ hw->cap_act_sta_cfi.top);
/* Compatibility range, sta f/w actor, MFI supplier */
result = hfa384x_drvr_getconfig(hw, HFA384x_RID_STA_MFIACTRANGES,
@@ -839,10 +842,10 @@ static int prism2sta_getcardinfo(struct wlandevice *wlandev)
hw->cap_act_sta_mfi.top = le16_to_cpu(hw->cap_act_sta_mfi.top);
netdev_info(wlandev->netdev,
- "STA-MFI:ACT:role=0x%02x:id=0x%02x:var=0x%02x:b/t=%d/%d\n",
- hw->cap_act_sta_mfi.role, hw->cap_act_sta_mfi.id,
- hw->cap_act_sta_mfi.variant, hw->cap_act_sta_mfi.bottom,
- hw->cap_act_sta_mfi.top);
+ "STA-MFI:ACT:role=0x%02x:id=0x%02x:var=0x%02x:b/t=%d/%d\n",
+ hw->cap_act_sta_mfi.role, hw->cap_act_sta_mfi.id,
+ hw->cap_act_sta_mfi.variant, hw->cap_act_sta_mfi.bottom,
+ hw->cap_act_sta_mfi.top);
/* Serial Number */
result = hfa384x_drvr_getconfig(hw, HFA384x_RID_NICSERIALNUMBER,
@@ -920,7 +923,7 @@ static int prism2sta_globalsetup(struct wlandevice *wlandev)
}
static int prism2sta_setmulticast(struct wlandevice *wlandev,
- struct net_device *dev)
+ struct net_device *dev)
{
int result = 0;
struct hfa384x *hw = wlandev->priv;
@@ -962,7 +965,7 @@ exit:
* interrupt
*/
static void prism2sta_inf_handover(struct wlandevice *wlandev,
- struct hfa384x_InfFrame *inf)
+ struct hfa384x_inf_frame *inf)
{
pr_debug("received infoframe:HANDOVER (unhandled)\n");
}
@@ -985,7 +988,7 @@ static void prism2sta_inf_handover(struct wlandevice *wlandev,
* interrupt
*/
static void prism2sta_inf_tallies(struct wlandevice *wlandev,
- struct hfa384x_InfFrame *inf)
+ struct hfa384x_inf_frame *inf)
{
struct hfa384x *hw = wlandev->priv;
u16 *src16;
@@ -999,7 +1002,7 @@ static void prism2sta_inf_tallies(struct wlandevice *wlandev,
* record length of the info record.
*/
- cnt = sizeof(struct hfa384x_CommTallies32) / sizeof(u32);
+ cnt = sizeof(struct hfa384x_comm_tallies_32) / sizeof(u32);
if (inf->framelen > 22) {
dst = (u32 *)&hw->tallies;
src32 = (u32 *)&inf->info.commtallies32;
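
Worth noting in the tallies hunk: the counter count is derived from the record size, so the copy loop tracks the struct definition automatically. Roughly, assuming the loop body the hunk truncates matches the rest of the driver:

cnt = sizeof(struct hfa384x_comm_tallies_32) / sizeof(u32);
for (i = 0; i < cnt; i++, dst++, src32++)
	*dst += le32_to_cpu(*src32);	/* accumulate LE counters */
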
@@ -1031,19 +1034,19 @@ static void prism2sta_inf_tallies(struct wlandevice *wlandev,
* interrupt
*/
static void prism2sta_inf_scanresults(struct wlandevice *wlandev,
- struct hfa384x_InfFrame *inf)
+ struct hfa384x_inf_frame *inf)
{
struct hfa384x *hw = wlandev->priv;
int nbss;
- struct hfa384x_ScanResult *sr = &(inf->info.scanresult);
+ struct hfa384x_scan_result *sr = &inf->info.scanresult;
int i;
- struct hfa384x_JoinRequest_data joinreq;
+ struct hfa384x_join_request_data joinreq;
int result;
/* Get the number of results, first in bytes, then in results */
nbss = (inf->framelen * sizeof(u16)) -
sizeof(inf->infotype) - sizeof(inf->info.scanresult.scanreason);
- nbss /= sizeof(struct hfa384x_ScanResultSub);
+ nbss /= sizeof(struct hfa384x_scan_result_sub);
/* Print em */
pr_debug("rx scanresults, reason=%d, nbss=%d:\n",
@@ -1064,7 +1067,7 @@ static void prism2sta_inf_scanresults(struct wlandevice *wlandev,
&joinreq, HFA384x_RID_JOINREQUEST_LEN);
if (result) {
netdev_err(wlandev->netdev, "setconfig(joinreq) failed, result=%d\n",
- result);
+ result);
}
}
@@ -1086,7 +1089,7 @@ static void prism2sta_inf_scanresults(struct wlandevice *wlandev,
* interrupt
*/
static void prism2sta_inf_hostscanresults(struct wlandevice *wlandev,
- struct hfa384x_InfFrame *inf)
+ struct hfa384x_inf_frame *inf)
{
struct hfa384x *hw = wlandev->priv;
int nbss;
@@ -1099,7 +1102,7 @@ static void prism2sta_inf_hostscanresults(struct wlandevice *wlandev,
kfree(hw->scanresults);
- hw->scanresults = kmemdup(inf, sizeof(struct hfa384x_InfFrame), GFP_ATOMIC);
+ hw->scanresults = kmemdup(inf, sizeof(*inf), GFP_ATOMIC);
if (nbss == 0)
nbss = -1;
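
sizeof(*inf) is preferred over sizeof(struct hfa384x_InfFrame) precisely because of renames like this one: the expression keeps yielding the right size no matter what the pointed-to type is called. kmemdup() itself is the standard replacement for an open-coded kmalloc() plus memcpy(). A sketch, with an error check added for completeness (how the surrounding handler actually deals with failure is assumed here):

kfree(hw->scanresults);
hw->scanresults = kmemdup(inf, sizeof(*inf), GFP_ATOMIC);
if (!hw->scanresults)
	return;		/* failure handling assumed for the sketch */
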
@@ -1127,7 +1130,7 @@ static void prism2sta_inf_hostscanresults(struct wlandevice *wlandev,
* interrupt
*/
static void prism2sta_inf_chinforesults(struct wlandevice *wlandev,
- struct hfa384x_InfFrame *inf)
+ struct hfa384x_inf_frame *inf)
{
struct hfa384x *hw = wlandev->priv;
unsigned int i, n;
@@ -1136,8 +1139,8 @@ static void prism2sta_inf_chinforesults(struct wlandevice *wlandev,
le16_to_cpu(inf->info.chinforesult.scanchannels);
for (i = 0, n = 0; i < HFA384x_CHINFORESULT_MAX; i++) {
- struct hfa384x_ChInfoResultSub *result;
- struct hfa384x_ChInfoResultSub *chinforesult;
+ struct hfa384x_ch_info_result_sub *result;
+ struct hfa384x_ch_info_result_sub *chinforesult;
int chan;
if (!(hw->channel_info.results.scanchannels & (1 << i)))
@@ -1179,10 +1182,10 @@ void prism2sta_processing_defer(struct work_struct *data)
/* First let's process the auth frames */
{
struct sk_buff *skb;
- struct hfa384x_InfFrame *inf;
+ struct hfa384x_inf_frame *inf;
while ((skb = skb_dequeue(&hw->authq))) {
- inf = (struct hfa384x_InfFrame *)skb->data;
+ inf = (struct hfa384x_inf_frame *)skb->data;
prism2sta_inf_authreq_defer(wlandev, inf);
}
@@ -1294,7 +1297,7 @@ void prism2sta_processing_defer(struct work_struct *data)
*/
if (wlandev->netdev->type == ARPHRD_ETHER)
netdev_info(wlandev->netdev,
- "linkstatus=DISCONNECTED (unhandled)\n");
+ "linkstatus=DISCONNECTED (unhandled)\n");
wlandev->macmode = WLAN_MACMODE_NONE;
netif_carrier_off(wlandev->netdev);
@@ -1391,7 +1394,7 @@ void prism2sta_processing_defer(struct work_struct *data)
* Disable Transmits, Ignore receives of data frames
*/
if (hw->join_ap && --hw->join_retries > 0) {
- struct hfa384x_JoinRequest_data joinreq;
+ struct hfa384x_join_request_data joinreq;
joinreq = hw->joinreq;
/* Send the join request */
@@ -1415,7 +1418,7 @@ void prism2sta_processing_defer(struct work_struct *data)
default:
/* This is bad, IO port problems? */
netdev_warn(wlandev->netdev,
- "unknown linkstatus=0x%02x\n", hw->link_status);
+ "unknown linkstatus=0x%02x\n", hw->link_status);
return;
}
@@ -1440,7 +1443,7 @@ void prism2sta_processing_defer(struct work_struct *data)
* interrupt
*/
static void prism2sta_inf_linkstatus(struct wlandevice *wlandev,
- struct hfa384x_InfFrame *inf)
+ struct hfa384x_inf_frame *inf)
{
struct hfa384x *hw = wlandev->priv;
@@ -1468,10 +1471,10 @@ static void prism2sta_inf_linkstatus(struct wlandevice *wlandev,
* interrupt
*/
static void prism2sta_inf_assocstatus(struct wlandevice *wlandev,
- struct hfa384x_InfFrame *inf)
+ struct hfa384x_inf_frame *inf)
{
struct hfa384x *hw = wlandev->priv;
- struct hfa384x_AssocStatus rec;
+ struct hfa384x_assoc_status rec;
int i;
memcpy(&rec, &inf->info.assocstatus, sizeof(rec));
@@ -1529,7 +1532,7 @@ static void prism2sta_inf_assocstatus(struct wlandevice *wlandev,
*
*/
static void prism2sta_inf_authreq(struct wlandevice *wlandev,
- struct hfa384x_InfFrame *inf)
+ struct hfa384x_inf_frame *inf)
{
struct hfa384x *hw = wlandev->priv;
struct sk_buff *skb;
@@ -1544,10 +1547,10 @@ static void prism2sta_inf_authreq(struct wlandevice *wlandev,
}
static void prism2sta_inf_authreq_defer(struct wlandevice *wlandev,
- struct hfa384x_InfFrame *inf)
+ struct hfa384x_inf_frame *inf)
{
struct hfa384x *hw = wlandev->priv;
- struct hfa384x_authenticateStation_data rec;
+ struct hfa384x_authenticate_station_data rec;
int i, added, result, cnt;
u8 *addr;
@@ -1718,7 +1721,7 @@ static void prism2sta_inf_authreq_defer(struct wlandevice *wlandev,
* interrupt
*/
static void prism2sta_inf_psusercnt(struct wlandevice *wlandev,
- struct hfa384x_InfFrame *inf)
+ struct hfa384x_inf_frame *inf)
{
struct hfa384x *hw = wlandev->priv;
@@ -1742,7 +1745,8 @@ static void prism2sta_inf_psusercnt(struct wlandevice *wlandev,
* Call context:
* interrupt
*/
-void prism2sta_ev_info(struct wlandevice *wlandev, struct hfa384x_InfFrame *inf)
+void prism2sta_ev_info(struct wlandevice *wlandev,
+ struct hfa384x_inf_frame *inf)
{
inf->infotype = le16_to_cpu(inf->infotype);
/* Dispatch */
@@ -1785,7 +1789,7 @@ void prism2sta_ev_info(struct wlandevice *wlandev, struct hfa384x_InfFrame *inf)
break;
default:
netdev_warn(wlandev->netdev,
- "Unknown info type=0x%02x\n", inf->infotype);
+ "Unknown info type=0x%02x\n", inf->infotype);
break;
}
}
@@ -1859,32 +1863,32 @@ void prism2sta_ev_alloc(struct wlandevice *wlandev)
}
/*
-* create_wlan
-*
-* Called at module init time. This creates the struct wlandevice structure
-* and initializes it with relevant bits.
-*
-* Arguments:
-* none
-*
-* Returns:
-* the created struct wlandevice structure.
-*
-* Side effects:
-* also allocates the priv/hw structures.
-*
-* Call context:
-* process thread
-*
-*/
+ * create_wlan
+ *
+ * Called at module init time. This creates the struct wlandevice structure
+ * and initializes it with relevant bits.
+ *
+ * Arguments:
+ * none
+ *
+ * Returns:
+ * the created struct wlandevice structure.
+ *
+ * Side effects:
+ * also allocates the priv/hw structures.
+ *
+ * Call context:
+ * process thread
+ *
+ */
static struct wlandevice *create_wlan(void)
{
struct wlandevice *wlandev = NULL;
struct hfa384x *hw = NULL;
/* Alloc our structures */
- wlandev = kzalloc(sizeof(struct wlandevice), GFP_KERNEL);
- hw = kzalloc(sizeof(struct hfa384x), GFP_KERNEL);
+ wlandev = kzalloc(sizeof(*wlandev), GFP_KERNEL);
+ hw = kzalloc(sizeof(*hw), GFP_KERNEL);
if (!wlandev || !hw) {
kfree(wlandev);
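
The same idiom appears in create_wlan(): kzalloc(sizeof(*var), ...) instead of naming the struct type. Because kfree(NULL) is defined as a no-op, a single cleanup path covers the case where only one of the two allocations succeeded; a sketch of the shape the hunk truncates:

wlandev = kzalloc(sizeof(*wlandev), GFP_KERNEL);
hw = kzalloc(sizeof(*hw), GFP_KERNEL);
if (!wlandev || !hw) {
	kfree(wlandev);		/* kfree(NULL) is safe */
	kfree(hw);
	return NULL;
}
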
@@ -1943,9 +1947,9 @@ void prism2sta_commsqual_defer(struct work_struct *data)
}
pr_debug("commsqual %d %d %d\n",
- le16_to_cpu(hw->qual.CQ_currBSS),
- le16_to_cpu(hw->qual.ASL_currBSS),
- le16_to_cpu(hw->qual.ANL_currFC));
+ le16_to_cpu(hw->qual.cq_curr_bss),
+ le16_to_cpu(hw->qual.asl_curr_bss),
+ le16_to_cpu(hw->qual.anl_curr_fc));
}
/* Get the signal rate */
diff --git a/drivers/staging/xgifb/XGI_main.h b/drivers/staging/xgifb/XGI_main.h
index 85079fea7152..7a80a90f229f 100644
--- a/drivers/staging/xgifb/XGI_main.h
+++ b/drivers/staging/xgifb/XGI_main.h
@@ -139,7 +139,7 @@ static const struct _XGIbios_mode {
static const unsigned short XGI310paneltype[] = {
LCD_UNKNOWN, LCD_800x600, LCD_1024x768, LCD_1280x1024,
- LCD_640x480, LCD_1024x600, LCD_1152x864, LCD_1280x960,
+ LCD_640x480, LCD_1024x600, LCD_1152x864, LCD_1280x960,
LCD_1152x768, LCD_1400x1050, LCD_1280x768, LCD_1600x1200,
LCD_1024x768, LCD_1024x768, LCD_1024x768};
@@ -174,7 +174,7 @@ static const struct _XGI_tvtype {
{"NTSC", 2},
{"pal", 1},
{"ntsc", 2},
- {"\0", -1}
+ {"\0", -1}
};
static const struct _XGI_vrate {
@@ -183,44 +183,44 @@ static const struct _XGI_vrate {
u16 yres;
u16 refresh;
} XGIfb_vrate[] = {
- {1, 640, 480, 60}, {2, 640, 480, 72},
- {3, 640, 480, 75}, {4, 640, 480, 85},
+ {1, 640, 480, 60}, {2, 640, 480, 72},
+ {3, 640, 480, 75}, {4, 640, 480, 85},
{5, 640, 480, 100}, {6, 640, 480, 120},
- {7, 640, 480, 160}, {8, 640, 480, 200},
+ {7, 640, 480, 160}, {8, 640, 480, 200},
- {1, 720, 480, 60},
- {1, 720, 576, 58},
- {1, 800, 480, 60}, {2, 800, 480, 75}, {3, 800, 480, 85},
- {1, 800, 600, 60}, {2, 800, 600, 72}, {3, 800, 600, 75},
- {4, 800, 600, 85}, {5, 800, 600, 100},
- {6, 800, 600, 120}, {7, 800, 600, 160},
+ {1, 720, 480, 60},
+ {1, 720, 576, 58},
+ {1, 800, 480, 60}, {2, 800, 480, 75}, {3, 800, 480, 85},
+ {1, 800, 600, 60}, {2, 800, 600, 72}, {3, 800, 600, 75},
+ {4, 800, 600, 85}, {5, 800, 600, 100},
+ {6, 800, 600, 120}, {7, 800, 600, 160},
- {1, 1024, 768, 60}, {2, 1024, 768, 70}, {3, 1024, 768, 75},
- {4, 1024, 768, 85}, {5, 1024, 768, 100}, {6, 1024, 768, 120},
- {1, 1024, 576, 60}, {2, 1024, 576, 75}, {3, 1024, 576, 85},
- {1, 1024, 600, 60},
- {1, 1152, 768, 60},
- {1, 1280, 720, 60}, {2, 1280, 720, 75}, {3, 1280, 720, 85},
- {1, 1280, 768, 60},
+ {1, 1024, 768, 60}, {2, 1024, 768, 70}, {3, 1024, 768, 75},
+ {4, 1024, 768, 85}, {5, 1024, 768, 100}, {6, 1024, 768, 120},
+ {1, 1024, 576, 60}, {2, 1024, 576, 75}, {3, 1024, 576, 85},
+ {1, 1024, 600, 60},
+ {1, 1152, 768, 60},
+ {1, 1280, 720, 60}, {2, 1280, 720, 75}, {3, 1280, 720, 85},
+ {1, 1280, 768, 60},
{1, 1280, 1024, 60}, {2, 1280, 1024, 75}, {3, 1280, 1024, 85},
- {1, 1280, 960, 70},
- {1, 1400, 1050, 60},
- {1, 1600, 1200, 60}, {2, 1600, 1200, 65},
+ {1, 1280, 960, 70},
+ {1, 1400, 1050, 60},
+ {1, 1600, 1200, 60}, {2, 1600, 1200, 65},
{3, 1600, 1200, 70}, {4, 1600, 1200, 75},
- {5, 1600, 1200, 85}, {6, 1600, 1200, 100},
+ {5, 1600, 1200, 85}, {6, 1600, 1200, 100},
{7, 1600, 1200, 120},
- {1, 1920, 1440, 60}, {2, 1920, 1440, 65},
+ {1, 1920, 1440, 60}, {2, 1920, 1440, 65},
{3, 1920, 1440, 70}, {4, 1920, 1440, 75},
- {5, 1920, 1440, 85}, {6, 1920, 1440, 100},
- {1, 2048, 1536, 60}, {2, 2048, 1536, 65},
+ {5, 1920, 1440, 85}, {6, 1920, 1440, 100},
+ {1, 2048, 1536, 60}, {2, 2048, 1536, 65},
{3, 2048, 1536, 70}, {4, 2048, 1536, 75},
- {5, 2048, 1536, 85},
- {0, 0, 0, 0}
+ {5, 2048, 1536, 85},
+ {0, 0, 0, 0}
};
static const struct _XGI_TV_filter {
diff --git a/drivers/staging/xgifb/XGI_main_26.c b/drivers/staging/xgifb/XGI_main_26.c
index 0c78491ff5a1..777cd6e11694 100644
--- a/drivers/staging/xgifb/XGI_main_26.c
+++ b/drivers/staging/xgifb/XGI_main_26.c
@@ -56,8 +56,8 @@ static inline void dumpVGAReg(struct xgifb_video_info *xgifb_info)
/* --------------- Hardware Access Routines -------------------------- */
static int XGIfb_mode_rate_to_dclock(struct vb_device_info *XGI_Pr,
- struct xgi_hw_device_info *HwDeviceExtension,
- unsigned char modeno)
+ struct xgi_hw_device_info *HwDeviceExtension,
+ unsigned char modeno)
{
unsigned short ModeNo = modeno;
unsigned short ModeIdIndex = 0, ClockIndex = 0;
@@ -68,7 +68,7 @@ static int XGIfb_mode_rate_to_dclock(struct vb_device_info *XGI_Pr,
XGI_SearchModeID(ModeNo, &ModeIdIndex);
RefreshRateTableIndex = XGI_GetRatePtrCRT2(HwDeviceExtension, ModeNo,
- ModeIdIndex, XGI_Pr);
+ ModeIdIndex, XGI_Pr);
ClockIndex = XGI330_RefIndex[RefreshRateTableIndex].Ext_CRTVCLK;
@@ -76,11 +76,11 @@ static int XGIfb_mode_rate_to_dclock(struct vb_device_info *XGI_Pr,
}
static int XGIfb_mode_rate_to_ddata(struct vb_device_info *XGI_Pr,
- struct xgi_hw_device_info *HwDeviceExtension,
- unsigned char modeno,
- u32 *left_margin, u32 *right_margin, u32 *upper_margin,
- u32 *lower_margin, u32 *hsync_len, u32 *vsync_len, u32 *sync,
- u32 *vmode)
+ struct xgi_hw_device_info *HwDeviceExtension,
+ unsigned char modeno, u32 *left_margin,
+ u32 *right_margin, u32 *upper_margin,
+ u32 *lower_margin, u32 *hsync_len,
+ u32 *vsync_len, u32 *sync, u32 *vmode)
{
unsigned short ModeNo = modeno;
unsigned short ModeIdIndex, index = 0;
@@ -95,7 +95,7 @@ static int XGIfb_mode_rate_to_ddata(struct vb_device_info *XGI_Pr,
if (!XGI_SearchModeID(ModeNo, &ModeIdIndex))
return 0;
RefreshRateTableIndex = XGI_GetRatePtrCRT2(HwDeviceExtension, ModeNo,
- ModeIdIndex, XGI_Pr);
+ ModeIdIndex, XGI_Pr);
index = XGI330_RefIndex[RefreshRateTableIndex].Ext_CRT1CRTC;
sr_data = XGI_CRT1Table[index].CR[5];
@@ -105,7 +105,7 @@ static int XGIfb_mode_rate_to_ddata(struct vb_device_info *XGI_Pr,
cr_data = XGI_CRT1Table[index].CR[3];
/* Horizontal retrace (=sync) start */
- HRS = (cr_data & 0xff) | ((unsigned short) (sr_data & 0xC0) << 2);
+ HRS = (cr_data & 0xff) | ((unsigned short)(sr_data & 0xC0) << 2);
F = HRS - HDE - 3;
sr_data = XGI_CRT1Table[index].CR[6];
@@ -115,8 +115,8 @@ static int XGIfb_mode_rate_to_ddata(struct vb_device_info *XGI_Pr,
cr_data2 = XGI_CRT1Table[index].CR[4];
/* Horizontal blank end */
- HBE = (cr_data & 0x1f) | ((unsigned short) (cr_data2 & 0x80) >> 2)
- | ((unsigned short) (sr_data & 0x03) << 6);
+ HBE = (cr_data & 0x1f) | ((unsigned short)(cr_data2 & 0x80) >> 2)
+ | ((unsigned short)(sr_data & 0x03) << 6);
/* Horizontal retrace (=sync) end */
HRE = (cr_data2 & 0x1f) | ((sr_data & 0x04) << 3);
@@ -142,15 +142,15 @@ static int XGIfb_mode_rate_to_ddata(struct vb_device_info *XGI_Pr,
cr_data = XGI_CRT1Table[index].CR[10];
/* Vertical retrace (=sync) start */
- VRS = (cr_data & 0xff) | ((unsigned short) (cr_data2 & 0x04) << 6)
- | ((unsigned short) (cr_data2 & 0x80) << 2)
- | ((unsigned short) (sr_data & 0x08) << 7);
+ VRS = (cr_data & 0xff) | ((unsigned short)(cr_data2 & 0x04) << 6)
+ | ((unsigned short)(cr_data2 & 0x80) << 2)
+ | ((unsigned short)(sr_data & 0x08) << 7);
F = VRS + 1 - VDE;
cr_data = XGI_CRT1Table[index].CR[13];
/* Vertical blank end */
- VBE = (cr_data & 0xff) | ((unsigned short) (sr_data & 0x10) << 4);
+ VBE = (cr_data & 0xff) | ((unsigned short)(sr_data & 0x10) << 4);
temp = VBE - ((VDE - 1) & 511);
B = (temp > 0) ? temp : (temp + 512);
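
The cast-spacing cleanups above sit inside classic VGA CRTC bit surgery: a timing value wider than 8 bits is scattered across several registers and reassembled piecewise. Annotating the vertical-retrace-start case from this hunk:

VRS = (cr_data & 0xff) |				/* bits 7..0 */
      ((unsigned short)(cr_data2 & 0x04) << 6) |	/* bit 8     */
      ((unsigned short)(cr_data2 & 0x80) << 2) |	/* bit 9     */
      ((unsigned short)(sr_data & 0x08) << 7);		/* bit 10    */
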
@@ -231,11 +231,11 @@ static int XGIfb_GetXG21DefaultLVDSModeIdx(struct xgifb_video_info *xgifb_info)
{
int i = 0;
- while ((XGIbios_mode[i].mode_no != 0)
- && (XGIbios_mode[i].xres <= xgifb_info->lvds_data.LVDSHDE)) {
- if ((XGIbios_mode[i].xres == xgifb_info->lvds_data.LVDSHDE)
- && (XGIbios_mode[i].yres == xgifb_info->lvds_data.LVDSVDE)
- && (XGIbios_mode[i].bpp == 8)) {
+ while ((XGIbios_mode[i].mode_no != 0) &&
+ (XGIbios_mode[i].xres <= xgifb_info->lvds_data.LVDSHDE)) {
+ if ((XGIbios_mode[i].xres == xgifb_info->lvds_data.LVDSHDE) &&
+ (XGIbios_mode[i].yres == xgifb_info->lvds_data.LVDSVDE) &&
+ (XGIbios_mode[i].bpp == 8)) {
return i;
}
i++;
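
checkpatch prefers logical operators at the end of the broken line rather than at the start of the continuation, which is all this hunk changes; the resulting shape:

if ((XGIbios_mode[i].xres == xgifb_info->lvds_data.LVDSHDE) &&
    (XGIbios_mode[i].yres == xgifb_info->lvds_data.LVDSVDE) &&
    (XGIbios_mode[i].bpp == 8)) {
	return i;
}
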
@@ -384,9 +384,8 @@ static int XGIfb_validate_mode(struct xgifb_video_info *xgifb_info, int myindex)
return -1;
break;
case 640:
- if ((XGIbios_mode[myindex].yres != 400)
- && (XGIbios_mode[myindex].yres
- != 480))
+ if ((XGIbios_mode[myindex].yres != 400) &&
+ (XGIbios_mode[myindex].yres != 480))
return -1;
break;
case 800:
@@ -518,7 +517,7 @@ static void XGIfb_search_crt2type(const char *name)
{
int i = 0;
- if (name == NULL)
+ if (!name)
return;
while (XGI_crt2type[i].type_no != -1) {
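
The name == NULL tests become !name per the usual kernel preference for boolean pointer tests:

if (!name)	/* rather than: if (name == NULL) */
	return;
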
@@ -562,7 +561,7 @@ static u8 XGIfb_search_refresh_rate(struct xgifb_video_info *xgifb_info,
!= 1)) {
pr_debug("Adjusting rate from %d down to %d\n",
rate,
- XGIfb_vrate[i-1].refresh);
+ XGIfb_vrate[i - 1].refresh);
xgifb_info->rate_idx =
XGIfb_vrate[i - 1].idx;
xgifb_info->refresh_rate =
@@ -589,7 +588,7 @@ static void XGIfb_search_tvstd(const char *name)
{
int i = 0;
- if (name == NULL)
+ if (!name)
return;
while (XGI_tvtype[i].type_no != -1) {
@@ -683,7 +682,7 @@ static void XGIfb_pre_setmode(struct xgifb_video_info *xgifb_info)
xgifb_reg_set(XGICR, IND_XGI_SCRATCH_REG_CR30, cr30);
xgifb_reg_set(XGICR, IND_XGI_SCRATCH_REG_CR31, cr31);
xgifb_reg_set(XGICR, IND_XGI_SCRATCH_REG_CR33,
- (xgifb_info->rate_idx & 0x0F));
+ (xgifb_info->rate_idx & 0x0F));
}
static void XGIfb_post_setmode(struct xgifb_video_info *xgifb_info)
@@ -730,7 +729,6 @@ static void XGIfb_post_setmode(struct xgifb_video_info *xgifb_info)
if (xgifb_info->display2 == XGIFB_DISP_TV &&
xgifb_info->hasVB == HASVB_301) {
-
reg = xgifb_reg_get(XGIPART4, 0x01);
if (reg < 0xB0) { /* Set filter for XGI301 */
@@ -763,16 +761,13 @@ static void XGIfb_post_setmode(struct xgifb_video_info *xgifb_info)
0x01);
if (xgifb_info->TV_type == TVMODE_NTSC) {
-
xgifb_reg_and(XGIPART2, 0x3a, 0x1f);
if (xgifb_info->TV_plug == TVPLUG_SVIDEO) {
-
xgifb_reg_and(XGIPART2, 0x30, 0xdf);
} else if (xgifb_info->TV_plug
== TVPLUG_COMPOSITE) {
-
xgifb_reg_or(XGIPART2, 0x30, 0x20);
switch (xgifb_info->video_width) {
@@ -822,16 +817,13 @@ static void XGIfb_post_setmode(struct xgifb_video_info *xgifb_info)
}
} else if (xgifb_info->TV_type == TVMODE_PAL) {
-
xgifb_reg_and(XGIPART2, 0x3A, 0x1F);
if (xgifb_info->TV_plug == TVPLUG_SVIDEO) {
-
xgifb_reg_and(XGIPART2, 0x30, 0xDF);
} else if (xgifb_info->TV_plug
== TVPLUG_COMPOSITE) {
-
xgifb_reg_or(XGIPART2, 0x30, 0x20);
switch (xgifb_info->video_width) {
@@ -912,7 +904,7 @@ static void XGIfb_post_setmode(struct xgifb_video_info *xgifb_info)
}
static int XGIfb_do_set_var(struct fb_var_screeninfo *var, int isactive,
- struct fb_info *info)
+ struct fb_info *info)
{
struct xgifb_video_info *xgifb_info = info->par;
struct xgi_hw_device_info *hw_info = &xgifb_info->hw_info;
@@ -945,17 +937,15 @@ static int XGIfb_do_set_var(struct fb_var_screeninfo *var, int isactive,
if (var->pixclock) {
drate = 1000000000 / var->pixclock;
hrate = (drate * 1000) / htotal;
- xgifb_info->refresh_rate = (unsigned int) (hrate * 2
+ xgifb_info->refresh_rate = (unsigned int)(hrate * 2
/ vtotal);
} else {
xgifb_info->refresh_rate = 60;
}
pr_debug("Change mode to %dx%dx%d-%dHz\n",
- var->xres,
- var->yres,
- var->bits_per_pixel,
- xgifb_info->refresh_rate);
+ var->xres, var->yres, var->bits_per_pixel,
+ xgifb_info->refresh_rate);
old_mode = xgifb_info->mode_idx;
xgifb_info->mode_idx = 0;
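
For reference, the refresh-rate arithmetic above works in fbdev units, where var->pixclock is the pixel period in picoseconds:

/*
 *   drate   = 1000000000 / pixclock    ->  dot clock in kHz
 *   hrate   = drate * 1000 / htotal    ->  line rate in Hz
 *   refresh = hrate * 2 / vtotal       ->  frames per second
 *
 * The factor of 2 pairs with a vtotal the caller has already doubled
 * for non-interlaced modes.  Worked example (values assumed): a
 * 1024x768 mode with pixclock = 15385 ps (65 MHz dot clock),
 * htotal = 1344 and vtotal = 1612 (806 doubled) gives
 * hrate ~= 48.4 kHz and a refresh of ~60 Hz.
 */
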
@@ -992,7 +982,6 @@ static int XGIfb_do_set_var(struct fb_var_screeninfo *var, int isactive,
}
if (isactive) {
-
XGIfb_pre_setmode(xgifb_info);
if (XGISetModeNew(xgifb_info, hw_info,
XGIbios_mode[xgifb_info->mode_idx].mode_no)
@@ -1064,7 +1053,7 @@ static int XGIfb_do_set_var(struct fb_var_screeninfo *var, int isactive,
break;
}
}
- XGIfb_bpp_to_var(xgifb_info, var); /*update ARGB info*/
+ XGIfb_bpp_to_var(xgifb_info, var); /* update ARGB info */
dumpVGAReg(xgifb_info);
return 0;
@@ -1150,7 +1139,7 @@ static int XGIfb_setcolreg(unsigned int regno, unsigned int red,
}
break;
case 16:
- ((u32 *) (info->pseudo_palette))[regno] = ((red & 0xf800))
+ ((u32 *)(info->pseudo_palette))[regno] = ((red & 0xf800))
| ((green & 0xfc00) >> 5) | ((blue & 0xf800)
>> 11);
break;
@@ -1158,7 +1147,7 @@ static int XGIfb_setcolreg(unsigned int regno, unsigned int red,
red >>= 8;
green >>= 8;
blue >>= 8;
- ((u32 *) (info->pseudo_palette))[regno] = (red << 16) | (green
+ ((u32 *)(info->pseudo_palette))[regno] = (red << 16) | (green
<< 8) | (blue);
break;
}
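
The 16 bpp branch above packs fbdev's 16-bit color components into RGB565 by keeping the top 5/6/5 bits of red, green and blue; a self-contained sketch of the packing:

static u32 pack_rgb565(u16 red, u16 green, u16 blue)
{
	return (red & 0xf800) |			/* bits 15..11 */
	       ((green & 0xfc00) >> 5) |	/* bits 10..5  */
	       ((blue & 0xf800) >> 11);		/* bits 4..0   */
}
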
@@ -1168,7 +1157,7 @@ static int XGIfb_setcolreg(unsigned int regno, unsigned int red,
/* ----------- FBDev related routines for all series ---------- */
static int XGIfb_get_fix(struct fb_fix_screeninfo *fix, int con,
- struct fb_info *info)
+ struct fb_info *info)
{
struct xgifb_video_info *xgifb_info = info->par;
@@ -1250,7 +1239,7 @@ static int XGIfb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
drate = 1000000000 / var->pixclock;
hrate = (drate * 1000) / htotal;
xgifb_info->refresh_rate =
- (unsigned int) (hrate * 2 / vtotal);
+ (unsigned int)(hrate * 2 / vtotal);
pr_debug(
"%s: pixclock = %d ,htotal=%d, vtotal=%d\n"
"%s: drate=%d, hrate=%d, refresh_rate=%d\n",
@@ -1262,10 +1251,10 @@ static int XGIfb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
search_idx = 0;
while ((XGIbios_mode[search_idx].mode_no != 0) &&
- (XGIbios_mode[search_idx].xres <= var->xres)) {
+ (XGIbios_mode[search_idx].xres <= var->xres)) {
if ((XGIbios_mode[search_idx].xres == var->xres) &&
- (XGIbios_mode[search_idx].yres == var->yres) &&
- (XGIbios_mode[search_idx].bpp == var->bits_per_pixel)) {
+ (XGIbios_mode[search_idx].yres == var->yres) &&
+ (XGIbios_mode[search_idx].bpp == var->bits_per_pixel)) {
if (XGIfb_validate_mode(xgifb_info, search_idx) > 0) {
found_mode = 1;
break;
@@ -1275,9 +1264,8 @@ static int XGIfb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
}
if (!found_mode) {
-
pr_err("%dx%dx%d is no valid mode\n",
- var->xres, var->yres, var->bits_per_pixel);
+ var->xres, var->yres, var->bits_per_pixel);
search_idx = 0;
while (XGIbios_mode[search_idx].mode_no != 0) {
if ((var->xres <= XGIbios_mode[search_idx].xres) &&
@@ -1296,11 +1284,11 @@ static int XGIfb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
var->xres = XGIbios_mode[search_idx].xres;
var->yres = XGIbios_mode[search_idx].yres;
pr_debug("Adapted to mode %dx%dx%d\n",
- var->xres, var->yres, var->bits_per_pixel);
+ var->xres, var->yres, var->bits_per_pixel);
} else {
pr_err("Failed to find similar mode to %dx%dx%d\n",
- var->xres, var->yres, var->bits_per_pixel);
+ var->xres, var->yres, var->bits_per_pixel);
return -EINVAL;
}
}
@@ -1332,7 +1320,7 @@ static int XGIfb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
}
static int XGIfb_pan_display(struct fb_var_screeninfo *var,
- struct fb_info *info)
+ struct fb_info *info)
{
int err;
@@ -1344,9 +1332,8 @@ static int XGIfb_pan_display(struct fb_var_screeninfo *var,
if (var->vmode & FB_VMODE_YWRAP) {
if (var->yoffset >= info->var.yres_virtual || var->xoffset)
return -EINVAL;
- } else if (var->xoffset + info->var.xres > info->var.xres_virtual
- || var->yoffset + info->var.yres
- > info->var.yres_virtual) {
+ } else if (var->xoffset + info->var.xres > info->var.xres_virtual ||
+ var->yoffset + info->var.yres > info->var.yres_virtual) {
return -EINVAL;
}
err = XGIfb_pan_var(var, info);
@@ -1401,7 +1388,6 @@ static struct fb_ops XGIfb_ops = {
static int XGIfb_get_dram_size(struct xgifb_video_info *xgifb_info)
{
-
u8 ChannelNum, tmp;
u8 reg = 0;
@@ -1474,10 +1460,8 @@ static int XGIfb_get_dram_size(struct xgifb_video_info *xgifb_info)
xgifb_info->video_size = xgifb_info->video_size * ChannelNum;
pr_info("SR14=%x DramSzie %x ChannelNum %x\n",
- reg,
- xgifb_info->video_size, ChannelNum);
+ reg, xgifb_info->video_size, ChannelNum);
return 0;
-
}
static void XGIfb_detect_VB(struct xgifb_video_info *xgifb_info)
@@ -1597,7 +1581,6 @@ static int __init XGIfb_setup(char *options)
pr_info("Options: %s\n", options);
while ((this_opt = strsep(&options, ",")) != NULL) {
-
if (!*this_opt)
continue;
@@ -1634,8 +1617,7 @@ static int __init XGIfb_setup(char *options)
return 0;
}
-static int xgifb_probe(struct pci_dev *pdev,
- const struct pci_device_id *ent)
+static int xgifb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
u8 reg, reg1;
u8 CR48, CR38;
@@ -1670,7 +1652,7 @@ static int xgifb_probe(struct pci_dev *pdev,
xgifb_info->mmio_size = pci_resource_len(pdev, 1);
xgifb_info->vga_base = pci_resource_start(pdev, 2) + 0x30;
dev_info(&pdev->dev, "Relocate IO address: %Lx [%08lx]\n",
- (u64) pci_resource_start(pdev, 2),
+ (u64)pci_resource_start(pdev, 2),
xgifb_info->vga_base);
if (pci_enable_device(pdev)) {
@@ -1688,7 +1670,7 @@ static int xgifb_probe(struct pci_dev *pdev,
xgifb_reg_set(XGISR, IND_SIS_PASSWORD, SIS_PASSWORD);
reg1 = xgifb_reg_get(XGISR, IND_SIS_PASSWORD);
- if (reg1 != 0xa1) { /*I/O error */
+ if (reg1 != 0xa1) { /* I/O error */
dev_err(&pdev->dev, "I/O error\n");
ret = -EIO;
goto error_disable;
@@ -1698,7 +1680,7 @@ static int xgifb_probe(struct pci_dev *pdev,
case PCI_DEVICE_ID_XGI_20:
xgifb_reg_or(XGICR, Index_CR_GPIO_Reg3, GPIOG_EN);
CR48 = xgifb_reg_get(XGICR, Index_CR_GPIO_Reg1);
- if (CR48&GPIOG_READ)
+ if (CR48 & GPIOG_READ)
xgifb_info->chip = XG21;
else
xgifb_info->chip = XG20;
@@ -1727,7 +1709,7 @@ static int xgifb_probe(struct pci_dev *pdev,
xgifb_info->video_size = video_size_max;
}
- /* Enable PCI_LINEAR_ADDRESSING and MMIO_ENABLE */
+ /* Enable PCI_LINEAR_ADDRESSING and MMIO_ENABLE */
xgifb_reg_or(XGISR,
IND_SIS_PCI_ADDRESS_SET,
(SIS_PCI_ADDR_ENABLE | SIS_MEM_MAP_IO_ENABLE));
@@ -1740,7 +1722,7 @@ static int xgifb_probe(struct pci_dev *pdev,
xgifb_info->video_size,
"XGIfb FB")) {
dev_err(&pdev->dev, "Unable request memory size %x\n",
- xgifb_info->video_size);
+ xgifb_info->video_size);
dev_err(&pdev->dev,
"Fatal error: Unable to reserve frame buffer memory. Is there another framebuffer driver active?\n");
ret = -ENODEV;
@@ -1763,13 +1745,13 @@ static int xgifb_probe(struct pci_dev *pdev,
dev_info(&pdev->dev,
"Framebuffer at 0x%Lx, mapped to 0x%p, size %dk\n",
- (u64) xgifb_info->video_base,
+ (u64)xgifb_info->video_base,
xgifb_info->video_vbase,
xgifb_info->video_size / 1024);
dev_info(&pdev->dev,
"MMIO at 0x%Lx, mapped to 0x%p, size %ldk\n",
- (u64) xgifb_info->mmio_base, xgifb_info->mmio_vbase,
+ (u64)xgifb_info->mmio_base, xgifb_info->mmio_vbase,
xgifb_info->mmio_size / 1024);
pci_set_drvdata(pdev, xgifb_info);
@@ -1784,9 +1766,9 @@ static int xgifb_probe(struct pci_dev *pdev,
xgifb_info->hasVB = HASVB_NONE;
} else if (xgifb_info->chip == XG21) {
CR38 = xgifb_reg_get(XGICR, 0x38);
- if ((CR38&0xE0) == 0xC0)
+ if ((CR38 & 0xE0) == 0xC0)
xgifb_info->display2 = XGIFB_DISP_LCD;
- else if ((CR38&0xE0) == 0x60)
+ else if ((CR38 & 0xE0) == 0x60)
xgifb_info->hasVB = HASVB_CHRONTEL;
else
xgifb_info->hasVB = HASVB_NONE;
@@ -1903,8 +1885,7 @@ static int xgifb_probe(struct pci_dev *pdev,
xgifb_info->refresh_rate = refresh_rate;
if (xgifb_info->refresh_rate == 0)
xgifb_info->refresh_rate = 60;
- if (XGIfb_search_refresh_rate(xgifb_info,
- xgifb_info->refresh_rate) == 0) {
+ if (XGIfb_search_refresh_rate(xgifb_info, xgifb_info->refresh_rate) == 0) {
xgifb_info->rate_idx = 1;
xgifb_info->refresh_rate = 60;
}
@@ -1939,15 +1920,13 @@ static int xgifb_probe(struct pci_dev *pdev,
default:
xgifb_info->video_cmap_len = 16;
pr_info("Unsupported depth %d\n",
- xgifb_info->video_bpp);
+ xgifb_info->video_bpp);
break;
}
pr_info("Default mode is %dx%dx%d (%dHz)\n",
- xgifb_info->video_width,
- xgifb_info->video_height,
- xgifb_info->video_bpp,
- xgifb_info->refresh_rate);
+ xgifb_info->video_width, xgifb_info->video_height,
+ xgifb_info->video_bpp, xgifb_info->refresh_rate);
fb_info->var.red.length = 8;
fb_info->var.green.length = 8;
@@ -1964,22 +1943,20 @@ static int xgifb_probe(struct pci_dev *pdev,
XGIfb_bpp_to_var(xgifb_info, &fb_info->var);
- fb_info->var.pixclock = (u32) (1000000000 /
- XGIfb_mode_rate_to_dclock(&xgifb_info->dev_info,
- hw_info,
- XGIbios_mode[xgifb_info->mode_idx].mode_no));
-
- if (XGIfb_mode_rate_to_ddata(&xgifb_info->dev_info, hw_info,
- XGIbios_mode[xgifb_info->mode_idx].mode_no,
- &fb_info->var.left_margin,
- &fb_info->var.right_margin,
- &fb_info->var.upper_margin,
- &fb_info->var.lower_margin,
- &fb_info->var.hsync_len,
- &fb_info->var.vsync_len,
- &fb_info->var.sync,
- &fb_info->var.vmode)) {
-
+ fb_info->var.pixclock = (u32)(1000000000 / XGIfb_mode_rate_to_dclock
+ (&xgifb_info->dev_info, hw_info,
+ XGIbios_mode[xgifb_info->mode_idx].mode_no));
+
+ if (XGIfb_mode_rate_to_ddata(&xgifb_info->dev_info,
+ hw_info, XGIbios_mode[xgifb_info->mode_idx].mode_no,
+ &fb_info->var.left_margin,
+ &fb_info->var.right_margin,
+ &fb_info->var.upper_margin,
+ &fb_info->var.lower_margin,
+ &fb_info->var.hsync_len,
+ &fb_info->var.vsync_len,
+ &fb_info->var.sync,
+ &fb_info->var.vmode)) {
if ((fb_info->var.vmode & FB_VMODE_MASK) ==
FB_VMODE_INTERLACED) {
fb_info->var.yres <<= 1;
@@ -1990,7 +1967,6 @@ static int xgifb_probe(struct pci_dev *pdev,
fb_info->var.yres >>= 1;
fb_info->var.yres_virtual >>= 1;
}
-
}
fb_info->flags = FBINFO_FLAG_DEFAULT;
@@ -2028,9 +2004,7 @@ error:
return ret;
}
-/*****************************************************/
-/* PCI DEVICE HANDLING */
-/*****************************************************/
+/* -------------------- PCI DEVICE HANDLING -------------------- */
static void xgifb_remove(struct pci_dev *pdev)
{
@@ -2054,25 +2028,23 @@ static struct pci_driver xgifb_driver = {
.remove = xgifb_remove
};
-/*****************************************************/
-/* MODULE */
-/*****************************************************/
+/* -------------------- MODULE -------------------- */
-module_param(mode, charp, 0);
+module_param(mode, charp, 0000);
MODULE_PARM_DESC(mode,
- "Selects the desired default display mode in the format XxYxDepth (eg. 1024x768x16).");
+ "Selects the desired default display mode in the format XxYxDepth (eg. 1024x768x16).");
-module_param(forcecrt2type, charp, 0);
+module_param(forcecrt2type, charp, 0000);
MODULE_PARM_DESC(forcecrt2type,
- "Force the second display output type. Possible values are NONE, LCD, TV, VGA, SVIDEO or COMPOSITE.");
+ "Force the second display output type. Possible values are NONE, LCD, TV, VGA, SVIDEO or COMPOSITE.");
-module_param(vesa, int, 0);
+module_param(vesa, int, 0000);
MODULE_PARM_DESC(vesa,
- "Selects the desired default display mode by VESA mode number (eg. 0x117).");
+ "Selects the desired default display mode by VESA mode number (eg. 0x117).");
-module_param(filter, int, 0);
+module_param(filter, int, 0000);
MODULE_PARM_DESC(filter,
- "Selects TV flicker filter type (only for systems with a SiS301 video bridge). Possible values 0-7. Default: [no filter]).");
+ "Selects TV flicker filter type (only for systems with a SiS301 video bridge). Possible values 0-7. Default: [no filter]).");
static int __init xgifb_init(void)
{
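
checkpatch asks for four-digit octal permissions on module_param(); 0000 keeps the old behaviour of plain 0, meaning the parameter is settable only at module load time and gets no sysfs node under /sys/module/xgifb/parameters/. The shape:

static char *mode;	/* declared elsewhere in the file */
module_param(mode, charp, 0000);
MODULE_PARM_DESC(mode,
		 "Selects the desired default display mode in the format XxYxDepth (eg. 1024x768x16).");
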
diff --git a/drivers/staging/xgifb/vb_init.c b/drivers/staging/xgifb/vb_init.c
index 062ece22ed84..14af157958cd 100644
--- a/drivers/staging/xgifb/vb_init.c
+++ b/drivers/staging/xgifb/vb_init.c
@@ -55,8 +55,9 @@ XGINew_GetXG20DRAMType(struct xgi_hw_device_info *HwDeviceExtension,
xgifb_reg_or(pVBInfo->P3d4, 0x4A, 0x80); /* Enable GPIOH read */
/* GPIOF 0:DVI 1:DVO */
data = xgifb_reg_get(pVBInfo->P3d4, 0x48);
- /* HOTPLUG_SUPPORT */
- /* for current XG20 & XG21, GPIOH is floating, driver will
+ /*
+ * HOTPLUG_SUPPORT
+ * for current XG20 & XG21, GPIOH is floating, driver will
* fix DDR temporarily
*/
/* DVI read GPIOH */
@@ -199,7 +200,8 @@ static void XGINew_DDRII_Bootup_XG27(
}
static void XGINew_DDR2_MRS_XG20(struct xgi_hw_device_info *HwDeviceExtension,
- unsigned long P3c4, struct vb_device_info *pVBInfo)
+ unsigned long P3c4,
+ struct vb_device_info *pVBInfo)
{
unsigned long P3d4 = P3c4 + 0x10;
@@ -353,8 +355,8 @@ static void XGINew_DDR2_DefaultRegister(
unsigned long Port, struct vb_device_info *pVBInfo)
{
unsigned long P3d4 = Port, P3c4 = Port - 0x10;
-
- /* keep following setting sequence, each setting in
+ /*
+ * keep following setting sequence, each setting in
* the same reg insert idle
*/
xgifb_reg_set(P3d4, 0x82, 0x77);
@@ -387,7 +389,7 @@ static void XGINew_DDR2_DefaultRegister(
}
static void XGI_SetDRAM_Helper(unsigned long P3d4, u8 seed, u8 temp2, u8 reg,
- u8 shift_factor, u8 mask1, u8 mask2)
+ u8 shift_factor, u8 mask1, u8 mask2)
{
u8 j;
@@ -460,15 +462,15 @@ static void XGINew_SetDRAMDefaultRegister340(
for (j = 0; j <= 6; j++) /* CR90 - CR96 */
xgifb_reg_set(P3d4, (0x90 + j),
- pVBInfo->CR40[14 + j][pVBInfo->ram_type]);
+ pVBInfo->CR40[14 + j][pVBInfo->ram_type]);
for (j = 0; j <= 2; j++) /* CRC3 - CRC5 */
xgifb_reg_set(P3d4, (0xC3 + j),
- pVBInfo->CR40[21 + j][pVBInfo->ram_type]);
+ pVBInfo->CR40[21 + j][pVBInfo->ram_type]);
for (j = 0; j < 2; j++) /* CR8A - CR8B */
xgifb_reg_set(P3d4, (0x8A + j),
- pVBInfo->CR40[1 + j][pVBInfo->ram_type]);
+ pVBInfo->CR40[1 + j][pVBInfo->ram_type]);
if (HwDeviceExtension->jChipType == XG42)
xgifb_reg_set(P3d4, 0x8C, 0x87);
@@ -539,7 +541,8 @@ static unsigned short XGINew_SetDRAMSize20Reg(
}
static int XGINew_ReadWriteRest(unsigned short StopAddr,
- unsigned short StartAddr, struct vb_device_info *pVBInfo)
+ unsigned short StartAddr,
+ struct vb_device_info *pVBInfo)
{
int i;
unsigned long Position = 0;
@@ -583,7 +586,7 @@ static unsigned char XGINew_CheckFrequence(struct vb_device_info *pVBInfo)
}
static void XGINew_CheckChannel(struct xgi_hw_device_info *HwDeviceExtension,
- struct vb_device_info *pVBInfo)
+ struct vb_device_info *pVBInfo)
{
unsigned char data;
@@ -647,7 +650,7 @@ static void XGINew_CheckChannel(struct xgi_hw_device_info *HwDeviceExtension,
pVBInfo->ram_bus = 16; /* 16 bits */
/* (0x31:12x8x2) 22bit + 2 rank */
xgifb_reg_set(pVBInfo->P3c4, 0x13, 0xB1);
- /* 0x41:16Mx16 bit*/
+ /* 0x41:16Mx16 bit */
xgifb_reg_set(pVBInfo->P3c4, 0x14, 0x41);
usleep_range(15, 1015);
@@ -660,7 +663,7 @@ static void XGINew_CheckChannel(struct xgi_hw_device_info *HwDeviceExtension,
xgifb_reg_set(pVBInfo->P3c4,
0x13,
0x31);
- /* 0x31:8Mx16 bit*/
+ /* 0x31:8Mx16 bit */
xgifb_reg_set(pVBInfo->P3c4,
0x14,
0x31);
@@ -678,7 +681,7 @@ static void XGINew_CheckChannel(struct xgi_hw_device_info *HwDeviceExtension,
pVBInfo->ram_bus = 8; /* 8 bits */
/* (0x31:12x8x2) 22bit + 2 rank */
xgifb_reg_set(pVBInfo->P3c4, 0x13, 0xB1);
- /* 0x30:8Mx8 bit*/
+ /* 0x30:8Mx8 bit */
xgifb_reg_set(pVBInfo->P3c4, 0x14, 0x30);
usleep_range(15, 1015);
@@ -697,7 +700,7 @@ static void XGINew_CheckChannel(struct xgi_hw_device_info *HwDeviceExtension,
case XG27:
pVBInfo->ram_bus = 16; /* 16 bits */
pVBInfo->ram_channel = 1; /* Single channel */
- xgifb_reg_set(pVBInfo->P3c4, 0x14, 0x51); /* 32Mx16 bit*/
+ xgifb_reg_set(pVBInfo->P3c4, 0x14, 0x51); /* 32Mx16 bit */
break;
case XG42:
/*
@@ -785,7 +788,7 @@ static void XGINew_CheckChannel(struct xgi_hw_device_info *HwDeviceExtension,
}
static int XGINew_DDRSizing340(struct xgi_hw_device_info *HwDeviceExtension,
- struct vb_device_info *pVBInfo)
+ struct vb_device_info *pVBInfo)
{
u8 i, size;
unsigned short memsize, start_addr;
@@ -827,8 +830,8 @@ static int XGINew_DDRSizing340(struct xgi_hw_device_info *HwDeviceExtension,
}
static void XGINew_SetDRAMSize_340(struct xgifb_video_info *xgifb_info,
- struct xgi_hw_device_info *HwDeviceExtension,
- struct vb_device_info *pVBInfo)
+ struct xgi_hw_device_info *HwDeviceExtension,
+ struct vb_device_info *pVBInfo)
{
unsigned short data;
@@ -905,9 +908,9 @@ static bool xgifb_read_vbios(struct pci_dev *pdev)
goto error;
if (j == 0xff)
j = 1;
- /*
- * Read the LVDS table index scratch register set by the BIOS.
- */
+
+ /* Read the LVDS table index scratch register set by the BIOS. */
+
entry = xgifb_reg_get(xgifb_info->dev_info.P3d4, 0x36);
if (entry >= j)
entry = 0;
@@ -1039,8 +1042,9 @@ static void XGINew_SetModeScratch(struct vb_device_info *pVBInfo)
}
tempcl |= SetSimuScanMode;
- if ((!(temp & ActiveCRT1)) && ((temp & ActiveLCD) || (temp & ActiveTV)
- || (temp & ActiveCRT2)))
+ if ((!(temp & ActiveCRT1)) && ((temp & ActiveLCD) ||
+ (temp & ActiveTV) ||
+ (temp & ActiveCRT2)))
tempcl ^= (SetSimuScanMode | SwitchCRT2);
if ((temp & ActiveLCD) && (temp & ActiveTV))
tempcl ^= (SetSimuScanMode | SwitchCRT2);
@@ -1085,7 +1089,7 @@ static unsigned short XGINew_SenseLCD(struct xgi_hw_device_info
}
static void XGINew_GetXG21Sense(struct pci_dev *pdev,
- struct vb_device_info *pVBInfo)
+ struct vb_device_info *pVBInfo)
{
struct xgifb_video_info *xgifb_info = pci_get_drvdata(pdev);
unsigned char Temp;
@@ -1095,7 +1099,7 @@ static void XGINew_GetXG21Sense(struct pci_dev *pdev,
/* LVDS on chip */
xgifb_reg_and_or(pVBInfo->P3d4, 0x38, ~0xE0, 0xC0);
} else {
- /* Enable GPIOA/B read */
+ /* Enable GPIOA/B read */
xgifb_reg_and_or(pVBInfo->P3d4, 0x4A, ~0x03, 0x03);
Temp = xgifb_reg_get(pVBInfo->P3d4, 0x48) & 0xC0;
if (Temp == 0xC0) { /* DVI & DVO GPIOA/B pull high */
@@ -1119,7 +1123,7 @@ static void XGINew_GetXG27Sense(struct vb_device_info *pVBInfo)
unsigned char Temp, bCR4A;
bCR4A = xgifb_reg_get(pVBInfo->P3d4, 0x4A);
- /* Enable GPIOA/B/C read */
+ /* Enable GPIOA/B/C read */
xgifb_reg_and_or(pVBInfo->P3d4, 0x4A, ~0x07, 0x07);
Temp = xgifb_reg_get(pVBInfo->P3d4, 0x48) & 0x07;
xgifb_reg_set(pVBInfo->P3d4, 0x4A, bCR4A);
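
The sense helpers follow a save/modify/restore pattern around the GPIO read-enable bits in CR4A; spelled out from the lines above:

bCR4A = xgifb_reg_get(pVBInfo->P3d4, 0x4A);		/* save          */
xgifb_reg_and_or(pVBInfo->P3d4, 0x4A, ~0x07, 0x07);	/* enable reads  */
Temp = xgifb_reg_get(pVBInfo->P3d4, 0x48) & 0x07;	/* sample GPIOs  */
xgifb_reg_set(pVBInfo->P3d4, 0x4A, bCR4A);		/* restore       */
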
diff --git a/drivers/staging/xgifb/vb_setmode.c b/drivers/staging/xgifb/vb_setmode.c
index d8010c5c1a70..7c7c8c8f1df3 100644
--- a/drivers/staging/xgifb/vb_setmode.c
+++ b/drivers/staging/xgifb/vb_setmode.c
@@ -55,7 +55,7 @@ void InitTo330Pointer(unsigned char ChipType, struct vb_device_info *pVBInfo)
pVBInfo->XGINew_CR97 = 0xc1;
pVBInfo->SR18 = XG27_SR18;
- /*Z11m DDR*/
+ /* Z11m DDR */
temp = xgifb_reg_get(pVBInfo->P3c4, 0x3B);
/* SR3B[7][3]MAA15 MAA11 (Power on Trapping) */
if (((temp & 0x88) == 0x80) || ((temp & 0x88) == 0x08))
@@ -73,7 +73,7 @@ static void XGI_SetSeqRegs(struct vb_device_info *pVBInfo)
/* Get SR1,2,3,4 from file */
/* SR1 is with screen off 0x20 */
SRdata = XGI330_StandTable.SR[i];
- xgifb_reg_set(pVBInfo->P3c4, i+1, SRdata); /* Set SR 1 2 3 4 */
+ xgifb_reg_set(pVBInfo->P3c4, i + 1, SRdata); /* Set SR 1 2 3 4 */
}
}
@@ -167,7 +167,8 @@ static unsigned char XGI_SetDefaultVCLK(struct vb_device_info *pVBInfo)
}
static unsigned char XGI_AjustCRT2Rate(unsigned short ModeIdIndex,
- unsigned short RefreshRateTableIndex, unsigned short *i,
+ unsigned short RefreshRateTableIndex,
+ unsigned short *i,
struct vb_device_info *pVBInfo)
{
unsigned short tempax, tempbx, resinfo, modeflag, infoflag;
@@ -244,7 +245,7 @@ static unsigned char XGI_AjustCRT2Rate(unsigned short ModeIdIndex,
}
static void XGI_SetSync(unsigned short RefreshRateTableIndex,
- struct vb_device_info *pVBInfo)
+ struct vb_device_info *pVBInfo)
{
unsigned short sync, temp;
@@ -257,7 +258,7 @@ static void XGI_SetSync(unsigned short RefreshRateTableIndex,
}
static void XGI_SetCRT1Timing_H(struct vb_device_info *pVBInfo,
- struct xgi_hw_device_info *HwDeviceExtension)
+ struct xgi_hw_device_info *HwDeviceExtension)
{
unsigned char data, data1, pushax;
unsigned short i, j;
@@ -359,9 +360,9 @@ static void XGI_SetCRT1Timing_V(unsigned short ModeIdIndex,
}
static void XGI_SetCRT1CRTC(unsigned short ModeIdIndex,
- unsigned short RefreshRateTableIndex,
- struct vb_device_info *pVBInfo,
- struct xgi_hw_device_info *HwDeviceExtension)
+ unsigned short RefreshRateTableIndex,
+ struct vb_device_info *pVBInfo,
+ struct xgi_hw_device_info *HwDeviceExtension)
{
unsigned char index, data;
unsigned short i;
@@ -390,14 +391,14 @@ static void XGI_SetCRT1CRTC(unsigned short ModeIdIndex,
xgifb_reg_set(pVBInfo->P3d4, 0x14, 0x4F);
}
-/* --------------------------------------------------------------------- */
-/* Function : XGI_SetXG21CRTC */
-/* Input : Stand or enhance CRTC table */
-/* Output : Fill CRT Hsync/Vsync to SR2E/SR2F/SR30/SR33/SR34/SR3F */
-/* Description : Set LCD timing */
-/* --------------------------------------------------------------------- */
+/*
+ * Function : XGI_SetXG21CRTC
+ * Input : Stand or enhance CRTC table
+ * Output : Fill CRT Hsync/Vsync to SR2E/SR2F/SR30/SR33/SR34/SR3F
+ * Description : Set LCD timing
+ */
static void XGI_SetXG21CRTC(unsigned short RefreshRateTableIndex,
- struct vb_device_info *pVBInfo)
+ struct vb_device_info *pVBInfo)
{
unsigned char index, Tempax, Tempbx, Tempcx, Tempdx;
unsigned short Temp1, Temp2, Temp3;
@@ -506,8 +507,8 @@ static void XGI_SetXG27CRTC(unsigned short RefreshRateTableIndex,
/* SR0B */
Tempax = XGI_CRT1Table[index].CR[5];
- Tempax &= 0xC0; /* Tempax[7:6]: SR0B[7:6]: HRS[9:8]*/
- Tempbx |= (Tempax << 2); /* Tempbx: HRS[9:0] */
+ Tempax &= 0xC0; /* Tempax[7:6]: SR0B[7:6]: HRS[9:8] */
+ Tempbx |= Tempax << 2; /* Tempbx: HRS[9:0] */
Tempax = XGI_CRT1Table[index].CR[4]; /* CR5 HRE */
Tempax &= 0x1F; /* Tempax[4:0]: HRE[4:0] */
@@ -530,7 +531,7 @@ static void XGI_SetXG27CRTC(unsigned short RefreshRateTableIndex,
Tempax = XGI_CRT1Table[index].CR[5]; /* SR0B */
Tempax &= 0xC0; /* Tempax[7:6]: SR0B[7:6]: HRS[9:8]*/
Tempax >>= 6; /* Tempax[1:0]: HRS[9:8]*/
- Tempax |= ((Tempbx << 2) & 0xFF); /* Tempax[7:2]: HRE[5:0] */
+ Tempax |= (Tempbx << 2) & 0xFF; /* Tempax[7:2]: HRE[5:0] */
/* SR2F [7:2][1:0]: HRE[5:0]HRS[9:8] */
xgifb_reg_set(pVBInfo->P3c4, 0x2F, Tempax);
xgifb_reg_and_or(pVBInfo->P3c4, 0x30, 0xE3, 00);
@@ -548,12 +549,12 @@ static void XGI_SetXG27CRTC(unsigned short RefreshRateTableIndex,
Tempax >>= 2; /* Tempax[0]: VRS[8] */
/* SR35[0]: VRS[8] */
xgifb_reg_and_or(pVBInfo->P3c4, 0x35, ~0x01, Tempax);
- Tempcx |= (Tempax << 8); /* Tempcx <= VRS[8:0] */
- Tempcx |= ((Tempbx & 0x80) << 2); /* Tempcx <= VRS[9:0] */
+ Tempcx |= Tempax << 8; /* Tempcx <= VRS[8:0] */
+ Tempcx |= (Tempbx & 0x80) << 2; /* Tempcx <= VRS[9:0] */
/* Tempax: SR0A */
Tempax = XGI_CRT1Table[index].CR[14];
Tempax &= 0x08; /* SR0A[3] VRS[10] */
- Tempcx |= (Tempax << 7); /* Tempcx <= VRS[10:0] */
+ Tempcx |= Tempax << 7; /* Tempcx <= VRS[10:0] */
/* Tempax: CR11 VRE */
Tempax = XGI_CRT1Table[index].CR[11];
@@ -636,12 +637,12 @@ static void xgifb_set_lcd(int chip_id,
xgifb_reg_or(pVBInfo->P3c4, 0x35, 0x80);
}
-/* --------------------------------------------------------------------- */
-/* Function : XGI_UpdateXG21CRTC */
-/* Input : */
-/* Output : CRT1 CRTC */
-/* Description : Modify CRT1 Hsync/Vsync to fix LCD mode timing */
-/* --------------------------------------------------------------------- */
+/*
+ * Function : XGI_UpdateXG21CRTC
+ * Input :
+ * Output : CRT1 CRTC
+ * Description : Modify CRT1 Hsync/Vsync to fix LCD mode timing
+ */
static void XGI_UpdateXG21CRTC(unsigned short ModeNo,
struct vb_device_info *pVBInfo,
unsigned short RefreshRateTableIndex)
@@ -665,19 +666,19 @@ static void XGI_UpdateXG21CRTC(unsigned short ModeNo,
if (index != -1) {
xgifb_reg_set(pVBInfo->P3d4, 0x02,
- XGI_UpdateCRT1Table[index].CR02);
+ XGI_UpdateCRT1Table[index].CR02);
xgifb_reg_set(pVBInfo->P3d4, 0x03,
- XGI_UpdateCRT1Table[index].CR03);
+ XGI_UpdateCRT1Table[index].CR03);
xgifb_reg_set(pVBInfo->P3d4, 0x15,
- XGI_UpdateCRT1Table[index].CR15);
+ XGI_UpdateCRT1Table[index].CR15);
xgifb_reg_set(pVBInfo->P3d4, 0x16,
- XGI_UpdateCRT1Table[index].CR16);
+ XGI_UpdateCRT1Table[index].CR16);
}
}
static void XGI_SetCRT1DE(unsigned short ModeIdIndex,
- unsigned short RefreshRateTableIndex,
- struct vb_device_info *pVBInfo)
+ unsigned short RefreshRateTableIndex,
+ struct vb_device_info *pVBInfo)
{
unsigned short resindex, tempax, tempbx, tempcx, temp, modeflag;
@@ -715,7 +716,7 @@ static void XGI_SetCRT1DE(unsigned short ModeIdIndex,
xgifb_reg_set(pVBInfo->P3d4, 0x11, data); /* Unlock CRTC */
xgifb_reg_set(pVBInfo->P3d4, 0x01, (unsigned short)(tempcx & 0xff));
xgifb_reg_and_or(pVBInfo->P3d4, 0x0b, ~0x0c,
- (unsigned short)((tempcx & 0x0ff00) >> 10));
+ (unsigned short)((tempcx & 0x0ff00) >> 10));
xgifb_reg_set(pVBInfo->P3d4, 0x12, (unsigned short)(tempbx & 0xff));
tempax = 0;
tempbx >>= 8;
@@ -796,7 +797,7 @@ static void XGI_SetCRT1Offset(unsigned short ModeNo,
i |= temp;
xgifb_reg_set(pVBInfo->P3c4, 0x0E, i);
- temp = (unsigned char) temp2;
+ temp = (unsigned char)temp2;
temp &= 0xFF; /* al */
xgifb_reg_set(pVBInfo->P3d4, 0x13, temp);
@@ -822,15 +823,15 @@ static void XGI_SetCRT1Offset(unsigned short ModeNo,
}
static unsigned short XGI_GetVCLK2Ptr(unsigned short ModeIdIndex,
- unsigned short RefreshRateTableIndex,
- struct vb_device_info *pVBInfo)
+ unsigned short RefreshRateTableIndex,
+ struct vb_device_info *pVBInfo)
{
unsigned short VCLKIndex, modeflag;
/* si+Ext_ResInfo */
modeflag = XGI330_EModeIDTable[ModeIdIndex].Ext_ModeFlag;
- if (pVBInfo->VBInfo & (SetCRT2ToLCD | XGI_SetCRT2ToLCDA)) { /*301b*/
+ if (pVBInfo->VBInfo & (SetCRT2ToLCD | XGI_SetCRT2ToLCDA)) { /* 301b */
if (pVBInfo->LCDResInfo != Panel_1024x768)
/* LCDXlat2VCLK */
VCLKIndex = VCLK108_2_315 + 5;
@@ -951,8 +952,8 @@ static void XGI_SetCRT1FIFO(struct xgi_hw_device_info *HwDeviceExtension,
}
static void XGI_SetVCLKState(struct xgi_hw_device_info *HwDeviceExtension,
- unsigned short RefreshRateTableIndex,
- struct vb_device_info *pVBInfo)
+ unsigned short RefreshRateTableIndex,
+ struct vb_device_info *pVBInfo)
{
unsigned short data, data2 = 0;
short VCLK;
@@ -989,9 +990,9 @@ static void XGI_SetVCLKState(struct xgi_hw_device_info *HwDeviceExtension,
}
static void XGI_SetCRT1ModeRegs(struct xgi_hw_device_info *HwDeviceExtension,
- unsigned short ModeIdIndex,
- unsigned short RefreshRateTableIndex,
- struct vb_device_info *pVBInfo)
+ unsigned short ModeIdIndex,
+ unsigned short RefreshRateTableIndex,
+ struct vb_device_info *pVBInfo)
{
unsigned short data, data2, data3, infoflag = 0, modeflag, resindex,
xres;
@@ -1087,9 +1088,9 @@ static void XGI_WriteDAC(unsigned short dl,
else
swap(bl, bh);
}
- outb((unsigned short) dh, pVBInfo->P3c9);
- outb((unsigned short) bh, pVBInfo->P3c9);
- outb((unsigned short) bl, pVBInfo->P3c9);
+ outb((unsigned short)dh, pVBInfo->P3c9);
+ outb((unsigned short)bh, pVBInfo->P3c9);
+ outb((unsigned short)bl, pVBInfo->P3c9);
}
static void XGI_LoadDAC(struct vb_device_info *pVBInfo)
@@ -1187,8 +1188,8 @@ static void XGI_GetLVDSResInfo(unsigned short ModeIdIndex,
}
static void const *XGI_GetLcdPtr(struct XGI330_LCDDataTablStruct const *table,
- unsigned short ModeIdIndex,
- struct vb_device_info *pVBInfo)
+ unsigned short ModeIdIndex,
+ struct vb_device_info *pVBInfo)
{
unsigned short i, tempdx, tempbx, modeflag;
@@ -1201,12 +1202,12 @@ static void const *XGI_GetLcdPtr(struct XGI330_LCDDataTablStruct const *table,
while (table[i].PANELID != 0xff) {
tempdx = pVBInfo->LCDResInfo;
if (tempbx & 0x0080) { /* OEMUtil */
- tempbx &= (~0x0080);
+ tempbx &= ~0x0080;
tempdx = pVBInfo->LCDTypeInfo;
}
if (pVBInfo->LCDInfo & EnableScalingLCD)
- tempdx &= (~PanelResInfo);
+ tempdx &= ~PanelResInfo;
if (table[i].PANELID == tempdx) {
tempbx = table[i].MASK;
@@ -1226,8 +1227,8 @@ static void const *XGI_GetLcdPtr(struct XGI330_LCDDataTablStruct const *table,
}
static struct SiS_TVData const *XGI_GetTVPtr(unsigned short ModeIdIndex,
- unsigned short RefreshRateTableIndex,
- struct vb_device_info *pVBInfo)
+ unsigned short RefreshRateTableIndex,
+ struct vb_device_info *pVBInfo)
{
unsigned short i, tempdx, tempal, modeflag;
@@ -1441,9 +1442,9 @@ static void XGI_SetLVDSRegs(unsigned short ModeIdIndex,
tempbx >>= 3;
xgifb_reg_set(pVBInfo->Part1Port, 0x16,
- (unsigned short) (tempbx & 0xff));
+ (unsigned short)(tempbx & 0xff));
xgifb_reg_set(pVBInfo->Part1Port, 0x17,
- (unsigned short) (tempcx & 0xff));
+ (unsigned short)(tempcx & 0xff));
tempax = pVBInfo->HT;
@@ -1469,7 +1470,7 @@ static void XGI_SetLVDSRegs(unsigned short ModeIdIndex,
xgifb_reg_set(pVBInfo->Part1Port, 0x15, tempax);
xgifb_reg_set(pVBInfo->Part1Port, 0x14,
- (unsigned short) (tempbx & 0xff));
+ (unsigned short)(tempbx & 0xff));
tempax = pVBInfo->VT;
tempbx = LCDPtr1->LCDVDES;
@@ -1480,17 +1481,14 @@ static void XGI_SetLVDSRegs(unsigned short ModeIdIndex,
if (tempcx >= tempax)
tempcx -= tempax;
- xgifb_reg_set(pVBInfo->Part1Port, 0x1b,
- (unsigned short) (tempbx & 0xff));
- xgifb_reg_set(pVBInfo->Part1Port, 0x1c,
- (unsigned short) (tempcx & 0xff));
+ xgifb_reg_set(pVBInfo->Part1Port, 0x1b, (unsigned short)(tempbx & 0xff));
+ xgifb_reg_set(pVBInfo->Part1Port, 0x1c, (unsigned short)(tempcx & 0xff));
tempbx = (tempbx >> 8) & 0x07;
tempcx = (tempcx >> 8) & 0x07;
- xgifb_reg_set(pVBInfo->Part1Port, 0x1d,
- (unsigned short) ((tempcx << 3)
- | tempbx));
+ xgifb_reg_set(pVBInfo->Part1Port, 0x1d, (unsigned short)((tempcx << 3) |
+ tempbx));
tempax = pVBInfo->VT;
tempbx = LCDPtr1->LCDVRS;
@@ -1504,10 +1502,8 @@ static void XGI_SetLVDSRegs(unsigned short ModeIdIndex,
if (tempcx >= tempax)
tempcx -= tempax;
- xgifb_reg_set(pVBInfo->Part1Port, 0x18,
- (unsigned short) (tempbx & 0xff));
- xgifb_reg_and_or(pVBInfo->Part1Port, 0x19, ~0x0f,
- (unsigned short) (tempcx & 0x0f));
+ xgifb_reg_set(pVBInfo->Part1Port, 0x18, (unsigned short)(tempbx & 0xff));
+ xgifb_reg_and_or(pVBInfo->Part1Port, 0x19, ~0x0f, (unsigned short)(tempcx & 0x0f));
tempax = ((tempbx >> 8) & 0x07) << 3;
@@ -1518,8 +1514,7 @@ static void XGI_SetLVDSRegs(unsigned short ModeIdIndex,
if (pVBInfo->LCDInfo & XGI_EnableLVDSDDA)
tempax |= 0x40;
- xgifb_reg_and_or(pVBInfo->Part1Port, 0x1a, 0x07,
- tempax);
+ xgifb_reg_and_or(pVBInfo->Part1Port, 0x1a, 0x07, tempax);
tempbx = pVBInfo->VDE;
tempax = pVBInfo->VGAVDE;
@@ -1527,7 +1522,7 @@ static void XGI_SetLVDSRegs(unsigned short ModeIdIndex,
temp = tempax; /* 0430 ylshieh */
temp1 = (temp << 18) / tempbx;
- tempdx = (unsigned short) ((temp << 18) % tempbx);
+ tempdx = (unsigned short)((temp << 18) % tempbx);
if (tempdx != 0)
temp1 += 1;
@@ -1535,12 +1530,10 @@ static void XGI_SetLVDSRegs(unsigned short ModeIdIndex,
temp2 = temp1;
push3 = temp2;
- xgifb_reg_set(pVBInfo->Part1Port, 0x37,
- (unsigned short) (temp2 & 0xff));
- xgifb_reg_set(pVBInfo->Part1Port, 0x36,
- (unsigned short) ((temp2 >> 8) & 0xff));
+ xgifb_reg_set(pVBInfo->Part1Port, 0x37, (unsigned short)(temp2 & 0xff));
+ xgifb_reg_set(pVBInfo->Part1Port, 0x36, (unsigned short)((temp2 >> 8) & 0xff));
- tempbx = (unsigned short) (temp2 >> 16);
+ tempbx = (unsigned short)(temp2 >> 16);
tempax = tempbx & 0x03;
tempbx = pVBInfo->VGAVDE;
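The arithmetic in the two hunks above builds an 18-bit fixed-point vertical scaling ratio, rounding up whenever the division leaves a remainder. A compact sketch of that idiom (names are illustrative, not driver code):

	/* ceil((src << 18) / dst) without floating point */
	static unsigned long scale_ratio_18(unsigned long src, unsigned long dst)
	{
		unsigned long q = (src << 18) / dst;

		if ((src << 18) % dst)	/* any remainder rounds up */
			q++;
		return q;		/* Q18 fixed-point ratio */
	}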
@@ -1553,24 +1546,20 @@ static void XGI_SetLVDSRegs(unsigned short ModeIdIndex,
temp2 = push3;
xgifb_reg_set(pVBInfo->Part4Port,
0x3c,
- (unsigned short) (temp2 & 0xff));
+ (unsigned short)(temp2 & 0xff));
xgifb_reg_set(pVBInfo->Part4Port,
0x3b,
- (unsigned short) ((temp2 >> 8) &
+ (unsigned short)((temp2 >> 8) &
0xff));
- tempbx = (unsigned short) (temp2 >> 16);
- xgifb_reg_and_or(pVBInfo->Part4Port, 0x3a,
- ~0xc0,
- (unsigned short) ((tempbx &
- 0xff) << 6));
+ tempbx = (unsigned short)(temp2 >> 16);
+ xgifb_reg_and_or(pVBInfo->Part4Port, 0x3a, ~0xc0,
+ (unsigned short)((tempbx & 0xff) << 6));
tempcx = pVBInfo->VGAVDE;
if (tempcx == pVBInfo->VDE)
- xgifb_reg_and_or(pVBInfo->Part4Port,
- 0x30, ~0x0c, 0x00);
+ xgifb_reg_and_or(pVBInfo->Part4Port, 0x30, ~0x0c, 0x00);
else
- xgifb_reg_and_or(pVBInfo->Part4Port,
- 0x30, ~0x0c, 0x08);
+ xgifb_reg_and_or(pVBInfo->Part4Port, 0x30, ~0x0c, 0x08);
}
tempcx = pVBInfo->VGAHDE;
@@ -1578,7 +1567,7 @@ static void XGI_SetLVDSRegs(unsigned short ModeIdIndex,
temp1 = tempcx << 16;
- tempax = (unsigned short) (temp1 / tempbx);
+ tempax = (unsigned short)(temp1 / tempbx);
if ((tempbx & 0xffff) == (tempcx & 0xffff))
tempax = 65535;
@@ -1592,42 +1581,38 @@ static void XGI_SetLVDSRegs(unsigned short ModeIdIndex,
temp3 = (temp3 & 0xffff0000) + (temp1 & 0xffff);
- tempax = (unsigned short) (temp3 & 0xff);
+ tempax = (unsigned short)(temp3 & 0xff);
xgifb_reg_set(pVBInfo->Part1Port, 0x1f, tempax);
temp1 = pVBInfo->VGAVDE << 18;
temp1 = temp1 / push3;
- tempbx = (unsigned short) (temp1 & 0xffff);
+ tempbx = (unsigned short)(temp1 & 0xffff);
if (pVBInfo->LCDResInfo == Panel_1024x768)
tempbx -= 1;
tempax = ((tempbx >> 8) & 0xff) << 3;
- tempax |= (unsigned short) ((temp3 >> 8) & 0x07);
- xgifb_reg_set(pVBInfo->Part1Port, 0x20,
- (unsigned short) (tempax & 0xff));
- xgifb_reg_set(pVBInfo->Part1Port, 0x21,
- (unsigned short) (tempbx & 0xff));
+ tempax |= (unsigned short)((temp3 >> 8) & 0x07);
+ xgifb_reg_set(pVBInfo->Part1Port, 0x20, (unsigned short)(tempax & 0xff));
+ xgifb_reg_set(pVBInfo->Part1Port, 0x21, (unsigned short)(tempbx & 0xff));
temp3 >>= 16;
if (modeflag & HalfDCLK)
temp3 >>= 1;
- xgifb_reg_set(pVBInfo->Part1Port, 0x22,
- (unsigned short) ((temp3 >> 8) & 0xff));
- xgifb_reg_set(pVBInfo->Part1Port, 0x23,
- (unsigned short) (temp3 & 0xff));
+ xgifb_reg_set(pVBInfo->Part1Port, 0x22, (unsigned short)((temp3 >> 8) & 0xff));
+ xgifb_reg_set(pVBInfo->Part1Port, 0x23, (unsigned short)(temp3 & 0xff));
}
-/* --------------------------------------------------------------------- */
-/* Function : XGI_GETLCDVCLKPtr */
-/* Input : */
-/* Output : al -> VCLK Index */
-/* Description : */
-/* --------------------------------------------------------------------- */
+/*
+ * Function : XGI_GETLCDVCLKPtr
+ * Input :
+ * Output : al -> VCLK Index
+ * Description :
+ */
static void XGI_GetLCDVCLKPtr(unsigned char *di_0, unsigned char *di_1,
- struct vb_device_info *pVBInfo)
+ struct vb_device_info *pVBInfo)
{
unsigned short index;
@@ -1645,7 +1630,8 @@ static void XGI_GetLCDVCLKPtr(unsigned char *di_0, unsigned char *di_1,
}
static unsigned char XGI_GetVCLKPtr(unsigned short RefreshRateTableIndex,
- unsigned short ModeIdIndex, struct vb_device_info *pVBInfo)
+ unsigned short ModeIdIndex,
+ struct vb_device_info *pVBInfo)
{
unsigned short index, modeflag;
unsigned char tempal;
@@ -1681,15 +1667,11 @@ static unsigned char XGI_GetVCLKPtr(unsigned short RefreshRateTableIndex,
return tempal;
}
- if (pVBInfo->TVInfo & TVSetYPbPr750p) {
- tempal = XGI_YPbPr750pVCLK;
- return tempal;
- }
+ if (pVBInfo->TVInfo & TVSetYPbPr750p)
+ return XGI_YPbPr750pVCLK;
- if (pVBInfo->TVInfo & TVSetYPbPr525p) {
- tempal = YPbPr525pVCLK;
- return tempal;
- }
+ if (pVBInfo->TVInfo & TVSetYPbPr525p)
+ return YPbPr525pVCLK;
tempal = NTSC1024VCLK;
@@ -1705,12 +1687,11 @@ static unsigned char XGI_GetVCLKPtr(unsigned short RefreshRateTableIndex,
} /* {End of VB} */
inb((pVBInfo->P3ca + 0x02));
- tempal = XGI330_RefIndex[RefreshRateTableIndex].Ext_CRTVCLK;
- return tempal;
+ return XGI330_RefIndex[RefreshRateTableIndex].Ext_CRTVCLK;
}
static void XGI_GetVCLKLen(unsigned char tempal, unsigned char *di_0,
- unsigned char *di_1, struct vb_device_info *pVBInfo)
+ unsigned char *di_1, struct vb_device_info *pVBInfo)
{
if (pVBInfo->VBType & (VB_SIS301 | VB_SIS301B | VB_SIS302B
| VB_SIS301LV | VB_SIS302LV | VB_XGI301C)) {
@@ -1726,8 +1707,8 @@ static void XGI_GetVCLKLen(unsigned char tempal, unsigned char *di_0,
}
static void XGI_SetCRT2ECLK(unsigned short ModeIdIndex,
- unsigned short RefreshRateTableIndex,
- struct vb_device_info *pVBInfo)
+ unsigned short RefreshRateTableIndex,
+ struct vb_device_info *pVBInfo)
{
unsigned char di_0, di_1, tempal;
int i;
@@ -1738,7 +1719,7 @@ static void XGI_SetCRT2ECLK(unsigned short ModeIdIndex,
for (i = 0; i < 4; i++) {
xgifb_reg_and_or(pVBInfo->P3d4, 0x31, ~0x30,
- (unsigned short) (0x10 * i));
+ (unsigned short)(0x10 * i));
if (!(pVBInfo->VBInfo & XGI_SetCRT2ToLCDA) &&
!(pVBInfo->VBInfo & SetInSlaveMode)) {
xgifb_reg_set(pVBInfo->P3c4, 0x2e, di_0);
@@ -1876,8 +1857,7 @@ finish:
pVBInfo->VBType = tempbx;
}
-static void XGI_GetVBInfo(unsigned short ModeIdIndex,
- struct vb_device_info *pVBInfo)
+static void XGI_GetVBInfo(unsigned short ModeIdIndex, struct vb_device_info *pVBInfo)
{
unsigned short tempax, push, tempbx, temp, modeflag;
@@ -1921,7 +1901,7 @@ static void XGI_GetVBInfo(unsigned short ModeIdIndex,
tempbx |= SetCRT2ToHiVision;
if (temp != YPbPrMode1080i) {
- tempbx &= (~SetCRT2ToHiVision);
+ tempbx &= ~SetCRT2ToHiVision;
tempbx |= SetCRT2ToYPbPr525750;
}
}
@@ -2002,8 +1982,7 @@ static void XGI_GetVBInfo(unsigned short ModeIdIndex,
pVBInfo->VBInfo = tempbx;
}
-static void XGI_GetTVInfo(unsigned short ModeIdIndex,
- struct vb_device_info *pVBInfo)
+static void XGI_GetTVInfo(unsigned short ModeIdIndex, struct vb_device_info *pVBInfo)
{
unsigned short tempbx = 0, resinfo = 0, modeflag, index1;
@@ -2078,7 +2057,7 @@ static unsigned char XGI_GetLCDInfo(unsigned short ModeIdIndex,
pVBInfo->LCDTypeInfo = 0;
pVBInfo->LCDInfo = 0;
- /* si+Ext_ResInfo // */
+ /* si+Ext_ResInfo */
resinfo = XGI330_EModeIDTable[ModeIdIndex].Ext_RESINFO;
temp = xgifb_reg_get(pVBInfo->P3d4, 0x36); /* Get LCD Res.Info */
tempbx = temp & 0x0F;
@@ -2175,12 +2154,12 @@ static unsigned char XG21GPIODataTransfer(unsigned char ujDate)
return ujRet;
}
-/*----------------------------------------------------------------------------*/
-/* output */
-/* bl[5] : LVDS signal */
-/* bl[1] : LVDS backlight */
-/* bl[0] : LVDS VDD */
-/*----------------------------------------------------------------------------*/
+/*
+ * output
+ * bl[5] : LVDS signal
+ * bl[1] : LVDS backlight
+ * bl[0] : LVDS VDD
+ */
static unsigned char XGI_XG21GetPSCValue(struct vb_device_info *pVBInfo)
{
unsigned char CR4A, temp;
@@ -2196,12 +2175,12 @@ static unsigned char XGI_XG21GetPSCValue(struct vb_device_info *pVBInfo)
return temp;
}
-/*----------------------------------------------------------------------------*/
-/* output */
-/* bl[5] : LVDS signal */
-/* bl[1] : LVDS backlight */
-/* bl[0] : LVDS VDD */
-/*----------------------------------------------------------------------------*/
+/*
+ * output
+ * bl[5] : LVDS signal
+ * bl[1] : LVDS backlight
+ * bl[0] : LVDS VDD
+ */
static unsigned char XGI_XG27GetPSCValue(struct vb_device_info *pVBInfo)
{
unsigned char CR4A, CRB4, temp;
@@ -2219,17 +2198,17 @@ static unsigned char XGI_XG27GetPSCValue(struct vb_device_info *pVBInfo)
return temp;
}
-/*----------------------------------------------------------------------------*/
-/* input */
-/* bl[5] : 1;LVDS signal on */
-/* bl[1] : 1;LVDS backlight on */
-/* bl[0] : 1:LVDS VDD on */
-/* bh: 100000b : clear bit 5, to set bit5 */
-/* 000010b : clear bit 1, to set bit1 */
-/* 000001b : clear bit 0, to set bit0 */
-/*----------------------------------------------------------------------------*/
+/*
+ * input
+ * bl[5] : 1 : LVDS signal on
+ * bl[1] : 1 : LVDS backlight on
+ * bl[0] : 1 : LVDS VDD on
+ * bh: 100000b : clear bit 5, to set bit 5
+ * 000010b : clear bit 1, to set bit 1
+ * 000001b : clear bit 0, to set bit 0
+ */
static void XGI_XG21BLSignalVDD(unsigned short tempbh, unsigned short tempbl,
- struct vb_device_info *pVBInfo)
+ struct vb_device_info *pVBInfo)
{
unsigned char CR4A, temp;
@@ -2254,7 +2233,7 @@ static void XGI_XG21BLSignalVDD(unsigned short tempbh, unsigned short tempbl,
}
static void XGI_XG27BLSignalVDD(unsigned short tempbh, unsigned short tempbl,
- struct vb_device_info *pVBInfo)
+ struct vb_device_info *pVBInfo)
{
unsigned char CR4A, temp;
unsigned short tempbh0, tempbl0;
@@ -2284,8 +2263,8 @@ static void XGI_XG27BLSignalVDD(unsigned short tempbh, unsigned short tempbl,
}
static void XGI_DisplayOn(struct xgifb_video_info *xgifb_info,
- struct xgi_hw_device_info *pXGIHWDE,
- struct vb_device_info *pVBInfo)
+ struct xgi_hw_device_info *pXGIHWDE,
+ struct vb_device_info *pVBInfo)
{
xgifb_reg_and_or(pVBInfo->P3c4, 0x01, 0xDF, 0x00);
if (pXGIHWDE->jChipType == XG21) {
@@ -2328,8 +2307,8 @@ static void XGI_DisplayOn(struct xgifb_video_info *xgifb_info,
}
void XGI_DisplayOff(struct xgifb_video_info *xgifb_info,
- struct xgi_hw_device_info *pXGIHWDE,
- struct vb_device_info *pVBInfo)
+ struct xgi_hw_device_info *pXGIHWDE,
+ struct vb_device_info *pVBInfo)
{
if (pXGIHWDE->jChipType == XG21) {
if (pVBInfo->IF_DEF_LVDS == 1) {
@@ -2448,7 +2427,7 @@ exit:
static unsigned char XGI_IsLCDDualLink(struct vb_device_info *pVBInfo)
{
if ((pVBInfo->VBInfo & (SetCRT2ToLCD | XGI_SetCRT2ToLCDA)) &&
- (pVBInfo->LCDInfo & SetLCDDualLink)) /* shampoo0129 */
+ (pVBInfo->LCDInfo & SetLCDDualLink)) /* shampoo0129 */
return 1;
return 0;
@@ -2466,16 +2445,15 @@ static void XGI_GetRAMDAC2DATA(unsigned short ModeIdIndex,
modeflag = XGI330_EModeIDTable[ModeIdIndex].Ext_ModeFlag;
CRT1Index = XGI330_RefIndex[RefreshRateTableIndex].Ext_CRT1CRTC;
CRT1Index &= IndexMask;
- temp1 = (unsigned short) XGI_CRT1Table[CRT1Index].CR[0];
- temp2 = (unsigned short) XGI_CRT1Table[CRT1Index].CR[5];
+ temp1 = (unsigned short)XGI_CRT1Table[CRT1Index].CR[0];
+ temp2 = (unsigned short)XGI_CRT1Table[CRT1Index].CR[5];
tempax = (temp1 & 0xFF) | ((temp2 & 0x03) << 8);
- tempbx = (unsigned short) XGI_CRT1Table[CRT1Index].CR[8];
- tempcx = (unsigned short)
- XGI_CRT1Table[CRT1Index].CR[14] << 8;
+ tempbx = (unsigned short)XGI_CRT1Table[CRT1Index].CR[8];
+ tempcx = (unsigned short)XGI_CRT1Table[CRT1Index].CR[14] << 8;
tempcx &= 0x0100;
tempcx <<= 2;
tempbx |= tempcx;
- temp1 = (unsigned short) XGI_CRT1Table[CRT1Index].CR[9];
+ temp1 = (unsigned short)XGI_CRT1Table[CRT1Index].CR[9];
if (temp1 & 0x01)
tempbx |= 0x0100;
@@ -2497,8 +2475,8 @@ static void XGI_GetRAMDAC2DATA(unsigned short ModeIdIndex,
}
static void XGI_GetCRT2Data(unsigned short ModeIdIndex,
- unsigned short RefreshRateTableIndex,
- struct vb_device_info *pVBInfo)
+ unsigned short RefreshRateTableIndex,
+ struct vb_device_info *pVBInfo)
{
unsigned short tempax = 0, tempbx = 0, modeflag, resinfo;
@@ -2667,8 +2645,8 @@ static void XGI_GetCRT2Data(unsigned short ModeIdIndex,
}
static void XGI_SetCRT2VCLK(unsigned short ModeIdIndex,
- unsigned short RefreshRateTableIndex,
- struct vb_device_info *pVBInfo)
+ unsigned short RefreshRateTableIndex,
+ struct vb_device_info *pVBInfo)
{
unsigned char di_0, di_1, tempal;
@@ -2739,9 +2717,9 @@ static unsigned short XGI_GetOffset(unsigned short ModeNo,
}
static void XGI_SetCRT2Offset(unsigned short ModeNo,
- unsigned short ModeIdIndex,
- unsigned short RefreshRateTableIndex,
- struct vb_device_info *pVBInfo)
+ unsigned short ModeIdIndex,
+ unsigned short RefreshRateTableIndex,
+ struct vb_device_info *pVBInfo)
{
unsigned short offset;
unsigned char temp;
@@ -2750,11 +2728,11 @@ static void XGI_SetCRT2Offset(unsigned short ModeNo,
return;
offset = XGI_GetOffset(ModeNo, ModeIdIndex, RefreshRateTableIndex);
- temp = (unsigned char) (offset & 0xFF);
+ temp = (unsigned char)(offset & 0xFF);
xgifb_reg_set(pVBInfo->Part1Port, 0x07, temp);
- temp = (unsigned char) ((offset & 0xFF00) >> 8);
+ temp = (unsigned char)((offset & 0xFF00) >> 8);
xgifb_reg_set(pVBInfo->Part1Port, 0x09, temp);
- temp = (unsigned char) (((offset >> 3) & 0xFF) + 1);
+ temp = (unsigned char)(((offset >> 3) & 0xFF) + 1);
xgifb_reg_set(pVBInfo->Part1Port, 0x03, temp);
}
@@ -2767,8 +2745,8 @@ static void XGI_SetCRT2FIFO(struct vb_device_info *pVBInfo)
}
static void XGI_PreSetGroup1(unsigned short ModeNo, unsigned short ModeIdIndex,
- unsigned short RefreshRateTableIndex,
- struct vb_device_info *pVBInfo)
+ unsigned short RefreshRateTableIndex,
+ struct vb_device_info *pVBInfo)
{
u8 tempcx;
@@ -2783,8 +2761,8 @@ static void XGI_PreSetGroup1(unsigned short ModeNo, unsigned short ModeIdIndex,
}
static void XGI_SetGroup1(unsigned short ModeIdIndex,
- unsigned short RefreshRateTableIndex,
- struct vb_device_info *pVBInfo)
+ unsigned short RefreshRateTableIndex,
+ struct vb_device_info *pVBInfo)
{
unsigned short temp = 0, tempax = 0, tempbx = 0, tempcx = 0,
pushbx = 0, CRT1Index, modeflag;
@@ -2933,11 +2911,11 @@ static unsigned short XGI_GetVGAHT2(struct vb_device_info *pVBInfo)
tempax = (pVBInfo->VT - pVBInfo->VDE) * pVBInfo->RVBHCFACT;
tempax = (tempax * pVBInfo->HT) / tempbx;
- return (unsigned short) tempax;
+ return (unsigned short)tempax;
}
static void XGI_SetLockRegs(unsigned short ModeNo, unsigned short ModeIdIndex,
- struct vb_device_info *pVBInfo)
+ struct vb_device_info *pVBInfo)
{
unsigned short push1, push2, tempax, tempbx = 0, tempcx, temp, resinfo,
modeflag;
@@ -3044,14 +3022,14 @@ static void XGI_SetLockRegs(unsigned short ModeNo, unsigned short ModeIdIndex,
if (ModeNo == 0x50) {
if (pVBInfo->TVInfo == SetNTSCTV) {
xgifb_reg_set(pVBInfo->Part1Port,
- 0x07, 0x30);
+ 0x07, 0x30);
xgifb_reg_set(pVBInfo->Part1Port,
- 0x08, 0x03);
+ 0x08, 0x03);
} else {
xgifb_reg_set(pVBInfo->Part1Port,
- 0x07, 0x2f);
+ 0x07, 0x2f);
xgifb_reg_set(pVBInfo->Part1Port,
- 0x08, 0x02);
+ 0x08, 0x02);
}
}
}
@@ -3064,7 +3042,7 @@ static void XGI_SetLockRegs(unsigned short ModeNo, unsigned short ModeIdIndex,
tempbx = pVBInfo->VGAVT;
push1 = tempbx;
tempcx = 0x121;
- tempbx = pVBInfo->VGAVDE; /* 0x0E Virtical Display End */
+ tempbx = pVBInfo->VGAVDE; /* 0x0E Vertical Display End */
if (tempbx == 357)
tempbx = 350;
@@ -3116,7 +3094,7 @@ static void XGI_SetLockRegs(unsigned short ModeNo, unsigned short ModeIdIndex,
if (tempbx & 0x0400)
tempcx |= 0x0600;
- /* 0x11 Vertival Blank End */
+ /* 0x11 Vertical Blank End */
xgifb_reg_set(pVBInfo->Part1Port, 0x11, 0x00);
tempax = push1;
@@ -3227,7 +3205,7 @@ static void XGI_SetLockRegs(unsigned short ModeNo, unsigned short ModeIdIndex,
}
static void XGI_SetGroup2(unsigned short ModeNo, unsigned short ModeIdIndex,
- struct vb_device_info *pVBInfo)
+ struct vb_device_info *pVBInfo)
{
unsigned short i, j, tempax, tempbx, tempcx, temp, push1, push2,
modeflag;
@@ -3315,7 +3293,7 @@ static void XGI_SetGroup2(unsigned short ModeNo, unsigned short ModeIdIndex,
tempax = (tempax & 0x00FF) | ((tempax & 0x00FF) << 8);
push1 = tempax;
temp = (tempax & 0xFF00) >> 8;
- temp += (unsigned short) TimingPoint[0];
+ temp += (unsigned short)TimingPoint[0];
if (pVBInfo->VBType & (VB_SIS301B | VB_SIS302B | VB_SIS301LV
| VB_SIS302LV | VB_XGI301C)) {
@@ -3526,7 +3504,7 @@ static void XGI_SetGroup2(unsigned short ModeNo, unsigned short ModeIdIndex,
tempcx = 0x0101;
- if (pVBInfo->VBInfo & SetCRT2ToTV) { /*301b*/
+ if (pVBInfo->VBInfo & SetCRT2ToTV) { /* 301b */
if (pVBInfo->VGAHDE >= 1024) {
tempcx = 0x1920;
if (pVBInfo->VGAHDE >= 1280) {
@@ -3562,7 +3540,7 @@ static void XGI_SetGroup2(unsigned short ModeNo, unsigned short ModeIdIndex,
if (temp2 != 0)
tempeax += 1;
- tempax = (unsigned short) tempeax;
+ tempax = (unsigned short)tempeax;
/* 301b */
if (pVBInfo->VBType & (VB_SIS301B | VB_SIS302B | VB_SIS301LV
@@ -3572,9 +3550,9 @@ static void XGI_SetGroup2(unsigned short ModeNo, unsigned short ModeIdIndex,
/* end 301b */
tempbx = push1;
- tempbx = (unsigned short) (((tempeax & 0x0000FF00) & 0x1F00)
+ tempbx = (unsigned short)(((tempeax & 0x0000FF00) & 0x1F00)
| (tempbx & 0x00FF));
- tempax = (unsigned short) (((tempeax & 0x000000FF) << 8)
+ tempax = (unsigned short)(((tempeax & 0x000000FF) << 8)
| (tempax & 0x00FF));
temp = (tempax & 0xFF00) >> 8;
} else {
@@ -3622,14 +3600,14 @@ static void XGI_SetGroup2(unsigned short ModeNo, unsigned short ModeIdIndex,
xgifb_reg_set(pVBInfo->Part2Port, 0x4d, temp);
temp = xgifb_reg_get(pVBInfo->Part2Port, 0x43); /* 301b change */
- xgifb_reg_set(pVBInfo->Part2Port, 0x43, (unsigned short) (temp - 3));
+ xgifb_reg_set(pVBInfo->Part2Port, 0x43, (unsigned short)(temp - 3));
if (!(pVBInfo->TVInfo & (TVSetYPbPr525p | TVSetYPbPr750p))) {
if (pVBInfo->TVInfo & NTSC1024x768) {
TimingPoint = XGI_NTSC1024AdjTime;
for (i = 0x1c, j = 0; i <= 0x30; i++, j++) {
xgifb_reg_set(pVBInfo->Part2Port, i,
- TimingPoint[j]);
+ TimingPoint[j]);
}
xgifb_reg_set(pVBInfo->Part2Port, 0x43, 0x72);
}
@@ -3639,7 +3617,7 @@ static void XGI_SetGroup2(unsigned short ModeNo, unsigned short ModeIdIndex,
if (pVBInfo->VBType & VB_XGI301C) {
if (pVBInfo->TVInfo & TVSetPALM)
xgifb_reg_and_or(pVBInfo->Part2Port, 0x4E, ~0x08,
- 0x08); /* PALM Mode */
+ 0x08); /* PALM Mode */
}
if (pVBInfo->TVInfo & TVSetPALM) {
@@ -3656,8 +3634,7 @@ static void XGI_SetGroup2(unsigned short ModeNo, unsigned short ModeIdIndex,
}
}
-static void XGI_SetLCDRegs(unsigned short ModeIdIndex,
- struct vb_device_info *pVBInfo)
+static void XGI_SetLCDRegs(unsigned short ModeIdIndex, struct vb_device_info *pVBInfo)
{
unsigned short pushbx, tempax, tempbx, tempcx, temp, tempah,
tempbh, tempch;
@@ -3853,12 +3830,12 @@ static void XGI_SetLCDRegs(unsigned short ModeIdIndex,
}
}
-/* --------------------------------------------------------------------- */
-/* Function : XGI_GetTap4Ptr */
-/* Input : */
-/* Output : di -> Tap4 Reg. Setting Pointer */
-/* Description : */
-/* --------------------------------------------------------------------- */
+/*
+ * Function : XGI_GetTap4Ptr
+ * Input :
+ * Output : di -> Tap4 Reg. Setting Pointer
+ * Description :
+ */
static struct XGI301C_Tap4TimingStruct const
*XGI_GetTap4Ptr(unsigned short tempcx, struct vb_device_info *pVBInfo)
{
@@ -3882,7 +3859,7 @@ static struct XGI301C_Tap4TimingStruct const
if (pVBInfo->VBInfo & SetCRT2ToYPbPr525750) {
if ((pVBInfo->TVInfo & TVSetYPbPr525i) ||
- (pVBInfo->TVInfo & TVSetYPbPr525p))
+ (pVBInfo->TVInfo & TVSetYPbPr525p))
Tap4TimingPtr = xgifb_ntsc_525_tap4_timing;
if (pVBInfo->TVInfo & TVSetYPbPr750p)
Tap4TimingPtr = YPbPr750pTap4Timing;
@@ -3988,8 +3965,8 @@ static void XGI_SetGroup3(unsigned short ModeIdIndex,
}
static void XGI_SetGroup4(unsigned short ModeIdIndex,
- unsigned short RefreshRateTableIndex,
- struct vb_device_info *pVBInfo)
+ unsigned short RefreshRateTableIndex,
+ struct vb_device_info *pVBInfo)
{
unsigned short tempax, tempcx, tempbx, modeflag, temp, temp2;
@@ -4080,12 +4057,12 @@ static void XGI_SetGroup4(unsigned short ModeIdIndex,
if (templong != 0)
tempebx++;
- temp = (unsigned short) (tempebx & 0x000000FF);
+ temp = (unsigned short)(tempebx & 0x000000FF);
xgifb_reg_set(pVBInfo->Part4Port, 0x1B, temp);
- temp = (unsigned short) ((tempebx & 0x0000FF00) >> 8);
+ temp = (unsigned short)((tempebx & 0x0000FF00) >> 8);
xgifb_reg_set(pVBInfo->Part4Port, 0x1A, temp);
- tempbx = (unsigned short) (tempebx >> 16);
+ tempbx = (unsigned short)(tempebx >> 16);
temp = tempbx & 0x00FF;
temp <<= 4;
temp |= ((tempcx & 0xFF00) >> 8);
@@ -4132,8 +4109,7 @@ static void XGI_SetGroup4(unsigned short ModeIdIndex,
| TVSetHiVision))) {
temp |= 0x0001;
if ((pVBInfo->VBInfo & SetInSlaveMode) &&
- !(pVBInfo->TVInfo
- & TVSimuMode))
+ !(pVBInfo->TVInfo & TVSimuMode))
temp &= (~0x0001);
}
}
@@ -4174,7 +4150,8 @@ static void XGI_DisableGatingCRT(struct vb_device_info *pVBInfo)
}
static unsigned char XGI_XG21CheckLVDSMode(struct xgifb_video_info *xgifb_info,
- unsigned short ModeNo, unsigned short ModeIdIndex)
+ unsigned short ModeNo,
+ unsigned short ModeIdIndex)
{
unsigned short xres, yres, colordepth, modeflag, resindex;
@@ -4221,7 +4198,7 @@ static void xgifb_set_lvds(struct xgifb_video_info *xgifb_info,
unsigned short LVDSVT, LVDSVBS, LVDSVRS, LVDSVRE, LVDSVBE;
unsigned short value;
- temp = (unsigned char) ((xgifb_info->lvds_data.LVDS_Capability &
+ temp = (unsigned char)((xgifb_info->lvds_data.LVDS_Capability &
(LCDPolarity << 8)) >> 8);
temp &= LCDPolarity;
Miscdata = inb(pVBInfo->P3cc);
@@ -4354,12 +4331,12 @@ static void xgifb_set_lvds(struct xgifb_video_info *xgifb_info,
if (chip_id == XG27) {
/* Panel VRS SR35[2:0] SR34[7:0] */
xgifb_reg_and_or(pVBInfo->P3c4, 0x35, ~0x07,
- (value & 0x700) >> 8);
+ (value & 0x700) >> 8);
xgifb_reg_set(pVBInfo->P3c4, 0x34, value & 0xFF);
} else {
/* Panel VRS SR3F[1:0] SR34[7:0] SR33[0] */
xgifb_reg_and_or(pVBInfo->P3c4, 0x3F, ~0x03,
- (value & 0x600) >> 9);
+ (value & 0x600) >> 9);
xgifb_reg_set(pVBInfo->P3c4, 0x34, (value >> 1) & 0xFF);
xgifb_reg_and_or(pVBInfo->P3d4, 0x33, ~0x01, value & 0x01);
}
@@ -4372,11 +4349,11 @@ static void xgifb_set_lvds(struct xgifb_video_info *xgifb_info,
/* Panel VRE SR3F[7:2] */
if (chip_id == XG27)
xgifb_reg_and_or(pVBInfo->P3c4, 0x3F, ~0xFC,
- (value << 2) & 0xFC);
+ (value << 2) & 0xFC);
else
/* SR3F[7] has to be 0, h/w bug */
xgifb_reg_and_or(pVBInfo->P3c4, 0x3F, ~0xFC,
- (value << 2) & 0x7C);
+ (value << 2) & 0x7C);
for (temp = 0, value = 0; temp < 3; temp++) {
xgifb_reg_and_or(pVBInfo->P3c4, 0x31, ~0x30, value);
@@ -4400,13 +4377,13 @@ static void xgifb_set_lvds(struct xgifb_video_info *xgifb_info,
}
}
-/* --------------------------------------------------------------------- */
-/* Function : XGI_IsLCDON */
-/* Input : */
-/* Output : 0 : Skip PSC Control */
-/* 1: Disable PSC */
-/* Description : */
-/* --------------------------------------------------------------------- */
+/*
+ * Function : XGI_IsLCDON
+ * Input :
+ * Output : 0 : Skip PSC Control
+ * 1 : Disable PSC
+ * Description :
+ */
static unsigned char XGI_IsLCDON(struct vb_device_info *pVBInfo)
{
unsigned short tempax;
@@ -4421,8 +4398,8 @@ static unsigned char XGI_IsLCDON(struct vb_device_info *pVBInfo)
}
static void XGI_DisableBridge(struct xgifb_video_info *xgifb_info,
- struct xgi_hw_device_info *HwDeviceExtension,
- struct vb_device_info *pVBInfo)
+ struct xgi_hw_device_info *HwDeviceExtension,
+ struct vb_device_info *pVBInfo)
{
unsigned short tempah = 0;
@@ -4498,23 +4475,23 @@ static void XGI_DisableBridge(struct xgifb_video_info *xgifb_info,
}
}
-/* --------------------------------------------------------------------- */
-/* Function : XGI_GetTVPtrIndex */
-/* Input : */
-/* Output : */
-/* Description : bx 0 : ExtNTSC */
-/* 1 : StNTSC */
-/* 2 : ExtPAL */
-/* 3 : StPAL */
-/* 4 : ExtHiTV */
-/* 5 : StHiTV */
-/* 6 : Ext525i */
-/* 7 : St525i */
-/* 8 : Ext525p */
-/* 9 : St525p */
-/* A : Ext750p */
-/* B : St750p */
-/* --------------------------------------------------------------------- */
+/*
+ * Function : XGI_GetTVPtrIndex
+ * Input :
+ * Output :
+ * Description : bx 0 : ExtNTSC
+ * 1 : StNTSC
+ * 2 : ExtPAL
+ * 3 : StPAL
+ * 4 : ExtHiTV
+ * 5 : StHiTV
+ * 6 : Ext525i
+ * 7 : St525i
+ * 8 : Ext525p
+ * 9 : St525p
+ * A : Ext750p
+ * B : St750p
+ */
static unsigned short XGI_GetTVPtrIndex(struct vb_device_info *pVBInfo)
{
unsigned short tempbx = 0;
@@ -4535,24 +4512,24 @@ static unsigned short XGI_GetTVPtrIndex(struct vb_device_info *pVBInfo)
return tempbx;
}
-/* --------------------------------------------------------------------- */
-/* Function : XGI_GetTVPtrIndex2 */
-/* Input : */
-/* Output : bx 0 : NTSC */
-/* 1 : PAL */
-/* 2 : PALM */
-/* 3 : PALN */
-/* 4 : NTSC1024x768 */
-/* 5 : PAL-M 1024x768 */
-/* 6-7: reserved */
-/* cl 0 : YFilter1 */
-/* 1 : YFilter2 */
-/* ch 0 : 301A */
-/* 1 : 301B/302B/301LV/302LV */
-/* Description : */
-/* --------------------------------------------------------------------- */
+/*
+ * Function : XGI_GetTVPtrIndex2
+ * Input :
+ * Output : bx 0 : NTSC
+ * 1 : PAL
+ * 2 : PALM
+ * 3 : PALN
+ * 4 : NTSC1024x768
+ * 5 : PAL-M 1024x768
+ * 6-7: reserved
+ * cl 0 : YFilter1
+ * 1 : YFilter2
+ * ch 0 : 301A
+ * 1 : 301B/302B/301LV/302LV
+ * Description :
+ */
static void XGI_GetTVPtrIndex2(unsigned short *tempbx, unsigned char *tempcl,
- unsigned char *tempch, struct vb_device_info *pVBInfo)
+ unsigned char *tempch, struct vb_device_info *pVBInfo)
{
*tempbx = 0;
*tempcl = 0;
@@ -4637,33 +4614,32 @@ static void XGI_SetLCDCap_A(unsigned short tempcx,
if (temp & LCDRGB18Bit) {
xgifb_reg_and_or(pVBInfo->Part1Port, 0x19, 0x0F,
- /* Enable Dither */
- (unsigned short) (0x20 | (tempcx & 0x00C0)));
+ /* Enable Dither */
+ (unsigned short)(0x20 | (tempcx & 0x00C0)));
xgifb_reg_and_or(pVBInfo->Part1Port, 0x1A, 0x7F, 0x80);
} else {
xgifb_reg_and_or(pVBInfo->Part1Port, 0x19, 0x0F,
- (unsigned short) (0x30 | (tempcx & 0x00C0)));
+ (unsigned short)(0x30 | (tempcx & 0x00C0)));
xgifb_reg_and_or(pVBInfo->Part1Port, 0x1A, 0x7F, 0x00);
}
}
-/* --------------------------------------------------------------------- */
-/* Function : XGI_SetLCDCap_B */
-/* Input : cx -> LCD Capability */
-/* Output : */
-/* Description : */
-/* --------------------------------------------------------------------- */
+/*
+ * Function : XGI_SetLCDCap_B
+ * Input : cx -> LCD Capability
+ * Output :
+ * Description :
+ */
static void XGI_SetLCDCap_B(unsigned short tempcx,
struct vb_device_info *pVBInfo)
{
if (tempcx & EnableLCD24bpp) /* 24bits */
xgifb_reg_and_or(pVBInfo->Part2Port, 0x1A, 0xE0,
- (unsigned short) (((tempcx & 0x00ff) >> 6)
- | 0x0c));
+ (unsigned short)(((tempcx & 0x00ff) >> 6) | 0x0c));
else
xgifb_reg_and_or(pVBInfo->Part2Port, 0x1A, 0xE0,
- (unsigned short) (((tempcx & 0x00ff) >> 6)
- | 0x18)); /* Enable Dither */
+ (unsigned short)(((tempcx & 0x00ff) >> 6) | 0x18));
+ /* Enable Dither */
}
static void XGI_LongWait(struct vb_device_info *pVBInfo)
@@ -4698,13 +4674,13 @@ static void SetSpectrum(struct vb_device_info *pVBInfo)
XGI_LongWait(pVBInfo);
xgifb_reg_set(pVBInfo->Part4Port, 0x31,
- pVBInfo->LCDCapList[index].Spectrum_31);
+ pVBInfo->LCDCapList[index].Spectrum_31);
xgifb_reg_set(pVBInfo->Part4Port, 0x32,
- pVBInfo->LCDCapList[index].Spectrum_32);
+ pVBInfo->LCDCapList[index].Spectrum_32);
xgifb_reg_set(pVBInfo->Part4Port, 0x33,
- pVBInfo->LCDCapList[index].Spectrum_33);
+ pVBInfo->LCDCapList[index].Spectrum_33);
xgifb_reg_set(pVBInfo->Part4Port, 0x34,
- pVBInfo->LCDCapList[index].Spectrum_34);
+ pVBInfo->LCDCapList[index].Spectrum_34);
XGI_LongWait(pVBInfo);
xgifb_reg_or(pVBInfo->Part4Port, 0x30, 0x40); /* enable spectrum */
}
@@ -4721,13 +4697,13 @@ static void XGI_SetLCDCap(struct vb_device_info *pVBInfo)
(VB_SIS301LV | VB_SIS302LV | VB_XGI301C)) {
/* Set 301LV Capability */
xgifb_reg_set(pVBInfo->Part4Port, 0x24,
- (unsigned char) (tempcx & 0x1F));
+ (unsigned char)(tempcx & 0x1F));
}
/* VB Driving */
xgifb_reg_and_or(pVBInfo->Part4Port, 0x0D,
- ~((EnableVBCLKDRVLOW | EnablePLLSPLOW) >> 8),
- (unsigned short) ((tempcx & (EnableVBCLKDRVLOW
- | EnablePLLSPLOW)) >> 8));
+ ~((EnableVBCLKDRVLOW | EnablePLLSPLOW) >> 8),
+ (unsigned short)((tempcx & (EnableVBCLKDRVLOW |
+ EnablePLLSPLOW)) >> 8));
if (pVBInfo->VBInfo & SetCRT2ToLCD)
XGI_SetLCDCap_B(tempcx, pVBInfo);
@@ -4744,12 +4720,12 @@ static void XGI_SetLCDCap(struct vb_device_info *pVBInfo)
}
}
-/* --------------------------------------------------------------------- */
-/* Function : XGI_SetAntiFlicker */
-/* Input : */
-/* Output : */
-/* Description : Set TV Customized Param. */
-/* --------------------------------------------------------------------- */
+/*
+ * Function : XGI_SetAntiFlicker
+ * Input :
+ * Output :
+ * Description : Set TV Customized Param.
+ */
static void XGI_SetAntiFlicker(struct vb_device_info *pVBInfo)
{
unsigned short tempbx;
@@ -4792,13 +4768,13 @@ static void XGI_SetPhaseIncr(struct vb_device_info *pVBInfo)
XGI_GetTVPtrIndex2(&tempbx, &tempcl, &tempch, pVBInfo); /* bx, cl, ch */
tempData = TVPhaseList[tempbx];
- xgifb_reg_set(pVBInfo->Part2Port, 0x31, (unsigned short) (tempData
+ xgifb_reg_set(pVBInfo->Part2Port, 0x31, (unsigned short)(tempData
& 0x000000FF));
- xgifb_reg_set(pVBInfo->Part2Port, 0x32, (unsigned short) ((tempData
+ xgifb_reg_set(pVBInfo->Part2Port, 0x32, (unsigned short)((tempData
& 0x0000FF00) >> 8));
- xgifb_reg_set(pVBInfo->Part2Port, 0x33, (unsigned short) ((tempData
+ xgifb_reg_set(pVBInfo->Part2Port, 0x33, (unsigned short)((tempData
& 0x00FF0000) >> 16));
- xgifb_reg_set(pVBInfo->Part2Port, 0x34, (unsigned short) ((tempData
+ xgifb_reg_set(pVBInfo->Part2Port, 0x34, (unsigned short)((tempData
& 0xFF000000) >> 24));
}
@@ -4866,12 +4842,12 @@ static void XGI_SetYFilter(unsigned short ModeIdIndex,
}
}
-/* --------------------------------------------------------------------- */
-/* Function : XGI_OEM310Setting */
-/* Input : */
-/* Output : */
-/* Description : Customized Param. for 301 */
-/* --------------------------------------------------------------------- */
+/*
+ * Function : XGI_OEM310Setting
+ * Input :
+ * Output :
+ * Description : Customized Param. for 301
+ */
static void XGI_OEM310Setting(unsigned short ModeIdIndex,
struct vb_device_info *pVBInfo)
{
@@ -4890,12 +4866,12 @@ static void XGI_OEM310Setting(unsigned short ModeIdIndex,
}
}
-/* --------------------------------------------------------------------- */
-/* Function : XGI_SetCRT2ModeRegs */
-/* Input : */
-/* Output : */
-/* Description : Origin code for crt2group */
-/* --------------------------------------------------------------------- */
+/*
+ * Function : XGI_SetCRT2ModeRegs
+ * Input :
+ * Output :
+ * Description : Original code for crt2group
+ */
static void XGI_SetCRT2ModeRegs(struct vb_device_info *pVBInfo)
{
unsigned short tempbl;
@@ -4999,8 +4975,8 @@ reg_and_or:
tempah |= 0x40;
}
- if ((pVBInfo->LCDResInfo == Panel_1280x1024)
- || (pVBInfo->LCDResInfo == Panel_1280x1024x75))
+ if ((pVBInfo->LCDResInfo == Panel_1280x1024) ||
+ (pVBInfo->LCDResInfo == Panel_1280x1024x75))
tempah |= 0x80;
if (pVBInfo->LCDResInfo == Panel_1280x960)
@@ -5068,8 +5044,9 @@ void XGI_LockCRT2(struct vb_device_info *pVBInfo)
}
unsigned short XGI_GetRatePtrCRT2(struct xgi_hw_device_info *pXGIHWDE,
- unsigned short ModeNo, unsigned short ModeIdIndex,
- struct vb_device_info *pVBInfo)
+ unsigned short ModeNo,
+ unsigned short ModeIdIndex,
+ struct vb_device_info *pVBInfo)
{
const u8 LCDARefreshIndex[] = {
0x00, 0x00, 0x03, 0x01, 0x01, 0x01, 0x01, 0x00 };
@@ -5143,14 +5120,14 @@ unsigned short XGI_GetRatePtrCRT2(struct xgi_hw_device_info *pXGIHWDE,
}
static void XGI_SetLCDAGroup(unsigned short ModeNo, unsigned short ModeIdIndex,
- struct xgi_hw_device_info *HwDeviceExtension,
- struct vb_device_info *pVBInfo)
+ struct xgi_hw_device_info *HwDeviceExtension,
+ struct vb_device_info *pVBInfo)
{
unsigned short RefreshRateTableIndex;
pVBInfo->SetFlag |= ProgrammingCRT2;
RefreshRateTableIndex = XGI_GetRatePtrCRT2(HwDeviceExtension, ModeNo,
- ModeIdIndex, pVBInfo);
+ ModeIdIndex, pVBInfo);
XGI_GetLVDSResInfo(ModeIdIndex, pVBInfo);
XGI_GetLVDSData(ModeIdIndex, pVBInfo);
XGI_ModCRT1Regs(ModeIdIndex, HwDeviceExtension, pVBInfo);
@@ -5159,8 +5136,8 @@ static void XGI_SetLCDAGroup(unsigned short ModeNo, unsigned short ModeIdIndex,
}
static unsigned char XGI_SetCRT2Group301(unsigned short ModeNo,
- struct xgi_hw_device_info *HwDeviceExtension,
- struct vb_device_info *pVBInfo)
+ struct xgi_hw_device_info *HwDeviceExtension,
+ struct vb_device_info *pVBInfo)
{
unsigned short ModeIdIndex, RefreshRateTableIndex;
@@ -5168,7 +5145,7 @@ static unsigned char XGI_SetCRT2Group301(unsigned short ModeNo,
XGI_SearchModeID(ModeNo, &ModeIdIndex);
pVBInfo->SelectCRT2Rate = 4;
RefreshRateTableIndex = XGI_GetRatePtrCRT2(HwDeviceExtension, ModeNo,
- ModeIdIndex, pVBInfo);
+ ModeIdIndex, pVBInfo);
XGI_SaveCRT2Info(ModeNo, pVBInfo);
XGI_GetCRT2ResInfo(ModeIdIndex, pVBInfo);
XGI_GetCRT2Data(ModeIdIndex, RefreshRateTableIndex, pVBInfo);
@@ -5210,39 +5187,39 @@ void XGI_SenseCRT1(struct vb_device_info *pVBInfo)
CR63 = xgifb_reg_get(pVBInfo->P3d4, 0x63);
SR01 = xgifb_reg_get(pVBInfo->P3c4, 0x01);
- xgifb_reg_set(pVBInfo->P3c4, 0x01, (unsigned char) (SR01 & 0xDF));
- xgifb_reg_set(pVBInfo->P3d4, 0x63, (unsigned char) (CR63 & 0xBF));
+ xgifb_reg_set(pVBInfo->P3c4, 0x01, (unsigned char)(SR01 & 0xDF));
+ xgifb_reg_set(pVBInfo->P3d4, 0x63, (unsigned char)(CR63 & 0xBF));
CR17 = xgifb_reg_get(pVBInfo->P3d4, 0x17);
- xgifb_reg_set(pVBInfo->P3d4, 0x17, (unsigned char) (CR17 | 0x80));
+ xgifb_reg_set(pVBInfo->P3d4, 0x17, (unsigned char)(CR17 | 0x80));
SR1F = xgifb_reg_get(pVBInfo->P3c4, 0x1F);
- xgifb_reg_set(pVBInfo->P3c4, 0x1F, (unsigned char) (SR1F | 0x04));
+ xgifb_reg_set(pVBInfo->P3c4, 0x1F, (unsigned char)(SR1F | 0x04));
SR07 = xgifb_reg_get(pVBInfo->P3c4, 0x07);
- xgifb_reg_set(pVBInfo->P3c4, 0x07, (unsigned char) (SR07 & 0xFB));
+ xgifb_reg_set(pVBInfo->P3c4, 0x07, (unsigned char)(SR07 & 0xFB));
SR06 = xgifb_reg_get(pVBInfo->P3c4, 0x06);
- xgifb_reg_set(pVBInfo->P3c4, 0x06, (unsigned char) (SR06 & 0xC3));
+ xgifb_reg_set(pVBInfo->P3c4, 0x06, (unsigned char)(SR06 & 0xC3));
xgifb_reg_set(pVBInfo->P3d4, 0x11, 0x00);
for (i = 0; i < 8; i++)
- xgifb_reg_set(pVBInfo->P3d4, (unsigned short) i, CRTCData[i]);
+ xgifb_reg_set(pVBInfo->P3d4, (unsigned short)i, CRTCData[i]);
for (i = 8; i < 11; i++)
- xgifb_reg_set(pVBInfo->P3d4, (unsigned short) (i + 8),
- CRTCData[i]);
+ xgifb_reg_set(pVBInfo->P3d4, (unsigned short)(i + 8),
+ CRTCData[i]);
for (i = 11; i < 13; i++)
- xgifb_reg_set(pVBInfo->P3d4, (unsigned short) (i + 4),
- CRTCData[i]);
+ xgifb_reg_set(pVBInfo->P3d4, (unsigned short)(i + 4),
+ CRTCData[i]);
for (i = 13; i < 16; i++)
- xgifb_reg_set(pVBInfo->P3c4, (unsigned short) (i - 3),
- CRTCData[i]);
+ xgifb_reg_set(pVBInfo->P3c4, (unsigned short)(i - 3),
+ CRTCData[i]);
- xgifb_reg_set(pVBInfo->P3c4, 0x0E, (unsigned char) (CRTCData[16]
- & 0xE0));
+ xgifb_reg_set(pVBInfo->P3c4, 0x0E, (unsigned char)(CRTCData[16]
+ & 0xE0));
xgifb_reg_set(pVBInfo->P3c4, 0x31, 0x00);
xgifb_reg_set(pVBInfo->P3c4, 0x2B, 0x1B);
@@ -5275,12 +5252,12 @@ void XGI_SenseCRT1(struct vb_device_info *pVBInfo)
xgifb_reg_set(pVBInfo->P3d4, 0x53, (xgifb_reg_get(
pVBInfo->P3d4, 0x53) & 0xFD));
- xgifb_reg_set(pVBInfo->P3c4, 0x1F, (unsigned char) SR1F);
+ xgifb_reg_set(pVBInfo->P3c4, 0x1F, (unsigned char)SR1F);
}
static void XGI_EnableBridge(struct xgifb_video_info *xgifb_info,
- struct xgi_hw_device_info *HwDeviceExtension,
- struct vb_device_info *pVBInfo)
+ struct xgi_hw_device_info *HwDeviceExtension,
+ struct vb_device_info *pVBInfo)
{
unsigned short tempah;
@@ -5310,11 +5287,11 @@ static void XGI_EnableBridge(struct xgifb_video_info *xgifb_info,
if (!(pVBInfo->VBInfo & DisableCRT2Display)) {
xgifb_reg_and_or(pVBInfo->Part2Port, 0x00, ~0xE0,
- 0x20); /* shampoo 0129 */
+ 0x20); /* shampoo 0129 */
if (pVBInfo->VBType & (VB_SIS302LV | VB_XGI301C)) {
if (pVBInfo->VBInfo &
(SetCRT2ToLCD | XGI_SetCRT2ToLCDA))
- /* LVDS PLL power on */
+ /* LVDS PLL power on */
xgifb_reg_and(pVBInfo->Part4Port, 0x2A,
0x7F);
/* LVDS Driver power on */
@@ -5358,9 +5335,9 @@ static void XGI_EnableBridge(struct xgifb_video_info *xgifb_info,
}
static void XGI_SetCRT1Group(struct xgifb_video_info *xgifb_info,
- struct xgi_hw_device_info *HwDeviceExtension,
- unsigned short ModeNo, unsigned short ModeIdIndex,
- struct vb_device_info *pVBInfo)
+ struct xgi_hw_device_info *HwDeviceExtension,
+ unsigned short ModeNo, unsigned short ModeIdIndex,
+ struct vb_device_info *pVBInfo)
{
unsigned short RefreshRateTableIndex, temp;
@@ -5389,14 +5366,14 @@ static void XGI_SetCRT1Group(struct xgifb_video_info *xgifb_info,
}
RefreshRateTableIndex = XGI_GetRatePtrCRT2(HwDeviceExtension, ModeNo,
- ModeIdIndex, pVBInfo);
+ ModeIdIndex, pVBInfo);
if (RefreshRateTableIndex != 0xFFFF) {
XGI_SetSync(RefreshRateTableIndex, pVBInfo);
XGI_SetCRT1CRTC(ModeIdIndex, RefreshRateTableIndex,
pVBInfo, HwDeviceExtension);
XGI_SetCRT1DE(ModeIdIndex, RefreshRateTableIndex, pVBInfo);
XGI_SetCRT1Offset(ModeNo, ModeIdIndex, RefreshRateTableIndex,
- HwDeviceExtension, pVBInfo);
+ HwDeviceExtension, pVBInfo);
XGI_SetCRT1VCLK(ModeIdIndex, HwDeviceExtension,
RefreshRateTableIndex, pVBInfo);
}
@@ -5410,15 +5387,15 @@ static void XGI_SetCRT1Group(struct xgifb_video_info *xgifb_info,
XGI_SetXG21CRTC(RefreshRateTableIndex, pVBInfo);
XGI_UpdateXG21CRTC(ModeNo, pVBInfo,
- RefreshRateTableIndex);
+ RefreshRateTableIndex);
xgifb_set_lcd(HwDeviceExtension->jChipType,
pVBInfo, RefreshRateTableIndex);
if (pVBInfo->IF_DEF_LVDS == 1)
xgifb_set_lvds(xgifb_info,
- HwDeviceExtension->jChipType,
- ModeIdIndex, pVBInfo);
+ HwDeviceExtension->jChipType,
+ ModeIdIndex, pVBInfo);
}
}
@@ -5430,8 +5407,8 @@ static void XGI_SetCRT1Group(struct xgifb_video_info *xgifb_info,
}
unsigned char XGISetModeNew(struct xgifb_video_info *xgifb_info,
- struct xgi_hw_device_info *HwDeviceExtension,
- unsigned short ModeNo)
+ struct xgi_hw_device_info *HwDeviceExtension,
+ unsigned short ModeNo)
{
unsigned short ModeIdIndex;
struct vb_device_info VBINF;
@@ -5440,7 +5417,7 @@ unsigned char XGISetModeNew(struct xgifb_video_info *xgifb_info,
pVBInfo->IF_DEF_LVDS = 0;
if (HwDeviceExtension->jChipType >= XG20)
- pVBInfo->VBType = 0; /*set VBType default 0*/
+ pVBInfo->VBType = 0; /* set VBType default 0 */
XGIRegInit(pVBInfo, xgifb_info->vga_base);
@@ -5473,13 +5450,13 @@ unsigned char XGISetModeNew(struct xgifb_video_info *xgifb_info,
XGI_DisableBridge(xgifb_info, HwDeviceExtension, pVBInfo);
if (pVBInfo->VBInfo & (SetSimuScanMode | XGI_SetCRT2ToLCDA) ||
- !(pVBInfo->VBInfo & SwitchCRT2)) {
+ !(pVBInfo->VBInfo & SwitchCRT2)) {
XGI_SetCRT1Group(xgifb_info, HwDeviceExtension, ModeNo,
- ModeIdIndex, pVBInfo);
+ ModeIdIndex, pVBInfo);
if (pVBInfo->VBInfo & XGI_SetCRT2ToLCDA) {
XGI_SetLCDAGroup(ModeNo, ModeIdIndex,
- HwDeviceExtension, pVBInfo);
+ HwDeviceExtension, pVBInfo);
}
}
@@ -5488,7 +5465,7 @@ unsigned char XGISetModeNew(struct xgifb_video_info *xgifb_info,
case VB_CHIP_301: /* fall through */
case VB_CHIP_302:
XGI_SetCRT2Group301(ModeNo, HwDeviceExtension,
- pVBInfo); /*add for CRT2 */
+ pVBInfo); /* add for CRT2 */
break;
default:
@@ -5497,7 +5474,7 @@ unsigned char XGISetModeNew(struct xgifb_video_info *xgifb_info,
}
XGI_SetCRT2ModeRegs(pVBInfo);
- XGI_OEM310Setting(ModeIdIndex, pVBInfo); /*0212*/
+ XGI_OEM310Setting(ModeIdIndex, pVBInfo); /* 0212 */
XGI_EnableBridge(xgifb_info, HwDeviceExtension, pVBInfo);
} /* !XG20 */
else {
@@ -5515,7 +5492,7 @@ unsigned char XGISetModeNew(struct xgifb_video_info *xgifb_info,
XGI_DisplayOff(xgifb_info, HwDeviceExtension, pVBInfo);
XGI_SetCRT1Group(xgifb_info, HwDeviceExtension, ModeNo,
- ModeIdIndex, pVBInfo);
+ ModeIdIndex, pVBInfo);
XGI_DisplayOn(xgifb_info, HwDeviceExtension, pVBInfo);
}
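Nearly every hunk in this file is a checkpatch-style cleanup: drop the space after a cast, convert banner comments to the kernel's block-comment form, put spaces around binary operators, and align continuation lines. A minimal sketch of the target forms (identifiers invented for the example):

	/*
	 * Function : example_pack  (kernel block-comment style)
	 */
	static unsigned short example_pack(unsigned short lo, unsigned short hi)
	{
		unsigned short v = (unsigned short)(lo & 0xff); /* no space after cast */

		v |= (hi & 0x07) << 8;	/* spaces around binary operators */
		return v;
	}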
diff --git a/drivers/staging/xgifb/vb_table.h b/drivers/staging/xgifb/vb_table.h
index c801deb142f6..f9f98e06e6d5 100644
--- a/drivers/staging/xgifb/vb_table.h
+++ b/drivers/staging/xgifb/vb_table.h
@@ -1701,6 +1701,7 @@ static const struct XGI_LVDSCRT1VDataStruct XGI_LVDSCRT11280x1024_1_Vx75[] = {
{ {0x28, 0xF5, 0x00, 0x84, 0xFF, 0x29, 0x90} },/* ; 04 (x768) */
{ {0x28, 0x5A, 0x13, 0x87, 0xFF, 0x29, 0xA9} } /* ; 05 (x1024) */
};
+
/* CR00,CR02,CR03,CR04,CR05,SR0B,SR0C,SR0E */
static const struct XGI_LVDSCRT1HDataStruct XGI_LVDSCRT11280x1024_2_Hx75[] = {
{ {0x7E, 0x3B, 0x9A, 0x44, 0x12, 0x00, 0x01, 0x00} },/* ; 00 (320x) */
@@ -1886,17 +1887,17 @@ static const struct XGI330_LCDCapStruct XGI_LCDDLCapList[] = {
0x6C, 0xC3, 0x35, 0x62,
0x0A, 0xC0, 0x28, 0x10},
/* LCDCap1280x1024 */
- {Panel_1280x1024, XGI_LCDDualLink+DefaultLCDCap,
+ {Panel_1280x1024, XGI_LCDDualLink + DefaultLCDCap,
0x70, 0x03, VCLK108_2_315,
0x70, 0x44, 0xF8, 0x2F,
0x0A, 0xC0, 0x30, 0x10},
/* LCDCap1400x1050 */
- {Panel_1400x1050, XGI_LCDDualLink+DefaultLCDCap,
+ {Panel_1400x1050, XGI_LCDDualLink + DefaultLCDCap,
0x70, 0x03, VCLK108_2_315,
0x70, 0x44, 0xF8, 0x2F,
0x0A, 0xC0, 0x30, 0x10},
/* LCDCap1600x1200 */
- {Panel_1600x1200, XGI_LCDDualLink+DefaultLCDCap,
+ {Panel_1600x1200, XGI_LCDDualLink + DefaultLCDCap,
0xC0, 0x03, VCLK162,
0x43, 0x22, 0x70, 0x24,
0x0A, 0xC0, 0x30, 0x10},
@@ -1905,7 +1906,7 @@ static const struct XGI330_LCDCapStruct XGI_LCDDLCapList[] = {
0x2B, 0x61, 0x2B, 0x61,
0x0A, 0xC0, 0x28, 0x10},
/* LCDCap1280x1024x75 */
- {Panel_1280x1024x75, XGI_LCDDualLink+DefaultLCDCap,
+ {Panel_1280x1024x75, XGI_LCDDualLink + DefaultLCDCap,
0x90, 0x03, VCLK135_5,
0x54, 0x42, 0x4A, 0x61,
0x0A, 0xC0, 0x30, 0x10},
diff --git a/drivers/staging/xgifb/vb_util.h b/drivers/staging/xgifb/vb_util.h
index 08db58b396b2..052694e75053 100644
--- a/drivers/staging/xgifb/vb_util.h
+++ b/drivers/staging/xgifb/vb_util.h
@@ -18,7 +18,7 @@ static inline void xgifb_reg_and_or(unsigned long port, u8 index,
u8 temp;
temp = xgifb_reg_get(port, index);
- temp = (u8) ((temp & data_and) | data_or);
+ temp = (u8)((temp & data_and) | data_or);
xgifb_reg_set(port, index, temp);
}
@@ -28,7 +28,7 @@ static inline void xgifb_reg_and(unsigned long port, u8 index,
u8 temp;
temp = xgifb_reg_get(port, index);
- temp = (u8) (temp & data_and);
+ temp = (u8)(temp & data_and);
xgifb_reg_set(port, index, temp);
}
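Both helpers are read-modify-write wrappers over an indexed register pair. The xgifb_reg_or() calls seen earlier in the patch presumably follow the same shape; a sketch, assuming the same xgifb_reg_get()/xgifb_reg_set() primitives:

	static inline void xgifb_reg_or(unsigned long port, u8 index, u8 data_or)
	{
		u8 temp;

		temp = xgifb_reg_get(port, index);	/* read current value */
		temp = (u8)(temp | data_or);		/* set the requested bits */
		xgifb_reg_set(port, index, temp);	/* write it back */
	}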
diff --git a/drivers/target/iscsi/cxgbit/cxgbit_main.c b/drivers/target/iscsi/cxgbit/cxgbit_main.c
index ad26b9372f10..96eedfc49c94 100644
--- a/drivers/target/iscsi/cxgbit/cxgbit_main.c
+++ b/drivers/target/iscsi/cxgbit/cxgbit_main.c
@@ -653,6 +653,7 @@ static struct iscsit_transport cxgbit_transport = {
static struct cxgb4_uld_info cxgbit_uld_info = {
.name = DRV_NAME,
.nrxq = MAX_ULD_QSETS,
+ .ntxq = MAX_ULD_QSETS,
.rxq_size = 1024,
.lro = true,
.add = cxgbit_uld_add,
diff --git a/drivers/target/iscsi/cxgbit/cxgbit_target.c b/drivers/target/iscsi/cxgbit/cxgbit_target.c
index d02bf58aea6d..8bcb9b71f764 100644
--- a/drivers/target/iscsi/cxgbit/cxgbit_target.c
+++ b/drivers/target/iscsi/cxgbit/cxgbit_target.c
@@ -9,6 +9,7 @@
#include <linux/workqueue.h>
#include <linux/kthread.h>
#include <asm/unaligned.h>
+#include <net/tcp.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>
#include "cxgbit.h"
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index b7d747e92c7a..da2c73a255de 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -23,7 +23,9 @@
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/idr.h>
+#include <linux/delay.h>
#include <asm/unaligned.h>
+#include <net/ipv6.h>
#include <scsi/scsi_proto.h>
#include <scsi/iscsi_proto.h>
#include <scsi/scsi_tcq.h>
diff --git a/drivers/target/iscsi/iscsi_target.h b/drivers/target/iscsi/iscsi_target.h
index 4cf2c0f2ba2f..e0db2ceb0f87 100644
--- a/drivers/target/iscsi/iscsi_target.h
+++ b/drivers/target/iscsi/iscsi_target.h
@@ -1,6 +1,18 @@
#ifndef ISCSI_TARGET_H
#define ISCSI_TARGET_H
+#include <linux/types.h>
+#include <linux/spinlock.h>
+
+struct iscsi_cmd;
+struct iscsi_conn;
+struct iscsi_np;
+struct iscsi_portal_group;
+struct iscsi_session;
+struct iscsi_tpg_np;
+struct kref;
+struct sockaddr_storage;
+
extern struct iscsi_tiqn *iscsit_get_tiqn_for_login(unsigned char *);
extern struct iscsi_tiqn *iscsit_get_tiqn(unsigned char *, int);
extern void iscsit_put_tiqn_for_login(struct iscsi_tiqn *);
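This header, and the ones that follow, gain <linux/types.h> plus forward struct declarations so each header compiles on its own instead of relying on whatever its includer happened to pull in first. The shape, with hypothetical names:

	/* hypothetical self-contained header */
	#ifndef EXAMPLE_TARGET_H
	#define EXAMPLE_TARGET_H

	#include <linux/types.h>	/* u32 used below */

	struct iscsi_conn;		/* only used via pointer, so a
					 * forward declaration is enough
					 */

	extern int example_queue_data(struct iscsi_conn *, u32);

	#endif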
diff --git a/drivers/target/iscsi/iscsi_target_auth.c b/drivers/target/iscsi/iscsi_target_auth.c
index e116f0e845c0..903b667f8e01 100644
--- a/drivers/target/iscsi/iscsi_target_auth.c
+++ b/drivers/target/iscsi/iscsi_target_auth.c
@@ -20,8 +20,8 @@
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/err.h>
+#include <linux/random.h>
#include <linux/scatterlist.h>
-
#include <target/iscsi/iscsi_target_core.h>
#include "iscsi_target_nego.h"
#include "iscsi_target_auth.h"
diff --git a/drivers/target/iscsi/iscsi_target_auth.h b/drivers/target/iscsi/iscsi_target_auth.h
index d22f7b96a06c..1b91c13cc965 100644
--- a/drivers/target/iscsi/iscsi_target_auth.h
+++ b/drivers/target/iscsi/iscsi_target_auth.h
@@ -1,6 +1,8 @@
#ifndef _ISCSI_CHAP_H_
#define _ISCSI_CHAP_H_
+#include <linux/types.h>
+
#define CHAP_DIGEST_UNKNOWN 0
#define CHAP_DIGEST_MD5 5
#define CHAP_DIGEST_SHA 6
@@ -18,6 +20,9 @@
#define CHAP_STAGE_CLIENT_NRIC 4
#define CHAP_STAGE_SERVER_NR 5
+struct iscsi_node_auth;
+struct iscsi_conn;
+
extern u32 chap_main_loop(struct iscsi_conn *, struct iscsi_node_auth *, char *, char *,
int *, int *);
diff --git a/drivers/target/iscsi/iscsi_target_configfs.c b/drivers/target/iscsi/iscsi_target_configfs.c
index 923c032f0b95..bf40f03755dd 100644
--- a/drivers/target/iscsi/iscsi_target_configfs.c
+++ b/drivers/target/iscsi/iscsi_target_configfs.c
@@ -21,10 +21,11 @@
#include <linux/ctype.h>
#include <linux/export.h>
#include <linux/inet.h>
+#include <linux/module.h>
+#include <net/ipv6.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>
#include <target/iscsi/iscsi_transport.h>
-
#include <target/iscsi/iscsi_target_core.h>
#include "iscsi_target_parameters.h"
#include "iscsi_target_device.h"
@@ -100,8 +101,10 @@ static ssize_t lio_target_np_driver_store(struct config_item *item,
tpg_np_new = iscsit_tpg_add_network_portal(tpg,
&np->np_sockaddr, tpg_np, type);
- if (IS_ERR(tpg_np_new))
+ if (IS_ERR(tpg_np_new)) {
+ rc = PTR_ERR(tpg_np_new);
goto out;
+ }
} else {
tpg_np_new = iscsit_tpg_locate_child_np(tpg_np, type);
if (tpg_np_new) {
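The hunk above is a bugfix rather than style: on failure the error encoded in the returned ERR_PTR was dropped and rc kept its earlier value. The standard propagation shape, with hypothetical names:

	#include <linux/err.h>

	struct example_portal;
	struct example_portal *example_portal_add(void);	/* hypothetical */

	static ssize_t example_store(void)
	{
		struct example_portal *p = example_portal_add();

		if (IS_ERR(p))
			return PTR_ERR(p);	/* forward the encoded -errno */

		/* ... use p ... */
		return 0;
	}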
diff --git a/drivers/target/iscsi/iscsi_target_datain_values.c b/drivers/target/iscsi/iscsi_target_datain_values.c
index 647d4a5dca52..173ddd93c757 100644
--- a/drivers/target/iscsi/iscsi_target_datain_values.c
+++ b/drivers/target/iscsi/iscsi_target_datain_values.c
@@ -16,8 +16,8 @@
* GNU General Public License for more details.
******************************************************************************/
+#include <linux/slab.h>
#include <scsi/iscsi_proto.h>
-
#include <target/iscsi/iscsi_target_core.h>
#include "iscsi_target_seq_pdu_list.h"
#include "iscsi_target_erl1.h"
diff --git a/drivers/target/iscsi/iscsi_target_datain_values.h b/drivers/target/iscsi/iscsi_target_datain_values.h
index 646429ac5a02..16edeeeb7777 100644
--- a/drivers/target/iscsi/iscsi_target_datain_values.h
+++ b/drivers/target/iscsi/iscsi_target_datain_values.h
@@ -1,6 +1,9 @@
#ifndef ISCSI_TARGET_DATAIN_VALUES_H
#define ISCSI_TARGET_DATAIN_VALUES_H
+struct iscsi_cmd;
+struct iscsi_datain;
+
extern struct iscsi_datain_req *iscsit_allocate_datain_req(void);
extern void iscsit_attach_datain_req(struct iscsi_cmd *, struct iscsi_datain_req *);
extern void iscsit_free_datain_req(struct iscsi_cmd *, struct iscsi_datain_req *);
diff --git a/drivers/target/iscsi/iscsi_target_device.h b/drivers/target/iscsi/iscsi_target_device.h
index a0e2df9e8090..06dbff5cd520 100644
--- a/drivers/target/iscsi/iscsi_target_device.h
+++ b/drivers/target/iscsi/iscsi_target_device.h
@@ -1,6 +1,9 @@
#ifndef ISCSI_TARGET_DEVICE_H
#define ISCSI_TARGET_DEVICE_H
+struct iscsi_cmd;
+struct iscsi_session;
+
extern void iscsit_determine_maxcmdsn(struct iscsi_session *);
extern void iscsit_increment_maxcmdsn(struct iscsi_cmd *, struct iscsi_session *);
diff --git a/drivers/target/iscsi/iscsi_target_erl0.h b/drivers/target/iscsi/iscsi_target_erl0.h
index a9e2f9497fb2..60e69e2af6ed 100644
--- a/drivers/target/iscsi/iscsi_target_erl0.h
+++ b/drivers/target/iscsi/iscsi_target_erl0.h
@@ -1,6 +1,12 @@
#ifndef ISCSI_TARGET_ERL0_H
#define ISCSI_TARGET_ERL0_H
+#include <linux/types.h>
+
+struct iscsi_cmd;
+struct iscsi_conn;
+struct iscsi_session;
+
extern void iscsit_set_dataout_sequence_values(struct iscsi_cmd *);
extern int iscsit_check_pre_dataout(struct iscsi_cmd *, unsigned char *);
extern int iscsit_check_post_dataout(struct iscsi_cmd *, unsigned char *, u8);
diff --git a/drivers/target/iscsi/iscsi_target_erl1.c b/drivers/target/iscsi/iscsi_target_erl1.c
index 9214c9dafa2b..fe9b7f1e44ac 100644
--- a/drivers/target/iscsi/iscsi_target_erl1.c
+++ b/drivers/target/iscsi/iscsi_target_erl1.c
@@ -17,6 +17,7 @@
******************************************************************************/
#include <linux/list.h>
+#include <linux/slab.h>
#include <scsi/iscsi_proto.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>
diff --git a/drivers/target/iscsi/iscsi_target_erl1.h b/drivers/target/iscsi/iscsi_target_erl1.h
index 2a3ebf118a34..54d36bd25bea 100644
--- a/drivers/target/iscsi/iscsi_target_erl1.h
+++ b/drivers/target/iscsi/iscsi_target_erl1.h
@@ -1,6 +1,16 @@
#ifndef ISCSI_TARGET_ERL1_H
#define ISCSI_TARGET_ERL1_H
+#include <linux/types.h>
+#include <scsi/iscsi_proto.h> /* itt_t */
+
+struct iscsi_cmd;
+struct iscsi_conn;
+struct iscsi_datain_req;
+struct iscsi_ooo_cmdsn;
+struct iscsi_pdu;
+struct iscsi_session;
+
extern int iscsit_dump_data_payload(struct iscsi_conn *, u32, int);
extern int iscsit_create_recovery_datain_values_datasequenceinorder_yes(
struct iscsi_cmd *, struct iscsi_datain_req *);
diff --git a/drivers/target/iscsi/iscsi_target_erl2.c b/drivers/target/iscsi/iscsi_target_erl2.c
index e24f1c7c5862..faf9ae014b30 100644
--- a/drivers/target/iscsi/iscsi_target_erl2.c
+++ b/drivers/target/iscsi/iscsi_target_erl2.c
@@ -17,6 +17,7 @@
* GNU General Public License for more details.
******************************************************************************/
+#include <linux/slab.h>
#include <scsi/iscsi_proto.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>
diff --git a/drivers/target/iscsi/iscsi_target_erl2.h b/drivers/target/iscsi/iscsi_target_erl2.h
index 63f2501f3fe0..7965f1e86506 100644
--- a/drivers/target/iscsi/iscsi_target_erl2.h
+++ b/drivers/target/iscsi/iscsi_target_erl2.h
@@ -1,6 +1,13 @@
#ifndef ISCSI_TARGET_ERL2_H
#define ISCSI_TARGET_ERL2_H
+#include <linux/types.h>
+
+struct iscsi_cmd;
+struct iscsi_conn;
+struct iscsi_conn_recovery;
+struct iscsi_session;
+
extern void iscsit_create_conn_recovery_datain_values(struct iscsi_cmd *, __be32);
extern void iscsit_create_conn_recovery_dataout_values(struct iscsi_cmd *);
extern struct iscsi_conn_recovery *iscsit_get_inactive_connection_recovery_entry(
diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c
index 15f79a2ca34a..450f51deb2a2 100644
--- a/drivers/target/iscsi/iscsi_target_login.c
+++ b/drivers/target/iscsi/iscsi_target_login.c
@@ -20,6 +20,8 @@
#include <linux/string.h>
#include <linux/kthread.h>
#include <linux/idr.h>
+#include <linux/tcp.h> /* TCP_NODELAY */
+#include <net/ipv6.h> /* ipv6_addr_v4mapped() */
#include <scsi/iscsi_proto.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>
diff --git a/drivers/target/iscsi/iscsi_target_login.h b/drivers/target/iscsi/iscsi_target_login.h
index b597aa2c61a1..0e1fd6cedd54 100644
--- a/drivers/target/iscsi/iscsi_target_login.h
+++ b/drivers/target/iscsi/iscsi_target_login.h
@@ -1,6 +1,13 @@
#ifndef ISCSI_TARGET_LOGIN_H
#define ISCSI_TARGET_LOGIN_H
+#include <linux/types.h>
+
+struct iscsi_conn;
+struct iscsi_login;
+struct iscsi_np;
+struct sockaddr_storage;
+
extern int iscsi_login_setup_crypto(struct iscsi_conn *);
extern int iscsi_check_for_session_reinstatement(struct iscsi_conn *);
extern int iscsi_login_post_auth_non_zero_tsih(struct iscsi_conn *, u16, u32);
diff --git a/drivers/target/iscsi/iscsi_target_nego.c b/drivers/target/iscsi/iscsi_target_nego.c
index 89d34bd6d87f..46388c9e08da 100644
--- a/drivers/target/iscsi/iscsi_target_nego.c
+++ b/drivers/target/iscsi/iscsi_target_nego.c
@@ -18,6 +18,8 @@
#include <linux/ctype.h>
#include <linux/kthread.h>
+#include <linux/slab.h>
+#include <net/sock.h>
#include <scsi/iscsi_proto.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>
diff --git a/drivers/target/iscsi/iscsi_target_nego.h b/drivers/target/iscsi/iscsi_target_nego.h
index f021cbd330e5..53438bfca4c6 100644
--- a/drivers/target/iscsi/iscsi_target_nego.h
+++ b/drivers/target/iscsi/iscsi_target_nego.h
@@ -4,6 +4,10 @@
#define DECIMAL 0
#define HEX 1
+struct iscsi_conn;
+struct iscsi_login;
+struct iscsi_np;
+
extern void convert_null_to_semi(char *, int);
extern int extract_param(const char *, const char *, unsigned int, char *,
unsigned char *);
diff --git a/drivers/target/iscsi/iscsi_target_nodeattrib.h b/drivers/target/iscsi/iscsi_target_nodeattrib.h
index 0c69a46a62ec..79cdf06ade48 100644
--- a/drivers/target/iscsi/iscsi_target_nodeattrib.h
+++ b/drivers/target/iscsi/iscsi_target_nodeattrib.h
@@ -1,6 +1,11 @@
#ifndef ISCSI_TARGET_NODEATTRIB_H
#define ISCSI_TARGET_NODEATTRIB_H
+#include <linux/types.h>
+
+struct iscsi_node_acl;
+struct iscsi_portal_group;
+
extern void iscsit_set_default_node_attribues(struct iscsi_node_acl *,
struct iscsi_portal_group *);
extern int iscsit_na_dataout_timeout(struct iscsi_node_acl *, u32);
diff --git a/drivers/target/iscsi/iscsi_target_parameters.c b/drivers/target/iscsi/iscsi_target_parameters.c
index 0efa80bb8962..e65bf78ceef3 100644
--- a/drivers/target/iscsi/iscsi_target_parameters.c
+++ b/drivers/target/iscsi/iscsi_target_parameters.c
@@ -17,7 +17,7 @@
******************************************************************************/
#include <linux/slab.h>
-
+#include <linux/uio.h> /* struct kvec */
#include <target/iscsi/iscsi_target_core.h>
#include "iscsi_target_util.h"
#include "iscsi_target_parameters.h"
diff --git a/drivers/target/iscsi/iscsi_target_parameters.h b/drivers/target/iscsi/iscsi_target_parameters.h
index a0751e3f0813..9962ccf0ccd7 100644
--- a/drivers/target/iscsi/iscsi_target_parameters.h
+++ b/drivers/target/iscsi/iscsi_target_parameters.h
@@ -1,6 +1,7 @@
#ifndef ISCSI_PARAMETERS_H
#define ISCSI_PARAMETERS_H
+#include <linux/types.h>
#include <scsi/iscsi_proto.h>
struct iscsi_extra_response {
@@ -23,6 +24,11 @@ struct iscsi_param {
struct list_head p_list;
} ____cacheline_aligned;
+struct iscsi_conn;
+struct iscsi_conn_ops;
+struct iscsi_param_list;
+struct iscsi_sess_ops;
+
extern int iscsi_login_rx_data(struct iscsi_conn *, char *, int);
extern int iscsi_login_tx_data(struct iscsi_conn *, char *, char *, int);
extern void iscsi_dump_conn_ops(struct iscsi_conn_ops *);
diff --git a/drivers/target/iscsi/iscsi_target_seq_pdu_list.h b/drivers/target/iscsi/iscsi_target_seq_pdu_list.h
index d5b153751a8d..be1234362271 100644
--- a/drivers/target/iscsi/iscsi_target_seq_pdu_list.h
+++ b/drivers/target/iscsi/iscsi_target_seq_pdu_list.h
@@ -1,6 +1,9 @@
#ifndef ISCSI_SEQ_AND_PDU_LIST_H
#define ISCSI_SEQ_AND_PDU_LIST_H
+#include <linux/types.h>
+#include <linux/cache.h>
+
/* struct iscsi_pdu->status */
#define DATAOUT_PDU_SENT 1
@@ -78,6 +81,8 @@ struct iscsi_seq {
u32 xfer_len;
} ____cacheline_aligned;
+struct iscsi_cmd;
+
extern int iscsit_build_pdu_and_seq_lists(struct iscsi_cmd *, u32);
extern struct iscsi_pdu *iscsit_get_pdu_holder(struct iscsi_cmd *, u32, u32);
extern struct iscsi_pdu *iscsit_get_pdu_holder_for_seq(struct iscsi_cmd *, struct iscsi_seq *);
diff --git a/drivers/target/iscsi/iscsi_target_tmr.h b/drivers/target/iscsi/iscsi_target_tmr.h
index 142e992cb097..64cc5c07e47c 100644
--- a/drivers/target/iscsi/iscsi_target_tmr.h
+++ b/drivers/target/iscsi/iscsi_target_tmr.h
@@ -1,6 +1,12 @@
#ifndef ISCSI_TARGET_TMR_H
#define ISCSI_TARGET_TMR_H
+#include <linux/types.h>
+
+struct iscsi_cmd;
+struct iscsi_conn;
+struct iscsi_tmr_req;
+
extern u8 iscsit_tmr_abort_task(struct iscsi_cmd *, unsigned char *);
extern int iscsit_tmr_task_warm_reset(struct iscsi_conn *, struct iscsi_tmr_req *,
unsigned char *);
diff --git a/drivers/target/iscsi/iscsi_target_tpg.c b/drivers/target/iscsi/iscsi_target_tpg.c
index 0814e5894a96..2e7e08dbda48 100644
--- a/drivers/target/iscsi/iscsi_target_tpg.c
+++ b/drivers/target/iscsi/iscsi_target_tpg.c
@@ -16,9 +16,9 @@
* GNU General Public License for more details.
******************************************************************************/
+#include <linux/slab.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>
-
#include <target/iscsi/iscsi_target_core.h>
#include "iscsi_target_erl0.h"
#include "iscsi_target_login.h"
@@ -260,7 +260,6 @@ err_out:
iscsi_release_param_list(tpg->param_list);
tpg->param_list = NULL;
}
- kfree(tpg);
return -ENOMEM;
}
diff --git a/drivers/target/iscsi/iscsi_target_tpg.h b/drivers/target/iscsi/iscsi_target_tpg.h
index 2da211920c18..ceba29851167 100644
--- a/drivers/target/iscsi/iscsi_target_tpg.h
+++ b/drivers/target/iscsi/iscsi_target_tpg.h
@@ -1,6 +1,15 @@
#ifndef ISCSI_TARGET_TPG_H
#define ISCSI_TARGET_TPG_H
+#include <linux/types.h>
+
+struct iscsi_np;
+struct iscsi_session;
+struct iscsi_tiqn;
+struct iscsi_tpg_np;
+struct se_node_acl;
+struct sockaddr_storage;
+
extern struct iscsi_portal_group *iscsit_alloc_portal_group(struct iscsi_tiqn *, u16);
extern int iscsit_load_discovery_tpg(void);
extern void iscsit_release_discovery_tpg(void);
diff --git a/drivers/target/iscsi/iscsi_target_transport.c b/drivers/target/iscsi/iscsi_target_transport.c
index 08217d62fb0d..c4eb141c6435 100644
--- a/drivers/target/iscsi/iscsi_target_transport.c
+++ b/drivers/target/iscsi/iscsi_target_transport.c
@@ -1,5 +1,6 @@
#include <linux/spinlock.h>
#include <linux/list.h>
+#include <linux/module.h>
#include <target/iscsi/iscsi_transport.h>
static LIST_HEAD(g_transport_list);
diff --git a/drivers/target/iscsi/iscsi_target_util.c b/drivers/target/iscsi/iscsi_target_util.c
index 1f38177207e0..b5a1b4ccba12 100644
--- a/drivers/target/iscsi/iscsi_target_util.c
+++ b/drivers/target/iscsi/iscsi_target_util.c
@@ -18,6 +18,7 @@
#include <linux/list.h>
#include <linux/percpu_ida.h>
+#include <net/ipv6.h> /* ipv6_addr_equal() */
#include <scsi/scsi_tcq.h>
#include <scsi/iscsi_proto.h>
#include <target/target_core_base.h>
diff --git a/drivers/target/iscsi/iscsi_target_util.h b/drivers/target/iscsi/iscsi_target_util.h
index 995f1cb29d0e..8ff08856516a 100644
--- a/drivers/target/iscsi/iscsi_target_util.h
+++ b/drivers/target/iscsi/iscsi_target_util.h
@@ -1,8 +1,16 @@
#ifndef ISCSI_TARGET_UTIL_H
#define ISCSI_TARGET_UTIL_H
+#include <linux/types.h>
+#include <scsi/iscsi_proto.h> /* itt_t */
+
#define MARKER_SIZE 8
+struct iscsi_cmd;
+struct iscsi_conn;
+struct iscsi_conn_recovery;
+struct iscsi_session;
+
extern int iscsit_add_r2t_to_list(struct iscsi_cmd *, u32, u32, int, u32);
extern struct iscsi_r2t *iscsit_get_r2t_for_eos(struct iscsi_cmd *, u32, u32);
extern struct iscsi_r2t *iscsit_get_r2t_from_list(struct iscsi_cmd *);
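A recurring pattern in the iscsi header hunks above: when a header only passes pointers to a type, a forward declaration replaces an #include of the defining header, shrinking the include graph and the rebuild fan-out. A minimal sketch of the idiom (all names hypothetical, not from the patch):

#ifndef EXAMPLE_FWD_DECL_H
#define EXAMPLE_FWD_DECL_H

#include <linux/types.h>	/* u32 */

struct iscsi_cmd;		/* opaque here: used only via pointer */

extern int example_queue_cmd(struct iscsi_cmd *, u32);

#endif /* EXAMPLE_FWD_DECL_H */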
diff --git a/drivers/target/loopback/tcm_loop.h b/drivers/target/loopback/tcm_loop.h
index 4346462094a1..a8a230b4e6b5 100644
--- a/drivers/target/loopback/tcm_loop.h
+++ b/drivers/target/loopback/tcm_loop.h
@@ -1,3 +1,7 @@
+#include <linux/types.h>
+#include <linux/device.h>
+#include <target/target_core_base.h> /* struct se_cmd */
+
#define TCM_LOOP_VERSION "v2.1-rc2"
#define TL_WWN_ADDR_LEN 256
#define TL_TPGS_PER_HBA 32
diff --git a/drivers/target/sbp/sbp_target.c b/drivers/target/sbp/sbp_target.c
index 58bb6ed18185..e5c3e5f827d0 100644
--- a/drivers/target/sbp/sbp_target.c
+++ b/drivers/target/sbp/sbp_target.c
@@ -28,6 +28,7 @@
#include <linux/string.h>
#include <linux/configfs.h>
#include <linux/ctype.h>
+#include <linux/delay.h>
#include <linux/firewire.h>
#include <linux/firewire-constants.h>
#include <scsi/scsi_proto.h>
@@ -928,7 +929,7 @@ static struct sbp_target_request *sbp_mgt_get_req(struct sbp_session *sess,
struct sbp_target_request *req;
int tag;
- tag = percpu_ida_alloc(&se_sess->sess_tag_pool, GFP_ATOMIC);
+ tag = percpu_ida_alloc(&se_sess->sess_tag_pool, TASK_RUNNING);
if (tag < 0)
return ERR_PTR(-ENOMEM);
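The GFP_ATOMIC to TASK_RUNNING change above reflects percpu_ida_alloc() taking a task state rather than gfp flags. A sketch of the calling convention (pool and wrapper are hypothetical):

#include <linux/percpu_ida.h>
#include <linux/sched.h>	/* TASK_RUNNING */

static int example_get_tag(struct percpu_ida *pool)
{
	/* TASK_RUNNING: fail fast instead of sleeping for a free tag;
	 * TASK_INTERRUPTIBLE would block until one is returned.
	 */
	int tag = percpu_ida_alloc(pool, TASK_RUNNING);

	return tag < 0 ? -ENOMEM : tag;
}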
diff --git a/drivers/target/target_core_alua.c b/drivers/target/target_core_alua.c
index 4c82bbe19003..f5e330099bfc 100644
--- a/drivers/target/target_core_alua.c
+++ b/drivers/target/target_core_alua.c
@@ -26,8 +26,11 @@
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/configfs.h>
+#include <linux/delay.h>
#include <linux/export.h>
+#include <linux/fcntl.h>
#include <linux/file.h>
+#include <linux/fs.h>
#include <scsi/scsi_proto.h>
#include <asm/unaligned.h>
diff --git a/drivers/target/target_core_alua.h b/drivers/target/target_core_alua.h
index 9b250f9b33bf..c69c11baf07f 100644
--- a/drivers/target/target_core_alua.h
+++ b/drivers/target/target_core_alua.h
@@ -1,6 +1,8 @@
#ifndef TARGET_CORE_ALUA_H
#define TARGET_CORE_ALUA_H
+#include <target/target_core_base.h>
+
/*
* INQUIRY response data, TPGS Field
*
diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c
index 2001005bef45..54b36c9835be 100644
--- a/drivers/target/target_core_configfs.c
+++ b/drivers/target/target_core_configfs.c
@@ -143,13 +143,13 @@ static ssize_t target_core_item_dbroot_store(struct config_item *item,
pr_err("db_root: cannot open: %s\n", db_root_stage);
return -EINVAL;
}
- if (!S_ISDIR(fp->f_inode->i_mode)) {
- filp_close(fp, 0);
+ if (!S_ISDIR(file_inode(fp)->i_mode)) {
+ filp_close(fp, NULL);
mutex_unlock(&g_tf_lock);
pr_err("db_root: not a directory: %s\n", db_root_stage);
return -EINVAL;
}
- filp_close(fp, 0);
+ filp_close(fp, NULL);
strncpy(db_root, db_root_stage, read_bytes);
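file_inode() is the sanctioned accessor for a file's inode, and the second argument of filp_close() is a struct files_struct pointer, so NULL rather than 0 is the correct "no owner" value. A sketch combining both (helper name hypothetical):

#include <linux/err.h>
#include <linux/fcntl.h>	/* O_RDONLY */
#include <linux/fs.h>

static int example_check_dir(const char *path)
{
	struct file *fp = filp_open(path, O_RDONLY, 0);
	int ret = 0;

	if (IS_ERR(fp))
		return PTR_ERR(fp);
	if (!S_ISDIR(file_inode(fp)->i_mode))
		ret = -ENOTDIR;
	filp_close(fp, NULL);	/* not owned by a files_struct */
	return ret;
}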
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
index 6b423485c5d6..1ebd13ef7bd3 100644
--- a/drivers/target/target_core_device.c
+++ b/drivers/target/target_core_device.c
@@ -33,6 +33,7 @@
#include <linux/kthread.h>
#include <linux/in.h>
#include <linux/export.h>
+#include <linux/t10-pi.h>
#include <asm/unaligned.h>
#include <net/sock.h>
#include <net/tcp.h>
diff --git a/drivers/target/target_core_fabric_configfs.c b/drivers/target/target_core_fabric_configfs.c
index 31a096aa16ab..d8a16ca6baa5 100644
--- a/drivers/target/target_core_fabric_configfs.c
+++ b/drivers/target/target_core_fabric_configfs.c
@@ -137,7 +137,7 @@ static int target_fabric_mappedlun_link(
return core_dev_add_initiator_node_lun_acl(se_tpg, lacl, lun, lun_access_ro);
}
-static int target_fabric_mappedlun_unlink(
+static void target_fabric_mappedlun_unlink(
struct config_item *lun_acl_ci,
struct config_item *lun_ci)
{
@@ -146,7 +146,7 @@ static int target_fabric_mappedlun_unlink(
struct se_lun *lun = container_of(to_config_group(lun_ci),
struct se_lun, lun_group);
- return core_dev_del_initiator_node_lun_acl(lun, lacl);
+ core_dev_del_initiator_node_lun_acl(lun, lacl);
}
static struct se_lun_acl *item_to_lun_acl(struct config_item *item)
@@ -669,7 +669,7 @@ out:
return ret;
}
-static int target_fabric_port_unlink(
+static void target_fabric_port_unlink(
struct config_item *lun_ci,
struct config_item *se_dev_ci)
{
@@ -688,7 +688,6 @@ static int target_fabric_port_unlink(
}
core_dev_del_lun(se_tpg, lun);
- return 0;
}
static void target_fabric_port_release(struct config_item *item)
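Both converted functions are configfs ->drop_link() implementations; their return value was never acted on, so the prototype becomes void. A minimal sketch of a conforming callback under the new signature (types and helper hypothetical):

#include <linux/configfs.h>
#include <linux/kernel.h>	/* container_of */

struct example_lun {
	struct config_group group;
};

static void example_del_lun(struct example_lun *lun)
{
	/* hypothetical teardown */
}

static void example_drop_link(struct config_item *parent,
			      struct config_item *target)
{
	struct example_lun *lun = container_of(to_config_group(target),
					       struct example_lun, group);

	example_del_lun(lun);	/* nothing to return anymore */
}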
diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c
index d545993df18b..87aa376a1a1a 100644
--- a/drivers/target/target_core_file.c
+++ b/drivers/target/target_core_file.c
@@ -32,6 +32,7 @@
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/falloc.h>
+#include <linux/uio.h>
#include <scsi/scsi_proto.h>
#include <asm/unaligned.h>
diff --git a/drivers/target/target_core_file.h b/drivers/target/target_core_file.h
index 068966fce308..526595a072de 100644
--- a/drivers/target/target_core_file.h
+++ b/drivers/target/target_core_file.h
@@ -1,6 +1,8 @@
#ifndef TARGET_CORE_FILE_H
#define TARGET_CORE_FILE_H
+#include <target/target_core_base.h>
+
#define FD_VERSION "4.0"
#define FD_MAX_DEV_NAME 256
diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c
index 372d744315f3..d316ed537d59 100644
--- a/drivers/target/target_core_iblock.c
+++ b/drivers/target/target_core_iblock.c
@@ -388,7 +388,7 @@ iblock_execute_sync_cache(struct se_cmd *cmd)
bio = bio_alloc(GFP_KERNEL, 0);
bio->bi_end_io = iblock_end_io_flush;
bio->bi_bdev = ib_dev->ibd_bd;
- bio_set_op_attrs(bio, REQ_OP_WRITE, WRITE_FLUSH);
+ bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
if (!immed)
bio->bi_private = cmd;
submit_bio(bio);
@@ -686,15 +686,15 @@ iblock_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
struct request_queue *q = bdev_get_queue(ib_dev->ibd_bd);
/*
- * Force writethrough using WRITE_FUA if a volatile write cache
+ * Force writethrough using REQ_FUA if a volatile write cache
* is not enabled, or if initiator set the Force Unit Access bit.
*/
op = REQ_OP_WRITE;
if (test_bit(QUEUE_FLAG_FUA, &q->queue_flags)) {
if (cmd->se_cmd_flags & SCF_FUA)
- op_flags = WRITE_FUA;
+ op_flags = REQ_FUA;
else if (!test_bit(QUEUE_FLAG_WC, &q->queue_flags))
- op_flags = WRITE_FUA;
+ op_flags = REQ_FUA;
}
} else {
op = REQ_OP_READ;
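The WRITE_FLUSH/WRITE_FUA aliases are gone; drivers compose bio->bi_opf from a REQ_OP_* value plus REQ_* flag bits directly. Roughly, the empty-flush pattern used above looks like this (standalone sketch, caller-supplied completion assumed):

#include <linux/bio.h>
#include <linux/blk_types.h>

static void example_flush_cache(struct block_device *bdev,
				bio_end_io_t *done, void *private)
{
	struct bio *bio = bio_alloc(GFP_KERNEL, 0);	/* no data payload */

	bio->bi_bdev = bdev;
	bio->bi_end_io = done;
	bio->bi_private = private;
	/* an empty write with REQ_PREFLUSH flushes the volatile cache */
	bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
	submit_bio(bio);
}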
diff --git a/drivers/target/target_core_iblock.h b/drivers/target/target_core_iblock.h
index 01c2afd81500..718d3fcd3e7c 100644
--- a/drivers/target/target_core_iblock.h
+++ b/drivers/target/target_core_iblock.h
@@ -1,6 +1,9 @@
#ifndef TARGET_CORE_IBLOCK_H
#define TARGET_CORE_IBLOCK_H
+#include <linux/atomic.h>
+#include <target/target_core_base.h>
+
#define IBLOCK_VERSION "4.0"
#define IBLOCK_MAX_CDBS 16
diff --git a/drivers/target/target_core_internal.h b/drivers/target/target_core_internal.h
index e2c970a9d61c..9ab7090f7c83 100644
--- a/drivers/target/target_core_internal.h
+++ b/drivers/target/target_core_internal.h
@@ -1,6 +1,11 @@
#ifndef TARGET_CORE_INTERNAL_H
#define TARGET_CORE_INTERNAL_H
+#include <linux/configfs.h>
+#include <linux/list.h>
+#include <linux/types.h>
+#include <target/target_core_base.h>
+
#define TARGET_CORE_NAME_MAX_LEN 64
#define TARGET_FABRIC_NAME_SIZE 32
diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c
index 47463c99c318..d761025144f9 100644
--- a/drivers/target/target_core_pr.c
+++ b/drivers/target/target_core_pr.c
@@ -29,6 +29,8 @@
#include <linux/list.h>
#include <linux/vmalloc.h>
#include <linux/file.h>
+#include <linux/fcntl.h>
+#include <linux/fs.h>
#include <scsi/scsi_proto.h>
#include <asm/unaligned.h>
@@ -253,8 +255,7 @@ target_scsi2_reservation_reserve(struct se_cmd *cmd)
if ((cmd->t_task_cdb[1] & 0x01) &&
(cmd->t_task_cdb[1] & 0x02)) {
- pr_err("LongIO and Obselete Bits set, returning"
- " ILLEGAL_REQUEST\n");
+ pr_err("LongIO and Obsolete Bits set, returning ILLEGAL_REQUEST\n");
return TCM_UNSUPPORTED_SCSI_OPCODE;
}
/*
diff --git a/drivers/target/target_core_pr.h b/drivers/target/target_core_pr.h
index e3d26e9126a0..847bd470339c 100644
--- a/drivers/target/target_core_pr.h
+++ b/drivers/target/target_core_pr.h
@@ -1,5 +1,9 @@
#ifndef TARGET_CORE_PR_H
#define TARGET_CORE_PR_H
+
+#include <linux/types.h>
+#include <target/target_core_base.h>
+
/*
* PERSISTENT_RESERVE_OUT service action codes
*
diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c
index 9125d9358dea..04d7aa7390d0 100644
--- a/drivers/target/target_core_pscsi.c
+++ b/drivers/target/target_core_pscsi.c
@@ -935,13 +935,9 @@ pscsi_map_sg(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
rc = bio_add_pc_page(pdv->pdv_sd->request_queue,
bio, page, bytes, off);
- if (rc != bytes)
- goto fail;
-
pr_debug("PSCSI: bio->bi_vcnt: %d nr_vecs: %d\n",
- bio->bi_vcnt, nr_vecs);
-
- if (bio->bi_vcnt > nr_vecs) {
+ bio_segments(bio), nr_vecs);
+ if (rc != bytes) {
pr_debug("PSCSI: Reached bio->bi_vcnt max:"
" %d i: %d bio: %p, allocating another"
" bio\n", bio->bi_vcnt, i, bio);
diff --git a/drivers/target/target_core_pscsi.h b/drivers/target/target_core_pscsi.h
index 6d2007e35df6..8a02fa47c7e8 100644
--- a/drivers/target/target_core_pscsi.h
+++ b/drivers/target/target_core_pscsi.h
@@ -15,11 +15,12 @@
#define PS_TIMEOUT_DISK (15*HZ)
#define PS_TIMEOUT_OTHER (500*HZ)
-#include <linux/device.h>
-#include <linux/kref.h>
-#include <linux/kobject.h>
+#include <linux/cache.h> /* ___cacheline_aligned */
+#include <target/target_core_base.h> /* struct se_device */
+struct block_device;
struct scsi_device;
+struct Scsi_Host;
struct pscsi_plugin_task {
unsigned char pscsi_sense[TRANSPORT_SENSE_BUFFER];
diff --git a/drivers/target/target_core_rd.c b/drivers/target/target_core_rd.c
index 24b36fd785f1..ddc216c9f1f6 100644
--- a/drivers/target/target_core_rd.c
+++ b/drivers/target/target_core_rd.c
@@ -26,7 +26,9 @@
#include <linux/string.h>
#include <linux/parser.h>
+#include <linux/highmem.h>
#include <linux/timer.h>
+#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <scsi/scsi_proto.h>
diff --git a/drivers/target/target_core_rd.h b/drivers/target/target_core_rd.h
index cc46a6a89b38..91fc1a34791d 100644
--- a/drivers/target/target_core_rd.h
+++ b/drivers/target/target_core_rd.h
@@ -1,6 +1,10 @@
#ifndef TARGET_CORE_RD_H
#define TARGET_CORE_RD_H
+#include <linux/module.h>
+#include <linux/types.h>
+#include <target/target_core_base.h>
+
#define RD_HBA_VERSION "v4.0"
#define RD_MCP_VERSION "4.0"
diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c
index 04f616b3ba0a..4879e70e2eef 100644
--- a/drivers/target/target_core_sbc.c
+++ b/drivers/target/target_core_sbc.c
@@ -24,6 +24,7 @@
#include <linux/module.h>
#include <linux/ratelimit.h>
#include <linux/crc-t10dif.h>
+#include <linux/t10-pi.h>
#include <asm/unaligned.h>
#include <scsi/scsi_proto.h>
#include <scsi/scsi_tcq.h>
diff --git a/drivers/target/target_core_ua.h b/drivers/target/target_core_ua.h
index bd6e78ba153d..97402856a8f0 100644
--- a/drivers/target/target_core_ua.h
+++ b/drivers/target/target_core_ua.h
@@ -1,6 +1,8 @@
#ifndef TARGET_CORE_UA_H
#define TARGET_CORE_UA_H
+#include <target/target_core_base.h>
+
/*
* From spc4r17, Table D.1: ASC and ASCQ Assignement
*/
diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c
index 47562509b489..8041710b6972 100644
--- a/drivers/target/target_core_user.c
+++ b/drivers/target/target_core_user.c
@@ -27,6 +27,7 @@
#include <linux/uio_driver.h>
#include <linux/stringify.h>
#include <linux/bitops.h>
+#include <linux/highmem.h>
#include <net/genetlink.h>
#include <scsi/scsi_common.h>
#include <scsi/scsi_proto.h>
@@ -147,8 +148,8 @@ static const struct genl_multicast_group tcmu_mcgrps[] = {
};
/* Our generic netlink family */
-static struct genl_family tcmu_genl_family = {
- .id = GENL_ID_GENERATE,
+static struct genl_family tcmu_genl_family __ro_after_init = {
+ .module = THIS_MODULE,
.hdrsize = 0,
.name = "TCM-USER",
.version = 1,
@@ -537,7 +538,7 @@ tcmu_queue_cmd(struct se_cmd *se_cmd)
struct se_device *se_dev = se_cmd->se_dev;
struct tcmu_dev *udev = TCMU_DEV(se_dev);
struct tcmu_cmd *tcmu_cmd;
- int ret;
+ sense_reason_t ret;
tcmu_cmd = tcmu_alloc_cmd(se_cmd);
if (!tcmu_cmd)
@@ -685,8 +686,6 @@ static int tcmu_check_expired_cmd(int id, void *p, void *data)
target_complete_cmd(cmd->se_cmd, SAM_STAT_CHECK_CONDITION);
cmd->se_cmd = NULL;
- kmem_cache_free(tcmu_cmd_cache, cmd);
-
return 0;
}
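GENL_ID_GENERATE no longer exists: generic netlink families are initialized statically, an id is assigned at genl_register_family() time, and ownership is declared via .module. A sketch with a hypothetical family name:

#include <linux/module.h>
#include <net/genetlink.h>

static struct genl_family example_genl_family __ro_after_init = {
	.module  = THIS_MODULE,	/* replaces .id = GENL_ID_GENERATE */
	.name    = "EXAMPLE",
	.version = 1,
	.maxattr = 0,
};

static int __init example_genl_init(void)
{
	return genl_register_family(&example_genl_family);
}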
diff --git a/drivers/target/target_core_xcopy.c b/drivers/target/target_core_xcopy.c
index 094a1440eacb..37d5caebffa6 100644
--- a/drivers/target/target_core_xcopy.c
+++ b/drivers/target/target_core_xcopy.c
@@ -25,6 +25,7 @@
#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/configfs.h>
+#include <linux/ratelimit.h>
#include <scsi/scsi_proto.h>
#include <asm/unaligned.h>
diff --git a/drivers/target/target_core_xcopy.h b/drivers/target/target_core_xcopy.h
index 700a981c7b41..4d3d4dd060f2 100644
--- a/drivers/target/target_core_xcopy.h
+++ b/drivers/target/target_core_xcopy.h
@@ -1,3 +1,5 @@
+#include <target/target_core_base.h>
+
#define XCOPY_TARGET_DESC_LEN 32
#define XCOPY_SEGMENT_DESC_LEN 28
#define XCOPY_NAA_IEEE_REGEX_LEN 16
diff --git a/drivers/target/tcm_fc/tcm_fc.h b/drivers/target/tcm_fc/tcm_fc.h
index e28209b99b59..11d27b93b413 100644
--- a/drivers/target/tcm_fc/tcm_fc.h
+++ b/drivers/target/tcm_fc/tcm_fc.h
@@ -17,6 +17,9 @@
#ifndef __TCM_FC_H__
#define __TCM_FC_H__
+#include <linux/types.h>
+#include <target/target_core_base.h>
+
#define FT_VERSION "0.4"
#define FT_NAMELEN 32 /* length of ASCII WWPNs including pad */
diff --git a/drivers/target/tcm_fc/tfc_cmd.c b/drivers/target/tcm_fc/tfc_cmd.c
index ff5de9a96643..9af7842b8178 100644
--- a/drivers/target/tcm_fc/tfc_cmd.c
+++ b/drivers/target/tcm_fc/tfc_cmd.c
@@ -92,7 +92,7 @@ static void ft_free_cmd(struct ft_cmd *cmd)
fp = cmd->req_frame;
lport = fr_dev(fp);
if (fr_seq(fp))
- lport->tt.seq_release(fr_seq(fp));
+ fc_seq_release(fr_seq(fp));
fc_frame_free(fp);
percpu_ida_free(&sess->se_sess->sess_tag_pool, cmd->se_cmd.map_tag);
ft_sess_put(sess); /* undo get from lookup at recv */
@@ -161,11 +161,11 @@ int ft_queue_status(struct se_cmd *se_cmd)
/*
* Send response.
*/
- cmd->seq = lport->tt.seq_start_next(cmd->seq);
+ cmd->seq = fc_seq_start_next(cmd->seq);
fc_fill_fc_hdr(fp, FC_RCTL_DD_CMD_STATUS, ep->did, ep->sid, FC_TYPE_FCP,
FC_FC_EX_CTX | FC_FC_LAST_SEQ | FC_FC_END_SEQ, 0);
- rc = lport->tt.seq_send(lport, cmd->seq, fp);
+ rc = fc_seq_send(lport, cmd->seq, fp);
if (rc) {
pr_info_ratelimited("%s: Failed to send response frame %p, "
"xid <0x%x>\n", __func__, fp, ep->xid);
@@ -177,7 +177,7 @@ int ft_queue_status(struct se_cmd *se_cmd)
se_cmd->scsi_status = SAM_STAT_TASK_SET_FULL;
return -ENOMEM;
}
- lport->tt.exch_done(cmd->seq);
+ fc_exch_done(cmd->seq);
/*
* Drop the extra ACK_KREF reference taken by target_submit_cmd()
* ahead of ft_check_stop_free() -> transport_generic_free_cmd()
@@ -221,7 +221,7 @@ int ft_write_pending(struct se_cmd *se_cmd)
memset(txrdy, 0, sizeof(*txrdy));
txrdy->ft_burst_len = htonl(se_cmd->data_length);
- cmd->seq = lport->tt.seq_start_next(cmd->seq);
+ cmd->seq = fc_seq_start_next(cmd->seq);
fc_fill_fc_hdr(fp, FC_RCTL_DD_DATA_DESC, ep->did, ep->sid, FC_TYPE_FCP,
FC_FC_EX_CTX | FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0);
@@ -242,7 +242,7 @@ int ft_write_pending(struct se_cmd *se_cmd)
cmd->was_ddp_setup = 1;
}
}
- lport->tt.seq_send(lport, cmd->seq, fp);
+ fc_seq_send(lport, cmd->seq, fp);
return 0;
}
@@ -323,8 +323,8 @@ static void ft_send_resp_status(struct fc_lport *lport,
fc_fill_reply_hdr(fp, rx_fp, FC_RCTL_DD_CMD_STATUS, 0);
sp = fr_seq(fp);
if (sp) {
- lport->tt.seq_send(lport, sp, fp);
- lport->tt.exch_done(sp);
+ fc_seq_send(lport, sp, fp);
+ fc_exch_done(sp);
} else {
lport->tt.frame_send(lport, fp);
}
@@ -461,7 +461,7 @@ static void ft_recv_cmd(struct ft_sess *sess, struct fc_frame *fp)
cmd->se_cmd.map_tag = tag;
cmd->sess = sess;
- cmd->seq = lport->tt.seq_assign(lport, fp);
+ cmd->seq = fc_seq_assign(lport, fp);
if (!cmd->seq) {
percpu_ida_free(&se_sess->sess_tag_pool, tag);
goto busy;
@@ -563,7 +563,7 @@ static void ft_send_work(struct work_struct *work)
task_attr = TCM_SIMPLE_TAG;
}
- fc_seq_exch(cmd->seq)->lp->tt.seq_set_resp(cmd->seq, ft_recv_seq, cmd);
+ fc_seq_set_resp(cmd->seq, ft_recv_seq, cmd);
cmd->se_cmd.tag = fc_seq_exch(cmd->seq)->rxid;
/*
* Use a single se_cmd->cmd_kref as we expect to release se_cmd
diff --git a/drivers/target/tcm_fc/tfc_io.c b/drivers/target/tcm_fc/tfc_io.c
index 6f7c65abfe2a..1eb1f58e00e4 100644
--- a/drivers/target/tcm_fc/tfc_io.c
+++ b/drivers/target/tcm_fc/tfc_io.c
@@ -82,7 +82,7 @@ int ft_queue_data_in(struct se_cmd *se_cmd)
ep = fc_seq_exch(cmd->seq);
lport = ep->lp;
- cmd->seq = lport->tt.seq_start_next(cmd->seq);
+ cmd->seq = fc_seq_start_next(cmd->seq);
remaining = se_cmd->data_length;
@@ -174,7 +174,7 @@ int ft_queue_data_in(struct se_cmd *se_cmd)
f_ctl |= FC_FC_END_SEQ;
fc_fill_fc_hdr(fp, FC_RCTL_DD_SOL_DATA, ep->did, ep->sid,
FC_TYPE_FCP, f_ctl, fh_off);
- error = lport->tt.seq_send(lport, seq, fp);
+ error = fc_seq_send(lport, seq, fp);
if (error) {
pr_info_ratelimited("%s: Failed to send frame %p, "
"xid <0x%x>, remaining %zu, "
diff --git a/drivers/thermal/Kconfig b/drivers/thermal/Kconfig
index a13541bdc726..c2c056cc7ea5 100644
--- a/drivers/thermal/Kconfig
+++ b/drivers/thermal/Kconfig
@@ -177,8 +177,10 @@ config THERMAL_EMULATION
config HISI_THERMAL
tristate "Hisilicon thermal driver"
- depends on (ARCH_HISI && CPU_THERMAL && OF) || COMPILE_TEST
+ depends on ARCH_HISI || COMPILE_TEST
depends on HAS_IOMEM
+ depends on OF
+ default y
help
Enable this to plug hisilicon's thermal sensor driver into the Linux
thermal framework. cpufreq is used as the cooling device to throttle
diff --git a/drivers/thermal/Makefile b/drivers/thermal/Makefile
index c92eb22a41ff..6a3d7b573036 100644
--- a/drivers/thermal/Makefile
+++ b/drivers/thermal/Makefile
@@ -3,7 +3,8 @@
#
obj-$(CONFIG_THERMAL) += thermal_sys.o
-thermal_sys-y += thermal_core.o
+thermal_sys-y += thermal_core.o thermal_sysfs.o \
+ thermal_helpers.o
# interface to/from other layers providing sensors
thermal_sys-$(CONFIG_THERMAL_HWMON) += thermal_hwmon.o
diff --git a/drivers/thermal/db8500_thermal.c b/drivers/thermal/db8500_thermal.c
index e776cea80cfc..f491faf16592 100644
--- a/drivers/thermal/db8500_thermal.c
+++ b/drivers/thermal/db8500_thermal.c
@@ -512,6 +512,7 @@ static const struct of_device_id db8500_thermal_match[] = {
{ .compatible = "stericsson,db8500-thermal" },
{},
};
+MODULE_DEVICE_TABLE(of, db8500_thermal_match);
#endif
static struct platform_driver db8500_thermal_driver = {
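db8500, max77620, and tango all gain the same one-liner: MODULE_DEVICE_TABLE() exports the match table as module alias data so udev can autoload the driver when a matching device appears. The shape of the idiom (names hypothetical):

#include <linux/module.h>
#include <linux/of.h>

static const struct of_device_id example_thermal_match[] = {
	{ .compatible = "vendor,example-thermal" },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, example_thermal_match);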
diff --git a/drivers/thermal/devfreq_cooling.c b/drivers/thermal/devfreq_cooling.c
index 81631b110e17..5a737fd5f1aa 100644
--- a/drivers/thermal/devfreq_cooling.c
+++ b/drivers/thermal/devfreq_cooling.c
@@ -238,7 +238,7 @@ get_static_power(struct devfreq_cooling_device *dfc, unsigned long freq)
return 0;
}
- return dfc->power_ops->get_static_power(voltage);
+ return dfc->power_ops->get_static_power(df, voltage);
}
/**
@@ -262,7 +262,8 @@ get_dynamic_power(struct devfreq_cooling_device *dfc, unsigned long freq,
struct devfreq_cooling_power *dfc_power = dfc->power_ops;
if (dfc_power->get_dynamic_power)
- return dfc_power->get_dynamic_power(freq, voltage);
+ return dfc_power->get_dynamic_power(dfc->devfreq, freq,
+ voltage);
freq_mhz = freq / 1000000;
power = (u64)dfc_power->dyn_power_coeff * freq_mhz * voltage * voltage;
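The two hunks above track an interface change: the devfreq cooling power-model callbacks now receive the struct devfreq pointer, giving implementations access to driver state. A sketch under the new prototypes (the formulas are placeholders, not a real model):

#include <linux/devfreq_cooling.h>

static unsigned long example_static_power(struct devfreq *df,
					  unsigned long voltage_mv)
{
	return voltage_mv / 10;			/* placeholder model */
}

static unsigned long example_dynamic_power(struct devfreq *df,
					   unsigned long freq_hz,
					   unsigned long voltage_mv)
{
	unsigned long freq_mhz = freq_hz / 1000000;

	return freq_mhz * voltage_mv * voltage_mv / 1000;	/* placeholder */
}

static struct devfreq_cooling_power example_power_ops = {
	.get_static_power  = example_static_power,
	.get_dynamic_power = example_dynamic_power,
};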
diff --git a/drivers/thermal/int340x_thermal/int3400_thermal.c b/drivers/thermal/int340x_thermal/int3400_thermal.c
index 5836e5554433..9413c4abf0b9 100644
--- a/drivers/thermal/int340x_thermal/int3400_thermal.c
+++ b/drivers/thermal/int340x_thermal/int3400_thermal.c
@@ -96,7 +96,7 @@ static ssize_t current_uuid_store(struct device *dev,
return -EINVAL;
}
-static DEVICE_ATTR(current_uuid, 0644, current_uuid_show, current_uuid_store);
+static DEVICE_ATTR_RW(current_uuid);
static DEVICE_ATTR_RO(available_uuids);
static struct attribute *uuid_attrs[] = {
&dev_attr_available_uuids.attr,
diff --git a/drivers/thermal/intel_pch_thermal.c b/drivers/thermal/intel_pch_thermal.c
index 19bf2028e508..2b49e8d0fe9e 100644
--- a/drivers/thermal/intel_pch_thermal.c
+++ b/drivers/thermal/intel_pch_thermal.c
@@ -29,6 +29,7 @@
#define PCH_THERMAL_DID_HSW_2 0x8C24 /* Haswell PCH */
#define PCH_THERMAL_DID_WPT 0x9CA4 /* Wildcat Point */
#define PCH_THERMAL_DID_SKL 0x9D31 /* Skylake PCH */
+#define PCH_THERMAL_DID_SKL_H 0xA131 /* Skylake PCH 100 series */
/* Wildcat Point-LP PCH Thermal registers */
#define WPT_TEMP 0x0000 /* Temperature */
@@ -273,37 +274,44 @@ static struct thermal_zone_device_ops tzd_ops = {
.get_trip_temp = pch_get_trip_temp,
};
+enum board_ids {
+ board_hsw,
+ board_wpt,
+ board_skl,
+};
+
+static const struct board_info {
+ const char *name;
+ const struct pch_dev_ops *ops;
+} board_info[] = {
+ [board_hsw] = {
+ .name = "pch_haswell",
+ .ops = &pch_dev_ops_wpt,
+ },
+ [board_wpt] = {
+ .name = "pch_wildcat_point",
+ .ops = &pch_dev_ops_wpt,
+ },
+ [board_skl] = {
+ .name = "pch_skylake",
+ .ops = &pch_dev_ops_wpt,
+ },
+};
static int intel_pch_thermal_probe(struct pci_dev *pdev,
const struct pci_device_id *id)
{
+ enum board_ids board_id = id->driver_data;
+ const struct board_info *bi = &board_info[board_id];
struct pch_thermal_device *ptd;
int err;
int nr_trips;
- char *dev_name;
ptd = devm_kzalloc(&pdev->dev, sizeof(*ptd), GFP_KERNEL);
if (!ptd)
return -ENOMEM;
- switch (pdev->device) {
- case PCH_THERMAL_DID_WPT:
- ptd->ops = &pch_dev_ops_wpt;
- dev_name = "pch_wildcat_point";
- break;
- case PCH_THERMAL_DID_SKL:
- ptd->ops = &pch_dev_ops_wpt;
- dev_name = "pch_skylake";
- break;
- case PCH_THERMAL_DID_HSW_1:
- case PCH_THERMAL_DID_HSW_2:
- ptd->ops = &pch_dev_ops_wpt;
- dev_name = "pch_haswell";
- break;
- default:
- dev_err(&pdev->dev, "unknown pch thermal device\n");
- return -ENODEV;
- }
+ ptd->ops = bi->ops;
pci_set_drvdata(pdev, ptd);
ptd->pdev = pdev;
@@ -331,11 +339,11 @@ static int intel_pch_thermal_probe(struct pci_dev *pdev,
if (err)
goto error_cleanup;
- ptd->tzd = thermal_zone_device_register(dev_name, nr_trips, 0, ptd,
+ ptd->tzd = thermal_zone_device_register(bi->name, nr_trips, 0, ptd,
&tzd_ops, NULL, 0, 0);
if (IS_ERR(ptd->tzd)) {
dev_err(&pdev->dev, "Failed to register thermal zone %s\n",
- dev_name);
+ bi->name);
err = PTR_ERR(ptd->tzd);
goto error_cleanup;
}
@@ -380,10 +388,16 @@ static int intel_pch_thermal_resume(struct device *device)
}
static struct pci_device_id intel_pch_thermal_id[] = {
- { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCH_THERMAL_DID_WPT) },
- { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCH_THERMAL_DID_SKL) },
- { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCH_THERMAL_DID_HSW_1) },
- { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCH_THERMAL_DID_HSW_2) },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCH_THERMAL_DID_HSW_1),
+ .driver_data = board_hsw, },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCH_THERMAL_DID_HSW_2),
+ .driver_data = board_hsw, },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCH_THERMAL_DID_WPT),
+ .driver_data = board_wpt, },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCH_THERMAL_DID_SKL),
+ .driver_data = board_skl, },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCH_THERMAL_DID_SKL_H),
+ .driver_data = board_skl, },
{ 0, },
};
MODULE_DEVICE_TABLE(pci, intel_pch_thermal_id);
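The probe rewrite swaps a switch on pdev->device for table-driven lookup: .driver_data in the id table indexes a board-info array, so supporting a new PCH becomes a one-line id-table change. The pattern in miniature (device ids made up):

#include <linux/pci.h>

enum example_boards { board_a, board_b };

static const char *const example_names[] = {
	[board_a] = "example_a",
	[board_b] = "example_b",
};

static const struct pci_device_id example_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x1234), .driver_data = board_a },
	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x5678), .driver_data = board_b },
	{ }
};

static int example_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	/* id points at the matching table entry; no switch needed */
	dev_info(&pdev->dev, "board: %s\n", example_names[id->driver_data]);
	return 0;
}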
diff --git a/drivers/thermal/intel_powerclamp.c b/drivers/thermal/intel_powerclamp.c
index afada655f861..df64692e9e64 100644
--- a/drivers/thermal/intel_powerclamp.c
+++ b/drivers/thermal/intel_powerclamp.c
@@ -43,7 +43,6 @@
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/kthread.h>
-#include <linux/freezer.h>
#include <linux/cpu.h>
#include <linux/thermal.h>
#include <linux/slab.h>
@@ -56,7 +55,6 @@
#include <asm/msr.h>
#include <asm/mwait.h>
#include <asm/cpu_device_id.h>
-#include <asm/idle.h>
#include <asm/hardirq.h>
#define MAX_TARGET_RATIO (50U)
@@ -86,11 +84,26 @@ static unsigned int control_cpu; /* The cpu assigned to collect stat and update
*/
static bool clamping;
+static const struct sched_param sparam = {
+ .sched_priority = MAX_USER_RT_PRIO / 2,
+};
+struct powerclamp_worker_data {
+ struct kthread_worker *worker;
+ struct kthread_work balancing_work;
+ struct kthread_delayed_work idle_injection_work;
+ unsigned int cpu;
+ unsigned int count;
+ unsigned int guard;
+ unsigned int window_size_now;
+ unsigned int target_ratio;
+ unsigned int duration_jiffies;
+ bool clamping;
+};
-static struct task_struct * __percpu *powerclamp_thread;
+static struct powerclamp_worker_data * __percpu worker_data;
static struct thermal_cooling_device *cooling_dev;
static unsigned long *cpu_clamping_mask; /* bit map for tracking per cpu
- * clamping thread
+ * clamping kthread worker
*/
static unsigned int duration;
@@ -262,11 +275,6 @@ static u64 pkg_state_counter(void)
return count;
}
-static void noop_timer(unsigned long foo)
-{
- /* empty... just the fact that we get the interrupt wakes us up */
-}
-
static unsigned int get_compensation(int ratio)
{
unsigned int comp = 0;
@@ -368,103 +376,79 @@ static bool powerclamp_adjust_controls(unsigned int target_ratio,
return set_target_ratio + guard <= current_ratio;
}
-static int clamp_thread(void *arg)
+static void clamp_balancing_func(struct kthread_work *work)
{
- int cpunr = (unsigned long)arg;
- DEFINE_TIMER(wakeup_timer, noop_timer, 0, 0);
- static const struct sched_param param = {
- .sched_priority = MAX_USER_RT_PRIO/2,
- };
- unsigned int count = 0;
- unsigned int target_ratio;
+ struct powerclamp_worker_data *w_data;
+ int sleeptime;
+ unsigned long target_jiffies;
+ unsigned int compensated_ratio;
+ int interval; /* jiffies to sleep for each attempt */
- set_bit(cpunr, cpu_clamping_mask);
- set_freezable();
- init_timer_on_stack(&wakeup_timer);
- sched_setscheduler(current, SCHED_FIFO, &param);
-
- while (true == clamping && !kthread_should_stop() &&
- cpu_online(cpunr)) {
- int sleeptime;
- unsigned long target_jiffies;
- unsigned int guard;
- unsigned int compensated_ratio;
- int interval; /* jiffies to sleep for each attempt */
- unsigned int duration_jiffies = msecs_to_jiffies(duration);
- unsigned int window_size_now;
-
- try_to_freeze();
- /*
- * make sure user selected ratio does not take effect until
- * the next round. adjust target_ratio if user has changed
- * target such that we can converge quickly.
- */
- target_ratio = set_target_ratio;
- guard = 1 + target_ratio/20;
- window_size_now = window_size;
- count++;
-
- /*
- * systems may have different ability to enter package level
- * c-states, thus we need to compensate the injected idle ratio
- * to achieve the actual target reported by the HW.
- */
- compensated_ratio = target_ratio +
- get_compensation(target_ratio);
- if (compensated_ratio <= 0)
- compensated_ratio = 1;
- interval = duration_jiffies * 100 / compensated_ratio;
-
- /* align idle time */
- target_jiffies = roundup(jiffies, interval);
- sleeptime = target_jiffies - jiffies;
- if (sleeptime <= 0)
- sleeptime = 1;
- schedule_timeout_interruptible(sleeptime);
- /*
- * only elected controlling cpu can collect stats and update
- * control parameters.
- */
- if (cpunr == control_cpu && !(count%window_size_now)) {
- should_skip =
- powerclamp_adjust_controls(target_ratio,
- guard, window_size_now);
- smp_mb();
- }
+ w_data = container_of(work, struct powerclamp_worker_data,
+ balancing_work);
- if (should_skip)
- continue;
-
- target_jiffies = jiffies + duration_jiffies;
- mod_timer(&wakeup_timer, target_jiffies);
- if (unlikely(local_softirq_pending()))
- continue;
- /*
- * stop tick sched during idle time, interrupts are still
- * allowed. thus jiffies are updated properly.
- */
- preempt_disable();
- /* mwait until target jiffies is reached */
- while (time_before(jiffies, target_jiffies)) {
- unsigned long ecx = 1;
- unsigned long eax = target_mwait;
-
- /*
- * REVISIT: may call enter_idle() to notify drivers who
- * can save power during cpu idle. same for exit_idle()
- */
- local_touch_nmi();
- stop_critical_timings();
- mwait_idle_with_hints(eax, ecx);
- start_critical_timings();
- atomic_inc(&idle_wakeup_counter);
- }
- preempt_enable();
+ /*
+ * make sure user selected ratio does not take effect until
+ * the next round. adjust target_ratio if user has changed
+ * target such that we can converge quickly.
+ */
+ w_data->target_ratio = READ_ONCE(set_target_ratio);
+ w_data->guard = 1 + w_data->target_ratio / 20;
+ w_data->window_size_now = window_size;
+ w_data->duration_jiffies = msecs_to_jiffies(duration);
+ w_data->count++;
+
+ /*
+ * systems may have different ability to enter package level
+ * c-states, thus we need to compensate the injected idle ratio
+ * to achieve the actual target reported by the HW.
+ */
+ compensated_ratio = w_data->target_ratio +
+ get_compensation(w_data->target_ratio);
+ if (compensated_ratio <= 0)
+ compensated_ratio = 1;
+ interval = w_data->duration_jiffies * 100 / compensated_ratio;
+
+ /* align idle time */
+ target_jiffies = roundup(jiffies, interval);
+ sleeptime = target_jiffies - jiffies;
+ if (sleeptime <= 0)
+ sleeptime = 1;
+
+ if (clamping && w_data->clamping && cpu_online(w_data->cpu))
+ kthread_queue_delayed_work(w_data->worker,
+ &w_data->idle_injection_work,
+ sleeptime);
+}
+
+static void clamp_idle_injection_func(struct kthread_work *work)
+{
+ struct powerclamp_worker_data *w_data;
+
+ w_data = container_of(work, struct powerclamp_worker_data,
+ idle_injection_work.work);
+
+ /*
+ * only elected controlling cpu can collect stats and update
+ * control parameters.
+ */
+ if (w_data->cpu == control_cpu &&
+ !(w_data->count % w_data->window_size_now)) {
+ should_skip =
+ powerclamp_adjust_controls(w_data->target_ratio,
+ w_data->guard,
+ w_data->window_size_now);
+ smp_mb();
}
- del_timer_sync(&wakeup_timer);
- clear_bit(cpunr, cpu_clamping_mask);
- return 0;
+ if (should_skip)
+ goto balance;
+
+ play_idle(jiffies_to_msecs(w_data->duration_jiffies));
+
+balance:
+ if (clamping && w_data->clamping && cpu_online(w_data->cpu))
+ kthread_queue_work(w_data->worker, &w_data->balancing_work);
}
/*
@@ -508,10 +492,60 @@ static void poll_pkg_cstate(struct work_struct *dummy)
schedule_delayed_work(&poll_pkg_cstate_work, HZ);
}
+static void start_power_clamp_worker(unsigned long cpu)
+{
+ struct powerclamp_worker_data *w_data = per_cpu_ptr(worker_data, cpu);
+ struct kthread_worker *worker;
+
+ worker = kthread_create_worker_on_cpu(cpu, 0, "kidle_inject/%ld", cpu);
+ if (IS_ERR(worker))
+ return;
+
+ w_data->worker = worker;
+ w_data->count = 0;
+ w_data->cpu = cpu;
+ w_data->clamping = true;
+ set_bit(cpu, cpu_clamping_mask);
+ sched_setscheduler(worker->task, SCHED_FIFO, &sparam);
+ kthread_init_work(&w_data->balancing_work, clamp_balancing_func);
+ kthread_init_delayed_work(&w_data->idle_injection_work,
+ clamp_idle_injection_func);
+ kthread_queue_work(w_data->worker, &w_data->balancing_work);
+}
+
+static void stop_power_clamp_worker(unsigned long cpu)
+{
+ struct powerclamp_worker_data *w_data = per_cpu_ptr(worker_data, cpu);
+
+ if (!w_data->worker)
+ return;
+
+ w_data->clamping = false;
+ /*
+ * Make sure that all work that gets queued after this point sees
+ * the clamping disabled. The counterpart is not needed because
+ * there is an implicit memory barrier when the queued work
+ * is processed.
+ */
+ smp_wmb();
+ kthread_cancel_work_sync(&w_data->balancing_work);
+ kthread_cancel_delayed_work_sync(&w_data->idle_injection_work);
+ /*
+ * The balancing work might still be queued here because
+ * the handling of the "clamping" variable, cancel, and queue
+ * operations are not synchronized via a lock. But it is not
+ * a big deal. The balancing work is fast, and destroying the
+ * kthread worker will wait for it.
+ */
+ clear_bit(w_data->cpu, cpu_clamping_mask);
+ kthread_destroy_worker(w_data->worker);
+
+ w_data->worker = NULL;
+}
+
static int start_power_clamp(void)
{
unsigned long cpu;
- struct task_struct *thread;
set_target_ratio = clamp(set_target_ratio, 0U, MAX_TARGET_RATIO - 1);
/* prevent cpu hotplug */
@@ -525,22 +559,9 @@ static int start_power_clamp(void)
clamping = true;
schedule_delayed_work(&poll_pkg_cstate_work, 0);
- /* start one thread per online cpu */
+ /* start one kthread worker per online cpu */
for_each_online_cpu(cpu) {
- struct task_struct **p =
- per_cpu_ptr(powerclamp_thread, cpu);
-
- thread = kthread_create_on_node(clamp_thread,
- (void *) cpu,
- cpu_to_node(cpu),
- "kidle_inject/%ld", cpu);
- /* bind to cpu here */
- if (likely(!IS_ERR(thread))) {
- kthread_bind(thread, cpu);
- wake_up_process(thread);
- *p = thread;
- }
-
+ start_power_clamp_worker(cpu);
}
put_online_cpus();
@@ -550,71 +571,49 @@ static int start_power_clamp(void)
static void end_power_clamp(void)
{
int i;
- struct task_struct *thread;
- clamping = false;
/*
- * make clamping visible to other cpus and give per cpu clamping threads
- * sometime to exit, or gets killed later.
+ * Block requeuing in all the kthread workers. They will flush and
+ * stop faster.
*/
- smp_mb();
- msleep(20);
+ clamping = false;
if (bitmap_weight(cpu_clamping_mask, num_possible_cpus())) {
for_each_set_bit(i, cpu_clamping_mask, num_possible_cpus()) {
- pr_debug("clamping thread for cpu %d alive, kill\n", i);
- thread = *per_cpu_ptr(powerclamp_thread, i);
- kthread_stop(thread);
+ pr_debug("clamping worker for cpu %d alive, destroy\n",
+ i);
+ stop_power_clamp_worker(i);
}
}
}
-static int powerclamp_cpu_callback(struct notifier_block *nfb,
- unsigned long action, void *hcpu)
+static int powerclamp_cpu_online(unsigned int cpu)
{
- unsigned long cpu = (unsigned long)hcpu;
- struct task_struct *thread;
- struct task_struct **percpu_thread =
- per_cpu_ptr(powerclamp_thread, cpu);
-
- if (false == clamping)
- goto exit_ok;
-
- switch (action) {
- case CPU_ONLINE:
- thread = kthread_create_on_node(clamp_thread,
- (void *) cpu,
- cpu_to_node(cpu),
- "kidle_inject/%lu", cpu);
- if (likely(!IS_ERR(thread))) {
- kthread_bind(thread, cpu);
- wake_up_process(thread);
- *percpu_thread = thread;
- }
- /* prefer BSP as controlling CPU */
- if (cpu == 0) {
- control_cpu = 0;
- smp_mb();
- }
- break;
- case CPU_DEAD:
- if (test_bit(cpu, cpu_clamping_mask)) {
- pr_err("cpu %lu dead but powerclamping thread is not\n",
- cpu);
- kthread_stop(*percpu_thread);
- }
- if (cpu == control_cpu) {
- control_cpu = smp_processor_id();
- smp_mb();
- }
+ if (clamping == false)
+ return 0;
+ start_power_clamp_worker(cpu);
+ /* prefer BSP as controlling CPU */
+ if (cpu == 0) {
+ control_cpu = 0;
+ smp_mb();
}
-
-exit_ok:
- return NOTIFY_OK;
+ return 0;
}
-static struct notifier_block powerclamp_cpu_notifier = {
- .notifier_call = powerclamp_cpu_callback,
-};
+static int powerclamp_cpu_predown(unsigned int cpu)
+{
+ if (clamping == false)
+ return 0;
+
+ stop_power_clamp_worker(cpu);
+ if (cpu != control_cpu)
+ return 0;
+
+ control_cpu = cpumask_first(cpu_online_mask);
+ if (control_cpu == cpu)
+ control_cpu = cpumask_next(cpu, cpu_online_mask);
+ smp_mb();
+ return 0;
+}
static int powerclamp_get_max_state(struct thermal_cooling_device *cdev,
unsigned long *state)
@@ -742,6 +741,8 @@ file_error:
debugfs_remove_recursive(debug_dir);
}
+static enum cpuhp_state hp_state;
+
static int __init powerclamp_init(void)
{
int retval;
@@ -759,10 +760,17 @@ static int __init powerclamp_init(void)
/* set default limit, maybe adjusted during runtime based on feedback */
window_size = 2;
- register_hotcpu_notifier(&powerclamp_cpu_notifier);
+ retval = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
+ "thermal/intel_powerclamp:online",
+ powerclamp_cpu_online,
+ powerclamp_cpu_predown);
+ if (retval < 0)
+ goto exit_free;
+
+ hp_state = retval;
- powerclamp_thread = alloc_percpu(struct task_struct *);
- if (!powerclamp_thread) {
+ worker_data = alloc_percpu(struct powerclamp_worker_data);
+ if (!worker_data) {
retval = -ENOMEM;
goto exit_unregister;
}
@@ -782,9 +790,9 @@ static int __init powerclamp_init(void)
return 0;
exit_free_thread:
- free_percpu(powerclamp_thread);
+ free_percpu(worker_data);
exit_unregister:
- unregister_hotcpu_notifier(&powerclamp_cpu_notifier);
+ cpuhp_remove_state_nocalls(hp_state);
exit_free:
kfree(cpu_clamping_mask);
return retval;
@@ -793,9 +801,9 @@ module_init(powerclamp_init);
static void __exit powerclamp_exit(void)
{
- unregister_hotcpu_notifier(&powerclamp_cpu_notifier);
end_power_clamp();
- free_percpu(powerclamp_thread);
+ cpuhp_remove_state_nocalls(hp_state);
+ free_percpu(worker_data);
thermal_cooling_device_unregister(cooling_dev);
kfree(cpu_clamping_mask);
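The powerclamp rewrite replaces raw per-CPU kthreads (plus an on-stack wakeup timer and freezer handling) with the kthread-worker API, where the timing moves into kthread_queue_delayed_work(). The lifecycle, reduced to its essentials (handler and globals hypothetical):

#include <linux/err.h>
#include <linux/kthread.h>
#include <linux/printk.h>
#include <linux/smp.h>

static struct kthread_worker *example_worker;
static struct kthread_work example_work;

static void example_func(struct kthread_work *work)
{
	pr_info("running on cpu %d\n", smp_processor_id());
}

static int example_start(unsigned int cpu)
{
	example_worker = kthread_create_worker_on_cpu(cpu, 0,
						      "example/%u", cpu);
	if (IS_ERR(example_worker))
		return PTR_ERR(example_worker);

	kthread_init_work(&example_work, example_func);
	kthread_queue_work(example_worker, &example_work);
	return 0;
}

static void example_stop(void)
{
	kthread_cancel_work_sync(&example_work);
	kthread_destroy_worker(example_worker);	/* flushes remaining work */
}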
diff --git a/drivers/thermal/max77620_thermal.c b/drivers/thermal/max77620_thermal.c
index 83905ff46e40..e9a1fe342760 100644
--- a/drivers/thermal/max77620_thermal.c
+++ b/drivers/thermal/max77620_thermal.c
@@ -149,6 +149,7 @@ static struct platform_device_id max77620_thermal_devtype[] = {
{ .name = "max77620-thermal", },
{},
};
+MODULE_DEVICE_TABLE(platform, max77620_thermal_devtype);
static struct platform_driver max77620_thermal_driver = {
.driver = {
diff --git a/drivers/thermal/qcom-spmi-temp-alarm.c b/drivers/thermal/qcom-spmi-temp-alarm.c
index 819c6d5d7aa7..f50241962ad2 100644
--- a/drivers/thermal/qcom-spmi-temp-alarm.c
+++ b/drivers/thermal/qcom-spmi-temp-alarm.c
@@ -200,7 +200,7 @@ static int qpnp_tm_probe(struct platform_device *pdev)
struct qpnp_tm_chip *chip;
struct device_node *node;
u8 type, subtype;
- u32 res[2];
+ u32 res;
int ret, irq;
node = pdev->dev.of_node;
@@ -215,7 +215,7 @@ static int qpnp_tm_probe(struct platform_device *pdev)
if (!chip->map)
return -ENXIO;
- ret = of_property_read_u32_array(node, "reg", res, 2);
+ ret = of_property_read_u32(node, "reg", &res);
if (ret < 0)
return ret;
@@ -228,7 +228,7 @@ static int qpnp_tm_probe(struct platform_device *pdev)
if (PTR_ERR(chip->adc) == -EPROBE_DEFER)
return PTR_ERR(chip->adc);
- chip->base = res[0];
+ chip->base = res;
ret = qpnp_tm_read(chip, QPNP_TM_REG_TYPE, &type);
if (ret < 0) {
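The "reg" property of an SPMI peripheral is a single cell, so the two-element array read shrinks to a plain of_property_read_u32(). For reference (hypothetical caller):

#include <linux/of.h>

static int example_read_base(struct device_node *np, u32 *base)
{
	/* returns 0 on success, -EINVAL/-ENODATA/-EOVERFLOW otherwise */
	return of_property_read_u32(np, "reg", base);
}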
diff --git a/drivers/thermal/rockchip_thermal.c b/drivers/thermal/rockchip_thermal.c
index e227a9f0acf7..b811b0fb61b1 100644
--- a/drivers/thermal/rockchip_thermal.c
+++ b/drivers/thermal/rockchip_thermal.c
@@ -524,11 +524,6 @@ static void rk_tsadcv2_initialize(struct regmap *grf, void __iomem *regs,
regs + TSADCV2_AUTO_PERIOD_HT);
writel_relaxed(TSADCV2_HIGHT_TSHUT_DEBOUNCE_COUNT,
regs + TSADCV2_HIGHT_TSHUT_DEBOUNCE);
-
- if (IS_ERR(grf)) {
- pr_warn("%s: Missing rockchip,grf property\n", __func__);
- return;
- }
}
/**
@@ -971,6 +966,8 @@ static int rockchip_configure_from_dt(struct device *dev,
* need this property.
*/
thermal->grf = syscon_regmap_lookup_by_phandle(np, "rockchip,grf");
+ if (IS_ERR(thermal->grf))
+ dev_warn(dev, "Missing rockchip,grf property\n");
return 0;
}
diff --git a/drivers/thermal/tango_thermal.c b/drivers/thermal/tango_thermal.c
index 201304aeafeb..4e67795cb6ce 100644
--- a/drivers/thermal/tango_thermal.c
+++ b/drivers/thermal/tango_thermal.c
@@ -107,6 +107,7 @@ static const struct of_device_id tango_sensor_ids[] = {
},
{ /* sentinel */ }
};
+MODULE_DEVICE_TABLE(of, tango_sensor_ids);
static struct platform_driver tango_thermal_driver = {
.probe = tango_thermal_probe,
diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c
index 226b0b4aced6..641faab6e24b 100644
--- a/drivers/thermal/thermal_core.c
+++ b/drivers/thermal/thermal_core.c
@@ -5,22 +5,9 @@
* Copyright (C) 2008 Zhang Rui <rui.zhang@intel.com>
* Copyright (C) 2008 Sujith Thomas <sujith.thomas@intel.com>
*
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- *
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
- *
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -64,6 +51,13 @@ static atomic_t in_suspend;
static struct thermal_governor *def_governor;
+/*
+ * Governor section: set of functions to handle thermal governors
+ *
+ * Functions to help in the life cycle of thermal governors within
+ * the thermal core and by the thermal governor code.
+ */
+
static struct thermal_governor *__find_governor(const char *name)
{
struct thermal_governor *pos;
@@ -142,11 +136,16 @@ int thermal_register_governor(struct thermal_governor *governor)
mutex_lock(&thermal_governor_lock);
err = -EBUSY;
- if (__find_governor(governor->name) == NULL) {
+ if (!__find_governor(governor->name)) {
+ bool match_default;
+
err = 0;
list_add(&governor->governor_list, &thermal_governor_list);
- if (!def_governor && !strncmp(governor->name,
- DEFAULT_THERMAL_GOVERNOR, THERMAL_NAME_LENGTH))
+ match_default = !strncmp(governor->name,
+ DEFAULT_THERMAL_GOVERNOR,
+ THERMAL_NAME_LENGTH);
+
+ if (!def_governor && match_default)
def_governor = governor;
}
@@ -188,14 +187,14 @@ void thermal_unregister_governor(struct thermal_governor *governor)
mutex_lock(&thermal_governor_lock);
- if (__find_governor(governor->name) == NULL)
+ if (!__find_governor(governor->name))
goto exit;
mutex_lock(&thermal_list_lock);
list_for_each_entry(pos, &thermal_tz_list, node) {
if (!strncasecmp(pos->governor->name, governor->name,
- THERMAL_NAME_LENGTH))
+ THERMAL_NAME_LENGTH))
thermal_set_governor(pos, NULL);
}
@@ -203,195 +202,92 @@ void thermal_unregister_governor(struct thermal_governor *governor)
list_del(&governor->governor_list);
exit:
mutex_unlock(&thermal_governor_lock);
- return;
}
-static int get_idr(struct idr *idr, struct mutex *lock, int *id)
+int thermal_zone_device_set_policy(struct thermal_zone_device *tz,
+ char *policy)
{
- int ret;
-
- if (lock)
- mutex_lock(lock);
- ret = idr_alloc(idr, NULL, 0, 0, GFP_KERNEL);
- if (lock)
- mutex_unlock(lock);
- if (unlikely(ret < 0))
- return ret;
- *id = ret;
- return 0;
-}
-
-static void release_idr(struct idr *idr, struct mutex *lock, int id)
-{
- if (lock)
- mutex_lock(lock);
- idr_remove(idr, id);
- if (lock)
- mutex_unlock(lock);
-}
-
-int get_tz_trend(struct thermal_zone_device *tz, int trip)
-{
- enum thermal_trend trend;
-
- if (tz->emul_temperature || !tz->ops->get_trend ||
- tz->ops->get_trend(tz, trip, &trend)) {
- if (tz->temperature > tz->last_temperature)
- trend = THERMAL_TREND_RAISING;
- else if (tz->temperature < tz->last_temperature)
- trend = THERMAL_TREND_DROPPING;
- else
- trend = THERMAL_TREND_STABLE;
- }
-
- return trend;
-}
-EXPORT_SYMBOL(get_tz_trend);
-
-struct thermal_instance *get_thermal_instance(struct thermal_zone_device *tz,
- struct thermal_cooling_device *cdev, int trip)
-{
- struct thermal_instance *pos = NULL;
- struct thermal_instance *target_instance = NULL;
+ struct thermal_governor *gov;
+ int ret = -EINVAL;
+ mutex_lock(&thermal_governor_lock);
mutex_lock(&tz->lock);
- mutex_lock(&cdev->lock);
- list_for_each_entry(pos, &tz->thermal_instances, tz_node) {
- if (pos->tz == tz && pos->trip == trip && pos->cdev == cdev) {
- target_instance = pos;
- break;
- }
- }
+ gov = __find_governor(strim(policy));
+ if (!gov)
+ goto exit;
- mutex_unlock(&cdev->lock);
- mutex_unlock(&tz->lock);
+ ret = thermal_set_governor(tz, gov);
- return target_instance;
-}
-EXPORT_SYMBOL(get_thermal_instance);
+exit:
+ mutex_unlock(&tz->lock);
+ mutex_unlock(&thermal_governor_lock);
-static void print_bind_err_msg(struct thermal_zone_device *tz,
- struct thermal_cooling_device *cdev, int ret)
-{
- dev_err(&tz->device, "binding zone %s with cdev %s failed:%d\n",
- tz->type, cdev->type, ret);
+ return ret;
}
-static void __bind(struct thermal_zone_device *tz, int mask,
- struct thermal_cooling_device *cdev,
- unsigned long *limits,
- unsigned int weight)
+int thermal_build_list_of_policies(char *buf)
{
- int i, ret;
+ struct thermal_governor *pos;
+ ssize_t count = 0;
+ ssize_t size = PAGE_SIZE;
- for (i = 0; i < tz->trips; i++) {
- if (mask & (1 << i)) {
- unsigned long upper, lower;
+ mutex_lock(&thermal_governor_lock);
- upper = THERMAL_NO_LIMIT;
- lower = THERMAL_NO_LIMIT;
- if (limits) {
- lower = limits[i * 2];
- upper = limits[i * 2 + 1];
- }
- ret = thermal_zone_bind_cooling_device(tz, i, cdev,
- upper, lower,
- weight);
- if (ret)
- print_bind_err_msg(tz, cdev, ret);
- }
+ list_for_each_entry(pos, &thermal_governor_list, governor_list) {
+ size = PAGE_SIZE - count;
+ count += scnprintf(buf + count, size, "%s ", pos->name);
}
-}
+ count += scnprintf(buf + count, size, "\n");
-static void __unbind(struct thermal_zone_device *tz, int mask,
- struct thermal_cooling_device *cdev)
-{
- int i;
+ mutex_unlock(&thermal_governor_lock);
- for (i = 0; i < tz->trips; i++)
- if (mask & (1 << i))
- thermal_zone_unbind_cooling_device(tz, i, cdev);
+ return count;
}
-static void bind_cdev(struct thermal_cooling_device *cdev)
+static int __init thermal_register_governors(void)
{
- int i, ret;
- const struct thermal_zone_params *tzp;
- struct thermal_zone_device *pos = NULL;
-
- mutex_lock(&thermal_list_lock);
+ int result;
- list_for_each_entry(pos, &thermal_tz_list, node) {
- if (!pos->tzp && !pos->ops->bind)
- continue;
+ result = thermal_gov_step_wise_register();
+ if (result)
+ return result;
- if (pos->ops->bind) {
- ret = pos->ops->bind(pos, cdev);
- if (ret)
- print_bind_err_msg(pos, cdev, ret);
- continue;
- }
+ result = thermal_gov_fair_share_register();
+ if (result)
+ return result;
- tzp = pos->tzp;
- if (!tzp || !tzp->tbp)
- continue;
+ result = thermal_gov_bang_bang_register();
+ if (result)
+ return result;
- for (i = 0; i < tzp->num_tbps; i++) {
- if (tzp->tbp[i].cdev || !tzp->tbp[i].match)
- continue;
- if (tzp->tbp[i].match(pos, cdev))
- continue;
- tzp->tbp[i].cdev = cdev;
- __bind(pos, tzp->tbp[i].trip_mask, cdev,
- tzp->tbp[i].binding_limits,
- tzp->tbp[i].weight);
- }
- }
+ result = thermal_gov_user_space_register();
+ if (result)
+ return result;
- mutex_unlock(&thermal_list_lock);
+ return thermal_gov_power_allocator_register();
}
-static void bind_tz(struct thermal_zone_device *tz)
+static void thermal_unregister_governors(void)
{
- int i, ret;
- struct thermal_cooling_device *pos = NULL;
- const struct thermal_zone_params *tzp = tz->tzp;
-
- if (!tzp && !tz->ops->bind)
- return;
-
- mutex_lock(&thermal_list_lock);
-
- /* If there is ops->bind, try to use ops->bind */
- if (tz->ops->bind) {
- list_for_each_entry(pos, &thermal_cdev_list, node) {
- ret = tz->ops->bind(tz, pos);
- if (ret)
- print_bind_err_msg(tz, pos, ret);
- }
- goto exit;
- }
-
- if (!tzp || !tzp->tbp)
- goto exit;
-
- list_for_each_entry(pos, &thermal_cdev_list, node) {
- for (i = 0; i < tzp->num_tbps; i++) {
- if (tzp->tbp[i].cdev || !tzp->tbp[i].match)
- continue;
- if (tzp->tbp[i].match(tz, pos))
- continue;
- tzp->tbp[i].cdev = pos;
- __bind(tz, tzp->tbp[i].trip_mask, pos,
- tzp->tbp[i].binding_limits,
- tzp->tbp[i].weight);
- }
- }
-exit:
- mutex_unlock(&thermal_list_lock);
+ thermal_gov_step_wise_unregister();
+ thermal_gov_fair_share_unregister();
+ thermal_gov_bang_bang_unregister();
+ thermal_gov_user_space_unregister();
+ thermal_gov_power_allocator_unregister();
}
+/*
+ * Zone update section: main control loop applied to each zone while
+ * monitoring in polling mode. The monitoring is done using a workqueue.
+ * The same update may be done on a zone by calling
+ * thermal_zone_device_update().
+ *
+ * An update means:
+ * - Non-critical trips will invoke the governor responsible for that zone;
+ * - Hot trips will produce a notification to userspace;
+ * - Critical trip point will cause a system shutdown.
+ */
static void thermal_zone_device_set_polling(struct thermal_zone_device *tz,
int delay)
{
@@ -420,14 +316,15 @@ static void monitor_thermal_zone(struct thermal_zone_device *tz)
}
static void handle_non_critical_trips(struct thermal_zone_device *tz,
- int trip, enum thermal_trip_type trip_type)
+ int trip,
+ enum thermal_trip_type trip_type)
{
tz->governor ? tz->governor->throttle(tz, trip) :
def_governor->throttle(tz, trip);
}
static void handle_critical_trips(struct thermal_zone_device *tz,
- int trip, enum thermal_trip_type trip_type)
+ int trip, enum thermal_trip_type trip_type)
{
int trip_temp;
@@ -471,105 +368,6 @@ static void handle_thermal_trip(struct thermal_zone_device *tz, int trip)
monitor_thermal_zone(tz);
}
-/**
- * thermal_zone_get_temp() - returns the temperature of a thermal zone
- * @tz: a valid pointer to a struct thermal_zone_device
- * @temp: a valid pointer to where to store the resulting temperature.
- *
- * When a valid thermal zone reference is passed, it will fetch its
- * temperature and fill @temp.
- *
- * Return: On success returns 0, an error code otherwise
- */
-int thermal_zone_get_temp(struct thermal_zone_device *tz, int *temp)
-{
- int ret = -EINVAL;
- int count;
- int crit_temp = INT_MAX;
- enum thermal_trip_type type;
-
- if (!tz || IS_ERR(tz) || !tz->ops->get_temp)
- goto exit;
-
- mutex_lock(&tz->lock);
-
- ret = tz->ops->get_temp(tz, temp);
-
- if (IS_ENABLED(CONFIG_THERMAL_EMULATION) && tz->emul_temperature) {
- for (count = 0; count < tz->trips; count++) {
- ret = tz->ops->get_trip_type(tz, count, &type);
- if (!ret && type == THERMAL_TRIP_CRITICAL) {
- ret = tz->ops->get_trip_temp(tz, count,
- &crit_temp);
- break;
- }
- }
-
- /*
- * Only allow emulating a temperature when the real temperature
- * is below the critical temperature so that the emulation code
- * cannot hide critical conditions.
- */
- if (!ret && *temp < crit_temp)
- *temp = tz->emul_temperature;
- }
-
- mutex_unlock(&tz->lock);
-exit:
- return ret;
-}
-EXPORT_SYMBOL_GPL(thermal_zone_get_temp);
-
-void thermal_zone_set_trips(struct thermal_zone_device *tz)
-{
- int low = -INT_MAX;
- int high = INT_MAX;
- int trip_temp, hysteresis;
- int i, ret;
-
- mutex_lock(&tz->lock);
-
- if (!tz->ops->set_trips || !tz->ops->get_trip_hyst)
- goto exit;
-
- for (i = 0; i < tz->trips; i++) {
- int trip_low;
-
- tz->ops->get_trip_temp(tz, i, &trip_temp);
- tz->ops->get_trip_hyst(tz, i, &hysteresis);
-
- trip_low = trip_temp - hysteresis;
-
- if (trip_low < tz->temperature && trip_low > low)
- low = trip_low;
-
- if (trip_temp > tz->temperature && trip_temp < high)
- high = trip_temp;
- }
-
- /* No need to change trip points */
- if (tz->prev_low_trip == low && tz->prev_high_trip == high)
- goto exit;
-
- tz->prev_low_trip = low;
- tz->prev_high_trip = high;
-
- dev_dbg(&tz->device,
- "new temperature boundaries: %d < x < %d\n", low, high);
-
- /*
- * Set a temperature window. When this window is left the driver
- * must inform the thermal core via thermal_zone_device_update.
- */
- ret = tz->ops->set_trips(tz, low, high);
- if (ret)
- dev_err(&tz->device, "Failed to set trips: %d\n", ret);
-
-exit:
- mutex_unlock(&tz->lock);
-}
-EXPORT_SYMBOL_GPL(thermal_zone_set_trips);
-
static void update_temperature(struct thermal_zone_device *tz)
{
int temp, ret;
@@ -629,6 +427,24 @@ void thermal_zone_device_update(struct thermal_zone_device *tz,
}
EXPORT_SYMBOL_GPL(thermal_zone_device_update);
+/**
+ * thermal_notify_framework - Sensor drivers use this API to notify framework
+ * @tz: thermal zone device
+ * @trip: indicates which trip point has been crossed
+ *
+ * This function handles the trip events from sensor drivers. It starts
+ * throttling the cooling devices according to the policy configured.
+ * For CRITICAL and HOT trip points, this notifies the respective drivers,
+ * and does actual throttling for other trip points, i.e., ACTIVE and PASSIVE.
+ * The throttling policy is based on the configured platform data; if no
+ * platform data is provided, this uses the step_wise throttling policy.
+ */
+void thermal_notify_framework(struct thermal_zone_device *tz, int trip)
+{
+ handle_thermal_trip(tz, trip);
+}
+EXPORT_SYMBOL_GPL(thermal_notify_framework);
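A minimal usage sketch of the API documented above; the caller-side function name is an assumption:

/* Sketch: a sensor driver forwarding a crossed trip to the framework. */
static void example_report_trip(struct thermal_zone_device *tz, int trip)
{
	/* delegates to handle_thermal_trip(), applying the zone's policy */
	thermal_notify_framework(tz, trip);
}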
+
static void thermal_zone_device_check(struct work_struct *work)
{
struct thermal_zone_device *tz = container_of(work, struct
@@ -637,445 +453,12 @@ static void thermal_zone_device_check(struct work_struct *work)
thermal_zone_device_update(tz, THERMAL_EVENT_UNSPECIFIED);
}
-/* sys I/F for thermal zone */
-
-#define to_thermal_zone(_dev) \
- container_of(_dev, struct thermal_zone_device, device)
-
-static ssize_t
-type_show(struct device *dev, struct device_attribute *attr, char *buf)
-{
- struct thermal_zone_device *tz = to_thermal_zone(dev);
-
- return sprintf(buf, "%s\n", tz->type);
-}
-
-static ssize_t
-temp_show(struct device *dev, struct device_attribute *attr, char *buf)
-{
- struct thermal_zone_device *tz = to_thermal_zone(dev);
- int temperature, ret;
-
- ret = thermal_zone_get_temp(tz, &temperature);
-
- if (ret)
- return ret;
-
- return sprintf(buf, "%d\n", temperature);
-}
-
-static ssize_t
-mode_show(struct device *dev, struct device_attribute *attr, char *buf)
-{
- struct thermal_zone_device *tz = to_thermal_zone(dev);
- enum thermal_device_mode mode;
- int result;
-
- if (!tz->ops->get_mode)
- return -EPERM;
-
- result = tz->ops->get_mode(tz, &mode);
- if (result)
- return result;
-
- return sprintf(buf, "%s\n", mode == THERMAL_DEVICE_ENABLED ? "enabled"
- : "disabled");
-}
-
-static ssize_t
-mode_store(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct thermal_zone_device *tz = to_thermal_zone(dev);
- int result;
-
- if (!tz->ops->set_mode)
- return -EPERM;
-
- if (!strncmp(buf, "enabled", sizeof("enabled") - 1))
- result = tz->ops->set_mode(tz, THERMAL_DEVICE_ENABLED);
- else if (!strncmp(buf, "disabled", sizeof("disabled") - 1))
- result = tz->ops->set_mode(tz, THERMAL_DEVICE_DISABLED);
- else
- result = -EINVAL;
-
- if (result)
- return result;
-
- return count;
-}
-
-static ssize_t
-trip_point_type_show(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- struct thermal_zone_device *tz = to_thermal_zone(dev);
- enum thermal_trip_type type;
- int trip, result;
-
- if (!tz->ops->get_trip_type)
- return -EPERM;
-
- if (!sscanf(attr->attr.name, "trip_point_%d_type", &trip))
- return -EINVAL;
-
- result = tz->ops->get_trip_type(tz, trip, &type);
- if (result)
- return result;
-
- switch (type) {
- case THERMAL_TRIP_CRITICAL:
- return sprintf(buf, "critical\n");
- case THERMAL_TRIP_HOT:
- return sprintf(buf, "hot\n");
- case THERMAL_TRIP_PASSIVE:
- return sprintf(buf, "passive\n");
- case THERMAL_TRIP_ACTIVE:
- return sprintf(buf, "active\n");
- default:
- return sprintf(buf, "unknown\n");
- }
-}
-
-static ssize_t
-trip_point_temp_store(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct thermal_zone_device *tz = to_thermal_zone(dev);
- int trip, ret;
- int temperature;
-
- if (!tz->ops->set_trip_temp)
- return -EPERM;
-
- if (!sscanf(attr->attr.name, "trip_point_%d_temp", &trip))
- return -EINVAL;
-
- if (kstrtoint(buf, 10, &temperature))
- return -EINVAL;
-
- ret = tz->ops->set_trip_temp(tz, trip, temperature);
- if (ret)
- return ret;
-
- thermal_zone_device_update(tz, THERMAL_EVENT_UNSPECIFIED);
-
- return count;
-}
-
-static ssize_t
-trip_point_temp_show(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- struct thermal_zone_device *tz = to_thermal_zone(dev);
- int trip, ret;
- int temperature;
-
- if (!tz->ops->get_trip_temp)
- return -EPERM;
-
- if (!sscanf(attr->attr.name, "trip_point_%d_temp", &trip))
- return -EINVAL;
-
- ret = tz->ops->get_trip_temp(tz, trip, &temperature);
-
- if (ret)
- return ret;
-
- return sprintf(buf, "%d\n", temperature);
-}
-
-static ssize_t
-trip_point_hyst_store(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct thermal_zone_device *tz = to_thermal_zone(dev);
- int trip, ret;
- int temperature;
-
- if (!tz->ops->set_trip_hyst)
- return -EPERM;
-
- if (!sscanf(attr->attr.name, "trip_point_%d_hyst", &trip))
- return -EINVAL;
-
- if (kstrtoint(buf, 10, &temperature))
- return -EINVAL;
-
- /*
- * We are not doing any check on the 'temperature' value
- * here. The driver implementing 'set_trip_hyst' has to
- * take care of this.
- */
- ret = tz->ops->set_trip_hyst(tz, trip, temperature);
-
- if (!ret)
- thermal_zone_set_trips(tz);
-
- return ret ? ret : count;
-}
-
-static ssize_t
-trip_point_hyst_show(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- struct thermal_zone_device *tz = to_thermal_zone(dev);
- int trip, ret;
- int temperature;
-
- if (!tz->ops->get_trip_hyst)
- return -EPERM;
-
- if (!sscanf(attr->attr.name, "trip_point_%d_hyst", &trip))
- return -EINVAL;
-
- ret = tz->ops->get_trip_hyst(tz, trip, &temperature);
-
- return ret ? ret : sprintf(buf, "%d\n", temperature);
-}
-
-static ssize_t
-passive_store(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct thermal_zone_device *tz = to_thermal_zone(dev);
- struct thermal_cooling_device *cdev = NULL;
- int state;
-
- if (!sscanf(buf, "%d\n", &state))
- return -EINVAL;
-
- /* sanity check: values below 1000 millicelcius don't make sense
- * and can cause the system to go into a thermal heart attack
- */
- if (state && state < 1000)
- return -EINVAL;
-
- if (state && !tz->forced_passive) {
- mutex_lock(&thermal_list_lock);
- list_for_each_entry(cdev, &thermal_cdev_list, node) {
- if (!strncmp("Processor", cdev->type,
- sizeof("Processor")))
- thermal_zone_bind_cooling_device(tz,
- THERMAL_TRIPS_NONE, cdev,
- THERMAL_NO_LIMIT,
- THERMAL_NO_LIMIT,
- THERMAL_WEIGHT_DEFAULT);
- }
- mutex_unlock(&thermal_list_lock);
- if (!tz->passive_delay)
- tz->passive_delay = 1000;
- } else if (!state && tz->forced_passive) {
- mutex_lock(&thermal_list_lock);
- list_for_each_entry(cdev, &thermal_cdev_list, node) {
- if (!strncmp("Processor", cdev->type,
- sizeof("Processor")))
- thermal_zone_unbind_cooling_device(tz,
- THERMAL_TRIPS_NONE,
- cdev);
- }
- mutex_unlock(&thermal_list_lock);
- tz->passive_delay = 0;
- }
-
- tz->forced_passive = state;
-
- thermal_zone_device_update(tz, THERMAL_EVENT_UNSPECIFIED);
-
- return count;
-}
-
-static ssize_t
-passive_show(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- struct thermal_zone_device *tz = to_thermal_zone(dev);
-
- return sprintf(buf, "%d\n", tz->forced_passive);
-}
-
-static ssize_t
-policy_store(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
-{
- int ret = -EINVAL;
- struct thermal_zone_device *tz = to_thermal_zone(dev);
- struct thermal_governor *gov;
- char name[THERMAL_NAME_LENGTH];
-
- snprintf(name, sizeof(name), "%s", buf);
-
- mutex_lock(&thermal_governor_lock);
- mutex_lock(&tz->lock);
-
- gov = __find_governor(strim(name));
- if (!gov)
- goto exit;
-
- ret = thermal_set_governor(tz, gov);
- if (!ret)
- ret = count;
-
-exit:
- mutex_unlock(&tz->lock);
- mutex_unlock(&thermal_governor_lock);
- return ret;
-}
-
-static ssize_t
-policy_show(struct device *dev, struct device_attribute *devattr, char *buf)
-{
- struct thermal_zone_device *tz = to_thermal_zone(dev);
-
- return sprintf(buf, "%s\n", tz->governor->name);
-}
-
-static ssize_t
-available_policies_show(struct device *dev, struct device_attribute *devattr,
- char *buf)
-{
- struct thermal_governor *pos;
- ssize_t count = 0;
- ssize_t size = PAGE_SIZE;
-
- mutex_lock(&thermal_governor_lock);
-
- list_for_each_entry(pos, &thermal_governor_list, governor_list) {
- size = PAGE_SIZE - count;
- count += scnprintf(buf + count, size, "%s ", pos->name);
- }
- count += scnprintf(buf + count, size, "\n");
-
- mutex_unlock(&thermal_governor_lock);
-
- return count;
-}
-
-static ssize_t
-emul_temp_store(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct thermal_zone_device *tz = to_thermal_zone(dev);
- int ret = 0;
- int temperature;
-
- if (kstrtoint(buf, 10, &temperature))
- return -EINVAL;
-
- if (!tz->ops->set_emul_temp) {
- mutex_lock(&tz->lock);
- tz->emul_temperature = temperature;
- mutex_unlock(&tz->lock);
- } else {
- ret = tz->ops->set_emul_temp(tz, temperature);
- }
-
- if (!ret)
- thermal_zone_device_update(tz, THERMAL_EVENT_UNSPECIFIED);
-
- return ret ? ret : count;
-}
-static DEVICE_ATTR(emul_temp, S_IWUSR, NULL, emul_temp_store);
-
-static ssize_t
-sustainable_power_show(struct device *dev, struct device_attribute *devattr,
- char *buf)
-{
- struct thermal_zone_device *tz = to_thermal_zone(dev);
-
- if (tz->tzp)
- return sprintf(buf, "%u\n", tz->tzp->sustainable_power);
- else
- return -EIO;
-}
-
-static ssize_t
-sustainable_power_store(struct device *dev, struct device_attribute *devattr,
- const char *buf, size_t count)
-{
- struct thermal_zone_device *tz = to_thermal_zone(dev);
- u32 sustainable_power;
-
- if (!tz->tzp)
- return -EIO;
-
- if (kstrtou32(buf, 10, &sustainable_power))
- return -EINVAL;
-
- tz->tzp->sustainable_power = sustainable_power;
-
- return count;
-}
-static DEVICE_ATTR(sustainable_power, S_IWUSR | S_IRUGO, sustainable_power_show,
- sustainable_power_store);
-
-#define create_s32_tzp_attr(name) \
- static ssize_t \
- name##_show(struct device *dev, struct device_attribute *devattr, \
- char *buf) \
- { \
- struct thermal_zone_device *tz = to_thermal_zone(dev); \
- \
- if (tz->tzp) \
- return sprintf(buf, "%d\n", tz->tzp->name); \
- else \
- return -EIO; \
- } \
- \
- static ssize_t \
- name##_store(struct device *dev, struct device_attribute *devattr, \
- const char *buf, size_t count) \
- { \
- struct thermal_zone_device *tz = to_thermal_zone(dev); \
- s32 value; \
- \
- if (!tz->tzp) \
- return -EIO; \
- \
- if (kstrtos32(buf, 10, &value)) \
- return -EINVAL; \
- \
- tz->tzp->name = value; \
- \
- return count; \
- } \
- static DEVICE_ATTR(name, S_IWUSR | S_IRUGO, name##_show, name##_store)
-
-create_s32_tzp_attr(k_po);
-create_s32_tzp_attr(k_pu);
-create_s32_tzp_attr(k_i);
-create_s32_tzp_attr(k_d);
-create_s32_tzp_attr(integral_cutoff);
-create_s32_tzp_attr(slope);
-create_s32_tzp_attr(offset);
-#undef create_s32_tzp_attr
-
-static struct device_attribute *dev_tzp_attrs[] = {
- &dev_attr_sustainable_power,
- &dev_attr_k_po,
- &dev_attr_k_pu,
- &dev_attr_k_i,
- &dev_attr_k_d,
- &dev_attr_integral_cutoff,
- &dev_attr_slope,
- &dev_attr_offset,
-};
-
-static int create_tzp_attrs(struct device *dev)
-{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(dev_tzp_attrs); i++) {
- int ret;
- struct device_attribute *dev_attr = dev_tzp_attrs[i];
-
- ret = device_create_file(dev, dev_attr);
- if (ret)
- return ret;
- }
-
- return 0;
-}
+/*
+ * Power actor section: interface to power actors to estimate power
+ *
+ * Set of functions used to interact with cooling devices that know
+ * how to estimate their device's power consumption.
+ */
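To make the round-trip concrete, a hedged sketch of a governor-style caller of the power-actor API shown below (error handling trimmed; the halving of the budget is an arbitrary example value):

static int example_power_budget(struct thermal_zone_device *tz,
				struct thermal_cooling_device *cdev,
				struct thermal_instance *instance)
{
	u32 max_power;
	int ret;

	ret = power_actor_get_max_power(cdev, tz, &max_power);
	if (ret)
		return ret;

	/* grant half of the maximum as an arbitrary example budget */
	return power_actor_set_power(cdev, instance, max_power / 2);
}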
/**
* power_actor_get_max_power() - get the maximum power that a cdev can consume
@@ -1127,12 +510,13 @@ int power_actor_get_min_power(struct thermal_cooling_device *cdev,
}
/**
- * power_actor_set_power() - limit the maximum power that a cooling device can consume
+ * power_actor_set_power() - limit the maximum power a cooling device consumes
* @cdev: pointer to &thermal_cooling_device
* @instance: thermal instance to update
* @power: the power in milliwatts
*
- * Set the cooling device to consume at most @power milliwatts.
+ * Set the cooling device to consume at most @power milliwatts. The limit is
+ * expected to be a cap on the maximum power consumption.
*
* Return: 0 on success, -EINVAL if the cooling device does not
* implement the power actor API or -E* for other failures.
@@ -1159,143 +543,75 @@ int power_actor_set_power(struct thermal_cooling_device *cdev,
return 0;
}
-static DEVICE_ATTR(type, 0444, type_show, NULL);
-static DEVICE_ATTR(temp, 0444, temp_show, NULL);
-static DEVICE_ATTR(mode, 0644, mode_show, mode_store);
-static DEVICE_ATTR(passive, S_IRUGO | S_IWUSR, passive_show, passive_store);
-static DEVICE_ATTR(policy, S_IRUGO | S_IWUSR, policy_show, policy_store);
-static DEVICE_ATTR(available_policies, S_IRUGO, available_policies_show, NULL);
-
-/* sys I/F for cooling device */
-#define to_cooling_device(_dev) \
- container_of(_dev, struct thermal_cooling_device, device)
-
-static ssize_t
-thermal_cooling_device_type_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+void thermal_zone_device_rebind_exception(struct thermal_zone_device *tz,
+ const char *cdev_type, size_t size)
{
- struct thermal_cooling_device *cdev = to_cooling_device(dev);
+ struct thermal_cooling_device *cdev = NULL;
+
+ mutex_lock(&thermal_list_lock);
+ list_for_each_entry(cdev, &thermal_cdev_list, node) {
+ /* skip non-matching cdevs */
+ if (strncmp(cdev_type, cdev->type, size))
+ continue;
- return sprintf(buf, "%s\n", cdev->type);
+ /* rebind the exception matching the type pattern */
+ thermal_zone_bind_cooling_device(tz, THERMAL_TRIPS_NONE, cdev,
+ THERMAL_NO_LIMIT,
+ THERMAL_NO_LIMIT,
+ THERMAL_WEIGHT_DEFAULT);
+ }
+ mutex_unlock(&thermal_list_lock);
}
-static ssize_t
-thermal_cooling_device_max_state_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+void thermal_zone_device_unbind_exception(struct thermal_zone_device *tz,
+ const char *cdev_type, size_t size)
{
- struct thermal_cooling_device *cdev = to_cooling_device(dev);
- unsigned long state;
- int ret;
+ struct thermal_cooling_device *cdev = NULL;
- ret = cdev->ops->get_max_state(cdev, &state);
- if (ret)
- return ret;
- return sprintf(buf, "%ld\n", state);
+ mutex_lock(&thermal_list_lock);
+ list_for_each_entry(cdev, &thermal_cdev_list, node) {
+ /* skip non-matching cdevs */
+ if (strncmp(cdev_type, cdev->type, size))
+ continue;
+ /* unbind the exception matching the type pattern */
+ thermal_zone_unbind_cooling_device(tz, THERMAL_TRIPS_NONE,
+ cdev);
+ }
+ mutex_unlock(&thermal_list_lock);
}
-static ssize_t
-thermal_cooling_device_cur_state_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+/*
+ * Device management section: cooling devices, zone devices, and binding
+ *
+ * Set of functions provided by the thermal core for:
+ * - cooling device lifecycle: registration, unregistration,
+ * binding, and unbinding.
+ * - thermal zone device lifecycle: registration, unregistration,
+ * binding, and unbinding.
+ */
+static int get_idr(struct idr *idr, struct mutex *lock, int *id)
{
- struct thermal_cooling_device *cdev = to_cooling_device(dev);
- unsigned long state;
int ret;
- ret = cdev->ops->get_cur_state(cdev, &state);
- if (ret)
+ if (lock)
+ mutex_lock(lock);
+ ret = idr_alloc(idr, NULL, 0, 0, GFP_KERNEL);
+ if (lock)
+ mutex_unlock(lock);
+ if (unlikely(ret < 0))
return ret;
- return sprintf(buf, "%ld\n", state);
-}
-
-static ssize_t
-thermal_cooling_device_cur_state_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct thermal_cooling_device *cdev = to_cooling_device(dev);
- unsigned long state;
- int result;
-
- if (!sscanf(buf, "%ld\n", &state))
- return -EINVAL;
-
- if ((long)state < 0)
- return -EINVAL;
-
- result = cdev->ops->set_cur_state(cdev, state);
- if (result)
- return result;
- return count;
-}
-
-static struct device_attribute dev_attr_cdev_type =
-__ATTR(type, 0444, thermal_cooling_device_type_show, NULL);
-static DEVICE_ATTR(max_state, 0444,
- thermal_cooling_device_max_state_show, NULL);
-static DEVICE_ATTR(cur_state, 0644,
- thermal_cooling_device_cur_state_show,
- thermal_cooling_device_cur_state_store);
-
-static ssize_t
-thermal_cooling_device_trip_point_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct thermal_instance *instance;
-
- instance =
- container_of(attr, struct thermal_instance, attr);
-
- if (instance->trip == THERMAL_TRIPS_NONE)
- return sprintf(buf, "-1\n");
- else
- return sprintf(buf, "%d\n", instance->trip);
-}
-
-static struct attribute *cooling_device_attrs[] = {
- &dev_attr_cdev_type.attr,
- &dev_attr_max_state.attr,
- &dev_attr_cur_state.attr,
- NULL,
-};
-
-static const struct attribute_group cooling_device_attr_group = {
- .attrs = cooling_device_attrs,
-};
-
-static const struct attribute_group *cooling_device_attr_groups[] = {
- &cooling_device_attr_group,
- NULL,
-};
-
-static ssize_t
-thermal_cooling_device_weight_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct thermal_instance *instance;
-
- instance = container_of(attr, struct thermal_instance, weight_attr);
-
- return sprintf(buf, "%d\n", instance->weight);
+ *id = ret;
+ return 0;
}
-static ssize_t
-thermal_cooling_device_weight_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
+static void release_idr(struct idr *idr, struct mutex *lock, int id)
{
- struct thermal_instance *instance;
- int ret, weight;
-
- ret = kstrtoint(buf, 0, &weight);
- if (ret)
- return ret;
-
- instance = container_of(attr, struct thermal_instance, weight_attr);
- instance->weight = weight;
-
- return count;
+ if (lock)
+ mutex_lock(lock);
+ idr_remove(idr, id);
+ if (lock)
+ mutex_unlock(lock);
}
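A short usage sketch of the two id helpers above, mirroring how this file allocates and releases a cooling device id (the error label is hypothetical):

/* Sketch: id allocation for a new cooling device, and its release. */
result = get_idr(&thermal_cdev_idr, &thermal_idr_lock, &cdev->id);
if (result)
	goto out_kfree;		/* hypothetical error label */
/* ... the id lives as long as the device ... */
release_idr(&thermal_cdev_idr, &thermal_idr_lock, cdev->id);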
-/* Device management */
/**
* thermal_zone_bind_cooling_device() - bind a cooling device to a thermal zone
@@ -1358,8 +674,7 @@ int thermal_zone_bind_cooling_device(struct thermal_zone_device *tz,
if (lower > upper || upper > max_state)
return -EINVAL;
- dev =
- kzalloc(sizeof(struct thermal_instance), GFP_KERNEL);
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
if (!dev)
return -ENOMEM;
dev->tz = tz;
@@ -1402,10 +717,10 @@ int thermal_zone_bind_cooling_device(struct thermal_zone_device *tz,
mutex_lock(&tz->lock);
mutex_lock(&cdev->lock);
list_for_each_entry(pos, &tz->thermal_instances, tz_node)
- if (pos->tz == tz && pos->trip == trip && pos->cdev == cdev) {
- result = -EEXIST;
- break;
- }
+ if (pos->tz == tz && pos->trip == trip && pos->cdev == cdev) {
+ result = -EEXIST;
+ break;
+ }
if (!result) {
list_add_tail(&dev->tz_node, &tz->thermal_instances);
list_add_tail(&dev->cdev_node, &cdev->thermal_instances);
@@ -1485,8 +800,8 @@ static void thermal_release(struct device *dev)
sizeof("thermal_zone") - 1)) {
tz = to_thermal_zone(dev);
kfree(tz);
- } else if(!strncmp(dev_name(dev), "cooling_device",
- sizeof("cooling_device") - 1)){
+ } else if (!strncmp(dev_name(dev), "cooling_device",
+ sizeof("cooling_device") - 1)) {
cdev = to_cooling_device(dev);
kfree(cdev);
}
@@ -1497,6 +812,78 @@ static struct class thermal_class = {
.dev_release = thermal_release,
};
+static inline
+void print_bind_err_msg(struct thermal_zone_device *tz,
+ struct thermal_cooling_device *cdev, int ret)
+{
+ dev_err(&tz->device, "binding zone %s with cdev %s failed:%d\n",
+ tz->type, cdev->type, ret);
+}
+
+static void __bind(struct thermal_zone_device *tz, int mask,
+ struct thermal_cooling_device *cdev,
+ unsigned long *limits,
+ unsigned int weight)
+{
+ int i, ret;
+
+ for (i = 0; i < tz->trips; i++) {
+ if (mask & (1 << i)) {
+ unsigned long upper, lower;
+
+ upper = THERMAL_NO_LIMIT;
+ lower = THERMAL_NO_LIMIT;
+ if (limits) {
+ lower = limits[i * 2];
+ upper = limits[i * 2 + 1];
+ }
+ ret = thermal_zone_bind_cooling_device(tz, i, cdev,
+ upper, lower,
+ weight);
+ if (ret)
+ print_bind_err_msg(tz, cdev, ret);
+ }
+ }
+}
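The limits array consumed by __bind() interleaves { lower, upper } pairs indexed by trip. A hedged example of matching platform data (all values illustrative), assuming trip_mask = 0x3 to select trips 0 and 1:

static unsigned long example_binding_limits[] = {
	0, 2,			/* trip 0: cooling states 0..2 */
	3, THERMAL_NO_LIMIT,	/* trip 1: states 3 up to the maximum */
};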
+
+static void bind_cdev(struct thermal_cooling_device *cdev)
+{
+ int i, ret;
+ const struct thermal_zone_params *tzp;
+ struct thermal_zone_device *pos = NULL;
+
+ mutex_lock(&thermal_list_lock);
+
+ list_for_each_entry(pos, &thermal_tz_list, node) {
+ if (!pos->tzp && !pos->ops->bind)
+ continue;
+
+ if (pos->ops->bind) {
+ ret = pos->ops->bind(pos, cdev);
+ if (ret)
+ print_bind_err_msg(pos, cdev, ret);
+ continue;
+ }
+
+ tzp = pos->tzp;
+ if (!tzp || !tzp->tbp)
+ continue;
+
+ for (i = 0; i < tzp->num_tbps; i++) {
+ if (tzp->tbp[i].cdev || !tzp->tbp[i].match)
+ continue;
+ if (tzp->tbp[i].match(pos, cdev))
+ continue;
+ tzp->tbp[i].cdev = cdev;
+ __bind(pos, tzp->tbp[i].trip_mask, cdev,
+ tzp->tbp[i].binding_limits,
+ tzp->tbp[i].weight);
+ }
+ }
+
+ mutex_unlock(&thermal_list_lock);
+}
+
/**
* __thermal_cooling_device_register() - register a new thermal cooling device
* @np: a pointer to a device tree node.
@@ -1529,7 +916,7 @@ __thermal_cooling_device_register(struct device_node *np,
!ops->set_cur_state)
return ERR_PTR(-EINVAL);
- cdev = kzalloc(sizeof(struct thermal_cooling_device), GFP_KERNEL);
+ cdev = kzalloc(sizeof(*cdev), GFP_KERNEL);
if (!cdev)
return ERR_PTR(-ENOMEM);
@@ -1546,7 +933,7 @@ __thermal_cooling_device_register(struct device_node *np,
cdev->ops = ops;
cdev->updated = false;
cdev->device.class = &thermal_class;
- cdev->device.groups = cooling_device_attr_groups;
+ thermal_cooling_device_setup_sysfs(cdev);
cdev->devdata = devdata;
dev_set_name(&cdev->device, "cooling_device%d", cdev->id);
result = device_register(&cdev->device);
@@ -1619,12 +1006,22 @@ thermal_of_cooling_device_register(struct device_node *np,
}
EXPORT_SYMBOL_GPL(thermal_of_cooling_device_register);
+static void __unbind(struct thermal_zone_device *tz, int mask,
+ struct thermal_cooling_device *cdev)
+{
+ int i;
+
+ for (i = 0; i < tz->trips; i++)
+ if (mask & (1 << i))
+ thermal_zone_unbind_cooling_device(tz, i, cdev);
+}
+
/**
- * thermal_cooling_device_unregister - removes the registered thermal cooling device
+ * thermal_cooling_device_unregister - removes a thermal cooling device
* @cdev: the thermal cooling device to remove.
*
- * thermal_cooling_device_unregister() must be called when the device is no
- * longer needed.
+ * thermal_cooling_device_unregister() must be called when a registered
+ * thermal cooling device is no longer needed.
*/
void thermal_cooling_device_unregister(struct thermal_cooling_device *cdev)
{
@@ -1638,8 +1035,8 @@ void thermal_cooling_device_unregister(struct thermal_cooling_device *cdev)
mutex_lock(&thermal_list_lock);
list_for_each_entry(pos, &thermal_cdev_list, node)
- if (pos == cdev)
- break;
+ if (pos == cdev)
+ break;
if (pos != cdev) {
/* thermal cooling device not found */
mutex_unlock(&thermal_list_lock);
@@ -1668,171 +1065,49 @@ void thermal_cooling_device_unregister(struct thermal_cooling_device *cdev)
mutex_unlock(&thermal_list_lock);
- if (cdev->type[0])
- device_remove_file(&cdev->device, &dev_attr_cdev_type);
- device_remove_file(&cdev->device, &dev_attr_max_state);
- device_remove_file(&cdev->device, &dev_attr_cur_state);
-
release_idr(&thermal_cdev_idr, &thermal_idr_lock, cdev->id);
device_unregister(&cdev->device);
- return;
}
EXPORT_SYMBOL_GPL(thermal_cooling_device_unregister);
-void thermal_cdev_update(struct thermal_cooling_device *cdev)
+static void bind_tz(struct thermal_zone_device *tz)
{
- struct thermal_instance *instance;
- unsigned long target = 0;
+ int i, ret;
+ struct thermal_cooling_device *pos = NULL;
+ const struct thermal_zone_params *tzp = tz->tzp;
- mutex_lock(&cdev->lock);
- /* cooling device is updated*/
- if (cdev->updated) {
- mutex_unlock(&cdev->lock);
+ if (!tzp && !tz->ops->bind)
return;
- }
-
- /* Make sure cdev enters the deepest cooling state */
- list_for_each_entry(instance, &cdev->thermal_instances, cdev_node) {
- dev_dbg(&cdev->device, "zone%d->target=%lu\n",
- instance->tz->id, instance->target);
- if (instance->target == THERMAL_NO_TARGET)
- continue;
- if (instance->target > target)
- target = instance->target;
- }
- cdev->ops->set_cur_state(cdev, target);
- cdev->updated = true;
- mutex_unlock(&cdev->lock);
- trace_cdev_update(cdev, target);
- dev_dbg(&cdev->device, "set to state %lu\n", target);
-}
-EXPORT_SYMBOL(thermal_cdev_update);
-/**
- * thermal_notify_framework - Sensor drivers use this API to notify framework
- * @tz: thermal zone device
- * @trip: indicates which trip point has been crossed
- *
- * This function handles the trip events from sensor drivers. It starts
- * throttling the cooling devices according to the policy configured.
- * For CRITICAL and HOT trip points, this notifies the respective drivers,
- * and does actual throttling for other trip points i.e ACTIVE and PASSIVE.
- * The throttling policy is based on the configured platform data; if no
- * platform data is provided, this uses the step_wise throttling policy.
- */
-void thermal_notify_framework(struct thermal_zone_device *tz, int trip)
-{
- handle_thermal_trip(tz, trip);
-}
-EXPORT_SYMBOL_GPL(thermal_notify_framework);
-
-/**
- * create_trip_attrs() - create attributes for trip points
- * @tz: the thermal zone device
- * @mask: Writeable trip point bitmap.
- *
- * helper function to instantiate sysfs entries for every trip
- * point and its properties of a struct thermal_zone_device.
- *
- * Return: 0 on success, the proper error value otherwise.
- */
-static int create_trip_attrs(struct thermal_zone_device *tz, int mask)
-{
- int indx;
- int size = sizeof(struct thermal_attr) * tz->trips;
-
- tz->trip_type_attrs = kzalloc(size, GFP_KERNEL);
- if (!tz->trip_type_attrs)
- return -ENOMEM;
-
- tz->trip_temp_attrs = kzalloc(size, GFP_KERNEL);
- if (!tz->trip_temp_attrs) {
- kfree(tz->trip_type_attrs);
- return -ENOMEM;
- }
+ mutex_lock(&thermal_list_lock);
- if (tz->ops->get_trip_hyst) {
- tz->trip_hyst_attrs = kzalloc(size, GFP_KERNEL);
- if (!tz->trip_hyst_attrs) {
- kfree(tz->trip_type_attrs);
- kfree(tz->trip_temp_attrs);
- return -ENOMEM;
+ /* If ops->bind is provided, use it */
+ if (tz->ops->bind) {
+ list_for_each_entry(pos, &thermal_cdev_list, node) {
+ ret = tz->ops->bind(tz, pos);
+ if (ret)
+ print_bind_err_msg(tz, pos, ret);
}
+ goto exit;
}
+ if (!tzp || !tzp->tbp)
+ goto exit;
- for (indx = 0; indx < tz->trips; indx++) {
- /* create trip type attribute */
- snprintf(tz->trip_type_attrs[indx].name, THERMAL_NAME_LENGTH,
- "trip_point_%d_type", indx);
-
- sysfs_attr_init(&tz->trip_type_attrs[indx].attr.attr);
- tz->trip_type_attrs[indx].attr.attr.name =
- tz->trip_type_attrs[indx].name;
- tz->trip_type_attrs[indx].attr.attr.mode = S_IRUGO;
- tz->trip_type_attrs[indx].attr.show = trip_point_type_show;
-
- device_create_file(&tz->device,
- &tz->trip_type_attrs[indx].attr);
-
- /* create trip temp attribute */
- snprintf(tz->trip_temp_attrs[indx].name, THERMAL_NAME_LENGTH,
- "trip_point_%d_temp", indx);
-
- sysfs_attr_init(&tz->trip_temp_attrs[indx].attr.attr);
- tz->trip_temp_attrs[indx].attr.attr.name =
- tz->trip_temp_attrs[indx].name;
- tz->trip_temp_attrs[indx].attr.attr.mode = S_IRUGO;
- tz->trip_temp_attrs[indx].attr.show = trip_point_temp_show;
- if (IS_ENABLED(CONFIG_THERMAL_WRITABLE_TRIPS) &&
- mask & (1 << indx)) {
- tz->trip_temp_attrs[indx].attr.attr.mode |= S_IWUSR;
- tz->trip_temp_attrs[indx].attr.store =
- trip_point_temp_store;
- }
-
- device_create_file(&tz->device,
- &tz->trip_temp_attrs[indx].attr);
-
- /* create Optional trip hyst attribute */
- if (!tz->ops->get_trip_hyst)
- continue;
- snprintf(tz->trip_hyst_attrs[indx].name, THERMAL_NAME_LENGTH,
- "trip_point_%d_hyst", indx);
-
- sysfs_attr_init(&tz->trip_hyst_attrs[indx].attr.attr);
- tz->trip_hyst_attrs[indx].attr.attr.name =
- tz->trip_hyst_attrs[indx].name;
- tz->trip_hyst_attrs[indx].attr.attr.mode = S_IRUGO;
- tz->trip_hyst_attrs[indx].attr.show = trip_point_hyst_show;
- if (tz->ops->set_trip_hyst) {
- tz->trip_hyst_attrs[indx].attr.attr.mode |= S_IWUSR;
- tz->trip_hyst_attrs[indx].attr.store =
- trip_point_hyst_store;
+ list_for_each_entry(pos, &thermal_cdev_list, node) {
+ for (i = 0; i < tzp->num_tbps; i++) {
+ if (tzp->tbp[i].cdev || !tzp->tbp[i].match)
+ continue;
+ if (tzp->tbp[i].match(tz, pos))
+ continue;
+ tzp->tbp[i].cdev = pos;
+ __bind(tz, tzp->tbp[i].trip_mask, pos,
+ tzp->tbp[i].binding_limits,
+ tzp->tbp[i].weight);
}
-
- device_create_file(&tz->device,
- &tz->trip_hyst_attrs[indx].attr);
- }
- return 0;
-}
-
-static void remove_trip_attrs(struct thermal_zone_device *tz)
-{
- int indx;
-
- for (indx = 0; indx < tz->trips; indx++) {
- device_remove_file(&tz->device,
- &tz->trip_type_attrs[indx].attr);
- device_remove_file(&tz->device,
- &tz->trip_temp_attrs[indx].attr);
- if (tz->ops->get_trip_hyst)
- device_remove_file(&tz->device,
- &tz->trip_hyst_attrs[indx].attr);
}
- kfree(tz->trip_type_attrs);
- kfree(tz->trip_temp_attrs);
- kfree(tz->trip_hyst_attrs);
+exit:
+ mutex_unlock(&thermal_list_lock);
}
/**
@@ -1859,20 +1134,22 @@ static void remove_trip_attrs(struct thermal_zone_device *tz)
* in case of error, an ERR_PTR. Caller must check return value with
* IS_ERR*() helpers.
*/
-struct thermal_zone_device *thermal_zone_device_register(const char *type,
- int trips, int mask, void *devdata,
- struct thermal_zone_device_ops *ops,
- struct thermal_zone_params *tzp,
- int passive_delay, int polling_delay)
+struct thermal_zone_device *
+thermal_zone_device_register(const char *type, int trips, int mask,
+ void *devdata, struct thermal_zone_device_ops *ops,
+ struct thermal_zone_params *tzp, int passive_delay,
+ int polling_delay)
{
struct thermal_zone_device *tz;
enum thermal_trip_type trip_type;
int trip_temp;
int result;
int count;
- int passive = 0;
struct thermal_governor *governor;
+ if (!type || strlen(type) == 0)
+ return ERR_PTR(-EINVAL);
+
if (type && strlen(type) >= THERMAL_NAME_LENGTH)
return ERR_PTR(-EINVAL);
@@ -1885,7 +1162,7 @@ struct thermal_zone_device *thermal_zone_device_register(const char *type,
if (trips > 0 && (!ops->get_trip_type || !ops->get_trip_temp))
return ERR_PTR(-EINVAL);
- tz = kzalloc(sizeof(struct thermal_zone_device), GFP_KERNEL);
+ tz = kzalloc(sizeof(*tz), GFP_KERNEL);
if (!tz)
return ERR_PTR(-ENOMEM);
@@ -1898,7 +1175,7 @@ struct thermal_zone_device *thermal_zone_device_register(const char *type,
return ERR_PTR(result);
}
- strlcpy(tz->type, type ? : "", sizeof(tz->type));
+ strlcpy(tz->type, type, sizeof(tz->type));
tz->ops = ops;
tz->tzp = tzp;
tz->device.class = &thermal_class;
@@ -1906,6 +1183,13 @@ struct thermal_zone_device *thermal_zone_device_register(const char *type,
tz->trips = trips;
tz->passive_delay = passive_delay;
tz->polling_delay = polling_delay;
+
+ /* sys I/F */
+ /* Add nodes that are always present via .groups */
+ result = thermal_zone_create_device_groups(tz, mask);
+ if (result)
+ goto unregister;
+
/* A new thermal zone needs to be updated anyway. */
atomic_set(&tz->need_update, 1);
@@ -1917,32 +1201,9 @@ struct thermal_zone_device *thermal_zone_device_register(const char *type,
return ERR_PTR(result);
}
- /* sys I/F */
- if (type) {
- result = device_create_file(&tz->device, &dev_attr_type);
- if (result)
- goto unregister;
- }
-
- result = device_create_file(&tz->device, &dev_attr_temp);
- if (result)
- goto unregister;
-
- if (ops->get_mode) {
- result = device_create_file(&tz->device, &dev_attr_mode);
- if (result)
- goto unregister;
- }
-
- result = create_trip_attrs(tz, mask);
- if (result)
- goto unregister;
-
for (count = 0; count < trips; count++) {
if (tz->ops->get_trip_type(tz, count, &trip_type))
set_bit(count, &tz->trips_disabled);
- if (trip_type == THERMAL_TRIP_PASSIVE)
- passive = 1;
if (tz->ops->get_trip_temp(tz, count, &trip_temp))
set_bit(count, &tz->trips_disabled);
/* Check for bogus trip points */
@@ -1950,33 +1211,6 @@ struct thermal_zone_device *thermal_zone_device_register(const char *type,
set_bit(count, &tz->trips_disabled);
}
- if (!passive) {
- result = device_create_file(&tz->device, &dev_attr_passive);
- if (result)
- goto unregister;
- }
-
- if (IS_ENABLED(CONFIG_THERMAL_EMULATION)) {
- result = device_create_file(&tz->device, &dev_attr_emul_temp);
- if (result)
- goto unregister;
- }
-
- /* Create policy attribute */
- result = device_create_file(&tz->device, &dev_attr_policy);
- if (result)
- goto unregister;
-
- /* Add thermal zone params */
- result = create_tzp_attrs(&tz->device);
- if (result)
- goto unregister;
-
- /* Create available_policies attribute */
- result = device_create_file(&tz->device, &dev_attr_available_policies);
- if (result)
- goto unregister;
-
/* Update 'this' zone's governor information */
mutex_lock(&thermal_governor_lock);
@@ -2006,7 +1240,7 @@ struct thermal_zone_device *thermal_zone_device_register(const char *type,
/* Bind cooling devices for this zone */
bind_tz(tz);
- INIT_DELAYED_WORK(&(tz->poll_queue), thermal_zone_device_check);
+ INIT_DELAYED_WORK(&tz->poll_queue, thermal_zone_device_check);
thermal_zone_device_reset(tz);
/* Update the new thermal zone and mark it as already updated. */
@@ -2040,8 +1274,8 @@ void thermal_zone_device_unregister(struct thermal_zone_device *tz)
mutex_lock(&thermal_list_lock);
list_for_each_entry(pos, &thermal_tz_list, node)
- if (pos == tz)
- break;
+ if (pos == tz)
+ break;
if (pos != tz) {
/* thermal zone device not found */
mutex_unlock(&thermal_list_lock);
@@ -2071,14 +1305,10 @@ void thermal_zone_device_unregister(struct thermal_zone_device *tz)
thermal_zone_device_set_polling(tz, 0);
- if (tz->type[0])
- device_remove_file(&tz->device, &dev_attr_type);
- device_remove_file(&tz->device, &dev_attr_temp);
- if (tz->ops->get_mode)
- device_remove_file(&tz->device, &dev_attr_mode);
- device_remove_file(&tz->device, &dev_attr_policy);
- device_remove_file(&tz->device, &dev_attr_available_policies);
- remove_trip_attrs(tz);
+ kfree(tz->trip_type_attrs);
+ kfree(tz->trip_temp_attrs);
+ kfree(tz->trip_hyst_attrs);
+ kfree(tz->trips_attribute_group.attrs);
thermal_set_governor(tz, NULL);
thermal_remove_hwmon_sysfs(tz);
@@ -2086,7 +1316,7 @@ void thermal_zone_device_unregister(struct thermal_zone_device *tz)
idr_destroy(&tz->idr);
mutex_destroy(&tz->lock);
device_unregister(&tz->device);
- return;
+ kfree(tz->device.groups);
}
EXPORT_SYMBOL_GPL(thermal_zone_device_unregister);
@@ -2128,43 +1358,13 @@ exit:
}
EXPORT_SYMBOL_GPL(thermal_zone_get_zone_by_name);
-/**
- * thermal_zone_get_slope - return the slope attribute of the thermal zone
- * @tz: thermal zone device with the slope attribute
- *
- * Return: If the thermal zone device has a slope attribute, return it, else
- * return 1.
- */
-int thermal_zone_get_slope(struct thermal_zone_device *tz)
-{
- if (tz && tz->tzp)
- return tz->tzp->slope;
- return 1;
-}
-EXPORT_SYMBOL_GPL(thermal_zone_get_slope);
-
-/**
- * thermal_zone_get_offset - return the offset attribute of the thermal zone
- * @tz: thermal zone device with the offset attribute
- *
- * Return: If the thermal zone device has a offset attribute, return it, else
- * return 0.
- */
-int thermal_zone_get_offset(struct thermal_zone_device *tz)
-{
- if (tz && tz->tzp)
- return tz->tzp->offset;
- return 0;
-}
-EXPORT_SYMBOL_GPL(thermal_zone_get_offset);
-
#ifdef CONFIG_NET
static const struct genl_multicast_group thermal_event_mcgrps[] = {
{ .name = THERMAL_GENL_MCAST_GROUP_NAME, },
};
-static struct genl_family thermal_event_genl_family = {
- .id = GENL_ID_GENERATE,
+static struct genl_family thermal_event_genl_family __ro_after_init = {
+ .module = THIS_MODULE,
.name = THERMAL_GENL_FAMILY_NAME,
.version = THERMAL_GENL_VERSION,
.maxattr = THERMAL_GENL_ATTR_MAX,
@@ -2173,7 +1373,7 @@ static struct genl_family thermal_event_genl_family = {
};
int thermal_generate_netlink_event(struct thermal_zone_device *tz,
- enum events event)
+ enum events event)
{
struct sk_buff *skb;
struct nlattr *attr;
@@ -2235,7 +1435,7 @@ int thermal_generate_netlink_event(struct thermal_zone_device *tz,
}
EXPORT_SYMBOL_GPL(thermal_generate_netlink_event);
-static int genetlink_init(void)
+static int __init genetlink_init(void)
{
return genl_register_family(&thermal_event_genl_family);
}
@@ -2249,40 +1449,8 @@ static inline int genetlink_init(void) { return 0; }
static inline void genetlink_exit(void) {}
#endif /* !CONFIG_NET */
-static int __init thermal_register_governors(void)
-{
- int result;
-
- result = thermal_gov_step_wise_register();
- if (result)
- return result;
-
- result = thermal_gov_fair_share_register();
- if (result)
- return result;
-
- result = thermal_gov_bang_bang_register();
- if (result)
- return result;
-
- result = thermal_gov_user_space_register();
- if (result)
- return result;
-
- return thermal_gov_power_allocator_register();
-}
-
-static void thermal_unregister_governors(void)
-{
- thermal_gov_step_wise_unregister();
- thermal_gov_fair_share_unregister();
- thermal_gov_bang_bang_unregister();
- thermal_gov_user_space_unregister();
- thermal_gov_power_allocator_unregister();
-}
-
static int thermal_pm_notify(struct notifier_block *nb,
- unsigned long mode, void *_unused)
+ unsigned long mode, void *_unused)
{
struct thermal_zone_device *tz;
diff --git a/drivers/thermal/thermal_core.h b/drivers/thermal/thermal_core.h
index 749d41abfbab..2412b3759e16 100644
--- a/drivers/thermal/thermal_core.h
+++ b/drivers/thermal/thermal_core.h
@@ -54,8 +54,34 @@ struct thermal_instance {
unsigned int weight; /* The weight of the cooling device */
};
+#define to_thermal_zone(_dev) \
+ container_of(_dev, struct thermal_zone_device, device)
+
+#define to_cooling_device(_dev) \
+ container_of(_dev, struct thermal_cooling_device, device)
+
int thermal_register_governor(struct thermal_governor *);
void thermal_unregister_governor(struct thermal_governor *);
+void thermal_zone_device_rebind_exception(struct thermal_zone_device *,
+ const char *, size_t);
+void thermal_zone_device_unbind_exception(struct thermal_zone_device *,
+ const char *, size_t);
+int thermal_zone_device_set_policy(struct thermal_zone_device *, char *);
+int thermal_build_list_of_policies(char *buf);
+
+/* sysfs I/F */
+int thermal_zone_create_device_groups(struct thermal_zone_device *, int);
+void thermal_cooling_device_setup_sysfs(struct thermal_cooling_device *);
+/* used only at binding time */
+ssize_t
+thermal_cooling_device_trip_point_show(struct device *,
+ struct device_attribute *, char *);
+ssize_t thermal_cooling_device_weight_show(struct device *,
+ struct device_attribute *, char *);
+
+ssize_t thermal_cooling_device_weight_store(struct device *,
+ struct device_attribute *,
+ const char *, size_t);
#ifdef CONFIG_THERMAL_GOV_STEP_WISE
int thermal_gov_step_wise_register(void);
diff --git a/drivers/thermal/thermal_helpers.c b/drivers/thermal/thermal_helpers.c
new file mode 100644
index 000000000000..8cdf75adcce1
--- /dev/null
+++ b/drivers/thermal/thermal_helpers.c
@@ -0,0 +1,226 @@
+/*
+ * thermal_helpers.c - helper functions to handle thermal devices
+ *
+ * Copyright (C) 2016 Eduardo Valentin <edubezval@gmail.com>
+ *
+ * Highly based on original thermal_core.c
+ * Copyright (C) 2008 Intel Corp
+ * Copyright (C) 2008 Zhang Rui <rui.zhang@intel.com>
+ * Copyright (C) 2008 Sujith Thomas <sujith.thomas@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/sysfs.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+
+#include <trace/events/thermal.h>
+
+#include "thermal_core.h"
+
+int get_tz_trend(struct thermal_zone_device *tz, int trip)
+{
+ enum thermal_trend trend;
+
+ if (tz->emul_temperature || !tz->ops->get_trend ||
+ tz->ops->get_trend(tz, trip, &trend)) {
+ if (tz->temperature > tz->last_temperature)
+ trend = THERMAL_TREND_RAISING;
+ else if (tz->temperature < tz->last_temperature)
+ trend = THERMAL_TREND_DROPPING;
+ else
+ trend = THERMAL_TREND_STABLE;
+ }
+
+ return trend;
+}
+EXPORT_SYMBOL(get_tz_trend);
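A sketch of how a governor might consume the helper above; the decision rule is illustrative only:

static bool example_should_throttle(struct thermal_zone_device *tz, int trip)
{
	/* throttle harder only while the temperature is rising */
	return get_tz_trend(tz, trip) == THERMAL_TREND_RAISING;
}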
+
+struct thermal_instance *
+get_thermal_instance(struct thermal_zone_device *tz,
+ struct thermal_cooling_device *cdev, int trip)
+{
+ struct thermal_instance *pos = NULL;
+ struct thermal_instance *target_instance = NULL;
+
+ mutex_lock(&tz->lock);
+ mutex_lock(&cdev->lock);
+
+ list_for_each_entry(pos, &tz->thermal_instances, tz_node) {
+ if (pos->tz == tz && pos->trip == trip && pos->cdev == cdev) {
+ target_instance = pos;
+ break;
+ }
+ }
+
+ mutex_unlock(&cdev->lock);
+ mutex_unlock(&tz->lock);
+
+ return target_instance;
+}
+EXPORT_SYMBOL(get_thermal_instance);
+
+/**
+ * thermal_zone_get_temp() - returns the temperature of a thermal zone
+ * @tz: a valid pointer to a struct thermal_zone_device
+ * @temp: a valid pointer to where to store the resulting temperature.
+ *
+ * When a valid thermal zone reference is passed, it will fetch its
+ * temperature and fill @temp.
+ *
+ * Return: On success returns 0, an error code otherwise
+ */
+int thermal_zone_get_temp(struct thermal_zone_device *tz, int *temp)
+{
+ int ret = -EINVAL;
+ int count;
+ int crit_temp = INT_MAX;
+ enum thermal_trip_type type;
+
+ if (!tz || IS_ERR(tz) || !tz->ops->get_temp)
+ goto exit;
+
+ mutex_lock(&tz->lock);
+
+ ret = tz->ops->get_temp(tz, temp);
+
+ if (IS_ENABLED(CONFIG_THERMAL_EMULATION) && tz->emul_temperature) {
+ for (count = 0; count < tz->trips; count++) {
+ ret = tz->ops->get_trip_type(tz, count, &type);
+ if (!ret && type == THERMAL_TRIP_CRITICAL) {
+ ret = tz->ops->get_trip_temp(tz, count,
+ &crit_temp);
+ break;
+ }
+ }
+
+ /*
+ * Only allow emulating a temperature when the real temperature
+ * is below the critical temperature so that the emulation code
+ * cannot hide critical conditions.
+ */
+ if (!ret && *temp < crit_temp)
+ *temp = tz->emul_temperature;
+ }
+
+ mutex_unlock(&tz->lock);
+exit:
+ return ret;
+}
+EXPORT_SYMBOL_GPL(thermal_zone_get_temp);
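Typical caller-side use, sketched (the zone pointer is assumed valid):

static void example_log_temp(struct thermal_zone_device *tz)
{
	int temp;

	if (!thermal_zone_get_temp(tz, &temp))
		pr_debug("%s is at %d millicelsius\n", tz->type, temp);
}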
+
+void thermal_zone_set_trips(struct thermal_zone_device *tz)
+{
+ int low = -INT_MAX;
+ int high = INT_MAX;
+ int trip_temp, hysteresis;
+ int i, ret;
+
+ mutex_lock(&tz->lock);
+
+ if (!tz->ops->set_trips || !tz->ops->get_trip_hyst)
+ goto exit;
+
+ for (i = 0; i < tz->trips; i++) {
+ int trip_low;
+
+ tz->ops->get_trip_temp(tz, i, &trip_temp);
+ tz->ops->get_trip_hyst(tz, i, &hysteresis);
+
+ trip_low = trip_temp - hysteresis;
+
+ if (trip_low < tz->temperature && trip_low > low)
+ low = trip_low;
+
+ if (trip_temp > tz->temperature && trip_temp < high)
+ high = trip_temp;
+ }
+
+ /* No need to change trip points */
+ if (tz->prev_low_trip == low && tz->prev_high_trip == high)
+ goto exit;
+
+ tz->prev_low_trip = low;
+ tz->prev_high_trip = high;
+
+ dev_dbg(&tz->device,
+ "new temperature boundaries: %d < x < %d\n", low, high);
+
+ /*
+ * Set a temperature window. When this window is left the driver
+ * must inform the thermal core via thermal_zone_device_update.
+ */
+ ret = tz->ops->set_trips(tz, low, high);
+ if (ret)
+ dev_err(&tz->device, "Failed to set trips: %d\n", ret);
+
+exit:
+ mutex_unlock(&tz->lock);
+}
+EXPORT_SYMBOL_GPL(thermal_zone_set_trips);
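For context, a hedged sketch of the driver-side .set_trips callback this helper invokes; struct example_sensor and example_write_threshold() are hypothetical:

static int example_set_trips(struct thermal_zone_device *tz, int low, int high)
{
	struct example_sensor *s = tz->devdata;	/* hypothetical driver data */

	/* program the hardware window; leaving it must end in a call to
	 * thermal_zone_device_update(), per the comment above */
	example_write_threshold(s, low, high);
	return 0;
}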
+
+void thermal_cdev_update(struct thermal_cooling_device *cdev)
+{
+ struct thermal_instance *instance;
+ unsigned long target = 0;
+
+ mutex_lock(&cdev->lock);
+ /* cooling device is updated */
+ if (cdev->updated) {
+ mutex_unlock(&cdev->lock);
+ return;
+ }
+
+ /* Make sure cdev enters the deepest cooling state */
+ list_for_each_entry(instance, &cdev->thermal_instances, cdev_node) {
+ dev_dbg(&cdev->device, "zone%d->target=%lu\n",
+ instance->tz->id, instance->target);
+ if (instance->target == THERMAL_NO_TARGET)
+ continue;
+ if (instance->target > target)
+ target = instance->target;
+ }
+ cdev->ops->set_cur_state(cdev, target);
+ cdev->updated = true;
+ mutex_unlock(&cdev->lock);
+ trace_cdev_update(cdev, target);
+ dev_dbg(&cdev->device, "set to state %lu\n", target);
+}
+EXPORT_SYMBOL(thermal_cdev_update);
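A sketch of the usual governor pattern around this helper: set per-instance targets, clear the updated flag, then flush (assumed to mirror the bundled governors; new_state is an illustrative value computed elsewhere):

instance->target = new_state;		/* computed by the governor */
instance->cdev->updated = false;	/* force a recomputation */
thermal_cdev_update(instance->cdev);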
+
+/**
+ * thermal_zone_get_slope - return the slope attribute of the thermal zone
+ * @tz: thermal zone device with the slope attribute
+ *
+ * Return: If the thermal zone device has a slope attribute, return it, else
+ * return 1.
+ */
+int thermal_zone_get_slope(struct thermal_zone_device *tz)
+{
+ if (tz && tz->tzp)
+ return tz->tzp->slope;
+ return 1;
+}
+EXPORT_SYMBOL_GPL(thermal_zone_get_slope);
+
+/**
+ * thermal_zone_get_offset - return the offset attribute of the thermal zone
+ * @tz: thermal zone device with the offset attribute
+ *
+ * Return: If the thermal zone device has an offset attribute, return it, else
+ * return 0.
+ */
+int thermal_zone_get_offset(struct thermal_zone_device *tz)
+{
+ if (tz && tz->tzp)
+ return tz->tzp->offset;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(thermal_zone_get_offset);
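Slope and offset typically parameterize a linear adjustment of a raw sensor reading; a hedged one-line consumer (the linear model is an assumption about typical use, not mandated by the API):

static int example_adjust_temp(struct thermal_zone_device *tz, int raw_mc)
{
	/* linear model: adjusted = slope * raw + offset (defaults 1 and 0) */
	return thermal_zone_get_slope(tz) * raw_mc +
	       thermal_zone_get_offset(tz);
}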
diff --git a/drivers/thermal/thermal_hwmon.c b/drivers/thermal/thermal_hwmon.c
index c41c7742903a..541af5946203 100644
--- a/drivers/thermal/thermal_hwmon.c
+++ b/drivers/thermal/thermal_hwmon.c
@@ -64,7 +64,7 @@ name_show(struct device *dev, struct device_attribute *attr, char *buf)
struct thermal_hwmon_device *hwmon = dev_get_drvdata(dev);
return sprintf(buf, "%s\n", hwmon->type);
}
-static DEVICE_ATTR(name, 0444, name_show, NULL);
+static DEVICE_ATTR_RO(name);
static ssize_t
temp_input_show(struct device *dev, struct device_attribute *attr, char *buf)
@@ -98,7 +98,7 @@ temp_crit_show(struct device *dev, struct device_attribute *attr, char *buf)
int temperature;
int ret;
- ret = tz->ops->get_trip_temp(tz, 0, &temperature);
+ ret = tz->ops->get_crit_temp(tz, &temperature);
if (ret)
return ret;
diff --git a/drivers/thermal/thermal_sysfs.c b/drivers/thermal/thermal_sysfs.c
new file mode 100644
index 000000000000..a694de907a26
--- /dev/null
+++ b/drivers/thermal/thermal_sysfs.c
@@ -0,0 +1,771 @@
+/*
+ * thermal_sysfs.c - sysfs interface of thermal devices
+ *
+ * Copyright (C) 2016 Eduardo Valentin <edubezval@gmail.com>
+ *
+ * Highly based on original thermal_core.c
+ * Copyright (C) 2008 Intel Corp
+ * Copyright (C) 2008 Zhang Rui <rui.zhang@intel.com>
+ * Copyright (C) 2008 Sujith Thomas <sujith.thomas@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/sysfs.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+
+#include "thermal_core.h"
+
+/* sys I/F for thermal zone */
+
+static ssize_t
+type_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct thermal_zone_device *tz = to_thermal_zone(dev);
+
+ return sprintf(buf, "%s\n", tz->type);
+}
+
+static ssize_t
+temp_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct thermal_zone_device *tz = to_thermal_zone(dev);
+ int temperature, ret;
+
+ ret = thermal_zone_get_temp(tz, &temperature);
+
+ if (ret)
+ return ret;
+
+ return sprintf(buf, "%d\n", temperature);
+}
+
+static ssize_t
+mode_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct thermal_zone_device *tz = to_thermal_zone(dev);
+ enum thermal_device_mode mode;
+ int result;
+
+ if (!tz->ops->get_mode)
+ return -EPERM;
+
+ result = tz->ops->get_mode(tz, &mode);
+ if (result)
+ return result;
+
+ return sprintf(buf, "%s\n", mode == THERMAL_DEVICE_ENABLED ? "enabled"
+ : "disabled");
+}
+
+static ssize_t
+mode_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct thermal_zone_device *tz = to_thermal_zone(dev);
+ int result;
+
+ if (!tz->ops->set_mode)
+ return -EPERM;
+
+ if (!strncmp(buf, "enabled", sizeof("enabled") - 1))
+ result = tz->ops->set_mode(tz, THERMAL_DEVICE_ENABLED);
+ else if (!strncmp(buf, "disabled", sizeof("disabled") - 1))
+ result = tz->ops->set_mode(tz, THERMAL_DEVICE_DISABLED);
+ else
+ result = -EINVAL;
+
+ if (result)
+ return result;
+
+ return count;
+}
+
+static ssize_t
+trip_point_type_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct thermal_zone_device *tz = to_thermal_zone(dev);
+ enum thermal_trip_type type;
+ int trip, result;
+
+ if (!tz->ops->get_trip_type)
+ return -EPERM;
+
+ if (sscanf(attr->attr.name, "trip_point_%d_type", &trip) != 1)
+ return -EINVAL;
+
+ result = tz->ops->get_trip_type(tz, trip, &type);
+ if (result)
+ return result;
+
+ switch (type) {
+ case THERMAL_TRIP_CRITICAL:
+ return sprintf(buf, "critical\n");
+ case THERMAL_TRIP_HOT:
+ return sprintf(buf, "hot\n");
+ case THERMAL_TRIP_PASSIVE:
+ return sprintf(buf, "passive\n");
+ case THERMAL_TRIP_ACTIVE:
+ return sprintf(buf, "active\n");
+ default:
+ return sprintf(buf, "unknown\n");
+ }
+}
+
+static ssize_t
+trip_point_temp_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct thermal_zone_device *tz = to_thermal_zone(dev);
+ int trip, ret;
+ int temperature;
+
+ if (!tz->ops->set_trip_temp)
+ return -EPERM;
+
+ if (sscanf(attr->attr.name, "trip_point_%d_temp", &trip) != 1)
+ return -EINVAL;
+
+ if (kstrtoint(buf, 10, &temperature))
+ return -EINVAL;
+
+ ret = tz->ops->set_trip_temp(tz, trip, temperature);
+ if (ret)
+ return ret;
+
+ thermal_zone_device_update(tz, THERMAL_EVENT_UNSPECIFIED);
+
+ return count;
+}
+
+static ssize_t
+trip_point_temp_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct thermal_zone_device *tz = to_thermal_zone(dev);
+ int trip, ret;
+ int temperature;
+
+ if (!tz->ops->get_trip_temp)
+ return -EPERM;
+
+ if (sscanf(attr->attr.name, "trip_point_%d_temp", &trip) != 1)
+ return -EINVAL;
+
+ ret = tz->ops->get_trip_temp(tz, trip, &temperature);
+
+ if (ret)
+ return ret;
+
+ return sprintf(buf, "%d\n", temperature);
+}
+
+static ssize_t
+trip_point_hyst_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct thermal_zone_device *tz = to_thermal_zone(dev);
+ int trip, ret;
+ int temperature;
+
+ if (!tz->ops->set_trip_hyst)
+ return -EPERM;
+
+ if (sscanf(attr->attr.name, "trip_point_%d_hyst", &trip) != 1)
+ return -EINVAL;
+
+ if (kstrtoint(buf, 10, &temperature))
+ return -EINVAL;
+
+ /*
+ * We are not doing any check on the 'temperature' value
+ * here. The driver implementing 'set_trip_hyst' has to
+ * take care of this.
+ */
+ ret = tz->ops->set_trip_hyst(tz, trip, temperature);
+
+ if (!ret)
+ thermal_zone_set_trips(tz);
+
+ return ret ? ret : count;
+}
+
+static ssize_t
+trip_point_hyst_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct thermal_zone_device *tz = to_thermal_zone(dev);
+ int trip, ret;
+ int temperature;
+
+ if (!tz->ops->get_trip_hyst)
+ return -EPERM;
+
+ if (sscanf(attr->attr.name, "trip_point_%d_hyst", &trip) != 1)
+ return -EINVAL;
+
+ ret = tz->ops->get_trip_hyst(tz, trip, &temperature);
+
+ return ret ? ret : sprintf(buf, "%d\n", temperature);
+}
+
+static ssize_t
+passive_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct thermal_zone_device *tz = to_thermal_zone(dev);
+ int state;
+
+ if (sscanf(buf, "%d\n", &state) != 1)
+ return -EINVAL;
+
+ /* sanity check: values below 1000 millicelsius don't make sense
+ * and can cause the system to go into a thermal heart attack
+ */
+ if (state && state < 1000)
+ return -EINVAL;
+
+ if (state && !tz->forced_passive) {
+ if (!tz->passive_delay)
+ tz->passive_delay = 1000;
+ thermal_zone_device_rebind_exception(tz, "Processor",
+ sizeof("Processor"));
+ } else if (!state && tz->forced_passive) {
+ tz->passive_delay = 0;
+ thermal_zone_device_unbind_exception(tz, "Processor",
+ sizeof("Processor"));
+ }
+
+ tz->forced_passive = state;
+
+ thermal_zone_device_update(tz, THERMAL_EVENT_UNSPECIFIED);
+
+ return count;
+}
+
+static ssize_t
+passive_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct thermal_zone_device *tz = to_thermal_zone(dev);
+
+ return sprintf(buf, "%d\n", tz->forced_passive);
+}
+
+static ssize_t
+policy_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct thermal_zone_device *tz = to_thermal_zone(dev);
+ char name[THERMAL_NAME_LENGTH];
+ int ret;
+
+ snprintf(name, sizeof(name), "%s", buf);
+
+ ret = thermal_zone_device_set_policy(tz, name);
+ if (!ret)
+ ret = count;
+
+ return ret;
+}
+
+static ssize_t
+policy_show(struct device *dev, struct device_attribute *devattr, char *buf)
+{
+ struct thermal_zone_device *tz = to_thermal_zone(dev);
+
+ return sprintf(buf, "%s\n", tz->governor->name);
+}
+
+static ssize_t
+available_policies_show(struct device *dev, struct device_attribute *devattr,
+ char *buf)
+{
+ return thermal_build_list_of_policies(buf);
+}
+
+#if (IS_ENABLED(CONFIG_THERMAL_EMULATION))
+static ssize_t
+emul_temp_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct thermal_zone_device *tz = to_thermal_zone(dev);
+ int ret = 0;
+ int temperature;
+
+ if (kstrtoint(buf, 10, &temperature))
+ return -EINVAL;
+
+ if (!tz->ops->set_emul_temp) {
+ mutex_lock(&tz->lock);
+ tz->emul_temperature = temperature;
+ mutex_unlock(&tz->lock);
+ } else {
+ ret = tz->ops->set_emul_temp(tz, temperature);
+ }
+
+ if (!ret)
+ thermal_zone_device_update(tz, THERMAL_EVENT_UNSPECIFIED);
+
+ return ret ? ret : count;
+}
+static DEVICE_ATTR(emul_temp, S_IWUSR, NULL, emul_temp_store);
+#endif
+
+static ssize_t
+sustainable_power_show(struct device *dev, struct device_attribute *devattr,
+ char *buf)
+{
+ struct thermal_zone_device *tz = to_thermal_zone(dev);
+
+ if (tz->tzp)
+ return sprintf(buf, "%u\n", tz->tzp->sustainable_power);
+ else
+ return -EIO;
+}
+
+static ssize_t
+sustainable_power_store(struct device *dev, struct device_attribute *devattr,
+ const char *buf, size_t count)
+{
+ struct thermal_zone_device *tz = to_thermal_zone(dev);
+ u32 sustainable_power;
+
+ if (!tz->tzp)
+ return -EIO;
+
+ if (kstrtou32(buf, 10, &sustainable_power))
+ return -EINVAL;
+
+ tz->tzp->sustainable_power = sustainable_power;
+
+ return count;
+}
+
+#define create_s32_tzp_attr(name) \
+ static ssize_t \
+ name##_show(struct device *dev, struct device_attribute *devattr, \
+ char *buf) \
+ { \
+ struct thermal_zone_device *tz = to_thermal_zone(dev); \
+ \
+ if (tz->tzp) \
+ return sprintf(buf, "%d\n", tz->tzp->name); \
+ else \
+ return -EIO; \
+ } \
+ \
+ static ssize_t \
+ name##_store(struct device *dev, struct device_attribute *devattr, \
+ const char *buf, size_t count) \
+ { \
+ struct thermal_zone_device *tz = to_thermal_zone(dev); \
+ s32 value; \
+ \
+ if (!tz->tzp) \
+ return -EIO; \
+ \
+ if (kstrtos32(buf, 10, &value)) \
+ return -EINVAL; \
+ \
+ tz->tzp->name = value; \
+ \
+ return count; \
+ } \
+ static DEVICE_ATTR(name, S_IWUSR | S_IRUGO, name##_show, name##_store)
+
+create_s32_tzp_attr(k_po);
+create_s32_tzp_attr(k_pu);
+create_s32_tzp_attr(k_i);
+create_s32_tzp_attr(k_d);
+create_s32_tzp_attr(integral_cutoff);
+create_s32_tzp_attr(slope);
+create_s32_tzp_attr(offset);
+#undef create_s32_tzp_attr
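For readability, an approximate expansion of the macro above for one parameter (k_po); this is a sketch, not generated output:

static ssize_t k_po_show(struct device *dev,
			 struct device_attribute *devattr, char *buf)
{
	struct thermal_zone_device *tz = to_thermal_zone(dev);

	return tz->tzp ? sprintf(buf, "%d\n", tz->tzp->k_po) : -EIO;
}
/* ...a matching k_po_store() and DEVICE_ATTR(k_po, ...) follow likewise */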
+
+/*
+ * These are thermal zone device attributes that will always be present.
+ * All the attributes created for tzp (create_s32_tzp_attr) are also always
+ * present on the sysfs interface.
+ */
+static DEVICE_ATTR(type, 0444, type_show, NULL);
+static DEVICE_ATTR(temp, 0444, temp_show, NULL);
+static DEVICE_ATTR(policy, S_IRUGO | S_IWUSR, policy_show, policy_store);
+static DEVICE_ATTR(available_policies, S_IRUGO, available_policies_show, NULL);
+static DEVICE_ATTR(sustainable_power, S_IWUSR | S_IRUGO, sustainable_power_show,
+ sustainable_power_store);
+
+/* These thermal zone device attributes are created based on conditions */
+static DEVICE_ATTR(mode, 0644, mode_show, mode_store);
+static DEVICE_ATTR(passive, S_IRUGO | S_IWUSR, passive_show, passive_store);
+
+/* These attributes are unconditionally added to a thermal zone */
+static struct attribute *thermal_zone_dev_attrs[] = {
+ &dev_attr_type.attr,
+ &dev_attr_temp.attr,
+#if (IS_ENABLED(CONFIG_THERMAL_EMULATION))
+ &dev_attr_emul_temp.attr,
+#endif
+ &dev_attr_policy.attr,
+ &dev_attr_available_policies.attr,
+ &dev_attr_sustainable_power.attr,
+ &dev_attr_k_po.attr,
+ &dev_attr_k_pu.attr,
+ &dev_attr_k_i.attr,
+ &dev_attr_k_d.attr,
+ &dev_attr_integral_cutoff.attr,
+ &dev_attr_slope.attr,
+ &dev_attr_offset.attr,
+ NULL,
+};
+
+static struct attribute_group thermal_zone_attribute_group = {
+ .attrs = thermal_zone_dev_attrs,
+};
+
+/* We expose mode only if .get_mode is present */
+static struct attribute *thermal_zone_mode_attrs[] = {
+ &dev_attr_mode.attr,
+ NULL,
+};
+
+static umode_t thermal_zone_mode_is_visible(struct kobject *kobj,
+ struct attribute *attr,
+ int attrno)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct thermal_zone_device *tz;
+
+ tz = container_of(dev, struct thermal_zone_device, device);
+
+ if (tz->ops->get_mode)
+ return attr->mode;
+
+ return 0;
+}
+
+static struct attribute_group thermal_zone_mode_attribute_group = {
+ .attrs = thermal_zone_mode_attrs,
+ .is_visible = thermal_zone_mode_is_visible,
+};
+
+/* We expose passive only if no passive trip points are present */
+static struct attribute *thermal_zone_passive_attrs[] = {
+ &dev_attr_passive.attr,
+ NULL,
+};
+
+static umode_t thermal_zone_passive_is_visible(struct kobject *kobj,
+ struct attribute *attr,
+ int attrno)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct thermal_zone_device *tz;
+ enum thermal_trip_type trip_type;
+ int count, passive = 0;
+
+ tz = container_of(dev, struct thermal_zone_device, device);
+
+ for (count = 0; count < tz->trips && !passive; count++) {
+ tz->ops->get_trip_type(tz, count, &trip_type);
+
+ if (trip_type == THERMAL_TRIP_PASSIVE)
+ passive = 1;
+ }
+
+ if (!passive)
+ return attr->mode;
+
+ return 0;
+}
+
+static struct attribute_group thermal_zone_passive_attribute_group = {
+ .attrs = thermal_zone_passive_attrs,
+ .is_visible = thermal_zone_passive_is_visible,
+};
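For context, the driver core consults .is_visible once per attribute when a group is registered; returning 0 hides the file and a nonzero return value becomes its mode. A simplified sketch of the fs/sysfs/group.c behaviour:

	for (i = 0, attr = grp->attrs; *attr; i++, attr++) {
		umode_t mode = grp->is_visible ?
			grp->is_visible(kobj, *attr, i) : (*attr)->mode;
		if (!mode)
			continue;	/* file is not created at all */
		/* otherwise the file is created with 'mode' permissions */
	}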
+
+static const struct attribute_group *thermal_zone_attribute_groups[] = {
+ &thermal_zone_attribute_group,
+ &thermal_zone_mode_attribute_group,
+ &thermal_zone_passive_attribute_group,
+ /* This is not NULL terminated as we create the group dynamically */
+};
+
+/**
+ * create_trip_attrs() - create attributes for trip points
+ * @tz: the thermal zone device
+ * @mask: Writeable trip point bitmap.
+ *
+ * Helper function to instantiate sysfs entries for every trip
+ * point of a struct thermal_zone_device and its properties.
+ *
+ * Return: 0 on success, the proper error value otherwise.
+ */
+static int create_trip_attrs(struct thermal_zone_device *tz, int mask)
+{
+ struct attribute **attrs;
+ int indx;
+
+ /* This function works only for zones with at least one trip */
+ if (tz->trips <= 0)
+ return -EINVAL;
+
+ tz->trip_type_attrs = kcalloc(tz->trips, sizeof(*tz->trip_type_attrs),
+ GFP_KERNEL);
+ if (!tz->trip_type_attrs)
+ return -ENOMEM;
+
+ tz->trip_temp_attrs = kcalloc(tz->trips, sizeof(*tz->trip_temp_attrs),
+ GFP_KERNEL);
+ if (!tz->trip_temp_attrs) {
+ kfree(tz->trip_type_attrs);
+ return -ENOMEM;
+ }
+
+ if (tz->ops->get_trip_hyst) {
+ tz->trip_hyst_attrs = kcalloc(tz->trips,
+ sizeof(*tz->trip_hyst_attrs),
+ GFP_KERNEL);
+ if (!tz->trip_hyst_attrs) {
+ kfree(tz->trip_type_attrs);
+ kfree(tz->trip_temp_attrs);
+ return -ENOMEM;
+ }
+ }
+
+ attrs = kcalloc(tz->trips * 3 + 1, sizeof(*attrs), GFP_KERNEL);
+ if (!attrs) {
+ kfree(tz->trip_type_attrs);
+ kfree(tz->trip_temp_attrs);
+ if (tz->ops->get_trip_hyst)
+ kfree(tz->trip_hyst_attrs);
+ return -ENOMEM;
+ }
+
+ for (indx = 0; indx < tz->trips; indx++) {
+ /* create trip type attribute */
+ snprintf(tz->trip_type_attrs[indx].name, THERMAL_NAME_LENGTH,
+ "trip_point_%d_type", indx);
+
+ sysfs_attr_init(&tz->trip_type_attrs[indx].attr.attr);
+ tz->trip_type_attrs[indx].attr.attr.name =
+ tz->trip_type_attrs[indx].name;
+ tz->trip_type_attrs[indx].attr.attr.mode = S_IRUGO;
+ tz->trip_type_attrs[indx].attr.show = trip_point_type_show;
+ attrs[indx] = &tz->trip_type_attrs[indx].attr.attr;
+
+ /* create trip temp attribute */
+ snprintf(tz->trip_temp_attrs[indx].name, THERMAL_NAME_LENGTH,
+ "trip_point_%d_temp", indx);
+
+ sysfs_attr_init(&tz->trip_temp_attrs[indx].attr.attr);
+ tz->trip_temp_attrs[indx].attr.attr.name =
+ tz->trip_temp_attrs[indx].name;
+ tz->trip_temp_attrs[indx].attr.attr.mode = S_IRUGO;
+ tz->trip_temp_attrs[indx].attr.show = trip_point_temp_show;
+ if (IS_ENABLED(CONFIG_THERMAL_WRITABLE_TRIPS) &&
+ mask & (1 << indx)) {
+ tz->trip_temp_attrs[indx].attr.attr.mode |= S_IWUSR;
+ tz->trip_temp_attrs[indx].attr.store =
+ trip_point_temp_store;
+ }
+ attrs[indx + tz->trips] = &tz->trip_temp_attrs[indx].attr.attr;
+
+ /* create optional trip hyst attribute */
+ if (!tz->ops->get_trip_hyst)
+ continue;
+ snprintf(tz->trip_hyst_attrs[indx].name, THERMAL_NAME_LENGTH,
+ "trip_point_%d_hyst", indx);
+
+ sysfs_attr_init(&tz->trip_hyst_attrs[indx].attr.attr);
+ tz->trip_hyst_attrs[indx].attr.attr.name =
+ tz->trip_hyst_attrs[indx].name;
+ tz->trip_hyst_attrs[indx].attr.attr.mode = S_IRUGO;
+ tz->trip_hyst_attrs[indx].attr.show = trip_point_hyst_show;
+ if (tz->ops->set_trip_hyst) {
+ tz->trip_hyst_attrs[indx].attr.attr.mode |= S_IWUSR;
+ tz->trip_hyst_attrs[indx].attr.store =
+ trip_point_hyst_store;
+ }
+ attrs[indx + tz->trips * 2] =
+ &tz->trip_hyst_attrs[indx].attr.attr;
+ }
+ attrs[tz->trips * 3] = NULL;
+
+ tz->trips_attribute_group.attrs = attrs;
+
+ return 0;
+}
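For a zone with two trip points and a get_trip_hyst callback, the loop above produces sysfs files named:

	trip_point_0_type	trip_point_0_temp	trip_point_0_hyst
	trip_point_1_type	trip_point_1_temp	trip_point_1_hyst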
+
+int thermal_zone_create_device_groups(struct thermal_zone_device *tz,
+ int mask)
+{
+ const struct attribute_group **groups;
+ int i, size, result;
+
+ /* we need one extra slot for the trips group and one for the NULL terminator */
+ size = ARRAY_SIZE(thermal_zone_attribute_groups) + 2;
+ /* This also takes care of API requirement to be NULL terminated */
+ groups = kcalloc(size, sizeof(*groups), GFP_KERNEL);
+ if (!groups)
+ return -ENOMEM;
+
+ for (i = 0; i < size - 2; i++)
+ groups[i] = thermal_zone_attribute_groups[i];
+
+ if (tz->trips) {
+ result = create_trip_attrs(tz, mask);
+ if (result) {
+ kfree(groups);
+
+ return result;
+ }
+
+ groups[size - 2] = &tz->trips_attribute_group;
+ }
+
+ tz->device.groups = groups;
+
+ return 0;
+}
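For a zone with trip points, the resulting array would look roughly like this (illustration only; indices follow the kcalloc sizing above):

	groups[0] = &thermal_zone_attribute_group;		/* always present */
	groups[1] = &thermal_zone_mode_attribute_group;		/* gated by is_visible */
	groups[2] = &thermal_zone_passive_attribute_group;	/* gated by is_visible */
	groups[3] = &tz->trips_attribute_group;			/* per-trip attrs */
	groups[4] = NULL;					/* terminator */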
+
+/* sys I/F for cooling device */
+static ssize_t
+thermal_cooling_device_type_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct thermal_cooling_device *cdev = to_cooling_device(dev);
+
+ return sprintf(buf, "%s\n", cdev->type);
+}
+
+static ssize_t
+thermal_cooling_device_max_state_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct thermal_cooling_device *cdev = to_cooling_device(dev);
+ unsigned long state;
+ int ret;
+
+ ret = cdev->ops->get_max_state(cdev, &state);
+ if (ret)
+ return ret;
+ return sprintf(buf, "%ld\n", state);
+}
+
+static ssize_t
+thermal_cooling_device_cur_state_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct thermal_cooling_device *cdev = to_cooling_device(dev);
+ unsigned long state;
+ int ret;
+
+ ret = cdev->ops->get_cur_state(cdev, &state);
+ if (ret)
+ return ret;
+ return sprintf(buf, "%ld\n", state);
+}
+
+static ssize_t
+thermal_cooling_device_cur_state_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct thermal_cooling_device *cdev = to_cooling_device(dev);
+ unsigned long state;
+ int result;
+
+ if (sscanf(buf, "%ld\n", &state) != 1)
+ return -EINVAL;
+
+ if ((long)state < 0)
+ return -EINVAL;
+
+ result = cdev->ops->set_cur_state(cdev, state);
+ if (result)
+ return result;
+ return count;
+}
+
+static struct device_attribute dev_attr_cdev_type =
+__ATTR(type, 0444, thermal_cooling_device_type_show, NULL);
+static DEVICE_ATTR(max_state, 0444,
+ thermal_cooling_device_max_state_show, NULL);
+static DEVICE_ATTR(cur_state, 0644,
+ thermal_cooling_device_cur_state_show,
+ thermal_cooling_device_cur_state_store);
+
+static struct attribute *cooling_device_attrs[] = {
+ &dev_attr_cdev_type.attr,
+ &dev_attr_max_state.attr,
+ &dev_attr_cur_state.attr,
+ NULL,
+};
+
+static const struct attribute_group cooling_device_attr_group = {
+ .attrs = cooling_device_attrs,
+};
+
+static const struct attribute_group *cooling_device_attr_groups[] = {
+ &cooling_device_attr_group,
+ NULL,
+};
+
+void thermal_cooling_device_setup_sysfs(struct thermal_cooling_device *cdev)
+{
+ cdev->device.groups = cooling_device_attr_groups;
+}
+
+/* these helpers will be used only at the time of binding */
+ssize_t
+thermal_cooling_device_trip_point_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct thermal_instance *instance;
+
+ instance =
+ container_of(attr, struct thermal_instance, attr);
+
+ if (instance->trip == THERMAL_TRIPS_NONE)
+ return sprintf(buf, "-1\n");
+ else
+ return sprintf(buf, "%d\n", instance->trip);
+}
+
+ssize_t
+thermal_cooling_device_weight_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct thermal_instance *instance;
+
+ instance = container_of(attr, struct thermal_instance, weight_attr);
+
+ return sprintf(buf, "%d\n", instance->weight);
+}
+
+ssize_t
+thermal_cooling_device_weight_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct thermal_instance *instance;
+ int ret, weight;
+
+ ret = kstrtoint(buf, 0, &weight);
+ if (ret)
+ return ret;
+
+ instance = container_of(attr, struct thermal_instance, weight_attr);
+ instance->weight = weight;
+
+ return count;
+}
diff --git a/drivers/thermal/ti-soc-thermal/ti-bandgap.c b/drivers/thermal/ti-soc-thermal/ti-bandgap.c
index 06ea9766a70a..ba9c302454fb 100644
--- a/drivers/thermal/ti-soc-thermal/ti-bandgap.c
+++ b/drivers/thermal/ti-soc-thermal/ti-bandgap.c
@@ -1298,7 +1298,7 @@ int ti_bandgap_probe(struct platform_device *pdev)
if (IS_ERR(bgp->div_clk)) {
dev_err(&pdev->dev, "failed to request div_ts_ck clock ref\n");
ret = PTR_ERR(bgp->div_clk);
- goto free_irqs;
+ goto put_fclock;
}
for (i = 0; i < bgp->conf->sensor_count; i++) {
@@ -1430,8 +1430,9 @@ disable_clk:
if (TI_BANDGAP_HAS(bgp, CLK_CTRL))
clk_disable_unprepare(bgp->fclock);
put_clks:
- clk_put(bgp->fclock);
clk_put(bgp->div_clk);
+put_fclock:
+ clk_put(bgp->fclock);
free_irqs:
if (TI_BANDGAP_HAS(bgp, TSHUT)) {
free_irq(gpio_to_irq(bgp->tshut_gpio), NULL);
diff --git a/drivers/thermal/x86_pkg_temp_thermal.c b/drivers/thermal/x86_pkg_temp_thermal.c
index 95f4c1bcdb4c..d93eee2f101b 100644
--- a/drivers/thermal/x86_pkg_temp_thermal.c
+++ b/drivers/thermal/x86_pkg_temp_thermal.c
@@ -54,37 +54,33 @@ MODULE_PARM_DESC(notify_delay_ms,
* is some wrong values returned by cpuid for number of thresholds.
*/
#define MAX_NUMBER_OF_TRIPS 2
-/* Limit number of package temp zones */
-#define MAX_PKG_TEMP_ZONE_IDS 256
-
-struct phy_dev_entry {
- struct list_head list;
- u16 phys_proc_id;
- u16 first_cpu;
- u32 tj_max;
- int ref_cnt;
- u32 start_pkg_therm_low;
- u32 start_pkg_therm_high;
- struct thermal_zone_device *tzone;
+
+struct pkg_device {
+ int cpu;
+ bool work_scheduled;
+ u32 tj_max;
+ u32 msr_pkg_therm_low;
+ u32 msr_pkg_therm_high;
+ struct delayed_work work;
+ struct thermal_zone_device *tzone;
+ struct cpumask cpumask;
};
static struct thermal_zone_params pkg_temp_tz_params = {
.no_hwmon = true,
};
-/* List maintaining number of package instances */
-static LIST_HEAD(phy_dev_list);
-static DEFINE_MUTEX(phy_dev_list_mutex);
-
-/* Interrupt to work function schedule queue */
-static DEFINE_PER_CPU(struct delayed_work, pkg_temp_thermal_threshold_work);
+/* Keep track of how many package pointers we allocated in init() */
+static int max_packages __read_mostly;
+/* Array of package pointers */
+static struct pkg_device **packages;
+/* Serializes interrupt notification, work and hotplug */
+static DEFINE_SPINLOCK(pkg_temp_lock);
+/* Protects zone operation in the work function against hotplug removal */
+static DEFINE_MUTEX(thermal_zone_mutex);
-/* To track if the work is already scheduled on a package */
-static u8 *pkg_work_scheduled;
-
-/* Spin lock to prevent races with pkg_work_scheduled */
-static spinlock_t pkg_work_lock;
-static u16 max_phy_id;
+/* The dynamically assigned cpu hotplug state for module_exit() */
+static enum cpuhp_state pkg_thermal_hp_state __read_mostly;
/* Debug counters to show using debugfs */
static struct dentry *debugfs;
@@ -116,22 +112,20 @@ err_out:
return -ENOENT;
}
-static struct phy_dev_entry
- *pkg_temp_thermal_get_phy_entry(unsigned int cpu)
+/*
+ * Protection:
+ *
+ * - cpu hotplug: Read serialized by cpu hotplug lock
+ * Write must hold pkg_temp_lock
+ *
+ * - Other callsites: Must hold pkg_temp_lock
+ */
+static struct pkg_device *pkg_temp_thermal_get_dev(unsigned int cpu)
{
- u16 phys_proc_id = topology_physical_package_id(cpu);
- struct phy_dev_entry *phy_ptr;
-
- mutex_lock(&phy_dev_list_mutex);
-
- list_for_each_entry(phy_ptr, &phy_dev_list, list)
- if (phy_ptr->phys_proc_id == phys_proc_id) {
- mutex_unlock(&phy_dev_list_mutex);
- return phy_ptr;
- }
-
- mutex_unlock(&phy_dev_list_mutex);
+ int pkgid = topology_logical_package_id(cpu);
+
+ if (pkgid >= 0 && pkgid < max_packages)
+ return packages[pkgid];
return NULL;
}
@@ -141,61 +135,44 @@ static struct phy_dev_entry
*/
static int get_tj_max(int cpu, u32 *tj_max)
{
- u32 eax, edx;
- u32 val;
+ u32 eax, edx, val;
int err;
err = rdmsr_safe_on_cpu(cpu, MSR_IA32_TEMPERATURE_TARGET, &eax, &edx);
if (err)
- goto err_ret;
- else {
- val = (eax >> 16) & 0xff;
- if (val)
- *tj_max = val * 1000;
- else {
- err = -EINVAL;
- goto err_ret;
- }
- }
+ return err;
- return 0;
-err_ret:
- *tj_max = 0;
- return err;
+ val = (eax >> 16) & 0xff;
+ *tj_max = val * 1000;
+
+ return val ? 0 : -EINVAL;
}
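A worked example of the decode above, assuming bits 23:16 of MSR_IA32_TEMPERATURE_TARGET hold TjMax in degrees Celsius (the raw value below is hypothetical):

	u32 eax = 0x005a0000;		/* hypothetical MSR contents */
	u32 val = (eax >> 16) & 0xff;	/* val = 0x5a = 90 degC */
	u32 tj_max = val * 1000;	/* 90000 millidegrees Celsius */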
static int sys_get_curr_temp(struct thermal_zone_device *tzd, int *temp)
{
+ struct pkg_device *pkgdev = tzd->devdata;
u32 eax, edx;
- struct phy_dev_entry *phy_dev_entry;
- phy_dev_entry = tzd->devdata;
- rdmsr_on_cpu(phy_dev_entry->first_cpu, MSR_IA32_PACKAGE_THERM_STATUS,
- &eax, &edx);
+ rdmsr_on_cpu(pkgdev->cpu, MSR_IA32_PACKAGE_THERM_STATUS, &eax, &edx);
if (eax & 0x80000000) {
- *temp = phy_dev_entry->tj_max -
- ((eax >> 16) & 0x7f) * 1000;
+ *temp = pkgdev->tj_max - ((eax >> 16) & 0x7f) * 1000;
pr_debug("sys_get_curr_temp %d\n", *temp);
return 0;
}
-
return -EINVAL;
}
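The readout in sys_get_curr_temp() is a distance below TjMax; the driver treats bit 31 as a validity flag and bits 22:16 as the offset. A hypothetical decode, assuming tj_max = 90000:

	u32 eax = 0x802d0000;			/* hypothetical; bit 31 set: reading valid */
	int off = ((eax >> 16) & 0x7f) * 1000;	/* 0x2d = 45 -> 45000 */
	int temp = 90000 - off;			/* 45000, i.e. 45 degC */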
static int sys_get_trip_temp(struct thermal_zone_device *tzd,
- int trip, int *temp)
+ int trip, int *temp)
{
- u32 eax, edx;
- struct phy_dev_entry *phy_dev_entry;
- u32 mask, shift;
+ struct pkg_device *pkgdev = tzd->devdata;
unsigned long thres_reg_value;
+ u32 mask, shift, eax, edx;
int ret;
if (trip >= MAX_NUMBER_OF_TRIPS)
return -EINVAL;
- phy_dev_entry = tzd->devdata;
-
if (trip) {
mask = THERM_MASK_THRESHOLD1;
shift = THERM_SHIFT_THRESHOLD1;
@@ -204,14 +181,14 @@ static int sys_get_trip_temp(struct thermal_zone_device *tzd,
shift = THERM_SHIFT_THRESHOLD0;
}
- ret = rdmsr_on_cpu(phy_dev_entry->first_cpu,
- MSR_IA32_PACKAGE_THERM_INTERRUPT, &eax, &edx);
+ ret = rdmsr_on_cpu(pkgdev->cpu, MSR_IA32_PACKAGE_THERM_INTERRUPT,
+ &eax, &edx);
if (ret < 0)
- return -EINVAL;
+ return ret;
thres_reg_value = (eax & mask) >> shift;
if (thres_reg_value)
- *temp = phy_dev_entry->tj_max - thres_reg_value * 1000;
+ *temp = pkgdev->tj_max - thres_reg_value * 1000;
else
*temp = 0;
pr_debug("sys_get_trip_temp %d\n", *temp);
@@ -219,24 +196,20 @@ static int sys_get_trip_temp(struct thermal_zone_device *tzd,
return 0;
}
-static int sys_set_trip_temp(struct thermal_zone_device *tzd, int trip,
- int temp)
+static int
+sys_set_trip_temp(struct thermal_zone_device *tzd, int trip, int temp)
{
- u32 l, h;
- struct phy_dev_entry *phy_dev_entry;
- u32 mask, shift, intr;
+ struct pkg_device *pkgdev = tzd->devdata;
+ u32 l, h, mask, shift, intr;
int ret;
- phy_dev_entry = tzd->devdata;
-
- if (trip >= MAX_NUMBER_OF_TRIPS || temp >= phy_dev_entry->tj_max)
+ if (trip >= MAX_NUMBER_OF_TRIPS || temp >= pkgdev->tj_max)
return -EINVAL;
- ret = rdmsr_on_cpu(phy_dev_entry->first_cpu,
- MSR_IA32_PACKAGE_THERM_INTERRUPT,
- &l, &h);
+ ret = rdmsr_on_cpu(pkgdev->cpu, MSR_IA32_PACKAGE_THERM_INTERRUPT,
+ &l, &h);
if (ret < 0)
- return -EINVAL;
+ return ret;
if (trip) {
mask = THERM_MASK_THRESHOLD1;
@@ -252,24 +225,20 @@ static int sys_set_trip_temp(struct thermal_zone_device *tzd, int trip,
* When users space sets a trip temperature == 0, which is indication
* that, it is no longer interested in receiving notifications.
*/
- if (!temp)
+ if (!temp) {
l &= ~intr;
- else {
- l |= (phy_dev_entry->tj_max - temp)/1000 << shift;
+ } else {
+ l |= (pkgdev->tj_max - temp)/1000 << shift;
l |= intr;
}
- return wrmsr_on_cpu(phy_dev_entry->first_cpu,
- MSR_IA32_PACKAGE_THERM_INTERRUPT,
- l, h);
+ return wrmsr_on_cpu(pkgdev->cpu, MSR_IA32_PACKAGE_THERM_INTERRUPT, l, h);
}
-static int sys_get_trip_type(struct thermal_zone_device *thermal,
- int trip, enum thermal_trip_type *type)
+static int sys_get_trip_type(struct thermal_zone_device *thermal, int trip,
+ enum thermal_trip_type *type)
{
-
*type = THERMAL_TRIP_PASSIVE;
-
return 0;
}
@@ -281,7 +250,7 @@ static struct thermal_zone_device_ops tzone_ops = {
.set_trip_temp = sys_set_trip_temp,
};
-static bool pkg_temp_thermal_platform_thermal_rate_control(void)
+static bool pkg_thermal_rate_control(void)
{
return true;
}
@@ -289,8 +258,8 @@ static bool pkg_temp_thermal_platform_thermal_rate_control(void)
/* Enable threshold interrupt on local package/cpu */
static inline void enable_pkg_thres_interrupt(void)
{
- u32 l, h;
u8 thres_0, thres_1;
+ u32 l, h;
rdmsr(MSR_IA32_PACKAGE_THERM_INTERRUPT, l, h);
/* only enable/disable if it had valid threshold value */
@@ -307,271 +276,232 @@ static inline void enable_pkg_thres_interrupt(void)
static inline void disable_pkg_thres_interrupt(void)
{
u32 l, h;
+
rdmsr(MSR_IA32_PACKAGE_THERM_INTERRUPT, l, h);
- wrmsr(MSR_IA32_PACKAGE_THERM_INTERRUPT,
- l & (~THERM_INT_THRESHOLD0_ENABLE) &
- (~THERM_INT_THRESHOLD1_ENABLE), h);
+
+ l &= ~(THERM_INT_THRESHOLD0_ENABLE | THERM_INT_THRESHOLD1_ENABLE);
+ wrmsr(MSR_IA32_PACKAGE_THERM_INTERRUPT, l, h);
}
static void pkg_temp_thermal_threshold_work_fn(struct work_struct *work)
{
- __u64 msr_val;
+ struct thermal_zone_device *tzone = NULL;
int cpu = smp_processor_id();
- int phy_id = topology_physical_package_id(cpu);
- struct phy_dev_entry *phdev = pkg_temp_thermal_get_phy_entry(cpu);
- bool notify = false;
- unsigned long flags;
-
- if (!phdev)
- return;
+ struct pkg_device *pkgdev;
+ u64 msr_val, wr_val;
- spin_lock_irqsave(&pkg_work_lock, flags);
+ mutex_lock(&thermal_zone_mutex);
+ spin_lock_irq(&pkg_temp_lock);
++pkg_work_cnt;
- if (unlikely(phy_id > max_phy_id)) {
- spin_unlock_irqrestore(&pkg_work_lock, flags);
+
+ pkgdev = pkg_temp_thermal_get_dev(cpu);
+ if (!pkgdev) {
+ spin_unlock_irq(&pkg_temp_lock);
+ mutex_unlock(&thermal_zone_mutex);
return;
}
- pkg_work_scheduled[phy_id] = 0;
- spin_unlock_irqrestore(&pkg_work_lock, flags);
+ pkgdev->work_scheduled = false;
- enable_pkg_thres_interrupt();
rdmsrl(MSR_IA32_PACKAGE_THERM_STATUS, msr_val);
- if (msr_val & THERM_LOG_THRESHOLD0) {
- wrmsrl(MSR_IA32_PACKAGE_THERM_STATUS,
- msr_val & ~THERM_LOG_THRESHOLD0);
- notify = true;
- }
- if (msr_val & THERM_LOG_THRESHOLD1) {
- wrmsrl(MSR_IA32_PACKAGE_THERM_STATUS,
- msr_val & ~THERM_LOG_THRESHOLD1);
- notify = true;
- }
- if (notify) {
- pr_debug("thermal_zone_device_update\n");
- thermal_zone_device_update(phdev->tzone,
- THERMAL_EVENT_UNSPECIFIED);
+ wr_val = msr_val & ~(THERM_LOG_THRESHOLD0 | THERM_LOG_THRESHOLD1);
+ if (wr_val != msr_val) {
+ wrmsrl(MSR_IA32_PACKAGE_THERM_STATUS, wr_val);
+ tzone = pkgdev->tzone;
}
+
+ enable_pkg_thres_interrupt();
+ spin_unlock_irq(&pkg_temp_lock);
+
+ /*
+ * If tzone is not NULL, then thermal_zone_mutex will prevent the
+ * concurrent removal in the cpu offline callback.
+ */
+ if (tzone)
+ thermal_zone_device_update(tzone, THERMAL_EVENT_UNSPECIFIED);
+
+ mutex_unlock(&thermal_zone_mutex);
}
-static int pkg_temp_thermal_platform_thermal_notify(__u64 msr_val)
+static void pkg_thermal_schedule_work(int cpu, struct delayed_work *work)
+{
+ unsigned long ms = msecs_to_jiffies(notify_delay_ms);
+
+ schedule_delayed_work_on(cpu, work, ms);
+}
+
+static int pkg_thermal_notify(u64 msr_val)
{
- unsigned long flags;
int cpu = smp_processor_id();
- int phy_id = topology_physical_package_id(cpu);
+ struct pkg_device *pkgdev;
+ unsigned long flags;
- /*
- * When a package is in interrupted state, all CPU's in that package
- * are in the same interrupt state. So scheduling on any one CPU in
- * the package is enough and simply return for others.
- */
- spin_lock_irqsave(&pkg_work_lock, flags);
+ spin_lock_irqsave(&pkg_temp_lock, flags);
++pkg_interrupt_cnt;
- if (unlikely(phy_id > max_phy_id) || unlikely(!pkg_work_scheduled) ||
- pkg_work_scheduled[phy_id]) {
- disable_pkg_thres_interrupt();
- spin_unlock_irqrestore(&pkg_work_lock, flags);
- return -EINVAL;
- }
- pkg_work_scheduled[phy_id] = 1;
- spin_unlock_irqrestore(&pkg_work_lock, flags);
disable_pkg_thres_interrupt();
- schedule_delayed_work_on(cpu,
- &per_cpu(pkg_temp_thermal_threshold_work, cpu),
- msecs_to_jiffies(notify_delay_ms));
- return 0;
-}
-static int find_siblings_cpu(int cpu)
-{
- int i;
- int id = topology_physical_package_id(cpu);
-
- for_each_online_cpu(i)
- if (i != cpu && topology_physical_package_id(i) == id)
- return i;
+ /* Work is per package, so scheduling it once is enough. */
+ pkgdev = pkg_temp_thermal_get_dev(cpu);
+ if (pkgdev && !pkgdev->work_scheduled) {
+ pkgdev->work_scheduled = true;
+ pkg_thermal_schedule_work(pkgdev->cpu, &pkgdev->work);
+ }
+ spin_unlock_irqrestore(&pkg_temp_lock, flags);
return 0;
}
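Putting the two halves together, the interrupt-to-work handoff is roughly:

	/*
	 * pkg_thermal_notify()	(interrupt, holds pkg_temp_lock)
	 *	disable_pkg_thres_interrupt()
	 *	pkgdev->work_scheduled = true
	 *	schedule_delayed_work_on(pkgdev->cpu, ...)
	 *
	 *	... notify_delay_ms later ...
	 *
	 * pkg_temp_thermal_threshold_work_fn()
	 *	clear THERM_LOG_THRESHOLD0/1 in PACKAGE_THERM_STATUS
	 *	enable_pkg_thres_interrupt()
	 *	thermal_zone_device_update() if a log bit was set
	 */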
static int pkg_temp_thermal_device_add(unsigned int cpu)
{
- int err;
- u32 tj_max;
- struct phy_dev_entry *phy_dev_entry;
- int thres_count;
- u32 eax, ebx, ecx, edx;
- u8 *temp;
- unsigned long flags;
+ int pkgid = topology_logical_package_id(cpu);
+ u32 tj_max, eax, ebx, ecx, edx;
+ struct pkg_device *pkgdev;
+ int thres_count, err;
+
+ if (pkgid >= max_packages)
+ return -ENOMEM;
cpuid(6, &eax, &ebx, &ecx, &edx);
thres_count = ebx & 0x07;
if (!thres_count)
return -ENODEV;
- if (topology_physical_package_id(cpu) > MAX_PKG_TEMP_ZONE_IDS)
- return -ENODEV;
-
thres_count = clamp_val(thres_count, 0, MAX_NUMBER_OF_TRIPS);
err = get_tj_max(cpu, &tj_max);
if (err)
- goto err_ret;
-
- mutex_lock(&phy_dev_list_mutex);
+ return err;
- phy_dev_entry = kzalloc(sizeof(*phy_dev_entry), GFP_KERNEL);
- if (!phy_dev_entry) {
- err = -ENOMEM;
- goto err_ret_unlock;
- }
+ pkgdev = kzalloc(sizeof(*pkgdev), GFP_KERNEL);
+ if (!pkgdev)
+ return -ENOMEM;
- spin_lock_irqsave(&pkg_work_lock, flags);
- if (topology_physical_package_id(cpu) > max_phy_id)
- max_phy_id = topology_physical_package_id(cpu);
- temp = krealloc(pkg_work_scheduled,
- (max_phy_id+1) * sizeof(u8), GFP_ATOMIC);
- if (!temp) {
- spin_unlock_irqrestore(&pkg_work_lock, flags);
- err = -ENOMEM;
- goto err_ret_free;
- }
- pkg_work_scheduled = temp;
- pkg_work_scheduled[topology_physical_package_id(cpu)] = 0;
- spin_unlock_irqrestore(&pkg_work_lock, flags);
-
- phy_dev_entry->phys_proc_id = topology_physical_package_id(cpu);
- phy_dev_entry->first_cpu = cpu;
- phy_dev_entry->tj_max = tj_max;
- phy_dev_entry->ref_cnt = 1;
- phy_dev_entry->tzone = thermal_zone_device_register("x86_pkg_temp",
+ INIT_DELAYED_WORK(&pkgdev->work, pkg_temp_thermal_threshold_work_fn);
+ pkgdev->cpu = cpu;
+ pkgdev->tj_max = tj_max;
+ pkgdev->tzone = thermal_zone_device_register("x86_pkg_temp",
thres_count,
- (thres_count == MAX_NUMBER_OF_TRIPS) ?
- 0x03 : 0x01,
- phy_dev_entry, &tzone_ops, &pkg_temp_tz_params, 0, 0);
- if (IS_ERR(phy_dev_entry->tzone)) {
- err = PTR_ERR(phy_dev_entry->tzone);
- goto err_ret_free;
+ (thres_count == MAX_NUMBER_OF_TRIPS) ? 0x03 : 0x01,
+ pkgdev, &tzone_ops, &pkg_temp_tz_params, 0, 0);
+ if (IS_ERR(pkgdev->tzone)) {
+ err = PTR_ERR(pkgdev->tzone);
+ kfree(pkgdev);
+ return err;
}
/* Store MSR value for package thermal interrupt, to restore at exit */
- rdmsr_on_cpu(cpu, MSR_IA32_PACKAGE_THERM_INTERRUPT,
- &phy_dev_entry->start_pkg_therm_low,
- &phy_dev_entry->start_pkg_therm_high);
-
- list_add_tail(&phy_dev_entry->list, &phy_dev_list);
- pr_debug("pkg_temp_thermal_device_add :phy_id %d cpu %d\n",
- phy_dev_entry->phys_proc_id, cpu);
-
- mutex_unlock(&phy_dev_list_mutex);
+ rdmsr(MSR_IA32_PACKAGE_THERM_INTERRUPT, pkgdev->msr_pkg_therm_low,
+ pkgdev->msr_pkg_therm_high);
+ cpumask_set_cpu(cpu, &pkgdev->cpumask);
+ spin_lock_irq(&pkg_temp_lock);
+ packages[pkgid] = pkgdev;
+ spin_unlock_irq(&pkg_temp_lock);
return 0;
-
-err_ret_free:
- kfree(phy_dev_entry);
-err_ret_unlock:
- mutex_unlock(&phy_dev_list_mutex);
-
-err_ret:
- return err;
}
-static int pkg_temp_thermal_device_remove(unsigned int cpu)
+static int pkg_thermal_cpu_offline(unsigned int cpu)
{
- struct phy_dev_entry *n;
- u16 phys_proc_id = topology_physical_package_id(cpu);
- struct phy_dev_entry *phdev =
- pkg_temp_thermal_get_phy_entry(cpu);
+ struct pkg_device *pkgdev = pkg_temp_thermal_get_dev(cpu);
+ bool lastcpu, was_target;
+ int target;
- if (!phdev)
- return -ENODEV;
+ if (!pkgdev)
+ return 0;
- mutex_lock(&phy_dev_list_mutex);
- /* If we are loosing the first cpu for this package, we need change */
- if (phdev->first_cpu == cpu) {
- phdev->first_cpu = find_siblings_cpu(cpu);
- pr_debug("thermal_device_remove: first cpu switched %d\n",
- phdev->first_cpu);
+ target = cpumask_any_but(&pkgdev->cpumask, cpu);
+ cpumask_clear_cpu(cpu, &pkgdev->cpumask);
+ lastcpu = target >= nr_cpu_ids;
+ /*
+ * If this is the last cpu in the package, remove the sysfs files
+ * before doing further cleanups.
+ */
+ if (lastcpu) {
+ struct thermal_zone_device *tzone = pkgdev->tzone;
+
+ /*
+ * We must protect against a work function calling
+ * thermal_zone_device_update() after/while unregistering. We NULL
+ * out the pointer under the zone mutex, so the worker function
+ * won't try to call it.
+ */
+ mutex_lock(&thermal_zone_mutex);
+ pkgdev->tzone = NULL;
+ mutex_unlock(&thermal_zone_mutex);
+
+ thermal_zone_device_unregister(tzone);
}
+
+ /* Protect against work and interrupts */
+ spin_lock_irq(&pkg_temp_lock);
+
/*
- * It is possible that no siblings left as this was the last cpu
- * going offline. We don't need to worry about this assignment
- * as the phydev entry will be removed in this case and
- * thermal zone is removed.
- */
- --phdev->ref_cnt;
- pr_debug("thermal_device_remove: pkg: %d cpu %d ref_cnt %d\n",
- phys_proc_id, cpu, phdev->ref_cnt);
- if (!phdev->ref_cnt)
- list_for_each_entry_safe(phdev, n, &phy_dev_list, list) {
- if (phdev->phys_proc_id == phys_proc_id) {
- thermal_zone_device_unregister(phdev->tzone);
- list_del(&phdev->list);
- kfree(phdev);
- break;
- }
- }
- mutex_unlock(&phy_dev_list_mutex);
+ * Check whether this cpu was the current target and store the new
+ * one. When we drop the lock, then the interrupt notify function
+ * will see the new target.
+ */
+ was_target = pkgdev->cpu == cpu;
+ pkgdev->cpu = target;
- return 0;
-}
+ /*
+ * If this is the last CPU in the package remove the package
+ * reference from the array and restore the interrupt MSR. When we
+ * drop the lock neither the interrupt notify function nor the
+ * worker will see the package anymore.
+ */
+ if (lastcpu) {
+ packages[topology_logical_package_id(cpu)] = NULL;
+ /* After this point nothing touches the MSR anymore. */
+ wrmsr(MSR_IA32_PACKAGE_THERM_INTERRUPT,
+ pkgdev->msr_pkg_therm_low, pkgdev->msr_pkg_therm_high);
+ }
-static int get_core_online(unsigned int cpu)
-{
- struct cpuinfo_x86 *c = &cpu_data(cpu);
- struct phy_dev_entry *phdev = pkg_temp_thermal_get_phy_entry(cpu);
-
- /* Check if there is already an instance for this package */
- if (!phdev) {
- if (!cpu_has(c, X86_FEATURE_DTHERM) ||
- !cpu_has(c, X86_FEATURE_PTS))
- return -ENODEV;
- if (pkg_temp_thermal_device_add(cpu))
- return -ENODEV;
- } else {
- mutex_lock(&phy_dev_list_mutex);
- ++phdev->ref_cnt;
- pr_debug("get_core_online: cpu %d ref_cnt %d\n",
- cpu, phdev->ref_cnt);
- mutex_unlock(&phy_dev_list_mutex);
+ /*
+ * Check whether there is work scheduled and whether the work is
+ * targeted at the outgoing CPU.
+ */
+ if (pkgdev->work_scheduled && was_target) {
+ /*
+ * To cancel the work we need to drop the lock, otherwise
+ * we might deadlock if the work needs to be flushed.
+ */
+ spin_unlock_irq(&pkg_temp_lock);
+ cancel_delayed_work_sync(&pkgdev->work);
+ spin_lock_irq(&pkg_temp_lock);
+ /*
+ * If this is not the last cpu in the package and the work
+ * did not run after we dropped the lock above, then we
+ * need to reschedule the work, otherwise the interrupt
+ * stays disabled forever.
+ */
+ if (!lastcpu && pkgdev->work_scheduled)
+ pkg_thermal_schedule_work(target, &pkgdev->work);
}
- INIT_DELAYED_WORK(&per_cpu(pkg_temp_thermal_threshold_work, cpu),
- pkg_temp_thermal_threshold_work_fn);
- pr_debug("get_core_online: cpu %d successful\n", cpu);
+ spin_unlock_irq(&pkg_temp_lock);
+ /* Final cleanup if this is the last cpu */
+ if (lastcpu)
+ kfree(pkgdev);
return 0;
}
-static void put_core_offline(unsigned int cpu)
+static int pkg_thermal_cpu_online(unsigned int cpu)
{
- if (!pkg_temp_thermal_device_remove(cpu))
- cancel_delayed_work_sync(
- &per_cpu(pkg_temp_thermal_threshold_work, cpu));
+ struct pkg_device *pkgdev = pkg_temp_thermal_get_dev(cpu);
+ struct cpuinfo_x86 *c = &cpu_data(cpu);
- pr_debug("put_core_offline: cpu %d\n", cpu);
-}
+ /* Paranoia check */
+ if (!cpu_has(c, X86_FEATURE_DTHERM) || !cpu_has(c, X86_FEATURE_PTS))
+ return -ENODEV;
-static int pkg_temp_thermal_cpu_callback(struct notifier_block *nfb,
- unsigned long action, void *hcpu)
-{
- unsigned int cpu = (unsigned long) hcpu;
-
- switch (action & ~CPU_TASKS_FROZEN) {
- case CPU_ONLINE:
- case CPU_DOWN_FAILED:
- get_core_online(cpu);
- break;
- case CPU_DOWN_PREPARE:
- put_core_offline(cpu);
- break;
+ /* If the package exists, nothing to do */
+ if (pkgdev) {
+ cpumask_set_cpu(cpu, &pkgdev->cpumask);
+ return 0;
}
- return NOTIFY_OK;
+ return pkg_temp_thermal_device_add(cpu);
}
-static struct notifier_block pkg_temp_thermal_notifier __refdata = {
- .notifier_call = pkg_temp_thermal_cpu_callback,
-};
-
static const struct x86_cpu_id __initconst pkg_temp_thermal_ids[] = {
{ X86_VENDOR_INTEL, X86_FAMILY_ANY, X86_MODEL_ANY, X86_FEATURE_PTS },
{}
@@ -580,71 +510,46 @@ MODULE_DEVICE_TABLE(x86cpu, pkg_temp_thermal_ids);
static int __init pkg_temp_thermal_init(void)
{
- int i;
+ int ret;
if (!x86_match_cpu(pkg_temp_thermal_ids))
return -ENODEV;
- spin_lock_init(&pkg_work_lock);
- platform_thermal_package_notify =
- pkg_temp_thermal_platform_thermal_notify;
- platform_thermal_package_rate_control =
- pkg_temp_thermal_platform_thermal_rate_control;
+ max_packages = topology_max_packages();
+ packages = kzalloc(max_packages * sizeof(struct pkg_device *), GFP_KERNEL);
+ if (!packages)
+ return -ENOMEM;
- cpu_notifier_register_begin();
- for_each_online_cpu(i)
- if (get_core_online(i))
- goto err_ret;
- __register_hotcpu_notifier(&pkg_temp_thermal_notifier);
- cpu_notifier_register_done();
+ ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "thermal/x86_pkg:online",
+ pkg_thermal_cpu_online, pkg_thermal_cpu_offline);
+ if (ret < 0)
+ goto err;
- pkg_temp_debugfs_init(); /* Don't care if fails */
+ /* Store the state for module exit */
+ pkg_thermal_hp_state = ret;
- return 0;
+ platform_thermal_package_notify = pkg_thermal_notify;
+ platform_thermal_package_rate_control = pkg_thermal_rate_control;
-err_ret:
- for_each_online_cpu(i)
- put_core_offline(i);
- cpu_notifier_register_done();
- kfree(pkg_work_scheduled);
- platform_thermal_package_notify = NULL;
- platform_thermal_package_rate_control = NULL;
+ /* Don't care if it fails */
+ pkg_temp_debugfs_init();
+ return 0;
- return -ENODEV;
+err:
+ kfree(packages);
+ return ret;
}
+module_init(pkg_temp_thermal_init)
static void __exit pkg_temp_thermal_exit(void)
{
- struct phy_dev_entry *phdev, *n;
- int i;
-
- cpu_notifier_register_begin();
- __unregister_hotcpu_notifier(&pkg_temp_thermal_notifier);
- mutex_lock(&phy_dev_list_mutex);
- list_for_each_entry_safe(phdev, n, &phy_dev_list, list) {
- /* Retore old MSR value for package thermal interrupt */
- wrmsr_on_cpu(phdev->first_cpu,
- MSR_IA32_PACKAGE_THERM_INTERRUPT,
- phdev->start_pkg_therm_low,
- phdev->start_pkg_therm_high);
- thermal_zone_device_unregister(phdev->tzone);
- list_del(&phdev->list);
- kfree(phdev);
- }
- mutex_unlock(&phy_dev_list_mutex);
platform_thermal_package_notify = NULL;
platform_thermal_package_rate_control = NULL;
- for_each_online_cpu(i)
- cancel_delayed_work_sync(
- &per_cpu(pkg_temp_thermal_threshold_work, i));
- cpu_notifier_register_done();
-
- kfree(pkg_work_scheduled);
+ cpuhp_remove_state(pkg_thermal_hp_state);
debugfs_remove_recursive(debugfs);
+ kfree(packages);
}
-
-module_init(pkg_temp_thermal_init)
module_exit(pkg_temp_thermal_exit)
MODULE_DESCRIPTION("X86 PKG TEMP Thermal Driver");
diff --git a/drivers/thunderbolt/Kconfig b/drivers/thunderbolt/Kconfig
index c121acc15bfe..d35db16aa43f 100644
--- a/drivers/thunderbolt/Kconfig
+++ b/drivers/thunderbolt/Kconfig
@@ -1,6 +1,8 @@
menuconfig THUNDERBOLT
tristate "Thunderbolt support for Apple devices"
depends on PCI
+ depends on X86 || COMPILE_TEST
+ select APPLE_PROPERTIES if EFI_STUB && X86
select CRC32
help
Cactus Ridge Thunderbolt Controller driver
diff --git a/drivers/thunderbolt/eeprom.c b/drivers/thunderbolt/eeprom.c
index 2b9602c2c355..6392990c984d 100644
--- a/drivers/thunderbolt/eeprom.c
+++ b/drivers/thunderbolt/eeprom.c
@@ -5,6 +5,7 @@
*/
#include <linux/crc32.h>
+#include <linux/property.h>
#include <linux/slab.h>
#include "tb.h"
@@ -360,6 +361,40 @@ static int tb_drom_parse_entries(struct tb_switch *sw)
}
/**
+ * tb_drom_copy_efi - copy drom supplied by EFI to sw->drom if present
+ */
+static int tb_drom_copy_efi(struct tb_switch *sw, u16 *size)
+{
+ struct device *dev = &sw->tb->nhi->pdev->dev;
+ int len, res;
+
+ len = device_property_read_u8_array(dev, "ThunderboltDROM", NULL, 0);
+ if (len < 0 || len < sizeof(struct tb_drom_header))
+ return -EINVAL;
+
+ sw->drom = kmalloc(len, GFP_KERNEL);
+ if (!sw->drom)
+ return -ENOMEM;
+
+ res = device_property_read_u8_array(dev, "ThunderboltDROM", sw->drom,
+ len);
+ if (res)
+ goto err;
+
+ *size = ((struct tb_drom_header *)sw->drom)->data_len +
+ TB_DROM_DATA_START;
+ if (*size > len)
+ goto err;
+
+ return 0;
+
+err:
+ kfree(sw->drom);
+ sw->drom = NULL;
+ return -EINVAL;
+}
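The property is read with the usual two-call pattern: a NULL buffer queries the element count, a second call fills the buffer. In isolation (sketch, error handling elided):

	int len = device_property_read_u8_array(dev, "ThunderboltDROM", NULL, 0);
	/* len < 0 on error, otherwise the number of bytes in the property */
	u8 *buf = kmalloc(len, GFP_KERNEL);
	device_property_read_u8_array(dev, "ThunderboltDROM", buf, len);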
+
+/**
* tb_drom_read - copy drom to sw->drom and parse it
*/
int tb_drom_read(struct tb_switch *sw)
@@ -374,6 +409,13 @@ int tb_drom_read(struct tb_switch *sw)
if (tb_route(sw) == 0) {
/*
+ * Apple's NHI EFI driver supplies a DROM for the root switch
+ * in a device property. Use it if available.
+ */
+ if (tb_drom_copy_efi(sw, &size) == 0)
+ goto parse;
+
+ /*
* The root switch contains only a dummy drom (header only,
* no entries). Hardcode the configuration here.
*/
@@ -418,6 +460,7 @@ int tb_drom_read(struct tb_switch *sw)
if (res)
goto err;
+parse:
header = (void *) sw->drom;
if (header->data_len + TB_DROM_DATA_START != size) {
diff --git a/drivers/thunderbolt/nhi_regs.h b/drivers/thunderbolt/nhi_regs.h
index 86b996c702a0..75cf0691e6c5 100644
--- a/drivers/thunderbolt/nhi_regs.h
+++ b/drivers/thunderbolt/nhi_regs.h
@@ -1,11 +1,11 @@
/*
- * Thunderbolt Cactus Ridge driver - NHI registers
+ * Thunderbolt driver - NHI registers
*
* Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
*/
-#ifndef DSL3510_REGS_H_
-#define DSL3510_REGS_H_
+#ifndef NHI_REGS_H_
+#define NHI_REGS_H_
#include <linux/types.h>
diff --git a/drivers/thunderbolt/switch.c b/drivers/thunderbolt/switch.c
index 9840fdecb73b..c6f30b1695a9 100644
--- a/drivers/thunderbolt/switch.c
+++ b/drivers/thunderbolt/switch.c
@@ -460,7 +460,7 @@ int tb_switch_resume(struct tb_switch *sw)
tb_sw_warn(sw, "uid read failed\n");
return err;
}
- if (sw->uid != uid) {
+ if (sw != sw->tb->root_switch && sw->uid != uid) {
tb_sw_info(sw,
"changed while suspended (uid %#llx -> %#llx)\n",
sw->uid, uid);
diff --git a/drivers/tty/amiserial.c b/drivers/tty/amiserial.c
index 208f573495dc..dfbb974927f2 100644
--- a/drivers/tty/amiserial.c
+++ b/drivers/tty/amiserial.c
@@ -1012,8 +1012,6 @@ static int get_serial_info(struct tty_struct *tty, struct serial_state *state,
{
struct serial_struct tmp;
- if (!retinfo)
- return -EFAULT;
memset(&tmp, 0, sizeof(tmp));
tty_lock(tty);
tmp.line = tty->index;
diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
index 54cab59e20ed..f3932baed07d 100644
--- a/drivers/tty/n_gsm.c
+++ b/drivers/tty/n_gsm.c
@@ -2711,15 +2711,6 @@ static void gsm_mux_rx_netchar(struct gsm_dlci *dlci,
return;
}
-static int gsm_change_mtu(struct net_device *net, int new_mtu)
-{
- struct gsm_mux_net *mux_net = netdev_priv(net);
- if ((new_mtu < 8) || (new_mtu > mux_net->dlci->gsm->mtu))
- return -EINVAL;
- net->mtu = new_mtu;
- return 0;
-}
-
static void gsm_mux_net_init(struct net_device *net)
{
static const struct net_device_ops gsm_netdev_ops = {
@@ -2728,7 +2719,6 @@ static void gsm_mux_net_init(struct net_device *net)
.ndo_start_xmit = gsm_mux_net_start_xmit,
.ndo_tx_timeout = gsm_mux_net_tx_timeout,
.ndo_get_stats = gsm_mux_net_get_stats,
- .ndo_change_mtu = gsm_change_mtu,
};
net->netdev_ops = &gsm_netdev_ops;
@@ -2787,6 +2777,8 @@ static int gsm_create_network(struct gsm_dlci *dlci, struct gsm_netconfig *nc)
return -ENOMEM;
}
net->mtu = dlci->gsm->mtu;
+ net->min_mtu = 8;
+ net->max_mtu = dlci->gsm->mtu;
mux_net = netdev_priv(net);
mux_net->dlci = dlci;
kref_init(&mux_net->ref);
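With min_mtu/max_mtu filled in, the range check that gsm_change_mtu() used to do is now performed centrally when the MTU is changed, roughly:

	/* sketch of the core-side check in dev_set_mtu() (net/core/dev.c, ~4.10) */
	if (new_mtu < dev->min_mtu)
		return -EINVAL;
	if (dev->max_mtu > 0 && new_mtu > dev->max_mtu)
		return -EINVAL;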
diff --git a/drivers/tty/nozomi.c b/drivers/tty/nozomi.c
index d6fd0e802ef5..39b3723a32a6 100644
--- a/drivers/tty/nozomi.c
+++ b/drivers/tty/nozomi.c
@@ -63,44 +63,23 @@
#define VERSION_STRING DRIVER_DESC " 2.1d"
-/* Macros definitions */
-
/* Default debug printout level */
#define NOZOMI_DEBUG_LEVEL 0x00
-
-#define P_BUF_SIZE 128
-#define NFO(_err_flag_, args...) \
-do { \
- char tmp[P_BUF_SIZE]; \
- snprintf(tmp, sizeof(tmp), ##args); \
- printk(_err_flag_ "[%d] %s(): %s\n", __LINE__, \
- __func__, tmp); \
-} while (0)
-
-#define DBG1(args...) D_(0x01, ##args)
-#define DBG2(args...) D_(0x02, ##args)
-#define DBG3(args...) D_(0x04, ##args)
-#define DBG4(args...) D_(0x08, ##args)
-#define DBG5(args...) D_(0x10, ##args)
-#define DBG6(args...) D_(0x20, ##args)
-#define DBG7(args...) D_(0x40, ##args)
-#define DBG8(args...) D_(0x80, ##args)
-
-#ifdef DEBUG
-/* Do we need this settable at runtime? */
static int debug = NOZOMI_DEBUG_LEVEL;
+module_param(debug, int, S_IRUGO | S_IWUSR);
-#define D(lvl, args...) do \
- {if (lvl & debug) NFO(KERN_DEBUG, ##args); } \
- while (0)
-#define D_(lvl, args...) D(lvl, ##args)
-
-/* These printouts are always printed */
+/* Macros definitions */
+#define DBG_(lvl, fmt, args...) \
+do { \
+ if (lvl & debug) \
+ pr_debug("[%d] %s(): " fmt "\n", \
+ __LINE__, __func__, ##args); \
+} while (0)
-#else
-static int debug;
-#define D_(lvl, args...)
-#endif
+#define DBG1(args...) DBG_(0x01, ##args)
+#define DBG2(args...) DBG_(0x02, ##args)
+#define DBG3(args...) DBG_(0x04, ##args)
+#define DBG4(args...) DBG_(0x08, ##args)
/* TODO: rewrite to optimize macros... */
@@ -1320,7 +1299,7 @@ static ssize_t card_type_show(struct device *dev, struct device_attribute *attr,
return sprintf(buf, "%d\n", dc->card_type);
}
-static DEVICE_ATTR(card_type, S_IRUGO, card_type_show, NULL);
+static DEVICE_ATTR_RO(card_type);
static ssize_t open_ttys_show(struct device *dev, struct device_attribute *attr,
char *buf)
@@ -1329,7 +1308,7 @@ static ssize_t open_ttys_show(struct device *dev, struct device_attribute *attr,
return sprintf(buf, "%u\n", dc->open_ttys);
}
-static DEVICE_ATTR(open_ttys, S_IRUGO, open_ttys_show, NULL);
+static DEVICE_ATTR_RO(open_ttys);
static void make_sysfs_files(struct nozomi *dc)
{
@@ -1943,7 +1922,5 @@ static __exit void nozomi_exit(void)
module_init(nozomi_init);
module_exit(nozomi_exit);
-module_param(debug, int, S_IRUGO | S_IWUSR);
-
MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION(DRIVER_DESC);
diff --git a/drivers/tty/rocket.c b/drivers/tty/rocket.c
index b0cc47c77b40..d66c1edd9892 100644
--- a/drivers/tty/rocket.c
+++ b/drivers/tty/rocket.c
@@ -1189,8 +1189,6 @@ static int get_config(struct r_port *info, struct rocket_config __user *retinfo)
{
struct rocket_config tmp;
- if (!retinfo)
- return -EFAULT;
memset(&tmp, 0, sizeof (tmp));
mutex_lock(&info->port.mutex);
tmp.line = info->line;
@@ -1255,8 +1253,6 @@ static int get_ports(struct r_port *info, struct rocket_ports __user *retports)
struct rocket_ports tmp;
int board;
- if (!retports)
- return -EFAULT;
memset(&tmp, 0, sizeof (tmp));
tmp.tty_major = rocket_driver->major;
diff --git a/drivers/tty/serial/8250/8250.h b/drivers/tty/serial/8250/8250.h
index a697a8585ddc..ce8d4ffcc425 100644
--- a/drivers/tty/serial/8250/8250.h
+++ b/drivers/tty/serial/8250/8250.h
@@ -80,6 +80,7 @@ struct serial8250_config {
#define UART_CAP_RTOIE (1 << 13) /* UART needs IER bit 4 set (Xscale, Tegra) */
#define UART_CAP_HFIFO (1 << 14) /* UART has a "hidden" FIFO */
#define UART_CAP_RPM (1 << 15) /* Runtime PM is active while idle */
+#define UART_CAP_IRDA (1 << 16) /* UART supports IrDA line discipline */
#define UART_BUG_QUOT (1 << 0) /* UART has buggy quot LSB */
#define UART_BUG_TXEN (1 << 1) /* UART has buggy TX IIR status */
@@ -129,8 +130,13 @@ static inline void serial_dl_write(struct uart_8250_port *up, int value)
}
struct uart_8250_port *serial8250_get_port(int line);
+
void serial8250_rpm_get(struct uart_8250_port *p);
void serial8250_rpm_put(struct uart_8250_port *p);
+
+void serial8250_rpm_get_tx(struct uart_8250_port *p);
+void serial8250_rpm_put_tx(struct uart_8250_port *p);
+
int serial8250_em485_init(struct uart_8250_port *p);
void serial8250_em485_destroy(struct uart_8250_port *p);
diff --git a/drivers/tty/serial/8250/8250_core.c b/drivers/tty/serial/8250/8250_core.c
index 240a361b674f..61569a765d9e 100644
--- a/drivers/tty/serial/8250/8250_core.c
+++ b/drivers/tty/serial/8250/8250_core.c
@@ -425,10 +425,10 @@ struct uart_8250_port *serial8250_get_port(int line)
EXPORT_SYMBOL_GPL(serial8250_get_port);
static void (*serial8250_isa_config)(int port, struct uart_port *up,
- unsigned short *capabilities);
+ u32 *capabilities);
void serial8250_set_isa_configurator(
- void (*v)(int port, struct uart_port *up, unsigned short *capabilities))
+ void (*v)(int port, struct uart_port *up, u32 *capabilities))
{
serial8250_isa_config = v;
}
@@ -830,6 +830,7 @@ static int serial8250_probe(struct platform_device *dev)
uart.port.handle_irq = p->handle_irq;
uart.port.handle_break = p->handle_break;
uart.port.set_termios = p->set_termios;
+ uart.port.set_ldisc = p->set_ldisc;
uart.port.get_mctrl = p->get_mctrl;
uart.port.pm = p->pm;
uart.port.dev = &dev->dev;
@@ -1023,6 +1024,8 @@ int serial8250_register_8250_port(struct uart_8250_port *up)
/* Possibly override set_termios call */
if (up->port.set_termios)
uart->port.set_termios = up->port.set_termios;
+ if (up->port.set_ldisc)
+ uart->port.set_ldisc = up->port.set_ldisc;
if (up->port.get_mctrl)
uart->port.get_mctrl = up->port.get_mctrl;
if (up->port.set_mctrl)
diff --git a/drivers/tty/serial/8250/8250_dma.c b/drivers/tty/serial/8250/8250_dma.c
index fdbddbc6375d..26f17456b0d7 100644
--- a/drivers/tty/serial/8250/8250_dma.c
+++ b/drivers/tty/serial/8250/8250_dma.c
@@ -72,10 +72,15 @@ int serial8250_tx_dma(struct uart_8250_port *p)
struct dma_async_tx_descriptor *desc;
int ret;
- if (uart_tx_stopped(&p->port) || dma->tx_running ||
- uart_circ_empty(xmit))
+ if (dma->tx_running)
return 0;
+ if (uart_tx_stopped(&p->port) || uart_circ_empty(xmit)) {
+ /* We have been called from __dma_tx_complete() */
+ serial8250_rpm_put_tx(p);
+ return 0;
+ }
+
dma->tx_size = CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE);
desc = dmaengine_prep_slave_single(dma->txchan,
diff --git a/drivers/tty/serial/8250/8250_dw.c b/drivers/tty/serial/8250/8250_dw.c
index 459d726f9d59..c89fafc972b6 100644
--- a/drivers/tty/serial/8250/8250_dw.c
+++ b/drivers/tty/serial/8250/8250_dw.c
@@ -53,6 +53,8 @@
/* Helper for fifo size calculation */
#define DW_UART_CPR_FIFO_SIZE(a) (((a >> 16) & 0xff) * 16)
+/* DesignWare specific register fields */
+#define DW_UART_MCR_SIRE BIT(6)
struct dw8250_data {
u8 usr_reg;
@@ -254,6 +256,22 @@ out:
serial8250_do_set_termios(p, termios, old);
}
+static void dw8250_set_ldisc(struct uart_port *p, struct ktermios *termios)
+{
+ struct uart_8250_port *up = up_to_u8250p(p);
+ unsigned int mcr = p->serial_in(p, UART_MCR);
+
+ if (up->capabilities & UART_CAP_IRDA) {
+ if (termios->c_line == N_IRDA)
+ mcr |= DW_UART_MCR_SIRE;
+ else
+ mcr &= ~DW_UART_MCR_SIRE;
+
+ p->serial_out(p, UART_MCR, mcr);
+ }
+ serial8250_do_set_ldisc(p, termios);
+}
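The hook runs when the line discipline changes, e.g. when user space switches a port to IrDA via TIOCSETD. A hypothetical trigger (the device path is illustrative):

	#include <fcntl.h>
	#include <sys/ioctl.h>
	#include <linux/tty.h>

	static int enable_irda(const char *dev)	/* e.g. "/dev/ttyS1" */
	{
		int ldisc = N_IRDA;
		int fd = open(dev, O_RDWR | O_NOCTTY);

		if (fd < 0)
			return -1;
		/* keep fd open; the ldisc reverts when it is closed */
		return ioctl(fd, TIOCSETD, &ldisc);	/* reaches dw8250_set_ldisc() */
	}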
+
/*
* dw8250_fallback_dma_filter will prevent the UART from getting just any free
* channel on platforms that have DMA engines, but don't have any channels
@@ -357,6 +375,9 @@ static void dw8250_setup_port(struct uart_port *p)
if (reg & DW_UART_CPR_AFCE_MODE)
up->capabilities |= UART_CAP_AFE;
+
+ if (reg & DW_UART_CPR_SIR_MODE)
+ up->capabilities |= UART_CAP_IRDA;
}
static int dw8250_probe(struct platform_device *pdev)
@@ -392,6 +413,7 @@ static int dw8250_probe(struct platform_device *pdev)
p->iotype = UPIO_MEM;
p->serial_in = dw8250_serial_in;
p->serial_out = dw8250_serial_out;
+ p->set_ldisc = dw8250_set_ldisc;
p->membase = devm_ioremap(dev, regs->start, resource_size(regs));
if (!p->membase)
diff --git a/drivers/tty/serial/8250/8250_fintek.c b/drivers/tty/serial/8250/8250_fintek.c
index 0facc789fe7d..b67e7a544935 100644
--- a/drivers/tty/serial/8250/8250_fintek.c
+++ b/drivers/tty/serial/8250/8250_fintek.c
@@ -21,8 +21,11 @@
#define EXIT_KEY 0xAA
#define CHIP_ID1 0x20
#define CHIP_ID2 0x21
-#define CHIP_ID_0 0x1602
-#define CHIP_ID_1 0x0501
+#define CHIP_ID_F81865 0x0407
+#define CHIP_ID_F81866 0x1010
+#define CHIP_ID_F81216AD 0x1602
+#define CHIP_ID_F81216H 0x0501
+#define CHIP_ID_F81216 0x0802
#define VENDOR_ID1 0x23
#define VENDOR_ID1_VAL 0x19
#define VENDOR_ID2 0x24
@@ -43,12 +46,60 @@
#define RXW4C_IRA BIT(3)
#define TXW4C_IRA BIT(2)
+#define FIFO_CTRL 0xF6
+#define FIFO_MODE_MASK (BIT(1) | BIT(0))
+#define FIFO_MODE_128 (BIT(1) | BIT(0))
+#define RXFTHR_MODE_MASK (BIT(5) | BIT(4))
+#define RXFTHR_MODE_4X BIT(5)
+
+#define F81216_LDN_LOW 0x0
+#define F81216_LDN_HIGH 0x4
+
+/*
+ * F81866 registers
+ *
+ * The IRQ setting mode of the F81866 is not the same as the F81216 series.
+ * Level/Low: IRQ_MODE0:0, IRQ_MODE1:0
+ * Edge/High: IRQ_MODE0:1, IRQ_MODE1:0
+ */
+#define F81866_IRQ_MODE 0xf0
+#define F81866_IRQ_SHARE BIT(0)
+#define F81866_IRQ_MODE0 BIT(1)
+
+#define F81866_FIFO_CTRL FIFO_CTRL
+#define F81866_IRQ_MODE1 BIT(3)
+
+#define F81866_LDN_LOW 0x10
+#define F81866_LDN_HIGH 0x16
+
struct fintek_8250 {
+ u16 pid;
u16 base_port;
u8 index;
u8 key;
};
+static u8 sio_read_reg(struct fintek_8250 *pdata, u8 reg)
+{
+ outb(reg, pdata->base_port + ADDR_PORT);
+ return inb(pdata->base_port + DATA_PORT);
+}
+
+static void sio_write_reg(struct fintek_8250 *pdata, u8 reg, u8 data)
+{
+ outb(reg, pdata->base_port + ADDR_PORT);
+ outb(data, pdata->base_port + DATA_PORT);
+}
+
+static void sio_write_mask_reg(struct fintek_8250 *pdata, u8 reg, u8 mask,
+ u8 data)
+{
+ u8 tmp;
+
+ tmp = (sio_read_reg(pdata, reg) & ~mask) | (mask & data);
+ sio_write_reg(pdata, reg, tmp);
+}
+
static int fintek_8250_enter_key(u16 base_port, u8 key)
{
if (!request_muxed_region(base_port, 2, "8250_fintek"))
@@ -66,29 +117,55 @@ static void fintek_8250_exit_key(u16 base_port)
release_region(base_port + ADDR_PORT, 2);
}
-static int fintek_8250_check_id(u16 base_port)
+static int fintek_8250_check_id(struct fintek_8250 *pdata)
{
u16 chip;
- outb(VENDOR_ID1, base_port + ADDR_PORT);
- if (inb(base_port + DATA_PORT) != VENDOR_ID1_VAL)
+ if (sio_read_reg(pdata, VENDOR_ID1) != VENDOR_ID1_VAL)
return -ENODEV;
- outb(VENDOR_ID2, base_port + ADDR_PORT);
- if (inb(base_port + DATA_PORT) != VENDOR_ID2_VAL)
+ if (sio_read_reg(pdata, VENDOR_ID2) != VENDOR_ID2_VAL)
return -ENODEV;
- outb(CHIP_ID1, base_port + ADDR_PORT);
- chip = inb(base_port + DATA_PORT);
- outb(CHIP_ID2, base_port + ADDR_PORT);
- chip |= inb(base_port + DATA_PORT) << 8;
-
- if (chip != CHIP_ID_0 && chip != CHIP_ID_1)
+ chip = sio_read_reg(pdata, CHIP_ID1);
+ chip |= sio_read_reg(pdata, CHIP_ID2) << 8;
+
+ switch (chip) {
+ case CHIP_ID_F81865:
+ case CHIP_ID_F81866:
+ case CHIP_ID_F81216AD:
+ case CHIP_ID_F81216H:
+ case CHIP_ID_F81216:
+ break;
+ default:
return -ENODEV;
+ }
+ pdata->pid = chip;
return 0;
}
+static int fintek_8250_get_ldn_range(struct fintek_8250 *pdata, int *min,
+ int *max)
+{
+ switch (pdata->pid) {
+ case CHIP_ID_F81865:
+ case CHIP_ID_F81866:
+ *min = F81866_LDN_LOW;
+ *max = F81866_LDN_HIGH;
+ return 0;
+
+ case CHIP_ID_F81216AD:
+ case CHIP_ID_F81216H:
+ case CHIP_ID_F81216:
+ *min = F81216_LDN_LOW;
+ *max = F81216_LDN_HIGH;
+ return 0;
+ }
+
+ return -ENODEV;
+}
+
static int fintek_8250_rs485_config(struct uart_port *port,
struct serial_rs485 *rs485)
{
@@ -128,10 +205,8 @@ static int fintek_8250_rs485_config(struct uart_port *port,
if (fintek_8250_enter_key(pdata->base_port, pdata->key))
return -EBUSY;
- outb(LDN, pdata->base_port + ADDR_PORT);
- outb(pdata->index, pdata->base_port + DATA_PORT);
- outb(RS485, pdata->base_port + ADDR_PORT);
- outb(config, pdata->base_port + DATA_PORT);
+ sio_write_reg(pdata, LDN, pdata->index);
+ sio_write_reg(pdata, RS485, config);
fintek_8250_exit_key(pdata->base_port);
port->rs485 = *rs485;
@@ -139,40 +214,90 @@ static int fintek_8250_rs485_config(struct uart_port *port,
return 0;
}
-static int find_base_port(struct fintek_8250 *pdata, u16 io_address)
+static void fintek_8250_set_irq_mode(struct fintek_8250 *pdata, bool is_level)
+{
+ sio_write_reg(pdata, LDN, pdata->index);
+
+ switch (pdata->pid) {
+ case CHIP_ID_F81866:
+ sio_write_mask_reg(pdata, F81866_FIFO_CTRL, F81866_IRQ_MODE1,
+ 0);
+ /* fall through */
+ case CHIP_ID_F81865:
+ sio_write_mask_reg(pdata, F81866_IRQ_MODE, F81866_IRQ_SHARE,
+ F81866_IRQ_SHARE);
+ sio_write_mask_reg(pdata, F81866_IRQ_MODE, F81866_IRQ_MODE0,
+ is_level ? 0 : F81866_IRQ_MODE0);
+ break;
+
+ case CHIP_ID_F81216AD:
+ case CHIP_ID_F81216H:
+ case CHIP_ID_F81216:
+ sio_write_mask_reg(pdata, FINTEK_IRQ_MODE, IRQ_SHARE,
+ IRQ_SHARE);
+ sio_write_mask_reg(pdata, FINTEK_IRQ_MODE, IRQ_MODE_MASK,
+ is_level ? IRQ_LEVEL_LOW : IRQ_EDGE_HIGH);
+ break;
+ }
+}
+
+static void fintek_8250_set_max_fifo(struct fintek_8250 *pdata)
+{
+ switch (pdata->pid) {
+ case CHIP_ID_F81216H: /* 128-byte FIFO */
+ case CHIP_ID_F81866:
+ sio_write_mask_reg(pdata, FIFO_CTRL,
+ FIFO_MODE_MASK | RXFTHR_MODE_MASK,
+ FIFO_MODE_128 | RXFTHR_MODE_4X);
+ break;
+
+ default: /* default 16-byte FIFO */
+ break;
+ }
+}
+
+static int probe_setup_port(struct fintek_8250 *pdata, u16 io_address,
+ unsigned int irq)
{
static const u16 addr[] = {0x4e, 0x2e};
static const u8 keys[] = {0x77, 0xa0, 0x87, 0x67};
- int i, j, k;
+ struct irq_data *irq_data;
+ bool level_mode = false;
+ int i, j, k, min, max;
for (i = 0; i < ARRAY_SIZE(addr); i++) {
for (j = 0; j < ARRAY_SIZE(keys); j++) {
+ pdata->base_port = addr[i];
+ pdata->key = keys[j];
if (fintek_8250_enter_key(addr[i], keys[j]))
continue;
- if (fintek_8250_check_id(addr[i])) {
+ if (fintek_8250_check_id(pdata) ||
+ fintek_8250_get_ldn_range(pdata, &min, &max)) {
fintek_8250_exit_key(addr[i]);
continue;
}
- for (k = 0; k < 4; k++) {
+ for (k = min; k < max; k++) {
u16 aux;
- outb(LDN, addr[i] + ADDR_PORT);
- outb(k, addr[i] + DATA_PORT);
-
- outb(IO_ADDR1, addr[i] + ADDR_PORT);
- aux = inb(addr[i] + DATA_PORT);
- outb(IO_ADDR2, addr[i] + ADDR_PORT);
- aux |= inb(addr[i] + DATA_PORT) << 8;
+ sio_write_reg(pdata, LDN, k);
+ aux = sio_read_reg(pdata, IO_ADDR1);
+ aux |= sio_read_reg(pdata, IO_ADDR2) << 8;
if (aux != io_address)
continue;
- fintek_8250_exit_key(addr[i]);
- pdata->key = keys[j];
- pdata->base_port = addr[i];
pdata->index = k;
+ irq_data = irq_get_irq_data(irq);
+ if (irq_data)
+ level_mode =
+ irqd_is_level_type(irq_data);
+
+ fintek_8250_set_irq_mode(pdata, level_mode);
+ fintek_8250_set_max_fifo(pdata);
+ fintek_8250_exit_key(addr[i]);
+
return 0;
}
@@ -183,39 +308,29 @@ static int find_base_port(struct fintek_8250 *pdata, u16 io_address)
return -ENODEV;
}
-static int fintek_8250_set_irq_mode(struct fintek_8250 *pdata, bool level_mode)
+static void fintek_8250_set_rs485_handler(struct uart_8250_port *uart)
{
- int status;
- u8 tmp;
-
- status = fintek_8250_enter_key(pdata->base_port, pdata->key);
- if (status)
- return status;
-
- outb(LDN, pdata->base_port + ADDR_PORT);
- outb(pdata->index, pdata->base_port + DATA_PORT);
-
- outb(FINTEK_IRQ_MODE, pdata->base_port + ADDR_PORT);
- tmp = inb(pdata->base_port + DATA_PORT);
-
- tmp &= ~IRQ_MODE_MASK;
- tmp |= IRQ_SHARE;
- if (!level_mode)
- tmp |= IRQ_EDGE_HIGH;
-
- outb(tmp, pdata->base_port + DATA_PORT);
- fintek_8250_exit_key(pdata->base_port);
- return 0;
+ struct fintek_8250 *pdata = uart->port.private_data;
+
+ switch (pdata->pid) {
+ case CHIP_ID_F81216AD:
+ case CHIP_ID_F81216H:
+ case CHIP_ID_F81866:
+ case CHIP_ID_F81865:
+ uart->port.rs485_config = fintek_8250_rs485_config;
+ break;
+
+ default: /* no RS485 auto-direction function */
+ break;
+ }
}
int fintek_8250_probe(struct uart_8250_port *uart)
{
struct fintek_8250 *pdata;
struct fintek_8250 probe_data;
- struct irq_data *irq_data = irq_get_irq_data(uart->port.irq);
- bool level_mode = irqd_is_level_type(irq_data);
- if (find_base_port(&probe_data, uart->port.iobase))
+ if (probe_setup_port(&probe_data, uart->port.iobase, uart->port.irq))
return -ENODEV;
pdata = devm_kzalloc(uart->port.dev, sizeof(*pdata), GFP_KERNEL);
@@ -223,8 +338,8 @@ int fintek_8250_probe(struct uart_8250_port *uart)
return -ENOMEM;
memcpy(pdata, &probe_data, sizeof(probe_data));
- uart->port.rs485_config = fintek_8250_rs485_config;
uart->port.private_data = pdata;
+ fintek_8250_set_rs485_handler(uart);
- return fintek_8250_set_irq_mode(pdata, level_mode);
+ return 0;
}
diff --git a/drivers/tty/serial/8250/8250_lpss.c b/drivers/tty/serial/8250/8250_lpss.c
index b9923464599f..58cbb30a9401 100644
--- a/drivers/tty/serial/8250/8250_lpss.c
+++ b/drivers/tty/serial/8250/8250_lpss.c
@@ -157,12 +157,12 @@ static int byt_serial_setup(struct lpss8250 *lpss, struct uart_port *port)
static const struct dw_dma_platform_data qrk_serial_dma_pdata = {
.nr_channels = 2,
.is_private = true,
- .is_nollp = true,
.chan_allocation_order = CHAN_ALLOCATION_ASCENDING,
.chan_priority = CHAN_PRIORITY_ASCENDING,
.block_size = 4095,
.nr_masters = 1,
.data_width = {4},
+ .multi_block = {0},
};
static void qrk_serial_setup_dma(struct lpss8250 *lpss, struct uart_port *port)
@@ -174,7 +174,7 @@ static void qrk_serial_setup_dma(struct lpss8250 *lpss, struct uart_port *port)
int ret;
chip->dev = &pdev->dev;
- chip->irq = pdev->irq;
+ chip->irq = pci_irq_vector(pdev, 0);
chip->regs = pci_ioremap_bar(pdev, 1);
chip->pdata = &qrk_serial_dma_pdata;
@@ -183,6 +183,9 @@ static void qrk_serial_setup_dma(struct lpss8250 *lpss, struct uart_port *port)
if (ret)
return;
+ pci_set_master(pdev);
+ pci_try_set_mwi(pdev);
+
/* Special DMA address for UART */
dma->rx_dma_addr = 0xfffff000;
dma->tx_dma_addr = 0xfffff000;
@@ -280,8 +283,6 @@ static int lpss8250_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (ret)
return ret;
- pci_set_master(pdev);
-
lpss = devm_kzalloc(&pdev->dev, sizeof(*lpss), GFP_KERNEL);
if (!lpss)
return -ENOMEM;
diff --git a/drivers/tty/serial/8250/8250_mid.c b/drivers/tty/serial/8250/8250_mid.c
index 39c2324484dd..ac013edf4992 100644
--- a/drivers/tty/serial/8250/8250_mid.c
+++ b/drivers/tty/serial/8250/8250_mid.c
@@ -303,10 +303,10 @@ static void mid8250_remove(struct pci_dev *pdev)
{
struct mid8250 *mid = pci_get_drvdata(pdev);
+ serial8250_unregister_port(mid->line);
+
if (mid->board->exit)
mid->board->exit(mid);
-
- serial8250_unregister_port(mid->line);
}
static const struct mid8250_board pnw_board = {
diff --git a/drivers/tty/serial/8250/8250_of.c b/drivers/tty/serial/8250/8250_of.c
index 7a8b5fc81a19..d25ab1cd4295 100644
--- a/drivers/tty/serial/8250/8250_of.c
+++ b/drivers/tty/serial/8250/8250_of.c
@@ -332,8 +332,6 @@ static const struct of_device_id of_platform_serial_table[] = {
.data = (void *)PORT_ALTR_16550_F128, },
{ .compatible = "mrvl,mmp-uart",
.data = (void *)PORT_XSCALE, },
- { .compatible = "mrvl,pxa-uart",
- .data = (void *)PORT_XSCALE, },
{ /* end of list */ },
};
MODULE_DEVICE_TABLE(of, of_platform_serial_table);
diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c
index b98c1578f45a..aa0166b6d450 100644
--- a/drivers/tty/serial/8250/8250_pci.c
+++ b/drivers/tty/serial/8250/8250_pci.c
@@ -52,6 +52,7 @@ struct serial_private {
struct pci_dev *dev;
unsigned int nr;
struct pci_serial_quirk *quirk;
+ const struct pciserial_board *board;
int line[0];
};
@@ -1329,6 +1330,30 @@ static int pci_default_setup(struct serial_private *priv,
return setup_port(priv, port, bar, offset, board->reg_shift);
}
+static int pci_pericom_setup(struct serial_private *priv,
+ const struct pciserial_board *board,
+ struct uart_8250_port *port, int idx)
+{
+ unsigned int bar, offset = board->first_offset, maxnr;
+
+ bar = FL_GET_BASE(board->flags);
+ if (board->flags & FL_BASE_BARS)
+ bar += idx;
+ else
+ offset += idx * board->uart_offset;
+
+ if (idx == 3)
+ offset = 0x38;
+
+ maxnr = (pci_resource_len(priv->dev, bar) - board->first_offset) >>
+ (board->reg_shift + 3);
+
+ if (board->flags & FL_REGION_SZ_CAP && idx >= maxnr)
+ return 1;
+
+ return setup_port(priv, port, bar, offset, board->reg_shift);
+}
+
static int
ce4100_serial_setup(struct serial_private *priv,
const struct pciserial_board *board,
@@ -2096,6 +2121,16 @@ static struct pci_serial_quirk pci_serial_quirks[] __refdata = {
.exit = pci_plx9050_exit,
},
/*
+ * Pericom (only the 7954 - it has an offset jump for port 4)
+ */
+ {
+ .vendor = PCI_VENDOR_ID_PERICOM,
+ .device = PCI_DEVICE_ID_PERICOM_PI7C9X7954,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .setup = pci_pericom_setup,
+ },
+ /*
* PLX
*/
{
@@ -3862,6 +3897,7 @@ pciserial_init_ports(struct pci_dev *dev, const struct pciserial_board *board)
}
}
priv->nr = i;
+ priv->board = board;
return priv;
err_deinit:
@@ -3872,7 +3908,7 @@ err_out:
}
EXPORT_SYMBOL_GPL(pciserial_init_ports);
-void pciserial_remove_ports(struct serial_private *priv)
+void pciserial_detach_ports(struct serial_private *priv)
{
struct pci_serial_quirk *quirk;
int i;
@@ -3886,7 +3922,11 @@ void pciserial_remove_ports(struct serial_private *priv)
quirk = find_quirk(priv->dev);
if (quirk->exit)
quirk->exit(priv->dev);
+}
+void pciserial_remove_ports(struct serial_private *priv)
+{
+ pciserial_detach_ports(priv);
kfree(priv);
}
EXPORT_SYMBOL_GPL(pciserial_remove_ports);
@@ -5577,7 +5617,7 @@ static pci_ers_result_t serial8250_io_error_detected(struct pci_dev *dev,
return PCI_ERS_RESULT_DISCONNECT;
if (priv)
- pciserial_suspend_ports(priv);
+ pciserial_detach_ports(priv);
pci_disable_device(dev);
@@ -5602,9 +5642,18 @@ static pci_ers_result_t serial8250_io_slot_reset(struct pci_dev *dev)
static void serial8250_io_resume(struct pci_dev *dev)
{
struct serial_private *priv = pci_get_drvdata(dev);
+ const struct pciserial_board *board;
- if (priv)
- pciserial_resume_ports(priv);
+ if (!priv)
+ return;
+
+ board = priv->board;
+ kfree(priv);
+ priv = pciserial_init_ports(dev, board);
+
+ if (!IS_ERR(priv)) {
+ pci_set_drvdata(dev, priv);
+ }
}
static const struct pci_error_handlers serial8250_err_handler = {
diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c
index 1731b98d2471..fe4399b41df6 100644
--- a/drivers/tty/serial/8250/8250_port.c
+++ b/drivers/tty/serial/8250/8250_port.c
@@ -636,7 +636,7 @@ EXPORT_SYMBOL_GPL(serial8250_em485_destroy);
* once and disable_runtime_pm_tx() will still disable RPM because the fifo is
* empty and the HW can idle again.
*/
-static void serial8250_rpm_get_tx(struct uart_8250_port *p)
+void serial8250_rpm_get_tx(struct uart_8250_port *p)
{
unsigned char rpm_active;
@@ -648,8 +648,9 @@ static void serial8250_rpm_get_tx(struct uart_8250_port *p)
return;
pm_runtime_get_sync(p->port.dev);
}
+EXPORT_SYMBOL_GPL(serial8250_rpm_get_tx);
-static void serial8250_rpm_put_tx(struct uart_8250_port *p)
+void serial8250_rpm_put_tx(struct uart_8250_port *p)
{
unsigned char rpm_active;
@@ -662,6 +663,7 @@ static void serial8250_rpm_put_tx(struct uart_8250_port *p)
pm_runtime_mark_last_busy(p->port.dev);
pm_runtime_put_autosuspend(p->port.dev);
}
+EXPORT_SYMBOL_GPL(serial8250_rpm_put_tx);
/*
* IER sleep support. UARTs which have EFRs need the "extended
@@ -2691,8 +2693,7 @@ serial8250_set_termios(struct uart_port *port, struct ktermios *termios,
serial8250_do_set_termios(port, termios, old);
}
-static void
-serial8250_set_ldisc(struct uart_port *port, struct ktermios *termios)
+void serial8250_do_set_ldisc(struct uart_port *port, struct ktermios *termios)
{
if (termios->c_line == N_PPS) {
port->flags |= UPF_HARDPPS_CD;
@@ -2708,7 +2709,16 @@ serial8250_set_ldisc(struct uart_port *port, struct ktermios *termios)
}
}
}
+EXPORT_SYMBOL_GPL(serial8250_do_set_ldisc);
+static void
+serial8250_set_ldisc(struct uart_port *port, struct ktermios *termios)
+{
+ if (port->set_ldisc)
+ port->set_ldisc(port, termios);
+ else
+ serial8250_do_set_ldisc(port, termios);
+}
void serial8250_do_pm(struct uart_port *port, unsigned int state,
unsigned int oldstate)
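The serial8250_do_set_ldisc()/set_ldisc split above follows the usual
8250 convention of exporting a default "do_" helper that a driver can
wrap through the new port->set_ldisc hook. A minimal sketch of such an
override (the driver name and the extra handling are hypothetical):

	static void my8250_set_ldisc(struct uart_port *port,
				     struct ktermios *termios)
	{
		/* hypothetical driver-specific work before the default */
		my8250_prepare_pps(port, termios);

		/* then fall back to the generic 8250 behaviour */
		serial8250_do_set_ldisc(port, termios);
	}

	/* installed before registering the port: */
	uart.port.set_ldisc = my8250_set_ldisc;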
diff --git a/drivers/tty/serial/8250/8250_pxa.c b/drivers/tty/serial/8250/8250_pxa.c
new file mode 100644
index 000000000000..4d68731af534
--- /dev/null
+++ b/drivers/tty/serial/8250/8250_pxa.c
@@ -0,0 +1,190 @@
+/*
+ * drivers/tty/serial/8250/8250_pxa.c -- driver for PXA on-board UARTS
+ * Copyright: (C) 2013 Sergei Ianovich <ynvich@gmail.com>
+ *
+ * replaces drivers/serial/pxa.c by Nicolas Pitre
+ * Created: Feb 20, 2003
+ * Copyright: (C) 2003 Monta Vista Software, Inc.
+ *
+ * Based on drivers/serial/8250.c by Russell King.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ */
+
+#include <linux/device.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/serial_8250.h>
+#include <linux/serial_core.h>
+#include <linux/serial_reg.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/clk.h>
+#include <linux/pm_runtime.h>
+
+#include "8250.h"
+
+struct pxa8250_data {
+ int line;
+ struct clk *clk;
+};
+
+static int __maybe_unused serial_pxa_suspend(struct device *dev)
+{
+ struct pxa8250_data *data = dev_get_drvdata(dev);
+
+ serial8250_suspend_port(data->line);
+
+ return 0;
+}
+
+static int __maybe_unused serial_pxa_resume(struct device *dev)
+{
+ struct pxa8250_data *data = dev_get_drvdata(dev);
+
+ serial8250_resume_port(data->line);
+
+ return 0;
+}
+
+static const struct dev_pm_ops serial_pxa_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(serial_pxa_suspend, serial_pxa_resume)
+};
+
+static const struct of_device_id serial_pxa_dt_ids[] = {
+ { .compatible = "mrvl,pxa-uart", },
+ { .compatible = "mrvl,mmp-uart", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, serial_pxa_dt_ids);
+
+/* Uart divisor latch write */
+static void serial_pxa_dl_write(struct uart_8250_port *up, int value)
+{
+ unsigned int dll;
+
+ serial_out(up, UART_DLL, value & 0xff);
+ /*
+ * work around Erratum #74 according to Marvell(R) PXA270M Processor
+ * Specification Update (April 19, 2010)
+ */
+ dll = serial_in(up, UART_DLL);
+ WARN_ON(dll != (value & 0xff));
+
+ serial_out(up, UART_DLM, value >> 8 & 0xff);
+}
+
+
+static void serial_pxa_pm(struct uart_port *port, unsigned int state,
+ unsigned int oldstate)
+{
+ struct pxa8250_data *data = port->private_data;
+
+ if (!state)
+ clk_prepare_enable(data->clk);
+ else
+ clk_disable_unprepare(data->clk);
+}
+
+static int serial_pxa_probe(struct platform_device *pdev)
+{
+ struct uart_8250_port uart = {};
+ struct pxa8250_data *data;
+ struct resource *mmres, *irqres;
+ int ret;
+
+ mmres = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ irqres = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (!mmres || !irqres)
+ return -ENODEV;
+
+ data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(data->clk))
+ return PTR_ERR(data->clk);
+
+ ret = clk_prepare(data->clk);
+ if (ret)
+ return ret;
+
+ uart.port.type = PORT_XSCALE;
+ uart.port.iotype = UPIO_MEM32;
+ uart.port.mapbase = mmres->start;
+ uart.port.regshift = 2;
+ uart.port.irq = irqres->start;
+ uart.port.fifosize = 64;
+ uart.port.flags = UPF_IOREMAP | UPF_SKIP_TEST;
+ uart.port.dev = &pdev->dev;
+ uart.port.uartclk = clk_get_rate(data->clk);
+ uart.port.pm = serial_pxa_pm;
+ uart.port.private_data = data;
+ uart.dl_write = serial_pxa_dl_write;
+
+ ret = serial8250_register_8250_port(&uart);
+ if (ret < 0)
+ goto err_clk;
+
+ data->line = ret;
+
+ platform_set_drvdata(pdev, data);
+
+ return 0;
+
+ err_clk:
+ clk_unprepare(data->clk);
+ return ret;
+}
+
+static int serial_pxa_remove(struct platform_device *pdev)
+{
+ struct pxa8250_data *data = platform_get_drvdata(pdev);
+
+ serial8250_unregister_port(data->line);
+
+ clk_unprepare(data->clk);
+
+ return 0;
+}
+
+static struct platform_driver serial_pxa_driver = {
+ .probe = serial_pxa_probe,
+ .remove = serial_pxa_remove,
+
+ .driver = {
+ .name = "pxa2xx-uart",
+ .pm = &serial_pxa_pm_ops,
+ .of_match_table = serial_pxa_dt_ids,
+ },
+};
+
+module_platform_driver(serial_pxa_driver);
+
+#ifdef CONFIG_SERIAL_8250_CONSOLE
+static int __init early_serial_pxa_setup(struct earlycon_device *device,
+ const char *options)
+{
+ struct uart_port *port = &device->port;
+
+ if (!(device->port.membase || device->port.iobase))
+ return -ENODEV;
+
+ port->regshift = 2;
+ return early_serial8250_setup(device, NULL);
+}
+OF_EARLYCON_DECLARE(early_pxa, "mrvl,pxa-uart", early_serial_pxa_setup);
+#endif
+
+MODULE_AUTHOR("Sergei Ianovich");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:pxa2xx-uart");
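Since OF_EARLYCON_DECLARE() above registers the "early_pxa" name, the
early console can be requested either through a devicetree stdout-path
or explicitly on the command line; the address below is purely
illustrative:

	earlycon=early_pxa,mmio32,0x40100000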
diff --git a/drivers/tty/serial/8250/8250_uniphier.c b/drivers/tty/serial/8250/8250_uniphier.c
index 417d9e7038e1..746680ebf90c 100644
--- a/drivers/tty/serial/8250/8250_uniphier.c
+++ b/drivers/tty/serial/8250/8250_uniphier.c
@@ -24,10 +24,22 @@
/* Most (but not all) of UniPhier UART devices have 64-depth FIFO. */
#define UNIPHIER_UART_DEFAULT_FIFO_SIZE 64
-#define UNIPHIER_UART_CHAR_FCR 3 /* Character / FIFO Control Register */
-#define UNIPHIER_UART_LCR_MCR 4 /* Line/Modem Control Register */
-#define UNIPHIER_UART_LCR_SHIFT 8
-#define UNIPHIER_UART_DLR 9 /* Divisor Latch Register */
+/*
+ * This hardware is similar to 8250, but its register map is a bit different:
+ * - MMIO32 (regshift = 2)
+ * - FCR is not at 2, but 3
+ * - LCR and MCR are not at 3 and 4, they share 4
+ * - Divisor latch at 9, no divisor latch access bit
+ */
+
+#define UNIPHIER_UART_REGSHIFT 2
+
+/* bit[15:8] = CHAR (not used), bit[7:0] = FCR */
+#define UNIPHIER_UART_CHAR_FCR (3 << (UNIPHIER_UART_REGSHIFT))
+/* bit[15:8] = LCR, bit[7:0] = MCR */
+#define UNIPHIER_UART_LCR_MCR (4 << (UNIPHIER_UART_REGSHIFT))
+/* Divisor Latch Register */
+#define UNIPHIER_UART_DLR (9 << (UNIPHIER_UART_REGSHIFT))
struct uniphier8250_priv {
int line;
@@ -44,7 +56,7 @@ static int __init uniphier_early_console_setup(struct earlycon_device *device,
/* This hardware always expects MMIO32 register interface. */
device->port.iotype = UPIO_MEM32;
- device->port.regshift = 2;
+ device->port.regshift = UNIPHIER_UART_REGSHIFT;
/*
* Do not touch the divisor register in early_serial8250_setup();
@@ -68,17 +80,16 @@ static unsigned int uniphier_serial_in(struct uart_port *p, int offset)
switch (offset) {
case UART_LCR:
- valshift = UNIPHIER_UART_LCR_SHIFT;
+ valshift = 8;
/* fall through */
case UART_MCR:
offset = UNIPHIER_UART_LCR_MCR;
break;
default:
+ offset <<= UNIPHIER_UART_REGSHIFT;
break;
}
- offset <<= p->regshift;
-
/*
* The return value must be masked with 0xff because LCR and MCR reside
* in the same register that must be accessed by 32-bit write/read.
@@ -90,27 +101,26 @@ static unsigned int uniphier_serial_in(struct uart_port *p, int offset)
static void uniphier_serial_out(struct uart_port *p, int offset, int value)
{
unsigned int valshift = 0;
- bool normal = false;
+ bool normal = true;
switch (offset) {
case UART_FCR:
offset = UNIPHIER_UART_CHAR_FCR;
break;
case UART_LCR:
- valshift = UNIPHIER_UART_LCR_SHIFT;
+ valshift = 8;
/* Divisor latch access bit does not exist. */
value &= ~UART_LCR_DLAB;
/* fall through */
case UART_MCR:
offset = UNIPHIER_UART_LCR_MCR;
+ normal = false;
break;
default:
- normal = true;
+ offset <<= UNIPHIER_UART_REGSHIFT;
break;
}
- offset <<= p->regshift;
-
if (normal) {
writel(value, p->membase + offset);
} else {
@@ -139,16 +149,12 @@ static void uniphier_serial_out(struct uart_port *p, int offset, int value)
*/
static int uniphier_serial_dl_read(struct uart_8250_port *up)
{
- int offset = UNIPHIER_UART_DLR << up->port.regshift;
-
- return readl(up->port.membase + offset);
+ return readl(up->port.membase + UNIPHIER_UART_DLR);
}
static void uniphier_serial_dl_write(struct uart_8250_port *up, int value)
{
- int offset = UNIPHIER_UART_DLR << up->port.regshift;
-
- writel(value, up->port.membase + offset);
+ writel(value, up->port.membase + UNIPHIER_UART_DLR);
}
static int uniphier_of_serial_setup(struct device *dev, struct uart_port *port,
@@ -234,7 +240,7 @@ static int uniphier_uart_probe(struct platform_device *pdev)
up.port.type = PORT_16550A;
up.port.iotype = UPIO_MEM32;
- up.port.regshift = 2;
+ up.port.regshift = UNIPHIER_UART_REGSHIFT;
up.port.flags = UPF_FIXED_PORT | UPF_FIXED_TYPE;
up.capabilities = UART_CAP_FIFO;
diff --git a/drivers/tty/serial/8250/Kconfig b/drivers/tty/serial/8250/Kconfig
index 899834776b36..0b8b6740ba43 100644
--- a/drivers/tty/serial/8250/Kconfig
+++ b/drivers/tty/serial/8250/Kconfig
@@ -439,6 +439,16 @@ config SERIAL_8250_MOXA
This driver can also be built as a module. The module will be called
8250_moxa. If you want to do that, say M here.
+config SERIAL_8250_PXA
+ tristate "PXA serial port support"
+ depends on SERIAL_8250
+ depends on ARCH_PXA || ARCH_MMP
+ help
+ If you have a machine based on an Intel XScale PXA2xx CPU you can
+ enable its onboard serial ports by enabling this option. The option
+ applies to both devicetree and legacy boards and includes early
+ console support.
+
config SERIAL_OF_PLATFORM
tristate "Devicetree based probing for 8250 ports"
depends on SERIAL_8250 && OF
diff --git a/drivers/tty/serial/8250/Makefile b/drivers/tty/serial/8250/Makefile
index 276c6fb60337..850e721877a9 100644
--- a/drivers/tty/serial/8250/Makefile
+++ b/drivers/tty/serial/8250/Makefile
@@ -31,6 +31,7 @@ obj-$(CONFIG_SERIAL_8250_INGENIC) += 8250_ingenic.o
obj-$(CONFIG_SERIAL_8250_LPSS) += 8250_lpss.o
obj-$(CONFIG_SERIAL_8250_MID) += 8250_mid.o
obj-$(CONFIG_SERIAL_8250_MOXA) += 8250_moxa.o
+obj-$(CONFIG_SERIAL_8250_PXA) += 8250_pxa.o
obj-$(CONFIG_SERIAL_OF_PLATFORM) += 8250_of.o
CFLAGS_8250_ingenic.o += -I$(srctree)/scripts/dtc/libfdt
diff --git a/drivers/tty/serial/Kconfig b/drivers/tty/serial/Kconfig
index 25c1d7bc0100..e9cf5b67f1b7 100644
--- a/drivers/tty/serial/Kconfig
+++ b/drivers/tty/serial/Kconfig
@@ -438,17 +438,27 @@ config SERIAL_MPSC_CONSOLE
Say Y here if you want to support a serial console on a Marvell MPSC.
config SERIAL_PXA
- bool "PXA serial port support"
+ bool "PXA serial port support (DEPRECATED)"
depends on ARCH_PXA || ARCH_MMP
select SERIAL_CORE
+ select SERIAL_8250_PXA if SERIAL_8250=y
+ select SERIAL_PXA_NON8250 if !SERIAL_8250=y
help
If you have a machine based on an Intel XScale PXA2xx CPU you
can enable its onboard serial ports by enabling this option.
+ Unless you have a specific need, you should use SERIAL_8250_PXA
+ instead of this.
+
+config SERIAL_PXA_NON8250
+ bool
+ depends on !SERIAL_8250
+
config SERIAL_PXA_CONSOLE
- bool "Console on PXA serial port"
+ bool "Console on PXA serial port (DEPRECATED)"
depends on SERIAL_PXA
select SERIAL_CORE_CONSOLE
+ select SERIAL_8250_CONSOLE if SERIAL_8250=y
help
If you have enabled the serial port on the Intel XScale PXA
CPU you can make it the console by answering Y to this option.
@@ -460,6 +470,9 @@ config SERIAL_PXA_CONSOLE
your boot loader (lilo or loadlin) about how to pass options to the
kernel at boot time.)
+ Unless you have a specific need, you should use SERIAL_8250_PXA
+ and SERIAL_8250_CONSOLE instead of this.
+
config SERIAL_SA1100
bool "SA1100 serial port support"
depends on ARCH_SA1100
@@ -1626,7 +1639,7 @@ config SERIAL_STM32
tristate "STMicroelectronics STM32 serial port support"
select SERIAL_CORE
depends on HAS_DMA
- depends on ARM || COMPILE_TEST
+ depends on ARCH_STM32 || COMPILE_TEST
help
This driver is for the on-chip Serial Controller on
STMicroelectronics STM32 MCUs.
diff --git a/drivers/tty/serial/Makefile b/drivers/tty/serial/Makefile
index 1278d376da50..2d6288bc4554 100644
--- a/drivers/tty/serial/Makefile
+++ b/drivers/tty/serial/Makefile
@@ -23,7 +23,7 @@ obj-$(CONFIG_SERIAL_8250) += 8250/
obj-$(CONFIG_SERIAL_AMBA_PL010) += amba-pl010.o
obj-$(CONFIG_SERIAL_AMBA_PL011) += amba-pl011.o
obj-$(CONFIG_SERIAL_CLPS711X) += clps711x.o
-obj-$(CONFIG_SERIAL_PXA) += pxa.o
+obj-$(CONFIG_SERIAL_PXA_NON8250) += pxa.o
obj-$(CONFIG_SERIAL_PNX8XXX) += pnx8xxx_uart.o
obj-$(CONFIG_SERIAL_SA1100) += sa1100.o
obj-$(CONFIG_SERIAL_BCM63XX) += bcm63xx_uart.o
@@ -62,13 +62,11 @@ obj-$(CONFIG_SERIAL_ATMEL) += atmel_serial.o
obj-$(CONFIG_SERIAL_UARTLITE) += uartlite.o
obj-$(CONFIG_SERIAL_MSM) += msm_serial.o
obj-$(CONFIG_SERIAL_NETX) += netx-serial.o
-obj-$(CONFIG_SERIAL_KGDB_NMI) += kgdb_nmi.o
obj-$(CONFIG_SERIAL_KS8695) += serial_ks8695.o
obj-$(CONFIG_SERIAL_OMAP) += omap-serial.o
obj-$(CONFIG_SERIAL_ALTERA_UART) += altera_uart.o
obj-$(CONFIG_SERIAL_ST_ASC) += st-asc.o
obj-$(CONFIG_SERIAL_TILEGX) += tilegx.o
-obj-$(CONFIG_KGDB_SERIAL_CONSOLE) += kgdboc.o
obj-$(CONFIG_SERIAL_QE) += ucc_uart.o
obj-$(CONFIG_SERIAL_TIMBERDALE) += timbuart.o
obj-$(CONFIG_SERIAL_GRLIB_GAISLER_APBUART) += apbuart.o
@@ -96,3 +94,6 @@ obj-$(CONFIG_SERIAL_MPS2_UART) += mps2-uart.o
# GPIOLIB helpers for modem control lines
obj-$(CONFIG_SERIAL_MCTRL_GPIO) += serial_mctrl_gpio.o
+
+obj-$(CONFIG_SERIAL_KGDB_NMI) += kgdb_nmi.o
+obj-$(CONFIG_KGDB_SERIAL_CONSOLE) += kgdboc.o
diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c
index e2c33b9528d8..d4171d71a258 100644
--- a/drivers/tty/serial/amba-pl011.c
+++ b/drivers/tty/serial/amba-pl011.c
@@ -2315,12 +2315,67 @@ static int __init pl011_console_setup(struct console *co, char *options)
return uart_set_options(&uap->port, co, baud, parity, bits, flow);
}
+/**
+ * pl011_console_match - non-standard console matching
+ * @co: registering console
+ * @name: name from console command line
+ * @idx: index from console command line
+ * @options: ptr to option string from console command line
+ *
+ * Only attempts to match console command lines of the form:
+ * console=pl011,mmio|mmio32,<addr>[,<options>]
+ * console=pl011,0x<addr>[,<options>]
+ * This form is used to register an initial earlycon boot console and
+ * replace it with the amba_console at pl011 driver init.
+ *
+ * Performs console setup for a match (as required by interface)
+ * If no <options> are specified, then assume the h/w is already set up.
+ *
+ * Returns 0 if console matches; otherwise non-zero to use default matching
+ */
+static int __init pl011_console_match(struct console *co, char *name, int idx,
+ char *options)
+{
+ unsigned char iotype;
+ resource_size_t addr;
+ int i;
+
+ if (strcmp(name, "pl011") != 0)
+ return -ENODEV;
+
+ if (uart_parse_earlycon(options, &iotype, &addr, &options))
+ return -ENODEV;
+
+ if (iotype != UPIO_MEM && iotype != UPIO_MEM32)
+ return -ENODEV;
+
+ /* try to match the port specified on the command line */
+ for (i = 0; i < ARRAY_SIZE(amba_ports); i++) {
+ struct uart_port *port;
+
+ if (!amba_ports[i])
+ continue;
+
+ port = &amba_ports[i]->port;
+
+ if (port->mapbase != addr)
+ continue;
+
+ co->index = i;
+ port->cons = co;
+ return pl011_console_setup(co, options);
+ }
+
+ return -ENODEV;
+}
+
static struct uart_driver amba_reg;
static struct console amba_console = {
.name = "ttyAMA",
.write = pl011_console_write,
.device = uart_console_device,
.setup = pl011_console_setup,
+ .match = pl011_console_match,
.flags = CON_PRINTBUFFER,
.index = -1,
.data = &amba_reg,
@@ -2357,6 +2412,7 @@ static int __init pl011_early_console_setup(struct earlycon_device *device,
return 0;
}
OF_EARLYCON_DECLARE(pl011, "arm,pl011", pl011_early_console_setup);
+OF_EARLYCON_DECLARE(pl011, "arm,sbsa-uart", pl011_early_console_setup);
#else
#define AMBA_CONSOLE NULL
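Taken together with the kernel-doc above, the new matcher lets an early
pl011 console hand over seamlessly to the ttyAMA console at driver
init. A typical command line using the documented form (the MMIO
address is only an example) would be:

	earlycon=pl011,mmio32,0x9000000 console=pl011,mmio32,0x9000000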
diff --git a/drivers/tty/serial/crisv10.c b/drivers/tty/serial/crisv10.c
index 315c84979b18..e92c23470e51 100644
--- a/drivers/tty/serial/crisv10.c
+++ b/drivers/tty/serial/crisv10.c
@@ -28,7 +28,6 @@ static char *serial_version = "$Revision: 1.25 $";
#include <linux/bitops.h>
#include <linux/seq_file.h>
#include <linux/delay.h>
-#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/io.h>
@@ -3214,8 +3213,6 @@ get_serial_info(struct e100_serial * info,
* should set them to something else than 0.
*/
- if (!retinfo)
- return -EFAULT;
memset(&tmp, 0, sizeof(tmp));
tmp.type = info->type;
tmp.line = info->line;
@@ -4098,7 +4095,7 @@ static void show_serial_version(void)
&serial_version[11]); /* "$Revision: x.yy" */
}
-/* rs_init inits the driver at boot (using the module_init chain) */
+/* rs_init inits the driver at boot (using the initcall chain) */
static const struct tty_operations rs_ops = {
.open = rs_open,
@@ -4247,5 +4244,4 @@ static int __init rs_init(void)
}
/* this makes sure that rs_init is called during kernel boot */
-
-module_init(rs_init);
+device_initcall(rs_init);
diff --git a/drivers/tty/serial/fsl_lpuart.c b/drivers/tty/serial/fsl_lpuart.c
index 76103f2c4a80..a1c6519837a4 100644
--- a/drivers/tty/serial/fsl_lpuart.c
+++ b/drivers/tty/serial/fsl_lpuart.c
@@ -430,6 +430,65 @@ static void lpuart_flush_buffer(struct uart_port *port)
}
}
+#if defined(CONFIG_CONSOLE_POLL)
+
+static int lpuart_poll_init(struct uart_port *port)
+{
+ struct lpuart_port *sport = container_of(port,
+ struct lpuart_port, port);
+ unsigned long flags;
+ unsigned char temp;
+
+ sport->port.fifosize = 0;
+
+ spin_lock_irqsave(&sport->port.lock, flags);
+ /* Disable Rx & Tx */
+ writeb(0, sport->port.membase + UARTCR2);
+
+ temp = readb(sport->port.membase + UARTPFIFO);
+ /* Enable Rx and Tx FIFO */
+ writeb(temp | UARTPFIFO_RXFE | UARTPFIFO_TXFE,
+ sport->port.membase + UARTPFIFO);
+
+ /* flush Tx and Rx FIFO */
+ writeb(UARTCFIFO_TXFLUSH | UARTCFIFO_RXFLUSH,
+ sport->port.membase + UARTCFIFO);
+
+ /* explicitly clear RDRF */
+ if (readb(sport->port.membase + UARTSR1) & UARTSR1_RDRF) {
+ readb(sport->port.membase + UARTDR);
+ writeb(UARTSFIFO_RXUF, sport->port.membase + UARTSFIFO);
+ }
+
+ writeb(0, sport->port.membase + UARTTWFIFO);
+ writeb(1, sport->port.membase + UARTRWFIFO);
+
+ /* Enable Rx and Tx */
+ writeb(UARTCR2_RE | UARTCR2_TE, sport->port.membase + UARTCR2);
+ spin_unlock_irqrestore(&sport->port.lock, flags);
+
+ return 0;
+}
+
+static void lpuart_poll_put_char(struct uart_port *port, unsigned char c)
+{
+ /* drain */
+ while (!(readb(port->membase + UARTSR1) & UARTSR1_TDRE))
+ barrier();
+
+ writeb(c, port->membase + UARTDR);
+}
+
+static int lpuart_poll_get_char(struct uart_port *port)
+{
+ if (!(readb(port->membase + UARTSR1) & UARTSR1_RDRF))
+ return NO_POLL_CHAR;
+
+ return readb(port->membase + UARTDR);
+}
+
+#endif
+
static inline void lpuart_transmit_buffer(struct lpuart_port *sport)
{
struct circ_buf *xmit = &sport->port.state->xmit;
@@ -1595,6 +1654,11 @@ static const struct uart_ops lpuart_pops = {
.config_port = lpuart_config_port,
.verify_port = lpuart_verify_port,
.flush_buffer = lpuart_flush_buffer,
+#if defined(CONFIG_CONSOLE_POLL)
+ .poll_init = lpuart_poll_init,
+ .poll_get_char = lpuart_poll_get_char,
+ .poll_put_char = lpuart_poll_put_char,
+#endif
};
static const struct uart_ops lpuart32_pops = {
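The poll_init/poll_get_char/poll_put_char hooks wired up above are what
the kgdb console machinery (kgdboc) uses for polled, interrupt-free
I/O. Assuming the LPUART registers as ttyLP0, it could be selected as
the debug console with a boot parameter such as:

	kgdboc=ttyLP0,115200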
diff --git a/drivers/tty/serial/ifx6x60.c b/drivers/tty/serial/ifx6x60.c
index d386346248de..157883653256 100644
--- a/drivers/tty/serial/ifx6x60.c
+++ b/drivers/tty/serial/ifx6x60.c
@@ -1042,6 +1042,7 @@ static int ifx_spi_spi_probe(struct spi_device *spi)
ret = spi_setup(spi);
if (ret) {
dev_err(&spi->dev, "SPI setup wasn't successful %d", ret);
+ kfree(ifx_dev);
return -ENODEV;
}
diff --git a/drivers/tty/serial/ioc4_serial.c b/drivers/tty/serial/ioc4_serial.c
index e5c42fef69d2..3be051abb2a2 100644
--- a/drivers/tty/serial/ioc4_serial.c
+++ b/drivers/tty/serial/ioc4_serial.c
@@ -1082,7 +1082,7 @@ static int inline ioc4_attach_local(struct ioc4_driver_data *idd)
if (!port) {
printk(KERN_WARNING
"IOC4 serial memory not available for port\n");
- return -ENOMEM;
+ goto free;
}
spin_lock_init(&port->ip_lock);
@@ -1190,6 +1190,11 @@ static int inline ioc4_attach_local(struct ioc4_driver_data *idd)
handle_dma_error_intr, port);
}
return 0;
+
+free:
+ while (port_number)
+ kfree(ports[--port_number]);
+ return -ENOMEM;
}
/**
diff --git a/drivers/tty/serial/mxs-auart.c b/drivers/tty/serial/mxs-auart.c
index 770454e0dfa3..8c1c9112b3fd 100644
--- a/drivers/tty/serial/mxs-auart.c
+++ b/drivers/tty/serial/mxs-auart.c
@@ -1016,7 +1016,7 @@ static void mxs_auart_settermios(struct uart_port *u,
ctrl |= AUART_LINECTRL_EPS;
}
- u->read_status_mask = 0;
+ u->read_status_mask = AUART_STAT_OERR;
if (termios->c_iflag & INPCK)
u->read_status_mask |= AUART_STAT_PERR;
diff --git a/drivers/tty/serial/pxa.c b/drivers/tty/serial/pxa.c
index cd9d9e878475..75952811c0da 100644
--- a/drivers/tty/serial/pxa.c
+++ b/drivers/tty/serial/pxa.c
@@ -925,6 +925,8 @@ static struct platform_driver serial_pxa_driver = {
},
};
+
+/* The 8250 driver for PXA serial ports should be used instead */
static int __init serial_pxa_init(void)
{
int ret;
diff --git a/drivers/tty/serial/sc16is7xx.c b/drivers/tty/serial/sc16is7xx.c
index fb0672554123..793395451982 100644
--- a/drivers/tty/serial/sc16is7xx.c
+++ b/drivers/tty/serial/sc16is7xx.c
@@ -1264,7 +1264,7 @@ static int sc16is7xx_probe(struct device *dev,
/* Setup interrupt */
ret = devm_request_irq(dev, irq, sc16is7xx_irq,
- IRQF_ONESHOT | flags, dev_name(dev), s);
+ flags, dev_name(dev), s);
if (!ret)
return 0;
diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
index f2303f390345..d0847375ea64 100644
--- a/drivers/tty/serial/serial_core.c
+++ b/drivers/tty/serial/serial_core.c
@@ -73,7 +73,7 @@ static inline struct uart_port *uart_port_ref(struct uart_state *state)
static inline void uart_port_deref(struct uart_port *uport)
{
- if (uport && atomic_dec_and_test(&uport->state->refcount))
+ if (atomic_dec_and_test(&uport->state->refcount))
wake_up(&uport->state->remove_wait);
}
@@ -88,9 +88,10 @@ static inline void uart_port_deref(struct uart_port *uport)
#define uart_port_unlock(uport, flags) \
({ \
struct uart_port *__uport = uport; \
- if (__uport) \
+ if (__uport) { \
spin_unlock_irqrestore(&__uport->lock, flags); \
- uart_port_deref(__uport); \
+ uart_port_deref(__uport); \
+ } \
})
static inline struct uart_port *uart_port_check(struct uart_state *state)
@@ -1515,7 +1516,10 @@ static void uart_wait_until_sent(struct tty_struct *tty, int timeout)
unsigned long char_time, expire;
port = uart_port_ref(state);
- if (!port || port->type == PORT_UNKNOWN || port->fifosize == 0) {
+ if (!port)
+ return;
+
+ if (port->type == PORT_UNKNOWN || port->fifosize == 0) {
uart_port_deref(port);
return;
}
@@ -2365,9 +2369,10 @@ static int uart_poll_get_char(struct tty_driver *driver, int line)
if (state) {
port = uart_port_ref(state);
- if (port)
+ if (port) {
ret = port->ops->poll_get_char(port);
- uart_port_deref(port);
+ uart_port_deref(port);
+ }
}
return ret;
}
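All three hunks above enforce the same invariant: uart_port_deref() is
called exactly once for each successful uart_port_ref(), and never with
a NULL port now that the NULL check has moved to the callers. The
corrected calling pattern, with a hypothetical use_port() standing in
for the real work:

	struct uart_port *port = uart_port_ref(state);

	if (port) {
		use_port(port);		/* hypothetical work */
		uart_port_deref(port);	/* drop the reference taken above */
	}
	/* no deref on the !port path: no reference was taken */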
diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c
index 4b26252c2885..91e7dddbf72c 100644
--- a/drivers/tty/serial/sh-sci.c
+++ b/drivers/tty/serial/sh-sci.c
@@ -1142,11 +1142,8 @@ static int sci_dma_rx_push(struct sci_port *s, void *buf, size_t count)
int copied;
copied = tty_insert_flip_string(tport, buf, count);
- if (copied < count) {
- dev_warn(port->dev, "Rx overrun: dropping %zu bytes\n",
- count - copied);
+ if (copied < count)
port->icount.buf_overrun++;
- }
port->icount.rx += copied;
@@ -1161,8 +1158,6 @@ static int sci_dma_rx_find_active(struct sci_port *s)
if (s->active_rx == s->cookie_rx[i])
return i;
- dev_err(s->port.dev, "%s: Rx cookie %d not found!\n", __func__,
- s->active_rx);
return -1;
}
@@ -1223,9 +1218,9 @@ static void sci_dma_rx_complete(void *arg)
dma_async_issue_pending(chan);
+ spin_unlock_irqrestore(&port->lock, flags);
dev_dbg(port->dev, "%s: cookie %d #%d, new active cookie %d\n",
__func__, s->cookie_rx[active], active, s->active_rx);
- spin_unlock_irqrestore(&port->lock, flags);
return;
fail:
@@ -1273,8 +1268,6 @@ static void sci_submit_rx(struct sci_port *s)
if (dma_submit_error(s->cookie_rx[i]))
goto fail;
- dev_dbg(s->port.dev, "%s(): cookie %d to #%d\n", __func__,
- s->cookie_rx[i], i);
}
s->active_rx = s->cookie_rx[0];
@@ -1288,7 +1281,6 @@ fail:
for (i = 0; i < 2; i++)
s->cookie_rx[i] = -EINVAL;
s->active_rx = -EINVAL;
- dev_warn(s->port.dev, "Failed to re-start Rx DMA, using PIO\n");
sci_rx_dma_release(s, true);
}
@@ -1358,10 +1350,10 @@ static void rx_timer_fn(unsigned long arg)
int active, count;
u16 scr;
- spin_lock_irqsave(&port->lock, flags);
-
dev_dbg(port->dev, "DMA Rx timed out\n");
+ spin_lock_irqsave(&port->lock, flags);
+
active = sci_dma_rx_find_active(s);
if (active < 0) {
spin_unlock_irqrestore(&port->lock, flags);
@@ -1370,9 +1362,9 @@ static void rx_timer_fn(unsigned long arg)
status = dmaengine_tx_status(s->chan_rx, s->active_rx, &state);
if (status == DMA_COMPLETE) {
+ spin_unlock_irqrestore(&port->lock, flags);
dev_dbg(port->dev, "Cookie %d #%d has already completed\n",
s->active_rx, active);
- spin_unlock_irqrestore(&port->lock, flags);
/* Let packet complete handler take care of the packet */
return;
@@ -1396,8 +1388,6 @@ static void rx_timer_fn(unsigned long arg)
/* Handle incomplete DMA receive */
dmaengine_terminate_all(s->chan_rx);
read = sg_dma_len(&s->sg_rx[active]) - state.residue;
- dev_dbg(port->dev, "Read %u bytes with cookie %d\n", read,
- s->active_rx);
if (read) {
count = sci_dma_rx_push(s, s->rx_buf[active], read);
diff --git a/drivers/tty/serial/sunhv.c b/drivers/tty/serial/sunhv.c
index 4e603d060e80..99ef5c6e4766 100644
--- a/drivers/tty/serial/sunhv.c
+++ b/drivers/tty/serial/sunhv.c
@@ -598,7 +598,8 @@ static int hv_remove(struct platform_device *dev)
uart_remove_one_port(&sunhv_reg, port);
sunserial_unregister_minors(&sunhv_reg, 1);
-
+ kfree(con_read_page);
+ kfree(con_write_page);
kfree(port);
sunhv_port = NULL;
diff --git a/drivers/tty/serial/sunsu.c b/drivers/tty/serial/sunsu.c
index 9ad98eaa35bf..72df2e1b88af 100644
--- a/drivers/tty/serial/sunsu.c
+++ b/drivers/tty/serial/sunsu.c
@@ -1500,6 +1500,7 @@ static int su_probe(struct platform_device *op)
out_unmap:
of_iounmap(&op->resource[0], up->port.membase, up->reg_size);
+ kfree(up);
return err;
}
diff --git a/drivers/tty/synclink.c b/drivers/tty/synclink.c
index c13e27ecb0b7..415885c56435 100644
--- a/drivers/tty/synclink.c
+++ b/drivers/tty/synclink.c
@@ -7973,7 +7973,6 @@ static void hdlcdev_rx(struct mgsl_struct *info, char *buf, int size)
static const struct net_device_ops hdlcdev_ops = {
.ndo_open = hdlcdev_open,
.ndo_stop = hdlcdev_close,
- .ndo_change_mtu = hdlc_change_mtu,
.ndo_start_xmit = hdlc_start_xmit,
.ndo_do_ioctl = hdlcdev_ioctl,
.ndo_tx_timeout = hdlcdev_tx_timeout,
diff --git a/drivers/tty/synclink_gt.c b/drivers/tty/synclink_gt.c
index 7aca2d4670e4..8267bcf2405e 100644
--- a/drivers/tty/synclink_gt.c
+++ b/drivers/tty/synclink_gt.c
@@ -1768,7 +1768,6 @@ static void hdlcdev_rx(struct slgt_info *info, char *buf, int size)
static const struct net_device_ops hdlcdev_ops = {
.ndo_open = hdlcdev_open,
.ndo_stop = hdlcdev_close,
- .ndo_change_mtu = hdlc_change_mtu,
.ndo_start_xmit = hdlc_start_xmit,
.ndo_do_ioctl = hdlcdev_ioctl,
.ndo_tx_timeout = hdlcdev_tx_timeout,
diff --git a/drivers/tty/synclinkmp.c b/drivers/tty/synclinkmp.c
index dec156586de1..d66620f7eaa3 100644
--- a/drivers/tty/synclinkmp.c
+++ b/drivers/tty/synclinkmp.c
@@ -1887,7 +1887,6 @@ static void hdlcdev_rx(SLMP_INFO *info, char *buf, int size)
static const struct net_device_ops hdlcdev_ops = {
.ndo_open = hdlcdev_open,
.ndo_stop = hdlcdev_close,
- .ndo_change_mtu = hdlc_change_mtu,
.ndo_start_xmit = hdlc_start_xmit,
.ndo_do_ioctl = hdlcdev_ioctl,
.ndo_tx_timeout = hdlcdev_tx_timeout,
diff --git a/drivers/tty/vt/consolemap.c b/drivers/tty/vt/consolemap.c
index 9d7ab7b66a8a..71e81406ef71 100644
--- a/drivers/tty/vt/consolemap.c
+++ b/drivers/tty/vt/consolemap.c
@@ -9,6 +9,17 @@
* Support for multiple unimaps by Jakub Jelinek <jj@ultra.linux.cz>, July 1998
*
* Fix bug in inverse translation. Stanislav Voronyi <stas@cnti.uanet.kharkov.ua>, Dec 1998
+ *
+ * In order to prevent the following circular lock dependency:
+ * &mm->mmap_sem --> cpu_hotplug.lock --> console_lock --> &mm->mmap_sem
+ *
+ * We cannot allow page fault to happen while holding the console_lock.
+ * Therefore, all the userspace copy operations have to be done outside
+ * the console_lock critical sections.
+ *
+ * As the affected functions are all called directly from vt_ioctl(), we
+ * can allocate some small buffers directly on the stack without worrying
+ * about stack overflow.
*/
#include <linux/module.h>
@@ -22,6 +33,7 @@
#include <linux/console.h>
#include <linux/consolemap.h>
#include <linux/vt_kern.h>
+#include <linux/string.h>
static unsigned short translations[][256] = {
/* 8-bit Latin-1 mapped to Unicode -- trivial mapping */
@@ -309,18 +321,19 @@ static void update_user_maps(void)
int con_set_trans_old(unsigned char __user * arg)
{
int i;
- unsigned short *p = translations[USER_MAP];
+ unsigned short inbuf[E_TABSZ];
if (!access_ok(VERIFY_READ, arg, E_TABSZ))
return -EFAULT;
- console_lock();
- for (i=0; i<E_TABSZ ; i++) {
+ for (i = 0; i < E_TABSZ ; i++) {
unsigned char uc;
__get_user(uc, arg+i);
- p[i] = UNI_DIRECT_BASE | uc;
+ inbuf[i] = UNI_DIRECT_BASE | uc;
}
+ console_lock();
+ memcpy(translations[USER_MAP], inbuf, sizeof(inbuf));
update_user_maps();
console_unlock();
return 0;
@@ -330,35 +343,37 @@ int con_get_trans_old(unsigned char __user * arg)
{
int i, ch;
unsigned short *p = translations[USER_MAP];
+ unsigned char outbuf[E_TABSZ];
if (!access_ok(VERIFY_WRITE, arg, E_TABSZ))
return -EFAULT;
console_lock();
- for (i=0; i<E_TABSZ ; i++)
+ for (i = 0; i < E_TABSZ ; i++)
{
ch = conv_uni_to_pc(vc_cons[fg_console].d, p[i]);
- __put_user((ch & ~0xff) ? 0 : ch, arg+i);
+ outbuf[i] = (ch & ~0xff) ? 0 : ch;
}
console_unlock();
+
+ for (i = 0; i < E_TABSZ ; i++)
+ __put_user(outbuf[i], arg+i);
return 0;
}
int con_set_trans_new(ushort __user * arg)
{
int i;
- unsigned short *p = translations[USER_MAP];
+ unsigned short inbuf[E_TABSZ];
if (!access_ok(VERIFY_READ, arg, E_TABSZ*sizeof(unsigned short)))
return -EFAULT;
- console_lock();
- for (i=0; i<E_TABSZ ; i++) {
- unsigned short us;
- __get_user(us, arg+i);
- p[i] = us;
- }
+ for (i = 0; i < E_TABSZ ; i++)
+ __get_user(inbuf[i], arg+i);
+ console_lock();
+ memcpy(translations[USER_MAP], inbuf, sizeof(inbuf));
update_user_maps();
console_unlock();
return 0;
@@ -367,16 +382,17 @@ int con_set_trans_new(ushort __user * arg)
int con_get_trans_new(ushort __user * arg)
{
int i;
- unsigned short *p = translations[USER_MAP];
+ unsigned short outbuf[E_TABSZ];
if (!access_ok(VERIFY_WRITE, arg, E_TABSZ*sizeof(unsigned short)))
return -EFAULT;
console_lock();
- for (i=0; i<E_TABSZ ; i++)
- __put_user(p[i], arg+i);
+ memcpy(outbuf, translations[USER_MAP], sizeof(outbuf));
console_unlock();
-
+
+ for (i = 0; i < E_TABSZ ; i++)
+ __put_user(outbuf[i], arg+i);
return 0;
}
@@ -536,10 +552,20 @@ int con_set_unimap(struct vc_data *vc, ushort ct, struct unipair __user *list)
{
int err = 0, err1, i;
struct uni_pagedir *p, *q;
+ struct unipair *unilist, *plist;
if (!ct)
return 0;
+ unilist = kmalloc_array(ct, sizeof(struct unipair), GFP_KERNEL);
+ if (!unilist)
+ return -ENOMEM;
+
+ for (i = ct, plist = unilist; i; i--, plist++, list++) {
+ __get_user(plist->unicode, &list->unicode);
+ __get_user(plist->fontpos, &list->fontpos);
+ }
+
console_lock();
/* Save original vc_unipagdir_loc in case we allocate a new one */
@@ -557,8 +583,8 @@ int con_set_unimap(struct vc_data *vc, ushort ct, struct unipair __user *list)
err1 = con_do_clear_unimap(vc);
if (err1) {
- console_unlock();
- return err1;
+ err = err1;
+ goto out_unlock;
}
/*
@@ -592,8 +618,8 @@ int con_set_unimap(struct vc_data *vc, ushort ct, struct unipair __user *list)
*vc->vc_uni_pagedir_loc = p;
con_release_unimap(q);
kfree(q);
- console_unlock();
- return err1;
+ err = err1;
+ goto out_unlock;
}
}
} else {
@@ -617,22 +643,17 @@ int con_set_unimap(struct vc_data *vc, ushort ct, struct unipair __user *list)
/*
* Insert user specified unicode pairs into new table.
*/
- while (ct--) {
- unsigned short unicode, fontpos;
- __get_user(unicode, &list->unicode);
- __get_user(fontpos, &list->fontpos);
- if ((err1 = con_insert_unipair(p, unicode,fontpos)) != 0)
+ for (plist = unilist; ct; ct--, plist++) {
+ err1 = con_insert_unipair(p, plist->unicode, plist->fontpos);
+ if (err1)
err = err1;
- list++;
}
/*
* Merge with fontmaps of any other virtual consoles.
*/
- if (con_unify_unimap(vc, p)) {
- console_unlock();
- return err;
- }
+ if (con_unify_unimap(vc, p))
+ goto out_unlock;
for (i = 0; i <= 3; i++)
set_inverse_transl(vc, p, i); /* Update inverse translations */
@@ -640,6 +661,7 @@ int con_set_unimap(struct vc_data *vc, ushort ct, struct unipair __user *list)
out_unlock:
console_unlock();
+ kfree(unilist);
return err;
}
@@ -735,9 +757,15 @@ EXPORT_SYMBOL(con_copy_unimap);
*/
int con_get_unimap(struct vc_data *vc, ushort ct, ushort __user *uct, struct unipair __user *list)
{
- int i, j, k, ect;
+ int i, j, k;
+ ushort ect;
u16 **p1, *p2;
struct uni_pagedir *p;
+ struct unipair *unilist, *plist;
+
+ unilist = kmalloc_array(ct, sizeof(struct unipair), GFP_KERNEL);
+ if (!unilist)
+ return -ENOMEM;
console_lock();
@@ -750,21 +778,26 @@ int con_get_unimap(struct vc_data *vc, ushort ct, ushort __user *uct, struct uni
for (j = 0; j < 32; j++) {
p2 = *(p1++);
if (p2)
- for (k = 0; k < 64; k++) {
- if (*p2 < MAX_GLYPH && ect++ < ct) {
- __put_user((u_short)((i<<11)+(j<<6)+k),
- &list->unicode);
- __put_user((u_short) *p2,
- &list->fontpos);
- list++;
+ for (k = 0; k < 64; k++, p2++) {
+ if (*p2 >= MAX_GLYPH)
+ continue;
+ if (ect < ct) {
+ unilist[ect].unicode =
+ (i<<11)+(j<<6)+k;
+ unilist[ect].fontpos = *p2;
}
- p2++;
+ ect++;
}
}
}
}
- __put_user(ect, uct);
console_unlock();
+ for (i = min(ect, ct), plist = unilist; i; i--, list++, plist++) {
+ __put_user(plist->unicode, &list->unicode);
+ __put_user(plist->fontpos, &list->fontpos);
+ }
+ __put_user(ect, uct);
+ kfree(unilist);
return ((ect <= ct) ? 0 : -ENOMEM);
}
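Every conversion in this file follows the staging pattern described in
the header comment: user memory is copied to or from a temporary buffer
outside the console_lock critical section, and only memcpy() runs under
the lock, so no page fault can occur while it is held. Condensed from
con_set_trans_new() above:

	unsigned short inbuf[E_TABSZ];

	/* may fault: done before taking console_lock */
	for (i = 0; i < E_TABSZ; i++)
		__get_user(inbuf[i], arg + i);

	console_lock();
	memcpy(translations[USER_MAP], inbuf, sizeof(inbuf));	/* cannot fault */
	update_user_maps();
	console_unlock();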
diff --git a/drivers/tty/vt/keyboard.c b/drivers/tty/vt/keyboard.c
index 0f8caae4267d..3dd6a491cdba 100644
--- a/drivers/tty/vt/keyboard.c
+++ b/drivers/tty/vt/keyboard.c
@@ -982,7 +982,7 @@ static void kbd_led_trigger_activate(struct led_classdev *cdev)
KBD_LED_TRIGGER((_led_bit) + 8, _name)
static struct kbd_led_trigger kbd_led_triggers[] = {
- KBD_LED_TRIGGER(VC_SCROLLOCK, "kbd-scrollock"),
+ KBD_LED_TRIGGER(VC_SCROLLOCK, "kbd-scrolllock"),
KBD_LED_TRIGGER(VC_NUMLOCK, "kbd-numlock"),
KBD_LED_TRIGGER(VC_CAPSLOCK, "kbd-capslock"),
KBD_LED_TRIGGER(VC_KANALOCK, "kbd-kanalock"),
@@ -1256,7 +1256,7 @@ static int emulate_raw(struct vc_data *vc, unsigned int keycode,
case KEY_SYSRQ:
/*
* Real AT keyboards (that's what we're trying
- * to emulate here emit 0xe0 0x2a 0xe0 0x37 when
+ * to emulate here) emit 0xe0 0x2a 0xe0 0x37 when
* pressing PrtSc/SysRq alone, but simply 0x54
* when pressing Alt+PrtSc/SysRq.
*/
diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
index 8c3bf3d613c0..4c10a9df3b91 100644
--- a/drivers/tty/vt/vt.c
+++ b/drivers/tty/vt/vt.c
@@ -315,38 +315,27 @@ void schedule_console_callback(void)
schedule_work(&console_work);
}
-static void scrup(struct vc_data *vc, unsigned int t, unsigned int b, int nr)
+static void con_scroll(struct vc_data *vc, unsigned int t, unsigned int b,
+ enum con_scroll dir, unsigned int nr)
{
- unsigned short *d, *s;
+ u16 *clear, *d, *s;
- if (t+nr >= b)
+ if (t + nr >= b)
nr = b - t - 1;
if (b > vc->vc_rows || t >= b || nr < 1)
return;
- if (con_is_visible(vc) && vc->vc_sw->con_scroll(vc, t, b, SM_UP, nr))
+ if (con_is_visible(vc) && vc->vc_sw->con_scroll(vc, t, b, dir, nr))
return;
- d = (unsigned short *)(vc->vc_origin + vc->vc_size_row * t);
- s = (unsigned short *)(vc->vc_origin + vc->vc_size_row * (t + nr));
- scr_memmovew(d, s, (b - t - nr) * vc->vc_size_row);
- scr_memsetw(d + (b - t - nr) * vc->vc_cols, vc->vc_video_erase_char,
- vc->vc_size_row * nr);
-}
-static void scrdown(struct vc_data *vc, unsigned int t, unsigned int b, int nr)
-{
- unsigned short *s;
- unsigned int step;
+ s = clear = (u16 *)(vc->vc_origin + vc->vc_size_row * t);
+ d = (u16 *)(vc->vc_origin + vc->vc_size_row * (t + nr));
- if (t+nr >= b)
- nr = b - t - 1;
- if (b > vc->vc_rows || t >= b || nr < 1)
- return;
- if (con_is_visible(vc) && vc->vc_sw->con_scroll(vc, t, b, SM_DOWN, nr))
- return;
- s = (unsigned short *)(vc->vc_origin + vc->vc_size_row * t);
- step = vc->vc_cols * nr;
- scr_memmovew(s + step, s, (b - t - nr) * vc->vc_size_row);
- scr_memsetw(s, vc->vc_video_erase_char, 2 * step);
+ if (dir == SM_UP) {
+ clear = s + (b - t - nr) * vc->vc_cols;
+ swap(s, d);
+ }
+ scr_memmovew(d, s, (b - t - nr) * vc->vc_size_row);
+ scr_memsetw(clear, vc->vc_video_erase_char, vc->vc_size_row * nr);
}
static void do_update_region(struct vc_data *vc, unsigned long start, int count)
@@ -1120,7 +1109,7 @@ static void lf(struct vc_data *vc)
* if below scrolling region
*/
if (vc->vc_y + 1 == vc->vc_bottom)
- scrup(vc, vc->vc_top, vc->vc_bottom, 1);
+ con_scroll(vc, vc->vc_top, vc->vc_bottom, SM_UP, 1);
else if (vc->vc_y < vc->vc_rows - 1) {
vc->vc_y++;
vc->vc_pos += vc->vc_size_row;
@@ -1135,7 +1124,7 @@ static void ri(struct vc_data *vc)
* if above scrolling region
*/
if (vc->vc_y == vc->vc_top)
- scrdown(vc, vc->vc_top, vc->vc_bottom, 1);
+ con_scroll(vc, vc->vc_top, vc->vc_bottom, SM_DOWN, 1);
else if (vc->vc_y > 0) {
vc->vc_y--;
vc->vc_pos -= vc->vc_size_row;
@@ -1631,7 +1620,7 @@ static void csi_L(struct vc_data *vc, unsigned int nr)
nr = vc->vc_rows - vc->vc_y;
else if (!nr)
nr = 1;
- scrdown(vc, vc->vc_y, vc->vc_bottom, nr);
+ con_scroll(vc, vc->vc_y, vc->vc_bottom, SM_DOWN, nr);
vc->vc_need_wrap = 0;
}
@@ -1652,7 +1641,7 @@ static void csi_M(struct vc_data *vc, unsigned int nr)
nr = vc->vc_rows - vc->vc_y;
else if (!nr)
nr=1;
- scrup(vc, vc->vc_y, vc->vc_bottom, nr);
+ con_scroll(vc, vc->vc_y, vc->vc_bottom, SM_UP, nr);
vc->vc_need_wrap = 0;
}
@@ -3934,10 +3923,6 @@ void unblank_screen(void)
*/
static void blank_screen_t(unsigned long dummy)
{
- if (unlikely(!keventd_up())) {
- mod_timer(&console_timer, jiffies + (blankinterval * HZ));
- return;
- }
blank_timer_expired = 1;
schedule_work(&console_work);
}
@@ -4295,6 +4280,46 @@ void vcs_scr_updated(struct vc_data *vc)
notify_update(vc);
}
+void vc_scrolldelta_helper(struct vc_data *c, int lines,
+ unsigned int rolled_over, void *base, unsigned int size)
+{
+ unsigned long ubase = (unsigned long)base;
+ ptrdiff_t scr_end = (void *)c->vc_scr_end - base;
+ ptrdiff_t vorigin = (void *)c->vc_visible_origin - base;
+ ptrdiff_t origin = (void *)c->vc_origin - base;
+ int margin = c->vc_size_row * 4;
+ int from, wrap, from_off, avail;
+
+ /* Turn scrollback off */
+ if (!lines) {
+ c->vc_visible_origin = c->vc_origin;
+ return;
+ }
+
+ /* Do we already have enough to allow jumping from 0 to the end? */
+ if (rolled_over > scr_end + margin) {
+ from = scr_end;
+ wrap = rolled_over + c->vc_size_row;
+ } else {
+ from = 0;
+ wrap = size;
+ }
+
+ from_off = (vorigin - from + wrap) % wrap + lines * c->vc_size_row;
+ avail = (origin - from + wrap) % wrap;
+
+ /* Only a little piece would be left? Show all of it, incl. the piece! */
+ if (avail < 2 * margin)
+ margin = 0;
+ if (from_off < margin)
+ from_off = 0;
+ if (from_off > avail - margin)
+ from_off = avail;
+
+ c->vc_visible_origin = ubase + (from + from_off) % wrap;
+}
+EXPORT_SYMBOL_GPL(vc_scrolldelta_helper);
+
/*
* Visible symbols for modules
*/
diff --git a/drivers/uio/Kconfig b/drivers/uio/Kconfig
index 52c98ce1b6fe..7e8dc78a9796 100644
--- a/drivers/uio/Kconfig
+++ b/drivers/uio/Kconfig
@@ -155,4 +155,13 @@ config UIO_MF624
If you compile this as a module, it will be called uio_mf624.
+config UIO_HV_GENERIC
+ tristate "Generic driver for Hyper-V VMBus"
+ depends on HYPERV
+ help
+ Generic driver that you can bind dynamically to any
+ Hyper-V VMBus device. It is useful for providing userspace
+ with direct access to network and storage devices.
+
+ If you compile this as a module, it will be called uio_hv_generic.
endif
diff --git a/drivers/uio/Makefile b/drivers/uio/Makefile
index 8560dad52d0f..e9663bb8a4c7 100644
--- a/drivers/uio/Makefile
+++ b/drivers/uio/Makefile
@@ -9,3 +9,4 @@ obj-$(CONFIG_UIO_NETX) += uio_netx.o
obj-$(CONFIG_UIO_PRUSS) += uio_pruss.o
obj-$(CONFIG_UIO_MF624) += uio_mf624.o
obj-$(CONFIG_UIO_FSL_ELBC_GPCM) += uio_fsl_elbc_gpcm.o
+obj-$(CONFIG_UIO_HV_GENERIC) += uio_hv_generic.o
diff --git a/drivers/uio/uio_hv_generic.c b/drivers/uio/uio_hv_generic.c
new file mode 100644
index 000000000000..50958f167305
--- /dev/null
+++ b/drivers/uio/uio_hv_generic.c
@@ -0,0 +1,218 @@
+/*
+ * uio_hv_generic - generic UIO driver for VMBus
+ *
+ * Copyright (c) 2013-2016 Brocade Communications Systems, Inc.
+ * Copyright (c) 2016, Microsoft Corporation.
+ *
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2.
+ *
+ * Since the driver does not declare any device ids, you must allocate
+ * an id and bind the device to the driver yourself. For example:
+ *
+ * # echo "f8615163-df3e-46c5-913f-f2d2f965ed0e" \
+ * > /sys/bus/vmbus/drivers/uio_hv_generic
+ * # echo -n vmbus-ed963694-e847-4b2a-85af-bc9cfc11d6f3 \
+ * > /sys/bus/vmbus/drivers/hv_netvsc/unbind
+ * # echo -n vmbus-ed963694-e847-4b2a-85af-bc9cfc11d6f3 \
+ * > /sys/bus/vmbus/drivers/uio_hv_generic/bind
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/uio_driver.h>
+#include <linux/netdevice.h>
+#include <linux/if_ether.h>
+#include <linux/skbuff.h>
+#include <linux/hyperv.h>
+#include <linux/vmalloc.h>
+#include <linux/slab.h>
+
+#include "../hv/hyperv_vmbus.h"
+
+#define DRIVER_VERSION "0.02.0"
+#define DRIVER_AUTHOR "Stephen Hemminger <sthemmin at microsoft.com>"
+#define DRIVER_DESC "Generic UIO driver for VMBus devices"
+
+/*
+ * List of resources to be mapped to user space;
+ * can be extended up to MAX_UIO_MAPS (5) items
+ */
+enum hv_uio_map {
+ TXRX_RING_MAP = 0,
+ INT_PAGE_MAP,
+ MON_PAGE_MAP,
+};
+
+#define HV_RING_SIZE 512
+
+struct hv_uio_private_data {
+ struct uio_info info;
+ struct hv_device *device;
+};
+
+static int
+hv_uio_mmap(struct uio_info *info, struct vm_area_struct *vma)
+{
+ int mi;
+
+ if (vma->vm_pgoff >= MAX_UIO_MAPS)
+ return -EINVAL;
+
+ if (info->mem[vma->vm_pgoff].size == 0)
+ return -EINVAL;
+
+ mi = (int)vma->vm_pgoff;
+
+ return remap_pfn_range(vma, vma->vm_start,
+ info->mem[mi].addr >> PAGE_SHIFT,
+ vma->vm_end - vma->vm_start, vma->vm_page_prot);
+}
+
+/*
+ * This is the irqcontrol callback to be registered to uio_info.
+ * It can be used to disable/enable interrupt from user space processes.
+ *
+ * @param info
+ * pointer to uio_info.
+ * @param irq_state
+ * state value. 1 to enable interrupt, 0 to disable interrupt.
+ */
+static int
+hv_uio_irqcontrol(struct uio_info *info, s32 irq_state)
+{
+ struct hv_uio_private_data *pdata = info->priv;
+ struct hv_device *dev = pdata->device;
+
+ dev->channel->inbound.ring_buffer->interrupt_mask = !irq_state;
+ virt_mb();
+
+ return 0;
+}
+
+/*
+ * Callback from vmbus_event when something is in inbound ring.
+ */
+static void hv_uio_channel_cb(void *context)
+{
+ struct hv_uio_private_data *pdata = context;
+ struct hv_device *dev = pdata->device;
+
+ dev->channel->inbound.ring_buffer->interrupt_mask = 1;
+ virt_mb();
+
+ uio_event_notify(&pdata->info);
+}
+
+static int
+hv_uio_probe(struct hv_device *dev,
+ const struct hv_vmbus_device_id *dev_id)
+{
+ struct hv_uio_private_data *pdata;
+ int ret;
+
+ pdata = kzalloc(sizeof(*pdata), GFP_KERNEL);
+ if (!pdata)
+ return -ENOMEM;
+
+ ret = vmbus_open(dev->channel, HV_RING_SIZE * PAGE_SIZE,
+ HV_RING_SIZE * PAGE_SIZE, NULL, 0,
+ hv_uio_channel_cb, pdata);
+ if (ret)
+ goto fail;
+
+ dev->channel->inbound.ring_buffer->interrupt_mask = 1;
+ dev->channel->batched_reading = false;
+
+ /* Fill general uio info */
+ pdata->info.name = "uio_hv_generic";
+ pdata->info.version = DRIVER_VERSION;
+ pdata->info.irqcontrol = hv_uio_irqcontrol;
+ pdata->info.mmap = hv_uio_mmap;
+ pdata->info.irq = UIO_IRQ_CUSTOM;
+
+ /* mem resources */
+ pdata->info.mem[TXRX_RING_MAP].name = "txrx_rings";
+ pdata->info.mem[TXRX_RING_MAP].addr
+ = virt_to_phys(dev->channel->ringbuffer_pages);
+ pdata->info.mem[TXRX_RING_MAP].size
+ = dev->channel->ringbuffer_pagecount * PAGE_SIZE;
+ pdata->info.mem[TXRX_RING_MAP].memtype = UIO_MEM_LOGICAL;
+
+ pdata->info.mem[INT_PAGE_MAP].name = "int_page";
+ pdata->info.mem[INT_PAGE_MAP].addr =
+ virt_to_phys(vmbus_connection.int_page);
+ pdata->info.mem[INT_PAGE_MAP].size = PAGE_SIZE;
+ pdata->info.mem[INT_PAGE_MAP].memtype = UIO_MEM_LOGICAL;
+
+ pdata->info.mem[MON_PAGE_MAP].name = "monitor_pages";
+ pdata->info.mem[MON_PAGE_MAP].addr =
+ virt_to_phys(vmbus_connection.monitor_pages[1]);
+ pdata->info.mem[MON_PAGE_MAP].size = PAGE_SIZE;
+ pdata->info.mem[MON_PAGE_MAP].memtype = UIO_MEM_LOGICAL;
+
+ pdata->info.priv = pdata;
+ pdata->device = dev;
+
+ ret = uio_register_device(&dev->device, &pdata->info);
+ if (ret) {
+ dev_err(&dev->device, "hv_uio register failed\n");
+ goto fail_close;
+ }
+
+ hv_set_drvdata(dev, pdata);
+
+ return 0;
+
+fail_close:
+ vmbus_close(dev->channel);
+fail:
+ kfree(pdata);
+
+ return ret;
+}
+
+static int
+hv_uio_remove(struct hv_device *dev)
+{
+ struct hv_uio_private_data *pdata = hv_get_drvdata(dev);
+
+ if (!pdata)
+ return 0;
+
+ uio_unregister_device(&pdata->info);
+ hv_set_drvdata(dev, NULL);
+ vmbus_close(dev->channel);
+ kfree(pdata);
+ return 0;
+}
+
+static struct hv_driver hv_uio_drv = {
+ .name = "uio_hv_generic",
+ .id_table = NULL, /* only dynamic id's */
+ .probe = hv_uio_probe,
+ .remove = hv_uio_remove,
+};
+
+static int __init
+hyperv_module_init(void)
+{
+ return vmbus_driver_register(&hv_uio_drv);
+}
+
+static void __exit
+hyperv_module_exit(void)
+{
+ vmbus_driver_unregister(&hv_uio_drv);
+}
+
+module_init(hyperv_module_init);
+module_exit(hyperv_module_exit);
+
+MODULE_VERSION(DRIVER_VERSION);
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR(DRIVER_AUTHOR);
+MODULE_DESCRIPTION(DRIVER_DESC);
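Once a device is bound as shown in the header comment, userspace drives
it through the standard UIO interface: a blocking read() on the UIO node
returns when hv_uio_channel_cb() calls uio_event_notify(), and a 4-byte
write() is routed to hv_uio_irqcontrol(). A minimal sketch, assuming the
device came up as /dev/uio0:

	#include <fcntl.h>
	#include <stdint.h>
	#include <unistd.h>

	int fd = open("/dev/uio0", O_RDWR);	/* assumed device node */
	uint32_t events, unmask = 1;

	write(fd, &unmask, sizeof(unmask));	/* irq_state = 1: re-enable interrupts */
	read(fd, &events, sizeof(events));	/* blocks until the channel callback fires */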
diff --git a/drivers/uio/uio_pruss.c b/drivers/uio/uio_pruss.c
index ca9e2fafb0b6..31d5b1d3b5af 100644
--- a/drivers/uio/uio_pruss.c
+++ b/drivers/uio/uio_pruss.c
@@ -111,6 +111,7 @@ static void pruss_cleanup(struct device *dev, struct uio_pruss_dev *gdev)
gdev->sram_vaddr,
sram_pool_sz);
kfree(gdev->info);
+ clk_disable(gdev->pruss_clk);
clk_put(gdev->pruss_clk);
kfree(gdev);
}
@@ -143,7 +144,14 @@ static int pruss_probe(struct platform_device *pdev)
kfree(gdev);
return ret;
} else {
- clk_enable(gdev->pruss_clk);
+ ret = clk_enable(gdev->pruss_clk);
+ if (ret) {
+ dev_err(dev, "Failed to enable clock\n");
+ clk_put(gdev->pruss_clk);
+ kfree(gdev->info);
+ kfree(gdev);
+ return ret;
+ }
}
regs_prussio = platform_get_resource(pdev, IORESOURCE_MEM, 0);
diff --git a/drivers/usb/Kconfig b/drivers/usb/Kconfig
index 644e978cbd3e..fbe493d44e81 100644
--- a/drivers/usb/Kconfig
+++ b/drivers/usb/Kconfig
@@ -95,6 +95,8 @@ source "drivers/usb/usbip/Kconfig"
endif
+source "drivers/usb/mtu3/Kconfig"
+
source "drivers/usb/musb/Kconfig"
source "drivers/usb/dwc3/Kconfig"
diff --git a/drivers/usb/Makefile b/drivers/usb/Makefile
index dca78565eb55..7791af6c102c 100644
--- a/drivers/usb/Makefile
+++ b/drivers/usb/Makefile
@@ -12,6 +12,7 @@ obj-$(CONFIG_USB_DWC2) += dwc2/
obj-$(CONFIG_USB_ISP1760) += isp1760/
obj-$(CONFIG_USB_MON) += mon/
+obj-$(CONFIG_USB_MTU3) += mtu3/
obj-$(CONFIG_PCI) += host/
obj-$(CONFIG_USB_EHCI_HCD) += host/
diff --git a/drivers/usb/chipidea/ci_hdrc_imx.c b/drivers/usb/chipidea/ci_hdrc_imx.c
index 099179457f60..5f4a8157fad8 100644
--- a/drivers/usb/chipidea/ci_hdrc_imx.c
+++ b/drivers/usb/chipidea/ci_hdrc_imx.c
@@ -18,6 +18,7 @@
#include <linux/pm_runtime.h>
#include <linux/dma-mapping.h>
#include <linux/usb/chipidea.h>
+#include <linux/usb/of.h>
#include <linux/clk.h>
#include "ci.h"
@@ -146,6 +147,9 @@ static struct imx_usbmisc_data *usbmisc_get_init_data(struct device *dev)
if (of_find_property(np, "external-vbus-divider", NULL))
data->evdo = 1;
+ if (of_usb_get_phy_mode(np) == USBPHY_INTERFACE_MODE_ULPI)
+ data->ulpi = 1;
+
return data;
}
diff --git a/drivers/usb/chipidea/ci_hdrc_imx.h b/drivers/usb/chipidea/ci_hdrc_imx.h
index 409aa5ca8dda..d666c9f036ba 100644
--- a/drivers/usb/chipidea/ci_hdrc_imx.h
+++ b/drivers/usb/chipidea/ci_hdrc_imx.h
@@ -19,6 +19,7 @@ struct imx_usbmisc_data {
unsigned int disable_oc:1; /* over current detect disabled */
unsigned int oc_polarity:1; /* over current polarity if oc enabled */
unsigned int evdo:1; /* set external vbus divider option */
+ unsigned int ulpi:1; /* connected to an ULPI phy */
};
int imx_usbmisc_init(struct imx_usbmisc_data *);
diff --git a/drivers/usb/chipidea/udc.c b/drivers/usb/chipidea/udc.c
index c9e80ad48fdc..cf132f057137 100644
--- a/drivers/usb/chipidea/udc.c
+++ b/drivers/usb/chipidea/udc.c
@@ -365,7 +365,7 @@ static int add_td_to_list(struct ci_hw_ep *hwep, struct ci_hw_req *hwreq,
if (hwreq->req.length == 0
|| hwreq->req.length % hwep->ep.maxpacket)
mul++;
- node->ptr->token |= mul << __ffs(TD_MULTO);
+ node->ptr->token |= cpu_to_le32(mul << __ffs(TD_MULTO));
}
temp = (u32) (hwreq->req.dma + hwreq->req.actual);
@@ -504,7 +504,7 @@ static int _hardware_enqueue(struct ci_hw_ep *hwep, struct ci_hw_req *hwreq)
if (hwreq->req.length == 0
|| hwreq->req.length % hwep->ep.maxpacket)
mul++;
- hwep->qh.ptr->cap |= mul << __ffs(QH_MULT);
+ hwep->qh.ptr->cap |= cpu_to_le32(mul << __ffs(QH_MULT));
}
ret = hw_ep_prime(ci, hwep->num, hwep->dir,
@@ -529,7 +529,7 @@ static void free_pending_td(struct ci_hw_ep *hwep)
static int reprime_dtd(struct ci_hdrc *ci, struct ci_hw_ep *hwep,
struct td_node *node)
{
- hwep->qh.ptr->td.next = node->dma;
+ hwep->qh.ptr->td.next = cpu_to_le32(node->dma);
hwep->qh.ptr->td.token &=
cpu_to_le32(~(TD_STATUS_HALTED | TD_STATUS_ACTIVE));
@@ -821,7 +821,7 @@ static int _ep_queue(struct usb_ep *ep, struct usb_request *req,
}
if (usb_endpoint_xfer_isoc(hwep->ep.desc) &&
- hwreq->req.length > (1 + hwep->ep.mult) * hwep->ep.maxpacket) {
+ hwreq->req.length > hwep->ep.mult * hwep->ep.maxpacket) {
dev_err(hwep->ci->dev, "request length too big for isochronous\n");
return -EMSGSIZE;
}
@@ -1253,8 +1253,8 @@ static int ep_enable(struct usb_ep *ep,
hwep->num = usb_endpoint_num(desc);
hwep->type = usb_endpoint_type(desc);
- hwep->ep.maxpacket = usb_endpoint_maxp(desc) & 0x07ff;
- hwep->ep.mult = QH_ISO_MULT(usb_endpoint_maxp(desc));
+ hwep->ep.maxpacket = usb_endpoint_maxp(desc);
+ hwep->ep.mult = usb_endpoint_maxp_mult(desc);
if (hwep->type == USB_ENDPOINT_XFER_CONTROL)
cap |= QH_IOS;
diff --git a/drivers/usb/chipidea/udc.h b/drivers/usb/chipidea/udc.h
index e66df0020bd4..2ecd1174d66c 100644
--- a/drivers/usb/chipidea/udc.h
+++ b/drivers/usb/chipidea/udc.h
@@ -22,11 +22,11 @@
/* DMA layout of transfer descriptors */
struct ci_hw_td {
/* 0 */
- u32 next;
+ __le32 next;
#define TD_TERMINATE BIT(0)
#define TD_ADDR_MASK (0xFFFFFFEUL << 5)
/* 1 */
- u32 token;
+ __le32 token;
#define TD_STATUS (0x00FFUL << 0)
#define TD_STATUS_TR_ERR BIT(3)
#define TD_STATUS_DT_ERR BIT(5)
@@ -36,7 +36,7 @@ struct ci_hw_td {
#define TD_IOC BIT(15)
#define TD_TOTAL_BYTES (0x7FFFUL << 16)
/* 2 */
- u32 page[5];
+ __le32 page[5];
#define TD_CURR_OFFSET (0x0FFFUL << 0)
#define TD_FRAME_NUM (0x07FFUL << 0)
#define TD_RESERVED_MASK (0x0FFFUL << 0)
@@ -45,18 +45,18 @@ struct ci_hw_td {
/* DMA layout of queue heads */
struct ci_hw_qh {
/* 0 */
- u32 cap;
+ __le32 cap;
#define QH_IOS BIT(15)
#define QH_MAX_PKT (0x07FFUL << 16)
#define QH_ZLT BIT(29)
#define QH_MULT (0x0003UL << 30)
#define QH_ISO_MULT(x) ((x >> 11) & 0x03)
/* 1 */
- u32 curr;
+ __le32 curr;
/* 2 - 8 */
struct ci_hw_td td;
/* 9 */
- u32 RESERVED;
+ __le32 RESERVED;
struct usb_ctrlrequest setup;
} __attribute__ ((packed, aligned(4)));
diff --git a/drivers/usb/chipidea/usbmisc_imx.c b/drivers/usb/chipidea/usbmisc_imx.c
index 20d02a5e418d..e77a4ed4f021 100644
--- a/drivers/usb/chipidea/usbmisc_imx.c
+++ b/drivers/usb/chipidea/usbmisc_imx.c
@@ -46,11 +46,23 @@
#define MX53_USB_OTG_PHY_CTRL_0_OFFSET 0x08
#define MX53_USB_OTG_PHY_CTRL_1_OFFSET 0x0c
+#define MX53_USB_CTRL_1_OFFSET 0x10
+#define MX53_USB_CTRL_1_H2_XCVR_CLK_SEL_MASK (0x11 << 2)
+#define MX53_USB_CTRL_1_H2_XCVR_CLK_SEL_ULPI BIT(2)
+#define MX53_USB_CTRL_1_H3_XCVR_CLK_SEL_MASK (0x11 << 6)
+#define MX53_USB_CTRL_1_H3_XCVR_CLK_SEL_ULPI BIT(6)
#define MX53_USB_UH2_CTRL_OFFSET 0x14
#define MX53_USB_UH3_CTRL_OFFSET 0x18
+#define MX53_USB_CLKONOFF_CTRL_OFFSET 0x24
+#define MX53_USB_CLKONOFF_CTRL_H2_INT60CKOFF BIT(21)
+#define MX53_USB_CLKONOFF_CTRL_H3_INT60CKOFF BIT(22)
#define MX53_BM_OVER_CUR_DIS_H1 BIT(5)
#define MX53_BM_OVER_CUR_DIS_OTG BIT(8)
#define MX53_BM_OVER_CUR_DIS_UHx BIT(30)
+#define MX53_USB_CTRL_1_UH2_ULPI_EN BIT(26)
+#define MX53_USB_CTRL_1_UH3_ULPI_EN BIT(27)
+#define MX53_USB_UHx_CTRL_WAKE_UP_EN BIT(7)
+#define MX53_USB_UHx_CTRL_ULPI_INT_EN BIT(8)
#define MX53_USB_PHYCTRL1_PLLDIV_MASK 0x3
#define MX53_USB_PLL_DIV_24_MHZ 0x01
@@ -199,31 +211,77 @@ static int usbmisc_imx53_init(struct imx_usbmisc_data *data)
val |= MX53_USB_PLL_DIV_24_MHZ;
writel(val, usbmisc->base + MX53_USB_OTG_PHY_CTRL_1_OFFSET);
- if (data->disable_oc) {
- spin_lock_irqsave(&usbmisc->lock, flags);
- switch (data->index) {
- case 0:
+ spin_lock_irqsave(&usbmisc->lock, flags);
+
+ switch (data->index) {
+ case 0:
+ if (data->disable_oc) {
reg = usbmisc->base + MX53_USB_OTG_PHY_CTRL_0_OFFSET;
val = readl(reg) | MX53_BM_OVER_CUR_DIS_OTG;
- break;
- case 1:
+ writel(val, reg);
+ }
+ break;
+ case 1:
+ if (data->disable_oc) {
reg = usbmisc->base + MX53_USB_OTG_PHY_CTRL_0_OFFSET;
val = readl(reg) | MX53_BM_OVER_CUR_DIS_H1;
- break;
- case 2:
+ writel(val, reg);
+ }
+ break;
+ case 2:
+ if (data->ulpi) {
+ /* Set USBH2 into ULPI mode. */
+ reg = usbmisc->base + MX53_USB_CTRL_1_OFFSET;
+ val = readl(reg) | MX53_USB_CTRL_1_UH2_ULPI_EN;
+ /* select ULPI clock */
+ val &= ~MX53_USB_CTRL_1_H2_XCVR_CLK_SEL_MASK;
+ val |= MX53_USB_CTRL_1_H2_XCVR_CLK_SEL_ULPI;
+ writel(val, reg);
+ /* Enable wakeup and ULPI interrupts */
+ reg = usbmisc->base + MX53_USB_UH2_CTRL_OFFSET;
+ val = readl(reg) | MX53_USB_UHx_CTRL_WAKE_UP_EN
+ | MX53_USB_UHx_CTRL_ULPI_INT_EN;
+ writel(val, reg);
+ /* Disable internal 60 MHz clock */
+ reg = usbmisc->base + MX53_USB_CLKONOFF_CTRL_OFFSET;
+ val = readl(reg) | MX53_USB_CLKONOFF_CTRL_H2_INT60CKOFF;
+ writel(val, reg);
+ }
+ if (data->disable_oc) {
reg = usbmisc->base + MX53_USB_UH2_CTRL_OFFSET;
val = readl(reg) | MX53_BM_OVER_CUR_DIS_UHx;
- break;
- case 3:
+ writel(val, reg);
+ }
+ break;
+ case 3:
+ if (data->ulpi) {
+ /* Set USBH3 into ULPI mode. */
+ reg = usbmisc->base + MX53_USB_CTRL_1_OFFSET;
+ val = readl(reg) | MX53_USB_CTRL_1_UH3_ULPI_EN;
+ /* select ULPI clock */
+ val &= ~MX53_USB_CTRL_1_H3_XCVR_CLK_SEL_MASK;
+ val |= MX53_USB_CTRL_1_H3_XCVR_CLK_SEL_ULPI;
+ writel(val, reg);
+ /* Enable wakeup and ULPI interrupts */
reg = usbmisc->base + MX53_USB_UH3_CTRL_OFFSET;
- val = readl(reg) | MX53_BM_OVER_CUR_DIS_UHx;
- break;
+ val = readl(reg) | MX53_USB_UHx_CTRL_WAKE_UP_EN
+ | MX53_USB_UHx_CTRL_ULPI_INT_EN;
+ writel(val, reg);
+ /* Disable internal 60 MHz clock */
+ reg = usbmisc->base + MX53_USB_CLKONOFF_CTRL_OFFSET;
+ val = readl(reg) | MX53_USB_CLKONOFF_CTRL_H3_INT60CKOFF;
+ writel(val, reg);
}
- if (reg && val)
+ if (data->disable_oc) {
+ reg = usbmisc->base + MX53_USB_UH3_CTRL_OFFSET;
+ val = readl(reg) | MX53_BM_OVER_CUR_DIS_UHx;
writel(val, reg);
- spin_unlock_irqrestore(&usbmisc->lock, flags);
+ }
+ break;
}
+ spin_unlock_irqrestore(&usbmisc->lock, flags);
+
return 0;
}
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index fada988512a1..e35b1508d3eb 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -133,8 +133,8 @@ static int acm_ctrl_msg(struct acm *acm, int request, int value,
buf, len, 5000);
dev_dbg(&acm->control->dev,
- "%s - rq 0x%02x, val %#x, len %#x, result %d\n",
- __func__, request, value, len, retval);
+ "%s - rq 0x%02x, val %#x, len %#x, result %d\n",
+ __func__, request, value, len, retval);
usb_autopm_put_interface(acm->control);
@@ -158,6 +158,17 @@ static inline int acm_set_control(struct acm *acm, int control)
#define acm_send_break(acm, ms) \
acm_ctrl_msg(acm, USB_CDC_REQ_SEND_BREAK, ms, NULL, 0)
+static void acm_kill_urbs(struct acm *acm)
+{
+ int i;
+
+ usb_kill_urb(acm->ctrlurb);
+ for (i = 0; i < ACM_NW; i++)
+ usb_kill_urb(acm->wb[i].urb);
+ for (i = 0; i < acm->rx_buflimit; i++)
+ usb_kill_urb(acm->read_urbs[i]);
+}
+
/*
* Write buffer management.
* All of these assume proper locks taken by the caller.
@@ -291,13 +302,13 @@ static void acm_ctrl_irq(struct urb *urb)
case -ESHUTDOWN:
/* this urb is terminated, clean up */
dev_dbg(&acm->control->dev,
- "%s - urb shutting down with status: %d\n",
- __func__, status);
+ "%s - urb shutting down with status: %d\n",
+ __func__, status);
return;
default:
dev_dbg(&acm->control->dev,
- "%s - nonzero urb status received: %d\n",
- __func__, status);
+ "%s - nonzero urb status received: %d\n",
+ __func__, status);
goto exit;
}
@@ -306,16 +317,16 @@ static void acm_ctrl_irq(struct urb *urb)
data = (unsigned char *)(dr + 1);
switch (dr->bNotificationType) {
case USB_CDC_NOTIFY_NETWORK_CONNECTION:
- dev_dbg(&acm->control->dev, "%s - network connection: %d\n",
- __func__, dr->wValue);
+ dev_dbg(&acm->control->dev,
+ "%s - network connection: %d\n", __func__, dr->wValue);
break;
case USB_CDC_NOTIFY_SERIAL_STATE:
newctrl = get_unaligned_le16(data);
if (!acm->clocal && (acm->ctrlin & ~newctrl & ACM_CTRL_DCD)) {
- dev_dbg(&acm->control->dev, "%s - calling hangup\n",
- __func__);
+ dev_dbg(&acm->control->dev,
+ "%s - calling hangup\n", __func__);
tty_port_tty_hangup(&acm->port, false);
}
@@ -357,8 +368,8 @@ static void acm_ctrl_irq(struct urb *urb)
exit:
retval = usb_submit_urb(urb, GFP_ATOMIC);
if (retval && retval != -EPERM)
- dev_err(&acm->control->dev, "%s - usb_submit_urb failed: %d\n",
- __func__, retval);
+ dev_err(&acm->control->dev,
+ "%s - usb_submit_urb failed: %d\n", __func__, retval);
}
static int acm_submit_read_urb(struct acm *acm, int index, gfp_t mem_flags)
@@ -372,8 +383,8 @@ static int acm_submit_read_urb(struct acm *acm, int index, gfp_t mem_flags)
if (res) {
if (res != -EPERM) {
dev_err(&acm->data->dev,
- "urb %d failed submission with %d\n",
- index, res);
+ "urb %d failed submission with %d\n",
+ index, res);
}
set_bit(index, &acm->read_urbs_free);
return res;
@@ -416,30 +427,43 @@ static void acm_read_bulk_callback(struct urb *urb)
int status = urb->status;
dev_vdbg(&acm->data->dev, "got urb %d, len %d, status %d\n",
- rb->index, urb->actual_length,
- status);
+ rb->index, urb->actual_length, status);
+
+ set_bit(rb->index, &acm->read_urbs_free);
if (!acm->dev) {
- set_bit(rb->index, &acm->read_urbs_free);
dev_dbg(&acm->data->dev, "%s - disconnected\n", __func__);
return;
}
- if (status) {
- set_bit(rb->index, &acm->read_urbs_free);
- if ((status != -ENOENT) || (urb->actual_length == 0))
- return;
+ switch (status) {
+ case 0:
+ usb_mark_last_busy(acm->dev);
+ acm_process_read_urb(acm, urb);
+ break;
+ case -EPIPE:
+ set_bit(EVENT_RX_STALL, &acm->flags);
+ schedule_work(&acm->work);
+ return;
+ case -ENOENT:
+ case -ECONNRESET:
+ case -ESHUTDOWN:
+ dev_dbg(&acm->data->dev,
+ "%s - urb shutting down with status: %d\n",
+ __func__, status);
+ return;
+ default:
+ dev_dbg(&acm->data->dev,
+ "%s - nonzero urb status received: %d\n",
+ __func__, status);
+ break;
}
- usb_mark_last_busy(acm->dev);
-
- acm_process_read_urb(acm, urb);
/*
* Unthrottle may run on another CPU which needs to see events
* in the same order. Submission has an implicit barrier.
*/
smp_mb__before_atomic();
- set_bit(rb->index, &acm->read_urbs_free);
/* throttle device if requested by tty */
spin_lock_irqsave(&acm->read_lock, flags);
@@ -469,14 +493,30 @@ static void acm_write_bulk(struct urb *urb)
spin_lock_irqsave(&acm->write_lock, flags);
acm_write_done(acm, wb);
spin_unlock_irqrestore(&acm->write_lock, flags);
+ set_bit(EVENT_TTY_WAKEUP, &acm->flags);
schedule_work(&acm->work);
}
static void acm_softint(struct work_struct *work)
{
+ int i;
struct acm *acm = container_of(work, struct acm, work);
- tty_port_tty_wakeup(&acm->port);
+ if (test_bit(EVENT_RX_STALL, &acm->flags)) {
+ if (!(usb_autopm_get_interface(acm->data))) {
+ for (i = 0; i < acm->rx_buflimit; i++)
+ usb_kill_urb(acm->read_urbs[i]);
+ usb_clear_halt(acm->dev, acm->in);
+ acm_submit_read_urbs(acm, GFP_KERNEL);
+ usb_autopm_put_interface(acm->data);
+ }
+ clear_bit(EVENT_RX_STALL, &acm->flags);
+ }
+
+ if (test_bit(EVENT_TTY_WAKEUP, &acm->flags)) {
+ tty_port_tty_wakeup(&acm->port);
+ clear_bit(EVENT_TTY_WAKEUP, &acm->flags);
+ }
}
/*
@@ -608,7 +648,6 @@ static void acm_port_shutdown(struct tty_port *port)
struct acm *acm = container_of(port, struct acm, port);
struct urb *urb;
struct acm_wb *wb;
- int i;
/*
* Need to grab write_lock to prevent race with resume, but no need to
@@ -630,11 +669,7 @@ static void acm_port_shutdown(struct tty_port *port)
usb_autopm_put_interface_async(acm->control);
}
- usb_kill_urb(acm->ctrlurb);
- for (i = 0; i < ACM_NW; i++)
- usb_kill_urb(acm->wb[i].urb);
- for (i = 0; i < acm->rx_buflimit; i++)
- usb_kill_urb(acm->read_urbs[i]);
+ acm_kill_urbs(acm);
}
static void acm_tty_cleanup(struct tty_struct *tty)
@@ -837,8 +872,8 @@ static int acm_tty_break_ctl(struct tty_struct *tty, int state)
retval = acm_send_break(acm, state ? 0xffff : 0);
if (retval < 0)
- dev_dbg(&acm->control->dev, "%s - send break failed\n",
- __func__);
+ dev_dbg(&acm->control->dev,
+ "%s - send break failed\n", __func__);
return retval;
}
@@ -877,9 +912,6 @@ static int get_serial_info(struct acm *acm, struct serial_struct __user *info)
{
struct serial_struct tmp;
- if (!info)
- return -EINVAL;
-
memset(&tmp, 0, sizeof(tmp));
tmp.flags = ASYNC_LOW_LATENCY;
tmp.xmit_fifo_size = acm->writesize;
@@ -969,25 +1001,20 @@ static int wait_serial_change(struct acm *acm, unsigned long arg)
return rv;
}
-static int get_serial_usage(struct acm *acm,
- struct serial_icounter_struct __user *count)
+static int acm_tty_get_icount(struct tty_struct *tty,
+ struct serial_icounter_struct *icount)
{
- struct serial_icounter_struct icount;
- int rv = 0;
-
- memset(&icount, 0, sizeof(icount));
- icount.dsr = acm->iocount.dsr;
- icount.rng = acm->iocount.rng;
- icount.dcd = acm->iocount.dcd;
- icount.frame = acm->iocount.frame;
- icount.overrun = acm->iocount.overrun;
- icount.parity = acm->iocount.parity;
- icount.brk = acm->iocount.brk;
+ struct acm *acm = tty->driver_data;
- if (copy_to_user(count, &icount, sizeof(icount)) > 0)
- rv = -EFAULT;
+ icount->dsr = acm->iocount.dsr;
+ icount->rng = acm->iocount.rng;
+ icount->dcd = acm->iocount.dcd;
+ icount->frame = acm->iocount.frame;
+ icount->overrun = acm->iocount.overrun;
+ icount->parity = acm->iocount.parity;
+ icount->brk = acm->iocount.brk;
- return rv;
+ return 0;
}
static int acm_tty_ioctl(struct tty_struct *tty,
@@ -1012,9 +1039,6 @@ static int acm_tty_ioctl(struct tty_struct *tty,
rv = wait_serial_change(acm, arg);
usb_autopm_put_interface(acm->control);
break;
- case TIOCGICOUNT:
- rv = get_serial_usage(acm, (struct serial_icounter_struct __user *) arg);
- break;
}
return rv;
@@ -1088,19 +1112,17 @@ static void acm_write_buffers_free(struct acm *acm)
{
int i;
struct acm_wb *wb;
- struct usb_device *usb_dev = interface_to_usbdev(acm->control);
for (wb = &acm->wb[0], i = 0; i < ACM_NW; i++, wb++)
- usb_free_coherent(usb_dev, acm->writesize, wb->buf, wb->dmah);
+ usb_free_coherent(acm->dev, acm->writesize, wb->buf, wb->dmah);
}
static void acm_read_buffers_free(struct acm *acm)
{
- struct usb_device *usb_dev = interface_to_usbdev(acm->control);
int i;
for (i = 0; i < acm->rx_buflimit; i++)
- usb_free_coherent(usb_dev, acm->readsize,
+ usb_free_coherent(acm->dev, acm->readsize,
acm->read_buffers[i].base, acm->read_buffers[i].dma);
}
@@ -1345,9 +1367,16 @@ made_compressed_probe:
spin_lock_init(&acm->write_lock);
spin_lock_init(&acm->read_lock);
mutex_init(&acm->mutex);
- acm->is_int_ep = usb_endpoint_xfer_int(epread);
- if (acm->is_int_ep)
+ if (usb_endpoint_xfer_int(epread)) {
acm->bInterval = epread->bInterval;
+ acm->in = usb_rcvintpipe(usb_dev, epread->bEndpointAddress);
+ } else {
+ acm->in = usb_rcvbulkpipe(usb_dev, epread->bEndpointAddress);
+ }
+ if (usb_endpoint_xfer_int(epwrite))
+ acm->out = usb_sndintpipe(usb_dev, epwrite->bEndpointAddress);
+ else
+ acm->out = usb_sndbulkpipe(usb_dev, epwrite->bEndpointAddress);
tty_port_init(&acm->port);
acm->port.ops = &acm_port_ops;
init_usb_anchor(&acm->delayed);
@@ -1382,20 +1411,15 @@ made_compressed_probe:
urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
urb->transfer_dma = rb->dma;
- if (acm->is_int_ep) {
- usb_fill_int_urb(urb, acm->dev,
- usb_rcvintpipe(usb_dev, epread->bEndpointAddress),
- rb->base,
+ if (usb_endpoint_xfer_int(epread))
+ usb_fill_int_urb(urb, acm->dev, acm->in, rb->base,
acm->readsize,
acm_read_bulk_callback, rb,
acm->bInterval);
- } else {
- usb_fill_bulk_urb(urb, acm->dev,
- usb_rcvbulkpipe(usb_dev, epread->bEndpointAddress),
- rb->base,
+ else
+ usb_fill_bulk_urb(urb, acm->dev, acm->in, rb->base,
acm->readsize,
acm_read_bulk_callback, rb);
- }
acm->read_urbs[i] = urb;
__set_bit(i, &acm->read_urbs_free);
@@ -1408,12 +1432,10 @@ made_compressed_probe:
goto alloc_fail7;
if (usb_endpoint_xfer_int(epwrite))
- usb_fill_int_urb(snd->urb, usb_dev,
- usb_sndintpipe(usb_dev, epwrite->bEndpointAddress),
+ usb_fill_int_urb(snd->urb, usb_dev, acm->out,
NULL, acm->writesize, acm_write_bulk, snd, epwrite->bInterval);
else
- usb_fill_bulk_urb(snd->urb, usb_dev,
- usb_sndbulkpipe(usb_dev, epwrite->bEndpointAddress),
+ usb_fill_bulk_urb(snd->urb, usb_dev, acm->out,
NULL, acm->writesize, acm_write_bulk, snd);
snd->urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
if (quirks & SEND_ZERO_PACKET)
@@ -1485,8 +1507,8 @@ skip_countries:
}
if (quirks & CLEAR_HALT_CONDITIONS) {
- usb_clear_halt(usb_dev, usb_rcvbulkpipe(usb_dev, epread->bEndpointAddress));
- usb_clear_halt(usb_dev, usb_sndbulkpipe(usb_dev, epwrite->bEndpointAddress));
+ usb_clear_halt(usb_dev, acm->in);
+ usb_clear_halt(usb_dev, acm->out);
}
return 0;
@@ -1520,25 +1542,10 @@ alloc_fail:
return rv;
}
-static void stop_data_traffic(struct acm *acm)
-{
- int i;
-
- usb_kill_urb(acm->ctrlurb);
- for (i = 0; i < ACM_NW; i++)
- usb_kill_urb(acm->wb[i].urb);
- for (i = 0; i < acm->rx_buflimit; i++)
- usb_kill_urb(acm->read_urbs[i]);
-
- cancel_work_sync(&acm->work);
-}
-
static void acm_disconnect(struct usb_interface *intf)
{
struct acm *acm = usb_get_intfdata(intf);
- struct usb_device *usb_dev = interface_to_usbdev(intf);
struct tty_struct *tty;
- int i;
/* sibling interface is already cleaning up */
if (!acm)
@@ -1564,17 +1571,13 @@ static void acm_disconnect(struct usb_interface *intf)
tty_kref_put(tty);
}
- stop_data_traffic(acm);
+ acm_kill_urbs(acm);
+ cancel_work_sync(&acm->work);
tty_unregister_device(acm_tty_driver, acm->minor);
- usb_free_urb(acm->ctrlurb);
- for (i = 0; i < ACM_NW; i++)
- usb_free_urb(acm->wb[i].urb);
- for (i = 0; i < acm->rx_buflimit; i++)
- usb_free_urb(acm->read_urbs[i]);
acm_write_buffers_free(acm);
- usb_free_coherent(usb_dev, acm->ctrlsize, acm->ctrl_buffer, acm->ctrl_dma);
+ usb_free_coherent(acm->dev, acm->ctrlsize, acm->ctrl_buffer, acm->ctrl_dma);
acm_read_buffers_free(acm);
if (!acm->combined_interfaces)
@@ -1603,7 +1606,8 @@ static int acm_suspend(struct usb_interface *intf, pm_message_t message)
if (cnt)
return 0;
- stop_data_traffic(acm);
+ acm_kill_urbs(acm);
+ cancel_work_sync(&acm->work);
return 0;
}
@@ -1657,6 +1661,15 @@ static int acm_reset_resume(struct usb_interface *intf)
#endif /* CONFIG_PM */
+static int acm_pre_reset(struct usb_interface *intf)
+{
+ struct acm *acm = usb_get_intfdata(intf);
+
+ clear_bit(EVENT_RX_STALL, &acm->flags);
+
+ return 0;
+}
+
#define NOKIA_PCSUITE_ACM_INFO(x) \
USB_DEVICE_AND_INTERFACE_INFO(0x0421, x, \
USB_CLASS_COMM, USB_CDC_SUBCLASS_ACM, \
@@ -1719,6 +1732,7 @@ static const struct usb_device_id acm_ids[] = {
{ USB_DEVICE(0x20df, 0x0001), /* Simtec Electronics Entropy Key */
.driver_info = QUIRK_CONTROL_LINE_STATE, },
{ USB_DEVICE(0x2184, 0x001c) }, /* GW Instek AFG-2225 */
+ { USB_DEVICE(0x2184, 0x0036) }, /* GW Instek AFG-125 */
{ USB_DEVICE(0x22b8, 0x6425), /* Motorola MOTOMAGX phones */
},
/* Motorola H24 HSPA module: */
@@ -1898,6 +1912,7 @@ static struct usb_driver acm_driver = {
.resume = acm_resume,
.reset_resume = acm_reset_resume,
#endif
+ .pre_reset = acm_pre_reset,
.id_table = acm_ids,
#ifdef CONFIG_PM
.supports_autosuspend = 1,
@@ -1927,6 +1942,7 @@ static const struct tty_operations acm_ops = {
.set_termios = acm_tty_set_termios,
.tiocmget = acm_tty_tiocmget,
.tiocmset = acm_tty_tiocmset,
+ .get_icount = acm_tty_get_icount,
};
/*
diff --git a/drivers/usb/class/cdc-acm.h b/drivers/usb/class/cdc-acm.h
index 1f1eabfd8462..c980f11cdf56 100644
--- a/drivers/usb/class/cdc-acm.h
+++ b/drivers/usb/class/cdc-acm.h
@@ -83,6 +83,7 @@ struct acm {
struct usb_device *dev; /* the corresponding usb device */
struct usb_interface *control; /* control interface */
struct usb_interface *data; /* data interface */
+ unsigned in, out; /* i/o pipes */
struct tty_port port; /* our tty port data */
struct urb *ctrlurb; /* urbs */
u8 *ctrl_buffer; /* buffers of urbs */
@@ -102,6 +103,9 @@ struct acm {
spinlock_t write_lock;
struct mutex mutex;
bool disconnected;
+ unsigned long flags;
+# define EVENT_TTY_WAKEUP 0
+# define EVENT_RX_STALL 1
struct usb_cdc_line_coding line; /* bits, stop, parity */
struct work_struct work; /* work queue entry for line discipline waking up */
unsigned int ctrlin; /* input control lines (DCD, DSR, RI, break, overruns) */
@@ -116,7 +120,6 @@ struct acm {
unsigned int ctrl_caps; /* control capabilities from the class specific header */
unsigned int susp_count; /* number of suspended interfaces */
unsigned int combined_interfaces:1; /* control and data collapsed */
- unsigned int is_int_ep:1; /* interrupt endpoints contrary to spec used */
unsigned int throttled:1; /* actually throttled */
unsigned int throttle_req:1; /* throttle requested */
u8 bInterval;
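
The new EVENT_TTY_WAKEUP/EVENT_RX_STALL bits above implement the usual event-flag-plus-workqueue handoff: completion handlers set an atomic bit and schedule work, and the worker tests and clears each bit in process context. A minimal sketch of that handoff, using a hypothetical device structure rather than struct acm:

#include <linux/bitops.h>
#include <linux/kernel.h>
#include <linux/workqueue.h>

struct demo_dev {
	unsigned long flags;
#define DEMO_EVENT_WAKEUP	0
	struct work_struct work;
};

static void demo_completion_path(struct demo_dev *dev)
{
	/* set_bit() is atomic, so this is safe from URB completion
	 * (interrupt) context; the work runs later in process context. */
	set_bit(DEMO_EVENT_WAKEUP, &dev->flags);
	schedule_work(&dev->work);
}

static void demo_softint(struct work_struct *work)
{
	struct demo_dev *dev = container_of(work, struct demo_dev, work);

	if (test_bit(DEMO_EVENT_WAKEUP, &dev->flags)) {
		/* ... sleeping work (clear halt, wake the tty) ... */
		clear_bit(DEMO_EVENT_WAKEUP, &dev->flags);
	}
}

This is why acm_read_bulk_callback() can merely flag EVENT_RX_STALL on -EPIPE: usb_clear_halt() sleeps and must not run in the completion handler.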
diff --git a/drivers/usb/class/usbtmc.c b/drivers/usb/class/usbtmc.c
index a6c1fae7d52a..f03692ec5520 100644
--- a/drivers/usb/class/usbtmc.c
+++ b/drivers/usb/class/usbtmc.c
@@ -157,6 +157,7 @@ static int usbtmc_open(struct inode *inode, struct file *filp)
}
data = usb_get_intfdata(intf);
+ /* Protect reference to data from file structure until release */
kref_get(&data->kref);
/* Store pointer in file structure's private data field */
@@ -531,7 +532,7 @@ static int usbtmc488_ioctl_simple(struct usbtmc_device_data *data,
}
/*
- * Sends a REQUEST_DEV_DEP_MSG_IN message on the Bulk-IN endpoint.
+ * Sends a REQUEST_DEV_DEP_MSG_IN message on the Bulk-OUT endpoint.
* @transfer_size: number of bytes to request from the device.
*
* See the USBTMC specification, Table 4.
@@ -1471,7 +1472,7 @@ static int usbtmc_probe(struct usb_interface *intf,
if (!data->iin_urb)
goto error_register;
- /* will reference data in int urb */
+ /* Protect interrupt-in endpoint data until iin_urb is freed */
kref_get(&data->kref);
/* allocate buffer for interrupt in */
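
The two kref_get() comments above spell out the usbtmc lifetime rule: every long-lived reference to the device data (the open file, the interrupt-in URB) pins it with kref_get() and drops it with kref_put(), and the last put frees the object. A minimal sketch of the pattern, with a hypothetical object:

#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/slab.h>

struct demo_obj {
	struct kref kref;
	/* ... device state ... */
};

static void demo_release(struct kref *kref)
{
	kfree(container_of(kref, struct demo_obj, kref));
}

/* Each long-lived user (open file, in-flight URB) takes a reference... */
static void demo_get(struct demo_obj *obj)
{
	kref_get(&obj->kref);
}

/* ...and drops it when done; the final put invokes demo_release(). */
static void demo_put(struct demo_obj *obj)
{
	kref_put(&obj->kref, demo_release);
}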
diff --git a/drivers/usb/core/buffer.c b/drivers/usb/core/buffer.c
index 98e39f91723a..b9bf6e2eb6fe 100644
--- a/drivers/usb/core/buffer.c
+++ b/drivers/usb/core/buffer.c
@@ -3,6 +3,9 @@
*
* This implementation plugs in through generic "usb_bus" level methods,
* and should work with all USB controllers, regardless of bus type.
+ *
+ * Released under the GPLv2 only.
+ * SPDX-License-Identifier: GPL-2.0
*/
#include <linux/module.h>
diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c
index a2d90aca779f..0aa9e7d697a5 100644
--- a/drivers/usb/core/config.c
+++ b/drivers/usb/core/config.c
@@ -1,3 +1,8 @@
+/*
+ * Released under the GPLv2 only.
+ * SPDX-License-Identifier: GPL-2.0
+ */
+
#include <linux/usb.h>
#include <linux/usb/ch9.h>
#include <linux/usb/hcd.h>
diff --git a/drivers/usb/core/devices.c b/drivers/usb/core/devices.c
index ef04b50e6bbb..f2987ddb1cde 100644
--- a/drivers/usb/core/devices.c
+++ b/drivers/usb/core/devices.c
@@ -182,14 +182,8 @@ static char *usb_dump_endpoint_descriptor(int speed, char *start, char *end,
dir = usb_endpoint_dir_in(desc) ? 'I' : 'O';
- if (speed == USB_SPEED_HIGH) {
- switch (usb_endpoint_maxp(desc) & (0x03 << 11)) {
- case 1 << 11:
- bandwidth = 2; break;
- case 2 << 11:
- bandwidth = 3; break;
- }
- }
+ if (speed == USB_SPEED_HIGH)
+ bandwidth = usb_endpoint_maxp_mult(desc);
/* this isn't checking for illegal values */
switch (usb_endpoint_type(desc)) {
@@ -233,7 +227,7 @@ static char *usb_dump_endpoint_descriptor(int speed, char *start, char *end,
start += sprintf(start, format_endpt, desc->bEndpointAddress, dir,
desc->bmAttributes, type,
- (usb_endpoint_maxp(desc) & 0x07ff) *
+ usb_endpoint_maxp(desc) *
bandwidth,
interval, unit);
return start;
diff --git a/drivers/usb/core/driver.c b/drivers/usb/core/driver.c
index dadd1e8dfe09..cdee5130638b 100644
--- a/drivers/usb/core/driver.c
+++ b/drivers/usb/core/driver.c
@@ -15,6 +15,9 @@
* (usb_device_id matching changes by Adam J. Richter)
* (C) Copyright Greg Kroah-Hartman 2002-2003
*
+ * Released under the GPLv2 only.
+ * SPDX-License-Identifier: GPL-2.0
+ *
* NOTE! This is not actually a driver at all, rather this is
* just a collection of helper routines that implement the
* matching, probing, releasing, suspending and resuming for
diff --git a/drivers/usb/core/endpoint.c b/drivers/usb/core/endpoint.c
index 101983b7e8d2..a60bc830a056 100644
--- a/drivers/usb/core/endpoint.c
+++ b/drivers/usb/core/endpoint.c
@@ -5,8 +5,10 @@
* (C) Copyright 2002,2004 IBM Corp.
* (C) Copyright 2006 Novell Inc.
*
- * Endpoint sysfs stuff
+ * Released under the GPLv2 only.
+ * SPDX-License-Identifier: GPL-2.0
*
+ * Endpoint sysfs stuff
*/
#include <linux/kernel.h>
@@ -50,8 +52,7 @@ static ssize_t wMaxPacketSize_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct ep_device *ep = to_ep_device(dev);
- return sprintf(buf, "%04x\n",
- usb_endpoint_maxp(ep->desc) & 0x07ff);
+ return sprintf(buf, "%04x\n", usb_endpoint_maxp(ep->desc));
}
static DEVICE_ATTR_RO(wMaxPacketSize);
diff --git a/drivers/usb/core/file.c b/drivers/usb/core/file.c
index 822ced9639aa..e26bd5e773ad 100644
--- a/drivers/usb/core/file.c
+++ b/drivers/usb/core/file.c
@@ -13,6 +13,8 @@
* (usb_device_id matching changes by Adam J. Richter)
* (C) Copyright Greg Kroah-Hartman 2002-2003
*
+ * Released under the GPLv2 only.
+ * SPDX-License-Identifier: GPL-2.0
*/
#include <linux/module.h>
diff --git a/drivers/usb/core/generic.c b/drivers/usb/core/generic.c
index 358ca8dd784f..bd3e0c5a6db2 100644
--- a/drivers/usb/core/generic.c
+++ b/drivers/usb/core/generic.c
@@ -15,6 +15,8 @@
* (usb_device_id matching changes by Adam J. Richter)
* (C) Copyright Greg Kroah-Hartman 2002-2003
*
+ * Released under the GPLv2 only.
+ * SPDX-License-Identifier: GPL-2.0
*/
#include <linux/usb.h>
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index cbb146736f57..143454ea385b 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -6,6 +6,8 @@
* (C) Copyright 1999 Gregory P. Smith
* (C) Copyright 2001 Brad Hards (bhards@bigpond.net.au)
*
+ * Released under the GPLv2 only.
+ * SPDX-License-Identifier: GPL-2.0
*/
#include <linux/kernel.h>
@@ -101,6 +103,8 @@ EXPORT_SYMBOL_GPL(ehci_cf_port_reset_rwsem);
static void hub_release(struct kref *kref);
static int usb_reset_and_verify_device(struct usb_device *udev);
+static void hub_usb3_port_prepare_disable(struct usb_hub *hub,
+ struct usb_port *port_dev);
static inline char *portspeed(struct usb_hub *hub, int portstatus)
{
@@ -899,82 +903,28 @@ static int hub_set_port_link_state(struct usb_hub *hub, int port1,
}
/*
- * If USB 3.0 ports are placed into the Disabled state, they will no longer
- * detect any device connects or disconnects. This is generally not what the
- * USB core wants, since it expects a disabled port to produce a port status
- * change event when a new device connects.
- *
- * Instead, set the link state to Disabled, wait for the link to settle into
- * that state, clear any change bits, and then put the port into the RxDetect
- * state.
+ * Unlike USB-2, USB-3 has no link state that avoids negotiating a connection
+ * with a plugged-in cable while still signalling the host when the cable is
+ * unplugged. For USB-3 devices, disable remote wakeup and set the link state
+ * to U3 instead.
*/
-static int hub_usb3_port_disable(struct usb_hub *hub, int port1)
-{
- int ret;
- int total_time;
- u16 portchange, portstatus;
-
- if (!hub_is_superspeed(hub->hdev))
- return -EINVAL;
-
- ret = hub_port_status(hub, port1, &portstatus, &portchange);
- if (ret < 0)
- return ret;
-
- /*
- * USB controller Advanced Micro Devices, Inc. [AMD] FCH USB XHCI
- * Controller [1022:7814] will have spurious result making the following
- * usb 3.0 device hotplugging route to the 2.0 root hub and recognized
- * as high-speed device if we set the usb 3.0 port link state to
- * Disabled. Since it's already in USB_SS_PORT_LS_RX_DETECT state, we
- * check the state here to avoid the bug.
- */
- if ((portstatus & USB_PORT_STAT_LINK_STATE) ==
- USB_SS_PORT_LS_RX_DETECT) {
- dev_dbg(&hub->ports[port1 - 1]->dev,
- "Not disabling port; link state is RxDetect\n");
- return ret;
- }
-
- ret = hub_set_port_link_state(hub, port1, USB_SS_PORT_LS_SS_DISABLED);
- if (ret)
- return ret;
-
- /* Wait for the link to enter the disabled state. */
- for (total_time = 0; ; total_time += HUB_DEBOUNCE_STEP) {
- ret = hub_port_status(hub, port1, &portstatus, &portchange);
- if (ret < 0)
- return ret;
-
- if ((portstatus & USB_PORT_STAT_LINK_STATE) ==
- USB_SS_PORT_LS_SS_DISABLED)
- break;
- if (total_time >= HUB_DEBOUNCE_TIMEOUT)
- break;
- msleep(HUB_DEBOUNCE_STEP);
- }
- if (total_time >= HUB_DEBOUNCE_TIMEOUT)
- dev_warn(&hub->ports[port1 - 1]->dev,
- "Could not disable after %d ms\n", total_time);
-
- return hub_set_port_link_state(hub, port1, USB_SS_PORT_LS_RX_DETECT);
-}
-
static int hub_port_disable(struct usb_hub *hub, int port1, int set_state)
{
struct usb_port *port_dev = hub->ports[port1 - 1];
struct usb_device *hdev = hub->hdev;
int ret = 0;
- if (port_dev->child && set_state)
- usb_set_device_state(port_dev->child, USB_STATE_NOTATTACHED);
if (!hub->error) {
- if (hub_is_superspeed(hub->hdev))
- ret = hub_usb3_port_disable(hub, port1);
- else
+ if (hub_is_superspeed(hub->hdev)) {
+ hub_usb3_port_prepare_disable(hub, port_dev);
+ ret = hub_set_port_link_state(hub, port_dev->portnum,
+ USB_SS_PORT_LS_U3);
+ } else {
ret = usb_clear_port_feature(hdev, port1,
USB_PORT_FEAT_ENABLE);
+ }
}
+ if (port_dev->child && set_state)
+ usb_set_device_state(port_dev->child, USB_STATE_NOTATTACHED);
if (ret && ret != -ENODEV)
dev_err(&port_dev->dev, "cannot disable (err = %d)\n", ret);
return ret;
@@ -2731,8 +2681,15 @@ static int hub_port_wait_reset(struct usb_hub *hub, int port1,
if (ret < 0)
return ret;
- /* The port state is unknown until the reset completes. */
- if (!(portstatus & USB_PORT_STAT_RESET))
+ /*
+ * The port state is unknown until the reset completes.
+ *
+ * On top of that, some chips may require additional time
+ * to re-establish a connection after the reset is complete,
+ * so also wait for the connection to be re-established.
+ */
+ if (!(portstatus & USB_PORT_STAT_RESET) &&
+ (portstatus & USB_PORT_STAT_CONNECTION))
break;
/* switch to the long delay after two short delay failures */
@@ -4140,6 +4097,26 @@ void usb_unlocked_enable_lpm(struct usb_device *udev)
}
EXPORT_SYMBOL_GPL(usb_unlocked_enable_lpm);
+/* usb3 devices use U3 for disabled, make sure remote wakeup is disabled */
+static void hub_usb3_port_prepare_disable(struct usb_hub *hub,
+ struct usb_port *port_dev)
+{
+ struct usb_device *udev = port_dev->child;
+ int ret;
+
+ if (udev && udev->port_is_suspended && udev->do_remote_wakeup) {
+ ret = hub_set_port_link_state(hub, port_dev->portnum,
+ USB_SS_PORT_LS_U0);
+ if (!ret) {
+ msleep(USB_RESUME_TIMEOUT);
+ ret = usb_disable_remote_wakeup(udev);
+ }
+ if (ret)
+ dev_warn(&udev->dev,
+ "Port disable: can't disable remote wake\n");
+ udev->do_remote_wakeup = 0;
+ }
+}
#else /* CONFIG_PM */
@@ -4147,6 +4124,9 @@ EXPORT_SYMBOL_GPL(usb_unlocked_enable_lpm);
#define hub_resume NULL
#define hub_reset_resume NULL
+static inline void hub_usb3_port_prepare_disable(struct usb_hub *hub,
+ struct usb_port *port_dev) { }
+
int usb_disable_lpm(struct usb_device *udev)
{
return 0;
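
The hunk above also shows the usual CONFIG_PM split: the real hub_usb3_port_prepare_disable() is compiled only when power management is built in, while an empty static inline stub keeps the caller unconditional otherwise. A minimal sketch of the stub pattern, with hypothetical names:

/* Hypothetical types; only the stub pattern matters here. */
struct demo_hub;
struct demo_port;

#ifdef CONFIG_PM
static void demo_port_prepare_disable(struct demo_hub *hub,
				      struct demo_port *port)
{
	/* PM-specific teardown (remote-wakeup disable, etc.) goes here. */
}
#else
/* Empty inline stub: callers need no #ifdef and the call costs nothing. */
static inline void demo_port_prepare_disable(struct demo_hub *hub,
					     struct demo_port *port) { }
#endif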
diff --git a/drivers/usb/core/ledtrig-usbport.c b/drivers/usb/core/ledtrig-usbport.c
index 3ed5162677ad..1713248ab15a 100644
--- a/drivers/usb/core/ledtrig-usbport.c
+++ b/drivers/usb/core/ledtrig-usbport.c
@@ -74,8 +74,7 @@ static void usbport_trig_update_count(struct usbport_trig_data *usbport_data)
usbport_data->count = 0;
usb_for_each_dev(usbport_data, usbport_trig_usb_dev_check);
- led_cdev->brightness_set(led_cdev,
- usbport_data->count ? LED_FULL : LED_OFF);
+ led_set_brightness(led_cdev, usbport_data->count ? LED_FULL : LED_OFF);
}
/***************************************
@@ -228,12 +227,12 @@ static int usbport_trig_notify(struct notifier_block *nb, unsigned long action,
case USB_DEVICE_ADD:
usbport_trig_add_usb_dev_ports(usb_dev, usbport_data);
if (observed && usbport_data->count++ == 0)
- led_cdev->brightness_set(led_cdev, LED_FULL);
+ led_set_brightness(led_cdev, LED_FULL);
return NOTIFY_OK;
case USB_DEVICE_REMOVE:
usbport_trig_remove_usb_dev_ports(usbport_data, usb_dev);
if (observed && --usbport_data->count == 0)
- led_cdev->brightness_set(led_cdev, LED_OFF);
+ led_set_brightness(led_cdev, LED_OFF);
return NOTIFY_OK;
}
diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
index 3a4707746157..dea55914d641 100644
--- a/drivers/usb/core/message.c
+++ b/drivers/usb/core/message.c
@@ -1,5 +1,8 @@
/*
* message.c - synchronous message handling
+ *
+ * Released under the GPLv2 only.
+ * SPDX-License-Identifier: GPL-2.0
*/
#include <linux/pci.h> /* for scatterlist macros */
diff --git a/drivers/usb/core/notify.c b/drivers/usb/core/notify.c
index 7728c91dfa2e..b12a463a3e22 100644
--- a/drivers/usb/core/notify.c
+++ b/drivers/usb/core/notify.c
@@ -6,6 +6,8 @@
* notifier functions originally based on those in kernel/sys.c
* but fixed up to not be so broken.
*
+ * Released under the GPLv2 only.
+ * SPDX-License-Identifier: GPL-2.0
*/
diff --git a/drivers/usb/core/sysfs.c b/drivers/usb/core/sysfs.c
index c953a0f1c695..dfc68ed24db1 100644
--- a/drivers/usb/core/sysfs.c
+++ b/drivers/usb/core/sysfs.c
@@ -7,6 +7,8 @@
*
* All of the sysfs file attributes for usb devices and interfaces.
*
+ * Released under the GPLv2 only.
+ * SPDX-License-Identifier: GPL-2.0
*/
@@ -14,6 +16,7 @@
#include <linux/string.h>
#include <linux/usb.h>
#include <linux/usb/quirks.h>
+#include <linux/of.h>
#include "usb.h"
/* Active configuration fields */
@@ -104,6 +107,17 @@ static ssize_t bConfigurationValue_store(struct device *dev,
static DEVICE_ATTR_IGNORE_LOCKDEP(bConfigurationValue, S_IRUGO | S_IWUSR,
bConfigurationValue_show, bConfigurationValue_store);
+#ifdef CONFIG_OF
+static ssize_t devspec_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct device_node *of_node = dev->of_node;
+
+ return sprintf(buf, "%s\n", of_node_full_name(of_node));
+}
+static DEVICE_ATTR_RO(devspec);
+#endif
+
/* String fields */
#define usb_string_attr(name) \
static ssize_t name##_show(struct device *dev, \
@@ -786,6 +800,9 @@ static struct attribute *dev_attrs[] = {
&dev_attr_remove.attr,
&dev_attr_removable.attr,
&dev_attr_ltm_capable.attr,
+#ifdef CONFIG_OF
+ &dev_attr_devspec.attr,
+#endif
NULL,
};
static struct attribute_group dev_attr_grp = {
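
devspec_show() above uses the stock read-only sysfs attribute idiom: a name##_show() callback, DEVICE_ATTR_RO() to generate the matching dev_attr_name object, and an entry in a NULL-terminated attribute array. A minimal sketch, with a hypothetical attribute:

#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/sysfs.h>

static ssize_t demo_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	/* One value per file; sysfs hands us a full PAGE_SIZE buffer. */
	return sprintf(buf, "%s\n", dev_name(dev));
}
static DEVICE_ATTR_RO(demo);	/* generates dev_attr_demo -> demo_show */

static struct attribute *demo_attrs[] = {
	&dev_attr_demo.attr,
	NULL,			/* attribute arrays are NULL-terminated */
};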
diff --git a/drivers/usb/core/urb.c b/drivers/usb/core/urb.c
index a9039696476e..d75cb8c0f7df 100644
--- a/drivers/usb/core/urb.c
+++ b/drivers/usb/core/urb.c
@@ -1,3 +1,8 @@
+/*
+ * Released under the GPLv2 only.
+ * SPDX-License-Identifier: GPL-2.0
+ */
+
#include <linux/module.h>
#include <linux/string.h>
#include <linux/bitops.h>
@@ -407,11 +412,8 @@ int usb_submit_urb(struct urb *urb, gfp_t mem_flags)
}
/* "high bandwidth" mode, 1-3 packets/uframe? */
- if (dev->speed == USB_SPEED_HIGH) {
- int mult = 1 + ((max >> 11) & 0x03);
- max &= 0x07ff;
- max *= mult;
- }
+ if (dev->speed == USB_SPEED_HIGH)
+ max *= usb_endpoint_maxp_mult(&ep->desc);
if (urb->number_of_packets <= 0)
return -EINVAL;
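
Several hunks in this series (devices.c, endpoint.c, urb.c, and the chipidea udc) replace open-coded wMaxPacketSize decoding with usb_endpoint_maxp() and usb_endpoint_maxp_mult(). A sketch of what the two helpers compute for a high-speed, high-bandwidth endpoint, assuming maxp returns bits 10..0 and mult returns 1-3 as the hunks above imply (demo function, not a core API):

#include <linux/usb/ch9.h>

/*
 * wMaxPacketSize layout: bits 10..0 are the packet size, bits 12..11
 * encode one less than the transactions per microframe.
 */
static unsigned int demo_hs_bytes_per_uframe(
		const struct usb_endpoint_descriptor *desc)
{
	unsigned int maxp = usb_endpoint_maxp(desc);	  /* bits 10..0 */
	unsigned int mult = usb_endpoint_maxp_mult(desc); /* 1, 2 or 3 */

	return maxp * mult;	/* peak payload per microframe */
}

Centralising the bit-twiddling in the helpers is what lets each caller above drop its private `& 0x07ff` and `>> 11` arithmetic.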
diff --git a/drivers/usb/core/usb.c b/drivers/usb/core/usb.c
index 592151461017..a2ccc69fb45c 100644
--- a/drivers/usb/core/usb.c
+++ b/drivers/usb/core/usb.c
@@ -12,6 +12,9 @@
* (usb_device_id matching changes by Adam J. Richter)
* (C) Copyright Greg Kroah-Hartman 2002-2003
*
+ * Released under the GPLv2 only.
+ * SPDX-License-Identifier: GPL-2.0
+ *
* NOTE! This is not actually a driver at all, rather this is
* just a collection of helper routines that implement the
* generic USB things that the real drivers can use..
diff --git a/drivers/usb/core/usb.h b/drivers/usb/core/usb.h
index 53318126ed91..dc6949248823 100644
--- a/drivers/usb/core/usb.h
+++ b/drivers/usb/core/usb.h
@@ -1,3 +1,8 @@
+/*
+ * Released under the GPLv2 only.
+ * SPDX-License-Identifier: GPL-2.0
+ */
+
#include <linux/pm.h>
#include <linux/acpi.h>
diff --git a/drivers/usb/dwc2/Makefile b/drivers/usb/dwc2/Makefile
index 50fdaace1e73..b9237e1e45d0 100644
--- a/drivers/usb/dwc2/Makefile
+++ b/drivers/usb/dwc2/Makefile
@@ -3,6 +3,7 @@ ccflags-$(CONFIG_USB_DWC2_VERBOSE) += -DVERBOSE_DEBUG
obj-$(CONFIG_USB_DWC2) += dwc2.o
dwc2-y := core.o core_intr.o platform.o
+dwc2-y += params.o
ifneq ($(filter y,$(CONFIG_USB_DWC2_HOST) $(CONFIG_USB_DWC2_DUAL_ROLE)),)
dwc2-y += hcd.o hcd_intr.o
diff --git a/drivers/usb/dwc2/core.c b/drivers/usb/dwc2/core.c
index 4c0fa0b17353..11d8ae9aead1 100644
--- a/drivers/usb/dwc2/core.c
+++ b/drivers/usb/dwc2/core.c
@@ -135,7 +135,7 @@ int dwc2_exit_hibernation(struct dwc2_hsotg *hsotg, bool restore)
u32 pcgcctl;
int ret = 0;
- if (!hsotg->core_params->hibernation)
+ if (!hsotg->params.hibernation)
return -ENOTSUPP;
pcgcctl = dwc2_readl(hsotg->regs + PCGCTL);
@@ -188,7 +188,7 @@ int dwc2_enter_hibernation(struct dwc2_hsotg *hsotg)
u32 pcgcctl;
int ret = 0;
- if (!hsotg->core_params->hibernation)
+ if (!hsotg->params.hibernation)
return -ENOTSUPP;
/* Backup all registers */
@@ -445,7 +445,7 @@ static bool dwc2_force_mode(struct dwc2_hsotg *hsotg, bool host)
* the force mode. We only need to call this once during probe if
* dr_mode == OTG.
*/
-static void dwc2_clear_force_mode(struct dwc2_hsotg *hsotg)
+void dwc2_clear_force_mode(struct dwc2_hsotg *hsotg)
{
u32 gusbcfg;
@@ -541,7 +541,7 @@ void dwc2_dump_host_registers(struct dwc2_hsotg *hsotg)
addr = hsotg->regs + HAINTMSK;
dev_dbg(hsotg->dev, "HAINTMSK @0x%08lX : 0x%08X\n",
(unsigned long)addr, dwc2_readl(addr));
- if (hsotg->core_params->dma_desc_enable > 0) {
+ if (hsotg->params.dma_desc_enable > 0) {
addr = hsotg->regs + HFLBADDR;
dev_dbg(hsotg->dev, "HFLBADDR @0x%08lX : 0x%08X\n",
(unsigned long)addr, dwc2_readl(addr));
@@ -551,7 +551,7 @@ void dwc2_dump_host_registers(struct dwc2_hsotg *hsotg)
dev_dbg(hsotg->dev, "HPRT0 @0x%08lX : 0x%08X\n",
(unsigned long)addr, dwc2_readl(addr));
- for (i = 0; i < hsotg->core_params->host_channels; i++) {
+ for (i = 0; i < hsotg->params.host_channels; i++) {
dev_dbg(hsotg->dev, "Host Channel %d Specific Registers\n", i);
addr = hsotg->regs + HCCHAR(i);
dev_dbg(hsotg->dev, "HCCHAR @0x%08lX : 0x%08X\n",
@@ -571,7 +571,7 @@ void dwc2_dump_host_registers(struct dwc2_hsotg *hsotg)
addr = hsotg->regs + HCDMA(i);
dev_dbg(hsotg->dev, "HCDMA @0x%08lX : 0x%08X\n",
(unsigned long)addr, dwc2_readl(addr));
- if (hsotg->core_params->dma_desc_enable > 0) {
+ if (hsotg->params.dma_desc_enable > 0) {
addr = hsotg->regs + HCDMAB(i);
dev_dbg(hsotg->dev, "HCDMAB @0x%08lX : 0x%08X\n",
(unsigned long)addr, dwc2_readl(addr));
@@ -735,704 +735,13 @@ void dwc2_flush_rx_fifo(struct dwc2_hsotg *hsotg)
udelay(1);
}
-#define DWC2_OUT_OF_BOUNDS(a, b, c) ((a) < (b) || (a) > (c))
-
-/* Parameter access functions */
-void dwc2_set_param_otg_cap(struct dwc2_hsotg *hsotg, int val)
-{
- int valid = 1;
-
- switch (val) {
- case DWC2_CAP_PARAM_HNP_SRP_CAPABLE:
- if (hsotg->hw_params.op_mode != GHWCFG2_OP_MODE_HNP_SRP_CAPABLE)
- valid = 0;
- break;
- case DWC2_CAP_PARAM_SRP_ONLY_CAPABLE:
- switch (hsotg->hw_params.op_mode) {
- case GHWCFG2_OP_MODE_HNP_SRP_CAPABLE:
- case GHWCFG2_OP_MODE_SRP_ONLY_CAPABLE:
- case GHWCFG2_OP_MODE_SRP_CAPABLE_DEVICE:
- case GHWCFG2_OP_MODE_SRP_CAPABLE_HOST:
- break;
- default:
- valid = 0;
- break;
- }
- break;
- case DWC2_CAP_PARAM_NO_HNP_SRP_CAPABLE:
- /* always valid */
- break;
- default:
- valid = 0;
- break;
- }
-
- if (!valid) {
- if (val >= 0)
- dev_err(hsotg->dev,
- "%d invalid for otg_cap parameter. Check HW configuration.\n",
- val);
- switch (hsotg->hw_params.op_mode) {
- case GHWCFG2_OP_MODE_HNP_SRP_CAPABLE:
- val = DWC2_CAP_PARAM_HNP_SRP_CAPABLE;
- break;
- case GHWCFG2_OP_MODE_SRP_ONLY_CAPABLE:
- case GHWCFG2_OP_MODE_SRP_CAPABLE_DEVICE:
- case GHWCFG2_OP_MODE_SRP_CAPABLE_HOST:
- val = DWC2_CAP_PARAM_SRP_ONLY_CAPABLE;
- break;
- default:
- val = DWC2_CAP_PARAM_NO_HNP_SRP_CAPABLE;
- break;
- }
- dev_dbg(hsotg->dev, "Setting otg_cap to %d\n", val);
- }
-
- hsotg->core_params->otg_cap = val;
-}
-
-void dwc2_set_param_dma_enable(struct dwc2_hsotg *hsotg, int val)
-{
- int valid = 1;
-
- if (val > 0 && hsotg->hw_params.arch == GHWCFG2_SLAVE_ONLY_ARCH)
- valid = 0;
- if (val < 0)
- valid = 0;
-
- if (!valid) {
- if (val >= 0)
- dev_err(hsotg->dev,
- "%d invalid for dma_enable parameter. Check HW configuration.\n",
- val);
- val = hsotg->hw_params.arch != GHWCFG2_SLAVE_ONLY_ARCH;
- dev_dbg(hsotg->dev, "Setting dma_enable to %d\n", val);
- }
-
- hsotg->core_params->dma_enable = val;
-}
-
-void dwc2_set_param_dma_desc_enable(struct dwc2_hsotg *hsotg, int val)
-{
- int valid = 1;
-
- if (val > 0 && (hsotg->core_params->dma_enable <= 0 ||
- !hsotg->hw_params.dma_desc_enable))
- valid = 0;
- if (val < 0)
- valid = 0;
-
- if (!valid) {
- if (val >= 0)
- dev_err(hsotg->dev,
- "%d invalid for dma_desc_enable parameter. Check HW configuration.\n",
- val);
- val = (hsotg->core_params->dma_enable > 0 &&
- hsotg->hw_params.dma_desc_enable);
- dev_dbg(hsotg->dev, "Setting dma_desc_enable to %d\n", val);
- }
-
- hsotg->core_params->dma_desc_enable = val;
-}
-
-void dwc2_set_param_dma_desc_fs_enable(struct dwc2_hsotg *hsotg, int val)
-{
- int valid = 1;
-
- if (val > 0 && (hsotg->core_params->dma_enable <= 0 ||
- !hsotg->hw_params.dma_desc_enable))
- valid = 0;
- if (val < 0)
- valid = 0;
-
- if (!valid) {
- if (val >= 0)
- dev_err(hsotg->dev,
- "%d invalid for dma_desc_fs_enable parameter. Check HW configuration.\n",
- val);
- val = (hsotg->core_params->dma_enable > 0 &&
- hsotg->hw_params.dma_desc_enable);
- }
-
- hsotg->core_params->dma_desc_fs_enable = val;
- dev_dbg(hsotg->dev, "Setting dma_desc_fs_enable to %d\n", val);
-}
-
-void dwc2_set_param_host_support_fs_ls_low_power(struct dwc2_hsotg *hsotg,
- int val)
-{
- if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
- if (val >= 0) {
- dev_err(hsotg->dev,
- "Wrong value for host_support_fs_low_power\n");
- dev_err(hsotg->dev,
- "host_support_fs_low_power must be 0 or 1\n");
- }
- val = 0;
- dev_dbg(hsotg->dev,
- "Setting host_support_fs_low_power to %d\n", val);
- }
-
- hsotg->core_params->host_support_fs_ls_low_power = val;
-}
-
-void dwc2_set_param_enable_dynamic_fifo(struct dwc2_hsotg *hsotg, int val)
-{
- int valid = 1;
-
- if (val > 0 && !hsotg->hw_params.enable_dynamic_fifo)
- valid = 0;
- if (val < 0)
- valid = 0;
-
- if (!valid) {
- if (val >= 0)
- dev_err(hsotg->dev,
- "%d invalid for enable_dynamic_fifo parameter. Check HW configuration.\n",
- val);
- val = hsotg->hw_params.enable_dynamic_fifo;
- dev_dbg(hsotg->dev, "Setting enable_dynamic_fifo to %d\n", val);
- }
-
- hsotg->core_params->enable_dynamic_fifo = val;
-}
-
-void dwc2_set_param_host_rx_fifo_size(struct dwc2_hsotg *hsotg, int val)
-{
- int valid = 1;
-
- if (val < 16 || val > hsotg->hw_params.host_rx_fifo_size)
- valid = 0;
-
- if (!valid) {
- if (val >= 0)
- dev_err(hsotg->dev,
- "%d invalid for host_rx_fifo_size. Check HW configuration.\n",
- val);
- val = hsotg->hw_params.host_rx_fifo_size;
- dev_dbg(hsotg->dev, "Setting host_rx_fifo_size to %d\n", val);
- }
-
- hsotg->core_params->host_rx_fifo_size = val;
-}
-
-void dwc2_set_param_host_nperio_tx_fifo_size(struct dwc2_hsotg *hsotg, int val)
-{
- int valid = 1;
-
- if (val < 16 || val > hsotg->hw_params.host_nperio_tx_fifo_size)
- valid = 0;
-
- if (!valid) {
- if (val >= 0)
- dev_err(hsotg->dev,
- "%d invalid for host_nperio_tx_fifo_size. Check HW configuration.\n",
- val);
- val = hsotg->hw_params.host_nperio_tx_fifo_size;
- dev_dbg(hsotg->dev, "Setting host_nperio_tx_fifo_size to %d\n",
- val);
- }
-
- hsotg->core_params->host_nperio_tx_fifo_size = val;
-}
-
-void dwc2_set_param_host_perio_tx_fifo_size(struct dwc2_hsotg *hsotg, int val)
-{
- int valid = 1;
-
- if (val < 16 || val > hsotg->hw_params.host_perio_tx_fifo_size)
- valid = 0;
-
- if (!valid) {
- if (val >= 0)
- dev_err(hsotg->dev,
- "%d invalid for host_perio_tx_fifo_size. Check HW configuration.\n",
- val);
- val = hsotg->hw_params.host_perio_tx_fifo_size;
- dev_dbg(hsotg->dev, "Setting host_perio_tx_fifo_size to %d\n",
- val);
- }
-
- hsotg->core_params->host_perio_tx_fifo_size = val;
-}
-
-void dwc2_set_param_max_transfer_size(struct dwc2_hsotg *hsotg, int val)
-{
- int valid = 1;
-
- if (val < 2047 || val > hsotg->hw_params.max_transfer_size)
- valid = 0;
-
- if (!valid) {
- if (val >= 0)
- dev_err(hsotg->dev,
- "%d invalid for max_transfer_size. Check HW configuration.\n",
- val);
- val = hsotg->hw_params.max_transfer_size;
- dev_dbg(hsotg->dev, "Setting max_transfer_size to %d\n", val);
- }
-
- hsotg->core_params->max_transfer_size = val;
-}
-
-void dwc2_set_param_max_packet_count(struct dwc2_hsotg *hsotg, int val)
-{
- int valid = 1;
-
- if (val < 15 || val > hsotg->hw_params.max_packet_count)
- valid = 0;
-
- if (!valid) {
- if (val >= 0)
- dev_err(hsotg->dev,
- "%d invalid for max_packet_count. Check HW configuration.\n",
- val);
- val = hsotg->hw_params.max_packet_count;
- dev_dbg(hsotg->dev, "Setting max_packet_count to %d\n", val);
- }
-
- hsotg->core_params->max_packet_count = val;
-}
-
-void dwc2_set_param_host_channels(struct dwc2_hsotg *hsotg, int val)
-{
- int valid = 1;
-
- if (val < 1 || val > hsotg->hw_params.host_channels)
- valid = 0;
-
- if (!valid) {
- if (val >= 0)
- dev_err(hsotg->dev,
- "%d invalid for host_channels. Check HW configuration.\n",
- val);
- val = hsotg->hw_params.host_channels;
- dev_dbg(hsotg->dev, "Setting host_channels to %d\n", val);
- }
-
- hsotg->core_params->host_channels = val;
-}
-
-void dwc2_set_param_phy_type(struct dwc2_hsotg *hsotg, int val)
-{
- int valid = 0;
- u32 hs_phy_type, fs_phy_type;
-
- if (DWC2_OUT_OF_BOUNDS(val, DWC2_PHY_TYPE_PARAM_FS,
- DWC2_PHY_TYPE_PARAM_ULPI)) {
- if (val >= 0) {
- dev_err(hsotg->dev, "Wrong value for phy_type\n");
- dev_err(hsotg->dev, "phy_type must be 0, 1 or 2\n");
- }
-
- valid = 0;
- }
-
- hs_phy_type = hsotg->hw_params.hs_phy_type;
- fs_phy_type = hsotg->hw_params.fs_phy_type;
- if (val == DWC2_PHY_TYPE_PARAM_UTMI &&
- (hs_phy_type == GHWCFG2_HS_PHY_TYPE_UTMI ||
- hs_phy_type == GHWCFG2_HS_PHY_TYPE_UTMI_ULPI))
- valid = 1;
- else if (val == DWC2_PHY_TYPE_PARAM_ULPI &&
- (hs_phy_type == GHWCFG2_HS_PHY_TYPE_ULPI ||
- hs_phy_type == GHWCFG2_HS_PHY_TYPE_UTMI_ULPI))
- valid = 1;
- else if (val == DWC2_PHY_TYPE_PARAM_FS &&
- fs_phy_type == GHWCFG2_FS_PHY_TYPE_DEDICATED)
- valid = 1;
-
- if (!valid) {
- if (val >= 0)
- dev_err(hsotg->dev,
- "%d invalid for phy_type. Check HW configuration.\n",
- val);
- val = DWC2_PHY_TYPE_PARAM_FS;
- if (hs_phy_type != GHWCFG2_HS_PHY_TYPE_NOT_SUPPORTED) {
- if (hs_phy_type == GHWCFG2_HS_PHY_TYPE_UTMI ||
- hs_phy_type == GHWCFG2_HS_PHY_TYPE_UTMI_ULPI)
- val = DWC2_PHY_TYPE_PARAM_UTMI;
- else
- val = DWC2_PHY_TYPE_PARAM_ULPI;
- }
- dev_dbg(hsotg->dev, "Setting phy_type to %d\n", val);
- }
-
- hsotg->core_params->phy_type = val;
-}
-
-static int dwc2_get_param_phy_type(struct dwc2_hsotg *hsotg)
-{
- return hsotg->core_params->phy_type;
-}
-
-void dwc2_set_param_speed(struct dwc2_hsotg *hsotg, int val)
-{
- int valid = 1;
-
- if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
- if (val >= 0) {
- dev_err(hsotg->dev, "Wrong value for speed parameter\n");
- dev_err(hsotg->dev, "max_speed parameter must be 0 or 1\n");
- }
- valid = 0;
- }
-
- if (val == DWC2_SPEED_PARAM_HIGH &&
- dwc2_get_param_phy_type(hsotg) == DWC2_PHY_TYPE_PARAM_FS)
- valid = 0;
-
- if (!valid) {
- if (val >= 0)
- dev_err(hsotg->dev,
- "%d invalid for speed parameter. Check HW configuration.\n",
- val);
- val = dwc2_get_param_phy_type(hsotg) == DWC2_PHY_TYPE_PARAM_FS ?
- DWC2_SPEED_PARAM_FULL : DWC2_SPEED_PARAM_HIGH;
- dev_dbg(hsotg->dev, "Setting speed to %d\n", val);
- }
-
- hsotg->core_params->speed = val;
-}
-
-void dwc2_set_param_host_ls_low_power_phy_clk(struct dwc2_hsotg *hsotg, int val)
-{
- int valid = 1;
-
- if (DWC2_OUT_OF_BOUNDS(val, DWC2_HOST_LS_LOW_POWER_PHY_CLK_PARAM_48MHZ,
- DWC2_HOST_LS_LOW_POWER_PHY_CLK_PARAM_6MHZ)) {
- if (val >= 0) {
- dev_err(hsotg->dev,
- "Wrong value for host_ls_low_power_phy_clk parameter\n");
- dev_err(hsotg->dev,
- "host_ls_low_power_phy_clk must be 0 or 1\n");
- }
- valid = 0;
- }
-
- if (val == DWC2_HOST_LS_LOW_POWER_PHY_CLK_PARAM_48MHZ &&
- dwc2_get_param_phy_type(hsotg) == DWC2_PHY_TYPE_PARAM_FS)
- valid = 0;
-
- if (!valid) {
- if (val >= 0)
- dev_err(hsotg->dev,
- "%d invalid for host_ls_low_power_phy_clk. Check HW configuration.\n",
- val);
- val = dwc2_get_param_phy_type(hsotg) == DWC2_PHY_TYPE_PARAM_FS
- ? DWC2_HOST_LS_LOW_POWER_PHY_CLK_PARAM_6MHZ
- : DWC2_HOST_LS_LOW_POWER_PHY_CLK_PARAM_48MHZ;
- dev_dbg(hsotg->dev, "Setting host_ls_low_power_phy_clk to %d\n",
- val);
- }
-
- hsotg->core_params->host_ls_low_power_phy_clk = val;
-}
-
-void dwc2_set_param_phy_ulpi_ddr(struct dwc2_hsotg *hsotg, int val)
-{
- if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
- if (val >= 0) {
- dev_err(hsotg->dev, "Wrong value for phy_ulpi_ddr\n");
- dev_err(hsotg->dev, "phy_upli_ddr must be 0 or 1\n");
- }
- val = 0;
- dev_dbg(hsotg->dev, "Setting phy_upli_ddr to %d\n", val);
- }
-
- hsotg->core_params->phy_ulpi_ddr = val;
-}
-
-void dwc2_set_param_phy_ulpi_ext_vbus(struct dwc2_hsotg *hsotg, int val)
-{
- if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
- if (val >= 0) {
- dev_err(hsotg->dev,
- "Wrong value for phy_ulpi_ext_vbus\n");
- dev_err(hsotg->dev,
- "phy_ulpi_ext_vbus must be 0 or 1\n");
- }
- val = 0;
- dev_dbg(hsotg->dev, "Setting phy_ulpi_ext_vbus to %d\n", val);
- }
-
- hsotg->core_params->phy_ulpi_ext_vbus = val;
-}
-
-void dwc2_set_param_phy_utmi_width(struct dwc2_hsotg *hsotg, int val)
-{
- int valid = 0;
-
- switch (hsotg->hw_params.utmi_phy_data_width) {
- case GHWCFG4_UTMI_PHY_DATA_WIDTH_8:
- valid = (val == 8);
- break;
- case GHWCFG4_UTMI_PHY_DATA_WIDTH_16:
- valid = (val == 16);
- break;
- case GHWCFG4_UTMI_PHY_DATA_WIDTH_8_OR_16:
- valid = (val == 8 || val == 16);
- break;
- }
-
- if (!valid) {
- if (val >= 0) {
- dev_err(hsotg->dev,
- "%d invalid for phy_utmi_width. Check HW configuration.\n",
- val);
- }
- val = (hsotg->hw_params.utmi_phy_data_width ==
- GHWCFG4_UTMI_PHY_DATA_WIDTH_8) ? 8 : 16;
- dev_dbg(hsotg->dev, "Setting phy_utmi_width to %d\n", val);
- }
-
- hsotg->core_params->phy_utmi_width = val;
-}
-
-void dwc2_set_param_ulpi_fs_ls(struct dwc2_hsotg *hsotg, int val)
-{
- if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
- if (val >= 0) {
- dev_err(hsotg->dev, "Wrong value for ulpi_fs_ls\n");
- dev_err(hsotg->dev, "ulpi_fs_ls must be 0 or 1\n");
- }
- val = 0;
- dev_dbg(hsotg->dev, "Setting ulpi_fs_ls to %d\n", val);
- }
-
- hsotg->core_params->ulpi_fs_ls = val;
-}
-
-void dwc2_set_param_ts_dline(struct dwc2_hsotg *hsotg, int val)
-{
- if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
- if (val >= 0) {
- dev_err(hsotg->dev, "Wrong value for ts_dline\n");
- dev_err(hsotg->dev, "ts_dline must be 0 or 1\n");
- }
- val = 0;
- dev_dbg(hsotg->dev, "Setting ts_dline to %d\n", val);
- }
-
- hsotg->core_params->ts_dline = val;
-}
-
-void dwc2_set_param_i2c_enable(struct dwc2_hsotg *hsotg, int val)
-{
- int valid = 1;
-
- if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
- if (val >= 0) {
- dev_err(hsotg->dev, "Wrong value for i2c_enable\n");
- dev_err(hsotg->dev, "i2c_enable must be 0 or 1\n");
- }
-
- valid = 0;
- }
-
- if (val == 1 && !(hsotg->hw_params.i2c_enable))
- valid = 0;
-
- if (!valid) {
- if (val >= 0)
- dev_err(hsotg->dev,
- "%d invalid for i2c_enable. Check HW configuration.\n",
- val);
- val = hsotg->hw_params.i2c_enable;
- dev_dbg(hsotg->dev, "Setting i2c_enable to %d\n", val);
- }
-
- hsotg->core_params->i2c_enable = val;
-}
-
-void dwc2_set_param_en_multiple_tx_fifo(struct dwc2_hsotg *hsotg, int val)
-{
- int valid = 1;
-
- if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
- if (val >= 0) {
- dev_err(hsotg->dev,
- "Wrong value for en_multiple_tx_fifo,\n");
- dev_err(hsotg->dev,
- "en_multiple_tx_fifo must be 0 or 1\n");
- }
- valid = 0;
- }
-
- if (val == 1 && !hsotg->hw_params.en_multiple_tx_fifo)
- valid = 0;
-
- if (!valid) {
- if (val >= 0)
- dev_err(hsotg->dev,
- "%d invalid for parameter en_multiple_tx_fifo. Check HW configuration.\n",
- val);
- val = hsotg->hw_params.en_multiple_tx_fifo;
- dev_dbg(hsotg->dev, "Setting en_multiple_tx_fifo to %d\n", val);
- }
-
- hsotg->core_params->en_multiple_tx_fifo = val;
-}
-
-void dwc2_set_param_reload_ctl(struct dwc2_hsotg *hsotg, int val)
-{
- int valid = 1;
-
- if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
- if (val >= 0) {
- dev_err(hsotg->dev,
- "'%d' invalid for parameter reload_ctl\n", val);
- dev_err(hsotg->dev, "reload_ctl must be 0 or 1\n");
- }
- valid = 0;
- }
-
- if (val == 1 && hsotg->hw_params.snpsid < DWC2_CORE_REV_2_92a)
- valid = 0;
-
- if (!valid) {
- if (val >= 0)
- dev_err(hsotg->dev,
- "%d invalid for parameter reload_ctl. Check HW configuration.\n",
- val);
- val = hsotg->hw_params.snpsid >= DWC2_CORE_REV_2_92a;
- dev_dbg(hsotg->dev, "Setting reload_ctl to %d\n", val);
- }
-
- hsotg->core_params->reload_ctl = val;
-}
-
-void dwc2_set_param_ahbcfg(struct dwc2_hsotg *hsotg, int val)
-{
- if (val != -1)
- hsotg->core_params->ahbcfg = val;
- else
- hsotg->core_params->ahbcfg = GAHBCFG_HBSTLEN_INCR4 <<
- GAHBCFG_HBSTLEN_SHIFT;
-}
-
-void dwc2_set_param_otg_ver(struct dwc2_hsotg *hsotg, int val)
-{
- if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
- if (val >= 0) {
- dev_err(hsotg->dev,
- "'%d' invalid for parameter otg_ver\n", val);
- dev_err(hsotg->dev,
- "otg_ver must be 0 (for OTG 1.3 support) or 1 (for OTG 2.0 support)\n");
- }
- val = 0;
- dev_dbg(hsotg->dev, "Setting otg_ver to %d\n", val);
- }
-
- hsotg->core_params->otg_ver = val;
-}
-
-static void dwc2_set_param_uframe_sched(struct dwc2_hsotg *hsotg, int val)
-{
- if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
- if (val >= 0) {
- dev_err(hsotg->dev,
- "'%d' invalid for parameter uframe_sched\n",
- val);
- dev_err(hsotg->dev, "uframe_sched must be 0 or 1\n");
- }
- val = 1;
- dev_dbg(hsotg->dev, "Setting uframe_sched to %d\n", val);
- }
-
- hsotg->core_params->uframe_sched = val;
-}
-
-static void dwc2_set_param_external_id_pin_ctl(struct dwc2_hsotg *hsotg,
- int val)
-{
- if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
- if (val >= 0) {
- dev_err(hsotg->dev,
- "'%d' invalid for parameter external_id_pin_ctl\n",
- val);
- dev_err(hsotg->dev, "external_id_pin_ctl must be 0 or 1\n");
- }
- val = 0;
- dev_dbg(hsotg->dev, "Setting external_id_pin_ctl to %d\n", val);
- }
-
- hsotg->core_params->external_id_pin_ctl = val;
-}
-
-static void dwc2_set_param_hibernation(struct dwc2_hsotg *hsotg,
- int val)
-{
- if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
- if (val >= 0) {
- dev_err(hsotg->dev,
- "'%d' invalid for parameter hibernation\n",
- val);
- dev_err(hsotg->dev, "hibernation must be 0 or 1\n");
- }
- val = 0;
- dev_dbg(hsotg->dev, "Setting hibernation to %d\n", val);
- }
-
- hsotg->core_params->hibernation = val;
-}
-
-/*
- * This function is called during module intialization to pass module parameters
- * for the DWC_otg core.
- */
-void dwc2_set_parameters(struct dwc2_hsotg *hsotg,
- const struct dwc2_core_params *params)
-{
- dev_dbg(hsotg->dev, "%s()\n", __func__);
-
- dwc2_set_param_otg_cap(hsotg, params->otg_cap);
- dwc2_set_param_dma_enable(hsotg, params->dma_enable);
- dwc2_set_param_dma_desc_enable(hsotg, params->dma_desc_enable);
- dwc2_set_param_dma_desc_fs_enable(hsotg, params->dma_desc_fs_enable);
- dwc2_set_param_host_support_fs_ls_low_power(hsotg,
- params->host_support_fs_ls_low_power);
- dwc2_set_param_enable_dynamic_fifo(hsotg,
- params->enable_dynamic_fifo);
- dwc2_set_param_host_rx_fifo_size(hsotg,
- params->host_rx_fifo_size);
- dwc2_set_param_host_nperio_tx_fifo_size(hsotg,
- params->host_nperio_tx_fifo_size);
- dwc2_set_param_host_perio_tx_fifo_size(hsotg,
- params->host_perio_tx_fifo_size);
- dwc2_set_param_max_transfer_size(hsotg,
- params->max_transfer_size);
- dwc2_set_param_max_packet_count(hsotg,
- params->max_packet_count);
- dwc2_set_param_host_channels(hsotg, params->host_channels);
- dwc2_set_param_phy_type(hsotg, params->phy_type);
- dwc2_set_param_speed(hsotg, params->speed);
- dwc2_set_param_host_ls_low_power_phy_clk(hsotg,
- params->host_ls_low_power_phy_clk);
- dwc2_set_param_phy_ulpi_ddr(hsotg, params->phy_ulpi_ddr);
- dwc2_set_param_phy_ulpi_ext_vbus(hsotg,
- params->phy_ulpi_ext_vbus);
- dwc2_set_param_phy_utmi_width(hsotg, params->phy_utmi_width);
- dwc2_set_param_ulpi_fs_ls(hsotg, params->ulpi_fs_ls);
- dwc2_set_param_ts_dline(hsotg, params->ts_dline);
- dwc2_set_param_i2c_enable(hsotg, params->i2c_enable);
- dwc2_set_param_en_multiple_tx_fifo(hsotg,
- params->en_multiple_tx_fifo);
- dwc2_set_param_reload_ctl(hsotg, params->reload_ctl);
- dwc2_set_param_ahbcfg(hsotg, params->ahbcfg);
- dwc2_set_param_otg_ver(hsotg, params->otg_ver);
- dwc2_set_param_uframe_sched(hsotg, params->uframe_sched);
- dwc2_set_param_external_id_pin_ctl(hsotg, params->external_id_pin_ctl);
- dwc2_set_param_hibernation(hsotg, params->hibernation);
-}
-
/*
* Forces either host or device mode if the controller is not
* currently in that mode.
*
* Returns true if the mode was forced.
*/
-static bool dwc2_force_mode_if_needed(struct dwc2_hsotg *hsotg, bool host)
+bool dwc2_force_mode_if_needed(struct dwc2_hsotg *hsotg, bool host)
{
if (host && dwc2_is_host_mode(hsotg))
return false;
@@ -1442,232 +751,9 @@ static bool dwc2_force_mode_if_needed(struct dwc2_hsotg *hsotg, bool host)
return dwc2_force_mode(hsotg, host);
}
-/*
- * Gets host hardware parameters. Forces host mode if not currently in
- * host mode. Should be called immediately after a core soft reset in
- * order to get the reset values.
- */
-static void dwc2_get_host_hwparams(struct dwc2_hsotg *hsotg)
-{
- struct dwc2_hw_params *hw = &hsotg->hw_params;
- u32 gnptxfsiz;
- u32 hptxfsiz;
- bool forced;
-
- if (hsotg->dr_mode == USB_DR_MODE_PERIPHERAL)
- return;
-
- forced = dwc2_force_mode_if_needed(hsotg, true);
-
- gnptxfsiz = dwc2_readl(hsotg->regs + GNPTXFSIZ);
- hptxfsiz = dwc2_readl(hsotg->regs + HPTXFSIZ);
- dev_dbg(hsotg->dev, "gnptxfsiz=%08x\n", gnptxfsiz);
- dev_dbg(hsotg->dev, "hptxfsiz=%08x\n", hptxfsiz);
-
- if (forced)
- dwc2_clear_force_mode(hsotg);
-
- hw->host_nperio_tx_fifo_size = (gnptxfsiz & FIFOSIZE_DEPTH_MASK) >>
- FIFOSIZE_DEPTH_SHIFT;
- hw->host_perio_tx_fifo_size = (hptxfsiz & FIFOSIZE_DEPTH_MASK) >>
- FIFOSIZE_DEPTH_SHIFT;
-}
-
-/*
- * Gets device hardware parameters. Forces device mode if not
- * currently in device mode. Should be called immediately after a core
- * soft reset in order to get the reset values.
- */
-static void dwc2_get_dev_hwparams(struct dwc2_hsotg *hsotg)
-{
- struct dwc2_hw_params *hw = &hsotg->hw_params;
- bool forced;
- u32 gnptxfsiz;
-
- if (hsotg->dr_mode == USB_DR_MODE_HOST)
- return;
-
- forced = dwc2_force_mode_if_needed(hsotg, false);
-
- gnptxfsiz = dwc2_readl(hsotg->regs + GNPTXFSIZ);
- dev_dbg(hsotg->dev, "gnptxfsiz=%08x\n", gnptxfsiz);
-
- if (forced)
- dwc2_clear_force_mode(hsotg);
-
- hw->dev_nperio_tx_fifo_size = (gnptxfsiz & FIFOSIZE_DEPTH_MASK) >>
- FIFOSIZE_DEPTH_SHIFT;
-}
-
-/**
- * During device initialization, read various hardware configuration
- * registers and interpret the contents.
- */
-int dwc2_get_hwparams(struct dwc2_hsotg *hsotg)
-{
- struct dwc2_hw_params *hw = &hsotg->hw_params;
- unsigned width;
- u32 hwcfg1, hwcfg2, hwcfg3, hwcfg4;
- u32 grxfsiz;
-
- /*
- * Attempt to ensure this device is really a DWC_otg Controller.
- * Read and verify the GSNPSID register contents. The value should be
- * 0x45f42xxx or 0x45f43xxx, which corresponds to either "OT2" or "OT3",
- * as in "OTG version 2.xx" or "OTG version 3.xx".
- */
- hw->snpsid = dwc2_readl(hsotg->regs + GSNPSID);
- if ((hw->snpsid & 0xfffff000) != 0x4f542000 &&
- (hw->snpsid & 0xfffff000) != 0x4f543000) {
- dev_err(hsotg->dev, "Bad value for GSNPSID: 0x%08x\n",
- hw->snpsid);
- return -ENODEV;
- }
-
- dev_dbg(hsotg->dev, "Core Release: %1x.%1x%1x%1x (snpsid=%x)\n",
- hw->snpsid >> 12 & 0xf, hw->snpsid >> 8 & 0xf,
- hw->snpsid >> 4 & 0xf, hw->snpsid & 0xf, hw->snpsid);
-
- hwcfg1 = dwc2_readl(hsotg->regs + GHWCFG1);
- hwcfg2 = dwc2_readl(hsotg->regs + GHWCFG2);
- hwcfg3 = dwc2_readl(hsotg->regs + GHWCFG3);
- hwcfg4 = dwc2_readl(hsotg->regs + GHWCFG4);
- grxfsiz = dwc2_readl(hsotg->regs + GRXFSIZ);
-
- dev_dbg(hsotg->dev, "hwcfg1=%08x\n", hwcfg1);
- dev_dbg(hsotg->dev, "hwcfg2=%08x\n", hwcfg2);
- dev_dbg(hsotg->dev, "hwcfg3=%08x\n", hwcfg3);
- dev_dbg(hsotg->dev, "hwcfg4=%08x\n", hwcfg4);
- dev_dbg(hsotg->dev, "grxfsiz=%08x\n", grxfsiz);
-
- /*
- * Host specific hardware parameters. Reading these parameters
- * requires the controller to be in host mode. The mode will
- * be forced, if necessary, to read these values.
- */
- dwc2_get_host_hwparams(hsotg);
- dwc2_get_dev_hwparams(hsotg);
-
- /* hwcfg1 */
- hw->dev_ep_dirs = hwcfg1;
-
- /* hwcfg2 */
- hw->op_mode = (hwcfg2 & GHWCFG2_OP_MODE_MASK) >>
- GHWCFG2_OP_MODE_SHIFT;
- hw->arch = (hwcfg2 & GHWCFG2_ARCHITECTURE_MASK) >>
- GHWCFG2_ARCHITECTURE_SHIFT;
- hw->enable_dynamic_fifo = !!(hwcfg2 & GHWCFG2_DYNAMIC_FIFO);
- hw->host_channels = 1 + ((hwcfg2 & GHWCFG2_NUM_HOST_CHAN_MASK) >>
- GHWCFG2_NUM_HOST_CHAN_SHIFT);
- hw->hs_phy_type = (hwcfg2 & GHWCFG2_HS_PHY_TYPE_MASK) >>
- GHWCFG2_HS_PHY_TYPE_SHIFT;
- hw->fs_phy_type = (hwcfg2 & GHWCFG2_FS_PHY_TYPE_MASK) >>
- GHWCFG2_FS_PHY_TYPE_SHIFT;
- hw->num_dev_ep = (hwcfg2 & GHWCFG2_NUM_DEV_EP_MASK) >>
- GHWCFG2_NUM_DEV_EP_SHIFT;
- hw->nperio_tx_q_depth =
- (hwcfg2 & GHWCFG2_NONPERIO_TX_Q_DEPTH_MASK) >>
- GHWCFG2_NONPERIO_TX_Q_DEPTH_SHIFT << 1;
- hw->host_perio_tx_q_depth =
- (hwcfg2 & GHWCFG2_HOST_PERIO_TX_Q_DEPTH_MASK) >>
- GHWCFG2_HOST_PERIO_TX_Q_DEPTH_SHIFT << 1;
- hw->dev_token_q_depth =
- (hwcfg2 & GHWCFG2_DEV_TOKEN_Q_DEPTH_MASK) >>
- GHWCFG2_DEV_TOKEN_Q_DEPTH_SHIFT;
-
- /* hwcfg3 */
- width = (hwcfg3 & GHWCFG3_XFER_SIZE_CNTR_WIDTH_MASK) >>
- GHWCFG3_XFER_SIZE_CNTR_WIDTH_SHIFT;
- hw->max_transfer_size = (1 << (width + 11)) - 1;
- width = (hwcfg3 & GHWCFG3_PACKET_SIZE_CNTR_WIDTH_MASK) >>
- GHWCFG3_PACKET_SIZE_CNTR_WIDTH_SHIFT;
- hw->max_packet_count = (1 << (width + 4)) - 1;
- hw->i2c_enable = !!(hwcfg3 & GHWCFG3_I2C);
- hw->total_fifo_size = (hwcfg3 & GHWCFG3_DFIFO_DEPTH_MASK) >>
- GHWCFG3_DFIFO_DEPTH_SHIFT;
-
- /* hwcfg4 */
- hw->en_multiple_tx_fifo = !!(hwcfg4 & GHWCFG4_DED_FIFO_EN);
- hw->num_dev_perio_in_ep = (hwcfg4 & GHWCFG4_NUM_DEV_PERIO_IN_EP_MASK) >>
- GHWCFG4_NUM_DEV_PERIO_IN_EP_SHIFT;
- hw->dma_desc_enable = !!(hwcfg4 & GHWCFG4_DESC_DMA);
- hw->power_optimized = !!(hwcfg4 & GHWCFG4_POWER_OPTIMIZ);
- hw->utmi_phy_data_width = (hwcfg4 & GHWCFG4_UTMI_PHY_DATA_WIDTH_MASK) >>
- GHWCFG4_UTMI_PHY_DATA_WIDTH_SHIFT;
-
- /* fifo sizes */
- hw->host_rx_fifo_size = (grxfsiz & GRXFSIZ_DEPTH_MASK) >>
- GRXFSIZ_DEPTH_SHIFT;
-
- dev_dbg(hsotg->dev, "Detected values from hardware:\n");
- dev_dbg(hsotg->dev, " op_mode=%d\n",
- hw->op_mode);
- dev_dbg(hsotg->dev, " arch=%d\n",
- hw->arch);
- dev_dbg(hsotg->dev, " dma_desc_enable=%d\n",
- hw->dma_desc_enable);
- dev_dbg(hsotg->dev, " power_optimized=%d\n",
- hw->power_optimized);
- dev_dbg(hsotg->dev, " i2c_enable=%d\n",
- hw->i2c_enable);
- dev_dbg(hsotg->dev, " hs_phy_type=%d\n",
- hw->hs_phy_type);
- dev_dbg(hsotg->dev, " fs_phy_type=%d\n",
- hw->fs_phy_type);
- dev_dbg(hsotg->dev, " utmi_phy_data_width=%d\n",
- hw->utmi_phy_data_width);
- dev_dbg(hsotg->dev, " num_dev_ep=%d\n",
- hw->num_dev_ep);
- dev_dbg(hsotg->dev, " num_dev_perio_in_ep=%d\n",
- hw->num_dev_perio_in_ep);
- dev_dbg(hsotg->dev, " host_channels=%d\n",
- hw->host_channels);
- dev_dbg(hsotg->dev, " max_transfer_size=%d\n",
- hw->max_transfer_size);
- dev_dbg(hsotg->dev, " max_packet_count=%d\n",
- hw->max_packet_count);
- dev_dbg(hsotg->dev, " nperio_tx_q_depth=0x%0x\n",
- hw->nperio_tx_q_depth);
- dev_dbg(hsotg->dev, " host_perio_tx_q_depth=0x%0x\n",
- hw->host_perio_tx_q_depth);
- dev_dbg(hsotg->dev, " dev_token_q_depth=0x%0x\n",
- hw->dev_token_q_depth);
- dev_dbg(hsotg->dev, " enable_dynamic_fifo=%d\n",
- hw->enable_dynamic_fifo);
- dev_dbg(hsotg->dev, " en_multiple_tx_fifo=%d\n",
- hw->en_multiple_tx_fifo);
- dev_dbg(hsotg->dev, " total_fifo_size=%d\n",
- hw->total_fifo_size);
- dev_dbg(hsotg->dev, " host_rx_fifo_size=%d\n",
- hw->host_rx_fifo_size);
- dev_dbg(hsotg->dev, " host_nperio_tx_fifo_size=%d\n",
- hw->host_nperio_tx_fifo_size);
- dev_dbg(hsotg->dev, " host_perio_tx_fifo_size=%d\n",
- hw->host_perio_tx_fifo_size);
- dev_dbg(hsotg->dev, "\n");
-
- return 0;
-}
-
-/*
- * Sets all parameters to the given value.
- *
- * Assumes that the dwc2_core_params struct contains only integers.
- */
-void dwc2_set_all_params(struct dwc2_core_params *params, int value)
-{
- int *p = (int *)params;
- size_t size = sizeof(*params) / sizeof(*p);
- int i;
-
- for (i = 0; i < size; i++)
- p[i] = value;
-}
-
-
u16 dwc2_get_otg_version(struct dwc2_hsotg *hsotg)
{
- return hsotg->core_params->otg_ver == 1 ? 0x0200 : 0x0103;
+ return hsotg->params.otg_ver == 1 ? 0x0200 : 0x0103;
}
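
/*
 * Example: params.otg_ver == 1 reports BCD 0x0200 (OTG 2.0); any other
 * value reports 0x0103, the OTG 1.3 default documented in core.h.
 */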
bool dwc2_is_controller_alive(struct dwc2_hsotg *hsotg)
diff --git a/drivers/usb/dwc2/core.h b/drivers/usb/dwc2/core.h
index 2a21a0414b1d..9548d3e03453 100644
--- a/drivers/usb/dwc2/core.h
+++ b/drivers/usb/dwc2/core.h
@@ -172,6 +172,11 @@ struct dwc2_hsotg_req;
* @periodic: Set if this is a periodic ep, such as Interrupt
* @isochronous: Set if this is a isochronous ep
* @send_zlp: Set if we need to send a zero-length packet.
+ * @desc_list_dma: The DMA address of descriptor chain currently in use.
+ * @desc_list: Pointer to descriptor DMA chain head currently in use.
+ * @desc_count: Count of entries within the DMA descriptor chain of EP.
+ * @isoc_chain_num: Number of ISOC chain currently in use - either 0 or 1.
+ * @next_desc: Index of the next free descriptor in the ISOC chain under SW control.
* @total_data: The total number of data bytes done.
* @fifo_size: The size of the FIFO (for periodic IN endpoints)
* @fifo_load: The amount of data loaded into the FIFO (periodic IN)
@@ -219,6 +224,13 @@ struct dwc2_hsotg_ep {
#define TARGET_FRAME_INITIAL 0xFFFFFFFF
bool frame_overrun;
+ dma_addr_t desc_list_dma;
+ struct dwc2_dma_desc *desc_list;
+ u8 desc_count;
+
+ unsigned char isoc_chain_num;
+ unsigned int next_desc;
+
char name[10];
};
@@ -286,7 +298,7 @@ enum dwc2_ep0_state {
* @otg_ver: OTG version supported
* 0 - 1.3 (default)
* 1 - 2.0
- * @dma_enable: Specifies whether to use slave or DMA mode for accessing
+ * @host_dma: Specifies whether to use slave or DMA mode for accessing
* the data FIFOs. The driver will automatically detect the
* value for this parameter if none is specified.
* 0 - Slave (always available)
@@ -314,7 +326,8 @@ enum dwc2_ep0_state {
* @enable_dynamic_fifo: 0 - Use coreConsultant-specified FIFO size parameters
* 1 - Allow dynamic FIFO sizing (default, if available)
* @en_multiple_tx_fifo: Specifies whether dedicated per-endpoint transmit FIFOs
- * are enabled
+ * are enabled for non-periodic IN endpoints in device
+ * mode.
* @host_rx_fifo_size: Number of 4-byte words in the Rx FIFO in host mode when
* dynamic FIFO sizing is enabled
* 16 to 32768
@@ -417,6 +430,20 @@ enum dwc2_ep0_state {
* needed.
* 0 - No (default)
* 1 - Yes
+ * @g_dma: Enables gadget dma usage (default: autodetect).
+ * @g_dma_desc: Enables gadget descriptor DMA (default: autodetect).
+ * @g_rx_fifo_size: The periodic rx fifo size for the device, in
+ * DWORDS from 16-32768 (default: 2048 if
+ * possible, otherwise autodetect).
+ * @g_np_tx_fifo_size: The non-periodic tx fifo size for the device in
+ * DWORDS from 16-32768 (default: 1024 if
+ * possible, otherwise autodetect).
+ * @g_tx_fifo_size: An array of TX fifo sizes in dedicated fifo
+ * mode. Each value corresponds to one EP
+ * starting from EP1 (max 15 values). Sizes are
+ * in DWORDS with possible values from
+ * 16-32768 (default: 256, 256, 256, 256, 768,
+ * 768, 768, 768, 0, 0, 0, 0, 0, 0, 0).
*
* The following parameters may be specified when starting the module. These
* parameters define how the DWC_otg controller should be configured. A
@@ -430,11 +457,18 @@ struct dwc2_core_params {
* dwc2_set_all_params!
*/
int otg_cap;
+#define DWC2_CAP_PARAM_HNP_SRP_CAPABLE 0
+#define DWC2_CAP_PARAM_SRP_ONLY_CAPABLE 1
+#define DWC2_CAP_PARAM_NO_HNP_SRP_CAPABLE 2
+
int otg_ver;
- int dma_enable;
int dma_desc_enable;
int dma_desc_fs_enable;
int speed;
+#define DWC2_SPEED_PARAM_HIGH 0
+#define DWC2_SPEED_PARAM_FULL 1
+#define DWC2_SPEED_PARAM_LOW 2
+
int enable_dynamic_fifo;
int en_multiple_tx_fifo;
int host_rx_fifo_size;
@@ -444,19 +478,44 @@ struct dwc2_core_params {
int max_packet_count;
int host_channels;
int phy_type;
+#define DWC2_PHY_TYPE_PARAM_FS 0
+#define DWC2_PHY_TYPE_PARAM_UTMI 1
+#define DWC2_PHY_TYPE_PARAM_ULPI 2
+
int phy_utmi_width;
int phy_ulpi_ddr;
int phy_ulpi_ext_vbus;
+#define DWC2_PHY_ULPI_INTERNAL_VBUS 0
+#define DWC2_PHY_ULPI_EXTERNAL_VBUS 1
+
int i2c_enable;
int ulpi_fs_ls;
int host_support_fs_ls_low_power;
int host_ls_low_power_phy_clk;
+#define DWC2_HOST_LS_LOW_POWER_PHY_CLK_PARAM_48MHZ 0
+#define DWC2_HOST_LS_LOW_POWER_PHY_CLK_PARAM_6MHZ 1
+
int ts_dline;
int reload_ctl;
int ahbcfg;
int uframe_sched;
int external_id_pin_ctl;
int hibernation;
+
+ /*
+ * The following parameters are *only* set via device
+ * properties and cannot be set directly in this structure.
+ */
+
+ /* Host parameters */
+ bool host_dma;
+
+ /* Gadget parameters */
+ bool g_dma;
+ bool g_dma_desc;
+ u16 g_rx_fifo_size;
+ u16 g_np_tx_fifo_size;
+ u32 g_tx_fifo_size[MAX_EPS_CHANNELS];
};
/**
@@ -516,10 +575,9 @@ struct dwc2_hw_params {
unsigned op_mode:3;
unsigned arch:2;
unsigned dma_desc_enable:1;
- unsigned dma_desc_fs_enable:1;
unsigned enable_dynamic_fifo:1;
unsigned en_multiple_tx_fifo:1;
- unsigned host_rx_fifo_size:16;
+ unsigned rx_fifo_size:16;
unsigned host_nperio_tx_fifo_size:16;
unsigned dev_nperio_tx_fifo_size:16;
unsigned host_perio_tx_fifo_size:16;
@@ -839,11 +897,13 @@ struct dwc2_hregs_backup {
* @ctrl_req: Request for EP0 control packets.
* @ep0_state: EP0 control transfers state
* @test_mode: USB test mode requested by the host
+ * @setup_desc_dma: EP0 setup stage desc chain DMA address
+ * @setup_desc: EP0 setup stage desc chain pointer
+ * @ctrl_in_desc_dma: EP0 IN data phase desc chain DMA address
+ * @ctrl_in_desc: EP0 IN data phase desc chain pointer
+ * @ctrl_out_desc_dma: EP0 OUT data phase desc chain DMA address
+ * @ctrl_out_desc: EP0 OUT data phase desc chain pointer
* @eps: The endpoints being supplied to the gadget framework
- * @g_using_dma: Indicate if dma usage is enabled
- * @g_rx_fifo_sz: Contains rx fifo size value
- * @g_np_g_tx_fifo_sz: Contains Non-Periodic tx fifo size value
- * @g_tx_fifo_sz: Contains tx fifo size value per endpoints
*/
struct dwc2_hsotg {
struct device *dev;
@@ -851,7 +911,7 @@ struct dwc2_hsotg {
/** Params detected from hardware */
struct dwc2_hw_params hw_params;
/** Params to actually use */
- struct dwc2_core_params *core_params;
+ struct dwc2_core_params params;
enum usb_otg_state op_state;
enum usb_dr_mode dr_mode;
unsigned int hcd_enabled:1;
@@ -891,6 +951,8 @@ struct dwc2_hsotg {
#define DWC2_CORE_REV_2_94a 0x4f54294a
#define DWC2_CORE_REV_3_00a 0x4f54300a
#define DWC2_CORE_REV_3_10a 0x4f54310a
+#define DWC2_FS_IOT_REV_1_00a 0x5531100a
+#define DWC2_HS_IOT_REV_1_00a 0x5532100a
#if IS_ENABLED(CONFIG_USB_DWC2_HOST) || IS_ENABLED(CONFIG_USB_DWC2_DUAL_ROLE)
union dwc2_hcd_internal_flags {
@@ -986,15 +1048,18 @@ struct dwc2_hsotg {
enum dwc2_ep0_state ep0_state;
u8 test_mode;
+ dma_addr_t setup_desc_dma[2];
+ struct dwc2_dma_desc *setup_desc[2];
+ dma_addr_t ctrl_in_desc_dma;
+ struct dwc2_dma_desc *ctrl_in_desc;
+ dma_addr_t ctrl_out_desc_dma;
+ struct dwc2_dma_desc *ctrl_out_desc;
+
struct usb_gadget gadget;
unsigned int enabled:1;
unsigned int connected:1;
struct dwc2_hsotg_ep *eps_in[MAX_EPS_CHANNELS];
struct dwc2_hsotg_ep *eps_out[MAX_EPS_CHANNELS];
- u32 g_using_dma;
- u32 g_rx_fifo_sz;
- u32 g_np_g_tx_fifo_sz;
- u32 g_tx_fifo_sz[MAX_EPS_CHANNELS];
#endif /* CONFIG_USB_DWC2_PERIPHERAL || CONFIG_USB_DWC2_DUAL_ROLE */
};
@@ -1016,6 +1081,22 @@ enum dwc2_halt_status {
DWC2_HC_XFER_URB_DEQUEUE,
};
+/* Core version information */
+static inline bool dwc2_is_iot(struct dwc2_hsotg *hsotg)
+{
+ return (hsotg->hw_params.snpsid & 0xfff00000) == 0x55300000;
+}
+
+static inline bool dwc2_is_fs_iot(struct dwc2_hsotg *hsotg)
+{
+ return (hsotg->hw_params.snpsid & 0xffff0000) == 0x55310000;
+}
+
+static inline bool dwc2_is_hs_iot(struct dwc2_hsotg *hsotg)
+{
+ return (hsotg->hw_params.snpsid & 0xffff0000) == 0x55320000;
+}
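+
+/*
+ * A minimal usage sketch (not from this patch): the predicates above let
+ * callers gate behaviour on the GSNPSID-derived core variant. FS IoT
+ * cores are full-speed only (as the name suggests), so a caller might
+ * pick a default speed as:
+ *
+ *	speed = dwc2_is_fs_iot(hsotg) ? DWC2_SPEED_PARAM_FULL
+ *				      : DWC2_SPEED_PARAM_HIGH;
+ */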
+
/*
* The following functions support initialization of the core driver component
* and the DWC_otg controller
@@ -1025,6 +1106,8 @@ extern int dwc2_core_reset_and_force_dr_mode(struct dwc2_hsotg *hsotg);
extern int dwc2_enter_hibernation(struct dwc2_hsotg *hsotg);
extern int dwc2_exit_hibernation(struct dwc2_hsotg *hsotg, bool restore);
+bool dwc2_force_mode_if_needed(struct dwc2_hsotg *hsotg, bool host);
+void dwc2_clear_force_mode(struct dwc2_hsotg *hsotg);
void dwc2_force_dr_mode(struct dwc2_hsotg *hsotg);
extern bool dwc2_is_controller_alive(struct dwc2_hsotg *hsotg);
@@ -1044,217 +1127,16 @@ extern void dwc2_disable_global_interrupts(struct dwc2_hsotg *hcd);
/* This function should be called on every hardware interrupt. */
extern irqreturn_t dwc2_handle_common_intr(int irq, void *dev);
-/* OTG Core Parameters */
-
-/*
- * Specifies the OTG capabilities. The driver will automatically
- * detect the value for this parameter if none is specified.
- * 0 - HNP and SRP capable (default)
- * 1 - SRP Only capable
- * 2 - No HNP/SRP capable
- */
-extern void dwc2_set_param_otg_cap(struct dwc2_hsotg *hsotg, int val);
-#define DWC2_CAP_PARAM_HNP_SRP_CAPABLE 0
-#define DWC2_CAP_PARAM_SRP_ONLY_CAPABLE 1
-#define DWC2_CAP_PARAM_NO_HNP_SRP_CAPABLE 2
-
-/*
- * Specifies whether to use slave or DMA mode for accessing the data
- * FIFOs. The driver will automatically detect the value for this
- * parameter if none is specified.
- * 0 - Slave
- * 1 - DMA (default, if available)
- */
-extern void dwc2_set_param_dma_enable(struct dwc2_hsotg *hsotg, int val);
-
-/*
- * When DMA mode is enabled, specifies whether to use address DMA or
- * DMA Descriptor mode for accessing the data FIFOs in device mode. The
- * driver will automatically detect the value for this parameter if
- * none is specified.
- * 0 - address DMA
- * 1 - DMA Descriptor (default, if available)
- */
-extern void dwc2_set_param_dma_desc_enable(struct dwc2_hsotg *hsotg, int val);
-
-/*
- * When DMA mode is enabled, specifies whether to use address DMA or
- * DMA Descriptor mode with full speed devices for accessing the data
- * FIFOs in host mode.
- * 0 - address DMA
- * 1 - FS DMA Descriptor (default, if available)
- */
-extern void dwc2_set_param_dma_desc_fs_enable(struct dwc2_hsotg *hsotg,
- int val);
-
-/*
- * Specifies the maximum speed of operation in host and device mode.
- * The actual speed depends on the speed of the attached device and
- * the value of phy_type.
- * 0 - High Speed (default)
- * 1 - Full Speed
- */
-extern void dwc2_set_param_speed(struct dwc2_hsotg *hsotg, int val);
-#define DWC2_SPEED_PARAM_HIGH 0
-#define DWC2_SPEED_PARAM_FULL 1
-
-/*
- * Specifies whether low power mode is supported when attached
- * to a Full Speed or Low Speed device in host mode.
- *
- * 0 - Don't support low power mode (default)
- * 1 - Support low power mode
- */
-extern void dwc2_set_param_host_support_fs_ls_low_power(
- struct dwc2_hsotg *hsotg, int val);
-
-/*
- * Specifies the PHY clock rate in low power mode when connected to a
- * Low Speed device in host mode. This parameter is applicable only if
- * HOST_SUPPORT_FS_LS_LOW_POWER is enabled. If PHY_TYPE is set to FS
- * then defaults to 6 MHZ otherwise 48 MHZ.
- *
- * 0 - 48 MHz
- * 1 - 6 MHz
- */
-extern void dwc2_set_param_host_ls_low_power_phy_clk(struct dwc2_hsotg *hsotg,
- int val);
-#define DWC2_HOST_LS_LOW_POWER_PHY_CLK_PARAM_48MHZ 0
-#define DWC2_HOST_LS_LOW_POWER_PHY_CLK_PARAM_6MHZ 1
-
-/*
- * 0 - Use coreConsultant-specified FIFO size parameters
- * 1 - Allow dynamic FIFO sizing (default)
- */
-extern void dwc2_set_param_enable_dynamic_fifo(struct dwc2_hsotg *hsotg,
- int val);
-
-/*
- * Number of 4-byte words in the Rx FIFO in host mode when dynamic
- * FIFO sizing is enabled.
- * 16 to 32768 (default 1024)
- */
-extern void dwc2_set_param_host_rx_fifo_size(struct dwc2_hsotg *hsotg, int val);
-
-/*
- * Number of 4-byte words in the non-periodic Tx FIFO in host mode
- * when Dynamic FIFO sizing is enabled in the core.
- * 16 to 32768 (default 256)
- */
-extern void dwc2_set_param_host_nperio_tx_fifo_size(struct dwc2_hsotg *hsotg,
- int val);
-
-/*
- * Number of 4-byte words in the host periodic Tx FIFO when dynamic
- * FIFO sizing is enabled.
- * 16 to 32768 (default 256)
- */
-extern void dwc2_set_param_host_perio_tx_fifo_size(struct dwc2_hsotg *hsotg,
- int val);
-
-/*
- * The maximum transfer size supported in bytes.
- * 2047 to 65,535 (default 65,535)
- */
-extern void dwc2_set_param_max_transfer_size(struct dwc2_hsotg *hsotg, int val);
-
-/*
- * The maximum number of packets in a transfer.
- * 15 to 511 (default 511)
- */
-extern void dwc2_set_param_max_packet_count(struct dwc2_hsotg *hsotg, int val);
-
-/*
- * The number of host channel registers to use.
- * 1 to 16 (default 11)
- * Note: The FPGA configuration supports a maximum of 11 host channels.
- */
-extern void dwc2_set_param_host_channels(struct dwc2_hsotg *hsotg, int val);
-
-/*
- * Specifies the type of PHY interface to use. By default, the driver
- * will automatically detect the phy_type.
- *
- * 0 - Full Speed PHY
- * 1 - UTMI+ (default)
- * 2 - ULPI
- */
-extern void dwc2_set_param_phy_type(struct dwc2_hsotg *hsotg, int val);
-#define DWC2_PHY_TYPE_PARAM_FS 0
-#define DWC2_PHY_TYPE_PARAM_UTMI 1
-#define DWC2_PHY_TYPE_PARAM_ULPI 2
-
-/*
- * Specifies the UTMI+ Data Width. This parameter is
- * applicable for a PHY_TYPE of UTMI+ or ULPI. (For a ULPI
- * PHY_TYPE, this parameter indicates the data width between
- * the MAC and the ULPI Wrapper.) Also, this parameter is
- * applicable only if the OTG_HSPHY_WIDTH cC parameter was set
- * to "8 and 16 bits", meaning that the core has been
- * configured to work at either data path width.
- *
- * 8 or 16 bits (default 16)
- */
-extern void dwc2_set_param_phy_utmi_width(struct dwc2_hsotg *hsotg, int val);
-
-/*
- * Specifies whether the ULPI operates at double or single
- * data rate. This parameter is only applicable if PHY_TYPE is
- * ULPI.
- *
- * 0 - single data rate ULPI interface with 8 bit wide data
- * bus (default)
- * 1 - double data rate ULPI interface with 4 bit wide data
- * bus
- */
-extern void dwc2_set_param_phy_ulpi_ddr(struct dwc2_hsotg *hsotg, int val);
-
-/*
- * Specifies whether to use the internal or external supply to
- * drive the vbus with a ULPI phy.
- */
-extern void dwc2_set_param_phy_ulpi_ext_vbus(struct dwc2_hsotg *hsotg, int val);
-#define DWC2_PHY_ULPI_INTERNAL_VBUS 0
-#define DWC2_PHY_ULPI_EXTERNAL_VBUS 1
-
-/*
- * Specifies whether to use the I2C interface for full speed PHY. This
- * parameter is only applicable if PHY_TYPE is FS.
- * 0 - No (default)
- * 1 - Yes
- */
-extern void dwc2_set_param_i2c_enable(struct dwc2_hsotg *hsotg, int val);
-
-extern void dwc2_set_param_ulpi_fs_ls(struct dwc2_hsotg *hsotg, int val);
-
-extern void dwc2_set_param_ts_dline(struct dwc2_hsotg *hsotg, int val);
-
-/*
- * Specifies whether dedicated transmit FIFOs are
- * enabled for non-periodic IN endpoints in device mode.
- * 0 - No
- * 1 - Yes
- */
-extern void dwc2_set_param_en_multiple_tx_fifo(struct dwc2_hsotg *hsotg,
- int val);
-
-extern void dwc2_set_param_reload_ctl(struct dwc2_hsotg *hsotg, int val);
-
-extern void dwc2_set_param_ahbcfg(struct dwc2_hsotg *hsotg, int val);
-
-extern void dwc2_set_param_otg_ver(struct dwc2_hsotg *hsotg, int val);
-
-extern void dwc2_set_parameters(struct dwc2_hsotg *hsotg,
- const struct dwc2_core_params *params);
-
-extern void dwc2_set_all_params(struct dwc2_core_params *params, int value);
-
-extern int dwc2_get_hwparams(struct dwc2_hsotg *hsotg);
+/* The device ID match table */
+extern const struct of_device_id dwc2_of_match_table[];
extern int dwc2_lowlevel_hw_enable(struct dwc2_hsotg *hsotg);
extern int dwc2_lowlevel_hw_disable(struct dwc2_hsotg *hsotg);
+/* Parameters */
+int dwc2_get_hwparams(struct dwc2_hsotg *hsotg);
+int dwc2_init_params(struct dwc2_hsotg *hsotg);
+
/*
* The following functions check the controller's OTG operation mode
* capability (GHWCFG2.OTG_MODE).
diff --git a/drivers/usb/dwc2/core_intr.c b/drivers/usb/dwc2/core_intr.c
index d85c5c9f96c1..5b228ba6045f 100644
--- a/drivers/usb/dwc2/core_intr.c
+++ b/drivers/usb/dwc2/core_intr.c
@@ -159,9 +159,9 @@ static void dwc2_handle_otg_intr(struct dwc2_hsotg *hsotg)
" ++OTG Interrupt: Session Request Success Status Change++\n");
gotgctl = dwc2_readl(hsotg->regs + GOTGCTL);
if (gotgctl & GOTGCTL_SESREQSCS) {
- if (hsotg->core_params->phy_type ==
+ if (hsotg->params.phy_type ==
DWC2_PHY_TYPE_PARAM_FS
- && hsotg->core_params->i2c_enable > 0) {
+ && hsotg->params.i2c_enable > 0) {
hsotg->srp_success = 1;
} else {
/* Clear Session Request */
@@ -370,7 +370,7 @@ static void dwc2_handle_wakeup_detected_intr(struct dwc2_hsotg *hsotg)
/* Change to L0 state */
hsotg->lx_state = DWC2_L0;
} else {
- if (hsotg->core_params->hibernation)
+ if (hsotg->params.hibernation)
return;
if (hsotg->lx_state != DWC2_L1) {
diff --git a/drivers/usb/dwc2/debugfs.c b/drivers/usb/dwc2/debugfs.c
index 55d91f24f94a..0a130916a91c 100644
--- a/drivers/usb/dwc2/debugfs.c
+++ b/drivers/usb/dwc2/debugfs.c
@@ -213,7 +213,7 @@ static int fifo_show(struct seq_file *seq, void *v)
val = dwc2_readl(regs + GNPTXFSIZ);
seq_printf(seq, "NPTXFIFO: Size %d, Start 0x%08x\n",
val >> FIFOSIZE_DEPTH_SHIFT,
- val & FIFOSIZE_DEPTH_MASK);
+ val & FIFOSIZE_STARTADDR_MASK);
seq_puts(seq, "\nPeriodic TXFIFOs:\n");
diff --git a/drivers/usb/dwc2/gadget.c b/drivers/usb/dwc2/gadget.c
index 24fbebc9b409..b95930f20d90 100644
--- a/drivers/usb/dwc2/gadget.c
+++ b/drivers/usb/dwc2/gadget.c
@@ -93,7 +93,18 @@ static void dwc2_hsotg_dump(struct dwc2_hsotg *hsotg);
*/
static inline bool using_dma(struct dwc2_hsotg *hsotg)
{
- return hsotg->g_using_dma;
+ return hsotg->params.g_dma;
+}
+
+/*
+ * using_desc_dma - return the descriptor DMA status of the driver.
+ * @hsotg: The driver state.
+ *
+ * Return true if we're using descriptor DMA.
+ */
+static inline bool using_desc_dma(struct dwc2_hsotg *hsotg)
+{
+ return hsotg->params.g_dma_desc;
}
/**
@@ -190,16 +201,17 @@ static void dwc2_hsotg_init_fifo(struct dwc2_hsotg *hsotg)
unsigned int addr;
int timeout;
u32 val;
+ u32 *txfsz = hsotg->params.g_tx_fifo_size;
/* Reset fifo map if not correctly cleared during previous session */
WARN_ON(hsotg->fifo_map);
hsotg->fifo_map = 0;
/* set RX/NPTX FIFO sizes */
- dwc2_writel(hsotg->g_rx_fifo_sz, hsotg->regs + GRXFSIZ);
- dwc2_writel((hsotg->g_rx_fifo_sz << FIFOSIZE_STARTADDR_SHIFT) |
- (hsotg->g_np_g_tx_fifo_sz << FIFOSIZE_DEPTH_SHIFT),
- hsotg->regs + GNPTXFSIZ);
+ dwc2_writel(hsotg->params.g_rx_fifo_size, hsotg->regs + GRXFSIZ);
+ dwc2_writel((hsotg->params.g_rx_fifo_size << FIFOSIZE_STARTADDR_SHIFT) |
+ (hsotg->params.g_np_tx_fifo_size << FIFOSIZE_DEPTH_SHIFT),
+ hsotg->regs + GNPTXFSIZ);
/*
* arrange all the rest of the TX FIFOs, as some versions of this
@@ -209,7 +221,7 @@ static void dwc2_hsotg_init_fifo(struct dwc2_hsotg *hsotg)
*/
/* start at the end of the GNPTXFSIZ, rounded up */
- addr = hsotg->g_rx_fifo_sz + hsotg->g_np_g_tx_fifo_sz;
+ addr = hsotg->params.g_rx_fifo_size + hsotg->params.g_np_tx_fifo_size;
/*
* Configure fifos sizes from provided configuration and assign
@@ -217,15 +229,16 @@ static void dwc2_hsotg_init_fifo(struct dwc2_hsotg *hsotg)
* given endpoint.
*/
for (ep = 1; ep < MAX_EPS_CHANNELS; ep++) {
- if (!hsotg->g_tx_fifo_sz[ep])
+ if (!txfsz[ep])
continue;
val = addr;
- val |= hsotg->g_tx_fifo_sz[ep] << FIFOSIZE_DEPTH_SHIFT;
- WARN_ONCE(addr + hsotg->g_tx_fifo_sz[ep] > hsotg->fifo_mem,
+ val |= txfsz[ep] << FIFOSIZE_DEPTH_SHIFT;
+ WARN_ONCE(addr + txfsz[ep] > hsotg->fifo_mem,
"insufficient fifo memory");
- addr += hsotg->g_tx_fifo_sz[ep];
+ addr += txfsz[ep];
dwc2_writel(val, hsotg->regs + DPTXFSIZN(ep));
+ val = dwc2_readl(hsotg->regs + DPTXFSIZN(ep));
}
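
/*
 * Worked example, assuming the default sizes documented in core.h
 * (g_rx_fifo_size = 2048, g_np_tx_fifo_size = 1024, g_tx_fifo_size
 * starting 256, 256, 256, 256, 768, ...): EP1's dedicated FIFO starts
 * at word address 3072 and each later non-zero entry packs directly
 * after the previous one; the WARN_ONCE above fires if the running
 * address would exceed fifo_mem.
 */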
/*
@@ -303,12 +316,55 @@ static void dwc2_hsotg_unmap_dma(struct dwc2_hsotg *hsotg,
struct dwc2_hsotg_req *hs_req)
{
struct usb_request *req = &hs_req->req;
+ usb_gadget_unmap_request(&hsotg->gadget, req, hs_ep->dir_in);
+}
- /* ignore this if we're not moving any data */
- if (hs_req->req.length == 0)
- return;
+/*
+ * dwc2_gadget_alloc_ctrl_desc_chains - allocate DMA descriptor chains
+ * for Control endpoint
+ * @hsotg: The device state.
+ *
+ * This function will allocate 4 descriptor chains for EP 0: 2 for
+ * Setup stage, and one each for IN and OUT data/status transactions.
+ */
+static int dwc2_gadget_alloc_ctrl_desc_chains(struct dwc2_hsotg *hsotg)
+{
+ hsotg->setup_desc[0] =
+ dmam_alloc_coherent(hsotg->dev,
+ sizeof(struct dwc2_dma_desc),
+ &hsotg->setup_desc_dma[0],
+ GFP_KERNEL);
+ if (!hsotg->setup_desc[0])
+ goto fail;
+
+ hsotg->setup_desc[1] =
+ dmam_alloc_coherent(hsotg->dev,
+ sizeof(struct dwc2_dma_desc),
+ &hsotg->setup_desc_dma[1],
+ GFP_KERNEL);
+ if (!hsotg->setup_desc[1])
+ goto fail;
+
+ hsotg->ctrl_in_desc =
+ dmam_alloc_coherent(hsotg->dev,
+ sizeof(struct dwc2_dma_desc),
+ &hsotg->ctrl_in_desc_dma,
+ GFP_KERNEL);
+ if (!hsotg->ctrl_in_desc)
+ goto fail;
+
+ hsotg->ctrl_out_desc =
+ dmam_alloc_coherent(hsotg->dev,
+ sizeof(struct dwc2_dma_desc),
+ &hsotg->ctrl_out_desc_dma,
+ GFP_KERNEL);
+ if (!hsotg->ctrl_out_desc)
+ goto fail;
- usb_gadget_unmap_request(&hsotg->gadget, req, hs_ep->dir_in);
+ return 0;
+
+fail:
+ return -ENOMEM;
}
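
/*
 * Note: the chains above come from the device-managed
 * dmam_alloc_coherent(), so the failure path needs no explicit rollback;
 * earlier allocations are released automatically when the device is
 * unbound.
 */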
/**
@@ -541,6 +597,273 @@ static u32 dwc2_hsotg_read_frameno(struct dwc2_hsotg *hsotg)
}
/**
+ * dwc2_gadget_get_chain_limit - get the maximum data payload value of the
+ * DMA descriptor chain prepared for specific endpoint
+ * @hs_ep: The endpoint
+ *
+ * Return the maximum data that can be queued in one go on a given endpoint
+ * depending on its descriptor chain capacity so that transfers that
+ * are too long can be split.
+ */
+static unsigned int dwc2_gadget_get_chain_limit(struct dwc2_hsotg_ep *hs_ep)
+{
+ int is_isoc = hs_ep->isochronous;
+ unsigned int maxsize;
+
+ if (is_isoc)
+ maxsize = hs_ep->dir_in ? DEV_DMA_ISOC_TX_NBYTES_LIMIT :
+ DEV_DMA_ISOC_RX_NBYTES_LIMIT;
+ else
+ maxsize = DEV_DMA_NBYTES_LIMIT;
+
+	/* The above is the size of one descriptor; multiply it by the chain length */
+ maxsize *= MAX_DMA_DESC_NUM_GENERIC;
+
+ return maxsize;
+}
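+
+/*
+ * A caller-side sketch (not from this patch; the helper name is
+ * illustrative) of clamping an oversized request to this limit so the
+ * remainder can be sent in a follow-up transfer:
+ *
+ *	static unsigned int example_clamp_to_chain(struct dwc2_hsotg_ep *hs_ep,
+ *						   unsigned int len)
+ *	{
+ *		unsigned int maxreq = dwc2_gadget_get_chain_limit(hs_ep);
+ *
+ *		return min(len, maxreq);
+ *	}
+ */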
+
+/*
+ * dwc2_gadget_get_desc_params - get DMA descriptor parameters.
+ * @hs_ep: The endpoint
+ * @mask: RX/TX bytes mask to be defined
+ *
+ * Returns maximum data payload for one descriptor after analyzing endpoint
+ * characteristics.
+ * DMA descriptor transfer bytes limit depends on EP type:
+ * Control out - MPS,
+ * Isochronous - descriptor rx/tx bytes bitfield limit,
+ * Control In/Bulk/Interrupt - multiple of mps. This avoids
+ * concatenating data from several descriptors within one packet.
+ *
+ * Selects corresponding mask for RX/TX bytes as well.
+ */
+static u32 dwc2_gadget_get_desc_params(struct dwc2_hsotg_ep *hs_ep, u32 *mask)
+{
+ u32 mps = hs_ep->ep.maxpacket;
+ int dir_in = hs_ep->dir_in;
+ u32 desc_size = 0;
+
+ if (!hs_ep->index && !dir_in) {
+ desc_size = mps;
+ *mask = DEV_DMA_NBYTES_MASK;
+ } else if (hs_ep->isochronous) {
+ if (dir_in) {
+ desc_size = DEV_DMA_ISOC_TX_NBYTES_LIMIT;
+ *mask = DEV_DMA_ISOC_TX_NBYTES_MASK;
+ } else {
+ desc_size = DEV_DMA_ISOC_RX_NBYTES_LIMIT;
+ *mask = DEV_DMA_ISOC_RX_NBYTES_MASK;
+ }
+ } else {
+ desc_size = DEV_DMA_NBYTES_LIMIT;
+ *mask = DEV_DMA_NBYTES_MASK;
+
+ /* Round down desc_size to be mps multiple */
+ desc_size -= desc_size % mps;
+ }
+
+ return desc_size;
+}
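+
+/*
+ * Worked example: a bulk endpoint with mps = 512 gets desc_size =
+ * DEV_DMA_NBYTES_LIMIT rounded down to a 512-byte multiple, so a single
+ * descriptor never ends mid-packet; EP0 OUT instead gets exactly one
+ * mps per descriptor.
+ */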
+
+/*
+ * dwc2_gadget_config_nonisoc_xfer_ddma - prepare non-ISOC DMA desc chain.
+ * @hs_ep: The endpoint
+ * @dma_buff: DMA address to use
+ * @len: Length of the transfer
+ *
+ * This function will iterate over the descriptor chain and fill its entries
+ * with corresponding information based on transfer data.
+ */
+static void dwc2_gadget_config_nonisoc_xfer_ddma(struct dwc2_hsotg_ep *hs_ep,
+ dma_addr_t dma_buff,
+ unsigned int len)
+{
+ struct dwc2_hsotg *hsotg = hs_ep->parent;
+ int dir_in = hs_ep->dir_in;
+ struct dwc2_dma_desc *desc = hs_ep->desc_list;
+ u32 mps = hs_ep->ep.maxpacket;
+ u32 maxsize = 0;
+ u32 offset = 0;
+ u32 mask = 0;
+ int i;
+
+ maxsize = dwc2_gadget_get_desc_params(hs_ep, &mask);
+
+ hs_ep->desc_count = (len / maxsize) +
+ ((len % maxsize) ? 1 : 0);
+ if (len == 0)
+ hs_ep->desc_count = 1;
+
+ for (i = 0; i < hs_ep->desc_count; ++i) {
+ desc->status = 0;
+ desc->status |= (DEV_DMA_BUFF_STS_HBUSY
+ << DEV_DMA_BUFF_STS_SHIFT);
+
+ if (len > maxsize) {
+ if (!hs_ep->index && !dir_in)
+ desc->status |= (DEV_DMA_L | DEV_DMA_IOC);
+
+ desc->status |= (maxsize <<
+ DEV_DMA_NBYTES_SHIFT & mask);
+ desc->buf = dma_buff + offset;
+
+ len -= maxsize;
+ offset += maxsize;
+ } else {
+ desc->status |= (DEV_DMA_L | DEV_DMA_IOC);
+
+ if (dir_in)
+ desc->status |= (len % mps) ? DEV_DMA_SHORT :
+ ((hs_ep->send_zlp) ? DEV_DMA_SHORT : 0);
+ if (len > maxsize)
+ dev_err(hsotg->dev, "wrong len %d\n", len);
+
+ desc->status |=
+ len << DEV_DMA_NBYTES_SHIFT & mask;
+ desc->buf = dma_buff + offset;
+ }
+
+ desc->status &= ~DEV_DMA_BUFF_STS_MASK;
+ desc->status |= (DEV_DMA_BUFF_STS_HREADY
+ << DEV_DMA_BUFF_STS_SHIFT);
+ desc++;
+ }
+}
+
+/*
+ * dwc2_gadget_fill_isoc_desc - fills next isochronous descriptor in chain.
+ * @hs_ep: The isochronous endpoint.
+ * @dma_buff: usb requests dma buffer.
+ * @len: usb request transfer length.
+ *
+ * Finds the index of the first free entry in either the bottom or the upper
+ * half of the descriptor chain, depending on which half is under SW control
+ * and not being processed by HW. Fills that descriptor with the data of the
+ * arrived usb request and the frame info, sets the Last and IOC bits, and
+ * increments next_desc. If the filled descriptor is not the first one in the
+ * chain, clears the L bit in the previous descriptor's status.
+ */
+static int dwc2_gadget_fill_isoc_desc(struct dwc2_hsotg_ep *hs_ep,
+ dma_addr_t dma_buff, unsigned int len)
+{
+ struct dwc2_dma_desc *desc;
+ struct dwc2_hsotg *hsotg = hs_ep->parent;
+ u32 index;
+ u32 maxsize = 0;
+ u32 mask = 0;
+
+ maxsize = dwc2_gadget_get_desc_params(hs_ep, &mask);
+ if (len > maxsize) {
+ dev_err(hsotg->dev, "wrong len %d\n", len);
+ return -EINVAL;
+ }
+
+ /*
+	 * If SW has already filled its half of the chain, return and wait
+	 * for the other half to be processed by HW.
+ */
+ if (hs_ep->next_desc == MAX_DMA_DESC_NUM_GENERIC / 2)
+ return -EBUSY;
+
+ /* Increment frame number by interval for IN */
+ if (hs_ep->dir_in)
+ dwc2_gadget_incr_frame_num(hs_ep);
+
+ index = (MAX_DMA_DESC_NUM_GENERIC / 2) * hs_ep->isoc_chain_num +
+ hs_ep->next_desc;
+
+ /* Sanity check of calculated index */
+ if ((hs_ep->isoc_chain_num && index > MAX_DMA_DESC_NUM_GENERIC) ||
+ (!hs_ep->isoc_chain_num && index > MAX_DMA_DESC_NUM_GENERIC / 2)) {
+ dev_err(hsotg->dev, "wrong index %d for iso chain\n", index);
+ return -EINVAL;
+ }
+
+ desc = &hs_ep->desc_list[index];
+
+ /* Clear L bit of previous desc if more than one entries in the chain */
+ if (hs_ep->next_desc)
+ hs_ep->desc_list[index - 1].status &= ~DEV_DMA_L;
+
+ dev_dbg(hsotg->dev, "%s: Filling ep %d, dir %s isoc desc # %d\n",
+ __func__, hs_ep->index, hs_ep->dir_in ? "in" : "out", index);
+
+ desc->status = 0;
+ desc->status |= (DEV_DMA_BUFF_STS_HBUSY << DEV_DMA_BUFF_STS_SHIFT);
+
+ desc->buf = dma_buff;
+ desc->status |= (DEV_DMA_L | DEV_DMA_IOC |
+ ((len << DEV_DMA_NBYTES_SHIFT) & mask));
+
+ if (hs_ep->dir_in) {
+ desc->status |= ((hs_ep->mc << DEV_DMA_ISOC_PID_SHIFT) &
+ DEV_DMA_ISOC_PID_MASK) |
+ ((len % hs_ep->ep.maxpacket) ?
+ DEV_DMA_SHORT : 0) |
+ ((hs_ep->target_frame <<
+ DEV_DMA_ISOC_FRNUM_SHIFT) &
+ DEV_DMA_ISOC_FRNUM_MASK);
+ }
+
+ desc->status &= ~DEV_DMA_BUFF_STS_MASK;
+ desc->status |= (DEV_DMA_BUFF_STS_HREADY << DEV_DMA_BUFF_STS_SHIFT);
+
+ /* Update index of last configured entry in the chain */
+ hs_ep->next_desc++;
+
+ return 0;
+}
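+
+/*
+ * Worked example of the indexing above, assuming MAX_DMA_DESC_NUM_GENERIC
+ * is 64 (the constant is defined outside this hunk): chain 0 owns entries
+ * 0..31 and chain 1 owns entries 32..63, so index = 32 * isoc_chain_num +
+ * next_desc.
+ */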
+
+/*
+ * dwc2_gadget_start_isoc_ddma - start isochronous transfer in DDMA
+ * @hs_ep: The isochronous endpoint.
+ *
+ * Prepare first descriptor chain for isochronous endpoints. Afterwards
+ * write DMA address to HW and enable the endpoint.
+ *
+ * Switch between descriptor chains via isoc_chain_num to give SW the
+ * opportunity to prepare the second chain while the first one is being
+ * processed by HW.
+ */
+static void dwc2_gadget_start_isoc_ddma(struct dwc2_hsotg_ep *hs_ep)
+{
+ struct dwc2_hsotg *hsotg = hs_ep->parent;
+ struct dwc2_hsotg_req *hs_req, *treq;
+ int index = hs_ep->index;
+ int ret;
+ u32 dma_reg;
+ u32 depctl;
+ u32 ctrl;
+
+ if (list_empty(&hs_ep->queue)) {
+ dev_dbg(hsotg->dev, "%s: No requests in queue\n", __func__);
+ return;
+ }
+
+ list_for_each_entry_safe(hs_req, treq, &hs_ep->queue, queue) {
+ ret = dwc2_gadget_fill_isoc_desc(hs_ep, hs_req->req.dma,
+ hs_req->req.length);
+ if (ret) {
+ dev_dbg(hsotg->dev, "%s: desc chain full\n", __func__);
+ break;
+ }
+ }
+
+ depctl = hs_ep->dir_in ? DIEPCTL(index) : DOEPCTL(index);
+ dma_reg = hs_ep->dir_in ? DIEPDMA(index) : DOEPDMA(index);
+
+ /* write descriptor chain address to control register */
+ dwc2_writel(hs_ep->desc_list_dma, hsotg->regs + dma_reg);
+
+ ctrl = dwc2_readl(hsotg->regs + depctl);
+ ctrl |= DXEPCTL_EPENA | DXEPCTL_CNAK;
+ dwc2_writel(ctrl, hsotg->regs + depctl);
+
+	/* Switch ISOC descriptor chain number being processed by SW */
+ hs_ep->isoc_chain_num = (hs_ep->isoc_chain_num ^ 1) & 0x1;
+ hs_ep->next_desc = 0;
+}
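+
+/*
+ * Flow summary: ep_queue() fills descriptors into the half of the chain
+ * that is under SW control, while this function arms the other half in
+ * HW and flips isoc_chain_num, so SW and HW ping-pong between the two
+ * halves of the chain.
+ */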
+
+/**
* dwc2_hsotg_start_req - start a USB request from an endpoint's queue
* @hsotg: The controller state.
* @hs_ep: The endpoint to process a request for
@@ -565,6 +888,7 @@ static void dwc2_hsotg_start_req(struct dwc2_hsotg *hsotg,
unsigned length;
unsigned packets;
unsigned maxreq;
+ unsigned int dma_reg;
if (index != 0) {
if (hs_ep->req && !continuing) {
@@ -579,6 +903,7 @@ static void dwc2_hsotg_start_req(struct dwc2_hsotg *hsotg,
}
}
+ dma_reg = dir_in ? DIEPDMA(index) : DOEPDMA(index);
epctrl_reg = dir_in ? DIEPCTL(index) : DOEPCTL(index);
epsize_reg = dir_in ? DIEPTSIZ(index) : DOEPTSIZ(index);
@@ -598,7 +923,11 @@ static void dwc2_hsotg_start_req(struct dwc2_hsotg *hsotg,
dev_dbg(hsotg->dev, "ureq->length:%d ureq->actual:%d\n",
ureq->length, ureq->actual);
- maxreq = get_ep_limit(hs_ep);
+ if (!using_desc_dma(hsotg))
+ maxreq = get_ep_limit(hs_ep);
+ else
+ maxreq = dwc2_gadget_get_chain_limit(hs_ep);
+
if (length > maxreq) {
int round = maxreq % hs_ep->ep.maxpacket;
@@ -650,22 +979,51 @@ static void dwc2_hsotg_start_req(struct dwc2_hsotg *hsotg,
/* store the request as the current one we're doing */
hs_ep->req = hs_req;
- /* write size / packets */
- dwc2_writel(epsize, hsotg->regs + epsize_reg);
+ if (using_desc_dma(hsotg)) {
+ u32 offset = 0;
+ u32 mps = hs_ep->ep.maxpacket;
- if (using_dma(hsotg) && !continuing) {
- unsigned int dma_reg;
+ /* Adjust length: EP0 - MPS, other OUT EPs - multiple of MPS */
+ if (!dir_in) {
+ if (!index)
+ length = mps;
+ else if (length % mps)
+ length += (mps - (length % mps));
+ }
/*
- * write DMA address to control register, buffer already
- * synced by dwc2_hsotg_ep_queue().
+ * If more data to send, adjust DMA for EP0 out data stage.
+		 * ureq->dma stays unchanged, hence offset it by the data
+		 * count already transferred before starting the new
+		 * transaction.
*/
+ if (!index && hsotg->ep0_state == DWC2_EP0_DATA_OUT &&
+ continuing)
+ offset = ureq->actual;
- dma_reg = dir_in ? DIEPDMA(index) : DOEPDMA(index);
- dwc2_writel(ureq->dma, hsotg->regs + dma_reg);
+ /* Fill DDMA chain entries */
+ dwc2_gadget_config_nonisoc_xfer_ddma(hs_ep, ureq->dma + offset,
+ length);
- dev_dbg(hsotg->dev, "%s: %pad => 0x%08x\n",
- __func__, &ureq->dma, dma_reg);
+ /* write descriptor chain address to control register */
+ dwc2_writel(hs_ep->desc_list_dma, hsotg->regs + dma_reg);
+
+ dev_dbg(hsotg->dev, "%s: %08x pad => 0x%08x\n",
+ __func__, (u32)hs_ep->desc_list_dma, dma_reg);
+ } else {
+ /* write size / packets */
+ dwc2_writel(epsize, hsotg->regs + epsize_reg);
+
+ if (using_dma(hsotg) && !continuing && (length != 0)) {
+ /*
+ * write DMA address to control register, buffer
+ * already synced by dwc2_hsotg_ep_queue().
+ */
+
+ dwc2_writel(ureq->dma, hsotg->regs + dma_reg);
+
+ dev_dbg(hsotg->dev, "%s: %pad => 0x%08x\n",
+ __func__, &ureq->dma, dma_reg);
+ }
}
if (hs_ep->isochronous && hs_ep->interval == 1) {
@@ -738,13 +1096,8 @@ static int dwc2_hsotg_map_dma(struct dwc2_hsotg *hsotg,
struct dwc2_hsotg_ep *hs_ep,
struct usb_request *req)
{
- struct dwc2_hsotg_req *hs_req = our_req(req);
int ret;
- /* if the length is zero, ignore the DMA data */
- if (hs_req->req.length == 0)
- return 0;
-
ret = usb_gadget_map_request(&hsotg->gadget, req, hs_ep->dir_in);
if (ret)
goto dma_error;
@@ -835,6 +1188,41 @@ static bool dwc2_gadget_target_frame_elapsed(struct dwc2_hsotg_ep *hs_ep)
return false;
}
+/*
+ * dwc2_gadget_set_ep0_desc_chain - Set EP's desc chain pointers
+ * @hsotg: The driver state
+ * @hs_ep: the ep descriptor chain is for
+ *
+ * Called to update the EP0 structure's pointers depending on the stage
+ * of the control transfer.
+ */
+static int dwc2_gadget_set_ep0_desc_chain(struct dwc2_hsotg *hsotg,
+ struct dwc2_hsotg_ep *hs_ep)
+{
+ switch (hsotg->ep0_state) {
+ case DWC2_EP0_SETUP:
+ case DWC2_EP0_STATUS_OUT:
+ hs_ep->desc_list = hsotg->setup_desc[0];
+ hs_ep->desc_list_dma = hsotg->setup_desc_dma[0];
+ break;
+ case DWC2_EP0_DATA_IN:
+ case DWC2_EP0_STATUS_IN:
+ hs_ep->desc_list = hsotg->ctrl_in_desc;
+ hs_ep->desc_list_dma = hsotg->ctrl_in_desc_dma;
+ break;
+ case DWC2_EP0_DATA_OUT:
+ hs_ep->desc_list = hsotg->ctrl_out_desc;
+ hs_ep->desc_list_dma = hsotg->ctrl_out_desc_dma;
+ break;
+ default:
+ dev_err(hsotg->dev, "invalid EP 0 state in queue %d\n",
+ hsotg->ep0_state);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static int dwc2_hsotg_ep_queue(struct usb_ep *ep, struct usb_request *req,
gfp_t gfp_flags)
{
@@ -870,10 +1258,32 @@ static int dwc2_hsotg_ep_queue(struct usb_ep *ep, struct usb_request *req,
if (ret)
return ret;
}
+ /* If using descriptor DMA configure EP0 descriptor chain pointers */
+ if (using_desc_dma(hs) && !hs_ep->index) {
+ ret = dwc2_gadget_set_ep0_desc_chain(hs, hs_ep);
+ if (ret)
+ return ret;
+ }
first = list_empty(&hs_ep->queue);
list_add_tail(&hs_req->queue, &hs_ep->queue);
+ /*
+ * Handle DDMA isochronous transfers separately - just add new entry
+ * to the half of descriptor chain that is not processed by HW.
+ * Transfer will be started once SW gets either one of NAK or
+ * OutTknEpDis interrupts.
+ */
+ if (using_desc_dma(hs) && hs_ep->isochronous &&
+ hs_ep->target_frame != TARGET_FRAME_INITIAL) {
+ ret = dwc2_gadget_fill_isoc_desc(hs_ep, hs_req->req.dma,
+ hs_req->req.length);
+ if (ret)
+ dev_dbg(hs->dev, "%s: ISO desc chain full\n", __func__);
+
+ return 0;
+ }
+
if (first) {
if (!hs_ep->isochronous) {
dwc2_hsotg_start_req(hs, hs_ep, hs_req, false);
@@ -1099,10 +1509,8 @@ static int dwc2_hsotg_ep_sethalt(struct usb_ep *ep, int value, bool now);
*/
static struct dwc2_hsotg_req *get_ep_head(struct dwc2_hsotg_ep *hs_ep)
{
- if (list_empty(&hs_ep->queue))
- return NULL;
-
- return list_first_entry(&hs_ep->queue, struct dwc2_hsotg_req, queue);
+ return list_first_entry_or_null(&hs_ep->queue, struct dwc2_hsotg_req,
+ queue);
}
/**
@@ -1440,14 +1848,21 @@ static void dwc2_hsotg_program_zlp(struct dwc2_hsotg *hsotg,
if (hs_ep->dir_in)
dev_dbg(hsotg->dev, "Sending zero-length packet on ep%d\n",
- index);
+ index);
else
dev_dbg(hsotg->dev, "Receiving zero-length packet on ep%d\n",
- index);
+ index);
+ if (using_desc_dma(hsotg)) {
+		/* No specific buffer needed for ep0 ZLP */
+ dma_addr_t dma = hs_ep->desc_list_dma;
- dwc2_writel(DXEPTSIZ_MC(1) | DXEPTSIZ_PKTCNT(1) |
- DXEPTSIZ_XFERSIZE(0), hsotg->regs +
- epsiz_reg);
+ dwc2_gadget_set_ep0_desc_chain(hsotg, hs_ep);
+ dwc2_gadget_config_nonisoc_xfer_ddma(hs_ep, dma, 0);
+ } else {
+ dwc2_writel(DXEPTSIZ_MC(1) | DXEPTSIZ_PKTCNT(1) |
+ DXEPTSIZ_XFERSIZE(0), hsotg->regs +
+ epsiz_reg);
+ }
ctrl = dwc2_readl(hsotg->regs + epctl_reg);
ctrl |= DXEPCTL_CNAK; /* clear NAK set by core */
@@ -1510,6 +1925,10 @@ static void dwc2_hsotg_complete_request(struct dwc2_hsotg *hsotg,
spin_lock(&hsotg->lock);
}
+	/* In DDMA there is no need to start the next ISOC request here */
+ if (using_desc_dma(hsotg) && hs_ep->isochronous)
+ return;
+
/*
* Look to see if there is anything else to do. Note, the completion
* of the previous request may have caused a new request to be started
@@ -1521,6 +1940,115 @@ static void dwc2_hsotg_complete_request(struct dwc2_hsotg *hsotg,
}
}
+/*
+ * dwc2_gadget_complete_isoc_request_ddma - complete an isoc request in DDMA
+ * @hs_ep: The endpoint the request was on.
+ *
+ * Gets the first request from the ep queue and determines the descriptor
+ * on which the completion happened. Based on isoc_chain_num, SW discovers
+ * which half of the descriptor chain is currently in use by HW, adjusts
+ * the DMA address accordingly and calculates the index of the completed
+ * descriptor from the value of the DEPDMA register. Updates the actual
+ * length of the request and gives it back to the gadget.
+ */
+static void dwc2_gadget_complete_isoc_request_ddma(struct dwc2_hsotg_ep *hs_ep)
+{
+ struct dwc2_hsotg *hsotg = hs_ep->parent;
+ struct dwc2_hsotg_req *hs_req;
+ struct usb_request *ureq;
+ int index;
+ dma_addr_t dma_addr;
+ u32 dma_reg;
+ u32 depdma;
+ u32 desc_sts;
+ u32 mask;
+
+ hs_req = get_ep_head(hs_ep);
+ if (!hs_req) {
+ dev_warn(hsotg->dev, "%s: ISOC EP queue empty\n", __func__);
+ return;
+ }
+ ureq = &hs_req->req;
+
+ dma_addr = hs_ep->desc_list_dma;
+
+ /*
+	 * If the lower half of the descriptor chain is currently in use by SW,
+ * that means higher half is being processed by HW, so shift
+ * DMA address to higher half of descriptor chain.
+ */
+ if (!hs_ep->isoc_chain_num)
+ dma_addr += sizeof(struct dwc2_dma_desc) *
+ (MAX_DMA_DESC_NUM_GENERIC / 2);
+
+ dma_reg = hs_ep->dir_in ? DIEPDMA(hs_ep->index) : DOEPDMA(hs_ep->index);
+ depdma = dwc2_readl(hsotg->regs + dma_reg);
+
+ index = (depdma - dma_addr) / sizeof(struct dwc2_dma_desc) - 1;
+ desc_sts = hs_ep->desc_list[index].status;
+
+ mask = hs_ep->dir_in ? DEV_DMA_ISOC_TX_NBYTES_MASK :
+ DEV_DMA_ISOC_RX_NBYTES_MASK;
+ ureq->actual = ureq->length -
+ ((desc_sts & mask) >> DEV_DMA_ISOC_NBYTES_SHIFT);
+
+	/* Adjust actual length for ISOC Out if length is not aligned to 4 */
+ if (!hs_ep->dir_in && ureq->length & 0x3)
+ ureq->actual += 4 - (ureq->length & 0x3);
+
+ dwc2_hsotg_complete_request(hsotg, hs_ep, hs_req, 0);
+}
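+
+/*
+ * Worked example of the completion math above: when the lower half of
+ * the chain is under SW control, HW owns the upper half, so dma_addr is
+ * advanced by (MAX_DMA_DESC_NUM_GENERIC / 2) descriptors; DEPDMA then
+ * points one entry past the completed descriptor, hence the "- 1" when
+ * recovering its index.
+ */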
+
+/*
+ * dwc2_gadget_start_next_isoc_ddma - start next isoc request, if any.
+ * @hs_ep: The isochronous endpoint to be re-enabled.
+ *
+ * If the ep has been disabled due to servicing of the last descriptor (IN
+ * endpoint) or a BNA (OUT endpoint), check the status of the half of the
+ * descriptor chain that was under SW control while HW was busy, and
+ * restart the endpoint if needed.
+ */
+static void dwc2_gadget_start_next_isoc_ddma(struct dwc2_hsotg_ep *hs_ep)
+{
+ struct dwc2_hsotg *hsotg = hs_ep->parent;
+ u32 depctl;
+ u32 dma_reg;
+ u32 ctrl;
+ u32 dma_addr = hs_ep->desc_list_dma;
+ unsigned char index = hs_ep->index;
+
+ dma_reg = hs_ep->dir_in ? DIEPDMA(index) : DOEPDMA(index);
+ depctl = hs_ep->dir_in ? DIEPCTL(index) : DOEPCTL(index);
+
+ ctrl = dwc2_readl(hsotg->regs + depctl);
+
+ /*
+ * EP was disabled if HW has processed last descriptor or BNA was set.
+ * So restart ep if SW has prepared new descriptor chain in ep_queue
+ * routine while HW was busy.
+ */
+ if (!(ctrl & DXEPCTL_EPENA)) {
+ if (!hs_ep->next_desc) {
+ dev_dbg(hsotg->dev, "%s: No more ISOC requests\n",
+ __func__);
+ return;
+ }
+
+ dma_addr += sizeof(struct dwc2_dma_desc) *
+ (MAX_DMA_DESC_NUM_GENERIC / 2) *
+ hs_ep->isoc_chain_num;
+ dwc2_writel(dma_addr, hsotg->regs + dma_reg);
+
+ ctrl |= DXEPCTL_EPENA | DXEPCTL_CNAK;
+ dwc2_writel(ctrl, hsotg->regs + depctl);
+
+		/* Switch ISOC descriptor chain number being processed by SW */
+ hs_ep->isoc_chain_num = (hs_ep->isoc_chain_num ^ 1) & 0x1;
+ hs_ep->next_desc = 0;
+
+ dev_dbg(hsotg->dev, "%s: Restarted isochronous endpoint\n",
+ __func__);
+ }
+}
+
/**
* dwc2_hsotg_rx_data - receive data from the FIFO for an endpoint
* @hsotg: The device state.
@@ -1618,6 +2146,36 @@ static void dwc2_hsotg_change_ep_iso_parity(struct dwc2_hsotg *hsotg,
dwc2_writel(ctrl, hsotg->regs + epctl_reg);
}
+/*
+ * dwc2_gadget_get_xfersize_ddma - get bytes remaining from DMA descriptors
+ * @hs_ep: The endpoint on which the transfer went
+ *
+ * Iterates over the endpoint's descriptor chain and reports the number of
+ * bytes remaining in the DMA descriptors after the transfer has completed.
+ * Used for non-isoc EPs.
+ */
+static unsigned int dwc2_gadget_get_xfersize_ddma(struct dwc2_hsotg_ep *hs_ep)
+{
+ struct dwc2_hsotg *hsotg = hs_ep->parent;
+ unsigned int bytes_rem = 0;
+ struct dwc2_dma_desc *desc = hs_ep->desc_list;
+ int i;
+ u32 status;
+
+ if (!desc)
+ return -EINVAL;
+
+ for (i = 0; i < hs_ep->desc_count; ++i) {
+ status = desc->status;
+ bytes_rem += status & DEV_DMA_NBYTES_MASK;
+
+ if (status & DEV_DMA_STS_MASK)
+ dev_err(hsotg->dev, "descriptor %d closed with %x\n",
+ i, status & DEV_DMA_STS_MASK);
+ }
+
+ return bytes_rem;
+}
+
/**
* dwc2_hsotg_handle_outdone - handle receiving OutDone/SetupDone from RXFIFO
* @hsotg: The device instance
@@ -1648,6 +2206,9 @@ static void dwc2_hsotg_handle_outdone(struct dwc2_hsotg *hsotg, int epnum)
return;
}
+ if (using_desc_dma(hsotg))
+ size_left = dwc2_gadget_get_xfersize_ddma(hs_ep);
+
if (using_dma(hsotg)) {
unsigned size_done;
@@ -1682,7 +2243,9 @@ static void dwc2_hsotg_handle_outdone(struct dwc2_hsotg *hsotg, int epnum)
*/
}
- if (epnum == 0 && hsotg->ep0_state == DWC2_EP0_DATA_OUT) {
+ /* DDMA IN status phase will start from StsPhseRcvd interrupt */
+ if (!using_desc_dma(hsotg) && epnum == 0 &&
+ hsotg->ep0_state == DWC2_EP0_DATA_OUT) {
/* Move to STATUS IN */
dwc2_hsotg_ep0_zlp(hsotg, true);
return;
@@ -1812,17 +2375,17 @@ static u32 dwc2_hsotg_ep0_mps(unsigned int mps)
* @hsotg: The driver state.
* @ep: The index number of the endpoint
* @mps: The maximum packet size in bytes
+ * @mc: The multicount value
*
* Configure the maximum packet size for the given endpoint, updating
* the hardware control registers to reflect this.
*/
static void dwc2_hsotg_set_ep_maxpacket(struct dwc2_hsotg *hsotg,
- unsigned int ep, unsigned int mps, unsigned int dir_in)
+ unsigned int ep, unsigned int mps,
+ unsigned int mc, unsigned int dir_in)
{
struct dwc2_hsotg_ep *hs_ep;
void __iomem *regs = hsotg->regs;
- u32 mpsval;
- u32 mcval;
u32 reg;
hs_ep = index_to_ep(hsotg, ep, dir_in);
@@ -1830,32 +2393,32 @@ static void dwc2_hsotg_set_ep_maxpacket(struct dwc2_hsotg *hsotg,
return;
if (ep == 0) {
+ u32 mps_bytes = mps;
+
/* EP0 is a special case */
- mpsval = dwc2_hsotg_ep0_mps(mps);
- if (mpsval > 3)
+ mps = dwc2_hsotg_ep0_mps(mps_bytes);
+ if (mps > 3)
goto bad_mps;
- hs_ep->ep.maxpacket = mps;
+ hs_ep->ep.maxpacket = mps_bytes;
hs_ep->mc = 1;
} else {
- mpsval = mps & DXEPCTL_MPS_MASK;
- if (mpsval > 1024)
+ if (mps > 1024)
goto bad_mps;
- mcval = ((mps >> 11) & 0x3) + 1;
- hs_ep->mc = mcval;
- if (mcval > 3)
+ hs_ep->mc = mc;
+ if (mc > 3)
goto bad_mps;
- hs_ep->ep.maxpacket = mpsval;
+ hs_ep->ep.maxpacket = mps;
}
if (dir_in) {
reg = dwc2_readl(regs + DIEPCTL(ep));
reg &= ~DXEPCTL_MPS_MASK;
- reg |= mpsval;
+ reg |= mps;
dwc2_writel(reg, regs + DIEPCTL(ep));
} else {
reg = dwc2_readl(regs + DOEPCTL(ep));
reg &= ~DXEPCTL_MPS_MASK;
- reg |= mpsval;
+ reg |= mps;
dwc2_writel(reg, regs + DOEPCTL(ep));
}
@@ -1954,6 +2517,13 @@ static void dwc2_hsotg_complete_in(struct dwc2_hsotg *hsotg,
/* Finish ZLP handling for IN EP0 transactions */
if (hs_ep->index == 0 && hsotg->ep0_state == DWC2_EP0_STATUS_IN) {
dev_dbg(hsotg->dev, "zlp packet sent\n");
+
+ /*
+		 * While sending the zlp for DWC2_EP0_STATUS_IN, the EP
+		 * direction was changed to IN. Change it back to complete
+		 * the OUT transfer request.
+ */
+ hs_ep->dir_in = 0;
+
dwc2_hsotg_complete_request(hsotg, hs_ep, hs_req, 0);
if (hsotg->test_mode) {
int ret;
@@ -1979,8 +2549,14 @@ static void dwc2_hsotg_complete_in(struct dwc2_hsotg *hsotg,
* past the end of the buffer (DMA transfers are always 32bit
* aligned).
*/
-
- size_left = DXEPTSIZ_XFERSIZE_GET(epsize);
+ if (using_desc_dma(hsotg)) {
+ size_left = dwc2_gadget_get_xfersize_ddma(hs_ep);
+ if (size_left < 0)
+ dev_err(hsotg->dev, "error parsing DDMA results %d\n",
+ size_left);
+ } else {
+ size_left = DXEPTSIZ_XFERSIZE_GET(epsize);
+ }
size_done = hs_ep->size_loaded - size_left;
size_done += hs_ep->last_load;
@@ -2128,12 +2704,28 @@ static void dwc2_gadget_handle_out_token_ep_disabled(struct dwc2_hsotg_ep *ep)
struct dwc2_hsotg *hsotg = ep->parent;
int dir_in = ep->dir_in;
u32 doepmsk;
+ u32 tmp;
if (dir_in || !ep->isochronous)
return;
+ /*
+ * Store frame in which irq was asserted here, as
+ * it can change while completing request below.
+ */
+ tmp = dwc2_hsotg_read_frameno(hsotg);
+
dwc2_hsotg_complete_request(hsotg, ep, get_ep_head(ep), -ENODATA);
+ if (using_desc_dma(hsotg)) {
+ if (ep->target_frame == TARGET_FRAME_INITIAL) {
+ /* Start first ISO Out */
+ ep->target_frame = tmp;
+ dwc2_gadget_start_isoc_ddma(ep);
+ }
+ return;
+ }
+
if (ep->interval > 1 &&
ep->target_frame == TARGET_FRAME_INITIAL) {
u32 dsts;
@@ -2182,6 +2774,12 @@ static void dwc2_gadget_handle_nak(struct dwc2_hsotg_ep *hs_ep)
if (hs_ep->target_frame == TARGET_FRAME_INITIAL) {
hs_ep->target_frame = dwc2_hsotg_read_frameno(hsotg);
+
+ if (using_desc_dma(hsotg)) {
+ dwc2_gadget_start_isoc_ddma(hs_ep);
+ return;
+ }
+
if (hs_ep->interval > 1) {
u32 ctrl = dwc2_readl(hsotg->regs +
DIEPCTL(hs_ep->index));
@@ -2237,8 +2835,15 @@ static void dwc2_hsotg_epint(struct dwc2_hsotg *hsotg, unsigned int idx,
if (idx == 0 && (ints & (DXEPINT_SETUP | DXEPINT_SETUP_RCVD)))
ints &= ~DXEPINT_XFERCOMPL;
- if (ints & DXEPINT_STSPHSERCVD)
- dev_dbg(hsotg->dev, "%s: StsPhseRcvd asserted\n", __func__);
+ /*
+ * Don't process XferCompl interrupt in DDMA if EP0 is still in SETUP
+ * stage and xfercomplete was generated without SETUP phase done
+	 * interrupt. SW should parse the received setup packet only after the
+	 * host exits the setup phase of the control transfer.
+ */
+ if (using_desc_dma(hsotg) && idx == 0 && !hs_ep->dir_in &&
+ hsotg->ep0_state == DWC2_EP0_SETUP && !(ints & DXEPINT_SETUP))
+ ints &= ~DXEPINT_XFERCOMPL;
if (ints & DXEPINT_XFERCOMPL) {
dev_dbg(hsotg->dev,
@@ -2246,11 +2851,17 @@ static void dwc2_hsotg_epint(struct dwc2_hsotg *hsotg, unsigned int idx,
__func__, dwc2_readl(hsotg->regs + epctl_reg),
dwc2_readl(hsotg->regs + epsiz_reg));
- /*
- * we get OutDone from the FIFO, so we only need to look
- * at completing IN requests here
- */
- if (dir_in) {
+ /* In DDMA handle isochronous requests separately */
+ if (using_desc_dma(hsotg) && hs_ep->isochronous) {
+ dwc2_gadget_complete_isoc_request_ddma(hs_ep);
+ /* Try to start next isoc request */
+ dwc2_gadget_start_next_isoc_ddma(hs_ep);
+ } else if (dir_in) {
+ /*
+ * We get OutDone from the FIFO, so we only
+ * need to look at completing IN requests here
+			 * if operating in slave mode
+ */
if (hs_ep->isochronous && hs_ep->interval > 1)
dwc2_gadget_incr_frame_num(hs_ep);
@@ -2302,9 +2913,30 @@ static void dwc2_hsotg_epint(struct dwc2_hsotg *hsotg, unsigned int idx,
}
}
+ if (ints & DXEPINT_STSPHSERCVD) {
+ dev_dbg(hsotg->dev, "%s: StsPhseRcvd\n", __func__);
+
+ /* Move to STATUS IN for DDMA */
+ if (using_desc_dma(hsotg))
+ dwc2_hsotg_ep0_zlp(hsotg, true);
+ }
+
if (ints & DXEPINT_BACK2BACKSETUP)
dev_dbg(hsotg->dev, "%s: B2BSetup/INEPNakEff\n", __func__);
+ if (ints & DXEPINT_BNAINTR) {
+ dev_dbg(hsotg->dev, "%s: BNA interrupt\n", __func__);
+
+ /*
+ * Try to start next isoc request, if any.
+ * Sometimes the endpoint remains enabled after BNA interrupt
+		 * assertion, which is not expected, hence we can enter here a
+		 * couple of times.
+ */
+ if (hs_ep->isochronous)
+ dwc2_gadget_start_next_isoc_ddma(hs_ep);
+ }
+
if (dir_in && !hs_ep->isochronous) {
/* not sure if this is important, but we'll clear it anyway */
if (ints & DXEPINT_INTKNTXFEMP) {
@@ -2372,6 +3004,8 @@ static void dwc2_hsotg_irq_enumdone(struct dwc2_hsotg *hsotg)
case DSTS_ENUMSPD_LS:
hsotg->gadget.speed = USB_SPEED_LOW;
+ ep0_mps = 8;
+ ep_mps = 8;
/*
* note, we don't actually support LS in this driver at the
* moment, and the documentation seems to imply that it isn't
@@ -2390,13 +3024,15 @@ static void dwc2_hsotg_irq_enumdone(struct dwc2_hsotg *hsotg)
if (ep0_mps) {
int i;
/* Initialize ep0 for both in and out directions */
- dwc2_hsotg_set_ep_maxpacket(hsotg, 0, ep0_mps, 1);
- dwc2_hsotg_set_ep_maxpacket(hsotg, 0, ep0_mps, 0);
+ dwc2_hsotg_set_ep_maxpacket(hsotg, 0, ep0_mps, 0, 1);
+ dwc2_hsotg_set_ep_maxpacket(hsotg, 0, ep0_mps, 0, 0);
for (i = 1; i < hsotg->num_of_eps; i++) {
if (hsotg->eps_in[i])
- dwc2_hsotg_set_ep_maxpacket(hsotg, i, ep_mps, 1);
+ dwc2_hsotg_set_ep_maxpacket(hsotg, i, ep_mps,
+ 0, 1);
if (hsotg->eps_out[i])
- dwc2_hsotg_set_ep_maxpacket(hsotg, i, ep_mps, 0);
+ dwc2_hsotg_set_ep_maxpacket(hsotg, i, ep_mps,
+ 0, 0);
}
}
@@ -2516,6 +3152,7 @@ void dwc2_hsotg_core_init_disconnected(struct dwc2_hsotg *hsotg,
u32 intmsk;
u32 val;
u32 usbcfg;
+ u32 dcfg = 0;
/* Kill any ep0 requests as controller will be reinitialized */
kill_all_requests(hsotg, hsotg->eps_out[0], -ECONNRESET);
@@ -2534,10 +3171,17 @@ void dwc2_hsotg_core_init_disconnected(struct dwc2_hsotg *hsotg,
usbcfg &= ~(GUSBCFG_TOUTCAL_MASK | GUSBCFG_PHYIF16 | GUSBCFG_SRPCAP |
GUSBCFG_HNPCAP);
- /* set the PLL on, remove the HNP/SRP and set the PHY */
- val = (hsotg->phyif == GUSBCFG_PHYIF8) ? 9 : 5;
- usbcfg |= hsotg->phyif | GUSBCFG_TOUTCAL(7) |
- (val << GUSBCFG_USBTRDTIM_SHIFT);
+ if (hsotg->params.phy_type == DWC2_PHY_TYPE_PARAM_FS &&
+ (hsotg->params.speed == DWC2_SPEED_PARAM_FULL ||
+ hsotg->params.speed == DWC2_SPEED_PARAM_LOW)) {
+ /* FS/LS Dedicated Transceiver Interface */
+ usbcfg |= GUSBCFG_PHYSEL;
+ } else {
+ /* set the PLL on, remove the HNP/SRP and set the PHY */
+ val = (hsotg->phyif == GUSBCFG_PHYIF8) ? 9 : 5;
+ usbcfg |= hsotg->phyif | GUSBCFG_TOUTCAL(7) |
+ (val << GUSBCFG_USBTRDTIM_SHIFT);
+ }
dwc2_writel(usbcfg, hsotg->regs + GUSBCFG);
dwc2_hsotg_init_fifo(hsotg);
@@ -2545,7 +3189,23 @@ void dwc2_hsotg_core_init_disconnected(struct dwc2_hsotg *hsotg,
if (!is_usb_reset)
__orr32(hsotg->regs + DCTL, DCTL_SFTDISCON);
- dwc2_writel(DCFG_EPMISCNT(1) | DCFG_DEVSPD_HS, hsotg->regs + DCFG);
+ dcfg |= DCFG_EPMISCNT(1);
+
+ switch (hsotg->params.speed) {
+ case DWC2_SPEED_PARAM_LOW:
+ dcfg |= DCFG_DEVSPD_LS;
+ break;
+ case DWC2_SPEED_PARAM_FULL:
+ if (hsotg->params.phy_type == DWC2_PHY_TYPE_PARAM_FS)
+ dcfg |= DCFG_DEVSPD_FS48;
+ else
+ dcfg |= DCFG_DEVSPD_FS;
+ break;
+ default:
+ dcfg |= DCFG_DEVSPD_HS;
+ }
+
+ dwc2_writel(dcfg, hsotg->regs + DCFG);
/* Clear any pending OTG interrupts */
dwc2_writel(0xffffffff, hsotg->regs + GOTGINT);
@@ -2556,23 +3216,31 @@ void dwc2_hsotg_core_init_disconnected(struct dwc2_hsotg *hsotg,
GINTSTS_GOUTNAKEFF | GINTSTS_GINNAKEFF |
GINTSTS_USBRST | GINTSTS_RESETDET |
GINTSTS_ENUMDONE | GINTSTS_OTGINT |
- GINTSTS_USBSUSP | GINTSTS_WKUPINT |
- GINTSTS_INCOMPL_SOIN | GINTSTS_INCOMPL_SOOUT;
+ GINTSTS_USBSUSP | GINTSTS_WKUPINT;
- if (hsotg->core_params->external_id_pin_ctl <= 0)
+ if (!using_desc_dma(hsotg))
+ intmsk |= GINTSTS_INCOMPL_SOIN | GINTSTS_INCOMPL_SOOUT;
+
+ if (hsotg->params.external_id_pin_ctl <= 0)
intmsk |= GINTSTS_CONIDSTSCHNG;
dwc2_writel(intmsk, hsotg->regs + GINTMSK);
- if (using_dma(hsotg))
+ if (using_dma(hsotg)) {
dwc2_writel(GAHBCFG_GLBL_INTR_EN | GAHBCFG_DMA_EN |
(GAHBCFG_HBSTLEN_INCR4 << GAHBCFG_HBSTLEN_SHIFT),
hsotg->regs + GAHBCFG);
- else
+
+ /* Set DDMA mode support in the core if needed */
+ if (using_desc_dma(hsotg))
+ __orr32(hsotg->regs + DCFG, DCFG_DESCDMA_EN);
+
+ } else {
dwc2_writel(((hsotg->dedicated_fifos) ?
(GAHBCFG_NP_TXF_EMP_LVL |
GAHBCFG_P_TXF_EMP_LVL) : 0) |
GAHBCFG_GLBL_INTR_EN, hsotg->regs + GAHBCFG);
+ }
/*
* If INTknTXFEmpMsk is enabled, it's important to disable ep interrupts
@@ -2588,13 +3256,18 @@ void dwc2_hsotg_core_init_disconnected(struct dwc2_hsotg *hsotg,
/*
* don't need XferCompl, we get that from RXFIFO in slave mode. In
- * DMA mode we may need this.
+ * DMA mode we may need this and StsPhseRcvd.
*/
- dwc2_writel((using_dma(hsotg) ? (DIEPMSK_XFERCOMPLMSK) : 0) |
+ dwc2_writel((using_dma(hsotg) ? (DIEPMSK_XFERCOMPLMSK |
+ DOEPMSK_STSPHSERCVDMSK) : 0) |
DOEPMSK_EPDISBLDMSK | DOEPMSK_AHBERRMSK |
- DOEPMSK_SETUPMSK | DOEPMSK_STSPHSERCVDMSK,
+ DOEPMSK_SETUPMSK,
hsotg->regs + DOEPMSK);
+ /* Enable BNA interrupt for DDMA */
+ if (using_desc_dma(hsotg))
+ __orr32(hsotg->regs + DOEPMSK, DOEPMSK_BNAMSK);
+
dwc2_writel(0, hsotg->regs + DAINTMSK);
dev_dbg(hsotg->dev, "EP0: DIEPCTL0=0x%08x, DOEPCTL0=0x%08x\n",
@@ -2935,6 +3608,95 @@ irq_retry:
return IRQ_HANDLED;
}
+static int dwc2_hsotg_wait_bit_set(struct dwc2_hsotg *hs_otg, u32 reg,
+ u32 bit, u32 timeout)
+{
+ u32 i;
+
+ for (i = 0; i < timeout; i++) {
+ if (dwc2_readl(hs_otg->regs + reg) & bit)
+ return 0;
+ udelay(1);
+ }
+
+ return -ETIMEDOUT;
+}
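/*
 * Editorial sketch (not part of the patch): the helper busy-polls with a
 * 1 us delay per iteration, so "timeout" is effectively a microsecond
 * budget. A hypothetical caller waiting up to 10 ms for the AHB master to
 * go idle might look like this:
 */
static int example_wait_ahb_idle(struct dwc2_hsotg *hsotg)
{
	/* GRSTCTL/GRSTCTL_AHBIDLE chosen purely for illustration */
	return dwc2_hsotg_wait_bit_set(hsotg, GRSTCTL, GRSTCTL_AHBIDLE, 10000);
}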
+
+static void dwc2_hsotg_ep_stop_xfr(struct dwc2_hsotg *hsotg,
+ struct dwc2_hsotg_ep *hs_ep)
+{
+ u32 epctrl_reg;
+ u32 epint_reg;
+
+ epctrl_reg = hs_ep->dir_in ? DIEPCTL(hs_ep->index) :
+ DOEPCTL(hs_ep->index);
+ epint_reg = hs_ep->dir_in ? DIEPINT(hs_ep->index) :
+ DOEPINT(hs_ep->index);
+
+ dev_dbg(hsotg->dev, "%s: stopping transfer on %s\n", __func__,
+ hs_ep->name);
+
+ if (hs_ep->dir_in) {
+ if (hsotg->dedicated_fifos || hs_ep->periodic) {
+ __orr32(hsotg->regs + epctrl_reg, DXEPCTL_SNAK);
+ /* Wait for Nak effect */
+ if (dwc2_hsotg_wait_bit_set(hsotg, epint_reg,
+ DXEPINT_INEPNAKEFF, 100))
+ dev_warn(hsotg->dev,
+ "%s: timeout DIEPINT.NAKEFF\n",
+ __func__);
+ } else {
+ __orr32(hsotg->regs + DCTL, DCTL_SGNPINNAK);
+ /* Wait for Nak effect */
+ if (dwc2_hsotg_wait_bit_set(hsotg, GINTSTS,
+ GINTSTS_GINNAKEFF, 100))
+ dev_warn(hsotg->dev,
+ "%s: timeout GINTSTS.GINNAKEFF\n",
+ __func__);
+ }
+ } else {
+ if (!(dwc2_readl(hsotg->regs + GINTSTS) & GINTSTS_GOUTNAKEFF))
+ __orr32(hsotg->regs + DCTL, DCTL_SGOUTNAK);
+
+ /* Wait for global nak to take effect */
+ if (dwc2_hsotg_wait_bit_set(hsotg, GINTSTS,
+ GINTSTS_GOUTNAKEFF, 100))
+ dev_warn(hsotg->dev, "%s: timeout GINTSTS.GOUTNAKEFF\n",
+ __func__);
+ }
+
+ /* Disable ep */
+ __orr32(hsotg->regs + epctrl_reg, DXEPCTL_EPDIS | DXEPCTL_SNAK);
+
+ /* Wait for ep to be disabled */
+ if (dwc2_hsotg_wait_bit_set(hsotg, epint_reg, DXEPINT_EPDISBLD, 100))
+ dev_warn(hsotg->dev,
+ "%s: timeout DOEPCTL.EPDisable\n", __func__);
+
+ /* Clear EPDISBLD interrupt */
+ __orr32(hsotg->regs + epint_reg, DXEPINT_EPDISBLD);
+
+ if (hs_ep->dir_in) {
+ unsigned short fifo_index;
+
+ if (hsotg->dedicated_fifos || hs_ep->periodic)
+ fifo_index = hs_ep->fifo_index;
+ else
+ fifo_index = 0;
+
+ /* Flush TX FIFO */
+ dwc2_flush_tx_fifo(hsotg, fifo_index);
+
+ /* Clear Global In NP NAK in Shared FIFO for non periodic ep */
+ if (!hsotg->dedicated_fifos && !hs_ep->periodic)
+ __orr32(hsotg->regs + DCTL, DCTL_CGNPINNAK);
+
+ } else {
+ /* Remove global NAKs */
+ __orr32(hsotg->regs + DCTL, DCTL_CGOUTNAK);
+ }
+}
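/*
 * Editorial sketch: the sequence above is NAK first (per-endpoint SNAK for
 * dedicated/periodic IN FIFOs, global IN/OUT NAK otherwise), then
 * EPDIS + SNAK, then a TxFIFO flush for IN endpoints. A hypothetical call
 * site -- the driver invokes this under the controller spinlock and only
 * while the endpoint is actually enabled, as ep_disable does below:
 */
static void example_stop_in_ep(struct dwc2_hsotg *hsotg,
			       struct dwc2_hsotg_ep *hs_ep)
{
	unsigned long flags;

	spin_lock_irqsave(&hsotg->lock, flags);
	/* assumes an IN endpoint purely for illustration */
	if (dwc2_readl(hsotg->regs + DIEPCTL(hs_ep->index)) & DXEPCTL_EPENA)
		dwc2_hsotg_ep_stop_xfr(hsotg, hs_ep);
	spin_unlock_irqrestore(&hsotg->lock, flags);
}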
+
/**
* dwc2_hsotg_ep_enable - enable the given endpoint
 * @ep: The USB endpoint to configure
@@ -2952,6 +3714,7 @@ static int dwc2_hsotg_ep_enable(struct usb_ep *ep,
u32 epctrl_reg;
u32 epctrl;
u32 mps;
+ u32 mc;
u32 mask;
unsigned int dir_in;
unsigned int i, val, size;
@@ -2975,6 +3738,7 @@ static int dwc2_hsotg_ep_enable(struct usb_ep *ep,
}
mps = usb_endpoint_maxp(desc);
+ mc = usb_endpoint_maxp_mult(desc);
/* note, we handle this here instead of dwc2_hsotg_set_ep_maxpacket */
@@ -2984,6 +3748,18 @@ static int dwc2_hsotg_ep_enable(struct usb_ep *ep,
dev_dbg(hsotg->dev, "%s: read DxEPCTL=0x%08x from 0x%08x\n",
__func__, epctrl, epctrl_reg);
+ /* Allocate DMA descriptor chain for non-ctrl endpoints */
+ if (using_desc_dma(hsotg)) {
+ hs_ep->desc_list = dma_alloc_coherent(hsotg->dev,
+ MAX_DMA_DESC_NUM_GENERIC *
+ sizeof(struct dwc2_dma_desc),
+ &hs_ep->desc_list_dma, GFP_KERNEL);
+ if (!hs_ep->desc_list) {
+ ret = -ENOMEM;
+ goto error2;
+ }
+ }
+
spin_lock_irqsave(&hsotg->lock, flags);
epctrl &= ~(DXEPCTL_EPTYPE_MASK | DXEPCTL_MPS_MASK);
@@ -2996,7 +3772,7 @@ static int dwc2_hsotg_ep_enable(struct usb_ep *ep,
epctrl |= DXEPCTL_USBACTEP;
/* update the endpoint state */
- dwc2_hsotg_set_ep_maxpacket(hsotg, hs_ep->index, mps, dir_in);
+ dwc2_hsotg_set_ep_maxpacket(hsotg, hs_ep->index, mps, mc, dir_in);
/* default, set to non-periodic */
hs_ep->isochronous = 0;
@@ -3011,6 +3787,8 @@ static int dwc2_hsotg_ep_enable(struct usb_ep *ep,
hs_ep->isochronous = 1;
hs_ep->interval = 1 << (desc->bInterval - 1);
hs_ep->target_frame = TARGET_FRAME_INITIAL;
+ hs_ep->isoc_chain_num = 0;
+ hs_ep->next_desc = 0;
if (dir_in) {
hs_ep->periodic = 1;
mask = dwc2_readl(hsotg->regs + DIEPMSK);
@@ -3067,7 +3845,7 @@ static int dwc2_hsotg_ep_enable(struct usb_ep *ep,
dev_err(hsotg->dev,
"%s: No suitable fifo found\n", __func__);
ret = -ENOMEM;
- goto error;
+ goto error1;
}
hsotg->fifo_map |= 1 << fifo_index;
epctrl |= DXEPCTL_TXFNUM(fifo_index);
@@ -3089,8 +3867,17 @@ static int dwc2_hsotg_ep_enable(struct usb_ep *ep,
/* enable the endpoint interrupt */
dwc2_hsotg_ctrl_epint(hsotg, index, dir_in, 1);
-error:
+error1:
spin_unlock_irqrestore(&hsotg->lock, flags);
+
+error2:
+ if (ret && using_desc_dma(hsotg) && hs_ep->desc_list) {
+ dma_free_coherent(hsotg->dev, MAX_DMA_DESC_NUM_GENERIC *
+ sizeof(struct dwc2_dma_desc),
+ hs_ep->desc_list, hs_ep->desc_list_dma);
+ hs_ep->desc_list = NULL;
+ }
+
return ret;
}
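/*
 * Editorial note: the error1/error2 split above unlocks before freeing, so
 * dma_free_coherent() never runs inside the irq-disabled spinlock section
 * (it must not be called with interrupts disabled). The shape of the
 * pattern, sketched with a hypothetical 64-byte buffer:
 */
static int example_alloc_then_lock(struct dwc2_hsotg *hsotg)
{
	unsigned long flags;
	dma_addr_t dma;
	void *buf;
	int ret = 0;

	buf = dma_alloc_coherent(hsotg->dev, 64, &dma, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	spin_lock_irqsave(&hsotg->lock, flags);
	/* ... register programming that may set ret on failure ... */
	spin_unlock_irqrestore(&hsotg->lock, flags);

	if (ret)
		dma_free_coherent(hsotg->dev, 64, buf, dma);
	return ret;
}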
@@ -3115,11 +3902,23 @@ static int dwc2_hsotg_ep_disable(struct usb_ep *ep)
return -EINVAL;
}
+ /* Free the DMA memory allocated for non-control endpoints */
+ if (using_desc_dma(hsotg)) {
+ dma_free_coherent(hsotg->dev, MAX_DMA_DESC_NUM_GENERIC *
+ sizeof(struct dwc2_dma_desc),
+ hs_ep->desc_list, hs_ep->desc_list_dma);
+ hs_ep->desc_list = NULL;
+ }
+
epctrl_reg = dir_in ? DIEPCTL(index) : DOEPCTL(index);
spin_lock_irqsave(&hsotg->lock, flags);
ctrl = dwc2_readl(hsotg->regs + epctrl_reg);
+
+ if (ctrl & DXEPCTL_EPENA)
+ dwc2_hsotg_ep_stop_xfr(hsotg, hs_ep);
+
ctrl &= ~DXEPCTL_EPENA;
ctrl &= ~DXEPCTL_USBACTEP;
ctrl |= DXEPCTL_SNAK;
@@ -3158,77 +3957,6 @@ static bool on_list(struct dwc2_hsotg_ep *ep, struct dwc2_hsotg_req *test)
return false;
}
-static int dwc2_hsotg_wait_bit_set(struct dwc2_hsotg *hs_otg, u32 reg,
- u32 bit, u32 timeout)
-{
- u32 i;
-
- for (i = 0; i < timeout; i++) {
- if (dwc2_readl(hs_otg->regs + reg) & bit)
- return 0;
- udelay(1);
- }
-
- return -ETIMEDOUT;
-}
-
-static void dwc2_hsotg_ep_stop_xfr(struct dwc2_hsotg *hsotg,
- struct dwc2_hsotg_ep *hs_ep)
-{
- u32 epctrl_reg;
- u32 epint_reg;
-
- epctrl_reg = hs_ep->dir_in ? DIEPCTL(hs_ep->index) :
- DOEPCTL(hs_ep->index);
- epint_reg = hs_ep->dir_in ? DIEPINT(hs_ep->index) :
- DOEPINT(hs_ep->index);
-
- dev_dbg(hsotg->dev, "%s: stopping transfer on %s\n", __func__,
- hs_ep->name);
- if (hs_ep->dir_in) {
- __orr32(hsotg->regs + epctrl_reg, DXEPCTL_SNAK);
- /* Wait for Nak effect */
- if (dwc2_hsotg_wait_bit_set(hsotg, epint_reg,
- DXEPINT_INEPNAKEFF, 100))
- dev_warn(hsotg->dev,
- "%s: timeout DIEPINT.NAKEFF\n", __func__);
- } else {
- if (!(dwc2_readl(hsotg->regs + GINTSTS) & GINTSTS_GOUTNAKEFF))
- __orr32(hsotg->regs + DCTL, DCTL_SGOUTNAK);
-
- /* Wait for global nak to take effect */
- if (dwc2_hsotg_wait_bit_set(hsotg, GINTSTS,
- GINTSTS_GOUTNAKEFF, 100))
- dev_warn(hsotg->dev,
- "%s: timeout GINTSTS.GOUTNAKEFF\n", __func__);
- }
-
- /* Disable ep */
- __orr32(hsotg->regs + epctrl_reg, DXEPCTL_EPDIS | DXEPCTL_SNAK);
-
- /* Wait for ep to be disabled */
- if (dwc2_hsotg_wait_bit_set(hsotg, epint_reg, DXEPINT_EPDISBLD, 100))
- dev_warn(hsotg->dev,
- "%s: timeout DOEPCTL.EPDisable\n", __func__);
-
- if (hs_ep->dir_in) {
- if (hsotg->dedicated_fifos) {
- dwc2_writel(GRSTCTL_TXFNUM(hs_ep->fifo_index) |
- GRSTCTL_TXFFLSH, hsotg->regs + GRSTCTL);
- /* Wait for fifo flush */
- if (dwc2_hsotg_wait_bit_set(hsotg, GRSTCTL,
- GRSTCTL_TXFFLSH, 100))
- dev_warn(hsotg->dev,
- "%s: timeout flushing fifos\n",
- __func__);
- }
- /* TODO: Flush shared tx fifo */
- } else {
- /* Remove global NAKs */
- __bic32(hsotg->regs + DCTL, DCTL_SGOUTNAK);
- }
-}
-
/**
* dwc2_hsotg_ep_dequeue - dequeue given endpoint
* @ep: The endpoint to dequeue.
@@ -3665,14 +4393,21 @@ static void dwc2_hsotg_initep(struct dwc2_hsotg *hsotg,
hs_ep->parent = hsotg;
hs_ep->ep.name = hs_ep->name;
- usb_ep_set_maxpacket_limit(&hs_ep->ep, epnum ? 1024 : EP0_MPS_LIMIT);
+
+ if (hsotg->params.speed == DWC2_SPEED_PARAM_LOW)
+ usb_ep_set_maxpacket_limit(&hs_ep->ep, 8);
+ else
+ usb_ep_set_maxpacket_limit(&hs_ep->ep,
+ epnum ? 1024 : EP0_MPS_LIMIT);
hs_ep->ep.ops = &dwc2_hsotg_ep_ops;
if (epnum == 0) {
hs_ep->ep.caps.type_control = true;
} else {
- hs_ep->ep.caps.type_iso = true;
- hs_ep->ep.caps.type_bulk = true;
+ if (hsotg->params.speed != DWC2_SPEED_PARAM_LOW) {
+ hs_ep->ep.caps.type_iso = true;
+ hs_ep->ep.caps.type_bulk = true;
+ }
hs_ep->ep.caps.type_int = true;
}
@@ -3802,51 +4537,6 @@ static void dwc2_hsotg_dump(struct dwc2_hsotg *hsotg)
#endif
}
-#ifdef CONFIG_OF
-static void dwc2_hsotg_of_probe(struct dwc2_hsotg *hsotg)
-{
- struct device_node *np = hsotg->dev->of_node;
- u32 len = 0;
- u32 i = 0;
-
- /* Enable dma if requested in device tree */
- hsotg->g_using_dma = of_property_read_bool(np, "g-use-dma");
-
- /*
- * Register TX periodic fifo size per endpoint.
- * EP0 is excluded since it has no fifo configuration.
- */
- if (!of_find_property(np, "g-tx-fifo-size", &len))
- goto rx_fifo;
-
- len /= sizeof(u32);
-
- /* Read tx fifo sizes other than ep0 */
- if (of_property_read_u32_array(np, "g-tx-fifo-size",
- &hsotg->g_tx_fifo_sz[1], len))
- goto rx_fifo;
-
- /* Add ep0 */
- len++;
-
- /* Make remaining TX fifos unavailable */
- if (len < MAX_EPS_CHANNELS) {
- for (i = len; i < MAX_EPS_CHANNELS; i++)
- hsotg->g_tx_fifo_sz[i] = 0;
- }
-
-rx_fifo:
- /* Register RX fifo size */
- of_property_read_u32(np, "g-rx-fifo-size", &hsotg->g_rx_fifo_sz);
-
- /* Register NPTX fifo size */
- of_property_read_u32(np, "g-np-tx-fifo-size",
- &hsotg->g_np_g_tx_fifo_sz);
-}
-#else
-static inline void dwc2_hsotg_of_probe(struct dwc2_hsotg *hsotg) { }
-#endif
-
/**
* dwc2_gadget_init - init function for gadget
 * @hsotg: The data structure for the DWC2 driver.
 * @irq: The IRQ number for the controller.
@@ -3857,33 +4547,11 @@ int dwc2_gadget_init(struct dwc2_hsotg *hsotg, int irq)
struct device *dev = hsotg->dev;
int epnum;
int ret;
- int i;
- u32 p_tx_fifo[] = DWC2_G_P_LEGACY_TX_FIFO_SIZE;
-
- /* Initialize to legacy fifo configuration values */
- hsotg->g_rx_fifo_sz = 2048;
- hsotg->g_np_g_tx_fifo_sz = 1024;
- memcpy(&hsotg->g_tx_fifo_sz[1], p_tx_fifo, sizeof(p_tx_fifo));
- /* Device tree specific probe */
- dwc2_hsotg_of_probe(hsotg);
-
- /* Check against largest possible value. */
- if (hsotg->g_np_g_tx_fifo_sz >
- hsotg->hw_params.dev_nperio_tx_fifo_size) {
- dev_warn(dev, "Specified GNPTXFDEP=%d > %d\n",
- hsotg->g_np_g_tx_fifo_sz,
- hsotg->hw_params.dev_nperio_tx_fifo_size);
- hsotg->g_np_g_tx_fifo_sz =
- hsotg->hw_params.dev_nperio_tx_fifo_size;
- }
/* Dump fifo information */
dev_dbg(dev, "NonPeriodic TXFIFO size: %d\n",
- hsotg->g_np_g_tx_fifo_sz);
- dev_dbg(dev, "RXFIFO size: %d\n", hsotg->g_rx_fifo_sz);
- for (i = 0; i < MAX_EPS_CHANNELS; i++)
- dev_dbg(dev, "Periodic TXFIFO%2d size: %d\n", i,
- hsotg->g_tx_fifo_sz[i]);
+ hsotg->params.g_np_tx_fifo_size);
+ dev_dbg(dev, "RXFIFO size: %d\n", hsotg->params.g_rx_fifo_size);
hsotg->gadget.max_speed = USB_SPEED_HIGH;
hsotg->gadget.ops = &dwc2_hsotg_gadget_ops;
@@ -3909,6 +4577,12 @@ int dwc2_gadget_init(struct dwc2_hsotg *hsotg, int irq)
if (!hsotg->ep0_buff)
return -ENOMEM;
+ if (using_desc_dma(hsotg)) {
+ ret = dwc2_gadget_alloc_ctrl_desc_chains(hsotg);
+ if (ret < 0)
+ return ret;
+ }
+
ret = devm_request_irq(hsotg->dev, irq, dwc2_hsotg_irq, IRQF_SHARED,
dev_name(hsotg->dev), hsotg);
if (ret < 0) {
diff --git a/drivers/usb/dwc2/hcd.c b/drivers/usb/dwc2/hcd.c
index df5a06578005..911c3b36ac06 100644
--- a/drivers/usb/dwc2/hcd.c
+++ b/drivers/usb/dwc2/hcd.c
@@ -79,9 +79,9 @@ static void dwc2_enable_common_interrupts(struct dwc2_hsotg *hsotg)
/* Enable the interrupts in the GINTMSK */
intmsk = GINTSTS_MODEMIS | GINTSTS_OTGINT;
- if (hsotg->core_params->dma_enable <= 0)
+ if (hsotg->params.host_dma <= 0)
intmsk |= GINTSTS_RXFLVL;
- if (hsotg->core_params->external_id_pin_ctl <= 0)
+ if (hsotg->params.external_id_pin_ctl <= 0)
intmsk |= GINTSTS_CONIDSTSCHNG;
intmsk |= GINTSTS_WKUPINT | GINTSTS_USBSUSP |
@@ -100,8 +100,8 @@ static void dwc2_init_fs_ls_pclk_sel(struct dwc2_hsotg *hsotg)
if ((hsotg->hw_params.hs_phy_type == GHWCFG2_HS_PHY_TYPE_ULPI &&
hsotg->hw_params.fs_phy_type == GHWCFG2_FS_PHY_TYPE_DEDICATED &&
- hsotg->core_params->ulpi_fs_ls > 0) ||
- hsotg->core_params->phy_type == DWC2_PHY_TYPE_PARAM_FS) {
+ hsotg->params.ulpi_fs_ls > 0) ||
+ hsotg->params.phy_type == DWC2_PHY_TYPE_PARAM_FS) {
/* Full speed PHY */
val = HCFG_FSLSPCLKSEL_48_MHZ;
} else {
@@ -152,7 +152,7 @@ static int dwc2_fs_phy_init(struct dwc2_hsotg *hsotg, bool select_phy)
if (dwc2_is_host_mode(hsotg))
dwc2_init_fs_ls_pclk_sel(hsotg);
- if (hsotg->core_params->i2c_enable > 0) {
+ if (hsotg->params.i2c_enable > 0) {
dev_dbg(hsotg->dev, "FS PHY enabling I2C\n");
/* Program GUSBCFG.OtgUtmiFsSel to I2C */
@@ -189,20 +189,20 @@ static int dwc2_hs_phy_init(struct dwc2_hsotg *hsotg, bool select_phy)
* so only program the first time. Do a soft reset immediately after
* setting phyif.
*/
- switch (hsotg->core_params->phy_type) {
+ switch (hsotg->params.phy_type) {
case DWC2_PHY_TYPE_PARAM_ULPI:
/* ULPI interface */
dev_dbg(hsotg->dev, "HS ULPI PHY selected\n");
usbcfg |= GUSBCFG_ULPI_UTMI_SEL;
usbcfg &= ~(GUSBCFG_PHYIF16 | GUSBCFG_DDRSEL);
- if (hsotg->core_params->phy_ulpi_ddr > 0)
+ if (hsotg->params.phy_ulpi_ddr > 0)
usbcfg |= GUSBCFG_DDRSEL;
break;
case DWC2_PHY_TYPE_PARAM_UTMI:
/* UTMI+ interface */
dev_dbg(hsotg->dev, "HS UTMI+ PHY selected\n");
usbcfg &= ~(GUSBCFG_ULPI_UTMI_SEL | GUSBCFG_PHYIF16);
- if (hsotg->core_params->phy_utmi_width == 16)
+ if (hsotg->params.phy_utmi_width == 16)
usbcfg |= GUSBCFG_PHYIF16;
break;
default:
@@ -230,9 +230,10 @@ static int dwc2_phy_init(struct dwc2_hsotg *hsotg, bool select_phy)
u32 usbcfg;
int retval = 0;
- if (hsotg->core_params->speed == DWC2_SPEED_PARAM_FULL &&
- hsotg->core_params->phy_type == DWC2_PHY_TYPE_PARAM_FS) {
- /* If FS mode with FS PHY */
+ if ((hsotg->params.speed == DWC2_SPEED_PARAM_FULL ||
+ hsotg->params.speed == DWC2_SPEED_PARAM_LOW) &&
+ hsotg->params.phy_type == DWC2_PHY_TYPE_PARAM_FS) {
+ /* If FS/LS mode with FS/LS PHY */
retval = dwc2_fs_phy_init(hsotg, select_phy);
if (retval)
return retval;
@@ -245,7 +246,7 @@ static int dwc2_phy_init(struct dwc2_hsotg *hsotg, bool select_phy)
if (hsotg->hw_params.hs_phy_type == GHWCFG2_HS_PHY_TYPE_ULPI &&
hsotg->hw_params.fs_phy_type == GHWCFG2_FS_PHY_TYPE_DEDICATED &&
- hsotg->core_params->ulpi_fs_ls > 0) {
+ hsotg->params.ulpi_fs_ls > 0) {
dev_dbg(hsotg->dev, "Setting ULPI FSLS\n");
usbcfg = dwc2_readl(hsotg->regs + GUSBCFG);
usbcfg |= GUSBCFG_ULPI_FS_LS;
@@ -272,9 +273,9 @@ static int dwc2_gahbcfg_init(struct dwc2_hsotg *hsotg)
case GHWCFG2_INT_DMA_ARCH:
dev_dbg(hsotg->dev, "Internal DMA Mode\n");
- if (hsotg->core_params->ahbcfg != -1) {
+ if (hsotg->params.ahbcfg != -1) {
ahbcfg &= GAHBCFG_CTRL_MASK;
- ahbcfg |= hsotg->core_params->ahbcfg &
+ ahbcfg |= hsotg->params.ahbcfg &
~GAHBCFG_CTRL_MASK;
}
break;
@@ -285,21 +286,21 @@ static int dwc2_gahbcfg_init(struct dwc2_hsotg *hsotg)
break;
}
- dev_dbg(hsotg->dev, "dma_enable:%d dma_desc_enable:%d\n",
- hsotg->core_params->dma_enable,
- hsotg->core_params->dma_desc_enable);
+ dev_dbg(hsotg->dev, "host_dma:%d dma_desc_enable:%d\n",
+ hsotg->params.host_dma,
+ hsotg->params.dma_desc_enable);
- if (hsotg->core_params->dma_enable > 0) {
- if (hsotg->core_params->dma_desc_enable > 0)
+ if (hsotg->params.host_dma > 0) {
+ if (hsotg->params.dma_desc_enable > 0)
dev_dbg(hsotg->dev, "Using Descriptor DMA mode\n");
else
dev_dbg(hsotg->dev, "Using Buffer DMA mode\n");
} else {
dev_dbg(hsotg->dev, "Using Slave mode\n");
- hsotg->core_params->dma_desc_enable = 0;
+ hsotg->params.dma_desc_enable = 0;
}
- if (hsotg->core_params->dma_enable > 0)
+ if (hsotg->params.host_dma > 0)
ahbcfg |= GAHBCFG_DMA_EN;
dwc2_writel(ahbcfg, hsotg->regs + GAHBCFG);
@@ -316,10 +317,10 @@ static void dwc2_gusbcfg_init(struct dwc2_hsotg *hsotg)
switch (hsotg->hw_params.op_mode) {
case GHWCFG2_OP_MODE_HNP_SRP_CAPABLE:
- if (hsotg->core_params->otg_cap ==
+ if (hsotg->params.otg_cap ==
DWC2_CAP_PARAM_HNP_SRP_CAPABLE)
usbcfg |= GUSBCFG_HNPCAP;
- if (hsotg->core_params->otg_cap !=
+ if (hsotg->params.otg_cap !=
DWC2_CAP_PARAM_NO_HNP_SRP_CAPABLE)
usbcfg |= GUSBCFG_SRPCAP;
break;
@@ -327,7 +328,7 @@ static void dwc2_gusbcfg_init(struct dwc2_hsotg *hsotg)
case GHWCFG2_OP_MODE_SRP_ONLY_CAPABLE:
case GHWCFG2_OP_MODE_SRP_CAPABLE_DEVICE:
case GHWCFG2_OP_MODE_SRP_CAPABLE_HOST:
- if (hsotg->core_params->otg_cap !=
+ if (hsotg->params.otg_cap !=
DWC2_CAP_PARAM_NO_HNP_SRP_CAPABLE)
usbcfg |= GUSBCFG_SRPCAP;
break;
@@ -390,7 +391,7 @@ static void dwc2_disable_host_interrupts(struct dwc2_hsotg *hsotg)
*/
static void dwc2_calculate_dynamic_fifo(struct dwc2_hsotg *hsotg)
{
- struct dwc2_core_params *params = hsotg->core_params;
+ struct dwc2_core_params *params = &hsotg->params;
struct dwc2_hw_params *hw = &hsotg->hw_params;
u32 rxfsiz, nptxfsiz, ptxfsiz, total_fifo_size;
@@ -449,7 +450,7 @@ static void dwc2_calculate_dynamic_fifo(struct dwc2_hsotg *hsotg)
static void dwc2_config_fifos(struct dwc2_hsotg *hsotg)
{
- struct dwc2_core_params *params = hsotg->core_params;
+ struct dwc2_core_params *params = &hsotg->params;
u32 nptxfsiz, hptxfsiz, dfifocfg, grxfsiz;
if (!params->enable_dynamic_fifo)
@@ -490,7 +491,7 @@ static void dwc2_config_fifos(struct dwc2_hsotg *hsotg)
dev_dbg(hsotg->dev, "new hptxfsiz=%08x\n",
dwc2_readl(hsotg->regs + HPTXFSIZ));
- if (hsotg->core_params->en_multiple_tx_fifo > 0 &&
+ if (hsotg->params.en_multiple_tx_fifo > 0 &&
hsotg->hw_params.snpsid <= DWC2_CORE_REV_2_94a) {
/*
* Global DFIFOCFG calculation for Host mode -
@@ -598,7 +599,7 @@ static void dwc2_dump_channel_info(struct dwc2_hsotg *hsotg,
struct dwc2_host_chan *chan)
{
#ifdef VERBOSE_DEBUG
- int num_channels = hsotg->core_params->host_channels;
+ int num_channels = hsotg->params.host_channels;
struct dwc2_qh *qh;
u32 hcchar;
u32 hcsplt;
@@ -648,6 +649,35 @@ static void dwc2_dump_channel_info(struct dwc2_hsotg *hsotg,
#endif /* VERBOSE_DEBUG */
}
+static int _dwc2_hcd_start(struct usb_hcd *hcd);
+
+static void dwc2_host_start(struct dwc2_hsotg *hsotg)
+{
+ struct usb_hcd *hcd = dwc2_hsotg_to_hcd(hsotg);
+
+ hcd->self.is_b_host = dwc2_hcd_is_b_host(hsotg);
+ _dwc2_hcd_start(hcd);
+}
+
+static void dwc2_host_disconnect(struct dwc2_hsotg *hsotg)
+{
+ struct usb_hcd *hcd = dwc2_hsotg_to_hcd(hsotg);
+
+ hcd->self.is_b_host = 0;
+}
+
+static void dwc2_host_hub_info(struct dwc2_hsotg *hsotg, void *context,
+ int *hub_addr, int *hub_port)
+{
+ struct urb *urb = context;
+
+ if (urb->dev->tt)
+ *hub_addr = urb->dev->tt->hub->devnum;
+ else
+ *hub_addr = 0;
+ *hub_port = urb->dev->ttport;
+}
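/*
 * Editorial note: dwc2_host_start(), dwc2_host_disconnect() and
 * dwc2_host_hub_info() move here from the bottom of hcd.c and lose their
 * extern declarations in hcd.h (see the hunks below), becoming static,
 * file-local callbacks instead of part of the driver-wide API.
 */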
+
/*
* =========================================================================
* Low Level Host Channel Access Functions
@@ -741,7 +771,7 @@ static void dwc2_hc_enable_dma_ints(struct dwc2_hsotg *hsotg,
* For Descriptor DMA mode core halts the channel on AHB error.
* Interrupt is not required.
*/
- if (hsotg->core_params->dma_desc_enable <= 0) {
+ if (hsotg->params.dma_desc_enable <= 0) {
if (dbg_hc(chan))
dev_vdbg(hsotg->dev, "desc DMA disabled\n");
hcintmsk |= HCINTMSK_AHBERR;
@@ -774,7 +804,7 @@ static void dwc2_hc_enable_ints(struct dwc2_hsotg *hsotg,
{
u32 intmsk;
- if (hsotg->core_params->dma_enable > 0) {
+ if (hsotg->params.host_dma > 0) {
if (dbg_hc(chan))
dev_vdbg(hsotg->dev, "DMA enabled\n");
dwc2_hc_enable_dma_ints(hsotg, chan);
@@ -994,7 +1024,7 @@ void dwc2_hc_halt(struct dwc2_hsotg *hsotg, struct dwc2_host_chan *chan,
/* No need to set the bit in DDMA for disabling the channel */
/* TODO check it everywhere channel is disabled */
- if (hsotg->core_params->dma_desc_enable <= 0) {
+ if (hsotg->params.dma_desc_enable <= 0) {
if (dbg_hc(chan))
dev_vdbg(hsotg->dev, "desc DMA disabled\n");
hcchar |= HCCHAR_CHENA;
@@ -1004,7 +1034,7 @@ void dwc2_hc_halt(struct dwc2_hsotg *hsotg, struct dwc2_host_chan *chan,
}
hcchar |= HCCHAR_CHDIS;
- if (hsotg->core_params->dma_enable <= 0) {
+ if (hsotg->params.host_dma <= 0) {
if (dbg_hc(chan))
dev_vdbg(hsotg->dev, "DMA not enabled\n");
hcchar |= HCCHAR_CHENA;
@@ -1143,7 +1173,7 @@ static void dwc2_hc_set_even_odd_frame(struct dwc2_hsotg *hsotg,
fifo_space = (dwc2_readl(hsotg->regs + HPTXSTS) &
TXSTS_FSPCAVAIL_MASK) >> TXSTS_FSPCAVAIL_SHIFT;
bytes_in_fifo = sizeof(u32) *
- (hsotg->core_params->host_perio_tx_fifo_size -
+ (hsotg->params.host_perio_tx_fifo_size -
fifo_space);
/*
@@ -1339,8 +1369,8 @@ static void dwc2_hc_do_ping(struct dwc2_hsotg *hsotg,
static void dwc2_hc_start_transfer(struct dwc2_hsotg *hsotg,
struct dwc2_host_chan *chan)
{
- u32 max_hc_xfer_size = hsotg->core_params->max_transfer_size;
- u16 max_hc_pkt_count = hsotg->core_params->max_packet_count;
+ u32 max_hc_xfer_size = hsotg->params.max_transfer_size;
+ u16 max_hc_pkt_count = hsotg->params.max_packet_count;
u32 hcchar;
u32 hctsiz = 0;
u16 num_packets;
@@ -1350,7 +1380,7 @@ static void dwc2_hc_start_transfer(struct dwc2_hsotg *hsotg,
dev_vdbg(hsotg->dev, "%s()\n", __func__);
if (chan->do_ping) {
- if (hsotg->core_params->dma_enable <= 0) {
+ if (hsotg->params.host_dma <= 0) {
if (dbg_hc(chan))
dev_vdbg(hsotg->dev, "ping, no DMA\n");
dwc2_hc_do_ping(hsotg, chan);
@@ -1478,7 +1508,7 @@ static void dwc2_hc_start_transfer(struct dwc2_hsotg *hsotg,
TSIZ_SC_MC_PID_SHIFT);
}
- if (hsotg->core_params->dma_enable > 0) {
+ if (hsotg->params.host_dma > 0) {
dwc2_writel((u32)chan->xfer_dma,
hsotg->regs + HCDMA(chan->hc_num));
if (dbg_hc(chan))
@@ -1521,7 +1551,7 @@ static void dwc2_hc_start_transfer(struct dwc2_hsotg *hsotg,
chan->xfer_started = 1;
chan->requests++;
- if (hsotg->core_params->dma_enable <= 0 &&
+ if (hsotg->params.host_dma <= 0 &&
!chan->ep_is_in && chan->xfer_len > 0)
/* Load OUT packet into the appropriate Tx FIFO */
dwc2_hc_write_packet(hsotg, chan);
@@ -1799,12 +1829,12 @@ void dwc2_hcd_start(struct dwc2_hsotg *hsotg)
/* Must be called with interrupt disabled and spinlock held */
static void dwc2_hcd_cleanup_channels(struct dwc2_hsotg *hsotg)
{
- int num_channels = hsotg->core_params->host_channels;
+ int num_channels = hsotg->params.host_channels;
struct dwc2_host_chan *channel;
u32 hcchar;
int i;
- if (hsotg->core_params->dma_enable <= 0) {
+ if (hsotg->params.host_dma <= 0) {
/* Flush out any channel requests in slave mode */
for (i = 0; i < num_channels; i++) {
channel = hsotg->hc_ptr_array[i];
@@ -1840,9 +1870,9 @@ static void dwc2_hcd_cleanup_channels(struct dwc2_hsotg *hsotg)
channel->qh = NULL;
}
/* All channels have been freed, mark them available */
- if (hsotg->core_params->uframe_sched > 0) {
+ if (hsotg->params.uframe_sched > 0) {
hsotg->available_host_channels =
- hsotg->core_params->host_channels;
+ hsotg->params.host_channels;
} else {
hsotg->non_periodic_channels = 0;
hsotg->periodic_channels = 0;
@@ -2077,7 +2107,7 @@ static int dwc2_hcd_urb_dequeue(struct dwc2_hsotg *hsotg,
* Free the QTD and clean up the associated QH. Leave the QH in the
* schedule if it has any remaining QTDs.
*/
- if (hsotg->core_params->dma_desc_enable <= 0) {
+ if (hsotg->params.dma_desc_enable <= 0) {
u8 in_process = urb_qtd->in_process;
dwc2_hcd_qtd_unlink_and_free(hsotg, urb_qtd, qh);
@@ -2185,13 +2215,13 @@ static int dwc2_core_init(struct dwc2_hsotg *hsotg, bool initial_setup)
/* Set ULPI External VBUS bit if needed */
usbcfg &= ~GUSBCFG_ULPI_EXT_VBUS_DRV;
- if (hsotg->core_params->phy_ulpi_ext_vbus ==
+ if (hsotg->params.phy_ulpi_ext_vbus ==
DWC2_PHY_ULPI_EXTERNAL_VBUS)
usbcfg |= GUSBCFG_ULPI_EXT_VBUS_DRV;
/* Set external TS Dline pulsing bit if needed */
usbcfg &= ~GUSBCFG_TERMSELDLPULSE;
- if (hsotg->core_params->ts_dline > 0)
+ if (hsotg->params.ts_dline > 0)
usbcfg |= GUSBCFG_TERMSELDLPULSE;
dwc2_writel(usbcfg, hsotg->regs + GUSBCFG);
@@ -2230,10 +2260,10 @@ static int dwc2_core_init(struct dwc2_hsotg *hsotg, bool initial_setup)
/* Program the GOTGCTL register */
otgctl = dwc2_readl(hsotg->regs + GOTGCTL);
otgctl &= ~GOTGCTL_OTGVER;
- if (hsotg->core_params->otg_ver > 0)
+ if (hsotg->params.otg_ver > 0)
otgctl |= GOTGCTL_OTGVER;
dwc2_writel(otgctl, hsotg->regs + GOTGCTL);
- dev_dbg(hsotg->dev, "OTG VER PARAM: %d\n", hsotg->core_params->otg_ver);
+ dev_dbg(hsotg->dev, "OTG VER PARAM: %d\n", hsotg->params.otg_ver);
/* Clear the SRP success bit for FS-I2c */
hsotg->srp_success = 0;
@@ -2277,7 +2307,8 @@ static void dwc2_core_host_init(struct dwc2_hsotg *hsotg)
/* Initialize Host Configuration Register */
dwc2_init_fs_ls_pclk_sel(hsotg);
- if (hsotg->core_params->speed == DWC2_SPEED_PARAM_FULL) {
+ if (hsotg->params.speed == DWC2_SPEED_PARAM_FULL ||
+ hsotg->params.speed == DWC2_SPEED_PARAM_LOW) {
hcfg = dwc2_readl(hsotg->regs + HCFG);
hcfg |= HCFG_FSLSSUPP;
dwc2_writel(hcfg, hsotg->regs + HCFG);
@@ -2288,13 +2319,13 @@ static void dwc2_core_host_init(struct dwc2_hsotg *hsotg)
* runtime. This bit needs to be programmed during initial configuration
* and its value must not be changed during runtime.
*/
- if (hsotg->core_params->reload_ctl > 0) {
+ if (hsotg->params.reload_ctl > 0) {
hfir = dwc2_readl(hsotg->regs + HFIR);
hfir |= HFIR_RLDCTRL;
dwc2_writel(hfir, hsotg->regs + HFIR);
}
- if (hsotg->core_params->dma_desc_enable > 0) {
+ if (hsotg->params.dma_desc_enable > 0) {
u32 op_mode = hsotg->hw_params.op_mode;
if (hsotg->hw_params.snpsid < DWC2_CORE_REV_2_90a ||
@@ -2306,7 +2337,7 @@ static void dwc2_core_host_init(struct dwc2_hsotg *hsotg)
"Hardware does not support descriptor DMA mode -\n");
dev_err(hsotg->dev,
"falling back to buffer DMA mode.\n");
- hsotg->core_params->dma_desc_enable = 0;
+ hsotg->params.dma_desc_enable = 0;
} else {
hcfg = dwc2_readl(hsotg->regs + HCFG);
hcfg |= HCFG_DESCDMA;
@@ -2332,12 +2363,12 @@ static void dwc2_core_host_init(struct dwc2_hsotg *hsotg)
otgctl &= ~GOTGCTL_HSTSETHNPEN;
dwc2_writel(otgctl, hsotg->regs + GOTGCTL);
- if (hsotg->core_params->dma_desc_enable <= 0) {
+ if (hsotg->params.dma_desc_enable <= 0) {
int num_channels, i;
u32 hcchar;
/* Flush out any leftover queued requests */
- num_channels = hsotg->core_params->host_channels;
+ num_channels = hsotg->params.host_channels;
for (i = 0; i < num_channels; i++) {
hcchar = dwc2_readl(hsotg->regs + HCCHAR(i));
hcchar &= ~HCCHAR_CHENA;
@@ -2399,9 +2430,9 @@ static void dwc2_hcd_reinit(struct dwc2_hsotg *hsotg)
hsotg->flags.d32 = 0;
hsotg->non_periodic_qh_ptr = &hsotg->non_periodic_sched_active;
- if (hsotg->core_params->uframe_sched > 0) {
+ if (hsotg->params.uframe_sched > 0) {
hsotg->available_host_channels =
- hsotg->core_params->host_channels;
+ hsotg->params.host_channels;
} else {
hsotg->non_periodic_channels = 0;
hsotg->periodic_channels = 0;
@@ -2415,7 +2446,7 @@ static void dwc2_hcd_reinit(struct dwc2_hsotg *hsotg)
hc_list_entry)
list_del_init(&chan->hc_list_entry);
- num_channels = hsotg->core_params->host_channels;
+ num_channels = hsotg->params.host_channels;
for (i = 0; i < num_channels; i++) {
chan = hsotg->hc_ptr_array[i];
list_add_tail(&chan->hc_list_entry, &hsotg->free_hc_list);
@@ -2457,7 +2488,7 @@ static void dwc2_hc_init_xfer(struct dwc2_hsotg *hsotg,
chan->do_ping = 0;
chan->ep_is_in = 0;
chan->data_pid_start = DWC2_HC_PID_SETUP;
- if (hsotg->core_params->dma_enable > 0)
+ if (hsotg->params.host_dma > 0)
chan->xfer_dma = urb->setup_dma;
else
chan->xfer_buf = urb->setup_packet;
@@ -2484,7 +2515,7 @@ static void dwc2_hc_init_xfer(struct dwc2_hsotg *hsotg,
chan->do_ping = 0;
chan->data_pid_start = DWC2_HC_PID_DATA1;
chan->xfer_len = 0;
- if (hsotg->core_params->dma_enable > 0)
+ if (hsotg->params.host_dma > 0)
chan->xfer_dma = hsotg->status_buf_dma;
else
chan->xfer_buf = hsotg->status_buf;
@@ -2502,13 +2533,13 @@ static void dwc2_hc_init_xfer(struct dwc2_hsotg *hsotg,
case USB_ENDPOINT_XFER_ISOC:
chan->ep_type = USB_ENDPOINT_XFER_ISOC;
- if (hsotg->core_params->dma_desc_enable > 0)
+ if (hsotg->params.dma_desc_enable > 0)
break;
frame_desc = &urb->iso_descs[qtd->isoc_frame_index];
frame_desc->status = 0;
- if (hsotg->core_params->dma_enable > 0) {
+ if (hsotg->params.host_dma > 0) {
chan->xfer_dma = urb->dma;
chan->xfer_dma += frame_desc->offset +
qtd->isoc_split_offset;
@@ -2690,7 +2721,7 @@ static int dwc2_assign_and_init_hc(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
!dwc2_hcd_is_pipe_in(&urb->pipe_info))
urb->actual_length = urb->length;
- if (hsotg->core_params->dma_enable > 0)
+ if (hsotg->params.host_dma > 0)
chan->xfer_dma = urb->dma + urb->actual_length;
else
chan->xfer_buf = (u8 *)urb->buf + urb->actual_length;
@@ -2715,7 +2746,7 @@ static int dwc2_assign_and_init_hc(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
*/
chan->multi_count = dwc2_hb_mult(qh->maxp);
- if (hsotg->core_params->dma_desc_enable > 0) {
+ if (hsotg->params.dma_desc_enable > 0) {
chan->desc_list_addr = qh->desc_list_dma;
chan->desc_list_sz = qh->desc_list_sz;
}
@@ -2752,7 +2783,7 @@ enum dwc2_transaction_type dwc2_hcd_select_transactions(
while (qh_ptr != &hsotg->periodic_sched_ready) {
if (list_empty(&hsotg->free_hc_list))
break;
- if (hsotg->core_params->uframe_sched > 0) {
+ if (hsotg->params.uframe_sched > 0) {
if (hsotg->available_host_channels <= 1)
break;
hsotg->available_host_channels--;
@@ -2776,17 +2807,17 @@ enum dwc2_transaction_type dwc2_hcd_select_transactions(
* schedule. Some free host channels may not be used if they are
* reserved for periodic transfers.
*/
- num_channels = hsotg->core_params->host_channels;
+ num_channels = hsotg->params.host_channels;
qh_ptr = hsotg->non_periodic_sched_inactive.next;
while (qh_ptr != &hsotg->non_periodic_sched_inactive) {
- if (hsotg->core_params->uframe_sched <= 0 &&
+ if (hsotg->params.uframe_sched <= 0 &&
hsotg->non_periodic_channels >= num_channels -
hsotg->periodic_channels)
break;
if (list_empty(&hsotg->free_hc_list))
break;
qh = list_entry(qh_ptr, struct dwc2_qh, qh_list_entry);
- if (hsotg->core_params->uframe_sched > 0) {
+ if (hsotg->params.uframe_sched > 0) {
if (hsotg->available_host_channels < 1)
break;
hsotg->available_host_channels--;
@@ -2808,7 +2839,7 @@ enum dwc2_transaction_type dwc2_hcd_select_transactions(
else
ret_val = DWC2_TRANSACTION_ALL;
- if (hsotg->core_params->uframe_sched <= 0)
+ if (hsotg->params.uframe_sched <= 0)
hsotg->non_periodic_channels++;
}
@@ -2847,8 +2878,8 @@ static int dwc2_queue_transaction(struct dwc2_hsotg *hsotg,
list_move_tail(&chan->split_order_list_entry,
&hsotg->split_order);
- if (hsotg->core_params->dma_enable > 0) {
- if (hsotg->core_params->dma_desc_enable > 0) {
+ if (hsotg->params.host_dma > 0) {
+ if (hsotg->params.dma_desc_enable > 0) {
if (!chan->xfer_started ||
chan->ep_type == USB_ENDPOINT_XFER_ISOC) {
dwc2_hcd_start_xfer_ddma(hsotg, chan->qh);
@@ -2957,7 +2988,7 @@ static void dwc2_process_periodic_channels(struct dwc2_hsotg *hsotg)
 * The flag prevents any halts from getting into the request queue in
* the middle of multiple high-bandwidth packets getting queued.
*/
- if (hsotg->core_params->dma_enable <= 0 &&
+ if (hsotg->params.host_dma <= 0 &&
qh->channel->multi_count > 1)
hsotg->queuing_high_bandwidth = 1;
@@ -2976,7 +3007,7 @@ static void dwc2_process_periodic_channels(struct dwc2_hsotg *hsotg)
* controller automatically handles multiple packets for
* high-bandwidth transfers.
*/
- if (hsotg->core_params->dma_enable > 0 || status == 0 ||
+ if (hsotg->params.host_dma > 0 || status == 0 ||
qh->channel->requests == qh->channel->multi_count) {
qh_ptr = qh_ptr->next;
/*
@@ -2993,7 +3024,7 @@ static void dwc2_process_periodic_channels(struct dwc2_hsotg *hsotg)
exit:
if (no_queue_space || no_fifo_space ||
- (hsotg->core_params->dma_enable <= 0 &&
+ (hsotg->params.host_dma <= 0 &&
!list_empty(&hsotg->periodic_sched_assigned))) {
/*
* May need to queue more transactions as the request
@@ -3073,7 +3104,7 @@ static void dwc2_process_non_periodic_channels(struct dwc2_hsotg *hsotg)
tx_status = dwc2_readl(hsotg->regs + GNPTXSTS);
qspcavail = (tx_status & TXSTS_QSPCAVAIL_MASK) >>
TXSTS_QSPCAVAIL_SHIFT;
- if (hsotg->core_params->dma_enable <= 0 && qspcavail == 0) {
+ if (hsotg->params.host_dma <= 0 && qspcavail == 0) {
no_queue_space = 1;
break;
}
@@ -3106,7 +3137,7 @@ next:
hsotg->non_periodic_qh_ptr->next;
} while (hsotg->non_periodic_qh_ptr != orig_qh_ptr);
- if (hsotg->core_params->dma_enable <= 0) {
+ if (hsotg->params.host_dma <= 0) {
tx_status = dwc2_readl(hsotg->regs + GNPTXSTS);
qspcavail = (tx_status & TXSTS_QSPCAVAIL_MASK) >>
TXSTS_QSPCAVAIL_SHIFT;
@@ -3307,7 +3338,7 @@ static void dwc2_port_suspend(struct dwc2_hsotg *hsotg, u16 windex)
 * If hibernation is supported, the PHY clock will be suspended
 * after the registers are backed up.
*/
- if (!hsotg->core_params->hibernation) {
+ if (!hsotg->params.hibernation) {
/* Suspend the Phy Clock */
pcgctl = dwc2_readl(hsotg->regs + PCGCTL);
pcgctl |= PCGCTL_STOPPCLK;
@@ -3342,7 +3373,7 @@ static void dwc2_port_resume(struct dwc2_hsotg *hsotg)
 * If hibernation is supported, the PHY clock is already resumed
 * after the registers are restored.
*/
- if (!hsotg->core_params->hibernation) {
+ if (!hsotg->params.hibernation) {
pcgctl = dwc2_readl(hsotg->regs + PCGCTL);
pcgctl &= ~PCGCTL_STOPPCLK;
dwc2_writel(pcgctl, hsotg->regs + PCGCTL);
@@ -3569,7 +3600,7 @@ static int dwc2_hcd_hub_control(struct dwc2_hsotg *hsotg, u16 typereq,
port_status |= USB_PORT_STAT_TEST;
/* USB_PORT_FEAT_INDICATOR unsupported always 0 */
- if (hsotg->core_params->dma_desc_fs_enable) {
+ if (hsotg->params.dma_desc_fs_enable) {
/*
* Enable descriptor DMA only if a full speed
* device is connected.
@@ -3583,7 +3614,7 @@ static int dwc2_hcd_hub_control(struct dwc2_hsotg *hsotg, u16 typereq,
u32 hcfg;
dev_info(hsotg->dev, "Enabling descriptor DMA mode\n");
- hsotg->core_params->dma_desc_enable = 1;
+ hsotg->params.dma_desc_enable = 1;
hcfg = dwc2_readl(hsotg->regs + HCFG);
hcfg |= HCFG_DESCDMA;
dwc2_writel(hcfg, hsotg->regs + HCFG);
@@ -3824,7 +3855,7 @@ void dwc2_hcd_dump_state(struct dwc2_hsotg *hsotg)
u32 p_tx_status;
int i;
- num_channels = hsotg->core_params->host_channels;
+ num_channels = hsotg->params.host_channels;
dev_dbg(hsotg->dev, "\n");
dev_dbg(hsotg->dev,
"************************************************************\n");
@@ -4020,35 +4051,6 @@ static struct dwc2_hsotg *dwc2_hcd_to_hsotg(struct usb_hcd *hcd)
return p->hsotg;
}
-static int _dwc2_hcd_start(struct usb_hcd *hcd);
-
-void dwc2_host_start(struct dwc2_hsotg *hsotg)
-{
- struct usb_hcd *hcd = dwc2_hsotg_to_hcd(hsotg);
-
- hcd->self.is_b_host = dwc2_hcd_is_b_host(hsotg);
- _dwc2_hcd_start(hcd);
-}
-
-void dwc2_host_disconnect(struct dwc2_hsotg *hsotg)
-{
- struct usb_hcd *hcd = dwc2_hsotg_to_hcd(hsotg);
-
- hcd->self.is_b_host = 0;
-}
-
-void dwc2_host_hub_info(struct dwc2_hsotg *hsotg, void *context, int *hub_addr,
- int *hub_port)
-{
- struct urb *urb = context;
-
- if (urb->dev->tt)
- *hub_addr = urb->dev->tt->hub->devnum;
- else
- *hub_addr = 0;
- *hub_port = urb->dev->ttport;
-}
-
/**
* dwc2_host_get_tt_info() - Get the dwc2_tt associated with context
*
@@ -4365,7 +4367,7 @@ static int _dwc2_hcd_suspend(struct usb_hcd *hcd)
if (!HCD_HW_ACCESSIBLE(hcd))
goto unlock;
- if (!hsotg->core_params->hibernation)
+ if (!hsotg->params.hibernation)
goto skip_power_saving;
/*
@@ -4417,7 +4419,7 @@ static int _dwc2_hcd_resume(struct usb_hcd *hcd)
if (hsotg->lx_state != DWC2_L2)
goto unlock;
- if (!hsotg->core_params->hibernation) {
+ if (!hsotg->params.hibernation) {
hsotg->lx_state = DWC2_L0;
goto unlock;
}
@@ -4510,9 +4512,6 @@ static void dwc2_dump_urb_info(struct usb_hcd *hcd, struct urb *urb,
case PIPE_ISOCHRONOUS:
pipetype = "ISOCHRONOUS";
break;
- default:
- pipetype = "UNKNOWN";
- break;
}
dev_vdbg(hsotg->dev, " Endpoint type: %s %s (%s)\n", pipetype,
@@ -4609,8 +4608,6 @@ static int _dwc2_hcd_urb_enqueue(struct usb_hcd *hcd, struct urb *urb,
case PIPE_INTERRUPT:
ep_type = USB_ENDPOINT_XFER_INT;
break;
- default:
- dev_warn(hsotg->dev, "Wrong ep type\n");
}
dwc2_urb = dwc2_hcd_urb_alloc(hsotg, urb->number_of_packets,
@@ -4919,7 +4916,7 @@ static void dwc2_hcd_free(struct dwc2_hsotg *hsotg)
}
}
- if (hsotg->core_params->dma_enable > 0) {
+ if (hsotg->params.host_dma > 0) {
if (hsotg->status_buf) {
dma_free_coherent(hsotg->dev, DWC2_HCD_STATUS_BUF_SIZE,
hsotg->status_buf,
@@ -4999,16 +4996,16 @@ int dwc2_hcd_init(struct dwc2_hsotg *hsotg, int irq)
hsotg->last_frame_num = HFNUM_MAX_FRNUM;
 /* Check if the bus driver or platform code has set up a dma_mask */
- if (hsotg->core_params->dma_enable > 0 &&
+ if (hsotg->params.host_dma > 0 &&
hsotg->dev->dma_mask == NULL) {
dev_warn(hsotg->dev,
"dma_mask not set, disabling DMA\n");
- hsotg->core_params->dma_enable = 0;
- hsotg->core_params->dma_desc_enable = 0;
+ hsotg->params.host_dma = 0;
+ hsotg->params.dma_desc_enable = 0;
}
/* Set device flags indicating whether the HCD supports DMA */
- if (hsotg->core_params->dma_enable > 0) {
+ if (hsotg->params.host_dma > 0) {
if (dma_set_mask(hsotg->dev, DMA_BIT_MASK(32)) < 0)
dev_warn(hsotg->dev, "can't set DMA mask\n");
if (dma_set_coherent_mask(hsotg->dev, DMA_BIT_MASK(32)) < 0)
@@ -5019,7 +5016,7 @@ int dwc2_hcd_init(struct dwc2_hsotg *hsotg, int irq)
if (!hcd)
goto error1;
- if (hsotg->core_params->dma_enable <= 0)
+ if (hsotg->params.host_dma <= 0)
hcd->self.uses_dma = 0;
hcd->has_tt = 1;
@@ -5067,7 +5064,7 @@ int dwc2_hcd_init(struct dwc2_hsotg *hsotg, int irq)
* in the controller. Initialize the channel descriptor array.
*/
INIT_LIST_HEAD(&hsotg->free_hc_list);
- num_channels = hsotg->core_params->host_channels;
+ num_channels = hsotg->params.host_channels;
memset(&hsotg->hc_ptr_array[0], 0, sizeof(hsotg->hc_ptr_array));
for (i = 0; i < num_channels; i++) {
@@ -5091,7 +5088,7 @@ int dwc2_hcd_init(struct dwc2_hsotg *hsotg, int irq)
* done after usb_add_hcd since that function allocates the DMA buffer
* pool.
*/
- if (hsotg->core_params->dma_enable > 0)
+ if (hsotg->params.host_dma > 0)
hsotg->status_buf = dma_alloc_coherent(hsotg->dev,
DWC2_HCD_STATUS_BUF_SIZE,
&hsotg->status_buf_dma, GFP_KERNEL);
@@ -5107,10 +5104,10 @@ int dwc2_hcd_init(struct dwc2_hsotg *hsotg, int irq)
* DMA mode.
* Alignment must be set to 512 bytes.
*/
- if (hsotg->core_params->dma_desc_enable ||
- hsotg->core_params->dma_desc_fs_enable) {
+ if (hsotg->params.dma_desc_enable ||
+ hsotg->params.dma_desc_fs_enable) {
hsotg->desc_gen_cache = kmem_cache_create("dwc2-gen-desc",
- sizeof(struct dwc2_hcd_dma_desc) *
+ sizeof(struct dwc2_dma_desc) *
MAX_DMA_DESC_NUM_GENERIC, 512, SLAB_CACHE_DMA,
NULL);
if (!hsotg->desc_gen_cache) {
@@ -5121,12 +5118,12 @@ int dwc2_hcd_init(struct dwc2_hsotg *hsotg, int irq)
* Disable descriptor dma mode since it will not be
* usable.
*/
- hsotg->core_params->dma_desc_enable = 0;
- hsotg->core_params->dma_desc_fs_enable = 0;
+ hsotg->params.dma_desc_enable = 0;
+ hsotg->params.dma_desc_fs_enable = 0;
}
hsotg->desc_hsisoc_cache = kmem_cache_create("dwc2-hsisoc-desc",
- sizeof(struct dwc2_hcd_dma_desc) *
+ sizeof(struct dwc2_dma_desc) *
MAX_DMA_DESC_NUM_HS_ISOC, 512, 0, NULL);
if (!hsotg->desc_hsisoc_cache) {
dev_err(hsotg->dev,
@@ -5138,8 +5135,8 @@ int dwc2_hcd_init(struct dwc2_hsotg *hsotg, int irq)
* Disable descriptor dma mode since it will not be
* usable.
*/
- hsotg->core_params->dma_desc_enable = 0;
- hsotg->core_params->dma_desc_fs_enable = 0;
+ hsotg->params.dma_desc_enable = 0;
+ hsotg->params.dma_desc_fs_enable = 0;
}
}
@@ -5184,7 +5181,6 @@ error3:
error2:
usb_put_hcd(hcd);
error1:
- kfree(hsotg->core_params);
#ifdef CONFIG_USB_DWC2_TRACK_MISSED_SOFS
kfree(hsotg->last_frame_num_array);
@@ -5250,7 +5246,7 @@ int dwc2_backup_host_registers(struct dwc2_hsotg *hsotg)
hr = &hsotg->hr_backup;
hr->hcfg = dwc2_readl(hsotg->regs + HCFG);
hr->haintmsk = dwc2_readl(hsotg->regs + HAINTMSK);
- for (i = 0; i < hsotg->core_params->host_channels; ++i)
+ for (i = 0; i < hsotg->params.host_channels; ++i)
hr->hcintmsk[i] = dwc2_readl(hsotg->regs + HCINTMSK(i));
hr->hprt0 = dwc2_read_hprt0(hsotg);
@@ -5286,7 +5282,7 @@ int dwc2_restore_host_registers(struct dwc2_hsotg *hsotg)
dwc2_writel(hr->hcfg, hsotg->regs + HCFG);
dwc2_writel(hr->haintmsk, hsotg->regs + HAINTMSK);
- for (i = 0; i < hsotg->core_params->host_channels; ++i)
+ for (i = 0; i < hsotg->params.host_channels; ++i)
dwc2_writel(hr->hcintmsk[i], hsotg->regs + HCINTMSK(i));
dwc2_writel(hr->hprt0, hsotg->regs + HPRT0);
diff --git a/drivers/usb/dwc2/hcd.h b/drivers/usb/dwc2/hcd.h
index 7758bfb644ff..1ed5fa2beff4 100644
--- a/drivers/usb/dwc2/hcd.h
+++ b/drivers/usb/dwc2/hcd.h
@@ -348,7 +348,7 @@ struct dwc2_qh {
struct list_head qtd_list;
struct dwc2_host_chan *channel;
struct list_head qh_list_entry;
- struct dwc2_hcd_dma_desc *desc_list;
+ struct dwc2_dma_desc *desc_list;
dma_addr_t desc_list_dma;
u32 desc_list_sz;
u32 *n_bytes;
@@ -793,11 +793,6 @@ extern void dwc2_hcd_dump_frrem(struct dwc2_hsotg *hsotg);
#define URB_SEND_ZERO_PACKET 0x2
/* Host driver callbacks */
-
-extern void dwc2_host_start(struct dwc2_hsotg *hsotg);
-extern void dwc2_host_disconnect(struct dwc2_hsotg *hsotg);
-extern void dwc2_host_hub_info(struct dwc2_hsotg *hsotg, void *context,
- int *hub_addr, int *hub_port);
extern struct dwc2_tt *dwc2_host_get_tt_info(struct dwc2_hsotg *hsotg,
void *context, gfp_t mem_flags,
int *ttport);
diff --git a/drivers/usb/dwc2/hcd_ddma.c b/drivers/usb/dwc2/hcd_ddma.c
index 0e1d42b5dec5..cf0367768cb3 100644
--- a/drivers/usb/dwc2/hcd_ddma.c
+++ b/drivers/usb/dwc2/hcd_ddma.c
@@ -95,7 +95,7 @@ static int dwc2_desc_list_alloc(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh,
else
desc_cache = hsotg->desc_gen_cache;
- qh->desc_list_sz = sizeof(struct dwc2_hcd_dma_desc) *
+ qh->desc_list_sz = sizeof(struct dwc2_dma_desc) *
dwc2_max_desc_num(qh);
qh->desc_list = kmem_cache_zalloc(desc_cache, flags | GFP_DMA);
@@ -297,7 +297,7 @@ static void dwc2_release_channel_ddma(struct dwc2_hsotg *hsotg,
struct dwc2_host_chan *chan = qh->channel;
if (dwc2_qh_is_non_per(qh)) {
- if (hsotg->core_params->uframe_sched > 0)
+ if (hsotg->params.uframe_sched > 0)
hsotg->available_host_channels++;
else
hsotg->non_periodic_channels--;
@@ -322,7 +322,7 @@ static void dwc2_release_channel_ddma(struct dwc2_hsotg *hsotg,
qh->ntd = 0;
if (qh->desc_list)
- memset(qh->desc_list, 0, sizeof(struct dwc2_hcd_dma_desc) *
+ memset(qh->desc_list, 0, sizeof(struct dwc2_dma_desc) *
dwc2_max_desc_num(qh));
}
@@ -404,7 +404,7 @@ void dwc2_hcd_qh_free_ddma(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
if ((qh->ep_type == USB_ENDPOINT_XFER_ISOC ||
qh->ep_type == USB_ENDPOINT_XFER_INT) &&
- (hsotg->core_params->uframe_sched > 0 ||
+ (hsotg->params.uframe_sched > 0 ||
!hsotg->periodic_channels) && hsotg->frame_list) {
dwc2_per_sched_disable(hsotg);
dwc2_frame_list_free(hsotg);
@@ -542,7 +542,7 @@ static void dwc2_fill_host_isoc_dma_desc(struct dwc2_hsotg *hsotg,
struct dwc2_qh *qh, u32 max_xfer_size,
u16 idx)
{
- struct dwc2_hcd_dma_desc *dma_desc = &qh->desc_list[idx];
+ struct dwc2_dma_desc *dma_desc = &qh->desc_list[idx];
struct dwc2_hcd_iso_packet_desc *frame_desc;
memset(dma_desc, 0, sizeof(*dma_desc));
@@ -571,8 +571,8 @@ static void dwc2_fill_host_isoc_dma_desc(struct dwc2_hsotg *hsotg,
dma_sync_single_for_device(hsotg->dev,
qh->desc_list_dma +
- (idx * sizeof(struct dwc2_hcd_dma_desc)),
- sizeof(struct dwc2_hcd_dma_desc),
+ (idx * sizeof(struct dwc2_dma_desc)),
+ sizeof(struct dwc2_dma_desc),
DMA_TO_DEVICE);
}
@@ -645,8 +645,8 @@ static void dwc2_init_isoc_dma_desc(struct dwc2_hsotg *hsotg,
qh->desc_list[idx].status |= HOST_DMA_IOC;
dma_sync_single_for_device(hsotg->dev,
qh->desc_list_dma + (idx *
- sizeof(struct dwc2_hcd_dma_desc)),
- sizeof(struct dwc2_hcd_dma_desc),
+ sizeof(struct dwc2_dma_desc)),
+ sizeof(struct dwc2_dma_desc),
DMA_TO_DEVICE);
}
#else
@@ -679,8 +679,8 @@ static void dwc2_init_isoc_dma_desc(struct dwc2_hsotg *hsotg,
qh->desc_list[idx].status |= HOST_DMA_IOC;
dma_sync_single_for_device(hsotg->dev,
qh->desc_list_dma +
- (idx * sizeof(struct dwc2_hcd_dma_desc)),
- sizeof(struct dwc2_hcd_dma_desc),
+ (idx * sizeof(struct dwc2_dma_desc)),
+ sizeof(struct dwc2_dma_desc),
DMA_TO_DEVICE);
#endif
}
@@ -690,11 +690,11 @@ static void dwc2_fill_host_dma_desc(struct dwc2_hsotg *hsotg,
struct dwc2_qtd *qtd, struct dwc2_qh *qh,
int n_desc)
{
- struct dwc2_hcd_dma_desc *dma_desc = &qh->desc_list[n_desc];
+ struct dwc2_dma_desc *dma_desc = &qh->desc_list[n_desc];
int len = chan->xfer_len;
- if (len > MAX_DMA_DESC_SIZE - (chan->max_packet - 1))
- len = MAX_DMA_DESC_SIZE - (chan->max_packet - 1);
+ if (len > HOST_DMA_NBYTES_LIMIT - (chan->max_packet - 1))
+ len = HOST_DMA_NBYTES_LIMIT - (chan->max_packet - 1);
if (chan->ep_is_in) {
int num_packets;
@@ -721,8 +721,8 @@ static void dwc2_fill_host_dma_desc(struct dwc2_hsotg *hsotg,
dma_sync_single_for_device(hsotg->dev,
qh->desc_list_dma +
- (n_desc * sizeof(struct dwc2_hcd_dma_desc)),
- sizeof(struct dwc2_hcd_dma_desc),
+ (n_desc * sizeof(struct dwc2_dma_desc)),
+ sizeof(struct dwc2_dma_desc),
DMA_TO_DEVICE);
/*
@@ -778,8 +778,8 @@ static void dwc2_init_non_isoc_dma_desc(struct dwc2_hsotg *hsotg,
dma_sync_single_for_device(hsotg->dev,
qh->desc_list_dma +
((n_desc - 1) *
- sizeof(struct dwc2_hcd_dma_desc)),
- sizeof(struct dwc2_hcd_dma_desc),
+ sizeof(struct dwc2_dma_desc)),
+ sizeof(struct dwc2_dma_desc),
DMA_TO_DEVICE);
}
dwc2_fill_host_dma_desc(hsotg, chan, qtd, qh, n_desc);
@@ -808,8 +808,8 @@ static void dwc2_init_non_isoc_dma_desc(struct dwc2_hsotg *hsotg,
n_desc - 1, &qh->desc_list[n_desc - 1]);
dma_sync_single_for_device(hsotg->dev,
qh->desc_list_dma + (n_desc - 1) *
- sizeof(struct dwc2_hcd_dma_desc),
- sizeof(struct dwc2_hcd_dma_desc),
+ sizeof(struct dwc2_dma_desc),
+ sizeof(struct dwc2_dma_desc),
DMA_TO_DEVICE);
if (n_desc > 1) {
qh->desc_list[0].status |= HOST_DMA_A;
@@ -817,7 +817,7 @@ static void dwc2_init_non_isoc_dma_desc(struct dwc2_hsotg *hsotg,
&qh->desc_list[0]);
dma_sync_single_for_device(hsotg->dev,
qh->desc_list_dma,
- sizeof(struct dwc2_hcd_dma_desc),
+ sizeof(struct dwc2_dma_desc),
DMA_TO_DEVICE);
}
chan->ntd = n_desc;
@@ -893,7 +893,7 @@ static int dwc2_cmpl_host_isoc_dma_desc(struct dwc2_hsotg *hsotg,
struct dwc2_qtd *qtd,
struct dwc2_qh *qh, u16 idx)
{
- struct dwc2_hcd_dma_desc *dma_desc;
+ struct dwc2_dma_desc *dma_desc;
struct dwc2_hcd_iso_packet_desc *frame_desc;
u16 remain = 0;
int rc = 0;
@@ -902,8 +902,8 @@ static int dwc2_cmpl_host_isoc_dma_desc(struct dwc2_hsotg *hsotg,
return -EINVAL;
dma_sync_single_for_cpu(hsotg->dev, qh->desc_list_dma + (idx *
- sizeof(struct dwc2_hcd_dma_desc)),
- sizeof(struct dwc2_hcd_dma_desc),
+ sizeof(struct dwc2_dma_desc)),
+ sizeof(struct dwc2_dma_desc),
DMA_FROM_DEVICE);
dma_desc = &qh->desc_list[idx];
@@ -1066,7 +1066,7 @@ stop_scan:
static int dwc2_update_non_isoc_urb_state_ddma(struct dwc2_hsotg *hsotg,
struct dwc2_host_chan *chan,
struct dwc2_qtd *qtd,
- struct dwc2_hcd_dma_desc *dma_desc,
+ struct dwc2_dma_desc *dma_desc,
enum dwc2_halt_status halt_status,
u32 n_bytes, int *xfer_done)
{
@@ -1154,7 +1154,7 @@ static int dwc2_process_non_isoc_desc(struct dwc2_hsotg *hsotg,
{
struct dwc2_qh *qh = chan->qh;
struct dwc2_hcd_urb *urb = qtd->urb;
- struct dwc2_hcd_dma_desc *dma_desc;
+ struct dwc2_dma_desc *dma_desc;
u32 n_bytes;
int failed;
@@ -1165,8 +1165,8 @@ static int dwc2_process_non_isoc_desc(struct dwc2_hsotg *hsotg,
dma_sync_single_for_cpu(hsotg->dev,
qh->desc_list_dma + (desc_num *
- sizeof(struct dwc2_hcd_dma_desc)),
- sizeof(struct dwc2_hcd_dma_desc),
+ sizeof(struct dwc2_dma_desc)),
+ sizeof(struct dwc2_dma_desc),
DMA_FROM_DEVICE);
dma_desc = &qh->desc_list[desc_num];
diff --git a/drivers/usb/dwc2/hcd_intr.c b/drivers/usb/dwc2/hcd_intr.c
index 906f223542ee..b8f4b6aaf1d0 100644
--- a/drivers/usb/dwc2/hcd_intr.c
+++ b/drivers/usb/dwc2/hcd_intr.c
@@ -256,7 +256,7 @@ static void dwc2_perio_tx_fifo_empty_intr(struct dwc2_hsotg *hsotg)
static void dwc2_hprt0_enable(struct dwc2_hsotg *hsotg, u32 hprt0,
u32 *hprt0_modify)
{
- struct dwc2_core_params *params = hsotg->core_params;
+ struct dwc2_core_params *params = &hsotg->params;
int do_reset = 0;
u32 usbcfg;
u32 prtspd;
@@ -395,10 +395,10 @@ static void dwc2_port_intr(struct dwc2_hsotg *hsotg)
dwc2_hprt0_enable(hsotg, hprt0, &hprt0_modify);
} else {
hsotg->flags.b.port_enable_change = 1;
- if (hsotg->core_params->dma_desc_fs_enable) {
+ if (hsotg->params.dma_desc_fs_enable) {
u32 hcfg;
- hsotg->core_params->dma_desc_enable = 0;
+ hsotg->params.dma_desc_enable = 0;
hsotg->new_connection = false;
hcfg = dwc2_readl(hsotg->regs + HCFG);
hcfg &= ~HCFG_DESCDMA;
@@ -604,7 +604,7 @@ static enum dwc2_halt_status dwc2_update_isoc_urb_state(
/* Skip whole frame */
if (chan->qh->do_split &&
chan->ep_type == USB_ENDPOINT_XFER_ISOC && chan->ep_is_in &&
- hsotg->core_params->dma_enable > 0) {
+ hsotg->params.host_dma > 0) {
qtd->complete_split = 0;
qtd->isoc_split_offset = 0;
}
@@ -743,7 +743,7 @@ cleanup:
dwc2_hc_cleanup(hsotg, chan);
list_add_tail(&chan->hc_list_entry, &hsotg->free_hc_list);
- if (hsotg->core_params->uframe_sched > 0) {
+ if (hsotg->params.uframe_sched > 0) {
hsotg->available_host_channels++;
} else {
switch (chan->ep_type) {
@@ -789,7 +789,7 @@ static void dwc2_halt_channel(struct dwc2_hsotg *hsotg,
if (dbg_hc(chan))
dev_vdbg(hsotg->dev, "%s()\n", __func__);
- if (hsotg->core_params->dma_enable > 0) {
+ if (hsotg->params.host_dma > 0) {
if (dbg_hc(chan))
dev_vdbg(hsotg->dev, "DMA enabled\n");
dwc2_release_channel(hsotg, chan, qtd, halt_status);
@@ -915,6 +915,8 @@ static int dwc2_xfercomp_isoc_split_in(struct dwc2_hsotg *hsotg,
{
struct dwc2_hcd_iso_packet_desc *frame_desc;
u32 len;
+ u32 hctsiz;
+ u32 pid;
if (!qtd->urb)
return 0;
@@ -932,7 +934,10 @@ static int dwc2_xfercomp_isoc_split_in(struct dwc2_hsotg *hsotg,
qtd->isoc_split_offset += len;
- if (frame_desc->actual_length >= frame_desc->length) {
+ hctsiz = dwc2_readl(hsotg->regs + HCTSIZ(chnum));
+ pid = (hctsiz & TSIZ_SC_MC_PID_MASK) >> TSIZ_SC_MC_PID_SHIFT;
+
+ if (frame_desc->actual_length >= frame_desc->length || pid == 0) {
frame_desc->status = 0;
qtd->isoc_frame_index++;
qtd->complete_split = 0;
@@ -974,7 +979,7 @@ static void dwc2_hc_xfercomp_intr(struct dwc2_hsotg *hsotg,
pipe_type = dwc2_hcd_get_pipe_type(&urb->pipe_info);
- if (hsotg->core_params->dma_desc_enable > 0) {
+ if (hsotg->params.dma_desc_enable > 0) {
dwc2_hcd_complete_xfer_ddma(hsotg, chan, chnum, halt_status);
if (pipe_type == USB_ENDPOINT_XFER_ISOC)
/* Do not disable the interrupt, just clear it */
@@ -985,7 +990,7 @@ static void dwc2_hc_xfercomp_intr(struct dwc2_hsotg *hsotg,
/* Handle xfer complete on CSPLIT */
if (chan->qh->do_split) {
if (chan->ep_type == USB_ENDPOINT_XFER_ISOC && chan->ep_is_in &&
- hsotg->core_params->dma_enable > 0) {
+ hsotg->params.host_dma > 0) {
if (qtd->complete_split &&
dwc2_xfercomp_isoc_split_in(hsotg, chan, chnum,
qtd))
@@ -1097,7 +1102,7 @@ static void dwc2_hc_stall_intr(struct dwc2_hsotg *hsotg,
dev_dbg(hsotg->dev, "--Host Channel %d Interrupt: STALL Received--\n",
chnum);
- if (hsotg->core_params->dma_desc_enable > 0) {
+ if (hsotg->params.dma_desc_enable > 0) {
dwc2_hcd_complete_xfer_ddma(hsotg, chan, chnum,
DWC2_HC_XFER_STALL);
goto handle_stall_done;
@@ -1207,7 +1212,7 @@ static void dwc2_hc_nak_intr(struct dwc2_hsotg *hsotg,
switch (dwc2_hcd_get_pipe_type(&qtd->urb->pipe_info)) {
case USB_ENDPOINT_XFER_CONTROL:
case USB_ENDPOINT_XFER_BULK:
- if (hsotg->core_params->dma_enable > 0 && chan->ep_is_in) {
+ if (hsotg->params.host_dma > 0 && chan->ep_is_in) {
/*
* NAK interrupts are enabled on bulk/control IN
* transfers in DMA mode for the sole purpose of
@@ -1353,7 +1358,7 @@ static void dwc2_hc_nyet_intr(struct dwc2_hsotg *hsotg,
*/
if (chan->do_split && chan->complete_split) {
if (chan->ep_is_in && chan->ep_type == USB_ENDPOINT_XFER_ISOC &&
- hsotg->core_params->dma_enable > 0) {
+ hsotg->params.host_dma > 0) {
qtd->complete_split = 0;
qtd->isoc_split_offset = 0;
qtd->isoc_frame_index++;
@@ -1374,7 +1379,7 @@ static void dwc2_hc_nyet_intr(struct dwc2_hsotg *hsotg,
struct dwc2_qh *qh = chan->qh;
bool past_end;
- if (hsotg->core_params->uframe_sched <= 0) {
+ if (hsotg->params.uframe_sched <= 0) {
int frnum = dwc2_hcd_get_frame_number(hsotg);
/* Don't have num_hs_transfers; simple logic */
@@ -1467,7 +1472,7 @@ static void dwc2_hc_babble_intr(struct dwc2_hsotg *hsotg,
dwc2_hc_handle_tt_clear(hsotg, chan, qtd);
- if (hsotg->core_params->dma_desc_enable > 0) {
+ if (hsotg->params.dma_desc_enable > 0) {
dwc2_hcd_complete_xfer_ddma(hsotg, chan, chnum,
DWC2_HC_XFER_BABBLE_ERR);
goto disable_int;
@@ -1572,7 +1577,7 @@ static void dwc2_hc_ahberr_intr(struct dwc2_hsotg *hsotg,
dev_err(hsotg->dev, " Interval: %d\n", urb->interval);
/* Core halts the channel for Descriptor DMA mode */
- if (hsotg->core_params->dma_desc_enable > 0) {
+ if (hsotg->params.dma_desc_enable > 0) {
dwc2_hcd_complete_xfer_ddma(hsotg, chan, chnum,
DWC2_HC_XFER_AHB_ERR);
goto handle_ahberr_done;
@@ -1604,7 +1609,7 @@ static void dwc2_hc_xacterr_intr(struct dwc2_hsotg *hsotg,
dwc2_hc_handle_tt_clear(hsotg, chan, qtd);
- if (hsotg->core_params->dma_desc_enable > 0) {
+ if (hsotg->params.dma_desc_enable > 0) {
dwc2_hcd_complete_xfer_ddma(hsotg, chan, chnum,
DWC2_HC_XFER_XACT_ERR);
goto handle_xacterr_done;
@@ -1798,8 +1803,8 @@ static void dwc2_hc_chhltd_intr_dma(struct dwc2_hsotg *hsotg,
if (chan->halt_status == DWC2_HC_XFER_URB_DEQUEUE ||
(chan->halt_status == DWC2_HC_XFER_AHB_ERR &&
- hsotg->core_params->dma_desc_enable <= 0)) {
- if (hsotg->core_params->dma_desc_enable > 0)
+ hsotg->params.dma_desc_enable <= 0)) {
+ if (hsotg->params.dma_desc_enable > 0)
dwc2_hcd_complete_xfer_ddma(hsotg, chan, chnum,
chan->halt_status);
else
@@ -1830,7 +1835,7 @@ static void dwc2_hc_chhltd_intr_dma(struct dwc2_hsotg *hsotg,
} else if (chan->hcint & HCINTMSK_STALL) {
dwc2_hc_stall_intr(hsotg, chan, chnum, qtd);
} else if ((chan->hcint & HCINTMSK_XACTERR) &&
- hsotg->core_params->dma_desc_enable <= 0) {
+ hsotg->params.dma_desc_enable <= 0) {
if (out_nak_enh) {
if (chan->hcint &
(HCINTMSK_NYET | HCINTMSK_NAK | HCINTMSK_ACK)) {
@@ -1850,10 +1855,10 @@ static void dwc2_hc_chhltd_intr_dma(struct dwc2_hsotg *hsotg,
*/
dwc2_hc_xacterr_intr(hsotg, chan, chnum, qtd);
} else if ((chan->hcint & HCINTMSK_XCS_XACT) &&
- hsotg->core_params->dma_desc_enable > 0) {
+ hsotg->params.dma_desc_enable > 0) {
dwc2_hc_xacterr_intr(hsotg, chan, chnum, qtd);
} else if ((chan->hcint & HCINTMSK_AHBERR) &&
- hsotg->core_params->dma_desc_enable > 0) {
+ hsotg->params.dma_desc_enable > 0) {
dwc2_hc_ahberr_intr(hsotg, chan, chnum, qtd);
} else if (chan->hcint & HCINTMSK_BBLERR) {
dwc2_hc_babble_intr(hsotg, chan, chnum, qtd);
@@ -1946,7 +1951,7 @@ static void dwc2_hc_chhltd_intr(struct dwc2_hsotg *hsotg,
dev_vdbg(hsotg->dev, "--Host Channel %d Interrupt: Channel Halted--\n",
chnum);
- if (hsotg->core_params->dma_enable > 0) {
+ if (hsotg->params.host_dma > 0) {
dwc2_hc_chhltd_intr_dma(hsotg, chan, chnum, qtd);
} else {
if (!dwc2_halt_status_ok(hsotg, chan, chnum, qtd))
@@ -2023,7 +2028,7 @@ static void dwc2_hc_n_intr(struct dwc2_hsotg *hsotg, int chnum)
* interrupt unmasked
*/
WARN_ON(hcint != HCINTMSK_CHHLTD);
- if (hsotg->core_params->dma_desc_enable > 0)
+ if (hsotg->params.dma_desc_enable > 0)
dwc2_hcd_complete_xfer_ddma(hsotg, chan, chnum,
chan->halt_status);
else
@@ -2051,7 +2056,7 @@ static void dwc2_hc_n_intr(struct dwc2_hsotg *hsotg, int chnum)
qtd = list_first_entry(&chan->qh->qtd_list, struct dwc2_qtd,
qtd_list_entry);
- if (hsotg->core_params->dma_enable <= 0) {
+ if (hsotg->params.host_dma <= 0) {
if ((hcint & HCINTMSK_CHHLTD) && hcint != HCINTMSK_CHHLTD)
hcint &= ~HCINTMSK_CHHLTD;
}
@@ -2156,7 +2161,7 @@ static void dwc2_hc_intr(struct dwc2_hsotg *hsotg)
}
}
- for (i = 0; i < hsotg->core_params->host_channels; i++) {
+ for (i = 0; i < hsotg->params.host_channels; i++) {
if (haint & (1 << i))
dwc2_hc_n_intr(hsotg, i);
}
diff --git a/drivers/usb/dwc2/hcd_queue.c b/drivers/usb/dwc2/hcd_queue.c
index 13754353251f..5713f03a4e56 100644
--- a/drivers/usb/dwc2/hcd_queue.c
+++ b/drivers/usb/dwc2/hcd_queue.c
@@ -75,7 +75,7 @@ static int dwc2_periodic_channel_available(struct dwc2_hsotg *hsotg)
int status;
int num_channels;
- num_channels = hsotg->core_params->host_channels;
+ num_channels = hsotg->params.host_channels;
if (hsotg->periodic_channels + hsotg->non_periodic_channels <
num_channels
&& hsotg->periodic_channels < num_channels - 1) {
@@ -355,6 +355,37 @@ static void pmap_unschedule(unsigned long *map, int bits_per_period,
}
}
+/**
+ * dwc2_get_ls_map() - Get the map used for the given qh
+ *
+ * @hsotg: The HCD state structure for the DWC OTG controller.
+ * @qh: QH for the periodic transfer.
+ *
+ * We'll always get the periodic map out of our TT. Note that even if we're
+ * running the host straight in low speed / full speed mode it appears as if
+ * a TT is allocated for us, so we'll use it. If that ever changes we can
+ * add logic here to get a map out of "hsotg" if !qh->do_split.
+ *
+ * Returns: the map or NULL if a map couldn't be found.
+ */
+static unsigned long *dwc2_get_ls_map(struct dwc2_hsotg *hsotg,
+ struct dwc2_qh *qh)
+{
+ unsigned long *map;
+
+ /* Don't expect to be missing a TT and be doing low speed scheduling */
+ if (WARN_ON(!qh->dwc_tt))
+ return NULL;
+
+ /* Get the map and adjust if this is a multi_tt hub */
+ map = qh->dwc_tt->periodic_bitmaps;
+ if (qh->dwc_tt->usb_tt->multi)
+ map += DWC2_ELEMENTS_PER_LS_BITMAP * qh->ttport;
+
+ return map;
+}
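/*
 * Editorial sketch: for a multi-TT hub every downstream port has its own
 * periodic bitmap, laid out back to back, so the map is offset by
 * qh->ttport * DWC2_ELEMENTS_PER_LS_BITMAP elements. A hypothetical caller
 * fetches the map before touching the low-speed schedule:
 */
static int example_check_ls_map(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
{
	unsigned long *map = dwc2_get_ls_map(hsotg, qh);

	if (!map)
		return -EINVAL;

	/* hand "map" to the low-speed bitmap scheduler here (omitted) */
	return 0;
}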
+
+#ifdef DWC2_PRINT_SCHEDULE
/*
* cat_printf() - A printf() + strcat() helper
*
@@ -454,35 +485,6 @@ static void pmap_print(unsigned long *map, int bits_per_period,
}
}
-/**
- * dwc2_get_ls_map() - Get the map used for the given qh
- *
- * @hsotg: The HCD state structure for the DWC OTG controller.
- * @qh: QH for the periodic transfer.
- *
- * We'll always get the periodic map out of our TT. Note that even if we're
- * running the host straight in low speed / full speed mode it appears as if
- * a TT is allocated for us, so we'll use it. If that ever changes we can
- * add logic here to get a map out of "hsotg" if !qh->do_split.
- *
- * Returns: the map or NULL if a map couldn't be found.
- */
-static unsigned long *dwc2_get_ls_map(struct dwc2_hsotg *hsotg,
- struct dwc2_qh *qh)
-{
- unsigned long *map;
-
- /* Don't expect to be missing a TT and be doing low speed scheduling */
- if (WARN_ON(!qh->dwc_tt))
- return NULL;
-
- /* Get the map and adjust if this is a multi_tt hub */
- map = qh->dwc_tt->periodic_bitmaps;
- if (qh->dwc_tt->usb_tt->multi)
- map += DWC2_ELEMENTS_PER_LS_BITMAP * qh->ttport;
-
- return map;
-}
struct dwc2_qh_print_data {
struct dwc2_hsotg *hsotg;
@@ -519,9 +521,6 @@ static void dwc2_qh_schedule_print(struct dwc2_hsotg *hsotg,
* If we don't have tracing turned on, don't run unless the special
* define is turned on.
*/
-#ifndef DWC2_PRINT_SCHEDULE
- return;
-#endif
if (qh->schedule_low_speed) {
unsigned long *map = dwc2_get_ls_map(hsotg, qh);
@@ -559,8 +558,12 @@ static void dwc2_qh_schedule_print(struct dwc2_hsotg *hsotg,
DWC2_HS_SCHEDULE_UFRAMES, "uFrame", "us",
dwc2_qh_print, &print_data);
}
-
+ return;
}
+#else
+static inline void dwc2_qh_schedule_print(struct dwc2_hsotg *hsotg,
+					  struct dwc2_qh *qh) {}
+#endif
/**
* dwc2_ls_pmap_schedule() - Schedule a low speed QH
@@ -1104,7 +1107,7 @@ static void dwc2_pick_first_frame(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
next_active_frame = earliest_frame;
 	/* Get the "no microframe scheduler" out of the way... */
- if (hsotg->core_params->uframe_sched <= 0) {
+ if (hsotg->params.uframe_sched <= 0) {
if (qh->do_split)
/* Splits are active at microframe 0 minus 1 */
next_active_frame |= 0x7;
@@ -1197,7 +1200,7 @@ static int dwc2_do_reserve(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
{
int status;
- if (hsotg->core_params->uframe_sched > 0) {
+ if (hsotg->params.uframe_sched > 0) {
status = dwc2_uframe_schedule(hsotg, qh);
} else {
status = dwc2_periodic_channel_available(hsotg);
@@ -1218,7 +1221,7 @@ static int dwc2_do_reserve(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
return status;
}
- if (hsotg->core_params->uframe_sched <= 0)
+ if (hsotg->params.uframe_sched <= 0)
/* Reserve periodic channel */
hsotg->periodic_channels++;
@@ -1254,7 +1257,7 @@ static void dwc2_do_unreserve(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
/* Update claimed usecs per (micro)frame */
hsotg->periodic_usecs -= qh->host_us;
- if (hsotg->core_params->uframe_sched > 0) {
+ if (hsotg->params.uframe_sched > 0) {
dwc2_uframe_unschedule(hsotg, qh);
} else {
/* Release periodic channel reservation */
@@ -1328,7 +1331,7 @@ static int dwc2_check_max_xfer_size(struct dwc2_hsotg *hsotg,
int status = 0;
max_xfer_size = dwc2_max_packet(qh->maxp) * dwc2_hb_mult(qh->maxp);
- max_channel_xfer_size = hsotg->core_params->max_transfer_size;
+ max_channel_xfer_size = hsotg->params.max_transfer_size;
if (max_xfer_size > max_channel_xfer_size) {
dev_err(hsotg->dev,
@@ -1391,7 +1394,7 @@ static int dwc2_schedule_periodic(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
qh->unreserve_pending = 0;
- if (hsotg->core_params->dma_desc_enable > 0)
+ if (hsotg->params.dma_desc_enable > 0)
/* Don't rely on SOF and start in ready schedule */
list_add_tail(&qh->qh_list_entry, &hsotg->periodic_sched_ready);
else
@@ -1599,7 +1602,7 @@ struct dwc2_qh *dwc2_hcd_qh_create(struct dwc2_hsotg *hsotg,
dwc2_qh_init(hsotg, qh, urb, mem_flags);
- if (hsotg->core_params->dma_desc_enable > 0 &&
+ if (hsotg->params.dma_desc_enable > 0 &&
dwc2_hcd_qh_init_ddma(hsotg, qh, mem_flags) < 0) {
dwc2_hcd_qh_free(hsotg, qh);
return NULL;
@@ -1711,7 +1714,7 @@ void dwc2_hcd_qh_unlink(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
dwc2_deschedule_periodic(hsotg, qh);
hsotg->periodic_qh_count--;
if (!hsotg->periodic_qh_count &&
- hsotg->core_params->dma_desc_enable <= 0) {
+ hsotg->params.dma_desc_enable <= 0) {
intr_mask = dwc2_readl(hsotg->regs + GINTMSK);
intr_mask &= ~GINTSTS_SOF;
dwc2_writel(intr_mask, hsotg->regs + GINTMSK);
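
The hcd_queue.c changes above also rework the schedule-dump path: rather than compiling dwc2_qh_schedule_print() unconditionally and bailing out through an early "#ifndef DWC2_PRINT_SCHEDULE return;" block, the whole printer (including cat_printf() and pmap_print()) now lives under #ifdef DWC2_PRINT_SCHEDULE with an empty inline stub in the #else branch, and dwc2_get_ls_map() is hoisted above the #ifdef because the scheduler proper still needs it. A compilable sketch of the stub pattern, using a hypothetical schedule_print() stand-in:

	#include <stdio.h>

	/* Build with -DDWC2_PRINT_SCHEDULE to get the real printer. */
	#ifdef DWC2_PRINT_SCHEDULE
	static void schedule_print(int chnum)
	{
		printf("channel %d schedule dump\n", chnum);
	}
	#else
	/* Empty stub: call sites stay unguarded; the body compiles away. */
	static inline void schedule_print(int chnum) { (void)chnum; }
	#endif

	int main(void)
	{
		schedule_print(3);
		return 0;
	}
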
diff --git a/drivers/usb/dwc2/hw.h b/drivers/usb/dwc2/hw.h
index 91058441e62a..5be056b39e5c 100644
--- a/drivers/usb/dwc2/hw.h
+++ b/drivers/usb/dwc2/hw.h
@@ -412,6 +412,7 @@
/* Device mode registers */
#define DCFG HSOTG_REG(0x800)
+#define DCFG_DESCDMA_EN (1 << 23)
#define DCFG_EPMISCNT_MASK (0x1f << 18)
#define DCFG_EPMISCNT_SHIFT 18
#define DCFG_EPMISCNT_LIMIT 0x1f
@@ -473,6 +474,7 @@
#define DIEPMSK_XFERCOMPLMSK (1 << 0)
#define DOEPMSK HSOTG_REG(0x814)
+#define DOEPMSK_BNAMSK (1 << 9)
#define DOEPMSK_BACK2BACKSETUP (1 << 6)
#define DOEPMSK_STSPHSERCVDMSK (1 << 5)
#define DOEPMSK_OUTTKNEPDISMSK (1 << 4)
@@ -790,7 +792,8 @@
#define HCFIFO(_ch) HSOTG_REG(0x1000 + 0x1000 * (_ch))
/**
- * struct dwc2_hcd_dma_desc - Host-mode DMA descriptor structure
+ * struct dwc2_dma_desc - DMA descriptor structure,
+ * used for both host and gadget modes
*
* @status: DMA descriptor status quadlet
* @buf: DMA descriptor data buffer pointer
@@ -798,10 +801,12 @@
* DMA Descriptor structure contains two quadlets:
* Status quadlet and Data buffer pointer.
*/
-struct dwc2_hcd_dma_desc {
+struct dwc2_dma_desc {
u32 status;
u32 buf;
-};
+} __packed;
+
+/* Host Mode DMA descriptor status quadlet */
#define HOST_DMA_A (1 << 31)
#define HOST_DMA_STS_MASK (0x3 << 28)
@@ -817,8 +822,43 @@ struct dwc2_hcd_dma_desc {
#define HOST_DMA_ISOC_NBYTES_SHIFT 0
#define HOST_DMA_NBYTES_MASK (0x1ffff << 0)
#define HOST_DMA_NBYTES_SHIFT 0
+#define HOST_DMA_NBYTES_LIMIT 131071
+
+/* Device Mode DMA descriptor status quadlet */
+
+#define DEV_DMA_BUFF_STS_MASK (0x3 << 30)
+#define DEV_DMA_BUFF_STS_SHIFT 30
+#define DEV_DMA_BUFF_STS_HREADY 0
+#define DEV_DMA_BUFF_STS_DMABUSY 1
+#define DEV_DMA_BUFF_STS_DMADONE 2
+#define DEV_DMA_BUFF_STS_HBUSY 3
+#define DEV_DMA_STS_MASK (0x3 << 28)
+#define DEV_DMA_STS_SHIFT 28
+#define DEV_DMA_STS_SUCC 0
+#define DEV_DMA_STS_BUFF_FLUSH 1
+#define DEV_DMA_STS_BUFF_ERR 3
+#define DEV_DMA_L (1 << 27)
+#define DEV_DMA_SHORT (1 << 26)
+#define DEV_DMA_IOC (1 << 25)
+#define DEV_DMA_SR (1 << 24)
+#define DEV_DMA_MTRF (1 << 23)
+#define DEV_DMA_ISOC_PID_MASK (0x3 << 23)
+#define DEV_DMA_ISOC_PID_SHIFT 23
+#define DEV_DMA_ISOC_PID_DATA0 0
+#define DEV_DMA_ISOC_PID_DATA2 1
+#define DEV_DMA_ISOC_PID_DATA1 2
+#define DEV_DMA_ISOC_PID_MDATA 3
+#define DEV_DMA_ISOC_FRNUM_MASK (0x7ff << 12)
+#define DEV_DMA_ISOC_FRNUM_SHIFT 12
+#define DEV_DMA_ISOC_TX_NBYTES_MASK (0xfff << 0)
+#define DEV_DMA_ISOC_TX_NBYTES_LIMIT 0xfff
+#define DEV_DMA_ISOC_RX_NBYTES_MASK (0x7ff << 0)
+#define DEV_DMA_ISOC_RX_NBYTES_LIMIT 0x7ff
+#define DEV_DMA_ISOC_NBYTES_SHIFT 0
+#define DEV_DMA_NBYTES_MASK (0xffff << 0)
+#define DEV_DMA_NBYTES_SHIFT 0
+#define DEV_DMA_NBYTES_LIMIT 0xffff
-#define MAX_DMA_DESC_SIZE 131071
#define MAX_DMA_DESC_NUM_GENERIC 64
#define MAX_DMA_DESC_NUM_HS_ISOC 256
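
In hw.h the descriptor structure is renamed from dwc2_hcd_dma_desc to dwc2_dma_desc (now shared with the gadget side and marked __packed), and the old MAX_DMA_DESC_SIZE becomes HOST_DMA_NBYTES_LIMIT alongside the new DEV_DMA_* quadlet fields. Each *_NBYTES_LIMIT is just its mask with the shift removed; a quick standalone consistency check (constants copied from the hunk, the check itself is illustrative):

	#include <assert.h>

	#define HOST_DMA_NBYTES_MASK	(0x1ffff << 0)
	#define HOST_DMA_NBYTES_SHIFT	0
	#define HOST_DMA_NBYTES_LIMIT	131071		/* 2^17 - 1 */

	#define DEV_DMA_NBYTES_MASK	(0xffff << 0)
	#define DEV_DMA_NBYTES_SHIFT	0
	#define DEV_DMA_NBYTES_LIMIT	0xffff

	int main(void)
	{
		assert((HOST_DMA_NBYTES_MASK >> HOST_DMA_NBYTES_SHIFT) ==
		       HOST_DMA_NBYTES_LIMIT);	/* 0x1ffff == 131071 */
		assert((DEV_DMA_NBYTES_MASK >> DEV_DMA_NBYTES_SHIFT) ==
		       DEV_DMA_NBYTES_LIMIT);
		return 0;
	}
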
diff --git a/drivers/usb/dwc2/params.c b/drivers/usb/dwc2/params.c
new file mode 100644
index 000000000000..a786256535b6
--- /dev/null
+++ b/drivers/usb/dwc2/params.c
@@ -0,0 +1,1435 @@
+/*
+ * Copyright (C) 2004-2016 Synopsys, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The names of the above-listed copyright holders may not be used
+ * to endorse or promote products derived from this software without
+ * specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation; either version 2 of the License, or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
+ * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+
+#include "core.h"
+
+static const struct dwc2_core_params params_hi6220 = {
+ .otg_cap = 2, /* No HNP/SRP capable */
+ .otg_ver = 0, /* 1.3 */
+ .dma_desc_enable = 0,
+ .dma_desc_fs_enable = 0,
+ .speed = 0, /* High Speed */
+ .enable_dynamic_fifo = 1,
+ .en_multiple_tx_fifo = 1,
+ .host_rx_fifo_size = 512,
+ .host_nperio_tx_fifo_size = 512,
+ .host_perio_tx_fifo_size = 512,
+ .max_transfer_size = 65535,
+ .max_packet_count = 511,
+ .host_channels = 16,
+ .phy_type = 1, /* UTMI */
+ .phy_utmi_width = 8,
+ .phy_ulpi_ddr = 0, /* Single */
+ .phy_ulpi_ext_vbus = 0,
+ .i2c_enable = 0,
+ .ulpi_fs_ls = 0,
+ .host_support_fs_ls_low_power = 0,
+ .host_ls_low_power_phy_clk = 0, /* 48 MHz */
+ .ts_dline = 0,
+ .reload_ctl = 0,
+ .ahbcfg = GAHBCFG_HBSTLEN_INCR16 <<
+ GAHBCFG_HBSTLEN_SHIFT,
+ .uframe_sched = 0,
+ .external_id_pin_ctl = -1,
+ .hibernation = -1,
+};
+
+static const struct dwc2_core_params params_bcm2835 = {
+ .otg_cap = 0, /* HNP/SRP capable */
+ .otg_ver = 0, /* 1.3 */
+ .dma_desc_enable = 0,
+ .dma_desc_fs_enable = 0,
+ .speed = 0, /* High Speed */
+ .enable_dynamic_fifo = 1,
+ .en_multiple_tx_fifo = 1,
+ .host_rx_fifo_size = 774, /* 774 DWORDs */
+ .host_nperio_tx_fifo_size = 256, /* 256 DWORDs */
+ .host_perio_tx_fifo_size = 512, /* 512 DWORDs */
+ .max_transfer_size = 65535,
+ .max_packet_count = 511,
+ .host_channels = 8,
+ .phy_type = 1, /* UTMI */
+ .phy_utmi_width = 8, /* 8 bits */
+ .phy_ulpi_ddr = 0, /* Single */
+ .phy_ulpi_ext_vbus = 0,
+ .i2c_enable = 0,
+ .ulpi_fs_ls = 0,
+ .host_support_fs_ls_low_power = 0,
+ .host_ls_low_power_phy_clk = 0, /* 48 MHz */
+ .ts_dline = 0,
+ .reload_ctl = 0,
+ .ahbcfg = 0x10,
+ .uframe_sched = 0,
+ .external_id_pin_ctl = -1,
+ .hibernation = -1,
+};
+
+static const struct dwc2_core_params params_rk3066 = {
+ .otg_cap = 2, /* non-HNP/non-SRP */
+ .otg_ver = -1,
+ .dma_desc_enable = 0,
+ .dma_desc_fs_enable = 0,
+ .speed = -1,
+ .enable_dynamic_fifo = 1,
+ .en_multiple_tx_fifo = -1,
+ .host_rx_fifo_size = 525, /* 525 DWORDs */
+ .host_nperio_tx_fifo_size = 128, /* 128 DWORDs */
+ .host_perio_tx_fifo_size = 256, /* 256 DWORDs */
+ .max_transfer_size = -1,
+ .max_packet_count = -1,
+ .host_channels = -1,
+ .phy_type = -1,
+ .phy_utmi_width = -1,
+ .phy_ulpi_ddr = -1,
+ .phy_ulpi_ext_vbus = -1,
+ .i2c_enable = -1,
+ .ulpi_fs_ls = -1,
+ .host_support_fs_ls_low_power = -1,
+ .host_ls_low_power_phy_clk = -1,
+ .ts_dline = -1,
+ .reload_ctl = -1,
+ .ahbcfg = GAHBCFG_HBSTLEN_INCR16 <<
+ GAHBCFG_HBSTLEN_SHIFT,
+ .uframe_sched = -1,
+ .external_id_pin_ctl = -1,
+ .hibernation = -1,
+};
+
+static const struct dwc2_core_params params_ltq = {
+ .otg_cap = 2, /* non-HNP/non-SRP */
+ .otg_ver = -1,
+ .dma_desc_enable = -1,
+ .dma_desc_fs_enable = -1,
+ .speed = -1,
+ .enable_dynamic_fifo = -1,
+ .en_multiple_tx_fifo = -1,
+ .host_rx_fifo_size = 288, /* 288 DWORDs */
+ .host_nperio_tx_fifo_size = 128, /* 128 DWORDs */
+ .host_perio_tx_fifo_size = 96, /* 96 DWORDs */
+ .max_transfer_size = 65535,
+ .max_packet_count = 511,
+ .host_channels = -1,
+ .phy_type = -1,
+ .phy_utmi_width = -1,
+ .phy_ulpi_ddr = -1,
+ .phy_ulpi_ext_vbus = -1,
+ .i2c_enable = -1,
+ .ulpi_fs_ls = -1,
+ .host_support_fs_ls_low_power = -1,
+ .host_ls_low_power_phy_clk = -1,
+ .ts_dline = -1,
+ .reload_ctl = -1,
+ .ahbcfg = GAHBCFG_HBSTLEN_INCR16 <<
+ GAHBCFG_HBSTLEN_SHIFT,
+ .uframe_sched = -1,
+ .external_id_pin_ctl = -1,
+ .hibernation = -1,
+};
+
+static const struct dwc2_core_params params_amlogic = {
+ .otg_cap = DWC2_CAP_PARAM_NO_HNP_SRP_CAPABLE,
+ .otg_ver = -1,
+ .dma_desc_enable = 0,
+ .dma_desc_fs_enable = 0,
+ .speed = DWC2_SPEED_PARAM_HIGH,
+ .enable_dynamic_fifo = 1,
+ .en_multiple_tx_fifo = -1,
+ .host_rx_fifo_size = 512,
+ .host_nperio_tx_fifo_size = 500,
+ .host_perio_tx_fifo_size = 500,
+ .max_transfer_size = -1,
+ .max_packet_count = -1,
+ .host_channels = 16,
+ .phy_type = DWC2_PHY_TYPE_PARAM_UTMI,
+ .phy_utmi_width = -1,
+ .phy_ulpi_ddr = -1,
+ .phy_ulpi_ext_vbus = -1,
+ .i2c_enable = -1,
+ .ulpi_fs_ls = -1,
+ .host_support_fs_ls_low_power = -1,
+ .host_ls_low_power_phy_clk = -1,
+ .ts_dline = -1,
+ .reload_ctl = 1,
+ .ahbcfg = GAHBCFG_HBSTLEN_INCR8 <<
+ GAHBCFG_HBSTLEN_SHIFT,
+ .uframe_sched = 0,
+ .external_id_pin_ctl = -1,
+ .hibernation = -1,
+};
+
+static const struct dwc2_core_params params_default = {
+ .otg_cap = -1,
+ .otg_ver = -1,
+
+ /*
+	 * Disable descriptor DMA mode by default: the HW can support
+	 * it, but not for SPLIT transactions.
+ * Disable it for FS devices as well.
+ */
+ .dma_desc_enable = 0,
+ .dma_desc_fs_enable = 0,
+
+ .speed = -1,
+ .enable_dynamic_fifo = -1,
+ .en_multiple_tx_fifo = -1,
+ .host_rx_fifo_size = -1,
+ .host_nperio_tx_fifo_size = -1,
+ .host_perio_tx_fifo_size = -1,
+ .max_transfer_size = -1,
+ .max_packet_count = -1,
+ .host_channels = -1,
+ .phy_type = -1,
+ .phy_utmi_width = -1,
+ .phy_ulpi_ddr = -1,
+ .phy_ulpi_ext_vbus = -1,
+ .i2c_enable = -1,
+ .ulpi_fs_ls = -1,
+ .host_support_fs_ls_low_power = -1,
+ .host_ls_low_power_phy_clk = -1,
+ .ts_dline = -1,
+ .reload_ctl = -1,
+ .ahbcfg = -1,
+ .uframe_sched = -1,
+ .external_id_pin_ctl = -1,
+ .hibernation = -1,
+};
+
+const struct of_device_id dwc2_of_match_table[] = {
+ { .compatible = "brcm,bcm2835-usb", .data = &params_bcm2835 },
+ { .compatible = "hisilicon,hi6220-usb", .data = &params_hi6220 },
+ { .compatible = "rockchip,rk3066-usb", .data = &params_rk3066 },
+ { .compatible = "lantiq,arx100-usb", .data = &params_ltq },
+ { .compatible = "lantiq,xrx200-usb", .data = &params_ltq },
+ { .compatible = "snps,dwc2", .data = NULL },
+ { .compatible = "samsung,s3c6400-hsotg", .data = NULL},
+ { .compatible = "amlogic,meson8b-usb", .data = &params_amlogic },
+ { .compatible = "amlogic,meson-gxbb-usb", .data = &params_amlogic },
+ { .compatible = "amcc,dwc-otg", .data = NULL },
+ {},
+};
+MODULE_DEVICE_TABLE(of, dwc2_of_match_table);
+
+static void dwc2_get_device_property(struct dwc2_hsotg *hsotg,
+ char *property, u8 size, u64 *value)
+{
+ u8 val8;
+ u16 val16;
+ u32 val32;
+
+ switch (size) {
+ case 0:
+ *value = device_property_read_bool(hsotg->dev, property);
+ break;
+ case 1:
+ if (device_property_read_u8(hsotg->dev, property, &val8))
+ return;
+
+ *value = val8;
+ break;
+ case 2:
+ if (device_property_read_u16(hsotg->dev, property, &val16))
+ return;
+
+ *value = val16;
+ break;
+ case 4:
+ if (device_property_read_u32(hsotg->dev, property, &val32))
+ return;
+
+ *value = val32;
+ break;
+ case 8:
+ if (device_property_read_u64(hsotg->dev, property, value))
+ return;
+
+ break;
+ default:
+ /*
+ * The size is checked by the only function that calls
+		 * this, so this should never happen.
+ */
+ WARN_ON(1);
+ return;
+ }
+}
+
+static void dwc2_set_core_param(void *param, u8 size, u64 value)
+{
+ switch (size) {
+ case 0:
+ *((bool *)param) = !!value;
+ break;
+ case 1:
+ *((u8 *)param) = (u8)value;
+ break;
+ case 2:
+ *((u16 *)param) = (u16)value;
+ break;
+ case 4:
+ *((u32 *)param) = (u32)value;
+ break;
+ case 8:
+ *((u64 *)param) = (u64)value;
+ break;
+ default:
+ /*
+ * The size is checked by the only function that calls
+		 * this, so this should never happen.
+ */
+ WARN_ON(1);
+ return;
+ }
+}
+
+/**
+ * dwc2_set_param() - Set a core parameter
+ *
+ * @hsotg: Programming view of the DWC_otg controller
+ * @param: Pointer to the parameter to set
+ * @lookup: True if the property should be looked up
+ * @property: The device property to read
+ * @legacy: The param value to set if @property is not available. This
+ * will typically be the legacy value set in the static
+ * params structure.
+ * @def: The default value
+ * @min: The minimum value
+ * @max: The maximum value
+ * @size: The size of the core parameter in bytes, or 0 for bool.
+ *
+ * This function looks up @property and sets @param to that value.
+ * If the property doesn't exist it uses the passed-in @legacy value. It will
+ * verify that the value falls between @min and @max. If it doesn't,
+ * it will output an error and set the parameter to either @def or,
+ * failing that, to @min.
+ *
+ * The @size is used to write to @param and to query the device
+ * properties so that this same function can be used with different
+ * types of parameters.
+ */
+static void dwc2_set_param(struct dwc2_hsotg *hsotg, void *param,
+ bool lookup, char *property, u64 legacy,
+ u64 def, u64 min, u64 max, u8 size)
+{
+ u64 sizemax;
+ u64 value;
+
+ if (WARN_ON(!hsotg || !param || !property))
+ return;
+
+ if (WARN((size > 8) || ((size & (size - 1)) != 0),
+ "Invalid size %d for %s\n", size, property))
+ return;
+
+ dev_vdbg(hsotg->dev, "%s: Setting %s: legacy=%llu, def=%llu, min=%llu, max=%llu, size=%d\n",
+ __func__, property, legacy, def, min, max, size);
+
+	sizemax = (size == 8) ? U64_MAX : (1ULL << (size * 8)) - 1;
+ value = legacy;
+
+ /* Override legacy settings. */
+ if (lookup)
+ dwc2_get_device_property(hsotg, property, size, &value);
+
+ /*
+ * While the value is not valid, try setting it to the default
+ * value, and failing that, set it to the minimum.
+ */
+ while ((value < min) || (value > max)) {
+ /* Print an error unless the value is set to auto. */
+ if (value != sizemax)
+ dev_err(hsotg->dev, "Invalid value %llu for param %s\n",
+ value, property);
+
+ /*
+ * If we are already the default, just set it to the
+ * minimum.
+ */
+ if (value == def) {
+ dev_vdbg(hsotg->dev, "%s: setting value to min=%llu\n",
+ __func__, min);
+ value = min;
+ break;
+ }
+
+ /* Try the default value */
+ dev_vdbg(hsotg->dev, "%s: setting value to default=%llu\n",
+ __func__, def);
+ value = def;
+ }
+
+ dev_dbg(hsotg->dev, "Setting %s to %llu\n", property, value);
+ dwc2_set_core_param(param, size, value);
+}
+
+/**
+ * dwc2_set_param_u16() - Set a u16 parameter
+ *
+ * See dwc2_set_param().
+ */
+static void dwc2_set_param_u16(struct dwc2_hsotg *hsotg, u16 *param,
+ bool lookup, char *property, u16 legacy,
+ u16 def, u16 min, u16 max)
+{
+ dwc2_set_param(hsotg, param, lookup, property,
+ legacy, def, min, max, 2);
+}
+
+/**
+ * dwc2_set_param_bool() - Set a bool parameter
+ *
+ * See dwc2_set_param().
+ *
+ * Note: there is no 'legacy' argument here because there is no legacy
+ * source of bool params.
+ */
+static void dwc2_set_param_bool(struct dwc2_hsotg *hsotg, bool *param,
+ bool lookup, char *property,
+ bool def, bool min, bool max)
+{
+ dwc2_set_param(hsotg, param, lookup, property,
+ def, def, min, max, 0);
+}
+
+#define DWC2_OUT_OF_BOUNDS(a, b, c) ((a) < (b) || (a) > (c))
+
+/* Parameter access functions */
+static void dwc2_set_param_otg_cap(struct dwc2_hsotg *hsotg, int val)
+{
+ int valid = 1;
+
+ switch (val) {
+ case DWC2_CAP_PARAM_HNP_SRP_CAPABLE:
+ if (hsotg->hw_params.op_mode != GHWCFG2_OP_MODE_HNP_SRP_CAPABLE)
+ valid = 0;
+ break;
+ case DWC2_CAP_PARAM_SRP_ONLY_CAPABLE:
+ switch (hsotg->hw_params.op_mode) {
+ case GHWCFG2_OP_MODE_HNP_SRP_CAPABLE:
+ case GHWCFG2_OP_MODE_SRP_ONLY_CAPABLE:
+ case GHWCFG2_OP_MODE_SRP_CAPABLE_DEVICE:
+ case GHWCFG2_OP_MODE_SRP_CAPABLE_HOST:
+ break;
+ default:
+ valid = 0;
+ break;
+ }
+ break;
+ case DWC2_CAP_PARAM_NO_HNP_SRP_CAPABLE:
+ /* always valid */
+ break;
+ default:
+ valid = 0;
+ break;
+ }
+
+ if (!valid) {
+ if (val >= 0)
+ dev_err(hsotg->dev,
+ "%d invalid for otg_cap parameter. Check HW configuration.\n",
+ val);
+ switch (hsotg->hw_params.op_mode) {
+ case GHWCFG2_OP_MODE_HNP_SRP_CAPABLE:
+ val = DWC2_CAP_PARAM_HNP_SRP_CAPABLE;
+ break;
+ case GHWCFG2_OP_MODE_SRP_ONLY_CAPABLE:
+ case GHWCFG2_OP_MODE_SRP_CAPABLE_DEVICE:
+ case GHWCFG2_OP_MODE_SRP_CAPABLE_HOST:
+ val = DWC2_CAP_PARAM_SRP_ONLY_CAPABLE;
+ break;
+ default:
+ val = DWC2_CAP_PARAM_NO_HNP_SRP_CAPABLE;
+ break;
+ }
+ dev_dbg(hsotg->dev, "Setting otg_cap to %d\n", val);
+ }
+
+ hsotg->params.otg_cap = val;
+}
+
+static void dwc2_set_param_dma_desc_enable(struct dwc2_hsotg *hsotg, int val)
+{
+ int valid = 1;
+
+ if (val > 0 && (hsotg->params.host_dma <= 0 ||
+ !hsotg->hw_params.dma_desc_enable))
+ valid = 0;
+ if (val < 0)
+ valid = 0;
+
+ if (!valid) {
+ if (val >= 0)
+ dev_err(hsotg->dev,
+ "%d invalid for dma_desc_enable parameter. Check HW configuration.\n",
+ val);
+ val = (hsotg->params.host_dma > 0 &&
+ hsotg->hw_params.dma_desc_enable);
+ dev_dbg(hsotg->dev, "Setting dma_desc_enable to %d\n", val);
+ }
+
+ hsotg->params.dma_desc_enable = val;
+}
+
+static void dwc2_set_param_dma_desc_fs_enable(struct dwc2_hsotg *hsotg, int val)
+{
+ int valid = 1;
+
+ if (val > 0 && (hsotg->params.host_dma <= 0 ||
+ !hsotg->hw_params.dma_desc_enable))
+ valid = 0;
+ if (val < 0)
+ valid = 0;
+
+ if (!valid) {
+ if (val >= 0)
+ dev_err(hsotg->dev,
+ "%d invalid for dma_desc_fs_enable parameter. Check HW configuration.\n",
+ val);
+ val = (hsotg->params.host_dma > 0 &&
+ hsotg->hw_params.dma_desc_enable);
+ }
+
+ hsotg->params.dma_desc_fs_enable = val;
+ dev_dbg(hsotg->dev, "Setting dma_desc_fs_enable to %d\n", val);
+}
+
+static void
+dwc2_set_param_host_support_fs_ls_low_power(struct dwc2_hsotg *hsotg,
+ int val)
+{
+ if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
+ if (val >= 0) {
+ dev_err(hsotg->dev,
+				"Wrong value for host_support_fs_ls_low_power\n");
+			dev_err(hsotg->dev,
+				"host_support_fs_ls_low_power must be 0 or 1\n");
+		}
+		val = 0;
+		dev_dbg(hsotg->dev,
+			"Setting host_support_fs_ls_low_power to %d\n", val);
+ }
+
+ hsotg->params.host_support_fs_ls_low_power = val;
+}
+
+static void dwc2_set_param_enable_dynamic_fifo(struct dwc2_hsotg *hsotg,
+ int val)
+{
+ int valid = 1;
+
+ if (val > 0 && !hsotg->hw_params.enable_dynamic_fifo)
+ valid = 0;
+ if (val < 0)
+ valid = 0;
+
+ if (!valid) {
+ if (val >= 0)
+ dev_err(hsotg->dev,
+ "%d invalid for enable_dynamic_fifo parameter. Check HW configuration.\n",
+ val);
+ val = hsotg->hw_params.enable_dynamic_fifo;
+ dev_dbg(hsotg->dev, "Setting enable_dynamic_fifo to %d\n", val);
+ }
+
+ hsotg->params.enable_dynamic_fifo = val;
+}
+
+static void dwc2_set_param_host_rx_fifo_size(struct dwc2_hsotg *hsotg, int val)
+{
+ int valid = 1;
+
+ if (val < 16 || val > hsotg->hw_params.rx_fifo_size)
+ valid = 0;
+
+ if (!valid) {
+ if (val >= 0)
+ dev_err(hsotg->dev,
+ "%d invalid for host_rx_fifo_size. Check HW configuration.\n",
+ val);
+ val = hsotg->hw_params.rx_fifo_size;
+ dev_dbg(hsotg->dev, "Setting host_rx_fifo_size to %d\n", val);
+ }
+
+ hsotg->params.host_rx_fifo_size = val;
+}
+
+static void dwc2_set_param_host_nperio_tx_fifo_size(struct dwc2_hsotg *hsotg,
+ int val)
+{
+ int valid = 1;
+
+ if (val < 16 || val > hsotg->hw_params.host_nperio_tx_fifo_size)
+ valid = 0;
+
+ if (!valid) {
+ if (val >= 0)
+ dev_err(hsotg->dev,
+ "%d invalid for host_nperio_tx_fifo_size. Check HW configuration.\n",
+ val);
+ val = hsotg->hw_params.host_nperio_tx_fifo_size;
+ dev_dbg(hsotg->dev, "Setting host_nperio_tx_fifo_size to %d\n",
+ val);
+ }
+
+ hsotg->params.host_nperio_tx_fifo_size = val;
+}
+
+static void dwc2_set_param_host_perio_tx_fifo_size(struct dwc2_hsotg *hsotg,
+ int val)
+{
+ int valid = 1;
+
+ if (val < 16 || val > hsotg->hw_params.host_perio_tx_fifo_size)
+ valid = 0;
+
+ if (!valid) {
+ if (val >= 0)
+ dev_err(hsotg->dev,
+ "%d invalid for host_perio_tx_fifo_size. Check HW configuration.\n",
+ val);
+ val = hsotg->hw_params.host_perio_tx_fifo_size;
+ dev_dbg(hsotg->dev, "Setting host_perio_tx_fifo_size to %d\n",
+ val);
+ }
+
+ hsotg->params.host_perio_tx_fifo_size = val;
+}
+
+static void dwc2_set_param_max_transfer_size(struct dwc2_hsotg *hsotg, int val)
+{
+ int valid = 1;
+
+ if (val < 2047 || val > hsotg->hw_params.max_transfer_size)
+ valid = 0;
+
+ if (!valid) {
+ if (val >= 0)
+ dev_err(hsotg->dev,
+ "%d invalid for max_transfer_size. Check HW configuration.\n",
+ val);
+ val = hsotg->hw_params.max_transfer_size;
+ dev_dbg(hsotg->dev, "Setting max_transfer_size to %d\n", val);
+ }
+
+ hsotg->params.max_transfer_size = val;
+}
+
+static void dwc2_set_param_max_packet_count(struct dwc2_hsotg *hsotg, int val)
+{
+ int valid = 1;
+
+ if (val < 15 || val > hsotg->hw_params.max_packet_count)
+ valid = 0;
+
+ if (!valid) {
+ if (val >= 0)
+ dev_err(hsotg->dev,
+ "%d invalid for max_packet_count. Check HW configuration.\n",
+ val);
+ val = hsotg->hw_params.max_packet_count;
+ dev_dbg(hsotg->dev, "Setting max_packet_count to %d\n", val);
+ }
+
+ hsotg->params.max_packet_count = val;
+}
+
+static void dwc2_set_param_host_channels(struct dwc2_hsotg *hsotg, int val)
+{
+ int valid = 1;
+
+ if (val < 1 || val > hsotg->hw_params.host_channels)
+ valid = 0;
+
+ if (!valid) {
+ if (val >= 0)
+ dev_err(hsotg->dev,
+ "%d invalid for host_channels. Check HW configuration.\n",
+ val);
+ val = hsotg->hw_params.host_channels;
+ dev_dbg(hsotg->dev, "Setting host_channels to %d\n", val);
+ }
+
+ hsotg->params.host_channels = val;
+}
+
+static void dwc2_set_param_phy_type(struct dwc2_hsotg *hsotg, int val)
+{
+ int valid = 0;
+ u32 hs_phy_type, fs_phy_type;
+
+ if (DWC2_OUT_OF_BOUNDS(val, DWC2_PHY_TYPE_PARAM_FS,
+ DWC2_PHY_TYPE_PARAM_ULPI)) {
+ if (val >= 0) {
+ dev_err(hsotg->dev, "Wrong value for phy_type\n");
+ dev_err(hsotg->dev, "phy_type must be 0, 1 or 2\n");
+ }
+
+ valid = 0;
+ }
+
+ hs_phy_type = hsotg->hw_params.hs_phy_type;
+ fs_phy_type = hsotg->hw_params.fs_phy_type;
+ if (val == DWC2_PHY_TYPE_PARAM_UTMI &&
+ (hs_phy_type == GHWCFG2_HS_PHY_TYPE_UTMI ||
+ hs_phy_type == GHWCFG2_HS_PHY_TYPE_UTMI_ULPI))
+ valid = 1;
+ else if (val == DWC2_PHY_TYPE_PARAM_ULPI &&
+ (hs_phy_type == GHWCFG2_HS_PHY_TYPE_ULPI ||
+ hs_phy_type == GHWCFG2_HS_PHY_TYPE_UTMI_ULPI))
+ valid = 1;
+ else if (val == DWC2_PHY_TYPE_PARAM_FS &&
+ fs_phy_type == GHWCFG2_FS_PHY_TYPE_DEDICATED)
+ valid = 1;
+
+ if (!valid) {
+ if (val >= 0)
+ dev_err(hsotg->dev,
+ "%d invalid for phy_type. Check HW configuration.\n",
+ val);
+ val = DWC2_PHY_TYPE_PARAM_FS;
+ if (hs_phy_type != GHWCFG2_HS_PHY_TYPE_NOT_SUPPORTED) {
+ if (hs_phy_type == GHWCFG2_HS_PHY_TYPE_UTMI ||
+ hs_phy_type == GHWCFG2_HS_PHY_TYPE_UTMI_ULPI)
+ val = DWC2_PHY_TYPE_PARAM_UTMI;
+ else
+ val = DWC2_PHY_TYPE_PARAM_ULPI;
+ }
+ dev_dbg(hsotg->dev, "Setting phy_type to %d\n", val);
+ }
+
+ hsotg->params.phy_type = val;
+}
+
+static int dwc2_get_param_phy_type(struct dwc2_hsotg *hsotg)
+{
+ return hsotg->params.phy_type;
+}
+
+static void dwc2_set_param_speed(struct dwc2_hsotg *hsotg, int val)
+{
+ int valid = 1;
+
+ if (DWC2_OUT_OF_BOUNDS(val, 0, 2)) {
+ if (val >= 0) {
+ dev_err(hsotg->dev, "Wrong value for speed parameter\n");
+			dev_err(hsotg->dev, "speed parameter must be 0, 1, or 2\n");
+ }
+ valid = 0;
+ }
+
+ if (dwc2_is_hs_iot(hsotg) &&
+ val == DWC2_SPEED_PARAM_LOW)
+ valid = 0;
+
+ if (val == DWC2_SPEED_PARAM_HIGH &&
+ dwc2_get_param_phy_type(hsotg) == DWC2_PHY_TYPE_PARAM_FS)
+ valid = 0;
+
+ if (!valid) {
+ if (val >= 0)
+ dev_err(hsotg->dev,
+ "%d invalid for speed parameter. Check HW configuration.\n",
+ val);
+ val = dwc2_get_param_phy_type(hsotg) == DWC2_PHY_TYPE_PARAM_FS ?
+ DWC2_SPEED_PARAM_FULL : DWC2_SPEED_PARAM_HIGH;
+ dev_dbg(hsotg->dev, "Setting speed to %d\n", val);
+ }
+
+ hsotg->params.speed = val;
+}
+
+static void dwc2_set_param_host_ls_low_power_phy_clk(struct dwc2_hsotg *hsotg,
+ int val)
+{
+ int valid = 1;
+
+ if (DWC2_OUT_OF_BOUNDS(val, DWC2_HOST_LS_LOW_POWER_PHY_CLK_PARAM_48MHZ,
+ DWC2_HOST_LS_LOW_POWER_PHY_CLK_PARAM_6MHZ)) {
+ if (val >= 0) {
+ dev_err(hsotg->dev,
+ "Wrong value for host_ls_low_power_phy_clk parameter\n");
+ dev_err(hsotg->dev,
+ "host_ls_low_power_phy_clk must be 0 or 1\n");
+ }
+ valid = 0;
+ }
+
+ if (val == DWC2_HOST_LS_LOW_POWER_PHY_CLK_PARAM_48MHZ &&
+ dwc2_get_param_phy_type(hsotg) == DWC2_PHY_TYPE_PARAM_FS)
+ valid = 0;
+
+ if (!valid) {
+ if (val >= 0)
+ dev_err(hsotg->dev,
+ "%d invalid for host_ls_low_power_phy_clk. Check HW configuration.\n",
+ val);
+ val = dwc2_get_param_phy_type(hsotg) == DWC2_PHY_TYPE_PARAM_FS
+ ? DWC2_HOST_LS_LOW_POWER_PHY_CLK_PARAM_6MHZ
+ : DWC2_HOST_LS_LOW_POWER_PHY_CLK_PARAM_48MHZ;
+ dev_dbg(hsotg->dev, "Setting host_ls_low_power_phy_clk to %d\n",
+ val);
+ }
+
+ hsotg->params.host_ls_low_power_phy_clk = val;
+}
+
+static void dwc2_set_param_phy_ulpi_ddr(struct dwc2_hsotg *hsotg, int val)
+{
+ if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
+ if (val >= 0) {
+ dev_err(hsotg->dev, "Wrong value for phy_ulpi_ddr\n");
+			dev_err(hsotg->dev, "phy_ulpi_ddr must be 0 or 1\n");
+		}
+		val = 0;
+		dev_dbg(hsotg->dev, "Setting phy_ulpi_ddr to %d\n", val);
+ }
+
+ hsotg->params.phy_ulpi_ddr = val;
+}
+
+static void dwc2_set_param_phy_ulpi_ext_vbus(struct dwc2_hsotg *hsotg, int val)
+{
+ if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
+ if (val >= 0) {
+ dev_err(hsotg->dev,
+ "Wrong value for phy_ulpi_ext_vbus\n");
+ dev_err(hsotg->dev,
+ "phy_ulpi_ext_vbus must be 0 or 1\n");
+ }
+ val = 0;
+ dev_dbg(hsotg->dev, "Setting phy_ulpi_ext_vbus to %d\n", val);
+ }
+
+ hsotg->params.phy_ulpi_ext_vbus = val;
+}
+
+static void dwc2_set_param_phy_utmi_width(struct dwc2_hsotg *hsotg, int val)
+{
+ int valid = 0;
+
+ switch (hsotg->hw_params.utmi_phy_data_width) {
+ case GHWCFG4_UTMI_PHY_DATA_WIDTH_8:
+ valid = (val == 8);
+ break;
+ case GHWCFG4_UTMI_PHY_DATA_WIDTH_16:
+ valid = (val == 16);
+ break;
+ case GHWCFG4_UTMI_PHY_DATA_WIDTH_8_OR_16:
+ valid = (val == 8 || val == 16);
+ break;
+ }
+
+ if (!valid) {
+ if (val >= 0) {
+ dev_err(hsotg->dev,
+ "%d invalid for phy_utmi_width. Check HW configuration.\n",
+ val);
+ }
+ val = (hsotg->hw_params.utmi_phy_data_width ==
+ GHWCFG4_UTMI_PHY_DATA_WIDTH_8) ? 8 : 16;
+ dev_dbg(hsotg->dev, "Setting phy_utmi_width to %d\n", val);
+ }
+
+ hsotg->params.phy_utmi_width = val;
+}
+
+static void dwc2_set_param_ulpi_fs_ls(struct dwc2_hsotg *hsotg, int val)
+{
+ if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
+ if (val >= 0) {
+ dev_err(hsotg->dev, "Wrong value for ulpi_fs_ls\n");
+ dev_err(hsotg->dev, "ulpi_fs_ls must be 0 or 1\n");
+ }
+ val = 0;
+ dev_dbg(hsotg->dev, "Setting ulpi_fs_ls to %d\n", val);
+ }
+
+ hsotg->params.ulpi_fs_ls = val;
+}
+
+static void dwc2_set_param_ts_dline(struct dwc2_hsotg *hsotg, int val)
+{
+ if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
+ if (val >= 0) {
+ dev_err(hsotg->dev, "Wrong value for ts_dline\n");
+ dev_err(hsotg->dev, "ts_dline must be 0 or 1\n");
+ }
+ val = 0;
+ dev_dbg(hsotg->dev, "Setting ts_dline to %d\n", val);
+ }
+
+ hsotg->params.ts_dline = val;
+}
+
+static void dwc2_set_param_i2c_enable(struct dwc2_hsotg *hsotg, int val)
+{
+ int valid = 1;
+
+ if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
+ if (val >= 0) {
+ dev_err(hsotg->dev, "Wrong value for i2c_enable\n");
+ dev_err(hsotg->dev, "i2c_enable must be 0 or 1\n");
+ }
+
+ valid = 0;
+ }
+
+ if (val == 1 && !(hsotg->hw_params.i2c_enable))
+ valid = 0;
+
+ if (!valid) {
+ if (val >= 0)
+ dev_err(hsotg->dev,
+ "%d invalid for i2c_enable. Check HW configuration.\n",
+ val);
+ val = hsotg->hw_params.i2c_enable;
+ dev_dbg(hsotg->dev, "Setting i2c_enable to %d\n", val);
+ }
+
+ hsotg->params.i2c_enable = val;
+}
+
+static void dwc2_set_param_en_multiple_tx_fifo(struct dwc2_hsotg *hsotg,
+ int val)
+{
+ int valid = 1;
+
+ if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
+ if (val >= 0) {
+ dev_err(hsotg->dev,
+				"Wrong value for en_multiple_tx_fifo\n");
+ dev_err(hsotg->dev,
+ "en_multiple_tx_fifo must be 0 or 1\n");
+ }
+ valid = 0;
+ }
+
+ if (val == 1 && !hsotg->hw_params.en_multiple_tx_fifo)
+ valid = 0;
+
+ if (!valid) {
+ if (val >= 0)
+ dev_err(hsotg->dev,
+ "%d invalid for parameter en_multiple_tx_fifo. Check HW configuration.\n",
+ val);
+ val = hsotg->hw_params.en_multiple_tx_fifo;
+ dev_dbg(hsotg->dev, "Setting en_multiple_tx_fifo to %d\n", val);
+ }
+
+ hsotg->params.en_multiple_tx_fifo = val;
+}
+
+static void dwc2_set_param_reload_ctl(struct dwc2_hsotg *hsotg, int val)
+{
+ int valid = 1;
+
+ if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
+ if (val >= 0) {
+ dev_err(hsotg->dev,
+ "'%d' invalid for parameter reload_ctl\n", val);
+ dev_err(hsotg->dev, "reload_ctl must be 0 or 1\n");
+ }
+ valid = 0;
+ }
+
+ if (val == 1 && hsotg->hw_params.snpsid < DWC2_CORE_REV_2_92a)
+ valid = 0;
+
+ if (!valid) {
+ if (val >= 0)
+ dev_err(hsotg->dev,
+ "%d invalid for parameter reload_ctl. Check HW configuration.\n",
+ val);
+ val = hsotg->hw_params.snpsid >= DWC2_CORE_REV_2_92a;
+ dev_dbg(hsotg->dev, "Setting reload_ctl to %d\n", val);
+ }
+
+ hsotg->params.reload_ctl = val;
+}
+
+static void dwc2_set_param_ahbcfg(struct dwc2_hsotg *hsotg, int val)
+{
+ if (val != -1)
+ hsotg->params.ahbcfg = val;
+ else
+ hsotg->params.ahbcfg = GAHBCFG_HBSTLEN_INCR4 <<
+ GAHBCFG_HBSTLEN_SHIFT;
+}
+
+static void dwc2_set_param_otg_ver(struct dwc2_hsotg *hsotg, int val)
+{
+ if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
+ if (val >= 0) {
+ dev_err(hsotg->dev,
+ "'%d' invalid for parameter otg_ver\n", val);
+ dev_err(hsotg->dev,
+ "otg_ver must be 0 (for OTG 1.3 support) or 1 (for OTG 2.0 support)\n");
+ }
+ val = 0;
+ dev_dbg(hsotg->dev, "Setting otg_ver to %d\n", val);
+ }
+
+ hsotg->params.otg_ver = val;
+}
+
+static void dwc2_set_param_uframe_sched(struct dwc2_hsotg *hsotg, int val)
+{
+ if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
+ if (val >= 0) {
+ dev_err(hsotg->dev,
+ "'%d' invalid for parameter uframe_sched\n",
+ val);
+ dev_err(hsotg->dev, "uframe_sched must be 0 or 1\n");
+ }
+ val = 1;
+ dev_dbg(hsotg->dev, "Setting uframe_sched to %d\n", val);
+ }
+
+ hsotg->params.uframe_sched = val;
+}
+
+static void dwc2_set_param_external_id_pin_ctl(struct dwc2_hsotg *hsotg,
+ int val)
+{
+ if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
+ if (val >= 0) {
+ dev_err(hsotg->dev,
+ "'%d' invalid for parameter external_id_pin_ctl\n",
+ val);
+ dev_err(hsotg->dev, "external_id_pin_ctl must be 0 or 1\n");
+ }
+ val = 0;
+ dev_dbg(hsotg->dev, "Setting external_id_pin_ctl to %d\n", val);
+ }
+
+ hsotg->params.external_id_pin_ctl = val;
+}
+
+static void dwc2_set_param_hibernation(struct dwc2_hsotg *hsotg,
+ int val)
+{
+ if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
+ if (val >= 0) {
+ dev_err(hsotg->dev,
+ "'%d' invalid for parameter hibernation\n",
+ val);
+ dev_err(hsotg->dev, "hibernation must be 0 or 1\n");
+ }
+ val = 0;
+ dev_dbg(hsotg->dev, "Setting hibernation to %d\n", val);
+ }
+
+ hsotg->params.hibernation = val;
+}
+
+static void dwc2_set_param_tx_fifo_sizes(struct dwc2_hsotg *hsotg)
+{
+ int i;
+ int num;
+ char *property = "g-tx-fifo-size";
+ struct dwc2_core_params *p = &hsotg->params;
+
+ memset(p->g_tx_fifo_size, 0, sizeof(p->g_tx_fifo_size));
+
+ /* Read tx fifo sizes */
+ num = device_property_read_u32_array(hsotg->dev, property, NULL, 0);
+
+ if (num > 0) {
+ device_property_read_u32_array(hsotg->dev, property,
+ &p->g_tx_fifo_size[1],
+ num);
+ } else {
+ u32 p_tx_fifo[] = DWC2_G_P_LEGACY_TX_FIFO_SIZE;
+
+ memcpy(&p->g_tx_fifo_size[1],
+ p_tx_fifo,
+ sizeof(p_tx_fifo));
+
+ num = ARRAY_SIZE(p_tx_fifo);
+ }
+
+ for (i = 0; i < num; i++) {
+ if ((i + 1) >= ARRAY_SIZE(p->g_tx_fifo_size))
+ break;
+
+ dev_dbg(hsotg->dev, "Setting %s[%d] to %d\n",
+ property, i + 1, p->g_tx_fifo_size[i + 1]);
+ }
+}
+
+static void dwc2_set_gadget_dma(struct dwc2_hsotg *hsotg)
+{
+ struct dwc2_hw_params *hw = &hsotg->hw_params;
+ struct dwc2_core_params *p = &hsotg->params;
+ bool dma_capable = !(hw->arch == GHWCFG2_SLAVE_ONLY_ARCH);
+
+ /* Buffer DMA */
+ dwc2_set_param_bool(hsotg, &p->g_dma,
+ false, "gadget-dma",
+ true, false,
+ dma_capable);
+
+ /* DMA Descriptor */
+ dwc2_set_param_bool(hsotg, &p->g_dma_desc, false,
+ "gadget-dma-desc",
+ p->g_dma, false,
+ !!hw->dma_desc_enable);
+}
+
+/**
+ * dwc2_set_parameters() - Set all core parameters.
+ *
+ * @hsotg: Programming view of the DWC_otg controller
+ * @params: The parameters to set
+ */
+static void dwc2_set_parameters(struct dwc2_hsotg *hsotg,
+ const struct dwc2_core_params *params)
+{
+ struct dwc2_hw_params *hw = &hsotg->hw_params;
+ struct dwc2_core_params *p = &hsotg->params;
+ bool dma_capable = !(hw->arch == GHWCFG2_SLAVE_ONLY_ARCH);
+
+ dwc2_set_param_otg_cap(hsotg, params->otg_cap);
+ if ((hsotg->dr_mode == USB_DR_MODE_HOST) ||
+ (hsotg->dr_mode == USB_DR_MODE_OTG)) {
+ dev_dbg(hsotg->dev, "Setting HOST parameters\n");
+
+ dwc2_set_param_bool(hsotg, &p->host_dma,
+ false, "host-dma",
+ true, false,
+ dma_capable);
+ }
+ dwc2_set_param_dma_desc_enable(hsotg, params->dma_desc_enable);
+ dwc2_set_param_dma_desc_fs_enable(hsotg, params->dma_desc_fs_enable);
+
+ dwc2_set_param_host_support_fs_ls_low_power(hsotg,
+ params->host_support_fs_ls_low_power);
+ dwc2_set_param_enable_dynamic_fifo(hsotg,
+ params->enable_dynamic_fifo);
+ dwc2_set_param_host_rx_fifo_size(hsotg,
+ params->host_rx_fifo_size);
+ dwc2_set_param_host_nperio_tx_fifo_size(hsotg,
+ params->host_nperio_tx_fifo_size);
+ dwc2_set_param_host_perio_tx_fifo_size(hsotg,
+ params->host_perio_tx_fifo_size);
+ dwc2_set_param_max_transfer_size(hsotg,
+ params->max_transfer_size);
+ dwc2_set_param_max_packet_count(hsotg,
+ params->max_packet_count);
+ dwc2_set_param_host_channels(hsotg, params->host_channels);
+ dwc2_set_param_phy_type(hsotg, params->phy_type);
+ dwc2_set_param_speed(hsotg, params->speed);
+ dwc2_set_param_host_ls_low_power_phy_clk(hsotg,
+ params->host_ls_low_power_phy_clk);
+ dwc2_set_param_phy_ulpi_ddr(hsotg, params->phy_ulpi_ddr);
+ dwc2_set_param_phy_ulpi_ext_vbus(hsotg,
+ params->phy_ulpi_ext_vbus);
+ dwc2_set_param_phy_utmi_width(hsotg, params->phy_utmi_width);
+ dwc2_set_param_ulpi_fs_ls(hsotg, params->ulpi_fs_ls);
+ dwc2_set_param_ts_dline(hsotg, params->ts_dline);
+ dwc2_set_param_i2c_enable(hsotg, params->i2c_enable);
+ dwc2_set_param_en_multiple_tx_fifo(hsotg,
+ params->en_multiple_tx_fifo);
+ dwc2_set_param_reload_ctl(hsotg, params->reload_ctl);
+ dwc2_set_param_ahbcfg(hsotg, params->ahbcfg);
+ dwc2_set_param_otg_ver(hsotg, params->otg_ver);
+ dwc2_set_param_uframe_sched(hsotg, params->uframe_sched);
+ dwc2_set_param_external_id_pin_ctl(hsotg, params->external_id_pin_ctl);
+ dwc2_set_param_hibernation(hsotg, params->hibernation);
+
+ /*
+ * Set devicetree-only parameters. These parameters do not
+ * take any values from @params.
+ */
+ if ((hsotg->dr_mode == USB_DR_MODE_PERIPHERAL) ||
+ (hsotg->dr_mode == USB_DR_MODE_OTG)) {
+ dev_dbg(hsotg->dev, "Setting peripheral device properties\n");
+
+ dwc2_set_gadget_dma(hsotg);
+
+ /*
+ * The values for g_rx_fifo_size (2048) and
+ * g_np_tx_fifo_size (1024) come from the legacy s3c
+ * gadget driver. These defaults have been hard-coded
+ * for some time so many platforms depend on these
+ * values. Leave them as defaults for now and only
+ * auto-detect if the hardware does not support the
+ * default.
+ */
+ dwc2_set_param_u16(hsotg, &p->g_rx_fifo_size,
+ true, "g-rx-fifo-size", 2048,
+ hw->rx_fifo_size,
+ 16, hw->rx_fifo_size);
+
+ dwc2_set_param_u16(hsotg, &p->g_np_tx_fifo_size,
+ true, "g-np-tx-fifo-size", 1024,
+ hw->dev_nperio_tx_fifo_size,
+ 16, hw->dev_nperio_tx_fifo_size);
+
+ dwc2_set_param_tx_fifo_sizes(hsotg);
+ }
+}
+
+/*
+ * Gets host hardware parameters. Forces host mode if not currently in
+ * host mode. Should be called immediately after a core soft reset in
+ * order to get the reset values.
+ */
+static void dwc2_get_host_hwparams(struct dwc2_hsotg *hsotg)
+{
+ struct dwc2_hw_params *hw = &hsotg->hw_params;
+ u32 gnptxfsiz;
+ u32 hptxfsiz;
+ bool forced;
+
+ if (hsotg->dr_mode == USB_DR_MODE_PERIPHERAL)
+ return;
+
+ forced = dwc2_force_mode_if_needed(hsotg, true);
+
+ gnptxfsiz = dwc2_readl(hsotg->regs + GNPTXFSIZ);
+ hptxfsiz = dwc2_readl(hsotg->regs + HPTXFSIZ);
+ dev_dbg(hsotg->dev, "gnptxfsiz=%08x\n", gnptxfsiz);
+ dev_dbg(hsotg->dev, "hptxfsiz=%08x\n", hptxfsiz);
+
+ if (forced)
+ dwc2_clear_force_mode(hsotg);
+
+ hw->host_nperio_tx_fifo_size = (gnptxfsiz & FIFOSIZE_DEPTH_MASK) >>
+ FIFOSIZE_DEPTH_SHIFT;
+ hw->host_perio_tx_fifo_size = (hptxfsiz & FIFOSIZE_DEPTH_MASK) >>
+ FIFOSIZE_DEPTH_SHIFT;
+}
+
+/*
+ * Gets device hardware parameters. Forces device mode if not
+ * currently in device mode. Should be called immediately after a core
+ * soft reset in order to get the reset values.
+ */
+static void dwc2_get_dev_hwparams(struct dwc2_hsotg *hsotg)
+{
+ struct dwc2_hw_params *hw = &hsotg->hw_params;
+ bool forced;
+ u32 gnptxfsiz;
+
+ if (hsotg->dr_mode == USB_DR_MODE_HOST)
+ return;
+
+ forced = dwc2_force_mode_if_needed(hsotg, false);
+
+ gnptxfsiz = dwc2_readl(hsotg->regs + GNPTXFSIZ);
+ dev_dbg(hsotg->dev, "gnptxfsiz=%08x\n", gnptxfsiz);
+
+ if (forced)
+ dwc2_clear_force_mode(hsotg);
+
+ hw->dev_nperio_tx_fifo_size = (gnptxfsiz & FIFOSIZE_DEPTH_MASK) >>
+ FIFOSIZE_DEPTH_SHIFT;
+}
+
+/**
+ * dwc2_get_hwparams() - During device initialization, read various
+ * hardware configuration registers and interpret the contents.
+ */
+int dwc2_get_hwparams(struct dwc2_hsotg *hsotg)
+{
+ struct dwc2_hw_params *hw = &hsotg->hw_params;
+ unsigned int width;
+ u32 hwcfg1, hwcfg2, hwcfg3, hwcfg4;
+ u32 grxfsiz;
+
+ /*
+ * Attempt to ensure this device is really a DWC_otg Controller.
+ * Read and verify the GSNPSID register contents. The value should be
+ * 0x4f542xxx or 0x4f543xxx, which corresponds to either "OT2" or "OT3",
+ * as in "OTG version 2.xx" or "OTG version 3.xx".
+ */
+ hw->snpsid = dwc2_readl(hsotg->regs + GSNPSID);
+ if ((hw->snpsid & 0xfffff000) != 0x4f542000 &&
+ (hw->snpsid & 0xfffff000) != 0x4f543000 &&
+ (hw->snpsid & 0xffff0000) != 0x55310000 &&
+ (hw->snpsid & 0xffff0000) != 0x55320000) {
+ dev_err(hsotg->dev, "Bad value for GSNPSID: 0x%08x\n",
+ hw->snpsid);
+ return -ENODEV;
+ }
+
+ dev_dbg(hsotg->dev, "Core Release: %1x.%1x%1x%1x (snpsid=%x)\n",
+ hw->snpsid >> 12 & 0xf, hw->snpsid >> 8 & 0xf,
+ hw->snpsid >> 4 & 0xf, hw->snpsid & 0xf, hw->snpsid);
+
+ hwcfg1 = dwc2_readl(hsotg->regs + GHWCFG1);
+ hwcfg2 = dwc2_readl(hsotg->regs + GHWCFG2);
+ hwcfg3 = dwc2_readl(hsotg->regs + GHWCFG3);
+ hwcfg4 = dwc2_readl(hsotg->regs + GHWCFG4);
+ grxfsiz = dwc2_readl(hsotg->regs + GRXFSIZ);
+
+ dev_dbg(hsotg->dev, "hwcfg1=%08x\n", hwcfg1);
+ dev_dbg(hsotg->dev, "hwcfg2=%08x\n", hwcfg2);
+ dev_dbg(hsotg->dev, "hwcfg3=%08x\n", hwcfg3);
+ dev_dbg(hsotg->dev, "hwcfg4=%08x\n", hwcfg4);
+ dev_dbg(hsotg->dev, "grxfsiz=%08x\n", grxfsiz);
+
+ /*
+ * Host specific hardware parameters. Reading these parameters
+ * requires the controller to be in host mode. The mode will
+ * be forced, if necessary, to read these values.
+ */
+ dwc2_get_host_hwparams(hsotg);
+ dwc2_get_dev_hwparams(hsotg);
+
+ /* hwcfg1 */
+ hw->dev_ep_dirs = hwcfg1;
+
+ /* hwcfg2 */
+ hw->op_mode = (hwcfg2 & GHWCFG2_OP_MODE_MASK) >>
+ GHWCFG2_OP_MODE_SHIFT;
+ hw->arch = (hwcfg2 & GHWCFG2_ARCHITECTURE_MASK) >>
+ GHWCFG2_ARCHITECTURE_SHIFT;
+ hw->enable_dynamic_fifo = !!(hwcfg2 & GHWCFG2_DYNAMIC_FIFO);
+ hw->host_channels = 1 + ((hwcfg2 & GHWCFG2_NUM_HOST_CHAN_MASK) >>
+ GHWCFG2_NUM_HOST_CHAN_SHIFT);
+ hw->hs_phy_type = (hwcfg2 & GHWCFG2_HS_PHY_TYPE_MASK) >>
+ GHWCFG2_HS_PHY_TYPE_SHIFT;
+ hw->fs_phy_type = (hwcfg2 & GHWCFG2_FS_PHY_TYPE_MASK) >>
+ GHWCFG2_FS_PHY_TYPE_SHIFT;
+ hw->num_dev_ep = (hwcfg2 & GHWCFG2_NUM_DEV_EP_MASK) >>
+ GHWCFG2_NUM_DEV_EP_SHIFT;
+ hw->nperio_tx_q_depth =
+ (hwcfg2 & GHWCFG2_NONPERIO_TX_Q_DEPTH_MASK) >>
+ GHWCFG2_NONPERIO_TX_Q_DEPTH_SHIFT << 1;
+ hw->host_perio_tx_q_depth =
+ (hwcfg2 & GHWCFG2_HOST_PERIO_TX_Q_DEPTH_MASK) >>
+ GHWCFG2_HOST_PERIO_TX_Q_DEPTH_SHIFT << 1;
+ hw->dev_token_q_depth =
+ (hwcfg2 & GHWCFG2_DEV_TOKEN_Q_DEPTH_MASK) >>
+ GHWCFG2_DEV_TOKEN_Q_DEPTH_SHIFT;
+
+ /* hwcfg3 */
+ width = (hwcfg3 & GHWCFG3_XFER_SIZE_CNTR_WIDTH_MASK) >>
+ GHWCFG3_XFER_SIZE_CNTR_WIDTH_SHIFT;
+ hw->max_transfer_size = (1 << (width + 11)) - 1;
+ width = (hwcfg3 & GHWCFG3_PACKET_SIZE_CNTR_WIDTH_MASK) >>
+ GHWCFG3_PACKET_SIZE_CNTR_WIDTH_SHIFT;
+ hw->max_packet_count = (1 << (width + 4)) - 1;
+ hw->i2c_enable = !!(hwcfg3 & GHWCFG3_I2C);
+ hw->total_fifo_size = (hwcfg3 & GHWCFG3_DFIFO_DEPTH_MASK) >>
+ GHWCFG3_DFIFO_DEPTH_SHIFT;
+
+ /* hwcfg4 */
+ hw->en_multiple_tx_fifo = !!(hwcfg4 & GHWCFG4_DED_FIFO_EN);
+ hw->num_dev_perio_in_ep = (hwcfg4 & GHWCFG4_NUM_DEV_PERIO_IN_EP_MASK) >>
+ GHWCFG4_NUM_DEV_PERIO_IN_EP_SHIFT;
+ hw->dma_desc_enable = !!(hwcfg4 & GHWCFG4_DESC_DMA);
+ hw->power_optimized = !!(hwcfg4 & GHWCFG4_POWER_OPTIMIZ);
+ hw->utmi_phy_data_width = (hwcfg4 & GHWCFG4_UTMI_PHY_DATA_WIDTH_MASK) >>
+ GHWCFG4_UTMI_PHY_DATA_WIDTH_SHIFT;
+
+ /* fifo sizes */
+ hw->rx_fifo_size = (grxfsiz & GRXFSIZ_DEPTH_MASK) >>
+ GRXFSIZ_DEPTH_SHIFT;
+
+ dev_dbg(hsotg->dev, "Detected values from hardware:\n");
+ dev_dbg(hsotg->dev, " op_mode=%d\n",
+ hw->op_mode);
+ dev_dbg(hsotg->dev, " arch=%d\n",
+ hw->arch);
+ dev_dbg(hsotg->dev, " dma_desc_enable=%d\n",
+ hw->dma_desc_enable);
+ dev_dbg(hsotg->dev, " power_optimized=%d\n",
+ hw->power_optimized);
+ dev_dbg(hsotg->dev, " i2c_enable=%d\n",
+ hw->i2c_enable);
+ dev_dbg(hsotg->dev, " hs_phy_type=%d\n",
+ hw->hs_phy_type);
+ dev_dbg(hsotg->dev, " fs_phy_type=%d\n",
+ hw->fs_phy_type);
+ dev_dbg(hsotg->dev, " utmi_phy_data_width=%d\n",
+ hw->utmi_phy_data_width);
+ dev_dbg(hsotg->dev, " num_dev_ep=%d\n",
+ hw->num_dev_ep);
+ dev_dbg(hsotg->dev, " num_dev_perio_in_ep=%d\n",
+ hw->num_dev_perio_in_ep);
+ dev_dbg(hsotg->dev, " host_channels=%d\n",
+ hw->host_channels);
+ dev_dbg(hsotg->dev, " max_transfer_size=%d\n",
+ hw->max_transfer_size);
+ dev_dbg(hsotg->dev, " max_packet_count=%d\n",
+ hw->max_packet_count);
+ dev_dbg(hsotg->dev, " nperio_tx_q_depth=0x%0x\n",
+ hw->nperio_tx_q_depth);
+ dev_dbg(hsotg->dev, " host_perio_tx_q_depth=0x%0x\n",
+ hw->host_perio_tx_q_depth);
+ dev_dbg(hsotg->dev, " dev_token_q_depth=0x%0x\n",
+ hw->dev_token_q_depth);
+ dev_dbg(hsotg->dev, " enable_dynamic_fifo=%d\n",
+ hw->enable_dynamic_fifo);
+ dev_dbg(hsotg->dev, " en_multiple_tx_fifo=%d\n",
+ hw->en_multiple_tx_fifo);
+ dev_dbg(hsotg->dev, " total_fifo_size=%d\n",
+ hw->total_fifo_size);
+ dev_dbg(hsotg->dev, " rx_fifo_size=%d\n",
+ hw->rx_fifo_size);
+ dev_dbg(hsotg->dev, " host_nperio_tx_fifo_size=%d\n",
+ hw->host_nperio_tx_fifo_size);
+ dev_dbg(hsotg->dev, " host_perio_tx_fifo_size=%d\n",
+ hw->host_perio_tx_fifo_size);
+ dev_dbg(hsotg->dev, "\n");
+
+ return 0;
+}
+
+int dwc2_init_params(struct dwc2_hsotg *hsotg)
+{
+ const struct of_device_id *match;
+ struct dwc2_core_params params;
+
+ match = of_match_device(dwc2_of_match_table, hsotg->dev);
+ if (match && match->data)
+ params = *((struct dwc2_core_params *)match->data);
+ else
+ params = params_default;
+
+ if (dwc2_is_fs_iot(hsotg)) {
+ params.speed = DWC2_SPEED_PARAM_FULL;
+ params.phy_type = DWC2_PHY_TYPE_PARAM_FS;
+ }
+
+ dwc2_set_parameters(hsotg, &params);
+
+ return 0;
+}
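
dwc2_set_param() above implements a small clamping protocol: a device property, when @lookup is set, overrides the legacy value; an out-of-range value falls back to @def; and if @def is itself out of range the parameter lands on @min. A value of all ones for the parameter's width (sizemax, meaning "auto") is corrected silently instead of being reported. A standalone model of that loop, with userspace stand-ins for the kernel logging:

	#include <stdint.h>
	#include <stdio.h>

	static uint64_t clamp_param(uint64_t value, uint64_t def,
				    uint64_t min, uint64_t max,
				    uint64_t sizemax)
	{
		while (value < min || value > max) {
			if (value != sizemax)	/* sizemax encodes "auto" */
				fprintf(stderr, "invalid value %llu\n",
					(unsigned long long)value);
			if (value == def)	/* default is bad too */
				return min;
			value = def;		/* retry with the default */
		}
		return value;
	}

	int main(void)
	{
		/* A u16 parameter: sizemax = 0xffff requests auto-detect. */
		printf("%llu\n", (unsigned long long)
		       clamp_param(0xffff, 1024, 16, 8192, 0xffff));
		/* Prints 1024: "auto" falls back to the default, silently. */
		return 0;
	}

The GSNPSID check in dwc2_get_hwparams() can be exercised the same way; the release-digit arithmetic, verified standalone with a hypothetical "OT3" ID value:

	#include <assert.h>
	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint32_t snpsid = 0x4f54310a;	/* hypothetical 3.10a core */

		assert((snpsid & 0xfffff000) == 0x4f543000);
		printf("Core Release: %x.%x%x%x\n",
		       snpsid >> 12 & 0xf, snpsid >> 8 & 0xf,
		       snpsid >> 4 & 0xf, snpsid & 0xf);	/* 3.10a */
		return 0;
	}
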
diff --git a/drivers/usb/dwc2/pci.c b/drivers/usb/dwc2/pci.c
index ae419615a176..a23329e3d7cd 100644
--- a/drivers/usb/dwc2/pci.c
+++ b/drivers/usb/dwc2/pci.c
@@ -62,6 +62,20 @@ struct dwc2_pci_glue {
struct platform_device *phy;
};
+static int dwc2_pci_quirks(struct pci_dev *pdev, struct platform_device *dwc2)
+{
+ if (pdev->vendor == PCI_VENDOR_ID_SYNOPSYS &&
+ pdev->device == PCI_PRODUCT_ID_HAPS_HSOTG) {
+ struct property_entry properties[] = {
+ { },
+ };
+
+ return platform_device_add_properties(dwc2, properties);
+ }
+
+ return 0;
+}
+
static void dwc2_pci_remove(struct pci_dev *pci)
{
struct dwc2_pci_glue *glue = pci_get_drvdata(pci);
@@ -122,6 +136,10 @@ static int dwc2_pci_probe(struct pci_dev *pci,
return PTR_ERR(phy);
}
+ ret = dwc2_pci_quirks(pci, dwc2);
+ if (ret)
+ goto err;
+
ret = platform_device_add(dwc2);
if (ret) {
dev_err(dev, "failed to register dwc2 device\n");
diff --git a/drivers/usb/dwc2/platform.c b/drivers/usb/dwc2/platform.c
index 8e1728b39a49..4fc8c603afb8 100644
--- a/drivers/usb/dwc2/platform.c
+++ b/drivers/usb/dwc2/platform.c
@@ -55,165 +55,6 @@
static const char dwc2_driver_name[] = "dwc2";
-static const struct dwc2_core_params params_hi6220 = {
- .otg_cap = 2, /* No HNP/SRP capable */
- .otg_ver = 0, /* 1.3 */
- .dma_enable = 1,
- .dma_desc_enable = 0,
- .dma_desc_fs_enable = 0,
- .speed = 0, /* High Speed */
- .enable_dynamic_fifo = 1,
- .en_multiple_tx_fifo = 1,
- .host_rx_fifo_size = 512,
- .host_nperio_tx_fifo_size = 512,
- .host_perio_tx_fifo_size = 512,
- .max_transfer_size = 65535,
- .max_packet_count = 511,
- .host_channels = 16,
- .phy_type = 1, /* UTMI */
- .phy_utmi_width = 8,
- .phy_ulpi_ddr = 0, /* Single */
- .phy_ulpi_ext_vbus = 0,
- .i2c_enable = 0,
- .ulpi_fs_ls = 0,
- .host_support_fs_ls_low_power = 0,
- .host_ls_low_power_phy_clk = 0, /* 48 MHz */
- .ts_dline = 0,
- .reload_ctl = 0,
- .ahbcfg = GAHBCFG_HBSTLEN_INCR16 <<
- GAHBCFG_HBSTLEN_SHIFT,
- .uframe_sched = 0,
- .external_id_pin_ctl = -1,
- .hibernation = -1,
-};
-
-static const struct dwc2_core_params params_bcm2835 = {
- .otg_cap = 0, /* HNP/SRP capable */
- .otg_ver = 0, /* 1.3 */
- .dma_enable = 1,
- .dma_desc_enable = 0,
- .dma_desc_fs_enable = 0,
- .speed = 0, /* High Speed */
- .enable_dynamic_fifo = 1,
- .en_multiple_tx_fifo = 1,
- .host_rx_fifo_size = 774, /* 774 DWORDs */
- .host_nperio_tx_fifo_size = 256, /* 256 DWORDs */
- .host_perio_tx_fifo_size = 512, /* 512 DWORDs */
- .max_transfer_size = 65535,
- .max_packet_count = 511,
- .host_channels = 8,
- .phy_type = 1, /* UTMI */
- .phy_utmi_width = 8, /* 8 bits */
- .phy_ulpi_ddr = 0, /* Single */
- .phy_ulpi_ext_vbus = 0,
- .i2c_enable = 0,
- .ulpi_fs_ls = 0,
- .host_support_fs_ls_low_power = 0,
- .host_ls_low_power_phy_clk = 0, /* 48 MHz */
- .ts_dline = 0,
- .reload_ctl = 0,
- .ahbcfg = 0x10,
- .uframe_sched = 0,
- .external_id_pin_ctl = -1,
- .hibernation = -1,
-};
-
-static const struct dwc2_core_params params_rk3066 = {
- .otg_cap = 2, /* non-HNP/non-SRP */
- .otg_ver = -1,
- .dma_enable = -1,
- .dma_desc_enable = 0,
- .dma_desc_fs_enable = 0,
- .speed = -1,
- .enable_dynamic_fifo = 1,
- .en_multiple_tx_fifo = -1,
- .host_rx_fifo_size = 525, /* 525 DWORDs */
- .host_nperio_tx_fifo_size = 128, /* 128 DWORDs */
- .host_perio_tx_fifo_size = 256, /* 256 DWORDs */
- .max_transfer_size = -1,
- .max_packet_count = -1,
- .host_channels = -1,
- .phy_type = -1,
- .phy_utmi_width = -1,
- .phy_ulpi_ddr = -1,
- .phy_ulpi_ext_vbus = -1,
- .i2c_enable = -1,
- .ulpi_fs_ls = -1,
- .host_support_fs_ls_low_power = -1,
- .host_ls_low_power_phy_clk = -1,
- .ts_dline = -1,
- .reload_ctl = -1,
- .ahbcfg = GAHBCFG_HBSTLEN_INCR16 <<
- GAHBCFG_HBSTLEN_SHIFT,
- .uframe_sched = -1,
- .external_id_pin_ctl = -1,
- .hibernation = -1,
-};
-
-static const struct dwc2_core_params params_ltq = {
- .otg_cap = 2, /* non-HNP/non-SRP */
- .otg_ver = -1,
- .dma_enable = -1,
- .dma_desc_enable = -1,
- .dma_desc_fs_enable = -1,
- .speed = -1,
- .enable_dynamic_fifo = -1,
- .en_multiple_tx_fifo = -1,
- .host_rx_fifo_size = 288, /* 288 DWORDs */
- .host_nperio_tx_fifo_size = 128, /* 128 DWORDs */
- .host_perio_tx_fifo_size = 96, /* 96 DWORDs */
- .max_transfer_size = 65535,
- .max_packet_count = 511,
- .host_channels = -1,
- .phy_type = -1,
- .phy_utmi_width = -1,
- .phy_ulpi_ddr = -1,
- .phy_ulpi_ext_vbus = -1,
- .i2c_enable = -1,
- .ulpi_fs_ls = -1,
- .host_support_fs_ls_low_power = -1,
- .host_ls_low_power_phy_clk = -1,
- .ts_dline = -1,
- .reload_ctl = -1,
- .ahbcfg = GAHBCFG_HBSTLEN_INCR16 <<
- GAHBCFG_HBSTLEN_SHIFT,
- .uframe_sched = -1,
- .external_id_pin_ctl = -1,
- .hibernation = -1,
-};
-
-static const struct dwc2_core_params params_amlogic = {
- .otg_cap = DWC2_CAP_PARAM_NO_HNP_SRP_CAPABLE,
- .otg_ver = -1,
- .dma_enable = 1,
- .dma_desc_enable = 0,
- .dma_desc_fs_enable = 0,
- .speed = DWC2_SPEED_PARAM_HIGH,
- .enable_dynamic_fifo = 1,
- .en_multiple_tx_fifo = -1,
- .host_rx_fifo_size = 512,
- .host_nperio_tx_fifo_size = 500,
- .host_perio_tx_fifo_size = 500,
- .max_transfer_size = -1,
- .max_packet_count = -1,
- .host_channels = 16,
- .phy_type = DWC2_PHY_TYPE_PARAM_UTMI,
- .phy_utmi_width = -1,
- .phy_ulpi_ddr = -1,
- .phy_ulpi_ext_vbus = -1,
- .i2c_enable = -1,
- .ulpi_fs_ls = -1,
- .host_support_fs_ls_low_power = -1,
- .host_ls_low_power_phy_clk = -1,
- .ts_dline = -1,
- .reload_ctl = 1,
- .ahbcfg = GAHBCFG_HBSTLEN_INCR8 <<
- GAHBCFG_HBSTLEN_SHIFT,
- .uframe_sched = 0,
- .external_id_pin_ctl = -1,
- .hibernation = -1,
-};
-
/*
* Check the dr_mode against the module configuration and hardware
* capabilities.
@@ -510,20 +351,6 @@ static void dwc2_driver_shutdown(struct platform_device *dev)
disable_irq(hsotg->irq);
}
-static const struct of_device_id dwc2_of_match_table[] = {
- { .compatible = "brcm,bcm2835-usb", .data = &params_bcm2835 },
- { .compatible = "hisilicon,hi6220-usb", .data = &params_hi6220 },
- { .compatible = "rockchip,rk3066-usb", .data = &params_rk3066 },
- { .compatible = "lantiq,arx100-usb", .data = &params_ltq },
- { .compatible = "lantiq,xrx200-usb", .data = &params_ltq },
- { .compatible = "snps,dwc2", .data = NULL },
- { .compatible = "samsung,s3c6400-hsotg", .data = NULL},
- { .compatible = "amlogic,meson8b-usb", .data = &params_amlogic },
- { .compatible = "amlogic,meson-gxbb-usb", .data = &params_amlogic },
- {},
-};
-MODULE_DEVICE_TABLE(of, dwc2_of_match_table);
-
/**
* dwc2_driver_probe() - Called when the DWC_otg core is bound to the DWC_otg
* driver
@@ -538,30 +365,10 @@ MODULE_DEVICE_TABLE(of, dwc2_of_match_table);
*/
static int dwc2_driver_probe(struct platform_device *dev)
{
- const struct of_device_id *match;
- const struct dwc2_core_params *params;
- struct dwc2_core_params defparams;
struct dwc2_hsotg *hsotg;
struct resource *res;
int retval;
- match = of_match_device(dwc2_of_match_table, &dev->dev);
- if (match && match->data) {
- params = match->data;
- } else {
- /* Default all params to autodetect */
- dwc2_set_all_params(&defparams, -1);
- params = &defparams;
-
- /*
- * Disable descriptor dma mode by default as the HW can support
- * it, but does not support it for SPLIT transactions.
- * Disable it for FS devices as well.
- */
- defparams.dma_desc_enable = 0;
- defparams.dma_desc_fs_enable = 0;
- }
-
hsotg = devm_kzalloc(&dev->dev, sizeof(*hsotg), GFP_KERNEL);
if (!hsotg)
return -ENOMEM;
@@ -591,13 +398,6 @@ static int dwc2_driver_probe(struct platform_device *dev)
spin_lock_init(&hsotg->lock);
- hsotg->core_params = devm_kzalloc(&dev->dev,
- sizeof(*hsotg->core_params), GFP_KERNEL);
- if (!hsotg->core_params)
- return -ENOMEM;
-
- dwc2_set_all_params(hsotg->core_params, -1);
-
hsotg->irq = platform_get_irq(dev, 0);
if (hsotg->irq < 0) {
dev_err(&dev->dev, "missing IRQ resource\n");
@@ -631,11 +431,12 @@ static int dwc2_driver_probe(struct platform_device *dev)
if (retval)
goto error;
- /* Validate parameter values */
- dwc2_set_parameters(hsotg, params);
-
dwc2_force_dr_mode(hsotg);
+ retval = dwc2_init_params(hsotg);
+ if (retval)
+ goto error;
+
if (hsotg->dr_mode != USB_DR_MODE_HOST) {
retval = dwc2_gadget_init(hsotg, hsotg->irq);
if (retval)
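
With the parameter tables gone from platform.c, probe reduces to dwc2_force_dr_mode() followed by dwc2_init_params(); the ordering matters because dwc2_set_parameters() branches on hsotg->dr_mode when deciding whether to apply host and/or gadget properties. The match-then-copy fallback in dwc2_init_params() can be modeled standalone (the params struct here is trimmed to one field; names are illustrative):

	#include <stdio.h>
	#include <string.h>

	struct params { int host_rx_fifo_size; };	/* -1 == autodetect */

	static const struct params params_bcm2835 = { .host_rx_fifo_size = 774 };
	static const struct params params_default = { .host_rx_fifo_size = -1 };

	struct of_id { const char *compatible; const struct params *data; };

	static const struct of_id match_table[] = {
		{ "brcm,bcm2835-usb", &params_bcm2835 },
		{ "snps,dwc2", NULL },	/* known compatible, no overrides */
		{ NULL, NULL },
	};

	/* Model of dwc2_init_params(): copy the matched platform params,
	 * otherwise start from the all-autodetect defaults. */
	static struct params init_params(const char *compat)
	{
		const struct of_id *id;

		for (id = match_table; id->compatible; id++)
			if (!strcmp(id->compatible, compat) && id->data)
				return *id->data;	/* struct copy */
		return params_default;
	}

	int main(void)
	{
		printf("%d\n", init_params("snps,dwc2").host_rx_fifo_size);
		printf("%d\n", init_params("brcm,bcm2835-usb").host_rx_fifo_size);
		return 0;	/* prints -1 then 774 */
	}
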
diff --git a/drivers/usb/dwc3/Kconfig b/drivers/usb/dwc3/Kconfig
index b97cde76914d..c5aa235863e8 100644
--- a/drivers/usb/dwc3/Kconfig
+++ b/drivers/usb/dwc3/Kconfig
@@ -62,7 +62,7 @@ config USB_DWC3_OMAP
config USB_DWC3_EXYNOS
tristate "Samsung Exynos Platform"
- depends on ARCH_EXYNOS && OF || COMPILE_TEST
+ depends on (ARCH_EXYNOS || COMPILE_TEST) && OF
default USB_DWC3
help
Recent Exynos5 SoCs ship with one DesignWare Core USB3 IP inside,
@@ -70,7 +70,7 @@ config USB_DWC3_EXYNOS
config USB_DWC3_PCI
tristate "PCIe-based Platforms"
- depends on PCI
+ depends on PCI && ACPI
default USB_DWC3
help
If you're using the DesignWare Core IP with a PCIe, please say
@@ -98,7 +98,7 @@ config USB_DWC3_OF_SIMPLE
config USB_DWC3_ST
tristate "STMicroelectronics Platforms"
- depends on ARCH_STI && OF
+ depends on (ARCH_STI || COMPILE_TEST) && OF
default USB_DWC3
help
STMicroelectronics SoCs with one DesignWare Core USB3 IP
diff --git a/drivers/usb/dwc3/Makefile b/drivers/usb/dwc3/Makefile
index 22420e17d68b..ffca34029b21 100644
--- a/drivers/usb/dwc3/Makefile
+++ b/drivers/usb/dwc3/Makefile
@@ -3,7 +3,11 @@ CFLAGS_trace.o := -I$(src)
obj-$(CONFIG_USB_DWC3) += dwc3.o
-dwc3-y := core.o debug.o trace.o
+dwc3-y := core.o
+
+ifneq ($(CONFIG_FTRACE),)
+ dwc3-y += trace.o
+endif
ifneq ($(filter y,$(CONFIG_USB_DWC3_HOST) $(CONFIG_USB_DWC3_DUAL_ROLE)),)
dwc3-y += host.o
diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
index fea446900cad..369bab16a824 100644
--- a/drivers/usb/dwc3/core.c
+++ b/drivers/usb/dwc3/core.c
@@ -169,33 +169,6 @@ static int dwc3_core_soft_reset(struct dwc3 *dwc)
return -ETIMEDOUT;
}
-/**
- * dwc3_soft_reset - Issue soft reset
- * @dwc: Pointer to our controller context structure
- */
-static int dwc3_soft_reset(struct dwc3 *dwc)
-{
- unsigned long timeout;
- u32 reg;
-
- timeout = jiffies + msecs_to_jiffies(500);
- dwc3_writel(dwc->regs, DWC3_DCTL, DWC3_DCTL_CSFTRST);
- do {
- reg = dwc3_readl(dwc->regs, DWC3_DCTL);
- if (!(reg & DWC3_DCTL_CSFTRST))
- break;
-
- if (time_after(jiffies, timeout)) {
- dev_err(dwc->dev, "Reset Timed Out\n");
- return -ETIMEDOUT;
- }
-
- cpu_relax();
- } while (true);
-
- return 0;
-}
-
/*
* dwc3_frame_length_adjustment - Adjusts frame length if required
* @dwc: Pointer to our controller context structure
@@ -229,7 +202,7 @@ static void dwc3_frame_length_adjustment(struct dwc3 *dwc)
static void dwc3_free_one_event_buffer(struct dwc3 *dwc,
struct dwc3_event_buffer *evt)
{
- dma_free_coherent(dwc->dev, evt->length, evt->buf, evt->dma);
+ dma_free_coherent(dwc->sysdev, evt->length, evt->buf, evt->dma);
}
/**
@@ -251,7 +224,11 @@ static struct dwc3_event_buffer *dwc3_alloc_one_event_buffer(struct dwc3 *dwc,
evt->dwc = dwc;
evt->length = length;
- evt->buf = dma_alloc_coherent(dwc->dev, length,
+ evt->cache = devm_kzalloc(dwc->dev, length, GFP_KERNEL);
+ if (!evt->cache)
+ return ERR_PTR(-ENOMEM);
+
+ evt->buf = dma_alloc_coherent(dwc->sysdev, length,
&evt->dma, GFP_KERNEL);
if (!evt->buf)
return ERR_PTR(-ENOMEM);
@@ -305,13 +282,7 @@ static int dwc3_event_buffers_setup(struct dwc3 *dwc)
struct dwc3_event_buffer *evt;
evt = dwc->ev_buf;
- dwc3_trace(trace_dwc3_core,
- "Event buf %p dma %08llx length %d\n",
- evt->buf, (unsigned long long) evt->dma,
- evt->length);
-
evt->lpos = 0;
-
dwc3_writel(dwc->regs, DWC3_GEVNTADRLO(0),
lower_32_bits(evt->dma));
dwc3_writel(dwc->regs, DWC3_GEVNTADRHI(0),
@@ -370,11 +341,11 @@ static int dwc3_setup_scratch_buffers(struct dwc3 *dwc)
if (!WARN_ON(dwc->scratchbuf))
return 0;
- scratch_addr = dma_map_single(dwc->dev, dwc->scratchbuf,
+ scratch_addr = dma_map_single(dwc->sysdev, dwc->scratchbuf,
dwc->nr_scratch * DWC3_SCRATCHBUF_SIZE,
DMA_BIDIRECTIONAL);
- if (dma_mapping_error(dwc->dev, scratch_addr)) {
- dev_err(dwc->dev, "failed to map scratch buffer\n");
+ if (dma_mapping_error(dwc->sysdev, scratch_addr)) {
+ dev_err(dwc->sysdev, "failed to map scratch buffer\n");
ret = -EFAULT;
goto err0;
}
@@ -398,7 +369,7 @@ static int dwc3_setup_scratch_buffers(struct dwc3 *dwc)
return 0;
err1:
- dma_unmap_single(dwc->dev, dwc->scratch_addr, dwc->nr_scratch *
+ dma_unmap_single(dwc->sysdev, dwc->scratch_addr, dwc->nr_scratch *
DWC3_SCRATCHBUF_SIZE, DMA_BIDIRECTIONAL);
err0:
@@ -417,7 +388,7 @@ static void dwc3_free_scratch_buffers(struct dwc3 *dwc)
if (!WARN_ON(dwc->scratchbuf))
return;
- dma_unmap_single(dwc->dev, dwc->scratch_addr, dwc->nr_scratch *
+ dma_unmap_single(dwc->sysdev, dwc->scratch_addr, dwc->nr_scratch *
DWC3_SCRATCHBUF_SIZE, DMA_BIDIRECTIONAL);
kfree(dwc->scratchbuf);
}
@@ -428,9 +399,6 @@ static void dwc3_core_num_eps(struct dwc3 *dwc)
dwc->num_in_eps = DWC3_NUM_IN_EPS(parms);
dwc->num_out_eps = DWC3_NUM_EPS(parms) - dwc->num_in_eps;
-
- dwc3_trace(trace_dwc3_core, "found %d IN and %d OUT endpoints",
- dwc->num_in_eps, dwc->num_out_eps);
}
static void dwc3_cache_hwparams(struct dwc3 *dwc)
@@ -524,13 +492,6 @@ static int dwc3_phy_setup(struct dwc3 *dwc)
}
/* FALLTHROUGH */
case DWC3_GHWPARAMS3_HSPHY_IFC_ULPI:
- /* Making sure the interface and PHY are operational */
- ret = dwc3_soft_reset(dwc);
- if (ret)
- return ret;
-
- udelay(1);
-
ret = dwc3_ulpi_init(dwc);
if (ret)
return ret;
@@ -594,19 +555,12 @@ static void dwc3_core_exit(struct dwc3 *dwc)
phy_power_off(dwc->usb3_generic_phy);
}
-/**
- * dwc3_core_init - Low-level initialization of DWC3 Core
- * @dwc: Pointer to our controller context structure
- *
- * Returns 0 on success otherwise negative errno.
- */
-static int dwc3_core_init(struct dwc3 *dwc)
+static bool dwc3_core_is_valid(struct dwc3 *dwc)
{
- u32 hwparams4 = dwc->hwparams.hwparams4;
- u32 reg;
- int ret;
+ u32 reg;
reg = dwc3_readl(dwc->regs, DWC3_GSNPSID);
+
/* This should read as U3 followed by revision number */
if ((reg & DWC3_GSNPSID_MASK) == 0x55330000) {
/* Detected DWC_usb3 IP */
@@ -616,36 +570,16 @@ static int dwc3_core_init(struct dwc3 *dwc)
dwc->revision = dwc3_readl(dwc->regs, DWC3_VER_NUMBER);
dwc->revision |= DWC3_REVISION_IS_DWC31;
} else {
- dev_err(dwc->dev, "this is not a DesignWare USB3 DRD Core\n");
- ret = -ENODEV;
- goto err0;
+ return false;
}
- /*
- * Write Linux Version Code to our GUID register so it's easy to figure
- * out which kernel version a bug was found.
- */
- dwc3_writel(dwc->regs, DWC3_GUID, LINUX_VERSION_CODE);
-
- /* Handle USB2.0-only core configuration */
- if (DWC3_GHWPARAMS3_SSPHY_IFC(dwc->hwparams.hwparams3) ==
- DWC3_GHWPARAMS3_SSPHY_IFC_DIS) {
- if (dwc->maximum_speed == USB_SPEED_SUPER)
- dwc->maximum_speed = USB_SPEED_HIGH;
- }
-
- /* issue device SoftReset too */
- ret = dwc3_soft_reset(dwc);
- if (ret)
- goto err0;
-
- ret = dwc3_core_soft_reset(dwc);
- if (ret)
- goto err0;
+ return true;
+}
- ret = dwc3_phy_setup(dwc);
- if (ret)
- goto err0;
+static void dwc3_core_setup_global_control(struct dwc3 *dwc)
+{
+ u32 hwparams4 = dwc->hwparams.hwparams4;
+ u32 reg;
reg = dwc3_readl(dwc->regs, DWC3_GCTL);
reg &= ~DWC3_GCTL_SCALEDOWN_MASK;
@@ -683,13 +617,13 @@ static int dwc3_core_init(struct dwc3 *dwc)
reg |= DWC3_GCTL_GBLHIBERNATIONEN;
break;
default:
- dwc3_trace(trace_dwc3_core, "No power optimization available\n");
+ /* nothing */
+ break;
}
/* check if current dwc3 is on simulation board */
if (dwc->hwparams.hwparams6 & DWC3_GHWPARAMS6_EN_FPGA) {
- dwc3_trace(trace_dwc3_core,
- "running on FPGA platform\n");
+		dev_info(dwc->dev, "Running with FPGA optimizations\n");
dwc->is_fpga = true;
}
@@ -714,7 +648,47 @@ static int dwc3_core_init(struct dwc3 *dwc)
reg |= DWC3_GCTL_U2RSTECN;
dwc3_writel(dwc->regs, DWC3_GCTL, reg);
+}
+
+/**
+ * dwc3_core_init - Low-level initialization of DWC3 Core
+ * @dwc: Pointer to our controller context structure
+ *
+ * Returns 0 on success otherwise negative errno.
+ */
+static int dwc3_core_init(struct dwc3 *dwc)
+{
+ u32 reg;
+ int ret;
+
+ if (!dwc3_core_is_valid(dwc)) {
+ dev_err(dwc->dev, "this is not a DesignWare USB3 DRD Core\n");
+ ret = -ENODEV;
+ goto err0;
+ }
+
+ /*
+ * Write Linux Version Code to our GUID register so it's easy to figure
+ * out which kernel version a bug was found.
+ */
+ dwc3_writel(dwc->regs, DWC3_GUID, LINUX_VERSION_CODE);
+
+ /* Handle USB2.0-only core configuration */
+ if (DWC3_GHWPARAMS3_SSPHY_IFC(dwc->hwparams.hwparams3) ==
+ DWC3_GHWPARAMS3_SSPHY_IFC_DIS) {
+ if (dwc->maximum_speed == USB_SPEED_SUPER)
+ dwc->maximum_speed = USB_SPEED_HIGH;
+ }
+
+ ret = dwc3_core_soft_reset(dwc);
+ if (ret)
+ goto err0;
+ ret = dwc3_phy_setup(dwc);
+ if (ret)
+ goto err0;
+
+ dwc3_core_setup_global_control(dwc);
dwc3_core_num_eps(dwc);
ret = dwc3_setup_scratch_buffers(dwc);
@@ -766,6 +740,16 @@ static int dwc3_core_init(struct dwc3 *dwc)
dwc3_writel(dwc->regs, DWC3_GUCTL2, reg);
}
+ /*
+ * Enable hardware control of sending remote wakeup in HS when
+ * the device is in the L1 state.
+ */
+ if (dwc->revision >= DWC3_REVISION_290A) {
+ reg = dwc3_readl(dwc->regs, DWC3_GUCTL1);
+ reg |= DWC3_GUCTL1_DEV_L1_EXIT_BY_HW;
+ dwc3_writel(dwc->regs, DWC3_GUCTL1, reg);
+ }
+
return 0;
err4:
@@ -919,57 +903,13 @@ static void dwc3_core_exit_mode(struct dwc3 *dwc)
}
}
-#define DWC3_ALIGN_MASK (16 - 1)
-
-static int dwc3_probe(struct platform_device *pdev)
+static void dwc3_get_properties(struct dwc3 *dwc)
{
- struct device *dev = &pdev->dev;
- struct resource *res;
- struct dwc3 *dwc;
+ struct device *dev = dwc->dev;
u8 lpm_nyet_threshold;
u8 tx_de_emphasis;
u8 hird_threshold;
- int ret;
-
- void __iomem *regs;
- void *mem;
-
- mem = devm_kzalloc(dev, sizeof(*dwc) + DWC3_ALIGN_MASK, GFP_KERNEL);
- if (!mem)
- return -ENOMEM;
-
- dwc = PTR_ALIGN(mem, DWC3_ALIGN_MASK + 1);
- dwc->mem = mem;
- dwc->dev = dev;
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- dev_err(dev, "missing memory resource\n");
- return -ENODEV;
- }
-
- dwc->xhci_resources[0].start = res->start;
- dwc->xhci_resources[0].end = dwc->xhci_resources[0].start +
- DWC3_XHCI_REGS_END;
- dwc->xhci_resources[0].flags = res->flags;
- dwc->xhci_resources[0].name = res->name;
-
- res->start += DWC3_GLOBALS_REGS_START;
-
- /*
- * Request memory region but exclude xHCI regs,
- * since it will be requested by the xhci-plat driver.
- */
- regs = devm_ioremap_resource(dev, res);
- if (IS_ERR(regs)) {
- ret = PTR_ERR(regs);
- goto err0;
- }
-
- dwc->regs = regs;
- dwc->regs_size = resource_size(res);
-
/* default to highest possible threshold */
lpm_nyet_threshold = 0xff;
@@ -986,6 +926,13 @@ static int dwc3_probe(struct platform_device *pdev)
dwc->dr_mode = usb_get_dr_mode(dev);
dwc->hsphy_mode = of_usb_get_phy_mode(dev->of_node);
+ dwc->sysdev_is_parent = device_property_read_bool(dev,
+ "linux,sysdev_is_parent");
+ if (dwc->sysdev_is_parent)
+ dwc->sysdev = dwc->dev->parent;
+ else
+ dwc->sysdev = dwc->dev;
+
dwc->has_lpm_erratum = device_property_read_bool(dev,
"snps,has-lpm-erratum");
device_property_read_u8(dev, "snps,lpm-nyet-threshold",
@@ -1041,6 +988,112 @@ static int dwc3_probe(struct platform_device *pdev)
dwc->hird_threshold = hird_threshold
| (dwc->is_utmi_l1_suspend << 4);
+ dwc->imod_interval = 0;
+}
+
+/* check whether the core supports IMOD */
+bool dwc3_has_imod(struct dwc3 *dwc)
+{
+ return ((dwc3_is_usb3(dwc) &&
+ dwc->revision >= DWC3_REVISION_300A) ||
+ (dwc3_is_usb31(dwc) &&
+ dwc->revision >= DWC3_USB31_REVISION_120A));
+}
+
+static void dwc3_check_params(struct dwc3 *dwc)
+{
+ struct device *dev = dwc->dev;
+
+ /* Check for proper value of imod_interval */
+ if (dwc->imod_interval && !dwc3_has_imod(dwc)) {
+ dev_warn(dwc->dev, "Interrupt moderation not supported\n");
+ dwc->imod_interval = 0;
+ }
+
+ /*
+ * Workaround for STAR 9000961433 which affects only version
+ * 3.00a of the DWC_usb3 core. This prevents the controller
+ * interrupt from being masked while handling events. IMOD
+ * allows us to work around this issue. Enable it for the
+ * affected version.
+ */
+ if (!dwc->imod_interval &&
+ (dwc->revision == DWC3_REVISION_300A))
+ dwc->imod_interval = 1;
+
+ /* Check the maximum_speed parameter */
+ switch (dwc->maximum_speed) {
+ case USB_SPEED_LOW:
+ case USB_SPEED_FULL:
+ case USB_SPEED_HIGH:
+ case USB_SPEED_SUPER:
+ case USB_SPEED_SUPER_PLUS:
+ break;
+ default:
+ dev_err(dev, "invalid maximum_speed parameter %d\n",
+ dwc->maximum_speed);
+ /* fall through */
+ case USB_SPEED_UNKNOWN:
+ /* default to superspeed */
+ dwc->maximum_speed = USB_SPEED_SUPER;
+
+ /*
+ * default to superspeed plus if we are capable.
+ */
+ if (dwc3_is_usb31(dwc) &&
+ (DWC3_GHWPARAMS3_SSPHY_IFC(dwc->hwparams.hwparams3) ==
+ DWC3_GHWPARAMS3_SSPHY_IFC_GEN2))
+ dwc->maximum_speed = USB_SPEED_SUPER_PLUS;
+
+ break;
+ }
+}
+
+static int dwc3_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+ struct dwc3 *dwc;
+
+ int ret;
+
+ void __iomem *regs;
+
+ dwc = devm_kzalloc(dev, sizeof(*dwc), GFP_KERNEL);
+ if (!dwc)
+ return -ENOMEM;
+
+ dwc->dev = dev;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(dev, "missing memory resource\n");
+ return -ENODEV;
+ }
+
+ dwc->xhci_resources[0].start = res->start;
+ dwc->xhci_resources[0].end = dwc->xhci_resources[0].start +
+ DWC3_XHCI_REGS_END;
+ dwc->xhci_resources[0].flags = res->flags;
+ dwc->xhci_resources[0].name = res->name;
+
+ res->start += DWC3_GLOBALS_REGS_START;
+
+ /*
+ * Request memory region but exclude xHCI regs,
+ * since it will be requested by the xhci-plat driver.
+ */
+ regs = devm_ioremap_resource(dev, res);
+ if (IS_ERR(regs)) {
+ ret = PTR_ERR(regs);
+ goto err0;
+ }
+
+ dwc->regs = regs;
+ dwc->regs_size = resource_size(res);
+
+ dwc3_get_properties(dwc);
+
platform_set_drvdata(pdev, dwc);
dwc3_cache_hwparams(dwc);
@@ -1050,12 +1103,6 @@ static int dwc3_probe(struct platform_device *pdev)
spin_lock_init(&dwc->lock);
- if (!dev->dma_mask) {
- dev->dma_mask = dev->parent->dma_mask;
- dev->dma_parms = dev->parent->dma_parms;
- dma_set_coherent_mask(dev, dev->parent->coherent_dma_mask);
- }
-
pm_runtime_set_active(dev);
pm_runtime_use_autosuspend(dev);
pm_runtime_set_autosuspend_delay(dev, DWC3_DEFAULT_AUTOSUSPEND_DELAY);
@@ -1087,32 +1134,7 @@ static int dwc3_probe(struct platform_device *pdev)
goto err4;
}
- /* Check the maximum_speed parameter */
- switch (dwc->maximum_speed) {
- case USB_SPEED_LOW:
- case USB_SPEED_FULL:
- case USB_SPEED_HIGH:
- case USB_SPEED_SUPER:
- case USB_SPEED_SUPER_PLUS:
- break;
- default:
- dev_err(dev, "invalid maximum_speed parameter %d\n",
- dwc->maximum_speed);
- /* fall through */
- case USB_SPEED_UNKNOWN:
- /* default to superspeed */
- dwc->maximum_speed = USB_SPEED_SUPER;
-
- /*
- * default to superspeed plus if we are capable.
- */
- if (dwc3_is_usb31(dwc) &&
- (DWC3_GHWPARAMS3_SSPHY_IFC(dwc->hwparams.hwparams3) ==
- DWC3_GHWPARAMS3_SSPHY_IFC_GEN2))
- dwc->maximum_speed = USB_SPEED_SUPER_PLUS;
-
- break;
- }
+ dwc3_check_params(dwc);
ret = dwc3_core_init_mode(dwc);
if (ret)
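
core.c now reads @imod_interval in dwc3_get_properties() and sanity-checks it in dwc3_check_params(), but the register write itself is not in this file. A sketch of how the interval would plausibly be armed, using the DWC3_DEV_IMOD register and DWC3_GEVNTCOUNT_EHB bit that the core.h hunk below defines; the exact call site is an assumption:

	/* Sketch: arm interrupt moderation on event buffer 0 once the
	 * controller starts. Assumed call site, not part of this diff. */
	if (dwc->imod_interval) {
		dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(0), DWC3_GEVNTCOUNT_EHB);
		dwc3_writel(dwc->regs, DWC3_DEV_IMOD(0), dwc->imod_interval);
	}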
diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h
index 6b60e42626a2..de5a8570be04 100644
--- a/drivers/usb/dwc3/core.h
+++ b/drivers/usb/dwc3/core.h
@@ -26,6 +26,7 @@
#include <linux/dma-mapping.h>
#include <linux/mm.h>
#include <linux/debugfs.h>
+#include <linux/wait.h>
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
@@ -37,6 +38,7 @@
#define DWC3_MSG_MAX 500
/* Global constants */
+#define DWC3_PULL_UP_TIMEOUT 500 /* ms */
#define DWC3_ZLP_BUF_SIZE 1024 /* size of a superspeed bulk */
#define DWC3_EP0_BOUNCE_SIZE 512
#define DWC3_ENDPOINTS_NUM 32
@@ -65,6 +67,7 @@
#define DWC3_DEVICE_EVENT_OVERFLOW 11
#define DWC3_GEVNTCOUNT_MASK 0xfffc
+#define DWC3_GEVNTCOUNT_EHB (1 << 31)
#define DWC3_GSNPSID_MASK 0xffff0000
#define DWC3_GSNPSREV_MASK 0xffff
@@ -147,6 +150,8 @@
#define DWC3_DEPCMDPAR0 0x08
#define DWC3_DEPCMD 0x0c
+#define DWC3_DEV_IMOD(n) (0xca00 + (n * 0x4))
+
/* OTG Registers */
#define DWC3_OCFG 0xcc00
#define DWC3_OCTL 0xcc04
@@ -198,6 +203,9 @@
#define DWC3_GCTL_GBLHIBERNATIONEN (1 << 1)
#define DWC3_GCTL_DSBLCLKGTNG (1 << 0)
+/* Global User Control 1 Register */
+#define DWC3_GUCTL1_DEV_L1_EXIT_BY_HW (1 << 24)
+
/* Global USB2 PHY Configuration Register */
#define DWC3_GUSB2PHYCFG_PHYSOFTRST (1 << 31)
#define DWC3_GUSB2PHYCFG_U2_FREECLK_EXISTS (1 << 30)
@@ -450,6 +458,8 @@
#define DWC3_DEPCMD_SETTRANSFRESOURCE (0x02 << 0)
#define DWC3_DEPCMD_SETEPCONFIG (0x01 << 0)
+#define DWC3_DEPCMD_CMD(x) ((x) & 0xf)
+
/* The EP number goes 0..31 so ep0 is always out and ep1 is always in */
#define DWC3_DALEPENA_EP(n) (1 << n)
@@ -458,6 +468,11 @@
#define DWC3_DEPCMD_TYPE_BULK 2
#define DWC3_DEPCMD_TYPE_INTR 3
+#define DWC3_DEV_IMOD_COUNT_SHIFT 16
+#define DWC3_DEV_IMOD_COUNT_MASK (0xffff << 16)
+#define DWC3_DEV_IMOD_INTERVAL_SHIFT 0
+#define DWC3_DEV_IMOD_INTERVAL_MASK (0xffff << 0)
+
/* Structures */
struct dwc3_trb;
@@ -465,6 +480,7 @@ struct dwc3_trb;
/**
* struct dwc3_event_buffer - Software event buffer representation
* @buf: _THE_ buffer
+ * @cache: The buffer cache used in the threaded interrupt
* @length: size of this buffer
* @lpos: event offset
* @count: cache of last read event count register
@@ -474,6 +490,7 @@ struct dwc3_trb;
*/
struct dwc3_event_buffer {
void *buf;
+ void *cache;
unsigned length;
unsigned int lpos;
unsigned int count;
@@ -499,6 +516,7 @@ struct dwc3_event_buffer {
* @endpoint: usb endpoint
* @pending_list: list of pending requests for this endpoint
* @started_list: list of started requests on this endpoint
+ * @wait_end_transfer: wait_queue_head_t for waiting on End Transfer complete
* @lock: spinlock for endpoint request queue traversal
* @regs: pointer to first endpoint register
* @trb_pool: array of transaction buffers
@@ -524,12 +542,13 @@ struct dwc3_ep {
struct list_head pending_list;
struct list_head started_list;
+ wait_queue_head_t wait_end_transfer;
+
spinlock_t lock;
void __iomem *regs;
struct dwc3_trb *trb_pool;
dma_addr_t trb_pool_dma;
- const struct usb_ss_ep_comp_descriptor *comp_desc;
struct dwc3 *dwc;
u32 saved_state;
@@ -540,6 +559,8 @@ struct dwc3_ep {
#define DWC3_EP_BUSY (1 << 4)
#define DWC3_EP_PENDING_REQUEST (1 << 5)
#define DWC3_EP_MISSED_ISOC (1 << 6)
+#define DWC3_EP_END_TRANSFER_PENDING (1 << 7)
+#define DWC3_EP_TRANSFER_STARTED (1 << 8)
/* This last one is specific to EP0 */
#define DWC3_EP0_DIR_IN (1 << 31)
@@ -703,7 +724,7 @@ struct dwc3_hwparams {
* @dep: struct dwc3_ep owning this request
* @sg: pointer to first incomplete sg
* @num_pending_sgs: counter to pending sgs
- * @first_trb_index: index to first trb used by this request
+ * @remaining: amount of data remaining
* @epnum: endpoint number to which this request refers
* @trb: pointer to struct dwc3_trb
* @trb_dma: DMA address of @trb
@@ -718,7 +739,7 @@ struct dwc3_request {
struct scatterlist *sg;
unsigned num_pending_sgs;
- u8 first_trb_index;
+ unsigned remaining;
u8 epnum;
struct dwc3_trb *trb;
dma_addr_t trb_dma;
@@ -748,6 +769,7 @@ struct dwc3_scratchpad_array {
* @ep0_usb_req: dummy req used while handling STD USB requests
* @ep0_bounce_addr: dma address of ep0_bounce
* @scratch_addr: dma address of scratchbuf
+ * @ep0_in_setup: completed when a control transfer finishes and the Setup phase is entered
* @lock: for synchronizing
* @dev: pointer to our struct device
* @xhci: pointer to our xHCI child
@@ -784,7 +806,6 @@ struct dwc3_scratchpad_array {
* @ep0state: state of endpoint zero
* @link_state: link state
* @speed: device speed (super, high, full, low)
- * @mem: points to start of memory which is used for this struct.
* @hwparams: copy of hwparams registers
* @root: debugfs root folder pointer
* @regset: debugfs pointer to regdump file
@@ -798,6 +819,7 @@ struct dwc3_scratchpad_array {
* @ep0_bounced: true when we used bounce buffer
* @ep0_expect_in: true when we expect a DATA IN transfer
* @has_hibernation: true when dwc3 was configured with Hibernation
+ * @sysdev_is_parent: true when dwc3 device has a parent driver
* @has_lpm_erratum: true when core was configured with LPM Erratum. Note that
* there's no way for software to detect this at runtime.
* @is_utmi_l1_suspend: the core asserts output signal
@@ -833,6 +855,8 @@ struct dwc3_scratchpad_array {
* 1 - -3.5dB de-emphasis
* 2 - No de-emphasis
* 3 - Reserved
+ * @imod_interval: set the interrupt moderation interval in 250ns
+ * increments or 0 to disable.
*/
struct dwc3 {
struct usb_ctrlrequest *ctrl_req;
@@ -846,11 +870,13 @@ struct dwc3 {
dma_addr_t ep0_bounce_addr;
dma_addr_t scratch_addr;
struct dwc3_request ep0_usb_req;
+ struct completion ep0_in_setup;
/* device lock */
spinlock_t lock;
struct device *dev;
+ struct device *sysdev;
struct platform_device *xhci;
struct resource xhci_resources[DWC3_XHCI_RESOURCES_NUM];
@@ -909,6 +935,7 @@ struct dwc3 {
#define DWC3_REVISION_260A 0x5533260a
#define DWC3_REVISION_270A 0x5533270a
#define DWC3_REVISION_280A 0x5533280a
+#define DWC3_REVISION_290A 0x5533290a
#define DWC3_REVISION_300A 0x5533300a
#define DWC3_REVISION_310A 0x5533310a
@@ -918,6 +945,7 @@ struct dwc3 {
*/
#define DWC3_REVISION_IS_DWC31 0x80000000
#define DWC3_USB31_REVISION_110A (0x3131302a | DWC3_REVISION_IS_DWC31)
+#define DWC3_USB31_REVISION_120A (0x3132302a | DWC3_REVISION_IS_DWC31)
enum dwc3_ep0_next ep0_next_event;
enum dwc3_ep0_state ep0state;
@@ -934,8 +962,6 @@ struct dwc3 {
u8 num_out_eps;
u8 num_in_eps;
- void *mem;
-
struct dwc3_hwparams hwparams;
struct dentry *root;
struct debugfs_regset32 *regset;
@@ -952,6 +978,7 @@ struct dwc3 {
unsigned ep0_bounced:1;
unsigned ep0_expect_in:1;
unsigned has_hibernation:1;
+ unsigned sysdev_is_parent:1;
unsigned has_lpm_erratum:1;
unsigned is_utmi_l1_suspend:1;
unsigned is_fpga:1;
@@ -978,6 +1005,8 @@ struct dwc3 {
unsigned tx_de_emphasis_quirk:1;
unsigned tx_de_emphasis:2;
+
+ u16 imod_interval;
};
/* -------------------------------------------------------------------------- */
@@ -1039,12 +1068,16 @@ struct dwc3_event_depevt {
/* Control-only Status */
#define DEPEVT_STATUS_CONTROL_DATA 1
#define DEPEVT_STATUS_CONTROL_STATUS 2
+#define DEPEVT_STATUS_CONTROL_PHASE(n) ((n) & 3)
/* In response to Start Transfer */
#define DEPEVT_TRANSFER_NO_RESOURCE 1
#define DEPEVT_TRANSFER_BUS_EXPIRY 2
u32 parameters:16;
+
+/* For Command Complete Events */
+#define DEPEVT_PARAMETER_CMD(n) (((n) & (0xf << 8)) >> 8)
} __packed;
/**
@@ -1133,12 +1166,20 @@ struct dwc3_gadget_ep_cmd_params {
void dwc3_set_mode(struct dwc3 *dwc, u32 mode);
u32 dwc3_core_fifo_space(struct dwc3_ep *dep, u8 type);
+/* check whether we are on the DWC_usb3 core */
+static inline bool dwc3_is_usb3(struct dwc3 *dwc)
+{
+ return !(dwc->revision & DWC3_REVISION_IS_DWC31);
+}
+
/* check whether we are on the DWC_usb31 core */
static inline bool dwc3_is_usb31(struct dwc3 *dwc)
{
return !!(dwc->revision & DWC3_REVISION_IS_DWC31);
}
+bool dwc3_has_imod(struct dwc3 *dwc);
+
#if IS_ENABLED(CONFIG_USB_DWC3_HOST) || IS_ENABLED(CONFIG_USB_DWC3_DUAL_ROLE)
int dwc3_host_init(struct dwc3 *dwc);
void dwc3_host_exit(struct dwc3 *dwc);
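
The new DWC3_DEPCMD_CMD()/DEPEVT_PARAMETER_CMD() accessors and the per-endpoint wait_end_transfer queue are meant to pair up in the endpoint event handler: a Command Complete event carries the command number in its parameters, and an End Transfer completion clears the pending flag and wakes any waiter. A sketch of that consumer, assuming it sits in the endpoint interrupt path:

	/* Assumed consumer in the endpoint interrupt handler; sketch only */
	case DWC3_DEPEVT_EPCMDCMPLT:
		cmd = DEPEVT_PARAMETER_CMD(event->parameters);

		if (cmd == DWC3_DEPCMD_ENDTRANSFER) {
			dep->flags &= ~DWC3_EP_END_TRANSFER_PENDING;
			wake_up(&dep->wait_end_transfer);
		}
		break;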
diff --git a/drivers/usb/dwc3/debug.c b/drivers/usb/dwc3/debug.c
deleted file mode 100644
index 0be6885bc370..000000000000
--- a/drivers/usb/dwc3/debug.c
+++ /dev/null
@@ -1,32 +0,0 @@
-/**
- * debug.c - DesignWare USB3 DRD Controller Debug/Trace Support
- *
- * Copyright (C) 2014 Texas Instruments Incorporated - http://www.ti.com
- *
- * Author: Felipe Balbi <balbi@ti.com>
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 of
- * the License as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#include "debug.h"
-
-void dwc3_trace(void (*trace)(struct va_format *), const char *fmt, ...)
-{
- struct va_format vaf;
- va_list args;
-
- va_start(args, fmt);
- vaf.fmt = fmt;
- vaf.va = &args;
-
- trace(&vaf);
-
- va_end(args);
-}
diff --git a/drivers/usb/dwc3/debug.h b/drivers/usb/dwc3/debug.h
index 33ab2a203c1b..eeed4ffd8131 100644
--- a/drivers/usb/dwc3/debug.h
+++ b/drivers/usb/dwc3/debug.h
@@ -124,6 +124,22 @@ dwc3_gadget_link_string(enum dwc3_link_state link_state)
}
}
+static inline const char *dwc3_ep0_state_string(enum dwc3_ep0_state state)
+{
+ switch (state) {
+ case EP0_UNCONNECTED:
+ return "Unconnected";
+ case EP0_SETUP_PHASE:
+ return "Setup Phase";
+ case EP0_DATA_PHASE:
+ return "Data Phase";
+ case EP0_STATUS_PHASE:
+ return "Status Phase";
+ default:
+ return "UNKNOWN";
+ }
+}
+
/**
* dwc3_gadget_event_string - returns event name
* @event: the event code
@@ -184,10 +200,11 @@ dwc3_gadget_event_string(const struct dwc3_event_devt *event)
* @event: then event code
*/
static inline const char *
-dwc3_ep_event_string(const struct dwc3_event_depevt *event)
+dwc3_ep_event_string(const struct dwc3_event_depevt *event, u32 ep0state)
{
u8 epnum = event->endpoint_number;
static char str[256];
+ size_t len;
int status;
int ret;
@@ -199,6 +216,10 @@ dwc3_ep_event_string(const struct dwc3_event_depevt *event)
switch (event->endpoint_event) {
case DWC3_DEPEVT_XFERCOMPLETE:
strcat(str, "Transfer Complete");
+ len = strlen(str);
+
+ if (epnum <= 1)
+ sprintf(str + len, " [%s]", dwc3_ep0_state_string(ep0state));
break;
case DWC3_DEPEVT_XFERINPROGRESS:
strcat(str, "Transfer In-Progress");
@@ -207,6 +228,19 @@ dwc3_ep_event_string(const struct dwc3_event_depevt *event)
strcat(str, "Transfer Not Ready");
status = event->status & DEPEVT_STATUS_TRANSFER_ACTIVE;
strcat(str, status ? " (Active)" : " (Not Active)");
+
+ /* Control Endpoints */
+ if (epnum <= 1) {
+ int phase = DEPEVT_STATUS_CONTROL_PHASE(event->status);
+
+ switch (phase) {
+ case DEPEVT_STATUS_CONTROL_DATA:
+ strcat(str, " [Data Phase]");
+ break;
+ case DEPEVT_STATUS_CONTROL_STATUS:
+ strcat(str, " [Status Phase]");
+ }
+ }
break;
case DWC3_DEPEVT_RXTXFIFOEVT:
strcat(str, "FIFO");
@@ -270,14 +304,14 @@ static inline const char *dwc3_gadget_event_type_string(u8 event)
}
}
-static inline const char *dwc3_decode_event(u32 event)
+static inline const char *dwc3_decode_event(u32 event, u32 ep0state)
{
const union dwc3_event evt = (union dwc3_event) event;
if (evt.type.is_devspec)
return dwc3_gadget_event_string(&evt.devt);
else
- return dwc3_ep_event_string(&evt.depevt);
+ return dwc3_ep_event_string(&evt.depevt, ep0state);
}
static inline const char *dwc3_ep_cmd_status_string(int status)
@@ -310,7 +344,6 @@ static inline const char *dwc3_gadget_generic_cmd_status_string(int status)
}
}
-void dwc3_trace(void (*trace)(struct va_format *), const char *fmt, ...);
#ifdef CONFIG_DEBUG_FS
extern void dwc3_debugfs_init(struct dwc3 *);
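
Since dwc3_decode_event() now threads the ep0 state through to dwc3_ep_event_string(), every caller has to pass it along; a hypothetical call site:

	/* Hypothetical caller; the real users sit in the trace layer */
	pr_debug("event: %s\n", dwc3_decode_event(event, dwc->ep0state));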
diff --git a/drivers/usb/dwc3/dwc3-exynos.c b/drivers/usb/dwc3/dwc3-exynos.c
index 2f1fb7e7aa54..e27899bb5706 100644
--- a/drivers/usb/dwc3/dwc3-exynos.c
+++ b/drivers/usb/dwc3/dwc3-exynos.c
@@ -20,7 +20,6 @@
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/platform_device.h>
-#include <linux/dma-mapping.h>
#include <linux/clk.h>
#include <linux/usb/otg.h>
#include <linux/usb/usb_phy_generic.h>
@@ -117,15 +116,6 @@ static int dwc3_exynos_probe(struct platform_device *pdev)
if (!exynos)
return -ENOMEM;
- /*
- * Right now device-tree probed devices don't get dma_mask set.
- * Since shared usb code relies on it, set it here for now.
- * Once we move to full device tree support this will vanish off.
- */
- ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(32));
- if (ret)
- return ret;
-
platform_set_drvdata(pdev, exynos);
exynos->dev = dev;
diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c
index 6df0f5dad9a4..2b73339f286b 100644
--- a/drivers/usb/dwc3/dwc3-pci.c
+++ b/drivers/usb/dwc3/dwc3-pci.c
@@ -39,6 +39,27 @@
#define PCI_DEVICE_ID_INTEL_APL 0x5aaa
#define PCI_DEVICE_ID_INTEL_KBP 0xa2b0
+#define PCI_INTEL_BXT_DSM_UUID "732b85d5-b7a7-4a1b-9ba0-4bbd00ffd511"
+#define PCI_INTEL_BXT_FUNC_PMU_PWR 4
+#define PCI_INTEL_BXT_STATE_D0 0
+#define PCI_INTEL_BXT_STATE_D3 3
+
+/**
+ * struct dwc3_pci - Driver private structure
+ * @dwc3: child dwc3 platform_device
+ * @pci: our link to PCI bus
+ * @uuid: _DSM UUID
+ * @has_dsm_for_pm: true for devices which need to run _DSM on runtime PM
+ */
+struct dwc3_pci {
+ struct platform_device *dwc3;
+ struct pci_dev *pci;
+
+ u8 uuid[16];
+
+ unsigned int has_dsm_for_pm:1;
+};
+
static const struct acpi_gpio_params reset_gpios = { 0, 0, false };
static const struct acpi_gpio_params cs_gpios = { 1, 0, false };
@@ -48,8 +69,21 @@ static const struct acpi_gpio_mapping acpi_dwc3_byt_gpios[] = {
{ },
};
-static int dwc3_pci_quirks(struct pci_dev *pdev, struct platform_device *dwc3)
+static int dwc3_pci_quirks(struct dwc3_pci *dwc)
{
+ struct platform_device *dwc3 = dwc->dwc3;
+ struct pci_dev *pdev = dwc->pci;
+ int ret;
+
+ struct property_entry sysdev_property[] = {
+ PROPERTY_ENTRY_BOOL("linux,sysdev_is_parent"),
+ { },
+ };
+
+ ret = platform_device_add_properties(dwc3, sysdev_property);
+ if (ret)
+ return ret;
+
if (pdev->vendor == PCI_VENDOR_ID_AMD &&
pdev->device == PCI_DEVICE_ID_AMD_NL_USB) {
struct property_entry properties[] = {
@@ -89,6 +123,12 @@ static int dwc3_pci_quirks(struct pci_dev *pdev, struct platform_device *dwc3)
if (ret < 0)
return ret;
+ if (pdev->device == PCI_DEVICE_ID_INTEL_BXT ||
+ pdev->device == PCI_DEVICE_ID_INTEL_BXT_M) {
+ acpi_str_to_uuid(PCI_INTEL_BXT_DSM_UUID, dwc->uuid);
+ dwc->has_dsm_for_pm = true;
+ }
+
if (pdev->device == PCI_DEVICE_ID_INTEL_BYT) {
struct gpio_desc *gpio;
@@ -139,8 +179,8 @@ static int dwc3_pci_quirks(struct pci_dev *pdev, struct platform_device *dwc3)
static int dwc3_pci_probe(struct pci_dev *pci,
const struct pci_device_id *id)
{
+ struct dwc3_pci *dwc;
struct resource res[2];
- struct platform_device *dwc3;
int ret;
struct device *dev = &pci->dev;
@@ -152,11 +192,13 @@ static int dwc3_pci_probe(struct pci_dev *pci,
pci_set_master(pci);
- dwc3 = platform_device_alloc("dwc3", PLATFORM_DEVID_AUTO);
- if (!dwc3) {
- dev_err(dev, "couldn't allocate dwc3 device\n");
+ dwc = devm_kzalloc(dev, sizeof(*dwc), GFP_KERNEL);
+ if (!dwc)
+ return -ENOMEM;
+
+ dwc->dwc3 = platform_device_alloc("dwc3", PLATFORM_DEVID_AUTO);
+ if (!dwc->dwc3)
return -ENOMEM;
- }
memset(res, 0x00, sizeof(struct resource) * ARRAY_SIZE(res));
@@ -169,20 +211,21 @@ static int dwc3_pci_probe(struct pci_dev *pci,
res[1].name = "dwc_usb3";
res[1].flags = IORESOURCE_IRQ;
- ret = platform_device_add_resources(dwc3, res, ARRAY_SIZE(res));
+ ret = platform_device_add_resources(dwc->dwc3, res, ARRAY_SIZE(res));
if (ret) {
dev_err(dev, "couldn't add resources to dwc3 device\n");
return ret;
}
- dwc3->dev.parent = dev;
- ACPI_COMPANION_SET(&dwc3->dev, ACPI_COMPANION(dev));
+ dwc->pci = pci;
+ dwc->dwc3->dev.parent = dev;
+ ACPI_COMPANION_SET(&dwc->dwc3->dev, ACPI_COMPANION(dev));
- ret = dwc3_pci_quirks(pci, dwc3);
+ ret = dwc3_pci_quirks(dwc);
if (ret)
goto err;
- ret = platform_device_add(dwc3);
+ ret = platform_device_add(dwc->dwc3);
if (ret) {
dev_err(dev, "failed to register dwc3 device\n");
goto err;
@@ -190,21 +233,23 @@ static int dwc3_pci_probe(struct pci_dev *pci,
device_init_wakeup(dev, true);
device_set_run_wake(dev, true);
- pci_set_drvdata(pci, dwc3);
+ pci_set_drvdata(pci, dwc);
pm_runtime_put(dev);
return 0;
err:
- platform_device_put(dwc3);
+ platform_device_put(dwc->dwc3);
return ret;
}
static void dwc3_pci_remove(struct pci_dev *pci)
{
+ struct dwc3_pci *dwc = pci_get_drvdata(pci);
+
device_init_wakeup(&pci->dev, false);
pm_runtime_get(&pci->dev);
acpi_dev_remove_driver_gpios(ACPI_COMPANION(&pci->dev));
- platform_device_unregister(pci_get_drvdata(pci));
+ platform_device_unregister(dwc->dwc3);
}
static const struct pci_device_id dwc3_pci_id_table[] = {
@@ -234,40 +279,75 @@ static const struct pci_device_id dwc3_pci_id_table[] = {
};
MODULE_DEVICE_TABLE(pci, dwc3_pci_id_table);
+#if defined(CONFIG_PM) || defined(CONFIG_PM_SLEEP)
+static int dwc3_pci_dsm(struct dwc3_pci *dwc, int param)
+{
+ union acpi_object *obj;
+ union acpi_object tmp;
+ union acpi_object argv4 = ACPI_INIT_DSM_ARGV4(1, &tmp);
+
+ if (!dwc->has_dsm_for_pm)
+ return 0;
+
+ tmp.type = ACPI_TYPE_INTEGER;
+ tmp.integer.value = param;
+
+ obj = acpi_evaluate_dsm(ACPI_HANDLE(&dwc->pci->dev), dwc->uuid,
+ 1, PCI_INTEL_BXT_FUNC_PMU_PWR, &argv4);
+ if (!obj) {
+ dev_err(&dwc->pci->dev, "failed to evaluate _DSM\n");
+ return -EIO;
+ }
+
+ ACPI_FREE(obj);
+
+ return 0;
+}
+#endif /* CONFIG_PM || CONFIG_PM_SLEEP */
+
#ifdef CONFIG_PM
static int dwc3_pci_runtime_suspend(struct device *dev)
{
+ struct dwc3_pci *dwc = dev_get_drvdata(dev);
+
if (device_run_wake(dev))
- return 0;
+ return dwc3_pci_dsm(dwc, PCI_INTEL_BXT_STATE_D3);
return -EBUSY;
}
static int dwc3_pci_runtime_resume(struct device *dev)
{
- struct platform_device *dwc3 = dev_get_drvdata(dev);
+ struct dwc3_pci *dwc = dev_get_drvdata(dev);
+ struct platform_device *dwc3 = dwc->dwc3;
+ int ret;
+
+ ret = dwc3_pci_dsm(dwc, PCI_INTEL_BXT_STATE_D0);
+ if (ret)
+ return ret;
return pm_runtime_get(&dwc3->dev);
}
#endif /* CONFIG_PM */
#ifdef CONFIG_PM_SLEEP
-static int dwc3_pci_pm_dummy(struct device *dev)
+static int dwc3_pci_suspend(struct device *dev)
{
- /*
- * There's nothing to do here. No, seriously. Everything is either taken
- * care either by PCI subsystem or dwc3/core.c, so we have nothing
- * missing here.
- *
- * So you'd think we didn't need this at all, but PCI subsystem will
- * bail out if we don't have a valid callback :-s
- */
- return 0;
+ struct dwc3_pci *dwc = dev_get_drvdata(dev);
+
+ return dwc3_pci_dsm(dwc, PCI_INTEL_BXT_STATE_D3);
+}
+
+static int dwc3_pci_resume(struct device *dev)
+{
+ struct dwc3_pci *dwc = dev_get_drvdata(dev);
+
+ return dwc3_pci_dsm(dwc, PCI_INTEL_BXT_STATE_D0);
}
#endif /* CONFIG_PM_SLEEP */
static struct dev_pm_ops dwc3_pci_dev_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(dwc3_pci_pm_dummy, dwc3_pci_pm_dummy)
+ SET_SYSTEM_SLEEP_PM_OPS(dwc3_pci_suspend, dwc3_pci_resume)
SET_RUNTIME_PM_OPS(dwc3_pci_runtime_suspend, dwc3_pci_runtime_resume,
NULL)
};
diff --git a/drivers/usb/dwc3/dwc3-st.c b/drivers/usb/dwc3/dwc3-st.c
index aaaf256f71dd..dfbf464eb88c 100644
--- a/drivers/usb/dwc3/dwc3-st.c
+++ b/drivers/usb/dwc3/dwc3-st.c
@@ -219,7 +219,6 @@ static int st_dwc3_probe(struct platform_device *pdev)
if (IS_ERR(regmap))
return PTR_ERR(regmap);
- dma_set_coherent_mask(dev, dev->coherent_dma_mask);
dwc3_data->dev = dev;
dwc3_data->regmap = regmap;
diff --git a/drivers/usb/dwc3/ep0.c b/drivers/usb/dwc3/ep0.c
index fe79d771dee4..4878d187c7d4 100644
--- a/drivers/usb/dwc3/ep0.c
+++ b/drivers/usb/dwc3/ep0.c
@@ -39,22 +39,6 @@ static void __dwc3_ep0_do_control_status(struct dwc3 *dwc, struct dwc3_ep *dep);
static void __dwc3_ep0_do_control_data(struct dwc3 *dwc,
struct dwc3_ep *dep, struct dwc3_request *req);
-static const char *dwc3_ep0_state_string(enum dwc3_ep0_state state)
-{
- switch (state) {
- case EP0_UNCONNECTED:
- return "Unconnected";
- case EP0_SETUP_PHASE:
- return "Setup Phase";
- case EP0_DATA_PHASE:
- return "Data Phase";
- case EP0_STATUS_PHASE:
- return "Status Phase";
- default:
- return "UNKNOWN";
- }
-}
-
static int dwc3_ep0_start_trans(struct dwc3 *dwc, u8 epnum, dma_addr_t buf_dma,
u32 len, u32 type, bool chain)
{
@@ -65,10 +49,8 @@ static int dwc3_ep0_start_trans(struct dwc3 *dwc, u8 epnum, dma_addr_t buf_dma,
int ret;
dep = dwc->eps[epnum];
- if (dep->flags & DWC3_EP_BUSY) {
- dwc3_trace(trace_dwc3_ep0, "%s still busy", dep->name);
+ if (dep->flags & DWC3_EP_BUSY)
return 0;
- }
trb = &dwc->ep0_trb[dep->trb_enqueue];
@@ -99,11 +81,8 @@ static int dwc3_ep0_start_trans(struct dwc3 *dwc, u8 epnum, dma_addr_t buf_dma,
trace_dwc3_prepare_trb(dep, trb);
ret = dwc3_send_gadget_ep_cmd(dep, DWC3_DEPCMD_STARTTRANSFER, &params);
- if (ret < 0) {
- dwc3_trace(trace_dwc3_ep0, "%s STARTTRANSFER failed",
- dep->name);
+ if (ret < 0)
return ret;
- }
dep->flags |= DWC3_EP_BUSY;
dep->resource_index = dwc3_gadget_ep_get_transfer_index(dep);
@@ -163,9 +142,6 @@ static int __dwc3_gadget_ep0_queue(struct dwc3_ep *dep,
if (dwc->ep0state == EP0_STATUS_PHASE)
__dwc3_ep0_do_control_status(dwc, dwc->eps[direction]);
- else
- dwc3_trace(trace_dwc3_ep0,
- "too early for delayed status");
return 0;
}
@@ -229,9 +205,8 @@ int dwc3_gadget_ep0_queue(struct usb_ep *ep, struct usb_request *request,
spin_lock_irqsave(&dwc->lock, flags);
if (!dep->endpoint.desc) {
- dwc3_trace(trace_dwc3_ep0,
- "trying to queue request %p to disabled %s",
- request, dep->name);
+ dev_err(dwc->dev, "%s: can't queue to disabled endpoint\n",
+ dep->name);
ret = -ESHUTDOWN;
goto out;
}
@@ -242,11 +217,6 @@ int dwc3_gadget_ep0_queue(struct usb_ep *ep, struct usb_request *request,
goto out;
}
- dwc3_trace(trace_dwc3_ep0,
- "queueing request %p to %s length %d state '%s'",
- request, dep->name, request->length,
- dwc3_ep0_state_string(dwc->ep0state));
-
ret = __dwc3_gadget_ep0_queue(dep, req);
out:
@@ -308,6 +278,8 @@ void dwc3_ep0_out_start(struct dwc3 *dwc)
{
int ret;
+ complete(&dwc->ep0_in_setup);
+
ret = dwc3_ep0_start_trans(dwc, 0, dwc->ctrl_req_addr, 8,
DWC3_TRBCTL_CONTROL_SETUP, false);
WARN_ON(ret < 0);
@@ -395,126 +367,198 @@ static int dwc3_ep0_handle_status(struct dwc3 *dwc,
return __dwc3_gadget_ep0_queue(dep, &dwc->ep0_usb_req);
}
-static int dwc3_ep0_handle_feature(struct dwc3 *dwc,
+static int dwc3_ep0_handle_u1(struct dwc3 *dwc, enum usb_device_state state,
+ int set)
+{
+ u32 reg;
+
+ if (state != USB_STATE_CONFIGURED)
+ return -EINVAL;
+ if ((dwc->speed != DWC3_DSTS_SUPERSPEED) &&
+ (dwc->speed != DWC3_DSTS_SUPERSPEED_PLUS))
+ return -EINVAL;
+
+ reg = dwc3_readl(dwc->regs, DWC3_DCTL);
+ if (set)
+ reg |= DWC3_DCTL_INITU1ENA;
+ else
+ reg &= ~DWC3_DCTL_INITU1ENA;
+ dwc3_writel(dwc->regs, DWC3_DCTL, reg);
+
+ return 0;
+}
+
+static int dwc3_ep0_handle_u2(struct dwc3 *dwc, enum usb_device_state state,
+ int set)
+{
+ u32 reg;
+
+
+ return -EINVAL;
+ if ((dwc->speed != DWC3_DSTS_SUPERSPEED) &&
+ (dwc->speed != DWC3_DSTS_SUPERSPEED_PLUS))
+ return -EINVAL;
+
+ reg = dwc3_readl(dwc->regs, DWC3_DCTL);
+ if (set)
+ reg |= DWC3_DCTL_INITU2ENA;
+ else
+ reg &= ~DWC3_DCTL_INITU2ENA;
+ dwc3_writel(dwc->regs, DWC3_DCTL, reg);
+
+ return 0;
+}
+
+static int dwc3_ep0_handle_test(struct dwc3 *dwc, enum usb_device_state state,
+ u32 wIndex, int set)
+{
+ if ((wIndex & 0xff) != 0)
+ return -EINVAL;
+ if (!set)
+ return -EINVAL;
+
+ switch (wIndex >> 8) {
+ case TEST_J:
+ case TEST_K:
+ case TEST_SE0_NAK:
+ case TEST_PACKET:
+ case TEST_FORCE_EN:
+ dwc->test_mode_nr = wIndex >> 8;
+ dwc->test_mode = true;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int dwc3_ep0_handle_device(struct dwc3 *dwc,
struct usb_ctrlrequest *ctrl, int set)
{
- struct dwc3_ep *dep;
- u32 recip;
+ enum usb_device_state state;
u32 wValue;
u32 wIndex;
- u32 reg;
- int ret;
- enum usb_device_state state;
+ int ret = 0;
wValue = le16_to_cpu(ctrl->wValue);
wIndex = le16_to_cpu(ctrl->wIndex);
- recip = ctrl->bRequestType & USB_RECIP_MASK;
state = dwc->gadget.state;
- switch (recip) {
- case USB_RECIP_DEVICE:
+ switch (wValue) {
+ case USB_DEVICE_REMOTE_WAKEUP:
+ break;
+ /*
+	 * 9.4.1 says only for SS, in AddressState only for
+ * default control pipe
+ */
+ case USB_DEVICE_U1_ENABLE:
+ ret = dwc3_ep0_handle_u1(dwc, state, set);
+ break;
+ case USB_DEVICE_U2_ENABLE:
+ ret = dwc3_ep0_handle_u2(dwc, state, set);
+ break;
+ case USB_DEVICE_LTM_ENABLE:
+ ret = -EINVAL;
+ break;
+ case USB_DEVICE_TEST_MODE:
+ ret = dwc3_ep0_handle_test(dwc, state, wIndex, set);
+ break;
+ default:
+ ret = -EINVAL;
+ }
- switch (wValue) {
- case USB_DEVICE_REMOTE_WAKEUP:
- break;
+ return ret;
+}
+
+static int dwc3_ep0_handle_intf(struct dwc3 *dwc,
+ struct usb_ctrlrequest *ctrl, int set)
+{
+ enum usb_device_state state;
+ u32 wValue;
+ u32 wIndex;
+ int ret = 0;
+
+ wValue = le16_to_cpu(ctrl->wValue);
+ wIndex = le16_to_cpu(ctrl->wIndex);
+ state = dwc->gadget.state;
+
+ switch (wValue) {
+ case USB_INTRF_FUNC_SUSPEND:
/*
- * 9.4.1 says only only for SS, in AddressState only for
- * default control pipe
+ * REVISIT: Ideally we would enable some low power mode here,
+		 * but it's unclear exactly what that should be.
+ *
+ * For now, we're not doing anything, just making sure we return
+ * 0 so USB Command Verifier tests pass without any errors.
*/
- case USB_DEVICE_U1_ENABLE:
- if (state != USB_STATE_CONFIGURED)
- return -EINVAL;
- if ((dwc->speed != DWC3_DSTS_SUPERSPEED) &&
- (dwc->speed != DWC3_DSTS_SUPERSPEED_PLUS))
- return -EINVAL;
+ break;
+ default:
+ ret = -EINVAL;
+ }
- reg = dwc3_readl(dwc->regs, DWC3_DCTL);
- if (set)
- reg |= DWC3_DCTL_INITU1ENA;
- else
- reg &= ~DWC3_DCTL_INITU1ENA;
- dwc3_writel(dwc->regs, DWC3_DCTL, reg);
- break;
+ return ret;
+}
- case USB_DEVICE_U2_ENABLE:
- if (state != USB_STATE_CONFIGURED)
- return -EINVAL;
- if ((dwc->speed != DWC3_DSTS_SUPERSPEED) &&
- (dwc->speed != DWC3_DSTS_SUPERSPEED_PLUS))
- return -EINVAL;
+static int dwc3_ep0_handle_endpoint(struct dwc3 *dwc,
+ struct usb_ctrlrequest *ctrl, int set)
+{
+ struct dwc3_ep *dep;
+ enum usb_device_state state;
+ u32 wValue;
+ u32 wIndex;
+ int ret;
- reg = dwc3_readl(dwc->regs, DWC3_DCTL);
- if (set)
- reg |= DWC3_DCTL_INITU2ENA;
- else
- reg &= ~DWC3_DCTL_INITU2ENA;
- dwc3_writel(dwc->regs, DWC3_DCTL, reg);
- break;
+ wValue = le16_to_cpu(ctrl->wValue);
+ wIndex = le16_to_cpu(ctrl->wIndex);
+ state = dwc->gadget.state;
- case USB_DEVICE_LTM_ENABLE:
+ switch (wValue) {
+ case USB_ENDPOINT_HALT:
+ dep = dwc3_wIndex_to_dep(dwc, ctrl->wIndex);
+ if (!dep)
return -EINVAL;
- case USB_DEVICE_TEST_MODE:
- if ((wIndex & 0xff) != 0)
- return -EINVAL;
- if (!set)
- return -EINVAL;
-
- switch (wIndex >> 8) {
- case TEST_J:
- case TEST_K:
- case TEST_SE0_NAK:
- case TEST_PACKET:
- case TEST_FORCE_EN:
- dwc->test_mode_nr = wIndex >> 8;
- dwc->test_mode = true;
- break;
- default:
- return -EINVAL;
- }
+ if (set == 0 && (dep->flags & DWC3_EP_WEDGE))
break;
- default:
+
+ ret = __dwc3_gadget_ep_set_halt(dep, set, true);
+ if (ret)
return -EINVAL;
- }
break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int dwc3_ep0_handle_feature(struct dwc3 *dwc,
+ struct usb_ctrlrequest *ctrl, int set)
+{
+ u32 recip;
+ int ret;
+ enum usb_device_state state;
+
+ recip = ctrl->bRequestType & USB_RECIP_MASK;
+ state = dwc->gadget.state;
+
+ switch (recip) {
+ case USB_RECIP_DEVICE:
+ ret = dwc3_ep0_handle_device(dwc, ctrl, set);
+ break;
case USB_RECIP_INTERFACE:
- switch (wValue) {
- case USB_INTRF_FUNC_SUSPEND:
- if (wIndex & USB_INTRF_FUNC_SUSPEND_LP)
- /* XXX enable Low power suspend */
- ;
- if (wIndex & USB_INTRF_FUNC_SUSPEND_RW)
- /* XXX enable remote wakeup */
- ;
- break;
- default:
- return -EINVAL;
- }
+ ret = dwc3_ep0_handle_intf(dwc, ctrl, set);
break;
-
case USB_RECIP_ENDPOINT:
- switch (wValue) {
- case USB_ENDPOINT_HALT:
- dep = dwc3_wIndex_to_dep(dwc, ctrl->wIndex);
- if (!dep)
- return -EINVAL;
- if (set == 0 && (dep->flags & DWC3_EP_WEDGE))
- break;
- ret = __dwc3_gadget_ep_set_halt(dep, set, true);
- if (ret)
- return -EINVAL;
- break;
- default:
- return -EINVAL;
- }
+ ret = dwc3_ep0_handle_endpoint(dwc, ctrl, set);
break;
-
default:
- return -EINVAL;
+ ret = -EINVAL;
}
- return 0;
+ return ret;
}
static int dwc3_ep0_set_address(struct dwc3 *dwc, struct usb_ctrlrequest *ctrl)
@@ -525,13 +569,12 @@ static int dwc3_ep0_set_address(struct dwc3 *dwc, struct usb_ctrlrequest *ctrl)
addr = le16_to_cpu(ctrl->wValue);
if (addr > 127) {
- dwc3_trace(trace_dwc3_ep0, "invalid device address %d", addr);
+ dev_err(dwc->dev, "invalid device address %d\n", addr);
return -EINVAL;
}
if (state == USB_STATE_CONFIGURED) {
- dwc3_trace(trace_dwc3_ep0,
- "trying to set address when configured");
+ dev_err(dwc->dev, "can't SetAddress() from Configured State\n");
return -EINVAL;
}
@@ -716,35 +759,27 @@ static int dwc3_ep0_std_request(struct dwc3 *dwc, struct usb_ctrlrequest *ctrl)
switch (ctrl->bRequest) {
case USB_REQ_GET_STATUS:
- dwc3_trace(trace_dwc3_ep0, "USB_REQ_GET_STATUS");
ret = dwc3_ep0_handle_status(dwc, ctrl);
break;
case USB_REQ_CLEAR_FEATURE:
- dwc3_trace(trace_dwc3_ep0, "USB_REQ_CLEAR_FEATURE");
ret = dwc3_ep0_handle_feature(dwc, ctrl, 0);
break;
case USB_REQ_SET_FEATURE:
- dwc3_trace(trace_dwc3_ep0, "USB_REQ_SET_FEATURE");
ret = dwc3_ep0_handle_feature(dwc, ctrl, 1);
break;
case USB_REQ_SET_ADDRESS:
- dwc3_trace(trace_dwc3_ep0, "USB_REQ_SET_ADDRESS");
ret = dwc3_ep0_set_address(dwc, ctrl);
break;
case USB_REQ_SET_CONFIGURATION:
- dwc3_trace(trace_dwc3_ep0, "USB_REQ_SET_CONFIGURATION");
ret = dwc3_ep0_set_config(dwc, ctrl);
break;
case USB_REQ_SET_SEL:
- dwc3_trace(trace_dwc3_ep0, "USB_REQ_SET_SEL");
ret = dwc3_ep0_set_sel(dwc, ctrl);
break;
case USB_REQ_SET_ISOCH_DELAY:
- dwc3_trace(trace_dwc3_ep0, "USB_REQ_SET_ISOCH_DELAY");
ret = dwc3_ep0_set_isoch_delay(dwc, ctrl);
break;
default:
- dwc3_trace(trace_dwc3_ep0, "Forwarding to gadget driver");
ret = dwc3_ep0_delegate_req(dwc, ctrl);
break;
}
@@ -820,9 +855,6 @@ static void dwc3_ep0_complete_data(struct dwc3 *dwc,
status = DWC3_TRB_SIZE_TRBSTS(trb->size);
if (status == DWC3_TRBSTS_SETUP_PENDING) {
dwc->setup_packet_pending = true;
-
- dwc3_trace(trace_dwc3_ep0, "Setup Pending received");
-
if (r)
dwc3_gadget_giveback(ep0, r, -ECONNRESET);
@@ -912,7 +944,7 @@ static void dwc3_ep0_complete_status(struct dwc3 *dwc,
ret = dwc3_gadget_set_test_mode(dwc, dwc->test_mode_nr);
if (ret < 0) {
- dwc3_trace(trace_dwc3_ep0, "Invalid Test #%d",
+ dev_err(dwc->dev, "invalid test #%d\n",
dwc->test_mode_nr);
dwc3_ep0_stall_and_restart(dwc);
return;
@@ -920,10 +952,8 @@ static void dwc3_ep0_complete_status(struct dwc3 *dwc,
}
status = DWC3_TRB_SIZE_TRBSTS(trb->size);
- if (status == DWC3_TRBSTS_SETUP_PENDING) {
+ if (status == DWC3_TRBSTS_SETUP_PENDING)
dwc->setup_packet_pending = true;
- dwc3_trace(trace_dwc3_ep0, "Setup Pending received");
- }
dwc->ep0state = EP0_SETUP_PHASE;
dwc3_ep0_out_start(dwc);
@@ -940,17 +970,14 @@ static void dwc3_ep0_xfer_complete(struct dwc3 *dwc,
switch (dwc->ep0state) {
case EP0_SETUP_PHASE:
- dwc3_trace(trace_dwc3_ep0, "Setup Phase");
dwc3_ep0_inspect_setup(dwc, event);
break;
case EP0_DATA_PHASE:
- dwc3_trace(trace_dwc3_ep0, "Data Phase");
dwc3_ep0_complete_data(dwc, event);
break;
case EP0_STATUS_PHASE:
- dwc3_trace(trace_dwc3_ep0, "Status Phase");
dwc3_ep0_complete_status(dwc, event);
break;
default:
@@ -974,12 +1001,10 @@ static void __dwc3_ep0_do_control_data(struct dwc3 *dwc,
u32 transfer_size = 0;
u32 maxpacket;
- ret = usb_gadget_map_request(&dwc->gadget, &req->request,
- dep->number);
- if (ret) {
- dwc3_trace(trace_dwc3_ep0, "failed to map request");
+ ret = usb_gadget_map_request_by_dev(dwc->sysdev,
+ &req->request, dep->number);
+ if (ret)
return;
- }
maxpacket = dep->endpoint.maxpacket;
@@ -1002,12 +1027,10 @@ static void __dwc3_ep0_do_control_data(struct dwc3 *dwc,
dwc->ep0_bounce_addr, transfer_size,
DWC3_TRBCTL_CONTROL_DATA, false);
} else {
- ret = usb_gadget_map_request(&dwc->gadget, &req->request,
- dep->number);
- if (ret) {
- dwc3_trace(trace_dwc3_ep0, "failed to map request");
+ ret = usb_gadget_map_request_by_dev(dwc->sysdev,
+ &req->request, dep->number);
+ if (ret)
return;
- }
ret = dwc3_ep0_start_trans(dwc, dep->number, req->request.dma,
req->request.length, DWC3_TRBCTL_CONTROL_DATA,
@@ -1065,8 +1088,6 @@ static void dwc3_ep0_xfernotready(struct dwc3 *dwc,
{
switch (event->status) {
case DEPEVT_STATUS_CONTROL_DATA:
- dwc3_trace(trace_dwc3_ep0, "Control Data");
-
/*
* We already have a DATA transfer in the controller's cache,
* if we receive a XferNotReady(DATA) we will ignore it, unless
@@ -1079,8 +1100,7 @@ static void dwc3_ep0_xfernotready(struct dwc3 *dwc,
if (dwc->ep0_expect_in != event->endpoint_number) {
struct dwc3_ep *dep = dwc->eps[dwc->ep0_expect_in];
- dwc3_trace(trace_dwc3_ep0,
- "Wrong direction for Data phase");
+ dev_err(dwc->dev, "unexpected direction for Data Phase\n");
dwc3_ep0_end_control_data(dwc, dep);
dwc3_ep0_stall_and_restart(dwc);
return;
@@ -1092,13 +1112,10 @@ static void dwc3_ep0_xfernotready(struct dwc3 *dwc,
if (dwc->ep0_next_event != DWC3_EP0_NRDY_STATUS)
return;
- dwc3_trace(trace_dwc3_ep0, "Control Status");
-
dwc->ep0state = EP0_STATUS_PHASE;
if (dwc->delayed_status) {
WARN_ON_ONCE(event->endpoint_number != 1);
- dwc3_trace(trace_dwc3_ep0, "Delayed Status");
return;
}
@@ -1109,10 +1126,6 @@ static void dwc3_ep0_xfernotready(struct dwc3 *dwc,
void dwc3_ep0_interrupt(struct dwc3 *dwc,
const struct dwc3_event_depevt *event)
{
- dwc3_trace(trace_dwc3_ep0, "%s: state '%s'",
- dwc3_ep_event_string(event),
- dwc3_ep0_state_string(dwc->ep0state));
-
switch (event->endpoint_event) {
case DWC3_DEPEVT_XFERCOMPLETE:
dwc3_ep0_xfer_complete(dwc, event);
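
dwc3_ep0_out_start() now signals @ep0_in_setup each time the controller is re-armed for a Setup packet. The matching waiter is not in this hunk; a plausible sketch, reusing the DWC3_PULL_UP_TIMEOUT constant added to core.h, would let the pull-up teardown block until the in-flight control transfer drains:

	/* Assumed waiter in the pull-up (soft-disconnect) path; sketch only */
	if (!is_on) {
		unsigned long timeout;

		timeout = wait_for_completion_timeout(&dwc->ep0_in_setup,
				msecs_to_jiffies(DWC3_PULL_UP_TIMEOUT));
		if (!timeout)
			dev_err(dwc->dev, "timed out waiting for SETUP phase\n");
	}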
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index 1dfa56a5f1c5..efddaf5d11d1 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -139,9 +139,6 @@ int dwc3_gadget_set_link_state(struct dwc3 *dwc, enum dwc3_link_state state)
udelay(5);
}
- dwc3_trace(trace_dwc3_gadget,
- "link state change request timed out");
-
return -ETIMEDOUT;
}
@@ -178,6 +175,7 @@ void dwc3_gadget_giveback(struct dwc3_ep *dep, struct dwc3_request *req,
req->started = false;
list_del(&req->list);
req->trb = NULL;
+ req->remaining = 0;
if (req->request.status == -EINPROGRESS)
req->request.status = status;
@@ -185,8 +183,8 @@ void dwc3_gadget_giveback(struct dwc3_ep *dep, struct dwc3_request *req,
if (dwc->ep0_bounced && dep->number == 0)
dwc->ep0_bounced = false;
else
- usb_gadget_unmap_request(&dwc->gadget, &req->request,
- req->direction);
+ usb_gadget_unmap_request_by_dev(dwc->sysdev,
+ &req->request, req->direction);
trace_dwc3_gadget_giveback(req);
@@ -216,7 +214,7 @@ int dwc3_send_gadget_generic_command(struct dwc3 *dwc, unsigned cmd, u32 param)
ret = -EINVAL;
break;
}
- } while (timeout--);
+ } while (--timeout);
if (!timeout) {
ret = -ETIMEDOUT;
@@ -233,6 +231,7 @@ static int __dwc3_gadget_wakeup(struct dwc3 *dwc);
int dwc3_send_gadget_ep_cmd(struct dwc3_ep *dep, unsigned cmd,
struct dwc3_gadget_ep_cmd_params *params)
{
+ const struct usb_endpoint_descriptor *desc = dep->endpoint.desc;
struct dwc3 *dwc = dep->dwc;
u32 timeout = 500;
u32 reg;
@@ -258,7 +257,7 @@ int dwc3_send_gadget_ep_cmd(struct dwc3_ep *dep, unsigned cmd,
}
}
- if (cmd == DWC3_DEPCMD_STARTTRANSFER) {
+ if (DWC3_DEPCMD_CMD(cmd) == DWC3_DEPCMD_STARTTRANSFER) {
int needs_wakeup;
needs_wakeup = (dwc->link_state == DWC3_LINK_STATE_U1 ||
@@ -276,7 +275,28 @@ int dwc3_send_gadget_ep_cmd(struct dwc3_ep *dep, unsigned cmd,
dwc3_writel(dep->regs, DWC3_DEPCMDPAR1, params->param1);
dwc3_writel(dep->regs, DWC3_DEPCMDPAR2, params->param2);
- dwc3_writel(dep->regs, DWC3_DEPCMD, cmd | DWC3_DEPCMD_CMDACT);
+ /*
+	 * Synopsys Databook 2.60a states, in section 6.3.2.5.6, that if we're
+ * not relying on XferNotReady, we can make use of a special "No
+ * Response Update Transfer" command where we should clear both CmdAct
+ * and CmdIOC bits.
+ *
+ * With this, we don't need to wait for command completion and can
+ * straight away issue further commands to the endpoint.
+ *
+ * NOTICE: We're making an assumption that control endpoints will never
+ * make use of Update Transfer command. This is a safe assumption
+ * because we can never have more than one request at a time with
+ * Control Endpoints. If anybody changes that assumption, this chunk
+ * needs to be updated accordingly.
+ */
+ if (DWC3_DEPCMD_CMD(cmd) == DWC3_DEPCMD_UPDATETRANSFER &&
+ !usb_endpoint_xfer_isoc(desc))
+ cmd &= ~(DWC3_DEPCMD_CMDIOC | DWC3_DEPCMD_CMDACT);
+ else
+ cmd |= DWC3_DEPCMD_CMDACT;
+
+ dwc3_writel(dep->regs, DWC3_DEPCMD, cmd);
do {
reg = dwc3_readl(dep->regs, DWC3_DEPCMD);
if (!(reg & DWC3_DEPCMD_CMDACT)) {
@@ -318,6 +338,20 @@ int dwc3_send_gadget_ep_cmd(struct dwc3_ep *dep, unsigned cmd,
trace_dwc3_gadget_ep_cmd(dep, cmd, params, cmd_status);
+ if (ret == 0) {
+ switch (DWC3_DEPCMD_CMD(cmd)) {
+ case DWC3_DEPCMD_STARTTRANSFER:
+ dep->flags |= DWC3_EP_TRANSFER_STARTED;
+ break;
+ case DWC3_DEPCMD_ENDTRANSFER:
+ dep->flags &= ~DWC3_EP_TRANSFER_STARTED;
+ break;
+ default:
+ /* nothing */
+ break;
+ }
+ }
+
if (unlikely(susphy)) {
reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0));
reg |= DWC3_GUSB2PHYCFG_SUSPHY;
@@ -365,7 +399,7 @@ static int dwc3_alloc_trb_pool(struct dwc3_ep *dep)
if (dep->trb_pool)
return 0;
- dep->trb_pool = dma_alloc_coherent(dwc->dev,
+ dep->trb_pool = dma_alloc_coherent(dwc->sysdev,
sizeof(struct dwc3_trb) * DWC3_TRB_NUM,
&dep->trb_pool_dma, GFP_KERNEL);
if (!dep->trb_pool) {
@@ -381,7 +415,7 @@ static void dwc3_free_trb_pool(struct dwc3_ep *dep)
{
struct dwc3 *dwc = dep->dwc;
- dma_free_coherent(dwc->dev, sizeof(struct dwc3_trb) * DWC3_TRB_NUM,
+ dma_free_coherent(dwc->sysdev, sizeof(struct dwc3_trb) * DWC3_TRB_NUM,
dep->trb_pool, dep->trb_pool_dma);
dep->trb_pool = NULL;
@@ -454,16 +488,19 @@ static int dwc3_gadget_start_config(struct dwc3 *dwc, struct dwc3_ep *dep)
}
static int dwc3_gadget_set_ep_config(struct dwc3 *dwc, struct dwc3_ep *dep,
- const struct usb_endpoint_descriptor *desc,
- const struct usb_ss_ep_comp_descriptor *comp_desc,
bool modify, bool restore)
{
+ const struct usb_ss_ep_comp_descriptor *comp_desc;
+ const struct usb_endpoint_descriptor *desc;
struct dwc3_gadget_ep_cmd_params params;
if (dev_WARN_ONCE(dwc->dev, modify && restore,
"Can't modify and restore\n"))
return -EINVAL;
+ comp_desc = dep->endpoint.comp_desc;
+ desc = dep->endpoint.desc;
+
memset(&params, 0x00, sizeof(params));
params.param0 = DWC3_DEPCFG_EP_TYPE(usb_endpoint_type(desc))
@@ -542,24 +579,21 @@ static int dwc3_gadget_set_xfer_resource(struct dwc3 *dwc, struct dwc3_ep *dep)
* Caller should take care of locking
*/
static int __dwc3_gadget_ep_enable(struct dwc3_ep *dep,
- const struct usb_endpoint_descriptor *desc,
- const struct usb_ss_ep_comp_descriptor *comp_desc,
bool modify, bool restore)
{
+ const struct usb_endpoint_descriptor *desc = dep->endpoint.desc;
struct dwc3 *dwc = dep->dwc;
+
u32 reg;
int ret;
- dwc3_trace(trace_dwc3_gadget, "Enabling %s", dep->name);
-
if (!(dep->flags & DWC3_EP_ENABLED)) {
ret = dwc3_gadget_start_config(dwc, dep);
if (ret)
return ret;
}
- ret = dwc3_gadget_set_ep_config(dwc, dep, desc, comp_desc, modify,
- restore);
+ ret = dwc3_gadget_set_ep_config(dwc, dep, modify, restore);
if (ret)
return ret;
@@ -567,17 +601,18 @@ static int __dwc3_gadget_ep_enable(struct dwc3_ep *dep,
struct dwc3_trb *trb_st_hw;
struct dwc3_trb *trb_link;
- dep->endpoint.desc = desc;
- dep->comp_desc = comp_desc;
dep->type = usb_endpoint_type(desc);
dep->flags |= DWC3_EP_ENABLED;
+ dep->flags &= ~DWC3_EP_END_TRANSFER_PENDING;
reg = dwc3_readl(dwc->regs, DWC3_DALEPENA);
reg |= DWC3_DALEPENA_EP(dep->number);
dwc3_writel(dwc->regs, DWC3_DALEPENA, reg);
+ init_waitqueue_head(&dep->wait_end_transfer);
+
if (usb_endpoint_xfer_control(desc))
- return 0;
+ goto out;
/* Initialize the TRB ring */
dep->trb_dequeue = 0;
@@ -595,6 +630,39 @@ static int __dwc3_gadget_ep_enable(struct dwc3_ep *dep,
trb_link->ctrl |= DWC3_TRB_CTRL_HWO;
}
+ /*
+ * Issue StartTransfer here with no-op TRB so we can always rely on No
+ * Response Update Transfer command.
+ */
+ if (usb_endpoint_xfer_bulk(desc)) {
+ struct dwc3_gadget_ep_cmd_params params;
+ struct dwc3_trb *trb;
+ dma_addr_t trb_dma;
+ u32 cmd;
+
+ memset(&params, 0, sizeof(params));
+ trb = &dep->trb_pool[0];
+ trb_dma = dwc3_trb_dma_offset(dep, trb);
+
+ params.param0 = upper_32_bits(trb_dma);
+ params.param1 = lower_32_bits(trb_dma);
+
+ cmd = DWC3_DEPCMD_STARTTRANSFER;
+
+ ret = dwc3_send_gadget_ep_cmd(dep, cmd, &params);
+ if (ret < 0)
+ return ret;
+
+ dep->flags |= DWC3_EP_BUSY;
+
+ dep->resource_index = dwc3_gadget_ep_get_transfer_index(dep);
+ WARN_ON_ONCE(!dep->resource_index);
+ }
+
+out:
+ trace_dwc3_gadget_ep_enable(dep);
+
return 0;
}
@@ -632,7 +700,7 @@ static int __dwc3_gadget_ep_disable(struct dwc3_ep *dep)
struct dwc3 *dwc = dep->dwc;
u32 reg;
- dwc3_trace(trace_dwc3_gadget, "Disabling %s", dep->name);
+ trace_dwc3_gadget_ep_disable(dep);
dwc3_remove_requests(dwc, dep);
@@ -645,10 +713,14 @@ static int __dwc3_gadget_ep_disable(struct dwc3_ep *dep)
dwc3_writel(dwc->regs, DWC3_DALEPENA, reg);
dep->stream_capable = false;
- dep->endpoint.desc = NULL;
- dep->comp_desc = NULL;
dep->type = 0;
- dep->flags = 0;
+ dep->flags &= DWC3_EP_END_TRANSFER_PENDING;
+
+ /* Clear out the ep descriptors for non-ep0 */
+ if (dep->number > 1) {
+ dep->endpoint.comp_desc = NULL;
+ dep->endpoint.desc = NULL;
+ }
return 0;
}
@@ -695,7 +767,7 @@ static int dwc3_gadget_ep_enable(struct usb_ep *ep,
return 0;
spin_lock_irqsave(&dwc->lock, flags);
- ret = __dwc3_gadget_ep_enable(dep, desc, ep->comp_desc, false, false);
+ ret = __dwc3_gadget_ep_enable(dep, false, false);
spin_unlock_irqrestore(&dwc->lock, flags);
return ret;
@@ -771,10 +843,9 @@ static void dwc3_prepare_one_trb(struct dwc3_ep *dep,
unsigned length, unsigned chain, unsigned node)
{
struct dwc3_trb *trb;
-
- dwc3_trace(trace_dwc3_gadget, "%s: req %p dma %08llx length %d%s",
- dep->name, req, (unsigned long long) dma,
- length, chain ? " chain" : "");
+ struct dwc3 *dwc = dep->dwc;
+ struct usb_gadget *gadget = &dwc->gadget;
+ enum usb_device_speed speed = gadget->speed;
trb = &dep->trb_pool[dep->trb_enqueue];
@@ -782,7 +853,6 @@ static void dwc3_prepare_one_trb(struct dwc3_ep *dep,
dwc3_gadget_move_started_request(req);
req->trb = trb;
req->trb_dma = dwc3_trb_dma_offset(dep, trb);
- req->first_trb_index = dep->trb_enqueue;
dep->queued_requests++;
}
@@ -798,10 +868,16 @@ static void dwc3_prepare_one_trb(struct dwc3_ep *dep,
break;
case USB_ENDPOINT_XFER_ISOC:
- if (!node)
+ if (!node) {
trb->ctrl = DWC3_TRBCTL_ISOCHRONOUS_FIRST;
- else
+
+ if (speed == USB_SPEED_HIGH) {
+ struct usb_ep *ep = &dep->endpoint;
+ trb->size |= DWC3_TRB_SIZE_PCM1(ep->mult - 1);
+ }
+ } else {
trb->ctrl = DWC3_TRBCTL_ISOCHRONOUS;
+ }
/* always enable Interrupt on Missed ISOC */
trb->ctrl |= DWC3_TRB_CTRL_ISP_IMI;
@@ -816,15 +892,21 @@ static void dwc3_prepare_one_trb(struct dwc3_ep *dep,
* This is only possible with faulty memory because we
* checked it already :)
*/
- BUG();
+ dev_WARN(dwc->dev, "Unknown endpoint type %d\n",
+ usb_endpoint_type(dep->endpoint.desc));
}
/* always enable Continue on Short Packet */
- trb->ctrl |= DWC3_TRB_CTRL_CSP;
+ if (usb_endpoint_dir_out(dep->endpoint.desc)) {
+ trb->ctrl |= DWC3_TRB_CTRL_CSP;
+
+ if (req->request.short_not_ok)
+ trb->ctrl |= DWC3_TRB_CTRL_ISP_IMI;
+ }
if ((!req->request.no_interrupt && !chain) ||
(dwc3_calc_trbs_left(dep) == 0))
- trb->ctrl |= DWC3_TRB_CTRL_IOC | DWC3_TRB_CTRL_ISP_IMI;
+ trb->ctrl |= DWC3_TRB_CTRL_IOC;
if (chain)
trb->ctrl |= DWC3_TRB_CTRL_CHN;
@@ -859,6 +941,7 @@ static struct dwc3_trb *dwc3_ep_prev_trb(struct dwc3_ep *dep, u8 index)
static u32 dwc3_calc_trbs_left(struct dwc3_ep *dep)
{
struct dwc3_trb *tmp;
+ struct dwc3 *dwc = dep->dwc;
u8 trbs_left;
/*
@@ -870,7 +953,8 @@ static u32 dwc3_calc_trbs_left(struct dwc3_ep *dep)
*/
if (dep->trb_enqueue == dep->trb_dequeue) {
tmp = dwc3_ep_prev_trb(dep, dep->trb_enqueue);
- if (tmp->ctrl & DWC3_TRB_CTRL_HWO)
+ if (dev_WARN_ONCE(dwc->dev, tmp->ctrl & DWC3_TRB_CTRL_HWO,
+ "%s No TRBS left\n", dep->name))
return 0;
return DWC3_TRB_NUM - 1;
@@ -941,6 +1025,24 @@ static void dwc3_prepare_trbs(struct dwc3_ep *dep)
if (!dwc3_calc_trbs_left(dep))
return;
+ /*
+ * We can get in a situation where there's a request in the started list
+ * but there weren't enough TRBs to fully kick it in the first time
+ * around, so it has been waiting for more TRBs to be freed up.
+ *
+ * In that case, we should check if we have a request with pending_sgs
+ * in the started list and prepare TRBs for that request first,
+ * otherwise we will prepare TRBs completely out of order and that will
+ * break things.
+ */
+ list_for_each_entry(req, &dep->started_list, list) {
+ if (req->num_pending_sgs > 0)
+ dwc3_prepare_one_trb_sg(dep, req);
+
+ if (!dwc3_calc_trbs_left(dep))
+ return;
+ }
+
list_for_each_entry_safe(req, n, &dep->pending_list, list) {
if (req->num_pending_sgs > 0)
dwc3_prepare_one_trb_sg(dep, req);
@@ -956,7 +1058,6 @@ static int __dwc3_gadget_kick_transfer(struct dwc3_ep *dep, u16 cmd_param)
{
struct dwc3_gadget_ep_cmd_params params;
struct dwc3_request *req;
- struct dwc3 *dwc = dep->dwc;
int starting;
int ret;
u32 cmd;
@@ -989,9 +1090,10 @@ static int __dwc3_gadget_kick_transfer(struct dwc3_ep *dep, u16 cmd_param)
* here and stop, unmap, free and del each of the linked
* requests instead of what we do now.
*/
- usb_gadget_unmap_request(&dwc->gadget, &req->request,
- req->direction);
- list_del(&req->list);
+ if (req->trb)
+ memset(req->trb, 0, sizeof(struct dwc3_trb));
+ dep->queued_requests--;
+ dwc3_gadget_giveback(dep, req, ret);
return ret;
}
@@ -1005,14 +1107,21 @@ static int __dwc3_gadget_kick_transfer(struct dwc3_ep *dep, u16 cmd_param)
return 0;
}
+static int __dwc3_gadget_get_frame(struct dwc3 *dwc)
+{
+ u32 reg;
+
+ reg = dwc3_readl(dwc->regs, DWC3_DSTS);
+ return DWC3_DSTS_SOFFN(reg);
+}
+
static void __dwc3_gadget_start_isoc(struct dwc3 *dwc,
struct dwc3_ep *dep, u32 cur_uf)
{
u32 uf;
if (list_empty(&dep->pending_list)) {
- dwc3_trace(trace_dwc3_gadget,
- "ISOC ep %s run out for requests",
+ dev_info(dwc->dev, "%s: ran out of requests\n",
dep->name);
dep->flags |= DWC3_EP_PENDING_REQUEST;
return;
@@ -1041,16 +1150,15 @@ static int __dwc3_gadget_ep_queue(struct dwc3_ep *dep, struct dwc3_request *req)
int ret;
if (!dep->endpoint.desc) {
- dwc3_trace(trace_dwc3_gadget,
- "trying to queue request %p to disabled %s",
- &req->request, dep->endpoint.name);
+ dev_err(dwc->dev, "%s: can't queue to disabled endpoint\n",
+ dep->name);
return -ESHUTDOWN;
}
if (WARN(req->dep != dep, "request %p belongs to '%s'\n",
&req->request, req->dep->name)) {
- dwc3_trace(trace_dwc3_gadget, "request %p belongs to '%s'",
- &req->request, req->dep->name);
+ dev_err(dwc->dev, "%s: request %p belongs to '%s'\n",
+ dep->name, &req->request, req->dep->name);
return -EINVAL;
}
@@ -1063,8 +1171,8 @@ static int __dwc3_gadget_ep_queue(struct dwc3_ep *dep, struct dwc3_request *req)
trace_dwc3_ep_queue(req);
- ret = usb_gadget_map_request(&dwc->gadget, &req->request,
- dep->direction);
+ ret = usb_gadget_map_request_by_dev(dwc->sysdev, &req->request,
+ dep->direction);
if (ret)
return ret;
@@ -1082,10 +1190,17 @@ static int __dwc3_gadget_ep_queue(struct dwc3_ep *dep, struct dwc3_request *req)
* errors which will force us issue EndTransfer command.
*/
if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
- if ((dep->flags & DWC3_EP_PENDING_REQUEST) &&
- list_empty(&dep->started_list)) {
- dwc3_stop_active_transfer(dwc, dep->number, true);
- dep->flags = DWC3_EP_ENABLED;
+ if ((dep->flags & DWC3_EP_PENDING_REQUEST)) {
+ if (dep->flags & DWC3_EP_TRANSFER_STARTED) {
+ dwc3_stop_active_transfer(dwc, dep->number, true);
+ dep->flags = DWC3_EP_ENABLED;
+ } else {
+ u32 cur_uf;
+
+ cur_uf = __dwc3_gadget_get_frame(dwc);
+ __dwc3_gadget_start_isoc(dwc, dep, cur_uf);
+ dep->flags &= ~DWC3_EP_PENDING_REQUEST;
+ }
}
return 0;
}
@@ -1094,10 +1209,6 @@ static int __dwc3_gadget_ep_queue(struct dwc3_ep *dep, struct dwc3_request *req)
return 0;
ret = __dwc3_gadget_kick_transfer(dep, 0);
- if (ret && ret != -EBUSY)
- dwc3_trace(trace_dwc3_gadget,
- "%s: failed to kick transfers",
- dep->name);
if (ret == -EBUSY)
ret = 0;
@@ -1116,7 +1227,6 @@ static int __dwc3_gadget_ep_queue_zlp(struct dwc3 *dwc, struct dwc3_ep *dep)
struct usb_request *request;
struct usb_ep *ep = &dep->endpoint;
- dwc3_trace(trace_dwc3_gadget, "queueing ZLP");
request = dwc3_gadget_ep_alloc_request(ep, GFP_ATOMIC);
if (!request)
return -ENOMEM;
@@ -1235,9 +1345,6 @@ int __dwc3_gadget_ep_set_halt(struct dwc3_ep *dep, int value, int protocol)
if (!protocol && ((dep->direction && transfer_in_flight) ||
(!dep->direction && started))) {
- dwc3_trace(trace_dwc3_gadget,
- "%s: pending request, cannot halt",
- dep->name);
return -EAGAIN;
}
@@ -1331,10 +1438,8 @@ static const struct usb_ep_ops dwc3_gadget_ep_ops = {
static int dwc3_gadget_get_frame(struct usb_gadget *g)
{
struct dwc3 *dwc = gadget_to_dwc(g);
- u32 reg;
- reg = dwc3_readl(dwc->regs, DWC3_DSTS);
- return DWC3_DSTS_SOFFN(reg);
+ return __dwc3_gadget_get_frame(dwc);
}
static int __dwc3_gadget_wakeup(struct dwc3 *dwc)
@@ -1357,10 +1462,8 @@ static int __dwc3_gadget_wakeup(struct dwc3 *dwc)
speed = reg & DWC3_DSTS_CONNECTSPD;
if ((speed == DWC3_DSTS_SUPERSPEED) ||
- (speed == DWC3_DSTS_SUPERSPEED_PLUS)) {
- dwc3_trace(trace_dwc3_gadget, "no wakeup on SuperSpeed");
+ (speed == DWC3_DSTS_SUPERSPEED_PLUS))
return 0;
- }
link_state = DWC3_DSTS_USBLNKST(reg);
@@ -1369,9 +1472,6 @@ static int __dwc3_gadget_wakeup(struct dwc3 *dwc)
case DWC3_LINK_STATE_U3: /* in HS, means SUSPEND */
break;
default:
- dwc3_trace(trace_dwc3_gadget,
- "can't wakeup from '%s'",
- dwc3_gadget_link_string(link_state));
return -EINVAL;
}
@@ -1476,11 +1576,6 @@ static int dwc3_gadget_run_stop(struct dwc3 *dwc, int is_on, int suspend)
if (!timeout)
return -ETIMEDOUT;
- dwc3_trace(trace_dwc3_gadget, "gadget %s data soft-%s",
- dwc->gadget_driver
- ? dwc->gadget_driver->function : "no-function",
- is_on ? "connect" : "disconnect");
-
return 0;
}
@@ -1492,6 +1587,21 @@ static int dwc3_gadget_pullup(struct usb_gadget *g, int is_on)
is_on = !!is_on;
+ /*
+ * Per databook, when we want to stop the gadget, if a control transfer
+ * is still in process, complete it and get the core into setup phase.
+ */
+ if (!is_on && dwc->ep0state != EP0_SETUP_PHASE) {
+ reinit_completion(&dwc->ep0_in_setup);
+
+ ret = wait_for_completion_timeout(&dwc->ep0_in_setup,
+ msecs_to_jiffies(DWC3_PULL_UP_TIMEOUT));
+ if (ret == 0) {
+ dev_err(dwc->dev, "timed out waiting for SETUP phase\n");
+ return -ETIMEDOUT;
+ }
+ }
+
spin_lock_irqsave(&dwc->lock, flags);
ret = dwc3_gadget_run_stop(dwc, is_on, false);
spin_unlock_irqrestore(&dwc->lock, flags);
@@ -1509,11 +1619,13 @@ static void dwc3_gadget_enable_irq(struct dwc3 *dwc)
DWC3_DEVTEN_CMDCMPLTEN |
DWC3_DEVTEN_ERRTICERREN |
DWC3_DEVTEN_WKUPEVTEN |
- DWC3_DEVTEN_ULSTCNGEN |
DWC3_DEVTEN_CONNECTDONEEN |
DWC3_DEVTEN_USBRSTEN |
DWC3_DEVTEN_DISCONNEVTEN);
+ if (dwc->revision < DWC3_REVISION_250A)
+ reg |= DWC3_DEVTEN_ULSTCNGEN;
+
dwc3_writel(dwc->regs, DWC3_DEVTEN, reg);
}
@@ -1573,6 +1685,17 @@ static int __dwc3_gadget_start(struct dwc3 *dwc)
int ret = 0;
u32 reg;
+ /*
+ * Use IMOD if enabled via dwc->imod_interval. Otherwise, if
+ * the core supports IMOD, disable it.
+ */
+ if (dwc->imod_interval) {
+ dwc3_writel(dwc->regs, DWC3_DEV_IMOD(0), dwc->imod_interval);
+ dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(0), DWC3_GEVNTCOUNT_EHB);
+ } else if (dwc3_has_imod(dwc)) {
+ dwc3_writel(dwc->regs, DWC3_DEV_IMOD(0), 0);
+ }
+
reg = dwc3_readl(dwc->regs, DWC3_DCFG);
reg &= ~(DWC3_DCFG_SPEED_MASK);
@@ -1633,16 +1756,14 @@ static int __dwc3_gadget_start(struct dwc3 *dwc)
dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);
dep = dwc->eps[0];
- ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, false,
- false);
+ ret = __dwc3_gadget_ep_enable(dep, false, false);
if (ret) {
dev_err(dwc->dev, "failed to enable %s\n", dep->name);
goto err0;
}
dep = dwc->eps[1];
- ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, false,
- false);
+ ret = __dwc3_gadget_ep_enable(dep, false, false);
if (ret) {
dev_err(dwc->dev, "failed to enable %s\n", dep->name);
goto err1;
@@ -1708,9 +1829,6 @@ err0:
static void __dwc3_gadget_stop(struct dwc3 *dwc)
{
- if (pm_runtime_suspended(dwc->dev))
- return;
-
dwc3_gadget_disable_irq(dwc);
__dwc3_gadget_ep_disable(dwc->eps[0]);
__dwc3_gadget_ep_disable(dwc->eps[1]);
@@ -1720,9 +1838,30 @@ static int dwc3_gadget_stop(struct usb_gadget *g)
{
struct dwc3 *dwc = gadget_to_dwc(g);
unsigned long flags;
+ int epnum;
spin_lock_irqsave(&dwc->lock, flags);
+
+ if (pm_runtime_suspended(dwc->dev))
+ goto out;
+
__dwc3_gadget_stop(dwc);
+
+ for (epnum = 2; epnum < DWC3_ENDPOINTS_NUM; epnum++) {
+ struct dwc3_ep *dep = dwc->eps[epnum];
+
+ if (!dep)
+ continue;
+
+ if (!(dep->flags & DWC3_EP_END_TRANSFER_PENDING))
+ continue;
+
+ wait_event_lock_irq(dep->wait_end_transfer,
+ !(dep->flags & DWC3_EP_END_TRANSFER_PENDING),
+ dwc->lock);
+ }
+
+out:
dwc->gadget_driver = NULL;
spin_unlock_irqrestore(&dwc->lock, flags);
@@ -1765,9 +1904,13 @@ static int dwc3_gadget_init_hw_endpoints(struct dwc3 *dwc,
(epnum & 1) ? "in" : "out");
dep->endpoint.name = dep->name;
- spin_lock_init(&dep->lock);
- dwc3_trace(trace_dwc3_gadget, "initializing %s", dep->name);
+	if (dep->number <= 1) {
+ dep->endpoint.desc = &dwc3_gadget_ep0_desc;
+ dep->endpoint.comp_desc = NULL;
+ }
+
+ spin_lock_init(&dep->lock);
if (epnum == 0 || epnum == 1) {
usb_ep_set_maxpacket_limit(&dep->endpoint, 512);
@@ -1815,15 +1958,13 @@ static int dwc3_gadget_init_endpoints(struct dwc3 *dwc)
ret = dwc3_gadget_init_hw_endpoints(dwc, dwc->num_out_eps, 0);
if (ret < 0) {
- dwc3_trace(trace_dwc3_gadget,
- "failed to allocate OUT endpoints");
+ dev_err(dwc->dev, "failed to initialize OUT endpoints\n");
return ret;
}
ret = dwc3_gadget_init_hw_endpoints(dwc, dwc->num_in_eps, 1);
if (ret < 0) {
- dwc3_trace(trace_dwc3_gadget,
- "failed to allocate IN endpoints");
+ dev_err(dwc->dev, "failed to initialize IN endpoints\n");
return ret;
}
@@ -1892,15 +2033,12 @@ static int __dwc3_cleanup_done_trbs(struct dwc3 *dwc, struct dwc3_ep *dep,
return 1;
count = trb->size & DWC3_TRB_SIZE_MASK;
- req->request.actual += count;
+ req->remaining += count;
if (dep->direction) {
if (count) {
trb_status = DWC3_TRB_SIZE_TRBSTS(trb->size);
if (trb_status == DWC3_TRBSTS_MISSED_ISOC) {
- dwc3_trace(trace_dwc3_gadget,
- "%s: incomplete IN transfer",
- dep->name);
/*
* If missed isoc occurred and there is
* no request queued then issue END
@@ -1946,11 +2084,10 @@ static int dwc3_cleanup_done_reqs(struct dwc3 *dwc, struct dwc3_ep *dep,
struct dwc3_request *req, *n;
struct dwc3_trb *trb;
bool ioc = false;
- int ret;
+ int ret = 0;
list_for_each_entry_safe(req, n, &dep->started_list, list) {
unsigned length;
- unsigned actual;
int chain;
length = req->request.length;
@@ -1964,6 +2101,9 @@ static int dwc3_cleanup_done_reqs(struct dwc3 *dwc, struct dwc3_ep *dep,
for_each_sg(sg, s, pending, i) {
trb = &dep->trb_pool[dep->trb_dequeue];
+ if (trb->ctrl & DWC3_TRB_CTRL_HWO)
+ break;
+
req->sg = sg_next(s);
req->num_pending_sgs--;
@@ -1978,17 +2118,9 @@ static int dwc3_cleanup_done_reqs(struct dwc3 *dwc, struct dwc3_ep *dep,
event, status, chain);
}
- /*
- * We assume here we will always receive the entire data block
- * which we should receive. Meaning, if we program RX to
- * receive 4K but we receive only 2K, we assume that's all we
- * should receive and we simply bounce the request back to the
- * gadget driver for further processing.
- */
- actual = length - req->request.actual;
- req->request.actual = actual;
+ req->request.actual = length - req->remaining;
- if (ret && chain && (actual < length) && req->num_pending_sgs)
+ if ((req->request.actual < length) && req->num_pending_sgs)
return __dwc3_gadget_kick_transfer(dep, 0);
dwc3_gadget_giveback(dep, req, status);
@@ -2096,10 +2228,12 @@ static void dwc3_endpoint_interrupt(struct dwc3 *dwc,
{
struct dwc3_ep *dep;
u8 epnum = event->endpoint_number;
+ u8 cmd;
dep = dwc->eps[epnum];
- if (!(dep->flags & DWC3_EP_ENABLED))
+ if (!(dep->flags & DWC3_EP_ENABLED) &&
+ !(dep->flags & DWC3_EP_END_TRANSFER_PENDING))
return;
if (epnum == 0 || epnum == 1) {
@@ -2112,9 +2246,7 @@ static void dwc3_endpoint_interrupt(struct dwc3 *dwc,
dep->resource_index = 0;
if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
- dwc3_trace(trace_dwc3_gadget,
- "%s is an Isochronous endpoint",
- dep->name);
+ dev_err(dwc->dev, "XferComplete for Isochronous endpoint\n");
return;
}
@@ -2127,22 +2259,11 @@ static void dwc3_endpoint_interrupt(struct dwc3 *dwc,
if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
dwc3_gadget_start_isoc(dwc, dep, event);
} else {
- int active;
int ret;
- active = event->status & DEPEVT_STATUS_TRANSFER_ACTIVE;
-
- dwc3_trace(trace_dwc3_gadget, "%s: reason %s",
- dep->name, active ? "Transfer Active"
- : "Transfer Not Active");
-
ret = __dwc3_gadget_kick_transfer(dep, 0);
if (!ret || ret == -EBUSY)
return;
-
- dwc3_trace(trace_dwc3_gadget,
- "%s: failed to kick transfers",
- dep->name);
}
break;
@@ -2152,26 +2273,16 @@ static void dwc3_endpoint_interrupt(struct dwc3 *dwc,
dep->name);
return;
}
+ break;
+ case DWC3_DEPEVT_EPCMDCMPLT:
+ cmd = DEPEVT_PARAMETER_CMD(event->parameters);
- switch (event->status) {
- case DEPEVT_STREAMEVT_FOUND:
- dwc3_trace(trace_dwc3_gadget,
- "Stream %d found and started",
- event->parameters);
-
- break;
- case DEPEVT_STREAMEVT_NOTFOUND:
- /* FALLTHROUGH */
- default:
- dwc3_trace(trace_dwc3_gadget,
- "unable to find suitable stream");
+ if (cmd == DWC3_DEPCMD_ENDTRANSFER) {
+ dep->flags &= ~DWC3_EP_END_TRANSFER_PENDING;
+ wake_up(&dep->wait_end_transfer);
}
break;
case DWC3_DEPEVT_RXTXFIFOEVT:
- dwc3_trace(trace_dwc3_gadget, "%s FIFO Overrun", dep->name);
- break;
- case DWC3_DEPEVT_EPCMDCMPLT:
- dwc3_trace(trace_dwc3_gadget, "Endpoint Command Complete");
break;
}
}
@@ -2224,7 +2335,8 @@ static void dwc3_stop_active_transfer(struct dwc3 *dwc, u32 epnum, bool force)
dep = dwc->eps[epnum];
- if (!dep->resource_index)
+ if ((dep->flags & DWC3_EP_END_TRANSFER_PENDING) ||
+ !dep->resource_index)
return;
/*
@@ -2268,25 +2380,9 @@ static void dwc3_stop_active_transfer(struct dwc3 *dwc, u32 epnum, bool force)
dep->resource_index = 0;
dep->flags &= ~DWC3_EP_BUSY;
- if (dwc3_is_usb31(dwc) || dwc->revision < DWC3_REVISION_310A)
+ if (dwc3_is_usb31(dwc) || dwc->revision < DWC3_REVISION_310A) {
+ dep->flags |= DWC3_EP_END_TRANSFER_PENDING;
udelay(100);
-}
-
-static void dwc3_stop_active_transfers(struct dwc3 *dwc)
-{
- u32 epnum;
-
- for (epnum = 2; epnum < DWC3_ENDPOINTS_NUM; epnum++) {
- struct dwc3_ep *dep;
-
- dep = dwc->eps[epnum];
- if (!dep)
- continue;
-
- if (!(dep->flags & DWC3_EP_ENABLED))
- continue;
-
- dwc3_remove_requests(dwc, dep);
}
}
@@ -2375,8 +2471,6 @@ static void dwc3_gadget_reset_interrupt(struct dwc3 *dwc)
reg &= ~DWC3_DCTL_TSTCTRL_MASK;
dwc3_writel(dwc->regs, DWC3_DCTL, reg);
dwc->test_mode = false;
-
- dwc3_stop_active_transfers(dwc);
dwc3_clear_stall_all_ep(dwc);
/* Reset device address to zero */
@@ -2385,32 +2479,6 @@ static void dwc3_gadget_reset_interrupt(struct dwc3 *dwc)
dwc3_writel(dwc->regs, DWC3_DCFG, reg);
}
-static void dwc3_update_ram_clk_sel(struct dwc3 *dwc, u32 speed)
-{
- u32 reg;
- u32 usb30_clock = DWC3_GCTL_CLK_BUS;
-
- /*
- * We change the clock only at SS but I dunno why I would want to do
- * this. Maybe it becomes part of the power saving plan.
- */
-
- if ((speed != DWC3_DSTS_SUPERSPEED) &&
- (speed != DWC3_DSTS_SUPERSPEED_PLUS))
- return;
-
- /*
- * RAMClkSel is reset to 0 after USB reset, so it must be reprogrammed
- * each time on Connect Done.
- */
- if (!usb30_clock)
- return;
-
- reg = dwc3_readl(dwc->regs, DWC3_GCTL);
- reg |= DWC3_GCTL_RAMCLKSEL(usb30_clock);
- dwc3_writel(dwc->regs, DWC3_GCTL, reg);
-}
-
static void dwc3_gadget_conndone_interrupt(struct dwc3 *dwc)
{
struct dwc3_ep *dep;
@@ -2422,7 +2490,14 @@ static void dwc3_gadget_conndone_interrupt(struct dwc3 *dwc)
speed = reg & DWC3_DSTS_CONNECTSPD;
dwc->speed = speed;
- dwc3_update_ram_clk_sel(dwc, speed);
+ /*
+ * RAMClkSel is reset to 0 after USB reset, so it must be reprogrammed
+ * each time on Connect Done.
+ *
+ * Currently we always use the reset value. If any platform
+ * wants to set this to a different value, we need to add a
+ * setting and update GCTL.RAMCLKSEL here.
+ */
switch (speed) {
case DWC3_DSTS_SUPERSPEED_PLUS:
@@ -2491,7 +2566,7 @@ static void dwc3_gadget_conndone_interrupt(struct dwc3 *dwc)
*/
WARN_ONCE(dwc->revision < DWC3_REVISION_240A
&& dwc->has_lpm_erratum,
- "LPM Erratum not available on dwc3 revisisions < 2.40a\n");
+ "LPM Erratum not available on dwc3 revisions < 2.40a\n");
if (dwc->has_lpm_erratum && dwc->revision >= DWC3_REVISION_240A)
reg |= DWC3_DCTL_LPM_ERRATA(dwc->lpm_nyet_threshold);
@@ -2504,16 +2579,14 @@ static void dwc3_gadget_conndone_interrupt(struct dwc3 *dwc)
}
dep = dwc->eps[0];
- ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, true,
- false);
+ ret = __dwc3_gadget_ep_enable(dep, true, false);
if (ret) {
dev_err(dwc->dev, "failed to enable %s\n", dep->name);
return;
}
dep = dwc->eps[1];
- ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, true,
- false);
+ ret = __dwc3_gadget_ep_enable(dep, true, false);
if (ret) {
dev_err(dwc->dev, "failed to enable %s\n", dep->name);
return;
@@ -2570,8 +2643,6 @@ static void dwc3_gadget_linksts_change_interrupt(struct dwc3 *dwc,
(pwropt != DWC3_GHWPARAMS1_EN_PWROPT_HIB)) {
if ((dwc->link_state == DWC3_LINK_STATE_U3) &&
(next == DWC3_LINK_STATE_RESUME)) {
- dwc3_trace(trace_dwc3_gadget,
- "ignoring transition U3 -> Resume");
return;
}
}
@@ -2705,11 +2776,7 @@ static void dwc3_gadget_interrupt(struct dwc3 *dwc,
break;
case DWC3_DEVICE_EVENT_EOPF:
/* It changed to be suspend event for version 2.30a and above */
- if (dwc->revision < DWC3_REVISION_230A) {
- dwc3_trace(trace_dwc3_gadget, "End of Periodic Frame");
- } else {
- dwc3_trace(trace_dwc3_gadget, "U3/L1-L2 Suspend Event");
-
+ if (dwc->revision >= DWC3_REVISION_230A) {
/*
* Ignore suspend event until the gadget enters into
* USB_STATE_CONFIGURED state.
@@ -2720,16 +2787,9 @@ static void dwc3_gadget_interrupt(struct dwc3 *dwc,
}
break;
case DWC3_DEVICE_EVENT_SOF:
- dwc3_trace(trace_dwc3_gadget, "Start of Periodic Frame");
- break;
case DWC3_DEVICE_EVENT_ERRATIC_ERROR:
- dwc3_trace(trace_dwc3_gadget, "Erratic Error");
- break;
case DWC3_DEVICE_EVENT_CMD_CMPL:
- dwc3_trace(trace_dwc3_gadget, "Command Complete");
- break;
case DWC3_DEVICE_EVENT_OVERFLOW:
- dwc3_trace(trace_dwc3_gadget, "Overflow");
break;
default:
dev_WARN(dwc->dev, "UNKNOWN IRQ %d\n", event->type);
@@ -2739,7 +2799,7 @@ static void dwc3_gadget_interrupt(struct dwc3 *dwc,
static void dwc3_process_event_entry(struct dwc3 *dwc,
const union dwc3_event *event)
{
- trace_dwc3_event(event->raw);
+ trace_dwc3_event(event->raw, dwc);
/* Endpoint IRQ, handle it and return early */
if (event->type.is_devspec == 0) {
@@ -2772,7 +2832,7 @@ static irqreturn_t dwc3_process_event_buf(struct dwc3_event_buffer *evt)
while (left > 0) {
union dwc3_event event;
- event.raw = *(u32 *) (evt->buf + evt->lpos);
+ event.raw = *(u32 *) (evt->cache + evt->lpos);
dwc3_process_event_entry(dwc, &event);
@@ -2785,10 +2845,8 @@ static irqreturn_t dwc3_process_event_buf(struct dwc3_event_buffer *evt)
* boundary so I worry about that once we try to handle
* that.
*/
- evt->lpos = (evt->lpos + 4) % DWC3_EVENT_BUFFERS_SIZE;
+ evt->lpos = (evt->lpos + 4) % evt->length;
left -= 4;
-
- dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(0), 4);
}
evt->count = 0;
@@ -2800,6 +2858,11 @@ static irqreturn_t dwc3_process_event_buf(struct dwc3_event_buffer *evt)
reg &= ~DWC3_GEVNTSIZ_INTMASK;
dwc3_writel(dwc->regs, DWC3_GEVNTSIZ(0), reg);
+ if (dwc->imod_interval) {
+ dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(0), DWC3_GEVNTCOUNT_EHB);
+ dwc3_writel(dwc->regs, DWC3_DEV_IMOD(0), dwc->imod_interval);
+ }
+
return ret;
}
@@ -2820,6 +2883,7 @@ static irqreturn_t dwc3_thread_interrupt(int irq, void *_evt)
static irqreturn_t dwc3_check_event_buf(struct dwc3_event_buffer *evt)
{
struct dwc3 *dwc = evt->dwc;
+ u32 amount;
u32 count;
u32 reg;
@@ -2843,6 +2907,14 @@ static irqreturn_t dwc3_check_event_buf(struct dwc3_event_buffer *evt)
reg |= DWC3_GEVNTSIZ_INTMASK;
dwc3_writel(dwc->regs, DWC3_GEVNTSIZ(0), reg);
+ amount = min(count, evt->length - evt->lpos);
+ memcpy(evt->cache + evt->lpos, evt->buf + evt->lpos, amount);
+
+ if (amount < count)
+ memcpy(evt->cache, evt->buf, count - amount);
+
+ dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(0), count);
+
return IRQ_WAKE_THREAD;
}
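The cache copy in dwc3_check_event_buf() has to respect the event ring's wrap-around, so it runs in at most two chunks. A self-contained sketch of the same pattern (hypothetical helper, not part of the patch):

	#include <stddef.h>
	#include <string.h>

	static void ring_copy(void *dst, const void *src, size_t len,
			      size_t pos, size_t count)
	{
		/* bytes available before the end of the ring */
		size_t amount = count < len - pos ? count : len - pos;

		memcpy((char *)dst + pos, (const char *)src + pos, amount);
		if (amount < count)	/* wrapped: copy the tail to the start */
			memcpy(dst, src, count - amount);
	}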
@@ -2853,6 +2925,39 @@ static irqreturn_t dwc3_interrupt(int irq, void *_evt)
return dwc3_check_event_buf(evt);
}
+static int dwc3_gadget_get_irq(struct dwc3 *dwc)
+{
+ struct platform_device *dwc3_pdev = to_platform_device(dwc->dev);
+ int irq;
+
+ irq = platform_get_irq_byname(dwc3_pdev, "peripheral");
+ if (irq > 0)
+ goto out;
+
+ if (irq == -EPROBE_DEFER)
+ goto out;
+
+ irq = platform_get_irq_byname(dwc3_pdev, "dwc_usb3");
+ if (irq > 0)
+ goto out;
+
+ if (irq == -EPROBE_DEFER)
+ goto out;
+
+ irq = platform_get_irq(dwc3_pdev, 0);
+ if (irq > 0)
+ goto out;
+
+ if (irq != -EPROBE_DEFER)
+ dev_err(dwc->dev, "missing peripheral IRQ\n");
+
+ if (!irq)
+ irq = -EINVAL;
+
+out:
+ return irq;
+}
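dwc3_gadget_get_irq() tries progressively more generic lookups, deferring probe whenever the resource is not ready yet. The same fallback chain, condensed (a sketch using the real platform_get_irq* APIs; error reporting omitted):

	static int get_irq_sketch(struct platform_device *pdev)
	{
		int irq;

		irq = platform_get_irq_byname(pdev, "peripheral");
		if (irq > 0 || irq == -EPROBE_DEFER)
			return irq;

		irq = platform_get_irq_byname(pdev, "dwc_usb3");
		if (irq > 0 || irq == -EPROBE_DEFER)
			return irq;

		irq = platform_get_irq(pdev, 0);	/* last resort: index 0 */
		return irq ? irq : -EINVAL;
	}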
+
/**
* dwc3_gadget_init - Initializes gadget related registers
* @dwc: pointer to our controller context structure
@@ -2861,35 +2966,18 @@ static irqreturn_t dwc3_interrupt(int irq, void *_evt)
*/
int dwc3_gadget_init(struct dwc3 *dwc)
{
- int ret, irq;
- struct platform_device *dwc3_pdev = to_platform_device(dwc->dev);
+ int ret;
+ int irq;
- irq = platform_get_irq_byname(dwc3_pdev, "peripheral");
- if (irq == -EPROBE_DEFER)
- return irq;
-
- if (irq <= 0) {
- irq = platform_get_irq_byname(dwc3_pdev, "dwc_usb3");
- if (irq == -EPROBE_DEFER)
- return irq;
-
- if (irq <= 0) {
- irq = platform_get_irq(dwc3_pdev, 0);
- if (irq <= 0) {
- if (irq != -EPROBE_DEFER) {
- dev_err(dwc->dev,
- "missing peripheral IRQ\n");
- }
- if (!irq)
- irq = -EINVAL;
- return irq;
- }
- }
+ irq = dwc3_gadget_get_irq(dwc);
+ if (irq < 0) {
+ ret = irq;
+ goto err0;
}
dwc->irq_gadget = irq;
- dwc->ctrl_req = dma_alloc_coherent(dwc->dev, sizeof(*dwc->ctrl_req),
+ dwc->ctrl_req = dma_alloc_coherent(dwc->sysdev, sizeof(*dwc->ctrl_req),
&dwc->ctrl_req_addr, GFP_KERNEL);
if (!dwc->ctrl_req) {
dev_err(dwc->dev, "failed to allocate ctrl request\n");
@@ -2897,8 +2985,9 @@ int dwc3_gadget_init(struct dwc3 *dwc)
goto err0;
}
- dwc->ep0_trb = dma_alloc_coherent(dwc->dev, sizeof(*dwc->ep0_trb) * 2,
- &dwc->ep0_trb_addr, GFP_KERNEL);
+ dwc->ep0_trb = dma_alloc_coherent(dwc->sysdev,
+ sizeof(*dwc->ep0_trb) * 2,
+ &dwc->ep0_trb_addr, GFP_KERNEL);
if (!dwc->ep0_trb) {
dev_err(dwc->dev, "failed to allocate ep0 trb\n");
ret = -ENOMEM;
@@ -2911,7 +3000,7 @@ int dwc3_gadget_init(struct dwc3 *dwc)
goto err2;
}
- dwc->ep0_bounce = dma_alloc_coherent(dwc->dev,
+ dwc->ep0_bounce = dma_alloc_coherent(dwc->sysdev,
DWC3_EP0_BOUNCE_SIZE, &dwc->ep0_bounce_addr,
GFP_KERNEL);
if (!dwc->ep0_bounce) {
@@ -2926,6 +3015,8 @@ int dwc3_gadget_init(struct dwc3 *dwc)
goto err4;
}
+ init_completion(&dwc->ep0_in_setup);
+
dwc->gadget.ops = &dwc3_gadget_ops;
dwc->gadget.speed = USB_SPEED_UNKNOWN;
dwc->gadget.sg_supported = true;
@@ -2949,8 +3040,7 @@ int dwc3_gadget_init(struct dwc3 *dwc)
* composite.c that we are USB 2.0 + LPM ECN.
*/
if (dwc->revision < DWC3_REVISION_220A)
- dwc3_trace(trace_dwc3_gadget,
- "Changing max_speed on rev %08x",
+ dev_info(dwc->dev, "changing max_speed on rev %08x\n",
dwc->revision);
dwc->gadget.max_speed = dwc->maximum_speed;
@@ -2983,18 +3073,18 @@ err5:
err4:
dwc3_gadget_free_endpoints(dwc);
- dma_free_coherent(dwc->dev, DWC3_EP0_BOUNCE_SIZE,
+ dma_free_coherent(dwc->sysdev, DWC3_EP0_BOUNCE_SIZE,
dwc->ep0_bounce, dwc->ep0_bounce_addr);
err3:
kfree(dwc->setup_buf);
err2:
- dma_free_coherent(dwc->dev, sizeof(*dwc->ep0_trb) * 2,
+ dma_free_coherent(dwc->sysdev, sizeof(*dwc->ep0_trb) * 2,
dwc->ep0_trb, dwc->ep0_trb_addr);
err1:
- dma_free_coherent(dwc->dev, sizeof(*dwc->ctrl_req),
+ dma_free_coherent(dwc->sysdev, sizeof(*dwc->ctrl_req),
dwc->ctrl_req, dwc->ctrl_req_addr);
err0:
@@ -3009,16 +3099,16 @@ void dwc3_gadget_exit(struct dwc3 *dwc)
dwc3_gadget_free_endpoints(dwc);
- dma_free_coherent(dwc->dev, DWC3_EP0_BOUNCE_SIZE,
+ dma_free_coherent(dwc->sysdev, DWC3_EP0_BOUNCE_SIZE,
dwc->ep0_bounce, dwc->ep0_bounce_addr);
kfree(dwc->setup_buf);
kfree(dwc->zlp_buf);
- dma_free_coherent(dwc->dev, sizeof(*dwc->ep0_trb) * 2,
+ dma_free_coherent(dwc->sysdev, sizeof(*dwc->ep0_trb) * 2,
dwc->ep0_trb, dwc->ep0_trb_addr);
- dma_free_coherent(dwc->dev, sizeof(*dwc->ctrl_req),
+ dma_free_coherent(dwc->sysdev, sizeof(*dwc->ctrl_req),
dwc->ctrl_req, dwc->ctrl_req_addr);
}
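All DMA allocations and frees above now use dwc->sysdev instead of dwc->dev: on glue-layer platforms the dwc3 core is a child device, and the DMA configuration may live on its parent. A hedged sketch of how such a field could be chosen at probe time (hypothetical; the real selection logic lives in the core driver):

	static void pick_dma_dev_sketch(struct dwc3 *dwc)
	{
		/* assumption: a parent with a DMA mask owns the DMA setup */
		if (dwc->dev->parent && dwc->dev->parent->dma_mask)
			dwc->sysdev = dwc->dev->parent;
		else
			dwc->sysdev = dwc->dev;
	}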
diff --git a/drivers/usb/dwc3/gadget.h b/drivers/usb/dwc3/gadget.h
index e4a1d974a5ae..3129bcf74d7d 100644
--- a/drivers/usb/dwc3/gadget.h
+++ b/drivers/usb/dwc3/gadget.h
@@ -62,10 +62,7 @@ struct dwc3;
static inline struct dwc3_request *next_request(struct list_head *list)
{
- if (list_empty(list))
- return NULL;
-
- return list_first_entry(list, struct dwc3_request, list);
+ return list_first_entry_or_null(list, struct dwc3_request, list);
}
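list_first_entry_or_null() behaves like the open-coded version it replaces; roughly (a simplified sketch of the list.h helper, not its exact definition):

	#define first_or_null_sketch(head, type, member) \
		(!list_empty(head) ? list_first_entry(head, type, member) : NULL)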
static inline void dwc3_gadget_move_started_request(struct dwc3_request *req)
diff --git a/drivers/usb/dwc3/host.c b/drivers/usb/dwc3/host.c
index f6533c68fed1..487f0ff6ae25 100644
--- a/drivers/usb/dwc3/host.c
+++ b/drivers/usb/dwc3/host.c
@@ -19,6 +19,39 @@
#include "core.h"
+static int dwc3_host_get_irq(struct dwc3 *dwc)
+{
+ struct platform_device *dwc3_pdev = to_platform_device(dwc->dev);
+ int irq;
+
+ irq = platform_get_irq_byname(dwc3_pdev, "host");
+ if (irq > 0)
+ goto out;
+
+ if (irq == -EPROBE_DEFER)
+ goto out;
+
+ irq = platform_get_irq_byname(dwc3_pdev, "dwc_usb3");
+ if (irq > 0)
+ goto out;
+
+ if (irq == -EPROBE_DEFER)
+ goto out;
+
+ irq = platform_get_irq(dwc3_pdev, 0);
+ if (irq > 0)
+ goto out;
+
+ if (irq != -EPROBE_DEFER)
+ dev_err(dwc->dev, "missing host IRQ\n");
+
+ if (!irq)
+ irq = -EINVAL;
+
+out:
+ return irq;
+}
+
int dwc3_host_init(struct dwc3 *dwc)
{
struct property_entry props[2];
@@ -27,39 +60,18 @@ int dwc3_host_init(struct dwc3 *dwc)
struct resource *res;
struct platform_device *dwc3_pdev = to_platform_device(dwc->dev);
- irq = platform_get_irq_byname(dwc3_pdev, "host");
- if (irq == -EPROBE_DEFER)
+ irq = dwc3_host_get_irq(dwc);
+ if (irq < 0)
return irq;
- if (irq <= 0) {
- irq = platform_get_irq_byname(dwc3_pdev, "dwc_usb3");
- if (irq == -EPROBE_DEFER)
- return irq;
-
- if (irq <= 0) {
- irq = platform_get_irq(dwc3_pdev, 0);
- if (irq <= 0) {
- if (irq != -EPROBE_DEFER) {
- dev_err(dwc->dev,
- "missing host IRQ\n");
- }
- if (!irq)
- irq = -EINVAL;
- return irq;
- } else {
- res = platform_get_resource(dwc3_pdev,
- IORESOURCE_IRQ, 0);
- }
- } else {
- res = platform_get_resource_byname(dwc3_pdev,
- IORESOURCE_IRQ,
- "dwc_usb3");
- }
-
- } else {
+ res = platform_get_resource_byname(dwc3_pdev, IORESOURCE_IRQ, "host");
+ if (!res)
res = platform_get_resource_byname(dwc3_pdev, IORESOURCE_IRQ,
- "host");
- }
+ "dwc_usb3");
+ if (!res)
+ res = platform_get_resource(dwc3_pdev, IORESOURCE_IRQ, 0);
+ if (!res)
+ return -ENOMEM;
dwc->xhci_resources[1].start = irq;
dwc->xhci_resources[1].end = irq;
@@ -72,11 +84,7 @@ int dwc3_host_init(struct dwc3 *dwc)
return -ENOMEM;
}
- dma_set_coherent_mask(&xhci->dev, dwc->dev->coherent_dma_mask);
-
xhci->dev.parent = dwc->dev;
- xhci->dev.dma_mask = dwc->dev->dma_mask;
- xhci->dev.dma_parms = dwc->dev->dma_parms;
dwc->xhci = xhci;
@@ -99,9 +107,9 @@ int dwc3_host_init(struct dwc3 *dwc)
}
phy_create_lookup(dwc->usb2_generic_phy, "usb2-phy",
- dev_name(&xhci->dev));
+ dev_name(dwc->dev));
phy_create_lookup(dwc->usb3_generic_phy, "usb3-phy",
- dev_name(&xhci->dev));
+ dev_name(dwc->dev));
ret = platform_device_add(xhci);
if (ret) {
@@ -112,9 +120,9 @@ int dwc3_host_init(struct dwc3 *dwc)
return 0;
err2:
phy_remove_lookup(dwc->usb2_generic_phy, "usb2-phy",
- dev_name(&xhci->dev));
+ dev_name(dwc->dev));
phy_remove_lookup(dwc->usb3_generic_phy, "usb3-phy",
- dev_name(&xhci->dev));
+ dev_name(dwc->dev));
err1:
platform_device_put(xhci);
return ret;
@@ -123,8 +131,8 @@ err1:
void dwc3_host_exit(struct dwc3 *dwc)
{
phy_remove_lookup(dwc->usb2_generic_phy, "usb2-phy",
- dev_name(&dwc->xhci->dev));
+ dev_name(dwc->dev));
phy_remove_lookup(dwc->usb3_generic_phy, "usb3-phy",
- dev_name(&dwc->xhci->dev));
+ dev_name(dwc->dev));
platform_device_unregister(dwc->xhci);
}
diff --git a/drivers/usb/dwc3/io.h b/drivers/usb/dwc3/io.h
index a06f9a8fecc7..c69b06696824 100644
--- a/drivers/usb/dwc3/io.h
+++ b/drivers/usb/dwc3/io.h
@@ -40,8 +40,7 @@ static inline u32 dwc3_readl(void __iomem *base, u32 offset)
* documentation, so we revert it back to the proper addresses, the
* same way they are described on SNPS documentation
*/
- dwc3_trace(trace_dwc3_readl, "addr %p value %08x",
- base - DWC3_GLOBALS_REGS_START + offset, value);
+ trace_dwc3_readl(base - DWC3_GLOBALS_REGS_START, offset, value);
return value;
}
@@ -60,8 +59,7 @@ static inline void dwc3_writel(void __iomem *base, u32 offset, u32 value)
* documentation, so we revert it back to the proper addresses, the
* same way they are described on SNPS documentation
*/
- dwc3_trace(trace_dwc3_writel, "addr %p value %08x",
- base - DWC3_GLOBALS_REGS_START + offset, value);
+ trace_dwc3_writel(base - DWC3_GLOBALS_REGS_START, offset, value);
}
#endif /* __DRIVERS_USB_DWC3_IO_H */
diff --git a/drivers/usb/dwc3/trace.h b/drivers/usb/dwc3/trace.h
index d24cefd191b5..2b124f94d858 100644
--- a/drivers/usb/dwc3/trace.h
+++ b/drivers/usb/dwc3/trace.h
@@ -37,47 +37,66 @@ DECLARE_EVENT_CLASS(dwc3_log_msg,
TP_printk("%s", __get_str(msg))
);
-DEFINE_EVENT(dwc3_log_msg, dwc3_readl,
+DEFINE_EVENT(dwc3_log_msg, dwc3_gadget,
TP_PROTO(struct va_format *vaf),
TP_ARGS(vaf)
);
-DEFINE_EVENT(dwc3_log_msg, dwc3_writel,
+DEFINE_EVENT(dwc3_log_msg, dwc3_core,
TP_PROTO(struct va_format *vaf),
TP_ARGS(vaf)
);
-DEFINE_EVENT(dwc3_log_msg, dwc3_gadget,
+DEFINE_EVENT(dwc3_log_msg, dwc3_ep0,
TP_PROTO(struct va_format *vaf),
TP_ARGS(vaf)
);
-DEFINE_EVENT(dwc3_log_msg, dwc3_core,
- TP_PROTO(struct va_format *vaf),
- TP_ARGS(vaf)
+DECLARE_EVENT_CLASS(dwc3_log_io,
+ TP_PROTO(void *base, u32 offset, u32 value),
+ TP_ARGS(base, offset, value),
+ TP_STRUCT__entry(
+ __field(void *, base)
+ __field(u32, offset)
+ __field(u32, value)
+ ),
+ TP_fast_assign(
+ __entry->base = base;
+ __entry->offset = offset;
+ __entry->value = value;
+ ),
+ TP_printk("addr %p value %08x", __entry->base + __entry->offset,
+ __entry->value)
);
-DEFINE_EVENT(dwc3_log_msg, dwc3_ep0,
- TP_PROTO(struct va_format *vaf),
- TP_ARGS(vaf)
+DEFINE_EVENT(dwc3_log_io, dwc3_readl,
+ TP_PROTO(void *base, u32 offset, u32 value),
+ TP_ARGS(base, offset, value)
+);
+
+DEFINE_EVENT(dwc3_log_io, dwc3_writel,
+ TP_PROTO(void *base, u32 offset, u32 value),
+ TP_ARGS(base, offset, value)
);
DECLARE_EVENT_CLASS(dwc3_log_event,
- TP_PROTO(u32 event),
- TP_ARGS(event),
+ TP_PROTO(u32 event, struct dwc3 *dwc),
+ TP_ARGS(event, dwc),
TP_STRUCT__entry(
__field(u32, event)
+ __field(u32, ep0state)
),
TP_fast_assign(
__entry->event = event;
+ __entry->ep0state = dwc->ep0state;
),
TP_printk("event (%08x): %s", __entry->event,
- dwc3_decode_event(__entry->event))
+ dwc3_decode_event(__entry->event, __entry->ep0state))
);
DEFINE_EVENT(dwc3_log_event, dwc3_event,
- TP_PROTO(u32 event),
- TP_ARGS(event)
+ TP_PROTO(u32 event, struct dwc3 *dwc),
+ TP_ARGS(event, dwc)
);
DECLARE_EVENT_CLASS(dwc3_log_ctrl,
@@ -237,6 +256,7 @@ DECLARE_EVENT_CLASS(dwc3_log_trb,
__field(u32, bph)
__field(u32, size)
__field(u32, ctrl)
+ __field(u32, type)
),
TP_fast_assign(
snprintf(__get_str(name), DWC3_MSG_MAX, "%s", dep->name);
@@ -247,11 +267,31 @@ DECLARE_EVENT_CLASS(dwc3_log_trb,
__entry->bph = trb->bph;
__entry->size = trb->size;
__entry->ctrl = trb->ctrl;
+ __entry->type = usb_endpoint_type(dep->endpoint.desc);
),
- TP_printk("%s: %d/%d trb %p buf %08x%08x size %d ctrl %08x (%c%c%c%c:%c%c:%s)",
+ TP_printk("%s: %d/%d trb %p buf %08x%08x size %s%d ctrl %08x (%c%c%c%c:%c%c:%s)",
__get_str(name), __entry->queued, __entry->allocated,
__entry->trb, __entry->bph, __entry->bpl,
- __entry->size, __entry->ctrl,
+ ({char *s;
+ int pcm = ((__entry->size >> 24) & 3) + 1;
+ switch (__entry->type) {
+ case USB_ENDPOINT_XFER_INT:
+ case USB_ENDPOINT_XFER_ISOC:
+ switch (pcm) {
+ case 1:
+ s = "1x ";
+ break;
+ case 2:
+ s = "2x ";
+ break;
+ case 3:
+ s = "3x ";
+ break;
+			default:
+				s = "";
+			}
+			break;
+		default:
+			s = "";
+		} s; }),
+ DWC3_TRB_SIZE_LENGTH(__entry->size), __entry->ctrl,
__entry->ctrl & DWC3_TRB_CTRL_HWO ? 'H' : 'h',
__entry->ctrl & DWC3_TRB_CTRL_LST ? 'L' : 'l',
__entry->ctrl & DWC3_TRB_CTRL_CHN ? 'C' : 'c',
@@ -301,6 +341,57 @@ DEFINE_EVENT(dwc3_log_trb, dwc3_complete_trb,
TP_ARGS(dep, trb)
);
+DECLARE_EVENT_CLASS(dwc3_log_ep,
+ TP_PROTO(struct dwc3_ep *dep),
+ TP_ARGS(dep),
+ TP_STRUCT__entry(
+ __dynamic_array(char, name, DWC3_MSG_MAX)
+ __field(unsigned, maxpacket)
+ __field(unsigned, maxpacket_limit)
+ __field(unsigned, max_streams)
+ __field(unsigned, maxburst)
+ __field(unsigned, flags)
+ __field(unsigned, direction)
+ __field(u8, trb_enqueue)
+ __field(u8, trb_dequeue)
+ ),
+ TP_fast_assign(
+ snprintf(__get_str(name), DWC3_MSG_MAX, "%s", dep->name);
+ __entry->maxpacket = dep->endpoint.maxpacket;
+ __entry->maxpacket_limit = dep->endpoint.maxpacket_limit;
+ __entry->max_streams = dep->endpoint.max_streams;
+ __entry->maxburst = dep->endpoint.maxburst;
+ __entry->flags = dep->flags;
+ __entry->direction = dep->direction;
+ __entry->trb_enqueue = dep->trb_enqueue;
+ __entry->trb_dequeue = dep->trb_dequeue;
+ ),
+ TP_printk("%s: mps %d/%d streams %d burst %d ring %d/%d flags %c:%c%c%c%c%c:%c:%c",
+ __get_str(name), __entry->maxpacket,
+ __entry->maxpacket_limit, __entry->max_streams,
+ __entry->maxburst, __entry->trb_enqueue,
+ __entry->trb_dequeue,
+ __entry->flags & DWC3_EP_ENABLED ? 'E' : 'e',
+ __entry->flags & DWC3_EP_STALL ? 'S' : 's',
+ __entry->flags & DWC3_EP_WEDGE ? 'W' : 'w',
+ __entry->flags & DWC3_EP_BUSY ? 'B' : 'b',
+ __entry->flags & DWC3_EP_PENDING_REQUEST ? 'P' : 'p',
+ __entry->flags & DWC3_EP_MISSED_ISOC ? 'M' : 'm',
+ __entry->flags & DWC3_EP_END_TRANSFER_PENDING ? 'E' : 'e',
+ __entry->direction ? '<' : '>'
+ )
+);
+
+DEFINE_EVENT(dwc3_log_ep, dwc3_gadget_ep_enable,
+ TP_PROTO(struct dwc3_ep *dep),
+ TP_ARGS(dep)
+);
+
+DEFINE_EVENT(dwc3_log_ep, dwc3_gadget_ep_disable,
+ TP_PROTO(struct dwc3_ep *dep),
+ TP_ARGS(dep)
+);
+
#endif /* __DWC3_TRACE_H */
/* this part has to be here */
diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c
index 32176f779861..41ab61f9b6e0 100644
--- a/drivers/usb/gadget/composite.c
+++ b/drivers/usb/gadget/composite.c
@@ -201,7 +201,12 @@ ep_found:
_ep->desc = chosen_desc;
_ep->comp_desc = NULL;
_ep->maxburst = 0;
- _ep->mult = 0;
+ _ep->mult = 1;
+
+ if (g->speed == USB_SPEED_HIGH && (usb_endpoint_xfer_isoc(_ep->desc) ||
+ usb_endpoint_xfer_int(_ep->desc)))
+ _ep->mult = usb_endpoint_maxp_mult(_ep->desc);
+
if (!want_comp_desc)
return 0;
@@ -218,7 +223,7 @@ ep_found:
switch (usb_endpoint_type(_ep->desc)) {
case USB_ENDPOINT_XFER_ISOC:
/* mult: bits 1:0 of bmAttributes */
- _ep->mult = comp_desc->bmAttributes & 0x3;
+ _ep->mult = (comp_desc->bmAttributes & 0x3) + 1;
case USB_ENDPOINT_XFER_BULK:
case USB_ENDPOINT_XFER_INT:
_ep->maxburst = comp_desc->bMaxBurst + 1;
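Why mult is now 1-based: for high-speed periodic endpoints, bits 12:11 of wMaxPacketSize encode the number of additional transactions per microframe, and usb_endpoint_maxp_mult() returns that field plus one. A minimal sketch of the derivation (hypothetical helper):

	static unsigned int maxp_mult_sketch(unsigned int wMaxPacketSize)
	{
		/* 0..2 additional transactions means 1..3 total per uframe */
		return ((wMaxPacketSize >> 11) & 0x3) + 1;
	}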
@@ -2382,18 +2387,8 @@ EXPORT_SYMBOL_GPL(usb_composite_setup_continue);
static char *composite_default_mfr(struct usb_gadget *gadget)
{
- char *mfr;
- int len;
-
- len = snprintf(NULL, 0, "%s %s with %s", init_utsname()->sysname,
- init_utsname()->release, gadget->name);
- len++;
- mfr = kmalloc(len, GFP_KERNEL);
- if (!mfr)
- return NULL;
- snprintf(mfr, len, "%s %s with %s", init_utsname()->sysname,
- init_utsname()->release, gadget->name);
- return mfr;
+ return kasprintf(GFP_KERNEL, "%s %s with %s", init_utsname()->sysname,
+ init_utsname()->release, gadget->name);
}
void usb_composite_overwrite_options(struct usb_composite_dev *cdev,
diff --git a/drivers/usb/gadget/configfs.c b/drivers/usb/gadget/configfs.c
index 3984787f8e97..78c44979dde3 100644
--- a/drivers/usb/gadget/configfs.c
+++ b/drivers/usb/gadget/configfs.c
@@ -408,7 +408,7 @@ out:
return ret;
}
-static int config_usb_cfg_unlink(
+static void config_usb_cfg_unlink(
struct config_item *usb_cfg_ci,
struct config_item *usb_func_ci)
{
@@ -437,12 +437,11 @@ static int config_usb_cfg_unlink(
list_del(&f->list);
usb_put_function(f);
mutex_unlock(&gi->lock);
- return 0;
+ return;
}
}
mutex_unlock(&gi->lock);
WARN(1, "Unable to locate function to unbind\n");
- return 0;
}
static struct configfs_item_operations gadget_config_item_ops = {
@@ -865,7 +864,7 @@ out:
return ret;
}
-static int os_desc_unlink(struct config_item *os_desc_ci,
+static void os_desc_unlink(struct config_item *os_desc_ci,
struct config_item *usb_cfg_ci)
{
struct gadget_info *gi = container_of(to_config_group(os_desc_ci),
@@ -878,7 +877,6 @@ static int os_desc_unlink(struct config_item *os_desc_ci,
cdev->os_desc_config = NULL;
WARN_ON(gi->composite.gadget_driver.udc_name);
mutex_unlock(&gi->lock);
- return 0;
}
static struct configfs_item_operations os_desc_ops = {
diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
index 17989b72cdae..aab3fc1dbb94 100644
--- a/drivers/usb/gadget/function/f_fs.c
+++ b/drivers/usb/gadget/function/f_fs.c
@@ -266,7 +266,7 @@ static void ffs_ep0_complete(struct usb_ep *ep, struct usb_request *req)
{
struct ffs_data *ffs = req->context;
- complete_all(&ffs->ep0req_completion);
+ complete(&ffs->ep0req_completion);
}
static int __ffs_ep0_queue_wait(struct ffs_data *ffs, char *data, size_t len)
@@ -949,7 +949,7 @@ static ssize_t ffs_epfile_io(struct file *file, struct ffs_io_data *io_data)
goto error_mutex;
}
if (!io_data->read &&
- copy_from_iter(data, data_len, &io_data->data) != data_len) {
+ !copy_from_iter_full(data, data_len, &io_data->data)) {
ret = -EFAULT;
goto error_mutex;
}
diff --git a/drivers/usb/gadget/function/f_hid.c b/drivers/usb/gadget/function/f_hid.c
index e2966f87c860..3151d2a0fe59 100644
--- a/drivers/usb/gadget/function/f_hid.c
+++ b/drivers/usb/gadget/function/f_hid.c
@@ -98,6 +98,60 @@ static struct hid_descriptor hidg_desc = {
/*.desc[0].wDescriptorLength = DYNAMIC */
};
+/* Super-Speed Support */
+
+static struct usb_endpoint_descriptor hidg_ss_in_ep_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_INT,
+ /*.wMaxPacketSize = DYNAMIC */
+ .bInterval = 4, /* FIXME: Add this field in the
+ * HID gadget configuration?
+ * (struct hidg_func_descriptor)
+ */
+};
+
+static struct usb_ss_ep_comp_descriptor hidg_ss_in_comp_desc = {
+ .bLength = sizeof(hidg_ss_in_comp_desc),
+ .bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
+
+ /* .bMaxBurst = 0, */
+ /* .bmAttributes = 0, */
+ /* .wBytesPerInterval = DYNAMIC */
+};
+
+static struct usb_endpoint_descriptor hidg_ss_out_ep_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_OUT,
+ .bmAttributes = USB_ENDPOINT_XFER_INT,
+ /*.wMaxPacketSize = DYNAMIC */
+ .bInterval = 4, /* FIXME: Add this field in the
+ * HID gadget configuration?
+ * (struct hidg_func_descriptor)
+ */
+};
+
+static struct usb_ss_ep_comp_descriptor hidg_ss_out_comp_desc = {
+ .bLength = sizeof(hidg_ss_out_comp_desc),
+ .bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
+
+ /* .bMaxBurst = 0, */
+ /* .bmAttributes = 0, */
+ /* .wBytesPerInterval = DYNAMIC */
+};
+
+static struct usb_descriptor_header *hidg_ss_descriptors[] = {
+ (struct usb_descriptor_header *)&hidg_interface_desc,
+ (struct usb_descriptor_header *)&hidg_desc,
+ (struct usb_descriptor_header *)&hidg_ss_in_ep_desc,
+ (struct usb_descriptor_header *)&hidg_ss_in_comp_desc,
+ (struct usb_descriptor_header *)&hidg_ss_out_ep_desc,
+ (struct usb_descriptor_header *)&hidg_ss_out_comp_desc,
+ NULL,
+};
+
/* High-Speed Support */
static struct usb_endpoint_descriptor hidg_hs_in_ep_desc = {
@@ -624,8 +678,14 @@ static int hidg_bind(struct usb_configuration *c, struct usb_function *f)
/* set descriptor dynamic values */
hidg_interface_desc.bInterfaceSubClass = hidg->bInterfaceSubClass;
hidg_interface_desc.bInterfaceProtocol = hidg->bInterfaceProtocol;
+ hidg_ss_in_ep_desc.wMaxPacketSize = cpu_to_le16(hidg->report_length);
+ hidg_ss_in_comp_desc.wBytesPerInterval =
+ cpu_to_le16(hidg->report_length);
hidg_hs_in_ep_desc.wMaxPacketSize = cpu_to_le16(hidg->report_length);
hidg_fs_in_ep_desc.wMaxPacketSize = cpu_to_le16(hidg->report_length);
+ hidg_ss_out_ep_desc.wMaxPacketSize = cpu_to_le16(hidg->report_length);
+ hidg_ss_out_comp_desc.wBytesPerInterval =
+ cpu_to_le16(hidg->report_length);
hidg_hs_out_ep_desc.wMaxPacketSize = cpu_to_le16(hidg->report_length);
hidg_fs_out_ep_desc.wMaxPacketSize = cpu_to_le16(hidg->report_length);
/*
@@ -641,8 +701,13 @@ static int hidg_bind(struct usb_configuration *c, struct usb_function *f)
hidg_hs_out_ep_desc.bEndpointAddress =
hidg_fs_out_ep_desc.bEndpointAddress;
+ hidg_ss_in_ep_desc.bEndpointAddress =
+ hidg_fs_in_ep_desc.bEndpointAddress;
+ hidg_ss_out_ep_desc.bEndpointAddress =
+ hidg_fs_out_ep_desc.bEndpointAddress;
+
status = usb_assign_descriptors(f, hidg_fs_descriptors,
- hidg_hs_descriptors, NULL, NULL);
+ hidg_hs_descriptors, hidg_ss_descriptors, NULL);
if (status)
goto fail;
@@ -840,7 +905,7 @@ static void hidg_free_inst(struct usb_function_instance *f)
mutex_lock(&hidg_ida_lock);
hidg_put_minor(opts->minor);
- if (idr_is_empty(&hidg_ida.idr))
+ if (ida_is_empty(&hidg_ida))
ghid_cleanup();
mutex_unlock(&hidg_ida_lock);
@@ -866,7 +931,7 @@ static struct usb_function_instance *hidg_alloc_inst(void)
mutex_lock(&hidg_ida_lock);
- if (idr_is_empty(&hidg_ida.idr)) {
+ if (ida_is_empty(&hidg_ida)) {
status = ghid_setup(NULL, HIDG_MINORS);
if (status) {
ret = ERR_PTR(status);
@@ -879,7 +944,7 @@ static struct usb_function_instance *hidg_alloc_inst(void)
if (opts->minor < 0) {
ret = ERR_PTR(opts->minor);
kfree(opts);
- if (idr_is_empty(&hidg_ida.idr))
+ if (ida_is_empty(&hidg_ida))
ghid_cleanup();
goto unlock;
}
diff --git a/drivers/usb/gadget/function/f_ncm.c b/drivers/usb/gadget/function/f_ncm.c
index 639603722709..e8008fa35e1e 100644
--- a/drivers/usb/gadget/function/f_ncm.c
+++ b/drivers/usb/gadget/function/f_ncm.c
@@ -998,7 +998,7 @@ static struct sk_buff *package_for_tx(struct f_ncm *ncm)
/* Merge the skbs */
swap(skb2, ncm->skb_tx_data);
if (ncm->skb_tx_data) {
- dev_kfree_skb_any(ncm->skb_tx_data);
+ dev_consume_skb_any(ncm->skb_tx_data);
ncm->skb_tx_data = NULL;
}
@@ -1009,7 +1009,7 @@ static struct sk_buff *package_for_tx(struct f_ncm *ncm)
/* Copy NTB across. */
ntb_iter = (void *) skb_put(skb2, ncm->skb_tx_ndp->len);
memcpy(ntb_iter, ncm->skb_tx_ndp->data, ncm->skb_tx_ndp->len);
- dev_kfree_skb_any(ncm->skb_tx_ndp);
+ dev_consume_skb_any(ncm->skb_tx_ndp);
ncm->skb_tx_ndp = NULL;
/* Insert zero'd datagram. */
@@ -1078,6 +1078,7 @@ static struct sk_buff *ncm_wrap_ntb(struct gether *port,
if (!ncm->skb_tx_data)
goto err;
+ ncm->skb_tx_data->dev = ncm->netdev;
ntb_data = (void *) skb_put(ncm->skb_tx_data, ncb_len);
memset(ntb_data, 0, ncb_len);
/* dwSignature */
@@ -1096,6 +1097,8 @@ static struct sk_buff *ncm_wrap_ntb(struct gether *port,
GFP_ATOMIC);
if (!ncm->skb_tx_ndp)
goto err;
+
+ ncm->skb_tx_ndp->dev = ncm->netdev;
ntb_ndp = (void *) skb_put(ncm->skb_tx_ndp,
opts->ndp_size);
memset(ntb_ndp, 0, ncb_len);
@@ -1133,7 +1136,7 @@ static struct sk_buff *ncm_wrap_ntb(struct gether *port,
memset(ntb_data, 0, dgram_pad);
ntb_data = (void *) skb_put(ncm->skb_tx_data, skb->len);
memcpy(ntb_data, skb->data, skb->len);
- dev_kfree_skb_any(skb);
+ dev_consume_skb_any(skb);
skb = NULL;
} else if (ncm->skb_tx_data && ncm->timer_force_tx) {
@@ -1329,7 +1332,7 @@ static int ncm_unwrap_ntb(struct gether *port,
} while (ndp_len > 2 * (opts->dgram_item_len * 2));
} while (ndp_index);
- dev_kfree_skb_any(skb);
+ dev_consume_skb_any(skb);
VDBG(port->func.config->cdev,
"Parsed NTB with %d frames\n", dgram_counter);
diff --git a/drivers/usb/gadget/function/f_phonet.c b/drivers/usb/gadget/function/f_phonet.c
index 0473d619d5bf..b4058f0000e4 100644
--- a/drivers/usb/gadget/function/f_phonet.c
+++ b/drivers/usb/gadget/function/f_phonet.c
@@ -261,19 +261,10 @@ out:
return NETDEV_TX_OK;
}
-static int pn_net_mtu(struct net_device *dev, int new_mtu)
-{
- if ((new_mtu < PHONET_MIN_MTU) || (new_mtu > PHONET_MAX_MTU))
- return -EINVAL;
- dev->mtu = new_mtu;
- return 0;
-}
-
static const struct net_device_ops pn_netdev_ops = {
.ndo_open = pn_net_open,
.ndo_stop = pn_net_close,
.ndo_start_xmit = pn_net_xmit,
- .ndo_change_mtu = pn_net_mtu,
};
static void pn_net_setup(struct net_device *dev)
@@ -282,6 +273,8 @@ static void pn_net_setup(struct net_device *dev)
dev->type = ARPHRD_PHONET;
dev->flags = IFF_POINTOPOINT | IFF_NOARP;
dev->mtu = PHONET_DEV_MTU;
+ dev->min_mtu = PHONET_MIN_MTU;
+ dev->max_mtu = PHONET_MAX_MTU;
dev->hard_header_len = 1;
dev->dev_addr[0] = PN_MEDIA_USB;
dev->addr_len = 1;
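With min_mtu/max_mtu populated, the range check moves into the networking core, which is why the ndo_change_mtu hook can be dropped here (and in u_ether below). A simplified sketch of the core-side validation, modeled on dev_set_mtu() with a stand-in struct:

	struct mtu_dev_sketch { int mtu, min_mtu, max_mtu; };

	static int set_mtu_sketch(struct mtu_dev_sketch *dev, int new_mtu)
	{
		if (new_mtu < dev->min_mtu)
			return -EINVAL;
		if (dev->max_mtu > 0 && new_mtu > dev->max_mtu)
			return -EINVAL;
		dev->mtu = new_mtu;
		return 0;
	}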
diff --git a/drivers/usb/gadget/function/f_printer.c b/drivers/usb/gadget/function/f_printer.c
index 0de36cda6e41..8054da9276dd 100644
--- a/drivers/usb/gadget/function/f_printer.c
+++ b/drivers/usb/gadget/function/f_printer.c
@@ -1265,7 +1265,7 @@ static void gprinter_free_inst(struct usb_function_instance *f)
mutex_lock(&printer_ida_lock);
gprinter_put_minor(opts->minor);
- if (idr_is_empty(&printer_ida.idr))
+ if (ida_is_empty(&printer_ida))
gprinter_cleanup();
mutex_unlock(&printer_ida_lock);
@@ -1289,7 +1289,7 @@ static struct usb_function_instance *gprinter_alloc_inst(void)
mutex_lock(&printer_ida_lock);
- if (idr_is_empty(&printer_ida.idr)) {
+ if (ida_is_empty(&printer_ida)) {
status = gprinter_setup(PRINTER_MINORS);
if (status) {
ret = ERR_PTR(status);
@@ -1302,7 +1302,7 @@ static struct usb_function_instance *gprinter_alloc_inst(void)
if (opts->minor < 0) {
ret = ERR_PTR(opts->minor);
kfree(opts);
- if (idr_is_empty(&printer_ida.idr))
+ if (ida_is_empty(&printer_ida))
gprinter_cleanup();
goto unlock;
}
diff --git a/drivers/usb/gadget/function/f_tcm.c b/drivers/usb/gadget/function/f_tcm.c
index 197f73386fac..d2351139342f 100644
--- a/drivers/usb/gadget/function/f_tcm.c
+++ b/drivers/usb/gadget/function/f_tcm.c
@@ -1073,7 +1073,7 @@ static struct usbg_cmd *usbg_get_cmd(struct f_uas *fu,
struct usbg_cmd *cmd;
int tag;
- tag = percpu_ida_alloc(&se_sess->sess_tag_pool, GFP_ATOMIC);
+ tag = percpu_ida_alloc(&se_sess->sess_tag_pool, TASK_RUNNING);
if (tag < 0)
return ERR_PTR(-ENOMEM);
diff --git a/drivers/usb/gadget/function/f_uac2.c b/drivers/usb/gadget/function/f_uac2.c
index cd214ec8a601..969cfe741380 100644
--- a/drivers/usb/gadget/function/f_uac2.c
+++ b/drivers/usb/gadget/function/f_uac2.c
@@ -1067,13 +1067,13 @@ afunc_bind(struct usb_configuration *cfg, struct usb_function *fn)
agdev->out_ep = usb_ep_autoconfig(gadget, &fs_epout_desc);
if (!agdev->out_ep) {
dev_err(dev, "%s:%d Error!\n", __func__, __LINE__);
- goto err;
+ return ret;
}
agdev->in_ep = usb_ep_autoconfig(gadget, &fs_epin_desc);
if (!agdev->in_ep) {
dev_err(dev, "%s:%d Error!\n", __func__, __LINE__);
- goto err;
+ return ret;
}
uac2->p_prm.uac2 = uac2;
@@ -1091,7 +1091,7 @@ afunc_bind(struct usb_configuration *cfg, struct usb_function *fn)
ret = usb_assign_descriptors(fn, fs_audio_desc, hs_audio_desc, NULL,
NULL);
if (ret)
- goto err;
+ return ret;
prm = &agdev->uac2.c_prm;
prm->max_psize = hs_epout_desc.wMaxPacketSize;
@@ -1106,19 +1106,19 @@ afunc_bind(struct usb_configuration *cfg, struct usb_function *fn)
prm->rbuf = kzalloc(prm->max_psize * USB_XFERS, GFP_KERNEL);
if (!prm->rbuf) {
prm->max_psize = 0;
- goto err_free_descs;
+ goto err;
}
ret = alsa_uac2_init(agdev);
if (ret)
- goto err_free_descs;
+ goto err;
return 0;
-err_free_descs:
- usb_free_all_descriptors(fn);
err:
kfree(agdev->uac2.p_prm.rbuf);
kfree(agdev->uac2.c_prm.rbuf);
+err_free_descs:
+ usb_free_all_descriptors(fn);
return -EINVAL;
}
diff --git a/drivers/usb/gadget/function/rndis.c b/drivers/usb/gadget/function/rndis.c
index ab6ac1b74ac0..a3b5e468b116 100644
--- a/drivers/usb/gadget/function/rndis.c
+++ b/drivers/usb/gadget/function/rndis.c
@@ -80,8 +80,7 @@ static const struct file_operations rndis_proc_fops;
#endif /* CONFIG_USB_GADGET_DEBUG_FILES */
/* supported OIDs */
-static const u32 oid_supported_list[] =
-{
+static const u32 oid_supported_list[] = {
/* the general stuff */
RNDIS_OID_GEN_SUPPORTED_LIST,
RNDIS_OID_GEN_HARDWARE_STATUS,
@@ -474,8 +473,7 @@ static int gen_ndis_query_resp(struct rndis_params *params, u32 OID, u8 *buf,
break;
default:
- pr_warning("%s: query unknown OID 0x%08X\n",
- __func__, OID);
+ pr_warn("%s: query unknown OID 0x%08X\n", __func__, OID);
}
if (retval < 0)
length = 0;
@@ -546,8 +544,8 @@ static int gen_ndis_set_resp(struct rndis_params *params, u32 OID,
break;
default:
- pr_warning("%s: set unknown OID 0x%08X, size %d\n",
- __func__, OID, buf_len);
+ pr_warn("%s: set unknown OID 0x%08X, size %d\n",
+ __func__, OID, buf_len);
}
return retval;
@@ -854,7 +852,7 @@ int rndis_msg_parser(struct rndis_params *params, u8 *buf)
* In one case those messages seemed to relate to the host
* suspending itself.
*/
- pr_warning("%s: unknown RNDIS message 0x%08X len %d\n",
+ pr_warn("%s: unknown RNDIS message 0x%08X len %d\n",
__func__, MsgType, MsgLength);
print_hex_dump_bytes(__func__, DUMP_PREFIX_OFFSET,
buf, MsgLength);
diff --git a/drivers/usb/gadget/function/rndis.h b/drivers/usb/gadget/function/rndis.h
index ef92eb66d8ad..21e0430ffb98 100644
--- a/drivers/usb/gadget/function/rndis.h
+++ b/drivers/usb/gadget/function/rndis.h
@@ -22,8 +22,7 @@
#define RNDIS_MAXIMUM_FRAME_SIZE 1518
#define RNDIS_MAX_TOTAL_SIZE 1558
-typedef struct rndis_init_msg_type
-{
+typedef struct rndis_init_msg_type {
__le32 MessageType;
__le32 MessageLength;
__le32 RequestID;
@@ -32,8 +31,7 @@ typedef struct rndis_init_msg_type
__le32 MaxTransferSize;
} rndis_init_msg_type;
-typedef struct rndis_init_cmplt_type
-{
+typedef struct rndis_init_cmplt_type {
__le32 MessageType;
__le32 MessageLength;
__le32 RequestID;
@@ -49,15 +47,13 @@ typedef struct rndis_init_cmplt_type
__le32 AFListSize;
} rndis_init_cmplt_type;
-typedef struct rndis_halt_msg_type
-{
+typedef struct rndis_halt_msg_type {
__le32 MessageType;
__le32 MessageLength;
__le32 RequestID;
} rndis_halt_msg_type;
-typedef struct rndis_query_msg_type
-{
+typedef struct rndis_query_msg_type {
__le32 MessageType;
__le32 MessageLength;
__le32 RequestID;
@@ -67,8 +63,7 @@ typedef struct rndis_query_msg_type
__le32 DeviceVcHandle;
} rndis_query_msg_type;
-typedef struct rndis_query_cmplt_type
-{
+typedef struct rndis_query_cmplt_type {
__le32 MessageType;
__le32 MessageLength;
__le32 RequestID;
@@ -77,8 +72,7 @@ typedef struct rndis_query_cmplt_type
__le32 InformationBufferOffset;
} rndis_query_cmplt_type;
-typedef struct rndis_set_msg_type
-{
+typedef struct rndis_set_msg_type {
__le32 MessageType;
__le32 MessageLength;
__le32 RequestID;
@@ -88,31 +82,27 @@ typedef struct rndis_set_msg_type
__le32 DeviceVcHandle;
} rndis_set_msg_type;
-typedef struct rndis_set_cmplt_type
-{
+typedef struct rndis_set_cmplt_type {
__le32 MessageType;
__le32 MessageLength;
__le32 RequestID;
__le32 Status;
} rndis_set_cmplt_type;
-typedef struct rndis_reset_msg_type
-{
+typedef struct rndis_reset_msg_type {
__le32 MessageType;
__le32 MessageLength;
__le32 Reserved;
} rndis_reset_msg_type;
-typedef struct rndis_reset_cmplt_type
-{
+typedef struct rndis_reset_cmplt_type {
__le32 MessageType;
__le32 MessageLength;
__le32 Status;
__le32 AddressingReset;
} rndis_reset_cmplt_type;
-typedef struct rndis_indicate_status_msg_type
-{
+typedef struct rndis_indicate_status_msg_type {
__le32 MessageType;
__le32 MessageLength;
__le32 Status;
@@ -120,23 +110,20 @@ typedef struct rndis_indicate_status_msg_type
__le32 StatusBufferOffset;
} rndis_indicate_status_msg_type;
-typedef struct rndis_keepalive_msg_type
-{
+typedef struct rndis_keepalive_msg_type {
__le32 MessageType;
__le32 MessageLength;
__le32 RequestID;
} rndis_keepalive_msg_type;
-typedef struct rndis_keepalive_cmplt_type
-{
+typedef struct rndis_keepalive_cmplt_type {
__le32 MessageType;
__le32 MessageLength;
__le32 RequestID;
__le32 Status;
} rndis_keepalive_cmplt_type;
-struct rndis_packet_msg_type
-{
+struct rndis_packet_msg_type {
__le32 MessageType;
__le32 MessageLength;
__le32 DataOffset;
@@ -150,8 +137,7 @@ struct rndis_packet_msg_type
__le32 Reserved;
} __attribute__ ((packed));
-struct rndis_config_parameter
-{
+struct rndis_config_parameter {
__le32 ParameterNameOffset;
__le32 ParameterNameLength;
__le32 ParameterType;
@@ -160,23 +146,20 @@ struct rndis_config_parameter
};
/* implementation specific */
-enum rndis_state
-{
+enum rndis_state {
RNDIS_UNINITIALIZED,
RNDIS_INITIALIZED,
RNDIS_DATA_INITIALIZED,
};
-typedef struct rndis_resp_t
-{
+typedef struct rndis_resp_t {
struct list_head list;
u8 *buf;
u32 length;
int send;
} rndis_resp_t;
-typedef struct rndis_params
-{
+typedef struct rndis_params {
int confignr;
u8 used;
u16 saved_filter;
diff --git a/drivers/usb/gadget/function/u_ether.c b/drivers/usb/gadget/function/u_ether.c
index 5d1bd13a56c1..b4e5d6dfd549 100644
--- a/drivers/usb/gadget/function/u_ether.c
+++ b/drivers/usb/gadget/function/u_ether.c
@@ -142,15 +142,6 @@ static inline int qlen(struct usb_gadget *gadget, unsigned qmult)
/* NETWORK DRIVER HOOKUP (to the layer above this driver) */
-static int ueth_change_mtu(struct net_device *net, int new_mtu)
-{
- if (new_mtu <= ETH_HLEN || new_mtu > GETHER_MAX_ETH_FRAME_LEN)
- return -ERANGE;
- net->mtu = new_mtu;
-
- return 0;
-}
-
static void eth_get_drvinfo(struct net_device *net, struct ethtool_drvinfo *p)
{
struct eth_dev *dev = netdev_priv(net);
@@ -224,7 +215,7 @@ rx_submit(struct eth_dev *dev, struct usb_request *req, gfp_t gfp_flags)
if (dev->port_usb->is_fixed)
size = max_t(size_t, size, dev->port_usb->fixed_out_len);
- skb = alloc_skb(size + NET_IP_ALIGN, gfp_flags);
+ skb = __netdev_alloc_skb(dev->net, size + NET_IP_ALIGN, gfp_flags);
if (skb == NULL) {
DBG(dev, "no rx skb\n");
goto enomem;
@@ -455,16 +446,17 @@ static void tx_complete(struct usb_ep *ep, struct usb_request *req)
/* FALLTHROUGH */
case -ECONNRESET: /* unlink */
case -ESHUTDOWN: /* disconnect etc */
+ dev_kfree_skb_any(skb);
break;
case 0:
dev->net->stats.tx_bytes += skb->len;
+ dev_consume_skb_any(skb);
}
dev->net->stats.tx_packets++;
spin_lock(&dev->req_lock);
list_add(&req->list, &dev->tx_reqs);
spin_unlock(&dev->req_lock);
- dev_kfree_skb_any(skb);
atomic_dec(&dev->tx_qlen);
if (netif_carrier_ok(dev->net))
@@ -729,7 +721,6 @@ static const struct net_device_ops eth_netdev_ops = {
.ndo_open = eth_open,
.ndo_stop = eth_stop,
.ndo_start_xmit = eth_start_xmit,
- .ndo_change_mtu = ueth_change_mtu,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
};
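The tx_complete() change above separates successful completions from drops: dev_consume_skb_any() frees the skb without tripping drop-monitor tooling, while dev_kfree_skb_any() counts as a drop. Both are real net core APIs; the helper below is only an illustration:

	static void tx_done_sketch(struct sk_buff *skb, bool success)
	{
		if (success)
			dev_consume_skb_any(skb);	/* delivered, not a drop */
		else
			dev_kfree_skb_any(skb);		/* error path, a drop */
	}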
@@ -792,6 +783,10 @@ struct eth_dev *gether_setup_name(struct usb_gadget *g,
net->ethtool_ops = &ops;
+ /* MTU range: 14 - 15412 */
+ net->min_mtu = ETH_HLEN;
+ net->max_mtu = GETHER_MAX_ETH_FRAME_LEN;
+
dev->gadget = g;
SET_NETDEV_DEV(net, &g->dev);
SET_NETDEV_DEVTYPE(net, &gadget_type);
diff --git a/drivers/usb/gadget/function/u_serial.c b/drivers/usb/gadget/function/u_serial.c
index e0cd1e4c8892..000677c991b0 100644
--- a/drivers/usb/gadget/function/u_serial.c
+++ b/drivers/usb/gadget/function/u_serial.c
@@ -622,8 +622,8 @@ static void gs_write_complete(struct usb_ep *ep, struct usb_request *req)
switch (req->status) {
default:
/* presumably a transient fault */
- pr_warning("%s: unexpected %s status %d\n",
- __func__, ep->name, req->status);
+ pr_warn("%s: unexpected %s status %d\n",
+ __func__, ep->name, req->status);
/* FALL THROUGH */
case 0:
/* normal completion */
@@ -1256,7 +1256,8 @@ static void gserial_console_exit(void)
struct gscons_info *info = &gscons_info;
unregister_console(&gserial_cons);
- kthread_stop(info->console_thread);
+ if (info->console_thread != NULL)
+ kthread_stop(info->console_thread);
gs_buf_free(&info->con_buf);
}
diff --git a/drivers/usb/gadget/function/uvc.h b/drivers/usb/gadget/function/uvc.h
index 7d3bb6272e06..11d70dead32b 100644
--- a/drivers/usb/gadget/function/uvc.h
+++ b/drivers/usb/gadget/function/uvc.h
@@ -26,14 +26,12 @@
#define UVC_EVENT_DATA (V4L2_EVENT_PRIVATE_START + 5)
#define UVC_EVENT_LAST (V4L2_EVENT_PRIVATE_START + 5)
-struct uvc_request_data
-{
+struct uvc_request_data {
__s32 length;
__u8 data[60];
};
-struct uvc_event
-{
+struct uvc_event {
union {
enum usb_device_speed speed;
struct usb_ctrlrequest req;
@@ -104,8 +102,7 @@ extern unsigned int uvc_gadget_trace_param;
* Structures
*/
-struct uvc_video
-{
+struct uvc_video {
struct usb_ep *ep;
/* Frame parameters */
@@ -134,15 +131,13 @@ struct uvc_video
unsigned int fid;
};
-enum uvc_state
-{
+enum uvc_state {
UVC_STATE_DISCONNECTED,
UVC_STATE_CONNECTED,
UVC_STATE_STREAMING,
};
-struct uvc_device
-{
+struct uvc_device {
struct video_device vdev;
struct v4l2_device v4l2_dev;
enum uvc_state state;
@@ -175,8 +170,7 @@ static inline struct uvc_device *to_uvc(struct usb_function *f)
return container_of(f, struct uvc_device, func);
}
-struct uvc_file_handle
-{
+struct uvc_file_handle {
struct v4l2_fh vfh;
struct uvc_video *device;
};
diff --git a/drivers/usb/gadget/function/uvc_configfs.c b/drivers/usb/gadget/function/uvc_configfs.c
index 31125a4a2658..4e037d2a7a60 100644
--- a/drivers/usb/gadget/function/uvc_configfs.c
+++ b/drivers/usb/gadget/function/uvc_configfs.c
@@ -547,7 +547,7 @@ out:
return ret;
}
-static int uvcg_control_class_drop_link(struct config_item *src,
+static void uvcg_control_class_drop_link(struct config_item *src,
struct config_item *target)
{
struct config_item *control, *header;
@@ -555,7 +555,6 @@ static int uvcg_control_class_drop_link(struct config_item *src,
struct mutex *su_mutex = &src->ci_group->cg_subsys->su_mutex;
struct uvc_descriptor_header **class_array;
struct uvcg_control_header *target_hdr;
- int ret = -EINVAL;
mutex_lock(su_mutex); /* for navigating configfs hierarchy */
@@ -569,23 +568,17 @@ static int uvcg_control_class_drop_link(struct config_item *src,
mutex_lock(&opts->lock);
class_array = uvcg_get_ctl_class_arr(src, opts);
- if (!class_array)
- goto unlock;
- if (opts->refcnt) {
- ret = -EBUSY;
+ if (!class_array || opts->refcnt)
goto unlock;
- }
target_hdr = to_uvcg_control_header(target);
--target_hdr->linked;
class_array[0] = NULL;
- ret = 0;
unlock:
mutex_unlock(&opts->lock);
out:
mutex_unlock(su_mutex);
- return ret;
}
static struct configfs_item_operations uvcg_control_class_item_ops = {
@@ -777,7 +770,7 @@ out:
return ret;
}
-static int uvcg_streaming_header_drop_link(struct config_item *src,
+static void uvcg_streaming_header_drop_link(struct config_item *src,
struct config_item *target)
{
struct mutex *su_mutex = &src->ci_group->cg_subsys->su_mutex;
@@ -786,7 +779,6 @@ static int uvcg_streaming_header_drop_link(struct config_item *src,
struct uvcg_streaming_header *src_hdr;
struct uvcg_format *target_fmt = NULL;
struct uvcg_format_ptr *format_ptr, *tmp;
- int ret = -EINVAL;
src_hdr = to_uvcg_streaming_header(src);
mutex_lock(su_mutex); /* for navigating configfs hierarchy */
@@ -811,8 +803,6 @@ static int uvcg_streaming_header_drop_link(struct config_item *src,
out:
mutex_unlock(&opts->lock);
mutex_unlock(su_mutex);
- return ret;
-
}
static struct configfs_item_operations uvcg_streaming_header_item_ops = {
@@ -2051,7 +2041,7 @@ out:
return ret;
}
-static int uvcg_streaming_class_drop_link(struct config_item *src,
+static void uvcg_streaming_class_drop_link(struct config_item *src,
struct config_item *target)
{
struct config_item *streaming, *header;
@@ -2059,7 +2049,6 @@ static int uvcg_streaming_class_drop_link(struct config_item *src,
struct mutex *su_mutex = &src->ci_group->cg_subsys->su_mutex;
struct uvc_descriptor_header ***class_array;
struct uvcg_streaming_header *target_hdr;
- int ret = -EINVAL;
mutex_lock(su_mutex); /* for navigating configfs hierarchy */
@@ -2076,23 +2065,19 @@ static int uvcg_streaming_class_drop_link(struct config_item *src,
if (!class_array || !*class_array)
goto unlock;
- if (opts->refcnt) {
- ret = -EBUSY;
+ if (opts->refcnt)
goto unlock;
- }
target_hdr = to_uvcg_streaming_header(target);
--target_hdr->linked;
kfree(**class_array);
kfree(*class_array);
*class_array = NULL;
- ret = 0;
unlock:
mutex_unlock(&opts->lock);
out:
mutex_unlock(su_mutex);
- return ret;
}
static struct configfs_item_operations uvcg_streaming_class_item_ops = {
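
All three conversions in this file track the configfs API change that made the ->drop_link() operation return void: by the time it runs the unlink can no longer be vetoed, so the old -EINVAL/-EBUSY results were being discarded anyway. The ops structure these signatures assume looks roughly like:

	struct configfs_item_operations {
		void (*release)(struct config_item *);
		int (*allow_link)(struct config_item *src,
				  struct config_item *target);
		void (*drop_link)(struct config_item *src,
				  struct config_item *target);
	};
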
diff --git a/drivers/usb/gadget/function/uvc_v4l2.c b/drivers/usb/gadget/function/uvc_v4l2.c
index f4ccbd56f4d2..3e22b45687d3 100644
--- a/drivers/usb/gadget/function/uvc_v4l2.c
+++ b/drivers/usb/gadget/function/uvc_v4l2.c
@@ -53,8 +53,7 @@ uvc_send_response(struct uvc_device *uvc, struct uvc_request_data *data)
* V4L2 ioctls
*/
-struct uvc_format
-{
+struct uvc_format {
u8 bpp;
u32 fcc;
};
diff --git a/drivers/usb/gadget/function/uvc_video.c b/drivers/usb/gadget/function/uvc_video.c
index 3d0d5d94a62f..0f01c04d7cbd 100644
--- a/drivers/usb/gadget/function/uvc_video.c
+++ b/drivers/usb/gadget/function/uvc_video.c
@@ -243,7 +243,7 @@ uvc_video_alloc_requests(struct uvc_video *video)
req_size = video->ep->maxpacket
* max_t(unsigned int, video->ep->maxburst, 1)
- * (video->ep->mult + 1);
+ * (video->ep->mult);
for (i = 0; i < UVC_NUM_REQUESTS; ++i) {
video->req_buffer[i] = kmalloc(req_size, GFP_KERNEL);
diff --git a/drivers/usb/gadget/legacy/inode.c b/drivers/usb/gadget/legacy/inode.c
index bd82dd12deff..10b2576f8b6a 100644
--- a/drivers/usb/gadget/legacy/inode.c
+++ b/drivers/usb/gadget/legacy/inode.c
@@ -667,7 +667,7 @@ ep_write_iter(struct kiocb *iocb, struct iov_iter *from)
return -ENOMEM;
}
- if (unlikely(copy_from_iter(buf, len, from) != len)) {
+ if (unlikely(!copy_from_iter_full(buf, len, from))) {
value = -EFAULT;
goto out;
}
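
The switch to copy_from_iter_full() relies on its all-or-nothing contract: it returns true only when every one of the len bytes was copied, and on failure it reverts the iterator, so the caller never sees a partial copy. Sketch of the two forms side by side:

	/* old: partial copies possible, iterator left advanced */
	if (copy_from_iter(buf, len, from) != len)
		return -EFAULT;

	/* new: copies all @len bytes or none, reverting @from on failure */
	if (!copy_from_iter_full(buf, len, from))
		return -EFAULT;
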
diff --git a/drivers/usb/gadget/udc/at91_udc.h b/drivers/usb/gadget/udc/at91_udc.h
index 0a433e6b346b..9bbe72764f31 100644
--- a/drivers/usb/gadget/udc/at91_udc.h
+++ b/drivers/usb/gadget/udc/at91_udc.h
@@ -175,7 +175,7 @@ struct at91_request {
#endif
#define ERR(stuff...) pr_err("udc: " stuff)
-#define WARNING(stuff...) pr_warning("udc: " stuff)
+#define WARNING(stuff...) pr_warn("udc: " stuff)
#define INFO(stuff...) pr_info("udc: " stuff)
#define DBG(stuff...) pr_debug("udc: " stuff)
diff --git a/drivers/usb/gadget/udc/atmel_usba_udc.c b/drivers/usb/gadget/udc/atmel_usba_udc.c
index 45bc997d0711..f3212db9bc37 100644
--- a/drivers/usb/gadget/udc/atmel_usba_udc.c
+++ b/drivers/usb/gadget/udc/atmel_usba_udc.c
@@ -529,7 +529,7 @@ usba_ep_enable(struct usb_ep *_ep, const struct usb_endpoint_descriptor *desc)
DBG(DBG_GADGET, "%s: ep_enable: desc=%p\n", ep->ep.name, desc);
- maxpacket = usb_endpoint_maxp(desc) & 0x7ff;
+ maxpacket = usb_endpoint_maxp(desc);
if (((desc->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK) != ep->index)
|| ep->index == 0
@@ -573,7 +573,7 @@ usba_ep_enable(struct usb_ep *_ep, const struct usb_endpoint_descriptor *desc)
* Bits 11:12 specify number of _additional_
* transactions per microframe.
*/
- nr_trans = ((usb_endpoint_maxp(desc) >> 11) & 3) + 1;
+ nr_trans = usb_endpoint_maxp_mult(desc);
if (nr_trans > 3)
return -EINVAL;
@@ -1464,8 +1464,8 @@ restart:
pkt_len = USBA_BFEXT(BYTE_COUNT, usba_ep_readl(ep, STA));
DBG(DBG_HW, "Packet length: %u\n", pkt_len);
if (pkt_len != sizeof(crq)) {
- pr_warning("udc: Invalid packet length %u "
- "(expected %zu)\n", pkt_len, sizeof(crq));
+ pr_warn("udc: Invalid packet length %u (expected %zu)\n",
+ pkt_len, sizeof(crq));
set_protocol_stall(udc, ep);
return;
}
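
This is one instance of a series-wide cleanup: open-coded masking and shifting of wMaxPacketSize is replaced by the ch9 helpers. The semantics assumed throughout (a sketch of include/linux/usb/ch9.h as of this series):

	#define USB_ENDPOINT_MAXP_MASK	0x07ff
	#define USB_EP_MAXP_MULT_SHIFT	11
	#define USB_EP_MAXP_MULT_MASK	(3 << USB_EP_MAXP_MULT_SHIFT)
	#define USB_EP_MAXP_MULT(m) \
		(((m) & USB_EP_MAXP_MULT_MASK) >> USB_EP_MAXP_MULT_SHIFT)

	/* packet size only, bits 10:0 */
	static inline int usb_endpoint_maxp(const struct usb_endpoint_descriptor *epd)
	{
		return __le16_to_cpu(epd->wMaxPacketSize) & USB_ENDPOINT_MAXP_MASK;
	}

	/* transactions per microframe, 1..3 (bits 12:11, plus one) */
	static inline int usb_endpoint_maxp_mult(const struct usb_endpoint_descriptor *epd)
	{
		int maxp = __le16_to_cpu(epd->wMaxPacketSize);

		return USB_EP_MAXP_MULT(maxp) + 1;
	}
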
diff --git a/drivers/usb/gadget/udc/bdc/bdc_cmd.c b/drivers/usb/gadget/udc/bdc/bdc_cmd.c
index 4d5e9188beae..6e920f1dce02 100644
--- a/drivers/usb/gadget/udc/bdc/bdc_cmd.c
+++ b/drivers/usb/gadget/udc/bdc/bdc_cmd.c
@@ -182,7 +182,7 @@ int bdc_config_ep(struct bdc *bdc, struct bdc_ep *ep)
usb_endpoint_xfer_int(desc)) {
param2 |= si;
- mbs = (usb_endpoint_maxp(desc) & 0x1800) >> 11;
+ mbs = usb_endpoint_maxp_mult(desc);
param2 |= mbs << MB_SHIFT;
}
break;
diff --git a/drivers/usb/gadget/udc/bdc/bdc_ep.c b/drivers/usb/gadget/udc/bdc/bdc_ep.c
index ccaa74ab6c0e..ff1ef24d1777 100644
--- a/drivers/usb/gadget/udc/bdc/bdc_ep.c
+++ b/drivers/usb/gadget/udc/bdc/bdc_ep.c
@@ -446,7 +446,7 @@ static int setup_bd_list_xfr(struct bdc *bdc, struct bdc_req *req, int num_bds)
bd_xfr->start_bdi = bd_list->eqp_bdi;
bd = bdi_to_bd(ep, bd_list->eqp_bdi);
req_len = req->usb_req.length;
- maxp = usb_endpoint_maxp(ep->desc) & 0x7ff;
+ maxp = usb_endpoint_maxp(ep->desc);
tfs = roundup(req->usb_req.length, maxp);
tfs = tfs/maxp;
dev_vdbg(bdc->dev, "%s ep:%s num_bds:%d tfs:%d r_len:%d bd:%p\n",
diff --git a/drivers/usb/gadget/udc/dummy_hcd.c b/drivers/usb/gadget/udc/dummy_hcd.c
index 77d07904f932..02b14e91ae6c 100644
--- a/drivers/usb/gadget/udc/dummy_hcd.c
+++ b/drivers/usb/gadget/udc/dummy_hcd.c
@@ -503,7 +503,7 @@ static int dummy_enable(struct usb_ep *_ep,
* maximum packet size.
* For SS devices the wMaxPacketSize is limited by 1024.
*/
- max = usb_endpoint_maxp(desc) & 0x7ff;
+ max = usb_endpoint_maxp(desc);
/* drivers must not request bad settings, since lower levels
* (hardware or its drivers) may not check. some endpoints
@@ -1483,8 +1483,7 @@ static int periodic_bytes(struct dummy *dum, struct dummy_ep *ep)
int tmp;
/* high bandwidth mode */
- tmp = usb_endpoint_maxp(ep->desc);
- tmp = (tmp >> 11) & 0x03;
+ tmp = usb_endpoint_maxp_mult(ep->desc);
tmp *= 8 /* applies to entire frame */;
limit += limit * tmp;
}
diff --git a/drivers/usb/gadget/udc/fsl_udc_core.c b/drivers/usb/gadget/udc/fsl_udc_core.c
index aab5221d6c2e..71094e479a96 100644
--- a/drivers/usb/gadget/udc/fsl_udc_core.c
+++ b/drivers/usb/gadget/udc/fsl_udc_core.c
@@ -585,8 +585,7 @@ static int fsl_ep_enable(struct usb_ep *_ep,
break;
case USB_ENDPOINT_XFER_ISOC:
/* Calculate transactions needed for high bandwidth iso */
- mult = (unsigned char)(1 + ((max >> 11) & 0x03));
- max = max & 0x7ff; /* bit 0~10 */
+ mult = usb_endpoint_maxp_mult(desc);
/* 3 transactions at most */
if (mult > 3)
goto en_done;
diff --git a/drivers/usb/gadget/udc/fsl_usb2_udc.h b/drivers/usb/gadget/udc/fsl_usb2_udc.h
index 84715625b2b3..e92b8408b6f6 100644
--- a/drivers/usb/gadget/udc/fsl_usb2_udc.h
+++ b/drivers/usb/gadget/udc/fsl_usb2_udc.h
@@ -554,7 +554,7 @@ static void dump_msg(const char *label, const u8 * buf, unsigned int length)
#endif
#define ERR(stuff...) pr_err("udc: " stuff)
-#define WARNING(stuff...) pr_warning("udc: " stuff)
+#define WARNING(stuff...) pr_warn("udc: " stuff)
#define INFO(stuff...) pr_info("udc: " stuff)
/*-------------------------------------------------------------------------*/
diff --git a/drivers/usb/gadget/udc/fusb300_udc.c b/drivers/usb/gadget/udc/fusb300_udc.c
index 948845c90e47..42ff308578df 100644
--- a/drivers/usb/gadget/udc/fusb300_udc.c
+++ b/drivers/usb/gadget/udc/fusb300_udc.c
@@ -218,7 +218,7 @@ static int config_ep(struct fusb300_ep *ep,
(info.type == USB_ENDPOINT_XFER_ISOC)) {
info.interval = desc->bInterval;
if (info.type == USB_ENDPOINT_XFER_ISOC)
- info.bw_num = ((desc->wMaxPacketSize & 0x1800) >> 11);
+ info.bw_num = usb_endpoint_maxp_mult(desc);
}
ep_fifo_setting(fusb300, info);
diff --git a/drivers/usb/gadget/udc/gr_udc.c b/drivers/usb/gadget/udc/gr_udc.c
index 39b7136d31d9..b16f8af34050 100644
--- a/drivers/usb/gadget/udc/gr_udc.c
+++ b/drivers/usb/gadget/udc/gr_udc.c
@@ -1539,7 +1539,7 @@ static int gr_ep_enable(struct usb_ep *_ep,
* additional transactions.
*/
max = 0x7ff & usb_endpoint_maxp(desc);
- nt = 0x3 & (usb_endpoint_maxp(desc) >> 11);
+ nt = usb_endpoint_maxp_mult(desc) - 1;
buffer_size = GR_BUFFER_SIZE(epctrl);
if (nt && (mode == 0 || mode == 2)) {
dev_err(dev->dev,
diff --git a/drivers/usb/gadget/udc/m66592-udc.c b/drivers/usb/gadget/udc/m66592-udc.c
index 6e977dc22570..de3e03483659 100644
--- a/drivers/usb/gadget/udc/m66592-udc.c
+++ b/drivers/usb/gadget/udc/m66592-udc.c
@@ -637,7 +637,7 @@ static void init_controller(struct m66592 *m66592)
clock = M66592_XTAL48;
break;
default:
- pr_warning("m66592-udc: xtal configuration error\n");
+ pr_warn("m66592-udc: xtal configuration error\n");
clock = 0;
}
@@ -649,7 +649,7 @@ static void init_controller(struct m66592 *m66592)
irq_sense = 0;
break;
default:
- pr_warning("m66592-udc: irq trigger config error\n");
+ pr_warn("m66592-udc: irq trigger config error\n");
irq_sense = 0;
}
diff --git a/drivers/usb/gadget/udc/mv_u3d_core.c b/drivers/usb/gadget/udc/mv_u3d_core.c
index b9e19a591322..8d726bd767fd 100644
--- a/drivers/usb/gadget/udc/mv_u3d_core.c
+++ b/drivers/usb/gadget/udc/mv_u3d_core.c
@@ -462,6 +462,12 @@ static int mv_u3d_req_to_trb(struct mv_u3d_req *req)
req->trb_head->trb_hw,
trb_num * sizeof(*trb_hw),
DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(u3d->gadget.dev.parent,
+ req->trb_head->trb_dma)) {
+ kfree(req->trb_head->trb_hw);
+ kfree(req->trb_head);
+ return -EFAULT;
+ }
req->chain = 1;
}
@@ -487,30 +493,32 @@ mv_u3d_start_queue(struct mv_u3d_ep *ep)
ret = usb_gadget_map_request(&u3d->gadget, &req->req,
mv_u3d_ep_dir(ep));
if (ret)
- return ret;
+ goto break_processing;
req->req.status = -EINPROGRESS;
req->req.actual = 0;
req->trb_count = 0;
- /* build trbs and push them to device queue */
- if (!mv_u3d_req_to_trb(req)) {
- ret = mv_u3d_queue_trb(ep, req);
- if (ret) {
- ep->processing = 0;
- return ret;
- }
- } else {
- ep->processing = 0;
+ /* build trbs */
+ ret = mv_u3d_req_to_trb(req);
+ if (ret) {
dev_err(u3d->dev, "%s, mv_u3d_req_to_trb fail\n", __func__);
- return -ENOMEM;
+ goto break_processing;
}
+ /* and push them to device queue */
+ ret = mv_u3d_queue_trb(ep, req);
+ if (ret)
+ goto break_processing;
+
/* irq handler advances the queue */
- if (req)
- list_add_tail(&req->queue, &ep->queue);
+ list_add_tail(&req->queue, &ep->queue);
return 0;
+
+break_processing:
+ ep->processing = 0;
+ return ret;
}
static int mv_u3d_ep_enable(struct usb_ep *_ep,
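
The new dma_mapping_error() check applies the standard streaming-DMA rule: dma_map_single() can fail (IOMMU or bounce-buffer exhaustion), so the returned handle must be validated before the hardware sees it, and allocations unwound on error. The pattern, as a sketch:

	dma_addr_t dma;

	dma = dma_map_single(dev, buf, size, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, dma)) {
		kfree(buf);		/* undo whatever was allocated */
		return -EFAULT;		/* same error code as above */
	}
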
diff --git a/drivers/usb/gadget/udc/mv_udc_core.c b/drivers/usb/gadget/udc/mv_udc_core.c
index ce73b3552269..d82a91bddbd9 100644
--- a/drivers/usb/gadget/udc/mv_udc_core.c
+++ b/drivers/usb/gadget/udc/mv_udc_core.c
@@ -494,8 +494,7 @@ static int mv_ep_enable(struct usb_ep *_ep,
break;
case USB_ENDPOINT_XFER_ISOC:
/* Calculate transactions needed for high bandwidth iso */
- mult = (unsigned char)(1 + ((max >> 11) & 0x03));
- max = max & 0x7ff; /* bit 0~10 */
+ mult = usb_endpoint_maxp_mult(desc);
/* 3 transactions at most */
if (mult > 3)
goto en_done;
diff --git a/drivers/usb/gadget/udc/net2272.c b/drivers/usb/gadget/udc/net2272.c
index 7c6113432093..078c91d546e0 100644
--- a/drivers/usb/gadget/udc/net2272.c
+++ b/drivers/usb/gadget/udc/net2272.c
@@ -202,10 +202,10 @@ net2272_enable(struct usb_ep *_ep, const struct usb_endpoint_descriptor *desc)
if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)
return -ESHUTDOWN;
- max = usb_endpoint_maxp(desc) & 0x1fff;
+ max = usb_endpoint_maxp(desc);
spin_lock_irqsave(&dev->lock, flags);
- _ep->maxpacket = max & 0x7fff;
+ _ep->maxpacket = max;
ep->desc = desc;
/* net2272_ep_reset() has already been called */
diff --git a/drivers/usb/gadget/udc/net2280.c b/drivers/usb/gadget/udc/net2280.c
index 61c938c36d88..85504419ab31 100644
--- a/drivers/usb/gadget/udc/net2280.c
+++ b/drivers/usb/gadget/udc/net2280.c
@@ -224,14 +224,14 @@ net2280_enable(struct usb_ep *_ep, const struct usb_endpoint_descriptor *desc)
}
/* sanity check ep-e/ep-f since their fifos are small */
- max = usb_endpoint_maxp(desc) & 0x1fff;
+ max = usb_endpoint_maxp(desc);
if (ep->num > 4 && max > 64 && (dev->quirks & PLX_LEGACY)) {
ret = -ERANGE;
goto print_err;
}
spin_lock_irqsave(&dev->lock, flags);
- _ep->maxpacket = max & 0x7ff;
+ _ep->maxpacket = max;
ep->desc = desc;
/* ep_reset() has already been called */
@@ -1839,7 +1839,7 @@ static ssize_t queues_show(struct device *_dev, struct device_attribute *attr,
ep->ep.name, t & USB_ENDPOINT_NUMBER_MASK,
(t & USB_DIR_IN) ? "in" : "out",
type_string(d->bmAttributes),
- usb_endpoint_maxp(d) & 0x1fff,
+ usb_endpoint_maxp(d),
ep->dma ? "dma" : "pio", ep->fifo_size
);
} else /* ep0 should only have one transfer queued */
diff --git a/drivers/usb/gadget/udc/omap_udc.h b/drivers/usb/gadget/udc/omap_udc.h
index cfadeb5fc5de..26974196cf44 100644
--- a/drivers/usb/gadget/udc/omap_udc.h
+++ b/drivers/usb/gadget/udc/omap_udc.h
@@ -187,7 +187,7 @@ struct omap_udc {
#endif
#define ERR(stuff...) pr_err("udc: " stuff)
-#define WARNING(stuff...) pr_warning("udc: " stuff)
+#define WARNING(stuff...) pr_warn("udc: " stuff)
#define INFO(stuff...) pr_info("udc: " stuff)
#define DBG(stuff...) pr_debug("udc: " stuff)
diff --git a/drivers/usb/gadget/udc/pxa25x_udc.h b/drivers/usb/gadget/udc/pxa25x_udc.h
index 4b8b72d7ab37..a458bec2536d 100644
--- a/drivers/usb/gadget/udc/pxa25x_udc.h
+++ b/drivers/usb/gadget/udc/pxa25x_udc.h
@@ -248,7 +248,7 @@ dump_state(struct pxa25x_udc *dev)
#define DBG(lvl, stuff...) do{if ((lvl) <= UDC_DEBUG) DMSG(stuff);}while(0)
#define ERR(stuff...) pr_err("udc: " stuff)
-#define WARNING(stuff...) pr_warning("udc: " stuff)
+#define WARNING(stuff...) pr_warn("udc: " stuff)
#define INFO(stuff...) pr_info("udc: " stuff)
diff --git a/drivers/usb/gadget/udc/s3c2410_udc.c b/drivers/usb/gadget/udc/s3c2410_udc.c
index eb3571ee59e3..4643a01262b4 100644
--- a/drivers/usb/gadget/udc/s3c2410_udc.c
+++ b/drivers/usb/gadget/udc/s3c2410_udc.c
@@ -1047,10 +1047,10 @@ static int s3c2410_udc_ep_enable(struct usb_ep *_ep,
if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)
return -ESHUTDOWN;
- max = usb_endpoint_maxp(desc) & 0x1fff;
+ max = usb_endpoint_maxp(desc);
local_irq_save(flags);
- _ep->maxpacket = max & 0x7ff;
+ _ep->maxpacket = max;
ep->ep.desc = desc;
ep->halted = 0;
ep->bEndpointAddress = desc->bEndpointAddress;
diff --git a/drivers/usb/host/Kconfig b/drivers/usb/host/Kconfig
index 0b80cee30da4..6361fc739306 100644
--- a/drivers/usb/host/Kconfig
+++ b/drivers/usb/host/Kconfig
@@ -479,9 +479,10 @@ config USB_OHCI_HCD_OMAP3
OMAP3 and later chips.
config USB_OHCI_HCD_DAVINCI
- bool "OHCI support for TI DaVinci DA8xx"
+ tristate "OHCI support for TI DaVinci DA8xx"
depends on ARCH_DAVINCI_DA8XX
- depends on USB_OHCI_HCD=y
+ depends on USB_OHCI_HCD
+ select PHY_DA8XX_USB
default y
help
Enables support for the DaVinci DA8xx integrated OHCI
diff --git a/drivers/usb/host/Makefile b/drivers/usb/host/Makefile
index 6ef785b0ea8f..2644537b7bcf 100644
--- a/drivers/usb/host/Makefile
+++ b/drivers/usb/host/Makefile
@@ -61,6 +61,7 @@ obj-$(CONFIG_USB_OHCI_HCD_AT91) += ohci-at91.o
obj-$(CONFIG_USB_OHCI_HCD_S3C2410) += ohci-s3c2410.o
obj-$(CONFIG_USB_OHCI_HCD_LPC32XX) += ohci-nxp.o
obj-$(CONFIG_USB_OHCI_HCD_PXA27X) += ohci-pxa27x.o
+obj-$(CONFIG_USB_OHCI_HCD_DAVINCI) += ohci-da8xx.o
obj-$(CONFIG_USB_UHCI_HCD) += uhci-hcd.o
obj-$(CONFIG_USB_FHCI_HCD) += fhci.o
diff --git a/drivers/usb/host/ehci-fsl.c b/drivers/usb/host/ehci-fsl.c
index 9f5ffb629973..91701cc68082 100644
--- a/drivers/usb/host/ehci-fsl.c
+++ b/drivers/usb/host/ehci-fsl.c
@@ -286,6 +286,9 @@ static int ehci_fsl_usb_setup(struct ehci_hcd *ehci)
if (pdata->has_fsl_erratum_a005275 == 1)
ehci->has_fsl_hs_errata = 1;
+ if (pdata->has_fsl_erratum_a005697 == 1)
+ ehci->has_fsl_susp_errata = 1;
+
if ((pdata->operating_mode == FSL_USB2_DR_HOST) ||
(pdata->operating_mode == FSL_USB2_DR_OTG))
if (ehci_fsl_setup_phy(hcd, pdata->phy_mode, 0))
diff --git a/drivers/usb/host/ehci-hub.c b/drivers/usb/host/ehci-hub.c
index 74f62d68f013..df169c8e7225 100644
--- a/drivers/usb/host/ehci-hub.c
+++ b/drivers/usb/host/ehci-hub.c
@@ -310,6 +310,14 @@ static int ehci_bus_suspend (struct usb_hcd *hcd)
}
spin_unlock_irq(&ehci->lock);
+ if (changed && ehci_has_fsl_susp_errata(ehci))
+ /*
+ * Wait for at least 10 milliseconds to ensure the controller
+ * enters the suspend state before initiating a port resume
+ * using the Force Port Resume bit (not EHCI compatible).
+ */
+ usleep_range(10000, 20000);
+
if ((changed && ehci->has_tdi_phy_lpm) || fs_idle_delay) {
/*
* Wait for HCD to enter low-power mode or for the bus
@@ -1200,6 +1208,12 @@ int ehci_hub_control(
wIndex, (temp1 & HOSTPC_PHCD) ?
"succeeded" : "failed");
}
+ if (ehci_has_fsl_susp_errata(ehci)) {
+ /* wait 10 ms for the HCD to enter suspend */
+ spin_unlock_irqrestore(&ehci->lock, flags);
+ usleep_range(10000, 20000);
+ spin_lock_irqsave(&ehci->lock, flags);
+ }
set_bit(wIndex, &ehci->suspended_ports);
break;
case USB_PORT_FEAT_POWER:
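
Both hunks wait with usleep_range() rather than msleep(): msleep() is jiffy-based and can overshoot small values badly (msleep(10) may take ~20 ms at HZ=100), while usleep_range() uses hrtimers and gives the scheduler an explicit coalescing window. And since both primitives sleep, the spinlock has to be dropped around the wait, as done above:

	spin_unlock_irqrestore(&ehci->lock, flags);
	usleep_range(10000, 20000);	/* at least 10 ms, may batch up to 20 */
	spin_lock_irqsave(&ehci->lock, flags);
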
diff --git a/drivers/usb/host/ehci-pci.c b/drivers/usb/host/ehci-pci.c
index 3b3649d88c5f..93326974ff4b 100644
--- a/drivers/usb/host/ehci-pci.c
+++ b/drivers/usb/host/ehci-pci.c
@@ -258,9 +258,8 @@ static int ehci_pci_setup(struct usb_hcd *hcd)
/* These workarounds need to be applied after ehci_setup() */
switch (pdev->vendor) {
case PCI_VENDOR_ID_NEC:
- ehci->need_io_watchdog = 0;
- break;
case PCI_VENDOR_ID_INTEL:
+ case PCI_VENDOR_ID_AMD:
ehci->need_io_watchdog = 0;
break;
case PCI_VENDOR_ID_NVIDIA:
diff --git a/drivers/usb/host/ehci-q.c b/drivers/usb/host/ehci-q.c
index eca3710d8fc4..8f3f055c05fa 100644
--- a/drivers/usb/host/ehci-q.c
+++ b/drivers/usb/host/ehci-q.c
@@ -550,11 +550,6 @@ qh_completions (struct ehci_hcd *ehci, struct ehci_qh *qh)
/*-------------------------------------------------------------------------*/
-// high bandwidth multiplier, as encoded in highspeed endpoint descriptors
-#define hb_mult(wMaxPacketSize) (1 + (((wMaxPacketSize) >> 11) & 0x03))
-// ... and packet size, for any kind of endpoint descriptor
-#define max_packet(wMaxPacketSize) ((wMaxPacketSize) & 0x07ff)
-
/*
* reverse of qh_urb_transaction: free a list of TDs.
* used for cleanup after errors, before HC sees an URB's TDs.
@@ -651,7 +646,7 @@ qh_urb_transaction (
token |= (1 /* "in" */ << 8);
/* else it's already initted to "out" pid (0 << 8) */
- maxpacket = max_packet(usb_maxpacket(urb->dev, urb->pipe, !is_input));
+ maxpacket = usb_maxpacket(urb->dev, urb->pipe, !is_input);
/*
* buffer gets wrapped in one or more qtds;
@@ -770,9 +765,11 @@ qh_make (
gfp_t flags
) {
struct ehci_qh *qh = ehci_qh_alloc (ehci, flags);
+ struct usb_host_endpoint *ep;
u32 info1 = 0, info2 = 0;
int is_input, type;
int maxp = 0;
+ int mult;
struct usb_tt *tt = urb->dev->tt;
struct ehci_qh_hw *hw;
@@ -787,13 +784,15 @@ qh_make (
is_input = usb_pipein (urb->pipe);
type = usb_pipetype (urb->pipe);
- maxp = usb_maxpacket (urb->dev, urb->pipe, !is_input);
+ ep = usb_pipe_endpoint (urb->dev, urb->pipe);
+ maxp = usb_endpoint_maxp (&ep->desc);
+ mult = usb_endpoint_maxp_mult (&ep->desc);
/* 1024 byte maxpacket is a hardware ceiling. High bandwidth
* acts like up to 3KB, but is built from smaller packets.
*/
- if (max_packet(maxp) > 1024) {
- ehci_dbg(ehci, "bogus qh maxpacket %d\n", max_packet(maxp));
+ if (maxp > 1024) {
+ ehci_dbg(ehci, "bogus qh maxpacket %d\n", maxp);
goto done;
}
@@ -809,8 +808,7 @@ qh_make (
unsigned tmp;
qh->ps.usecs = NS_TO_US(usb_calc_bus_time(USB_SPEED_HIGH,
- is_input, 0,
- hb_mult(maxp) * max_packet(maxp)));
+ is_input, 0, mult * maxp));
qh->ps.phase = NO_FRAME;
if (urb->dev->speed == USB_SPEED_HIGH) {
@@ -854,7 +852,7 @@ qh_make (
think_time = tt ? tt->think_time : 0;
qh->ps.tt_usecs = NS_TO_US(think_time +
usb_calc_bus_time (urb->dev->speed,
- is_input, 0, max_packet (maxp)));
+ is_input, 0, maxp));
if (urb->interval > ehci->periodic_size)
urb->interval = ehci->periodic_size;
qh->ps.period = urb->interval;
@@ -925,11 +923,11 @@ qh_make (
* to help them do so. So now people expect to use
* such nonconformant devices with Linux too; sigh.
*/
- info1 |= max_packet(maxp) << 16;
+ info1 |= maxp << 16;
info2 |= (EHCI_TUNE_MULT_HS << 30);
} else { /* PIPE_INTERRUPT */
- info1 |= max_packet (maxp) << 16;
- info2 |= hb_mult (maxp) << 30;
+ info1 |= maxp << 16;
+ info2 |= mult << 30;
}
break;
default:
@@ -1221,7 +1219,7 @@ static int submit_single_step_set_feature(
token |= (1 /* "in" */ << 8); /*This is IN stage*/
- maxpacket = max_packet(usb_maxpacket(urb->dev, urb->pipe, 0));
+ maxpacket = usb_maxpacket(urb->dev, urb->pipe, 0);
qtd_fill(ehci, qtd, buf, len, token, maxpacket);
diff --git a/drivers/usb/host/ehci-sched.c b/drivers/usb/host/ehci-sched.c
index 1dfe54f14737..980a6b3b2da2 100644
--- a/drivers/usb/host/ehci-sched.c
+++ b/drivers/usb/host/ehci-sched.c
@@ -1064,11 +1064,10 @@ iso_stream_init(
/* knows about ITD vs SITD */
if (dev->speed == USB_SPEED_HIGH) {
- unsigned multi = hb_mult(maxp);
+ unsigned multi = usb_endpoint_maxp_mult(&urb->ep->desc);
stream->highspeed = 1;
- maxp = max_packet(maxp);
buf1 |= maxp;
maxp *= multi;
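
The deleted hb_mult()/max_packet() macros and the ch9 helpers are expected to agree exactly; an illustrative self-check (not part of the patch) under that assumption:

	static void check_maxp_helpers(const struct usb_endpoint_descriptor *desc)
	{
		u16 v = le16_to_cpu(desc->wMaxPacketSize);

		/* max_packet(v) == usb_endpoint_maxp(desc) */
		WARN_ON(usb_endpoint_maxp(desc) != (v & 0x07ff));
		/* hb_mult(v) == usb_endpoint_maxp_mult(desc) */
		WARN_ON(usb_endpoint_maxp_mult(desc) != (1 + ((v >> 11) & 0x03)));
	}
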
diff --git a/drivers/usb/host/ehci-w90x900.c b/drivers/usb/host/ehci-w90x900.c
index e42a29e8e229..63b9d0c67963 100644
--- a/drivers/usb/host/ehci-w90x900.c
+++ b/drivers/usb/host/ehci-w90x900.c
@@ -33,8 +33,7 @@ static const char hcd_name[] = "ehci-w90x900 ";
static struct hc_driver __read_mostly ehci_w90x900_hc_driver;
-static int usb_w90x900_probe(const struct hc_driver *driver,
- struct platform_device *pdev)
+static int ehci_w90x900_probe(struct platform_device *pdev)
{
struct usb_hcd *hcd;
struct ehci_hcd *ehci;
@@ -42,7 +41,8 @@ static int usb_w90x900_probe(const struct hc_driver *driver,
int retval = 0, irq;
unsigned long val;
- hcd = usb_create_hcd(driver, &pdev->dev, "w90x900 EHCI");
+ hcd = usb_create_hcd(&ehci_w90x900_hc_driver,
+ &pdev->dev, "w90x900 EHCI");
if (!hcd) {
retval = -ENOMEM;
goto err1;
@@ -63,9 +63,9 @@ static int usb_w90x900_probe(const struct hc_driver *driver,
HC_LENGTH(ehci, ehci_readl(ehci, &ehci->caps->hc_capbase));
/* enable PHY 0,1,the regs only apply to w90p910
- * 0xA4,0xA8 were offsets of PHY0 and PHY1 controller of
- * w90p910 IC relative to ehci->regs.
- */
+ * 0xA4,0xA8 were offsets of PHY0 and PHY1 controller of
+ * w90p910 IC relative to ehci->regs.
+ */
val = __raw_readl(ehci->regs+PHY0_CTR);
val |= ENPHY;
__raw_writel(val, ehci->regs+PHY0_CTR);
@@ -92,26 +92,12 @@ err1:
return retval;
}
-static void usb_w90x900_remove(struct usb_hcd *hcd,
- struct platform_device *pdev)
-{
- usb_remove_hcd(hcd);
- usb_put_hcd(hcd);
-}
-
-static int ehci_w90x900_probe(struct platform_device *pdev)
-{
- if (usb_disabled())
- return -ENODEV;
-
- return usb_w90x900_probe(&ehci_w90x900_hc_driver, pdev);
-}
-
static int ehci_w90x900_remove(struct platform_device *pdev)
{
struct usb_hcd *hcd = platform_get_drvdata(pdev);
- usb_w90x900_remove(hcd, pdev);
+ usb_remove_hcd(hcd);
+ usb_put_hcd(hcd);
return 0;
}
diff --git a/drivers/usb/host/ehci.h b/drivers/usb/host/ehci.h
index 3f3b74aeca97..a8e36170d8b8 100644
--- a/drivers/usb/host/ehci.h
+++ b/drivers/usb/host/ehci.h
@@ -219,6 +219,7 @@ struct ehci_hcd { /* one per controller */
unsigned no_selective_suspend:1;
unsigned has_fsl_port_bug:1; /* FreeScale */
unsigned has_fsl_hs_errata:1; /* Freescale HS quirk */
+ unsigned has_fsl_susp_errata:1; /* NXP SUSP quirk */
unsigned big_endian_mmio:1;
unsigned big_endian_desc:1;
unsigned big_endian_capbase:1;
@@ -710,6 +711,13 @@ ehci_port_speed(struct ehci_hcd *ehci, unsigned int portsc)
#endif
/*
+ * Some Freescale/NXP processors have an erratum (USB A-005697)
+ * that requires waiting at least 10 ms for the bus to enter
+ * suspend mode after the SUSP bit is set.
+ */
+#define ehci_has_fsl_susp_errata(e) ((e)->has_fsl_susp_errata)
+
+/*
* While most USB host controllers implement their registers in
* little-endian format, a minority (celleb companion chip) implement
* them in big endian format.
diff --git a/drivers/usb/host/fsl-mph-dr-of.c b/drivers/usb/host/fsl-mph-dr-of.c
index f07ccb25bc24..e90ddb530765 100644
--- a/drivers/usb/host/fsl-mph-dr-of.c
+++ b/drivers/usb/host/fsl-mph-dr-of.c
@@ -226,6 +226,8 @@ static int fsl_usb2_mph_dr_of_probe(struct platform_device *ofdev)
of_property_read_bool(np, "fsl,usb-erratum-a007792");
pdata->has_fsl_erratum_a005275 =
of_property_read_bool(np, "fsl,usb-erratum-a005275");
+ pdata->has_fsl_erratum_a005697 =
+ of_property_read_bool(np, "fsl,usb_erratum-a005697");
/*
* Determine whether phy_clk_valid needs to be checked
diff --git a/drivers/usb/host/isp1362-hcd.c b/drivers/usb/host/isp1362-hcd.c
index 6cf82ee460a6..0f2b4b358e1a 100644
--- a/drivers/usb/host/isp1362-hcd.c
+++ b/drivers/usb/host/isp1362-hcd.c
@@ -147,7 +147,7 @@ static inline struct isp1362_ep_queue *get_ptd_queue(struct isp1362_hcd *isp1362
if (epq)
DBG(1, "%s: PTD $%04x is on %s queue\n", __func__, offset, epq->name);
else
- pr_warning("%s: invalid PTD $%04x\n", __func__, offset);
+ pr_warn("%s: invalid PTD $%04x\n", __func__, offset);
return epq;
}
@@ -157,8 +157,9 @@ static inline int get_ptd_offset(struct isp1362_ep_queue *epq, u8 index)
int offset;
if (index * epq->blk_size > epq->buf_size) {
- pr_warning("%s: Bad %s index %d(%d)\n", __func__, epq->name, index,
- epq->buf_size / epq->blk_size);
+ pr_warn("%s: Bad %s index %d(%d)\n",
+ __func__, epq->name, index,
+ epq->buf_size / epq->blk_size);
return -EINVAL;
}
offset = epq->buf_start + index * epq->blk_size;
@@ -902,8 +903,8 @@ static void start_iso_transfers(struct isp1362_hcd *isp1362_hcd)
ptd_offset = next_ptd(epq, ep);
if (ptd_offset < 0) {
- pr_warning("%s: req %d No more %s PTD buffers available\n", __func__,
- ep->num_req, epq->name);
+ pr_warn("%s: req %d No more %s PTD buffers available\n",
+ __func__, ep->num_req, epq->name);
break;
}
}
@@ -973,8 +974,8 @@ static void finish_transfers(struct isp1362_hcd *isp1362_hcd, unsigned long done
break;
}
if (done_map)
- pr_warning("%s: done_map not clear: %08lx:%08lx\n", __func__, done_map,
- epq->skip_map);
+ pr_warn("%s: done_map not clear: %08lx:%08lx\n",
+ __func__, done_map, epq->skip_map);
atomic_dec(&epq->finishing);
}
@@ -1433,7 +1434,7 @@ static int isp1362_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
} else
DBG(1, "%s: urb %p active; wait4irq\n", __func__, urb);
} else {
- pr_warning("%s: No EP in URB %p\n", __func__, urb);
+ pr_warn("%s: No EP in URB %p\n", __func__, urb);
retval = -EINVAL;
}
done:
@@ -1748,10 +1749,10 @@ static int isp1362_bus_suspend(struct usb_hcd *hcd)
/* FALL THROUGH */
case OHCI_USB_RESET:
status = -EBUSY;
- pr_warning("%s: needs reinit!\n", __func__);
+ pr_warn("%s: needs reinit!\n", __func__);
goto done;
case OHCI_USB_SUSPEND:
- pr_warning("%s: already suspended?\n", __func__);
+ pr_warn("%s: already suspended?\n", __func__);
goto done;
}
DBG(0, "%s: suspend root hub\n", __func__);
@@ -1839,7 +1840,7 @@ static int isp1362_bus_resume(struct usb_hcd *hcd)
isp1362_hcd->hc_control = isp1362_read_reg32(isp1362_hcd, HCCONTROL);
pr_info("%s: HCCONTROL: %08x\n", __func__, isp1362_hcd->hc_control);
if (hcd->state == HC_STATE_RESUMING) {
- pr_warning("%s: duplicate resume\n", __func__);
+ pr_warn("%s: duplicate resume\n", __func__);
status = 0;
} else
switch (isp1362_hcd->hc_control & OHCI_CTRL_HCFS) {
@@ -2474,8 +2475,8 @@ static int isp1362_chip_test(struct isp1362_hcd *isp1362_hcd)
__func__, offset);
break;
}
- pr_warning("%s: memory check with offset %02x ok after second read\n",
- __func__, offset);
+ pr_warn("%s: memory check with offset %02x ok after second read\n",
+ __func__, offset);
}
}
kfree(ref);
diff --git a/drivers/usb/host/ohci-at91.c b/drivers/usb/host/ohci-at91.c
index b38a228134df..be9e63836881 100644
--- a/drivers/usb/host/ohci-at91.c
+++ b/drivers/usb/host/ohci-at91.c
@@ -14,8 +14,8 @@
#include <linux/clk.h>
#include <linux/dma-mapping.h>
+#include <linux/gpio/consumer.h>
#include <linux/of_platform.h>
-#include <linux/of_gpio.h>
#include <linux/platform_device.h>
#include <linux/platform_data/atmel.h>
#include <linux/io.h>
@@ -39,8 +39,8 @@
#define AT91_MAX_USBH_PORTS 3
struct at91_usbh_data {
- int vbus_pin[AT91_MAX_USBH_PORTS]; /* port power-control pin */
- int overcurrent_pin[AT91_MAX_USBH_PORTS];
+ struct gpio_desc *vbus_pin[AT91_MAX_USBH_PORTS];
+ struct gpio_desc *overcurrent_pin[AT91_MAX_USBH_PORTS];
u8 ports; /* number of ports on root hub */
u8 overcurrent_supported;
u8 vbus_pin_active_low[AT91_MAX_USBH_PORTS];
@@ -68,8 +68,6 @@ static const struct ohci_driver_overrides ohci_at91_drv_overrides __initconst =
.extra_priv_size = sizeof(struct ohci_at91_priv),
};
-extern int usb_disabled(void);
-
/*-------------------------------------------------------------------------*/
static void at91_start_clock(struct ohci_at91_priv *ohci_at91)
@@ -268,11 +266,8 @@ static void ohci_at91_usb_set_power(struct at91_usbh_data *pdata, int port, int
if (!valid_port(port))
return;
- if (!gpio_is_valid(pdata->vbus_pin[port]))
- return;
-
- gpio_set_value(pdata->vbus_pin[port],
- pdata->vbus_pin_active_low[port] ^ enable);
+ gpiod_set_value(pdata->vbus_pin[port],
+ pdata->vbus_pin_active_low[port] ^ enable);
}
static int ohci_at91_usb_get_power(struct at91_usbh_data *pdata, int port)
@@ -280,11 +275,8 @@ static int ohci_at91_usb_get_power(struct at91_usbh_data *pdata, int port)
if (!valid_port(port))
return -EINVAL;
- if (!gpio_is_valid(pdata->vbus_pin[port]))
- return -EINVAL;
-
- return gpio_get_value(pdata->vbus_pin[port]) ^
- pdata->vbus_pin_active_low[port];
+ return gpiod_get_value(pdata->vbus_pin[port]) ^
+ pdata->vbus_pin_active_low[port];
}
/*
@@ -474,16 +466,13 @@ static irqreturn_t ohci_hcd_at91_overcurrent_irq(int irq, void *data)
{
struct platform_device *pdev = data;
struct at91_usbh_data *pdata = dev_get_platdata(&pdev->dev);
- int val, gpio, port;
+ int val, port;
/* From the GPIO notifying the over-current situation, find
* out the corresponding port */
at91_for_each_port(port) {
- if (gpio_is_valid(pdata->overcurrent_pin[port]) &&
- gpio_to_irq(pdata->overcurrent_pin[port]) == irq) {
- gpio = pdata->overcurrent_pin[port];
+ if (gpiod_to_irq(pdata->overcurrent_pin[port]) == irq)
break;
- }
}
if (port == AT91_MAX_USBH_PORTS) {
@@ -491,7 +480,7 @@ static irqreturn_t ohci_hcd_at91_overcurrent_irq(int irq, void *data)
return IRQ_HANDLED;
}
- val = gpio_get_value(gpio);
+ val = gpiod_get_value(pdata->overcurrent_pin[port]);
/* When notified of an over-current situation, disable power
on the corresponding port, and mark this port in
@@ -522,9 +511,8 @@ static int ohci_hcd_at91_drv_probe(struct platform_device *pdev)
struct device_node *np = pdev->dev.of_node;
struct at91_usbh_data *pdata;
int i;
- int gpio;
int ret;
- enum of_gpio_flags flags;
+ int err;
u32 ports;
/* Right now device-tree probed devices don't get dma_mask set.
@@ -545,38 +533,16 @@ static int ohci_hcd_at91_drv_probe(struct platform_device *pdev)
pdata->ports = ports;
at91_for_each_port(i) {
- /*
- * do not configure PIO if not in relation with
- * real USB port on board
- */
- if (i >= pdata->ports) {
- pdata->vbus_pin[i] = -EINVAL;
- pdata->overcurrent_pin[i] = -EINVAL;
+ pdata->vbus_pin[i] = devm_gpiod_get_optional(&pdev->dev,
+ "atmel,vbus-gpio",
+ GPIOD_IN);
+ if (IS_ERR(pdata->vbus_pin[i])) {
+ err = PTR_ERR(pdata->vbus_pin[i]);
+ dev_err(&pdev->dev, "unable to claim gpio \"vbus\": %d\n", err);
continue;
}
- gpio = of_get_named_gpio_flags(np, "atmel,vbus-gpio", i,
- &flags);
- pdata->vbus_pin[i] = gpio;
- if (!gpio_is_valid(gpio))
- continue;
- pdata->vbus_pin_active_low[i] = flags & OF_GPIO_ACTIVE_LOW;
-
- ret = gpio_request(gpio, "ohci_vbus");
- if (ret) {
- dev_err(&pdev->dev,
- "can't request vbus gpio %d\n", gpio);
- continue;
- }
- ret = gpio_direction_output(gpio,
- !pdata->vbus_pin_active_low[i]);
- if (ret) {
- dev_err(&pdev->dev,
- "can't put vbus gpio %d as output %d\n",
- gpio, !pdata->vbus_pin_active_low[i]);
- gpio_free(gpio);
- continue;
- }
+ pdata->vbus_pin_active_low[i] = gpiod_get_value(pdata->vbus_pin[i]);
ohci_at91_usb_set_power(pdata, i, 1);
}
@@ -586,37 +552,21 @@ static int ohci_hcd_at91_drv_probe(struct platform_device *pdev)
break;
pdata->overcurrent_pin[i] =
- of_get_named_gpio_flags(np, "atmel,oc-gpio", i, &flags);
-
- if (!gpio_is_valid(pdata->overcurrent_pin[i]))
- continue;
- gpio = pdata->overcurrent_pin[i];
-
- ret = gpio_request(gpio, "ohci_overcurrent");
- if (ret) {
- dev_err(&pdev->dev,
- "can't request overcurrent gpio %d\n",
- gpio);
+ devm_gpiod_get_optional(&pdev->dev,
+ "atmel,oc-gpio", GPIOD_IN);
+ if (IS_ERR(pdata->overcurrent_pin[i])) {
+ err = PTR_ERR(pdata->overcurrent_pin[i]);
+ dev_err(&pdev->dev, "unable to claim gpio \"overcurrent\": %d\n", err);
continue;
}
- ret = gpio_direction_input(gpio);
- if (ret) {
- dev_err(&pdev->dev,
- "can't configure overcurrent gpio %d as input\n",
- gpio);
- gpio_free(gpio);
- continue;
- }
-
- ret = request_irq(gpio_to_irq(gpio),
- ohci_hcd_at91_overcurrent_irq,
- IRQF_SHARED, "ohci_overcurrent", pdev);
- if (ret) {
- gpio_free(gpio);
- dev_err(&pdev->dev,
- "can't get gpio IRQ for overcurrent\n");
- }
+ ret = devm_request_irq(&pdev->dev,
+ gpiod_to_irq(pdata->overcurrent_pin[i]),
+ ohci_hcd_at91_overcurrent_irq,
+ IRQF_SHARED,
+ "ohci_overcurrent", pdev);
+ if (ret)
+ dev_info(&pdev->dev, "failed to request gpio \"overcurrent\" IRQ\n");
}
device_init_wakeup(&pdev->dev, 1);
@@ -629,19 +579,8 @@ static int ohci_hcd_at91_drv_remove(struct platform_device *pdev)
int i;
if (pdata) {
- at91_for_each_port(i) {
- if (!gpio_is_valid(pdata->vbus_pin[i]))
- continue;
+ at91_for_each_port(i)
ohci_at91_usb_set_power(pdata, i, 0);
- gpio_free(pdata->vbus_pin[i]);
- }
-
- at91_for_each_port(i) {
- if (!gpio_is_valid(pdata->overcurrent_pin[i]))
- continue;
- free_irq(gpio_to_irq(pdata->overcurrent_pin[i]), pdev);
- gpio_free(pdata->overcurrent_pin[i]);
- }
}
device_init_wakeup(&pdev->dev, 0);
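
The at91 conversion swaps integer GPIOs for devm-managed descriptors, which is what lets the validity checks, gpio_free() calls and the remove-time cleanup above disappear. A sketch of the idiom, relying on the documented NULL-safety of optional descriptors (the "vbus" con_id below is illustrative, not the driver's):

	struct gpio_desc *vbus;

	/* returns NULL, not an error, when the property is absent */
	vbus = devm_gpiod_get_optional(dev, "vbus", GPIOD_OUT_HIGH);
	if (IS_ERR(vbus))
		return PTR_ERR(vbus);

	gpiod_set_value(vbus, 1);	/* no-op when vbus == NULL */
	/* no gpio_free(): devm releases the descriptor on driver detach */
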
diff --git a/drivers/usb/host/ohci-da8xx.c b/drivers/usb/host/ohci-da8xx.c
index e5c33bc98ea4..05da2cb59612 100644
--- a/drivers/usb/host/ohci-da8xx.c
+++ b/drivers/usb/host/ohci-da8xx.c
@@ -11,62 +11,192 @@
* kind, whether express or implied.
*/
+#include <linux/clk.h>
+#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/jiffies.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
#include <linux/platform_device.h>
-#include <linux/clk.h>
-
-#include <mach/da8xx.h>
+#include <linux/phy/phy.h>
#include <linux/platform_data/usb-davinci.h>
+#include <linux/regulator/consumer.h>
+#include <linux/usb.h>
+#include <linux/usb/hcd.h>
+#include <asm/unaligned.h>
-#ifndef CONFIG_ARCH_DAVINCI_DA8XX
-#error "This file is DA8xx bus glue. Define CONFIG_ARCH_DAVINCI_DA8XX."
-#endif
+#include "ohci.h"
-#define CFGCHIP2 DA8XX_SYSCFG0_VIRT(DA8XX_CFGCHIP2_REG)
+#define DRIVER_DESC "DA8XX"
+#define DRV_NAME "ohci-da8xx"
-static struct clk *usb11_clk;
-static struct clk *usb20_clk;
+static struct hc_driver __read_mostly ohci_da8xx_hc_driver;
+
+static int (*orig_ohci_hub_control)(struct usb_hcd *hcd, u16 typeReq,
+ u16 wValue, u16 wIndex, char *buf, u16 wLength);
+static int (*orig_ohci_hub_status_data)(struct usb_hcd *hcd, char *buf);
+
+struct da8xx_ohci_hcd {
+ struct usb_hcd *hcd;
+ struct clk *usb11_clk;
+ struct phy *usb11_phy;
+ struct regulator *vbus_reg;
+ struct notifier_block nb;
+ unsigned int reg_enabled;
+};
+
+#define to_da8xx_ohci(hcd) (struct da8xx_ohci_hcd *)(hcd_to_ohci(hcd)->priv)
/* Over-current indicator change bitmask */
static volatile u16 ocic_mask;
-static void ohci_da8xx_clock(int on)
+static int ohci_da8xx_enable(struct usb_hcd *hcd)
{
- u32 cfgchip2;
-
- cfgchip2 = __raw_readl(CFGCHIP2);
- if (on) {
- clk_enable(usb11_clk);
-
- /*
- * If USB 1.1 reference clock is sourced from USB 2.0 PHY, we
- * need to enable the USB 2.0 module clocking, start its PHY,
- * and not allow it to stop the clock during USB 2.0 suspend.
- */
- if (!(cfgchip2 & CFGCHIP2_USB1PHYCLKMUX)) {
- clk_enable(usb20_clk);
-
- cfgchip2 &= ~(CFGCHIP2_RESET | CFGCHIP2_PHYPWRDN);
- cfgchip2 |= CFGCHIP2_PHY_PLLON;
- __raw_writel(cfgchip2, CFGCHIP2);
-
- pr_info("Waiting for USB PHY clock good...\n");
- while (!(__raw_readl(CFGCHIP2) & CFGCHIP2_PHYCLKGD))
- cpu_relax();
- }
+ struct da8xx_ohci_hcd *da8xx_ohci = to_da8xx_ohci(hcd);
+ int ret;
+
+ ret = clk_prepare_enable(da8xx_ohci->usb11_clk);
+ if (ret)
+ return ret;
+
+ ret = phy_init(da8xx_ohci->usb11_phy);
+ if (ret)
+ goto err_phy_init;
+
+ ret = phy_power_on(da8xx_ohci->usb11_phy);
+ if (ret)
+ goto err_phy_power_on;
+
+ return 0;
+
+err_phy_power_on:
+ phy_exit(da8xx_ohci->usb11_phy);
+err_phy_init:
+ clk_disable_unprepare(da8xx_ohci->usb11_clk);
+
+ return ret;
+}
+
+static void ohci_da8xx_disable(struct usb_hcd *hcd)
+{
+ struct da8xx_ohci_hcd *da8xx_ohci = to_da8xx_ohci(hcd);
+
+ phy_power_off(da8xx_ohci->usb11_phy);
+ phy_exit(da8xx_ohci->usb11_phy);
+ clk_disable_unprepare(da8xx_ohci->usb11_clk);
+}
- /* Enable USB 1.1 PHY */
- cfgchip2 |= CFGCHIP2_USB1SUSPENDM;
- } else {
- clk_disable(usb11_clk);
- if (!(cfgchip2 & CFGCHIP2_USB1PHYCLKMUX))
- clk_disable(usb20_clk);
+static int ohci_da8xx_set_power(struct usb_hcd *hcd, int on)
+{
+ struct da8xx_ohci_hcd *da8xx_ohci = to_da8xx_ohci(hcd);
+ struct device *dev = hcd->self.controller;
+ struct da8xx_ohci_root_hub *hub = dev_get_platdata(dev);
+ int ret;
+
+ if (hub && hub->set_power)
+ return hub->set_power(1, on);
+
+ if (!da8xx_ohci->vbus_reg)
+ return 0;
- /* Disable USB 1.1 PHY */
- cfgchip2 &= ~CFGCHIP2_USB1SUSPENDM;
+ if (on && !da8xx_ohci->reg_enabled) {
+ ret = regulator_enable(da8xx_ohci->vbus_reg);
+ if (ret) {
+ dev_err(dev, "Failed to enable regulator: %d\n", ret);
+ return ret;
+ }
+ da8xx_ohci->reg_enabled = 1;
+
+ } else if (!on && da8xx_ohci->reg_enabled) {
+ ret = regulator_disable(da8xx_ohci->vbus_reg);
+ if (ret) {
+ dev_err(dev, "Failed to disable regulator: %d\n", ret);
+ return ret;
+ }
+ da8xx_ohci->reg_enabled = 0;
}
- __raw_writel(cfgchip2, CFGCHIP2);
+
+ return 0;
+}
+
+static int ohci_da8xx_get_power(struct usb_hcd *hcd)
+{
+ struct da8xx_ohci_hcd *da8xx_ohci = to_da8xx_ohci(hcd);
+ struct device *dev = hcd->self.controller;
+ struct da8xx_ohci_root_hub *hub = dev_get_platdata(dev);
+
+ if (hub && hub->get_power)
+ return hub->get_power(1);
+
+ if (da8xx_ohci->vbus_reg)
+ return regulator_is_enabled(da8xx_ohci->vbus_reg);
+
+ return 1;
+}
+
+static int ohci_da8xx_get_oci(struct usb_hcd *hcd)
+{
+ struct da8xx_ohci_hcd *da8xx_ohci = to_da8xx_ohci(hcd);
+ struct device *dev = hcd->self.controller;
+ struct da8xx_ohci_root_hub *hub = dev_get_platdata(dev);
+ unsigned int flags;
+ int ret;
+
+ if (hub && hub->get_oci)
+ return hub->get_oci(1);
+
+ if (!da8xx_ohci->vbus_reg)
+ return 0;
+
+ ret = regulator_get_error_flags(da8xx_ohci->vbus_reg, &flags);
+ if (ret)
+ return ret;
+
+ if (flags & REGULATOR_ERROR_OVER_CURRENT)
+ return 1;
+
+ return 0;
+}
+
+static int ohci_da8xx_has_set_power(struct usb_hcd *hcd)
+{
+ struct da8xx_ohci_hcd *da8xx_ohci = to_da8xx_ohci(hcd);
+ struct device *dev = hcd->self.controller;
+ struct da8xx_ohci_root_hub *hub = dev_get_platdata(dev);
+
+ if (hub && hub->set_power)
+ return 1;
+
+ if (da8xx_ohci->vbus_reg)
+ return 1;
+
+ return 0;
+}
+
+static int ohci_da8xx_has_oci(struct usb_hcd *hcd)
+{
+ struct da8xx_ohci_hcd *da8xx_ohci = to_da8xx_ohci(hcd);
+ struct device *dev = hcd->self.controller;
+ struct da8xx_ohci_root_hub *hub = dev_get_platdata(dev);
+
+ if (hub && hub->get_oci)
+ return 1;
+
+ if (da8xx_ohci->vbus_reg)
+ return 1;
+
+ return 0;
+}
+
+static int ohci_da8xx_has_potpgt(struct usb_hcd *hcd)
+{
+ struct device *dev = hcd->self.controller;
+ struct da8xx_ohci_root_hub *hub = dev_get_platdata(dev);
+
+ if (hub && hub->potpgt)
+ return 1;
+
+ return 0;
}
/*
@@ -82,7 +212,51 @@ static void ohci_da8xx_ocic_handler(struct da8xx_ohci_root_hub *hub,
hub->set_power(port, 0);
}
-static int ohci_da8xx_init(struct usb_hcd *hcd)
+static int ohci_da8xx_regulator_event(struct notifier_block *nb,
+ unsigned long event, void *data)
+{
+ struct da8xx_ohci_hcd *da8xx_ohci =
+ container_of(nb, struct da8xx_ohci_hcd, nb);
+
+ if (event & REGULATOR_EVENT_OVER_CURRENT) {
+ ocic_mask |= 1 << 1;
+ ohci_da8xx_set_power(da8xx_ohci->hcd, 0);
+ }
+
+ return 0;
+}
+
+static int ohci_da8xx_register_notify(struct usb_hcd *hcd)
+{
+ struct da8xx_ohci_hcd *da8xx_ohci = to_da8xx_ohci(hcd);
+ struct device *dev = hcd->self.controller;
+ struct da8xx_ohci_root_hub *hub = dev_get_platdata(dev);
+ int ret = 0;
+
+ if (hub && hub->ocic_notify) {
+ ret = hub->ocic_notify(ohci_da8xx_ocic_handler);
+ } else if (da8xx_ohci->vbus_reg) {
+ da8xx_ohci->nb.notifier_call = ohci_da8xx_regulator_event;
+ ret = devm_regulator_register_notifier(da8xx_ohci->vbus_reg,
+ &da8xx_ohci->nb);
+ }
+
+ if (ret)
+ dev_err(dev, "Failed to register notifier: %d\n", ret);
+
+ return ret;
+}
+
+static void ohci_da8xx_unregister_notify(struct usb_hcd *hcd)
+{
+ struct device *dev = hcd->self.controller;
+ struct da8xx_ohci_root_hub *hub = dev_get_platdata(dev);
+
+ if (hub && hub->ocic_notify)
+ hub->ocic_notify(NULL);
+}
+
+static int ohci_da8xx_reset(struct usb_hcd *hcd)
{
struct device *dev = hcd->self.controller;
struct da8xx_ohci_root_hub *hub = dev_get_platdata(dev);
@@ -92,7 +266,9 @@ static int ohci_da8xx_init(struct usb_hcd *hcd)
dev_dbg(dev, "starting USB controller\n");
- ohci_da8xx_clock(1);
+ result = ohci_da8xx_enable(hcd);
+ if (result < 0)
+ return result;
/*
* DA8xx only have 1 port connected to the pins but the HC root hub
@@ -100,9 +276,11 @@ static int ohci_da8xx_init(struct usb_hcd *hcd)
*/
ohci->num_ports = 1;
- result = ohci_init(ohci);
- if (result < 0)
+ result = ohci_setup(hcd);
+ if (result < 0) {
+ ohci_da8xx_disable(hcd);
return result;
+ }
/*
* Since we're providing a board-specific root hub port power control
@@ -111,45 +289,29 @@ static int ohci_da8xx_init(struct usb_hcd *hcd)
* the correct hub descriptor...
*/
rh_a = ohci_readl(ohci, &ohci->regs->roothub.a);
- if (hub->set_power) {
+ if (ohci_da8xx_has_set_power(hcd)) {
rh_a &= ~RH_A_NPS;
rh_a |= RH_A_PSM;
}
- if (hub->get_oci) {
+ if (ohci_da8xx_has_oci(hcd)) {
rh_a &= ~RH_A_NOCP;
rh_a |= RH_A_OCPM;
}
- rh_a &= ~RH_A_POTPGT;
- rh_a |= hub->potpgt << 24;
+ if (ohci_da8xx_has_potpgt(hcd)) {
+ rh_a &= ~RH_A_POTPGT;
+ rh_a |= hub->potpgt << 24;
+ }
ohci_writel(ohci, rh_a, &ohci->regs->roothub.a);
return result;
}
-static void ohci_da8xx_stop(struct usb_hcd *hcd)
-{
- ohci_stop(hcd);
- ohci_da8xx_clock(0);
-}
-
-static int ohci_da8xx_start(struct usb_hcd *hcd)
-{
- struct ohci_hcd *ohci = hcd_to_ohci(hcd);
- int result;
-
- result = ohci_run(ohci);
- if (result < 0)
- ohci_da8xx_stop(hcd);
-
- return result;
-}
-
/*
* Update the status data from the hub with the over-current indicator change.
*/
static int ohci_da8xx_hub_status_data(struct usb_hcd *hcd, char *buf)
{
- int length = ohci_hub_status_data(hcd, buf);
+ int length = orig_ohci_hub_status_data(hcd, buf);
/* See if we have OCIC bit set on port 1 */
if (ocic_mask & (1 << 1)) {
@@ -171,7 +333,6 @@ static int ohci_da8xx_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
u16 wIndex, char *buf, u16 wLength)
{
struct device *dev = hcd->self.controller;
- struct da8xx_ohci_root_hub *hub = dev_get_platdata(dev);
int temp;
switch (typeReq) {
@@ -185,11 +346,11 @@ static int ohci_da8xx_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
temp = roothub_portstatus(hcd_to_ohci(hcd), wIndex - 1);
/* The port power status (PPS) bit defaults to 1 */
- if (hub->get_power && hub->get_power(wIndex) == 0)
+ if (!ohci_da8xx_get_power(hcd))
temp &= ~RH_PS_PPS;
/* The port over-current indicator (POCI) bit is always 0 */
- if (hub->get_oci && hub->get_oci(wIndex) > 0)
+ if (ohci_da8xx_get_oci(hcd) > 0)
temp |= RH_PS_POCI;
/* The over-current indicator change (OCIC) bit is 0 too */
@@ -214,10 +375,7 @@ check_port:
dev_dbg(dev, "%sPortFeature(%u): %s\n",
temp ? "Set" : "Clear", wIndex, "POWER");
- if (!hub->set_power)
- return -EPIPE;
-
- return hub->set_power(wIndex, temp) ? -EPIPE : 0;
+ return ohci_da8xx_set_power(hcd, temp) ? -EPIPE : 0;
case USB_PORT_FEAT_C_OVER_CURRENT:
dev_dbg(dev, "%sPortFeature(%u): %s\n",
temp ? "Set" : "Clear", wIndex,
@@ -231,86 +389,61 @@ check_port:
}
}
- return ohci_hub_control(hcd, typeReq, wValue, wIndex, buf, wLength);
+ return orig_ohci_hub_control(hcd, typeReq, wValue,
+ wIndex, buf, wLength);
}
-static const struct hc_driver ohci_da8xx_hc_driver = {
- .description = hcd_name,
- .product_desc = "DA8xx OHCI",
- .hcd_priv_size = sizeof(struct ohci_hcd),
-
- /*
- * generic hardware linkage
- */
- .irq = ohci_irq,
- .flags = HCD_USB11 | HCD_MEMORY,
-
- /*
- * basic lifecycle operations
- */
- .reset = ohci_da8xx_init,
- .start = ohci_da8xx_start,
- .stop = ohci_da8xx_stop,
- .shutdown = ohci_shutdown,
-
- /*
- * managing i/o requests and associated device resources
- */
- .urb_enqueue = ohci_urb_enqueue,
- .urb_dequeue = ohci_urb_dequeue,
- .endpoint_disable = ohci_endpoint_disable,
-
- /*
- * scheduling support
- */
- .get_frame_number = ohci_get_frame,
-
- /*
- * root hub support
- */
- .hub_status_data = ohci_da8xx_hub_status_data,
- .hub_control = ohci_da8xx_hub_control,
-
-#ifdef CONFIG_PM
- .bus_suspend = ohci_bus_suspend,
- .bus_resume = ohci_bus_resume,
-#endif
- .start_port_reset = ohci_start_port_reset,
-};
-
/*-------------------------------------------------------------------------*/
+#ifdef CONFIG_OF
+static const struct of_device_id da8xx_ohci_ids[] = {
+ { .compatible = "ti,da830-ohci" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, da8xx_ohci_ids);
+#endif
-
-/**
- * usb_hcd_da8xx_probe - initialize DA8xx-based HCDs
- * Context: !in_interrupt()
- *
- * Allocates basic resources for this USB host controller, and
- * then invokes the start() method for the HCD associated with it
- * through the hotplug entry's driver_data.
- */
-static int usb_hcd_da8xx_probe(const struct hc_driver *driver,
- struct platform_device *pdev)
+static int ohci_da8xx_probe(struct platform_device *pdev)
{
- struct da8xx_ohci_root_hub *hub = dev_get_platdata(&pdev->dev);
+ struct da8xx_ohci_hcd *da8xx_ohci;
struct usb_hcd *hcd;
struct resource *mem;
int error, irq;
+ hcd = usb_create_hcd(&ohci_da8xx_hc_driver, &pdev->dev,
+ dev_name(&pdev->dev));
+ if (!hcd)
+ return -ENOMEM;
- if (hub == NULL)
- return -ENODEV;
+ da8xx_ohci = to_da8xx_ohci(hcd);
+ da8xx_ohci->hcd = hcd;
- usb11_clk = devm_clk_get(&pdev->dev, "usb11");
- if (IS_ERR(usb11_clk))
- return PTR_ERR(usb11_clk);
+ da8xx_ohci->usb11_clk = devm_clk_get(&pdev->dev, "usb11");
+ if (IS_ERR(da8xx_ohci->usb11_clk)) {
+ error = PTR_ERR(da8xx_ohci->usb11_clk);
+ if (error != -EPROBE_DEFER)
+ dev_err(&pdev->dev, "Failed to get clock.\n");
+ goto err;
+ }
- usb20_clk = devm_clk_get(&pdev->dev, "usb20");
- if (IS_ERR(usb20_clk))
- return PTR_ERR(usb20_clk);
+ da8xx_ohci->usb11_phy = devm_phy_get(&pdev->dev, "usb-phy");
+ if (IS_ERR(da8xx_ohci->usb11_phy)) {
+ error = PTR_ERR(da8xx_ohci->usb11_phy);
+ if (error != -EPROBE_DEFER)
+ dev_err(&pdev->dev, "Failed to get phy.\n");
+ goto err;
+ }
- hcd = usb_create_hcd(driver, &pdev->dev, dev_name(&pdev->dev));
- if (!hcd)
- return -ENOMEM;
+ da8xx_ohci->vbus_reg = devm_regulator_get_optional(&pdev->dev, "vbus");
+ if (IS_ERR(da8xx_ohci->vbus_reg)) {
+ error = PTR_ERR(da8xx_ohci->vbus_reg);
+ if (error == -ENODEV) {
+ da8xx_ohci->vbus_reg = NULL;
+ } else if (error == -EPROBE_DEFER) {
+ goto err;
+ } else {
+ dev_err(&pdev->dev, "Failed to get regulator\n");
+ goto err;
+ }
+ }
mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
hcd->regs = devm_ioremap_resource(&pdev->dev, mem);
@@ -321,60 +454,38 @@ static int usb_hcd_da8xx_probe(const struct hc_driver *driver,
hcd->rsrc_start = mem->start;
hcd->rsrc_len = resource_size(mem);
- ohci_hcd_init(hcd_to_ohci(hcd));
-
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
error = -ENODEV;
goto err;
}
+
error = usb_add_hcd(hcd, irq, 0);
if (error)
goto err;
device_wakeup_enable(hcd->self.controller);
- if (hub->ocic_notify) {
- error = hub->ocic_notify(ohci_da8xx_ocic_handler);
- if (!error)
- return 0;
- }
+ error = ohci_da8xx_register_notify(hcd);
+ if (error)
+ goto err_remove_hcd;
+
+ return 0;
+err_remove_hcd:
usb_remove_hcd(hcd);
err:
usb_put_hcd(hcd);
return error;
}
-/**
- * usb_hcd_da8xx_remove - shutdown processing for DA8xx-based HCDs
- * @dev: USB Host Controller being removed
- * Context: !in_interrupt()
- *
- * Reverses the effect of usb_hcd_da8xx_probe(), first invoking
- * the HCD's stop() method. It is always called from a thread
- * context, normally "rmmod", "apmd", or something similar.
- */
-static inline void
-usb_hcd_da8xx_remove(struct usb_hcd *hcd, struct platform_device *pdev)
+static int ohci_da8xx_remove(struct platform_device *pdev)
{
- struct da8xx_ohci_root_hub *hub = dev_get_platdata(&pdev->dev);
+ struct usb_hcd *hcd = platform_get_drvdata(pdev);
- hub->ocic_notify(NULL);
+ ohci_da8xx_unregister_notify(hcd);
usb_remove_hcd(hcd);
usb_put_hcd(hcd);
-}
-
-static int ohci_hcd_da8xx_drv_probe(struct platform_device *dev)
-{
- return usb_hcd_da8xx_probe(&ohci_da8xx_hc_driver, dev);
-}
-
-static int ohci_hcd_da8xx_drv_remove(struct platform_device *dev)
-{
- struct usb_hcd *hcd = platform_get_drvdata(dev);
-
- usb_hcd_da8xx_remove(hcd, dev);
return 0;
}
@@ -397,7 +508,7 @@ static int ohci_da8xx_suspend(struct platform_device *pdev,
if (ret)
return ret;
- ohci_da8xx_clock(0);
+ ohci_da8xx_disable(hcd);
hcd->state = HC_STATE_SUSPENDED;
return ret;
@@ -407,32 +518,77 @@ static int ohci_da8xx_resume(struct platform_device *dev)
{
struct usb_hcd *hcd = platform_get_drvdata(dev);
struct ohci_hcd *ohci = hcd_to_ohci(hcd);
+ int ret;
if (time_before(jiffies, ohci->next_statechange))
msleep(5);
ohci->next_statechange = jiffies;
- ohci_da8xx_clock(1);
- dev->dev.power.power_state = PMSG_ON;
- usb_hcd_resume_root_hub(hcd);
+ ret = ohci_da8xx_enable(hcd);
+ if (ret)
+ return ret;
+
+ ohci_resume(hcd, false);
+
return 0;
}
#endif
+static const struct ohci_driver_overrides da8xx_overrides __initconst = {
+ .reset = ohci_da8xx_reset,
+ .extra_priv_size = sizeof(struct da8xx_ohci_hcd),
+};
+
/*
* Driver definition to register with platform structure.
*/
static struct platform_driver ohci_hcd_da8xx_driver = {
- .probe = ohci_hcd_da8xx_drv_probe,
- .remove = ohci_hcd_da8xx_drv_remove,
+ .probe = ohci_da8xx_probe,
+ .remove = ohci_da8xx_remove,
.shutdown = usb_hcd_platform_shutdown,
#ifdef CONFIG_PM
.suspend = ohci_da8xx_suspend,
.resume = ohci_da8xx_resume,
#endif
.driver = {
- .name = "ohci",
+ .name = DRV_NAME,
+ .of_match_table = of_match_ptr(da8xx_ohci_ids),
},
};
-MODULE_ALIAS("platform:ohci");
+static int __init ohci_da8xx_init(void)
+{
+
+ if (usb_disabled())
+ return -ENODEV;
+
+ pr_info("%s: " DRIVER_DESC "\n", DRV_NAME);
+ ohci_init_driver(&ohci_da8xx_hc_driver, &da8xx_overrides);
+
+ /*
+ * The DaVinci da8xx hardware has some unusual quirks that require
+ * da8xx-specific workarounds, so we override certain hc_driver
+ * functions here. We deliberately do not extend ohci_driver_overrides
+ * to make this easier: it is an unusual case, and we do not want
+ * to encourage others to override these functions.
+ */
+
+ orig_ohci_hub_control = ohci_da8xx_hc_driver.hub_control;
+ orig_ohci_hub_status_data = ohci_da8xx_hc_driver.hub_status_data;
+
+ ohci_da8xx_hc_driver.hub_status_data = ohci_da8xx_hub_status_data;
+ ohci_da8xx_hc_driver.hub_control = ohci_da8xx_hub_control;
+
+ return platform_driver_register(&ohci_hcd_da8xx_driver);
+}
+module_init(ohci_da8xx_init);
+
+static void __exit ohci_da8xx_exit(void)
+{
+ platform_driver_unregister(&ohci_hcd_da8xx_driver);
+}
+module_exit(ohci_da8xx_exit);
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:" DRV_NAME);
diff --git a/drivers/usb/host/ohci-hcd.c b/drivers/usb/host/ohci-hcd.c
index 86612ac3fda2..8685cf3e6292 100644
--- a/drivers/usb/host/ohci-hcd.c
+++ b/drivers/usb/host/ohci-hcd.c
@@ -1219,11 +1219,6 @@ MODULE_LICENSE ("GPL");
#define SA1111_DRIVER ohci_hcd_sa1111_driver
#endif
-#ifdef CONFIG_USB_OHCI_HCD_DAVINCI
-#include "ohci-da8xx.c"
-#define DAVINCI_PLATFORM_DRIVER ohci_hcd_da8xx_driver
-#endif
-
#ifdef CONFIG_USB_OHCI_HCD_PPC_OF
#include "ohci-ppc-of.c"
#define OF_PLATFORM_DRIVER ohci_hcd_ppc_of_driver
@@ -1303,19 +1298,9 @@ static int __init ohci_hcd_mod_init(void)
goto error_tmio;
#endif
-#ifdef DAVINCI_PLATFORM_DRIVER
- retval = platform_driver_register(&DAVINCI_PLATFORM_DRIVER);
- if (retval < 0)
- goto error_davinci;
-#endif
-
return retval;
/* Error path */
-#ifdef DAVINCI_PLATFORM_DRIVER
- platform_driver_unregister(&DAVINCI_PLATFORM_DRIVER);
- error_davinci:
-#endif
#ifdef TMIO_OHCI_DRIVER
platform_driver_unregister(&TMIO_OHCI_DRIVER);
error_tmio:
@@ -1351,9 +1336,6 @@ module_init(ohci_hcd_mod_init);
static void __exit ohci_hcd_mod_exit(void)
{
-#ifdef DAVINCI_PLATFORM_DRIVER
- platform_driver_unregister(&DAVINCI_PLATFORM_DRIVER);
-#endif
#ifdef TMIO_OHCI_DRIVER
platform_driver_unregister(&TMIO_OHCI_DRIVER);
#endif
diff --git a/drivers/usb/host/ohci-mem.c b/drivers/usb/host/ohci-mem.c
index c9e315c6808a..ed8a762b8670 100644
--- a/drivers/usb/host/ohci-mem.c
+++ b/drivers/usb/host/ohci-mem.c
@@ -88,10 +88,9 @@ td_alloc (struct ohci_hcd *hc, gfp_t mem_flags)
dma_addr_t dma;
struct td *td;
- td = dma_pool_alloc (hc->td_cache, mem_flags, &dma);
+ td = dma_pool_zalloc (hc->td_cache, mem_flags, &dma);
if (td) {
/* in case hc fetches it, make it look dead */
- memset (td, 0, sizeof *td);
td->hwNextTD = cpu_to_hc32 (hc, dma);
td->td_dma = dma;
/* hashed in td_fill */
@@ -122,9 +121,8 @@ ed_alloc (struct ohci_hcd *hc, gfp_t mem_flags)
dma_addr_t dma;
struct ed *ed;
- ed = dma_pool_alloc (hc->ed_cache, mem_flags, &dma);
+ ed = dma_pool_zalloc (hc->ed_cache, mem_flags, &dma);
if (ed) {
- memset (ed, 0, sizeof (*ed));
INIT_LIST_HEAD (&ed->td_list);
ed->dma = dma;
}
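The td_alloc()/ed_alloc() conversions rely on dma_pool_zalloc() handing back
already-zeroed memory, which is what makes the explicit memset() calls
redundant. It is a thin inline wrapper in <linux/dmapool.h>, roughly:

	static inline void *dma_pool_zalloc(struct dma_pool *pool,
					    gfp_t mem_flags, dma_addr_t *handle)
	{
		return dma_pool_alloc(pool, mem_flags | __GFP_ZERO, handle);
	}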
diff --git a/drivers/usb/host/ohci-nxp.c b/drivers/usb/host/ohci-nxp.c
index b7d4756232ae..6df8e2ed40fd 100644
--- a/drivers/usb/host/ohci-nxp.c
+++ b/drivers/usb/host/ohci-nxp.c
@@ -56,8 +56,6 @@ static struct hc_driver __read_mostly ohci_nxp_hc_driver;
static struct i2c_client *isp1301_i2c_client;
-extern int usb_disabled(void);
-
static struct clk *usb_host_clk;
static void isp1301_configure_lpc32xx(void)
@@ -127,6 +125,7 @@ static inline void isp1301_vbus_off(void)
static void ohci_nxp_start_hc(void)
{
unsigned long tmp = __raw_readl(USB_OTG_STAT_CONTROL) | HOST_EN;
+
__raw_writel(tmp, USB_OTG_STAT_CONTROL);
isp1301_vbus_on();
}
@@ -134,6 +133,7 @@ static void ohci_nxp_start_hc(void)
static void ohci_nxp_stop_hc(void)
{
unsigned long tmp;
+
isp1301_vbus_off();
tmp = __raw_readl(USB_OTG_STAT_CONTROL) & ~HOST_EN;
__raw_writel(tmp, USB_OTG_STAT_CONTROL);
@@ -155,9 +155,8 @@ static int ohci_hcd_nxp_probe(struct platform_device *pdev)
}
isp1301_i2c_client = isp1301_get_client(isp1301_node);
- if (!isp1301_i2c_client) {
+ if (!isp1301_i2c_client)
return -EPROBE_DEFER;
- }
ret = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
if (ret)
diff --git a/drivers/usb/host/ohci-omap.c b/drivers/usb/host/ohci-omap.c
index 495c1454b9e8..b08e385399b9 100644
--- a/drivers/usb/host/ohci-omap.c
+++ b/drivers/usb/host/ohci-omap.c
@@ -68,9 +68,6 @@ static inline int tps65010_set_gpio_out_value(unsigned gpio, unsigned value)
#endif
-extern int usb_disabled(void);
-extern int ocpi_enable(void);
-
static struct clk *usb_host_ck;
static struct clk *usb_dc_ck;
@@ -296,15 +293,14 @@ static int ohci_omap_reset(struct usb_hcd *hcd)
/*-------------------------------------------------------------------------*/
/**
- * usb_hcd_omap_probe - initialize OMAP-based HCDs
+ * ohci_hcd_omap_probe - initialize OMAP-based HCDs
* Context: !in_interrupt()
*
* Allocates basic resources for this USB host controller, and
* then invokes the start() method for the HCD associated with it
* through the hotplug entry's driver_data.
*/
-static int usb_hcd_omap_probe (const struct hc_driver *driver,
- struct platform_device *pdev)
+static int ohci_hcd_omap_probe(struct platform_device *pdev)
{
int retval, irq;
struct usb_hcd *hcd = 0;
@@ -336,7 +332,8 @@ static int usb_hcd_omap_probe (const struct hc_driver *driver,
}
- hcd = usb_create_hcd (driver, &pdev->dev, dev_name(&pdev->dev));
+ hcd = usb_create_hcd(&ohci_omap_hc_driver, &pdev->dev,
+ dev_name(&pdev->dev));
if (!hcd) {
retval = -ENOMEM;
goto err0;
@@ -384,17 +381,18 @@ err0:
/* may be called with controller, bus, and devices active */
/**
- * usb_hcd_omap_remove - shutdown processing for OMAP-based HCDs
+ * ohci_hcd_omap_remove - shutdown processing for OMAP-based HCDs
* @dev: USB Host Controller being removed
* Context: !in_interrupt()
*
- * Reverses the effect of usb_hcd_omap_probe(), first invoking
+ * Reverses the effect of ohci_hcd_omap_probe(), first invoking
* the HCD's stop() method. It is always called from a thread
* context, normally "rmmod", "apmd", or something similar.
*/
-static inline void
-usb_hcd_omap_remove (struct usb_hcd *hcd, struct platform_device *pdev)
+static int ohci_hcd_omap_remove(struct platform_device *pdev)
{
+ struct usb_hcd *hcd = platform_get_drvdata(pdev);
+
dev_dbg(hcd->self.controller, "stopping USB Controller\n");
usb_remove_hcd(hcd);
omap_ohci_clock_power(0);
@@ -409,21 +407,6 @@ usb_hcd_omap_remove (struct usb_hcd *hcd, struct platform_device *pdev)
usb_put_hcd(hcd);
clk_put(usb_dc_ck);
clk_put(usb_host_ck);
-}
-
-/*-------------------------------------------------------------------------*/
-
-static int ohci_hcd_omap_drv_probe(struct platform_device *dev)
-{
- return usb_hcd_omap_probe(&ohci_omap_hc_driver, dev);
-}
-
-static int ohci_hcd_omap_drv_remove(struct platform_device *dev)
-{
- struct usb_hcd *hcd = platform_get_drvdata(dev);
-
- usb_hcd_omap_remove(hcd, dev);
-
return 0;
}
@@ -472,8 +455,8 @@ static int ohci_omap_resume(struct platform_device *dev)
* Driver definition to register with the OMAP bus
*/
static struct platform_driver ohci_hcd_omap_driver = {
- .probe = ohci_hcd_omap_drv_probe,
- .remove = ohci_hcd_omap_drv_remove,
+ .probe = ohci_hcd_omap_probe,
+ .remove = ohci_hcd_omap_remove,
.shutdown = usb_hcd_platform_shutdown,
#ifdef CONFIG_PM
.suspend = ohci_omap_suspend,
diff --git a/drivers/usb/host/ohci-pxa27x.c b/drivers/usb/host/ohci-pxa27x.c
index a667cf2d5788..79efde8f21e0 100644
--- a/drivers/usb/host/ohci-pxa27x.c
+++ b/drivers/usb/host/ohci-pxa27x.c
@@ -404,7 +404,7 @@ static int ohci_pxa_of_init(struct platform_device *pdev)
/**
- * usb_hcd_pxa27x_probe - initialize pxa27x-based HCDs
+ * ohci_hcd_pxa27x_probe - initialize pxa27x-based HCDs
* Context: !in_interrupt()
*
* Allocates basic resources for this USB host controller, and
@@ -412,7 +412,7 @@ static int ohci_pxa_of_init(struct platform_device *pdev)
* through the hotplug entry's driver_data.
*
*/
-int usb_hcd_pxa27x_probe (const struct hc_driver *driver, struct platform_device *pdev)
+static int ohci_hcd_pxa27x_probe(struct platform_device *pdev)
{
int retval, irq;
struct usb_hcd *hcd;
@@ -442,7 +442,7 @@ int usb_hcd_pxa27x_probe (const struct hc_driver *driver, struct platform_device
if (IS_ERR(usb_clk))
return PTR_ERR(usb_clk);
- hcd = usb_create_hcd (driver, &pdev->dev, "pxa27x");
+ hcd = usb_create_hcd(&ohci_pxa27x_hc_driver, &pdev->dev, "pxa27x");
if (!hcd)
return -ENOMEM;
@@ -503,17 +503,18 @@ int usb_hcd_pxa27x_probe (const struct hc_driver *driver, struct platform_device
/* may be called with controller, bus, and devices active */
/**
- * usb_hcd_pxa27x_remove - shutdown processing for pxa27x-based HCDs
+ * ohci_hcd_pxa27x_remove - shutdown processing for pxa27x-based HCDs
* @dev: USB Host Controller being removed
* Context: !in_interrupt()
*
- * Reverses the effect of usb_hcd_pxa27x_probe(), first invoking
+ * Reverses the effect of ohci_hcd_pxa27x_probe(), first invoking
* the HCD's stop() method. It is always called from a thread
* context, normally "rmmod", "apmd", or something similar.
*
*/
-void usb_hcd_pxa27x_remove (struct usb_hcd *hcd, struct platform_device *pdev)
+static int ohci_hcd_pxa27x_remove(struct platform_device *pdev)
{
+ struct usb_hcd *hcd = platform_get_drvdata(pdev);
struct pxa27x_ohci *pxa_ohci = to_pxa27x_ohci(hcd);
unsigned int i;
@@ -524,28 +525,11 @@ void usb_hcd_pxa27x_remove (struct usb_hcd *hcd, struct platform_device *pdev)
pxa27x_ohci_set_vbus_power(pxa_ohci, i, false);
usb_put_hcd(hcd);
+ return 0;
}
/*-------------------------------------------------------------------------*/
-static int ohci_hcd_pxa27x_drv_probe(struct platform_device *pdev)
-{
- pr_debug ("In ohci_hcd_pxa27x_drv_probe");
-
- if (usb_disabled())
- return -ENODEV;
-
- return usb_hcd_pxa27x_probe(&ohci_pxa27x_hc_driver, pdev);
-}
-
-static int ohci_hcd_pxa27x_drv_remove(struct platform_device *pdev)
-{
- struct usb_hcd *hcd = platform_get_drvdata(pdev);
-
- usb_hcd_pxa27x_remove(hcd, pdev);
- return 0;
-}
-
#ifdef CONFIG_PM
static int ohci_hcd_pxa27x_drv_suspend(struct device *dev)
{
@@ -598,8 +582,8 @@ static const struct dev_pm_ops ohci_hcd_pxa27x_pm_ops = {
#endif
static struct platform_driver ohci_hcd_pxa27x_driver = {
- .probe = ohci_hcd_pxa27x_drv_probe,
- .remove = ohci_hcd_pxa27x_drv_remove,
+ .probe = ohci_hcd_pxa27x_probe,
+ .remove = ohci_hcd_pxa27x_remove,
.shutdown = usb_hcd_platform_shutdown,
.driver = {
.name = "pxa27x-ohci",
diff --git a/drivers/usb/host/ohci-s3c2410.c b/drivers/usb/host/ohci-s3c2410.c
index 7a1919ca543a..b006b93126f7 100644
--- a/drivers/usb/host/ohci-s3c2410.c
+++ b/drivers/usb/host/ohci-s3c2410.c
@@ -43,6 +43,8 @@ static const char hcd_name[] = "ohci-s3c2410";
static struct clk *clk;
static struct clk *usb_clk;
+static struct hc_driver __read_mostly ohci_s3c2410_hc_driver;
+
/* forward definitions */
static void s3c2410_hcd_oc(struct s3c2410_hcd_info *info, int port_oc);
@@ -321,26 +323,29 @@ static void s3c2410_hcd_oc(struct s3c2410_hcd_info *info, int port_oc)
/* may be called with controller, bus, and devices active */
/*
- * usb_hcd_s3c2410_remove - shutdown processing for HCD
+ * ohci_hcd_s3c2410_remove - shutdown processing for HCD
* @dev: USB Host Controller being removed
* Context: !in_interrupt()
*
- * Reverses the effect of usb_hcd_3c2410_probe(), first invoking
+ * Reverses the effect of ohci_hcd_s3c2410_probe(), first invoking
* the HCD's stop() method. It is always called from a thread
* context, normally "rmmod", "apmd", or something similar.
*
*/
-static void
-usb_hcd_s3c2410_remove(struct usb_hcd *hcd, struct platform_device *dev)
+static int
+ohci_hcd_s3c2410_remove(struct platform_device *dev)
{
+ struct usb_hcd *hcd = platform_get_drvdata(dev);
+
usb_remove_hcd(hcd);
s3c2410_stop_hc(dev);
usb_put_hcd(hcd);
+ return 0;
}
/**
- * usb_hcd_s3c2410_probe - initialize S3C2410-based HCDs
+ * ohci_hcd_s3c2410_probe - initialize S3C2410-based HCDs
* Context: !in_interrupt()
*
* Allocates basic resources for this USB host controller, and
@@ -348,8 +353,7 @@ usb_hcd_s3c2410_remove(struct usb_hcd *hcd, struct platform_device *dev)
* through the hotplug entry's driver_data.
*
*/
-static int usb_hcd_s3c2410_probe(const struct hc_driver *driver,
- struct platform_device *dev)
+static int ohci_hcd_s3c2410_probe(struct platform_device *dev)
{
struct usb_hcd *hcd = NULL;
struct s3c2410_hcd_info *info = dev_get_platdata(&dev->dev);
@@ -358,7 +362,7 @@ static int usb_hcd_s3c2410_probe(const struct hc_driver *driver,
s3c2410_usb_set_power(info, 1, 1);
s3c2410_usb_set_power(info, 2, 1);
- hcd = usb_create_hcd(driver, &dev->dev, "s3c24xx");
+ hcd = usb_create_hcd(&ohci_s3c2410_hc_driver, &dev->dev, "s3c24xx");
if (hcd == NULL)
return -ENOMEM;
@@ -404,21 +408,6 @@ static int usb_hcd_s3c2410_probe(const struct hc_driver *driver,
/*-------------------------------------------------------------------------*/
-static struct hc_driver __read_mostly ohci_s3c2410_hc_driver;
-
-static int ohci_hcd_s3c2410_drv_probe(struct platform_device *pdev)
-{
- return usb_hcd_s3c2410_probe(&ohci_s3c2410_hc_driver, pdev);
-}
-
-static int ohci_hcd_s3c2410_drv_remove(struct platform_device *pdev)
-{
- struct usb_hcd *hcd = platform_get_drvdata(pdev);
-
- usb_hcd_s3c2410_remove(hcd, pdev);
- return 0;
-}
-
#ifdef CONFIG_PM
static int ohci_hcd_s3c2410_drv_suspend(struct device *dev)
{
@@ -457,13 +446,21 @@ static const struct dev_pm_ops ohci_hcd_s3c2410_pm_ops = {
.resume = ohci_hcd_s3c2410_drv_resume,
};
+static const struct of_device_id ohci_hcd_s3c2410_dt_ids[] = {
+ { .compatible = "samsung,s3c2410-ohci" },
+ { /* sentinel */ }
+};
+
+MODULE_DEVICE_TABLE(of, ohci_hcd_s3c2410_dt_ids);
+
static struct platform_driver ohci_hcd_s3c2410_driver = {
- .probe = ohci_hcd_s3c2410_drv_probe,
- .remove = ohci_hcd_s3c2410_drv_remove,
+ .probe = ohci_hcd_s3c2410_probe,
+ .remove = ohci_hcd_s3c2410_remove,
.shutdown = usb_hcd_platform_shutdown,
.driver = {
.name = "s3c2410-ohci",
.pm = &ohci_hcd_s3c2410_pm_ops,
+ .of_match_table = ohci_hcd_s3c2410_dt_ids,
},
};
diff --git a/drivers/usb/host/uhci-pci.c b/drivers/usb/host/uhci-pci.c
index 940304c33224..02260cfdedb1 100644
--- a/drivers/usb/host/uhci-pci.c
+++ b/drivers/usb/host/uhci-pci.c
@@ -129,6 +129,10 @@ static int uhci_pci_init(struct usb_hcd *hcd)
if (to_pci_dev(uhci_dev(uhci))->vendor == PCI_VENDOR_ID_HP)
uhci->wait_for_hp = 1;
+ /* Intel controllers use non-PME wakeup signalling */
+ if (to_pci_dev(uhci_dev(uhci))->vendor == PCI_VENDOR_ID_INTEL)
+ device_set_run_wake(uhci_dev(uhci), 1);
+
/* Set up pointers to PCI-specific functions */
uhci->reset_hc = uhci_pci_reset_hc;
uhci->check_and_reset_hc = uhci_pci_check_and_reset_hc;
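device_set_run_wake() records that the device can signal remote wakeup while
runtime suspended even without PME#; the runtime PM core consults that flag
before relying on wakeup during runtime suspend. At this point in the tree it
is a trivial helper in <linux/pm_runtime.h>, roughly (a sketch from memory,
verify against the header):

	static inline void device_set_run_wake(struct device *dev, bool enable)
	{
		dev->power.run_wake = enable;
	}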
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index 6afe32381209..321de2e0161b 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -1032,7 +1032,6 @@ int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id,
goto fail;
dev->num_rings_cached = 0;
- init_completion(&dev->cmd_completion);
dev->udev = udev;
/* Point to output device context in dcbaa. */
@@ -1370,7 +1369,7 @@ static u32 xhci_get_endpoint_max_burst(struct usb_device *udev,
if (udev->speed == USB_SPEED_HIGH &&
(usb_endpoint_xfer_isoc(&ep->desc) ||
usb_endpoint_xfer_int(&ep->desc)))
- return (usb_endpoint_maxp(&ep->desc) & 0x1800) >> 11;
+ return usb_endpoint_maxp_mult(&ep->desc) - 1;
return 0;
}
@@ -1415,10 +1414,10 @@ static u32 xhci_get_max_esit_payload(struct usb_device *udev,
else if (udev->speed >= USB_SPEED_SUPER)
return le16_to_cpu(ep->ss_ep_comp.wBytesPerInterval);
- max_packet = GET_MAX_PACKET(usb_endpoint_maxp(&ep->desc));
- max_burst = (usb_endpoint_maxp(&ep->desc) & 0x1800) >> 11;
+ max_packet = usb_endpoint_maxp(&ep->desc);
+ max_burst = usb_endpoint_maxp_mult(&ep->desc);
/* A 0 in max burst means 1 transfer per ESIT */
- return max_packet * (max_burst + 1);
+ return max_packet * max_burst;
}
/* Set up an endpoint with one ring segment. Do not allocate stream rings.
@@ -1461,7 +1460,7 @@ int xhci_endpoint_init(struct xhci_hcd *xhci,
max_esit_payload = xhci_get_max_esit_payload(udev, ep);
interval = xhci_get_endpoint_interval(udev, ep);
mult = xhci_get_endpoint_mult(udev, ep);
- max_packet = GET_MAX_PACKET(usb_endpoint_maxp(&ep->desc));
+ max_packet = usb_endpoint_maxp(&ep->desc);
max_burst = xhci_get_endpoint_max_burst(udev, ep);
avg_trb_len = max_esit_payload;
@@ -2384,7 +2383,7 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
* "physically contiguous and 64-byte (cache line) aligned".
*/
xhci->dcbaa = dma_alloc_coherent(dev, sizeof(*xhci->dcbaa), &dma,
- GFP_KERNEL);
+ flags);
if (!xhci->dcbaa)
goto fail;
memset(xhci->dcbaa, 0, sizeof *(xhci->dcbaa));
@@ -2480,7 +2479,7 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
xhci->erst.entries = dma_alloc_coherent(dev,
sizeof(struct xhci_erst_entry) * ERST_NUM_SEGS, &dma,
- GFP_KERNEL);
+ flags);
if (!xhci->erst.entries)
goto fail;
xhci_dbg_trace(xhci, trace_xhci_dbg_init,
@@ -2536,7 +2535,6 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
* something other than the default (~1ms minimum between interrupts).
* See section 5.5.1.2.
*/
- init_completion(&xhci->addr_dev);
for (i = 0; i < MAX_HC_SLOTS; ++i)
xhci->devs[i] = NULL;
for (i = 0; i < USB_MAXCHILDREN; ++i) {
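The open-coded (wMaxPacketSize & 0x1800) >> 11 extractions above are replaced
by usb_endpoint_maxp_mult(), which decodes bits 12:11 (the high-bandwidth
"additional transactions per microframe" field) and returns the total
transaction count, i.e. mult + 1 in the range 1..3; usb_endpoint_maxp() now
masks those bits off itself, which is why the GET_MAX_PACKET() wrappers also
disappear. Roughly, per <linux/usb/ch9.h>:

	#define USB_EP_MAXP_MULT_SHIFT	11
	#define USB_EP_MAXP_MULT_MASK	(3 << USB_EP_MAXP_MULT_SHIFT)
	#define USB_EP_MAXP_MULT(m) \
		(((m) & USB_EP_MAXP_MULT_MASK) >> USB_EP_MAXP_MULT_SHIFT)

	static inline int
	usb_endpoint_maxp_mult(const struct usb_endpoint_descriptor *epd)
	{
		int maxp = le16_to_cpu(epd->wMaxPacketSize);

		return USB_EP_MAXP_MULT(maxp) + 1;
	}

With that convention the ESIT payload becomes max_packet * max_burst directly:
a high-speed isoc endpoint with a 1024-byte payload and two additional
transactions per microframe gives usb_endpoint_maxp_mult() == 3, hence an ESIT
payload of 3 * 1024 = 3072 bytes.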
diff --git a/drivers/usb/host/xhci-mtk-sch.c b/drivers/usb/host/xhci-mtk-sch.c
index 73f763c4f5f5..6e7ddf6cafae 100644
--- a/drivers/usb/host/xhci-mtk-sch.c
+++ b/drivers/usb/host/xhci-mtk-sch.c
@@ -337,7 +337,7 @@ int xhci_mtk_add_ep_quirk(struct usb_hcd *hcd, struct usb_device *udev,
xhci_dbg(xhci, "%s() type:%d, speed:%d, mpkt:%d, dir:%d, ep:%p\n",
__func__, usb_endpoint_type(&ep->desc), udev->speed,
- GET_MAX_PACKET(usb_endpoint_maxp(&ep->desc)),
+ usb_endpoint_maxp(&ep->desc),
usb_endpoint_dir_in(&ep->desc), ep);
if (!need_bw_sch(ep, udev->speed, slot_ctx->tt_info & TT_SLOT)) {
@@ -403,7 +403,7 @@ void xhci_mtk_drop_ep_quirk(struct usb_hcd *hcd, struct usb_device *udev,
xhci_dbg(xhci, "%s() type:%d, speed:%d, mpks:%d, dir:%d, ep:%p\n",
__func__, usb_endpoint_type(&ep->desc), udev->speed,
- GET_MAX_PACKET(usb_endpoint_maxp(&ep->desc)),
+ usb_endpoint_maxp(&ep->desc),
usb_endpoint_dir_in(&ep->desc), ep);
if (!need_bw_sch(ep, udev->speed, slot_ctx->tt_info & TT_SLOT))
diff --git a/drivers/usb/host/xhci-mtk.c b/drivers/usb/host/xhci-mtk.c
index 79959f17c38c..1094ebd2838f 100644
--- a/drivers/usb/host/xhci-mtk.c
+++ b/drivers/usb/host/xhci-mtk.c
@@ -94,6 +94,9 @@ static int xhci_mtk_host_enable(struct xhci_hcd_mtk *mtk)
int ret;
int i;
+ if (!mtk->has_ippc)
+ return 0;
+
/* power on host ip */
value = readl(&ippc->ip_pw_ctr1);
value &= ~CTRL1_IP_HOST_PDN;
@@ -139,6 +142,9 @@ static int xhci_mtk_host_disable(struct xhci_hcd_mtk *mtk)
int ret;
int i;
+ if (!mtk->has_ippc)
+ return 0;
+
/* power down all u3 ports */
for (i = 0; i < mtk->num_u3_ports; i++) {
value = readl(&ippc->u3_ctrl_p[i]);
@@ -173,6 +179,9 @@ static int xhci_mtk_ssusb_config(struct xhci_hcd_mtk *mtk)
struct mu3c_ippc_regs __iomem *ippc = mtk->ippc_regs;
u32 value;
+ if (!mtk->has_ippc)
+ return 0;
+
/* reset whole ip */
value = readl(&ippc->ip_pw_ctr0);
value |= CTRL0_IP_SW_RST;
@@ -475,6 +484,7 @@ static void xhci_mtk_quirks(struct device *dev, struct xhci_hcd *xhci)
/* called during probe() after chip reset completes */
static int xhci_mtk_setup(struct usb_hcd *hcd)
{
+ struct xhci_hcd *xhci = hcd_to_xhci(hcd);
struct xhci_hcd_mtk *mtk = hcd_to_mtk(hcd);
int ret;
@@ -482,12 +492,21 @@ static int xhci_mtk_setup(struct usb_hcd *hcd)
ret = xhci_mtk_ssusb_config(mtk);
if (ret)
return ret;
+ }
+
+ ret = xhci_gen_setup(hcd, xhci_mtk_quirks);
+ if (ret)
+ return ret;
+
+ if (usb_hcd_is_primary_hcd(hcd)) {
+ mtk->num_u3_ports = xhci->num_usb3_ports;
+ mtk->num_u2_ports = xhci->num_usb2_ports;
ret = xhci_mtk_sch_init(mtk);
if (ret)
return ret;
}
- return xhci_gen_setup(hcd, xhci_mtk_quirks);
+ return ret;
}
static int xhci_mtk_probe(struct platform_device *pdev)
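The reordering in xhci_mtk_setup() is not cosmetic: xhci_gen_setup() is what
walks the controller's structural parameters (via xhci_mem_init()) and fills
xhci->num_usb2_ports/num_usb3_ports, so the MTK bandwidth scheduler can only
size itself afterwards. As a comment-style summary of the resulting sequence
(a reading of the hunk above, not code from the tree):

	/*
	 * xhci_mtk_setup() ordering, primary hcd:
	 *  1. xhci_mtk_ssusb_config() - ippc power-up, no xHC state needed
	 *  2. xhci_gen_setup()        - fills the usb2/usb3 port counts
	 *  3. xhci_mtk_sch_init()     - sizes its tables from those counts
	 */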
@@ -586,7 +605,7 @@ static int xhci_mtk_probe(struct platform_device *pdev)
mtk->hcd = platform_get_drvdata(pdev);
platform_set_drvdata(pdev, mtk);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mac");
hcd->regs = devm_ioremap_resource(dev, res);
if (IS_ERR(hcd->regs)) {
ret = PTR_ERR(hcd->regs);
@@ -595,11 +614,16 @@ static int xhci_mtk_probe(struct platform_device *pdev)
hcd->rsrc_start = res->start;
hcd->rsrc_len = resource_size(res);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- mtk->ippc_regs = devm_ioremap_resource(dev, res);
- if (IS_ERR(mtk->ippc_regs)) {
- ret = PTR_ERR(mtk->ippc_regs);
- goto put_usb2_hcd;
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ippc");
+ if (res) { /* ippc register is optional */
+ mtk->ippc_regs = devm_ioremap_resource(dev, res);
+ if (IS_ERR(mtk->ippc_regs)) {
+ ret = PTR_ERR(mtk->ippc_regs);
+ goto put_usb2_hcd;
+ }
+ mtk->has_ippc = true;
+ } else {
+ mtk->has_ippc = false;
}
for (phy_num = 0; phy_num < mtk->num_phys; phy_num++) {
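One detail carries the weight in the probe hunk: platform_get_resource_byname()
returns NULL when no "ippc" reg entry exists, while devm_ioremap_resource()
treats a NULL resource as a hard error (dev_err() plus ERR_PTR(-EINVAL)). The
NULL check must therefore come before the mapping for an optional block; the
idiom in sketch form, with base standing in for mtk->ippc_regs:

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ippc");
	if (res) {			/* optional: absent on some hosts */
		base = devm_ioremap_resource(dev, res);
		if (IS_ERR(base))	/* present but unmappable: fail */
			return PTR_ERR(base);
	}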
diff --git a/drivers/usb/host/xhci-mtk.h b/drivers/usb/host/xhci-mtk.h
index 7da677c79ea8..2845c49efe1b 100644
--- a/drivers/usb/host/xhci-mtk.h
+++ b/drivers/usb/host/xhci-mtk.h
@@ -118,6 +118,7 @@ struct xhci_hcd_mtk {
struct usb_hcd *hcd;
struct mu3h_sch_bw_info *sch_array;
struct mu3c_ippc_regs __iomem *ippc_regs;
+ bool has_ippc;
int num_u2_ports;
int num_u3_ports;
struct regulator *vusb33;
diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c
index ed56bf9ed885..ddfab301e366 100644
--- a/drivers/usb/host/xhci-plat.c
+++ b/drivers/usb/host/xhci-plat.c
@@ -100,6 +100,12 @@ static const struct xhci_plat_priv xhci_plat_renesas_rcar_gen3 = {
.plat_start = xhci_rcar_start,
};
+static const struct xhci_plat_priv xhci_plat_renesas_rcar_r8a7796 = {
+ .firmware_name = XHCI_RCAR_FIRMWARE_NAME_V3,
+ .init_quirk = xhci_rcar_init_quirk,
+ .plat_start = xhci_rcar_start,
+};
+
static const struct of_device_id usb_xhci_of_match[] = {
{
.compatible = "generic-xhci",
@@ -124,6 +130,9 @@ static const struct of_device_id usb_xhci_of_match[] = {
.compatible = "renesas,xhci-r8a7795",
.data = &xhci_plat_renesas_rcar_gen3,
}, {
+ .compatible = "renesas,xhci-r8a7796",
+ .data = &xhci_plat_renesas_rcar_r8a7796,
+ }, {
.compatible = "renesas,rcar-gen2-xhci",
.data = &xhci_plat_renesas_rcar_gen2,
}, {
diff --git a/drivers/usb/host/xhci-rcar.c b/drivers/usb/host/xhci-rcar.c
index 0e4535e632ec..d28df386e780 100644
--- a/drivers/usb/host/xhci-rcar.c
+++ b/drivers/usb/host/xhci-rcar.c
@@ -19,6 +19,8 @@
#include "xhci-rcar.h"
/*
+* - The V3 firmware is for r8a7796 (with good performance).
+* - The V2 firmware can be used on both r8a7795 (es1.x) and r8a7796.
* - The V2 firmware is possible to use on R-Car Gen2. However, the V2 causes
* performance degradation. So, this driver continues to use the V1 if R-Car
* Gen2.
@@ -26,6 +28,7 @@
*/
MODULE_FIRMWARE(XHCI_RCAR_FIRMWARE_NAME_V1);
MODULE_FIRMWARE(XHCI_RCAR_FIRMWARE_NAME_V2);
+MODULE_FIRMWARE(XHCI_RCAR_FIRMWARE_NAME_V3);
/*** Register Offset ***/
#define RCAR_USB3_INT_ENA 0x224 /* Interrupt Enable */
@@ -92,6 +95,7 @@ static int xhci_rcar_is_gen3(struct device *dev)
struct device_node *node = dev->of_node;
return of_device_is_compatible(node, "renesas,xhci-r8a7795") ||
+ of_device_is_compatible(node, "renesas,xhci-r8a7796") ||
of_device_is_compatible(node, "renesas,rcar-gen3-xhci");
}
diff --git a/drivers/usb/host/xhci-rcar.h b/drivers/usb/host/xhci-rcar.h
index 2941a25cfe98..d2ffe20401cf 100644
--- a/drivers/usb/host/xhci-rcar.h
+++ b/drivers/usb/host/xhci-rcar.h
@@ -13,6 +13,7 @@
#define XHCI_RCAR_FIRMWARE_NAME_V1 "r8a779x_usb3_v1.dlmem"
#define XHCI_RCAR_FIRMWARE_NAME_V2 "r8a779x_usb3_v2.dlmem"
+#define XHCI_RCAR_FIRMWARE_NAME_V3 "r8a779x_usb3_v3.dlmem"
#if IS_ENABLED(CONFIG_USB_XHCI_RCAR)
void xhci_rcar_start(struct usb_hcd *hcd);
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 797137e26549..bdf6b13d9b67 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -89,6 +89,11 @@ dma_addr_t xhci_trb_virt_to_dma(struct xhci_segment *seg,
return seg->dma + (segment_offset * sizeof(*trb));
}
+static bool trb_is_noop(union xhci_trb *trb)
+{
+ return TRB_TYPE_NOOP_LE32(trb->generic.field[3]);
+}
+
static bool trb_is_link(union xhci_trb *trb)
{
return TRB_TYPE_LINK_LE32(trb->link.control);
@@ -110,6 +115,20 @@ static bool link_trb_toggles_cycle(union xhci_trb *trb)
return le32_to_cpu(trb->link.control) & LINK_TOGGLE;
}
+static bool last_td_in_urb(struct xhci_td *td)
+{
+ struct urb_priv *urb_priv = td->urb->hcpriv;
+
+ return urb_priv->td_cnt == urb_priv->length;
+}
+
+static void inc_td_cnt(struct urb *urb)
+{
+ struct urb_priv *urb_priv = urb->hcpriv;
+
+ urb_priv->td_cnt++;
+}
+
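trb_is_noop() joins the existing trb_is_link()/link_trb_toggles_cycle()
helpers so the ring-walking code below stops open-coding TRB field tests. The
underlying LE32 macros in xhci.h compare the masked type field against a
byte-swapped constant, roughly:

	#define TRB_TYPE_NOOP_LE32(x)	(((x) & cpu_to_le32(TRB_TYPE_BITMASK)) == \
					 cpu_to_le32(TRB_TYPE(TRB_TR_NOOP)))
	#define TRB_TYPE_LINK_LE32(x)	(((x) & cpu_to_le32(TRB_TYPE_BITMASK)) == \
					 cpu_to_le32(TRB_TYPE(TRB_LINK)))

which keeps the comparison endian-safe without a le32_to_cpu() on the hot path.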
/* Updates trb to point to the next TRB in the ring, and updates seg if the next
* TRB is in a new segment. This does not skip over link TRBs, and it does not
* effect the ring dequeue or enqueue pointers.
@@ -303,7 +322,6 @@ static int xhci_abort_cmd_ring(struct xhci_hcd *xhci)
"maybe the host is dead\n");
del_timer(&xhci->cmd_timer);
xhci->xhc_state |= XHCI_STATE_DYING;
- xhci_quiesce(xhci);
xhci_halt(xhci);
return -ESHUTDOWN;
}
@@ -473,9 +491,8 @@ void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
if (new_deq == cur_td->last_trb)
td_last_trb_found = true;
- if (cycle_found &&
- TRB_TYPE_LINK_LE32(new_deq->generic.field[3]) &&
- new_deq->generic.field[3] & cpu_to_le32(LINK_TOGGLE))
+ if (cycle_found && trb_is_link(new_deq) &&
+ link_trb_toggles_cycle(new_deq))
state->new_cycle_state ^= 0x1;
next_trb(xhci, ep_ring, &new_seg, &new_deq);
@@ -511,54 +528,32 @@ void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
* of this TD.) This is used to remove partially enqueued isoc TDs from a ring.
*/
static void td_to_noop(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
- struct xhci_td *cur_td, bool flip_cycle)
+ struct xhci_td *td, bool flip_cycle)
{
- struct xhci_segment *cur_seg;
- union xhci_trb *cur_trb;
-
- for (cur_seg = cur_td->start_seg, cur_trb = cur_td->first_trb;
- true;
- next_trb(xhci, ep_ring, &cur_seg, &cur_trb)) {
- if (TRB_TYPE_LINK_LE32(cur_trb->generic.field[3])) {
- /* Unchain any chained Link TRBs, but
- * leave the pointers intact.
- */
- cur_trb->generic.field[3] &= cpu_to_le32(~TRB_CHAIN);
- /* Flip the cycle bit (link TRBs can't be the first
- * or last TRB).
- */
- if (flip_cycle)
- cur_trb->generic.field[3] ^=
- cpu_to_le32(TRB_CYCLE);
- xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
- "Cancel (unchain) link TRB");
- xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
- "Address = %p (0x%llx dma); "
- "in seg %p (0x%llx dma)",
- cur_trb,
- (unsigned long long)xhci_trb_virt_to_dma(cur_seg, cur_trb),
- cur_seg,
- (unsigned long long)cur_seg->dma);
+ struct xhci_segment *seg = td->start_seg;
+ union xhci_trb *trb = td->first_trb;
+
+ while (1) {
+ if (trb_is_link(trb)) {
+ /* unchain chained link TRBs */
+ trb->link.control &= cpu_to_le32(~TRB_CHAIN);
} else {
- cur_trb->generic.field[0] = 0;
- cur_trb->generic.field[1] = 0;
- cur_trb->generic.field[2] = 0;
+ trb->generic.field[0] = 0;
+ trb->generic.field[1] = 0;
+ trb->generic.field[2] = 0;
/* Preserve only the cycle bit of this TRB */
- cur_trb->generic.field[3] &= cpu_to_le32(TRB_CYCLE);
- /* Flip the cycle bit except on the first or last TRB */
- if (flip_cycle && cur_trb != cur_td->first_trb &&
- cur_trb != cur_td->last_trb)
- cur_trb->generic.field[3] ^=
- cpu_to_le32(TRB_CYCLE);
- cur_trb->generic.field[3] |= cpu_to_le32(
+ trb->generic.field[3] &= cpu_to_le32(TRB_CYCLE);
+ trb->generic.field[3] |= cpu_to_le32(
TRB_TYPE(TRB_TR_NOOP));
- xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
- "TRB to noop at offset 0x%llx",
- (unsigned long long)
- xhci_trb_virt_to_dma(cur_seg, cur_trb));
}
- if (cur_trb == cur_td->last_trb)
+ /* flip cycle if asked to */
+ if (flip_cycle && trb != td->first_trb && trb != td->last_trb)
+ trb->generic.field[3] ^= cpu_to_le32(TRB_CYCLE);
+
+ if (trb == td->last_trb)
break;
+
+ next_trb(xhci, ep_ring, &seg, &trb);
}
}
@@ -574,39 +569,33 @@ static void xhci_stop_watchdog_timer_in_irq(struct xhci_hcd *xhci,
ep->stop_cmds_pending--;
}
-/* Must be called with xhci->lock held in interrupt context */
+/*
+ * Must be called with xhci->lock held in interrupt context,
+ * releases and re-acquires xhci->lock
+ */
static void xhci_giveback_urb_in_irq(struct xhci_hcd *xhci,
- struct xhci_td *cur_td, int status)
+ struct xhci_td *cur_td, int status)
{
- struct usb_hcd *hcd;
- struct urb *urb;
- struct urb_priv *urb_priv;
+ struct urb *urb = cur_td->urb;
+ struct urb_priv *urb_priv = urb->hcpriv;
+ struct usb_hcd *hcd = bus_to_hcd(urb->dev->bus);
- urb = cur_td->urb;
- urb_priv = urb->hcpriv;
- urb_priv->td_cnt++;
- hcd = bus_to_hcd(urb->dev->bus);
-
- /* Only giveback urb when this is the last td in urb */
- if (urb_priv->td_cnt == urb_priv->length) {
- if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
- xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs--;
- if (xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs == 0) {
- if (xhci->quirks & XHCI_AMD_PLL_FIX)
- usb_amd_quirk_pll_enable();
- }
+ if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
+ xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs--;
+ if (xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs == 0) {
+ if (xhci->quirks & XHCI_AMD_PLL_FIX)
+ usb_amd_quirk_pll_enable();
}
- usb_hcd_unlink_urb_from_ep(hcd, urb);
-
- spin_unlock(&xhci->lock);
- usb_hcd_giveback_urb(hcd, urb, status);
- xhci_urb_free_priv(urb_priv);
- spin_lock(&xhci->lock);
}
+ xhci_urb_free_priv(urb_priv);
+ usb_hcd_unlink_urb_from_ep(hcd, urb);
+ spin_unlock(&xhci->lock);
+ usb_hcd_giveback_urb(hcd, urb, status);
+ spin_lock(&xhci->lock);
}
-void xhci_unmap_td_bounce_buffer(struct xhci_hcd *xhci, struct xhci_ring *ring,
- struct xhci_td *td)
+static void xhci_unmap_td_bounce_buffer(struct xhci_hcd *xhci,
+ struct xhci_ring *ring, struct xhci_td *td)
{
struct device *dev = xhci_to_hcd(xhci)->self.controller;
struct xhci_segment *seg = td->bounce_seg;
@@ -752,7 +741,9 @@ remove_finished_td:
ep_ring = xhci_urb_to_transfer_ring(xhci, cur_td->urb);
if (ep_ring && cur_td->bounce_seg)
xhci_unmap_td_bounce_buffer(xhci, ep_ring, cur_td);
- xhci_giveback_urb_in_irq(xhci, cur_td, 0);
+ inc_td_cnt(cur_td->urb);
+ if (last_td_in_urb(cur_td))
+ xhci_giveback_urb_in_irq(xhci, cur_td, 0);
/* Stop processing the cancelled list if the watchdog timer is
* running.
@@ -777,7 +768,10 @@ static void xhci_kill_ring_urbs(struct xhci_hcd *xhci, struct xhci_ring *ring)
if (cur_td->bounce_seg)
xhci_unmap_td_bounce_buffer(xhci, ring, cur_td);
- xhci_giveback_urb_in_irq(xhci, cur_td, -ESHUTDOWN);
+
+ inc_td_cnt(cur_td->urb);
+ if (last_td_in_urb(cur_td))
+ xhci_giveback_urb_in_irq(xhci, cur_td, -ESHUTDOWN);
}
}
@@ -814,7 +808,10 @@ static void xhci_kill_endpoint_urbs(struct xhci_hcd *xhci,
cur_td = list_first_entry(&ep->cancelled_td_list,
struct xhci_td, cancelled_td_list);
list_del_init(&cur_td->cancelled_td_list);
- xhci_giveback_urb_in_irq(xhci, cur_td, -ESHUTDOWN);
+
+ inc_td_cnt(cur_td->urb);
+ if (last_td_in_urb(cur_td))
+ xhci_giveback_urb_in_irq(xhci, cur_td, -ESHUTDOWN);
}
}
@@ -1003,8 +1000,7 @@ static void xhci_handle_cmd_set_deq(struct xhci_hcd *xhci, int slot_id,
break;
case COMP_CTX_STATE:
xhci_warn(xhci, "WARN Set TR Deq Ptr cmd failed due to incorrect slot or ep state.\n");
- ep_state = le32_to_cpu(ep_ctx->ep_info);
- ep_state &= EP_STATE_MASK;
+ ep_state = GET_EP_CTX_STATE(ep_ctx);
slot_state = le32_to_cpu(slot_ctx->dev_state);
slot_state = GET_SLOT_STATE(slot_state);
xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
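GET_EP_CTX_STATE() is the one-liner this series substitutes for every
open-coded "le32_to_cpu(ep_ctx->ep_info) & EP_STATE_MASK"; per xhci.h it is
simply:

	#define GET_EP_CTX_STATE(ctx)	(le32_to_cpu((ctx)->ep_info) & EP_STATE_MASK)

so callers compare directly against EP_STATE_DISABLED, EP_STATE_HALTED or
EP_STATE_RUNNING in host byte order.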
@@ -1096,12 +1092,12 @@ static void xhci_handle_cmd_reset_ep(struct xhci_hcd *xhci, int slot_id,
}
static void xhci_handle_cmd_enable_slot(struct xhci_hcd *xhci, int slot_id,
- u32 cmd_comp_code)
+ struct xhci_command *command, u32 cmd_comp_code)
{
if (cmd_comp_code == COMP_SUCCESS)
- xhci->slot_id = slot_id;
+ command->slot_id = slot_id;
else
- xhci->slot_id = 0;
+ command->slot_id = 0;
}
static void xhci_handle_cmd_disable_slot(struct xhci_hcd *xhci, int slot_id)
@@ -1183,7 +1179,7 @@ static void xhci_handle_cmd_nec_get_fw(struct xhci_hcd *xhci,
struct xhci_event_cmd *event)
{
if (!(xhci->quirks & XHCI_NEC_HOST)) {
- xhci->error_bitmask |= 1 << 6;
+ xhci_warn(xhci, "WARN NEC_GET_FW command on non-NEC host\n");
return;
}
xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
@@ -1325,14 +1321,13 @@ static void handle_cmd_completion(struct xhci_hcd *xhci,
cmd_trb = xhci->cmd_ring->dequeue;
cmd_dequeue_dma = xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg,
cmd_trb);
- /* Is the command ring deq ptr out of sync with the deq seg ptr? */
- if (cmd_dequeue_dma == 0) {
- xhci->error_bitmask |= 1 << 4;
- return;
- }
- /* Does the DMA address match our internal dequeue pointer address? */
- if (cmd_dma != (u64) cmd_dequeue_dma) {
- xhci->error_bitmask |= 1 << 5;
+ /*
+ * Check whether the completion event is for our internal kept
+ * command.
+ */
+ if (!cmd_dequeue_dma || cmd_dma != (u64)cmd_dequeue_dma) {
+ xhci_warn(xhci,
+ "ERROR mismatched command completion event\n");
return;
}
@@ -1371,7 +1366,7 @@ static void handle_cmd_completion(struct xhci_hcd *xhci,
cmd_type = TRB_FIELD_TO_TYPE(le32_to_cpu(cmd_trb->generic.field[3]));
switch (cmd_type) {
case TRB_ENABLE_SLOT:
- xhci_handle_cmd_enable_slot(xhci, slot_id, cmd_comp_code);
+ xhci_handle_cmd_enable_slot(xhci, slot_id, cmd, cmd_comp_code);
break;
case TRB_DISABLE_SLOT:
xhci_handle_cmd_disable_slot(xhci, slot_id);
@@ -1418,7 +1413,7 @@ static void handle_cmd_completion(struct xhci_hcd *xhci,
break;
default:
/* Skip over unknown commands on the event ring */
- xhci->error_bitmask |= 1 << 6;
+ xhci_info(xhci, "INFO unknown command type %d\n", cmd_type);
break;
}
@@ -1519,10 +1514,10 @@ static void handle_port_status(struct xhci_hcd *xhci,
bool bogus_port_status = false;
/* Port status change events always have a successful completion code */
- if (GET_COMP_CODE(le32_to_cpu(event->generic.field[2])) != COMP_SUCCESS) {
- xhci_warn(xhci, "WARN: xHC returned failed port status event\n");
- xhci->error_bitmask |= 1 << 8;
- }
+ if (GET_COMP_CODE(le32_to_cpu(event->generic.field[2])) != COMP_SUCCESS)
+ xhci_warn(xhci,
+ "WARN: xHC returned failed port status event\n");
+
port_id = GET_PORT_ID(le32_to_cpu(event->generic.field[0]));
xhci_dbg(xhci, "Port Status Change Event for port %d\n", port_id);
@@ -1759,7 +1754,7 @@ struct xhci_segment *trb_in_td(struct xhci_hcd *xhci,
static void xhci_cleanup_halted_endpoint(struct xhci_hcd *xhci,
unsigned int slot_id, unsigned int ep_index,
unsigned int stream_id,
- struct xhci_td *td, union xhci_trb *event_trb)
+ struct xhci_td *td, union xhci_trb *ep_trb)
{
struct xhci_virt_ep *ep = &xhci->devs[slot_id]->eps[ep_index];
struct xhci_command *command;
@@ -1798,8 +1793,7 @@ static int xhci_requires_manual_halt_cleanup(struct xhci_hcd *xhci,
* endpoint anyway. Check if a babble halted the
* endpoint.
*/
- if ((ep_ctx->ep_info & cpu_to_le32(EP_STATE_MASK)) ==
- cpu_to_le32(EP_STATE_HALTED))
+ if (GET_EP_CTX_STATE(ep_ctx) == EP_STATE_HALTED)
return 1;
return 0;
@@ -1824,7 +1818,7 @@ int xhci_is_vendor_info_code(struct xhci_hcd *xhci, unsigned int trb_comp_code)
* Return 1 if the urb can be given back.
*/
static int finish_td(struct xhci_hcd *xhci, struct xhci_td *td,
- union xhci_trb *event_trb, struct xhci_transfer_event *event,
+ union xhci_trb *ep_trb, struct xhci_transfer_event *event,
struct xhci_virt_ep *ep, int *status, bool skip)
{
struct xhci_virt_device *xdev;
@@ -1833,7 +1827,6 @@ static int finish_td(struct xhci_hcd *xhci, struct xhci_td *td,
int ep_index;
struct urb *urb = NULL;
struct xhci_ep_ctx *ep_ctx;
- int ret = 0;
struct urb_priv *urb_priv;
u32 trb_comp_code;
@@ -1866,7 +1859,7 @@ static int finish_td(struct xhci_hcd *xhci, struct xhci_td *td,
* The class driver clears the device side halt later.
*/
xhci_cleanup_halted_endpoint(xhci, slot_id, ep_index,
- ep_ring->stream_id, td, event_trb);
+ ep_ring->stream_id, td, ep_trb);
} else {
/* Update ring dequeue pointer */
while (ep_ring->dequeue != td->last_trb)
@@ -1889,41 +1882,54 @@ td_cleanup:
* unsigned). Play it safe and say we didn't transfer anything.
*/
if (urb->actual_length > urb->transfer_buffer_length) {
- xhci_warn(xhci, "URB transfer length is wrong, xHC issue? req. len = %u, act. len = %u\n",
- urb->transfer_buffer_length,
- urb->actual_length);
+ xhci_warn(xhci, "URB req %u and actual %u transfer length mismatch\n",
+ urb->transfer_buffer_length, urb->actual_length);
urb->actual_length = 0;
- if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
- *status = -EREMOTEIO;
- else
- *status = 0;
+ *status = 0;
}
list_del_init(&td->td_list);
/* Was this TD slated to be cancelled but completed anyway? */
if (!list_empty(&td->cancelled_td_list))
list_del_init(&td->cancelled_td_list);
- urb_priv->td_cnt++;
+ inc_td_cnt(urb);
/* Giveback the urb when all the tds are completed */
- if (urb_priv->td_cnt == urb_priv->length) {
- ret = 1;
- if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
- xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs--;
- if (xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs == 0) {
- if (xhci->quirks & XHCI_AMD_PLL_FIX)
- usb_amd_quirk_pll_enable();
- }
- }
+ if (last_td_in_urb(td)) {
+ if ((urb->actual_length != urb->transfer_buffer_length &&
+ (urb->transfer_flags & URB_SHORT_NOT_OK)) ||
+ (*status != 0 && !usb_endpoint_xfer_isoc(&urb->ep->desc)))
+ xhci_dbg(xhci, "Giveback URB %p, len = %d, expected = %d, status = %d\n",
+ urb, urb->actual_length,
+ urb->transfer_buffer_length, *status);
+
+ /* set isoc urb status to 0 just as EHCI, UHCI, and OHCI do */
+ if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS)
+ *status = 0;
+ xhci_giveback_urb_in_irq(xhci, td, *status);
}
+ return 0;
+}
- return ret;
+/* sum trb lengths from ring dequeue up to stop_trb, _excluding_ stop_trb */
+static int sum_trb_lengths(struct xhci_hcd *xhci, struct xhci_ring *ring,
+ union xhci_trb *stop_trb)
+{
+ u32 sum;
+ union xhci_trb *trb = ring->dequeue;
+ struct xhci_segment *seg = ring->deq_seg;
+
+ for (sum = 0; trb != stop_trb; next_trb(xhci, ring, &seg, &trb)) {
+ if (!trb_is_noop(trb) && !trb_is_link(trb))
+ sum += TRB_LEN(le32_to_cpu(trb->generic.field[2]));
+ }
+ return sum;
}
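sum_trb_lengths() is the building block for the reworked actual_length math:
bytes transferred = (lengths of all TRBs before the one the event points at)
plus (that TRB's programmed length minus the residue reported in the event).
A worked example with hypothetical numbers:

	/* TD of three 1024-byte TRBs, transfer stops on the second TRB
	 * with EVENT_TRB_LEN (remaining) = 200:
	 *
	 *   sum_trb_lengths()       = 1024              (first TRB only)
	 *   ep_trb_len - remaining  = 1024 - 200 = 824
	 *   urb->actual_length      = 1024 + 824 = 1848
	 */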
/*
* Process control tds, update urb status and actual_length.
*/
static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td,
- union xhci_trb *event_trb, struct xhci_transfer_event *event,
+ union xhci_trb *ep_trb, struct xhci_transfer_event *event,
struct xhci_virt_ep *ep, int *status)
{
struct xhci_virt_device *xdev;
@@ -1932,6 +1938,8 @@ static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td,
int ep_index;
struct xhci_ep_ctx *ep_ctx;
u32 trb_comp_code;
+ u32 remaining, requested;
+ bool on_data_stage;
slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags));
xdev = xhci->devs[slot_id];
@@ -1939,195 +1947,161 @@ static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td,
ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
+ requested = td->urb->transfer_buffer_length;
+ remaining = EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
+
+ /* neither the setup (dequeue) nor status (last) TRB means data stage */
+ on_data_stage = (ep_trb != ep_ring->dequeue && ep_trb != td->last_trb);
switch (trb_comp_code) {
case COMP_SUCCESS:
- if (event_trb == ep_ring->dequeue) {
- xhci_warn(xhci, "WARN: Success on ctrl setup TRB "
- "without IOC set??\n");
- *status = -ESHUTDOWN;
- } else if (event_trb != td->last_trb) {
- xhci_warn(xhci, "WARN: Success on ctrl data TRB "
- "without IOC set??\n");
+ if (ep_trb != td->last_trb) {
+ xhci_warn(xhci, "WARN: Success on ctrl %s TRB without IOC set?\n",
+ on_data_stage ? "data" : "setup");
*status = -ESHUTDOWN;
- } else {
- *status = 0;
+ break;
}
+ *status = 0;
break;
case COMP_SHORT_TX:
- if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
- *status = -EREMOTEIO;
- else
- *status = 0;
+ *status = 0;
break;
case COMP_STOP_SHORT:
- if (event_trb == ep_ring->dequeue || event_trb == td->last_trb)
- xhci_warn(xhci, "WARN: Stopped Short Packet on ctrl setup or status TRB\n");
+ if (on_data_stage)
+ td->urb->actual_length = remaining;
else
- td->urb->actual_length =
- EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
-
- return finish_td(xhci, td, event_trb, event, ep, status, false);
+ xhci_warn(xhci, "WARN: Stopped Short Packet on ctrl setup or status TRB\n");
+ goto finish_td;
case COMP_STOP:
- /* Did we stop at data stage? */
- if (event_trb != ep_ring->dequeue && event_trb != td->last_trb)
- td->urb->actual_length =
- td->urb->transfer_buffer_length -
- EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
- /* fall through */
+ if (on_data_stage)
+ td->urb->actual_length = requested - remaining;
+ goto finish_td;
case COMP_STOP_INVAL:
- return finish_td(xhci, td, event_trb, event, ep, status, false);
+ goto finish_td;
default:
if (!xhci_requires_manual_halt_cleanup(xhci,
- ep_ctx, trb_comp_code))
+ ep_ctx, trb_comp_code))
break;
- xhci_dbg(xhci, "TRB error code %u, "
- "halted endpoint index = %u\n",
- trb_comp_code, ep_index);
+ xhci_dbg(xhci, "TRB error %u, halted endpoint index = %u\n",
+ trb_comp_code, ep_index);
/* else fall through */
case COMP_STALL:
/* Did we transfer part of the data (middle) phase? */
- if (event_trb != ep_ring->dequeue &&
- event_trb != td->last_trb)
- td->urb->actual_length =
- td->urb->transfer_buffer_length -
- EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
+ if (on_data_stage)
+ td->urb->actual_length = requested - remaining;
else if (!td->urb_length_set)
td->urb->actual_length = 0;
-
- return finish_td(xhci, td, event_trb, event, ep, status, false);
+ goto finish_td;
}
+
+ /* stopped at setup stage, no data transferred */
+ if (ep_trb == ep_ring->dequeue)
+ goto finish_td;
+
/*
- * Did we transfer any data, despite the errors that might have
- * happened? I.e. did we get past the setup stage?
+ * if on data stage then update the actual_length of the URB and flag it
+ * as set, so it won't be overwritten in the event for the last TRB.
*/
- if (event_trb != ep_ring->dequeue) {
- /* The event was for the status stage */
- if (event_trb == td->last_trb) {
- if (td->urb_length_set) {
- /* Don't overwrite a previously set error code
- */
- if ((*status == -EINPROGRESS || *status == 0) &&
- (td->urb->transfer_flags
- & URB_SHORT_NOT_OK))
- /* Did we already see a short data
- * stage? */
- *status = -EREMOTEIO;
- } else {
- td->urb->actual_length =
- td->urb->transfer_buffer_length;
- }
- } else {
- /*
- * Maybe the event was for the data stage? If so, update
- * already the actual_length of the URB and flag it as
- * set, so that it is not overwritten in the event for
- * the last TRB.
- */
- td->urb_length_set = true;
- td->urb->actual_length =
- td->urb->transfer_buffer_length -
- EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
- xhci_dbg(xhci, "Waiting for status "
- "stage event\n");
- return 0;
- }
+ if (on_data_stage) {
+ td->urb_length_set = true;
+ td->urb->actual_length = requested - remaining;
+ xhci_dbg(xhci, "Waiting for status stage event\n");
+ return 0;
}
- return finish_td(xhci, td, event_trb, event, ep, status, false);
+ /* at status stage */
+ if (!td->urb_length_set)
+ td->urb->actual_length = requested;
+
+finish_td:
+ return finish_td(xhci, td, ep_trb, event, ep, status, false);
}
/*
* Process isochronous tds, update urb packet status and actual_length.
*/
static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
- union xhci_trb *event_trb, struct xhci_transfer_event *event,
+ union xhci_trb *ep_trb, struct xhci_transfer_event *event,
struct xhci_virt_ep *ep, int *status)
{
struct xhci_ring *ep_ring;
struct urb_priv *urb_priv;
int idx;
- int len = 0;
- union xhci_trb *cur_trb;
- struct xhci_segment *cur_seg;
struct usb_iso_packet_descriptor *frame;
u32 trb_comp_code;
- bool skip_td = false;
+ bool sum_trbs_for_length = false;
+ u32 remaining, requested, ep_trb_len;
+ int short_framestatus;
ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
urb_priv = td->urb->hcpriv;
idx = urb_priv->td_cnt;
frame = &td->urb->iso_frame_desc[idx];
+ requested = frame->length;
+ remaining = EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
+ ep_trb_len = TRB_LEN(le32_to_cpu(ep_trb->generic.field[2]));
+ short_framestatus = td->urb->transfer_flags & URB_SHORT_NOT_OK ?
+ -EREMOTEIO : 0;
/* handle completion code */
switch (trb_comp_code) {
case COMP_SUCCESS:
- if (EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)) == 0) {
- frame->status = 0;
+ if (remaining) {
+ frame->status = short_framestatus;
+ if (xhci->quirks & XHCI_TRUST_TX_LENGTH)
+ sum_trbs_for_length = true;
break;
}
- if ((xhci->quirks & XHCI_TRUST_TX_LENGTH))
- trb_comp_code = COMP_SHORT_TX;
- /* fallthrough */
- case COMP_STOP_SHORT:
+ frame->status = 0;
+ break;
case COMP_SHORT_TX:
- frame->status = td->urb->transfer_flags & URB_SHORT_NOT_OK ?
- -EREMOTEIO : 0;
+ frame->status = short_framestatus;
+ sum_trbs_for_length = true;
break;
case COMP_BW_OVER:
frame->status = -ECOMM;
- skip_td = true;
break;
case COMP_BUFF_OVER:
case COMP_BABBLE:
frame->status = -EOVERFLOW;
- skip_td = true;
break;
case COMP_DEV_ERR:
case COMP_STALL:
frame->status = -EPROTO;
- skip_td = true;
break;
case COMP_TX_ERR:
frame->status = -EPROTO;
- if (event_trb != td->last_trb)
+ if (ep_trb != td->last_trb)
return 0;
- skip_td = true;
break;
case COMP_STOP:
+ sum_trbs_for_length = true;
+ break;
+ case COMP_STOP_SHORT:
+ /* the field normally holding the residue holds bytes transferred here */
+ frame->status = short_framestatus;
+ requested = remaining;
+ break;
case COMP_STOP_INVAL:
+ requested = 0;
+ remaining = 0;
break;
default:
+ sum_trbs_for_length = true;
frame->status = -1;
break;
}
- if (trb_comp_code == COMP_SUCCESS || skip_td) {
- frame->actual_length = frame->length;
- td->urb->actual_length += frame->length;
- } else if (trb_comp_code == COMP_STOP_SHORT) {
- frame->actual_length =
- EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
- td->urb->actual_length += frame->actual_length;
- } else {
- for (cur_trb = ep_ring->dequeue,
- cur_seg = ep_ring->deq_seg; cur_trb != event_trb;
- next_trb(xhci, ep_ring, &cur_seg, &cur_trb)) {
- if (!TRB_TYPE_NOOP_LE32(cur_trb->generic.field[3]) &&
- !TRB_TYPE_LINK_LE32(cur_trb->generic.field[3]))
- len += TRB_LEN(le32_to_cpu(cur_trb->generic.field[2]));
- }
- len += TRB_LEN(le32_to_cpu(cur_trb->generic.field[2])) -
- EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
+ if (sum_trbs_for_length)
+ frame->actual_length = sum_trb_lengths(xhci, ep_ring, ep_trb) +
+ ep_trb_len - remaining;
+ else
+ frame->actual_length = requested;
- if (trb_comp_code != COMP_STOP_INVAL) {
- frame->actual_length = len;
- td->urb->actual_length += len;
- }
- }
+ td->urb->actual_length += frame->actual_length;
- return finish_td(xhci, td, event_trb, event, ep, status, false);
+ return finish_td(xhci, td, ep_trb, event, ep, status, false);
}
static int skip_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
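The COMP_STOP_SHORT leg above is the odd one out: for a Stopped - Short Packet
event the event TRB's transfer-length field carries the bytes actually
transferred rather than the usual residue, which is why the code folds it back
in via "requested = remaining" before the common frame->actual_length
assignment. With hypothetical numbers:

	/* 1024-byte isoc frame stopped short after 300 bytes:
	 *   EVENT_TRB_LEN (remaining) = 300   <- transferred, not residue
	 *   requested = remaining     = 300
	 *   frame->actual_length      = requested = 300
	 */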
@@ -2162,119 +2136,62 @@ static int skip_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
* Process bulk and interrupt tds, update urb status and actual_length.
*/
static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_td *td,
- union xhci_trb *event_trb, struct xhci_transfer_event *event,
+ union xhci_trb *ep_trb, struct xhci_transfer_event *event,
struct xhci_virt_ep *ep, int *status)
{
struct xhci_ring *ep_ring;
- union xhci_trb *cur_trb;
- struct xhci_segment *cur_seg;
u32 trb_comp_code;
+ u32 remaining, requested, ep_trb_len;
ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
+ remaining = EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
+ ep_trb_len = TRB_LEN(le32_to_cpu(ep_trb->generic.field[2]));
+ requested = td->urb->transfer_buffer_length;
switch (trb_comp_code) {
case COMP_SUCCESS:
- /* Double check that the HW transferred everything. */
- if (event_trb != td->last_trb ||
- EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)) != 0) {
- xhci_warn(xhci, "WARN Successful completion "
- "on short TX\n");
- if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
- *status = -EREMOTEIO;
- else
- *status = 0;
- if ((xhci->quirks & XHCI_TRUST_TX_LENGTH))
- trb_comp_code = COMP_SHORT_TX;
- } else {
- *status = 0;
+ /* handle success with untransferred data as short packet */
+ if (ep_trb != td->last_trb || remaining) {
+ xhci_warn(xhci, "WARN Successful completion on short TX\n");
+ xhci_dbg(xhci, "ep %#x - asked for %d bytes, %d bytes untransferred\n",
+ td->urb->ep->desc.bEndpointAddress,
+ requested, remaining);
}
+ *status = 0;
break;
- case COMP_STOP_SHORT:
case COMP_SHORT_TX:
- if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
- *status = -EREMOTEIO;
- else
- *status = 0;
+ xhci_dbg(xhci, "ep %#x - asked for %d bytes, %d bytes untransferred\n",
+ td->urb->ep->desc.bEndpointAddress,
+ requested, remaining);
+ *status = 0;
+ break;
+ case COMP_STOP_SHORT:
+ td->urb->actual_length = remaining;
+ goto finish_td;
+ case COMP_STOP_INVAL:
+ /* stopped on ep trb with invalid length, exclude it */
+ ep_trb_len = 0;
+ remaining = 0;
break;
default:
- /* Others already handled above */
+ /* do nothing */
break;
}
- if (trb_comp_code == COMP_SHORT_TX)
- xhci_dbg(xhci, "ep %#x - asked for %d bytes, "
- "%d bytes untransferred\n",
- td->urb->ep->desc.bEndpointAddress,
- td->urb->transfer_buffer_length,
- EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)));
- /* Stopped - short packet completion */
- if (trb_comp_code == COMP_STOP_SHORT) {
- td->urb->actual_length =
- EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
- if (td->urb->transfer_buffer_length <
- td->urb->actual_length) {
- xhci_warn(xhci, "HC gave bad length of %d bytes txed\n",
- EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)));
- td->urb->actual_length = 0;
- /* status will be set by usb core for canceled urbs */
- }
- /* Fast path - was this the last TRB in the TD for this URB? */
- } else if (event_trb == td->last_trb) {
- if (EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)) != 0) {
- td->urb->actual_length =
- td->urb->transfer_buffer_length -
- EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
- if (td->urb->transfer_buffer_length <
- td->urb->actual_length) {
- xhci_warn(xhci, "HC gave bad length "
- "of %d bytes left\n",
- EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)));
- td->urb->actual_length = 0;
- if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
- *status = -EREMOTEIO;
- else
- *status = 0;
- }
- /* Don't overwrite a previously set error code */
- if (*status == -EINPROGRESS) {
- if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
- *status = -EREMOTEIO;
- else
- *status = 0;
- }
- } else {
- td->urb->actual_length =
- td->urb->transfer_buffer_length;
- /* Ignore a short packet completion if the
- * untransferred length was zero.
- */
- if (*status == -EREMOTEIO)
- *status = 0;
- }
- } else {
- /* Slow path - walk the list, starting from the dequeue
- * pointer, to get the actual length transferred.
- */
+ if (ep_trb == td->last_trb)
+ td->urb->actual_length = requested - remaining;
+ else
+ td->urb->actual_length =
+ sum_trb_lengths(xhci, ep_ring, ep_trb) +
+ ep_trb_len - remaining;
+finish_td:
+ if (remaining > requested) {
+ xhci_warn(xhci, "bad transfer trb length %d in event trb\n",
+ remaining);
td->urb->actual_length = 0;
- for (cur_trb = ep_ring->dequeue, cur_seg = ep_ring->deq_seg;
- cur_trb != event_trb;
- next_trb(xhci, ep_ring, &cur_seg, &cur_trb)) {
- if (!TRB_TYPE_NOOP_LE32(cur_trb->generic.field[3]) &&
- !TRB_TYPE_LINK_LE32(cur_trb->generic.field[3]))
- td->urb->actual_length +=
- TRB_LEN(le32_to_cpu(cur_trb->generic.field[2]));
- }
- /* If the ring didn't stop on a Link or No-op TRB, add
- * in the actual bytes transferred from the Normal TRB
- */
- if (trb_comp_code != COMP_STOP_INVAL)
- td->urb->actual_length +=
- TRB_LEN(le32_to_cpu(cur_trb->generic.field[2])) -
- EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
}
-
- return finish_td(xhci, td, event_trb, event, ep, status, false);
+ return finish_td(xhci, td, ep_trb, event, ep, status, false);
}
/*
@@ -2293,16 +2210,13 @@ static int handle_tx_event(struct xhci_hcd *xhci,
unsigned int slot_id;
int ep_index;
struct xhci_td *td = NULL;
- dma_addr_t event_dma;
- struct xhci_segment *event_seg;
- union xhci_trb *event_trb;
- struct urb *urb = NULL;
+ dma_addr_t ep_trb_dma;
+ struct xhci_segment *ep_seg;
+ union xhci_trb *ep_trb;
int status = -EINPROGRESS;
- struct urb_priv *urb_priv;
struct xhci_ep_ctx *ep_ctx;
struct list_head *tmp;
u32 trb_comp_code;
- int ret = 0;
int td_num = 0;
bool handling_skipped_tds = false;
@@ -2328,9 +2242,7 @@ static int handle_tx_event(struct xhci_hcd *xhci,
ep = &xdev->eps[ep_index];
ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
- if (!ep_ring ||
- (le32_to_cpu(ep_ctx->ep_info) & EP_STATE_MASK) ==
- EP_STATE_DISABLED) {
+ if (!ep_ring || GET_EP_CTX_STATE(ep_ctx) == EP_STATE_DISABLED) {
xhci_err(xhci, "ERROR Transfer event for disabled endpoint "
"or incorrect stream ring\n");
xhci_err(xhci, "@%016llx %08x %08x %08x %08x\n",
@@ -2352,7 +2264,7 @@ static int handle_tx_event(struct xhci_hcd *xhci,
td_num++;
}
- event_dma = le64_to_cpu(event->buffer);
+ ep_trb_dma = le64_to_cpu(event->buffer);
trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
/* Look for common error cases */
switch (trb_comp_code) {
@@ -2480,7 +2392,6 @@ static int handle_tx_event(struct xhci_hcd *xhci,
xhci_dbg(xhci, "td_list is empty while skip "
"flag set. Clear skip flag.\n");
}
- ret = 0;
goto cleanup;
}
@@ -2489,7 +2400,6 @@ static int handle_tx_event(struct xhci_hcd *xhci,
ep->skip = false;
xhci_dbg(xhci, "All tds on the ep_ring skipped. "
"Clear skip flag.\n");
- ret = 0;
goto cleanup;
}
@@ -2498,8 +2408,8 @@ static int handle_tx_event(struct xhci_hcd *xhci,
td_num--;
/* Is this a TRB in the currently executing TD? */
- event_seg = trb_in_td(xhci, ep_ring->deq_seg, ep_ring->dequeue,
- td->last_trb, event_dma, false);
+ ep_seg = trb_in_td(xhci, ep_ring->deq_seg, ep_ring->dequeue,
+ td->last_trb, ep_trb_dma, false);
/*
* Skip the Force Stopped Event. The event_trb(event_dma) of FSE
@@ -2509,13 +2419,12 @@ static int handle_tx_event(struct xhci_hcd *xhci,
* last TRB of the previous TD. The command completion handle
* will take care the rest.
*/
- if (!event_seg && (trb_comp_code == COMP_STOP ||
+ if (!ep_seg && (trb_comp_code == COMP_STOP ||
trb_comp_code == COMP_STOP_INVAL)) {
- ret = 0;
goto cleanup;
}
- if (!event_seg) {
+ if (!ep_seg) {
if (!ep->skip ||
!usb_endpoint_xfer_isoc(&td->urb->ep->desc)) {
/* Some host controllers give a spurious
@@ -2525,7 +2434,6 @@ static int handle_tx_event(struct xhci_hcd *xhci,
if ((xhci->quirks & XHCI_SPURIOUS_SUCCESS) &&
ep_ring->last_td_was_short) {
ep_ring->last_td_was_short = false;
- ret = 0;
goto cleanup;
}
/* HC is busted, give up! */
@@ -2536,11 +2444,11 @@ static int handle_tx_event(struct xhci_hcd *xhci,
trb_comp_code);
trb_in_td(xhci, ep_ring->deq_seg,
ep_ring->dequeue, td->last_trb,
- event_dma, true);
+ ep_trb_dma, true);
return -ESHUTDOWN;
}
- ret = skip_isoc_td(xhci, td, event, ep, &status);
+ skip_isoc_td(xhci, td, event, ep, &status);
goto cleanup;
}
if (trb_comp_code == COMP_SHORT_TX)
@@ -2553,36 +2461,28 @@ static int handle_tx_event(struct xhci_hcd *xhci,
ep->skip = false;
}
- event_trb = &event_seg->trbs[(event_dma - event_seg->dma) /
- sizeof(*event_trb)];
+ ep_trb = &ep_seg->trbs[(ep_trb_dma - ep_seg->dma) /
+ sizeof(*ep_trb)];
/*
* No-op TRB should not trigger interrupts.
- * If event_trb is a no-op TRB, it means the
+ * If ep_trb is a no-op TRB, it means the
* corresponding TD has been cancelled. Just ignore
* the TD.
*/
- if (TRB_TYPE_NOOP_LE32(event_trb->generic.field[3])) {
- xhci_dbg(xhci,
- "event_trb is a no-op TRB. Skip it\n");
+ if (trb_is_noop(ep_trb)) {
+ xhci_dbg(xhci, "ep_trb is a no-op TRB. Skip it\n");
goto cleanup;
}
- /* Now update the urb's actual_length and give back to
- * the core
- */
+ /* update the urb's actual_length and give back to the core */
if (usb_endpoint_xfer_control(&td->urb->ep->desc))
- ret = process_ctrl_td(xhci, td, event_trb, event, ep,
- &status);
+ process_ctrl_td(xhci, td, ep_trb, event, ep, &status);
else if (usb_endpoint_xfer_isoc(&td->urb->ep->desc))
- ret = process_isoc_td(xhci, td, event_trb, event, ep,
- &status);
+ process_isoc_td(xhci, td, ep_trb, event, ep, &status);
else
- ret = process_bulk_intr_td(xhci, td, event_trb, event,
- ep, &status);
-
+ process_bulk_intr_td(xhci, td, ep_trb, event, ep,
+ &status);
cleanup:
-
-
handling_skipped_tds = ep->skip &&
trb_comp_code != COMP_MISSED_INT &&
trb_comp_code != COMP_PING_ERR;
@@ -2594,33 +2494,6 @@ cleanup:
if (!handling_skipped_tds)
inc_deq(xhci, xhci->event_ring);
- if (ret) {
- urb = td->urb;
- urb_priv = urb->hcpriv;
-
- xhci_urb_free_priv(urb_priv);
-
- usb_hcd_unlink_urb_from_ep(bus_to_hcd(urb->dev->bus), urb);
- if ((urb->actual_length != urb->transfer_buffer_length &&
- (urb->transfer_flags &
- URB_SHORT_NOT_OK)) ||
- (status != 0 &&
- !usb_endpoint_xfer_isoc(&urb->ep->desc)))
- xhci_dbg(xhci, "Giveback URB %p, len = %d, "
- "expected = %d, status = %d\n",
- urb, urb->actual_length,
- urb->transfer_buffer_length,
- status);
- spin_unlock(&xhci->lock);
- /* EHCI, UHCI, and OHCI always unconditionally set the
- * urb->status of an isochronous endpoint to 0.
- */
- if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS)
- status = 0;
- usb_hcd_giveback_urb(bus_to_hcd(urb->dev->bus), urb, status);
- spin_lock(&xhci->lock);
- }
-
/*
* If ep->skip is set, it means there are missed tds on the
* endpoint ring need to take care of.
@@ -2644,18 +2517,17 @@ static int xhci_handle_event(struct xhci_hcd *xhci)
int update_ptrs = 1;
int ret;
+ /* Event ring hasn't been allocated yet. */
if (!xhci->event_ring || !xhci->event_ring->dequeue) {
- xhci->error_bitmask |= 1 << 1;
- return 0;
+ xhci_err(xhci, "ERROR event ring not ready\n");
+ return -ENOMEM;
}
event = xhci->event_ring->dequeue;
/* Does the HC or OS own the TRB? */
if ((le32_to_cpu(event->event_cmd.flags) & TRB_CYCLE) !=
- xhci->event_ring->cycle_state) {
- xhci->error_bitmask |= 1 << 2;
+ xhci->event_ring->cycle_state)
return 0;
- }
/*
* Barrier between reading the TRB_CYCLE (valid) flag above and any
@@ -2663,7 +2535,7 @@ static int xhci_handle_event(struct xhci_hcd *xhci)
*/
rmb();
/* FIXME: Handle more event types. */
- switch ((le32_to_cpu(event->event_cmd.flags) & TRB_TYPE_BITMASK)) {
+ switch (le32_to_cpu(event->event_cmd.flags) & TRB_TYPE_BITMASK) {
case TRB_TYPE(TRB_COMPLETION):
handle_cmd_completion(xhci, &event->event_cmd);
break;
@@ -2673,9 +2545,7 @@ static int xhci_handle_event(struct xhci_hcd *xhci)
break;
case TRB_TYPE(TRB_TRANSFER):
ret = handle_tx_event(xhci, &event->trans_event);
- if (ret < 0)
- xhci->error_bitmask |= 1 << 9;
- else
+ if (ret >= 0)
update_ptrs = 0;
break;
case TRB_TYPE(TRB_DEV_NOTE):
@@ -2686,7 +2556,9 @@ static int xhci_handle_event(struct xhci_hcd *xhci)
TRB_TYPE(48))
handle_vendor_event(xhci, event);
else
- xhci->error_bitmask |= 1 << 3;
+ xhci_warn(xhci, "ERROR unknown event type %d\n",
+ TRB_FIELD_TO_TYPE(
+ le32_to_cpu(event->event_cmd.flags)));
}
/* Any of the above functions may drop and re-acquire the lock, so check
* to make sure a watchdog timer didn't mark the host as non-responsive.
@@ -2931,8 +2803,7 @@ static int prepare_transfer(struct xhci_hcd *xhci,
return -EINVAL;
}
- ret = prepare_ring(xhci, ep_ring,
- le32_to_cpu(ep_ctx->ep_info) & EP_STATE_MASK,
+ ret = prepare_ring(xhci, ep_ring, GET_EP_CTX_STATE(ep_ctx),
num_trbs, mem_flags);
if (ret)
return ret;
@@ -3120,7 +2991,7 @@ static u32 xhci_td_remainder(struct xhci_hcd *xhci, int transferred,
if (xhci->quirks & XHCI_MTK_HOST)
trb_buff_len = 0;
- maxp = GET_MAX_PACKET(usb_endpoint_maxp(&urb->ep->desc));
+ maxp = usb_endpoint_maxp(&urb->ep->desc);
total_packet_count = DIV_ROUND_UP(td_total_len, maxp);
/* Queueing functions don't count the current TRB into transferred */
@@ -3136,7 +3007,7 @@ static int xhci_align_td(struct xhci_hcd *xhci, struct urb *urb, u32 enqd_len,
unsigned int max_pkt;
u32 new_buff_len;
- max_pkt = GET_MAX_PACKET(usb_endpoint_maxp(&urb->ep->desc));
+ max_pkt = usb_endpoint_maxp(&urb->ep->desc);
unalign = (enqd_len + *trb_buff_len) % max_pkt;
/* we got lucky, last normal TRB data on segment is packet aligned */
@@ -3650,7 +3521,7 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
addr = start_addr + urb->iso_frame_desc[i].offset;
td_len = urb->iso_frame_desc[i].length;
td_remain_len = td_len;
- max_pkt = GET_MAX_PACKET(usb_endpoint_maxp(&urb->ep->desc));
+ max_pkt = usb_endpoint_maxp(&urb->ep->desc);
total_pkt_count = DIV_ROUND_UP(td_len, max_pkt);
/* A zero-length transfer still involves at least one packet. */
@@ -3828,7 +3699,7 @@ int xhci_queue_isoc_tx_prepare(struct xhci_hcd *xhci, gfp_t mem_flags,
/* Check the ring to guarantee there is enough room for the whole urb.
* Do not insert any td of the urb to the ring if the check failed.
*/
- ret = prepare_ring(xhci, ep_ring, le32_to_cpu(ep_ctx->ep_info) & EP_STATE_MASK,
+ ret = prepare_ring(xhci, ep_ring, GET_EP_CTX_STATE(ep_ctx),
num_trbs, mem_flags);
if (ret)
return ret;
@@ -3841,8 +3712,7 @@ int xhci_queue_isoc_tx_prepare(struct xhci_hcd *xhci, gfp_t mem_flags,
/* Calculate the start frame and put it in urb->start_frame. */
if (HCC_CFC(xhci->hcc_params) && !list_empty(&ep_ring->td_list)) {
- if ((le32_to_cpu(ep_ctx->ep_info) & EP_STATE_MASK) ==
- EP_STATE_RUNNING) {
+ if (GET_EP_CTX_STATE(ep_ctx) == EP_STATE_RUNNING) {
urb->start_frame = xep->next_frame_id;
goto skip_start_over;
}
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 1a4ca02729c2..1cd56417cbec 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -113,12 +113,12 @@ int xhci_halt(struct xhci_hcd *xhci)
ret = xhci_handshake(&xhci->op_regs->status,
STS_HALT, STS_HALT, XHCI_MAX_HALT_USEC);
- if (!ret) {
- xhci->xhc_state |= XHCI_STATE_HALTED;
- xhci->cmd_ring_state = CMD_RING_STATE_STOPPED;
- } else
- xhci_warn(xhci, "Host not halted after %u microseconds.\n",
- XHCI_MAX_HALT_USEC);
+ if (ret) {
+ xhci_warn(xhci, "Host halt failed, %d\n", ret);
+ return ret;
+ }
+ xhci->xhc_state |= XHCI_STATE_HALTED;
+ xhci->cmd_ring_state = CMD_RING_STATE_STOPPED;
return ret;
}
@@ -167,6 +167,12 @@ int xhci_reset(struct xhci_hcd *xhci)
int ret, i;
state = readl(&xhci->op_regs->status);
+
+ if (state == ~(u32)0) {
+ xhci_warn(xhci, "Host not accessible, reset failed.\n");
+ return -ENODEV;
+ }
+
if ((state & STS_HALT) == 0) {
xhci_warn(xhci, "Host controller not halted, aborting reset.\n");
return 0;
@@ -690,7 +696,6 @@ void xhci_stop(struct usb_hcd *hcd)
xhci->cmd_ring_state = CMD_RING_STATE_STOPPED;
xhci_halt(xhci);
xhci_reset(xhci);
-
spin_unlock_irq(&xhci->lock);
}
@@ -1645,8 +1650,7 @@ int xhci_drop_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
/* If the HC already knows the endpoint is disabled,
* or the HCD has noted it is disabled, ignore this request
*/
- if (((ep_ctx->ep_info & cpu_to_le32(EP_STATE_MASK)) ==
- cpu_to_le32(EP_STATE_DISABLED)) ||
+ if ((GET_EP_CTX_STATE(ep_ctx) == EP_STATE_DISABLED) ||
le32_to_cpu(ctrl_ctx->drop_flags) &
xhci_get_endpoint_flag(&ep->desc)) {
/* Do not warn when called after a usb_device_reset */
@@ -3209,7 +3213,7 @@ int xhci_alloc_streams(struct usb_hcd *hcd, struct usb_device *udev,
for (i = 0; i < num_eps; i++) {
ep_index = xhci_get_endpoint_index(&eps[i]->desc);
- max_packet = GET_MAX_PACKET(usb_endpoint_maxp(&eps[i]->desc));
+ max_packet = usb_endpoint_maxp(&eps[i]->desc);
vdev->eps[ep_index].stream_info = xhci_alloc_stream_info(xhci,
num_stream_ctxs,
num_streams,
@@ -3683,27 +3687,26 @@ int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev)
int ret, slot_id;
struct xhci_command *command;
- command = xhci_alloc_command(xhci, false, false, GFP_KERNEL);
+ command = xhci_alloc_command(xhci, false, true, GFP_KERNEL);
if (!command)
return 0;
/* xhci->slot_id and xhci->addr_dev are not thread-safe */
mutex_lock(&xhci->mutex);
spin_lock_irqsave(&xhci->lock, flags);
- command->completion = &xhci->addr_dev;
ret = xhci_queue_slot_control(xhci, command, TRB_ENABLE_SLOT, 0);
if (ret) {
spin_unlock_irqrestore(&xhci->lock, flags);
mutex_unlock(&xhci->mutex);
xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
- kfree(command);
+ xhci_free_command(xhci, command);
return 0;
}
xhci_ring_cmd_db(xhci);
spin_unlock_irqrestore(&xhci->lock, flags);
wait_for_completion(command->completion);
- slot_id = xhci->slot_id;
+ slot_id = command->slot_id;
mutex_unlock(&xhci->mutex);
if (!slot_id || command->status != COMP_SUCCESS) {
@@ -3711,7 +3714,7 @@ int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev)
xhci_err(xhci, "Max number of devices this xHCI host supports is %u.\n",
HCS_MAX_SLOTS(
readl(&xhci->cap_regs->hcs_params1)));
- kfree(command);
+ xhci_free_command(xhci, command);
return 0;
}
@@ -3747,7 +3750,7 @@ int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev)
#endif
- kfree(command);
+ xhci_free_command(xhci, command);
/* Is this a LS or FS device under a HS hub? */
 /* Hub or peripheral? */
return 1;
@@ -3755,6 +3758,7 @@ int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev)
disable_slot:
/* Disable slot, if we can do it without mem alloc */
spin_lock_irqsave(&xhci->lock, flags);
+ kfree(command->completion);
command->completion = NULL;
command->status = 0;
if (!xhci_queue_slot_control(xhci, command, TRB_DISABLE_SLOT,
@@ -3816,14 +3820,13 @@ static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev,
}
}
- command = xhci_alloc_command(xhci, false, false, GFP_KERNEL);
+ command = xhci_alloc_command(xhci, false, true, GFP_KERNEL);
if (!command) {
ret = -ENOMEM;
goto out;
}
command->in_ctx = virt_dev->in_ctx;
- command->completion = &xhci->addr_dev;
slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx);
ctrl_ctx = xhci_get_input_control_ctx(virt_dev->in_ctx);
@@ -3941,7 +3944,10 @@ static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev,
le32_to_cpu(slot_ctx->dev_state) & DEV_ADDR_MASK);
out:
mutex_unlock(&xhci->mutex);
- kfree(command);
+ if (command) {
+ kfree(command->completion);
+ kfree(command);
+ }
return ret;
}
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index f945380035d0..8ccc11a974b8 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -709,6 +709,8 @@ struct xhci_ep_ctx {
#define EP_STATE_HALTED 2
#define EP_STATE_STOPPED 3
#define EP_STATE_ERROR 4
+#define GET_EP_CTX_STATE(ctx) (le32_to_cpu((ctx)->ep_info) & EP_STATE_MASK)
+
/* Mult - Max number of bursts within an interval, in EP companion desc. */
#define EP_MULT(p) (((p) & 0x3) << 8)
#define CTX_TO_EP_MULT(p) (((p) >> 8) & 0x3)
@@ -747,11 +749,6 @@ struct xhci_ep_ctx {
#define MAX_PACKET_MASK (0xffff << 16)
#define MAX_PACKET_DECODED(p) (((p) >> 16) & 0xffff)
-/* Get max packet size from ep desc. Bit 10..0 specify the max packet size.
- * USB2.0 spec 9.6.6.
- */
-#define GET_MAX_PACKET(p) ((p) & 0x7ff)
-
/* tx_info bitmasks */
#define EP_AVG_TRB_LENGTH(p) ((p) & 0xffff)
#define EP_MAX_ESIT_PAYLOAD_LO(p) (((p) & 0xffff) << 16)
@@ -789,6 +786,7 @@ struct xhci_command {
/* Input context for changing device state */
struct xhci_container_ctx *in_ctx;
u32 status;
+ int slot_id;
/* If completion is null, no one is waiting on this command
* and the structure can be freed after the command completes.
*/
@@ -997,7 +995,6 @@ struct xhci_virt_device {
int num_rings_cached;
#define XHCI_MAX_RINGS_CACHED 31
struct xhci_virt_ep eps[31];
- struct completion cmd_completion;
u8 fake_port;
u8 real_port;
struct xhci_interval_bw_table *bw_table;
@@ -1583,8 +1580,6 @@ struct xhci_hcd {
/* slot enabling and address device helpers */
/* these are not thread safe so use mutex */
struct mutex mutex;
- struct completion addr_dev;
- int slot_id;
/* For USB 3.0 LPM enable/disable. */
struct xhci_command *lpm_command;
/* Internal mirror of the HW's dcbaa */
@@ -1618,8 +1613,6 @@ struct xhci_hcd {
#define XHCI_STATE_DYING (1 << 0)
#define XHCI_STATE_HALTED (1 << 1)
#define XHCI_STATE_REMOVING (1 << 2)
- /* Statistics */
- int error_bitmask;
unsigned int quirks;
#define XHCI_LINK_TRB_QUIRK (1 << 0)
#define XHCI_RESET_EP_QUIRK (1 << 1)
diff --git a/drivers/usb/isp1760/isp1760-if.c b/drivers/usb/isp1760/isp1760-if.c
index 9535b2872183..79205b31e4a9 100644
--- a/drivers/usb/isp1760/isp1760-if.c
+++ b/drivers/usb/isp1760/isp1760-if.c
@@ -197,7 +197,7 @@ static int isp1760_plat_probe(struct platform_device *pdev)
irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
if (!irq_res) {
- pr_warning("isp1760: IRQ resource not available\n");
+ pr_warn("isp1760: IRQ resource not available\n");
return -ENODEV;
}
irqflags = irq_res->flags & IRQF_TRIGGER_MASK;
diff --git a/drivers/usb/misc/chaoskey.c b/drivers/usb/misc/chaoskey.c
index 6ddd08a32777..aa350dc9eb25 100644
--- a/drivers/usb/misc/chaoskey.c
+++ b/drivers/usb/misc/chaoskey.c
@@ -215,19 +215,7 @@ static int chaoskey_probe(struct usb_interface *interface,
dev->hwrng.name = dev->name ? dev->name : chaoskey_driver.name;
dev->hwrng.read = chaoskey_rng_read;
-
- /* Set the 'quality' metric. Quality is measured in units of
- * 1/1024's of a bit ("mills"). This should be set to 1024,
- * but there is a bug in the hwrng core which masks it with
- * 1023.
- *
- * The patch that has been merged to the crypto development
- * tree for that bug limits the value to 1024 at most, so by
- * setting this to 1024 + 1023, we get 1023 before the fix is
- * merged and 1024 afterwards. We'll patch this driver once
- * both bits of code are in the same tree.
- */
- dev->hwrng.quality = 1024 + 1023;
+ dev->hwrng.quality = 1024;
dev->hwrng_registered = (hwrng_register(&dev->hwrng) == 0);
if (!dev->hwrng_registered)
diff --git a/drivers/usb/misc/rio500.c b/drivers/usb/misc/rio500.c
index 13731d512624..fc329c98a6e8 100644
--- a/drivers/usb/misc/rio500.c
+++ b/drivers/usb/misc/rio500.c
@@ -421,7 +421,7 @@ read_rio(struct file *file, char __user *buffer, size_t count, loff_t * ppos)
} else if (result != -EREMOTEIO) {
mutex_unlock(&(rio->lock));
dev_err(&rio->rio_dev->dev,
- "Read Whoops - result:%u partial:%u this_read:%u\n",
+ "Read Whoops - result:%d partial:%u this_read:%u\n",
result, partial, this_read);
return -EIO;
} else {
diff --git a/drivers/usb/misc/sisusbvga/sisusb_con.c b/drivers/usb/misc/sisusbvga/sisusb_con.c
index 460cebf322e3..4b5777ec1501 100644
--- a/drivers/usb/misc/sisusbvga/sisusb_con.c
+++ b/drivers/usb/misc/sisusbvga/sisusb_con.c
@@ -686,8 +686,6 @@ static void
sisusbcon_scrolldelta(struct vc_data *c, int lines)
{
struct sisusb_usb_data *sisusb;
- int margin = c->vc_size_row * 4;
- int ul, we, p, st;
sisusb = sisusb_get_sisusb_lock_and_check(c->vc_num);
if (!sisusb)
@@ -700,39 +698,8 @@ sisusbcon_scrolldelta(struct vc_data *c, int lines)
return;
}
- if (!lines) /* Turn scrollback off */
- c->vc_visible_origin = c->vc_origin;
- else {
-
- if (sisusb->con_rolled_over >
- (c->vc_scr_end - sisusb->scrbuf) + margin) {
-
- ul = c->vc_scr_end - sisusb->scrbuf;
- we = sisusb->con_rolled_over + c->vc_size_row;
-
- } else {
-
- ul = 0;
- we = sisusb->scrbuf_size;
-
- }
-
- p = (c->vc_visible_origin - sisusb->scrbuf - ul + we) % we +
- lines * c->vc_size_row;
-
- st = (c->vc_origin - sisusb->scrbuf - ul + we) % we;
-
- if (st < 2 * margin)
- margin = 0;
-
- if (p < margin)
- p = 0;
-
- if (p > st - margin)
- p = st;
-
- c->vc_visible_origin = sisusb->scrbuf + (p + ul) % we;
- }
+ vc_scrolldelta_helper(c, lines, sisusb->con_rolled_over,
+ (void *)sisusb->scrbuf, sisusb->scrbuf_size);
sisusbcon_set_start_address(sisusb, c);
@@ -808,9 +775,10 @@ sisusbcon_cursor(struct vc_data *c, int mode)
mutex_unlock(&sisusb->lock);
}
-static int
+static bool
sisusbcon_scroll_area(struct vc_data *c, struct sisusb_usb_data *sisusb,
- int t, int b, int dir, int lines)
+ unsigned int t, unsigned int b, enum con_scroll dir,
+ unsigned int lines)
{
int cols = sisusb->sisusb_num_columns;
int length = ((b - t) * cols) * 2;
@@ -852,8 +820,9 @@ sisusbcon_scroll_area(struct vc_data *c, struct sisusb_usb_data *sisusb,
}
/* Interface routine */
-static int
-sisusbcon_scroll(struct vc_data *c, int t, int b, int dir, int lines)
+static bool
+sisusbcon_scroll(struct vc_data *c, unsigned int t, unsigned int b,
+ enum con_scroll dir, unsigned int lines)
{
struct sisusb_usb_data *sisusb;
u16 eattr = c->vc_video_erase_char;
@@ -870,17 +839,17 @@ sisusbcon_scroll(struct vc_data *c, int t, int b, int dir, int lines)
*/
if (!lines)
- return 1;
+ return true;
sisusb = sisusb_get_sisusb_lock_and_check(c->vc_num);
if (!sisusb)
- return 0;
+ return false;
/* sisusb->lock is down */
if (sisusb_is_inactive(c, sisusb)) {
mutex_unlock(&sisusb->lock);
- return 0;
+ return false;
}
/* Special case */
@@ -971,7 +940,7 @@ sisusbcon_scroll(struct vc_data *c, int t, int b, int dir, int lines)
mutex_unlock(&sisusb->lock);
- return 1;
+ return true;
}
/* Interface routine */
diff --git a/drivers/usb/misc/usbtest.c b/drivers/usb/misc/usbtest.c
index 5c8210dc6fd9..3525626bf086 100644
--- a/drivers/usb/misc/usbtest.c
+++ b/drivers/usb/misc/usbtest.c
@@ -1915,7 +1915,7 @@ static struct urb *iso_alloc_urb(
if (bytes < 0 || !desc)
return NULL;
maxp = 0x7ff & usb_endpoint_maxp(desc);
- maxp *= 1 + (0x3 & (usb_endpoint_maxp(desc) >> 11));
+ maxp *= usb_endpoint_maxp_mult(desc);
packets = DIV_ROUND_UP(bytes, maxp);
urb = usb_alloc_urb(packets, GFP_KERNEL);
@@ -2001,8 +2001,8 @@ test_queue(struct usbtest_dev *dev, struct usbtest_param_32 *param,
"iso period %d %sframes, wMaxPacket %d, transactions: %d\n",
1 << (desc->bInterval - 1),
(udev->speed == USB_SPEED_HIGH) ? "micro" : "",
- usb_endpoint_maxp(desc) & 0x7ff,
- 1 + (0x3 & (usb_endpoint_maxp(desc) >> 11)));
+ usb_endpoint_maxp(desc),
+ usb_endpoint_maxp_mult(desc));
dev_info(&dev->intf->dev,
"total %lu msec (%lu packets)\n",
diff --git a/drivers/usb/mtu3/Kconfig b/drivers/usb/mtu3/Kconfig
new file mode 100644
index 000000000000..25cd61947bee
--- /dev/null
+++ b/drivers/usb/mtu3/Kconfig
@@ -0,0 +1,54 @@
+# For MTK USB3.0 IP
+
+config USB_MTU3
+ tristate "MediaTek USB3 Dual Role controller"
+ depends on EXTCON && (USB || USB_GADGET) && HAS_DMA
+ depends on ARCH_MEDIATEK || COMPILE_TEST
+ select USB_XHCI_MTK if USB_SUPPORT && USB_XHCI_HCD
+ help
+ Say Y or M here if your system runs on MediaTek SoCs with
+ a Dual Role SuperSpeed USB controller. You can select USB
+ peripheral role, host role, or both.
+
+ If you don't know what this is, please say N.
+
+ Choose M here to compile this driver as a module, and it
+ will be called mtu3.ko.
+
+if USB_MTU3
+choice
+ bool "MTU3 Mode Selection"
+ default USB_MTU3_DUAL_ROLE if (USB && USB_GADGET)
+ default USB_MTU3_HOST if (USB && !USB_GADGET)
+ default USB_MTU3_GADGET if (!USB && USB_GADGET)
+
+config USB_MTU3_HOST
+ bool "Host only mode"
+ depends on USB=y || USB=USB_MTU3
+ help
+ Select this when you want to use MTU3 in host mode only;
+ the gadget features will then be disabled.
+
+config USB_MTU3_GADGET
+ bool "Gadget only mode"
+ depends on USB_GADGET=y || USB_GADGET=USB_MTU3
+ help
+ Select this when you want to use MTU3 in gadget mode only;
+ the host features will then be disabled.
+
+config USB_MTU3_DUAL_ROLE
+ bool "Dual Role mode"
+ depends on ((USB=y || USB=USB_MTU3) && (USB_GADGET=y || USB_GADGET=USB_MTU3))
+ help
+ This is the default working mode of the MTU3 controller, where
+ both host and gadget features are enabled.
+
+endchoice
+
+config USB_MTU3_DEBUG
+ bool "Enable Debugging Messages"
+ help
+ Say Y here to enable debugging messages in the MTU3 Driver.
+
+endif
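+
+# Editor's note (illustrative, not part of the original file): a typical
+# dual-role build would end up with:
+#   CONFIG_USB_MTU3=m
+#   CONFIG_USB_MTU3_DUAL_ROLE=y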
diff --git a/drivers/usb/mtu3/Makefile b/drivers/usb/mtu3/Makefile
new file mode 100644
index 000000000000..60e0fff7a847
--- /dev/null
+++ b/drivers/usb/mtu3/Makefile
@@ -0,0 +1,18 @@
+ccflags-$(CONFIG_USB_MTU3_DEBUG) += -DDEBUG
+
+obj-$(CONFIG_USB_MTU3) += mtu3.o
+
+mtu3-y := mtu3_plat.o
+
+ifneq ($(filter y,$(CONFIG_USB_MTU3_HOST) $(CONFIG_USB_MTU3_DUAL_ROLE)),)
+ mtu3-y += mtu3_host.o
+endif
+
+ifneq ($(filter y,$(CONFIG_USB_MTU3_GADGET) $(CONFIG_USB_MTU3_DUAL_ROLE)),)
+ mtu3-y += mtu3_core.o mtu3_gadget_ep0.o mtu3_gadget.o mtu3_qmu.o
+endif
+
+ifneq ($(CONFIG_USB_MTU3_DUAL_ROLE),)
+ mtu3-y += mtu3_dr.o
+endif
diff --git a/drivers/usb/mtu3/mtu3.h b/drivers/usb/mtu3/mtu3.h
new file mode 100644
index 000000000000..ba9df719f363
--- /dev/null
+++ b/drivers/usb/mtu3/mtu3.h
@@ -0,0 +1,417 @@
+/*
+ * mtu3.h - MediaTek USB3 DRD header
+ *
+ * Copyright (C) 2016 MediaTek Inc.
+ *
+ * Author: Chunfeng Yun <chunfeng.yun@mediatek.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __MTU3_H__
+#define __MTU3_H__
+
+#include <linux/device.h>
+#include <linux/dmapool.h>
+#include <linux/extcon.h>
+#include <linux/interrupt.h>
+#include <linux/list.h>
+#include <linux/phy/phy.h>
+#include <linux/regulator/consumer.h>
+#include <linux/usb.h>
+#include <linux/usb/ch9.h>
+#include <linux/usb/gadget.h>
+#include <linux/usb/otg.h>
+
+struct mtu3;
+struct mtu3_ep;
+struct mtu3_request;
+
+#include "mtu3_hw_regs.h"
+#include "mtu3_qmu.h"
+
+#define MU3D_EP_TXCR0(epnum) (U3D_TX1CSR0 + (((epnum) - 1) * 0x10))
+#define MU3D_EP_TXCR1(epnum) (U3D_TX1CSR1 + (((epnum) - 1) * 0x10))
+#define MU3D_EP_TXCR2(epnum) (U3D_TX1CSR2 + (((epnum) - 1) * 0x10))
+
+#define MU3D_EP_RXCR0(epnum) (U3D_RX1CSR0 + (((epnum) - 1) * 0x10))
+#define MU3D_EP_RXCR1(epnum) (U3D_RX1CSR1 + (((epnum) - 1) * 0x10))
+#define MU3D_EP_RXCR2(epnum) (U3D_RX1CSR2 + (((epnum) - 1) * 0x10))
+
+#define USB_QMU_RQCSR(epnum) (U3D_RXQCSR1 + (((epnum) - 1) * 0x10))
+#define USB_QMU_RQSAR(epnum) (U3D_RXQSAR1 + (((epnum) - 1) * 0x10))
+#define USB_QMU_RQCPR(epnum) (U3D_RXQCPR1 + (((epnum) - 1) * 0x10))
+
+#define USB_QMU_TQCSR(epnum) (U3D_TXQCSR1 + (((epnum) - 1) * 0x10))
+#define USB_QMU_TQSAR(epnum) (U3D_TXQSAR1 + (((epnum) - 1) * 0x10))
+#define USB_QMU_TQCPR(epnum) (U3D_TXQCPR1 + (((epnum) - 1) * 0x10))
+
+#define SSUSB_U3_CTRL(p) (U3D_SSUSB_U3_CTRL_0P + ((p) * 0x08))
+#define SSUSB_U2_CTRL(p) (U3D_SSUSB_U2_CTRL_0P + ((p) * 0x08))
+
+#define MTU3_DRIVER_NAME "mtu3"
+#define DMA_ADDR_INVALID (~(dma_addr_t)0)
+
+#define MTU3_EP_ENABLED BIT(0)
+#define MTU3_EP_STALL BIT(1)
+#define MTU3_EP_WEDGE BIT(2)
+#define MTU3_EP_BUSY BIT(3)
+
+#define MTU3_U3_IP_SLOT_DEFAULT 2
+#define MTU3_U2_IP_SLOT_DEFAULT 1
+
+/**
+ * Normally the device works at HS or SS. To simplify fifo management,
+ * the fifo is divided into 512B units and managed with a bitmap; a
+ * 128-bit bitmap is large enough to manage up to 64KB of fifo.
+ * NOTE: MTU3_EP_FIFO_UNIT should be a power of two
+ */
+#define MTU3_EP_FIFO_UNIT (1 << 9)
+#define MTU3_FIFO_BIT_SIZE 128
+#define MTU3_U2_IP_EP0_FIFO_SIZE 64
+
+/**
+ * Maximum size of the ep0 response buffer for ch9 requests;
+ * the SET_SEL request uses 6 bytes so far, and GET_STATUS uses 2
+ */
+#define EP0_RESPONSE_BUF 6
+
+/* device operated link and speed got from DEVICE_CONF register */
+enum mtu3_speed {
+ MTU3_SPEED_INACTIVE = 0,
+ MTU3_SPEED_FULL = 1,
+ MTU3_SPEED_HIGH = 3,
+ MTU3_SPEED_SUPER = 4,
+};
+
+/**
+ * @MU3D_EP0_STATE_SETUP: waiting for a SETUP, or received a SETUP
+ *		without a data stage.
+ * @MU3D_EP0_STATE_TX: IN data stage
+ * @MU3D_EP0_STATE_RX: OUT data stage
+ * @MU3D_EP0_STATE_TX_END: the last IN data has been transferred;
+ *		waiting for its completion interrupt
+ * @MU3D_EP0_STATE_STALL: ep0 is stalled; the stall is auto-cleared
+ *		after a SETUP is received.
+ */
+enum mtu3_g_ep0_state {
+ MU3D_EP0_STATE_SETUP = 1,
+ MU3D_EP0_STATE_TX,
+ MU3D_EP0_STATE_RX,
+ MU3D_EP0_STATE_TX_END,
+ MU3D_EP0_STATE_STALL,
+};
+
+/**
+ * @base: the base address of fifo
+ * @limit: the bitmap size in bits
+ * @bitmap: fifo bitmap in unit of @MTU3_EP_FIFO_UNIT
+ */
+struct mtu3_fifo_info {
+ u32 base;
+ u32 limit;
+ DECLARE_BITMAP(bitmap, MTU3_FIFO_BIT_SIZE);
+};
+
+/**
+ * General Purpose Descriptor (GPD):
+ * The format of TX GPD is a little different from RX one.
+ * And the size of GPD is 16 bytes.
+ *
+ * @flag:
+ * bit0: Hardware Own (HWO)
+ * bit1: Buffer Descriptor Present (BDP), always 0, BD is not supported
+ * bit2: Bypass (BPS), 1: HW skips this GPD if HWO = 1
+ * bit7: Interrupt On Completion (IOC)
+ * @chksum: This is used to validate the contents of this GPD;
+ * If TXQ_CS_EN / RXQ_CS_EN bit is set, an interrupt is issued
+ * when checksum validation fails;
+ * Checksum value is calculated over the 16 bytes of the GPD by default;
+ * @data_buf_len (RX ONLY): This value indicates the length of
+ * the assigned data buffer
+ * @next_gpd: Physical address of the next GPD
+ * @buffer: Physical address of the data buffer
+ * @buf_len:
+ * (TX): This value indicates the length of the assigned data buffer
+ * (RX): The total length of data received
+ * @ext_len: reserved
+ * @ext_flag:
+ * bit5 (TX ONLY): Zero Length Packet (ZLP),
+ */
+struct qmu_gpd {
+ __u8 flag;
+ __u8 chksum;
+ __le16 data_buf_len;
+ __le32 next_gpd;
+ __le32 buffer;
+ __le16 buf_len;
+ __u8 ext_len;
+ __u8 ext_flag;
+} __packed;
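+
+/*
+ * Editor's sketch (hypothetical, not part of this driver): filling in a
+ * TX GPD per the layout above, assuming HWO is BIT(0), IOC is BIT(7)
+ * and ZLP is BIT(5) of @ext_flag, as listed:
+ *
+ *	gpd->buffer = cpu_to_le32(lower_32_bits(dma_addr));
+ *	gpd->buf_len = cpu_to_le16(data_len);
+ *	gpd->flag |= BIT(7);	(request an IOC interrupt)
+ *	gpd->flag |= BIT(0);	(set HWO last: hands the GPD to HW)
+ */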
+
+/**
+ * @dma: physical base address of the GPD segment
+ * @start: virtual base address of the GPD segment
+ * @end: the last GPD element
+ * @enqueue: the first empty GPD to use
+ * @dequeue: the first completed GPD serviced by ISR
+ * NOTE: the size of the GPD ring should be >= 2
+ */
+struct mtu3_gpd_ring {
+ dma_addr_t dma;
+ struct qmu_gpd *start;
+ struct qmu_gpd *end;
+ struct qmu_gpd *enqueue;
+ struct qmu_gpd *dequeue;
+};
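+
+/*
+ * Editor's note, a minimal sketch of the ring convention implied above
+ * (hypothetical helper, not part of this driver): with @enqueue and
+ * @dequeue both pointing into the segment, the ring is empty when they
+ * are equal, which is one reason its size must be >= 2:
+ *
+ *	static bool gpd_ring_empty(struct mtu3_gpd_ring *ring)
+ *	{
+ *		return ring->enqueue == ring->dequeue;
+ *	}
+ */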
+
+/**
+ * @vbus: vbus 5V used by host mode
+ * @edev: external connector used to detect vbus and iddig changes
+ * @vbus_nb: notifier for vbus detection
+ * @id_nb: notifier for iddig (idpin) detection
+ * @extcon_reg_dwork: delayed work to register the extcon notifiers; it
+ *		waits for xHCI driver initialization, which is necessary when
+ *		the system boots up as device.
+ * @is_u3_drd: whether port0 supports usb3.0 dual-role device or not
+ * @id_*: used to manually switch between host and device modes by idpin
+ * @manual_drd_enabled: true when dual-role device is supported via
+ *		debugfs to switch host/device modes depending on user input.
+ */
+struct otg_switch_mtk {
+ struct regulator *vbus;
+ struct extcon_dev *edev;
+ struct notifier_block vbus_nb;
+ struct notifier_block id_nb;
+ struct delayed_work extcon_reg_dwork;
+ bool is_u3_drd;
+ /* dual-role switch by debugfs */
+ struct pinctrl *id_pinctrl;
+ struct pinctrl_state *id_float;
+ struct pinctrl_state *id_ground;
+ bool manual_drd_enabled;
+};
+
+/**
+ * @mac_base: register base address of the device MAC, excluding xHCI's
+ * @ippc_base: register base address of IP Power and Clock interface (IPPC)
+ * @vusb33: usb 3.3V supply shared by device/host IP
+ * @sys_clk: system clock of mtu3, shared by device/host IP
+ * @dr_mode: the mode it works in:
+ *		host only, device only or dual-role mode
+ * @u2_ports: number of usb2.0 host ports
+ * @u3_ports: number of usb3.0 host ports
+ * @dbgfs_root: only used when manual dual-role switching via debugfs
+ *		is supported
+ * @wakeup_en: true when remote wakeup is supported in host mode
+ * @wk_deb_p0: port0's wakeup debounce clock
+ * @wk_deb_p1: optional; depends on whether port1 is supported
+ */
+struct ssusb_mtk {
+ struct device *dev;
+ struct mtu3 *u3d;
+ void __iomem *mac_base;
+ void __iomem *ippc_base;
+ struct phy **phys;
+ int num_phys;
+ /* common power & clock */
+ struct regulator *vusb33;
+ struct clk *sys_clk;
+ /* otg */
+ struct otg_switch_mtk otg_switch;
+ enum usb_dr_mode dr_mode;
+ bool is_host;
+ int u2_ports;
+ int u3_ports;
+ struct dentry *dbgfs_root;
+ /* usb wakeup for host mode */
+ bool wakeup_en;
+ struct clk *wk_deb_p0;
+ struct clk *wk_deb_p1;
+ struct regmap *pericfg;
+};
+
+/**
+ * @fifo_size: it is (@slot + 1) * @fifo_seg_size
+ * @fifo_seg_size: it is roundup_pow_of_two(@maxp)
+ */
+struct mtu3_ep {
+ struct usb_ep ep;
+ char name[12];
+ struct mtu3 *mtu;
+ u8 epnum;
+ u8 type;
+ u8 is_in;
+ u16 maxp;
+ int slot;
+ u32 fifo_size;
+ u32 fifo_addr;
+ u32 fifo_seg_size;
+ struct mtu3_fifo_info *fifo;
+
+ struct list_head req_list;
+ struct mtu3_gpd_ring gpd_ring;
+ const struct usb_ss_ep_comp_descriptor *comp_desc;
+ const struct usb_endpoint_descriptor *desc;
+
+ int flags;
+ u8 wedged;
+ u8 busy;
+};
+
+struct mtu3_request {
+ struct usb_request request;
+ struct list_head list;
+ struct mtu3_ep *mep;
+ struct mtu3 *mtu;
+ struct qmu_gpd *gpd;
+ int epnum;
+};
+
+static inline struct ssusb_mtk *dev_to_ssusb(struct device *dev)
+{
+ return dev_get_drvdata(dev);
+}
+
+/**
+ * struct mtu3 - device driver instance data.
+ * @slot: MTU3_U2_IP_SLOT_DEFAULT for U2 IP only,
+ * MTU3_U3_IP_SLOT_DEFAULT for U3 IP
+ * @may_wakeup: set when the device's remote wakeup is enabled
+ * @is_self_powered: is reported in device status and the config descriptor
+ * @ep0_req: dummy request used while handling standard USB requests
+ * for GET_STATUS and SET_SEL
+ * @setup_buf: ep0 response buffer for GET_STATUS and SET_SEL requests
+ */
+struct mtu3 {
+ spinlock_t lock;
+ struct ssusb_mtk *ssusb;
+ struct device *dev;
+ void __iomem *mac_base;
+ void __iomem *ippc_base;
+ int irq;
+
+ struct mtu3_fifo_info tx_fifo;
+ struct mtu3_fifo_info rx_fifo;
+
+ struct mtu3_ep *ep_array;
+ struct mtu3_ep *in_eps;
+ struct mtu3_ep *out_eps;
+ struct mtu3_ep *ep0;
+ int num_eps;
+ int slot;
+ int active_ep;
+
+ struct dma_pool *qmu_gpd_pool;
+ enum mtu3_g_ep0_state ep0_state;
+ struct usb_gadget g; /* the gadget */
+ struct usb_gadget_driver *gadget_driver;
+ struct mtu3_request ep0_req;
+ u8 setup_buf[EP0_RESPONSE_BUF];
+ u32 max_speed;
+
+ unsigned is_active:1;
+ unsigned may_wakeup:1;
+ unsigned is_self_powered:1;
+ unsigned test_mode:1;
+ unsigned softconnect:1;
+ unsigned u1_enable:1;
+ unsigned u2_enable:1;
+ unsigned is_u3_ip:1;
+
+ u8 address;
+ u8 test_mode_nr;
+ u32 hw_version;
+};
+
+static inline struct mtu3 *gadget_to_mtu3(struct usb_gadget *g)
+{
+ return container_of(g, struct mtu3, g);
+}
+
+static inline int is_first_entry(const struct list_head *list,
+ const struct list_head *head)
+{
+ return list_is_last(head, list);
+}
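+
+/*
+ * Editor's note: the helper above works because in a circular list
+ * list_is_last(head, list) is true exactly when head->next == list,
+ * i.e. when @list is the first entry after @head.
+ */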
+
+static inline struct mtu3_request *to_mtu3_request(struct usb_request *req)
+{
+ return req ? container_of(req, struct mtu3_request, request) : NULL;
+}
+
+static inline struct mtu3_ep *to_mtu3_ep(struct usb_ep *ep)
+{
+ return ep ? container_of(ep, struct mtu3_ep, ep) : NULL;
+}
+
+static inline struct mtu3_request *next_request(struct mtu3_ep *mep)
+{
+ struct list_head *queue = &mep->req_list;
+
+ if (list_empty(queue))
+ return NULL;
+
+ return list_first_entry(queue, struct mtu3_request, list);
+}
+
+static inline void mtu3_writel(void __iomem *base, u32 offset, u32 data)
+{
+ writel(data, base + offset);
+}
+
+static inline u32 mtu3_readl(void __iomem *base, u32 offset)
+{
+ return readl(base + offset);
+}
+
+static inline void mtu3_setbits(void __iomem *base, u32 offset, u32 bits)
+{
+ void __iomem *addr = base + offset;
+ u32 tmp = readl(addr);
+
+ writel((tmp | (bits)), addr);
+}
+
+static inline void mtu3_clrbits(void __iomem *base, u32 offset, u32 bits)
+{
+ void __iomem *addr = base + offset;
+ u32 tmp = readl(addr);
+
+ writel((tmp & ~(bits)), addr);
+}
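+
+/*
+ * Editor's note: the set/clr helpers above are plain read-modify-write
+ * sequences, e.g. mtu3_setbits(mbase, U3D_POWER_MANAGEMENT, HS_ENABLE)
+ * reads the register, ORs in the bits and writes the result back; they
+ * are not atomic, so callers presumably serialize externally (e.g. via
+ * mtu->lock).
+ */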
+
+int ssusb_check_clocks(struct ssusb_mtk *ssusb, u32 ex_clks);
+struct usb_request *mtu3_alloc_request(struct usb_ep *ep, gfp_t gfp_flags);
+void mtu3_free_request(struct usb_ep *ep, struct usb_request *req);
+void mtu3_req_complete(struct mtu3_ep *mep,
+ struct usb_request *req, int status);
+
+int mtu3_config_ep(struct mtu3 *mtu, struct mtu3_ep *mep,
+ int interval, int burst, int mult);
+void mtu3_deconfig_ep(struct mtu3 *mtu, struct mtu3_ep *mep);
+void mtu3_ep_stall_set(struct mtu3_ep *mep, bool set);
+void mtu3_ep0_setup(struct mtu3 *mtu);
+void mtu3_start(struct mtu3 *mtu);
+void mtu3_stop(struct mtu3 *mtu);
+void mtu3_dev_on_off(struct mtu3 *mtu, int is_on);
+
+int mtu3_gadget_setup(struct mtu3 *mtu);
+void mtu3_gadget_cleanup(struct mtu3 *mtu);
+void mtu3_gadget_reset(struct mtu3 *mtu);
+void mtu3_gadget_suspend(struct mtu3 *mtu);
+void mtu3_gadget_resume(struct mtu3 *mtu);
+void mtu3_gadget_disconnect(struct mtu3 *mtu);
+
+irqreturn_t mtu3_ep0_isr(struct mtu3 *mtu);
+extern const struct usb_ep_ops mtu3_ep0_ops;
+
+#endif
diff --git a/drivers/usb/mtu3/mtu3_core.c b/drivers/usb/mtu3/mtu3_core.c
new file mode 100644
index 000000000000..99c65b0788ff
--- /dev/null
+++ b/drivers/usb/mtu3/mtu3_core.c
@@ -0,0 +1,863 @@
+/*
+ * mtu3_core.c - hardware access layer and gadget init/exit of
+ * MediaTek usb3 Dual-Role Controller Driver
+ *
+ * Copyright (C) 2016 MediaTek Inc.
+ *
+ * Author: Chunfeng Yun <chunfeng.yun@mediatek.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/platform_device.h>
+
+#include "mtu3.h"
+
+static int ep_fifo_alloc(struct mtu3_ep *mep, u32 seg_size)
+{
+ struct mtu3_fifo_info *fifo = mep->fifo;
+ u32 num_bits = DIV_ROUND_UP(seg_size, MTU3_EP_FIFO_UNIT);
+ u32 start_bit;
+
+ /* ensure that @mep->fifo_seg_size is power of two */
+ num_bits = roundup_pow_of_two(num_bits);
+ if (num_bits > fifo->limit)
+ return -EINVAL;
+
+ mep->fifo_seg_size = num_bits * MTU3_EP_FIFO_UNIT;
+ num_bits = num_bits * (mep->slot + 1);
+ start_bit = bitmap_find_next_zero_area(fifo->bitmap,
+ fifo->limit, 0, num_bits, 0);
+ if (start_bit >= fifo->limit)
+ return -EOVERFLOW;
+
+ bitmap_set(fifo->bitmap, start_bit, num_bits);
+ mep->fifo_size = num_bits * MTU3_EP_FIFO_UNIT;
+ mep->fifo_addr = fifo->base + MTU3_EP_FIFO_UNIT * start_bit;
+
+ dev_dbg(mep->mtu->dev, "%s fifo:%#x/%#x, start_bit: %d\n",
+ __func__, mep->fifo_seg_size, mep->fifo_size, start_bit);
+
+ return mep->fifo_addr;
+}
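+
+/*
+ * Editor's note, a worked example of the allocation above (illustrative
+ * numbers, not taken from the driver): for maxp = 1024 and slot = 2,
+ * num_bits = roundup_pow_of_two(DIV_ROUND_UP(1024, 512)) = 2, so
+ * fifo_seg_size = 1KB; then 2 * (2 + 1) = 6 bits are set in the bitmap,
+ * reserving a 3KB fifo at fifo->base + 512 * start_bit.
+ */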
+
+static void ep_fifo_free(struct mtu3_ep *mep)
+{
+ struct mtu3_fifo_info *fifo = mep->fifo;
+ u32 addr = mep->fifo_addr;
+ u32 bits = mep->fifo_size / MTU3_EP_FIFO_UNIT;
+ u32 start_bit;
+
+ if (unlikely(addr < fifo->base || bits > fifo->limit))
+ return;
+
+ start_bit = (addr - fifo->base) / MTU3_EP_FIFO_UNIT;
+ bitmap_clear(fifo->bitmap, start_bit, bits);
+ mep->fifo_size = 0;
+ mep->fifo_seg_size = 0;
+
+ dev_dbg(mep->mtu->dev, "%s size:%#x/%#x, start_bit: %d\n",
+ __func__, mep->fifo_seg_size, mep->fifo_size, start_bit);
+}
+
+/* enable/disable U3D SS function */
+static inline void mtu3_ss_func_set(struct mtu3 *mtu, bool enable)
+{
+ /* If usb3_en==0, LTSSM will go to SS.Disable state */
+ if (enable)
+ mtu3_setbits(mtu->mac_base, U3D_USB3_CONFIG, USB3_EN);
+ else
+ mtu3_clrbits(mtu->mac_base, U3D_USB3_CONFIG, USB3_EN);
+
+ dev_dbg(mtu->dev, "USB3_EN = %d\n", !!enable);
+}
+
+/* set/clear U3D HS device soft connect */
+static inline void mtu3_hs_softconn_set(struct mtu3 *mtu, bool enable)
+{
+ if (enable) {
+ mtu3_setbits(mtu->mac_base, U3D_POWER_MANAGEMENT,
+ SOFT_CONN | SUSPENDM_ENABLE);
+ } else {
+ mtu3_clrbits(mtu->mac_base, U3D_POWER_MANAGEMENT,
+ SOFT_CONN | SUSPENDM_ENABLE);
+ }
+ dev_dbg(mtu->dev, "SOFTCONN = %d\n", !!enable);
+}
+
+/* only port0 of U2/U3 supports device mode */
+static int mtu3_device_enable(struct mtu3 *mtu)
+{
+ void __iomem *ibase = mtu->ippc_base;
+ u32 check_clk = 0;
+
+ mtu3_clrbits(ibase, U3D_SSUSB_IP_PW_CTRL2, SSUSB_IP_DEV_PDN);
+
+ if (mtu->is_u3_ip) {
+ check_clk = SSUSB_U3_MAC_RST_B_STS;
+ mtu3_clrbits(ibase, SSUSB_U3_CTRL(0),
+ (SSUSB_U3_PORT_DIS | SSUSB_U3_PORT_PDN |
+ SSUSB_U3_PORT_HOST_SEL));
+ }
+ mtu3_clrbits(ibase, SSUSB_U2_CTRL(0),
+ (SSUSB_U2_PORT_DIS | SSUSB_U2_PORT_PDN |
+ SSUSB_U2_PORT_HOST_SEL));
+ mtu3_setbits(ibase, SSUSB_U2_CTRL(0), SSUSB_U2_PORT_OTG_SEL);
+
+ return ssusb_check_clocks(mtu->ssusb, check_clk);
+}
+
+static void mtu3_device_disable(struct mtu3 *mtu)
+{
+ void __iomem *ibase = mtu->ippc_base;
+
+ if (mtu->is_u3_ip)
+ mtu3_setbits(ibase, SSUSB_U3_CTRL(0),
+ (SSUSB_U3_PORT_DIS | SSUSB_U3_PORT_PDN));
+
+ mtu3_setbits(ibase, SSUSB_U2_CTRL(0),
+ SSUSB_U2_PORT_DIS | SSUSB_U2_PORT_PDN);
+ mtu3_clrbits(ibase, SSUSB_U2_CTRL(0), SSUSB_U2_PORT_OTG_SEL);
+ mtu3_setbits(ibase, U3D_SSUSB_IP_PW_CTRL2, SSUSB_IP_DEV_PDN);
+}
+
+/* reset U3D's device module. */
+static void mtu3_device_reset(struct mtu3 *mtu)
+{
+ void __iomem *ibase = mtu->ippc_base;
+
+ mtu3_setbits(ibase, U3D_SSUSB_DEV_RST_CTRL, SSUSB_DEV_SW_RST);
+ udelay(1);
+ mtu3_clrbits(ibase, U3D_SSUSB_DEV_RST_CTRL, SSUSB_DEV_SW_RST);
+}
+
+/* disable all interrupts */
+static void mtu3_intr_disable(struct mtu3 *mtu)
+{
+ void __iomem *mbase = mtu->mac_base;
+
+ /* Disable level 1 interrupts */
+ mtu3_writel(mbase, U3D_LV1IECR, ~0x0);
+ /* Disable endpoint interrupts */
+ mtu3_writel(mbase, U3D_EPIECR, ~0x0);
+}
+
+static void mtu3_intr_status_clear(struct mtu3 *mtu)
+{
+ void __iomem *mbase = mtu->mac_base;
+
+ /* Clear EP0 and Tx/Rx EPn interrupts status */
+ mtu3_writel(mbase, U3D_EPISR, ~0x0);
+ /* Clear U2 USB common interrupts status */
+ mtu3_writel(mbase, U3D_COMMON_USB_INTR, ~0x0);
+ /* Clear U3 LTSSM interrupts status */
+ mtu3_writel(mbase, U3D_LTSSM_INTR, ~0x0);
+ /* Clear speed change interrupt status */
+ mtu3_writel(mbase, U3D_DEV_LINK_INTR, ~0x0);
+}
+
+/* enable system global interrupt */
+static void mtu3_intr_enable(struct mtu3 *mtu)
+{
+ void __iomem *mbase = mtu->mac_base;
+ u32 value;
+
+ /* Enable level 1 interrupts (BMU, QMU, MAC3, MAC2, EPCTL) */
+ value = BMU_INTR | QMU_INTR | MAC3_INTR | MAC2_INTR | EP_CTRL_INTR;
+ mtu3_writel(mbase, U3D_LV1IESR, value);
+
+ /* Enable U2 common USB interrupts */
+ value = SUSPEND_INTR | RESUME_INTR | RESET_INTR;
+ mtu3_writel(mbase, U3D_COMMON_USB_INTR_ENABLE, value);
+
+ if (mtu->is_u3_ip) {
+ /* Enable U3 LTSSM interrupts */
+ value = HOT_RST_INTR | WARM_RST_INTR | VBUS_RISE_INTR |
+ VBUS_FALL_INTR | ENTER_U3_INTR | EXIT_U3_INTR;
+ mtu3_writel(mbase, U3D_LTSSM_INTR_ENABLE, value);
+ }
+
+ /* Enable QMU interrupts. */
+ value = TXQ_CSERR_INT | TXQ_LENERR_INT | RXQ_CSERR_INT |
+ RXQ_LENERR_INT | RXQ_ZLPERR_INT;
+ mtu3_writel(mbase, U3D_QIESR1, value);
+
+ /* Enable speed change interrupt */
+ mtu3_writel(mbase, U3D_DEV_LINK_INTR_ENABLE, SSUSB_DEV_SPEED_CHG_INTR);
+}
+
+/* set/clear the stall and toggle bits for non-ep0 */
+void mtu3_ep_stall_set(struct mtu3_ep *mep, bool set)
+{
+ struct mtu3 *mtu = mep->mtu;
+ void __iomem *mbase = mtu->mac_base;
+ u8 epnum = mep->epnum;
+ u32 csr;
+
+ if (mep->is_in) { /* TX */
+ csr = mtu3_readl(mbase, MU3D_EP_TXCR0(epnum)) & TX_W1C_BITS;
+ if (set)
+ csr |= TX_SENDSTALL;
+ else
+ csr = (csr & (~TX_SENDSTALL)) | TX_SENTSTALL;
+ mtu3_writel(mbase, MU3D_EP_TXCR0(epnum), csr);
+ } else { /* RX */
+ csr = mtu3_readl(mbase, MU3D_EP_RXCR0(epnum)) & RX_W1C_BITS;
+ if (set)
+ csr |= RX_SENDSTALL;
+ else
+ csr = (csr & (~RX_SENDSTALL)) | RX_SENTSTALL;
+ mtu3_writel(mbase, MU3D_EP_RXCR0(epnum), csr);
+ }
+
+ if (!set) {
+ mtu3_setbits(mbase, U3D_EP_RST, EP_RST(mep->is_in, epnum));
+ mtu3_clrbits(mbase, U3D_EP_RST, EP_RST(mep->is_in, epnum));
+ mep->flags &= ~MTU3_EP_STALL;
+ } else {
+ mep->flags |= MTU3_EP_STALL;
+ }
+
+ dev_dbg(mtu->dev, "%s: %s\n", mep->name,
+ set ? "SEND STALL" : "CLEAR STALL, with EP RESET");
+}
+
+void mtu3_dev_on_off(struct mtu3 *mtu, int is_on)
+{
+ if (mtu->is_u3_ip && (mtu->max_speed == USB_SPEED_SUPER))
+ mtu3_ss_func_set(mtu, is_on);
+ else
+ mtu3_hs_softconn_set(mtu, is_on);
+
+ dev_info(mtu->dev, "gadget (%s) pullup D%s\n",
+ usb_speed_string(mtu->max_speed), is_on ? "+" : "-");
+}
+
+void mtu3_start(struct mtu3 *mtu)
+{
+ void __iomem *mbase = mtu->mac_base;
+
+ dev_dbg(mtu->dev, "%s devctl 0x%x\n", __func__,
+ mtu3_readl(mbase, U3D_DEVICE_CONTROL));
+
+ mtu3_clrbits(mtu->ippc_base, U3D_SSUSB_IP_PW_CTRL2, SSUSB_IP_DEV_PDN);
+
+ /*
+ * When the U2 port is disabled, the USB2_CSR register is reset to
+ * its default value after the port is re-enabled (HS is enabled by
+ * default). So if the MAC is forced to work at FS, disable the HS
+ * function.
+ */
+ if (mtu->max_speed == USB_SPEED_FULL)
+ mtu3_clrbits(mbase, U3D_POWER_MANAGEMENT, HS_ENABLE);
+
+ /* Initialize the default interrupts */
+ mtu3_intr_enable(mtu);
+ mtu->is_active = 1;
+
+ if (mtu->softconnect)
+ mtu3_dev_on_off(mtu, 1);
+}
+
+void mtu3_stop(struct mtu3 *mtu)
+{
+ dev_dbg(mtu->dev, "%s\n", __func__);
+
+ mtu3_intr_disable(mtu);
+ mtu3_intr_status_clear(mtu);
+
+ if (mtu->softconnect)
+ mtu3_dev_on_off(mtu, 0);
+
+ mtu->is_active = 0;
+ mtu3_setbits(mtu->ippc_base, U3D_SSUSB_IP_PW_CTRL2, SSUSB_IP_DEV_PDN);
+}
+
+/* for non-ep0 */
+int mtu3_config_ep(struct mtu3 *mtu, struct mtu3_ep *mep,
+ int interval, int burst, int mult)
+{
+ void __iomem *mbase = mtu->mac_base;
+ int epnum = mep->epnum;
+ u32 csr0, csr1, csr2;
+ int fifo_sgsz, fifo_addr;
+ int num_pkts;
+
+ fifo_addr = ep_fifo_alloc(mep, mep->maxp);
+ if (fifo_addr < 0) {
+ dev_err(mtu->dev, "alloc ep fifo failed(%d)\n", mep->maxp);
+ return -ENOMEM;
+ }
+ fifo_sgsz = ilog2(mep->fifo_seg_size);
+ dev_dbg(mtu->dev, "%s fifosz: %x(%x/%x)\n", __func__, fifo_sgsz,
+ mep->fifo_seg_size, mep->fifo_size);
+
+ if (mep->is_in) {
+ csr0 = TX_TXMAXPKTSZ(mep->maxp);
+ csr0 |= TX_DMAREQEN;
+
+ num_pkts = (burst + 1) * (mult + 1) - 1;
+ csr1 = TX_SS_BURST(burst) | TX_SLOT(mep->slot);
+ csr1 |= TX_MAX_PKT(num_pkts) | TX_MULT(mult);
+
+ csr2 = TX_FIFOADDR(fifo_addr >> 4);
+ csr2 |= TX_FIFOSEGSIZE(fifo_sgsz);
+
+ switch (mep->type) {
+ case USB_ENDPOINT_XFER_BULK:
+ csr1 |= TX_TYPE(TYPE_BULK);
+ break;
+ case USB_ENDPOINT_XFER_ISOC:
+ csr1 |= TX_TYPE(TYPE_ISO);
+ csr2 |= TX_BINTERVAL(interval);
+ break;
+ case USB_ENDPOINT_XFER_INT:
+ csr1 |= TX_TYPE(TYPE_INT);
+ csr2 |= TX_BINTERVAL(interval);
+ break;
+ }
+
+ /* Enable QMU Done interrupt */
+ mtu3_setbits(mbase, U3D_QIESR0, QMU_TX_DONE_INT(epnum));
+
+ mtu3_writel(mbase, MU3D_EP_TXCR0(epnum), csr0);
+ mtu3_writel(mbase, MU3D_EP_TXCR1(epnum), csr1);
+ mtu3_writel(mbase, MU3D_EP_TXCR2(epnum), csr2);
+
+ dev_dbg(mtu->dev, "U3D_TX%d CSR0:%#x, CSR1:%#x, CSR2:%#x\n",
+ epnum, mtu3_readl(mbase, MU3D_EP_TXCR0(epnum)),
+ mtu3_readl(mbase, MU3D_EP_TXCR1(epnum)),
+ mtu3_readl(mbase, MU3D_EP_TXCR2(epnum)));
+ } else {
+ csr0 = RX_RXMAXPKTSZ(mep->maxp);
+ csr0 |= RX_DMAREQEN;
+
+ num_pkts = (burst + 1) * (mult + 1) - 1;
+ csr1 = RX_SS_BURST(burst) | RX_SLOT(mep->slot);
+ csr1 |= RX_MAX_PKT(num_pkts) | RX_MULT(mult);
+
+ csr2 = RX_FIFOADDR(fifo_addr >> 4);
+ csr2 |= RX_FIFOSEGSIZE(fifo_sgsz);
+
+ switch (mep->type) {
+ case USB_ENDPOINT_XFER_BULK:
+ csr1 |= RX_TYPE(TYPE_BULK);
+ break;
+ case USB_ENDPOINT_XFER_ISOC:
+ csr1 |= RX_TYPE(TYPE_ISO);
+ csr2 |= RX_BINTERVAL(interval);
+ break;
+ case USB_ENDPOINT_XFER_INT:
+ csr1 |= RX_TYPE(TYPE_INT);
+ csr2 |= RX_BINTERVAL(interval);
+ break;
+ }
+
+ /* Enable QMU Done interrupt */
+ mtu3_setbits(mbase, U3D_QIESR0, QMU_RX_DONE_INT(epnum));
+
+ mtu3_writel(mbase, MU3D_EP_RXCR0(epnum), csr0);
+ mtu3_writel(mbase, MU3D_EP_RXCR1(epnum), csr1);
+ mtu3_writel(mbase, MU3D_EP_RXCR2(epnum), csr2);
+
+ dev_dbg(mtu->dev, "U3D_RX%d CSR0:%#x, CSR1:%#x, CSR2:%#x\n",
+ epnum, mtu3_readl(mbase, MU3D_EP_RXCR0(epnum)),
+ mtu3_readl(mbase, MU3D_EP_RXCR1(epnum)),
+ mtu3_readl(mbase, MU3D_EP_RXCR2(epnum)));
+ }
+
+ dev_dbg(mtu->dev, "csr0:%#x, csr1:%#x, csr2:%#x\n", csr0, csr1, csr2);
+ dev_dbg(mtu->dev, "%s: %s, fifo-addr:%#x, fifo-size:%#x(%#x/%#x)\n",
+ __func__, mep->name, mep->fifo_addr, mep->fifo_size,
+ fifo_sgsz, mep->fifo_seg_size);
+
+ return 0;
+}
+
+/* for non-ep0 */
+void mtu3_deconfig_ep(struct mtu3 *mtu, struct mtu3_ep *mep)
+{
+ void __iomem *mbase = mtu->mac_base;
+ int epnum = mep->epnum;
+
+ if (mep->is_in) {
+ mtu3_writel(mbase, MU3D_EP_TXCR0(epnum), 0);
+ mtu3_writel(mbase, MU3D_EP_TXCR1(epnum), 0);
+ mtu3_writel(mbase, MU3D_EP_TXCR2(epnum), 0);
+ mtu3_setbits(mbase, U3D_QIECR0, QMU_TX_DONE_INT(epnum));
+ } else {
+ mtu3_writel(mbase, MU3D_EP_RXCR0(epnum), 0);
+ mtu3_writel(mbase, MU3D_EP_RXCR1(epnum), 0);
+ mtu3_writel(mbase, MU3D_EP_RXCR2(epnum), 0);
+ mtu3_setbits(mbase, U3D_QIECR0, QMU_RX_DONE_INT(epnum));
+ }
+
+ ep_fifo_free(mep);
+
+ dev_dbg(mtu->dev, "%s: %s\n", __func__, mep->name);
+}
+
+/*
+ * Two scenarios:
+ * 1. when the device IP supports SS, the fifos of EP0, TX EPs and
+ *    RX EPs are separate;
+ * 2. when it supports only HS, the fifo is shared by all EPs, and
+ *    the capability registers @EPNTXFFSZ or @EPNRXFFSZ indicate the
+ *    total fifo size of non-ep0 EPs, while ep0's fifo is fixed to
+ *    64B, so the total fifo size is 64B + @EPNTXFFSZ;
+ *    Since the first 64B are reserved for EP0, the non-ep0 fifo
+ *    starts at offset 64 and is divided into two equal parts for
+ *    TX and RX EPs for simplicity.
+ */
+static void get_ep_fifo_config(struct mtu3 *mtu)
+{
+ struct mtu3_fifo_info *tx_fifo;
+ struct mtu3_fifo_info *rx_fifo;
+ u32 fifosize;
+
+ if (mtu->is_u3_ip) {
+ fifosize = mtu3_readl(mtu->mac_base, U3D_CAP_EPNTXFFSZ);
+ tx_fifo = &mtu->tx_fifo;
+ tx_fifo->base = 0;
+ tx_fifo->limit = fifosize / MTU3_EP_FIFO_UNIT;
+ bitmap_zero(tx_fifo->bitmap, MTU3_FIFO_BIT_SIZE);
+
+ fifosize = mtu3_readl(mtu->mac_base, U3D_CAP_EPNRXFFSZ);
+ rx_fifo = &mtu->rx_fifo;
+ rx_fifo->base = 0;
+ rx_fifo->limit = fifosize / MTU3_EP_FIFO_UNIT;
+ bitmap_zero(rx_fifo->bitmap, MTU3_FIFO_BIT_SIZE);
+ mtu->slot = MTU3_U3_IP_SLOT_DEFAULT;
+ } else {
+ fifosize = mtu3_readl(mtu->mac_base, U3D_CAP_EPNTXFFSZ);
+ tx_fifo = &mtu->tx_fifo;
+ tx_fifo->base = MTU3_U2_IP_EP0_FIFO_SIZE;
+ tx_fifo->limit = (fifosize / MTU3_EP_FIFO_UNIT) >> 1;
+ bitmap_zero(tx_fifo->bitmap, MTU3_FIFO_BIT_SIZE);
+
+ rx_fifo = &mtu->rx_fifo;
+ rx_fifo->base =
+ tx_fifo->base + tx_fifo->limit * MTU3_EP_FIFO_UNIT;
+ rx_fifo->limit = tx_fifo->limit;
+ bitmap_zero(rx_fifo->bitmap, MTU3_FIFO_BIT_SIZE);
+ mtu->slot = MTU3_U2_IP_SLOT_DEFAULT;
+ }
+
+ dev_dbg(mtu->dev, "%s, TX: base-%d, limit-%d; RX: base-%d, limit-%d\n",
+ __func__, tx_fifo->base, tx_fifo->limit,
+ rx_fifo->base, rx_fifo->limit);
+}
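+
+/*
+ * Editor's note, a worked example of the U2 split above (illustrative
+ * numbers, not read from real hardware): if @EPNTXFFSZ reads 16KB on a
+ * U2-only IP, then tx_fifo = {base 64, limit (16384 / 512) >> 1 = 16}
+ * and rx_fifo = {base 64 + 16 * 512 = 8256, limit 16}.
+ */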
+
+void mtu3_ep0_setup(struct mtu3 *mtu)
+{
+ u32 maxpacket = mtu->g.ep0->maxpacket;
+ u32 csr;
+
+ dev_dbg(mtu->dev, "%s maxpacket: %d\n", __func__, maxpacket);
+
+ csr = mtu3_readl(mtu->mac_base, U3D_EP0CSR);
+ csr &= ~EP0_MAXPKTSZ_MSK;
+ csr |= EP0_MAXPKTSZ(maxpacket);
+ csr &= EP0_W1C_BITS;
+ mtu3_writel(mtu->mac_base, U3D_EP0CSR, csr);
+
+ /* Enable EP0 interrupt */
+ mtu3_writel(mtu->mac_base, U3D_EPIESR, EP0ISR);
+}
+
+static int mtu3_mem_alloc(struct mtu3 *mtu)
+{
+ void __iomem *mbase = mtu->mac_base;
+ struct mtu3_ep *ep_array;
+ int in_ep_num, out_ep_num;
+ u32 cap_epinfo;
+ int ret;
+ int i;
+
+ cap_epinfo = mtu3_readl(mbase, U3D_CAP_EPINFO);
+ in_ep_num = CAP_TX_EP_NUM(cap_epinfo);
+ out_ep_num = CAP_RX_EP_NUM(cap_epinfo);
+
+ dev_info(mtu->dev, "fifosz/epnum: Tx=%#x/%d, Rx=%#x/%d\n",
+ mtu3_readl(mbase, U3D_CAP_EPNTXFFSZ), in_ep_num,
+ mtu3_readl(mbase, U3D_CAP_EPNRXFFSZ), out_ep_num);
+
+ /* one for ep0, another is reserved */
+ mtu->num_eps = min(in_ep_num, out_ep_num) + 1;
+ ep_array = kcalloc(mtu->num_eps * 2, sizeof(*ep_array), GFP_KERNEL);
+ if (ep_array == NULL)
+ return -ENOMEM;
+
+ mtu->ep_array = ep_array;
+ mtu->in_eps = ep_array;
+ mtu->out_eps = &ep_array[mtu->num_eps];
+ /* ep0 uses in_eps[0], out_eps[0] is reserved */
+ mtu->ep0 = mtu->in_eps;
+ mtu->ep0->mtu = mtu;
+ mtu->ep0->epnum = 0;
+
+ for (i = 1; i < mtu->num_eps; i++) {
+ struct mtu3_ep *mep = mtu->in_eps + i;
+
+ mep->fifo = &mtu->tx_fifo;
+ mep = mtu->out_eps + i;
+ mep->fifo = &mtu->rx_fifo;
+ }
+
+ get_ep_fifo_config(mtu);
+
+ ret = mtu3_qmu_init(mtu);
+ if (ret)
+ kfree(mtu->ep_array);
+
+ return ret;
+}
+
+static void mtu3_mem_free(struct mtu3 *mtu)
+{
+ mtu3_qmu_exit(mtu);
+ kfree(mtu->ep_array);
+}
+
+static void mtu3_set_speed(struct mtu3 *mtu)
+{
+ void __iomem *mbase = mtu->mac_base;
+
+ if (!mtu->is_u3_ip && (mtu->max_speed > USB_SPEED_HIGH))
+ mtu->max_speed = USB_SPEED_HIGH;
+
+ if (mtu->max_speed == USB_SPEED_FULL) {
+ /* disable U3 SS function */
+ mtu3_clrbits(mbase, U3D_USB3_CONFIG, USB3_EN);
+ /* disable HS function */
+ mtu3_clrbits(mbase, U3D_POWER_MANAGEMENT, HS_ENABLE);
+ } else if (mtu->max_speed == USB_SPEED_HIGH) {
+ mtu3_clrbits(mbase, U3D_USB3_CONFIG, USB3_EN);
+ /* HS/FS detected by HW */
+ mtu3_setbits(mbase, U3D_POWER_MANAGEMENT, HS_ENABLE);
+ }
+
+ dev_info(mtu->dev, "max_speed: %s\n",
+ usb_speed_string(mtu->max_speed));
+}
+
+static void mtu3_regs_init(struct mtu3 *mtu)
+{
+ void __iomem *mbase = mtu->mac_base;
+
+ /* be sure interrupts are disabled before registration of ISR */
+ mtu3_intr_disable(mtu);
+ mtu3_intr_status_clear(mtu);
+
+ if (mtu->is_u3_ip) {
+ /* disable LGO_U1/U2 by default */
+ mtu3_clrbits(mbase, U3D_LINK_POWER_CONTROL,
+ SW_U1_ACCEPT_ENABLE | SW_U2_ACCEPT_ENABLE |
+ SW_U1_REQUEST_ENABLE | SW_U2_REQUEST_ENABLE);
+ /* device responds to u3_exit from the host automatically */
+ mtu3_clrbits(mbase, U3D_LTSSM_CTRL, SOFT_U3_EXIT_EN);
+ /* automatically build a U2 link when U3 detection fails */
+ mtu3_setbits(mbase, U3D_USB2_TEST_MODE, U2U3_AUTO_SWITCH);
+ }
+
+ mtu3_set_speed(mtu);
+
+ /* delay about 0.1us from detecting reset to sending chirp-K */
+ mtu3_clrbits(mbase, U3D_LINK_RESET_INFO, WTCHRP_MSK);
+ /* U2/U3 detected by HW */
+ mtu3_writel(mbase, U3D_DEVICE_CONF, 0);
+ /* enable QMU 16B checksum */
+ mtu3_setbits(mbase, U3D_QCR0, QMU_CS16B_EN);
+ /* vbus detected by HW */
+ mtu3_clrbits(mbase, U3D_MISC_CTRL, VBUS_FRC_EN | VBUS_ON);
+}
+
+static irqreturn_t mtu3_link_isr(struct mtu3 *mtu)
+{
+ void __iomem *mbase = mtu->mac_base;
+ enum usb_device_speed udev_speed;
+ u32 maxpkt = 64;
+ u32 link;
+ u32 speed;
+
+ link = mtu3_readl(mbase, U3D_DEV_LINK_INTR);
+ link &= mtu3_readl(mbase, U3D_DEV_LINK_INTR_ENABLE);
+ mtu3_writel(mbase, U3D_DEV_LINK_INTR, link); /* W1C */
+ dev_dbg(mtu->dev, "=== LINK[%x] ===\n", link);
+
+ if (!(link & SSUSB_DEV_SPEED_CHG_INTR))
+ return IRQ_NONE;
+
+ speed = SSUSB_DEV_SPEED(mtu3_readl(mbase, U3D_DEVICE_CONF));
+
+ switch (speed) {
+ case MTU3_SPEED_FULL:
+ udev_speed = USB_SPEED_FULL;
+ /* BESLCK = 4 < BESLCK_U3 = 10 < BESLDCK = 15 */
+ mtu3_writel(mbase, U3D_USB20_LPM_PARAMETER, LPM_BESLDCK(0xf)
+ | LPM_BESLCK(4) | LPM_BESLCK_U3(0xa));
+ mtu3_setbits(mbase, U3D_POWER_MANAGEMENT,
+ LPM_BESL_STALL | LPM_BESLD_STALL);
+ break;
+ case MTU3_SPEED_HIGH:
+ udev_speed = USB_SPEED_HIGH;
+ /* BESLCK = 4 < BESLCK_U3 = 10 < BESLDCK = 15 */
+ mtu3_writel(mbase, U3D_USB20_LPM_PARAMETER, LPM_BESLDCK(0xf)
+ | LPM_BESLCK(4) | LPM_BESLCK_U3(0xa));
+ mtu3_setbits(mbase, U3D_POWER_MANAGEMENT,
+ LPM_BESL_STALL | LPM_BESLD_STALL);
+ break;
+ case MTU3_SPEED_SUPER:
+ udev_speed = USB_SPEED_SUPER;
+ maxpkt = 512;
+ break;
+ default:
+ udev_speed = USB_SPEED_UNKNOWN;
+ break;
+ }
+ dev_dbg(mtu->dev, "%s: %s\n", __func__, usb_speed_string(udev_speed));
+
+ mtu->g.speed = udev_speed;
+ mtu->g.ep0->maxpacket = maxpkt;
+ mtu->ep0_state = MU3D_EP0_STATE_SETUP;
+
+ if (udev_speed == USB_SPEED_UNKNOWN)
+ mtu3_gadget_disconnect(mtu);
+ else
+ mtu3_ep0_setup(mtu);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t mtu3_u3_ltssm_isr(struct mtu3 *mtu)
+{
+ void __iomem *mbase = mtu->mac_base;
+ u32 ltssm;
+
+ ltssm = mtu3_readl(mbase, U3D_LTSSM_INTR);
+ ltssm &= mtu3_readl(mbase, U3D_LTSSM_INTR_ENABLE);
+ mtu3_writel(mbase, U3D_LTSSM_INTR, ltssm); /* W1C */
+ dev_dbg(mtu->dev, "=== LTSSM[%x] ===\n", ltssm);
+
+ if (ltssm & (HOT_RST_INTR | WARM_RST_INTR))
+ mtu3_gadget_reset(mtu);
+
+ if (ltssm & VBUS_FALL_INTR)
+ mtu3_ss_func_set(mtu, false);
+
+ if (ltssm & VBUS_RISE_INTR)
+ mtu3_ss_func_set(mtu, true);
+
+ if (ltssm & EXIT_U3_INTR)
+ mtu3_gadget_resume(mtu);
+
+ if (ltssm & ENTER_U3_INTR)
+ mtu3_gadget_suspend(mtu);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t mtu3_u2_common_isr(struct mtu3 *mtu)
+{
+ void __iomem *mbase = mtu->mac_base;
+ u32 u2comm;
+
+ u2comm = mtu3_readl(mbase, U3D_COMMON_USB_INTR);
+ u2comm &= mtu3_readl(mbase, U3D_COMMON_USB_INTR_ENABLE);
+ mtu3_writel(mbase, U3D_COMMON_USB_INTR, u2comm); /* W1C */
+ dev_dbg(mtu->dev, "=== U2COMM[%x] ===\n", u2comm);
+
+ if (u2comm & SUSPEND_INTR)
+ mtu3_gadget_suspend(mtu);
+
+ if (u2comm & RESUME_INTR)
+ mtu3_gadget_resume(mtu);
+
+ if (u2comm & RESET_INTR)
+ mtu3_gadget_reset(mtu);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t mtu3_irq(int irq, void *data)
+{
+ struct mtu3 *mtu = (struct mtu3 *)data;
+ unsigned long flags;
+ u32 level1;
+
+ spin_lock_irqsave(&mtu->lock, flags);
+
+ /* U3D_LV1ISR is RU */
+ level1 = mtu3_readl(mtu->mac_base, U3D_LV1ISR);
+ level1 &= mtu3_readl(mtu->mac_base, U3D_LV1IER);
+
+ if (level1 & EP_CTRL_INTR)
+ mtu3_link_isr(mtu);
+
+ if (level1 & MAC2_INTR)
+ mtu3_u2_common_isr(mtu);
+
+ if (level1 & MAC3_INTR)
+ mtu3_u3_ltssm_isr(mtu);
+
+ if (level1 & BMU_INTR)
+ mtu3_ep0_isr(mtu);
+
+ if (level1 & QMU_INTR)
+ mtu3_qmu_isr(mtu);
+
+ spin_unlock_irqrestore(&mtu->lock, flags);
+
+ return IRQ_HANDLED;
+}
+
+static int mtu3_hw_init(struct mtu3 *mtu)
+{
+ u32 cap_dev;
+ int ret;
+
+ mtu->hw_version = mtu3_readl(mtu->ippc_base, U3D_SSUSB_HW_ID);
+
+ cap_dev = mtu3_readl(mtu->ippc_base, U3D_SSUSB_IP_DEV_CAP);
+ mtu->is_u3_ip = !!SSUSB_IP_DEV_U3_PORT_NUM(cap_dev);
+
+ dev_info(mtu->dev, "IP version 0x%x(%s IP)\n", mtu->hw_version,
+ mtu->is_u3_ip ? "U3" : "U2");
+
+ mtu3_device_reset(mtu);
+
+ ret = mtu3_device_enable(mtu);
+ if (ret) {
+ dev_err(mtu->dev, "device enable failed %d\n", ret);
+ return ret;
+ }
+
+ ret = mtu3_mem_alloc(mtu);
+ if (ret)
+ return -ENOMEM;
+
+ mtu3_regs_init(mtu);
+
+ return 0;
+}
+
+static void mtu3_hw_exit(struct mtu3 *mtu)
+{
+ mtu3_device_disable(mtu);
+ mtu3_mem_free(mtu);
+}
+
+/*-------------------------------------------------------------------------*/
+
+int ssusb_gadget_init(struct ssusb_mtk *ssusb)
+{
+ struct device *dev = ssusb->dev;
+ struct platform_device *pdev = to_platform_device(dev);
+ struct mtu3 *mtu = NULL;
+ struct resource *res;
+ int ret = -ENOMEM;
+
+ mtu = devm_kzalloc(dev, sizeof(struct mtu3), GFP_KERNEL);
+ if (mtu == NULL)
+ return -ENOMEM;
+
+ mtu->irq = platform_get_irq(pdev, 0);
+ if (mtu->irq <= 0) {
+ dev_err(dev, "fail to get irq number\n");
+ return -ENODEV;
+ }
+ dev_info(dev, "irq %d\n", mtu->irq);
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mac");
+ mtu->mac_base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(mtu->mac_base)) {
+ dev_err(dev, "error mapping memory for dev mac\n");
+ return PTR_ERR(mtu->mac_base);
+ }
+
+ spin_lock_init(&mtu->lock);
+ mtu->dev = dev;
+ mtu->ippc_base = ssusb->ippc_base;
+ ssusb->mac_base = mtu->mac_base;
+ ssusb->u3d = mtu;
+ mtu->ssusb = ssusb;
+ mtu->max_speed = usb_get_maximum_speed(dev);
+
+ /* check the max_speed parameter */
+ switch (mtu->max_speed) {
+ case USB_SPEED_FULL:
+ case USB_SPEED_HIGH:
+ case USB_SPEED_SUPER:
+ break;
+ default:
+ dev_err(dev, "invalid max_speed: %s\n",
+ usb_speed_string(mtu->max_speed));
+ /* fall through */
+ case USB_SPEED_UNKNOWN:
+ /* default as SS */
+ mtu->max_speed = USB_SPEED_SUPER;
+ break;
+ }
+
+ dev_dbg(dev, "mac_base=0x%p, ippc_base=0x%p\n",
+ mtu->mac_base, mtu->ippc_base);
+
+ ret = mtu3_hw_init(mtu);
+ if (ret) {
+ dev_err(dev, "mtu3 hw init failed:%d\n", ret);
+ return ret;
+ }
+
+ ret = devm_request_irq(dev, mtu->irq, mtu3_irq, 0, dev_name(dev), mtu);
+ if (ret) {
+ dev_err(dev, "request irq %d failed!\n", mtu->irq);
+ goto irq_err;
+ }
+
+ device_init_wakeup(dev, true);
+
+ ret = mtu3_gadget_setup(mtu);
+ if (ret) {
+ dev_err(dev, "mtu3 gadget init failed:%d\n", ret);
+ goto gadget_err;
+ }
+
+ /* init as host mode, power down device IP for power saving */
+ if (mtu->ssusb->dr_mode == USB_DR_MODE_OTG)
+ mtu3_stop(mtu);
+
+ dev_dbg(dev, " %s() done...\n", __func__);
+
+ return 0;
+
+gadget_err:
+ device_init_wakeup(dev, false);
+
+irq_err:
+ mtu3_hw_exit(mtu);
+ ssusb->u3d = NULL;
+ dev_err(dev, " %s() fail...\n", __func__);
+
+ return ret;
+}
+
+void ssusb_gadget_exit(struct ssusb_mtk *ssusb)
+{
+ struct mtu3 *mtu = ssusb->u3d;
+
+ mtu3_gadget_cleanup(mtu);
+ device_init_wakeup(ssusb->dev, false);
+ mtu3_hw_exit(mtu);
+}
diff --git a/drivers/usb/mtu3/mtu3_dr.c b/drivers/usb/mtu3/mtu3_dr.c
new file mode 100644
index 000000000000..1a8987e7c5b0
--- /dev/null
+++ b/drivers/usb/mtu3/mtu3_dr.c
@@ -0,0 +1,379 @@
+/*
+ * mtu3_dr.c - dual role switch and host glue layer
+ *
+ * Copyright (C) 2016 MediaTek Inc.
+ *
+ * Author: Chunfeng Yun <chunfeng.yun@mediatek.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/debugfs.h>
+#include <linux/irq.h>
+#include <linux/kernel.h>
+#include <linux/of_device.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/seq_file.h>
+#include <linux/uaccess.h>
+
+#include "mtu3.h"
+#include "mtu3_dr.h"
+
+#define USB2_PORT 2
+#define USB3_PORT 3
+
+enum mtu3_vbus_id_state {
+ MTU3_ID_FLOAT = 1,
+ MTU3_ID_GROUND,
+ MTU3_VBUS_OFF,
+ MTU3_VBUS_VALID,
+};
+
+static void toggle_opstate(struct ssusb_mtk *ssusb)
+{
+ if (!ssusb->otg_switch.is_u3_drd) {
+ mtu3_setbits(ssusb->mac_base, U3D_DEVICE_CONTROL, DC_SESSION);
+ mtu3_setbits(ssusb->mac_base, U3D_POWER_MANAGEMENT, SOFT_CONN);
+ }
+}
+
+/* only port0 supports dual-role mode */
+static int ssusb_port0_switch(struct ssusb_mtk *ssusb,
+ int version, bool tohost)
+{
+ void __iomem *ibase = ssusb->ippc_base;
+ u32 value;
+
+ dev_dbg(ssusb->dev, "%s (switch u%d port0 to %s)\n", __func__,
+ version, tohost ? "host" : "device");
+
+ if (version == USB2_PORT) {
+ /* 1. power off and disable u2 port0 */
+ value = mtu3_readl(ibase, SSUSB_U2_CTRL(0));
+ value |= SSUSB_U2_PORT_PDN | SSUSB_U2_PORT_DIS;
+ mtu3_writel(ibase, SSUSB_U2_CTRL(0), value);
+
+ /* 2. power on, enable u2 port0 and select its mode */
+ value = mtu3_readl(ibase, SSUSB_U2_CTRL(0));
+ value &= ~(SSUSB_U2_PORT_PDN | SSUSB_U2_PORT_DIS);
+ value = tohost ? (value | SSUSB_U2_PORT_HOST_SEL) :
+ (value & (~SSUSB_U2_PORT_HOST_SEL));
+ mtu3_writel(ibase, SSUSB_U2_CTRL(0), value);
+ } else {
+ /* 1. power off and disable u3 port0 */
+ value = mtu3_readl(ibase, SSUSB_U3_CTRL(0));
+ value |= SSUSB_U3_PORT_PDN | SSUSB_U3_PORT_DIS;
+ mtu3_writel(ibase, SSUSB_U3_CTRL(0), value);
+
+ /* 2. power on, enable u3 port0 and select its mode */
+ value = mtu3_readl(ibase, SSUSB_U3_CTRL(0));
+ value &= ~(SSUSB_U3_PORT_PDN | SSUSB_U3_PORT_DIS);
+ value = tohost ? (value | SSUSB_U3_PORT_HOST_SEL) :
+ (value & (~SSUSB_U3_PORT_HOST_SEL));
+ mtu3_writel(ibase, SSUSB_U3_CTRL(0), value);
+ }
+
+ return 0;
+}
+
+static void switch_port_to_host(struct ssusb_mtk *ssusb)
+{
+ u32 check_clk = 0;
+
+ dev_dbg(ssusb->dev, "%s\n", __func__);
+
+ ssusb_port0_switch(ssusb, USB2_PORT, true);
+
+ if (ssusb->otg_switch.is_u3_drd) {
+ ssusb_port0_switch(ssusb, USB3_PORT, true);
+ check_clk = SSUSB_U3_MAC_RST_B_STS;
+ }
+
+ ssusb_check_clocks(ssusb, check_clk);
+
+ /* after all clocks are stable */
+ toggle_opstate(ssusb);
+}
+
+static void switch_port_to_device(struct ssusb_mtk *ssusb)
+{
+ u32 check_clk = 0;
+
+ dev_dbg(ssusb->dev, "%s\n", __func__);
+
+ ssusb_port0_switch(ssusb, USB2_PORT, false);
+
+ if (ssusb->otg_switch.is_u3_drd) {
+ ssusb_port0_switch(ssusb, USB3_PORT, false);
+ check_clk = SSUSB_U3_MAC_RST_B_STS;
+ }
+
+ ssusb_check_clocks(ssusb, check_clk);
+}
+
+int ssusb_set_vbus(struct otg_switch_mtk *otg_sx, int is_on)
+{
+ struct ssusb_mtk *ssusb =
+ container_of(otg_sx, struct ssusb_mtk, otg_switch);
+ struct regulator *vbus = otg_sx->vbus;
+ int ret;
+
+ /* vbus is optional */
+ if (!vbus)
+ return 0;
+
+ dev_dbg(ssusb->dev, "%s: turn %s\n", __func__, is_on ? "on" : "off");
+
+ if (is_on) {
+ ret = regulator_enable(vbus);
+ if (ret) {
+ dev_err(ssusb->dev, "vbus regulator enable failed\n");
+ return ret;
+ }
+ } else {
+ regulator_disable(vbus);
+ }
+
+ return 0;
+}
+
+/*
+ * Mailbox state sequences expected by the hardware:
+ * switch to host: MTU3_VBUS_OFF --> MTU3_ID_GROUND
+ * switch to device: MTU3_ID_FLOAT --> MTU3_VBUS_VALID
+ */
+static void ssusb_set_mailbox(struct otg_switch_mtk *otg_sx,
+ enum mtu3_vbus_id_state status)
+{
+ struct ssusb_mtk *ssusb =
+ container_of(otg_sx, struct ssusb_mtk, otg_switch);
+ struct mtu3 *mtu = ssusb->u3d;
+
+ dev_dbg(ssusb->dev, "mailbox state(%d)\n", status);
+
+ switch (status) {
+ case MTU3_ID_GROUND:
+ switch_port_to_host(ssusb);
+ ssusb_set_vbus(otg_sx, 1);
+ ssusb->is_host = true;
+ break;
+ case MTU3_ID_FLOAT:
+ ssusb->is_host = false;
+ ssusb_set_vbus(otg_sx, 0);
+ switch_port_to_device(ssusb);
+ break;
+ case MTU3_VBUS_OFF:
+ mtu3_stop(mtu);
+ pm_relax(ssusb->dev);
+ break;
+ case MTU3_VBUS_VALID:
+ /* avoid suspend while working as a device */
+ pm_stay_awake(ssusb->dev);
+ mtu3_start(mtu);
+ break;
+ default:
+ dev_err(ssusb->dev, "invalid state\n");
+ }
+}
+
+static int ssusb_id_notifier(struct notifier_block *nb,
+ unsigned long event, void *ptr)
+{
+ struct otg_switch_mtk *otg_sx =
+ container_of(nb, struct otg_switch_mtk, id_nb);
+
+ if (event)
+ ssusb_set_mailbox(otg_sx, MTU3_ID_GROUND);
+ else
+ ssusb_set_mailbox(otg_sx, MTU3_ID_FLOAT);
+
+ return NOTIFY_DONE;
+}
+
+static int ssusb_vbus_notifier(struct notifier_block *nb,
+ unsigned long event, void *ptr)
+{
+ struct otg_switch_mtk *otg_sx =
+ container_of(nb, struct otg_switch_mtk, vbus_nb);
+
+ if (event)
+ ssusb_set_mailbox(otg_sx, MTU3_VBUS_VALID);
+ else
+ ssusb_set_mailbox(otg_sx, MTU3_VBUS_OFF);
+
+ return NOTIFY_DONE;
+}
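+
+/*
+ * Taken together, an external extcon device (e.g. extcon-usb-gpio) drives
+ * the mailbox as follows; a sketch of the expected event flow:
+ *
+ *	ID pin grounded  -> ssusb_id_notifier(event != 0)   -> MTU3_ID_GROUND
+ *	ID pin floating  -> ssusb_id_notifier(event == 0)   -> MTU3_ID_FLOAT
+ *	VBUS asserted    -> ssusb_vbus_notifier(event != 0) -> MTU3_VBUS_VALID
+ *	VBUS removed     -> ssusb_vbus_notifier(event == 0) -> MTU3_VBUS_OFF
+ */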
+
+static int ssusb_extcon_register(struct otg_switch_mtk *otg_sx)
+{
+ struct ssusb_mtk *ssusb =
+ container_of(otg_sx, struct ssusb_mtk, otg_switch);
+ struct extcon_dev *edev = otg_sx->edev;
+ int ret;
+
+ /* extcon is optional */
+ if (!edev)
+ return 0;
+
+ otg_sx->vbus_nb.notifier_call = ssusb_vbus_notifier;
+ ret = extcon_register_notifier(edev, EXTCON_USB,
+ &otg_sx->vbus_nb);
+ if (ret < 0)
+ dev_err(ssusb->dev, "failed to register notifier for USB\n");
+
+ otg_sx->id_nb.notifier_call = ssusb_id_notifier;
+ ret = extcon_register_notifier(edev, EXTCON_USB_HOST,
+ &otg_sx->id_nb);
+ if (ret < 0)
+ dev_err(ssusb->dev, "failed to register notifier for USB-HOST\n");
+
+ dev_dbg(ssusb->dev, "EXTCON_USB: %d, EXTCON_USB_HOST: %d\n",
+ extcon_get_cable_state_(edev, EXTCON_USB),
+ extcon_get_cable_state_(edev, EXTCON_USB_HOST));
+
+ /* default to host; switch to device mode if needed */
+ if (!extcon_get_cable_state_(edev, EXTCON_USB_HOST))
+ ssusb_set_mailbox(otg_sx, MTU3_ID_FLOAT);
+ if (extcon_get_cable_state_(edev, EXTCON_USB))
+ ssusb_set_mailbox(otg_sx, MTU3_VBUS_VALID);
+
+ return 0;
+}
+
+static void extcon_register_dwork(struct work_struct *work)
+{
+ struct delayed_work *dwork = to_delayed_work(work);
+ struct otg_switch_mtk *otg_sx =
+ container_of(dwork, struct otg_switch_mtk, extcon_reg_dwork);
+
+ ssusb_extcon_register(otg_sx);
+}
+
+/*
+ * We provide a debugfs interface to switch between host and device modes
+ * depending on user input.
+ * This is useful in special cases, such as a board that uses a Type-A
+ * receptacle but also wants to support dual-role mode.
+ * It generates cable-state changes by pulling the ID pin up or down, and
+ * "extcon-usb-gpio" then notifies the driver to switch modes.
+ * NOTE: do not enable this interface when a Micro receptacle is used.
+ */
+static void ssusb_mode_manual_switch(struct ssusb_mtk *ssusb, int to_host)
+{
+ struct otg_switch_mtk *otg_sx = &ssusb->otg_switch;
+
+ if (to_host)
+ pinctrl_select_state(otg_sx->id_pinctrl, otg_sx->id_ground);
+ else
+ pinctrl_select_state(otg_sx->id_pinctrl, otg_sx->id_float);
+}
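+
+/*
+ * Example usage of the manual switch; the debugfs directory is created
+ * under usb_debug_root and named after the device, so with a hypothetical
+ * device named 11271000.usb:
+ *
+ *	echo host   > /sys/kernel/debug/usb/11271000.usb/mode
+ *	echo device > /sys/kernel/debug/usb/11271000.usb/mode
+ */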
+
+
+static int ssusb_mode_show(struct seq_file *sf, void *unused)
+{
+ struct ssusb_mtk *ssusb = sf->private;
+
+ seq_printf(sf, "current mode: %s(%s drd)\n(echo device/host)\n",
+ ssusb->is_host ? "host" : "device",
+ ssusb->otg_switch.manual_drd_enabled ? "manual" : "auto");
+
+ return 0;
+}
+
+static int ssusb_mode_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, ssusb_mode_show, inode->i_private);
+}
+
+static ssize_t ssusb_mode_write(struct file *file,
+ const char __user *ubuf, size_t count, loff_t *ppos)
+{
+ struct seq_file *sf = file->private_data;
+ struct ssusb_mtk *ssusb = sf->private;
+ char buf[16];
+
+ /* zero-fill so strncmp() below always sees a NUL-terminated string */
+ memset(buf, 0, sizeof(buf));
+ if (copy_from_user(buf, ubuf, min_t(size_t, sizeof(buf) - 1, count)))
+ return -EFAULT;
+
+ if (!strncmp(buf, "host", 4) && !ssusb->is_host) {
+ ssusb_mode_manual_switch(ssusb, 1);
+ } else if (!strncmp(buf, "device", 6) && ssusb->is_host) {
+ ssusb_mode_manual_switch(ssusb, 0);
+ } else {
+ dev_err(ssusb->dev, "wrong or duplicated setting\n");
+ return -EINVAL;
+ }
+
+ return count;
+}
+
+static const struct file_operations ssusb_mode_fops = {
+ .open = ssusb_mode_open,
+ .write = ssusb_mode_write,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static void ssusb_debugfs_init(struct ssusb_mtk *ssusb)
+{
+ struct dentry *root;
+ struct dentry *file;
+
+ root = debugfs_create_dir(dev_name(ssusb->dev), usb_debug_root);
+ if (IS_ERR_OR_NULL(root)) {
+ if (!root)
+ dev_err(ssusb->dev, "create debugfs root failed\n");
+ return;
+ }
+ ssusb->dbgfs_root = root;
+
+ file = debugfs_create_file("mode", S_IRUGO | S_IWUSR, root,
+ ssusb, &ssusb_mode_fops);
+ if (!file)
+ dev_dbg(ssusb->dev, "create debugfs mode failed\n");
+}
+
+static void ssusb_debugfs_exit(struct ssusb_mtk *ssusb)
+{
+ debugfs_remove_recursive(ssusb->dbgfs_root);
+}
+
+int ssusb_otg_switch_init(struct ssusb_mtk *ssusb)
+{
+ struct otg_switch_mtk *otg_sx = &ssusb->otg_switch;
+
+ INIT_DELAYED_WORK(&otg_sx->extcon_reg_dwork, extcon_register_dwork);
+
+ if (otg_sx->manual_drd_enabled)
+ ssusb_debugfs_init(ssusb);
+
+ /* a 1 s delay is enough to wait for host initialization */
+ schedule_delayed_work(&otg_sx->extcon_reg_dwork, HZ);
+
+ return 0;
+}
+
+void ssusb_otg_switch_exit(struct ssusb_mtk *ssusb)
+{
+ struct otg_switch_mtk *otg_sx = &ssusb->otg_switch;
+
+ cancel_delayed_work(&otg_sx->extcon_reg_dwork);
+
+ if (otg_sx->edev) {
+ extcon_unregister_notifier(otg_sx->edev,
+ EXTCON_USB, &otg_sx->vbus_nb);
+ extcon_unregister_notifier(otg_sx->edev,
+ EXTCON_USB_HOST, &otg_sx->id_nb);
+ }
+
+ if (otg_sx->manual_drd_enabled)
+ ssusb_debugfs_exit(ssusb);
+}
diff --git a/drivers/usb/mtu3/mtu3_dr.h b/drivers/usb/mtu3/mtu3_dr.h
new file mode 100644
index 000000000000..9b228b5811b0
--- /dev/null
+++ b/drivers/usb/mtu3/mtu3_dr.h
@@ -0,0 +1,108 @@
+/*
+ * mtu3_dr.h - dual role switch and host glue layer header
+ *
+ * Copyright (C) 2016 MediaTek Inc.
+ *
+ * Author: Chunfeng Yun <chunfeng.yun@mediatek.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _MTU3_DR_H_
+#define _MTU3_DR_H_
+
+#if IS_ENABLED(CONFIG_USB_MTU3_HOST) || IS_ENABLED(CONFIG_USB_MTU3_DUAL_ROLE)
+
+int ssusb_host_init(struct ssusb_mtk *ssusb, struct device_node *parent_dn);
+void ssusb_host_exit(struct ssusb_mtk *ssusb);
+int ssusb_wakeup_of_property_parse(struct ssusb_mtk *ssusb,
+ struct device_node *dn);
+int ssusb_host_enable(struct ssusb_mtk *ssusb);
+int ssusb_host_disable(struct ssusb_mtk *ssusb, bool suspend);
+int ssusb_wakeup_enable(struct ssusb_mtk *ssusb);
+void ssusb_wakeup_disable(struct ssusb_mtk *ssusb);
+
+#else
+
+static inline int ssusb_host_init(struct ssusb_mtk *ssusb,
+ struct device_node *parent_dn)
+{
+ return 0;
+}
+
+static inline void ssusb_host_exit(struct ssusb_mtk *ssusb)
+{}
+
+static inline int ssusb_wakeup_of_property_parse(
+ struct ssusb_mtk *ssusb, struct device_node *dn)
+{
+ return 0;
+}
+
+static inline int ssusb_host_enable(struct ssusb_mtk *ssusb)
+{
+ return 0;
+}
+
+static inline int ssusb_host_disable(struct ssusb_mtk *ssusb, bool suspend)
+{
+ return 0;
+}
+
+static inline int ssusb_wakeup_enable(struct ssusb_mtk *ssusb)
+{
+ return 0;
+}
+
+static inline void ssusb_wakeup_disable(struct ssusb_mtk *ssusb)
+{}
+
+#endif
+
+
+#if IS_ENABLED(CONFIG_USB_MTU3_GADGET) || IS_ENABLED(CONFIG_USB_MTU3_DUAL_ROLE)
+int ssusb_gadget_init(struct ssusb_mtk *ssusb);
+void ssusb_gadget_exit(struct ssusb_mtk *ssusb);
+#else
+static inline int ssusb_gadget_init(struct ssusb_mtk *ssusb)
+{
+ return 0;
+}
+
+static inline void ssusb_gadget_exit(struct ssusb_mtk *ssusb)
+{}
+#endif
+
+
+#if IS_ENABLED(CONFIG_USB_MTU3_DUAL_ROLE)
+int ssusb_otg_switch_init(struct ssusb_mtk *ssusb);
+void ssusb_otg_switch_exit(struct ssusb_mtk *ssusb);
+int ssusb_set_vbus(struct otg_switch_mtk *otg_sx, int is_on);
+
+#else
+
+static inline int ssusb_otg_switch_init(struct ssusb_mtk *ssusb)
+{
+ return 0;
+}
+
+static inline void ssusb_otg_switch_exit(struct ssusb_mtk *ssusb)
+{}
+
+static inline int ssusb_set_vbus(struct otg_switch_mtk *otg_sx, int is_on)
+{
+ return 0;
+}
+
+#endif
+
+#endif /* _MTU3_DR_H_ */
diff --git a/drivers/usb/mtu3/mtu3_gadget.c b/drivers/usb/mtu3/mtu3_gadget.c
new file mode 100644
index 000000000000..9dd2441b4fa1
--- /dev/null
+++ b/drivers/usb/mtu3/mtu3_gadget.c
@@ -0,0 +1,730 @@
+/*
+ * mtu3_gadget.c - MediaTek usb3 DRD peripheral support
+ *
+ * Copyright (C) 2016 MediaTek Inc.
+ *
+ * Author: Chunfeng Yun <chunfeng.yun@mediatek.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include "mtu3.h"
+
+void mtu3_req_complete(struct mtu3_ep *mep,
+ struct usb_request *req, int status)
+__releases(mep->mtu->lock)
+__acquires(mep->mtu->lock)
+{
+ struct mtu3_request *mreq;
+ struct mtu3 *mtu;
+ int busy = mep->busy;
+
+ mreq = to_mtu3_request(req);
+ list_del(&mreq->list);
+ if (mreq->request.status == -EINPROGRESS)
+ mreq->request.status = status;
+
+ mtu = mreq->mtu;
+ mep->busy = 1;
+ spin_unlock(&mtu->lock);
+
+ /* ep0 uses PIO, so there is no need to unmap it */
+ if (mep->epnum)
+ usb_gadget_unmap_request(&mtu->g, req, mep->is_in);
+
+ dev_dbg(mtu->dev, "%s complete req: %p, sts %d, %d/%d\n", mep->name,
+ req, req->status, mreq->request.actual, mreq->request.length);
+
+ usb_gadget_giveback_request(&mep->ep, &mreq->request);
+
+ spin_lock(&mtu->lock);
+ mep->busy = busy;
+}
+
+static void nuke(struct mtu3_ep *mep, const int status)
+{
+ struct mtu3_request *mreq = NULL;
+
+ mep->busy = 1;
+ if (list_empty(&mep->req_list))
+ return;
+
+ dev_dbg(mep->mtu->dev, "abort %s's req: sts %d\n", mep->name, status);
+
+ /* exclude EP0 */
+ if (mep->epnum)
+ mtu3_qmu_flush(mep);
+
+ while (!list_empty(&mep->req_list)) {
+ mreq = list_first_entry(&mep->req_list,
+ struct mtu3_request, list);
+ mtu3_req_complete(mep, &mreq->request, status);
+ }
+}
+
+static int mtu3_ep_enable(struct mtu3_ep *mep)
+{
+ const struct usb_endpoint_descriptor *desc;
+ const struct usb_ss_ep_comp_descriptor *comp_desc;
+ struct mtu3 *mtu = mep->mtu;
+ u32 interval = 0;
+ u32 mult = 0;
+ u32 burst = 0;
+ int max_packet;
+ int ret;
+
+ desc = mep->desc;
+ comp_desc = mep->comp_desc;
+ mep->type = usb_endpoint_type(desc);
+ max_packet = usb_endpoint_maxp(desc);
+ mep->maxp = max_packet & GENMASK(10, 0);
+
+ switch (mtu->g.speed) {
+ case USB_SPEED_SUPER:
+ if (usb_endpoint_xfer_int(desc) ||
+ usb_endpoint_xfer_isoc(desc)) {
+ interval = desc->bInterval;
+ interval = clamp_val(interval, 1, 16) - 1;
+ if (usb_endpoint_xfer_isoc(desc) && comp_desc)
+ mult = comp_desc->bmAttributes;
+ }
+ if (comp_desc)
+ burst = comp_desc->bMaxBurst;
+
+ break;
+ case USB_SPEED_HIGH:
+ if (usb_endpoint_xfer_isoc(desc) ||
+ usb_endpoint_xfer_int(desc)) {
+ interval = desc->bInterval;
+ interval = clamp_val(interval, 1, 16) - 1;
+ burst = (max_packet & GENMASK(12, 11)) >> 11;
+ }
+ break;
+ default:
+ break; /* other speeds are ignored */
+ }
+
+ dev_dbg(mtu->dev, "%s maxp:%d, interval:%d, burst:%d, mult:%d\n",
+ __func__, mep->maxp, interval, burst, mult);
+
+ mep->ep.maxpacket = mep->maxp;
+ mep->ep.desc = desc;
+ mep->ep.comp_desc = comp_desc;
+
+ /* slot mainly affects bulk/isoc transfers, so ignore interrupt EPs */
+ mep->slot = usb_endpoint_xfer_int(desc) ? 0 : mtu->slot;
+
+ ret = mtu3_config_ep(mtu, mep, interval, burst, mult);
+ if (ret < 0)
+ return ret;
+
+ ret = mtu3_gpd_ring_alloc(mep);
+ if (ret < 0) {
+ mtu3_deconfig_ep(mtu, mep);
+ return ret;
+ }
+
+ mtu3_qmu_start(mep);
+
+ return 0;
+}
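+
+/*
+ * A worked example of the decoding above, assuming an HS isochronous
+ * endpoint with wMaxPacketSize = 0x0840 and bInterval = 4:
+ *
+ *	maxp     = 0x0840 & GENMASK(10, 0) = 64
+ *	burst    = (0x0840 & GENMASK(12, 11)) >> 11 = 1
+ *	           (one extra transaction, i.e. 2 transactions per uframe)
+ *	interval = clamp_val(4, 1, 16) - 1 = 3
+ *	           (a period of 2^3 = 8 uframes, i.e. 1 ms)
+ */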
+
+static int mtu3_ep_disable(struct mtu3_ep *mep)
+{
+ struct mtu3 *mtu = mep->mtu;
+
+ mtu3_qmu_stop(mep);
+
+ /* abort all pending requests */
+ nuke(mep, -ESHUTDOWN);
+ mtu3_deconfig_ep(mtu, mep);
+ mtu3_gpd_ring_free(mep);
+
+ mep->desc = NULL;
+ mep->ep.desc = NULL;
+ mep->comp_desc = NULL;
+ mep->type = 0;
+ mep->flags = 0;
+
+ return 0;
+}
+
+static int mtu3_gadget_ep_enable(struct usb_ep *ep,
+ const struct usb_endpoint_descriptor *desc)
+{
+ struct mtu3_ep *mep;
+ struct mtu3 *mtu;
+ unsigned long flags;
+ int ret = -EINVAL;
+
+ if (!ep || !desc || desc->bDescriptorType != USB_DT_ENDPOINT) {
+ pr_debug("%s invalid parameters\n", __func__);
+ return -EINVAL;
+ }
+
+ if (!desc->wMaxPacketSize) {
+ pr_debug("%s missing wMaxPacketSize\n", __func__);
+ return -EINVAL;
+ }
+ mep = to_mtu3_ep(ep);
+ mtu = mep->mtu;
+
+ /* check ep number and direction against endpoint */
+ if (usb_endpoint_num(desc) != mep->epnum)
+ return -EINVAL;
+
+ if (!!usb_endpoint_dir_in(desc) ^ !!mep->is_in)
+ return -EINVAL;
+
+ dev_dbg(mtu->dev, "%s %s\n", __func__, ep->name);
+
+ if (mep->flags & MTU3_EP_ENABLED) {
+ dev_WARN_ONCE(mtu->dev, true, "%s is already enabled\n",
+ mep->name);
+ return 0;
+ }
+
+ spin_lock_irqsave(&mtu->lock, flags);
+ mep->desc = desc;
+ mep->comp_desc = ep->comp_desc;
+
+ ret = mtu3_ep_enable(mep);
+ if (ret)
+ goto error;
+
+ mep->busy = 0;
+ mep->wedged = 0;
+ mep->flags |= MTU3_EP_ENABLED;
+ mtu->active_ep++;
+
+error:
+ spin_unlock_irqrestore(&mtu->lock, flags);
+
+ dev_dbg(mtu->dev, "%s active_ep=%d\n", __func__, mtu->active_ep);
+
+ return ret;
+}
+
+static int mtu3_gadget_ep_disable(struct usb_ep *ep)
+{
+ struct mtu3_ep *mep = to_mtu3_ep(ep);
+ struct mtu3 *mtu = mep->mtu;
+ unsigned long flags;
+
+ dev_dbg(mtu->dev, "%s %s\n", __func__, mep->name);
+
+ if (!(mep->flags & MTU3_EP_ENABLED)) {
+ dev_warn(mtu->dev, "%s is already disabled\n", mep->name);
+ return 0;
+ }
+
+ spin_lock_irqsave(&mtu->lock, flags);
+ mtu3_ep_disable(mep);
+ mep->flags &= ~MTU3_EP_ENABLED;
+ mtu->active_ep--;
+ spin_unlock_irqrestore(&mtu->lock, flags);
+
+ dev_dbg(mtu->dev, "%s active_ep=%d, mtu3 is_active=%d\n",
+ __func__, mtu->active_ep, mtu->is_active);
+
+ return 0;
+}
+
+struct usb_request *mtu3_alloc_request(struct usb_ep *ep, gfp_t gfp_flags)
+{
+ struct mtu3_ep *mep = to_mtu3_ep(ep);
+ struct mtu3_request *mreq;
+
+ mreq = kzalloc(sizeof(*mreq), gfp_flags);
+ if (!mreq)
+ return NULL;
+
+ mreq->request.dma = DMA_ADDR_INVALID;
+ mreq->epnum = mep->epnum;
+ mreq->mep = mep;
+
+ return &mreq->request;
+}
+
+void mtu3_free_request(struct usb_ep *ep, struct usb_request *req)
+{
+ kfree(to_mtu3_request(req));
+}
+
+static int mtu3_gadget_queue(struct usb_ep *ep,
+ struct usb_request *req, gfp_t gfp_flags)
+{
+ struct mtu3_ep *mep;
+ struct mtu3_request *mreq;
+ struct mtu3 *mtu;
+ unsigned long flags;
+ int ret = 0;
+
+ if (!ep || !req)
+ return -EINVAL;
+
+ if (!req->buf)
+ return -ENODATA;
+
+ mep = to_mtu3_ep(ep);
+ mtu = mep->mtu;
+ mreq = to_mtu3_request(req);
+ mreq->mtu = mtu;
+
+ if (mreq->mep != mep)
+ return -EINVAL;
+
+ dev_dbg(mtu->dev, "%s %s EP%d(%s), req=%p, maxp=%d, len#%d\n",
+ __func__, mep->is_in ? "TX" : "RX", mreq->epnum, ep->name,
+ mreq, ep->maxpacket, mreq->request.length);
+
+ if (req->length > GPD_BUF_SIZE) {
+ dev_warn(mtu->dev,
+ "req length > supported MAX:%d requested:%d\n",
+ GPD_BUF_SIZE, req->length);
+ return -EOPNOTSUPP;
+ }
+
+ /* don't queue if the ep is down */
+ if (!mep->desc) {
+ dev_dbg(mtu->dev, "req=%p queued to %s while it's disabled\n",
+ req, ep->name);
+ return -ESHUTDOWN;
+ }
+
+ mreq->request.actual = 0;
+ mreq->request.status = -EINPROGRESS;
+
+ ret = usb_gadget_map_request(&mtu->g, req, mep->is_in);
+ if (ret) {
+ dev_err(mtu->dev, "dma mapping failed\n");
+ return ret;
+ }
+
+ spin_lock_irqsave(&mtu->lock, flags);
+
+ if (mtu3_prepare_transfer(mep)) {
+ ret = -EAGAIN;
+ goto error;
+ }
+
+ list_add_tail(&mreq->list, &mep->req_list);
+ mtu3_insert_gpd(mep, mreq);
+ mtu3_qmu_resume(mep);
+
+error:
+ spin_unlock_irqrestore(&mtu->lock, flags);
+
+ return ret;
+}
+
+static int mtu3_gadget_dequeue(struct usb_ep *ep, struct usb_request *req)
+{
+ struct mtu3_ep *mep = to_mtu3_ep(ep);
+ struct mtu3_request *mreq = to_mtu3_request(req);
+ struct mtu3_request *r;
+ unsigned long flags;
+ int ret = 0;
+ struct mtu3 *mtu = mep->mtu;
+
+ if (!ep || !req || mreq->mep != mep)
+ return -EINVAL;
+
+ dev_dbg(mtu->dev, "%s : req=%p\n", __func__, req);
+
+ spin_lock_irqsave(&mtu->lock, flags);
+
+ list_for_each_entry(r, &mep->req_list, list) {
+ if (r == mreq)
+ break;
+ }
+ if (r != mreq) {
+ dev_dbg(mtu->dev, "req=%p not queued to %s\n", req, ep->name);
+ ret = -EINVAL;
+ goto done;
+ }
+
+ mtu3_qmu_flush(mep); /* REVISIT: set BPS ?? */
+ mtu3_req_complete(mep, req, -ECONNRESET);
+ mtu3_qmu_start(mep);
+
+done:
+ spin_unlock_irqrestore(&mtu->lock, flags);
+
+ return ret;
+}
+
+/*
+ * Set or clear the halt bit of an EP.
+ * A halted EP won't TX/RX any data, but requests may still be queued to it.
+ */
+static int mtu3_gadget_ep_set_halt(struct usb_ep *ep, int value)
+{
+ struct mtu3_ep *mep = to_mtu3_ep(ep);
+ struct mtu3 *mtu = mep->mtu;
+ struct mtu3_request *mreq;
+ unsigned long flags;
+ int ret = 0;
+
+ if (!ep)
+ return -EINVAL;
+
+ dev_dbg(mtu->dev, "%s : %s...", __func__, ep->name);
+
+ spin_lock_irqsave(&mtu->lock, flags);
+
+ if (mep->type == USB_ENDPOINT_XFER_ISOC) {
+ ret = -EINVAL;
+ goto done;
+ }
+
+ mreq = next_request(mep);
+ if (value) {
+ /*
+ * If there is no request queued for a TX endpoint, the QMU
+ * will not transfer data to the TX FIFO, so there is no need
+ * to check here whether the TX FIFO holds bytes.
+ */
+ if (mreq) {
+ dev_dbg(mtu->dev, "req in progress, cannot halt %s\n",
+ ep->name);
+ ret = -EAGAIN;
+ goto done;
+ }
+ } else {
+ mep->wedged = 0;
+ }
+
+ dev_dbg(mtu->dev, "%s %s stall\n", ep->name, value ? "set" : "clear");
+
+ mtu3_ep_stall_set(mep, value);
+
+done:
+ spin_unlock_irqrestore(&mtu->lock, flags);
+
+ return ret;
+}
+
+/* Set the halt feature; subsequent clear-halt requests are ignored */
+static int mtu3_gadget_ep_set_wedge(struct usb_ep *ep)
+{
+ struct mtu3_ep *mep = to_mtu3_ep(ep);
+
+ if (!ep)
+ return -EINVAL;
+
+ mep->wedged = 1;
+
+ return usb_ep_set_halt(ep);
+}
+
+static const struct usb_ep_ops mtu3_ep_ops = {
+ .enable = mtu3_gadget_ep_enable,
+ .disable = mtu3_gadget_ep_disable,
+ .alloc_request = mtu3_alloc_request,
+ .free_request = mtu3_free_request,
+ .queue = mtu3_gadget_queue,
+ .dequeue = mtu3_gadget_dequeue,
+ .set_halt = mtu3_gadget_ep_set_halt,
+ .set_wedge = mtu3_gadget_ep_set_wedge,
+};
+
+static int mtu3_gadget_get_frame(struct usb_gadget *gadget)
+{
+ struct mtu3 *mtu = gadget_to_mtu3(gadget);
+
+ return (int)mtu3_readl(mtu->mac_base, U3D_USB20_FRAME_NUM);
+}
+
+static int mtu3_gadget_wakeup(struct usb_gadget *gadget)
+{
+ struct mtu3 *mtu = gadget_to_mtu3(gadget);
+ unsigned long flags;
+
+ dev_dbg(mtu->dev, "%s\n", __func__);
+
+ /* remote wakeup feature is not enabled by host */
+ if (!mtu->may_wakeup)
+ return -EOPNOTSUPP;
+
+ spin_lock_irqsave(&mtu->lock, flags);
+ if (mtu->g.speed == USB_SPEED_SUPER) {
+ mtu3_setbits(mtu->mac_base, U3D_LINK_POWER_CONTROL, UX_EXIT);
+ } else {
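+ /*
+ * drive resume signaling for ~10 ms, within the 1-15 ms
+ * window allowed for device-initiated remote wakeup
+ */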
+ mtu3_setbits(mtu->mac_base, U3D_POWER_MANAGEMENT, RESUME);
+ spin_unlock_irqrestore(&mtu->lock, flags);
+ usleep_range(10000, 11000);
+ spin_lock_irqsave(&mtu->lock, flags);
+ mtu3_clrbits(mtu->mac_base, U3D_POWER_MANAGEMENT, RESUME);
+ }
+ spin_unlock_irqrestore(&mtu->lock, flags);
+ return 0;
+}
+
+static int mtu3_gadget_set_self_powered(struct usb_gadget *gadget,
+ int is_selfpowered)
+{
+ struct mtu3 *mtu = gadget_to_mtu3(gadget);
+
+ mtu->is_self_powered = !!is_selfpowered;
+ return 0;
+}
+
+static int mtu3_gadget_pullup(struct usb_gadget *gadget, int is_on)
+{
+ struct mtu3 *mtu = gadget_to_mtu3(gadget);
+ unsigned long flags;
+
+ dev_dbg(mtu->dev, "%s (%s) for %sactive device\n", __func__,
+ is_on ? "on" : "off", mtu->is_active ? "" : "in");
+
+ /* we'd rather not pullup unless the device is active. */
+ spin_lock_irqsave(&mtu->lock, flags);
+
+ is_on = !!is_on;
+ if (!mtu->is_active) {
+ /* save it for mtu3_start() to process the request */
+ mtu->softconnect = is_on;
+ } else if (is_on != mtu->softconnect) {
+ mtu->softconnect = is_on;
+ mtu3_dev_on_off(mtu, is_on);
+ }
+
+ spin_unlock_irqrestore(&mtu->lock, flags);
+
+ return 0;
+}
+
+static int mtu3_gadget_start(struct usb_gadget *gadget,
+ struct usb_gadget_driver *driver)
+{
+ struct mtu3 *mtu = gadget_to_mtu3(gadget);
+ unsigned long flags;
+
+ if (mtu->gadget_driver) {
+ dev_err(mtu->dev, "%s is already bound to %s\n",
+ mtu->g.name, mtu->gadget_driver->driver.name);
+ return -EBUSY;
+ }
+
+ dev_dbg(mtu->dev, "bind driver %s\n", driver->function);
+
+ spin_lock_irqsave(&mtu->lock, flags);
+
+ mtu->softconnect = 0;
+ mtu->gadget_driver = driver;
+
+ if (mtu->ssusb->dr_mode == USB_DR_MODE_PERIPHERAL)
+ mtu3_start(mtu);
+
+ spin_unlock_irqrestore(&mtu->lock, flags);
+
+ return 0;
+}
+
+static void stop_activity(struct mtu3 *mtu)
+{
+ struct usb_gadget_driver *driver = mtu->gadget_driver;
+ int i;
+
+ /* don't disconnect if it's not connected */
+ if (mtu->g.speed == USB_SPEED_UNKNOWN)
+ driver = NULL;
+ else
+ mtu->g.speed = USB_SPEED_UNKNOWN;
+
+ /* deactivate the hardware */
+ if (mtu->softconnect) {
+ mtu->softconnect = 0;
+ mtu3_dev_on_off(mtu, 0);
+ }
+
+ /*
+ * killing any outstanding requests will quiesce the driver;
+ * then report disconnect
+ */
+ nuke(mtu->ep0, -ESHUTDOWN);
+ for (i = 1; i < mtu->num_eps; i++) {
+ nuke(mtu->in_eps + i, -ESHUTDOWN);
+ nuke(mtu->out_eps + i, -ESHUTDOWN);
+ }
+
+ if (driver) {
+ spin_unlock(&mtu->lock);
+ driver->disconnect(&mtu->g);
+ spin_lock(&mtu->lock);
+ }
+}
+
+static int mtu3_gadget_stop(struct usb_gadget *g)
+{
+ struct mtu3 *mtu = gadget_to_mtu3(g);
+ unsigned long flags;
+
+ dev_dbg(mtu->dev, "%s\n", __func__);
+
+ spin_lock_irqsave(&mtu->lock, flags);
+
+ stop_activity(mtu);
+ mtu->gadget_driver = NULL;
+
+ if (mtu->ssusb->dr_mode == USB_DR_MODE_PERIPHERAL)
+ mtu3_stop(mtu);
+
+ spin_unlock_irqrestore(&mtu->lock, flags);
+
+ return 0;
+}
+
+static const struct usb_gadget_ops mtu3_gadget_ops = {
+ .get_frame = mtu3_gadget_get_frame,
+ .wakeup = mtu3_gadget_wakeup,
+ .set_selfpowered = mtu3_gadget_set_self_powered,
+ .pullup = mtu3_gadget_pullup,
+ .udc_start = mtu3_gadget_start,
+ .udc_stop = mtu3_gadget_stop,
+};
+
+static void init_hw_ep(struct mtu3 *mtu, struct mtu3_ep *mep,
+ u32 epnum, u32 is_in)
+{
+ mep->epnum = epnum;
+ mep->mtu = mtu;
+ mep->is_in = is_in;
+
+ INIT_LIST_HEAD(&mep->req_list);
+
+ sprintf(mep->name, "ep%d%s", epnum,
+ !epnum ? "" : (is_in ? "in" : "out"));
+
+ mep->ep.name = mep->name;
+ INIT_LIST_HEAD(&mep->ep.ep_list);
+
+ /* initialize maxpacket as SS */
+ if (!epnum) {
+ usb_ep_set_maxpacket_limit(&mep->ep, 512);
+ mep->ep.caps.type_control = true;
+ mep->ep.ops = &mtu3_ep0_ops;
+ mtu->g.ep0 = &mep->ep;
+ } else {
+ usb_ep_set_maxpacket_limit(&mep->ep, 1024);
+ mep->ep.caps.type_iso = true;
+ mep->ep.caps.type_bulk = true;
+ mep->ep.caps.type_int = true;
+ mep->ep.ops = &mtu3_ep_ops;
+ list_add_tail(&mep->ep.ep_list, &mtu->g.ep_list);
+ }
+
+ dev_dbg(mtu->dev, "%s, name=%s, maxp=%d\n", __func__, mep->ep.name,
+ mep->ep.maxpacket);
+
+ if (!epnum) {
+ mep->ep.caps.dir_in = true;
+ mep->ep.caps.dir_out = true;
+ } else if (is_in) {
+ mep->ep.caps.dir_in = true;
+ } else {
+ mep->ep.caps.dir_out = true;
+ }
+}
+
+static void mtu3_gadget_init_eps(struct mtu3 *mtu)
+{
+ u8 epnum;
+
+ /* initialize endpoint list just once */
+ INIT_LIST_HEAD(&mtu->g.ep_list);
+
+ dev_dbg(mtu->dev, "%s num_eps(1 for a pair of tx&rx ep)=%d\n",
+ __func__, mtu->num_eps);
+
+ init_hw_ep(mtu, mtu->ep0, 0, 0);
+ for (epnum = 1; epnum < mtu->num_eps; epnum++) {
+ init_hw_ep(mtu, mtu->in_eps + epnum, epnum, 1);
+ init_hw_ep(mtu, mtu->out_eps + epnum, epnum, 0);
+ }
+}
+
+int mtu3_gadget_setup(struct mtu3 *mtu)
+{
+ int ret;
+
+ mtu->g.ops = &mtu3_gadget_ops;
+ mtu->g.max_speed = mtu->max_speed;
+ mtu->g.speed = USB_SPEED_UNKNOWN;
+ mtu->g.sg_supported = 0;
+ mtu->g.name = MTU3_DRIVER_NAME;
+ mtu->is_active = 0;
+
+ mtu3_gadget_init_eps(mtu);
+
+ ret = usb_add_gadget_udc(mtu->dev, &mtu->g);
+ if (ret) {
+ dev_err(mtu->dev, "failed to register udc\n");
+ return ret;
+ }
+
+ usb_gadget_set_state(&mtu->g, USB_STATE_NOTATTACHED);
+
+ return 0;
+}
+
+void mtu3_gadget_cleanup(struct mtu3 *mtu)
+{
+ usb_del_gadget_udc(&mtu->g);
+}
+
+void mtu3_gadget_resume(struct mtu3 *mtu)
+{
+ dev_dbg(mtu->dev, "gadget RESUME\n");
+ if (mtu->gadget_driver && mtu->gadget_driver->resume) {
+ spin_unlock(&mtu->lock);
+ mtu->gadget_driver->resume(&mtu->g);
+ spin_lock(&mtu->lock);
+ }
+}
+
+/* called when SOF packets stop for 3+ ms, or when the link enters U3 */
+void mtu3_gadget_suspend(struct mtu3 *mtu)
+{
+ dev_dbg(mtu->dev, "gadget SUSPEND\n");
+ if (mtu->gadget_driver && mtu->gadget_driver->suspend) {
+ spin_unlock(&mtu->lock);
+ mtu->gadget_driver->suspend(&mtu->g);
+ spin_lock(&mtu->lock);
+ }
+}
+
+/* called when VBUS drops below session threshold, and in other cases */
+void mtu3_gadget_disconnect(struct mtu3 *mtu)
+{
+ dev_dbg(mtu->dev, "gadget DISCONNECT\n");
+ if (mtu->gadget_driver && mtu->gadget_driver->disconnect) {
+ spin_unlock(&mtu->lock);
+ mtu->gadget_driver->disconnect(&mtu->g);
+ spin_lock(&mtu->lock);
+ }
+
+ usb_gadget_set_state(&mtu->g, USB_STATE_NOTATTACHED);
+}
+
+void mtu3_gadget_reset(struct mtu3 *mtu)
+{
+ dev_dbg(mtu->dev, "gadget RESET\n");
+
+ /* report disconnect, if we didn't flush EP state */
+ if (mtu->g.speed != USB_SPEED_UNKNOWN)
+ mtu3_gadget_disconnect(mtu);
+
+ mtu->address = 0;
+ mtu->ep0_state = MU3D_EP0_STATE_SETUP;
+ mtu->may_wakeup = 0;
+}
diff --git a/drivers/usb/mtu3/mtu3_gadget_ep0.c b/drivers/usb/mtu3/mtu3_gadget_ep0.c
new file mode 100644
index 000000000000..2d7427b48775
--- /dev/null
+++ b/drivers/usb/mtu3/mtu3_gadget_ep0.c
@@ -0,0 +1,881 @@
+/*
+ * mtu3_gadget_ep0.c - MediaTek USB3 DRD peripheral driver ep0 handling
+ *
+ * Copyright (c) 2016 MediaTek Inc.
+ *
+ * Author: Chunfeng.Yun <chunfeng.yun@mediatek.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include "mtu3.h"
+
+/* ep0 is always mtu3->in_eps[0] */
+#define next_ep0_request(mtu) next_request((mtu)->ep0)
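+
+/*
+ * next_request() comes from mtu3.h; a minimal sketch of the assumed helper
+ * (returns the oldest queued request, or NULL if the list is empty):
+ *
+ *	static inline struct mtu3_request *next_request(struct mtu3_ep *mep)
+ *	{
+ *		return list_first_entry_or_null(&mep->req_list,
+ *				struct mtu3_request, list);
+ *	}
+ */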
+
+/* for high speed test mode; see USB 2.0 spec 7.1.20 */
+static const u8 mtu3_test_packet[53] = {
+ /* implicit SYNC then DATA0 to start */
+
+ /* JKJKJKJK x9 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ /* JJKKJJKK x8 */
+ 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
+ /* JJJJKKKK x8 */
+ 0xee, 0xee, 0xee, 0xee, 0xee, 0xee, 0xee, 0xee,
+ /* JJJJJJJKKKKKKK x8 */
+ 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ /* JJJJJJJK x8 */
+ 0x7f, 0xbf, 0xdf, 0xef, 0xf7, 0xfb, 0xfd,
+ /* JKKKKKKK x10, JK */
+ 0xfc, 0x7e, 0xbf, 0xdf, 0xef, 0xf7, 0xfb, 0xfd, 0x7e,
+ /* implicit CRC16 then EOP to end */
+};
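+
+/*
+ * Sanity check of the table size: 9 + 8 + 8 + 12 + 7 + 9 = 53 bytes, which
+ * matches the declared array length; the SYNC, DATA0 PID, CRC16 and EOP
+ * noted as implicit above are all generated by the hardware.
+ */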
+
+static const char *decode_ep0_state(struct mtu3 *mtu)
+{
+ switch (mtu->ep0_state) {
+ case MU3D_EP0_STATE_SETUP:
+ return "SETUP";
+ case MU3D_EP0_STATE_TX:
+ return "IN";
+ case MU3D_EP0_STATE_RX:
+ return "OUT";
+ case MU3D_EP0_STATE_TX_END:
+ return "TX-END";
+ case MU3D_EP0_STATE_STALL:
+ return "STALL";
+ default:
+ return "??";
+ }
+}
+
+static void ep0_req_giveback(struct mtu3 *mtu, struct usb_request *req)
+{
+ mtu3_req_complete(mtu->ep0, req, 0);
+}
+
+static int
+forward_to_driver(struct mtu3 *mtu, const struct usb_ctrlrequest *setup)
+__releases(mtu->lock)
+__acquires(mtu->lock)
+{
+ int ret;
+
+ if (!mtu->gadget_driver)
+ return -EOPNOTSUPP;
+
+ spin_unlock(&mtu->lock);
+ ret = mtu->gadget_driver->setup(&mtu->g, setup);
+ spin_lock(&mtu->lock);
+
+ dev_dbg(mtu->dev, "%s ret %d\n", __func__, ret);
+ return ret;
+}
+
+static void ep0_write_fifo(struct mtu3_ep *mep, const u8 *src, u16 len)
+{
+ void __iomem *fifo = mep->mtu->mac_base + U3D_FIFO0;
+ u16 index = 0;
+
+ dev_dbg(mep->mtu->dev, "%s: ep%din, len=%d, buf=%p\n",
+ __func__, mep->epnum, len, src);
+
+ if (len >= 4) {
+ iowrite32_rep(fifo, src, len >> 2);
+ index = len & ~0x03;
+ }
+ if (len & 0x02) {
+ writew(*(u16 *)&src[index], fifo);
+ index += 2;
+ }
+ if (len & 0x01)
+ writeb(src[index], fifo);
+}
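+
+/*
+ * A worked example of the chunking above for len = 7: one 32-bit write
+ * covers bytes 0-3 (index becomes 4), the 16-bit write covers bytes 4-5
+ * (index becomes 6), and the final 8-bit write covers byte 6.
+ */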
+
+static void ep0_read_fifo(struct mtu3_ep *mep, u8 *dst, u16 len)
+{
+ void __iomem *fifo = mep->mtu->mac_base + U3D_FIFO0;
+ u32 value;
+ u16 index = 0;
+
+ dev_dbg(mep->mtu->dev, "%s: ep%dout len=%d buf=%p\n",
+ __func__, mep->epnum, len, dst);
+
+ if (len >= 4) {
+ ioread32_rep(fifo, dst, len >> 2);
+ index = len & ~0x03;
+ }
+ if (len & 0x3) {
+ value = readl(fifo);
+ memcpy(&dst[index], &value, len & 0x3);
+ }
+}
+
+static void ep0_load_test_packet(struct mtu3 *mtu)
+{
+ /*
+ * Because the test packet is shorter than the HS ep0 maximum packet
+ * size, write it into the FIFO directly.
+ */
+ ep0_write_fifo(mtu->ep0, mtu3_test_packet, sizeof(mtu3_test_packet));
+}
+
+/*
+ * A. send STALL for setup transfer without data stage:
+ * set SENDSTALL and SETUPPKTRDY at the same time;
+ * B. send STALL for other cases:
+ * set SENDSTALL only.
+ */
+static void ep0_stall_set(struct mtu3_ep *mep0, bool set, u32 pktrdy)
+{
+ struct mtu3 *mtu = mep0->mtu;
+ void __iomem *mbase = mtu->mac_base;
+ u32 csr;
+
+ /* EP0_SENTSTALL is W1C */
+ csr = mtu3_readl(mbase, U3D_EP0CSR) & EP0_W1C_BITS;
+ if (set)
+ csr |= EP0_SENDSTALL | pktrdy;
+ else
+ csr = (csr & ~EP0_SENDSTALL) | EP0_SENTSTALL;
+ mtu3_writel(mbase, U3D_EP0CSR, csr);
+
+ mtu->ep0_state = MU3D_EP0_STATE_SETUP;
+
+ dev_dbg(mtu->dev, "ep0: %s STALL, ep0_state: %s\n",
+ set ? "SEND" : "CLEAR", decode_ep0_state(mtu));
+}
+
+static int ep0_queue(struct mtu3_ep *mep0, struct mtu3_request *mreq);
+
+static void ep0_dummy_complete(struct usb_ep *ep, struct usb_request *req)
+{}
+
+static void ep0_set_sel_complete(struct usb_ep *ep, struct usb_request *req)
+{
+ struct mtu3_request *mreq;
+ struct mtu3 *mtu;
+ struct usb_set_sel_req sel;
+
+ memcpy(&sel, req->buf, sizeof(sel));
+
+ mreq = to_mtu3_request(req);
+ mtu = mreq->mtu;
+ dev_dbg(mtu->dev, "u1sel:%d, u1pel:%d, u2sel:%d, u2pel:%d\n",
+ sel.u1_sel, sel.u1_pel, sel.u2_sel, sel.u2_pel);
+}
+
+/* queue a data stage to handle the 6-byte SET_SEL request */
+static int ep0_set_sel(struct mtu3 *mtu, struct usb_ctrlrequest *setup)
+{
+ int ret;
+ u16 length = le16_to_cpu(setup->wLength);
+
+ if (unlikely(length != 6)) {
+ dev_err(mtu->dev, "%s wrong wLength:%d\n",
+ __func__, length);
+ return -EINVAL;
+ }
+
+ mtu->ep0_req.mep = mtu->ep0;
+ mtu->ep0_req.request.length = 6;
+ mtu->ep0_req.request.buf = mtu->setup_buf;
+ mtu->ep0_req.request.complete = ep0_set_sel_complete;
+ ret = ep0_queue(mtu->ep0, &mtu->ep0_req);
+
+ return ret < 0 ? ret : 1;
+}
+
+static int
+ep0_get_status(struct mtu3 *mtu, const struct usb_ctrlrequest *setup)
+{
+ struct mtu3_ep *mep = NULL;
+ int handled = 1;
+ u8 result[2] = {0, 0};
+ u8 epnum = 0;
+ int is_in;
+
+ switch (setup->bRequestType & USB_RECIP_MASK) {
+ case USB_RECIP_DEVICE:
+ result[0] = mtu->is_self_powered << USB_DEVICE_SELF_POWERED;
+ result[0] |= mtu->may_wakeup << USB_DEVICE_REMOTE_WAKEUP;
+ /* superspeed only */
+ if (mtu->g.speed == USB_SPEED_SUPER) {
+ result[0] |= mtu->u1_enable << USB_DEV_STAT_U1_ENABLED;
+ result[0] |= mtu->u2_enable << USB_DEV_STAT_U2_ENABLED;
+ }
+
+ dev_dbg(mtu->dev, "%s result=%x, U1=%x, U2=%x\n", __func__,
+ result[0], mtu->u1_enable, mtu->u2_enable);
+
+ break;
+ case USB_RECIP_INTERFACE:
+ break;
+ case USB_RECIP_ENDPOINT:
+ epnum = (u8) le16_to_cpu(setup->wIndex);
+ is_in = epnum & USB_DIR_IN;
+ epnum &= USB_ENDPOINT_NUMBER_MASK;
+
+ if (epnum >= mtu->num_eps) {
+ handled = -EINVAL;
+ break;
+ }
+ if (!epnum)
+ break;
+
+ mep = (is_in ? mtu->in_eps : mtu->out_eps) + epnum;
+ if (!mep->desc) {
+ handled = -EINVAL;
+ break;
+ }
+ if (mep->flags & MTU3_EP_STALL)
+ result[0] |= 1 << USB_ENDPOINT_HALT;
+
+ break;
+ default:
+ /* class, vendor, etc ... delegate */
+ handled = 0;
+ break;
+ }
+
+ if (handled > 0) {
+ int ret;
+
+ /* prepare a data stage for GET_STATUS */
+ dev_dbg(mtu->dev, "get_status=%x\n", *(u16 *)result);
+ memcpy(mtu->setup_buf, result, sizeof(result));
+ mtu->ep0_req.mep = mtu->ep0;
+ mtu->ep0_req.request.length = 2;
+ mtu->ep0_req.request.buf = mtu->setup_buf;
+ mtu->ep0_req.request.complete = ep0_dummy_complete;
+ ret = ep0_queue(mtu->ep0, &mtu->ep0_req);
+ if (ret < 0)
+ handled = ret;
+ }
+ return handled;
+}
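+
+/*
+ * A worked example of the device-status bits above: a self-powered device
+ * with remote wakeup enabled (and U1/U2 disabled) replies via the 2-byte
+ * data stage with
+ *
+ *	result[0] = (1 << USB_DEVICE_SELF_POWERED) |
+ *		    (1 << USB_DEVICE_REMOTE_WAKEUP) = 0x03;
+ *	result[1] = 0;
+ */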
+
+static int handle_test_mode(struct mtu3 *mtu, struct usb_ctrlrequest *setup)
+{
+ void __iomem *mbase = mtu->mac_base;
+ int handled = 1;
+
+ switch (le16_to_cpu(setup->wIndex) >> 8) {
+ case TEST_J:
+ dev_dbg(mtu->dev, "TEST_J\n");
+ mtu->test_mode_nr = TEST_J_MODE;
+ break;
+ case TEST_K:
+ dev_dbg(mtu->dev, "TEST_K\n");
+ mtu->test_mode_nr = TEST_K_MODE;
+ break;
+ case TEST_SE0_NAK:
+ dev_dbg(mtu->dev, "TEST_SE0_NAK\n");
+ mtu->test_mode_nr = TEST_SE0_NAK_MODE;
+ break;
+ case TEST_PACKET:
+ dev_dbg(mtu->dev, "TEST_PACKET\n");
+ mtu->test_mode_nr = TEST_PACKET_MODE;
+ break;
+ default:
+ handled = -EINVAL;
+ goto out;
+ }
+
+ mtu->test_mode = true;
+
+ /* no TX completion interrupt; the platform must be restarted after the test */
+ if (mtu->test_mode_nr == TEST_PACKET_MODE)
+ ep0_load_test_packet(mtu);
+
+ mtu3_writel(mbase, U3D_USB2_TEST_MODE, mtu->test_mode_nr);
+
+ mtu->ep0_state = MU3D_EP0_STATE_SETUP;
+
+out:
+ return handled;
+}
+
+static int ep0_handle_feature_dev(struct mtu3 *mtu,
+ struct usb_ctrlrequest *setup, bool set)
+{
+ void __iomem *mbase = mtu->mac_base;
+ int handled = -EINVAL;
+ u32 lpc;
+
+ switch (le16_to_cpu(setup->wValue)) {
+ case USB_DEVICE_REMOTE_WAKEUP:
+ mtu->may_wakeup = !!set;
+ handled = 1;
+ break;
+ case USB_DEVICE_TEST_MODE:
+ if (!set || (mtu->g.speed != USB_SPEED_HIGH) ||
+ (le16_to_cpu(setup->wIndex) & 0xff))
+ break;
+
+ handled = handle_test_mode(mtu, setup);
+ break;
+ case USB_DEVICE_U1_ENABLE:
+ if (mtu->g.speed != USB_SPEED_SUPER ||
+ mtu->g.state != USB_STATE_CONFIGURED)
+ break;
+
+ lpc = mtu3_readl(mbase, U3D_LINK_POWER_CONTROL);
+ if (set)
+ lpc |= SW_U1_ACCEPT_ENABLE;
+ else
+ lpc &= ~SW_U1_ACCEPT_ENABLE;
+ mtu3_writel(mbase, U3D_LINK_POWER_CONTROL, lpc);
+
+ mtu->u1_enable = !!set;
+ handled = 1;
+ break;
+ case USB_DEVICE_U2_ENABLE:
+ if (mtu->g.speed != USB_SPEED_SUPER ||
+ mtu->g.state != USB_STATE_CONFIGURED)
+ break;
+
+ lpc = mtu3_readl(mbase, U3D_LINK_POWER_CONTROL);
+ if (set)
+ lpc |= SW_U2_ACCEPT_ENABLE;
+ else
+ lpc &= ~SW_U2_ACCEPT_ENABLE;
+ mtu3_writel(mbase, U3D_LINK_POWER_CONTROL, lpc);
+
+ mtu->u2_enable = !!set;
+ handled = 1;
+ break;
+ default:
+ handled = -EINVAL;
+ break;
+ }
+ return handled;
+}
+
+static int ep0_handle_feature(struct mtu3 *mtu,
+ struct usb_ctrlrequest *setup, bool set)
+{
+ struct mtu3_ep *mep;
+ int handled = -EINVAL;
+ int is_in;
+ u16 value;
+ u16 index;
+ u8 epnum;
+
+ value = le16_to_cpu(setup->wValue);
+ index = le16_to_cpu(setup->wIndex);
+
+ switch (setup->bRequestType & USB_RECIP_MASK) {
+ case USB_RECIP_DEVICE:
+ handled = ep0_handle_feature_dev(mtu, setup, set);
+ break;
+ case USB_RECIP_INTERFACE:
+ /* superspeed only */
+ if ((value == USB_INTRF_FUNC_SUSPEND)
+ && (mtu->g.speed == USB_SPEED_SUPER)) {
+ /*
+ * forward the request because function drivers
+ * should handle it
+ */
+ handled = 0;
+ }
+ break;
+ case USB_RECIP_ENDPOINT:
+ epnum = index & USB_ENDPOINT_NUMBER_MASK;
+ if (epnum == 0 || epnum >= mtu->num_eps ||
+ value != USB_ENDPOINT_HALT)
+ break;
+
+ is_in = index & USB_DIR_IN;
+ mep = (is_in ? mtu->in_eps : mtu->out_eps) + epnum;
+ if (!mep->desc)
+ break;
+
+ handled = 1;
+ /* ignore request if endpoint is wedged */
+ if (mep->wedged)
+ break;
+
+ mtu3_ep_stall_set(mep, set);
+ break;
+ default:
+ /* class, vendor, etc ... delegate */
+ handled = 0;
+ break;
+ }
+ return handled;
+}
+
+/*
+ * handle all control requests that can be handled locally;
+ * returns:
+ * negative errno - an error happened
+ * zero - the SETUP needs to be delegated to the gadget driver
+ * positive - already handled here
+ */
+static int handle_standard_request(struct mtu3 *mtu,
+ struct usb_ctrlrequest *setup)
+{
+ void __iomem *mbase = mtu->mac_base;
+ enum usb_device_state state = mtu->g.state;
+ int handled = -EINVAL;
+ u32 dev_conf;
+ u16 value;
+
+ value = le16_to_cpu(setup->wValue);
+
+ /* the gadget driver handles everything except what we must handle */
+ switch (setup->bRequest) {
+ case USB_REQ_SET_ADDRESS:
+ /* change it after the status stage */
+ mtu->address = (u8) (value & 0x7f);
+ dev_dbg(mtu->dev, "set address to 0x%x\n", mtu->address);
+
+ dev_conf = mtu3_readl(mbase, U3D_DEVICE_CONF);
+ dev_conf &= ~DEV_ADDR_MSK;
+ dev_conf |= DEV_ADDR(mtu->address);
+ mtu3_writel(mbase, U3D_DEVICE_CONF, dev_conf);
+
+ if (mtu->address)
+ usb_gadget_set_state(&mtu->g, USB_STATE_ADDRESS);
+ else
+ usb_gadget_set_state(&mtu->g, USB_STATE_DEFAULT);
+
+ handled = 1;
+ break;
+ case USB_REQ_SET_CONFIGURATION:
+ if (state == USB_STATE_ADDRESS) {
+ usb_gadget_set_state(&mtu->g,
+ USB_STATE_CONFIGURED);
+ } else if (state == USB_STATE_CONFIGURED) {
+ /*
+ * USB2 spec sec 9.4.7, if wValue is 0 then dev
+ * is moved to addressed state
+ */
+ if (!value)
+ usb_gadget_set_state(&mtu->g,
+ USB_STATE_ADDRESS);
+ }
+ handled = 0;
+ break;
+ case USB_REQ_CLEAR_FEATURE:
+ handled = ep0_handle_feature(mtu, setup, 0);
+ break;
+ case USB_REQ_SET_FEATURE:
+ handled = ep0_handle_feature(mtu, setup, 1);
+ break;
+ case USB_REQ_GET_STATUS:
+ handled = ep0_get_status(mtu, setup);
+ break;
+ case USB_REQ_SET_SEL:
+ handled = ep0_set_sel(mtu, setup);
+ break;
+ case USB_REQ_SET_ISOCH_DELAY:
+ handled = 1;
+ break;
+ default:
+ /* delegate other requests, e.g. GET_DESCRIPTOR, SET_INTERFACE */
+ handled = 0;
+ }
+
+ return handled;
+}
+
+/* receive a data packet (OUT) */
+static void ep0_rx_state(struct mtu3 *mtu)
+{
+ struct mtu3_request *mreq;
+ struct usb_request *req;
+ void __iomem *mbase = mtu->mac_base;
+ u32 maxp;
+ u32 csr;
+ u16 count = 0;
+
+ dev_dbg(mtu->dev, "%s\n", __func__);
+
+ csr = mtu3_readl(mbase, U3D_EP0CSR) & EP0_W1C_BITS;
+ mreq = next_ep0_request(mtu);
+ req = mreq ? &mreq->request : NULL;
+
+ /* read packet and ack; or stall because of gadget driver bug */
+ if (req) {
+ void *buf = req->buf + req->actual;
+ unsigned int len = req->length - req->actual;
+
+ /* read the buffer */
+ count = mtu3_readl(mbase, U3D_RXCOUNT0);
+ if (count > len) {
+ req->status = -EOVERFLOW;
+ count = len;
+ }
+ ep0_read_fifo(mtu->ep0, buf, count);
+ req->actual += count;
+ csr |= EP0_RXPKTRDY;
+
+ maxp = mtu->g.ep0->maxpacket;
+ if (count < maxp || req->actual == req->length) {
+ mtu->ep0_state = MU3D_EP0_STATE_SETUP;
+ dev_dbg(mtu->dev, "ep0 state: %s\n",
+ decode_ep0_state(mtu));
+
+ csr |= EP0_DATAEND;
+ } else {
+ req = NULL;
+ }
+ } else {
+ csr |= EP0_RXPKTRDY | EP0_SENDSTALL;
+ dev_dbg(mtu->dev, "%s: SENDSTALL\n", __func__);
+ }
+
+ mtu3_writel(mbase, U3D_EP0CSR, csr);
+
+ /* give back the request if all data has been received */
+ if (req)
+ ep0_req_giveback(mtu, req);
+}
+
+/* transmitting to the host (IN) */
+static void ep0_tx_state(struct mtu3 *mtu)
+{
+ struct mtu3_request *mreq = next_ep0_request(mtu);
+ struct usb_request *req;
+ u32 csr;
+ u8 *src;
+ u32 count; /* SS ep0 maxp is 512, which would overflow a u8 */
+ u32 maxp;
+
+ dev_dbg(mtu->dev, "%s\n", __func__);
+
+ if (!mreq)
+ return;
+
+ maxp = mtu->g.ep0->maxpacket;
+ req = &mreq->request;
+
+ /* load the data */
+ src = (u8 *)req->buf + req->actual;
+ count = min(maxp, req->length - req->actual);
+ if (count)
+ ep0_write_fifo(mtu->ep0, src, count);
+
+ dev_dbg(mtu->dev, "%s act=%d, len=%d, cnt=%d, maxp=%d zero=%d\n",
+ __func__, req->actual, req->length, count, maxp, req->zero);
+
+ req->actual += count;
+
+ if ((count < maxp)
+ || ((req->actual == req->length) && !req->zero))
+ mtu->ep0_state = MU3D_EP0_STATE_TX_END;
+
+ /* send it out, triggering a "txpktrdy cleared" irq */
+ csr = mtu3_readl(mtu->mac_base, U3D_EP0CSR) & EP0_W1C_BITS;
+ mtu3_writel(mtu->mac_base, U3D_EP0CSR, csr | EP0_TXPKTRDY);
+
+ dev_dbg(mtu->dev, "%s ep0csr=0x%x\n", __func__,
+ mtu3_readl(mtu->mac_base, U3D_EP0CSR));
+}
+
+static void ep0_read_setup(struct mtu3 *mtu, struct usb_ctrlrequest *setup)
+{
+ struct mtu3_request *mreq;
+ u32 count;
+ u32 csr;
+
+ csr = mtu3_readl(mtu->mac_base, U3D_EP0CSR) & EP0_W1C_BITS;
+ count = mtu3_readl(mtu->mac_base, U3D_RXCOUNT0);
+
+ ep0_read_fifo(mtu->ep0, (u8 *)setup, count);
+
+ dev_dbg(mtu->dev, "SETUP req%02x.%02x v%04x i%04x l%04x\n",
+ setup->bRequestType, setup->bRequest,
+ le16_to_cpu(setup->wValue), le16_to_cpu(setup->wIndex),
+ le16_to_cpu(setup->wLength));
+
+ /* clean up any leftover transfers */
+ mreq = next_ep0_request(mtu);
+ if (mreq)
+ ep0_req_giveback(mtu, &mreq->request);
+
+ if (le16_to_cpu(setup->wLength) == 0) {
+ ; /* no data stage, nothing to do */
+ } else if (setup->bRequestType & USB_DIR_IN) {
+ mtu3_writel(mtu->mac_base, U3D_EP0CSR,
+ csr | EP0_SETUPPKTRDY | EP0_DPHTX);
+ mtu->ep0_state = MU3D_EP0_STATE_TX;
+ } else {
+ mtu3_writel(mtu->mac_base, U3D_EP0CSR,
+ (csr | EP0_SETUPPKTRDY) & (~EP0_DPHTX));
+ mtu->ep0_state = MU3D_EP0_STATE_RX;
+ }
+}
+
+static int ep0_handle_setup(struct mtu3 *mtu)
+__releases(mtu->lock)
+__acquires(mtu->lock)
+{
+ struct usb_ctrlrequest setup;
+ struct mtu3_request *mreq;
+ void __iomem *mbase = mtu->mac_base;
+ int handled = 0;
+
+ ep0_read_setup(mtu, &setup);
+
+ if ((setup.bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD)
+ handled = handle_standard_request(mtu, &setup);
+
+ dev_dbg(mtu->dev, "handled %d, ep0_state: %s\n",
+ handled, decode_ep0_state(mtu));
+
+ if (handled < 0)
+ goto stall;
+ else if (handled > 0)
+ goto finish;
+
+ handled = forward_to_driver(mtu, &setup);
+ if (handled < 0) {
+stall:
+ dev_dbg(mtu->dev, "%s stall (%d)\n", __func__, handled);
+
+ ep0_stall_set(mtu->ep0, true,
+ le16_to_cpu(setup.wLength) ? 0 : EP0_SETUPPKTRDY);
+
+ return 0;
+ }
+
+finish:
+ if (mtu->test_mode) {
+ ; /* nothing to do */
+ } else if (le16_to_cpu(setup.wLength) == 0) { /* no data stage */
+
+ mtu3_writel(mbase, U3D_EP0CSR,
+ (mtu3_readl(mbase, U3D_EP0CSR) & EP0_W1C_BITS)
+ | EP0_SETUPPKTRDY | EP0_DATAEND);
+
+ /* complete zlp request directly */
+ mreq = next_ep0_request(mtu);
+ if (mreq && !mreq->request.length)
+ ep0_req_giveback(mtu, &mreq->request);
+ }
+
+ return 0;
+}
+
+irqreturn_t mtu3_ep0_isr(struct mtu3 *mtu)
+{
+ void __iomem *mbase = mtu->mac_base;
+ struct mtu3_request *mreq;
+ u32 int_status;
+ irqreturn_t ret = IRQ_NONE;
+ u32 csr;
+ u32 len;
+
+ int_status = mtu3_readl(mbase, U3D_EPISR);
+ int_status &= mtu3_readl(mbase, U3D_EPIER);
+ mtu3_writel(mbase, U3D_EPISR, int_status); /* W1C */
+
+ /* only handle ep0's */
+ if (!(int_status & EP0ISR))
+ return IRQ_NONE;
+
+ csr = mtu3_readl(mbase, U3D_EP0CSR);
+
+ dev_dbg(mtu->dev, "%s csr=0x%x\n", __func__, csr);
+
+ /* we sent a stall; need to clear it now */
+ if (csr & EP0_SENTSTALL) {
+ ep0_stall_set(mtu->ep0, false, 0);
+ csr = mtu3_readl(mbase, U3D_EP0CSR);
+ ret = IRQ_HANDLED;
+ }
+ dev_dbg(mtu->dev, "ep0_state: %s\n", decode_ep0_state(mtu));
+
+ switch (mtu->ep0_state) {
+ case MU3D_EP0_STATE_TX:
+ /* irq on clearing txpktrdy */
+ if ((csr & EP0_FIFOFULL) == 0) {
+ ep0_tx_state(mtu);
+ ret = IRQ_HANDLED;
+ }
+ break;
+ case MU3D_EP0_STATE_RX:
+ /* irq on set rxpktrdy */
+ if (csr & EP0_RXPKTRDY) {
+ ep0_rx_state(mtu);
+ ret = IRQ_HANDLED;
+ }
+ break;
+ case MU3D_EP0_STATE_TX_END:
+ mtu3_writel(mbase, U3D_EP0CSR,
+ (csr & EP0_W1C_BITS) | EP0_DATAEND);
+
+ mreq = next_ep0_request(mtu);
+ if (mreq)
+ ep0_req_giveback(mtu, &mreq->request);
+
+ mtu->ep0_state = MU3D_EP0_STATE_SETUP;
+ ret = IRQ_HANDLED;
+ dev_dbg(mtu->dev, "ep0_state: %s\n", decode_ep0_state(mtu));
+ break;
+ case MU3D_EP0_STATE_SETUP:
+ if (!(csr & EP0_SETUPPKTRDY))
+ break;
+
+ len = mtu3_readl(mbase, U3D_RXCOUNT0);
+ if (len != 8) {
+ dev_err(mtu->dev, "SETUP packet len %d != 8 ?\n", len);
+ break;
+ }
+
+ ep0_handle_setup(mtu);
+ ret = IRQ_HANDLED;
+ break;
+ default:
+ /* can't happen */
+ ep0_stall_set(mtu->ep0, true, 0);
+ WARN_ON(1);
+ break;
+ }
+
+ return ret;
+}
+
+
+static int mtu3_ep0_enable(struct usb_ep *ep,
+ const struct usb_endpoint_descriptor *desc)
+{
+ /* always enabled */
+ return -EINVAL;
+}
+
+static int mtu3_ep0_disable(struct usb_ep *ep)
+{
+ /* always enabled */
+ return -EINVAL;
+}
+
+static int ep0_queue(struct mtu3_ep *mep, struct mtu3_request *mreq)
+{
+ struct mtu3 *mtu = mep->mtu;
+
+ mreq->mtu = mtu;
+ mreq->request.actual = 0;
+ mreq->request.status = -EINPROGRESS;
+
+ dev_dbg(mtu->dev, "%s %s (ep0_state: %s), len#%d\n", __func__,
+ mep->name, decode_ep0_state(mtu), mreq->request.length);
+
+ if (!list_empty(&mep->req_list))
+ return -EBUSY;
+
+ switch (mtu->ep0_state) {
+ case MU3D_EP0_STATE_SETUP:
+ case MU3D_EP0_STATE_RX: /* control-OUT data */
+ case MU3D_EP0_STATE_TX: /* control-IN data */
+ break;
+ default:
+ dev_err(mtu->dev, "%s, error in ep0 state %s\n", __func__,
+ decode_ep0_state(mtu));
+ return -EINVAL;
+ }
+
+ list_add_tail(&mreq->list, &mep->req_list);
+
+ /* sequence #1, IN ... start writing the data */
+ if (mtu->ep0_state == MU3D_EP0_STATE_TX)
+ ep0_tx_state(mtu);
+
+ return 0;
+}
+
+static int mtu3_ep0_queue(struct usb_ep *ep,
+ struct usb_request *req, gfp_t gfp)
+{
+ struct mtu3_ep *mep;
+ struct mtu3_request *mreq;
+ struct mtu3 *mtu;
+ unsigned long flags;
+ int ret = 0;
+
+ if (!ep || !req)
+ return -EINVAL;
+
+ mep = to_mtu3_ep(ep);
+ mtu = mep->mtu;
+ mreq = to_mtu3_request(req);
+
+ spin_lock_irqsave(&mtu->lock, flags);
+ ret = ep0_queue(mep, mreq);
+ spin_unlock_irqrestore(&mtu->lock, flags);
+ return ret;
+}
+
+static int mtu3_ep0_dequeue(struct usb_ep *ep, struct usb_request *req)
+{
+ /* we just won't support this */
+ return -EINVAL;
+}
+
+static int mtu3_ep0_halt(struct usb_ep *ep, int value)
+{
+ struct mtu3_ep *mep;
+ struct mtu3 *mtu;
+ unsigned long flags;
+ int ret = 0;
+
+ if (!ep || !value)
+ return -EINVAL;
+
+ mep = to_mtu3_ep(ep);
+ mtu = mep->mtu;
+
+ dev_dbg(mtu->dev, "%s\n", __func__);
+
+ spin_lock_irqsave(&mtu->lock, flags);
+
+ if (!list_empty(&mep->req_list)) {
+ ret = -EBUSY;
+ goto cleanup;
+ }
+
+ switch (mtu->ep0_state) {
+ /*
+ * stalls are usually issued after parsing a SETUP packet, either
+ * directly in irq context from setup() or else later.
+ */
+ case MU3D_EP0_STATE_TX:
+ case MU3D_EP0_STATE_TX_END:
+ case MU3D_EP0_STATE_RX:
+ case MU3D_EP0_STATE_SETUP:
+ ep0_stall_set(mtu->ep0, true, 0);
+ break;
+ default:
+ dev_dbg(mtu->dev, "ep0 can't halt in state %s\n",
+ decode_ep0_state(mtu));
+ ret = -EINVAL;
+ }
+
+cleanup:
+ spin_unlock_irqrestore(&mtu->lock, flags);
+ return ret;
+}
+
+const struct usb_ep_ops mtu3_ep0_ops = {
+ .enable = mtu3_ep0_enable,
+ .disable = mtu3_ep0_disable,
+ .alloc_request = mtu3_alloc_request,
+ .free_request = mtu3_free_request,
+ .queue = mtu3_ep0_queue,
+ .dequeue = mtu3_ep0_dequeue,
+ .set_halt = mtu3_ep0_halt,
+};
diff --git a/drivers/usb/mtu3/mtu3_host.c b/drivers/usb/mtu3/mtu3_host.c
new file mode 100644
index 000000000000..cd4d01087855
--- /dev/null
+++ b/drivers/usb/mtu3/mtu3_host.c
@@ -0,0 +1,294 @@
+/*
+ * mtu3_dr.c - dual role switch and host glue layer
+ *
+ * Copyright (C) 2016 MediaTek Inc.
+ *
+ * Author: Chunfeng Yun <chunfeng.yun@mediatek.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/clk.h>
+#include <linux/iopoll.h>
+#include <linux/irq.h>
+#include <linux/kernel.h>
+#include <linux/mfd/syscon.h>
+#include <linux/of_device.h>
+#include <linux/regmap.h>
+
+#include "mtu3.h"
+#include "mtu3_dr.h"
+
+#define PERI_WK_CTRL1 0x404
+#define UWK_CTL1_IS_C(x) (((x) & 0xf) << 26)
+#define UWK_CTL1_IS_E BIT(25)
+#define UWK_CTL1_IDDIG_C(x) (((x) & 0xf) << 11) /* cycle debounce */
+#define UWK_CTL1_IDDIG_E BIT(10) /* enable debounce */
+#define UWK_CTL1_IDDIG_P BIT(9) /* polarity */
+#define UWK_CTL1_IS_P BIT(6) /* polarity for ip sleep */
+
+/*
+ * ip-sleep wakeup mode:
+ * all clocks can be turned off, but the power domain should be kept on
+ */
+static void ssusb_wakeup_ip_sleep_en(struct ssusb_mtk *ssusb)
+{
+ u32 tmp;
+ struct regmap *pericfg = ssusb->pericfg;
+
+ regmap_read(pericfg, PERI_WK_CTRL1, &tmp);
+ tmp &= ~UWK_CTL1_IS_P;
+ tmp &= ~(UWK_CTL1_IS_C(0xf));
+ tmp |= UWK_CTL1_IS_C(0x8);
+ regmap_write(pericfg, PERI_WK_CTRL1, tmp);
+ regmap_write(pericfg, PERI_WK_CTRL1, tmp | UWK_CTL1_IS_E);
+
+ regmap_read(pericfg, PERI_WK_CTRL1, &tmp);
+ dev_dbg(ssusb->dev, "%s(): WK_CTRL1[P6,E25,C26:29]=%#x\n",
+ __func__, tmp);
+}
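+
+/*
+ * The sequence above could equivalently use regmap_update_bits(); a sketch
+ * with the same fields (0x8 is the debounce cycle value chosen above):
+ *
+ *	regmap_update_bits(pericfg, PERI_WK_CTRL1,
+ *			   UWK_CTL1_IS_P | UWK_CTL1_IS_C(0xf),
+ *			   UWK_CTL1_IS_C(0x8));
+ *	regmap_update_bits(pericfg, PERI_WK_CTRL1,
+ *			   UWK_CTL1_IS_E, UWK_CTL1_IS_E);
+ */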
+
+static void ssusb_wakeup_ip_sleep_dis(struct ssusb_mtk *ssusb)
+{
+ u32 tmp;
+
+ regmap_read(ssusb->pericfg, PERI_WK_CTRL1, &tmp);
+ tmp &= ~UWK_CTL1_IS_E;
+ regmap_write(ssusb->pericfg, PERI_WK_CTRL1, tmp);
+}
+
+int ssusb_wakeup_of_property_parse(struct ssusb_mtk *ssusb,
+ struct device_node *dn)
+{
+ struct device *dev = ssusb->dev;
+
+ /*
+ * The wakeup function is optional, so it is not an error if this
+ * property does not exist; in that case there is no need to get the
+ * related properties either.
+ */
+ ssusb->wakeup_en = of_property_read_bool(dn, "mediatek,enable-wakeup");
+ if (!ssusb->wakeup_en)
+ return 0;
+
+ ssusb->wk_deb_p0 = devm_clk_get(dev, "wakeup_deb_p0");
+ if (IS_ERR(ssusb->wk_deb_p0)) {
+ dev_err(dev, "fail to get wakeup_deb_p0\n");
+ return PTR_ERR(ssusb->wk_deb_p0);
+ }
+
+ ssusb->wk_deb_p1 = devm_clk_get(dev, "wakeup_deb_p1");
+ if (IS_ERR(ssusb->wk_deb_p1)) {
+ dev_err(dev, "fail to get wakeup_deb_p1\n");
+ return PTR_ERR(ssusb->wk_deb_p1);
+ }
+
+ ssusb->pericfg = syscon_regmap_lookup_by_phandle(dn,
+ "mediatek,syscon-wakeup");
+ if (IS_ERR(ssusb->pericfg)) {
+ dev_err(dev, "fail to get pericfg regs\n");
+ return PTR_ERR(ssusb->pericfg);
+ }
+
+ return 0;
+}
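+
+/*
+ * A hypothetical DT fragment consumed by the parser above (the node name
+ * and clock providers are illustrative only):
+ *
+ *	ssusb: usb@11271000 {
+ *		clocks = <&topckgen 1>, <&topckgen 2>;
+ *		clock-names = "wakeup_deb_p0", "wakeup_deb_p1";
+ *		mediatek,syscon-wakeup = <&pericfg>;
+ *		mediatek,enable-wakeup;
+ *	};
+ */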
+
+static int ssusb_wakeup_clks_enable(struct ssusb_mtk *ssusb)
+{
+ int ret;
+
+ ret = clk_prepare_enable(ssusb->wk_deb_p0);
+ if (ret) {
+ dev_err(ssusb->dev, "failed to enable wk_deb_p0\n");
+ goto usb_p0_err;
+ }
+
+ ret = clk_prepare_enable(ssusb->wk_deb_p1);
+ if (ret) {
+ dev_err(ssusb->dev, "failed to enable wk_deb_p1\n");
+ goto usb_p1_err;
+ }
+
+ return 0;
+
+usb_p1_err:
+ clk_disable_unprepare(ssusb->wk_deb_p0);
+usb_p0_err:
+ return ret;
+}
+
+static void ssusb_wakeup_clks_disable(struct ssusb_mtk *ssusb)
+{
+ clk_disable_unprepare(ssusb->wk_deb_p1);
+ clk_disable_unprepare(ssusb->wk_deb_p0);
+}
+
+static void host_ports_num_get(struct ssusb_mtk *ssusb)
+{
+ u32 xhci_cap;
+
+ xhci_cap = mtu3_readl(ssusb->ippc_base, U3D_SSUSB_IP_XHCI_CAP);
+ ssusb->u2_ports = SSUSB_IP_XHCI_U2_PORT_NUM(xhci_cap);
+ ssusb->u3_ports = SSUSB_IP_XHCI_U3_PORT_NUM(xhci_cap);
+
+ dev_dbg(ssusb->dev, "host - u2_ports:%d, u3_ports:%d\n",
+ ssusb->u2_ports, ssusb->u3_ports);
+}
+
+/* only configure the ports that will be used later */
+int ssusb_host_enable(struct ssusb_mtk *ssusb)
+{
+ void __iomem *ibase = ssusb->ippc_base;
+ int num_u3p = ssusb->u3_ports;
+ int num_u2p = ssusb->u2_ports;
+ u32 check_clk;
+ u32 value;
+ int i;
+
+ /* power on host ip */
+ mtu3_clrbits(ibase, U3D_SSUSB_IP_PW_CTRL1, SSUSB_IP_HOST_PDN);
+
+ /* power on and enable all u3 ports */
+ for (i = 0; i < num_u3p; i++) {
+ value = mtu3_readl(ibase, SSUSB_U3_CTRL(i));
+ value &= ~(SSUSB_U3_PORT_PDN | SSUSB_U3_PORT_DIS);
+ value |= SSUSB_U3_PORT_HOST_SEL;
+ mtu3_writel(ibase, SSUSB_U3_CTRL(i), value);
+ }
+
+ /* power on and enable all u2 ports */
+ for (i = 0; i < num_u2p; i++) {
+ value = mtu3_readl(ibase, SSUSB_U2_CTRL(i));
+ value &= ~(SSUSB_U2_PORT_PDN | SSUSB_U2_PORT_DIS);
+ value |= SSUSB_U2_PORT_HOST_SEL;
+ mtu3_writel(ibase, SSUSB_U2_CTRL(i), value);
+ }
+
+ check_clk = SSUSB_XHCI_RST_B_STS;
+ if (num_u3p)
+ check_clk = SSUSB_U3_MAC_RST_B_STS;
+
+ return ssusb_check_clocks(ssusb, check_clk);
+}
+
+int ssusb_host_disable(struct ssusb_mtk *ssusb, bool suspend)
+{
+ void __iomem *ibase = ssusb->ippc_base;
+ int num_u3p = ssusb->u3_ports;
+ int num_u2p = ssusb->u2_ports;
+ u32 value;
+ int ret;
+ int i;
+
+ /* power down and disable all u3 ports */
+ for (i = 0; i < num_u3p; i++) {
+ value = mtu3_readl(ibase, SSUSB_U3_CTRL(i));
+ value |= SSUSB_U3_PORT_PDN;
+ value |= suspend ? 0 : SSUSB_U3_PORT_DIS;
+ mtu3_writel(ibase, SSUSB_U3_CTRL(i), value);
+ }
+
+ /* power down and disable all u2 ports */
+ for (i = 0; i < num_u2p; i++) {
+ value = mtu3_readl(ibase, SSUSB_U2_CTRL(i));
+ value |= SSUSB_U2_PORT_PDN;
+ value |= suspend ? 0 : SSUSB_U2_PORT_DIS;
+ mtu3_writel(ibase, SSUSB_U2_CTRL(i), value);
+ }
+
+ /* power down host ip */
+ mtu3_setbits(ibase, U3D_SSUSB_IP_PW_CTRL1, SSUSB_IP_HOST_PDN);
+
+ if (!suspend)
+ return 0;
+
+ /* wait for host ip to sleep */
+ ret = readl_poll_timeout(ibase + U3D_SSUSB_IP_PW_STS1, value,
+ (value & SSUSB_IP_SLEEP_STS), 100, 100000);
+ if (ret)
+ dev_err(ssusb->dev, "ip sleep failed!!!\n");
+
+ return ret;
+}
+
+static void ssusb_host_setup(struct ssusb_mtk *ssusb)
+{
+ host_ports_num_get(ssusb);
+
+ /*
+ * power on the host and power on/enable all its ports;
+ * if OTG is supported, the gadget driver will switch port0
+ * to device mode
+ */
+ ssusb_host_enable(ssusb);
+
+ /* if port0 supports dual-role, it works in host mode by default */
+ ssusb_set_vbus(&ssusb->otg_switch, 1);
+}
+
+static void ssusb_host_cleanup(struct ssusb_mtk *ssusb)
+{
+ if (ssusb->is_host)
+ ssusb_set_vbus(&ssusb->otg_switch, 0);
+
+ ssusb_host_disable(ssusb, false);
+}
+
+/*
+ * If the host supports multiple ports, the VBUS supplies (5V) of all ports
+ * except port0 (which supports OTG) are best enabled by default in the DTS.
+ * The host driver keeps the link to attached devices when the system enters
+ * suspend mode, so there is no need to control the VBUS supplies after
+ * initialization.
+ */
+int ssusb_host_init(struct ssusb_mtk *ssusb, struct device_node *parent_dn)
+{
+ struct device *parent_dev = ssusb->dev;
+ int ret;
+
+ ssusb_host_setup(ssusb);
+
+ ret = of_platform_populate(parent_dn, NULL, NULL, parent_dev);
+ if (ret) {
+ dev_dbg(parent_dev, "failed to create child devices at %s\n",
+ parent_dn->full_name);
+ return ret;
+ }
+
+ dev_info(parent_dev, "xHCI platform device register success...\n");
+
+ return 0;
+}
+
+void ssusb_host_exit(struct ssusb_mtk *ssusb)
+{
+ of_platform_depopulate(ssusb->dev);
+ ssusb_host_cleanup(ssusb);
+}
+
+int ssusb_wakeup_enable(struct ssusb_mtk *ssusb)
+{
+ int ret = 0;
+
+ if (ssusb->wakeup_en) {
+ ret = ssusb_wakeup_clks_enable(ssusb);
+ ssusb_wakeup_ip_sleep_en(ssusb);
+ }
+ return ret;
+}
+
+void ssusb_wakeup_disable(struct ssusb_mtk *ssusb)
+{
+ if (ssusb->wakeup_en) {
+ ssusb_wakeup_ip_sleep_dis(ssusb);
+ ssusb_wakeup_clks_disable(ssusb);
+ }
+}
diff --git a/drivers/usb/mtu3/mtu3_hw_regs.h b/drivers/usb/mtu3/mtu3_hw_regs.h
new file mode 100644
index 000000000000..212367295276
--- /dev/null
+++ b/drivers/usb/mtu3/mtu3_hw_regs.h
@@ -0,0 +1,473 @@
+/*
+ * mtu3_hw_regs.h - MediaTek USB3 DRD register and field definitions
+ *
+ * Copyright (C) 2016 MediaTek Inc.
+ *
+ * Author: Chunfeng Yun <chunfeng.yun@mediatek.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _SSUSB_HW_REGS_H_
+#define _SSUSB_HW_REGS_H_
+
+/* segment offset of MAC register */
+#define SSUSB_DEV_BASE 0x0000
+#define SSUSB_EPCTL_CSR_BASE 0x0800
+#define SSUSB_USB3_MAC_CSR_BASE 0x1400
+#define SSUSB_USB3_SYS_CSR_BASE 0x1400
+#define SSUSB_USB2_CSR_BASE 0x2400
+
+/* IPPC register in Infra */
+#define SSUSB_SIFSLV_IPPC_BASE 0x0000
+
+/* --------------- SSUSB_DEV REGISTER DEFINITION --------------- */
+
+#define U3D_LV1ISR (SSUSB_DEV_BASE + 0x0000)
+#define U3D_LV1IER (SSUSB_DEV_BASE + 0x0004)
+#define U3D_LV1IESR (SSUSB_DEV_BASE + 0x0008)
+#define U3D_LV1IECR (SSUSB_DEV_BASE + 0x000C)
+
+#define U3D_EPISR (SSUSB_DEV_BASE + 0x0080)
+#define U3D_EPIER (SSUSB_DEV_BASE + 0x0084)
+#define U3D_EPIESR (SSUSB_DEV_BASE + 0x0088)
+#define U3D_EPIECR (SSUSB_DEV_BASE + 0x008C)
+
+#define U3D_EP0CSR (SSUSB_DEV_BASE + 0x0100)
+#define U3D_RXCOUNT0 (SSUSB_DEV_BASE + 0x0108)
+#define U3D_RESERVED (SSUSB_DEV_BASE + 0x010C)
+#define U3D_TX1CSR0 (SSUSB_DEV_BASE + 0x0110)
+#define U3D_TX1CSR1 (SSUSB_DEV_BASE + 0x0114)
+#define U3D_TX1CSR2 (SSUSB_DEV_BASE + 0x0118)
+
+#define U3D_RX1CSR0 (SSUSB_DEV_BASE + 0x0210)
+#define U3D_RX1CSR1 (SSUSB_DEV_BASE + 0x0214)
+#define U3D_RX1CSR2 (SSUSB_DEV_BASE + 0x0218)
+
+#define U3D_FIFO0 (SSUSB_DEV_BASE + 0x0300)
+
+#define U3D_QCR0 (SSUSB_DEV_BASE + 0x0400)
+#define U3D_QCR1 (SSUSB_DEV_BASE + 0x0404)
+#define U3D_QCR2 (SSUSB_DEV_BASE + 0x0408)
+#define U3D_QCR3 (SSUSB_DEV_BASE + 0x040C)
+
+#define U3D_TXQCSR1 (SSUSB_DEV_BASE + 0x0510)
+#define U3D_TXQSAR1 (SSUSB_DEV_BASE + 0x0514)
+#define U3D_TXQCPR1 (SSUSB_DEV_BASE + 0x0518)
+
+#define U3D_RXQCSR1 (SSUSB_DEV_BASE + 0x0610)
+#define U3D_RXQSAR1 (SSUSB_DEV_BASE + 0x0614)
+#define U3D_RXQCPR1 (SSUSB_DEV_BASE + 0x0618)
+#define U3D_RXQLDPR1 (SSUSB_DEV_BASE + 0x061C)
+
+#define U3D_QISAR0 (SSUSB_DEV_BASE + 0x0700)
+#define U3D_QIER0 (SSUSB_DEV_BASE + 0x0704)
+#define U3D_QIESR0 (SSUSB_DEV_BASE + 0x0708)
+#define U3D_QIECR0 (SSUSB_DEV_BASE + 0x070C)
+#define U3D_QISAR1 (SSUSB_DEV_BASE + 0x0710)
+#define U3D_QIER1 (SSUSB_DEV_BASE + 0x0714)
+#define U3D_QIESR1 (SSUSB_DEV_BASE + 0x0718)
+#define U3D_QIECR1 (SSUSB_DEV_BASE + 0x071C)
+
+#define U3D_TQERRIR0 (SSUSB_DEV_BASE + 0x0780)
+#define U3D_TQERRIER0 (SSUSB_DEV_BASE + 0x0784)
+#define U3D_TQERRIESR0 (SSUSB_DEV_BASE + 0x0788)
+#define U3D_TQERRIECR0 (SSUSB_DEV_BASE + 0x078C)
+#define U3D_RQERRIR0 (SSUSB_DEV_BASE + 0x07C0)
+#define U3D_RQERRIER0 (SSUSB_DEV_BASE + 0x07C4)
+#define U3D_RQERRIESR0 (SSUSB_DEV_BASE + 0x07C8)
+#define U3D_RQERRIECR0 (SSUSB_DEV_BASE + 0x07CC)
+#define U3D_RQERRIR1 (SSUSB_DEV_BASE + 0x07D0)
+#define U3D_RQERRIER1 (SSUSB_DEV_BASE + 0x07D4)
+#define U3D_RQERRIESR1 (SSUSB_DEV_BASE + 0x07D8)
+#define U3D_RQERRIECR1 (SSUSB_DEV_BASE + 0x07DC)
+
+#define U3D_CAP_EP0FFSZ (SSUSB_DEV_BASE + 0x0C04)
+#define U3D_CAP_EPNTXFFSZ (SSUSB_DEV_BASE + 0x0C08)
+#define U3D_CAP_EPNRXFFSZ (SSUSB_DEV_BASE + 0x0C0C)
+#define U3D_CAP_EPINFO (SSUSB_DEV_BASE + 0x0C10)
+#define U3D_MISC_CTRL (SSUSB_DEV_BASE + 0x0C84)
+
+/*---------------- SSUSB_DEV FIELD DEFINITION ---------------*/
+
+/* U3D_LV1ISR */
+#define EP_CTRL_INTR BIT(5)
+#define MAC2_INTR BIT(4)
+#define DMA_INTR BIT(3)
+#define MAC3_INTR BIT(2)
+#define QMU_INTR BIT(1)
+#define BMU_INTR BIT(0)
+
+/* U3D_LV1IECR */
+#define LV1IECR_MSK GENMASK(31, 0)
+
+/* U3D_EPISR */
+#define EPRISR(x) (BIT(16) << (x))
+#define EPTISR(x) (BIT(0) << (x))
+#define EP0ISR BIT(0)
+
+/* U3D_EP0CSR */
+#define EP0_SENDSTALL BIT(25)
+#define EP0_FIFOFULL BIT(23)
+#define EP0_SENTSTALL BIT(22)
+#define EP0_DPHTX BIT(20)
+#define EP0_DATAEND BIT(19)
+#define EP0_TXPKTRDY BIT(18)
+#define EP0_SETUPPKTRDY BIT(17)
+#define EP0_RXPKTRDY BIT(16)
+#define EP0_MAXPKTSZ_MSK GENMASK(9, 0)
+#define EP0_MAXPKTSZ(x) ((x) & EP0_MAXPKTSZ_MSK)
+#define EP0_W1C_BITS (~(EP0_RXPKTRDY | EP0_SETUPPKTRDY | EP0_SENTSTALL))
+
+/* U3D_TX1CSR0 */
+#define TX_DMAREQEN BIT(29)
+#define TX_FIFOFULL BIT(25)
+#define TX_FIFOEMPTY BIT(24)
+#define TX_SENTSTALL BIT(22)
+#define TX_SENDSTALL BIT(21)
+#define TX_TXPKTRDY BIT(16)
+#define TX_TXMAXPKTSZ_MSK GENMASK(10, 0)
+#define TX_TXMAXPKTSZ(x) ((x) & TX_TXMAXPKTSZ_MSK)
+#define TX_W1C_BITS (~(TX_SENTSTALL))
+
+/* U3D_TX1CSR1 */
+#define TX_MULT(x) (((x) & 0x3) << 22)
+#define TX_MAX_PKT(x) (((x) & 0x3f) << 16)
+#define TX_SLOT(x) (((x) & 0x3f) << 8)
+#define TX_TYPE(x) (((x) & 0x3) << 4)
+#define TX_SS_BURST(x) (((x) & 0xf) << 0)
+
+/* for TX_TYPE & RX_TYPE */
+#define TYPE_BULK (0x0)
+#define TYPE_INT (0x1)
+#define TYPE_ISO (0x2)
+#define TYPE_MASK (0x3)
+
+/* U3D_TX1CSR2 */
+#define TX_BINTERVAL(x) (((x) & 0xff) << 24)
+#define TX_FIFOSEGSIZE(x) (((x) & 0xf) << 16)
+#define TX_FIFOADDR(x) (((x) & 0x1fff) << 0)
+
+/* U3D_RX1CSR0 */
+#define RX_DMAREQEN BIT(29)
+#define RX_SENTSTALL BIT(22)
+#define RX_SENDSTALL BIT(21)
+#define RX_RXPKTRDY BIT(16)
+#define RX_RXMAXPKTSZ_MSK GENMASK(10, 0)
+#define RX_RXMAXPKTSZ(x) ((x) & RX_RXMAXPKTSZ_MSK)
+#define RX_W1C_BITS (~(RX_SENTSTALL | RX_RXPKTRDY))
+
+/* U3D_RX1CSR1 */
+#define RX_MULT(x) (((x) & 0x3) << 22)
+#define RX_MAX_PKT(x) (((x) & 0x3f) << 16)
+#define RX_SLOT(x) (((x) & 0x3f) << 8)
+#define RX_TYPE(x) (((x) & 0x3) << 4)
+#define RX_SS_BURST(x) (((x) & 0xf) << 0)
+
+/* U3D_RX1CSR2 */
+#define RX_BINTERVAL(x) (((x) & 0xff) << 24)
+#define RX_FIFOSEGSIZE(x) (((x) & 0xf) << 16)
+#define RX_FIFOADDR(x) (((x) & 0x1fff) << 0)
+
+/* U3D_QCR0 */
+#define QMU_RX_CS_EN(x) (BIT(16) << (x))
+#define QMU_TX_CS_EN(x) (BIT(0) << (x))
+#define QMU_CS16B_EN BIT(0)
+
+/* U3D_QCR1 */
+#define QMU_TX_ZLP(x) (BIT(0) << (x))
+
+/* U3D_QCR3 */
+#define QMU_RX_COZ(x) (BIT(16) << (x))
+#define QMU_RX_ZLP(x) (BIT(0) << (x))
+
+/* U3D_TXQCSR1 */
+/* U3D_RXQCSR1 */
+#define QMU_Q_ACTIVE BIT(15)
+#define QMU_Q_STOP BIT(2)
+#define QMU_Q_RESUME BIT(1)
+#define QMU_Q_START BIT(0)
+
+/* U3D_QISAR0, U3D_QIER0, U3D_QIESR0, U3D_QIECR0 */
+#define QMU_RX_DONE_INT(x) (BIT(16) << (x))
+#define QMU_TX_DONE_INT(x) (BIT(0) << (x))
+
+/* U3D_QISAR1, U3D_QIER1, U3D_QIESR1, U3D_QIECR1 */
+#define RXQ_ZLPERR_INT BIT(20)
+#define RXQ_LENERR_INT BIT(18)
+#define RXQ_CSERR_INT BIT(17)
+#define RXQ_EMPTY_INT BIT(16)
+#define TXQ_LENERR_INT BIT(2)
+#define TXQ_CSERR_INT BIT(1)
+#define TXQ_EMPTY_INT BIT(0)
+
+/* U3D_TQERRIR0, U3D_TQERRIER0, U3D_TQERRIESR0, U3D_TQERRIECR0 */
+#define QMU_TX_LEN_ERR(x) (BIT(16) << (x))
+#define QMU_TX_CS_ERR(x) (BIT(0) << (x))
+
+/* U3D_RQERRIR0, U3D_RQERRIER0, U3D_RQERRIESR0, U3D_RQERRIECR0 */
+#define QMU_RX_LEN_ERR(x) (BIT(16) << (x))
+#define QMU_RX_CS_ERR(x) (BIT(0) << (x))
+
+/* U3D_RQERRIR1, U3D_RQERRIER1, U3D_RQERRIESR1, U3D_RQERRIECR1 */
+#define QMU_RX_ZLP_ERR(n) (BIT(16) << (n))
+
+/* U3D_CAP_EPINFO */
+#define CAP_RX_EP_NUM(x) (((x) >> 8) & 0x1f)
+#define CAP_TX_EP_NUM(x) ((x) & 0x1f)
+
+/* U3D_MISC_CTRL */
+#define VBUS_ON BIT(1)
+#define VBUS_FRC_EN BIT(0)
+
+
+/*---------------- SSUSB_EPCTL_CSR REGISTER DEFINITION ----------------*/
+
+#define U3D_DEVICE_CONF (SSUSB_EPCTL_CSR_BASE + 0x0000)
+#define U3D_EP_RST (SSUSB_EPCTL_CSR_BASE + 0x0004)
+
+#define U3D_DEV_LINK_INTR_ENABLE (SSUSB_EPCTL_CSR_BASE + 0x0050)
+#define U3D_DEV_LINK_INTR (SSUSB_EPCTL_CSR_BASE + 0x0054)
+
+/*---------------- SSUSB_EPCTL_CSR FIELD DEFINITION ----------------*/
+
+/* U3D_DEVICE_CONF */
+#define DEV_ADDR_MSK GENMASK(30, 24)
+#define DEV_ADDR(x) ((0x7f & (x)) << 24)
+#define HW_USB2_3_SEL BIT(18)
+#define SW_USB2_3_SEL_EN BIT(17)
+#define SW_USB2_3_SEL BIT(16)
+#define SSUSB_DEV_SPEED(x) ((x) & 0x7)
+
+/* U3D_EP_RST */
+#define EP1_IN_RST BIT(17)
+#define EP1_OUT_RST BIT(1)
+#define EP_RST(is_in, epnum) (((is_in) ? BIT(16) : BIT(0)) << (epnum))
+#define EP0_RST BIT(0)
+
+/* U3D_DEV_LINK_INTR_ENABLE */
+/* U3D_DEV_LINK_INTR */
+#define SSUSB_DEV_SPEED_CHG_INTR BIT(0)
+
+
+/*---------------- SSUSB_USB3_MAC_CSR REGISTER DEFINITION ----------------*/
+
+#define U3D_LTSSM_CTRL (SSUSB_USB3_MAC_CSR_BASE + 0x0010)
+#define U3D_USB3_CONFIG (SSUSB_USB3_MAC_CSR_BASE + 0x001C)
+
+#define U3D_LTSSM_INTR_ENABLE (SSUSB_USB3_MAC_CSR_BASE + 0x013C)
+#define U3D_LTSSM_INTR (SSUSB_USB3_MAC_CSR_BASE + 0x0140)
+
+/*---------------- SSUSB_USB3_MAC_CSR FIELD DEFINITION ----------------*/
+
+/* U3D_LTSSM_CTRL */
+#define FORCE_POLLING_FAIL BIT(4)
+#define FORCE_RXDETECT_FAIL BIT(3)
+#define SOFT_U3_EXIT_EN BIT(2)
+#define COMPLIANCE_EN BIT(1)
+#define U1_GO_U2_EN BIT(0)
+
+/* U3D_USB3_CONFIG */
+#define USB3_EN BIT(0)
+
+/* U3D_LTSSM_INTR_ENABLE */
+/* U3D_LTSSM_INTR */
+#define U3_RESUME_INTR BIT(18)
+#define U3_LFPS_TMOUT_INTR BIT(17)
+#define VBUS_FALL_INTR BIT(16)
+#define VBUS_RISE_INTR BIT(15)
+#define RXDET_SUCCESS_INTR BIT(14)
+#define EXIT_U3_INTR BIT(13)
+#define EXIT_U2_INTR BIT(12)
+#define EXIT_U1_INTR BIT(11)
+#define ENTER_U3_INTR BIT(10)
+#define ENTER_U2_INTR BIT(9)
+#define ENTER_U1_INTR BIT(8)
+#define ENTER_U0_INTR BIT(7)
+#define RECOVERY_INTR BIT(6)
+#define WARM_RST_INTR BIT(5)
+#define HOT_RST_INTR BIT(4)
+#define LOOPBACK_INTR BIT(3)
+#define COMPLIANCE_INTR BIT(2)
+#define SS_DISABLE_INTR BIT(1)
+#define SS_INACTIVE_INTR BIT(0)
+
+/*---------------- SSUSB_USB3_SYS_CSR REGISTER DEFINITION ----------------*/
+
+#define U3D_LINK_UX_INACT_TIMER (SSUSB_USB3_SYS_CSR_BASE + 0x020C)
+#define U3D_LINK_POWER_CONTROL (SSUSB_USB3_SYS_CSR_BASE + 0x0210)
+#define U3D_LINK_ERR_COUNT (SSUSB_USB3_SYS_CSR_BASE + 0x0214)
+
+/*---------------- SSUSB_USB3_SYS_CSR FIELD DEFINITION ----------------*/
+
+/* U3D_LINK_UX_INACT_TIMER */
+#define DEV_U2_INACT_TIMEOUT_MSK GENMASK(23, 16)
+#define DEV_U2_INACT_TIMEOUT_VALUE(x) (((x) & 0xff) << 16)
+#define U2_INACT_TIMEOUT_MSK GENMASK(15, 8)
+#define U1_INACT_TIMEOUT_MSK GENMASK(7, 0)
+#define U1_INACT_TIMEOUT_VALUE(x) ((x) & 0xff)
+
+/* U3D_LINK_POWER_CONTROL */
+#define SW_U2_ACCEPT_ENABLE BIT(9)
+#define SW_U1_ACCEPT_ENABLE BIT(8)
+#define UX_EXIT BIT(5)
+#define LGO_U3 BIT(4)
+#define LGO_U2 BIT(3)
+#define LGO_U1 BIT(2)
+#define SW_U2_REQUEST_ENABLE BIT(1)
+#define SW_U1_REQUEST_ENABLE BIT(0)
+
+/* U3D_LINK_ERR_COUNT */
+#define CLR_LINK_ERR_CNT BIT(16)
+#define LINK_ERROR_COUNT GENMASK(15, 0)
+
+/*---------------- SSUSB_USB2_CSR REGISTER DEFINITION ----------------*/
+
+#define U3D_POWER_MANAGEMENT (SSUSB_USB2_CSR_BASE + 0x0004)
+#define U3D_DEVICE_CONTROL (SSUSB_USB2_CSR_BASE + 0x000C)
+#define U3D_USB2_TEST_MODE (SSUSB_USB2_CSR_BASE + 0x0014)
+#define U3D_COMMON_USB_INTR_ENABLE (SSUSB_USB2_CSR_BASE + 0x0018)
+#define U3D_COMMON_USB_INTR (SSUSB_USB2_CSR_BASE + 0x001C)
+#define U3D_LINK_RESET_INFO (SSUSB_USB2_CSR_BASE + 0x0024)
+#define U3D_USB20_FRAME_NUM (SSUSB_USB2_CSR_BASE + 0x003C)
+#define U3D_USB20_LPM_PARAMETER (SSUSB_USB2_CSR_BASE + 0x0044)
+#define U3D_USB20_MISC_CONTROL (SSUSB_USB2_CSR_BASE + 0x004C)
+
+/*---------------- SSUSB_USB2_CSR FIELD DEFINITION ----------------*/
+
+/* U3D_POWER_MANAGEMENT */
+#define LPM_BESL_STALL BIT(14)
+#define LPM_BESLD_STALL BIT(13)
+#define LPM_RWP BIT(11)
+#define LPM_HRWE BIT(10)
+#define LPM_MODE(x) (((x) & 0x3) << 8)
+#define ISO_UPDATE BIT(7)
+#define SOFT_CONN BIT(6)
+#define HS_ENABLE BIT(5)
+#define RESUME BIT(2)
+#define SUSPENDM_ENABLE BIT(0)
+
+/* U3D_DEVICE_CONTROL */
+#define DC_HOSTREQ BIT(1)
+#define DC_SESSION BIT(0)
+
+/* U3D_USB2_TEST_MODE */
+#define U2U3_AUTO_SWITCH BIT(10)
+#define LPM_FORCE_STALL BIT(8)
+#define FIFO_ACCESS BIT(6)
+#define FORCE_FS BIT(5)
+#define FORCE_HS BIT(4)
+#define TEST_PACKET_MODE BIT(3)
+#define TEST_K_MODE BIT(2)
+#define TEST_J_MODE BIT(1)
+#define TEST_SE0_NAK_MODE BIT(0)
+
+/* U3D_COMMON_USB_INTR_ENABLE */
+/* U3D_COMMON_USB_INTR */
+#define LPM_RESUME_INTR BIT(9)
+#define LPM_INTR BIT(8)
+#define DISCONN_INTR BIT(5)
+#define CONN_INTR BIT(4)
+#define SOF_INTR BIT(3)
+#define RESET_INTR BIT(2)
+#define RESUME_INTR BIT(1)
+#define SUSPEND_INTR BIT(0)
+
+/* U3D_LINK_RESET_INFO */
+#define WTCHRP_MSK GENMASK(19, 16)
+
+/* U3D_USB20_LPM_PARAMETER */
+#define LPM_BESLCK_U3(x) (((x) & 0xf) << 12)
+#define LPM_BESLCK(x) (((x) & 0xf) << 8)
+#define LPM_BESLDCK(x) (((x) & 0xf) << 4)
+#define LPM_BESL GENMASK(3, 0)
+
+/* U3D_USB20_MISC_CONTROL */
+#define LPM_U3_ACK_EN BIT(0)
+
+/*---------------- SSUSB_SIFSLV_IPPC REGISTER DEFINITION ----------------*/
+
+#define U3D_SSUSB_IP_PW_CTRL0 (SSUSB_SIFSLV_IPPC_BASE + 0x0000)
+#define U3D_SSUSB_IP_PW_CTRL1 (SSUSB_SIFSLV_IPPC_BASE + 0x0004)
+#define U3D_SSUSB_IP_PW_CTRL2 (SSUSB_SIFSLV_IPPC_BASE + 0x0008)
+#define U3D_SSUSB_IP_PW_CTRL3 (SSUSB_SIFSLV_IPPC_BASE + 0x000C)
+#define U3D_SSUSB_IP_PW_STS1 (SSUSB_SIFSLV_IPPC_BASE + 0x0010)
+#define U3D_SSUSB_IP_PW_STS2 (SSUSB_SIFSLV_IPPC_BASE + 0x0014)
+#define U3D_SSUSB_OTG_STS (SSUSB_SIFSLV_IPPC_BASE + 0x0018)
+#define U3D_SSUSB_OTG_STS_CLR (SSUSB_SIFSLV_IPPC_BASE + 0x001C)
+#define U3D_SSUSB_IP_XHCI_CAP (SSUSB_SIFSLV_IPPC_BASE + 0x0024)
+#define U3D_SSUSB_IP_DEV_CAP (SSUSB_SIFSLV_IPPC_BASE + 0x0028)
+#define U3D_SSUSB_OTG_INT_EN (SSUSB_SIFSLV_IPPC_BASE + 0x002C)
+#define U3D_SSUSB_U3_CTRL_0P (SSUSB_SIFSLV_IPPC_BASE + 0x0030)
+#define U3D_SSUSB_U2_CTRL_0P (SSUSB_SIFSLV_IPPC_BASE + 0x0050)
+#define U3D_SSUSB_REF_CK_CTRL (SSUSB_SIFSLV_IPPC_BASE + 0x008C)
+#define U3D_SSUSB_DEV_RST_CTRL (SSUSB_SIFSLV_IPPC_BASE + 0x0098)
+#define U3D_SSUSB_HW_ID (SSUSB_SIFSLV_IPPC_BASE + 0x00A0)
+#define U3D_SSUSB_HW_SUB_ID (SSUSB_SIFSLV_IPPC_BASE + 0x00A4)
+#define U3D_SSUSB_IP_SPARE0 (SSUSB_SIFSLV_IPPC_BASE + 0x00C8)
+
+/*---------------- SSUSB_SIFSLV_IPPC FIELD DEFINITION ----------------*/
+
+/* U3D_SSUSB_IP_PW_CTRL0 */
+#define SSUSB_IP_SW_RST BIT(0)
+
+/* U3D_SSUSB_IP_PW_CTRL1 */
+#define SSUSB_IP_HOST_PDN BIT(0)
+
+/* U3D_SSUSB_IP_PW_CTRL2 */
+#define SSUSB_IP_DEV_PDN BIT(0)
+
+/* U3D_SSUSB_IP_PW_CTRL3 */
+#define SSUSB_IP_PCIE_PDN BIT(0)
+
+/* U3D_SSUSB_IP_PW_STS1 */
+#define SSUSB_IP_SLEEP_STS BIT(30)
+#define SSUSB_U3_MAC_RST_B_STS BIT(16)
+#define SSUSB_XHCI_RST_B_STS BIT(11)
+#define SSUSB_SYS125_RST_B_STS BIT(10)
+#define SSUSB_REF_RST_B_STS BIT(8)
+#define SSUSB_SYSPLL_STABLE BIT(0)
+
+/* U3D_SSUSB_IP_PW_STS2 */
+#define SSUSB_U2_MAC_SYS_RST_B_STS BIT(0)
+
+/* U3D_SSUSB_OTG_STS */
+#define SSUSB_VBUS_VALID BIT(9)
+
+/* U3D_SSUSB_OTG_STS_CLR */
+#define SSUSB_VBUS_INTR_CLR BIT(6)
+
+/* U3D_SSUSB_IP_XHCI_CAP */
+#define SSUSB_IP_XHCI_U2_PORT_NUM(x) (((x) >> 8) & 0xff)
+#define SSUSB_IP_XHCI_U3_PORT_NUM(x) ((x) & 0xff)
+
+/* U3D_SSUSB_IP_DEV_CAP */
+#define SSUSB_IP_DEV_U3_PORT_NUM(x) ((x) & 0xff)
+
+/* U3D_SSUSB_OTG_INT_EN */
+#define SSUSB_VBUS_CHG_INT_A_EN BIT(7)
+#define SSUSB_VBUS_CHG_INT_B_EN BIT(6)
+
+/* U3D_SSUSB_U3_CTRL_0P */
+#define SSUSB_U3_PORT_HOST_SEL BIT(2)
+#define SSUSB_U3_PORT_PDN BIT(1)
+#define SSUSB_U3_PORT_DIS BIT(0)
+
+/* U3D_SSUSB_U2_CTRL_0P */
+#define SSUSB_U2_PORT_OTG_SEL BIT(7)
+#define SSUSB_U2_PORT_HOST_SEL BIT(2)
+#define SSUSB_U2_PORT_PDN BIT(1)
+#define SSUSB_U2_PORT_DIS BIT(0)
+
+/* U3D_SSUSB_DEV_RST_CTRL */
+#define SSUSB_DEV_SW_RST BIT(0)
+
+#endif /* _SSUSB_HW_REGS_H_ */
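
The SSUSB_IP_XHCI_*_PORT_NUM() accessors above are plain shift-and-mask helpers. A usage sketch (hypothetical function, not part of the patch) of reading the xHCI port capability register:

    static void example_port_counts(struct ssusb_mtk *ssusb)	/* sketch only */
    {
            u32 cap = mtu3_readl(ssusb->ippc_base, U3D_SSUSB_IP_XHCI_CAP);
            u32 num_u3 = SSUSB_IP_XHCI_U3_PORT_NUM(cap);	/* bits 7:0  */
            u32 num_u2 = SSUSB_IP_XHCI_U2_PORT_NUM(cap);	/* bits 15:8 */

            dev_info(ssusb->dev, "u3 ports: %u, u2 ports: %u\n",
                     num_u3, num_u2);
    }
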
diff --git a/drivers/usb/mtu3/mtu3_plat.c b/drivers/usb/mtu3/mtu3_plat.c
new file mode 100644
index 000000000000..783367805c99
--- /dev/null
+++ b/drivers/usb/mtu3/mtu3_plat.c
@@ -0,0 +1,484 @@
+/*
+ * Copyright (C) 2016 MediaTek Inc.
+ *
+ * Author: Chunfeng Yun <chunfeng.yun@mediatek.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/clk.h>
+#include <linux/dma-mapping.h>
+#include <linux/iopoll.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/platform_device.h>
+
+#include "mtu3.h"
+#include "mtu3_dr.h"
+
+/* u2-port0 should be powered on and enabled */
+int ssusb_check_clocks(struct ssusb_mtk *ssusb, u32 ex_clks)
+{
+ void __iomem *ibase = ssusb->ippc_base;
+ u32 value, check_val;
+ int ret;
+
+ check_val = ex_clks | SSUSB_SYS125_RST_B_STS | SSUSB_SYSPLL_STABLE |
+ SSUSB_REF_RST_B_STS;
+
+ ret = readl_poll_timeout(ibase + U3D_SSUSB_IP_PW_STS1, value,
+ (check_val == (value & check_val)), 100, 20000);
+ if (ret) {
+ dev_err(ssusb->dev, "clks of sts1 are not stable!\n");
+ return ret;
+ }
+
+ ret = readl_poll_timeout(ibase + U3D_SSUSB_IP_PW_STS2, value,
+ (value & SSUSB_U2_MAC_SYS_RST_B_STS), 100, 10000);
+ if (ret) {
+ dev_err(ssusb->dev, "mac2 clock is not stable\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int ssusb_phy_init(struct ssusb_mtk *ssusb)
+{
+ int i;
+ int ret;
+
+ for (i = 0; i < ssusb->num_phys; i++) {
+ ret = phy_init(ssusb->phys[i]);
+ if (ret)
+ goto exit_phy;
+ }
+ return 0;
+
+exit_phy:
+ for (; i > 0; i--)
+ phy_exit(ssusb->phys[i - 1]);
+
+ return ret;
+}
+
+static int ssusb_phy_exit(struct ssusb_mtk *ssusb)
+{
+ int i;
+
+ for (i = 0; i < ssusb->num_phys; i++)
+ phy_exit(ssusb->phys[i]);
+
+ return 0;
+}
+
+static int ssusb_phy_power_on(struct ssusb_mtk *ssusb)
+{
+ int i;
+ int ret;
+
+ for (i = 0; i < ssusb->num_phys; i++) {
+ ret = phy_power_on(ssusb->phys[i]);
+ if (ret)
+ goto power_off_phy;
+ }
+ return 0;
+
+power_off_phy:
+ for (; i > 0; i--)
+ phy_power_off(ssusb->phys[i - 1]);
+
+ return ret;
+}
+
+static void ssusb_phy_power_off(struct ssusb_mtk *ssusb)
+{
+ unsigned int i;
+
+ for (i = 0; i < ssusb->num_phys; i++)
+ phy_power_off(ssusb->phys[i]);
+}
+
+static int ssusb_rscs_init(struct ssusb_mtk *ssusb)
+{
+ int ret = 0;
+
+ ret = regulator_enable(ssusb->vusb33);
+ if (ret) {
+ dev_err(ssusb->dev, "failed to enable vusb33\n");
+ goto vusb33_err;
+ }
+
+ ret = clk_prepare_enable(ssusb->sys_clk);
+ if (ret) {
+ dev_err(ssusb->dev, "failed to enable sys_clk\n");
+ goto clk_err;
+ }
+
+ ret = ssusb_phy_init(ssusb);
+ if (ret) {
+ dev_err(ssusb->dev, "failed to init phy\n");
+ goto phy_init_err;
+ }
+
+ ret = ssusb_phy_power_on(ssusb);
+ if (ret) {
+ dev_err(ssusb->dev, "failed to power on phy\n");
+ goto phy_err;
+ }
+
+ return 0;
+
+phy_err:
+ ssusb_phy_exit(ssusb);
+phy_init_err:
+ clk_disable_unprepare(ssusb->sys_clk);
+clk_err:
+ regulator_disable(ssusb->vusb33);
+vusb33_err:
+
+ return ret;
+}
+
+static void ssusb_rscs_exit(struct ssusb_mtk *ssusb)
+{
+ clk_disable_unprepare(ssusb->sys_clk);
+ regulator_disable(ssusb->vusb33);
+ ssusb_phy_power_off(ssusb);
+ ssusb_phy_exit(ssusb);
+}
+
+static void ssusb_ip_sw_reset(struct ssusb_mtk *ssusb)
+{
+ /* reset whole ip (xhci & u3d) */
+ mtu3_setbits(ssusb->ippc_base, U3D_SSUSB_IP_PW_CTRL0, SSUSB_IP_SW_RST);
+ udelay(1);
+ mtu3_clrbits(ssusb->ippc_base, U3D_SSUSB_IP_PW_CTRL0, SSUSB_IP_SW_RST);
+}
+
+static int get_iddig_pinctrl(struct ssusb_mtk *ssusb)
+{
+ struct otg_switch_mtk *otg_sx = &ssusb->otg_switch;
+
+ otg_sx->id_pinctrl = devm_pinctrl_get(ssusb->dev);
+ if (IS_ERR(otg_sx->id_pinctrl)) {
+ dev_err(ssusb->dev, "Cannot find id pinctrl!\n");
+ return PTR_ERR(otg_sx->id_pinctrl);
+ }
+
+ otg_sx->id_float =
+ pinctrl_lookup_state(otg_sx->id_pinctrl, "id_float");
+ if (IS_ERR(otg_sx->id_float)) {
+ dev_err(ssusb->dev, "Cannot find pinctrl id_float!\n");
+ return PTR_ERR(otg_sx->id_float);
+ }
+
+ otg_sx->id_ground =
+ pinctrl_lookup_state(otg_sx->id_pinctrl, "id_ground");
+ if (IS_ERR(otg_sx->id_ground)) {
+ dev_err(ssusb->dev, "Cannot find pinctrl id_ground!\n");
+ return PTR_ERR(otg_sx->id_ground);
+ }
+
+ return 0;
+}
+
+static int get_ssusb_rscs(struct platform_device *pdev, struct ssusb_mtk *ssusb)
+{
+ struct device_node *node = pdev->dev.of_node;
+ struct otg_switch_mtk *otg_sx = &ssusb->otg_switch;
+ struct device *dev = &pdev->dev;
+ struct regulator *vbus;
+ struct resource *res;
+ int i;
+ int ret;
+
+ ssusb->num_phys = of_count_phandle_with_args(node,
+ "phys", "#phy-cells");
+ if (ssusb->num_phys > 0) {
+ ssusb->phys = devm_kcalloc(dev, ssusb->num_phys,
+ sizeof(*ssusb->phys), GFP_KERNEL);
+ if (!ssusb->phys)
+ return -ENOMEM;
+ } else {
+ ssusb->num_phys = 0;
+ }
+
+ for (i = 0; i < ssusb->num_phys; i++) {
+ ssusb->phys[i] = devm_of_phy_get_by_index(dev, node, i);
+ if (IS_ERR(ssusb->phys[i])) {
+ dev_err(dev, "failed to get phy-%d\n", i);
+ return PTR_ERR(ssusb->phys[i]);
+ }
+ }
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ippc");
+ ssusb->ippc_base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(ssusb->ippc_base)) {
+ dev_err(dev, "failed to map memory for ippc\n");
+ return PTR_ERR(ssusb->ippc_base);
+ }
+
+ ssusb->vusb33 = devm_regulator_get(&pdev->dev, "vusb33");
+ if (IS_ERR(ssusb->vusb33)) {
+ dev_err(dev, "failed to get vusb33\n");
+ return PTR_ERR(ssusb->vusb33);
+ }
+
+ ssusb->sys_clk = devm_clk_get(dev, "sys_ck");
+ if (IS_ERR(ssusb->sys_clk)) {
+ dev_err(dev, "failed to get sys clock\n");
+ return PTR_ERR(ssusb->sys_clk);
+ }
+
+ ssusb->dr_mode = usb_get_dr_mode(dev);
+ if (ssusb->dr_mode == USB_DR_MODE_UNKNOWN) {
+		dev_err(dev, "dr_mode is invalid\n");
+ return -EINVAL;
+ }
+
+ if (ssusb->dr_mode == USB_DR_MODE_PERIPHERAL)
+ return 0;
+
+ /* if host role is supported */
+ ret = ssusb_wakeup_of_property_parse(ssusb, node);
+ if (ret)
+ return ret;
+
+ if (ssusb->dr_mode != USB_DR_MODE_OTG)
+ return 0;
+
+ /* if dual-role mode is supported */
+ vbus = devm_regulator_get(&pdev->dev, "vbus");
+ if (IS_ERR(vbus)) {
+ dev_err(dev, "failed to get vbus\n");
+ return PTR_ERR(vbus);
+ }
+ otg_sx->vbus = vbus;
+
+ otg_sx->is_u3_drd = of_property_read_bool(node, "mediatek,usb3-drd");
+ otg_sx->manual_drd_enabled =
+ of_property_read_bool(node, "enable-manual-drd");
+
+ if (of_property_read_bool(node, "extcon")) {
+ otg_sx->edev = extcon_get_edev_by_phandle(ssusb->dev, 0);
+ if (IS_ERR(otg_sx->edev)) {
+ dev_err(ssusb->dev, "couldn't get extcon device\n");
+ return -EPROBE_DEFER;
+ }
+ if (otg_sx->manual_drd_enabled) {
+ ret = get_iddig_pinctrl(ssusb);
+ if (ret)
+ return ret;
+ }
+ }
+
+	dev_info(dev, "dr_mode: %d, is_u3_drd: %d\n",
+ ssusb->dr_mode, otg_sx->is_u3_drd);
+
+ return 0;
+}
+
+static int mtu3_probe(struct platform_device *pdev)
+{
+ struct device_node *node = pdev->dev.of_node;
+ struct device *dev = &pdev->dev;
+ struct ssusb_mtk *ssusb;
+ int ret = -ENOMEM;
+
+	/* all fields are set to zero by default */
+ ssusb = devm_kzalloc(dev, sizeof(*ssusb), GFP_KERNEL);
+ if (!ssusb)
+ return -ENOMEM;
+
+ ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
+ if (ret) {
+ dev_err(dev, "No suitable DMA config available\n");
+ return -ENOTSUPP;
+ }
+
+ platform_set_drvdata(pdev, ssusb);
+ ssusb->dev = dev;
+
+ ret = get_ssusb_rscs(pdev, ssusb);
+ if (ret)
+ return ret;
+
+ /* enable power domain */
+ pm_runtime_enable(dev);
+ pm_runtime_get_sync(dev);
+ device_enable_async_suspend(dev);
+
+ ret = ssusb_rscs_init(ssusb);
+ if (ret)
+ goto comm_init_err;
+
+ ssusb_ip_sw_reset(ssusb);
+
+ if (IS_ENABLED(CONFIG_USB_MTU3_HOST))
+ ssusb->dr_mode = USB_DR_MODE_HOST;
+ else if (IS_ENABLED(CONFIG_USB_MTU3_GADGET))
+ ssusb->dr_mode = USB_DR_MODE_PERIPHERAL;
+
+ /* default as host */
+ ssusb->is_host = !(ssusb->dr_mode == USB_DR_MODE_PERIPHERAL);
+
+ switch (ssusb->dr_mode) {
+ case USB_DR_MODE_PERIPHERAL:
+ ret = ssusb_gadget_init(ssusb);
+ if (ret) {
+ dev_err(dev, "failed to initialize gadget\n");
+ goto comm_exit;
+ }
+ break;
+ case USB_DR_MODE_HOST:
+ ret = ssusb_host_init(ssusb, node);
+ if (ret) {
+ dev_err(dev, "failed to initialize host\n");
+ goto comm_exit;
+ }
+ break;
+ case USB_DR_MODE_OTG:
+ ret = ssusb_gadget_init(ssusb);
+ if (ret) {
+ dev_err(dev, "failed to initialize gadget\n");
+ goto comm_exit;
+ }
+
+ ret = ssusb_host_init(ssusb, node);
+ if (ret) {
+ dev_err(dev, "failed to initialize host\n");
+ goto gadget_exit;
+ }
+
+ ssusb_otg_switch_init(ssusb);
+ break;
+ default:
+ dev_err(dev, "unsupported mode: %d\n", ssusb->dr_mode);
+ ret = -EINVAL;
+ goto comm_exit;
+ }
+
+ return 0;
+
+gadget_exit:
+ ssusb_gadget_exit(ssusb);
+comm_exit:
+ ssusb_rscs_exit(ssusb);
+comm_init_err:
+ pm_runtime_put_sync(dev);
+ pm_runtime_disable(dev);
+
+ return ret;
+}
+
+static int mtu3_remove(struct platform_device *pdev)
+{
+ struct ssusb_mtk *ssusb = platform_get_drvdata(pdev);
+
+ switch (ssusb->dr_mode) {
+ case USB_DR_MODE_PERIPHERAL:
+ ssusb_gadget_exit(ssusb);
+ break;
+ case USB_DR_MODE_HOST:
+ ssusb_host_exit(ssusb);
+ break;
+ case USB_DR_MODE_OTG:
+ ssusb_otg_switch_exit(ssusb);
+ ssusb_gadget_exit(ssusb);
+ ssusb_host_exit(ssusb);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ssusb_rscs_exit(ssusb);
+ pm_runtime_put_sync(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+
+ return 0;
+}
+
+/*
+ * When dual-role mode is supported, reject system suspend
+ * while the controller works in device mode.
+ */
+static int __maybe_unused mtu3_suspend(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct ssusb_mtk *ssusb = platform_get_drvdata(pdev);
+
+ dev_dbg(dev, "%s\n", __func__);
+
+ /* REVISIT: disconnect it for only device mode? */
+ if (!ssusb->is_host)
+ return 0;
+
+ ssusb_host_disable(ssusb, true);
+ ssusb_phy_power_off(ssusb);
+ clk_disable_unprepare(ssusb->sys_clk);
+ ssusb_wakeup_enable(ssusb);
+
+ return 0;
+}
+
+static int __maybe_unused mtu3_resume(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct ssusb_mtk *ssusb = platform_get_drvdata(pdev);
+
+ dev_dbg(dev, "%s\n", __func__);
+
+ if (!ssusb->is_host)
+ return 0;
+
+ ssusb_wakeup_disable(ssusb);
+ clk_prepare_enable(ssusb->sys_clk);
+ ssusb_phy_power_on(ssusb);
+ ssusb_host_enable(ssusb);
+
+ return 0;
+}
+
+static const struct dev_pm_ops mtu3_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(mtu3_suspend, mtu3_resume)
+};
+
+#define DEV_PM_OPS (IS_ENABLED(CONFIG_PM) ? &mtu3_pm_ops : NULL)
+
+#ifdef CONFIG_OF
+
+static const struct of_device_id mtu3_of_match[] = {
+ {.compatible = "mediatek,mt8173-mtu3",},
+ {},
+};
+
+MODULE_DEVICE_TABLE(of, mtu3_of_match);
+
+#endif
+
+static struct platform_driver mtu3_driver = {
+ .probe = mtu3_probe,
+ .remove = mtu3_remove,
+ .driver = {
+ .name = MTU3_DRIVER_NAME,
+ .pm = DEV_PM_OPS,
+ .of_match_table = of_match_ptr(mtu3_of_match),
+ },
+};
+module_platform_driver(mtu3_driver);
+
+MODULE_AUTHOR("Chunfeng Yun <chunfeng.yun@mediatek.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("MediaTek USB3 DRD Controller Driver");
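
ssusb_phy_init() and ssusb_phy_power_on() in mtu3_plat.c above both use the standard partial-failure unwind idiom: on error, undo only the instances that already succeeded, in reverse order. The shape of the pattern, as a generic sketch (hypothetical name, not part of the patch):

    static int init_all_phys(struct phy **phys, int n)	/* sketch only */
    {
            int i, ret;

            for (i = 0; i < n; i++) {
                    ret = phy_init(phys[i]);
                    if (ret)
                            goto unwind;
            }
            return 0;

    unwind:
            while (--i >= 0)	/* phys[i] failed; roll back i-1..0 */
                    phy_exit(phys[i]);
            return ret;
    }
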
diff --git a/drivers/usb/mtu3/mtu3_qmu.c b/drivers/usb/mtu3/mtu3_qmu.c
new file mode 100644
index 000000000000..7d9ba8a52368
--- /dev/null
+++ b/drivers/usb/mtu3/mtu3_qmu.c
@@ -0,0 +1,573 @@
+/*
+ * mtu3_qmu.c - Queue Management Unit driver for device controller
+ *
+ * Copyright (C) 2016 MediaTek Inc.
+ *
+ * Author: Chunfeng Yun <chunfeng.yun@mediatek.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+/*
+ * The Queue Management Unit (QMU) is designed to offload software
+ * effort in servicing DMA interrupts.
+ * By preparing General Purpose Descriptors (GPD) and Buffer Descriptors
+ * (BD), software links data buffers and triggers the QMU to send data to
+ * the host / receive data from the device in one shot.
+ * Currently only GPD is supported.
+ *
+ * For more details, please refer to the QMU Programming Guide.
+ */
+
+#include <linux/dmapool.h>
+#include <linux/iopoll.h>
+
+#include "mtu3.h"
+
+#define QMU_CHECKSUM_LEN 16
+
+#define GPD_FLAGS_HWO BIT(0)
+#define GPD_FLAGS_BDP BIT(1)
+#define GPD_FLAGS_BPS BIT(2)
+#define GPD_FLAGS_IOC BIT(7)
+
+#define GPD_EXT_FLAG_ZLP BIT(5)
+
+
+static struct qmu_gpd *gpd_dma_to_virt(struct mtu3_gpd_ring *ring,
+ dma_addr_t dma_addr)
+{
+ dma_addr_t dma_base = ring->dma;
+ struct qmu_gpd *gpd_head = ring->start;
+ u32 offset = (dma_addr - dma_base) / sizeof(*gpd_head);
+
+ if (offset >= MAX_GPD_NUM)
+ return NULL;
+
+ return gpd_head + offset;
+}
+
+static dma_addr_t gpd_virt_to_dma(struct mtu3_gpd_ring *ring,
+ struct qmu_gpd *gpd)
+{
+ dma_addr_t dma_base = ring->dma;
+ struct qmu_gpd *gpd_head = ring->start;
+ u32 offset;
+
+ offset = gpd - gpd_head;
+ if (offset >= MAX_GPD_NUM)
+ return 0;
+
+ return dma_base + (offset * sizeof(*gpd));
+}
+
+static void gpd_ring_init(struct mtu3_gpd_ring *ring, struct qmu_gpd *gpd)
+{
+ ring->start = gpd;
+ ring->enqueue = gpd;
+ ring->dequeue = gpd;
+ ring->end = gpd + MAX_GPD_NUM - 1;
+}
+
+static void reset_gpd_list(struct mtu3_ep *mep)
+{
+ struct mtu3_gpd_ring *ring = &mep->gpd_ring;
+ struct qmu_gpd *gpd = ring->start;
+
+ if (gpd) {
+ gpd->flag &= ~GPD_FLAGS_HWO;
+ gpd_ring_init(ring, gpd);
+ }
+}
+
+int mtu3_gpd_ring_alloc(struct mtu3_ep *mep)
+{
+ struct qmu_gpd *gpd;
+ struct mtu3_gpd_ring *ring = &mep->gpd_ring;
+
+	/* software owns all GPDs by default */
+ gpd = dma_pool_zalloc(mep->mtu->qmu_gpd_pool, GFP_ATOMIC, &ring->dma);
+ if (gpd == NULL)
+ return -ENOMEM;
+
+ gpd_ring_init(ring, gpd);
+
+ return 0;
+}
+
+void mtu3_gpd_ring_free(struct mtu3_ep *mep)
+{
+ struct mtu3_gpd_ring *ring = &mep->gpd_ring;
+
+ dma_pool_free(mep->mtu->qmu_gpd_pool,
+ ring->start, ring->dma);
+ memset(ring, 0, sizeof(*ring));
+}
+
+/*
+ * calculate the checksum of a GPD or BD;
+ * "noinline" and "mb" prevent a wrong (stale or reordered) calculation
+ */
+static noinline u8 qmu_calc_checksum(u8 *data)
+{
+ u8 chksum = 0;
+ int i;
+
+ data[1] = 0x0; /* set checksum to 0 */
+
+ mb(); /* ensure the gpd/bd is really up-to-date */
+ for (i = 0; i < QMU_CHECKSUM_LEN; i++)
+ chksum += data[i];
+
+ /* Default: HWO=1, @flag[bit0] */
+ chksum += 1;
+
+ return 0xFF - chksum;
+}
+
+void mtu3_qmu_resume(struct mtu3_ep *mep)
+{
+ struct mtu3 *mtu = mep->mtu;
+ void __iomem *mbase = mtu->mac_base;
+ int epnum = mep->epnum;
+ u32 offset;
+
+ offset = mep->is_in ? USB_QMU_TQCSR(epnum) : USB_QMU_RQCSR(epnum);
+
+ mtu3_writel(mbase, offset, QMU_Q_RESUME);
+ if (!(mtu3_readl(mbase, offset) & QMU_Q_ACTIVE))
+ mtu3_writel(mbase, offset, QMU_Q_RESUME);
+}
+
+static struct qmu_gpd *advance_enq_gpd(struct mtu3_gpd_ring *ring)
+{
+ if (ring->enqueue < ring->end)
+ ring->enqueue++;
+ else
+ ring->enqueue = ring->start;
+
+ return ring->enqueue;
+}
+
+static struct qmu_gpd *advance_deq_gpd(struct mtu3_gpd_ring *ring)
+{
+ if (ring->dequeue < ring->end)
+ ring->dequeue++;
+ else
+ ring->dequeue = ring->start;
+
+ return ring->dequeue;
+}
+
+/* check if a ring is empty */
+static int gpd_ring_empty(struct mtu3_gpd_ring *ring)
+{
+ struct qmu_gpd *enq = ring->enqueue;
+ struct qmu_gpd *next;
+
+ if (ring->enqueue < ring->end)
+ next = enq + 1;
+ else
+ next = ring->start;
+
+ /* one gpd is reserved to simplify gpd preparation */
+ return next == ring->dequeue;
+}
+
+int mtu3_prepare_transfer(struct mtu3_ep *mep)
+{
+ return gpd_ring_empty(&mep->gpd_ring);
+}
+
+static int mtu3_prepare_tx_gpd(struct mtu3_ep *mep, struct mtu3_request *mreq)
+{
+ struct qmu_gpd *enq;
+ struct mtu3_gpd_ring *ring = &mep->gpd_ring;
+ struct qmu_gpd *gpd = ring->enqueue;
+ struct usb_request *req = &mreq->request;
+
+ /* set all fields to zero as default value */
+ memset(gpd, 0, sizeof(*gpd));
+
+ gpd->buffer = cpu_to_le32((u32)req->dma);
+ gpd->buf_len = cpu_to_le16(req->length);
+ gpd->flag |= GPD_FLAGS_IOC;
+
+ /* get the next GPD */
+ enq = advance_enq_gpd(ring);
+ dev_dbg(mep->mtu->dev, "TX-EP%d queue gpd=%p, enq=%p\n",
+ mep->epnum, gpd, enq);
+
+ enq->flag &= ~GPD_FLAGS_HWO;
+ gpd->next_gpd = cpu_to_le32((u32)gpd_virt_to_dma(ring, enq));
+
+ if (req->zero)
+ gpd->ext_flag |= GPD_EXT_FLAG_ZLP;
+
+ gpd->chksum = qmu_calc_checksum((u8 *)gpd);
+ gpd->flag |= GPD_FLAGS_HWO;
+
+ mreq->gpd = gpd;
+
+ return 0;
+}
+
+static int mtu3_prepare_rx_gpd(struct mtu3_ep *mep, struct mtu3_request *mreq)
+{
+ struct qmu_gpd *enq;
+ struct mtu3_gpd_ring *ring = &mep->gpd_ring;
+ struct qmu_gpd *gpd = ring->enqueue;
+ struct usb_request *req = &mreq->request;
+
+ /* set all fields to zero as default value */
+ memset(gpd, 0, sizeof(*gpd));
+
+ gpd->buffer = cpu_to_le32((u32)req->dma);
+ gpd->data_buf_len = cpu_to_le16(req->length);
+ gpd->flag |= GPD_FLAGS_IOC;
+
+ /* get the next GPD */
+ enq = advance_enq_gpd(ring);
+ dev_dbg(mep->mtu->dev, "RX-EP%d queue gpd=%p, enq=%p\n",
+ mep->epnum, gpd, enq);
+
+ enq->flag &= ~GPD_FLAGS_HWO;
+ gpd->next_gpd = cpu_to_le32((u32)gpd_virt_to_dma(ring, enq));
+ gpd->chksum = qmu_calc_checksum((u8 *)gpd);
+ gpd->flag |= GPD_FLAGS_HWO;
+
+ mreq->gpd = gpd;
+
+ return 0;
+}
+
+void mtu3_insert_gpd(struct mtu3_ep *mep, struct mtu3_request *mreq)
+{
+
+ if (mep->is_in)
+ mtu3_prepare_tx_gpd(mep, mreq);
+ else
+ mtu3_prepare_rx_gpd(mep, mreq);
+}
+
+int mtu3_qmu_start(struct mtu3_ep *mep)
+{
+ struct mtu3 *mtu = mep->mtu;
+ void __iomem *mbase = mtu->mac_base;
+ struct mtu3_gpd_ring *ring = &mep->gpd_ring;
+ u8 epnum = mep->epnum;
+
+ if (mep->is_in) {
+ /* set QMU start address */
+ mtu3_writel(mbase, USB_QMU_TQSAR(mep->epnum), ring->dma);
+ mtu3_setbits(mbase, MU3D_EP_TXCR0(mep->epnum), TX_DMAREQEN);
+ mtu3_setbits(mbase, U3D_QCR0, QMU_TX_CS_EN(epnum));
+		/* send a zero-length packet according to the ZLP flag in the GPD */
+ mtu3_setbits(mbase, U3D_QCR1, QMU_TX_ZLP(epnum));
+ mtu3_writel(mbase, U3D_TQERRIESR0,
+ QMU_TX_LEN_ERR(epnum) | QMU_TX_CS_ERR(epnum));
+
+ if (mtu3_readl(mbase, USB_QMU_TQCSR(epnum)) & QMU_Q_ACTIVE) {
+ dev_warn(mtu->dev, "Tx %d Active Now!\n", epnum);
+ return 0;
+ }
+ mtu3_writel(mbase, USB_QMU_TQCSR(epnum), QMU_Q_START);
+
+ } else {
+ mtu3_writel(mbase, USB_QMU_RQSAR(mep->epnum), ring->dma);
+ mtu3_setbits(mbase, MU3D_EP_RXCR0(mep->epnum), RX_DMAREQEN);
+ mtu3_setbits(mbase, U3D_QCR0, QMU_RX_CS_EN(epnum));
+ /* don't expect ZLP */
+ mtu3_clrbits(mbase, U3D_QCR3, QMU_RX_ZLP(epnum));
+		/* move to the next GPD when a ZLP is received */
+ mtu3_setbits(mbase, U3D_QCR3, QMU_RX_COZ(epnum));
+ mtu3_writel(mbase, U3D_RQERRIESR0,
+ QMU_RX_LEN_ERR(epnum) | QMU_RX_CS_ERR(epnum));
+ mtu3_writel(mbase, U3D_RQERRIESR1, QMU_RX_ZLP_ERR(epnum));
+
+ if (mtu3_readl(mbase, USB_QMU_RQCSR(epnum)) & QMU_Q_ACTIVE) {
+ dev_warn(mtu->dev, "Rx %d Active Now!\n", epnum);
+ return 0;
+ }
+ mtu3_writel(mbase, USB_QMU_RQCSR(epnum), QMU_Q_START);
+ }
+
+ return 0;
+}
+
+/* may be called in atomic context */
+void mtu3_qmu_stop(struct mtu3_ep *mep)
+{
+ struct mtu3 *mtu = mep->mtu;
+ void __iomem *mbase = mtu->mac_base;
+ int epnum = mep->epnum;
+ u32 value = 0;
+ u32 qcsr;
+ int ret;
+
+ qcsr = mep->is_in ? USB_QMU_TQCSR(epnum) : USB_QMU_RQCSR(epnum);
+
+ if (!(mtu3_readl(mbase, qcsr) & QMU_Q_ACTIVE)) {
+ dev_dbg(mtu->dev, "%s's qmu is inactive now!\n", mep->name);
+ return;
+ }
+ mtu3_writel(mbase, qcsr, QMU_Q_STOP);
+
+ ret = readl_poll_timeout_atomic(mbase + qcsr, value,
+ !(value & QMU_Q_ACTIVE), 1, 1000);
+ if (ret) {
+ dev_err(mtu->dev, "stop %s's qmu failed\n", mep->name);
+ return;
+ }
+
+	dev_dbg(mtu->dev, "%s's qmu stopped now!\n", mep->name);
+}
+
+void mtu3_qmu_flush(struct mtu3_ep *mep)
+{
+
+ dev_dbg(mep->mtu->dev, "%s flush QMU %s\n", __func__,
+ ((mep->is_in) ? "TX" : "RX"));
+
+	/* stop QMU */
+ mtu3_qmu_stop(mep);
+ reset_gpd_list(mep);
+}
+
+/*
+ * The QMU can't transfer a zero-length packet directly (a hardware limit
+ * on old SoCs), so when a ZLP must be sent, we intentionally trigger a
+ * length error interrupt and send the ZLP via the BMU from the ISR.
+ */
+static void qmu_tx_zlp_error_handler(struct mtu3 *mtu, u8 epnum)
+{
+ struct mtu3_ep *mep = mtu->in_eps + epnum;
+ struct mtu3_gpd_ring *ring = &mep->gpd_ring;
+ void __iomem *mbase = mtu->mac_base;
+ struct qmu_gpd *gpd_current = NULL;
+ dma_addr_t gpd_dma = mtu3_readl(mbase, USB_QMU_TQCPR(epnum));
+ struct usb_request *req = NULL;
+ struct mtu3_request *mreq;
+ u32 txcsr = 0;
+ int ret;
+
+ mreq = next_request(mep);
+ if (mreq && mreq->request.length == 0)
+ req = &mreq->request;
+ else
+ return;
+
+ gpd_current = gpd_dma_to_virt(ring, gpd_dma);
+
+ if (le16_to_cpu(gpd_current->buf_len) != 0) {
+ dev_err(mtu->dev, "TX EP%d buffer length error(!=0)\n", epnum);
+ return;
+ }
+
+ dev_dbg(mtu->dev, "%s send ZLP for req=%p\n", __func__, mreq);
+
+ mtu3_clrbits(mbase, MU3D_EP_TXCR0(mep->epnum), TX_DMAREQEN);
+
+ ret = readl_poll_timeout_atomic(mbase + MU3D_EP_TXCR0(mep->epnum),
+ txcsr, !(txcsr & TX_FIFOFULL), 1, 1000);
+ if (ret) {
+ dev_err(mtu->dev, "%s wait for fifo empty fail\n", __func__);
+ return;
+ }
+ mtu3_setbits(mbase, MU3D_EP_TXCR0(mep->epnum), TX_TXPKTRDY);
+
+	/* bypass the current GPD */
+ gpd_current->flag |= GPD_FLAGS_BPS;
+ gpd_current->chksum = qmu_calc_checksum((u8 *)gpd_current);
+ gpd_current->flag |= GPD_FLAGS_HWO;
+
+	/* enable DMAREQEN, switch back to QMU mode */
+ mtu3_setbits(mbase, MU3D_EP_TXCR0(mep->epnum), TX_DMAREQEN);
+ mtu3_qmu_resume(mep);
+}
+
+/*
+ * NOTE: the request list may already be empty in a case such as:
+ * queue_tx --> qmu_interrupt (clear interrupt pending, schedule tasklet) -->
+ * queue_tx --> process_tasklet (meanwhile the second request is transferred,
+ * and the tasklet handles both of them) --> qmu_interrupt for the second one.
+ * To avoid this case, call qmu_done_tx directly from the ISR.
+ */
+static void qmu_done_tx(struct mtu3 *mtu, u8 epnum)
+{
+ struct mtu3_ep *mep = mtu->in_eps + epnum;
+ struct mtu3_gpd_ring *ring = &mep->gpd_ring;
+ void __iomem *mbase = mtu->mac_base;
+ struct qmu_gpd *gpd = ring->dequeue;
+ struct qmu_gpd *gpd_current = NULL;
+ dma_addr_t gpd_dma = mtu3_readl(mbase, USB_QMU_TQCPR(epnum));
+ struct usb_request *request = NULL;
+ struct mtu3_request *mreq;
+
+	/* translate the DMA address read from the QMU register to a virtual one */
+ gpd_current = gpd_dma_to_virt(ring, gpd_dma);
+
+ dev_dbg(mtu->dev, "%s EP%d, last=%p, current=%p, enq=%p\n",
+ __func__, epnum, gpd, gpd_current, ring->enqueue);
+
+ while (gpd != gpd_current && !(gpd->flag & GPD_FLAGS_HWO)) {
+
+ mreq = next_request(mep);
+
+ if (mreq == NULL || mreq->gpd != gpd) {
+ dev_err(mtu->dev, "no correct TX req is found\n");
+ break;
+ }
+
+ request = &mreq->request;
+ request->actual = le16_to_cpu(gpd->buf_len);
+ mtu3_req_complete(mep, request, 0);
+
+ gpd = advance_deq_gpd(ring);
+ }
+
+ dev_dbg(mtu->dev, "%s EP%d, deq=%p, enq=%p, complete\n",
+ __func__, epnum, ring->dequeue, ring->enqueue);
+
+}
+
+static void qmu_done_rx(struct mtu3 *mtu, u8 epnum)
+{
+ struct mtu3_ep *mep = mtu->out_eps + epnum;
+ struct mtu3_gpd_ring *ring = &mep->gpd_ring;
+ void __iomem *mbase = mtu->mac_base;
+ struct qmu_gpd *gpd = ring->dequeue;
+ struct qmu_gpd *gpd_current = NULL;
+ dma_addr_t gpd_dma = mtu3_readl(mbase, USB_QMU_RQCPR(epnum));
+ struct usb_request *req = NULL;
+ struct mtu3_request *mreq;
+
+ gpd_current = gpd_dma_to_virt(ring, gpd_dma);
+
+ dev_dbg(mtu->dev, "%s EP%d, last=%p, current=%p, enq=%p\n",
+ __func__, epnum, gpd, gpd_current, ring->enqueue);
+
+ while (gpd != gpd_current && !(gpd->flag & GPD_FLAGS_HWO)) {
+
+ mreq = next_request(mep);
+
+ if (mreq == NULL || mreq->gpd != gpd) {
+ dev_err(mtu->dev, "no correct RX req is found\n");
+ break;
+ }
+ req = &mreq->request;
+
+ req->actual = le16_to_cpu(gpd->buf_len);
+ mtu3_req_complete(mep, req, 0);
+
+ gpd = advance_deq_gpd(ring);
+ }
+
+ dev_dbg(mtu->dev, "%s EP%d, deq=%p, enq=%p, complete\n",
+ __func__, epnum, ring->dequeue, ring->enqueue);
+}
+
+static void qmu_done_isr(struct mtu3 *mtu, u32 done_status)
+{
+ int i;
+
+ for (i = 1; i < mtu->num_eps; i++) {
+ if (done_status & QMU_RX_DONE_INT(i))
+ qmu_done_rx(mtu, i);
+ if (done_status & QMU_TX_DONE_INT(i))
+ qmu_done_tx(mtu, i);
+ }
+}
+
+static void qmu_exception_isr(struct mtu3 *mtu, u32 qmu_status)
+{
+ void __iomem *mbase = mtu->mac_base;
+ u32 errval;
+ int i;
+
+ if ((qmu_status & RXQ_CSERR_INT) || (qmu_status & RXQ_LENERR_INT)) {
+ errval = mtu3_readl(mbase, U3D_RQERRIR0);
+ for (i = 1; i < mtu->num_eps; i++) {
+ if (errval & QMU_RX_CS_ERR(i))
+ dev_err(mtu->dev, "Rx %d CS error!\n", i);
+
+ if (errval & QMU_RX_LEN_ERR(i))
+ dev_err(mtu->dev, "RX %d Length error\n", i);
+ }
+ mtu3_writel(mbase, U3D_RQERRIR0, errval);
+ }
+
+ if (qmu_status & RXQ_ZLPERR_INT) {
+ errval = mtu3_readl(mbase, U3D_RQERRIR1);
+ for (i = 1; i < mtu->num_eps; i++) {
+ if (errval & QMU_RX_ZLP_ERR(i))
+ dev_dbg(mtu->dev, "RX EP%d Recv ZLP\n", i);
+ }
+ mtu3_writel(mbase, U3D_RQERRIR1, errval);
+ }
+
+ if ((qmu_status & TXQ_CSERR_INT) || (qmu_status & TXQ_LENERR_INT)) {
+ errval = mtu3_readl(mbase, U3D_TQERRIR0);
+ for (i = 1; i < mtu->num_eps; i++) {
+ if (errval & QMU_TX_CS_ERR(i))
+ dev_err(mtu->dev, "Tx %d checksum error!\n", i);
+
+ if (errval & QMU_TX_LEN_ERR(i))
+ qmu_tx_zlp_error_handler(mtu, i);
+ }
+ mtu3_writel(mbase, U3D_TQERRIR0, errval);
+ }
+}
+
+irqreturn_t mtu3_qmu_isr(struct mtu3 *mtu)
+{
+ void __iomem *mbase = mtu->mac_base;
+ u32 qmu_status;
+ u32 qmu_done_status;
+
+	/* U3D_QISAR1 is updated on read */
+ qmu_status = mtu3_readl(mbase, U3D_QISAR1);
+ qmu_status &= mtu3_readl(mbase, U3D_QIER1);
+
+ qmu_done_status = mtu3_readl(mbase, U3D_QISAR0);
+ qmu_done_status &= mtu3_readl(mbase, U3D_QIER0);
+ mtu3_writel(mbase, U3D_QISAR0, qmu_done_status); /* W1C */
+ dev_dbg(mtu->dev, "=== QMUdone[tx=%x, rx=%x] QMUexp[%x] ===\n",
+ (qmu_done_status & 0xFFFF), qmu_done_status >> 16,
+ qmu_status);
+
+ if (qmu_done_status)
+ qmu_done_isr(mtu, qmu_done_status);
+
+ if (qmu_status)
+ qmu_exception_isr(mtu, qmu_status);
+
+ return IRQ_HANDLED;
+}
+
+int mtu3_qmu_init(struct mtu3 *mtu)
+{
+
+ compiletime_assert(QMU_GPD_SIZE == 16, "QMU_GPD size SHOULD be 16B");
+
+ mtu->qmu_gpd_pool = dma_pool_create("QMU_GPD", mtu->dev,
+ QMU_GPD_RING_SIZE, QMU_GPD_SIZE, 0);
+
+ if (!mtu->qmu_gpd_pool)
+ return -ENOMEM;
+
+ return 0;
+}
+
+void mtu3_qmu_exit(struct mtu3 *mtu)
+{
+ dma_pool_destroy(mtu->qmu_gpd_pool);
+}
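
Because the whole GPD ring comes from a single contiguous dma_pool allocation, the gpd_dma_to_virt()/gpd_virt_to_dma() translations above reduce to offset arithmetic between the paired base addresses; condensed (sketch, not part of the patch):

    /* ring->start (CPU address) and ring->dma (bus address) describe the
     * same block, so one index is valid in both address spaces:
     */
    offset = (dma_addr - ring->dma) / sizeof(struct qmu_gpd); /* dma -> index  */
    gpd    = ring->start + offset;                            /* index -> virt */
    dma    = ring->dma + offset * sizeof(struct qmu_gpd);     /* index -> dma  */
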
diff --git a/drivers/usb/mtu3/mtu3_qmu.h b/drivers/usb/mtu3/mtu3_qmu.h
new file mode 100644
index 000000000000..4dafa16bf120
--- /dev/null
+++ b/drivers/usb/mtu3/mtu3_qmu.h
@@ -0,0 +1,43 @@
+/*
+ * mtu3_qmu.h - Queue Management Unit driver header
+ *
+ * Copyright (C) 2016 MediaTek Inc.
+ *
+ * Author: Chunfeng Yun <chunfeng.yun@mediatek.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __MTK_QMU_H__
+#define __MTK_QMU_H__
+
+#define MAX_GPD_NUM 64
+#define QMU_GPD_SIZE (sizeof(struct qmu_gpd))
+#define QMU_GPD_RING_SIZE (MAX_GPD_NUM * QMU_GPD_SIZE)
+
+#define GPD_BUF_SIZE 65532
+
+void mtu3_qmu_stop(struct mtu3_ep *mep);
+int mtu3_qmu_start(struct mtu3_ep *mep);
+void mtu3_qmu_resume(struct mtu3_ep *mep);
+void mtu3_qmu_flush(struct mtu3_ep *mep);
+
+void mtu3_insert_gpd(struct mtu3_ep *mep, struct mtu3_request *mreq);
+int mtu3_prepare_transfer(struct mtu3_ep *mep);
+
+int mtu3_gpd_ring_alloc(struct mtu3_ep *mep);
+void mtu3_gpd_ring_free(struct mtu3_ep *mep);
+
+irqreturn_t mtu3_qmu_isr(struct mtu3 *mtu);
+int mtu3_qmu_init(struct mtu3 *mtu);
+void mtu3_qmu_exit(struct mtu3 *mtu);
+
+#endif
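
With QMU_GPD_SIZE pinned to 16 bytes by the compiletime_assert() in mtu3_qmu_init(), the ring sizing above works out to:

    QMU_GPD_RING_SIZE = MAX_GPD_NUM * QMU_GPD_SIZE
                      = 64 * 16
                      = 1024 bytes per endpoint direction

GPD_BUF_SIZE (65532 = 0xfffc) is, presumably, the largest 4-byte-aligned length that still fits the 16-bit buf_len field of a GPD.
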
diff --git a/drivers/usb/musb/da8xx.c b/drivers/usb/musb/da8xx.c
index 2440f88e07a3..e89708d839e5 100644
--- a/drivers/usb/musb/da8xx.c
+++ b/drivers/usb/musb/da8xx.c
@@ -6,6 +6,9 @@
* Based on the DaVinci "glue layer" code.
* Copyright (C) 2005-2006 by Texas Instruments
*
+ * DT support
+ * Copyright (c) 2016 Petr Kulhavy <petr@barix.com>
+ *
* This file is part of the Inventra Controller Driver for Linux.
*
* The Inventra Controller Driver for Linux is free software; you
@@ -340,6 +343,13 @@ static int da8xx_musb_set_mode(struct musb *musb, u8 musb_mode)
struct da8xx_glue *glue = dev_get_drvdata(musb->controller->parent);
enum phy_mode phy_mode;
+	/*
+	 * The PHY has some issues when it is forced into device or host mode.
+	 * Unless the user requests another mode, configure the PHY in OTG mode.
+	 */
+ if (!musb->is_initialized)
+ return phy_set_mode(glue->phy, PHY_MODE_USB_OTG);
+
switch (musb_mode) {
case MUSB_HOST: /* Force VBUS valid, ID = 0 */
phy_mode = PHY_MODE_USB_HOST;
@@ -366,6 +376,12 @@ static int da8xx_musb_init(struct musb *musb)
musb->mregs += DA8XX_MENTOR_CORE_OFFSET;
+ ret = clk_prepare_enable(glue->clk);
+ if (ret) {
+ dev_err(glue->dev, "failed to enable clock\n");
+ return ret;
+ }
+
/* Returns zero if e.g. not clocked */
rev = musb_readl(reg_base, DA8XX_USB_REVISION_REG);
if (!rev)
@@ -377,12 +393,6 @@ static int da8xx_musb_init(struct musb *musb)
goto fail;
}
- ret = clk_prepare_enable(glue->clk);
- if (ret) {
- dev_err(glue->dev, "failed to enable clock\n");
- goto fail;
- }
-
setup_timer(&otg_workaround, otg_timer, (unsigned long)musb);
/* Reset the controller */
@@ -392,7 +402,7 @@ static int da8xx_musb_init(struct musb *musb)
ret = phy_init(glue->phy);
if (ret) {
dev_err(glue->dev, "Failed to init phy.\n");
- goto err_phy_init;
+ goto fail;
}
ret = phy_power_on(glue->phy);
@@ -412,9 +422,8 @@ static int da8xx_musb_init(struct musb *musb)
err_phy_power_on:
phy_exit(glue->phy);
-err_phy_init:
- clk_disable_unprepare(glue->clk);
fail:
+ clk_disable_unprepare(glue->clk);
return ret;
}
@@ -433,6 +442,21 @@ static int da8xx_musb_exit(struct musb *musb)
return 0;
}
+static inline u8 get_vbus_power(struct device *dev)
+{
+ struct regulator *vbus_supply;
+ int current_uA;
+
+ vbus_supply = regulator_get_optional(dev, "vbus");
+ if (IS_ERR(vbus_supply))
+ return 255;
+ current_uA = regulator_get_current_limit(vbus_supply);
+ regulator_put(vbus_supply);
+ if (current_uA <= 0 || current_uA > 510000)
+ return 255;
+ return current_uA / 1000 / 2;
+}
+
static const struct musb_platform_ops da8xx_ops = {
.quirks = MUSB_DMA_CPPI | MUSB_INDEXED_EP,
.init = da8xx_musb_init,
@@ -458,6 +482,12 @@ static const struct platform_device_info da8xx_dev_info = {
.dma_mask = DMA_BIT_MASK(32),
};
+static const struct musb_hdrc_config da8xx_config = {
+ .ram_bits = 10,
+ .num_eps = 5,
+ .multipoint = 1,
+};
+
static int da8xx_probe(struct platform_device *pdev)
{
struct resource musb_resources[2];
@@ -465,6 +495,7 @@ static int da8xx_probe(struct platform_device *pdev)
struct da8xx_glue *glue;
struct platform_device_info pinfo;
struct clk *clk;
+ struct device_node *np = pdev->dev.of_node;
int ret;
glue = devm_kzalloc(&pdev->dev, sizeof(*glue), GFP_KERNEL);
@@ -487,6 +518,16 @@ static int da8xx_probe(struct platform_device *pdev)
glue->dev = &pdev->dev;
glue->clk = clk;
+ if (IS_ENABLED(CONFIG_OF) && np) {
+ pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
+ if (!pdata)
+ return -ENOMEM;
+
+ pdata->config = &da8xx_config;
+ pdata->mode = musb_get_mode(&pdev->dev);
+ pdata->power = get_vbus_power(&pdev->dev);
+ }
+
pdata->platform_ops = &da8xx_ops;
glue->usb_phy = usb_phy_generic_register();
@@ -537,11 +578,22 @@ static int da8xx_remove(struct platform_device *pdev)
return 0;
}
+#ifdef CONFIG_OF
+static const struct of_device_id da8xx_id_table[] = {
+ {
+ .compatible = "ti,da830-musb",
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(of, da8xx_id_table);
+#endif
+
static struct platform_driver da8xx_driver = {
.probe = da8xx_probe,
.remove = da8xx_remove,
.driver = {
.name = "musb-da8xx",
+ .of_match_table = of_match_ptr(da8xx_id_table),
},
};
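
get_vbus_power() above maps a regulator current limit onto the USB configuration descriptor's bMaxPower convention, which encodes current in 2 mA units with 255 (510 mA) as the largest encodable value. A worked example:

    current_uA = 500000;            /* 500 mA regulator limit     */
    power = 500000 / 1000 / 2;      /* uA -> mA -> 2 mA units     */
                                    /* = 250, i.e. reports 500 mA */

A missing regulator or an out-of-range limit (<= 0 or > 510000 uA) falls back to 255.
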
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
index c3e172e15ec3..9e226468a13e 100644
--- a/drivers/usb/musb/musb_core.c
+++ b/drivers/usb/musb/musb_core.c
@@ -100,6 +100,7 @@
#include <linux/io.h>
#include <linux/dma-mapping.h>
#include <linux/usb.h>
+#include <linux/usb/of.h>
#include "musb_core.h"
#include "musb_trace.h"
@@ -130,6 +131,24 @@ static inline struct musb *dev_to_musb(struct device *dev)
return dev_get_drvdata(dev);
}
+enum musb_mode musb_get_mode(struct device *dev)
+{
+ enum usb_dr_mode mode;
+
+ mode = usb_get_dr_mode(dev);
+ switch (mode) {
+ case USB_DR_MODE_HOST:
+ return MUSB_HOST;
+ case USB_DR_MODE_PERIPHERAL:
+ return MUSB_PERIPHERAL;
+ case USB_DR_MODE_OTG:
+ case USB_DR_MODE_UNKNOWN:
+ default:
+ return MUSB_OTG;
+ }
+}
+EXPORT_SYMBOL_GPL(musb_get_mode);
+
/*-------------------------------------------------------------------------*/
#ifndef CONFIG_BLACKFIN
@@ -569,10 +588,7 @@ static irqreturn_t musb_stage0_irq(struct musb *musb, u8 int_usb,
if (devctl & MUSB_DEVCTL_HM) {
switch (musb->xceiv->otg->state) {
case OTG_STATE_A_SUSPEND:
- /* remote wakeup? later, GetPortStatus
- * will stop RESUME signaling
- */
-
+ /* remote wakeup? */
musb->port1_status |=
(USB_PORT_STAT_C_SUSPEND << 16)
| MUSB_PORT_STAT_RESUME;
@@ -2414,8 +2430,9 @@ fail2:
musb_platform_exit(musb);
fail1:
- dev_err(musb->controller,
- "musb_init_controller failed with status %d\n", status);
+ if (status != -EPROBE_DEFER)
+ dev_err(musb->controller,
+ "%s failed with status %d\n", __func__, status);
musb_free(musb);
diff --git a/drivers/usb/musb/musb_core.h b/drivers/usb/musb/musb_core.h
index 91817d77d59c..a611e2f67bdc 100644
--- a/drivers/usb/musb/musb_core.h
+++ b/drivers/usb/musb/musb_core.h
@@ -626,4 +626,10 @@ static inline void musb_platform_post_root_reset_end(struct musb *musb)
musb->ops->post_root_reset_end(musb);
}
+/*
+ * gets the "dr_mode" property from DT and converts it into musb_mode;
+ * returns MUSB_OTG if the property is not found or not recognized
+ */
+extern enum musb_mode musb_get_mode(struct device *dev);
+
#endif /* __MUSB_CORE_H__ */
diff --git a/drivers/usb/musb/musb_gadget.c b/drivers/usb/musb/musb_gadget.c
index a55173c9e564..1acc4864f9f6 100644
--- a/drivers/usb/musb/musb_gadget.c
+++ b/drivers/usb/musb/musb_gadget.c
@@ -974,8 +974,8 @@ static int musb_gadget_enable(struct usb_ep *ep,
goto fail;
/* REVISIT this rules out high bandwidth periodic transfers */
- tmp = usb_endpoint_maxp(desc);
- if (tmp & ~0x07ff) {
+ tmp = usb_endpoint_maxp_mult(desc) - 1;
+ if (tmp) {
int ok;
if (usb_endpoint_dir_in(desc))
@@ -987,12 +987,12 @@ static int musb_gadget_enable(struct usb_ep *ep,
musb_dbg(musb, "no support for high bandwidth ISO");
goto fail;
}
- musb_ep->hb_mult = (tmp >> 11) & 3;
+ musb_ep->hb_mult = tmp;
} else {
musb_ep->hb_mult = 0;
}
- musb_ep->packet_sz = tmp & 0x7ff;
+ musb_ep->packet_sz = usb_endpoint_maxp(desc);
tmp = musb_ep->packet_sz * (musb_ep->hb_mult + 1);
/* enable the interrupts for the endpoint, set the endpoint
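
The gadget conversion above replaces open-coded masking of wMaxPacketSize with the usb_endpoint_maxp() / usb_endpoint_maxp_mult() helpers: bits 10:0 carry the packet size, bits 12:11 carry the additional transactions per microframe. A worked example for a high-bandwidth isochronous endpoint:

    /* wMaxPacketSize = 0x0c00: bits 10:0 = 1024, bits 12:11 = 1 */
    usb_endpoint_maxp(desc);        /* -> 1024 bytes per packet     */
    usb_endpoint_maxp_mult(desc);   /* -> 2 transactions per uframe */
    /* maximum payload per microframe: 2 * 1024 = 2048 bytes */
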
diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c
index 53bc4ceefe89..f6cdbad00dac 100644
--- a/drivers/usb/musb/musb_host.c
+++ b/drivers/usb/musb/musb_host.c
@@ -2237,7 +2237,7 @@ static int musb_urb_enqueue(
* Some musb cores don't support high bandwidth ISO transfers; and
* we don't (yet!) support high bandwidth interrupt transfers.
*/
- qh->hb_mult = 1 + ((qh->maxpacket >> 11) & 0x03);
+ qh->hb_mult = usb_endpoint_maxp_mult(epd);
if (qh->hb_mult > 1) {
int ok = (qh->type == USB_ENDPOINT_XFER_ISOC);
diff --git a/drivers/usb/musb/musb_virthub.c b/drivers/usb/musb/musb_virthub.c
index 61b5f1c3c5bc..0b4595439d51 100644
--- a/drivers/usb/musb/musb_virthub.c
+++ b/drivers/usb/musb/musb_virthub.c
@@ -132,7 +132,6 @@ void musb_port_suspend(struct musb *musb, bool do_suspend)
musb_dbg(musb, "Root port resuming, power %02x", power);
- /* later, GetPortStatus will stop RESUME signaling */
musb->port1_status |= MUSB_PORT_STAT_RESUME;
schedule_delayed_work(&musb->finish_resume_work,
msecs_to_jiffies(USB_RESUME_TIMEOUT));
diff --git a/drivers/usb/musb/omap2430.c b/drivers/usb/musb/omap2430.c
index e8be8e39ab8f..8b73214a9ea3 100644
--- a/drivers/usb/musb/omap2430.c
+++ b/drivers/usb/musb/omap2430.c
@@ -277,12 +277,12 @@ static int omap2430_musb_init(struct musb *musb)
if (status == -ENXIO)
return status;
- pr_err("HS USB OTG: no transceiver configured\n");
+ dev_dbg(dev, "HS USB OTG: no transceiver configured\n");
return -EPROBE_DEFER;
}
if (IS_ERR(musb->phy)) {
- pr_err("HS USB OTG: no PHY configured\n");
+ dev_err(dev, "HS USB OTG: no PHY configured\n");
return PTR_ERR(musb->phy);
}
musb->isr = omap2430_musb_interrupt;
@@ -301,7 +301,7 @@ static int omap2430_musb_init(struct musb *musb)
musb_writel(musb->mregs, OTG_INTERFSEL, l);
- pr_debug("HS USB OTG: revision 0x%x, sysconfig 0x%02x, "
+ dev_dbg(dev, "HS USB OTG: revision 0x%x, sysconfig 0x%02x, "
"sysstatus 0x%x, intrfsel 0x%x, simenable 0x%x\n",
musb_readl(musb->mregs, OTG_REVISION),
musb_readl(musb->mregs, OTG_SYSCONFIG),
diff --git a/drivers/usb/musb/sunxi.c b/drivers/usb/musb/sunxi.c
index 1408245be18e..d0be0eadd0d9 100644
--- a/drivers/usb/musb/sunxi.c
+++ b/drivers/usb/musb/sunxi.c
@@ -186,16 +186,6 @@ static irqreturn_t sunxi_musb_interrupt(int irq, void *__hci)
if (musb->int_usb)
writeb(musb->int_usb, musb->mregs + SUNXI_MUSB_INTRUSB);
- /*
- * sunxi musb often signals babble on low / full speed device
- * disconnect, without ever raising MUSB_INTR_DISCONNECT, since
- * normally babble never happens treat it as disconnect.
- */
- if ((musb->int_usb & MUSB_INTR_BABBLE) && is_host_active(musb)) {
- musb->int_usb &= ~MUSB_INTR_BABBLE;
- musb->int_usb |= MUSB_INTR_DISCONNECT;
- }
-
if ((musb->int_usb & MUSB_INTR_RESET) && !is_host_active(musb)) {
/* ep0 FADDR must be 0 when (re)entering peripheral mode */
musb_ep_select(musb->mregs, 0);
@@ -390,6 +380,20 @@ static int sunxi_musb_set_mode(struct musb *musb, u8 mode)
return 0;
}
+static int sunxi_musb_recover(struct musb *musb)
+{
+ struct sunxi_glue *glue = dev_get_drvdata(musb->controller->parent);
+
+	/*
+	 * Schedule a phy_set_mode with the current glue->phy_mode value;
+	 * this will force the current session to end.
+	 */
+ set_bit(SUNXI_MUSB_FL_PHY_MODE_PEND, &glue->flags);
+ schedule_work(&glue->work);
+
+ return 0;
+}
+
/*
* sunxi musb register layout
* 0x00 - 0x17 fifo regs, 1 long per fifo
@@ -618,6 +622,7 @@ static const struct musb_platform_ops sunxi_musb_ops = {
.dma_init = sunxi_musb_dma_controller_create,
.dma_exit = sunxi_musb_dma_controller_destroy,
.set_mode = sunxi_musb_set_mode,
+ .recover = sunxi_musb_recover,
.set_vbus = sunxi_musb_set_vbus,
.pre_root_reset_end = sunxi_musb_pre_root_reset_end,
.post_root_reset_end = sunxi_musb_post_root_reset_end,
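
sunxi_musb_recover() cannot poke the PHY synchronously from the recover path, so it only flags the existing worker. A hedged sketch (hypothetical function name; field names taken from the diff context) of how the worker side consumes such a flag:

    static void glue_work_fn(struct work_struct *work)	/* sketch only */
    {
            struct sunxi_glue *glue =
                    container_of(work, struct sunxi_glue, work);

            /* re-apply the current mode, forcing the session to end */
            if (test_and_clear_bit(SUNXI_MUSB_FL_PHY_MODE_PEND, &glue->flags))
                    phy_set_mode(glue->phy, glue->phy_mode);
    }
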
diff --git a/drivers/usb/phy/Kconfig b/drivers/usb/phy/Kconfig
index b9c409a18faa..61cef7511a50 100644
--- a/drivers/usb/phy/Kconfig
+++ b/drivers/usb/phy/Kconfig
@@ -84,6 +84,7 @@ config SAMSUNG_USBPHY
config TWL6030_USB
tristate "TWL6030 USB Transceiver Driver"
depends on TWL4030_CORE && OMAP_USB2 && USB_MUSB_OMAP2PLUS
+ depends on OF
help
Enable this to support the USB OTG transceiver on TWL6030
family chips. This TWL6030 transceiver has the VBUS and ID GND
diff --git a/drivers/usb/phy/phy-am335x-control.c b/drivers/usb/phy/phy-am335x-control.c
index 42a1afe36a90..5f5f19813fde 100644
--- a/drivers/usb/phy/phy-am335x-control.c
+++ b/drivers/usb/phy/phy-am335x-control.c
@@ -134,10 +134,12 @@ struct phy_control *am335x_get_phy_control(struct device *dev)
return NULL;
dev = bus_find_device(&platform_bus_type, NULL, node, match);
+ of_node_put(node);
if (!dev)
return NULL;
ctrl_usb = dev_get_drvdata(dev);
+ put_device(dev);
if (!ctrl_usb)
return NULL;
return &ctrl_usb->phy_ctrl;
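
The two added puts balance the references the lookup helpers take on success; the overall shape of the pattern (property name illustrative, not from the patch):

    node = of_parse_phandle(dev->of_node, "ctrl-module", 0); /* +1 node ref */
    ctrl_dev = bus_find_device(&platform_bus_type, NULL, node, match);
    of_node_put(node);              /* lookup done, drop the node ref      */
    if (!ctrl_dev)
            return NULL;
    ctrl_usb = dev_get_drvdata(ctrl_dev);
    put_device(ctrl_dev);           /* drop the ref bus_find_device() took */
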
diff --git a/drivers/usb/phy/phy-generic.c b/drivers/usb/phy/phy-generic.c
index 8311ba2968cd..89d6e7a5fdb7 100644
--- a/drivers/usb/phy/phy-generic.c
+++ b/drivers/usb/phy/phy-generic.c
@@ -59,6 +59,15 @@ EXPORT_SYMBOL_GPL(usb_phy_generic_unregister);
static int nop_set_suspend(struct usb_phy *x, int suspend)
{
+ struct usb_phy_generic *nop = dev_get_drvdata(x->dev);
+
+ if (!IS_ERR(nop->clk)) {
+ if (suspend)
+ clk_disable_unprepare(nop->clk);
+ else
+ clk_prepare_enable(nop->clk);
+ }
+
return 0;
}
diff --git a/drivers/usb/phy/phy-isp1301-omap.c b/drivers/usb/phy/phy-isp1301-omap.c
index 8d111ec653e4..042c5a8fd423 100644
--- a/drivers/usb/phy/phy-isp1301-omap.c
+++ b/drivers/usb/phy/phy-isp1301-omap.c
@@ -94,7 +94,7 @@ struct isp1301 {
#if defined(CONFIG_MACH_OMAP_H2) || defined(CONFIG_MACH_OMAP_H3)
-#if defined(CONFIG_TPS65010) || (defined(CONFIG_TPS65010_MODULE) && defined(MODULE))
+#if IS_REACHABLE(CONFIG_TPS65010)
#include <linux/i2c/tps65010.h>
diff --git a/drivers/usb/phy/phy-twl6030-usb.c b/drivers/usb/phy/phy-twl6030-usb.c
index a72e8d670adc..628b600b02b1 100644
--- a/drivers/usb/phy/phy-twl6030-usb.c
+++ b/drivers/usb/phy/phy-twl6030-usb.c
@@ -108,7 +108,6 @@ struct twl6030_usb {
enum musb_vbus_id_status linkstat;
u8 asleep;
bool vbus_enable;
- const char *regulator;
};
#define comparator_to_twl(x) container_of((x), struct twl6030_usb, comparator)
@@ -166,7 +165,7 @@ static int twl6030_usb_ldo_init(struct twl6030_usb *twl)
/* Program MISC2 register and set bit VUSB_IN_VBAT */
twl6030_writeb(twl, TWL6030_MODULE_ID0, 0x10, TWL6030_MISC2);
- twl->usb3v3 = regulator_get(twl->dev, twl->regulator);
+ twl->usb3v3 = regulator_get(twl->dev, "usb");
if (IS_ERR(twl->usb3v3))
return -ENODEV;
@@ -341,7 +340,11 @@ static int twl6030_usb_probe(struct platform_device *pdev)
int status, err;
struct device_node *np = pdev->dev.of_node;
struct device *dev = &pdev->dev;
- struct twl4030_usb_data *pdata = dev_get_platdata(dev);
+
+ if (!np) {
+ dev_err(dev, "no DT info\n");
+ return -EINVAL;
+ }
twl = devm_kzalloc(dev, sizeof(*twl), GFP_KERNEL);
if (!twl)
@@ -361,18 +364,6 @@ static int twl6030_usb_probe(struct platform_device *pdev)
return -EPROBE_DEFER;
}
- if (np) {
- twl->regulator = "usb";
- } else if (pdata) {
- if (pdata->features & TWL6032_SUBCLASS)
- twl->regulator = "ldousb";
- else
- twl->regulator = "vusb";
- } else {
- dev_err(&pdev->dev, "twl6030 initialized without pdata\n");
- return -EINVAL;
- }
-
/* init spinlock for workqueue */
spin_lock_init(&twl->lock);
@@ -436,13 +427,11 @@ static int twl6030_usb_remove(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_OF
static const struct of_device_id twl6030_usb_id_table[] = {
{ .compatible = "ti,twl6030-usb" },
{}
};
MODULE_DEVICE_TABLE(of, twl6030_usb_id_table);
-#endif
static struct platform_driver twl6030_usb_driver = {
.probe = twl6030_usb_probe,
diff --git a/drivers/usb/renesas_usbhs/fifo.c b/drivers/usb/renesas_usbhs/fifo.c
index 857e78337324..d1af831f43eb 100644
--- a/drivers/usb/renesas_usbhs/fifo.c
+++ b/drivers/usb/renesas_usbhs/fifo.c
@@ -100,10 +100,7 @@ static void __usbhsf_pkt_del(struct usbhs_pkt *pkt)
static struct usbhs_pkt *__usbhsf_pkt_get(struct usbhs_pipe *pipe)
{
- if (list_empty(&pipe->list))
- return NULL;
-
- return list_first_entry(&pipe->list, struct usbhs_pkt, node);
+ return list_first_entry_or_null(&pipe->list, struct usbhs_pkt, node);
}
static void usbhsf_fifo_clear(struct usbhs_pipe *pipe,
diff --git a/drivers/usb/serial/Kconfig b/drivers/usb/serial/Kconfig
index 56ecb8b5115d..d9bc8dafe000 100644
--- a/drivers/usb/serial/Kconfig
+++ b/drivers/usb/serial/Kconfig
@@ -255,6 +255,16 @@ config USB_SERIAL_F81232
To compile this driver as a module, choose M here: the
module will be called f81232.
+config USB_SERIAL_F8153X
+ tristate "USB Fintek F81532/534 Multi-Ports Serial Driver"
+ help
+ Say Y here if you want to use the Fintek F81532/534 Multi-Ports
+ USB to serial adapter.
+
+ To compile this driver as a module, choose M here: the
+ module will be called f81534.
+
config USB_SERIAL_GARMIN
tristate "USB Garmin GPS driver"
help
diff --git a/drivers/usb/serial/Makefile b/drivers/usb/serial/Makefile
index 349d9df0895f..9e43b7b002eb 100644
--- a/drivers/usb/serial/Makefile
+++ b/drivers/usb/serial/Makefile
@@ -23,6 +23,7 @@ obj-$(CONFIG_USB_SERIAL_EDGEPORT) += io_edgeport.o
obj-$(CONFIG_USB_SERIAL_EDGEPORT_TI) += io_ti.o
obj-$(CONFIG_USB_SERIAL_EMPEG) += empeg.o
obj-$(CONFIG_USB_SERIAL_F81232) += f81232.o
+obj-$(CONFIG_USB_SERIAL_F8153X) += f81534.o
obj-$(CONFIG_USB_SERIAL_FTDI_SIO) += ftdi_sio.o
obj-$(CONFIG_USB_SERIAL_GARMIN) += garmin_gps.o
obj-$(CONFIG_USB_SERIAL_IPAQ) += ipaq.o
diff --git a/drivers/usb/serial/ch341.c b/drivers/usb/serial/ch341.c
index f139488d0816..2597b83a8ae2 100644
--- a/drivers/usb/serial/ch341.c
+++ b/drivers/usb/serial/ch341.c
@@ -61,13 +61,26 @@
* the Net/FreeBSD uchcom.c driver by Takanori Watanabe. Domo arigato.
*/
+#define CH341_REQ_READ_VERSION 0x5F
#define CH341_REQ_WRITE_REG 0x9A
#define CH341_REQ_READ_REG 0x95
-#define CH341_REG_BREAK1 0x05
-#define CH341_REG_BREAK2 0x18
-#define CH341_NBREAK_BITS_REG1 0x01
-#define CH341_NBREAK_BITS_REG2 0x40
-
+#define CH341_REQ_SERIAL_INIT 0xA1
+#define CH341_REQ_MODEM_CTRL 0xA4
+
+#define CH341_REG_BREAK 0x05
+#define CH341_REG_LCR 0x18
+#define CH341_NBREAK_BITS 0x01
+
+#define CH341_LCR_ENABLE_RX 0x80
+#define CH341_LCR_ENABLE_TX 0x40
+#define CH341_LCR_MARK_SPACE 0x20
+#define CH341_LCR_PAR_EVEN 0x10
+#define CH341_LCR_ENABLE_PAR 0x08
+#define CH341_LCR_STOP_BITS_2 0x04
+#define CH341_LCR_CS8 0x03
+#define CH341_LCR_CS7 0x02
+#define CH341_LCR_CS6 0x01
+#define CH341_LCR_CS5 0x00
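+
+/*
+ * Illustrative composition (an assumption for clarity, not an additional
+ * register contract): an 8N1 line-control value would be
+ * CH341_LCR_ENABLE_RX | CH341_LCR_ENABLE_TX | CH341_LCR_CS8 = 0xc3.
+ */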
static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x4348, 0x5523) },
@@ -119,10 +132,10 @@ static int ch341_control_in(struct usb_device *dev,
return r;
}
-static int ch341_set_baudrate(struct usb_device *dev,
- struct ch341_private *priv)
+static int ch341_init_set_baudrate(struct usb_device *dev,
+ struct ch341_private *priv, unsigned ctrl)
{
- short a, b;
+ short a;
int r;
unsigned long factor;
short divisor;
@@ -142,18 +155,17 @@ static int ch341_set_baudrate(struct usb_device *dev,
factor = 0x10000 - factor;
a = (factor & 0xff00) | divisor;
- b = factor & 0xff;
- r = ch341_control_out(dev, 0x9a, 0x1312, a);
- if (!r)
- r = ch341_control_out(dev, 0x9a, 0x0f2c, b);
+ /* 0x9c is "enable SFR_UART Control register and timer" */
+ r = ch341_control_out(dev, CH341_REQ_SERIAL_INIT,
+ 0x9c | (ctrl << 8), a | 0x80);
return r;
}
static int ch341_set_handshake(struct usb_device *dev, u8 control)
{
- return ch341_control_out(dev, 0xa4, ~control, 0);
+ return ch341_control_out(dev, CH341_REQ_MODEM_CTRL, ~control, 0);
}
static int ch341_get_status(struct usb_device *dev, struct ch341_private *priv)
@@ -167,7 +179,7 @@ static int ch341_get_status(struct usb_device *dev, struct ch341_private *priv)
if (!buffer)
return -ENOMEM;
- r = ch341_control_in(dev, 0x95, 0x0706, 0, buffer, size);
+ r = ch341_control_in(dev, CH341_REQ_READ_REG, 0x0706, 0, buffer, size);
if (r < 0)
goto out;
@@ -197,24 +209,21 @@ static int ch341_configure(struct usb_device *dev, struct ch341_private *priv)
return -ENOMEM;
/* expect two bytes 0x27 0x00 */
- r = ch341_control_in(dev, 0x5f, 0, 0, buffer, size);
+ r = ch341_control_in(dev, CH341_REQ_READ_VERSION, 0, 0, buffer, size);
if (r < 0)
goto out;
+ dev_dbg(&dev->dev, "Chip version: 0x%02x\n", buffer[0]);
- r = ch341_control_out(dev, 0xa1, 0, 0);
- if (r < 0)
- goto out;
-
- r = ch341_set_baudrate(dev, priv);
+ r = ch341_control_out(dev, CH341_REQ_SERIAL_INIT, 0, 0);
if (r < 0)
goto out;
/* expect two bytes 0x56 0x00 */
- r = ch341_control_in(dev, 0x95, 0x2518, 0, buffer, size);
+ r = ch341_control_in(dev, CH341_REQ_READ_REG, 0x2518, 0, buffer, size);
if (r < 0)
goto out;
- r = ch341_control_out(dev, 0x9a, 0x2518, 0x0050);
+ r = ch341_control_out(dev, CH341_REQ_WRITE_REG, 0x2518, 0x0050);
if (r < 0)
goto out;
@@ -223,11 +232,7 @@ static int ch341_configure(struct usb_device *dev, struct ch341_private *priv)
if (r < 0)
goto out;
- r = ch341_control_out(dev, 0xa1, 0x501f, 0xd90a);
- if (r < 0)
- goto out;
-
- r = ch341_set_baudrate(dev, priv);
+ r = ch341_init_set_baudrate(dev, priv, 0);
if (r < 0)
goto out;
@@ -342,16 +347,53 @@ static void ch341_set_termios(struct tty_struct *tty,
struct ch341_private *priv = usb_get_serial_port_data(port);
unsigned baud_rate;
unsigned long flags;
+ unsigned char ctrl;
+ int r;
+
+ /* redundant changes may cause the chip to lose bytes */
+ if (old_termios && !tty_termios_hw_change(&tty->termios, old_termios))
+ return;
baud_rate = tty_get_baud_rate(tty);
priv->baud_rate = baud_rate;
+ ctrl = CH341_LCR_ENABLE_RX | CH341_LCR_ENABLE_TX;
+
+ switch (C_CSIZE(tty)) {
+ case CS5:
+ ctrl |= CH341_LCR_CS5;
+ break;
+ case CS6:
+ ctrl |= CH341_LCR_CS6;
+ break;
+ case CS7:
+ ctrl |= CH341_LCR_CS7;
+ break;
+ case CS8:
+ ctrl |= CH341_LCR_CS8;
+ break;
+ }
+
+ if (C_PARENB(tty)) {
+ ctrl |= CH341_LCR_ENABLE_PAR;
+ if (C_PARODD(tty) == 0)
+ ctrl |= CH341_LCR_PAR_EVEN;
+ if (C_CMSPAR(tty))
+ ctrl |= CH341_LCR_MARK_SPACE;
+ }
+
+ if (C_CSTOPB(tty))
+ ctrl |= CH341_LCR_STOP_BITS_2;
if (baud_rate) {
spin_lock_irqsave(&priv->lock, flags);
priv->line_control |= (CH341_BIT_DTR | CH341_BIT_RTS);
spin_unlock_irqrestore(&priv->lock, flags);
- ch341_set_baudrate(port->serial->dev, priv);
+ r = ch341_init_set_baudrate(port->serial->dev, priv, ctrl);
+ if (r < 0 && old_termios) {
+ priv->baud_rate = tty_termios_baud_rate(old_termios);
+ tty_termios_copy_hw(&tty->termios, old_termios);
+ }
} else {
spin_lock_irqsave(&priv->lock, flags);
priv->line_control &= ~(CH341_BIT_DTR | CH341_BIT_RTS);
@@ -360,17 +402,12 @@ static void ch341_set_termios(struct tty_struct *tty,
ch341_set_handshake(port->serial->dev, priv->line_control);
- /* Unimplemented:
- * (cflag & CSIZE) : data bits [5, 8]
- * (cflag & PARENB) : parity {NONE, EVEN, ODD}
- * (cflag & CSTOPB) : stop bits [1, 2]
- */
}
static void ch341_break_ctl(struct tty_struct *tty, int break_state)
{
const uint16_t ch341_break_reg =
- ((uint16_t) CH341_REG_BREAK2 << 8) | CH341_REG_BREAK1;
+ ((uint16_t) CH341_REG_LCR << 8) | CH341_REG_BREAK;
struct usb_serial_port *port = tty->driver_data;
int r;
uint16_t reg_contents;
@@ -391,12 +428,12 @@ static void ch341_break_ctl(struct tty_struct *tty, int break_state)
__func__, break_reg[0], break_reg[1]);
if (break_state != 0) {
dev_dbg(&port->dev, "%s - Enter break state requested\n", __func__);
- break_reg[0] &= ~CH341_NBREAK_BITS_REG1;
- break_reg[1] &= ~CH341_NBREAK_BITS_REG2;
+ break_reg[0] &= ~CH341_NBREAK_BITS;
+ break_reg[1] &= ~CH341_LCR_ENABLE_TX;
} else {
dev_dbg(&port->dev, "%s - Leave break state requested\n", __func__);
- break_reg[0] |= CH341_NBREAK_BITS_REG1;
- break_reg[1] |= CH341_NBREAK_BITS_REG2;
+ break_reg[0] |= CH341_NBREAK_BITS;
+ break_reg[1] |= CH341_LCR_ENABLE_TX;
}
dev_dbg(&port->dev, "%s - New ch341 break register contents - reg1: %x, reg2: %x\n",
__func__, break_reg[0], break_reg[1]);
diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
index 243ac5ebe46a..fff718352e0c 100644
--- a/drivers/usb/serial/cp210x.c
+++ b/drivers/usb/serial/cp210x.c
@@ -23,6 +23,9 @@
#include <linux/usb.h>
#include <linux/uaccess.h>
#include <linux/usb/serial.h>
+#include <linux/gpio/driver.h>
+#include <linux/bitops.h>
+#include <linux/mutex.h>
#define DRIVER_DESC "Silicon Labs CP210x RS232 serial adaptor driver"
@@ -33,7 +36,7 @@ static int cp210x_open(struct tty_struct *tty, struct usb_serial_port *);
static void cp210x_close(struct usb_serial_port *);
static void cp210x_get_termios(struct tty_struct *, struct usb_serial_port *);
static void cp210x_get_termios_port(struct usb_serial_port *port,
- unsigned int *cflagp, unsigned int *baudp);
+ tcflag_t *cflagp, unsigned int *baudp);
static void cp210x_change_speed(struct tty_struct *, struct usb_serial_port *,
struct ktermios *);
static void cp210x_set_termios(struct tty_struct *, struct usb_serial_port *,
@@ -44,6 +47,9 @@ static int cp210x_tiocmset(struct tty_struct *, unsigned int, unsigned int);
static int cp210x_tiocmset_port(struct usb_serial_port *port,
unsigned int, unsigned int);
static void cp210x_break_ctl(struct tty_struct *, int);
+static int cp210x_attach(struct usb_serial *);
+static void cp210x_disconnect(struct usb_serial *);
+static void cp210x_release(struct usb_serial *);
static int cp210x_port_probe(struct usb_serial_port *);
static int cp210x_port_remove(struct usb_serial_port *);
static void cp210x_dtr_rts(struct usb_serial_port *p, int on);
@@ -209,6 +215,16 @@ static const struct usb_device_id id_table[] = {
MODULE_DEVICE_TABLE(usb, id_table);
+struct cp210x_serial_private {
+#ifdef CONFIG_GPIOLIB
+ struct gpio_chip gc;
+ u8 config;
+ u8 gpio_mode;
+ bool gpio_registered;
+#endif
+ u8 partnum;
+};
+
struct cp210x_port_private {
__u8 bInterfaceNumber;
bool has_swapped_line_ctl;
@@ -230,6 +246,9 @@ static struct usb_serial_driver cp210x_device = {
.tx_empty = cp210x_tx_empty,
.tiocmget = cp210x_tiocmget,
.tiocmset = cp210x_tiocmset,
+ .attach = cp210x_attach,
+ .disconnect = cp210x_disconnect,
+ .release = cp210x_release,
.port_probe = cp210x_port_probe,
.port_remove = cp210x_port_remove,
.dtr_rts = cp210x_dtr_rts
@@ -272,6 +291,7 @@ static struct usb_serial_driver * const serial_drivers[] = {
#define CP210X_SET_CHARS 0x19
#define CP210X_GET_BAUDRATE 0x1D
#define CP210X_SET_BAUDRATE 0x1E
+#define CP210X_VENDOR_SPECIFIC 0xFF
/* CP210X_IFC_ENABLE */
#define UART_ENABLE 0x0001
@@ -314,6 +334,21 @@ static struct usb_serial_driver * const serial_drivers[] = {
#define CONTROL_WRITE_DTR 0x0100
#define CONTROL_WRITE_RTS 0x0200
+/* CP210X_VENDOR_SPECIFIC values */
+#define CP210X_READ_LATCH 0x00C2
+#define CP210X_GET_PARTNUM 0x370B
+#define CP210X_GET_PORTCONFIG 0x370C
+#define CP210X_GET_DEVICEMODE 0x3711
+#define CP210X_WRITE_LATCH 0x37E1
+
+/* Part number definitions */
+#define CP210X_PARTNUM_CP2101 0x01
+#define CP210X_PARTNUM_CP2102 0x02
+#define CP210X_PARTNUM_CP2103 0x03
+#define CP210X_PARTNUM_CP2104 0x04
+#define CP210X_PARTNUM_CP2105 0x05
+#define CP210X_PARTNUM_CP2108 0x08
+
/* CP210X_GET_COMM_STATUS returns these 0x13 bytes */
struct cp210x_comm_status {
__le32 ulErrors;
@@ -369,6 +404,60 @@ struct cp210x_flow_ctl {
#define CP210X_SERIAL_RTS_ACTIVE 1
#define CP210X_SERIAL_RTS_FLOW_CTL 2
+/* CP210X_VENDOR_SPECIFIC, CP210X_GET_DEVICEMODE call reads these 0x2 bytes. */
+struct cp210x_pin_mode {
+ u8 eci;
+ u8 sci;
+} __packed;
+
+#define CP210X_PIN_MODE_MODEM 0
+#define CP210X_PIN_MODE_GPIO BIT(0)
+
+/*
+ * CP210X_VENDOR_SPECIFIC, CP210X_GET_PORTCONFIG call reads these 0xf bytes.
+ * Structure needs padding due to unused/unspecified bytes.
+ */
+struct cp210x_config {
+ __le16 gpio_mode;
+ u8 __pad0[2];
+ __le16 reset_state;
+ u8 __pad1[4];
+ __le16 suspend_state;
+ u8 sci_cfg;
+ u8 eci_cfg;
+ u8 device_cfg;
+} __packed;
+
+/* GPIO modes */
+#define CP210X_SCI_GPIO_MODE_OFFSET 9
+#define CP210X_SCI_GPIO_MODE_MASK GENMASK(11, 9)
+
+#define CP210X_ECI_GPIO_MODE_OFFSET 2
+#define CP210X_ECI_GPIO_MODE_MASK GENMASK(3, 2)
+
+/* CP2105 port configuration values */
+#define CP2105_GPIO0_TXLED_MODE BIT(0)
+#define CP2105_GPIO1_RXLED_MODE BIT(1)
+#define CP2105_GPIO1_RS485_MODE BIT(2)
+
+/* CP210X_VENDOR_SPECIFIC, CP210X_WRITE_LATCH call writes these 0x2 bytes. */
+struct cp210x_gpio_write {
+ u8 mask;
+ u8 state;
+} __packed;
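+
+/*
+ * Example (illustrative only): to drive GPIO1 high, set mask = BIT(1) and
+ * state = BIT(1); to drive it low, set mask = BIT(1) and state = 0, as done
+ * by cp210x_gpio_set() below.
+ */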
+
+/*
+ * Helper to get interface number when we only have struct usb_serial.
+ */
+static u8 cp210x_interface_num(struct usb_serial *serial)
+{
+ struct usb_host_interface *cur_altsetting;
+
+ cur_altsetting = serial->interface->cur_altsetting;
+
+ return cur_altsetting->desc.bInterfaceNumber;
+}
+
/*
* Reads a variable-sized block of CP210X_ registers, identified by req.
* Returns data into buf in native USB byte order.
@@ -402,7 +491,7 @@ static int cp210x_read_reg_block(struct usb_serial_port *port, u8 req,
dev_err(&port->dev, "failed get req 0x%x size %d status: %d\n",
req, bufsize, result);
if (result >= 0)
- result = -EPROTO;
+ result = -EIO;
/*
* FIXME Some callers don't bother to check for error,
@@ -465,6 +554,40 @@ static int cp210x_read_u8_reg(struct usb_serial_port *port, u8 req, u8 *val)
}
/*
+ * Reads a variable-sized vendor block of CP210X_ registers, identified by val.
+ * Returns data into buf in native USB byte order.
+ */
+static int cp210x_read_vendor_block(struct usb_serial *serial, u8 type, u16 val,
+ void *buf, int bufsize)
+{
+ void *dmabuf;
+ int result;
+
+ dmabuf = kmalloc(bufsize, GFP_KERNEL);
+ if (!dmabuf)
+ return -ENOMEM;
+
+ result = usb_control_msg(serial->dev, usb_rcvctrlpipe(serial->dev, 0),
+ CP210X_VENDOR_SPECIFIC, type, val,
+ cp210x_interface_num(serial), dmabuf, bufsize,
+ USB_CTRL_GET_TIMEOUT);
+ if (result == bufsize) {
+ memcpy(buf, dmabuf, bufsize);
+ result = 0;
+ } else {
+ dev_err(&serial->interface->dev,
+ "failed to get vendor val 0x%04x size %d: %d\n", val,
+ bufsize, result);
+ if (result >= 0)
+ result = -EIO;
+ }
+
+ kfree(dmabuf);
+
+ return result;
+}
+
+/*
* Writes any 16-bit CP210X_ register (req) whose value is passed
* entirely in the wValue field of the USB request.
*/
@@ -515,7 +638,7 @@ static int cp210x_write_reg_block(struct usb_serial_port *port, u8 req,
dev_err(&port->dev, "failed set req 0x%x size %d status: %d\n",
req, bufsize, result);
if (result >= 0)
- result = -EPROTO;
+ result = -EIO;
}
return result;
@@ -533,6 +656,42 @@ static int cp210x_write_u32_reg(struct usb_serial_port *port, u8 req, u32 val)
return cp210x_write_reg_block(port, req, &le32_val, sizeof(le32_val));
}
+#ifdef CONFIG_GPIOLIB
+/*
+ * Writes a variable-sized vendor block of CP210X_ registers, identified by val.
+ * Data in buf must be in native USB byte order.
+ */
+static int cp210x_write_vendor_block(struct usb_serial *serial, u8 type,
+ u16 val, void *buf, int bufsize)
+{
+ void *dmabuf;
+ int result;
+
+ dmabuf = kmemdup(buf, bufsize, GFP_KERNEL);
+ if (!dmabuf)
+ return -ENOMEM;
+
+ result = usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0),
+ CP210X_VENDOR_SPECIFIC, type, val,
+ cp210x_interface_num(serial), dmabuf, bufsize,
+ USB_CTRL_SET_TIMEOUT);
+
+ kfree(dmabuf);
+
+ if (result == bufsize) {
+ result = 0;
+ } else {
+ dev_err(&serial->interface->dev,
+ "failed to set vendor val 0x%04x size %d: %d\n", val,
+ bufsize, result);
+ if (result >= 0)
+ result = -EIO;
+ }
+
+ return result;
+}
+#endif
+
/*
* Detect CP2108 GET_LINE_CTL bug and activate workaround.
* Write a known good value 0x800, read it back.
@@ -683,7 +842,7 @@ static int cp210x_get_tx_queue_byte_count(struct usb_serial_port *port,
} else {
dev_err(&port->dev, "failed to get comm status: %d\n", result);
if (result >= 0)
- result = -EPROTO;
+ result = -EIO;
}
kfree(sts);
@@ -719,7 +878,7 @@ static void cp210x_get_termios(struct tty_struct *tty,
&tty->termios.c_cflag, &baud);
tty_encode_baud_rate(tty, baud, baud);
} else {
- unsigned int cflag;
+ tcflag_t cflag;
cflag = 0;
cp210x_get_termios_port(port, &cflag, &baud);
}
@@ -730,10 +889,10 @@ static void cp210x_get_termios(struct tty_struct *tty,
* This is the heart of cp210x_get_termios which always uses a &usb_serial_port.
*/
static void cp210x_get_termios_port(struct usb_serial_port *port,
- unsigned int *cflagp, unsigned int *baudp)
+ tcflag_t *cflagp, unsigned int *baudp)
{
struct device *dev = &port->dev;
- unsigned int cflag;
+ tcflag_t cflag;
struct cp210x_flow_ctl flow_ctl;
u32 baud;
u16 bits;
@@ -930,16 +1089,9 @@ static void cp210x_set_termios(struct tty_struct *tty,
dev_dbg(dev, "%s - data bits = 7\n", __func__);
break;
case CS8:
- bits |= BITS_DATA_8;
- dev_dbg(dev, "%s - data bits = 8\n", __func__);
- break;
- /*case CS9:
- bits |= BITS_DATA_9;
- dev_dbg(dev, "%s - data bits = 9\n", __func__);
- break;*/
default:
- dev_dbg(dev, "cp210x driver does not support the number of bits requested, using 8 bit mode\n");
bits |= BITS_DATA_8;
+ dev_dbg(dev, "%s - data bits = 8\n", __func__);
break;
}
if (cp210x_write_u16_reg(port, CP210X_SET_LINE_CTL, bits))
@@ -1108,10 +1260,188 @@ static void cp210x_break_ctl(struct tty_struct *tty, int break_state)
cp210x_write_u16_reg(port, CP210X_SET_BREAK, state);
}
+#ifdef CONFIG_GPIOLIB
+static int cp210x_gpio_request(struct gpio_chip *gc, unsigned int offset)
+{
+ struct usb_serial *serial = gpiochip_get_data(gc);
+ struct cp210x_serial_private *priv = usb_get_serial_data(serial);
+
+ switch (offset) {
+ case 0:
+ if (priv->config & CP2105_GPIO0_TXLED_MODE)
+ return -ENODEV;
+ break;
+ case 1:
+ if (priv->config & (CP2105_GPIO1_RXLED_MODE |
+ CP2105_GPIO1_RS485_MODE))
+ return -ENODEV;
+ break;
+ }
+
+ return 0;
+}
+
+static int cp210x_gpio_get(struct gpio_chip *gc, unsigned int gpio)
+{
+ struct usb_serial *serial = gpiochip_get_data(gc);
+ int result;
+ u8 buf;
+
+ result = cp210x_read_vendor_block(serial, REQTYPE_INTERFACE_TO_HOST,
+ CP210X_READ_LATCH, &buf, sizeof(buf));
+ if (result < 0)
+ return result;
+
+ return !!(buf & BIT(gpio));
+}
+
+static void cp210x_gpio_set(struct gpio_chip *gc, unsigned int gpio, int value)
+{
+ struct usb_serial *serial = gpiochip_get_data(gc);
+ struct cp210x_gpio_write buf;
+
+ if (value == 1)
+ buf.state = BIT(gpio);
+ else
+ buf.state = 0;
+
+ buf.mask = BIT(gpio);
+
+ cp210x_write_vendor_block(serial, REQTYPE_HOST_TO_INTERFACE,
+ CP210X_WRITE_LATCH, &buf, sizeof(buf));
+}
+
+static int cp210x_gpio_direction_get(struct gpio_chip *gc, unsigned int gpio)
+{
+ /* Hardware does not support an input mode */
+ return 0;
+}
+
+static int cp210x_gpio_direction_input(struct gpio_chip *gc, unsigned int gpio)
+{
+ /* Hardware does not support an input mode */
+ return -ENOTSUPP;
+}
+
+static int cp210x_gpio_direction_output(struct gpio_chip *gc, unsigned int gpio,
+ int value)
+{
+ return 0;
+}
+
+static int cp210x_gpio_set_single_ended(struct gpio_chip *gc, unsigned int gpio,
+ enum single_ended_mode mode)
+{
+ struct usb_serial *serial = gpiochip_get_data(gc);
+ struct cp210x_serial_private *priv = usb_get_serial_data(serial);
+
+ /* Succeed only if in correct mode (this can't be set at runtime) */
+ if ((mode == LINE_MODE_PUSH_PULL) && (priv->gpio_mode & BIT(gpio)))
+ return 0;
+
+ if ((mode == LINE_MODE_OPEN_DRAIN) && !(priv->gpio_mode & BIT(gpio)))
+ return 0;
+
+ return -ENOTSUPP;
+}
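+
+/*
+ * For example (illustrative): with gpio_mode = 0x01, GPIO0 is push-pull and
+ * GPIO1 open-drain, so only LINE_MODE_PUSH_PULL on GPIO0 and
+ * LINE_MODE_OPEN_DRAIN on GPIO1 would succeed here.
+ */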
+
+/*
+ * This function configures GPIO on shared pins, where enabling GPIO makes
+ * the other signals on those pins unavailable. This is believed to apply
+ * only to the cp2105 at this point; the other devices supported by this
+ * driver that provide GPIO do so in a way that does not impact other
+ * signals and are thus expected to have very different initialisation.
+ */
+static int cp2105_shared_gpio_init(struct usb_serial *serial)
+{
+ struct cp210x_serial_private *priv = usb_get_serial_data(serial);
+ struct cp210x_pin_mode mode;
+ struct cp210x_config config;
+ u8 intf_num = cp210x_interface_num(serial);
+ int result;
+
+ result = cp210x_read_vendor_block(serial, REQTYPE_DEVICE_TO_HOST,
+ CP210X_GET_DEVICEMODE, &mode,
+ sizeof(mode));
+ if (result < 0)
+ return result;
+
+ result = cp210x_read_vendor_block(serial, REQTYPE_DEVICE_TO_HOST,
+ CP210X_GET_PORTCONFIG, &config,
+ sizeof(config));
+ if (result < 0)
+ return result;
+
+ /* 2 banks of GPIO - One for the pins taken from each serial port */
+ if (intf_num == 0) {
+ if (mode.eci == CP210X_PIN_MODE_MODEM)
+ return 0;
+
+ priv->config = config.eci_cfg;
+ priv->gpio_mode = (u8)((le16_to_cpu(config.gpio_mode) &
+ CP210X_ECI_GPIO_MODE_MASK) >>
+ CP210X_ECI_GPIO_MODE_OFFSET);
+ priv->gc.ngpio = 2;
+ } else if (intf_num == 1) {
+ if (mode.sci == CP210X_PIN_MODE_MODEM)
+ return 0;
+
+ priv->config = config.sci_cfg;
+ priv->gpio_mode = (u8)((le16_to_cpu(config.gpio_mode) &
+ CP210X_SCI_GPIO_MODE_MASK) >>
+ CP210X_SCI_GPIO_MODE_OFFSET);
+ priv->gc.ngpio = 3;
+ } else {
+ return -ENODEV;
+ }
+
+ priv->gc.label = "cp210x";
+ priv->gc.request = cp210x_gpio_request;
+ priv->gc.get_direction = cp210x_gpio_direction_get;
+ priv->gc.direction_input = cp210x_gpio_direction_input;
+ priv->gc.direction_output = cp210x_gpio_direction_output;
+ priv->gc.get = cp210x_gpio_get;
+ priv->gc.set = cp210x_gpio_set;
+ priv->gc.set_single_ended = cp210x_gpio_set_single_ended;
+ priv->gc.owner = THIS_MODULE;
+ priv->gc.parent = &serial->interface->dev;
+ priv->gc.base = -1;
+ priv->gc.can_sleep = true;
+
+ result = gpiochip_add_data(&priv->gc, serial);
+ if (!result)
+ priv->gpio_registered = true;
+
+ return result;
+}
+
+static void cp210x_gpio_remove(struct usb_serial *serial)
+{
+ struct cp210x_serial_private *priv = usb_get_serial_data(serial);
+
+ if (priv->gpio_registered) {
+ gpiochip_remove(&priv->gc);
+ priv->gpio_registered = false;
+ }
+}
+
+#else
+
+static int cp2105_shared_gpio_init(struct usb_serial *serial)
+{
+ return 0;
+}
+
+static void cp210x_gpio_remove(struct usb_serial *serial)
+{
+ /* Nothing to do */
+}
+
+#endif
+
static int cp210x_port_probe(struct usb_serial_port *port)
{
struct usb_serial *serial = port->serial;
- struct usb_host_interface *cur_altsetting;
struct cp210x_port_private *port_priv;
int ret;
@@ -1119,8 +1449,7 @@ static int cp210x_port_probe(struct usb_serial_port *port)
if (!port_priv)
return -ENOMEM;
- cur_altsetting = serial->interface->cur_altsetting;
- port_priv->bInterfaceNumber = cur_altsetting->desc.bInterfaceNumber;
+ port_priv->bInterfaceNumber = cp210x_interface_num(serial);
usb_set_serial_port_data(port, port_priv);
@@ -1143,6 +1472,52 @@ static int cp210x_port_remove(struct usb_serial_port *port)
return 0;
}
+static int cp210x_attach(struct usb_serial *serial)
+{
+ int result;
+ struct cp210x_serial_private *priv;
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ result = cp210x_read_vendor_block(serial, REQTYPE_DEVICE_TO_HOST,
+ CP210X_GET_PARTNUM, &priv->partnum,
+ sizeof(priv->partnum));
+ if (result < 0)
+ goto err_free_priv;
+
+ usb_set_serial_data(serial, priv);
+
+ if (priv->partnum == CP210X_PARTNUM_CP2105) {
+ result = cp2105_shared_gpio_init(serial);
+ if (result < 0) {
+ dev_err(&serial->interface->dev,
+ "GPIO initialisation failed, continuing without GPIO support\n");
+ }
+ }
+
+ return 0;
+err_free_priv:
+ kfree(priv);
+
+ return result;
+}
+
+static void cp210x_disconnect(struct usb_serial *serial)
+{
+ cp210x_gpio_remove(serial);
+}
+
+static void cp210x_release(struct usb_serial *serial)
+{
+ struct cp210x_serial_private *priv = usb_get_serial_data(serial);
+
+ cp210x_gpio_remove(serial);
+
+ kfree(priv);
+}
+
module_usb_serial_driver(serial_drivers, id_table);
MODULE_DESCRIPTION(DRIVER_DESC);
diff --git a/drivers/usb/serial/f81534.c b/drivers/usb/serial/f81534.c
new file mode 100644
index 000000000000..8282a6a18fee
--- /dev/null
+++ b/drivers/usb/serial/f81534.c
@@ -0,0 +1,1409 @@
+/*
+ * F81532/F81534 USB to Serial Ports Bridge
+ *
+ * F81532 => 2 Serial Ports
+ * F81534 => 4 Serial Ports
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Copyright (C) 2016 Feature Integration Technology Inc., (Fintek)
+ * Copyright (C) 2016 Tom Tsai (Tom_Tsai@fintek.com.tw)
+ * Copyright (C) 2016 Peter Hong (Peter_Hong@fintek.com.tw)
+ *
+ * The F81532/F81534 has one control endpoint for settings, one bulk-out
+ * endpoint for all serial-port TX and one bulk-in endpoint for all
+ * serial-port input (read data/MSR/LSR).
+ *
+ * The write URB is fixed at 512 bytes, with 128 bytes used per serial port;
+ * see f81534_prepare_write_buffer().
+ *
+ * The read URB is at most 512 bytes, with 128 bytes used per serial port;
+ * see f81534_process_read_urb(). A transfer may contain 1, 2, 3 or 4
+ * 128-byte blocks.
+ *
+ */
+#include <linux/slab.h>
+#include <linux/tty.h>
+#include <linux/tty_flip.h>
+#include <linux/usb.h>
+#include <linux/usb/serial.h>
+#include <linux/serial_reg.h>
+#include <linux/module.h>
+#include <linux/uaccess.h>
+
+/* Serial Port register Address */
+#define F81534_UART_BASE_ADDRESS 0x1200
+#define F81534_UART_OFFSET 0x10
+#define F81534_DIVISOR_LSB_REG (0x00 + F81534_UART_BASE_ADDRESS)
+#define F81534_DIVISOR_MSB_REG (0x01 + F81534_UART_BASE_ADDRESS)
+#define F81534_FIFO_CONTROL_REG (0x02 + F81534_UART_BASE_ADDRESS)
+#define F81534_LINE_CONTROL_REG (0x03 + F81534_UART_BASE_ADDRESS)
+#define F81534_MODEM_CONTROL_REG (0x04 + F81534_UART_BASE_ADDRESS)
+#define F81534_MODEM_STATUS_REG (0x06 + F81534_UART_BASE_ADDRESS)
+#define F81534_CONFIG1_REG (0x09 + F81534_UART_BASE_ADDRESS)
+
+#define F81534_DEF_CONF_ADDRESS_START 0x3000
+#define F81534_DEF_CONF_SIZE 8
+
+#define F81534_CUSTOM_ADDRESS_START 0x2f00
+#define F81534_CUSTOM_DATA_SIZE 0x10
+#define F81534_CUSTOM_NO_CUSTOM_DATA 0xff
+#define F81534_CUSTOM_VALID_TOKEN 0xf0
+#define F81534_CONF_OFFSET 1
+
+#define F81534_MAX_DATA_BLOCK 64
+#define F81534_MAX_BUS_RETRY 20
+
+/* Default URB timeout for USB operations */
+#define F81534_USB_MAX_RETRY 10
+#define F81534_USB_TIMEOUT 1000
+#define F81534_SET_GET_REGISTER 0xA0
+
+#define F81534_NUM_PORT 4
+#define F81534_UNUSED_PORT 0xff
+#define F81534_WRITE_BUFFER_SIZE 512
+
+#define DRIVER_DESC "Fintek F81532/F81534"
+#define FINTEK_VENDOR_ID_1 0x1934
+#define FINTEK_VENDOR_ID_2 0x2C42
+#define FINTEK_DEVICE_ID 0x1202
+#define F81534_MAX_TX_SIZE 124
+#define F81534_MAX_RX_SIZE 124
+#define F81534_RECEIVE_BLOCK_SIZE 128
+#define F81534_MAX_RECEIVE_BLOCK_SIZE 512
+
+#define F81534_TOKEN_RECEIVE 0x01
+#define F81534_TOKEN_WRITE 0x02
+#define F81534_TOKEN_TX_EMPTY 0x03
+#define F81534_TOKEN_MSR_CHANGE 0x04
+
+/*
+ * We use an internal SPI bus to access the flash section. We must wait for
+ * the SPI bus to become idle after issuing any command.
+ *
+ * SPI Bus status register: F81534_BUS_REG_STATUS
+ * Bit 0/1 : BUSY
+ * Bit 2 : IDLE
+ */
+#define F81534_BUS_BUSY (BIT(0) | BIT(1))
+#define F81534_BUS_IDLE BIT(2)
+#define F81534_BUS_READ_DATA 0x1004
+#define F81534_BUS_REG_STATUS 0x1003
+#define F81534_BUS_REG_START 0x1002
+#define F81534_BUS_REG_END 0x1001
+
+#define F81534_CMD_READ 0x03
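+
+/*
+ * Sketch of one flash read transaction, as implemented by
+ * f81534_read_flash() below (a summary of that code, not an extra datasheet
+ * reference): write F81534_CMD_READ and three address bytes to
+ * F81534_BUS_REG_START, then for each data byte issue a dummy write (to
+ * F81534_BUS_REG_END for the final byte) and read the result from
+ * F81534_BUS_READ_DATA, waiting for SPI idle after every command.
+ */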
+
+#define F81534_DEFAULT_BAUD_RATE 9600
+#define F81534_MAX_BAUDRATE 115200
+
+#define F81534_PORT_CONF_DISABLE_PORT BIT(3)
+#define F81534_PORT_CONF_NOT_EXIST_PORT BIT(7)
+#define F81534_PORT_UNAVAILABLE \
+ (F81534_PORT_CONF_DISABLE_PORT | F81534_PORT_CONF_NOT_EXIST_PORT)
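+
+/*
+ * For example (illustrative): a conf_data byte of 0xff has both bit 3
+ * (port disabled) and bit 7 (port not fitted) set, so such a port is
+ * skipped when counting and mapping ports.
+ */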
+
+#define F81534_1X_RXTRIGGER 0xc3
+#define F81534_8X_RXTRIGGER 0xcf
+
+static const struct usb_device_id f81534_id_table[] = {
+ { USB_DEVICE(FINTEK_VENDOR_ID_1, FINTEK_DEVICE_ID) },
+ { USB_DEVICE(FINTEK_VENDOR_ID_2, FINTEK_DEVICE_ID) },
+ {} /* Terminating entry */
+};
+
+#define F81534_TX_EMPTY_BIT 0
+
+struct f81534_serial_private {
+ u8 conf_data[F81534_DEF_CONF_SIZE];
+ int tty_idx[F81534_NUM_PORT];
+ u8 setting_idx;
+ int opened_port;
+ struct mutex urb_mutex;
+};
+
+struct f81534_port_private {
+ struct mutex mcr_mutex;
+ unsigned long tx_empty;
+ spinlock_t msr_lock;
+ u8 shadow_mcr;
+ u8 shadow_msr;
+ u8 phy_num;
+};
+
+static int f81534_logic_to_phy_port(struct usb_serial *serial,
+ struct usb_serial_port *port)
+{
+ struct f81534_serial_private *serial_priv =
+ usb_get_serial_data(port->serial);
+ int count = 0;
+ int i;
+
+ for (i = 0; i < F81534_NUM_PORT; ++i) {
+ if (serial_priv->conf_data[i] & F81534_PORT_UNAVAILABLE)
+ continue;
+
+ if (port->port_number == count)
+ return i;
+
+ ++count;
+ }
+
+ return -ENODEV;
+}
+
+static int f81534_set_register(struct usb_serial *serial, u16 reg, u8 data)
+{
+ struct usb_interface *interface = serial->interface;
+ struct usb_device *dev = serial->dev;
+ size_t count = F81534_USB_MAX_RETRY;
+ int status;
+ u8 *tmp;
+
+ tmp = kmalloc(sizeof(u8), GFP_KERNEL);
+ if (!tmp)
+ return -ENOMEM;
+
+ *tmp = data;
+
+ /*
+ * The device may not reply when heavily loaded, so retry up to
+ * F81534_USB_MAX_RETRY times.
+ */
+ while (count--) {
+ status = usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
+ F81534_SET_GET_REGISTER,
+ USB_TYPE_VENDOR | USB_DIR_OUT,
+ reg, 0, tmp, sizeof(u8),
+ F81534_USB_TIMEOUT);
+ if (status > 0) {
+ status = 0;
+ break;
+ } else if (status == 0) {
+ status = -EIO;
+ }
+ }
+
+ if (status < 0) {
+ dev_err(&interface->dev, "%s: reg: %x data: %x failed: %d\n",
+ __func__, reg, data, status);
+ }
+
+ kfree(tmp);
+ return status;
+}
+
+static int f81534_get_register(struct usb_serial *serial, u16 reg, u8 *data)
+{
+ struct usb_interface *interface = serial->interface;
+ struct usb_device *dev = serial->dev;
+ size_t count = F81534_USB_MAX_RETRY;
+ int status;
+ u8 *tmp;
+
+ tmp = kmalloc(sizeof(u8), GFP_KERNEL);
+ if (!tmp)
+ return -ENOMEM;
+
+ /*
+ * The device may not reply when heavily loaded, so retry up to
+ * F81534_USB_MAX_RETRY times.
+ */
+ while (count--) {
+ status = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0),
+ F81534_SET_GET_REGISTER,
+ USB_TYPE_VENDOR | USB_DIR_IN,
+ reg, 0, tmp, sizeof(u8),
+ F81534_USB_TIMEOUT);
+ if (status > 0) {
+ status = 0;
+ break;
+ } else if (status == 0) {
+ status = -EIO;
+ }
+ }
+
+ if (status < 0) {
+ dev_err(&interface->dev, "%s: reg: %x failed: %d\n", __func__,
+ reg, status);
+ goto end;
+ }
+
+ *data = *tmp;
+
+end:
+ kfree(tmp);
+ return status;
+}
+
+static int f81534_set_port_register(struct usb_serial_port *port, u16 reg,
+ u8 data)
+{
+ struct f81534_port_private *port_priv = usb_get_serial_port_data(port);
+
+ return f81534_set_register(port->serial,
+ reg + port_priv->phy_num * F81534_UART_OFFSET, data);
+}
+
+static int f81534_get_port_register(struct usb_serial_port *port, u16 reg,
+ u8 *data)
+{
+ struct f81534_port_private *port_priv = usb_get_serial_port_data(port);
+
+ return f81534_get_register(port->serial,
+ reg + port_priv->phy_num * F81534_UART_OFFSET, data);
+}
+
+/*
+ * When accessing the internal flash via the SPI bus, we must check the bus
+ * status after every command, e.g. F81534_BUS_REG_START/F81534_BUS_REG_END.
+ */
+static int f81534_wait_for_spi_idle(struct usb_serial *serial)
+{
+ size_t count = F81534_MAX_BUS_RETRY;
+ u8 tmp;
+ int status;
+
+ do {
+ status = f81534_get_register(serial, F81534_BUS_REG_STATUS,
+ &tmp);
+ if (status)
+ return status;
+
+ if (tmp & F81534_BUS_BUSY)
+ continue;
+
+ if (tmp & F81534_BUS_IDLE)
+ break;
+
+ } while (--count);
+
+ if (!count) {
+ dev_err(&serial->interface->dev,
+ "%s: timed out waiting for idle SPI bus\n",
+ __func__);
+ return -EIO;
+ }
+
+ return f81534_set_register(serial, F81534_BUS_REG_STATUS,
+ tmp & ~F81534_BUS_IDLE);
+}
+
+static int f81534_get_spi_register(struct usb_serial *serial, u16 reg,
+ u8 *data)
+{
+ int status;
+
+ status = f81534_get_register(serial, reg, data);
+ if (status)
+ return status;
+
+ return f81534_wait_for_spi_idle(serial);
+}
+
+static int f81534_set_spi_register(struct usb_serial *serial, u16 reg, u8 data)
+{
+ int status;
+
+ status = f81534_set_register(serial, reg, data);
+ if (status)
+ return status;
+
+ return f81534_wait_for_spi_idle(serial);
+}
+
+static int f81534_read_flash(struct usb_serial *serial, u32 address,
+ size_t size, u8 *buf)
+{
+ u8 tmp_buf[F81534_MAX_DATA_BLOCK];
+ size_t block = 0;
+ size_t read_size;
+ size_t count;
+ int status;
+ int offset;
+ u16 reg_tmp;
+
+ status = f81534_set_spi_register(serial, F81534_BUS_REG_START,
+ F81534_CMD_READ);
+ if (status)
+ return status;
+
+ status = f81534_set_spi_register(serial, F81534_BUS_REG_START,
+ (address >> 16) & 0xff);
+ if (status)
+ return status;
+
+ status = f81534_set_spi_register(serial, F81534_BUS_REG_START,
+ (address >> 8) & 0xff);
+ if (status)
+ return status;
+
+ status = f81534_set_spi_register(serial, F81534_BUS_REG_START,
+ (address >> 0) & 0xff);
+ if (status)
+ return status;
+
+ /* Continuous read mode */
+ do {
+ read_size = min_t(size_t, F81534_MAX_DATA_BLOCK, size);
+
+ for (count = 0; count < read_size; ++count) {
+ /* Write to F81534_BUS_REG_END for the final byte */
+ if (size <= F81534_MAX_DATA_BLOCK &&
+ read_size == count + 1)
+ reg_tmp = F81534_BUS_REG_END;
+ else
+ reg_tmp = F81534_BUS_REG_START;
+
+ /*
+ * Dummy write to force the IC to generate a read pulse;
+ * the value 0xf1 is a don't-care (any value works).
+ */
+ status = f81534_set_spi_register(serial, reg_tmp,
+ 0xf1);
+ if (status)
+ return status;
+
+ status = f81534_get_spi_register(serial,
+ F81534_BUS_READ_DATA,
+ &tmp_buf[count]);
+ if (status)
+ return status;
+
+ offset = count + block * F81534_MAX_DATA_BLOCK;
+ buf[offset] = tmp_buf[count];
+ }
+
+ size -= read_size;
+ ++block;
+ } while (size);
+
+ return 0;
+}
+
+static void f81534_prepare_write_buffer(struct usb_serial_port *port, u8 *buf)
+{
+ struct f81534_port_private *port_priv = usb_get_serial_port_data(port);
+ int phy_num = port_priv->phy_num;
+ u8 tx_len;
+ int i;
+
+ /*
+ * The block layout is fixed at 4 x 128 bytes, one 128-byte block per port.
+ * index 0: port phy idx (e.g., 0,1,2,3)
+ * index 1: only F81534_TOKEN_WRITE
+ * index 2: serial TX out length
+ * index 3: fixed to 0
+ * index 4~127: serial out data block
+ */
+ for (i = 0; i < F81534_NUM_PORT; ++i) {
+ buf[i * F81534_RECEIVE_BLOCK_SIZE] = i;
+ buf[i * F81534_RECEIVE_BLOCK_SIZE + 1] = F81534_TOKEN_WRITE;
+ buf[i * F81534_RECEIVE_BLOCK_SIZE + 2] = 0;
+ buf[i * F81534_RECEIVE_BLOCK_SIZE + 3] = 0;
+ }
+
+ tx_len = kfifo_out_locked(&port->write_fifo,
+ &buf[phy_num * F81534_RECEIVE_BLOCK_SIZE + 4],
+ F81534_MAX_TX_SIZE, &port->lock);
+
+ buf[phy_num * F81534_RECEIVE_BLOCK_SIZE + 2] = tx_len;
+}
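+
+/*
+ * Illustrative view of the resulting 512-byte write buffer (derived from the
+ * layout comment above; "..." spans the 124-byte data area):
+ *
+ *   offset   0: phy 0 | F81534_TOKEN_WRITE | len | 0 | data ...
+ *   offset 128: phy 1 | F81534_TOKEN_WRITE | len | 0 | data ...
+ *   offset 256: phy 2 | F81534_TOKEN_WRITE | len | 0 | data ...
+ *   offset 384: phy 3 | F81534_TOKEN_WRITE | len | 0 | data ...
+ *
+ * Only the block for this port's phy_num carries a non-zero len.
+ */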
+
+static int f81534_submit_writer(struct usb_serial_port *port, gfp_t mem_flags)
+{
+ struct f81534_port_private *port_priv = usb_get_serial_port_data(port);
+ struct urb *urb;
+ unsigned long flags;
+ int result;
+
+ /* Check whether there is any data in the write FIFO */
+ spin_lock_irqsave(&port->lock, flags);
+
+ if (kfifo_is_empty(&port->write_fifo)) {
+ spin_unlock_irqrestore(&port->lock, flags);
+ return 0;
+ }
+
+ spin_unlock_irqrestore(&port->lock, flags);
+
+ /* Check H/W is TXEMPTY */
+ if (!test_and_clear_bit(F81534_TX_EMPTY_BIT, &port_priv->tx_empty))
+ return 0;
+
+ urb = port->write_urbs[0];
+ f81534_prepare_write_buffer(port, port->bulk_out_buffers[0]);
+ urb->transfer_buffer_length = F81534_WRITE_BUFFER_SIZE;
+
+ result = usb_submit_urb(urb, mem_flags);
+ if (result) {
+ set_bit(F81534_TX_EMPTY_BIT, &port_priv->tx_empty);
+ dev_err(&port->dev, "%s: submit failed: %d\n", __func__,
+ result);
+ return result;
+ }
+
+ usb_serial_port_softint(port);
+ return 0;
+}
+
+static u32 f81534_calc_baud_divisor(u32 baudrate, u32 clockrate)
+{
+ if (!baudrate)
+ return 0;
+
+ /* Round to nearest divisor */
+ return DIV_ROUND_CLOSEST(clockrate, baudrate);
+}
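+
+/*
+ * Worked example (illustrative): with clockrate = F81534_MAX_BAUDRATE
+ * (115200), a 9600 baud request yields DIV_ROUND_CLOSEST(115200, 9600) = 12,
+ * and 38400 baud yields 3.
+ */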
+
+static int f81534_set_port_config(struct usb_serial_port *port, u32 baudrate,
+ u8 lcr)
+{
+ u32 divisor;
+ int status;
+ u8 value;
+
+ if (baudrate <= 1200)
+ value = F81534_1X_RXTRIGGER; /* 128 FIFO & TL: 1x */
+ else
+ value = F81534_8X_RXTRIGGER; /* 128 FIFO & TL: 8x */
+
+ status = f81534_set_port_register(port, F81534_CONFIG1_REG, value);
+ if (status) {
+ dev_err(&port->dev, "%s: CONFIG1 setting failed\n", __func__);
+ return status;
+ }
+
+ if (baudrate <= 1200)
+ value = UART_FCR_TRIGGER_1 | UART_FCR_ENABLE_FIFO; /* TL: 1 */
+ else
+ value = UART_FCR_R_TRIG_11 | UART_FCR_ENABLE_FIFO; /* TL: 14 */
+
+ status = f81534_set_port_register(port, F81534_FIFO_CONTROL_REG,
+ value);
+ if (status) {
+ dev_err(&port->dev, "%s: FCR setting failed\n", __func__);
+ return status;
+ }
+
+ divisor = f81534_calc_baud_divisor(baudrate, F81534_MAX_BAUDRATE);
+ value = UART_LCR_DLAB;
+ status = f81534_set_port_register(port, F81534_LINE_CONTROL_REG,
+ value);
+ if (status) {
+ dev_err(&port->dev, "%s: set LCR failed\n", __func__);
+ return status;
+ }
+
+ value = divisor & 0xff;
+ status = f81534_set_port_register(port, F81534_DIVISOR_LSB_REG, value);
+ if (status) {
+ dev_err(&port->dev, "%s: set DLAB LSB failed\n", __func__);
+ return status;
+ }
+
+ value = (divisor >> 8) & 0xff;
+ status = f81534_set_port_register(port, F81534_DIVISOR_MSB_REG, value);
+ if (status) {
+ dev_err(&port->dev, "%s: set DLAB MSB failed\n", __func__);
+ return status;
+ }
+
+ status = f81534_set_port_register(port, F81534_LINE_CONTROL_REG, lcr);
+ if (status) {
+ dev_err(&port->dev, "%s: set LCR failed\n", __func__);
+ return status;
+ }
+
+ return 0;
+}
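+
+/*
+ * Usage sketch (illustrative, mirroring f81534_set_termios() below): an 8N1
+ * 9600 baud line would be programmed with
+ * f81534_set_port_config(port, 9600, UART_LCR_WLEN8).
+ */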
+
+static int f81534_update_mctrl(struct usb_serial_port *port, unsigned int set,
+ unsigned int clear)
+{
+ struct f81534_port_private *port_priv = usb_get_serial_port_data(port);
+ int status;
+ u8 tmp;
+
+ if (((set | clear) & (TIOCM_DTR | TIOCM_RTS)) == 0)
+ return 0; /* no change */
+
+ mutex_lock(&port_priv->mcr_mutex);
+
+ /* 'Set' takes precedence over 'Clear' */
+ clear &= ~set;
+
+ /* Always enable UART_MCR_OUT2 */
+ tmp = UART_MCR_OUT2 | port_priv->shadow_mcr;
+
+ if (clear & TIOCM_DTR)
+ tmp &= ~UART_MCR_DTR;
+
+ if (clear & TIOCM_RTS)
+ tmp &= ~UART_MCR_RTS;
+
+ if (set & TIOCM_DTR)
+ tmp |= UART_MCR_DTR;
+
+ if (set & TIOCM_RTS)
+ tmp |= UART_MCR_RTS;
+
+ status = f81534_set_port_register(port, F81534_MODEM_CONTROL_REG, tmp);
+ if (status < 0) {
+ dev_err(&port->dev, "%s: MCR write failed\n", __func__);
+ mutex_unlock(&port_priv->mcr_mutex);
+ return status;
+ }
+
+ port_priv->shadow_mcr = tmp;
+ mutex_unlock(&port_priv->mcr_mutex);
+ return 0;
+}
+
+/*
+ * This function searches the data area for the F81534_CUSTOM_VALID_TOKEN
+ * token to find the latest configuration index. If nothing is found
+ * (*index = F81534_CUSTOM_NO_CUSTOM_DATA), the default configuration in the
+ * F81534_DEF_CONF_ADDRESS_START section is used instead.
+ *
+ * Since only block 0 is used to save data, *index is either 0 or
+ * F81534_CUSTOM_NO_CUSTOM_DATA.
+ */
+static int f81534_find_config_idx(struct usb_serial *serial, u8 *index)
+{
+ u8 tmp;
+ int status;
+
+ status = f81534_read_flash(serial, F81534_CUSTOM_ADDRESS_START, 1,
+ &tmp);
+ if (status) {
+ dev_err(&serial->interface->dev, "%s: read failed: %d\n",
+ __func__, status);
+ return status;
+ }
+
+ /* We'll use the custom data when the data is valid. */
+ if (tmp == F81534_CUSTOM_VALID_TOKEN)
+ *index = 0;
+ else
+ *index = F81534_CUSTOM_NO_CUSTOM_DATA;
+
+ return 0;
+}
+
+/*
+ * There are two generations of the F81532/534 IC, both with internal storage.
+ *
+ * The first generation is a pure USB-to-TTL RS232 IC designed for 4 ports
+ * only; no internal data is used. All mode and GPIO control must be set
+ * manually by the application or driver, and the entire storage space reads
+ * as 0xff. For this IC, f81534_calc_num_ports() falls through to the path
+ * marked "oldest version" below.
+ *
+ * The second generation is designed to be more generic and work with any
+ * transceiver; this is our mass-production type. Nine bytes of data are
+ * saved at F81534_CUSTOM_ADDRESS_START (0x2f00). The first byte is an
+ * indicator: if it is F81534_CUSTOM_VALID_TOKEN (0xf0), the IC is a
+ * second-generation part. The following 4 bytes store the port mode
+ * (0: RS232 / 1: RS485 inverted / 2: RS485), and the last 4 bytes store the
+ * GPIO state (values 0~7 representing 3 GPIO output pins). For this IC,
+ * f81534_calc_num_ports() takes the "new style" path, checking the
+ * F81534_PORT_UNAVAILABLE bits.
+ */
+static int f81534_calc_num_ports(struct usb_serial *serial)
+{
+ u8 setting[F81534_CUSTOM_DATA_SIZE];
+ u8 setting_idx;
+ u8 num_port = 0;
+ int status;
+ size_t i;
+
+ /* Check for a custom setting */
+ status = f81534_find_config_idx(serial, &setting_idx);
+ if (status) {
+ dev_err(&serial->interface->dev, "%s: find idx failed: %d\n",
+ __func__, status);
+ return 0;
+ }
+
+ /*
+ * Read the custom data only when it is available; otherwise read
+ * the default values instead.
+ */
+ if (setting_idx != F81534_CUSTOM_NO_CUSTOM_DATA) {
+ status = f81534_read_flash(serial,
+ F81534_CUSTOM_ADDRESS_START +
+ F81534_CONF_OFFSET,
+ sizeof(setting), setting);
+ if (status) {
+ dev_err(&serial->interface->dev,
+ "%s: get custom data failed: %d\n",
+ __func__, status);
+ return 0;
+ }
+
+ dev_dbg(&serial->interface->dev,
+ "%s: read config from block: %d\n", __func__,
+ setting_idx);
+ } else {
+ /* Read default board setting */
+ status = f81534_read_flash(serial,
+ F81534_DEF_CONF_ADDRESS_START, F81534_NUM_PORT,
+ setting);
+
+ if (status) {
+ dev_err(&serial->interface->dev,
+ "%s: read failed: %d\n", __func__,
+ status);
+ return 0;
+ }
+
+ dev_dbg(&serial->interface->dev, "%s: read default config\n",
+ __func__);
+ }
+
+ /* New style, find all possible ports */
+ for (i = 0; i < F81534_NUM_PORT; ++i) {
+ if (setting[i] & F81534_PORT_UNAVAILABLE)
+ continue;
+
+ ++num_port;
+ }
+
+ if (num_port)
+ return num_port;
+
+ dev_warn(&serial->interface->dev, "%s: no config found, assuming 4 ports\n",
+ __func__);
+ return 4; /* Nothing found, oldest version IC */
+}
+
+static void f81534_set_termios(struct tty_struct *tty,
+ struct usb_serial_port *port,
+ struct ktermios *old_termios)
+{
+ u8 new_lcr = 0;
+ int status;
+ u32 baud;
+
+ if (C_BAUD(tty) == B0)
+ f81534_update_mctrl(port, 0, TIOCM_DTR | TIOCM_RTS);
+ else if (old_termios && (old_termios->c_cflag & CBAUD) == B0)
+ f81534_update_mctrl(port, TIOCM_DTR | TIOCM_RTS, 0);
+
+ if (C_PARENB(tty)) {
+ new_lcr |= UART_LCR_PARITY;
+
+ if (!C_PARODD(tty))
+ new_lcr |= UART_LCR_EPAR;
+
+ if (C_CMSPAR(tty))
+ new_lcr |= UART_LCR_SPAR;
+ }
+
+ if (C_CSTOPB(tty))
+ new_lcr |= UART_LCR_STOP;
+
+ switch (C_CSIZE(tty)) {
+ case CS5:
+ new_lcr |= UART_LCR_WLEN5;
+ break;
+ case CS6:
+ new_lcr |= UART_LCR_WLEN6;
+ break;
+ case CS7:
+ new_lcr |= UART_LCR_WLEN7;
+ break;
+ default:
+ case CS8:
+ new_lcr |= UART_LCR_WLEN8;
+ break;
+ }
+
+ baud = tty_get_baud_rate(tty);
+ if (!baud)
+ return;
+
+ if (baud > F81534_MAX_BAUDRATE) {
+ if (old_termios)
+ baud = tty_termios_baud_rate(old_termios);
+ else
+ baud = F81534_DEFAULT_BAUD_RATE;
+
+ tty_encode_baud_rate(tty, baud, baud);
+ }
+
+ dev_dbg(&port->dev, "%s: baud: %d\n", __func__, baud);
+
+ status = f81534_set_port_config(port, baud, new_lcr);
+ if (status < 0) {
+ dev_err(&port->dev, "%s: set port config failed: %d\n",
+ __func__, status);
+ }
+}
+
+static int f81534_submit_read_urb(struct usb_serial *serial, gfp_t flags)
+{
+ return usb_serial_generic_submit_read_urbs(serial->port[0], flags);
+}
+
+static void f81534_msr_changed(struct usb_serial_port *port, u8 msr)
+{
+ struct f81534_port_private *port_priv = usb_get_serial_port_data(port);
+ struct tty_struct *tty;
+ unsigned long flags;
+ u8 old_msr;
+
+ if (!(msr & UART_MSR_ANY_DELTA))
+ return;
+
+ spin_lock_irqsave(&port_priv->msr_lock, flags);
+ old_msr = port_priv->shadow_msr;
+ port_priv->shadow_msr = msr;
+ spin_unlock_irqrestore(&port_priv->msr_lock, flags);
+
+ dev_dbg(&port->dev, "%s: MSR from %02x to %02x\n", __func__, old_msr,
+ msr);
+
+ /* Update input line counters */
+ if (msr & UART_MSR_DCTS)
+ port->icount.cts++;
+ if (msr & UART_MSR_DDSR)
+ port->icount.dsr++;
+ if (msr & UART_MSR_DDCD)
+ port->icount.dcd++;
+ if (msr & UART_MSR_TERI)
+ port->icount.rng++;
+
+ wake_up_interruptible(&port->port.delta_msr_wait);
+
+ if (!(msr & UART_MSR_DDCD))
+ return;
+
+ dev_dbg(&port->dev, "%s: DCD Changed: phy_num: %d from %x to %x\n",
+ __func__, port_priv->phy_num, old_msr, msr);
+
+ tty = tty_port_tty_get(&port->port);
+ if (!tty)
+ return;
+
+ usb_serial_handle_dcd_change(port, tty, msr & UART_MSR_DCD);
+ tty_kref_put(tty);
+}
+
+static int f81534_read_msr(struct usb_serial_port *port)
+{
+ struct f81534_port_private *port_priv = usb_get_serial_port_data(port);
+ unsigned long flags;
+ int status;
+ u8 msr;
+
+ /* Get MSR initial value */
+ status = f81534_get_port_register(port, F81534_MODEM_STATUS_REG, &msr);
+ if (status)
+ return status;
+
+ /* Force update current state */
+ spin_lock_irqsave(&port_priv->msr_lock, flags);
+ port_priv->shadow_msr = msr;
+ spin_unlock_irqrestore(&port_priv->msr_lock, flags);
+
+ return 0;
+}
+
+static int f81534_open(struct tty_struct *tty, struct usb_serial_port *port)
+{
+ struct f81534_serial_private *serial_priv =
+ usb_get_serial_data(port->serial);
+ struct f81534_port_private *port_priv = usb_get_serial_port_data(port);
+ int status;
+
+ status = f81534_set_port_register(port,
+ F81534_FIFO_CONTROL_REG, UART_FCR_ENABLE_FIFO |
+ UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT);
+ if (status) {
+ dev_err(&port->dev, "%s: Clear FIFO failed: %d\n", __func__,
+ status);
+ return status;
+ }
+
+ if (tty)
+ f81534_set_termios(tty, port, NULL);
+
+ status = f81534_read_msr(port);
+ if (status)
+ return status;
+
+ mutex_lock(&serial_priv->urb_mutex);
+
+ /* Submit Read URBs for first port opened */
+ if (!serial_priv->opened_port) {
+ status = f81534_submit_read_urb(port->serial, GFP_KERNEL);
+ if (status)
+ goto exit;
+ }
+
+ serial_priv->opened_port++;
+
+exit:
+ mutex_unlock(&serial_priv->urb_mutex);
+
+ set_bit(F81534_TX_EMPTY_BIT, &port_priv->tx_empty);
+ return status;
+}
+
+static void f81534_close(struct usb_serial_port *port)
+{
+ struct f81534_serial_private *serial_priv =
+ usb_get_serial_data(port->serial);
+ struct usb_serial_port *port0 = port->serial->port[0];
+ unsigned long flags;
+ size_t i;
+
+ usb_kill_urb(port->write_urbs[0]);
+
+ spin_lock_irqsave(&port->lock, flags);
+ kfifo_reset_out(&port->write_fifo);
+ spin_unlock_irqrestore(&port->lock, flags);
+
+ /* Kill Read URBs when final port closed */
+ mutex_lock(&serial_priv->urb_mutex);
+ serial_priv->opened_port--;
+
+ if (!serial_priv->opened_port) {
+ for (i = 0; i < ARRAY_SIZE(port0->read_urbs); ++i)
+ usb_kill_urb(port0->read_urbs[i]);
+ }
+
+ mutex_unlock(&serial_priv->urb_mutex);
+}
+
+static int f81534_get_serial_info(struct usb_serial_port *port,
+ struct serial_struct __user *retinfo)
+{
+ struct f81534_port_private *port_priv;
+ struct serial_struct tmp;
+
+ port_priv = usb_get_serial_port_data(port);
+
+ memset(&tmp, 0, sizeof(tmp));
+
+ tmp.type = PORT_16550A;
+ tmp.port = port->port_number;
+ tmp.line = port->minor;
+ tmp.baud_base = F81534_MAX_BAUDRATE;
+
+ if (copy_to_user(retinfo, &tmp, sizeof(*retinfo)))
+ return -EFAULT;
+
+ return 0;
+}
+
+static int f81534_ioctl(struct tty_struct *tty, unsigned int cmd,
+ unsigned long arg)
+{
+ struct usb_serial_port *port = tty->driver_data;
+ struct serial_struct __user *buf = (struct serial_struct __user *)arg;
+
+ switch (cmd) {
+ case TIOCGSERIAL:
+ return f81534_get_serial_info(port, buf);
+ default:
+ break;
+ }
+
+ return -ENOIOCTLCMD;
+}
+
+static void f81534_process_per_serial_block(struct usb_serial_port *port,
+ u8 *data)
+{
+ struct f81534_port_private *port_priv = usb_get_serial_port_data(port);
+ int phy_num = data[0];
+ size_t read_size = 0;
+ size_t i;
+ char tty_flag;
+ int status;
+ u8 lsr;
+
+ /*
+ * The block layout is 128 bytes:
+ * index 0: port phy idx (e.g., 0,1,2,3)
+ * index 1: one of
+ * F81534_TOKEN_RECEIVE
+ * F81534_TOKEN_TX_EMPTY
+ * F81534_TOKEN_MSR_CHANGE
+ * index 2: serial input size (data+lsr, must be even),
+ * meaningful for F81534_TOKEN_RECEIVE only
+ * index 3: current MSR of this device
+ * index 4~127: serial in data block (data+lsr, must be even)
+ */
+ switch (data[1]) {
+ case F81534_TOKEN_TX_EMPTY:
+ set_bit(F81534_TX_EMPTY_BIT, &port_priv->tx_empty);
+
+ /* Try to submit writer */
+ status = f81534_submit_writer(port, GFP_ATOMIC);
+ if (status)
+ dev_err(&port->dev, "%s: submit failed\n", __func__);
+ return;
+
+ case F81534_TOKEN_MSR_CHANGE:
+ f81534_msr_changed(port, data[3]);
+ return;
+
+ case F81534_TOKEN_RECEIVE:
+ read_size = data[2];
+ if (read_size > F81534_MAX_RX_SIZE) {
+ dev_err(&port->dev,
+ "%s: phy: %d read_size: %zu larger than: %d\n",
+ __func__, phy_num, read_size,
+ F81534_MAX_RX_SIZE);
+ return;
+ }
+
+ break;
+
+ default:
+ dev_warn(&port->dev, "%s: unknown token: %02x\n", __func__,
+ data[1]);
+ return;
+ }
+
+ for (i = 4; i < 4 + read_size; i += 2) {
+ tty_flag = TTY_NORMAL;
+ lsr = data[i + 1];
+
+ if (lsr & UART_LSR_BRK_ERROR_BITS) {
+ if (lsr & UART_LSR_BI) {
+ tty_flag = TTY_BREAK;
+ port->icount.brk++;
+ usb_serial_handle_break(port);
+ } else if (lsr & UART_LSR_PE) {
+ tty_flag = TTY_PARITY;
+ port->icount.parity++;
+ } else if (lsr & UART_LSR_FE) {
+ tty_flag = TTY_FRAME;
+ port->icount.frame++;
+ }
+
+ if (lsr & UART_LSR_OE) {
+ port->icount.overrun++;
+ tty_insert_flip_char(&port->port, 0,
+ TTY_OVERRUN);
+ }
+ }
+
+ if (port->port.console && port->sysrq) {
+ if (usb_serial_handle_sysrq_char(port, data[i]))
+ continue;
+ }
+
+ tty_insert_flip_char(&port->port, data[i], tty_flag);
+ }
+
+ tty_flip_buffer_push(&port->port);
+}
+
+static void f81534_process_read_urb(struct urb *urb)
+{
+ struct f81534_serial_private *serial_priv;
+ struct usb_serial_port *port;
+ struct usb_serial *serial;
+ u8 *buf;
+ int phy_port_num;
+ int tty_port_num;
+ size_t i;
+
+ if (!urb->actual_length ||
+ urb->actual_length % F81534_RECEIVE_BLOCK_SIZE) {
+ return;
+ }
+
+ port = urb->context;
+ serial = port->serial;
+ buf = urb->transfer_buffer;
+ serial_priv = usb_get_serial_data(serial);
+
+ for (i = 0; i < urb->actual_length; i += F81534_RECEIVE_BLOCK_SIZE) {
+ phy_port_num = buf[i];
+ if (phy_port_num >= F81534_NUM_PORT) {
+ dev_err(&port->dev,
+ "%s: phy_port_num: %d larger than: %d\n",
+ __func__, phy_port_num, F81534_NUM_PORT);
+ continue;
+ }
+
+ tty_port_num = serial_priv->tty_idx[phy_port_num];
+ port = serial->port[tty_port_num];
+
+ if (tty_port_initialized(&port->port))
+ f81534_process_per_serial_block(port, &buf[i]);
+ }
+}
+
+static void f81534_write_usb_callback(struct urb *urb)
+{
+ struct usb_serial_port *port = urb->context;
+
+ switch (urb->status) {
+ case 0:
+ break;
+ case -ENOENT:
+ case -ECONNRESET:
+ case -ESHUTDOWN:
+ dev_dbg(&port->dev, "%s - urb stopped: %d\n",
+ __func__, urb->status);
+ return;
+ case -EPIPE:
+ dev_err(&port->dev, "%s - urb stopped: %d\n",
+ __func__, urb->status);
+ return;
+ default:
+ dev_dbg(&port->dev, "%s - nonzero urb status: %d\n",
+ __func__, urb->status);
+ break;
+ }
+}
+
+static int f81534_setup_ports(struct usb_serial *serial)
+{
+ struct usb_serial_port *port;
+ u8 port0_out_address;
+ int buffer_size;
+ size_t i;
+
+ /*
+ * In our system architecture we have 2 or 4 serial ports but only
+ * one set of bulk in/out endpoints.
+ *
+ * The usb-serial subsystem allocates resources for port 0 only, not
+ * for ports 1/2/3. The code below allocates the write URB and buffer
+ * for those ports and reuses the port 0 read URB for read operations.
+ */
+ for (i = 1; i < serial->num_ports; ++i) {
+ port0_out_address = serial->port[0]->bulk_out_endpointAddress;
+ buffer_size = serial->port[0]->bulk_out_size;
+ port = serial->port[i];
+
+ if (kfifo_alloc(&port->write_fifo, PAGE_SIZE, GFP_KERNEL))
+ return -ENOMEM;
+
+ port->bulk_out_size = buffer_size;
+ port->bulk_out_endpointAddress = port0_out_address;
+
+ port->write_urbs[0] = usb_alloc_urb(0, GFP_KERNEL);
+ if (!port->write_urbs[0])
+ return -ENOMEM;
+
+ port->bulk_out_buffers[0] = kzalloc(buffer_size, GFP_KERNEL);
+ if (!port->bulk_out_buffers[0])
+ return -ENOMEM;
+
+ usb_fill_bulk_urb(port->write_urbs[0], serial->dev,
+ usb_sndbulkpipe(serial->dev,
+ port0_out_address),
+ port->bulk_out_buffers[0], buffer_size,
+ serial->type->write_bulk_callback, port);
+
+ port->write_urb = port->write_urbs[0];
+ port->bulk_out_buffer = port->bulk_out_buffers[0];
+ }
+
+ return 0;
+}
+
+static int f81534_probe(struct usb_serial *serial,
+ const struct usb_device_id *id)
+{
+ struct usb_endpoint_descriptor *endpoint;
+ struct usb_host_interface *iface_desc;
+ struct device *dev;
+ int num_bulk_in = 0;
+ int num_bulk_out = 0;
+ int size_bulk_in = 0;
+ int size_bulk_out = 0;
+ int i;
+
+ dev = &serial->interface->dev;
+ iface_desc = serial->interface->cur_altsetting;
+
+ for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) {
+ endpoint = &iface_desc->endpoint[i].desc;
+
+ if (usb_endpoint_is_bulk_in(endpoint)) {
+ ++num_bulk_in;
+ size_bulk_in = usb_endpoint_maxp(endpoint);
+ }
+
+ if (usb_endpoint_is_bulk_out(endpoint)) {
+ ++num_bulk_out;
+ size_bulk_out = usb_endpoint_maxp(endpoint);
+ }
+ }
+
+ if (num_bulk_in != 1 || num_bulk_out != 1) {
+ dev_err(dev, "expected endpoints not found\n");
+ return -ENODEV;
+ }
+
+ if (size_bulk_out != F81534_WRITE_BUFFER_SIZE ||
+ size_bulk_in != F81534_MAX_RECEIVE_BLOCK_SIZE) {
+ dev_err(dev, "unsupported endpoint max packet size\n");
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static int f81534_attach(struct usb_serial *serial)
+{
+ struct f81534_serial_private *serial_priv;
+ int index = 0;
+ int status;
+ int i;
+
+ serial_priv = devm_kzalloc(&serial->interface->dev,
+ sizeof(*serial_priv), GFP_KERNEL);
+ if (!serial_priv)
+ return -ENOMEM;
+
+ usb_set_serial_data(serial, serial_priv);
+
+ mutex_init(&serial_priv->urb_mutex);
+
+ status = f81534_setup_ports(serial);
+ if (status)
+ return status;
+
+ /* Check for a custom setting */
+ status = f81534_find_config_idx(serial, &serial_priv->setting_idx);
+ if (status) {
+ dev_err(&serial->interface->dev, "%s: find idx failed: %d\n",
+ __func__, status);
+ return status;
+ }
+
+ /*
+ * Read the custom data only when it is available; otherwise read
+ * the default values instead.
+ */
+ if (serial_priv->setting_idx == F81534_CUSTOM_NO_CUSTOM_DATA) {
+ /*
+ * The default configuration layout:
+ * byte 0/1/2/3: uart setting
+ */
+ status = f81534_read_flash(serial,
+ F81534_DEF_CONF_ADDRESS_START,
+ F81534_DEF_CONF_SIZE,
+ serial_priv->conf_data);
+ if (status) {
+ dev_err(&serial->interface->dev,
+ "%s: read reserve data failed: %d\n",
+ __func__, status);
+ return status;
+ }
+ } else {
+ /* Only read 8 bytes for mode & GPIO */
+ status = f81534_read_flash(serial,
+ F81534_CUSTOM_ADDRESS_START +
+ F81534_CONF_OFFSET,
+ sizeof(serial_priv->conf_data),
+ serial_priv->conf_data);
+ if (status) {
+ dev_err(&serial->interface->dev,
+ "%s: idx: %d get data failed: %d\n",
+ __func__, serial_priv->setting_idx,
+ status);
+ return status;
+ }
+ }
+
+ /* Assign phy-to-logic mapping */
+ for (i = 0; i < F81534_NUM_PORT; ++i) {
+ if (serial_priv->conf_data[i] & F81534_PORT_UNAVAILABLE)
+ continue;
+
+ serial_priv->tty_idx[i] = index++;
+ dev_dbg(&serial->interface->dev,
+ "%s: phy_num: %d, tty_idx: %d\n", __func__, i,
+ serial_priv->tty_idx[i]);
+ }
+
+ return 0;
+}
+
+static int f81534_port_probe(struct usb_serial_port *port)
+{
+ struct f81534_port_private *port_priv;
+ int ret;
+
+ port_priv = devm_kzalloc(&port->dev, sizeof(*port_priv), GFP_KERNEL);
+ if (!port_priv)
+ return -ENOMEM;
+
+ spin_lock_init(&port_priv->msr_lock);
+ mutex_init(&port_priv->mcr_mutex);
+
+ /*
+ * Assign logic-to-phy mapping. Check the int return value before
+ * storing it in the u8 phy_num, where a negative error would be lost.
+ */
+ ret = f81534_logic_to_phy_port(port->serial, port);
+ if (ret < 0 || ret >= F81534_NUM_PORT)
+ return -ENODEV;
+
+ port_priv->phy_num = ret;
+
+ usb_set_serial_port_data(port, port_priv);
+ dev_dbg(&port->dev, "%s: port_number: %d, phy_num: %d\n", __func__,
+ port->port_number, port_priv->phy_num);
+
+ return 0;
+}
+
+static int f81534_tiocmget(struct tty_struct *tty)
+{
+ struct usb_serial_port *port = tty->driver_data;
+ struct f81534_port_private *port_priv = usb_get_serial_port_data(port);
+ int status;
+ int r;
+ u8 msr;
+ u8 mcr;
+
+ /* Read current MSR from device */
+ status = f81534_get_port_register(port, F81534_MODEM_STATUS_REG, &msr);
+ if (status)
+ return status;
+
+ mutex_lock(&port_priv->mcr_mutex);
+ mcr = port_priv->shadow_mcr;
+ mutex_unlock(&port_priv->mcr_mutex);
+
+ r = (mcr & UART_MCR_DTR ? TIOCM_DTR : 0) |
+ (mcr & UART_MCR_RTS ? TIOCM_RTS : 0) |
+ (msr & UART_MSR_CTS ? TIOCM_CTS : 0) |
+ (msr & UART_MSR_DCD ? TIOCM_CAR : 0) |
+ (msr & UART_MSR_RI ? TIOCM_RI : 0) |
+ (msr & UART_MSR_DSR ? TIOCM_DSR : 0);
+
+ return r;
+}
+
+static int f81534_tiocmset(struct tty_struct *tty, unsigned int set,
+ unsigned int clear)
+{
+ struct usb_serial_port *port = tty->driver_data;
+
+ return f81534_update_mctrl(port, set, clear);
+}
+
+static void f81534_dtr_rts(struct usb_serial_port *port, int on)
+{
+ if (on)
+ f81534_update_mctrl(port, TIOCM_DTR | TIOCM_RTS, 0);
+ else
+ f81534_update_mctrl(port, 0, TIOCM_DTR | TIOCM_RTS);
+}
+
+static int f81534_write(struct tty_struct *tty, struct usb_serial_port *port,
+ const u8 *buf, int count)
+{
+ int bytes_out, status;
+
+ if (!count)
+ return 0;
+
+ bytes_out = kfifo_in_locked(&port->write_fifo, buf, count,
+ &port->lock);
+
+ status = f81534_submit_writer(port, GFP_ATOMIC);
+ if (status) {
+ dev_err(&port->dev, "%s: submit failed\n", __func__);
+ return status;
+ }
+
+ return bytes_out;
+}
+
+static bool f81534_tx_empty(struct usb_serial_port *port)
+{
+ struct f81534_port_private *port_priv = usb_get_serial_port_data(port);
+
+ return test_bit(F81534_TX_EMPTY_BIT, &port_priv->tx_empty);
+}
+
+static int f81534_resume(struct usb_serial *serial)
+{
+ struct f81534_serial_private *serial_priv =
+ usb_get_serial_data(serial);
+ struct usb_serial_port *port;
+ int error = 0;
+ int status;
+ size_t i;
+
+	/*
+	 * Port 0's bulk-in URB is registered once any port is opened; it carries
+	 * all ports' received data, MSR changes and TX_EMPTY status.
+	 */
+ mutex_lock(&serial_priv->urb_mutex);
+
+ if (serial_priv->opened_port) {
+ status = f81534_submit_read_urb(serial, GFP_NOIO);
+ if (status) {
+ mutex_unlock(&serial_priv->urb_mutex);
+ return status;
+ }
+ }
+
+ mutex_unlock(&serial_priv->urb_mutex);
+
+ for (i = 0; i < serial->num_ports; i++) {
+ port = serial->port[i];
+ if (!tty_port_initialized(&port->port))
+ continue;
+
+ status = f81534_submit_writer(port, GFP_NOIO);
+ if (status) {
+ dev_err(&port->dev, "%s: submit failed\n", __func__);
+ ++error;
+ }
+ }
+
+ if (error)
+ return -EIO;
+
+ return 0;
+}
+
+static struct usb_serial_driver f81534_device = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "f81534",
+ },
+ .description = DRIVER_DESC,
+ .id_table = f81534_id_table,
+ .open = f81534_open,
+ .close = f81534_close,
+ .write = f81534_write,
+ .tx_empty = f81534_tx_empty,
+ .calc_num_ports = f81534_calc_num_ports,
+ .probe = f81534_probe,
+ .attach = f81534_attach,
+ .port_probe = f81534_port_probe,
+ .dtr_rts = f81534_dtr_rts,
+ .process_read_urb = f81534_process_read_urb,
+ .ioctl = f81534_ioctl,
+ .tiocmget = f81534_tiocmget,
+ .tiocmset = f81534_tiocmset,
+ .write_bulk_callback = f81534_write_usb_callback,
+ .set_termios = f81534_set_termios,
+ .resume = f81534_resume,
+};
+
+static struct usb_serial_driver *const serial_drivers[] = {
+ &f81534_device, NULL
+};
+
+module_usb_serial_driver(serial_drivers, f81534_id_table);
+
+MODULE_DEVICE_TABLE(usb, f81534_id_table);
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_AUTHOR("Peter Hong <Peter_Hong@fintek.com.tw>");
+MODULE_AUTHOR("Tom Tsai <Tom_Tsai@fintek.com.tw>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index 6e9fc8bcc285..23d14b98ae2a 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -1455,8 +1455,6 @@ static int get_serial_info(struct usb_serial_port *port,
struct ftdi_private *priv = usb_get_serial_port_data(port);
struct serial_struct tmp;
- if (!retinfo)
- return -EFAULT;
memset(&tmp, 0, sizeof(tmp));
tmp.flags = priv->flags;
tmp.baud_base = priv->baud_base;
@@ -1538,9 +1536,6 @@ static int get_lsr_info(struct usb_serial_port *port,
struct ftdi_private *priv = usb_get_serial_port_data(port);
unsigned int result = 0;
- if (!retinfo)
- return -EFAULT;
-
if (priv->transmit_empty)
result = TIOCSER_TEMT;
diff --git a/drivers/usb/serial/io_edgeport.c b/drivers/usb/serial/io_edgeport.c
index 11c05ce2f35f..dcc0c58aaad5 100644
--- a/drivers/usb/serial/io_edgeport.c
+++ b/drivers/usb/serial/io_edgeport.c
@@ -1554,9 +1554,6 @@ static int get_serial_info(struct edgeport_port *edge_port,
{
struct serial_struct tmp;
- if (!retinfo)
- return -EFAULT;
-
memset(&tmp, 0, sizeof(tmp));
tmp.type = PORT_16550A;
diff --git a/drivers/usb/serial/io_ti.c b/drivers/usb/serial/io_ti.c
index fce82fd79f77..c339163698eb 100644
--- a/drivers/usb/serial/io_ti.c
+++ b/drivers/usb/serial/io_ti.c
@@ -2459,9 +2459,6 @@ static int get_serial_info(struct edgeport_port *edge_port,
struct serial_struct tmp;
unsigned cwait;
- if (!retinfo)
- return -EFAULT;
-
cwait = edge_port->port->port.closing_wait;
if (cwait != ASYNC_CLOSING_WAIT_NONE)
cwait = jiffies_to_msecs(cwait) / 10;
diff --git a/drivers/usb/serial/kl5kusb105.c b/drivers/usb/serial/kl5kusb105.c
index fc5d3a791e08..0ee190fc1bf8 100644
--- a/drivers/usb/serial/kl5kusb105.c
+++ b/drivers/usb/serial/kl5kusb105.c
@@ -296,7 +296,7 @@ static int klsi_105_open(struct tty_struct *tty, struct usb_serial_port *port)
rc = usb_serial_generic_open(tty, port);
if (rc) {
retval = rc;
- goto exit;
+ goto err_free_cfg;
}
rc = usb_control_msg(port->serial->dev,
@@ -311,21 +311,38 @@ static int klsi_105_open(struct tty_struct *tty, struct usb_serial_port *port)
if (rc < 0) {
dev_err(&port->dev, "Enabling read failed (error = %d)\n", rc);
retval = rc;
+ goto err_generic_close;
} else
dev_dbg(&port->dev, "%s - enabled reading\n", __func__);
rc = klsi_105_get_line_state(port, &line_state);
- if (rc >= 0) {
- spin_lock_irqsave(&priv->lock, flags);
- priv->line_state = line_state;
- spin_unlock_irqrestore(&priv->lock, flags);
- dev_dbg(&port->dev, "%s - read line state 0x%lx\n", __func__, line_state);
- retval = 0;
- } else
+ if (rc < 0) {
retval = rc;
+ goto err_disable_read;
+ }
+
+ spin_lock_irqsave(&priv->lock, flags);
+ priv->line_state = line_state;
+ spin_unlock_irqrestore(&priv->lock, flags);
+ dev_dbg(&port->dev, "%s - read line state 0x%lx\n", __func__,
+ line_state);
+
+ return 0;
-exit:
+err_disable_read:
+ usb_control_msg(port->serial->dev,
+ usb_sndctrlpipe(port->serial->dev, 0),
+ KL5KUSB105A_SIO_CONFIGURE,
+ USB_TYPE_VENDOR | USB_DIR_OUT,
+ KL5KUSB105A_SIO_CONFIGURE_READ_OFF,
+ 0, /* index */
+ NULL, 0,
+ KLSI_TIMEOUT);
+err_generic_close:
+ usb_serial_generic_close(port);
+err_free_cfg:
kfree(cfg);
+
return retval;
}
diff --git a/drivers/usb/serial/mos7720.c b/drivers/usb/serial/mos7720.c
index de9992b492b0..d52caa03679c 100644
--- a/drivers/usb/serial/mos7720.c
+++ b/drivers/usb/serial/mos7720.c
@@ -1861,9 +1861,6 @@ static int get_serial_info(struct moschip_port *mos7720_port,
{
struct serial_struct tmp;
- if (!retinfo)
- return -EFAULT;
-
memset(&tmp, 0, sizeof(tmp));
tmp.type = PORT_16550A;
diff --git a/drivers/usb/serial/mos7840.c b/drivers/usb/serial/mos7840.c
index 57426d703a09..9a220b8e810f 100644
--- a/drivers/usb/serial/mos7840.c
+++ b/drivers/usb/serial/mos7840.c
@@ -1956,9 +1956,6 @@ static int mos7840_get_serial_info(struct moschip_port *mos7840_port,
if (mos7840_port == NULL)
return -1;
- if (!retinfo)
- return -EFAULT;
-
memset(&tmp, 0, sizeof(tmp));
tmp.type = PORT_16550A;
diff --git a/drivers/usb/serial/opticon.c b/drivers/usb/serial/opticon.c
index 4b7bfb394a32..5ded6f524d59 100644
--- a/drivers/usb/serial/opticon.c
+++ b/drivers/usb/serial/opticon.c
@@ -336,9 +336,6 @@ static int get_serial_info(struct usb_serial_port *port,
{
struct serial_struct tmp;
- if (!serial)
- return -EFAULT;
-
memset(&tmp, 0x00, sizeof(tmp));
/* fake emulate a 16550 uart to make userspace code happy */
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 9894e341c6ac..7ce31a4c7e7f 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -268,6 +268,8 @@ static void option_instat_callback(struct urb *urb);
#define TELIT_PRODUCT_CC864_SINGLE 0x1006
#define TELIT_PRODUCT_DE910_DUAL 0x1010
#define TELIT_PRODUCT_UE910_V2 0x1012
+#define TELIT_PRODUCT_LE922_USBCFG1 0x1040
+#define TELIT_PRODUCT_LE922_USBCFG2 0x1041
#define TELIT_PRODUCT_LE922_USBCFG0 0x1042
#define TELIT_PRODUCT_LE922_USBCFG3 0x1043
#define TELIT_PRODUCT_LE922_USBCFG5 0x1045
@@ -1210,6 +1212,10 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_UE910_V2) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE922_USBCFG0),
.driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg0 },
+ { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE922_USBCFG1),
+ .driver_info = (kernel_ulong_t)&telit_le910_blacklist },
+ { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE922_USBCFG2),
+ .driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg3 },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE922_USBCFG3),
.driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg3 },
{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, TELIT_PRODUCT_LE922_USBCFG5, 0xff),
@@ -1989,6 +1995,7 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d02, 0xff, 0x00, 0x00) },
{ USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d03, 0xff, 0x02, 0x01) },
{ USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d03, 0xff, 0x00, 0x00) },
+ { USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7d04, 0xff) }, /* D-Link DWM-158 */
{ USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7e19, 0xff), /* D-Link DWM-221 B1 */
.driver_info = (kernel_ulong_t)&net_intf4_blacklist },
{ USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e01, 0xff, 0xff, 0xff) }, /* D-Link DWM-152/C1 */
diff --git a/drivers/usb/serial/quatech2.c b/drivers/usb/serial/quatech2.c
index 85acb50a7ee2..659cb8606bd9 100644
--- a/drivers/usb/serial/quatech2.c
+++ b/drivers/usb/serial/quatech2.c
@@ -463,9 +463,6 @@ static int get_serial_info(struct usb_serial_port *port,
{
struct serial_struct tmp;
- if (!retinfo)
- return -EFAULT;
-
memset(&tmp, 0, sizeof(tmp));
tmp.line = port->minor;
tmp.port = 0;
diff --git a/drivers/usb/serial/ssu100.c b/drivers/usb/serial/ssu100.c
index 70a098de429f..2a156144c76c 100644
--- a/drivers/usb/serial/ssu100.c
+++ b/drivers/usb/serial/ssu100.c
@@ -318,9 +318,6 @@ static int get_serial_info(struct usb_serial_port *port,
{
struct serial_struct tmp;
- if (!retinfo)
- return -EFAULT;
-
memset(&tmp, 0, sizeof(tmp));
tmp.line = port->minor;
tmp.port = 0;
diff --git a/drivers/usb/serial/ti_usb_3410_5052.c b/drivers/usb/serial/ti_usb_3410_5052.c
index a8b9bdba314f..8db9d071d940 100644
--- a/drivers/usb/serial/ti_usb_3410_5052.c
+++ b/drivers/usb/serial/ti_usb_3410_5052.c
@@ -1426,9 +1426,6 @@ static int ti_get_serial_info(struct ti_port *tport,
struct serial_struct ret_serial;
unsigned cwait;
- if (!ret_arg)
- return -EFAULT;
-
cwait = port->port.closing_wait;
if (cwait != ASYNC_CLOSING_WAIT_NONE)
cwait = jiffies_to_msecs(cwait) / 10;
diff --git a/drivers/usb/serial/usb_wwan.c b/drivers/usb/serial/usb_wwan.c
index 3dfdfc81254b..59bfcb3da116 100644
--- a/drivers/usb/serial/usb_wwan.c
+++ b/drivers/usb/serial/usb_wwan.c
@@ -140,9 +140,6 @@ static int get_serial_info(struct usb_serial_port *port,
{
struct serial_struct tmp;
- if (!retinfo)
- return -EFAULT;
-
memset(&tmp, 0, sizeof(tmp));
tmp.line = port->minor;
tmp.port = port->port_number;
diff --git a/drivers/usb/storage/usb.c b/drivers/usb/storage/usb.c
index 2cba13a532cd..615bea08ec0a 100644
--- a/drivers/usb/storage/usb.c
+++ b/drivers/usb/storage/usb.c
@@ -52,7 +52,6 @@
#include <linux/sched.h>
#include <linux/errno.h>
-#include <linux/freezer.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/kthread.h>
diff --git a/drivers/usb/usbip/vhci_hcd.c b/drivers/usb/usbip/vhci_hcd.c
index 03eccf29ace0..c4724fb3a691 100644
--- a/drivers/usb/usbip/vhci_hcd.c
+++ b/drivers/usb/usbip/vhci_hcd.c
@@ -460,13 +460,14 @@ static void vhci_tx_urb(struct urb *urb)
{
struct vhci_device *vdev = get_vdev(urb->dev);
struct vhci_priv *priv;
- struct vhci_hcd *vhci = vdev_to_vhci(vdev);
+ struct vhci_hcd *vhci;
unsigned long flags;
if (!vdev) {
pr_err("could not get virtual device");
return;
}
+ vhci = vdev_to_vhci(vdev);
priv = kzalloc(sizeof(struct vhci_priv), GFP_ATOMIC);
if (!priv) {
diff --git a/drivers/usb/usbip/vhci_sysfs.c b/drivers/usb/usbip/vhci_sysfs.c
index c404017c1b5a..b96e5b189269 100644
--- a/drivers/usb/usbip/vhci_sysfs.c
+++ b/drivers/usb/usbip/vhci_sysfs.c
@@ -361,6 +361,7 @@ static void set_status_attr(int id)
status->attr.attr.name = status->name;
status->attr.attr.mode = S_IRUGO;
status->attr.show = status_show;
+ sysfs_attr_init(&status->attr.attr);
}
static int init_status_attrs(void)
diff --git a/drivers/usb/usbip/vudc_dev.c b/drivers/usb/usbip/vudc_dev.c
index 7091848df6c8..968471b62cbc 100644
--- a/drivers/usb/usbip/vudc_dev.c
+++ b/drivers/usb/usbip/vudc_dev.c
@@ -242,10 +242,10 @@ static const struct usb_gadget_ops vgadget_ops = {
static int vep_enable(struct usb_ep *_ep,
const struct usb_endpoint_descriptor *desc)
{
- struct vep *ep;
- struct vudc *udc;
- unsigned maxp;
- unsigned long flags;
+ struct vep *ep;
+ struct vudc *udc;
+ unsigned int maxp;
+ unsigned long flags;
ep = to_vep(_ep);
udc = ep_to_vudc(ep);
@@ -259,7 +259,7 @@ static int vep_enable(struct usb_ep *_ep,
spin_lock_irqsave(&udc->lock, flags);
- maxp = usb_endpoint_maxp(desc) & 0x7ff;
+ maxp = usb_endpoint_maxp(desc);
_ep->maxpacket = maxp;
ep->desc = desc;
ep->type = usb_endpoint_type(desc);
@@ -549,30 +549,34 @@ static int init_vudc_hw(struct vudc *udc)
sprintf(ep->name, "ep%d%s", num,
i ? (is_out ? "out" : "in") : "");
ep->ep.name = ep->name;
+
+ ep->ep.ops = &vep_ops;
+
+ usb_ep_set_maxpacket_limit(&ep->ep, ~0);
+ ep->ep.max_streams = 16;
+ ep->gadget = &udc->gadget;
+ INIT_LIST_HEAD(&ep->req_queue);
+
if (i == 0) {
+ /* ep0 */
ep->ep.caps.type_control = true;
ep->ep.caps.dir_out = true;
ep->ep.caps.dir_in = true;
+
+ udc->gadget.ep0 = &ep->ep;
} else {
+ /* All other eps */
ep->ep.caps.type_iso = true;
ep->ep.caps.type_int = true;
ep->ep.caps.type_bulk = true;
- }
- if (is_out)
- ep->ep.caps.dir_out = true;
- else
- ep->ep.caps.dir_in = true;
+ if (is_out)
+ ep->ep.caps.dir_out = true;
+ else
+ ep->ep.caps.dir_in = true;
- ep->ep.ops = &vep_ops;
- list_add_tail(&ep->ep.ep_list, &udc->gadget.ep_list);
- ep->halted = ep->wedged = ep->already_seen =
- ep->setup_stage = 0;
- usb_ep_set_maxpacket_limit(&ep->ep, ~0);
- ep->ep.max_streams = 16;
- ep->gadget = &udc->gadget;
- ep->desc = NULL;
- INIT_LIST_HEAD(&ep->req_queue);
+ list_add_tail(&ep->ep.ep_list, &udc->gadget.ep_list);
+ }
}
spin_lock_init(&udc->lock);
@@ -589,9 +593,6 @@ static int init_vudc_hw(struct vudc *udc)
ud->eh_ops.reset = vudc_device_reset;
ud->eh_ops.unusable = vudc_device_unusable;
- udc->gadget.ep0 = &udc->ep[0].ep;
- list_del_init(&udc->ep[0].ep.ep_list);
-
v_init_timer(udc);
return 0;
diff --git a/drivers/usb/usbip/vudc_transfer.c b/drivers/usb/usbip/vudc_transfer.c
index aba6bd478045..4cfd475ee865 100644
--- a/drivers/usb/usbip/vudc_transfer.c
+++ b/drivers/usb/usbip/vudc_transfer.c
@@ -73,8 +73,8 @@ static int handle_control_request(struct vudc *udc, struct urb *urb,
{
struct vep *ep2;
int ret_val = 1;
- unsigned w_index;
- unsigned w_value;
+ unsigned int w_index;
+ unsigned int w_value;
w_index = le16_to_cpu(setup->wIndex);
w_value = le16_to_cpu(setup->wValue);
@@ -200,7 +200,7 @@ static int transfer(struct vudc *udc,
top:
/* if there's no request queued, the device is NAKing; return */
list_for_each_entry(req, &ep->req_queue, req_entry) {
- unsigned host_len, dev_len, len;
+ unsigned int host_len, dev_len, len;
void *ubuf_pos, *rbuf_pos;
int is_short, to_host;
int rescan = 0;
@@ -339,6 +339,8 @@ static void v_timer(unsigned long _vudc)
total = timer->frame_limit;
}
+ /* We have to clear ep0 flags separately as it's not on the list */
+ udc->ep[0].already_seen = 0;
list_for_each_entry(_ep, &udc->gadget.ep_list, ep_list) {
ep = to_vep(_ep);
ep->already_seen = 0;
diff --git a/drivers/usb/wusbcore/dev-sysfs.c b/drivers/usb/wusbcore/dev-sysfs.c
index 415b14002a61..d4de56b93d68 100644
--- a/drivers/usb/wusbcore/dev-sysfs.c
+++ b/drivers/usb/wusbcore/dev-sysfs.c
@@ -53,7 +53,7 @@ static ssize_t wusb_disconnect_store(struct device *dev,
wusbhc_put(wusbhc);
return size;
}
-static DEVICE_ATTR(wusb_disconnect, 0200, NULL, wusb_disconnect_store);
+static DEVICE_ATTR_WO(wusb_disconnect);
static ssize_t wusb_cdid_show(struct device *dev,
struct device_attribute *attr, char *buf)
@@ -69,7 +69,7 @@ static ssize_t wusb_cdid_show(struct device *dev,
wusb_dev_put(wusb_dev);
return result + 1;
}
-static DEVICE_ATTR(wusb_cdid, 0444, wusb_cdid_show, NULL);
+static DEVICE_ATTR_RO(wusb_cdid);
static ssize_t wusb_ck_store(struct device *dev,
struct device_attribute *attr,
@@ -105,7 +105,7 @@ static ssize_t wusb_ck_store(struct device *dev,
wusbhc_put(wusbhc);
return result < 0 ? result : size;
}
-static DEVICE_ATTR(wusb_ck, 0200, NULL, wusb_ck_store);
+static DEVICE_ATTR_WO(wusb_ck);
static struct attribute *wusb_dev_attrs[] = {
&dev_attr_wusb_disconnect.attr,
diff --git a/drivers/usb/wusbcore/security.c b/drivers/usb/wusbcore/security.c
index 8c9421b69da0..170f2c38de9b 100644
--- a/drivers/usb/wusbcore/security.c
+++ b/drivers/usb/wusbcore/security.c
@@ -240,6 +240,7 @@ int wusb_dev_sec_add(struct wusbhc *wusbhc,
if (new_secd == NULL) {
dev_err(dev,
"Can't allocate space for security descriptors\n");
+ result = -ENOMEM;
goto out;
}
secd = new_secd;
diff --git a/drivers/usb/wusbcore/wa-nep.c b/drivers/usb/wusbcore/wa-nep.c
index ed4622279c63..e3819fc182b0 100644
--- a/drivers/usb/wusbcore/wa-nep.c
+++ b/drivers/usb/wusbcore/wa-nep.c
@@ -198,6 +198,7 @@ static int wa_nep_queue(struct wahc *wa, size_t size)
if (nw == NULL) {
if (printk_ratelimit())
dev_err(dev, "No memory to queue notification\n");
+ result = -ENOMEM;
goto out;
}
INIT_WORK(&nw->work, wa_notif_dispatch);
diff --git a/drivers/usb/wusbcore/wa-xfer.c b/drivers/usb/wusbcore/wa-xfer.c
index 167fcc71f5f6..e70322b1dd02 100644
--- a/drivers/usb/wusbcore/wa-xfer.c
+++ b/drivers/usb/wusbcore/wa-xfer.c
@@ -1203,6 +1203,7 @@ static int __wa_xfer_setup_segs(struct wa_xfer *xfer, size_t xfer_hdr_size)
sizeof(struct wa_xfer_packet_info_hwaiso) +
(seg_isoc_frame_count * sizeof(__le16));
}
+ result = -ENOMEM;
seg = xfer->seg[cnt] = kmalloc(alloc_size + iso_pkt_descr_size,
GFP_ATOMIC);
if (seg == NULL)
diff --git a/drivers/usb/wusbcore/wusbhc.c b/drivers/usb/wusbcore/wusbhc.c
index 94f401ab859f..a273a91cf667 100644
--- a/drivers/usb/wusbcore/wusbhc.c
+++ b/drivers/usb/wusbcore/wusbhc.c
@@ -84,8 +84,7 @@ static ssize_t wusb_trust_timeout_store(struct device *dev,
out:
return result < 0 ? result : size;
}
-static DEVICE_ATTR(wusb_trust_timeout, 0644, wusb_trust_timeout_show,
- wusb_trust_timeout_store);
+static DEVICE_ATTR_RW(wusb_trust_timeout);
/*
* Show the current WUSB CHID.
@@ -145,7 +144,7 @@ static ssize_t wusb_chid_store(struct device *dev,
result = wusbhc_chid_set(wusbhc, &chid);
return result < 0 ? result : size;
}
-static DEVICE_ATTR(wusb_chid, 0644, wusb_chid_show, wusb_chid_store);
+static DEVICE_ATTR_RW(wusb_chid);
static ssize_t wusb_phy_rate_show(struct device *dev,
@@ -174,8 +173,7 @@ static ssize_t wusb_phy_rate_store(struct device *dev,
wusbhc->phy_rate = phy_rate;
return size;
}
-static DEVICE_ATTR(wusb_phy_rate, 0644, wusb_phy_rate_show,
- wusb_phy_rate_store);
+static DEVICE_ATTR_RW(wusb_phy_rate);
static ssize_t wusb_dnts_show(struct device *dev,
struct device_attribute *attr,
@@ -205,7 +203,7 @@ static ssize_t wusb_dnts_store(struct device *dev,
return size;
}
-static DEVICE_ATTR(wusb_dnts, 0644, wusb_dnts_show, wusb_dnts_store);
+static DEVICE_ATTR_RW(wusb_dnts);
static ssize_t wusb_retry_count_show(struct device *dev,
struct device_attribute *attr,
@@ -234,8 +232,7 @@ static ssize_t wusb_retry_count_store(struct device *dev,
return size;
}
-static DEVICE_ATTR(wusb_retry_count, 0644, wusb_retry_count_show,
- wusb_retry_count_store);
+static DEVICE_ATTR_RW(wusb_retry_count);
/* Group all the WUSBHC attributes */
static struct attribute *wusbhc_attrs[] = {
diff --git a/drivers/vfio/Kconfig b/drivers/vfio/Kconfig
index da6e2ce77495..23eced02aaf6 100644
--- a/drivers/vfio/Kconfig
+++ b/drivers/vfio/Kconfig
@@ -48,4 +48,5 @@ menuconfig VFIO_NOIOMMU
source "drivers/vfio/pci/Kconfig"
source "drivers/vfio/platform/Kconfig"
+source "drivers/vfio/mdev/Kconfig"
source "virt/lib/Kconfig"
diff --git a/drivers/vfio/Makefile b/drivers/vfio/Makefile
index 7b8a31f63fea..4a23c13b6be4 100644
--- a/drivers/vfio/Makefile
+++ b/drivers/vfio/Makefile
@@ -7,3 +7,4 @@ obj-$(CONFIG_VFIO_IOMMU_SPAPR_TCE) += vfio_iommu_spapr_tce.o
obj-$(CONFIG_VFIO_SPAPR_EEH) += vfio_spapr_eeh.o
obj-$(CONFIG_VFIO_PCI) += pci/
obj-$(CONFIG_VFIO_PLATFORM) += platform/
+obj-$(CONFIG_VFIO_MDEV) += mdev/
diff --git a/drivers/vfio/mdev/Kconfig b/drivers/vfio/mdev/Kconfig
new file mode 100644
index 000000000000..14fdb106a827
--- /dev/null
+++ b/drivers/vfio/mdev/Kconfig
@@ -0,0 +1,17 @@
+
+config VFIO_MDEV
+ tristate "Mediated device driver framework"
+ depends on VFIO
+ default n
+ help
+ Provides a framework to virtualize devices.
+ See Documentation/vfio-mediated-device.txt for more details.
+
+	  If you don't know what to do here, say N.
+
+config VFIO_MDEV_DEVICE
+ tristate "VFIO driver for Mediated devices"
+ depends on VFIO && VFIO_MDEV
+ default n
+ help
+ VFIO based driver for Mediated devices.
diff --git a/drivers/vfio/mdev/Makefile b/drivers/vfio/mdev/Makefile
new file mode 100644
index 000000000000..fa2d5ea466ee
--- /dev/null
+++ b/drivers/vfio/mdev/Makefile
@@ -0,0 +1,5 @@
+
+mdev-y := mdev_core.o mdev_sysfs.o mdev_driver.o
+
+obj-$(CONFIG_VFIO_MDEV) += mdev.o
+obj-$(CONFIG_VFIO_MDEV_DEVICE) += vfio_mdev.o
diff --git a/drivers/vfio/mdev/mdev_core.c b/drivers/vfio/mdev/mdev_core.c
new file mode 100644
index 000000000000..be1ee89ee917
--- /dev/null
+++ b/drivers/vfio/mdev/mdev_core.c
@@ -0,0 +1,385 @@
+/*
+ * Mediated device Core Driver
+ *
+ * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
+ * Author: Neo Jia <cjia@nvidia.com>
+ * Kirti Wankhede <kwankhede@nvidia.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/slab.h>
+#include <linux/uuid.h>
+#include <linux/sysfs.h>
+#include <linux/mdev.h>
+
+#include "mdev_private.h"
+
+#define DRIVER_VERSION "0.1"
+#define DRIVER_AUTHOR "NVIDIA Corporation"
+#define DRIVER_DESC "Mediated device Core Driver"
+
+static LIST_HEAD(parent_list);
+static DEFINE_MUTEX(parent_list_lock);
+static struct class_compat *mdev_bus_compat_class;
+
+static int _find_mdev_device(struct device *dev, void *data)
+{
+ struct mdev_device *mdev;
+
+ if (!dev_is_mdev(dev))
+ return 0;
+
+ mdev = to_mdev_device(dev);
+
+ if (uuid_le_cmp(mdev->uuid, *(uuid_le *)data) == 0)
+ return 1;
+
+ return 0;
+}
+
+static bool mdev_device_exist(struct parent_device *parent, uuid_le uuid)
+{
+ struct device *dev;
+
+ dev = device_find_child(parent->dev, &uuid, _find_mdev_device);
+ if (dev) {
+ put_device(dev);
+ return true;
+ }
+
+ return false;
+}
+
+/* Must be called with parent_list_lock held */
+static struct parent_device *__find_parent_device(struct device *dev)
+{
+ struct parent_device *parent;
+
+ list_for_each_entry(parent, &parent_list, next) {
+ if (parent->dev == dev)
+ return parent;
+ }
+ return NULL;
+}
+
+static void mdev_release_parent(struct kref *kref)
+{
+ struct parent_device *parent = container_of(kref, struct parent_device,
+ ref);
+ struct device *dev = parent->dev;
+
+ kfree(parent);
+ put_device(dev);
+}
+
+static
+inline struct parent_device *mdev_get_parent(struct parent_device *parent)
+{
+ if (parent)
+ kref_get(&parent->ref);
+
+ return parent;
+}
+
+static inline void mdev_put_parent(struct parent_device *parent)
+{
+ if (parent)
+ kref_put(&parent->ref, mdev_release_parent);
+}
+
+static int mdev_device_create_ops(struct kobject *kobj,
+ struct mdev_device *mdev)
+{
+ struct parent_device *parent = mdev->parent;
+ int ret;
+
+ ret = parent->ops->create(kobj, mdev);
+ if (ret)
+ return ret;
+
+ ret = sysfs_create_groups(&mdev->dev.kobj,
+ parent->ops->mdev_attr_groups);
+ if (ret)
+ parent->ops->remove(mdev);
+
+ return ret;
+}
+
+/*
+ * mdev_device_remove_ops() is called from sysfs's 'remove' and when the
+ * parent device is being unregistered from the mdev device framework.
+ * - 'force_remove' is false when called from sysfs's 'remove': if the mdev
+ *   device is active, i.e. in use by a VMM or userspace application, the
+ *   vendor driver may return an error and the device is not removed.
+ * - 'force_remove' is true when called from mdev_unregister_device(): the
+ *   parent device is leaving the framework, so the mdev device is removed
+ *   unconditionally.
+ */
+static int mdev_device_remove_ops(struct mdev_device *mdev, bool force_remove)
+{
+ struct parent_device *parent = mdev->parent;
+ int ret;
+
+	/*
+	 * The vendor driver may return an error if a VMM or userspace
+	 * application is still using this mdev device.
+	 */
+ ret = parent->ops->remove(mdev);
+ if (ret && !force_remove)
+ return -EBUSY;
+
+ sysfs_remove_groups(&mdev->dev.kobj, parent->ops->mdev_attr_groups);
+ return 0;
+}
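
A hedged sketch of the vendor-driver side of this contract; mdev_get_drvdata() is taken from linux/mdev.h, while the state structure and its 'opened' flag are hypothetical:

	/* Hypothetical vendor state; the 'opened' flag is illustrative. */
	struct my_vendor_state {
		bool opened;
	};

	static int my_vendor_remove(struct mdev_device *mdev)
	{
		struct my_vendor_state *state = mdev_get_drvdata(mdev);

		if (state->opened)	/* still in use by a VMM or userspace */
			return -EBUSY;	/* honored only when force_remove is false */

		kfree(state);
		return 0;
	}
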
+
+static int mdev_device_remove_cb(struct device *dev, void *data)
+{
+ if (!dev_is_mdev(dev))
+ return 0;
+
+ return mdev_device_remove(dev, data ? *(bool *)data : true);
+}
+
+/*
+ * mdev_register_device : Register a device
+ * @dev: device structure representing parent device.
+ * @ops: Parent device operation structure to be registered.
+ *
+ * Add device to list of registered parent devices.
+ * Returns a negative value on error, otherwise 0.
+ */
+int mdev_register_device(struct device *dev, const struct parent_ops *ops)
+{
+ int ret;
+ struct parent_device *parent;
+
+ /* check for mandatory ops */
+ if (!ops || !ops->create || !ops->remove || !ops->supported_type_groups)
+ return -EINVAL;
+
+ dev = get_device(dev);
+ if (!dev)
+ return -EINVAL;
+
+ mutex_lock(&parent_list_lock);
+
+ /* Check for duplicate */
+ parent = __find_parent_device(dev);
+ if (parent) {
+ ret = -EEXIST;
+ goto add_dev_err;
+ }
+
+ parent = kzalloc(sizeof(*parent), GFP_KERNEL);
+ if (!parent) {
+ ret = -ENOMEM;
+ goto add_dev_err;
+ }
+
+ kref_init(&parent->ref);
+ mutex_init(&parent->lock);
+
+ parent->dev = dev;
+ parent->ops = ops;
+
+ if (!mdev_bus_compat_class) {
+ mdev_bus_compat_class = class_compat_register("mdev_bus");
+ if (!mdev_bus_compat_class) {
+ ret = -ENOMEM;
+ goto add_dev_err;
+ }
+ }
+
+ ret = parent_create_sysfs_files(parent);
+ if (ret)
+ goto add_dev_err;
+
+ ret = class_compat_create_link(mdev_bus_compat_class, dev, NULL);
+ if (ret)
+ dev_warn(dev, "Failed to create compatibility class link\n");
+
+ list_add(&parent->next, &parent_list);
+ mutex_unlock(&parent_list_lock);
+
+ dev_info(dev, "MDEV: Registered\n");
+ return 0;
+
+add_dev_err:
+ mutex_unlock(&parent_list_lock);
+ if (parent)
+ mdev_put_parent(parent);
+ else
+ put_device(dev);
+ return ret;
+}
+EXPORT_SYMBOL(mdev_register_device);
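
A minimal registration sketch under stated assumptions: my_create(), my_remove() and my_type_groups are hypothetical vendor-driver names; only create, remove and supported_type_groups are mandatory, per the check at the top of mdev_register_device():

	/* Hypothetical vendor callbacks; bodies elided. */
	static int my_create(struct kobject *kobj, struct mdev_device *mdev);
	static int my_remove(struct mdev_device *mdev);

	static const struct parent_ops my_parent_ops = {
		.owner			= THIS_MODULE,
		.supported_type_groups	= my_type_groups,	/* assumed */
		.create			= my_create,
		.remove			= my_remove,
	};

	/* Called from the vendor driver's probe of the physical device. */
	static int my_physical_probe(struct device *dev)
	{
		return mdev_register_device(dev, &my_parent_ops);
	}
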
+
+/*
+ * mdev_unregister_device : Unregister a parent device
+ * @dev: device structure representing parent device.
+ *
+ * Remove the device from the list of registered parent devices and give the
+ * vendor driver a chance to free any existing mediated devices for it.
+ */
+
+void mdev_unregister_device(struct device *dev)
+{
+ struct parent_device *parent;
+ bool force_remove = true;
+
+ mutex_lock(&parent_list_lock);
+ parent = __find_parent_device(dev);
+
+ if (!parent) {
+ mutex_unlock(&parent_list_lock);
+ return;
+ }
+ dev_info(dev, "MDEV: Unregistering\n");
+
+ list_del(&parent->next);
+ class_compat_remove_link(mdev_bus_compat_class, dev, NULL);
+
+ device_for_each_child(dev, (void *)&force_remove,
+ mdev_device_remove_cb);
+
+ parent_remove_sysfs_files(parent);
+
+ mutex_unlock(&parent_list_lock);
+ mdev_put_parent(parent);
+}
+EXPORT_SYMBOL(mdev_unregister_device);
+
+static void mdev_device_release(struct device *dev)
+{
+ struct mdev_device *mdev = to_mdev_device(dev);
+
+ dev_dbg(&mdev->dev, "MDEV: destroying\n");
+ kfree(mdev);
+}
+
+int mdev_device_create(struct kobject *kobj, struct device *dev, uuid_le uuid)
+{
+ int ret;
+ struct mdev_device *mdev;
+ struct parent_device *parent;
+ struct mdev_type *type = to_mdev_type(kobj);
+
+ parent = mdev_get_parent(type->parent);
+ if (!parent)
+ return -EINVAL;
+
+ mutex_lock(&parent->lock);
+
+ /* Check for duplicate */
+ if (mdev_device_exist(parent, uuid)) {
+ ret = -EEXIST;
+ goto create_err;
+ }
+
+ mdev = kzalloc(sizeof(*mdev), GFP_KERNEL);
+ if (!mdev) {
+ ret = -ENOMEM;
+ goto create_err;
+ }
+
+ memcpy(&mdev->uuid, &uuid, sizeof(uuid_le));
+ mdev->parent = parent;
+ kref_init(&mdev->ref);
+
+ mdev->dev.parent = dev;
+ mdev->dev.bus = &mdev_bus_type;
+ mdev->dev.release = mdev_device_release;
+ dev_set_name(&mdev->dev, "%pUl", uuid.b);
+
+ ret = device_register(&mdev->dev);
+ if (ret) {
+ put_device(&mdev->dev);
+ goto create_err;
+ }
+
+ ret = mdev_device_create_ops(kobj, mdev);
+ if (ret)
+ goto create_failed;
+
+ ret = mdev_create_sysfs_files(&mdev->dev, type);
+ if (ret) {
+ mdev_device_remove_ops(mdev, true);
+ goto create_failed;
+ }
+
+ mdev->type_kobj = kobj;
+ dev_dbg(&mdev->dev, "MDEV: created\n");
+
+ mutex_unlock(&parent->lock);
+ return ret;
+
+create_failed:
+ device_unregister(&mdev->dev);
+
+create_err:
+ mutex_unlock(&parent->lock);
+ mdev_put_parent(parent);
+ return ret;
+}
+
+int mdev_device_remove(struct device *dev, bool force_remove)
+{
+ struct mdev_device *mdev;
+ struct parent_device *parent;
+ struct mdev_type *type;
+ int ret;
+
+ mdev = to_mdev_device(dev);
+ type = to_mdev_type(mdev->type_kobj);
+ parent = mdev->parent;
+ mutex_lock(&parent->lock);
+
+ ret = mdev_device_remove_ops(mdev, force_remove);
+ if (ret) {
+ mutex_unlock(&parent->lock);
+ return ret;
+ }
+
+ mdev_remove_sysfs_files(dev, type);
+ device_unregister(dev);
+ mutex_unlock(&parent->lock);
+ mdev_put_parent(parent);
+ return ret;
+}
+
+static int __init mdev_init(void)
+{
+ int ret;
+
+ ret = mdev_bus_register();
+
+	/*
+	 * Attempt to load the vfio_mdev driver. This gives us a working
+	 * environment without the user needing to load it explicitly.
+	 */
+ if (!ret)
+ request_module_nowait("vfio_mdev");
+
+ return ret;
+}
+
+static void __exit mdev_exit(void)
+{
+ if (mdev_bus_compat_class)
+ class_compat_unregister(mdev_bus_compat_class);
+
+ mdev_bus_unregister();
+}
+
+module_init(mdev_init)
+module_exit(mdev_exit)
+
+MODULE_VERSION(DRIVER_VERSION);
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR(DRIVER_AUTHOR);
+MODULE_DESCRIPTION(DRIVER_DESC);
diff --git a/drivers/vfio/mdev/mdev_driver.c b/drivers/vfio/mdev/mdev_driver.c
new file mode 100644
index 000000000000..6f0391f6f9b6
--- /dev/null
+++ b/drivers/vfio/mdev/mdev_driver.c
@@ -0,0 +1,119 @@
+/*
+ * MDEV driver
+ *
+ * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
+ * Author: Neo Jia <cjia@nvidia.com>
+ * Kirti Wankhede <kwankhede@nvidia.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/device.h>
+#include <linux/iommu.h>
+#include <linux/mdev.h>
+
+#include "mdev_private.h"
+
+static int mdev_attach_iommu(struct mdev_device *mdev)
+{
+ int ret;
+ struct iommu_group *group;
+
+ group = iommu_group_alloc();
+ if (IS_ERR(group))
+ return PTR_ERR(group);
+
+ ret = iommu_group_add_device(group, &mdev->dev);
+ if (!ret)
+ dev_info(&mdev->dev, "MDEV: group_id = %d\n",
+ iommu_group_id(group));
+
+ iommu_group_put(group);
+ return ret;
+}
+
+static void mdev_detach_iommu(struct mdev_device *mdev)
+{
+ iommu_group_remove_device(&mdev->dev);
+ dev_info(&mdev->dev, "MDEV: detaching iommu\n");
+}
+
+static int mdev_probe(struct device *dev)
+{
+ struct mdev_driver *drv = to_mdev_driver(dev->driver);
+ struct mdev_device *mdev = to_mdev_device(dev);
+ int ret;
+
+ ret = mdev_attach_iommu(mdev);
+ if (ret)
+ return ret;
+
+ if (drv && drv->probe) {
+ ret = drv->probe(dev);
+ if (ret)
+ mdev_detach_iommu(mdev);
+ }
+
+ return ret;
+}
+
+static int mdev_remove(struct device *dev)
+{
+ struct mdev_driver *drv = to_mdev_driver(dev->driver);
+ struct mdev_device *mdev = to_mdev_device(dev);
+
+ if (drv && drv->remove)
+ drv->remove(dev);
+
+ mdev_detach_iommu(mdev);
+
+ return 0;
+}
+
+struct bus_type mdev_bus_type = {
+ .name = "mdev",
+ .probe = mdev_probe,
+ .remove = mdev_remove,
+};
+EXPORT_SYMBOL_GPL(mdev_bus_type);
+
+/**
+ * mdev_register_driver - register a new MDEV driver
+ * @drv: the driver to register
+ * @owner: module owner of driver to be registered
+ *
+ * Returns a negative value on error, otherwise 0.
+ **/
+int mdev_register_driver(struct mdev_driver *drv, struct module *owner)
+{
+ /* initialize common driver fields */
+ drv->driver.name = drv->name;
+ drv->driver.bus = &mdev_bus_type;
+ drv->driver.owner = owner;
+
+ /* register with core */
+ return driver_register(&drv->driver);
+}
+EXPORT_SYMBOL(mdev_register_driver);
+
+/*
+ * mdev_unregister_driver - unregister MDEV driver
+ * @drv: the driver to unregister
+ */
+void mdev_unregister_driver(struct mdev_driver *drv)
+{
+ driver_unregister(&drv->driver);
+}
+EXPORT_SYMBOL(mdev_unregister_driver);
+
+int mdev_bus_register(void)
+{
+ return bus_register(&mdev_bus_type);
+}
+
+void mdev_bus_unregister(void)
+{
+ bus_unregister(&mdev_bus_type);
+}
diff --git a/drivers/vfio/mdev/mdev_private.h b/drivers/vfio/mdev/mdev_private.h
new file mode 100644
index 000000000000..d35097cbf3d7
--- /dev/null
+++ b/drivers/vfio/mdev/mdev_private.h
@@ -0,0 +1,41 @@
+/*
+ * Mediated device internal definitions
+ *
+ * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
+ * Author: Neo Jia <cjia@nvidia.com>
+ * Kirti Wankhede <kwankhede@nvidia.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef MDEV_PRIVATE_H
+#define MDEV_PRIVATE_H
+
+int mdev_bus_register(void);
+void mdev_bus_unregister(void);
+
+struct mdev_type {
+ struct kobject kobj;
+ struct kobject *devices_kobj;
+ struct parent_device *parent;
+ struct list_head next;
+ struct attribute_group *group;
+};
+
+#define to_mdev_type_attr(_attr) \
+ container_of(_attr, struct mdev_type_attribute, attr)
+#define to_mdev_type(_kobj) \
+ container_of(_kobj, struct mdev_type, kobj)
+
+int parent_create_sysfs_files(struct parent_device *parent);
+void parent_remove_sysfs_files(struct parent_device *parent);
+
+int mdev_create_sysfs_files(struct device *dev, struct mdev_type *type);
+void mdev_remove_sysfs_files(struct device *dev, struct mdev_type *type);
+
+int mdev_device_create(struct kobject *kobj, struct device *dev, uuid_le uuid);
+int mdev_device_remove(struct device *dev, bool force_remove);
+
+#endif /* MDEV_PRIVATE_H */
diff --git a/drivers/vfio/mdev/mdev_sysfs.c b/drivers/vfio/mdev/mdev_sysfs.c
new file mode 100644
index 000000000000..1a53deb2ee10
--- /dev/null
+++ b/drivers/vfio/mdev/mdev_sysfs.c
@@ -0,0 +1,286 @@
+/*
+ * File attributes for Mediated devices
+ *
+ * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
+ * Author: Neo Jia <cjia@nvidia.com>
+ * Kirti Wankhede <kwankhede@nvidia.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/sysfs.h>
+#include <linux/ctype.h>
+#include <linux/device.h>
+#include <linux/slab.h>
+#include <linux/uuid.h>
+#include <linux/mdev.h>
+
+#include "mdev_private.h"
+
+/* Static functions */
+
+static ssize_t mdev_type_attr_show(struct kobject *kobj,
+ struct attribute *__attr, char *buf)
+{
+ struct mdev_type_attribute *attr = to_mdev_type_attr(__attr);
+ struct mdev_type *type = to_mdev_type(kobj);
+ ssize_t ret = -EIO;
+
+ if (attr->show)
+ ret = attr->show(kobj, type->parent->dev, buf);
+ return ret;
+}
+
+static ssize_t mdev_type_attr_store(struct kobject *kobj,
+ struct attribute *__attr,
+ const char *buf, size_t count)
+{
+ struct mdev_type_attribute *attr = to_mdev_type_attr(__attr);
+ struct mdev_type *type = to_mdev_type(kobj);
+ ssize_t ret = -EIO;
+
+ if (attr->store)
+ ret = attr->store(&type->kobj, type->parent->dev, buf, count);
+ return ret;
+}
+
+static const struct sysfs_ops mdev_type_sysfs_ops = {
+ .show = mdev_type_attr_show,
+ .store = mdev_type_attr_store,
+};
+
+static ssize_t create_store(struct kobject *kobj, struct device *dev,
+ const char *buf, size_t count)
+{
+ char *str;
+ uuid_le uuid;
+ int ret;
+
+ if ((count < UUID_STRING_LEN) || (count > UUID_STRING_LEN + 1))
+ return -EINVAL;
+
+ str = kstrndup(buf, count, GFP_KERNEL);
+ if (!str)
+ return -ENOMEM;
+
+ ret = uuid_le_to_bin(str, &uuid);
+ kfree(str);
+ if (ret)
+ return ret;
+
+ ret = mdev_device_create(kobj, dev, uuid);
+ if (ret)
+ return ret;
+
+ return count;
+}
+
+MDEV_TYPE_ATTR_WO(create);
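
create_store() accepts exactly one canonical UUID (with an optional trailing newline). A userspace sketch, assuming a hypothetical parent device path and type name:

	/* Illustrative userspace snippet: create an mdev instance via sysfs. */
	#include <stdio.h>

	int main(void)
	{
		/* path and type name are hypothetical */
		const char *path = "/sys/devices/pci0000:00/0000:00:02.0/"
				   "mdev_supported_types/my_driver-my_type/create";
		FILE *f = fopen(path, "w");

		if (!f)
			return 1;
		/* a 36-character UUID, as required by create_store() */
		fputs("83b8f4f2-509f-382f-3c1e-e6bfe0fa1001\n", f);
		return fclose(f) ? 1 : 0;
	}
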
+
+static void mdev_type_release(struct kobject *kobj)
+{
+ struct mdev_type *type = to_mdev_type(kobj);
+
+ pr_debug("Releasing group %s\n", kobj->name);
+ kfree(type);
+}
+
+static struct kobj_type mdev_type_ktype = {
+ .sysfs_ops = &mdev_type_sysfs_ops,
+ .release = mdev_type_release,
+};
+
+struct mdev_type *add_mdev_supported_type(struct parent_device *parent,
+ struct attribute_group *group)
+{
+ struct mdev_type *type;
+ int ret;
+
+ if (!group->name) {
+ pr_err("%s: Type name empty!\n", __func__);
+ return ERR_PTR(-EINVAL);
+ }
+
+ type = kzalloc(sizeof(*type), GFP_KERNEL);
+ if (!type)
+ return ERR_PTR(-ENOMEM);
+
+ type->kobj.kset = parent->mdev_types_kset;
+
+ ret = kobject_init_and_add(&type->kobj, &mdev_type_ktype, NULL,
+ "%s-%s", dev_driver_string(parent->dev),
+ group->name);
+ if (ret) {
+ kfree(type);
+ return ERR_PTR(ret);
+ }
+
+ ret = sysfs_create_file(&type->kobj, &mdev_type_attr_create.attr);
+ if (ret)
+ goto attr_create_failed;
+
+ type->devices_kobj = kobject_create_and_add("devices", &type->kobj);
+ if (!type->devices_kobj) {
+ ret = -ENOMEM;
+ goto attr_devices_failed;
+ }
+
+ ret = sysfs_create_files(&type->kobj,
+ (const struct attribute **)group->attrs);
+ if (ret) {
+ ret = -ENOMEM;
+ goto attrs_failed;
+ }
+
+ type->group = group;
+ type->parent = parent;
+ return type;
+
+attrs_failed:
+ kobject_put(type->devices_kobj);
+attr_devices_failed:
+ sysfs_remove_file(&type->kobj, &mdev_type_attr_create.attr);
+attr_create_failed:
+ kobject_del(&type->kobj);
+ kobject_put(&type->kobj);
+ return ERR_PTR(ret);
+}
+
+static void remove_mdev_supported_type(struct mdev_type *type)
+{
+ sysfs_remove_files(&type->kobj,
+ (const struct attribute **)type->group->attrs);
+ kobject_put(type->devices_kobj);
+ sysfs_remove_file(&type->kobj, &mdev_type_attr_create.attr);
+ kobject_del(&type->kobj);
+ kobject_put(&type->kobj);
+}
+
+static int add_mdev_supported_type_groups(struct parent_device *parent)
+{
+ int i;
+
+ for (i = 0; parent->ops->supported_type_groups[i]; i++) {
+ struct mdev_type *type;
+
+ type = add_mdev_supported_type(parent,
+ parent->ops->supported_type_groups[i]);
+ if (IS_ERR(type)) {
+ struct mdev_type *ltype, *tmp;
+
+ list_for_each_entry_safe(ltype, tmp, &parent->type_list,
+ next) {
+ list_del(&ltype->next);
+ remove_mdev_supported_type(ltype);
+ }
+ return PTR_ERR(type);
+ }
+ list_add(&type->next, &parent->type_list);
+ }
+ return 0;
+}
+
+/* mdev sysfs functions */
+void parent_remove_sysfs_files(struct parent_device *parent)
+{
+ struct mdev_type *type, *tmp;
+
+ list_for_each_entry_safe(type, tmp, &parent->type_list, next) {
+ list_del(&type->next);
+ remove_mdev_supported_type(type);
+ }
+
+ sysfs_remove_groups(&parent->dev->kobj, parent->ops->dev_attr_groups);
+ kset_unregister(parent->mdev_types_kset);
+}
+
+int parent_create_sysfs_files(struct parent_device *parent)
+{
+ int ret;
+
+ parent->mdev_types_kset = kset_create_and_add("mdev_supported_types",
+ NULL, &parent->dev->kobj);
+
+ if (!parent->mdev_types_kset)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&parent->type_list);
+
+ ret = sysfs_create_groups(&parent->dev->kobj,
+ parent->ops->dev_attr_groups);
+ if (ret)
+ goto create_err;
+
+ ret = add_mdev_supported_type_groups(parent);
+ if (ret)
+ sysfs_remove_groups(&parent->dev->kobj,
+ parent->ops->dev_attr_groups);
+ else
+ return ret;
+
+create_err:
+ kset_unregister(parent->mdev_types_kset);
+ return ret;
+}
+
+static ssize_t remove_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ unsigned long val;
+
+ if (kstrtoul(buf, 0, &val) < 0)
+ return -EINVAL;
+
+ if (val && device_remove_file_self(dev, attr)) {
+ int ret;
+
+ ret = mdev_device_remove(dev, false);
+ if (ret) {
+ device_create_file(dev, attr);
+ return ret;
+ }
+ }
+
+ return count;
+}
+
+static DEVICE_ATTR_WO(remove);
+
+static const struct attribute *mdev_device_attrs[] = {
+ &dev_attr_remove.attr,
+ NULL,
+};
+
+int mdev_create_sysfs_files(struct device *dev, struct mdev_type *type)
+{
+ int ret;
+
+ ret = sysfs_create_files(&dev->kobj, mdev_device_attrs);
+ if (ret)
+ return ret;
+
+ ret = sysfs_create_link(type->devices_kobj, &dev->kobj, dev_name(dev));
+ if (ret)
+ goto device_link_failed;
+
+ ret = sysfs_create_link(&dev->kobj, &type->kobj, "mdev_type");
+ if (ret)
+ goto type_link_failed;
+
+ return ret;
+
+type_link_failed:
+ sysfs_remove_link(type->devices_kobj, dev_name(dev));
+device_link_failed:
+ sysfs_remove_files(&dev->kobj, mdev_device_attrs);
+ return ret;
+}
+
+void mdev_remove_sysfs_files(struct device *dev, struct mdev_type *type)
+{
+ sysfs_remove_link(&dev->kobj, "mdev_type");
+ sysfs_remove_link(type->devices_kobj, dev_name(dev));
+ sysfs_remove_files(&dev->kobj, mdev_device_attrs);
+}
diff --git a/drivers/vfio/mdev/vfio_mdev.c b/drivers/vfio/mdev/vfio_mdev.c
new file mode 100644
index 000000000000..ffc36758cb84
--- /dev/null
+++ b/drivers/vfio/mdev/vfio_mdev.c
@@ -0,0 +1,148 @@
+/*
+ * VFIO based driver for Mediated device
+ *
+ * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
+ * Author: Neo Jia <cjia@nvidia.com>
+ * Kirti Wankhede <kwankhede@nvidia.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/vfio.h>
+#include <linux/mdev.h>
+
+#include "mdev_private.h"
+
+#define DRIVER_VERSION "0.1"
+#define DRIVER_AUTHOR "NVIDIA Corporation"
+#define DRIVER_DESC "VFIO based driver for Mediated device"
+
+static int vfio_mdev_open(void *device_data)
+{
+ struct mdev_device *mdev = device_data;
+ struct parent_device *parent = mdev->parent;
+ int ret;
+
+ if (unlikely(!parent->ops->open))
+ return -EINVAL;
+
+ if (!try_module_get(THIS_MODULE))
+ return -ENODEV;
+
+ ret = parent->ops->open(mdev);
+ if (ret)
+ module_put(THIS_MODULE);
+
+ return ret;
+}
+
+static void vfio_mdev_release(void *device_data)
+{
+ struct mdev_device *mdev = device_data;
+ struct parent_device *parent = mdev->parent;
+
+ if (likely(parent->ops->release))
+ parent->ops->release(mdev);
+
+ module_put(THIS_MODULE);
+}
+
+static long vfio_mdev_unlocked_ioctl(void *device_data,
+ unsigned int cmd, unsigned long arg)
+{
+ struct mdev_device *mdev = device_data;
+ struct parent_device *parent = mdev->parent;
+
+ if (unlikely(!parent->ops->ioctl))
+ return -EINVAL;
+
+ return parent->ops->ioctl(mdev, cmd, arg);
+}
+
+static ssize_t vfio_mdev_read(void *device_data, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct mdev_device *mdev = device_data;
+ struct parent_device *parent = mdev->parent;
+
+ if (unlikely(!parent->ops->read))
+ return -EINVAL;
+
+ return parent->ops->read(mdev, buf, count, ppos);
+}
+
+static ssize_t vfio_mdev_write(void *device_data, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct mdev_device *mdev = device_data;
+ struct parent_device *parent = mdev->parent;
+
+ if (unlikely(!parent->ops->write))
+ return -EINVAL;
+
+ return parent->ops->write(mdev, buf, count, ppos);
+}
+
+static int vfio_mdev_mmap(void *device_data, struct vm_area_struct *vma)
+{
+ struct mdev_device *mdev = device_data;
+ struct parent_device *parent = mdev->parent;
+
+ if (unlikely(!parent->ops->mmap))
+ return -EINVAL;
+
+ return parent->ops->mmap(mdev, vma);
+}
+
+static const struct vfio_device_ops vfio_mdev_dev_ops = {
+ .name = "vfio-mdev",
+ .open = vfio_mdev_open,
+ .release = vfio_mdev_release,
+ .ioctl = vfio_mdev_unlocked_ioctl,
+ .read = vfio_mdev_read,
+ .write = vfio_mdev_write,
+ .mmap = vfio_mdev_mmap,
+};
+
+int vfio_mdev_probe(struct device *dev)
+{
+ struct mdev_device *mdev = to_mdev_device(dev);
+
+ return vfio_add_group_dev(dev, &vfio_mdev_dev_ops, mdev);
+}
+
+void vfio_mdev_remove(struct device *dev)
+{
+ vfio_del_group_dev(dev);
+}
+
+struct mdev_driver vfio_mdev_driver = {
+ .name = "vfio_mdev",
+ .probe = vfio_mdev_probe,
+ .remove = vfio_mdev_remove,
+};
+
+static int __init vfio_mdev_init(void)
+{
+ return mdev_register_driver(&vfio_mdev_driver, THIS_MODULE);
+}
+
+static void __exit vfio_mdev_exit(void)
+{
+ mdev_unregister_driver(&vfio_mdev_driver);
+}
+
+module_init(vfio_mdev_init)
+module_exit(vfio_mdev_exit)
+
+MODULE_VERSION(DRIVER_VERSION);
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR(DRIVER_AUTHOR);
+MODULE_DESCRIPTION(DRIVER_DESC);
diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c
index 031bc08d000d..dcd7c2a99618 100644
--- a/drivers/vfio/pci/vfio_pci.c
+++ b/drivers/vfio/pci/vfio_pci.c
@@ -558,10 +558,9 @@ static int vfio_pci_for_each_slot_or_bus(struct pci_dev *pdev,
static int msix_sparse_mmap_cap(struct vfio_pci_device *vdev,
struct vfio_info_cap *caps)
{
- struct vfio_info_cap_header *header;
struct vfio_region_info_cap_sparse_mmap *sparse;
size_t end, size;
- int nr_areas = 2, i = 0;
+ int nr_areas = 2, i = 0, ret;
end = pci_resource_len(vdev->pdev, vdev->msix_bar);
@@ -572,13 +571,10 @@ static int msix_sparse_mmap_cap(struct vfio_pci_device *vdev,
size = sizeof(*sparse) + (nr_areas * sizeof(*sparse->areas));
- header = vfio_info_cap_add(caps, size,
- VFIO_REGION_INFO_CAP_SPARSE_MMAP, 1);
- if (IS_ERR(header))
- return PTR_ERR(header);
+ sparse = kzalloc(size, GFP_KERNEL);
+ if (!sparse)
+ return -ENOMEM;
- sparse = container_of(header,
- struct vfio_region_info_cap_sparse_mmap, header);
sparse->nr_areas = nr_areas;
if (vdev->msix_offset & PAGE_MASK) {
@@ -594,26 +590,11 @@ static int msix_sparse_mmap_cap(struct vfio_pci_device *vdev,
i++;
}
- return 0;
-}
-
-static int region_type_cap(struct vfio_pci_device *vdev,
- struct vfio_info_cap *caps,
- unsigned int type, unsigned int subtype)
-{
- struct vfio_info_cap_header *header;
- struct vfio_region_info_cap_type *cap;
-
- header = vfio_info_cap_add(caps, sizeof(*cap),
- VFIO_REGION_INFO_CAP_TYPE, 1);
- if (IS_ERR(header))
- return PTR_ERR(header);
-
- cap = container_of(header, struct vfio_region_info_cap_type, header);
- cap->type = type;
- cap->subtype = subtype;
+ ret = vfio_info_add_capability(caps, VFIO_REGION_INFO_CAP_SPARSE_MMAP,
+ sparse);
+ kfree(sparse);
- return 0;
+ return ret;
}
int vfio_pci_register_dev_region(struct vfio_pci_device *vdev,
@@ -752,6 +733,9 @@ static long vfio_pci_ioctl(void *device_data,
break;
default:
+ {
+ struct vfio_region_info_cap_type cap_type;
+
if (info.index >=
VFIO_PCI_NUM_REGIONS + vdev->num_regions)
return -EINVAL;
@@ -762,11 +746,16 @@ static long vfio_pci_ioctl(void *device_data,
info.size = vdev->region[i].size;
info.flags = vdev->region[i].flags;
- ret = region_type_cap(vdev, &caps,
- vdev->region[i].type,
- vdev->region[i].subtype);
+ cap_type.type = vdev->region[i].type;
+ cap_type.subtype = vdev->region[i].subtype;
+
+ ret = vfio_info_add_capability(&caps,
+ VFIO_REGION_INFO_CAP_TYPE,
+ &cap_type);
if (ret)
return ret;
+
+ }
}
if (caps.size) {
@@ -829,45 +818,25 @@ static long vfio_pci_ioctl(void *device_data,
} else if (cmd == VFIO_DEVICE_SET_IRQS) {
struct vfio_irq_set hdr;
- size_t size;
u8 *data = NULL;
int max, ret = 0;
+ size_t data_size = 0;
minsz = offsetofend(struct vfio_irq_set, count);
if (copy_from_user(&hdr, (void __user *)arg, minsz))
return -EFAULT;
- if (hdr.argsz < minsz || hdr.index >= VFIO_PCI_NUM_IRQS ||
- hdr.count >= (U32_MAX - hdr.start) ||
- hdr.flags & ~(VFIO_IRQ_SET_DATA_TYPE_MASK |
- VFIO_IRQ_SET_ACTION_TYPE_MASK))
- return -EINVAL;
-
max = vfio_pci_get_irq_count(vdev, hdr.index);
- if (hdr.start >= max || hdr.start + hdr.count > max)
- return -EINVAL;
- switch (hdr.flags & VFIO_IRQ_SET_DATA_TYPE_MASK) {
- case VFIO_IRQ_SET_DATA_NONE:
- size = 0;
- break;
- case VFIO_IRQ_SET_DATA_BOOL:
- size = sizeof(uint8_t);
- break;
- case VFIO_IRQ_SET_DATA_EVENTFD:
- size = sizeof(int32_t);
- break;
- default:
- return -EINVAL;
- }
-
- if (size) {
- if (hdr.argsz - minsz < hdr.count * size)
- return -EINVAL;
+ ret = vfio_set_irqs_validate_and_prepare(&hdr, max,
+ VFIO_PCI_NUM_IRQS, &data_size);
+ if (ret)
+ return ret;
+ if (data_size) {
data = memdup_user((void __user *)(arg + minsz),
- hdr.count * size);
+ data_size);
if (IS_ERR(data))
return PTR_ERR(data);
}
diff --git a/drivers/vfio/pci/vfio_pci_config.c b/drivers/vfio/pci/vfio_pci_config.c
index 65d4a3015542..330a57024cbc 100644
--- a/drivers/vfio/pci/vfio_pci_config.c
+++ b/drivers/vfio/pci/vfio_pci_config.c
@@ -31,8 +31,6 @@
#include "vfio_pci_private.h"
-#define PCI_CFG_SPACE_SIZE 256
-
/* Fake capability ID for standard config space */
#define PCI_CAP_ID_BASIC 0
@@ -152,7 +150,7 @@ static int vfio_user_config_read(struct pci_dev *pdev, int offset,
*val = cpu_to_le32(tmp_val);
- return pcibios_err_to_errno(ret);
+ return ret;
}
static int vfio_user_config_write(struct pci_dev *pdev, int offset,
@@ -173,7 +171,7 @@ static int vfio_user_config_write(struct pci_dev *pdev, int offset,
break;
}
- return pcibios_err_to_errno(ret);
+ return ret;
}
static int vfio_default_config_read(struct vfio_pci_device *vdev, int pos,
@@ -257,7 +255,7 @@ static int vfio_direct_config_read(struct vfio_pci_device *vdev, int pos,
ret = vfio_user_config_read(vdev->pdev, pos, val, count);
if (ret)
- return pcibios_err_to_errno(ret);
+ return ret;
if (pos >= PCI_CFG_SPACE_SIZE) { /* Extended cap header mangling */
if (offset < 4)
@@ -295,7 +293,7 @@ static int vfio_raw_config_read(struct vfio_pci_device *vdev, int pos,
ret = vfio_user_config_read(vdev->pdev, pos, val, count);
if (ret)
- return pcibios_err_to_errno(ret);
+ return ret;
return count;
}
@@ -1089,7 +1087,7 @@ static int vfio_msi_config_write(struct vfio_pci_device *vdev, int pos,
start + PCI_MSI_FLAGS,
flags);
if (ret)
- return pcibios_err_to_errno(ret);
+ return ret;
}
return count;
diff --git a/drivers/vfio/platform/vfio_platform_common.c b/drivers/vfio/platform/vfio_platform_common.c
index d78142830754..4c27f4be3c3d 100644
--- a/drivers/vfio/platform/vfio_platform_common.c
+++ b/drivers/vfio/platform/vfio_platform_common.c
@@ -364,36 +364,21 @@ static long vfio_platform_ioctl(void *device_data,
struct vfio_irq_set hdr;
u8 *data = NULL;
int ret = 0;
+ size_t data_size = 0;
minsz = offsetofend(struct vfio_irq_set, count);
if (copy_from_user(&hdr, (void __user *)arg, minsz))
return -EFAULT;
- if (hdr.argsz < minsz)
- return -EINVAL;
-
- if (hdr.index >= vdev->num_irqs)
- return -EINVAL;
-
- if (hdr.flags & ~(VFIO_IRQ_SET_DATA_TYPE_MASK |
- VFIO_IRQ_SET_ACTION_TYPE_MASK))
- return -EINVAL;
-
- if (!(hdr.flags & VFIO_IRQ_SET_DATA_NONE)) {
- size_t size;
-
- if (hdr.flags & VFIO_IRQ_SET_DATA_BOOL)
- size = sizeof(uint8_t);
- else if (hdr.flags & VFIO_IRQ_SET_DATA_EVENTFD)
- size = sizeof(int32_t);
- else
- return -EINVAL;
-
- if (hdr.argsz - minsz < size)
- return -EINVAL;
+ ret = vfio_set_irqs_validate_and_prepare(&hdr, vdev->num_irqs,
+ vdev->num_irqs, &data_size);
+ if (ret)
+ return ret;
- data = memdup_user((void __user *)(arg + minsz), size);
+ if (data_size) {
+ data = memdup_user((void __user *)(arg + minsz),
+ data_size);
if (IS_ERR(data))
return PTR_ERR(data);
}
diff --git a/drivers/vfio/vfio.c b/drivers/vfio/vfio.c
index d1d70e0b011b..9901c4671e2f 100644
--- a/drivers/vfio/vfio.c
+++ b/drivers/vfio/vfio.c
@@ -86,6 +86,8 @@ struct vfio_group {
struct mutex unbound_lock;
atomic_t opened;
bool noiommu;
+ struct kvm *kvm;
+ struct blocking_notifier_head notifier;
};
struct vfio_device {
@@ -339,6 +341,7 @@ static struct vfio_group *vfio_create_group(struct iommu_group *iommu_group)
#ifdef CONFIG_VFIO_NOIOMMU
group->noiommu = (iommu_group_get_iommudata(iommu_group) == &noiommu);
#endif
+ BLOCKING_INIT_NOTIFIER_HEAD(&group->notifier);
group->nb.notifier_call = vfio_iommu_group_notifier;
@@ -480,6 +483,21 @@ static struct vfio_group *vfio_group_get_from_minor(int minor)
return group;
}
+static struct vfio_group *vfio_group_get_from_dev(struct device *dev)
+{
+ struct iommu_group *iommu_group;
+ struct vfio_group *group;
+
+ iommu_group = iommu_group_get(dev);
+ if (!iommu_group)
+ return NULL;
+
+ group = vfio_group_get_from_iommu(iommu_group);
+ iommu_group_put(iommu_group);
+
+ return group;
+}
+
/**
* Device objects - create, release, get, put, search
*/
@@ -811,16 +829,10 @@ EXPORT_SYMBOL_GPL(vfio_add_group_dev);
*/
struct vfio_device *vfio_device_get_from_dev(struct device *dev)
{
- struct iommu_group *iommu_group;
struct vfio_group *group;
struct vfio_device *device;
- iommu_group = iommu_group_get(dev);
- if (!iommu_group)
- return NULL;
-
- group = vfio_group_get_from_iommu(iommu_group);
- iommu_group_put(iommu_group);
+ group = vfio_group_get_from_dev(dev);
if (!group)
return NULL;
@@ -1376,6 +1388,23 @@ static bool vfio_group_viable(struct vfio_group *group)
group, vfio_dev_viable) == 0);
}
+static int vfio_group_add_container_user(struct vfio_group *group)
+{
+ if (!atomic_inc_not_zero(&group->container_users))
+ return -EINVAL;
+
+ if (group->noiommu) {
+ atomic_dec(&group->container_users);
+ return -EPERM;
+ }
+ if (!group->container->iommu_driver || !vfio_group_viable(group)) {
+ atomic_dec(&group->container_users);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static const struct file_operations vfio_device_fops;
static int vfio_group_get_device_fd(struct vfio_group *group, char *buf)
@@ -1555,6 +1584,9 @@ static int vfio_group_fops_release(struct inode *inode, struct file *filep)
filep->private_data = NULL;
+	/* Warn if a user forgot to unregister a notifier */
+ WARN_ON(group->notifier.head);
+
vfio_group_try_dissolve_container(group);
atomic_dec(&group->opened);
@@ -1685,23 +1717,14 @@ static const struct file_operations vfio_device_fops = {
struct vfio_group *vfio_group_get_external_user(struct file *filep)
{
struct vfio_group *group = filep->private_data;
+ int ret;
if (filep->f_op != &vfio_group_fops)
return ERR_PTR(-EINVAL);
- if (!atomic_inc_not_zero(&group->container_users))
- return ERR_PTR(-EINVAL);
-
- if (group->noiommu) {
- atomic_dec(&group->container_users);
- return ERR_PTR(-EPERM);
- }
-
- if (!group->container->iommu_driver ||
- !vfio_group_viable(group)) {
- atomic_dec(&group->container_users);
- return ERR_PTR(-EINVAL);
- }
+ ret = vfio_group_add_container_user(group);
+ if (ret)
+ return ERR_PTR(ret);
vfio_group_get(group);
@@ -1763,7 +1786,7 @@ struct vfio_info_cap_header *vfio_info_cap_add(struct vfio_info_cap *caps,
header->version = version;
/* Add to the end of the capability chain */
- for (tmp = caps->buf; tmp->next; tmp = (void *)tmp + tmp->next)
+ for (tmp = buf; tmp->next; tmp = buf + tmp->next)
; /* nothing */
tmp->next = caps->size;
@@ -1776,11 +1799,403 @@ EXPORT_SYMBOL_GPL(vfio_info_cap_add);
void vfio_info_cap_shift(struct vfio_info_cap *caps, size_t offset)
{
struct vfio_info_cap_header *tmp;
+ void *buf = (void *)caps->buf;
- for (tmp = caps->buf; tmp->next; tmp = (void *)tmp + tmp->next - offset)
+ for (tmp = buf; tmp->next; tmp = buf + tmp->next - offset)
tmp->next += offset;
}
-EXPORT_SYMBOL_GPL(vfio_info_cap_shift);
+EXPORT_SYMBOL(vfio_info_cap_shift);
+
+static int sparse_mmap_cap(struct vfio_info_cap *caps, void *cap_type)
+{
+ struct vfio_info_cap_header *header;
+ struct vfio_region_info_cap_sparse_mmap *sparse_cap, *sparse = cap_type;
+ size_t size;
+
+ size = sizeof(*sparse) + sparse->nr_areas * sizeof(*sparse->areas);
+ header = vfio_info_cap_add(caps, size,
+ VFIO_REGION_INFO_CAP_SPARSE_MMAP, 1);
+ if (IS_ERR(header))
+ return PTR_ERR(header);
+
+ sparse_cap = container_of(header,
+ struct vfio_region_info_cap_sparse_mmap, header);
+ sparse_cap->nr_areas = sparse->nr_areas;
+ memcpy(sparse_cap->areas, sparse->areas,
+ sparse->nr_areas * sizeof(*sparse->areas));
+ return 0;
+}
+
+static int region_type_cap(struct vfio_info_cap *caps, void *cap_type)
+{
+ struct vfio_info_cap_header *header;
+ struct vfio_region_info_cap_type *type_cap, *cap = cap_type;
+
+ header = vfio_info_cap_add(caps, sizeof(*cap),
+ VFIO_REGION_INFO_CAP_TYPE, 1);
+ if (IS_ERR(header))
+ return PTR_ERR(header);
+
+ type_cap = container_of(header, struct vfio_region_info_cap_type,
+ header);
+ type_cap->type = cap->type;
+ type_cap->subtype = cap->subtype;
+ return 0;
+}
+
+int vfio_info_add_capability(struct vfio_info_cap *caps, int cap_type_id,
+ void *cap_type)
+{
+ int ret = -EINVAL;
+
+ if (!cap_type)
+ return 0;
+
+ switch (cap_type_id) {
+ case VFIO_REGION_INFO_CAP_SPARSE_MMAP:
+ ret = sparse_mmap_cap(caps, cap_type);
+ break;
+
+ case VFIO_REGION_INFO_CAP_TYPE:
+ ret = region_type_cap(caps, cap_type);
+ break;
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL(vfio_info_add_capability);
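A hedged sketch of how a vendor driver might feed vfio_info_add_capability() when answering VFIO_DEVICE_GET_REGION_INFO; the single mmap'able area at offset 0 is invented for illustration (example_report_sparse is a hypothetical name):

    /* Sketch: report one sparse-mmap area for a region (layout invented). */
    static int example_report_sparse(struct vfio_info_cap *caps)
    {
        struct vfio_region_info_cap_sparse_mmap *sparse;
        size_t size = sizeof(*sparse) + sizeof(*sparse->areas);
        int ret;

        sparse = kzalloc(size, GFP_KERNEL);
        if (!sparse)
            return -ENOMEM;

        sparse->nr_areas = 1;
        sparse->areas[0].offset = 0;
        sparse->areas[0].size = PAGE_SIZE;

        ret = vfio_info_add_capability(caps, VFIO_REGION_INFO_CAP_SPARSE_MMAP,
                                       sparse);
        kfree(sparse);
        return ret;
    }
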
+
+int vfio_set_irqs_validate_and_prepare(struct vfio_irq_set *hdr, int num_irqs,
+ int max_irq_type, size_t *data_size)
+{
+ unsigned long minsz;
+ size_t size;
+
+ minsz = offsetofend(struct vfio_irq_set, count);
+
+ if ((hdr->argsz < minsz) || (hdr->index >= max_irq_type) ||
+ (hdr->count >= (U32_MAX - hdr->start)) ||
+ (hdr->flags & ~(VFIO_IRQ_SET_DATA_TYPE_MASK |
+ VFIO_IRQ_SET_ACTION_TYPE_MASK)))
+ return -EINVAL;
+
+ if (data_size)
+ *data_size = 0;
+
+ if (hdr->start >= num_irqs || hdr->start + hdr->count > num_irqs)
+ return -EINVAL;
+
+ switch (hdr->flags & VFIO_IRQ_SET_DATA_TYPE_MASK) {
+ case VFIO_IRQ_SET_DATA_NONE:
+ size = 0;
+ break;
+ case VFIO_IRQ_SET_DATA_BOOL:
+ size = sizeof(uint8_t);
+ break;
+ case VFIO_IRQ_SET_DATA_EVENTFD:
+ size = sizeof(int32_t);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (size) {
+ if (hdr->argsz - minsz < hdr->count * size)
+ return -EINVAL;
+
+ if (!data_size)
+ return -EINVAL;
+
+ *data_size = hdr->count * size;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(vfio_set_irqs_validate_and_prepare);
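The helper folds in the argsz/index/start/count/flags boilerplate that every VFIO_DEVICE_SET_IRQS implementation repeats, and tells the caller how many bytes of trailing data to copy. A sketch of the intended call site, assuming a PCI-like IRQ index space (example_set_irqs and num_irqs are hypothetical):

    /* Sketch: validate a VFIO_DEVICE_SET_IRQS request, then fetch data. */
    static long example_set_irqs(unsigned long arg, int num_irqs)
    {
        unsigned long minsz = offsetofend(struct vfio_irq_set, count);
        struct vfio_irq_set hdr;
        size_t data_size = 0;
        u8 *data = NULL;
        int ret;

        if (copy_from_user(&hdr, (void __user *)arg, minsz))
            return -EFAULT;

        ret = vfio_set_irqs_validate_and_prepare(&hdr, num_irqs,
                                                 VFIO_PCI_NUM_IRQS, &data_size);
        if (ret)
            return ret;

        if (data_size) {
            data = memdup_user((void __user *)(arg + minsz), data_size);
            if (IS_ERR(data))
                return PTR_ERR(data);
        }

        /* ... program hdr.index/start/count from data ... */

        kfree(data);
        return 0;
    }
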
+
+/*
+ * Pin a set of guest PFNs and return their associated host PFNs for the
+ * local domain only.
+ * @dev [in] : device
+ * @user_pfn [in]: array of user/guest PFNs to be pinned.
+ * @npage [in] : count of elements in the user_pfn array. This count should
+ * not be greater than VFIO_PIN_PAGES_MAX_ENTRIES.
+ * @prot [in] : protection flags
+ * @phys_pfn[out]: array of host PFNs
+ * Return error or number of pages pinned.
+ */
+int vfio_pin_pages(struct device *dev, unsigned long *user_pfn, int npage,
+ int prot, unsigned long *phys_pfn)
+{
+ struct vfio_container *container;
+ struct vfio_group *group;
+ struct vfio_iommu_driver *driver;
+ int ret;
+
+ if (!dev || !user_pfn || !phys_pfn || !npage)
+ return -EINVAL;
+
+ if (npage > VFIO_PIN_PAGES_MAX_ENTRIES)
+ return -E2BIG;
+
+ group = vfio_group_get_from_dev(dev);
+ if (!group)
+ return -ENODEV;
+
+ ret = vfio_group_add_container_user(group);
+ if (ret)
+ goto err_pin_pages;
+
+ container = group->container;
+ down_read(&container->group_lock);
+
+ driver = container->iommu_driver;
+ if (likely(driver && driver->ops->pin_pages))
+ ret = driver->ops->pin_pages(container->iommu_data, user_pfn,
+ npage, prot, phys_pfn);
+ else
+ ret = -ENOTTY;
+
+ up_read(&container->group_lock);
+ vfio_group_try_dissolve_container(group);
+
+err_pin_pages:
+ vfio_group_put(group);
+ return ret;
+}
+EXPORT_SYMBOL(vfio_pin_pages);
+
+/*
+ * Unpin a set of host PFNs for the local domain only.
+ * @dev [in] : device
+ * @user_pfn [in]: array of user/guest PFNs to be unpinned.
+ * @npage [in] : count of elements in the user_pfn array. This count should
+ * not be greater than VFIO_PIN_PAGES_MAX_ENTRIES.
+ * Return error or number of pages unpinned.
+ */
+int vfio_unpin_pages(struct device *dev, unsigned long *user_pfn, int npage)
+{
+ struct vfio_container *container;
+ struct vfio_group *group;
+ struct vfio_iommu_driver *driver;
+ int ret;
+
+ if (!dev || !user_pfn || !npage)
+ return -EINVAL;
+
+ if (npage > VFIO_PIN_PAGES_MAX_ENTRIES)
+ return -E2BIG;
+
+ group = vfio_group_get_from_dev(dev);
+ if (!group)
+ return -ENODEV;
+
+ ret = vfio_group_add_container_user(group);
+ if (ret)
+ goto err_unpin_pages;
+
+ container = group->container;
+ down_read(&container->group_lock);
+
+ driver = container->iommu_driver;
+ if (likely(driver && driver->ops->unpin_pages))
+ ret = driver->ops->unpin_pages(container->iommu_data, user_pfn,
+ npage);
+ else
+ ret = -ENOTTY;
+
+ up_read(&container->group_lock);
+ vfio_group_try_dissolve_container(group);
+
+err_unpin_pages:
+ vfio_group_put(group);
+ return ret;
+}
+EXPORT_SYMBOL(vfio_unpin_pages);
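Pin and unpin are symmetric, and a successful pin must eventually be balanced by an unpin of the same user PFNs. A minimal sketch for a single page, as a hypothetical mdev vendor driver might translate a guest frame to a host frame (example_translate is not part of this patch):

    /* Sketch: pin one guest PFN, use the host PFN, then unpin. */
    static int example_translate(struct device *dev, unsigned long gfn,
                                 unsigned long *hpfn)
    {
        unsigned long user_pfn = gfn;
        int ret;

        ret = vfio_pin_pages(dev, &user_pfn, 1, IOMMU_READ | IOMMU_WRITE,
                             hpfn);
        if (ret != 1)   /* returns the number of pages pinned */
            return ret < 0 ? ret : -EFAULT;

        /* ... program the device with *hpfn ... */

        return vfio_unpin_pages(dev, &user_pfn, 1);
    }
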
+
+static int vfio_register_iommu_notifier(struct vfio_group *group,
+ unsigned long *events,
+ struct notifier_block *nb)
+{
+ struct vfio_container *container;
+ struct vfio_iommu_driver *driver;
+ int ret;
+
+ ret = vfio_group_add_container_user(group);
+ if (ret)
+ return -EINVAL;
+
+ container = group->container;
+ down_read(&container->group_lock);
+
+ driver = container->iommu_driver;
+ if (likely(driver && driver->ops->register_notifier))
+ ret = driver->ops->register_notifier(container->iommu_data,
+ events, nb);
+ else
+ ret = -ENOTTY;
+
+ up_read(&container->group_lock);
+ vfio_group_try_dissolve_container(group);
+
+ return ret;
+}
+
+static int vfio_unregister_iommu_notifier(struct vfio_group *group,
+ struct notifier_block *nb)
+{
+ struct vfio_container *container;
+ struct vfio_iommu_driver *driver;
+ int ret;
+
+ ret = vfio_group_add_container_user(group);
+ if (ret)
+ return -EINVAL;
+
+ container = group->container;
+ down_read(&container->group_lock);
+
+ driver = container->iommu_driver;
+ if (likely(driver && driver->ops->unregister_notifier))
+ ret = driver->ops->unregister_notifier(container->iommu_data,
+ nb);
+ else
+ ret = -ENOTTY;
+
+ up_read(&container->group_lock);
+ vfio_group_try_dissolve_container(group);
+
+ return ret;
+}
+
+void vfio_group_set_kvm(struct vfio_group *group, struct kvm *kvm)
+{
+ group->kvm = kvm;
+ blocking_notifier_call_chain(&group->notifier,
+ VFIO_GROUP_NOTIFY_SET_KVM, kvm);
+}
+EXPORT_SYMBOL_GPL(vfio_group_set_kvm);
+
+static int vfio_register_group_notifier(struct vfio_group *group,
+ unsigned long *events,
+ struct notifier_block *nb)
+{
+ struct vfio_container *container;
+ int ret;
+ bool set_kvm = false;
+
+ if (*events & VFIO_GROUP_NOTIFY_SET_KVM)
+ set_kvm = true;
+
+ /* clear known events */
+ *events &= ~VFIO_GROUP_NOTIFY_SET_KVM;
+
+ /* refuse to continue if unknown events remain */
+ if (*events)
+ return -EINVAL;
+
+ ret = vfio_group_add_container_user(group);
+ if (ret)
+ return -EINVAL;
+
+ container = group->container;
+ down_read(&container->group_lock);
+
+ ret = blocking_notifier_chain_register(&group->notifier, nb);
+
+ /*
+ * The attach of kvm to the vfio_group may already have happened, so
+ * replay it here once upon registration.
+ */
+ if (!ret && set_kvm && group->kvm)
+ blocking_notifier_call_chain(&group->notifier,
+ VFIO_GROUP_NOTIFY_SET_KVM, group->kvm);
+
+ up_read(&container->group_lock);
+ vfio_group_try_dissolve_container(group);
+
+ return ret;
+}
+
+static int vfio_unregister_group_notifier(struct vfio_group *group,
+ struct notifier_block *nb)
+{
+ struct vfio_container *container;
+ int ret;
+
+ ret = vfio_group_add_container_user(group);
+ if (ret)
+ return -EINVAL;
+
+ container = group->container;
+ down_read(&container->group_lock);
+
+ ret = blocking_notifier_chain_unregister(&group->notifier, nb);
+
+ up_read(&container->group_lock);
+ vfio_group_try_dissolve_container(group);
+
+ return ret;
+}
+
+int vfio_register_notifier(struct device *dev, enum vfio_notify_type type,
+ unsigned long *events, struct notifier_block *nb)
+{
+ struct vfio_group *group;
+ int ret;
+
+ if (!dev || !nb || !events || (*events == 0))
+ return -EINVAL;
+
+ group = vfio_group_get_from_dev(dev);
+ if (!group)
+ return -ENODEV;
+
+ switch (type) {
+ case VFIO_IOMMU_NOTIFY:
+ ret = vfio_register_iommu_notifier(group, events, nb);
+ break;
+ case VFIO_GROUP_NOTIFY:
+ ret = vfio_register_group_notifier(group, events, nb);
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ vfio_group_put(group);
+ return ret;
+}
+EXPORT_SYMBOL(vfio_register_notifier);
+
+int vfio_unregister_notifier(struct device *dev, enum vfio_notify_type type,
+ struct notifier_block *nb)
+{
+ struct vfio_group *group;
+ int ret;
+
+ if (!dev || !nb)
+ return -EINVAL;
+
+ group = vfio_group_get_from_dev(dev);
+ if (!group)
+ return -ENODEV;
+
+ switch (type) {
+ case VFIO_IOMMU_NOTIFY:
+ ret = vfio_unregister_iommu_notifier(group, nb);
+ break;
+ case VFIO_GROUP_NOTIFY:
+ ret = vfio_unregister_group_notifier(group, nb);
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ vfio_group_put(group);
+ return ret;
+}
+EXPORT_SYMBOL(vfio_unregister_notifier);
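On the consumer side, a hypothetical mdev vendor driver would register for unmap events while it holds pinned pages; the payload for VFIO_IOMMU_NOTIFY_DMA_UNMAP is a struct vfio_iommu_type1_dma_unmap, as the type1 changes below show (example_notifier and example_open are illustrative names):

    /* Sketch: react to DMA unmaps so pinned pages can be released. */
    static int example_notifier(struct notifier_block *nb,
                                unsigned long action, void *data)
    {
        if (action == VFIO_IOMMU_NOTIFY_DMA_UNMAP) {
            struct vfio_iommu_type1_dma_unmap *unmap = data;

            /* invalidate and vfio_unpin_pages() everything in
             * [unmap->iova, unmap->iova + unmap->size) */
        }
        return NOTIFY_OK;
    }

    static int example_open(struct device *dev, struct notifier_block *nb)
    {
        unsigned long events = VFIO_IOMMU_NOTIFY_DMA_UNMAP;

        nb->notifier_call = example_notifier;
        return vfio_register_notifier(dev, VFIO_IOMMU_NOTIFY, &events, nb);
    }
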
/**
* Module/class support
diff --git a/drivers/vfio/vfio_iommu_spapr_tce.c b/drivers/vfio/vfio_iommu_spapr_tce.c
index 80378ddadc5c..c8823578a1b2 100644
--- a/drivers/vfio/vfio_iommu_spapr_tce.c
+++ b/drivers/vfio/vfio_iommu_spapr_tce.c
@@ -31,49 +31,49 @@
static void tce_iommu_detach_group(void *iommu_data,
struct iommu_group *iommu_group);
-static long try_increment_locked_vm(long npages)
+static long try_increment_locked_vm(struct mm_struct *mm, long npages)
{
long ret = 0, locked, lock_limit;
- if (!current || !current->mm)
- return -ESRCH; /* process exited */
+ if (WARN_ON_ONCE(!mm))
+ return -EPERM;
if (!npages)
return 0;
- down_write(&current->mm->mmap_sem);
- locked = current->mm->locked_vm + npages;
+ down_write(&mm->mmap_sem);
+ locked = mm->locked_vm + npages;
lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
if (locked > lock_limit && !capable(CAP_IPC_LOCK))
ret = -ENOMEM;
else
- current->mm->locked_vm += npages;
+ mm->locked_vm += npages;
pr_debug("[%d] RLIMIT_MEMLOCK +%ld %ld/%ld%s\n", current->pid,
npages << PAGE_SHIFT,
- current->mm->locked_vm << PAGE_SHIFT,
+ mm->locked_vm << PAGE_SHIFT,
rlimit(RLIMIT_MEMLOCK),
ret ? " - exceeded" : "");
- up_write(&current->mm->mmap_sem);
+ up_write(&mm->mmap_sem);
return ret;
}
-static void decrement_locked_vm(long npages)
+static void decrement_locked_vm(struct mm_struct *mm, long npages)
{
- if (!current || !current->mm || !npages)
- return; /* process exited */
+ if (!mm || !npages)
+ return;
- down_write(&current->mm->mmap_sem);
- if (WARN_ON_ONCE(npages > current->mm->locked_vm))
- npages = current->mm->locked_vm;
- current->mm->locked_vm -= npages;
+ down_write(&mm->mmap_sem);
+ if (WARN_ON_ONCE(npages > mm->locked_vm))
+ npages = mm->locked_vm;
+ mm->locked_vm -= npages;
pr_debug("[%d] RLIMIT_MEMLOCK -%ld %ld/%ld\n", current->pid,
npages << PAGE_SHIFT,
- current->mm->locked_vm << PAGE_SHIFT,
+ mm->locked_vm << PAGE_SHIFT,
rlimit(RLIMIT_MEMLOCK));
- up_write(&current->mm->mmap_sem);
+ up_write(&mm->mmap_sem);
}
/*
@@ -89,6 +89,15 @@ struct tce_iommu_group {
};
/*
+ * A container needs to remember which preregistered regions it has
+ * referenced so that it can clean them up properly when the userspace
+ * process exits.
+ */
+struct tce_iommu_prereg {
+ struct list_head next;
+ struct mm_iommu_table_group_mem_t *mem;
+};
+
+/*
* The container descriptor supports only a single group per container.
* Required by the API as the container is not supplied with the IOMMU group
* at the moment of initialization.
@@ -97,24 +106,68 @@ struct tce_container {
struct mutex lock;
bool enabled;
bool v2;
+ bool def_window_pending;
unsigned long locked_pages;
+ struct mm_struct *mm;
struct iommu_table *tables[IOMMU_TABLE_GROUP_MAX_TABLES];
struct list_head group_list;
+ struct list_head prereg_list;
};
+static long tce_iommu_mm_set(struct tce_container *container)
+{
+ if (container->mm) {
+ if (container->mm == current->mm)
+ return 0;
+ return -EPERM;
+ }
+ BUG_ON(!current->mm);
+ container->mm = current->mm;
+ atomic_inc(&container->mm->mm_count);
+
+ return 0;
+}
+
+static long tce_iommu_prereg_free(struct tce_container *container,
+ struct tce_iommu_prereg *tcemem)
+{
+ long ret;
+
+ ret = mm_iommu_put(container->mm, tcemem->mem);
+ if (ret)
+ return ret;
+
+ list_del(&tcemem->next);
+ kfree(tcemem);
+
+ return 0;
+}
+
static long tce_iommu_unregister_pages(struct tce_container *container,
__u64 vaddr, __u64 size)
{
struct mm_iommu_table_group_mem_t *mem;
+ struct tce_iommu_prereg *tcemem;
+ bool found = false;
if ((vaddr & ~PAGE_MASK) || (size & ~PAGE_MASK))
return -EINVAL;
- mem = mm_iommu_find(vaddr, size >> PAGE_SHIFT);
+ mem = mm_iommu_find(container->mm, vaddr, size >> PAGE_SHIFT);
if (!mem)
return -ENOENT;
- return mm_iommu_put(mem);
+ list_for_each_entry(tcemem, &container->prereg_list, next) {
+ if (tcemem->mem == mem) {
+ found = true;
+ break;
+ }
+ }
+
+ if (!found)
+ return -ENOENT;
+
+ return tce_iommu_prereg_free(container, tcemem);
}
static long tce_iommu_register_pages(struct tce_container *container,
@@ -122,22 +175,36 @@ static long tce_iommu_register_pages(struct tce_container *container,
{
long ret = 0;
struct mm_iommu_table_group_mem_t *mem = NULL;
+ struct tce_iommu_prereg *tcemem;
unsigned long entries = size >> PAGE_SHIFT;
if ((vaddr & ~PAGE_MASK) || (size & ~PAGE_MASK) ||
((vaddr + size) < vaddr))
return -EINVAL;
- ret = mm_iommu_get(vaddr, entries, &mem);
+ mem = mm_iommu_find(container->mm, vaddr, entries);
+ if (mem) {
+ list_for_each_entry(tcemem, &container->prereg_list, next) {
+ if (tcemem->mem == mem)
+ return -EBUSY;
+ }
+ }
+
+ ret = mm_iommu_get(container->mm, vaddr, entries, &mem);
if (ret)
return ret;
+ tcemem = kzalloc(sizeof(*tcemem), GFP_KERNEL);
+ if (!tcemem) {
+ mm_iommu_put(container->mm, mem);
+ return -ENOMEM;
+ }
+
+ tcemem->mem = mem;
+ list_add(&tcemem->next, &container->prereg_list);
+
container->enabled = true;
return 0;
}
-static long tce_iommu_userspace_view_alloc(struct iommu_table *tbl)
+static long tce_iommu_userspace_view_alloc(struct iommu_table *tbl,
+ struct mm_struct *mm)
{
unsigned long cb = _ALIGN_UP(sizeof(tbl->it_userspace[0]) *
tbl->it_size, PAGE_SIZE);
@@ -146,13 +213,13 @@ static long tce_iommu_userspace_view_alloc(struct iommu_table *tbl)
BUG_ON(tbl->it_userspace);
- ret = try_increment_locked_vm(cb >> PAGE_SHIFT);
+ ret = try_increment_locked_vm(mm, cb >> PAGE_SHIFT);
if (ret)
return ret;
uas = vzalloc(cb);
if (!uas) {
- decrement_locked_vm(cb >> PAGE_SHIFT);
+ decrement_locked_vm(mm, cb >> PAGE_SHIFT);
return -ENOMEM;
}
tbl->it_userspace = uas;
@@ -160,7 +227,8 @@ static long tce_iommu_userspace_view_alloc(struct iommu_table *tbl)
return 0;
}
-static void tce_iommu_userspace_view_free(struct iommu_table *tbl)
+static void tce_iommu_userspace_view_free(struct iommu_table *tbl,
+ struct mm_struct *mm)
{
unsigned long cb = _ALIGN_UP(sizeof(tbl->it_userspace[0]) *
tbl->it_size, PAGE_SIZE);
@@ -170,7 +238,7 @@ static void tce_iommu_userspace_view_free(struct iommu_table *tbl)
vfree(tbl->it_userspace);
tbl->it_userspace = NULL;
- decrement_locked_vm(cb >> PAGE_SHIFT);
+ decrement_locked_vm(mm, cb >> PAGE_SHIFT);
}
static bool tce_page_is_contained(struct page *page, unsigned page_shift)
@@ -230,9 +298,6 @@ static int tce_iommu_enable(struct tce_container *container)
struct iommu_table_group *table_group;
struct tce_iommu_group *tcegrp;
- if (!current->mm)
- return -ESRCH; /* process exited */
-
if (container->enabled)
return -EBUSY;
@@ -277,8 +342,12 @@ static int tce_iommu_enable(struct tce_container *container)
if (!table_group->tce32_size)
return -EPERM;
+ ret = tce_iommu_mm_set(container);
+ if (ret)
+ return ret;
+
locked = table_group->tce32_size >> PAGE_SHIFT;
- ret = try_increment_locked_vm(locked);
+ ret = try_increment_locked_vm(container->mm, locked);
if (ret)
return ret;
@@ -296,10 +365,8 @@ static void tce_iommu_disable(struct tce_container *container)
container->enabled = false;
- if (!current->mm)
- return;
-
- decrement_locked_vm(container->locked_pages);
+ BUG_ON(!container->mm);
+ decrement_locked_vm(container->mm, container->locked_pages);
}
static void *tce_iommu_open(unsigned long arg)
@@ -317,6 +384,7 @@ static void *tce_iommu_open(unsigned long arg)
mutex_init(&container->lock);
INIT_LIST_HEAD_RCU(&container->group_list);
+ INIT_LIST_HEAD_RCU(&container->prereg_list);
container->v2 = arg == VFIO_SPAPR_TCE_v2_IOMMU;
@@ -326,7 +394,8 @@ static void *tce_iommu_open(unsigned long arg)
static int tce_iommu_clear(struct tce_container *container,
struct iommu_table *tbl,
unsigned long entry, unsigned long pages);
-static void tce_iommu_free_table(struct iommu_table *tbl);
+static void tce_iommu_free_table(struct tce_container *container,
+ struct iommu_table *tbl);
static void tce_iommu_release(void *iommu_data)
{
@@ -351,10 +420,20 @@ static void tce_iommu_release(void *iommu_data)
continue;
tce_iommu_clear(container, tbl, tbl->it_offset, tbl->it_size);
- tce_iommu_free_table(tbl);
+ tce_iommu_free_table(container, tbl);
+ }
+
+ while (!list_empty(&container->prereg_list)) {
+ struct tce_iommu_prereg *tcemem;
+
+ tcemem = list_first_entry(&container->prereg_list,
+ struct tce_iommu_prereg, next);
+ WARN_ON_ONCE(tce_iommu_prereg_free(container, tcemem));
}
tce_iommu_disable(container);
+ if (container->mm)
+ mmdrop(container->mm);
mutex_destroy(&container->lock);
kfree(container);
@@ -369,13 +448,14 @@ static void tce_iommu_unuse_page(struct tce_container *container,
put_page(page);
}
-static int tce_iommu_prereg_ua_to_hpa(unsigned long tce, unsigned long size,
+static int tce_iommu_prereg_ua_to_hpa(struct tce_container *container,
+ unsigned long tce, unsigned long size,
unsigned long *phpa, struct mm_iommu_table_group_mem_t **pmem)
{
long ret = 0;
struct mm_iommu_table_group_mem_t *mem;
- mem = mm_iommu_lookup(tce, size);
+ mem = mm_iommu_lookup(container->mm, tce, size);
if (!mem)
return -EINVAL;
@@ -388,18 +468,18 @@ static int tce_iommu_prereg_ua_to_hpa(unsigned long tce, unsigned long size,
return 0;
}
-static void tce_iommu_unuse_page_v2(struct iommu_table *tbl,
- unsigned long entry)
+static void tce_iommu_unuse_page_v2(struct tce_container *container,
+ struct iommu_table *tbl, unsigned long entry)
{
struct mm_iommu_table_group_mem_t *mem = NULL;
int ret;
unsigned long hpa = 0;
unsigned long *pua = IOMMU_TABLE_USERSPACE_ENTRY(tbl, entry);
- if (!pua || !current || !current->mm)
+ if (!pua)
return;
- ret = tce_iommu_prereg_ua_to_hpa(*pua, IOMMU_PAGE_SIZE(tbl),
+ ret = tce_iommu_prereg_ua_to_hpa(container, *pua, IOMMU_PAGE_SIZE(tbl),
&hpa, &mem);
if (ret)
pr_debug("%s: tce %lx at #%lx was not cached, ret=%d\n",
@@ -429,7 +509,7 @@ static int tce_iommu_clear(struct tce_container *container,
continue;
if (container->v2) {
- tce_iommu_unuse_page_v2(tbl, entry);
+ tce_iommu_unuse_page_v2(container, tbl, entry);
continue;
}
@@ -509,13 +589,19 @@ static long tce_iommu_build_v2(struct tce_container *container,
unsigned long hpa;
enum dma_data_direction dirtmp;
+ if (!tbl->it_userspace) {
+ ret = tce_iommu_userspace_view_alloc(tbl, container->mm);
+ if (ret)
+ return ret;
+ }
+
for (i = 0; i < pages; ++i) {
struct mm_iommu_table_group_mem_t *mem = NULL;
unsigned long *pua = IOMMU_TABLE_USERSPACE_ENTRY(tbl,
entry + i);
- ret = tce_iommu_prereg_ua_to_hpa(tce, IOMMU_PAGE_SIZE(tbl),
- &hpa, &mem);
+ ret = tce_iommu_prereg_ua_to_hpa(container,
+ tce, IOMMU_PAGE_SIZE(tbl), &hpa, &mem);
if (ret)
break;
@@ -536,7 +622,7 @@ static long tce_iommu_build_v2(struct tce_container *container,
ret = iommu_tce_xchg(tbl, entry + i, &hpa, &dirtmp);
if (ret) {
/* dirtmp cannot be DMA_NONE here */
- tce_iommu_unuse_page_v2(tbl, entry + i);
+ tce_iommu_unuse_page_v2(container, tbl, entry + i);
pr_err("iommu_tce: %s failed ioba=%lx, tce=%lx, ret=%ld\n",
__func__, entry << tbl->it_page_shift,
tce, ret);
@@ -544,7 +630,7 @@ static long tce_iommu_build_v2(struct tce_container *container,
}
if (dirtmp != DMA_NONE)
- tce_iommu_unuse_page_v2(tbl, entry + i);
+ tce_iommu_unuse_page_v2(container, tbl, entry + i);
*pua = tce;
@@ -572,7 +658,7 @@ static long tce_iommu_create_table(struct tce_container *container,
if (!table_size)
return -EINVAL;
- ret = try_increment_locked_vm(table_size >> PAGE_SHIFT);
+ ret = try_increment_locked_vm(container->mm, table_size >> PAGE_SHIFT);
if (ret)
return ret;
@@ -582,25 +668,17 @@ static long tce_iommu_create_table(struct tce_container *container,
WARN_ON(!ret && !(*ptbl)->it_ops->free);
WARN_ON(!ret && ((*ptbl)->it_allocated_size != table_size));
- if (!ret && container->v2) {
- ret = tce_iommu_userspace_view_alloc(*ptbl);
- if (ret)
- (*ptbl)->it_ops->free(*ptbl);
- }
-
- if (ret)
- decrement_locked_vm(table_size >> PAGE_SHIFT);
-
return ret;
}
-static void tce_iommu_free_table(struct iommu_table *tbl)
+static void tce_iommu_free_table(struct tce_container *container,
+ struct iommu_table *tbl)
{
unsigned long pages = tbl->it_allocated_size >> PAGE_SHIFT;
- tce_iommu_userspace_view_free(tbl);
+ tce_iommu_userspace_view_free(tbl, container->mm);
tbl->it_ops->free(tbl);
- decrement_locked_vm(pages);
+ decrement_locked_vm(container->mm, pages);
}
static long tce_iommu_create_window(struct tce_container *container,
@@ -663,7 +741,7 @@ unset_exit:
table_group = iommu_group_get_iommudata(tcegrp->grp);
table_group->ops->unset_window(table_group, num);
}
- tce_iommu_free_table(tbl);
+ tce_iommu_free_table(container, tbl);
return ret;
}
@@ -701,12 +779,41 @@ static long tce_iommu_remove_window(struct tce_container *container,
/* Free table */
tce_iommu_clear(container, tbl, tbl->it_offset, tbl->it_size);
- tce_iommu_free_table(tbl);
+ tce_iommu_free_table(container, tbl);
container->tables[num] = NULL;
return 0;
}
+static long tce_iommu_create_default_window(struct tce_container *container)
+{
+ long ret;
+ __u64 start_addr = 0;
+ struct tce_iommu_group *tcegrp;
+ struct iommu_table_group *table_group;
+
+ if (!container->def_window_pending)
+ return 0;
+
+ if (!tce_groups_attached(container))
+ return -ENODEV;
+
+ tcegrp = list_first_entry(&container->group_list,
+ struct tce_iommu_group, next);
+ table_group = iommu_group_get_iommudata(tcegrp->grp);
+ if (!table_group)
+ return -ENODEV;
+
+ ret = tce_iommu_create_window(container, IOMMU_PAGE_SHIFT_4K,
+ table_group->tce32_size, 1, &start_addr);
+ WARN_ON_ONCE(!ret && start_addr);
+
+ if (!ret)
+ container->def_window_pending = false;
+
+ return ret;
+}
+
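The deferral exists because window creation now charges locked_vm against container->mm, and that mm is only latched by tce_iommu_mm_set() on the first ioctl; the attach path can therefore no longer create the default window itself. Condensed call pattern, taken from the hunks below:

    /* In tce_iommu_attach_group() (DDW path), only record the intent: */
    if (!tce_groups_attached(container) && !container->tables[0])
        container->def_window_pending = true;

    /* Each DMA-related ioctl then materializes it up front: */
    ret = tce_iommu_create_default_window(container);
    if (ret)
        return ret;
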
static long tce_iommu_ioctl(void *iommu_data,
unsigned int cmd, unsigned long arg)
{
@@ -727,7 +834,17 @@ static long tce_iommu_ioctl(void *iommu_data,
}
return (ret < 0) ? 0 : ret;
+ }
+
+ /*
+ * Sanity check to prevent one userspace process from manipulating
+ * another userspace process's mm.
+ */
+ BUG_ON(!container);
+ if (container->mm && container->mm != current->mm)
+ return -EPERM;
+ switch (cmd) {
case VFIO_IOMMU_SPAPR_TCE_GET_INFO: {
struct vfio_iommu_spapr_tce_info info;
struct tce_iommu_group *tcegrp;
@@ -797,6 +914,10 @@ static long tce_iommu_ioctl(void *iommu_data,
VFIO_DMA_MAP_FLAG_WRITE))
return -EINVAL;
+ ret = tce_iommu_create_default_window(container);
+ if (ret)
+ return ret;
+
num = tce_iommu_find_table(container, param.iova, &tbl);
if (num < 0)
return -ENXIO;
@@ -860,6 +981,10 @@ static long tce_iommu_ioctl(void *iommu_data,
if (param.flags)
return -EINVAL;
+ ret = tce_iommu_create_default_window(container);
+ if (ret)
+ return ret;
+
num = tce_iommu_find_table(container, param.iova, &tbl);
if (num < 0)
return -ENXIO;
@@ -888,6 +1013,10 @@ static long tce_iommu_ioctl(void *iommu_data,
minsz = offsetofend(struct vfio_iommu_spapr_register_memory,
size);
+ ret = tce_iommu_mm_set(container);
+ if (ret)
+ return ret;
+
if (copy_from_user(&param, (void __user *)arg, minsz))
return -EFAULT;
@@ -911,6 +1040,9 @@ static long tce_iommu_ioctl(void *iommu_data,
if (!container->v2)
break;
+ if (!container->mm)
+ return -EPERM;
+
minsz = offsetofend(struct vfio_iommu_spapr_register_memory,
size);
@@ -969,6 +1101,10 @@ static long tce_iommu_ioctl(void *iommu_data,
if (!container->v2)
break;
+ ret = tce_iommu_mm_set(container);
+ if (ret)
+ return ret;
+
if (!tce_groups_attached(container))
return -ENXIO;
@@ -986,6 +1122,10 @@ static long tce_iommu_ioctl(void *iommu_data,
mutex_lock(&container->lock);
+ ret = tce_iommu_create_default_window(container);
+ if (ret)
+ return ret;
+
ret = tce_iommu_create_window(container, create.page_shift,
create.window_size, create.levels,
&create.start_addr);
@@ -1003,6 +1143,10 @@ static long tce_iommu_ioctl(void *iommu_data,
if (!container->v2)
break;
+ ret = tce_iommu_mm_set(container);
+ if (ret)
+ return ret;
+
if (!tce_groups_attached(container))
return -ENXIO;
@@ -1018,6 +1162,11 @@ static long tce_iommu_ioctl(void *iommu_data,
if (remove.flags)
return -EINVAL;
+ if (container->def_window_pending && !remove.start_addr) {
+ container->def_window_pending = false;
+ return 0;
+ }
+
mutex_lock(&container->lock);
ret = tce_iommu_remove_window(container, remove.start_addr);
@@ -1043,7 +1192,7 @@ static void tce_iommu_release_ownership(struct tce_container *container,
continue;
tce_iommu_clear(container, tbl, tbl->it_offset, tbl->it_size);
- tce_iommu_userspace_view_free(tbl);
+ tce_iommu_userspace_view_free(tbl, container->mm);
if (tbl->it_map)
iommu_release_ownership(tbl);
@@ -1062,10 +1211,7 @@ static int tce_iommu_take_ownership(struct tce_container *container,
if (!tbl || !tbl->it_map)
continue;
- rc = tce_iommu_userspace_view_alloc(tbl);
- if (!rc)
- rc = iommu_take_ownership(tbl);
-
+ rc = iommu_take_ownership(tbl);
if (rc) {
for (j = 0; j < i; ++j)
iommu_release_ownership(
@@ -1100,9 +1246,6 @@ static void tce_iommu_release_ownership_ddw(struct tce_container *container,
static long tce_iommu_take_ownership_ddw(struct tce_container *container,
struct iommu_table_group *table_group)
{
- long i, ret = 0;
- struct iommu_table *tbl = NULL;
-
if (!table_group->ops->create_table || !table_group->ops->set_window ||
!table_group->ops->release_ownership) {
WARN_ON_ONCE(1);
@@ -1111,47 +1254,7 @@ static long tce_iommu_take_ownership_ddw(struct tce_container *container,
table_group->ops->take_ownership(table_group);
- /*
- * If it the first group attached, check if there is
- * a default DMA window and create one if none as
- * the userspace expects it to exist.
- */
- if (!tce_groups_attached(container) && !container->tables[0]) {
- ret = tce_iommu_create_table(container,
- table_group,
- 0, /* window number */
- IOMMU_PAGE_SHIFT_4K,
- table_group->tce32_size,
- 1, /* default levels */
- &tbl);
- if (ret)
- goto release_exit;
- else
- container->tables[0] = tbl;
- }
-
- /* Set all windows to the new group */
- for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
- tbl = container->tables[i];
-
- if (!tbl)
- continue;
-
- /* Set the default window to a new group */
- ret = table_group->ops->set_window(table_group, i, tbl);
- if (ret)
- goto release_exit;
- }
-
return 0;
-
-release_exit:
- for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i)
- table_group->ops->unset_window(table_group, i);
-
- table_group->ops->release_ownership(table_group);
-
- return ret;
}
static int tce_iommu_attach_group(void *iommu_data,
@@ -1203,10 +1306,13 @@ static int tce_iommu_attach_group(void *iommu_data,
}
if (!table_group->ops || !table_group->ops->take_ownership ||
- !table_group->ops->release_ownership)
+ !table_group->ops->release_ownership) {
ret = tce_iommu_take_ownership(container, table_group);
- else
+ } else {
ret = tce_iommu_take_ownership_ddw(container, table_group);
+ if (!tce_groups_attached(container) && !container->tables[0])
+ container->def_window_pending = true;
+ }
if (!ret) {
tcegrp->grp = iommu_group;
diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
index 2ba19424e4a1..f3726ba12aa6 100644
--- a/drivers/vfio/vfio_iommu_type1.c
+++ b/drivers/vfio/vfio_iommu_type1.c
@@ -36,6 +36,9 @@
#include <linux/uaccess.h>
#include <linux/vfio.h>
#include <linux/workqueue.h>
+#include <linux/pid_namespace.h>
+#include <linux/mdev.h>
+#include <linux/notifier.h>
#define DRIVER_VERSION "0.2"
#define DRIVER_AUTHOR "Alex Williamson <alex.williamson@redhat.com>"
@@ -55,8 +58,10 @@ MODULE_PARM_DESC(disable_hugepages,
struct vfio_iommu {
struct list_head domain_list;
+ struct vfio_domain *external_domain; /* domain for external user */
struct mutex lock;
struct rb_root dma_list;
+ struct blocking_notifier_head notifier;
bool v2;
bool nesting;
};
@@ -75,6 +80,9 @@ struct vfio_dma {
unsigned long vaddr; /* Process virtual addr */
size_t size; /* Map size (bytes) */
int prot; /* IOMMU_READ/WRITE */
+ bool iommu_mapped;
+ struct task_struct *task;
+ struct rb_root pfn_list; /* Ex-user pinned pfn list */
};
struct vfio_group {
@@ -83,6 +91,21 @@ struct vfio_group {
};
/*
+ * Guest RAM pinning working set or DMA target
+ */
+struct vfio_pfn {
+ struct rb_node node;
+ dma_addr_t iova; /* Device address */
+ unsigned long pfn; /* Host pfn */
+ atomic_t ref_count;
+};
+
+#define IS_IOMMU_CAP_DOMAIN_IN_CONTAINER(iommu) \
+ (!list_empty(&iommu->domain_list))
+
+static int put_pfn(unsigned long pfn, int prot);
+
+/*
* This code handles mapping and unmapping of user data buffers
* into DMA'ble space using the IOMMU
*/
@@ -130,6 +153,97 @@ static void vfio_unlink_dma(struct vfio_iommu *iommu, struct vfio_dma *old)
rb_erase(&old->node, &iommu->dma_list);
}
+/*
+ * Helper functions for the host iova-to-pfn list
+ */
+static struct vfio_pfn *vfio_find_vpfn(struct vfio_dma *dma, dma_addr_t iova)
+{
+ struct vfio_pfn *vpfn;
+ struct rb_node *node = dma->pfn_list.rb_node;
+
+ while (node) {
+ vpfn = rb_entry(node, struct vfio_pfn, node);
+
+ if (iova < vpfn->iova)
+ node = node->rb_left;
+ else if (iova > vpfn->iova)
+ node = node->rb_right;
+ else
+ return vpfn;
+ }
+ return NULL;
+}
+
+static void vfio_link_pfn(struct vfio_dma *dma,
+ struct vfio_pfn *new)
+{
+ struct rb_node **link, *parent = NULL;
+ struct vfio_pfn *vpfn;
+
+ link = &dma->pfn_list.rb_node;
+ while (*link) {
+ parent = *link;
+ vpfn = rb_entry(parent, struct vfio_pfn, node);
+
+ if (new->iova < vpfn->iova)
+ link = &(*link)->rb_left;
+ else
+ link = &(*link)->rb_right;
+ }
+
+ rb_link_node(&new->node, parent, link);
+ rb_insert_color(&new->node, &dma->pfn_list);
+}
+
+static void vfio_unlink_pfn(struct vfio_dma *dma, struct vfio_pfn *old)
+{
+ rb_erase(&old->node, &dma->pfn_list);
+}
+
+static int vfio_add_to_pfn_list(struct vfio_dma *dma, dma_addr_t iova,
+ unsigned long pfn)
+{
+ struct vfio_pfn *vpfn;
+
+ vpfn = kzalloc(sizeof(*vpfn), GFP_KERNEL);
+ if (!vpfn)
+ return -ENOMEM;
+
+ vpfn->iova = iova;
+ vpfn->pfn = pfn;
+ atomic_set(&vpfn->ref_count, 1);
+ vfio_link_pfn(dma, vpfn);
+ return 0;
+}
+
+static void vfio_remove_from_pfn_list(struct vfio_dma *dma,
+ struct vfio_pfn *vpfn)
+{
+ vfio_unlink_pfn(dma, vpfn);
+ kfree(vpfn);
+}
+
+static struct vfio_pfn *vfio_iova_get_vfio_pfn(struct vfio_dma *dma,
+ unsigned long iova)
+{
+ struct vfio_pfn *vpfn = vfio_find_vpfn(dma, iova);
+
+ if (vpfn)
+ atomic_inc(&vpfn->ref_count);
+ return vpfn;
+}
+
+static int vfio_iova_put_vfio_pfn(struct vfio_dma *dma, struct vfio_pfn *vpfn)
+{
+ int ret = 0;
+
+ if (atomic_dec_and_test(&vpfn->ref_count)) {
+ ret = put_pfn(vpfn->pfn, dma->prot);
+ vfio_remove_from_pfn_list(dma, vpfn);
+ }
+ return ret;
+}
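Together these helpers give each externally pinned iova a ref-counted node in the per-vfio_dma rb-tree: repeated pins of the same iova only bump ref_count, and put_pfn() runs on the final put. A sketch of the lifecycle for one iova (example_get_put is illustrative; pfn is assumed already resolved by the pinning path):

    /* Sketch: ref-counted lifecycle of one externally pinned iova. */
    static void example_get_put(struct vfio_dma *dma, dma_addr_t iova,
                                unsigned long pfn)
    {
        struct vfio_pfn *vpfn = vfio_iova_get_vfio_pfn(dma, iova);

        if (!vpfn) {    /* first user of this iova */
            vfio_add_to_pfn_list(dma, iova, pfn);   /* ref_count = 1 */
            vpfn = vfio_find_vpfn(dma, iova);
        }

        /* ... later, each user balances its reference: */
        vfio_iova_put_vfio_pfn(dma, vpfn);  /* releases page on last put */
    }
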
+
struct vwork {
struct mm_struct *mm;
long npage;
@@ -150,17 +264,22 @@ static void vfio_lock_acct_bg(struct work_struct *work)
kfree(vwork);
}
-static void vfio_lock_acct(long npage)
+static void vfio_lock_acct(struct task_struct *task, long npage)
{
struct vwork *vwork;
struct mm_struct *mm;
- if (!current->mm || !npage)
+ if (!npage)
+ return;
+
+ mm = get_task_mm(task);
+ if (!mm)
return; /* process exited or nothing to do */
- if (down_write_trylock(&current->mm->mmap_sem)) {
- current->mm->locked_vm += npage;
- up_write(&current->mm->mmap_sem);
+ if (down_write_trylock(&mm->mmap_sem)) {
+ mm->locked_vm += npage;
+ up_write(&mm->mmap_sem);
+ mmput(mm);
return;
}
@@ -170,11 +289,8 @@ static void vfio_lock_acct(long npage)
* wouldn't need this silliness
*/
vwork = kmalloc(sizeof(struct vwork), GFP_KERNEL);
- if (!vwork)
- return;
- mm = get_task_mm(current);
- if (!mm) {
- kfree(vwork);
+ if (!vwork) {
+ mmput(mm);
return;
}
INIT_WORK(&vwork->work, vfio_lock_acct_bg);
@@ -228,20 +344,36 @@ static int put_pfn(unsigned long pfn, int prot)
return 0;
}
-static int vaddr_get_pfn(unsigned long vaddr, int prot, unsigned long *pfn)
+static int vaddr_get_pfn(struct mm_struct *mm, unsigned long vaddr,
+ int prot, unsigned long *pfn)
{
struct page *page[1];
struct vm_area_struct *vma;
- int ret = -EFAULT;
+ int ret;
+
+ if (mm == current->mm) {
+ ret = get_user_pages_fast(vaddr, 1, !!(prot & IOMMU_WRITE),
+ page);
+ } else {
+ unsigned int flags = 0;
+
+ if (prot & IOMMU_WRITE)
+ flags |= FOLL_WRITE;
+
+ down_read(&mm->mmap_sem);
+ ret = get_user_pages_remote(NULL, mm, vaddr, 1, flags, page,
+ NULL, NULL);
+ up_read(&mm->mmap_sem);
+ }
- if (get_user_pages_fast(vaddr, 1, !!(prot & IOMMU_WRITE), page) == 1) {
+ if (ret == 1) {
*pfn = page_to_pfn(page[0]);
return 0;
}
- down_read(&current->mm->mmap_sem);
+ down_read(&mm->mmap_sem);
- vma = find_vma_intersection(current->mm, vaddr, vaddr + 1);
+ vma = find_vma_intersection(mm, vaddr, vaddr + 1);
if (vma && vma->vm_flags & VM_PFNMAP) {
*pfn = ((vaddr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
@@ -249,8 +381,7 @@ static int vaddr_get_pfn(unsigned long vaddr, int prot, unsigned long *pfn)
ret = 0;
}
- up_read(&current->mm->mmap_sem);
-
+ up_read(&mm->mmap_sem);
return ret;
}
@@ -259,88 +390,299 @@ static int vaddr_get_pfn(unsigned long vaddr, int prot, unsigned long *pfn)
* the iommu can only map chunks of consecutive pfns anyway, so get the
* first page and all consecutive pages with the same locking.
*/
-static long vfio_pin_pages(unsigned long vaddr, long npage,
- int prot, unsigned long *pfn_base)
+static long vfio_pin_pages_remote(struct vfio_dma *dma, unsigned long vaddr,
+ long npage, unsigned long *pfn_base)
{
- unsigned long limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
- bool lock_cap = capable(CAP_IPC_LOCK);
- long ret, i;
+ unsigned long limit;
+ bool lock_cap = ns_capable(task_active_pid_ns(dma->task)->user_ns,
+ CAP_IPC_LOCK);
+ struct mm_struct *mm;
+ long ret, i = 0, lock_acct = 0;
bool rsvd;
+ dma_addr_t iova = vaddr - dma->vaddr + dma->iova;
- if (!current->mm)
+ mm = get_task_mm(dma->task);
+ if (!mm)
return -ENODEV;
- ret = vaddr_get_pfn(vaddr, prot, pfn_base);
+ ret = vaddr_get_pfn(mm, vaddr, dma->prot, pfn_base);
if (ret)
- return ret;
+ goto pin_pg_remote_exit;
rsvd = is_invalid_reserved_pfn(*pfn_base);
+ limit = task_rlimit(dma->task, RLIMIT_MEMLOCK) >> PAGE_SHIFT;
- if (!rsvd && !lock_cap && current->mm->locked_vm + 1 > limit) {
- put_pfn(*pfn_base, prot);
- pr_warn("%s: RLIMIT_MEMLOCK (%ld) exceeded\n", __func__,
- limit << PAGE_SHIFT);
- return -ENOMEM;
+ /*
+ * Reserved pages aren't counted against the user; externally pinned
+ * pages are already counted against the user.
+ */
+ if (!rsvd && !vfio_find_vpfn(dma, iova)) {
+ if (!lock_cap && mm->locked_vm + 1 > limit) {
+ put_pfn(*pfn_base, dma->prot);
+ pr_warn("%s: RLIMIT_MEMLOCK (%ld) exceeded\n", __func__,
+ limit << PAGE_SHIFT);
+ ret = -ENOMEM;
+ goto pin_pg_remote_exit;
+ }
+ lock_acct++;
}
- if (unlikely(disable_hugepages)) {
- if (!rsvd)
- vfio_lock_acct(1);
- return 1;
- }
+ i++;
+ if (likely(!disable_hugepages)) {
+ /* Lock all the consecutive pages from pfn_base */
+ for (vaddr += PAGE_SIZE, iova += PAGE_SIZE; i < npage;
+ i++, vaddr += PAGE_SIZE, iova += PAGE_SIZE) {
+ unsigned long pfn = 0;
- /* Lock all the consecutive pages from pfn_base */
- for (i = 1, vaddr += PAGE_SIZE; i < npage; i++, vaddr += PAGE_SIZE) {
- unsigned long pfn = 0;
+ ret = vaddr_get_pfn(mm, vaddr, dma->prot, &pfn);
+ if (ret)
+ break;
- ret = vaddr_get_pfn(vaddr, prot, &pfn);
- if (ret)
- break;
+ if (pfn != *pfn_base + i ||
+ rsvd != is_invalid_reserved_pfn(pfn)) {
+ put_pfn(pfn, dma->prot);
+ break;
+ }
- if (pfn != *pfn_base + i ||
- rsvd != is_invalid_reserved_pfn(pfn)) {
- put_pfn(pfn, prot);
- break;
+ if (!rsvd && !vfio_find_vpfn(dma, iova)) {
+ if (!lock_cap &&
+ mm->locked_vm + lock_acct + 1 > limit) {
+ put_pfn(pfn, dma->prot);
+ pr_warn("%s: RLIMIT_MEMLOCK (%ld) "
+ "exceeded\n", __func__,
+ limit << PAGE_SHIFT);
+ break;
+ }
+ lock_acct++;
+ }
}
+ }
- if (!rsvd && !lock_cap &&
- current->mm->locked_vm + i + 1 > limit) {
- put_pfn(pfn, prot);
- pr_warn("%s: RLIMIT_MEMLOCK (%ld) exceeded\n",
- __func__, limit << PAGE_SHIFT);
- break;
+ vfio_lock_acct(dma->task, lock_acct);
+ ret = i;
+
+pin_pg_remote_exit:
+ mmput(mm);
+ return ret;
+}
+
+static long vfio_unpin_pages_remote(struct vfio_dma *dma, dma_addr_t iova,
+ unsigned long pfn, long npage,
+ bool do_accounting)
+{
+ long unlocked = 0, locked = 0;
+ long i;
+
+ for (i = 0; i < npage; i++) {
+ if (put_pfn(pfn++, dma->prot)) {
+ unlocked++;
+ if (vfio_find_vpfn(dma, iova + (i << PAGE_SHIFT)))
+ locked++;
}
}
- if (!rsvd)
- vfio_lock_acct(i);
+ if (do_accounting)
+ vfio_lock_acct(dma->task, locked - unlocked);
- return i;
+ return unlocked;
}
-static long vfio_unpin_pages(unsigned long pfn, long npage,
- int prot, bool do_accounting)
+static int vfio_pin_page_external(struct vfio_dma *dma, unsigned long vaddr,
+ unsigned long *pfn_base, bool do_accounting)
{
- unsigned long unlocked = 0;
- long i;
+ unsigned long limit;
+ bool lock_cap = ns_capable(task_active_pid_ns(dma->task)->user_ns,
+ CAP_IPC_LOCK);
+ struct mm_struct *mm;
+ int ret;
+ bool rsvd;
- for (i = 0; i < npage; i++)
- unlocked += put_pfn(pfn++, prot);
+ mm = get_task_mm(dma->task);
+ if (!mm)
+ return -ENODEV;
+
+ ret = vaddr_get_pfn(mm, vaddr, dma->prot, pfn_base);
+ if (ret)
+ goto pin_page_exit;
+
+ rsvd = is_invalid_reserved_pfn(*pfn_base);
+ limit = task_rlimit(dma->task, RLIMIT_MEMLOCK) >> PAGE_SHIFT;
+
+ if (!rsvd && !lock_cap && mm->locked_vm + 1 > limit) {
+ put_pfn(*pfn_base, dma->prot);
+ pr_warn("%s: Task %s (%d) RLIMIT_MEMLOCK (%ld) exceeded\n",
+ __func__, dma->task->comm, task_pid_nr(dma->task),
+ limit << PAGE_SHIFT);
+ ret = -ENOMEM;
+ goto pin_page_exit;
+ }
+
+ if (!rsvd && do_accounting)
+ vfio_lock_acct(dma->task, 1);
+ ret = 1;
+
+pin_page_exit:
+ mmput(mm);
+ return ret;
+}
+
+static int vfio_unpin_page_external(struct vfio_dma *dma, dma_addr_t iova,
+ bool do_accounting)
+{
+ int unlocked;
+ struct vfio_pfn *vpfn = vfio_find_vpfn(dma, iova);
+
+ if (!vpfn)
+ return 0;
+
+ unlocked = vfio_iova_put_vfio_pfn(dma, vpfn);
if (do_accounting)
- vfio_lock_acct(-unlocked);
+ vfio_lock_acct(dma->task, -unlocked);
return unlocked;
}
-static void vfio_unmap_unpin(struct vfio_iommu *iommu, struct vfio_dma *dma)
+static int vfio_iommu_type1_pin_pages(void *iommu_data,
+ unsigned long *user_pfn,
+ int npage, int prot,
+ unsigned long *phys_pfn)
+{
+ struct vfio_iommu *iommu = iommu_data;
+ int i, j, ret;
+ unsigned long remote_vaddr;
+ struct vfio_dma *dma;
+ bool do_accounting;
+
+ if (!iommu || !user_pfn || !phys_pfn)
+ return -EINVAL;
+
+ /* Supported for v2 version only */
+ if (!iommu->v2)
+ return -EACCES;
+
+ mutex_lock(&iommu->lock);
+
+ /* Fail if notifier list is empty */
+ if ((!iommu->external_domain) || (!iommu->notifier.head)) {
+ ret = -EINVAL;
+ goto pin_done;
+ }
+
+ /*
+ * If an iommu-capable domain exists in the container, then all pages
+ * are already pinned and accounted for. Accounting is only needed when
+ * there is no iommu-capable domain in the container.
+ */
+ do_accounting = !IS_IOMMU_CAP_DOMAIN_IN_CONTAINER(iommu);
+
+ for (i = 0; i < npage; i++) {
+ dma_addr_t iova;
+ struct vfio_pfn *vpfn;
+
+ iova = user_pfn[i] << PAGE_SHIFT;
+ dma = vfio_find_dma(iommu, iova, PAGE_SIZE);
+ if (!dma) {
+ ret = -EINVAL;
+ goto pin_unwind;
+ }
+
+ if ((dma->prot & prot) != prot) {
+ ret = -EPERM;
+ goto pin_unwind;
+ }
+
+ vpfn = vfio_iova_get_vfio_pfn(dma, iova);
+ if (vpfn) {
+ phys_pfn[i] = vpfn->pfn;
+ continue;
+ }
+
+ remote_vaddr = dma->vaddr + iova - dma->iova;
+ ret = vfio_pin_page_external(dma, remote_vaddr, &phys_pfn[i],
+ do_accounting);
+ if (ret <= 0) {
+ WARN_ON(!ret);
+ goto pin_unwind;
+ }
+
+ ret = vfio_add_to_pfn_list(dma, iova, phys_pfn[i]);
+ if (ret) {
+ vfio_unpin_page_external(dma, iova, do_accounting);
+ goto pin_unwind;
+ }
+ }
+
+ ret = i;
+ goto pin_done;
+
+pin_unwind:
+ phys_pfn[i] = 0;
+ for (j = 0; j < i; j++) {
+ dma_addr_t iova;
+
+ iova = user_pfn[j] << PAGE_SHIFT;
+ dma = vfio_find_dma(iommu, iova, PAGE_SIZE);
+ vfio_unpin_page_external(dma, iova, do_accounting);
+ phys_pfn[j] = 0;
+ }
+pin_done:
+ mutex_unlock(&iommu->lock);
+ return ret;
+}
+
+static int vfio_iommu_type1_unpin_pages(void *iommu_data,
+ unsigned long *user_pfn,
+ int npage)
+{
+ struct vfio_iommu *iommu = iommu_data;
+ bool do_accounting;
+ int i;
+
+ if (!iommu || !user_pfn)
+ return -EINVAL;
+
+ /* Supported for v2 version only */
+ if (!iommu->v2)
+ return -EACCES;
+
+ mutex_lock(&iommu->lock);
+
+ if (!iommu->external_domain) {
+ mutex_unlock(&iommu->lock);
+ return -EINVAL;
+ }
+
+ do_accounting = !IS_IOMMU_CAP_DOMAIN_IN_CONTAINER(iommu);
+ for (i = 0; i < npage; i++) {
+ struct vfio_dma *dma;
+ dma_addr_t iova;
+
+ iova = user_pfn[i] << PAGE_SHIFT;
+ dma = vfio_find_dma(iommu, iova, PAGE_SIZE);
+ if (!dma)
+ goto unpin_exit;
+ vfio_unpin_page_external(dma, iova, do_accounting);
+ }
+
+unpin_exit:
+ mutex_unlock(&iommu->lock);
+ return i > npage ? npage : (i > 0 ? i : -EINVAL);
+}
+
+static long vfio_unmap_unpin(struct vfio_iommu *iommu, struct vfio_dma *dma,
+ bool do_accounting)
{
dma_addr_t iova = dma->iova, end = dma->iova + dma->size;
struct vfio_domain *domain, *d;
long unlocked = 0;
if (!dma->size)
- return;
+ return 0;
+
+ if (!IS_IOMMU_CAP_DOMAIN_IN_CONTAINER(iommu))
+ return 0;
+
/*
* We use the IOMMU to track the physical addresses, otherwise we'd
* need a much more complicated tracking system. Unfortunately that
@@ -382,21 +724,28 @@ static void vfio_unmap_unpin(struct vfio_iommu *iommu, struct vfio_dma *dma)
if (WARN_ON(!unmapped))
break;
- unlocked += vfio_unpin_pages(phys >> PAGE_SHIFT,
- unmapped >> PAGE_SHIFT,
- dma->prot, false);
+ unlocked += vfio_unpin_pages_remote(dma, iova,
+ phys >> PAGE_SHIFT,
+ unmapped >> PAGE_SHIFT,
+ false);
iova += unmapped;
cond_resched();
}
- vfio_lock_acct(-unlocked);
+ dma->iommu_mapped = false;
+ if (do_accounting) {
+ vfio_lock_acct(dma->task, -unlocked);
+ return 0;
+ }
+ return unlocked;
}
static void vfio_remove_dma(struct vfio_iommu *iommu, struct vfio_dma *dma)
{
- vfio_unmap_unpin(iommu, dma);
+ vfio_unmap_unpin(iommu, dma, true);
vfio_unlink_dma(iommu, dma);
+ put_task_struct(dma->task);
kfree(dma);
}
@@ -430,9 +779,9 @@ static int vfio_dma_do_unmap(struct vfio_iommu *iommu,
struct vfio_iommu_type1_dma_unmap *unmap)
{
uint64_t mask;
- struct vfio_dma *dma;
+ struct vfio_dma *dma, *dma_last = NULL;
size_t unmapped = 0;
- int ret = 0;
+ int ret = 0, retries = 0;
mask = ((uint64_t)1 << __ffs(vfio_pgsize_bitmap(iommu))) - 1;
@@ -442,7 +791,7 @@ static int vfio_dma_do_unmap(struct vfio_iommu *iommu,
return -EINVAL;
WARN_ON(mask & PAGE_MASK);
-
+again:
mutex_lock(&iommu->lock);
/*
@@ -477,7 +826,7 @@ static int vfio_dma_do_unmap(struct vfio_iommu *iommu,
* mappings within the range.
*/
if (iommu->v2) {
- dma = vfio_find_dma(iommu, unmap->iova, 0);
+ dma = vfio_find_dma(iommu, unmap->iova, 1);
if (dma && dma->iova != unmap->iova) {
ret = -EINVAL;
goto unlock;
@@ -492,6 +841,38 @@ static int vfio_dma_do_unmap(struct vfio_iommu *iommu,
while ((dma = vfio_find_dma(iommu, unmap->iova, unmap->size))) {
if (!iommu->v2 && unmap->iova > dma->iova)
break;
+ /*
+ * Only a task with the same address space as the one that mapped
+ * this iova range is allowed to unmap it.
+ */
+ if (dma->task->mm != current->mm)
+ break;
+
+ if (!RB_EMPTY_ROOT(&dma->pfn_list)) {
+ struct vfio_iommu_type1_dma_unmap nb_unmap;
+
+ if (dma_last == dma) {
+ BUG_ON(++retries > 10);
+ } else {
+ dma_last = dma;
+ retries = 0;
+ }
+
+ nb_unmap.iova = dma->iova;
+ nb_unmap.size = dma->size;
+
+ /*
+ * Notify anyone (mdev vendor drivers) to invalidate and
+ * unmap iovas within the range we're about to unmap.
+ * Vendor drivers MUST unpin pages in response to an
+ * invalidation.
+ */
+ mutex_unlock(&iommu->lock);
+ blocking_notifier_call_chain(&iommu->notifier,
+ VFIO_IOMMU_NOTIFY_DMA_UNMAP,
+ &nb_unmap);
+ goto again;
+ }
unmapped += dma->size;
vfio_remove_dma(iommu, dma);
}
@@ -558,17 +939,56 @@ unwind:
return ret;
}
+static int vfio_pin_map_dma(struct vfio_iommu *iommu, struct vfio_dma *dma,
+ size_t map_size)
+{
+ dma_addr_t iova = dma->iova;
+ unsigned long vaddr = dma->vaddr;
+ size_t size = map_size;
+ long npage;
+ unsigned long pfn;
+ int ret = 0;
+
+ while (size) {
+ /* Pin a contiguous chunk of memory */
+ npage = vfio_pin_pages_remote(dma, vaddr + dma->size,
+ size >> PAGE_SHIFT, &pfn);
+ if (npage <= 0) {
+ WARN_ON(!npage);
+ ret = (int)npage;
+ break;
+ }
+
+ /* Map it! */
+ ret = vfio_iommu_map(iommu, iova + dma->size, pfn, npage,
+ dma->prot);
+ if (ret) {
+ vfio_unpin_pages_remote(dma, iova + dma->size, pfn,
+ npage, true);
+ break;
+ }
+
+ size -= npage << PAGE_SHIFT;
+ dma->size += npage << PAGE_SHIFT;
+ }
+
+ dma->iommu_mapped = true;
+
+ if (ret)
+ vfio_remove_dma(iommu, dma);
+
+ return ret;
+}
+
static int vfio_dma_do_map(struct vfio_iommu *iommu,
struct vfio_iommu_type1_dma_map *map)
{
dma_addr_t iova = map->iova;
unsigned long vaddr = map->vaddr;
size_t size = map->size;
- long npage;
int ret = 0, prot = 0;
uint64_t mask;
struct vfio_dma *dma;
- unsigned long pfn;
/* Verify that none of our __u64 fields overflow */
if (map->size != size || map->vaddr != vaddr || map->iova != iova)
@@ -594,47 +1014,33 @@ static int vfio_dma_do_map(struct vfio_iommu *iommu,
mutex_lock(&iommu->lock);
if (vfio_find_dma(iommu, iova, size)) {
- mutex_unlock(&iommu->lock);
- return -EEXIST;
+ ret = -EEXIST;
+ goto out_unlock;
}
dma = kzalloc(sizeof(*dma), GFP_KERNEL);
if (!dma) {
- mutex_unlock(&iommu->lock);
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto out_unlock;
}
dma->iova = iova;
dma->vaddr = vaddr;
dma->prot = prot;
+ get_task_struct(current);
+ dma->task = current;
+ dma->pfn_list = RB_ROOT;
/* Insert zero-sized and grow as we map chunks of it */
vfio_link_dma(iommu, dma);
- while (size) {
- /* Pin a contiguous chunk of memory */
- npage = vfio_pin_pages(vaddr + dma->size,
- size >> PAGE_SHIFT, prot, &pfn);
- if (npage <= 0) {
- WARN_ON(!npage);
- ret = (int)npage;
- break;
- }
-
- /* Map it! */
- ret = vfio_iommu_map(iommu, iova + dma->size, pfn, npage, prot);
- if (ret) {
- vfio_unpin_pages(pfn, npage, prot, true);
- break;
- }
-
- size -= npage << PAGE_SHIFT;
- dma->size += npage << PAGE_SHIFT;
- }
-
- if (ret)
- vfio_remove_dma(iommu, dma);
+ /* Don't pin and map if the container doesn't contain an IOMMU-capable domain */
+ if (!IS_IOMMU_CAP_DOMAIN_IN_CONTAINER(iommu))
+ dma->size = size;
+ else
+ ret = vfio_pin_map_dma(iommu, dma, size);
+out_unlock:
mutex_unlock(&iommu->lock);
return ret;
}
@@ -662,10 +1068,6 @@ static int vfio_iommu_replay(struct vfio_iommu *iommu,
d = list_first_entry(&iommu->domain_list, struct vfio_domain, next);
n = rb_first(&iommu->dma_list);
- /* If there's not a domain, there better not be any mappings */
- if (WARN_ON(n && !d))
- return -EINVAL;
-
for (; n; n = rb_next(n)) {
struct vfio_dma *dma;
dma_addr_t iova;
@@ -674,21 +1076,49 @@ static int vfio_iommu_replay(struct vfio_iommu *iommu,
iova = dma->iova;
while (iova < dma->iova + dma->size) {
- phys_addr_t phys = iommu_iova_to_phys(d->domain, iova);
+ phys_addr_t phys;
size_t size;
- if (WARN_ON(!phys)) {
- iova += PAGE_SIZE;
- continue;
+ if (dma->iommu_mapped) {
+ phys_addr_t p;
+ dma_addr_t i;
+
+ phys = iommu_iova_to_phys(d->domain, iova);
+
+ if (WARN_ON(!phys)) {
+ iova += PAGE_SIZE;
+ continue;
+ }
+
+ size = PAGE_SIZE;
+ p = phys + size;
+ i = iova + size;
+ while (i < dma->iova + dma->size &&
+ p == iommu_iova_to_phys(d->domain, i)) {
+ size += PAGE_SIZE;
+ p += PAGE_SIZE;
+ i += PAGE_SIZE;
+ }
+ } else {
+ unsigned long pfn;
+ unsigned long vaddr = dma->vaddr +
+ (iova - dma->iova);
+ size_t n = dma->iova + dma->size - iova;
+ long npage;
+
+ npage = vfio_pin_pages_remote(dma, vaddr,
+ n >> PAGE_SHIFT,
+ &pfn);
+ if (npage <= 0) {
+ WARN_ON(!npage);
+ ret = (int)npage;
+ return ret;
+ }
+
+ phys = pfn << PAGE_SHIFT;
+ size = npage << PAGE_SHIFT;
}
- size = PAGE_SIZE;
-
- while (iova + size < dma->iova + dma->size &&
- phys + size == iommu_iova_to_phys(d->domain,
- iova + size))
- size += PAGE_SIZE;
-
ret = iommu_map(domain->domain, iova, phys,
size, dma->prot | domain->prot);
if (ret)
@@ -696,8 +1126,8 @@ static int vfio_iommu_replay(struct vfio_iommu *iommu,
iova += size;
}
+ dma->iommu_mapped = true;
}
-
return 0;
}
@@ -734,22 +1164,39 @@ static void vfio_test_domain_fgsp(struct vfio_domain *domain)
__free_pages(pages, order);
}
+static struct vfio_group *find_iommu_group(struct vfio_domain *domain,
+ struct iommu_group *iommu_group)
+{
+ struct vfio_group *g;
+
+ list_for_each_entry(g, &domain->group_list, next) {
+ if (g->iommu_group == iommu_group)
+ return g;
+ }
+
+ return NULL;
+}
+
static int vfio_iommu_type1_attach_group(void *iommu_data,
struct iommu_group *iommu_group)
{
struct vfio_iommu *iommu = iommu_data;
- struct vfio_group *group, *g;
+ struct vfio_group *group;
struct vfio_domain *domain, *d;
- struct bus_type *bus = NULL;
+ struct bus_type *bus = NULL, *mdev_bus;
int ret;
mutex_lock(&iommu->lock);
list_for_each_entry(d, &iommu->domain_list, next) {
- list_for_each_entry(g, &d->group_list, next) {
- if (g->iommu_group != iommu_group)
- continue;
+ if (find_iommu_group(d, iommu_group)) {
+ mutex_unlock(&iommu->lock);
+ return -EINVAL;
+ }
+ }
+ if (iommu->external_domain) {
+ if (find_iommu_group(iommu->external_domain, iommu_group)) {
mutex_unlock(&iommu->lock);
return -EINVAL;
}
@@ -769,6 +1216,25 @@ static int vfio_iommu_type1_attach_group(void *iommu_data,
if (ret)
goto out_free;
+ mdev_bus = symbol_get(mdev_bus_type);
+
+ if (mdev_bus) {
+ if ((bus == mdev_bus) && !iommu_present(bus)) {
+ symbol_put(mdev_bus_type);
+ if (!iommu->external_domain) {
+ INIT_LIST_HEAD(&domain->group_list);
+ iommu->external_domain = domain;
+ } else
+ kfree(domain);
+
+ list_add(&group->next,
+ &iommu->external_domain->group_list);
+ mutex_unlock(&iommu->lock);
+ return 0;
+ }
+ symbol_put(mdev_bus_type);
+ }
+
domain->domain = iommu_domain_alloc(bus);
if (!domain->domain) {
ret = -EIO;
@@ -859,6 +1325,46 @@ static void vfio_iommu_unmap_unpin_all(struct vfio_iommu *iommu)
vfio_remove_dma(iommu, rb_entry(node, struct vfio_dma, node));
}
+static void vfio_iommu_unmap_unpin_reaccount(struct vfio_iommu *iommu)
+{
+ struct rb_node *n, *p;
+
+ n = rb_first(&iommu->dma_list);
+ for (; n; n = rb_next(n)) {
+ struct vfio_dma *dma;
+ long locked = 0, unlocked = 0;
+
+ dma = rb_entry(n, struct vfio_dma, node);
+ unlocked += vfio_unmap_unpin(iommu, dma, false);
+ p = rb_first(&dma->pfn_list);
+ for (; p; p = rb_next(p)) {
+ struct vfio_pfn *vpfn = rb_entry(p, struct vfio_pfn,
+ node);
+
+ if (!is_invalid_reserved_pfn(vpfn->pfn))
+ locked++;
+ }
+ vfio_lock_acct(dma->task, locked - unlocked);
+ }
+}
+
+static void vfio_sanity_check_pfn_list(struct vfio_iommu *iommu)
+{
+ struct rb_node *n;
+
+ n = rb_first(&iommu->dma_list);
+ for (; n; n = rb_next(n)) {
+ struct vfio_dma *dma;
+
+ dma = rb_entry(n, struct vfio_dma, node);
+
+ if (WARN_ON(!RB_EMPTY_ROOT(&dma->pfn_list)))
+ break;
+ }
+ /* mdev vendor driver must unregister notifier */
+ WARN_ON(iommu->notifier.head);
+}
+
static void vfio_iommu_type1_detach_group(void *iommu_data,
struct iommu_group *iommu_group)
{
@@ -868,31 +1374,55 @@ static void vfio_iommu_type1_detach_group(void *iommu_data,
mutex_lock(&iommu->lock);
- list_for_each_entry(domain, &iommu->domain_list, next) {
- list_for_each_entry(group, &domain->group_list, next) {
- if (group->iommu_group != iommu_group)
- continue;
-
- iommu_detach_group(domain->domain, iommu_group);
+ if (iommu->external_domain) {
+ group = find_iommu_group(iommu->external_domain, iommu_group);
+ if (group) {
list_del(&group->next);
kfree(group);
- /*
- * Group ownership provides privilege, if the group
- * list is empty, the domain goes away. If it's the
- * last domain, then all the mappings go away too.
- */
- if (list_empty(&domain->group_list)) {
- if (list_is_singular(&iommu->domain_list))
+
+ if (list_empty(&iommu->external_domain->group_list)) {
+ vfio_sanity_check_pfn_list(iommu);
+
+ if (!IS_IOMMU_CAP_DOMAIN_IN_CONTAINER(iommu))
vfio_iommu_unmap_unpin_all(iommu);
- iommu_domain_free(domain->domain);
- list_del(&domain->next);
- kfree(domain);
+
+ kfree(iommu->external_domain);
+ iommu->external_domain = NULL;
+ }
+ goto detach_group_done;
+ }
+ }
+
+ list_for_each_entry(domain, &iommu->domain_list, next) {
+ group = find_iommu_group(domain, iommu_group);
+ if (!group)
+ continue;
+
+ iommu_detach_group(domain->domain, iommu_group);
+ list_del(&group->next);
+ kfree(group);
+ /*
+ * Group ownership provides privilege; if the group list is
+ * empty, the domain goes away. If it's the last iommu-backed
+ * domain and no external domain exists, all the mappings go
+ * away too. If it's the last iommu-backed domain and an
+ * external domain exists, update the accounting instead.
+ */
+ if (list_empty(&domain->group_list)) {
+ if (list_is_singular(&iommu->domain_list)) {
+ if (!iommu->external_domain)
+ vfio_iommu_unmap_unpin_all(iommu);
+ else
+ vfio_iommu_unmap_unpin_reaccount(iommu);
}
- goto done;
+ iommu_domain_free(domain->domain);
+ list_del(&domain->next);
+ kfree(domain);
}
+ break;
}
-done:
+detach_group_done:
mutex_unlock(&iommu->lock);
}
@@ -920,31 +1450,46 @@ static void *vfio_iommu_type1_open(unsigned long arg)
INIT_LIST_HEAD(&iommu->domain_list);
iommu->dma_list = RB_ROOT;
mutex_init(&iommu->lock);
+ BLOCKING_INIT_NOTIFIER_HEAD(&iommu->notifier);
return iommu;
}
+static void vfio_release_domain(struct vfio_domain *domain, bool external)
+{
+ struct vfio_group *group, *group_tmp;
+
+ list_for_each_entry_safe(group, group_tmp,
+ &domain->group_list, next) {
+ if (!external)
+ iommu_detach_group(domain->domain, group->iommu_group);
+ list_del(&group->next);
+ kfree(group);
+ }
+
+ if (!external)
+ iommu_domain_free(domain->domain);
+}
+
static void vfio_iommu_type1_release(void *iommu_data)
{
struct vfio_iommu *iommu = iommu_data;
struct vfio_domain *domain, *domain_tmp;
- struct vfio_group *group, *group_tmp;
+
+ if (iommu->external_domain) {
+ vfio_release_domain(iommu->external_domain, true);
+ vfio_sanity_check_pfn_list(iommu);
+ kfree(iommu->external_domain);
+ }
vfio_iommu_unmap_unpin_all(iommu);
list_for_each_entry_safe(domain, domain_tmp,
&iommu->domain_list, next) {
- list_for_each_entry_safe(group, group_tmp,
- &domain->group_list, next) {
- iommu_detach_group(domain->domain, group->iommu_group);
- list_del(&group->next);
- kfree(group);
- }
- iommu_domain_free(domain->domain);
+ vfio_release_domain(domain, false);
list_del(&domain->next);
kfree(domain);
}
-
kfree(iommu);
}
@@ -1040,14 +1585,42 @@ static long vfio_iommu_type1_ioctl(void *iommu_data,
return -ENOTTY;
}
+static int vfio_iommu_type1_register_notifier(void *iommu_data,
+ unsigned long *events,
+ struct notifier_block *nb)
+{
+ struct vfio_iommu *iommu = iommu_data;
+
+ /* clear known events */
+ *events &= ~VFIO_IOMMU_NOTIFY_DMA_UNMAP;
+
+ /* refuse to register if unknown events remain */
+ if (*events)
+ return -EINVAL;
+
+ return blocking_notifier_chain_register(&iommu->notifier, nb);
+}
+
+static int vfio_iommu_type1_unregister_notifier(void *iommu_data,
+ struct notifier_block *nb)
+{
+ struct vfio_iommu *iommu = iommu_data;
+
+ return blocking_notifier_chain_unregister(&iommu->notifier, nb);
+}
+
static const struct vfio_iommu_driver_ops vfio_iommu_driver_ops_type1 = {
- .name = "vfio-iommu-type1",
- .owner = THIS_MODULE,
- .open = vfio_iommu_type1_open,
- .release = vfio_iommu_type1_release,
- .ioctl = vfio_iommu_type1_ioctl,
- .attach_group = vfio_iommu_type1_attach_group,
- .detach_group = vfio_iommu_type1_detach_group,
+ .name = "vfio-iommu-type1",
+ .owner = THIS_MODULE,
+ .open = vfio_iommu_type1_open,
+ .release = vfio_iommu_type1_release,
+ .ioctl = vfio_iommu_type1_ioctl,
+ .attach_group = vfio_iommu_type1_attach_group,
+ .detach_group = vfio_iommu_type1_detach_group,
+ .pin_pages = vfio_iommu_type1_pin_pages,
+ .unpin_pages = vfio_iommu_type1_unpin_pages,
+ .register_notifier = vfio_iommu_type1_register_notifier,
+ .unregister_notifier = vfio_iommu_type1_unregister_notifier,
};
static int __init vfio_iommu_type1_init(void)
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index 5dc128a8da83..5dc34653274a 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -342,7 +342,7 @@ static int vhost_net_tx_get_vq_desc(struct vhost_net *net,
endtime = busy_clock() + vq->busyloop_timeout;
while (vhost_can_busy_poll(vq->dev, endtime) &&
vhost_vq_avail_empty(vq->dev, vq))
- cpu_relax_lowlatency();
+ cpu_relax();
preempt_enable();
r = vhost_get_vq_desc(vq, vq->iov, ARRAY_SIZE(vq->iov),
out_num, in_num, NULL, NULL);
@@ -533,7 +533,7 @@ static int vhost_net_rx_peek_head_len(struct vhost_net *net, struct sock *sk)
while (vhost_can_busy_poll(&net->dev, endtime) &&
!sk_has_rx_data(sk) &&
vhost_vq_avail_empty(&net->dev, vq))
- cpu_relax_lowlatency();
+ cpu_relax();
preempt_enable();
diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
index 6e29d053843d..253310cdaaca 100644
--- a/drivers/vhost/scsi.c
+++ b/drivers/vhost/scsi.c
@@ -922,8 +922,7 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
*/
iov_iter_init(&out_iter, WRITE, vq->iov, out, out_size);
- ret = copy_from_iter(req, req_size, &out_iter);
- if (unlikely(ret != req_size)) {
+ if (unlikely(!copy_from_iter_full(req, req_size, &out_iter))) {
vq_err(vq, "Faulted on copy_from_iter\n");
vhost_scsi_send_bad_target(vs, vq, head, out);
continue;
@@ -1749,7 +1748,6 @@ out:
static int vhost_scsi_make_nexus(struct vhost_scsi_tpg *tpg,
const char *name)
{
- struct se_portal_group *se_tpg;
struct vhost_scsi_nexus *tv_nexus;
mutex_lock(&tpg->tv_tpg_mutex);
@@ -1758,7 +1756,6 @@ static int vhost_scsi_make_nexus(struct vhost_scsi_tpg *tpg,
pr_debug("tpg->tpg_nexus already exists\n");
return -EEXIST;
}
- se_tpg = &tpg->se_tpg;
tv_nexus = kzalloc(sizeof(struct vhost_scsi_nexus), GFP_KERNEL);
if (!tv_nexus) {
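The conversion above (and the matching one in get_indirect() below) relies on copy_from_iter_full(), new this cycle: it returns bool and reverts the iterator on a short copy, so callers stop comparing the returned byte count. A minimal sketch of the before/after shape, with req and req_size standing in for whatever fixed-size header is being read:

	/* old: compare byte count; iterator left partially advanced on failure */
	if (copy_from_iter(req, req_size, &out_iter) != req_size)
		goto fault;

	/* new: all or nothing; iterator restored on failure */
	if (!copy_from_iter_full(req, req_size, &out_iter))
		goto fault;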
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index c6f2d89c0e97..d6432603880c 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -49,7 +49,7 @@ enum {
INTERVAL_TREE_DEFINE(struct vhost_umem_node,
rb, __u64, __subtree_last,
- START, LAST, , vhost_umem_interval_tree);
+ START, LAST, static inline, vhost_umem_interval_tree);
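/* Note: INTERVAL_TREE_DEFINE()'s seventh parameter (ITSTATIC in
 * include/linux/interval_tree_generic.h) is the storage class prepended to
 * every generated helper (_insert, _remove, _iter_first, _iter_next);
 * passing "static inline" keeps them file-local instead of emitting global
 * symbols for helpers vhost.c may not use. */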
#ifdef CONFIG_VHOST_CROSS_ENDIAN_LEGACY
static void vhost_disable_cross_endian(struct vhost_virtqueue *vq)
@@ -261,8 +261,8 @@ void vhost_work_queue(struct vhost_dev *dev, struct vhost_work *work)
if (!test_and_set_bit(VHOST_WORK_QUEUED, &work->flags)) {
/* We can only add the work to the list after we're
* sure it was not in the list.
+ * test_and_set_bit() implies a memory barrier.
*/
- smp_mb();
llist_add(&work->node, &dev->work_list);
wake_up_process(dev->worker);
}
@@ -290,6 +290,7 @@ static void vhost_vq_reset(struct vhost_dev *dev,
vq->avail = NULL;
vq->used = NULL;
vq->last_avail_idx = 0;
+ vq->last_used_event = 0;
vq->avail_idx = 0;
vq->last_used_idx = 0;
vq->signalled_used = 0;
@@ -719,7 +720,7 @@ static int memory_access_ok(struct vhost_dev *d, struct vhost_umem *umem,
static int translate_desc(struct vhost_virtqueue *vq, u64 addr, u32 len,
struct iovec iov[], int iov_size, int access);
-static int vhost_copy_to_user(struct vhost_virtqueue *vq, void *to,
+static int vhost_copy_to_user(struct vhost_virtqueue *vq, void __user *to,
const void *from, unsigned size)
{
int ret;
@@ -749,7 +750,7 @@ out:
}
static int vhost_copy_from_user(struct vhost_virtqueue *vq, void *to,
- void *from, unsigned size)
+ void __user *from, unsigned size)
{
int ret;
@@ -783,7 +784,7 @@ out:
}
static void __user *__vhost_get_user(struct vhost_virtqueue *vq,
- void *addr, unsigned size)
+ void __user *addr, unsigned size)
{
int ret;
@@ -934,8 +935,8 @@ static int umem_access_ok(u64 uaddr, u64 size, int access)
return 0;
}
-int vhost_process_iotlb_msg(struct vhost_dev *dev,
- struct vhost_iotlb_msg *msg)
+static int vhost_process_iotlb_msg(struct vhost_dev *dev,
+ struct vhost_iotlb_msg *msg)
{
int ret = 0;
@@ -1324,7 +1325,7 @@ long vhost_vring_ioctl(struct vhost_dev *d, int ioctl, void __user *argp)
r = -EINVAL;
break;
}
- vq->last_avail_idx = s.num;
+ vq->last_avail_idx = vq->last_used_event = s.num;
/* Forget the cached index value. */
vq->avail_idx = vq->last_avail_idx;
break;
@@ -1862,8 +1863,7 @@ static int get_indirect(struct vhost_virtqueue *vq,
i, count);
return -EINVAL;
}
- if (unlikely(copy_from_iter(&desc, sizeof(desc), &from) !=
- sizeof(desc))) {
+ if (unlikely(!copy_from_iter_full(&desc, sizeof(desc), &from))) {
vq_err(vq, "Failed indirect descriptor: idx %d, %zx\n",
i, (size_t)vhost64_to_cpu(vq, indirect->addr) + i * sizeof desc);
return -EINVAL;
@@ -2159,10 +2159,6 @@ static bool vhost_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
__u16 old, new;
__virtio16 event;
bool v;
- /* Flush out used index updates. This is paired
- * with the barrier that the Guest executes when enabling
- * interrupts. */
- smp_mb();
if (vhost_has_feature(vq, VIRTIO_F_NOTIFY_ON_EMPTY) &&
unlikely(vq->avail_idx == vq->last_avail_idx))
@@ -2170,6 +2166,10 @@ static bool vhost_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
if (!vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX)) {
__virtio16 flags;
+ /* Flush out used index updates. This is paired
+ * with the barrier that the Guest executes when enabling
+ * interrupts. */
+ smp_mb();
if (vhost_get_user(vq, flags, &vq->avail->flags)) {
vq_err(vq, "Failed to get flags");
return true;
@@ -2184,11 +2184,26 @@ static bool vhost_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
if (unlikely(!v))
return true;
+ /* We can be sure there is no need to notify the guest when both of
+ * the following hold:
+ * 1) the cached used event is ahead of new
+ * 2) the old-to-new update does not cross the cached used event. */
+ if (vring_need_event(vq->last_used_event, new + vq->num, new) &&
+ !vring_need_event(vq->last_used_event, new, old))
+ return false;
+
+ /* Flush out used index updates. This is paired
+ * with the barrier that the Guest executes when enabling
+ * interrupts. */
+ smp_mb();
+
if (vhost_get_user(vq, event, vhost_used_event(vq))) {
vq_err(vq, "Failed to get used event idx");
return true;
}
- return vring_need_event(vhost16_to_cpu(vq, event), new, old);
+ vq->last_used_event = vhost16_to_cpu(vq, event);
+
+ return vring_need_event(vq->last_used_event, new, old);
}
/* This actually signals the guest, using eventfd. */
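The short-circuit above builds on the standard event-index helper from include/uapi/linux/virtio_ring.h, reproduced here for reference. With free-running 16-bit indices, a notification is needed iff the event index lies in the half-open window (old, new] modulo 2^16; the first call checks that the cached event is at most one ring (vq->num) ahead of new, the second that the old-to-new advance did not cross it.

static inline int vring_need_event(__u16 event_idx, __u16 new_idx, __u16 old)
{
	/* true iff event_idx is in (old, new], computed with u16 wraparound */
	return (__u16)(new_idx - event_idx - 1) < (__u16)(new_idx - old);
}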
diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h
index 78f3c5fc02e4..a9cbbb148f46 100644
--- a/drivers/vhost/vhost.h
+++ b/drivers/vhost/vhost.h
@@ -107,6 +107,9 @@ struct vhost_virtqueue {
/* Last index we used. */
u16 last_used_idx;
+ /* Last used event we've seen */
+ u16 last_used_event;
+
/* Used flags */
u16 used_flags;
diff --git a/drivers/vhost/vringh.c b/drivers/vhost/vringh.c
index 3bb02c60a2f5..bb8971f2a634 100644
--- a/drivers/vhost/vringh.c
+++ b/drivers/vhost/vringh.c
@@ -3,6 +3,7 @@
*
* Since these may be in userspace, we use (inline) accessors.
*/
+#include <linux/compiler.h>
#include <linux/module.h>
#include <linux/vringh.h>
#include <linux/virtio_ring.h>
@@ -820,13 +821,13 @@ EXPORT_SYMBOL(vringh_need_notify_user);
static inline int getu16_kern(const struct vringh *vrh,
u16 *val, const __virtio16 *p)
{
- *val = vringh16_to_cpu(vrh, ACCESS_ONCE(*p));
+ *val = vringh16_to_cpu(vrh, READ_ONCE(*p));
return 0;
}
static inline int putu16_kern(const struct vringh *vrh, __virtio16 *p, u16 val)
{
- ACCESS_ONCE(*p) = cpu_to_vringh16(vrh, val);
+ WRITE_ONCE(*p, cpu_to_vringh16(vrh, val));
return 0;
}
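Part of the tree-wide ACCESS_ONCE() removal: ACCESS_ONCE() is only dependable for scalar types, whereas READ_ONCE()/WRITE_ONCE() handle arbitrary sizes. The mechanical rewrite, with 'shared' a hypothetical pointer to shared state:

	u16 v = READ_ONCE(*shared);	/* was: v = ACCESS_ONCE(*shared); */
	WRITE_ONCE(*shared, v);		/* was: ACCESS_ONCE(*shared) = v; */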
diff --git a/drivers/vhost/vsock.c b/drivers/vhost/vsock.c
index a504e2e003da..bbbf588540ed 100644
--- a/drivers/vhost/vsock.c
+++ b/drivers/vhost/vsock.c
@@ -50,11 +50,10 @@ static u32 vhost_transport_get_local_cid(void)
return VHOST_VSOCK_DEFAULT_HOST_CID;
}
-static struct vhost_vsock *vhost_vsock_get(u32 guest_cid)
+static struct vhost_vsock *__vhost_vsock_get(u32 guest_cid)
{
struct vhost_vsock *vsock;
- spin_lock_bh(&vhost_vsock_lock);
list_for_each_entry(vsock, &vhost_vsock_list, list) {
u32 other_cid = vsock->guest_cid;
@@ -63,15 +62,24 @@ static struct vhost_vsock *vhost_vsock_get(u32 guest_cid)
continue;
if (other_cid == guest_cid) {
- spin_unlock_bh(&vhost_vsock_lock);
return vsock;
}
}
- spin_unlock_bh(&vhost_vsock_lock);
return NULL;
}
+static struct vhost_vsock *vhost_vsock_get(u32 guest_cid)
+{
+ struct vhost_vsock *vsock;
+
+ spin_lock_bh(&vhost_vsock_lock);
+ vsock = __vhost_vsock_get(guest_cid);
+ spin_unlock_bh(&vhost_vsock_lock);
+
+ return vsock;
+}
+
static void
vhost_transport_do_send_pkt(struct vhost_vsock *vsock,
struct vhost_virtqueue *vq)
@@ -195,7 +203,6 @@ static int
vhost_transport_send_pkt(struct virtio_vsock_pkt *pkt)
{
struct vhost_vsock *vsock;
- struct vhost_virtqueue *vq;
int len = pkt->len;
/* Find the vhost_vsock according to guest context id */
@@ -205,8 +212,6 @@ vhost_transport_send_pkt(struct virtio_vsock_pkt *pkt)
return -ENODEV;
}
- vq = &vsock->vqs[VSOCK_VQ_RX];
-
if (pkt->reply)
atomic_inc(&vsock->queued_replies);
@@ -562,11 +567,12 @@ static int vhost_vsock_set_cid(struct vhost_vsock *vsock, u64 guest_cid)
return -EINVAL;
/* Refuse if CID is already in use */
- other = vhost_vsock_get(guest_cid);
- if (other && other != vsock)
- return -EADDRINUSE;
-
spin_lock_bh(&vhost_vsock_lock);
+ other = __vhost_vsock_get(guest_cid);
+ if (other && other != vsock) {
+ spin_unlock_bh(&vhost_vsock_lock);
+ return -EADDRINUSE;
+ }
vsock->guest_cid = guest_cid;
spin_unlock_bh(&vhost_vsock_lock);
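The split into __vhost_vsock_get() plus a locking wrapper closes a check-then-act race: the old code dropped vhost_vsock_lock between the CID uniqueness check and the assignment, so two concurrent VHOST_VSOCK_SET_GUEST_CID callers could both pass the check. The resulting pattern, sketched:

	spin_lock_bh(&vhost_vsock_lock);
	other = __vhost_vsock_get(guest_cid);	/* check...              */
	if (!other || other == vsock)
		vsock->guest_cid = guest_cid;	/* ...and act, same lock */
	spin_unlock_bh(&vhost_vsock_lock);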
diff --git a/drivers/video/console/Kconfig b/drivers/video/console/Kconfig
index 38da6e299149..c3f1fb9ee820 100644
--- a/drivers/video/console/Kconfig
+++ b/drivers/video/console/Kconfig
@@ -9,7 +9,7 @@ config VGA_CONSOLE
depends on !4xx && !8xx && !SPARC && !M68K && !PARISC && !FRV && \
!SUPERH && !BLACKFIN && !AVR32 && !MN10300 && !CRIS && \
(!ARM || ARCH_FOOTBRIDGE || ARCH_INTEGRATOR || ARCH_NETWINDER) && \
- !ARM64 && !ARC && !MICROBLAZE
+ !ARM64 && !ARC && !MICROBLAZE && !OPENRISC
default y
help
Saying Y here will allow you to use Linux in text mode through a
diff --git a/drivers/video/console/fbcon.c b/drivers/video/console/fbcon.c
index b87f5cfdaea5..a44f5627b82a 100644
--- a/drivers/video/console/fbcon.c
+++ b/drivers/video/console/fbcon.c
@@ -164,8 +164,6 @@ static void fbcon_putcs(struct vc_data *vc, const unsigned short *s,
int count, int ypos, int xpos);
static void fbcon_clear_margins(struct vc_data *vc, int bottom_only);
static void fbcon_cursor(struct vc_data *vc, int mode);
-static int fbcon_scroll(struct vc_data *vc, int t, int b, int dir,
- int count);
static void fbcon_bmove(struct vc_data *vc, int sy, int sx, int dy, int dx,
int height, int width);
static int fbcon_switch(struct vc_data *vc);
@@ -1795,15 +1793,15 @@ static inline void fbcon_softback_note(struct vc_data *vc, int t,
softback_curr = softback_in;
}
-static int fbcon_scroll(struct vc_data *vc, int t, int b, int dir,
- int count)
+static bool fbcon_scroll(struct vc_data *vc, unsigned int t, unsigned int b,
+ enum con_scroll dir, unsigned int count)
{
struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]];
struct display *p = &fb_display[vc->vc_num];
int scroll_partial = info->flags & FBINFO_PARTIAL_PAN_OK;
if (fbcon_is_inactive(vc, info))
- return -EINVAL;
+ return true;
fbcon_cursor(vc, CM_ERASE);
@@ -1831,7 +1829,7 @@ static int fbcon_scroll(struct vc_data *vc, int t, int b, int dir,
(b - count)),
vc->vc_video_erase_char,
vc->vc_size_row * count);
- return 1;
+ return true;
break;
case SCROLL_WRAP_MOVE:
@@ -1903,7 +1901,7 @@ static int fbcon_scroll(struct vc_data *vc, int t, int b, int dir,
(b - count)),
vc->vc_video_erase_char,
vc->vc_size_row * count);
- return 1;
+ return true;
}
break;
@@ -1922,7 +1920,7 @@ static int fbcon_scroll(struct vc_data *vc, int t, int b, int dir,
t),
vc->vc_video_erase_char,
vc->vc_size_row * count);
- return 1;
+ return true;
break;
case SCROLL_WRAP_MOVE:
@@ -1992,10 +1990,10 @@ static int fbcon_scroll(struct vc_data *vc, int t, int b, int dir,
t),
vc->vc_video_erase_char,
vc->vc_size_row * count);
- return 1;
+ return true;
}
}
- return 0;
+ return false;
}
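fbcon here and mdacon, newport_con, sticon and vgacon below all track the same consw interface change: con_scroll now takes unsigned bounds plus an enum con_scroll direction and returns bool, where true means the driver scrolled the region itself and false asks the VT core to redraw. A minimal conforming stub, for reference:

static bool stub_scroll(struct vc_data *vc, unsigned int t, unsigned int b,
			enum con_scroll dir, unsigned int lines)
{
	return false;	/* not handled; VT core falls back to redrawing */
}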
diff --git a/drivers/video/console/mdacon.c b/drivers/video/console/mdacon.c
index bacbb044d77c..ec192a1bf297 100644
--- a/drivers/video/console/mdacon.c
+++ b/drivers/video/console/mdacon.c
@@ -488,12 +488,13 @@ static void mdacon_cursor(struct vc_data *c, int mode)
}
}
-static int mdacon_scroll(struct vc_data *c, int t, int b, int dir, int lines)
+static bool mdacon_scroll(struct vc_data *c, unsigned int t, unsigned int b,
+ enum con_scroll dir, unsigned int lines)
{
u16 eattr = mda_convert_attr(c->vc_video_erase_char);
if (!lines)
- return 0;
+ return false;
if (lines > c->vc_rows) /* maximum realistic size */
lines = c->vc_rows;
@@ -514,7 +515,7 @@ static int mdacon_scroll(struct vc_data *c, int t, int b, int dir, int lines)
break;
}
- return 0;
+ return false;
}
diff --git a/drivers/video/console/newport_con.c b/drivers/video/console/newport_con.c
index e3b9521e4ec3..1e11614322fe 100644
--- a/drivers/video/console/newport_con.c
+++ b/drivers/video/console/newport_con.c
@@ -574,8 +574,8 @@ static int newport_font_set(struct vc_data *vc, struct console_font *font, unsig
return newport_set_font(vc->vc_num, font);
}
-static int newport_scroll(struct vc_data *vc, int t, int b, int dir,
- int lines)
+static bool newport_scroll(struct vc_data *vc, unsigned int t, unsigned int b,
+ enum con_scroll dir, unsigned int lines)
{
int count, x, y;
unsigned short *s, *d;
@@ -595,7 +595,7 @@ static int newport_scroll(struct vc_data *vc, int t, int b, int dir,
(vc->vc_color & 0xf0) >> 4);
}
npregs->cset.topscan = (topscan - 1) & 0x3ff;
- return 0;
+ return false;
}
count = (b - t - lines) * vc->vc_cols;
@@ -670,7 +670,7 @@ static int newport_scroll(struct vc_data *vc, int t, int b, int dir,
}
}
}
- return 1;
+ return true;
}
static int newport_dummy(struct vc_data *c)
diff --git a/drivers/video/console/sticon.c b/drivers/video/console/sticon.c
index 3a10ac19598f..79c9bd8d3025 100644
--- a/drivers/video/console/sticon.c
+++ b/drivers/video/console/sticon.c
@@ -153,12 +153,13 @@ static void sticon_cursor(struct vc_data *conp, int mode)
}
}
-static int sticon_scroll(struct vc_data *conp, int t, int b, int dir, int count)
+static bool sticon_scroll(struct vc_data *conp, unsigned int t,
+ unsigned int b, enum con_scroll dir, unsigned int count)
{
struct sti_struct *sti = sticon_sti;
if (vga_is_gfx)
- return 0;
+ return false;
sticon_cursor(conp, CM_ERASE);
@@ -174,7 +175,7 @@ static int sticon_scroll(struct vc_data *conp, int t, int b, int dir, int count)
break;
}
- return 0;
+ return false;
}
static void sticon_init(struct vc_data *c, int init)
diff --git a/drivers/video/console/vgacon.c b/drivers/video/console/vgacon.c
index 11576611a974..c22a56232b7c 100644
--- a/drivers/video/console/vgacon.c
+++ b/drivers/video/console/vgacon.c
@@ -60,15 +60,6 @@ static struct vgastate vgastate;
#define BLANK 0x0020
-#define CAN_LOAD_EGA_FONTS /* undefine if the user must not do this */
-#define CAN_LOAD_PALETTE /* undefine if the user must not do this */
-
-/* You really do _NOT_ want to define this, unless you have buggy
- * Trident VGA which will resize cursor when moving it between column
- * 15 & 16. If you define this and your VGA is OK, inverse bug will
- * appear.
- */
-#undef TRIDENT_GLITCH
#define VGA_FONTWIDTH 8 /* VGA does not support fontwidths != 8 */
/*
* Interface used by the world
@@ -83,14 +74,12 @@ static int vgacon_blank(struct vc_data *c, int blank, int mode_switch);
static void vgacon_scrolldelta(struct vc_data *c, int lines);
static int vgacon_set_origin(struct vc_data *c);
static void vgacon_save_screen(struct vc_data *c);
-static int vgacon_scroll(struct vc_data *c, int t, int b, int dir,
- int lines);
static void vgacon_invert_region(struct vc_data *c, u16 * p, int count);
static struct uni_pagedir *vgacon_uni_pagedir;
static int vgacon_refcount;
/* Description of the hardware situation */
-static int vga_init_done __read_mostly;
+static bool vga_init_done;
static unsigned long vga_vram_base __read_mostly; /* Base of video memory */
static unsigned long vga_vram_end __read_mostly; /* End of video memory */
static unsigned int vga_vram_size __read_mostly; /* Size of video memory */
@@ -98,31 +87,31 @@ static u16 vga_video_port_reg __read_mostly; /* Video register select port */
static u16 vga_video_port_val __read_mostly; /* Video register value port */
static unsigned int vga_video_num_columns; /* Number of text columns */
static unsigned int vga_video_num_lines; /* Number of text lines */
-static int vga_can_do_color __read_mostly; /* Do we support colors? */
+static bool vga_can_do_color; /* Do we support colors? */
static unsigned int vga_default_font_height __read_mostly; /* Height of default screen font */
static unsigned char vga_video_type __read_mostly; /* Card type */
-static unsigned char vga_hardscroll_enabled __read_mostly;
-static unsigned char vga_hardscroll_user_enable __read_mostly = 1;
-static unsigned char vga_font_is_default = 1;
+static bool vga_font_is_default = true;
static int vga_vesa_blanked;
-static int vga_palette_blanked;
-static int vga_is_gfx;
-static int vga_512_chars;
+static bool vga_palette_blanked;
+static bool vga_is_gfx;
+static bool vga_512_chars;
static int vga_video_font_height;
static int vga_scan_lines __read_mostly;
static unsigned int vga_rolled_over;
-static int vgacon_text_mode_force;
+static bool vgacon_text_mode_force;
+static bool vga_hardscroll_enabled;
+static bool vga_hardscroll_user_enable = true;
bool vgacon_text_force(void)
{
- return vgacon_text_mode_force ? true : false;
+ return vgacon_text_mode_force;
}
EXPORT_SYMBOL(vgacon_text_force);
static int __init text_mode(char *str)
{
- vgacon_text_mode_force = 1;
+ vgacon_text_mode_force = true;
return 1;
}
@@ -136,7 +125,7 @@ static int __init no_scroll(char *str)
* Braille reader made by F.H. Papenmeier (Germany).
* Use the "no-scroll" bootflag.
*/
- vga_hardscroll_user_enable = vga_hardscroll_enabled = 0;
+ vga_hardscroll_user_enable = vga_hardscroll_enabled = false;
return 1;
}
@@ -159,18 +148,10 @@ static inline void write_vga(unsigned char reg, unsigned int val)
* handlers, thus the write has to be IRQ-atomic.
*/
raw_spin_lock_irqsave(&vga_lock, flags);
-
-#ifndef SLOW_VGA
v1 = reg + (val & 0xff00);
v2 = reg + 1 + ((val << 8) & 0xff00);
outw(v1, vga_video_port_reg);
outw(v2, vga_video_port_reg);
-#else
- outb_p(reg, vga_video_port_reg);
- outb_p(val >> 8, vga_video_port_val);
- outb_p(reg + 1, vga_video_port_reg);
- outb_p(val & 0xff, vga_video_port_val);
-#endif
raw_spin_unlock_irqrestore(&vga_lock, flags);
}
@@ -334,31 +315,8 @@ static void vgacon_restore_screen(struct vc_data *c)
static void vgacon_scrolldelta(struct vc_data *c, int lines)
{
- if (!lines) /* Turn scrollback off */
- c->vc_visible_origin = c->vc_origin;
- else {
- int margin = c->vc_size_row * 4;
- int ul, we, p, st;
-
- if (vga_rolled_over >
- (c->vc_scr_end - vga_vram_base) + margin) {
- ul = c->vc_scr_end - vga_vram_base;
- we = vga_rolled_over + c->vc_size_row;
- } else {
- ul = 0;
- we = vga_vram_size;
- }
- p = (c->vc_visible_origin - vga_vram_base - ul + we) % we +
- lines * c->vc_size_row;
- st = (c->vc_origin - vga_vram_base - ul + we) % we;
- if (st < 2 * margin)
- margin = 0;
- if (p < margin)
- p = 0;
- if (p > st - margin)
- p = st;
- c->vc_visible_origin = vga_vram_base + (p + ul) % we;
- }
+ vc_scrolldelta_helper(c, lines, vga_rolled_over, (void *)vga_vram_base,
+ vga_vram_size);
vga_set_mem_top(c);
}
#endif /* CONFIG_VGACON_SOFT_SCROLLBACK */
@@ -427,7 +385,7 @@ static const char *vgacon_startup(void)
}
} else {
/* If not, it is color. */
- vga_can_do_color = 1;
+ vga_can_do_color = true;
vga_vram_base = 0xb8000;
vga_video_port_reg = VGA_CRT_IC;
vga_video_port_val = VGA_CRT_DC;
@@ -451,18 +409,6 @@ static const char *vgacon_startup(void)
request_resource(&ioport_resource,
&vga_console_resource);
-#ifdef VGA_CAN_DO_64KB
- /*
- * get 64K rather than 32K of video RAM.
- * This doesn't actually work on all "VGA"
- * controllers (it seems like setting MM=01
- * and COE=1 isn't necessarily a good idea)
- */
- vga_vram_base = 0xa0000;
- vga_vram_size = 0x10000;
- outb_p(6, VGA_GFX_I);
- outb_p(6, VGA_GFX_D);
-#endif
/*
* Normalise the palette registers, to point
* the 16 screen colours to the first 16
@@ -542,7 +488,7 @@ static const char *vgacon_startup(void)
if (!vga_init_done) {
vgacon_scrollback_startup();
- vga_init_done = 1;
+ vga_init_done = true;
}
return display_desc;
@@ -634,7 +580,7 @@ static u8 vgacon_build_attr(struct vc_data *c, u8 color, u8 intensity,
static void vgacon_invert_region(struct vc_data *c, u16 * p, int count)
{
- int col = vga_can_do_color;
+ const bool col = vga_can_do_color;
while (count--) {
u16 a = scr_readw(p);
@@ -652,11 +598,6 @@ static void vgacon_set_cursor_size(int xpos, int from, int to)
unsigned long flags;
int curs, cure;
-#ifdef TRIDENT_GLITCH
- if (xpos < 16)
- from--, to--;
-#endif
-
if ((from == cursor_size_lastfrom) && (to == cursor_size_lastto))
return;
cursor_size_lastfrom = from;
@@ -858,12 +799,10 @@ static void vga_set_palette(struct vc_data *vc, const unsigned char *table)
static void vgacon_set_palette(struct vc_data *vc, const unsigned char *table)
{
-#ifdef CAN_LOAD_PALETTE
if (vga_video_type != VIDEO_TYPE_VGAC || vga_palette_blanked
|| !con_is_visible(vc))
return;
vga_set_palette(vc, table);
-#endif
}
/* structure holding original VGA register settings */
@@ -1006,24 +945,24 @@ static int vgacon_blank(struct vc_data *c, int blank, int mode_switch)
}
if (vga_palette_blanked) {
vga_set_palette(c, color_table);
- vga_palette_blanked = 0;
+ vga_palette_blanked = false;
return 0;
}
- vga_is_gfx = 0;
+ vga_is_gfx = false;
/* Tell console.c that it has to restore the screen itself */
return 1;
case 1: /* Normal blanking */
case -1: /* Obsolete */
if (!mode_switch && vga_video_type == VIDEO_TYPE_VGAC) {
vga_pal_blank(&vgastate);
- vga_palette_blanked = 1;
+ vga_palette_blanked = true;
return 0;
}
vgacon_set_origin(c);
scr_memsetw((void *) vga_vram_base, BLANK,
c->vc_screenbuf_size);
if (mode_switch)
- vga_is_gfx = 1;
+ vga_is_gfx = true;
return 1;
default: /* VESA blanking */
if (vga_video_type == VIDEO_TYPE_VGAC) {
@@ -1046,15 +985,14 @@ static int vgacon_blank(struct vc_data *c, int blank, int mode_switch)
* (sizif@botik.yaroslavl.su).
*/
-#ifdef CAN_LOAD_EGA_FONTS
-
#define colourmap 0xa0000
/* Pauline Middelink <middelin@polyware.iaf.nl> reports that we
should use 0xA0000 for the bwmap as well.. */
#define blackwmap 0xa0000
#define cmapsz 8192
-static int vgacon_do_font_op(struct vgastate *state,char *arg,int set,int ch512)
+static int vgacon_do_font_op(struct vgastate *state, char *arg, int set,
+ bool ch512)
{
unsigned short video_port_status = vga_video_port_reg + 6;
int font_select = 0x00, beg, i;
@@ -1063,10 +1001,6 @@ static int vgacon_do_font_op(struct vgastate *state,char *arg,int set,int ch512)
if (vga_video_type != VIDEO_TYPE_EGAM) {
charmap = (char *) VGA_MAP_MEM(colourmap, 0);
beg = 0x0e;
-#ifdef VGA_CAN_DO_64KB
- if (vga_video_type == VIDEO_TYPE_VGAC)
- beg = 0x06;
-#endif
} else {
charmap = (char *) VGA_MAP_MEM(blackwmap, 0);
beg = 0x0a;
@@ -1080,7 +1014,7 @@ static int vgacon_do_font_op(struct vgastate *state,char *arg,int set,int ch512)
if (!arg)
return -EINVAL; /* Return to default font not supported */
- vga_font_is_default = 0;
+ vga_font_is_default = false;
font_select = ch512 ? 0x04 : 0x00;
#else
/*
@@ -1091,7 +1025,7 @@ static int vgacon_do_font_op(struct vgastate *state,char *arg,int set,int ch512)
if (set) {
vga_font_is_default = !arg;
if (!arg)
- ch512 = 0; /* Default font is always 256 */
+ ch512 = false; /* Default font is always 256 */
font_select = arg ? (ch512 ? 0x0e : 0x0a) : 0x00;
}
@@ -1295,13 +1229,6 @@ static int vgacon_font_get(struct vc_data *c, struct console_font *font)
return vgacon_do_font_op(&vgastate, font->data, 0, vga_512_chars);
}
-#else
-
-#define vgacon_font_set NULL
-#define vgacon_font_get NULL
-
-#endif
-
static int vgacon_resize(struct vc_data *c, unsigned int width,
unsigned int height, unsigned int user)
{
@@ -1350,17 +1277,17 @@ static void vgacon_save_screen(struct vc_data *c)
c->vc_screenbuf_size > vga_vram_size ? vga_vram_size : c->vc_screenbuf_size);
}
-static int vgacon_scroll(struct vc_data *c, int t, int b, int dir,
- int lines)
+static bool vgacon_scroll(struct vc_data *c, unsigned int t, unsigned int b,
+ enum con_scroll dir, unsigned int lines)
{
unsigned long oldo;
unsigned int delta;
if (t || b != c->vc_rows || vga_is_gfx || c->vc_mode != KD_TEXT)
- return 0;
+ return false;
if (!vga_hardscroll_enabled || lines >= c->vc_rows / 2)
- return 0;
+ return false;
vgacon_restore_screen(c);
oldo = c->vc_origin;
@@ -1396,7 +1323,7 @@ static int vgacon_scroll(struct vc_data *c, int t, int b, int dir,
c->vc_visible_origin = c->vc_origin;
vga_set_mem_top(c);
c->vc_pos = (c->vc_pos - oldo) + c->vc_origin;
- return 1;
+ return true;
}
diff --git a/drivers/video/fbdev/efifb.c b/drivers/video/fbdev/efifb.c
index 37a37c4d04cb..8c4dc1e1f94f 100644
--- a/drivers/video/fbdev/efifb.c
+++ b/drivers/video/fbdev/efifb.c
@@ -118,6 +118,31 @@ static inline bool fb_base_is_valid(void)
return false;
}
+#define efifb_attr_decl(name, fmt) \
+static ssize_t name##_show(struct device *dev, \
+ struct device_attribute *attr, \
+ char *buf) \
+{ \
+ return sprintf(buf, fmt "\n", (screen_info.lfb_##name)); \
+} \
+static DEVICE_ATTR_RO(name)
+
+efifb_attr_decl(base, "0x%x");
+efifb_attr_decl(linelength, "%u");
+efifb_attr_decl(height, "%u");
+efifb_attr_decl(width, "%u");
+efifb_attr_decl(depth, "%u");
+
+static struct attribute *efifb_attrs[] = {
+ &dev_attr_base.attr,
+ &dev_attr_linelength.attr,
+ &dev_attr_width.attr,
+ &dev_attr_height.attr,
+ &dev_attr_depth.attr,
+ NULL
+};
+ATTRIBUTE_GROUPS(efifb);
+
static int efifb_probe(struct platform_device *dev)
{
struct fb_info *info;
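For one attribute, efifb_attr_decl(base, "0x%x") above expands to roughly the following; ATTRIBUTE_GROUPS(efifb) then derives the NULL-terminated efifb_groups[] passed to sysfs_create_groups() in the probe path below.

static ssize_t base_show(struct device *dev,
			 struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "0x%x\n", (screen_info.lfb_base));
}
static DEVICE_ATTR_RO(base);	/* declares struct device_attribute dev_attr_base */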
@@ -205,14 +230,13 @@ static int efifb_probe(struct platform_device *dev)
} else {
/* We cannot make this fatal. Sometimes this comes from magic
spaces our resource handlers simply don't know about */
- printk(KERN_WARNING
- "efifb: cannot reserve video memory at 0x%lx\n",
+ pr_warn("efifb: cannot reserve video memory at 0x%lx\n",
efifb_fix.smem_start);
}
info = framebuffer_alloc(sizeof(u32) * 16, &dev->dev);
if (!info) {
- printk(KERN_ERR "efifb: cannot allocate framebuffer\n");
+ pr_err("efifb: cannot allocate framebuffer\n");
err = -ENOMEM;
goto err_release_mem;
}
@@ -230,16 +254,15 @@ static int efifb_probe(struct platform_device *dev)
info->screen_base = ioremap_wc(efifb_fix.smem_start, efifb_fix.smem_len);
if (!info->screen_base) {
- printk(KERN_ERR "efifb: abort, cannot ioremap video memory "
- "0x%x @ 0x%lx\n",
+ pr_err("efifb: abort, cannot ioremap video memory 0x%x @ 0x%lx\n",
efifb_fix.smem_len, efifb_fix.smem_start);
err = -EIO;
goto err_release_fb;
}
- printk(KERN_INFO "efifb: framebuffer at 0x%lx, using %dk, total %dk\n",
+ pr_info("efifb: framebuffer at 0x%lx, using %dk, total %dk\n",
efifb_fix.smem_start, size_remap/1024, size_total/1024);
- printk(KERN_INFO "efifb: mode is %dx%dx%d, linelength=%d, pages=%d\n",
+ pr_info("efifb: mode is %dx%dx%d, linelength=%d, pages=%d\n",
efifb_defined.xres, efifb_defined.yres,
efifb_defined.bits_per_pixel, efifb_fix.line_length,
screen_info.pages);
@@ -247,7 +270,7 @@ static int efifb_probe(struct platform_device *dev)
efifb_defined.xres_virtual = efifb_defined.xres;
efifb_defined.yres_virtual = efifb_fix.smem_len /
efifb_fix.line_length;
- printk(KERN_INFO "efifb: scrolling: redraw\n");
+ pr_info("efifb: scrolling: redraw\n");
efifb_defined.yres_virtual = efifb_defined.yres;
/* some dummy values for timing to make fbset happy */
@@ -265,7 +288,7 @@ static int efifb_probe(struct platform_device *dev)
efifb_defined.transp.offset = screen_info.rsvd_pos;
efifb_defined.transp.length = screen_info.rsvd_size;
- printk(KERN_INFO "efifb: %s: "
+ pr_info("efifb: %s: "
"size=%d:%d:%d:%d, shift=%d:%d:%d:%d\n",
"Truecolor",
screen_info.rsvd_size,
@@ -285,12 +308,19 @@ static int efifb_probe(struct platform_device *dev)
info->fix = efifb_fix;
info->flags = FBINFO_FLAG_DEFAULT | FBINFO_MISC_FIRMWARE;
- if ((err = fb_alloc_cmap(&info->cmap, 256, 0)) < 0) {
- printk(KERN_ERR "efifb: cannot allocate colormap\n");
+ err = sysfs_create_groups(&dev->dev.kobj, efifb_groups);
+ if (err) {
+ pr_err("efifb: cannot add sysfs attrs\n");
goto err_unmap;
}
- if ((err = register_framebuffer(info)) < 0) {
- printk(KERN_ERR "efifb: cannot register framebuffer\n");
+ err = fb_alloc_cmap(&info->cmap, 256, 0);
+ if (err < 0) {
+ pr_err("efifb: cannot allocate colormap\n");
+ goto err_groups;
+ }
+ err = register_framebuffer(info);
+ if (err < 0) {
+ pr_err("efifb: cannot register framebuffer\n");
goto err_fb_dealoc;
}
fb_info(info, "%s frame buffer device\n", info->fix.id);
@@ -298,6 +328,8 @@ static int efifb_probe(struct platform_device *dev)
err_fb_dealoc:
fb_dealloc_cmap(&info->cmap);
+err_groups:
+ sysfs_remove_groups(&dev->dev.kobj, efifb_groups);
err_unmap:
iounmap(info->screen_base);
err_release_fb:
@@ -313,6 +345,7 @@ static int efifb_remove(struct platform_device *pdev)
struct fb_info *info = platform_get_drvdata(pdev);
unregister_framebuffer(info);
+ sysfs_remove_groups(&pdev->dev.kobj, efifb_groups);
framebuffer_release(info);
return 0;
diff --git a/drivers/video/fbdev/skeletonfb.c b/drivers/video/fbdev/skeletonfb.c
index f948baa16d82..e219a0a22077 100644
--- a/drivers/video/fbdev/skeletonfb.c
+++ b/drivers/video/fbdev/skeletonfb.c
@@ -836,7 +836,7 @@ static void xxxfb_remove(struct pci_dev *dev)
* @dev: PCI device
* @msg: the suspend event code.
*
- * See Documentation/power/devices.txt for more information
+ * See Documentation/power/admin-guide/devices.rst for more information
*/
static int xxxfb_suspend(struct pci_dev *dev, pm_message_t msg)
{
@@ -851,7 +851,7 @@ static int xxxfb_suspend(struct pci_dev *dev, pm_message_t msg)
* xxxfb_resume - Optional but recommended function. Resume the device.
* @dev: PCI device
*
- * See Documentation/power/devices.txt for more information
+ * See Documentation/power/admin-guide/devices.rst for more information
*/
static int xxxfb_resume(struct pci_dev *dev)
{
@@ -915,7 +915,7 @@ static void __exit xxxfb_exit(void)
* @dev: platform device
* @msg: the suspend event code.
*
- * See Documentation/power/devices.txt for more information
+ * See Documentation/power/admin-guide/devices.rst for more information
*/
static int xxxfb_suspend(struct platform_device *dev, pm_message_t msg)
{
@@ -930,7 +930,7 @@ static int xxxfb_suspend(struct platform_device *dev, pm_message_t msg)
* xxxfb_resume - Optional but recommended function. Resume the device.
* @dev: platform device
*
- * See Documentation/power/devices.txt for more information
+ * See Documentation/power/admin-guide/devices.rst for more information
*/
static int xxxfb_resume(struct platform_dev *dev)
{
diff --git a/drivers/video/fbdev/xen-fbfront.c b/drivers/video/fbdev/xen-fbfront.c
index 0567d517eed3..d0115a7af0a9 100644
--- a/drivers/video/fbdev/xen-fbfront.c
+++ b/drivers/video/fbdev/xen-fbfront.c
@@ -633,7 +633,6 @@ static void xenfb_backend_changed(struct xenbus_device *dev,
enum xenbus_state backend_state)
{
struct xenfb_info *info = dev_get_drvdata(&dev->dev);
- int val;
switch (backend_state) {
case XenbusStateInitialising:
@@ -657,16 +656,12 @@ InitWait:
if (dev->state != XenbusStateConnected)
goto InitWait; /* no InitWait seen yet, fudge it */
- if (xenbus_scanf(XBT_NIL, info->xbdev->otherend,
- "request-update", "%d", &val) < 0)
- val = 0;
- if (val)
+ if (xenbus_read_unsigned(info->xbdev->otherend,
+ "request-update", 0))
info->update_wanted = 1;
- if (xenbus_scanf(XBT_NIL, dev->otherend,
- "feature-resize", "%d", &val) < 0)
- val = 0;
- info->feature_resize = val;
+ info->feature_resize = xenbus_read_unsigned(dev->otherend,
+ "feature-resize", 0);
break;
case XenbusStateClosed:
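xenbus_read_unsigned(), another helper new this cycle, parses a xenstore node as an unsigned int and returns the caller's default when the node is absent or malformed, which is what the removed xenbus_scanf()-plus-fallback blocks did by hand ("some-node" is a placeholder):

	/* default_val is returned on any read or parse error */
	unsigned int val = xenbus_read_unsigned(dir, "some-node", default_val);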
diff --git a/drivers/video/hdmi.c b/drivers/video/hdmi.c
index 162689227a23..1cf907ecded4 100644
--- a/drivers/video/hdmi.c
+++ b/drivers/video/hdmi.c
@@ -533,6 +533,10 @@ hdmi_picture_aspect_get_name(enum hdmi_picture_aspect picture_aspect)
return "4:3";
case HDMI_PICTURE_ASPECT_16_9:
return "16:9";
+ case HDMI_PICTURE_ASPECT_64_27:
+ return "64:27";
+ case HDMI_PICTURE_ASPECT_256_135:
+ return "256:135";
case HDMI_PICTURE_ASPECT_RESERVED:
return "Reserved";
}
diff --git a/drivers/video/of_display_timing.c b/drivers/video/of_display_timing.c
index 8a1076beecd3..32b0a7543433 100644
--- a/drivers/video/of_display_timing.c
+++ b/drivers/video/of_display_timing.c
@@ -88,6 +88,15 @@ static int of_parse_display_timing(const struct device_node *np,
dt->flags |= val ? DISPLAY_FLAGS_PIXDATA_POSEDGE :
DISPLAY_FLAGS_PIXDATA_NEGEDGE;
+ if (!of_property_read_u32(np, "syncclk-active", &val))
+ dt->flags |= val ? DISPLAY_FLAGS_SYNC_POSEDGE :
+ DISPLAY_FLAGS_SYNC_NEGEDGE;
+ else if (dt->flags & (DISPLAY_FLAGS_PIXDATA_POSEDGE |
+ DISPLAY_FLAGS_PIXDATA_NEGEDGE))
+ dt->flags |= dt->flags & DISPLAY_FLAGS_PIXDATA_POSEDGE ?
+ DISPLAY_FLAGS_SYNC_POSEDGE :
+ DISPLAY_FLAGS_SYNC_NEGEDGE;
+
if (of_property_read_bool(np, "interlaced"))
dt->flags |= DISPLAY_FLAGS_INTERLACED;
if (of_property_read_bool(np, "doublescan"))
@@ -110,7 +119,7 @@ static int of_parse_display_timing(const struct device_node *np,
* @name: name of the timing node
* @dt: display_timing struct to fill
**/
-int of_get_display_timing(struct device_node *np, const char *name,
+int of_get_display_timing(const struct device_node *np, const char *name,
struct display_timing *dt)
{
struct device_node *timing_np;
@@ -133,7 +142,7 @@ EXPORT_SYMBOL_GPL(of_get_display_timing);
* of_get_display_timings - parse all display_timing entries from a device_node
* @np: device_node with the subnodes
**/
-struct display_timings *of_get_display_timings(struct device_node *np)
+struct display_timings *of_get_display_timings(const struct device_node *np)
{
struct device_node *timings_np;
struct device_node *entry;
@@ -249,7 +258,7 @@ EXPORT_SYMBOL_GPL(of_get_display_timings);
* of_display_timings_exist - check if a display-timings node is provided
* @np: device_node with the timing
**/
-int of_display_timings_exist(struct device_node *np)
+int of_display_timings_exist(const struct device_node *np)
{
struct device_node *timings_np;
diff --git a/drivers/virtio/Kconfig b/drivers/virtio/Kconfig
index 77590320d44c..623f72334fa5 100644
--- a/drivers/virtio/Kconfig
+++ b/drivers/virtio/Kconfig
@@ -75,7 +75,7 @@ config VIRTIO_MMIO_CMDLINE_DEVICES
Allow virtio-mmio devices instantiation via the kernel command line
or module parameters. Be aware that using incorrect parameters (base
address in particular) can crash your system - you have been warned.
- See Documentation/kernel-parameters.txt for details.
+ See Documentation/admin-guide/kernel-parameters.rst for details.
If unsure, say 'N'.
diff --git a/drivers/virtio/virtio_mmio.c b/drivers/virtio/virtio_mmio.c
index 48bfea91dbca..d47a2fcef818 100644
--- a/drivers/virtio/virtio_mmio.c
+++ b/drivers/virtio/virtio_mmio.c
@@ -489,6 +489,7 @@ static const struct virtio_config_ops virtio_mmio_config_ops = {
};
+static void virtio_mmio_release_dev_empty(struct device *_d) {}
/* Platform device */
@@ -511,6 +512,7 @@ static int virtio_mmio_probe(struct platform_device *pdev)
return -ENOMEM;
vm_dev->vdev.dev.parent = &pdev->dev;
+ vm_dev->vdev.dev.release = virtio_mmio_release_dev_empty;
vm_dev->vdev.config = &virtio_mmio_config_ops;
vm_dev->pdev = pdev;
INIT_LIST_HEAD(&vm_dev->virtqueues);
diff --git a/drivers/virtio/virtio_pci_common.c b/drivers/virtio/virtio_pci_common.c
index d9a905827967..186cbab327b8 100644
--- a/drivers/virtio/virtio_pci_common.c
+++ b/drivers/virtio/virtio_pci_common.c
@@ -37,7 +37,7 @@ void vp_synchronize_vectors(struct virtio_device *vdev)
synchronize_irq(vp_dev->pci_dev->irq);
for (i = 0; i < vp_dev->msix_vectors; ++i)
- synchronize_irq(vp_dev->msix_entries[i].vector);
+ synchronize_irq(pci_irq_vector(vp_dev->pci_dev, i));
}
/* the notify function used when creating a virt queue */
@@ -102,41 +102,6 @@ static irqreturn_t vp_interrupt(int irq, void *opaque)
return vp_vring_interrupt(irq, opaque);
}
-static void vp_free_vectors(struct virtio_device *vdev)
-{
- struct virtio_pci_device *vp_dev = to_vp_device(vdev);
- int i;
-
- if (vp_dev->intx_enabled) {
- free_irq(vp_dev->pci_dev->irq, vp_dev);
- vp_dev->intx_enabled = 0;
- }
-
- for (i = 0; i < vp_dev->msix_used_vectors; ++i)
- free_irq(vp_dev->msix_entries[i].vector, vp_dev);
-
- for (i = 0; i < vp_dev->msix_vectors; i++)
- if (vp_dev->msix_affinity_masks[i])
- free_cpumask_var(vp_dev->msix_affinity_masks[i]);
-
- if (vp_dev->msix_enabled) {
- /* Disable the vector used for configuration */
- vp_dev->config_vector(vp_dev, VIRTIO_MSI_NO_VECTOR);
-
- pci_disable_msix(vp_dev->pci_dev);
- vp_dev->msix_enabled = 0;
- }
-
- vp_dev->msix_vectors = 0;
- vp_dev->msix_used_vectors = 0;
- kfree(vp_dev->msix_names);
- vp_dev->msix_names = NULL;
- kfree(vp_dev->msix_entries);
- vp_dev->msix_entries = NULL;
- kfree(vp_dev->msix_affinity_masks);
- vp_dev->msix_affinity_masks = NULL;
-}
-
static int vp_request_msix_vectors(struct virtio_device *vdev, int nvectors,
bool per_vq_vectors)
{
@@ -147,10 +112,6 @@ static int vp_request_msix_vectors(struct virtio_device *vdev, int nvectors,
vp_dev->msix_vectors = nvectors;
- vp_dev->msix_entries = kmalloc(nvectors * sizeof *vp_dev->msix_entries,
- GFP_KERNEL);
- if (!vp_dev->msix_entries)
- goto error;
vp_dev->msix_names = kmalloc(nvectors * sizeof *vp_dev->msix_names,
GFP_KERNEL);
if (!vp_dev->msix_names)
@@ -165,12 +126,9 @@ static int vp_request_msix_vectors(struct virtio_device *vdev, int nvectors,
GFP_KERNEL))
goto error;
- for (i = 0; i < nvectors; ++i)
- vp_dev->msix_entries[i].entry = i;
-
- err = pci_enable_msix_exact(vp_dev->pci_dev,
- vp_dev->msix_entries, nvectors);
- if (err)
+ err = pci_alloc_irq_vectors(vp_dev->pci_dev, nvectors, nvectors,
+ PCI_IRQ_MSIX);
+ if (err < 0)
goto error;
vp_dev->msix_enabled = 1;
@@ -178,7 +136,7 @@ static int vp_request_msix_vectors(struct virtio_device *vdev, int nvectors,
v = vp_dev->msix_used_vectors;
snprintf(vp_dev->msix_names[v], sizeof *vp_dev->msix_names,
"%s-config", name);
- err = request_irq(vp_dev->msix_entries[v].vector,
+ err = request_irq(pci_irq_vector(vp_dev->pci_dev, v),
vp_config_changed, 0, vp_dev->msix_names[v],
vp_dev);
if (err)
@@ -197,7 +155,7 @@ static int vp_request_msix_vectors(struct virtio_device *vdev, int nvectors,
v = vp_dev->msix_used_vectors;
snprintf(vp_dev->msix_names[v], sizeof *vp_dev->msix_names,
"%s-virtqueues", name);
- err = request_irq(vp_dev->msix_entries[v].vector,
+ err = request_irq(pci_irq_vector(vp_dev->pci_dev, v),
vp_vring_interrupt, 0, vp_dev->msix_names[v],
vp_dev);
if (err)
@@ -206,19 +164,6 @@ static int vp_request_msix_vectors(struct virtio_device *vdev, int nvectors,
}
return 0;
error:
- vp_free_vectors(vdev);
- return err;
-}
-
-static int vp_request_intx(struct virtio_device *vdev)
-{
- int err;
- struct virtio_pci_device *vp_dev = to_vp_device(vdev);
-
- err = request_irq(vp_dev->pci_dev->irq, vp_interrupt,
- IRQF_SHARED, dev_name(&vdev->dev), vp_dev);
- if (!err)
- vp_dev->intx_enabled = 1;
return err;
}
@@ -276,67 +221,88 @@ void vp_del_vqs(struct virtio_device *vdev)
{
struct virtio_pci_device *vp_dev = to_vp_device(vdev);
struct virtqueue *vq, *n;
- struct virtio_pci_vq_info *info;
+ int i;
list_for_each_entry_safe(vq, n, &vdev->vqs, list) {
- info = vp_dev->vqs[vq->index];
- if (vp_dev->per_vq_vectors &&
- info->msix_vector != VIRTIO_MSI_NO_VECTOR)
- free_irq(vp_dev->msix_entries[info->msix_vector].vector,
- vq);
+ if (vp_dev->per_vq_vectors) {
+ int v = vp_dev->vqs[vq->index]->msix_vector;
+
+ if (v != VIRTIO_MSI_NO_VECTOR)
+ free_irq(pci_irq_vector(vp_dev->pci_dev, v),
+ vq);
+ }
vp_del_vq(vq);
}
vp_dev->per_vq_vectors = false;
- vp_free_vectors(vdev);
+ if (vp_dev->intx_enabled) {
+ free_irq(vp_dev->pci_dev->irq, vp_dev);
+ vp_dev->intx_enabled = 0;
+ }
+
+ for (i = 0; i < vp_dev->msix_used_vectors; ++i)
+ free_irq(pci_irq_vector(vp_dev->pci_dev, i), vp_dev);
+
+ for (i = 0; i < vp_dev->msix_vectors; i++)
+ if (vp_dev->msix_affinity_masks[i])
+ free_cpumask_var(vp_dev->msix_affinity_masks[i]);
+
+ if (vp_dev->msix_enabled) {
+ /* Disable the vector used for configuration */
+ vp_dev->config_vector(vp_dev, VIRTIO_MSI_NO_VECTOR);
+
+ pci_free_irq_vectors(vp_dev->pci_dev);
+ vp_dev->msix_enabled = 0;
+ }
+
+ vp_dev->msix_vectors = 0;
+ vp_dev->msix_used_vectors = 0;
+ kfree(vp_dev->msix_names);
+ vp_dev->msix_names = NULL;
+ kfree(vp_dev->msix_affinity_masks);
+ vp_dev->msix_affinity_masks = NULL;
kfree(vp_dev->vqs);
vp_dev->vqs = NULL;
}
-static int vp_try_to_find_vqs(struct virtio_device *vdev, unsigned nvqs,
+static int vp_find_vqs_msix(struct virtio_device *vdev, unsigned nvqs,
struct virtqueue *vqs[],
vq_callback_t *callbacks[],
const char * const names[],
- bool use_msix,
bool per_vq_vectors)
{
struct virtio_pci_device *vp_dev = to_vp_device(vdev);
u16 msix_vec;
int i, err, nvectors, allocated_vectors;
- vp_dev->vqs = kmalloc(nvqs * sizeof *vp_dev->vqs, GFP_KERNEL);
+ vp_dev->vqs = kcalloc(nvqs, sizeof(*vp_dev->vqs), GFP_KERNEL);
if (!vp_dev->vqs)
return -ENOMEM;
- if (!use_msix) {
- /* Old style: one normal interrupt for change and all vqs. */
- err = vp_request_intx(vdev);
- if (err)
- goto error_find;
+ if (per_vq_vectors) {
+ /* Best option: one for change interrupt, one per vq. */
+ nvectors = 1;
+ for (i = 0; i < nvqs; ++i)
+ if (callbacks[i])
+ ++nvectors;
} else {
- if (per_vq_vectors) {
- /* Best option: one for change interrupt, one per vq. */
- nvectors = 1;
- for (i = 0; i < nvqs; ++i)
- if (callbacks[i])
- ++nvectors;
- } else {
- /* Second best: one for change, shared for all vqs. */
- nvectors = 2;
- }
-
- err = vp_request_msix_vectors(vdev, nvectors, per_vq_vectors);
- if (err)
- goto error_find;
+ /* Second best: one for change, shared for all vqs. */
+ nvectors = 2;
}
+ err = vp_request_msix_vectors(vdev, nvectors, per_vq_vectors);
+ if (err)
+ goto error_find;
+
vp_dev->per_vq_vectors = per_vq_vectors;
allocated_vectors = vp_dev->msix_used_vectors;
for (i = 0; i < nvqs; ++i) {
if (!names[i]) {
vqs[i] = NULL;
continue;
- } else if (!callbacks[i] || !vp_dev->msix_enabled)
+ }
+
+ if (!callbacks[i])
msix_vec = VIRTIO_MSI_NO_VECTOR;
else if (vp_dev->per_vq_vectors)
msix_vec = allocated_vectors++;
@@ -356,14 +322,12 @@ static int vp_try_to_find_vqs(struct virtio_device *vdev, unsigned nvqs,
sizeof *vp_dev->msix_names,
"%s-%s",
dev_name(&vp_dev->vdev.dev), names[i]);
- err = request_irq(vp_dev->msix_entries[msix_vec].vector,
+ err = request_irq(pci_irq_vector(vp_dev->pci_dev, msix_vec),
vring_interrupt, 0,
vp_dev->msix_names[msix_vec],
vqs[i]);
- if (err) {
- vp_del_vq(vqs[i]);
+ if (err)
goto error_find;
- }
}
return 0;
@@ -372,6 +336,43 @@ error_find:
return err;
}
+static int vp_find_vqs_intx(struct virtio_device *vdev, unsigned nvqs,
+ struct virtqueue *vqs[], vq_callback_t *callbacks[],
+ const char * const names[])
+{
+ struct virtio_pci_device *vp_dev = to_vp_device(vdev);
+ int i, err;
+
+ vp_dev->vqs = kcalloc(nvqs, sizeof(*vp_dev->vqs), GFP_KERNEL);
+ if (!vp_dev->vqs)
+ return -ENOMEM;
+
+ err = request_irq(vp_dev->pci_dev->irq, vp_interrupt, IRQF_SHARED,
+ dev_name(&vdev->dev), vp_dev);
+ if (err)
+ goto out_del_vqs;
+
+ vp_dev->intx_enabled = 1;
+ vp_dev->per_vq_vectors = false;
+ for (i = 0; i < nvqs; ++i) {
+ if (!names[i]) {
+ vqs[i] = NULL;
+ continue;
+ }
+ vqs[i] = vp_setup_vq(vdev, i, callbacks[i], names[i],
+ VIRTIO_MSI_NO_VECTOR);
+ if (IS_ERR(vqs[i])) {
+ err = PTR_ERR(vqs[i]);
+ goto out_del_vqs;
+ }
+ }
+
+ return 0;
+out_del_vqs:
+ vp_del_vqs(vdev);
+ return err;
+}
+
/* the config->find_vqs() implementation */
int vp_find_vqs(struct virtio_device *vdev, unsigned nvqs,
struct virtqueue *vqs[],
@@ -381,17 +382,15 @@ int vp_find_vqs(struct virtio_device *vdev, unsigned nvqs,
int err;
/* Try MSI-X with one vector per queue. */
- err = vp_try_to_find_vqs(vdev, nvqs, vqs, callbacks, names, true, true);
+ err = vp_find_vqs_msix(vdev, nvqs, vqs, callbacks, names, true);
if (!err)
return 0;
/* Fallback: MSI-X with one vector for config, one shared for queues. */
- err = vp_try_to_find_vqs(vdev, nvqs, vqs, callbacks, names,
- true, false);
+ err = vp_find_vqs_msix(vdev, nvqs, vqs, callbacks, names, false);
if (!err)
return 0;
/* Finally fall back to regular interrupts. */
- return vp_try_to_find_vqs(vdev, nvqs, vqs, callbacks, names,
- false, false);
+ return vp_find_vqs_intx(vdev, nvqs, vqs, callbacks, names);
}
const char *vp_bus_name(struct virtio_device *vdev)
@@ -419,7 +418,7 @@ int vp_set_vq_affinity(struct virtqueue *vq, int cpu)
if (vp_dev->msix_enabled) {
mask = vp_dev->msix_affinity_masks[info->msix_vector];
- irq = vp_dev->msix_entries[info->msix_vector].vector;
+ irq = pci_irq_vector(vp_dev->pci_dev, info->msix_vector);
if (cpu == -1)
irq_set_affinity_hint(irq, NULL);
else {
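The theme of this file's conversion: the driver stops maintaining its own struct msix_entry array and lets the PCI core own the vector bookkeeping, translating a vector index to a Linux IRQ number only at the use sites. The generic skeleton:

	int nvec = pci_alloc_irq_vectors(pdev, nvectors, nvectors, PCI_IRQ_MSIX);
	if (nvec < 0)
		return nvec;		/* exactly nvectors or failure */

	err = request_irq(pci_irq_vector(pdev, idx), handler, 0, name, priv);
	/* ... on teardown ... */
	free_irq(pci_irq_vector(pdev, idx), priv);
	pci_free_irq_vectors(pdev);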
diff --git a/drivers/virtio/virtio_pci_common.h b/drivers/virtio/virtio_pci_common.h
index 28263200ed42..b2f666250ae0 100644
--- a/drivers/virtio/virtio_pci_common.h
+++ b/drivers/virtio/virtio_pci_common.h
@@ -85,7 +85,6 @@ struct virtio_pci_device {
/* MSI-X support */
int msix_enabled;
int intx_enabled;
- struct msix_entry *msix_entries;
cpumask_var_t *msix_affinity_masks;
/* Name strings for interrupts. This size should be enough,
* and I'm too lazy to allocate each name separately. */
diff --git a/drivers/virtio/virtio_pci_modern.c b/drivers/virtio/virtio_pci_modern.c
index e76bd91a29da..4bf7ab375894 100644
--- a/drivers/virtio/virtio_pci_modern.c
+++ b/drivers/virtio/virtio_pci_modern.c
@@ -33,12 +33,12 @@ static inline u8 vp_ioread8(u8 __iomem *addr)
{
return ioread8(addr);
}
-static inline u16 vp_ioread16 (u16 __iomem *addr)
+static inline u16 vp_ioread16 (__le16 __iomem *addr)
{
return ioread16(addr);
}
-static inline u32 vp_ioread32(u32 __iomem *addr)
+static inline u32 vp_ioread32(__le32 __iomem *addr)
{
return ioread32(addr);
}
@@ -48,12 +48,12 @@ static inline void vp_iowrite8(u8 value, u8 __iomem *addr)
iowrite8(value, addr);
}
-static inline void vp_iowrite16(u16 value, u16 __iomem *addr)
+static inline void vp_iowrite16(u16 value, __le16 __iomem *addr)
{
iowrite16(value, addr);
}
-static inline void vp_iowrite32(u32 value, u32 __iomem *addr)
+static inline void vp_iowrite32(u32 value, __le32 __iomem *addr)
{
iowrite32(value, addr);
}
diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
index 489bfc61cf30..409aeaa49246 100644
--- a/drivers/virtio/virtio_ring.c
+++ b/drivers/virtio/virtio_ring.c
@@ -420,7 +420,7 @@ unmap_release:
if (i == err_idx)
break;
vring_unmap_one(vq, &desc[i]);
- i = vq->vring.desc[i].next;
+ i = virtio16_to_cpu(_vq->vdev, vq->vring.desc[i].next);
}
vq->vq.num_free += total_sg;
@@ -601,7 +601,7 @@ EXPORT_SYMBOL_GPL(virtqueue_kick);
static void detach_buf(struct vring_virtqueue *vq, unsigned int head)
{
unsigned int i, j;
- u16 nextflag = cpu_to_virtio16(vq->vq.vdev, VRING_DESC_F_NEXT);
+ __virtio16 nextflag = cpu_to_virtio16(vq->vq.vdev, VRING_DESC_F_NEXT);
/* Clear data ptr. */
vq->desc_state[head].data = NULL;
@@ -649,7 +649,7 @@ static inline bool more_used(const struct vring_virtqueue *vq)
* @vq: the struct virtqueue we're talking about.
* @len: the length written into the buffer
*
- * If the driver wrote data into the buffer, @len will be set to the
+ * If the device wrote data into the buffer, @len will be set to the
* amount written. This means you don't need to clear the buffer
* beforehand to ensure there's no data leakage in the case of short
* writes.
diff --git a/drivers/watchdog/mei_wdt.c b/drivers/watchdog/mei_wdt.c
index 2a9d5cdedea2..b29c6fde7473 100644
--- a/drivers/watchdog/mei_wdt.c
+++ b/drivers/watchdog/mei_wdt.c
@@ -412,11 +412,11 @@ static void mei_wdt_unregister_work(struct work_struct *work)
}
/**
- * mei_wdt_event_rx - callback for data receive
+ * mei_wdt_rx - callback for data receive
*
* @cldev: bus device
*/
-static void mei_wdt_event_rx(struct mei_cl_device *cldev)
+static void mei_wdt_rx(struct mei_cl_device *cldev)
{
struct mei_wdt *wdt = mei_cldev_get_drvdata(cldev);
struct mei_wdt_start_response res;
@@ -484,11 +484,11 @@ out:
}
/*
- * mei_wdt_notify_event - callback for event notification
+ * mei_wdt_notif - callback for event notification
*
* @cldev: bus device
*/
-static void mei_wdt_notify_event(struct mei_cl_device *cldev)
+static void mei_wdt_notif(struct mei_cl_device *cldev)
{
struct mei_wdt *wdt = mei_cldev_get_drvdata(cldev);
@@ -498,23 +498,6 @@ static void mei_wdt_notify_event(struct mei_cl_device *cldev)
mei_wdt_register(wdt);
}
-/**
- * mei_wdt_event - callback for event receive
- *
- * @cldev: bus device
- * @events: event mask
- * @context: callback context
- */
-static void mei_wdt_event(struct mei_cl_device *cldev,
- u32 events, void *context)
-{
- if (events & BIT(MEI_CL_EVENT_RX))
- mei_wdt_event_rx(cldev);
-
- if (events & BIT(MEI_CL_EVENT_NOTIF))
- mei_wdt_notify_event(cldev);
-}
-
#if IS_ENABLED(CONFIG_DEBUG_FS)
static ssize_t mei_dbgfs_read_activation(struct file *file, char __user *ubuf,
@@ -625,16 +608,17 @@ static int mei_wdt_probe(struct mei_cl_device *cldev,
goto err_out;
}
- ret = mei_cldev_register_event_cb(wdt->cldev,
- BIT(MEI_CL_EVENT_RX) |
- BIT(MEI_CL_EVENT_NOTIF),
- mei_wdt_event, NULL);
+ ret = mei_cldev_register_rx_cb(wdt->cldev, mei_wdt_rx);
+ if (ret) {
+ dev_err(&cldev->dev, "Could not reg rx event ret=%d\n", ret);
+ goto err_disable;
+ }
+ ret = mei_cldev_register_notif_cb(wdt->cldev, mei_wdt_notif);
/* on legacy devices notification is not supported
- * this doesn't fail the registration for RX event
*/
if (ret && ret != -EOPNOTSUPP) {
- dev_err(&cldev->dev, "Could not register event ret=%d\n", ret);
+ dev_err(&cldev->dev, "Could not reg notif event ret=%d\n", ret);
goto err_disable;
}
@@ -701,25 +685,7 @@ static struct mei_cl_driver mei_wdt_driver = {
.remove = mei_wdt_remove,
};
-static int __init mei_wdt_init(void)
-{
- int ret;
-
- ret = mei_cldev_driver_register(&mei_wdt_driver);
- if (ret) {
- pr_err(KBUILD_MODNAME ": module registration failed\n");
- return ret;
- }
- return 0;
-}
-
-static void __exit mei_wdt_exit(void)
-{
- mei_cldev_driver_unregister(&mei_wdt_driver);
-}
-
-module_init(mei_wdt_init);
-module_exit(mei_wdt_exit);
+module_mei_cl_driver(mei_wdt_driver);
MODULE_AUTHOR("Intel Corporation");
MODULE_LICENSE("GPL");
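module_mei_cl_driver() is a module_driver()-style convenience macro; the replacement is behaviorally equivalent to the removed init/exit pair apart from the dropped pr_err() on registration failure, expanding approximately to:

static int __init mei_wdt_driver_init(void)
{
	return mei_cldev_driver_register(&mei_wdt_driver);
}
module_init(mei_wdt_driver_init);

static void __exit mei_wdt_driver_exit(void)
{
	mei_cldev_driver_unregister(&mei_wdt_driver);
}
module_exit(mei_wdt_driver_exit);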
diff --git a/drivers/watchdog/octeon-wdt-main.c b/drivers/watchdog/octeon-wdt-main.c
index 8b4fa4df3fc1..b5cdceb36cff 100644
--- a/drivers/watchdog/octeon-wdt-main.c
+++ b/drivers/watchdog/octeon-wdt-main.c
@@ -373,7 +373,7 @@ void octeon_wdt_nmi_stage3(u64 reg[32])
octeon_wdt_write_string("*** Chip soft reset soon ***\r\n");
}
-static void octeon_wdt_disable_interrupt(int cpu)
+static int octeon_wdt_cpu_pre_down(unsigned int cpu)
{
unsigned int core;
unsigned int irq;
@@ -391,9 +391,10 @@ static void octeon_wdt_disable_interrupt(int cpu)
cvmx_write_csr(CVMX_CIU_WDOGX(core), ciu_wdog.u64);
free_irq(irq, octeon_wdt_poke_irq);
+ return 0;
}
-static void octeon_wdt_setup_interrupt(int cpu)
+static int octeon_wdt_cpu_online(unsigned int cpu)
{
unsigned int core;
unsigned int irq;
@@ -423,25 +424,8 @@ static void octeon_wdt_setup_interrupt(int cpu)
ciu_wdog.s.len = timeout_cnt;
ciu_wdog.s.mode = 3; /* 3 = Interrupt + NMI + Soft-Reset */
cvmx_write_csr(CVMX_CIU_WDOGX(core), ciu_wdog.u64);
-}
-static int octeon_wdt_cpu_callback(struct notifier_block *nfb,
- unsigned long action, void *hcpu)
-{
- unsigned int cpu = (unsigned long)hcpu;
-
- switch (action & ~CPU_TASKS_FROZEN) {
- case CPU_DOWN_PREPARE:
- octeon_wdt_disable_interrupt(cpu);
- break;
- case CPU_ONLINE:
- case CPU_DOWN_FAILED:
- octeon_wdt_setup_interrupt(cpu);
- break;
- default:
- break;
- }
- return NOTIFY_OK;
+ return 0;
}
static int octeon_wdt_ping(struct watchdog_device __always_unused *wdog)
@@ -530,10 +514,6 @@ static int octeon_wdt_stop(struct watchdog_device *wdog)
return 0;
}
-static struct notifier_block octeon_wdt_cpu_notifier = {
- .notifier_call = octeon_wdt_cpu_callback,
-};
-
static const struct watchdog_info octeon_wdt_info = {
.options = WDIOF_SETTIMEOUT | WDIOF_MAGICCLOSE | WDIOF_KEEPALIVEPING,
.identity = "OCTEON",
@@ -552,6 +532,7 @@ static struct watchdog_device octeon_wdt = {
.ops = &octeon_wdt_ops,
};
+static enum cpuhp_state octeon_wdt_online;
/**
* Module/ driver initialization.
*
@@ -561,7 +542,6 @@ static int __init octeon_wdt_init(void)
{
int i;
int ret;
- int cpu;
u64 *ptr;
/*
@@ -609,14 +589,16 @@ static int __init octeon_wdt_init(void)
cpumask_clear(&irq_enabled_cpus);
- cpu_notifier_register_begin();
- for_each_online_cpu(cpu)
- octeon_wdt_setup_interrupt(cpu);
-
- __register_hotcpu_notifier(&octeon_wdt_cpu_notifier);
- cpu_notifier_register_done();
-
+ ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "watchdog/octeon:online",
+ octeon_wdt_cpu_online, octeon_wdt_cpu_pre_down);
+ if (ret < 0)
+ goto err;
+ octeon_wdt_online = ret;
return 0;
+err:
+ cvmx_write_csr(CVMX_MIO_BOOT_LOC_CFGX(0), 0);
+ watchdog_unregister_device(&octeon_wdt);
+ return ret;
}
/**
@@ -624,22 +606,8 @@ static int __init octeon_wdt_init(void)
*/
static void __exit octeon_wdt_cleanup(void)
{
- int cpu;
-
watchdog_unregister_device(&octeon_wdt);
-
- cpu_notifier_register_begin();
- __unregister_hotcpu_notifier(&octeon_wdt_cpu_notifier);
-
- for_each_online_cpu(cpu) {
- int core = cpu2core(cpu);
- /* Disable the watchdog */
- cvmx_write_csr(CVMX_CIU_WDOGX(core), 0);
- /* Free the interrupt handler */
- free_irq(OCTEON_IRQ_WDOG0 + core, octeon_wdt_poke_irq);
- }
-
- cpu_notifier_register_done();
+ cpuhp_remove_state(octeon_wdt_online);
/*
* Disable the boot-bus memory, the code it points to is soon
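This is the standard conversion to the CPU hotplug state machine: cpuhp_setup_state() with CPUHP_AP_ONLINE_DYN registers both callbacks and synchronously runs the online callback on every CPU already up, replacing the notifier plus for_each_online_cpu() loop, while cpuhp_remove_state() at module exit runs the teardown callback on each online CPU. The generic skeleton:

	static enum cpuhp_state hp_state;

	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "subsys/drv:online",
				drv_cpu_online, drv_cpu_pre_down);
	if (ret < 0)
		goto err;
	hp_state = ret;			/* dynamic states return the allocated id */
	/* ... */
	cpuhp_remove_state(hp_state);	/* runs drv_cpu_pre_down() on each CPU */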
diff --git a/drivers/watchdog/sa1100_wdt.c b/drivers/watchdog/sa1100_wdt.c
index e1d39a1e9628..8965e3f536c3 100644
--- a/drivers/watchdog/sa1100_wdt.c
+++ b/drivers/watchdog/sa1100_wdt.c
@@ -22,6 +22,7 @@
#include <linux/module.h>
#include <linux/moduleparam.h>
+#include <linux/clk.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/fs.h>
@@ -155,12 +156,27 @@ static struct miscdevice sa1100dog_miscdev = {
};
static int margin __initdata = 60; /* (secs) Default is 1 minute */
+static struct clk *clk;
static int __init sa1100dog_init(void)
{
int ret;
- oscr_freq = get_clock_tick_rate();
+ clk = clk_get(NULL, "OSTIMER0");
+ if (IS_ERR(clk)) {
+ pr_err("SA1100/PXA2xx Watchdog Timer: clock not found: %d\n",
+ (int) PTR_ERR(clk));
+ return PTR_ERR(clk);
+ }
+
+ ret = clk_prepare_enable(clk);
+ if (ret) {
+ pr_err("SA1100/PXA2xx Watchdog Timer: clock failed to prepare+enable: %d\n",
+ ret);
+ goto err;
+ }
+
+ oscr_freq = clk_get_rate(clk);
/*
* Read the reset status, and save it for later. If
@@ -176,11 +192,17 @@ static int __init sa1100dog_init(void)
pr_info("SA1100/PXA2xx Watchdog Timer: timer margin %d sec\n",
margin);
return ret;
+err:
+ clk_disable_unprepare(clk);
+ clk_put(clk);
+ return ret;
}
static void __exit sa1100dog_exit(void)
{
misc_deregister(&sa1100dog_miscdev);
+ clk_disable_unprepare(clk);
+ clk_put(clk);
}
module_init(sa1100dog_init);
diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c
index e4db19e88ab1..db107fa50ca1 100644
--- a/drivers/xen/balloon.c
+++ b/drivers/xen/balloon.c
@@ -180,7 +180,6 @@ static void __balloon_append(struct page *page)
static void balloon_append(struct page *page)
{
__balloon_append(page);
- adjust_managed_page_count(page, -1);
}
/* balloon_retrieve: rescue a page from the balloon, if it is not empty. */
@@ -201,8 +200,6 @@ static struct page *balloon_retrieve(bool require_lowmem)
else
balloon_stats.balloon_low--;
- adjust_managed_page_count(page, 1);
-
return page;
}
@@ -478,7 +475,7 @@ static enum bp_state increase_reservation(unsigned long nr_pages)
#endif
/* Relinquish the page back to the allocator. */
- __free_reserved_page(page);
+ free_reserved_page(page);
}
balloon_stats.current_pages += rc;
@@ -509,6 +506,7 @@ static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp)
state = BP_EAGAIN;
break;
}
+ adjust_managed_page_count(page, -1);
scrub_page(page);
list_add(&page->lru, &pages);
}
diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c
index 9ecfcdcdd6d6..fd8e872d2943 100644
--- a/drivers/xen/events/events_base.c
+++ b/drivers/xen/events/events_base.c
@@ -37,7 +37,6 @@
#include <asm/desc.h>
#include <asm/ptrace.h>
#include <asm/irq.h>
-#include <asm/idle.h>
#include <asm/io_apic.h>
#include <asm/i8259.h>
#include <asm/xen/pci.h>
@@ -948,7 +947,7 @@ static int find_virq(unsigned int virq, unsigned int cpu)
continue;
if (status.status != EVTCHNSTAT_virq)
continue;
- if (status.u.virq == virq && status.vcpu == cpu) {
+ if (status.u.virq == virq && status.vcpu == xen_vcpu_nr(cpu)) {
rc = port;
break;
}
@@ -1256,7 +1255,6 @@ void xen_evtchn_do_upcall(struct pt_regs *regs)
irq_enter();
#ifdef CONFIG_X86
- exit_idle();
inc_irq_stat(irq_hv_callback_count);
#endif
diff --git a/drivers/xen/gntalloc.c b/drivers/xen/gntalloc.c
index 7a47c4c9fb1b..1bf55a32a4b3 100644
--- a/drivers/xen/gntalloc.c
+++ b/drivers/xen/gntalloc.c
@@ -127,18 +127,21 @@ static int add_grefs(struct ioctl_gntalloc_alloc_gref *op,
struct gntalloc_gref *gref, *next;
readonly = !(op->flags & GNTALLOC_FLAG_WRITABLE);
- rc = -ENOMEM;
for (i = 0; i < op->count; i++) {
gref = kzalloc(sizeof(*gref), GFP_KERNEL);
- if (!gref)
+ if (!gref) {
+ rc = -ENOMEM;
goto undo;
+ }
list_add_tail(&gref->next_gref, &queue_gref);
list_add_tail(&gref->next_file, &queue_file);
gref->users = 1;
gref->file_index = op->index + i * PAGE_SIZE;
gref->page = alloc_page(GFP_KERNEL|__GFP_ZERO);
- if (!gref->page)
+ if (!gref->page) {
+ rc = -ENOMEM;
goto undo;
+ }
/* Grant foreign access to the page. */
rc = gnttab_grant_foreign_access(op->domid,
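Note: moving the rc = -ENOMEM assignments into the failure branches matters because rc is reused inside the loop: gnttab_grant_foreign_access() stores a non-negative grant reference in rc on success, so with the single up-front preset, an allocation failure on a later iteration would reach the undo path carrying that stale non-negative value instead of an error. Schematic of the old control flow (arguments elided):

rc = -ENOMEM;                           /* preset once (old code) */
for (i = 0; i < op->count; i++) {
        gref = kzalloc(...);
        if (!gref)
                goto undo;              /* first iteration: rc == -ENOMEM, OK */
        ...
        rc = gnttab_grant_foreign_access(...);  /* rc >= 0 on success */
        ...
        /* next iteration: an alloc failure reaches undo with rc >= 0 */
}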
diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c
index bb952121ea94..2ef2b61b69df 100644
--- a/drivers/xen/gntdev.c
+++ b/drivers/xen/gntdev.c
@@ -1007,7 +1007,7 @@ static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma)
vma->vm_ops = &gntdev_vmops;
- vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP | VM_IO;
+ vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP | VM_MIXEDMAP;
if (use_ptemod)
vma->vm_flags |= VM_DONTCOPY;
diff --git a/drivers/xen/platform-pci.c b/drivers/xen/platform-pci.c
index b59c9455aae1..112ce422dc22 100644
--- a/drivers/xen/platform-pci.c
+++ b/drivers/xen/platform-pci.c
@@ -125,8 +125,4 @@ static struct pci_driver platform_driver = {
.id_table = platform_pci_tbl,
};
-static int __init platform_pci_init(void)
-{
- return pci_register_driver(&platform_driver);
-}
-device_initcall(platform_pci_init);
+builtin_pci_driver(platform_driver);
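Note: builtin_pci_driver() expands to exactly the boilerplate being deleted. Via builtin_driver() from <linux/device.h>, roughly:

/* include/linux/pci.h */
#define builtin_pci_driver(__pci_driver) \
        builtin_driver(__pci_driver, pci_register_driver)

/* include/linux/device.h */
#define builtin_driver(__driver, __register, ...) \
static int __init __driver##_init(void) \
{ \
        return __register(&(__driver) , ##__VA_ARGS__); \
} \
device_initcall(__driver##_init);

The net effect is unchanged (a device_initcall that registers the driver); the macro just removes the hand-rolled initcall for built-in-only drivers.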
diff --git a/drivers/xen/privcmd.c b/drivers/xen/privcmd.c
index 702040fe2001..6e3306f4a525 100644
--- a/drivers/xen/privcmd.c
+++ b/drivers/xen/privcmd.c
@@ -602,7 +602,7 @@ static int privcmd_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
printk(KERN_DEBUG "privcmd_fault: vma=%p %lx-%lx, pgoff=%lx, uv=%p\n",
vma, vma->vm_start, vma->vm_end,
- vmf->pgoff, vmf->virtual_address);
+ vmf->pgoff, (void *)vmf->address);
return VM_FAULT_SIGBUS;
}
diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
index 87e6035c9e81..478fb91e3df2 100644
--- a/drivers/xen/swiotlb-xen.c
+++ b/drivers/xen/swiotlb-xen.c
@@ -405,7 +405,8 @@ dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
*/
trace_swiotlb_bounced(dev, dev_addr, size, swiotlb_force);
- map = swiotlb_tbl_map_single(dev, start_dma_addr, phys, size, dir);
+ map = swiotlb_tbl_map_single(dev, start_dma_addr, phys, size, dir,
+ attrs);
if (map == SWIOTLB_MAP_ERROR)
return DMA_ERROR_CODE;
@@ -416,11 +417,13 @@ dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
/*
* Ensure that the address returned is DMA'ble
*/
- if (!dma_capable(dev, dev_addr, size)) {
- swiotlb_tbl_unmap_single(dev, map, size, dir);
- dev_addr = 0;
- }
- return dev_addr;
+ if (dma_capable(dev, dev_addr, size))
+ return dev_addr;
+
+ attrs |= DMA_ATTR_SKIP_CPU_SYNC;
+ swiotlb_tbl_unmap_single(dev, map, size, dir, attrs);
+
+ return DMA_ERROR_CODE;
}
EXPORT_SYMBOL_GPL(xen_swiotlb_map_page);
@@ -444,7 +447,7 @@ static void xen_unmap_single(struct device *hwdev, dma_addr_t dev_addr,
/* NOTE: We use dev_addr here, not paddr! */
if (is_xen_swiotlb_buffer(dev_addr)) {
- swiotlb_tbl_unmap_single(hwdev, paddr, size, dir);
+ swiotlb_tbl_unmap_single(hwdev, paddr, size, dir, attrs);
return;
}
@@ -557,11 +560,12 @@ xen_swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
start_dma_addr,
sg_phys(sg),
sg->length,
- dir);
+ dir, attrs);
if (map == SWIOTLB_MAP_ERROR) {
dev_warn(hwdev, "swiotlb buffer is full\n");
/* Don't panic here, we expect map_sg users
to do proper error handling. */
+ attrs |= DMA_ATTR_SKIP_CPU_SYNC;
xen_swiotlb_unmap_sg_attrs(hwdev, sgl, i, dir,
attrs);
sg_dma_len(sgl) = 0;
@@ -648,13 +652,6 @@ xen_swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
}
EXPORT_SYMBOL_GPL(xen_swiotlb_sync_sg_for_device);
-int
-xen_swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t dma_addr)
-{
- return !dma_addr;
-}
-EXPORT_SYMBOL_GPL(xen_swiotlb_dma_mapping_error);
-
/*
* Return whether the given device DMA address mask can be supported
* properly. For example, if your device can only drive the low 24-bits
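Note: two things change together here. First, the error and unmap paths pass DMA_ATTR_SKIP_CPU_SYNC where no data was ever transferred, so swiotlb does not needlessly bounce the slot contents back. Second, with the mapping_error hook deleted, failure is reported the generic way: the map routine returns DMA_ERROR_CODE and callers test the handle with dma_mapping_error(), which in this era falls back to comparing against DMA_ERROR_CODE when the bus ops provide no hook. A caller-side sketch (hypothetical dev/page/size):

dma_addr_t handle;

handle = dma_map_page(dev, page, 0, size, DMA_TO_DEVICE);
if (dma_mapping_error(dev, handle))
        return -ENOMEM;         /* mapping failed; nothing to unmap */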
diff --git a/drivers/xen/xen-pciback/xenbus.c b/drivers/xen/xen-pciback/xenbus.c
index 5ce878c51d03..3f0aee0a068b 100644
--- a/drivers/xen/xen-pciback/xenbus.c
+++ b/drivers/xen/xen-pciback/xenbus.c
@@ -362,7 +362,7 @@ static int xen_pcibk_reconfigure(struct xen_pcibk_device *pdev)
int err = 0;
int num_devs;
int domain, bus, slot, func;
- int substate;
+ unsigned int substate;
int i, len;
char state_str[64];
char dev_str[64];
@@ -395,10 +395,8 @@ static int xen_pcibk_reconfigure(struct xen_pcibk_device *pdev)
"configuration");
goto out;
}
- err = xenbus_scanf(XBT_NIL, pdev->xdev->nodename, state_str,
- "%d", &substate);
- if (err != 1)
- substate = XenbusStateUnknown;
+ substate = xenbus_read_unsigned(pdev->xdev->nodename, state_str,
+ XenbusStateUnknown);
switch (substate) {
case XenbusStateInitialising:
diff --git a/drivers/xen/xenbus/xenbus_dev_frontend.c b/drivers/xen/xenbus/xenbus_dev_frontend.c
index 1e8be12ebb55..6c0ead4be784 100644
--- a/drivers/xen/xenbus/xenbus_dev_frontend.c
+++ b/drivers/xen/xenbus/xenbus_dev_frontend.c
@@ -538,6 +538,8 @@ static int xenbus_file_open(struct inode *inode, struct file *filp)
nonseekable_open(inode, filp);
+ filp->f_mode &= ~FMODE_ATOMIC_POS; /* cdev-style semantics */
+
u = kzalloc(sizeof(*u), GFP_KERNEL);
if (u == NULL)
return -ENOMEM;
diff --git a/drivers/xen/xenbus/xenbus_probe.c b/drivers/xen/xenbus/xenbus_probe.c
index 33a31cfef55d..4bdf654041e9 100644
--- a/drivers/xen/xenbus/xenbus_probe.c
+++ b/drivers/xen/xenbus/xenbus_probe.c
@@ -702,7 +702,7 @@ device_initcall(xenbus_probe_initcall);
*/
static int __init xenstored_local_init(void)
{
- int err = 0;
+ int err = -ENOMEM;
unsigned long page = 0;
struct evtchn_alloc_unbound alloc_unbound;
@@ -826,7 +826,7 @@ static int __init xenbus_init(void)
* Create xenfs mountpoint in /proc for compatibility with
* utilities that expect to find "xenbus" under "/proc/xen".
*/
- proc_mkdir("xen", NULL);
+ proc_create_mount_point("xen");
#endif
out_error:
diff --git a/drivers/xen/xenbus/xenbus_probe_backend.c b/drivers/xen/xenbus/xenbus_probe_backend.c
index 04f7f85a5edf..37929df829a3 100644
--- a/drivers/xen/xenbus/xenbus_probe_backend.c
+++ b/drivers/xen/xenbus/xenbus_probe_backend.c
@@ -224,13 +224,7 @@ static int read_frontend_details(struct xenbus_device *xendev)
int xenbus_dev_is_online(struct xenbus_device *dev)
{
- int rc, val;
-
- rc = xenbus_scanf(XBT_NIL, dev->nodename, "online", "%d", &val);
- if (rc != 1)
- val = 0; /* no online node present */
-
- return val;
+ return !!xenbus_read_unsigned(dev->nodename, "online", 0);
}
EXPORT_SYMBOL_GPL(xenbus_dev_is_online);
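Note: xenbus_read_unsigned() (added in xenbus_xs.c below) folds the scanf-plus-default dance into one call; the xenbus_dev_is_online() conversion above shows the resulting caller shape. The same mechanical rewrite applies anywhere an optional numeric node is read, e.g. for a hypothetical "feature-foo" node:

/* before */
int val;
if (xenbus_scanf(XBT_NIL, dev->nodename, "feature-foo", "%d", &val) != 1)
        val = 0;

/* after */
unsigned int val = xenbus_read_unsigned(dev->nodename, "feature-foo", 0);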
diff --git a/drivers/xen/xenbus/xenbus_xs.c b/drivers/xen/xenbus/xenbus_xs.c
index 22f7cd711c57..6afb993c5809 100644
--- a/drivers/xen/xenbus/xenbus_xs.c
+++ b/drivers/xen/xenbus/xenbus_xs.c
@@ -559,6 +559,21 @@ int xenbus_scanf(struct xenbus_transaction t,
}
EXPORT_SYMBOL_GPL(xenbus_scanf);
+/* Read an (optional) unsigned value. */
+unsigned int xenbus_read_unsigned(const char *dir, const char *node,
+ unsigned int default_val)
+{
+ unsigned int val;
+ int ret;
+
+ ret = xenbus_scanf(XBT_NIL, dir, node, "%u", &val);
+ if (ret <= 0)
+ val = default_val;
+
+ return val;
+}
+EXPORT_SYMBOL_GPL(xenbus_read_unsigned);
+
/* Single printf and write: returns -errno or 0. */
int xenbus_printf(struct xenbus_transaction t,
const char *dir, const char *node, const char *fmt, ...)
@@ -672,7 +687,7 @@ static bool xen_strict_xenbus_quirk(void)
}
static void xs_reset_watches(void)
{
- int err, supported = 0;
+ int err;
if (!xen_hvm_domain() || xen_initial_domain())
return;
@@ -680,9 +695,8 @@ static void xs_reset_watches(void)
if (xen_strict_xenbus_quirk())
return;
- err = xenbus_scanf(XBT_NIL, "control",
- "platform-feature-xs_reset_watches", "%d", &supported);
- if (err != 1 || !supported)
+ if (!xenbus_read_unsigned("control",
+ "platform-feature-xs_reset_watches", 0))
return;
err = xs_error(xs_single(XBT_NIL, XS_RESET_WATCHES, "", NULL));